author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/python
parent     Initial commit. (diff)
download   firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
           firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip

Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/python')
-rw-r--r--  third_party/python/Jinja2/Jinja2-2.11.3.dist-info/LICENSE.rst  28
-rw-r--r--  third_party/python/Jinja2/Jinja2-2.11.3.dist-info/METADATA  106
-rw-r--r--  third_party/python/Jinja2/Jinja2-2.11.3.dist-info/RECORD  33
-rw-r--r--  third_party/python/Jinja2/Jinja2-2.11.3.dist-info/WHEEL  6
-rw-r--r--  third_party/python/Jinja2/Jinja2-2.11.3.dist-info/entry_points.txt  3
-rw-r--r--  third_party/python/Jinja2/Jinja2-2.11.3.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/Jinja2/jinja2/__init__.py  44
-rw-r--r--  third_party/python/Jinja2/jinja2/_compat.py  132
-rw-r--r--  third_party/python/Jinja2/jinja2/_identifier.py  6
-rw-r--r--  third_party/python/Jinja2/jinja2/asyncfilters.py  158
-rw-r--r--  third_party/python/Jinja2/jinja2/asyncsupport.py  264
-rw-r--r--  third_party/python/Jinja2/jinja2/bccache.py  350
-rw-r--r--  third_party/python/Jinja2/jinja2/compiler.py  1843
-rw-r--r--  third_party/python/Jinja2/jinja2/constants.py  21
-rw-r--r--  third_party/python/Jinja2/jinja2/debug.py  268
-rw-r--r--  third_party/python/Jinja2/jinja2/defaults.py  44
-rw-r--r--  third_party/python/Jinja2/jinja2/environment.py  1362
-rw-r--r--  third_party/python/Jinja2/jinja2/exceptions.py  177
-rw-r--r--  third_party/python/Jinja2/jinja2/ext.py  704
-rw-r--r--  third_party/python/Jinja2/jinja2/filters.py  1382
-rw-r--r--  third_party/python/Jinja2/jinja2/idtracking.py  290
-rw-r--r--  third_party/python/Jinja2/jinja2/lexer.py  848
-rw-r--r--  third_party/python/Jinja2/jinja2/loaders.py  504
-rw-r--r--  third_party/python/Jinja2/jinja2/meta.py  101
-rw-r--r--  third_party/python/Jinja2/jinja2/nativetypes.py  94
-rw-r--r--  third_party/python/Jinja2/jinja2/nodes.py  1088
-rw-r--r--  third_party/python/Jinja2/jinja2/optimizer.py  41
-rw-r--r--  third_party/python/Jinja2/jinja2/parser.py  939
-rw-r--r--  third_party/python/Jinja2/jinja2/runtime.py  1011
-rw-r--r--  third_party/python/Jinja2/jinja2/sandbox.py  510
-rw-r--r--  third_party/python/Jinja2/jinja2/tests.py  215
-rw-r--r--  third_party/python/Jinja2/jinja2/utils.py  737
-rw-r--r--  third_party/python/Jinja2/jinja2/visitor.py  81
-rw-r--r--  third_party/python/MarkupSafe/CHANGES.rst  112
-rw-r--r--  third_party/python/MarkupSafe/LICENSE.rst  28
-rw-r--r--  third_party/python/MarkupSafe/MANIFEST.in  9
-rw-r--r--  third_party/python/MarkupSafe/PKG-INFO  98
-rw-r--r--  third_party/python/MarkupSafe/README.rst  69
-rw-r--r--  third_party/python/MarkupSafe/requirements/dev.txt  132
-rw-r--r--  third_party/python/MarkupSafe/requirements/docs.txt  67
-rw-r--r--  third_party/python/MarkupSafe/requirements/tests.txt  22
-rw-r--r--  third_party/python/MarkupSafe/requirements/typing.txt  14
-rw-r--r--  third_party/python/MarkupSafe/setup.cfg  86
-rw-r--r--  third_party/python/MarkupSafe/setup.py  81
-rw-r--r--  third_party/python/MarkupSafe/src/MarkupSafe.egg-info/PKG-INFO  98
-rw-r--r--  third_party/python/MarkupSafe/src/MarkupSafe.egg-info/SOURCES.txt  34
-rw-r--r--  third_party/python/MarkupSafe/src/MarkupSafe.egg-info/dependency_links.txt  1
-rw-r--r--  third_party/python/MarkupSafe/src/MarkupSafe.egg-info/top_level.txt  1
-rw-r--r--  third_party/python/MarkupSafe/src/markupsafe/__init__.py  288
-rw-r--r--  third_party/python/MarkupSafe/src/markupsafe/_native.py  75
-rw-r--r--  third_party/python/MarkupSafe/src/markupsafe/_speedups.c  339
-rw-r--r--  third_party/python/MarkupSafe/src/markupsafe/_speedups.pyi  9
-rw-r--r--  third_party/python/MarkupSafe/src/markupsafe/py.typed  0
-rw-r--r--  third_party/python/MarkupSafe/tox.ini  24
-rw-r--r--  third_party/python/PyYAML/CHANGES  254
-rw-r--r--  third_party/python/PyYAML/LICENSE  20
-rw-r--r--  third_party/python/PyYAML/MANIFEST.in  10
-rw-r--r--  third_party/python/PyYAML/Makefile  44
-rw-r--r--  third_party/python/PyYAML/PKG-INFO  44
-rw-r--r--  third_party/python/PyYAML/README  43
-rw-r--r--  third_party/python/PyYAML/examples/pygments-lexer/example.yaml  302
-rw-r--r--  third_party/python/PyYAML/examples/pygments-lexer/yaml.py  431
-rw-r--r--  third_party/python/PyYAML/examples/yaml-highlight/yaml_hl.cfg  115
-rwxr-xr-x  third_party/python/PyYAML/examples/yaml-highlight/yaml_hl.py  114
-rw-r--r--  third_party/python/PyYAML/lib/_yaml/__init__.py  33
-rw-r--r--  third_party/python/PyYAML/lib/yaml/__init__.py  431
-rw-r--r--  third_party/python/PyYAML/lib/yaml/composer.py  139
-rw-r--r--  third_party/python/PyYAML/lib/yaml/constructor.py  766
-rw-r--r--  third_party/python/PyYAML/lib/yaml/cyaml.py  101
-rw-r--r--  third_party/python/PyYAML/lib/yaml/dumper.py  62
-rw-r--r--  third_party/python/PyYAML/lib/yaml/emitter.py  1144
-rw-r--r--  third_party/python/PyYAML/lib/yaml/error.py  75
-rw-r--r--  third_party/python/PyYAML/lib/yaml/events.py  86
-rw-r--r--  third_party/python/PyYAML/lib/yaml/loader.py  63
-rw-r--r--  third_party/python/PyYAML/lib/yaml/nodes.py  49
-rw-r--r--  third_party/python/PyYAML/lib/yaml/parser.py  589
-rw-r--r--  third_party/python/PyYAML/lib/yaml/reader.py  193
-rw-r--r--  third_party/python/PyYAML/lib/yaml/representer.py  489
-rw-r--r--  third_party/python/PyYAML/lib/yaml/resolver.py  227
-rw-r--r--  third_party/python/PyYAML/lib/yaml/scanner.py  1444
-rw-r--r--  third_party/python/PyYAML/lib/yaml/serializer.py  111
-rw-r--r--  third_party/python/PyYAML/lib/yaml/tokens.py  104
-rw-r--r--  third_party/python/PyYAML/lib3/PyYAML.egg-info/PKG-INFO  44
-rw-r--r--  third_party/python/PyYAML/lib3/PyYAML.egg-info/SOURCES.txt  670
-rw-r--r--  third_party/python/PyYAML/lib3/PyYAML.egg-info/dependency_links.txt  1
-rw-r--r--  third_party/python/PyYAML/lib3/PyYAML.egg-info/top_level.txt  2
-rw-r--r--  third_party/python/PyYAML/lib3/_yaml/__init__.py  33
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/__init__.py  427
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/composer.py  139
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/constructor.py  748
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/cyaml.py  101
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/dumper.py  62
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/emitter.py  1137
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/error.py  75
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/events.py  86
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/loader.py  63
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/nodes.py  49
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/parser.py  589
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/reader.py  185
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/representer.py  389
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/resolver.py  227
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/scanner.py  1435
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/serializer.py  111
-rw-r--r--  third_party/python/PyYAML/lib3/yaml/tokens.py  104
-rw-r--r--  third_party/python/PyYAML/pyproject.toml  3
-rw-r--r--  third_party/python/PyYAML/setup.cfg  9
-rw-r--r--  third_party/python/PyYAML/setup.py  296
-rw-r--r--  third_party/python/PyYAML/yaml/__init__.pxd  0
-rw-r--r--  third_party/python/PyYAML/yaml/_yaml.h  23
-rw-r--r--  third_party/python/PyYAML/yaml/_yaml.pxd  251
-rw-r--r--  third_party/python/PyYAML/yaml/_yaml.pyx  1527
-rw-r--r--  third_party/python/_venv/wheels/pip-23.0.1-py3-none-any.whl  bin  0 -> 2055563 bytes
-rw-r--r--  third_party/python/_venv/wheels/setuptools-51.2.0-py3-none-any.whl  bin  0 -> 784868 bytes
-rw-r--r--  third_party/python/aiohttp/CHANGES.rst  728
-rw-r--r--  third_party/python/aiohttp/CONTRIBUTORS.txt  312
-rw-r--r--  third_party/python/aiohttp/LICENSE.txt  201
-rw-r--r--  third_party/python/aiohttp/MANIFEST.in  20
-rw-r--r--  third_party/python/aiohttp/Makefile  144
-rw-r--r--  third_party/python/aiohttp/PKG-INFO  966
-rw-r--r--  third_party/python/aiohttp/README.rst  204
-rw-r--r--  third_party/python/aiohttp/aiohttp.egg-info/PKG-INFO  966
-rw-r--r--  third_party/python/aiohttp/aiohttp.egg-info/SOURCES.txt  246
-rw-r--r--  third_party/python/aiohttp/aiohttp.egg-info/dependency_links.txt  1
-rw-r--r--  third_party/python/aiohttp/aiohttp.egg-info/requires.txt  14
-rw-r--r--  third_party/python/aiohttp/aiohttp.egg-info/top_level.txt  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/_cparser.pxd.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/_find_header.pxd.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/_frozenlist.pyx.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/_helpers.pyi.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/_helpers.pyx.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/_http_parser.pyx.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/_http_writer.pyx.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/_websocket.pyx.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/frozenlist.pyi.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/hdrs.py.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/.hash/signals.pyi.hash  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/__init__.py  217
-rw-r--r--  third_party/python/aiohttp/aiohttp/_cparser.pxd  140
-rw-r--r--  third_party/python/aiohttp/aiohttp/_find_header.c  9870
-rw-r--r--  third_party/python/aiohttp/aiohttp/_find_header.h  14
-rw-r--r--  third_party/python/aiohttp/aiohttp/_find_header.pxd  2
-rw-r--r--  third_party/python/aiohttp/aiohttp/_frozenlist.c  7512
-rw-r--r--  third_party/python/aiohttp/aiohttp/_frozenlist.pyx  108
-rw-r--r--  third_party/python/aiohttp/aiohttp/_headers.pxi  83
-rw-r--r--  third_party/python/aiohttp/aiohttp/_helpers.c  5433
-rw-r--r--  third_party/python/aiohttp/aiohttp/_helpers.pyi  6
-rw-r--r--  third_party/python/aiohttp/aiohttp/_helpers.pyx  35
-rw-r--r--  third_party/python/aiohttp/aiohttp/_http_parser.c  24607
-rw-r--r--  third_party/python/aiohttp/aiohttp/_http_parser.pyx  875
-rw-r--r--  third_party/python/aiohttp/aiohttp/_http_writer.c  5840
-rw-r--r--  third_party/python/aiohttp/aiohttp/_http_writer.pyx  151
-rw-r--r--  third_party/python/aiohttp/aiohttp/_websocket.c  3588
-rw-r--r--  third_party/python/aiohttp/aiohttp/_websocket.pyx  56
-rw-r--r--  third_party/python/aiohttp/aiohttp/abc.py  200
-rw-r--r--  third_party/python/aiohttp/aiohttp/base_protocol.py  87
-rw-r--r--  third_party/python/aiohttp/aiohttp/client.py  1275
-rw-r--r--  third_party/python/aiohttp/aiohttp/client_exceptions.py  317
-rw-r--r--  third_party/python/aiohttp/aiohttp/client_proto.py  251
-rw-r--r--  third_party/python/aiohttp/aiohttp/client_reqrep.py  1127
-rw-r--r--  third_party/python/aiohttp/aiohttp/client_ws.py  301
-rw-r--r--  third_party/python/aiohttp/aiohttp/connector.py  1262
-rw-r--r--  third_party/python/aiohttp/aiohttp/cookiejar.py  382
-rw-r--r--  third_party/python/aiohttp/aiohttp/formdata.py  170
-rw-r--r--  third_party/python/aiohttp/aiohttp/frozenlist.py  72
-rw-r--r--  third_party/python/aiohttp/aiohttp/frozenlist.pyi  46
-rw-r--r--  third_party/python/aiohttp/aiohttp/hdrs.py  108
-rw-r--r--  third_party/python/aiohttp/aiohttp/helpers.py  780
-rw-r--r--  third_party/python/aiohttp/aiohttp/http.py  72
-rw-r--r--  third_party/python/aiohttp/aiohttp/http_exceptions.py  105
-rw-r--r--  third_party/python/aiohttp/aiohttp/http_parser.py  901
-rw-r--r--  third_party/python/aiohttp/aiohttp/http_websocket.py  698
-rw-r--r--  third_party/python/aiohttp/aiohttp/http_writer.py  182
-rw-r--r--  third_party/python/aiohttp/aiohttp/locks.py  45
-rw-r--r--  third_party/python/aiohttp/aiohttp/log.py  8
-rw-r--r--  third_party/python/aiohttp/aiohttp/multipart.py  957
-rw-r--r--  third_party/python/aiohttp/aiohttp/payload.py  448
-rw-r--r--  third_party/python/aiohttp/aiohttp/payload_streamer.py  74
-rw-r--r--  third_party/python/aiohttp/aiohttp/py.typed  1
-rw-r--r--  third_party/python/aiohttp/aiohttp/pytest_plugin.py  380
-rw-r--r--  third_party/python/aiohttp/aiohttp/resolver.py  149
-rw-r--r--  third_party/python/aiohttp/aiohttp/signals.py  34
-rw-r--r--  third_party/python/aiohttp/aiohttp/signals.pyi  12
-rw-r--r--  third_party/python/aiohttp/aiohttp/streams.py  647
-rw-r--r--  third_party/python/aiohttp/aiohttp/tcp_helpers.py  38
-rw-r--r--  third_party/python/aiohttp/aiohttp/test_utils.py  676
-rw-r--r--  third_party/python/aiohttp/aiohttp/tracing.py  442
-rw-r--r--  third_party/python/aiohttp/aiohttp/typedefs.py  46
-rw-r--r--  third_party/python/aiohttp/aiohttp/web.py  581
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_app.py  552
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_exceptions.py  441
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_fileresponse.py  243
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_log.py  208
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_middlewares.py  121
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_protocol.py  667
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_request.py  824
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_response.py  781
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_routedef.py  215
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_runner.py  381
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_server.py  62
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_urldispatcher.py  1233
-rw-r--r--  third_party/python/aiohttp/aiohttp/web_ws.py  481
-rw-r--r--  third_party/python/aiohttp/aiohttp/worker.py  252
-rwxr-xr-x  third_party/python/aiohttp/examples/background_tasks.py  66
-rwxr-xr-x  third_party/python/aiohttp/examples/cli_app.py  51
-rwxr-xr-x  third_party/python/aiohttp/examples/client_auth.py  23
-rwxr-xr-x  third_party/python/aiohttp/examples/client_json.py  22
-rwxr-xr-x  third_party/python/aiohttp/examples/client_ws.py  73
-rwxr-xr-x  third_party/python/aiohttp/examples/curl.py  35
-rwxr-xr-x  third_party/python/aiohttp/examples/fake_server.py  115
-rwxr-xr-x  third_party/python/aiohttp/examples/legacy/crawl.py  108
-rwxr-xr-x  third_party/python/aiohttp/examples/legacy/srv.py  178
-rwxr-xr-x  third_party/python/aiohttp/examples/legacy/tcp_protocol_parser.py  172
-rw-r--r--  third_party/python/aiohttp/examples/lowlevel_srv.py  26
-rw-r--r--  third_party/python/aiohttp/examples/server.crt  19
-rw-r--r--  third_party/python/aiohttp/examples/server.csr  16
-rw-r--r--  third_party/python/aiohttp/examples/server.key  27
-rw-r--r--  third_party/python/aiohttp/examples/server_simple.py  31
-rwxr-xr-x  third_party/python/aiohttp/examples/static_files.py  9
-rwxr-xr-x  third_party/python/aiohttp/examples/web_classview.py  63
-rwxr-xr-x  third_party/python/aiohttp/examples/web_cookies.py  45
-rwxr-xr-x  third_party/python/aiohttp/examples/web_rewrite_headers_middleware.py  30
-rwxr-xr-x  third_party/python/aiohttp/examples/web_srv.py  59
-rw-r--r--  third_party/python/aiohttp/examples/web_srv_route_deco.py  62
-rw-r--r--  third_party/python/aiohttp/examples/web_srv_route_table.py  64
-rwxr-xr-x  third_party/python/aiohttp/examples/web_ws.py  58
-rw-r--r--  third_party/python/aiohttp/examples/websocket.html  89
-rw-r--r--  third_party/python/aiohttp/pyproject.toml  7
-rw-r--r--  third_party/python/aiohttp/setup.cfg  93
-rw-r--r--  third_party/python/aiohttp/setup.py  159
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/.gitignore  30
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/.mailmap  8
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/.travis.yml  13
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/AUTHORS  68
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/LICENSE-MIT  19
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/Makefile  160
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/README.md  246
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/bench.c  128
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/contrib/parsertrace.c  157
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/contrib/url_parser.c  47
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/http_parser.c  2568
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/http_parser.gyp  111
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/http_parser.h  443
-rw-r--r--  third_party/python/aiohttp/vendor/http-parser/test.c  4600
-rw-r--r--  third_party/python/ansicon/ansicon-1.89.0.dist-info/LICENSE.txt  373
-rw-r--r--  third_party/python/ansicon/ansicon-1.89.0.dist-info/METADATA  2
-rw-r--r--  third_party/python/ansicon/ansicon-1.89.0.dist-info/RECORD  0
-rw-r--r--  third_party/python/ansicon/ansicon-1.89.0.dist-info/WHEEL  6
-rw-r--r--  third_party/python/ansicon/ansicon-1.89.0.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/ansicon/ansicon/__init__.py  18
-rw-r--r--  third_party/python/appdirs/appdirs-1.4.4.dist-info/LICENSE.txt  23
-rw-r--r--  third_party/python/appdirs/appdirs-1.4.4.dist-info/METADATA  264
-rw-r--r--  third_party/python/appdirs/appdirs-1.4.4.dist-info/RECORD  6
-rw-r--r--  third_party/python/appdirs/appdirs-1.4.4.dist-info/WHEEL  6
-rw-r--r--  third_party/python/appdirs/appdirs-1.4.4.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/appdirs/appdirs.py  608
-rw-r--r--  third_party/python/async_timeout/async_timeout-3.0.1.dist-info/LICENSE  201
-rw-r--r--  third_party/python/async_timeout/async_timeout-3.0.1.dist-info/METADATA  165
-rw-r--r--  third_party/python/async_timeout/async_timeout-3.0.1.dist-info/RECORD  7
-rw-r--r--  third_party/python/async_timeout/async_timeout-3.0.1.dist-info/WHEEL  5
-rw-r--r--  third_party/python/async_timeout/async_timeout-3.0.1.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/async_timeout/async_timeout/__init__.py  115
-rw-r--r--  third_party/python/async_timeout/async_timeout/py.typed  1
-rw-r--r--  third_party/python/attrs/attr/__init__.py  132
-rw-r--r--  third_party/python/attrs/attr/__init__.pyi  571
-rw-r--r--  third_party/python/attrs/attr/_cmp.py  155
-rw-r--r--  third_party/python/attrs/attr/_cmp.pyi  13
-rw-r--r--  third_party/python/attrs/attr/_compat.py  185
-rw-r--r--  third_party/python/attrs/attr/_config.py  31
-rw-r--r--  third_party/python/attrs/attr/_funcs.py  477
-rw-r--r--  third_party/python/attrs/attr/_make.py  2987
-rw-r--r--  third_party/python/attrs/attr/_next_gen.py  232
-rw-r--r--  third_party/python/attrs/attr/_typing_compat.pyi  15
-rw-r--r--  third_party/python/attrs/attr/_version_info.py  86
-rw-r--r--  third_party/python/attrs/attr/_version_info.pyi  9
-rw-r--r--  third_party/python/attrs/attr/converters.py  144
-rw-r--r--  third_party/python/attrs/attr/converters.pyi  13
-rw-r--r--  third_party/python/attrs/attr/exceptions.py  91
-rw-r--r--  third_party/python/attrs/attr/exceptions.pyi  17
-rw-r--r--  third_party/python/attrs/attr/filters.py  66
-rw-r--r--  third_party/python/attrs/attr/filters.pyi  6
-rw-r--r--  third_party/python/attrs/attr/py.typed  0
-rw-r--r--  third_party/python/attrs/attr/setters.py  73
-rw-r--r--  third_party/python/attrs/attr/setters.pyi  19
-rw-r--r--  third_party/python/attrs/attr/validators.py  720
-rw-r--r--  third_party/python/attrs/attr/validators.pyi  88
-rw-r--r--  third_party/python/attrs/attrs-23.1.0.dist-info/METADATA  243
-rw-r--r--  third_party/python/attrs/attrs-23.1.0.dist-info/RECORD  35
-rw-r--r--  third_party/python/attrs/attrs-23.1.0.dist-info/WHEEL  4
-rw-r--r--  third_party/python/attrs/attrs-23.1.0.dist-info/licenses/LICENSE  21
-rw-r--r--  third_party/python/attrs/attrs/__init__.py  65
-rw-r--r--  third_party/python/attrs/attrs/__init__.pyi  67
-rw-r--r--  third_party/python/attrs/attrs/converters.py  3
-rw-r--r--  third_party/python/attrs/attrs/exceptions.py  3
-rw-r--r--  third_party/python/attrs/attrs/filters.py  3
-rw-r--r--  third_party/python/attrs/attrs/py.typed  0
-rw-r--r--  third_party/python/attrs/attrs/setters.py  3
-rw-r--r--  third_party/python/attrs/attrs/validators.py  3
-rw-r--r--  third_party/python/blessed/blessed-1.19.1.dist-info/LICENSE  20
-rw-r--r--  third_party/python/blessed/blessed-1.19.1.dist-info/METADATA  269
-rw-r--r--  third_party/python/blessed/blessed-1.19.1.dist-info/RECORD  23
-rw-r--r--  third_party/python/blessed/blessed-1.19.1.dist-info/WHEEL  6
-rw-r--r--  third_party/python/blessed/blessed-1.19.1.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/blessed/blessed/__init__.py  23
-rw-r--r--  third_party/python/blessed/blessed/_capabilities.py  168
-rw-r--r--  third_party/python/blessed/blessed/_capabilities.pyi  7
-rw-r--r--  third_party/python/blessed/blessed/color.py  258
-rw-r--r--  third_party/python/blessed/blessed/color.pyi  17
-rw-r--r--  third_party/python/blessed/blessed/colorspace.py  973
-rw-r--r--  third_party/python/blessed/blessed/colorspace.pyi  12
-rw-r--r--  third_party/python/blessed/blessed/formatters.py  498
-rw-r--r--  third_party/python/blessed/blessed/formatters.pyi  70
-rw-r--r--  third_party/python/blessed/blessed/keyboard.py  449
-rw-r--r--  third_party/python/blessed/blessed/keyboard.pyi  28
-rw-r--r--  third_party/python/blessed/blessed/py.typed  0
-rw-r--r--  third_party/python/blessed/blessed/sequences.py  461
-rw-r--r--  third_party/python/blessed/blessed/sequences.pyi  55
-rw-r--r--  third_party/python/blessed/blessed/terminal.py  1502
-rw-r--r--  third_party/python/blessed/blessed/terminal.pyi  106
-rw-r--r--  third_party/python/blessed/blessed/win_terminal.py  163
-rw-r--r--  third_party/python/blessed/blessed/win_terminal.pyi  11
-rw-r--r--  third_party/python/cbor2/cbor2-4.0.1.dist-info/DESCRIPTION.rst  26
-rw-r--r--  third_party/python/cbor2/cbor2-4.0.1.dist-info/METADATA  50
-rw-r--r--  third_party/python/cbor2/cbor2-4.0.1.dist-info/RECORD  11
-rw-r--r--  third_party/python/cbor2/cbor2-4.0.1.dist-info/WHEEL  6
-rw-r--r--  third_party/python/cbor2/cbor2-4.0.1.dist-info/metadata.json  1
-rw-r--r--  third_party/python/cbor2/cbor2-4.0.1.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/cbor2/cbor2/__init__.py  3
-rw-r--r--  third_party/python/cbor2/cbor2/compat.py  49
-rw-r--r--  third_party/python/cbor2/cbor2/decoder.py  411
-rw-r--r--  third_party/python/cbor2/cbor2/encoder.py  362
-rw-r--r--  third_party/python/cbor2/cbor2/types.py  55
-rw-r--r--  third_party/python/certifi/certifi-2022.12.7.dist-info/LICENSE  21
-rw-r--r--  third_party/python/certifi/certifi-2022.12.7.dist-info/METADATA  83
-rw-r--r--  third_party/python/certifi/certifi-2022.12.7.dist-info/RECORD  10
-rw-r--r--  third_party/python/certifi/certifi-2022.12.7.dist-info/WHEEL  5
-rw-r--r--  third_party/python/certifi/certifi-2022.12.7.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/certifi/certifi/__init__.py  4
-rw-r--r--  third_party/python/certifi/certifi/__main__.py  12
-rw-r--r--  third_party/python/certifi/certifi/cacert.pem  4527
-rw-r--r--  third_party/python/certifi/certifi/core.py  108
-rw-r--r--  third_party/python/certifi/certifi/py.typed  0
-rw-r--r--  third_party/python/chardet/chardet-4.0.0.dist-info/LICENSE  504
-rw-r--r--  third_party/python/chardet/chardet-4.0.0.dist-info/METADATA  101
-rw-r--r--  third_party/python/chardet/chardet-4.0.0.dist-info/RECORD  49
-rw-r--r--  third_party/python/chardet/chardet-4.0.0.dist-info/WHEEL  6
-rw-r--r--  third_party/python/chardet/chardet-4.0.0.dist-info/entry_points.txt  3
-rw-r--r--  third_party/python/chardet/chardet-4.0.0.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/chardet/chardet/__init__.py  83
-rw-r--r--  third_party/python/chardet/chardet/big5freq.py  386
-rw-r--r--  third_party/python/chardet/chardet/big5prober.py  47
-rw-r--r--  third_party/python/chardet/chardet/chardistribution.py  233
-rw-r--r--  third_party/python/chardet/chardet/charsetgroupprober.py  107
-rw-r--r--  third_party/python/chardet/chardet/charsetprober.py  145
-rw-r--r--  third_party/python/chardet/chardet/cli/__init__.py  1
-rw-r--r--  third_party/python/chardet/chardet/cli/chardetect.py  84
-rw-r--r--  third_party/python/chardet/chardet/codingstatemachine.py  88
-rw-r--r--  third_party/python/chardet/chardet/compat.py  36
-rw-r--r--  third_party/python/chardet/chardet/cp949prober.py  49
-rw-r--r--  third_party/python/chardet/chardet/enums.py  76
-rw-r--r--  third_party/python/chardet/chardet/escprober.py  101
-rw-r--r--  third_party/python/chardet/chardet/escsm.py  246
-rw-r--r--  third_party/python/chardet/chardet/eucjpprober.py  92
-rw-r--r--  third_party/python/chardet/chardet/euckrfreq.py  195
-rw-r--r--  third_party/python/chardet/chardet/euckrprober.py  47
-rw-r--r--  third_party/python/chardet/chardet/euctwfreq.py  387
-rw-r--r--  third_party/python/chardet/chardet/euctwprober.py  46
-rw-r--r--  third_party/python/chardet/chardet/gb2312freq.py  283
-rw-r--r--  third_party/python/chardet/chardet/gb2312prober.py  46
-rw-r--r--  third_party/python/chardet/chardet/hebrewprober.py  292
-rw-r--r--  third_party/python/chardet/chardet/jisfreq.py  325
-rw-r--r--  third_party/python/chardet/chardet/jpcntx.py  233
-rw-r--r--  third_party/python/chardet/chardet/langbulgarianmodel.py  4650
-rw-r--r--  third_party/python/chardet/chardet/langgreekmodel.py  4398
-rw-r--r--  third_party/python/chardet/chardet/langhebrewmodel.py  4383
-rw-r--r--  third_party/python/chardet/chardet/langhungarianmodel.py  4650
-rw-r--r--  third_party/python/chardet/chardet/langrussianmodel.py  5718
-rw-r--r--  third_party/python/chardet/chardet/langthaimodel.py  4383
-rw-r--r--  third_party/python/chardet/chardet/langturkishmodel.py  4383
-rw-r--r--  third_party/python/chardet/chardet/latin1prober.py  145
-rw-r--r--  third_party/python/chardet/chardet/mbcharsetprober.py  91
-rw-r--r--  third_party/python/chardet/chardet/mbcsgroupprober.py  54
-rw-r--r--  third_party/python/chardet/chardet/mbcssm.py  572
-rw-r--r--  third_party/python/chardet/chardet/metadata/__init__.py  0
-rw-r--r--  third_party/python/chardet/chardet/metadata/languages.py  310
-rw-r--r--  third_party/python/chardet/chardet/sbcharsetprober.py  145
-rw-r--r--  third_party/python/chardet/chardet/sbcsgroupprober.py  83
-rw-r--r--  third_party/python/chardet/chardet/sjisprober.py  92
-rw-r--r--  third_party/python/chardet/chardet/universaldetector.py  286
-rw-r--r--  third_party/python/chardet/chardet/utf8prober.py  82
-rw-r--r--  third_party/python/chardet/chardet/version.py  9
-rw-r--r--  third_party/python/click/click-7.1.2.dist-info/LICENSE.rst  28
-rw-r--r--  third_party/python/click/click-7.1.2.dist-info/METADATA  102
-rw-r--r--  third_party/python/click/click-7.1.2.dist-info/RECORD  22
-rw-r--r--  third_party/python/click/click-7.1.2.dist-info/WHEEL  6
-rw-r--r--  third_party/python/click/click-7.1.2.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/click/click/__init__.py  79
-rw-r--r--  third_party/python/click/click/_bashcomplete.py  375
-rw-r--r--  third_party/python/click/click/_compat.py  786
-rw-r--r--  third_party/python/click/click/_termui_impl.py  657
-rw-r--r--  third_party/python/click/click/_textwrap.py  37
-rw-r--r--  third_party/python/click/click/_unicodefun.py  131
-rw-r--r--  third_party/python/click/click/_winconsole.py  370
-rw-r--r--  third_party/python/click/click/core.py  2030
-rw-r--r--  third_party/python/click/click/decorators.py  333
-rw-r--r--  third_party/python/click/click/exceptions.py  253
-rw-r--r--  third_party/python/click/click/formatting.py  283
-rw-r--r--  third_party/python/click/click/globals.py  47
-rw-r--r--  third_party/python/click/click/parser.py  428
-rw-r--r--  third_party/python/click/click/termui.py  681
-rw-r--r--  third_party/python/click/click/testing.py  382
-rw-r--r--  third_party/python/click/click/types.py  762
-rw-r--r--  third_party/python/click/click/utils.py  455
-rw-r--r--  third_party/python/colorama/colorama-0.4.5.dist-info/LICENSE.txt  27
-rw-r--r--  third_party/python/colorama/colorama-0.4.5.dist-info/METADATA  411
-rw-r--r--  third_party/python/colorama/colorama-0.4.5.dist-info/RECORD  11
-rw-r--r--  third_party/python/colorama/colorama-0.4.5.dist-info/WHEEL  6
-rw-r--r--  third_party/python/colorama/colorama-0.4.5.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/colorama/colorama/__init__.py  6
-rw-r--r--  third_party/python/colorama/colorama/ansi.py  102
-rw-r--r--  third_party/python/colorama/colorama/ansitowin32.py  266
-rw-r--r--  third_party/python/colorama/colorama/initialise.py  80
-rw-r--r--  third_party/python/colorama/colorama/win32.py  152
-rw-r--r--  third_party/python/colorama/colorama/winterm.py  169
-rw-r--r--  third_party/python/compare_locales/compare_locales-9.0.1.dist-info/LICENSE.md  373
-rw-r--r--  third_party/python/compare_locales/compare_locales-9.0.1.dist-info/METADATA  84
-rw-r--r--  third_party/python/compare_locales/compare_locales-9.0.1.dist-info/RECORD  45
-rw-r--r--  third_party/python/compare_locales/compare_locales-9.0.1.dist-info/WHEEL  6
-rw-r--r--  third_party/python/compare_locales/compare_locales-9.0.1.dist-info/entry_points.txt  3
-rw-r--r--  third_party/python/compare_locales/compare_locales-9.0.1.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/compare_locales/compare_locales/__init__.py  1
-rw-r--r--  third_party/python/compare_locales/compare_locales/checks/__init__.py  27
-rw-r--r--  third_party/python/compare_locales/compare_locales/checks/android.py  256
-rw-r--r--  third_party/python/compare_locales/compare_locales/checks/base.py  122
-rw-r--r--  third_party/python/compare_locales/compare_locales/checks/dtd.py  238
-rw-r--r--  third_party/python/compare_locales/compare_locales/checks/fluent.py  351
-rw-r--r--  third_party/python/compare_locales/compare_locales/checks/properties.py  162
-rw-r--r--  third_party/python/compare_locales/compare_locales/commands.py  203
-rw-r--r--  third_party/python/compare_locales/compare_locales/compare/__init__.py  89
-rw-r--r--  third_party/python/compare_locales/compare_locales/compare/content.py  304
-rw-r--r--  third_party/python/compare_locales/compare_locales/compare/observer.py  215
-rw-r--r--  third_party/python/compare_locales/compare_locales/compare/utils.py  133
-rw-r--r--  third_party/python/compare_locales/compare_locales/integration_tests/__init__.py  5
-rw-r--r--  third_party/python/compare_locales/compare_locales/integration_tests/test_plurals.py  51
-rw-r--r--  third_party/python/compare_locales/compare_locales/keyedtuple.py  55
-rw-r--r--  third_party/python/compare_locales/compare_locales/lint/__init__.py  0
-rw-r--r--  third_party/python/compare_locales/compare_locales/lint/cli.py  93
-rw-r--r--  third_party/python/compare_locales/compare_locales/lint/linter.py  121
-rw-r--r--  third_party/python/compare_locales/compare_locales/lint/util.py  38
-rw-r--r--  third_party/python/compare_locales/compare_locales/merge.py  143
-rw-r--r--  third_party/python/compare_locales/compare_locales/mozpath.py  154
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/__init__.py  81
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/android.py  303
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/base.py  443
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/defines.py  104
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/dtd.py  115
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/fluent.py  218
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/ini.py  56
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/po.py  125
-rw-r--r--  third_party/python/compare_locales/compare_locales/parser/properties.py  113
-rw-r--r--  third_party/python/compare_locales/compare_locales/paths/__init__.py  53
-rw-r--r--  third_party/python/compare_locales/compare_locales/paths/configparser.py  138
-rw-r--r--  third_party/python/compare_locales/compare_locales/paths/files.py  224
-rw-r--r--  third_party/python/compare_locales/compare_locales/paths/ini.py  224
-rw-r--r--  third_party/python/compare_locales/compare_locales/paths/matcher.py  470
-rw-r--r--  third_party/python/compare_locales/compare_locales/paths/project.py  260
-rw-r--r--  third_party/python/compare_locales/compare_locales/plurals.py  221
-rw-r--r--  third_party/python/compare_locales/compare_locales/serializer.py  137
-rw-r--r--  third_party/python/compare_locales/compare_locales/util.py  11
-rw-r--r--  third_party/python/cookies/cookies-2.2.1.dist-info/DESCRIPTION.rst  90
-rw-r--r--  third_party/python/cookies/cookies-2.2.1.dist-info/METADATA  111
-rw-r--r--  third_party/python/cookies/cookies-2.2.1.dist-info/RECORD  8
-rw-r--r--  third_party/python/cookies/cookies-2.2.1.dist-info/WHEEL  6
-rw-r--r--  third_party/python/cookies/cookies-2.2.1.dist-info/metadata.json  1
-rw-r--r--  third_party/python/cookies/cookies-2.2.1.dist-info/top_level.txt  2
-rw-r--r--  third_party/python/cookies/cookies.py  1169
-rw-r--r--  third_party/python/cookies/test_cookies.py  2447
-rwxr-xr-x  third_party/python/cram/cram-0.7.data/scripts/cram  9
-rw-r--r--  third_party/python/cram/cram-0.7.dist-info/DESCRIPTION.rst  227
-rw-r--r--  third_party/python/cram/cram-0.7.dist-info/METADATA  250
-rw-r--r--  third_party/python/cram/cram-0.7.dist-info/RECORD  16
-rw-r--r--  third_party/python/cram/cram-0.7.dist-info/WHEEL  6
-rw-r--r--  third_party/python/cram/cram-0.7.dist-info/metadata.json  1
-rw-r--r--  third_party/python/cram/cram/__init__.py  6
-rw-r--r--  third_party/python/cram/cram/__main__.py  10
-rw-r--r--  third_party/python/cram/cram/_cli.py  134
-rw-r--r--  third_party/python/cram/cram/_diff.py  158
-rw-r--r--  third_party/python/cram/cram/_encoding.py  106
-rw-r--r--  third_party/python/cram/cram/_main.py  211
-rw-r--r--  third_party/python/cram/cram/_process.py  54
-rw-r--r--  third_party/python/cram/cram/_run.py  77
-rw-r--r--  third_party/python/cram/cram/_test.py  230
-rw-r--r--  third_party/python/cram/cram/_xunit.py  173
-rw-r--r--  third_party/python/diskcache/diskcache-4.1.0.dist-info/LICENSE  12
-rw-r--r--  third_party/python/diskcache/diskcache-4.1.0.dist-info/METADATA  430
-rw-r--r--  third_party/python/diskcache/diskcache-4.1.0.dist-info/RECORD  12
-rw-r--r--  third_party/python/diskcache/diskcache-4.1.0.dist-info/WHEEL  6
-rw-r--r--  third_party/python/diskcache/diskcache-4.1.0.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/diskcache/diskcache/__init__.py  51
-rw-r--r--  third_party/python/diskcache/diskcache/cli.py  1
-rw-r--r--  third_party/python/diskcache/diskcache/core.py  2481
-rw-r--r--  third_party/python/diskcache/diskcache/djangocache.py  433
-rw-r--r--  third_party/python/diskcache/diskcache/fanout.py  677
-rw-r--r--  third_party/python/diskcache/diskcache/persistent.py  1403
-rw-r--r--  third_party/python/diskcache/diskcache/recipes.py  437
-rw-r--r--  third_party/python/distro/distro-1.4.0.dist-info/LICENSE  202
-rw-r--r--  third_party/python/distro/distro-1.4.0.dist-info/METADATA  170
-rw-r--r--  third_party/python/distro/distro-1.4.0.dist-info/RECORD  7
-rw-r--r--  third_party/python/distro/distro-1.4.0.dist-info/WHEEL  6
-rw-r--r--  third_party/python/distro/distro-1.4.0.dist-info/entry_points.txt  3
-rw-r--r--  third_party/python/distro/distro-1.4.0.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/distro/distro.py  1216
-rw-r--r--  third_party/python/dlmanager/README.rst  59
-rwxr-xr-x  third_party/python/dlmanager/check.py  67
-rw-r--r--  third_party/python/dlmanager/dlmanager/__init__.py  18
-rw-r--r--  third_party/python/dlmanager/dlmanager/fs.py  116
-rw-r--r--  third_party/python/dlmanager/dlmanager/manager.py  323
-rw-r--r--  third_party/python/dlmanager/dlmanager/persist_limit.py  65
-rw-r--r--  third_party/python/dlmanager/doc/Makefile  216
-rw-r--r--  third_party/python/dlmanager/doc/api.rst  25
-rw-r--r--  third_party/python/dlmanager/doc/conf.py  289
-rw-r--r--  third_party/python/dlmanager/doc/index.rst  26
-rw-r--r--  third_party/python/dlmanager/doc/make.bat  263
-rw-r--r--  third_party/python/dlmanager/examples/dl_progressbar.py  41
-rw-r--r--  third_party/python/dlmanager/examples/dl_tqdm.py  45
-rw-r--r--  third_party/python/dlmanager/requirements.txt  2
-rw-r--r--  third_party/python/dlmanager/setup.cfg  2
-rw-r--r--  third_party/python/dlmanager/setup.py  60
-rw-r--r--  third_party/python/dlmanager/test-requirements.txt  7
-rw-r--r--  third_party/python/dlmanager/tests/__init__.py  0
-rw-r--r--  third_party/python/dlmanager/tests/test_manager.py  251
-rw-r--r--  third_party/python/dlmanager/tests/test_persist_limit.py  56
-rw-r--r--  third_party/python/ecdsa/ecdsa-0.15.dist-info/LICENSE  24
-rw-r--r--  third_party/python/ecdsa/ecdsa-0.15.dist-info/METADATA  625
-rw-r--r--  third_party/python/ecdsa/ecdsa-0.15.dist-info/RECORD  28
-rw-r--r--  third_party/python/ecdsa/ecdsa-0.15.dist-info/WHEEL  6
-rw-r--r--  third_party/python/ecdsa/ecdsa-0.15.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/ecdsa/ecdsa/__init__.py  25
-rw-r--r--  third_party/python/ecdsa/ecdsa/_compat.py  39
-rw-r--r--  third_party/python/ecdsa/ecdsa/_rwlock.py  85
-rw-r--r--  third_party/python/ecdsa/ecdsa/_version.py  21
-rw-r--r--  third_party/python/ecdsa/ecdsa/curves.py  128
-rw-r--r--  third_party/python/ecdsa/ecdsa/der.py  384
-rw-r--r--  third_party/python/ecdsa/ecdsa/ecdh.py  306
-rw-r--r--  third_party/python/ecdsa/ecdsa/ecdsa.py  446
-rw-r--r--  third_party/python/ecdsa/ecdsa/ellipticcurve.py  780
-rw-r--r--  third_party/python/ecdsa/ecdsa/keys.py  1219
-rw-r--r--  third_party/python/ecdsa/ecdsa/numbertheory.py  600
-rw-r--r--  third_party/python/ecdsa/ecdsa/rfc6979.py  107
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_der.py  384
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_ecdh.py  350
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_ecdsa.py  448
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_ellipticcurve.py  188
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_jacobi.py  365
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_keys.py  373
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_malformed_sigs.py  306
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_numbertheory.py  275
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_pyecdsa.py  1445
-rw-r--r--  third_party/python/ecdsa/ecdsa/test_rw_lock.py  175
-rw-r--r--  third_party/python/ecdsa/ecdsa/util.py  401
-rw-r--r--  third_party/python/esprima/PKG-INFO  143
-rw-r--r--  third_party/python/esprima/README  117
-rw-r--r--  third_party/python/esprima/esprima.egg-info/PKG-INFO  143
-rw-r--r--  third_party/python/esprima/esprima.egg-info/SOURCES.txt  29
-rw-r--r--  third_party/python/esprima/esprima.egg-info/dependency_links.txt  1
-rw-r--r--  third_party/python/esprima/esprima.egg-info/entry_points.txt  3
-rw-r--r--  third_party/python/esprima/esprima.egg-info/pbr.json  1
-rw-r--r--  third_party/python/esprima/esprima.egg-info/top_level.txt  1
-rw-r--r--  third_party/python/esprima/esprima/__init__.py  29
-rw-r--r--  third_party/python/esprima/esprima/__main__.py  105
-rw-r--r--  third_party/python/esprima/esprima/character.py  125
-rw-r--r--  third_party/python/esprima/esprima/comment_handler.py  176
-rw-r--r--  third_party/python/esprima/esprima/compat.py  72
-rw-r--r--  third_party/python/esprima/esprima/error_handler.py  74
-rw-r--r--  third_party/python/esprima/esprima/esprima.py  125
-rw-r--r--  third_party/python/esprima/esprima/jsx_nodes.py  100
-rw-r--r--  third_party/python/esprima/esprima/jsx_parser.py  584
-rw-r--r--  third_party/python/esprima/esprima/jsx_syntax.py  38
-rw-r--r--  third_party/python/esprima/esprima/messages.py  90
-rw-r--r--  third_party/python/esprima/esprima/nodes.py  620
-rw-r--r--  third_party/python/esprima/esprima/objects.py  46
-rw-r--r--  third_party/python/esprima/esprima/parser.py  3104
-rw-r--r--  third_party/python/esprima/esprima/scanner.py  1189
-rw-r--r--  third_party/python/esprima/esprima/syntax.py  100
-rw-r--r--  third_party/python/esprima/esprima/token.py  50
-rw-r--r--  third_party/python/esprima/esprima/tokenizer.py  193
-rw-r--r--  third_party/python/esprima/esprima/utils.py  40
-rw-r--r--  third_party/python/esprima/esprima/visitor.py  288
-rw-r--r--  third_party/python/esprima/esprima/xhtml_entities.py  281
-rw-r--r--  third_party/python/esprima/setup.cfg  4
-rw-r--r--  third_party/python/esprima/setup.py  55
-rw-r--r--  third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/LICENSE  13
-rw-r--r--  third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/METADATA  62
-rw-r--r--  third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/RECORD  20
-rw-r--r--  third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/WHEEL  6
-rw-r--r--  third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/entry_points.txt  3
-rw-r--r--  third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/fluent.migrate/fluent/__init__.py  1
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/__init__.py  3
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/_context.py  329
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/blame.py  80
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/changesets.py  56
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/context.py  148
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/errors.py  22
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/evaluator.py  28
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/helpers.py  147
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/merge.py  55
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/tool.py  181
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/transforms.py  576
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/util.py  110
-rw-r--r--  third_party/python/fluent.migrate/fluent/migrate/validator.py  335
-rw-r--r--  third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/METADATA  42
-rw-r--r--  third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/RECORD  12
-rw-r--r--  third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/WHEEL  6
-rw-r--r--  third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/fluent.syntax/fluent/syntax/__init__.py  34
-rw-r--r--  third_party/python/fluent.syntax/fluent/syntax/ast.py  376
-rw-r--r--  third_party/python/fluent.syntax/fluent/syntax/errors.py  70
-rw-r--r--  third_party/python/fluent.syntax/fluent/syntax/parser.py  701
-rw-r--r--  third_party/python/fluent.syntax/fluent/syntax/py.typed  0
-rw-r--r--  third_party/python/fluent.syntax/fluent/syntax/serializer.py  237
-rw-r--r--  third_party/python/fluent.syntax/fluent/syntax/stream.py  283
-rw-r--r--  third_party/python/fluent.syntax/fluent/syntax/visitor.py  65
-rw-r--r--  third_party/python/giturlparse/giturlparse-0.10.0.dist-info/LICENSE  191
-rw-r--r--  third_party/python/giturlparse/giturlparse-0.10.0.dist-info/METADATA  165
-rw-r--r--  third_party/python/giturlparse/giturlparse-0.10.0.dist-info/RECORD  18
-rw-r--r--  third_party/python/giturlparse/giturlparse-0.10.0.dist-info/WHEEL  6
-rw-r--r--  third_party/python/giturlparse/giturlparse-0.10.0.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/giturlparse/giturlparse/__init__.py  14
-rw-r--r--  third_party/python/giturlparse/giturlparse/parser.py  69
-rw-r--r--  third_party/python/giturlparse/giturlparse/platforms/__init__.py  18
-rw-r--r--  third_party/python/giturlparse/giturlparse/platforms/assembla.py  14
-rw-r--r--  third_party/python/giturlparse/giturlparse/platforms/base.py  43
-rw-r--r--  third_party/python/giturlparse/giturlparse/platforms/bitbucket.py  20
-rw-r--r--  third_party/python/giturlparse/giturlparse/platforms/friendcode.py  14
-rw-r--r--  third_party/python/giturlparse/giturlparse/platforms/github.py  39
-rw-r--r--  third_party/python/giturlparse/giturlparse/platforms/gitlab.py  43
-rw-r--r--  third_party/python/giturlparse/giturlparse/result.py  131
-rw-r--r--  third_party/python/glean_parser/glean_parser-7.2.1.dist-info/AUTHORS.md  17
-rw-r--r--  third_party/python/glean_parser/glean_parser-7.2.1.dist-info/LICENSE  373
-rw-r--r--  third_party/python/glean_parser/glean_parser-7.2.1.dist-info/METADATA  726
-rw-r--r--  third_party/python/glean_parser/glean_parser-7.2.1.dist-info/RECORD  40
-rw-r--r--  third_party/python/glean_parser/glean_parser-7.2.1.dist-info/WHEEL  5
-rw-r--r--  third_party/python/glean_parser/glean_parser-7.2.1.dist-info/entry_points.txt  3
-rw-r--r--  third_party/python/glean_parser/glean_parser-7.2.1.dist-info/top_level.txt  1
-rw-r--r--  third_party/python/glean_parser/glean_parser/__init__.py  18
-rw-r--r--  third_party/python/glean_parser/glean_parser/__main__.py  349
-rw-r--r--  third_party/python/glean_parser/glean_parser/coverage.py  140
-rw-r--r--  third_party/python/glean_parser/glean_parser/data_review.py  79
-rw-r--r--  third_party/python/glean_parser/glean_parser/javascript.py  322
-rw-r--r--  third_party/python/glean_parser/glean_parser/kotlin.py  356
-rw-r--r--  third_party/python/glean_parser/glean_parser/lint.py  538
-rw-r--r--  third_party/python/glean_parser/glean_parser/markdown.py  273
-rw-r--r--  third_party/python/glean_parser/glean_parser/metrics.py  435
-rw-r--r--  third_party/python/glean_parser/glean_parser/parser.py  446
-rw-r--r--  third_party/python/glean_parser/glean_parser/pings.py  97
-rw-r--r--  third_party/python/glean_parser/glean_parser/rust.py  218
-rw-r--r--  third_party/python/glean_parser/glean_parser/schemas/metrics.1-0-0.schema.yaml  605
-rw-r--r--  third_party/python/glean_parser/glean_parser/schemas/metrics.2-0-0.schema.yaml  735
-rw-r--r--  third_party/python/glean_parser/glean_parser/schemas/pings.1-0-0.schema.yaml  157
-rw-r--r--  third_party/python/glean_parser/glean_parser/schemas/pings.2-0-0.schema.yaml  169
-rw-r--r--  third_party/python/glean_parser/glean_parser/schemas/tags.1-0-0.schema.yaml  51
-rw-r--r--  third_party/python/glean_parser/glean_parser/swift.py  260
-rw-r--r--  third_party/python/glean_parser/glean_parser/tags.py  49
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/data_review.jinja2  82
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/javascript.buildinfo.jinja2  11
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/javascript.jinja2  73
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/kotlin.buildinfo.jinja2  31
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/kotlin.geckoview.jinja2  124
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/kotlin.jinja2  133
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/markdown.jinja2  98
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/qmldir.jinja2  4
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/rust.jinja2  276
-rw-r--r--  third_party/python/glean_parser/glean_parser/templates/swift.jinja2  138
-rw-r--r--  third_party/python/glean_parser/glean_parser/translate.py  227
-rw-r--r--  third_party/python/glean_parser/glean_parser/translation_options.py  54
-rw-r--r--  third_party/python/glean_parser/glean_parser/util.py  560
-rw-r--r--  third_party/python/glean_parser/glean_parser/validate_ping.py  74
-rw-r--r--  third_party/python/gyp/.gitignore  1
-rw-r--r--  third_party/python/gyp/AUTHORS  17
-rw-r--r--  third_party/python/gyp/DEPS  23
-rw-r--r--  third_party/python/gyp/LICENSE  27
-rw-r--r--  third_party/python/gyp/OWNERS  1
-rw-r--r--  third_party/python/gyp/PRESUBMIT.py  125
-rw-r--r--  third_party/python/gyp/README.md  5
-rwxr-xr-x  third_party/python/gyp/buildbot/buildbot_run.py  138
-rw-r--r--  third_party/python/gyp/buildbot/commit_queue/OWNERS  6
-rw-r--r--  third_party/python/gyp/buildbot/commit_queue/README  3
-rw-r--r--  third_party/python/gyp/buildbot/commit_queue/cq_config.json  15
-rwxr-xr-x  third_party/python/gyp/buildbot/travis-checkout.sh  27
-rwxr-xr-x  third_party/python/gyp/buildbot/travis-test.sh  12
-rw-r--r--  third_party/python/gyp/codereview.settings  6
-rw-r--r--  third_party/python/gyp/data/win/large-pdb-shim.cc  12
-rwxr-xr-x  third_party/python/gyp/gyp  13
-rwxr-xr-x  third_party/python/gyp/gyp.bat  5
-rwxr-xr-x  third_party/python/gyp/gyp_main.py  16
-rwxr-xr-x  third_party/python/gyp/gyptest.py  243
-rw-r--r--  third_party/python/gyp/pylib/gyp/MSVSNew.py  353
-rw-r--r--  third_party/python/gyp/pylib/gyp/MSVSProject.py  208
-rw-r--r--  third_party/python/gyp/pylib/gyp/MSVSSettings.py  1106
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/MSVSSettings_test.py  1486
-rw-r--r--  third_party/python/gyp/pylib/gyp/MSVSToolFile.py  58
-rw-r--r--  third_party/python/gyp/pylib/gyp/MSVSUserFile.py  147
-rw-r--r--  third_party/python/gyp/pylib/gyp/MSVSUtil.py  271
-rw-r--r--  third_party/python/gyp/pylib/gyp/MSVSVersion.py  537
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/__init__.py  555
-rw-r--r--  third_party/python/gyp/pylib/gyp/common.py  608
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/common_test.py  73
-rw-r--r--  third_party/python/gyp/pylib/gyp/easy_xml.py  170
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/easy_xml_test.py  108
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/flock_tool.py  54
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/__init__.py  0
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/analyzer.py  744
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/cmake.py  1256
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/dump_dependency_json.py  101
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/eclipse.py  425
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/gypd.py  94
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/gypsh.py  56
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/make.py  2260
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/msvs.py  3543
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/generator/msvs_test.py  40
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/ninja.py  2501
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/ninja_test.py  46
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/xcode.py  1302
-rw-r--r--  third_party/python/gyp/pylib/gyp/generator/xcode_test.py  23
-rw-r--r--  third_party/python/gyp/pylib/gyp/input.py  2908
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/input_test.py  90
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/mac_tool.py  721
-rw-r--r--  third_party/python/gyp/pylib/gyp/msvs_emulation.py  1118
-rw-r--r--  third_party/python/gyp/pylib/gyp/ninja_syntax.py  168
-rw-r--r--  third_party/python/gyp/pylib/gyp/simple_copy.py  57
-rwxr-xr-x  third_party/python/gyp/pylib/gyp/win_tool.py  331
-rw-r--r--  third_party/python/gyp/pylib/gyp/xcode_emulation.py  1800
-rw-r--r--  third_party/python/gyp/pylib/gyp/xcode_ninja.py  289
-rw-r--r--  third_party/python/gyp/pylib/gyp/xcodeproj_file.py  2995
-rw-r--r--  third_party/python/gyp/pylib/gyp/xml_fix.py  68
-rwxr-xr-x  third_party/python/gyp/samples/samples  83
-rw-r--r--  third_party/python/gyp/samples/samples.bat  5
-rwxr-xr-x  third_party/python/gyp/setup.py  19
-rwxr-xr-x  third_party/python/gyp/test/actions-bare/gyptest-bare.py  24
-rw-r--r--  third_party/python/gyp/test/actions-bare/src/bare.gyp  25
-rwxr-xr-x  third_party/python/gyp/test/actions-bare/src/bare.py  11
-rw-r--r--  third_party/python/gyp/test/actions-depfile/depfile.gyp  42
-rw-r--r--  third_party/python/gyp/test/actions-depfile/gyptest-all.py  30
-rw-r--r--  third_party/python/gyp/test/actions-depfile/input.txt  1
-rw-r--r--  third_party/python/gyp/test/actions-depfile/touch.py  18
-rwxr-xr-x  third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/gyptest-action.py  45
-rw-r--r--  third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/src/action.gyp  28
-rw-r--r--  third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/src/rcopy.py  20
-rwxr-xr-x  third_party/python/gyp/test/actions-multiple-outputs/gyptest-multiple-outputs.py  45
-rw-r--r--  third_party/python/gyp/test/actions-multiple-outputs/src/multiple-outputs.gyp  23
-rw-r--r--  third_party/python/gyp/test/actions-multiple-outputs/src/touch.py  16
-rwxr-xr-x  third_party/python/gyp/test/actions-multiple/gyptest-all.py  72
-rw-r--r--  third_party/python/gyp/test/actions-multiple/src/actions.gyp  226
-rwxr-xr-x  third_party/python/gyp/test/actions-multiple/src/copyfile.py  9
-rwxr-xr-x  third_party/python/gyp/test/actions-multiple/src/filter.py  12
-rw-r--r--  third_party/python/gyp/test/actions-multiple/src/foo.c  11
-rw-r--r--  third_party/python/gyp/test/actions-multiple/src/input.txt  1
-rw-r--r--  third_party/python/gyp/test/actions-multiple/src/main.c  22
-rwxr-xr-x  third_party/python/gyp/test/actions-none/gyptest-none.py  24
-rw-r--r--  third_party/python/gyp/test/actions-none/src/fake_cross.py  12
-rw-r--r--  third_party/python/gyp/test/actions-none/src/foo.cc  1
-rw-r--r--  third_party/python/gyp/test/actions-none/src/none_with_source_files.gyp  35
-rwxr-xr-x  third_party/python/gyp/test/actions-subdir/gyptest-action.py  26
-rwxr-xr-x  third_party/python/gyp/test/actions-subdir/src/make-file.py  11
-rw-r--r--  third_party/python/gyp/test/actions-subdir/src/none.gyp  31
-rwxr-xr-x  third_party/python/gyp/test/actions-subdir/src/subdir/make-subdir-file.py  11
-rw-r--r--  third_party/python/gyp/test/actions-subdir/src/subdir/subdir.gyp  28
-rw-r--r--  third_party/python/gyp/test/actions/generated-header/action.py  11
-rw-r--r--  third_party/python/gyp/test/actions/generated-header/main.cc  7
-rw-r--r--  third_party/python/gyp/test/actions/generated-header/test.gyp  34
-rwxr-xr-x  third_party/python/gyp/test/actions/gyptest-all.py  101
-rwxr-xr-x  third_party/python/gyp/test/actions/gyptest-default.py  68
-rwxr-xr-x  third_party/python/gyp/test/actions/gyptest-errors.py  24
-rw-r--r--  third_party/python/gyp/test/actions/gyptest-generated-header.py  38
-rw-r--r--  third_party/python/gyp/test/actions/src/action_missing_name.gyp  24
-rw-r--r--  third_party/python/gyp/test/actions/src/actions.gyp  114
-rwxr-xr-x  third_party/python/gyp/test/actions/src/confirm-dep-files.py  21
-rwxr-xr-x  third_party/python/gyp/test/actions/src/subdir1/counter.py  44
-rw-r--r--  third_party/python/gyp/test/actions/src/subdir1/executable.gyp  74
-rwxr-xr-x  third_party/python/gyp/test/actions/src/subdir1/make-prog1.py  20
-rwxr-xr-x  third_party/python/gyp/test/actions/src/subdir1/make-prog2.py  20
-rw-r--r--  third_party/python/gyp/test/actions/src/subdir1/program.c  12
-rwxr-xr-x  third_party/python/gyp/test/actions/src/subdir2/make-file.py  11
-rw-r--r--  third_party/python/gyp/test/actions/src/subdir2/none.gyp  33
-rwxr-xr-x  third_party/python/gyp/test/actions/src/subdir3/generate_main.py  21
-rw-r--r--  third_party/python/gyp/test/actions/src/subdir3/null_input.gyp  29
-rwxr-xr-x  third_party/python/gyp/test/additional-targets/gyptest-additional.py  63
-rw-r--r--  third_party/python/gyp/test/additional-targets/src/all.gyp  13
-rw-r--r--  third_party/python/gyp/test/additional-targets/src/dir1/actions.gyp  56
-rwxr-xr-x  third_party/python/gyp/test/additional-targets/src/dir1/emit.py  11
-rw-r--r--  third_party/python/gyp/test/additional-targets/src/dir1/lib1.c  6
-rw-r--r--  third_party/python/gyp/test/analyzer/common.gypi  6
-rw-r--r--  third_party/python/gyp/test/analyzer/gyptest-analyzer.py  427
-rw-r--r--  third_party/python/gyp/test/analyzer/static_library_test.gyp  34
-rw-r--r--  third_party/python/gyp/test/analyzer/subdir/subdir.gyp  36
-rw-r--r--  third_party/python/gyp/test/analyzer/subdir/subdir2/subdir2.gyp  15
-rw-r--r--  third_party/python/gyp/test/analyzer/subdir2/subdir.gyp  18
-rw-r--r--  third_party/python/gyp/test/analyzer/subdir2/subdir.includes.gypi  9
-rw-r--r--  third_party/python/gyp/test/analyzer/test.gyp  114
-rw-r--r--  third_party/python/gyp/test/analyzer/test2.gyp  25
-rw-r--r--  third_party/python/gyp/test/analyzer/test2.includes.gypi  13
-rw-r--r--  third_party/python/gyp/test/analyzer/test2.includes.includes.gypi  9
-rw-r--r--  third_party/python/gyp/test/analyzer/test2.toplevel_includes.gypi  15
-rw-r--r--  third_party/python/gyp/test/analyzer/test3.gyp  77
-rw-r--r--  third_party/python/gyp/test/analyzer/test4.gyp  80
-rw-r--r--  third_party/python/gyp/test/analyzer/test5.gyp  25
-rw-r--r--  third_party/python/gyp/test/arflags/gyptest-arflags.py  26
-rw-r--r--  third_party/python/gyp/test/arflags/lib.cc  0
-rw-r--r--  third_party/python/gyp/test/arflags/test.gyp  10
-rwxr-xr-x  third_party/python/gyp/test/assembly/gyptest-assembly.py  31
-rw-r--r--  third_party/python/gyp/test/assembly/gyptest-override.py  24
-rw-r--r--  third_party/python/gyp/test/assembly/src/as.bat  4
-rw-r--r--  third_party/python/gyp/test/assembly/src/assembly.gyp  62
-rw-r--r--  third_party/python/gyp/test/assembly/src/lib1.S  15
-rw-r--r--  third_party/python/gyp/test/assembly/src/lib1.c  3
-rw-r--r--  third_party/python/gyp/test/assembly/src/override.gyp  34
-rw-r--r--  third_party/python/gyp/test/assembly/src/override_asm.asm  8
-rw-r--r--  third_party/python/gyp/test/assembly/src/program.c  12
-rwxr-xr-x  third_party/python/gyp/test/build-option/gyptest-build.py  27
-rw-r--r--  third_party/python/gyp/test/build-option/hello.c  13
-rw-r--r--  third_party/python/gyp/test/build-option/hello.gyp  15
-rwxr-xr-x  third_party/python/gyp/test/builddir/gyptest-all.py  85
-rwxr-xr-x  third_party/python/gyp/test/builddir/gyptest-default.py  85
-rw-r--r--  third_party/python/gyp/test/builddir/src/builddir.gypi  18
-rw-r--r--  third_party/python/gyp/test/builddir/src/func1.c  6
-rw-r--r--  third_party/python/gyp/test/builddir/src/func2.c  6
-rw-r--r--  third_party/python/gyp/test/builddir/src/func3.c  6
-rw-r--r--  third_party/python/gyp/test/builddir/src/func4.c  6
-rw-r--r--  third_party/python/gyp/test/builddir/src/func5.c  6
-rw-r--r--  third_party/python/gyp/test/builddir/src/prog1.c  10
-rw-r--r--  third_party/python/gyp/test/builddir/src/prog1.gyp  30
-rw-r--r--  third_party/python/gyp/test/builddir/src/subdir2/prog2.c  10
-rw-r--r--  third_party/python/gyp/test/builddir/src/subdir2/prog2.gyp  19
-rw-r--r--  third_party/python/gyp/test/builddir/src/subdir2/subdir3/prog3.c  10
-rw-r--r--  third_party/python/gyp/test/builddir/src/subdir2/subdir3/prog3.gyp  19
-rw-r--r--  third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/prog4.c  10
-rw-r--r--  third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/prog4.gyp  19
-rw-r--r--  third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/subdir5/prog5.c  10
-rw-r--r--  third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/subdir5/prog5.gyp  19
-rw-r--r--  third_party/python/gyp/test/cflags/cflags.c  15
-rw-r--r--  third_party/python/gyp/test/cflags/cflags.gyp  23
-rwxr-xr-x  third_party/python/gyp/test/cflags/gyptest-cflags.py  75
-rwxr-xr-x  third_party/python/gyp/test/compilable/gyptest-headers.py  29
-rw-r--r--  third_party/python/gyp/test/compilable/src/headers.gyp  26
-rw-r--r--  third_party/python/gyp/test/compilable/src/lib1.cpp  7
-rw-r--r--  third_party/python/gyp/test/compilable/src/lib1.hpp  6
-rw-r--r--  third_party/python/gyp/test/compilable/src/program.cpp  9
-rw-r--r--  third_party/python/gyp/test/compiler-override/compiler-exe.gyp  16
-rw-r--r--  third_party/python/gyp/test/compiler-override/compiler-global-settings.gyp.in  34
-rw-r--r--  third_party/python/gyp/test/compiler-override/compiler-host.gyp  17
-rw-r--r--  third_party/python/gyp/test/compiler-override/compiler-shared-lib.gyp  16
-rw-r--r--  third_party/python/gyp/test/compiler-override/cxxtest.cc  7
-rw-r--r--  third_party/python/gyp/test/compiler-override/gyptest-compiler-env-toolchain.py  78
-rwxr-xr-x  third_party/python/gyp/test/compiler-override/gyptest-compiler-env.py  110
-rwxr-xr-x  third_party/python/gyp/test/compiler-override/gyptest-compiler-global-settings.py  82
-rwxr-xr-x  third_party/python/gyp/test/compiler-override/my_cc.py  7
-rwxr-xr-x  third_party/python/gyp/test/compiler-override/my_cxx.py  7
-rwxr-xr-x  third_party/python/gyp/test/compiler-override/my_ld.py  7
-rwxr-xr-x  third_party/python/gyp/test/compiler-override/my_nm.py  9
-rwxr-xr-x  third_party/python/gyp/test/compiler-override/my_readelf.py  9
-rw-r--r--  third_party/python/gyp/test/compiler-override/test.c  7
-rw-r--r--  third_party/python/gyp/test/conditions/elseif/elseif.gyp  43
-rw-r--r--  third_party/python/gyp/test/conditions/elseif/elseif_bad1.gyp  20
-rw-r--r--  third_party/python/gyp/test/conditions/elseif/elseif_bad2.gyp  22
-rw-r--r--  third_party/python/gyp/test/conditions/elseif/elseif_bad3.gyp  23
-rw-r--r--  third_party/python/gyp/test/conditions/elseif/elseif_conditions.gypi  15
-rw-r--r--  third_party/python/gyp/test/conditions/elseif/gyptest_elseif.py  33
-rw-r--r--  third_party/python/gyp/test/conditions/elseif/program.cc  10
-rw-r--r--  third_party/python/gyp/test/configurations/basics/configurations.c  15
-rw-r--r--  third_party/python/gyp/test/configurations/basics/configurations.gyp  32
-rwxr-xr-x  third_party/python/gyp/test/configurations/basics/gyptest-configurations.py  29
-rw-r--r--  third_party/python/gyp/test/configurations/inheritance/configurations.c  21
-rw-r--r--  third_party/python/gyp/test/configurations/inheritance/configurations.gyp  40
-rw-r--r--  third_party/python/gyp/test/configurations/inheritance/duplicates.gyp  27
-rw-r--r--  third_party/python/gyp/test/configurations/inheritance/duplicates.gypd.golden  12
-rwxr-xr-x  third_party/python/gyp/test/configurations/inheritance/gyptest-duplicates.py  36
-rwxr-xr-x  third_party/python/gyp/test/configurations/inheritance/gyptest-inheritance.py  33
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/actions.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/all_dependent_settings.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/configurations.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/dependencies.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/direct_dependent_settings.gyp  18
-rwxr-xr-x  third_party/python/gyp/test/configurations/invalid/gyptest-configurations.py  36
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/libraries.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/link_settings.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/sources.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/standalone_static_library.gyp  17
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/target_name.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/invalid/type.gyp  18
-rw-r--r--  third_party/python/gyp/test/configurations/target_platform/configurations.gyp  58
-rw-r--r--  third_party/python/gyp/test/configurations/target_platform/front.c  8
-rwxr-xr-x  third_party/python/gyp/test/configurations/target_platform/gyptest-target_platform.py  40
-rw-r--r--  third_party/python/gyp/test/configurations/target_platform/left.c  3
-rw-r--r--  third_party/python/gyp/test/configurations/target_platform/right.c  3
-rw-r--r--  third_party/python/gyp/test/configurations/x64/configurations.c  12
-rw-r--r--  third_party/python/gyp/test/configurations/x64/configurations.gyp  38
-rwxr-xr-x  third_party/python/gyp/test/configurations/x64/gyptest-x86.py  31
-rwxr-xr-x  third_party/python/gyp/test/copies/gyptest-all.py  42
-rw-r--r--  third_party/python/gyp/test/copies/gyptest-attribs.py  41
-rwxr-xr-x  third_party/python/gyp/test/copies/gyptest-default.py  42
-rwxr-xr-x  third_party/python/gyp/test/copies/gyptest-samedir.py  28
-rwxr-xr-xthird_party/python/gyp/test/copies/gyptest-slash.py39
-rw-r--r--third_party/python/gyp/test/copies/gyptest-sourceless-shared-lib.py20
-rwxr-xr-xthird_party/python/gyp/test/copies/gyptest-updir.py32
-rw-r--r--third_party/python/gyp/test/copies/src/copies-attribs.gyp20
-rw-r--r--third_party/python/gyp/test/copies/src/copies-samedir.gyp37
-rw-r--r--third_party/python/gyp/test/copies/src/copies-slash.gyp36
-rw-r--r--third_party/python/gyp/test/copies/src/copies-sourceless-shared-lib.gyp27
-rw-r--r--third_party/python/gyp/test/copies/src/copies-updir.gyp21
-rw-r--r--third_party/python/gyp/test/copies/src/copies.gyp70
-rw-r--r--third_party/python/gyp/test/copies/src/directory/file31
-rw-r--r--third_party/python/gyp/test/copies/src/directory/file41
-rw-r--r--third_party/python/gyp/test/copies/src/directory/subdir/file51
-rwxr-xr-xthird_party/python/gyp/test/copies/src/executable-file.sh3
-rw-r--r--third_party/python/gyp/test/copies/src/file11
-rw-r--r--third_party/python/gyp/test/copies/src/file21
-rw-r--r--third_party/python/gyp/test/copies/src/foo.c13
-rw-r--r--third_party/python/gyp/test/copies/src/parentdir/subdir/file61
-rwxr-xr-xthird_party/python/gyp/test/custom-generator/gyptest-custom-generator.py18
-rw-r--r--third_party/python/gyp/test/custom-generator/mygenerator.py14
-rw-r--r--third_party/python/gyp/test/custom-generator/test.gyp15
-rw-r--r--third_party/python/gyp/test/cxxflags/cxxflags.cc15
-rw-r--r--third_party/python/gyp/test/cxxflags/cxxflags.gyp15
-rwxr-xr-xthird_party/python/gyp/test/cxxflags/gyptest-cxxflags.py45
-rw-r--r--third_party/python/gyp/test/defines-escaping/defines-escaping.c11
-rw-r--r--third_party/python/gyp/test/defines-escaping/defines-escaping.gyp19
-rwxr-xr-xthird_party/python/gyp/test/defines-escaping/gyptest-defines-escaping.py184
-rw-r--r--third_party/python/gyp/test/defines/defines-env.gyp22
-rw-r--r--third_party/python/gyp/test/defines/defines.c23
-rw-r--r--third_party/python/gyp/test/defines/defines.gyp38
-rwxr-xr-xthird_party/python/gyp/test/defines/gyptest-define-override.py43
-rwxr-xr-xthird_party/python/gyp/test/defines/gyptest-defines-env-regyp.py51
-rwxr-xr-xthird_party/python/gyp/test/defines/gyptest-defines-env.py85
-rwxr-xr-xthird_party/python/gyp/test/defines/gyptest-defines.py39
-rwxr-xr-xthird_party/python/gyp/test/dependencies/a.c9
-rw-r--r--third_party/python/gyp/test/dependencies/adso/all_dependent_settings_order.gyp45
-rwxr-xr-xthird_party/python/gyp/test/dependencies/adso/write_args.py11
-rwxr-xr-xthird_party/python/gyp/test/dependencies/b/b.c3
-rwxr-xr-xthird_party/python/gyp/test/dependencies/b/b.gyp22
-rwxr-xr-xthird_party/python/gyp/test/dependencies/b/b3.c9
-rw-r--r--third_party/python/gyp/test/dependencies/c/c.c4
-rw-r--r--third_party/python/gyp/test/dependencies/c/c.gyp22
-rw-r--r--third_party/python/gyp/test/dependencies/c/d.c3
-rw-r--r--third_party/python/gyp/test/dependencies/double_dependency.gyp23
-rw-r--r--third_party/python/gyp/test/dependencies/double_dependent.gyp12
-rw-r--r--third_party/python/gyp/test/dependencies/extra_targets.gyp18
-rw-r--r--third_party/python/gyp/test/dependencies/gyptest-all-dependent-settings-order.py19
-rw-r--r--third_party/python/gyp/test/dependencies/gyptest-double-dependency.py19
-rwxr-xr-xthird_party/python/gyp/test/dependencies/gyptest-extra-targets.py22
-rw-r--r--third_party/python/gyp/test/dependencies/gyptest-indirect-module-dependency.py22
-rwxr-xr-xthird_party/python/gyp/test/dependencies/gyptest-lib-only.py39
-rwxr-xr-xthird_party/python/gyp/test/dependencies/gyptest-none-traversal.py25
-rw-r--r--third_party/python/gyp/test/dependencies/gyptest-sharedlib-linksettings.py21
-rwxr-xr-xthird_party/python/gyp/test/dependencies/lib_only.gyp16
-rw-r--r--third_party/python/gyp/test/dependencies/main.c14
-rw-r--r--third_party/python/gyp/test/dependencies/module-dep/a.cc7
-rw-r--r--third_party/python/gyp/test/dependencies/module-dep/dll.cc9
-rw-r--r--third_party/python/gyp/test/dependencies/module-dep/exe.cc7
-rw-r--r--third_party/python/gyp/test/dependencies/module-dep/indirect-module-dependency.gyp37
-rwxr-xr-xthird_party/python/gyp/test/dependencies/none_traversal.gyp46
-rw-r--r--third_party/python/gyp/test/dependencies/sharedlib-linksettings/program.c25
-rw-r--r--third_party/python/gyp/test/dependencies/sharedlib-linksettings/sharedlib.c16
-rw-r--r--third_party/python/gyp/test/dependencies/sharedlib-linksettings/staticlib.c24
-rw-r--r--third_party/python/gyp/test/dependencies/sharedlib-linksettings/test.gyp37
-rwxr-xr-xthird_party/python/gyp/test/dependency-copy/gyptest-copy.py26
-rw-r--r--third_party/python/gyp/test/dependency-copy/src/copies.gyp25
-rw-r--r--third_party/python/gyp/test/dependency-copy/src/file1.c7
-rw-r--r--third_party/python/gyp/test/dependency-copy/src/file2.c7
-rw-r--r--third_party/python/gyp/test/dependent-settings/nested-dependent-settings/all-dependent-settings.gyp19
-rw-r--r--third_party/python/gyp/test/dependent-settings/nested-dependent-settings/direct-dependent-settings.gyp19
-rw-r--r--third_party/python/gyp/test/dependent-settings/nested-dependent-settings/gyptest-nested-dependent-settings.py18
-rw-r--r--third_party/python/gyp/test/determinism/determinism.gyp59
-rw-r--r--third_party/python/gyp/test/determinism/empty-targets.gyp32
-rw-r--r--third_party/python/gyp/test/determinism/gyptest-determinism.py30
-rw-r--r--third_party/python/gyp/test/determinism/gyptest-empty-target-names.py30
-rw-r--r--third_party/python/gyp/test/determinism/gyptest-needed-variables.py30
-rw-r--r--third_party/python/gyp/test/determinism/gyptest-solibs.py37
-rw-r--r--third_party/python/gyp/test/determinism/main.cc5
-rw-r--r--third_party/python/gyp/test/determinism/needed-variables.gyp33
-rw-r--r--third_party/python/gyp/test/determinism/rule.py8
-rw-r--r--third_party/python/gyp/test/determinism/solib.cc8
-rw-r--r--third_party/python/gyp/test/determinism/solibs.gyp32
-rw-r--r--third_party/python/gyp/test/empty-target/empty-target.gyp12
-rw-r--r--third_party/python/gyp/test/empty-target/gyptest-empty-target.py18
-rw-r--r--third_party/python/gyp/test/errors/dependency_cycle.gyp23
-rw-r--r--third_party/python/gyp/test/errors/duplicate_basenames.gyp13
-rw-r--r--third_party/python/gyp/test/errors/duplicate_node.gyp12
-rw-r--r--third_party/python/gyp/test/errors/duplicate_rule.gyp22
-rw-r--r--third_party/python/gyp/test/errors/duplicate_targets.gyp14
-rw-r--r--third_party/python/gyp/test/errors/error_command.gyp12
-rw-r--r--third_party/python/gyp/test/errors/file_cycle0.gyp17
-rw-r--r--third_party/python/gyp/test/errors/file_cycle1.gyp13
-rwxr-xr-xthird_party/python/gyp/test/errors/gyptest-errors.py80
-rw-r--r--third_party/python/gyp/test/errors/missing_command.gyp12
-rw-r--r--third_party/python/gyp/test/errors/missing_dep.gyp15
-rw-r--r--third_party/python/gyp/test/errors/missing_targets.gyp8
-rw-r--r--third_party/python/gyp/test/escaping/colon/test.gyp21
-rw-r--r--third_party/python/gyp/test/escaping/gyptest-colon.py51
-rw-r--r--third_party/python/gyp/test/exclusion/exclusion.gyp23
-rwxr-xr-xthird_party/python/gyp/test/exclusion/gyptest-exclusion.py22
-rw-r--r--third_party/python/gyp/test/exclusion/hello.c15
-rwxr-xr-xthird_party/python/gyp/test/external-cross-compile/gyptest-cross.py31
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/bogus1.cc1
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/bogus2.c1
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/cross.gyp83
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/cross_compile.gypi23
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/fake_cross.py18
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/program.cc16
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/test1.cc1
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/test2.c1
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/test3.cc1
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/test4.c1
-rw-r--r--third_party/python/gyp/test/external-cross-compile/src/tochar.py13
-rw-r--r--third_party/python/gyp/test/generator-output/actions/actions.gyp16
-rw-r--r--third_party/python/gyp/test/generator-output/actions/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/actions/subdir1/actions-out/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/actions/subdir1/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/actions/subdir1/executable.gyp44
-rwxr-xr-xthird_party/python/gyp/test/generator-output/actions/subdir1/make-prog1.py20
-rwxr-xr-xthird_party/python/gyp/test/generator-output/actions/subdir1/make-prog2.py20
-rw-r--r--third_party/python/gyp/test/generator-output/actions/subdir1/program.c12
-rw-r--r--third_party/python/gyp/test/generator-output/actions/subdir2/actions-out/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/actions/subdir2/build/README.txt4
-rwxr-xr-xthird_party/python/gyp/test/generator-output/actions/subdir2/make-file.py11
-rw-r--r--third_party/python/gyp/test/generator-output/actions/subdir2/none.gyp31
-rw-r--r--third_party/python/gyp/test/generator-output/copies/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/copies/copies-out/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/copies/copies.gyp50
-rw-r--r--third_party/python/gyp/test/generator-output/copies/file11
-rw-r--r--third_party/python/gyp/test/generator-output/copies/file21
-rw-r--r--third_party/python/gyp/test/generator-output/copies/subdir/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/copies/subdir/copies-out/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/copies/subdir/file31
-rw-r--r--third_party/python/gyp/test/generator-output/copies/subdir/file41
-rw-r--r--third_party/python/gyp/test/generator-output/copies/subdir/subdir.gyp32
-rwxr-xr-xthird_party/python/gyp/test/generator-output/gyptest-actions.py57
-rwxr-xr-xthird_party/python/gyp/test/generator-output/gyptest-copies.py59
-rwxr-xr-xthird_party/python/gyp/test/generator-output/gyptest-depth.py58
-rw-r--r--third_party/python/gyp/test/generator-output/gyptest-mac-bundle.py33
-rwxr-xr-xthird_party/python/gyp/test/generator-output/gyptest-relocate.py59
-rwxr-xr-xthird_party/python/gyp/test/generator-output/gyptest-rules.py58
-rwxr-xr-xthird_party/python/gyp/test/generator-output/gyptest-subdir2-deep.py36
-rwxr-xr-xthird_party/python/gyp/test/generator-output/gyptest-symlink.py44
-rwxr-xr-xthird_party/python/gyp/test/generator-output/gyptest-top-all.py53
-rw-r--r--third_party/python/gyp/test/generator-output/mac-bundle/Info.plist32
-rw-r--r--third_party/python/gyp/test/generator-output/mac-bundle/app.order1
-rw-r--r--third_party/python/gyp/test/generator-output/mac-bundle/header.h1
-rw-r--r--third_party/python/gyp/test/generator-output/mac-bundle/main.c1
-rw-r--r--third_party/python/gyp/test/generator-output/mac-bundle/resource.sb1
-rw-r--r--third_party/python/gyp/test/generator-output/mac-bundle/test.gyp25
-rw-r--r--third_party/python/gyp/test/generator-output/rules/build/README.txt4
-rwxr-xr-xthird_party/python/gyp/test/generator-output/rules/copy-file.py12
-rw-r--r--third_party/python/gyp/test/generator-output/rules/rules.gyp16
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir1/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir1/define3.in01
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir1/define4.in01
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir1/executable.gyp59
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir1/function1.in16
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir1/function2.in16
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir1/program.c18
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir2/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir2/file1.in01
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir2/file2.in01
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir2/file3.in11
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir2/file4.in11
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir2/none.gyp49
-rw-r--r--third_party/python/gyp/test/generator-output/rules/subdir2/rules-out/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/src/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/src/inc.h1
-rw-r--r--third_party/python/gyp/test/generator-output/src/inc1/include1.h1
-rw-r--r--third_party/python/gyp/test/generator-output/src/prog1.c18
-rw-r--r--third_party/python/gyp/test/generator-output/src/prog1.gyp28
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir2/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir2/deeper/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.c7
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.gyp18
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.h1
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir2/inc2/include2.h1
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir2/prog2.c18
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir2/prog2.gyp28
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir3/build/README.txt4
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir3/inc3/include3.h1
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir3/prog3.c18
-rw-r--r--third_party/python/gyp/test/generator-output/src/subdir3/prog3.gyp25
-rw-r--r--third_party/python/gyp/test/generator-output/src/symroot.gypi16
-rw-r--r--third_party/python/gyp/test/gyp-defines/defines.gyp26
-rw-r--r--third_party/python/gyp/test/gyp-defines/echo.py11
-rw-r--r--third_party/python/gyp/test/gyp-defines/gyptest-multiple-values.py36
-rw-r--r--third_party/python/gyp/test/gyp-defines/gyptest-regyp.py40
-rwxr-xr-xthird_party/python/gyp/test/hard_dependency/gyptest-exported-hard-dependency.py37
-rwxr-xr-xthird_party/python/gyp/test/hard_dependency/gyptest-no-exported-hard-dependency.py36
-rw-r--r--third_party/python/gyp/test/hard_dependency/src/a.c9
-rw-r--r--third_party/python/gyp/test/hard_dependency/src/a.h12
-rw-r--r--third_party/python/gyp/test/hard_dependency/src/b.c9
-rw-r--r--third_party/python/gyp/test/hard_dependency/src/b.h12
-rw-r--r--third_party/python/gyp/test/hard_dependency/src/c.c10
-rw-r--r--third_party/python/gyp/test/hard_dependency/src/c.h10
-rw-r--r--third_party/python/gyp/test/hard_dependency/src/d.c9
-rwxr-xr-xthird_party/python/gyp/test/hard_dependency/src/emit.py11
-rw-r--r--third_party/python/gyp/test/hard_dependency/src/hard_dependency.gyp78
-rwxr-xr-xthird_party/python/gyp/test/hello/gyptest-all.py24
-rwxr-xr-xthird_party/python/gyp/test/hello/gyptest-default.py24
-rwxr-xr-xthird_party/python/gyp/test/hello/gyptest-disable-regyp.py32
-rw-r--r--third_party/python/gyp/test/hello/gyptest-regyp-output.py36
-rwxr-xr-xthird_party/python/gyp/test/hello/gyptest-regyp.py32
-rwxr-xr-xthird_party/python/gyp/test/hello/gyptest-target.py24
-rw-r--r--third_party/python/gyp/test/hello/hello.c11
-rw-r--r--third_party/python/gyp/test/hello/hello.gyp15
-rw-r--r--third_party/python/gyp/test/hello/hello2.c11
-rw-r--r--third_party/python/gyp/test/hello/hello2.gyp15
-rwxr-xr-xthird_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-config-arg.py31
-rwxr-xr-xthird_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-config-env.py33
-rwxr-xr-xthird_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-regyp.py44
-rwxr-xr-xthird_party/python/gyp/test/home_dot_gyp/gyptest-home-includes.py30
-rw-r--r--third_party/python/gyp/test/home_dot_gyp/home/.gyp/include.gypi5
-rw-r--r--third_party/python/gyp/test/home_dot_gyp/home2/.gyp/include.gypi5
-rw-r--r--third_party/python/gyp/test/home_dot_gyp/home2/.gyp_new/include.gypi5
-rw-r--r--third_party/python/gyp/test/home_dot_gyp/src/all.gyp22
-rw-r--r--third_party/python/gyp/test/home_dot_gyp/src/printfoo.c7
-rwxr-xr-xthird_party/python/gyp/test/include_dirs/gyptest-all.py43
-rwxr-xr-xthird_party/python/gyp/test/include_dirs/gyptest-default.py43
-rw-r--r--third_party/python/gyp/test/include_dirs/src/inc.h1
-rw-r--r--third_party/python/gyp/test/include_dirs/src/inc1/include1.h1
-rw-r--r--third_party/python/gyp/test/include_dirs/src/includes.c19
-rw-r--r--third_party/python/gyp/test/include_dirs/src/includes.gyp27
-rw-r--r--third_party/python/gyp/test/include_dirs/src/shadow1/shadow.h1
-rw-r--r--third_party/python/gyp/test/include_dirs/src/shadow2/shadow.h1
-rw-r--r--third_party/python/gyp/test/include_dirs/src/subdir/inc.h1
-rw-r--r--third_party/python/gyp/test/include_dirs/src/subdir/inc2/include2.h1
-rw-r--r--third_party/python/gyp/test/include_dirs/src/subdir/subdir_includes.c14
-rw-r--r--third_party/python/gyp/test/include_dirs/src/subdir/subdir_includes.gyp20
-rwxr-xr-xthird_party/python/gyp/test/intermediate_dir/gyptest-intermediate-dir.py44
-rwxr-xr-xthird_party/python/gyp/test/intermediate_dir/src/script.py22
-rw-r--r--third_party/python/gyp/test/intermediate_dir/src/shared_infile.txt1
-rw-r--r--third_party/python/gyp/test/intermediate_dir/src/test.gyp42
-rw-r--r--third_party/python/gyp/test/intermediate_dir/src/test2.gyp42
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/InfoPlist-error.strings3
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/InfoPlist.strings3
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/LanguageMap.plist8
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/MainMenu.xib17
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/Main_iPhone.storyboard27
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/AppIcon.appiconset/Contents.json58
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/Contents.json23
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain.pngbin0 -> 3263 bytes
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@2x.pngbin0 -> 3847 bytes
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@3x.pngbin0 -> 4394 bytes
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/TestApp-Info.plist28
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/check_no_signature.py13
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/main.m13
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/only-compile-in-32-bits.m7
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/TestApp/only-compile-in-64-bits.m7
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/test-archs.gyp109
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/test-assets-catalog.gyp45
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/test-crosscompile.gyp47
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/test-device.gyp109
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/test.gyp75
-rw-r--r--third_party/python/gyp/test/ios/app-bundle/tool_main.cc7
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/Info.plist24
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/copies-with-xcode-envvars.gyp97
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/empty.c1
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file01
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file11
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file101
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file111
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file21
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file31
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file41
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file51
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file61
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file71
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file81
-rw-r--r--third_party/python/gyp/test/ios/copies-with-xcode-envvars/file91
-rw-r--r--third_party/python/gyp/test/ios/deployment-target/check-version-min.c33
-rw-r--r--third_party/python/gyp/test/ios/deployment-target/deployment-target.gyp34
-rw-r--r--third_party/python/gyp/test/ios/extension/ActionExtension/ActionViewController.h9
-rw-r--r--third_party/python/gyp/test/ios/extension/ActionExtension/ActionViewController.m31
-rw-r--r--third_party/python/gyp/test/ios/extension/ActionExtension/Info.plist42
-rw-r--r--third_party/python/gyp/test/ios/extension/ActionExtension/MainInterface.storyboard63
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/AppDelegate.h12
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/AppDelegate.m19
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/Base.lproj/Main.storyboard25
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/Images.xcassets/AppIcon.appiconset/Contents.json53
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/Images.xcassets/LaunchImage.launchimage/Contents.json51
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/Info.plist32
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/ViewController.h11
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/ViewController.m24
-rw-r--r--third_party/python/gyp/test/ios/extension/ExtensionContainer/main.m13
-rw-r--r--third_party/python/gyp/test/ios/extension/extension.gyp91
-rw-r--r--third_party/python/gyp/test/ios/framework/framework.gyp43
-rw-r--r--third_party/python/gyp/test/ios/framework/iOSFramework/Info.plist26
-rw-r--r--third_party/python/gyp/test/ios/framework/iOSFramework/Thing.h10
-rw-r--r--third_party/python/gyp/test/ios/framework/iOSFramework/Thing.m22
-rw-r--r--third_party/python/gyp/test/ios/framework/iOSFramework/iOSFramework.h9
-rwxr-xr-xthird_party/python/gyp/test/ios/gyptest-app-ios-assets-catalog.py57
-rwxr-xr-xthird_party/python/gyp/test/ios/gyptest-app-ios.py76
-rw-r--r--third_party/python/gyp/test/ios/gyptest-archs.py62
-rw-r--r--third_party/python/gyp/test/ios/gyptest-copies-with-xcode-envvars.py65
-rw-r--r--third_party/python/gyp/test/ios/gyptest-crosscompile.py34
-rw-r--r--third_party/python/gyp/test/ios/gyptest-deployment-target.py23
-rwxr-xr-xthird_party/python/gyp/test/ios/gyptest-extension.py51
-rwxr-xr-xthird_party/python/gyp/test/ios/gyptest-framework.py37
-rw-r--r--third_party/python/gyp/test/ios/gyptest-per-config-settings.py190
-rwxr-xr-xthird_party/python/gyp/test/ios/gyptest-watch.py44
-rw-r--r--third_party/python/gyp/test/ios/gyptest-xcode-ninja.py25
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchApp/Images.xcassets/AppIcon.appiconset/Contents.json62
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchApp/Images.xcassets/LaunchImage.launchimage/Contents.json24
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchApp/Info.plist35
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchApp/Interface.storyboard15
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/AppDelegate.h12
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/AppDelegate.m19
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/Base.lproj/Main.storyboard25
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/Images.xcassets/AppIcon.appiconset/Contents.json53
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/Images.xcassets/LaunchImage.launchimage/Contents.json51
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/Info.plist32
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/ViewController.h11
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/ViewController.m24
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchContainer/main.m13
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchKitExtension/Images.xcassets/MyImage.imageset/Contents.json20
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchKitExtension/Info.plist38
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchKitExtension/InterfaceController.h10
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchKitExtension/InterfaceController.m25
-rw-r--r--third_party/python/gyp/test/ios/watch/WatchKitExtension/MainInterface.storyboard63
-rw-r--r--third_party/python/gyp/test/ios/watch/watch.gyp105
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/AppDelegate.h11
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/AppDelegate.m18
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/Base.lproj/LaunchScreen.xib41
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/Base.lproj/Main.storyboard25
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/Images.xcassets/AppIcon.appiconset/Contents.json68
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/Info.plist47
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/ViewController.h9
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/ViewController.m21
-rw-r--r--third_party/python/gyp/test/ios/xctests/App/main.m13
-rw-r--r--third_party/python/gyp/test/ios/xctests/AppTests/AppTests.m31
-rw-r--r--third_party/python/gyp/test/ios/xctests/AppTests/Info.plist24
-rw-r--r--third_party/python/gyp/test/ios/xctests/gyptest-xctests.py49
-rw-r--r--third_party/python/gyp/test/ios/xctests/xctests.gyp74
-rw-r--r--third_party/python/gyp/test/lib/README.txt17
-rw-r--r--third_party/python/gyp/test/lib/TestCmd.py1597
-rw-r--r--third_party/python/gyp/test/lib/TestCommon.py591
-rw-r--r--third_party/python/gyp/test/lib/TestGyp.py1260
-rw-r--r--third_party/python/gyp/test/lib/TestMac.py76
-rw-r--r--third_party/python/gyp/test/lib/TestWin.py101
-rwxr-xr-xthird_party/python/gyp/test/library/gyptest-shared-obj-install-path.py39
-rwxr-xr-xthird_party/python/gyp/test/library/gyptest-shared.py84
-rwxr-xr-xthird_party/python/gyp/test/library/gyptest-static.py84
-rw-r--r--third_party/python/gyp/test/library/src/lib1.c10
-rw-r--r--third_party/python/gyp/test/library/src/lib1_moveable.c10
-rw-r--r--third_party/python/gyp/test/library/src/lib2.c10
-rw-r--r--third_party/python/gyp/test/library/src/lib2_moveable.c10
-rw-r--r--third_party/python/gyp/test/library/src/library.gyp58
-rw-r--r--third_party/python/gyp/test/library/src/program.c15
-rw-r--r--third_party/python/gyp/test/library/src/shared_dependency.gyp33
-rw-r--r--third_party/python/gyp/test/library_dirs/gyptest-library-dirs.py50
-rw-r--r--third_party/python/gyp/test/library_dirs/subdir/README.txt1
-rw-r--r--third_party/python/gyp/test/library_dirs/subdir/hello.cc11
-rw-r--r--third_party/python/gyp/test/library_dirs/subdir/mylib.cc9
-rw-r--r--third_party/python/gyp/test/library_dirs/subdir/mylib.h12
-rw-r--r--third_party/python/gyp/test/library_dirs/subdir/test-win.gyp60
-rw-r--r--third_party/python/gyp/test/library_dirs/subdir/test.gyp68
-rwxr-xr-xthird_party/python/gyp/test/link-dependency/gyptest-link-dependency.py23
-rw-r--r--third_party/python/gyp/test/link-dependency/main.c7
-rw-r--r--third_party/python/gyp/test/link-dependency/mymalloc.c12
-rw-r--r--third_party/python/gyp/test/link-dependency/test.gyp37
-rw-r--r--third_party/python/gyp/test/link-objects/base.c6
-rw-r--r--third_party/python/gyp/test/link-objects/extra.c5
-rwxr-xr-xthird_party/python/gyp/test/link-objects/gyptest-all.py28
-rw-r--r--third_party/python/gyp/test/link-objects/link-objects.gyp24
-rw-r--r--third_party/python/gyp/test/linux/gyptest-implicit-rpath.py48
-rw-r--r--third_party/python/gyp/test/linux/gyptest-ldflags-duplicates.py22
-rw-r--r--third_party/python/gyp/test/linux/gyptest-ldflags-from-environment.py45
-rw-r--r--third_party/python/gyp/test/linux/gyptest-target-rpath.py43
-rw-r--r--third_party/python/gyp/test/linux/implicit-rpath/file.c1
-rw-r--r--third_party/python/gyp/test/linux/implicit-rpath/main.c1
-rw-r--r--third_party/python/gyp/test/linux/implicit-rpath/test.gyp47
-rwxr-xr-xthird_party/python/gyp/test/linux/ldflags-duplicates/check-ldflags.py28
-rw-r--r--third_party/python/gyp/test/linux/ldflags-duplicates/lib1.c6
-rw-r--r--third_party/python/gyp/test/linux/ldflags-duplicates/lib2.c6
-rw-r--r--third_party/python/gyp/test/linux/ldflags-duplicates/main.c7
-rw-r--r--third_party/python/gyp/test/linux/ldflags-duplicates/test.gyp45
-rw-r--r--third_party/python/gyp/test/linux/ldflags-from-environment/main.c7
-rw-r--r--third_party/python/gyp/test/linux/ldflags-from-environment/test.gyp23
-rw-r--r--third_party/python/gyp/test/linux/target-rpath/file.c1
-rw-r--r--third_party/python/gyp/test/linux/target-rpath/main.c1
-rw-r--r--third_party/python/gyp/test/linux/target-rpath/test.gyp47
-rw-r--r--third_party/python/gyp/test/mac/action-envvars/action/action.gyp34
-rwxr-xr-xthird_party/python/gyp/test/mac/action-envvars/action/action.sh8
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/InfoPlist-error.strings3
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/InfoPlist.strings3
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/MainMenu.xib4119
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/utf-16be.stringsbin0 -> 208 bytes
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/utf-16le.stringsbin0 -> 208 bytes
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/AppIcon.appiconset/Contents.json58
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/Contents.json23
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain.pngbin0 -> 3263 bytes
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@2x.pngbin0 -> 3847 bytes
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@3x.pngbin0 -> 4394 bytes
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/TestApp-Info.plist34
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/TestAppAppDelegate.h13
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/TestAppAppDelegate.m15
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/TestApp/main.m10
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/empty.c0
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/test-assets-catalog.gyp43
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/test-error.gyp31
-rw-r--r--third_party/python/gyp/test/mac/app-bundle/test.gyp41
-rw-r--r--third_party/python/gyp/test/mac/archs/empty_main.cc1
-rw-r--r--third_party/python/gyp/test/mac/archs/file.mm1
-rw-r--r--third_party/python/gyp/test/mac/archs/file_a.cc8
-rw-r--r--third_party/python/gyp/test/mac/archs/file_a.h10
-rw-r--r--third_party/python/gyp/test/mac/archs/file_b.cc8
-rw-r--r--third_party/python/gyp/test/mac/archs/file_b.h10
-rw-r--r--third_party/python/gyp/test/mac/archs/file_c.cc11
-rw-r--r--third_party/python/gyp/test/mac/archs/file_d.cc11
-rw-r--r--third_party/python/gyp/test/mac/archs/header.h1
-rw-r--r--third_party/python/gyp/test/mac/archs/my_file.cc4
-rw-r--r--third_party/python/gyp/test/mac/archs/my_main_file.cc9
-rw-r--r--third_party/python/gyp/test/mac/archs/test-archs-multiarch.gyp92
-rw-r--r--third_party/python/gyp/test/mac/archs/test-archs-x86_64.gyp27
-rw-r--r--third_party/python/gyp/test/mac/archs/test-dependencies.gyp92
-rw-r--r--third_party/python/gyp/test/mac/archs/test-no-archs.gyp21
-rw-r--r--third_party/python/gyp/test/mac/archs/test-valid-archs.gyp28
-rwxr-xr-xthird_party/python/gyp/test/mac/bundle-resources/change.sh3
-rwxr-xr-xthird_party/python/gyp/test/mac/bundle-resources/executable-file.sh3
-rw-r--r--third_party/python/gyp/test/mac/bundle-resources/secret.txt1
-rw-r--r--third_party/python/gyp/test/mac/bundle-resources/test.gyp59
-rw-r--r--third_party/python/gyp/test/mac/cflags/ccfile.cc7
-rw-r--r--third_party/python/gyp/test/mac/cflags/ccfile_withcflags.cc7
-rw-r--r--third_party/python/gyp/test/mac/cflags/cfile.c7
-rw-r--r--third_party/python/gyp/test/mac/cflags/cppfile.cpp7
-rw-r--r--third_party/python/gyp/test/mac/cflags/cppfile_withcflags.cpp7
-rw-r--r--third_party/python/gyp/test/mac/cflags/cxxfile.cxx7
-rw-r--r--third_party/python/gyp/test/mac/cflags/cxxfile_withcflags.cxx7
-rw-r--r--third_party/python/gyp/test/mac/cflags/mfile.m7
-rw-r--r--third_party/python/gyp/test/mac/cflags/mmfile.mm7
-rw-r--r--third_party/python/gyp/test/mac/cflags/mmfile_withcflags.mm7
-rw-r--r--third_party/python/gyp/test/mac/cflags/test.gyp132
-rw-r--r--third_party/python/gyp/test/mac/clang-cxx-language-standard/c++11.cc8
-rw-r--r--third_party/python/gyp/test/mac/clang-cxx-language-standard/c++98.cc24
-rw-r--r--third_party/python/gyp/test/mac/clang-cxx-language-standard/clang-cxx-language-standard.gyp30
-rw-r--r--third_party/python/gyp/test/mac/clang-cxx-library/clang-cxx-library.gyp32
-rw-r--r--third_party/python/gyp/test/mac/clang-cxx-library/libc++.cc11
-rw-r--r--third_party/python/gyp/test/mac/clang-cxx-library/libstdc++.cc11
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/copies-with-xcode-envvars.gyp87
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/empty.c1
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file01
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file11
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file101
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file111
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file21
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file31
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file41
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file51
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file61
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file71
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file81
-rw-r--r--third_party/python/gyp/test/mac/copies-with-xcode-envvars/file91
-rw-r--r--third_party/python/gyp/test/mac/copy-dylib/empty.c1
-rw-r--r--third_party/python/gyp/test/mac/copy-dylib/test.gyp31
-rw-r--r--third_party/python/gyp/test/mac/debuginfo/file.c6
-rw-r--r--third_party/python/gyp/test/mac/debuginfo/test.gyp82
-rw-r--r--third_party/python/gyp/test/mac/depend-on-bundle/English.lproj/InfoPlist.strings1
-rw-r--r--third_party/python/gyp/test/mac/depend-on-bundle/Info.plist28
-rw-r--r--third_party/python/gyp/test/mac/depend-on-bundle/bundle.c1
-rw-r--r--third_party/python/gyp/test/mac/depend-on-bundle/executable.c4
-rw-r--r--third_party/python/gyp/test/mac/depend-on-bundle/test.gyp28
-rw-r--r--third_party/python/gyp/test/mac/deployment-target/check-version-min.c33
-rw-r--r--third_party/python/gyp/test/mac/deployment-target/deployment-target.gyp28
-rw-r--r--third_party/python/gyp/test/mac/framework-dirs/calculate.c15
-rw-r--r--third_party/python/gyp/test/mac/framework-dirs/framework-dirs.gyp21
-rw-r--r--third_party/python/gyp/test/mac/framework-headers/myframework.h8
-rw-r--r--third_party/python/gyp/test/mac/framework-headers/myframework.m8
-rw-r--r--third_party/python/gyp/test/mac/framework-headers/test.gyp44
-rw-r--r--third_party/python/gyp/test/mac/framework/TestFramework/English.lproj/InfoPlist.strings2
-rw-r--r--third_party/python/gyp/test/mac/framework/TestFramework/Info.plist28
-rw-r--r--third_party/python/gyp/test/mac/framework/TestFramework/ObjCVector.h28
-rw-r--r--third_party/python/gyp/test/mac/framework/TestFramework/ObjCVector.mm63
-rw-r--r--third_party/python/gyp/test/mac/framework/TestFramework/ObjCVectorInternal.h9
-rw-r--r--third_party/python/gyp/test/mac/framework/TestFramework/TestFramework_Prefix.pch7
-rw-r--r--third_party/python/gyp/test/mac/framework/empty.c0
-rw-r--r--third_party/python/gyp/test/mac/framework/framework.gyp108
-rw-r--r--third_party/python/gyp/test/mac/global-settings/src/dir1/dir1.gyp11
-rw-r--r--third_party/python/gyp/test/mac/global-settings/src/dir2/dir2.gyp22
-rw-r--r--third_party/python/gyp/test/mac/global-settings/src/dir2/file.txt1
-rw-r--r--third_party/python/gyp/test/mac/gyptest-action-envvars.py36
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-app-assets-catalog.py125
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-app-error.py49
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-app.py122
-rw-r--r--third_party/python/gyp/test/mac/gyptest-archs.py96
-rw-r--r--third_party/python/gyp/test/mac/gyptest-bundle-resources.py64
-rw-r--r--third_party/python/gyp/test/mac/gyptest-cflags.py21
-rw-r--r--third_party/python/gyp/test/mac/gyptest-clang-cxx-language-standard.py25
-rw-r--r--third_party/python/gyp/test/mac/gyptest-clang-cxx-library.py32
-rw-r--r--third_party/python/gyp/test/mac/gyptest-copies-with-xcode-envvars.py65
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-copies.py62
-rw-r--r--third_party/python/gyp/test/mac/gyptest-copy-dylib.py25
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-debuginfo.py36
-rw-r--r--third_party/python/gyp/test/mac/gyptest-depend-on-bundle.py45
-rw-r--r--third_party/python/gyp/test/mac/gyptest-deployment-target.py27
-rw-r--r--third_party/python/gyp/test/mac/gyptest-framework-dirs.py23
-rw-r--r--third_party/python/gyp/test/mac/gyptest-framework-headers.py38
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-framework.py80
-rw-r--r--third_party/python/gyp/test/mac/gyptest-global-settings.py33
-rw-r--r--third_party/python/gyp/test/mac/gyptest-identical-name.py45
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-infoplist-process.py56
-rw-r--r--third_party/python/gyp/test/mac/gyptest-installname.py85
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-kext.py27
-rw-r--r--third_party/python/gyp/test/mac/gyptest-ldflags-passed-to-libtool.py37
-rw-r--r--third_party/python/gyp/test/mac/gyptest-ldflags.py74
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-libraries.py30
-rw-r--r--third_party/python/gyp/test/mac/gyptest-libtool-zero.py26
-rw-r--r--third_party/python/gyp/test/mac/gyptest-loadable-module-bundle-product-extension.py31
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-loadable-module.py54
-rw-r--r--third_party/python/gyp/test/mac/gyptest-lto.py66
-rw-r--r--third_party/python/gyp/test/mac/gyptest-missing-cfbundlesignature.py34
-rw-r--r--third_party/python/gyp/test/mac/gyptest-non-strs-flattened-to-env.py38
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-objc-arc.py27
-rw-r--r--third_party/python/gyp/test/mac/gyptest-objc-gc.py51
-rw-r--r--third_party/python/gyp/test/mac/gyptest-postbuild-copy-bundle.py75
-rw-r--r--third_party/python/gyp/test/mac/gyptest-postbuild-defaults.py34
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-postbuild-fail.py71
-rw-r--r--third_party/python/gyp/test/mac/gyptest-postbuild-multiple-configurations.py26
-rw-r--r--third_party/python/gyp/test/mac/gyptest-postbuild-static-library.py28
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-postbuild.py53
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-prefixheader.py20
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-rebuild.py46
-rw-r--r--third_party/python/gyp/test/mac/gyptest-rpath.py50
-rw-r--r--third_party/python/gyp/test/mac/gyptest-sdkroot.py56
-rw-r--r--third_party/python/gyp/test/mac/gyptest-sourceless-module.py77
-rw-r--r--third_party/python/gyp/test/mac/gyptest-strip-default.py97
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-strip.py66
-rw-r--r--third_party/python/gyp/test/mac/gyptest-swift-library.py67
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-type-envvars.py26
-rw-r--r--third_party/python/gyp/test/mac/gyptest-unicode-settings.py20
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-xcode-env-order.py95
-rw-r--r--third_party/python/gyp/test/mac/gyptest-xcode-gcc-clang.py40
-rw-r--r--third_party/python/gyp/test/mac/gyptest-xcode-gcc.py60
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-xcode-support-actions.py25
-rw-r--r--third_party/python/gyp/test/mac/gyptest-xctest.py41
-rwxr-xr-xthird_party/python/gyp/test/mac/gyptest-xcuitest.py39
-rw-r--r--third_party/python/gyp/test/mac/identical-name/proxy/proxy.cc2
-rw-r--r--third_party/python/gyp/test/mac/identical-name/proxy/proxy.gyp9
-rw-r--r--third_party/python/gyp/test/mac/identical-name/proxy/testlib/testlib.cc2
-rw-r--r--third_party/python/gyp/test/mac/identical-name/proxy/testlib/testlib.gyp8
-rw-r--r--third_party/python/gyp/test/mac/identical-name/test-should-fail.gyp10
-rw-r--r--third_party/python/gyp/test/mac/identical-name/test.gyp11
-rw-r--r--third_party/python/gyp/test/mac/identical-name/test.gypi7
-rw-r--r--third_party/python/gyp/test/mac/identical-name/testlib/main.cc3
-rw-r--r--third_party/python/gyp/test/mac/identical-name/testlib/testlib.gyp14
-rw-r--r--third_party/python/gyp/test/mac/identical-name/testlib/void.cc2
-rw-r--r--third_party/python/gyp/test/mac/infoplist-process/Info.plist36
-rw-r--r--third_party/python/gyp/test/mac/infoplist-process/main.c7
-rw-r--r--third_party/python/gyp/test/mac/infoplist-process/test1.gyp25
-rw-r--r--third_party/python/gyp/test/mac/infoplist-process/test2.gyp25
-rw-r--r--third_party/python/gyp/test/mac/infoplist-process/test3.gyp25
-rw-r--r--third_party/python/gyp/test/mac/installname/Info.plist28
-rw-r--r--third_party/python/gyp/test/mac/installname/file.c1
-rw-r--r--third_party/python/gyp/test/mac/installname/main.c1
-rw-r--r--third_party/python/gyp/test/mac/installname/test.gyp93
-rw-r--r--third_party/python/gyp/test/mac/kext/GypKext/GypKext-Info.plist35
-rw-r--r--third_party/python/gyp/test/mac/kext/GypKext/GypKext.c16
-rw-r--r--third_party/python/gyp/test/mac/kext/kext.gyp18
-rw-r--r--third_party/python/gyp/test/mac/ldflags-libtool/file.c1
-rw-r--r--third_party/python/gyp/test/mac/ldflags-libtool/test.gyp17
-rw-r--r--third_party/python/gyp/test/mac/ldflags/subdirectory/Info.plist8
-rw-r--r--third_party/python/gyp/test/mac/ldflags/subdirectory/file.c2
-rw-r--r--third_party/python/gyp/test/mac/ldflags/subdirectory/symbol_list.def1
-rw-r--r--third_party/python/gyp/test/mac/ldflags/subdirectory/test.gyp66
-rw-r--r--third_party/python/gyp/test/mac/libraries/subdir/README.txt1
-rw-r--r--third_party/python/gyp/test/mac/libraries/subdir/hello.cc10
-rw-r--r--third_party/python/gyp/test/mac/libraries/subdir/mylib.c7
-rw-r--r--third_party/python/gyp/test/mac/libraries/subdir/test.gyp65
-rw-r--r--third_party/python/gyp/test/mac/libtool-zero/mylib.c7
-rw-r--r--third_party/python/gyp/test/mac/libtool-zero/test.gyp18
-rw-r--r--third_party/python/gyp/test/mac/loadable-module-bundle-product-extension/src.cc7
-rw-r--r--third_party/python/gyp/test/mac/loadable-module-bundle-product-extension/test.gyp24
-rw-r--r--third_party/python/gyp/test/mac/loadable-module/Info.plist26
-rw-r--r--third_party/python/gyp/test/mac/loadable-module/module.c11
-rw-r--r--third_party/python/gyp/test/mac/loadable-module/test.gyp18
-rw-r--r--third_party/python/gyp/test/mac/lto/asmfile.S2
-rw-r--r--third_party/python/gyp/test/mac/lto/ccfile.cc1
-rw-r--r--third_party/python/gyp/test/mac/lto/cfile.c1
-rw-r--r--third_party/python/gyp/test/mac/lto/mfile.m1
-rw-r--r--third_party/python/gyp/test/mac/lto/mmfile.mm1
-rw-r--r--third_party/python/gyp/test/mac/lto/test.gyp35
-rw-r--r--third_party/python/gyp/test/mac/missing-cfbundlesignature/Info.plist10
-rw-r--r--third_party/python/gyp/test/mac/missing-cfbundlesignature/Other-Info.plist12
-rw-r--r--third_party/python/gyp/test/mac/missing-cfbundlesignature/Third-Info.plist12
-rw-r--r--third_party/python/gyp/test/mac/missing-cfbundlesignature/file.c1
-rw-r--r--third_party/python/gyp/test/mac/missing-cfbundlesignature/test.gyp34
-rw-r--r--third_party/python/gyp/test/mac/non-strs-flattened-to-env/Info.plist15
-rw-r--r--third_party/python/gyp/test/mac/non-strs-flattened-to-env/main.c7
-rw-r--r--third_party/python/gyp/test/mac/non-strs-flattened-to-env/test.gyp27
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/c-file.c5
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/cc-file.cc5
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/m-file-arc-weak.m9
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/m-file-no-arc.m9
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/m-file.m9
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/mm-file-arc-weak.mm9
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/mm-file-no-arc.mm9
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/mm-file.mm9
-rw-r--r--third_party/python/gyp/test/mac/objc-arc/test.gyp53
-rw-r--r--third_party/python/gyp/test/mac/objc-gc/c-file.c1
-rw-r--r--third_party/python/gyp/test/mac/objc-gc/cc-file.cc1
-rw-r--r--third_party/python/gyp/test/mac/objc-gc/main.m6
-rw-r--r--third_party/python/gyp/test/mac/objc-gc/needs-gc-mm.mm1
-rw-r--r--third_party/python/gyp/test/mac/objc-gc/needs-gc.m1
-rw-r--r--third_party/python/gyp/test/mac/objc-gc/test.gyp102
-rw-r--r--third_party/python/gyp/test/mac/postbuild-copy-bundle/Framework-Info.plist30
-rw-r--r--third_party/python/gyp/test/mac/postbuild-copy-bundle/TestApp-Info.plist32
-rw-r--r--third_party/python/gyp/test/mac/postbuild-copy-bundle/copied.txt1
-rw-r--r--third_party/python/gyp/test/mac/postbuild-copy-bundle/empty.c0
-rw-r--r--third_party/python/gyp/test/mac/postbuild-copy-bundle/main.c4
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuild-copy-bundle/postbuild-copy-framework.sh9
-rw-r--r--third_party/python/gyp/test/mac/postbuild-copy-bundle/resource_file.sb1
-rw-r--r--third_party/python/gyp/test/mac/postbuild-copy-bundle/test.gyp49
-rw-r--r--third_party/python/gyp/test/mac/postbuild-defaults/Info.plist13
-rw-r--r--third_party/python/gyp/test/mac/postbuild-defaults/main.c7
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuild-defaults/postbuild-defaults.sh15
-rw-r--r--third_party/python/gyp/test/mac/postbuild-defaults/test.gyp26
-rw-r--r--third_party/python/gyp/test/mac/postbuild-fail/file.c6
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuild-fail/postbuild-fail.sh6
-rw-r--r--third_party/python/gyp/test/mac/postbuild-fail/test.gyp38
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuild-fail/touch-dynamic.sh7
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuild-fail/touch-static.sh7
-rw-r--r--third_party/python/gyp/test/mac/postbuild-multiple-configurations/main.c4
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuild-multiple-configurations/postbuild-touch-file.sh7
-rw-r--r--third_party/python/gyp/test/mac/postbuild-multiple-configurations/test.gyp26
-rw-r--r--third_party/python/gyp/test/mac/postbuild-static-library/empty.c4
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuild-static-library/postbuild-touch-file.sh7
-rw-r--r--third_party/python/gyp/test/mac/postbuild-static-library/test.gyp34
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuilds/copy.sh3
-rw-r--r--third_party/python/gyp/test/mac/postbuilds/file.c4
-rw-r--r--third_party/python/gyp/test/mac/postbuilds/file_g.c4
-rw-r--r--third_party/python/gyp/test/mac/postbuilds/file_h.c4
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuilds/script/shared_library_postbuild.sh23
-rwxr-xr-xthird_party/python/gyp/test/mac/postbuilds/script/static_library_postbuild.sh23
-rw-r--r--third_party/python/gyp/test/mac/postbuilds/subdirectory/copied_file.txt1
-rw-r--r--third_party/python/gyp/test/mac/postbuilds/subdirectory/nested_target.gyp53
-rw-r--r--third_party/python/gyp/test/mac/postbuilds/test.gyp93
-rw-r--r--third_party/python/gyp/test/mac/prefixheader/file.c1
-rw-r--r--third_party/python/gyp/test/mac/prefixheader/file.cc1
-rw-r--r--third_party/python/gyp/test/mac/prefixheader/file.m1
-rw-r--r--third_party/python/gyp/test/mac/prefixheader/file.mm1
-rw-r--r--third_party/python/gyp/test/mac/prefixheader/header.h1
-rw-r--r--third_party/python/gyp/test/mac/prefixheader/test.gyp82
-rw-r--r--third_party/python/gyp/test/mac/rebuild/TestApp-Info.plist32
-rwxr-xr-xthird_party/python/gyp/test/mac/rebuild/delay-touch.sh6
-rw-r--r--third_party/python/gyp/test/mac/rebuild/empty.c0
-rw-r--r--third_party/python/gyp/test/mac/rebuild/main.c1
-rw-r--r--third_party/python/gyp/test/mac/rebuild/test.gyp56
-rw-r--r--third_party/python/gyp/test/mac/rpath/file.c1
-rw-r--r--third_party/python/gyp/test/mac/rpath/main.c1
-rw-r--r--third_party/python/gyp/test/mac/rpath/test.gyp48
-rw-r--r--third_party/python/gyp/test/mac/sdkroot/file.cc5
-rw-r--r--third_party/python/gyp/test/mac/sdkroot/test.gyp35
-rwxr-xr-xthird_party/python/gyp/test/mac/sdkroot/test_shorthand.sh20
-rw-r--r--third_party/python/gyp/test/mac/sourceless-module/empty.c1
-rw-r--r--third_party/python/gyp/test/mac/sourceless-module/empty.txt2
-rw-r--r--third_party/python/gyp/test/mac/sourceless-module/fun.c1
-rw-r--r--third_party/python/gyp/test/mac/sourceless-module/test.gyp96
-rw-r--r--third_party/python/gyp/test/mac/strip/file.c22
-rw-r--r--third_party/python/gyp/test/mac/strip/main.c25
-rw-r--r--third_party/python/gyp/test/mac/strip/strip.saves5
-rw-r--r--third_party/python/gyp/test/mac/strip/subdirectory/nested_file.c1
-rw-r--r--third_party/python/gyp/test/mac/strip/subdirectory/nested_strip.saves5
-rw-r--r--third_party/python/gyp/test/mac/strip/subdirectory/subdirectory.gyp38
-rwxr-xr-xthird_party/python/gyp/test/mac/strip/subdirectory/test_reading_save_file_from_postbuild.sh5
-rw-r--r--third_party/python/gyp/test/mac/strip/test-defaults.gyp51
-rw-r--r--third_party/python/gyp/test/mac/strip/test.gyp119
-rw-r--r--third_party/python/gyp/test/mac/swift-library/Info.plist28
-rw-r--r--third_party/python/gyp/test/mac/swift-library/file.swift9
-rw-r--r--third_party/python/gyp/test/mac/swift-library/test.gyp21
-rw-r--r--third_party/python/gyp/test/mac/type_envvars/file.c6
-rw-r--r--third_party/python/gyp/test/mac/type_envvars/test.gyp100
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_bundle_executable.sh30
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_bundle_loadable_module.sh35
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_bundle_shared_library.sh38
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_check_sdkroot.sh47
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_nonbundle_executable.sh33
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_nonbundle_loadable_module.sh31
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_nonbundle_none.sh32
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_nonbundle_shared_library.sh31
-rwxr-xr-xthird_party/python/gyp/test/mac/type_envvars/test_nonbundle_static_library.sh31
-rw-r--r--third_party/python/gyp/test/mac/unicode-settings/file.cc2
-rw-r--r--third_party/python/gyp/test/mac/unicode-settings/test.gyp23
-rwxr-xr-xthird_party/python/gyp/test/mac/unicode-settings/test_bundle_display_name.sh7
-rw-r--r--third_party/python/gyp/test/mac/xcode-env-order/Info.plist56
-rw-r--r--third_party/python/gyp/test/mac/xcode-env-order/file.ext10
-rw-r--r--third_party/python/gyp/test/mac/xcode-env-order/file.ext20
-rw-r--r--third_party/python/gyp/test/mac/xcode-env-order/file.ext30
-rw-r--r--third_party/python/gyp/test/mac/xcode-env-order/main.c7
-rw-r--r--third_party/python/gyp/test/mac/xcode-env-order/test.gyp121
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/aliasing.cc13
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/test-clang.gyp42
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/test.gyp60
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/valid_c.c8
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/valid_cc.cc8
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/valid_m.m8
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/valid_mm.mm8
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/warn_about_invalid_offsetof_macro.cc15
-rw-r--r--third_party/python/gyp/test/mac/xcode-gcc/warn_about_missing_newline.c8
-rw-r--r--third_party/python/gyp/test/mac/xcode-support-actions/source.c0
-rw-r--r--third_party/python/gyp/test/mac/xcode-support-actions/test.gyp26
-rw-r--r--third_party/python/gyp/test/mac/xctest/MyClass.h8
-rw-r--r--third_party/python/gyp/test/mac/xctest/MyClass.m8
-rw-r--r--third_party/python/gyp/test/mac/xctest/TestCase.m16
-rw-r--r--third_party/python/gyp/test/mac/xctest/resource.txt1
-rw-r--r--third_party/python/gyp/test/mac/xctest/test.gyp47
-rw-r--r--third_party/python/gyp/test/mac/xctest/test.xcodeproj/xcshareddata/xcschemes/classes.xcscheme69
-rw-r--r--third_party/python/gyp/test/mac/xcuitest/Info.plist28
-rw-r--r--third_party/python/gyp/test/mac/xcuitest/MyAppDelegate.h8
-rw-r--r--third_party/python/gyp/test/mac/xcuitest/MyAppDelegate.m19
-rw-r--r--third_party/python/gyp/test/mac/xcuitest/TestCase.m15
-rw-r--r--third_party/python/gyp/test/mac/xcuitest/main.m15
-rw-r--r--third_party/python/gyp/test/mac/xcuitest/resource.txt1
-rw-r--r--third_party/python/gyp/test/mac/xcuitest/test.gyp69
-rw-r--r--third_party/python/gyp/test/make/dependencies.gyp15
-rwxr-xr-xthird_party/python/gyp/test/make/gyptest-dependencies.py26
-rwxr-xr-xthird_party/python/gyp/test/make/gyptest-noload.py57
-rw-r--r--third_party/python/gyp/test/make/main.cc12
-rw-r--r--third_party/python/gyp/test/make/main.h0
-rw-r--r--third_party/python/gyp/test/make/noload/all.gyp18
-rw-r--r--third_party/python/gyp/test/make/noload/lib/shared.c3
-rw-r--r--third_party/python/gyp/test/make/noload/lib/shared.gyp16
-rw-r--r--third_party/python/gyp/test/make/noload/lib/shared.h1
-rw-r--r--third_party/python/gyp/test/make/noload/main.c9
-rw-r--r--third_party/python/gyp/test/make_global_settings/ar/gyptest-make_global_settings_ar.py126
-rw-r--r--third_party/python/gyp/test/make_global_settings/ar/make_global_settings_ar.gyp29
-rw-r--r--third_party/python/gyp/test/make_global_settings/basics/gyptest-make_global_settings.py51
-rw-r--r--third_party/python/gyp/test/make_global_settings/basics/make_global_settings.gyp17
-rw-r--r--third_party/python/gyp/test/make_global_settings/env-wrapper/gyptest-wrapper.py51
-rw-r--r--third_party/python/gyp/test/make_global_settings/env-wrapper/wrapper.gyp17
-rw-r--r--third_party/python/gyp/test/make_global_settings/full-toolchain/bar.cc1
-rw-r--r--third_party/python/gyp/test/make_global_settings/full-toolchain/foo.c1
-rw-r--r--third_party/python/gyp/test/make_global_settings/full-toolchain/gyptest-make_global_settings.py53
-rw-r--r--third_party/python/gyp/test/make_global_settings/full-toolchain/make_global_settings.gyp22
-rwxr-xr-xthird_party/python/gyp/test/make_global_settings/full-toolchain/my_nm.py9
-rwxr-xr-xthird_party/python/gyp/test/make_global_settings/full-toolchain/my_readelf.py9
-rw-r--r--third_party/python/gyp/test/make_global_settings/ld/gyptest-make_global_settings_ld.py130
-rw-r--r--third_party/python/gyp/test/make_global_settings/ld/make_global_settings_ld.gyp29
-rw-r--r--third_party/python/gyp/test/make_global_settings/wrapper/gyptest-wrapper.py52
-rw-r--r--third_party/python/gyp/test/make_global_settings/wrapper/wrapper.gyp21
-rw-r--r--third_party/python/gyp/test/many-actions/file00
-rw-r--r--third_party/python/gyp/test/many-actions/file10
-rw-r--r--third_party/python/gyp/test/many-actions/file20
-rw-r--r--third_party/python/gyp/test/many-actions/file30
-rw-r--r--third_party/python/gyp/test/many-actions/file40
-rw-r--r--third_party/python/gyp/test/many-actions/gyptest-many-actions-unsorted.py43
-rw-r--r--third_party/python/gyp/test/many-actions/gyptest-many-actions.py29
-rw-r--r--third_party/python/gyp/test/many-actions/many-actions-unsorted.gyp154
-rw-r--r--third_party/python/gyp/test/many-actions/many-actions.gyp1817
-rwxr-xr-xthird_party/python/gyp/test/module/gyptest-default.py28
-rw-r--r--third_party/python/gyp/test/module/src/lib1.c10
-rw-r--r--third_party/python/gyp/test/module/src/lib2.c10
-rw-r--r--third_party/python/gyp/test/module/src/module.gyp53
-rw-r--r--third_party/python/gyp/test/module/src/program.c111
-rw-r--r--third_party/python/gyp/test/msvs/buildevents/buildevents.gyp14
-rwxr-xr-xthird_party/python/gyp/test/msvs/buildevents/gyptest-msbuild-supports-prepostbuild.py24
-rwxr-xr-xthird_party/python/gyp/test/msvs/buildevents/gyptest-ninja-warnings.py29
-rw-r--r--third_party/python/gyp/test/msvs/buildevents/main.cc5
-rw-r--r--third_party/python/gyp/test/msvs/config_attrs/gyptest-config_attrs.py41
-rw-r--r--third_party/python/gyp/test/msvs/config_attrs/hello.c11
-rw-r--r--third_party/python/gyp/test/msvs/config_attrs/hello.gyp21
-rw-r--r--third_party/python/gyp/test/msvs/express/base/base.gyp22
-rw-r--r--third_party/python/gyp/test/msvs/express/express.gyp19
-rwxr-xr-xthird_party/python/gyp/test/msvs/express/gyptest-express.py29
-rw-r--r--third_party/python/gyp/test/msvs/external_builder/external.gyp68
-rw-r--r--third_party/python/gyp/test/msvs/external_builder/external_builder.py9
-rw-r--r--third_party/python/gyp/test/msvs/external_builder/gyptest-all.py59
-rw-r--r--third_party/python/gyp/test/msvs/external_builder/hello.cpp10
-rw-r--r--third_party/python/gyp/test/msvs/external_builder/hello.z6
-rw-r--r--third_party/python/gyp/test/msvs/external_builder/msbuild_action.py9
-rw-r--r--third_party/python/gyp/test/msvs/external_builder/msbuild_rule.py11
-rw-r--r--third_party/python/gyp/test/msvs/filters/filters.gyp47
-rw-r--r--third_party/python/gyp/test/msvs/filters/gyptest-filters-2008.py68
-rw-r--r--third_party/python/gyp/test/msvs/filters/gyptest-filters-2010.py57
-rw-r--r--third_party/python/gyp/test/msvs/list_excluded/gyptest-all.py51
-rw-r--r--third_party/python/gyp/test/msvs/list_excluded/hello.cpp10
-rw-r--r--third_party/python/gyp/test/msvs/list_excluded/hello_exclude.gyp19
-rw-r--r--third_party/python/gyp/test/msvs/list_excluded/hello_mac.cpp10
-rw-r--r--third_party/python/gyp/test/msvs/missing_sources/gyptest-missing.py43
-rw-r--r--third_party/python/gyp/test/msvs/missing_sources/hello_missing.gyp15
-rw-r--r--third_party/python/gyp/test/msvs/multiple_actions_error_handling/action_fail.py7
-rw-r--r--third_party/python/gyp/test/msvs/multiple_actions_error_handling/action_succeed.py7
-rw-r--r--third_party/python/gyp/test/msvs/multiple_actions_error_handling/actions.gyp40
-rw-r--r--third_party/python/gyp/test/msvs/multiple_actions_error_handling/gyptest.py26
-rw-r--r--third_party/python/gyp/test/msvs/props/AppName.props14
-rw-r--r--third_party/python/gyp/test/msvs/props/AppName.vsprops11
-rw-r--r--third_party/python/gyp/test/msvs/props/gyptest-props.py22
-rw-r--r--third_party/python/gyp/test/msvs/props/hello.c11
-rw-r--r--third_party/python/gyp/test/msvs/props/hello.gyp22
-rw-r--r--third_party/python/gyp/test/msvs/rules_stdout_stderr/dummy.bar5
-rw-r--r--third_party/python/gyp/test/msvs/rules_stdout_stderr/dummy.foo5
-rw-r--r--third_party/python/gyp/test/msvs/rules_stdout_stderr/gyptest-rules-stdout-stderr.py29
-rw-r--r--third_party/python/gyp/test/msvs/rules_stdout_stderr/rule_stderr.py8
-rw-r--r--third_party/python/gyp/test/msvs/rules_stdout_stderr/rule_stdout.py7
-rw-r--r--third_party/python/gyp/test/msvs/rules_stdout_stderr/rules-stdout-stderr.gyp52
-rw-r--r--third_party/python/gyp/test/msvs/shared_output/common.gypi17
-rw-r--r--third_party/python/gyp/test/msvs/shared_output/gyptest-shared_output.py41
-rw-r--r--third_party/python/gyp/test/msvs/shared_output/hello.c12
-rw-r--r--third_party/python/gyp/test/msvs/shared_output/hello.gyp21
-rw-r--r--third_party/python/gyp/test/msvs/shared_output/there/there.c12
-rw-r--r--third_party/python/gyp/test/msvs/shared_output/there/there.gyp16
-rw-r--r--third_party/python/gyp/test/msvs/uldi2010/gyptest-all.py20
-rw-r--r--third_party/python/gyp/test/msvs/uldi2010/hello.c13
-rw-r--r--third_party/python/gyp/test/msvs/uldi2010/hello.gyp26
-rw-r--r--third_party/python/gyp/test/msvs/uldi2010/hello2.c10
-rwxr-xr-xthird_party/python/gyp/test/multiple-targets/gyptest-all.py30
-rwxr-xr-xthird_party/python/gyp/test/multiple-targets/gyptest-default.py30
-rw-r--r--third_party/python/gyp/test/multiple-targets/src/common.c7
-rw-r--r--third_party/python/gyp/test/multiple-targets/src/multiple.gyp24
-rw-r--r--third_party/python/gyp/test/multiple-targets/src/prog1.c10
-rw-r--r--third_party/python/gyp/test/multiple-targets/src/prog2.c10
-rw-r--r--third_party/python/gyp/test/ninja/action-rule-hash/gyptest-action-rule-hash.py32
-rw-r--r--third_party/python/gyp/test/ninja/action-rule-hash/subdir/action-rule-hash.gyp29
-rw-r--r--third_party/python/gyp/test/ninja/action-rule-hash/subdir/emit.py13
-rwxr-xr-xthird_party/python/gyp/test/ninja/action_dependencies/gyptest-action-dependencies.py64
-rw-r--r--third_party/python/gyp/test/ninja/action_dependencies/src/a.c10
-rw-r--r--third_party/python/gyp/test/ninja/action_dependencies/src/a.h13
-rw-r--r--third_party/python/gyp/test/ninja/action_dependencies/src/action_dependencies.gyp88
-rw-r--r--third_party/python/gyp/test/ninja/action_dependencies/src/b.c18
-rw-r--r--third_party/python/gyp/test/ninja/action_dependencies/src/b.h13
-rw-r--r--third_party/python/gyp/test/ninja/action_dependencies/src/c.c10
-rw-r--r--third_party/python/gyp/test/ninja/action_dependencies/src/c.h13
-rwxr-xr-xthird_party/python/gyp/test/ninja/action_dependencies/src/emit.py11
-rw-r--r--third_party/python/gyp/test/ninja/chained-dependency/chained-dependency.gyp53
-rw-r--r--third_party/python/gyp/test/ninja/chained-dependency/chained.c5
-rwxr-xr-xthird_party/python/gyp/test/ninja/chained-dependency/gyptest-chained-dependency.py30
-rw-r--r--third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/gyptest-empty-and-non-empty-duplicate-name.py23
-rw-r--r--third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/subdir/included.gyp19
-rw-r--r--third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/test.gyp19
-rw-r--r--third_party/python/gyp/test/ninja/normalize-paths-win/gyptest-normalize-paths.py46
-rw-r--r--third_party/python/gyp/test/ninja/normalize-paths-win/hello.cc7
-rw-r--r--third_party/python/gyp/test/ninja/normalize-paths-win/normalize-paths.gyp68
-rw-r--r--third_party/python/gyp/test/ninja/s-needs-no-depfiles/empty.s1
-rwxr-xr-xthird_party/python/gyp/test/ninja/s-needs-no-depfiles/gyptest-s-needs-no-depfiles.py42
-rw-r--r--third_party/python/gyp/test/ninja/s-needs-no-depfiles/s-needs-no-depfiles.gyp13
-rwxr-xr-xthird_party/python/gyp/test/ninja/solibs_avoid_relinking/gyptest-solibs-avoid-relinking.py48
-rw-r--r--third_party/python/gyp/test/ninja/solibs_avoid_relinking/main.cc5
-rw-r--r--third_party/python/gyp/test/ninja/solibs_avoid_relinking/solib.cc8
-rw-r--r--third_party/python/gyp/test/ninja/solibs_avoid_relinking/solibs_avoid_relinking.gyp38
-rw-r--r--third_party/python/gyp/test/ninja/use-console/foo.bar5
-rw-r--r--third_party/python/gyp/test/ninja/use-console/gyptest-use-console.py29
-rw-r--r--third_party/python/gyp/test/ninja/use-console/use-console.gyp60
-rw-r--r--third_party/python/gyp/test/ninja/use-custom-environment-files/gyptest-use-custom-environment-files.py28
-rw-r--r--third_party/python/gyp/test/ninja/use-custom-environment-files/use-custom-environment-files.cc7
-rw-r--r--third_party/python/gyp/test/ninja/use-custom-environment-files/use-custom-environment-files.gyp15
-rw-r--r--third_party/python/gyp/test/no-cpp/gyptest-no-cpp.py53
-rw-r--r--third_party/python/gyp/test/no-cpp/src/call-f-main.c2
-rw-r--r--third_party/python/gyp/test/no-cpp/src/empty-main.c1
-rw-r--r--third_party/python/gyp/test/no-cpp/src/f.cc3
-rw-r--r--third_party/python/gyp/test/no-cpp/src/test.gyp25
-rwxr-xr-xthird_party/python/gyp/test/no-output/gyptest-no-output.py21
-rw-r--r--third_party/python/gyp/test/no-output/src/nooutput.gyp17
-rwxr-xr-xthird_party/python/gyp/test/product/gyptest-product.py43
-rw-r--r--third_party/python/gyp/test/product/hello.c15
-rw-r--r--third_party/python/gyp/test/product/product.gyp128
-rw-r--r--third_party/python/gyp/test/prune_targets/gyptest-prune-targets.py66
-rw-r--r--third_party/python/gyp/test/prune_targets/lib1.cc6
-rw-r--r--third_party/python/gyp/test/prune_targets/lib2.cc6
-rw-r--r--third_party/python/gyp/test/prune_targets/lib3.cc6
-rw-r--r--third_party/python/gyp/test/prune_targets/lib_indirect.cc6
-rw-r--r--third_party/python/gyp/test/prune_targets/program.cc7
-rw-r--r--third_party/python/gyp/test/prune_targets/test1.gyp26
-rw-r--r--third_party/python/gyp/test/prune_targets/test2.gyp30
-rw-r--r--third_party/python/gyp/test/relative/foo/a/a.cc9
-rw-r--r--third_party/python/gyp/test/relative/foo/a/a.gyp13
-rw-r--r--third_party/python/gyp/test/relative/foo/a/c/c.cc9
-rw-r--r--third_party/python/gyp/test/relative/foo/a/c/c.gyp12
-rw-r--r--third_party/python/gyp/test/relative/foo/b/b.cc9
-rw-r--r--third_party/python/gyp/test/relative/foo/b/b.gyp9
-rwxr-xr-xthird_party/python/gyp/test/relative/gyptest-default.py25
-rw-r--r--third_party/python/gyp/test/rename/filecase/file.c1
-rw-r--r--third_party/python/gyp/test/rename/filecase/test-casesensitive.gyp15
-rw-r--r--third_party/python/gyp/test/rename/filecase/test.gyp14
-rw-r--r--third_party/python/gyp/test/rename/gyptest-filecase.py35
-rw-r--r--third_party/python/gyp/test/restat/gyptest-restat.py31
-rw-r--r--third_party/python/gyp/test/restat/src/create_intermediate.py17
-rw-r--r--third_party/python/gyp/test/restat/src/restat.gyp50
-rw-r--r--third_party/python/gyp/test/restat/src/touch.py16
-rwxr-xr-xthird_party/python/gyp/test/rules-dirname/gyptest-dirname.py57
-rw-r--r--third_party/python/gyp/test/rules-dirname/src/actions.gyp15
-rwxr-xr-xthird_party/python/gyp/test/rules-dirname/src/copy-file.py11
-rw-r--r--third_party/python/gyp/test/rules-dirname/src/subdir/a/b/c.gencc8
-rw-r--r--third_party/python/gyp/test/rules-dirname/src/subdir/a/b/c.printvars1
-rw-r--r--third_party/python/gyp/test/rules-dirname/src/subdir/foo/bar/baz.gencc8
-rw-r--r--third_party/python/gyp/test/rules-dirname/src/subdir/foo/bar/baz.printvars1
-rw-r--r--third_party/python/gyp/test/rules-dirname/src/subdir/input-rule-dirname.gyp140
-rw-r--r--third_party/python/gyp/test/rules-dirname/src/subdir/main.cc14
-rw-r--r--third_party/python/gyp/test/rules-dirname/src/subdir/nodir.gencc8
-rwxr-xr-xthird_party/python/gyp/test/rules-dirname/src/subdir/printvars.py14
-rwxr-xr-xthird_party/python/gyp/test/rules-rebuild/gyptest-all.py70
-rwxr-xr-xthird_party/python/gyp/test/rules-rebuild/gyptest-default.py91
-rw-r--r--third_party/python/gyp/test/rules-rebuild/src/main.c12
-rwxr-xr-xthird_party/python/gyp/test/rules-rebuild/src/make-sources.py19
-rw-r--r--third_party/python/gyp/test/rules-rebuild/src/prog1.in7
-rw-r--r--third_party/python/gyp/test/rules-rebuild/src/prog2.in7
-rw-r--r--third_party/python/gyp/test/rules-rebuild/src/same_target.gyp31
-rwxr-xr-xthird_party/python/gyp/test/rules-use-built-dependencies/gyptest-use-built-dependencies.py23
-rw-r--r--third_party/python/gyp/test/rules-use-built-dependencies/src/main.cc17
-rw-r--r--third_party/python/gyp/test/rules-use-built-dependencies/src/use-built-dependencies-rule.gyp42
-rwxr-xr-xthird_party/python/gyp/test/rules-variables/gyptest-rules-variables.py35
-rw-r--r--third_party/python/gyp/test/rules-variables/src/input_ext.c9
-rw-r--r--third_party/python/gyp/test/rules-variables/src/input_name/test.c9
-rw-r--r--third_party/python/gyp/test/rules-variables/src/input_path/subdir/test.c9
-rw-r--r--third_party/python/gyp/test/rules-variables/src/subdir/input_dirname.c9
-rw-r--r--third_party/python/gyp/test/rules-variables/src/subdir/test.c18
-rw-r--r--third_party/python/gyp/test/rules-variables/src/test.input_root.c9
-rw-r--r--third_party/python/gyp/test/rules-variables/src/variables.gyp40
-rwxr-xr-xthird_party/python/gyp/test/rules/gyptest-all.py84
-rwxr-xr-xthird_party/python/gyp/test/rules/gyptest-default.py70
-rwxr-xr-xthird_party/python/gyp/test/rules/gyptest-input-root.py26
-rw-r--r--third_party/python/gyp/test/rules/gyptest-special-variables.py18
-rw-r--r--third_party/python/gyp/test/rules/src/actions.gyp23
-rw-r--r--third_party/python/gyp/test/rules/src/an_asm.S6
-rw-r--r--third_party/python/gyp/test/rules/src/as.bat7
-rwxr-xr-xthird_party/python/gyp/test/rules/src/copy-file.py11
-rw-r--r--third_party/python/gyp/test/rules/src/external/external.gyp66
-rw-r--r--third_party/python/gyp/test/rules/src/external/file1.in1
-rw-r--r--third_party/python/gyp/test/rules/src/external/file2.in1
-rw-r--r--third_party/python/gyp/test/rules/src/input-root.gyp24
-rw-r--r--third_party/python/gyp/test/rules/src/noaction/file1.in1
-rw-r--r--third_party/python/gyp/test/rules/src/noaction/no_action_with_rules_fails.gyp37
-rwxr-xr-xthird_party/python/gyp/test/rules/src/rule.py17
-rw-r--r--third_party/python/gyp/test/rules/src/somefile.ext0
-rw-r--r--third_party/python/gyp/test/rules/src/special-variables.gyp34
-rw-r--r--third_party/python/gyp/test/rules/src/subdir1/executable.gyp37
-rw-r--r--third_party/python/gyp/test/rules/src/subdir1/function1.in6
-rw-r--r--third_party/python/gyp/test/rules/src/subdir1/function2.in6
-rw-r--r--third_party/python/gyp/test/rules/src/subdir1/program.c12
-rw-r--r--third_party/python/gyp/test/rules/src/subdir2/both_rule_and_action_input.gyp50
-rw-r--r--third_party/python/gyp/test/rules/src/subdir2/file1.in1
-rw-r--r--third_party/python/gyp/test/rules/src/subdir2/file2.in1
-rw-r--r--third_party/python/gyp/test/rules/src/subdir2/never_used.gyp31
-rw-r--r--third_party/python/gyp/test/rules/src/subdir2/no_action.gyp38
-rw-r--r--third_party/python/gyp/test/rules/src/subdir2/no_inputs.gyp32
-rw-r--r--third_party/python/gyp/test/rules/src/subdir2/none.gyp33
-rw-r--r--third_party/python/gyp/test/rules/src/subdir2/program.c12
-rw-r--r--third_party/python/gyp/test/rules/src/subdir3/executable2.gyp37
-rw-r--r--third_party/python/gyp/test/rules/src/subdir3/function3.in6
-rw-r--r--third_party/python/gyp/test/rules/src/subdir3/program.c10
-rw-r--r--third_party/python/gyp/test/rules/src/subdir4/asm-function.assem10
-rw-r--r--third_party/python/gyp/test/rules/src/subdir4/build-asm.gyp49
-rw-r--r--third_party/python/gyp/test/rules/src/subdir4/program.c19
-rwxr-xr-xthird_party/python/gyp/test/same-gyp-name/gyptest-all.py38
-rwxr-xr-xthird_party/python/gyp/test/same-gyp-name/gyptest-default.py38
-rw-r--r--third_party/python/gyp/test/same-gyp-name/gyptest-library.py20
-rw-r--r--third_party/python/gyp/test/same-gyp-name/library/one/sub.gyp11
-rw-r--r--third_party/python/gyp/test/same-gyp-name/library/test.gyp15
-rw-r--r--third_party/python/gyp/test/same-gyp-name/library/two/sub.gyp11
-rw-r--r--third_party/python/gyp/test/same-gyp-name/src/all.gyp16
-rw-r--r--third_party/python/gyp/test/same-gyp-name/src/subdir1/executable.gyp15
-rw-r--r--third_party/python/gyp/test/same-gyp-name/src/subdir1/main1.cc6
-rw-r--r--third_party/python/gyp/test/same-gyp-name/src/subdir2/executable.gyp15
-rw-r--r--third_party/python/gyp/test/same-gyp-name/src/subdir2/main2.cc6
-rw-r--r--third_party/python/gyp/test/same-rule-output-file-name/gyptest-all.py23
-rw-r--r--third_party/python/gyp/test/same-rule-output-file-name/src/subdir1/subdir1.gyp30
-rw-r--r--third_party/python/gyp/test/same-rule-output-file-name/src/subdir2/subdir2.gyp30
-rw-r--r--third_party/python/gyp/test/same-rule-output-file-name/src/subdirs.gyp16
-rw-r--r--third_party/python/gyp/test/same-rule-output-file-name/src/touch.py11
-rwxr-xr-xthird_party/python/gyp/test/same-source-file-name/gyptest-all.py34
-rwxr-xr-xthird_party/python/gyp/test/same-source-file-name/gyptest-default.py34
-rwxr-xr-xthird_party/python/gyp/test/same-source-file-name/gyptest-pass-executable.py33
-rwxr-xr-xthird_party/python/gyp/test/same-source-file-name/gyptest-pass-shared.py18
-rwxr-xr-xthird_party/python/gyp/test/same-source-file-name/gyptest-static.py34
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/all.gyp30
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/double-executable.gyp21
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/double-shared.gyp27
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/double-static.gyp22
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/func.c6
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/prog1.c16
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/prog2.c16
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/prog3.c18
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/subdir1/func.c6
-rw-r--r--third_party/python/gyp/test/same-source-file-name/src/subdir2/func.c6
-rw-r--r--third_party/python/gyp/test/same-target-name-different-directory/gyptest-all.py41
-rw-r--r--third_party/python/gyp/test/same-target-name-different-directory/src/subdir1/subdir1.gyp66
-rw-r--r--third_party/python/gyp/test/same-target-name-different-directory/src/subdir2/subdir2.gyp66
-rw-r--r--third_party/python/gyp/test/same-target-name-different-directory/src/subdirs.gyp16
-rw-r--r--third_party/python/gyp/test/same-target-name-different-directory/src/touch.py11
-rwxr-xr-xthird_party/python/gyp/test/same-target-name/gyptest-same-target-name.py18
-rw-r--r--third_party/python/gyp/test/same-target-name/src/all.gyp16
-rw-r--r--third_party/python/gyp/test/same-target-name/src/executable1.gyp15
-rw-r--r--third_party/python/gyp/test/same-target-name/src/executable2.gyp15
-rw-r--r--third_party/python/gyp/test/sanitize-rule-names/blah.S0
-rw-r--r--third_party/python/gyp/test/sanitize-rule-names/gyptest-sanitize-rule-names.py17
-rw-r--r--third_party/python/gyp/test/sanitize-rule-names/hello.cc7
-rw-r--r--third_party/python/gyp/test/sanitize-rule-names/sanitize-rule-names.gyp27
-rw-r--r--third_party/python/gyp/test/sanitize-rule-names/script.py10
-rw-r--r--third_party/python/gyp/test/self-dependency/common.gypi13
-rw-r--r--third_party/python/gyp/test/self-dependency/dep.gyp23
-rwxr-xr-xthird_party/python/gyp/test/self-dependency/gyptest-self-dependency.py19
-rw-r--r--third_party/python/gyp/test/self-dependency/self_dependency.gyp15
-rwxr-xr-xthird_party/python/gyp/test/sibling/gyptest-all.py42
-rwxr-xr-xthird_party/python/gyp/test/sibling/gyptest-relocate.py44
-rw-r--r--third_party/python/gyp/test/sibling/src/build/all.gyp16
-rw-r--r--third_party/python/gyp/test/sibling/src/prog1/prog1.c7
-rw-r--r--third_party/python/gyp/test/sibling/src/prog1/prog1.gyp15
-rw-r--r--third_party/python/gyp/test/sibling/src/prog2/prog2.c7
-rw-r--r--third_party/python/gyp/test/sibling/src/prog2/prog2.gyp15
-rwxr-xr-xthird_party/python/gyp/test/small/gyptest-small.py56
-rw-r--r--third_party/python/gyp/test/standalone-static-library/gyptest-standalone-static-library.py50
-rw-r--r--third_party/python/gyp/test/standalone-static-library/invalid.gyp16
-rw-r--r--third_party/python/gyp/test/standalone-static-library/mylib.c7
-rw-r--r--third_party/python/gyp/test/standalone-static-library/mylib.gyp26
-rw-r--r--third_party/python/gyp/test/standalone-static-library/prog.c7
-rw-r--r--third_party/python/gyp/test/standalone/gyptest-standalone.py35
-rw-r--r--third_party/python/gyp/test/standalone/standalone.gyp12
-rwxr-xr-xthird_party/python/gyp/test/subdirectory/gyptest-SYMROOT-all.py36
-rwxr-xr-xthird_party/python/gyp/test/subdirectory/gyptest-SYMROOT-default.py37
-rwxr-xr-xthird_party/python/gyp/test/subdirectory/gyptest-subdir-all.py34
-rwxr-xr-xthird_party/python/gyp/test/subdirectory/gyptest-subdir-default.py34
-rwxr-xr-xthird_party/python/gyp/test/subdirectory/gyptest-subdir2-deep.py25
-rwxr-xr-xthird_party/python/gyp/test/subdirectory/gyptest-top-all.py43
-rwxr-xr-xthird_party/python/gyp/test/subdirectory/gyptest-top-default.py43
-rw-r--r--third_party/python/gyp/test/subdirectory/src/prog1.c7
-rw-r--r--third_party/python/gyp/test/subdirectory/src/prog1.gyp21
-rw-r--r--third_party/python/gyp/test/subdirectory/src/subdir/prog2.c7
-rw-r--r--third_party/python/gyp/test/subdirectory/src/subdir/prog2.gyp18
-rw-r--r--third_party/python/gyp/test/subdirectory/src/subdir/subdir2/prog3.c7
-rw-r--r--third_party/python/gyp/test/subdirectory/src/subdir/subdir2/prog3.gyp18
-rw-r--r--third_party/python/gyp/test/subdirectory/src/symroot.gypi16
-rwxr-xr-xthird_party/python/gyp/test/symlinks/gyptest-symlinks.py66
-rw-r--r--third_party/python/gyp/test/symlinks/hello.c12
-rw-r--r--third_party/python/gyp/test/symlinks/hello.gyp15
-rw-r--r--third_party/python/gyp/test/target/gyptest-target.py37
-rw-r--r--third_party/python/gyp/test/target/hello.c7
-rw-r--r--third_party/python/gyp/test/target/target.gyp24
-rwxr-xr-xthird_party/python/gyp/test/toolsets/gyptest-toolsets.py31
-rw-r--r--third_party/python/gyp/test/toolsets/main.cc13
-rw-r--r--third_party/python/gyp/test/toolsets/toolsets.cc11
-rw-r--r--third_party/python/gyp/test/toolsets/toolsets.gyp62
-rw-r--r--third_party/python/gyp/test/toolsets/toolsets_shared.cc11
-rwxr-xr-xthird_party/python/gyp/test/toplevel-dir/gyptest-toplevel-dir.py31
-rw-r--r--third_party/python/gyp/test/toplevel-dir/src/sub1/main.gyp18
-rw-r--r--third_party/python/gyp/test/toplevel-dir/src/sub1/prog1.c7
-rw-r--r--third_party/python/gyp/test/toplevel-dir/src/sub2/prog2.c7
-rw-r--r--third_party/python/gyp/test/toplevel-dir/src/sub2/prog2.gyp15
-rw-r--r--third_party/python/gyp/test/variables/commands/commands-repeated.gyp128
-rw-r--r--third_party/python/gyp/test/variables/commands/commands-repeated.gyp.stdout136
-rw-r--r--third_party/python/gyp/test/variables/commands/commands-repeated.gypd.golden77
-rw-r--r--third_party/python/gyp/test/variables/commands/commands.gyp91
-rw-r--r--third_party/python/gyp/test/variables/commands/commands.gyp.ignore-env.stdout96
-rw-r--r--third_party/python/gyp/test/variables/commands/commands.gyp.stdout96
-rw-r--r--third_party/python/gyp/test/variables/commands/commands.gypd.golden66
-rw-r--r--third_party/python/gyp/test/variables/commands/commands.gypi23
-rwxr-xr-xthird_party/python/gyp/test/variables/commands/gyptest-commands-ignore-env.py47
-rwxr-xr-xthird_party/python/gyp/test/variables/commands/gyptest-commands-repeated-multidir.py23
-rwxr-xr-xthird_party/python/gyp/test/variables/commands/gyptest-commands-repeated.py40
-rwxr-xr-xthird_party/python/gyp/test/variables/commands/gyptest-commands.py40
-rw-r--r--third_party/python/gyp/test/variables/commands/repeated_multidir/dir_1/test_1.gyp13
-rw-r--r--third_party/python/gyp/test/variables/commands/repeated_multidir/dir_2/test_2.gyp13
-rw-r--r--third_party/python/gyp/test/variables/commands/repeated_multidir/main.gyp16
-rwxr-xr-xthird_party/python/gyp/test/variables/commands/repeated_multidir/print_cwd_basename.py11
-rw-r--r--third_party/python/gyp/test/variables/commands/repeated_multidir/repeated_command_common.gypi25
-rw-r--r--third_party/python/gyp/test/variables/commands/test.py7
-rwxr-xr-xthird_party/python/gyp/test/variables/commands/update_golden11
-rw-r--r--third_party/python/gyp/test/variables/empty/empty.gyp13
-rw-r--r--third_party/python/gyp/test/variables/empty/empty.gypi9
-rwxr-xr-xthird_party/python/gyp/test/variables/empty/gyptest-empty.py19
-rw-r--r--third_party/python/gyp/test/variables/filelist/filelist.gyp.stdout26
-rw-r--r--third_party/python/gyp/test/variables/filelist/filelist.gypd.golden43
-rw-r--r--third_party/python/gyp/test/variables/filelist/gyptest-filelist-golden.py53
-rwxr-xr-xthird_party/python/gyp/test/variables/filelist/gyptest-filelist.py29
-rw-r--r--third_party/python/gyp/test/variables/filelist/src/dummy.py5
-rw-r--r--third_party/python/gyp/test/variables/filelist/src/filelist.gyp93
-rw-r--r--third_party/python/gyp/test/variables/filelist/src/filelist2.gyp40
-rwxr-xr-xthird_party/python/gyp/test/variables/filelist/update_golden8
-rwxr-xr-xthird_party/python/gyp/test/variables/latelate/gyptest-latelate.py25
-rw-r--r--third_party/python/gyp/test/variables/latelate/src/latelate.gyp34
-rw-r--r--third_party/python/gyp/test/variables/latelate/src/program.cc13
-rw-r--r--third_party/python/gyp/test/variables/variable-in-path/C1/hello.cc7
-rw-r--r--third_party/python/gyp/test/variables/variable-in-path/gyptest-variable-in-path.py23
-rw-r--r--third_party/python/gyp/test/variables/variable-in-path/variable-in-path.gyp31
-rw-r--r--third_party/python/gyp/test/win/asm-files/asm-files.gyp17
-rw-r--r--third_party/python/gyp/test/win/asm-files/b.s0
-rw-r--r--third_party/python/gyp/test/win/asm-files/c.S0
-rw-r--r--third_party/python/gyp/test/win/asm-files/hello.cc7
-rw-r--r--third_party/python/gyp/test/win/batch-file-action/batch-file-action.gyp21
-rw-r--r--third_party/python/gyp/test/win/batch-file-action/infile1
-rw-r--r--third_party/python/gyp/test/win/batch-file-action/somecmd.bat5
-rw-r--r--third_party/python/gyp/test/win/command-quote/a.S0
-rw-r--r--third_party/python/gyp/test/win/command-quote/bat with spaces.bat7
-rw-r--r--third_party/python/gyp/test/win/command-quote/command-quote.gyp79
-rw-r--r--third_party/python/gyp/test/win/command-quote/go.bat7
-rw-r--r--third_party/python/gyp/test/win/command-quote/subdir/and/another/in-subdir.gyp27
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/additional-include-dirs.cc10
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/additional-include-dirs.gyp20
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/additional-options.cc10
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/additional-options.gyp31
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/analysis.gyp40
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/buffer-security-check.gyp51
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/buffer-security.cc12
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/calling-convention-cdecl.def6
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/calling-convention-fastcall.def6
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/calling-convention-stdcall.def6
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/calling-convention-vectorcall.def6
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/calling-convention.cc6
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/calling-convention.gyp66
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/character-set-mbcs.cc11
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/character-set-unicode.cc15
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/character-set.gyp35
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/compile-as-managed.cc9
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/compile-as-managed.gyp29
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/compile-as-winrt.cc12
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/compile-as-winrt.gyp20
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/debug-format.gyp48
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/default-char-is-unsigned.cc15
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/default-char-is-unsigned.gyp20
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/disable-specific-warnings.cc9
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/disable-specific-warnings.gyp29
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/enable-enhanced-instruction-set.cc28
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/enable-enhanced-instruction-set.gyp68
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/exception-handling-on.cc24
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/exception-handling.gyp46
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/floating-point-model-fast.cc19
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/floating-point-model-precise.cc19
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/floating-point-model-strict.cc19
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/floating-point-model.gyp43
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/force-include-files-with-precompiled.cc10
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/force-include-files.cc8
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/force-include-files.gyp36
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/function-level-linking.cc11
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/function-level-linking.gyp28
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/hello.cc7
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/optimizations.gyp207
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/pdbname-override.gyp26
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/pdbname.cc7
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/pdbname.gyp24
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/precomp.cc6
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/rtti-on.cc11
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/rtti.gyp37
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/runtime-checks.cc11
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/runtime-checks.gyp29
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/runtime-library-md.cc19
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/runtime-library-mdd.cc19
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/runtime-library-mt.cc19
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/runtime-library-mtd.cc19
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/runtime-library.gyp48
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/spectre-mitigation.gyp44
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/subdir/header.h0
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type.gyp33
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type1.cc11
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type2.cc11
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/uninit.cc13
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/warning-as-error.cc9
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/warning-as-error.gyp37
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/warning-level.gyp115
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/warning-level1.cc8
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/warning-level2.cc14
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/warning-level3.cc11
-rw-r--r--third_party/python/gyp/test/win/compiler-flags/warning-level4.cc10
-rw-r--r--third_party/python/gyp/test/win/enable-winrt/dllmain.cc30
-rw-r--r--third_party/python/gyp/test/win/enable-winrt/enable-winrt.gyp39
-rw-r--r--third_party/python/gyp/test/win/generator-output-different-drive/gyptest-generator-output-different-drive.py44
-rw-r--r--third_party/python/gyp/test/win/generator-output-different-drive/prog.c10
-rw-r--r--third_party/python/gyp/test/win/generator-output-different-drive/prog.gyp15
-rw-r--r--third_party/python/gyp/test/win/gyptest-asm-files.py26
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-additional-include-dirs.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-additional-options.py28
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-analysis.py30
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-buffer-security-check.py53
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-calling-convention.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-character-set.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-compile-as-managed.py24
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-compile-as-winrt.py20
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-debug-format.py43
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-default-char-is-unsigned.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-disable-specific-warnings.py32
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-enable-enhanced-instruction-set.py49
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-exception-handling.py33
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-floating-point-model.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-force-include-files.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-function-level-linking.py54
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-optimizations.py105
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-pdbname-override.py27
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-pdbname.py30
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-rtti.py30
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-runtime-checks.py30
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-runtime-library.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-treat-wchar-t-as-built-in-type.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-warning-as-error.py30
-rw-r--r--third_party/python/gyp/test/win/gyptest-cl-warning-level.py41
-rw-r--r--third_party/python/gyp/test/win/gyptest-command-quote.py42
-rw-r--r--third_party/python/gyp/test/win/gyptest-crosscompile-ar.py29
-rw-r--r--third_party/python/gyp/test/win/gyptest-lib-ltcg.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-additional-deps.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-additional-options.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-aslr.py35
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-base-address.py62
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-debug-info.py26
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-default-libs.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-deffile.py43
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-defrelink.py56
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-delay-load-dlls.py35
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-embed-manifest.py100
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-enable-uac.py104
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-enable-winrt-app-revision.py43
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-enable-winrt-target-platform-version.py47
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-enable-winrt.py37
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-entrypointsymbol.py24
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-fixed-base.py40
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-force-symbol-reference.py26
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-generate-manifest.py127
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-incremental.py37
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-large-address-aware.py35
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-large-pdb.py76
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-library-adjust.py21
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-library-directories.py35
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-ltcg.py44
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-mapfile.py44
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-nodefaultlib.py24
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-noimportlib.py30
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-nxcompat.py37
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-opt-icf.py41
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-opt-ref.py40
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-ordering.py103
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-outputfile.py28
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-pdb-no-output.py25
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-pdb-output.py33
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-pdb.py35
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-pgo.py75
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-profile.py37
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-restat-importlib.py47
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-safeseh.py46
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-shard.py30
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-stacksize.py62
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-subsystem.py38
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-target-machine.py28
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-tsaware.py33
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-uldi-depending-on-module.py24
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-uldi.py28
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-unsupported-manifest.py27
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-update-manifest.py104
-rw-r--r--third_party/python/gyp/test/win/gyptest-link-warnings-as-errors.py24
-rw-r--r--third_party/python/gyp/test/win/gyptest-long-command-line.py23
-rw-r--r--third_party/python/gyp/test/win/gyptest-macro-projectname.py24
-rw-r--r--third_party/python/gyp/test/win/gyptest-macro-targetext.py26
-rw-r--r--third_party/python/gyp/test/win/gyptest-macro-targetfilename.py37
-rw-r--r--third_party/python/gyp/test/win/gyptest-macro-targetname.py29
-rw-r--r--third_party/python/gyp/test/win/gyptest-macro-targetpath.py30
-rw-r--r--third_party/python/gyp/test/win/gyptest-macro-vcinstalldir.py24
-rw-r--r--third_party/python/gyp/test/win/gyptest-macros-containing-gyp.py21
-rw-r--r--third_party/python/gyp/test/win/gyptest-macros-in-inputs-and-outputs.py27
-rw-r--r--third_party/python/gyp/test/win/gyptest-midl-excluded.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-midl-includedirs.py21
-rw-r--r--third_party/python/gyp/test/win/gyptest-midl-rules.py28
-rw-r--r--third_party/python/gyp/test/win/gyptest-ml-safeseh.py22
-rw-r--r--third_party/python/gyp/test/win/gyptest-quoting-commands.py25
-rw-r--r--third_party/python/gyp/test/win/gyptest-rc-build.py29
-rw-r--r--third_party/python/gyp/test/win/gyptest-sys.py27
-rw-r--r--third_party/python/gyp/test/win/gyptest-system-include.py21
-rw-r--r--third_party/python/gyp/test/win/idl-excluded/bad.idl6
-rw-r--r--third_party/python/gyp/test/win/idl-excluded/copy-file.py11
-rw-r--r--third_party/python/gyp/test/win/idl-excluded/idl-excluded.gyp58
-rw-r--r--third_party/python/gyp/test/win/idl-excluded/program.cc7
-rw-r--r--third_party/python/gyp/test/win/idl-includedirs/hello.cc7
-rw-r--r--third_party/python/gyp/test/win/idl-includedirs/idl-includedirs.gyp26
-rw-r--r--third_party/python/gyp/test/win/idl-includedirs/subdir/bar.idl13
-rw-r--r--third_party/python/gyp/test/win/idl-includedirs/subdir/foo.idl14
-rw-r--r--third_party/python/gyp/test/win/idl-rules/Window.idl9
-rw-r--r--third_party/python/gyp/test/win/idl-rules/basic-idl.gyp67
-rw-r--r--third_party/python/gyp/test/win/idl-rules/history_indexer.idl17
-rw-r--r--third_party/python/gyp/test/win/idl-rules/history_indexer_user.cc15
-rw-r--r--third_party/python/gyp/test/win/idl-rules/idl_compiler.py17
-rw-r--r--third_party/python/gyp/test/win/importlib/dll_no_exports.cc9
-rw-r--r--third_party/python/gyp/test/win/importlib/has-exports.cc10
-rw-r--r--third_party/python/gyp/test/win/importlib/hello.cc9
-rw-r--r--third_party/python/gyp/test/win/importlib/importlib.gyp30
-rw-r--r--third_party/python/gyp/test/win/importlib/noimplib.gyp16
-rw-r--r--third_party/python/gyp/test/win/large-pdb/dllmain.cc9
-rw-r--r--third_party/python/gyp/test/win/large-pdb/large-pdb.gyp98
-rw-r--r--third_party/python/gyp/test/win/large-pdb/main.cc7
-rw-r--r--third_party/python/gyp/test/win/lib-crosscompile/answer.cc9
-rw-r--r--third_party/python/gyp/test/win/lib-crosscompile/answer.h5
-rw-r--r--third_party/python/gyp/test/win/lib-crosscompile/use_host_ar.gyp17
-rw-r--r--third_party/python/gyp/test/win/lib-flags/answer.cc9
-rw-r--r--third_party/python/gyp/test/win/lib-flags/answer.h5
-rw-r--r--third_party/python/gyp/test/win/lib-flags/ltcg.gyp21
-rw-r--r--third_party/python/gyp/test/win/linker-flags/a/x.cc7
-rw-r--r--third_party/python/gyp/test/win/linker-flags/a/z.cc7
-rw-r--r--third_party/python/gyp/test/win/linker-flags/additional-deps.cc10
-rw-r--r--third_party/python/gyp/test/win/linker-flags/additional-deps.gyp30
-rw-r--r--third_party/python/gyp/test/win/linker-flags/additional-options.gyp29
-rw-r--r--third_party/python/gyp/test/win/linker-flags/aslr.gyp35
-rw-r--r--third_party/python/gyp/test/win/linker-flags/b/y.cc7
-rw-r--r--third_party/python/gyp/test/win/linker-flags/base-address.gyp38
-rw-r--r--third_party/python/gyp/test/win/linker-flags/debug-info.gyp28
-rw-r--r--third_party/python/gyp/test/win/linker-flags/deffile-multiple.gyp17
-rw-r--r--third_party/python/gyp/test/win/linker-flags/deffile.cc13
-rw-r--r--third_party/python/gyp/test/win/linker-flags/deffile.def8
-rw-r--r--third_party/python/gyp/test/win/linker-flags/deffile.gyp38
-rw-r--r--third_party/python/gyp/test/win/linker-flags/delay-load-dlls.gyp35
-rw-r--r--third_party/python/gyp/test/win/linker-flags/delay-load.cc10
-rw-r--r--third_party/python/gyp/test/win/linker-flags/embed-manifest.gyp109
-rw-r--r--third_party/python/gyp/test/win/linker-flags/enable-uac.gyp45
-rw-r--r--third_party/python/gyp/test/win/linker-flags/entrypointsymbol.cc13
-rw-r--r--third_party/python/gyp/test/win/linker-flags/entrypointsymbol.gyp28
-rw-r--r--third_party/python/gyp/test/win/linker-flags/extra.manifest11
-rw-r--r--third_party/python/gyp/test/win/linker-flags/extra2.manifest11
-rw-r--r--third_party/python/gyp/test/win/linker-flags/fixed-base.gyp52
-rw-r--r--third_party/python/gyp/test/win/linker-flags/force-symbol-reference.gyp39
-rw-r--r--third_party/python/gyp/test/win/linker-flags/generate-manifest.gyp166
-rw-r--r--third_party/python/gyp/test/win/linker-flags/hello.cc7
-rw-r--r--third_party/python/gyp/test/win/linker-flags/incremental.gyp65
-rw-r--r--third_party/python/gyp/test/win/linker-flags/inline_test.cc12
-rw-r--r--third_party/python/gyp/test/win/linker-flags/inline_test.h5
-rw-r--r--third_party/python/gyp/test/win/linker-flags/inline_test_main.cc15
-rw-r--r--third_party/python/gyp/test/win/linker-flags/large-address-aware.gyp28
-rw-r--r--third_party/python/gyp/test/win/linker-flags/library-adjust.cc10
-rw-r--r--third_party/python/gyp/test/win/linker-flags/library-adjust.gyp16
-rw-r--r--third_party/python/gyp/test/win/linker-flags/library-directories-define.cc7
-rw-r--r--third_party/python/gyp/test/win/linker-flags/library-directories-reference.cc10
-rw-r--r--third_party/python/gyp/test/win/linker-flags/library-directories.gyp42
-rw-r--r--third_party/python/gyp/test/win/linker-flags/link-ordering.gyp95
-rw-r--r--third_party/python/gyp/test/win/linker-flags/link-warning.cc10
-rw-r--r--third_party/python/gyp/test/win/linker-flags/ltcg.gyp42
-rw-r--r--third_party/python/gyp/test/win/linker-flags/main-crt.c8
-rw-r--r--third_party/python/gyp/test/win/linker-flags/manifest-in-comment.cc13
-rw-r--r--third_party/python/gyp/test/win/linker-flags/mapfile.cc12
-rw-r--r--third_party/python/gyp/test/win/linker-flags/mapfile.gyp45
-rw-r--r--third_party/python/gyp/test/win/linker-flags/no-default-libs.cc18
-rw-r--r--third_party/python/gyp/test/win/linker-flags/no-default-libs.gyp13
-rw-r--r--third_party/python/gyp/test/win/linker-flags/nodefaultlib.cc13
-rw-r--r--third_party/python/gyp/test/win/linker-flags/nodefaultlib.gyp30
-rw-r--r--third_party/python/gyp/test/win/linker-flags/nxcompat.gyp35
-rw-r--r--third_party/python/gyp/test/win/linker-flags/opt-icf.cc29
-rw-r--r--third_party/python/gyp/test/win/linker-flags/opt-icf.gyp63
-rw-r--r--third_party/python/gyp/test/win/linker-flags/opt-ref.cc11
-rw-r--r--third_party/python/gyp/test/win/linker-flags/opt-ref.gyp56
-rw-r--r--third_party/python/gyp/test/win/linker-flags/outputfile.gyp58
-rw-r--r--third_party/python/gyp/test/win/linker-flags/pdb-output.gyp49
-rw-r--r--third_party/python/gyp/test/win/linker-flags/pgo.gyp143
-rw-r--r--third_party/python/gyp/test/win/linker-flags/profile.gyp50
-rw-r--r--third_party/python/gyp/test/win/linker-flags/program-database.gyp40
-rw-r--r--third_party/python/gyp/test/win/linker-flags/safeseh.gyp79
-rw-r--r--third_party/python/gyp/test/win/linker-flags/safeseh_hello.cc11
-rw-r--r--third_party/python/gyp/test/win/linker-flags/safeseh_zero.asm10
-rw-r--r--third_party/python/gyp/test/win/linker-flags/safeseh_zero64.asm9
-rw-r--r--third_party/python/gyp/test/win/linker-flags/stacksize.gyp44
-rw-r--r--third_party/python/gyp/test/win/linker-flags/subdir/library.gyp13
-rw-r--r--third_party/python/gyp/test/win/linker-flags/subsystem-windows.cc9
-rw-r--r--third_party/python/gyp/test/win/linker-flags/subsystem.gyp70
-rw-r--r--third_party/python/gyp/test/win/linker-flags/target-machine.gyp48
-rw-r--r--third_party/python/gyp/test/win/linker-flags/tsaware.gyp28
-rw-r--r--third_party/python/gyp/test/win/linker-flags/unsupported-manifest.gyp13
-rw-r--r--third_party/python/gyp/test/win/linker-flags/update_pgd.py35
-rw-r--r--third_party/python/gyp/test/win/linker-flags/warn-as-error.gyp33
-rw-r--r--third_party/python/gyp/test/win/linker-flags/x.cc7
-rw-r--r--third_party/python/gyp/test/win/linker-flags/y.cc7
-rw-r--r--third_party/python/gyp/test/win/linker-flags/z.cc7
-rw-r--r--third_party/python/gyp/test/win/long-command-line/function.cc7
-rw-r--r--third_party/python/gyp/test/win/long-command-line/hello.cc7
-rw-r--r--third_party/python/gyp/test/win/long-command-line/long-command-line.gyp54
-rw-r--r--third_party/python/gyp/test/win/ml-safeseh/a.asm10
-rw-r--r--third_party/python/gyp/test/win/ml-safeseh/hello.cc11
-rw-r--r--third_party/python/gyp/test/win/ml-safeseh/ml-safeseh.gyp24
-rw-r--r--third_party/python/gyp/test/win/precompiled/gyptest-all.py21
-rw-r--r--third_party/python/gyp/test/win/precompiled/hello.c14
-rw-r--r--third_party/python/gyp/test/win/precompiled/hello.gyp28
-rw-r--r--third_party/python/gyp/test/win/precompiled/hello2.c13
-rw-r--r--third_party/python/gyp/test/win/precompiled/precomp.c8
-rw-r--r--third_party/python/gyp/test/win/rc-build/Resource.h26
-rw-r--r--third_party/python/gyp/test/win/rc-build/hello.cpp30
-rw-r--r--third_party/python/gyp/test/win/rc-build/hello.gyp92
-rw-r--r--third_party/python/gyp/test/win/rc-build/hello.h3
-rw-r--r--third_party/python/gyp/test/win/rc-build/hello.icobin0 -> 23558 bytes
-rw-r--r--third_party/python/gyp/test/win/rc-build/hello.rc86
-rw-r--r--third_party/python/gyp/test/win/rc-build/hello3.rc87
-rw-r--r--third_party/python/gyp/test/win/rc-build/small.icobin0 -> 23558 bytes
-rw-r--r--third_party/python/gyp/test/win/rc-build/subdir/hello2.rc87
-rw-r--r--third_party/python/gyp/test/win/rc-build/subdir/include.h1
-rw-r--r--third_party/python/gyp/test/win/rc-build/targetver.h24
-rw-r--r--third_party/python/gyp/test/win/shard/hello.cc7
-rw-r--r--third_party/python/gyp/test/win/shard/hello1.cc7
-rw-r--r--third_party/python/gyp/test/win/shard/hello2.cc7
-rw-r--r--third_party/python/gyp/test/win/shard/hello3.cc7
-rw-r--r--third_party/python/gyp/test/win/shard/hello4.cc7
-rw-r--r--third_party/python/gyp/test/win/shard/shard.gyp31
-rw-r--r--third_party/python/gyp/test/win/shard/shard_ref.gyp41
-rw-r--r--third_party/python/gyp/test/win/system-include/bar/header.h0
-rw-r--r--third_party/python/gyp/test/win/system-include/common/commonheader.h0
-rw-r--r--third_party/python/gyp/test/win/system-include/foo/header.h0
-rw-r--r--third_party/python/gyp/test/win/system-include/main.cc4
-rw-r--r--third_party/python/gyp/test/win/system-include/test.gyp26
-rw-r--r--third_party/python/gyp/test/win/uldi/a.cc7
-rw-r--r--third_party/python/gyp/test/win/uldi/b.cc7
-rw-r--r--third_party/python/gyp/test/win/uldi/dll.cc6
-rw-r--r--third_party/python/gyp/test/win/uldi/exe.cc7
-rw-r--r--third_party/python/gyp/test/win/uldi/main.cc10
-rw-r--r--third_party/python/gyp/test/win/uldi/uldi-depending-on-module.gyp42
-rw-r--r--third_party/python/gyp/test/win/uldi/uldi.gyp45
-rw-r--r--third_party/python/gyp/test/win/vs-macros/as.py20
-rw-r--r--third_party/python/gyp/test/win/vs-macros/containing-gyp.gyp39
-rw-r--r--third_party/python/gyp/test/win/vs-macros/do_stuff.py8
-rw-r--r--third_party/python/gyp/test/win/vs-macros/hello.cc7
-rw-r--r--third_party/python/gyp/test/win/vs-macros/input-output-macros.gyp32
-rw-r--r--third_party/python/gyp/test/win/vs-macros/input.S0
-rw-r--r--third_party/python/gyp/test/win/vs-macros/projectname.gyp29
-rw-r--r--third_party/python/gyp/test/win/vs-macros/stuff.blah1
-rw-r--r--third_party/python/gyp/test/win/vs-macros/targetext.gyp59
-rw-r--r--third_party/python/gyp/test/win/vs-macros/targetfilename.gyp59
-rw-r--r--third_party/python/gyp/test/win/vs-macros/targetname.gyp52
-rw-r--r--third_party/python/gyp/test/win/vs-macros/targetpath.gyp59
-rw-r--r--third_party/python/gyp/test/win/vs-macros/test_exists.py10
-rw-r--r--third_party/python/gyp/test/win/vs-macros/vcinstalldir.gyp41
-rw-r--r--third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.c10
-rw-r--r--third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.gyp32
-rw-r--r--third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.h13
-rw-r--r--third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.rc14
-rw-r--r--third_party/python/gyp/test/win/win-tool/copies_readonly_files.gyp29
-rw-r--r--third_party/python/gyp/test/win/win-tool/gyptest-win-tool-handles-readonly-files.py55
-rw-r--r--third_party/python/gyp/test/win/winrt-app-type-revision/dllmain.cc30
-rw-r--r--third_party/python/gyp/test/win/winrt-app-type-revision/winrt-app-type-revison.gyp43
-rw-r--r--third_party/python/gyp/test/win/winrt-target-platform-version/dllmain.cc30
-rw-r--r--third_party/python/gyp/test/win/winrt-target-platform-version/winrt-target-platform-version.gyp49
-rw-r--r--third_party/python/gyp/test/xcode-ninja/list_excluded/gyptest-all.py49
-rw-r--r--third_party/python/gyp/test/xcode-ninja/list_excluded/hello.cpp7
-rw-r--r--third_party/python/gyp/test/xcode-ninja/list_excluded/hello_exclude.gyp19
-rw-r--r--third_party/python/gyp/test/xcode-ninja/list_excluded/hello_excluded.cpp7
-rw-r--r--third_party/python/gyp/tools/README15
-rw-r--r--third_party/python/gyp/tools/Xcode/README5
-rw-r--r--third_party/python/gyp/tools/Xcode/Specifications/gyp.pbfilespec27
-rw-r--r--third_party/python/gyp/tools/Xcode/Specifications/gyp.xclangspec226
-rw-r--r--third_party/python/gyp/tools/emacs/README12
-rw-r--r--third_party/python/gyp/tools/emacs/gyp-tests.el63
-rw-r--r--third_party/python/gyp/tools/emacs/gyp.el275
-rwxr-xr-xthird_party/python/gyp/tools/emacs/run-unit-tests.sh7
-rw-r--r--third_party/python/gyp/tools/emacs/testdata/media.gyp1105
-rw-r--r--third_party/python/gyp/tools/emacs/testdata/media.gyp.fontified1107
-rwxr-xr-xthird_party/python/gyp/tools/graphviz.py102
-rwxr-xr-xthird_party/python/gyp/tools/pretty_gyp.py156
-rwxr-xr-xthird_party/python/gyp/tools/pretty_sln.py171
-rwxr-xr-xthird_party/python/gyp/tools/pretty_vcproj.py337
-rw-r--r--third_party/python/idna/idna-2.10.dist-info/LICENSE.rst34
-rw-r--r--third_party/python/idna/idna-2.10.dist-info/METADATA243
-rw-r--r--third_party/python/idna/idna-2.10.dist-info/RECORD13
-rw-r--r--third_party/python/idna/idna-2.10.dist-info/WHEEL6
-rw-r--r--third_party/python/idna/idna-2.10.dist-info/top_level.txt1
-rw-r--r--third_party/python/idna/idna/__init__.py2
-rw-r--r--third_party/python/idna/idna/codec.py118
-rw-r--r--third_party/python/idna/idna/compat.py12
-rw-r--r--third_party/python/idna/idna/core.py400
-rw-r--r--third_party/python/idna/idna/idnadata.py2050
-rw-r--r--third_party/python/idna/idna/intranges.py53
-rw-r--r--third_party/python/idna/idna/package_data.py2
-rw-r--r--third_party/python/idna/idna/uts46data.py8357
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/LICENSE202
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/METADATA135
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/RECORD15
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/WHEEL5
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/__init__.py904
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/_adapters.py90
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/_collections.py30
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/_compat.py72
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/_functools.py104
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/_itertools.py73
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/_meta.py49
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/_py39compat.py35
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/_text.py99
-rw-r--r--third_party/python/importlib_metadata/importlib_metadata/py.typed0
-rw-r--r--third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/LICENSE202
-rw-r--r--third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/METADATA104
-rw-r--r--third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/RECORD48
-rw-r--r--third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/WHEEL5
-rw-r--r--third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/importlib_resources/importlib_resources/__init__.py36
-rw-r--r--third_party/python/importlib_resources/importlib_resources/_adapters.py168
-rw-r--r--third_party/python/importlib_resources/importlib_resources/_common.py207
-rw-r--r--third_party/python/importlib_resources/importlib_resources/_compat.py109
-rw-r--r--third_party/python/importlib_resources/importlib_resources/_itertools.py38
-rw-r--r--third_party/python/importlib_resources/importlib_resources/_legacy.py120
-rw-r--r--third_party/python/importlib_resources/importlib_resources/abc.py170
-rw-r--r--third_party/python/importlib_resources/importlib_resources/py.typed0
-rw-r--r--third_party/python/importlib_resources/importlib_resources/readers.py144
-rw-r--r--third_party/python/importlib_resources/importlib_resources/simple.py106
-rw-r--r--third_party/python/jinxed/jinxed-1.2.0.dist-info/LICENSE373
-rw-r--r--third_party/python/jinxed/jinxed-1.2.0.dist-info/METADATA112
-rw-r--r--third_party/python/jinxed/jinxed-1.2.0.dist-info/RECORD18
-rw-r--r--third_party/python/jinxed/jinxed-1.2.0.dist-info/WHEEL6
-rw-r--r--third_party/python/jinxed/jinxed-1.2.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/jinxed/jinxed/__init__.py39
-rw-r--r--third_party/python/jinxed/jinxed/_keys.py164
-rw-r--r--third_party/python/jinxed/jinxed/_terminal.py123
-rw-r--r--third_party/python/jinxed/jinxed/_tparm.py291
-rw-r--r--third_party/python/jinxed/jinxed/_util.py52
-rw-r--r--third_party/python/jinxed/jinxed/has_key.py158
-rw-r--r--third_party/python/jinxed/jinxed/terminfo/__init__.py87
-rw-r--r--third_party/python/jinxed/jinxed/terminfo/ansicon.py158
-rw-r--r--third_party/python/jinxed/jinxed/terminfo/vtwin10.py68
-rw-r--r--third_party/python/jinxed/jinxed/terminfo/xterm.py482
-rw-r--r--third_party/python/jinxed/jinxed/terminfo/xterm_256color.py28
-rw-r--r--third_party/python/jinxed/jinxed/terminfo/xterm_256colors.py28
-rw-r--r--third_party/python/jinxed/jinxed/win32.py352
-rw-r--r--third_party/python/jsmin/CHANGELOG.txt79
-rw-r--r--third_party/python/jsmin/LICENSE.txt23
-rw-r--r--third_party/python/jsmin/MANIFEST.in1
-rw-r--r--third_party/python/jsmin/PKG-INFO196
-rw-r--r--third_party/python/jsmin/README.rst95
-rw-r--r--third_party/python/jsmin/jsmin.egg-info/PKG-INFO196
-rw-r--r--third_party/python/jsmin/jsmin.egg-info/SOURCES.txt13
-rw-r--r--third_party/python/jsmin/jsmin.egg-info/dependency_links.txt1
-rw-r--r--third_party/python/jsmin/jsmin.egg-info/top_level.txt1
-rw-r--r--third_party/python/jsmin/jsmin/__init__.py252
-rw-r--r--third_party/python/jsmin/jsmin/__main__.py37
-rw-r--r--third_party/python/jsmin/jsmin/test.py644
-rw-r--r--third_party/python/jsmin/setup.cfg5
-rw-r--r--third_party/python/jsmin/setup.py36
-rw-r--r--third_party/python/json-e/MANIFEST.in3
-rw-r--r--third_party/python/json-e/PKG-INFO11
-rw-r--r--third_party/python/json-e/README.md730
-rw-r--r--third_party/python/json-e/json_e.egg-info/PKG-INFO11
-rw-r--r--third_party/python/json-e/json_e.egg-info/SOURCES.txt17
-rw-r--r--third_party/python/json-e/json_e.egg-info/dependency_links.txt1
-rw-r--r--third_party/python/json-e/json_e.egg-info/requires.txt3
-rw-r--r--third_party/python/json-e/json_e.egg-info/top_level.txt1
-rw-r--r--third_party/python/json-e/jsone/__init__.py21
-rw-r--r--third_party/python/json-e/jsone/builtins.py121
-rw-r--r--third_party/python/json-e/jsone/interpreter.py289
-rw-r--r--third_party/python/json-e/jsone/prattparser.py191
-rw-r--r--third_party/python/json-e/jsone/render.py354
-rw-r--r--third_party/python/json-e/jsone/shared.py131
-rw-r--r--third_party/python/json-e/jsone/six.py23
-rw-r--r--third_party/python/json-e/package.json35
-rw-r--r--third_party/python/json-e/setup.cfg8
-rw-r--r--third_party/python/json-e/setup.py31
-rw-r--r--third_party/python/jsonschema/jsonschema-4.17.3.dist-info/METADATA195
-rw-r--r--third_party/python/jsonschema/jsonschema-4.17.3.dist-info/RECORD52
-rw-r--r--third_party/python/jsonschema/jsonschema-4.17.3.dist-info/WHEEL4
-rw-r--r--third_party/python/jsonschema/jsonschema-4.17.3.dist-info/entry_points.txt2
-rw-r--r--third_party/python/jsonschema/jsonschema-4.17.3.dist-info/licenses/COPYING19
-rw-r--r--third_party/python/jsonschema/jsonschema/__init__.py71
-rw-r--r--third_party/python/jsonschema/jsonschema/__main__.py3
-rw-r--r--third_party/python/jsonschema/jsonschema/_format.py518
-rw-r--r--third_party/python/jsonschema/jsonschema/_legacy_validators.py319
-rw-r--r--third_party/python/jsonschema/jsonschema/_types.py203
-rw-r--r--third_party/python/jsonschema/jsonschema/_utils.py349
-rw-r--r--third_party/python/jsonschema/jsonschema/_validators.py476
-rw-r--r--third_party/python/jsonschema/jsonschema/benchmarks/__init__.py5
-rw-r--r--third_party/python/jsonschema/jsonschema/benchmarks/issue232.py25
-rw-r--r--third_party/python/jsonschema/jsonschema/benchmarks/issue232/issue.json2653
-rw-r--r--third_party/python/jsonschema/jsonschema/benchmarks/json_schema_test_suite.py12
-rw-r--r--third_party/python/jsonschema/jsonschema/cli.py299
-rw-r--r--third_party/python/jsonschema/jsonschema/exceptions.py396
-rw-r--r--third_party/python/jsonschema/jsonschema/protocols.py225
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/draft2019-09.json42
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/draft2020-12.json58
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/draft3.json172
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/draft4.json149
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/draft6.json153
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/draft7.json166
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/applicator56
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/content17
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/core57
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/meta-data37
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/validation98
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/applicator48
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/content17
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/core51
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format14
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format-annotation14
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format-assertion14
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/meta-data37
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/unevaluated15
-rw-r--r--third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/validation98
-rw-r--r--third_party/python/jsonschema/jsonschema/validators.py1161
-rw-r--r--third_party/python/looseversion/looseversion-1.0.1.dist-info/LICENSE48
-rw-r--r--third_party/python/looseversion/looseversion-1.0.1.dist-info/METADATA56
-rw-r--r--third_party/python/looseversion/looseversion-1.0.1.dist-info/RECORD6
-rw-r--r--third_party/python/looseversion/looseversion-1.0.1.dist-info/WHEEL5
-rw-r--r--third_party/python/looseversion/looseversion-1.0.1.dist-info/top_level.txt1
-rw-r--r--third_party/python/looseversion/looseversion.py204
-rw-r--r--third_party/python/mohawk/PKG-INFO19
-rw-r--r--third_party/python/mohawk/README.rst25
-rw-r--r--third_party/python/mohawk/mohawk.egg-info/PKG-INFO19
-rw-r--r--third_party/python/mohawk/mohawk.egg-info/SOURCES.txt15
-rw-r--r--third_party/python/mohawk/mohawk.egg-info/dependency_links.txt1
-rw-r--r--third_party/python/mohawk/mohawk.egg-info/requires.txt1
-rw-r--r--third_party/python/mohawk/mohawk.egg-info/top_level.txt1
-rw-r--r--third_party/python/mohawk/mohawk/__init__.py2
-rw-r--r--third_party/python/mohawk/mohawk/base.py230
-rw-r--r--third_party/python/mohawk/mohawk/bewit.py167
-rw-r--r--third_party/python/mohawk/mohawk/exc.py98
-rw-r--r--third_party/python/mohawk/mohawk/receiver.py170
-rw-r--r--third_party/python/mohawk/mohawk/sender.py178
-rw-r--r--third_party/python/mohawk/mohawk/tests.py823
-rw-r--r--third_party/python/mohawk/mohawk/util.py267
-rw-r--r--third_party/python/mohawk/setup.cfg5
-rw-r--r--third_party/python/mohawk/setup.py25
-rw-r--r--third_party/python/moz.build58
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/METADATA16
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/RECORD13
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/WHEEL5
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/top_level.txt2
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls/__init__.py2
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls/errors.py15
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls/parser.py43
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls/platforms/__init__.py5
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls/platforms/hgmo.py55
-rw-r--r--third_party/python/mozilla_repo_urls/mozilla_repo_urls/result.py35
-rw-r--r--third_party/python/mozilla_repo_urls/test/__init__.py6
-rw-r--r--third_party/python/mozilla_repo_urls/test/test_integration.py493
-rw-r--r--third_party/python/mozilla_repo_urls/test/test_parser.py9
-rw-r--r--third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/LICENSE363
-rw-r--r--third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/METADATA12
-rw-r--r--third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/RECORD22
-rw-r--r--third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/WHEEL5
-rw-r--r--third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/mozilla_version/mozilla_version/__init__.py1
-rw-r--r--third_party/python/mozilla_version/mozilla_version/balrog.py142
-rw-r--r--third_party/python/mozilla_version/mozilla_version/errors.py75
-rw-r--r--third_party/python/mozilla_version/mozilla_version/fenix.py3
-rw-r--r--third_party/python/mozilla_version/mozilla_version/gecko.py672
-rw-r--r--third_party/python/mozilla_version/mozilla_version/maven.py65
-rw-r--r--third_party/python/mozilla_version/mozilla_version/mobile.py250
-rw-r--r--third_party/python/mozilla_version/mozilla_version/parser.py48
-rw-r--r--third_party/python/mozilla_version/mozilla_version/version.py236
-rw-r--r--third_party/python/multidict/CHANGES.rst255
-rw-r--r--third_party/python/multidict/LICENSE201
-rw-r--r--third_party/python/multidict/MANIFEST.in14
-rw-r--r--third_party/python/multidict/Makefile108
-rw-r--r--third_party/python/multidict/PKG-INFO128
-rw-r--r--third_party/python/multidict/README.rst103
-rw-r--r--third_party/python/multidict/multidict.egg-info/PKG-INFO128
-rw-r--r--third_party/python/multidict/multidict.egg-info/SOURCES.txt71
-rw-r--r--third_party/python/multidict/multidict.egg-info/dependency_links.txt1
-rw-r--r--third_party/python/multidict/multidict.egg-info/top_level.txt1
-rw-r--r--third_party/python/multidict/multidict/__init__.py48
-rw-r--r--third_party/python/multidict/multidict/__init__.pyi152
-rw-r--r--third_party/python/multidict/multidict/_abc.py48
-rw-r--r--third_party/python/multidict/multidict/_compat.py14
-rw-r--r--third_party/python/multidict/multidict/_multidict.c1646
-rw-r--r--third_party/python/multidict/multidict/_multidict_base.py144
-rw-r--r--third_party/python/multidict/multidict/_multidict_py.py515
-rw-r--r--third_party/python/multidict/multidict/_multilib/defs.h22
-rw-r--r--third_party/python/multidict/multidict/_multilib/dict.h24
-rw-r--r--third_party/python/multidict/multidict/_multilib/istr.h85
-rw-r--r--third_party/python/multidict/multidict/_multilib/iter.h238
-rw-r--r--third_party/python/multidict/multidict/_multilib/pair_list.h1244
-rw-r--r--third_party/python/multidict/multidict/_multilib/views.h464
-rw-r--r--third_party/python/multidict/multidict/py.typed1
-rw-r--r--third_party/python/multidict/pyproject.toml11
-rw-r--r--third_party/python/multidict/setup.cfg37
-rw-r--r--third_party/python/multidict/setup.py96
-rw-r--r--third_party/python/packaging/packaging-21.3.dist-info/LICENSE3
-rw-r--r--third_party/python/packaging/packaging-21.3.dist-info/LICENSE.APACHE177
-rw-r--r--third_party/python/packaging/packaging-21.3.dist-info/LICENSE.BSD23
-rw-r--r--third_party/python/packaging/packaging-21.3.dist-info/METADATA453
-rw-r--r--third_party/python/packaging/packaging-21.3.dist-info/RECORD19
-rw-r--r--third_party/python/packaging/packaging-21.3.dist-info/WHEEL5
-rw-r--r--third_party/python/packaging/packaging-21.3.dist-info/top_level.txt1
-rw-r--r--third_party/python/packaging/packaging/__about__.py26
-rw-r--r--third_party/python/packaging/packaging/__init__.py25
-rw-r--r--third_party/python/packaging/packaging/_manylinux.py301
-rw-r--r--third_party/python/packaging/packaging/_musllinux.py136
-rw-r--r--third_party/python/packaging/packaging/_structures.py61
-rw-r--r--third_party/python/packaging/packaging/markers.py304
-rw-r--r--third_party/python/packaging/packaging/py.typed0
-rw-r--r--third_party/python/packaging/packaging/requirements.py146
-rw-r--r--third_party/python/packaging/packaging/specifiers.py802
-rw-r--r--third_party/python/packaging/packaging/tags.py487
-rw-r--r--third_party/python/packaging/packaging/utils.py136
-rw-r--r--third_party/python/packaging/packaging/version.py504
-rw-r--r--third_party/python/pathspec/pathspec-0.9.0.dist-info/LICENSE373
-rw-r--r--third_party/python/pathspec/pathspec-0.9.0.dist-info/METADATA411
-rw-r--r--third_party/python/pathspec/pathspec-0.9.0.dist-info/RECORD17
-rw-r--r--third_party/python/pathspec/pathspec-0.9.0.dist-info/WHEEL6
-rw-r--r--third_party/python/pathspec/pathspec-0.9.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/pathspec/pathspec/__init__.py43
-rw-r--r--third_party/python/pathspec/pathspec/_meta.py43
-rw-r--r--third_party/python/pathspec/pathspec/compat.py41
-rw-r--r--third_party/python/pathspec/pathspec/pathspec.py243
-rw-r--r--third_party/python/pathspec/pathspec/pattern.py164
-rw-r--r--third_party/python/pathspec/pathspec/patterns/__init__.py8
-rw-r--r--third_party/python/pathspec/pathspec/patterns/gitwildmatch.py400
-rw-r--r--third_party/python/pathspec/pathspec/util.py665
-rw-r--r--third_party/python/pip/pip-23.0.1.dist-info/LICENSE.txt20
-rw-r--r--third_party/python/pip/pip-23.0.1.dist-info/METADATA88
-rw-r--r--third_party/python/pip/pip-23.0.1.dist-info/RECORD506
-rw-r--r--third_party/python/pip/pip-23.0.1.dist-info/WHEEL5
-rw-r--r--third_party/python/pip/pip-23.0.1.dist-info/entry_points.txt4
-rw-r--r--third_party/python/pip/pip-23.0.1.dist-info/top_level.txt1
-rw-r--r--third_party/python/pip/pip/__init__.py13
-rw-r--r--third_party/python/pip/pip/__main__.py31
-rw-r--r--third_party/python/pip/pip/__pip-runner__.py50
-rw-r--r--third_party/python/pip/pip/_internal/__init__.py19
-rw-r--r--third_party/python/pip/pip/_internal/build_env.py311
-rw-r--r--third_party/python/pip/pip/_internal/cache.py293
-rw-r--r--third_party/python/pip/pip/_internal/cli/__init__.py4
-rw-r--r--third_party/python/pip/pip/_internal/cli/autocompletion.py171
-rw-r--r--third_party/python/pip/pip/_internal/cli/base_command.py216
-rw-r--r--third_party/python/pip/pip/_internal/cli/cmdoptions.py1055
-rw-r--r--third_party/python/pip/pip/_internal/cli/command_context.py27
-rw-r--r--third_party/python/pip/pip/_internal/cli/main.py70
-rw-r--r--third_party/python/pip/pip/_internal/cli/main_parser.py134
-rw-r--r--third_party/python/pip/pip/_internal/cli/parser.py294
-rw-r--r--third_party/python/pip/pip/_internal/cli/progress_bars.py68
-rw-r--r--third_party/python/pip/pip/_internal/cli/req_command.py502
-rw-r--r--third_party/python/pip/pip/_internal/cli/spinners.py159
-rw-r--r--third_party/python/pip/pip/_internal/cli/status_codes.py6
-rw-r--r--third_party/python/pip/pip/_internal/commands/__init__.py132
-rw-r--r--third_party/python/pip/pip/_internal/commands/cache.py223
-rw-r--r--third_party/python/pip/pip/_internal/commands/check.py53
-rw-r--r--third_party/python/pip/pip/_internal/commands/completion.py126
-rw-r--r--third_party/python/pip/pip/_internal/commands/configuration.py282
-rw-r--r--third_party/python/pip/pip/_internal/commands/debug.py199
-rw-r--r--third_party/python/pip/pip/_internal/commands/download.py149
-rw-r--r--third_party/python/pip/pip/_internal/commands/freeze.py97
-rw-r--r--third_party/python/pip/pip/_internal/commands/hash.py59
-rw-r--r--third_party/python/pip/pip/_internal/commands/help.py41
-rw-r--r--third_party/python/pip/pip/_internal/commands/index.py139
-rw-r--r--third_party/python/pip/pip/_internal/commands/inspect.py92
-rw-r--r--third_party/python/pip/pip/_internal/commands/install.py873
-rw-r--r--third_party/python/pip/pip/_internal/commands/list.py365
-rw-r--r--third_party/python/pip/pip/_internal/commands/search.py174
-rw-r--r--third_party/python/pip/pip/_internal/commands/show.py189
-rw-r--r--third_party/python/pip/pip/_internal/commands/uninstall.py113
-rw-r--r--third_party/python/pip/pip/_internal/commands/wheel.py203
-rw-r--r--third_party/python/pip/pip/_internal/configuration.py374
-rw-r--r--third_party/python/pip/pip/_internal/distributions/__init__.py21
-rw-r--r--third_party/python/pip/pip/_internal/distributions/base.py39
-rw-r--r--third_party/python/pip/pip/_internal/distributions/installed.py23
-rw-r--r--third_party/python/pip/pip/_internal/distributions/sdist.py150
-rw-r--r--third_party/python/pip/pip/_internal/distributions/wheel.py34
-rw-r--r--third_party/python/pip/pip/_internal/exceptions.py747
-rw-r--r--third_party/python/pip/pip/_internal/index/__init__.py2
-rw-r--r--third_party/python/pip/pip/_internal/index/collector.py505
-rw-r--r--third_party/python/pip/pip/_internal/index/package_finder.py1029
-rw-r--r--third_party/python/pip/pip/_internal/index/sources.py224
-rw-r--r--third_party/python/pip/pip/_internal/locations/__init__.py467
-rw-r--r--third_party/python/pip/pip/_internal/locations/_distutils.py173
-rw-r--r--third_party/python/pip/pip/_internal/locations/_sysconfig.py213
-rw-r--r--third_party/python/pip/pip/_internal/locations/base.py81
-rw-r--r--third_party/python/pip/pip/_internal/main.py12
-rw-r--r--third_party/python/pip/pip/_internal/metadata/__init__.py127
-rw-r--r--third_party/python/pip/pip/_internal/metadata/_json.py84
-rw-r--r--third_party/python/pip/pip/_internal/metadata/base.py688
-rw-r--r--third_party/python/pip/pip/_internal/metadata/importlib/__init__.py4
-rw-r--r--third_party/python/pip/pip/_internal/metadata/importlib/_compat.py55
-rw-r--r--third_party/python/pip/pip/_internal/metadata/importlib/_dists.py224
-rw-r--r--third_party/python/pip/pip/_internal/metadata/importlib/_envs.py188
-rw-r--r--third_party/python/pip/pip/_internal/metadata/pkg_resources.py270
-rw-r--r--third_party/python/pip/pip/_internal/models/__init__.py2
-rw-r--r--third_party/python/pip/pip/_internal/models/candidate.py34
-rw-r--r--third_party/python/pip/pip/_internal/models/direct_url.py228
-rw-r--r--third_party/python/pip/pip/_internal/models/format_control.py80
-rw-r--r--third_party/python/pip/pip/_internal/models/index.py28
-rw-r--r--third_party/python/pip/pip/_internal/models/installation_report.py53
-rw-r--r--third_party/python/pip/pip/_internal/models/link.py524
-rw-r--r--third_party/python/pip/pip/_internal/models/scheme.py31
-rw-r--r--third_party/python/pip/pip/_internal/models/search_scope.py133
-rw-r--r--third_party/python/pip/pip/_internal/models/selection_prefs.py51
-rw-r--r--third_party/python/pip/pip/_internal/models/target_python.py110
-rw-r--r--third_party/python/pip/pip/_internal/models/wheel.py92
-rw-r--r--third_party/python/pip/pip/_internal/network/__init__.py2
-rw-r--r--third_party/python/pip/pip/_internal/network/auth.py446
-rw-r--r--third_party/python/pip/pip/_internal/network/cache.py69
-rw-r--r--third_party/python/pip/pip/_internal/network/download.py186
-rw-r--r--third_party/python/pip/pip/_internal/network/lazy_wheel.py210
-rw-r--r--third_party/python/pip/pip/_internal/network/session.py518
-rw-r--r--third_party/python/pip/pip/_internal/network/utils.py96
-rw-r--r--third_party/python/pip/pip/_internal/network/xmlrpc.py60
-rw-r--r--third_party/python/pip/pip/_internal/operations/__init__.py0
-rw-r--r--third_party/python/pip/pip/_internal/operations/build/__init__.py0
-rw-r--r--third_party/python/pip/pip/_internal/operations/build/build_tracker.py124
-rw-r--r--third_party/python/pip/pip/_internal/operations/build/metadata.py39
-rw-r--r--third_party/python/pip/pip/_internal/operations/build/metadata_editable.py41
-rw-r--r--third_party/python/pip/pip/_internal/operations/build/metadata_legacy.py74
-rw-r--r--third_party/python/pip/pip/_internal/operations/build/wheel.py37
-rw-r--r--third_party/python/pip/pip/_internal/operations/build/wheel_editable.py46
-rw-r--r--third_party/python/pip/pip/_internal/operations/build/wheel_legacy.py102
-rw-r--r--third_party/python/pip/pip/_internal/operations/check.py149
-rw-r--r--third_party/python/pip/pip/_internal/operations/freeze.py254
-rw-r--r--third_party/python/pip/pip/_internal/operations/install/__init__.py2
-rw-r--r--third_party/python/pip/pip/_internal/operations/install/editable_legacy.py47
-rw-r--r--third_party/python/pip/pip/_internal/operations/install/legacy.py120
-rw-r--r--third_party/python/pip/pip/_internal/operations/install/wheel.py738
-rw-r--r--third_party/python/pip/pip/_internal/operations/prepare.py667
-rw-r--r--third_party/python/pip/pip/_internal/pyproject.py174
-rw-r--r--third_party/python/pip/pip/_internal/req/__init__.py94
-rw-r--r--third_party/python/pip/pip/_internal/req/constructors.py501
-rw-r--r--third_party/python/pip/pip/_internal/req/req_file.py544
-rw-r--r--third_party/python/pip/pip/_internal/req/req_install.py946
-rw-r--r--third_party/python/pip/pip/_internal/req/req_set.py82
-rw-r--r--third_party/python/pip/pip/_internal/req/req_uninstall.py640
-rw-r--r--third_party/python/pip/pip/_internal/resolution/__init__.py0
-rw-r--r--third_party/python/pip/pip/_internal/resolution/base.py20
-rw-r--r--third_party/python/pip/pip/_internal/resolution/legacy/__init__.py0
-rw-r--r--third_party/python/pip/pip/_internal/resolution/legacy/resolver.py600
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/__init__.py0
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/base.py141
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/candidates.py556
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/factory.py731
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/found_candidates.py155
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/provider.py248
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/reporter.py68
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/requirements.py166
-rw-r--r--third_party/python/pip/pip/_internal/resolution/resolvelib/resolver.py296
-rw-r--r--third_party/python/pip/pip/_internal/self_outdated_check.py242
-rw-r--r--third_party/python/pip/pip/_internal/utils/__init__.py0
-rw-r--r--third_party/python/pip/pip/_internal/utils/_log.py38
-rw-r--r--third_party/python/pip/pip/_internal/utils/appdirs.py52
-rw-r--r--third_party/python/pip/pip/_internal/utils/compat.py63
-rw-r--r--third_party/python/pip/pip/_internal/utils/compatibility_tags.py165
-rw-r--r--third_party/python/pip/pip/_internal/utils/datetime.py11
-rw-r--r--third_party/python/pip/pip/_internal/utils/deprecation.py188
-rw-r--r--third_party/python/pip/pip/_internal/utils/direct_url_helpers.py87
-rw-r--r--third_party/python/pip/pip/_internal/utils/distutils_args.py43
-rw-r--r--third_party/python/pip/pip/_internal/utils/egg_link.py72
-rw-r--r--third_party/python/pip/pip/_internal/utils/encoding.py36
-rw-r--r--third_party/python/pip/pip/_internal/utils/entrypoints.py84
-rw-r--r--third_party/python/pip/pip/_internal/utils/filesystem.py153
-rw-r--r--third_party/python/pip/pip/_internal/utils/filetypes.py27
-rw-r--r--third_party/python/pip/pip/_internal/utils/glibc.py88
-rw-r--r--third_party/python/pip/pip/_internal/utils/hashes.py144
-rw-r--r--third_party/python/pip/pip/_internal/utils/inject_securetransport.py35
-rw-r--r--third_party/python/pip/pip/_internal/utils/logging.py348
-rw-r--r--third_party/python/pip/pip/_internal/utils/misc.py739
-rw-r--r--third_party/python/pip/pip/_internal/utils/models.py39
-rw-r--r--third_party/python/pip/pip/_internal/utils/packaging.py57
-rw-r--r--third_party/python/pip/pip/_internal/utils/setuptools_build.py195
-rw-r--r--third_party/python/pip/pip/_internal/utils/subprocess.py260
-rw-r--r--third_party/python/pip/pip/_internal/utils/temp_dir.py246
-rw-r--r--third_party/python/pip/pip/_internal/utils/unpacking.py257
-rw-r--r--third_party/python/pip/pip/_internal/utils/urls.py62
-rw-r--r--third_party/python/pip/pip/_internal/utils/virtualenv.py104
-rw-r--r--third_party/python/pip/pip/_internal/utils/wheel.py136
-rw-r--r--third_party/python/pip/pip/_internal/vcs/__init__.py15
-rw-r--r--third_party/python/pip/pip/_internal/vcs/bazaar.py112
-rw-r--r--third_party/python/pip/pip/_internal/vcs/git.py526
-rw-r--r--third_party/python/pip/pip/_internal/vcs/mercurial.py163
-rw-r--r--third_party/python/pip/pip/_internal/vcs/subversion.py324
-rw-r--r--third_party/python/pip/pip/_internal/vcs/versioncontrol.py705
-rw-r--r--third_party/python/pip/pip/_internal/wheel_builder.py382
-rw-r--r--third_party/python/pip/pip/_vendor/__init__.py120
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/__init__.py18
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/_cmd.py61
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/adapter.py137
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/cache.py65
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/caches/__init__.py9
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/caches/file_cache.py188
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/caches/redis_cache.py39
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/compat.py32
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/controller.py439
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/filewrapper.py111
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/heuristics.py139
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/serialize.py190
-rw-r--r--third_party/python/pip/pip/_vendor/cachecontrol/wrapper.py33
-rw-r--r--third_party/python/pip/pip/_vendor/certifi/__init__.py4
-rw-r--r--third_party/python/pip/pip/_vendor/certifi/__main__.py12
-rw-r--r--third_party/python/pip/pip/_vendor/certifi/cacert.pem4527
-rw-r--r--third_party/python/pip/pip/_vendor/certifi/core.py108
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/__init__.py115
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/big5freq.py386
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/big5prober.py47
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/chardistribution.py261
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/charsetgroupprober.py106
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/charsetprober.py147
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/cli/__init__.py0
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/cli/chardetect.py112
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/codingstatemachine.py90
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/codingstatemachinedict.py19
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/cp949prober.py49
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/enums.py85
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/escprober.py102
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/escsm.py261
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/eucjpprober.py102
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/euckrfreq.py196
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/euckrprober.py47
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/euctwfreq.py388
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/euctwprober.py47
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/gb2312freq.py284
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/gb2312prober.py47
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/hebrewprober.py316
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/jisfreq.py325
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/johabfreq.py2382
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/johabprober.py47
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/jpcntx.py238
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/langbulgarianmodel.py4649
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/langgreekmodel.py4397
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/langhebrewmodel.py4380
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/langhungarianmodel.py4649
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/langrussianmodel.py5725
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/langthaimodel.py4380
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/langturkishmodel.py4380
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/latin1prober.py147
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/macromanprober.py162
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/mbcharsetprober.py95
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/mbcsgroupprober.py57
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/mbcssm.py661
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/metadata/__init__.py0
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/metadata/languages.py352
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/resultdict.py16
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/sbcharsetprober.py162
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/sbcsgroupprober.py88
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/sjisprober.py105
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/universaldetector.py362
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/utf1632prober.py225
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/utf8prober.py82
-rw-r--r--third_party/python/pip/pip/_vendor/chardet/version.py9
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/__init__.py7
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/ansi.py102
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/ansitowin32.py277
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/initialise.py121
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/tests/__init__.py1
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/tests/ansi_test.py76
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/tests/ansitowin32_test.py294
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/tests/initialise_test.py189
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/tests/isatty_test.py57
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/tests/utils.py49
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/tests/winterm_test.py131
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/win32.py180
-rw-r--r--third_party/python/pip/pip/_vendor/colorama/winterm.py195
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/__init__.py23
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/compat.py1116
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/database.py1350
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/index.py508
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/locators.py1300
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/manifest.py393
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/markers.py152
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/metadata.py1076
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/resources.py358
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/scripts.py437
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/t32.exe bin 0 -> 97792 bytes
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/t64-arm.exe bin 0 -> 182784 bytes
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/t64.exe bin 0 -> 108032 bytes
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/util.py1932
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/version.py739
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/w32.exe bin 0 -> 91648 bytes
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/w64-arm.exe bin 0 -> 168448 bytes
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/w64.exe bin 0 -> 101888 bytes
-rw-r--r--third_party/python/pip/pip/_vendor/distlib/wheel.py1082
-rw-r--r--third_party/python/pip/pip/_vendor/distro/__init__.py54
-rw-r--r--third_party/python/pip/pip/_vendor/distro/__main__.py4
-rw-r--r--third_party/python/pip/pip/_vendor/distro/distro.py1399
-rw-r--r--third_party/python/pip/pip/_vendor/idna/__init__.py44
-rw-r--r--third_party/python/pip/pip/_vendor/idna/codec.py112
-rw-r--r--third_party/python/pip/pip/_vendor/idna/compat.py13
-rw-r--r--third_party/python/pip/pip/_vendor/idna/core.py400
-rw-r--r--third_party/python/pip/pip/_vendor/idna/idnadata.py2151
-rw-r--r--third_party/python/pip/pip/_vendor/idna/intranges.py54
-rw-r--r--third_party/python/pip/pip/_vendor/idna/package_data.py2
-rw-r--r--third_party/python/pip/pip/_vendor/idna/uts46data.py8600
-rw-r--r--third_party/python/pip/pip/_vendor/msgpack/__init__.py57
-rw-r--r--third_party/python/pip/pip/_vendor/msgpack/exceptions.py48
-rw-r--r--third_party/python/pip/pip/_vendor/msgpack/ext.py193
-rw-r--r--third_party/python/pip/pip/_vendor/msgpack/fallback.py1010
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/__about__.py26
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/__init__.py25
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/_manylinux.py301
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/_musllinux.py136
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/_structures.py61
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/markers.py304
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/requirements.py146
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/specifiers.py802
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/tags.py487
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/utils.py136
-rw-r--r--third_party/python/pip/pip/_vendor/packaging/version.py504
-rw-r--r--third_party/python/pip/pip/_vendor/pkg_resources/__init__.py3296
-rw-r--r--third_party/python/pip/pip/_vendor/pkg_resources/py31compat.py23
-rw-r--r--third_party/python/pip/pip/_vendor/platformdirs/__init__.py342
-rw-r--r--third_party/python/pip/pip/_vendor/platformdirs/__main__.py46
-rw-r--r--third_party/python/pip/pip/_vendor/platformdirs/android.py120
-rw-r--r--third_party/python/pip/pip/_vendor/platformdirs/api.py156
-rw-r--r--third_party/python/pip/pip/_vendor/platformdirs/macos.py64
-rw-r--r--third_party/python/pip/pip/_vendor/platformdirs/unix.py181
-rw-r--r--third_party/python/pip/pip/_vendor/platformdirs/version.py4
-rw-r--r--third_party/python/pip/pip/_vendor/platformdirs/windows.py184
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/__init__.py82
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/__main__.py17
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/cmdline.py668
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/console.py70
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/filter.py71
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/filters/__init__.py940
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatter.py94
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/__init__.py143
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/_mapping.py23
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/bbcode.py108
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/groff.py170
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/html.py989
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/img.py645
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/irc.py179
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/latex.py521
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/other.py161
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/pangomarkup.py83
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/rtf.py146
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/svg.py188
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/terminal.py127
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/formatters/terminal256.py338
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/lexer.py882
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/lexers/__init__.py335
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/lexers/_mapping.py541
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/lexers/python.py1204
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/modeline.py43
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/plugin.py88
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/regexopt.py91
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/scanner.py104
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/sphinxext.py155
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/style.py197
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/styles/__init__.py97
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/token.py213
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/unistring.py153
-rw-r--r--third_party/python/pip/pip/_vendor/pygments/util.py308
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/__init__.py331
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/actions.py207
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/common.py424
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/core.py5814
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/diagram/__init__.py642
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/exceptions.py267
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/helpers.py1088
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/results.py760
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/testing.py331
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/unicode.py352
-rw-r--r--third_party/python/pip/pip/_vendor/pyparsing/util.py235
-rw-r--r--third_party/python/pip/pip/_vendor/pyproject_hooks/__init__.py23
-rw-r--r--third_party/python/pip/pip/_vendor/pyproject_hooks/_compat.py8
-rw-r--r--third_party/python/pip/pip/_vendor/pyproject_hooks/_impl.py330
-rw-r--r--third_party/python/pip/pip/_vendor/pyproject_hooks/_in_process/__init__.py18
-rw-r--r--third_party/python/pip/pip/_vendor/pyproject_hooks/_in_process/_in_process.py353
-rw-r--r--third_party/python/pip/pip/_vendor/requests/__init__.py182
-rw-r--r--third_party/python/pip/pip/_vendor/requests/__version__.py14
-rw-r--r--third_party/python/pip/pip/_vendor/requests/_internal_utils.py48
-rw-r--r--third_party/python/pip/pip/_vendor/requests/adapters.py584
-rw-r--r--third_party/python/pip/pip/_vendor/requests/api.py157
-rw-r--r--third_party/python/pip/pip/_vendor/requests/auth.py315
-rw-r--r--third_party/python/pip/pip/_vendor/requests/certs.py24
-rw-r--r--third_party/python/pip/pip/_vendor/requests/compat.py67
-rw-r--r--third_party/python/pip/pip/_vendor/requests/cookies.py561
-rw-r--r--third_party/python/pip/pip/_vendor/requests/exceptions.py141
-rw-r--r--third_party/python/pip/pip/_vendor/requests/help.py131
-rw-r--r--third_party/python/pip/pip/_vendor/requests/hooks.py33
-rw-r--r--third_party/python/pip/pip/_vendor/requests/models.py1034
-rw-r--r--third_party/python/pip/pip/_vendor/requests/packages.py16
-rw-r--r--third_party/python/pip/pip/_vendor/requests/sessions.py831
-rw-r--r--third_party/python/pip/pip/_vendor/requests/status_codes.py128
-rw-r--r--third_party/python/pip/pip/_vendor/requests/structures.py99
-rw-r--r--third_party/python/pip/pip/_vendor/requests/utils.py1086
-rw-r--r--third_party/python/pip/pip/_vendor/resolvelib/__init__.py26
-rw-r--r--third_party/python/pip/pip/_vendor/resolvelib/compat/__init__.py0
-rw-r--r--third_party/python/pip/pip/_vendor/resolvelib/compat/collections_abc.py6
-rw-r--r--third_party/python/pip/pip/_vendor/resolvelib/providers.py133
-rw-r--r--third_party/python/pip/pip/_vendor/resolvelib/reporters.py43
-rw-r--r--third_party/python/pip/pip/_vendor/resolvelib/resolvers.py482
-rw-r--r--third_party/python/pip/pip/_vendor/resolvelib/structs.py165
-rw-r--r--third_party/python/pip/pip/_vendor/rich/__init__.py177
-rw-r--r--third_party/python/pip/pip/_vendor/rich/__main__.py274
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_cell_widths.py451
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_emoji_codes.py3610
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_emoji_replace.py32
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_export_format.py78
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_extension.py10
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_inspect.py270
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_log_render.py94
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_loop.py43
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_null_file.py83
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_palettes.py309
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_pick.py17
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_ratio.py160
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_spinners.py482
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_stack.py16
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_timer.py19
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_win32_console.py662
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_windows.py72
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_windows_renderer.py56
-rw-r--r--third_party/python/pip/pip/_vendor/rich/_wrap.py56
-rw-r--r--third_party/python/pip/pip/_vendor/rich/abc.py33
-rw-r--r--third_party/python/pip/pip/_vendor/rich/align.py311
-rw-r--r--third_party/python/pip/pip/_vendor/rich/ansi.py237
-rw-r--r--third_party/python/pip/pip/_vendor/rich/bar.py94
-rw-r--r--third_party/python/pip/pip/_vendor/rich/box.py517
-rw-r--r--third_party/python/pip/pip/_vendor/rich/cells.py154
-rw-r--r--third_party/python/pip/pip/_vendor/rich/color.py618
-rw-r--r--third_party/python/pip/pip/_vendor/rich/color_triplet.py38
-rw-r--r--third_party/python/pip/pip/_vendor/rich/columns.py187
-rw-r--r--third_party/python/pip/pip/_vendor/rich/console.py2612
-rw-r--r--third_party/python/pip/pip/_vendor/rich/constrain.py37
-rw-r--r--third_party/python/pip/pip/_vendor/rich/containers.py167
-rw-r--r--third_party/python/pip/pip/_vendor/rich/control.py225
-rw-r--r--third_party/python/pip/pip/_vendor/rich/default_styles.py188
-rw-r--r--third_party/python/pip/pip/_vendor/rich/diagnose.py37
-rw-r--r--third_party/python/pip/pip/_vendor/rich/emoji.py96
-rw-r--r--third_party/python/pip/pip/_vendor/rich/errors.py34
-rw-r--r--third_party/python/pip/pip/_vendor/rich/file_proxy.py54
-rw-r--r--third_party/python/pip/pip/_vendor/rich/filesize.py89
-rw-r--r--third_party/python/pip/pip/_vendor/rich/highlighter.py232
-rw-r--r--third_party/python/pip/pip/_vendor/rich/json.py140
-rw-r--r--third_party/python/pip/pip/_vendor/rich/jupyter.py101
-rw-r--r--third_party/python/pip/pip/_vendor/rich/layout.py443
-rw-r--r--third_party/python/pip/pip/_vendor/rich/live.py373
-rw-r--r--third_party/python/pip/pip/_vendor/rich/live_render.py113
-rw-r--r--third_party/python/pip/pip/_vendor/rich/logging.py289
-rw-r--r--third_party/python/pip/pip/_vendor/rich/markup.py246
-rw-r--r--third_party/python/pip/pip/_vendor/rich/measure.py151
-rw-r--r--third_party/python/pip/pip/_vendor/rich/padding.py141
-rw-r--r--third_party/python/pip/pip/_vendor/rich/pager.py34
-rw-r--r--third_party/python/pip/pip/_vendor/rich/palette.py100
-rw-r--r--third_party/python/pip/pip/_vendor/rich/panel.py308
-rw-r--r--third_party/python/pip/pip/_vendor/rich/pretty.py1029
-rw-r--r--third_party/python/pip/pip/_vendor/rich/progress.py1707
-rw-r--r--third_party/python/pip/pip/_vendor/rich/progress_bar.py224
-rw-r--r--third_party/python/pip/pip/_vendor/rich/prompt.py376
-rw-r--r--third_party/python/pip/pip/_vendor/rich/protocol.py42
-rw-r--r--third_party/python/pip/pip/_vendor/rich/region.py10
-rw-r--r--third_party/python/pip/pip/_vendor/rich/repr.py149
-rw-r--r--third_party/python/pip/pip/_vendor/rich/rule.py134
-rw-r--r--third_party/python/pip/pip/_vendor/rich/scope.py86
-rw-r--r--third_party/python/pip/pip/_vendor/rich/screen.py54
-rw-r--r--third_party/python/pip/pip/_vendor/rich/segment.py739
-rw-r--r--third_party/python/pip/pip/_vendor/rich/spinner.py136
-rw-r--r--third_party/python/pip/pip/_vendor/rich/status.py132
-rw-r--r--third_party/python/pip/pip/_vendor/rich/style.py773
-rw-r--r--third_party/python/pip/pip/_vendor/rich/styled.py42
-rw-r--r--third_party/python/pip/pip/_vendor/rich/syntax.py945
-rw-r--r--third_party/python/pip/pip/_vendor/rich/table.py1002
-rw-r--r--third_party/python/pip/pip/_vendor/rich/terminal_theme.py153
-rw-r--r--third_party/python/pip/pip/_vendor/rich/text.py1311
-rw-r--r--third_party/python/pip/pip/_vendor/rich/theme.py112
-rw-r--r--third_party/python/pip/pip/_vendor/rich/themes.py5
-rw-r--r--third_party/python/pip/pip/_vendor/rich/traceback.py677
-rw-r--r--third_party/python/pip/pip/_vendor/rich/tree.py251
-rw-r--r--third_party/python/pip/pip/_vendor/six.py998
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/__init__.py519
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/_asyncio.py92
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/_utils.py68
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/after.py46
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/before.py41
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/before_sleep.py58
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/nap.py43
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/retry.py240
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/stop.py96
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/tornadoweb.py59
-rw-r--r--third_party/python/pip/pip/_vendor/tenacity/wait.py232
-rw-r--r--third_party/python/pip/pip/_vendor/tomli/__init__.py11
-rw-r--r--third_party/python/pip/pip/_vendor/tomli/_parser.py691
-rw-r--r--third_party/python/pip/pip/_vendor/tomli/_re.py107
-rw-r--r--third_party/python/pip/pip/_vendor/tomli/_types.py10
-rw-r--r--third_party/python/pip/pip/_vendor/typing_extensions.py2209
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/__init__.py102
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/_collections.py337
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/_version.py2
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/connection.py567
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/connectionpool.py1110
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/__init__.py0
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/_appengine_environ.py36
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/__init__.py0
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/bindings.py519
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/low_level.py397
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/appengine.py314
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/ntlmpool.py130
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/pyopenssl.py518
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/securetransport.py921
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/contrib/socks.py216
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/exceptions.py323
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/fields.py274
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/filepost.py98
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/packages/__init__.py0
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/packages/backports/__init__.py0
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/packages/backports/makefile.py51
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/packages/six.py1076
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/poolmanager.py537
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/request.py170
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/response.py879
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/__init__.py49
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/connection.py149
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/proxy.py57
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/queue.py22
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/request.py137
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/response.py107
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/retry.py620
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/ssl_.py495
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/ssl_match_hostname.py159
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/ssltransport.py221
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/timeout.py268
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/url.py435
-rw-r--r--third_party/python/pip/pip/_vendor/urllib3/util/wait.py152
-rw-r--r--third_party/python/pip/pip/_vendor/vendor.txt23
-rw-r--r--third_party/python/pip/pip/_vendor/webencodings/__init__.py342
-rw-r--r--third_party/python/pip/pip/_vendor/webencodings/labels.py231
-rw-r--r--third_party/python/pip/pip/_vendor/webencodings/mklabels.py59
-rw-r--r--third_party/python/pip/pip/_vendor/webencodings/tests.py153
-rw-r--r--third_party/python/pip/pip/_vendor/webencodings/x_user_defined.py325
-rw-r--r--third_party/python/pip/pip/py.typed4
-rw-r--r--third_party/python/pip_tools/pip_tools-5.5.0.dist-info/LICENSE26
-rw-r--r--third_party/python/pip_tools/pip_tools-5.5.0.dist-info/METADATA535
-rw-r--r--third_party/python/pip_tools/pip_tools-5.5.0.dist-info/RECORD28
-rw-r--r--third_party/python/pip_tools/pip_tools-5.5.0.dist-info/WHEEL6
-rw-r--r--third_party/python/pip_tools/pip_tools-5.5.0.dist-info/entry_points.txt4
-rw-r--r--third_party/python/pip_tools/pip_tools-5.5.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/pip_tools/piptools/__init__.py11
-rw-r--r--third_party/python/pip_tools/piptools/__main__.py17
-rw-r--r--third_party/python/pip_tools/piptools/_compat/__init__.py26
-rw-r--r--third_party/python/pip_tools/piptools/_compat/contextlib.py18
-rw-r--r--third_party/python/pip_tools/piptools/_compat/pip_compat.py18
-rw-r--r--third_party/python/pip_tools/piptools/_compat/tempfile.py88
-rw-r--r--third_party/python/pip_tools/piptools/cache.py173
-rw-r--r--third_party/python/pip_tools/piptools/click.py6
-rw-r--r--third_party/python/pip_tools/piptools/exceptions.py66
-rw-r--r--third_party/python/pip_tools/piptools/locations.py25
-rw-r--r--third_party/python/pip_tools/piptools/logging.py62
-rw-r--r--third_party/python/pip_tools/piptools/repositories/__init__.py3
-rw-r--r--third_party/python/pip_tools/piptools/repositories/base.py57
-rw-r--r--third_party/python/pip_tools/piptools/repositories/local.py97
-rw-r--r--third_party/python/pip_tools/piptools/repositories/pypi.py531
-rw-r--r--third_party/python/pip_tools/piptools/resolver.py405
-rw-r--r--third_party/python/pip_tools/piptools/scripts/__init__.py0
-rw-r--r--third_party/python/pip_tools/piptools/scripts/compile.py495
-rw-r--r--third_party/python/pip_tools/piptools/scripts/sync.py214
-rw-r--r--third_party/python/pip_tools/piptools/sync.py216
-rw-r--r--third_party/python/pip_tools/piptools/utils.py384
-rw-r--r--third_party/python/pip_tools/piptools/writer.py243
-rw-r--r--third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/LICENSE75
-rw-r--r--third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/METADATA19
-rw-r--r--third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/RECORD5
-rw-r--r--third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/WHEEL4
-rw-r--r--third_party/python/pkgutil_resolve_name/pkgutil_resolve_name.py112
-rw-r--r--third_party/python/ply/ANNOUNCE40
-rw-r--r--third_party/python/ply/CHANGES1394
-rw-r--r--third_party/python/ply/MANIFEST.in8
-rw-r--r--third_party/python/ply/PKG-INFO22
-rw-r--r--third_party/python/ply/README.md273
-rw-r--r--third_party/python/ply/TODO16
-rw-r--r--third_party/python/ply/example/BASIC/README79
-rw-r--r--third_party/python/ply/example/BASIC/basic.py65
-rw-r--r--third_party/python/ply/example/BASIC/basiclex.py61
-rw-r--r--third_party/python/ply/example/BASIC/basiclog.py73
-rw-r--r--third_party/python/ply/example/BASIC/basinterp.py496
-rw-r--r--third_party/python/ply/example/BASIC/basparse.py474
-rw-r--r--third_party/python/ply/example/BASIC/dim.bas14
-rw-r--r--third_party/python/ply/example/BASIC/func.bas5
-rw-r--r--third_party/python/ply/example/BASIC/gcd.bas22
-rw-r--r--third_party/python/ply/example/BASIC/gosub.bas13
-rw-r--r--third_party/python/ply/example/BASIC/hello.bas4
-rw-r--r--third_party/python/ply/example/BASIC/linear.bas17
-rw-r--r--third_party/python/ply/example/BASIC/maxsin.bas12
-rw-r--r--third_party/python/ply/example/BASIC/powers.bas13
-rw-r--r--third_party/python/ply/example/BASIC/rand.bas4
-rw-r--r--third_party/python/ply/example/BASIC/sales.bas20
-rw-r--r--third_party/python/ply/example/BASIC/sears.bas18
-rw-r--r--third_party/python/ply/example/BASIC/sqrt1.bas5
-rw-r--r--third_party/python/ply/example/BASIC/sqrt2.bas4
-rw-r--r--third_party/python/ply/example/GardenSnake/GardenSnake.py777
-rw-r--r--third_party/python/ply/example/GardenSnake/README5
-rw-r--r--third_party/python/ply/example/README10
-rw-r--r--third_party/python/ply/example/ansic/README2
-rw-r--r--third_party/python/ply/example/ansic/clex.py168
-rw-r--r--third_party/python/ply/example/ansic/cparse.py1048
-rw-r--r--third_party/python/ply/example/calc/calc.py123
-rw-r--r--third_party/python/ply/example/calcdebug/calc.py129
-rw-r--r--third_party/python/ply/example/calceof/calc.py132
-rwxr-xr-xthird_party/python/ply/example/classcalc/calc.py165
-rwxr-xr-xthird_party/python/ply/example/cleanup.sh2
-rw-r--r--third_party/python/ply/example/closurecalc/calc.py132
-rw-r--r--third_party/python/ply/example/hedit/hedit.py48
-rwxr-xr-xthird_party/python/ply/example/newclasscalc/calc.py167
-rw-r--r--third_party/python/ply/example/optcalc/README9
-rw-r--r--third_party/python/ply/example/optcalc/calc.py134
-rw-r--r--third_party/python/ply/example/unicalc/calc.py133
-rw-r--r--third_party/python/ply/example/yply/README41
-rw-r--r--third_party/python/ply/example/yply/ylex.py119
-rw-r--r--third_party/python/ply/example/yply/yparse.py244
-rwxr-xr-xthird_party/python/ply/example/yply/yply.py51
-rw-r--r--third_party/python/ply/ply.egg-info/PKG-INFO22
-rw-r--r--third_party/python/ply/ply.egg-info/SOURCES.txt172
-rw-r--r--third_party/python/ply/ply.egg-info/dependency_links.txt1
-rw-r--r--third_party/python/ply/ply.egg-info/top_level.txt1
-rw-r--r--third_party/python/ply/ply/__init__.py5
-rw-r--r--third_party/python/ply/ply/cpp.py918
-rw-r--r--third_party/python/ply/ply/ctokens.py133
-rw-r--r--third_party/python/ply/ply/lex.py1100
-rw-r--r--third_party/python/ply/ply/yacc.py3494
-rw-r--r--third_party/python/ply/ply/ygen.py74
-rw-r--r--third_party/python/ply/setup.cfg11
-rw-r--r--third_party/python/ply/setup.py31
-rw-r--r--third_party/python/poetry.lock1282
-rw-r--r--third_party/python/pyasn1/pyasn1-0.4.8.dist-info/LICENSE.rst24
-rw-r--r--third_party/python/pyasn1/pyasn1-0.4.8.dist-info/METADATA38
-rw-r--r--third_party/python/pyasn1/pyasn1-0.4.8.dist-info/RECORD42
-rw-r--r--third_party/python/pyasn1/pyasn1-0.4.8.dist-info/WHEEL6
-rw-r--r--third_party/python/pyasn1/pyasn1-0.4.8.dist-info/top_level.txt1
-rw-r--r--third_party/python/pyasn1/pyasn1-0.4.8.dist-info/zip-safe1
-rw-r--r--third_party/python/pyasn1/pyasn1/__init__.py7
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/__init__.py1
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/ber/__init__.py1
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/ber/decoder.py1682
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/ber/encoder.py890
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/ber/eoo.py28
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/cer/__init__.py1
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/cer/decoder.py114
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/cer/encoder.py313
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/der/__init__.py1
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/der/decoder.py94
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/der/encoder.py107
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/native/__init__.py1
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/native/decoder.py213
-rw-r--r--third_party/python/pyasn1/pyasn1/codec/native/encoder.py256
-rw-r--r--third_party/python/pyasn1/pyasn1/compat/__init__.py1
-rw-r--r--third_party/python/pyasn1/pyasn1/compat/binary.py33
-rw-r--r--third_party/python/pyasn1/pyasn1/compat/calling.py20
-rw-r--r--third_party/python/pyasn1/pyasn1/compat/dateandtime.py22
-rw-r--r--third_party/python/pyasn1/pyasn1/compat/integer.py110
-rw-r--r--third_party/python/pyasn1/pyasn1/compat/octets.py46
-rw-r--r--third_party/python/pyasn1/pyasn1/compat/string.py26
-rw-r--r--third_party/python/pyasn1/pyasn1/debug.py157
-rw-r--r--third_party/python/pyasn1/pyasn1/error.py75
-rw-r--r--third_party/python/pyasn1/pyasn1/type/__init__.py1
-rw-r--r--third_party/python/pyasn1/pyasn1/type/base.py707
-rw-r--r--third_party/python/pyasn1/pyasn1/type/char.py335
-rw-r--r--third_party/python/pyasn1/pyasn1/type/constraint.py756
-rw-r--r--third_party/python/pyasn1/pyasn1/type/error.py11
-rw-r--r--third_party/python/pyasn1/pyasn1/type/namedtype.py561
-rw-r--r--third_party/python/pyasn1/pyasn1/type/namedval.py192
-rw-r--r--third_party/python/pyasn1/pyasn1/type/opentype.py104
-rw-r--r--third_party/python/pyasn1/pyasn1/type/tag.py335
-rw-r--r--third_party/python/pyasn1/pyasn1/type/tagmap.py96
-rw-r--r--third_party/python/pyasn1/pyasn1/type/univ.py3321
-rw-r--r--third_party/python/pyasn1/pyasn1/type/useful.py191
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/LICENSE.txt24
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/METADATA42
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/RECORD113
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/WHEEL6
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/top_level.txt1
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/zip-safe1
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/__init__.py2
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/pem.py65
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc1155.py96
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc1157.py126
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc1901.py22
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc1902.py129
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc1905.py135
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2251.py563
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2314.py48
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2315.py294
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2437.py69
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2459.py1339
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2511.py258
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2560.py225
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2631.py37
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2634.py336
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2985.py588
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc2986.py75
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3114.py77
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3161.py142
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3274.py59
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3279.py260
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3280.py1543
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3281.py331
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3412.py53
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3414.py28
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3447.py45
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3560.py74
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3565.py57
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3709.py207
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3770.py75
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3779.py137
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc3852.py706
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc4043.py43
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc4055.py258
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc4073.py59
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc4108.py350
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc4210.py803
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc4211.py396
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc4334.py75
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc4985.py49
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5035.py199
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5083.py52
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5084.py97
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5208.py56
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5280.py1658
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5480.py190
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5649.py33
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5652.py761
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5751.py124
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5755.py398
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5913.py44
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5914.py119
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5915.py32
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5916.py35
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5917.py55
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5924.py19
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5934.py786
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5940.py59
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5958.py98
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc5990.py237
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6010.py88
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6019.py45
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6031.py469
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6032.py68
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6120.py43
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6170.py17
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6187.py22
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6210.py42
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6211.py72
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6402-1.py627
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6402.py628
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6482.py74
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6486.py68
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6487.py22
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6664.py147
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6955.py108
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc6960.py223
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7030.py66
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7191.py261
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7229.py29
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7292.py357
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7296.py32
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7508.py90
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7585.py50
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7633.py38
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7773.py52
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7894-1.py92
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7894.py92
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7906.py736
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc7914.py49
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8017.py153
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8018.py260
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8103.py36
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8209.py20
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8226.py149
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8358.py50
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8360.py44
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8398.py52
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8410.py43
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8418.py36
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8419.py68
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8479.py45
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8494.py80
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8520.py63
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8619.py45
-rw-r--r--third_party/python/pyasn1_modules/pyasn1_modules/rfc8649.py40
-rw-r--r--third_party/python/pylru/LICENSE.txt339
-rw-r--r--third_party/python/pylru/PKG-INFO263
-rw-r--r--third_party/python/pylru/README.txt245
-rw-r--r--third_party/python/pylru/pylru.py556
-rw-r--r--third_party/python/pylru/setup.py23
-rw-r--r--third_party/python/pylru/test.py238
-rw-r--r--third_party/python/pyparsing/pyparsing-2.4.7.dist-info/LICENSE18
-rw-r--r--third_party/python/pyparsing/pyparsing-2.4.7.dist-info/METADATA104
-rw-r--r--third_party/python/pyparsing/pyparsing-2.4.7.dist-info/RECORD6
-rw-r--r--third_party/python/pyparsing/pyparsing-2.4.7.dist-info/WHEEL6
-rw-r--r--third_party/python/pyparsing/pyparsing-2.4.7.dist-info/top_level.txt1
-rw-r--r--third_party/python/pyparsing/pyparsing.py7107
-rw-r--r--third_party/python/pyrsistent/CHANGES.txt333
-rw-r--r--third_party/python/pyrsistent/LICENCE.mit22
-rw-r--r--third_party/python/pyrsistent/MANIFEST.in5
-rw-r--r--third_party/python/pyrsistent/PKG-INFO742
-rw-r--r--third_party/python/pyrsistent/README725
-rw-r--r--third_party/python/pyrsistent/README.rst725
-rw-r--r--third_party/python/pyrsistent/_pyrsistent_version.py1
-rw-r--r--third_party/python/pyrsistent/pvectorcmodule.c1642
-rw-r--r--third_party/python/pyrsistent/pyrsistent.egg-info/PKG-INFO742
-rw-r--r--third_party/python/pyrsistent/pyrsistent.egg-info/SOURCES.txt53
-rw-r--r--third_party/python/pyrsistent/pyrsistent.egg-info/dependency_links.txt1
-rw-r--r--third_party/python/pyrsistent/pyrsistent.egg-info/requires.txt1
-rw-r--r--third_party/python/pyrsistent/pyrsistent.egg-info/top_level.txt3
-rw-r--r--third_party/python/pyrsistent/pyrsistent/__init__.py47
-rw-r--r--third_party/python/pyrsistent/pyrsistent/__init__.pyi213
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_checked_types.py542
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_compat.py31
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_field_common.py330
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_helpers.py82
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_immutable.py105
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_pbag.py267
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_pclass.py264
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_pdeque.py376
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_plist.py313
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_pmap.py460
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_precord.py169
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_pset.py229
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_pvector.py713
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_toolz.py83
-rw-r--r--third_party/python/pyrsistent/pyrsistent/_transformations.py143
-rw-r--r--third_party/python/pyrsistent/pyrsistent/py.typed0
-rw-r--r--third_party/python/pyrsistent/pyrsistent/typing.py80
-rw-r--r--third_party/python/pyrsistent/pyrsistent/typing.pyi292
-rw-r--r--third_party/python/pyrsistent/setup.cfg7
-rw-r--r--third_party/python/pyrsistent/setup.py81
-rw-r--r--third_party/python/python-hglib/LICENSE20
-rw-r--r--third_party/python/python-hglib/Makefile17
-rw-r--r--third_party/python/python-hglib/PKG-INFO26
-rw-r--r--third_party/python/python-hglib/README9
-rw-r--r--third_party/python/python-hglib/examples/stats.py35
-rw-r--r--third_party/python/python-hglib/hglib/__init__.py40
-rw-r--r--third_party/python/python-hglib/hglib/client.py1717
-rw-r--r--third_party/python/python-hglib/hglib/context.py238
-rw-r--r--third_party/python/python-hglib/hglib/error.py18
-rw-r--r--third_party/python/python-hglib/hglib/merge.py21
-rw-r--r--third_party/python/python-hglib/hglib/templates.py4
-rw-r--r--third_party/python/python-hglib/hglib/util.py217
-rw-r--r--third_party/python/python-hglib/setup.py54
-rw-r--r--third_party/python/python-hglib/test.py7
-rw-r--r--third_party/python/redo/redo-2.0.3.dist-info/AUTHORS7
-rw-r--r--third_party/python/redo/redo-2.0.3.dist-info/METADATA13
-rw-r--r--third_party/python/redo/redo-2.0.3.dist-info/RECORD8
-rw-r--r--third_party/python/redo/redo-2.0.3.dist-info/WHEEL6
-rw-r--r--third_party/python/redo/redo-2.0.3.dist-info/entry_points.txt3
-rw-r--r--third_party/python/redo/redo-2.0.3.dist-info/top_level.txt1
-rw-r--r--third_party/python/redo/redo/__init__.py265
-rw-r--r--third_party/python/redo/redo/cmd.py70
-rw-r--r--third_party/python/requests/requests-2.25.1.dist-info/LICENSE175
-rw-r--r--third_party/python/requests/requests-2.25.1.dist-info/METADATA103
-rw-r--r--third_party/python/requests/requests-2.25.1.dist-info/RECORD23
-rw-r--r--third_party/python/requests/requests-2.25.1.dist-info/WHEEL6
-rw-r--r--third_party/python/requests/requests-2.25.1.dist-info/top_level.txt1
-rw-r--r--third_party/python/requests/requests/__init__.py137
-rw-r--r--third_party/python/requests/requests/__version__.py14
-rw-r--r--third_party/python/requests/requests/_internal_utils.py42
-rw-r--r--third_party/python/requests/requests/adapters.py533
-rw-r--r--third_party/python/requests/requests/api.py161
-rw-r--r--third_party/python/requests/requests/auth.py305
-rw-r--r--third_party/python/requests/requests/certs.py18
-rw-r--r--third_party/python/requests/requests/compat.py72
-rw-r--r--third_party/python/requests/requests/cookies.py549
-rw-r--r--third_party/python/requests/requests/exceptions.py123
-rw-r--r--third_party/python/requests/requests/help.py119
-rw-r--r--third_party/python/requests/requests/hooks.py34
-rw-r--r--third_party/python/requests/requests/models.py956
-rw-r--r--third_party/python/requests/requests/packages.py14
-rw-r--r--third_party/python/requests/requests/sessions.py781
-rw-r--r--third_party/python/requests/requests/status_codes.py123
-rw-r--r--third_party/python/requests/requests/structures.py105
-rw-r--r--third_party/python/requests/requests/utils.py992
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/AUTHORS11
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/LICENSE202
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/METADATA117
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/RECORD11
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/WHEEL6
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/pbr.json1
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket/__init__.py77
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket/adapters.py89
-rw-r--r--third_party/python/requests_unixsocket/requests_unixsocket/testutils.py97
-rw-r--r--third_party/python/requirements.in52
-rw-r--r--third_party/python/requirements.txt407
-rw-r--r--third_party/python/responses/responses-0.10.6.dist-info/LICENSE201
-rw-r--r--third_party/python/responses/responses-0.10.6.dist-info/METADATA454
-rw-r--r--third_party/python/responses/responses-0.10.6.dist-info/RECORD6
-rw-r--r--third_party/python/responses/responses-0.10.6.dist-info/WHEEL6
-rw-r--r--third_party/python/responses/responses-0.10.6.dist-info/top_level.txt1
-rw-r--r--third_party/python/responses/responses.py653
-rw-r--r--third_party/python/rsa/LICENSE13
-rw-r--r--third_party/python/rsa/MANIFEST.in5
-rw-r--r--third_party/python/rsa/PKG-INFO18
-rw-r--r--third_party/python/rsa/README.rst31
-rwxr-xr-xthird_party/python/rsa/create_timing_table.py29
-rwxr-xr-xthird_party/python/rsa/playstuff.py41
-rw-r--r--third_party/python/rsa/rsa.egg-info/PKG-INFO18
-rw-r--r--third_party/python/rsa/rsa.egg-info/SOURCES.txt46
-rw-r--r--third_party/python/rsa/rsa.egg-info/dependency_links.txt1
-rw-r--r--third_party/python/rsa/rsa.egg-info/entry_points.txt10
-rw-r--r--third_party/python/rsa/rsa.egg-info/requires.txt1
-rw-r--r--third_party/python/rsa/rsa.egg-info/top_level.txt1
-rw-r--r--third_party/python/rsa/rsa/__init__.py45
-rw-r--r--third_party/python/rsa/rsa/_compat.py160
-rw-r--r--third_party/python/rsa/rsa/_version133.py442
-rw-r--r--third_party/python/rsa/rsa/_version200.py529
-rw-r--r--third_party/python/rsa/rsa/asn1.py35
-rw-r--r--third_party/python/rsa/rsa/bigfile.py87
-rw-r--r--third_party/python/rsa/rsa/cli.py379
-rw-r--r--third_party/python/rsa/rsa/common.py185
-rw-r--r--third_party/python/rsa/rsa/core.py58
-rw-r--r--third_party/python/rsa/rsa/key.py612
-rw-r--r--third_party/python/rsa/rsa/parallel.py94
-rw-r--r--third_party/python/rsa/rsa/pem.py120
-rw-r--r--third_party/python/rsa/rsa/pkcs1.py391
-rw-r--r--third_party/python/rsa/rsa/prime.py166
-rw-r--r--third_party/python/rsa/rsa/randnum.py85
-rw-r--r--third_party/python/rsa/rsa/transform.py220
-rw-r--r--third_party/python/rsa/rsa/util.py81
-rw-r--r--third_party/python/rsa/rsa/varblock.py155
-rw-r--r--third_party/python/rsa/run_tests.py43
-rw-r--r--third_party/python/rsa/setup.cfg8
-rwxr-xr-xthird_party/python/rsa/setup.py41
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/LICENSE9
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/METADATA60
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/RECORD58
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/WHEEL6
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/top_level.txt1
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/__init__.py25
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/_compat.py92
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/_types.py37
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/api.py256
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/client.py406
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/consts.py97
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/debug.py44
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/envelope.py293
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/hub.py647
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/__init__.py183
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/_wsgi_common.py180
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/aiohttp.py211
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/argv.py33
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/asgi.py194
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/atexit.py62
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/aws_lambda.py254
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/beam.py184
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/bottle.py199
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/celery.py258
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/dedupe.py43
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/django/__init__.py484
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/django/asgi.py47
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/django/middleware.py136
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/django/templates.py121
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/django/transactions.py134
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/excepthook.py76
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/falcon.py209
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/flask.py260
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/gnu_backtrace.py107
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/logging.py237
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/modules.py56
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/pyramid.py217
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/redis.py70
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/rq.py150
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/sanic.py233
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/serverless.py87
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/spark/__init__.py4
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/spark/spark_driver.py263
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/spark/spark_worker.py120
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/sqlalchemy.py86
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/stdlib.py230
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/threading.py90
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/tornado.py203
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/trytond.py55
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/integrations/wsgi.py309
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/py.typed0
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/scope.py408
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/serializer.py336
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/sessions.py249
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/tracing.py498
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/transport.py365
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/utils.py831
-rw-r--r--third_party/python/sentry_sdk/sentry_sdk/worker.py142
-rw-r--r--third_party/python/setuptools/_distutils_hack/__init__.py123
-rw-r--r--third_party/python/setuptools/_distutils_hack/override.py1
-rw-r--r--third_party/python/setuptools/distutils-precedence.pth1
-rw-r--r--third_party/python/setuptools/easy_install.py5
-rw-r--r--third_party/python/setuptools/pkg_resources/__init__.py3285
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/__init__.py0
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/appdirs.py608
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/__about__.py27
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/__init__.py26
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/_compat.py38
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/_structures.py86
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/_typing.py48
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/markers.py328
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/requirements.py145
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/specifiers.py863
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/tags.py751
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/utils.py65
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/packaging/version.py535
-rw-r--r--third_party/python/setuptools/pkg_resources/_vendor/pyparsing.py5742
-rw-r--r--third_party/python/setuptools/pkg_resources/extern/__init__.py66
-rw-r--r--third_party/python/setuptools/setuptools-51.2.0.dist-info/LICENSE19
-rw-r--r--third_party/python/setuptools/setuptools-51.2.0.dist-info/METADATA110
-rw-r--r--third_party/python/setuptools/setuptools-51.2.0.dist-info/RECORD156
-rw-r--r--third_party/python/setuptools/setuptools-51.2.0.dist-info/WHEEL5
-rw-r--r--third_party/python/setuptools/setuptools-51.2.0.dist-info/dependency_links.txt2
-rw-r--r--third_party/python/setuptools/setuptools-51.2.0.dist-info/entry_points.txt68
-rw-r--r--third_party/python/setuptools/setuptools-51.2.0.dist-info/top_level.txt4
-rw-r--r--third_party/python/setuptools/setuptools/__init__.py241
-rw-r--r--third_party/python/setuptools/setuptools/_deprecation_warning.py7
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/__init__.py15
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/_msvccompiler.py561
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/archive_util.py256
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/bcppcompiler.py393
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/ccompiler.py1116
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/cmd.py403
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/__init__.py31
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/bdist.py143
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/bdist_dumb.py123
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/bdist_msi.py749
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/bdist_rpm.py579
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/bdist_wininst.py377
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/build.py157
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/build_clib.py209
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/build_ext.py755
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/build_py.py416
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/build_scripts.py160
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/check.py148
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/clean.py76
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/config.py344
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/install.py677
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/install_data.py79
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/install_egg_info.py77
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/install_headers.py47
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/install_lib.py217
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/install_scripts.py60
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/py37compat.py30
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/register.py304
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/sdist.py494
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/command/upload.py214
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/config.py130
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/core.py234
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/cygwinccompiler.py403
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/debug.py5
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/dep_util.py92
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/dir_util.py210
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/dist.py1257
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/errors.py97
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/extension.py240
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/fancy_getopt.py457
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/file_util.py238
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/filelist.py327
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/log.py77
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/msvc9compiler.py788
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/msvccompiler.py643
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/py35compat.py19
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/py38compat.py7
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/spawn.py125
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/sysconfig.py573
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/text_file.py286
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/unixccompiler.py328
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/util.py561
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/version.py347
-rw-r--r--third_party/python/setuptools/setuptools/_distutils/versionpredicate.py166
-rw-r--r--third_party/python/setuptools/setuptools/_imp.py82
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/__init__.py0
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/ordered_set.py488
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/__about__.py27
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/__init__.py26
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/_compat.py38
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/_structures.py86
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/_typing.py48
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/markers.py328
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/requirements.py145
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/specifiers.py863
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/tags.py751
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/utils.py65
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/packaging/version.py535
-rw-r--r--third_party/python/setuptools/setuptools/_vendor/pyparsing.py5742
-rw-r--r--third_party/python/setuptools/setuptools/archive_util.py175
-rw-r--r--third_party/python/setuptools/setuptools/build_meta.py276
-rw-r--r--third_party/python/setuptools/setuptools/cli-32.exebin0 -> 65536 bytes
-rw-r--r--third_party/python/setuptools/setuptools/cli-64.exebin0 -> 74752 bytes
-rw-r--r--third_party/python/setuptools/setuptools/cli.exebin0 -> 65536 bytes
-rw-r--r--third_party/python/setuptools/setuptools/command/__init__.py17
-rw-r--r--third_party/python/setuptools/setuptools/command/alias.py78
-rw-r--r--third_party/python/setuptools/setuptools/command/bdist_egg.py501
-rw-r--r--third_party/python/setuptools/setuptools/command/bdist_rpm.py31
-rw-r--r--third_party/python/setuptools/setuptools/command/bdist_wininst.py30
-rw-r--r--third_party/python/setuptools/setuptools/command/build_clib.py101
-rw-r--r--third_party/python/setuptools/setuptools/command/build_ext.py322
-rw-r--r--third_party/python/setuptools/setuptools/command/build_py.py270
-rw-r--r--third_party/python/setuptools/setuptools/command/develop.py216
-rw-r--r--third_party/python/setuptools/setuptools/command/dist_info.py36
-rw-r--r--third_party/python/setuptools/setuptools/command/easy_install.py2318
-rw-r--r--third_party/python/setuptools/setuptools/command/egg_info.py722
-rw-r--r--third_party/python/setuptools/setuptools/command/install.py125
-rw-r--r--third_party/python/setuptools/setuptools/command/install_egg_info.py62
-rw-r--r--third_party/python/setuptools/setuptools/command/install_lib.py122
-rw-r--r--third_party/python/setuptools/setuptools/command/install_scripts.py68
-rw-r--r--third_party/python/setuptools/setuptools/command/launcher manifest.xml15
-rw-r--r--third_party/python/setuptools/setuptools/command/py36compat.py134
-rw-r--r--third_party/python/setuptools/setuptools/command/register.py18
-rw-r--r--third_party/python/setuptools/setuptools/command/rotate.py64
-rw-r--r--third_party/python/setuptools/setuptools/command/saveopts.py22
-rw-r--r--third_party/python/setuptools/setuptools/command/sdist.py222
-rw-r--r--third_party/python/setuptools/setuptools/command/setopt.py148
-rw-r--r--third_party/python/setuptools/setuptools/command/test.py274
-rw-r--r--third_party/python/setuptools/setuptools/command/upload.py17
-rw-r--r--third_party/python/setuptools/setuptools/command/upload_docs.py202
-rw-r--r--third_party/python/setuptools/setuptools/config.py693
-rw-r--r--third_party/python/setuptools/setuptools/dep_util.py25
-rw-r--r--third_party/python/setuptools/setuptools/depends.py175
-rw-r--r--third_party/python/setuptools/setuptools/dist.py1009
-rw-r--r--third_party/python/setuptools/setuptools/errors.py16
-rw-r--r--third_party/python/setuptools/setuptools/extension.py55
-rw-r--r--third_party/python/setuptools/setuptools/extern/__init__.py66
-rw-r--r--third_party/python/setuptools/setuptools/glob.py174
-rw-r--r--third_party/python/setuptools/setuptools/gui-32.exebin0 -> 65536 bytes
-rw-r--r--third_party/python/setuptools/setuptools/gui-64.exebin0 -> 75264 bytes
-rw-r--r--third_party/python/setuptools/setuptools/gui.exebin0 -> 65536 bytes
-rw-r--r--third_party/python/setuptools/setuptools/installer.py148
-rw-r--r--third_party/python/setuptools/setuptools/launch.py36
-rw-r--r--third_party/python/setuptools/setuptools/lib2to3_ex.py68
-rw-r--r--third_party/python/setuptools/setuptools/monkey.py177
-rw-r--r--third_party/python/setuptools/setuptools/msvc.py1830
-rw-r--r--third_party/python/setuptools/setuptools/namespaces.py107
-rw-r--r--third_party/python/setuptools/setuptools/package_index.py1139
-rw-r--r--third_party/python/setuptools/setuptools/py34compat.py13
-rw-r--r--third_party/python/setuptools/setuptools/sandbox.py496
-rw-r--r--third_party/python/setuptools/setuptools/script (dev).tmpl6
-rw-r--r--third_party/python/setuptools/setuptools/script.tmpl3
-rw-r--r--third_party/python/setuptools/setuptools/ssl_support.py266
-rw-r--r--third_party/python/setuptools/setuptools/unicode_utils.py42
-rw-r--r--third_party/python/setuptools/setuptools/version.py6
-rw-r--r--third_party/python/setuptools/setuptools/wheel.py213
-rw-r--r--third_party/python/setuptools/setuptools/windows_support.py29
-rw-r--r--third_party/python/six/six-1.13.0.dist-info/LICENSE18
-rw-r--r--third_party/python/six/six-1.13.0.dist-info/METADATA52
-rw-r--r--third_party/python/six/six-1.13.0.dist-info/RECORD6
-rw-r--r--third_party/python/six/six-1.13.0.dist-info/WHEEL6
-rw-r--r--third_party/python/six/six-1.13.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/six/six.py963
-rw-r--r--third_party/python/slugid/slugid-2.0.0.dist-info/LICENSE373
-rw-r--r--third_party/python/slugid/slugid-2.0.0.dist-info/METADATA17
-rw-r--r--third_party/python/slugid/slugid-2.0.0.dist-info/RECORD7
-rw-r--r--third_party/python/slugid/slugid-2.0.0.dist-info/WHEEL6
-rw-r--r--third_party/python/slugid/slugid-2.0.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/slugid/slugid/__init__.py48
-rw-r--r--third_party/python/slugid/slugid/slugid.py55
-rw-r--r--third_party/python/taskcluster/taskcluster-44.2.2.dist-info/LICENSE373
-rw-r--r--third_party/python/taskcluster/taskcluster-44.2.2.dist-info/METADATA595
-rw-r--r--third_party/python/taskcluster/taskcluster-44.2.2.dist-info/RECORD92
-rw-r--r--third_party/python/taskcluster/taskcluster-44.2.2.dist-info/WHEEL5
-rw-r--r--third_party/python/taskcluster/taskcluster-44.2.2.dist-info/top_level.txt1
-rw-r--r--third_party/python/taskcluster/taskcluster/__init__.py18
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/__init__.py16
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/asyncclient.py306
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/asyncutils.py147
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/auth.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/authevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/awsprovisioner.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/download.py191
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/ec2manager.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/github.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/githubevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/hooks.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/hooksevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/index.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/login.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/notify.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/notifyevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/purgecache.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/queue.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/queueevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/reader_writer.py81
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/retry.py41
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/secrets.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/upload.py177
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/workermanager.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/aio/workermanagerevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/auth.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/authevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/awsprovisioner.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/client.py711
-rw-r--r--third_party/python/taskcluster/taskcluster/download.py94
-rw-r--r--third_party/python/taskcluster/taskcluster/ec2manager.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/exceptions.py43
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/__init__.py0
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/_client_importer.py20
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/__init__.py0
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/_client_importer.py20
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/auth.py781
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/authevents.py180
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/github.py197
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/githubevents.py199
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/hooks.py300
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/hooksevents.py101
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/index.py204
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/notify.py207
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/notifyevents.py68
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/object.py187
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/purgecache.py123
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/queue.py1120
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/queueevents.py719
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/secrets.py143
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/workermanager.py406
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/aio/workermanagerevents.py91
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/auth.py781
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/authevents.py180
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/github.py197
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/githubevents.py199
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/hooks.py300
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/hooksevents.py101
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/index.py204
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/notify.py207
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/notifyevents.py68
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/object.py187
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/purgecache.py123
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/queue.py1120
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/queueevents.py719
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/secrets.py143
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/workermanager.py406
-rw-r--r--third_party/python/taskcluster/taskcluster/generated/workermanagerevents.py91
-rw-r--r--third_party/python/taskcluster/taskcluster/github.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/githubevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/helper.py185
-rw-r--r--third_party/python/taskcluster/taskcluster/hooks.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/hooksevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/index.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/login.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/notify.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/notifyevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/purgecache.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/queue.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/queueevents.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/retry.py41
-rw-r--r--third_party/python/taskcluster/taskcluster/secrets.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/upload.py65
-rw-r--r--third_party/python/taskcluster/taskcluster/utils.py354
-rw-r--r--third_party/python/taskcluster/taskcluster/workermanager.py2
-rw-r--r--third_party/python/taskcluster/taskcluster/workermanagerevents.py2
-rw-r--r--third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/LICENSE373
-rw-r--r--third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/METADATA33
-rw-r--r--third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/RECORD74
-rw-r--r--third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/WHEEL5
-rw-r--r--third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt3
-rw-r--r--third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/top_level.txt1
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/__init__.py15
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/actions/__init__.py16
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py64
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel.py42
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py61
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/actions/registry.py352
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py301
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py282
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/config.py136
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/create.py132
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/decision.py377
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/docker.py215
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/files_changed.py91
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/filter_tasks.py34
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/generator.py449
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/graph.py134
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/loader/__init__.py0
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/loader/transform.py58
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/main.py756
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/morph.py271
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/optimize/__init__.py8
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/optimize/base.py551
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py65
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/parameters.py369
-rwxr-xr-xthird_party/python/taskcluster_taskgraph/taskgraph/run-task/fetch-content899
-rwxr-xr-xthird_party/python/taskcluster_taskgraph/taskgraph/run-task/hgrc33
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/run-task/robustcheckout.py826
-rwxr-xr-xthird_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task1307
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/target_tasks.py107
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/task.py84
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/taskgraph.py72
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py0
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py157
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/cached_tasks.py90
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/code_review.py23
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py213
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py335
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py438
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py196
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/index_search.py37
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py240
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py174
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/release_notifications.py100
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py1288
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/__init__.py0
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/archive.py86
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/attributes.py84
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/cached_tasks.py86
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/decision.py79
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/docker.py342
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py54
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/keyed_by.py97
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/memoize.py40
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/parameterization.py97
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/path.py172
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/python_path.py52
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/readonlydict.py22
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/schema.py260
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/shell.py40
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/taskcluster.py373
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/taskgraph.py54
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/templates.py50
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/time.py115
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py64
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py539
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py283
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/workertypes.py75
-rw-r--r--third_party/python/taskcluster_taskgraph/taskgraph/util/yaml.py36
-rw-r--r--third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/LICENSE373
-rw-r--r--third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/METADATA291
-rw-r--r--third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/RECORD6
-rw-r--r--third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/WHEEL5
-rw-r--r--third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/top_level.txt1
-rw-r--r--third_party/python/taskcluster_urls/taskcluster_urls/__init__.py63
-rw-r--r--third_party/python/toml/toml-0.10.2.dist-info/LICENSE27
-rw-r--r--third_party/python/toml/toml-0.10.2.dist-info/METADATA255
-rw-r--r--third_party/python/toml/toml-0.10.2.dist-info/RECORD10
-rw-r--r--third_party/python/toml/toml-0.10.2.dist-info/WHEEL6
-rw-r--r--third_party/python/toml/toml-0.10.2.dist-info/top_level.txt1
-rw-r--r--third_party/python/toml/toml/__init__.py25
-rw-r--r--third_party/python/toml/toml/decoder.py1057
-rw-r--r--third_party/python/toml/toml/encoder.py304
-rw-r--r--third_party/python/toml/toml/ordered.py15
-rw-r--r--third_party/python/toml/toml/tz.py24
-rw-r--r--third_party/python/tqdm/tqdm-4.62.3.dist-info/LICENCE49
-rw-r--r--third_party/python/tqdm/tqdm-4.62.3.dist-info/METADATA1585
-rw-r--r--third_party/python/tqdm/tqdm-4.62.3.dist-info/RECORD39
-rw-r--r--third_party/python/tqdm/tqdm-4.62.3.dist-info/WHEEL6
-rw-r--r--third_party/python/tqdm/tqdm-4.62.3.dist-info/entry_points.txt3
-rw-r--r--third_party/python/tqdm/tqdm-4.62.3.dist-info/top_level.txt1
-rw-r--r--third_party/python/tqdm/tqdm/__init__.py41
-rw-r--r--third_party/python/tqdm/tqdm/__main__.py3
-rw-r--r--third_party/python/tqdm/tqdm/_dist_ver.py1
-rw-r--r--third_party/python/tqdm/tqdm/_main.py9
-rw-r--r--third_party/python/tqdm/tqdm/_monitor.py95
-rw-r--r--third_party/python/tqdm/tqdm/_tqdm.py9
-rw-r--r--third_party/python/tqdm/tqdm/_tqdm_gui.py9
-rw-r--r--third_party/python/tqdm/tqdm/_tqdm_notebook.py9
-rw-r--r--third_party/python/tqdm/tqdm/_tqdm_pandas.py24
-rw-r--r--third_party/python/tqdm/tqdm/_utils.py12
-rw-r--r--third_party/python/tqdm/tqdm/asyncio.py93
-rw-r--r--third_party/python/tqdm/tqdm/auto.py44
-rw-r--r--third_party/python/tqdm/tqdm/autonotebook.py28
-rw-r--r--third_party/python/tqdm/tqdm/cli.py308
-rwxr-xr-xthird_party/python/tqdm/tqdm/completion.sh19
-rw-r--r--third_party/python/tqdm/tqdm/contrib/__init__.py98
-rw-r--r--third_party/python/tqdm/tqdm/contrib/bells.py24
-rw-r--r--third_party/python/tqdm/tqdm/contrib/concurrent.py130
-rw-r--r--third_party/python/tqdm/tqdm/contrib/discord.py121
-rw-r--r--third_party/python/tqdm/tqdm/contrib/itertools.py36
-rw-r--r--third_party/python/tqdm/tqdm/contrib/logging.py128
-rw-r--r--third_party/python/tqdm/tqdm/contrib/telegram.py159
-rw-r--r--third_party/python/tqdm/tqdm/contrib/utils_worker.py40
-rw-r--r--third_party/python/tqdm/tqdm/dask.py46
-rw-r--r--third_party/python/tqdm/tqdm/gui.py191
-rw-r--r--third_party/python/tqdm/tqdm/keras.py124
-rw-r--r--third_party/python/tqdm/tqdm/notebook.py327
-rw-r--r--third_party/python/tqdm/tqdm/rich.py152
-rw-r--r--third_party/python/tqdm/tqdm/std.py1526
-rw-r--r--third_party/python/tqdm/tqdm/tk.py207
-rw-r--r--third_party/python/tqdm/tqdm/tqdm.1316
-rw-r--r--third_party/python/tqdm/tqdm/utils.py354
-rw-r--r--third_party/python/tqdm/tqdm/version.py9
-rw-r--r--third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/LICENSE254
-rw-r--r--third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/METADATA45
-rw-r--r--third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/RECORD6
-rw-r--r--third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/WHEEL5
-rw-r--r--third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/typing_extensions/typing_extensions.py2805
-rw-r--r--third_party/python/urllib3/urllib3-1.26.0.dist-info/LICENSE.txt21
-rw-r--r--third_party/python/urllib3/urllib3-1.26.0.dist-info/METADATA1335
-rw-r--r--third_party/python/urllib3/urllib3-1.26.0.dist-info/RECORD44
-rw-r--r--third_party/python/urllib3/urllib3-1.26.0.dist-info/WHEEL6
-rw-r--r--third_party/python/urllib3/urllib3-1.26.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/urllib3/urllib3/__init__.py85
-rw-r--r--third_party/python/urllib3/urllib3/_collections.py337
-rw-r--r--third_party/python/urllib3/urllib3/_version.py2
-rw-r--r--third_party/python/urllib3/urllib3/connection.py534
-rw-r--r--third_party/python/urllib3/urllib3/connectionpool.py1067
-rw-r--r--third_party/python/urllib3/urllib3/contrib/__init__.py0
-rw-r--r--third_party/python/urllib3/urllib3/contrib/_appengine_environ.py36
-rw-r--r--third_party/python/urllib3/urllib3/contrib/_securetransport/__init__.py0
-rw-r--r--third_party/python/urllib3/urllib3/contrib/_securetransport/bindings.py519
-rw-r--r--third_party/python/urllib3/urllib3/contrib/_securetransport/low_level.py396
-rw-r--r--third_party/python/urllib3/urllib3/contrib/appengine.py314
-rw-r--r--third_party/python/urllib3/urllib3/contrib/ntlmpool.py121
-rw-r--r--third_party/python/urllib3/urllib3/contrib/pyopenssl.py509
-rw-r--r--third_party/python/urllib3/urllib3/contrib/securetransport.py920
-rw-r--r--third_party/python/urllib3/urllib3/contrib/socks.py216
-rw-r--r--third_party/python/urllib3/urllib3/exceptions.py313
-rw-r--r--third_party/python/urllib3/urllib3/fields.py274
-rw-r--r--third_party/python/urllib3/urllib3/filepost.py98
-rw-r--r--third_party/python/urllib3/urllib3/packages/__init__.py5
-rw-r--r--third_party/python/urllib3/urllib3/packages/backports/__init__.py0
-rw-r--r--third_party/python/urllib3/urllib3/packages/backports/makefile.py51
-rw-r--r--third_party/python/urllib3/urllib3/packages/six.py1021
-rw-r--r--third_party/python/urllib3/urllib3/packages/ssl_match_hostname/__init__.py22
-rw-r--r--third_party/python/urllib3/urllib3/packages/ssl_match_hostname/_implementation.py160
-rw-r--r--third_party/python/urllib3/urllib3/poolmanager.py536
-rw-r--r--third_party/python/urllib3/urllib3/request.py170
-rw-r--r--third_party/python/urllib3/urllib3/response.py821
-rw-r--r--third_party/python/urllib3/urllib3/util/__init__.py49
-rw-r--r--third_party/python/urllib3/urllib3/util/connection.py150
-rw-r--r--third_party/python/urllib3/urllib3/util/proxy.py56
-rw-r--r--third_party/python/urllib3/urllib3/util/queue.py22
-rw-r--r--third_party/python/urllib3/urllib3/util/request.py143
-rw-r--r--third_party/python/urllib3/urllib3/util/response.py107
-rw-r--r--third_party/python/urllib3/urllib3/util/retry.py601
-rw-r--r--third_party/python/urllib3/urllib3/util/ssl_.py466
-rw-r--r--third_party/python/urllib3/urllib3/util/ssltransport.py221
-rw-r--r--third_party/python/urllib3/urllib3/util/timeout.py268
-rw-r--r--third_party/python/urllib3/urllib3/util/url.py430
-rw-r--r--third_party/python/urllib3/urllib3/util/wait.py153
-rw-r--r--third_party/python/voluptuous/voluptuous-0.12.1.dist-info/COPYING25
-rw-r--r--third_party/python/voluptuous/voluptuous-0.12.1.dist-info/METADATA760
-rw-r--r--third_party/python/voluptuous/voluptuous-0.12.1.dist-info/RECORD11
-rw-r--r--third_party/python/voluptuous/voluptuous-0.12.1.dist-info/WHEEL5
-rw-r--r--third_party/python/voluptuous/voluptuous-0.12.1.dist-info/top_level.txt1
-rw-r--r--third_party/python/voluptuous/voluptuous/__init__.py9
-rw-r--r--third_party/python/voluptuous/voluptuous/error.py199
-rw-r--r--third_party/python/voluptuous/voluptuous/humanize.py40
-rw-r--r--third_party/python/voluptuous/voluptuous/schema_builder.py1301
-rw-r--r--third_party/python/voluptuous/voluptuous/util.py162
-rw-r--r--third_party/python/voluptuous/voluptuous/validators.py1080
-rw-r--r--third_party/python/vsdownload/LICENSE.txt20
-rw-r--r--third_party/python/vsdownload/moz.yaml45
-rwxr-xr-xthird_party/python/vsdownload/vsdownload.py635
-rw-r--r--third_party/python/wcwidth/wcwidth-0.2.5.dist-info/LICENSE27
-rw-r--r--third_party/python/wcwidth/wcwidth-0.2.5.dist-info/METADATA309
-rw-r--r--third_party/python/wcwidth/wcwidth-0.2.5.dist-info/RECORD14
-rw-r--r--third_party/python/wcwidth/wcwidth-0.2.5.dist-info/WHEEL6
-rw-r--r--third_party/python/wcwidth/wcwidth-0.2.5.dist-info/top_level.txt1
-rw-r--r--third_party/python/wcwidth/wcwidth-0.2.5.dist-info/zip-safe1
-rw-r--r--third_party/python/wcwidth/wcwidth/__init__.py37
-rw-r--r--third_party/python/wcwidth/wcwidth/table_wide.py1102
-rw-r--r--third_party/python/wcwidth/wcwidth/table_zero.py3910
-rw-r--r--third_party/python/wcwidth/wcwidth/unicode_versions.py35
-rw-r--r--third_party/python/wcwidth/wcwidth/version.json1
-rw-r--r--third_party/python/wcwidth/wcwidth/wcwidth.py375
-rw-r--r--third_party/python/wheel/wheel-0.37.0.dist-info/LICENSE.txt22
-rw-r--r--third_party/python/wheel/wheel-0.37.0.dist-info/METADATA69
-rw-r--r--third_party/python/wheel/wheel-0.37.0.dist-info/RECORD22
-rw-r--r--third_party/python/wheel/wheel-0.37.0.dist-info/WHEEL6
-rw-r--r--third_party/python/wheel/wheel-0.37.0.dist-info/entry_points.txt6
-rw-r--r--third_party/python/wheel/wheel-0.37.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/wheel/wheel/__init__.py1
-rw-r--r--third_party/python/wheel/wheel/__main__.py19
-rw-r--r--third_party/python/wheel/wheel/bdist_wheel.py492
-rw-r--r--third_party/python/wheel/wheel/cli/__init__.py88
-rw-r--r--third_party/python/wheel/wheel/cli/convert.py269
-rw-r--r--third_party/python/wheel/wheel/cli/pack.py79
-rw-r--r--third_party/python/wheel/wheel/cli/unpack.py25
-rw-r--r--third_party/python/wheel/wheel/macosx_libfile.py428
-rw-r--r--third_party/python/wheel/wheel/metadata.py133
-rw-r--r--third_party/python/wheel/wheel/pkginfo.py43
-rw-r--r--third_party/python/wheel/wheel/util.py46
-rw-r--r--third_party/python/wheel/wheel/vendored/__init__.py0
-rw-r--r--third_party/python/wheel/wheel/vendored/packaging/__init__.py0
-rw-r--r--third_party/python/wheel/wheel/vendored/packaging/_typing.py48
-rw-r--r--third_party/python/wheel/wheel/vendored/packaging/tags.py866
-rw-r--r--third_party/python/wheel/wheel/wheelfile.py169
-rw-r--r--third_party/python/yamllint/yamllint-1.23.0.dist-info/LICENSE674
-rw-r--r--third_party/python/yamllint/yamllint-1.23.0.dist-info/METADATA34
-rw-r--r--third_party/python/yamllint/yamllint-1.23.0.dist-info/RECORD37
-rw-r--r--third_party/python/yamllint/yamllint-1.23.0.dist-info/WHEEL6
-rw-r--r--third_party/python/yamllint/yamllint-1.23.0.dist-info/entry_points.txt3
-rw-r--r--third_party/python/yamllint/yamllint-1.23.0.dist-info/top_level.txt1
-rw-r--r--third_party/python/yamllint/yamllint/__init__.py31
-rw-r--r--third_party/python/yamllint/yamllint/__main__.py4
-rw-r--r--third_party/python/yamllint/yamllint/cli.py207
-rw-r--r--third_party/python/yamllint/yamllint/conf/default.yaml33
-rw-r--r--third_party/python/yamllint/yamllint/conf/relaxed.yaml29
-rw-r--r--third_party/python/yamllint/yamllint/config.py205
-rw-r--r--third_party/python/yamllint/yamllint/linter.py240
-rw-r--r--third_party/python/yamllint/yamllint/parser.py161
-rw-r--r--third_party/python/yamllint/yamllint/rules/__init__.py70
-rw-r--r--third_party/python/yamllint/yamllint/rules/braces.py143
-rw-r--r--third_party/python/yamllint/yamllint/rules/brackets.py145
-rw-r--r--third_party/python/yamllint/yamllint/rules/colons.py105
-rw-r--r--third_party/python/yamllint/yamllint/rules/commas.py131
-rw-r--r--third_party/python/yamllint/yamllint/rules/comments.py104
-rw-r--r--third_party/python/yamllint/yamllint/rules/comments_indentation.py139
-rw-r--r--third_party/python/yamllint/yamllint/rules/common.py89
-rw-r--r--third_party/python/yamllint/yamllint/rules/document_end.py107
-rw-r--r--third_party/python/yamllint/yamllint/rules/document_start.py93
-rw-r--r--third_party/python/yamllint/yamllint/rules/empty_lines.py108
-rw-r--r--third_party/python/yamllint/yamllint/rules/empty_values.py96
-rw-r--r--third_party/python/yamllint/yamllint/rules/hyphens.py88
-rw-r--r--third_party/python/yamllint/yamllint/rules/indentation.py575
-rw-r--r--third_party/python/yamllint/yamllint/rules/key_duplicates.py100
-rw-r--r--third_party/python/yamllint/yamllint/rules/key_ordering.py109
-rw-r--r--third_party/python/yamllint/yamllint/rules/line_length.py149
-rw-r--r--third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py37
-rw-r--r--third_party/python/yamllint/yamllint/rules/new_lines.py46
-rw-r--r--third_party/python/yamllint/yamllint/rules/octal_values.py95
-rw-r--r--third_party/python/yamllint/yamllint/rules/quoted_strings.py230
-rw-r--r--third_party/python/yamllint/yamllint/rules/trailing_spaces.py62
-rw-r--r--third_party/python/yamllint/yamllint/rules/truthy.py149
-rw-r--r--third_party/python/yarl/CHANGES.rst572
-rw-r--r--third_party/python/yarl/LICENSE201
-rw-r--r--third_party/python/yarl/MANIFEST.in13
-rw-r--r--third_party/python/yarl/PKG-INFO797
-rw-r--r--third_party/python/yarl/README.rst202
-rw-r--r--third_party/python/yarl/pyproject.toml7
-rw-r--r--third_party/python/yarl/setup.cfg27
-rw-r--r--third_party/python/yarl/setup.py83
-rw-r--r--third_party/python/yarl/yarl.egg-info/PKG-INFO797
-rw-r--r--third_party/python/yarl/yarl.egg-info/SOURCES.txt42
-rw-r--r--third_party/python/yarl/yarl.egg-info/dependency_links.txt1
-rw-r--r--third_party/python/yarl/yarl.egg-info/requires.txt5
-rw-r--r--third_party/python/yarl/yarl.egg-info/top_level.txt1
-rw-r--r--third_party/python/yarl/yarl/__init__.py5
-rw-r--r--third_party/python/yarl/yarl/__init__.pyi111
-rw-r--r--third_party/python/yarl/yarl/_quoting.py18
-rw-r--r--third_party/python/yarl/yarl/_quoting_c.c11612
-rw-r--r--third_party/python/yarl/yarl/_quoting_c.pyi16
-rw-r--r--third_party/python/yarl/yarl/_quoting_c.pyx371
-rw-r--r--third_party/python/yarl/yarl/_quoting_py.py198
-rw-r--r--third_party/python/yarl/yarl/_url.py1144
-rw-r--r--third_party/python/yarl/yarl/py.typed1
-rw-r--r--third_party/python/zipp/zipp-3.4.1.dist-info/LICENSE19
-rw-r--r--third_party/python/zipp/zipp-3.4.1.dist-info/METADATA54
-rw-r--r--third_party/python/zipp/zipp-3.4.1.dist-info/RECORD6
-rw-r--r--third_party/python/zipp/zipp-3.4.1.dist-info/WHEEL5
-rw-r--r--third_party/python/zipp/zipp-3.4.1.dist-info/top_level.txt1
-rw-r--r--third_party/python/zipp/zipp.py314
4083 files changed, 791729 insertions, 0 deletions
diff --git a/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/LICENSE.rst b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/LICENSE.rst
new file mode 100644
index 0000000000..c37cae49ec
--- /dev/null
+++ b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2007 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/METADATA b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/METADATA
new file mode 100644
index 0000000000..1af8df0f71
--- /dev/null
+++ b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/METADATA
@@ -0,0 +1,106 @@
+Metadata-Version: 2.1
+Name: Jinja2
+Version: 2.11.3
+Summary: A very fast and expressive template engine.
+Home-page: https://palletsprojects.com/p/jinja/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Documentation, https://jinja.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/jinja
+Project-URL: Issue tracker, https://github.com/pallets/jinja/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+Description-Content-Type: text/x-rst
+Requires-Dist: MarkupSafe (>=0.23)
+Provides-Extra: i18n
+Requires-Dist: Babel (>=0.8) ; extra == 'i18n'
+
+Jinja
+=====
+
+Jinja is a fast, expressive, extensible templating engine. Special
+placeholders in the template allow writing code similar to Python
+syntax. Then the template is passed data to render the final document.
+
+It includes:
+
+- Template inheritance and inclusion.
+- Define and import macros within templates.
+- HTML templates can use autoescaping to prevent XSS from untrusted
+ user input.
+- A sandboxed environment can safely render untrusted templates.
+- AsyncIO support for generating templates and calling async
+ functions.
+- I18N support with Babel.
+- Templates are compiled to optimized Python code just-in-time and
+ cached, or can be compiled ahead-of-time.
+- Exceptions point to the correct line in templates to make debugging
+ easier.
+- Extensible filters, tests, functions, and even syntax.
+
+Jinja's philosophy is that while application logic belongs in Python if
+possible, it shouldn't make the template designer's job difficult by
+restricting functionality too much.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+ $ pip install -U Jinja2
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+In A Nutshell
+-------------
+
+.. code-block:: jinja
+
+ {% extends "base.html" %}
+ {% block title %}Members{% endblock %}
+ {% block content %}
+ <ul>
+ {% for user in users %}
+ <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+ {% endfor %}
+ </ul>
+ {% endblock %}
+
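+Rendering the template above from Python might look like this (a minimal
+sketch; ``base.html``, the ``templates`` directory, and the ``users``
+data are assumptions, not part of this package):
+
+.. code-block:: python
+
+    from jinja2 import Environment, FileSystemLoader
+
+    env = Environment(loader=FileSystemLoader("templates"), autoescape=True)
+    template = env.get_template("members.html")
+    print(template.render(users=[{"url": "/u/1", "username": "alice"}]))
+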
+
+Links
+-----
+
+- Website: https://palletsprojects.com/p/jinja/
+- Documentation: https://jinja.palletsprojects.com/
+- Releases: https://pypi.org/project/Jinja2/
+- Code: https://github.com/pallets/jinja
+- Issue tracker: https://github.com/pallets/jinja/issues
+- Test status: https://dev.azure.com/pallets/jinja/_build
+- Official chat: https://discord.gg/t6rrQZH
+
+
diff --git a/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/RECORD b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/RECORD
new file mode 100644
index 0000000000..5362e8cb49
--- /dev/null
+++ b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/RECORD
@@ -0,0 +1,33 @@
+jinja2/__init__.py,sha256=LZUXmxJc2GIchfSAeMWsxCWiQYO-w1-736f2Q3I8ms8,1549
+jinja2/_compat.py,sha256=B6Se8HjnXVpzz9-vfHejn-DV2NjaVK-Iewupc5kKlu8,3191
+jinja2/_identifier.py,sha256=EdgGJKi7O1yvr4yFlvqPNEqV6M1qHyQr8Gt8GmVTKVM,1775
+jinja2/asyncfilters.py,sha256=XJtYXTxFvcJ5xwk6SaDL4S0oNnT0wPYvXBCSzc482fI,4250
+jinja2/asyncsupport.py,sha256=ZBFsDLuq3Gtji3Ia87lcyuDbqaHZJRdtShZcqwpFnSQ,7209
+jinja2/bccache.py,sha256=3Pmp4jo65M9FQuIxdxoDBbEDFwe4acDMQf77nEJfrHA,12139
+jinja2/compiler.py,sha256=Ta9W1Lit542wItAHXlDcg0sEOsFDMirCdlFPHAurg4o,66284
+jinja2/constants.py,sha256=RR1sTzNzUmKco6aZicw4JpQpJGCuPuqm1h1YmCNUEFY,1458
+jinja2/debug.py,sha256=neR7GIGGjZH3_ILJGVUYy3eLQCCaWJMXOb7o0kGInWc,8529
+jinja2/defaults.py,sha256=85B6YUUCyWPSdrSeVhcqFVuu_bHUAQXeey--FIwSeVQ,1126
+jinja2/environment.py,sha256=XDSLKc4SqNLMOwTSq3TbWEyA5WyXfuLuVD0wAVjEFwM,50629
+jinja2/exceptions.py,sha256=VjNLawcmf2ODffqVMCQK1cRmvFaUfQWF4u8ouP3QPcE,5425
+jinja2/ext.py,sha256=AtwL5O5enT_L3HR9-oBvhGyUTdGoyaqG_ICtnR_EVd4,26441
+jinja2/filters.py,sha256=9ORilsZrUoydSI9upz8_qGy7gozDWLYoFmlIBFSVRnQ,41439
+jinja2/idtracking.py,sha256=J3O4VHsrbf3wzwiBc7Cro26kHb6_5kbULeIOzocchIU,9211
+jinja2/lexer.py,sha256=nUFLRKhhKmmEWkLI65nQePgcQs7qsRdjVYZETMt_v0g,30331
+jinja2/loaders.py,sha256=C-fST_dmFjgWkp0ZuCkrgICAoOsoSIF28wfAFink0oU,17666
+jinja2/meta.py,sha256=QjyYhfNRD3QCXjBJpiPl9KgkEkGXJbAkCUq4-Ur10EQ,4131
+jinja2/nativetypes.py,sha256=Ul__gtVw4xH-0qvUvnCNHedQeNDwmEuyLJztzzSPeRg,2753
+jinja2/nodes.py,sha256=Mk1oJPVgIjnQw9WOqILvcu3rLepcFZ0ahxQm2mbwDwc,31095
+jinja2/optimizer.py,sha256=gQLlMYzvQhluhzmAIFA1tXS0cwgWYOjprN-gTRcHVsc,1457
+jinja2/parser.py,sha256=fcfdqePNTNyvosIvczbytVA332qpsURvYnCGcjDHSkA,35660
+jinja2/runtime.py,sha256=0y-BRyIEZ9ltByL2Id6GpHe1oDRQAwNeQvI0SKobNMw,30618
+jinja2/sandbox.py,sha256=knayyUvXsZ-F0mk15mO2-ehK9gsw04UhB8td-iUOtLc,17127
+jinja2/tests.py,sha256=iO_Y-9Vo60zrVe1lMpSl5sKHqAxe2leZHC08OoZ8K24,4799
+jinja2/utils.py,sha256=Wy4yC3IByqUWwnKln6SdaixdzgK74P6F5nf-gQZrYnU,22436
+jinja2/visitor.py,sha256=DUHupl0a4PGp7nxRtZFttUzAi1ccxzqc2hzetPYUz8U,3240
+Jinja2-2.11.3.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
+Jinja2-2.11.3.dist-info/METADATA,sha256=PscpJ1C3RSp8xcjV3fAuTz13rKbGxmzJXnMQFH-WKhs,3535
+Jinja2-2.11.3.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+Jinja2-2.11.3.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61
+Jinja2-2.11.3.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+Jinja2-2.11.3.dist-info/RECORD,,
diff --git a/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/WHEEL b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/WHEEL
new file mode 100644
index 0000000000..01b8fc7d4a
--- /dev/null
+++ b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/entry_points.txt b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/entry_points.txt
new file mode 100644
index 0000000000..3619483fd4
--- /dev/null
+++ b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[babel.extractors]
+jinja2 = jinja2.ext:babel_extract [i18n]
+
diff --git a/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/top_level.txt b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/top_level.txt
new file mode 100644
index 0000000000..7f7afbf3bf
--- /dev/null
+++ b/third_party/python/Jinja2/Jinja2-2.11.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+jinja2
diff --git a/third_party/python/Jinja2/jinja2/__init__.py b/third_party/python/Jinja2/jinja2/__init__.py
new file mode 100644
index 0000000000..f17866f6c4
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/__init__.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+"""Jinja is a template engine written in pure Python. It provides a
+non-XML syntax that supports inline expressions and an optional
+sandboxed environment.
+"""
+from markupsafe import escape
+from markupsafe import Markup
+
+from .bccache import BytecodeCache
+from .bccache import FileSystemBytecodeCache
+from .bccache import MemcachedBytecodeCache
+from .environment import Environment
+from .environment import Template
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateError
+from .exceptions import TemplateNotFound
+from .exceptions import TemplateRuntimeError
+from .exceptions import TemplatesNotFound
+from .exceptions import TemplateSyntaxError
+from .exceptions import UndefinedError
+from .filters import contextfilter
+from .filters import environmentfilter
+from .filters import evalcontextfilter
+from .loaders import BaseLoader
+from .loaders import ChoiceLoader
+from .loaders import DictLoader
+from .loaders import FileSystemLoader
+from .loaders import FunctionLoader
+from .loaders import ModuleLoader
+from .loaders import PackageLoader
+from .loaders import PrefixLoader
+from .runtime import ChainableUndefined
+from .runtime import DebugUndefined
+from .runtime import make_logging_undefined
+from .runtime import StrictUndefined
+from .runtime import Undefined
+from .utils import clear_caches
+from .utils import contextfunction
+from .utils import environmentfunction
+from .utils import evalcontextfunction
+from .utils import is_undefined
+from .utils import select_autoescape
+
+__version__ = "2.11.3"
diff --git a/third_party/python/Jinja2/jinja2/_compat.py b/third_party/python/Jinja2/jinja2/_compat.py
new file mode 100644
index 0000000000..1f044954a0
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/_compat.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+# flake8: noqa
+import marshal
+import sys
+
+PY2 = sys.version_info[0] == 2
+PYPY = hasattr(sys, "pypy_translation_info")
+_identity = lambda x: x
+
+if not PY2:
+ unichr = chr
+ range_type = range
+ text_type = str
+ string_types = (str,)
+ integer_types = (int,)
+
+ iterkeys = lambda d: iter(d.keys())
+ itervalues = lambda d: iter(d.values())
+ iteritems = lambda d: iter(d.items())
+
+ import pickle
+ from io import BytesIO, StringIO
+
+ NativeStringIO = StringIO
+
+ def reraise(tp, value, tb=None):
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+ ifilter = filter
+ imap = map
+ izip = zip
+ intern = sys.intern
+
+ implements_iterator = _identity
+ implements_to_string = _identity
+ encode_filename = _identity
+
+ marshal_dump = marshal.dump
+ marshal_load = marshal.load
+
+else:
+ unichr = unichr
+ text_type = unicode
+ range_type = xrange
+ string_types = (str, unicode)
+ integer_types = (int, long)
+
+ iterkeys = lambda d: d.iterkeys()
+ itervalues = lambda d: d.itervalues()
+ iteritems = lambda d: d.iteritems()
+
+ import cPickle as pickle
+ from cStringIO import StringIO as BytesIO, StringIO
+
+ NativeStringIO = BytesIO
+
+ exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
+
+ from itertools import imap, izip, ifilter
+
+ intern = intern
+
+ def implements_iterator(cls):
+ cls.next = cls.__next__
+ del cls.__next__
+ return cls
+
+ def implements_to_string(cls):
+ cls.__unicode__ = cls.__str__
+ cls.__str__ = lambda x: x.__unicode__().encode("utf-8")
+ return cls
+
+ def encode_filename(filename):
+ if isinstance(filename, unicode):
+ return filename.encode("utf-8")
+ return filename
+
+ def marshal_dump(code, f):
+ if isinstance(f, file):
+ marshal.dump(code, f)
+ else:
+ f.write(marshal.dumps(code))
+
+ def marshal_load(f):
+ if isinstance(f, file):
+ return marshal.load(f)
+ return marshal.loads(f.read())
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a
+ # dummy metaclass for one level of class instantiation that replaces
+ # itself with the actual metaclass.
+ class metaclass(type):
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+
+ return type.__new__(metaclass, "temporary_class", (), {})
+
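+# Illustrative use only (a sketch; jinja2.nodes uses this helper the
+# same way, but this example is not part of the file upstream):
+#
+#     class Node(with_metaclass(NodeType, object)):
+#         ...
+#
+# This works on both Python 2 and 3, without the py2-only
+# ``__metaclass__`` attribute or the py3-only ``metaclass=`` keyword.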
+
+try:
+ from urllib.parse import quote_from_bytes as url_quote
+except ImportError:
+ from urllib import quote as url_quote
+
+
+try:
+ from collections import abc
+except ImportError:
+ import collections as abc
+
+
+try:
+ from os import fspath
+except ImportError:
+ try:
+ from pathlib import PurePath
+ except ImportError:
+ PurePath = None
+
+ def fspath(path):
+ if hasattr(path, "__fspath__"):
+ return path.__fspath__()
+
+ # Python 3.5 doesn't have __fspath__ yet, use str.
+ if PurePath is not None and isinstance(path, PurePath):
+ return str(path)
+
+ return path
diff --git a/third_party/python/Jinja2/jinja2/_identifier.py b/third_party/python/Jinja2/jinja2/_identifier.py
new file mode 100644
index 0000000000..224d5449d1
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/_identifier.py
@@ -0,0 +1,6 @@
+import re
+
+# generated by scripts/generate_identifier_pattern.py
+pattern = re.compile(
+ r"[\w·̀-ͯ·҃-֑҇-ׇֽֿׁׂׅׄؐ-ًؚ-ٰٟۖ-ۜ۟-۪ۤۧۨ-ܑۭܰ-݊ަ-ް߫-߳ࠖ-࠙ࠛ-ࠣࠥ-ࠧࠩ-࡙࠭-࡛ࣔ-ࣣ࣡-ःऺ-़ा-ॏ॑-ॗॢॣঁ-ঃ়া-ৄেৈো-্ৗৢৣਁ-ਃ਼ਾ-ੂੇੈੋ-੍ੑੰੱੵઁ-ઃ઼ા-ૅે-ૉો-્ૢૣଁ-ଃ଼ା-ୄେୈୋ-୍ୖୗୢୣஂா-ூெ-ைொ-்ௗఀ-ఃా-ౄె-ైొ-్ౕౖౢౣಁ-ಃ಼ಾ-ೄೆ-ೈೊ-್ೕೖೢೣഁ-ഃാ-ൄെ-ൈൊ-്ൗൢൣංඃ්ා-ුූෘ-ෟෲෳัิ-ฺ็-๎ັິ-ູົຼ່-ໍ༹༘༙༵༷༾༿ཱ-྄྆྇ྍ-ྗྙ-ྼ࿆ါ-ှၖ-ၙၞ-ၠၢ-ၤၧ-ၭၱ-ၴႂ-ႍႏႚ-ႝ፝-፟ᜒ-᜔ᜲ-᜴ᝒᝓᝲᝳ឴-៓៝᠋-᠍ᢅᢆᢩᤠ-ᤫᤰ-᤻ᨗ-ᨛᩕ-ᩞ᩠-᩿᩼᪰-᪽ᬀ-ᬄ᬴-᭄᭫-᭳ᮀ-ᮂᮡ-ᮭ᯦-᯳ᰤ-᰷᳐-᳔᳒-᳨᳭ᳲ-᳴᳸᳹᷀-᷵᷻-᷿‿⁀⁔⃐-⃥⃜⃡-⃰℘℮⳯-⵿⳱ⷠ-〪ⷿ-゙゚〯꙯ꙴ-꙽ꚞꚟ꛰꛱ꠂ꠆ꠋꠣ-ꠧꢀꢁꢴ-ꣅ꣠-꣱ꤦ-꤭ꥇ-꥓ꦀ-ꦃ꦳-꧀ꧥꨩ-ꨶꩃꩌꩍꩻ-ꩽꪰꪲ-ꪴꪷꪸꪾ꪿꫁ꫫ-ꫯꫵ꫶ꯣ-ꯪ꯬꯭ﬞ︀-️︠-︯︳︴﹍-﹏_𐇽𐋠𐍶-𐍺𐨁-𐨃𐨅𐨆𐨌-𐨏𐨸-𐨿𐨺𐫦𐫥𑀀-𑀂𑀸-𑁆𑁿-𑂂𑂰-𑂺𑄀-𑄂𑄧-𑅳𑄴𑆀-𑆂𑆳-𑇊𑇀-𑇌𑈬-𑈷𑈾𑋟-𑋪𑌀-𑌃𑌼𑌾-𑍄𑍇𑍈𑍋-𑍍𑍗𑍢𑍣𑍦-𑍬𑍰-𑍴𑐵-𑑆𑒰-𑓃𑖯-𑖵𑖸-𑗀𑗜𑗝𑘰-𑙀𑚫-𑚷𑜝-𑜫𑰯-𑰶𑰸-𑰿𑲒-𑲧𑲩-𑲶𖫰-𖫴𖬰-𖬶𖽑-𖽾𖾏-𖾒𛲝𛲞𝅥-𝅩𝅭-𝅲𝅻-𝆂𝆅-𝆋𝆪-𝆭𝉂-𝉄𝨀-𝨶𝨻-𝩬𝩵𝪄𝪛-𝪟𝪡-𝪯𞀀-𞀆𞀈-𞀘𞀛-𞀡𞀣𞀤𞀦-𞣐𞀪-𞣖𞥄-𞥊󠄀-󠇯]+" # noqa: B950
+)
diff --git a/third_party/python/Jinja2/jinja2/asyncfilters.py b/third_party/python/Jinja2/jinja2/asyncfilters.py
new file mode 100644
index 0000000000..3d98dbcc00
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/asyncfilters.py
@@ -0,0 +1,158 @@
+from functools import wraps
+
+from . import filters
+from .asyncsupport import auto_aiter
+from .asyncsupport import auto_await
+
+
+async def auto_to_seq(value):
+ seq = []
+ if hasattr(value, "__aiter__"):
+ async for item in value:
+ seq.append(item)
+ else:
+ for item in value:
+ seq.append(item)
+ return seq
+
+
+async def async_select_or_reject(args, kwargs, modfunc, lookup_attr):
+ seq, func = filters.prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
+ if seq:
+ async for item in auto_aiter(seq):
+ if func(item):
+ yield item
+
+
+def dualfilter(normal_filter, async_filter):
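+    # Dispatch between the sync and async implementations of a filter at
+    # call time: environment filters receive the environment directly as
+    # args[0], while all other filters reach it through args[0].environment
+    # (a context or eval context), which is why is_async is defined
+    # separately per branch below.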
+ wrap_evalctx = False
+ if getattr(normal_filter, "environmentfilter", False) is True:
+
+ def is_async(args):
+ return args[0].is_async
+
+ wrap_evalctx = False
+ else:
+ has_evalctxfilter = getattr(normal_filter, "evalcontextfilter", False) is True
+ has_ctxfilter = getattr(normal_filter, "contextfilter", False) is True
+ wrap_evalctx = not has_evalctxfilter and not has_ctxfilter
+
+ def is_async(args):
+ return args[0].environment.is_async
+
+ @wraps(normal_filter)
+ def wrapper(*args, **kwargs):
+ b = is_async(args)
+ if wrap_evalctx:
+ args = args[1:]
+ if b:
+ return async_filter(*args, **kwargs)
+ return normal_filter(*args, **kwargs)
+
+ if wrap_evalctx:
+ wrapper.evalcontextfilter = True
+
+ wrapper.asyncfiltervariant = True
+
+ return wrapper
+
+
+def asyncfiltervariant(original):
+ def decorator(f):
+ return dualfilter(original, f)
+
+ return decorator
+
+
+@asyncfiltervariant(filters.do_first)
+async def do_first(environment, seq):
+ try:
+ return await auto_aiter(seq).__anext__()
+ except StopAsyncIteration:
+ return environment.undefined("No first item, sequence was empty.")
+
+
+@asyncfiltervariant(filters.do_groupby)
+async def do_groupby(environment, value, attribute):
+ expr = filters.make_attrgetter(environment, attribute)
+ return [
+ filters._GroupTuple(key, await auto_to_seq(values))
+ for key, values in filters.groupby(
+ sorted(await auto_to_seq(value), key=expr), expr
+ )
+ ]
+
+
+@asyncfiltervariant(filters.do_join)
+async def do_join(eval_ctx, value, d=u"", attribute=None):
+ return filters.do_join(eval_ctx, await auto_to_seq(value), d, attribute)
+
+
+@asyncfiltervariant(filters.do_list)
+async def do_list(value):
+ return await auto_to_seq(value)
+
+
+@asyncfiltervariant(filters.do_reject)
+async def do_reject(*args, **kwargs):
+ return async_select_or_reject(args, kwargs, lambda x: not x, False)
+
+
+@asyncfiltervariant(filters.do_rejectattr)
+async def do_rejectattr(*args, **kwargs):
+ return async_select_or_reject(args, kwargs, lambda x: not x, True)
+
+
+@asyncfiltervariant(filters.do_select)
+async def do_select(*args, **kwargs):
+ return async_select_or_reject(args, kwargs, lambda x: x, False)
+
+
+@asyncfiltervariant(filters.do_selectattr)
+async def do_selectattr(*args, **kwargs):
+ return async_select_or_reject(args, kwargs, lambda x: x, True)
+
+
+@asyncfiltervariant(filters.do_map)
+async def do_map(*args, **kwargs):
+ seq, func = filters.prepare_map(args, kwargs)
+ if seq:
+ async for item in auto_aiter(seq):
+ yield await auto_await(func(item))
+
+
+@asyncfiltervariant(filters.do_sum)
+async def do_sum(environment, iterable, attribute=None, start=0):
+ rv = start
+ if attribute is not None:
+ func = filters.make_attrgetter(environment, attribute)
+ else:
+
+ def func(x):
+ return x
+
+ async for item in auto_aiter(iterable):
+ rv += func(item)
+ return rv
+
+
+@asyncfiltervariant(filters.do_slice)
+async def do_slice(value, slices, fill_with=None):
+ return filters.do_slice(await auto_to_seq(value), slices, fill_with)
+
+
+ASYNC_FILTERS = {
+ "first": do_first,
+ "groupby": do_groupby,
+ "join": do_join,
+ "list": do_list,
+    # do_last is intentionally not supported here: getting the last item
+    # of an async iterator would require draining it completely.
+ "reject": do_reject,
+ "rejectattr": do_rejectattr,
+ "map": do_map,
+ "select": do_select,
+ "selectattr": do_selectattr,
+ "sum": do_sum,
+ "slice": do_slice,
+}
diff --git a/third_party/python/Jinja2/jinja2/asyncsupport.py b/third_party/python/Jinja2/jinja2/asyncsupport.py
new file mode 100644
index 0000000000..78ba3739d8
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/asyncsupport.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+"""The code for async support. Importing this patches Jinja on supported
+Python versions.
+"""
+import asyncio
+import inspect
+from functools import update_wrapper
+
+from markupsafe import Markup
+
+from .environment import TemplateModule
+from .runtime import LoopContext
+from .utils import concat
+from .utils import internalcode
+from .utils import missing
+
+
+async def concat_async(async_gen):
+ rv = []
+
+ async def collect():
+ async for event in async_gen:
+ rv.append(event)
+
+ await collect()
+ return concat(rv)
+
+
+async def generate_async(self, *args, **kwargs):
+ vars = dict(*args, **kwargs)
+ try:
+ async for event in self.root_render_func(self.new_context(vars)):
+ yield event
+ except Exception:
+ yield self.environment.handle_exception()
+
+
+def wrap_generate_func(original_generate):
+ def _convert_generator(self, loop, args, kwargs):
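+        # Drive the async generator from synchronous code: each sync
+        # iteration runs the event loop until the next item is produced.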
+ async_gen = self.generate_async(*args, **kwargs)
+ try:
+ while 1:
+ yield loop.run_until_complete(async_gen.__anext__())
+ except StopAsyncIteration:
+ pass
+
+ def generate(self, *args, **kwargs):
+ if not self.environment.is_async:
+ return original_generate(self, *args, **kwargs)
+ return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
+
+ return update_wrapper(generate, original_generate)
+
+
+async def render_async(self, *args, **kwargs):
+ if not self.environment.is_async:
+ raise RuntimeError("The environment was not created with async mode enabled.")
+
+ vars = dict(*args, **kwargs)
+ ctx = self.new_context(vars)
+
+ try:
+ return await concat_async(self.root_render_func(ctx))
+ except Exception:
+ return self.environment.handle_exception()
+
+
+def wrap_render_func(original_render):
+ def render(self, *args, **kwargs):
+ if not self.environment.is_async:
+ return original_render(self, *args, **kwargs)
+ loop = asyncio.get_event_loop()
+ return loop.run_until_complete(self.render_async(*args, **kwargs))
+
+ return update_wrapper(render, original_render)
+
+
+def wrap_block_reference_call(original_call):
+ @internalcode
+ async def async_call(self):
+ rv = await concat_async(self._stack[self._depth](self._context))
+ if self._context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+ @internalcode
+ def __call__(self):
+ if not self._context.environment.is_async:
+ return original_call(self)
+ return async_call(self)
+
+ return update_wrapper(__call__, original_call)
+
+
+def wrap_macro_invoke(original_invoke):
+ @internalcode
+ async def async_invoke(self, arguments, autoescape):
+ rv = await self._func(*arguments)
+ if autoescape:
+ rv = Markup(rv)
+ return rv
+
+ @internalcode
+ def _invoke(self, arguments, autoescape):
+ if not self._environment.is_async:
+ return original_invoke(self, arguments, autoescape)
+ return async_invoke(self, arguments, autoescape)
+
+ return update_wrapper(_invoke, original_invoke)
+
+
+@internalcode
+async def get_default_module_async(self):
+ if self._module is not None:
+ return self._module
+ self._module = rv = await self.make_module_async()
+ return rv
+
+
+def wrap_default_module(original_default_module):
+ @internalcode
+ def _get_default_module(self):
+ if self.environment.is_async:
+ raise RuntimeError("Template module attribute is unavailable in async mode")
+ return original_default_module(self)
+
+ return _get_default_module
+
+
+async def make_module_async(self, vars=None, shared=False, locals=None):
+ context = self.new_context(vars, shared, locals)
+ body_stream = []
+ async for item in self.root_render_func(context):
+ body_stream.append(item)
+ return TemplateModule(self, context, body_stream)
+
+
+def patch_template():
+ from . import Template
+
+ Template.generate = wrap_generate_func(Template.generate)
+ Template.generate_async = update_wrapper(generate_async, Template.generate_async)
+ Template.render_async = update_wrapper(render_async, Template.render_async)
+ Template.render = wrap_render_func(Template.render)
+ Template._get_default_module = wrap_default_module(Template._get_default_module)
+ Template._get_default_module_async = get_default_module_async
+ Template.make_module_async = update_wrapper(
+ make_module_async, Template.make_module_async
+ )
+
+
+def patch_runtime():
+ from .runtime import BlockReference, Macro
+
+ BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
+ Macro._invoke = wrap_macro_invoke(Macro._invoke)
+
+
+def patch_filters():
+ from .filters import FILTERS
+ from .asyncfilters import ASYNC_FILTERS
+
+ FILTERS.update(ASYNC_FILTERS)
+
+
+def patch_all():
+ patch_template()
+ patch_runtime()
+ patch_filters()
+
+
+async def auto_await(value):
+ if inspect.isawaitable(value):
+ return await value
+ return value
+
+
+async def auto_aiter(iterable):
+ if hasattr(iterable, "__aiter__"):
+ async for item in iterable:
+ yield item
+ return
+ for item in iterable:
+ yield item
+
+
+class AsyncLoopContext(LoopContext):
+ _to_iterator = staticmethod(auto_aiter)
+
+ @property
+ async def length(self):
+ if self._length is not None:
+ return self._length
+
+ try:
+ self._length = len(self._iterable)
+ except TypeError:
+ iterable = [x async for x in self._iterator]
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
+
+ return self._length
+
+ @property
+ async def revindex0(self):
+ return await self.length - self.index
+
+ @property
+ async def revindex(self):
+ return await self.length - self.index0
+
+ async def _peek_next(self):
+ if self._after is not missing:
+ return self._after
+
+ try:
+ self._after = await self._iterator.__anext__()
+ except StopAsyncIteration:
+ self._after = missing
+
+ return self._after
+
+ @property
+ async def last(self):
+ return await self._peek_next() is missing
+
+ @property
+ async def nextitem(self):
+ rv = await self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = await self._iterator.__anext__()
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
+
+
+async def make_async_loop_context(iterable, undefined, recurse=None, depth0=0):
+ import warnings
+
+ warnings.warn(
+ "This template must be recompiled with at least Jinja 2.11, or"
+ " it will fail in 3.0.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return AsyncLoopContext(iterable, undefined, recurse, depth0)
+
+
+patch_all()
diff --git a/third_party/python/Jinja2/jinja2/bccache.py b/third_party/python/Jinja2/jinja2/bccache.py
new file mode 100644
index 0000000000..9c0661030f
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/bccache.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+"""The optional bytecode cache system. This is useful if you have very
+complex template situations and the compilation of all those templates
+slows down your application too much.
+
+This is often the case for forking web applications that are
+initialized on the first request.
+"""
+import errno
+import fnmatch
+import os
+import stat
+import sys
+import tempfile
+from hashlib import sha1
+from os import listdir
+from os import path
+
+from ._compat import BytesIO
+from ._compat import marshal_dump
+from ._compat import marshal_load
+from ._compat import pickle
+from ._compat import text_type
+from .utils import open_if_exists
+
+bc_version = 4
+# Magic bytes to identify Jinja bytecode cache files. Contains the
+# Python major and minor version to avoid loading incompatible bytecode
+# if a project upgrades its Python version.
+bc_magic = (
+ b"j2"
+ + pickle.dumps(bc_version, 2)
+ + pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
+)
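+# For example, on CPython 3.8 the version word above is
+# (3 << 24) | 8 == 0x03000008, so a cache written by any other
+# major/minor version fails the magic comparison and is discarded.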
+
+
+class Bucket(object):
+ """Buckets are used to store the bytecode for one template. It's created
+ and initialized by the bytecode cache and passed to the loading functions.
+
+ The buckets get an internal checksum from the cache assigned and use this
+ to automatically reject outdated cache material. Individual bytecode
+ cache subclasses don't have to care about cache invalidation.
+ """
+
+ def __init__(self, environment, key, checksum):
+ self.environment = environment
+ self.key = key
+ self.checksum = checksum
+ self.reset()
+
+ def reset(self):
+ """Resets the bucket (unloads the bytecode)."""
+ self.code = None
+
+ def load_bytecode(self, f):
+ """Loads bytecode from a file or file like object."""
+ # make sure the magic header is correct
+ magic = f.read(len(bc_magic))
+ if magic != bc_magic:
+ self.reset()
+ return
+ # the source code of the file changed, we need to reload
+ checksum = pickle.load(f)
+ if self.checksum != checksum:
+ self.reset()
+ return
+ # if marshal_load fails then we need to reload
+ try:
+ self.code = marshal_load(f)
+ except (EOFError, ValueError, TypeError):
+ self.reset()
+ return
+
+ def write_bytecode(self, f):
+ """Dump the bytecode into the file or file like object passed."""
+ if self.code is None:
+ raise TypeError("can't write empty bucket")
+ f.write(bc_magic)
+ pickle.dump(self.checksum, f, 2)
+ marshal_dump(self.code, f)
+
+ def bytecode_from_string(self, string):
+ """Load bytecode from a string."""
+ self.load_bytecode(BytesIO(string))
+
+ def bytecode_to_string(self):
+ """Return the bytecode as string."""
+ out = BytesIO()
+ self.write_bytecode(out)
+ return out.getvalue()
+
+
+class BytecodeCache(object):
+ """To implement your own bytecode cache you have to subclass this class
+ and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
+ these methods are passed a :class:`~jinja2.bccache.Bucket`.
+
+ A very basic bytecode cache that saves the bytecode on the file system::
+
+ from os import path
+
+ class MyCache(BytecodeCache):
+
+ def __init__(self, directory):
+ self.directory = directory
+
+ def load_bytecode(self, bucket):
+ filename = path.join(self.directory, bucket.key)
+ if path.exists(filename):
+ with open(filename, 'rb') as f:
+ bucket.load_bytecode(f)
+
+ def dump_bytecode(self, bucket):
+ filename = path.join(self.directory, bucket.key)
+ with open(filename, 'wb') as f:
+ bucket.write_bytecode(f)
+
+ A more advanced version of a filesystem based bytecode cache is part of
+ Jinja.
+ """
+
+ def load_bytecode(self, bucket):
+ """Subclasses have to override this method to load bytecode into a
+        bucket. If it is not able to find code in the cache for the
+        bucket, it must not do anything.
+ """
+ raise NotImplementedError()
+
+ def dump_bytecode(self, bucket):
+ """Subclasses have to override this method to write the bytecode
+        from a bucket back to the cache. If it is unable to do so, it
+        must not fail silently but raise an exception.
+ """
+ raise NotImplementedError()
+
+ def clear(self):
+ """Clears the cache. This method is not used by Jinja but should be
+ implemented to allow applications to clear the bytecode cache used
+ by a particular environment.
+ """
+
+ def get_cache_key(self, name, filename=None):
+ """Returns the unique hash key for this template name."""
+ hash = sha1(name.encode("utf-8"))
+ if filename is not None:
+ filename = "|" + filename
+ if isinstance(filename, text_type):
+ filename = filename.encode("utf-8")
+ hash.update(filename)
+ return hash.hexdigest()
+
+ def get_source_checksum(self, source):
+ """Returns a checksum for the source."""
+ return sha1(source.encode("utf-8")).hexdigest()
+
+ def get_bucket(self, environment, name, filename, source):
+ """Return a cache bucket for the given template. All arguments are
+ mandatory but filename may be `None`.
+ """
+ key = self.get_cache_key(name, filename)
+ checksum = self.get_source_checksum(source)
+ bucket = Bucket(environment, key, checksum)
+ self.load_bytecode(bucket)
+ return bucket
+
+ def set_bucket(self, bucket):
+ """Put the bucket into the cache."""
+ self.dump_bytecode(bucket)
+
+
+class FileSystemBytecodeCache(BytecodeCache):
+ """A bytecode cache that stores bytecode on the filesystem. It accepts
+ two arguments: The directory where the cache items are stored and a
+ pattern string that is used to build the filename.
+
+ If no directory is specified a default cache directory is selected. On
+ Windows the user's temp directory is used, on UNIX systems a directory
+ is created for the user in the system temp directory.
+
+ The pattern can be used to have multiple separate caches operate on the
+ same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
+ is replaced with the cache key.
+
+ >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
+
+ This bytecode cache supports clearing of the cache using the clear method.
+ """
+
+ def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
+ if directory is None:
+ directory = self._get_default_cache_dir()
+ self.directory = directory
+ self.pattern = pattern
+
+ def _get_default_cache_dir(self):
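+        # On POSIX this builds a per-user directory under the system temp
+        # dir and verifies ownership, directory type, and 0700 permissions
+        # both right after mkdir/chmod and once more afterwards, refusing
+        # to use a directory that another user could have pre-created.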
+ def _unsafe_dir():
+ raise RuntimeError(
+ "Cannot determine safe temp directory. You "
+ "need to explicitly provide one."
+ )
+
+ tmpdir = tempfile.gettempdir()
+
+        # On Windows the temporary directory is already user-specific
+        # unless explicitly forced otherwise. We can just use that.
+ if os.name == "nt":
+ return tmpdir
+ if not hasattr(os, "getuid"):
+ _unsafe_dir()
+
+ dirname = "_jinja2-cache-%d" % os.getuid()
+ actual_dir = os.path.join(tmpdir, dirname)
+
+ try:
+ os.mkdir(actual_dir, stat.S_IRWXU)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ try:
+ os.chmod(actual_dir, stat.S_IRWXU)
+ actual_dir_stat = os.lstat(actual_dir)
+ if (
+ actual_dir_stat.st_uid != os.getuid()
+ or not stat.S_ISDIR(actual_dir_stat.st_mode)
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
+ ):
+ _unsafe_dir()
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ actual_dir_stat = os.lstat(actual_dir)
+ if (
+ actual_dir_stat.st_uid != os.getuid()
+ or not stat.S_ISDIR(actual_dir_stat.st_mode)
+ or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
+ ):
+ _unsafe_dir()
+
+ return actual_dir
+
+ def _get_cache_filename(self, bucket):
+ return path.join(self.directory, self.pattern % bucket.key)
+
+ def load_bytecode(self, bucket):
+ f = open_if_exists(self._get_cache_filename(bucket), "rb")
+ if f is not None:
+ try:
+ bucket.load_bytecode(f)
+ finally:
+ f.close()
+
+ def dump_bytecode(self, bucket):
+ f = open(self._get_cache_filename(bucket), "wb")
+ try:
+ bucket.write_bytecode(f)
+ finally:
+ f.close()
+
+ def clear(self):
+        # os.remove is imported lazily here because Google App Engine
+        # doesn't support write access on the file system, so the
+        # function may not exist there.
+ from os import remove
+
+ files = fnmatch.filter(listdir(self.directory), self.pattern % "*")
+ for filename in files:
+ try:
+ remove(path.join(self.directory, filename))
+ except OSError:
+ pass
+
+
+class MemcachedBytecodeCache(BytecodeCache):
+ """This class implements a bytecode cache that uses a memcache cache for
+ storing the information. It does not enforce a specific memcache library
+ (tummy's memcache or cmemcache) but will accept any class that provides
+ the minimal interface required.
+
+ Libraries compatible with this class:
+
+ - `cachelib <https://github.com/pallets/cachelib>`_
+ - `python-memcached <https://pypi.org/project/python-memcached/>`_
+
+ (Unfortunately the django cache interface is not compatible because it
+ does not support storing binary data, only unicode. You can however pass
+ the underlying cache client to the bytecode cache which is available
+ as `django.core.cache.cache._client`.)
+
+ The minimal interface for the client passed to the constructor is this:
+
+ .. class:: MinimalClientInterface
+
+ .. method:: set(key, value[, timeout])
+
+ Stores the bytecode in the cache. `value` is a string and
+            `timeout` the timeout of the key. If `timeout` is not
+            provided, a default timeout or no timeout should be assumed;
+            if it is provided, it is an integer giving the number of
+            seconds the cache item should exist.
+
+ .. method:: get(key)
+
+ Returns the value for the cache key. If the item does not
+ exist in the cache the return value must be `None`.
+
+    The other constructor arguments are the prefix that is added before
+    every cache key and the timeout for the bytecode in the cache system.
+    We recommend a high (or no) timeout.
+
+ This bytecode cache does not support clearing of used items in the cache.
+ The clear method is a no-operation function.
+
+ .. versionadded:: 2.7
+ Added support for ignoring memcache errors through the
+ `ignore_memcache_errors` parameter.
+ """
+
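+    # An illustrative in-memory client that satisfies the minimal
+    # interface documented above (a sketch, e.g. for tests; not part of
+    # upstream Jinja):
+    #
+    #     class DictClient:
+    #         def __init__(self):
+    #             self._store = {}
+    #
+    #         def set(self, key, value, timeout=None):
+    #             self._store[key] = value
+    #
+    #         def get(self, key):
+    #             return self._store.get(key)
+    #
+    #     bcc = MemcachedBytecodeCache(DictClient())
+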
+ def __init__(
+ self,
+ client,
+ prefix="jinja2/bytecode/",
+ timeout=None,
+ ignore_memcache_errors=True,
+ ):
+ self.client = client
+ self.prefix = prefix
+ self.timeout = timeout
+ self.ignore_memcache_errors = ignore_memcache_errors
+
+ def load_bytecode(self, bucket):
+ try:
+ code = self.client.get(self.prefix + bucket.key)
+ except Exception:
+ if not self.ignore_memcache_errors:
+ raise
+ code = None
+ if code is not None:
+ bucket.bytecode_from_string(code)
+
+ def dump_bytecode(self, bucket):
+ args = (self.prefix + bucket.key, bucket.bytecode_to_string())
+ if self.timeout is not None:
+ args += (self.timeout,)
+ try:
+ self.client.set(*args)
+ except Exception:
+ if not self.ignore_memcache_errors:
+ raise
diff --git a/third_party/python/Jinja2/jinja2/compiler.py b/third_party/python/Jinja2/jinja2/compiler.py
new file mode 100644
index 0000000000..63297b42c3
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/compiler.py
@@ -0,0 +1,1843 @@
+# -*- coding: utf-8 -*-
+"""Compiles nodes from the parser into Python code."""
+from collections import namedtuple
+from functools import update_wrapper
+from itertools import chain
+from keyword import iskeyword as is_python_keyword
+
+from markupsafe import escape
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import imap
+from ._compat import iteritems
+from ._compat import izip
+from ._compat import NativeStringIO
+from ._compat import range_type
+from ._compat import string_types
+from ._compat import text_type
+from .exceptions import TemplateAssertionError
+from .idtracking import Symbols
+from .idtracking import VAR_LOAD_ALIAS
+from .idtracking import VAR_LOAD_PARAMETER
+from .idtracking import VAR_LOAD_RESOLVE
+from .idtracking import VAR_LOAD_UNDEFINED
+from .nodes import EvalContext
+from .optimizer import Optimizer
+from .utils import concat
+from .visitor import NodeVisitor
+
+operators = {
+ "eq": "==",
+ "ne": "!=",
+ "gt": ">",
+ "gteq": ">=",
+ "lt": "<",
+ "lteq": "<=",
+ "in": "in",
+ "notin": "not in",
+}
+
+# Which method should generated code use for dict iteration?
+# On 2.x use iteritems, on 3.x use items.
+if hasattr(dict, "iteritems"):
+ dict_item_iter = "iteritems"
+else:
+ dict_item_iter = "items"
+
+code_features = ["division"]
+
+# does this Python version support generator_stop? (PEP 479)
+try:
+ exec("from __future__ import generator_stop")
+ code_features.append("generator_stop")
+except SyntaxError:
+ pass
+
+# does this python version support yield from?
+try:
+ exec("def f(): yield from x()")
+except SyntaxError:
+ supports_yield_from = False
+else:
+ supports_yield_from = True
+
+
+def optimizeconst(f):
+ def new_func(self, node, frame, **kwargs):
+ # Only optimize if the frame is not volatile
+ if self.optimized and not frame.eval_ctx.volatile:
+ new_node = self.optimizer.visit(node, frame.eval_ctx)
+ if new_node != node:
+ return self.visit(new_node, frame)
+ return f(self, node, frame, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+
+def generate(
+ node, environment, name, filename, stream=None, defer_init=False, optimized=True
+):
+ """Generate the python source for a node tree."""
+ if not isinstance(node, nodes.Template):
+ raise TypeError("Can't compile non template nodes")
+ generator = environment.code_generator_class(
+ environment, name, filename, stream, defer_init, optimized
+ )
+ generator.visit(node)
+ if stream is None:
+ return generator.stream.getvalue()
+
+
+def has_safe_repr(value):
+ """Does the node have a safe representation?"""
+ if value is None or value is NotImplemented or value is Ellipsis:
+ return True
+ if type(value) in (bool, int, float, complex, range_type, Markup) + string_types:
+ return True
+ if type(value) in (tuple, list, set, frozenset):
+ for item in value:
+ if not has_safe_repr(item):
+ return False
+ return True
+ elif type(value) is dict:
+ for key, value in iteritems(value):
+ if not has_safe_repr(key):
+ return False
+ if not has_safe_repr(value):
+ return False
+ return True
+ return False
+
+
+def find_undeclared(nodes, names):
+ """Check if the names passed are accessed undeclared. The return value
+ is a set of all the undeclared names from the sequence of names found.
+ """
+ visitor = UndeclaredNameVisitor(names)
+ try:
+ for node in nodes:
+ visitor.visit(node)
+ except VisitorExit:
+ pass
+ return visitor.undeclared
+
+
+class MacroRef(object):
+ def __init__(self, node):
+ self.node = node
+ self.accesses_caller = False
+ self.accesses_kwargs = False
+ self.accesses_varargs = False
+
+
+class Frame(object):
+ """Holds compile time information for us."""
+
+ def __init__(self, eval_ctx, parent=None, level=None):
+ self.eval_ctx = eval_ctx
+ self.symbols = Symbols(parent and parent.symbols or None, level=level)
+
+ # a toplevel frame is the root + soft frames such as if conditions.
+ self.toplevel = False
+
+ # the root frame is basically just the outermost frame, so no if
+ # conditions. This information is used to optimize inheritance
+ # situations.
+ self.rootlevel = False
+
+ # in some dynamic inheritance situations the compiler needs to add
+ # write tests around output statements.
+ self.require_output_check = parent and parent.require_output_check
+
+ # inside some tags we are using a buffer rather than yield statements.
+ # this for example affects {% filter %} or {% macro %}. If a frame
+ # is buffered this variable points to the name of the list used as
+ # buffer.
+ self.buffer = None
+
+ # the name of the block we're in, otherwise None.
+ self.block = parent and parent.block or None
+
+ # the parent of this frame
+ self.parent = parent
+
+ if parent is not None:
+ self.buffer = parent.buffer
+
+ def copy(self):
+ """Create a copy of the current one."""
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.symbols = self.symbols.copy()
+ return rv
+
+ def inner(self, isolated=False):
+ """Return an inner frame."""
+ if isolated:
+ return Frame(self.eval_ctx, level=self.symbols.level + 1)
+ return Frame(self.eval_ctx, self)
+
+ def soft(self):
+ """Return a soft frame. A soft frame may not be modified as
+ standalone thing as it shares the resources with the frame it
+ was created of, but it's not a rootlevel frame any longer.
+
+ This is only used to implement if-statements.
+ """
+ rv = self.copy()
+ rv.rootlevel = False
+ return rv
+
+ __copy__ = copy
+
+
+class VisitorExit(RuntimeError):
+ """Exception used by the `UndeclaredNameVisitor` to signal a stop."""
+
+
+class DependencyFinderVisitor(NodeVisitor):
+ """A visitor that collects filter and test calls."""
+
+ def __init__(self):
+ self.filters = set()
+ self.tests = set()
+
+ def visit_Filter(self, node):
+ self.generic_visit(node)
+ self.filters.add(node.name)
+
+ def visit_Test(self, node):
+ self.generic_visit(node)
+ self.tests.add(node.name)
+
+ def visit_Block(self, node):
+ """Stop visiting at blocks."""
+
+
+class UndeclaredNameVisitor(NodeVisitor):
+ """A visitor that checks if a name is accessed without being
+ declared. This is different from the frame visitor as it will
+ not stop at closure frames.
+ """
+
+ def __init__(self, names):
+ self.names = set(names)
+ self.undeclared = set()
+
+ def visit_Name(self, node):
+ if node.ctx == "load" and node.name in self.names:
+ self.undeclared.add(node.name)
+ if self.undeclared == self.names:
+ raise VisitorExit()
+ else:
+ self.names.discard(node.name)
+
+ def visit_Block(self, node):
+ """Stop visiting a blocks."""
+
+
+class CompilerExit(Exception):
+ """Raised if the compiler encountered a situation where it just
+ doesn't make sense to further process the code. Any block that
+ raises such an exception is not further processed.
+ """
+
+
+class CodeGenerator(NodeVisitor):
+ def __init__(
+ self, environment, name, filename, stream=None, defer_init=False, optimized=True
+ ):
+ if stream is None:
+ stream = NativeStringIO()
+ self.environment = environment
+ self.name = name
+ self.filename = filename
+ self.stream = stream
+ self.created_block_context = False
+ self.defer_init = defer_init
+ self.optimized = optimized
+ if optimized:
+ self.optimizer = Optimizer(environment)
+
+ # aliases for imports
+ self.import_aliases = {}
+
+ # a registry for all blocks. Because blocks are moved out
+ # into the global python scope they are registered here
+ self.blocks = {}
+
+ # the number of extends statements so far
+ self.extends_so_far = 0
+
+ # some templates have a rootlevel extends. In this case we
+ # can safely assume that we're a child template and do some
+ # more optimizations.
+ self.has_known_extends = False
+
+ # the current line number
+ self.code_lineno = 1
+
+ # registry of all filters and tests (global, not block local)
+ self.tests = {}
+ self.filters = {}
+
+ # the debug information
+ self.debug_info = []
+ self._write_debug_info = None
+
+ # the number of new lines before the next write()
+ self._new_lines = 0
+
+ # the line number of the last written statement
+ self._last_line = 0
+
+ # true if nothing was written so far.
+ self._first_write = True
+
+ # used by the `temporary_identifier` method to get new
+ # unique, temporary identifier
+ self._last_identifier = 0
+
+ # the current indentation
+ self._indentation = 0
+
+ # Tracks toplevel assignments
+ self._assign_stack = []
+
+ # Tracks parameter definition blocks
+ self._param_def_block = []
+
+ # Tracks the current context.
+ self._context_reference_stack = ["context"]
+
+ # -- Various compilation helpers
+
+ def fail(self, msg, lineno):
+ """Fail with a :exc:`TemplateAssertionError`."""
+ raise TemplateAssertionError(msg, lineno, self.name, self.filename)
+
+ def temporary_identifier(self):
+ """Get a new unique identifier."""
+ self._last_identifier += 1
+ return "t_%d" % self._last_identifier
+
+ def buffer(self, frame):
+ """Enable buffering for the frame from that point onwards."""
+ frame.buffer = self.temporary_identifier()
+ self.writeline("%s = []" % frame.buffer)
+
+ def return_buffer_contents(self, frame, force_unescaped=False):
+ """Return the buffer contents of the frame."""
+ if not force_unescaped:
+ if frame.eval_ctx.volatile:
+ self.writeline("if context.eval_ctx.autoescape:")
+ self.indent()
+ self.writeline("return Markup(concat(%s))" % frame.buffer)
+ self.outdent()
+ self.writeline("else:")
+ self.indent()
+ self.writeline("return concat(%s)" % frame.buffer)
+ self.outdent()
+ return
+ elif frame.eval_ctx.autoescape:
+ self.writeline("return Markup(concat(%s))" % frame.buffer)
+ return
+ self.writeline("return concat(%s)" % frame.buffer)
+
+ def indent(self):
+ """Indent by one."""
+ self._indentation += 1
+
+ def outdent(self, step=1):
+ """Outdent by step."""
+ self._indentation -= step
+
+ def start_write(self, frame, node=None):
+ """Yield or write into the frame buffer."""
+ if frame.buffer is None:
+ self.writeline("yield ", node)
+ else:
+ self.writeline("%s.append(" % frame.buffer, node)
+
+ def end_write(self, frame):
+ """End the writing process started by `start_write`."""
+ if frame.buffer is not None:
+ self.write(")")
+
+ def simple_write(self, s, frame, node=None):
+ """Simple shortcut for start_write + write + end_write."""
+ self.start_write(frame, node)
+ self.write(s)
+ self.end_write(frame)
+
+ def blockvisit(self, nodes, frame):
+ """Visit a list of nodes as block in a frame. If the current frame
+ is no buffer a dummy ``if 0: yield None`` is written automatically.
+ """
+ try:
+ self.writeline("pass")
+ for node in nodes:
+ self.visit(node, frame)
+ except CompilerExit:
+ pass
+
+ def write(self, x):
+ """Write a string into the output stream."""
+ if self._new_lines:
+ if not self._first_write:
+ self.stream.write("\n" * self._new_lines)
+ self.code_lineno += self._new_lines
+ if self._write_debug_info is not None:
+ self.debug_info.append((self._write_debug_info, self.code_lineno))
+ self._write_debug_info = None
+ self._first_write = False
+ self.stream.write(" " * self._indentation)
+ self._new_lines = 0
+ self.stream.write(x)
+
+ def writeline(self, x, node=None, extra=0):
+ """Combination of newline and write."""
+ self.newline(node, extra)
+ self.write(x)
+
+ def newline(self, node=None, extra=0):
+ """Add one or more newlines before the next write."""
+ self._new_lines = max(self._new_lines, 1 + extra)
+ if node is not None and node.lineno != self._last_line:
+ self._write_debug_info = node.lineno
+ self._last_line = node.lineno
+
+ def signature(self, node, frame, extra_kwargs=None):
+ """Writes a function call to the stream for the current node.
+ A leading comma is added automatically. The extra keyword
+        arguments may not include Python keywords, otherwise a syntax
+        error could occur. The extra keyword arguments should be given
+        as a Python dict.
+ """
+ # if any of the given keyword arguments is a python keyword
+ # we have to make sure that no invalid call is created.
+ kwarg_workaround = False
+ for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
+ if is_python_keyword(kwarg):
+ kwarg_workaround = True
+ break
+
+ for arg in node.args:
+ self.write(", ")
+ self.visit(arg, frame)
+
+ if not kwarg_workaround:
+ for kwarg in node.kwargs:
+ self.write(", ")
+ self.visit(kwarg, frame)
+ if extra_kwargs is not None:
+ for key, value in iteritems(extra_kwargs):
+ self.write(", %s=%s" % (key, value))
+ if node.dyn_args:
+ self.write(", *")
+ self.visit(node.dyn_args, frame)
+
+ if kwarg_workaround:
+ if node.dyn_kwargs is not None:
+ self.write(", **dict({")
+ else:
+ self.write(", **{")
+ for kwarg in node.kwargs:
+ self.write("%r: " % kwarg.key)
+ self.visit(kwarg.value, frame)
+ self.write(", ")
+ if extra_kwargs is not None:
+ for key, value in iteritems(extra_kwargs):
+ self.write("%r: %s, " % (key, value))
+ if node.dyn_kwargs is not None:
+ self.write("}, **")
+ self.visit(node.dyn_kwargs, frame)
+ self.write(")")
+ else:
+ self.write("}")
+
+ elif node.dyn_kwargs is not None:
+ self.write(", **")
+ self.visit(node.dyn_kwargs, frame)
+
+ def pull_dependencies(self, nodes):
+ """Pull all the dependencies."""
+ visitor = DependencyFinderVisitor()
+ for node in nodes:
+ visitor.visit(node)
+ for dependency in "filters", "tests":
+ mapping = getattr(self, dependency)
+ for name in getattr(visitor, dependency):
+ if name not in mapping:
+ mapping[name] = self.temporary_identifier()
+ self.writeline(
+ "%s = environment.%s[%r]" % (mapping[name], dependency, name)
+ )
+
+ def enter_frame(self, frame):
+ undefs = []
+ for target, (action, param) in iteritems(frame.symbols.loads):
+ if action == VAR_LOAD_PARAMETER:
+ pass
+ elif action == VAR_LOAD_RESOLVE:
+ self.writeline("%s = %s(%r)" % (target, self.get_resolve_func(), param))
+ elif action == VAR_LOAD_ALIAS:
+ self.writeline("%s = %s" % (target, param))
+ elif action == VAR_LOAD_UNDEFINED:
+ undefs.append(target)
+ else:
+ raise NotImplementedError("unknown load instruction")
+ if undefs:
+ self.writeline("%s = missing" % " = ".join(undefs))
+
+ def leave_frame(self, frame, with_python_scope=False):
+ if not with_python_scope:
+ undefs = []
+ for target, _ in iteritems(frame.symbols.loads):
+ undefs.append(target)
+ if undefs:
+ self.writeline("%s = missing" % " = ".join(undefs))
+
+ def func(self, name):
+ if self.environment.is_async:
+ return "async def %s" % name
+ return "def %s" % name
+
+ def macro_body(self, node, frame):
+ """Dump the function def of a macro or call block."""
+ frame = frame.inner()
+ frame.symbols.analyze_node(node)
+ macro_ref = MacroRef(node)
+
+ explicit_caller = None
+ skip_special_params = set()
+ args = []
+ for idx, arg in enumerate(node.args):
+ if arg.name == "caller":
+ explicit_caller = idx
+ if arg.name in ("kwargs", "varargs"):
+ skip_special_params.add(arg.name)
+ args.append(frame.symbols.ref(arg.name))
+
+ undeclared = find_undeclared(node.body, ("caller", "kwargs", "varargs"))
+
+ if "caller" in undeclared:
+            # In older Jinja versions there was a bug that allowed caller
+            # to retain its special behavior even if it was mentioned in
+            # the argument list. Thankfully this only really worked if it
+            # was the last argument. So we now check this explicitly and
+            # error out if it appears anywhere else in the argument list.
+ if explicit_caller is not None:
+ try:
+ node.defaults[explicit_caller - len(node.args)]
+ except IndexError:
+ self.fail(
+ "When defining macros or call blocks the "
+ 'special "caller" argument must be omitted '
+ "or be given a default.",
+ node.lineno,
+ )
+ else:
+ args.append(frame.symbols.declare_parameter("caller"))
+ macro_ref.accesses_caller = True
+ if "kwargs" in undeclared and "kwargs" not in skip_special_params:
+ args.append(frame.symbols.declare_parameter("kwargs"))
+ macro_ref.accesses_kwargs = True
+ if "varargs" in undeclared and "varargs" not in skip_special_params:
+ args.append(frame.symbols.declare_parameter("varargs"))
+ macro_ref.accesses_varargs = True
+
+ # macros are delayed, they never require output checks
+ frame.require_output_check = False
+ frame.symbols.analyze_node(node)
+ self.writeline("%s(%s):" % (self.func("macro"), ", ".join(args)), node)
+ self.indent()
+
+ self.buffer(frame)
+ self.enter_frame(frame)
+
+ self.push_parameter_definitions(frame)
+ for idx, arg in enumerate(node.args):
+ ref = frame.symbols.ref(arg.name)
+ self.writeline("if %s is missing:" % ref)
+ self.indent()
+ try:
+ default = node.defaults[idx - len(node.args)]
+ except IndexError:
+ self.writeline(
+ "%s = undefined(%r, name=%r)"
+ % (ref, "parameter %r was not provided" % arg.name, arg.name)
+ )
+ else:
+ self.writeline("%s = " % ref)
+ self.visit(default, frame)
+ self.mark_parameter_stored(ref)
+ self.outdent()
+ self.pop_parameter_definitions()
+
+ self.blockvisit(node.body, frame)
+ self.return_buffer_contents(frame, force_unescaped=True)
+ self.leave_frame(frame, with_python_scope=True)
+ self.outdent()
+
+ return frame, macro_ref
+
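+    # Rough sketch of the generated code (illustrative; local names vary):
+    # ``{% macro m(a, b=2) %}...{% endmacro %}`` produces something like
+    #     def macro(l_1_a, l_1_b):
+    #         if l_1_b is missing:
+    #             l_1_b = 2
+    #         ...
+    # which ``macro_def`` below wraps in a runtime ``Macro`` object.
+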
+ def macro_def(self, macro_ref, frame):
+ """Dump the macro definition for the def created by macro_body."""
+ arg_tuple = ", ".join(repr(x.name) for x in macro_ref.node.args)
+ name = getattr(macro_ref.node, "name", None)
+ if len(macro_ref.node.args) == 1:
+ arg_tuple += ","
+ self.write(
+ "Macro(environment, macro, %r, (%s), %r, %r, %r, "
+ "context.eval_ctx.autoescape)"
+ % (
+ name,
+ arg_tuple,
+ macro_ref.accesses_kwargs,
+ macro_ref.accesses_varargs,
+ macro_ref.accesses_caller,
+ )
+ )
+
+ def position(self, node):
+ """Return a human readable position for the node."""
+ rv = "line %d" % node.lineno
+ if self.name is not None:
+ rv += " in " + repr(self.name)
+ return rv
+
+ def dump_local_context(self, frame):
+ return "{%s}" % ", ".join(
+ "%r: %s" % (name, target)
+ for name, target in iteritems(frame.symbols.dump_stores())
+ )
+
+ def write_commons(self):
+ """Writes a common preamble that is used by root and block functions.
+ Primarily this sets up common local helpers and enforces a generator
+ through a dead branch.
+ """
+ self.writeline("resolve = context.resolve_or_missing")
+ self.writeline("undefined = environment.undefined")
+ # always use the standard Undefined class for the implicit else of
+ # conditional expressions
+ self.writeline("cond_expr_undefined = Undefined")
+ self.writeline("if 0: yield None")
+
+ def push_parameter_definitions(self, frame):
+ """Pushes all parameter targets from the given frame into a local
+ stack that permits tracking of yet to be assigned parameters. In
+ particular this enables the optimization from `visit_Name` to skip
+ undefined expressions for parameters in macros as macros can reference
+ otherwise unbound parameters.
+ """
+ self._param_def_block.append(frame.symbols.dump_param_targets())
+
+ def pop_parameter_definitions(self):
+ """Pops the current parameter definitions set."""
+ self._param_def_block.pop()
+
+ def mark_parameter_stored(self, target):
+ """Marks a parameter in the current parameter definitions as stored.
+ This will skip the enforced undefined checks.
+ """
+ if self._param_def_block:
+ self._param_def_block[-1].discard(target)
+
+ def push_context_reference(self, target):
+ self._context_reference_stack.append(target)
+
+ def pop_context_reference(self):
+ self._context_reference_stack.pop()
+
+ def get_context_ref(self):
+ return self._context_reference_stack[-1]
+
+ def get_resolve_func(self):
+ target = self._context_reference_stack[-1]
+ if target == "context":
+ return "resolve"
+ return "%s.resolve" % target
+
+ def derive_context(self, frame):
+ return "%s.derived(%s)" % (
+ self.get_context_ref(),
+ self.dump_local_context(frame),
+ )
+
+ def parameter_is_undeclared(self, target):
+ """Checks if a given target is an undeclared parameter."""
+ if not self._param_def_block:
+ return False
+ return target in self._param_def_block[-1]
+
+ def push_assign_tracking(self):
+ """Pushes a new layer for assignment tracking."""
+ self._assign_stack.append(set())
+
+ def pop_assign_tracking(self, frame):
+ """Pops the topmost level for assignment tracking and updates the
+ context variables if necessary.
+ """
+ vars = self._assign_stack.pop()
+ if not frame.toplevel or not vars:
+ return
+ public_names = [x for x in vars if x[:1] != "_"]
+ if len(vars) == 1:
+ name = next(iter(vars))
+ ref = frame.symbols.ref(name)
+ self.writeline("context.vars[%r] = %s" % (name, ref))
+ else:
+ self.writeline("context.vars.update({")
+ for idx, name in enumerate(vars):
+ if idx:
+ self.write(", ")
+ ref = frame.symbols.ref(name)
+ self.write("%r: %s" % (name, ref))
+ self.write("})")
+ if public_names:
+ if len(public_names) == 1:
+ self.writeline("context.exported_vars.add(%r)" % public_names[0])
+ else:
+ self.writeline(
+ "context.exported_vars.update((%s))"
+ % ", ".join(imap(repr, public_names))
+ )
+
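+    # Illustrative example (not part of upstream): a top-level
+    # ``{% set title = 'Hi' %}`` passes through the tracking above and emits
+    #     context.vars['title'] = l_0_title
+    #     context.exported_vars.add('title')
+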
+ # -- Statement Visitors
+
+ def visit_Template(self, node, frame=None):
+ assert frame is None, "no root frame allowed"
+ eval_ctx = EvalContext(self.environment, self.name)
+
+ from .runtime import exported
+
+ self.writeline("from __future__ import %s" % ", ".join(code_features))
+ self.writeline("from jinja2.runtime import " + ", ".join(exported))
+
+ if self.environment.is_async:
+ self.writeline(
+ "from jinja2.asyncsupport import auto_await, "
+ "auto_aiter, AsyncLoopContext"
+ )
+
+ # if we want a deferred initialization we cannot move the
+ # environment into a local name
+ envenv = not self.defer_init and ", environment=environment" or ""
+
+ # do we have an extends tag at all? If not, we can save some
+ # overhead by just not processing any inheritance code.
+ have_extends = node.find(nodes.Extends) is not None
+
+ # find all blocks
+ for block in node.find_all(nodes.Block):
+ if block.name in self.blocks:
+ self.fail("block %r defined twice" % block.name, block.lineno)
+ self.blocks[block.name] = block
+
+ # find all imports and import them
+ for import_ in node.find_all(nodes.ImportedName):
+ if import_.importname not in self.import_aliases:
+ imp = import_.importname
+ self.import_aliases[imp] = alias = self.temporary_identifier()
+ if "." in imp:
+ module, obj = imp.rsplit(".", 1)
+ self.writeline("from %s import %s as %s" % (module, obj, alias))
+ else:
+ self.writeline("import %s as %s" % (imp, alias))
+
+ # add the load name
+ self.writeline("name = %r" % self.name)
+
+ # generate the root render function.
+ self.writeline(
+ "%s(context, missing=missing%s):" % (self.func("root"), envenv), extra=1
+ )
+ self.indent()
+ self.write_commons()
+
+ # process the root
+ frame = Frame(eval_ctx)
+ if "self" in find_undeclared(node.body, ("self",)):
+ ref = frame.symbols.declare_parameter("self")
+ self.writeline("%s = TemplateReference(context)" % ref)
+ frame.symbols.analyze_node(node)
+ frame.toplevel = frame.rootlevel = True
+ frame.require_output_check = have_extends and not self.has_known_extends
+ if have_extends:
+ self.writeline("parent_template = None")
+ self.enter_frame(frame)
+ self.pull_dependencies(node.body)
+ self.blockvisit(node.body, frame)
+ self.leave_frame(frame, with_python_scope=True)
+ self.outdent()
+
+ # make sure that the parent root is called.
+ if have_extends:
+ if not self.has_known_extends:
+ self.indent()
+ self.writeline("if parent_template is not None:")
+ self.indent()
+ if supports_yield_from and not self.environment.is_async:
+ self.writeline("yield from parent_template.root_render_func(context)")
+ else:
+ self.writeline(
+ "%sfor event in parent_template."
+ "root_render_func(context):"
+ % (self.environment.is_async and "async " or "")
+ )
+ self.indent()
+ self.writeline("yield event")
+ self.outdent()
+ self.outdent(1 + (not self.has_known_extends))
+
+ # at this point we now have the blocks collected and can visit them too.
+ for name, block in iteritems(self.blocks):
+ self.writeline(
+ "%s(context, missing=missing%s):"
+ % (self.func("block_" + name), envenv),
+ block,
+ 1,
+ )
+ self.indent()
+ self.write_commons()
+ # It's important that we do not make this frame a child of the
+ # toplevel template. This would cause a variety of
+ # interesting issues with identifier tracking.
+ block_frame = Frame(eval_ctx)
+ undeclared = find_undeclared(block.body, ("self", "super"))
+ if "self" in undeclared:
+ ref = block_frame.symbols.declare_parameter("self")
+ self.writeline("%s = TemplateReference(context)" % ref)
+ if "super" in undeclared:
+ ref = block_frame.symbols.declare_parameter("super")
+ self.writeline("%s = context.super(%r, block_%s)" % (ref, name, name))
+ block_frame.symbols.analyze_node(block)
+ block_frame.block = name
+ self.enter_frame(block_frame)
+ self.pull_dependencies(block.body)
+ self.blockvisit(block.body, block_frame)
+ self.leave_frame(block_frame, with_python_scope=True)
+ self.outdent()
+
+ self.writeline(
+ "blocks = {%s}" % ", ".join("%r: block_%s" % (x, x) for x in self.blocks),
+ extra=1,
+ )
+
+ # add a function that returns the debug info
+ self.writeline(
+ "debug_info = %r" % "&".join("%s=%s" % x for x in self.debug_info)
+ )
+
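+    # For orientation, a compiled template module has roughly this shape
+    # (illustrative sketch; details depend on the environment options):
+    #     from __future__ import division, generator_stop
+    #     from jinja2.runtime import LoopContext, Macro, Markup, ...
+    #     name = 'example.html'
+    #     def root(context, missing=missing):
+    #         ...
+    #     def block_body(context, missing=missing):
+    #         ...
+    #     blocks = {'body': block_body}
+    #     debug_info = '1=12&3=14'
+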
+ def visit_Block(self, node, frame):
+ """Call a block and register it for the template."""
+ level = 0
+ if frame.toplevel:
+ # if we know that we are a child template, there is no need to
+ # check if we are one
+ if self.has_known_extends:
+ return
+ if self.extends_so_far > 0:
+ self.writeline("if parent_template is None:")
+ self.indent()
+ level += 1
+
+ if node.scoped:
+ context = self.derive_context(frame)
+ else:
+ context = self.get_context_ref()
+
+ if (
+ supports_yield_from
+ and not self.environment.is_async
+ and frame.buffer is None
+ ):
+ self.writeline(
+ "yield from context.blocks[%r][0](%s)" % (node.name, context), node
+ )
+ else:
+ loop = self.environment.is_async and "async for" or "for"
+ self.writeline(
+ "%s event in context.blocks[%r][0](%s):" % (loop, node.name, context),
+ node,
+ )
+ self.indent()
+ self.simple_write("event", frame)
+ self.outdent()
+
+ self.outdent(level)
+
+ def visit_Extends(self, node, frame):
+ """Calls the extender."""
+ if not frame.toplevel:
+ self.fail("cannot use extend from a non top-level scope", node.lineno)
+
+ # if the number of extends statements in general is zero so
+ # far, we don't have to add a check if something extended
+ # the template before this one.
+ if self.extends_so_far > 0:
+
+            # if we have a known extends we just add a template runtime
+            # error into the generated code. We could catch that at compile
+            # time too, but it is better not to confuse users by raising the
+            # same error at different times just "because we can".
+ if not self.has_known_extends:
+ self.writeline("if parent_template is not None:")
+ self.indent()
+ self.writeline("raise TemplateRuntimeError(%r)" % "extended multiple times")
+
+ # if we have a known extends already we don't need that code here
+ # as we know that the template execution will end here.
+ if self.has_known_extends:
+ raise CompilerExit()
+ else:
+ self.outdent()
+
+ self.writeline("parent_template = environment.get_template(", node)
+ self.visit(node.template, frame)
+ self.write(", %r)" % self.name)
+ self.writeline(
+ "for name, parent_block in parent_template.blocks.%s():" % dict_item_iter
+ )
+ self.indent()
+ self.writeline("context.blocks.setdefault(name, []).append(parent_block)")
+ self.outdent()
+
+ # if this extends statement was in the root level we can take
+ # advantage of that information and simplify the generated code
+ # in the top level from this point onwards
+ if frame.rootlevel:
+ self.has_known_extends = True
+
+ # and now we have one more
+ self.extends_so_far += 1
+
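+    # The code generated for ``{% extends "base.html" %}`` in a template
+    # named ``child.html`` looks roughly like this (illustrative):
+    #     parent_template = environment.get_template('base.html', 'child.html')
+    #     for name, parent_block in parent_template.blocks.items():
+    #         context.blocks.setdefault(name, []).append(parent_block)
+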
+ def visit_Include(self, node, frame):
+ """Handles includes."""
+ if node.ignore_missing:
+ self.writeline("try:")
+ self.indent()
+
+ func_name = "get_or_select_template"
+ if isinstance(node.template, nodes.Const):
+ if isinstance(node.template.value, string_types):
+ func_name = "get_template"
+ elif isinstance(node.template.value, (tuple, list)):
+ func_name = "select_template"
+ elif isinstance(node.template, (nodes.Tuple, nodes.List)):
+ func_name = "select_template"
+
+ self.writeline("template = environment.%s(" % func_name, node)
+ self.visit(node.template, frame)
+ self.write(", %r)" % self.name)
+ if node.ignore_missing:
+ self.outdent()
+ self.writeline("except TemplateNotFound:")
+ self.indent()
+ self.writeline("pass")
+ self.outdent()
+ self.writeline("else:")
+ self.indent()
+
+ skip_event_yield = False
+ if node.with_context:
+ loop = self.environment.is_async and "async for" or "for"
+ self.writeline(
+ "%s event in template.root_render_func("
+ "template.new_context(context.get_all(), True, "
+ "%s)):" % (loop, self.dump_local_context(frame))
+ )
+ elif self.environment.is_async:
+ self.writeline(
+ "for event in (await "
+ "template._get_default_module_async())"
+ "._body_stream:"
+ )
+ else:
+ if supports_yield_from:
+ self.writeline("yield from template._get_default_module()._body_stream")
+ skip_event_yield = True
+ else:
+ self.writeline(
+ "for event in template._get_default_module()._body_stream:"
+ )
+
+ if not skip_event_yield:
+ self.indent()
+ self.simple_write("event", frame)
+ self.outdent()
+
+ if node.ignore_missing:
+ self.outdent()
+
+ def visit_Import(self, node, frame):
+ """Visit regular imports."""
+ self.writeline("%s = " % frame.symbols.ref(node.target), node)
+ if frame.toplevel:
+ self.write("context.vars[%r] = " % node.target)
+ if self.environment.is_async:
+ self.write("await ")
+ self.write("environment.get_template(")
+ self.visit(node.template, frame)
+ self.write(", %r)." % self.name)
+ if node.with_context:
+ self.write(
+ "make_module%s(context.get_all(), True, %s)"
+ % (
+ self.environment.is_async and "_async" or "",
+ self.dump_local_context(frame),
+ )
+ )
+ elif self.environment.is_async:
+ self.write("_get_default_module_async()")
+ else:
+ self.write("_get_default_module()")
+ if frame.toplevel and not node.target.startswith("_"):
+ self.writeline("context.exported_vars.discard(%r)" % node.target)
+
+ def visit_FromImport(self, node, frame):
+ """Visit named imports."""
+ self.newline(node)
+ self.write(
+ "included_template = %senvironment.get_template("
+ % (self.environment.is_async and "await " or "")
+ )
+ self.visit(node.template, frame)
+ self.write(", %r)." % self.name)
+ if node.with_context:
+ self.write(
+ "make_module%s(context.get_all(), True, %s)"
+ % (
+ self.environment.is_async and "_async" or "",
+ self.dump_local_context(frame),
+ )
+ )
+ elif self.environment.is_async:
+ self.write("_get_default_module_async()")
+ else:
+ self.write("_get_default_module()")
+
+ var_names = []
+ discarded_names = []
+ for name in node.names:
+ if isinstance(name, tuple):
+ name, alias = name
+ else:
+ alias = name
+ self.writeline(
+ "%s = getattr(included_template, "
+ "%r, missing)" % (frame.symbols.ref(alias), name)
+ )
+ self.writeline("if %s is missing:" % frame.symbols.ref(alias))
+ self.indent()
+ self.writeline(
+ "%s = undefined(%r %% "
+ "included_template.__name__, "
+ "name=%r)"
+ % (
+ frame.symbols.ref(alias),
+ "the template %%r (imported on %s) does "
+ "not export the requested name %s"
+ % (self.position(node), repr(name)),
+ name,
+ )
+ )
+ self.outdent()
+ if frame.toplevel:
+ var_names.append(alias)
+ if not alias.startswith("_"):
+ discarded_names.append(alias)
+
+ if var_names:
+ if len(var_names) == 1:
+ name = var_names[0]
+ self.writeline(
+ "context.vars[%r] = %s" % (name, frame.symbols.ref(name))
+ )
+ else:
+ self.writeline(
+ "context.vars.update({%s})"
+ % ", ".join(
+ "%r: %s" % (name, frame.symbols.ref(name)) for name in var_names
+ )
+ )
+ if discarded_names:
+ if len(discarded_names) == 1:
+ self.writeline("context.exported_vars.discard(%r)" % discarded_names[0])
+ else:
+ self.writeline(
+ "context.exported_vars.difference_"
+ "update((%s))" % ", ".join(imap(repr, discarded_names))
+ )
+
+ def visit_For(self, node, frame):
+ loop_frame = frame.inner()
+ test_frame = frame.inner()
+ else_frame = frame.inner()
+
+ # try to figure out if we have an extended loop. An extended loop
+        # is necessary if the loop is in recursive mode or if the special loop
+ # variable is accessed in the body.
+ extended_loop = node.recursive or "loop" in find_undeclared(
+ node.iter_child_nodes(only=("body",)), ("loop",)
+ )
+
+ loop_ref = None
+ if extended_loop:
+ loop_ref = loop_frame.symbols.declare_parameter("loop")
+
+ loop_frame.symbols.analyze_node(node, for_branch="body")
+ if node.else_:
+ else_frame.symbols.analyze_node(node, for_branch="else")
+
+ if node.test:
+ loop_filter_func = self.temporary_identifier()
+ test_frame.symbols.analyze_node(node, for_branch="test")
+ self.writeline("%s(fiter):" % self.func(loop_filter_func), node.test)
+ self.indent()
+ self.enter_frame(test_frame)
+ self.writeline(self.environment.is_async and "async for " or "for ")
+ self.visit(node.target, loop_frame)
+ self.write(" in ")
+ self.write(self.environment.is_async and "auto_aiter(fiter)" or "fiter")
+ self.write(":")
+ self.indent()
+ self.writeline("if ", node.test)
+ self.visit(node.test, test_frame)
+ self.write(":")
+ self.indent()
+ self.writeline("yield ")
+ self.visit(node.target, loop_frame)
+ self.outdent(3)
+ self.leave_frame(test_frame, with_python_scope=True)
+
+        # if we don't have a recursive loop we have to find the shadowed
+        # variables at that point. Because loops can be nested but the loop
+        # variable is a special one, we have to enforce aliasing for it.
+ if node.recursive:
+ self.writeline(
+ "%s(reciter, loop_render_func, depth=0):" % self.func("loop"), node
+ )
+ self.indent()
+ self.buffer(loop_frame)
+
+ # Use the same buffer for the else frame
+ else_frame.buffer = loop_frame.buffer
+
+ # make sure the loop variable is a special one and raise a template
+ # assertion error if a loop tries to write to loop
+ if extended_loop:
+ self.writeline("%s = missing" % loop_ref)
+
+ for name in node.find_all(nodes.Name):
+ if name.ctx == "store" and name.name == "loop":
+ self.fail(
+ "Can't assign to special loop variable in for-loop target",
+ name.lineno,
+ )
+
+ if node.else_:
+ iteration_indicator = self.temporary_identifier()
+ self.writeline("%s = 1" % iteration_indicator)
+
+ self.writeline(self.environment.is_async and "async for " or "for ", node)
+ self.visit(node.target, loop_frame)
+ if extended_loop:
+ if self.environment.is_async:
+ self.write(", %s in AsyncLoopContext(" % loop_ref)
+ else:
+ self.write(", %s in LoopContext(" % loop_ref)
+ else:
+ self.write(" in ")
+
+ if node.test:
+ self.write("%s(" % loop_filter_func)
+ if node.recursive:
+ self.write("reciter")
+ else:
+ if self.environment.is_async and not extended_loop:
+ self.write("auto_aiter(")
+ self.visit(node.iter, frame)
+ if self.environment.is_async and not extended_loop:
+ self.write(")")
+ if node.test:
+ self.write(")")
+
+ if node.recursive:
+ self.write(", undefined, loop_render_func, depth):")
+ else:
+ self.write(extended_loop and ", undefined):" or ":")
+
+ self.indent()
+ self.enter_frame(loop_frame)
+
+ self.blockvisit(node.body, loop_frame)
+ if node.else_:
+ self.writeline("%s = 0" % iteration_indicator)
+ self.outdent()
+ self.leave_frame(
+ loop_frame, with_python_scope=node.recursive and not node.else_
+ )
+
+ if node.else_:
+ self.writeline("if %s:" % iteration_indicator)
+ self.indent()
+ self.enter_frame(else_frame)
+ self.blockvisit(node.else_, else_frame)
+ self.leave_frame(else_frame)
+ self.outdent()
+
+ # if the node was recursive we have to return the buffer contents
+ # and start the iteration code
+ if node.recursive:
+ self.return_buffer_contents(loop_frame)
+ self.outdent()
+ self.start_write(frame, node)
+ if self.environment.is_async:
+ self.write("await ")
+ self.write("loop(")
+ if self.environment.is_async:
+ self.write("auto_aiter(")
+ self.visit(node.iter, frame)
+ if self.environment.is_async:
+ self.write(")")
+ self.write(", loop)")
+ self.end_write(frame)
+
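+    # Illustrative example (not part of upstream): because ``loop`` is used,
+    # ``{% for x in seq %}{{ loop.index }}{% endfor %}`` compiles to roughly
+    #     for l_1_x, l_1_loop in LoopContext(l_0_seq, undefined):
+    #         yield to_string(environment.getattr(l_1_loop, 'index'))
+    #     l_1_x = l_1_loop = missing
+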
+ def visit_If(self, node, frame):
+ if_frame = frame.soft()
+ self.writeline("if ", node)
+ self.visit(node.test, if_frame)
+ self.write(":")
+ self.indent()
+ self.blockvisit(node.body, if_frame)
+ self.outdent()
+ for elif_ in node.elif_:
+ self.writeline("elif ", elif_)
+ self.visit(elif_.test, if_frame)
+ self.write(":")
+ self.indent()
+ self.blockvisit(elif_.body, if_frame)
+ self.outdent()
+ if node.else_:
+ self.writeline("else:")
+ self.indent()
+ self.blockvisit(node.else_, if_frame)
+ self.outdent()
+
+ def visit_Macro(self, node, frame):
+ macro_frame, macro_ref = self.macro_body(node, frame)
+ self.newline()
+ if frame.toplevel:
+ if not node.name.startswith("_"):
+ self.write("context.exported_vars.add(%r)" % node.name)
+ self.writeline("context.vars[%r] = " % node.name)
+ self.write("%s = " % frame.symbols.ref(node.name))
+ self.macro_def(macro_ref, macro_frame)
+
+ def visit_CallBlock(self, node, frame):
+ call_frame, macro_ref = self.macro_body(node, frame)
+ self.writeline("caller = ")
+ self.macro_def(macro_ref, call_frame)
+ self.start_write(frame, node)
+ self.visit_Call(node.call, frame, forward_caller=True)
+ self.end_write(frame)
+
+ def visit_FilterBlock(self, node, frame):
+ filter_frame = frame.inner()
+ filter_frame.symbols.analyze_node(node)
+ self.enter_frame(filter_frame)
+ self.buffer(filter_frame)
+ self.blockvisit(node.body, filter_frame)
+ self.start_write(frame, node)
+ self.visit_Filter(node.filter, filter_frame)
+ self.end_write(frame)
+ self.leave_frame(filter_frame)
+
+ def visit_With(self, node, frame):
+ with_frame = frame.inner()
+ with_frame.symbols.analyze_node(node)
+ self.enter_frame(with_frame)
+ for target, expr in izip(node.targets, node.values):
+ self.newline()
+ self.visit(target, with_frame)
+ self.write(" = ")
+ self.visit(expr, frame)
+ self.blockvisit(node.body, with_frame)
+ self.leave_frame(with_frame)
+
+ def visit_ExprStmt(self, node, frame):
+ self.newline(node)
+ self.visit(node.node, frame)
+
+ _FinalizeInfo = namedtuple("_FinalizeInfo", ("const", "src"))
+ #: The default finalize function if the environment isn't configured
+ #: with one. Or if the environment has one, this is called on that
+ #: function's output for constants.
+ _default_finalize = text_type
+ _finalize = None
+
+ def _make_finalize(self):
+ """Build the finalize function to be used on constants and at
+ runtime. Cached so it's only created once for all output nodes.
+
+ Returns a ``namedtuple`` with the following attributes:
+
+ ``const``
+ A function to finalize constant data at compile time.
+
+ ``src``
+ Source code to output around nodes to be evaluated at
+ runtime.
+ """
+ if self._finalize is not None:
+ return self._finalize
+
+ finalize = default = self._default_finalize
+ src = None
+
+ if self.environment.finalize:
+ src = "environment.finalize("
+ env_finalize = self.environment.finalize
+
+ def finalize(value):
+ return default(env_finalize(value))
+
+ if getattr(env_finalize, "contextfunction", False) is True:
+ src += "context, "
+ finalize = None # noqa: F811
+ elif getattr(env_finalize, "evalcontextfunction", False) is True:
+ src += "context.eval_ctx, "
+ finalize = None
+ elif getattr(env_finalize, "environmentfunction", False) is True:
+ src += "environment, "
+
+ def finalize(value):
+ return default(env_finalize(self.environment, value))
+
+ self._finalize = self._FinalizeInfo(finalize, src)
+ return self._finalize
+
+ def _output_const_repr(self, group):
+ """Given a group of constant values converted from ``Output``
+ child nodes, produce a string to write to the template module
+ source.
+ """
+ return repr(concat(group))
+
+ def _output_child_to_const(self, node, frame, finalize):
+ """Try to optimize a child of an ``Output`` node by trying to
+ convert it to constant, finalized data at compile time.
+
+ If :exc:`Impossible` is raised, the node is not constant and
+ will be evaluated at runtime. Any other exception will also be
+ evaluated at runtime for easier debugging.
+ """
+ const = node.as_const(frame.eval_ctx)
+
+ if frame.eval_ctx.autoescape:
+ const = escape(const)
+
+ # Template data doesn't go through finalize.
+ if isinstance(node, nodes.TemplateData):
+ return text_type(const)
+
+ return finalize.const(const)
+
+ def _output_child_pre(self, node, frame, finalize):
+ """Output extra source code before visiting a child of an
+ ``Output`` node.
+ """
+ if frame.eval_ctx.volatile:
+ self.write("(escape if context.eval_ctx.autoescape else to_string)(")
+ elif frame.eval_ctx.autoescape:
+ self.write("escape(")
+ else:
+ self.write("to_string(")
+
+ if finalize.src is not None:
+ self.write(finalize.src)
+
+ def _output_child_post(self, node, frame, finalize):
+ """Output extra source code after visiting a child of an
+ ``Output`` node.
+ """
+ self.write(")")
+
+ if finalize.src is not None:
+ self.write(")")
+
+ def visit_Output(self, node, frame):
+ # If an extends is active, don't render outside a block.
+ if frame.require_output_check:
+ # A top-level extends is known to exist at compile time.
+ if self.has_known_extends:
+ return
+
+ self.writeline("if parent_template is None:")
+ self.indent()
+
+ finalize = self._make_finalize()
+ body = []
+
+ # Evaluate constants at compile time if possible. Each item in
+ # body will be either a list of static data or a node to be
+ # evaluated at runtime.
+ for child in node.nodes:
+ try:
+ if not (
+ # If the finalize function requires runtime context,
+ # constants can't be evaluated at compile time.
+ finalize.const
+ # Unless it's basic template data that won't be
+ # finalized anyway.
+ or isinstance(child, nodes.TemplateData)
+ ):
+ raise nodes.Impossible()
+
+ const = self._output_child_to_const(child, frame, finalize)
+ except (nodes.Impossible, Exception):
+ # The node was not constant and needs to be evaluated at
+ # runtime. Or another error was raised, which is easier
+ # to debug at runtime.
+ body.append(child)
+ continue
+
+ if body and isinstance(body[-1], list):
+ body[-1].append(const)
+ else:
+ body.append([const])
+
+ if frame.buffer is not None:
+ if len(body) == 1:
+ self.writeline("%s.append(" % frame.buffer)
+ else:
+ self.writeline("%s.extend((" % frame.buffer)
+
+ self.indent()
+
+ for item in body:
+ if isinstance(item, list):
+ # A group of constant data to join and output.
+ val = self._output_const_repr(item)
+
+ if frame.buffer is None:
+ self.writeline("yield " + val)
+ else:
+ self.writeline(val + ",")
+ else:
+ if frame.buffer is None:
+ self.writeline("yield ", item)
+ else:
+ self.newline(item)
+
+ # A node to be evaluated at runtime.
+ self._output_child_pre(item, frame, finalize)
+ self.visit(item, frame)
+ self._output_child_post(item, frame, finalize)
+
+ if frame.buffer is not None:
+ self.write(",")
+
+ if frame.buffer is not None:
+ self.outdent()
+ self.writeline(")" if len(body) == 1 else "))")
+
+ if frame.require_output_check:
+ self.outdent()
+
+ def visit_Assign(self, node, frame):
+ self.push_assign_tracking()
+ self.newline(node)
+ self.visit(node.target, frame)
+ self.write(" = ")
+ self.visit(node.node, frame)
+ self.pop_assign_tracking(frame)
+
+ def visit_AssignBlock(self, node, frame):
+ self.push_assign_tracking()
+ block_frame = frame.inner()
+ # This is a special case. Since a set block always captures we
+ # will disable output checks. This way one can use set blocks
+ # toplevel even in extended templates.
+ block_frame.require_output_check = False
+ block_frame.symbols.analyze_node(node)
+ self.enter_frame(block_frame)
+ self.buffer(block_frame)
+ self.blockvisit(node.body, block_frame)
+ self.newline(node)
+ self.visit(node.target, frame)
+ self.write(" = (Markup if context.eval_ctx.autoescape else identity)(")
+ if node.filter is not None:
+ self.visit_Filter(node.filter, block_frame)
+ else:
+ self.write("concat(%s)" % block_frame.buffer)
+ self.write(")")
+ self.pop_assign_tracking(frame)
+ self.leave_frame(block_frame)
+
+ # -- Expression Visitors
+
+ def visit_Name(self, node, frame):
+ if node.ctx == "store" and frame.toplevel:
+ if self._assign_stack:
+ self._assign_stack[-1].add(node.name)
+ ref = frame.symbols.ref(node.name)
+
+ # If we are looking up a variable we might have to deal with the
+ # case where it's undefined. We can skip that case if the load
+        # instruction indicates a parameter, as parameters are always defined.
+ if node.ctx == "load":
+ load = frame.symbols.find_load(ref)
+ if not (
+ load is not None
+ and load[0] == VAR_LOAD_PARAMETER
+ and not self.parameter_is_undeclared(ref)
+ ):
+ self.write(
+ "(undefined(name=%r) if %s is missing else %s)"
+ % (node.name, ref, ref)
+ )
+ return
+
+ self.write(ref)
+
+ def visit_NSRef(self, node, frame):
+ # NSRefs can only be used to store values; since they use the normal
+ # `foo.bar` notation they will be parsed as a normal attribute access
+ # when used anywhere but in a `set` context
+ ref = frame.symbols.ref(node.name)
+ self.writeline("if not isinstance(%s, Namespace):" % ref)
+ self.indent()
+ self.writeline(
+ "raise TemplateRuntimeError(%r)"
+ % "cannot assign attribute on non-namespace object"
+ )
+ self.outdent()
+ self.writeline("%s[%r]" % (ref, node.attr))
+
+ def visit_Const(self, node, frame):
+ val = node.as_const(frame.eval_ctx)
+ if isinstance(val, float):
+ self.write(str(val))
+ else:
+ self.write(repr(val))
+
+ def visit_TemplateData(self, node, frame):
+ try:
+ self.write(repr(node.as_const(frame.eval_ctx)))
+ except nodes.Impossible:
+ self.write(
+ "(Markup if context.eval_ctx.autoescape else identity)(%r)" % node.data
+ )
+
+ def visit_Tuple(self, node, frame):
+ self.write("(")
+ idx = -1
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(", ")
+ self.visit(item, frame)
+ self.write(idx == 0 and ",)" or ")")
+
+ def visit_List(self, node, frame):
+ self.write("[")
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(", ")
+ self.visit(item, frame)
+ self.write("]")
+
+ def visit_Dict(self, node, frame):
+ self.write("{")
+ for idx, item in enumerate(node.items):
+ if idx:
+ self.write(", ")
+ self.visit(item.key, frame)
+ self.write(": ")
+ self.visit(item.value, frame)
+ self.write("}")
+
+ def binop(operator, interceptable=True): # noqa: B902
+ @optimizeconst
+ def visitor(self, node, frame):
+ if (
+ self.environment.sandboxed
+ and operator in self.environment.intercepted_binops
+ ):
+ self.write("environment.call_binop(context, %r, " % operator)
+ self.visit(node.left, frame)
+ self.write(", ")
+ self.visit(node.right, frame)
+ else:
+ self.write("(")
+ self.visit(node.left, frame)
+ self.write(" %s " % operator)
+ self.visit(node.right, frame)
+ self.write(")")
+
+ return visitor
+
+ def uaop(operator, interceptable=True): # noqa: B902
+ @optimizeconst
+ def visitor(self, node, frame):
+ if (
+ self.environment.sandboxed
+ and operator in self.environment.intercepted_unops
+ ):
+ self.write("environment.call_unop(context, %r, " % operator)
+ self.visit(node.node, frame)
+ else:
+ self.write("(" + operator)
+ self.visit(node.node, frame)
+ self.write(")")
+
+ return visitor
+
+ visit_Add = binop("+")
+ visit_Sub = binop("-")
+ visit_Mul = binop("*")
+ visit_Div = binop("/")
+ visit_FloorDiv = binop("//")
+ visit_Pow = binop("**")
+ visit_Mod = binop("%")
+ visit_And = binop("and", interceptable=False)
+ visit_Or = binop("or", interceptable=False)
+ visit_Pos = uaop("+")
+ visit_Neg = uaop("-")
+ visit_Not = uaop("not ", interceptable=False)
+ del binop, uaop
+
+ @optimizeconst
+ def visit_Concat(self, node, frame):
+ if frame.eval_ctx.volatile:
+ func_name = "(context.eval_ctx.volatile and markup_join or unicode_join)"
+ elif frame.eval_ctx.autoescape:
+ func_name = "markup_join"
+ else:
+ func_name = "unicode_join"
+ self.write("%s((" % func_name)
+ for arg in node.nodes:
+ self.visit(arg, frame)
+ self.write(", ")
+ self.write("))")
+
+ @optimizeconst
+ def visit_Compare(self, node, frame):
+ self.write("(")
+ self.visit(node.expr, frame)
+ for op in node.ops:
+ self.visit(op, frame)
+ self.write(")")
+
+ def visit_Operand(self, node, frame):
+ self.write(" %s " % operators[node.op])
+ self.visit(node.expr, frame)
+
+ @optimizeconst
+ def visit_Getattr(self, node, frame):
+ if self.environment.is_async:
+ self.write("(await auto_await(")
+
+ self.write("environment.getattr(")
+ self.visit(node.node, frame)
+ self.write(", %r)" % node.attr)
+
+ if self.environment.is_async:
+ self.write("))")
+
+ @optimizeconst
+ def visit_Getitem(self, node, frame):
+ # slices bypass the environment getitem method.
+ if isinstance(node.arg, nodes.Slice):
+ self.visit(node.node, frame)
+ self.write("[")
+ self.visit(node.arg, frame)
+ self.write("]")
+ else:
+ if self.environment.is_async:
+ self.write("(await auto_await(")
+
+ self.write("environment.getitem(")
+ self.visit(node.node, frame)
+ self.write(", ")
+ self.visit(node.arg, frame)
+ self.write(")")
+
+ if self.environment.is_async:
+ self.write("))")
+
+ def visit_Slice(self, node, frame):
+ if node.start is not None:
+ self.visit(node.start, frame)
+ self.write(":")
+ if node.stop is not None:
+ self.visit(node.stop, frame)
+ if node.step is not None:
+ self.write(":")
+ self.visit(node.step, frame)
+
+ @optimizeconst
+ def visit_Filter(self, node, frame):
+ if self.environment.is_async:
+ self.write("await auto_await(")
+ self.write(self.filters[node.name] + "(")
+ func = self.environment.filters.get(node.name)
+ if func is None:
+ self.fail("no filter named %r" % node.name, node.lineno)
+ if getattr(func, "contextfilter", False) is True:
+ self.write("context, ")
+ elif getattr(func, "evalcontextfilter", False) is True:
+ self.write("context.eval_ctx, ")
+ elif getattr(func, "environmentfilter", False) is True:
+ self.write("environment, ")
+
+ # if the filter node is None we are inside a filter block
+ # and want to write to the current buffer
+ if node.node is not None:
+ self.visit(node.node, frame)
+ elif frame.eval_ctx.volatile:
+ self.write(
+ "(context.eval_ctx.autoescape and"
+ " Markup(concat(%s)) or concat(%s))" % (frame.buffer, frame.buffer)
+ )
+ elif frame.eval_ctx.autoescape:
+ self.write("Markup(concat(%s))" % frame.buffer)
+ else:
+ self.write("concat(%s)" % frame.buffer)
+ self.signature(node, frame)
+ self.write(")")
+ if self.environment.is_async:
+ self.write(")")
+
+ @optimizeconst
+ def visit_Test(self, node, frame):
+ self.write(self.tests[node.name] + "(")
+ if node.name not in self.environment.tests:
+ self.fail("no test named %r" % node.name, node.lineno)
+ self.visit(node.node, frame)
+ self.signature(node, frame)
+ self.write(")")
+
+ @optimizeconst
+ def visit_CondExpr(self, node, frame):
+ def write_expr2():
+ if node.expr2 is not None:
+ return self.visit(node.expr2, frame)
+ self.write(
+ "cond_expr_undefined(%r)"
+ % (
+ "the inline if-"
+ "expression on %s evaluated to false and "
+ "no else section was defined." % self.position(node)
+ )
+ )
+
+ self.write("(")
+ self.visit(node.expr1, frame)
+ self.write(" if ")
+ self.visit(node.test, frame)
+ self.write(" else ")
+ write_expr2()
+ self.write(")")
+
+ @optimizeconst
+ def visit_Call(self, node, frame, forward_caller=False):
+ if self.environment.is_async:
+ self.write("await auto_await(")
+ if self.environment.sandboxed:
+ self.write("environment.call(context, ")
+ else:
+ self.write("context.call(")
+ self.visit(node.node, frame)
+ extra_kwargs = forward_caller and {"caller": "caller"} or None
+ self.signature(node, frame, extra_kwargs)
+ self.write(")")
+ if self.environment.is_async:
+ self.write(")")
+
+ def visit_Keyword(self, node, frame):
+ self.write(node.key + "=")
+ self.visit(node.value, frame)
+
+ # -- Unused nodes for extensions
+
+ def visit_MarkSafe(self, node, frame):
+ self.write("Markup(")
+ self.visit(node.expr, frame)
+ self.write(")")
+
+ def visit_MarkSafeIfAutoescape(self, node, frame):
+ self.write("(context.eval_ctx.autoescape and Markup or identity)(")
+ self.visit(node.expr, frame)
+ self.write(")")
+
+ def visit_EnvironmentAttribute(self, node, frame):
+ self.write("environment." + node.name)
+
+ def visit_ExtensionAttribute(self, node, frame):
+ self.write("environment.extensions[%r].%s" % (node.identifier, node.name))
+
+ def visit_ImportedName(self, node, frame):
+ self.write(self.import_aliases[node.importname])
+
+ def visit_InternalName(self, node, frame):
+ self.write(node.name)
+
+ def visit_ContextReference(self, node, frame):
+ self.write("context")
+
+ def visit_DerivedContextReference(self, node, frame):
+ self.write(self.derive_context(frame))
+
+ def visit_Continue(self, node, frame):
+ self.writeline("continue", node)
+
+ def visit_Break(self, node, frame):
+ self.writeline("break", node)
+
+ def visit_Scope(self, node, frame):
+ scope_frame = frame.inner()
+ scope_frame.symbols.analyze_node(node)
+ self.enter_frame(scope_frame)
+ self.blockvisit(node.body, scope_frame)
+ self.leave_frame(scope_frame)
+
+ def visit_OverlayScope(self, node, frame):
+ ctx = self.temporary_identifier()
+ self.writeline("%s = %s" % (ctx, self.derive_context(frame)))
+ self.writeline("%s.vars = " % ctx)
+ self.visit(node.context, frame)
+ self.push_context_reference(ctx)
+
+ scope_frame = frame.inner(isolated=True)
+ scope_frame.symbols.analyze_node(node)
+ self.enter_frame(scope_frame)
+ self.blockvisit(node.body, scope_frame)
+ self.leave_frame(scope_frame)
+ self.pop_context_reference()
+
+ def visit_EvalContextModifier(self, node, frame):
+ for keyword in node.options:
+ self.writeline("context.eval_ctx.%s = " % keyword.key)
+ self.visit(keyword.value, frame)
+ try:
+ val = keyword.value.as_const(frame.eval_ctx)
+ except nodes.Impossible:
+ frame.eval_ctx.volatile = True
+ else:
+ setattr(frame.eval_ctx, keyword.key, val)
+
+ def visit_ScopedEvalContextModifier(self, node, frame):
+ old_ctx_name = self.temporary_identifier()
+ saved_ctx = frame.eval_ctx.save()
+ self.writeline("%s = context.eval_ctx.save()" % old_ctx_name)
+ self.visit_EvalContextModifier(node, frame)
+ for child in node.body:
+ self.visit(child, frame)
+ frame.eval_ctx.revert(saved_ctx)
+ self.writeline("context.eval_ctx.revert(%s)" % old_ctx_name)
diff --git a/third_party/python/Jinja2/jinja2/constants.py b/third_party/python/Jinja2/jinja2/constants.py
new file mode 100644
index 0000000000..bf7f2ca721
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/constants.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+#: list of lorem ipsum words used by the lipsum() helper function
+LOREM_IPSUM_WORDS = u"""\
+a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
+auctor augue bibendum blandit class commodo condimentum congue consectetuer
+consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
+diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
+elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
+faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
+hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
+justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
+luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
+mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
+nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
+penatibus per pharetra phasellus placerat platea porta porttitor posuere
+potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
+ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
+sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
+tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
+ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
+viverra volutpat vulputate"""
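+
+# Illustrative note (not part of upstream): the ``lipsum`` default global in
+# defaults.py maps to ``utils.generate_lorem_ipsum()``, which samples from
+# this word list, e.g. ``{{ lipsum(n=2, html=False) }}`` in a template.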
diff --git a/third_party/python/Jinja2/jinja2/debug.py b/third_party/python/Jinja2/jinja2/debug.py
new file mode 100644
index 0000000000..5d8aec31d0
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/debug.py
@@ -0,0 +1,268 @@
+import sys
+from types import CodeType
+
+from . import TemplateSyntaxError
+from ._compat import PYPY
+from .utils import internal_code
+from .utils import missing
+
+
+def rewrite_traceback_stack(source=None):
+ """Rewrite the current exception to replace any tracebacks from
+ within compiled template code with tracebacks that look like they
+ came from the template source.
+
+ This must be called within an ``except`` block.
+
+    :param source: For ``TemplateSyntaxError``, the original source if
+        known.
+    :return: A :func:`sys.exc_info` tuple that can be re-raised.
+ """
+ exc_type, exc_value, tb = sys.exc_info()
+
+ if isinstance(exc_value, TemplateSyntaxError) and not exc_value.translated:
+ exc_value.translated = True
+ exc_value.source = source
+
+ try:
+ # Remove the old traceback on Python 3, otherwise the frames
+ # from the compiler still show up.
+ exc_value.with_traceback(None)
+ except AttributeError:
+ pass
+
+ # Outside of runtime, so the frame isn't executing template
+ # code, but it still needs to point at the template.
+ tb = fake_traceback(
+ exc_value, None, exc_value.filename or "<unknown>", exc_value.lineno
+ )
+ else:
+ # Skip the frame for the render function.
+ tb = tb.tb_next
+
+ stack = []
+
+    # Build the stack of traceback objects, replacing any in template
+ # code with the source file and line information.
+ while tb is not None:
+ # Skip frames decorated with @internalcode. These are internal
+ # calls that aren't useful in template debugging output.
+ if tb.tb_frame.f_code in internal_code:
+ tb = tb.tb_next
+ continue
+
+ template = tb.tb_frame.f_globals.get("__jinja_template__")
+
+ if template is not None:
+ lineno = template.get_corresponding_lineno(tb.tb_lineno)
+ fake_tb = fake_traceback(exc_value, tb, template.filename, lineno)
+ stack.append(fake_tb)
+ else:
+ stack.append(tb)
+
+ tb = tb.tb_next
+
+ tb_next = None
+
+ # Assign tb_next in reverse to avoid circular references.
+ for tb in reversed(stack):
+ tb_next = tb_set_next(tb, tb_next)
+
+ return exc_type, exc_value, tb_next
+
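+# Minimal usage sketch (illustrative; ``Environment.handle_exception`` does
+# roughly this, with ``reraise`` from jinja2._compat):
+#     try:
+#         template.render()
+#     except Exception:
+#         reraise(*rewrite_traceback_stack(source=None))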
+
+def fake_traceback(exc_value, tb, filename, lineno):
+ """Produce a new traceback object that looks like it came from the
+ template source instead of the compiled code. The filename, line
+ number, and location name will point to the template, and the local
+ variables will be the current template context.
+
+ :param exc_value: The original exception to be re-raised to create
+ the new traceback.
+ :param tb: The original traceback to get the local variables and
+ code info from.
+ :param filename: The template filename.
+ :param lineno: The line number in the template source.
+ """
+ if tb is not None:
+ # Replace the real locals with the context that would be
+ # available at that point in the template.
+ locals = get_template_locals(tb.tb_frame.f_locals)
+ locals.pop("__jinja_exception__", None)
+ else:
+ locals = {}
+
+ globals = {
+ "__name__": filename,
+ "__file__": filename,
+ "__jinja_exception__": exc_value,
+ }
+ # Raise an exception at the correct line number.
+ code = compile("\n" * (lineno - 1) + "raise __jinja_exception__", filename, "exec")
+
+ # Build a new code object that points to the template file and
+ # replaces the location with a block name.
+ try:
+ location = "template"
+
+ if tb is not None:
+ function = tb.tb_frame.f_code.co_name
+
+ if function == "root":
+ location = "top-level template code"
+ elif function.startswith("block_"):
+ location = 'block "%s"' % function[6:]
+
+ # Collect arguments for the new code object. CodeType only
+ # accepts positional arguments, and arguments were inserted in
+ # new Python versions.
+ code_args = []
+
+ for attr in (
+ "argcount",
+ "posonlyargcount", # Python 3.8
+ "kwonlyargcount", # Python 3
+ "nlocals",
+ "stacksize",
+ "flags",
+ "code", # codestring
+ "consts", # constants
+ "names",
+ "varnames",
+ ("filename", filename),
+ ("name", location),
+ "firstlineno",
+ "lnotab",
+ "freevars",
+ "cellvars",
+ ):
+ if isinstance(attr, tuple):
+ # Replace with given value.
+ code_args.append(attr[1])
+ continue
+
+ try:
+ # Copy original value if it exists.
+ code_args.append(getattr(code, "co_" + attr))
+ except AttributeError:
+ # Some arguments were added later.
+ continue
+
+ code = CodeType(*code_args)
+ except Exception:
+ # Some environments such as Google App Engine don't support
+ # modifying code objects.
+ pass
+
+ # Execute the new code, which is guaranteed to raise, and return
+ # the new traceback without this frame.
+ try:
+ exec(code, globals, locals)
+ except BaseException:
+ return sys.exc_info()[2].tb_next
+
+
+def get_template_locals(real_locals):
+ """Based on the runtime locals, get the context that would be
+ available at that point in the template.
+ """
+ # Start with the current template context.
+ ctx = real_locals.get("context")
+
+ if ctx:
+ data = ctx.get_all().copy()
+ else:
+ data = {}
+
+ # Might be in a derived context that only sets local variables
+ # rather than pushing a context. Local variables follow the scheme
+ # l_depth_name. Find the highest-depth local that has a value for
+ # each name.
+ local_overrides = {}
+
+ for name, value in real_locals.items():
+ if not name.startswith("l_") or value is missing:
+ # Not a template variable, or no longer relevant.
+ continue
+
+ try:
+ _, depth, name = name.split("_", 2)
+ depth = int(depth)
+ except ValueError:
+ continue
+
+ cur_depth = local_overrides.get(name, (-1,))[0]
+
+ if cur_depth < depth:
+ local_overrides[name] = (depth, value)
+
+ # Modify the context with any derived context.
+ for name, (_, value) in local_overrides.items():
+ if value is missing:
+ data.pop(name, None)
+ else:
+ data[name] = value
+
+ return data
+
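+# Illustrative example (not part of upstream): given runtime locals like
+#     {"l_0_user": 'a', "l_1_user": 'b', "depth": 2}
+# the recovered context is {"user": 'b'}: the deepest ``l_<depth>_<name>``
+# entry wins and non-template locals are ignored.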
+
+if sys.version_info >= (3, 7):
+ # tb_next is directly assignable as of Python 3.7
+ def tb_set_next(tb, tb_next):
+ tb.tb_next = tb_next
+ return tb
+
+
+elif PYPY:
+    # PyPy might have special tproxy support; the ctypes approach below
+    # won't work there.
+ try:
+ import tputil
+ except ImportError:
+ # Without tproxy support, use the original traceback.
+ def tb_set_next(tb, tb_next):
+ return tb
+
+ else:
+ # With tproxy support, create a proxy around the traceback that
+ # returns the new tb_next.
+ def tb_set_next(tb, tb_next):
+ def controller(op):
+ if op.opname == "__getattribute__" and op.args[0] == "tb_next":
+ return tb_next
+
+ return op.delegate()
+
+ return tputil.make_proxy(controller, obj=tb)
+
+
+else:
+ # Use ctypes to assign tb_next at the C level since it's read-only
+ # from Python.
+ import ctypes
+
+ class _CTraceback(ctypes.Structure):
+ _fields_ = [
+ # Extra PyObject slots when compiled with Py_TRACE_REFS.
+ ("PyObject_HEAD", ctypes.c_byte * object().__sizeof__()),
+ # Only care about tb_next as an object, not a traceback.
+ ("tb_next", ctypes.py_object),
+ ]
+
+ def tb_set_next(tb, tb_next):
+ c_tb = _CTraceback.from_address(id(tb))
+
+ # Clear out the old tb_next.
+ if tb.tb_next is not None:
+ c_tb_next = ctypes.py_object(tb.tb_next)
+ c_tb.tb_next = ctypes.py_object()
+ ctypes.pythonapi.Py_DecRef(c_tb_next)
+
+ # Assign the new tb_next.
+ if tb_next is not None:
+ c_tb_next = ctypes.py_object(tb_next)
+ ctypes.pythonapi.Py_IncRef(c_tb_next)
+ c_tb.tb_next = c_tb_next
+
+ return tb
diff --git a/third_party/python/Jinja2/jinja2/defaults.py b/third_party/python/Jinja2/jinja2/defaults.py
new file mode 100644
index 0000000000..8e0e7d7710
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/defaults.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+from ._compat import range_type
+from .filters import FILTERS as DEFAULT_FILTERS # noqa: F401
+from .tests import TESTS as DEFAULT_TESTS # noqa: F401
+from .utils import Cycler
+from .utils import generate_lorem_ipsum
+from .utils import Joiner
+from .utils import Namespace
+
+# defaults for the parser / lexer
+BLOCK_START_STRING = "{%"
+BLOCK_END_STRING = "%}"
+VARIABLE_START_STRING = "{{"
+VARIABLE_END_STRING = "}}"
+COMMENT_START_STRING = "{#"
+COMMENT_END_STRING = "#}"
+LINE_STATEMENT_PREFIX = None
+LINE_COMMENT_PREFIX = None
+TRIM_BLOCKS = False
+LSTRIP_BLOCKS = False
+NEWLINE_SEQUENCE = "\n"
+KEEP_TRAILING_NEWLINE = False
+
+# default filters, tests and namespace
+
+DEFAULT_NAMESPACE = {
+ "range": range_type,
+ "dict": dict,
+ "lipsum": generate_lorem_ipsum,
+ "cycler": Cycler,
+ "joiner": Joiner,
+ "namespace": Namespace,
+}
+
+# default policies
+DEFAULT_POLICIES = {
+ "compiler.ascii_str": True,
+ "urlize.rel": "noopener",
+ "urlize.target": None,
+ "truncate.leeway": 5,
+ "json.dumps_function": None,
+ "json.dumps_kwargs": {"sort_keys": True},
+ "ext.i18n.trimmed": False,
+}
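+
+# Illustrative example (not part of upstream): each Environment copies these
+# mappings, so they can be adjusted per instance, e.g.
+#     env = Environment()
+#     env.policies["json.dumps_kwargs"] = {"sort_keys": False}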
diff --git a/third_party/python/Jinja2/jinja2/environment.py b/third_party/python/Jinja2/jinja2/environment.py
new file mode 100644
index 0000000000..8430390eea
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/environment.py
@@ -0,0 +1,1362 @@
+# -*- coding: utf-8 -*-
+"""Classes for managing templates and their runtime and compile time
+options.
+"""
+import os
+import sys
+import weakref
+from functools import partial
+from functools import reduce
+
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import encode_filename
+from ._compat import implements_iterator
+from ._compat import implements_to_string
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import PYPY
+from ._compat import reraise
+from ._compat import string_types
+from ._compat import text_type
+from .compiler import CodeGenerator
+from .compiler import generate
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import DEFAULT_FILTERS
+from .defaults import DEFAULT_NAMESPACE
+from .defaults import DEFAULT_POLICIES
+from .defaults import DEFAULT_TESTS
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .exceptions import TemplateNotFound
+from .exceptions import TemplateRuntimeError
+from .exceptions import TemplatesNotFound
+from .exceptions import TemplateSyntaxError
+from .exceptions import UndefinedError
+from .lexer import get_lexer
+from .lexer import TokenStream
+from .nodes import EvalContext
+from .parser import Parser
+from .runtime import Context
+from .runtime import new_context
+from .runtime import Undefined
+from .utils import concat
+from .utils import consume
+from .utils import have_async_gen
+from .utils import import_string
+from .utils import internalcode
+from .utils import LRUCache
+from .utils import missing
+
+# for direct template usage we have up to ten living environments
+_spontaneous_environments = LRUCache(10)
+
+
+def get_spontaneous_environment(cls, *args):
+ """Return a new spontaneous environment. A spontaneous environment
+ is used for templates created directly rather than through an
+ existing environment.
+
+ :param cls: Environment class to create.
+ :param args: Positional arguments passed to environment.
+ """
+ key = (cls, args)
+
+ try:
+ return _spontaneous_environments[key]
+ except KeyError:
+ _spontaneous_environments[key] = env = cls(*args)
+ env.shared = True
+ return env
+
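+# Illustrative note (not part of upstream): ``Template("Hi {{ name }}!")``
+# goes through this helper, so templates constructed directly with identical
+# options share a single cached environment.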
+
+def create_cache(size):
+ """Return the cache class for the given size."""
+ if size == 0:
+ return None
+ if size < 0:
+ return {}
+ return LRUCache(size)
+
+
+def copy_cache(cache):
+ """Create an empty copy of the given cache."""
+ if cache is None:
+ return None
+ elif type(cache) is dict:
+ return {}
+ return LRUCache(cache.capacity)
+
+
+def load_extensions(environment, extensions):
+ """Load the extensions from the list and bind it to the environment.
+ Returns a dict of instantiated environments.
+ """
+ result = {}
+ for extension in extensions:
+ if isinstance(extension, string_types):
+ extension = import_string(extension)
+ result[extension.identifier] = extension(environment)
+ return result
+
+
+def fail_for_missing_callable(string, name):
+ msg = string % name
+ if isinstance(name, Undefined):
+ try:
+ name._fail_with_undefined_error()
+ except Exception as e:
+ msg = "%s (%s; did you forget to quote the callable name?)" % (msg, e)
+ raise TemplateRuntimeError(msg)
+
+
+def _environment_sanity_check(environment):
+ """Perform a sanity check on the environment."""
+ assert issubclass(
+ environment.undefined, Undefined
+ ), "undefined must be a subclass of undefined because filters depend on it."
+ assert (
+ environment.block_start_string
+ != environment.variable_start_string
+ != environment.comment_start_string
+ ), "block, variable and comment start strings must be different"
+ assert environment.newline_sequence in (
+ "\r",
+ "\r\n",
+ "\n",
+ ), "newline_sequence set to unknown line ending string."
+ return environment
+
+
+class Environment(object):
+ r"""The core component of Jinja is the `Environment`. It contains
+ important shared variables like configuration, filters, tests,
+ globals and others. Instances of this class may be modified if
+ they are not shared and if no template was loaded so far.
+ Modifications on environments after the first template was loaded
+ will lead to surprising effects and undefined behavior.
+
+ Here are the possible initialization parameters:
+
+ `block_start_string`
+ The string marking the beginning of a block. Defaults to ``'{%'``.
+
+ `block_end_string`
+ The string marking the end of a block. Defaults to ``'%}'``.
+
+ `variable_start_string`
+ The string marking the beginning of a print statement.
+ Defaults to ``'{{'``.
+
+ `variable_end_string`
+ The string marking the end of a print statement. Defaults to
+ ``'}}'``.
+
+ `comment_start_string`
+ The string marking the beginning of a comment. Defaults to ``'{#'``.
+
+ `comment_end_string`
+ The string marking the end of a comment. Defaults to ``'#}'``.
+
+ `line_statement_prefix`
+ If given and a string, this will be used as prefix for line based
+ statements. See also :ref:`line-statements`.
+
+ `line_comment_prefix`
+ If given and a string, this will be used as prefix for line based
+ comments. See also :ref:`line-statements`.
+
+ .. versionadded:: 2.2
+
+ `trim_blocks`
+ If this is set to ``True`` the first newline after a block is
+ removed (block, not variable tag!). Defaults to `False`.
+
+ `lstrip_blocks`
+ If this is set to ``True`` leading spaces and tabs are stripped
+ from the start of a line to a block. Defaults to `False`.
+
+ `newline_sequence`
+ The sequence that starts a newline. Must be one of ``'\r'``,
+ ``'\n'`` or ``'\r\n'``. The default is ``'\n'`` which is a
+ useful default for Linux and OS X systems as well as web
+ applications.
+
+ `keep_trailing_newline`
+ Preserve the trailing newline when rendering templates.
+ The default is ``False``, which causes a single newline,
+ if present, to be stripped from the end of the template.
+
+ .. versionadded:: 2.7
+
+ `extensions`
+ List of Jinja extensions to use. This can either be import paths
+ as strings or extension classes. For more information have a
+ look at :ref:`the extensions documentation <jinja-extensions>`.
+
+ `optimized`
+ should the optimizer be enabled? Default is ``True``.
+
+ `undefined`
+ :class:`Undefined` or a subclass of it that is used to represent
+ undefined values in the template.
+
+ `finalize`
+ A callable that can be used to process the result of a variable
+ expression before it is output. For example one can convert
+ ``None`` implicitly into an empty string here.
+
+ `autoescape`
+ If set to ``True`` the XML/HTML autoescaping feature is enabled by
+ default. For more details about autoescaping see
+ :class:`~markupsafe.Markup`. As of Jinja 2.4 this can also
+ be a callable that is passed the template name and has to
+ return ``True`` or ``False`` depending on whether autoescaping
+ should be enabled by default.
+
+ .. versionchanged:: 2.4
+ `autoescape` can now be a function
+
+ `loader`
+ The template loader for this environment.
+
+ `cache_size`
+ The size of the cache. Per default this is ``400`` which means
+ that if more than 400 templates are loaded the loader will clean
+ out the least recently used template. If the cache size is set to
+ ``0`` templates are recompiled all the time, if the cache size is
+ ``-1`` the cache will not be cleaned.
+
+ .. versionchanged:: 2.8
+ The cache size was increased to 400 from a low 50.
+
+ `auto_reload`
+ Some loaders load templates from locations where the template
+ sources may change (e.g. the file system or a database). If
+ ``auto_reload`` is set to ``True`` (default), every time a template is
+ requested the loader checks if the source changed and, if so,
+ reloads the template. For higher performance it's possible to
+ disable that.
+
+ `bytecode_cache`
+ If set to a bytecode cache object, this object will provide a
+ cache for the internal Jinja bytecode so that templates don't
+ have to be parsed if they were not changed.
+
+ See :ref:`bytecode-cache` for more information.
+
+ `enable_async`
+ If set to true this enables async template execution which allows
+ you to take advantage of newer Python features. This requires
+ Python 3.6 or later.
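+
+ Example usage (a sketch; assumes a ``templates`` directory with an
+ ``index.html`` in it)::
+
+ from jinja2 import Environment, FileSystemLoader, select_autoescape
+
+ env = Environment(
+ loader=FileSystemLoader("templates"),
+ autoescape=select_autoescape(["html", "xml"]),
+ trim_blocks=True,
+ )
+ template = env.get_template("index.html")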
+ """
+
+ #: if this environment is sandboxed. Modifying this variable won't make
+ #: the environment sandboxed though. For a real sandboxed environment
+ #: have a look at jinja2.sandbox. This flag alone controls the code
+ #: generation by the compiler.
+ sandboxed = False
+
+ #: True if the environment is just an overlay
+ overlayed = False
+
+ #: the environment this environment is linked to if it is an overlay
+ linked_to = None
+
+ #: shared environments have this set to `True`. A shared environment
+ #: must not be modified
+ shared = False
+
+ #: the class that is used for code generation. See
+ #: :class:`~jinja2.compiler.CodeGenerator` for more information.
+ code_generator_class = CodeGenerator
+
+ #: the context class that is used for templates. See
+ #: :class:`~jinja2.runtime.Context` for more information.
+ context_class = Context
+
+ def __init__(
+ self,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ loader=None,
+ cache_size=400,
+ auto_reload=True,
+ bytecode_cache=None,
+ enable_async=False,
+ ):
+ # !!Important notice!!
+ # The constructor accepts quite a few arguments that should be
+ # passed by keyword rather than position. However it's important to
+ # not change the order of arguments because it's used at least
+ # internally in those cases:
+ # - spontaneous environments (i18n extension and Template)
+ # - unittests
+ # If parameter changes are required, only add parameters at the end
+ # and don't change the order (or the defaults!) of the existing
+ # arguments.
+
+ # lexer / parser information
+ self.block_start_string = block_start_string
+ self.block_end_string = block_end_string
+ self.variable_start_string = variable_start_string
+ self.variable_end_string = variable_end_string
+ self.comment_start_string = comment_start_string
+ self.comment_end_string = comment_end_string
+ self.line_statement_prefix = line_statement_prefix
+ self.line_comment_prefix = line_comment_prefix
+ self.trim_blocks = trim_blocks
+ self.lstrip_blocks = lstrip_blocks
+ self.newline_sequence = newline_sequence
+ self.keep_trailing_newline = keep_trailing_newline
+
+ # runtime information
+ self.undefined = undefined
+ self.optimized = optimized
+ self.finalize = finalize
+ self.autoescape = autoescape
+
+ # defaults
+ self.filters = DEFAULT_FILTERS.copy()
+ self.tests = DEFAULT_TESTS.copy()
+ self.globals = DEFAULT_NAMESPACE.copy()
+
+ # set the loader provided
+ self.loader = loader
+ self.cache = create_cache(cache_size)
+ self.bytecode_cache = bytecode_cache
+ self.auto_reload = auto_reload
+
+ # configurable policies
+ self.policies = DEFAULT_POLICIES.copy()
+
+ # load extensions
+ self.extensions = load_extensions(self, extensions)
+
+ self.enable_async = enable_async
+ self.is_async = self.enable_async and have_async_gen
+ if self.is_async:
+ # runs patch_all() to enable async support
+ from . import asyncsupport # noqa: F401
+
+ _environment_sanity_check(self)
+
+ def add_extension(self, extension):
+ """Adds an extension after the environment was created.
+
+ .. versionadded:: 2.5
+ """
+ self.extensions.update(load_extensions(self, [extension]))
+
+ def extend(self, **attributes):
+ """Add the items to the instance of the environment if they do not exist
+ yet. This is used by :ref:`extensions <writing-extensions>` to register
+ callbacks and configuration values without breaking inheritance.
+ """
+ for key, value in iteritems(attributes):
+ if not hasattr(self, key):
+ setattr(self, key, value)
+
+ def overlay(
+ self,
+ block_start_string=missing,
+ block_end_string=missing,
+ variable_start_string=missing,
+ variable_end_string=missing,
+ comment_start_string=missing,
+ comment_end_string=missing,
+ line_statement_prefix=missing,
+ line_comment_prefix=missing,
+ trim_blocks=missing,
+ lstrip_blocks=missing,
+ extensions=missing,
+ optimized=missing,
+ undefined=missing,
+ finalize=missing,
+ autoescape=missing,
+ loader=missing,
+ cache_size=missing,
+ auto_reload=missing,
+ bytecode_cache=missing,
+ ):
+ """Create a new overlay environment that shares all the data with the
+ current environment except for cache and the overridden attributes.
+ Extensions cannot be removed for an overlayed environment. An overlayed
+ environment automatically gets all the extensions of the environment it
+ is linked to plus optional extra extensions.
+
+ Creating overlays should happen after the initial environment was set
+ up completely. Not all attributes are truly linked, some are just
+ copied over so modifications on the original environment may not shine
+ through.
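+
+ Example usage (a sketch; shares everything with ``env`` except the
+ variable delimiters)::
+
+ paren_env = env.overlay(
+ variable_start_string="((",
+ variable_end_string="))",
+ )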
+ """
+ args = dict(locals())
+ del args["self"], args["cache_size"], args["extensions"]
+
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.overlayed = True
+ rv.linked_to = self
+
+ for key, value in iteritems(args):
+ if value is not missing:
+ setattr(rv, key, value)
+
+ if cache_size is not missing:
+ rv.cache = create_cache(cache_size)
+ else:
+ rv.cache = copy_cache(self.cache)
+
+ rv.extensions = {}
+ for key, value in iteritems(self.extensions):
+ rv.extensions[key] = value.bind(rv)
+ if extensions is not missing:
+ rv.extensions.update(load_extensions(rv, extensions))
+
+ return _environment_sanity_check(rv)
+
+ lexer = property(get_lexer, doc="The lexer for this environment.")
+
+ def iter_extensions(self):
+ """Iterates over the extensions by priority."""
+ return iter(sorted(self.extensions.values(), key=lambda x: x.priority))
+
+ def getitem(self, obj, argument):
+ """Get an item or attribute of an object but prefer the item."""
+ try:
+ return obj[argument]
+ except (AttributeError, TypeError, LookupError):
+ if isinstance(argument, string_types):
+ try:
+ attr = str(argument)
+ except Exception:
+ pass
+ else:
+ try:
+ return getattr(obj, attr)
+ except AttributeError:
+ pass
+ return self.undefined(obj=obj, name=argument)
+
+ def getattr(self, obj, attribute):
+ """Get an item or attribute of an object but prefer the attribute.
+ Unlike :meth:`getitem` the attribute *must* be a bytestring.
+ """
+ try:
+ return getattr(obj, attribute)
+ except AttributeError:
+ pass
+ try:
+ return obj[attribute]
+ except (TypeError, LookupError, AttributeError):
+ return self.undefined(obj=obj, name=attribute)
+
+ def call_filter(
+ self, name, value, args=None, kwargs=None, context=None, eval_ctx=None
+ ):
+ """Invokes a filter on a value the same way the compiler does it.
+
+ Note that on Python 3 this might return a coroutine in case the
+ filter is running from an environment in async mode and the filter
+ supports async execution. It's your responsibility to await this
+ if needed.
+
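+ Example usage (illustrative; ``upper`` is a built-in filter)::
+
+ env.call_filter("upper", "jinja") # returns 'JINJA'
+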
+ .. versionadded:: 2.7
+ """
+ func = self.filters.get(name)
+ if func is None:
+ fail_for_missing_callable("no filter named %r", name)
+ args = [value] + list(args or ())
+ if getattr(func, "contextfilter", False) is True:
+ if context is None:
+ raise TemplateRuntimeError(
+ "Attempted to invoke context filter without context"
+ )
+ args.insert(0, context)
+ elif getattr(func, "evalcontextfilter", False) is True:
+ if eval_ctx is None:
+ if context is not None:
+ eval_ctx = context.eval_ctx
+ else:
+ eval_ctx = EvalContext(self)
+ args.insert(0, eval_ctx)
+ elif getattr(func, "environmentfilter", False) is True:
+ args.insert(0, self)
+ return func(*args, **(kwargs or {}))
+
+ def call_test(self, name, value, args=None, kwargs=None):
+ """Invokes a test on a value the same way the compiler does it.
+
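+ Example usage (illustrative; ``even`` is a built-in test)::
+
+ env.call_test("even", 4) # returns True
+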
+ .. versionadded:: 2.7
+ """
+ func = self.tests.get(name)
+ if func is None:
+ fail_for_missing_callable("no test named %r", name)
+ return func(value, *(args or ()), **(kwargs or {}))
+
+ @internalcode
+ def parse(self, source, name=None, filename=None):
+ """Parse the sourcecode and return the abstract syntax tree. This
+ tree of nodes is used by the compiler to convert the template into
+ executable source- or bytecode. This is useful for debugging or to
+ extract information from templates.
+
+ If you are :ref:`developing Jinja extensions <writing-extensions>`
+ this gives you a good overview of the node tree generated.
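+
+ Example (a sketch; :mod:`jinja2.meta` is the usual consumer of the
+ returned tree)::
+
+ from jinja2 import meta
+
+ ast = env.parse("Hello {{ name }}!")
+ meta.find_undeclared_variables(ast) # -> {'name'}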
+ """
+ try:
+ return self._parse(source, name, filename)
+ except TemplateSyntaxError:
+ self.handle_exception(source=source)
+
+ def _parse(self, source, name, filename):
+ """Internal parsing function used by `parse` and `compile`."""
+ return Parser(self, source, name, encode_filename(filename)).parse()
+
+ def lex(self, source, name=None, filename=None):
+ """Lex the given sourcecode and return a generator that yields
+ tokens as tuples in the form ``(lineno, token_type, value)``.
+ This can be useful for :ref:`extension development <writing-extensions>`
+ and debugging templates.
+
+ This does not perform preprocessing. If you want the preprocessing
+ of the extensions to be applied you have to filter source through
+ the :meth:`preprocess` method.
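+
+ Example (illustrative)::
+
+ for lineno, token_type, value in env.lex("Hello {{ name }}!"):
+ print(lineno, token_type, value)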
+ """
+ source = text_type(source)
+ try:
+ return self.lexer.tokeniter(source, name, filename)
+ except TemplateSyntaxError:
+ self.handle_exception(source=source)
+
+ def preprocess(self, source, name=None, filename=None):
+ """Preprocesses the source with all extensions. This is automatically
+ called for all parsing and compiling methods but *not* for :meth:`lex`
+ because there you usually only want the actual source tokenized.
+ """
+ return reduce(
+ lambda s, e: e.preprocess(s, name, filename),
+ self.iter_extensions(),
+ text_type(source),
+ )
+
+ def _tokenize(self, source, name, filename=None, state=None):
+ """Called by the parser to do the preprocessing and filtering
+ for all the extensions. Returns a :class:`~jinja2.lexer.TokenStream`.
+ """
+ source = self.preprocess(source, name, filename)
+ stream = self.lexer.tokenize(source, name, filename, state)
+ for ext in self.iter_extensions():
+ stream = ext.filter_stream(stream)
+ if not isinstance(stream, TokenStream):
+ stream = TokenStream(stream, name, filename)
+ return stream
+
+ def _generate(self, source, name, filename, defer_init=False):
+ """Internal hook that can be overridden to hook a different generate
+ method in.
+
+ .. versionadded:: 2.5
+ """
+ return generate(
+ source,
+ self,
+ name,
+ filename,
+ defer_init=defer_init,
+ optimized=self.optimized,
+ )
+
+ def _compile(self, source, filename):
+ """Internal hook that can be overridden to hook a different compile
+ method in.
+
+ .. versionadded:: 2.5
+ """
+ return compile(source, filename, "exec")
+
+ @internalcode
+ def compile(self, source, name=None, filename=None, raw=False, defer_init=False):
+ """Compile a node or template source code. The `name` parameter is
+ the load name of the template after it was joined using
+ :meth:`join_path` if necessary, not the filename on the file system.
+ The `filename` parameter is the estimated filename of the template on
+ the file system. If the template came from a database or memory this
+ can be omitted.
+
+ The return value of this method is a Python code object. If the `raw`
+ parameter is `True` the return value will be a string with Python
+ code equivalent to the bytecode returned otherwise. This method is
+ mainly used internally.
+
+ `defer_init` is used internally to aid the module code generator. It
+ allows the generated code to be imported without the global
+ environment variable being set.
+
+ .. versionadded:: 2.4
+ `defer_init` parameter added.
+ """
+ source_hint = None
+ try:
+ if isinstance(source, string_types):
+ source_hint = source
+ source = self._parse(source, name, filename)
+ source = self._generate(source, name, filename, defer_init=defer_init)
+ if raw:
+ return source
+ if filename is None:
+ filename = "<template>"
+ else:
+ filename = encode_filename(filename)
+ return self._compile(source, filename)
+ except TemplateSyntaxError:
+ self.handle_exception(source=source_hint)
+
+ def compile_expression(self, source, undefined_to_none=True):
+ """A handy helper method that returns a callable that accepts keyword
+ arguments that appear as variables in the expression. If called it
+ returns the result of the expression.
+
+ This is useful if applications want to use the same rules as Jinja
+ in template "configuration files" or similar situations.
+
+ Example usage:
+
+ >>> env = Environment()
+ >>> expr = env.compile_expression('foo == 42')
+ >>> expr(foo=23)
+ False
+ >>> expr(foo=42)
+ True
+
+ Per default the return value is converted to `None` if the
+ expression returns an undefined value. This can be changed
+ by setting `undefined_to_none` to `False`.
+
+ >>> env.compile_expression('var')() is None
+ True
+ >>> env.compile_expression('var', undefined_to_none=False)()
+ Undefined
+
+ .. versionadded:: 2.1
+ """
+ parser = Parser(self, source, state="variable")
+ try:
+ expr = parser.parse_expression()
+ if not parser.stream.eos:
+ raise TemplateSyntaxError(
+ "chunk after expression", parser.stream.current.lineno, None, None
+ )
+ expr.set_environment(self)
+ except TemplateSyntaxError:
+ if sys.exc_info() is not None:
+ self.handle_exception(source=source)
+
+ body = [nodes.Assign(nodes.Name("result", "store"), expr, lineno=1)]
+ template = self.from_string(nodes.Template(body, lineno=1))
+ return TemplateExpression(template, undefined_to_none)
+
+ def compile_templates(
+ self,
+ target,
+ extensions=None,
+ filter_func=None,
+ zip="deflated",
+ log_function=None,
+ ignore_errors=True,
+ py_compile=False,
+ ):
+ """Finds all the templates the loader can find, compiles them
+ and stores them in `target`. If `zip` is `None`, the templates
+ will be stored in a directory instead of in a zipfile.
+ By default a deflate zip algorithm is used. To switch to
+ the stored algorithm, `zip` can be set to ``'stored'``.
+
+ `extensions` and `filter_func` are passed to :meth:`list_templates`.
+ Each template returned will be compiled to the target folder or
+ zipfile.
+
+ By default template compilation errors are ignored. In case a
+ log function is provided, errors are logged. If you want template
+ syntax errors to abort the compilation you can set `ignore_errors`
+ to `False` and you will get an exception on syntax errors.
+
+ If `py_compile` is set to `True`, .pyc files will be written to the
+ target instead of standard .py files. This flag does not do anything
+ on PyPy and Python 3, where .pyc files are not picked up
+ automatically and don't give much benefit.
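+
+ Example usage (a sketch; writes a deflated zip archive)::
+
+ env.compile_templates("compiled.zip")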
+
+ .. versionadded:: 2.4
+ """
+ from .loaders import ModuleLoader
+
+ if log_function is None:
+
+ def log_function(x):
+ pass
+
+ if py_compile:
+ if not PY2 or PYPY:
+ import warnings
+
+ warnings.warn(
+ "'py_compile=True' has no effect on PyPy or Python"
+ " 3 and will be removed in version 3.0",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ py_compile = False
+ else:
+ import imp
+ import marshal
+
+ py_header = imp.get_magic() + u"\xff\xff\xff\xff".encode("iso-8859-15")
+
+ # Python 3.3 added a source filesize to the header
+ if sys.version_info >= (3, 3):
+ py_header += u"\x00\x00\x00\x00".encode("iso-8859-15")
+
+ def write_file(filename, data):
+ if zip:
+ info = ZipInfo(filename)
+ info.external_attr = 0o755 << 16
+ zip_file.writestr(info, data)
+ else:
+ if isinstance(data, text_type):
+ data = data.encode("utf8")
+
+ with open(os.path.join(target, filename), "wb") as f:
+ f.write(data)
+
+ if zip is not None:
+ from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED, ZIP_STORED
+
+ zip_file = ZipFile(
+ target, "w", dict(deflated=ZIP_DEFLATED, stored=ZIP_STORED)[zip]
+ )
+ log_function('Compiling into Zip archive "%s"' % target)
+ else:
+ if not os.path.isdir(target):
+ os.makedirs(target)
+ log_function('Compiling into folder "%s"' % target)
+
+ try:
+ for name in self.list_templates(extensions, filter_func):
+ source, filename, _ = self.loader.get_source(self, name)
+ try:
+ code = self.compile(source, name, filename, True, True)
+ except TemplateSyntaxError as e:
+ if not ignore_errors:
+ raise
+ log_function('Could not compile "%s": %s' % (name, e))
+ continue
+
+ filename = ModuleLoader.get_module_filename(name)
+
+ if py_compile:
+ c = self._compile(code, encode_filename(filename))
+ write_file(filename + "c", py_header + marshal.dumps(c))
+ log_function('Byte-compiled "%s" as %s' % (name, filename + "c"))
+ else:
+ write_file(filename, code)
+ log_function('Compiled "%s" as %s' % (name, filename))
+ finally:
+ if zip:
+ zip_file.close()
+
+ log_function("Finished compiling templates")
+
+ def list_templates(self, extensions=None, filter_func=None):
+ """Returns a list of templates for this environment. This requires
+ that the loader supports the
+ :meth:`~BaseLoader.list_templates` method.
+
+ If there are other files in the template folder besides the
+ actual templates, the returned list can be filtered. There are two
+ ways: either `extensions` is set to a list of file extensions for
+ templates, or a `filter_func` can be provided which is a callable that
+ is passed a template name and should return `True` if it should end up
+ in the result list.
+
+ If the loader does not support that, a :exc:`TypeError` is raised.
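+
+ Example usage (illustrative)::
+
+ env.list_templates(extensions=["html", "txt"])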
+
+ .. versionadded:: 2.4
+ """
+ names = self.loader.list_templates()
+
+ if extensions is not None:
+ if filter_func is not None:
+ raise TypeError(
+ "either extensions or filter_func can be passed, but not both"
+ )
+
+ def filter_func(x):
+ return "." in x and x.rsplit(".", 1)[1] in extensions
+
+ if filter_func is not None:
+ names = [name for name in names if filter_func(name)]
+
+ return names
+
+ def handle_exception(self, source=None):
+ """Exception handling helper. This is used internally to either raise
+ rewritten exceptions or return a rendered traceback for the template.
+ """
+ from .debug import rewrite_traceback_stack
+
+ reraise(*rewrite_traceback_stack(source=source))
+
+ def join_path(self, template, parent):
+ """Join a template with the parent. By default all the lookups are
+ relative to the loader root so this method returns the `template`
+ parameter unchanged, but if the paths should be relative to the
+ parent template, this function can be used to calculate the real
+ template name.
+
+ Subclasses may override this method and implement template path
+ joining here.
+ """
+ return template
+
+ @internalcode
+ def _load_template(self, name, globals):
+ if self.loader is None:
+ raise TypeError("no loader for this environment specified")
+ cache_key = (weakref.ref(self.loader), name)
+ if self.cache is not None:
+ template = self.cache.get(cache_key)
+ if template is not None and (
+ not self.auto_reload or template.is_up_to_date
+ ):
+ return template
+ template = self.loader.load(self, name, globals)
+ if self.cache is not None:
+ self.cache[cache_key] = template
+ return template
+
+ @internalcode
+ def get_template(self, name, parent=None, globals=None):
+ """Load a template from the loader. If a loader is configured this
+ method asks the loader for the template and returns a :class:`Template`.
+ If the `parent` parameter is not `None`, :meth:`join_path` is called
+ to get the real template name before loading.
+
+ The `globals` parameter can be used to provide template wide globals.
+ These variables are available in the context at render time.
+
+ If the template does not exist a :exc:`TemplateNotFound` exception is
+ raised.
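+
+ Example usage (a sketch; ``index.html`` is a placeholder name)::
+
+ template = env.get_template("index.html")
+ html = template.render(title="Home")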
+
+ .. versionchanged:: 2.4
+ If `name` is a :class:`Template` object it is returned from the
+ function unchanged.
+ """
+ if isinstance(name, Template):
+ return name
+ if parent is not None:
+ name = self.join_path(name, parent)
+ return self._load_template(name, self.make_globals(globals))
+
+ @internalcode
+ def select_template(self, names, parent=None, globals=None):
+ """Works like :meth:`get_template` but tries a number of templates
+ before it fails. If it cannot find any of the templates, it will
+ raise a :exc:`TemplatesNotFound` exception.
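+
+ Example usage (a sketch; both names are placeholders, the first
+ one that exists wins)::
+
+ template = env.select_template(["special.html", "default.html"])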
+
+ .. versionchanged:: 2.11
+ If names is :class:`Undefined`, an :exc:`UndefinedError` is
+ raised instead. If no templates were found and names
+ contains :class:`Undefined`, the message is more helpful.
+
+ .. versionchanged:: 2.4
+ If `names` contains a :class:`Template` object it is returned
+ from the function unchanged.
+
+ .. versionadded:: 2.3
+ """
+ if isinstance(names, Undefined):
+ names._fail_with_undefined_error()
+
+ if not names:
+ raise TemplatesNotFound(
+ message=u"Tried to select from an empty list " u"of templates."
+ )
+ globals = self.make_globals(globals)
+ for name in names:
+ if isinstance(name, Template):
+ return name
+ if parent is not None:
+ name = self.join_path(name, parent)
+ try:
+ return self._load_template(name, globals)
+ except (TemplateNotFound, UndefinedError):
+ pass
+ raise TemplatesNotFound(names)
+
+ @internalcode
+ def get_or_select_template(self, template_name_or_list, parent=None, globals=None):
+ """Does a typecheck and dispatches to :meth:`select_template`
+ if an iterable of template names is given, otherwise to
+ :meth:`get_template`.
+
+ .. versionadded:: 2.3
+ """
+ if isinstance(template_name_or_list, (string_types, Undefined)):
+ return self.get_template(template_name_or_list, parent, globals)
+ elif isinstance(template_name_or_list, Template):
+ return template_name_or_list
+ return self.select_template(template_name_or_list, parent, globals)
+
+ def from_string(self, source, globals=None, template_class=None):
+ """Load a template from a string. This parses the source given and
+ returns a :class:`Template` object.
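+
+ Example usage (illustrative)::
+
+ template = env.from_string("Hello {{ name }}!")
+ template.render(name="World") # -> u'Hello World!'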
+ """
+ globals = self.make_globals(globals)
+ cls = template_class or self.template_class
+ return cls.from_code(self, self.compile(source), globals, None)
+
+ def make_globals(self, d):
+ """Return a dict for the globals."""
+ if not d:
+ return self.globals
+ return dict(self.globals, **d)
+
+
+class Template(object):
+ """The central template object. This class represents a compiled template
+ and is used to evaluate it.
+
+ Normally the template object is generated from an :class:`Environment` but
+ it also has a constructor that makes it possible to create a template
+ instance directly using the constructor. It takes the same arguments as
+ the environment constructor but it's not possible to specify a loader.
+
+ Every template object has a few methods and members that are guaranteed
+ to exist. However it's important that a template object should be
+ considered immutable. Modifications on the object are not supported.
+
+ Template objects created from the constructor rather than an environment
+ do have an `environment` attribute that points to a temporary environment
+ that is probably shared with other templates created via the
+ constructor with compatible settings.
+
+ >>> template = Template('Hello {{ name }}!')
+ >>> template.render(name='John Doe') == u'Hello John Doe!'
+ True
+ >>> stream = template.stream(name='John Doe')
+ >>> next(stream) == u'Hello John Doe!'
+ True
+ >>> next(stream)
+ Traceback (most recent call last):
+ ...
+ StopIteration
+ """
+
+ #: Type of environment to create when creating a template directly
+ #: rather than through an existing environment.
+ environment_class = Environment
+
+ def __new__(
+ cls,
+ source,
+ block_start_string=BLOCK_START_STRING,
+ block_end_string=BLOCK_END_STRING,
+ variable_start_string=VARIABLE_START_STRING,
+ variable_end_string=VARIABLE_END_STRING,
+ comment_start_string=COMMENT_START_STRING,
+ comment_end_string=COMMENT_END_STRING,
+ line_statement_prefix=LINE_STATEMENT_PREFIX,
+ line_comment_prefix=LINE_COMMENT_PREFIX,
+ trim_blocks=TRIM_BLOCKS,
+ lstrip_blocks=LSTRIP_BLOCKS,
+ newline_sequence=NEWLINE_SEQUENCE,
+ keep_trailing_newline=KEEP_TRAILING_NEWLINE,
+ extensions=(),
+ optimized=True,
+ undefined=Undefined,
+ finalize=None,
+ autoescape=False,
+ enable_async=False,
+ ):
+ env = get_spontaneous_environment(
+ cls.environment_class,
+ block_start_string,
+ block_end_string,
+ variable_start_string,
+ variable_end_string,
+ comment_start_string,
+ comment_end_string,
+ line_statement_prefix,
+ line_comment_prefix,
+ trim_blocks,
+ lstrip_blocks,
+ newline_sequence,
+ keep_trailing_newline,
+ frozenset(extensions),
+ optimized,
+ undefined,
+ finalize,
+ autoescape,
+ None,
+ 0,
+ False,
+ None,
+ enable_async,
+ )
+ return env.from_string(source, template_class=cls)
+
+ @classmethod
+ def from_code(cls, environment, code, globals, uptodate=None):
+ """Creates a template object from compiled code and the globals. This
+ is used by the loaders and environment to create a template object.
+ """
+ namespace = {"environment": environment, "__file__": code.co_filename}
+ exec(code, namespace)
+ rv = cls._from_namespace(environment, namespace, globals)
+ rv._uptodate = uptodate
+ return rv
+
+ @classmethod
+ def from_module_dict(cls, environment, module_dict, globals):
+ """Creates a template object from a module. This is used by the
+ module loader to create a template object.
+
+ .. versionadded:: 2.4
+ """
+ return cls._from_namespace(environment, module_dict, globals)
+
+ @classmethod
+ def _from_namespace(cls, environment, namespace, globals):
+ t = object.__new__(cls)
+ t.environment = environment
+ t.globals = globals
+ t.name = namespace["name"]
+ t.filename = namespace["__file__"]
+ t.blocks = namespace["blocks"]
+
+ # render function and module
+ t.root_render_func = namespace["root"]
+ t._module = None
+
+ # debug and loader helpers
+ t._debug_info = namespace["debug_info"]
+ t._uptodate = None
+
+ # store the reference
+ namespace["environment"] = environment
+ namespace["__jinja_template__"] = t
+
+ return t
+
+ def render(self, *args, **kwargs):
+ """This method accepts the same arguments as the `dict` constructor:
+ A dict, a dict subclass or some keyword arguments. If no arguments
+ are given the context will be empty. These two calls do the same::
+
+ template.render(knights='that say nih')
+ template.render({'knights': 'that say nih'})
+
+ This will return the rendered template as a unicode string.
+ """
+ vars = dict(*args, **kwargs)
+ try:
+ return concat(self.root_render_func(self.new_context(vars)))
+ except Exception:
+ self.environment.handle_exception()
+
+ def render_async(self, *args, **kwargs):
+ """This works similar to :meth:`render` but returns a coroutine
+ that when awaited returns the entire rendered template string. This
+ requires the async feature to be enabled.
+
+ Example usage::
+
+ await template.render_async(knights='that say nih; asynchronously')
+ """
+ # see asyncsupport for the actual implementation
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
+
+ def stream(self, *args, **kwargs):
+ """Works exactly like :meth:`generate` but returns a
+ :class:`TemplateStream`.
+ """
+ return TemplateStream(self.generate(*args, **kwargs))
+
+ def generate(self, *args, **kwargs):
+ """For very large templates it can be useful to not render the whole
+ template at once but evaluate each statement after another and yield
+ piece for piece. This method basically does exactly that and returns
+ a generator that yields one item after another as unicode strings.
+
+ It accepts the same arguments as :meth:`render`.
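+
+ Example usage (illustrative; assumes ``sys`` is imported)::
+
+ for chunk in template.generate(name="World"):
+ sys.stdout.write(chunk)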
+ """
+ vars = dict(*args, **kwargs)
+ try:
+ for event in self.root_render_func(self.new_context(vars)):
+ yield event
+ except Exception:
+ yield self.environment.handle_exception()
+
+ def generate_async(self, *args, **kwargs):
+ """An async version of :meth:`generate`. Works very similarly but
+ returns an async iterator instead.
+ """
+ # see asyncsupport for the actual implementation
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
+
+ def new_context(self, vars=None, shared=False, locals=None):
+ """Create a new :class:`Context` for this template. The vars
+ provided will be passed to the template. Per default the globals
+ are added to the context. If shared is set to `True` the data
+ is passed as is to the context without adding the globals.
+
+ `locals` can be a dict of local variables for internal usage.
+ """
+ return new_context(
+ self.environment, self.name, self.blocks, vars, shared, self.globals, locals
+ )
+
+ def make_module(self, vars=None, shared=False, locals=None):
+ """This method works like the :attr:`module` attribute when called
+ without arguments but it will evaluate the template on every call
+ rather than caching it. It's also possible to provide
+ a dict which is then used as context. The arguments are the same
+ as for the :meth:`new_context` method.
+ """
+ return TemplateModule(self, self.new_context(vars, shared, locals))
+
+ def make_module_async(self, vars=None, shared=False, locals=None):
+ """As template module creation can invoke template code for
+ asynchronous executions this method must be used instead of the
+ normal :meth:`make_module` one. Likewise the module attribute
+ becomes unavailable in async mode.
+ """
+ # see asyncsupport for the actual implementation
+ raise NotImplementedError(
+ "This feature is not available for this version of Python"
+ )
+
+ @internalcode
+ def _get_default_module(self):
+ if self._module is not None:
+ return self._module
+ self._module = rv = self.make_module()
+ return rv
+
+ @property
+ def module(self):
+ """The template as module. This is used for imports in the
+ template runtime but is also useful if one wants to access
+ exported template variables from the Python layer:
+
+ >>> t = Template('{% macro foo() %}42{% endmacro %}23')
+ >>> str(t.module)
+ '23'
+ >>> t.module.foo() == u'42'
+ True
+
+ This attribute is not available if async mode is enabled.
+ """
+ return self._get_default_module()
+
+ def get_corresponding_lineno(self, lineno):
+ """Return the source line number of a line number in the
+ generated bytecode as they are not in sync.
+ """
+ for template_line, code_line in reversed(self.debug_info):
+ if code_line <= lineno:
+ return template_line
+ return 1
+
+ @property
+ def is_up_to_date(self):
+ """If this variable is `False` there is a newer version available."""
+ if self._uptodate is None:
+ return True
+ return self._uptodate()
+
+ @property
+ def debug_info(self):
+ """The debug info mapping."""
+ if self._debug_info:
+ return [tuple(map(int, x.split("="))) for x in self._debug_info.split("&")]
+ return []
+
+ def __repr__(self):
+ if self.name is None:
+ name = "memory:%x" % id(self)
+ else:
+ name = repr(self.name)
+ return "<%s %s>" % (self.__class__.__name__, name)
+
+
+@implements_to_string
+class TemplateModule(object):
+ """Represents an imported template. All the exported names of the
+ template are available as attributes on this object. Additionally
+ converting it into a unicode or byte string renders the contents.
+ """
+
+ def __init__(self, template, context, body_stream=None):
+ if body_stream is None:
+ if context.environment.is_async:
+ raise RuntimeError(
+ "Async mode requires a body stream "
+ "to be passed to a template module. Use "
+ "the async methods of the API you are "
+ "using."
+ )
+ body_stream = list(template.root_render_func(context))
+ self._body_stream = body_stream
+ self.__dict__.update(context.get_exported())
+ self.__name__ = template.name
+
+ def __html__(self):
+ return Markup(concat(self._body_stream))
+
+ def __str__(self):
+ return concat(self._body_stream)
+
+ def __repr__(self):
+ if self.__name__ is None:
+ name = "memory:%x" % id(self)
+ else:
+ name = repr(self.__name__)
+ return "<%s %s>" % (self.__class__.__name__, name)
+
+
+class TemplateExpression(object):
+ """The :meth:`jinja2.Environment.compile_expression` method returns an
+ instance of this object. It encapsulates the expression-like access
+ to the template expression it wraps.
+ """
+
+ def __init__(self, template, undefined_to_none):
+ self._template = template
+ self._undefined_to_none = undefined_to_none
+
+ def __call__(self, *args, **kwargs):
+ context = self._template.new_context(dict(*args, **kwargs))
+ consume(self._template.root_render_func(context))
+ rv = context.vars["result"]
+ if self._undefined_to_none and isinstance(rv, Undefined):
+ rv = None
+ return rv
+
+
+@implements_iterator
+class TemplateStream(object):
+ """A template stream works pretty much like an ordinary python generator
+ but it can buffer multiple items to reduce the number of total iterations.
+ Per default the output is unbuffered which means that for every unbuffered
+ instruction in the template one unicode string is yielded.
+
+ If buffering is enabled with a buffer size of 5, five items are combined
+ into a new unicode string. This is mainly useful if you are streaming
+ big templates to a client via WSGI which flushes after each iteration.
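+
+ Example usage (a sketch)::
+
+ stream = template.stream(name="World")
+ stream.enable_buffering(size=5)
+ stream.dump("hello.html")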
+ """
+
+ def __init__(self, gen):
+ self._gen = gen
+ self.disable_buffering()
+
+ def dump(self, fp, encoding=None, errors="strict"):
+ """Dump the complete stream into a file or file-like object.
+ Per default unicode strings are written, if you want to encode
+ before writing specify an `encoding`.
+
+ Example usage::
+
+ Template('Hello {{ name }}!').stream(name='foo').dump('hello.html')
+ """
+ close = False
+ if isinstance(fp, string_types):
+ if encoding is None:
+ encoding = "utf-8"
+ fp = open(fp, "wb")
+ close = True
+ try:
+ if encoding is not None:
+ iterable = (x.encode(encoding, errors) for x in self)
+ else:
+ iterable = self
+ if hasattr(fp, "writelines"):
+ fp.writelines(iterable)
+ else:
+ for item in iterable:
+ fp.write(item)
+ finally:
+ if close:
+ fp.close()
+
+ def disable_buffering(self):
+ """Disable the output buffering."""
+ self._next = partial(next, self._gen)
+ self.buffered = False
+
+ def _buffered_generator(self, size):
+ buf = []
+ c_size = 0
+ push = buf.append
+
+ while 1:
+ try:
+ while c_size < size:
+ c = next(self._gen)
+ push(c)
+ if c:
+ c_size += 1
+ except StopIteration:
+ if not c_size:
+ return
+ yield concat(buf)
+ del buf[:]
+ c_size = 0
+
+ def enable_buffering(self, size=5):
+ """Enable buffering. Buffer `size` items before yielding them."""
+ if size <= 1:
+ raise ValueError("buffer size too small")
+
+ self.buffered = True
+ self._next = partial(next, self._buffered_generator(size))
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return self._next()
+
+
+# hook in default template class. if anyone reads this comment: ignore that
+# it's possible to use custom templates ;-)
+Environment.template_class = Template
diff --git a/third_party/python/Jinja2/jinja2/exceptions.py b/third_party/python/Jinja2/jinja2/exceptions.py
new file mode 100644
index 0000000000..0bf2003e30
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/exceptions.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+from ._compat import imap
+from ._compat import implements_to_string
+from ._compat import PY2
+from ._compat import text_type
+
+
+class TemplateError(Exception):
+ """Baseclass for all template errors."""
+
+ if PY2:
+
+ def __init__(self, message=None):
+ if message is not None:
+ message = text_type(message).encode("utf-8")
+ Exception.__init__(self, message)
+
+ @property
+ def message(self):
+ if self.args:
+ message = self.args[0]
+ if message is not None:
+ return message.decode("utf-8", "replace")
+
+ def __unicode__(self):
+ return self.message or u""
+
+ else:
+
+ def __init__(self, message=None):
+ Exception.__init__(self, message)
+
+ @property
+ def message(self):
+ if self.args:
+ message = self.args[0]
+ if message is not None:
+ return message
+
+
+@implements_to_string
+class TemplateNotFound(IOError, LookupError, TemplateError):
+ """Raised if a template does not exist.
+
+ .. versionchanged:: 2.11
+ If the given name is :class:`Undefined` and no message was
+ provided, an :exc:`UndefinedError` is raised.
+ """
+
+ # looks weird, but removes the warning descriptor that just
+ # bogusly warns us about message being deprecated
+ message = None
+
+ def __init__(self, name, message=None):
+ IOError.__init__(self, name)
+
+ if message is None:
+ from .runtime import Undefined
+
+ if isinstance(name, Undefined):
+ name._fail_with_undefined_error()
+
+ message = name
+
+ self.message = message
+ self.name = name
+ self.templates = [name]
+
+ def __str__(self):
+ return self.message
+
+
+class TemplatesNotFound(TemplateNotFound):
+ """Like :class:`TemplateNotFound` but raised if multiple templates
+ are selected. This is a subclass of :class:`TemplateNotFound`
+ exception, so just catching the base exception will catch both.
+
+ .. versionchanged:: 2.11
+ If a name in the list of names is :class:`Undefined`, a message
+ about it being undefined is shown rather than the empty string.
+
+ .. versionadded:: 2.2
+ """
+
+ def __init__(self, names=(), message=None):
+ if message is None:
+ from .runtime import Undefined
+
+ parts = []
+
+ for name in names:
+ if isinstance(name, Undefined):
+ parts.append(name._undefined_message)
+ else:
+ parts.append(name)
+
+ message = u"none of the templates given were found: " + u", ".join(
+ imap(text_type, parts)
+ )
+ TemplateNotFound.__init__(self, names and names[-1] or None, message)
+ self.templates = list(names)
+
+
+@implements_to_string
+class TemplateSyntaxError(TemplateError):
+ """Raised to tell the user that there is a problem with the template."""
+
+ def __init__(self, message, lineno, name=None, filename=None):
+ TemplateError.__init__(self, message)
+ self.lineno = lineno
+ self.name = name
+ self.filename = filename
+ self.source = None
+
+ # this is set to True if the debug.translate_syntax_error
+ # function translated the syntax error into a new traceback
+ self.translated = False
+
+ def __str__(self):
+ # for translated errors we only return the message
+ if self.translated:
+ return self.message
+
+ # otherwise attach some stuff
+ location = "line %d" % self.lineno
+ name = self.filename or self.name
+ if name:
+ location = 'File "%s", %s' % (name, location)
+ lines = [self.message, " " + location]
+
+ # if the source is set, add the line to the output
+ if self.source is not None:
+ try:
+ line = self.source.splitlines()[self.lineno - 1]
+ except IndexError:
+ line = None
+ if line:
+ lines.append(" " + line.strip())
+
+ return u"\n".join(lines)
+
+ def __reduce__(self):
+ # https://bugs.python.org/issue1692335 Exceptions that take
+ # multiple required arguments have problems with pickling.
+ # Without this, raises TypeError: __init__() missing 1 required
+ # positional argument: 'lineno'
+ return self.__class__, (self.message, self.lineno, self.name, self.filename)
+
+
+class TemplateAssertionError(TemplateSyntaxError):
+ """Like a template syntax error, but covers cases where something in the
+ template caused an error at compile time that wasn't necessarily caused
+ by a syntax error. However it's a direct subclass of
+ :exc:`TemplateSyntaxError` and has the same attributes.
+ """
+
+
+class TemplateRuntimeError(TemplateError):
+ """A generic runtime error in the template engine. Under some situations
+ Jinja may raise this exception.
+ """
+
+
+class UndefinedError(TemplateRuntimeError):
+ """Raised if a template tries to operate on :class:`Undefined`."""
+
+
+class SecurityError(TemplateRuntimeError):
+ """Raised if a template tries to do something insecure if the
+ sandbox is enabled.
+ """
+
+
+class FilterArgumentError(TemplateRuntimeError):
+ """This error is raised if a filter was called with inappropriate
+ arguments
+ """
diff --git a/third_party/python/Jinja2/jinja2/ext.py b/third_party/python/Jinja2/jinja2/ext.py
new file mode 100644
index 0000000000..9141be4dac
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/ext.py
@@ -0,0 +1,704 @@
+# -*- coding: utf-8 -*-
+"""Extension API for adding custom tags and behavior."""
+import pprint
+import re
+from sys import version_info
+
+from markupsafe import Markup
+
+from . import nodes
+from ._compat import iteritems
+from ._compat import string_types
+from ._compat import with_metaclass
+from .defaults import BLOCK_END_STRING
+from .defaults import BLOCK_START_STRING
+from .defaults import COMMENT_END_STRING
+from .defaults import COMMENT_START_STRING
+from .defaults import KEEP_TRAILING_NEWLINE
+from .defaults import LINE_COMMENT_PREFIX
+from .defaults import LINE_STATEMENT_PREFIX
+from .defaults import LSTRIP_BLOCKS
+from .defaults import NEWLINE_SEQUENCE
+from .defaults import TRIM_BLOCKS
+from .defaults import VARIABLE_END_STRING
+from .defaults import VARIABLE_START_STRING
+from .environment import Environment
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .nodes import ContextReference
+from .runtime import concat
+from .utils import contextfunction
+from .utils import import_string
+
+# the only really useful gettext functions for a Jinja template. Note
+# that ugettext must be assigned to gettext as Jinja doesn't support
+# non-unicode strings.
+GETTEXT_FUNCTIONS = ("_", "gettext", "ngettext")
+
+_ws_re = re.compile(r"\s*\n\s*")
+
+
+class ExtensionRegistry(type):
+ """Gives the extension an unique identifier."""
+
+ def __new__(mcs, name, bases, d):
+ rv = type.__new__(mcs, name, bases, d)
+ rv.identifier = rv.__module__ + "." + rv.__name__
+ return rv
+
+
+class Extension(with_metaclass(ExtensionRegistry, object)):
+ """Extensions can be used to add extra functionality to the Jinja template
+ system at the parser level. Custom extensions are bound to an environment
+ but may not store environment specific data on `self`. The reason for
+ this is that an extension can be bound to another environment (for
+ overlays) by creating a copy and reassigning the `environment` attribute.
+
+ As extensions are created by the environment they cannot accept any
+ arguments for configuration. One may want to work around that by using
+ a factory function, but that is not possible as extensions are identified
+ by their import name. The correct way to configure the extension is
+ storing the configuration values on the environment. Because this way the
+ environment ends up acting as central configuration storage, the
+ attributes may clash, which is why extensions have to ensure that the names
+ they choose for configuration are not too generic. ``prefix`` for example
+ is a terrible name while ``fragment_cache_prefix`` is a good
+ name, as it includes the name of the extension (fragment cache).
+ """
+
+ #: if this extension parses tags, this is the set of tags it's listening to.
+ tags = set()
+
+ #: the priority of that extension. This is especially useful for
+ #: extensions that preprocess values. A lower value means higher
+ #: priority.
+ #:
+ #: .. versionadded:: 2.4
+ priority = 100
+
+ def __init__(self, environment):
+ self.environment = environment
+
+ def bind(self, environment):
+ """Create a copy of this extension bound to another environment."""
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.environment = environment
+ return rv
+
+ def preprocess(self, source, name, filename=None):
+ """This method is called before the actual lexing and can be used to
+ preprocess the source. The `filename` is optional. The return value
+ must be the preprocessed source.
+ """
+ return source
+
+ def filter_stream(self, stream):
+ """It's passed a :class:`~jinja2.lexer.TokenStream` that can be used
+ to filter tokens returned. This method has to return an iterable of
+ :class:`~jinja2.lexer.Token`\\s, but it doesn't have to return a
+ :class:`~jinja2.lexer.TokenStream`.
+ """
+ return stream
+
+ def parse(self, parser):
+ """If any of the :attr:`tags` matched this method is called with the
+ parser as first argument. The token the parser stream is pointing at
+ is the name token that matched. This method has to return one node or
+ a list of multiple nodes.
+ """
+ raise NotImplementedError()
+
+ def attr(self, name, lineno=None):
+ """Return an attribute node for the current extension. This is useful
+ to pass constants on extensions to generated template code.
+
+ ::
+
+ self.attr('_my_attribute', lineno=lineno)
+ """
+ return nodes.ExtensionAttribute(self.identifier, name, lineno=lineno)
+
+ def call_method(
+ self, name, args=None, kwargs=None, dyn_args=None, dyn_kwargs=None, lineno=None
+ ):
+ """Call a method of the extension. This is a shortcut for
+ :meth:`attr` + :class:`jinja2.nodes.Call`.
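+
+ A sketch (``_my_method`` is a placeholder for a method defined on
+ the extension)::
+
+ self.call_method("_my_method", [nodes.Const(42)], lineno=lineno)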
+ """
+ if args is None:
+ args = []
+ if kwargs is None:
+ kwargs = []
+ return nodes.Call(
+ self.attr(name, lineno=lineno),
+ args,
+ kwargs,
+ dyn_args,
+ dyn_kwargs,
+ lineno=lineno,
+ )
+
+
+@contextfunction
+def _gettext_alias(__context, *args, **kwargs):
+ return __context.call(__context.resolve("gettext"), *args, **kwargs)
+
+
+def _make_new_gettext(func):
+ @contextfunction
+ def gettext(__context, __string, **variables):
+ rv = __context.call(func, __string)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ # Always treat as a format string, even if there are no
+ # variables. This makes translation strings more consistent
+ # and predictable. This requires escaping
+ return rv % variables
+
+ return gettext
+
+
+def _make_new_ngettext(func):
+ @contextfunction
+ def ngettext(__context, __singular, __plural, __num, **variables):
+ variables.setdefault("num", __num)
+ rv = __context.call(func, __singular, __plural, __num)
+ if __context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ # Always treat as a format string, see gettext comment above.
+ return rv % variables
+
+ return ngettext
+
+
+class InternationalizationExtension(Extension):
+ """This extension adds gettext support to Jinja."""
+
+ tags = {"trans"}
+
+ # TODO: the i18n extension is currently reevaluating values in a few
+ # situations. Take this example:
+ # {% trans count=something() %}{{ count }} foo{% pluralize
+ # %}{{ count }} fooss{% endtrans %}
+ # something is called twice here. One time for the gettext value and
+ # the other time for the n-parameter of the ngettext function.
+
+ def __init__(self, environment):
+ Extension.__init__(self, environment)
+ environment.globals["_"] = _gettext_alias
+ environment.extend(
+ install_gettext_translations=self._install,
+ install_null_translations=self._install_null,
+ install_gettext_callables=self._install_callables,
+ uninstall_gettext_translations=self._uninstall,
+ extract_translations=self._extract,
+ newstyle_gettext=False,
+ )
+
+ def _install(self, translations, newstyle=None):
+ gettext = getattr(translations, "ugettext", None)
+ if gettext is None:
+ gettext = translations.gettext
+ ngettext = getattr(translations, "ungettext", None)
+ if ngettext is None:
+ ngettext = translations.ngettext
+ self._install_callables(gettext, ngettext, newstyle)
+
+ def _install_null(self, newstyle=None):
+ self._install_callables(
+ lambda x: x, lambda s, p, n: (n != 1 and (p,) or (s,))[0], newstyle
+ )
+
+ def _install_callables(self, gettext, ngettext, newstyle=None):
+ if newstyle is not None:
+ self.environment.newstyle_gettext = newstyle
+ if self.environment.newstyle_gettext:
+ gettext = _make_new_gettext(gettext)
+ ngettext = _make_new_ngettext(ngettext)
+ self.environment.globals.update(gettext=gettext, ngettext=ngettext)
+
+ def _uninstall(self, translations):
+ for key in "gettext", "ngettext":
+ self.environment.globals.pop(key, None)
+
+ def _extract(self, source, gettext_functions=GETTEXT_FUNCTIONS):
+ if isinstance(source, string_types):
+ source = self.environment.parse(source)
+ return extract_from_ast(source, gettext_functions)
+
+ def parse(self, parser):
+ """Parse a translatable tag."""
+ lineno = next(parser.stream).lineno
+ num_called_num = False
+
+ # find all the variables referenced. Additionally a variable can be
+ # defined in the body of the trans block too, but this is checked at
+ # a later stage.
+ plural_expr = None
+ plural_expr_assignment = None
+ variables = {}
+ trimmed = None
+ while parser.stream.current.type != "block_end":
+ if variables:
+ parser.stream.expect("comma")
+
+ # skip colon for python compatibility
+ if parser.stream.skip_if("colon"):
+ break
+
+ name = parser.stream.expect("name")
+ if name.value in variables:
+ parser.fail(
+ "translatable variable %r defined twice." % name.value,
+ name.lineno,
+ exc=TemplateAssertionError,
+ )
+
+ # expressions
+ if parser.stream.current.type == "assign":
+ next(parser.stream)
+ variables[name.value] = var = parser.parse_expression()
+ elif trimmed is None and name.value in ("trimmed", "notrimmed"):
+ trimmed = name.value == "trimmed"
+ continue
+ else:
+ variables[name.value] = var = nodes.Name(name.value, "load")
+
+ if plural_expr is None:
+ if isinstance(var, nodes.Call):
+ plural_expr = nodes.Name("_trans", "load")
+ variables[name.value] = plural_expr
+ plural_expr_assignment = nodes.Assign(
+ nodes.Name("_trans", "store"), var
+ )
+ else:
+ plural_expr = var
+ num_called_num = name.value == "num"
+
+ parser.stream.expect("block_end")
+
+ plural = None
+ have_plural = False
+ referenced = set()
+
+ # now parse until endtrans or pluralize
+ singular_names, singular = self._parse_block(parser, True)
+ if singular_names:
+ referenced.update(singular_names)
+ if plural_expr is None:
+ plural_expr = nodes.Name(singular_names[0], "load")
+ num_called_num = singular_names[0] == "num"
+
+ # if we have a pluralize block, we parse that too
+ if parser.stream.current.test("name:pluralize"):
+ have_plural = True
+ next(parser.stream)
+ if parser.stream.current.type != "block_end":
+ name = parser.stream.expect("name")
+ if name.value not in variables:
+ parser.fail(
+ "unknown variable %r for pluralization" % name.value,
+ name.lineno,
+ exc=TemplateAssertionError,
+ )
+ plural_expr = variables[name.value]
+ num_called_num = name.value == "num"
+ parser.stream.expect("block_end")
+ plural_names, plural = self._parse_block(parser, False)
+ next(parser.stream)
+ referenced.update(plural_names)
+ else:
+ next(parser.stream)
+
+ # register free names as simple name expressions
+ for var in referenced:
+ if var not in variables:
+ variables[var] = nodes.Name(var, "load")
+
+ if not have_plural:
+ plural_expr = None
+ elif plural_expr is None:
+ parser.fail("pluralize without variables", lineno)
+
+ if trimmed is None:
+ trimmed = self.environment.policies["ext.i18n.trimmed"]
+ if trimmed:
+ singular = self._trim_whitespace(singular)
+ if plural:
+ plural = self._trim_whitespace(plural)
+
+ node = self._make_node(
+ singular,
+ plural,
+ variables,
+ plural_expr,
+ bool(referenced),
+ num_called_num and have_plural,
+ )
+ node.set_lineno(lineno)
+ if plural_expr_assignment is not None:
+ return [plural_expr_assignment, node]
+ else:
+ return node
+
+ def _trim_whitespace(self, string, _ws_re=_ws_re):
+ return _ws_re.sub(" ", string.strip())
+
+ def _parse_block(self, parser, allow_pluralize):
+ """Parse until the next block tag with a given name."""
+ referenced = []
+ buf = []
+ while 1:
+ if parser.stream.current.type == "data":
+ buf.append(parser.stream.current.value.replace("%", "%%"))
+ next(parser.stream)
+ elif parser.stream.current.type == "variable_begin":
+ next(parser.stream)
+ name = parser.stream.expect("name").value
+ referenced.append(name)
+ buf.append("%%(%s)s" % name)
+ parser.stream.expect("variable_end")
+ elif parser.stream.current.type == "block_begin":
+ next(parser.stream)
+ if parser.stream.current.test("name:endtrans"):
+ break
+ elif parser.stream.current.test("name:pluralize"):
+ if allow_pluralize:
+ break
+ parser.fail(
+ "a translatable section can have only one pluralize section"
+ )
+ parser.fail(
+ "control structures in translatable sections are not allowed"
+ )
+ elif parser.stream.eos:
+ parser.fail("unclosed translation block")
+ else:
+ raise RuntimeError("internal parser error")
+
+ return referenced, concat(buf)
+
+ def _make_node(
+ self, singular, plural, variables, plural_expr, vars_referenced, num_called_num
+ ):
+ """Generates a useful node from the data provided."""
+ # no variables referenced? Then the %-escaping is unnecessary for
+ # old style gettext invocations, as it is only needed when there
+ # are vars, so undo it.
+ if not vars_referenced and not self.environment.newstyle_gettext:
+ singular = singular.replace("%%", "%")
+ if plural:
+ plural = plural.replace("%%", "%")
+
+ # singular only:
+ if plural_expr is None:
+ gettext = nodes.Name("gettext", "load")
+ node = nodes.Call(gettext, [nodes.Const(singular)], [], None, None)
+
+ # singular and plural
+ else:
+ ngettext = nodes.Name("ngettext", "load")
+ node = nodes.Call(
+ ngettext,
+ [nodes.Const(singular), nodes.Const(plural), plural_expr],
+ [],
+ None,
+ None,
+ )
+
+ # in case newstyle gettext is used, the method is powerful
+ # enough to handle the variable expansion and autoescape
+ # handling itself
+ if self.environment.newstyle_gettext:
+ for key, value in iteritems(variables):
+ # the function adds that later anyways in case num was
+ # called num, so just skip it.
+ if num_called_num and key == "num":
+ continue
+ node.kwargs.append(nodes.Keyword(key, value))
+
+ # otherwise do that here
+ else:
+ # mark the return value as safe if we are in an
+ # environment with autoescaping turned on
+ node = nodes.MarkSafeIfAutoescape(node)
+ if variables:
+ node = nodes.Mod(
+ node,
+ nodes.Dict(
+ [
+ nodes.Pair(nodes.Const(key), value)
+ for key, value in variables.items()
+ ]
+ ),
+ )
+ return nodes.Output([node])
+
+
+class ExprStmtExtension(Extension):
+ """Adds a `do` tag to Jinja that works like the print statement just
+ that it doesn't print the return value.
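+
+ Example usage in a template (``navigation`` is a placeholder for
+ a list in the context)::
+
+ {% do navigation.append('a string') %}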
+ """
+
+ tags = set(["do"])
+
+ def parse(self, parser):
+ node = nodes.ExprStmt(lineno=next(parser.stream).lineno)
+ node.node = parser.parse_tuple()
+ return node
+
+
+class LoopControlExtension(Extension):
+ """Adds break and continue to the template engine."""
+
+ tags = set(["break", "continue"])
+
+ def parse(self, parser):
+ token = next(parser.stream)
+ if token.value == "break":
+ return nodes.Break(lineno=token.lineno)
+ return nodes.Continue(lineno=token.lineno)
+
+
+class WithExtension(Extension):
+ pass
+
+
+class AutoEscapeExtension(Extension):
+ pass
+
+
+class DebugExtension(Extension):
+ """A ``{% debug %}`` tag that dumps the available variables,
+ filters, and tests.
+
+ .. code-block:: html+jinja
+
+ <pre>{% debug %}</pre>
+
+ .. code-block:: text
+
+ {'context': {'cycler': <class 'jinja2.utils.Cycler'>,
+ ...,
+ 'namespace': <class 'jinja2.utils.Namespace'>},
+ 'filters': ['abs', 'attr', 'batch', 'capitalize', 'center', 'count', 'd',
+ ..., 'urlencode', 'urlize', 'wordcount', 'wordwrap', 'xmlattr'],
+ 'tests': ['!=', '<', '<=', '==', '>', '>=', 'callable', 'defined',
+ ..., 'odd', 'sameas', 'sequence', 'string', 'undefined', 'upper']}
+
+ .. versionadded:: 2.11.0
+ """
+
+ tags = {"debug"}
+
+ def parse(self, parser):
+ lineno = parser.stream.expect("name:debug").lineno
+ context = ContextReference()
+ result = self.call_method("_render", [context], lineno=lineno)
+ return nodes.Output([result], lineno=lineno)
+
+ def _render(self, context):
+ result = {
+ "context": context.get_all(),
+ "filters": sorted(self.environment.filters.keys()),
+ "tests": sorted(self.environment.tests.keys()),
+ }
+
+ # Set the depth since the intent is to show the top few names.
+ if version_info[:2] >= (3, 4):
+ return pprint.pformat(result, depth=3, compact=True)
+ else:
+ return pprint.pformat(result, depth=3)
+
+
+def extract_from_ast(node, gettext_functions=GETTEXT_FUNCTIONS, babel_style=True):
+ """Extract localizable strings from the given template node. Per
+ default this function returns matches in babel style that means non string
+ parameters as well as keyword arguments are returned as `None`. This
+ allows Babel to figure out what you really meant if you are using
+ gettext functions that allow keyword arguments for placeholder expansion.
+ If you don't want that behavior set the `babel_style` parameter to `False`
+ which causes only strings to be returned and parameters are always stored
+ in tuples. As a consequence invalid gettext calls (calls without a single
+ string parameter or string parameters after non-string parameters) are
+ skipped.
+
+ This example explains the behavior:
+
+ >>> from jinja2 import Environment
+ >>> env = Environment()
+ >>> node = env.parse('{{ (_("foo"), _(), ngettext("foo", "bar", 42)) }}')
+ >>> list(extract_from_ast(node))
+ [(1, '_', 'foo'), (1, '_', ()), (1, 'ngettext', ('foo', 'bar', None))]
+ >>> list(extract_from_ast(node, babel_style=False))
+ [(1, '_', ('foo',)), (1, 'ngettext', ('foo', 'bar'))]
+
+ For every string found this function yields a ``(lineno, function,
+ message)`` tuple, where:
+
+ * ``lineno`` is the number of the line on which the string was found,
+ * ``function`` is the name of the ``gettext`` function used (if the
+ string was extracted from embedded Python code), and
+ * ``message`` is the string itself (a ``unicode`` object, or a tuple
+ of ``unicode`` objects for functions with multiple string arguments).
+
+    This extraction function operates on the AST and is therefore unable
+    to extract any comments. For comment support you have to use the
+    Babel extraction interface or extract comments yourself.
+ """
+ for node in node.find_all(nodes.Call):
+ if (
+ not isinstance(node.node, nodes.Name)
+ or node.node.name not in gettext_functions
+ ):
+ continue
+
+ strings = []
+ for arg in node.args:
+ if isinstance(arg, nodes.Const) and isinstance(arg.value, string_types):
+ strings.append(arg.value)
+ else:
+ strings.append(None)
+
+ for _ in node.kwargs:
+ strings.append(None)
+ if node.dyn_args is not None:
+ strings.append(None)
+ if node.dyn_kwargs is not None:
+ strings.append(None)
+
+ if not babel_style:
+ strings = tuple(x for x in strings if x is not None)
+ if not strings:
+ continue
+ else:
+ if len(strings) == 1:
+ strings = strings[0]
+ else:
+ strings = tuple(strings)
+ yield node.lineno, node.node.name, strings
+
+
+class _CommentFinder(object):
+ """Helper class to find comments in a token stream. Can only
+ find comments for gettext calls forwards. Once the comment
+ from line 4 is found, a comment for line 1 will not return a
+ usable value.
+ """
+
+ def __init__(self, tokens, comment_tags):
+ self.tokens = tokens
+ self.comment_tags = comment_tags
+ self.offset = 0
+ self.last_lineno = 0
+
+ def find_backwards(self, offset):
+ try:
+ for _, token_type, token_value in reversed(
+ self.tokens[self.offset : offset]
+ ):
+ if token_type in ("comment", "linecomment"):
+ try:
+ prefix, comment = token_value.split(None, 1)
+ except ValueError:
+ continue
+ if prefix in self.comment_tags:
+ return [comment.rstrip()]
+ return []
+ finally:
+ self.offset = offset
+
+ def find_comments(self, lineno):
+ if not self.comment_tags or self.last_lineno > lineno:
+ return []
+ for idx, (token_lineno, _, _) in enumerate(self.tokens[self.offset :]):
+ if token_lineno > lineno:
+ return self.find_backwards(self.offset + idx)
+ return self.find_backwards(len(self.tokens))
+
+
+def babel_extract(fileobj, keywords, comment_tags, options):
+ """Babel extraction method for Jinja templates.
+
+ .. versionchanged:: 2.3
+ Basic support for translation comments was added. If `comment_tags`
+ is now set to a list of keywords for extraction, the extractor will
+ try to find the best preceding comment that begins with one of the
+        keywords. For best results, make sure not to have more than one
+        gettext call in one line of code and the matching comment in the
+        same line or the line before.
+
+ .. versionchanged:: 2.5.1
+ The `newstyle_gettext` flag can be set to `True` to enable newstyle
+ gettext calls.
+
+ .. versionchanged:: 2.7
+ A `silent` option can now be provided. If set to `False` template
+ syntax errors are propagated instead of being ignored.
+
+ :param fileobj: the file-like object the messages should be extracted from
+ :param keywords: a list of keywords (i.e. function names) that should be
+ recognized as translation functions
+ :param comment_tags: a list of translator tags to search for and include
+ in the results.
+ :param options: a dictionary of additional options (optional)
+ :return: an iterator over ``(lineno, funcname, message, comments)`` tuples.
+ (comments will be empty currently)
+ """
+ extensions = set()
+ for extension in options.get("extensions", "").split(","):
+ extension = extension.strip()
+ if not extension:
+ continue
+ extensions.add(import_string(extension))
+ if InternationalizationExtension not in extensions:
+ extensions.add(InternationalizationExtension)
+
+ def getbool(options, key, default=False):
+ return options.get(key, str(default)).lower() in ("1", "on", "yes", "true")
+
+ silent = getbool(options, "silent", True)
+ environment = Environment(
+ options.get("block_start_string", BLOCK_START_STRING),
+ options.get("block_end_string", BLOCK_END_STRING),
+ options.get("variable_start_string", VARIABLE_START_STRING),
+ options.get("variable_end_string", VARIABLE_END_STRING),
+ options.get("comment_start_string", COMMENT_START_STRING),
+ options.get("comment_end_string", COMMENT_END_STRING),
+ options.get("line_statement_prefix") or LINE_STATEMENT_PREFIX,
+ options.get("line_comment_prefix") or LINE_COMMENT_PREFIX,
+ getbool(options, "trim_blocks", TRIM_BLOCKS),
+ getbool(options, "lstrip_blocks", LSTRIP_BLOCKS),
+ NEWLINE_SEQUENCE,
+ getbool(options, "keep_trailing_newline", KEEP_TRAILING_NEWLINE),
+ frozenset(extensions),
+ cache_size=0,
+ auto_reload=False,
+ )
+
+ if getbool(options, "trimmed"):
+ environment.policies["ext.i18n.trimmed"] = True
+ if getbool(options, "newstyle_gettext"):
+ environment.newstyle_gettext = True
+
+ source = fileobj.read().decode(options.get("encoding", "utf-8"))
+ try:
+ node = environment.parse(source)
+ tokens = list(environment.lex(environment.preprocess(source)))
+ except TemplateSyntaxError:
+ if not silent:
+ raise
+ # skip templates with syntax errors
+ return
+
+ finder = _CommentFinder(tokens, comment_tags)
+ for lineno, func, message in extract_from_ast(node, keywords):
+ yield lineno, func, message, finder.find_comments(lineno)
+
+
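+# Direct-call sketch for ``babel_extract`` (illustrative only; normally
+# Babel invokes this function through its own extraction machinery):
+#
+#     >>> from io import BytesIO
+#     >>> src = BytesIO(b'{{ _("Hello") }}')
+#     >>> list(babel_extract(src, ("_",), [], {}))
+#     [(1, '_', 'Hello', [])]
+
+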
+#: nicer import names
+i18n = InternationalizationExtension
+do = ExprStmtExtension
+loopcontrols = LoopControlExtension
+with_ = WithExtension
+autoescape = AutoEscapeExtension
+debug = DebugExtension
diff --git a/third_party/python/Jinja2/jinja2/filters.py b/third_party/python/Jinja2/jinja2/filters.py
new file mode 100644
index 0000000000..74b108dcec
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/filters.py
@@ -0,0 +1,1382 @@
+# -*- coding: utf-8 -*-
+"""Built-in template filters used with the ``|`` operator."""
+import math
+import random
+import re
+import warnings
+from collections import namedtuple
+from itertools import chain
+from itertools import groupby
+
+from markupsafe import escape
+from markupsafe import Markup
+from markupsafe import soft_unicode
+
+from ._compat import abc
+from ._compat import imap
+from ._compat import iteritems
+from ._compat import string_types
+from ._compat import text_type
+from .exceptions import FilterArgumentError
+from .runtime import Undefined
+from .utils import htmlsafe_json_dumps
+from .utils import pformat
+from .utils import unicode_urlencode
+from .utils import urlize
+
+_word_re = re.compile(r"\w+", re.UNICODE)
+_word_beginning_split_re = re.compile(r"([-\s\(\{\[\<]+)", re.UNICODE)
+
+
+def contextfilter(f):
+ """Decorator for marking context dependent filters. The current
+ :class:`Context` will be passed as first argument.
+ """
+ f.contextfilter = True
+ return f
+
+
+def evalcontextfilter(f):
+ """Decorator for marking eval-context dependent filters. An eval
+ context object is passed as first argument. For more information
+ about the eval context, see :ref:`eval-context`.
+
+ .. versionadded:: 2.4
+ """
+ f.evalcontextfilter = True
+ return f
+
+
+def environmentfilter(f):
+ """Decorator for marking environment dependent filters. The current
+ :class:`Environment` is passed to the filter as first argument.
+ """
+ f.environmentfilter = True
+ return f
+
+
+def ignore_case(value):
+ """For use as a postprocessor for :func:`make_attrgetter`. Converts strings
+ to lowercase and returns other types as-is."""
+ return value.lower() if isinstance(value, string_types) else value
+
+
+def make_attrgetter(environment, attribute, postprocess=None, default=None):
+ """Returns a callable that looks up the given attribute from a
+ passed object with the rules of the environment. Dots are allowed
+ to access attributes of attributes. Integer parts in paths are
+ looked up as integers.
+ """
+ attribute = _prepare_attribute_parts(attribute)
+
+ def attrgetter(item):
+ for part in attribute:
+ item = environment.getitem(item, part)
+
+ if default and isinstance(item, Undefined):
+ item = default
+
+ if postprocess is not None:
+ item = postprocess(item)
+
+ return item
+
+ return attrgetter
+
+
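+# Sketch of the dotted-path lookup performed by ``make_attrgetter``
+# (illustrative only; uses a throwaway Environment):
+#
+#     >>> from jinja2 import Environment
+#     >>> getter = make_attrgetter(Environment(), "user.name", default="n/a")
+#     >>> getter({"user": {"name": "alice"}})
+#     'alice'
+#     >>> getter({"user": {}})  # missing key resolves to Undefined, then default
+#     'n/a'
+
+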
+def make_multi_attrgetter(environment, attribute, postprocess=None):
+ """Returns a callable that looks up the given comma separated
+ attributes from a passed object with the rules of the environment.
+ Dots are allowed to access attributes of each attribute. Integer
+ parts in paths are looked up as integers.
+
+ The value returned by the returned callable is a list of extracted
+ attribute values.
+
+ Examples of attribute: "attr1,attr2", "attr1.inner1.0,attr2.inner2.0", etc.
+ """
+ attribute_parts = (
+ attribute.split(",") if isinstance(attribute, string_types) else [attribute]
+ )
+ attribute = [
+ _prepare_attribute_parts(attribute_part) for attribute_part in attribute_parts
+ ]
+
+ def attrgetter(item):
+ items = [None] * len(attribute)
+ for i, attribute_part in enumerate(attribute):
+ item_i = item
+ for part in attribute_part:
+ item_i = environment.getitem(item_i, part)
+
+ if postprocess is not None:
+ item_i = postprocess(item_i)
+
+ items[i] = item_i
+ return items
+
+ return attrgetter
+
+
+def _prepare_attribute_parts(attr):
+ if attr is None:
+ return []
+ elif isinstance(attr, string_types):
+ return [int(x) if x.isdigit() else x for x in attr.split(".")]
+ else:
+ return [attr]
+
+
+def do_forceescape(value):
+ """Enforce HTML escaping. This will probably double escape variables."""
+ if hasattr(value, "__html__"):
+ value = value.__html__()
+ return escape(text_type(value))
+
+
+def do_urlencode(value):
+ """Quote data for use in a URL path or query using UTF-8.
+
+ Basic wrapper around :func:`urllib.parse.quote` when given a
+ string, or :func:`urllib.parse.urlencode` for a dict or iterable.
+
+ :param value: Data to quote. A string will be quoted directly. A
+ dict or iterable of ``(key, value)`` pairs will be joined as a
+ query string.
+
+ When given a string, "/" is not quoted. HTTP servers treat "/" and
+ "%2F" equivalently in paths. If you need quoted slashes, use the
+ ``|replace("/", "%2F")`` filter.
+
+ .. versionadded:: 2.7
+ """
+ if isinstance(value, string_types) or not isinstance(value, abc.Iterable):
+ return unicode_urlencode(value)
+
+ if isinstance(value, dict):
+ items = iteritems(value)
+ else:
+ items = iter(value)
+
+ return u"&".join(
+ "%s=%s" % (unicode_urlencode(k, for_qs=True), unicode_urlencode(v, for_qs=True))
+ for k, v in items
+ )
+
+
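+# Quick sketch of the two code paths above (illustrative; the exact
+# percent-encoding comes from urllib, and the query-string path
+# replaces spaces with "+"):
+#
+#     >>> do_urlencode("hello world/x")
+#     'hello%20world/x'
+#     >>> do_urlencode({"a": "b c"})
+#     'a=b+c'
+
+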
+@evalcontextfilter
+def do_replace(eval_ctx, s, old, new, count=None):
+ """Return a copy of the value with all occurrences of a substring
+ replaced with a new one. The first argument is the substring
+ that should be replaced, the second is the replacement string.
+ If the optional third argument ``count`` is given, only the first
+ ``count`` occurrences are replaced:
+
+ .. sourcecode:: jinja
+
+ {{ "Hello World"|replace("Hello", "Goodbye") }}
+ -> Goodbye World
+
+ {{ "aaaaargh"|replace("a", "d'oh, ", 2) }}
+ -> d'oh, d'oh, aaargh
+ """
+ if count is None:
+ count = -1
+ if not eval_ctx.autoescape:
+ return text_type(s).replace(text_type(old), text_type(new), count)
+ if (
+ hasattr(old, "__html__")
+ or hasattr(new, "__html__")
+ and not hasattr(s, "__html__")
+ ):
+ s = escape(s)
+ else:
+ s = soft_unicode(s)
+ return s.replace(soft_unicode(old), soft_unicode(new), count)
+
+
+def do_upper(s):
+ """Convert a value to uppercase."""
+ return soft_unicode(s).upper()
+
+
+def do_lower(s):
+ """Convert a value to lowercase."""
+ return soft_unicode(s).lower()
+
+
+@evalcontextfilter
+def do_xmlattr(_eval_ctx, d, autospace=True):
+ """Create an SGML/XML attribute string based on the items in a dict.
+ All values that are neither `none` nor `undefined` are automatically
+ escaped:
+
+ .. sourcecode:: html+jinja
+
+ <ul{{ {'class': 'my_list', 'missing': none,
+ 'id': 'list-%d'|format(variable)}|xmlattr }}>
+ ...
+ </ul>
+
+ Results in something like this:
+
+ .. sourcecode:: html
+
+ <ul class="my_list" id="list-42">
+ ...
+ </ul>
+
+    As you can see, it automatically prepends a space in front of the
+    result if the filter returned something, unless the second parameter
+    is false.
+ """
+ rv = u" ".join(
+ u'%s="%s"' % (escape(key), escape(value))
+ for key, value in iteritems(d)
+ if value is not None and not isinstance(value, Undefined)
+ )
+ if autospace and rv:
+ rv = u" " + rv
+ if _eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+def do_capitalize(s):
+ """Capitalize a value. The first character will be uppercase, all others
+ lowercase.
+ """
+ return soft_unicode(s).capitalize()
+
+
+def do_title(s):
+ """Return a titlecased version of the value. I.e. words will start with
+ uppercase letters, all remaining characters are lowercase.
+ """
+ return "".join(
+ [
+ item[0].upper() + item[1:].lower()
+ for item in _word_beginning_split_re.split(soft_unicode(s))
+ if item
+ ]
+ )
+
+
+def do_dictsort(value, case_sensitive=False, by="key", reverse=False):
+ """Sort a dict and yield (key, value) pairs. Because python dicts are
+ unsorted you may want to use this function to order them by either
+ key or value:
+
+ .. sourcecode:: jinja
+
+ {% for key, value in mydict|dictsort %}
+ sort the dict by key, case insensitive
+
+ {% for key, value in mydict|dictsort(reverse=true) %}
+ sort the dict by key, case insensitive, reverse order
+
+ {% for key, value in mydict|dictsort(true) %}
+ sort the dict by key, case sensitive
+
+ {% for key, value in mydict|dictsort(false, 'value') %}
+ sort the dict by value, case insensitive
+ """
+ if by == "key":
+ pos = 0
+ elif by == "value":
+ pos = 1
+ else:
+ raise FilterArgumentError('You can only sort by either "key" or "value"')
+
+ def sort_func(item):
+ value = item[pos]
+
+ if not case_sensitive:
+ value = ignore_case(value)
+
+ return value
+
+ return sorted(value.items(), key=sort_func, reverse=reverse)
+
+
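+# Worked example for the sort key above (illustrative): by default the
+# comparison is case insensitive, so 'B' sorts after 'a':
+#
+#     >>> do_dictsort({"B": 1, "a": 2})
+#     [('a', 2), ('B', 1)]
+
+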
+@environmentfilter
+def do_sort(environment, value, reverse=False, case_sensitive=False, attribute=None):
+ """Sort an iterable using Python's :func:`sorted`.
+
+ .. sourcecode:: jinja
+
+ {% for city in cities|sort %}
+ ...
+ {% endfor %}
+
+ :param reverse: Sort descending instead of ascending.
+ :param case_sensitive: When sorting strings, sort upper and lower
+ case separately.
+ :param attribute: When sorting objects or dicts, an attribute or
+ key to sort by. Can use dot notation like ``"address.city"``.
+ Can be a list of attributes like ``"age,name"``.
+
+    The sort is stable; it does not change the relative order of
+    elements that compare equal. This makes it possible to chain
+    sorts on different attributes and orderings.
+
+ .. sourcecode:: jinja
+
+ {% for user in users|sort(attribute="name")
+ |sort(reverse=true, attribute="age") %}
+ ...
+ {% endfor %}
+
+    As a shortcut to chaining when the direction is the same for all
+    attributes, pass a comma separated list of attributes.
+
+ .. sourcecode:: jinja
+
+        {% for user in users|sort(attribute="age,name") %}
+ ...
+ {% endfor %}
+
+ .. versionchanged:: 2.11.0
+ The ``attribute`` parameter can be a comma separated list of
+ attributes, e.g. ``"age,name"``.
+
+ .. versionchanged:: 2.6
+ The ``attribute`` parameter was added.
+ """
+ key_func = make_multi_attrgetter(
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ )
+ return sorted(value, key=key_func, reverse=reverse)
+
+
+@environmentfilter
+def do_unique(environment, value, case_sensitive=False, attribute=None):
+ """Returns a list of unique items from the given iterable.
+
+ .. sourcecode:: jinja
+
+ {{ ['foo', 'bar', 'foobar', 'FooBar']|unique|list }}
+ -> ['foo', 'bar', 'foobar']
+
+ The unique items are yielded in the same order as their first occurrence in
+ the iterable passed to the filter.
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Filter objects with unique values for this attribute.
+ """
+ getter = make_attrgetter(
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ )
+ seen = set()
+
+ for item in value:
+ key = getter(item)
+
+ if key not in seen:
+ seen.add(key)
+ yield item
+
+
+def _min_or_max(environment, value, func, case_sensitive, attribute):
+ it = iter(value)
+
+ try:
+ first = next(it)
+ except StopIteration:
+ return environment.undefined("No aggregated item, sequence was empty.")
+
+ key_func = make_attrgetter(
+ environment, attribute, postprocess=ignore_case if not case_sensitive else None
+ )
+ return func(chain([first], it), key=key_func)
+
+
+@environmentfilter
+def do_min(environment, value, case_sensitive=False, attribute=None):
+ """Return the smallest item from the sequence.
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|min }}
+ -> 1
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Get the object with the min value of this attribute.
+ """
+ return _min_or_max(environment, value, min, case_sensitive, attribute)
+
+
+@environmentfilter
+def do_max(environment, value, case_sensitive=False, attribute=None):
+ """Return the largest item from the sequence.
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|max }}
+ -> 3
+
+ :param case_sensitive: Treat upper and lower case strings as distinct.
+ :param attribute: Get the object with the max value of this attribute.
+ """
+ return _min_or_max(environment, value, max, case_sensitive, attribute)
+
+
+def do_default(value, default_value=u"", boolean=False):
+ """If the value is undefined it will return the passed default value,
+ otherwise the value of the variable:
+
+ .. sourcecode:: jinja
+
+ {{ my_variable|default('my_variable is not defined') }}
+
+ This will output the value of ``my_variable`` if the variable was
+ defined, otherwise ``'my_variable is not defined'``. If you want
+ to use default with variables that evaluate to false you have to
+ set the second parameter to `true`:
+
+ .. sourcecode:: jinja
+
+ {{ ''|default('the string was empty', true) }}
+
+ .. versionchanged:: 2.11
+ It's now possible to configure the :class:`~jinja2.Environment` with
+ :class:`~jinja2.ChainableUndefined` to make the `default` filter work
+ on nested elements and attributes that may contain undefined values
+ in the chain without getting an :exc:`~jinja2.UndefinedError`.
+ """
+ if isinstance(value, Undefined) or (boolean and not value):
+ return default_value
+ return value
+
+
+@evalcontextfilter
+def do_join(eval_ctx, value, d=u"", attribute=None):
+ """Return a string which is the concatenation of the strings in the
+ sequence. The separator between elements is an empty string per
+ default, you can define it with the optional parameter:
+
+ .. sourcecode:: jinja
+
+ {{ [1, 2, 3]|join('|') }}
+ -> 1|2|3
+
+ {{ [1, 2, 3]|join }}
+ -> 123
+
+ It is also possible to join certain attributes of an object:
+
+ .. sourcecode:: jinja
+
+ {{ users|join(', ', attribute='username') }}
+
+ .. versionadded:: 2.6
+ The `attribute` parameter was added.
+ """
+ if attribute is not None:
+ value = imap(make_attrgetter(eval_ctx.environment, attribute), value)
+
+ # no automatic escaping? joining is a lot easier then
+ if not eval_ctx.autoescape:
+ return text_type(d).join(imap(text_type, value))
+
+ # if the delimiter doesn't have an html representation we check
+ # if any of the items has. If yes we do a coercion to Markup
+ if not hasattr(d, "__html__"):
+ value = list(value)
+ do_escape = False
+ for idx, item in enumerate(value):
+ if hasattr(item, "__html__"):
+ do_escape = True
+ else:
+ value[idx] = text_type(item)
+ if do_escape:
+ d = escape(d)
+ else:
+ d = text_type(d)
+ return d.join(value)
+
+    # no html involved, do normal joining
+ return soft_unicode(d).join(imap(soft_unicode, value))
+
+
+def do_center(value, width=80):
+ """Centers the value in a field of a given width."""
+ return text_type(value).center(width)
+
+
+@environmentfilter
+def do_first(environment, seq):
+ """Return the first item of a sequence."""
+ try:
+ return next(iter(seq))
+ except StopIteration:
+ return environment.undefined("No first item, sequence was empty.")
+
+
+@environmentfilter
+def do_last(environment, seq):
+ """
+ Return the last item of a sequence.
+
+ Note: Does not work with generators. You may want to explicitly
+ convert it to a list:
+
+ .. sourcecode:: jinja
+
+ {{ data | selectattr('name', '==', 'Jinja') | list | last }}
+ """
+ try:
+ return next(iter(reversed(seq)))
+ except StopIteration:
+ return environment.undefined("No last item, sequence was empty.")
+
+
+@contextfilter
+def do_random(context, seq):
+ """Return a random item from the sequence."""
+ try:
+ return random.choice(seq)
+ except IndexError:
+ return context.environment.undefined("No random item, sequence was empty.")
+
+
+def do_filesizeformat(value, binary=False):
+ """Format the value like a 'human-readable' file size (i.e. 13 kB,
+ 4.1 MB, 102 Bytes, etc). Per default decimal prefixes are used (Mega,
+ Giga, etc.), if the second parameter is set to `True` the binary
+ prefixes are used (Mebi, Gibi).
+ """
+ bytes = float(value)
+ base = binary and 1024 or 1000
+ prefixes = [
+ (binary and "KiB" or "kB"),
+ (binary and "MiB" or "MB"),
+ (binary and "GiB" or "GB"),
+ (binary and "TiB" or "TB"),
+ (binary and "PiB" or "PB"),
+ (binary and "EiB" or "EB"),
+ (binary and "ZiB" or "ZB"),
+ (binary and "YiB" or "YB"),
+ ]
+ if bytes == 1:
+ return "1 Byte"
+ elif bytes < base:
+ return "%d Bytes" % bytes
+ else:
+ for i, prefix in enumerate(prefixes):
+ unit = base ** (i + 2)
+ if bytes < unit:
+ return "%.1f %s" % ((base * bytes / unit), prefix)
+ return "%.1f %s" % ((base * bytes / unit), prefix)
+
+
+def do_pprint(value, verbose=False):
+ """Pretty print a variable. Useful for debugging.
+
+    From Jinja 1.2 onwards you can pass it a parameter. If this parameter
+    is truthy the output will be more verbose (this requires `pretty`).
+ """
+ return pformat(value, verbose=verbose)
+
+
+@evalcontextfilter
+def do_urlize(
+ eval_ctx, value, trim_url_limit=None, nofollow=False, target=None, rel=None
+):
+ """Converts URLs in plain text into clickable links.
+
+    If you pass the filter an additional integer it will shorten the URLs
+    to that number. Also a third argument exists that makes the URLs
+    "nofollow":
+
+ .. sourcecode:: jinja
+
+ {{ mytext|urlize(40, true) }}
+ links are shortened to 40 chars and defined with rel="nofollow"
+
+ If *target* is specified, the ``target`` attribute will be added to the
+ ``<a>`` tag:
+
+ .. sourcecode:: jinja
+
+ {{ mytext|urlize(40, target='_blank') }}
+
+ .. versionchanged:: 2.8+
+ The *target* parameter was added.
+ """
+ policies = eval_ctx.environment.policies
+ rel = set((rel or "").split() or [])
+ if nofollow:
+ rel.add("nofollow")
+ rel.update((policies["urlize.rel"] or "").split())
+ if target is None:
+ target = policies["urlize.target"]
+ rel = " ".join(sorted(rel)) or None
+ rv = urlize(value, trim_url_limit, rel=rel, target=target)
+ if eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+def do_indent(s, width=4, first=False, blank=False, indentfirst=None):
+ """Return a copy of the string with each line indented by 4 spaces. The
+ first line and blank lines are not indented by default.
+
+ :param width: Number of spaces to indent by.
+ :param first: Don't skip indenting the first line.
+ :param blank: Don't skip indenting empty lines.
+
+ .. versionchanged:: 2.10
+ Blank lines are not indented by default.
+
+ Rename the ``indentfirst`` argument to ``first``.
+ """
+ if indentfirst is not None:
+ warnings.warn(
+ "The 'indentfirst' argument is renamed to 'first' and will"
+ " be removed in version 3.0.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ first = indentfirst
+
+ indention = u" " * width
+ newline = u"\n"
+
+ if isinstance(s, Markup):
+ indention = Markup(indention)
+ newline = Markup(newline)
+
+ s += newline # this quirk is necessary for splitlines method
+
+ if blank:
+ rv = (newline + indention).join(s.splitlines())
+ else:
+ lines = s.splitlines()
+ rv = lines.pop(0)
+
+ if lines:
+ rv += newline + newline.join(
+ indention + line if line else line for line in lines
+ )
+
+ if first:
+ rv = indention + rv
+
+ return rv
+
+
+@environmentfilter
+def do_truncate(env, s, length=255, killwords=False, end="...", leeway=None):
+ """Return a truncated copy of the string. The length is specified
+ with the first parameter which defaults to ``255``. If the second
+ parameter is ``true`` the filter will cut the text at length. Otherwise
+ it will discard the last word. If the text was in fact
+ truncated it will append an ellipsis sign (``"..."``). If you want a
+ different ellipsis sign than ``"..."`` you can specify it using the
+ third parameter. Strings that only exceed the length by the tolerance
+ margin given in the fourth parameter will not be truncated.
+
+ .. sourcecode:: jinja
+
+ {{ "foo bar baz qux"|truncate(9) }}
+ -> "foo..."
+ {{ "foo bar baz qux"|truncate(9, True) }}
+ -> "foo ba..."
+ {{ "foo bar baz qux"|truncate(11) }}
+ -> "foo bar baz qux"
+ {{ "foo bar baz qux"|truncate(11, False, '...', 0) }}
+ -> "foo bar..."
+
+ The default leeway on newer Jinja versions is 5 and was 0 before but
+ can be reconfigured globally.
+ """
+ if leeway is None:
+ leeway = env.policies["truncate.leeway"]
+ assert length >= len(end), "expected length >= %s, got %s" % (len(end), length)
+ assert leeway >= 0, "expected leeway >= 0, got %s" % leeway
+ if len(s) <= length + leeway:
+ return s
+ if killwords:
+ return s[: length - len(end)] + end
+ result = s[: length - len(end)].rsplit(" ", 1)[0]
+ return result + end
+
+
+@environmentfilter
+def do_wordwrap(
+ environment,
+ s,
+ width=79,
+ break_long_words=True,
+ wrapstring=None,
+ break_on_hyphens=True,
+):
+ """Wrap a string to the given width. Existing newlines are treated
+ as paragraphs to be wrapped separately.
+
+ :param s: Original text to wrap.
+ :param width: Maximum length of wrapped lines.
+ :param break_long_words: If a word is longer than ``width``, break
+ it across lines.
+ :param break_on_hyphens: If a word contains hyphens, it may be split
+ across lines.
+ :param wrapstring: String to join each wrapped line. Defaults to
+ :attr:`Environment.newline_sequence`.
+
+ .. versionchanged:: 2.11
+ Existing newlines are treated as paragraphs wrapped separately.
+
+ .. versionchanged:: 2.11
+ Added the ``break_on_hyphens`` parameter.
+
+ .. versionchanged:: 2.7
+ Added the ``wrapstring`` parameter.
+ """
+
+ import textwrap
+
+ if not wrapstring:
+ wrapstring = environment.newline_sequence
+
+ # textwrap.wrap doesn't consider existing newlines when wrapping.
+ # If the string has a newline before width, wrap will still insert
+ # a newline at width, resulting in a short line. Instead, split and
+ # wrap each paragraph individually.
+ return wrapstring.join(
+ [
+ wrapstring.join(
+ textwrap.wrap(
+ line,
+ width=width,
+ expand_tabs=False,
+ replace_whitespace=False,
+ break_long_words=break_long_words,
+ break_on_hyphens=break_on_hyphens,
+ )
+ )
+ for line in s.splitlines()
+ ]
+ )
+
+
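+# Minimal sketch of the paragraph-wise wrapping above (illustrative;
+# assumes the default "\n" newline sequence of a fresh Environment):
+#
+#     >>> from jinja2 import Environment
+#     >>> do_wordwrap(Environment(), "a b c d", width=3)
+#     'a b\nc d'
+
+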
+def do_wordcount(s):
+ """Count the words in that string."""
+ return len(_word_re.findall(soft_unicode(s)))
+
+
+def do_int(value, default=0, base=10):
+ """Convert the value into an integer. If the
+ conversion doesn't work it will return ``0``. You can
+ override this default using the first parameter. You
+ can also override the default base (10) in the second
+ parameter, which handles input with prefixes such as
+ 0b, 0o and 0x for bases 2, 8 and 16 respectively.
+ The base is ignored for decimal numbers and non-string values.
+ """
+ try:
+ if isinstance(value, string_types):
+ return int(value, base)
+ return int(value)
+ except (TypeError, ValueError):
+ # this quirk is necessary so that "42.23"|int gives 42.
+ try:
+ return int(float(value))
+ except (TypeError, ValueError):
+ return default
+
+
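+# The conversion fallbacks above, illustrated:
+#
+#     >>> do_int("0x4d", base=16)
+#     77
+#     >>> do_int("42.23")  # falls back to int(float(...))
+#     42
+
+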
+def do_float(value, default=0.0):
+ """Convert the value into a floating point number. If the
+ conversion doesn't work it will return ``0.0``. You can
+ override this default using the first parameter.
+ """
+ try:
+ return float(value)
+ except (TypeError, ValueError):
+ return default
+
+
+def do_format(value, *args, **kwargs):
+ """Apply the given values to a `printf-style`_ format string, like
+ ``string % values``.
+
+ .. sourcecode:: jinja
+
+ {{ "%s, %s!"|format(greeting, name) }}
+ Hello, World!
+
+ In most cases it should be more convenient and efficient to use the
+ ``%`` operator or :meth:`str.format`.
+
+ .. code-block:: text
+
+ {{ "%s, %s!" % (greeting, name) }}
+ {{ "{}, {}!".format(greeting, name) }}
+
+ .. _printf-style: https://docs.python.org/library/stdtypes.html
+ #printf-style-string-formatting
+ """
+ if args and kwargs:
+ raise FilterArgumentError(
+ "can't handle positional and keyword arguments at the same time"
+ )
+ return soft_unicode(value) % (kwargs or args)
+
+
+def do_trim(value, chars=None):
+ """Strip leading and trailing characters, by default whitespace."""
+ return soft_unicode(value).strip(chars)
+
+
+def do_striptags(value):
+ """Strip SGML/XML tags and replace adjacent whitespace by one space."""
+ if hasattr(value, "__html__"):
+ value = value.__html__()
+ return Markup(text_type(value)).striptags()
+
+
+def do_slice(value, slices, fill_with=None):
+ """Slice an iterator and return a list of lists containing
+ those items. Useful if you want to create a div containing
+ three ul tags that represent columns:
+
+ .. sourcecode:: html+jinja
+
+ <div class="columnwrapper">
+ {%- for column in items|slice(3) %}
+ <ul class="column-{{ loop.index }}">
+ {%- for item in column %}
+ <li>{{ item }}</li>
+ {%- endfor %}
+ </ul>
+ {%- endfor %}
+ </div>
+
+ If you pass it a second argument it's used to fill missing
+ values on the last iteration.
+ """
+ seq = list(value)
+ length = len(seq)
+ items_per_slice = length // slices
+ slices_with_extra = length % slices
+ offset = 0
+ for slice_number in range(slices):
+ start = offset + slice_number * items_per_slice
+ if slice_number < slices_with_extra:
+ offset += 1
+ end = offset + (slice_number + 1) * items_per_slice
+ tmp = seq[start:end]
+ if fill_with is not None and slice_number >= slices_with_extra:
+ tmp.append(fill_with)
+ yield tmp
+
+
+def do_batch(value, linecount, fill_with=None):
+ """
+ A filter that batches items. It works pretty much like `slice`
+ just the other way round. It returns a list of lists with the
+ given number of items. If you provide a second parameter this
+ is used to fill up missing items. See this example:
+
+ .. sourcecode:: html+jinja
+
+ <table>
+ {%- for row in items|batch(3, '&nbsp;') %}
+ <tr>
+ {%- for column in row %}
+ <td>{{ column }}</td>
+ {%- endfor %}
+ </tr>
+ {%- endfor %}
+ </table>
+ """
+ tmp = []
+ for item in value:
+ if len(tmp) == linecount:
+ yield tmp
+ tmp = []
+ tmp.append(item)
+ if tmp:
+ if fill_with is not None and len(tmp) < linecount:
+ tmp += [fill_with] * (linecount - len(tmp))
+ yield tmp
+
+
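+# ``slice`` and ``batch`` side by side (illustrative): slice yields a
+# fixed number of columns, batch yields rows of a fixed length:
+#
+#     >>> list(do_slice(range(7), 3))
+#     [[0, 1, 2], [3, 4], [5, 6]]
+#     >>> list(do_batch(range(7), 3, "-"))
+#     [[0, 1, 2], [3, 4, 5], [6, '-', '-']]
+
+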
+def do_round(value, precision=0, method="common"):
+ """Round the number to a given precision. The first
+ parameter specifies the precision (default is ``0``), the
+ second the rounding method:
+
+ - ``'common'`` rounds either up or down
+ - ``'ceil'`` always rounds up
+ - ``'floor'`` always rounds down
+
+ If you don't specify a method ``'common'`` is used.
+
+ .. sourcecode:: jinja
+
+ {{ 42.55|round }}
+ -> 43.0
+ {{ 42.55|round(1, 'floor') }}
+ -> 42.5
+
+ Note that even if rounded to 0 precision, a float is returned. If
+ you need a real integer, pipe it through `int`:
+
+ .. sourcecode:: jinja
+
+ {{ 42.55|round|int }}
+ -> 43
+ """
+ if method not in {"common", "ceil", "floor"}:
+ raise FilterArgumentError("method must be common, ceil or floor")
+ if method == "common":
+ return round(value, precision)
+ func = getattr(math, method)
+ return func(value * (10 ** precision)) / (10 ** precision)
+
+
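+# ``ceil`` and ``floor`` apply before scaling back down (illustrative;
+# ``common`` delegates to Python's ``round``, which may use banker's
+# rounding on Python 3):
+#
+#     >>> do_round(42.55, 1, "floor")
+#     42.5
+#     >>> do_round(42.55, 1, "ceil")
+#     42.6
+
+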
+# Use a regular tuple repr here. This is what we did in the past and we
+# really want to hide this custom type as much as possible. In particular
+# we do not want to accidentally expose an auto generated repr in case
+# people start to print this out in comments or something similar for
+# debugging.
+_GroupTuple = namedtuple("_GroupTuple", ["grouper", "list"])
+_GroupTuple.__repr__ = tuple.__repr__
+_GroupTuple.__str__ = tuple.__str__
+
+
+@environmentfilter
+def do_groupby(environment, value, attribute):
+ """Group a sequence of objects by an attribute using Python's
+ :func:`itertools.groupby`. The attribute can use dot notation for
+ nested access, like ``"address.city"``. Unlike Python's ``groupby``,
+ the values are sorted first so only one group is returned for each
+ unique value.
+
+ For example, a list of ``User`` objects with a ``city`` attribute
+ can be rendered in groups. In this example, ``grouper`` refers to
+ the ``city`` value of the group.
+
+ .. sourcecode:: html+jinja
+
+ <ul>{% for city, items in users|groupby("city") %}
+ <li>{{ city }}
+ <ul>{% for user in items %}
+ <li>{{ user.name }}
+ {% endfor %}</ul>
+ </li>
+ {% endfor %}</ul>
+
+ ``groupby`` yields namedtuples of ``(grouper, list)``, which
+ can be used instead of the tuple unpacking above. ``grouper`` is the
+ value of the attribute, and ``list`` is the items with that value.
+
+ .. sourcecode:: html+jinja
+
+ <ul>{% for group in users|groupby("city") %}
+ <li>{{ group.grouper }}: {{ group.list|join(", ") }}
+ {% endfor %}</ul>
+
+ .. versionchanged:: 2.6
+ The attribute supports dot notation for nested access.
+ """
+ expr = make_attrgetter(environment, attribute)
+ return [
+ _GroupTuple(key, list(values))
+ for key, values in groupby(sorted(value, key=expr), expr)
+ ]
+
+
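+# Template-level sketch of ``groupby`` (illustrative): values are
+# sorted first, so each city appears exactly once:
+#
+#     >>> from jinja2 import Environment
+#     >>> Environment().from_string(
+#     ...     '{% for g in users|groupby("city") %}{{ g.grouper }}:{{ g.list|length }} {% endfor %}'
+#     ... ).render(users=[{"city": "NY"}, {"city": "SF"}, {"city": "NY"}])
+#     'NY:2 SF:1 '
+
+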
+@environmentfilter
+def do_sum(environment, iterable, attribute=None, start=0):
+ """Returns the sum of a sequence of numbers plus the value of parameter
+ 'start' (which defaults to 0). When the sequence is empty it returns
+ start.
+
+ It is also possible to sum up only certain attributes:
+
+ .. sourcecode:: jinja
+
+ Total: {{ items|sum(attribute='price') }}
+
+ .. versionchanged:: 2.6
+       The `attribute` parameter was added to allow summing up over
+ attributes. Also the `start` parameter was moved on to the right.
+ """
+ if attribute is not None:
+ iterable = imap(make_attrgetter(environment, attribute), iterable)
+ return sum(iterable, start)
+
+
+def do_list(value):
+ """Convert the value into a list. If it was a string the returned list
+ will be a list of characters.
+ """
+ return list(value)
+
+
+def do_mark_safe(value):
+ """Mark the value as safe which means that in an environment with automatic
+ escaping enabled this variable will not be escaped.
+ """
+ return Markup(value)
+
+
+def do_mark_unsafe(value):
+ """Mark a value as unsafe. This is the reverse operation for :func:`safe`."""
+ return text_type(value)
+
+
+def do_reverse(value):
+ """Reverse the object or return an iterator that iterates over it the other
+ way round.
+ """
+ if isinstance(value, string_types):
+ return value[::-1]
+ try:
+ return reversed(value)
+ except TypeError:
+ try:
+ rv = list(value)
+ rv.reverse()
+ return rv
+ except TypeError:
+ raise FilterArgumentError("argument must be iterable")
+
+
+@environmentfilter
+def do_attr(environment, obj, name):
+ """Get an attribute of an object. ``foo|attr("bar")`` works like
+ ``foo.bar`` just that always an attribute is returned and items are not
+ looked up.
+
+ See :ref:`Notes on subscriptions <notes-on-subscriptions>` for more details.
+ """
+ try:
+ name = str(name)
+ except UnicodeError:
+ pass
+ else:
+ try:
+ value = getattr(obj, name)
+ except AttributeError:
+ pass
+ else:
+ if environment.sandboxed and not environment.is_safe_attribute(
+ obj, name, value
+ ):
+ return environment.unsafe_undefined(obj, name)
+ return value
+ return environment.undefined(obj=obj, name=name)
+
+
+@contextfilter
+def do_map(*args, **kwargs):
+ """Applies a filter on a sequence of objects or looks up an attribute.
+ This is useful when dealing with lists of objects but you are really
+ only interested in a certain value of it.
+
+ The basic usage is mapping on an attribute. Imagine you have a list
+ of users but you are only interested in a list of usernames:
+
+ .. sourcecode:: jinja
+
+ Users on this page: {{ users|map(attribute='username')|join(', ') }}
+
+ You can specify a ``default`` value to use if an object in the list
+ does not have the given attribute.
+
+ .. sourcecode:: jinja
+
+ {{ users|map(attribute="username", default="Anonymous")|join(", ") }}
+
+ Alternatively you can let it invoke a filter by passing the name of the
+ filter and the arguments afterwards. A good example would be applying a
+ text conversion filter on a sequence:
+
+ .. sourcecode:: jinja
+
+ Users on this page: {{ titles|map('lower')|join(', ') }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (u.username for u in users)
+ (u.username or "Anonymous" for u in users)
+ (do_lower(x) for x in titles)
+
+ .. versionchanged:: 2.11.0
+ Added the ``default`` parameter.
+
+ .. versionadded:: 2.7
+ """
+ seq, func = prepare_map(args, kwargs)
+ if seq:
+ for item in seq:
+ yield func(item)
+
+
+@contextfilter
+def do_select(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to each object,
+ and only selecting the objects with the test succeeding.
+
+ If no test is specified, each object will be evaluated as a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ numbers|select("odd") }}
+ {{ numbers|select("odd") }}
+ {{ numbers|select("divisibleby", 3) }}
+ {{ numbers|select("lessthan", 42) }}
+ {{ strings|select("equalto", "mystring") }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (n for n in numbers if test_odd(n))
+ (n for n in numbers if test_divisibleby(n, 3))
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(args, kwargs, lambda x: x, False)
+
+
+@contextfilter
+def do_reject(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to each object,
+ and rejecting the objects with the test succeeding.
+
+ If no test is specified, each object will be evaluated as a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ numbers|reject("odd") }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+ (n for n in numbers if not test_odd(n))
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(args, kwargs, lambda x: not x, False)
+
+
+@contextfilter
+def do_selectattr(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to the specified
+ attribute of each object, and only selecting the objects with the
+ test succeeding.
+
+ If no test is specified, the attribute's value will be evaluated as
+ a boolean.
+
+ Example usage:
+
+ .. sourcecode:: jinja
+
+ {{ users|selectattr("is_active") }}
+ {{ users|selectattr("email", "none") }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+        (user for user in users if user.is_active)
+        (user for user in users if test_none(user.email))
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(args, kwargs, lambda x: x, True)
+
+
+@contextfilter
+def do_rejectattr(*args, **kwargs):
+ """Filters a sequence of objects by applying a test to the specified
+ attribute of each object, and rejecting the objects with the test
+ succeeding.
+
+ If no test is specified, the attribute's value will be evaluated as
+ a boolean.
+
+ .. sourcecode:: jinja
+
+ {{ users|rejectattr("is_active") }}
+ {{ users|rejectattr("email", "none") }}
+
+ Similar to a generator comprehension such as:
+
+ .. code-block:: python
+
+        (user for user in users if not user.is_active)
+        (user for user in users if not test_none(user.email))
+
+ .. versionadded:: 2.7
+ """
+ return select_or_reject(args, kwargs, lambda x: not x, True)
+
+
+@evalcontextfilter
+def do_tojson(eval_ctx, value, indent=None):
+ """Dumps a structure to JSON so that it's safe to use in ``<script>``
+ tags. It accepts the same arguments and returns a JSON string. Note that
+ this is available in templates through the ``|tojson`` filter which will
+ also mark the result as safe. Due to how this function escapes certain
+ characters this is safe even if used outside of ``<script>`` tags.
+
+ The following characters are escaped in strings:
+
+ - ``<``
+ - ``>``
+ - ``&``
+ - ``'``
+
+ This makes it safe to embed such strings in any place in HTML with the
+ notable exception of double quoted attributes. In that case single
+ quote your attributes or HTML escape it in addition.
+
+ The indent parameter can be used to enable pretty printing. Set it to
+ the number of spaces that the structures should be indented with.
+
+ Note that this filter is for use in HTML contexts only.
+
+ .. versionadded:: 2.9
+ """
+ policies = eval_ctx.environment.policies
+ dumper = policies["json.dumps_function"]
+ options = policies["json.dumps_kwargs"]
+ if indent is not None:
+ options = dict(options)
+ options["indent"] = indent
+ return htmlsafe_json_dumps(value, dumper=dumper, **options)
+
+
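+# HTML-safety sketch for ``tojson`` (illustrative): angle brackets are
+# escaped so the output can be embedded in ``<script>`` blocks:
+#
+#     >>> from jinja2 import Environment
+#     >>> Environment().from_string("{{ x|tojson }}").render(x={"a": "<b>"})
+#     '{"a": "\\u003cb\\u003e"}'
+
+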
+def prepare_map(args, kwargs):
+ context = args[0]
+ seq = args[1]
+ default = None
+
+ if len(args) == 2 and "attribute" in kwargs:
+ attribute = kwargs.pop("attribute")
+ default = kwargs.pop("default", None)
+ if kwargs:
+ raise FilterArgumentError(
+ "Unexpected keyword argument %r" % next(iter(kwargs))
+ )
+ func = make_attrgetter(context.environment, attribute, default=default)
+ else:
+ try:
+ name = args[2]
+ args = args[3:]
+ except LookupError:
+ raise FilterArgumentError("map requires a filter argument")
+
+ def func(item):
+ return context.environment.call_filter(
+ name, item, args, kwargs, context=context
+ )
+
+ return seq, func
+
+
+def prepare_select_or_reject(args, kwargs, modfunc, lookup_attr):
+ context = args[0]
+ seq = args[1]
+ if lookup_attr:
+ try:
+ attr = args[2]
+ except LookupError:
+ raise FilterArgumentError("Missing parameter for attribute name")
+ transfunc = make_attrgetter(context.environment, attr)
+ off = 1
+ else:
+ off = 0
+
+ def transfunc(x):
+ return x
+
+ try:
+ name = args[2 + off]
+ args = args[3 + off :]
+
+ def func(item):
+ return context.environment.call_test(name, item, args, kwargs)
+
+ except LookupError:
+ func = bool
+
+ return seq, lambda item: modfunc(func(transfunc(item)))
+
+
+def select_or_reject(args, kwargs, modfunc, lookup_attr):
+ seq, func = prepare_select_or_reject(args, kwargs, modfunc, lookup_attr)
+ if seq:
+ for item in seq:
+ if func(item):
+ yield item
+
+
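+# End-to-end sketch of the select/reject plumbing above (illustrative):
+# the test name and its arguments are forwarded to ``call_test``:
+#
+#     >>> from jinja2 import Environment
+#     >>> Environment().from_string('{{ [1, 2, 3, 4]|select("even")|list }}').render()
+#     '[2, 4]'
+
+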
+FILTERS = {
+ "abs": abs,
+ "attr": do_attr,
+ "batch": do_batch,
+ "capitalize": do_capitalize,
+ "center": do_center,
+ "count": len,
+ "d": do_default,
+ "default": do_default,
+ "dictsort": do_dictsort,
+ "e": escape,
+ "escape": escape,
+ "filesizeformat": do_filesizeformat,
+ "first": do_first,
+ "float": do_float,
+ "forceescape": do_forceescape,
+ "format": do_format,
+ "groupby": do_groupby,
+ "indent": do_indent,
+ "int": do_int,
+ "join": do_join,
+ "last": do_last,
+ "length": len,
+ "list": do_list,
+ "lower": do_lower,
+ "map": do_map,
+ "min": do_min,
+ "max": do_max,
+ "pprint": do_pprint,
+ "random": do_random,
+ "reject": do_reject,
+ "rejectattr": do_rejectattr,
+ "replace": do_replace,
+ "reverse": do_reverse,
+ "round": do_round,
+ "safe": do_mark_safe,
+ "select": do_select,
+ "selectattr": do_selectattr,
+ "slice": do_slice,
+ "sort": do_sort,
+ "string": soft_unicode,
+ "striptags": do_striptags,
+ "sum": do_sum,
+ "title": do_title,
+ "trim": do_trim,
+ "truncate": do_truncate,
+ "unique": do_unique,
+ "upper": do_upper,
+ "urlencode": do_urlencode,
+ "urlize": do_urlize,
+ "wordcount": do_wordcount,
+ "wordwrap": do_wordwrap,
+ "xmlattr": do_xmlattr,
+ "tojson": do_tojson,
+}
diff --git a/third_party/python/Jinja2/jinja2/idtracking.py b/third_party/python/Jinja2/jinja2/idtracking.py
new file mode 100644
index 0000000000..9a0d838017
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/idtracking.py
@@ -0,0 +1,290 @@
+from ._compat import iteritems
+from .visitor import NodeVisitor
+
+VAR_LOAD_PARAMETER = "param"
+VAR_LOAD_RESOLVE = "resolve"
+VAR_LOAD_ALIAS = "alias"
+VAR_LOAD_UNDEFINED = "undefined"
+
+
+def find_symbols(nodes, parent_symbols=None):
+ sym = Symbols(parent=parent_symbols)
+ visitor = FrameSymbolVisitor(sym)
+ for node in nodes:
+ visitor.visit(node)
+ return sym
+
+
+def symbols_for_node(node, parent_symbols=None):
+ sym = Symbols(parent=parent_symbols)
+ sym.analyze_node(node)
+ return sym
+
+
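+# Rough usage sketch (illustrative only; this module is internal
+# compiler plumbing, so the exact refs/loads layout is an
+# implementation detail):
+#
+#     >>> from jinja2 import Environment
+#     >>> tmpl = Environment().parse("{% set x = 1 %}{{ x }}")
+#     >>> sym = symbols_for_node(tmpl)
+#     >>> sym.refs
+#     {'x': 'l_0_x'}
+
+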
+class Symbols(object):
+ def __init__(self, parent=None, level=None):
+ if level is None:
+ if parent is None:
+ level = 0
+ else:
+ level = parent.level + 1
+ self.level = level
+ self.parent = parent
+ self.refs = {}
+ self.loads = {}
+ self.stores = set()
+
+ def analyze_node(self, node, **kwargs):
+ visitor = RootVisitor(self)
+ visitor.visit(node, **kwargs)
+
+ def _define_ref(self, name, load=None):
+ ident = "l_%d_%s" % (self.level, name)
+ self.refs[name] = ident
+ if load is not None:
+ self.loads[ident] = load
+ return ident
+
+ def find_load(self, target):
+ if target in self.loads:
+ return self.loads[target]
+ if self.parent is not None:
+ return self.parent.find_load(target)
+
+ def find_ref(self, name):
+ if name in self.refs:
+ return self.refs[name]
+ if self.parent is not None:
+ return self.parent.find_ref(name)
+
+ def ref(self, name):
+ rv = self.find_ref(name)
+ if rv is None:
+ raise AssertionError(
+ "Tried to resolve a name to a reference that "
+ "was unknown to the frame (%r)" % name
+ )
+ return rv
+
+ def copy(self):
+ rv = object.__new__(self.__class__)
+ rv.__dict__.update(self.__dict__)
+ rv.refs = self.refs.copy()
+ rv.loads = self.loads.copy()
+ rv.stores = self.stores.copy()
+ return rv
+
+ def store(self, name):
+ self.stores.add(name)
+
+        # If we have not seen the name referenced yet, we need to figure
+ # out what to set it to.
+ if name not in self.refs:
+ # If there is a parent scope we check if the name has a
+ # reference there. If it does it means we might have to alias
+ # to a variable there.
+ if self.parent is not None:
+ outer_ref = self.parent.find_ref(name)
+ if outer_ref is not None:
+ self._define_ref(name, load=(VAR_LOAD_ALIAS, outer_ref))
+ return
+
+ # Otherwise we can just set it to undefined.
+ self._define_ref(name, load=(VAR_LOAD_UNDEFINED, None))
+
+ def declare_parameter(self, name):
+ self.stores.add(name)
+ return self._define_ref(name, load=(VAR_LOAD_PARAMETER, None))
+
+ def load(self, name):
+ target = self.find_ref(name)
+ if target is None:
+ self._define_ref(name, load=(VAR_LOAD_RESOLVE, name))
+
+ def branch_update(self, branch_symbols):
+ stores = {}
+ for branch in branch_symbols:
+ for target in branch.stores:
+ if target in self.stores:
+ continue
+ stores[target] = stores.get(target, 0) + 1
+
+ for sym in branch_symbols:
+ self.refs.update(sym.refs)
+ self.loads.update(sym.loads)
+ self.stores.update(sym.stores)
+
+ for name, branch_count in iteritems(stores):
+ if branch_count == len(branch_symbols):
+ continue
+ target = self.find_ref(name)
+ assert target is not None, "should not happen"
+
+ if self.parent is not None:
+ outer_target = self.parent.find_ref(name)
+ if outer_target is not None:
+ self.loads[target] = (VAR_LOAD_ALIAS, outer_target)
+ continue
+ self.loads[target] = (VAR_LOAD_RESOLVE, name)
+
+ def dump_stores(self):
+ rv = {}
+ node = self
+ while node is not None:
+ for name in node.stores:
+ if name not in rv:
+ rv[name] = self.find_ref(name)
+ node = node.parent
+ return rv
+
+ def dump_param_targets(self):
+ rv = set()
+ node = self
+ while node is not None:
+ for target, (instr, _) in iteritems(self.loads):
+ if instr == VAR_LOAD_PARAMETER:
+ rv.add(target)
+ node = node.parent
+ return rv
+
+
+class RootVisitor(NodeVisitor):
+ def __init__(self, symbols):
+ self.sym_visitor = FrameSymbolVisitor(symbols)
+
+ def _simple_visit(self, node, **kwargs):
+ for child in node.iter_child_nodes():
+ self.sym_visitor.visit(child)
+
+ visit_Template = (
+ visit_Block
+ ) = (
+ visit_Macro
+ ) = (
+ visit_FilterBlock
+ ) = visit_Scope = visit_If = visit_ScopedEvalContextModifier = _simple_visit
+
+ def visit_AssignBlock(self, node, **kwargs):
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def visit_CallBlock(self, node, **kwargs):
+ for child in node.iter_child_nodes(exclude=("call",)):
+ self.sym_visitor.visit(child)
+
+ def visit_OverlayScope(self, node, **kwargs):
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def visit_For(self, node, for_branch="body", **kwargs):
+ if for_branch == "body":
+ self.sym_visitor.visit(node.target, store_as_param=True)
+ branch = node.body
+ elif for_branch == "else":
+ branch = node.else_
+ elif for_branch == "test":
+ self.sym_visitor.visit(node.target, store_as_param=True)
+ if node.test is not None:
+ self.sym_visitor.visit(node.test)
+ return
+ else:
+ raise RuntimeError("Unknown for branch")
+ for item in branch or ():
+ self.sym_visitor.visit(item)
+
+ def visit_With(self, node, **kwargs):
+ for target in node.targets:
+ self.sym_visitor.visit(target)
+ for child in node.body:
+ self.sym_visitor.visit(child)
+
+ def generic_visit(self, node, *args, **kwargs):
+ raise NotImplementedError(
+ "Cannot find symbols for %r" % node.__class__.__name__
+ )
+
+
+class FrameSymbolVisitor(NodeVisitor):
+ """A visitor for `Frame.inspect`."""
+
+ def __init__(self, symbols):
+ self.symbols = symbols
+
+ def visit_Name(self, node, store_as_param=False, **kwargs):
+ """All assignments to names go through this function."""
+ if store_as_param or node.ctx == "param":
+ self.symbols.declare_parameter(node.name)
+ elif node.ctx == "store":
+ self.symbols.store(node.name)
+ elif node.ctx == "load":
+ self.symbols.load(node.name)
+
+ def visit_NSRef(self, node, **kwargs):
+ self.symbols.load(node.name)
+
+ def visit_If(self, node, **kwargs):
+ self.visit(node.test, **kwargs)
+
+ original_symbols = self.symbols
+
+ def inner_visit(nodes):
+ self.symbols = rv = original_symbols.copy()
+ for subnode in nodes:
+ self.visit(subnode, **kwargs)
+ self.symbols = original_symbols
+ return rv
+
+ body_symbols = inner_visit(node.body)
+ elif_symbols = inner_visit(node.elif_)
+ else_symbols = inner_visit(node.else_ or ())
+
+ self.symbols.branch_update([body_symbols, elif_symbols, else_symbols])
+
+ def visit_Macro(self, node, **kwargs):
+ self.symbols.store(node.name)
+
+ def visit_Import(self, node, **kwargs):
+ self.generic_visit(node, **kwargs)
+ self.symbols.store(node.target)
+
+ def visit_FromImport(self, node, **kwargs):
+ self.generic_visit(node, **kwargs)
+ for name in node.names:
+ if isinstance(name, tuple):
+ self.symbols.store(name[1])
+ else:
+ self.symbols.store(name)
+
+ def visit_Assign(self, node, **kwargs):
+ """Visit assignments in the correct order."""
+ self.visit(node.node, **kwargs)
+ self.visit(node.target, **kwargs)
+
+ def visit_For(self, node, **kwargs):
+ """Visiting stops at for blocks. However the block sequence
+ is visited as part of the outer scope.
+ """
+ self.visit(node.iter, **kwargs)
+
+ def visit_CallBlock(self, node, **kwargs):
+ self.visit(node.call, **kwargs)
+
+ def visit_FilterBlock(self, node, **kwargs):
+ self.visit(node.filter, **kwargs)
+
+ def visit_With(self, node, **kwargs):
+ for target in node.values:
+ self.visit(target)
+
+ def visit_AssignBlock(self, node, **kwargs):
+ """Stop visiting at block assigns."""
+ self.visit(node.target, **kwargs)
+
+ def visit_Scope(self, node, **kwargs):
+ """Stop visiting at scopes."""
+
+ def visit_Block(self, node, **kwargs):
+ """Stop visiting at blocks."""
+
+ def visit_OverlayScope(self, node, **kwargs):
+ """Do not visit into overlay scopes."""
diff --git a/third_party/python/Jinja2/jinja2/lexer.py b/third_party/python/Jinja2/jinja2/lexer.py
new file mode 100644
index 0000000000..552356a12d
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/lexer.py
@@ -0,0 +1,848 @@
+# -*- coding: utf-8 -*-
+"""Implements a Jinja / Python combination lexer. The ``Lexer`` class
+is used to do some preprocessing. It filters out invalid operators like
+the bitshift operators we don't allow in templates. It separates
+template code and Python code in expressions.
+"""
+import re
+from ast import literal_eval
+from collections import deque
+from operator import itemgetter
+
+from ._compat import implements_iterator
+from ._compat import intern
+from ._compat import iteritems
+from ._compat import text_type
+from .exceptions import TemplateSyntaxError
+from .utils import LRUCache
+
+# cache for the lexers. Exists in order to be able to have multiple
+# environments with the same lexer
+_lexer_cache = LRUCache(50)
+
+# static regular expressions
+whitespace_re = re.compile(r"\s+", re.U)
+newline_re = re.compile(r"(\r\n|\r|\n)")
+string_re = re.compile(
+ r"('([^'\\]*(?:\\.[^'\\]*)*)'" r'|"([^"\\]*(?:\\.[^"\\]*)*)")', re.S
+)
+integer_re = re.compile(r"(\d+_)*\d+")
+float_re = re.compile(
+ r"""
+ (?<!\.) # doesn't start with a .
+ (\d+_)*\d+ # digits, possibly _ separated
+ (
+ (\.(\d+_)*\d+)? # optional fractional part
+ e[+\-]?(\d+_)*\d+ # exponent part
+ |
+ \.(\d+_)*\d+ # required fractional part
+ )
+ """,
+ re.IGNORECASE | re.VERBOSE,
+)
+
+try:
+ # check if this Python supports Unicode identifiers
+ compile("föö", "<unknown>", "eval")
+except SyntaxError:
+ # Python 2, no Unicode support, use ASCII identifiers
+ name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*")
+ check_ident = False
+else:
+ # Unicode support, import generated re pattern and set flag to use
+ # str.isidentifier to validate during lexing.
+ from ._identifier import pattern as name_re
+
+ check_ident = True
+
+# intern the tokens and keep references to them
+TOKEN_ADD = intern("add")
+TOKEN_ASSIGN = intern("assign")
+TOKEN_COLON = intern("colon")
+TOKEN_COMMA = intern("comma")
+TOKEN_DIV = intern("div")
+TOKEN_DOT = intern("dot")
+TOKEN_EQ = intern("eq")
+TOKEN_FLOORDIV = intern("floordiv")
+TOKEN_GT = intern("gt")
+TOKEN_GTEQ = intern("gteq")
+TOKEN_LBRACE = intern("lbrace")
+TOKEN_LBRACKET = intern("lbracket")
+TOKEN_LPAREN = intern("lparen")
+TOKEN_LT = intern("lt")
+TOKEN_LTEQ = intern("lteq")
+TOKEN_MOD = intern("mod")
+TOKEN_MUL = intern("mul")
+TOKEN_NE = intern("ne")
+TOKEN_PIPE = intern("pipe")
+TOKEN_POW = intern("pow")
+TOKEN_RBRACE = intern("rbrace")
+TOKEN_RBRACKET = intern("rbracket")
+TOKEN_RPAREN = intern("rparen")
+TOKEN_SEMICOLON = intern("semicolon")
+TOKEN_SUB = intern("sub")
+TOKEN_TILDE = intern("tilde")
+TOKEN_WHITESPACE = intern("whitespace")
+TOKEN_FLOAT = intern("float")
+TOKEN_INTEGER = intern("integer")
+TOKEN_NAME = intern("name")
+TOKEN_STRING = intern("string")
+TOKEN_OPERATOR = intern("operator")
+TOKEN_BLOCK_BEGIN = intern("block_begin")
+TOKEN_BLOCK_END = intern("block_end")
+TOKEN_VARIABLE_BEGIN = intern("variable_begin")
+TOKEN_VARIABLE_END = intern("variable_end")
+TOKEN_RAW_BEGIN = intern("raw_begin")
+TOKEN_RAW_END = intern("raw_end")
+TOKEN_COMMENT_BEGIN = intern("comment_begin")
+TOKEN_COMMENT_END = intern("comment_end")
+TOKEN_COMMENT = intern("comment")
+TOKEN_LINESTATEMENT_BEGIN = intern("linestatement_begin")
+TOKEN_LINESTATEMENT_END = intern("linestatement_end")
+TOKEN_LINECOMMENT_BEGIN = intern("linecomment_begin")
+TOKEN_LINECOMMENT_END = intern("linecomment_end")
+TOKEN_LINECOMMENT = intern("linecomment")
+TOKEN_DATA = intern("data")
+TOKEN_INITIAL = intern("initial")
+TOKEN_EOF = intern("eof")
+
+# bind operators to token types
+operators = {
+ "+": TOKEN_ADD,
+ "-": TOKEN_SUB,
+ "/": TOKEN_DIV,
+ "//": TOKEN_FLOORDIV,
+ "*": TOKEN_MUL,
+ "%": TOKEN_MOD,
+ "**": TOKEN_POW,
+ "~": TOKEN_TILDE,
+ "[": TOKEN_LBRACKET,
+ "]": TOKEN_RBRACKET,
+ "(": TOKEN_LPAREN,
+ ")": TOKEN_RPAREN,
+ "{": TOKEN_LBRACE,
+ "}": TOKEN_RBRACE,
+ "==": TOKEN_EQ,
+ "!=": TOKEN_NE,
+ ">": TOKEN_GT,
+ ">=": TOKEN_GTEQ,
+ "<": TOKEN_LT,
+ "<=": TOKEN_LTEQ,
+ "=": TOKEN_ASSIGN,
+ ".": TOKEN_DOT,
+ ":": TOKEN_COLON,
+ "|": TOKEN_PIPE,
+ ",": TOKEN_COMMA,
+ ";": TOKEN_SEMICOLON,
+}
+
+reverse_operators = dict([(v, k) for k, v in iteritems(operators)])
+assert len(operators) == len(reverse_operators), "operators dropped"
+operator_re = re.compile(
+ "(%s)" % "|".join(re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))
+)
+
+ignored_tokens = frozenset(
+ [
+ TOKEN_COMMENT_BEGIN,
+ TOKEN_COMMENT,
+ TOKEN_COMMENT_END,
+ TOKEN_WHITESPACE,
+ TOKEN_LINECOMMENT_BEGIN,
+ TOKEN_LINECOMMENT_END,
+ TOKEN_LINECOMMENT,
+ ]
+)
+ignore_if_empty = frozenset(
+ [TOKEN_WHITESPACE, TOKEN_DATA, TOKEN_COMMENT, TOKEN_LINECOMMENT]
+)
+
+
+def _describe_token_type(token_type):
+ if token_type in reverse_operators:
+ return reverse_operators[token_type]
+ return {
+ TOKEN_COMMENT_BEGIN: "begin of comment",
+ TOKEN_COMMENT_END: "end of comment",
+ TOKEN_COMMENT: "comment",
+ TOKEN_LINECOMMENT: "comment",
+ TOKEN_BLOCK_BEGIN: "begin of statement block",
+ TOKEN_BLOCK_END: "end of statement block",
+ TOKEN_VARIABLE_BEGIN: "begin of print statement",
+ TOKEN_VARIABLE_END: "end of print statement",
+ TOKEN_LINESTATEMENT_BEGIN: "begin of line statement",
+ TOKEN_LINESTATEMENT_END: "end of line statement",
+ TOKEN_DATA: "template data / text",
+ TOKEN_EOF: "end of template",
+ }.get(token_type, token_type)
+
+
+def describe_token(token):
+ """Returns a description of the token."""
+ if token.type == TOKEN_NAME:
+ return token.value
+ return _describe_token_type(token.type)
+
+
+def describe_token_expr(expr):
+ """Like `describe_token` but for token expressions."""
+ if ":" in expr:
+ type, value = expr.split(":", 1)
+ if type == TOKEN_NAME:
+ return value
+ else:
+ type = expr
+ return _describe_token_type(type)
+
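+# For illustration (editor's note, not upstream code):
+# describe_token_expr("name:endfor") returns "endfor", while
+# describe_token_expr("block_end") returns "end of statement block".
+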
+
+def count_newlines(value):
+ """Count the number of newline characters in the string. This is
+ useful for extensions that filter a stream.
+ """
+ return len(newline_re.findall(value))
+
+
+def compile_rules(environment):
+ """Compiles all the rules from the environment into a list of rules."""
+ e = re.escape
+ rules = [
+ (
+ len(environment.comment_start_string),
+ TOKEN_COMMENT_BEGIN,
+ e(environment.comment_start_string),
+ ),
+ (
+ len(environment.block_start_string),
+ TOKEN_BLOCK_BEGIN,
+ e(environment.block_start_string),
+ ),
+ (
+ len(environment.variable_start_string),
+ TOKEN_VARIABLE_BEGIN,
+ e(environment.variable_start_string),
+ ),
+ ]
+
+ if environment.line_statement_prefix is not None:
+ rules.append(
+ (
+ len(environment.line_statement_prefix),
+ TOKEN_LINESTATEMENT_BEGIN,
+ r"^[ \t\v]*" + e(environment.line_statement_prefix),
+ )
+ )
+ if environment.line_comment_prefix is not None:
+ rules.append(
+ (
+ len(environment.line_comment_prefix),
+ TOKEN_LINECOMMENT_BEGIN,
+ r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
+ )
+ )
+
+ return [x[1:] for x in sorted(rules, reverse=True)]
+
+
+class Failure(object):
+ """Class that raises a `TemplateSyntaxError` if called.
+ Used by the `Lexer` to specify known errors.
+ """
+
+ def __init__(self, message, cls=TemplateSyntaxError):
+ self.message = message
+ self.error_class = cls
+
+ def __call__(self, lineno, filename):
+ raise self.error_class(self.message, lineno, filename)
+
+
+class Token(tuple):
+ """Token class."""
+
+ __slots__ = ()
+ lineno, type, value = (property(itemgetter(x)) for x in range(3))
+
+ def __new__(cls, lineno, type, value):
+ return tuple.__new__(cls, (lineno, intern(str(type)), value))
+
+ def __str__(self):
+ if self.type in reverse_operators:
+ return reverse_operators[self.type]
+ elif self.type == "name":
+ return self.value
+ return self.type
+
+ def test(self, expr):
+ """Test a token against a token expression. This can either be a
+ token type or ``'token_type:token_value'``. This can only test
+ against string values and types.
+ """
+ # here we do a regular string equality check as test_any is usually
+ # passed an iterable of non-interned strings.
+ if self.type == expr:
+ return True
+ elif ":" in expr:
+ return expr.split(":", 1) == [self.type, self.value]
+ return False
+
+ def test_any(self, *iterable):
+ """Test against multiple token expressions."""
+ for expr in iterable:
+ if self.test(expr):
+ return True
+ return False
+
+ def __repr__(self):
+ return "Token(%r, %r, %r)" % (self.lineno, self.type, self.value)
+
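+# A quick sketch of token test expressions (editor's illustration, not
+# part of upstream): a bare type matches by type, while a "type:value"
+# expression matches both type and value.
+#
+#     tok = Token(1, "name", "endfor")
+#     tok.test("name")                        # True
+#     tok.test("name:endfor")                 # True
+#     tok.test_any("integer", "name:endif")   # False
+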
+
+@implements_iterator
+class TokenStreamIterator(object):
+ """The iterator for tokenstreams. Iterate over the stream
+ until the eof token is reached.
+ """
+
+ def __init__(self, stream):
+ self.stream = stream
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ token = self.stream.current
+ if token.type is TOKEN_EOF:
+ self.stream.close()
+ raise StopIteration()
+ next(self.stream)
+ return token
+
+
+@implements_iterator
+class TokenStream(object):
+ """A token stream is an iterable that yields :class:`Token`\\s. The
+ parser however does not iterate over it but calls :meth:`next` to go
+ one token ahead. The current active token is stored as :attr:`current`.
+ """
+
+ def __init__(self, generator, name, filename):
+ self._iter = iter(generator)
+ self._pushed = deque()
+ self.name = name
+ self.filename = filename
+ self.closed = False
+ self.current = Token(1, TOKEN_INITIAL, "")
+ next(self)
+
+ def __iter__(self):
+ return TokenStreamIterator(self)
+
+ def __bool__(self):
+ return bool(self._pushed) or self.current.type is not TOKEN_EOF
+
+ __nonzero__ = __bool__ # py2
+
+ @property
+ def eos(self):
+ """Are we at the end of the stream?"""
+ return not self
+
+ def push(self, token):
+ """Push a token back to the stream."""
+ self._pushed.append(token)
+
+ def look(self):
+ """Look at the next token."""
+ old_token = next(self)
+ result = self.current
+ self.push(result)
+ self.current = old_token
+ return result
+
+ def skip(self, n=1):
+ """Got n tokens ahead."""
+ for _ in range(n):
+ next(self)
+
+ def next_if(self, expr):
+ """Perform the token test and return the token if it matched.
+ Otherwise the return value is `None`.
+ """
+ if self.current.test(expr):
+ return next(self)
+
+ def skip_if(self, expr):
+ """Like :meth:`next_if` but only returns `True` or `False`."""
+ return self.next_if(expr) is not None
+
+ def __next__(self):
+ """Go one token ahead and return the old one.
+
+ Use the built-in :func:`next` instead of calling this directly.
+ """
+ rv = self.current
+ if self._pushed:
+ self.current = self._pushed.popleft()
+ elif self.current.type is not TOKEN_EOF:
+ try:
+ self.current = next(self._iter)
+ except StopIteration:
+ self.close()
+ return rv
+
+ def close(self):
+ """Close the stream."""
+ self.current = Token(self.current.lineno, TOKEN_EOF, "")
+ self._iter = None
+ self.closed = True
+
+ def expect(self, expr):
+ """Expect a given token type and return it. This accepts the same
+ argument as :meth:`jinja2.lexer.Token.test`.
+ """
+ if not self.current.test(expr):
+ expr = describe_token_expr(expr)
+ if self.current.type is TOKEN_EOF:
+ raise TemplateSyntaxError(
+ "unexpected end of template, expected %r." % expr,
+ self.current.lineno,
+ self.name,
+ self.filename,
+ )
+ raise TemplateSyntaxError(
+ "expected token %r, got %r" % (expr, describe_token(self.current)),
+ self.current.lineno,
+ self.name,
+ self.filename,
+ )
+ try:
+ return self.current
+ finally:
+ next(self)
+
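+# Parser-side usage sketch (editor's illustration, not upstream code;
+# assumes a default jinja2.Environment and the get_lexer helper
+# defined below):
+#
+#     stream = get_lexer(Environment()).tokenize(u"{{ foo }}")
+#     stream.expect("variable_begin")
+#     name = stream.expect("name")    # Token(1, 'name', 'foo')
+#     stream.expect("variable_end")
+#     assert stream.eos
+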
+
+def get_lexer(environment):
+ """Return a lexer which is probably cached."""
+ key = (
+ environment.block_start_string,
+ environment.block_end_string,
+ environment.variable_start_string,
+ environment.variable_end_string,
+ environment.comment_start_string,
+ environment.comment_end_string,
+ environment.line_statement_prefix,
+ environment.line_comment_prefix,
+ environment.trim_blocks,
+ environment.lstrip_blocks,
+ environment.newline_sequence,
+ environment.keep_trailing_newline,
+ )
+ lexer = _lexer_cache.get(key)
+ if lexer is None:
+ lexer = Lexer(environment)
+ _lexer_cache[key] = lexer
+ return lexer
+
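+# Because the cache key contains only the lexer-relevant settings, two
+# environments configured with the same delimiters share a single lexer
+# instance (editor's illustration, not upstream code):
+#
+#     assert get_lexer(Environment()) is get_lexer(Environment())
+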
+
+class OptionalLStrip(tuple):
+ """A special tuple for marking a point in the state that can have
+ lstrip applied.
+ """
+
+ __slots__ = ()
+
+ # Even though it looks like a no-op, creating instances fails
+ # without this.
+ def __new__(cls, *members, **kwargs):
+ return super(OptionalLStrip, cls).__new__(cls, members)
+
+
+class Lexer(object):
+ """Class that implements a lexer for a given environment. Automatically
+ created by the environment class; usually you don't have to create one yourself.
+
+ Note that the lexer is not automatically bound to an environment.
+ Multiple environments can share the same lexer.
+ """
+
+ def __init__(self, environment):
+ # shortcuts
+ e = re.escape
+
+ def c(x):
+ return re.compile(x, re.M | re.S)
+
+ # lexing rules for tags
+ tag_rules = [
+ (whitespace_re, TOKEN_WHITESPACE, None),
+ (float_re, TOKEN_FLOAT, None),
+ (integer_re, TOKEN_INTEGER, None),
+ (name_re, TOKEN_NAME, None),
+ (string_re, TOKEN_STRING, None),
+ (operator_re, TOKEN_OPERATOR, None),
+ ]
+
+ # assemble the root lexing rule. because regex alternation matches
+ # the first branch that succeeds rather than the longest one, we
+ # have to sort the rules by length so that the lexer keeps working
+ # when delimiters overlap, e.g. <% for blocks and <%= for variables
+ # (if someone wants ASP-like syntax). variables are just part of
+ # the rules if variable processing is required.
+ root_tag_rules = compile_rules(environment)
+
+ # block suffix if trimming is enabled
+ block_suffix_re = environment.trim_blocks and "\\n?" or ""
+
+ # If lstrip is enabled, it should not be applied if there is any
+ # non-whitespace between the newline and block.
+ self.lstrip_unless_re = c(r"[^ \t]") if environment.lstrip_blocks else None
+
+ self.newline_sequence = environment.newline_sequence
+ self.keep_trailing_newline = environment.keep_trailing_newline
+
+ # global lexing rules
+ self.rules = {
+ "root": [
+ # directives
+ (
+ c(
+ "(.*?)(?:%s)"
+ % "|".join(
+ [
+ r"(?P<raw_begin>%s(\-|\+|)\s*raw\s*(?:\-%s\s*|%s))"
+ % (
+ e(environment.block_start_string),
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ )
+ ]
+ + [
+ r"(?P<%s>%s(\-|\+|))" % (n, r)
+ for n, r in root_tag_rules
+ ]
+ )
+ ),
+ OptionalLStrip(TOKEN_DATA, "#bygroup"),
+ "#bygroup",
+ ),
+ # data
+ (c(".+"), TOKEN_DATA, None),
+ ],
+ # comments
+ TOKEN_COMMENT_BEGIN: [
+ (
+ c(
+ r"(.*?)((?:\-%s\s*|%s)%s)"
+ % (
+ e(environment.comment_end_string),
+ e(environment.comment_end_string),
+ block_suffix_re,
+ )
+ ),
+ (TOKEN_COMMENT, TOKEN_COMMENT_END),
+ "#pop",
+ ),
+ (c("(.)"), (Failure("Missing end of comment tag"),), None),
+ ],
+ # blocks
+ TOKEN_BLOCK_BEGIN: [
+ (
+ c(
+ r"(?:\-%s\s*|%s)%s"
+ % (
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re,
+ )
+ ),
+ TOKEN_BLOCK_END,
+ "#pop",
+ ),
+ ]
+ + tag_rules,
+ # variables
+ TOKEN_VARIABLE_BEGIN: [
+ (
+ c(
+ r"\-%s\s*|%s"
+ % (
+ e(environment.variable_end_string),
+ e(environment.variable_end_string),
+ )
+ ),
+ TOKEN_VARIABLE_END,
+ "#pop",
+ )
+ ]
+ + tag_rules,
+ # raw block
+ TOKEN_RAW_BEGIN: [
+ (
+ c(
+ r"(.*?)((?:%s(\-|\+|))\s*endraw\s*(?:\-%s\s*|%s%s))"
+ % (
+ e(environment.block_start_string),
+ e(environment.block_end_string),
+ e(environment.block_end_string),
+ block_suffix_re,
+ )
+ ),
+ OptionalLStrip(TOKEN_DATA, TOKEN_RAW_END),
+ "#pop",
+ ),
+ (c("(.)"), (Failure("Missing end of raw directive"),), None),
+ ],
+ # line statements
+ TOKEN_LINESTATEMENT_BEGIN: [
+ (c(r"\s*(\n|$)"), TOKEN_LINESTATEMENT_END, "#pop")
+ ]
+ + tag_rules,
+ # line comments
+ TOKEN_LINECOMMENT_BEGIN: [
+ (
+ c(r"(.*?)()(?=\n|$)"),
+ (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END),
+ "#pop",
+ )
+ ],
+ }
+
+ def _normalize_newlines(self, value):
+ """Called for strings and template data to normalize it to unicode."""
+ return newline_re.sub(self.newline_sequence, value)
+
+ def tokenize(self, source, name=None, filename=None, state=None):
+ """Calls tokeniter + tokenize and wraps it in a token stream."""
+ stream = self.tokeniter(source, name, filename, state)
+ return TokenStream(self.wrap(stream, name, filename), name, filename)
+
+ def wrap(self, stream, name=None, filename=None):
+ """This is called with the stream as returned by `tokenize` and wraps
+ every token in a :class:`Token` and converts the value.
+ """
+ for lineno, token, value in stream:
+ if token in ignored_tokens:
+ continue
+ elif token == TOKEN_LINESTATEMENT_BEGIN:
+ token = TOKEN_BLOCK_BEGIN
+ elif token == TOKEN_LINESTATEMENT_END:
+ token = TOKEN_BLOCK_END
+ # we are not interested in those tokens in the parser
+ elif token in (TOKEN_RAW_BEGIN, TOKEN_RAW_END):
+ continue
+ elif token == TOKEN_DATA:
+ value = self._normalize_newlines(value)
+ elif token == "keyword":
+ token = value
+ elif token == TOKEN_NAME:
+ value = str(value)
+ if check_ident and not value.isidentifier():
+ raise TemplateSyntaxError(
+ "Invalid character in identifier", lineno, name, filename
+ )
+ elif token == TOKEN_STRING:
+ # try to unescape string
+ try:
+ value = (
+ self._normalize_newlines(value[1:-1])
+ .encode("ascii", "backslashreplace")
+ .decode("unicode-escape")
+ )
+ except Exception as e:
+ msg = str(e).split(":")[-1].strip()
+ raise TemplateSyntaxError(msg, lineno, name, filename)
+ elif token == TOKEN_INTEGER:
+ value = int(value.replace("_", ""))
+ elif token == TOKEN_FLOAT:
+ # remove all "_" first to support more Python versions
+ value = literal_eval(value.replace("_", ""))
+ elif token == TOKEN_OPERATOR:
+ token = operators[value]
+ yield Token(lineno, token, value)
+
+ def tokeniter(self, source, name, filename=None, state=None):
+ """This method tokenizes the text and returns the tokens in a
+ generator. Use this method if you just want to tokenize a template.
+ """
+ source = text_type(source)
+ lines = source.splitlines()
+ if self.keep_trailing_newline and source:
+ for newline in ("\r\n", "\r", "\n"):
+ if source.endswith(newline):
+ lines.append("")
+ break
+ source = "\n".join(lines)
+ pos = 0
+ lineno = 1
+ stack = ["root"]
+ if state is not None and state != "root":
+ assert state in ("variable", "block"), "invalid state"
+ stack.append(state + "_begin")
+ statetokens = self.rules[stack[-1]]
+ source_length = len(source)
+ balancing_stack = []
+ lstrip_unless_re = self.lstrip_unless_re
+ newlines_stripped = 0
+ line_starting = True
+
+ while 1:
+ # tokenizer loop
+ for regex, tokens, new_state in statetokens:
+ m = regex.match(source, pos)
+ # if no match we try again with the next rule
+ if m is None:
+ continue
+
+ # we only match blocks and variables if braces / parentheses
+ # are balanced. continue parsing with the lower rule which
+ # is the operator rule. do this only if the end tags look
+ # like operators
+ if balancing_stack and tokens in (
+ TOKEN_VARIABLE_END,
+ TOKEN_BLOCK_END,
+ TOKEN_LINESTATEMENT_END,
+ ):
+ continue
+
+ # tuples support more options
+ if isinstance(tokens, tuple):
+ groups = m.groups()
+
+ if isinstance(tokens, OptionalLStrip):
+ # Rule supports lstrip. Match will look like
+ # text, block type, whitespace control, type, control, ...
+ text = groups[0]
+
+ # Skipping the text and first type, every other group is the
+ # whitespace control for each type. One of the groups will be
+ # -, +, or empty string instead of None.
+ strip_sign = next(g for g in groups[2::2] if g is not None)
+
+ if strip_sign == "-":
+ # Strip all whitespace between the text and the tag.
+ stripped = text.rstrip()
+ newlines_stripped = text[len(stripped) :].count("\n")
+ groups = (stripped,) + groups[1:]
+ elif (
+ # Not marked for preserving whitespace.
+ strip_sign != "+"
+ # lstrip is enabled.
+ and lstrip_unless_re is not None
+ # Not a variable expression.
+ and not m.groupdict().get(TOKEN_VARIABLE_BEGIN)
+ ):
+ # The start of text between the last newline and the tag.
+ l_pos = text.rfind("\n") + 1
+ if l_pos > 0 or line_starting:
+ # If there's only whitespace between the newline and the
+ # tag, strip it.
+ if not lstrip_unless_re.search(text, l_pos):
+ groups = (text[:l_pos],) + groups[1:]
+
+ for idx, token in enumerate(tokens):
+ # failure group
+ if token.__class__ is Failure:
+ raise token(lineno, filename)
+ # bygroup is a bit more complex, in that case we
+ # yield for the current token the first named
+ # group that matched
+ elif token == "#bygroup":
+ for key, value in iteritems(m.groupdict()):
+ if value is not None:
+ yield lineno, key, value
+ lineno += value.count("\n")
+ break
+ else:
+ raise RuntimeError(
+ "%r wanted to resolve "
+ "the token dynamically"
+ " but no group matched" % regex
+ )
+ # normal group
+ else:
+ data = groups[idx]
+ if data or token not in ignore_if_empty:
+ yield lineno, token, data
+ lineno += data.count("\n") + newlines_stripped
+ newlines_stripped = 0
+
+ # string token types are just yielded as-is.
+ else:
+ data = m.group()
+ # update brace/parentheses balance
+ if tokens == TOKEN_OPERATOR:
+ if data == "{":
+ balancing_stack.append("}")
+ elif data == "(":
+ balancing_stack.append(")")
+ elif data == "[":
+ balancing_stack.append("]")
+ elif data in ("}", ")", "]"):
+ if not balancing_stack:
+ raise TemplateSyntaxError(
+ "unexpected '%s'" % data, lineno, name, filename
+ )
+ expected_op = balancing_stack.pop()
+ if expected_op != data:
+ raise TemplateSyntaxError(
+ "unexpected '%s', "
+ "expected '%s'" % (data, expected_op),
+ lineno,
+ name,
+ filename,
+ )
+ # yield items
+ if data or tokens not in ignore_if_empty:
+ yield lineno, tokens, data
+ lineno += data.count("\n")
+
+ line_starting = m.group()[-1:] == "\n"
+
+ # fetch new position into new variable so that we can check
+ # if there is an internal parsing error which would result
+ # in an infinite loop
+ pos2 = m.end()
+
+ # handle state changes
+ if new_state is not None:
+ # remove the uppermost state
+ if new_state == "#pop":
+ stack.pop()
+ # resolve the new state by group checking
+ elif new_state == "#bygroup":
+ for key, value in iteritems(m.groupdict()):
+ if value is not None:
+ stack.append(key)
+ break
+ else:
+ raise RuntimeError(
+ "%r wanted to resolve the "
+ "new state dynamically but"
+ " no group matched" % regex
+ )
+ # direct state name given
+ else:
+ stack.append(new_state)
+ statetokens = self.rules[stack[-1]]
+ # we are still at the same position and no stack change.
+ # this means a loop without break condition, avoid that and
+ # raise error
+ elif pos2 == pos:
+ raise RuntimeError(
+ "%r yielded empty string without stack change" % regex
+ )
+ # publish the new position and start again
+ pos = pos2
+ break
+ # if loop terminated without break we haven't found a single match
+ # either we are at the end of the file or we have a problem
+ else:
+ # end of text
+ if pos >= source_length:
+ return
+ # something went wrong
+ raise TemplateSyntaxError(
+ "unexpected char %r at %d" % (source[pos], pos),
+ lineno,
+ name,
+ filename,
+ )
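+
+# Raw tokeniter output, before wrap() filters whitespace and converts
+# values (editor's illustration, not upstream code):
+#
+#     lexer = get_lexer(Environment())
+#     for lineno, token, value in lexer.tokeniter(u"{{ 1 }}", "demo"):
+#         print(lineno, token, repr(value))
+#     # 1 variable_begin '{{'
+#     # 1 whitespace ' '
+#     # 1 integer '1'
+#     # 1 whitespace ' '
+#     # 1 variable_end '}}'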
diff --git a/third_party/python/Jinja2/jinja2/loaders.py b/third_party/python/Jinja2/jinja2/loaders.py
new file mode 100644
index 0000000000..457c4b59a7
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/loaders.py
@@ -0,0 +1,504 @@
+# -*- coding: utf-8 -*-
+"""API and implementations for loading templates from different data
+sources.
+"""
+import os
+import sys
+import weakref
+from hashlib import sha1
+from os import path
+from types import ModuleType
+
+from ._compat import abc
+from ._compat import fspath
+from ._compat import iteritems
+from ._compat import string_types
+from .exceptions import TemplateNotFound
+from .utils import internalcode
+from .utils import open_if_exists
+
+
+def split_template_path(template):
+ """Split a path into segments and perform a sanity check. If it detects
+ '..' in the path it will raise a `TemplateNotFound` error.
+ """
+ pieces = []
+ for piece in template.split("/"):
+ if (
+ path.sep in piece
+ or (path.altsep and path.altsep in piece)
+ or piece == path.pardir
+ ):
+ raise TemplateNotFound(template)
+ elif piece and piece != ".":
+ pieces.append(piece)
+ return pieces
+
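+# For illustration (editor's note, not upstream code):
+# split_template_path("foo/bar.html") returns ['foo', 'bar.html'];
+# any segment containing the OS path separator or '..' raises
+# TemplateNotFound.
+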
+
+class BaseLoader(object):
+ """Baseclass for all loaders. Subclass this and override `get_source` to
+ implement a custom loading mechanism. The environment provides a
+ `get_template` method that calls the loader's `load` method to get the
+ :class:`Template` object.
+
+ A very basic example for a loader that looks up templates on the file
+ system could look like this::
+
+ from jinja2 import BaseLoader, TemplateNotFound
+ from os.path import join, exists, getmtime
+
+ class MyLoader(BaseLoader):
+
+ def __init__(self, path):
+ self.path = path
+
+ def get_source(self, environment, template):
+ path = join(self.path, template)
+ if not exists(path):
+ raise TemplateNotFound(template)
+ mtime = getmtime(path)
+ with open(path, 'rb') as f:
+ source = f.read().decode('utf-8')
+ return source, path, lambda: mtime == getmtime(path)
+ """
+
+ #: if set to `False` it indicates that the loader cannot provide access
+ #: to the source of templates.
+ #:
+ #: .. versionadded:: 2.4
+ has_source_access = True
+
+ def get_source(self, environment, template):
+ """Get the template source, filename and reload helper for a template.
+ It's passed the environment and template name and has to return a
+ tuple in the form ``(source, filename, uptodate)`` or raise a
+ `TemplateNotFound` error if it can't locate the template.
+
+ The source part of the returned tuple must be the source of the
+ template as a unicode string or an ASCII bytestring. The filename should
+ be the name of the file on the filesystem if it was loaded from there,
+ otherwise `None`. The filename is used by python for the tracebacks
+ if no loader extension is used.
+
+ The last item in the tuple is the `uptodate` function. If auto
+ reloading is enabled it's always called to check if the template
+ changed. No arguments are passed so the function must store the
+ old state somewhere (for example in a closure). If it returns `False`
+ the template will be reloaded.
+ """
+ if not self.has_source_access:
+ raise RuntimeError(
+ "%s cannot provide access to the source" % self.__class__.__name__
+ )
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ """Iterates over all templates. If the loader does not support that
+ it should raise a :exc:`TypeError` which is the default behavior.
+ """
+ raise TypeError("this loader cannot iterate over all templates")
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ """Loads a template. This method looks up the template in the cache
+ or loads one by calling :meth:`get_source`. Subclasses should not
+ override this method as loaders working on collections of other
+ loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
+ will not call this method but `get_source` directly.
+ """
+ code = None
+ if globals is None:
+ globals = {}
+
+ # first we try to get the source for this template together
+ # with the filename and the uptodate function.
+ source, filename, uptodate = self.get_source(environment, name)
+
+ # try to load the code from the bytecode cache if there is a
+ # bytecode cache configured.
+ bcc = environment.bytecode_cache
+ if bcc is not None:
+ bucket = bcc.get_bucket(environment, name, filename, source)
+ code = bucket.code
+
+ # if we don't have code so far (not cached, no longer up to
+ # date) etc. we compile the template
+ if code is None:
+ code = environment.compile(source, name, filename)
+
+ # if the bytecode cache is available and the bucket doesn't
+ # have a code so far, we give the bucket the new code and put
+ # it back to the bytecode cache.
+ if bcc is not None and bucket.code is None:
+ bucket.code = code
+ bcc.set_bucket(bucket)
+
+ return environment.template_class.from_code(
+ environment, code, globals, uptodate
+ )
+
+
+class FileSystemLoader(BaseLoader):
+ """Loads templates from the file system. This loader can find templates
+ in folders on the file system and is the preferred way to load them.
+
+ The loader takes the path to the templates as string, or if multiple
+ locations are wanted a list of them which is then looked up in the
+ given order::
+
+ >>> loader = FileSystemLoader('/path/to/templates')
+ >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])
+
+ By default the template encoding is ``'utf-8'``; it can be changed
+ by setting the `encoding` parameter to something else.
+
+ To follow symbolic links, set the *followlinks* parameter to ``True``::
+
+ >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)
+
+ .. versionchanged:: 2.8
+ The ``followlinks`` parameter was added.
+ """
+
+ def __init__(self, searchpath, encoding="utf-8", followlinks=False):
+ if not isinstance(searchpath, abc.Iterable) or isinstance(
+ searchpath, string_types
+ ):
+ searchpath = [searchpath]
+
+ # In Python 3.5, os.path.join doesn't support Path. This can be
+ # simplified to list(searchpath) when Python 3.5 is dropped.
+ self.searchpath = [fspath(p) for p in searchpath]
+
+ self.encoding = encoding
+ self.followlinks = followlinks
+
+ def get_source(self, environment, template):
+ pieces = split_template_path(template)
+ for searchpath in self.searchpath:
+ filename = path.join(searchpath, *pieces)
+ f = open_if_exists(filename)
+ if f is None:
+ continue
+ try:
+ contents = f.read().decode(self.encoding)
+ finally:
+ f.close()
+
+ mtime = path.getmtime(filename)
+
+ def uptodate():
+ try:
+ return path.getmtime(filename) == mtime
+ except OSError:
+ return False
+
+ return contents, filename, uptodate
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ found = set()
+ for searchpath in self.searchpath:
+ walk_dir = os.walk(searchpath, followlinks=self.followlinks)
+ for dirpath, _, filenames in walk_dir:
+ for filename in filenames:
+ template = (
+ os.path.join(dirpath, filename)[len(searchpath) :]
+ .strip(os.path.sep)
+ .replace(os.path.sep, "/")
+ )
+ if template[:2] == "./":
+ template = template[2:]
+ if template not in found:
+ found.add(template)
+ return sorted(found)
+
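+# Typical wiring sketch (editor's illustration, not upstream code;
+# assumes a local "templates" directory exists):
+#
+#     from jinja2 import Environment, FileSystemLoader
+#     env = Environment(loader=FileSystemLoader("templates"))
+#     tmpl = env.get_template("index.html")
+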
+
+class PackageLoader(BaseLoader):
+ """Load templates from python eggs or packages. It is constructed with
+ the name of the python package and the path to the templates in that
+ package::
+
+ loader = PackageLoader('mypackage', 'views')
+
+ If the package path is not given, ``'templates'`` is assumed.
+
+ By default the template encoding is ``'utf-8'``; it can be changed
+ by setting the `encoding` parameter to something else. Due to the
+ nature of eggs, templates can only be reloaded if the package was
+ loaded from the file system and not from a zip file.
+ """
+
+ def __init__(self, package_name, package_path="templates", encoding="utf-8"):
+ from pkg_resources import DefaultProvider
+ from pkg_resources import get_provider
+ from pkg_resources import ResourceManager
+
+ provider = get_provider(package_name)
+ self.encoding = encoding
+ self.manager = ResourceManager()
+ self.filesystem_bound = isinstance(provider, DefaultProvider)
+ self.provider = provider
+ self.package_path = package_path
+
+ def get_source(self, environment, template):
+ pieces = split_template_path(template)
+ p = "/".join((self.package_path,) + tuple(pieces))
+
+ if not self.provider.has_resource(p):
+ raise TemplateNotFound(template)
+
+ filename = uptodate = None
+
+ if self.filesystem_bound:
+ filename = self.provider.get_resource_filename(self.manager, p)
+ mtime = path.getmtime(filename)
+
+ def uptodate():
+ try:
+ return path.getmtime(filename) == mtime
+ except OSError:
+ return False
+
+ source = self.provider.get_resource_string(self.manager, p)
+ return source.decode(self.encoding), filename, uptodate
+
+ def list_templates(self):
+ path = self.package_path
+
+ if path[:2] == "./":
+ path = path[2:]
+ elif path == ".":
+ path = ""
+
+ offset = len(path)
+ results = []
+
+ def _walk(path):
+ for filename in self.provider.resource_listdir(path):
+ fullname = path + "/" + filename
+
+ if self.provider.resource_isdir(fullname):
+ _walk(fullname)
+ else:
+ results.append(fullname[offset:].lstrip("/"))
+
+ _walk(path)
+ results.sort()
+ return results
+
+
+class DictLoader(BaseLoader):
+ """Loads a template from a python dict. It's passed a dict of unicode
+ strings bound to template names. This loader is useful for unittesting:
+
+ >>> loader = DictLoader({'index.html': 'source here'})
+
+ Because auto reloading is rarely useful, it is disabled by default.
+ """
+
+ def __init__(self, mapping):
+ self.mapping = mapping
+
+ def get_source(self, environment, template):
+ if template in self.mapping:
+ source = self.mapping[template]
+ return source, None, lambda: source == self.mapping.get(template)
+ raise TemplateNotFound(template)
+
+ def list_templates(self):
+ return sorted(self.mapping)
+
+
+class FunctionLoader(BaseLoader):
+ """A loader that is passed a function which does the loading. The
+ function receives the name of the template and has to return either
+ a unicode string with the template source, a tuple in the form ``(source,
+ filename, uptodatefunc)`` or `None` if the template does not exist.
+
+ >>> def load_template(name):
+ ... if name == 'index.html':
+ ... return '...'
+ ...
+ >>> loader = FunctionLoader(load_template)
+
+ The `uptodatefunc` is a function that is called if autoreload is enabled
+ and has to return `True` if the template is still up to date. For more
+ details have a look at :meth:`BaseLoader.get_source` which has the same
+ return value.
+ """
+
+ def __init__(self, load_func):
+ self.load_func = load_func
+
+ def get_source(self, environment, template):
+ rv = self.load_func(template)
+ if rv is None:
+ raise TemplateNotFound(template)
+ elif isinstance(rv, string_types):
+ return rv, None, None
+ return rv
+
+
+class PrefixLoader(BaseLoader):
+ """A loader that is passed a dict of loaders where each loader is bound
+ to a prefix. The prefix is delimited from the template name by a
+ slash by default, which can be changed by setting the `delimiter`
+ argument to something else::
+
+ loader = PrefixLoader({
+ 'app1': PackageLoader('mypackage.app1'),
+ 'app2': PackageLoader('mypackage.app2')
+ })
+
+ Loading ``'app1/index.html'`` loads the file from the app1 package;
+ loading ``'app2/index.html'`` loads it from the second.
+ """
+
+ def __init__(self, mapping, delimiter="/"):
+ self.mapping = mapping
+ self.delimiter = delimiter
+
+ def get_loader(self, template):
+ try:
+ prefix, name = template.split(self.delimiter, 1)
+ loader = self.mapping[prefix]
+ except (ValueError, KeyError):
+ raise TemplateNotFound(template)
+ return loader, name
+
+ def get_source(self, environment, template):
+ loader, name = self.get_loader(template)
+ try:
+ return loader.get_source(environment, name)
+ except TemplateNotFound:
+ # re-raise the exception with the correct filename here.
+ # (the one that includes the prefix)
+ raise TemplateNotFound(template)
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ loader, local_name = self.get_loader(name)
+ try:
+ return loader.load(environment, local_name, globals)
+ except TemplateNotFound:
+ # re-raise the exception with the correct filename here.
+ # (the one that includes the prefix)
+ raise TemplateNotFound(name)
+
+ def list_templates(self):
+ result = []
+ for prefix, loader in iteritems(self.mapping):
+ for template in loader.list_templates():
+ result.append(prefix + self.delimiter + template)
+ return result
+
+
+class ChoiceLoader(BaseLoader):
+ """This loader works like the `PrefixLoader` just that no prefix is
+ specified. If a template could not be found by one loader the next one
+ is tried.
+
+ >>> loader = ChoiceLoader([
+ ... FileSystemLoader('/path/to/user/templates'),
+ ... FileSystemLoader('/path/to/system/templates')
+ ... ])
+
+ This is useful if you want to allow users to override builtin templates
+ from a different location.
+ """
+
+ def __init__(self, loaders):
+ self.loaders = loaders
+
+ def get_source(self, environment, template):
+ for loader in self.loaders:
+ try:
+ return loader.get_source(environment, template)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(template)
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ for loader in self.loaders:
+ try:
+ return loader.load(environment, name, globals)
+ except TemplateNotFound:
+ pass
+ raise TemplateNotFound(name)
+
+ def list_templates(self):
+ found = set()
+ for loader in self.loaders:
+ found.update(loader.list_templates())
+ return sorted(found)
+
+
+class _TemplateModule(ModuleType):
+ """Like a normal module but with support for weak references"""
+
+
+class ModuleLoader(BaseLoader):
+ """This loader loads templates from precompiled templates.
+
+ Example usage:
+
+ >>> loader = ChoiceLoader([
+ ... ModuleLoader('/path/to/compiled/templates'),
+ ... FileSystemLoader('/path/to/templates')
+ ... ])
+
+ Templates can be precompiled with :meth:`Environment.compile_templates`.
+ """
+
+ has_source_access = False
+
+ def __init__(self, path):
+ package_name = "_jinja2_module_templates_%x" % id(self)
+
+ # create a fake module that looks for the templates in the
+ # path given.
+ mod = _TemplateModule(package_name)
+
+ if not isinstance(path, abc.Iterable) or isinstance(path, string_types):
+ path = [path]
+
+ mod.__path__ = [fspath(p) for p in path]
+
+ sys.modules[package_name] = weakref.proxy(
+ mod, lambda x: sys.modules.pop(package_name, None)
+ )
+
+ # the only strong reference, the sys.modules entry is weak
+ # so that the garbage collector can remove it once the
+ # loader that created it goes out of business.
+ self.module = mod
+ self.package_name = package_name
+
+ @staticmethod
+ def get_template_key(name):
+ return "tmpl_" + sha1(name.encode("utf-8")).hexdigest()
+
+ @staticmethod
+ def get_module_filename(name):
+ return ModuleLoader.get_template_key(name) + ".py"
+
+ @internalcode
+ def load(self, environment, name, globals=None):
+ key = self.get_template_key(name)
+ module = "%s.%s" % (self.package_name, key)
+ mod = getattr(self.module, module, None)
+ if mod is None:
+ try:
+ mod = __import__(module, None, None, ["root"])
+ except ImportError:
+ raise TemplateNotFound(name)
+
+ # remove the entry from sys.modules, we only want the attribute
+ # on the module object we have stored on the loader.
+ sys.modules.pop(module, None)
+
+ return environment.template_class.from_module_dict(
+ environment, mod.__dict__, globals
+ )
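+
+# Precompilation round-trip sketch (editor's illustration, not upstream
+# code; the target path is hypothetical, and env is assumed to be an
+# Environment with a loader configured):
+#
+#     env.compile_templates("compiled", zip=None)  # plain .py files
+#     fast_env = Environment(loader=ModuleLoader("compiled"))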
diff --git a/third_party/python/Jinja2/jinja2/meta.py b/third_party/python/Jinja2/jinja2/meta.py
new file mode 100644
index 0000000000..3795aace59
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/meta.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+"""Functions that expose information about templates that might be
+interesting for introspection.
+"""
+from . import nodes
+from ._compat import iteritems
+from ._compat import string_types
+from .compiler import CodeGenerator
+
+
+class TrackingCodeGenerator(CodeGenerator):
+ """We abuse the code generator for introspection."""
+
+ def __init__(self, environment):
+ CodeGenerator.__init__(self, environment, "<introspection>", "<introspection>")
+ self.undeclared_identifiers = set()
+
+ def write(self, x):
+ """Don't write."""
+
+ def enter_frame(self, frame):
+ """Remember all undeclared identifiers."""
+ CodeGenerator.enter_frame(self, frame)
+ for _, (action, param) in iteritems(frame.symbols.loads):
+ if action == "resolve" and param not in self.environment.globals:
+ self.undeclared_identifiers.add(param)
+
+
+def find_undeclared_variables(ast):
+ """Returns a set of all variables in the AST that will be looked up from
+ the context at runtime. Because it is not known at compile time which
+ variables will be used (that depends on the path execution takes at
+ runtime), all variables are returned.
+
+ >>> from jinja2 import Environment, meta
+ >>> env = Environment()
+ >>> ast = env.parse('{% set foo = 42 %}{{ bar + foo }}')
+ >>> meta.find_undeclared_variables(ast) == set(['bar'])
+ True
+
+ .. admonition:: Implementation
+
+ Internally the code generator is used for finding undeclared variables.
+ This is good to know because the code generator might raise a
+ :exc:`TemplateAssertionError` during compilation and as a matter of
+ fact this function can currently raise that exception as well.
+ """
+ codegen = TrackingCodeGenerator(ast.environment)
+ codegen.visit(ast)
+ return codegen.undeclared_identifiers
+
+
+def find_referenced_templates(ast):
+ """Finds all the referenced templates from the AST. This will return an
+ iterator over all the hardcoded template extensions, inclusions and
+ imports. If dynamic inheritance or inclusion is used, `None` will be
+ yielded.
+
+ >>> from jinja2 import Environment, meta
+ >>> env = Environment()
+ >>> ast = env.parse('{% extends "layout.html" %}{% include helper %}')
+ >>> list(meta.find_referenced_templates(ast))
+ ['layout.html', None]
+
+ This function is useful for dependency tracking, for example if you want
+ to rebuild parts of the website after a layout template has changed.
+ """
+ for node in ast.find_all(
+ (nodes.Extends, nodes.FromImport, nodes.Import, nodes.Include)
+ ):
+ if not isinstance(node.template, nodes.Const):
+ # a tuple with some non-consts in there
+ if isinstance(node.template, (nodes.Tuple, nodes.List)):
+ for template_name in node.template.items:
+ # something const, only yield the strings and ignore
+ # non-string consts that really just make no sense
+ if isinstance(template_name, nodes.Const):
+ if isinstance(template_name.value, string_types):
+ yield template_name.value
+ # something dynamic in there
+ else:
+ yield None
+ # something dynamic we don't know about here
+ else:
+ yield None
+ continue
+ # constant is a basestring, direct template name
+ if isinstance(node.template.value, string_types):
+ yield node.template.value
+ # a tuple or list (latter *should* not happen) made of consts,
+ # yield the consts that are strings. We could warn here for
+ # non-string values
+ elif isinstance(node, nodes.Include) and isinstance(
+ node.template.value, (tuple, list)
+ ):
+ for template_name in node.template.value:
+ if isinstance(template_name, string_types):
+ yield template_name
+ # something else we don't care about, we could warn here
+ else:
+ yield None
diff --git a/third_party/python/Jinja2/jinja2/nativetypes.py b/third_party/python/Jinja2/jinja2/nativetypes.py
new file mode 100644
index 0000000000..a9ead4e2bb
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/nativetypes.py
@@ -0,0 +1,94 @@
+from ast import literal_eval
+from itertools import chain
+from itertools import islice
+
+from . import nodes
+from ._compat import text_type
+from .compiler import CodeGenerator
+from .compiler import has_safe_repr
+from .environment import Environment
+from .environment import Template
+
+
+def native_concat(nodes):
+ """Return a native Python type from the list of compiled nodes. If
+ the result is a single node, its value is returned. Otherwise, the
+ nodes are concatenated as strings. If the result can be parsed with
+ :func:`ast.literal_eval`, the parsed value is returned. Otherwise,
+ the string is returned.
+
+ :param nodes: Iterable of nodes to concatenate.
+ """
+ head = list(islice(nodes, 2))
+
+ if not head:
+ return None
+
+ if len(head) == 1:
+ raw = head[0]
+ else:
+ raw = u"".join([text_type(v) for v in chain(head, nodes)])
+
+ try:
+ return literal_eval(raw)
+ except (ValueError, SyntaxError, MemoryError):
+ return raw
+
+
+class NativeCodeGenerator(CodeGenerator):
+ """A code generator which renders Python types by not adding
+ ``to_string()`` around output nodes.
+ """
+
+ @staticmethod
+ def _default_finalize(value):
+ return value
+
+ def _output_const_repr(self, group):
+ return repr(u"".join([text_type(v) for v in group]))
+
+ def _output_child_to_const(self, node, frame, finalize):
+ const = node.as_const(frame.eval_ctx)
+
+ if not has_safe_repr(const):
+ raise nodes.Impossible()
+
+ if isinstance(node, nodes.TemplateData):
+ return const
+
+ return finalize.const(const)
+
+ def _output_child_pre(self, node, frame, finalize):
+ if finalize.src is not None:
+ self.write(finalize.src)
+
+ def _output_child_post(self, node, frame, finalize):
+ if finalize.src is not None:
+ self.write(")")
+
+
+class NativeEnvironment(Environment):
+ """An environment that renders templates to native Python types."""
+
+ code_generator_class = NativeCodeGenerator
+
+
+class NativeTemplate(Template):
+ environment_class = NativeEnvironment
+
+ def render(self, *args, **kwargs):
+ """Render the template to produce a native Python type. If the
+ result is a single node, its value is returned. Otherwise, the
+ nodes are concatenated as strings. If the result can be parsed
+ with :func:`ast.literal_eval`, the parsed value is returned.
+ Otherwise, the string is returned.
+ """
+ vars = dict(*args, **kwargs)
+
+ try:
+ return native_concat(self.root_render_func(self.new_context(vars)))
+ except Exception:
+ return self.environment.handle_exception()
+
+
+NativeEnvironment.template_class = NativeTemplate
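+
+# Rendering sketch (editor's illustration, not upstream code): results
+# come back as native Python types rather than strings.
+#
+#     env = NativeEnvironment()
+#     assert env.from_string("{{ 2 + 2 }}").render() == 4
+#     assert env.from_string("[{{ x }}]").render(x=1) == [1]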
diff --git a/third_party/python/Jinja2/jinja2/nodes.py b/third_party/python/Jinja2/jinja2/nodes.py
new file mode 100644
index 0000000000..95bd614a14
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/nodes.py
@@ -0,0 +1,1088 @@
+# -*- coding: utf-8 -*-
+"""AST nodes generated by the parser for the compiler. Also provides
+some node tree helper functions used by the parser and compiler in order
+to normalize nodes.
+"""
+import operator
+from collections import deque
+
+from markupsafe import Markup
+
+from ._compat import izip
+from ._compat import PY2
+from ._compat import text_type
+from ._compat import with_metaclass
+
+_binop_to_func = {
+ "*": operator.mul,
+ "/": operator.truediv,
+ "//": operator.floordiv,
+ "**": operator.pow,
+ "%": operator.mod,
+ "+": operator.add,
+ "-": operator.sub,
+}
+
+_uaop_to_func = {"not": operator.not_, "+": operator.pos, "-": operator.neg}
+
+_cmpop_to_func = {
+ "eq": operator.eq,
+ "ne": operator.ne,
+ "gt": operator.gt,
+ "gteq": operator.ge,
+ "lt": operator.lt,
+ "lteq": operator.le,
+ "in": lambda a, b: a in b,
+ "notin": lambda a, b: a not in b,
+}
+
+
+class Impossible(Exception):
+ """Raised if the node could not perform a requested action."""
+
+
+class NodeType(type):
+ """A metaclass for nodes that handles the field and attribute
+ inheritance. Fields and attributes from the parent class are
+ automatically forwarded to the child."""
+
+ def __new__(mcs, name, bases, d):
+ for attr in "fields", "attributes":
+ storage = []
+ storage.extend(getattr(bases[0], attr, ()))
+ storage.extend(d.get(attr, ()))
+ assert len(bases) == 1, "multiple inheritance not allowed"
+ assert len(storage) == len(set(storage)), "layout conflict"
+ d[attr] = tuple(storage)
+ d.setdefault("abstract", False)
+ return type.__new__(mcs, name, bases, d)
+
+
+class EvalContext(object):
+ """Holds evaluation time information. Custom attributes can be attached
+ to it in extensions.
+ """
+
+ def __init__(self, environment, template_name=None):
+ self.environment = environment
+ if callable(environment.autoescape):
+ self.autoescape = environment.autoescape(template_name)
+ else:
+ self.autoescape = environment.autoescape
+ self.volatile = False
+
+ def save(self):
+ return self.__dict__.copy()
+
+ def revert(self, old):
+ self.__dict__.clear()
+ self.__dict__.update(old)
+
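+# Extensions typically snapshot and restore the eval context around a
+# temporary change (editor's illustration, not upstream code):
+#
+#     old = eval_ctx.save()
+#     eval_ctx.autoescape = False
+#     try:
+#         ...  # generate code with autoescaping off
+#     finally:
+#         eval_ctx.revert(old)
+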
+
+def get_eval_context(node, ctx):
+ if ctx is None:
+ if node.environment is None:
+ raise RuntimeError(
+ "if no eval context is passed, the "
+ "node must have an attached "
+ "environment."
+ )
+ return EvalContext(node.environment)
+ return ctx
+
+
+class Node(with_metaclass(NodeType, object)):
+ """Baseclass for all Jinja nodes. There are a number of nodes available
+ of different types. There are four major types:
+
+ - :class:`Stmt`: statements
+ - :class:`Expr`: expressions
+ - :class:`Helper`: helper nodes
+ - :class:`Template`: the outermost wrapper node
+
+ All nodes have fields and attributes. Fields may be other nodes, lists,
+ or arbitrary values. Fields are passed to the constructor as regular
+ positional arguments, attributes as keyword arguments. Each node has
+ two attributes: `lineno` (the line number of the node) and `environment`.
+ The `environment` attribute is set at the end of the parsing process for
+ all nodes automatically.
+ """
+
+ fields = ()
+ attributes = ("lineno", "environment")
+ abstract = True
+
+ def __init__(self, *fields, **attributes):
+ if self.abstract:
+ raise TypeError("abstract nodes are not instantiable")
+ if fields:
+ if len(fields) != len(self.fields):
+ if not self.fields:
+ raise TypeError("%r takes 0 arguments" % self.__class__.__name__)
+ raise TypeError(
+ "%r takes 0 or %d argument%s"
+ % (
+ self.__class__.__name__,
+ len(self.fields),
+ len(self.fields) != 1 and "s" or "",
+ )
+ )
+ for name, arg in izip(self.fields, fields):
+ setattr(self, name, arg)
+ for attr in self.attributes:
+ setattr(self, attr, attributes.pop(attr, None))
+ if attributes:
+ raise TypeError("unknown attribute %r" % next(iter(attributes)))
+
+ def iter_fields(self, exclude=None, only=None):
+ """This method iterates over all fields that are defined and yields
+ ``(key, value)`` tuples. By default all fields are returned, but
+ it's possible to limit that to some fields by providing the `only`
+ parameter or to exclude some using the `exclude` parameter. Both
+ should be sets or tuples of field names.
+ """
+ for name in self.fields:
+ if (
+ (exclude is only is None)
+ or (exclude is not None and name not in exclude)
+ or (only is not None and name in only)
+ ):
+ try:
+ yield name, getattr(self, name)
+ except AttributeError:
+ pass
+
+ def iter_child_nodes(self, exclude=None, only=None):
+ """Iterates over all direct child nodes of the node. This iterates
+ over all fields and yields the values if they are nodes. If the value
+ of a field is a list all the nodes in that list are returned.
+ """
+ for _, item in self.iter_fields(exclude, only):
+ if isinstance(item, list):
+ for n in item:
+ if isinstance(n, Node):
+ yield n
+ elif isinstance(item, Node):
+ yield item
+
+ def find(self, node_type):
+ """Find the first node of a given type. If no such node exists the
+ return value is `None`.
+ """
+ for result in self.find_all(node_type):
+ return result
+
+ def find_all(self, node_type):
+ """Find all the nodes of a given type. If the type is a tuple,
+ the check is performed for any of the tuple items.
+ """
+ for child in self.iter_child_nodes():
+ if isinstance(child, node_type):
+ yield child
+ for result in child.find_all(node_type):
+ yield result
+
+ def set_ctx(self, ctx):
+ """Reset the context of a node and all child nodes. Per default the
+ parser will all generate nodes that have a 'load' context as it's the
+ most common one. This method is used in the parser to set assignment
+ targets and other nodes to a store context.
+ """
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ if "ctx" in node.fields:
+ node.ctx = ctx
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def set_lineno(self, lineno, override=False):
+ """Set the line numbers of the node and children."""
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ if "lineno" in node.attributes:
+ if node.lineno is None or override:
+ node.lineno = lineno
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def set_environment(self, environment):
+ """Set the environment for all nodes."""
+ todo = deque([self])
+ while todo:
+ node = todo.popleft()
+ node.environment = environment
+ todo.extend(node.iter_child_nodes())
+ return self
+
+ def __eq__(self, other):
+ return type(self) is type(other) and tuple(self.iter_fields()) == tuple(
+ other.iter_fields()
+ )
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ # Restore Python 2 hashing behavior on Python 3
+ __hash__ = object.__hash__
+
+ def __repr__(self):
+ return "%s(%s)" % (
+ self.__class__.__name__,
+ ", ".join("%s=%r" % (arg, getattr(self, arg, None)) for arg in self.fields),
+ )
+
+ def dump(self):
+ def _dump(node):
+ if not isinstance(node, Node):
+ buf.append(repr(node))
+ return
+
+ buf.append("nodes.%s(" % node.__class__.__name__)
+ if not node.fields:
+ buf.append(")")
+ return
+ for idx, field in enumerate(node.fields):
+ if idx:
+ buf.append(", ")
+ value = getattr(node, field)
+ if isinstance(value, list):
+ buf.append("[")
+ for idx, item in enumerate(value):
+ if idx:
+ buf.append(", ")
+ _dump(item)
+ buf.append("]")
+ else:
+ _dump(value)
+ buf.append(")")
+
+ buf = []
+ _dump(self)
+ return "".join(buf)
+
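+# Tree introspection sketch (editor's illustration, not upstream code;
+# Name is defined further below):
+#
+#     ast = Environment().parse("{{ foo }}")
+#     node = ast.find(Name)    # first Name node in the tree
+#     assert node.name == "foo"
+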
+
+class Stmt(Node):
+ """Base node for all statements."""
+
+ abstract = True
+
+
+class Helper(Node):
+ """Nodes that exist in a specific context only."""
+
+ abstract = True
+
+
+class Template(Node):
+ """Node that represents a template. This must be the outermost node that
+ is passed to the compiler.
+ """
+
+ fields = ("body",)
+
+
+class Output(Stmt):
+ """A node that holds multiple expressions which are then printed out.
+ This is used both for the `print` statement and the regular template data.
+ """
+
+ fields = ("nodes",)
+
+
+class Extends(Stmt):
+ """Represents an extends statement."""
+
+ fields = ("template",)
+
+
+class For(Stmt):
+ """The for loop. `target` is the target for the iteration (usually a
+ :class:`Name` or :class:`Tuple`), `iter` the iterable. `body` is a list
+ of nodes that are used as loop-body, and `else_` a list of nodes for the
+ `else` block. If no else node exists it has to be an empty list.
+
+ For filtered nodes an expression can be stored as `test`, otherwise `None`.
+ """
+
+ fields = ("target", "iter", "body", "else_", "test", "recursive")
+
+
+class If(Stmt):
+ """If `test` is true, `body` is rendered, else `else_`."""
+
+ fields = ("test", "body", "elif_", "else_")
+
+
+class Macro(Stmt):
+ """A macro definition. `name` is the name of the macro, `args` a list of
+ arguments and `defaults` a list of defaults if there are any. `body` is
+ a list of nodes for the macro body.
+ """
+
+ fields = ("name", "args", "defaults", "body")
+
+
+class CallBlock(Stmt):
+ """Like a macro without a name but a call instead. `call` is called with
+ the unnamed macro as `caller` argument this node holds.
+ """
+
+ fields = ("call", "args", "defaults", "body")
+
+
+class FilterBlock(Stmt):
+ """Node for filter sections."""
+
+ fields = ("body", "filter")
+
+
+class With(Stmt):
+ """Specific node for with statements. In older versions of Jinja the
+ with statement was implemented on the base of the `Scope` node instead.
+
+ .. versionadded:: 2.9.3
+ """
+
+ fields = ("targets", "values", "body")
+
+
+class Block(Stmt):
+ """A node that represents a block."""
+
+ fields = ("name", "body", "scoped")
+
+
+class Include(Stmt):
+ """A node that represents the include tag."""
+
+ fields = ("template", "with_context", "ignore_missing")
+
+
+class Import(Stmt):
+ """A node that represents the import tag."""
+
+ fields = ("template", "target", "with_context")
+
+
+class FromImport(Stmt):
+ """A node that represents the from import tag. It's important to not
+ pass unsafe names to the name attribute. The compiler translates the
+ attribute lookups directly into getattr calls and does *not* use the
+ subscript callback of the interface. As exported variables may not
+ start with double underscores (which the parser asserts) this is not a
+ problem for regular Jinja code, but if this node is used in an extension
+ extra care must be taken.
+
+ The list of names may contain tuples if aliases are wanted.
+ """
+
+ fields = ("template", "names", "with_context")
+
+
+class ExprStmt(Stmt):
+ """A statement that evaluates an expression and discards the result."""
+
+ fields = ("node",)
+
+
+class Assign(Stmt):
+ """Assigns an expression to a target."""
+
+ fields = ("target", "node")
+
+
+class AssignBlock(Stmt):
+ """Assigns a block to a target."""
+
+ fields = ("target", "filter", "body")
+
+
+class Expr(Node):
+ """Baseclass for all expressions."""
+
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ """Return the value of the expression as constant or raise
+ :exc:`Impossible` if this was not possible.
+
+ An :class:`EvalContext` can be provided, if none is given
+ a default context is created which requires the nodes to have
+ an attached environment.
+
+ .. versionchanged:: 2.4
+ the `eval_ctx` parameter was added.
+ """
+ raise Impossible()
+
+ def can_assign(self):
+ """Check if it's possible to assign something to this node."""
+ return False
+
+
+class BinExpr(Expr):
+ """Baseclass for all binary expressions."""
+
+ fields = ("left", "right")
+ operator = None
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ # intercepted operators cannot be folded at compile time
+ if (
+ self.environment.sandboxed
+ and self.operator in self.environment.intercepted_binops
+ ):
+ raise Impossible()
+ f = _binop_to_func[self.operator]
+ try:
+ return f(self.left.as_const(eval_ctx), self.right.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+
+class UnaryExpr(Expr):
+ """Baseclass for all unary expressions."""
+
+ fields = ("node",)
+ operator = None
+ abstract = True
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ # intercepted operators cannot be folded at compile time
+ if (
+ self.environment.sandboxed
+ and self.operator in self.environment.intercepted_unops
+ ):
+ raise Impossible()
+ f = _uaop_to_func[self.operator]
+ try:
+ return f(self.node.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+
+class Name(Expr):
+ """Looks up a name or stores a value in a name.
+ The `ctx` of the node can be one of the following values:
+
+ - `store`: store a value in the name
+ - `load`: load that name
+ - `param`: like `store` but if the name was defined as function parameter.
+ """
+
+ fields = ("name", "ctx")
+
+ def can_assign(self):
+ return self.name not in ("true", "false", "none", "True", "False", "None")
+
+
+class NSRef(Expr):
+ """Reference to a namespace value assignment"""
+
+ fields = ("name", "attr")
+
+ def can_assign(self):
+ # We don't need any special checks here; NSRef assignments have a
+ # runtime check to ensure the target is a namespace object which will
+ # have been checked already as it is created using a normal assignment
+ # which goes through a `Name` node.
+ return True
+
+
+class Literal(Expr):
+ """Baseclass for literals."""
+
+ abstract = True
+
+
+class Const(Literal):
+ """All constant values. The parser will return this node for simple
+ constants such as ``42`` or ``"foo"`` but it can be used to store more
+ complex values such as lists too. Only constants with a safe
+ representation (objects where ``eval(repr(x)) == x`` is true) can
+ be stored.
+ """
+
+ fields = ("value",)
+
+ def as_const(self, eval_ctx=None):
+ rv = self.value
+ if (
+ PY2
+ and type(rv) is text_type
+ and self.environment.policies["compiler.ascii_str"]
+ ):
+ try:
+ rv = rv.encode("ascii")
+ except UnicodeError:
+ pass
+ return rv
+
+ @classmethod
+ def from_untrusted(cls, value, lineno=None, environment=None):
+ """Return a const object if the value is representable as
+ constant value in the generated code, otherwise it will raise
+ an `Impossible` exception.
+ """
+ from .compiler import has_safe_repr
+
+ if not has_safe_repr(value):
+ raise Impossible()
+ return cls(value, lineno=lineno, environment=environment)
+
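+# For illustration (editor's note, not upstream code):
+# Const.from_untrusted([1, 2]) succeeds because
+# eval(repr([1, 2])) == [1, 2], whereas Const.from_untrusted(object())
+# raises Impossible.
+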
+
+class TemplateData(Literal):
+ """A constant template string."""
+
+ fields = ("data",)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ if eval_ctx.autoescape:
+ return Markup(self.data)
+ return self.data
+
+
+class Tuple(Literal):
+ """For loop unpacking and some other things like multiple arguments
+ for subscripts. As with :class:`Name`, `ctx` specifies whether the
+ tuple is used for loading or storing the names.
+ """
+
+ fields = ("items", "ctx")
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return tuple(x.as_const(eval_ctx) for x in self.items)
+
+ def can_assign(self):
+ for item in self.items:
+ if not item.can_assign():
+ return False
+ return True
+
+
+class List(Literal):
+ """Any list literal such as ``[1, 2, 3]``"""
+
+ fields = ("items",)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return [x.as_const(eval_ctx) for x in self.items]
+
+
+class Dict(Literal):
+ """Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
+ :class:`Pair` nodes.
+ """
+
+ fields = ("items",)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return dict(x.as_const(eval_ctx) for x in self.items)
+
+
+class Pair(Helper):
+ """A key, value pair for dicts."""
+
+ fields = ("key", "value")
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.key.as_const(eval_ctx), self.value.as_const(eval_ctx)
+
+
+class Keyword(Helper):
+ """A key, value pair for keyword arguments where key is a string."""
+
+ fields = ("key", "value")
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.key, self.value.as_const(eval_ctx)
+
+
+class CondExpr(Expr):
+ """A conditional expression (inline if expression). (``{{
+ foo if bar else baz }}``)
+ """
+
+ fields = ("test", "expr1", "expr2")
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if self.test.as_const(eval_ctx):
+ return self.expr1.as_const(eval_ctx)
+
+ # if we evaluate to an undefined object, we better do that at runtime
+ if self.expr2 is None:
+ raise Impossible()
+
+ return self.expr2.as_const(eval_ctx)
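+
+    # A minimal folding sketch (assuming the node is bound to an
+    # environment, as the optimizer guarantees): a constant test
+    # collapses the whole expression.
+    #
+    #     CondExpr(Const(True), Const("a"), Const("b")).as_const()  # -> "a"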
+
+
+def args_as_const(node, eval_ctx):
+ args = [x.as_const(eval_ctx) for x in node.args]
+ kwargs = dict(x.as_const(eval_ctx) for x in node.kwargs)
+
+ if node.dyn_args is not None:
+ try:
+ args.extend(node.dyn_args.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+ if node.dyn_kwargs is not None:
+ try:
+ kwargs.update(node.dyn_kwargs.as_const(eval_ctx))
+ except Exception:
+ raise Impossible()
+
+ return args, kwargs
+
+
+class Filter(Expr):
+ """This node applies a filter on an expression. `name` is the name of
+ the filter, the rest of the fields are the same as for :class:`Call`.
+
+ If the `node` of a filter is `None` the contents of the last buffer are
+ filtered. Buffers are created by macros and filter blocks.
+ """
+
+ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ if eval_ctx.volatile or self.node is None:
+ raise Impossible()
+
+ # we have to be careful here because we call filter_ below.
+ # if this variable would be called filter, 2to3 would wrap the
+ # call in a list because it is assuming we are talking about the
+ # builtin filter function here which no longer returns a list in
+ # python 3. because of that, do not rename filter_ to filter!
+ filter_ = self.environment.filters.get(self.name)
+
+ if filter_ is None or getattr(filter_, "contextfilter", False) is True:
+ raise Impossible()
+
+        # We cannot constant-fold async filters, so we need to make sure
+        # not to go down this path.
+ if eval_ctx.environment.is_async and getattr(
+ filter_, "asyncfiltervariant", False
+ ):
+ raise Impossible()
+
+ args, kwargs = args_as_const(self, eval_ctx)
+ args.insert(0, self.node.as_const(eval_ctx))
+
+ if getattr(filter_, "evalcontextfilter", False) is True:
+ args.insert(0, eval_ctx)
+ elif getattr(filter_, "environmentfilter", False) is True:
+ args.insert(0, self.environment)
+
+ try:
+ return filter_(*args, **kwargs)
+ except Exception:
+ raise Impossible()
+
+
+class Test(Expr):
+ """Applies a test on an expression. `name` is the name of the test, the
+ rest of the fields are the same as for :class:`Call`.
+ """
+
+ fields = ("node", "name", "args", "kwargs", "dyn_args", "dyn_kwargs")
+
+ def as_const(self, eval_ctx=None):
+ test = self.environment.tests.get(self.name)
+
+ if test is None:
+ raise Impossible()
+
+ eval_ctx = get_eval_context(self, eval_ctx)
+ args, kwargs = args_as_const(self, eval_ctx)
+ args.insert(0, self.node.as_const(eval_ctx))
+
+ try:
+ return test(*args, **kwargs)
+ except Exception:
+ raise Impossible()
+
+
+class Call(Expr):
+ """Calls an expression. `args` is a list of arguments, `kwargs` a list
+ of keyword arguments (list of :class:`Keyword` nodes), and `dyn_args`
+    and `dyn_kwargs` have to be either `None` or a node that is used as
+ node for dynamic positional (``*args``) or keyword (``**kwargs``)
+ arguments.
+ """
+
+ fields = ("node", "args", "kwargs", "dyn_args", "dyn_kwargs")
+
+
+class Getitem(Expr):
+ """Get an attribute or item from an expression and prefer the item."""
+
+ fields = ("node", "arg", "ctx")
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if self.ctx != "load":
+ raise Impossible()
+ try:
+ return self.environment.getitem(
+ self.node.as_const(eval_ctx), self.arg.as_const(eval_ctx)
+ )
+ except Exception:
+ raise Impossible()
+
+ def can_assign(self):
+ return False
+
+
+class Getattr(Expr):
+ """Get an attribute or item from an expression that is a ascii-only
+ bytestring and prefer the attribute.
+ """
+
+ fields = ("node", "attr", "ctx")
+
+ def as_const(self, eval_ctx=None):
+ if self.ctx != "load":
+ raise Impossible()
+ try:
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.environment.getattr(self.node.as_const(eval_ctx), self.attr)
+ except Exception:
+ raise Impossible()
+
+ def can_assign(self):
+ return False
+
+
+class Slice(Expr):
+ """Represents a slice object. This must only be used as argument for
+ :class:`Subscript`.
+ """
+
+ fields = ("start", "stop", "step")
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+
+ def const(obj):
+ if obj is None:
+ return None
+ return obj.as_const(eval_ctx)
+
+ return slice(const(self.start), const(self.stop), const(self.step))
+
+
+class Concat(Expr):
+ """Concatenates the list of expressions provided after converting them to
+ unicode.
+ """
+
+ fields = ("nodes",)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return "".join(text_type(x.as_const(eval_ctx)) for x in self.nodes)
+
+
+class Compare(Expr):
+ """Compares an expression with some other expressions. `ops` must be a
+ list of :class:`Operand`\\s.
+ """
+
+ fields = ("expr", "ops")
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ result = value = self.expr.as_const(eval_ctx)
+
+ try:
+ for op in self.ops:
+ new_value = op.expr.as_const(eval_ctx)
+ result = _cmpop_to_func[op.op](value, new_value)
+
+ if not result:
+ return False
+
+ value = new_value
+ except Exception:
+ raise Impossible()
+
+ return result
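+
+    # A minimal folding sketch (assuming an environment-bound node):
+    # a chained comparison such as ``1 < 2 < 3`` is folded link by link.
+    #
+    #     Compare(Const(1), [Operand("lt", Const(2)),
+    #                        Operand("lt", Const(3))]).as_const()  # -> True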
+
+
+class Operand(Helper):
+ """Holds an operator and an expression."""
+
+ fields = ("op", "expr")
+
+
+if __debug__:
+ Operand.__doc__ += "\nThe following operators are available: " + ", ".join(
+ sorted(
+ "``%s``" % x
+ for x in set(_binop_to_func) | set(_uaop_to_func) | set(_cmpop_to_func)
+ )
+ )
+
+
+class Mul(BinExpr):
+ """Multiplies the left with the right node."""
+
+ operator = "*"
+
+
+class Div(BinExpr):
+ """Divides the left by the right node."""
+
+ operator = "/"
+
+
+class FloorDiv(BinExpr):
+ """Divides the left by the right node and truncates conver the
+ result into an integer by truncating.
+ """
+
+ operator = "//"
+
+
+class Add(BinExpr):
+ """Add the left to the right node."""
+
+ operator = "+"
+
+
+class Sub(BinExpr):
+ """Subtract the right from the left node."""
+
+ operator = "-"
+
+
+class Mod(BinExpr):
+ """Left modulo right."""
+
+ operator = "%"
+
+
+class Pow(BinExpr):
+ """Left to the power of right."""
+
+ operator = "**"
+
+
+class And(BinExpr):
+ """Short circuited AND."""
+
+ operator = "and"
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.left.as_const(eval_ctx) and self.right.as_const(eval_ctx)
+
+
+class Or(BinExpr):
+ """Short circuited OR."""
+
+ operator = "or"
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return self.left.as_const(eval_ctx) or self.right.as_const(eval_ctx)
+
+
+class Not(UnaryExpr):
+ """Negate the expression."""
+
+ operator = "not"
+
+
+class Neg(UnaryExpr):
+ """Make the expression negative."""
+
+ operator = "-"
+
+
+class Pos(UnaryExpr):
+ """Make the expression positive (noop for most expressions)"""
+
+ operator = "+"
+
+
+# Helpers for extensions
+
+
+class EnvironmentAttribute(Expr):
+ """Loads an attribute from the environment object. This is useful for
+ extensions that want to call a callback stored on the environment.
+ """
+
+ fields = ("name",)
+
+
+class ExtensionAttribute(Expr):
+ """Returns the attribute of an extension bound to the environment.
+ The identifier is the identifier of the :class:`Extension`.
+
+ This node is usually constructed by calling the
+ :meth:`~jinja2.ext.Extension.attr` method on an extension.
+ """
+
+ fields = ("identifier", "name")
+
+
+class ImportedName(Expr):
+ """If created with an import name the import name is returned on node
+ access. For example ``ImportedName('cgi.escape')`` returns the `escape`
+ function from the cgi module on evaluation. Imports are optimized by the
+ compiler so there is no need to assign them to local variables.
+ """
+
+ fields = ("importname",)
+
+
+class InternalName(Expr):
+ """An internal name in the compiler. You cannot create these nodes
+ yourself but the parser provides a
+ :meth:`~jinja2.parser.Parser.free_identifier` method that creates
+ a new identifier for you. This identifier is not available from the
+    template and is not treated specially by the compiler.
+ """
+
+ fields = ("name",)
+
+ def __init__(self):
+ raise TypeError(
+ "Can't create internal names. Use the "
+ "`free_identifier` method on a parser."
+ )
+
+
+class MarkSafe(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`)."""
+
+ fields = ("expr",)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ return Markup(self.expr.as_const(eval_ctx))
+
+
+class MarkSafeIfAutoescape(Expr):
+ """Mark the wrapped expression as safe (wrap it as `Markup`) but
+ only if autoescaping is active.
+
+ .. versionadded:: 2.5
+ """
+
+ fields = ("expr",)
+
+ def as_const(self, eval_ctx=None):
+ eval_ctx = get_eval_context(self, eval_ctx)
+ if eval_ctx.volatile:
+ raise Impossible()
+ expr = self.expr.as_const(eval_ctx)
+ if eval_ctx.autoescape:
+ return Markup(expr)
+ return expr
+
+
+class ContextReference(Expr):
+ """Returns the current template context. It can be used like a
+ :class:`Name` node, with a ``'load'`` ctx and will return the
+ current :class:`~jinja2.runtime.Context` object.
+
+    Here is an example that assigns the current template name to a
+ variable named `foo`::
+
+ Assign(Name('foo', ctx='store'),
+ Getattr(ContextReference(), 'name'))
+
+ This is basically equivalent to using the
+ :func:`~jinja2.contextfunction` decorator when using the
+ high-level API, which causes a reference to the context to be passed
+ as the first argument to a function.
+ """
+
+
+class DerivedContextReference(Expr):
+ """Return the current template context including locals. Behaves
+ exactly like :class:`ContextReference`, but includes local
+ variables, such as from a ``for`` loop.
+
+ .. versionadded:: 2.11
+ """
+
+
+class Continue(Stmt):
+ """Continue a loop."""
+
+
+class Break(Stmt):
+ """Break a loop."""
+
+
+class Scope(Stmt):
+ """An artificial scope."""
+
+ fields = ("body",)
+
+
+class OverlayScope(Stmt):
+ """An overlay scope for extensions. This is a largely unoptimized scope
+ that however can be used to introduce completely arbitrary variables into
+ a sub scope from a dictionary or dictionary like object. The `context`
+ field has to evaluate to a dictionary object.
+
+ Example usage::
+
+ OverlayScope(context=self.call_method('get_context'),
+ body=[...])
+
+ .. versionadded:: 2.10
+ """
+
+ fields = ("context", "body")
+
+
+class EvalContextModifier(Stmt):
+ """Modifies the eval context. For each option that should be modified,
+ a :class:`Keyword` has to be added to the :attr:`options` list.
+
+ Example to change the `autoescape` setting::
+
+ EvalContextModifier(options=[Keyword('autoescape', Const(True))])
+ """
+
+ fields = ("options",)
+
+
+class ScopedEvalContextModifier(EvalContextModifier):
+ """Modifies the eval context and reverts it later. Works exactly like
+ :class:`EvalContextModifier` but will only modify the
+ :class:`~jinja2.nodes.EvalContext` for nodes in the :attr:`body`.
+ """
+
+ fields = ("body",)
+
+
+# make sure nobody creates custom nodes
+def _failing_new(*args, **kwargs):
+ raise TypeError("can't create custom node types")
+
+
+NodeType.__new__ = staticmethod(_failing_new)
+del _failing_new
diff --git a/third_party/python/Jinja2/jinja2/optimizer.py b/third_party/python/Jinja2/jinja2/optimizer.py
new file mode 100644
index 0000000000..7bc78c4524
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/optimizer.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+"""The optimizer tries to constant fold expressions and modify the AST
+in place so that it should be faster to evaluate.
+
+Because the AST does not contain all the scoping information and the
+compiler has to find that out, we cannot do all the optimizations we
+want. For example, loop unrolling doesn't work because unrolled loops
+would have a different scope. The solution would be a second syntax tree
+that stored the scoping rules.
+"""
+from . import nodes
+from .visitor import NodeTransformer
+
+
+def optimize(node, environment):
+ """The context hint can be used to perform an static optimization
+ based on the context given."""
+ optimizer = Optimizer(environment)
+ return optimizer.visit(node)
+
+
+class Optimizer(NodeTransformer):
+ def __init__(self, environment):
+ self.environment = environment
+
+ def generic_visit(self, node, *args, **kwargs):
+ node = super(Optimizer, self).generic_visit(node, *args, **kwargs)
+
+ # Do constant folding. Some other nodes besides Expr have
+ # as_const, but folding them causes errors later on.
+ if isinstance(node, nodes.Expr):
+ try:
+ return nodes.Const.from_untrusted(
+ node.as_const(args[0] if args else None),
+ lineno=node.lineno,
+ environment=self.environment,
+ )
+ except nodes.Impossible:
+ pass
+
+ return node
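+
+
+# A minimal end-to-end sketch: with a default (optimizing) environment,
+# the constant subtree in the template below is folded into a single
+# ``Const`` node before code generation.
+#
+#     >>> from jinja2 import Environment
+#     >>> Environment().from_string("{{ 1 + 2 * 3 }}").render()
+#     '7'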
diff --git a/third_party/python/Jinja2/jinja2/parser.py b/third_party/python/Jinja2/jinja2/parser.py
new file mode 100644
index 0000000000..d5881066f7
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/parser.py
@@ -0,0 +1,939 @@
+# -*- coding: utf-8 -*-
+"""Parse tokens from the lexer into nodes for the compiler."""
+from . import nodes
+from ._compat import imap
+from .exceptions import TemplateAssertionError
+from .exceptions import TemplateSyntaxError
+from .lexer import describe_token
+from .lexer import describe_token_expr
+
+_statement_keywords = frozenset(
+ [
+ "for",
+ "if",
+ "block",
+ "extends",
+ "print",
+ "macro",
+ "include",
+ "from",
+ "import",
+ "set",
+ "with",
+ "autoescape",
+ ]
+)
+_compare_operators = frozenset(["eq", "ne", "lt", "lteq", "gt", "gteq"])
+
+_math_nodes = {
+ "add": nodes.Add,
+ "sub": nodes.Sub,
+ "mul": nodes.Mul,
+ "div": nodes.Div,
+ "floordiv": nodes.FloorDiv,
+ "mod": nodes.Mod,
+}
+
+
+class Parser(object):
+ """This is the central parsing class Jinja uses. It's passed to
+ extensions and can be used to parse expressions or statements.
+ """
+
+ def __init__(self, environment, source, name=None, filename=None, state=None):
+ self.environment = environment
+ self.stream = environment._tokenize(source, name, filename, state)
+ self.name = name
+ self.filename = filename
+ self.closed = False
+ self.extensions = {}
+ for extension in environment.iter_extensions():
+ for tag in extension.tags:
+ self.extensions[tag] = extension.parse
+ self._last_identifier = 0
+ self._tag_stack = []
+ self._end_token_stack = []
+
+ def fail(self, msg, lineno=None, exc=TemplateSyntaxError):
+ """Convenience method that raises `exc` with the message, passed
+ line number or last line number as well as the current name and
+ filename.
+ """
+ if lineno is None:
+ lineno = self.stream.current.lineno
+ raise exc(msg, lineno, self.name, self.filename)
+
+ def _fail_ut_eof(self, name, end_token_stack, lineno):
+ expected = []
+ for exprs in end_token_stack:
+ expected.extend(imap(describe_token_expr, exprs))
+ if end_token_stack:
+ currently_looking = " or ".join(
+ "'%s'" % describe_token_expr(expr) for expr in end_token_stack[-1]
+ )
+ else:
+ currently_looking = None
+
+ if name is None:
+ message = ["Unexpected end of template."]
+ else:
+ message = ["Encountered unknown tag '%s'." % name]
+
+ if currently_looking:
+ if name is not None and name in expected:
+ message.append(
+ "You probably made a nesting mistake. Jinja "
+ "is expecting this tag, but currently looking "
+ "for %s." % currently_looking
+ )
+ else:
+ message.append(
+ "Jinja was looking for the following tags: "
+ "%s." % currently_looking
+ )
+
+ if self._tag_stack:
+ message.append(
+ "The innermost block that needs to be "
+ "closed is '%s'." % self._tag_stack[-1]
+ )
+
+ self.fail(" ".join(message), lineno)
+
+ def fail_unknown_tag(self, name, lineno=None):
+ """Called if the parser encounters an unknown tag. Tries to fail
+ with a human readable error message that could help to identify
+ the problem.
+ """
+ return self._fail_ut_eof(name, self._end_token_stack, lineno)
+
+ def fail_eof(self, end_tokens=None, lineno=None):
+ """Like fail_unknown_tag but for end of template situations."""
+ stack = list(self._end_token_stack)
+ if end_tokens is not None:
+ stack.append(end_tokens)
+ return self._fail_ut_eof(None, stack, lineno)
+
+ def is_tuple_end(self, extra_end_rules=None):
+ """Are we at the end of a tuple?"""
+ if self.stream.current.type in ("variable_end", "block_end", "rparen"):
+ return True
+ elif extra_end_rules is not None:
+ return self.stream.current.test_any(extra_end_rules)
+ return False
+
+ def free_identifier(self, lineno=None):
+ """Return a new free identifier as :class:`~jinja2.nodes.InternalName`."""
+ self._last_identifier += 1
+ rv = object.__new__(nodes.InternalName)
+ nodes.Node.__init__(rv, "fi%d" % self._last_identifier, lineno=lineno)
+ return rv
+
+ def parse_statement(self):
+ """Parse a single statement."""
+ token = self.stream.current
+ if token.type != "name":
+ self.fail("tag name expected", token.lineno)
+ self._tag_stack.append(token.value)
+ pop_tag = True
+ try:
+ if token.value in _statement_keywords:
+ return getattr(self, "parse_" + self.stream.current.value)()
+ if token.value == "call":
+ return self.parse_call_block()
+ if token.value == "filter":
+ return self.parse_filter_block()
+ ext = self.extensions.get(token.value)
+ if ext is not None:
+ return ext(self)
+
+ # did not work out, remove the token we pushed by accident
+ # from the stack so that the unknown tag fail function can
+ # produce a proper error message.
+ self._tag_stack.pop()
+ pop_tag = False
+ self.fail_unknown_tag(token.value, token.lineno)
+ finally:
+ if pop_tag:
+ self._tag_stack.pop()
+
+ def parse_statements(self, end_tokens, drop_needle=False):
+ """Parse multiple statements into a list until one of the end tokens
+ is reached. This is used to parse the body of statements as it also
+ parses template data if appropriate. The parser checks first if the
+ current token is a colon and skips it if there is one. Then it checks
+        for the block end and parses until one of the `end_tokens` is
+        reached. By default the active token in the stream at the end of
+ the call is the matched end token. If this is not wanted `drop_needle`
+ can be set to `True` and the end token is removed.
+ """
+ # the first token may be a colon for python compatibility
+ self.stream.skip_if("colon")
+
+ # in the future it would be possible to add whole code sections
+ # by adding some sort of end of statement token and parsing those here.
+ self.stream.expect("block_end")
+ result = self.subparse(end_tokens)
+
+ # we reached the end of the template too early, the subparser
+ # does not check for this, so we do that now
+ if self.stream.current.type == "eof":
+ self.fail_eof(end_tokens)
+
+ if drop_needle:
+ next(self.stream)
+ return result
+
+ def parse_set(self):
+ """Parse an assign statement."""
+ lineno = next(self.stream).lineno
+ target = self.parse_assign_target(with_namespace=True)
+ if self.stream.skip_if("assign"):
+ expr = self.parse_tuple()
+ return nodes.Assign(target, expr, lineno=lineno)
+ filter_node = self.parse_filter(None)
+ body = self.parse_statements(("name:endset",), drop_needle=True)
+ return nodes.AssignBlock(target, filter_node, body, lineno=lineno)
+
+ def parse_for(self):
+ """Parse a for loop."""
+ lineno = self.stream.expect("name:for").lineno
+ target = self.parse_assign_target(extra_end_rules=("name:in",))
+ self.stream.expect("name:in")
+ iter = self.parse_tuple(
+ with_condexpr=False, extra_end_rules=("name:recursive",)
+ )
+ test = None
+ if self.stream.skip_if("name:if"):
+ test = self.parse_expression()
+ recursive = self.stream.skip_if("name:recursive")
+ body = self.parse_statements(("name:endfor", "name:else"))
+ if next(self.stream).value == "endfor":
+ else_ = []
+ else:
+ else_ = self.parse_statements(("name:endfor",), drop_needle=True)
+ return nodes.For(target, iter, body, else_, test, recursive, lineno=lineno)
+
+ def parse_if(self):
+ """Parse an if construct."""
+ node = result = nodes.If(lineno=self.stream.expect("name:if").lineno)
+ while 1:
+ node.test = self.parse_tuple(with_condexpr=False)
+ node.body = self.parse_statements(("name:elif", "name:else", "name:endif"))
+ node.elif_ = []
+ node.else_ = []
+ token = next(self.stream)
+ if token.test("name:elif"):
+ node = nodes.If(lineno=self.stream.current.lineno)
+ result.elif_.append(node)
+ continue
+ elif token.test("name:else"):
+ result.else_ = self.parse_statements(("name:endif",), drop_needle=True)
+ break
+ return result
+
+ def parse_with(self):
+ node = nodes.With(lineno=next(self.stream).lineno)
+ targets = []
+ values = []
+ while self.stream.current.type != "block_end":
+ if targets:
+ self.stream.expect("comma")
+ target = self.parse_assign_target()
+ target.set_ctx("param")
+ targets.append(target)
+ self.stream.expect("assign")
+ values.append(self.parse_expression())
+ node.targets = targets
+ node.values = values
+ node.body = self.parse_statements(("name:endwith",), drop_needle=True)
+ return node
+
+ def parse_autoescape(self):
+ node = nodes.ScopedEvalContextModifier(lineno=next(self.stream).lineno)
+ node.options = [nodes.Keyword("autoescape", self.parse_expression())]
+ node.body = self.parse_statements(("name:endautoescape",), drop_needle=True)
+ return nodes.Scope([node])
+
+ def parse_block(self):
+ node = nodes.Block(lineno=next(self.stream).lineno)
+ node.name = self.stream.expect("name").value
+ node.scoped = self.stream.skip_if("name:scoped")
+
+ # common problem people encounter when switching from django
+ # to jinja. we do not support hyphens in block names, so let's
+ # raise a nicer error message in that case.
+ if self.stream.current.type == "sub":
+ self.fail(
+ "Block names in Jinja have to be valid Python "
+ "identifiers and may not contain hyphens, use an "
+ "underscore instead."
+ )
+
+ node.body = self.parse_statements(("name:endblock",), drop_needle=True)
+ self.stream.skip_if("name:" + node.name)
+ return node
+
+ def parse_extends(self):
+ node = nodes.Extends(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ return node
+
+ def parse_import_context(self, node, default):
+ if self.stream.current.test_any(
+ "name:with", "name:without"
+ ) and self.stream.look().test("name:context"):
+ node.with_context = next(self.stream).value == "with"
+ self.stream.skip()
+ else:
+ node.with_context = default
+ return node
+
+ def parse_include(self):
+ node = nodes.Include(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ if self.stream.current.test("name:ignore") and self.stream.look().test(
+ "name:missing"
+ ):
+ node.ignore_missing = True
+ self.stream.skip(2)
+ else:
+ node.ignore_missing = False
+ return self.parse_import_context(node, True)
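+
+    # A clarifying sketch: ``{% include "a.html" ignore missing %}`` yields
+    # an Include node with ``ignore_missing=True`` and, absent an explicit
+    # ``with/without context`` modifier, ``with_context=True``.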
+
+ def parse_import(self):
+ node = nodes.Import(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ self.stream.expect("name:as")
+ node.target = self.parse_assign_target(name_only=True).name
+ return self.parse_import_context(node, False)
+
+ def parse_from(self):
+ node = nodes.FromImport(lineno=next(self.stream).lineno)
+ node.template = self.parse_expression()
+ self.stream.expect("name:import")
+ node.names = []
+
+ def parse_context():
+ if self.stream.current.value in (
+ "with",
+ "without",
+ ) and self.stream.look().test("name:context"):
+ node.with_context = next(self.stream).value == "with"
+ self.stream.skip()
+ return True
+ return False
+
+ while 1:
+ if node.names:
+ self.stream.expect("comma")
+ if self.stream.current.type == "name":
+ if parse_context():
+ break
+ target = self.parse_assign_target(name_only=True)
+ if target.name.startswith("_"):
+ self.fail(
+ "names starting with an underline can not be imported",
+ target.lineno,
+ exc=TemplateAssertionError,
+ )
+ if self.stream.skip_if("name:as"):
+ alias = self.parse_assign_target(name_only=True)
+ node.names.append((target.name, alias.name))
+ else:
+ node.names.append(target.name)
+ if parse_context() or self.stream.current.type != "comma":
+ break
+ else:
+ self.stream.expect("name")
+ if not hasattr(node, "with_context"):
+ node.with_context = False
+ return node
+
+ def parse_signature(self, node):
+ node.args = args = []
+ node.defaults = defaults = []
+ self.stream.expect("lparen")
+ while self.stream.current.type != "rparen":
+ if args:
+ self.stream.expect("comma")
+ arg = self.parse_assign_target(name_only=True)
+ arg.set_ctx("param")
+ if self.stream.skip_if("assign"):
+ defaults.append(self.parse_expression())
+ elif defaults:
+ self.fail("non-default argument follows default argument")
+ args.append(arg)
+ self.stream.expect("rparen")
+
+ def parse_call_block(self):
+ node = nodes.CallBlock(lineno=next(self.stream).lineno)
+ if self.stream.current.type == "lparen":
+ self.parse_signature(node)
+ else:
+ node.args = []
+ node.defaults = []
+
+ node.call = self.parse_expression()
+ if not isinstance(node.call, nodes.Call):
+ self.fail("expected call", node.lineno)
+ node.body = self.parse_statements(("name:endcall",), drop_needle=True)
+ return node
+
+ def parse_filter_block(self):
+ node = nodes.FilterBlock(lineno=next(self.stream).lineno)
+ node.filter = self.parse_filter(None, start_inline=True)
+ node.body = self.parse_statements(("name:endfilter",), drop_needle=True)
+ return node
+
+ def parse_macro(self):
+ node = nodes.Macro(lineno=next(self.stream).lineno)
+ node.name = self.parse_assign_target(name_only=True).name
+ self.parse_signature(node)
+ node.body = self.parse_statements(("name:endmacro",), drop_needle=True)
+ return node
+
+ def parse_print(self):
+ node = nodes.Output(lineno=next(self.stream).lineno)
+ node.nodes = []
+ while self.stream.current.type != "block_end":
+ if node.nodes:
+ self.stream.expect("comma")
+ node.nodes.append(self.parse_expression())
+ return node
+
+ def parse_assign_target(
+ self,
+ with_tuple=True,
+ name_only=False,
+ extra_end_rules=None,
+ with_namespace=False,
+ ):
+ """Parse an assignment target. As Jinja allows assignments to
+ tuples, this function can parse all allowed assignment targets. Per
+ default assignments to tuples are parsed, that can be disable however
+ by setting `with_tuple` to `False`. If only assignments to names are
+ wanted `name_only` can be set to `True`. The `extra_end_rules`
+ parameter is forwarded to the tuple parsing function. If
+ `with_namespace` is enabled, a namespace assignment may be parsed.
+ """
+ if with_namespace and self.stream.look().type == "dot":
+ token = self.stream.expect("name")
+ next(self.stream) # dot
+ attr = self.stream.expect("name")
+ target = nodes.NSRef(token.value, attr.value, lineno=token.lineno)
+ elif name_only:
+ token = self.stream.expect("name")
+ target = nodes.Name(token.value, "store", lineno=token.lineno)
+ else:
+ if with_tuple:
+ target = self.parse_tuple(
+ simplified=True, extra_end_rules=extra_end_rules
+ )
+ else:
+ target = self.parse_primary()
+ target.set_ctx("store")
+ if not target.can_assign():
+ self.fail(
+ "can't assign to %r" % target.__class__.__name__.lower(), target.lineno
+ )
+ return target
+
+ def parse_expression(self, with_condexpr=True):
+ """Parse an expression. Per default all expressions are parsed, if
+ the optional `with_condexpr` parameter is set to `False` conditional
+ expressions are not parsed.
+ """
+ if with_condexpr:
+ return self.parse_condexpr()
+ return self.parse_or()
+
+ def parse_condexpr(self):
+ lineno = self.stream.current.lineno
+ expr1 = self.parse_or()
+ while self.stream.skip_if("name:if"):
+ expr2 = self.parse_or()
+ if self.stream.skip_if("name:else"):
+ expr3 = self.parse_condexpr()
+ else:
+ expr3 = None
+ expr1 = nodes.CondExpr(expr2, expr1, expr3, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return expr1
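+
+    # A clarifying sketch of the node built above: for ``{{ a if b else c }}``
+    # the value expression is parsed first, so the local ``expr2`` holds the
+    # *test* and the resulting node is
+    #
+    #     CondExpr(test=b, expr1=a, expr2=c)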
+
+ def parse_or(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_and()
+ while self.stream.skip_if("name:or"):
+ right = self.parse_and()
+ left = nodes.Or(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_and(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_not()
+ while self.stream.skip_if("name:and"):
+ right = self.parse_not()
+ left = nodes.And(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_not(self):
+ if self.stream.current.test("name:not"):
+ lineno = next(self.stream).lineno
+ return nodes.Not(self.parse_not(), lineno=lineno)
+ return self.parse_compare()
+
+ def parse_compare(self):
+ lineno = self.stream.current.lineno
+ expr = self.parse_math1()
+ ops = []
+ while 1:
+ token_type = self.stream.current.type
+ if token_type in _compare_operators:
+ next(self.stream)
+ ops.append(nodes.Operand(token_type, self.parse_math1()))
+ elif self.stream.skip_if("name:in"):
+ ops.append(nodes.Operand("in", self.parse_math1()))
+ elif self.stream.current.test("name:not") and self.stream.look().test(
+ "name:in"
+ ):
+ self.stream.skip(2)
+ ops.append(nodes.Operand("notin", self.parse_math1()))
+ else:
+ break
+ lineno = self.stream.current.lineno
+ if not ops:
+ return expr
+ return nodes.Compare(expr, ops, lineno=lineno)
+
+ def parse_math1(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_concat()
+ while self.stream.current.type in ("add", "sub"):
+ cls = _math_nodes[self.stream.current.type]
+ next(self.stream)
+ right = self.parse_concat()
+ left = cls(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_concat(self):
+ lineno = self.stream.current.lineno
+ args = [self.parse_math2()]
+ while self.stream.current.type == "tilde":
+ next(self.stream)
+ args.append(self.parse_math2())
+ if len(args) == 1:
+ return args[0]
+ return nodes.Concat(args, lineno=lineno)
+
+ def parse_math2(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_pow()
+ while self.stream.current.type in ("mul", "div", "floordiv", "mod"):
+ cls = _math_nodes[self.stream.current.type]
+ next(self.stream)
+ right = self.parse_pow()
+ left = cls(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_pow(self):
+ lineno = self.stream.current.lineno
+ left = self.parse_unary()
+ while self.stream.current.type == "pow":
+ next(self.stream)
+ right = self.parse_unary()
+ left = nodes.Pow(left, right, lineno=lineno)
+ lineno = self.stream.current.lineno
+ return left
+
+ def parse_unary(self, with_filter=True):
+ token_type = self.stream.current.type
+ lineno = self.stream.current.lineno
+ if token_type == "sub":
+ next(self.stream)
+ node = nodes.Neg(self.parse_unary(False), lineno=lineno)
+ elif token_type == "add":
+ next(self.stream)
+ node = nodes.Pos(self.parse_unary(False), lineno=lineno)
+ else:
+ node = self.parse_primary()
+ node = self.parse_postfix(node)
+ if with_filter:
+ node = self.parse_filter_expr(node)
+ return node
+
+ def parse_primary(self):
+ token = self.stream.current
+ if token.type == "name":
+ if token.value in ("true", "false", "True", "False"):
+ node = nodes.Const(token.value in ("true", "True"), lineno=token.lineno)
+ elif token.value in ("none", "None"):
+ node = nodes.Const(None, lineno=token.lineno)
+ else:
+ node = nodes.Name(token.value, "load", lineno=token.lineno)
+ next(self.stream)
+ elif token.type == "string":
+ next(self.stream)
+ buf = [token.value]
+ lineno = token.lineno
+ while self.stream.current.type == "string":
+ buf.append(self.stream.current.value)
+ next(self.stream)
+ node = nodes.Const("".join(buf), lineno=lineno)
+ elif token.type in ("integer", "float"):
+ next(self.stream)
+ node = nodes.Const(token.value, lineno=token.lineno)
+ elif token.type == "lparen":
+ next(self.stream)
+ node = self.parse_tuple(explicit_parentheses=True)
+ self.stream.expect("rparen")
+ elif token.type == "lbracket":
+ node = self.parse_list()
+ elif token.type == "lbrace":
+ node = self.parse_dict()
+ else:
+ self.fail("unexpected '%s'" % describe_token(token), token.lineno)
+ return node
+
+ def parse_tuple(
+ self,
+ simplified=False,
+ with_condexpr=True,
+ extra_end_rules=None,
+ explicit_parentheses=False,
+ ):
+ """Works like `parse_expression` but if multiple expressions are
+ delimited by a comma a :class:`~jinja2.nodes.Tuple` node is created.
+ This method could also return a regular expression instead of a tuple
+        if no commas were found.
+
+ The default parsing mode is a full tuple. If `simplified` is `True`
+        only names and literals are parsed. The `with_condexpr` parameter is
+ forwarded to :meth:`parse_expression`.
+
+        Because tuples do not require delimiters and may end in a bogus comma,
+ an extra hint is needed that marks the end of a tuple. For example
+ for loops support tuples between `for` and `in`. In that case the
+ `extra_end_rules` is set to ``['name:in']``.
+
+ `explicit_parentheses` is true if the parsing was triggered by an
+ expression in parentheses. This is used to figure out if an empty
+ tuple is a valid expression or not.
+ """
+ lineno = self.stream.current.lineno
+ if simplified:
+ parse = self.parse_primary
+ elif with_condexpr:
+ parse = self.parse_expression
+ else:
+
+ def parse():
+ return self.parse_expression(with_condexpr=False)
+
+ args = []
+ is_tuple = False
+ while 1:
+ if args:
+ self.stream.expect("comma")
+ if self.is_tuple_end(extra_end_rules):
+ break
+ args.append(parse())
+ if self.stream.current.type == "comma":
+ is_tuple = True
+ else:
+ break
+ lineno = self.stream.current.lineno
+
+ if not is_tuple:
+ if args:
+ return args[0]
+
+ # if we don't have explicit parentheses, an empty tuple is
+ # not a valid expression. This would mean nothing (literally
+ # nothing) in the spot of an expression would be an empty
+ # tuple.
+ if not explicit_parentheses:
+ self.fail(
+ "Expected an expression, got '%s'"
+ % describe_token(self.stream.current)
+ )
+
+ return nodes.Tuple(args, "load", lineno=lineno)
+
+ def parse_list(self):
+ token = self.stream.expect("lbracket")
+ items = []
+ while self.stream.current.type != "rbracket":
+ if items:
+ self.stream.expect("comma")
+ if self.stream.current.type == "rbracket":
+ break
+ items.append(self.parse_expression())
+ self.stream.expect("rbracket")
+ return nodes.List(items, lineno=token.lineno)
+
+ def parse_dict(self):
+ token = self.stream.expect("lbrace")
+ items = []
+ while self.stream.current.type != "rbrace":
+ if items:
+ self.stream.expect("comma")
+ if self.stream.current.type == "rbrace":
+ break
+ key = self.parse_expression()
+ self.stream.expect("colon")
+ value = self.parse_expression()
+ items.append(nodes.Pair(key, value, lineno=key.lineno))
+ self.stream.expect("rbrace")
+ return nodes.Dict(items, lineno=token.lineno)
+
+ def parse_postfix(self, node):
+ while 1:
+ token_type = self.stream.current.type
+ if token_type == "dot" or token_type == "lbracket":
+ node = self.parse_subscript(node)
+ # calls are valid both after postfix expressions (getattr
+ # and getitem) as well as filters and tests
+ elif token_type == "lparen":
+ node = self.parse_call(node)
+ else:
+ break
+ return node
+
+ def parse_filter_expr(self, node):
+ while 1:
+ token_type = self.stream.current.type
+ if token_type == "pipe":
+ node = self.parse_filter(node)
+ elif token_type == "name" and self.stream.current.value == "is":
+ node = self.parse_test(node)
+ # calls are valid both after postfix expressions (getattr
+ # and getitem) as well as filters and tests
+ elif token_type == "lparen":
+ node = self.parse_call(node)
+ else:
+ break
+ return node
+
+ def parse_subscript(self, node):
+ token = next(self.stream)
+ if token.type == "dot":
+ attr_token = self.stream.current
+ next(self.stream)
+ if attr_token.type == "name":
+ return nodes.Getattr(
+ node, attr_token.value, "load", lineno=token.lineno
+ )
+ elif attr_token.type != "integer":
+ self.fail("expected name or number", attr_token.lineno)
+ arg = nodes.Const(attr_token.value, lineno=attr_token.lineno)
+ return nodes.Getitem(node, arg, "load", lineno=token.lineno)
+ if token.type == "lbracket":
+ args = []
+ while self.stream.current.type != "rbracket":
+ if args:
+ self.stream.expect("comma")
+ args.append(self.parse_subscribed())
+ self.stream.expect("rbracket")
+ if len(args) == 1:
+ arg = args[0]
+ else:
+ arg = nodes.Tuple(args, "load", lineno=token.lineno)
+ return nodes.Getitem(node, arg, "load", lineno=token.lineno)
+ self.fail("expected subscript expression", token.lineno)
+
+ def parse_subscribed(self):
+ lineno = self.stream.current.lineno
+
+ if self.stream.current.type == "colon":
+ next(self.stream)
+ args = [None]
+ else:
+ node = self.parse_expression()
+ if self.stream.current.type != "colon":
+ return node
+ next(self.stream)
+ args = [node]
+
+ if self.stream.current.type == "colon":
+ args.append(None)
+ elif self.stream.current.type not in ("rbracket", "comma"):
+ args.append(self.parse_expression())
+ else:
+ args.append(None)
+
+ if self.stream.current.type == "colon":
+ next(self.stream)
+ if self.stream.current.type not in ("rbracket", "comma"):
+ args.append(self.parse_expression())
+ else:
+ args.append(None)
+ else:
+ args.append(None)
+
+ return nodes.Slice(lineno=lineno, *args)
+
+ def parse_call(self, node):
+ token = self.stream.expect("lparen")
+ args = []
+ kwargs = []
+ dyn_args = dyn_kwargs = None
+ require_comma = False
+
+ def ensure(expr):
+ if not expr:
+ self.fail("invalid syntax for function call expression", token.lineno)
+
+ while self.stream.current.type != "rparen":
+ if require_comma:
+ self.stream.expect("comma")
+ # support for trailing comma
+ if self.stream.current.type == "rparen":
+ break
+ if self.stream.current.type == "mul":
+ ensure(dyn_args is None and dyn_kwargs is None)
+ next(self.stream)
+ dyn_args = self.parse_expression()
+ elif self.stream.current.type == "pow":
+ ensure(dyn_kwargs is None)
+ next(self.stream)
+ dyn_kwargs = self.parse_expression()
+ else:
+ if (
+ self.stream.current.type == "name"
+ and self.stream.look().type == "assign"
+ ):
+ # Parsing a kwarg
+ ensure(dyn_kwargs is None)
+ key = self.stream.current.value
+ self.stream.skip(2)
+ value = self.parse_expression()
+ kwargs.append(nodes.Keyword(key, value, lineno=value.lineno))
+ else:
+ # Parsing an arg
+ ensure(dyn_args is None and dyn_kwargs is None and not kwargs)
+ args.append(self.parse_expression())
+
+ require_comma = True
+ self.stream.expect("rparen")
+
+ if node is None:
+ return args, kwargs, dyn_args, dyn_kwargs
+ return nodes.Call(node, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno)
+
+ def parse_filter(self, node, start_inline=False):
+ while self.stream.current.type == "pipe" or start_inline:
+ if not start_inline:
+ next(self.stream)
+ token = self.stream.expect("name")
+ name = token.value
+ while self.stream.current.type == "dot":
+ next(self.stream)
+ name += "." + self.stream.expect("name").value
+ if self.stream.current.type == "lparen":
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+ else:
+ args = []
+ kwargs = []
+ dyn_args = dyn_kwargs = None
+ node = nodes.Filter(
+ node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
+ )
+ start_inline = False
+ return node
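+
+    # A clarifying sketch: ``{{ x|upper|truncate(5) }}`` parses into nested
+    # Filter nodes, innermost filter first:
+    #
+    #     Filter(Filter(Name("x", "load"), "upper", [], [], None, None),
+    #            "truncate", [Const(5)], [], None, None)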
+
+ def parse_test(self, node):
+ token = next(self.stream)
+ if self.stream.current.test("name:not"):
+ next(self.stream)
+ negated = True
+ else:
+ negated = False
+ name = self.stream.expect("name").value
+ while self.stream.current.type == "dot":
+ next(self.stream)
+ name += "." + self.stream.expect("name").value
+ dyn_args = dyn_kwargs = None
+ kwargs = []
+ if self.stream.current.type == "lparen":
+ args, kwargs, dyn_args, dyn_kwargs = self.parse_call(None)
+ elif self.stream.current.type in (
+ "name",
+ "string",
+ "integer",
+ "float",
+ "lparen",
+ "lbracket",
+ "lbrace",
+ ) and not self.stream.current.test_any("name:else", "name:or", "name:and"):
+ if self.stream.current.test("name:is"):
+ self.fail("You cannot chain multiple tests with is")
+ arg_node = self.parse_primary()
+ arg_node = self.parse_postfix(arg_node)
+ args = [arg_node]
+ else:
+ args = []
+ node = nodes.Test(
+ node, name, args, kwargs, dyn_args, dyn_kwargs, lineno=token.lineno
+ )
+ if negated:
+ node = nodes.Not(node, lineno=token.lineno)
+ return node
+
+ def subparse(self, end_tokens=None):
+ body = []
+ data_buffer = []
+ add_data = data_buffer.append
+
+ if end_tokens is not None:
+ self._end_token_stack.append(end_tokens)
+
+ def flush_data():
+ if data_buffer:
+ lineno = data_buffer[0].lineno
+ body.append(nodes.Output(data_buffer[:], lineno=lineno))
+ del data_buffer[:]
+
+ try:
+ while self.stream:
+ token = self.stream.current
+ if token.type == "data":
+ if token.value:
+ add_data(nodes.TemplateData(token.value, lineno=token.lineno))
+ next(self.stream)
+ elif token.type == "variable_begin":
+ next(self.stream)
+ add_data(self.parse_tuple(with_condexpr=True))
+ self.stream.expect("variable_end")
+ elif token.type == "block_begin":
+ flush_data()
+ next(self.stream)
+ if end_tokens is not None and self.stream.current.test_any(
+ *end_tokens
+ ):
+ return body
+ rv = self.parse_statement()
+ if isinstance(rv, list):
+ body.extend(rv)
+ else:
+ body.append(rv)
+ self.stream.expect("block_end")
+ else:
+ raise AssertionError("internal parsing error")
+
+ flush_data()
+ finally:
+ if end_tokens is not None:
+ self._end_token_stack.pop()
+
+ return body
+
+ def parse(self):
+ """Parse the whole template into a `Template` node."""
+ result = nodes.Template(self.subparse(), lineno=1)
+ result.set_environment(self.environment)
+ return result
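+
+    # A minimal usage sketch (``env`` is a hypothetical Environment):
+    #
+    #     Parser(env, "Hello {{ name }}!").parse()
+    #     # -> Template([Output([TemplateData('Hello '),
+    #     #                      Name('name', 'load'),
+    #     #                      TemplateData('!')])])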
diff --git a/third_party/python/Jinja2/jinja2/runtime.py b/third_party/python/Jinja2/jinja2/runtime.py
new file mode 100644
index 0000000000..3ad7968624
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/runtime.py
@@ -0,0 +1,1011 @@
+# -*- coding: utf-8 -*-
+"""The runtime functions and state used by compiled templates."""
+import sys
+from itertools import chain
+from types import MethodType
+
+from markupsafe import escape # noqa: F401
+from markupsafe import Markup
+from markupsafe import soft_unicode
+
+from ._compat import abc
+from ._compat import imap
+from ._compat import implements_iterator
+from ._compat import implements_to_string
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import with_metaclass
+from .exceptions import TemplateNotFound # noqa: F401
+from .exceptions import TemplateRuntimeError # noqa: F401
+from .exceptions import UndefinedError
+from .nodes import EvalContext
+from .utils import concat
+from .utils import evalcontextfunction
+from .utils import internalcode
+from .utils import missing
+from .utils import Namespace # noqa: F401
+from .utils import object_type_repr
+
+# these variables are exported to the template runtime
+exported = [
+ "LoopContext",
+ "TemplateReference",
+ "Macro",
+ "Markup",
+ "TemplateRuntimeError",
+ "missing",
+ "concat",
+ "escape",
+ "markup_join",
+ "unicode_join",
+ "to_string",
+ "identity",
+ "TemplateNotFound",
+ "Namespace",
+ "Undefined",
+]
+
+#: the name of the function that is used to convert something into
+#: a string. We can just use the text type here.
+to_string = text_type
+
+
+def identity(x):
+ """Returns its argument. Useful for certain things in the
+ environment.
+ """
+ return x
+
+
+def markup_join(seq):
+ """Concatenation that escapes if necessary and converts to unicode."""
+ buf = []
+ iterator = imap(soft_unicode, seq)
+ for arg in iterator:
+ buf.append(arg)
+ if hasattr(arg, "__html__"):
+ return Markup(u"").join(chain(buf, iterator))
+ return concat(buf)
+
+
+def unicode_join(seq):
+ """Simple args to unicode conversion and concatenation."""
+ return concat(imap(text_type, seq))
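+
+
+# A minimal sketch of the escaping behavior above: as soon as one argument
+# is markup, the remaining plain strings are escaped as well.
+#
+#     >>> markup_join(["<b>", Markup("<i>safe</i>")])
+#     Markup('&lt;b&gt;<i>safe</i>')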
+
+
+def new_context(
+ environment,
+ template_name,
+ blocks,
+ vars=None,
+ shared=None,
+ globals=None,
+ locals=None,
+):
+ """Internal helper for context creation."""
+ if vars is None:
+ vars = {}
+ if shared:
+ parent = vars
+ else:
+ parent = dict(globals or (), **vars)
+ if locals:
+ # if the parent is shared a copy should be created because
+ # we don't want to modify the dict passed
+ if shared:
+ parent = dict(parent)
+ for key, value in iteritems(locals):
+ if value is not missing:
+ parent[key] = value
+ return environment.context_class(environment, parent, template_name, blocks)
+
+
+class TemplateReference(object):
+ """The `self` in templates."""
+
+ def __init__(self, context):
+ self.__context = context
+
+ def __getitem__(self, name):
+ blocks = self.__context.blocks[name]
+ return BlockReference(name, self.__context, blocks, 0)
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self.__context.name)
+
+
+def _get_func(x):
+ return getattr(x, "__func__", x)
+
+
+class ContextMeta(type):
+ def __new__(mcs, name, bases, d):
+ rv = type.__new__(mcs, name, bases, d)
+ if bases == ():
+ return rv
+
+ resolve = _get_func(rv.resolve)
+ default_resolve = _get_func(Context.resolve)
+ resolve_or_missing = _get_func(rv.resolve_or_missing)
+ default_resolve_or_missing = _get_func(Context.resolve_or_missing)
+
+ # If we have a changed resolve but no changed default or missing
+ # resolve we invert the call logic.
+ if (
+ resolve is not default_resolve
+ and resolve_or_missing is default_resolve_or_missing
+ ):
+ rv._legacy_resolve_mode = True
+ elif (
+ resolve is default_resolve
+ and resolve_or_missing is default_resolve_or_missing
+ ):
+ rv._fast_resolve_mode = True
+
+ return rv
+
+
+def resolve_or_missing(context, key, missing=missing):
+ if key in context.vars:
+ return context.vars[key]
+ if key in context.parent:
+ return context.parent[key]
+ return missing
+
+
+class Context(with_metaclass(ContextMeta)):
+ """The template context holds the variables of a template. It stores the
+ values passed to the template and also the names the template exports.
+ Creating instances is neither supported nor useful as it's created
+ automatically at various stages of the template evaluation and should not
+ be created by hand.
+
+ The context is immutable. Modifications on :attr:`parent` **must not**
+ happen and modifications on :attr:`vars` are allowed from generated
+ template code only. Template filters and global functions marked as
+ :func:`contextfunction`\\s get the active context passed as first argument
+ and are allowed to access the context read-only.
+
+ The template context supports read only dict operations (`get`,
+ `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
+ `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
+ method that doesn't fail with a `KeyError` but returns an
+ :class:`Undefined` object for missing variables.
+ """
+
+    # XXX: we want to eventually make this a deprecation warning and
+ # remove it.
+ _legacy_resolve_mode = False
+ _fast_resolve_mode = False
+
+ def __init__(self, environment, parent, name, blocks):
+ self.parent = parent
+ self.vars = {}
+ self.environment = environment
+ self.eval_ctx = EvalContext(self.environment, name)
+ self.exported_vars = set()
+ self.name = name
+
+ # create the initial mapping of blocks. Whenever template inheritance
+ # takes place the runtime will update this mapping with the new blocks
+ # from the template.
+ self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
+
+ # In case we detect the fast resolve mode we can set up an alias
+ # here that bypasses the legacy code logic.
+ if self._fast_resolve_mode:
+ self.resolve_or_missing = MethodType(resolve_or_missing, self)
+
+ def super(self, name, current):
+ """Render a parent block."""
+ try:
+ blocks = self.blocks[name]
+ index = blocks.index(current) + 1
+ blocks[index]
+ except LookupError:
+ return self.environment.undefined(
+ "there is no parent block called %r." % name, name="super"
+ )
+ return BlockReference(name, self, blocks, index)
+
+ def get(self, key, default=None):
+ """Returns an item from the template context, if it doesn't exist
+ `default` is returned.
+ """
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def resolve(self, key):
+ """Looks up a variable like `__getitem__` or `get` but returns an
+        :class:`Undefined` object with the name of the variable looked up.
+ """
+ if self._legacy_resolve_mode:
+ rv = resolve_or_missing(self, key)
+ else:
+ rv = self.resolve_or_missing(key)
+ if rv is missing:
+ return self.environment.undefined(name=key)
+ return rv
+
+ def resolve_or_missing(self, key):
+ """Resolves a variable like :meth:`resolve` but returns the
+ special `missing` value if it cannot be found.
+ """
+ if self._legacy_resolve_mode:
+ rv = self.resolve(key)
+ if isinstance(rv, Undefined):
+ rv = missing
+ return rv
+ return resolve_or_missing(self, key)
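+
+    # A minimal sketch (``ctx`` is a hypothetical Context instance):
+    #
+    #     ctx.resolve("nope")                        # -> Undefined object
+    #     ctx.resolve_or_missing("nope") is missing  # -> True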
+
+ def get_exported(self):
+ """Get a new dict with the exported variables."""
+ return dict((k, self.vars[k]) for k in self.exported_vars)
+
+ def get_all(self):
+ """Return the complete context as dict including the exported
+        variables. For optimization reasons this might not return an
+        actual copy, so be careful when using it.
+ """
+ if not self.vars:
+ return self.parent
+ if not self.parent:
+ return self.vars
+ return dict(self.parent, **self.vars)
+
+ @internalcode
+ def call(__self, __obj, *args, **kwargs): # noqa: B902
+ """Call the callable with the arguments and keyword arguments
+ provided but inject the active context or environment as first
+ argument if the callable is a :func:`contextfunction` or
+ :func:`environmentfunction`.
+ """
+ if __debug__:
+ __traceback_hide__ = True # noqa
+
+ # Allow callable classes to take a context
+ if hasattr(__obj, "__call__"): # noqa: B004
+ fn = __obj.__call__
+ for fn_type in (
+ "contextfunction",
+ "evalcontextfunction",
+ "environmentfunction",
+ ):
+ if hasattr(fn, fn_type):
+ __obj = fn
+ break
+
+ if callable(__obj):
+ if getattr(__obj, "contextfunction", False) is True:
+ args = (__self,) + args
+ elif getattr(__obj, "evalcontextfunction", False) is True:
+ args = (__self.eval_ctx,) + args
+ elif getattr(__obj, "environmentfunction", False) is True:
+ args = (__self.environment,) + args
+ try:
+ return __obj(*args, **kwargs)
+ except StopIteration:
+ return __self.environment.undefined(
+ "value was undefined because "
+ "a callable raised a "
+ "StopIteration exception"
+ )
+
+ def derived(self, locals=None):
+ """Internal helper function to create a derived context. This is
+        used in situations where the system needs a new, independent context
+        for the same template.
+ """
+ context = new_context(
+ self.environment, self.name, {}, self.get_all(), True, None, locals
+ )
+ context.eval_ctx = self.eval_ctx
+ context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
+ return context
+
+ def _all(meth): # noqa: B902
+ def proxy(self):
+ return getattr(self.get_all(), meth)()
+
+ proxy.__doc__ = getattr(dict, meth).__doc__
+ proxy.__name__ = meth
+ return proxy
+
+ keys = _all("keys")
+ values = _all("values")
+ items = _all("items")
+
+ # not available on python 3
+ if PY2:
+ iterkeys = _all("iterkeys")
+ itervalues = _all("itervalues")
+ iteritems = _all("iteritems")
+ del _all
+
+ def __contains__(self, name):
+ return name in self.vars or name in self.parent
+
+ def __getitem__(self, key):
+ """Lookup a variable or raise `KeyError` if the variable is
+ undefined.
+ """
+ item = self.resolve_or_missing(key)
+ if item is missing:
+ raise KeyError(key)
+ return item
+
+ def __repr__(self):
+ return "<%s %s of %r>" % (
+ self.__class__.__name__,
+ repr(self.get_all()),
+ self.name,
+ )
+
+
+abc.Mapping.register(Context)
+
+
+class BlockReference(object):
+ """One block on a template reference."""
+
+ def __init__(self, name, context, stack, depth):
+ self.name = name
+ self._context = context
+ self._stack = stack
+ self._depth = depth
+
+ @property
+ def super(self):
+ """Super the block."""
+ if self._depth + 1 >= len(self._stack):
+ return self._context.environment.undefined(
+ "there is no parent block called %r." % self.name, name="super"
+ )
+ return BlockReference(self.name, self._context, self._stack, self._depth + 1)
+
+ @internalcode
+ def __call__(self):
+ rv = concat(self._stack[self._depth](self._context))
+ if self._context.eval_ctx.autoescape:
+ rv = Markup(rv)
+ return rv
+
+
+@implements_iterator
+class LoopContext:
+ """A wrapper iterable for dynamic ``for`` loops, with information
+ about the loop and iteration.
+ """
+
+ #: Current iteration of the loop, starting at 0.
+ index0 = -1
+
+ _length = None
+ _after = missing
+ _current = missing
+ _before = missing
+ _last_changed_value = missing
+
+ def __init__(self, iterable, undefined, recurse=None, depth0=0):
+ """
+ :param iterable: Iterable to wrap.
+ :param undefined: :class:`Undefined` class to use for next and
+ previous items.
+ :param recurse: The function to render the loop body when the
+ loop is marked recursive.
+ :param depth0: Incremented when looping recursively.
+ """
+ self._iterable = iterable
+ self._iterator = self._to_iterator(iterable)
+ self._undefined = undefined
+ self._recurse = recurse
+ #: How many levels deep a recursive loop currently is, starting at 0.
+ self.depth0 = depth0
+
+ @staticmethod
+ def _to_iterator(iterable):
+ return iter(iterable)
+
+ @property
+ def length(self):
+ """Length of the iterable.
+
+ If the iterable is a generator or otherwise does not have a
+ size, it is eagerly evaluated to get a size.
+ """
+ if self._length is not None:
+ return self._length
+
+ try:
+ self._length = len(self._iterable)
+ except TypeError:
+ iterable = list(self._iterator)
+ self._iterator = self._to_iterator(iterable)
+ self._length = len(iterable) + self.index + (self._after is not missing)
+
+ return self._length
+
+ def __len__(self):
+ return self.length
+
+ @property
+ def depth(self):
+ """How many levels deep a recursive loop currently is, starting at 1."""
+ return self.depth0 + 1
+
+ @property
+ def index(self):
+ """Current iteration of the loop, starting at 1."""
+ return self.index0 + 1
+
+ @property
+ def revindex0(self):
+ """Number of iterations from the end of the loop, ending at 0.
+
+ Requires calculating :attr:`length`.
+ """
+ return self.length - self.index
+
+ @property
+ def revindex(self):
+ """Number of iterations from the end of the loop, ending at 1.
+
+ Requires calculating :attr:`length`.
+ """
+ return self.length - self.index0
+
+ @property
+ def first(self):
+ """Whether this is the first iteration of the loop."""
+ return self.index0 == 0
+
+ def _peek_next(self):
+ """Return the next element in the iterable, or :data:`missing`
+ if the iterable is exhausted. Only peeks one item ahead, caching
+        the result in :attr:`_after` for use in subsequent checks. The
+ cache is reset when :meth:`__next__` is called.
+ """
+ if self._after is not missing:
+ return self._after
+
+ self._after = next(self._iterator, missing)
+ return self._after
+
+ @property
+ def last(self):
+ """Whether this is the last iteration of the loop.
+
+ Causes the iterable to advance early. See
+ :func:`itertools.groupby` for issues this can cause.
+ The :func:`groupby` filter avoids that issue.
+ """
+ return self._peek_next() is missing
+
+ @property
+ def previtem(self):
+ """The item in the previous iteration. Undefined during the
+ first iteration.
+ """
+ if self.first:
+ return self._undefined("there is no previous item")
+
+ return self._before
+
+ @property
+ def nextitem(self):
+ """The item in the next iteration. Undefined during the last
+ iteration.
+
+ Causes the iterable to advance early. See
+ :func:`itertools.groupby` for issues this can cause.
+ The :func:`groupby` filter avoids that issue.
+ """
+ rv = self._peek_next()
+
+ if rv is missing:
+ return self._undefined("there is no next item")
+
+ return rv
+
+ def cycle(self, *args):
+ """Return a value from the given args, cycling through based on
+ the current :attr:`index0`.
+
+ :param args: One or more values to cycle through.
+ """
+ if not args:
+ raise TypeError("no items for cycling given")
+
+ return args[self.index0 % len(args)]
+
+ def changed(self, *value):
+ """Return ``True`` if previously called with a different value
+ (including when called for the first time).
+
+ :param value: One or more values to compare to the last call.
+ """
+ if self._last_changed_value != value:
+ self._last_changed_value = value
+ return True
+
+ return False
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self._after is not missing:
+ rv = self._after
+ self._after = missing
+ else:
+ rv = next(self._iterator)
+
+ self.index0 += 1
+ self._before = self._current
+ self._current = rv
+ return rv, self
+
+ @internalcode
+ def __call__(self, iterable):
+ """When iterating over nested data, render the body of the loop
+ recursively with the given inner iterable data.
+
+ The loop must have the ``recursive`` marker for this to work.
+ """
+ if self._recurse is None:
+ raise TypeError(
+ "The loop must have the 'recursive' marker to be called recursively."
+ )
+
+ return self._recurse(iterable, self._recurse, depth=self.depth)
+
+ def __repr__(self):
+ return "<%s %d/%d>" % (self.__class__.__name__, self.index, self.length)
+
+
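Not part of the vendored file: a short sketch of how LoopContext's
attributes surface as the ``loop`` variable inside a template, assuming
the vendored ``jinja2`` package is importable::

    from jinja2 import Environment

    tmpl = Environment().from_string(
        "{% for item in items %}"
        "{{ loop.index }}/{{ loop.length }} {{ loop.cycle('odd', 'even') }}"
        "{% if not loop.last %}, {% endif %}"
        "{% endfor %}"
    )
    # index/length come from the properties above; cycle() picks by index0.
    print(tmpl.render(items=["a", "b", "c"]))
    # 1/3 odd, 2/3 even, 3/3 odd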
+class Macro(object):
+ """Wraps a macro function."""
+
+ def __init__(
+ self,
+ environment,
+ func,
+ name,
+ arguments,
+ catch_kwargs,
+ catch_varargs,
+ caller,
+ default_autoescape=None,
+ ):
+ self._environment = environment
+ self._func = func
+ self._argument_count = len(arguments)
+ self.name = name
+ self.arguments = arguments
+ self.catch_kwargs = catch_kwargs
+ self.catch_varargs = catch_varargs
+ self.caller = caller
+ self.explicit_caller = "caller" in arguments
+ if default_autoescape is None:
+ default_autoescape = environment.autoescape
+ self._default_autoescape = default_autoescape
+
+ @internalcode
+ @evalcontextfunction
+ def __call__(self, *args, **kwargs):
+        # This requires a bit of explanation. In the past we used to
+ # decide largely based on compile-time information if a macro is
+ # safe or unsafe. While there was a volatile mode it was largely
+ # unused for deciding on escaping. This turns out to be
+ # problematic for macros because whether a macro is safe depends not
+ # on the escape mode when it was defined, but rather when it was used.
+ #
+        # However, because we export macros from the module system and
+ # there are historic callers that do not pass an eval context (and
+ # will continue to not pass one), we need to perform an instance
+ # check here.
+ #
+ # This is considered safe because an eval context is not a valid
+ # argument to callables otherwise anyway. Worst case here is
+ # that if no eval context is passed we fall back to the compile
+ # time autoescape flag.
+ if args and isinstance(args[0], EvalContext):
+ autoescape = args[0].autoescape
+ args = args[1:]
+ else:
+ autoescape = self._default_autoescape
+
+ # try to consume the positional arguments
+ arguments = list(args[: self._argument_count])
+ off = len(arguments)
+
+ # For information why this is necessary refer to the handling
+ # of caller in the `macro_body` handler in the compiler.
+ found_caller = False
+
+ # if the number of arguments consumed is not the number of
+ # arguments expected we start filling in keyword arguments
+ # and defaults.
+ if off != self._argument_count:
+ for name in self.arguments[len(arguments) :]:
+ try:
+ value = kwargs.pop(name)
+ except KeyError:
+ value = missing
+ if name == "caller":
+ found_caller = True
+ arguments.append(value)
+ else:
+ found_caller = self.explicit_caller
+
+ # it's important that the order of these arguments does not change
+ # if not also changed in the compiler's `function_scoping` method.
+ # the order is caller, keyword arguments, positional arguments!
+ if self.caller and not found_caller:
+ caller = kwargs.pop("caller", None)
+ if caller is None:
+ caller = self._environment.undefined("No caller defined", name="caller")
+ arguments.append(caller)
+
+ if self.catch_kwargs:
+ arguments.append(kwargs)
+ elif kwargs:
+ if "caller" in kwargs:
+ raise TypeError(
+ "macro %r was invoked with two values for "
+ "the special caller argument. This is "
+ "most likely a bug." % self.name
+ )
+ raise TypeError(
+ "macro %r takes no keyword argument %r"
+ % (self.name, next(iter(kwargs)))
+ )
+ if self.catch_varargs:
+ arguments.append(args[self._argument_count :])
+ elif len(args) > self._argument_count:
+ raise TypeError(
+ "macro %r takes not more than %d argument(s)"
+ % (self.name, len(self.arguments))
+ )
+
+ return self._invoke(arguments, autoescape)
+
+ def _invoke(self, arguments, autoescape):
+ """This method is being swapped out by the async implementation."""
+ rv = self._func(*arguments)
+ if autoescape:
+ rv = Markup(rv)
+ return rv
+
+ def __repr__(self):
+ return "<%s %s>" % (
+ self.__class__.__name__,
+ self.name is None and "anonymous" or repr(self.name),
+ )
+
+
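A Macro instance is what a ``{% macro %}`` definition evaluates to at
runtime. A minimal, illustrative usage sketch::

    from jinja2 import Environment

    tmpl = Environment().from_string(
        "{% macro greet(name, punct='!') %}"
        "Hello {{ name }}{{ punct }}"
        "{% endmacro %}"
        "{{ greet('World') }} {{ greet('you', punct='?') }}"
    )
    # Positional and keyword arguments are consumed by Macro.__call__.
    print(tmpl.render())  # Hello World! Hello you?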
+@implements_to_string
+class Undefined(object):
+ """The default undefined type. This undefined type can be printed and
+ iterated over, but every other access will raise an :exc:`UndefinedError`:
+
+ >>> foo = Undefined(name='foo')
+ >>> str(foo)
+ ''
+ >>> not foo
+ True
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+
+ __slots__ = (
+ "_undefined_hint",
+ "_undefined_obj",
+ "_undefined_name",
+ "_undefined_exception",
+ )
+
+ def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
+ self._undefined_hint = hint
+ self._undefined_obj = obj
+ self._undefined_name = name
+ self._undefined_exception = exc
+
+ @property
+ def _undefined_message(self):
+ """Build a message about the undefined value based on how it was
+ accessed.
+ """
+ if self._undefined_hint:
+ return self._undefined_hint
+
+ if self._undefined_obj is missing:
+ return "%r is undefined" % self._undefined_name
+
+ if not isinstance(self._undefined_name, string_types):
+ return "%s has no element %r" % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name,
+ )
+
+ return "%r has no attribute %r" % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name,
+ )
+
+ @internalcode
+ def _fail_with_undefined_error(self, *args, **kwargs):
+ """Raise an :exc:`UndefinedError` when operations are performed
+ on the undefined value.
+ """
+ raise self._undefined_exception(self._undefined_message)
+
+ @internalcode
+ def __getattr__(self, name):
+ if name[:2] == "__":
+ raise AttributeError(name)
+ return self._fail_with_undefined_error()
+
+ __add__ = (
+ __radd__
+ ) = (
+ __mul__
+ ) = (
+ __rmul__
+ ) = (
+ __div__
+ ) = (
+ __rdiv__
+ ) = (
+ __truediv__
+ ) = (
+ __rtruediv__
+ ) = (
+ __floordiv__
+ ) = (
+ __rfloordiv__
+ ) = (
+ __mod__
+ ) = (
+ __rmod__
+ ) = (
+ __pos__
+ ) = (
+ __neg__
+ ) = (
+ __call__
+ ) = (
+ __getitem__
+ ) = (
+ __lt__
+ ) = (
+ __le__
+ ) = (
+ __gt__
+ ) = (
+ __ge__
+ ) = (
+ __int__
+ ) = (
+ __float__
+ ) = (
+ __complex__
+ ) = __pow__ = __rpow__ = __sub__ = __rsub__ = _fail_with_undefined_error
+
+ def __eq__(self, other):
+ return type(self) is type(other)
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return id(type(self))
+
+ def __str__(self):
+ return u""
+
+ def __len__(self):
+ return 0
+
+ def __iter__(self):
+ if 0:
+ yield None
+
+ def __nonzero__(self):
+ return False
+
+ __bool__ = __nonzero__
+
+ def __repr__(self):
+ return "Undefined"
+
+
+def make_logging_undefined(logger=None, base=None):
+ """Given a logger object this returns a new undefined class that will
+ log certain failures. It will log iterations and printing. If no
+    logger is given, a default logger is created.
+
+ Example::
+
+ logger = logging.getLogger(__name__)
+ LoggingUndefined = make_logging_undefined(
+ logger=logger,
+ base=Undefined
+ )
+
+ .. versionadded:: 2.8
+
+ :param logger: the logger to use. If not provided, a default logger
+ is created.
+ :param base: the base class to add logging functionality to. This
+ defaults to :class:`Undefined`.
+ """
+ if logger is None:
+ import logging
+
+ logger = logging.getLogger(__name__)
+ logger.addHandler(logging.StreamHandler(sys.stderr))
+ if base is None:
+ base = Undefined
+
+ def _log_message(undef):
+ if undef._undefined_hint is None:
+ if undef._undefined_obj is missing:
+ hint = "%s is undefined" % undef._undefined_name
+ elif not isinstance(undef._undefined_name, string_types):
+ hint = "%s has no element %s" % (
+ object_type_repr(undef._undefined_obj),
+ undef._undefined_name,
+ )
+ else:
+ hint = "%s has no attribute %s" % (
+ object_type_repr(undef._undefined_obj),
+ undef._undefined_name,
+ )
+ else:
+ hint = undef._undefined_hint
+ logger.warning("Template variable warning: %s", hint)
+
+ class LoggingUndefined(base):
+ def _fail_with_undefined_error(self, *args, **kwargs):
+ try:
+ return base._fail_with_undefined_error(self, *args, **kwargs)
+ except self._undefined_exception as e:
+ logger.error("Template variable error: %s", str(e))
+ raise e
+
+ def __str__(self):
+ rv = base.__str__(self)
+ _log_message(self)
+ return rv
+
+ def __iter__(self):
+ rv = base.__iter__(self)
+ _log_message(self)
+ return rv
+
+ if PY2:
+
+ def __nonzero__(self):
+ rv = base.__nonzero__(self)
+ _log_message(self)
+ return rv
+
+ def __unicode__(self):
+ rv = base.__unicode__(self)
+ _log_message(self)
+ return rv
+
+ else:
+
+ def __bool__(self):
+ rv = base.__bool__(self)
+ _log_message(self)
+ return rv
+
+ return LoggingUndefined
+
+
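A hedged usage sketch for the factory above (the logging configuration
is an assumption, not prescribed by this module)::

    import logging

    from jinja2 import Environment, Undefined, make_logging_undefined

    logging.basicConfig()
    LoggingUndefined = make_logging_undefined(
        logger=logging.getLogger(__name__), base=Undefined
    )
    env = Environment(undefined=LoggingUndefined)
    # Renders the usual empty string but also emits a warning log entry.
    print(repr(env.from_string("{{ missing_name }}").render()))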
+# No @implements_to_string decorator here because __str__
+# is not overwritten from Undefined in this class.
+# This would cause a recursion error in Python 2.
+class ChainableUndefined(Undefined):
+ """An undefined that is chainable, where both ``__getattr__`` and
+ ``__getitem__`` return itself rather than raising an
+ :exc:`UndefinedError`.
+
+ >>> foo = ChainableUndefined(name='foo')
+ >>> str(foo.bar['baz'])
+ ''
+ >>> foo.bar['baz'] + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+
+ .. versionadded:: 2.11.0
+ """
+
+ __slots__ = ()
+
+ def __html__(self):
+ return self.__str__()
+
+ def __getattr__(self, _):
+ return self
+
+ __getitem__ = __getattr__
+
+
+@implements_to_string
+class DebugUndefined(Undefined):
+ """An undefined that returns the debug info when printed.
+
+ >>> foo = DebugUndefined(name='foo')
+ >>> str(foo)
+ '{{ foo }}'
+ >>> not foo
+ True
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+
+ __slots__ = ()
+
+ def __str__(self):
+ if self._undefined_hint is None:
+ if self._undefined_obj is missing:
+ return u"{{ %s }}" % self._undefined_name
+ return "{{ no such element: %s[%r] }}" % (
+ object_type_repr(self._undefined_obj),
+ self._undefined_name,
+ )
+ return u"{{ undefined value printed: %s }}" % self._undefined_hint
+
+
+@implements_to_string
+class StrictUndefined(Undefined):
+ """An undefined that barks on print and iteration as well as boolean
+ tests and all kinds of comparisons. In other words: you can do nothing
+ with it except checking if it's defined using the `defined` test.
+
+ >>> foo = StrictUndefined(name='foo')
+ >>> str(foo)
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ >>> not foo
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ >>> foo + 42
+ Traceback (most recent call last):
+ ...
+ jinja2.exceptions.UndefinedError: 'foo' is undefined
+ """
+
+ __slots__ = ()
+ __iter__ = (
+ __str__
+ ) = (
+ __len__
+ ) = (
+ __nonzero__
+ ) = __eq__ = __ne__ = __bool__ = __hash__ = Undefined._fail_with_undefined_error
+
+
+# remove remaining slots attributes, after the metaclass did the magic they
+# are unneeded and irritating as they contain wrong data for the subclasses.
+del (
+ Undefined.__slots__,
+ ChainableUndefined.__slots__,
+ DebugUndefined.__slots__,
+ StrictUndefined.__slots__,
+)
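To contrast the undefined flavors defined above, a minimal sketch
(variable names are illustrative)::

    from jinja2 import DebugUndefined, Environment, StrictUndefined
    from jinja2.exceptions import UndefinedError

    # StrictUndefined fails loudly on any use of a missing variable.
    try:
        Environment(undefined=StrictUndefined).from_string("{{ missing }}").render()
    except UndefinedError as exc:
        print(exc)  # 'missing' is undefined

    # DebugUndefined instead leaves a marker in the rendered output.
    print(Environment(undefined=DebugUndefined).from_string("{{ missing }}").render())
    # {{ missing }}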
diff --git a/third_party/python/Jinja2/jinja2/sandbox.py b/third_party/python/Jinja2/jinja2/sandbox.py
new file mode 100644
index 0000000000..cfd7993aee
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/sandbox.py
@@ -0,0 +1,510 @@
+# -*- coding: utf-8 -*-
+"""A sandbox layer that ensures unsafe operations cannot be performed.
+Useful when the template itself comes from an untrusted source.
+"""
+import operator
+import types
+import warnings
+from collections import deque
+from string import Formatter
+
+from markupsafe import EscapeFormatter
+from markupsafe import Markup
+
+from ._compat import abc
+from ._compat import PY2
+from ._compat import range_type
+from ._compat import string_types
+from .environment import Environment
+from .exceptions import SecurityError
+
+#: maximum number of items a range may produce
+MAX_RANGE = 100000
+
+#: attributes of function objects that are considered unsafe.
+if PY2:
+ UNSAFE_FUNCTION_ATTRIBUTES = {
+ "func_closure",
+ "func_code",
+ "func_dict",
+ "func_defaults",
+ "func_globals",
+ }
+else:
+    # On Python 3 the special func_* attributes on functions are gone,
+ # but they remain on methods and generators for whatever reason.
+ UNSAFE_FUNCTION_ATTRIBUTES = set()
+
+#: unsafe method attributes. function attributes are unsafe for methods too
+UNSAFE_METHOD_ATTRIBUTES = {"im_class", "im_func", "im_self"}
+
+#: unsafe generator attributes.
+UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
+
+#: unsafe attributes on coroutines
+UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
+
+#: unsafe attributes on async generators
+UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
+
+# make sure we don't warn in python 2.6 about stuff we don't care about
+warnings.filterwarnings(
+ "ignore", "the sets module", DeprecationWarning, module=__name__
+)
+
+_mutable_set_types = (set,)
+_mutable_mapping_types = (dict,)
+_mutable_sequence_types = (list,)
+
+# on python 2.x we can register the user collection types
+try:
+ from UserDict import UserDict, DictMixin
+ from UserList import UserList
+
+ _mutable_mapping_types += (UserDict, DictMixin)
+    _mutable_sequence_types += (UserList,)
+except ImportError:
+ pass
+
+# if sets is still available, register the mutable set from there as well
+try:
+ from sets import Set
+
+ _mutable_set_types += (Set,)
+except ImportError:
+ pass
+
+#: register Python 2.6 abstract base classes
+_mutable_set_types += (abc.MutableSet,)
+_mutable_mapping_types += (abc.MutableMapping,)
+_mutable_sequence_types += (abc.MutableSequence,)
+
+_mutable_spec = (
+ (
+ _mutable_set_types,
+ frozenset(
+ [
+ "add",
+ "clear",
+ "difference_update",
+ "discard",
+ "pop",
+ "remove",
+ "symmetric_difference_update",
+ "update",
+ ]
+ ),
+ ),
+ (
+ _mutable_mapping_types,
+ frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
+ ),
+ (
+ _mutable_sequence_types,
+ frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
+ ),
+ (
+ deque,
+ frozenset(
+ [
+ "append",
+ "appendleft",
+ "clear",
+ "extend",
+ "extendleft",
+ "pop",
+ "popleft",
+ "remove",
+ "rotate",
+ ]
+ ),
+ ),
+)
+
+
+class _MagicFormatMapping(abc.Mapping):
+ """This class implements a dummy wrapper to fix a bug in the Python
+ standard library for string formatting.
+
+ See https://bugs.python.org/issue13598 for information about why
+ this is necessary.
+ """
+
+ def __init__(self, args, kwargs):
+ self._args = args
+ self._kwargs = kwargs
+ self._last_index = 0
+
+ def __getitem__(self, key):
+ if key == "":
+ idx = self._last_index
+ self._last_index += 1
+ try:
+ return self._args[idx]
+ except LookupError:
+ pass
+ key = str(idx)
+ return self._kwargs[key]
+
+ def __iter__(self):
+ return iter(self._kwargs)
+
+ def __len__(self):
+ return len(self._kwargs)
+
+
+def inspect_format_method(callable):
+ if not isinstance(
+ callable, (types.MethodType, types.BuiltinMethodType)
+ ) or callable.__name__ not in ("format", "format_map"):
+ return None
+ obj = callable.__self__
+ if isinstance(obj, string_types):
+ return obj
+
+
+def safe_range(*args):
+ """A range that can't generate ranges with a length of more than
+ MAX_RANGE items.
+ """
+ rng = range_type(*args)
+
+ if len(rng) > MAX_RANGE:
+ raise OverflowError(
+ "Range too big. The sandbox blocks ranges larger than"
+ " MAX_RANGE (%d)." % MAX_RANGE
+ )
+
+ return rng
+
+
+def unsafe(f):
+ """Marks a function or method as unsafe.
+
+ ::
+
+ @unsafe
+ def delete(self):
+ pass
+ """
+ f.unsafe_callable = True
+ return f
+
+
+def is_internal_attribute(obj, attr):
+ """Test if the attribute given is an internal python attribute. For
+ example this function returns `True` for the `func_code` attribute of
+ python objects. This is useful if the environment method
+ :meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
+
+ >>> from jinja2.sandbox import is_internal_attribute
+ >>> is_internal_attribute(str, "mro")
+ True
+ >>> is_internal_attribute(str, "upper")
+ False
+ """
+ if isinstance(obj, types.FunctionType):
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES:
+ return True
+ elif isinstance(obj, types.MethodType):
+ if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
+ return True
+ elif isinstance(obj, type):
+ if attr == "mro":
+ return True
+ elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
+ return True
+ elif isinstance(obj, types.GeneratorType):
+ if attr in UNSAFE_GENERATOR_ATTRIBUTES:
+ return True
+ elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
+ if attr in UNSAFE_COROUTINE_ATTRIBUTES:
+ return True
+ elif hasattr(types, "AsyncGeneratorType") and isinstance(
+ obj, types.AsyncGeneratorType
+ ):
+ if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
+ return True
+ return attr.startswith("__")
+
+
+def modifies_known_mutable(obj, attr):
+ """This function checks if an attribute on a builtin mutable object
+ (list, dict, set or deque) would modify it if called. It also supports
+ the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
+ with Python 2.6 onwards the abstract base classes `MutableSet`,
+ `MutableMapping`, and `MutableSequence`.
+
+ >>> modifies_known_mutable({}, "clear")
+ True
+ >>> modifies_known_mutable({}, "keys")
+ False
+ >>> modifies_known_mutable([], "append")
+ True
+ >>> modifies_known_mutable([], "index")
+ False
+
+ If called with an unsupported object (such as unicode) `False` is
+ returned.
+
+ >>> modifies_known_mutable("foo", "upper")
+ False
+ """
+ for typespec, unsafe in _mutable_spec:
+ if isinstance(obj, typespec):
+ return attr in unsafe
+ return False
+
+
+class SandboxedEnvironment(Environment):
+ """The sandboxed environment. It works like the regular environment but
+ tells the compiler to generate sandboxed code. Additionally subclasses of
+ this environment may override the methods that tell the runtime what
+ attributes or functions are safe to access.
+
+ If the template tries to access insecure code a :exc:`SecurityError` is
+    raised. However, other exceptions may also occur during rendering, so
+    the caller has to ensure that all exceptions are caught.
+ """
+
+ sandboxed = True
+
+ #: default callback table for the binary operators. A copy of this is
+ #: available on each instance of a sandboxed environment as
+ #: :attr:`binop_table`
+ default_binop_table = {
+ "+": operator.add,
+ "-": operator.sub,
+ "*": operator.mul,
+ "/": operator.truediv,
+ "//": operator.floordiv,
+ "**": operator.pow,
+ "%": operator.mod,
+ }
+
+ #: default callback table for the unary operators. A copy of this is
+ #: available on each instance of a sandboxed environment as
+ #: :attr:`unop_table`
+ default_unop_table = {"+": operator.pos, "-": operator.neg}
+
+ #: a set of binary operators that should be intercepted. Each operator
+ #: that is added to this set (empty by default) is delegated to the
+ #: :meth:`call_binop` method that will perform the operator. The default
+ #: operator callback is specified by :attr:`binop_table`.
+ #:
+ #: The following binary operators are interceptable:
+ #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
+ #:
+    #: The default operation from the operator table corresponds to the
+ #: builtin function. Intercepted calls are always slower than the native
+ #: operator call, so make sure only to intercept the ones you are
+ #: interested in.
+ #:
+ #: .. versionadded:: 2.6
+ intercepted_binops = frozenset()
+
+ #: a set of unary operators that should be intercepted. Each operator
+ #: that is added to this set (empty by default) is delegated to the
+ #: :meth:`call_unop` method that will perform the operator. The default
+ #: operator callback is specified by :attr:`unop_table`.
+ #:
+ #: The following unary operators are interceptable: ``+``, ``-``
+ #:
+    #: The default operation from the operator table corresponds to the
+ #: builtin function. Intercepted calls are always slower than the native
+ #: operator call, so make sure only to intercept the ones you are
+ #: interested in.
+ #:
+ #: .. versionadded:: 2.6
+ intercepted_unops = frozenset()
+
+ def intercept_unop(self, operator):
+ """Called during template compilation with the name of a unary
+ operator to check if it should be intercepted at runtime. If this
+ method returns `True`, :meth:`call_unop` is executed for this unary
+ operator. The default implementation of :meth:`call_unop` will use
+ the :attr:`unop_table` dictionary to perform the operator with the
+ same logic as the builtin one.
+
+ The following unary operators are interceptable: ``+`` and ``-``
+
+ Intercepted calls are always slower than the native operator call,
+ so make sure only to intercept the ones you are interested in.
+
+ .. versionadded:: 2.6
+ """
+ return False
+
+ def __init__(self, *args, **kwargs):
+ Environment.__init__(self, *args, **kwargs)
+ self.globals["range"] = safe_range
+ self.binop_table = self.default_binop_table.copy()
+ self.unop_table = self.default_unop_table.copy()
+
+ def is_safe_attribute(self, obj, attr, value):
+ """The sandboxed environment will call this method to check if the
+        attribute of an object is safe to access. By default all attributes
+ starting with an underscore are considered private as well as the
+ special attributes of internal python objects as returned by the
+ :func:`is_internal_attribute` function.
+ """
+ return not (attr.startswith("_") or is_internal_attribute(obj, attr))
+
+ def is_safe_callable(self, obj):
+ """Check if an object is safely callable. Per default a function is
+ considered safe unless the `unsafe_callable` attribute exists and is
+ True. Override this method to alter the behavior, but this won't
+ affect the `unsafe` decorator from this module.
+ """
+ return not (
+ getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
+ )
+
+ def call_binop(self, context, operator, left, right):
+ """For intercepted binary operator calls (:meth:`intercepted_binops`)
+ this function is executed instead of the builtin operator. This can
+ be used to fine tune the behavior of certain operators.
+
+ .. versionadded:: 2.6
+ """
+ return self.binop_table[operator](left, right)
+
+ def call_unop(self, context, operator, arg):
+ """For intercepted unary operator calls (:meth:`intercepted_unops`)
+ this function is executed instead of the builtin operator. This can
+ be used to fine tune the behavior of certain operators.
+
+ .. versionadded:: 2.6
+ """
+ return self.unop_table[operator](arg)
+
+ def getitem(self, obj, argument):
+ """Subscribe an object from sandboxed code."""
+ try:
+ return obj[argument]
+ except (TypeError, LookupError):
+ if isinstance(argument, string_types):
+ try:
+ attr = str(argument)
+ except Exception:
+ pass
+ else:
+ try:
+ value = getattr(obj, attr)
+ except AttributeError:
+ pass
+ else:
+ if self.is_safe_attribute(obj, argument, value):
+ return value
+ return self.unsafe_undefined(obj, argument)
+ return self.undefined(obj=obj, name=argument)
+
+ def getattr(self, obj, attribute):
+ """Subscribe an object from sandboxed code and prefer the
+ attribute. The attribute passed *must* be a bytestring.
+ """
+ try:
+ value = getattr(obj, attribute)
+ except AttributeError:
+ try:
+ return obj[attribute]
+ except (TypeError, LookupError):
+ pass
+ else:
+ if self.is_safe_attribute(obj, attribute, value):
+ return value
+ return self.unsafe_undefined(obj, attribute)
+ return self.undefined(obj=obj, name=attribute)
+
+ def unsafe_undefined(self, obj, attribute):
+ """Return an undefined object for unsafe attributes."""
+ return self.undefined(
+ "access to attribute %r of %r "
+ "object is unsafe." % (attribute, obj.__class__.__name__),
+ name=attribute,
+ obj=obj,
+ exc=SecurityError,
+ )
+
+ def format_string(self, s, args, kwargs, format_func=None):
+ """If a format call is detected, then this is routed through this
+ method so that our safety sandbox can be used for it.
+ """
+ if isinstance(s, Markup):
+ formatter = SandboxedEscapeFormatter(self, s.escape)
+ else:
+ formatter = SandboxedFormatter(self)
+
+ if format_func is not None and format_func.__name__ == "format_map":
+ if len(args) != 1 or kwargs:
+ raise TypeError(
+ "format_map() takes exactly one argument %d given"
+ % (len(args) + (kwargs is not None))
+ )
+
+ kwargs = args[0]
+ args = None
+
+ kwargs = _MagicFormatMapping(args, kwargs)
+ rv = formatter.vformat(s, args, kwargs)
+ return type(s)(rv)
+
+ def call(__self, __context, __obj, *args, **kwargs): # noqa: B902
+ """Call an object from sandboxed code."""
+ fmt = inspect_format_method(__obj)
+ if fmt is not None:
+ return __self.format_string(fmt, args, kwargs, __obj)
+
+ # the double prefixes are to avoid double keyword argument
+ # errors when proxying the call.
+ if not __self.is_safe_callable(__obj):
+ raise SecurityError("%r is not safely callable" % (__obj,))
+ return __context.call(__obj, *args, **kwargs)
+
+
+class ImmutableSandboxedEnvironment(SandboxedEnvironment):
+ """Works exactly like the regular `SandboxedEnvironment` but does not
+ permit modifications on the builtin mutable objects `list`, `set`, and
+ `dict` by using the :func:`modifies_known_mutable` function.
+ """
+
+ def is_safe_attribute(self, obj, attr, value):
+ if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
+ return False
+ return not modifies_known_mutable(obj, attr)
+
+
+# This really is not a public API apparently.
+try:
+ from _string import formatter_field_name_split
+except ImportError:
+
+ def formatter_field_name_split(field_name):
+ return field_name._formatter_field_name_split()
+
+
+class SandboxedFormatterMixin(object):
+ def __init__(self, env):
+ self._env = env
+
+ def get_field(self, field_name, args, kwargs):
+ first, rest = formatter_field_name_split(field_name)
+ obj = self.get_value(first, args, kwargs)
+ for is_attr, i in rest:
+ if is_attr:
+ obj = self._env.getattr(obj, i)
+ else:
+ obj = self._env.getitem(obj, i)
+ return obj, first
+
+
+class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
+ def __init__(self, env):
+ SandboxedFormatterMixin.__init__(self, env)
+ Formatter.__init__(self)
+
+
+class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
+ def __init__(self, env, escape):
+ SandboxedFormatterMixin.__init__(self, env)
+ EscapeFormatter.__init__(self, escape)
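A short sketch of the sandbox in action, assuming the vendored packages
are importable (the template strings are illustrative)::

    from jinja2.exceptions import SecurityError
    from jinja2.sandbox import ImmutableSandboxedEnvironment, SandboxedEnvironment

    # Ordinary expressions still work in the sandbox.
    print(SandboxedEnvironment().from_string("{{ 2 ** 10 }}").render())  # 1024

    try:
        # Mutating a builtin container is blocked by the immutable sandbox.
        ImmutableSandboxedEnvironment().from_string(
            "{{ items.append(4) }}"
        ).render(items=[1, 2, 3])
    except SecurityError as exc:
        print(exc)  # access to attribute 'append' of 'list' object is unsafe.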
diff --git a/third_party/python/Jinja2/jinja2/tests.py b/third_party/python/Jinja2/jinja2/tests.py
new file mode 100644
index 0000000000..fabd4ce51b
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/tests.py
@@ -0,0 +1,215 @@
+# -*- coding: utf-8 -*-
+"""Built-in template tests used with the ``is`` operator."""
+import decimal
+import operator
+import re
+
+from ._compat import abc
+from ._compat import integer_types
+from ._compat import string_types
+from ._compat import text_type
+from .runtime import Undefined
+
+number_re = re.compile(r"^-?\d+(\.\d+)?$")
+regex_type = type(number_re)
+test_callable = callable
+
+
+def test_odd(value):
+ """Return true if the variable is odd."""
+ return value % 2 == 1
+
+
+def test_even(value):
+ """Return true if the variable is even."""
+ return value % 2 == 0
+
+
+def test_divisibleby(value, num):
+ """Check if a variable is divisible by a number."""
+ return value % num == 0
+
+
+def test_defined(value):
+ """Return true if the variable is defined:
+
+ .. sourcecode:: jinja
+
+ {% if variable is defined %}
+ value of variable: {{ variable }}
+ {% else %}
+ variable is not defined
+ {% endif %}
+
+ See the :func:`default` filter for a simple way to set undefined
+ variables.
+ """
+ return not isinstance(value, Undefined)
+
+
+def test_undefined(value):
+ """Like :func:`defined` but the other way round."""
+ return isinstance(value, Undefined)
+
+
+def test_none(value):
+ """Return true if the variable is none."""
+ return value is None
+
+
+def test_boolean(value):
+ """Return true if the object is a boolean value.
+
+ .. versionadded:: 2.11
+ """
+ return value is True or value is False
+
+
+def test_false(value):
+ """Return true if the object is False.
+
+ .. versionadded:: 2.11
+ """
+ return value is False
+
+
+def test_true(value):
+ """Return true if the object is True.
+
+ .. versionadded:: 2.11
+ """
+ return value is True
+
+
+# NOTE: The existing 'number' test matches booleans and floats
+def test_integer(value):
+ """Return true if the object is an integer.
+
+ .. versionadded:: 2.11
+ """
+ return isinstance(value, integer_types) and value is not True and value is not False
+
+
+# NOTE: The existing 'number' test matches booleans and integers
+def test_float(value):
+ """Return true if the object is a float.
+
+ .. versionadded:: 2.11
+ """
+ return isinstance(value, float)
+
+
+def test_lower(value):
+ """Return true if the variable is lowercased."""
+ return text_type(value).islower()
+
+
+def test_upper(value):
+ """Return true if the variable is uppercased."""
+ return text_type(value).isupper()
+
+
+def test_string(value):
+ """Return true if the object is a string."""
+ return isinstance(value, string_types)
+
+
+def test_mapping(value):
+ """Return true if the object is a mapping (dict etc.).
+
+ .. versionadded:: 2.6
+ """
+ return isinstance(value, abc.Mapping)
+
+
+def test_number(value):
+ """Return true if the variable is a number."""
+ return isinstance(value, integer_types + (float, complex, decimal.Decimal))
+
+
+def test_sequence(value):
+ """Return true if the variable is a sequence. Sequences are variables
+    that have a length and support indexed item access.
+ """
+ try:
+ len(value)
+ value.__getitem__
+ except Exception:
+ return False
+ return True
+
+
+def test_sameas(value, other):
+ """Check if an object points to the same memory address than another
+ object:
+
+ .. sourcecode:: jinja
+
+ {% if foo.attribute is sameas false %}
+ the foo attribute really is the `False` singleton
+ {% endif %}
+ """
+ return value is other
+
+
+def test_iterable(value):
+ """Check if it's possible to iterate over an object."""
+ try:
+ iter(value)
+ except TypeError:
+ return False
+ return True
+
+
+def test_escaped(value):
+ """Check if the value is escaped."""
+ return hasattr(value, "__html__")
+
+
+def test_in(value, seq):
+ """Check if value is in seq.
+
+ .. versionadded:: 2.10
+ """
+ return value in seq
+
+
+TESTS = {
+ "odd": test_odd,
+ "even": test_even,
+ "divisibleby": test_divisibleby,
+ "defined": test_defined,
+ "undefined": test_undefined,
+ "none": test_none,
+ "boolean": test_boolean,
+ "false": test_false,
+ "true": test_true,
+ "integer": test_integer,
+ "float": test_float,
+ "lower": test_lower,
+ "upper": test_upper,
+ "string": test_string,
+ "mapping": test_mapping,
+ "number": test_number,
+ "sequence": test_sequence,
+ "iterable": test_iterable,
+ "callable": test_callable,
+ "sameas": test_sameas,
+ "escaped": test_escaped,
+ "in": test_in,
+ "==": operator.eq,
+ "eq": operator.eq,
+ "equalto": operator.eq,
+ "!=": operator.ne,
+ "ne": operator.ne,
+ ">": operator.gt,
+ "gt": operator.gt,
+ "greaterthan": operator.gt,
+ "ge": operator.ge,
+ ">=": operator.ge,
+ "<": operator.lt,
+ "lt": operator.lt,
+ "lessthan": operator.lt,
+ "<=": operator.le,
+ "le": operator.le,
+}
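How these tests are reached from a template, plus registering a custom
one (the ``palindrome`` test is a hypothetical addition)::

    from jinja2 import Environment

    env = Environment()
    tmpl = env.from_string(
        "{% if value is divisibleby 3 %}fizz{% elif value is odd %}odd{% endif %}"
    )
    print(tmpl.render(value=9))  # fizz

    # Custom tests can be registered next to the built-ins in TESTS.
    env.tests["palindrome"] = lambda s: s == s[::-1]
    print(env.from_string("{{ 'level' is palindrome }}").render())  # True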
diff --git a/third_party/python/Jinja2/jinja2/utils.py b/third_party/python/Jinja2/jinja2/utils.py
new file mode 100644
index 0000000000..6afca81055
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/utils.py
@@ -0,0 +1,737 @@
+# -*- coding: utf-8 -*-
+import json
+import os
+import re
+import warnings
+from collections import deque
+from random import choice
+from random import randrange
+from string import ascii_letters as _letters
+from string import digits as _digits
+from threading import Lock
+
+from markupsafe import escape
+from markupsafe import Markup
+
+from ._compat import abc
+from ._compat import string_types
+from ._compat import text_type
+from ._compat import url_quote
+
+# special singleton representing missing values for the runtime
+missing = type("MissingType", (), {"__repr__": lambda x: "missing"})()
+
+# internal code
+internal_code = set()
+
+concat = u"".join
+
+_slash_escape = "\\/" not in json.dumps("/")
+
+
+def contextfunction(f):
+ """This decorator can be used to mark a function or method context callable.
+ A context callable is passed the active :class:`Context` as first argument when
+ called from the template. This is useful if a function wants to get access
+ to the context or functions provided on the context object. For example
+ a function that returns a sorted list of template variables the current
+ template exports could look like this::
+
+ @contextfunction
+ def get_exported_names(context):
+ return sorted(context.exported_vars)
+ """
+ f.contextfunction = True
+ return f
+
+
+def evalcontextfunction(f):
+ """This decorator can be used to mark a function or method as an eval
+ context callable. This is similar to the :func:`contextfunction`
+ but instead of passing the context, an evaluation context object is
+ passed. For more information about the eval context, see
+ :ref:`eval-context`.
+
+ .. versionadded:: 2.4
+ """
+ f.evalcontextfunction = True
+ return f
+
+
+def environmentfunction(f):
+ """This decorator can be used to mark a function or method as environment
+ callable. This decorator works exactly like the :func:`contextfunction`
+    decorator, except that the first argument is the active
+    :class:`Environment` rather than the context.
+ """
+ f.environmentfunction = True
+ return f
+
+
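A sketch of the first decorator in use (the function name and template
are illustrative)::

    from jinja2 import Environment, contextfunction

    @contextfunction
    def get_exported_names(context):
        return sorted(context.exported_vars)

    env = Environment()
    env.globals["get_exported_names"] = get_exported_names
    tmpl = env.from_string(
        "{% set x = 1 %}{% set y = 2 %}{{ get_exported_names() }}"
    )
    print(tmpl.render())  # ['x', 'y']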
+def internalcode(f):
+ """Marks the function as internally used"""
+ internal_code.add(f.__code__)
+ return f
+
+
+def is_undefined(obj):
+ """Check if the object passed is undefined. This does nothing more than
+ performing an instance check against :class:`Undefined` but looks nicer.
+ This can be used for custom filters or tests that want to react to
+ undefined variables. For example a custom default filter can look like
+ this::
+
+ def default(var, default=''):
+ if is_undefined(var):
+ return default
+ return var
+ """
+ from .runtime import Undefined
+
+ return isinstance(obj, Undefined)
+
+
+def consume(iterable):
+ """Consumes an iterable without doing anything with it."""
+ for _ in iterable:
+ pass
+
+
+def clear_caches():
+ """Jinja keeps internal caches for environments and lexers. These are
+ used so that Jinja doesn't have to recreate environments and lexers all
+ the time. Normally you don't have to care about that but if you are
+ measuring memory consumption you may want to clean the caches.
+ """
+ from .environment import _spontaneous_environments
+ from .lexer import _lexer_cache
+
+ _spontaneous_environments.clear()
+ _lexer_cache.clear()
+
+
+def import_string(import_name, silent=False):
+ """Imports an object based on a string. This is useful if you want to
+ use import paths as endpoints or something similar. An import path can
+ be specified either in dotted notation (``xml.sax.saxutils.escape``)
+ or with a colon as object delimiter (``xml.sax.saxutils:escape``).
+
+    If `silent` is True the return value will be `None` if the import
+ fails.
+
+ :return: imported object
+ """
+ try:
+ if ":" in import_name:
+ module, obj = import_name.split(":", 1)
+ elif "." in import_name:
+ module, _, obj = import_name.rpartition(".")
+ else:
+ return __import__(import_name)
+ return getattr(__import__(module, None, None, [obj]), obj)
+ except (ImportError, AttributeError):
+ if not silent:
+ raise
+
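Both notations from the docstring resolve to the same object::

    from jinja2.utils import import_string

    escape = import_string("xml.sax.saxutils:escape")
    assert escape is import_string("xml.sax.saxutils.escape")
    print(escape("a < b"))  # a &lt; b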
+
+def open_if_exists(filename, mode="rb"):
+ """Returns a file descriptor for the filename if that file exists,
+ otherwise ``None``.
+ """
+ if not os.path.isfile(filename):
+ return None
+
+ return open(filename, mode)
+
+
+def object_type_repr(obj):
+ """Returns the name of the object's type. For some recognized
+    singletons the name of the object is returned instead (for
+    example `None` and `Ellipsis`).
+ """
+ if obj is None:
+ return "None"
+ elif obj is Ellipsis:
+ return "Ellipsis"
+
+ cls = type(obj)
+
+ # __builtin__ in 2.x, builtins in 3.x
+ if cls.__module__ in ("__builtin__", "builtins"):
+ name = cls.__name__
+ else:
+ name = cls.__module__ + "." + cls.__name__
+
+ return "%s object" % name
+
+
+def pformat(obj, verbose=False):
+ """Prettyprint an object. Either use the `pretty` library or the
+ builtin `pprint`.
+ """
+ try:
+ from pretty import pretty
+
+ return pretty(obj, verbose=verbose)
+ except ImportError:
+ from pprint import pformat
+
+ return pformat(obj)
+
+
+def urlize(text, trim_url_limit=None, rel=None, target=None):
+ """Converts any URLs in text into clickable links. Works on http://,
+ https:// and www. links. Links can have trailing punctuation (periods,
+ commas, close-parens) and leading punctuation (opening parens) and
+ it'll still do the right thing.
+
+ If trim_url_limit is not None, the URLs in link text will be limited
+ to trim_url_limit characters.
+
+    If rel is not None, the links will get a ``rel`` attribute with that
+    value, for example ``rel="nofollow"``.
+
+ If target is not None, a target attribute will be added to the link.
+ """
+ trim_url = (
+ lambda x, limit=trim_url_limit: limit is not None
+ and (x[:limit] + (len(x) >= limit and "..." or ""))
+ or x
+ )
+ words = re.split(r"(\s+)", text_type(escape(text)))
+ rel_attr = rel and ' rel="%s"' % text_type(escape(rel)) or ""
+ target_attr = target and ' target="%s"' % escape(target) or ""
+
+ for i, word in enumerate(words):
+ head, middle, tail = "", word, ""
+ match = re.match(r"^([(<]|&lt;)+", middle)
+
+ if match:
+ head = match.group()
+ middle = middle[match.end() :]
+
+ # Unlike lead, which is anchored to the start of the string,
+ # need to check that the string ends with any of the characters
+ # before trying to match all of them, to avoid backtracking.
+ if middle.endswith((")", ">", ".", ",", "\n", "&gt;")):
+ match = re.search(r"([)>.,\n]|&gt;)+$", middle)
+
+ if match:
+ tail = match.group()
+ middle = middle[: match.start()]
+
+ if middle.startswith("www.") or (
+ "@" not in middle
+ and not middle.startswith("http://")
+ and not middle.startswith("https://")
+ and len(middle) > 0
+ and middle[0] in _letters + _digits
+ and (
+ middle.endswith(".org")
+ or middle.endswith(".net")
+ or middle.endswith(".com")
+ )
+ ):
+ middle = '<a href="http://%s"%s%s>%s</a>' % (
+ middle,
+ rel_attr,
+ target_attr,
+ trim_url(middle),
+ )
+
+ if middle.startswith("http://") or middle.startswith("https://"):
+ middle = '<a href="%s"%s%s>%s</a>' % (
+ middle,
+ rel_attr,
+ target_attr,
+ trim_url(middle),
+ )
+
+ if (
+ "@" in middle
+ and not middle.startswith("www.")
+ and ":" not in middle
+ and re.match(r"^\S+@\w[\w.-]*\.\w+$", middle)
+ ):
+ middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
+
+ words[i] = head + middle + tail
+
+ return u"".join(words)
+
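An illustrative call (the input text is made up; output shown roughly)::

    from jinja2.utils import urlize

    print(urlize("see www.example.com or mail me@example.org", rel="nofollow"))
    # see <a href="http://www.example.com" rel="nofollow">www.example.com</a>
    # or mail <a href="mailto:me@example.org">me@example.org</a>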
+
+def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
+ """Generate some lorem ipsum for the template."""
+ from .constants import LOREM_IPSUM_WORDS
+
+ words = LOREM_IPSUM_WORDS.split()
+ result = []
+
+ for _ in range(n):
+ next_capitalized = True
+ last_comma = last_fullstop = 0
+ word = None
+ last = None
+ p = []
+
+        # each paragraph contains between min and max words.
+ for idx, _ in enumerate(range(randrange(min, max))):
+ while True:
+ word = choice(words)
+ if word != last:
+ last = word
+ break
+ if next_capitalized:
+ word = word.capitalize()
+ next_capitalized = False
+ # add commas
+ if idx - randrange(3, 8) > last_comma:
+ last_comma = idx
+ last_fullstop += 2
+ word += ","
+ # add end of sentences
+ if idx - randrange(10, 20) > last_fullstop:
+ last_comma = last_fullstop = idx
+ word += "."
+ next_capitalized = True
+ p.append(word)
+
+ # ensure that the paragraph ends with a dot.
+ p = u" ".join(p)
+ if p.endswith(","):
+ p = p[:-1] + "."
+ elif not p.endswith("."):
+ p += "."
+ result.append(p)
+
+ if not html:
+ return u"\n\n".join(result)
+ return Markup(u"\n".join(u"<p>%s</p>" % escape(x) for x in result))
+
+
+def unicode_urlencode(obj, charset="utf-8", for_qs=False):
+ """Quote a string for use in a URL using the given charset.
+
+    This function is misnamed; it is a wrapper around
+ :func:`urllib.parse.quote`.
+
+ :param obj: String or bytes to quote. Other types are converted to
+ string then encoded to bytes using the given charset.
+ :param charset: Encode text to bytes using this charset.
+ :param for_qs: Quote "/" and use "+" for spaces.
+ """
+ if not isinstance(obj, string_types):
+ obj = text_type(obj)
+
+ if isinstance(obj, text_type):
+ obj = obj.encode(charset)
+
+ safe = b"" if for_qs else b"/"
+ rv = url_quote(obj, safe)
+
+ if not isinstance(rv, text_type):
+ rv = rv.decode("utf-8")
+
+ if for_qs:
+ rv = rv.replace("%20", "+")
+
+ return rv
+
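A quick sketch of the two quoting modes::

    from jinja2.utils import unicode_urlencode

    print(unicode_urlencode("a b/c"))               # a%20b/c  ("/" stays safe)
    print(unicode_urlencode("a b/c", for_qs=True))  # a+b%2Fc  (query-string mode)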
+
+class LRUCache(object):
+ """A simple LRU Cache implementation."""
+
+ # this is fast for small capacities (something below 1000) but doesn't
+ # scale. But as long as it's only used as storage for templates this
+ # won't do any harm.
+
+ def __init__(self, capacity):
+ self.capacity = capacity
+ self._mapping = {}
+ self._queue = deque()
+ self._postinit()
+
+ def _postinit(self):
+ # alias all queue methods for faster lookup
+ self._popleft = self._queue.popleft
+ self._pop = self._queue.pop
+ self._remove = self._queue.remove
+ self._wlock = Lock()
+ self._append = self._queue.append
+
+ def __getstate__(self):
+ return {
+ "capacity": self.capacity,
+ "_mapping": self._mapping,
+ "_queue": self._queue,
+ }
+
+ def __setstate__(self, d):
+ self.__dict__.update(d)
+ self._postinit()
+
+ def __getnewargs__(self):
+ return (self.capacity,)
+
+ def copy(self):
+ """Return a shallow copy of the instance."""
+ rv = self.__class__(self.capacity)
+ rv._mapping.update(self._mapping)
+ rv._queue.extend(self._queue)
+ return rv
+
+ def get(self, key, default=None):
+ """Return an item from the cache dict or `default`"""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def setdefault(self, key, default=None):
+ """Set `default` if the key is not in the cache otherwise
+ leave unchanged. Return the value of this key.
+ """
+ try:
+ return self[key]
+ except KeyError:
+ self[key] = default
+ return default
+
+ def clear(self):
+ """Clear the cache."""
+ self._wlock.acquire()
+ try:
+ self._mapping.clear()
+ self._queue.clear()
+ finally:
+ self._wlock.release()
+
+ def __contains__(self, key):
+ """Check if a key exists in this cache."""
+ return key in self._mapping
+
+ def __len__(self):
+ """Return the current size of the cache."""
+ return len(self._mapping)
+
+ def __repr__(self):
+ return "<%s %r>" % (self.__class__.__name__, self._mapping)
+
+ def __getitem__(self, key):
+ """Get an item from the cache. Moves the item up so that it has the
+ highest priority then.
+
+ Raise a `KeyError` if it does not exist.
+ """
+ self._wlock.acquire()
+ try:
+ rv = self._mapping[key]
+ if self._queue[-1] != key:
+ try:
+ self._remove(key)
+ except ValueError:
+ # if something removed the key from the container
+ # when we read, ignore the ValueError that we would
+ # get otherwise.
+ pass
+ self._append(key)
+ return rv
+ finally:
+ self._wlock.release()
+
+ def __setitem__(self, key, value):
+ """Sets the value for an item. Moves the item up so that it
+ has the highest priority then.
+ """
+ self._wlock.acquire()
+ try:
+ if key in self._mapping:
+ self._remove(key)
+ elif len(self._mapping) == self.capacity:
+ del self._mapping[self._popleft()]
+ self._append(key)
+ self._mapping[key] = value
+ finally:
+ self._wlock.release()
+
+ def __delitem__(self, key):
+ """Remove an item from the cache dict.
+ Raise a `KeyError` if it does not exist.
+ """
+ self._wlock.acquire()
+ try:
+ del self._mapping[key]
+ try:
+ self._remove(key)
+ except ValueError:
+ pass
+ finally:
+ self._wlock.release()
+
+ def items(self):
+ """Return a list of items."""
+ result = [(key, self._mapping[key]) for key in list(self._queue)]
+ result.reverse()
+ return result
+
+ def iteritems(self):
+ """Iterate over all items."""
+ warnings.warn(
+ "'iteritems()' will be removed in version 3.0. Use"
+ " 'iter(cache.items())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return iter(self.items())
+
+ def values(self):
+ """Return a list of all values."""
+ return [x[1] for x in self.items()]
+
+ def itervalue(self):
+ """Iterate over all values."""
+ warnings.warn(
+ "'itervalue()' will be removed in version 3.0. Use"
+ " 'iter(cache.values())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return iter(self.values())
+
+ def itervalues(self):
+ """Iterate over all values."""
+ warnings.warn(
+ "'itervalues()' will be removed in version 3.0. Use"
+ " 'iter(cache.values())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return iter(self.values())
+
+ def keys(self):
+ """Return a list of all keys ordered by most recent usage."""
+ return list(self)
+
+ def iterkeys(self):
+ """Iterate over all keys in the cache dict, ordered by
+ the most recent usage.
+ """
+ warnings.warn(
+ "'iterkeys()' will be removed in version 3.0. Use"
+ " 'iter(cache.keys())' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return iter(self)
+
+ def __iter__(self):
+ return reversed(tuple(self._queue))
+
+ def __reversed__(self):
+ """Iterate over the keys in the cache dict, oldest items
+ coming first.
+ """
+ return iter(tuple(self._queue))
+
+ __copy__ = copy
+
+
+abc.MutableMapping.register(LRUCache)
+
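A minimal sketch of the eviction behavior (keys and values are
illustrative)::

    from jinja2.utils import LRUCache

    cache = LRUCache(2)
    cache["a"] = 1
    cache["b"] = 2
    cache["a"]      # touching "a" makes it the most recently used key
    cache["c"] = 3  # evicts "b", the least recently used key
    print("b" in cache)  # False
    print(cache.keys())  # ['c', 'a'] -- most recently used first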
+
+def select_autoescape(
+ enabled_extensions=("html", "htm", "xml"),
+ disabled_extensions=(),
+ default_for_string=True,
+ default=False,
+):
+ """Intelligently sets the initial value of autoescaping based on the
+ filename of the template. This is the recommended way to configure
+ autoescaping if you do not want to write a custom function yourself.
+
+ If you want to enable it for all templates created from strings or
+ for all templates with `.html` and `.xml` extensions::
+
+ from jinja2 import Environment, select_autoescape
+ env = Environment(autoescape=select_autoescape(
+ enabled_extensions=('html', 'xml'),
+ default_for_string=True,
+ ))
+
+ Example configuration to turn it on at all times except if the template
+ ends with `.txt`::
+
+ from jinja2 import Environment, select_autoescape
+ env = Environment(autoescape=select_autoescape(
+ disabled_extensions=('txt',),
+ default_for_string=True,
+ default=True,
+ ))
+
+ The `enabled_extensions` is an iterable of all the extensions that
+ autoescaping should be enabled for. Likewise `disabled_extensions` is
+    a list of all extensions it should be disabled for. If a template is
+ loaded from a string then the default from `default_for_string` is used.
+ If nothing matches then the initial value of autoescaping is set to the
+ value of `default`.
+
+    For security reasons this function operates case-insensitively.
+
+ .. versionadded:: 2.9
+ """
+ enabled_patterns = tuple("." + x.lstrip(".").lower() for x in enabled_extensions)
+ disabled_patterns = tuple("." + x.lstrip(".").lower() for x in disabled_extensions)
+
+ def autoescape(template_name):
+ if template_name is None:
+ return default_for_string
+ template_name = template_name.lower()
+ if template_name.endswith(enabled_patterns):
+ return True
+ if template_name.endswith(disabled_patterns):
+ return False
+ return default
+
+ return autoescape
+
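The returned guesser can also be probed directly (file names are
illustrative)::

    from jinja2.utils import select_autoescape

    guess = select_autoescape(enabled_extensions=("html", "xml"))
    print(guess("index.html"))  # True
    print(guess("notes.txt"))   # False
    print(guess(None))          # True -- strings use default_for_string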
+
+def htmlsafe_json_dumps(obj, dumper=None, **kwargs):
+ """Works exactly like :func:`dumps` but is safe for use in ``<script>``
+ tags. It accepts the same arguments and returns a JSON string. Note that
+ this is available in templates through the ``|tojson`` filter which will
+ also mark the result as safe. Due to how this function escapes certain
+ characters this is safe even if used outside of ``<script>`` tags.
+
+ The following characters are escaped in strings:
+
+ - ``<``
+ - ``>``
+ - ``&``
+ - ``'``
+
+ This makes it safe to embed such strings in any place in HTML with the
+    notable exception of double quoted attributes. In that case, single
+    quote your attributes or additionally HTML-escape the value.
+ """
+ if dumper is None:
+ dumper = json.dumps
+ rv = (
+ dumper(obj, **kwargs)
+ .replace(u"<", u"\\u003c")
+ .replace(u">", u"\\u003e")
+ .replace(u"&", u"\\u0026")
+ .replace(u"'", u"\\u0027")
+ )
+ return Markup(rv)
+
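An illustrative call showing the escaping described above::

    from jinja2.utils import htmlsafe_json_dumps

    payload = {"message": "<script>alert('x')</script>"}
    print(htmlsafe_json_dumps(payload))
    # {"message": "\u003cscript\u003ealert(\u0027x\u0027)\u003c/script\u003e"}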
+
+class Cycler(object):
+ """Cycle through values by yield them one at a time, then restarting
+ once the end is reached. Available as ``cycler`` in templates.
+
+ Similar to ``loop.cycle``, but can be used outside loops or across
+ multiple loops. For example, render a list of folders and files in a
+    list, alternately giving them "odd" and "even" classes.
+
+ .. code-block:: html+jinja
+
+ {% set row_class = cycler("odd", "even") %}
+ <ul class="browser">
+ {% for folder in folders %}
+ <li class="folder {{ row_class.next() }}">{{ folder }}
+ {% endfor %}
+ {% for file in files %}
+ <li class="file {{ row_class.next() }}">{{ file }}
+ {% endfor %}
+ </ul>
+
+ :param items: Each positional argument will be yielded in the order
+ given for each cycle.
+
+ .. versionadded:: 2.1
+ """
+
+ def __init__(self, *items):
+ if not items:
+ raise RuntimeError("at least one item has to be provided")
+ self.items = items
+ self.pos = 0
+
+ def reset(self):
+ """Resets the current item to the first item."""
+ self.pos = 0
+
+ @property
+ def current(self):
+ """Return the current item. Equivalent to the item that will be
+ returned next time :meth:`next` is called.
+ """
+ return self.items[self.pos]
+
+ def next(self):
+ """Return the current item, then advance :attr:`current` to the
+ next item.
+ """
+ rv = self.current
+ self.pos = (self.pos + 1) % len(self.items)
+ return rv
+
+ __next__ = next
+
+
+class Joiner(object):
+ """A joining helper for templates."""
+
+ def __init__(self, sep=u", "):
+ self.sep = sep
+ self.used = False
+
+ def __call__(self):
+ if not self.used:
+ self.used = True
+ return u""
+ return self.sep
+
+
+class Namespace(object):
+ """A namespace object that can hold arbitrary attributes. It may be
+ initialized from a dictionary or with keyword arguments."""
+
+ def __init__(*args, **kwargs): # noqa: B902
+ self, args = args[0], args[1:]
+ self.__attrs = dict(*args, **kwargs)
+
+ def __getattribute__(self, name):
+ # __class__ is needed for the awaitable check in async mode
+ if name in {"_Namespace__attrs", "__class__"}:
+ return object.__getattribute__(self, name)
+ try:
+ return self.__attrs[name]
+ except KeyError:
+ raise AttributeError(name)
+
+ def __setitem__(self, name, value):
+ self.__attrs[name] = value
+
+ def __repr__(self):
+ return "<Namespace %r>" % self.__attrs
+
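Namespace backs the ``namespace()`` global, which lets inner template
scopes mutate state that survives a loop body. A minimal sketch::

    from jinja2 import Environment

    tmpl = Environment().from_string(
        "{% set ns = namespace(total=0) %}"
        "{% for n in numbers %}{% set ns.total = ns.total + n %}{% endfor %}"
        "{{ ns.total }}"
    )
    print(tmpl.render(numbers=[1, 2, 3]))  # 6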
+
+# does this python version support async for in and async generators?
+try:
+ exec("async def _():\n async for _ in ():\n yield _")
+ have_async_gen = True
+except SyntaxError:
+ have_async_gen = False
+
+
+def soft_unicode(s):
+ from markupsafe import soft_unicode
+
+ warnings.warn(
+ "'jinja2.utils.soft_unicode' will be removed in version 3.0."
+ " Use 'markupsafe.soft_unicode' instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return soft_unicode(s)
diff --git a/third_party/python/Jinja2/jinja2/visitor.py b/third_party/python/Jinja2/jinja2/visitor.py
new file mode 100644
index 0000000000..d1365bf10e
--- /dev/null
+++ b/third_party/python/Jinja2/jinja2/visitor.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+"""API for traversing the AST nodes. Implemented by the compiler and
+meta introspection.
+"""
+from .nodes import Node
+
+
+class NodeVisitor(object):
+ """Walks the abstract syntax tree and call visitor functions for every
+ node found. The visitor functions may return values which will be
+ forwarded by the `visit` method.
+
+    By default the visitor functions for the nodes are ``'visit_'`` +
+ class name of the node. So a `TryFinally` node visit function would
+ be `visit_TryFinally`. This behavior can be changed by overriding
+ the `get_visitor` function. If no visitor function exists for a node
+ (return value `None`) the `generic_visit` visitor is used instead.
+ """
+
+ def get_visitor(self, node):
+ """Return the visitor function for this node or `None` if no visitor
+ exists for this node. In that case the generic visit function is
+ used instead.
+ """
+ method = "visit_" + node.__class__.__name__
+ return getattr(self, method, None)
+
+ def visit(self, node, *args, **kwargs):
+ """Visit a node."""
+ f = self.get_visitor(node)
+ if f is not None:
+ return f(node, *args, **kwargs)
+ return self.generic_visit(node, *args, **kwargs)
+
+ def generic_visit(self, node, *args, **kwargs):
+ """Called if no explicit visitor function exists for a node."""
+ for node in node.iter_child_nodes():
+ self.visit(node, *args, **kwargs)
+
+
+class NodeTransformer(NodeVisitor):
+ """Walks the abstract syntax tree and allows modifications of nodes.
+
+ The `NodeTransformer` will walk the AST and use the return value of the
+ visitor functions to replace or remove the old node. If the return
+ value of the visitor function is `None` the node will be removed
+ from the previous location otherwise it's replaced with the return
+ value. The return value may be the original node in which case no
+ replacement takes place.
+ """
+
+ def generic_visit(self, node, *args, **kwargs):
+ for field, old_value in node.iter_fields():
+ if isinstance(old_value, list):
+ new_values = []
+ for value in old_value:
+ if isinstance(value, Node):
+ value = self.visit(value, *args, **kwargs)
+ if value is None:
+ continue
+ elif not isinstance(value, Node):
+ new_values.extend(value)
+ continue
+ new_values.append(value)
+ old_value[:] = new_values
+ elif isinstance(old_value, Node):
+ new_node = self.visit(old_value, *args, **kwargs)
+ if new_node is None:
+ delattr(node, field)
+ else:
+ setattr(node, field, new_node)
+ return node
+
+ def visit_list(self, node, *args, **kwargs):
+ """As transformers may return lists in some places this method
+ can be used to enforce a list as return value.
+ """
+ rv = self.visit(node, *args, **kwargs)
+ if not isinstance(rv, list):
+ rv = [rv]
+ return rv
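A small sketch of the visitor pattern described above (the collector
class is a hypothetical example, not part of the module)::

    from jinja2 import Environment
    from jinja2.visitor import NodeVisitor

    class NameCollector(NodeVisitor):
        """Collect every ``Name`` node referenced by a template."""

        def __init__(self):
            self.names = set()

        def visit_Name(self, node):
            self.names.add(node.name)

    ast = Environment().parse("{{ user.name }} has {{ count }} items")
    collector = NameCollector()
    collector.visit(ast)
    print(sorted(collector.names))  # ['count', 'user']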
diff --git a/third_party/python/MarkupSafe/CHANGES.rst b/third_party/python/MarkupSafe/CHANGES.rst
new file mode 100644
index 0000000000..e50fc98021
--- /dev/null
+++ b/third_party/python/MarkupSafe/CHANGES.rst
@@ -0,0 +1,112 @@
+Version 2.0.1
+-------------
+
+Released 2021-05-18
+
+- Mark top-level names as exported so type checking understands
+ imports in user projects. :pr:`215`
+- Fix some types that weren't available in Python 3.6.0. :pr:`215`
+
+
+Version 2.0.0
+-------------
+
+Released 2021-05-11
+
+- Drop Python 2.7, 3.4, and 3.5 support.
+- ``Markup.unescape`` uses :func:`html.unescape` to support HTML5
+ character references. :pr:`117`
+- Add type annotations for static typing tools. :pr:`149`
+
+
+Version 1.1.1
+-------------
+
+Released 2019-02-23
+
+- Fix segfault when ``__html__`` method raises an exception when using
+ the C speedups. The exception is now propagated correctly. :pr:`109`
+
+
+Version 1.1.0
+-------------
+
+Released 2018-11-05
+
+- Drop support for Python 2.6 and 3.3.
+- Build wheels for Linux, Mac, and Windows, allowing systems without
+ a compiler to take advantage of the C extension speedups. :pr:`104`
+- Use newer CPython API on Python 3, resulting in a 1.5x speedup.
+  :pr:`64`
+- ``escape`` wraps ``__html__`` result in ``Markup``, consistent with
+ documented behavior. :pr:`69`
+
+
+Version 1.0
+-----------
+
+Released 2017-03-07
+
+- Fixed custom types not invoking ``__unicode__`` when used with
+ ``format()``.
+- Added ``__version__`` module attribute.
+- Improve unescape code to leave lone ampersands alone.
+
+
+Version 0.18
+------------
+
+Released 2013-05-22
+
+- Fixed ``__mul__`` and string splitting on Python 3.
+
+
+Version 0.17
+------------
+
+Released 2013-05-21
+
+- Fixed a bug with broken interpolation on tuples.
+
+
+Version 0.16
+------------
+
+Released 2013-05-20
+
+- Improved Python 3 support and removed 2to3.
+- Removed support for Python 3.2 and 2.5.
+
+
+Version 0.15
+------------
+
+Released 2011-07-20
+
+- Fixed a typo that caused the library to fail to install on PyPy and
+ Jython.
+
+
+Version 0.14
+------------
+
+Released 2011-07-20
+
+- Release fix for 0.13.
+
+
+Version 0.13
+------------
+
+Released 2011-07-20
+
+- Do not attempt to compile extension for PyPy or Jython.
+- Work around some 64bit Windows issues.
+
+
+Version 0.12
+------------
+
+Released 2011-02-17
+
+- Improved PyPy compatibility.
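The 2.0.0 entry above (``Markup.unescape`` now delegating to
:func:`html.unescape`) means HTML5 named character references round-trip; a
quick sketch of the behavior:

.. code-block:: python

    from markupsafe import Markup

    # "&hellip;" is an HTML5 named reference that older entity tables
    # did not know; html.unescape resolves it.
    print(Markup("Fish &amp; Chips &hellip;").unescape())  # Fish & Chips …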
diff --git a/third_party/python/MarkupSafe/LICENSE.rst b/third_party/python/MarkupSafe/LICENSE.rst
new file mode 100644
index 0000000000..9d227a0cc4
--- /dev/null
+++ b/third_party/python/MarkupSafe/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/MarkupSafe/MANIFEST.in b/third_party/python/MarkupSafe/MANIFEST.in
new file mode 100644
index 0000000000..7dfa3f60c7
--- /dev/null
+++ b/third_party/python/MarkupSafe/MANIFEST.in
@@ -0,0 +1,9 @@
+include CHANGES.rst
+include tox.ini
+include requirements/*.txt
+graft docs
+prune docs/_build
+graft tests
+include src/markupsafe/py.typed
+include src/markupsafe/*.pyi
+global-exclude *.pyc
diff --git a/third_party/python/MarkupSafe/PKG-INFO b/third_party/python/MarkupSafe/PKG-INFO
new file mode 100644
index 0000000000..bd2f99cba8
--- /dev/null
+++ b/third_party/python/MarkupSafe/PKG-INFO
@@ -0,0 +1,98 @@
+Metadata-Version: 2.1
+Name: MarkupSafe
+Version: 2.0.1
+Summary: Safely add untrusted strings to HTML/XML markup.
+Home-page: https://palletsprojects.com/p/markupsafe/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Documentation, https://markupsafe.palletsprojects.com/
+Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
+Project-URL: Source Code, https://github.com/pallets/markupsafe/
+Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
+Project-URL: Twitter, https://twitter.com/PalletsTeam
+Project-URL: Chat, https://discord.gg/pallets
+Description: MarkupSafe
+ ==========
+
+ MarkupSafe implements a text object that escapes characters so it is
+ safe to use in HTML and XML. Characters that have special meanings are
+ replaced so that they display as the actual characters. This mitigates
+ injection attacks, meaning untrusted user input can safely be displayed
+ on a page.
+
+
+ Installing
+ ----------
+
+ Install and update using `pip`_:
+
+ .. code-block:: text
+
+ pip install -U MarkupSafe
+
+ .. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+ Examples
+ --------
+
+ .. code-block:: pycon
+
+ >>> from markupsafe import Markup, escape
+
+ >>> # escape replaces special characters and wraps in Markup
+ >>> escape("<script>alert(document.cookie);</script>")
+ Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
+ >>> Markup("<strong>Hello</strong>")
+ Markup('<strong>Hello</strong>')
+
+ >>> escape(Markup("<strong>Hello</strong>"))
+ Markup('<strong>Hello</strong>')
+
+ >>> # Markup is a str subclass
+ >>> # methods and operators escape their arguments
+ >>> template = Markup("Hello <em>{name}</em>")
+ >>> template.format(name='"World"')
+ Markup('Hello <em>&#34;World&#34;</em>')
+
+
+ Donate
+ ------
+
+ The Pallets organization develops and supports MarkupSafe and other
+ popular packages. In order to grow the community of contributors and
+ users, and allow the maintainers to devote more time to the projects,
+ `please donate today`_.
+
+ .. _please donate today: https://palletsprojects.com/donate
+
+
+ Links
+ -----
+
+ - Documentation: https://markupsafe.palletsprojects.com/
+ - Changes: https://markupsafe.palletsprojects.com/changes/
+ - PyPI Releases: https://pypi.org/project/MarkupSafe/
+ - Source Code: https://github.com/pallets/markupsafe/
+ - Issue Tracker: https://github.com/pallets/markupsafe/issues/
+ - Website: https://palletsprojects.com/p/markupsafe/
+ - Twitter: https://twitter.com/PalletsTeam
+ - Chat: https://discord.gg/pallets
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
diff --git a/third_party/python/MarkupSafe/README.rst b/third_party/python/MarkupSafe/README.rst
new file mode 100644
index 0000000000..273e1db40f
--- /dev/null
+++ b/third_party/python/MarkupSafe/README.rst
@@ -0,0 +1,69 @@
+MarkupSafe
+==========
+
+MarkupSafe implements a text object that escapes characters so it is
+safe to use in HTML and XML. Characters that have special meanings are
+replaced so that they display as the actual characters. This mitigates
+injection attacks, meaning untrusted user input can safely be displayed
+on a page.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+ pip install -U MarkupSafe
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+Examples
+--------
+
+.. code-block:: pycon
+
+ >>> from markupsafe import Markup, escape
+
+ >>> # escape replaces special characters and wraps in Markup
+ >>> escape("<script>alert(document.cookie);</script>")
+ Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
+ >>> Markup("<strong>Hello</strong>")
+ Markup('<strong>Hello</strong>')
+
+ >>> escape(Markup("<strong>Hello</strong>"))
+ Markup('<strong>Hello</strong>')
+
+ >>> # Markup is a str subclass
+ >>> # methods and operators escape their arguments
+ >>> template = Markup("Hello <em>{name}</em>")
+ >>> template.format(name='"World"')
+ Markup('Hello <em>&#34;World&#34;</em>')
+
+
+Donate
+------
+
+The Pallets organization develops and supports MarkupSafe and other
+popular packages. In order to grow the community of contributors and
+users, and allow the maintainers to devote more time to the projects,
+`please donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+- Documentation: https://markupsafe.palletsprojects.com/
+- Changes: https://markupsafe.palletsprojects.com/changes/
+- PyPI Releases: https://pypi.org/project/MarkupSafe/
+- Source Code: https://github.com/pallets/markupsafe/
+- Issue Tracker: https://github.com/pallets/markupsafe/issues/
+- Website: https://palletsprojects.com/p/markupsafe/
+- Twitter: https://twitter.com/PalletsTeam
+- Chat: https://discord.gg/pallets
diff --git a/third_party/python/MarkupSafe/requirements/dev.txt b/third_party/python/MarkupSafe/requirements/dev.txt
new file mode 100644
index 0000000000..0dbc2bc860
--- /dev/null
+++ b/third_party/python/MarkupSafe/requirements/dev.txt
@@ -0,0 +1,132 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+# pip-compile requirements/dev.in
+#
+alabaster==0.7.12
+ # via sphinx
+appdirs==1.4.4
+ # via virtualenv
+attrs==21.2.0
+ # via pytest
+babel==2.9.1
+ # via sphinx
+certifi==2020.12.5
+ # via requests
+cfgv==3.2.0
+ # via pre-commit
+chardet==4.0.0
+ # via requests
+click==8.0.0
+ # via pip-tools
+distlib==0.3.1
+ # via virtualenv
+docutils==0.17.1
+ # via sphinx
+filelock==3.0.12
+ # via
+ # tox
+ # virtualenv
+identify==2.2.4
+ # via pre-commit
+idna==2.10
+ # via requests
+imagesize==1.2.0
+ # via sphinx
+iniconfig==1.1.1
+ # via pytest
+jinja2==3.0.0
+ # via sphinx
+markupsafe==2.0.0
+ # via jinja2
+mypy-extensions==0.4.3
+ # via mypy
+mypy==0.812
+ # via -r requirements/typing.in
+nodeenv==1.6.0
+ # via pre-commit
+packaging==20.9
+ # via
+ # pallets-sphinx-themes
+ # pytest
+ # sphinx
+ # tox
+pallets-sphinx-themes==2.0.0
+ # via -r requirements/docs.in
+pep517==0.10.0
+ # via pip-tools
+pip-tools==6.1.0
+ # via -r requirements/dev.in
+pluggy==0.13.1
+ # via
+ # pytest
+ # tox
+pre-commit==2.12.1
+ # via -r requirements/dev.in
+py==1.10.0
+ # via
+ # pytest
+ # tox
+pygments==2.9.0
+ # via sphinx
+pyparsing==2.4.7
+ # via packaging
+pytest==6.2.4
+ # via -r requirements/tests.in
+pytz==2021.1
+ # via babel
+pyyaml==5.4.1
+ # via pre-commit
+requests==2.25.1
+ # via sphinx
+six==1.16.0
+ # via
+ # tox
+ # virtualenv
+snowballstemmer==2.1.0
+ # via sphinx
+sphinx-issues==1.2.0
+ # via -r requirements/docs.in
+git+https://github.com/sphinx-doc/sphinx.git@96dbe5e3
+ # via
+ # -r requirements/docs.in
+ # pallets-sphinx-themes
+ # sphinx-issues
+ # sphinxcontrib-log-cabinet
+sphinxcontrib-applehelp==1.0.2
+ # via sphinx
+sphinxcontrib-devhelp==1.0.2
+ # via sphinx
+sphinxcontrib-htmlhelp==1.0.3
+ # via sphinx
+sphinxcontrib-jsmath==1.0.1
+ # via sphinx
+sphinxcontrib-log-cabinet==1.0.1
+ # via -r requirements/docs.in
+sphinxcontrib-qthelp==1.0.3
+ # via sphinx
+sphinxcontrib-serializinghtml==1.1.4
+ # via sphinx
+toml==0.10.2
+ # via
+ # pep517
+ # pre-commit
+ # pytest
+ # tox
+tox==3.23.1
+ # via -r requirements/dev.in
+typed-ast==1.4.3
+ # via mypy
+typing-extensions==3.10.0.0
+ # via mypy
+urllib3==1.26.4
+ # via requests
+virtualenv==20.4.6
+ # via
+ # pre-commit
+ # tox
+
+# The following packages are considered to be unsafe in a requirements file:
+# pip
+# setuptools
diff --git a/third_party/python/MarkupSafe/requirements/docs.txt b/third_party/python/MarkupSafe/requirements/docs.txt
new file mode 100644
index 0000000000..ec9cfcb668
--- /dev/null
+++ b/third_party/python/MarkupSafe/requirements/docs.txt
@@ -0,0 +1,67 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+# pip-compile requirements/docs.in
+#
+alabaster==0.7.12
+ # via sphinx
+babel==2.9.1
+ # via sphinx
+certifi==2020.12.5
+ # via requests
+chardet==4.0.0
+ # via requests
+docutils==0.17.1
+ # via sphinx
+idna==2.10
+ # via requests
+imagesize==1.2.0
+ # via sphinx
+jinja2==3.0.0
+ # via sphinx
+markupsafe==2.0.0
+ # via jinja2
+packaging==20.9
+ # via
+ # pallets-sphinx-themes
+ # sphinx
+pallets-sphinx-themes==2.0.0
+ # via -r requirements/docs.in
+pygments==2.9.0
+ # via sphinx
+pyparsing==2.4.7
+ # via packaging
+pytz==2021.1
+ # via babel
+requests==2.25.1
+ # via sphinx
+snowballstemmer==2.1.0
+ # via sphinx
+sphinx-issues==1.2.0
+ # via -r requirements/docs.in
+git+https://github.com/sphinx-doc/sphinx.git@96dbe5e3
+ # via
+ # -r requirements/docs.in
+ # pallets-sphinx-themes
+ # sphinx-issues
+ # sphinxcontrib-log-cabinet
+sphinxcontrib-applehelp==1.0.2
+ # via sphinx
+sphinxcontrib-devhelp==1.0.2
+ # via sphinx
+sphinxcontrib-htmlhelp==1.0.3
+ # via sphinx
+sphinxcontrib-jsmath==1.0.1
+ # via sphinx
+sphinxcontrib-log-cabinet==1.0.1
+ # via -r requirements/docs.in
+sphinxcontrib-qthelp==1.0.3
+ # via sphinx
+sphinxcontrib-serializinghtml==1.1.4
+ # via sphinx
+urllib3==1.26.4
+ # via requests
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
diff --git a/third_party/python/MarkupSafe/requirements/tests.txt b/third_party/python/MarkupSafe/requirements/tests.txt
new file mode 100644
index 0000000000..4ff31e3187
--- /dev/null
+++ b/third_party/python/MarkupSafe/requirements/tests.txt
@@ -0,0 +1,22 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+# pip-compile requirements/tests.in
+#
+attrs==21.2.0
+ # via pytest
+iniconfig==1.1.1
+ # via pytest
+packaging==20.9
+ # via pytest
+pluggy==0.13.1
+ # via pytest
+py==1.10.0
+ # via pytest
+pyparsing==2.4.7
+ # via packaging
+pytest==6.2.4
+ # via -r requirements/tests.in
+toml==0.10.2
+ # via pytest
diff --git a/third_party/python/MarkupSafe/requirements/typing.txt b/third_party/python/MarkupSafe/requirements/typing.txt
new file mode 100644
index 0000000000..0e342aaad8
--- /dev/null
+++ b/third_party/python/MarkupSafe/requirements/typing.txt
@@ -0,0 +1,14 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+# pip-compile requirements/typing.in
+#
+mypy-extensions==0.4.3
+ # via mypy
+mypy==0.812
+ # via -r requirements/typing.in
+typed-ast==1.4.3
+ # via mypy
+typing-extensions==3.10.0.0
+ # via mypy
diff --git a/third_party/python/MarkupSafe/setup.cfg b/third_party/python/MarkupSafe/setup.cfg
new file mode 100644
index 0000000000..1bbc1d4ceb
--- /dev/null
+++ b/third_party/python/MarkupSafe/setup.cfg
@@ -0,0 +1,86 @@
+[metadata]
+name = MarkupSafe
+version = attr: markupsafe.__version__
+url = https://palletsprojects.com/p/markupsafe/
+project_urls =
+ Donate = https://palletsprojects.com/donate
+ Documentation = https://markupsafe.palletsprojects.com/
+ Changes = https://markupsafe.palletsprojects.com/changes/
+ Source Code = https://github.com/pallets/markupsafe/
+ Issue Tracker = https://github.com/pallets/markupsafe/issues/
+ Twitter = https://twitter.com/PalletsTeam
+ Chat = https://discord.gg/pallets
+license = BSD-3-Clause
+license_files = LICENSE.rst
+author = Armin Ronacher
+author_email = armin.ronacher@active-4.com
+maintainer = Pallets
+maintainer_email = contact@palletsprojects.com
+description = Safely add untrusted strings to HTML/XML markup.
+long_description = file: README.rst
+long_description_content_type = text/x-rst
+classifiers =
+ Development Status :: 5 - Production/Stable
+ Environment :: Web Environment
+ Intended Audience :: Developers
+ License :: OSI Approved :: BSD License
+ Operating System :: OS Independent
+ Programming Language :: Python
+ Topic :: Internet :: WWW/HTTP :: Dynamic Content
+ Topic :: Text Processing :: Markup :: HTML
+
+[options]
+packages = find:
+package_dir = = src
+include_package_data = true
+python_requires = >= 3.6
+
+[options.packages.find]
+where = src
+
+[tool:pytest]
+testpaths = tests
+filterwarnings =
+ error
+
+[coverage:run]
+branch = true
+source =
+ markupsafe
+ tests
+
+[coverage:paths]
+source =
+ src
+ */site-packages
+
+[flake8]
+select = B, E, F, W, B9
+ignore =
+ E203
+ E501
+ E722
+ W503
+max-line-length = 80
+
+[mypy]
+files = src/markupsafe
+python_version = 3.6
+disallow_subclassing_any = True
+disallow_untyped_calls = True
+disallow_untyped_defs = True
+disallow_incomplete_defs = True
+no_implicit_optional = True
+local_partial_types = True
+no_implicit_reexport = True
+strict_equality = True
+warn_redundant_casts = True
+warn_unused_configs = True
+warn_unused_ignores = True
+warn_return_any = True
+warn_unreachable = True
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/third_party/python/MarkupSafe/setup.py b/third_party/python/MarkupSafe/setup.py
new file mode 100644
index 0000000000..c6ee5bf176
--- /dev/null
+++ b/third_party/python/MarkupSafe/setup.py
@@ -0,0 +1,81 @@
+import os
+import platform
+import sys
+from distutils.errors import CCompilerError
+from distutils.errors import DistutilsExecError
+from distutils.errors import DistutilsPlatformError
+
+from setuptools import Extension
+from setuptools import setup
+from setuptools.command.build_ext import build_ext
+
+ext_modules = [Extension("markupsafe._speedups", ["src/markupsafe/_speedups.c"])]
+
+
+class BuildFailed(Exception):
+ pass
+
+
+class ve_build_ext(build_ext):
+ """This class allows C extension building to fail."""
+
+ def run(self):
+ try:
+ build_ext.run(self)
+ except DistutilsPlatformError:
+ raise BuildFailed()
+
+ def build_extension(self, ext):
+ try:
+ build_ext.build_extension(self, ext)
+ except (CCompilerError, DistutilsExecError, DistutilsPlatformError):
+ raise BuildFailed()
+ except ValueError:
+ # this can happen on Windows 64 bit, see Python issue 7511
+ if "'path'" in str(sys.exc_info()[1]): # works with Python 2 and 3
+ raise BuildFailed()
+ raise
+
+
+def run_setup(with_binary):
+ setup(
+ name="MarkupSafe",
+ cmdclass={"build_ext": ve_build_ext},
+ ext_modules=ext_modules if with_binary else [],
+ )
+
+
+def show_message(*lines):
+ print("=" * 74)
+ for line in lines:
+ print(line)
+ print("=" * 74)
+
+
+supports_speedups = platform.python_implementation() not in {"PyPy", "Jython"}
+
+if os.environ.get("CIBUILDWHEEL", "0") == "1" and supports_speedups:
+ run_setup(True)
+elif supports_speedups:
+ try:
+ run_setup(True)
+ except BuildFailed:
+ show_message(
+ "WARNING: The C extension could not be compiled, speedups"
+ " are not enabled.",
+ "Failure information, if any, is above.",
+ "Retrying the build without the C extension now.",
+ )
+ run_setup(False)
+ show_message(
+ "WARNING: The C extension could not be compiled, speedups"
+ " are not enabled.",
+ "Plain-Python build succeeded.",
+ )
+else:
+ run_setup(False)
+ show_message(
+ "WARNING: C extensions are not supported on this Python"
+ " platform, speedups are not enabled.",
+ "Plain-Python build succeeded.",
+ )
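Because of the fallback logic above, an installed MarkupSafe may or may not
carry the compiled speedups. A quick way to check which implementation ended
up being used (a sketch):

.. code-block:: python

    import markupsafe

    # 'markupsafe._speedups' if the C extension compiled during install,
    # 'markupsafe._native' if the pure-Python fallback was used.
    print(markupsafe.escape.__module__)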
diff --git a/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/PKG-INFO b/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/PKG-INFO
new file mode 100644
index 0000000000..bd2f99cba8
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/PKG-INFO
@@ -0,0 +1,98 @@
+Metadata-Version: 2.1
+Name: MarkupSafe
+Version: 2.0.1
+Summary: Safely add untrusted strings to HTML/XML markup.
+Home-page: https://palletsprojects.com/p/markupsafe/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Donate, https://palletsprojects.com/donate
+Project-URL: Documentation, https://markupsafe.palletsprojects.com/
+Project-URL: Changes, https://markupsafe.palletsprojects.com/changes/
+Project-URL: Source Code, https://github.com/pallets/markupsafe/
+Project-URL: Issue Tracker, https://github.com/pallets/markupsafe/issues/
+Project-URL: Twitter, https://twitter.com/PalletsTeam
+Project-URL: Chat, https://discord.gg/pallets
+Description: MarkupSafe
+ ==========
+
+ MarkupSafe implements a text object that escapes characters so it is
+ safe to use in HTML and XML. Characters that have special meanings are
+ replaced so that they display as the actual characters. This mitigates
+ injection attacks, meaning untrusted user input can safely be displayed
+ on a page.
+
+
+ Installing
+ ----------
+
+ Install and update using `pip`_:
+
+ .. code-block:: text
+
+ pip install -U MarkupSafe
+
+ .. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+ Examples
+ --------
+
+ .. code-block:: pycon
+
+ >>> from markupsafe import Markup, escape
+
+ >>> # escape replaces special characters and wraps in Markup
+ >>> escape("<script>alert(document.cookie);</script>")
+ Markup('&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+
+ >>> # wrap in Markup to mark text "safe" and prevent escaping
+ >>> Markup("<strong>Hello</strong>")
+ Markup('<strong>Hello</strong>')
+
+ >>> escape(Markup("<strong>Hello</strong>"))
+ Markup('<strong>Hello</strong>')
+
+ >>> # Markup is a str subclass
+ >>> # methods and operators escape their arguments
+ >>> template = Markup("Hello <em>{name}</em>")
+ >>> template.format(name='"World"')
+ Markup('Hello <em>&#34;World&#34;</em>')
+
+
+ Donate
+ ------
+
+ The Pallets organization develops and supports MarkupSafe and other
+ popular packages. In order to grow the community of contributors and
+ users, and allow the maintainers to devote more time to the projects,
+ `please donate today`_.
+
+ .. _please donate today: https://palletsprojects.com/donate
+
+
+ Links
+ -----
+
+ - Documentation: https://markupsafe.palletsprojects.com/
+ - Changes: https://markupsafe.palletsprojects.com/changes/
+ - PyPI Releases: https://pypi.org/project/MarkupSafe/
+ - Source Code: https://github.com/pallets/markupsafe/
+ - Issue Tracker: https://github.com/pallets/markupsafe/issues/
+ - Website: https://palletsprojects.com/p/markupsafe/
+ - Twitter: https://twitter.com/PalletsTeam
+ - Chat: https://discord.gg/pallets
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
diff --git a/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/SOURCES.txt b/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..3f8c1a146d
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/SOURCES.txt
@@ -0,0 +1,34 @@
+CHANGES.rst
+LICENSE.rst
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+tox.ini
+docs/Makefile
+docs/changes.rst
+docs/conf.py
+docs/escaping.rst
+docs/formatting.rst
+docs/html.rst
+docs/index.rst
+docs/license.rst
+docs/make.bat
+requirements/dev.txt
+requirements/docs.txt
+requirements/tests.txt
+requirements/typing.txt
+src/MarkupSafe.egg-info/PKG-INFO
+src/MarkupSafe.egg-info/SOURCES.txt
+src/MarkupSafe.egg-info/dependency_links.txt
+src/MarkupSafe.egg-info/top_level.txt
+src/markupsafe/__init__.py
+src/markupsafe/_native.py
+src/markupsafe/_speedups.c
+src/markupsafe/_speedups.pyi
+src/markupsafe/py.typed
+tests/conftest.py
+tests/test_escape.py
+tests/test_exception_custom_html.py
+tests/test_leak.py
+tests/test_markupsafe.py \ No newline at end of file
diff --git a/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/dependency_links.txt b/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/top_level.txt b/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/top_level.txt
new file mode 100644
index 0000000000..75bf729258
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/MarkupSafe.egg-info/top_level.txt
@@ -0,0 +1 @@
+markupsafe
diff --git a/third_party/python/MarkupSafe/src/markupsafe/__init__.py b/third_party/python/MarkupSafe/src/markupsafe/__init__.py
new file mode 100644
index 0000000000..d331ac3622
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/markupsafe/__init__.py
@@ -0,0 +1,288 @@
+import functools
+import re
+import string
+import typing as t
+
+if t.TYPE_CHECKING:
+ import typing_extensions as te
+
+ class HasHTML(te.Protocol):
+ def __html__(self) -> str:
+ pass
+
+
+__version__ = "2.0.1"
+
+_striptags_re = re.compile(r"(<!--.*?-->|<[^>]*>)")
+
+
+def _simple_escaping_wrapper(name: str) -> t.Callable[..., "Markup"]:
+ orig = getattr(str, name)
+
+ @functools.wraps(orig)
+ def wrapped(self: "Markup", *args: t.Any, **kwargs: t.Any) -> "Markup":
+ args = _escape_argspec(list(args), enumerate(args), self.escape) # type: ignore
+ _escape_argspec(kwargs, kwargs.items(), self.escape)
+ return self.__class__(orig(self, *args, **kwargs))
+
+ return wrapped
+
+
+class Markup(str):
+ """A string that is ready to be safely inserted into an HTML or XML
+ document, either because it was escaped or because it was marked
+ safe.
+
+ Passing an object to the constructor converts it to text and wraps
+ it to mark it safe without escaping. To escape the text, use the
+ :meth:`escape` class method instead.
+
+ >>> Markup("Hello, <em>World</em>!")
+ Markup('Hello, <em>World</em>!')
+ >>> Markup(42)
+ Markup('42')
+ >>> Markup.escape("Hello, <em>World</em>!")
+ Markup('Hello, &lt;em&gt;World&lt;/em&gt;!')
+
+ This implements the ``__html__()`` interface that some frameworks
+ use. Passing an object that implements ``__html__()`` will wrap the
+ output of that method, marking it safe.
+
+ >>> class Foo:
+ ... def __html__(self):
+ ... return '<a href="/foo">foo</a>'
+ ...
+ >>> Markup(Foo())
+ Markup('<a href="/foo">foo</a>')
+
+ This is a subclass of :class:`str`. It has the same methods, but
+ escapes their arguments and returns a ``Markup`` instance.
+
+ >>> Markup("<em>%s</em>") % ("foo & bar",)
+ Markup('<em>foo &amp; bar</em>')
+ >>> Markup("<em>Hello</em> ") + "<foo>"
+ Markup('<em>Hello</em> &lt;foo&gt;')
+ """
+
+ __slots__ = ()
+
+ def __new__(
+ cls, base: t.Any = "", encoding: t.Optional[str] = None, errors: str = "strict"
+ ) -> "Markup":
+ if hasattr(base, "__html__"):
+ base = base.__html__()
+
+ if encoding is None:
+ return super().__new__(cls, base)
+
+ return super().__new__(cls, base, encoding, errors)
+
+ def __html__(self) -> "Markup":
+ return self
+
+ def __add__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
+ if isinstance(other, str) or hasattr(other, "__html__"):
+ return self.__class__(super().__add__(self.escape(other)))
+
+ return NotImplemented
+
+ def __radd__(self, other: t.Union[str, "HasHTML"]) -> "Markup":
+ if isinstance(other, str) or hasattr(other, "__html__"):
+ return self.escape(other).__add__(self)
+
+ return NotImplemented
+
+ def __mul__(self, num: int) -> "Markup":
+ if isinstance(num, int):
+ return self.__class__(super().__mul__(num))
+
+ return NotImplemented # type: ignore
+
+ __rmul__ = __mul__
+
+ def __mod__(self, arg: t.Any) -> "Markup":
+ if isinstance(arg, tuple):
+ arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
+ else:
+ arg = _MarkupEscapeHelper(arg, self.escape)
+
+ return self.__class__(super().__mod__(arg))
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}({super().__repr__()})"
+
+ def join(self, seq: t.Iterable[t.Union[str, "HasHTML"]]) -> "Markup":
+ return self.__class__(super().join(map(self.escape, seq)))
+
+ join.__doc__ = str.join.__doc__
+
+ def split( # type: ignore
+ self, sep: t.Optional[str] = None, maxsplit: int = -1
+ ) -> t.List["Markup"]:
+ return [self.__class__(v) for v in super().split(sep, maxsplit)]
+
+ split.__doc__ = str.split.__doc__
+
+ def rsplit( # type: ignore
+ self, sep: t.Optional[str] = None, maxsplit: int = -1
+ ) -> t.List["Markup"]:
+ return [self.__class__(v) for v in super().rsplit(sep, maxsplit)]
+
+ rsplit.__doc__ = str.rsplit.__doc__
+
+ def splitlines(self, keepends: bool = False) -> t.List["Markup"]: # type: ignore
+ return [self.__class__(v) for v in super().splitlines(keepends)]
+
+ splitlines.__doc__ = str.splitlines.__doc__
+
+ def unescape(self) -> str:
+ """Convert escaped markup back into a text string. This replaces
+ HTML entities with the characters they represent.
+
+ >>> Markup("Main &raquo; <em>About</em>").unescape()
+ 'Main » <em>About</em>'
+ """
+ from html import unescape
+
+ return unescape(str(self))
+
+ def striptags(self) -> str:
+ """:meth:`unescape` the markup, remove tags, and normalize
+ whitespace to single spaces.
+
+ >>> Markup("Main &raquo;\t<em>About</em>").striptags()
+ 'Main » About'
+ """
+ stripped = " ".join(_striptags_re.sub("", self).split())
+ return Markup(stripped).unescape()
+
+ @classmethod
+ def escape(cls, s: t.Any) -> "Markup":
+ """Escape a string. Calls :func:`escape` and ensures that for
+ subclasses the correct type is returned.
+ """
+ rv = escape(s)
+
+ if rv.__class__ is not cls:
+ return cls(rv)
+
+ return rv
+
+ for method in (
+ "__getitem__",
+ "capitalize",
+ "title",
+ "lower",
+ "upper",
+ "replace",
+ "ljust",
+ "rjust",
+ "lstrip",
+ "rstrip",
+ "center",
+ "strip",
+ "translate",
+ "expandtabs",
+ "swapcase",
+ "zfill",
+ ):
+ locals()[method] = _simple_escaping_wrapper(method)
+
+ del method
+
+ def partition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
+ l, s, r = super().partition(self.escape(sep))
+ cls = self.__class__
+ return cls(l), cls(s), cls(r)
+
+ def rpartition(self, sep: str) -> t.Tuple["Markup", "Markup", "Markup"]:
+ l, s, r = super().rpartition(self.escape(sep))
+ cls = self.__class__
+ return cls(l), cls(s), cls(r)
+
+ def format(self, *args: t.Any, **kwargs: t.Any) -> "Markup":
+ formatter = EscapeFormatter(self.escape)
+ return self.__class__(formatter.vformat(self, args, kwargs))
+
+ def __html_format__(self, format_spec: str) -> "Markup":
+ if format_spec:
+ raise ValueError("Unsupported format specification for Markup.")
+
+ return self
+
+
+class EscapeFormatter(string.Formatter):
+ __slots__ = ("escape",)
+
+ def __init__(self, escape: t.Callable[[t.Any], Markup]) -> None:
+ self.escape = escape
+ super().__init__()
+
+ def format_field(self, value: t.Any, format_spec: str) -> str:
+ if hasattr(value, "__html_format__"):
+ rv = value.__html_format__(format_spec)
+ elif hasattr(value, "__html__"):
+ if format_spec:
+ raise ValueError(
+ f"Format specifier {format_spec} given, but {type(value)} does not"
+ " define __html_format__. A class that defines __html__ must define"
+ " __html_format__ to work with format specifiers."
+ )
+ rv = value.__html__()
+ else:
+ # We need to make sure the format spec is str here as
+ # otherwise the wrong callback methods are invoked.
+ rv = string.Formatter.format_field(self, value, str(format_spec))
+ return str(self.escape(rv))
+
+
+_ListOrDict = t.TypeVar("_ListOrDict", list, dict)
+
+
+def _escape_argspec(
+ obj: _ListOrDict, iterable: t.Iterable[t.Any], escape: t.Callable[[t.Any], Markup]
+) -> _ListOrDict:
+ """Helper for various string-wrapped functions."""
+ for key, value in iterable:
+ if isinstance(value, str) or hasattr(value, "__html__"):
+ obj[key] = escape(value)
+
+ return obj
+
+
+class _MarkupEscapeHelper:
+ """Helper for :meth:`Markup.__mod__`."""
+
+ __slots__ = ("obj", "escape")
+
+ def __init__(self, obj: t.Any, escape: t.Callable[[t.Any], Markup]) -> None:
+ self.obj = obj
+ self.escape = escape
+
+ def __getitem__(self, item: t.Any) -> "_MarkupEscapeHelper":
+ return _MarkupEscapeHelper(self.obj[item], self.escape)
+
+ def __str__(self) -> str:
+ return str(self.escape(self.obj))
+
+ def __repr__(self) -> str:
+ return str(self.escape(repr(self.obj)))
+
+ def __int__(self) -> int:
+ return int(self.obj)
+
+ def __float__(self) -> float:
+ return float(self.obj)
+
+
+# circular import
+try:
+ from ._speedups import escape as escape
+ from ._speedups import escape_silent as escape_silent
+ from ._speedups import soft_str as soft_str
+ from ._speedups import soft_unicode
+except ImportError:
+ from ._native import escape as escape
+ from ._native import escape_silent as escape_silent # noqa: F401
+ from ._native import soft_str as soft_str # noqa: F401
+ from ._native import soft_unicode # noqa: F401
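A sketch of the ``__html__`` protocol that ``EscapeFormatter.format_field``
checks for above; the ``User`` class is hypothetical. Note that ``__html__``
returns a ``Markup`` here so the pre-rendered fragment is not escaped a
second time:

.. code-block:: python

    from markupsafe import Markup, escape

    class User:
        def __init__(self, name):
            self.name = name

        def __html__(self):
            # Escape the untrusted field, then mark the assembled
            # fragment as safe by wrapping it in Markup.
            name = escape(self.name)
            return Markup(f'<a href="/u/{name}">{name}</a>')

    template = Markup("<p>Welcome, {user}!</p>")
    print(template.format(user=User("Alice & Bob")))
    # <p>Welcome, <a href="/u/Alice &amp; Bob">Alice &amp; Bob</a>!</p>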
diff --git a/third_party/python/MarkupSafe/src/markupsafe/_native.py b/third_party/python/MarkupSafe/src/markupsafe/_native.py
new file mode 100644
index 0000000000..6f7eb7a8cb
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/markupsafe/_native.py
@@ -0,0 +1,75 @@
+import typing as t
+
+from . import Markup
+
+
+def escape(s: t.Any) -> Markup:
+ """Replace the characters ``&``, ``<``, ``>``, ``'``, and ``"`` in
+ the string with HTML-safe sequences. Use this if you need to display
+ text that might contain such characters in HTML.
+
+ If the object has an ``__html__`` method, it is called and the
+ return value is assumed to already be safe for HTML.
+
+ :param s: An object to be converted to a string and escaped.
+ :return: A :class:`Markup` string with the escaped text.
+ """
+ if hasattr(s, "__html__"):
+ return Markup(s.__html__())
+
+ return Markup(
+ str(s)
+ .replace("&", "&amp;")
+ .replace(">", "&gt;")
+ .replace("<", "&lt;")
+ .replace("'", "&#39;")
+ .replace('"', "&#34;")
+ )
+
+
+def escape_silent(s: t.Optional[t.Any]) -> Markup:
+ """Like :func:`escape` but treats ``None`` as the empty string.
+ Useful with optional values, as otherwise you get the string
+ ``'None'`` when the value is ``None``.
+
+ >>> escape(None)
+ Markup('None')
+ >>> escape_silent(None)
+ Markup('')
+ """
+ if s is None:
+ return Markup()
+
+ return escape(s)
+
+
+def soft_str(s: t.Any) -> str:
+ """Convert an object to a string if it isn't already. This preserves
+ a :class:`Markup` string rather than converting it back to a basic
+ string, so it will still be marked as safe and won't be escaped
+ again.
+
+ >>> value = escape("<User 1>")
+ >>> value
+ Markup('&lt;User 1&gt;')
+ >>> escape(str(value))
+ Markup('&amp;lt;User 1&amp;gt;')
+ >>> escape(soft_str(value))
+ Markup('&lt;User 1&gt;')
+ """
+ if not isinstance(s, str):
+ return str(s)
+
+ return s
+
+
+def soft_unicode(s: t.Any) -> str:
+ import warnings
+
+ warnings.warn(
+ "'soft_unicode' has been renamed to 'soft_str'. The old name"
+ " will be removed in MarkupSafe 2.1.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return soft_str(s)
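The order of the ``replace`` calls in ``escape`` above matters: ``&`` must be
handled first, because every later substitution introduces new ampersands. A
sketch of what would go wrong otherwise:

.. code-block:: python

    s = "<b>"

    # Escaping '&' last re-escapes the ampersands added by '&lt;'/'&gt;':
    wrong = s.replace("<", "&lt;").replace(">", "&gt;").replace("&", "&amp;")
    print(wrong)  # &amp;lt;b&amp;gt;

    # Escaping '&' first, as escape() does, gives the intended result:
    right = s.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
    print(right)  # &lt;b&gt;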
diff --git a/third_party/python/MarkupSafe/src/markupsafe/_speedups.c b/third_party/python/MarkupSafe/src/markupsafe/_speedups.c
new file mode 100644
index 0000000000..44967b1fdc
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/markupsafe/_speedups.c
@@ -0,0 +1,339 @@
+#include <Python.h>
+
+static PyObject* markup;
+
+static int
+init_constants(void)
+{
+ PyObject *module;
+
+ /* import markup type so that we can mark the return value */
+ module = PyImport_ImportModule("markupsafe");
+ if (!module)
+ return 0;
+ markup = PyObject_GetAttrString(module, "Markup");
+ Py_DECREF(module);
+
+ return 1;
+}
+
+#define GET_DELTA(inp, inp_end, delta) \
+ while (inp < inp_end) { \
+ switch (*inp++) { \
+ case '"': \
+ case '\'': \
+ case '&': \
+ delta += 4; \
+ break; \
+ case '<': \
+ case '>': \
+ delta += 3; \
+ break; \
+ } \
+ }
+
+#define DO_ESCAPE(inp, inp_end, outp) \
+ { \
+ Py_ssize_t ncopy = 0; \
+ while (inp < inp_end) { \
+ switch (*inp) { \
+ case '"': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = '#'; \
+ *outp++ = '3'; \
+ *outp++ = '4'; \
+ *outp++ = ';'; \
+ break; \
+ case '\'': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = '#'; \
+ *outp++ = '3'; \
+ *outp++ = '9'; \
+ *outp++ = ';'; \
+ break; \
+ case '&': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'a'; \
+ *outp++ = 'm'; \
+ *outp++ = 'p'; \
+ *outp++ = ';'; \
+ break; \
+ case '<': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'l'; \
+ *outp++ = 't'; \
+ *outp++ = ';'; \
+ break; \
+ case '>': \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ outp += ncopy; ncopy = 0; \
+ *outp++ = '&'; \
+ *outp++ = 'g'; \
+ *outp++ = 't'; \
+ *outp++ = ';'; \
+ break; \
+ default: \
+ ncopy++; \
+ } \
+ inp++; \
+ } \
+ memcpy(outp, inp-ncopy, sizeof(*outp)*ncopy); \
+ }
+
+static PyObject*
+escape_unicode_kind1(PyUnicodeObject *in)
+{
+ Py_UCS1 *inp = PyUnicode_1BYTE_DATA(in);
+ Py_UCS1 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS1 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta,
+ PyUnicode_IS_ASCII(in) ? 127 : 255);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_1BYTE_DATA(in);
+ outp = PyUnicode_1BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+static PyObject*
+escape_unicode_kind2(PyUnicodeObject *in)
+{
+ Py_UCS2 *inp = PyUnicode_2BYTE_DATA(in);
+ Py_UCS2 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS2 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 65535);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_2BYTE_DATA(in);
+ outp = PyUnicode_2BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+
+static PyObject*
+escape_unicode_kind4(PyUnicodeObject *in)
+{
+ Py_UCS4 *inp = PyUnicode_4BYTE_DATA(in);
+ Py_UCS4 *inp_end = inp + PyUnicode_GET_LENGTH(in);
+ Py_UCS4 *outp;
+ PyObject *out;
+ Py_ssize_t delta = 0;
+
+ GET_DELTA(inp, inp_end, delta);
+ if (!delta) {
+ Py_INCREF(in);
+ return (PyObject*)in;
+ }
+
+ out = PyUnicode_New(PyUnicode_GET_LENGTH(in) + delta, 1114111);
+ if (!out)
+ return NULL;
+
+ inp = PyUnicode_4BYTE_DATA(in);
+ outp = PyUnicode_4BYTE_DATA(out);
+ DO_ESCAPE(inp, inp_end, outp);
+ return out;
+}
+
+static PyObject*
+escape_unicode(PyUnicodeObject *in)
+{
+ if (PyUnicode_READY(in))
+ return NULL;
+
+ switch (PyUnicode_KIND(in)) {
+ case PyUnicode_1BYTE_KIND:
+ return escape_unicode_kind1(in);
+ case PyUnicode_2BYTE_KIND:
+ return escape_unicode_kind2(in);
+ case PyUnicode_4BYTE_KIND:
+ return escape_unicode_kind4(in);
+ }
+ assert(0); /* shouldn't happen */
+ return NULL;
+}
+
+static PyObject*
+escape(PyObject *self, PyObject *text)
+{
+ static PyObject *id_html;
+ PyObject *s = NULL, *rv = NULL, *html;
+
+ if (id_html == NULL) {
+ id_html = PyUnicode_InternFromString("__html__");
+ if (id_html == NULL) {
+ return NULL;
+ }
+ }
+
+ /* we don't have to escape integers, bools or floats */
+ if (PyLong_CheckExact(text) ||
+ PyFloat_CheckExact(text) || PyBool_Check(text) ||
+ text == Py_None)
+ return PyObject_CallFunctionObjArgs(markup, text, NULL);
+
+ /* if the object has an __html__ method that performs the escaping */
+ html = PyObject_GetAttr(text, id_html);
+ if (html) {
+ s = PyObject_CallObject(html, NULL);
+ Py_DECREF(html);
+ if (s == NULL) {
+ return NULL;
+ }
+ /* Convert to Markup object */
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+ Py_DECREF(s);
+ return rv;
+ }
+
+ /* otherwise make the object unicode if it isn't, then escape */
+ PyErr_Clear();
+ if (!PyUnicode_Check(text)) {
+ PyObject *unicode = PyObject_Str(text);
+ if (!unicode)
+ return NULL;
+ s = escape_unicode((PyUnicodeObject*)unicode);
+ Py_DECREF(unicode);
+ }
+ else
+ s = escape_unicode((PyUnicodeObject*)text);
+
+ /* convert the unicode string into a markup object. */
+ rv = PyObject_CallFunctionObjArgs(markup, (PyObject*)s, NULL);
+ Py_DECREF(s);
+ return rv;
+}
+
+
+static PyObject*
+escape_silent(PyObject *self, PyObject *text)
+{
+ if (text != Py_None)
+ return escape(self, text);
+ return PyObject_CallFunctionObjArgs(markup, NULL);
+}
+
+
+static PyObject*
+soft_str(PyObject *self, PyObject *s)
+{
+ if (!PyUnicode_Check(s))
+ return PyObject_Str(s);
+ Py_INCREF(s);
+ return s;
+}
+
+
+static PyObject*
+soft_unicode(PyObject *self, PyObject *s)
+{
+ PyErr_WarnEx(
+ PyExc_DeprecationWarning,
+ "'soft_unicode' has been renamed to 'soft_str'. The old name"
+ " will be removed in MarkupSafe 2.1.",
+ 2
+ );
+ return soft_str(self, s);
+}
+
+
+static PyMethodDef module_methods[] = {
+ {
+ "escape",
+ (PyCFunction)escape,
+ METH_O,
+ "Replace the characters ``&``, ``<``, ``>``, ``'``, and ``\"`` in"
+ " the string with HTML-safe sequences. Use this if you need to display"
+ " text that might contain such characters in HTML.\n\n"
+ "If the object has an ``__html__`` method, it is called and the"
+ " return value is assumed to already be safe for HTML.\n\n"
+ ":param s: An object to be converted to a string and escaped.\n"
+ ":return: A :class:`Markup` string with the escaped text.\n"
+ },
+ {
+ "escape_silent",
+ (PyCFunction)escape_silent,
+ METH_O,
+ "Like :func:`escape` but treats ``None`` as the empty string."
+ " Useful with optional values, as otherwise you get the string"
+ " ``'None'`` when the value is ``None``.\n\n"
+ ">>> escape(None)\n"
+ "Markup('None')\n"
+ ">>> escape_silent(None)\n"
+ "Markup('')\n"
+ },
+ {
+ "soft_str",
+ (PyCFunction)soft_str,
+ METH_O,
+ "Convert an object to a string if it isn't already. This preserves"
+ " a :class:`Markup` string rather than converting it back to a basic"
+ " string, so it will still be marked as safe and won't be escaped"
+ " again.\n\n"
+ ">>> value = escape(\"<User 1>\")\n"
+ ">>> value\n"
+ "Markup('&lt;User 1&gt;')\n"
+ ">>> escape(str(value))\n"
+ "Markup('&amp;lt;User 1&amp;gt;')\n"
+ ">>> escape(soft_str(value))\n"
+ "Markup('&lt;User 1&gt;')\n"
+ },
+ {
+ "soft_unicode",
+ (PyCFunction)soft_unicode,
+ METH_O,
+ ""
+ },
+ {NULL, NULL, 0, NULL} /* Sentinel */
+};
+
+static struct PyModuleDef module_definition = {
+ PyModuleDef_HEAD_INIT,
+ "markupsafe._speedups",
+ NULL,
+ -1,
+ module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+
+PyMODINIT_FUNC
+PyInit__speedups(void)
+{
+ if (!init_constants())
+ return NULL;
+
+ return PyModule_Create(&module_definition);
+}
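The ``GET_DELTA``/``DO_ESCAPE`` pair above is a two-pass strategy: first
measure how much escaping will grow the string, return the input unchanged
(just incrementing its refcount) if nothing needs escaping, and otherwise
write the escaped copy into a single exact-size allocation. A pure-Python
sketch of the same idea:

.. code-block:: python

    _GROWTH = {'"': 4, "'": 4, "&": 4, "<": 3, ">": 3}
    _REPLACEMENT = {'"': "&#34;", "'": "&#39;", "&": "&amp;",
                    "<": "&lt;", ">": "&gt;"}

    def escape_two_pass(s: str) -> str:
        # Pass 1 (GET_DELTA): how many extra characters will escaping add?
        delta = sum(_GROWTH.get(ch, 0) for ch in s)
        if not delta:
            return s  # nothing to escape; the C code just INCREFs the input
        # Pass 2 (DO_ESCAPE): build the escaped copy in one pass.
        return "".join(_REPLACEMENT.get(ch, ch) for ch in s)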
diff --git a/third_party/python/MarkupSafe/src/markupsafe/_speedups.pyi b/third_party/python/MarkupSafe/src/markupsafe/_speedups.pyi
new file mode 100644
index 0000000000..f673240f6d
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/markupsafe/_speedups.pyi
@@ -0,0 +1,9 @@
+from typing import Any
+from typing import Optional
+
+from . import Markup
+
+def escape(s: Any) -> Markup: ...
+def escape_silent(s: Optional[Any]) -> Markup: ...
+def soft_str(s: Any) -> str: ...
+def soft_unicode(s: Any) -> str: ...
diff --git a/third_party/python/MarkupSafe/src/markupsafe/py.typed b/third_party/python/MarkupSafe/src/markupsafe/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/MarkupSafe/src/markupsafe/py.typed
diff --git a/third_party/python/MarkupSafe/tox.ini b/third_party/python/MarkupSafe/tox.ini
new file mode 100644
index 0000000000..de68730f28
--- /dev/null
+++ b/third_party/python/MarkupSafe/tox.ini
@@ -0,0 +1,24 @@
+[tox]
+envlist =
+ py{39,38,37,36,py3}
+ style
+ typing
+ docs
+skip_missing_interpreters = true
+
+[testenv]
+deps = -r requirements/tests.txt
+commands = pytest -v --tb=short --basetemp={envtmpdir} {posargs}
+
+[testenv:style]
+deps = pre-commit
+skip_install = true
+commands = pre-commit run --all-files --show-diff-on-failure
+
+[testenv:typing]
+deps = -r requirements/typing.txt
+commands = mypy
+
+[testenv:docs]
+deps = -r requirements/docs.txt
+commands = sphinx-build -W -b html -d {envtmpdir}/doctrees docs {envtmpdir}/html
diff --git a/third_party/python/PyYAML/CHANGES b/third_party/python/PyYAML/CHANGES
new file mode 100644
index 0000000000..8d647a597c
--- /dev/null
+++ b/third_party/python/PyYAML/CHANGES
@@ -0,0 +1,254 @@
+
+For a complete changelog, see:
+
+* https://github.com/yaml/pyyaml/commits/
+* https://bitbucket.org/xi/pyyaml/commits/
+
+5.4.1 (2021-01-20)
+
+* https://github.com/yaml/pyyaml/pull/480 -- Fix stub compat with older pyyaml versions that may unwittingly load it
+
+5.4 (2021-01-19)
+
+* https://github.com/yaml/pyyaml/pull/407 -- Build modernization, remove distutils, fix metadata, build wheels, CI to GHA
+* https://github.com/yaml/pyyaml/pull/472 -- Fix for CVE-2020-14343, moves arbitrary python tags to UnsafeLoader
+* https://github.com/yaml/pyyaml/pull/441 -- Fix memory leak in implicit resolver setup
+* https://github.com/yaml/pyyaml/pull/392 -- Fix py2 copy support for timezone objects
+* https://github.com/yaml/pyyaml/pull/378 -- Fix compatibility with Jython
+
+5.3.1 (2020-03-18)
+
+* https://github.com/yaml/pyyaml/pull/386 -- Prevents arbitrary code execution during python/object/new constructor
+
+5.3 (2020-01-06)
+
+* https://github.com/yaml/pyyaml/pull/290 -- Use `is` instead of equality for comparing with `None`
+* https://github.com/yaml/pyyaml/pull/270 -- Fix typos and stylistic nit
+* https://github.com/yaml/pyyaml/pull/309 -- Fix up small typo
+* https://github.com/yaml/pyyaml/pull/161 -- Fix handling of __slots__
+* https://github.com/yaml/pyyaml/pull/358 -- Allow calling add_multi_constructor with None
+* https://github.com/yaml/pyyaml/pull/285 -- Add use of safe_load() function in README
+* https://github.com/yaml/pyyaml/pull/351 -- Fix reader for Unicode code points over 0xFFFF
+* https://github.com/yaml/pyyaml/pull/360 -- Enable certain unicode tests when maxunicode not > 0xffff
+* https://github.com/yaml/pyyaml/pull/359 -- Use full_load in yaml-highlight example
+* https://github.com/yaml/pyyaml/pull/244 -- Document that PyYAML is implemented with Cython
+* https://github.com/yaml/pyyaml/pull/329 -- Fix for Python 3.10
+* https://github.com/yaml/pyyaml/pull/310 -- Increase size of index, line, and column fields
+* https://github.com/yaml/pyyaml/pull/260 -- Remove some unused imports
+* https://github.com/yaml/pyyaml/pull/163 -- Create timezone-aware datetimes when parsed as such
+* https://github.com/yaml/pyyaml/pull/363 -- Add tests for timezone
+
+5.2 (2019-12-02)
+------------------
+
+* Repair incompatibilities introduced with 5.1. The default Loader was changed,
+ but several methods like add_constructor still used the old default:
+ https://github.com/yaml/pyyaml/pull/279 -- A more flexible fix for custom tag constructors
+ https://github.com/yaml/pyyaml/pull/287 -- Change default loader for yaml.add_constructor
+ https://github.com/yaml/pyyaml/pull/305 -- Change default loader for add_implicit_resolver, add_path_resolver
+* Make FullLoader safer by removing python/object/apply from the default FullLoader
+ https://github.com/yaml/pyyaml/pull/347 -- Move constructor for object/apply to UnsafeConstructor
+* Fix bug introduced in 5.1 where quoting went wrong on systems with sys.maxunicode <= 0xffff
+ https://github.com/yaml/pyyaml/pull/276 -- Fix logic for quoting special characters
+* Other PRs:
+ https://github.com/yaml/pyyaml/pull/280 -- Update CHANGES for 5.1
+
+5.1.2 (2019-07-30)
+------------------
+
+* Re-release of 5.1 with regenerated Cython sources to build properly for Python 3.8b2+
+
+5.1.1 (2019-06-05)
+------------------
+
+* Re-release of 5.1 with regenerated Cython sources to build properly for Python 3.8b1
+
+5.1 (2019-03-13)
+----------------
+
+* https://github.com/yaml/pyyaml/pull/35 -- Some modernization of the test running
+* https://github.com/yaml/pyyaml/pull/42 -- Install tox in a virtualenv
+* https://github.com/yaml/pyyaml/pull/45 -- Allow colon in a plain scalar in a flow context
+* https://github.com/yaml/pyyaml/pull/48 -- Fix typos
+* https://github.com/yaml/pyyaml/pull/55 -- Improve RepresenterError creation
+* https://github.com/yaml/pyyaml/pull/59 -- Resolves #57, update readme issues link
+* https://github.com/yaml/pyyaml/pull/60 -- Document and test Python 3.6 support
+* https://github.com/yaml/pyyaml/pull/61 -- Use Travis CI built in pip cache support
+* https://github.com/yaml/pyyaml/pull/62 -- Remove tox workaround for Travis CI
+* https://github.com/yaml/pyyaml/pull/63 -- Adding support to Unicode characters over codepoint 0xffff
+* https://github.com/yaml/pyyaml/pull/75 -- add 3.12 changelog
+* https://github.com/yaml/pyyaml/pull/76 -- Fallback to Pure Python if Compilation fails
+* https://github.com/yaml/pyyaml/pull/84 -- Drop unsupported Python 3.3
+* https://github.com/yaml/pyyaml/pull/102 -- Include license file in the generated wheel package
+* https://github.com/yaml/pyyaml/pull/105 -- Removed Python 2.6 & 3.3 support
+* https://github.com/yaml/pyyaml/pull/111 -- Remove commented out Psyco code
+* https://github.com/yaml/pyyaml/pull/129 -- Remove call to `ord` in lib3 emitter code
+* https://github.com/yaml/pyyaml/pull/149 -- Test on Python 3.7-dev
+* https://github.com/yaml/pyyaml/pull/158 -- Support escaped slash in double quotes "\/"
+* https://github.com/yaml/pyyaml/pull/175 -- Updated link to pypi in release announcement
+* https://github.com/yaml/pyyaml/pull/181 -- Import Hashable from collections.abc
+* https://github.com/yaml/pyyaml/pull/194 -- Reverting https://github.com/yaml/pyyaml/pull/74
+* https://github.com/yaml/pyyaml/pull/195 -- Build libyaml on travis
+* https://github.com/yaml/pyyaml/pull/196 -- Force cython when building sdist
+* https://github.com/yaml/pyyaml/pull/254 -- Allow to turn off sorting keys in Dumper (2)
+* https://github.com/yaml/pyyaml/pull/256 -- Make default_flow_style=False
+* https://github.com/yaml/pyyaml/pull/257 -- Deprecate yaml.load and add FullLoader and UnsafeLoader classes
+* https://github.com/yaml/pyyaml/pull/261 -- Skip certain unicode tests when maxunicode not > 0xffff
+* https://github.com/yaml/pyyaml/pull/263 -- Windows Appveyor build
+
+3.13 (2018-07-05)
+-----------------
+
+* Resolved issues around PyYAML working in Python 3.7.
+
+3.12 (2016-08-28)
+-----------------
+
+* Wheel packages for Windows binaries.
+* Adding an implicit resolver to a derived loader should not affect the base loader.
+* Uniform representation for OrderedDict across different versions of Python.
+* Fixed comparison to None warning.
+
+3.11 (2014-03-26)
+-----------------
+
+* Source and binary distributions are rebuilt against the latest
+ versions of Cython and LibYAML.
+
+3.10 (2011-05-30)
+-----------------
+
+* Do not try to build LibYAML bindings on platforms other than CPython
+ (Thanks to olt(at)bogosoft(dot)com).
+* Clear cyclic references in the parser and the emitter
+ (Thanks to kristjan(at)ccpgames(dot)com).
+* Dropped support for Python 2.3 and 2.4.
+
+3.09 (2009-08-31)
+-----------------
+
+* Fixed an obscure scanner error that was not reported when there is
+ no line break at the end of the stream (Thanks to Ingy).
+* Fixed use of uninitialized memory when emitting anchors with
+ LibYAML bindings (Thanks to cegner(at)yahoo-inc(dot)com).
+* Fixed emitting incorrect BOM characters for UTF-16 (Thanks to
+ Valentin Nechayev).
+* Fixed the emitter for folded scalars not respecting the preferred
+ line width (Thanks to Ingy).
+* Fixed a subtle ordering issue with emitting '%TAG' directives
+ (Thanks to Andrey Somov).
+* Fixed performance regression with LibYAML bindings.
+
+
+3.08 (2008-12-31)
+-----------------
+
+* Python 3 support (Thanks to Erick Tryzelaar).
+* Use Cython instead of Pyrex to build LibYAML bindings.
+* Refactored support for unicode and byte input/output streams.
+
+
+3.07 (2008-12-29)
+-----------------
+
+* The emitter learned to use an optional indentation indicator
+ for block scalars; thus scalars with leading whitespace
+ can now be represented in a literal or folded style.
+* The test suite is now included in the source distribution.
+ To run the tests, type 'python setup.py test'.
+* Refactored the test suite: dropped unittest in favor of
+ a custom test appliance.
+* Fixed the path resolver in CDumper.
+* Forced an explicit document end indicator when there is
+ a possibility of parsing ambiguity.
+* More setup.py improvements: the package should be usable
+ when any combination of setuptools, Pyrex and LibYAML
+ is installed.
+* Windows binary packages are built against LibYAML-0.1.2.
+* Minor typos and corrections (Thanks to Ingy dot Net
+ and Andrey Somov).
+
+
+3.06 (2008-10-03)
+-----------------
+
+* setup.py checks whether LibYAML is installed and if so, builds
+ and installs LibYAML bindings. To force or disable installation
+ of LibYAML bindings, use '--with-libyaml' or '--without-libyaml'
+ respectively.
+* The source distribution includes compiled Pyrex sources so
+ building LibYAML bindings no longer requires Pyrex installed.
+* 'yaml.load()' raises an exception if the input stream contains
+ more than one YAML document.
+* Fixed exceptions produced by LibYAML bindings.
+* Fixed a dot '.' character being recognized as !!float.
+* Fixed Python 2.3 compatibility issue in constructing !!timestamp values.
+* Windows binary packages are built against the LibYAML stable branch.
+* Added attributes 'yaml.__version__' and 'yaml.__with_libyaml__'.
+
+
+3.05 (2007-05-13)
+-----------------
+
+* Windows binary packages were built with LibYAML trunk.
+* Fixed a bug that prevented processing a live stream of YAML documents
+ in a timely manner (Thanks edward(at)sweetbytes(dot)net).
+* Fixed a bug when the path in add_path_resolver contains boolean values
+ (Thanks jstroud(at)mbi(dot)ucla(dot)edu).
+* Fixed loss of microsecond precision in timestamps
+ (Thanks edemaine(at)mit(dot)edu).
+* Fixed loading an empty YAML stream.
+* Allowed immutable subclasses of YAMLObject.
+* Made the encoding of the unicode->str conversion explicit so that
+ the conversion does not depend on the default Python encoding.
+* Forced emitting float values in a YAML compatible form.
+
+
+3.04 (2006-08-20)
+-----------------
+
+* Include experimental LibYAML bindings.
+* Fully support recursive structures.
+* Sort dictionary keys. Mapping node values are now represented
+ as lists of pairs instead of dictionaries. No longer check
+ for duplicate mapping keys as it didn't work correctly anyway.
+* Fix invalid output of single-quoted scalars in cases when a single
+ quote is not escaped when preceded by whitespaces or line breaks.
+* To make porting easier, rewrite Parser not using generators.
+* Fix handling of unexpected block mapping values.
+* Fix a bug in Representer.represent_object: copy_reg.dispatch_table
+ was not correctly handled.
+* Fix a bug when a block scalar is incorrectly emitted in the simple
+ key context.
+* Hold references to the objects being represented.
+* Make Representer not try to guess !!pairs when a list is represented.
+* Fix timestamp constructing and representing.
+* Fix the 'N' plain scalar being incorrectly recognized as !!bool.
+
+
+3.03 (2006-06-19)
+-----------------
+
+* Fix Python 2.5 compatibility issues.
+* Fix numerous bugs in the float handling.
+* Fix scanning some ill-formed documents.
+* Other minor fixes.
+
+
+3.02 (2006-05-15)
+-----------------
+
+* Fix win32 installer. Apparently bdist_wininst does not work well
+ under Linux.
+* Fix a bug in add_path_resolver.
+* Add the yaml-highlight example. Try to run on a color terminal:
+ `python yaml_hl.py <any_document.yaml`.
+
+
+3.01 (2006-05-07)
+-----------------
+
+* Initial release. The version number reflects the codename
+ of the project (PyYAML 3000) and differentiates it from
+ the abandoned PyYaml module.
+
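Several entries above (the 5.1 loader deprecation, the 5.2 FullLoader
hardening, and the 5.4 fix for CVE-2020-14343) revolve around the same loader
split. A short sketch of the resulting API:

.. code-block:: python

    import yaml

    doc = "answer: 42\nnames: [a, b]\n"

    # safe_load: standard YAML tags only; the right choice for untrusted input.
    print(yaml.safe_load(doc))  # {'answer': 42, 'names': ['a', 'b']}

    # Since 5.1, yaml.load expects an explicit Loader. FullLoader resolves
    # most tags, while tags that can execute arbitrary Python stay confined
    # to UnsafeLoader.
    print(yaml.load(doc, Loader=yaml.FullLoader))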
diff --git a/third_party/python/PyYAML/LICENSE b/third_party/python/PyYAML/LICENSE
new file mode 100644
index 0000000000..2f1b8e15e5
--- /dev/null
+++ b/third_party/python/PyYAML/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2017-2021 Ingy döt Net
+Copyright (c) 2006-2016 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/python/PyYAML/MANIFEST.in b/third_party/python/PyYAML/MANIFEST.in
new file mode 100644
index 0000000000..f4051a11f9
--- /dev/null
+++ b/third_party/python/PyYAML/MANIFEST.in
@@ -0,0 +1,10 @@
+include CHANGES README LICENSE Makefile pyproject.toml setup.py
+recursive-include lib/yaml *.py
+recursive-include lib/_yaml *.py
+recursive-include lib3/yaml *.py
+recursive-include lib3/_yaml *.py
+recursive-include examples *.py *.cfg *.yaml
+recursive-include tests/data *
+recursive-include tests/lib *.py
+recursive-include tests/lib3 *.py
+recursive-include yaml *
diff --git a/third_party/python/PyYAML/Makefile b/third_party/python/PyYAML/Makefile
new file mode 100644
index 0000000000..69efbdc7ea
--- /dev/null
+++ b/third_party/python/PyYAML/Makefile
@@ -0,0 +1,44 @@
+
+.PHONY: default build buildext force forceext install installext test testext testall dist windist clean
+
+PYTHON=/usr/bin/python
+TEST=
+PARAMETERS=
+
+build:
+ ${PYTHON} setup.py build ${PARAMETERS}
+
+buildext:
+ ${PYTHON} setup.py --with-libyaml build ${PARAMETERS}
+
+force:
+ ${PYTHON} setup.py build -f ${PARAMETERS}
+
+forceext:
+ ${PYTHON} setup.py --with-libyaml build -f ${PARAMETERS}
+
+install:
+ ${PYTHON} setup.py install ${PARAMETERS}
+
+installext:
+ ${PYTHON} setup.py --with-libyaml install ${PARAMETERS}
+
+test: build
+ ${PYTHON} tests/lib/test_build.py ${TEST}
+
+testext: buildext
+ ${PYTHON} tests/lib/test_build_ext.py ${TEST}
+
+testall:
+ ${PYTHON} setup.py test
+
+dist:
+ @# No longer uploading a zip file to pypi
+ @# ${PYTHON} setup.py --with-libyaml sdist --formats=zip,gztar
+ ${PYTHON} setup.py --with-libyaml sdist --formats=gztar
+
+windist:
+ ${PYTHON} setup.py --with-libyaml bdist_wininst
+
+clean:
+ ${PYTHON} setup.py --with-libyaml clean -a
diff --git a/third_party/python/PyYAML/PKG-INFO b/third_party/python/PyYAML/PKG-INFO
new file mode 100644
index 0000000000..04d0abf6e5
--- /dev/null
+++ b/third_party/python/PyYAML/PKG-INFO
@@ -0,0 +1,44 @@
+Metadata-Version: 1.2
+Name: PyYAML
+Version: 5.4.1
+Summary: YAML parser and emitter for Python
+Home-page: https://pyyaml.org/
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Download-URL: https://pypi.org/project/PyYAML/
+Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues
+Project-URL: CI, https://github.com/yaml/pyyaml/actions
+Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation
+Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core
+Project-URL: Source Code, https://github.com/yaml/pyyaml
+Description: YAML is a data serialization format designed for human readability
+ and interaction with scripting languages. PyYAML is a YAML parser
+ and emitter for Python.
+
+ PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+        support, a capable extension API, and sensible error messages. PyYAML
+        supports standard YAML tags and provides Python-specific tags that
+        allow representing arbitrary Python objects.
+
+ PyYAML is applicable for a broad range of tasks from complex
+ configuration files to object serialization and persistence.
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*
diff --git a/third_party/python/PyYAML/README b/third_party/python/PyYAML/README
new file mode 100644
index 0000000000..49c87e7642
--- /dev/null
+++ b/third_party/python/PyYAML/README
@@ -0,0 +1,43 @@
+PyYAML - The next generation YAML parser and emitter for Python.
+
+To install, type 'python setup.py install'.
+
+By default, the setup.py script checks whether LibYAML is installed
+and if so, builds and installs LibYAML bindings. To skip the check
+and force installation of LibYAML bindings, use the option '--with-libyaml':
+'python setup.py --with-libyaml install'. To disable the check and
+skip building and installing LibYAML bindings, use '--without-libyaml':
+'python setup.py --without-libyaml install'.
+
+When LibYAML bindings are installed, you may use the fast LibYAML-based
+parser and emitter as follows:
+
+ >>> yaml.load(stream, Loader=yaml.CLoader)
+ >>> yaml.dump(data, Dumper=yaml.CDumper)
+
+If you don't trust the input stream, you should use:
+
+ >>> yaml.safe_load(stream)
+
+PyYAML includes a comprehensive test suite. To run the tests,
+type 'python setup.py test'.
+
+For more information, check the PyYAML homepage:
+'https://github.com/yaml/pyyaml'.
+
+For PyYAML tutorial and reference, see:
+'http://pyyaml.org/wiki/PyYAMLDocumentation'.
+
+Discuss PyYAML with the maintainers on IRC: #pyyaml on irc.freenode.net.
+
+You may also use the YAML-Core mailing list:
+'http://lists.sourceforge.net/lists/listinfo/yaml-core'.
+
+Submit bug reports and feature requests to the PyYAML bug tracker:
+'https://github.com/yaml/pyyaml/issues'.
+
+The PyYAML module was written by Kirill Simonov <xi@resolvent.net>.
+It is currently maintained by the YAML and Python communities.
+
+PyYAML is released under the MIT license.
+See the file LICENSE for more details.
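A minimal sketch of the pattern the README describes: prefer the LibYAML-based
loader when the bindings were built, and fall back to the pure-Python one
otherwise (the sample document here is hypothetical):

    import yaml

    try:
        from yaml import CSafeLoader as PreferredLoader   # LibYAML-backed, faster
    except ImportError:
        from yaml import SafeLoader as PreferredLoader    # pure-Python fallback

    data = yaml.load("a: 1\nb: [2, 3]\n", Loader=PreferredLoader)
    assert data == {'a': 1, 'b': [2, 3]}
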
diff --git a/third_party/python/PyYAML/examples/pygments-lexer/example.yaml b/third_party/python/PyYAML/examples/pygments-lexer/example.yaml
new file mode 100644
index 0000000000..9c0ed9d082
--- /dev/null
+++ b/third_party/python/PyYAML/examples/pygments-lexer/example.yaml
@@ -0,0 +1,302 @@
+
+#
+# Examples from the Preview section of the YAML specification
+# (http://yaml.org/spec/1.2/#Preview)
+#
+
+# Sequence of scalars
+---
+- Mark McGwire
+- Sammy Sosa
+- Ken Griffey
+
+# Mapping scalars to scalars
+---
+hr: 65 # Home runs
+avg: 0.278 # Batting average
+rbi: 147 # Runs Batted In
+
+# Mapping scalars to sequences
+---
+american:
+ - Boston Red Sox
+ - Detroit Tigers
+ - New York Yankees
+national:
+ - New York Mets
+ - Chicago Cubs
+ - Atlanta Braves
+
+# Sequence of mappings
+---
+-
+ name: Mark McGwire
+ hr: 65
+ avg: 0.278
+-
+ name: Sammy Sosa
+ hr: 63
+ avg: 0.288
+
+# Sequence of sequences
+---
+- [name , hr, avg ]
+- [Mark McGwire, 65, 0.278]
+- [Sammy Sosa , 63, 0.288]
+
+# Mapping of mappings
+---
+Mark McGwire: {hr: 65, avg: 0.278}
+Sammy Sosa: {
+ hr: 63,
+ avg: 0.288
+ }
+
+# Two documents in a stream
+--- # Ranking of 1998 home runs
+- Mark McGwire
+- Sammy Sosa
+- Ken Griffey
+--- # Team ranking
+- Chicago Cubs
+- St Louis Cardinals
+
+# Documents with the end indicator
+---
+time: 20:03:20
+player: Sammy Sosa
+action: strike (miss)
+...
+---
+time: 20:03:47
+player: Sammy Sosa
+action: grand slam
+...
+
+# Comments
+---
+hr: # 1998 hr ranking
+ - Mark McGwire
+ - Sammy Sosa
+rbi:
+ # 1998 rbi ranking
+ - Sammy Sosa
+ - Ken Griffey
+
+# Anchors and aliases
+---
+hr:
+ - Mark McGwire
+ # Following node labeled SS
+ - &SS Sammy Sosa
+rbi:
+ - *SS # Subsequent occurrence
+ - Ken Griffey
+
+# Mapping between sequences
+---
+? - Detroit Tigers
+ - Chicago cubs
+:
+ - 2001-07-23
+? [ New York Yankees,
+ Atlanta Braves ]
+: [ 2001-07-02, 2001-08-12,
+ 2001-08-14 ]
+
+# Inline nested mapping
+---
+# products purchased
+- item : Super Hoop
+ quantity: 1
+- item : Basketball
+ quantity: 4
+- item : Big Shoes
+ quantity: 1
+
+# Literal scalars
+--- | # ASCII art
+ \//||\/||
+ // || ||__
+
+# Folded scalars
+--- >
+ Mark McGwire's
+ year was crippled
+ by a knee injury.
+
+# Preserved indented block in a folded scalar
+---
+>
+ Sammy Sosa completed another
+ fine season with great stats.
+
+ 63 Home Runs
+ 0.288 Batting Average
+
+ What a year!
+
+# Indentation determines scope
+---
+name: Mark McGwire
+accomplishment: >
+ Mark set a major league
+ home run record in 1998.
+stats: |
+ 65 Home Runs
+ 0.278 Batting Average
+
+# Quoted scalars
+---
+unicode: "Sosa did fine.\u263A"
+control: "\b1998\t1999\t2000\n"
+hex esc: "\x0d\x0a is \r\n"
+single: '"Howdy!" he cried.'
+quoted: ' # not a ''comment''.'
+tie-fighter: '|\-*-/|'
+
+# Multi-line flow scalars
+---
+plain:
+ This unquoted scalar
+ spans many lines.
+quoted: "So does this
+ quoted scalar.\n"
+
+# Integers
+---
+canonical: 12345
+decimal: +12_345
+sexagesimal: 3:25:45
+octal: 014
+hexadecimal: 0xC
+
+# Floating point
+---
+canonical: 1.23015e+3
+exponential: 12.3015e+02
+sexagesimal: 20:30.15
+fixed: 1_230.15
+negative infinity: -.inf
+not a number: .NaN
+
+# Miscellaneous
+---
+null: ~
+true: boolean
+false: boolean
+string: '12345'
+
+# Timestamps
+---
+canonical: 2001-12-15T02:59:43.1Z
+iso8601: 2001-12-14t21:59:43.10-05:00
+spaced: 2001-12-14 21:59:43.10 -5
+date: 2002-12-14
+
+# Various explicit tags
+---
+not-date: !!str 2002-04-28
+picture: !!binary |
+ R0lGODlhDAAMAIQAAP//9/X
+ 17unp5WZmZgAAAOfn515eXv
+ Pz7Y6OjuDg4J+fn5OTk6enp
+ 56enmleECcgggoBADs=
+application specific tag: !something |
+ The semantics of the tag
+ above may be different for
+ different documents.
+
+# Global tags
+%TAG ! tag:clarkevans.com,2002:
+--- !shape
+ # Use the ! handle for presenting
+ # tag:clarkevans.com,2002:circle
+- !circle
+ center: &ORIGIN {x: 73, y: 129}
+ radius: 7
+- !line
+ start: *ORIGIN
+ finish: { x: 89, y: 102 }
+- !label
+ start: *ORIGIN
+ color: 0xFFEEBB
+ text: Pretty vector drawing.
+
+# Unordered sets
+--- !!set
+# sets are represented as a
+# mapping where each key is
+# associated with the empty string
+? Mark McGwire
+? Sammy Sosa
+? Ken Griff
+
+# Ordered mappings
+--- !!omap
+# ordered maps are represented as
+# a sequence of mappings, with
+# each mapping having one key
+- Mark McGwire: 65
+- Sammy Sosa: 63
+- Ken Griffy: 58
+
+# Full length example
+--- !<tag:clarkevans.com,2002:invoice>
+invoice: 34843
+date : 2001-01-23
+bill-to: &id001
+ given : Chris
+ family : Dumars
+ address:
+ lines: |
+ 458 Walkman Dr.
+ Suite #292
+ city : Royal Oak
+ state : MI
+ postal : 48046
+ship-to: *id001
+product:
+ - sku : BL394D
+ quantity : 4
+ description : Basketball
+ price : 450.00
+ - sku : BL4438H
+ quantity : 1
+ description : Super Hoop
+ price : 2392.00
+tax : 251.42
+total: 4443.52
+comments:
+ Late afternoon is best.
+ Backup contact is Nancy
+ Billsmer @ 338-4338.
+
+# Another full-length example
+---
+Time: 2001-11-23 15:01:42 -5
+User: ed
+Warning:
+ This is an error message
+ for the log file
+---
+Time: 2001-11-23 15:02:31 -5
+User: ed
+Warning:
+ A slightly different error
+ message.
+---
+Date: 2001-11-23 15:03:17 -5
+User: ed
+Fatal:
+ Unknown variable "bar"
+Stack:
+ - file: TopClass.py
+ line: 23
+ code: |
+ x = MoreObject("345\n")
+ - file: MoreClass.py
+ line: 58
+ code: |-
+ foo = bar
+
diff --git a/third_party/python/PyYAML/examples/pygments-lexer/yaml.py b/third_party/python/PyYAML/examples/pygments-lexer/yaml.py
new file mode 100644
index 0000000000..1a1bbdeb3a
--- /dev/null
+++ b/third_party/python/PyYAML/examples/pygments-lexer/yaml.py
@@ -0,0 +1,431 @@
+
+"""
+yaml.py
+
+Lexer for YAML, a human-friendly data serialization language
+(http://yaml.org/).
+
+Written by Kirill Simonov <xi@resolvent.net>.
+
+License: Whatever suitable for inclusion into the Pygments package.
+"""
+
+from pygments.lexer import \
+ ExtendedRegexLexer, LexerContext, include, bygroups
+from pygments.token import \
+ Text, Comment, Punctuation, Name, Literal
+
+__all__ = ['YAMLLexer']
+
+
+class YAMLLexerContext(LexerContext):
+ """Indentation context for the YAML lexer."""
+
+ def __init__(self, *args, **kwds):
+ super(YAMLLexerContext, self).__init__(*args, **kwds)
+ self.indent_stack = []
+ self.indent = -1
+ self.next_indent = 0
+ self.block_scalar_indent = None
+
+
+def something(TokenClass):
+ """Do not produce empty tokens."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if not text:
+ return
+ yield match.start(), TokenClass, text
+ context.pos = match.end()
+ return callback
+
+def reset_indent(TokenClass):
+ """Reset the indentation levels."""
+ def callback(lexer, match, context):
+ text = match.group()
+ context.indent_stack = []
+ context.indent = -1
+ context.next_indent = 0
+ context.block_scalar_indent = None
+ yield match.start(), TokenClass, text
+ context.pos = match.end()
+ return callback
+
+def save_indent(TokenClass, start=False):
+ """Save a possible indentation level."""
+ def callback(lexer, match, context):
+ text = match.group()
+ extra = ''
+ if start:
+ context.next_indent = len(text)
+ if context.next_indent < context.indent:
+ while context.next_indent < context.indent:
+ context.indent = context.indent_stack.pop()
+ if context.next_indent > context.indent:
+ extra = text[context.indent:]
+ text = text[:context.indent]
+ else:
+ context.next_indent += len(text)
+ if text:
+ yield match.start(), TokenClass, text
+ if extra:
+ yield match.start()+len(text), TokenClass.Error, extra
+ context.pos = match.end()
+ return callback
+
+def set_indent(TokenClass, implicit=False):
+ """Set the previously saved indentation level."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if context.indent < context.next_indent:
+ context.indent_stack.append(context.indent)
+ context.indent = context.next_indent
+ if not implicit:
+ context.next_indent += len(text)
+ yield match.start(), TokenClass, text
+ context.pos = match.end()
+ return callback
+
+def set_block_scalar_indent(TokenClass):
+ """Set an explicit indentation level for a block scalar."""
+ def callback(lexer, match, context):
+ text = match.group()
+ context.block_scalar_indent = None
+ if not text:
+ return
+ increment = match.group(1)
+ if increment:
+ current_indent = max(context.indent, 0)
+ increment = int(increment)
+ context.block_scalar_indent = current_indent + increment
+ if text:
+ yield match.start(), TokenClass, text
+ context.pos = match.end()
+ return callback
+
+def parse_block_scalar_empty_line(IndentTokenClass, ContentTokenClass):
+ """Process an empty line in a block scalar."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if (context.block_scalar_indent is None or
+ len(text) <= context.block_scalar_indent):
+ if text:
+ yield match.start(), IndentTokenClass, text
+ else:
+ indentation = text[:context.block_scalar_indent]
+ content = text[context.block_scalar_indent:]
+ yield match.start(), IndentTokenClass, indentation
+ yield (match.start()+context.block_scalar_indent,
+ ContentTokenClass, content)
+ context.pos = match.end()
+ return callback
+
+def parse_block_scalar_indent(TokenClass):
+ """Process indentation spaces in a block scalar."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if context.block_scalar_indent is None:
+ if len(text) <= max(context.indent, 0):
+ context.stack.pop()
+ context.stack.pop()
+ return
+ context.block_scalar_indent = len(text)
+ else:
+ if len(text) < context.block_scalar_indent:
+ context.stack.pop()
+ context.stack.pop()
+ return
+ if text:
+ yield match.start(), TokenClass, text
+ context.pos = match.end()
+ return callback
+
+def parse_plain_scalar_indent(TokenClass):
+ """Process indentation spaces in a plain scalar."""
+ def callback(lexer, match, context):
+ text = match.group()
+ if len(text) <= context.indent:
+ context.stack.pop()
+ context.stack.pop()
+ return
+ if text:
+ yield match.start(), TokenClass, text
+ context.pos = match.end()
+ return callback
+
+
+class YAMLLexer(ExtendedRegexLexer):
+ """Lexer for the YAML language."""
+
+ name = 'YAML'
+ aliases = ['yaml']
+ filenames = ['*.yaml', '*.yml']
+ mimetypes = ['text/x-yaml']
+
+ tokens = {
+
+ # the root rules
+ 'root': [
+ # ignored whitespaces
+ (r'[ ]+(?=#|$)', Text.Blank),
+ # line breaks
+ (r'\n+', Text.Break),
+ # a comment
+ (r'#[^\n]*', Comment.Single),
+ # the '%YAML' directive
+ (r'^%YAML(?=[ ]|$)', reset_indent(Name.Directive),
+ 'yaml-directive'),
+ # the %TAG directive
+ (r'^%TAG(?=[ ]|$)', reset_indent(Name.Directive),
+ 'tag-directive'),
+ # document start and document end indicators
+ (r'^(?:---|\.\.\.)(?=[ ]|$)',
+ reset_indent(Punctuation.Document), 'block-line'),
+ # indentation spaces
+ (r'[ ]*(?![ \t\n\r\f\v]|$)',
+ save_indent(Text.Indent, start=True),
+ ('block-line', 'indentation')),
+ ],
+
+ # trailing whitespaces after directives or a block scalar indicator
+ 'ignored-line': [
+ # ignored whitespaces
+ (r'[ ]+(?=#|$)', Text.Blank),
+ # a comment
+ (r'#[^\n]*', Comment.Single),
+ # line break
+ (r'\n', Text.Break, '#pop:2'),
+ ],
+
+ # the %YAML directive
+ 'yaml-directive': [
+ # the version number
+ (r'([ ]+)([0-9]+\.[0-9]+)',
+ bygroups(Text.Blank, Literal.Version), 'ignored-line'),
+ ],
+
+    # the %TAG directive
+ 'tag-directive': [
+ # a tag handle and the corresponding prefix
+ (r'([ ]+)(!|![0-9A-Za-z_-]*!)'
+ r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
+ bygroups(Text.Blank, Name.Type, Text.Blank, Name.Type),
+ 'ignored-line'),
+ ],
+
+ # block scalar indicators and indentation spaces
+ 'indentation': [
+ # trailing whitespaces are ignored
+ (r'[ ]*$', something(Text.Blank), '#pop:2'),
+ # whitespaces preceding block collection indicators
+ (r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text.Indent)),
+ # block collection indicators
+ (r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
+            # the beginning of a block line
+ (r'[ ]*', save_indent(Text.Indent), '#pop'),
+ ],
+
+ # an indented line in the block context
+ 'block-line': [
+ # the line end
+ (r'[ ]*(?=#|$)', something(Text.Blank), '#pop'),
+ # whitespaces separating tokens
+ (r'[ ]+', Text.Blank),
+            # tags, anchors and aliases
+ include('descriptors'),
+ # block collections and scalars
+ include('block-nodes'),
+ # flow collections and quoted scalars
+ include('flow-nodes'),
+ # a plain scalar
+ (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
+ something(Literal.Scalar.Plain),
+ 'plain-scalar-in-block-context'),
+ ],
+
+ # tags, anchors, aliases
+ 'descriptors' : [
+ # a full-form tag
+ (r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Name.Type),
+ # a tag in the form '!', '!suffix' or '!handle!suffix'
+ (r'!(?:[0-9A-Za-z_-]+)?'
+ r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Name.Type),
+ # an anchor
+ (r'&[0-9A-Za-z_-]+', Name.Anchor),
+ # an alias
+ (r'\*[0-9A-Za-z_-]+', Name.Alias),
+ ],
+
+ # block collections and scalars
+ 'block-nodes': [
+ # implicit key
+ (r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
+ # literal and folded scalars
+ (r'[|>]', Punctuation.Indicator,
+ ('block-scalar-content', 'block-scalar-header')),
+ ],
+
+ # flow collections and quoted scalars
+ 'flow-nodes': [
+ # a flow sequence
+ (r'\[', Punctuation.Indicator, 'flow-sequence'),
+ # a flow mapping
+ (r'\{', Punctuation.Indicator, 'flow-mapping'),
+ # a single-quoted scalar
+ (r'\'', Literal.Scalar.Flow.Quote, 'single-quoted-scalar'),
+ # a double-quoted scalar
+ (r'\"', Literal.Scalar.Flow.Quote, 'double-quoted-scalar'),
+ ],
+
+ # the content of a flow collection
+ 'flow-collection': [
+ # whitespaces
+ (r'[ ]+', Text.Blank),
+ # line breaks
+ (r'\n+', Text.Break),
+ # a comment
+ (r'#[^\n]*', Comment.Single),
+ # simple indicators
+ (r'[?:,]', Punctuation.Indicator),
+ # tags, anchors and aliases
+ include('descriptors'),
+ # nested collections and quoted scalars
+ include('flow-nodes'),
+ # a plain scalar
+ (r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
+ something(Literal.Scalar.Plain),
+ 'plain-scalar-in-flow-context'),
+ ],
+
+ # a flow sequence indicated by '[' and ']'
+ 'flow-sequence': [
+ # include flow collection rules
+ include('flow-collection'),
+ # the closing indicator
+ (r'\]', Punctuation.Indicator, '#pop'),
+ ],
+
+ # a flow mapping indicated by '{' and '}'
+ 'flow-mapping': [
+ # include flow collection rules
+ include('flow-collection'),
+ # the closing indicator
+ (r'\}', Punctuation.Indicator, '#pop'),
+ ],
+
+ # block scalar lines
+ 'block-scalar-content': [
+ # line break
+ (r'\n', Text.Break),
+ # empty line
+ (r'^[ ]+$',
+ parse_block_scalar_empty_line(Text.Indent,
+ Literal.Scalar.Block)),
+ # indentation spaces (we may leave the state here)
+ (r'^[ ]*', parse_block_scalar_indent(Text.Indent)),
+ # line content
+ (r'[^\n\r\f\v]+', Literal.Scalar.Block),
+ ],
+
+        # the header of a literal or folded scalar
+ 'block-scalar-header': [
+ # indentation indicator followed by chomping flag
+ (r'([1-9])?[+-]?(?=[ ]|$)',
+ set_block_scalar_indent(Punctuation.Indicator),
+ 'ignored-line'),
+ # chomping flag followed by indentation indicator
+ (r'[+-]?([1-9])?(?=[ ]|$)',
+ set_block_scalar_indent(Punctuation.Indicator),
+ 'ignored-line'),
+ ],
+
+ # ignored and regular whitespaces in quoted scalars
+ 'quoted-scalar-whitespaces': [
+ # leading and trailing whitespaces are ignored
+ (r'^[ ]+|[ ]+$', Text.Blank),
+ # line breaks are ignored
+ (r'\n+', Text.Break),
+ # other whitespaces are a part of the value
+ (r'[ ]+', Literal.Scalar.Flow),
+ ],
+
+ # single-quoted scalars
+ 'single-quoted-scalar': [
+ # include whitespace and line break rules
+ include('quoted-scalar-whitespaces'),
+ # escaping of the quote character
+ (r'\'\'', Literal.Scalar.Flow.Escape),
+ # regular non-whitespace characters
+ (r'[^ \t\n\r\f\v\']+', Literal.Scalar.Flow),
+ # the closing quote
+ (r'\'', Literal.Scalar.Flow.Quote, '#pop'),
+ ],
+
+ # double-quoted scalars
+ 'double-quoted-scalar': [
+ # include whitespace and line break rules
+ include('quoted-scalar-whitespaces'),
+ # escaping of special characters
+ (r'\\[0abt\tn\nvfre "\\N_LP]', Literal.Scalar.Flow.Escape),
+ # escape codes
+ (r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
+ Literal.Scalar.Flow.Escape),
+ # regular non-whitespace characters
+ (r'[^ \t\n\r\f\v\"\\]+', Literal.Scalar.Flow),
+ # the closing quote
+ (r'"', Literal.Scalar.Flow.Quote, '#pop'),
+ ],
+
+ # the beginning of a new line while scanning a plain scalar
+ 'plain-scalar-in-block-context-new-line': [
+ # empty lines
+ (r'^[ ]+$', Text.Blank),
+ # line breaks
+ (r'\n+', Text.Break),
+ # document start and document end indicators
+ (r'^(?=---|\.\.\.)', something(Punctuation.Document), '#pop:3'),
+ # indentation spaces (we may leave the block line state here)
+ (r'^[ ]*', parse_plain_scalar_indent(Text.Indent), '#pop'),
+ ],
+
+ # a plain scalar in the block context
+ 'plain-scalar-in-block-context': [
+ # the scalar ends with the ':' indicator
+ (r'[ ]*(?=:[ ]|:$)', something(Text.Blank), '#pop'),
+ # the scalar ends with whitespaces followed by a comment
+ (r'[ ]+(?=#)', Text.Blank, '#pop'),
+ # trailing whitespaces are ignored
+ (r'[ ]+$', Text.Blank),
+ # line breaks are ignored
+ (r'\n+', Text.Break, 'plain-scalar-in-block-context-new-line'),
+ # other whitespaces are a part of the value
+ (r'[ ]+', Literal.Scalar.Plain),
+ # regular non-whitespace characters
+ (r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+',
+ Literal.Scalar.Plain),
+ ],
+
+        # a plain scalar in the flow context
+ 'plain-scalar-in-flow-context': [
+ # the scalar ends with an indicator character
+ (r'[ ]*(?=[,:?\[\]{}])', something(Text.Blank), '#pop'),
+ # the scalar ends with a comment
+ (r'[ ]+(?=#)', Text.Blank, '#pop'),
+ # leading and trailing whitespaces are ignored
+ (r'^[ ]+|[ ]+$', Text.Blank),
+ # line breaks are ignored
+ (r'\n+', Text.Break),
+ # other whitespaces are a part of the value
+ (r'[ ]+', Literal.Scalar.Plain),
+ # regular non-whitespace characters
+ (r'[^ \t\n\r\f\v,:?\[\]{}]+', Literal.Scalar.Plain),
+ ],
+
+ }
+
+ def get_tokens_unprocessed(self, text=None, context=None):
+ if context is None:
+ context = YAMLLexerContext(text, 0)
+ return super(YAMLLexer, self).get_tokens_unprocessed(text, context)
+
+
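To try the lexer above, one can push a small document through the standard
Pygments pipeline. A sketch, assuming the file above is importable as
yaml_lexer (a renamed copy, so it does not shadow the real yaml package):

    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    from yaml_lexer import YAMLLexer

    sample = "key: value\nseq:\n  - one\n  - two\n"
    print(highlight(sample, YAMLLexer(), TerminalFormatter()))
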
diff --git a/third_party/python/PyYAML/examples/yaml-highlight/yaml_hl.cfg b/third_party/python/PyYAML/examples/yaml-highlight/yaml_hl.cfg
new file mode 100644
index 0000000000..69bb847764
--- /dev/null
+++ b/third_party/python/PyYAML/examples/yaml-highlight/yaml_hl.cfg
@@ -0,0 +1,115 @@
+%YAML 1.1
+---
+
+ascii:
+
+ header: "\e[0;1;30;40m"
+
+ footer: "\e[0m"
+
+ tokens:
+ stream-start:
+ stream-end:
+ directive: { start: "\e[35m", end: "\e[0;1;30;40m" }
+ document-start: { start: "\e[35m", end: "\e[0;1;30;40m" }
+ document-end: { start: "\e[35m", end: "\e[0;1;30;40m" }
+ block-sequence-start:
+ block-mapping-start:
+ block-end:
+ flow-sequence-start: { start: "\e[33m", end: "\e[0;1;30;40m" }
+ flow-mapping-start: { start: "\e[33m", end: "\e[0;1;30;40m" }
+ flow-sequence-end: { start: "\e[33m", end: "\e[0;1;30;40m" }
+ flow-mapping-end: { start: "\e[33m", end: "\e[0;1;30;40m" }
+ key: { start: "\e[33m", end: "\e[0;1;30;40m" }
+ value: { start: "\e[33m", end: "\e[0;1;30;40m" }
+ block-entry: { start: "\e[33m", end: "\e[0;1;30;40m" }
+ flow-entry: { start: "\e[33m", end: "\e[0;1;30;40m" }
+ alias: { start: "\e[32m", end: "\e[0;1;30;40m" }
+ anchor: { start: "\e[32m", end: "\e[0;1;30;40m" }
+ tag: { start: "\e[32m", end: "\e[0;1;30;40m" }
+ scalar: { start: "\e[36m", end: "\e[0;1;30;40m" }
+
+ replaces:
+ - "\r\n": "\n"
+ - "\r": "\n"
+ - "\n": "\n"
+ - "\x85": "\n"
+ - "\u2028": "\n"
+ - "\u2029": "\n"
+
+html: &html
+
+ tokens:
+ stream-start:
+ stream-end:
+ directive: { start: <code class="directive_token">, end: </code> }
+ document-start: { start: <code class="document_start_token">, end: </code> }
+ document-end: { start: <code class="document_end_token">, end: </code> }
+ block-sequence-start:
+ block-mapping-start:
+ block-end:
+ flow-sequence-start: { start: <code class="delimiter_token">, end: </code> }
+ flow-mapping-start: { start: <code class="delimiter_token">, end: </code> }
+ flow-sequence-end: { start: <code class="delimiter_token">, end: </code> }
+ flow-mapping-end: { start: <code class="delimiter_token">, end: </code> }
+ key: { start: <code class="delimiter_token">, end: </code> }
+ value: { start: <code class="delimiter_token">, end: </code> }
+ block-entry: { start: <code class="delimiter_token">, end: </code> }
+ flow-entry: { start: <code class="delimiter_token">, end: </code> }
+ alias: { start: <code class="anchor_token">, end: </code> }
+ anchor: { start: <code class="anchor_token">, end: </code> }
+ tag: { start: <code class="tag_token">, end: </code> }
+ scalar: { start: <code class="scalar_token">, end: </code> }
+
+ events:
+ stream-start: { start: <pre class="yaml_stream"> }
+ stream-end: { end: </pre> }
+ document-start: { start: <span class="document"> }
+ document-end: { end: </span> }
+ sequence-start: { start: <span class="sequence"> }
+ sequence-end: { end: </span> }
+ mapping-start: { start: <span class="mapping"> }
+ mapping-end: { end: </span> }
+ scalar: { start: <span class="scalar">, end: </span> }
+
+ replaces:
+ - "\r\n": "\n"
+ - "\r": "\n"
+ - "\n": "\n"
+ - "\x85": "\n"
+ - "\u2028": "\n"
+ - "\u2029": "\n"
+ - "&": "&amp;"
+ - "<": "&lt;"
+ - ">": "&gt;"
+
+html-page:
+
+ header: |
+ <html>
+ <head>
+ <title>A YAML stream</title>
+ <style type="text/css">
+ .document { background: #FFF }
+ .sequence { background: #EEF }
+ .mapping { background: #EFE }
+ .scalar { background: #FEE }
+ .directive_token { color: #C0C }
+ .document_start_token { color: #C0C; font-weight: bold }
+ .document_end_token { color: #C0C; font-weight: bold }
+ .delimiter_token { color: #600; font-weight: bold }
+ .anchor_token { color: #090 }
+ .tag_token { color: #090 }
+ .scalar_token { color: #000 }
+ .yaml_stream { color: #999 }
+ </style>
+ <body>
+
+ footer: |
+ </body>
+ </html>
+
+ <<: *html
+
+
+# vim: ft=yaml
diff --git a/third_party/python/PyYAML/examples/yaml-highlight/yaml_hl.py b/third_party/python/PyYAML/examples/yaml-highlight/yaml_hl.py
new file mode 100755
index 0000000000..96e0ae7b1b
--- /dev/null
+++ b/third_party/python/PyYAML/examples/yaml-highlight/yaml_hl.py
@@ -0,0 +1,114 @@
+#!/usr/bin/python
+
+import yaml, codecs, sys, os.path, optparse
+
+class Style:
+
+ def __init__(self, header=None, footer=None,
+ tokens=None, events=None, replaces=None):
+ self.header = header
+ self.footer = footer
+ self.replaces = replaces
+ self.substitutions = {}
+ for domain, Class in [(tokens, 'Token'), (events, 'Event')]:
+ if not domain:
+ continue
+ for key in domain:
+ name = ''.join([part.capitalize() for part in key.split('-')])
+ cls = getattr(yaml, '%s%s' % (name, Class))
+ value = domain[key]
+ if not value:
+ continue
+ start = value.get('start')
+ end = value.get('end')
+ if start:
+ self.substitutions[cls, -1] = start
+ if end:
+ self.substitutions[cls, +1] = end
+
+ def __setstate__(self, state):
+ self.__init__(**state)
+
+yaml.add_path_resolver(u'tag:yaml.org,2002:python/object:__main__.Style',
+ [None], dict)
+yaml.add_path_resolver(u'tag:yaml.org,2002:pairs',
+ [None, u'replaces'], list)
+
+class YAMLHighlight:
+
+ def __init__(self, options):
+ config = yaml.full_load(file(options.config, 'rb').read())
+ self.style = config[options.style]
+ if options.input:
+ self.input = file(options.input, 'rb')
+ else:
+ self.input = sys.stdin
+ if options.output:
+ self.output = file(options.output, 'wb')
+ else:
+ self.output = sys.stdout
+
+ def highlight(self):
+ input = self.input.read()
+ if input.startswith(codecs.BOM_UTF16_LE):
+ input = unicode(input, 'utf-16-le')
+ elif input.startswith(codecs.BOM_UTF16_BE):
+ input = unicode(input, 'utf-16-be')
+ else:
+ input = unicode(input, 'utf-8')
+ substitutions = self.style.substitutions
+ tokens = yaml.scan(input)
+ events = yaml.parse(input)
+ markers = []
+ number = 0
+ for token in tokens:
+ number += 1
+ if token.start_mark.index != token.end_mark.index:
+ cls = token.__class__
+ if (cls, -1) in substitutions:
+ markers.append([token.start_mark.index, +2, number, substitutions[cls, -1]])
+ if (cls, +1) in substitutions:
+ markers.append([token.end_mark.index, -2, number, substitutions[cls, +1]])
+ number = 0
+ for event in events:
+ number += 1
+ cls = event.__class__
+ if (cls, -1) in substitutions:
+ markers.append([event.start_mark.index, +1, number, substitutions[cls, -1]])
+ if (cls, +1) in substitutions:
+ markers.append([event.end_mark.index, -1, number, substitutions[cls, +1]])
+ markers.sort()
+ markers.reverse()
+ chunks = []
+ position = len(input)
+ for index, weight1, weight2, substitution in markers:
+ if index < position:
+ chunk = input[index:position]
+ for substring, replacement in self.style.replaces:
+ chunk = chunk.replace(substring, replacement)
+ chunks.append(chunk)
+ position = index
+ chunks.append(substitution)
+ chunks.reverse()
+ result = u''.join(chunks)
+ if self.style.header:
+ self.output.write(self.style.header)
+ self.output.write(result.encode('utf-8'))
+ if self.style.footer:
+ self.output.write(self.style.footer)
+
+if __name__ == '__main__':
+ parser = optparse.OptionParser()
+ parser.add_option('-s', '--style', dest='style', default='ascii',
+ help="specify the highlighting style", metavar='STYLE')
+ parser.add_option('-c', '--config', dest='config',
+ default=os.path.join(os.path.dirname(sys.argv[0]), 'yaml_hl.cfg'),
+ help="set an alternative configuration file", metavar='CONFIG')
+ parser.add_option('-i', '--input', dest='input', default=None,
+ help="set the input file (default: stdin)", metavar='FILE')
+ parser.add_option('-o', '--output', dest='output', default=None,
+ help="set the output file (default: stdout)", metavar='FILE')
+ (options, args) = parser.parse_args()
+ hl = YAMLHighlight(options)
+ hl.highlight()
+
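The heart of highlight() above is the marker weave: every token and event
contributes (index, substitution) markers, which are sorted and then spliced
in from the end of the input so that earlier indices stay valid. A
stripped-down sketch of that splicing step, with hypothetical markers:

    text = 'key: value'
    # (index, nesting weight, order weight, text to insert), as in highlight()
    markers = [(0, -1, 1, '<b>'), (3, +1, 1, '</b>')]
    markers.sort()
    markers.reverse()                        # walk from the end backwards
    chunks = []
    position = len(text)
    for index, _w1, _w2, substitution in markers:
        chunks.append(text[index:position])  # untouched slice right of the marker
        chunks.append(substitution)
        position = index
    chunks.append(text[:position])           # whatever is left at the front
    chunks.reverse()
    print(''.join(chunks))                   # <b>key</b>: value
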
diff --git a/third_party/python/PyYAML/lib/_yaml/__init__.py b/third_party/python/PyYAML/lib/_yaml/__init__.py
new file mode 100644
index 0000000000..7baa8c4b68
--- /dev/null
+++ b/third_party/python/PyYAML/lib/_yaml/__init__.py
@@ -0,0 +1,33 @@
+# This is a stub package designed to roughly emulate the _yaml
+# extension module, which previously existed as a standalone module
+# and has been moved into the `yaml` package namespace.
+# It does not perfectly mimic its old counterpart, but should get
+# close enough for anyone who's relying on it even when they shouldn't.
+import yaml
+
+# in some circumstances, the yaml module we imported may be from a different version, so we need
+# to tread carefully when poking at it here (it may not have the attributes we expect)
+if not getattr(yaml, '__with_libyaml__', False):
+ from sys import version_info
+
+ exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
+ raise exc("No module named '_yaml'")
+else:
+ from yaml._yaml import *
+ import warnings
+ warnings.warn(
+ 'The _yaml extension module is now located at yaml._yaml'
+ ' and its location is subject to change. To use the'
+ ' LibYAML-based parser and emitter, import from `yaml`:'
+ ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
+ DeprecationWarning
+ )
+ del warnings
+ # Don't `del yaml` here because yaml is actually an existing
+ # namespace member of _yaml.
+
+__name__ = '_yaml'
+# If the module is top-level (i.e. not a part of any specific package)
+# then the attribute should be set to ''.
+# https://docs.python.org/3.8/library/types.html
+__package__ = ''
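The import that the deprecation warning above recommends, with a pure-Python
fallback for builds without LibYAML (a sketch of common caller-side usage,
not part of the stub itself):

    import yaml

    try:
        from yaml import CLoader as Loader, CDumper as Dumper
    except ImportError:
        from yaml import Loader, Dumper

    text = yaml.dump({'answer': 42}, Dumper=Dumper)
    assert yaml.load(text, Loader=Loader) == {'answer': 42}
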
diff --git a/third_party/python/PyYAML/lib/yaml/__init__.py b/third_party/python/PyYAML/lib/yaml/__init__.py
new file mode 100644
index 0000000000..3c988198d5
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/__init__.py
@@ -0,0 +1,431 @@
+
+from error import *
+
+from tokens import *
+from events import *
+from nodes import *
+
+from loader import *
+from dumper import *
+
+__version__ = '5.4.1'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+
+#------------------------------------------------------------------------------
+# Warnings control
+#------------------------------------------------------------------------------
+
+# 'Global' warnings state:
+_warnings_enabled = {
+ 'YAMLLoadWarning': True,
+}
+
+# Get or set the global warnings state
+def warnings(settings=None):
+ if settings is None:
+ return _warnings_enabled
+
+ if type(settings) is dict:
+ for key in settings:
+ if key in _warnings_enabled:
+ _warnings_enabled[key] = settings[key]
+
+# Warn when load() is called without Loader=...
+class YAMLLoadWarning(RuntimeWarning):
+ pass
+
+def load_warning(method):
+ if _warnings_enabled['YAMLLoadWarning'] is False:
+ return
+
+ import warnings
+
+ message = (
+ "calling yaml.%s() without Loader=... is deprecated, as the "
+ "default Loader is unsafe. Please read "
+ "https://msg.pyyaml.org/load for full details."
+ ) % method
+
+ warnings.warn(message, YAMLLoadWarning, stacklevel=3)
+
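The warnings() helper above lets callers silence the load() deprecation
notice globally. A sketch (this mutates module-level state, so it is best
done once at program start):

    import yaml

    yaml.warnings({'YAMLLoadWarning': False})   # suppress the Loader=... warning
    data = yaml.load('x: 1')                    # no YAMLLoadWarning emitted
    yaml.warnings({'YAMLLoadWarning': True})    # restore the default
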
+#------------------------------------------------------------------------------
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
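scan(), parse() and compose() above expose the three intermediate layers
(tokens, events, representation nodes) that load() is built on. A small
sketch walking each layer over the same document:

    import yaml

    doc = 'a: 1\n'
    print([t.__class__.__name__ for t in yaml.scan(doc)])
    # e.g. StreamStartToken, BlockMappingStartToken, KeyToken, ScalarToken, ...
    print([e.__class__.__name__ for e in yaml.parse(doc)])
    # e.g. StreamStartEvent, DocumentStartEvent, MappingStartEvent, ...
    node = yaml.compose(doc)        # root node of the first document
    print(node.tag)                 # tag:yaml.org,2002:map
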
+def load(stream, Loader=None):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ if Loader is None:
+ load_warning('load')
+ Loader = FullLoader
+
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=None):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ if Loader is None:
+ load_warning('load_all')
+ Loader = FullLoader
+
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def full_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve all tags except those known to be
+ unsafe on untrusted input.
+ """
+ return load(stream, FullLoader)
+
+def full_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve all tags except those known to be
+ unsafe on untrusted input.
+ """
+ return load_all(stream, FullLoader)
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve only basic YAML tags. This is known
+ to be safe for untrusted input.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve only basic YAML tags. This is known
+ to be safe for untrusted input.
+ """
+ return load_all(stream, SafeLoader)
+
+def unsafe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve all tags, even those known to be
+ unsafe on untrusted input.
+ """
+ return load(stream, UnsafeLoader)
+
+def unsafe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve all tags, even those known to be
+ unsafe on untrusted input.
+ """
+ return load_all(stream, UnsafeLoader)
+
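The load variants above differ only in which tags they resolve. A sketch
contrasting safe_load() with the others on a Python-specific tag:

    import yaml

    doc = 'a: [1, 2]'
    assert yaml.safe_load(doc) == yaml.full_load(doc) == {'a': [1, 2]}

    try:
        yaml.safe_load('!!python/tuple [1, 2]')   # Python-specific tag
    except yaml.YAMLError as err:
        print('safe_load refused it: %s' % type(err).__name__)
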
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ from StringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding='utf-8', explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
+ stream = StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
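A sketch of the dump() knobs shown in the signatures above (block vs. flow
style, and key ordering via sort_keys):

    import yaml

    data = {'b': 2, 'a': [1, 2]}
    print(yaml.safe_dump(data))                           # keys sorted: a before b
    print(yaml.safe_dump(data, default_flow_style=True))  # {a: [1, 2], b: 2}
    print(yaml.safe_dump(data, sort_keys=False))          # keys left unsorted
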
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=None, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ if Loader is None:
+ loader.Loader.add_implicit_resolver(tag, regexp, first)
+ loader.FullLoader.add_implicit_resolver(tag, regexp, first)
+ loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
+ else:
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ if Loader is None:
+ loader.Loader.add_path_resolver(tag, path, kind)
+ loader.FullLoader.add_path_resolver(tag, path, kind)
+ loader.UnsafeLoader.add_path_resolver(tag, path, kind)
+ else:
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=None):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ loader.Loader.add_constructor(tag, constructor)
+ loader.FullLoader.add_constructor(tag, constructor)
+ loader.UnsafeLoader.add_constructor(tag, constructor)
+ else:
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
+ loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
+ loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
+ else:
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
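A sketch tying add_implicit_resolver() to add_constructor(): plain scalars
matching the pattern get the hypothetical !degrees tag, and the constructor
turns them into floats. Registering on SafeLoader keeps safe_load() usable:

    import re
    import yaml

    pattern = re.compile(r'^\d+(\.\d+)?deg$')    # e.g. "90deg" (hypothetical form)
    yaml.add_implicit_resolver('!degrees', pattern, Loader=yaml.SafeLoader)

    def construct_degrees(loader, node):
        return float(loader.construct_scalar(node)[:-3])

    yaml.add_constructor('!degrees', construct_degrees, Loader=yaml.SafeLoader)

    print(yaml.safe_load('angle: 90deg'))        # {'angle': 90.0}
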
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
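And the dump side: a representer turns instances back into tagged nodes.
Continuing the hypothetical !degrees example with add_representer():

    import yaml

    class Degrees(float):
        pass

    def represent_degrees(dumper, value):
        return dumper.represent_scalar('!degrees', '%gdeg' % value)

    yaml.add_representer(Degrees, represent_degrees)
    print(yaml.dump({'angle': Degrees(90)}))     # roughly: angle: !degrees 90deg
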
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ if isinstance(cls.yaml_loader, list):
+ for loader in cls.yaml_loader:
+ loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ else:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(object):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = [Loader, FullLoader, UnsafeLoader]
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+ from_yaml = classmethod(from_yaml)
+
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+ to_yaml = classmethod(to_yaml)
+
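A sketch of subclassing YAMLObject: setting yaml_tag is enough for the
metaclass above to register both the constructor and the representer. The
Monster class and its fields are hypothetical:

    import yaml

    class Monster(yaml.YAMLObject):
        yaml_tag = u'!Monster'
        yaml_loader = yaml.SafeLoader    # opt in to safe_load for this tag

        def __init__(self, name, hp):
            self.name = name
            self.hp = hp

    m = yaml.safe_load('!Monster {name: Dragon, hp: 16}')
    print('%s %s' % (m.name, m.hp))      # Dragon 16
    print(yaml.dump(m))                  # !Monster with hp/name fields
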
diff --git a/third_party/python/PyYAML/lib/yaml/composer.py b/third_party/python/PyYAML/lib/yaml/composer.py
new file mode 100644
index 0000000000..df85ef653b
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from error import MarkedYAMLError
+from events import *
+from nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer(object):
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+        # Check whether more documents are available.
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor.encode('utf-8'), event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurrence"
+ % anchor.encode('utf-8'), self.anchors[anchor].start_mark,
+ "second occurrence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == u'!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
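The composer resolves an alias by handing back the very node object stored
for its anchor, which is what makes shared and recursive structures work. A
sketch making the sharing visible:

    import yaml

    node = yaml.compose('left: &a {x: 1}\nright: *a')
    (k1, v1), (k2, v2) = node.value      # mapping nodes hold (key, value) pairs
    assert v1 is v2                      # the alias reuses the anchored node

    data = yaml.safe_load('left: &a {x: 1}\nright: *a')
    assert data['left'] is data['right'] # one shared dict after construction
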
diff --git a/third_party/python/PyYAML/lib/yaml/constructor.py b/third_party/python/PyYAML/lib/yaml/constructor.py
new file mode 100644
index 0000000000..ff4e36828e
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/constructor.py
@@ -0,0 +1,766 @@
+
+__all__ = [
+ 'BaseConstructor',
+ 'SafeConstructor',
+ 'FullConstructor',
+ 'UnsafeConstructor',
+ 'Constructor',
+ 'ConstructorError'
+]
+
+from error import *
+from nodes import *
+
+import datetime
+
+import binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+
+class timezone(datetime.tzinfo):
+ def __init__(self, offset):
+ self._offset = offset
+ seconds = abs(offset).total_seconds()
+ self._name = 'UTC%s%02d:%02d' % (
+ '-' if offset.days < 0 else '+',
+ seconds // 3600,
+ seconds % 3600 // 60
+ )
+
+ def tzname(self, dt=None):
+ return self._name
+
+ def utcoffset(self, dt=None):
+ return self._offset
+
+ def dst(self, dt=None):
+ return datetime.timedelta(0)
+
+ def __copy__(self):
+ return self.__deepcopy__()
+
+ def __deepcopy__(self, memodict={}):
+ return self.__class__(self.utcoffset())
+
+ __repr__ = __str__ = tzname
+
+
+class BaseConstructor(object):
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+        # Check whether more documents are available.
+ return self.check_node()
+
+ def check_state_key(self, key):
+ """Block special attributes/methods from being set in a newly created
+ object, to prevent user-controlled methods from being called during
+ deserialization"""
+ if self.get_state_keys_blacklist_regexp().match(key):
+ raise ConstructorError(None, None,
+ "blacklisted key '%s' in instance state found" % (key,), None)
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if tag_prefix is not None and node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ def add_constructor(cls, tag, constructor):
+ if not 'yaml_constructors' in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+ add_constructor = classmethod(add_constructor)
+
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+ if not 'yaml_multi_constructors' in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+ add_multi_constructor = classmethod(add_multi_constructor)
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ u'yes': True,
+ u'no': False,
+ u'true': True,
+ u'false': False,
+ u'on': True,
+ u'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
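+            # YAML 1.1 sexagesimal notation: '190:20:30' parses as
+            # 190*3600 + 20*60 + 30 == 685230.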
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = str(self.construct_scalar(node))
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return str(value).decode('base64')
+ except (binascii.Error, UnicodeEncodeError), exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ ur'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
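+    # Accepts YAML 1.1 timestamps, e.g. '2002-12-14' (date only) or
+    # '2001-12-14 21:59:43.10 -5' (with a fraction and numeric UTC offset).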
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ tzinfo = None
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ tzinfo = timezone(delta)
+ elif values['tz']:
+ tzinfo = timezone(datetime.timedelta(0))
+ return datetime.datetime(year, month, day, hour, minute, second, fraction,
+ tzinfo=tzinfo)
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ value = self.construct_scalar(node)
+ try:
+ return value.encode('ascii')
+ except UnicodeEncodeError:
+ return value
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
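+        # Yield the bare instance first so aliases to this node can be
+        # resolved before its state is filled in (see construct_object).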
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag.encode('utf-8'),
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class FullConstructor(SafeConstructor):
+    # 'extend' is blacklisted because it is used by
+    # construct_python_object_apply to add `listitems` to a newly
+    # generated Python instance.
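+    # State keys matching these patterns are rejected by check_state_key
+    # when restoring object state, so a document cannot smuggle dunder
+    # attributes such as '__class__' through !!python/object state.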
+ def get_state_keys_blacklist(self):
+ return ['^extend$', '^__.*__$']
+
+ def get_state_keys_blacklist_regexp(self):
+ if not hasattr(self, 'state_keys_blacklist_regexp'):
+ self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
+ return self.state_keys_blacklist_regexp
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node).encode('utf-8')
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_long(self, node):
+ return long(self.construct_yaml_int(node))
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark, unsafe=False):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ if unsafe:
+ try:
+ __import__(name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name.encode('utf-8'), exc), mark)
+ if name not in sys.modules:
+ raise ConstructorError("while constructing a Python module", mark,
+ "module %r is not imported" % name.encode('utf-8'), mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark, unsafe=False):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if u'.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = '__builtin__'
+ object_name = name
+ if unsafe:
+ try:
+ __import__(module_name)
+ except ImportError, exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name.encode('utf-8'), exc), mark)
+ if module_name not in sys.modules:
+ raise ConstructorError("while constructing a Python object", mark,
+ "module %r is not imported" % module_name.encode('utf-8'), mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r" % (object_name.encode('utf-8'),
+ module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value.encode('utf-8'),
+ node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ class classobj: pass
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False, unsafe=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if not (unsafe or isinstance(cls, type) or isinstance(cls, type(self.classobj))):
+ raise ConstructorError("while constructing a Python instance", node.start_mark,
+ "expected a class, but found %r" % type(cls),
+ node.start_mark)
+ if newobj and isinstance(cls, type(self.classobj)) \
+ and not args and not kwds:
+ instance = self.classobj()
+ instance.__class__ = cls
+ return instance
+ elif newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
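+    # Mirrors pickle's state protocol: a 2-tuple state is interpreted as
+    # (__dict__ contents, slot attributes); otherwise the state mapping
+    # updates __dict__ directly, unless the object defines __setstate__.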
+ def set_python_instance_state(self, instance, state, unsafe=False):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ if not unsafe and state:
+ for key in state.keys():
+ self.check_state_key(key)
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ if not unsafe:
+ self.check_state_key(key)
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+        # The difference between !!python/object/apply and !!python/object/new
+        # is how the object is created; see make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/none',
+ FullConstructor.construct_yaml_null)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/bool',
+ FullConstructor.construct_yaml_bool)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/str',
+ FullConstructor.construct_python_str)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/unicode',
+ FullConstructor.construct_python_unicode)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/int',
+ FullConstructor.construct_yaml_int)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/long',
+ FullConstructor.construct_python_long)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/float',
+ FullConstructor.construct_yaml_float)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/complex',
+ FullConstructor.construct_python_complex)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/list',
+ FullConstructor.construct_yaml_seq)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/tuple',
+ FullConstructor.construct_python_tuple)
+
+FullConstructor.add_constructor(
+ u'tag:yaml.org,2002:python/dict',
+ FullConstructor.construct_yaml_map)
+
+FullConstructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/name:',
+ FullConstructor.construct_python_name)
+
+class UnsafeConstructor(FullConstructor):
+
+ def find_python_module(self, name, mark):
+ return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True)
+
+ def find_python_name(self, name, mark):
+ return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True)
+
+ def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
+ return super(UnsafeConstructor, self).make_python_instance(
+ suffix, node, args, kwds, newobj, unsafe=True)
+
+ def set_python_instance_state(self, instance, state):
+ return super(UnsafeConstructor, self).set_python_instance_state(
+ instance, state, unsafe=True)
+
+UnsafeConstructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/module:',
+ UnsafeConstructor.construct_python_module)
+
+UnsafeConstructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object:',
+ UnsafeConstructor.construct_python_object)
+
+UnsafeConstructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/new:',
+ UnsafeConstructor.construct_python_object_new)
+
+UnsafeConstructor.add_multi_constructor(
+ u'tag:yaml.org,2002:python/object/apply:',
+ UnsafeConstructor.construct_python_object_apply)
+
+# Constructor is the same as UnsafeConstructor. Need to leave this in place
+# in case people have extended it directly.
+class Constructor(UnsafeConstructor):
+ pass
diff --git a/third_party/python/PyYAML/lib/yaml/cyaml.py b/third_party/python/PyYAML/lib/yaml/cyaml.py
new file mode 100644
index 0000000000..768b49d6b9
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/cyaml.py
@@ -0,0 +1,101 @@
+
+__all__ = [
+ 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper'
+]
+
+from yaml._yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
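+# These classes mirror the pure-Python loaders and dumpers, but delegate
+# parsing and emitting to the compiled libyaml bindings (CParser and
+# CEmitter from the yaml._yaml extension module).
+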
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CFullLoader(CParser, FullConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ FullConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ UnsafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
diff --git a/third_party/python/PyYAML/lib/yaml/dumper.py b/third_party/python/PyYAML/lib/yaml/dumper.py
new file mode 100644
index 0000000000..f9cd49fda5
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from emitter import *
+from serializer import *
+from representer import *
+from resolver import *
+
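+# Each dumper composes the same pipeline: a Representer turns native
+# Python objects into nodes, a Serializer turns nodes into events, an
+# Emitter writes the characters, and a Resolver picks implicit tags.
+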
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
diff --git a/third_party/python/PyYAML/lib/yaml/emitter.py b/third_party/python/PyYAML/lib/yaml/emitter.py
new file mode 100644
index 0000000000..23c25ca80a
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/emitter.py
@@ -0,0 +1,1144 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
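+#
+# For example, dumping {'a': [1]} produces roughly this event stream:
+#   StreamStart DocumentStart MappingStart Scalar('a')
+#     SequenceStart Scalar('1') SequenceEnd
+#   MappingEnd DocumentEnd StreamEnd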
+
+__all__ = ['Emitter', 'EmitterError']
+
+import sys
+
+from error import YAMLError
+from events import *
+
+has_ucs4 = sys.maxunicode > 0xffff
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis(object):
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter(object):
+
+ DEFAULT_TAG_PREFIXES = {
+ u'!' : u'!',
+ u'tag:yaml.org,2002:' : u'!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+        #  - is it whitespace?
+ # - is it an indention character
+ # (indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = u'\n'
+ if line_break in [u'\r', u'\n', u'\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+    # In some cases, we wait for the next few events before emitting.
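+    # For example, an empty collection or an implicit document start can
+    # only be recognized by peeking at the events that follow the
+    # corresponding *Start event.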
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = self.event.tags.keys()
+ handles.sort()
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator(u'---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator(u'...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor(u'&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor(u'*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator(u'[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator(u'{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(u',', False)
+ self.write_indent()
+ self.write_indicator(u'}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(u',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(u':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator(u'-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator(u'?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(u':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(u':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == u'')
+
+ def check_simple_key(self):
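+        # A key is emitted inline (before ':') only if its rendered form is
+        # short (< 128 characters) and single-line; otherwise the mapping
+        # handlers fall back to the explicit '?' key form.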
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = u'!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
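+        # Preference order: the explicitly requested style when legal, then
+        # plain, block ('|' or '>'), single-quoted, and finally
+        # double-quoted, which can represent any string.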
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return u'%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != u'!' or handle[-1] != u'!':
+ raise EmitterError("tag handle must start and end with '!': %r"
+ % (handle.encode('utf-8')))
+ for ch in handle[1:-1]:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch.encode('utf-8'), handle.encode('utf-8')))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == u'!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(prefix[start:end])
+ return u''.join(chunks)
+
+ def prepare_tag(self, tag):
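+        # Emit the shortest usable form: a registered handle plus the
+        # percent-escaped suffix (e.g. '!!str') when a known prefix
+        # matches, otherwise the verbatim '!<...>' form.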
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == u'!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == u'!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == u'!' and handle != u'!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append(u'%%%02X' % ord(ch))
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = u''.join(chunks)
+ if handle:
+ return u'%s%s' % (handle, suffix_text)
+ else:
+ return u'!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch.encode('utf-8'), anchor.encode('utf-8')))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith(u'---') or scalar.startswith(u'...'):
+ block_indicators = True
+ flow_indicators = True
+
+        # First character or preceded by whitespace.
+ preceded_by_whitespace = True
+
+        # Last character or followed by whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in u'#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in u'?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+                # Some indicators cannot appear within a scalar either.
+ if ch in u',?[]{}':
+ flow_indicators = True
+ if ch == u':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == u'#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in u'\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
+ if (ch == u'\x85' or u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'
+ or (u'\U00010000' <= ch < u'\U0010ffff')) and ch != u'\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+        # allowed for double quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = u' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = u' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = u'%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = u'%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator(u'\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != u' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029' or ch == u'\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator(u'\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ u'\0': u'0',
+ u'\x07': u'a',
+ u'\x08': u'b',
+ u'\x09': u't',
+ u'\x0A': u'n',
+ u'\x0B': u'v',
+ u'\x0C': u'f',
+ u'\x0D': u'r',
+ u'\x1B': u'e',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'\x85': u'N',
+ u'\xA0': u'_',
+ u'\u2028': u'L',
+ u'\u2029': u'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator(u'"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in u'"\\\x85\u2028\u2029\uFEFF' \
+ or not (u'\x20' <= ch <= u'\x7E'
+ or (self.allow_unicode
+ and (u'\xA0' <= ch <= u'\uD7FF'
+ or u'\uE000' <= ch <= u'\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = u'\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= u'\xFF':
+ data = u'\\x%02X' % ord(ch)
+ elif ch <= u'\uFFFF':
+ data = u'\\u%04X' % ord(ch)
+ else:
+ data = u'\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == u' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+u'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == u' ':
+ data = u'\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator(u'"', False)
+
+ def determine_block_hints(self, text):
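+        # Block scalar header hints: an explicit indentation indicator when
+        # the text begins with a space or a break, '-' (strip) when the text
+        # lacks a final break, and '+' (keep) when trailing breaks must be
+        # preserved.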
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != u' ' \
+ and text[start] == u'\n':
+ self.write_line_break()
+ leading_space = (ch == u' ')
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ spaces = (ch == u' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in u'\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in u'\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = u' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != u' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in u'\n\x85\u2028\u2029':
+ if text[start] == u'\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == u'\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in u' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == u' ')
+ breaks = (ch in u'\n\x85\u2028\u2029')
+ end += 1
diff --git a/third_party/python/PyYAML/lib/yaml/error.py b/third_party/python/PyYAML/lib/yaml/error.py
new file mode 100644
index 0000000000..577686db5f
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark(object):
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in u'\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end].encode('utf-8')
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
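+# A minimal sketch of how a Mark renders an error location (illustrative
+# values, not upstream text; assumes the buffer is still available):
+#
+#     >>> mark = Mark('<string>', 5, 0, 5, u'key: value\0', 5)
+#     >>> print str(mark)
+#      in "<string>", line 1, column 6:
+#         key: value
+#              ^
+#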
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/third_party/python/PyYAML/lib/yaml/events.py b/third_party/python/PyYAML/lib/yaml/events.py
new file mode 100644
index 0000000000..f79ad389cb
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/events.py
@@ -0,0 +1,86 @@
+
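+# As an informal illustration (not upstream text): for the document "- 1",
+# a parser would emit roughly this event stream:
+#
+#     StreamStartEvent, DocumentStartEvent, SequenceStartEvent,
+#     ScalarEvent(value=u'1'), SequenceEndEvent, DocumentEndEvent,
+#     StreamEndEvent
+#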
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/third_party/python/PyYAML/lib/yaml/loader.py b/third_party/python/PyYAML/lib/yaml/loader.py
new file mode 100644
index 0000000000..4d773c3cc1
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/loader.py
@@ -0,0 +1,63 @@
+
+__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
+
+from reader import *
+from scanner import *
+from parser import *
+from composer import *
+from constructor import *
+from resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ FullConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+# UnsafeLoader is the same as Loader (which is and was always unsafe on
+# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
+# FullLoader should be able to load almost all YAML safely. Loader is left intact
+# to ensure backwards compatibility.
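+#
+# A hedged usage sketch (yaml.load is the package-level helper defined in
+# the package __init__, not in this module); prefer SafeLoader unless full
+# Python object construction is really needed:
+#
+#     import yaml
+#     data = yaml.load(stream, Loader=yaml.SafeLoader)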
+class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
diff --git a/third_party/python/PyYAML/lib/yaml/nodes.py b/third_party/python/PyYAML/lib/yaml/nodes.py
new file mode 100644
index 0000000000..c4f070c41e
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/third_party/python/PyYAML/lib/yaml/parser.py b/third_party/python/PyYAML/lib/yaml/parser.py
new file mode 100644
index 0000000000..f9e3057f33
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { BLOCK-ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
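+# As an informal illustration (not upstream text), parsing the document
+# "a: [1, 2]" produces the event sequence:
+#
+#     STREAM-START, DOCUMENT-START, MAPPING-START, SCALAR(a),
+#     SEQUENCE-START, SCALAR(1), SCALAR(2), SEQUENCE-END,
+#     MAPPING-END, DOCUMENT-END, STREAM-END
+#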
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+ # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while the production rules for flow_sequence_entry and
+ # flow_mapping_entry are identical, their interpretations differ.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+ # generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
diff --git a/third_party/python/PyYAML/lib/yaml/reader.py b/third_party/python/PyYAML/lib/yaml/reader.py
new file mode 100644
index 0000000000..4c42150989
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/reader.py
@@ -0,0 +1,193 @@
+# This module contains abstractions for the input stream. You don't need to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+# reader.peek(index=0) - return the character `index` positions past the current one.
+# reader.prefix(length=1) - return the next `length` characters.
+# reader.forward(length=1) - move the current position `length` characters forward.
+# reader.index - the index of the current character.
+# reader.line, reader.column - the line and the column of the current character.
+
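+# A short sketch of the interface described above (illustrative):
+#
+#     >>> reader = Reader(u'hello')
+#     >>> reader.peek()
+#     u'h'
+#     >>> reader.forward(2)
+#     >>> reader.peek()
+#     u'l'
+#     >>> reader.index, reader.line, reader.column
+#     (2, 0, 2)
+#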
+__all__ = ['Reader', 'ReaderError']
+
+from error import YAMLError, Mark
+
+import codecs, re, sys
+
+has_ucs4 = sys.maxunicode > 0xffff
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, str):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to unicode,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `str` object,
+ # - a `unicode` object,
+ # - a file-like object with its `read` method returning `str`,
+ # - a file-like object with its `read` method returning `unicode`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = u''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, unicode):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+u'\0'
+ elif isinstance(stream, str):
+ self.name = "<string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = ''
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in u'\n\x85\u2028\u2029' \
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != u'\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and len(self.raw_buffer) < 2:
+ self.update_raw()
+ if not isinstance(self.raw_buffer, unicode):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ if has_ucs4:
+ NON_PRINTABLE = u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]'
+ elif sys.platform.startswith('java'):
+ # Jython doesn't support lone surrogates https://bugs.jython.org/issue2048
+ NON_PRINTABLE = u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD]'
+ else:
+ # Need to use eval here due to the above Jython issue
+ NON_PRINTABLE = eval(r"u'[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uFFFD]|(?:^|[^\uD800-\uDBFF])[\uDC00-\uDFFF]|[\uD800-\uDBFF](?:[^\uDC00-\uDFFF]|$)'")
+ NON_PRINTABLE = re.compile(NON_PRINTABLE)
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError, exc:
+ character = exc.object[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += u'\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=1024):
+ data = self.stream.read(size)
+ if data:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ else:
+ self.eof = True
diff --git a/third_party/python/PyYAML/lib/yaml/representer.py b/third_party/python/PyYAML/lib/yaml/representer.py
new file mode 100644
index 0000000000..93e09b67b3
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/representer.py
@@ -0,0 +1,489 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from error import *
+
+from nodes import *
+
+import datetime
+
+import copy_reg, types
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter(object):
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
+ self.default_style = default_style
+ self.default_flow_style = default_flow_style
+ self.sort_keys = sort_keys
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def get_classobj_bases(self, cls):
+ bases = [cls]
+ for base in cls.__bases__:
+ bases.extend(self.get_classobj_bases(base))
+ return bases
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if type(data) is types.InstanceType:
+ data_types = self.get_classobj_bases(data.__class__)+list(data_types)
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, unicode(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+ add_representer = classmethod(add_representer)
+
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+ add_multi_representer = classmethod(add_multi_representer)
+
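+ # A hedged sketch (hypothetical Point class, not upstream code) of
+ # registering a custom representer through the classmethods above:
+ #
+ #     class Point(object):
+ #         def __init__(self, x, y):
+ #             self.x, self.y = x, y
+ #
+ #     def represent_point(dumper, data):
+ #         return dumper.represent_sequence(u'!point', [data.x, data.y])
+ #
+ #     Representer.add_representer(Point, represent_point)
+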
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ if self.sort_keys:
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data is None:
+ return True
+ if isinstance(data, tuple) and data == ():
+ return True
+ if isinstance(data, (str, unicode, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:null',
+ u'null')
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:str', data)
+
+ def represent_bool(self, data):
+ if data:
+ value = u'true'
+ else:
+ value = u'false'
+ return self.represent_scalar(u'tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ def represent_long(self, data):
+ return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
+ value = u'.inf'
+ elif data == -self.inf_value:
+ value = u'-.inf'
+ else:
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+ # without the decimal parts. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
+ return self.represent_scalar(u'tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping(u'tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping(u'tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = unicode(data.isoformat())
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = unicode(data.isoformat(' '))
+ return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object", data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(unicode,
+ SafeRepresenter.represent_unicode)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(long,
+ SafeRepresenter.represent_long)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_str(self, data):
+ tag = None
+ style = None
+ try:
+ data = unicode(data, 'ascii')
+ tag = u'tag:yaml.org,2002:str'
+ except UnicodeDecodeError:
+ try:
+ data = unicode(data, 'utf-8')
+ tag = u'tag:yaml.org,2002:python/str'
+ except UnicodeDecodeError:
+ data = data.encode('base64')
+ tag = u'tag:yaml.org,2002:binary'
+ style = '|'
+ return self.represent_scalar(tag, data, style=style)
+
+ def represent_unicode(self, data):
+ tag = None
+ try:
+ data.encode('ascii')
+ tag = u'tag:yaml.org,2002:python/unicode'
+ except UnicodeEncodeError:
+ tag = u'tag:yaml.org,2002:str'
+ return self.represent_scalar(tag, data)
+
+ def represent_long(self, data):
+ tag = u'tag:yaml.org,2002:int'
+ if int(data) is not data:
+ tag = u'tag:yaml.org,2002:python/long'
+ return self.represent_scalar(tag, unicode(data))
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = u'%r' % data.real
+ elif data.real == 0.0:
+ data = u'%rj' % data.imag
+ elif data.imag > 0:
+ data = u'%r+%rj' % (data.real, data.imag)
+ else:
+ data = u'%r%rj' % (data.real, data.imag)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence(u'tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = u'%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar(u'tag:yaml.org,2002:python/name:'+name, u'')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ u'tag:yaml.org,2002:python/module:'+data.__name__, u'')
+
+ def represent_instance(self, data):
+ # For instances of classic classes, we use __getinitargs__ and
+ # __getstate__ to serialize the data.
+
+ # If data.__getinitargs__ exists, the object must be reconstructed by
+ # calling cls(**args), where args is a tuple returned by
+ # __getinitargs__. Otherwise, the cls.__init__ method should never be
+ # called and the class instance is created by instantiating a trivial
+ # class and assigning to the instance's __class__ variable.
+
+ # If data.__getstate__ exists, it returns the state of the object.
+ # Otherwise, the state of the object is data.__dict__.
+
+ # We produce either a !!python/object or !!python/object/new node.
+ # If data.__getinitargs__ does not exist and state is a dictionary, we
+ # produce a !!python/object node. Otherwise we produce a
+ # !!python/object/new node.
+
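+ # For example (illustrative, not upstream text): a classic class whose
+ # __getinitargs__ returns (1, 2) and whose __dict__ is empty would be
+ # represented as:
+ #
+ #     !!python/object/new:module.Class [1, 2]
+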
+ cls = data.__class__
+ class_name = u'%s.%s' % (cls.__module__, cls.__name__)
+ args = None
+ state = None
+ if hasattr(data, '__getinitargs__'):
+ args = list(data.__getinitargs__())
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__
+ if args is None and isinstance(state, dict):
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+class_name, state)
+ if isinstance(state, dict) and not state:
+ return self.represent_sequence(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ value['state'] = state
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object/new:'+class_name, value)
+
+ def represent_object(self, data):
+ # We use the __reduce__ API to save the data. data.__reduce__ returns
+ # a tuple of length 2-5:
+ # (function, args, state, listitems, dictitems)
+
+ # For reconstructing, we call function(*args), then set its state,
+ # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
+
+ cls = type(data)
+ if cls in copy_reg.dispatch_table:
+ reduce = copy_reg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent an object", data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = u'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = u'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = u'%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ u'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+Representer.add_representer(str,
+ Representer.represent_str)
+
+Representer.add_representer(unicode,
+ Representer.represent_unicode)
+
+Representer.add_representer(long,
+ Representer.represent_long)
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(types.ClassType,
+ Representer.represent_name)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(types.InstanceType,
+ Representer.represent_instance)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/third_party/python/PyYAML/lib/yaml/resolver.py b/third_party/python/PyYAML/lib/yaml/resolver.py
new file mode 100644
index 0000000000..ba9aeab21d
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/resolver.py
@@ -0,0 +1,227 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from error import *
+from nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver(object):
+
+ DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = u'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ implicit_resolvers = {}
+ for key in cls.yaml_implicit_resolvers:
+ implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
+ cls.yaml_implicit_resolvers = implicit_resolvers
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+ add_implicit_resolver = classmethod(add_implicit_resolver)
+
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+ # `path` is a pattern that is matched against the path from the
+ # root to the node that is being considered. The `path` elements are
+ # tuples `(node_check, index_check)`. `node_check` is a node class:
+ # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+ # matches any kind of node. `index_check` could be `None`, a boolean
+ # value, a string value, or a number. `None` and `False` match against
+ # any _value_ of sequence and mapping nodes. `True` matches against
+ # any _key_ of a mapping node. A string `index_check` matches against
+ # a mapping value that corresponds to a scalar key whose content is
+ # equal to the `index_check` value. An integer `index_check` matches
+ # against a sequence value whose index is equal to `index_check`.
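+ #
+ # A hedged example (hypothetical tag/path, not upstream code): resolve
+ # mapping nodes found under a top-level 'metadata' key with the standard
+ # map tag:
+ #
+ #     Resolver.add_path_resolver(u'tag:yaml.org,2002:map',
+ #                                [u'metadata'], dict)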
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, basestring) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (basestring, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+ add_path_resolver = classmethod(add_path_resolver)
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, basestring):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, basestring):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == u'':
+ resolvers = self.yaml_implicit_resolvers.get(u'', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers + wildcard_resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
+
+class Resolver(BaseResolver):
+ pass
+
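+# As a sketch of how the registrations below behave (illustrative):
+# Resolver().resolve(ScalarNode, u'yes', (True, False)) returns
+# u'tag:yaml.org,2002:bool', while a plain scalar that matches no regexp
+# falls back to the default u'tag:yaml.org,2002:str'.
+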
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:bool',
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list(u'yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:float',
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list(u'-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:int',
+ re.compile(ur'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list(u'-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:merge',
+ re.compile(ur'^(?:<<)$'),
+ [u'<'])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:null',
+ re.compile(ur'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ [u'~', u'n', u'N', u''])
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:timestamp',
+ re.compile(ur'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list(u'0123456789'))
+
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:value',
+ re.compile(ur'^(?:=)$'),
+ [u'='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ u'tag:yaml.org,2002:yaml',
+ re.compile(ur'^(?:!|&|\*)$'),
+ list(u'!&*'))
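+
+# An illustrative sketch (not part of upstream PyYAML): how the implicit
+# resolvers registered above are consulted. `resolve` dispatches on the
+# first character of the scalar and also tries the wildcard (None)
+# bucket, assuming the Python 2 lib/ package layout used here:
+#
+#   >>> from yaml.nodes import ScalarNode
+#   >>> from yaml.resolver import Resolver
+#   >>> Resolver().resolve(ScalarNode, u'yes', (True, False))
+#   u'tag:yaml.org,2002:bool'
+#   >>> Resolver().resolve(ScalarNode, u'12:34:56', (True, False))
+#   u'tag:yaml.org,2002:int'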
+
diff --git a/third_party/python/PyYAML/lib/yaml/scanner.py b/third_party/python/PyYAML/lib/yaml/scanner.py
new file mode 100644
index 0000000000..098ea7be82
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/scanner.py
@@ -0,0 +1,1444 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
+
+__all__ = ['Scanner', 'ScannerError']
+
+from error import MarkedYAMLError
+from tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey(object):
+ # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner(object):
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+ # Reader does the dirty work of checking for BOM and converting the
+ # input data to Unicode. It also adds NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+ # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Examples of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+ # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
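+
+ # For example (illustrative, not from upstream): scanning the
+ # document
+ # a: 1
+ # produces the token stream
+ # STREAM-START BLOCK-MAPPING-START KEY SCALAR(a) VALUE
+ # SCALAR(1) BLOCK-END STREAM-END
+ # where KEY and BLOCK-MAPPING-START are inserted retroactively once
+ # the ':' following the potential simple key 'a' is found.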
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+ # Return the next token, but do not delete it from the queue.
+ # Return None if no more tokens.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+ else:
+ return None
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
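+ # A hedged usage sketch (not upstream code): the public methods above
+ # are normally driven through the package helper yaml.scan(), roughly:
+ #
+ # import yaml
+ # for token in yaml.scan(u'a: 1'):
+ # print token
+ #
+ # where the Loader class mixes Reader (input handling) and Scanner
+ # together, as assumed in __init__ above.
+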
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == u'\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == u'%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == u'-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == u'.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == u'\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == u'[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == u'{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == u']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == u'}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == u',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == u'-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == u'?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == u':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == u'*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == u'&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == u'!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == u'|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == u'>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == u'\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == u'\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token"
+ % ch.encode('utf-8'), self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in self.possible_simple_keys.keys():
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+ # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid indentation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+ # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
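+
+ # Illustrative example (not from upstream): for the document
+ # a:
+ # b: 1
+ # add_indent(0) and then add_indent(2) push -1 and 0 onto
+ # self.indents; at the end of the stream unwind_indent(-1) pops them
+ # back, emitting one BLOCK-END token per closed level.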
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+ # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+ # Do we determine a simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'---' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == u'...' \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
+ and (ch == u'-' or (not self.flow_level and ch in u'?:')))
+
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+ # The byte order mark is stripped if it's the first character in the
+ # stream. We do not yet support BOM inside the stream as the
+ # specification requires. Any such mark will be considered as a part
+ # of the document.
+ #
+ # TODO: We need to make tab handling rules more sane. A good rule is
+ # Tabs cannot precede tokens
+ # BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+ # KEY(block), VALUE(block), BLOCK-ENTRY
+ # So the checking code is
+ # if <TAB>:
+ # self.allow_simple_keys = False
+ # We also need to add the check for `allow_simple_keys == True` to
+ # `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == u'\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == u'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == u'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r"
+ % self.peek().encode('utf-8'),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not (u'0' <= ch <= u'9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 0
+ while u'0' <= self.peek(length) <= u'9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == u' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != u' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+ # Therefore we restrict aliases to numbers and ASCII letters.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == u'*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == u'<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != u'>':
+ raise ScannerError("while parsing a tag", start_mark,
+ "expected '>', but found %r" % self.peek().encode('utf-8'),
+ self.get_mark())
+ self.forward()
+ elif ch in u'\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = u'!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in u'\0 \r\n\x85\u2028\u2029':
+ if ch == u'!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+ handle = u'!'
+ if use_handle:
+ handle = self.scan_tag_handle('tag', start_mark)
+ else:
+ handle = u'!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = u''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != u'\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in u' \t'
+ length = 0
+ while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != u'\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == u'\n' \
+ and leading_non_space and self.peek() not in u' \t':
+ if not breaks:
+ chunks.append(u' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == u'\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(u' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in u'0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in u'+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ return chomping, increment
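+
+ # Illustrative examples (not from upstream): the block scalar header
+ # accepts chomping and indentation indicators in either order:
+ # '|+2' -> chomping=True, increment=2 (keep trailing breaks)
+ # '>-' -> chomping=False, increment=None (strip trailing breaks)
+ # '|' -> chomping=None, increment=None (clip to a single break)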
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == u' ':
+ self.forward()
+ if self.peek() == u'#':
+ while self.peek() not in u'\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in u'\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch.encode('utf-8'), self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() != u' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ while self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == u' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+ # Note that we loosen indentation rules for quoted scalars. Quoted
+ # scalars don't need to adhere to indentation because " and ' clearly
+ # mark the beginning and the end of them. Therefore we are less
+ # restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ u'0': u'\0',
+ u'a': u'\x07',
+ u'b': u'\x08',
+ u't': u'\x09',
+ u'\t': u'\x09',
+ u'n': u'\x0A',
+ u'v': u'\x0B',
+ u'f': u'\x0C',
+ u'r': u'\x0D',
+ u'e': u'\x1B',
+ u' ': u'\x20',
+ u'\"': u'\"',
+ u'\\': u'\\',
+ u'/': u'/',
+ u'N': u'\x85',
+ u'_': u'\xA0',
+ u'L': u'\u2028',
+ u'P': u'\u2029',
+ }
+
+ ESCAPE_CODES = {
+ u'x': 2,
+ u'u': 4,
+ u'U': 8,
+ }
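+
+ # Illustrative (not from upstream): in a double-quoted scalar,
+ # scan_flow_scalar_non_spaces() uses ESCAPE_CODES to decode
+ # "\x41" -> u'A' (2 hex digits)
+ # "\u263A" -> u'\u263a' (4 hex digits)
+ # "\U0001F600" -> a character outside the BMP (8 hex digits)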
+
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == u'\'' and self.peek(1) == u'\'':
+ chunks.append(u'\'')
+ self.forward(2)
+ elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == u'\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "expected escape sequence of %d hexdecimal numbers, but found %r" %
+ (length, self.peek(k).encode('utf-8')), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(unichr(code))
+ self.forward(length)
+ elif ch in u'\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in u' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == u'\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in u' \t':
+ self.forward()
+ if self.peek() in u'\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',' or '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+ # Indentation rules are loosened for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == u'#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in u'\0 \t\r\n\x85\u2028\u2029' \
+ or (ch == u':' and
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029'
+ + (u',[]{}' if self.flow_level else u''))\
+ or (self.flow_level and ch in u',?[]{}'):
+ break
+ length += 1
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == u'#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
+
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in u' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in u'\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in u' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == u'---' or prefix == u'...') \
+ and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != u'\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(u' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+ # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != u'!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != u' ':
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != u'!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if the URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
+ or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == u'%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+ raise ScannerError("while parsing a %s" % name, start_mark,
+ "expected URI, but found %r" % ch.encode('utf-8'),
+ self.get_mark())
+ return u''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ bytes = []
+ mark = self.get_mark()
+ while self.peek() == u'%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in u'0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
+ (self.peek(k).encode('utf-8')), self.get_mark())
+ bytes.append(chr(int(self.prefix(2), 16)))
+ self.forward(2)
+ try:
+ value = unicode(''.join(bytes), 'utf-8')
+ except UnicodeDecodeError, exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
+ def scan_line_break(self):
+ # Transforms:
+ # '\r\n' : '\n'
+ # '\r' : '\n'
+ # '\n' : '\n'
+ # '\x85' : '\n'
+ # '\u2028' : '\u2028'
+ # '\u2029 : '\u2029'
+ # default : ''
+ ch = self.peek()
+ if ch in u'\r\n\x85':
+ if self.prefix(2) == u'\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return u'\n'
+ elif ch in u'\u2028\u2029':
+ self.forward()
+ return ch
+ return u''
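+
+# Illustrative note (not from upstream): scan_line_break() normalizes
+# CR, LF, CRLF and NEL to u'\n' while passing the Unicode line and
+# paragraph separators through unchanged, so u'a\r\nb' and u'a\nb'
+# scan to identical token streams.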
diff --git a/third_party/python/PyYAML/lib/yaml/serializer.py b/third_party/python/PyYAML/lib/yaml/serializer.py
new file mode 100644
index 0000000000..0bf1e96dc1
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from error import YAMLError
+from events import *
+from nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer(object):
+
+ ANCHOR_TEMPLATE = u'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
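+# A hedged usage sketch (not part of upstream PyYAML): Serializer is
+# mixed into a Dumper together with Emitter, Representer and Resolver,
+# and yaml.serialize_all() drives it roughly as
+# dumper.open()
+# dumper.serialize(node) # may be called once per document
+# dumper.close()
+# which is why serialize() refuses to run on a stream that was never
+# opened or was already closed.
+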
diff --git a/third_party/python/PyYAML/lib/yaml/tokens.py b/third_party/python/PyYAML/lib/yaml/tokens.py
new file mode 100644
index 0000000000..4d0b48a394
--- /dev/null
+++ b/third_party/python/PyYAML/lib/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
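+
+# Illustrative (not from upstream): Token.__repr__ above lists every
+# attribute that does not end in '_mark', sorted by name, e.g.
+# ScalarToken(plain=True, style=None, value=u'a')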
+
diff --git a/third_party/python/PyYAML/lib3/PyYAML.egg-info/PKG-INFO b/third_party/python/PyYAML/lib3/PyYAML.egg-info/PKG-INFO
new file mode 100644
index 0000000000..04d0abf6e5
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/PyYAML.egg-info/PKG-INFO
@@ -0,0 +1,44 @@
+Metadata-Version: 1.2
+Name: PyYAML
+Version: 5.4.1
+Summary: YAML parser and emitter for Python
+Home-page: https://pyyaml.org/
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Download-URL: https://pypi.org/project/PyYAML/
+Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues
+Project-URL: CI, https://github.com/yaml/pyyaml/actions
+Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation
+Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core
+Project-URL: Source Code, https://github.com/yaml/pyyaml
+Description: YAML is a data serialization format designed for human readability
+ and interaction with scripting languages. PyYAML is a YAML parser
+ and emitter for Python.
+
+ PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+ support, capable extension API, and sensible error messages. PyYAML
+ supports standard YAML tags and provides Python-specific tags that
+ allow representing an arbitrary Python object.
+
+ PyYAML is applicable for a broad range of tasks from complex
+ configuration files to object serialization and persistence.
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*
diff --git a/third_party/python/PyYAML/lib3/PyYAML.egg-info/SOURCES.txt b/third_party/python/PyYAML/lib3/PyYAML.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..89f5a63c09
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/PyYAML.egg-info/SOURCES.txt
@@ -0,0 +1,670 @@
+CHANGES
+LICENSE
+MANIFEST.in
+Makefile
+README
+pyproject.toml
+setup.cfg
+setup.py
+examples/pygments-lexer/example.yaml
+examples/pygments-lexer/yaml.py
+examples/yaml-highlight/yaml_hl.cfg
+examples/yaml-highlight/yaml_hl.py
+lib/_yaml/__init__.py
+lib/yaml/__init__.py
+lib/yaml/composer.py
+lib/yaml/constructor.py
+lib/yaml/cyaml.py
+lib/yaml/dumper.py
+lib/yaml/emitter.py
+lib/yaml/error.py
+lib/yaml/events.py
+lib/yaml/loader.py
+lib/yaml/nodes.py
+lib/yaml/parser.py
+lib/yaml/reader.py
+lib/yaml/representer.py
+lib/yaml/resolver.py
+lib/yaml/scanner.py
+lib/yaml/serializer.py
+lib/yaml/tokens.py
+lib3/PyYAML.egg-info/PKG-INFO
+lib3/PyYAML.egg-info/SOURCES.txt
+lib3/PyYAML.egg-info/dependency_links.txt
+lib3/PyYAML.egg-info/top_level.txt
+lib3/_yaml/__init__.py
+lib3/yaml/__init__.py
+lib3/yaml/composer.py
+lib3/yaml/constructor.py
+lib3/yaml/cyaml.py
+lib3/yaml/dumper.py
+lib3/yaml/emitter.py
+lib3/yaml/error.py
+lib3/yaml/events.py
+lib3/yaml/loader.py
+lib3/yaml/nodes.py
+lib3/yaml/parser.py
+lib3/yaml/reader.py
+lib3/yaml/representer.py
+lib3/yaml/resolver.py
+lib3/yaml/scanner.py
+lib3/yaml/serializer.py
+lib3/yaml/tokens.py
+tests/data/a-nasty-libyaml-bug.loader-error
+tests/data/aliases-cdumper-bug.code
+tests/data/aliases.events
+tests/data/bool.data
+tests/data/bool.detect
+tests/data/construct-binary-py2.code
+tests/data/construct-binary-py2.data
+tests/data/construct-binary-py3.code
+tests/data/construct-binary-py3.data
+tests/data/construct-bool.code
+tests/data/construct-bool.data
+tests/data/construct-custom.code
+tests/data/construct-custom.data
+tests/data/construct-float.code
+tests/data/construct-float.data
+tests/data/construct-int.code
+tests/data/construct-int.data
+tests/data/construct-map.code
+tests/data/construct-map.data
+tests/data/construct-merge.code
+tests/data/construct-merge.data
+tests/data/construct-null.code
+tests/data/construct-null.data
+tests/data/construct-omap.code
+tests/data/construct-omap.data
+tests/data/construct-pairs.code
+tests/data/construct-pairs.data
+tests/data/construct-python-bool.code
+tests/data/construct-python-bool.data
+tests/data/construct-python-bytes-py3.code
+tests/data/construct-python-bytes-py3.data
+tests/data/construct-python-complex.code
+tests/data/construct-python-complex.data
+tests/data/construct-python-float.code
+tests/data/construct-python-float.data
+tests/data/construct-python-int.code
+tests/data/construct-python-int.data
+tests/data/construct-python-long-short-py2.code
+tests/data/construct-python-long-short-py2.data
+tests/data/construct-python-long-short-py3.code
+tests/data/construct-python-long-short-py3.data
+tests/data/construct-python-name-module.code
+tests/data/construct-python-name-module.data
+tests/data/construct-python-none.code
+tests/data/construct-python-none.data
+tests/data/construct-python-object.code
+tests/data/construct-python-object.data
+tests/data/construct-python-str-ascii.code
+tests/data/construct-python-str-ascii.data
+tests/data/construct-python-str-utf8-py2.code
+tests/data/construct-python-str-utf8-py2.data
+tests/data/construct-python-str-utf8-py3.code
+tests/data/construct-python-str-utf8-py3.data
+tests/data/construct-python-tuple-list-dict.code
+tests/data/construct-python-tuple-list-dict.data
+tests/data/construct-python-unicode-ascii-py2.code
+tests/data/construct-python-unicode-ascii-py2.data
+tests/data/construct-python-unicode-ascii-py3.code
+tests/data/construct-python-unicode-ascii-py3.data
+tests/data/construct-python-unicode-utf8-py2.code
+tests/data/construct-python-unicode-utf8-py2.data
+tests/data/construct-python-unicode-utf8-py3.code
+tests/data/construct-python-unicode-utf8-py3.data
+tests/data/construct-seq.code
+tests/data/construct-seq.data
+tests/data/construct-set.code
+tests/data/construct-set.data
+tests/data/construct-str-ascii.code
+tests/data/construct-str-ascii.data
+tests/data/construct-str-utf8-py2.code
+tests/data/construct-str-utf8-py2.data
+tests/data/construct-str-utf8-py3.code
+tests/data/construct-str-utf8-py3.data
+tests/data/construct-str.code
+tests/data/construct-str.data
+tests/data/construct-timestamp.code
+tests/data/construct-timestamp.data
+tests/data/construct-value.code
+tests/data/construct-value.data
+tests/data/document-separator-in-quoted-scalar.loader-error
+tests/data/documents.events
+tests/data/duplicate-anchor-1.loader-error
+tests/data/duplicate-anchor-2.loader-error
+tests/data/duplicate-key.former-loader-error.code
+tests/data/duplicate-key.former-loader-error.data
+tests/data/duplicate-mapping-key.former-loader-error.code
+tests/data/duplicate-mapping-key.former-loader-error.data
+tests/data/duplicate-merge-key.former-loader-error.code
+tests/data/duplicate-merge-key.former-loader-error.data
+tests/data/duplicate-tag-directive.loader-error
+tests/data/duplicate-value-key.former-loader-error.code
+tests/data/duplicate-value-key.former-loader-error.data
+tests/data/duplicate-yaml-directive.loader-error
+tests/data/emit-block-scalar-in-simple-key-context-bug.canonical
+tests/data/emit-block-scalar-in-simple-key-context-bug.data
+tests/data/emitting-unacceptable-unicode-character-bug-py3.code
+tests/data/emitting-unacceptable-unicode-character-bug-py3.data
+tests/data/emitting-unacceptable-unicode-character-bug-py3.skip-ext
+tests/data/emitting-unacceptable-unicode-character-bug.code
+tests/data/emitting-unacceptable-unicode-character-bug.data
+tests/data/emitting-unacceptable-unicode-character-bug.skip-ext
+tests/data/emoticons.unicode
+tests/data/emoticons2.unicode
+tests/data/empty-anchor.emitter-error
+tests/data/empty-document-bug.canonical
+tests/data/empty-document-bug.data
+tests/data/empty-document-bug.empty
+tests/data/empty-documents.single-loader-error
+tests/data/empty-python-module.loader-error
+tests/data/empty-python-name.loader-error
+tests/data/empty-tag-handle.emitter-error
+tests/data/empty-tag-prefix.emitter-error
+tests/data/empty-tag.emitter-error
+tests/data/expected-document-end.emitter-error
+tests/data/expected-document-start.emitter-error
+tests/data/expected-mapping.loader-error
+tests/data/expected-node-1.emitter-error
+tests/data/expected-node-2.emitter-error
+tests/data/expected-nothing.emitter-error
+tests/data/expected-scalar.loader-error
+tests/data/expected-sequence.loader-error
+tests/data/expected-stream-start.emitter-error
+tests/data/explicit-document.single-loader-error
+tests/data/fetch-complex-value-bug.loader-error
+tests/data/float-representer-2.3-bug.code
+tests/data/float-representer-2.3-bug.data
+tests/data/float.data
+tests/data/float.detect
+tests/data/forbidden-entry.loader-error
+tests/data/forbidden-key.loader-error
+tests/data/forbidden-value.loader-error
+tests/data/implicit-document.single-loader-error
+tests/data/int.data
+tests/data/int.detect
+tests/data/invalid-anchor-1.loader-error
+tests/data/invalid-anchor-2.loader-error
+tests/data/invalid-anchor.emitter-error
+tests/data/invalid-base64-data-2.loader-error
+tests/data/invalid-base64-data.loader-error
+tests/data/invalid-block-scalar-indicator.loader-error
+tests/data/invalid-character.loader-error
+tests/data/invalid-character.stream-error
+tests/data/invalid-directive-line.loader-error
+tests/data/invalid-directive-name-1.loader-error
+tests/data/invalid-directive-name-2.loader-error
+tests/data/invalid-escape-character.loader-error
+tests/data/invalid-escape-numbers.loader-error
+tests/data/invalid-indentation-indicator-1.loader-error
+tests/data/invalid-indentation-indicator-2.loader-error
+tests/data/invalid-item-without-trailing-break.loader-error
+tests/data/invalid-merge-1.loader-error
+tests/data/invalid-merge-2.loader-error
+tests/data/invalid-omap-1.loader-error
+tests/data/invalid-omap-2.loader-error
+tests/data/invalid-omap-3.loader-error
+tests/data/invalid-pairs-1.loader-error
+tests/data/invalid-pairs-2.loader-error
+tests/data/invalid-pairs-3.loader-error
+tests/data/invalid-python-bytes-2-py3.loader-error
+tests/data/invalid-python-bytes-py3.loader-error
+tests/data/invalid-python-module-kind.loader-error
+tests/data/invalid-python-module-value.loader-error
+tests/data/invalid-python-module.loader-error
+tests/data/invalid-python-name-kind.loader-error
+tests/data/invalid-python-name-module.loader-error
+tests/data/invalid-python-name-object.loader-error
+tests/data/invalid-python-name-value.loader-error
+tests/data/invalid-simple-key.loader-error
+tests/data/invalid-single-quote-bug.code
+tests/data/invalid-single-quote-bug.data
+tests/data/invalid-starting-character.loader-error
+tests/data/invalid-tag-1.loader-error
+tests/data/invalid-tag-2.loader-error
+tests/data/invalid-tag-directive-handle.loader-error
+tests/data/invalid-tag-directive-prefix.loader-error
+tests/data/invalid-tag-handle-1.emitter-error
+tests/data/invalid-tag-handle-1.loader-error
+tests/data/invalid-tag-handle-2.emitter-error
+tests/data/invalid-tag-handle-2.loader-error
+tests/data/invalid-uri-escapes-1.loader-error
+tests/data/invalid-uri-escapes-2.loader-error
+tests/data/invalid-uri-escapes-3.loader-error
+tests/data/invalid-uri.loader-error
+tests/data/invalid-utf8-byte.loader-error
+tests/data/invalid-utf8-byte.stream-error
+tests/data/invalid-yaml-directive-version-1.loader-error
+tests/data/invalid-yaml-directive-version-2.loader-error
+tests/data/invalid-yaml-directive-version-3.loader-error
+tests/data/invalid-yaml-directive-version-4.loader-error
+tests/data/invalid-yaml-directive-version-5.loader-error
+tests/data/invalid-yaml-directive-version-6.loader-error
+tests/data/invalid-yaml-version.loader-error
+tests/data/latin.unicode
+tests/data/mapping.sort
+tests/data/mapping.sorted
+tests/data/mappings.events
+tests/data/merge.data
+tests/data/merge.detect
+tests/data/more-floats.code
+tests/data/more-floats.data
+tests/data/multi-constructor.code
+tests/data/multi-constructor.multi
+tests/data/myfullloader.subclass_blacklist
+tests/data/negative-float-bug.code
+tests/data/negative-float-bug.data
+tests/data/no-alias-anchor.emitter-error
+tests/data/no-alias-anchor.skip-ext
+tests/data/no-block-collection-end.loader-error
+tests/data/no-block-mapping-end-2.loader-error
+tests/data/no-block-mapping-end.loader-error
+tests/data/no-document-start.loader-error
+tests/data/no-flow-mapping-end.loader-error
+tests/data/no-flow-sequence-end.loader-error
+tests/data/no-node-1.loader-error
+tests/data/no-node-2.loader-error
+tests/data/no-tag.emitter-error
+tests/data/null.data
+tests/data/null.detect
+tests/data/odd-utf16.stream-error
+tests/data/overwrite-state-new-constructor.loader-error
+tests/data/recursive-anchor.former-loader-error
+tests/data/recursive-dict.recursive
+tests/data/recursive-list.recursive
+tests/data/recursive-set.recursive
+tests/data/recursive-state.recursive
+tests/data/recursive-tuple.recursive
+tests/data/recursive.former-dumper-error
+tests/data/remove-possible-simple-key-bug.loader-error
+tests/data/resolver.data
+tests/data/resolver.path
+tests/data/run-parser-crash-bug.data
+tests/data/scalars.events
+tests/data/scan-document-end-bug.canonical
+tests/data/scan-document-end-bug.data
+tests/data/scan-line-break-bug.canonical
+tests/data/scan-line-break-bug.data
+tests/data/sequences.events
+tests/data/serializer-is-already-opened.dumper-error
+tests/data/serializer-is-closed-1.dumper-error
+tests/data/serializer-is-closed-2.dumper-error
+tests/data/serializer-is-not-opened-1.dumper-error
+tests/data/serializer-is-not-opened-2.dumper-error
+tests/data/single-dot-is-not-float-bug.code
+tests/data/single-dot-is-not-float-bug.data
+tests/data/sloppy-indentation.canonical
+tests/data/sloppy-indentation.data
+tests/data/spec-02-01.data
+tests/data/spec-02-01.structure
+tests/data/spec-02-01.tokens
+tests/data/spec-02-02.data
+tests/data/spec-02-02.structure
+tests/data/spec-02-02.tokens
+tests/data/spec-02-03.data
+tests/data/spec-02-03.structure
+tests/data/spec-02-03.tokens
+tests/data/spec-02-04.data
+tests/data/spec-02-04.structure
+tests/data/spec-02-04.tokens
+tests/data/spec-02-05.data
+tests/data/spec-02-05.structure
+tests/data/spec-02-05.tokens
+tests/data/spec-02-06.data
+tests/data/spec-02-06.structure
+tests/data/spec-02-06.tokens
+tests/data/spec-02-07.data
+tests/data/spec-02-07.structure
+tests/data/spec-02-07.tokens
+tests/data/spec-02-08.data
+tests/data/spec-02-08.structure
+tests/data/spec-02-08.tokens
+tests/data/spec-02-09.data
+tests/data/spec-02-09.structure
+tests/data/spec-02-09.tokens
+tests/data/spec-02-10.data
+tests/data/spec-02-10.structure
+tests/data/spec-02-10.tokens
+tests/data/spec-02-11.data
+tests/data/spec-02-11.structure
+tests/data/spec-02-11.tokens
+tests/data/spec-02-12.data
+tests/data/spec-02-12.structure
+tests/data/spec-02-12.tokens
+tests/data/spec-02-13.data
+tests/data/spec-02-13.structure
+tests/data/spec-02-13.tokens
+tests/data/spec-02-14.data
+tests/data/spec-02-14.structure
+tests/data/spec-02-14.tokens
+tests/data/spec-02-15.data
+tests/data/spec-02-15.structure
+tests/data/spec-02-15.tokens
+tests/data/spec-02-16.data
+tests/data/spec-02-16.structure
+tests/data/spec-02-16.tokens
+tests/data/spec-02-17.data
+tests/data/spec-02-17.structure
+tests/data/spec-02-17.tokens
+tests/data/spec-02-18.data
+tests/data/spec-02-18.structure
+tests/data/spec-02-18.tokens
+tests/data/spec-02-19.data
+tests/data/spec-02-19.structure
+tests/data/spec-02-19.tokens
+tests/data/spec-02-20.data
+tests/data/spec-02-20.structure
+tests/data/spec-02-20.tokens
+tests/data/spec-02-21.data
+tests/data/spec-02-21.structure
+tests/data/spec-02-21.tokens
+tests/data/spec-02-22.data
+tests/data/spec-02-22.structure
+tests/data/spec-02-22.tokens
+tests/data/spec-02-23.data
+tests/data/spec-02-23.structure
+tests/data/spec-02-23.tokens
+tests/data/spec-02-24.data
+tests/data/spec-02-24.structure
+tests/data/spec-02-24.tokens
+tests/data/spec-02-25.data
+tests/data/spec-02-25.structure
+tests/data/spec-02-25.tokens
+tests/data/spec-02-26.data
+tests/data/spec-02-26.structure
+tests/data/spec-02-26.tokens
+tests/data/spec-02-27.data
+tests/data/spec-02-27.structure
+tests/data/spec-02-27.tokens
+tests/data/spec-02-28.data
+tests/data/spec-02-28.structure
+tests/data/spec-02-28.tokens
+tests/data/spec-05-01-utf16be.data
+tests/data/spec-05-01-utf16be.empty
+tests/data/spec-05-01-utf16le.data
+tests/data/spec-05-01-utf16le.empty
+tests/data/spec-05-01-utf8.data
+tests/data/spec-05-01-utf8.empty
+tests/data/spec-05-02-utf16be.data
+tests/data/spec-05-02-utf16be.error
+tests/data/spec-05-02-utf16le.data
+tests/data/spec-05-02-utf16le.error
+tests/data/spec-05-02-utf8.data
+tests/data/spec-05-02-utf8.error
+tests/data/spec-05-03.canonical
+tests/data/spec-05-03.data
+tests/data/spec-05-04.canonical
+tests/data/spec-05-04.data
+tests/data/spec-05-05.data
+tests/data/spec-05-05.empty
+tests/data/spec-05-06.canonical
+tests/data/spec-05-06.data
+tests/data/spec-05-07.canonical
+tests/data/spec-05-07.data
+tests/data/spec-05-08.canonical
+tests/data/spec-05-08.data
+tests/data/spec-05-09.canonical
+tests/data/spec-05-09.data
+tests/data/spec-05-10.data
+tests/data/spec-05-10.error
+tests/data/spec-05-11.canonical
+tests/data/spec-05-11.data
+tests/data/spec-05-12.data
+tests/data/spec-05-12.error
+tests/data/spec-05-13.canonical
+tests/data/spec-05-13.data
+tests/data/spec-05-14.canonical
+tests/data/spec-05-14.data
+tests/data/spec-05-15.data
+tests/data/spec-05-15.error
+tests/data/spec-06-01.canonical
+tests/data/spec-06-01.data
+tests/data/spec-06-02.data
+tests/data/spec-06-02.empty
+tests/data/spec-06-03.canonical
+tests/data/spec-06-03.data
+tests/data/spec-06-04.canonical
+tests/data/spec-06-04.data
+tests/data/spec-06-05.canonical
+tests/data/spec-06-05.data
+tests/data/spec-06-06.canonical
+tests/data/spec-06-06.data
+tests/data/spec-06-07.canonical
+tests/data/spec-06-07.data
+tests/data/spec-06-08.canonical
+tests/data/spec-06-08.data
+tests/data/spec-07-01.canonical
+tests/data/spec-07-01.data
+tests/data/spec-07-01.skip-ext
+tests/data/spec-07-02.canonical
+tests/data/spec-07-02.data
+tests/data/spec-07-02.skip-ext
+tests/data/spec-07-03.data
+tests/data/spec-07-03.error
+tests/data/spec-07-04.canonical
+tests/data/spec-07-04.data
+tests/data/spec-07-05.data
+tests/data/spec-07-05.error
+tests/data/spec-07-06.canonical
+tests/data/spec-07-06.data
+tests/data/spec-07-07a.canonical
+tests/data/spec-07-07a.data
+tests/data/spec-07-07b.canonical
+tests/data/spec-07-07b.data
+tests/data/spec-07-08.canonical
+tests/data/spec-07-08.data
+tests/data/spec-07-09.canonical
+tests/data/spec-07-09.data
+tests/data/spec-07-10.canonical
+tests/data/spec-07-10.data
+tests/data/spec-07-11.data
+tests/data/spec-07-11.empty
+tests/data/spec-07-12a.canonical
+tests/data/spec-07-12a.data
+tests/data/spec-07-12b.canonical
+tests/data/spec-07-12b.data
+tests/data/spec-07-13.canonical
+tests/data/spec-07-13.data
+tests/data/spec-08-01.canonical
+tests/data/spec-08-01.data
+tests/data/spec-08-02.canonical
+tests/data/spec-08-02.data
+tests/data/spec-08-03.canonical
+tests/data/spec-08-03.data
+tests/data/spec-08-04.data
+tests/data/spec-08-04.error
+tests/data/spec-08-05.canonical
+tests/data/spec-08-05.data
+tests/data/spec-08-06.data
+tests/data/spec-08-06.error
+tests/data/spec-08-07.canonical
+tests/data/spec-08-07.data
+tests/data/spec-08-08.canonical
+tests/data/spec-08-08.data
+tests/data/spec-08-09.canonical
+tests/data/spec-08-09.data
+tests/data/spec-08-10.canonical
+tests/data/spec-08-10.data
+tests/data/spec-08-11.canonical
+tests/data/spec-08-11.data
+tests/data/spec-08-12.canonical
+tests/data/spec-08-12.data
+tests/data/spec-08-13.canonical
+tests/data/spec-08-13.data
+tests/data/spec-08-13.skip-ext
+tests/data/spec-08-14.canonical
+tests/data/spec-08-14.data
+tests/data/spec-08-15.canonical
+tests/data/spec-08-15.data
+tests/data/spec-09-01.canonical
+tests/data/spec-09-01.data
+tests/data/spec-09-02.canonical
+tests/data/spec-09-02.data
+tests/data/spec-09-03.canonical
+tests/data/spec-09-03.data
+tests/data/spec-09-04.canonical
+tests/data/spec-09-04.data
+tests/data/spec-09-05.canonical
+tests/data/spec-09-05.data
+tests/data/spec-09-06.canonical
+tests/data/spec-09-06.data
+tests/data/spec-09-07.canonical
+tests/data/spec-09-07.data
+tests/data/spec-09-08.canonical
+tests/data/spec-09-08.data
+tests/data/spec-09-09.canonical
+tests/data/spec-09-09.data
+tests/data/spec-09-10.canonical
+tests/data/spec-09-10.data
+tests/data/spec-09-11.canonical
+tests/data/spec-09-11.data
+tests/data/spec-09-12.canonical
+tests/data/spec-09-12.data
+tests/data/spec-09-13.canonical
+tests/data/spec-09-13.data
+tests/data/spec-09-14.data
+tests/data/spec-09-14.error
+tests/data/spec-09-15.canonical
+tests/data/spec-09-15.data
+tests/data/spec-09-16.canonical
+tests/data/spec-09-16.data
+tests/data/spec-09-17.canonical
+tests/data/spec-09-17.data
+tests/data/spec-09-18.canonical
+tests/data/spec-09-18.data
+tests/data/spec-09-19.canonical
+tests/data/spec-09-19.data
+tests/data/spec-09-20.canonical
+tests/data/spec-09-20.data
+tests/data/spec-09-20.skip-ext
+tests/data/spec-09-21.data
+tests/data/spec-09-21.error
+tests/data/spec-09-22.canonical
+tests/data/spec-09-22.data
+tests/data/spec-09-23.canonical
+tests/data/spec-09-23.data
+tests/data/spec-09-24.canonical
+tests/data/spec-09-24.data
+tests/data/spec-09-25.canonical
+tests/data/spec-09-25.data
+tests/data/spec-09-26.canonical
+tests/data/spec-09-26.data
+tests/data/spec-09-27.canonical
+tests/data/spec-09-27.data
+tests/data/spec-09-28.canonical
+tests/data/spec-09-28.data
+tests/data/spec-09-29.canonical
+tests/data/spec-09-29.data
+tests/data/spec-09-30.canonical
+tests/data/spec-09-30.data
+tests/data/spec-09-31.canonical
+tests/data/spec-09-31.data
+tests/data/spec-09-32.canonical
+tests/data/spec-09-32.data
+tests/data/spec-09-33.canonical
+tests/data/spec-09-33.data
+tests/data/spec-10-01.canonical
+tests/data/spec-10-01.data
+tests/data/spec-10-02.canonical
+tests/data/spec-10-02.data
+tests/data/spec-10-03.canonical
+tests/data/spec-10-03.data
+tests/data/spec-10-04.canonical
+tests/data/spec-10-04.data
+tests/data/spec-10-05.canonical
+tests/data/spec-10-05.data
+tests/data/spec-10-06.canonical
+tests/data/spec-10-06.data
+tests/data/spec-10-07.canonical
+tests/data/spec-10-07.data
+tests/data/spec-10-08.data
+tests/data/spec-10-08.error
+tests/data/spec-10-09.canonical
+tests/data/spec-10-09.data
+tests/data/spec-10-10.canonical
+tests/data/spec-10-10.data
+tests/data/spec-10-11.canonical
+tests/data/spec-10-11.data
+tests/data/spec-10-12.canonical
+tests/data/spec-10-12.data
+tests/data/spec-10-13.canonical
+tests/data/spec-10-13.data
+tests/data/spec-10-14.canonical
+tests/data/spec-10-14.data
+tests/data/spec-10-15.canonical
+tests/data/spec-10-15.data
+tests/data/str.data
+tests/data/str.detect
+tests/data/tags.events
+tests/data/test_mark.marks
+tests/data/timestamp-bugs.code
+tests/data/timestamp-bugs.data
+tests/data/timestamp.data
+tests/data/timestamp.detect
+tests/data/unacceptable-key.loader-error
+tests/data/unclosed-bracket.loader-error
+tests/data/unclosed-quoted-scalar.loader-error
+tests/data/undefined-anchor.loader-error
+tests/data/undefined-constructor.loader-error
+tests/data/undefined-tag-handle.loader-error
+tests/data/unknown.dumper-error
+tests/data/unsupported-version.emitter-error
+tests/data/utf16be.code
+tests/data/utf16be.data
+tests/data/utf16le.code
+tests/data/utf16le.data
+tests/data/utf8-implicit.code
+tests/data/utf8-implicit.data
+tests/data/utf8.code
+tests/data/utf8.data
+tests/data/value.data
+tests/data/value.detect
+tests/data/yaml.data
+tests/data/yaml.detect
+tests/lib/canonical.py
+tests/lib/test_all.py
+tests/lib/test_appliance.py
+tests/lib/test_build.py
+tests/lib/test_build_ext.py
+tests/lib/test_canonical.py
+tests/lib/test_constructor.py
+tests/lib/test_emitter.py
+tests/lib/test_errors.py
+tests/lib/test_input_output.py
+tests/lib/test_mark.py
+tests/lib/test_multi_constructor.py
+tests/lib/test_reader.py
+tests/lib/test_recursive.py
+tests/lib/test_representer.py
+tests/lib/test_resolver.py
+tests/lib/test_sort_keys.py
+tests/lib/test_structure.py
+tests/lib/test_tokens.py
+tests/lib/test_yaml.py
+tests/lib/test_yaml_ext.py
+tests/lib3/canonical.py
+tests/lib3/test_all.py
+tests/lib3/test_appliance.py
+tests/lib3/test_build.py
+tests/lib3/test_build_ext.py
+tests/lib3/test_canonical.py
+tests/lib3/test_constructor.py
+tests/lib3/test_emitter.py
+tests/lib3/test_errors.py
+tests/lib3/test_input_output.py
+tests/lib3/test_mark.py
+tests/lib3/test_multi_constructor.py
+tests/lib3/test_reader.py
+tests/lib3/test_recursive.py
+tests/lib3/test_representer.py
+tests/lib3/test_resolver.py
+tests/lib3/test_sort_keys.py
+tests/lib3/test_structure.py
+tests/lib3/test_tokens.py
+tests/lib3/test_yaml.py
+tests/lib3/test_yaml_ext.py
+yaml/__init__.pxd
+yaml/_yaml.h
+yaml/_yaml.pxd
+yaml/_yaml.pyx
\ No newline at end of file
diff --git a/third_party/python/PyYAML/lib3/PyYAML.egg-info/dependency_links.txt b/third_party/python/PyYAML/lib3/PyYAML.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/PyYAML.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/PyYAML/lib3/PyYAML.egg-info/top_level.txt b/third_party/python/PyYAML/lib3/PyYAML.egg-info/top_level.txt
new file mode 100644
index 0000000000..e6475e911f
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/PyYAML.egg-info/top_level.txt
@@ -0,0 +1,2 @@
+_yaml
+yaml
diff --git a/third_party/python/PyYAML/lib3/_yaml/__init__.py b/third_party/python/PyYAML/lib3/_yaml/__init__.py
new file mode 100644
index 0000000000..7baa8c4b68
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/_yaml/__init__.py
@@ -0,0 +1,33 @@
+# This is a stub package designed to roughly emulate the _yaml
+# extension module, which previously existed as a standalone module
+# and has been moved into the `yaml` package namespace.
+# It does not perfectly mimic its old counterpart, but should get
+# close enough for anyone who's relying on it even when they shouldn't.
+import yaml
+
+# In some circumstances, the yaml module we imported may be from a different version, so we need
+# to tread carefully when poking at it here (it may not have the attributes we expect).
+if not getattr(yaml, '__with_libyaml__', False):
+ from sys import version_info
+
+ exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
+ raise exc("No module named '_yaml'")
+else:
+ from yaml._yaml import *
+ import warnings
+ warnings.warn(
+ 'The _yaml extension module is now located at yaml._yaml'
+ ' and its location is subject to change. To use the'
+ ' LibYAML-based parser and emitter, import from `yaml`:'
+ ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
+ DeprecationWarning
+ )
+ del warnings
+ # Don't `del yaml` here because yaml is actually an existing
+ # namespace member of _yaml.
+
+__name__ = '_yaml'
+# If the module is top-level (i.e. not a part of any specific package)
+# then the attribute should be set to ''.
+# https://docs.python.org/3.8/library/types.html
+__package__ = ''
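For readers unfamiliar with the shim above, here is a minimal usage sketch (an illustration only, assuming a PyYAML 5.x build with LibYAML bindings; none of this is part of the vendored sources). Importing the legacy `_yaml` name raises when LibYAML support is absent and otherwise re-exports `yaml._yaml` with a DeprecationWarning, so the supported spelling is to import the C classes from `yaml` directly:

    # Hypothetical sketch: exercising the _yaml shim above.
    import warnings

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        try:
            import _yaml  # re-exports yaml._yaml when __with_libyaml__ is True
        except ImportError:
            print('PyYAML was built without LibYAML')
        else:
            assert any(issubclass(w.category, DeprecationWarning)
                       for w in caught)

    # The import style the warning recommends (assumes LibYAML is present):
    from yaml import CLoader as Loader, CDumper as Dumper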
diff --git a/third_party/python/PyYAML/lib3/yaml/__init__.py b/third_party/python/PyYAML/lib3/yaml/__init__.py
new file mode 100644
index 0000000000..86d07b5525
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/__init__.py
@@ -0,0 +1,427 @@
+
+from .error import *
+
+from .tokens import *
+from .events import *
+from .nodes import *
+
+from .loader import *
+from .dumper import *
+
+__version__ = '5.4.1'
+try:
+ from .cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
+import io
+
+#------------------------------------------------------------------------------
+# Warnings control
+#------------------------------------------------------------------------------
+
+# 'Global' warnings state:
+_warnings_enabled = {
+ 'YAMLLoadWarning': True,
+}
+
+# Get or set the global warnings state
+def warnings(settings=None):
+ if settings is None:
+ return _warnings_enabled
+
+ if type(settings) is dict:
+ for key in settings:
+ if key in _warnings_enabled:
+ _warnings_enabled[key] = settings[key]
+
+# Warn when load() is called without Loader=...
+class YAMLLoadWarning(RuntimeWarning):
+ pass
+
+def load_warning(method):
+ if _warnings_enabled['YAMLLoadWarning'] is False:
+ return
+
+ import warnings
+
+ message = (
+ "calling yaml.%s() without Loader=... is deprecated, as the "
+ "default Loader is unsafe. Please read "
+ "https://msg.pyyaml.org/load for full details."
+ ) % method
+
+ warnings.warn(message, YAMLLoadWarning, stacklevel=3)
+
+#------------------------------------------------------------------------------
+def scan(stream, Loader=Loader):
+ """
+ Scan a YAML stream and produce scanning tokens.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
+
+def parse(stream, Loader=Loader):
+ """
+ Parse a YAML stream and produce parsing events.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
+
+def compose(stream, Loader=Loader):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding representation tree.
+ """
+ loader = Loader(stream)
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
+
+def compose_all(stream, Loader=Loader):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding representation trees.
+ """
+ loader = Loader(stream)
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
+
+def load(stream, Loader=None):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ if Loader is None:
+ load_warning('load')
+ Loader = FullLoader
+
+ loader = Loader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+def load_all(stream, Loader=None):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ """
+ if Loader is None:
+ load_warning('load_all')
+ Loader = FullLoader
+
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
+
+def full_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve all tags except those known to be
+ unsafe on untrusted input.
+ """
+ return load(stream, FullLoader)
+
+def full_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve all tags except those known to be
+ unsafe on untrusted input.
+ """
+ return load_all(stream, FullLoader)
+
+def safe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve only basic YAML tags. This is known
+ to be safe for untrusted input.
+ """
+ return load(stream, SafeLoader)
+
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve only basic YAML tags. This is known
+ to be safe for untrusted input.
+ """
+ return load_all(stream, SafeLoader)
+
+def unsafe_load(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+
+ Resolve all tags, even those known to be
+ unsafe on untrusted input.
+ """
+ return load(stream, UnsafeLoader)
+
+def unsafe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+
+ Resolve all tags, even those known to be
+ unsafe on untrusted input.
+ """
+ return load_all(stream, UnsafeLoader)
+
+def emit(events, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+ """
+ Emit YAML parsing events into a stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ stream = io.StringIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize_all(nodes, stream=None, Dumper=Dumper,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ """
+ Serialize a sequence of representation trees into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end)
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def serialize(node, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a representation tree into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return serialize_all([node], stream, Dumper=Dumper, **kwds)
+
+def dump_all(documents, stream=None, Dumper=Dumper,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ getvalue = None
+ if stream is None:
+ if encoding is None:
+ stream = io.StringIO()
+ else:
+ stream = io.BytesIO()
+ getvalue = stream.getvalue
+ dumper = Dumper(stream, default_style=default_style,
+ default_flow_style=default_flow_style,
+ canonical=canonical, indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break,
+ encoding=encoding, version=version, tags=tags,
+ explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
+ if getvalue:
+ return getvalue()
+
+def dump(data, stream=None, Dumper=Dumper, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=Dumper, **kwds)
+
+def safe_dump_all(documents, stream=None, **kwds):
+ """
+ Serialize a sequence of Python objects into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
+
+def safe_dump(data, stream=None, **kwds):
+ """
+ Serialize a Python object into a YAML stream.
+ Produce only basic YAML tags.
+ If stream is None, return the produced string instead.
+ """
+ return dump_all([data], stream, Dumper=SafeDumper, **kwds)
+
+def add_implicit_resolver(tag, regexp, first=None,
+ Loader=None, Dumper=Dumper):
+ """
+ Add an implicit scalar detector.
+ If an implicit scalar value matches the given regexp,
+ the corresponding tag is assigned to the scalar.
+ first is a sequence of possible initial characters or None.
+ """
+ if Loader is None:
+ loader.Loader.add_implicit_resolver(tag, regexp, first)
+ loader.FullLoader.add_implicit_resolver(tag, regexp, first)
+ loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
+ else:
+ Loader.add_implicit_resolver(tag, regexp, first)
+ Dumper.add_implicit_resolver(tag, regexp, first)
+
+def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
+ """
+ Add a path based resolver for the given tag.
+ A path is a list of keys that forms a path
+ to a node in the representation tree.
+ Keys can be string values, integers, or None.
+ """
+ if Loader is None:
+ loader.Loader.add_path_resolver(tag, path, kind)
+ loader.FullLoader.add_path_resolver(tag, path, kind)
+ loader.UnsafeLoader.add_path_resolver(tag, path, kind)
+ else:
+ Loader.add_path_resolver(tag, path, kind)
+ Dumper.add_path_resolver(tag, path, kind)
+
+def add_constructor(tag, constructor, Loader=None):
+ """
+ Add a constructor for the given tag.
+ Constructor is a function that accepts a Loader instance
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ loader.Loader.add_constructor(tag, constructor)
+ loader.FullLoader.add_constructor(tag, constructor)
+ loader.UnsafeLoader.add_constructor(tag, constructor)
+ else:
+ Loader.add_constructor(tag, constructor)
+
+def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
+ """
+ Add a multi-constructor for the given tag prefix.
+ Multi-constructor is called for a node if its tag starts with tag_prefix.
+ Multi-constructor accepts a Loader instance, a tag suffix,
+ and a node object and produces the corresponding Python object.
+ """
+ if Loader is None:
+ loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
+ loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
+ loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
+ else:
+ Loader.add_multi_constructor(tag_prefix, multi_constructor)
+
+def add_representer(data_type, representer, Dumper=Dumper):
+ """
+ Add a representer for the given type.
+ Representer is a function accepting a Dumper instance
+ and an instance of the given data type
+ and producing the corresponding representation node.
+ """
+ Dumper.add_representer(data_type, representer)
+
+def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
+ """
+    Add a multi-representer for the given type.
+ Multi-representer is a function accepting a Dumper instance
+ and an instance of the given data type or subtype
+ and producing the corresponding representation node.
+ """
+ Dumper.add_multi_representer(data_type, multi_representer)
+
+class YAMLObjectMetaclass(type):
+ """
+ The metaclass for YAMLObject.
+ """
+ def __init__(cls, name, bases, kwds):
+ super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
+ if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
+ if isinstance(cls.yaml_loader, list):
+ for loader in cls.yaml_loader:
+ loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+ else:
+ cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
+
+ cls.yaml_dumper.add_representer(cls, cls.to_yaml)
+
+class YAMLObject(metaclass=YAMLObjectMetaclass):
+ """
+ An object that can dump itself to a YAML stream
+ and load itself from a YAML stream.
+ """
+
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
+
+ yaml_loader = [Loader, FullLoader, UnsafeLoader]
+ yaml_dumper = Dumper
+
+ yaml_tag = None
+ yaml_flow_style = None
+
+ @classmethod
+ def from_yaml(cls, loader, node):
+ """
+ Convert a representation node to a Python object.
+ """
+ return loader.construct_yaml_object(node, cls)
+
+ @classmethod
+ def to_yaml(cls, dumper, data):
+ """
+ Convert a Python object to a representation node.
+ """
+ return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
+ flow_style=cls.yaml_flow_style)
+
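To make the module-level API above concrete, a short usage sketch (illustrative only, assuming a stock PyYAML 5.x install; not part of the vendored file). It shows the explicit-Loader requirement that load_warning() enforces, and the stream-or-string convention shared by emit(), serialize_all(), and dump_all():

    import io
    import yaml

    data = yaml.safe_load('a: 1\nb: [2, 3]\n')      # resolves basic tags only
    assert data == {'a': 1, 'b': [2, 3]}

    # yaml.load() without Loader=... emits YAMLLoadWarning; name one explicitly.
    same = yaml.load('a: 1\nb: [2, 3]\n', Loader=yaml.SafeLoader)
    assert same == data

    text = yaml.safe_dump(data)                     # no stream: returns a str
    buf = io.StringIO()
    yaml.safe_dump(data, buf)                       # stream given: returns None
    assert buf.getvalue() == text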
diff --git a/third_party/python/PyYAML/lib3/yaml/composer.py b/third_party/python/PyYAML/lib3/yaml/composer.py
new file mode 100644
index 0000000000..6d15cb40e3
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/composer.py
@@ -0,0 +1,139 @@
+
+__all__ = ['Composer', 'ComposerError']
+
+from .error import MarkedYAMLError
+from .events import *
+from .nodes import *
+
+class ComposerError(MarkedYAMLError):
+ pass
+
+class Composer:
+
+ def __init__(self):
+ self.anchors = {}
+
+ def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
+        # Are there more documents available?
+ return not self.check_event(StreamEndEvent)
+
+ def get_node(self):
+ # Get the root node of the next document.
+ if not self.check_event(StreamEndEvent):
+ return self.compose_document()
+
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
+
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
+
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
+ # Drop the DOCUMENT-START event.
+ self.get_event()
+
+ # Compose the root node.
+ node = self.compose_node(None, None)
+
+ # Drop the DOCUMENT-END event.
+ self.get_event()
+
+ self.anchors = {}
+ return node
+
+ def compose_node(self, parent, index):
+ if self.check_event(AliasEvent):
+ event = self.get_event()
+ anchor = event.anchor
+ if anchor not in self.anchors:
+ raise ComposerError(None, None, "found undefined alias %r"
+ % anchor, event.start_mark)
+ return self.anchors[anchor]
+ event = self.peek_event()
+ anchor = event.anchor
+ if anchor is not None:
+ if anchor in self.anchors:
+ raise ComposerError("found duplicate anchor %r; first occurrence"
+ % anchor, self.anchors[anchor].start_mark,
+ "second occurrence", event.start_mark)
+ self.descend_resolver(parent, index)
+ if self.check_event(ScalarEvent):
+ node = self.compose_scalar_node(anchor)
+ elif self.check_event(SequenceStartEvent):
+ node = self.compose_sequence_node(anchor)
+ elif self.check_event(MappingStartEvent):
+ node = self.compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ def compose_scalar_node(self, anchor):
+ event = self.get_event()
+ tag = event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(ScalarNode, event.value, event.implicit)
+ node = ScalarNode(tag, event.value,
+ event.start_mark, event.end_mark, style=event.style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ return node
+
+ def compose_sequence_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(SequenceNode, None, start_event.implicit)
+ node = SequenceNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ index = 0
+ while not self.check_event(SequenceEndEvent):
+ node.value.append(self.compose_node(node, index))
+ index += 1
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
+ def compose_mapping_node(self, anchor):
+ start_event = self.get_event()
+ tag = start_event.tag
+ if tag is None or tag == '!':
+ tag = self.resolve(MappingNode, None, start_event.implicit)
+ node = MappingNode(tag, [],
+ start_event.start_mark, None,
+ flow_style=start_event.flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ while not self.check_event(MappingEndEvent):
+ #key_event = self.peek_event()
+ item_key = self.compose_node(node, None)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
+ item_value = self.compose_node(node, item_key)
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
+ end_event = self.get_event()
+ node.end_mark = end_event.end_mark
+ return node
+
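A brief illustration of the Composer contract (a sketch only, assuming PyYAML 5.x; not part of the vendored file): yaml.compose() drives the code above and returns the representation tree, and, because compose_node() returns self.anchors[anchor] for aliases, an alias yields the very same node object as its anchor:

    import yaml

    root = yaml.compose('base: &b {x: 1}\nother: *b\n')
    (_, base_node), (_, other_node) = root.value    # list of (key, value) pairs
    assert base_node is other_node                  # the alias shares the node
    assert root.tag == 'tag:yaml.org,2002:map'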
diff --git a/third_party/python/PyYAML/lib3/yaml/constructor.py b/third_party/python/PyYAML/lib3/yaml/constructor.py
new file mode 100644
index 0000000000..619acd3070
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/constructor.py
@@ -0,0 +1,748 @@
+
+__all__ = [
+ 'BaseConstructor',
+ 'SafeConstructor',
+ 'FullConstructor',
+ 'UnsafeConstructor',
+ 'Constructor',
+ 'ConstructorError'
+]
+
+from .error import *
+from .nodes import *
+
+import collections.abc, datetime, base64, binascii, re, sys, types
+
+class ConstructorError(MarkedYAMLError):
+ pass
+
+class BaseConstructor:
+
+ yaml_constructors = {}
+ yaml_multi_constructors = {}
+
+ def __init__(self):
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
+
+ def check_data(self):
+        # Are there more documents available?
+ return self.check_node()
+
+ def check_state_key(self, key):
+ """Block special attributes/methods from being set in a newly created
+ object, to prevent user-controlled methods from being called during
+ deserialization"""
+ if self.get_state_keys_blacklist_regexp().match(key):
+ raise ConstructorError(None, None,
+ "blacklisted key '%s' in instance state found" % (key,), None)
+
+ def get_data(self):
+ # Construct and return the next document.
+ if self.check_node():
+ return self.construct_document(self.get_node())
+
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
+
+ def construct_document(self, node):
+ data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
+ self.constructed_objects = {}
+ self.recursive_objects = {}
+ self.deep_construct = False
+ return data
+
+ def construct_object(self, node, deep=False):
+ if node in self.constructed_objects:
+ return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
+ if node in self.recursive_objects:
+ raise ConstructorError(None, None,
+ "found unconstructable recursive node", node.start_mark)
+ self.recursive_objects[node] = None
+ constructor = None
+ tag_suffix = None
+ if node.tag in self.yaml_constructors:
+ constructor = self.yaml_constructors[node.tag]
+ else:
+ for tag_prefix in self.yaml_multi_constructors:
+ if tag_prefix is not None and node.tag.startswith(tag_prefix):
+ tag_suffix = node.tag[len(tag_prefix):]
+ constructor = self.yaml_multi_constructors[tag_prefix]
+ break
+ else:
+ if None in self.yaml_multi_constructors:
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
+ elif None in self.yaml_constructors:
+ constructor = self.yaml_constructors[None]
+ elif isinstance(node, ScalarNode):
+ constructor = self.__class__.construct_scalar
+ elif isinstance(node, SequenceNode):
+ constructor = self.__class__.construct_sequence
+ elif isinstance(node, MappingNode):
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = next(generator)
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
+ self.constructed_objects[node] = data
+ del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
+ return data
+
+ def construct_scalar(self, node):
+ if not isinstance(node, ScalarNode):
+ raise ConstructorError(None, None,
+ "expected a scalar node, but found %s" % node.id,
+ node.start_mark)
+ return node.value
+
+ def construct_sequence(self, node, deep=False):
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError(None, None,
+ "expected a sequence node, but found %s" % node.id,
+ node.start_mark)
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
+
+ def construct_mapping(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ mapping = {}
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ if not isinstance(key, collections.abc.Hashable):
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unhashable key", key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
+ return mapping
+
+ def construct_pairs(self, node, deep=False):
+ if not isinstance(node, MappingNode):
+ raise ConstructorError(None, None,
+ "expected a mapping node, but found %s" % node.id,
+ node.start_mark)
+ pairs = []
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
+ pairs.append((key, value))
+ return pairs
+
+ @classmethod
+ def add_constructor(cls, tag, constructor):
+        if 'yaml_constructors' not in cls.__dict__:
+ cls.yaml_constructors = cls.yaml_constructors.copy()
+ cls.yaml_constructors[tag] = constructor
+
+ @classmethod
+ def add_multi_constructor(cls, tag_prefix, multi_constructor):
+        if 'yaml_multi_constructors' not in cls.__dict__:
+ cls.yaml_multi_constructors = cls.yaml_multi_constructors.copy()
+ cls.yaml_multi_constructors[tag_prefix] = multi_constructor
+
+class SafeConstructor(BaseConstructor):
+
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == 'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return super().construct_scalar(node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == 'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == 'tag:yaml.org,2002:value':
+ key_node.tag = 'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return super().construct_mapping(node, deep=deep)
+
+ def construct_yaml_null(self, node):
+ self.construct_scalar(node)
+ return None
+
+ bool_values = {
+ 'yes': True,
+ 'no': False,
+ 'true': True,
+ 'false': False,
+ 'on': True,
+ 'off': False,
+ }
+
+ def construct_yaml_bool(self, node):
+ value = self.construct_scalar(node)
+ return self.bool_values[value.lower()]
+
+ def construct_yaml_int(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '')
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '0':
+ return 0
+ elif value.startswith('0b'):
+ return sign*int(value[2:], 2)
+ elif value.startswith('0x'):
+ return sign*int(value[2:], 16)
+ elif value[0] == '0':
+ return sign*int(value, 8)
+ elif ':' in value:
+ digits = [int(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*int(value)
+
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
+
+ def construct_yaml_float(self, node):
+ value = self.construct_scalar(node)
+ value = value.replace('_', '').lower()
+ sign = +1
+ if value[0] == '-':
+ sign = -1
+ if value[0] in '+-':
+ value = value[1:]
+ if value == '.inf':
+ return sign*self.inf_value
+ elif value == '.nan':
+ return self.nan_value
+ elif ':' in value:
+ digits = [float(part) for part in value.split(':')]
+ digits.reverse()
+ base = 1
+ value = 0.0
+ for digit in digits:
+ value += digit*base
+ base *= 60
+ return sign*value
+ else:
+ return sign*float(value)
+
+ def construct_yaml_binary(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ timestamp_regexp = re.compile(
+ r'''^(?P<year>[0-9][0-9][0-9][0-9])
+ -(?P<month>[0-9][0-9]?)
+ -(?P<day>[0-9][0-9]?)
+ (?:(?:[Tt]|[ \t]+)
+ (?P<hour>[0-9][0-9]?)
+ :(?P<minute>[0-9][0-9])
+ :(?P<second>[0-9][0-9])
+ (?:\.(?P<fraction>[0-9]*))?
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
+
+ def construct_yaml_timestamp(self, node):
+ value = self.construct_scalar(node)
+ match = self.timestamp_regexp.match(node.value)
+ values = match.groupdict()
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ tzinfo = None
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ tzinfo = datetime.timezone(delta)
+ elif values['tz']:
+ tzinfo = datetime.timezone.utc
+ return datetime.datetime(year, month, day, hour, minute, second, fraction,
+ tzinfo=tzinfo)
+
+ def construct_yaml_omap(self, node):
+ # Note: we do not check for duplicate keys, because it's too
+ # CPU-expensive.
+ omap = []
+ yield omap
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing an ordered map", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ omap.append((key, value))
+
+ def construct_yaml_pairs(self, node):
+ # Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
+ if not isinstance(node, SequenceNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a sequence, but found %s" % node.id, node.start_mark)
+ for subnode in node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a mapping of length 1, but found %s" % subnode.id,
+ subnode.start_mark)
+ if len(subnode.value) != 1:
+ raise ConstructorError("while constructing pairs", node.start_mark,
+ "expected a single mapping item, but found %d items" % len(subnode.value),
+ subnode.start_mark)
+ key_node, value_node = subnode.value[0]
+ key = self.construct_object(key_node)
+ value = self.construct_object(value_node)
+ pairs.append((key, value))
+
+ def construct_yaml_set(self, node):
+ data = set()
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_yaml_seq(self, node):
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
+
+ def construct_yaml_map(self, node):
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
+
+ def construct_yaml_object(self, node, cls):
+ data = cls.__new__(cls)
+ yield data
+ if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
+ data.__setstate__(state)
+ else:
+ state = self.construct_mapping(node)
+ data.__dict__.update(state)
+
+ def construct_undefined(self, node):
+ raise ConstructorError(None, None,
+ "could not determine a constructor for the tag %r" % node.tag,
+ node.start_mark)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:null',
+ SafeConstructor.construct_yaml_null)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:bool',
+ SafeConstructor.construct_yaml_bool)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:int',
+ SafeConstructor.construct_yaml_int)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:float',
+ SafeConstructor.construct_yaml_float)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:binary',
+ SafeConstructor.construct_yaml_binary)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:omap',
+ SafeConstructor.construct_yaml_omap)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:pairs',
+ SafeConstructor.construct_yaml_pairs)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:set',
+ SafeConstructor.construct_yaml_set)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:str',
+ SafeConstructor.construct_yaml_str)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:seq',
+ SafeConstructor.construct_yaml_seq)
+
+SafeConstructor.add_constructor(
+ 'tag:yaml.org,2002:map',
+ SafeConstructor.construct_yaml_map)
+
+SafeConstructor.add_constructor(None,
+ SafeConstructor.construct_undefined)
+
+class FullConstructor(SafeConstructor):
+    # 'extend' is blacklisted because it is used by
+    # construct_python_object_apply to add `listitems` to a newly generated
+    # Python instance
+ def get_state_keys_blacklist(self):
+ return ['^extend$', '^__.*__$']
+
+ def get_state_keys_blacklist_regexp(self):
+ if not hasattr(self, 'state_keys_blacklist_regexp'):
+ self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
+ return self.state_keys_blacklist_regexp
+
+ def construct_python_str(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_unicode(self, node):
+ return self.construct_scalar(node)
+
+ def construct_python_bytes(self, node):
+ try:
+ value = self.construct_scalar(node).encode('ascii')
+ except UnicodeEncodeError as exc:
+ raise ConstructorError(None, None,
+ "failed to convert base64 data into ascii: %s" % exc,
+ node.start_mark)
+ try:
+ if hasattr(base64, 'decodebytes'):
+ return base64.decodebytes(value)
+ else:
+ return base64.decodestring(value)
+ except binascii.Error as exc:
+ raise ConstructorError(None, None,
+ "failed to decode base64 data: %s" % exc, node.start_mark)
+
+ def construct_python_long(self, node):
+ return self.construct_yaml_int(node)
+
+ def construct_python_complex(self, node):
+ return complex(self.construct_scalar(node))
+
+ def construct_python_tuple(self, node):
+ return tuple(self.construct_sequence(node))
+
+ def find_python_module(self, name, mark, unsafe=False):
+ if not name:
+ raise ConstructorError("while constructing a Python module", mark,
+ "expected non-empty name appended to the tag", mark)
+ if unsafe:
+ try:
+ __import__(name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python module", mark,
+ "cannot find module %r (%s)" % (name, exc), mark)
+ if name not in sys.modules:
+ raise ConstructorError("while constructing a Python module", mark,
+ "module %r is not imported" % name, mark)
+ return sys.modules[name]
+
+ def find_python_name(self, name, mark, unsafe=False):
+ if not name:
+ raise ConstructorError("while constructing a Python object", mark,
+ "expected non-empty name appended to the tag", mark)
+ if '.' in name:
+ module_name, object_name = name.rsplit('.', 1)
+ else:
+ module_name = 'builtins'
+ object_name = name
+ if unsafe:
+ try:
+ __import__(module_name)
+ except ImportError as exc:
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find module %r (%s)" % (module_name, exc), mark)
+ if module_name not in sys.modules:
+ raise ConstructorError("while constructing a Python object", mark,
+ "module %r is not imported" % module_name, mark)
+ module = sys.modules[module_name]
+ if not hasattr(module, object_name):
+ raise ConstructorError("while constructing a Python object", mark,
+ "cannot find %r in the module %r"
+ % (object_name, module.__name__), mark)
+ return getattr(module, object_name)
+
+ def construct_python_name(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python name", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_name(suffix, node.start_mark)
+
+ def construct_python_module(self, suffix, node):
+ value = self.construct_scalar(node)
+ if value:
+ raise ConstructorError("while constructing a Python module", node.start_mark,
+ "expected the empty value, but found %r" % value, node.start_mark)
+ return self.find_python_module(suffix, node.start_mark)
+
+ def make_python_instance(self, suffix, node,
+ args=None, kwds=None, newobj=False, unsafe=False):
+ if not args:
+ args = []
+ if not kwds:
+ kwds = {}
+ cls = self.find_python_name(suffix, node.start_mark)
+ if not (unsafe or isinstance(cls, type)):
+ raise ConstructorError("while constructing a Python instance", node.start_mark,
+ "expected a class, but found %r" % type(cls),
+ node.start_mark)
+ if newobj and isinstance(cls, type):
+ return cls.__new__(cls, *args, **kwds)
+ else:
+ return cls(*args, **kwds)
+
+ def set_python_instance_state(self, instance, state, unsafe=False):
+ if hasattr(instance, '__setstate__'):
+ instance.__setstate__(state)
+ else:
+ slotstate = {}
+ if isinstance(state, tuple) and len(state) == 2:
+ state, slotstate = state
+ if hasattr(instance, '__dict__'):
+ if not unsafe and state:
+ for key in state.keys():
+ self.check_state_key(key)
+ instance.__dict__.update(state)
+ elif state:
+ slotstate.update(state)
+ for key, value in slotstate.items():
+ if not unsafe:
+ self.check_state_key(key)
+ setattr(instance, key, value)
+
+ def construct_python_object(self, suffix, node):
+ # Format:
+ # !!python/object:module.name { ... state ... }
+ instance = self.make_python_instance(suffix, node, newobj=True)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
+ self.set_python_instance_state(instance, state)
+
+ def construct_python_object_apply(self, suffix, node, newobj=False):
+ # Format:
+ # !!python/object/apply # (or !!python/object/new)
+ # args: [ ... arguments ... ]
+ # kwds: { ... keywords ... }
+ # state: ... state ...
+ # listitems: [ ... listitems ... ]
+ # dictitems: { ... dictitems ... }
+ # or short format:
+ # !!python/object/apply [ ... arguments ... ]
+ # The difference between !!python/object/apply and !!python/object/new
+    # is how an object is created; see make_python_instance for details.
+ if isinstance(node, SequenceNode):
+ args = self.construct_sequence(node, deep=True)
+ kwds = {}
+ state = {}
+ listitems = []
+ dictitems = {}
+ else:
+ value = self.construct_mapping(node, deep=True)
+ args = value.get('args', [])
+ kwds = value.get('kwds', {})
+ state = value.get('state', {})
+ listitems = value.get('listitems', [])
+ dictitems = value.get('dictitems', {})
+ instance = self.make_python_instance(suffix, node, args, kwds, newobj)
+ if state:
+ self.set_python_instance_state(instance, state)
+ if listitems:
+ instance.extend(listitems)
+ if dictitems:
+ for key in dictitems:
+ instance[key] = dictitems[key]
+ return instance
+
+ def construct_python_object_new(self, suffix, node):
+ return self.construct_python_object_apply(suffix, node, newobj=True)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/none',
+ FullConstructor.construct_yaml_null)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/bool',
+ FullConstructor.construct_yaml_bool)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/str',
+ FullConstructor.construct_python_str)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/unicode',
+ FullConstructor.construct_python_unicode)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/bytes',
+ FullConstructor.construct_python_bytes)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/int',
+ FullConstructor.construct_yaml_int)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/long',
+ FullConstructor.construct_python_long)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/float',
+ FullConstructor.construct_yaml_float)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/complex',
+ FullConstructor.construct_python_complex)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/list',
+ FullConstructor.construct_yaml_seq)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/tuple',
+ FullConstructor.construct_python_tuple)
+
+FullConstructor.add_constructor(
+ 'tag:yaml.org,2002:python/dict',
+ FullConstructor.construct_yaml_map)
+
+FullConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/name:',
+ FullConstructor.construct_python_name)
+
+class UnsafeConstructor(FullConstructor):
+
+ def find_python_module(self, name, mark):
+ return super(UnsafeConstructor, self).find_python_module(name, mark, unsafe=True)
+
+ def find_python_name(self, name, mark):
+ return super(UnsafeConstructor, self).find_python_name(name, mark, unsafe=True)
+
+ def make_python_instance(self, suffix, node, args=None, kwds=None, newobj=False):
+ return super(UnsafeConstructor, self).make_python_instance(
+ suffix, node, args, kwds, newobj, unsafe=True)
+
+ def set_python_instance_state(self, instance, state):
+ return super(UnsafeConstructor, self).set_python_instance_state(
+ instance, state, unsafe=True)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/module:',
+ UnsafeConstructor.construct_python_module)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object:',
+ UnsafeConstructor.construct_python_object)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/new:',
+ UnsafeConstructor.construct_python_object_new)
+
+UnsafeConstructor.add_multi_constructor(
+ 'tag:yaml.org,2002:python/object/apply:',
+ UnsafeConstructor.construct_python_object_apply)
+
+# Constructor is the same as UnsafeConstructor. It is left in place in case
+# people have extended it directly.
+class Constructor(UnsafeConstructor):
+ pass
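+
+# A minimal illustration of why these tags are dangerous: loading the document
+# "!!python/object/apply:os.system ['echo hi']" with a loader built on
+# UnsafeConstructor imports os and calls os.system('echo hi'). That is why the
+# python/object* multi-constructors above are registered on UnsafeConstructor
+# rather than on FullConstructor.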
diff --git a/third_party/python/PyYAML/lib3/yaml/cyaml.py b/third_party/python/PyYAML/lib3/yaml/cyaml.py
new file mode 100644
index 0000000000..0c21345879
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/cyaml.py
@@ -0,0 +1,101 @@
+
+__all__ = [
+ 'CBaseLoader', 'CSafeLoader', 'CFullLoader', 'CUnsafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper'
+]
+
+from yaml._yaml import CParser, CEmitter
+
+from .constructor import *
+
+from .serializer import *
+from .representer import *
+
+from .resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CFullLoader(CParser, FullConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ FullConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CUnsafeLoader(CParser, UnsafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ UnsafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
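+# Usage sketch (illustrative; these classes are importable from the top-level
+# yaml package only when PyYAML is built against LibYAML, i.e. when
+# yaml.__with_libyaml__ is True):
+#
+#     import yaml
+#     data = yaml.load(stream, Loader=yaml.CSafeLoader)  # C-accelerated parsing
+#     text = yaml.dump(data, Dumper=yaml.CSafeDumper)    # C-accelerated emitting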
diff --git a/third_party/python/PyYAML/lib3/yaml/dumper.py b/third_party/python/PyYAML/lib3/yaml/dumper.py
new file mode 100644
index 0000000000..6aadba551f
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/dumper.py
@@ -0,0 +1,62 @@
+
+__all__ = ['BaseDumper', 'SafeDumper', 'Dumper']
+
+from .emitter import *
+from .serializer import *
+from .representer import *
+from .resolver import *
+
+class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class SafeDumper(Emitter, Serializer, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
+class Dumper(Emitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=False,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None, sort_keys=True):
+ Emitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width,
+ allow_unicode=allow_unicode, line_break=line_break)
+ Serializer.__init__(self, encoding=encoding,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style, sort_keys=sort_keys)
+ Resolver.__init__(self)
+
diff --git a/third_party/python/PyYAML/lib3/yaml/emitter.py b/third_party/python/PyYAML/lib3/yaml/emitter.py
new file mode 100644
index 0000000000..a664d01116
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/emitter.py
@@ -0,0 +1,1137 @@
+
+# Emitter expects events obeying the following grammar:
+# stream ::= STREAM-START document* STREAM-END
+# document ::= DOCUMENT-START node DOCUMENT-END
+# node ::= SCALAR | sequence | mapping
+# sequence ::= SEQUENCE-START node* SEQUENCE-END
+# mapping ::= MAPPING-START (node node)* MAPPING-END
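+#
+# For example (illustrative), emitting the two-item sequence document
+# "- 1\n- 2\n" consumes StreamStartEvent, DocumentStartEvent,
+# SequenceStartEvent, two ScalarEvents (values '1' and '2'),
+# SequenceEndEvent, DocumentEndEvent and StreamEndEvent.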
+
+__all__ = ['Emitter', 'EmitterError']
+
+from .error import YAMLError
+from .events import *
+
+class EmitterError(YAMLError):
+ pass
+
+class ScalarAnalysis:
+ def __init__(self, scalar, empty, multiline,
+ allow_flow_plain, allow_block_plain,
+ allow_single_quoted, allow_double_quoted,
+ allow_block):
+ self.scalar = scalar
+ self.empty = empty
+ self.multiline = multiline
+ self.allow_flow_plain = allow_flow_plain
+ self.allow_block_plain = allow_block_plain
+ self.allow_single_quoted = allow_single_quoted
+ self.allow_double_quoted = allow_double_quoted
+ self.allow_block = allow_block
+
+class Emitter:
+
+ DEFAULT_TAG_PREFIXES = {
+ '!' : '!',
+ 'tag:yaml.org,2002:' : '!!',
+ }
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None):
+
+ # The stream should have the methods `write` and possibly `flush`.
+ self.stream = stream
+
+ # Encoding can be overridden by STREAM-START.
+ self.encoding = None
+
+ # Emitter is a state machine with a stack of states to handle nested
+ # structures.
+ self.states = []
+ self.state = self.expect_stream_start
+
+ # Current event and the event queue.
+ self.events = []
+ self.event = None
+
+ # The current indentation level and the stack of previous indents.
+ self.indents = []
+ self.indent = None
+
+ # Flow level.
+ self.flow_level = 0
+
+ # Contexts.
+ self.root_context = False
+ self.sequence_context = False
+ self.mapping_context = False
+ self.simple_key_context = False
+
+ # Characteristics of the last emitted character:
+ # - current position.
+        #  - is it whitespace?
+        #  - is it an indentation character
+        #      (an indentation space, '-', '?', or ':')?
+ self.line = 0
+ self.column = 0
+ self.whitespace = True
+ self.indention = True
+
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
+ # Formatting details.
+ self.canonical = canonical
+ self.allow_unicode = allow_unicode
+ self.best_indent = 2
+ if indent and 1 < indent < 10:
+ self.best_indent = indent
+ self.best_width = 80
+ if width and width > self.best_indent*2:
+ self.best_width = width
+ self.best_line_break = '\n'
+ if line_break in ['\r', '\n', '\r\n']:
+ self.best_line_break = line_break
+
+ # Tag prefixes.
+ self.tag_prefixes = None
+
+ # Prepared anchor and tag.
+ self.prepared_anchor = None
+ self.prepared_tag = None
+
+ # Scalar analysis and style.
+ self.analysis = None
+ self.style = None
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def emit(self, event):
+ self.events.append(event)
+ while not self.need_more_events():
+ self.event = self.events.pop(0)
+ self.state()
+ self.event = None
+
+    # In some cases, we wait for the next few events before emitting.
+
+ def need_more_events(self):
+ if not self.events:
+ return True
+ event = self.events[0]
+ if isinstance(event, DocumentStartEvent):
+ return self.need_events(1)
+ elif isinstance(event, SequenceStartEvent):
+ return self.need_events(2)
+ elif isinstance(event, MappingStartEvent):
+ return self.need_events(3)
+ else:
+ return False
+
+ def need_events(self, count):
+ level = 0
+ for event in self.events[1:]:
+ if isinstance(event, (DocumentStartEvent, CollectionStartEvent)):
+ level += 1
+ elif isinstance(event, (DocumentEndEvent, CollectionEndEvent)):
+ level -= 1
+ elif isinstance(event, StreamEndEvent):
+ level = -1
+ if level < 0:
+ return False
+ return (len(self.events) < count+1)
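+    # (This look-ahead is what lets check_empty_sequence, check_empty_mapping
+    # and check_empty_document peek at self.events[0] before any indicator has
+    # been written.)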
+
+ def increase_indent(self, flow=False, indentless=False):
+ self.indents.append(self.indent)
+ if self.indent is None:
+ if flow:
+ self.indent = self.best_indent
+ else:
+ self.indent = 0
+ elif not indentless:
+ self.indent += self.best_indent
+
+ # States.
+
+ # Stream handlers.
+
+ def expect_stream_start(self):
+ if isinstance(self.event, StreamStartEvent):
+ if self.event.encoding and not hasattr(self.stream, 'encoding'):
+ self.encoding = self.event.encoding
+ self.write_stream_start()
+ self.state = self.expect_first_document_start
+ else:
+ raise EmitterError("expected StreamStartEvent, but got %s"
+ % self.event)
+
+ def expect_nothing(self):
+ raise EmitterError("expected nothing, but got %s" % self.event)
+
+ # Document handlers.
+
+ def expect_first_document_start(self):
+ return self.expect_document_start(first=True)
+
+ def expect_document_start(self, first=False):
+ if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ if self.event.version:
+ version_text = self.prepare_version(self.event.version)
+ self.write_version_directive(version_text)
+ self.tag_prefixes = self.DEFAULT_TAG_PREFIXES.copy()
+ if self.event.tags:
+ handles = sorted(self.event.tags.keys())
+ for handle in handles:
+ prefix = self.event.tags[handle]
+ self.tag_prefixes[prefix] = handle
+ handle_text = self.prepare_tag_handle(handle)
+ prefix_text = self.prepare_tag_prefix(prefix)
+ self.write_tag_directive(handle_text, prefix_text)
+ implicit = (first and not self.event.explicit and not self.canonical
+ and not self.event.version and not self.event.tags
+ and not self.check_empty_document())
+ if not implicit:
+ self.write_indent()
+ self.write_indicator('---', True)
+ if self.canonical:
+ self.write_indent()
+ self.state = self.expect_document_root
+ elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.write_stream_end()
+ self.state = self.expect_nothing
+ else:
+ raise EmitterError("expected DocumentStartEvent, but got %s"
+ % self.event)
+
+ def expect_document_end(self):
+ if isinstance(self.event, DocumentEndEvent):
+ self.write_indent()
+ if self.event.explicit:
+ self.write_indicator('...', True)
+ self.write_indent()
+ self.flush_stream()
+ self.state = self.expect_document_start
+ else:
+ raise EmitterError("expected DocumentEndEvent, but got %s"
+ % self.event)
+
+ def expect_document_root(self):
+ self.states.append(self.expect_document_end)
+ self.expect_node(root=True)
+
+ # Node handlers.
+
+ def expect_node(self, root=False, sequence=False, mapping=False,
+ simple_key=False):
+ self.root_context = root
+ self.sequence_context = sequence
+ self.mapping_context = mapping
+ self.simple_key_context = simple_key
+ if isinstance(self.event, AliasEvent):
+ self.expect_alias()
+ elif isinstance(self.event, (ScalarEvent, CollectionStartEvent)):
+ self.process_anchor('&')
+ self.process_tag()
+ if isinstance(self.event, ScalarEvent):
+ self.expect_scalar()
+ elif isinstance(self.event, SequenceStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_sequence():
+ self.expect_flow_sequence()
+ else:
+ self.expect_block_sequence()
+ elif isinstance(self.event, MappingStartEvent):
+ if self.flow_level or self.canonical or self.event.flow_style \
+ or self.check_empty_mapping():
+ self.expect_flow_mapping()
+ else:
+ self.expect_block_mapping()
+ else:
+ raise EmitterError("expected NodeEvent, but got %s" % self.event)
+
+ def expect_alias(self):
+ if self.event.anchor is None:
+ raise EmitterError("anchor is not specified for alias")
+ self.process_anchor('*')
+ self.state = self.states.pop()
+
+ def expect_scalar(self):
+ self.increase_indent(flow=True)
+ self.process_scalar()
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+
+ # Flow sequence handlers.
+
+ def expect_flow_sequence(self):
+ self.write_indicator('[', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_sequence_item
+
+ def expect_first_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ def expect_flow_sequence_item(self):
+ if isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator(']', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.states.append(self.expect_flow_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Flow mapping handlers.
+
+ def expect_flow_mapping(self):
+ self.write_indicator('{', True, whitespace=True)
+ self.flow_level += 1
+ self.increase_indent(flow=True)
+ self.state = self.expect_first_flow_mapping_key
+
+ def expect_first_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_key(self):
+ if isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.flow_level -= 1
+ if self.canonical:
+ self.write_indicator(',', False)
+ self.write_indent()
+ self.write_indicator('}', False)
+ self.state = self.states.pop()
+ else:
+ self.write_indicator(',', False)
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ if not self.canonical and self.check_simple_key():
+ self.states.append(self.expect_flow_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True)
+ self.states.append(self.expect_flow_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_flow_mapping_value(self):
+ if self.canonical or self.column > self.best_width:
+ self.write_indent()
+ self.write_indicator(':', True)
+ self.states.append(self.expect_flow_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Block sequence handlers.
+
+ def expect_block_sequence(self):
+ indentless = (self.mapping_context and not self.indention)
+ self.increase_indent(flow=False, indentless=indentless)
+ self.state = self.expect_first_block_sequence_item
+
+ def expect_first_block_sequence_item(self):
+ return self.expect_block_sequence_item(first=True)
+
+ def expect_block_sequence_item(self, first=False):
+ if not first and isinstance(self.event, SequenceEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ self.write_indicator('-', True, indention=True)
+ self.states.append(self.expect_block_sequence_item)
+ self.expect_node(sequence=True)
+
+ # Block mapping handlers.
+
+ def expect_block_mapping(self):
+ self.increase_indent(flow=False)
+ self.state = self.expect_first_block_mapping_key
+
+ def expect_first_block_mapping_key(self):
+ return self.expect_block_mapping_key(first=True)
+
+ def expect_block_mapping_key(self, first=False):
+ if not first and isinstance(self.event, MappingEndEvent):
+ self.indent = self.indents.pop()
+ self.state = self.states.pop()
+ else:
+ self.write_indent()
+ if self.check_simple_key():
+ self.states.append(self.expect_block_mapping_simple_value)
+ self.expect_node(mapping=True, simple_key=True)
+ else:
+ self.write_indicator('?', True, indention=True)
+ self.states.append(self.expect_block_mapping_value)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_simple_value(self):
+ self.write_indicator(':', False)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ def expect_block_mapping_value(self):
+ self.write_indent()
+ self.write_indicator(':', True, indention=True)
+ self.states.append(self.expect_block_mapping_key)
+ self.expect_node(mapping=True)
+
+ # Checkers.
+
+ def check_empty_sequence(self):
+ return (isinstance(self.event, SequenceStartEvent) and self.events
+ and isinstance(self.events[0], SequenceEndEvent))
+
+ def check_empty_mapping(self):
+ return (isinstance(self.event, MappingStartEvent) and self.events
+ and isinstance(self.events[0], MappingEndEvent))
+
+ def check_empty_document(self):
+ if not isinstance(self.event, DocumentStartEvent) or not self.events:
+ return False
+ event = self.events[0]
+ return (isinstance(event, ScalarEvent) and event.anchor is None
+ and event.tag is None and event.implicit and event.value == '')
+
+ def check_simple_key(self):
+ length = 0
+ if isinstance(self.event, NodeEvent) and self.event.anchor is not None:
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ length += len(self.prepared_anchor)
+ if isinstance(self.event, (ScalarEvent, CollectionStartEvent)) \
+ and self.event.tag is not None:
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(self.event.tag)
+ length += len(self.prepared_tag)
+ if isinstance(self.event, ScalarEvent):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ length += len(self.analysis.scalar)
+ return (length < 128 and (isinstance(self.event, AliasEvent)
+ or (isinstance(self.event, ScalarEvent)
+ and not self.analysis.empty and not self.analysis.multiline)
+ or self.check_empty_sequence() or self.check_empty_mapping()))
+
+ # Anchor, Tag, and Scalar processors.
+
+ def process_anchor(self, indicator):
+ if self.event.anchor is None:
+ self.prepared_anchor = None
+ return
+ if self.prepared_anchor is None:
+ self.prepared_anchor = self.prepare_anchor(self.event.anchor)
+ if self.prepared_anchor:
+ self.write_indicator(indicator+self.prepared_anchor, True)
+ self.prepared_anchor = None
+
+ def process_tag(self):
+ tag = self.event.tag
+ if isinstance(self.event, ScalarEvent):
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ if ((not self.canonical or tag is None) and
+ ((self.style == '' and self.event.implicit[0])
+ or (self.style != '' and self.event.implicit[1]))):
+ self.prepared_tag = None
+ return
+ if self.event.implicit[0] and tag is None:
+ tag = '!'
+ self.prepared_tag = None
+ else:
+ if (not self.canonical or tag is None) and self.event.implicit:
+ self.prepared_tag = None
+ return
+ if tag is None:
+ raise EmitterError("tag is not specified")
+ if self.prepared_tag is None:
+ self.prepared_tag = self.prepare_tag(tag)
+ if self.prepared_tag:
+ self.write_indicator(self.prepared_tag, True)
+ self.prepared_tag = None
+
+ def choose_scalar_style(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.event.style == '"' or self.canonical:
+ return '"'
+ if not self.event.style and self.event.implicit[0]:
+ if (not (self.simple_key_context and
+ (self.analysis.empty or self.analysis.multiline))
+ and (self.flow_level and self.analysis.allow_flow_plain
+ or (not self.flow_level and self.analysis.allow_block_plain))):
+ return ''
+ if self.event.style and self.event.style in '|>':
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
+ return self.event.style
+ if not self.event.style or self.event.style == '\'':
+ if (self.analysis.allow_single_quoted and
+ not (self.simple_key_context and self.analysis.multiline)):
+ return '\''
+ return '"'
+
+ def process_scalar(self):
+ if self.analysis is None:
+ self.analysis = self.analyze_scalar(self.event.value)
+ if self.style is None:
+ self.style = self.choose_scalar_style()
+ split = (not self.simple_key_context)
+ #if self.analysis.multiline and split \
+ # and (not self.style or self.style in '\'\"'):
+ # self.write_indent()
+ if self.style == '"':
+ self.write_double_quoted(self.analysis.scalar, split)
+ elif self.style == '\'':
+ self.write_single_quoted(self.analysis.scalar, split)
+ elif self.style == '>':
+ self.write_folded(self.analysis.scalar)
+ elif self.style == '|':
+ self.write_literal(self.analysis.scalar)
+ else:
+ self.write_plain(self.analysis.scalar, split)
+ self.analysis = None
+ self.style = None
+
+ # Analyzers.
+
+ def prepare_version(self, version):
+ major, minor = version
+ if major != 1:
+ raise EmitterError("unsupported YAML version: %d.%d" % (major, minor))
+ return '%d.%d' % (major, minor)
+
+ def prepare_tag_handle(self, handle):
+ if not handle:
+ raise EmitterError("tag handle must not be empty")
+ if handle[0] != '!' or handle[-1] != '!':
+ raise EmitterError("tag handle must start and end with '!': %r" % handle)
+ for ch in handle[1:-1]:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the tag handle: %r"
+ % (ch, handle))
+ return handle
+
+ def prepare_tag_prefix(self, prefix):
+ if not prefix:
+ raise EmitterError("tag prefix must not be empty")
+ chunks = []
+ start = end = 0
+ if prefix[0] == '!':
+ end = 1
+ while end < len(prefix):
+ ch = prefix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?!:@&=+$,_.~*\'()[]':
+ end += 1
+ else:
+ if start < end:
+ chunks.append(prefix[start:end])
+ start = end = end+1
+                data = ch.encode('utf-8')
+                # Iterating over a bytes object yields ints on Python 3, so
+                # format the byte value directly; ord() would raise TypeError.
+                for ch in data:
+                    chunks.append('%%%02X' % ch)
+ if start < end:
+ chunks.append(prefix[start:end])
+ return ''.join(chunks)
+
+ def prepare_tag(self, tag):
+ if not tag:
+ raise EmitterError("tag must not be empty")
+ if tag == '!':
+ return tag
+ handle = None
+ suffix = tag
+ prefixes = sorted(self.tag_prefixes.keys())
+ for prefix in prefixes:
+ if tag.startswith(prefix) \
+ and (prefix == '!' or len(prefix) < len(tag)):
+ handle = self.tag_prefixes[prefix]
+ suffix = tag[len(prefix):]
+ chunks = []
+ start = end = 0
+ while end < len(suffix):
+ ch = suffix[end]
+ if '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.~*\'()[]' \
+ or (ch == '!' and handle != '!'):
+ end += 1
+ else:
+ if start < end:
+ chunks.append(suffix[start:end])
+ start = end = end+1
+ data = ch.encode('utf-8')
+ for ch in data:
+ chunks.append('%%%02X' % ch)
+ if start < end:
+ chunks.append(suffix[start:end])
+ suffix_text = ''.join(chunks)
+ if handle:
+ return '%s%s' % (handle, suffix_text)
+ else:
+ return '!<%s>' % suffix_text
+
+ def prepare_anchor(self, anchor):
+ if not anchor:
+ raise EmitterError("anchor must not be empty")
+ for ch in anchor:
+ if not ('0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_'):
+ raise EmitterError("invalid character %r in the anchor: %r"
+ % (ch, anchor))
+ return anchor
+
+ def analyze_scalar(self, scalar):
+
+ # Empty scalar is a special case.
+ if not scalar:
+ return ScalarAnalysis(scalar=scalar, empty=True, multiline=False,
+ allow_flow_plain=False, allow_block_plain=True,
+ allow_single_quoted=True, allow_double_quoted=True,
+ allow_block=False)
+
+ # Indicators and special characters.
+ block_indicators = False
+ flow_indicators = False
+ line_breaks = False
+ special_characters = False
+
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
+
+ # Check document indicators.
+ if scalar.startswith('---') or scalar.startswith('...'):
+ block_indicators = True
+ flow_indicators = True
+
+        # First character, or preceded by whitespace.
+ preceded_by_whitespace = True
+
+        # Last character, or followed by whitespace.
+ followed_by_whitespace = (len(scalar) == 1 or
+ scalar[1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # The previous character is a space.
+ previous_space = False
+
+ # The previous character is a break.
+ previous_break = False
+
+ index = 0
+ while index < len(scalar):
+ ch = scalar[index]
+
+ # Check for indicators.
+ if index == 0:
+ # Leading indicators are special characters.
+ if ch in '#,[]{}&*!|>\'\"%@`':
+ flow_indicators = True
+ block_indicators = True
+ if ch in '?:':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '-' and followed_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+ else:
+                # Some indicators cannot appear inside a scalar, either.
+ if ch in ',?[]{}':
+ flow_indicators = True
+ if ch == ':':
+ flow_indicators = True
+ if followed_by_whitespace:
+ block_indicators = True
+ if ch == '#' and preceded_by_whitespace:
+ flow_indicators = True
+ block_indicators = True
+
+ # Check for line breaks, special, and unicode characters.
+ if ch in '\n\x85\u2028\u2029':
+ line_breaks = True
+ if not (ch == '\n' or '\x20' <= ch <= '\x7E'):
+ if (ch == '\x85' or '\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'
+ or '\U00010000' <= ch < '\U0010ffff') and ch != '\uFEFF':
+ unicode_characters = True
+ if not self.allow_unicode:
+ special_characters = True
+ else:
+ special_characters = True
+
+ # Detect important whitespace combinations.
+ if ch == ' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in '\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
+
+ # Prepare for the next character.
+ index += 1
+ preceded_by_whitespace = (ch in '\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
+ scalar[index+1] in '\0 \t\r\n\x85\u2028\u2029')
+
+ # Let's decide what styles are allowed.
+ allow_flow_plain = True
+ allow_block_plain = True
+ allow_single_quoted = True
+ allow_double_quoted = True
+ allow_block = True
+
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
+ allow_flow_plain = allow_block_plain = False
+
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
+ # scalars.
+ if break_space:
+ allow_flow_plain = allow_block_plain = allow_single_quoted = False
+
+        # Spaces followed by breaks, as well as special characters, are only
+        # allowed in double-quoted scalars.
+ if space_break or special_characters:
+ allow_flow_plain = allow_block_plain = \
+ allow_single_quoted = allow_block = False
+
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
+ if line_breaks:
+ allow_flow_plain = allow_block_plain = False
+
+ # Flow indicators are forbidden for flow plain scalars.
+ if flow_indicators:
+ allow_flow_plain = False
+
+ # Block indicators are forbidden for block plain scalars.
+ if block_indicators:
+ allow_block_plain = False
+
+ return ScalarAnalysis(scalar=scalar,
+ empty=False, multiline=line_breaks,
+ allow_flow_plain=allow_flow_plain,
+ allow_block_plain=allow_block_plain,
+ allow_single_quoted=allow_single_quoted,
+ allow_double_quoted=allow_double_quoted,
+ allow_block=allow_block)
+
+ # Writers.
+
+ def flush_stream(self):
+ if hasattr(self.stream, 'flush'):
+ self.stream.flush()
+
+ def write_stream_start(self):
+ # Write BOM if needed.
+ if self.encoding and self.encoding.startswith('utf-16'):
+ self.stream.write('\uFEFF'.encode(self.encoding))
+
+ def write_stream_end(self):
+ self.flush_stream()
+
+ def write_indicator(self, indicator, need_whitespace,
+ whitespace=False, indention=False):
+ if self.whitespace or not need_whitespace:
+ data = indicator
+ else:
+ data = ' '+indicator
+ self.whitespace = whitespace
+ self.indention = self.indention and indention
+ self.column += len(data)
+ self.open_ended = False
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_indent(self):
+ indent = self.indent or 0
+ if not self.indention or self.column > indent \
+ or (self.column == indent and not self.whitespace):
+ self.write_line_break()
+ if self.column < indent:
+ self.whitespace = True
+ data = ' '*(indent-self.column)
+ self.column = indent
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_line_break(self, data=None):
+ if data is None:
+ data = self.best_line_break
+ self.whitespace = True
+ self.indention = True
+ self.line += 1
+ self.column = 0
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+
+ def write_version_directive(self, version_text):
+ data = '%%YAML %s' % version_text
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ def write_tag_directive(self, handle_text, prefix_text):
+ data = '%%TAG %s %s' % (handle_text, prefix_text)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_line_break()
+
+ # Scalar streams.
+
+ def write_single_quoted(self, text, split=True):
+ self.write_indicator('\'', True)
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch is None or ch != ' ':
+ if start+1 == end and self.column > self.best_width and split \
+ and start != 0 and end != len(text):
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029' or ch == '\'':
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch == '\'':
+ data = '\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+ self.write_indicator('\'', False)
+
+ ESCAPE_REPLACEMENTS = {
+ '\0': '0',
+ '\x07': 'a',
+ '\x08': 'b',
+ '\x09': 't',
+ '\x0A': 'n',
+ '\x0B': 'v',
+ '\x0C': 'f',
+ '\x0D': 'r',
+ '\x1B': 'e',
+ '\"': '\"',
+ '\\': '\\',
+ '\x85': 'N',
+ '\xA0': '_',
+ '\u2028': 'L',
+ '\u2029': 'P',
+ }
+
+ def write_double_quoted(self, text, split=True):
+ self.write_indicator('"', True)
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if ch is None or ch in '"\\\x85\u2028\u2029\uFEFF' \
+ or not ('\x20' <= ch <= '\x7E'
+ or (self.allow_unicode
+ and ('\xA0' <= ch <= '\uD7FF'
+ or '\uE000' <= ch <= '\uFFFD'))):
+ if start < end:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ if ch in self.ESCAPE_REPLACEMENTS:
+ data = '\\'+self.ESCAPE_REPLACEMENTS[ch]
+ elif ch <= '\xFF':
+ data = '\\x%02X' % ord(ch)
+ elif ch <= '\uFFFF':
+ data = '\\u%04X' % ord(ch)
+ else:
+ data = '\\U%08X' % ord(ch)
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end+1
+ if 0 < end < len(text)-1 and (ch == ' ' or start >= end) \
+ and self.column+(end-start) > self.best_width and split:
+ data = text[start:end]+'\\'
+ if start < end:
+ start = end
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ if text[start] == ' ':
+ data = '\\'
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ end += 1
+ self.write_indicator('"', False)
+
+ def determine_block_hints(self, text):
+ hints = ''
+ if text:
+ if text[0] in ' \n\x85\u2028\u2029':
+ hints += str(self.best_indent)
+ if text[-1] not in '\n\x85\u2028\u2029':
+ hints += '-'
+ elif len(text) == 1 or text[-2] in '\n\x85\u2028\u2029':
+ hints += '+'
+ return hints
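+    # For example (illustrative, with the default best_indent of 2):
+    #   'foo'     -> '-'   (strip: the text has no trailing newline)
+    #   'foo\n'   -> ''    (the default clip chomping)
+    #   'foo\n\n' -> '+'   (keep: preserve the trailing breaks)
+    #   ' foo\n'  -> '2'   (leading space forces an explicit indent indicator)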
+
+ def write_folded(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('>'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
+ spaces = False
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ if not leading_space and ch is not None and ch != ' ' \
+ and text[start] == '\n':
+ self.write_line_break()
+ leading_space = (ch == ' ')
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ elif spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width:
+ self.write_indent()
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ spaces = (ch == ' ')
+ end += 1
+
+ def write_literal(self, text):
+ hints = self.determine_block_hints(text)
+ self.write_indicator('|'+hints, True)
+ if hints[-1:] == '+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if breaks:
+ if ch is None or ch not in '\n\x85\u2028\u2029':
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ if ch is not None:
+ self.write_indent()
+ start = end
+ else:
+ if ch is None or ch in '\n\x85\u2028\u2029':
+ data = text[start:end]
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ if ch is None:
+ self.write_line_break()
+ start = end
+ if ch is not None:
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
+
+ def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
+ if not text:
+ return
+ if not self.whitespace:
+ data = ' '
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ self.whitespace = False
+ self.indention = False
+ spaces = False
+ breaks = False
+ start = end = 0
+ while end <= len(text):
+ ch = None
+ if end < len(text):
+ ch = text[end]
+ if spaces:
+ if ch != ' ':
+ if start+1 == end and self.column > self.best_width and split:
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ else:
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ elif breaks:
+ if ch not in '\n\x85\u2028\u2029':
+ if text[start] == '\n':
+ self.write_line_break()
+ for br in text[start:end]:
+ if br == '\n':
+ self.write_line_break()
+ else:
+ self.write_line_break(br)
+ self.write_indent()
+ self.whitespace = False
+ self.indention = False
+ start = end
+ else:
+ if ch is None or ch in ' \n\x85\u2028\u2029':
+ data = text[start:end]
+ self.column += len(data)
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end
+ if ch is not None:
+ spaces = (ch == ' ')
+ breaks = (ch in '\n\x85\u2028\u2029')
+ end += 1
diff --git a/third_party/python/PyYAML/lib3/yaml/error.py b/third_party/python/PyYAML/lib3/yaml/error.py
new file mode 100644
index 0000000000..b796b4dc51
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/error.py
@@ -0,0 +1,75 @@
+
+__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
+
+class Mark:
+
+ def __init__(self, name, index, line, column, buffer, pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self, indent=4, max_length=75):
+ if self.buffer is None:
+ return None
+ head = ''
+ start = self.pointer
+ while start > 0 and self.buffer[start-1] not in '\0\r\n\x85\u2028\u2029':
+ start -= 1
+ if self.pointer-start > max_length/2-1:
+ head = ' ... '
+ start += 5
+ break
+ tail = ''
+ end = self.pointer
+ while end < len(self.buffer) and self.buffer[end] not in '\0\r\n\x85\u2028\u2029':
+ end += 1
+ if end-self.pointer > max_length/2-1:
+ tail = ' ... '
+ end -= 5
+ break
+ snippet = self.buffer[start:end]
+ return ' '*indent + head + snippet + tail + '\n' \
+ + ' '*(indent+self.pointer-start+len(head)) + '^'
+
+ def __str__(self):
+ snippet = self.get_snippet()
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ if snippet is not None:
+ where += ":\n"+snippet
+ return where
+
+class YAMLError(Exception):
+ pass
+
+class MarkedYAMLError(YAMLError):
+
+ def __init__(self, context=None, context_mark=None,
+ problem=None, problem_mark=None, note=None):
+ self.context = context
+ self.context_mark = context_mark
+ self.problem = problem
+ self.problem_mark = problem_mark
+ self.note = note
+
+ def __str__(self):
+ lines = []
+ if self.context is not None:
+ lines.append(self.context)
+ if self.context_mark is not None \
+ and (self.problem is None or self.problem_mark is None
+ or self.context_mark.name != self.problem_mark.name
+ or self.context_mark.line != self.problem_mark.line
+ or self.context_mark.column != self.problem_mark.column):
+ lines.append(str(self.context_mark))
+ if self.problem is not None:
+ lines.append(self.problem)
+ if self.problem_mark is not None:
+ lines.append(str(self.problem_mark))
+ if self.note is not None:
+ lines.append(self.note)
+ return '\n'.join(lines)
+
diff --git a/third_party/python/PyYAML/lib3/yaml/events.py b/third_party/python/PyYAML/lib3/yaml/events.py
new file mode 100644
index 0000000000..f79ad389cb
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/events.py
@@ -0,0 +1,86 @@
+
+# Abstract classes.
+
+class Event(object):
+ def __init__(self, start_mark=None, end_mark=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in ['anchor', 'tag', 'implicit', 'value']
+ if hasattr(self, key)]
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+class NodeEvent(Event):
+ def __init__(self, anchor, start_mark=None, end_mark=None):
+ self.anchor = anchor
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class CollectionStartEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, start_mark=None, end_mark=None,
+ flow_style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class CollectionEndEvent(Event):
+ pass
+
+# Implementations.
+
+class StreamStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None, encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndEvent(Event):
+ pass
+
+class DocumentStartEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None, version=None, tags=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+ self.version = version
+ self.tags = tags
+
+class DocumentEndEvent(Event):
+ def __init__(self, start_mark=None, end_mark=None,
+ explicit=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.explicit = explicit
+
+class AliasEvent(NodeEvent):
+ pass
+
+class ScalarEvent(NodeEvent):
+ def __init__(self, anchor, tag, implicit, value,
+ start_mark=None, end_mark=None, style=None):
+ self.anchor = anchor
+ self.tag = tag
+ self.implicit = implicit
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class SequenceStartEvent(CollectionStartEvent):
+ pass
+
+class SequenceEndEvent(CollectionEndEvent):
+ pass
+
+class MappingStartEvent(CollectionStartEvent):
+ pass
+
+class MappingEndEvent(CollectionEndEvent):
+ pass
+
diff --git a/third_party/python/PyYAML/lib3/yaml/loader.py b/third_party/python/PyYAML/lib3/yaml/loader.py
new file mode 100644
index 0000000000..e90c11224c
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/loader.py
@@ -0,0 +1,63 @@
+
+__all__ = ['BaseLoader', 'FullLoader', 'SafeLoader', 'Loader', 'UnsafeLoader']
+
+from .reader import *
+from .scanner import *
+from .parser import *
+from .composer import *
+from .constructor import *
+from .resolver import *
+
+class BaseLoader(Reader, Scanner, Parser, Composer, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class FullLoader(Reader, Scanner, Parser, Composer, FullConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ FullConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class SafeLoader(Reader, Scanner, Parser, Composer, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class Loader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+# UnsafeLoader is the same as Loader (which is and was always unsafe on
+# untrusted input). Use of either Loader or UnsafeLoader should be rare, since
+# FullLoader should be able to load almost all YAML safely. Loader is left
+# intact to ensure backwards compatibility.
+class UnsafeLoader(Reader, Scanner, Parser, Composer, Constructor, Resolver):
+
+ def __init__(self, stream):
+ Reader.__init__(self, stream)
+ Scanner.__init__(self)
+ Parser.__init__(self)
+ Composer.__init__(self)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
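+
+# Usage sketch (illustrative): the top-level helpers map onto these classes.
+# yaml.safe_load(s) uses SafeLoader, yaml.full_load(s) uses FullLoader, and
+# yaml.unsafe_load(s) uses UnsafeLoader. Prefer safe_load for untrusted input.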
diff --git a/third_party/python/PyYAML/lib3/yaml/nodes.py b/third_party/python/PyYAML/lib3/yaml/nodes.py
new file mode 100644
index 0000000000..c4f070c41e
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/nodes.py
@@ -0,0 +1,49 @@
+
+class Node(object):
+ def __init__(self, tag, value, start_mark, end_mark):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ value = self.value
+ #if isinstance(value, list):
+ # if len(value) == 0:
+ # value = '<empty>'
+ # elif len(value) == 1:
+ # value = '<1 item>'
+ # else:
+ # value = '<%d items>' % len(value)
+ #else:
+ # if len(value) > 75:
+ # value = repr(value[:70]+u' ... ')
+ # else:
+ # value = repr(value)
+ value = repr(value)
+ return '%s(tag=%r, value=%s)' % (self.__class__.__name__, self.tag, value)
+
+class ScalarNode(Node):
+ id = 'scalar'
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
+class CollectionNode(Node):
+ def __init__(self, tag, value,
+ start_mark=None, end_mark=None, flow_style=None):
+ self.tag = tag
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.flow_style = flow_style
+
+class SequenceNode(CollectionNode):
+ id = 'sequence'
+
+class MappingNode(CollectionNode):
+ id = 'mapping'
+
diff --git a/third_party/python/PyYAML/lib3/yaml/parser.py b/third_party/python/PyYAML/lib3/yaml/parser.py
new file mode 100644
index 0000000000..13a5995d29
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
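+#
+# For example (illustrative), the document "a: 1" derives as
+# implicit_document -> block_node -> block_content -> block_collection
+# -> block_mapping, and parsing it yields DocumentStartEvent,
+# MappingStartEvent, ScalarEvents for 'a' and '1', MappingEndEvent and
+# DocumentEndEvent.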
+
+__all__ = ['Parser', 'ParserError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+from .events import *
+from .scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser:
+    # Since writing a recursive-descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ '!': '!',
+ '!!': 'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == 'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == 'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle,
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
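+    #
+    # For instance (illustrative only), the scalar below carries both
+    # properties before its content:
+    #
+    #   key: !!str &anchor value    # properties (TAG ANCHOR) + SCALAR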
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle,
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == '!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == '!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == '!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), '',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+    # block_mapping     ::= BLOCK-MAPPING-START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
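+    #
+    # For example (illustrative only), a complex key spelled with the
+    # explicit KEY/VALUE indicators:
+    #
+    #   ? [a, b]
+    #   : value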
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+    # Note that while production rules for both flow_sequence_entry and
+    # flow_mapping_entry are identical, their interpretations are different.
+    # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+    # generates an inline mapping (set syntax).
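+    #
+    # For example (an illustrative sketch), `[ a: 1 ]` parses as a flow
+    # sequence whose single entry is the inline mapping {a: 1}.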
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), '', mark, mark)
+
diff --git a/third_party/python/PyYAML/lib3/yaml/reader.py b/third_party/python/PyYAML/lib3/yaml/reader.py
new file mode 100644
index 0000000000..774b0219b5
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/reader.py
@@ -0,0 +1,185 @@
+# This module contains abstractions for the input stream. You don't have to
+# look further; there is no pretty code here.
+#
+# We define two classes here.
+#
+# Mark(source, line, column)
+# It's just a record and its only use is producing nice error messages.
+# Parser does not use it for any other purposes.
+#
+# Reader(source, data)
+# Reader determines the encoding of `data` and converts it to unicode.
+# Reader provides the following methods and attributes:
+#   reader.peek(index=0) - return the character `index` positions ahead (0 for the current one)
+#   reader.prefix(length=1) - return the next `length` characters
+#   reader.forward(length=1) - move the current position `length` characters forward.
+#   reader.index - the number of the current character.
+#   reader.line, reader.column - the line and the column of the current character.
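+#
+# A minimal usage sketch (illustrative only):
+#
+#   reader = Reader('a: 1\n')
+#   reader.peek()     # -> 'a' (does not move the pointer)
+#   reader.prefix(4)  # -> 'a: 1'
+#   reader.forward(3) # the pointer is now at '1'; index/line/column updated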
+
+__all__ = ['Reader', 'ReaderError']
+
+from .error import YAMLError, Mark
+
+import codecs, re
+
+class ReaderError(YAMLError):
+
+ def __init__(self, name, position, character, encoding, reason):
+ self.name = name
+ self.character = character
+ self.position = position
+ self.encoding = encoding
+ self.reason = reason
+
+ def __str__(self):
+ if isinstance(self.character, bytes):
+ return "'%s' codec can't decode byte #x%02x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.encoding, ord(self.character), self.reason,
+ self.name, self.position)
+ else:
+ return "unacceptable character #x%04x: %s\n" \
+ " in \"%s\", position %d" \
+ % (self.character, self.reason,
+ self.name, self.position)
+
+class Reader(object):
+ # Reader:
+ # - determines the data encoding and converts it to a unicode string,
+ # - checks if characters are in allowed range,
+ # - adds '\0' to the end.
+
+ # Reader accepts
+ # - a `bytes` object,
+ # - a `str` object,
+    #  - a file-like object with its `read` method returning `bytes`,
+    #  - a file-like object with its `read` method returning `str`.
+
+ # Yeah, it's ugly and slow.
+
+ def __init__(self, stream):
+ self.name = None
+ self.stream = None
+ self.stream_pointer = 0
+ self.eof = True
+ self.buffer = ''
+ self.pointer = 0
+ self.raw_buffer = None
+ self.raw_decode = None
+ self.encoding = None
+ self.index = 0
+ self.line = 0
+ self.column = 0
+ if isinstance(stream, str):
+ self.name = "<unicode string>"
+ self.check_printable(stream)
+ self.buffer = stream+'\0'
+ elif isinstance(stream, bytes):
+ self.name = "<byte string>"
+ self.raw_buffer = stream
+ self.determine_encoding()
+ else:
+ self.stream = stream
+ self.name = getattr(stream, 'name', "<file>")
+ self.eof = False
+ self.raw_buffer = None
+ self.determine_encoding()
+
+ def peek(self, index=0):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
+ self.update(index+1)
+ return self.buffer[self.pointer+index]
+
+ def prefix(self, length=1):
+ if self.pointer+length >= len(self.buffer):
+ self.update(length)
+ return self.buffer[self.pointer:self.pointer+length]
+
+ def forward(self, length=1):
+ if self.pointer+length+1 >= len(self.buffer):
+ self.update(length+1)
+ while length:
+ ch = self.buffer[self.pointer]
+ self.pointer += 1
+ self.index += 1
+ if ch in '\n\x85\u2028\u2029' \
+ or (ch == '\r' and self.buffer[self.pointer] != '\n'):
+ self.line += 1
+ self.column = 0
+ elif ch != '\uFEFF':
+ self.column += 1
+ length -= 1
+
+ def get_mark(self):
+ if self.stream is None:
+ return Mark(self.name, self.index, self.line, self.column,
+ self.buffer, self.pointer)
+ else:
+ return Mark(self.name, self.index, self.line, self.column,
+ None, None)
+
+ def determine_encoding(self):
+ while not self.eof and (self.raw_buffer is None or len(self.raw_buffer) < 2):
+ self.update_raw()
+ if isinstance(self.raw_buffer, bytes):
+ if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
+ self.raw_decode = codecs.utf_16_le_decode
+ self.encoding = 'utf-16-le'
+ elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
+ self.raw_decode = codecs.utf_16_be_decode
+ self.encoding = 'utf-16-be'
+ else:
+ self.raw_decode = codecs.utf_8_decode
+ self.encoding = 'utf-8'
+ self.update(1)
+
+ NON_PRINTABLE = re.compile('[^\x09\x0A\x0D\x20-\x7E\x85\xA0-\uD7FF\uE000-\uFFFD\U00010000-\U0010ffff]')
+ def check_printable(self, data):
+ match = self.NON_PRINTABLE.search(data)
+ if match:
+ character = match.group()
+ position = self.index+(len(self.buffer)-self.pointer)+match.start()
+ raise ReaderError(self.name, position, ord(character),
+ 'unicode', "special characters are not allowed")
+
+ def update(self, length):
+ if self.raw_buffer is None:
+ return
+ self.buffer = self.buffer[self.pointer:]
+ self.pointer = 0
+ while len(self.buffer) < length:
+ if not self.eof:
+ self.update_raw()
+ if self.raw_decode is not None:
+ try:
+ data, converted = self.raw_decode(self.raw_buffer,
+ 'strict', self.eof)
+ except UnicodeDecodeError as exc:
+ character = self.raw_buffer[exc.start]
+ if self.stream is not None:
+ position = self.stream_pointer-len(self.raw_buffer)+exc.start
+ else:
+ position = exc.start
+ raise ReaderError(self.name, position, character,
+ exc.encoding, exc.reason)
+ else:
+ data = self.raw_buffer
+ converted = len(data)
+ self.check_printable(data)
+ self.buffer += data
+ self.raw_buffer = self.raw_buffer[converted:]
+ if self.eof:
+ self.buffer += '\0'
+ self.raw_buffer = None
+ break
+
+ def update_raw(self, size=4096):
+ data = self.stream.read(size)
+ if self.raw_buffer is None:
+ self.raw_buffer = data
+ else:
+ self.raw_buffer += data
+ self.stream_pointer += len(data)
+ if not data:
+ self.eof = True
diff --git a/third_party/python/PyYAML/lib3/yaml/representer.py b/third_party/python/PyYAML/lib3/yaml/representer.py
new file mode 100644
index 0000000000..3b0b192ef3
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/representer.py
@@ -0,0 +1,389 @@
+
+__all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
+ 'RepresenterError']
+
+from .error import *
+from .nodes import *
+
+import datetime, copyreg, types, base64, collections
+
+class RepresenterError(YAMLError):
+ pass
+
+class BaseRepresenter:
+
+ yaml_representers = {}
+ yaml_multi_representers = {}
+
+ def __init__(self, default_style=None, default_flow_style=False, sort_keys=True):
+ self.default_style = default_style
+ self.sort_keys = sort_keys
+ self.default_flow_style = default_flow_style
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent(self, data):
+ node = self.represent_data(data)
+ self.serialize(node)
+ self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
+
+ def represent_data(self, data):
+ if self.ignore_aliases(data):
+ self.alias_key = None
+ else:
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
+ return node
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
+ data_types = type(data).__mro__
+ if data_types[0] in self.yaml_representers:
+ node = self.yaml_representers[data_types[0]](self, data)
+ else:
+ for data_type in data_types:
+ if data_type in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[data_type](self, data)
+ break
+ else:
+ if None in self.yaml_multi_representers:
+ node = self.yaml_multi_representers[None](self, data)
+ elif None in self.yaml_representers:
+ node = self.yaml_representers[None](self, data)
+ else:
+ node = ScalarNode(None, str(data))
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
+ return node
+
+ @classmethod
+ def add_representer(cls, data_type, representer):
+ if not 'yaml_representers' in cls.__dict__:
+ cls.yaml_representers = cls.yaml_representers.copy()
+ cls.yaml_representers[data_type] = representer
+
+ @classmethod
+ def add_multi_representer(cls, data_type, representer):
+ if not 'yaml_multi_representers' in cls.__dict__:
+ cls.yaml_multi_representers = cls.yaml_multi_representers.copy()
+ cls.yaml_multi_representers[data_type] = representer
+
+ def represent_scalar(self, tag, value, style=None):
+ if style is None:
+ style = self.default_style
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
+
+ def represent_sequence(self, tag, sequence, flow_style=None):
+ value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ for item in sequence:
+ node_item = self.represent_data(item)
+ if not (isinstance(node_item, ScalarNode) and not node_item.style):
+ best_style = False
+ value.append(node_item)
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
+ if hasattr(mapping, 'items'):
+ mapping = list(mapping.items())
+ if self.sort_keys:
+ try:
+ mapping = sorted(mapping)
+ except TypeError:
+ pass
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
+ if flow_style is None:
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
+
+ def ignore_aliases(self, data):
+ return False
+
+class SafeRepresenter(BaseRepresenter):
+
+ def ignore_aliases(self, data):
+ if data is None:
+ return True
+ if isinstance(data, tuple) and data == ():
+ return True
+ if isinstance(data, (str, bytes, bool, int, float)):
+ return True
+
+ def represent_none(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:null', 'null')
+
+ def represent_str(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:str', data)
+
+ def represent_binary(self, data):
+ if hasattr(base64, 'encodebytes'):
+ data = base64.encodebytes(data).decode('ascii')
+ else:
+ data = base64.encodestring(data).decode('ascii')
+ return self.represent_scalar('tag:yaml.org,2002:binary', data, style='|')
+
+ def represent_bool(self, data):
+ if data:
+ value = 'true'
+ else:
+ value = 'false'
+ return self.represent_scalar('tag:yaml.org,2002:bool', value)
+
+ def represent_int(self, data):
+ return self.represent_scalar('tag:yaml.org,2002:int', str(data))
+
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
+
+ def represent_float(self, data):
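+        # `data != data` detects NaN; the second clause is a guard for
+        # broken platforms where NaN compares equal to everything.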
+ if data != data or (data == 0.0 and data == 1.0):
+ value = '.nan'
+ elif data == self.inf_value:
+ value = '.inf'
+ elif data == -self.inf_value:
+ value = '-.inf'
+ else:
+ value = repr(data).lower()
+ # Note that in some cases `repr(data)` represents a float number
+        # without the decimal part. For instance:
+ # >>> repr(1e17)
+ # '1e17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if '.' not in value and 'e' in value:
+ value = value.replace('e', '.0e', 1)
+ return self.represent_scalar('tag:yaml.org,2002:float', value)
+
+ def represent_list(self, data):
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
+ return self.represent_sequence('tag:yaml.org,2002:seq', data)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+
+ def represent_dict(self, data):
+ return self.represent_mapping('tag:yaml.org,2002:map', data)
+
+ def represent_set(self, data):
+ value = {}
+ for key in data:
+ value[key] = None
+ return self.represent_mapping('tag:yaml.org,2002:set', value)
+
+ def represent_date(self, data):
+ value = data.isoformat()
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_datetime(self, data):
+ value = data.isoformat(' ')
+ return self.represent_scalar('tag:yaml.org,2002:timestamp', value)
+
+ def represent_yaml_object(self, tag, data, cls, flow_style=None):
+ if hasattr(data, '__getstate__'):
+ state = data.__getstate__()
+ else:
+ state = data.__dict__.copy()
+ return self.represent_mapping(tag, state, flow_style=flow_style)
+
+ def represent_undefined(self, data):
+ raise RepresenterError("cannot represent an object", data)
+
+SafeRepresenter.add_representer(type(None),
+ SafeRepresenter.represent_none)
+
+SafeRepresenter.add_representer(str,
+ SafeRepresenter.represent_str)
+
+SafeRepresenter.add_representer(bytes,
+ SafeRepresenter.represent_binary)
+
+SafeRepresenter.add_representer(bool,
+ SafeRepresenter.represent_bool)
+
+SafeRepresenter.add_representer(int,
+ SafeRepresenter.represent_int)
+
+SafeRepresenter.add_representer(float,
+ SafeRepresenter.represent_float)
+
+SafeRepresenter.add_representer(list,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(tuple,
+ SafeRepresenter.represent_list)
+
+SafeRepresenter.add_representer(dict,
+ SafeRepresenter.represent_dict)
+
+SafeRepresenter.add_representer(set,
+ SafeRepresenter.represent_set)
+
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
+
+SafeRepresenter.add_representer(None,
+ SafeRepresenter.represent_undefined)
+
+class Representer(SafeRepresenter):
+
+ def represent_complex(self, data):
+ if data.imag == 0.0:
+ data = '%r' % data.real
+ elif data.real == 0.0:
+ data = '%rj' % data.imag
+ elif data.imag > 0:
+ data = '%r+%rj' % (data.real, data.imag)
+ else:
+ data = '%r%rj' % (data.real, data.imag)
+ return self.represent_scalar('tag:yaml.org,2002:python/complex', data)
+
+ def represent_tuple(self, data):
+ return self.represent_sequence('tag:yaml.org,2002:python/tuple', data)
+
+ def represent_name(self, data):
+ name = '%s.%s' % (data.__module__, data.__name__)
+ return self.represent_scalar('tag:yaml.org,2002:python/name:'+name, '')
+
+ def represent_module(self, data):
+ return self.represent_scalar(
+ 'tag:yaml.org,2002:python/module:'+data.__name__, '')
+
+ def represent_object(self, data):
+        # We use the __reduce__ API to save the data. data.__reduce__ returns
+        # a tuple of length 2-5:
+        #   (function, args, state, listitems, dictitems)
+
+        # For reconstructing, we call function(*args), then set its state,
+        # listitems, and dictitems if they are not None.
+
+ # A special case is when function.__name__ == '__newobj__'. In this
+ # case we create the object with args[0].__new__(*args).
+
+ # Another special case is when __reduce__ returns a string - we don't
+ # support it.
+
+ # We produce a !!python/object, !!python/object/new or
+ # !!python/object/apply node.
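+        #
+        # For example (an illustrative sketch; the module prefix depends
+        # on where the class is defined):
+        #
+        #   class Point:
+        #       def __init__(self, x, y):
+        #           self.x, self.y = x, y
+        #
+        #   yaml.dump(Point(1, 2))
+        #   # -> a node tagged !!python/object:__main__.Point whose
+        #   #    mapping value is the instance __dict__ ({x: 1, y: 2})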
+
+ cls = type(data)
+ if cls in copyreg.dispatch_table:
+ reduce = copyreg.dispatch_table[cls](data)
+ elif hasattr(data, '__reduce_ex__'):
+ reduce = data.__reduce_ex__(2)
+ elif hasattr(data, '__reduce__'):
+ reduce = data.__reduce__()
+ else:
+ raise RepresenterError("cannot represent an object", data)
+ reduce = (list(reduce)+[None]*5)[:5]
+ function, args, state, listitems, dictitems = reduce
+ args = list(args)
+ if state is None:
+ state = {}
+ if listitems is not None:
+ listitems = list(listitems)
+ if dictitems is not None:
+ dictitems = dict(dictitems)
+ if function.__name__ == '__newobj__':
+ function = args[0]
+ args = args[1:]
+ tag = 'tag:yaml.org,2002:python/object/new:'
+ newobj = True
+ else:
+ tag = 'tag:yaml.org,2002:python/object/apply:'
+ newobj = False
+ function_name = '%s.%s' % (function.__module__, function.__name__)
+ if not args and not listitems and not dictitems \
+ and isinstance(state, dict) and newobj:
+ return self.represent_mapping(
+ 'tag:yaml.org,2002:python/object:'+function_name, state)
+ if not listitems and not dictitems \
+ and isinstance(state, dict) and not state:
+ return self.represent_sequence(tag+function_name, args)
+ value = {}
+ if args:
+ value['args'] = args
+ if state or not isinstance(state, dict):
+ value['state'] = state
+ if listitems:
+ value['listitems'] = listitems
+ if dictitems:
+ value['dictitems'] = dictitems
+ return self.represent_mapping(tag+function_name, value)
+
+ def represent_ordered_dict(self, data):
+ # Provide uniform representation across different Python versions.
+ data_type = type(data)
+ tag = 'tag:yaml.org,2002:python/object/apply:%s.%s' \
+ % (data_type.__module__, data_type.__name__)
+ items = [[key, value] for key, value in data.items()]
+ return self.represent_sequence(tag, [items])
+
+Representer.add_representer(complex,
+ Representer.represent_complex)
+
+Representer.add_representer(tuple,
+ Representer.represent_tuple)
+
+Representer.add_representer(type,
+ Representer.represent_name)
+
+Representer.add_representer(collections.OrderedDict,
+ Representer.represent_ordered_dict)
+
+Representer.add_representer(types.FunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.BuiltinFunctionType,
+ Representer.represent_name)
+
+Representer.add_representer(types.ModuleType,
+ Representer.represent_module)
+
+Representer.add_multi_representer(object,
+ Representer.represent_object)
+
diff --git a/third_party/python/PyYAML/lib3/yaml/resolver.py b/third_party/python/PyYAML/lib3/yaml/resolver.py
new file mode 100644
index 0000000000..013896d2f1
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/resolver.py
@@ -0,0 +1,227 @@
+
+__all__ = ['BaseResolver', 'Resolver']
+
+from .error import *
+from .nodes import *
+
+import re
+
+class ResolverError(YAMLError):
+ pass
+
+class BaseResolver:
+
+ DEFAULT_SCALAR_TAG = 'tag:yaml.org,2002:str'
+ DEFAULT_SEQUENCE_TAG = 'tag:yaml.org,2002:seq'
+ DEFAULT_MAPPING_TAG = 'tag:yaml.org,2002:map'
+
+ yaml_implicit_resolvers = {}
+ yaml_path_resolvers = {}
+
+ def __init__(self):
+ self.resolver_exact_paths = []
+ self.resolver_prefix_paths = []
+
+ @classmethod
+ def add_implicit_resolver(cls, tag, regexp, first):
+ if not 'yaml_implicit_resolvers' in cls.__dict__:
+ implicit_resolvers = {}
+ for key in cls.yaml_implicit_resolvers:
+ implicit_resolvers[key] = cls.yaml_implicit_resolvers[key][:]
+ cls.yaml_implicit_resolvers = implicit_resolvers
+ if first is None:
+ first = [None]
+ for ch in first:
+ cls.yaml_implicit_resolvers.setdefault(ch, []).append((tag, regexp))
+
+ @classmethod
+ def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+        # `path` is a pattern that is matched against the path from the
+        # root to the node that is being considered. `path` elements are
+        # tuples `(node_check, index_check)`. `node_check` is a node class:
+        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+        # matches any kind of node. `index_check` could be `None`, a boolean
+        # value, a string value, or a number. `None` and `False` match against
+        # any _value_ of sequence and mapping nodes. `True` matches against
+        # any _key_ of a mapping node. A string `index_check` matches against
+        # a mapping value that corresponds to a scalar key whose content is
+        # equal to the `index_check` value. An integer `index_check` matches
+        # against a sequence value with the index equal to `index_check`.
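+        #
+        # For example (hypothetical tag and path, illustrative only):
+        #
+        #   Resolver.add_path_resolver('!employee', ['staff', None], dict)
+        #
+        # tags every mapping that appears as an item of the sequence
+        # stored under the top-level 'staff' key.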
+ if not 'yaml_path_resolvers' in cls.__dict__:
+ cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
+ new_path = []
+ for element in path:
+ if isinstance(element, (list, tuple)):
+ if len(element) == 2:
+ node_check, index_check = element
+ elif len(element) == 1:
+ node_check = element[0]
+ index_check = True
+ else:
+ raise ResolverError("Invalid path element: %s" % element)
+ else:
+ node_check = None
+ index_check = element
+ if node_check is str:
+ node_check = ScalarNode
+ elif node_check is list:
+ node_check = SequenceNode
+ elif node_check is dict:
+ node_check = MappingNode
+ elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
+ and not isinstance(node_check, str) \
+ and node_check is not None:
+ raise ResolverError("Invalid node checker: %s" % node_check)
+ if not isinstance(index_check, (str, int)) \
+ and index_check is not None:
+ raise ResolverError("Invalid index checker: %s" % index_check)
+ new_path.append((node_check, index_check))
+ if kind is str:
+ kind = ScalarNode
+ elif kind is list:
+ kind = SequenceNode
+ elif kind is dict:
+ kind = MappingNode
+ elif kind not in [ScalarNode, SequenceNode, MappingNode] \
+ and kind is not None:
+ raise ResolverError("Invalid node kind: %s" % kind)
+ cls.yaml_path_resolvers[tuple(new_path), kind] = tag
+
+ def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
+ exact_paths = {}
+ prefix_paths = []
+ if current_node:
+ depth = len(self.resolver_prefix_paths)
+ for path, kind in self.resolver_prefix_paths[-1]:
+ if self.check_resolver_prefix(depth, path, kind,
+ current_node, current_index):
+ if len(path) > depth:
+ prefix_paths.append((path, kind))
+ else:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ for path, kind in self.yaml_path_resolvers:
+ if not path:
+ exact_paths[kind] = self.yaml_path_resolvers[path, kind]
+ else:
+ prefix_paths.append((path, kind))
+ self.resolver_exact_paths.append(exact_paths)
+ self.resolver_prefix_paths.append(prefix_paths)
+
+ def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
+ self.resolver_exact_paths.pop()
+ self.resolver_prefix_paths.pop()
+
+ def check_resolver_prefix(self, depth, path, kind,
+ current_node, current_index):
+ node_check, index_check = path[depth-1]
+ if isinstance(node_check, str):
+ if current_node.tag != node_check:
+ return
+ elif node_check is not None:
+ if not isinstance(current_node, node_check):
+ return
+ if index_check is True and current_index is not None:
+ return
+ if (index_check is False or index_check is None) \
+ and current_index is None:
+ return
+ if isinstance(index_check, str):
+ if not (isinstance(current_index, ScalarNode)
+ and index_check == current_index.value):
+ return
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
+ if index_check != current_index:
+ return
+ return True
+
+ def resolve(self, kind, value, implicit):
+ if kind is ScalarNode and implicit[0]:
+ if value == '':
+ resolvers = self.yaml_implicit_resolvers.get('', [])
+ else:
+ resolvers = self.yaml_implicit_resolvers.get(value[0], [])
+ wildcard_resolvers = self.yaml_implicit_resolvers.get(None, [])
+ for tag, regexp in resolvers + wildcard_resolvers:
+ if regexp.match(value):
+ return tag
+ implicit = implicit[1]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
+ if kind is ScalarNode:
+ return self.DEFAULT_SCALAR_TAG
+ elif kind is SequenceNode:
+ return self.DEFAULT_SEQUENCE_TAG
+ elif kind is MappingNode:
+ return self.DEFAULT_MAPPING_TAG
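+
+    # For example (illustrative only): with the implicit resolvers
+    # registered on Resolver below, the plain scalar 'true' resolves to
+    # tag:yaml.org,2002:bool and '12' to tag:yaml.org,2002:int, while a
+    # quoted "true" keeps the default !!str tag because its first
+    # implicit flag is False.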
+
+class Resolver(BaseResolver):
+ pass
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:bool',
+ re.compile(r'''^(?:yes|Yes|YES|no|No|NO
+ |true|True|TRUE|false|False|FALSE
+ |on|On|ON|off|Off|OFF)$''', re.X),
+ list('yYnNtTfFoO'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:float',
+ re.compile(r'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
+ |[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
+ |[-+]?\.(?:inf|Inf|INF)
+ |\.(?:nan|NaN|NAN))$''', re.X),
+ list('-+0123456789.'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:int',
+ re.compile(r'''^(?:[-+]?0b[0-1_]+
+ |[-+]?0[0-7_]+
+ |[-+]?(?:0|[1-9][0-9_]*)
+ |[-+]?0x[0-9a-fA-F_]+
+ |[-+]?[1-9][0-9_]*(?::[0-5]?[0-9])+)$''', re.X),
+ list('-+0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:merge',
+ re.compile(r'^(?:<<)$'),
+ ['<'])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:null',
+ re.compile(r'''^(?: ~
+ |null|Null|NULL
+ | )$''', re.X),
+ ['~', 'n', 'N', ''])
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:timestamp',
+ re.compile(r'''^(?:[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]
+ |[0-9][0-9][0-9][0-9] -[0-9][0-9]? -[0-9][0-9]?
+ (?:[Tt]|[ \t]+)[0-9][0-9]?
+ :[0-9][0-9] :[0-9][0-9] (?:\.[0-9]*)?
+ (?:[ \t]*(?:Z|[-+][0-9][0-9]?(?::[0-9][0-9])?))?)$''', re.X),
+ list('0123456789'))
+
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:value',
+ re.compile(r'^(?:=)$'),
+ ['='])
+
+# The following resolver is only for documentation purposes. It cannot work
+# because plain scalars cannot start with '!', '&', or '*'.
+Resolver.add_implicit_resolver(
+ 'tag:yaml.org,2002:yaml',
+ re.compile(r'^(?:!|&|\*)$'),
+ list('!&*'))
+
diff --git a/third_party/python/PyYAML/lib3/yaml/scanner.py b/third_party/python/PyYAML/lib3/yaml/scanner.py
new file mode 100644
index 0000000000..7437ede1c6
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/scanner.py
@@ -0,0 +1,1435 @@
+
+# Scanner produces tokens of the following types:
+# STREAM-START
+# STREAM-END
+# DIRECTIVE(name, value)
+# DOCUMENT-START
+# DOCUMENT-END
+# BLOCK-SEQUENCE-START
+# BLOCK-MAPPING-START
+# BLOCK-END
+# FLOW-SEQUENCE-START
+# FLOW-MAPPING-START
+# FLOW-SEQUENCE-END
+# FLOW-MAPPING-END
+# BLOCK-ENTRY
+# FLOW-ENTRY
+# KEY
+# VALUE
+# ALIAS(value)
+# ANCHOR(value)
+# TAG(value)
+# SCALAR(value, plain, style)
+#
+# Read comments in the Scanner code for more details.
+#
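+# For example (an illustrative sketch), scanning the document `a: 1`
+# produces:
+#   STREAM-START, BLOCK-MAPPING-START, KEY, SCALAR(a, plain),
+#   VALUE, SCALAR(1, plain), BLOCK-END, STREAM-END
+#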
+
+__all__ = ['Scanner', 'ScannerError']
+
+from .error import MarkedYAMLError
+from .tokens import *
+
+class ScannerError(MarkedYAMLError):
+ pass
+
+class SimpleKey:
+    # See the simple keys treatment below.
+
+ def __init__(self, token_number, required, index, line, column, mark):
+ self.token_number = token_number
+ self.required = required
+ self.index = index
+ self.line = line
+ self.column = column
+ self.mark = mark
+
+class Scanner:
+
+ def __init__(self):
+ """Initialize the scanner."""
+ # It is assumed that Scanner and Reader will have a common descendant.
+        # Reader does the dirty work of checking for a BOM and converting the
+        # input data to Unicode. It also adds a NUL to the end.
+ #
+ # Reader supports the following methods
+ # self.peek(i=0) # peek the next i-th character
+ # self.prefix(l=1) # peek the next l characters
+ # self.forward(l=1) # read the next l characters and move the pointer.
+
+        # Have we reached the end of the stream?
+ self.done = False
+
+ # The number of unclosed '{' and '['. `flow_level == 0` means block
+ # context.
+ self.flow_level = 0
+
+ # List of processed tokens that are not yet emitted.
+ self.tokens = []
+
+ # Add the STREAM-START token.
+ self.fetch_stream_start()
+
+ # Number of tokens that were emitted through the `get_token` method.
+ self.tokens_taken = 0
+
+ # The current indentation level.
+ self.indent = -1
+
+ # Past indentation levels.
+ self.indents = []
+
+ # Variables related to simple keys treatment.
+
+ # A simple key is a key that is not denoted by the '?' indicator.
+ # Example of simple keys:
+ # ---
+ # block simple key: value
+ # ? not a simple key:
+ # : { flow simple key: value }
+ # We emit the KEY token before all keys, so when we find a potential
+ # simple key, we try to locate the corresponding ':' indicator.
+ # Simple keys should be limited to a single line and 1024 characters.
+
+ # Can a simple key start at the current position? A simple key may
+ # start:
+ # - at the beginning of the line, not counting indentation spaces
+ # (in block context),
+ # - after '{', '[', ',' (in the flow context),
+ # - after '?', ':', '-' (in the block context).
+ # In the block context, this flag also signifies if a block collection
+ # may start at the current position.
+ self.allow_simple_key = True
+
+ # Keep track of possible simple keys. This is a dictionary. The key
+        # is `flow_level`; there can be no more than one possible simple key
+ # for each level. The value is a SimpleKey record:
+ # (token_number, required, index, line, column, mark)
+ # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
+ # '[', or '{' tokens.
+ self.possible_simple_keys = {}
+
+ # Public methods.
+
+ def check_token(self, *choices):
+ # Check if the next token is one of the given types.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.tokens[0], choice):
+ return True
+ return False
+
+ def peek_token(self):
+        # Return the next token, but do not remove it from the queue.
+        # Return None if there are no more tokens.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ return self.tokens[0]
+ else:
+ return None
+
+ def get_token(self):
+ # Return the next token.
+ while self.need_more_tokens():
+ self.fetch_more_tokens()
+ if self.tokens:
+ self.tokens_taken += 1
+ return self.tokens.pop(0)
+
+ # Private methods.
+
+ def need_more_tokens(self):
+ if self.done:
+ return False
+ if not self.tokens:
+ return True
+ # The current token may be a potential simple key, so we
+ # need to look further.
+ self.stale_possible_simple_keys()
+ if self.next_possible_simple_key() == self.tokens_taken:
+ return True
+
+ def fetch_more_tokens(self):
+
+ # Eat whitespaces and comments until we reach the next token.
+ self.scan_to_next_token()
+
+ # Remove obsolete possible simple keys.
+ self.stale_possible_simple_keys()
+
+ # Compare the current indentation and column. It may add some tokens
+ # and decrease the current indentation level.
+ self.unwind_indent(self.column)
+
+ # Peek the next character.
+ ch = self.peek()
+
+ # Is it the end of stream?
+ if ch == '\0':
+ return self.fetch_stream_end()
+
+ # Is it a directive?
+ if ch == '%' and self.check_directive():
+ return self.fetch_directive()
+
+ # Is it the document start?
+ if ch == '-' and self.check_document_start():
+ return self.fetch_document_start()
+
+ # Is it the document end?
+ if ch == '.' and self.check_document_end():
+ return self.fetch_document_end()
+
+ # TODO: support for BOM within a stream.
+ #if ch == '\uFEFF':
+ # return self.fetch_bom() <-- issue BOMToken
+
+ # Note: the order of the following checks is NOT significant.
+
+ # Is it the flow sequence start indicator?
+ if ch == '[':
+ return self.fetch_flow_sequence_start()
+
+ # Is it the flow mapping start indicator?
+ if ch == '{':
+ return self.fetch_flow_mapping_start()
+
+ # Is it the flow sequence end indicator?
+ if ch == ']':
+ return self.fetch_flow_sequence_end()
+
+ # Is it the flow mapping end indicator?
+ if ch == '}':
+ return self.fetch_flow_mapping_end()
+
+ # Is it the flow entry indicator?
+ if ch == ',':
+ return self.fetch_flow_entry()
+
+ # Is it the block entry indicator?
+ if ch == '-' and self.check_block_entry():
+ return self.fetch_block_entry()
+
+ # Is it the key indicator?
+ if ch == '?' and self.check_key():
+ return self.fetch_key()
+
+ # Is it the value indicator?
+ if ch == ':' and self.check_value():
+ return self.fetch_value()
+
+ # Is it an alias?
+ if ch == '*':
+ return self.fetch_alias()
+
+ # Is it an anchor?
+ if ch == '&':
+ return self.fetch_anchor()
+
+ # Is it a tag?
+ if ch == '!':
+ return self.fetch_tag()
+
+ # Is it a literal scalar?
+ if ch == '|' and not self.flow_level:
+ return self.fetch_literal()
+
+ # Is it a folded scalar?
+ if ch == '>' and not self.flow_level:
+ return self.fetch_folded()
+
+ # Is it a single quoted scalar?
+ if ch == '\'':
+ return self.fetch_single()
+
+ # Is it a double quoted scalar?
+ if ch == '\"':
+ return self.fetch_double()
+
+ # It must be a plain scalar then.
+ if self.check_plain():
+ return self.fetch_plain()
+
+ # No? It's an error. Let's produce a nice error message.
+ raise ScannerError("while scanning for the next token", None,
+ "found character %r that cannot start any token" % ch,
+ self.get_mark())
+
+ # Simple keys treatment.
+
+ def next_possible_simple_key(self):
+ # Return the number of the nearest possible simple key. Actually we
+ # don't need to loop through the whole dictionary. We may replace it
+ # with the following code:
+ # if not self.possible_simple_keys:
+ # return None
+ # return self.possible_simple_keys[
+ # min(self.possible_simple_keys.keys())].token_number
+ min_token_number = None
+ for level in self.possible_simple_keys:
+ key = self.possible_simple_keys[level]
+ if min_token_number is None or key.token_number < min_token_number:
+ min_token_number = key.token_number
+ return min_token_number
+
+ def stale_possible_simple_keys(self):
+ # Remove entries that are no longer possible simple keys. According to
+ # the YAML specification, simple keys
+ # - should be limited to a single line,
+ # - should be no longer than 1024 characters.
+ # Disabling this procedure will allow simple keys of any length and
+ # height (may cause problems if indentation is broken though).
+ for level in list(self.possible_simple_keys):
+ key = self.possible_simple_keys[level]
+ if key.line != self.line \
+ or self.index-key.index > 1024:
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+ del self.possible_simple_keys[level]
+
+ def save_possible_simple_key(self):
+ # The next token may start a simple key. We check if it's possible
+ # and save its position. This function is called for
+ # ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
+
+ # Check if a simple key is required at the current position.
+ required = not self.flow_level and self.indent == self.column
+
+        # The next token might be a simple key. Let's save its number and
+ # position.
+ if self.allow_simple_key:
+ self.remove_possible_simple_key()
+ token_number = self.tokens_taken+len(self.tokens)
+ key = SimpleKey(token_number, required,
+ self.index, self.line, self.column, self.get_mark())
+ self.possible_simple_keys[self.flow_level] = key
+
+ def remove_possible_simple_key(self):
+ # Remove the saved possible key position at the current flow level.
+ if self.flow_level in self.possible_simple_keys:
+ key = self.possible_simple_keys[self.flow_level]
+
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not find expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
+
+ # Indentation functions.
+
+ def unwind_indent(self, column):
+
+ ## In flow context, tokens should respect indentation.
+ ## Actually the condition should be `self.indent >= column` according to
+ ## the spec. But this condition will prohibit intuitively correct
+ ## constructions such as
+ ## key : {
+ ## }
+ #if self.flow_level and self.indent > column:
+ # raise ScannerError(None, None,
+ # "invalid indentation or unclosed '[' or '{'",
+ # self.get_mark())
+
+ # In the flow context, indentation is ignored. We make the scanner less
+        # restrictive than the specification requires.
+ if self.flow_level:
+ return
+
+ # In block context, we may need to issue the BLOCK-END tokens.
+ while self.indent > column:
+ mark = self.get_mark()
+ self.indent = self.indents.pop()
+ self.tokens.append(BlockEndToken(mark, mark))
+
+ def add_indent(self, column):
+ # Check if we need to increase indentation.
+ if self.indent < column:
+ self.indents.append(self.indent)
+ self.indent = column
+ return True
+ return False
+
+ # Fetchers.
+
+ def fetch_stream_start(self):
+ # We always add STREAM-START as the first token and STREAM-END as the
+ # last token.
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-START.
+ self.tokens.append(StreamStartToken(mark, mark,
+ encoding=self.encoding))
+
+ def fetch_stream_end(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+ self.possible_simple_keys = {}
+
+ # Read the token.
+ mark = self.get_mark()
+
+ # Add STREAM-END.
+ self.tokens.append(StreamEndToken(mark, mark))
+
+        # The stream is finished.
+ self.done = True
+
+ def fetch_directive(self):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+ # Reset simple keys.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Scan and add DIRECTIVE.
+ self.tokens.append(self.scan_directive())
+
+ def fetch_document_start(self):
+ self.fetch_document_indicator(DocumentStartToken)
+
+ def fetch_document_end(self):
+ self.fetch_document_indicator(DocumentEndToken)
+
+ def fetch_document_indicator(self, TokenClass):
+
+ # Set the current indentation to -1.
+ self.unwind_indent(-1)
+
+        # Reset simple keys. Note that there cannot be a block collection
+ # after '---'.
+ self.remove_possible_simple_key()
+ self.allow_simple_key = False
+
+ # Add DOCUMENT-START or DOCUMENT-END.
+ start_mark = self.get_mark()
+ self.forward(3)
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_start(self):
+ self.fetch_flow_collection_start(FlowSequenceStartToken)
+
+ def fetch_flow_mapping_start(self):
+ self.fetch_flow_collection_start(FlowMappingStartToken)
+
+ def fetch_flow_collection_start(self, TokenClass):
+
+ # '[' and '{' may start a simple key.
+ self.save_possible_simple_key()
+
+ # Increase the flow level.
+ self.flow_level += 1
+
+ # Simple keys are allowed after '[' and '{'.
+ self.allow_simple_key = True
+
+ # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_sequence_end(self):
+ self.fetch_flow_collection_end(FlowSequenceEndToken)
+
+ def fetch_flow_mapping_end(self):
+ self.fetch_flow_collection_end(FlowMappingEndToken)
+
+ def fetch_flow_collection_end(self, TokenClass):
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Decrease the flow level.
+ self.flow_level -= 1
+
+ # No simple keys after ']' or '}'.
+ self.allow_simple_key = False
+
+ # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(TokenClass(start_mark, end_mark))
+
+ def fetch_flow_entry(self):
+
+ # Simple keys are allowed after ','.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add FLOW-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(FlowEntryToken(start_mark, end_mark))
+
+ def fetch_block_entry(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+ # Are we allowed to start a new entry?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "sequence entries are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-SEQUENCE-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockSequenceStartToken(mark, mark))
+
+ # It's an error for the block entry to occur in the flow context,
+ # but we let the parser detect this.
+ else:
+ pass
+
+ # Simple keys are allowed after '-'.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add BLOCK-ENTRY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(BlockEntryToken(start_mark, end_mark))
+
+ def fetch_key(self):
+
+ # Block context needs additional checks.
+ if not self.flow_level:
+
+            # Are we allowed to start a key (not necessarily a simple one)?
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping keys are not allowed here",
+ self.get_mark())
+
+ # We may need to add BLOCK-MAPPING-START.
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after '?' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add KEY.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(KeyToken(start_mark, end_mark))
+
+ def fetch_value(self):
+
+        # Does this ':' complete a pending simple key?
+ if self.flow_level in self.possible_simple_keys:
+
+ # Add KEY.
+ key = self.possible_simple_keys[self.flow_level]
+ del self.possible_simple_keys[self.flow_level]
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ KeyToken(key.mark, key.mark))
+
+ # If this key starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START.
+ if not self.flow_level:
+ if self.add_indent(key.column):
+ self.tokens.insert(key.token_number-self.tokens_taken,
+ BlockMappingStartToken(key.mark, key.mark))
+
+ # There cannot be two simple keys one after another.
+ self.allow_simple_key = False
+
+ # It must be a part of a complex key.
+ else:
+
+ # Block context needs additional checks.
+ # (Do we really need them? They will be caught by the parser
+ # anyway.)
+ if not self.flow_level:
+
+ # We are allowed to start a complex value if and only if
+ # we can start a simple key.
+ if not self.allow_simple_key:
+ raise ScannerError(None, None,
+ "mapping values are not allowed here",
+ self.get_mark())
+
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
+ # Simple keys are allowed after ':' in the block context.
+ self.allow_simple_key = not self.flow_level
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Add VALUE.
+ start_mark = self.get_mark()
+ self.forward()
+ end_mark = self.get_mark()
+ self.tokens.append(ValueToken(start_mark, end_mark))
+
+ def fetch_alias(self):
+
+ # ALIAS could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ALIAS.
+ self.allow_simple_key = False
+
+ # Scan and add ALIAS.
+ self.tokens.append(self.scan_anchor(AliasToken))
+
+ def fetch_anchor(self):
+
+ # ANCHOR could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after ANCHOR.
+ self.allow_simple_key = False
+
+ # Scan and add ANCHOR.
+ self.tokens.append(self.scan_anchor(AnchorToken))
+
+ def fetch_tag(self):
+
+ # TAG could start a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after TAG.
+ self.allow_simple_key = False
+
+ # Scan and add TAG.
+ self.tokens.append(self.scan_tag())
+
+ def fetch_literal(self):
+ self.fetch_block_scalar(style='|')
+
+ def fetch_folded(self):
+ self.fetch_block_scalar(style='>')
+
+ def fetch_block_scalar(self, style):
+
+ # A simple key may follow a block scalar.
+ self.allow_simple_key = True
+
+ # Reset possible simple key on the current level.
+ self.remove_possible_simple_key()
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_block_scalar(style))
+
+ def fetch_single(self):
+ self.fetch_flow_scalar(style='\'')
+
+ def fetch_double(self):
+ self.fetch_flow_scalar(style='"')
+
+ def fetch_flow_scalar(self, style):
+
+ # A flow scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after flow scalars.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR.
+ self.tokens.append(self.scan_flow_scalar(style))
+
+ def fetch_plain(self):
+
+ # A plain scalar could be a simple key.
+ self.save_possible_simple_key()
+
+ # No simple keys after plain scalars. But note that `scan_plain` will
+ # change this flag if the scan is finished at the beginning of the
+ # line.
+ self.allow_simple_key = False
+
+ # Scan and add SCALAR. May change `allow_simple_key`.
+ self.tokens.append(self.scan_plain())
+
+ # Checkers.
+
+ def check_directive(self):
+
+ # DIRECTIVE: ^ '%' ...
+ # The '%' indicator is already checked.
+ if self.column == 0:
+ return True
+
+ def check_document_start(self):
+
+ # DOCUMENT-START: ^ '---' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '---' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_document_end(self):
+
+ # DOCUMENT-END: ^ '...' (' '|'\n')
+ if self.column == 0:
+ if self.prefix(3) == '...' \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return True
+
+ def check_block_entry(self):
+
+ # BLOCK-ENTRY: '-' (' '|'\n')
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_key(self):
+
+ # KEY(flow context): '?'
+ if self.flow_level:
+ return True
+
+ # KEY(block context): '?' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_value(self):
+
+ # VALUE(flow context): ':'
+ if self.flow_level:
+ return True
+
+ # VALUE(block context): ':' (' '|'\n')
+ else:
+ return self.peek(1) in '\0 \t\r\n\x85\u2028\u2029'
+
+ def check_plain(self):
+
+ # A plain scalar may start with any non-space character except:
+ # '-', '?', ':', ',', '[', ']', '{', '}',
+ # '#', '&', '*', '!', '|', '>', '\'', '\"',
+ # '%', '@', '`'.
+ #
+ # It may also start with
+ # '-', '?', ':'
+ # if it is followed by a non-space character.
+ #
+ # Note that we limit the last rule to the block context (except the
+ # '-' character) because we want the flow context to be space
+ # independent.
+ ch = self.peek()
+ return ch not in '\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`' \
+ or (self.peek(1) not in '\0 \t\r\n\x85\u2028\u2029'
+ and (ch == '-' or (not self.flow_level and ch in '?:')))
+
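+    # A quick illustration of the checkers above: '-' opens a block entry
+    # only when followed by a space or a break, so '-1' is still scanned as
+    # a plain scalar:
+    #
+    #     >>> import yaml
+    #     >>> yaml.safe_load('- -1')
+    #     [-1]
+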
+ # Scanners.
+
+ def scan_to_next_token(self):
+ # We ignore spaces, line breaks and comments.
+ # If we find a line break in the block context, we set the flag
+ # `allow_simple_key` on.
+        # The byte order mark is stripped if it is the first character in the
+        # stream. We do not yet support a BOM inside the stream, as the
+        # specification requires; any such mark is treated as part of the
+        # document.
+        #
+        # TODO: We need to make the tab handling rules saner. A good rule is:
+        #   Tabs cannot precede the tokens
+        #       BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
+        #       KEY(block), VALUE(block), BLOCK-ENTRY
+        #   So the checking code is
+        #       if <TAB>:
+        #           self.allow_simple_key = False
+        #   We also need to add a check for `allow_simple_key == True` to
+        #   `unwind_indent` before issuing BLOCK-END.
+ # Scanners for block, flow, and plain scalars need to be modified.
+
+ if self.index == 0 and self.peek() == '\uFEFF':
+ self.forward()
+ found = False
+ while not found:
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ if self.scan_line_break():
+ if not self.flow_level:
+ self.allow_simple_key = True
+ else:
+ found = True
+
+ def scan_directive(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ self.forward()
+ name = self.scan_directive_name(start_mark)
+ value = None
+ if name == 'YAML':
+ value = self.scan_yaml_directive_value(start_mark)
+ end_mark = self.get_mark()
+ elif name == 'TAG':
+ value = self.scan_tag_directive_value(start_mark)
+ end_mark = self.get_mark()
+ else:
+ end_mark = self.get_mark()
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ self.scan_directive_ignored_line(start_mark)
+ return DirectiveToken(name, value, start_mark, end_mark)
+
+ def scan_directive_name(self, start_mark):
+ # See the specification for details.
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ return value
+
+ def scan_yaml_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ major = self.scan_yaml_directive_number(start_mark)
+ if self.peek() != '.':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or '.', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ minor = self.scan_yaml_directive_number(start_mark)
+ if self.peek() not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit or ' ', but found %r" % self.peek(),
+ self.get_mark())
+ return (major, minor)
+
+ def scan_yaml_directive_number(self, start_mark):
+ # See the specification for details.
+ ch = self.peek()
+ if not ('0' <= ch <= '9'):
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a digit, but found %r" % ch, self.get_mark())
+ length = 0
+ while '0' <= self.peek(length) <= '9':
+ length += 1
+ value = int(self.prefix(length))
+ self.forward(length)
+ return value
+
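+    # For illustration, a %YAML directive scanned by the methods above must
+    # be followed by an explicit document start:
+    #
+    #     >>> import yaml
+    #     >>> yaml.safe_load('%YAML 1.1\n--- {a: 1}')
+    #     {'a': 1}
+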
+ def scan_tag_directive_value(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ handle = self.scan_tag_directive_handle(start_mark)
+ while self.peek() == ' ':
+ self.forward()
+ prefix = self.scan_tag_directive_prefix(start_mark)
+ return (handle, prefix)
+
+ def scan_tag_directive_handle(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_handle('directive', start_mark)
+ ch = self.peek()
+ if ch != ' ':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_tag_directive_prefix(self, start_mark):
+ # See the specification for details.
+ value = self.scan_tag_uri('directive', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ return value
+
+ def scan_directive_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a directive", start_mark,
+ "expected a comment or a line break, but found %r"
+ % ch, self.get_mark())
+ self.scan_line_break()
+
+ def scan_anchor(self, TokenClass):
+ # The specification does not restrict characters for anchors and
+ # aliases. This may lead to problems, for instance, the document:
+ # [ *alias, value ]
+ # can be interpreted in two ways, as
+ # [ "value" ]
+ # and
+ # [ *alias , "value" ]
+        # Therefore we restrict anchor and alias names to ASCII
+        # alphanumeric characters plus '-' and '_'.
+ start_mark = self.get_mark()
+ indicator = self.peek()
+ if indicator == '*':
+ name = 'alias'
+ else:
+ name = 'anchor'
+ self.forward()
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if not length:
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ value = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch not in '\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
+ raise ScannerError("while scanning an %s" % name, start_mark,
+ "expected alphabetic or numeric character, but found %r"
+ % ch, self.get_mark())
+ end_mark = self.get_mark()
+ return TokenClass(value, start_mark, end_mark)
+
+ def scan_tag(self):
+ # See the specification for details.
+ start_mark = self.get_mark()
+ ch = self.peek(1)
+ if ch == '<':
+ handle = None
+ self.forward(2)
+ suffix = self.scan_tag_uri('tag', start_mark)
+ if self.peek() != '>':
+                raise ScannerError("while scanning a tag", start_mark,
+ "expected '>', but found %r" % self.peek(),
+ self.get_mark())
+ self.forward()
+ elif ch in '\0 \t\r\n\x85\u2028\u2029':
+ handle = None
+ suffix = '!'
+ self.forward()
+ else:
+ length = 1
+ use_handle = False
+ while ch not in '\0 \r\n\x85\u2028\u2029':
+ if ch == '!':
+ use_handle = True
+ break
+ length += 1
+ ch = self.peek(length)
+            if use_handle:
+                handle = self.scan_tag_handle('tag', start_mark)
+            else:
+                handle = '!'
+ self.forward()
+ suffix = self.scan_tag_uri('tag', start_mark)
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a tag", start_mark,
+ "expected ' ', but found %r" % ch, self.get_mark())
+ value = (handle, suffix)
+ end_mark = self.get_mark()
+ return TagToken(value, start_mark, end_mark)
+
+ def scan_block_scalar(self, style):
+ # See the specification for details.
+
+ if style == '>':
+ folded = True
+ else:
+ folded = False
+
+ chunks = []
+ start_mark = self.get_mark()
+
+ # Scan the header.
+ self.forward()
+ chomping, increment = self.scan_block_scalar_indicators(start_mark)
+ self.scan_block_scalar_ignored_line(start_mark)
+
+ # Determine the indentation level and go to the first non-empty line.
+ min_indent = self.indent+1
+ if min_indent < 1:
+ min_indent = 1
+ if increment is None:
+ breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
+ indent = max(min_indent, max_indent)
+ else:
+ indent = min_indent+increment-1
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ line_break = ''
+
+ # Scan the inner part of the block scalar.
+ while self.column == indent and self.peek() != '\0':
+ chunks.extend(breaks)
+ leading_non_space = self.peek() not in ' \t'
+ length = 0
+ while self.peek(length) not in '\0\r\n\x85\u2028\u2029':
+ length += 1
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ line_break = self.scan_line_break()
+ breaks, end_mark = self.scan_block_scalar_breaks(indent)
+ if self.column == indent and self.peek() != '\0':
+
+ # Unfortunately, folding rules are ambiguous.
+ #
+ # This is the folding according to the specification:
+
+ if folded and line_break == '\n' \
+ and leading_non_space and self.peek() not in ' \t':
+ if not breaks:
+ chunks.append(' ')
+ else:
+ chunks.append(line_break)
+
+ # This is Clark Evans's interpretation (also in the spec
+ # examples):
+ #
+ #if folded and line_break == '\n':
+ # if not breaks:
+ # if self.peek() not in ' \t':
+ # chunks.append(' ')
+ # else:
+ # chunks.append(line_break)
+ #else:
+ # chunks.append(line_break)
+ else:
+ break
+
+ # Chomp the tail.
+ if chomping is not False:
+ chunks.append(line_break)
+ if chomping is True:
+ chunks.extend(breaks)
+
+ # We are done.
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
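+    # For illustration, the literal and folded styles handled above differ
+    # only in how line breaks inside the scalar are treated:
+    #
+    #     >>> import yaml
+    #     >>> yaml.safe_load('x: |\n  a\n  b\n')
+    #     {'x': 'a\nb\n'}
+    #     >>> yaml.safe_load('x: >\n  a\n  b\n')
+    #     {'x': 'a b\n'}
+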
+ def scan_block_scalar_indicators(self, start_mark):
+ # See the specification for details.
+ chomping = None
+ increment = None
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ elif ch in '0123456789':
+ increment = int(ch)
+ if increment == 0:
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected indentation indicator in the range 1-9, but found 0",
+ self.get_mark())
+ self.forward()
+ ch = self.peek()
+ if ch in '+-':
+ if ch == '+':
+ chomping = True
+ else:
+ chomping = False
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0 \r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected chomping or indentation indicators, but found %r"
+ % ch, self.get_mark())
+ return chomping, increment
+
+ def scan_block_scalar_ignored_line(self, start_mark):
+ # See the specification for details.
+ while self.peek() == ' ':
+ self.forward()
+ if self.peek() == '#':
+ while self.peek() not in '\0\r\n\x85\u2028\u2029':
+ self.forward()
+ ch = self.peek()
+ if ch not in '\0\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a block scalar", start_mark,
+ "expected a comment or a line break, but found %r" % ch,
+ self.get_mark())
+ self.scan_line_break()
+
+ def scan_block_scalar_indentation(self):
+ # See the specification for details.
+ chunks = []
+ max_indent = 0
+ end_mark = self.get_mark()
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() != ' ':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ else:
+ self.forward()
+ if self.column > max_indent:
+ max_indent = self.column
+ return chunks, max_indent, end_mark
+
+ def scan_block_scalar_breaks(self, indent):
+ # See the specification for details.
+ chunks = []
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ while self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ end_mark = self.get_mark()
+ while self.column < indent and self.peek() == ' ':
+ self.forward()
+ return chunks, end_mark
+
+ def scan_flow_scalar(self, style):
+ # See the specification for details.
+        # Note that we relax the indentation rules for quoted scalars. Quoted
+        # scalars do not need to adhere to the indentation, because the " and
+        # ' characters clearly mark their beginning and end. Therefore we are
+        # less restrictive than the specification requires. We only need to check
+ # that document separators are not included in scalars.
+ if style == '"':
+ double = True
+ else:
+ double = False
+ chunks = []
+ start_mark = self.get_mark()
+ quote = self.peek()
+ self.forward()
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ while self.peek() != quote:
+ chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
+ chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
+ self.forward()
+ end_mark = self.get_mark()
+ return ScalarToken(''.join(chunks), False, start_mark, end_mark,
+ style)
+
+ ESCAPE_REPLACEMENTS = {
+ '0': '\0',
+ 'a': '\x07',
+ 'b': '\x08',
+ 't': '\x09',
+ '\t': '\x09',
+ 'n': '\x0A',
+ 'v': '\x0B',
+ 'f': '\x0C',
+ 'r': '\x0D',
+ 'e': '\x1B',
+ ' ': '\x20',
+ '\"': '\"',
+ '\\': '\\',
+ '/': '/',
+ 'N': '\x85',
+ '_': '\xA0',
+ 'L': '\u2028',
+ 'P': '\u2029',
+ }
+
+ ESCAPE_CODES = {
+ 'x': 2,
+ 'u': 4,
+ 'U': 8,
+ }
+
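+    # The two tables above drive escape handling in
+    # scan_flow_scalar_non_spaces() below; for instance:
+    #
+    #     >>> import yaml
+    #     >>> yaml.safe_load(r'"a\x41\u0042"')
+    #     'aAB'
+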
+ def scan_flow_scalar_non_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ length = 0
+ while self.peek(length) not in '\'\"\\\0 \t\r\n\x85\u2028\u2029':
+ length += 1
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ ch = self.peek()
+ if not double and ch == '\'' and self.peek(1) == '\'':
+ chunks.append('\'')
+ self.forward(2)
+ elif (double and ch == '\'') or (not double and ch in '\"\\'):
+ chunks.append(ch)
+ self.forward()
+ elif double and ch == '\\':
+ self.forward()
+ ch = self.peek()
+ if ch in self.ESCAPE_REPLACEMENTS:
+ chunks.append(self.ESCAPE_REPLACEMENTS[ch])
+ self.forward()
+ elif ch in self.ESCAPE_CODES:
+ length = self.ESCAPE_CODES[ch]
+ self.forward()
+ for k in range(length):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+                                "expected escape sequence of %d hexadecimal digits, but found %r" %
+ (length, self.peek(k)), self.get_mark())
+ code = int(self.prefix(length), 16)
+ chunks.append(chr(code))
+ self.forward(length)
+ elif ch in '\r\n\x85\u2028\u2029':
+ self.scan_line_break()
+ chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
+ else:
+ raise ScannerError("while scanning a double-quoted scalar", start_mark,
+ "found unknown escape character %r" % ch, self.get_mark())
+ else:
+ return chunks
+
+ def scan_flow_scalar_spaces(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ length = 0
+ while self.peek(length) in ' \t':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch == '\0':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected end of stream", self.get_mark())
+ elif ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ breaks = self.scan_flow_scalar_breaks(double, start_mark)
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ else:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_flow_scalar_breaks(self, double, start_mark):
+ # See the specification for details.
+ chunks = []
+ while True:
+ # Instead of checking indentation, we check for document
+ # separators.
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ raise ScannerError("while scanning a quoted scalar", start_mark,
+ "found unexpected document separator", self.get_mark())
+ while self.peek() in ' \t':
+ self.forward()
+ if self.peek() in '\r\n\x85\u2028\u2029':
+ chunks.append(self.scan_line_break())
+ else:
+ return chunks
+
+ def scan_plain(self):
+ # See the specification for details.
+ # We add an additional restriction for the flow context:
+ # plain scalars in the flow context cannot contain ',' or '?'.
+ # We also keep track of the `allow_simple_key` flag here.
+        # Indentation rules are relaxed for the flow context.
+ chunks = []
+ start_mark = self.get_mark()
+ end_mark = start_mark
+ indent = self.indent+1
+ # We allow zero indentation for scalars, but then we need to check for
+ # document separators at the beginning of the line.
+ #if indent == 0:
+ # indent = 1
+ spaces = []
+ while True:
+ length = 0
+ if self.peek() == '#':
+ break
+ while True:
+ ch = self.peek(length)
+ if ch in '\0 \t\r\n\x85\u2028\u2029' \
+ or (ch == ':' and
+ self.peek(length+1) in '\0 \t\r\n\x85\u2028\u2029'
+ + (u',[]{}' if self.flow_level else u''))\
+ or (self.flow_level and ch in ',?[]{}'):
+ break
+ length += 1
+ if length == 0:
+ break
+ self.allow_simple_key = False
+ chunks.extend(spaces)
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ end_mark = self.get_mark()
+ spaces = self.scan_plain_spaces(indent, start_mark)
+ if not spaces or self.peek() == '#' \
+ or (not self.flow_level and self.column < indent):
+ break
+ return ScalarToken(''.join(chunks), True, start_mark, end_mark)
+
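+    # For illustration, a plain scalar may span lines; the breaks collected
+    # by scan_plain_spaces() below are folded into single spaces:
+    #
+    #     >>> import yaml
+    #     >>> yaml.safe_load('a: one\n   two\n')
+    #     {'a': 'one two'}
+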
+ def scan_plain_spaces(self, indent, start_mark):
+ # See the specification for details.
+ # The specification is really confusing about tabs in plain scalars.
+ # We just forbid them completely. Do not use tabs in YAML!
+ chunks = []
+ length = 0
+ while self.peek(length) in ' ':
+ length += 1
+ whitespaces = self.prefix(length)
+ self.forward(length)
+ ch = self.peek()
+ if ch in '\r\n\x85\u2028\u2029':
+ line_break = self.scan_line_break()
+ self.allow_simple_key = True
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ breaks = []
+ while self.peek() in ' \r\n\x85\u2028\u2029':
+ if self.peek() == ' ':
+ self.forward()
+ else:
+ breaks.append(self.scan_line_break())
+ prefix = self.prefix(3)
+ if (prefix == '---' or prefix == '...') \
+ and self.peek(3) in '\0 \t\r\n\x85\u2028\u2029':
+ return
+ if line_break != '\n':
+ chunks.append(line_break)
+ elif not breaks:
+ chunks.append(' ')
+ chunks.extend(breaks)
+ elif whitespaces:
+ chunks.append(whitespaces)
+ return chunks
+
+ def scan_tag_handle(self, name, start_mark):
+ # See the specification for details.
+        # For some strange reason, the specification does not allow '_' in
+ # tag handles. I have allowed it anyway.
+ ch = self.peek()
+ if ch != '!':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length = 1
+ ch = self.peek(length)
+ if ch != ' ':
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-_':
+ length += 1
+ ch = self.peek(length)
+ if ch != '!':
+ self.forward(length)
+ raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected '!', but found %r" % ch, self.get_mark())
+ length += 1
+ value = self.prefix(length)
+ self.forward(length)
+ return value
+
+ def scan_tag_uri(self, name, start_mark):
+ # See the specification for details.
+ # Note: we do not check if URI is well-formed.
+ chunks = []
+ length = 0
+ ch = self.peek(length)
+ while '0' <= ch <= '9' or 'A' <= ch <= 'Z' or 'a' <= ch <= 'z' \
+ or ch in '-;/?:@&=+$,_.!~*\'()[]%':
+ if ch == '%':
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ chunks.append(self.scan_uri_escapes(name, start_mark))
+ else:
+ length += 1
+ ch = self.peek(length)
+ if length:
+ chunks.append(self.prefix(length))
+ self.forward(length)
+ length = 0
+ if not chunks:
+            raise ScannerError("while scanning a %s" % name, start_mark,
+ "expected URI, but found %r" % ch, self.get_mark())
+ return ''.join(chunks)
+
+ def scan_uri_escapes(self, name, start_mark):
+ # See the specification for details.
+ codes = []
+ mark = self.get_mark()
+ while self.peek() == '%':
+ self.forward()
+ for k in range(2):
+ if self.peek(k) not in '0123456789ABCDEFabcdef':
+ raise ScannerError("while scanning a %s" % name, start_mark,
+                        "expected URI escape sequence of 2 hexadecimal digits, but found %r"
+ % self.peek(k), self.get_mark())
+ codes.append(int(self.prefix(2), 16))
+ self.forward(2)
+ try:
+ value = bytes(codes).decode('utf-8')
+ except UnicodeDecodeError as exc:
+ raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
+ return value
+
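+    # For illustration, the tag scanners above produce (handle, suffix)
+    # pairs:
+    #
+    #     >>> import yaml
+    #     >>> [t.value for t in yaml.scan('!!str 5') if t.id == '<tag>']
+    #     [('!!', 'str')]
+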
+ def scan_line_break(self):
+        # Transforms:
+        #   '\r\n'   :  '\n'
+        #   '\r'     :  '\n'
+        #   '\n'     :  '\n'
+        #   '\x85'   :  '\n'
+        #   '\u2028' :  '\u2028'
+        #   '\u2029' :  '\u2029'
+        #   default  :  ''
+ ch = self.peek()
+ if ch in '\r\n\x85':
+ if self.prefix(2) == '\r\n':
+ self.forward(2)
+ else:
+ self.forward()
+ return '\n'
+ elif ch in '\u2028\u2029':
+ self.forward()
+ return ch
+ return ''
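+
+    # Since every recognized break is normalized here, documents differing
+    # only in line endings load identically:
+    #
+    #     >>> import yaml
+    #     >>> yaml.safe_load('a: 1\r\nb: 2\r') == yaml.safe_load('a: 1\nb: 2\n')
+    #     True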
diff --git a/third_party/python/PyYAML/lib3/yaml/serializer.py b/third_party/python/PyYAML/lib3/yaml/serializer.py
new file mode 100644
index 0000000000..fe911e67ae
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/serializer.py
@@ -0,0 +1,111 @@
+
+__all__ = ['Serializer', 'SerializerError']
+
+from .error import YAMLError
+from .events import *
+from .nodes import *
+
+class SerializerError(YAMLError):
+ pass
+
+class Serializer:
+
+ ANCHOR_TEMPLATE = 'id%03d'
+
+ def __init__(self, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ self.use_encoding = encoding
+ self.use_explicit_start = explicit_start
+ self.use_explicit_end = explicit_end
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+ self.closed = None
+
+ def open(self):
+ if self.closed is None:
+ self.emit(StreamStartEvent(encoding=self.use_encoding))
+ self.closed = False
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError("serializer is already opened")
+
+ def close(self):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif not self.closed:
+ self.emit(StreamEndEvent())
+ self.closed = True
+
+ #def __del__(self):
+ # self.close()
+
+ def serialize(self, node):
+ if self.closed is None:
+ raise SerializerError("serializer is not opened")
+ elif self.closed:
+ raise SerializerError("serializer is closed")
+ self.emit(DocumentStartEvent(explicit=self.use_explicit_start,
+ version=self.use_version, tags=self.use_tags))
+ self.anchor_node(node)
+ self.serialize_node(node, None, None)
+ self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_anchor_id = 0
+
+ def anchor_node(self, node):
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.anchors[node] = self.generate_anchor(node)
+ else:
+ self.anchors[node] = None
+ if isinstance(node, SequenceNode):
+ for item in node.value:
+ self.anchor_node(item)
+ elif isinstance(node, MappingNode):
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
+
+ def generate_anchor(self, node):
+ self.last_anchor_id += 1
+ return self.ANCHOR_TEMPLATE % self.last_anchor_id
+
+ def serialize_node(self, node, parent, index):
+ alias = self.anchors[node]
+ if node in self.serialized_nodes:
+ self.emit(AliasEvent(alias))
+ else:
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if isinstance(node, ScalarNode):
+ detected_tag = self.resolve(ScalarNode, node.value, (True, False))
+ default_tag = self.resolve(ScalarNode, node.value, (False, True))
+ implicit = (node.tag == detected_tag), (node.tag == default_tag)
+ self.emit(ScalarEvent(alias, node.tag, implicit, node.value,
+ style=node.style))
+ elif isinstance(node, SequenceNode):
+ implicit = (node.tag
+ == self.resolve(SequenceNode, node.value, True))
+ self.emit(SequenceStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ index = 0
+ for item in node.value:
+ self.serialize_node(item, node, index)
+ index += 1
+ self.emit(SequenceEndEvent())
+ elif isinstance(node, MappingNode):
+ implicit = (node.tag
+ == self.resolve(MappingNode, node.value, True))
+ self.emit(MappingStartEvent(alias, node.tag, implicit,
+ flow_style=node.flow_style))
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
+ self.emit(MappingEndEvent())
+ self.ascend_resolver()
+
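+    # For illustration, the anchor_node()/serialize_node() passes above are
+    # what lets yaml.dump() emit anchors and aliases for shared nodes:
+    #
+    #     >>> import yaml
+    #     >>> shared = {'k': 1}
+    #     >>> print(yaml.dump({'a': shared, 'b': shared}))
+    #     a: &id001
+    #       k: 1
+    #     b: *id001
+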
diff --git a/third_party/python/PyYAML/lib3/yaml/tokens.py b/third_party/python/PyYAML/lib3/yaml/tokens.py
new file mode 100644
index 0000000000..4d0b48a394
--- /dev/null
+++ b/third_party/python/PyYAML/lib3/yaml/tokens.py
@@ -0,0 +1,104 @@
+
+class Token(object):
+ def __init__(self, start_mark, end_mark):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ def __repr__(self):
+ attributes = [key for key in self.__dict__
+ if not key.endswith('_mark')]
+ attributes.sort()
+ arguments = ', '.join(['%s=%r' % (key, getattr(self, key))
+ for key in attributes])
+ return '%s(%s)' % (self.__class__.__name__, arguments)
+
+#class BOMToken(Token):
+# id = '<byte order mark>'
+
+class DirectiveToken(Token):
+ id = '<directive>'
+ def __init__(self, name, value, start_mark, end_mark):
+ self.name = name
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class DocumentStartToken(Token):
+ id = '<document start>'
+
+class DocumentEndToken(Token):
+ id = '<document end>'
+
+class StreamStartToken(Token):
+ id = '<stream start>'
+ def __init__(self, start_mark=None, end_mark=None,
+ encoding=None):
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.encoding = encoding
+
+class StreamEndToken(Token):
+ id = '<stream end>'
+
+class BlockSequenceStartToken(Token):
+ id = '<block sequence start>'
+
+class BlockMappingStartToken(Token):
+ id = '<block mapping start>'
+
+class BlockEndToken(Token):
+ id = '<block end>'
+
+class FlowSequenceStartToken(Token):
+ id = '['
+
+class FlowMappingStartToken(Token):
+ id = '{'
+
+class FlowSequenceEndToken(Token):
+ id = ']'
+
+class FlowMappingEndToken(Token):
+ id = '}'
+
+class KeyToken(Token):
+ id = '?'
+
+class ValueToken(Token):
+ id = ':'
+
+class BlockEntryToken(Token):
+ id = '-'
+
+class FlowEntryToken(Token):
+ id = ','
+
+class AliasToken(Token):
+ id = '<alias>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class AnchorToken(Token):
+ id = '<anchor>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class TagToken(Token):
+ id = '<tag>'
+ def __init__(self, value, start_mark, end_mark):
+ self.value = value
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+
+class ScalarToken(Token):
+ id = '<scalar>'
+ def __init__(self, value, plain, start_mark, end_mark, style=None):
+ self.value = value
+ self.plain = plain
+ self.start_mark = start_mark
+ self.end_mark = end_mark
+ self.style = style
+
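+# For illustration, Token.__repr__ above lists all attributes except the
+# marks, sorted by name:
+#
+#     >>> from yaml.tokens import ScalarToken
+#     >>> ScalarToken('a', True, None, None)
+#     ScalarToken(plain=True, style=None, value='a')
+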
diff --git a/third_party/python/PyYAML/pyproject.toml b/third_party/python/PyYAML/pyproject.toml
new file mode 100644
index 0000000000..2bf5ec8096
--- /dev/null
+++ b/third_party/python/PyYAML/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools", "wheel", "Cython"]
+build-backend = "setuptools.build_meta"
diff --git a/third_party/python/PyYAML/setup.cfg b/third_party/python/PyYAML/setup.cfg
new file mode 100644
index 0000000000..da51366b86
--- /dev/null
+++ b/third_party/python/PyYAML/setup.cfg
@@ -0,0 +1,9 @@
+[build_ext]
+
+[metadata]
+license_file = LICENSE
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/third_party/python/PyYAML/setup.py b/third_party/python/PyYAML/setup.py
new file mode 100644
index 0000000000..d7476c61c6
--- /dev/null
+++ b/third_party/python/PyYAML/setup.py
@@ -0,0 +1,296 @@
+
+NAME = 'PyYAML'
+VERSION = '5.4.1'
+DESCRIPTION = "YAML parser and emitter for Python"
+LONG_DESCRIPTION = """\
+YAML is a data serialization format designed for human readability
+and interaction with scripting languages. PyYAML is a YAML parser
+and emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, a capable extension API, and sensible error messages. PyYAML
+supports standard YAML tags and provides Python-specific tags that
+allow representing an arbitrary Python object.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence."""
+AUTHOR = "Kirill Simonov"
+AUTHOR_EMAIL = 'xi@resolvent.net'
+LICENSE = "MIT"
+PLATFORMS = "Any"
+URL = "https://pyyaml.org/"
+DOWNLOAD_URL = "https://pypi.org/project/PyYAML/"
+CLASSIFIERS = [
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: MIT License",
+ "Operating System :: OS Independent",
+ "Programming Language :: Cython",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 2",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: Implementation :: CPython",
+ "Programming Language :: Python :: Implementation :: PyPy",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Text Processing :: Markup",
+]
+PROJECT_URLS = {
+ 'Bug Tracker': 'https://github.com/yaml/pyyaml/issues',
+ 'CI': 'https://github.com/yaml/pyyaml/actions',
+ 'Documentation': 'https://pyyaml.org/wiki/PyYAMLDocumentation',
+ 'Mailing lists': 'http://lists.sourceforge.net/lists/listinfo/yaml-core',
+ 'Source Code': 'https://github.com/yaml/pyyaml',
+}
+
+LIBYAML_CHECK = """
+#include <yaml.h>
+
+int main(void) {
+ yaml_parser_t parser;
+ yaml_emitter_t emitter;
+
+ yaml_parser_initialize(&parser);
+ yaml_parser_delete(&parser);
+
+ yaml_emitter_initialize(&emitter);
+ yaml_emitter_delete(&emitter);
+
+ return 0;
+}
+"""
+
+
+import sys, os, os.path, platform, warnings
+
+from distutils import log
+from setuptools import setup, Command, Distribution as _Distribution, Extension as _Extension
+from setuptools.command.build_ext import build_ext as _build_ext
+from distutils.errors import DistutilsError, CompileError, LinkError, DistutilsPlatformError
+
+with_cython = False
+if 'sdist' in sys.argv or os.environ.get('PYYAML_FORCE_CYTHON') == '1':
+ # we need cython here
+ with_cython = True
+try:
+ from Cython.Distutils.extension import Extension as _Extension
+ from Cython.Distutils import build_ext as _build_ext
+ with_cython = True
+except ImportError:
+ if with_cython:
+ raise
+
+try:
+ from wheel.bdist_wheel import bdist_wheel
+except ImportError:
+ bdist_wheel = None
+
+
+# on Windows, disable wheel generation warning noise
+windows_ignore_warnings = [
+"Unknown distribution option: 'python_requires'",
+"Config variable 'Py_DEBUG' is unset",
+"Config variable 'WITH_PYMALLOC' is unset",
+"Config variable 'Py_UNICODE_SIZE' is unset",
+"Cython directive 'language_level' not set"
+]
+
+if platform.system() == 'Windows':
+ for w in windows_ignore_warnings:
+ warnings.filterwarnings('ignore', w)
+
+
+class Distribution(_Distribution):
+ def __init__(self, attrs=None):
+ _Distribution.__init__(self, attrs)
+ if not self.ext_modules:
+ return
+ for idx in range(len(self.ext_modules)-1, -1, -1):
+ ext = self.ext_modules[idx]
+ if not isinstance(ext, Extension):
+ continue
+ setattr(self, ext.attr_name, None)
+ self.global_options = [
+ (ext.option_name, None,
+ "include %s (default if %s is available)"
+ % (ext.feature_description, ext.feature_name)),
+ (ext.neg_option_name, None,
+ "exclude %s" % ext.feature_description),
+ ] + self.global_options
+ self.negative_opt = self.negative_opt.copy()
+ self.negative_opt[ext.neg_option_name] = ext.option_name
+
+ def has_ext_modules(self):
+ if not self.ext_modules:
+ return False
+ for ext in self.ext_modules:
+ with_ext = self.ext_status(ext)
+ if with_ext is None or with_ext:
+ return True
+ return False
+
+ def ext_status(self, ext):
+ implementation = platform.python_implementation()
+ if implementation not in ['CPython', 'PyPy']:
+ return False
+ if isinstance(ext, Extension):
+ # the "build by default" behavior is implemented by this returning None
+ with_ext = getattr(self, ext.attr_name) or os.environ.get('PYYAML_FORCE_{0}'.format(ext.feature_name.upper()))
+ try:
+ with_ext = int(with_ext) # attempt coerce envvar to int
+ except TypeError:
+ pass
+ return with_ext
+ else:
+ return True
+
+
+class Extension(_Extension):
+
+ def __init__(self, name, sources, feature_name, feature_description,
+ feature_check, **kwds):
+ if not with_cython:
+ for filename in sources[:]:
+ base, ext = os.path.splitext(filename)
+ if ext == '.pyx':
+ sources.remove(filename)
+ sources.append('%s.c' % base)
+ _Extension.__init__(self, name, sources, **kwds)
+ self.feature_name = feature_name
+ self.feature_description = feature_description
+ self.feature_check = feature_check
+ self.attr_name = 'with_' + feature_name.replace('-', '_')
+ self.option_name = 'with-' + feature_name
+ self.neg_option_name = 'without-' + feature_name
+
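+# For illustration, the Distribution/Extension machinery above turns each
+# feature into a pair of global setup.py flags plus an environment override;
+# for the 'libyaml' feature declared at the bottom of this file that means:
+#
+#     python setup.py --with-libyaml build_ext
+#     python setup.py --without-libyaml install
+#     PYYAML_FORCE_LIBYAML=1 pip install .
+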
+
+class build_ext(_build_ext):
+
+ def run(self):
+ optional = True
+ disabled = True
+ for ext in self.extensions:
+ with_ext = self.distribution.ext_status(ext)
+ if with_ext is None:
+ disabled = False
+ elif with_ext:
+ optional = False
+ disabled = False
+ break
+ if disabled:
+ return
+ try:
+ _build_ext.run(self)
+ except DistutilsPlatformError:
+ exc = sys.exc_info()[1]
+ if optional:
+ log.warn(str(exc))
+ log.warn("skipping build_ext")
+ else:
+ raise
+
+ def get_source_files(self):
+ self.check_extensions_list(self.extensions)
+ filenames = []
+ for ext in self.extensions:
+ if with_cython:
+ self.cython_sources(ext.sources, ext)
+ for filename in ext.sources:
+ filenames.append(filename)
+ base = os.path.splitext(filename)[0]
+ for ext in ['c', 'h', 'pyx', 'pxd']:
+ filename = '%s.%s' % (base, ext)
+ if filename not in filenames and os.path.isfile(filename):
+ filenames.append(filename)
+ return filenames
+
+ def get_outputs(self):
+ self.check_extensions_list(self.extensions)
+ outputs = []
+ for ext in self.extensions:
+ fullname = self.get_ext_fullname(ext.name)
+ filename = os.path.join(self.build_lib,
+ self.get_ext_filename(fullname))
+ if os.path.isfile(filename):
+ outputs.append(filename)
+ return outputs
+
+ def build_extensions(self):
+ self.check_extensions_list(self.extensions)
+ for ext in self.extensions:
+ with_ext = self.distribution.ext_status(ext)
+ if with_ext is not None and not with_ext:
+ continue
+ if with_cython:
+ ext.sources = self.cython_sources(ext.sources, ext)
+ try:
+ self.build_extension(ext)
+ except (CompileError, LinkError):
+ if with_ext is not None:
+ raise
+ log.warn("Error compiling module, falling back to pure Python")
+
+
+class test(Command):
+
+ user_options = []
+
+ def initialize_options(self):
+ pass
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ build_cmd = self.get_finalized_command('build')
+ build_cmd.run()
+ sys.path.insert(0, build_cmd.build_lib)
+ if sys.version_info[0] < 3:
+ sys.path.insert(0, 'tests/lib')
+ else:
+ sys.path.insert(0, 'tests/lib3')
+ import test_all
+ if not test_all.main([]):
+ raise DistutilsError("Tests failed")
+
+
+cmdclass = {
+ 'build_ext': build_ext,
+ 'test': test,
+}
+if bdist_wheel:
+ cmdclass['bdist_wheel'] = bdist_wheel
+
+
+if __name__ == '__main__':
+
+ setup(
+ name=NAME,
+ version=VERSION,
+ description=DESCRIPTION,
+ long_description=LONG_DESCRIPTION,
+ author=AUTHOR,
+ author_email=AUTHOR_EMAIL,
+ license=LICENSE,
+ platforms=PLATFORMS,
+ url=URL,
+ download_url=DOWNLOAD_URL,
+ classifiers=CLASSIFIERS,
+ project_urls=PROJECT_URLS,
+
+ package_dir={'': {2: 'lib', 3: 'lib3'}[sys.version_info[0]]},
+ packages=['yaml', '_yaml'],
+ ext_modules=[
+ Extension('yaml._yaml', ['yaml/_yaml.pyx'],
+ 'libyaml', "LibYAML bindings", LIBYAML_CHECK,
+ libraries=['yaml']),
+ ],
+
+ distclass=Distribution,
+ cmdclass=cmdclass,
+ python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*',
+ )
diff --git a/third_party/python/PyYAML/yaml/__init__.pxd b/third_party/python/PyYAML/yaml/__init__.pxd
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/PyYAML/yaml/__init__.pxd
diff --git a/third_party/python/PyYAML/yaml/_yaml.h b/third_party/python/PyYAML/yaml/_yaml.h
new file mode 100644
index 0000000000..21fd6a991b
--- /dev/null
+++ b/third_party/python/PyYAML/yaml/_yaml.h
@@ -0,0 +1,23 @@
+
+#include <yaml.h>
+
+#if PY_MAJOR_VERSION < 3
+
+#define PyUnicode_FromString(s) PyUnicode_DecodeUTF8((s), strlen(s), "strict")
+
+#else
+
+#define PyString_CheckExact PyBytes_CheckExact
+#define PyString_AS_STRING PyBytes_AS_STRING
+#define PyString_GET_SIZE PyBytes_GET_SIZE
+#define PyString_FromStringAndSize PyBytes_FromStringAndSize
+
+#endif
+
+#ifdef _MSC_VER /* MS Visual C++ 6.0 */
+#if _MSC_VER == 1200
+
+#define PyLong_FromUnsignedLongLong(z) PyInt_FromLong(z)
+
+#endif
+#endif
diff --git a/third_party/python/PyYAML/yaml/_yaml.pxd b/third_party/python/PyYAML/yaml/_yaml.pxd
new file mode 100644
index 0000000000..7937c9db51
--- /dev/null
+++ b/third_party/python/PyYAML/yaml/_yaml.pxd
@@ -0,0 +1,251 @@
+
+cdef extern from "_yaml.h":
+
+ void malloc(int l)
+ void memcpy(char *d, char *s, int l)
+ int strlen(char *s)
+ int PyString_CheckExact(object o)
+ int PyUnicode_CheckExact(object o)
+ char *PyString_AS_STRING(object o)
+ int PyString_GET_SIZE(object o)
+ object PyString_FromStringAndSize(char *v, int l)
+ object PyUnicode_FromString(char *u)
+ object PyUnicode_DecodeUTF8(char *u, int s, char *e)
+ object PyUnicode_AsUTF8String(object o)
+ int PY_MAJOR_VERSION
+
+ ctypedef enum:
+ SIZEOF_VOID_P
+ ctypedef enum yaml_encoding_t:
+ YAML_ANY_ENCODING
+ YAML_UTF8_ENCODING
+ YAML_UTF16LE_ENCODING
+ YAML_UTF16BE_ENCODING
+ ctypedef enum yaml_break_t:
+ YAML_ANY_BREAK
+ YAML_CR_BREAK
+ YAML_LN_BREAK
+ YAML_CRLN_BREAK
+ ctypedef enum yaml_error_type_t:
+ YAML_NO_ERROR
+ YAML_MEMORY_ERROR
+ YAML_READER_ERROR
+ YAML_SCANNER_ERROR
+ YAML_PARSER_ERROR
+ YAML_WRITER_ERROR
+ YAML_EMITTER_ERROR
+ ctypedef enum yaml_scalar_style_t:
+ YAML_ANY_SCALAR_STYLE
+ YAML_PLAIN_SCALAR_STYLE
+ YAML_SINGLE_QUOTED_SCALAR_STYLE
+ YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ YAML_LITERAL_SCALAR_STYLE
+ YAML_FOLDED_SCALAR_STYLE
+ ctypedef enum yaml_sequence_style_t:
+ YAML_ANY_SEQUENCE_STYLE
+ YAML_BLOCK_SEQUENCE_STYLE
+ YAML_FLOW_SEQUENCE_STYLE
+ ctypedef enum yaml_mapping_style_t:
+ YAML_ANY_MAPPING_STYLE
+ YAML_BLOCK_MAPPING_STYLE
+ YAML_FLOW_MAPPING_STYLE
+ ctypedef enum yaml_token_type_t:
+ YAML_NO_TOKEN
+ YAML_STREAM_START_TOKEN
+ YAML_STREAM_END_TOKEN
+ YAML_VERSION_DIRECTIVE_TOKEN
+ YAML_TAG_DIRECTIVE_TOKEN
+ YAML_DOCUMENT_START_TOKEN
+ YAML_DOCUMENT_END_TOKEN
+ YAML_BLOCK_SEQUENCE_START_TOKEN
+ YAML_BLOCK_MAPPING_START_TOKEN
+ YAML_BLOCK_END_TOKEN
+ YAML_FLOW_SEQUENCE_START_TOKEN
+ YAML_FLOW_SEQUENCE_END_TOKEN
+ YAML_FLOW_MAPPING_START_TOKEN
+ YAML_FLOW_MAPPING_END_TOKEN
+ YAML_BLOCK_ENTRY_TOKEN
+ YAML_FLOW_ENTRY_TOKEN
+ YAML_KEY_TOKEN
+ YAML_VALUE_TOKEN
+ YAML_ALIAS_TOKEN
+ YAML_ANCHOR_TOKEN
+ YAML_TAG_TOKEN
+ YAML_SCALAR_TOKEN
+ ctypedef enum yaml_event_type_t:
+ YAML_NO_EVENT
+ YAML_STREAM_START_EVENT
+ YAML_STREAM_END_EVENT
+ YAML_DOCUMENT_START_EVENT
+ YAML_DOCUMENT_END_EVENT
+ YAML_ALIAS_EVENT
+ YAML_SCALAR_EVENT
+ YAML_SEQUENCE_START_EVENT
+ YAML_SEQUENCE_END_EVENT
+ YAML_MAPPING_START_EVENT
+ YAML_MAPPING_END_EVENT
+
+ ctypedef int yaml_read_handler_t(void *data, char *buffer,
+ size_t size, size_t *size_read) except 0
+
+ ctypedef int yaml_write_handler_t(void *data, char *buffer,
+ size_t size) except 0
+
+ ctypedef struct yaml_mark_t:
+ size_t index
+ size_t line
+ size_t column
+ ctypedef struct yaml_version_directive_t:
+ int major
+ int minor
+ ctypedef struct yaml_tag_directive_t:
+ char *handle
+ char *prefix
+
+ ctypedef struct _yaml_token_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_token_alias_data_t:
+ char *value
+ ctypedef struct _yaml_token_anchor_data_t:
+ char *value
+ ctypedef struct _yaml_token_tag_data_t:
+ char *handle
+ char *suffix
+ ctypedef struct _yaml_token_scalar_data_t:
+ char *value
+ size_t length
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_token_version_directive_data_t:
+ int major
+ int minor
+ ctypedef struct _yaml_token_tag_directive_data_t:
+ char *handle
+ char *prefix
+ ctypedef union _yaml_token_data_t:
+ _yaml_token_stream_start_data_t stream_start
+ _yaml_token_alias_data_t alias
+ _yaml_token_anchor_data_t anchor
+ _yaml_token_tag_data_t tag
+ _yaml_token_scalar_data_t scalar
+ _yaml_token_version_directive_data_t version_directive
+ _yaml_token_tag_directive_data_t tag_directive
+ ctypedef struct yaml_token_t:
+ yaml_token_type_t type
+ _yaml_token_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct _yaml_event_stream_start_data_t:
+ yaml_encoding_t encoding
+ ctypedef struct _yaml_event_document_start_data_tag_directives_t:
+ yaml_tag_directive_t *start
+ yaml_tag_directive_t *end
+ ctypedef struct _yaml_event_document_start_data_t:
+ yaml_version_directive_t *version_directive
+ _yaml_event_document_start_data_tag_directives_t tag_directives
+ int implicit
+ ctypedef struct _yaml_event_document_end_data_t:
+ int implicit
+ ctypedef struct _yaml_event_alias_data_t:
+ char *anchor
+ ctypedef struct _yaml_event_scalar_data_t:
+ char *anchor
+ char *tag
+ char *value
+ size_t length
+ int plain_implicit
+ int quoted_implicit
+ yaml_scalar_style_t style
+ ctypedef struct _yaml_event_sequence_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_sequence_style_t style
+ ctypedef struct _yaml_event_mapping_start_data_t:
+ char *anchor
+ char *tag
+ int implicit
+ yaml_mapping_style_t style
+ ctypedef union _yaml_event_data_t:
+ _yaml_event_stream_start_data_t stream_start
+ _yaml_event_document_start_data_t document_start
+ _yaml_event_document_end_data_t document_end
+ _yaml_event_alias_data_t alias
+ _yaml_event_scalar_data_t scalar
+ _yaml_event_sequence_start_data_t sequence_start
+ _yaml_event_mapping_start_data_t mapping_start
+ ctypedef struct yaml_event_t:
+ yaml_event_type_t type
+ _yaml_event_data_t data
+ yaml_mark_t start_mark
+ yaml_mark_t end_mark
+
+ ctypedef struct yaml_parser_t:
+ yaml_error_type_t error
+ char *problem
+ size_t problem_offset
+ int problem_value
+ yaml_mark_t problem_mark
+ char *context
+ yaml_mark_t context_mark
+
+ ctypedef struct yaml_emitter_t:
+ yaml_error_type_t error
+ char *problem
+
+ char *yaml_get_version_string()
+ void yaml_get_version(int *major, int *minor, int *patch)
+
+ void yaml_token_delete(yaml_token_t *token)
+
+ int yaml_stream_start_event_initialize(yaml_event_t *event,
+ yaml_encoding_t encoding)
+ int yaml_stream_end_event_initialize(yaml_event_t *event)
+ int yaml_document_start_event_initialize(yaml_event_t *event,
+ yaml_version_directive_t *version_directive,
+ yaml_tag_directive_t *tag_directives_start,
+ yaml_tag_directive_t *tag_directives_end,
+ int implicit)
+ int yaml_document_end_event_initialize(yaml_event_t *event,
+ int implicit)
+ int yaml_alias_event_initialize(yaml_event_t *event, char *anchor)
+ int yaml_scalar_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, char *value, size_t length,
+ int plain_implicit, int quoted_implicit,
+ yaml_scalar_style_t style)
+ int yaml_sequence_start_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, int implicit, yaml_sequence_style_t style)
+ int yaml_sequence_end_event_initialize(yaml_event_t *event)
+ int yaml_mapping_start_event_initialize(yaml_event_t *event,
+ char *anchor, char *tag, int implicit, yaml_mapping_style_t style)
+ int yaml_mapping_end_event_initialize(yaml_event_t *event)
+ void yaml_event_delete(yaml_event_t *event)
+
+ int yaml_parser_initialize(yaml_parser_t *parser)
+ void yaml_parser_delete(yaml_parser_t *parser)
+ void yaml_parser_set_input_string(yaml_parser_t *parser,
+ char *input, size_t size)
+ void yaml_parser_set_input(yaml_parser_t *parser,
+ yaml_read_handler_t *handler, void *data)
+ void yaml_parser_set_encoding(yaml_parser_t *parser,
+ yaml_encoding_t encoding)
+ int yaml_parser_scan(yaml_parser_t *parser, yaml_token_t *token) except *
+ int yaml_parser_parse(yaml_parser_t *parser, yaml_event_t *event) except *
+
+ int yaml_emitter_initialize(yaml_emitter_t *emitter)
+ void yaml_emitter_delete(yaml_emitter_t *emitter)
+ void yaml_emitter_set_output_string(yaml_emitter_t *emitter,
+ char *output, size_t size, size_t *size_written)
+ void yaml_emitter_set_output(yaml_emitter_t *emitter,
+ yaml_write_handler_t *handler, void *data)
+ void yaml_emitter_set_encoding(yaml_emitter_t *emitter,
+ yaml_encoding_t encoding)
+ void yaml_emitter_set_canonical(yaml_emitter_t *emitter, int canonical)
+ void yaml_emitter_set_indent(yaml_emitter_t *emitter, int indent)
+ void yaml_emitter_set_width(yaml_emitter_t *emitter, int width)
+ void yaml_emitter_set_unicode(yaml_emitter_t *emitter, int unicode)
+ void yaml_emitter_set_break(yaml_emitter_t *emitter,
+ yaml_break_t line_break)
+ int yaml_emitter_emit(yaml_emitter_t *emitter, yaml_event_t *event) except *
+ int yaml_emitter_flush(yaml_emitter_t *emitter)
+
diff --git a/third_party/python/PyYAML/yaml/_yaml.pyx b/third_party/python/PyYAML/yaml/_yaml.pyx
new file mode 100644
index 0000000000..ff4efe80b5
--- /dev/null
+++ b/third_party/python/PyYAML/yaml/_yaml.pyx
@@ -0,0 +1,1527 @@
+
+import yaml
+
+def get_version_string():
+ cdef char *value
+ value = yaml_get_version_string()
+ if PY_MAJOR_VERSION < 3:
+ return value
+ else:
+ return PyUnicode_FromString(value)
+
+def get_version():
+ cdef int major, minor, patch
+ yaml_get_version(&major, &minor, &patch)
+ return (major, minor, patch)
+
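+# For illustration, these helpers report the version of the libyaml the
+# extension was built against (the exact value depends on the build):
+#
+#     >>> from yaml import _yaml
+#     >>> _yaml.get_version()        # e.g. (0, 2, 5)
+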
+#Mark = yaml.error.Mark
+YAMLError = yaml.error.YAMLError
+ReaderError = yaml.reader.ReaderError
+ScannerError = yaml.scanner.ScannerError
+ParserError = yaml.parser.ParserError
+ComposerError = yaml.composer.ComposerError
+ConstructorError = yaml.constructor.ConstructorError
+EmitterError = yaml.emitter.EmitterError
+SerializerError = yaml.serializer.SerializerError
+RepresenterError = yaml.representer.RepresenterError
+
+StreamStartToken = yaml.tokens.StreamStartToken
+StreamEndToken = yaml.tokens.StreamEndToken
+DirectiveToken = yaml.tokens.DirectiveToken
+DocumentStartToken = yaml.tokens.DocumentStartToken
+DocumentEndToken = yaml.tokens.DocumentEndToken
+BlockSequenceStartToken = yaml.tokens.BlockSequenceStartToken
+BlockMappingStartToken = yaml.tokens.BlockMappingStartToken
+BlockEndToken = yaml.tokens.BlockEndToken
+FlowSequenceStartToken = yaml.tokens.FlowSequenceStartToken
+FlowMappingStartToken = yaml.tokens.FlowMappingStartToken
+FlowSequenceEndToken = yaml.tokens.FlowSequenceEndToken
+FlowMappingEndToken = yaml.tokens.FlowMappingEndToken
+KeyToken = yaml.tokens.KeyToken
+ValueToken = yaml.tokens.ValueToken
+BlockEntryToken = yaml.tokens.BlockEntryToken
+FlowEntryToken = yaml.tokens.FlowEntryToken
+AliasToken = yaml.tokens.AliasToken
+AnchorToken = yaml.tokens.AnchorToken
+TagToken = yaml.tokens.TagToken
+ScalarToken = yaml.tokens.ScalarToken
+
+StreamStartEvent = yaml.events.StreamStartEvent
+StreamEndEvent = yaml.events.StreamEndEvent
+DocumentStartEvent = yaml.events.DocumentStartEvent
+DocumentEndEvent = yaml.events.DocumentEndEvent
+AliasEvent = yaml.events.AliasEvent
+ScalarEvent = yaml.events.ScalarEvent
+SequenceStartEvent = yaml.events.SequenceStartEvent
+SequenceEndEvent = yaml.events.SequenceEndEvent
+MappingStartEvent = yaml.events.MappingStartEvent
+MappingEndEvent = yaml.events.MappingEndEvent
+
+ScalarNode = yaml.nodes.ScalarNode
+SequenceNode = yaml.nodes.SequenceNode
+MappingNode = yaml.nodes.MappingNode
+
+cdef class Mark:
+ cdef readonly object name
+ cdef readonly size_t index
+ cdef readonly size_t line
+ cdef readonly size_t column
+ cdef readonly buffer
+ cdef readonly pointer
+
+ def __init__(self, object name, size_t index, size_t line, size_t column,
+ object buffer, object pointer):
+ self.name = name
+ self.index = index
+ self.line = line
+ self.column = column
+ self.buffer = buffer
+ self.pointer = pointer
+
+ def get_snippet(self):
+ return None
+
+ def __str__(self):
+ where = " in \"%s\", line %d, column %d" \
+ % (self.name, self.line+1, self.column+1)
+ return where
+
+#class YAMLError(Exception):
+# pass
+#
+#class MarkedYAMLError(YAMLError):
+#
+# def __init__(self, context=None, context_mark=None,
+# problem=None, problem_mark=None, note=None):
+# self.context = context
+# self.context_mark = context_mark
+# self.problem = problem
+# self.problem_mark = problem_mark
+# self.note = note
+#
+# def __str__(self):
+# lines = []
+# if self.context is not None:
+# lines.append(self.context)
+# if self.context_mark is not None \
+# and (self.problem is None or self.problem_mark is None
+# or self.context_mark.name != self.problem_mark.name
+# or self.context_mark.line != self.problem_mark.line
+# or self.context_mark.column != self.problem_mark.column):
+# lines.append(str(self.context_mark))
+# if self.problem is not None:
+# lines.append(self.problem)
+# if self.problem_mark is not None:
+# lines.append(str(self.problem_mark))
+# if self.note is not None:
+# lines.append(self.note)
+# return '\n'.join(lines)
+#
+#class ReaderError(YAMLError):
+#
+# def __init__(self, name, position, character, encoding, reason):
+# self.name = name
+# self.character = character
+# self.position = position
+# self.encoding = encoding
+# self.reason = reason
+#
+# def __str__(self):
+# if isinstance(self.character, str):
+# return "'%s' codec can't decode byte #x%02x: %s\n" \
+# " in \"%s\", position %d" \
+# % (self.encoding, ord(self.character), self.reason,
+# self.name, self.position)
+# else:
+# return "unacceptable character #x%04x: %s\n" \
+# " in \"%s\", position %d" \
+# % (ord(self.character), self.reason,
+# self.name, self.position)
+#
+#class ScannerError(MarkedYAMLError):
+# pass
+#
+#class ParserError(MarkedYAMLError):
+# pass
+#
+#class EmitterError(YAMLError):
+# pass
+#
+#cdef class Token:
+# cdef readonly Mark start_mark
+# cdef readonly Mark end_mark
+# def __init__(self, Mark start_mark, Mark end_mark):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class StreamStartToken(Token):
+# cdef readonly object encoding
+# def __init__(self, Mark start_mark, Mark end_mark, encoding):
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.encoding = encoding
+#
+#cdef class StreamEndToken(Token):
+# pass
+#
+#cdef class DirectiveToken(Token):
+# cdef readonly object name
+# cdef readonly object value
+# def __init__(self, name, value, Mark start_mark, Mark end_mark):
+# self.name = name
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class DocumentStartToken(Token):
+# pass
+#
+#cdef class DocumentEndToken(Token):
+# pass
+#
+#cdef class BlockSequenceStartToken(Token):
+# pass
+#
+#cdef class BlockMappingStartToken(Token):
+# pass
+#
+#cdef class BlockEndToken(Token):
+# pass
+#
+#cdef class FlowSequenceStartToken(Token):
+# pass
+#
+#cdef class FlowMappingStartToken(Token):
+# pass
+#
+#cdef class FlowSequenceEndToken(Token):
+# pass
+#
+#cdef class FlowMappingEndToken(Token):
+# pass
+#
+#cdef class KeyToken(Token):
+# pass
+#
+#cdef class ValueToken(Token):
+# pass
+#
+#cdef class BlockEntryToken(Token):
+# pass
+#
+#cdef class FlowEntryToken(Token):
+# pass
+#
+#cdef class AliasToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class AnchorToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class TagToken(Token):
+# cdef readonly object value
+# def __init__(self, value, Mark start_mark, Mark end_mark):
+# self.value = value
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+#
+#cdef class ScalarToken(Token):
+# cdef readonly object value
+# cdef readonly object plain
+# cdef readonly object style
+# def __init__(self, value, plain, Mark start_mark, Mark end_mark, style=None):
+# self.value = value
+# self.plain = plain
+# self.start_mark = start_mark
+# self.end_mark = end_mark
+# self.style = style
+
+cdef class CParser:
+
+ cdef yaml_parser_t parser
+ cdef yaml_event_t parsed_event
+
+ cdef object stream
+ cdef object stream_name
+ cdef object current_token
+ cdef object current_event
+ cdef object anchors
+ cdef object stream_cache
+ cdef int stream_cache_len
+ cdef int stream_cache_pos
+ cdef int unicode_source
+
+ def __init__(self, stream):
+ cdef is_readable
+ if yaml_parser_initialize(&self.parser) == 0:
+ raise MemoryError
+ self.parsed_event.type = YAML_NO_EVENT
+ is_readable = 1
+ try:
+ stream.read
+ except AttributeError:
+ is_readable = 0
+ self.unicode_source = 0
+ if is_readable:
+ self.stream = stream
+ try:
+ self.stream_name = stream.name
+ except AttributeError:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<file>'
+ else:
+ self.stream_name = u'<file>'
+ self.stream_cache = None
+ self.stream_cache_len = 0
+ self.stream_cache_pos = 0
+ yaml_parser_set_input(&self.parser, input_handler, <void *>self)
+ else:
+ if PyUnicode_CheckExact(stream) != 0:
+ stream = PyUnicode_AsUTF8String(stream)
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<unicode string>'
+ else:
+ self.stream_name = u'<unicode string>'
+ self.unicode_source = 1
+ else:
+ if PY_MAJOR_VERSION < 3:
+ self.stream_name = '<byte string>'
+ else:
+ self.stream_name = u'<byte string>'
+ if PyString_CheckExact(stream) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string or stream input is required")
+ else:
+ raise TypeError(u"a string or stream input is required")
+ self.stream = stream
+ yaml_parser_set_input_string(&self.parser, PyString_AS_STRING(stream), PyString_GET_SIZE(stream))
+ self.current_token = None
+ self.current_event = None
+ self.anchors = {}
+
+ def __dealloc__(self):
+ yaml_parser_delete(&self.parser)
+ yaml_event_delete(&self.parsed_event)
+
+ def dispose(self):
+ pass
+
+ cdef object _parser_error(self):
+ if self.parser.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.parser.error == YAML_READER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, '?', self.parser.problem)
+ else:
+ return ReaderError(self.stream_name, self.parser.problem_offset,
+ self.parser.problem_value, u'?', PyUnicode_FromString(self.parser.problem))
+ elif self.parser.error == YAML_SCANNER_ERROR \
+ or self.parser.error == YAML_PARSER_ERROR:
+ context_mark = None
+ problem_mark = None
+ if self.parser.context != NULL:
+ context_mark = Mark(self.stream_name,
+ self.parser.context_mark.index,
+ self.parser.context_mark.line,
+ self.parser.context_mark.column, None, None)
+ if self.parser.problem != NULL:
+ problem_mark = Mark(self.stream_name,
+ self.parser.problem_mark.index,
+ self.parser.problem_mark.line,
+ self.parser.problem_mark.column, None, None)
+ context = None
+ if self.parser.context != NULL:
+ if PY_MAJOR_VERSION < 3:
+ context = self.parser.context
+ else:
+ context = PyUnicode_FromString(self.parser.context)
+ if PY_MAJOR_VERSION < 3:
+ problem = self.parser.problem
+ else:
+ problem = PyUnicode_FromString(self.parser.problem)
+ if self.parser.error == YAML_SCANNER_ERROR:
+ return ScannerError(context, context_mark, problem, problem_mark)
+ else:
+ return ParserError(context, context_mark, problem, problem_mark)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no parser error")
+ else:
+ raise ValueError(u"no parser error")
+
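+    # Scan the whole stream and return the number of tokens seen,
+    # without building Python token objects; mainly useful for
+    # benchmarking the scanner.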
+ def raw_scan(self):
+ cdef yaml_token_t token
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ if token.type == YAML_NO_TOKEN:
+ done = 1
+ else:
+ count = count+1
+ yaml_token_delete(&token)
+ return count
+
+ cdef object _scan(self):
+ cdef yaml_token_t token
+ if yaml_parser_scan(&self.parser, &token) == 0:
+ error = self._parser_error()
+ raise error
+ token_object = self._token_to_object(&token)
+ yaml_token_delete(&token)
+ return token_object
+
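+    # Convert one yaml_token_t into the corresponding Python Token
+    # object (StreamStartToken, ScalarToken, ...), attaching start and
+    # end Marks.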
+ cdef object _token_to_object(self, yaml_token_t *token):
+ start_mark = Mark(self.stream_name,
+ token.start_mark.index,
+ token.start_mark.line,
+ token.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ token.end_mark.index,
+ token.end_mark.line,
+ token.end_mark.column,
+ None, None)
+ if token.type == YAML_NO_TOKEN:
+ return None
+ elif token.type == YAML_STREAM_START_TOKEN:
+ encoding = None
+ if token.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif token.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif token.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartToken(start_mark, end_mark, encoding)
+ elif token.type == YAML_STREAM_END_TOKEN:
+ return StreamEndToken(start_mark, end_mark)
+ elif token.type == YAML_VERSION_DIRECTIVE_TOKEN:
+ return DirectiveToken(u"YAML",
+ (token.data.version_directive.major,
+ token.data.version_directive.minor),
+ start_mark, end_mark)
+ elif token.type == YAML_TAG_DIRECTIVE_TOKEN:
+ handle = PyUnicode_FromString(token.data.tag_directive.handle)
+ prefix = PyUnicode_FromString(token.data.tag_directive.prefix)
+ return DirectiveToken(u"TAG", (handle, prefix),
+ start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_START_TOKEN:
+ return DocumentStartToken(start_mark, end_mark)
+ elif token.type == YAML_DOCUMENT_END_TOKEN:
+ return DocumentEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_SEQUENCE_START_TOKEN:
+ return BlockSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_MAPPING_START_TOKEN:
+ return BlockMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_END_TOKEN:
+ return BlockEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_START_TOKEN:
+ return FlowSequenceStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_SEQUENCE_END_TOKEN:
+ return FlowSequenceEndToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_START_TOKEN:
+ return FlowMappingStartToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_MAPPING_END_TOKEN:
+ return FlowMappingEndToken(start_mark, end_mark)
+ elif token.type == YAML_BLOCK_ENTRY_TOKEN:
+ return BlockEntryToken(start_mark, end_mark)
+ elif token.type == YAML_FLOW_ENTRY_TOKEN:
+ return FlowEntryToken(start_mark, end_mark)
+ elif token.type == YAML_KEY_TOKEN:
+ return KeyToken(start_mark, end_mark)
+ elif token.type == YAML_VALUE_TOKEN:
+ return ValueToken(start_mark, end_mark)
+ elif token.type == YAML_ALIAS_TOKEN:
+ value = PyUnicode_FromString(token.data.alias.value)
+ return AliasToken(value, start_mark, end_mark)
+ elif token.type == YAML_ANCHOR_TOKEN:
+ value = PyUnicode_FromString(token.data.anchor.value)
+ return AnchorToken(value, start_mark, end_mark)
+ elif token.type == YAML_TAG_TOKEN:
+ handle = PyUnicode_FromString(token.data.tag.handle)
+ suffix = PyUnicode_FromString(token.data.tag.suffix)
+ if not handle:
+ handle = None
+ return TagToken((handle, suffix), start_mark, end_mark)
+ elif token.type == YAML_SCALAR_TOKEN:
+ value = PyUnicode_DecodeUTF8(token.data.scalar.value,
+ token.data.scalar.length, 'strict')
+ plain = False
+ style = None
+ if token.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ plain = True
+ style = u''
+ elif token.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif token.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif token.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif token.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarToken(value, plain,
+ start_mark, end_mark, style)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown token type")
+ else:
+ raise ValueError(u"unknown token type")
+
+ def get_token(self):
+ if self.current_token is not None:
+ value = self.current_token
+ self.current_token = None
+ else:
+ value = self._scan()
+ return value
+
+ def peek_token(self):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ return self.current_token
+
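+    # Report whether the next token's class is one of *choices* (or
+    # whether any token is left, when called without arguments),
+    # without consuming the token.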
+ def check_token(self, *choices):
+ if self.current_token is None:
+ self.current_token = self._scan()
+ if self.current_token is None:
+ return False
+ if not choices:
+ return True
+ token_class = self.current_token.__class__
+ for choice in choices:
+ if token_class is choice:
+ return True
+ return False
+
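+    # Event-level counterpart of raw_scan(): parse the whole stream and
+    # return the number of events produced.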
+ def raw_parse(self):
+ cdef yaml_event_t event
+ cdef int done
+ cdef int count
+ count = 0
+ done = 0
+ while done == 0:
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ if event.type == YAML_NO_EVENT:
+ done = 1
+ else:
+ count = count+1
+ yaml_event_delete(&event)
+ return count
+
+ cdef object _parse(self):
+ cdef yaml_event_t event
+ if yaml_parser_parse(&self.parser, &event) == 0:
+ error = self._parser_error()
+ raise error
+ event_object = self._event_to_object(&event)
+ yaml_event_delete(&event)
+ return event_object
+
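+    # Convert one yaml_event_t into the corresponding Python Event
+    # object (StreamStartEvent, ScalarEvent, ...).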
+ cdef object _event_to_object(self, yaml_event_t *event):
+ cdef yaml_tag_directive_t *tag_directive
+ start_mark = Mark(self.stream_name,
+ event.start_mark.index,
+ event.start_mark.line,
+ event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ event.end_mark.index,
+ event.end_mark.line,
+ event.end_mark.column,
+ None, None)
+ if event.type == YAML_NO_EVENT:
+ return None
+ elif event.type == YAML_STREAM_START_EVENT:
+ encoding = None
+ if event.data.stream_start.encoding == YAML_UTF8_ENCODING:
+ if self.unicode_source == 0:
+ encoding = u"utf-8"
+ elif event.data.stream_start.encoding == YAML_UTF16LE_ENCODING:
+ encoding = u"utf-16-le"
+ elif event.data.stream_start.encoding == YAML_UTF16BE_ENCODING:
+ encoding = u"utf-16-be"
+ return StreamStartEvent(start_mark, end_mark, encoding)
+ elif event.type == YAML_STREAM_END_EVENT:
+ return StreamEndEvent(start_mark, end_mark)
+ elif event.type == YAML_DOCUMENT_START_EVENT:
+ explicit = False
+ if event.data.document_start.implicit == 0:
+ explicit = True
+ version = None
+ if event.data.document_start.version_directive != NULL:
+ version = (event.data.document_start.version_directive.major,
+ event.data.document_start.version_directive.minor)
+ tags = None
+ if event.data.document_start.tag_directives.start != NULL:
+ tags = {}
+ tag_directive = event.data.document_start.tag_directives.start
+ while tag_directive != event.data.document_start.tag_directives.end:
+ handle = PyUnicode_FromString(tag_directive.handle)
+ prefix = PyUnicode_FromString(tag_directive.prefix)
+ tags[handle] = prefix
+ tag_directive = tag_directive+1
+ return DocumentStartEvent(start_mark, end_mark,
+ explicit, version, tags)
+ elif event.type == YAML_DOCUMENT_END_EVENT:
+ explicit = False
+ if event.data.document_end.implicit == 0:
+ explicit = True
+ return DocumentEndEvent(start_mark, end_mark, explicit)
+ elif event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(event.data.alias.anchor)
+ return AliasEvent(anchor, start_mark, end_mark)
+ elif event.type == YAML_SCALAR_EVENT:
+ anchor = None
+ if event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.scalar.anchor)
+ tag = None
+ if event.data.scalar.tag != NULL:
+ tag = PyUnicode_FromString(event.data.scalar.tag)
+ value = PyUnicode_DecodeUTF8(event.data.scalar.value,
+ event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ style = None
+ if event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ return ScalarEvent(anchor, tag,
+ (plain_implicit, quoted_implicit),
+ value, start_mark, end_mark, style)
+ elif event.type == YAML_SEQUENCE_START_EVENT:
+ anchor = None
+ if event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.sequence_start.anchor)
+ tag = None
+ if event.data.sequence_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.sequence_start.tag)
+ implicit = False
+ if event.data.sequence_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ return SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_MAPPING_START_EVENT:
+ anchor = None
+ if event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(event.data.mapping_start.anchor)
+ tag = None
+ if event.data.mapping_start.tag != NULL:
+ tag = PyUnicode_FromString(event.data.mapping_start.tag)
+ implicit = False
+ if event.data.mapping_start.implicit == 1:
+ implicit = True
+ flow_style = None
+ if event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ return MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style)
+ elif event.type == YAML_SEQUENCE_END_EVENT:
+ return SequenceEndEvent(start_mark, end_mark)
+ elif event.type == YAML_MAPPING_END_EVENT:
+ return MappingEndEvent(start_mark, end_mark)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("unknown event type")
+ else:
+ raise ValueError(u"unknown event type")
+
+ def get_event(self):
+ if self.current_event is not None:
+ value = self.current_event
+ self.current_event = None
+ else:
+ value = self._parse()
+ return value
+
+ def peek_event(self):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ return self.current_event
+
+ def check_event(self, *choices):
+ if self.current_event is None:
+ self.current_event = self._parse()
+ if self.current_event is None:
+ return False
+ if not choices:
+ return True
+ event_class = self.current_event.__class__
+ for choice in choices:
+ if event_class is choice:
+ return True
+ return False
+
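+    # The composer methods below share self.parsed_event as a one-event
+    # lookahead buffer: _parse_next_event() only calls into libyaml when
+    # the buffer holds YAML_NO_EVENT, and consumers clear it again with
+    # yaml_event_delete().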
+ def check_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_STREAM_START_EVENT:
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return True
+ return False
+
+ def get_node(self):
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ return self._compose_document()
+
+ def get_single_node(self):
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ document = None
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ document = self._compose_document()
+ self._parse_next_event()
+ if self.parsed_event.type != YAML_STREAM_END_EVENT:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document", mark)
+ else:
+ raise ComposerError(u"expected a single document in the stream",
+ document.start_mark, u"but found another document", mark)
+ return document
+
+ cdef object _compose_document(self):
+ yaml_event_delete(&self.parsed_event)
+ node = self._compose_node(None, None)
+ self._parse_next_event()
+ yaml_event_delete(&self.parsed_event)
+ self.anchors = {}
+ return node
+
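+    # Compose a single node, resolving alias events against
+    # self.anchors; undefined aliases and duplicate anchors raise a
+    # ComposerError.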
+ cdef object _compose_node(self, object parent, object index):
+ self._parse_next_event()
+ if self.parsed_event.type == YAML_ALIAS_EVENT:
+ anchor = PyUnicode_FromString(self.parsed_event.data.alias.anchor)
+ if anchor not in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError(None, None, "found undefined alias", mark)
+ else:
+ raise ComposerError(None, None, u"found undefined alias", mark)
+ yaml_event_delete(&self.parsed_event)
+ return self.anchors[anchor]
+ anchor = None
+ if self.parsed_event.type == YAML_SCALAR_EVENT \
+ and self.parsed_event.data.scalar.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.scalar.anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT \
+ and self.parsed_event.data.sequence_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.sequence_start.anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT \
+ and self.parsed_event.data.mapping_start.anchor != NULL:
+ anchor = PyUnicode_FromString(self.parsed_event.data.mapping_start.anchor)
+ if anchor is not None:
+ if anchor in self.anchors:
+ mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ if PY_MAJOR_VERSION < 3:
+ raise ComposerError("found duplicate anchor; first occurrence",
+ self.anchors[anchor].start_mark, "second occurrence", mark)
+ else:
+ raise ComposerError(u"found duplicate anchor; first occurrence",
+ self.anchors[anchor].start_mark, u"second occurrence", mark)
+ self.descend_resolver(parent, index)
+ if self.parsed_event.type == YAML_SCALAR_EVENT:
+ node = self._compose_scalar_node(anchor)
+ elif self.parsed_event.type == YAML_SEQUENCE_START_EVENT:
+ node = self._compose_sequence_node(anchor)
+ elif self.parsed_event.type == YAML_MAPPING_START_EVENT:
+ node = self._compose_mapping_node(anchor)
+ self.ascend_resolver()
+ return node
+
+ cdef _compose_scalar_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ value = PyUnicode_DecodeUTF8(self.parsed_event.data.scalar.value,
+ self.parsed_event.data.scalar.length, 'strict')
+ plain_implicit = False
+ if self.parsed_event.data.scalar.plain_implicit == 1:
+ plain_implicit = True
+ quoted_implicit = False
+ if self.parsed_event.data.scalar.quoted_implicit == 1:
+ quoted_implicit = True
+ if self.parsed_event.data.scalar.tag == NULL \
+ or (self.parsed_event.data.scalar.tag[0] == c'!'
+ and self.parsed_event.data.scalar.tag[1] == c'\0'):
+ tag = self.resolve(ScalarNode, value, (plain_implicit, quoted_implicit))
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.scalar.tag)
+ style = None
+ if self.parsed_event.data.scalar.style == YAML_PLAIN_SCALAR_STYLE:
+ style = u''
+ elif self.parsed_event.data.scalar.style == YAML_SINGLE_QUOTED_SCALAR_STYLE:
+ style = u'\''
+ elif self.parsed_event.data.scalar.style == YAML_DOUBLE_QUOTED_SCALAR_STYLE:
+ style = u'"'
+ elif self.parsed_event.data.scalar.style == YAML_LITERAL_SCALAR_STYLE:
+ style = u'|'
+ elif self.parsed_event.data.scalar.style == YAML_FOLDED_SCALAR_STYLE:
+ style = u'>'
+ node = ScalarNode(tag, value, start_mark, end_mark, style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_sequence_node(self, object anchor):
+ cdef int index
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.sequence_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.sequence_start.tag == NULL \
+ or (self.parsed_event.data.sequence_start.tag[0] == c'!'
+ and self.parsed_event.data.sequence_start.tag[1] == c'\0'):
+ tag = self.resolve(SequenceNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.sequence_start.tag)
+ flow_style = None
+ if self.parsed_event.data.sequence_start.style == YAML_FLOW_SEQUENCE_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.sequence_start.style == YAML_BLOCK_SEQUENCE_STYLE:
+ flow_style = False
+ value = []
+ node = SequenceNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ index = 0
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_SEQUENCE_END_EVENT:
+ value.append(self._compose_node(node, index))
+ index = index+1
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef _compose_mapping_node(self, object anchor):
+ start_mark = Mark(self.stream_name,
+ self.parsed_event.start_mark.index,
+ self.parsed_event.start_mark.line,
+ self.parsed_event.start_mark.column,
+ None, None)
+ implicit = False
+ if self.parsed_event.data.mapping_start.implicit == 1:
+ implicit = True
+ if self.parsed_event.data.mapping_start.tag == NULL \
+ or (self.parsed_event.data.mapping_start.tag[0] == c'!'
+ and self.parsed_event.data.mapping_start.tag[1] == c'\0'):
+ tag = self.resolve(MappingNode, None, implicit)
+ else:
+ tag = PyUnicode_FromString(self.parsed_event.data.mapping_start.tag)
+ flow_style = None
+ if self.parsed_event.data.mapping_start.style == YAML_FLOW_MAPPING_STYLE:
+ flow_style = True
+ elif self.parsed_event.data.mapping_start.style == YAML_BLOCK_MAPPING_STYLE:
+ flow_style = False
+ value = []
+ node = MappingNode(tag, value, start_mark, None, flow_style)
+ if anchor is not None:
+ self.anchors[anchor] = node
+ yaml_event_delete(&self.parsed_event)
+ self._parse_next_event()
+ while self.parsed_event.type != YAML_MAPPING_END_EVENT:
+ item_key = self._compose_node(node, None)
+ item_value = self._compose_node(node, item_key)
+ value.append((item_key, item_value))
+ self._parse_next_event()
+ node.end_mark = Mark(self.stream_name,
+ self.parsed_event.end_mark.index,
+ self.parsed_event.end_mark.line,
+ self.parsed_event.end_mark.column,
+ None, None)
+ yaml_event_delete(&self.parsed_event)
+ return node
+
+ cdef int _parse_next_event(self) except 0:
+ if self.parsed_event.type == YAML_NO_EVENT:
+ if yaml_parser_parse(&self.parser, &self.parsed_event) == 0:
+ error = self._parser_error()
+ raise error
+ return 1
+
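+# C-level callback registered via yaml_parser_set_input(); it feeds
+# libyaml from the Python stream, buffering surplus bytes in
+# parser.stream_cache.  The "except 0" clause makes Cython return 0 when
+# a Python exception is raised, telling libyaml to abort the parse so
+# the exception can propagate.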
+cdef int input_handler(void *data, char *buffer, size_t size, size_t *read) except 0:
+ cdef CParser parser
+ parser = <CParser>data
+ if parser.stream_cache is None:
+ value = parser.stream.read(size)
+ if PyUnicode_CheckExact(value) != 0:
+ value = PyUnicode_AsUTF8String(value)
+ parser.unicode_source = 1
+ if PyString_CheckExact(value) == 0:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("a string value is expected")
+ else:
+ raise TypeError(u"a string value is expected")
+ parser.stream_cache = value
+ parser.stream_cache_pos = 0
+ parser.stream_cache_len = PyString_GET_SIZE(value)
+ if (parser.stream_cache_len - parser.stream_cache_pos) < size:
+ size = parser.stream_cache_len - parser.stream_cache_pos
+ if size > 0:
+ memcpy(buffer, PyString_AS_STRING(parser.stream_cache)
+ + parser.stream_cache_pos, size)
+ read[0] = size
+ parser.stream_cache_pos += size
+ if parser.stream_cache_pos == parser.stream_cache_len:
+ parser.stream_cache = None
+ return 1
+
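+# CEmitter wraps the libyaml emitter and covers the roles of the
+# pure-Python Emitter and Serializer classes.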
+cdef class CEmitter:
+
+ cdef yaml_emitter_t emitter
+
+ cdef object stream
+
+ cdef int document_start_implicit
+ cdef int document_end_implicit
+ cdef object use_version
+ cdef object use_tags
+
+ cdef object serialized_nodes
+ cdef object anchors
+ cdef int last_alias_id
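+    # -1: not opened yet, 0: open, 1: closed (managed by open()/close()).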
+ cdef int closed
+ cdef int dump_unicode
+ cdef object use_encoding
+
+ def __init__(self, stream, canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None, encoding=None,
+ explicit_start=None, explicit_end=None, version=None, tags=None):
+ if yaml_emitter_initialize(&self.emitter) == 0:
+ raise MemoryError
+ self.stream = stream
+ self.dump_unicode = 0
+ if PY_MAJOR_VERSION < 3:
+ if getattr3(stream, 'encoding', None):
+ self.dump_unicode = 1
+ else:
+ if hasattr(stream, u'encoding'):
+ self.dump_unicode = 1
+ self.use_encoding = encoding
+ yaml_emitter_set_output(&self.emitter, output_handler, <void *>self)
+ if canonical:
+ yaml_emitter_set_canonical(&self.emitter, 1)
+ if indent is not None:
+ yaml_emitter_set_indent(&self.emitter, indent)
+ if width is not None:
+ yaml_emitter_set_width(&self.emitter, width)
+ if allow_unicode:
+ yaml_emitter_set_unicode(&self.emitter, 1)
+ if line_break is not None:
+ if line_break == '\r':
+ yaml_emitter_set_break(&self.emitter, YAML_CR_BREAK)
+ elif line_break == '\n':
+ yaml_emitter_set_break(&self.emitter, YAML_LN_BREAK)
+ elif line_break == '\r\n':
+ yaml_emitter_set_break(&self.emitter, YAML_CRLN_BREAK)
+ self.document_start_implicit = 1
+ if explicit_start:
+ self.document_start_implicit = 0
+ self.document_end_implicit = 1
+ if explicit_end:
+ self.document_end_implicit = 0
+ self.use_version = version
+ self.use_tags = tags
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+ self.closed = -1
+
+ def __dealloc__(self):
+ yaml_emitter_delete(&self.emitter)
+
+ def dispose(self):
+ pass
+
+ cdef object _emitter_error(self):
+ if self.emitter.error == YAML_MEMORY_ERROR:
+ return MemoryError
+ elif self.emitter.error == YAML_EMITTER_ERROR:
+ if PY_MAJOR_VERSION < 3:
+ problem = self.emitter.problem
+ else:
+ problem = PyUnicode_FromString(self.emitter.problem)
+ return EmitterError(problem)
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("no emitter error")
+ else:
+ raise ValueError(u"no emitter error")
+
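+    # Fill a yaml_event_t from a Python Event object.  Strings are
+    # encoded to UTF-8 first; libyaml copies them inside the
+    # *_event_initialize() calls, so the temporary byte strings only
+    # need to stay alive until then.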
+ cdef int _object_to_event(self, object event_object, yaml_event_t *event) except 0:
+ cdef yaml_encoding_t encoding
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ event_class = event_object.__class__
+ if event_class is StreamStartEvent:
+ encoding = YAML_UTF8_ENCODING
+ if event_object.encoding == u'utf-16-le' or event_object.encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif event_object.encoding == u'utf-16-be' or event_object.encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ if event_object.encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(event, encoding)
+ elif event_class is StreamEndEvent:
+ yaml_stream_end_event_initialize(event)
+ elif event_class is DocumentStartEvent:
+ version_directive = NULL
+ if event_object.version:
+ version_directive_value.major = event_object.version[0]
+ version_directive_value.minor = event_object.version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if event_object.tags:
+ if len(event_object.tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ cache = []
+ for handle in event_object.tags:
+ prefix = event_object.tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
+ tag_directives_end.handle = PyString_AS_STRING(handle)
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
+ tag_directives_end.prefix = PyString_AS_STRING(prefix)
+ tag_directives_end = tag_directives_end+1
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ if yaml_document_start_event_initialize(event, version_directive,
+ tag_directives_start, tag_directives_end, implicit) == 0:
+ raise MemoryError
+ elif event_class is DocumentEndEvent:
+ implicit = 1
+ if event_object.explicit:
+ implicit = 0
+ yaml_document_end_event_initialize(event, implicit)
+ elif event_class is AliasEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ if yaml_alias_event_initialize(event, anchor) == 0:
+ raise MemoryError
+ elif event_class is ScalarEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = event_object.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ plain_implicit = 0
+ quoted_implicit = 0
+ if event_object.implicit is not None:
+ plain_implicit = event_object.implicit[0]
+ quoted_implicit = event_object.implicit[1]
+ style_object = event_object.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
+ if yaml_scalar_event_initialize(event, anchor, tag, value, length,
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if event_object.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ if yaml_sequence_start_event_initialize(event, anchor, tag,
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ elif event_class is MappingStartEvent:
+ anchor = NULL
+ anchor_object = event_object.anchor
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ tag = NULL
+ tag_object = event_object.tag
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ implicit = 0
+ if event_object.implicit:
+ implicit = 1
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if event_object.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
+ if yaml_mapping_start_event_initialize(event, anchor, tag,
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ elif event_class is SequenceEndEvent:
+ yaml_sequence_end_event_initialize(event)
+ elif event_class is MappingEndEvent:
+ yaml_mapping_end_event_initialize(event)
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("invalid event %s" % event_object)
+ else:
+ raise TypeError(u"invalid event %s" % event_object)
+ return 1
+
+ def emit(self, event_object):
+ cdef yaml_event_t event
+ self._object_to_event(event_object, &event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+
+ def open(self):
+ cdef yaml_event_t event
+ cdef yaml_encoding_t encoding
+ if self.closed == -1:
+ if self.use_encoding == u'utf-16-le' or self.use_encoding == 'utf-16-le':
+ encoding = YAML_UTF16LE_ENCODING
+ elif self.use_encoding == u'utf-16-be' or self.use_encoding == 'utf-16-be':
+ encoding = YAML_UTF16BE_ENCODING
+ else:
+ encoding = YAML_UTF8_ENCODING
+ if self.use_encoding is None:
+ self.dump_unicode = 1
+ if self.dump_unicode == 1:
+ encoding = YAML_UTF8_ENCODING
+ yaml_stream_start_event_initialize(&event, encoding)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 0
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ else:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is already opened")
+ else:
+ raise SerializerError(u"serializer is already opened")
+
+ def close(self):
+ cdef yaml_event_t event
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 0:
+ yaml_stream_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.closed = 1
+
+ def serialize(self, node):
+ cdef yaml_event_t event
+ cdef yaml_version_directive_t version_directive_value
+ cdef yaml_version_directive_t *version_directive
+ cdef yaml_tag_directive_t tag_directives_value[128]
+ cdef yaml_tag_directive_t *tag_directives_start
+ cdef yaml_tag_directive_t *tag_directives_end
+ if self.closed == -1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is not opened")
+ else:
+ raise SerializerError(u"serializer is not opened")
+ elif self.closed == 1:
+ if PY_MAJOR_VERSION < 3:
+ raise SerializerError("serializer is closed")
+ else:
+ raise SerializerError(u"serializer is closed")
+ cache = []
+ version_directive = NULL
+ if self.use_version:
+ version_directive_value.major = self.use_version[0]
+ version_directive_value.minor = self.use_version[1]
+ version_directive = &version_directive_value
+ tag_directives_start = NULL
+ tag_directives_end = NULL
+ if self.use_tags:
+ if len(self.use_tags) > 128:
+ if PY_MAJOR_VERSION < 3:
+ raise ValueError("too many tags")
+ else:
+ raise ValueError(u"too many tags")
+ tag_directives_start = tag_directives_value
+ tag_directives_end = tag_directives_value
+ for handle in self.use_tags:
+ prefix = self.use_tags[handle]
+ if PyUnicode_CheckExact(handle):
+ handle = PyUnicode_AsUTF8String(handle)
+ cache.append(handle)
+ if not PyString_CheckExact(handle):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag handle must be a string")
+ else:
+ raise TypeError(u"tag handle must be a string")
+ tag_directives_end.handle = PyString_AS_STRING(handle)
+ if PyUnicode_CheckExact(prefix):
+ prefix = PyUnicode_AsUTF8String(prefix)
+ cache.append(prefix)
+ if not PyString_CheckExact(prefix):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag prefix must be a string")
+ else:
+ raise TypeError(u"tag prefix must be a string")
+ tag_directives_end.prefix = PyString_AS_STRING(prefix)
+ tag_directives_end = tag_directives_end+1
+ if yaml_document_start_event_initialize(&event, version_directive,
+ tag_directives_start, tag_directives_end,
+ self.document_start_implicit) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self._anchor_node(node)
+ self._serialize_node(node, None, None)
+ yaml_document_end_event_initialize(&event, self.document_end_implicit)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.serialized_nodes = {}
+ self.anchors = {}
+ self.last_alias_id = 0
+
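+    # First serialization pass: give every node that is referenced more
+    # than once an anchor name ("id001", "id002", ...); nodes referenced
+    # once keep None.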
+ cdef int _anchor_node(self, object node) except 0:
+ if node in self.anchors:
+ if self.anchors[node] is None:
+ self.last_alias_id = self.last_alias_id+1
+ self.anchors[node] = u"id%03d" % self.last_alias_id
+ else:
+ self.anchors[node] = None
+ node_class = node.__class__
+ if node_class is SequenceNode:
+ for item in node.value:
+ self._anchor_node(item)
+ elif node_class is MappingNode:
+ for key, value in node.value:
+ self._anchor_node(key)
+ self._anchor_node(value)
+ return 1
+
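+    # Second pass: emit events for the node tree; a node that was
+    # already serialized is emitted as an alias referring to its anchor.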
+ cdef int _serialize_node(self, object node, object parent, object index) except 0:
+ cdef yaml_event_t event
+ cdef int implicit
+ cdef int plain_implicit
+ cdef int quoted_implicit
+ cdef char *anchor
+ cdef char *tag
+ cdef char *value
+ cdef int length
+ cdef int item_index
+ cdef yaml_scalar_style_t scalar_style
+ cdef yaml_sequence_style_t sequence_style
+ cdef yaml_mapping_style_t mapping_style
+ anchor_object = self.anchors[node]
+ anchor = NULL
+ if anchor_object is not None:
+ if PyUnicode_CheckExact(anchor_object):
+ anchor_object = PyUnicode_AsUTF8String(anchor_object)
+ if not PyString_CheckExact(anchor_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("anchor must be a string")
+ else:
+ raise TypeError(u"anchor must be a string")
+ anchor = PyString_AS_STRING(anchor_object)
+ if node in self.serialized_nodes:
+ if yaml_alias_event_initialize(&event, anchor) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ else:
+ node_class = node.__class__
+ self.serialized_nodes[node] = True
+ self.descend_resolver(parent, index)
+ if node_class is ScalarNode:
+ plain_implicit = 0
+ quoted_implicit = 0
+ tag_object = node.tag
+ if self.resolve(ScalarNode, node.value, (True, False)) == tag_object:
+ plain_implicit = 1
+ if self.resolve(ScalarNode, node.value, (False, True)) == tag_object:
+ quoted_implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ value_object = node.value
+ if PyUnicode_CheckExact(value_object):
+ value_object = PyUnicode_AsUTF8String(value_object)
+ if not PyString_CheckExact(value_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("value must be a string")
+ else:
+ raise TypeError(u"value must be a string")
+ value = PyString_AS_STRING(value_object)
+ length = PyString_GET_SIZE(value_object)
+ style_object = node.style
+ scalar_style = YAML_PLAIN_SCALAR_STYLE
+ if style_object == "'" or style_object == u"'":
+ scalar_style = YAML_SINGLE_QUOTED_SCALAR_STYLE
+ elif style_object == "\"" or style_object == u"\"":
+ scalar_style = YAML_DOUBLE_QUOTED_SCALAR_STYLE
+ elif style_object == "|" or style_object == u"|":
+ scalar_style = YAML_LITERAL_SCALAR_STYLE
+ elif style_object == ">" or style_object == u">":
+ scalar_style = YAML_FOLDED_SCALAR_STYLE
+ if yaml_scalar_event_initialize(&event, anchor, tag, value, length,
+ plain_implicit, quoted_implicit, scalar_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is SequenceNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(SequenceNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ sequence_style = YAML_BLOCK_SEQUENCE_STYLE
+ if node.flow_style:
+ sequence_style = YAML_FLOW_SEQUENCE_STYLE
+ if yaml_sequence_start_event_initialize(&event, anchor, tag,
+ implicit, sequence_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ item_index = 0
+ for item in node.value:
+ self._serialize_node(item, node, item_index)
+ item_index = item_index+1
+ yaml_sequence_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ elif node_class is MappingNode:
+ implicit = 0
+ tag_object = node.tag
+ if self.resolve(MappingNode, node.value, True) == tag_object:
+ implicit = 1
+ tag = NULL
+ if tag_object is not None:
+ if PyUnicode_CheckExact(tag_object):
+ tag_object = PyUnicode_AsUTF8String(tag_object)
+ if not PyString_CheckExact(tag_object):
+ if PY_MAJOR_VERSION < 3:
+ raise TypeError("tag must be a string")
+ else:
+ raise TypeError(u"tag must be a string")
+ tag = PyString_AS_STRING(tag_object)
+ mapping_style = YAML_BLOCK_MAPPING_STYLE
+ if node.flow_style:
+ mapping_style = YAML_FLOW_MAPPING_STYLE
+ if yaml_mapping_start_event_initialize(&event, anchor, tag,
+ implicit, mapping_style) == 0:
+ raise MemoryError
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ for item_key, item_value in node.value:
+ self._serialize_node(item_key, node, None)
+ self._serialize_node(item_value, node, item_key)
+ yaml_mapping_end_event_initialize(&event)
+ if yaml_emitter_emit(&self.emitter, &event) == 0:
+ error = self._emitter_error()
+ raise error
+ self.ascend_resolver()
+ return 1
+
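+# C-level callback registered via yaml_emitter_set_output(); forwards
+# libyaml's output to the Python stream, decoding UTF-8 to unicode when
+# dump_unicode is set.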
+cdef int output_handler(void *data, char *buffer, size_t size) except 0:
+ cdef CEmitter emitter
+ emitter = <CEmitter>data
+ if emitter.dump_unicode == 0:
+ value = PyString_FromStringAndSize(buffer, size)
+ else:
+ value = PyUnicode_DecodeUTF8(buffer, size, 'strict')
+ emitter.stream.write(value)
+ return 1
+
diff --git a/third_party/python/_venv/wheels/pip-23.0.1-py3-none-any.whl b/third_party/python/_venv/wheels/pip-23.0.1-py3-none-any.whl
new file mode 100644
index 0000000000..a855dc40e8
--- /dev/null
+++ b/third_party/python/_venv/wheels/pip-23.0.1-py3-none-any.whl
Binary files differ
diff --git a/third_party/python/_venv/wheels/setuptools-51.2.0-py3-none-any.whl b/third_party/python/_venv/wheels/setuptools-51.2.0-py3-none-any.whl
new file mode 100644
index 0000000000..4cb44cf15c
--- /dev/null
+++ b/third_party/python/_venv/wheels/setuptools-51.2.0-py3-none-any.whl
Binary files differ
diff --git a/third_party/python/aiohttp/CHANGES.rst b/third_party/python/aiohttp/CHANGES.rst
new file mode 100644
index 0000000000..f064f4895c
--- /dev/null
+++ b/third_party/python/aiohttp/CHANGES.rst
@@ -0,0 +1,728 @@
+=========
+Changelog
+=========
+
+..
+ You should *NOT* be adding new change log entries to this file, this
+ file is managed by towncrier. You *may* edit previous change logs to
+ fix problems like typo corrections or such.
+ To add a new change log entry, please see
+ https://pip.pypa.io/en/latest/development/#adding-a-news-entry
+  (we named the news folder "changes").
+
+ WARNING: Don't drop the next directive!
+
+.. towncrier release notes start
+
+3.7.4.post0 (2021-03-06)
+========================
+
+Misc
+----
+
+- Bumped upper bound of the ``chardet`` runtime dependency
+  to allow its v4.0 release stream.
+ `#5366 <https://github.com/aio-libs/aiohttp/issues/5366>`_
+
+
+----
+
+
+3.7.4 (2021-02-25)
+==================
+
+Bugfixes
+--------
+
+- **(SECURITY BUG)** Started preventing open redirects in the
+ ``aiohttp.web.normalize_path_middleware`` middleware. For
+ more details, see
+ https://github.com/aio-libs/aiohttp/security/advisories/GHSA-v6wp-4m6f-gcjg.
+
+ Thanks to `Beast Glatisant <https://github.com/g147>`__ for
+ finding the first instance of this issue and `Jelmer Vernooij
+ <https://jelmer.uk/>`__ for reporting and tracking it down
+ in aiohttp.
+ `#5497 <https://github.com/aio-libs/aiohttp/issues/5497>`_
+- Fixed a difference in how the pure-Python and the Cython-based
+  HTTP parsers construct a ``yarl.URL`` object for the HTTP request-target.
+
+ Before this fix, the Python parser would turn the URI's absolute-path
+ for ``//some-path`` into ``/`` while the Cython code preserved it as
+ ``//some-path``. Now, both do the latter.
+ `#5498 <https://github.com/aio-libs/aiohttp/issues/5498>`_
+
+
+----
+
+
+3.7.3 (2020-11-18)
+==================
+
+Features
+--------
+
+- Use Brotli instead of brotlipy
+ `#3803 <https://github.com/aio-libs/aiohttp/issues/3803>`_
+- Made exceptions pickleable. Also changed the repr of some exceptions.
+ `#4077 <https://github.com/aio-libs/aiohttp/issues/4077>`_
+
+
+Bugfixes
+--------
+
+- Raise a ClientResponseError instead of an AssertionError for a blank
+ HTTP Reason Phrase.
+ `#3532 <https://github.com/aio-libs/aiohttp/issues/3532>`_
+- Fix ``web_middlewares.normalize_path_middleware`` behavior for patch without slash.
+ `#3669 <https://github.com/aio-libs/aiohttp/issues/3669>`_
+- Fix overshadowing of overlapped sub-applications prefixes.
+ `#3701 <https://github.com/aio-libs/aiohttp/issues/3701>`_
+- Make `BaseConnector.close()` a coroutine and wait until the client closes all connections. Drop deprecated "with Connector():" syntax.
+ `#3736 <https://github.com/aio-libs/aiohttp/issues/3736>`_
+- Reset the ``sock_read`` timeout each time data is received for an ``aiohttp.client`` response.
+ `#3808 <https://github.com/aio-libs/aiohttp/issues/3808>`_
+- Fixed type annotation for add_view method of UrlDispatcher to accept any subclass of View
+ `#3880 <https://github.com/aio-libs/aiohttp/issues/3880>`_
+- Fixed DNS queries to request only the address families that the current host supports.
+ `#5156 <https://github.com/aio-libs/aiohttp/issues/5156>`_
+- Change return type of MultipartReader.__aiter__() and BodyPartReader.__aiter__() to AsyncIterator.
+ `#5163 <https://github.com/aio-libs/aiohttp/issues/5163>`_
+- Provide x86 Windows wheels.
+ `#5230 <https://github.com/aio-libs/aiohttp/issues/5230>`_
+
+
+Improved Documentation
+----------------------
+
+- Add documentation for ``aiohttp.web.FileResponse``.
+ `#3958 <https://github.com/aio-libs/aiohttp/issues/3958>`_
+- Removed deprecation warning in tracing example docs
+ `#3964 <https://github.com/aio-libs/aiohttp/issues/3964>`_
+- Fixed wrong "Usage" docstring of ``aiohttp.client.request``.
+ `#4603 <https://github.com/aio-libs/aiohttp/issues/4603>`_
+- Add aiohttp-pydantic to third party libraries
+ `#5228 <https://github.com/aio-libs/aiohttp/issues/5228>`_
+
+
+Misc
+----
+
+- `#4102 <https://github.com/aio-libs/aiohttp/issues/4102>`_
+
+
+----
+
+
+3.7.2 (2020-10-27)
+==================
+
+Bugfixes
+--------
+
+- Fixed static files handling for loops without ``.sendfile()`` support
+ `#5149 <https://github.com/aio-libs/aiohttp/issues/5149>`_
+
+
+----
+
+
+3.7.1 (2020-10-25)
+==================
+
+Bugfixes
+--------
+
+- Fixed a type error caused by the conditional import of `Protocol`.
+ `#5111 <https://github.com/aio-libs/aiohttp/issues/5111>`_
+- Server doesn't send Content-Length for 1xx or 204
+ `#4901 <https://github.com/aio-libs/aiohttp/issues/4901>`_
+- Fix run_app typing
+ `#4957 <https://github.com/aio-libs/aiohttp/issues/4957>`_
+- Always require ``typing_extensions`` library.
+ `#5107 <https://github.com/aio-libs/aiohttp/issues/5107>`_
+- Fix a variable-shadowing bug causing `ThreadedResolver.resolve` to
+ return the resolved IP as the ``hostname`` in each record, which prevented
+ validation of HTTPS connections.
+ `#5110 <https://github.com/aio-libs/aiohttp/issues/5110>`_
+- Added annotations to all public attributes.
+ `#5115 <https://github.com/aio-libs/aiohttp/issues/5115>`_
+- Fix flaky test_when_timeout_smaller_second
+ `#5116 <https://github.com/aio-libs/aiohttp/issues/5116>`_
+- Ensure sending a zero byte file does not throw an exception
+ `#5124 <https://github.com/aio-libs/aiohttp/issues/5124>`_
+- Fix a bug in ``web.run_app()`` about Python version checking on Windows
+ `#5127 <https://github.com/aio-libs/aiohttp/issues/5127>`_
+
+
+----
+
+
+3.7.0 (2020-10-24)
+==================
+
+Features
+--------
+
+- Response headers are now prepared prior to running ``on_response_prepare`` hooks, directly before headers are sent to the client.
+ `#1958 <https://github.com/aio-libs/aiohttp/issues/1958>`_
+- Add a ``quote_cookie`` option to ``CookieJar``, a way to skip quotation wrapping of cookies containing special characters.
+ `#2571 <https://github.com/aio-libs/aiohttp/issues/2571>`_
+- Call ``AccessLogger.log`` with the current exception available from ``sys.exc_info()``.
+ `#3557 <https://github.com/aio-libs/aiohttp/issues/3557>`_
+- `web.UrlDispatcher.add_routes` and `web.Application.add_routes` return a list
+ of registered `AbstractRoute` instances. `AbstractRouteDef.register` (and all
+  subclasses) return a list of registered resources.
+ `#3866 <https://github.com/aio-libs/aiohttp/issues/3866>`_
+- Added properties of default ClientSession params to the ClientSession class so they are available for introspection
+ `#3882 <https://github.com/aio-libs/aiohttp/issues/3882>`_
+- Don't cancel web handler on peer disconnection, raise `OSError` on reading/writing instead.
+ `#4080 <https://github.com/aio-libs/aiohttp/issues/4080>`_
+- Implement BaseRequest.get_extra_info() to access the protocol transport's extra info.
+ `#4189 <https://github.com/aio-libs/aiohttp/issues/4189>`_
+- Added `ClientSession.timeout` property.
+ `#4191 <https://github.com/aio-libs/aiohttp/issues/4191>`_
+- Allow use of SameSite in cookies.
+ `#4224 <https://github.com/aio-libs/aiohttp/issues/4224>`_
+- Use ``loop.sendfile()`` instead of custom implementation if available.
+ `#4269 <https://github.com/aio-libs/aiohttp/issues/4269>`_
+- Apply SO_REUSEADDR to test server's socket.
+ `#4393 <https://github.com/aio-libs/aiohttp/issues/4393>`_
+- Use .raw_host instead of slower .host in client API
+ `#4402 <https://github.com/aio-libs/aiohttp/issues/4402>`_
+- Allow configuring the buffer size of input stream by passing ``read_bufsize`` argument.
+ `#4453 <https://github.com/aio-libs/aiohttp/issues/4453>`_
+- Pass tests on Python 3.8 for Windows.
+ `#4513 <https://github.com/aio-libs/aiohttp/issues/4513>`_
+- Add `method` and `url` attributes to `TraceRequestChunkSentParams` and `TraceResponseChunkReceivedParams`.
+ `#4674 <https://github.com/aio-libs/aiohttp/issues/4674>`_
+- Add ClientResponse.ok property for checking status code under 400.
+ `#4711 <https://github.com/aio-libs/aiohttp/issues/4711>`_
+- Don't ceil timeouts that are smaller than 5 seconds.
+ `#4850 <https://github.com/aio-libs/aiohttp/issues/4850>`_
+- TCPSite now listens by default on all interfaces instead of just IPv4 when `None` is passed in as the host.
+ `#4894 <https://github.com/aio-libs/aiohttp/issues/4894>`_
+- Bump ``http_parser`` to 2.9.4
+ `#5070 <https://github.com/aio-libs/aiohttp/issues/5070>`_
+
+
+Bugfixes
+--------
+
+- Fix keepalive connections not being closed in time
+ `#3296 <https://github.com/aio-libs/aiohttp/issues/3296>`_
+- Fix failed websocket handshake leaving connection hanging.
+ `#3380 <https://github.com/aio-libs/aiohttp/issues/3380>`_
+- Fix tasks cancellation order on exit. The run_app task needs to be cancelled first for cleanup hooks to run with all tasks intact.
+ `#3805 <https://github.com/aio-libs/aiohttp/issues/3805>`_
+- Don't start heartbeat until _writer is set
+ `#4062 <https://github.com/aio-libs/aiohttp/issues/4062>`_
+- Fix handling of multipart file uploads without a content type.
+ `#4089 <https://github.com/aio-libs/aiohttp/issues/4089>`_
+- Preserve view handler function attributes across middlewares
+ `#4174 <https://github.com/aio-libs/aiohttp/issues/4174>`_
+- Fix the string representation of ``ServerDisconnectedError``.
+ `#4175 <https://github.com/aio-libs/aiohttp/issues/4175>`_
+- Raise RuntimeError when trying to get the encoding of a body that has not been read
+ `#4214 <https://github.com/aio-libs/aiohttp/issues/4214>`_
+- Remove warning messages from noop.
+ `#4282 <https://github.com/aio-libs/aiohttp/issues/4282>`_
+- Raise ClientPayloadError if FormData re-processed.
+ `#4345 <https://github.com/aio-libs/aiohttp/issues/4345>`_
+- Fix a warning about unfinished task in ``web_protocol.py``
+ `#4408 <https://github.com/aio-libs/aiohttp/issues/4408>`_
+- Fixed 'deflate' compression to comply with RFC 2616.
+ `#4506 <https://github.com/aio-libs/aiohttp/issues/4506>`_
+- Fixed OverflowError on platforms with 32-bit time_t
+ `#4515 <https://github.com/aio-libs/aiohttp/issues/4515>`_
+- Fixed ``request.body_exists`` returning the wrong value for methods without a body.
+ `#4528 <https://github.com/aio-libs/aiohttp/issues/4528>`_
+- Fix connecting to link-local IPv6 addresses.
+ `#4554 <https://github.com/aio-libs/aiohttp/issues/4554>`_
+- Fix a problem with connection waiters that are never awaited.
+ `#4562 <https://github.com/aio-libs/aiohttp/issues/4562>`_
+- Always make sure the transport is not closing before reusing a connection.
+
+  Reusing a protocol based on keep-alive headers is unreliable.
+  For example, uWSGI does not support keep-alive even when it serves an
+  HTTP 1.1 request, unless it is explicitly configured with the
+  ``--http-keepalive`` option.
+
+  Servers designed like uWSGI could cause aiohttp to intermittently
+  raise a ConnectionResetException when the protocol pool runs
+  out and a protocol is reused.
+ `#4587 <https://github.com/aio-libs/aiohttp/issues/4587>`_
+- Handle the last CRLF correctly even if it is received via separate TCP segment.
+ `#4630 <https://github.com/aio-libs/aiohttp/issues/4630>`_
+- Fix the register_resource function to validate the route name before splitting it, so that route names can include Python keywords.
+ `#4691 <https://github.com/aio-libs/aiohttp/issues/4691>`_
+- Improve typing annotations for ``web.Request``, ``aiohttp.ClientResponse`` and
+ ``multipart`` module.
+ `#4736 <https://github.com/aio-libs/aiohttp/issues/4736>`_
+- Fix the resolver task not being awaited when the connector is cancelled
+ `#4795 <https://github.com/aio-libs/aiohttp/issues/4795>`_
+- Fix a bug "Aiohttp doesn't return any error on invalid request methods"
+ `#4798 <https://github.com/aio-libs/aiohttp/issues/4798>`_
+- Fix HEAD requests for static content.
+ `#4809 <https://github.com/aio-libs/aiohttp/issues/4809>`_
+- Fix incorrect size calculation for memoryview
+ `#4890 <https://github.com/aio-libs/aiohttp/issues/4890>`_
+- Add HTTPMove to ``__all__``.
+ `#4897 <https://github.com/aio-libs/aiohttp/issues/4897>`_
+- Fixed the type annotations in the ``tracing`` module.
+ `#4912 <https://github.com/aio-libs/aiohttp/issues/4912>`_
+- Fix typing for multipart ``__aiter__``.
+ `#4931 <https://github.com/aio-libs/aiohttp/issues/4931>`_
+- Fix for race condition on connections in BaseConnector that leads to exceeding the connection limit.
+ `#4936 <https://github.com/aio-libs/aiohttp/issues/4936>`_
+- Add forced UTF-8 encoding for ``application/rdap+json`` responses.
+ `#4938 <https://github.com/aio-libs/aiohttp/issues/4938>`_
+- Fix inconsistency between Python and C http request parsers in parsing pct-encoded URL.
+ `#4972 <https://github.com/aio-libs/aiohttp/issues/4972>`_
+- Fix connection closing issue in HEAD request.
+ `#5012 <https://github.com/aio-libs/aiohttp/issues/5012>`_
+- Fix type hint on BaseRunner.addresses (from ``List[str]`` to ``List[Any]``)
+ `#5086 <https://github.com/aio-libs/aiohttp/issues/5086>`_
+- Make `web.run_app()` more responsive to Ctrl+C on Windows for Python < 3.8. It slightly
+ increases CPU load as a side effect.
+ `#5098 <https://github.com/aio-libs/aiohttp/issues/5098>`_
+
+
+Improved Documentation
+----------------------
+
+- Fix example code in client quick-start
+ `#3376 <https://github.com/aio-libs/aiohttp/issues/3376>`_
+- Updated the docs so there is no contradiction in ``ttl_dns_cache`` default value
+ `#3512 <https://github.com/aio-libs/aiohttp/issues/3512>`_
+- Add 'Deploy with SSL' to docs.
+ `#4201 <https://github.com/aio-libs/aiohttp/issues/4201>`_
+- Change typing of the secure argument on StreamResponse.set_cookie from ``Optional[str]`` to ``Optional[bool]``
+ `#4204 <https://github.com/aio-libs/aiohttp/issues/4204>`_
+- Changes ``ttl_dns_cache`` type from int to Optional[int].
+ `#4270 <https://github.com/aio-libs/aiohttp/issues/4270>`_
+- Simplify README hello world example and add a documentation page for people coming from requests.
+ `#4272 <https://github.com/aio-libs/aiohttp/issues/4272>`_
+- Improve some code examples in the documentation involving websockets and starting a simple HTTP site with an AppRunner.
+ `#4285 <https://github.com/aio-libs/aiohttp/issues/4285>`_
+- Fix typo in code example in Multipart docs
+ `#4312 <https://github.com/aio-libs/aiohttp/issues/4312>`_
+- Fix code example in Multipart section.
+ `#4314 <https://github.com/aio-libs/aiohttp/issues/4314>`_
+- Update contributing guide so new contributors read the most recent version of that guide. Update command used to create test coverage reporting.
+ `#4810 <https://github.com/aio-libs/aiohttp/issues/4810>`_
+- Spelling: Change "canonize" to "canonicalize".
+ `#4986 <https://github.com/aio-libs/aiohttp/issues/4986>`_
+- Add ``aiohttp-sse-client`` library to third party usage list.
+ `#5084 <https://github.com/aio-libs/aiohttp/issues/5084>`_
+
+
+Misc
+----
+
+- `#2856 <https://github.com/aio-libs/aiohttp/issues/2856>`_, `#4218 <https://github.com/aio-libs/aiohttp/issues/4218>`_, `#4250 <https://github.com/aio-libs/aiohttp/issues/4250>`_
+
+
+----
+
+
+3.6.3 (2020-10-12)
+==================
+
+Bugfixes
+--------
+
+- Pin yarl to ``<1.6.0`` to avoid buggy behavior that will be fixed by the next aiohttp
+ release.
+
+3.6.2 (2019-10-09)
+==================
+
+Features
+--------
+
+- Made exceptions pickleable. Also changed the repr of some exceptions.
+ `#4077 <https://github.com/aio-libs/aiohttp/issues/4077>`_
+- Use ``Iterable`` type hint instead of ``Sequence`` for ``Application`` *middleware*
+ parameter. `#4125 <https://github.com/aio-libs/aiohttp/issues/4125>`_
+
+
+Bugfixes
+--------
+
+- Reset the ``sock_read`` timeout each time data is received for a
+ ``aiohttp.ClientResponse``. `#3808
+ <https://github.com/aio-libs/aiohttp/issues/3808>`_
+- Fix handling of expired cookies so they are not stored in CookieJar.
+ `#4063 <https://github.com/aio-libs/aiohttp/issues/4063>`_
+- Fix misleading message in the string representation of ``ClientConnectorError``;
+ ``self.ssl == None`` means default SSL context, not SSL disabled `#4097
+ <https://github.com/aio-libs/aiohttp/issues/4097>`_
+- Don't clobber HTTP status when using FileResponse.
+ `#4106 <https://github.com/aio-libs/aiohttp/issues/4106>`_
+
+
+Improved Documentation
+----------------------
+
+- Added minimal required logging configuration to logging documentation.
+ `#2469 <https://github.com/aio-libs/aiohttp/issues/2469>`_
+- Update docs to reflect proxy support.
+ `#4100 <https://github.com/aio-libs/aiohttp/issues/4100>`_
+- Fix typo in code example in testing docs.
+ `#4108 <https://github.com/aio-libs/aiohttp/issues/4108>`_
+
+
+Misc
+----
+
+- `#4102 <https://github.com/aio-libs/aiohttp/issues/4102>`_
+
+
+----
+
+
+3.6.1 (2019-09-19)
+==================
+
+Features
+--------
+
+- Compatibility with Python 3.8.
+ `#4056 <https://github.com/aio-libs/aiohttp/issues/4056>`_
+
+
+Bugfixes
+--------
+
+- Correct some exception string formats.
+ `#4068 <https://github.com/aio-libs/aiohttp/issues/4068>`_
+- Emit a warning when ``ssl.OP_NO_COMPRESSION`` is
+ unavailable because the runtime is built against
+ an outdated OpenSSL.
+ `#4052 <https://github.com/aio-libs/aiohttp/issues/4052>`_
+- Update multidict requirement to >= 4.5
+ `#4057 <https://github.com/aio-libs/aiohttp/issues/4057>`_
+
+
+Improved Documentation
+----------------------
+
+- Provide pytest-aiohttp namespace for pytest fixtures in docs.
+ `#3723 <https://github.com/aio-libs/aiohttp/issues/3723>`_
+
+
+----
+
+
+3.6.0 (2019-09-06)
+==================
+
+Features
+--------
+
+- Add support for Named Pipes (Site and Connector) under Windows. This feature
+ requires the Proactor event loop to work; a sketch follows this list. `#3629
+ <https://github.com/aio-libs/aiohttp/issues/3629>`_
+- Removed ``Transfer-Encoding: chunked`` header from websocket responses to be
+ compatible with more http proxy servers. `#3798
+ <https://github.com/aio-libs/aiohttp/issues/3798>`_
+- Accept non-GET request for starting websocket handshake on server side.
+ `#3980 <https://github.com/aio-libs/aiohttp/issues/3980>`_
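+
+A brief sketch of the Named Pipe feature above; ``web.NamedPipeSite`` is
+assumed to be the runner class this issue added (Windows-only, and the pipe
+name here is illustrative):
+
+.. code-block:: python
+
+    import asyncio
+    from aiohttp import web
+
+    async def handle(request):
+        return web.Response(text="hello over a named pipe")
+
+    async def main():
+        app = web.Application()
+        app.add_routes([web.get("/", handle)])
+        runner = web.AppRunner(app)
+        await runner.setup()
+        # Named pipes require the Proactor event loop on Windows.
+        site = web.NamedPipeSite(runner, r"\\.\pipe\aiohttp-example")
+        await site.start()
+        await asyncio.sleep(3600)  # keep serving for a while
+
+    loop = asyncio.ProactorEventLoop()
+    asyncio.set_event_loop(loop)
+    loop.run_until_complete(main())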
+
+
+Bugfixes
+--------
+
+- Raise a ClientResponseError instead of an AssertionError for a blank
+ HTTP Reason Phrase.
+ `#3532 <https://github.com/aio-libs/aiohttp/issues/3532>`_
+- Fix an issue where cookies would sometimes not be set during a redirect.
+ `#3576 <https://github.com/aio-libs/aiohttp/issues/3576>`_
+- Change normalize_path_middleware to use a 308 redirect instead of 301.
+
+ A 308 redirect preserves the request method, so clients can still use
+ PUT/POST on endpoints that are redirected because of a trailing slash
+ (a usage sketch follows this list).
+ `#3579 <https://github.com/aio-libs/aiohttp/issues/3579>`_
+- Drop the processed task from ``all_tasks()`` list early. It prevents logging about a
+ task with unhandled exception when the server is used in conjunction with
+ ``asyncio.run()``. `#3587 <https://github.com/aio-libs/aiohttp/issues/3587>`_
+- ``Signal`` type annotation changed from ``Signal[Callable[['TraceConfig'],
+ Awaitable[None]]]`` to ``Signal[Callable[ClientSession, SimpleNamespace, ...]]``.
+ `#3595 <https://github.com/aio-libs/aiohttp/issues/3595>`_
+- Use sanitized URL as Location header in redirects
+ `#3614 <https://github.com/aio-libs/aiohttp/issues/3614>`_
+- Improve typing annotations for multipart.py along with changes required
+ by mypy in files that reference multipart.py.
+ `#3621 <https://github.com/aio-libs/aiohttp/issues/3621>`_
+- Close the session created inside ``aiohttp.request`` when an unhandled
+ exception occurs.
+ `#3628 <https://github.com/aio-libs/aiohttp/issues/3628>`_
+- Clean up per-chunk data in generic data reads, fixing a memory leak.
+ `#3631 <https://github.com/aio-libs/aiohttp/issues/3631>`_
+- Use correct type for add_view and family
+ `#3633 <https://github.com/aio-libs/aiohttp/issues/3633>`_
+- Fix _keepalive field in __slots__ of ``RequestHandler``.
+ `#3644 <https://github.com/aio-libs/aiohttp/issues/3644>`_
+- Properly handle ConnectionResetError, to silence the "Cannot write to closing
+ transport" exception when clients disconnect uncleanly.
+ `#3648 <https://github.com/aio-libs/aiohttp/issues/3648>`_
+- Suppress pytest warnings due to ``test_utils`` classes
+ `#3660 <https://github.com/aio-libs/aiohttp/issues/3660>`_
+- Fix overshadowing of overlapped sub-application prefixes.
+ `#3701 <https://github.com/aio-libs/aiohttp/issues/3701>`_
+- Fixed return type annotation for WSMessage.json()
+ `#3720 <https://github.com/aio-libs/aiohttp/issues/3720>`_
+- Properly expose TooManyRedirects publicly as documented.
+ `#3818 <https://github.com/aio-libs/aiohttp/issues/3818>`_
+- Fix missing brackets for IPv6 in proxy CONNECT request
+ `#3841 <https://github.com/aio-libs/aiohttp/issues/3841>`_
+- Make the signature of ``aiohttp.test_utils.TestClient.request`` match
+ ``aiohttp.ClientSession.request`` according to the docs `#3852
+ <https://github.com/aio-libs/aiohttp/issues/3852>`_
+- Use correct style for re-exported imports, makes mypy ``--strict`` mode happy.
+ `#3868 <https://github.com/aio-libs/aiohttp/issues/3868>`_
+- Fixed type annotation for add_view method of UrlDispatcher to accept any subclass of
+ View `#3880 <https://github.com/aio-libs/aiohttp/issues/3880>`_
+- Made the Cython HTTP parser set the Reason-Phrase of the response to an empty
+ string if it is missing. `#3906 <https://github.com/aio-libs/aiohttp/issues/3906>`_
+- Add URL to the string representation of ClientResponseError.
+ `#3959 <https://github.com/aio-libs/aiohttp/issues/3959>`_
+- Accept ``istr`` keys in ``LooseHeaders`` type hints.
+ `#3976 <https://github.com/aio-libs/aiohttp/issues/3976>`_
+- Fixed race conditions in _resolve_host caching and throttling when tracing is enabled.
+ `#4013 <https://github.com/aio-libs/aiohttp/issues/4013>`_
+- For URLs like "unix://localhost/..." set Host HTTP header to "localhost" instead of
+ "localhost:None". `#4039 <https://github.com/aio-libs/aiohttp/issues/4039>`_
+
+
+Improved Documentation
+----------------------
+
+- Modify documentation for Background Tasks to remove deprecated usage of event loop.
+ `#3526 <https://github.com/aio-libs/aiohttp/issues/3526>`_
+- Use ``if __name__ == '__main__':`` in server examples.
+ `#3775 <https://github.com/aio-libs/aiohttp/issues/3775>`_
+- Update documentation reference to the default access logger.
+ `#3783 <https://github.com/aio-libs/aiohttp/issues/3783>`_
+- Improve documentation for ``web.BaseRequest.path`` and ``web.BaseRequest.raw_path``.
+ `#3791 <https://github.com/aio-libs/aiohttp/issues/3791>`_
+- Removed deprecation warning in tracing example docs
+ `#3964 <https://github.com/aio-libs/aiohttp/issues/3964>`_
+
+
+----
+
+
+3.5.4 (2019-01-12)
+==================
+
+Bugfixes
+--------
+
+- Fix stream ``.read()`` / ``.readany()`` / ``.iter_any()``, which used to return
+ only partial content for compressed payloads
+ `#3525 <https://github.com/aio-libs/aiohttp/issues/3525>`_
+
+
+3.5.3 (2019-01-10)
+==================
+
+Bugfixes
+--------
+
+- Fix type stubs for ``aiohttp.web.run_app(access_log=True)`` and fix edge case of
+ ``access_log=True`` and the event loop being in debug mode. `#3504
+ <https://github.com/aio-libs/aiohttp/issues/3504>`_
+- Fix ``aiohttp.ClientTimeout`` type annotations to accept ``None`` for fields
+ `#3511 <https://github.com/aio-libs/aiohttp/issues/3511>`_
+- Send custom per-request cookies even if session jar is empty
+ `#3515 <https://github.com/aio-libs/aiohttp/issues/3515>`_
+- Restore Linux binary wheels publishing on PyPI
+
+----
+
+
+3.5.2 (2019-01-08)
+==================
+
+Features
+--------
+
+- ``FileResponse`` from ``web_fileresponse.py`` uses a ``ThreadPoolExecutor`` to work
+ with files asynchronously. I/O-based payloads from ``payload.py`` use a
+ ``ThreadPoolExecutor`` to work with I/O objects asynchronously. `#3313
+ <https://github.com/aio-libs/aiohttp/issues/3313>`_
+- Internal Server Errors are rendered in plain text if the browser does not support HTML.
+ `#3483 <https://github.com/aio-libs/aiohttp/issues/3483>`_
+
+
+Bugfixes
+--------
+
+- Preserve MultipartWriter parts headers on write. Refactor how
+ ``Payload.headers`` are handled. Payload instances now always have headers and
+ Content-Type defined. Fix Payload Content-Disposition header reset after initial
+ creation. `#3035 <https://github.com/aio-libs/aiohttp/issues/3035>`_
+- Log suppressed exceptions in ``GunicornWebWorker``.
+ `#3464 <https://github.com/aio-libs/aiohttp/issues/3464>`_
+- Remove wildcard imports.
+ `#3468 <https://github.com/aio-libs/aiohttp/issues/3468>`_
+- Use the same task for app initialization and web server handling in gunicorn workers.
+ This allows Python 3.7 context variables to be used smoothly.
+ `#3471 <https://github.com/aio-libs/aiohttp/issues/3471>`_
+- Fix handling of chunked+gzipped response when first chunk does not give uncompressed
+ data `#3477 <https://github.com/aio-libs/aiohttp/issues/3477>`_
+- Replace ``collections.MutableMapping`` with ``collections.abc.MutableMapping`` to
+ avoid a deprecation warning. `#3480
+ <https://github.com/aio-libs/aiohttp/issues/3480>`_
+- ``Payload.size`` type annotation changed from ``Optional[float]`` to
+ ``Optional[int]``. `#3484 <https://github.com/aio-libs/aiohttp/issues/3484>`_
+- Ignore done tasks when cancelling pending activities on ``web.run_app`` finalization.
+ `#3497 <https://github.com/aio-libs/aiohttp/issues/3497>`_
+
+
+Improved Documentation
+----------------------
+
+- Add documentation for ``aiohttp.web.HTTPException``.
+ `#3490 <https://github.com/aio-libs/aiohttp/issues/3490>`_
+
+
+Misc
+----
+
+- `#3487 <https://github.com/aio-libs/aiohttp/issues/3487>`_
+
+
+----
+
+
+3.5.1 (2018-12-24)
+==================
+
+- Fix a regression about ``ClientSession._requote_redirect_url`` modification in debug
+ mode.
+
+3.5.0 (2018-12-22)
+==================
+
+Features
+--------
+
+- The library type annotations are checked in strict mode now.
+- Add support for setting cookies for individual request (`#2387
+ <https://github.com/aio-libs/aiohttp/pull/2387>`_)
+- Application.add_domain implementation (`#2809
+ <https://github.com/aio-libs/aiohttp/pull/2809>`_)
+- The default ``app`` in the request returned by ``test_utils.make_mocked_request`` can
+ now have objects assigned to it and retrieved using the ``[]`` operator. (`#3174
+ <https://github.com/aio-libs/aiohttp/pull/3174>`_)
+- Make ``request.url`` accessible when transport is closed. (`#3177
+ <https://github.com/aio-libs/aiohttp/pull/3177>`_)
+- Add ``zlib_executor_size`` argument to ``Response`` constructor to allow compression
+ to run in a background executor to avoid blocking the main thread and potentially
+ triggering health check failures. (`#3205
+ <https://github.com/aio-libs/aiohttp/pull/3205>`_)
+- Enable users to set ``ClientTimeout`` in ``aiohttp.request`` (`#3213
+ <https://github.com/aio-libs/aiohttp/pull/3213>`_)
+- Don't raise a warning if ``NETRC`` environment variable is not set and ``~/.netrc``
+ file doesn't exist. (`#3267 <https://github.com/aio-libs/aiohttp/pull/3267>`_)
+- Add a default logging handler to ``web.run_app``. If the ``Application.debug`` flag is set
+ and the default logger ``aiohttp.access`` is used, access logs will now be output
+ using a *stderr* ``StreamHandler`` if no handlers are attached. Furthermore, if the
+ default logger has no log level set, the log level will be set to ``DEBUG``. (`#3324
+ <https://github.com/aio-libs/aiohttp/pull/3324>`_)
+- Add a ``method`` argument to ``session.ws_connect()``. Some server APIs require
+ a different HTTP method for WebSocket connection establishment; for example,
+ ``docker exec`` needs POST (a combined client sketch follows this list).
+ (`#3378 <https://github.com/aio-libs/aiohttp/pull/3378>`_)
+- Create a task per request handling. (`#3406
+ <https://github.com/aio-libs/aiohttp/pull/3406>`_)
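+
+A combined, non-authoritative sketch of the client-side features above
+(per-request cookies, ``ClientTimeout`` for ``aiohttp.request``, and the
+``method`` argument of ``ws_connect``); all URLs are placeholders:
+
+.. code-block:: python
+
+    import asyncio
+    import aiohttp
+
+    async def main():
+        # ClientTimeout can now be passed to the module-level aiohttp.request().
+        timeout = aiohttp.ClientTimeout(total=10)
+        async with aiohttp.request("GET", "http://example.com", timeout=timeout) as resp:
+            print(resp.status)
+
+        async with aiohttp.ClientSession() as session:
+            # Cookies can be set for an individual request.
+            async with session.get("http://example.com", cookies={"token": "abc"}) as resp:
+                print(resp.status)
+
+            # Some servers (e.g. ``docker exec``) require a non-GET handshake.
+            async with session.ws_connect("http://example.com/ws", method="POST") as ws:
+                await ws.send_str("hello")
+
+    asyncio.get_event_loop().run_until_complete(main())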
+
+
+Bugfixes
+--------
+
+- Enable passing ``access_log_class`` via ``handler_args`` (`#3158
+ <https://github.com/aio-libs/aiohttp/pull/3158>`_)
+- Return empty bytes with end-of-chunk marker in empty stream reader. (`#3186
+ <https://github.com/aio-libs/aiohttp/pull/3186>`_)
+- Accept ``CIMultiDictProxy`` instances for ``headers`` argument in ``web.Response``
+ constructor. (`#3207 <https://github.com/aio-libs/aiohttp/pull/3207>`_)
+- Don't uppercase HTTP method in parser (`#3233
+ <https://github.com/aio-libs/aiohttp/pull/3233>`_)
+- Make method match regexp RFC-7230 compliant (`#3235
+ <https://github.com/aio-libs/aiohttp/pull/3235>`_)
+- Add ``app.pre_frozen`` state to properly handle startup signals in
+ sub-applications. (`#3237 <https://github.com/aio-libs/aiohttp/pull/3237>`_)
+- Enhanced parsing and validation of helpers.BasicAuth.decode. (`#3239
+ <https://github.com/aio-libs/aiohttp/pull/3239>`_)
+- Change imports from collections module in preparation for 3.8. (`#3258
+ <https://github.com/aio-libs/aiohttp/pull/3258>`_)
+- Ensure the Host header is added first to ClientRequest to better replicate
+ browser behavior. (`#3265 <https://github.com/aio-libs/aiohttp/pull/3265>`_)
+- Fix forward compatibility with Python 3.8: importing ABCs directly from the
+ collections module will not be supported anymore. (`#3273
+ <https://github.com/aio-libs/aiohttp/pull/3273>`_)
+- Keep the query string in ``normalize_path_middleware``. (`#3278
+ <https://github.com/aio-libs/aiohttp/pull/3278>`_)
+- Fix missing parameter ``raise_for_status`` for aiohttp.request() (`#3290
+ <https://github.com/aio-libs/aiohttp/pull/3290>`_)
+- Bracket IPv6 addresses in the HOST header (`#3304
+ <https://github.com/aio-libs/aiohttp/pull/3304>`_)
+- Fix default message for server ping and pong frames. (`#3308
+ <https://github.com/aio-libs/aiohttp/pull/3308>`_)
+- Fix tests/test_connector.py typo and tests/autobahn/server.py duplicate loop
+ def. (`#3337 <https://github.com/aio-libs/aiohttp/pull/3337>`_)
+- Fix false-negative indicator end_of_HTTP_chunk in StreamReader.readchunk function
+ (`#3361 <https://github.com/aio-libs/aiohttp/pull/3361>`_)
+- Release HTTP response before raising status exception (`#3364
+ <https://github.com/aio-libs/aiohttp/pull/3364>`_)
+- Fix task cancellation when ``sendfile()`` syscall is used by static file
+ handling. (`#3383 <https://github.com/aio-libs/aiohttp/pull/3383>`_)
+- Fix stack trace for ``asyncio.TimeoutError`` which was not logged, when it is caught
+ in the handler. (`#3414 <https://github.com/aio-libs/aiohttp/pull/3414>`_)
+
+
+Improved Documentation
+----------------------
+
+- Improve documentation of ``Application.make_handler`` parameters. (`#3152
+ <https://github.com/aio-libs/aiohttp/pull/3152>`_)
+- Fix BaseRequest.raw_headers doc. (`#3215
+ <https://github.com/aio-libs/aiohttp/pull/3215>`_)
+- Fix typo in TypeError exception reason in ``web.Application._handle`` (`#3229
+ <https://github.com/aio-libs/aiohttp/pull/3229>`_)
+- Make server access log format placeholder %b documentation reflect
+ behavior and docstring. (`#3307 <https://github.com/aio-libs/aiohttp/pull/3307>`_)
+
+
+Deprecations and Removals
+-------------------------
+
+- Deprecate modification of ``session.requote_redirect_url`` (`#2278
+ <https://github.com/aio-libs/aiohttp/pull/2278>`_)
+- Deprecate ``stream.unread_data()`` (`#3260
+ <https://github.com/aio-libs/aiohttp/pull/3260>`_)
+- Deprecated use of boolean in ``resp.enable_compression()`` (`#3318
+ <https://github.com/aio-libs/aiohttp/pull/3318>`_)
+- Encourage creation of aiohttp public objects inside a coroutine (`#3331
+ <https://github.com/aio-libs/aiohttp/pull/3331>`_)
+- Drop dead ``Connection.detach()`` and ``Connection.writer``. Both methods were broken
+ for more than 2 years. (`#3358 <https://github.com/aio-libs/aiohttp/pull/3358>`_)
+- Deprecate ``app.loop``, ``request.loop``, ``client.loop`` and ``connector.loop``
+ properties. (`#3374 <https://github.com/aio-libs/aiohttp/pull/3374>`_)
+- Deprecate explicit debug argument. Use asyncio debug mode instead. (`#3381
+ <https://github.com/aio-libs/aiohttp/pull/3381>`_)
+- Deprecate body parameter in HTTPException (and derived classes) constructor. (`#3385
+ <https://github.com/aio-libs/aiohttp/pull/3385>`_)
+- Deprecate bare connector close; use ``async with connector:`` and ``await
+ connector.close()`` instead (see the sketch after this list). (`#3417
+ <https://github.com/aio-libs/aiohttp/pull/3417>`_)
+- Deprecate obsolete ``read_timeout`` and ``conn_timeout`` in ``ClientSession``
+ constructor. (`#3438 <https://github.com/aio-libs/aiohttp/pull/3438>`_)
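+
+A small sketch of the connector lifecycle these deprecations point to
+(objects created inside a coroutine, connector closed with ``await``);
+``connector_owner=False`` is used here only so the explicit close is
+meaningful:
+
+.. code-block:: python
+
+    import asyncio
+    import aiohttp
+
+    async def main():
+        # Create aiohttp objects inside a coroutine, not at import time.
+        connector = aiohttp.TCPConnector(limit=10)
+        async with aiohttp.ClientSession(
+            connector=connector, connector_owner=False
+        ) as session:
+            async with session.get("http://example.com") as resp:
+                print(resp.status)
+        # A bare, non-awaited connector.close() is deprecated.
+        await connector.close()
+
+    asyncio.get_event_loop().run_until_complete(main())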
+
+
+Misc
+----
+
+- `#3341 <https://github.com/aio-libs/aiohttp/pull/3341>`_, `#3351 <https://github.com/aio-libs/aiohttp/pull/3351>`_
diff --git a/third_party/python/aiohttp/CONTRIBUTORS.txt b/third_party/python/aiohttp/CONTRIBUTORS.txt
new file mode 100644
index 0000000000..ad63ce9e4d
--- /dev/null
+++ b/third_party/python/aiohttp/CONTRIBUTORS.txt
@@ -0,0 +1,312 @@
+- Contributors -
+----------------
+A. Jesse Jiryu Davis
+Adam Bannister
+Adam Cooper
+Adam Mills
+Adrian Krupa
+Adrián Chaves
+Alan Tse
+Alec Hanefeld
+Alejandro Gómez
+Aleksandr Danshyn
+Aleksey Kutepov
+Alex Hayes
+Alex Key
+Alex Khomchenko
+Alex Kuzmenko
+Alex Lisovoy
+Alexander Bayandin
+Alexander Karpinsky
+Alexander Koshevoy
+Alexander Malev
+Alexander Mohr
+Alexander Shorin
+Alexander Travov
+Alexandru Mihai
+Alexey Firsov
+Alexey Popravka
+Alexey Stepanov
+Amin Etesamian
+Amit Tulshyan
+Amy Boyle
+Anders Melchiorsen
+Andrei Ursulenko
+Andrej Antonov
+Andrew Leech
+Andrew Lytvyn
+Andrew Svetlov
+Andrew Zhou
+Andrii Soldatenko
+Antoine Pietri
+Anton Kasyanov
+Anton Zhdan-Pushkin
+Arseny Timoniq
+Artem Yushkovskiy
+Arthur Darcet
+Ben Bader
+Ben Timby
+Benedikt Reinartz
+Boris Feld
+Boyi Chen
+Brett Cannon
+Brian C. Lane
+Brian Muller
+Bruce Merry
+Bryan Kok
+Bryce Drennan
+Carl George
+Cecile Tonglet
+Chien-Wei Huang
+Chih-Yuan Chen
+Chris AtLee
+Chris Laws
+Chris Moore
+Christopher Schmitt
+Claudiu Popa
+Colin Dunklau
+Cong Xu
+Damien Nadé
+Dan Xu
+Daniel García
+Daniel Grossmann-Kavanagh
+Daniel Nelson
+Danny Song
+David Bibb
+David Michael Brown
+Denilson Amorim
+Denis Matiychuk
+Dennis Kliban
+Dima Veselov
+Dimitar Dimitrov
+Dmitriy Safonov
+Dmitry Doroshev
+Dmitry Erlikh
+Dmitry Lukashin
+Dmitry Marakasov
+Dmitry Shamov
+Dmitry Trofimov
+Dmytro Bohomiakov
+Dmytro Kuznetsov
+Dustin J. Mitchell
+Eduard Iskandarov
+Eli Ribble
+Elizabeth Leddy
+Enrique Saez
+Eric Sheng
+Erich Healy
+Erik Peterson
+Eugene Chernyshov
+Eugene Naydenov
+Eugene Nikolaiev
+Eugene Tolmachev
+Evan Kepner
+Evert Lammerts
+Felix Yan
+Fernanda Guimarães
+FichteFoll
+Florian Scheffler
+Frederik Gladhorn
+Frederik Peter Aalund
+Gabriel Tremblay
+Gary Wilson Jr.
+Gennady Andreyev
+Georges Dubus
+Greg Holt
+Gregory Haynes
+Gus Goulart
+Gustavo Carneiro
+Günther Jena
+Hans Adema
+Harmon Y.
+Hrishikesh Paranjape
+Hu Bo
+Hugh Young
+Hugo Herter
+Hynek Schlawack
+Igor Alexandrov
+Igor Davydenko
+Igor Mozharovsky
+Igor Pavlov
+Illia Volochii
+Ilya Chichak
+Ilya Gruzinov
+Ingmar Steen
+Jacob Champion
+Jaesung Lee
+Jake Davis
+Jakob Ackermann
+Jakub Wilk
+Jashandeep Sohi
+Jens Steinhauser
+Jeonghun Lee
+Jeongkyu Shin
+Jeroen van der Heijden
+Jesus Cea
+Jian Zeng
+Jinkyu Yi
+Joel Watts
+Jon Nabozny
+Jonas Krüger Svensson
+Jonas Obrist
+Jonathan Wright
+Jonny Tan
+Joongi Kim
+Josep Cugat
+Josh Junon
+Joshu Coats
+Julia Tsemusheva
+Julien Duponchelle
+Jungkook Park
+Junjie Tao
+Junyeong Jeong
+Justas Trimailovas
+Justin Foo
+Justin Turner Arthur
+Kay Zheng
+Kevin Samuel
+Kimmo Parviainen-Jalanko
+Kirill Klenov
+Kirill Malovitsa
+Konstantin Valetov
+Krzysztof Blazewicz
+Kyrylo Perevozchikov
+Kyungmin Lee
+Lars P. Søndergaard
+Liu Hua
+Louis-Philippe Huberdeau
+Loïc Lajeanne
+Lu Gong
+Lubomir Gelo
+Ludovic Gasc
+Luis Pedrosa
+Lukasz Marcin Dobrzanski
+Makc Belousow
+Manuel Miranda
+Marat Sharafutdinov
+Marco Paolini
+Mariano Anaya
+Martijn Pieters
+Martin Melka
+Martin Richard
+Mathias Fröjdman
+Mathieu Dugré
+Matthieu Hauglustaine
+Matthieu Rigal
+Michael Ihnatenko
+Michał Górny
+Mikhail Burshteyn
+Mikhail Kashkin
+Mikhail Lukyanchenko
+Mikhail Nacharov
+Misha Behersky
+Mitchell Ferree
+Morgan Delahaye-Prat
+Moss Collum
+Mun Gwan-gyeong
+Navid Sheikhol
+Nicolas Braem
+Nikolay Kim
+Nikolay Novik
+Oisin Aylward
+Olaf Conradi
+Pahaz Blinov
+Panagiotis Kolokotronis
+Pankaj Pandey
+Pau Freixes
+Paul Colomiets
+Paulius Šileikis
+Paulus Schoutsen
+Pavel Kamaev
+Pavel Polyakov
+Pawel Kowalski
+Pawel Miech
+Pepe Osca
+Philipp A.
+Pieter van Beek
+Rafael Viotti
+Raphael Bialon
+Raúl Cumplido
+Required Field
+Robert Lu
+Robert Nikolich
+Roman Podoliaka
+Samuel Colvin
+Sean Hunt
+Sebastian Acuna
+Sebastian Hanula
+Sebastian Hüther
+Sebastien Geffroy
+SeongSoo Cho
+Sergey Ninua
+Sergey Skripnick
+Serhii Charykov
+Serhii Kostel
+Serhiy Storchaka
+Simon Kennedy
+Sin-Woo Bang
+Stanislas Plum
+Stanislav Prokop
+Stefan Tjarks
+Stepan Pletnev
+Stephan Jaensch
+Stephen Granade
+Steven Seguin
+Sunghyun Hwang
+Sunit Deshpande
+Sviatoslav Bulbakha
+Sviatoslav Sydorenko
+Taha Jahangir
+Taras Voinarovskyi
+Terence Honles
+Thanos Lefteris
+Thijs Vermeir
+Thomas Forbes
+Thomas Grainger
+Tolga Tezel
+Tomasz Trebski
+Toshiaki Tanaka
+Trinh Hoang Nhu
+Vadim Suharnikov
+Vaibhav Sagar
+Vamsi Krishna Avula
+Vasiliy Faronov
+Vasyl Baran
+Viacheslav Greshilov
+Victor Collod
+Victor Kovtun
+Vikas Kawadia
+Viktor Danyliuk
+Ville Skyttä
+Vincent Maillol
+Vitalik Verhovodov
+Vitaly Haritonsky
+Vitaly Magerya
+Vladimir Kamarzin
+Vladimir Kozlovski
+Vladimir Rutsky
+Vladimir Shulyak
+Vladimir Zakharov
+Vladyslav Bohaichuk
+Vladyslav Bondar
+W. Trevor King
+Wei Lin
+Weiwei Wang
+Will McGugan
+Willem de Groot
+William Grzybowski
+William S.
+Wilson Ong
+Yang Zhou
+Yannick Koechlin
+Yannick Péroux
+Ye Cao
+Yegor Roganov
+Yifei Kong
+Young-Ho Cha
+Yuriy Shatrov
+Yury Selivanov
+Yusuke Tsutsumi
+Zlatan Sičanica
+Марк Коренберг
+Семён Марьясин
diff --git a/third_party/python/aiohttp/LICENSE.txt b/third_party/python/aiohttp/LICENSE.txt
new file mode 100644
index 0000000000..90c9d01bc5
--- /dev/null
+++ b/third_party/python/aiohttp/LICENSE.txt
@@ -0,0 +1,201 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2013-2020 aiohttp maintainers
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/python/aiohttp/MANIFEST.in b/third_party/python/aiohttp/MANIFEST.in
new file mode 100644
index 0000000000..05084efddb
--- /dev/null
+++ b/third_party/python/aiohttp/MANIFEST.in
@@ -0,0 +1,20 @@
+include LICENSE.txt
+include CHANGES.rst
+include README.rst
+include CONTRIBUTORS.txt
+include Makefile
+graft aiohttp
+graft docs
+graft examples
+graft tests
+recursive-include vendor *
+global-include aiohttp *.pyi
+global-exclude *.pyc
+global-exclude *.pyd
+global-exclude *.so
+global-exclude *.lib
+global-exclude *.dll
+global-exclude *.a
+global-exclude *.obj
+exclude aiohttp/*.html
+prune docs/_build
diff --git a/third_party/python/aiohttp/Makefile b/third_party/python/aiohttp/Makefile
new file mode 100644
index 0000000000..5e4a9eaf11
--- /dev/null
+++ b/third_party/python/aiohttp/Makefile
@@ -0,0 +1,144 @@
+# Some simple testing tasks (sorry, UNIX only).
+
+to-hash-one = $(dir $1).hash/$(addsuffix .hash,$(notdir $1))
+to-hash = $(foreach fname,$1,$(call to-hash-one,$(fname)))
+
+CYS := $(wildcard aiohttp/*.pyx) $(wildcard aiohttp/*.pyi) $(wildcard aiohttp/*.pxd)
+PYXS := $(wildcard aiohttp/*.pyx)
+CS := $(wildcard aiohttp/*.c)
+PYS := $(wildcard aiohttp/*.py)
+REQS := $(wildcard requirements/*.txt)
+ALLS := $(sort $(CYS) $(CS) $(PYS) $(REQS))
+
+.PHONY: all
+all: test
+
+tst:
+ @echo $(call to-hash,requirements/cython.txt)
+ @echo $(call to-hash,aiohttp/%.pyx)
+
+
+# Recipe from https://www.cmcrossroads.com/article/rebuilding-when-files-checksum-changes
+FORCE:
+
+# check_sum.py works perfectly fine but is slow when called for every file from
+# $(ALLS) (perhaps even several times for each file).
+# That is why this much less readable but faster solution exists.
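+# How it works: for every tracked file F, a sibling .hash/F.hash file stores a
+# sha256 checksum. The %.hash rule rewrites that file only when the stored
+# checksum no longer matches, so targets depending on $(call to-hash,F) are
+# rebuilt exactly when F's content changes rather than on every timestamp bump.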
+ifneq (, $(shell which sha256sum))
+%.hash: FORCE
+ $(eval $@_ABS := $(abspath $@))
+ $(eval $@_NAME := $($@_ABS))
+ $(eval $@_HASHDIR := $(dir $($@_ABS)))
+ $(eval $@_TMP := $($@_HASHDIR)../$(notdir $($@_ABS)))
+ $(eval $@_ORIG := $(subst /.hash/../,/,$(basename $($@_TMP))))
+ @#echo ==== $($@_ABS) $($@_HASHDIR) $($@_NAME) $($@_TMP) $($@_ORIG)
+ @if ! (sha256sum --check $($@_ABS) 1>/dev/null 2>/dev/null); then \
+ mkdir -p $($@_HASHDIR); \
+ echo re-hash $($@_ORIG); \
+ sha256sum $($@_ORIG) > $($@_ABS); \
+ fi
+else
+%.hash: FORCE
+ @./tools/check_sum.py $@ # --debug
+endif
+
+# Enumerate intermediate files so they aren't removed automatically.
+.SECONDARY: $(call to-hash,$(ALLS))
+
+
+.install-cython: $(call to-hash,requirements/cython.txt)
+ pip install -r requirements/cython.txt
+ @touch .install-cython
+
+aiohttp/_find_header.c: $(call to-hash,aiohttp/hdrs.py ./tools/gen.py)
+ ./tools/gen.py
+
+# _find_headers generator creates _headers.pyi as well
+aiohttp/%.c: aiohttp/%.pyx $(call to-hash,$(CYS)) aiohttp/_find_header.c
+ cython -3 -o $@ $< -I aiohttp
+
+
+.PHONY: cythonize
+cythonize: .install-cython $(PYXS:.pyx=.c)
+
+.install-deps: .install-cython $(PYXS:.pyx=.c) $(call to-hash,$(CYS) $(REQS))
+ pip install -r requirements/dev.txt
+ @touch .install-deps
+
+.PHONY: lint
+lint: fmt mypy
+
+.PHONY: fmt format
+fmt format:
+ python -m pre_commit run --all-files --show-diff-on-failure
+
+.PHONY: mypy
+mypy:
+ mypy aiohttp
+
+.develop: .install-deps $(call to-hash,$(PYS) $(CYS) $(CS))
+ pip install -e .
+ @touch .develop
+
+.PHONY: test
+test: .develop
+ @pytest -q
+
+.PHONY: vtest
+vtest: .develop
+ @pytest -s -v
+
+.PHONY: vvtest
+vvtest: .develop
+ @pytest -vv
+
+.PHONY: clean
+clean:
+ @rm -rf `find . -name __pycache__`
+ @rm -rf `find . -name .hash`
+ @rm -rf `find . -name .md5` # old styling
+ @rm -f `find . -type f -name '*.py[co]' `
+ @rm -f `find . -type f -name '*~' `
+ @rm -f `find . -type f -name '.*~' `
+ @rm -f `find . -type f -name '@*' `
+ @rm -f `find . -type f -name '#*#' `
+ @rm -f `find . -type f -name '*.orig' `
+ @rm -f `find . -type f -name '*.rej' `
+ @rm -f `find . -type f -name '*.md5' ` # old styling
+ @rm -f .coverage
+ @rm -rf htmlcov
+ @rm -rf build
+ @rm -rf cover
+ @make -C docs clean
+ @python setup.py clean
+ @rm -f aiohttp/*.so
+ @rm -f aiohttp/*.pyd
+ @rm -f aiohttp/*.html
+ @rm -f aiohttp/_frozenlist.c
+ @rm -f aiohttp/_find_header.c
+ @rm -f aiohttp/_http_parser.c
+ @rm -f aiohttp/_http_writer.c
+ @rm -f aiohttp/_websocket.c
+ @rm -rf .tox
+ @rm -f .develop
+ @rm -f .flake
+ @rm -rf aiohttp.egg-info
+ @rm -f .install-deps
+ @rm -f .install-cython
+
+.PHONY: doc
+doc:
+ @make -C docs html SPHINXOPTS="-W --keep-going -E"
+ @echo "open file://`pwd`/docs/_build/html/index.html"
+
+.PHONY: doc-spelling
+doc-spelling:
+ @make -C docs spelling SPHINXOPTS="-W -E"
+
+.PHONY: install
+install:
+ @pip install -U 'pip'
+ @pip install -Ur requirements/dev.txt
+
+.PHONY: install-dev
+install-dev: .develop
diff --git a/third_party/python/aiohttp/PKG-INFO b/third_party/python/aiohttp/PKG-INFO
new file mode 100644
index 0000000000..a0c00158c7
--- /dev/null
+++ b/third_party/python/aiohttp/PKG-INFO
@@ -0,0 +1,966 @@
+Metadata-Version: 2.1
+Name: aiohttp
+Version: 3.7.4.post0
+Summary: Async http client/server framework (asyncio)
+Home-page: https://github.com/aio-libs/aiohttp
+Author: Nikolay Kim
+Author-email: fafhrd91@gmail.com
+Maintainer: Nikolay Kim <fafhrd91@gmail.com>, Andrew Svetlov <andrew.svetlov@gmail.com>
+Maintainer-email: aio-libs@googlegroups.com
+License: Apache 2
+Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+Project-URL: CI: Azure Pipelines, https://dev.azure.com/aio-libs/aiohttp/_build
+Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/aiohttp
+Project-URL: Docs: RTD, https://docs.aiohttp.org
+Project-URL: GitHub: issues, https://github.com/aio-libs/aiohttp/issues
+Project-URL: GitHub: repo, https://github.com/aio-libs/aiohttp
+Description: ==================================
+ Async http client/server framework
+ ==================================
+
+ .. image:: https://raw.githubusercontent.com/aio-libs/aiohttp/master/docs/_static/aiohttp-icon-128x128.png
+ :height: 64px
+ :width: 64px
+ :alt: aiohttp logo
+
+ |
+
+ .. image:: https://github.com/aio-libs/aiohttp/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI
+ :alt: GitHub Actions status for master branch
+
+ .. image:: https://codecov.io/gh/aio-libs/aiohttp/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/aiohttp
+ :alt: codecov.io status for master branch
+
+ .. image:: https://badge.fury.io/py/aiohttp.svg
+ :target: https://pypi.org/project/aiohttp
+ :alt: Latest PyPI package version
+
+ .. image:: https://readthedocs.org/projects/aiohttp/badge/?version=latest
+ :target: https://docs.aiohttp.org/
+ :alt: Latest Read The Docs
+
+ .. image:: https://img.shields.io/discourse/status?server=https%3A%2F%2Faio-libs.discourse.group
+ :target: https://aio-libs.discourse.group
+ :alt: Discourse status
+
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+
+ Key Features
+ ============
+
+ - Supports both client and server side of the HTTP protocol.
+ - Supports both client and server Web-Sockets out-of-the-box and avoids
+ Callback Hell.
+ - Provides a Web-server with middlewares and pluggable routing.
+
+
+ Getting started
+ ===============
+
+ Client
+ ------
+
+ To get something from the web:
+
+ .. code-block:: python
+
+ import aiohttp
+ import asyncio
+
+ async def main():
+
+ async with aiohttp.ClientSession() as session:
+ async with session.get('http://python.org') as response:
+
+ print("Status:", response.status)
+ print("Content-type:", response.headers['content-type'])
+
+ html = await response.text()
+ print("Body:", html[:15], "...")
+
+ loop = asyncio.get_event_loop()
+ loop.run_until_complete(main())
+
+ This prints:
+
+ .. code-block::
+
+ Status: 200
+ Content-type: text/html; charset=utf-8
+ Body: <!doctype html> ...
+
+ Coming from `requests <https://requests.readthedocs.io/>`_? Read `why we need so many lines <https://aiohttp.readthedocs.io/en/latest/http_request_lifecycle.html>`_.
+
+ Server
+ ------
+
+ An example using a simple server:
+
+ .. code-block:: python
+
+ # examples/server_simple.py
+ from aiohttp import web
+
+ async def handle(request):
+ name = request.match_info.get('name', "Anonymous")
+ text = "Hello, " + name
+ return web.Response(text=text)
+
+ async def wshandle(request):
+ ws = web.WebSocketResponse()
+ await ws.prepare(request)
+
+ async for msg in ws:
+ if msg.type == web.WSMsgType.text:
+ await ws.send_str("Hello, {}".format(msg.data))
+ elif msg.type == web.WSMsgType.binary:
+ await ws.send_bytes(msg.data)
+ elif msg.type == web.WSMsgType.close:
+ break
+
+ return ws
+
+
+ app = web.Application()
+ app.add_routes([web.get('/', handle),
+ web.get('/echo', wshandle),
+ web.get('/{name}', handle)])
+
+ if __name__ == '__main__':
+ web.run_app(app)
+
+
+ Documentation
+ =============
+
+ https://aiohttp.readthedocs.io/
+
+
+ Demos
+ =====
+
+ https://github.com/aio-libs/aiohttp-demos
+
+
+ External links
+ ==============
+
+ * `Third party libraries
+ <http://aiohttp.readthedocs.io/en/latest/third_party.html>`_
+ * `Built with aiohttp
+ <http://aiohttp.readthedocs.io/en/latest/built_with.html>`_
+ * `Powered by aiohttp
+ <http://aiohttp.readthedocs.io/en/latest/powered_by.html>`_
+
+ Feel free to make a Pull Request for adding your link to these pages!
+
+
+ Communication channels
+ ======================
+
+ *aio-libs discourse group*: https://aio-libs.discourse.group
+
+ *gitter chat* https://gitter.im/aio-libs/Lobby
+
+ We support `Stack Overflow
+ <https://stackoverflow.com/questions/tagged/aiohttp>`_.
+ Please add *aiohttp* tag to your question there.
+
+ Requirements
+ ============
+
+ - Python >= 3.6
+ - async-timeout_
+ - attrs_
+ - chardet_
+ - multidict_
+ - yarl_
+
+ Optionally you may install the cChardet_ and aiodns_ libraries (highly
+ recommended for the sake of speed).
+
+ .. _chardet: https://pypi.python.org/pypi/chardet
+ .. _aiodns: https://pypi.python.org/pypi/aiodns
+ .. _attrs: https://github.com/python-attrs/attrs
+ .. _multidict: https://pypi.python.org/pypi/multidict
+ .. _yarl: https://pypi.python.org/pypi/yarl
+ .. _async-timeout: https://pypi.python.org/pypi/async_timeout
+ .. _cChardet: https://pypi.python.org/pypi/cchardet
+
+ License
+ =======
+
+ ``aiohttp`` is offered under the Apache 2 license.
+
+
+ Keepsafe
+ ========
+
+ The aiohttp community would like to thank Keepsafe
+ (https://www.getkeepsafe.com) for its support in the early days of
+ the project.
+
+
+ Source code
+ ===========
+
+ The latest developer version is available in a GitHub repository:
+ https://github.com/aio-libs/aiohttp
+
+ Benchmarks
+ ==========
+
+ If you are interested in efficiency, the AsyncIO community maintains a
+ list of benchmarks on the official wiki:
+ https://github.com/python/asyncio/wiki/Benchmarks
+
+ =========
+ Changelog
+ =========
+
+ ..
+ You should *NOT* be adding new change log entries to this file, this
+ file is managed by towncrier. You *may* edit previous change logs to
+ fix problems like typo corrections or such.
+ To add a new change log entry, please see
+ https://pip.pypa.io/en/latest/development/#adding-a-news-entry
+ (note that we named the news folder "changes").
+
+ WARNING: Don't drop the next directive!
+
+ .. towncrier release notes start
+
+ 3.7.4.post0 (2021-03-06)
+ ========================
+
+ Misc
+ ----
+
+ - Bumped upper bound of the ``chardet`` runtime dependency
+ to allow its v4.0 version stream.
+ `#5366 <https://github.com/aio-libs/aiohttp/issues/5366>`_
+
+
+ ----
+
+
+ 3.7.4 (2021-02-25)
+ ==================
+
+ Bugfixes
+ --------
+
+ - **(SECURITY BUG)** Started preventing open redirects in the
+ ``aiohttp.web.normalize_path_middleware`` middleware. For
+ more details, see
+ https://github.com/aio-libs/aiohttp/security/advisories/GHSA-v6wp-4m6f-gcjg.
+
+ Thanks to `Beast Glatisant <https://github.com/g147>`__ for
+ finding the first instance of this issue and `Jelmer Vernooij
+ <https://jelmer.uk/>`__ for reporting and tracking it down
+ in aiohttp.
+ `#5497 <https://github.com/aio-libs/aiohttp/issues/5497>`_
+ - Fix an interpretation difference in how the pure-Python and the Cython-based
+ HTTP parsers construct a ``yarl.URL`` object for the HTTP request-target.
+
+ Before this fix, the Python parser would turn the URI's absolute-path
+ for ``//some-path`` into ``/`` while the Cython code preserved it as
+ ``//some-path``. Now, both do the latter.
+ `#5498 <https://github.com/aio-libs/aiohttp/issues/5498>`_
+
+
+ ----
+
+
+ 3.7.3 (2020-11-18)
+ ==================
+
+ Features
+ --------
+
+ - Use Brotli instead of brotlipy
+ `#3803 <https://github.com/aio-libs/aiohttp/issues/3803>`_
+ - Made exceptions pickleable. Also changed the repr of some exceptions.
+ `#4077 <https://github.com/aio-libs/aiohttp/issues/4077>`_
+
+
+ Bugfixes
+ --------
+
+ - Raise a ClientResponseError instead of an AssertionError for a blank
+ HTTP Reason Phrase.
+ `#3532 <https://github.com/aio-libs/aiohttp/issues/3532>`_
+ - Fix ``web_middlewares.normalize_path_middleware`` behavior for patch without slash.
+ `#3669 <https://github.com/aio-libs/aiohttp/issues/3669>`_
+ - Fix overshadowing of overlapped sub-applications prefixes.
+ `#3701 <https://github.com/aio-libs/aiohttp/issues/3701>`_
+ - Make `BaseConnector.close()` a coroutine and wait until the client closes all connections. Drop deprecated "with Connector():" syntax.
+ `#3736 <https://github.com/aio-libs/aiohttp/issues/3736>`_
+ - Reset the ``sock_read`` timeout each time data is received for a ``aiohttp.client`` response.
+ `#3808 <https://github.com/aio-libs/aiohttp/issues/3808>`_
+ - Fixed type annotation for add_view method of UrlDispatcher to accept any subclass of View
+ `#3880 <https://github.com/aio-libs/aiohttp/issues/3880>`_
+ - Fixed querying DNS only for the address families that the current host supports.
+ `#5156 <https://github.com/aio-libs/aiohttp/issues/5156>`_
+ - Change return type of MultipartReader.__aiter__() and BodyPartReader.__aiter__() to AsyncIterator.
+ `#5163 <https://github.com/aio-libs/aiohttp/issues/5163>`_
+ - Provide x86 Windows wheels.
+ `#5230 <https://github.com/aio-libs/aiohttp/issues/5230>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Add documentation for ``aiohttp.web.FileResponse``.
+ `#3958 <https://github.com/aio-libs/aiohttp/issues/3958>`_
+ - Removed deprecation warning in tracing example docs
+ `#3964 <https://github.com/aio-libs/aiohttp/issues/3964>`_
+ - Fixed wrong "Usage" docstring of ``aiohttp.client.request``.
+ `#4603 <https://github.com/aio-libs/aiohttp/issues/4603>`_
+ - Add aiohttp-pydantic to third party libraries
+ `#5228 <https://github.com/aio-libs/aiohttp/issues/5228>`_
+
+
+ Misc
+ ----
+
+ - `#4102 <https://github.com/aio-libs/aiohttp/issues/4102>`_
+
+
+ ----
+
+
+ 3.7.2 (2020-10-27)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fixed static files handling for loops without ``.sendfile()`` support
+ `#5149 <https://github.com/aio-libs/aiohttp/issues/5149>`_
+
+
+ ----
+
+
+ 3.7.1 (2020-10-25)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fixed a type error caused by the conditional import of `Protocol`.
+ `#5111 <https://github.com/aio-libs/aiohttp/issues/5111>`_
+ - Server doesn't send Content-Length for 1xx or 204
+ `#4901 <https://github.com/aio-libs/aiohttp/issues/4901>`_
+ - Fix run_app typing
+ `#4957 <https://github.com/aio-libs/aiohttp/issues/4957>`_
+ - Always require ``typing_extensions`` library.
+ `#5107 <https://github.com/aio-libs/aiohttp/issues/5107>`_
+ - Fix a variable-shadowing bug causing `ThreadedResolver.resolve` to
+ return the resolved IP as the ``hostname`` in each record, which prevented
+ validation of HTTPS connections.
+ `#5110 <https://github.com/aio-libs/aiohttp/issues/5110>`_
+ - Added annotations to all public attributes.
+ `#5115 <https://github.com/aio-libs/aiohttp/issues/5115>`_
+ - Fix flaky test_when_timeout_smaller_second
+ `#5116 <https://github.com/aio-libs/aiohttp/issues/5116>`_
+ - Ensure sending a zero byte file does not throw an exception
+ `#5124 <https://github.com/aio-libs/aiohttp/issues/5124>`_
+ - Fix a bug in ``web.run_app()`` about Python version checking on Windows
+ `#5127 <https://github.com/aio-libs/aiohttp/issues/5127>`_
+
+
+ ----
+
+
+ 3.7.0 (2020-10-24)
+ ==================
+
+ Features
+ --------
+
+ - Response headers are now prepared prior to running ``on_response_prepare`` hooks, directly before headers are sent to the client.
+ `#1958 <https://github.com/aio-libs/aiohttp/issues/1958>`_
+ - Add a ``quote_cookie`` option to ``CookieJar``, a way to skip quotation wrapping
+ of cookies containing special characters (a combined sketch follows this list).
+ `#2571 <https://github.com/aio-libs/aiohttp/issues/2571>`_
+ - Call ``AccessLogger.log`` with the current exception available from ``sys.exc_info()``.
+ `#3557 <https://github.com/aio-libs/aiohttp/issues/3557>`_
+ - `web.UrlDispatcher.add_routes` and `web.Application.add_routes` return a list
+ of registered `AbstractRoute` instances. `AbstractRouteDef.register` (and all
+ subclasses) return a list of the registered resources.
+ `#3866 <https://github.com/aio-libs/aiohttp/issues/3866>`_
+ - Added properties of default ClientSession params to the ClientSession class so
+ they are available for introspection.
+ `#3882 <https://github.com/aio-libs/aiohttp/issues/3882>`_
+ - Don't cancel web handler on peer disconnection, raise `OSError` on reading/writing instead.
+ `#4080 <https://github.com/aio-libs/aiohttp/issues/4080>`_
+ - Implement BaseRequest.get_extra_info() to access a protocol transport's extra info.
+ `#4189 <https://github.com/aio-libs/aiohttp/issues/4189>`_
+ - Added `ClientSession.timeout` property.
+ `#4191 <https://github.com/aio-libs/aiohttp/issues/4191>`_
+ - Allow use of SameSite in cookies.
+ `#4224 <https://github.com/aio-libs/aiohttp/issues/4224>`_
+ - Use ``loop.sendfile()`` instead of custom implementation if available.
+ `#4269 <https://github.com/aio-libs/aiohttp/issues/4269>`_
+ - Apply SO_REUSEADDR to test server's socket.
+ `#4393 <https://github.com/aio-libs/aiohttp/issues/4393>`_
+ - Use .raw_host instead of slower .host in client API
+ `#4402 <https://github.com/aio-libs/aiohttp/issues/4402>`_
+ - Allow configuring the buffer size of input stream by passing ``read_bufsize`` argument.
+ `#4453 <https://github.com/aio-libs/aiohttp/issues/4453>`_
+ - Pass tests on Python 3.8 for Windows.
+ `#4513 <https://github.com/aio-libs/aiohttp/issues/4513>`_
+ - Add `method` and `url` attributes to `TraceRequestChunkSentParams` and `TraceResponseChunkReceivedParams`.
+ `#4674 <https://github.com/aio-libs/aiohttp/issues/4674>`_
+ - Add ClientResponse.ok property for checking status code under 400.
+ `#4711 <https://github.com/aio-libs/aiohttp/issues/4711>`_
+ - Don't ceil timeouts that are smaller than 5 seconds.
+ `#4850 <https://github.com/aio-libs/aiohttp/issues/4850>`_
+ - TCPSite now listens by default on all interfaces instead of just IPv4 when `None` is passed in as the host.
+ `#4894 <https://github.com/aio-libs/aiohttp/issues/4894>`_
+ - Bump ``http_parser`` to 2.9.4
+ `#5070 <https://github.com/aio-libs/aiohttp/issues/5070>`_
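+
+ A brief, non-authoritative sketch combining several of the client-facing
+ features above (``quote_cookie``, ``read_bufsize``, the session ``timeout``
+ property, and ``ClientResponse.ok``); the URL is a placeholder:
+
+ .. code-block:: python
+
+     import asyncio
+     import aiohttp
+
+     async def main():
+         # quote_cookie=False skips quotation wrapping of special characters.
+         jar = aiohttp.CookieJar(quote_cookie=False)
+         async with aiohttp.ClientSession(
+             cookie_jar=jar,
+             read_bufsize=2 ** 16,  # input stream buffer size is configurable
+         ) as session:
+             print(session.timeout)  # default ClientTimeout, introspectable
+             async with session.get("http://example.com") as resp:
+                 if resp.ok:  # True for any status < 400
+                     print(await resp.text())
+
+     asyncio.get_event_loop().run_until_complete(main())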
+
+
+ Bugfixes
+ --------
+
+ - Fix keepalive connections not being closed in time
+ `#3296 <https://github.com/aio-libs/aiohttp/issues/3296>`_
+ - Fix failed websocket handshake leaving connection hanging.
+ `#3380 <https://github.com/aio-libs/aiohttp/issues/3380>`_
+ - Fix tasks cancellation order on exit. The run_app task needs to be cancelled first for cleanup hooks to run with all tasks intact.
+ `#3805 <https://github.com/aio-libs/aiohttp/issues/3805>`_
+ - Don't start heartbeat until _writer is set
+ `#4062 <https://github.com/aio-libs/aiohttp/issues/4062>`_
+ - Fix handling of multipart file uploads without a content type.
+ `#4089 <https://github.com/aio-libs/aiohttp/issues/4089>`_
+ - Preserve view handler function attributes across middlewares
+ `#4174 <https://github.com/aio-libs/aiohttp/issues/4174>`_
+ - Fix the string representation of ``ServerDisconnectedError``.
+ `#4175 <https://github.com/aio-libs/aiohttp/issues/4175>`_
+ - Raise RuntimeError when trying to get the encoding from a body that has not
+ been read.
+ `#4214 <https://github.com/aio-libs/aiohttp/issues/4214>`_
+ - Remove warning messages from noop.
+ `#4282 <https://github.com/aio-libs/aiohttp/issues/4282>`_
+ - Raise ClientPayloadError if FormData is re-processed.
+ `#4345 <https://github.com/aio-libs/aiohttp/issues/4345>`_
+ - Fix a warning about unfinished task in ``web_protocol.py``
+ `#4408 <https://github.com/aio-libs/aiohttp/issues/4408>`_
+ - Fixed 'deflate' compression; it now complies with RFC 2616.
+ `#4506 <https://github.com/aio-libs/aiohttp/issues/4506>`_
+ - Fixed OverflowError on platforms with 32-bit time_t
+ `#4515 <https://github.com/aio-libs/aiohttp/issues/4515>`_
+ - Fixed ``request.body_exists`` returning the wrong value for methods without a body.
+ `#4528 <https://github.com/aio-libs/aiohttp/issues/4528>`_
+ - Fix connecting to link-local IPv6 addresses.
+ `#4554 <https://github.com/aio-libs/aiohttp/issues/4554>`_
+ - Fix a problem with connection waiters that are never awaited.
+ `#4562 <https://github.com/aio-libs/aiohttp/issues/4562>`_
+ - Always make sure the transport is not closing before reusing a connection.
+
+ Reusing a protocol based on keepalive in headers is unreliable.
+ For example, uWSGI will not support keepalive even when it serves an
+ HTTP 1.1 request, unless uWSGI is explicitly configured with the
+ ``--http-keepalive`` option.
+
+ Servers designed like uWSGI could cause aiohttp to intermittently
+ raise a ConnectionResetException when the protocol pool runs
+ out and some protocol is reused.
+ `#4587 <https://github.com/aio-libs/aiohttp/issues/4587>`_
+ - Handle the last CRLF correctly even if it is received via a separate TCP segment.
+ `#4630 <https://github.com/aio-libs/aiohttp/issues/4630>`_
+ - Fix the register_resource function to validate the route name before splitting
+ it so that route names can include Python keywords.
+ `#4691 <https://github.com/aio-libs/aiohttp/issues/4691>`_
+ - Improve typing annotations for ``web.Request``, ``aiohttp.ClientResponse`` and
+ ``multipart`` module.
+ `#4736 <https://github.com/aio-libs/aiohttp/issues/4736>`_
+ - Fix resolver task not being awaited when the connector is cancelled
+ `#4795 <https://github.com/aio-libs/aiohttp/issues/4795>`_
+ - Fix a bug "Aiohttp doesn't return any error on invalid request methods"
+ `#4798 <https://github.com/aio-libs/aiohttp/issues/4798>`_
+ - Fix HEAD requests for static content.
+ `#4809 <https://github.com/aio-libs/aiohttp/issues/4809>`_
+ - Fix incorrect size calculation for memoryview
+ `#4890 <https://github.com/aio-libs/aiohttp/issues/4890>`_
+ - Add HTTPMove to __all__.
+ `#4897 <https://github.com/aio-libs/aiohttp/issues/4897>`_
+ - Fixed the type annotations in the ``tracing`` module.
+ `#4912 <https://github.com/aio-libs/aiohttp/issues/4912>`_
+ - Fix typing for multipart ``__aiter__``.
+ `#4931 <https://github.com/aio-libs/aiohttp/issues/4931>`_
+ - Fix for race condition on connections in BaseConnector that leads to exceeding the connection limit.
+ `#4936 <https://github.com/aio-libs/aiohttp/issues/4936>`_
+ - Add forced UTF-8 encoding for ``application/rdap+json`` responses.
+ `#4938 <https://github.com/aio-libs/aiohttp/issues/4938>`_
+ - Fix inconsistency between Python and C http request parsers in parsing pct-encoded URL.
+ `#4972 <https://github.com/aio-libs/aiohttp/issues/4972>`_
+ - Fix connection closing issue in HEAD request.
+ `#5012 <https://github.com/aio-libs/aiohttp/issues/5012>`_
+ - Fix type hint on BaseRunner.addresses (from ``List[str]`` to ``List[Any]``)
+ `#5086 <https://github.com/aio-libs/aiohttp/issues/5086>`_
+ - Make `web.run_app()` more responsive to Ctrl+C on Windows for Python < 3.8. It slightly
+ increases CPU load as a side effect.
+ `#5098 <https://github.com/aio-libs/aiohttp/issues/5098>`_
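+
+ As a usage note for the transport-reuse entry above (#4587), the heart of the
+ fix is trusting the transport state rather than keep-alive headers. A minimal
+ sketch of the principle; ``can_reuse`` is a hypothetical helper, not aiohttp's
+ internal API:
+
+ .. code-block:: python
+
+     import asyncio
+     from typing import Optional
+
+     def can_reuse(transport: Optional[asyncio.Transport]) -> bool:
+         # Keep-alive headers alone are unreliable (e.g. uWSGI without
+         # --http-keepalive), so ask the transport itself whether it is
+         # still usable before handing the connection back out.
+         return transport is not None and not transport.is_closing()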
+
+
+ Improved Documentation
+ ----------------------
+
+ - Fix example code in client quick-start
+ `#3376 <https://github.com/aio-libs/aiohttp/issues/3376>`_
+ - Updated the docs so there is no contradiction in ``ttl_dns_cache`` default value
+ `#3512 <https://github.com/aio-libs/aiohttp/issues/3512>`_
+ - Add 'Deploy with SSL' to docs.
+ `#4201 <https://github.com/aio-libs/aiohttp/issues/4201>`_
+ - Change typing of the secure argument on StreamResponse.set_cookie from ``Optional[str]`` to ``Optional[bool]``
+ `#4204 <https://github.com/aio-libs/aiohttp/issues/4204>`_
+ - Changed the ``ttl_dns_cache`` type from ``int`` to ``Optional[int]``.
+ `#4270 <https://github.com/aio-libs/aiohttp/issues/4270>`_
+ - Simplify the README hello world example and add a documentation page for people coming from requests.
+ `#4272 <https://github.com/aio-libs/aiohttp/issues/4272>`_
+ - Improve some code examples in the documentation involving websockets and starting a simple HTTP site with an AppRunner.
+ `#4285 <https://github.com/aio-libs/aiohttp/issues/4285>`_
+ - Fix typo in code example in Multipart docs
+ `#4312 <https://github.com/aio-libs/aiohttp/issues/4312>`_
+ - Fix code example in Multipart section.
+ `#4314 <https://github.com/aio-libs/aiohttp/issues/4314>`_
+ - Update contributing guide so new contributors read the most recent version of that guide. Update command used to create test coverage reporting.
+ `#4810 <https://github.com/aio-libs/aiohttp/issues/4810>`_
+ - Spelling: Change "canonize" to "canonicalize".
+ `#4986 <https://github.com/aio-libs/aiohttp/issues/4986>`_
+ - Add ``aiohttp-sse-client`` library to third party usage list.
+ `#5084 <https://github.com/aio-libs/aiohttp/issues/5084>`_
+
+
+ Misc
+ ----
+
+ - `#2856 <https://github.com/aio-libs/aiohttp/issues/2856>`_, `#4218 <https://github.com/aio-libs/aiohttp/issues/4218>`_, `#4250 <https://github.com/aio-libs/aiohttp/issues/4250>`_
+
+
+ ----
+
+
+ 3.6.3 (2020-10-12)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Pin yarl to ``<1.6.0`` to avoid buggy behavior that will be fixed by the next aiohttp
+ release.
+
+ 3.6.2 (2019-10-09)
+ ==================
+
+ Features
+ --------
+
+ - Made exceptions pickleable. Also changed the repr of some exceptions.
+ `#4077 <https://github.com/aio-libs/aiohttp/issues/4077>`_
+ - Use ``Iterable`` type hint instead of ``Sequence`` for ``Application`` *middleware*
+ parameter. `#4125 <https://github.com/aio-libs/aiohttp/issues/4125>`_
+
+
+ Bugfixes
+ --------
+
+ - Reset the ``sock_read`` timeout each time data is received for an
+ ``aiohttp.ClientResponse``. `#3808
+ <https://github.com/aio-libs/aiohttp/issues/3808>`_
+ - Fix handling of expired cookies so they are not stored in CookieJar.
+ `#4063 <https://github.com/aio-libs/aiohttp/issues/4063>`_
+ - Fix misleading message in the string representation of ``ClientConnectorError``;
+ ``self.ssl == None`` means default SSL context, not SSL disabled `#4097
+ <https://github.com/aio-libs/aiohttp/issues/4097>`_
+ - Don't clobber HTTP status when using FileResponse.
+ `#4106 <https://github.com/aio-libs/aiohttp/issues/4106>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Added minimal required logging configuration to logging documentation.
+ `#2469 <https://github.com/aio-libs/aiohttp/issues/2469>`_
+ - Update docs to reflect proxy support.
+ `#4100 <https://github.com/aio-libs/aiohttp/issues/4100>`_
+ - Fix typo in code example in testing docs.
+ `#4108 <https://github.com/aio-libs/aiohttp/issues/4108>`_
+
+
+ Misc
+ ----
+
+ - `#4102 <https://github.com/aio-libs/aiohttp/issues/4102>`_
+
+
+ ----
+
+
+ 3.6.1 (2019-09-19)
+ ==================
+
+ Features
+ --------
+
+ - Compatibility with Python 3.8.
+ `#4056 <https://github.com/aio-libs/aiohttp/issues/4056>`_
+
+
+ Bugfixes
+ --------
+
+ - Correct some exception string formats
+ `#4068 <https://github.com/aio-libs/aiohttp/issues/4068>`_
+ - Emit a warning when ``ssl.OP_NO_COMPRESSION`` is
+ unavailable because the runtime is built against
+ an outdated OpenSSL.
+ `#4052 <https://github.com/aio-libs/aiohttp/issues/4052>`_
+ - Update multidict requirement to >= 4.5
+ `#4057 <https://github.com/aio-libs/aiohttp/issues/4057>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Provide pytest-aiohttp namespace for pytest fixtures in docs.
+ `#3723 <https://github.com/aio-libs/aiohttp/issues/3723>`_
+
+
+ ----
+
+
+ 3.6.0 (2019-09-06)
+ ==================
+
+ Features
+ --------
+
+ - Add support for Named Pipes (Site and Connector) under Windows. This feature requires
+ the Proactor event loop to work. `#3629
+ <https://github.com/aio-libs/aiohttp/issues/3629>`_
+ - Removed ``Transfer-Encoding: chunked`` header from websocket responses to be
+ compatible with more http proxy servers. `#3798
+ <https://github.com/aio-libs/aiohttp/issues/3798>`_
+ - Accept non-GET request for starting websocket handshake on server side.
+ `#3980 <https://github.com/aio-libs/aiohttp/issues/3980>`_
+
+
+ Bugfixes
+ --------
+
+ - Raise a ClientResponseError instead of an AssertionError for a blank
+ HTTP Reason Phrase.
+ `#3532 <https://github.com/aio-libs/aiohttp/issues/3532>`_
+ - Fix an issue where cookies would sometimes not be set during a redirect.
+ `#3576 <https://github.com/aio-libs/aiohttp/issues/3576>`_
+ - Change normalize_path_middleware to use a 308 redirect instead of 301
+ (see the usage sketch after this list).
+
+ This lets clients keep using PUT/POST methods on endpoints that are
+ redirected because of a trailing slash.
+ `#3579 <https://github.com/aio-libs/aiohttp/issues/3579>`_
+ - Drop the processed task from ``all_tasks()`` list early. It prevents logging about a
+ task with unhandled exception when the server is used in conjunction with
+ ``asyncio.run()``. `#3587 <https://github.com/aio-libs/aiohttp/issues/3587>`_
+ - ``Signal`` type annotation changed from ``Signal[Callable[['TraceConfig'],
+ Awaitable[None]]]`` to ``Signal[Callable[ClientSession, SimpleNamespace, ...]]``.
+ `#3595 <https://github.com/aio-libs/aiohttp/issues/3595>`_
+ - Use sanitized URL as Location header in redirects
+ `#3614 <https://github.com/aio-libs/aiohttp/issues/3614>`_
+ - Improve typing annotations for multipart.py along with changes required
+ by mypy in files that reference multipart.py.
+ `#3621 <https://github.com/aio-libs/aiohttp/issues/3621>`_
+ - Close session created inside ``aiohttp.request`` when unhandled exception occurs
+ `#3628 <https://github.com/aio-libs/aiohttp/issues/3628>`_
+ - Clean up per-chunk data in generic data read, fixing a memory leak.
+ `#3631 <https://github.com/aio-libs/aiohttp/issues/3631>`_
+ - Use correct type for add_view and family
+ `#3633 <https://github.com/aio-libs/aiohttp/issues/3633>`_
+ - Fix _keepalive field in __slots__ of ``RequestHandler``.
+ `#3644 <https://github.com/aio-libs/aiohttp/issues/3644>`_
+ - Properly handle ConnectionResetError, to silence the "Cannot write to closing
+ transport" exception when clients disconnect uncleanly.
+ `#3648 <https://github.com/aio-libs/aiohttp/issues/3648>`_
+ - Suppress pytest warnings due to ``test_utils`` classes
+ `#3660 <https://github.com/aio-libs/aiohttp/issues/3660>`_
+ - Fix overshadowing of overlapped sub-application prefixes.
+ `#3701 <https://github.com/aio-libs/aiohttp/issues/3701>`_
+ - Fixed return type annotation for WSMessage.json()
+ `#3720 <https://github.com/aio-libs/aiohttp/issues/3720>`_
+ - Properly expose TooManyRedirects publicly as documented.
+ `#3818 <https://github.com/aio-libs/aiohttp/issues/3818>`_
+ - Fix missing brackets for IPv6 in proxy CONNECT request
+ `#3841 <https://github.com/aio-libs/aiohttp/issues/3841>`_
+ - Make the signature of ``aiohttp.test_utils.TestClient.request`` match
+ ``aiohttp.ClientSession.request`` according to the docs `#3852
+ <https://github.com/aio-libs/aiohttp/issues/3852>`_
+ - Use correct style for re-exported imports, makes mypy ``--strict`` mode happy.
+ `#3868 <https://github.com/aio-libs/aiohttp/issues/3868>`_
+ - Fixed type annotation for add_view method of UrlDispatcher to accept any subclass of
+ View `#3880 <https://github.com/aio-libs/aiohttp/issues/3880>`_
+ - Made cython HTTP parser set Reason-Phrase of the response to an empty string if it is
+ missing. `#3906 <https://github.com/aio-libs/aiohttp/issues/3906>`_
+ - Add URL to the string representation of ClientResponseError.
+ `#3959 <https://github.com/aio-libs/aiohttp/issues/3959>`_
+ - Accept ``istr`` keys in ``LooseHeaders`` type hints.
+ `#3976 <https://github.com/aio-libs/aiohttp/issues/3976>`_
+ - Fixed race conditions in _resolve_host caching and throttling when tracing is enabled.
+ `#4013 <https://github.com/aio-libs/aiohttp/issues/4013>`_
+ - For URLs like "unix://localhost/..." set Host HTTP header to "localhost" instead of
+ "localhost:None". `#4039 <https://github.com/aio-libs/aiohttp/issues/4039>`_
+
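+ As referenced from the ``normalize_path_middleware`` entry above, the
+ middleware is installed like any other; a minimal sketch:
+
+ .. code-block:: python
+
+     from aiohttp import web
+
+     async def handle(request: web.Request) -> web.Response:
+         return web.Response(text="ok")
+
+     app = web.Application(middlewares=[web.normalize_path_middleware()])
+     # Only the slashed path is registered; with the default settings a
+     # request for '/hello' is redirected to '/hello/' (a 308 since #3579).
+     app.router.add_get("/hello/", handle)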
+
+ Improved Documentation
+ ----------------------
+
+ - Modify documentation for Background Tasks to remove deprecated usage of event loop.
+ `#3526 <https://github.com/aio-libs/aiohttp/issues/3526>`_
+ - use ``if __name__ == '__main__':`` in server examples.
+ `#3775 <https://github.com/aio-libs/aiohttp/issues/3775>`_
+ - Update documentation reference to the default access logger.
+ `#3783 <https://github.com/aio-libs/aiohttp/issues/3783>`_
+ - Improve documentation for ``web.BaseRequest.path`` and ``web.BaseRequest.raw_path``.
+ `#3791 <https://github.com/aio-libs/aiohttp/issues/3791>`_
+ - Removed deprecation warning in tracing example docs
+ `#3964 <https://github.com/aio-libs/aiohttp/issues/3964>`_
+
+
+ ----
+
+
+ 3.5.4 (2019-01-12)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fix stream ``.read()`` / ``.readany()`` / ``.iter_any()`` which used to return a
+ partial content only in case of compressed content
+ `#3525 <https://github.com/aio-libs/aiohttp/issues/3525>`_
+
+
+ 3.5.3 (2019-01-10)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fix type stubs for ``aiohttp.web.run_app(access_log=True)`` and fix edge case of
+ ``access_log=True`` and the event loop being in debug mode. `#3504
+ <https://github.com/aio-libs/aiohttp/issues/3504>`_
+ - Fix ``aiohttp.ClientTimeout`` type annotations to accept ``None`` for fields
+ `#3511 <https://github.com/aio-libs/aiohttp/issues/3511>`_
+ - Send custom per-request cookies even if session jar is empty
+ `#3515 <https://github.com/aio-libs/aiohttp/issues/3515>`_
+ - Restore Linux binary wheels publishing on PyPI
+
+ ----
+
+
+ 3.5.2 (2019-01-08)
+ ==================
+
+ Features
+ --------
+
+ - ``FileResponse`` from ``web_fileresponse.py`` uses a ``ThreadPoolExecutor`` to work
+ with files asynchronously. I/O based payloads from ``payload.py`` use a
+ ``ThreadPoolExecutor`` to work with I/O objects asynchronously. `#3313
+ <https://github.com/aio-libs/aiohttp/issues/3313>`_
+ - Internal Server Errors are now rendered in plain text if the browser does not support HTML.
+ `#3483 <https://github.com/aio-libs/aiohttp/issues/3483>`_
+
+
+ Bugfixes
+ --------
+
+ - Preserve MultipartWriter parts headers on write. Refactor how
+ ``Payload.headers`` are handled. Payload instances now always have headers and
+ Content-Type defined. Fix Payload Content-Disposition header reset after initial
+ creation. `#3035 <https://github.com/aio-libs/aiohttp/issues/3035>`_
+ - Log suppressed exceptions in ``GunicornWebWorker``.
+ `#3464 <https://github.com/aio-libs/aiohttp/issues/3464>`_
+ - Remove wildcard imports.
+ `#3468 <https://github.com/aio-libs/aiohttp/issues/3468>`_
+ - Use the same task for app initialization and web server handling in gunicorn workers.
+ This allows Python 3.7 context vars to be used smoothly.
+ `#3471 <https://github.com/aio-libs/aiohttp/issues/3471>`_
+ - Fix handling of chunked+gzipped response when first chunk does not give uncompressed
+ data `#3477 <https://github.com/aio-libs/aiohttp/issues/3477>`_
+ - Replace ``collections.MutableMapping`` with ``collections.abc.MutableMapping`` to
+ avoid a deprecation warning. `#3480
+ <https://github.com/aio-libs/aiohttp/issues/3480>`_
+ - ``Payload.size`` type annotation changed from ``Optional[float]`` to
+ ``Optional[int]``. `#3484 <https://github.com/aio-libs/aiohttp/issues/3484>`_
+ - Ignore done tasks when cancelling pending activities on ``web.run_app`` finalization.
+ `#3497 <https://github.com/aio-libs/aiohttp/issues/3497>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Add documentation for ``aiohttp.web.HTTPException``.
+ `#3490 <https://github.com/aio-libs/aiohttp/issues/3490>`_
+
+
+ Misc
+ ----
+
+ - `#3487 <https://github.com/aio-libs/aiohttp/issues/3487>`_
+
+
+ ----
+
+
+ 3.5.1 (2018-12-24)
+ ====================
+
+ - Fix a regression about ``ClientSession._requote_redirect_url`` modification in debug
+ mode.
+
+ 3.5.0 (2018-12-22)
+ ====================
+
+ Features
+ --------
+
+ - The library type annotations are checked in strict mode now.
+ - Add support for setting cookies for individual request (`#2387
+ <https://github.com/aio-libs/aiohttp/pull/2387>`_)
+ - Application.add_domain implementation (`#2809
+ <https://github.com/aio-libs/aiohttp/pull/2809>`_)
+ - The default ``app`` in the request returned by ``test_utils.make_mocked_request`` can
+ now have objects assigned to it and retrieved using the ``[]`` operator. (`#3174
+ <https://github.com/aio-libs/aiohttp/pull/3174>`_)
+ - Make ``request.url`` accessible when transport is closed. (`#3177
+ <https://github.com/aio-libs/aiohttp/pull/3177>`_)
+ - Add ``zlib_executor_size`` argument to ``Response`` constructor to allow compression
+ to run in a background executor to avoid blocking the main thread and potentially
+ triggering health check failures. (`#3205
+ <https://github.com/aio-libs/aiohttp/pull/3205>`_)
+ - Enable users to set ``ClientTimeout`` in ``aiohttp.request`` (see the sketch
+ after this list). (`#3213 <https://github.com/aio-libs/aiohttp/pull/3213>`_)
+ - Don't raise a warning if ``NETRC`` environment variable is not set and ``~/.netrc``
+ file doesn't exist. (`#3267 <https://github.com/aio-libs/aiohttp/pull/3267>`_)
+ - Add a default logging handler to ``web.run_app``. If the ``Application.debug`` flag is set
+ and the default logger ``aiohttp.access`` is used, access logs will now be output
+ using a *stderr* ``StreamHandler`` if no handlers are attached. Furthermore, if the
+ default logger has no log level set, the log level will be set to ``DEBUG``. (`#3324
+ <https://github.com/aio-libs/aiohttp/pull/3324>`_)
+ - Add a ``method`` argument to ``session.ws_connect()``. Some server APIs require a
+ different HTTP method for WebSocket connection establishment; for example, ``Docker
+ exec`` needs POST (see the sketch after this list). (`#3378 <https://github.com/aio-libs/aiohttp/pull/3378>`_)
+ - Create a task per request handling. (`#3406
+ <https://github.com/aio-libs/aiohttp/pull/3406>`_)
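+
+ Two of the client-side entries above (``ClientTimeout`` in ``aiohttp.request``
+ and the ``method`` argument of ``ws_connect()``) can be shown together; a short
+ sketch where the URLs are placeholders:
+
+ .. code-block:: python
+
+     import aiohttp
+
+     async def demo() -> None:
+         # A ClientTimeout can be passed straight to aiohttp.request() (#3213).
+         timeout = aiohttp.ClientTimeout(total=10)
+         async with aiohttp.request(
+             "GET", "http://example.com", timeout=timeout
+         ) as resp:
+             print(resp.status)
+
+         # ws_connect() accepts a custom HTTP method, e.g. POST for
+         # Docker exec-style endpoints (#3378).
+         async with aiohttp.ClientSession() as session:
+             async with session.ws_connect(
+                 "http://example.com/ws", method="POST"
+             ) as ws:
+                 await ws.send_str("hello")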
+
+
+ Bugfixes
+ --------
+
+ - Enable passing ``access_log_class`` via ``handler_args`` (`#3158
+ <https://github.com/aio-libs/aiohttp/pull/3158>`_)
+ - Return empty bytes with end-of-chunk marker in empty stream reader. (`#3186
+ <https://github.com/aio-libs/aiohttp/pull/3186>`_)
+ - Accept ``CIMultiDictProxy`` instances for ``headers`` argument in ``web.Response``
+ constructor. (`#3207 <https://github.com/aio-libs/aiohttp/pull/3207>`_)
+ - Don't uppercase HTTP method in parser (`#3233
+ <https://github.com/aio-libs/aiohttp/pull/3233>`_)
+ - Make method match regexp RFC-7230 compliant (`#3235
+ <https://github.com/aio-libs/aiohttp/pull/3235>`_)
+ - Add ``app.pre_frozen`` state to properly handle startup signals in
+ sub-applications. (`#3237 <https://github.com/aio-libs/aiohttp/pull/3237>`_)
+ - Enhanced parsing and validation of helpers.BasicAuth.decode. (`#3239
+ <https://github.com/aio-libs/aiohttp/pull/3239>`_)
+ - Change imports from collections module in preparation for 3.8. (`#3258
+ <https://github.com/aio-libs/aiohttp/pull/3258>`_)
+ - Ensure Host header is added first to ClientRequest to better replicate browser (`#3265
+ <https://github.com/aio-libs/aiohttp/pull/3265>`_)
+ - Fix forward compatibility with Python 3.8: importing ABCs directly from the
+ collections module will not be supported anymore. (`#3273
+ <https://github.com/aio-libs/aiohttp/pull/3273>`_)
+ - Keep the query string in ``normalize_path_middleware``. (`#3278
+ <https://github.com/aio-libs/aiohttp/pull/3278>`_)
+ - Fix missing parameter ``raise_for_status`` for aiohttp.request() (`#3290
+ <https://github.com/aio-libs/aiohttp/pull/3290>`_)
+ - Bracket IPv6 addresses in the HOST header (`#3304
+ <https://github.com/aio-libs/aiohttp/pull/3304>`_)
+ - Fix default message for server ping and pong frames. (`#3308
+ <https://github.com/aio-libs/aiohttp/pull/3308>`_)
+ - Fix tests/test_connector.py typo and tests/autobahn/server.py duplicate loop
+ def. (`#3337 <https://github.com/aio-libs/aiohttp/pull/3337>`_)
+ - Fix false-negative indicator end_of_HTTP_chunk in StreamReader.readchunk function
+ (`#3361 <https://github.com/aio-libs/aiohttp/pull/3361>`_)
+ - Release HTTP response before raising status exception (`#3364
+ <https://github.com/aio-libs/aiohttp/pull/3364>`_)
+ - Fix task cancellation when ``sendfile()`` syscall is used by static file
+ handling. (`#3383 <https://github.com/aio-libs/aiohttp/pull/3383>`_)
+ - Fix stack trace for ``asyncio.TimeoutError`` which was not logged, when it is caught
+ in the handler. (`#3414 <https://github.com/aio-libs/aiohttp/pull/3414>`_)
+
+
+ Improved Documentation
+ ----------------------
+
+ - Improve documentation of ``Application.make_handler`` parameters. (`#3152
+ <https://github.com/aio-libs/aiohttp/pull/3152>`_)
+ - Fix BaseRequest.raw_headers doc. (`#3215
+ <https://github.com/aio-libs/aiohttp/pull/3215>`_)
+ - Fix typo in TypeError exception reason in ``web.Application._handle`` (`#3229
+ <https://github.com/aio-libs/aiohttp/pull/3229>`_)
+ - Make server access log format placeholder %b documentation reflect
+ behavior and docstring. (`#3307 <https://github.com/aio-libs/aiohttp/pull/3307>`_)
+
+
+ Deprecations and Removals
+ -------------------------
+
+ - Deprecate modification of ``session.requote_redirect_url`` (`#2278
+ <https://github.com/aio-libs/aiohttp/pull/2278>`_)
+ - Deprecate ``stream.unread_data()`` (`#3260
+ <https://github.com/aio-libs/aiohttp/pull/3260>`_)
+ - Deprecated use of boolean in ``resp.enable_compression()`` (`#3318
+ <https://github.com/aio-libs/aiohttp/pull/3318>`_)
+ - Encourage creation of aiohttp public objects inside a coroutine (`#3331
+ <https://github.com/aio-libs/aiohttp/pull/3331>`_)
+ - Drop dead ``Connection.detach()`` and ``Connection.writer``. Both methods were broken
+ for more than 2 years. (`#3358 <https://github.com/aio-libs/aiohttp/pull/3358>`_)
+ - Deprecate ``app.loop``, ``request.loop``, ``client.loop`` and ``connector.loop``
+ properties. (`#3374 <https://github.com/aio-libs/aiohttp/pull/3374>`_)
+ - Deprecate explicit debug argument. Use asyncio debug mode instead. (`#3381
+ <https://github.com/aio-libs/aiohttp/pull/3381>`_)
+ - Deprecate body parameter in HTTPException (and derived classes) constructor. (`#3385
+ <https://github.com/aio-libs/aiohttp/pull/3385>`_)
+ - Deprecate bare connector close; use ``async with connector:`` and ``await
+ connector.close()`` instead (see the sketch after this list). (`#3417
+ <https://github.com/aio-libs/aiohttp/pull/3417>`_)
+ - Deprecate obsolete ``read_timeout`` and ``conn_timeout`` in ``ClientSession``
+ constructor. (`#3438 <https://github.com/aio-libs/aiohttp/pull/3438>`_)
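+
+ For the bare-connector-close deprecation above, the supported patterns look like
+ this; a minimal sketch:
+
+ .. code-block:: python
+
+     import aiohttp
+
+     async def demo() -> None:
+         # Preferred: scope the connector with 'async with' (#3417) ...
+         async with aiohttp.TCPConnector() as connector:
+             async with aiohttp.ClientSession(
+                 connector=connector, connector_owner=False
+             ) as session:
+                 pass
+
+         # ... or close it explicitly with 'await'.
+         connector = aiohttp.TCPConnector()
+         await connector.close()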
+
+
+ Misc
+ ----
+
+ - #3341, #3351
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Framework :: AsyncIO
+Requires-Python: >=3.6
+Provides-Extra: speedups
diff --git a/third_party/python/aiohttp/README.rst b/third_party/python/aiohttp/README.rst
new file mode 100644
index 0000000000..338adbcae2
--- /dev/null
+++ b/third_party/python/aiohttp/README.rst
@@ -0,0 +1,204 @@
+==================================
+Async http client/server framework
+==================================
+
+.. image:: https://raw.githubusercontent.com/aio-libs/aiohttp/master/docs/_static/aiohttp-icon-128x128.png
+ :height: 64px
+ :width: 64px
+ :alt: aiohttp logo
+
+|
+
+.. image:: https://github.com/aio-libs/aiohttp/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI
+ :alt: GitHub Actions status for master branch
+
+.. image:: https://codecov.io/gh/aio-libs/aiohttp/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/aiohttp
+ :alt: codecov.io status for master branch
+
+.. image:: https://badge.fury.io/py/aiohttp.svg
+ :target: https://pypi.org/project/aiohttp
+ :alt: Latest PyPI package version
+
+.. image:: https://readthedocs.org/projects/aiohttp/badge/?version=latest
+ :target: https://docs.aiohttp.org/
+ :alt: Latest Read The Docs
+
+.. image:: https://img.shields.io/discourse/status?server=https%3A%2F%2Faio-libs.discourse.group
+ :target: https://aio-libs.discourse.group
+ :alt: Discourse status
+
+.. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+
+Key Features
+============
+
+- Supports both client and server side of the HTTP protocol.
+- Supports both client and server Web-Sockets out-of-the-box and avoids
+  Callback Hell.
+- Provides a Web-server with middlewares and pluggable routing.
+
+
+Getting started
+===============
+
+Client
+------
+
+To get something from the web:
+
+.. code-block:: python
+
+ import aiohttp
+ import asyncio
+
+ async def main():
+
+ async with aiohttp.ClientSession() as session:
+ async with session.get('http://python.org') as response:
+
+ print("Status:", response.status)
+ print("Content-type:", response.headers['content-type'])
+
+ html = await response.text()
+ print("Body:", html[:15], "...")
+
+ loop = asyncio.get_event_loop()
+ loop.run_until_complete(main())
+
+This prints:
+
+.. code-block::
+
+ Status: 200
+ Content-type: text/html; charset=utf-8
+ Body: <!doctype html> ...
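+
+On Python 3.7 and newer the same example can be driven with ``asyncio.run()``
+instead of managing the event loop by hand; a sketch of the alternative entry
+point (``main()`` is the coroutine defined above):
+
+.. code-block:: python
+
+    import asyncio
+
+    # Replaces the get_event_loop()/run_until_complete() pair above.
+    asyncio.run(main())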
+
+Coming from `requests <https://requests.readthedocs.io/>`_? Read `why we need so many lines <https://aiohttp.readthedocs.io/en/latest/http_request_lifecycle.html>`_.
+
+Server
+------
+
+An example using a simple server:
+
+.. code-block:: python
+
+ # examples/server_simple.py
+ from aiohttp import web
+
+ async def handle(request):
+ name = request.match_info.get('name', "Anonymous")
+ text = "Hello, " + name
+ return web.Response(text=text)
+
+ async def wshandle(request):
+ ws = web.WebSocketResponse()
+ await ws.prepare(request)
+
+ async for msg in ws:
+ if msg.type == web.WSMsgType.text:
+ await ws.send_str("Hello, {}".format(msg.data))
+ elif msg.type == web.WSMsgType.binary:
+ await ws.send_bytes(msg.data)
+ elif msg.type == web.WSMsgType.close:
+ break
+
+ return ws
+
+
+ app = web.Application()
+ app.add_routes([web.get('/', handle),
+ web.get('/echo', wshandle),
+ web.get('/{name}', handle)])
+
+ if __name__ == '__main__':
+ web.run_app(app)
+
+
+Documentation
+=============
+
+https://aiohttp.readthedocs.io/
+
+
+Demos
+=====
+
+https://github.com/aio-libs/aiohttp-demos
+
+
+External links
+==============
+
+* `Third party libraries
+ <http://aiohttp.readthedocs.io/en/latest/third_party.html>`_
+* `Built with aiohttp
+ <http://aiohttp.readthedocs.io/en/latest/built_with.html>`_
+* `Powered by aiohttp
+ <http://aiohttp.readthedocs.io/en/latest/powered_by.html>`_
+
+Feel free to make a Pull Request for adding your link to these pages!
+
+
+Communication channels
+======================
+
+*aio-libs discourse group*: https://aio-libs.discourse.group
+
+*gitter chat* https://gitter.im/aio-libs/Lobby
+
+We support `Stack Overflow
+<https://stackoverflow.com/questions/tagged/aiohttp>`_.
+Please add the *aiohttp* tag to your question there.
+
+Requirements
+============
+
+- Python >= 3.6
+- async-timeout_
+- attrs_
+- chardet_
+- multidict_
+- yarl_
+
+Optionally you may install the cChardet_ and aiodns_ libraries (highly
+recommended for the sake of speed).
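+
+When aiodns is installed, the asynchronous resolver can also be selected
+explicitly; a minimal sketch (assuming aiodns is available):
+
+.. code-block:: python
+
+    import aiohttp
+
+    async def fetch(url: str) -> str:
+        # AsyncResolver is backed by aiodns; TCPConnector accepts it via
+        # the 'resolver' argument.
+        connector = aiohttp.TCPConnector(resolver=aiohttp.AsyncResolver())
+        async with aiohttp.ClientSession(connector=connector) as session:
+            async with session.get(url) as resp:
+                return await resp.text()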
+
+.. _chardet: https://pypi.python.org/pypi/chardet
+.. _aiodns: https://pypi.python.org/pypi/aiodns
+.. _attrs: https://github.com/python-attrs/attrs
+.. _multidict: https://pypi.python.org/pypi/multidict
+.. _yarl: https://pypi.python.org/pypi/yarl
+.. _async-timeout: https://pypi.python.org/pypi/async_timeout
+.. _cChardet: https://pypi.python.org/pypi/cchardet
+
+License
+=======
+
+``aiohttp`` is offered under the Apache 2 license.
+
+
+Keepsafe
+========
+
+The aiohttp community would like to thank Keepsafe
+(https://www.getkeepsafe.com) for its support in the early days of
+the project.
+
+
+Source code
+===========
+
+The latest developer version is available in a GitHub repository:
+https://github.com/aio-libs/aiohttp
+
+Benchmarks
+==========
+
+If you are interested in efficiency, the AsyncIO community maintains a
+list of benchmarks on the official wiki:
+https://github.com/python/asyncio/wiki/Benchmarks
diff --git a/third_party/python/aiohttp/aiohttp.egg-info/PKG-INFO b/third_party/python/aiohttp/aiohttp.egg-info/PKG-INFO
new file mode 100644
index 0000000000..a0c00158c7
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp.egg-info/PKG-INFO
@@ -0,0 +1,966 @@
+Metadata-Version: 2.1
+Name: aiohttp
+Version: 3.7.4.post0
+Summary: Async http client/server framework (asyncio)
+Home-page: https://github.com/aio-libs/aiohttp
+Author: Nikolay Kim
+Author-email: fafhrd91@gmail.com
+Maintainer: Nikolay Kim <fafhrd91@gmail.com>, Andrew Svetlov <andrew.svetlov@gmail.com>
+Maintainer-email: aio-libs@googlegroups.com
+License: Apache 2
+Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+Project-URL: CI: Azure Pipelines, https://dev.azure.com/aio-libs/aiohttp/_build
+Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/aiohttp
+Project-URL: Docs: RTD, https://docs.aiohttp.org
+Project-URL: GitHub: issues, https://github.com/aio-libs/aiohttp/issues
+Project-URL: GitHub: repo, https://github.com/aio-libs/aiohttp
+Description: ==================================
+ Async http client/server framework
+ ==================================
+
+ .. image:: https://raw.githubusercontent.com/aio-libs/aiohttp/master/docs/_static/aiohttp-icon-128x128.png
+ :height: 64px
+ :width: 64px
+ :alt: aiohttp logo
+
+ |
+
+ .. image:: https://github.com/aio-libs/aiohttp/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/aiohttp/actions?query=workflow%3ACI
+ :alt: GitHub Actions status for master branch
+
+ .. image:: https://codecov.io/gh/aio-libs/aiohttp/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/aiohttp
+ :alt: codecov.io status for master branch
+
+ .. image:: https://badge.fury.io/py/aiohttp.svg
+ :target: https://pypi.org/project/aiohttp
+ :alt: Latest PyPI package version
+
+ .. image:: https://readthedocs.org/projects/aiohttp/badge/?version=latest
+ :target: https://docs.aiohttp.org/
+ :alt: Latest Read The Docs
+
+ .. image:: https://img.shields.io/discourse/status?server=https%3A%2F%2Faio-libs.discourse.group
+ :target: https://aio-libs.discourse.group
+ :alt: Discourse status
+
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+
+ Key Features
+ ============
+
+ - Supports both client and server side of the HTTP protocol.
+ - Supports both client and server Web-Sockets out-of-the-box and avoids
+ Callback Hell.
+ - Provides a Web-server with middlewares and pluggable routing.
+
+
+ Getting started
+ ===============
+
+ Client
+ ------
+
+ To get something from the web:
+
+ .. code-block:: python
+
+ import aiohttp
+ import asyncio
+
+ async def main():
+
+ async with aiohttp.ClientSession() as session:
+ async with session.get('http://python.org') as response:
+
+ print("Status:", response.status)
+ print("Content-type:", response.headers['content-type'])
+
+ html = await response.text()
+ print("Body:", html[:15], "...")
+
+ loop = asyncio.get_event_loop()
+ loop.run_until_complete(main())
+
+ This prints:
+
+ .. code-block::
+
+ Status: 200
+ Content-type: text/html; charset=utf-8
+ Body: <!doctype html> ...
+
+ Coming from `requests <https://requests.readthedocs.io/>`_? Read `why we need so many lines <https://aiohttp.readthedocs.io/en/latest/http_request_lifecycle.html>`_.
+
+ Server
+ ------
+
+ An example using a simple server:
+
+ .. code-block:: python
+
+ # examples/server_simple.py
+ from aiohttp import web
+
+ async def handle(request):
+ name = request.match_info.get('name', "Anonymous")
+ text = "Hello, " + name
+ return web.Response(text=text)
+
+ async def wshandle(request):
+ ws = web.WebSocketResponse()
+ await ws.prepare(request)
+
+ async for msg in ws:
+ if msg.type == web.WSMsgType.text:
+ await ws.send_str("Hello, {}".format(msg.data))
+ elif msg.type == web.WSMsgType.binary:
+ await ws.send_bytes(msg.data)
+ elif msg.type == web.WSMsgType.close:
+ break
+
+ return ws
+
+
+ app = web.Application()
+ app.add_routes([web.get('/', handle),
+ web.get('/echo', wshandle),
+ web.get('/{name}', handle)])
+
+ if __name__ == '__main__':
+ web.run_app(app)
+
+
+ Documentation
+ =============
+
+ https://aiohttp.readthedocs.io/
+
+
+ Demos
+ =====
+
+ https://github.com/aio-libs/aiohttp-demos
+
+
+ External links
+ ==============
+
+ * `Third party libraries
+ <http://aiohttp.readthedocs.io/en/latest/third_party.html>`_
+ * `Built with aiohttp
+ <http://aiohttp.readthedocs.io/en/latest/built_with.html>`_
+ * `Powered by aiohttp
+ <http://aiohttp.readthedocs.io/en/latest/powered_by.html>`_
+
+ Feel free to make a Pull Request for adding your link to these pages!
+
+
+ Communication channels
+ ======================
+
+ *aio-libs discourse group*: https://aio-libs.discourse.group
+
+ *gitter chat* https://gitter.im/aio-libs/Lobby
+
+ We support `Stack Overflow
+ <https://stackoverflow.com/questions/tagged/aiohttp>`_.
+ Please add the *aiohttp* tag to your question there.
+
+ Requirements
+ ============
+
+ - Python >= 3.6
+ - async-timeout_
+ - attrs_
+ - chardet_
+ - multidict_
+ - yarl_
+
+ Optionally you may install the cChardet_ and aiodns_ libraries (highly
+ recommended for the sake of speed).
+
+ .. _chardet: https://pypi.python.org/pypi/chardet
+ .. _aiodns: https://pypi.python.org/pypi/aiodns
+ .. _attrs: https://github.com/python-attrs/attrs
+ .. _multidict: https://pypi.python.org/pypi/multidict
+ .. _yarl: https://pypi.python.org/pypi/yarl
+ .. _async-timeout: https://pypi.python.org/pypi/async_timeout
+ .. _cChardet: https://pypi.python.org/pypi/cchardet
+
+ License
+ =======
+
+ ``aiohttp`` is offered under the Apache 2 license.
+
+
+ Keepsafe
+ ========
+
+ The aiohttp community would like to thank Keepsafe
+ (https://www.getkeepsafe.com) for its support in the early days of
+ the project.
+
+
+ Source code
+ ===========
+
+ The latest developer version is available in a GitHub repository:
+ https://github.com/aio-libs/aiohttp
+
+ Benchmarks
+ ==========
+
+ If you are interested in efficiency, the AsyncIO community maintains a
+ list of benchmarks on the official wiki:
+ https://github.com/python/asyncio/wiki/Benchmarks
+
+ =========
+ Changelog
+ =========
+
+ ..
+ You should *NOT* be adding new change log entries to this file, this
+ file is managed by towncrier. You *may* edit previous change logs to
+ fix problems like typo corrections or such.
+ To add a new change log entry, please see
+ https://pip.pypa.io/en/latest/development/#adding-a-news-entry
+ we named the news folder "changes".
+
+ WARNING: Don't drop the next directive!
+
+ .. towncrier release notes start
+
+ 3.7.4.post0 (2021-03-06)
+ ========================
+
+ Misc
+ ----
+
+ - Bumped upper bound of the ``chardet`` runtime dependency
+ to allow its v4.0 version stream.
+ `#5366 <https://github.com/aio-libs/aiohttp/issues/5366>`_
+
+
+ ----
+
+
+ 3.7.4 (2021-02-25)
+ ==================
+
+ Bugfixes
+ --------
+
+ - **(SECURITY BUG)** Started preventing open redirects in the
+ ``aiohttp.web.normalize_path_middleware`` middleware. For
+ more details, see
+ https://github.com/aio-libs/aiohttp/security/advisories/GHSA-v6wp-4m6f-gcjg.
+
+ Thanks to `Beast Glatisant <https://github.com/g147>`__ for
+ finding the first instance of this issue and `Jelmer Vernooij
+ <https://jelmer.uk/>`__ for reporting and tracking it down
+ in aiohttp.
+ `#5497 <https://github.com/aio-libs/aiohttp/issues/5497>`_
+ - Fix a difference in how the pure-Python and the Cython-based
+ HTTP parsers construct a ``yarl.URL`` object for the HTTP request-target.
+
+ Before this fix, the Python parser would turn the URI's absolute-path
+ for ``//some-path`` into ``/`` while the Cython code preserved it as
+ ``//some-path``. Now, both do the latter.
+ `#5498 <https://github.com/aio-libs/aiohttp/issues/5498>`_
+
+
+ ----
+
+
+ 3.7.3 (2020-11-18)
+ ==================
+
+ Features
+ --------
+
+ - Use Brotli instead of brotlipy
+ `#3803 <https://github.com/aio-libs/aiohttp/issues/3803>`_
+ - Made exceptions pickleable. Also changed the repr of some exceptions.
+ `#4077 <https://github.com/aio-libs/aiohttp/issues/4077>`_
+
+
+ Bugfixes
+ --------
+
+ - Raise a ClientResponseError instead of an AssertionError for a blank
+ HTTP Reason Phrase.
+ `#3532 <https://github.com/aio-libs/aiohttp/issues/3532>`_
+ - Fix ``web_middlewares.normalize_path_middleware`` behavior for paths without a trailing slash.
+ `#3669 <https://github.com/aio-libs/aiohttp/issues/3669>`_
+ - Fix overshadowing of overlapped sub-application prefixes.
+ `#3701 <https://github.com/aio-libs/aiohttp/issues/3701>`_
+ - Make `BaseConnector.close()` a coroutine and wait until the client closes all connections. Drop deprecated "with Connector():" syntax.
+ `#3736 <https://github.com/aio-libs/aiohttp/issues/3736>`_
+ - Reset the ``sock_read`` timeout each time data is received for an ``aiohttp.client`` response.
+ `#3808 <https://github.com/aio-libs/aiohttp/issues/3808>`_
+ - Fixed type annotation for add_view method of UrlDispatcher to accept any subclass of View
+ `#3880 <https://github.com/aio-libs/aiohttp/issues/3880>`_
+ - Fixed querying the address families from DNS that the current host supports.
+ `#5156 <https://github.com/aio-libs/aiohttp/issues/5156>`_
+ - Change return type of MultipartReader.__aiter__() and BodyPartReader.__aiter__() to AsyncIterator.
+ `#5163 <https://github.com/aio-libs/aiohttp/issues/5163>`_
+ - Provide x86 Windows wheels.
+ `#5230 <https://github.com/aio-libs/aiohttp/issues/5230>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Add documentation for ``aiohttp.web.FileResponse``.
+ `#3958 <https://github.com/aio-libs/aiohttp/issues/3958>`_
+ - Removed deprecation warning in tracing example docs
+ `#3964 <https://github.com/aio-libs/aiohttp/issues/3964>`_
+ - Fixed wrong "Usage" docstring of ``aiohttp.client.request``.
+ `#4603 <https://github.com/aio-libs/aiohttp/issues/4603>`_
+ - Add aiohttp-pydantic to third party libraries
+ `#5228 <https://github.com/aio-libs/aiohttp/issues/5228>`_
+
+
+ Misc
+ ----
+
+ - `#4102 <https://github.com/aio-libs/aiohttp/issues/4102>`_
+
+
+ ----
+
+
+ 3.7.2 (2020-10-27)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fixed static files handling for loops without ``.sendfile()`` support
+ `#5149 <https://github.com/aio-libs/aiohttp/issues/5149>`_
+
+
+ ----
+
+
+ 3.7.1 (2020-10-25)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fixed a type error caused by the conditional import of `Protocol`.
+ `#5111 <https://github.com/aio-libs/aiohttp/issues/5111>`_
+ - The server no longer sends Content-Length for 1xx or 204 responses.
+ `#4901 <https://github.com/aio-libs/aiohttp/issues/4901>`_
+ - Fix run_app typing
+ `#4957 <https://github.com/aio-libs/aiohttp/issues/4957>`_
+ - Always require ``typing_extensions`` library.
+ `#5107 <https://github.com/aio-libs/aiohttp/issues/5107>`_
+ - Fix a variable-shadowing bug causing `ThreadedResolver.resolve` to
+ return the resolved IP as the ``hostname`` in each record, which prevented
+ validation of HTTPS connections.
+ `#5110 <https://github.com/aio-libs/aiohttp/issues/5110>`_
+ - Added annotations to all public attributes.
+ `#5115 <https://github.com/aio-libs/aiohttp/issues/5115>`_
+ - Fix flaky test_when_timeout_smaller_second
+ `#5116 <https://github.com/aio-libs/aiohttp/issues/5116>`_
+ - Ensure sending a zero byte file does not throw an exception
+ `#5124 <https://github.com/aio-libs/aiohttp/issues/5124>`_
+ - Fix a bug in ``web.run_app()`` related to Python version checking on Windows
+ `#5127 <https://github.com/aio-libs/aiohttp/issues/5127>`_
+
+
+ ----
+
+
+ 3.7.0 (2020-10-24)
+ ==================
+
+ Features
+ --------
+
+ - Response headers are now prepared prior to running ``on_response_prepare`` hooks, directly before headers are sent to the client.
+ `#1958 <https://github.com/aio-libs/aiohttp/issues/1958>`_
+ - Add a ``quote_cookie`` option to ``CookieJar``, a way to skip quotation wrapping
+ of cookies containing special characters (this, ``read_bufsize`` and
+ ``ClientResponse.ok`` below are demonstrated in the sketch after this list).
+ `#2571 <https://github.com/aio-libs/aiohttp/issues/2571>`_
+ - Call ``AccessLogger.log`` with the current exception available from ``sys.exc_info()``.
+ `#3557 <https://github.com/aio-libs/aiohttp/issues/3557>`_
+ - `web.UrlDispatcher.add_routes` and `web.Application.add_routes` return a list
+ of registered `AbstractRoute` instances. `AbstractRouteDef.register` (and all
+ subclasses) return a list of registered resources.
+ `#3866 <https://github.com/aio-libs/aiohttp/issues/3866>`_
+ - Added properties for the default ClientSession params to the ClientSession class so they are available for introspection.
+ `#3882 <https://github.com/aio-libs/aiohttp/issues/3882>`_
+ - Don't cancel web handler on peer disconnection, raise `OSError` on reading/writing instead.
+ `#4080 <https://github.com/aio-libs/aiohttp/issues/4080>`_
+ - Implement BaseRequest.get_extra_info() to access a protocol transport's extra info.
+ `#4189 <https://github.com/aio-libs/aiohttp/issues/4189>`_
+ - Added `ClientSession.timeout` property.
+ `#4191 <https://github.com/aio-libs/aiohttp/issues/4191>`_
+ - Allow use of ``SameSite`` in cookies.
+ `#4224 <https://github.com/aio-libs/aiohttp/issues/4224>`_
+ - Use ``loop.sendfile()`` instead of custom implementation if available.
+ `#4269 <https://github.com/aio-libs/aiohttp/issues/4269>`_
+ - Apply SO_REUSEADDR to test server's socket.
+ `#4393 <https://github.com/aio-libs/aiohttp/issues/4393>`_
+ - Use .raw_host instead of slower .host in client API
+ `#4402 <https://github.com/aio-libs/aiohttp/issues/4402>`_
+ - Allow configuring the buffer size of the input stream by passing the ``read_bufsize`` argument.
+ `#4453 <https://github.com/aio-libs/aiohttp/issues/4453>`_
+ - Pass tests on Python 3.8 for Windows.
+ `#4513 <https://github.com/aio-libs/aiohttp/issues/4513>`_
+ - Add `method` and `url` attributes to `TraceRequestChunkSentParams` and `TraceResponseChunkReceivedParams`.
+ `#4674 <https://github.com/aio-libs/aiohttp/issues/4674>`_
+ - Add the ``ClientResponse.ok`` property for checking that the status code is under 400.
+ `#4711 <https://github.com/aio-libs/aiohttp/issues/4711>`_
+ - Don't ceil timeouts that are smaller than 5 seconds.
+ `#4850 <https://github.com/aio-libs/aiohttp/issues/4850>`_
+ - TCPSite now listens by default on all interfaces instead of just IPv4 when `None` is passed in as the host.
+ `#4894 <https://github.com/aio-libs/aiohttp/issues/4894>`_
+ - Bump ``http_parser`` to 2.9.4
+ `#5070 <https://github.com/aio-libs/aiohttp/issues/5070>`_
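+
+ A few of the client-facing entries above (``quote_cookie``, ``read_bufsize``
+ and ``ClientResponse.ok``) can be sketched together; the URL is a placeholder:
+
+ .. code-block:: python
+
+     import aiohttp
+
+     async def demo() -> None:
+         # quote_cookie=False skips quotation wrapping of cookies with
+         # special characters (#2571).
+         jar = aiohttp.CookieJar(quote_cookie=False)
+         async with aiohttp.ClientSession(cookie_jar=jar) as session:
+             # read_bufsize tunes the input stream buffer size (#4453).
+             async with session.get(
+                 "http://example.com", read_bufsize=2 ** 16
+             ) as resp:
+                 # resp.ok is True for status codes under 400 (#4711).
+                 if resp.ok:
+                     print(await resp.text())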
+
+
+ Bugfixes
+ --------
+
+ - Fix keepalive connections not being closed in time
+ `#3296 <https://github.com/aio-libs/aiohttp/issues/3296>`_
+ - Fix failed websocket handshake leaving connection hanging.
+ `#3380 <https://github.com/aio-libs/aiohttp/issues/3380>`_
+ - Fix task cancellation order on exit. The run_app task needs to be cancelled first for cleanup hooks to run with all tasks intact.
+ `#3805 <https://github.com/aio-libs/aiohttp/issues/3805>`_
+ - Don't start heartbeat until _writer is set
+ `#4062 <https://github.com/aio-libs/aiohttp/issues/4062>`_
+ - Fix handling of multipart file uploads without a content type.
+ `#4089 <https://github.com/aio-libs/aiohttp/issues/4089>`_
+ - Preserve view handler function attributes across middlewares
+ `#4174 <https://github.com/aio-libs/aiohttp/issues/4174>`_
+ - Fix the string representation of ``ServerDisconnectedError``.
+ `#4175 <https://github.com/aio-libs/aiohttp/issues/4175>`_
+ - Raise ``RuntimeError`` when trying to get the encoding of a body that has not been read.
+ `#4214 <https://github.com/aio-libs/aiohttp/issues/4214>`_
+ - Remove warning messages from noop.
+ `#4282 <https://github.com/aio-libs/aiohttp/issues/4282>`_
+ - Raise ``ClientPayloadError`` if ``FormData`` is re-processed.
+ `#4345 <https://github.com/aio-libs/aiohttp/issues/4345>`_
+ - Fix a warning about unfinished task in ``web_protocol.py``
+ `#4408 <https://github.com/aio-libs/aiohttp/issues/4408>`_
+ - Fixed 'deflate' compression to conform to RFC 2616.
+ `#4506 <https://github.com/aio-libs/aiohttp/issues/4506>`_
+ - Fixed OverflowError on platforms with 32-bit time_t
+ `#4515 <https://github.com/aio-libs/aiohttp/issues/4515>`_
+ - Fixed ``request.body_exists`` returning the wrong value for methods without a body.
+ `#4528 <https://github.com/aio-libs/aiohttp/issues/4528>`_
+ - Fix connecting to link-local IPv6 addresses.
+ `#4554 <https://github.com/aio-libs/aiohttp/issues/4554>`_
+ - Fix a problem with connection waiters that are never awaited.
+ `#4562 <https://github.com/aio-libs/aiohttp/issues/4562>`_
+ - Always make sure the transport is not closing before reusing a connection.
+
+ Reusing a protocol based on keep-alive headers is unreliable.
+ For example, uWSGI does not support keep-alive even when it serves an
+ HTTP/1.1 request, unless uWSGI is explicitly configured with the
+ ``--http-keepalive`` option.
+
+ Servers designed like uWSGI could cause aiohttp to intermittently
+ raise a ConnectionResetException when the protocol pool runs
+ out and a stale protocol is reused.
+ `#4587 <https://github.com/aio-libs/aiohttp/issues/4587>`_
+ - Handle the last CRLF correctly even if it is received in a separate TCP segment.
+ `#4630 <https://github.com/aio-libs/aiohttp/issues/4630>`_
+ - Fix the ``register_resource`` function to validate the route name before splitting it, so that route names can include Python keywords.
+ `#4691 <https://github.com/aio-libs/aiohttp/issues/4691>`_
+ - Improve typing annotations for ``web.Request``, ``aiohttp.ClientResponse`` and
+ ``multipart`` module.
+ `#4736 <https://github.com/aio-libs/aiohttp/issues/4736>`_
+ - Fix the resolver task not being awaited when the connector is cancelled.
+ `#4795 <https://github.com/aio-libs/aiohttp/issues/4795>`_
+ - Fix a bug where aiohttp would not return any error for invalid request methods.
+ `#4798 <https://github.com/aio-libs/aiohttp/issues/4798>`_
+ - Fix HEAD requests for static content.
+ `#4809 <https://github.com/aio-libs/aiohttp/issues/4809>`_
+ - Fix incorrect size calculation for memoryview
+ `#4890 <https://github.com/aio-libs/aiohttp/issues/4890>`_
+ - Add ``HTTPMove`` to ``__all__``.
+ `#4897 <https://github.com/aio-libs/aiohttp/issues/4897>`_
+ - Fixed the type annotations in the ``tracing`` module.
+ `#4912 <https://github.com/aio-libs/aiohttp/issues/4912>`_
+ - Fix typing for multipart ``__aiter__``.
+ `#4931 <https://github.com/aio-libs/aiohttp/issues/4931>`_
+ - Fix for race condition on connections in BaseConnector that leads to exceeding the connection limit.
+ `#4936 <https://github.com/aio-libs/aiohttp/issues/4936>`_
+ - Add forced UTF-8 encoding for ``application/rdap+json`` responses.
+ `#4938 <https://github.com/aio-libs/aiohttp/issues/4938>`_
+ - Fix inconsistency between Python and C http request parsers in parsing pct-encoded URL.
+ `#4972 <https://github.com/aio-libs/aiohttp/issues/4972>`_
+ - Fix connection closing issue in HEAD request.
+ `#5012 <https://github.com/aio-libs/aiohttp/issues/5012>`_
+ - Fix type hint on BaseRunner.addresses (from ``List[str]`` to ``List[Any]``)
+ `#5086 <https://github.com/aio-libs/aiohttp/issues/5086>`_
+ - Make `web.run_app()` more responsive to Ctrl+C on Windows for Python < 3.8. It slightly
+ increases CPU load as a side effect.
+ `#5098 <https://github.com/aio-libs/aiohttp/issues/5098>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Fix example code in client quick-start
+ `#3376 <https://github.com/aio-libs/aiohttp/issues/3376>`_
+ - Updated the docs so there is no contradiction in ``ttl_dns_cache`` default value
+ `#3512 <https://github.com/aio-libs/aiohttp/issues/3512>`_
+ - Add 'Deploy with SSL' to docs.
+ `#4201 <https://github.com/aio-libs/aiohttp/issues/4201>`_
+ - Change typing of the secure argument on StreamResponse.set_cookie from ``Optional[str]`` to ``Optional[bool]``
+ `#4204 <https://github.com/aio-libs/aiohttp/issues/4204>`_
+ - Changed the ``ttl_dns_cache`` type from ``int`` to ``Optional[int]``.
+ `#4270 <https://github.com/aio-libs/aiohttp/issues/4270>`_
+ - Simplify the README hello world example and add a documentation page for people coming from requests.
+ `#4272 <https://github.com/aio-libs/aiohttp/issues/4272>`_
+ - Improve some code examples in the documentation involving websockets and starting a simple HTTP site with an AppRunner.
+ `#4285 <https://github.com/aio-libs/aiohttp/issues/4285>`_
+ - Fix typo in code example in Multipart docs
+ `#4312 <https://github.com/aio-libs/aiohttp/issues/4312>`_
+ - Fix code example in Multipart section.
+ `#4314 <https://github.com/aio-libs/aiohttp/issues/4314>`_
+ - Update contributing guide so new contributors read the most recent version of that guide. Update command used to create test coverage reporting.
+ `#4810 <https://github.com/aio-libs/aiohttp/issues/4810>`_
+ - Spelling: Change "canonize" to "canonicalize".
+ `#4986 <https://github.com/aio-libs/aiohttp/issues/4986>`_
+ - Add ``aiohttp-sse-client`` library to third party usage list.
+ `#5084 <https://github.com/aio-libs/aiohttp/issues/5084>`_
+
+
+ Misc
+ ----
+
+ - `#2856 <https://github.com/aio-libs/aiohttp/issues/2856>`_, `#4218 <https://github.com/aio-libs/aiohttp/issues/4218>`_, `#4250 <https://github.com/aio-libs/aiohttp/issues/4250>`_
+
+
+ ----
+
+
+ 3.6.3 (2020-10-12)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Pin yarl to ``<1.6.0`` to avoid buggy behavior that will be fixed by the next aiohttp
+ release.
+
+ 3.6.2 (2019-10-09)
+ ==================
+
+ Features
+ --------
+
+ - Made exceptions pickleable. Also changed the repr of some exceptions.
+ `#4077 <https://github.com/aio-libs/aiohttp/issues/4077>`_
+ - Use ``Iterable`` type hint instead of ``Sequence`` for ``Application`` *middleware*
+ parameter. `#4125 <https://github.com/aio-libs/aiohttp/issues/4125>`_
+
+
+ Bugfixes
+ --------
+
+ - Reset the ``sock_read`` timeout each time data is received for an
+ ``aiohttp.ClientResponse``. `#3808
+ <https://github.com/aio-libs/aiohttp/issues/3808>`_
+ - Fix handling of expired cookies so they are not stored in CookieJar.
+ `#4063 <https://github.com/aio-libs/aiohttp/issues/4063>`_
+ - Fix misleading message in the string representation of ``ClientConnectorError``;
+ ``self.ssl == None`` means default SSL context, not SSL disabled `#4097
+ <https://github.com/aio-libs/aiohttp/issues/4097>`_
+ - Don't clobber HTTP status when using FileResponse.
+ `#4106 <https://github.com/aio-libs/aiohttp/issues/4106>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Added minimal required logging configuration to logging documentation.
+ `#2469 <https://github.com/aio-libs/aiohttp/issues/2469>`_
+ - Update docs to reflect proxy support.
+ `#4100 <https://github.com/aio-libs/aiohttp/issues/4100>`_
+ - Fix typo in code example in testing docs.
+ `#4108 <https://github.com/aio-libs/aiohttp/issues/4108>`_
+
+
+ Misc
+ ----
+
+ - `#4102 <https://github.com/aio-libs/aiohttp/issues/4102>`_
+
+
+ ----
+
+
+ 3.6.1 (2019-09-19)
+ ==================
+
+ Features
+ --------
+
+ - Compatibility with Python 3.8.
+ `#4056 <https://github.com/aio-libs/aiohttp/issues/4056>`_
+
+
+ Bugfixes
+ --------
+
+ - Correct some exception string formats
+ `#4068 <https://github.com/aio-libs/aiohttp/issues/4068>`_
+ - Emit a warning when ``ssl.OP_NO_COMPRESSION`` is
+ unavailable because the runtime is built against
+ an outdated OpenSSL.
+ `#4052 <https://github.com/aio-libs/aiohttp/issues/4052>`_
+ - Update multidict requirement to >= 4.5
+ `#4057 <https://github.com/aio-libs/aiohttp/issues/4057>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Provide pytest-aiohttp namespace for pytest fixtures in docs.
+ `#3723 <https://github.com/aio-libs/aiohttp/issues/3723>`_
+
+
+ ----
+
+
+ 3.6.0 (2019-09-06)
+ ==================
+
+ Features
+ --------
+
+ - Add support for Named Pipes (Site and Connector) under Windows. This feature requires
+ the Proactor event loop to work. `#3629
+ <https://github.com/aio-libs/aiohttp/issues/3629>`_
+ - Removed ``Transfer-Encoding: chunked`` header from websocket responses to be
+ compatible with more http proxy servers. `#3798
+ <https://github.com/aio-libs/aiohttp/issues/3798>`_
+ - Accept non-GET request for starting websocket handshake on server side.
+ `#3980 <https://github.com/aio-libs/aiohttp/issues/3980>`_
+
+
+ Bugfixes
+ --------
+
+ - Raise a ClientResponseError instead of an AssertionError for a blank
+ HTTP Reason Phrase.
+ `#3532 <https://github.com/aio-libs/aiohttp/issues/3532>`_
+ - Fix an issue where cookies would sometimes not be set during a redirect.
+ `#3576 <https://github.com/aio-libs/aiohttp/issues/3576>`_
+ - Change normalize_path_middleware to use 308 redirect instead of 301.
+
+ This lets clients keep using PUT/POST methods on endpoints that are
+ redirected because of a trailing slash.
+ `#3579 <https://github.com/aio-libs/aiohttp/issues/3579>`_
+ - Drop the processed task from ``all_tasks()`` list early. It prevents logging about a
+ task with unhandled exception when the server is used in conjunction with
+ ``asyncio.run()``. `#3587 <https://github.com/aio-libs/aiohttp/issues/3587>`_
+ - ``Signal`` type annotation changed from ``Signal[Callable[['TraceConfig'],
+ Awaitable[None]]]`` to ``Signal[Callable[ClientSession, SimpleNamespace, ...]]``.
+ `#3595 <https://github.com/aio-libs/aiohttp/issues/3595>`_
+ - Use sanitized URL as Location header in redirects
+ `#3614 <https://github.com/aio-libs/aiohttp/issues/3614>`_
+ - Improve typing annotations for multipart.py along with changes required
+ by mypy in files that reference multipart.py.
+ `#3621 <https://github.com/aio-libs/aiohttp/issues/3621>`_
+ - Close session created inside ``aiohttp.request`` when unhandled exception occurs
+ `#3628 <https://github.com/aio-libs/aiohttp/issues/3628>`_
+ - Clean up per-chunk data in generic data read, fixing a memory leak.
+ `#3631 <https://github.com/aio-libs/aiohttp/issues/3631>`_
+ - Use correct type for add_view and family
+ `#3633 <https://github.com/aio-libs/aiohttp/issues/3633>`_
+ - Fix _keepalive field in __slots__ of ``RequestHandler``.
+ `#3644 <https://github.com/aio-libs/aiohttp/issues/3644>`_
+ - Properly handle ConnectionResetError, to silence the "Cannot write to closing
+ transport" exception when clients disconnect uncleanly.
+ `#3648 <https://github.com/aio-libs/aiohttp/issues/3648>`_
+ - Suppress pytest warnings due to ``test_utils`` classes
+ `#3660 <https://github.com/aio-libs/aiohttp/issues/3660>`_
+ - Fix overshadowing of overlapped sub-application prefixes.
+ `#3701 <https://github.com/aio-libs/aiohttp/issues/3701>`_
+ - Fixed return type annotation for WSMessage.json()
+ `#3720 <https://github.com/aio-libs/aiohttp/issues/3720>`_
+ - Properly expose TooManyRedirects publicly as documented.
+ `#3818 <https://github.com/aio-libs/aiohttp/issues/3818>`_
+ - Fix missing brackets for IPv6 in proxy CONNECT request
+ `#3841 <https://github.com/aio-libs/aiohttp/issues/3841>`_
+ - Make the signature of ``aiohttp.test_utils.TestClient.request`` match
+ ``aiohttp.ClientSession.request`` according to the docs `#3852
+ <https://github.com/aio-libs/aiohttp/issues/3852>`_
+ - Use correct style for re-exported imports, makes mypy ``--strict`` mode happy.
+ `#3868 <https://github.com/aio-libs/aiohttp/issues/3868>`_
+ - Fixed type annotation for add_view method of UrlDispatcher to accept any subclass of
+ View `#3880 <https://github.com/aio-libs/aiohttp/issues/3880>`_
+ - Made cython HTTP parser set Reason-Phrase of the response to an empty string if it is
+ missing. `#3906 <https://github.com/aio-libs/aiohttp/issues/3906>`_
+ - Add URL to the string representation of ClientResponseError.
+ `#3959 <https://github.com/aio-libs/aiohttp/issues/3959>`_
+ - Accept ``istr`` keys in ``LooseHeaders`` type hints.
+ `#3976 <https://github.com/aio-libs/aiohttp/issues/3976>`_
+ - Fixed race conditions in _resolve_host caching and throttling when tracing is enabled.
+ `#4013 <https://github.com/aio-libs/aiohttp/issues/4013>`_
+ - For URLs like "unix://localhost/..." set Host HTTP header to "localhost" instead of
+ "localhost:None". `#4039 <https://github.com/aio-libs/aiohttp/issues/4039>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Modify documentation for Background Tasks to remove deprecated usage of event loop.
+ `#3526 <https://github.com/aio-libs/aiohttp/issues/3526>`_
+ - use ``if __name__ == '__main__':`` in server examples.
+ `#3775 <https://github.com/aio-libs/aiohttp/issues/3775>`_
+ - Update documentation reference to the default access logger.
+ `#3783 <https://github.com/aio-libs/aiohttp/issues/3783>`_
+ - Improve documentation for ``web.BaseRequest.path`` and ``web.BaseRequest.raw_path``.
+ `#3791 <https://github.com/aio-libs/aiohttp/issues/3791>`_
+ - Removed deprecation warning in tracing example docs
+ `#3964 <https://github.com/aio-libs/aiohttp/issues/3964>`_
+
+
+ ----
+
+
+ 3.5.4 (2019-01-12)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fix stream ``.read()`` / ``.readany()`` / ``.iter_any()`` which used to return a
+ partial content only in case of compressed content
+ `#3525 <https://github.com/aio-libs/aiohttp/issues/3525>`_
+
+
+ 3.5.3 (2019-01-10)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fix type stubs for ``aiohttp.web.run_app(access_log=True)`` and fix edge case of
+ ``access_log=True`` and the event loop being in debug mode. `#3504
+ <https://github.com/aio-libs/aiohttp/issues/3504>`_
+ - Fix ``aiohttp.ClientTimeout`` type annotations to accept ``None`` for fields
+ `#3511 <https://github.com/aio-libs/aiohttp/issues/3511>`_
+ - Send custom per-request cookies even if session jar is empty
+ `#3515 <https://github.com/aio-libs/aiohttp/issues/3515>`_
+ - Restore Linux binary wheels publishing on PyPI
+
+ ----
+
+
+ 3.5.2 (2019-01-08)
+ ==================
+
+ Features
+ --------
+
+ - ``FileResponse`` from ``web_fileresponse.py`` uses a ``ThreadPoolExecutor`` to work
+ with files asynchronously. I/O based payloads from ``payload.py`` use a
+ ``ThreadPoolExecutor`` to work with I/O objects asynchronously. `#3313
+ <https://github.com/aio-libs/aiohttp/issues/3313>`_
+ - Internal Server Errors are now rendered in plain text if the browser does not support HTML.
+ `#3483 <https://github.com/aio-libs/aiohttp/issues/3483>`_
+
+
+ Bugfixes
+ --------
+
+ - Preserve MultipartWriter parts headers on write. Refactor how
+ ``Payload.headers`` are handled. Payload instances now always have headers and
+ Content-Type defined. Fix Payload Content-Disposition header reset after initial
+ creation. `#3035 <https://github.com/aio-libs/aiohttp/issues/3035>`_
+ - Log suppressed exceptions in ``GunicornWebWorker``.
+ `#3464 <https://github.com/aio-libs/aiohttp/issues/3464>`_
+ - Remove wildcard imports.
+ `#3468 <https://github.com/aio-libs/aiohttp/issues/3468>`_
+ - Use the same task for app initialization and web server handling in gunicorn workers.
+ This allows Python 3.7 context vars to be used smoothly.
+ `#3471 <https://github.com/aio-libs/aiohttp/issues/3471>`_
+ - Fix handling of chunked+gzipped responses when the first chunk does not give
+ uncompressed data `#3477 <https://github.com/aio-libs/aiohttp/issues/3477>`_
+ - Replace ``collections.MutableMapping`` with ``collections.abc.MutableMapping`` to
+ avoid a deprecation warning. `#3480
+ <https://github.com/aio-libs/aiohttp/issues/3480>`_
+ - ``Payload.size`` type annotation changed from ``Optional[float]`` to
+ ``Optional[int]``. `#3484 <https://github.com/aio-libs/aiohttp/issues/3484>`_
+ - Ignore done tasks when cancelling pending activities on ``web.run_app``
+ finalization. `#3497 <https://github.com/aio-libs/aiohttp/issues/3497>`_
+
+
+ Improved Documentation
+ ----------------------
+
+ - Add documentation for ``aiohttp.web.HTTPException``.
+ `#3490 <https://github.com/aio-libs/aiohttp/issues/3490>`_
+
+
+ Misc
+ ----
+
+ - `#3487 <https://github.com/aio-libs/aiohttp/issues/3487>`_
+
+
+ ----
+
+
+ 3.5.1 (2018-12-24)
+ ==================
+
+ - Fix a regression in ``ClientSession._requote_redirect_url`` modification in
+ debug mode.
+
+ 3.5.0 (2018-12-22)
+ ==================
+
+ Features
+ --------
+
+ - The library's type annotations are now checked in strict mode.
+ - Add support for setting cookies for an individual request (`#2387
+ <https://github.com/aio-libs/aiohttp/pull/2387>`_)
+ - ``Application.add_domain`` implementation (`#2809
+ <https://github.com/aio-libs/aiohttp/pull/2809>`_)
+ - The default ``app`` in the request returned by ``test_utils.make_mocked_request`` can
+ now have objects assigned to it and retrieved using the ``[]`` operator. (`#3174
+ <https://github.com/aio-libs/aiohttp/pull/3174>`_)
+ - Make ``request.url`` accessible when transport is closed. (`#3177
+ <https://github.com/aio-libs/aiohttp/pull/3177>`_)
+ - Add ``zlib_executor_size`` argument to ``Response`` constructor to allow compression
+ to run in a background executor to avoid blocking the main thread and potentially
+ triggering health check failures. (`#3205
+ <https://github.com/aio-libs/aiohttp/pull/3205>`_)
+ - Enable users to set ``ClientTimeout`` in ``aiohttp.request`` (`#3213
+ <https://github.com/aio-libs/aiohttp/pull/3213>`_)
+ - Don't raise a warning if ``NETRC`` environment variable is not set and ``~/.netrc``
+ file doesn't exist. (`#3267 <https://github.com/aio-libs/aiohttp/pull/3267>`_)
+ - Add a default logging handler to ``web.run_app``. If the ``Application.debug`` flag
+ is set and the default logger ``aiohttp.access`` is used, access logs will now be
+ output using a *stderr* ``StreamHandler`` if no handlers are attached. Furthermore,
+ if the default logger has no log level set, the log level will be set to
+ ``DEBUG``. (`#3324 <https://github.com/aio-libs/aiohttp/pull/3324>`_)
+ - Add ``method`` argument to ``session.ws_connect()``. Some server APIs require a
+ different HTTP method for WebSocket connection establishment; ``Docker exec``, for
+ example, needs POST (see the sketch after this list). (`#3378
+ <https://github.com/aio-libs/aiohttp/pull/3378>`_)
+ - Create a separate task for handling each request. (`#3406
+ <https://github.com/aio-libs/aiohttp/pull/3406>`_)
+
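+ A minimal client sketch combining several of the features above: per-request
+ cookies (#2387), ``ClientTimeout`` in ``aiohttp.request()`` (#3213), and the
+ ``method`` argument to ``ws_connect()`` (#3378). The URLs are placeholders;
+ this snippet is illustrative and not part of the upstream changelog::
+
+     import asyncio
+
+     import aiohttp
+
+     async def main() -> None:
+         # Per-request timeout, now supported by the module-level helper.
+         timeout = aiohttp.ClientTimeout(total=10)
+         async with aiohttp.request("GET", "https://example.com",
+                                    timeout=timeout) as resp:
+             print(resp.status)
+
+         async with aiohttp.ClientSession() as session:
+             # Per-request cookies are sent even if the session jar is empty.
+             async with session.get("https://example.com",
+                                    cookies={"token": "abc"}) as resp:
+                 await resp.text()
+
+             # Some APIs (e.g. Docker exec) need POST for the WS handshake.
+             async with session.ws_connect("http://localhost:8080/ws",
+                                           method="POST") as ws:
+                 await ws.send_str("hello")
+
+     asyncio.run(main())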
+
+ Bugfixes
+ --------
+
+ - Enable passing ``access_log_class`` via ``handler_args`` (`#3158
+ <https://github.com/aio-libs/aiohttp/pull/3158>`_)
+ - Return empty bytes with end-of-chunk marker in empty stream reader. (`#3186
+ <https://github.com/aio-libs/aiohttp/pull/3186>`_)
+ - Accept ``CIMultiDictProxy`` instances for ``headers`` argument in ``web.Response``
+ constructor. (`#3207 <https://github.com/aio-libs/aiohttp/pull/3207>`_)
+ - Don't uppercase HTTP method in parser (`#3233
+ <https://github.com/aio-libs/aiohttp/pull/3233>`_)
+ - Make method match regexp RFC-7230 compliant (`#3235
+ <https://github.com/aio-libs/aiohttp/pull/3235>`_)
+ - Add ``app.pre_frozen`` state to properly handle startup signals in
+ sub-applications. (`#3237 <https://github.com/aio-libs/aiohttp/pull/3237>`_)
+ - Enhance parsing and validation of ``helpers.BasicAuth.decode``. (`#3239
+ <https://github.com/aio-libs/aiohttp/pull/3239>`_)
+ - Change imports from the collections module in preparation for Python 3.8. (`#3258
+ <https://github.com/aio-libs/aiohttp/pull/3258>`_)
+ - Ensure the Host header is added first to ``ClientRequest`` to better replicate
+ browser behavior (`#3265 <https://github.com/aio-libs/aiohttp/pull/3265>`_)
+ - Fix forward compatibility with Python 3.8: importing ABCs directly from the
+ collections module will not be supported anymore. (`#3273
+ <https://github.com/aio-libs/aiohttp/pull/3273>`_)
+ - Keep the query string in ``normalize_path_middleware`` (see the sketch after
+ this list). (`#3278 <https://github.com/aio-libs/aiohttp/pull/3278>`_)
+ - Fix missing parameter ``raise_for_status`` for ``aiohttp.request()`` (`#3290
+ <https://github.com/aio-libs/aiohttp/pull/3290>`_)
+ - Bracket IPv6 addresses in the HOST header (`#3304
+ <https://github.com/aio-libs/aiohttp/pull/3304>`_)
+ - Fix default message for server ping and pong frames. (`#3308
+ <https://github.com/aio-libs/aiohttp/pull/3308>`_)
+ - Fix tests/test_connector.py typo and tests/autobahn/server.py duplicate loop
+ def. (`#3337 <https://github.com/aio-libs/aiohttp/pull/3337>`_)
+ - Fix the false-negative ``end_of_HTTP_chunk`` indicator in the
+ ``StreamReader.readchunk`` function (`#3361
+ <https://github.com/aio-libs/aiohttp/pull/3361>`_)
+ - Release HTTP response before raising status exception (`#3364
+ <https://github.com/aio-libs/aiohttp/pull/3364>`_)
+ - Fix task cancellation when ``sendfile()`` syscall is used by static file
+ handling. (`#3383 <https://github.com/aio-libs/aiohttp/pull/3383>`_)
+ - Fix stack trace for ``asyncio.TimeoutError``, which was not logged when caught
+ in the handler. (`#3414 <https://github.com/aio-libs/aiohttp/pull/3414>`_)
+
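+ A sketch of ``normalize_path_middleware`` with a query string, which now
+ survives path normalization (illustrative only, not part of the upstream
+ changelog)::
+
+     from aiohttp import web
+
+     async def handle(request: web.Request) -> web.Response:
+         # e.g. GET //users?id=1 redirects to /users?id=1; ?id=1 is kept.
+         return web.Response(text=request.query_string)
+
+     app = web.Application(middlewares=[web.normalize_path_middleware()])
+     app.add_routes([web.get("/users", handle)])
+
+     if __name__ == '__main__':
+         web.run_app(app)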
+
+ Improved Documentation
+ ----------------------
+
+ - Improve documentation of ``Application.make_handler`` parameters. (`#3152
+ <https://github.com/aio-libs/aiohttp/pull/3152>`_)
+ - Fix ``BaseRequest.raw_headers`` doc. (`#3215
+ <https://github.com/aio-libs/aiohttp/pull/3215>`_)
+ - Fix typo in TypeError exception reason in ``web.Application._handle`` (`#3229
+ <https://github.com/aio-libs/aiohttp/pull/3229>`_)
+ - Make the server access log format placeholder ``%b`` documentation reflect
+ actual behavior and the docstring. (`#3307 <https://github.com/aio-libs/aiohttp/pull/3307>`_)
+
+
+ Deprecations and Removals
+ -------------------------
+
+ - Deprecate modification of ``session.requote_redirect_url`` (`#2278
+ <https://github.com/aio-libs/aiohttp/pull/2278>`_)
+ - Deprecate ``stream.unread_data()`` (`#3260
+ <https://github.com/aio-libs/aiohttp/pull/3260>`_)
+ - Deprecate use of a boolean in ``resp.enable_compression()`` (`#3318
+ <https://github.com/aio-libs/aiohttp/pull/3318>`_)
+ - Encourage creation of aiohttp public objects inside a coroutine (`#3331
+ <https://github.com/aio-libs/aiohttp/pull/3331>`_)
+ - Drop dead ``Connection.detach()`` and ``Connection.writer``. Both methods were broken
+ for more than 2 years. (`#3358 <https://github.com/aio-libs/aiohttp/pull/3358>`_)
+ - Deprecate ``app.loop``, ``request.loop``, ``client.loop`` and ``connector.loop``
+ properties. (`#3374 <https://github.com/aio-libs/aiohttp/pull/3374>`_)
+ - Deprecate explicit debug argument. Use asyncio debug mode instead. (`#3381
+ <https://github.com/aio-libs/aiohttp/pull/3381>`_)
+ - Deprecate body parameter in HTTPException (and derived classes) constructor. (`#3385
+ <https://github.com/aio-libs/aiohttp/pull/3385>`_)
+ - Deprecate bare connector close; use ``async with connector:`` and ``await
+ connector.close()`` instead (see the sketch after this list). (`#3417
+ <https://github.com/aio-libs/aiohttp/pull/3417>`_)
+ - Deprecate obsolete ``read_timeout`` and ``conn_timeout`` in ``ClientSession``
+ constructor. (`#3438 <https://github.com/aio-libs/aiohttp/pull/3438>`_)
+
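+ The recommended connector lifecycle after this deprecation, as a minimal
+ sketch (``connector_owner=False`` keeps the session from closing the shared
+ connector itself; the URL is a placeholder)::
+
+     import asyncio
+
+     import aiohttp
+
+     async def main() -> None:
+         async with aiohttp.TCPConnector() as connector:
+             async with aiohttp.ClientSession(
+                 connector=connector, connector_owner=False
+             ) as session:
+                 async with session.get("https://example.com") as resp:
+                     print(resp.status)
+
+     asyncio.run(main())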
+
+ Misc
+ ----
+
+ - #3341, #3351
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Framework :: AsyncIO
+Requires-Python: >=3.6
+Provides-Extra: speedups
diff --git a/third_party/python/aiohttp/aiohttp.egg-info/SOURCES.txt b/third_party/python/aiohttp/aiohttp.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..e006e2a0d8
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp.egg-info/SOURCES.txt
@@ -0,0 +1,246 @@
+CHANGES.rst
+CONTRIBUTORS.txt
+LICENSE.txt
+MANIFEST.in
+Makefile
+README.rst
+pyproject.toml
+setup.cfg
+setup.py
+aiohttp/__init__.py
+aiohttp/_cparser.pxd
+aiohttp/_find_header.c
+aiohttp/_find_header.h
+aiohttp/_find_header.pxd
+aiohttp/_frozenlist.c
+aiohttp/_frozenlist.pyx
+aiohttp/_headers.pxi
+aiohttp/_helpers.c
+aiohttp/_helpers.pyi
+aiohttp/_helpers.pyx
+aiohttp/_http_parser.c
+aiohttp/_http_parser.pyx
+aiohttp/_http_writer.c
+aiohttp/_http_writer.pyx
+aiohttp/_websocket.c
+aiohttp/_websocket.pyx
+aiohttp/abc.py
+aiohttp/base_protocol.py
+aiohttp/client.py
+aiohttp/client_exceptions.py
+aiohttp/client_proto.py
+aiohttp/client_reqrep.py
+aiohttp/client_ws.py
+aiohttp/connector.py
+aiohttp/cookiejar.py
+aiohttp/formdata.py
+aiohttp/frozenlist.py
+aiohttp/frozenlist.pyi
+aiohttp/hdrs.py
+aiohttp/helpers.py
+aiohttp/http.py
+aiohttp/http_exceptions.py
+aiohttp/http_parser.py
+aiohttp/http_websocket.py
+aiohttp/http_writer.py
+aiohttp/locks.py
+aiohttp/log.py
+aiohttp/multipart.py
+aiohttp/payload.py
+aiohttp/payload_streamer.py
+aiohttp/py.typed
+aiohttp/pytest_plugin.py
+aiohttp/resolver.py
+aiohttp/signals.py
+aiohttp/signals.pyi
+aiohttp/streams.py
+aiohttp/tcp_helpers.py
+aiohttp/test_utils.py
+aiohttp/tracing.py
+aiohttp/typedefs.py
+aiohttp/web.py
+aiohttp/web_app.py
+aiohttp/web_exceptions.py
+aiohttp/web_fileresponse.py
+aiohttp/web_log.py
+aiohttp/web_middlewares.py
+aiohttp/web_protocol.py
+aiohttp/web_request.py
+aiohttp/web_response.py
+aiohttp/web_routedef.py
+aiohttp/web_runner.py
+aiohttp/web_server.py
+aiohttp/web_urldispatcher.py
+aiohttp/web_ws.py
+aiohttp/worker.py
+aiohttp.egg-info/PKG-INFO
+aiohttp.egg-info/SOURCES.txt
+aiohttp.egg-info/dependency_links.txt
+aiohttp.egg-info/requires.txt
+aiohttp.egg-info/top_level.txt
+aiohttp/.hash/_cparser.pxd.hash
+aiohttp/.hash/_find_header.pxd.hash
+aiohttp/.hash/_frozenlist.pyx.hash
+aiohttp/.hash/_helpers.pyi.hash
+aiohttp/.hash/_helpers.pyx.hash
+aiohttp/.hash/_http_parser.pyx.hash
+aiohttp/.hash/_http_writer.pyx.hash
+aiohttp/.hash/_websocket.pyx.hash
+aiohttp/.hash/frozenlist.pyi.hash
+aiohttp/.hash/hdrs.py.hash
+aiohttp/.hash/signals.pyi.hash
+docs/Makefile
+docs/abc.rst
+docs/aiohttp-icon.svg
+docs/aiohttp-plain.svg
+docs/built_with.rst
+docs/changes.rst
+docs/client.rst
+docs/client_advanced.rst
+docs/client_quickstart.rst
+docs/client_reference.rst
+docs/conf.py
+docs/contributing.rst
+docs/deployment.rst
+docs/essays.rst
+docs/external.rst
+docs/faq.rst
+docs/favicon.ico
+docs/glossary.rst
+docs/http_request_lifecycle.rst
+docs/index.rst
+docs/logging.rst
+docs/make.bat
+docs/migration_to_2xx.rst
+docs/misc.rst
+docs/multipart.rst
+docs/multipart_reference.rst
+docs/new_router.rst
+docs/old-logo.png
+docs/old-logo.svg
+docs/powered_by.rst
+docs/signals.rst
+docs/spelling_wordlist.txt
+docs/streams.rst
+docs/structures.rst
+docs/testing.rst
+docs/third_party.rst
+docs/tracing_reference.rst
+docs/utilities.rst
+docs/web.rst
+docs/web_advanced.rst
+docs/web_lowlevel.rst
+docs/web_quickstart.rst
+docs/web_reference.rst
+docs/websocket_utilities.rst
+docs/whats_new_1_1.rst
+docs/whats_new_3_0.rst
+docs/_static/aiohttp-icon-128x128.png
+examples/background_tasks.py
+examples/cli_app.py
+examples/client_auth.py
+examples/client_json.py
+examples/client_ws.py
+examples/curl.py
+examples/fake_server.py
+examples/lowlevel_srv.py
+examples/server.crt
+examples/server.csr
+examples/server.key
+examples/server_simple.py
+examples/static_files.py
+examples/web_classview.py
+examples/web_cookies.py
+examples/web_rewrite_headers_middleware.py
+examples/web_srv.py
+examples/web_srv_route_deco.py
+examples/web_srv_route_table.py
+examples/web_ws.py
+examples/websocket.html
+examples/legacy/crawl.py
+examples/legacy/srv.py
+examples/legacy/tcp_protocol_parser.py
+tests/aiohttp.jpg
+tests/aiohttp.png
+tests/conftest.py
+tests/data.unknown_mime_type
+tests/data.zero_bytes
+tests/hello.txt.gz
+tests/test_base_protocol.py
+tests/test_classbasedview.py
+tests/test_client_connection.py
+tests/test_client_exceptions.py
+tests/test_client_fingerprint.py
+tests/test_client_functional.py
+tests/test_client_proto.py
+tests/test_client_request.py
+tests/test_client_response.py
+tests/test_client_session.py
+tests/test_client_ws.py
+tests/test_client_ws_functional.py
+tests/test_connector.py
+tests/test_cookiejar.py
+tests/test_flowcontrol_streams.py
+tests/test_formdata.py
+tests/test_frozenlist.py
+tests/test_helpers.py
+tests/test_http_exceptions.py
+tests/test_http_parser.py
+tests/test_http_writer.py
+tests/test_locks.py
+tests/test_loop.py
+tests/test_multipart.py
+tests/test_multipart_helpers.py
+tests/test_payload.py
+tests/test_proxy.py
+tests/test_proxy_functional.py
+tests/test_pytest_plugin.py
+tests/test_resolver.py
+tests/test_route_def.py
+tests/test_run_app.py
+tests/test_signals.py
+tests/test_streams.py
+tests/test_tcp_helpers.py
+tests/test_test_utils.py
+tests/test_tracing.py
+tests/test_urldispatch.py
+tests/test_web_app.py
+tests/test_web_cli.py
+tests/test_web_exceptions.py
+tests/test_web_functional.py
+tests/test_web_log.py
+tests/test_web_middleware.py
+tests/test_web_protocol.py
+tests/test_web_request.py
+tests/test_web_request_handler.py
+tests/test_web_response.py
+tests/test_web_runner.py
+tests/test_web_sendfile.py
+tests/test_web_sendfile_functional.py
+tests/test_web_server.py
+tests/test_web_urldispatcher.py
+tests/test_web_websocket.py
+tests/test_web_websocket_functional.py
+tests/test_websocket_handshake.py
+tests/test_websocket_parser.py
+tests/test_websocket_writer.py
+tests/test_worker.py
+tests/autobahn/client.py
+tests/autobahn/fuzzingclient.json
+tests/autobahn/fuzzingserver.json
+tests/autobahn/server.py
+vendor/http-parser/.git
+vendor/http-parser/.gitignore
+vendor/http-parser/.mailmap
+vendor/http-parser/.travis.yml
+vendor/http-parser/AUTHORS
+vendor/http-parser/LICENSE-MIT
+vendor/http-parser/Makefile
+vendor/http-parser/README.md
+vendor/http-parser/bench.c
+vendor/http-parser/http_parser.c
+vendor/http-parser/http_parser.gyp
+vendor/http-parser/http_parser.h
+vendor/http-parser/test.c
+vendor/http-parser/contrib/parsertrace.c
+vendor/http-parser/contrib/url_parser.c \ No newline at end of file
diff --git a/third_party/python/aiohttp/aiohttp.egg-info/dependency_links.txt b/third_party/python/aiohttp/aiohttp.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/aiohttp/aiohttp.egg-info/requires.txt b/third_party/python/aiohttp/aiohttp.egg-info/requires.txt
new file mode 100644
index 0000000000..746f3f8655
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp.egg-info/requires.txt
@@ -0,0 +1,14 @@
+attrs>=17.3.0
+chardet<5.0,>=2.0
+multidict<7.0,>=4.5
+async_timeout<4.0,>=3.0
+yarl<2.0,>=1.0
+typing_extensions>=3.6.5
+
+[:python_version < "3.7"]
+idna-ssl>=1.0
+
+[speedups]
+aiodns
+brotlipy
+cchardet
diff --git a/third_party/python/aiohttp/aiohttp.egg-info/top_level.txt b/third_party/python/aiohttp/aiohttp.egg-info/top_level.txt
new file mode 100644
index 0000000000..ee4ba4f3d7
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp.egg-info/top_level.txt
@@ -0,0 +1 @@
+aiohttp
diff --git a/third_party/python/aiohttp/aiohttp/.hash/_cparser.pxd.hash b/third_party/python/aiohttp/aiohttp/.hash/_cparser.pxd.hash
new file mode 100644
index 0000000000..7f4060a33f
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/_cparser.pxd.hash
@@ -0,0 +1 @@
+b60c37d122fa91049ccf318c94c871d82ba17ff3bc3fc64f8a65426fce7120b7 /home/runner/work/aiohttp/aiohttp/aiohttp/_cparser.pxd
diff --git a/third_party/python/aiohttp/aiohttp/.hash/_find_header.pxd.hash b/third_party/python/aiohttp/aiohttp/.hash/_find_header.pxd.hash
new file mode 100644
index 0000000000..f006c2de5d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/_find_header.pxd.hash
@@ -0,0 +1 @@
+d067f01423cddb3c442933b5fcc039b18ab651fcec1bc91c577693aafc25cf78 /home/runner/work/aiohttp/aiohttp/aiohttp/_find_header.pxd
diff --git a/third_party/python/aiohttp/aiohttp/.hash/_frozenlist.pyx.hash b/third_party/python/aiohttp/aiohttp/.hash/_frozenlist.pyx.hash
new file mode 100644
index 0000000000..ccad753d9a
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/_frozenlist.pyx.hash
@@ -0,0 +1 @@
+043f0b704444c6c59da38ab3bae43ce1ff8bfe91d5ce45103b494400e7b71688 /home/runner/work/aiohttp/aiohttp/aiohttp/_frozenlist.pyx
diff --git a/third_party/python/aiohttp/aiohttp/.hash/_helpers.pyi.hash b/third_party/python/aiohttp/aiohttp/.hash/_helpers.pyi.hash
new file mode 100644
index 0000000000..6a30d6325b
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/_helpers.pyi.hash
@@ -0,0 +1 @@
+6682a22524b9d4fc442e123672622be7bdfb6238d9709b7b15b2113b7ca6d52b /home/runner/work/aiohttp/aiohttp/aiohttp/_helpers.pyi
diff --git a/third_party/python/aiohttp/aiohttp/.hash/_helpers.pyx.hash b/third_party/python/aiohttp/aiohttp/.hash/_helpers.pyx.hash
new file mode 100644
index 0000000000..8f38727d78
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/_helpers.pyx.hash
@@ -0,0 +1 @@
+5de2db35fb795ffe227e2f1007c8ba4f2ad1b9aca28cc48edc80c779203cf6e3 /home/runner/work/aiohttp/aiohttp/aiohttp/_helpers.pyx
diff --git a/third_party/python/aiohttp/aiohttp/.hash/_http_parser.pyx.hash b/third_party/python/aiohttp/aiohttp/.hash/_http_parser.pyx.hash
new file mode 100644
index 0000000000..ea0ea796ec
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/_http_parser.pyx.hash
@@ -0,0 +1 @@
+f0688fb2e81ea92bf0a17822260d9591a30979101da12a4b873113fc459fb5fa /home/runner/work/aiohttp/aiohttp/aiohttp/_http_parser.pyx
diff --git a/third_party/python/aiohttp/aiohttp/.hash/_http_writer.pyx.hash b/third_party/python/aiohttp/aiohttp/.hash/_http_writer.pyx.hash
new file mode 100644
index 0000000000..b325e7dfbf
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/_http_writer.pyx.hash
@@ -0,0 +1 @@
+4e7b7f7baa5c65954e85a5b7c8db7786a0ec3498081b0a9420f792a803086281 /home/runner/work/aiohttp/aiohttp/aiohttp/_http_writer.pyx
diff --git a/third_party/python/aiohttp/aiohttp/.hash/_websocket.pyx.hash b/third_party/python/aiohttp/aiohttp/.hash/_websocket.pyx.hash
new file mode 100644
index 0000000000..ddbb4c7a6f
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/_websocket.pyx.hash
@@ -0,0 +1 @@
+d57b8e48d0c26f20ebcc5e6e300da2b2a6aeb12b3c9768d64cb0e53432ccf48a /home/runner/work/aiohttp/aiohttp/aiohttp/_websocket.pyx
diff --git a/third_party/python/aiohttp/aiohttp/.hash/frozenlist.pyi.hash b/third_party/python/aiohttp/aiohttp/.hash/frozenlist.pyi.hash
new file mode 100644
index 0000000000..e461073ac4
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/frozenlist.pyi.hash
@@ -0,0 +1 @@
+6d134aa08da3d6ba0f76d81fc7f9ec7836a7bc1a99b1950d1c3aa65ed7e3951a /home/runner/work/aiohttp/aiohttp/aiohttp/frozenlist.pyi
diff --git a/third_party/python/aiohttp/aiohttp/.hash/hdrs.py.hash b/third_party/python/aiohttp/aiohttp/.hash/hdrs.py.hash
new file mode 100644
index 0000000000..0e34777442
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/hdrs.py.hash
@@ -0,0 +1 @@
+5ac8c3258003604c8993bfa8357361036337330b722e4849024972ccbb5c95f5 /home/runner/work/aiohttp/aiohttp/aiohttp/hdrs.py
diff --git a/third_party/python/aiohttp/aiohttp/.hash/signals.pyi.hash b/third_party/python/aiohttp/aiohttp/.hash/signals.pyi.hash
new file mode 100644
index 0000000000..29acd69f02
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/.hash/signals.pyi.hash
@@ -0,0 +1 @@
+48b4df50f771d7e8385524ea0a7057ca1482974f8a43e674982b04b08bc17d5e /home/runner/work/aiohttp/aiohttp/aiohttp/signals.pyi
diff --git a/third_party/python/aiohttp/aiohttp/__init__.py b/third_party/python/aiohttp/aiohttp/__init__.py
new file mode 100644
index 0000000000..12c73f4a32
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/__init__.py
@@ -0,0 +1,217 @@
+__version__ = "3.7.4.post0"
+
+from typing import Tuple
+
+from . import hdrs as hdrs
+from .client import (
+ BaseConnector as BaseConnector,
+ ClientConnectionError as ClientConnectionError,
+ ClientConnectorCertificateError as ClientConnectorCertificateError,
+ ClientConnectorError as ClientConnectorError,
+ ClientConnectorSSLError as ClientConnectorSSLError,
+ ClientError as ClientError,
+ ClientHttpProxyError as ClientHttpProxyError,
+ ClientOSError as ClientOSError,
+ ClientPayloadError as ClientPayloadError,
+ ClientProxyConnectionError as ClientProxyConnectionError,
+ ClientRequest as ClientRequest,
+ ClientResponse as ClientResponse,
+ ClientResponseError as ClientResponseError,
+ ClientSession as ClientSession,
+ ClientSSLError as ClientSSLError,
+ ClientTimeout as ClientTimeout,
+ ClientWebSocketResponse as ClientWebSocketResponse,
+ ContentTypeError as ContentTypeError,
+ Fingerprint as Fingerprint,
+ InvalidURL as InvalidURL,
+ NamedPipeConnector as NamedPipeConnector,
+ RequestInfo as RequestInfo,
+ ServerConnectionError as ServerConnectionError,
+ ServerDisconnectedError as ServerDisconnectedError,
+ ServerFingerprintMismatch as ServerFingerprintMismatch,
+ ServerTimeoutError as ServerTimeoutError,
+ TCPConnector as TCPConnector,
+ TooManyRedirects as TooManyRedirects,
+ UnixConnector as UnixConnector,
+ WSServerHandshakeError as WSServerHandshakeError,
+ request as request,
+)
+from .cookiejar import CookieJar as CookieJar, DummyCookieJar as DummyCookieJar
+from .formdata import FormData as FormData
+from .helpers import BasicAuth as BasicAuth, ChainMapProxy as ChainMapProxy
+from .http import (
+ HttpVersion as HttpVersion,
+ HttpVersion10 as HttpVersion10,
+ HttpVersion11 as HttpVersion11,
+ WebSocketError as WebSocketError,
+ WSCloseCode as WSCloseCode,
+ WSMessage as WSMessage,
+ WSMsgType as WSMsgType,
+)
+from .multipart import (
+ BadContentDispositionHeader as BadContentDispositionHeader,
+ BadContentDispositionParam as BadContentDispositionParam,
+ BodyPartReader as BodyPartReader,
+ MultipartReader as MultipartReader,
+ MultipartWriter as MultipartWriter,
+ content_disposition_filename as content_disposition_filename,
+ parse_content_disposition as parse_content_disposition,
+)
+from .payload import (
+ PAYLOAD_REGISTRY as PAYLOAD_REGISTRY,
+ AsyncIterablePayload as AsyncIterablePayload,
+ BufferedReaderPayload as BufferedReaderPayload,
+ BytesIOPayload as BytesIOPayload,
+ BytesPayload as BytesPayload,
+ IOBasePayload as IOBasePayload,
+ JsonPayload as JsonPayload,
+ Payload as Payload,
+ StringIOPayload as StringIOPayload,
+ StringPayload as StringPayload,
+ TextIOPayload as TextIOPayload,
+ get_payload as get_payload,
+ payload_type as payload_type,
+)
+from .payload_streamer import streamer as streamer
+from .resolver import (
+ AsyncResolver as AsyncResolver,
+ DefaultResolver as DefaultResolver,
+ ThreadedResolver as ThreadedResolver,
+)
+from .signals import Signal as Signal
+from .streams import (
+ EMPTY_PAYLOAD as EMPTY_PAYLOAD,
+ DataQueue as DataQueue,
+ EofStream as EofStream,
+ FlowControlDataQueue as FlowControlDataQueue,
+ StreamReader as StreamReader,
+)
+from .tracing import (
+ TraceConfig as TraceConfig,
+ TraceConnectionCreateEndParams as TraceConnectionCreateEndParams,
+ TraceConnectionCreateStartParams as TraceConnectionCreateStartParams,
+ TraceConnectionQueuedEndParams as TraceConnectionQueuedEndParams,
+ TraceConnectionQueuedStartParams as TraceConnectionQueuedStartParams,
+ TraceConnectionReuseconnParams as TraceConnectionReuseconnParams,
+ TraceDnsCacheHitParams as TraceDnsCacheHitParams,
+ TraceDnsCacheMissParams as TraceDnsCacheMissParams,
+ TraceDnsResolveHostEndParams as TraceDnsResolveHostEndParams,
+ TraceDnsResolveHostStartParams as TraceDnsResolveHostStartParams,
+ TraceRequestChunkSentParams as TraceRequestChunkSentParams,
+ TraceRequestEndParams as TraceRequestEndParams,
+ TraceRequestExceptionParams as TraceRequestExceptionParams,
+ TraceRequestRedirectParams as TraceRequestRedirectParams,
+ TraceRequestStartParams as TraceRequestStartParams,
+ TraceResponseChunkReceivedParams as TraceResponseChunkReceivedParams,
+)
+
+__all__: Tuple[str, ...] = (
+ "hdrs",
+ # client
+ "BaseConnector",
+ "ClientConnectionError",
+ "ClientConnectorCertificateError",
+ "ClientConnectorError",
+ "ClientConnectorSSLError",
+ "ClientError",
+ "ClientHttpProxyError",
+ "ClientOSError",
+ "ClientPayloadError",
+ "ClientProxyConnectionError",
+ "ClientResponse",
+ "ClientRequest",
+ "ClientResponseError",
+ "ClientSSLError",
+ "ClientSession",
+ "ClientTimeout",
+ "ClientWebSocketResponse",
+ "ContentTypeError",
+ "Fingerprint",
+ "InvalidURL",
+ "RequestInfo",
+ "ServerConnectionError",
+ "ServerDisconnectedError",
+ "ServerFingerprintMismatch",
+ "ServerTimeoutError",
+ "TCPConnector",
+ "TooManyRedirects",
+ "UnixConnector",
+ "NamedPipeConnector",
+ "WSServerHandshakeError",
+ "request",
+ # cookiejar
+ "CookieJar",
+ "DummyCookieJar",
+ # formdata
+ "FormData",
+ # helpers
+ "BasicAuth",
+ "ChainMapProxy",
+ # http
+ "HttpVersion",
+ "HttpVersion10",
+ "HttpVersion11",
+ "WSMsgType",
+ "WSCloseCode",
+ "WSMessage",
+ "WebSocketError",
+ # multipart
+ "BadContentDispositionHeader",
+ "BadContentDispositionParam",
+ "BodyPartReader",
+ "MultipartReader",
+ "MultipartWriter",
+ "content_disposition_filename",
+ "parse_content_disposition",
+ # payload
+ "AsyncIterablePayload",
+ "BufferedReaderPayload",
+ "BytesIOPayload",
+ "BytesPayload",
+ "IOBasePayload",
+ "JsonPayload",
+ "PAYLOAD_REGISTRY",
+ "Payload",
+ "StringIOPayload",
+ "StringPayload",
+ "TextIOPayload",
+ "get_payload",
+ "payload_type",
+ # payload_streamer
+ "streamer",
+ # resolver
+ "AsyncResolver",
+ "DefaultResolver",
+ "ThreadedResolver",
+ # signals
+ "Signal",
+ "DataQueue",
+ "EMPTY_PAYLOAD",
+ "EofStream",
+ "FlowControlDataQueue",
+ "StreamReader",
+ # tracing
+ "TraceConfig",
+ "TraceConnectionCreateEndParams",
+ "TraceConnectionCreateStartParams",
+ "TraceConnectionQueuedEndParams",
+ "TraceConnectionQueuedStartParams",
+ "TraceConnectionReuseconnParams",
+ "TraceDnsCacheHitParams",
+ "TraceDnsCacheMissParams",
+ "TraceDnsResolveHostEndParams",
+ "TraceDnsResolveHostStartParams",
+ "TraceRequestChunkSentParams",
+ "TraceRequestEndParams",
+ "TraceRequestExceptionParams",
+ "TraceRequestRedirectParams",
+ "TraceRequestStartParams",
+ "TraceResponseChunkReceivedParams",
+)
+
+try:
+ from .worker import GunicornUVLoopWebWorker, GunicornWebWorker
+
+ __all__ += ("GunicornWebWorker", "GunicornUVLoopWebWorker")
+except ImportError: # pragma: no cover
+ pass
diff --git a/third_party/python/aiohttp/aiohttp/_cparser.pxd b/third_party/python/aiohttp/aiohttp/_cparser.pxd
new file mode 100644
index 0000000000..0f9fc00923
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_cparser.pxd
@@ -0,0 +1,140 @@
+from libc.stdint cimport uint16_t, uint32_t, uint64_t
+
+
+cdef extern from "../vendor/http-parser/http_parser.h":
+ ctypedef int (*http_data_cb) (http_parser*,
+ const char *at,
+ size_t length) except -1
+
+ ctypedef int (*http_cb) (http_parser*) except -1
+
+ struct http_parser:
+ unsigned int type
+ unsigned int flags
+ unsigned int state
+ unsigned int header_state
+ unsigned int index
+
+ uint32_t nread
+ uint64_t content_length
+
+ unsigned short http_major
+ unsigned short http_minor
+ unsigned int status_code
+ unsigned int method
+ unsigned int http_errno
+
+ unsigned int upgrade
+
+ void *data
+
+ struct http_parser_settings:
+ http_cb on_message_begin
+ http_data_cb on_url
+ http_data_cb on_status
+ http_data_cb on_header_field
+ http_data_cb on_header_value
+ http_cb on_headers_complete
+ http_data_cb on_body
+ http_cb on_message_complete
+ http_cb on_chunk_header
+ http_cb on_chunk_complete
+
+ enum http_parser_type:
+ HTTP_REQUEST,
+ HTTP_RESPONSE,
+ HTTP_BOTH
+
+ enum http_errno:
+ HPE_OK,
+ HPE_CB_message_begin,
+ HPE_CB_url,
+ HPE_CB_header_field,
+ HPE_CB_header_value,
+ HPE_CB_headers_complete,
+ HPE_CB_body,
+ HPE_CB_message_complete,
+ HPE_CB_status,
+ HPE_CB_chunk_header,
+ HPE_CB_chunk_complete,
+ HPE_INVALID_EOF_STATE,
+ HPE_HEADER_OVERFLOW,
+ HPE_CLOSED_CONNECTION,
+ HPE_INVALID_VERSION,
+ HPE_INVALID_STATUS,
+ HPE_INVALID_METHOD,
+ HPE_INVALID_URL,
+ HPE_INVALID_HOST,
+ HPE_INVALID_PORT,
+ HPE_INVALID_PATH,
+ HPE_INVALID_QUERY_STRING,
+ HPE_INVALID_FRAGMENT,
+ HPE_LF_EXPECTED,
+ HPE_INVALID_HEADER_TOKEN,
+ HPE_INVALID_CONTENT_LENGTH,
+ HPE_INVALID_CHUNK_SIZE,
+ HPE_INVALID_CONSTANT,
+ HPE_INVALID_INTERNAL_STATE,
+ HPE_STRICT,
+ HPE_PAUSED,
+ HPE_UNKNOWN
+
+ enum flags:
+ F_CHUNKED,
+ F_CONNECTION_KEEP_ALIVE,
+ F_CONNECTION_CLOSE,
+ F_CONNECTION_UPGRADE,
+ F_TRAILING,
+ F_UPGRADE,
+ F_SKIPBODY,
+ F_CONTENTLENGTH
+
+ enum http_method:
+ DELETE, GET, HEAD, POST, PUT, CONNECT, OPTIONS, TRACE, COPY,
+ LOCK, MKCOL, MOVE, PROPFIND, PROPPATCH, SEARCH, UNLOCK, BIND,
+ REBIND, UNBIND, ACL, REPORT, MKACTIVITY, CHECKOUT, MERGE,
+ MSEARCH, NOTIFY, SUBSCRIBE, UNSUBSCRIBE, PATCH, PURGE, MKCALENDAR,
+ LINK, UNLINK
+
+ void http_parser_init(http_parser *parser, http_parser_type type)
+
+ size_t http_parser_execute(http_parser *parser,
+ const http_parser_settings *settings,
+ const char *data,
+ size_t len)
+
+ int http_should_keep_alive(const http_parser *parser)
+
+ void http_parser_settings_init(http_parser_settings *settings)
+
+ const char *http_errno_name(http_errno err)
+ const char *http_errno_description(http_errno err)
+ const char *http_method_str(http_method m)
+
+ # URL Parser
+
+ enum http_parser_url_fields:
+ UF_SCHEMA = 0,
+ UF_HOST = 1,
+ UF_PORT = 2,
+ UF_PATH = 3,
+ UF_QUERY = 4,
+ UF_FRAGMENT = 5,
+ UF_USERINFO = 6,
+ UF_MAX = 7
+
+ struct http_parser_url_field_data:
+ uint16_t off
+ uint16_t len
+
+ struct http_parser_url:
+ uint16_t field_set
+ uint16_t port
+ http_parser_url_field_data[<int>UF_MAX] field_data
+
+ void http_parser_url_init(http_parser_url *u)
+
+ int http_parser_parse_url(const char *buf,
+ size_t buflen,
+ int is_connect,
+ http_parser_url *u)
diff --git a/third_party/python/aiohttp/aiohttp/_find_header.c b/third_party/python/aiohttp/aiohttp/_find_header.c
new file mode 100644
index 0000000000..012cba33ac
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_find_header.c
@@ -0,0 +1,9870 @@
+/* The file is autogenerated from aiohttp/hdrs.py
+Run ./tools/gen.py to update it after the origin changes. */
+
+#include "_find_header.h"
+
+#define NEXT_CHAR() \
+do { \
+ count++; \
+ if (count == size) { \
+ /* end of search */ \
+ return -1; \
+ } \
+ pchar++; \
+ ch = *pchar; \
+ last = (count == size - 1); \
+} while(0)
+
+int
+find_header(const char *str, int size)
+{
+ const char *pchar = str;
+ int last;
+ char ch;
+ int count = -1;
+ pchar--;
+
+
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto A;
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto C;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto C;
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto D;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto D;
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto E;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto E;
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto F;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto F;
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto H;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto H;
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto I;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto I;
+ case 'K':
+ if (last) {
+ return -1;
+ }
+ goto K;
+ case 'k':
+ if (last) {
+ return -1;
+ }
+ goto K;
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto L;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto L;
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto M;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto M;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto O;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto O;
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto P;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto P;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto R;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto R;
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto S;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto S;
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto T;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto T;
+ case 'U':
+ if (last) {
+ return -1;
+ }
+ goto U;
+ case 'u':
+ if (last) {
+ return -1;
+ }
+ goto U;
+ case 'V':
+ if (last) {
+ return -1;
+ }
+ goto V;
+ case 'v':
+ if (last) {
+ return -1;
+ }
+ goto V;
+ case 'W':
+ if (last) {
+ return -1;
+ }
+ goto W;
+ case 'w':
+ if (last) {
+ return -1;
+ }
+ goto W;
+ case 'X':
+ if (last) {
+ return -1;
+ }
+ goto X;
+ case 'x':
+ if (last) {
+ return -1;
+ }
+ goto X;
+ default:
+ return -1;
+ }
+
+A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto AC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto AC;
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto AG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto AG;
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto AL;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto AL;
+ case 'U':
+ if (last) {
+ return -1;
+ }
+ goto AU;
+ case 'u':
+ if (last) {
+ return -1;
+ }
+ goto AU;
+ default:
+ return -1;
+ }
+
+AC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto ACC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto ACC;
+ default:
+ return -1;
+ }
+
+ACC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCE;
+ default:
+ return -1;
+ }
+
+ACCE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto ACCEP;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto ACCEP;
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto ACCES;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto ACCES;
+ default:
+ return -1;
+ }
+
+ACCEP:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 0;
+ }
+ goto ACCEPT;
+ case 't':
+ if (last) {
+ return 0;
+ }
+ goto ACCEPT;
+ default:
+ return -1;
+ }
+
+ACCEPT:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_;
+ default:
+ return -1;
+ }
+
+ACCEPT_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_C;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_C;
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_E;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_E;
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_L;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_L;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_R;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_R;
+ default:
+ return -1;
+ }
+
+ACCEPT_C:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CH;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CH;
+ default:
+ return -1;
+ }
+
+ACCEPT_CH:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CHA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CHA;
+ default:
+ return -1;
+ }
+
+ACCEPT_CHA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CHAR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CHAR;
+ default:
+ return -1;
+ }
+
+ACCEPT_CHAR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CHARS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CHARS;
+ default:
+ return -1;
+ }
+
+ACCEPT_CHARS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CHARSE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_CHARSE;
+ default:
+ return -1;
+ }
+
+ACCEPT_CHARSE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 1;
+ }
+ goto ACCEPT_CHARSET;
+ case 't':
+ if (last) {
+ return 1;
+ }
+ goto ACCEPT_CHARSET;
+ default:
+ return -1;
+ }
+
+ACCEPT_E:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_EN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_EN;
+ default:
+ return -1;
+ }
+
+ACCEPT_EN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENC;
+ default:
+ return -1;
+ }
+
+ACCEPT_ENC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENCO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENCO;
+ default:
+ return -1;
+ }
+
+ACCEPT_ENCO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENCOD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENCOD;
+ default:
+ return -1;
+ }
+
+ACCEPT_ENCOD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENCODI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENCODI;
+ default:
+ return -1;
+ }
+
+ACCEPT_ENCODI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENCODIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_ENCODIN;
+ default:
+ return -1;
+ }
+
+ACCEPT_ENCODIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return 2;
+ }
+ goto ACCEPT_ENCODING;
+ case 'g':
+ if (last) {
+ return 2;
+ }
+ goto ACCEPT_ENCODING;
+ default:
+ return -1;
+ }
+
+ACCEPT_L:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LA;
+ default:
+ return -1;
+ }
+
+ACCEPT_LA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LAN;
+ default:
+ return -1;
+ }
+
+ACCEPT_LAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LANG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LANG;
+ default:
+ return -1;
+ }
+
+ACCEPT_LANG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'U':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LANGU;
+ case 'u':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LANGU;
+ default:
+ return -1;
+ }
+
+ACCEPT_LANGU:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LANGUA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LANGUA;
+ default:
+ return -1;
+ }
+
+ACCEPT_LANGUA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LANGUAG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_LANGUAG;
+ default:
+ return -1;
+ }
+
+ACCEPT_LANGUAG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 3;
+ }
+ goto ACCEPT_LANGUAGE;
+ case 'e':
+ if (last) {
+ return 3;
+ }
+ goto ACCEPT_LANGUAGE;
+ default:
+ return -1;
+ }
+
+ACCEPT_R:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_RA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_RA;
+ default:
+ return -1;
+ }
+
+ACCEPT_RA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_RAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_RAN;
+ default:
+ return -1;
+ }
+
+ACCEPT_RAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_RANG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_RANG;
+ default:
+ return -1;
+ }
+
+ACCEPT_RANG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_RANGE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCEPT_RANGE;
+ default:
+ return -1;
+ }
+
+ACCEPT_RANGE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 4;
+ }
+ goto ACCEPT_RANGES;
+ case 's':
+ if (last) {
+ return 4;
+ }
+ goto ACCEPT_RANGES;
+ default:
+ return -1;
+ }
+
+ACCES:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS;
+ default:
+ return -1;
+ }
+
+ACCESS:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_;
+ default:
+ return -1;
+ }
+
+ACCESS_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_C;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_C;
+ default:
+ return -1;
+ }
+
+ACCESS_C:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CO;
+ default:
+ return -1;
+ }
+
+ACCESS_CO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CON;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CON;
+ default:
+ return -1;
+ }
+
+ACCESS_CON:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONT;
+ default:
+ return -1;
+ }
+
+ACCESS_CONT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTR;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTRO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTRO;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTRO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_A;
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_E;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_E;
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_M;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_M;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_R;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_R;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_AL;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_AL;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_AL:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALL;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALL;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALL:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLO;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'W':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW;
+ case 'w':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_C;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_C;
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_H;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_H;
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_M;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_M;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_O;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_O;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_C:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CR;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CRE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CRE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CRE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CRED;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CRED;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CRED:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CREDE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDEN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDEN;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CREDEN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENT;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CREDENT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENTI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENTI;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CREDENTI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENTIA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENTIA;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CREDENTIA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENTIAL;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENTIAL;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_CREDENTIAL:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 5;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENTIALS;
+ case 's':
+ if (last) {
+ return 5;
+ }
+ goto ACCESS_CONTROL_ALLOW_CREDENTIALS;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_H:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_HE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEA;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_HEA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEAD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEAD;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_HEAD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEADE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEADE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_HEADE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEADER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEADER;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_HEADER:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 6;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEADERS;
+ case 's':
+ if (last) {
+ return 6;
+ }
+ goto ACCESS_CONTROL_ALLOW_HEADERS;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_M:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_ME;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_ME;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_ME:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_MET;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_MET;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_MET:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_METH;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_METH;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_METH:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_METHO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_METHO;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_METHO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_METHOD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_METHOD;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_METHOD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 7;
+ }
+ goto ACCESS_CONTROL_ALLOW_METHODS;
+ case 's':
+ if (last) {
+ return 7;
+ }
+ goto ACCESS_CONTROL_ALLOW_METHODS;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_O:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_OR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_OR;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_OR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_ORI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_ORI;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_ORI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_ORIG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_ORIG;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_ORIG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_ORIGI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_ALLOW_ORIGI;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_ALLOW_ORIGI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 8;
+ }
+ goto ACCESS_CONTROL_ALLOW_ORIGIN;
+ case 'n':
+ if (last) {
+ return 8;
+ }
+ goto ACCESS_CONTROL_ALLOW_ORIGIN;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_E:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'X':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EX;
+ case 'x':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EX;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EX:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXP;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXP;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXP:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPO;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOS;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOSE:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOSE_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_H;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_H;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOSE_H:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOSE_HE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEA;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOSE_HEA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEAD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEAD;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOSE_HEAD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEADE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEADE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOSE_HEADE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEADER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEADER;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_EXPOSE_HEADER:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 9;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEADERS;
+ case 's':
+ if (last) {
+ return 9;
+ }
+ goto ACCESS_CONTROL_EXPOSE_HEADERS;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_M:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MA;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_MA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'X':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MAX;
+ case 'x':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MAX;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_MAX:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MAX_;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_MAX_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MAX_A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MAX_A;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_MAX_A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MAX_AG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_MAX_AG;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_MAX_AG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 10;
+ }
+ goto ACCESS_CONTROL_MAX_AGE;
+ case 'e':
+ if (last) {
+ return 10;
+ }
+ goto ACCESS_CONTROL_MAX_AGE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_R:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_RE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_RE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_RE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'Q':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQ;
+ case 'q':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQ;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQ:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'U':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQU;
+ case 'u':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQU;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQU:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUES;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUES;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUES:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_H;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_H;
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_M;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_M;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_H:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_HE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEA;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_HEA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEAD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEAD;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_HEAD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEADE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEADE;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_HEADE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEADER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEADER;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_HEADER:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 11;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEADERS;
+ case 's':
+ if (last) {
+ return 11;
+ }
+ goto ACCESS_CONTROL_REQUEST_HEADERS;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_M:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_ME;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_ME;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_ME:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_MET;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_MET;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_MET:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_METH;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_METH;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_METH:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_METHO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ACCESS_CONTROL_REQUEST_METHO;
+ default:
+ return -1;
+ }
+
+ACCESS_CONTROL_REQUEST_METHO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return 12;
+ }
+ goto ACCESS_CONTROL_REQUEST_METHOD;
+ case 'd':
+ if (last) {
+ return 12;
+ }
+ goto ACCESS_CONTROL_REQUEST_METHOD;
+ default:
+ return -1;
+ }
+
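+/* "Age" -> 13, "Allow" -> 14, "Authorization" -> 15 */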
+AG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 13;
+ }
+ goto AGE;
+ case 'e':
+ if (last) {
+ return 13;
+ }
+ goto AGE;
+ default:
+ return -1;
+ }
+
+AL:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto ALL;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto ALL;
+ default:
+ return -1;
+ }
+
+ALL:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto ALLO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto ALLO;
+ default:
+ return -1;
+ }
+
+ALLO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'W':
+ if (last) {
+ return 14;
+ }
+ goto ALLOW;
+ case 'w':
+ if (last) {
+ return 14;
+ }
+ goto ALLOW;
+ default:
+ return -1;
+ }
+
+AU:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto AUT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto AUT;
+ default:
+ return -1;
+ }
+
+AUT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto AUTH;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto AUTH;
+ default:
+ return -1;
+ }
+
+AUTH:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto AUTHO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto AUTHO;
+ default:
+ return -1;
+ }
+
+AUTHO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto AUTHOR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto AUTHOR;
+ default:
+ return -1;
+ }
+
+AUTHOR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORI;
+ default:
+ return -1;
+ }
+
+AUTHORI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'Z':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZ;
+ case 'z':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZ;
+ default:
+ return -1;
+ }
+
+AUTHORIZ:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZA;
+ default:
+ return -1;
+ }
+
+AUTHORIZA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZAT;
+ default:
+ return -1;
+ }
+
+AUTHORIZAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZATI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZATI;
+ default:
+ return -1;
+ }
+
+AUTHORIZATI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZATIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto AUTHORIZATIO;
+ default:
+ return -1;
+ }
+
+AUTHORIZATIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 15;
+ }
+ goto AUTHORIZATION;
+ case 'n':
+ if (last) {
+ return 15;
+ }
+ goto AUTHORIZATION;
+ default:
+ return -1;
+ }
+
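+/* The 'C' branch fans out to "Cache-Control" (16), "Connection" (17),
+ * the "Content-*" family (18-26) and "Cookie" (27). */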
+C:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto CA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto CA;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CO;
+ default:
+ return -1;
+ }
+
+CA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto CAC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto CAC;
+ default:
+ return -1;
+ }
+
+CAC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto CACH;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto CACH;
+ default:
+ return -1;
+ }
+
+CACH:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto CACHE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto CACHE;
+ default:
+ return -1;
+ }
+
+CACHE:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_;
+ default:
+ return -1;
+ }
+
+CACHE_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_C;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_C;
+ default:
+ return -1;
+ }
+
+CACHE_C:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CO;
+ default:
+ return -1;
+ }
+
+CACHE_CO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CON;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CON;
+ default:
+ return -1;
+ }
+
+CACHE_CON:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CONT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CONT;
+ default:
+ return -1;
+ }
+
+CACHE_CONT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CONTR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CONTR;
+ default:
+ return -1;
+ }
+
+CACHE_CONTR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CONTRO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CACHE_CONTRO;
+ default:
+ return -1;
+ }
+
+CACHE_CONTRO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return 16;
+ }
+ goto CACHE_CONTROL;
+ case 'l':
+ if (last) {
+ return 16;
+ }
+ goto CACHE_CONTROL;
+ default:
+ return -1;
+ }
+
+CO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CON;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CON;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto COO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto COO;
+ default:
+ return -1;
+ }
+
+CON:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONN;
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto CONT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto CONT;
+ default:
+ return -1;
+ }
+
+CONN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto CONNE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto CONNE;
+ default:
+ return -1;
+ }
+
+CONNE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto CONNEC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto CONNEC;
+ default:
+ return -1;
+ }
+
+CONNEC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto CONNECT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto CONNECT;
+ default:
+ return -1;
+ }
+
+CONNECT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto CONNECTI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto CONNECTI;
+ default:
+ return -1;
+ }
+
+CONNECTI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CONNECTIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CONNECTIO;
+ default:
+ return -1;
+ }
+
+CONNECTIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 17;
+ }
+ goto CONNECTION;
+ case 'n':
+ if (last) {
+ return 17;
+ }
+ goto CONNECTION;
+ default:
+ return -1;
+ }
+
+CONT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto CONTE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto CONTE;
+ default:
+ return -1;
+ }
+
+CONTE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTEN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTEN;
+ default:
+ return -1;
+ }
+
+CONTEN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT;
+ default:
+ return -1;
+ }
+
+CONTENT:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_;
+ default:
+ return -1;
+ }
+
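+/* After "Content-" the next byte selects the suffix: Disposition (18),
+ * Encoding (19), Language (20), Length (21), Location (22), MD5 (23),
+ * Range (24), Transfer-Encoding (25) or Type (26). */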
+CONTENT_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_D;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_D;
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_E;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_E;
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_L;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_L;
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_M;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_M;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_R;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_R;
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_T;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_T;
+ default:
+ return -1;
+ }
+
+CONTENT_D:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DI;
+ default:
+ return -1;
+ }
+
+CONTENT_DI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DIS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DIS;
+ default:
+ return -1;
+ }
+
+CONTENT_DIS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISP;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISP;
+ default:
+ return -1;
+ }
+
+CONTENT_DISP:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPO;
+ default:
+ return -1;
+ }
+
+CONTENT_DISPO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOS;
+ default:
+ return -1;
+ }
+
+CONTENT_DISPOS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOSI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOSI;
+ default:
+ return -1;
+ }
+
+CONTENT_DISPOSI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOSIT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOSIT;
+ default:
+ return -1;
+ }
+
+CONTENT_DISPOSIT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOSITI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOSITI;
+ default:
+ return -1;
+ }
+
+CONTENT_DISPOSITI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOSITIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_DISPOSITIO;
+ default:
+ return -1;
+ }
+
+CONTENT_DISPOSITIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 18;
+ }
+ goto CONTENT_DISPOSITION;
+ case 'n':
+ if (last) {
+ return 18;
+ }
+ goto CONTENT_DISPOSITION;
+ default:
+ return -1;
+ }
+
+CONTENT_E:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_EN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_EN;
+ default:
+ return -1;
+ }
+
+CONTENT_EN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENC;
+ default:
+ return -1;
+ }
+
+CONTENT_ENC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENCO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENCO;
+ default:
+ return -1;
+ }
+
+CONTENT_ENCO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENCOD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENCOD;
+ default:
+ return -1;
+ }
+
+CONTENT_ENCOD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENCODI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENCODI;
+ default:
+ return -1;
+ }
+
+CONTENT_ENCODI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENCODIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_ENCODIN;
+ default:
+ return -1;
+ }
+
+CONTENT_ENCODIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return 19;
+ }
+ goto CONTENT_ENCODING;
+ case 'g':
+ if (last) {
+ return 19;
+ }
+ goto CONTENT_ENCODING;
+ default:
+ return -1;
+ }
+
+CONTENT_L:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LA;
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LE;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LO;
+ default:
+ return -1;
+ }
+
+CONTENT_LA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LAN;
+ default:
+ return -1;
+ }
+
+CONTENT_LAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LANG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LANG;
+ default:
+ return -1;
+ }
+
+CONTENT_LANG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'U':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LANGU;
+ case 'u':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LANGU;
+ default:
+ return -1;
+ }
+
+CONTENT_LANGU:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LANGUA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LANGUA;
+ default:
+ return -1;
+ }
+
+CONTENT_LANGUA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LANGUAG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LANGUAG;
+ default:
+ return -1;
+ }
+
+CONTENT_LANGUAG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 20;
+ }
+ goto CONTENT_LANGUAGE;
+ case 'e':
+ if (last) {
+ return 20;
+ }
+ goto CONTENT_LANGUAGE;
+ default:
+ return -1;
+ }
+
+CONTENT_LE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LEN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LEN;
+ default:
+ return -1;
+ }
+
+CONTENT_LEN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LENG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LENG;
+ default:
+ return -1;
+ }
+
+CONTENT_LENG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LENGT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LENGT;
+ default:
+ return -1;
+ }
+
+CONTENT_LENGT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return 21;
+ }
+ goto CONTENT_LENGTH;
+ case 'h':
+ if (last) {
+ return 21;
+ }
+ goto CONTENT_LENGTH;
+ default:
+ return -1;
+ }
+
+CONTENT_LO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOC;
+ default:
+ return -1;
+ }
+
+CONTENT_LOC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOCA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOCA;
+ default:
+ return -1;
+ }
+
+CONTENT_LOCA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOCAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOCAT;
+ default:
+ return -1;
+ }
+
+CONTENT_LOCAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOCATI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOCATI;
+ default:
+ return -1;
+ }
+
+CONTENT_LOCATI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOCATIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_LOCATIO;
+ default:
+ return -1;
+ }
+
+CONTENT_LOCATIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 22;
+ }
+ goto CONTENT_LOCATION;
+ case 'n':
+ if (last) {
+ return 22;
+ }
+ goto CONTENT_LOCATION;
+ default:
+ return -1;
+ }
+
+CONTENT_M:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_MD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_MD;
+ default:
+ return -1;
+ }
+
+CONTENT_MD:
+ NEXT_CHAR();
+ switch (ch) {
+ case '5':
+ if (last) {
+ return 23;
+ }
+ goto CONTENT_MD5;
+ default:
+ return -1;
+ }
+
+CONTENT_R:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_RA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_RA;
+ default:
+ return -1;
+ }
+
+CONTENT_RA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_RAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_RAN;
+ default:
+ return -1;
+ }
+
+CONTENT_RAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_RANG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_RANG;
+ default:
+ return -1;
+ }
+
+CONTENT_RANG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 24;
+ }
+ goto CONTENT_RANGE;
+ case 'e':
+ if (last) {
+ return 24;
+ }
+ goto CONTENT_RANGE;
+ default:
+ return -1;
+ }
+
+CONTENT_T:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TR;
+ case 'Y':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TY;
+ case 'y':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TY;
+ default:
+ return -1;
+ }
+
+CONTENT_TR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRA;
+ default:
+ return -1;
+ }
+
+CONTENT_TRA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRAN;
+ default:
+ return -1;
+ }
+
+CONTENT_TRAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANS;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSF;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSF;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSF:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFE;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_E;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_E;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER_E:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_EN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_EN;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER_EN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENC;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER_ENC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENCO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENCO;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER_ENCO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENCOD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENCOD;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER_ENCOD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENCODI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENCODI;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER_ENCODI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENCODIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TRANSFER_ENCODIN;
+ default:
+ return -1;
+ }
+
+CONTENT_TRANSFER_ENCODIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return 25;
+ }
+ goto CONTENT_TRANSFER_ENCODING;
+ case 'g':
+ if (last) {
+ return 25;
+ }
+ goto CONTENT_TRANSFER_ENCODING;
+ default:
+ return -1;
+ }
+
+CONTENT_TY:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TYP;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto CONTENT_TYP;
+ default:
+ return -1;
+ }
+
+CONTENT_TYP:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 26;
+ }
+ goto CONTENT_TYPE;
+ case 'e':
+ if (last) {
+ return 26;
+ }
+ goto CONTENT_TYPE;
+ default:
+ return -1;
+ }
+
+COO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'K':
+ if (last) {
+ return -1;
+ }
+ goto COOK;
+ case 'k':
+ if (last) {
+ return -1;
+ }
+ goto COOK;
+ default:
+ return -1;
+ }
+
+COOK:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto COOKI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto COOKI;
+ default:
+ return -1;
+ }
+
+COOKI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 27;
+ }
+ goto COOKIE;
+ case 'e':
+ if (last) {
+ return 27;
+ }
+ goto COOKIE;
+ default:
+ return -1;
+ }
+
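+/* "Date" -> 28, "Destination" -> 29, "Digest" -> 30 */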
+D:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto DA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto DA;
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto DE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto DE;
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto DI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto DI;
+ default:
+ return -1;
+ }
+
+DA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto DAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto DAT;
+ default:
+ return -1;
+ }
+
+DAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 28;
+ }
+ goto DATE;
+ case 'e':
+ if (last) {
+ return 28;
+ }
+ goto DATE;
+ default:
+ return -1;
+ }
+
+DE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto DES;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto DES;
+ default:
+ return -1;
+ }
+
+DES:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto DEST;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto DEST;
+ default:
+ return -1;
+ }
+
+DEST:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto DESTI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto DESTI;
+ default:
+ return -1;
+ }
+
+DESTI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto DESTIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto DESTIN;
+ default:
+ return -1;
+ }
+
+DESTIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto DESTINA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto DESTINA;
+ default:
+ return -1;
+ }
+
+DESTINA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto DESTINAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto DESTINAT;
+ default:
+ return -1;
+ }
+
+DESTINAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto DESTINATI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto DESTINATI;
+ default:
+ return -1;
+ }
+
+DESTINATI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto DESTINATIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto DESTINATIO;
+ default:
+ return -1;
+ }
+
+DESTINATIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 29;
+ }
+ goto DESTINATION;
+ case 'n':
+ if (last) {
+ return 29;
+ }
+ goto DESTINATION;
+ default:
+ return -1;
+ }
+
+DI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto DIG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto DIG;
+ default:
+ return -1;
+ }
+
+DIG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto DIGE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto DIGE;
+ default:
+ return -1;
+ }
+
+DIGE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto DIGES;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto DIGES;
+ default:
+ return -1;
+ }
+
+DIGES:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 30;
+ }
+ goto DIGEST;
+ case 't':
+ if (last) {
+ return 30;
+ }
+ goto DIGEST;
+ default:
+ return -1;
+ }
+
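+/* "ETag" -> 31, "Expect" -> 32, "Expires" -> 33 */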
+E:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto ET;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto ET;
+ case 'X':
+ if (last) {
+ return -1;
+ }
+ goto EX;
+ case 'x':
+ if (last) {
+ return -1;
+ }
+ goto EX;
+ default:
+ return -1;
+ }
+
+ET:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto ETA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto ETA;
+ default:
+ return -1;
+ }
+
+ETA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return 31;
+ }
+ goto ETAG;
+ case 'g':
+ if (last) {
+ return 31;
+ }
+ goto ETAG;
+ default:
+ return -1;
+ }
+
+EX:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto EXP;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto EXP;
+ default:
+ return -1;
+ }
+
+EXP:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto EXPE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto EXPE;
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto EXPI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto EXPI;
+ default:
+ return -1;
+ }
+
+EXPE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto EXPEC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto EXPEC;
+ default:
+ return -1;
+ }
+
+EXPEC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 32;
+ }
+ goto EXPECT;
+ case 't':
+ if (last) {
+ return 32;
+ }
+ goto EXPECT;
+ default:
+ return -1;
+ }
+
+EXPI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto EXPIR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto EXPIR;
+ default:
+ return -1;
+ }
+
+EXPIR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto EXPIRE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto EXPIRE;
+ default:
+ return -1;
+ }
+
+EXPIRE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 33;
+ }
+ goto EXPIRES;
+ case 's':
+ if (last) {
+ return 33;
+ }
+ goto EXPIRES;
+ default:
+ return -1;
+ }
+
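+/* "Forwarded" -> 34, "From" -> 35 */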
+F:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto FO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto FO;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto FR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto FR;
+ default:
+ return -1;
+ }
+
+FO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto FOR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto FOR;
+ default:
+ return -1;
+ }
+
+FOR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'W':
+ if (last) {
+ return -1;
+ }
+ goto FORW;
+ case 'w':
+ if (last) {
+ return -1;
+ }
+ goto FORW;
+ default:
+ return -1;
+ }
+
+FORW:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto FORWA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto FORWA;
+ default:
+ return -1;
+ }
+
+FORWA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto FORWAR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto FORWAR;
+ default:
+ return -1;
+ }
+
+FORWAR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto FORWARD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto FORWARD;
+ default:
+ return -1;
+ }
+
+FORWARD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto FORWARDE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto FORWARDE;
+ default:
+ return -1;
+ }
+
+FORWARDE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return 34;
+ }
+ goto FORWARDED;
+ case 'd':
+ if (last) {
+ return 34;
+ }
+ goto FORWARDED;
+ default:
+ return -1;
+ }
+
+FR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto FRO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto FRO;
+ default:
+ return -1;
+ }
+
+FRO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'M':
+ if (last) {
+ return 35;
+ }
+ goto FROM;
+ case 'm':
+ if (last) {
+ return 35;
+ }
+ goto FROM;
+ default:
+ return -1;
+ }
+
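+/* "Host" -> 36 */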
+H:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto HO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto HO;
+ default:
+ return -1;
+ }
+
+HO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto HOS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto HOS;
+ default:
+ return -1;
+ }
+
+HOS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 36;
+ }
+ goto HOST;
+ case 't':
+ if (last) {
+ return 36;
+ }
+ goto HOST;
+ default:
+ return -1;
+ }
+
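+/* The "If-*" conditionals: If-Match (37), If-Modified-Since (38),
+ * If-None-Match (39), If-Range (40), If-Unmodified-Since (41). */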
+I:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto IF;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto IF;
+ default:
+ return -1;
+ }
+
+IF:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto IF_;
+ default:
+ return -1;
+ }
+
+IF_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto IF_M;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto IF_M;
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto IF_N;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto IF_N;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto IF_R;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto IF_R;
+ case 'U':
+ if (last) {
+ return -1;
+ }
+ goto IF_U;
+ case 'u':
+ if (last) {
+ return -1;
+ }
+ goto IF_U;
+ default:
+ return -1;
+ }
+
+IF_M:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto IF_MA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto IF_MA;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto IF_MO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto IF_MO;
+ default:
+ return -1;
+ }
+
+IF_MA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto IF_MAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto IF_MAT;
+ default:
+ return -1;
+ }
+
+IF_MAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto IF_MATC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto IF_MATC;
+ default:
+ return -1;
+ }
+
+IF_MATC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return 37;
+ }
+ goto IF_MATCH;
+ case 'h':
+ if (last) {
+ return 37;
+ }
+ goto IF_MATCH;
+ default:
+ return -1;
+ }
+
+IF_MO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto IF_MOD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto IF_MOD;
+ default:
+ return -1;
+ }
+
+IF_MOD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODI;
+ default:
+ return -1;
+ }
+
+IF_MODI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIF;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIF;
+ default:
+ return -1;
+ }
+
+IF_MODIF:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFI;
+ default:
+ return -1;
+ }
+
+IF_MODIFI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIE;
+ default:
+ return -1;
+ }
+
+IF_MODIFIE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED;
+ default:
+ return -1;
+ }
+
+IF_MODIFIED:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_;
+ default:
+ return -1;
+ }
+
+IF_MODIFIED_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_S;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_S;
+ default:
+ return -1;
+ }
+
+IF_MODIFIED_S:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_SI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_SI;
+ default:
+ return -1;
+ }
+
+IF_MODIFIED_SI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_SIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_SIN;
+ default:
+ return -1;
+ }
+
+IF_MODIFIED_SIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_SINC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto IF_MODIFIED_SINC;
+ default:
+ return -1;
+ }
+
+IF_MODIFIED_SINC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 38;
+ }
+ goto IF_MODIFIED_SINCE;
+ case 'e':
+ if (last) {
+ return 38;
+ }
+ goto IF_MODIFIED_SINCE;
+ default:
+ return -1;
+ }
+
+IF_N:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto IF_NO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto IF_NO;
+ default:
+ return -1;
+ }
+
+IF_NO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto IF_NON;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto IF_NON;
+ default:
+ return -1;
+ }
+
+IF_NON:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE;
+ default:
+ return -1;
+ }
+
+IF_NONE:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_;
+ default:
+ return -1;
+ }
+
+IF_NONE_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_M;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_M;
+ default:
+ return -1;
+ }
+
+IF_NONE_M:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_MA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_MA;
+ default:
+ return -1;
+ }
+
+IF_NONE_MA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_MAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_MAT;
+ default:
+ return -1;
+ }
+
+IF_NONE_MAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_MATC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto IF_NONE_MATC;
+ default:
+ return -1;
+ }
+
+IF_NONE_MATC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return 39;
+ }
+ goto IF_NONE_MATCH;
+ case 'h':
+ if (last) {
+ return 39;
+ }
+ goto IF_NONE_MATCH;
+ default:
+ return -1;
+ }
+
+IF_R:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto IF_RA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto IF_RA;
+ default:
+ return -1;
+ }
+
+IF_RA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto IF_RAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto IF_RAN;
+ default:
+ return -1;
+ }
+
+IF_RAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto IF_RANG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto IF_RANG;
+ default:
+ return -1;
+ }
+
+IF_RANG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 40;
+ }
+ goto IF_RANGE;
+ case 'e':
+ if (last) {
+ return 40;
+ }
+ goto IF_RANGE;
+ default:
+ return -1;
+ }
+
+IF_U:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto IF_UN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto IF_UN;
+ default:
+ return -1;
+ }
+
+IF_UN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNM;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNM;
+ default:
+ return -1;
+ }
+
+IF_UNM:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMO;
+ default:
+ return -1;
+ }
+
+IF_UNMO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMOD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMOD;
+ default:
+ return -1;
+ }
+
+IF_UNMOD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODI;
+ default:
+ return -1;
+ }
+
+IF_UNMODI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIF;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIF;
+ default:
+ return -1;
+ }
+
+IF_UNMODIF:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFI;
+ default:
+ return -1;
+ }
+
+IF_UNMODIFI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIE;
+ default:
+ return -1;
+ }
+
+IF_UNMODIFIE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED;
+ default:
+ return -1;
+ }
+
+IF_UNMODIFIED:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_;
+ default:
+ return -1;
+ }
+
+IF_UNMODIFIED_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_S;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_S;
+ default:
+ return -1;
+ }
+
+IF_UNMODIFIED_S:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_SI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_SI;
+ default:
+ return -1;
+ }
+
+IF_UNMODIFIED_SI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_SIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_SIN;
+ default:
+ return -1;
+ }
+
+IF_UNMODIFIED_SIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_SINC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto IF_UNMODIFIED_SINC;
+ default:
+ return -1;
+ }
+
+IF_UNMODIFIED_SINC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 41;
+ }
+ goto IF_UNMODIFIED_SINCE;
+ case 'e':
+ if (last) {
+ return 41;
+ }
+ goto IF_UNMODIFIED_SINCE;
+ default:
+ return -1;
+ }
+
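+/* "Keep-Alive" -> 42 */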
+K:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto KE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto KE;
+ default:
+ return -1;
+ }
+
+KE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto KEE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto KEE;
+ default:
+ return -1;
+ }
+
+KEE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto KEEP;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto KEEP;
+ default:
+ return -1;
+ }
+
+KEEP:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_;
+ default:
+ return -1;
+ }
+
+KEEP_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_A;
+ default:
+ return -1;
+ }
+
+KEEP_A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_AL;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_AL;
+ default:
+ return -1;
+ }
+
+KEEP_AL:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_ALI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_ALI;
+ default:
+ return -1;
+ }
+
+KEEP_ALI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'V':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_ALIV;
+ case 'v':
+ if (last) {
+ return -1;
+ }
+ goto KEEP_ALIV;
+ default:
+ return -1;
+ }
+
+KEEP_ALIV:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 42;
+ }
+ goto KEEP_ALIVE;
+ case 'e':
+ if (last) {
+ return 42;
+ }
+ goto KEEP_ALIVE;
+ default:
+ return -1;
+ }
+
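+/* "Last-Event-Id" -> 43, "Last-Modified" -> 44, "Link" -> 45,
+ * "Location" -> 46 */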
+L:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto LA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto LA;
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto LI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto LI;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto LO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto LO;
+ default:
+ return -1;
+ }
+
+LA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto LAS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto LAS;
+ default:
+ return -1;
+ }
+
+LAS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto LAST;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto LAST;
+ default:
+ return -1;
+ }
+
+LAST:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto LAST_;
+ default:
+ return -1;
+ }
+
+LAST_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto LAST_E;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto LAST_E;
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto LAST_M;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto LAST_M;
+ default:
+ return -1;
+ }
+
+LAST_E:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'V':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EV;
+ case 'v':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EV;
+ default:
+ return -1;
+ }
+
+LAST_EV:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVE;
+ default:
+ return -1;
+ }
+
+LAST_EVE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVEN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVEN;
+ default:
+ return -1;
+ }
+
+LAST_EVEN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVENT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVENT;
+ default:
+ return -1;
+ }
+
+LAST_EVENT:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVENT_;
+ default:
+ return -1;
+ }
+
+LAST_EVENT_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVENT_I;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto LAST_EVENT_I;
+ default:
+ return -1;
+ }
+
+LAST_EVENT_I:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return 43;
+ }
+ goto LAST_EVENT_ID;
+ case 'd':
+ if (last) {
+ return 43;
+ }
+ goto LAST_EVENT_ID;
+ default:
+ return -1;
+ }
+
+LAST_M:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MO;
+ default:
+ return -1;
+ }
+
+LAST_MO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MOD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MOD;
+ default:
+ return -1;
+ }
+
+LAST_MOD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MODI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MODI;
+ default:
+ return -1;
+ }
+
+LAST_MODI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MODIF;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MODIF;
+ default:
+ return -1;
+ }
+
+LAST_MODIF:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MODIFI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MODIFI;
+ default:
+ return -1;
+ }
+
+LAST_MODIFI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MODIFIE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto LAST_MODIFIE;
+ default:
+ return -1;
+ }
+
+LAST_MODIFIE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return 44;
+ }
+ goto LAST_MODIFIED;
+ case 'd':
+ if (last) {
+ return 44;
+ }
+ goto LAST_MODIFIED;
+ default:
+ return -1;
+ }
+
+LI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto LIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto LIN;
+ default:
+ return -1;
+ }
+
+LIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'K':
+ if (last) {
+ return 45;
+ }
+ goto LINK;
+ case 'k':
+ if (last) {
+ return 45;
+ }
+ goto LINK;
+ default:
+ return -1;
+ }
+
+LO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto LOC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto LOC;
+ default:
+ return -1;
+ }
+
+LOC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto LOCA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto LOCA;
+ default:
+ return -1;
+ }
+
+LOCA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto LOCAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto LOCAT;
+ default:
+ return -1;
+ }
+
+LOCAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto LOCATI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto LOCATI;
+ default:
+ return -1;
+ }
+
+LOCATI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto LOCATIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto LOCATIO;
+ default:
+ return -1;
+ }
+
+LOCATIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 46;
+ }
+ goto LOCATION;
+ case 'n':
+ if (last) {
+ return 46;
+ }
+ goto LOCATION;
+ default:
+ return -1;
+ }
+
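+/* "Max-Forwards" -> 47 */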
+M:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto MA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto MA;
+ default:
+ return -1;
+ }
+
+MA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'X':
+ if (last) {
+ return -1;
+ }
+ goto MAX;
+ case 'x':
+ if (last) {
+ return -1;
+ }
+ goto MAX;
+ default:
+ return -1;
+ }
+
+MAX:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto MAX_;
+ default:
+ return -1;
+ }
+
+MAX_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto MAX_F;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto MAX_F;
+ default:
+ return -1;
+ }
+
+MAX_F:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FO;
+ default:
+ return -1;
+ }
+
+MAX_FO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FOR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FOR;
+ default:
+ return -1;
+ }
+
+MAX_FOR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'W':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FORW;
+ case 'w':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FORW;
+ default:
+ return -1;
+ }
+
+MAX_FORW:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FORWA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FORWA;
+ default:
+ return -1;
+ }
+
+MAX_FORWA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FORWAR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FORWAR;
+ default:
+ return -1;
+ }
+
+MAX_FORWAR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FORWARD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto MAX_FORWARD;
+ default:
+ return -1;
+ }
+
+MAX_FORWARD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 47;
+ }
+ goto MAX_FORWARDS;
+ case 's':
+ if (last) {
+ return 47;
+ }
+ goto MAX_FORWARDS;
+ default:
+ return -1;
+ }
+
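+/* "Origin" -> 48 */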
+O:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto OR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto OR;
+ default:
+ return -1;
+ }
+
+OR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto ORI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto ORI;
+ default:
+ return -1;
+ }
+
+ORI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto ORIG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto ORIG;
+ default:
+ return -1;
+ }
+
+ORIG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto ORIGI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto ORIGI;
+ default:
+ return -1;
+ }
+
+ORIGI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 48;
+ }
+ goto ORIGIN;
+ case 'n':
+ if (last) {
+ return 48;
+ }
+ goto ORIGIN;
+ default:
+ return -1;
+ }
+
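+/* "Pragma" -> 49, "Proxy-Authenticate" -> 50,
+ * "Proxy-Authorization" -> 51 */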
+P:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto PR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto PR;
+ default:
+ return -1;
+ }
+
+PR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto PRA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto PRA;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto PRO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto PRO;
+ default:
+ return -1;
+ }
+
+PRA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto PRAG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto PRAG;
+ default:
+ return -1;
+ }
+
+PRAG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'M':
+ if (last) {
+ return -1;
+ }
+ goto PRAGM;
+ case 'm':
+ if (last) {
+ return -1;
+ }
+ goto PRAGM;
+ default:
+ return -1;
+ }
+
+PRAGM:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return 49;
+ }
+ goto PRAGMA;
+ case 'a':
+ if (last) {
+ return 49;
+ }
+ goto PRAGMA;
+ default:
+ return -1;
+ }
+
+PRO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'X':
+ if (last) {
+ return -1;
+ }
+ goto PROX;
+ case 'x':
+ if (last) {
+ return -1;
+ }
+ goto PROX;
+ default:
+ return -1;
+ }
+
+PROX:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'Y':
+ if (last) {
+ return -1;
+ }
+ goto PROXY;
+ case 'y':
+ if (last) {
+ return -1;
+ }
+ goto PROXY;
+ default:
+ return -1;
+ }
+
+PROXY:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_;
+ default:
+ return -1;
+ }
+
+PROXY_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_A;
+ default:
+ return -1;
+ }
+
+PROXY_A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'U':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AU;
+ case 'u':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AU;
+ default:
+ return -1;
+ }
+
+PROXY_AU:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUT;
+ default:
+ return -1;
+ }
+
+PROXY_AUT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTH;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTH;
+ default:
+ return -1;
+ }
+
+PROXY_AUTH:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHE;
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHO;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHEN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHEN;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHEN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENT;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHENT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENTI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENTI;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHENTI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENTIC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENTIC;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHENTIC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENTICA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENTICA;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHENTICA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENTICAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHENTICAT;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHENTICAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 50;
+ }
+ goto PROXY_AUTHENTICATE;
+ case 'e':
+ if (last) {
+ return 50;
+ }
+ goto PROXY_AUTHENTICATE;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHOR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHOR;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHOR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORI;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHORI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'Z':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZ;
+ case 'z':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZ;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHORIZ:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZA;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHORIZA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZAT;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHORIZAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZATI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZATI;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHORIZATI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZATIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto PROXY_AUTHORIZATIO;
+ default:
+ return -1;
+ }
+
+PROXY_AUTHORIZATIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 51;
+ }
+ goto PROXY_AUTHORIZATION;
+ case 'n':
+ if (last) {
+ return 51;
+ }
+ goto PROXY_AUTHORIZATION;
+ default:
+ return -1;
+ }
+
+R:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto RA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto RA;
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto RE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto RE;
+ default:
+ return -1;
+ }
+
+RA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto RAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto RAN;
+ default:
+ return -1;
+ }
+
+RAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto RANG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto RANG;
+ default:
+ return -1;
+ }
+
+RANG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 52;
+ }
+ goto RANGE;
+ case 'e':
+ if (last) {
+ return 52;
+ }
+ goto RANGE;
+ default:
+ return -1;
+ }
+
+RE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto REF;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto REF;
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto RET;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto RET;
+ default:
+ return -1;
+ }
+
+REF:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto REFE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto REFE;
+ default:
+ return -1;
+ }
+
+REFE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto REFER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto REFER;
+ default:
+ return -1;
+ }
+
+REFER:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto REFERE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto REFERE;
+ default:
+ return -1;
+ }
+
+REFERE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return 53;
+ }
+ goto REFERER;
+ case 'r':
+ if (last) {
+ return 53;
+ }
+ goto REFERER;
+ default:
+ return -1;
+ }
+
+RET:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto RETR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto RETR;
+ default:
+ return -1;
+ }
+
+RETR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'Y':
+ if (last) {
+ return -1;
+ }
+ goto RETRY;
+ case 'y':
+ if (last) {
+ return -1;
+ }
+ goto RETRY;
+ default:
+ return -1;
+ }
+
+RETRY:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_;
+ default:
+ return -1;
+ }
+
+RETRY_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_A;
+ default:
+ return -1;
+ }
+
+RETRY_A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_AF;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_AF;
+ default:
+ return -1;
+ }
+
+RETRY_AF:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_AFT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_AFT;
+ default:
+ return -1;
+ }
+
+RETRY_AFT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_AFTE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto RETRY_AFTE;
+ default:
+ return -1;
+ }
+
+RETRY_AFTE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return 54;
+ }
+ goto RETRY_AFTER;
+ case 'r':
+ if (last) {
+ return 54;
+ }
+ goto RETRY_AFTER;
+ default:
+ return -1;
+ }
+
+S:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SE;
+ default:
+ return -1;
+ }
+
+SE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto SEC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto SEC;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto SER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto SER;
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto SET;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto SET;
+ default:
+ return -1;
+ }
+
+SEC:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto SEC_;
+ default:
+ return -1;
+ }
+
+SEC_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'W':
+ if (last) {
+ return -1;
+ }
+ goto SEC_W;
+ case 'w':
+ if (last) {
+ return -1;
+ }
+ goto SEC_W;
+ default:
+ return -1;
+ }
+
+SEC_W:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WE;
+ default:
+ return -1;
+ }
+
+SEC_WE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'B':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEB;
+ case 'b':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEB;
+ default:
+ return -1;
+ }
+
+SEC_WEB:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBS;
+ default:
+ return -1;
+ }
+
+SEC_WEBS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSO;
+ default:
+ return -1;
+ }
+
+SEC_WEBSO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOC;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'K':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCK;
+ case 'k':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCK;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCK:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKE;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_A;
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_E;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_E;
+ case 'K':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_K;
+ case 'k':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_K;
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_P;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_P;
+ case 'V':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_V;
+ case 'v':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_V;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_AC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_AC;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_AC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_ACC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_ACC;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_ACC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_ACCE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_ACCE;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_ACCE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_ACCEP;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_ACCEP;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_ACCEP:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 55;
+ }
+ goto SEC_WEBSOCKET_ACCEPT;
+ case 't':
+ if (last) {
+ return 55;
+ }
+ goto SEC_WEBSOCKET_ACCEPT;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_E:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'X':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EX;
+ case 'x':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EX;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_EX:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXT;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_EXT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTE;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_EXTE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTEN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTEN;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_EXTEN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTENS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTENS;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_EXTENS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTENSI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTENSI;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_EXTENSI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTENSIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTENSIO;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_EXTENSIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTENSION;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_EXTENSION;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_EXTENSION:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return 56;
+ }
+ goto SEC_WEBSOCKET_EXTENSIONS;
+ case 's':
+ if (last) {
+ return 56;
+ }
+ goto SEC_WEBSOCKET_EXTENSIONS;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_K:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_KE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_KE;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_KE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'Y':
+ if (last) {
+ return 57;
+ }
+ goto SEC_WEBSOCKET_KEY;
+ case 'y':
+ if (last) {
+ return 57;
+ }
+ goto SEC_WEBSOCKET_KEY;
+ default:
+ return -1;
+ }
+
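+/* "Sec-WebSocket-Key" (57) is a proper prefix of
+   "Sec-WebSocket-Key1" (58), so the matched-Key state must also
+   accept a trailing '1'. */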
+SEC_WEBSOCKET_KEY:
+ NEXT_CHAR();
+ switch (ch) {
+ case '1':
+ if (last) {
+ return 58;
+ }
+ goto SEC_WEBSOCKET_KEY1;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_P:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PR;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_PR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PRO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PRO;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_PRO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PROT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PROT;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_PROT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PROTO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PROTO;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_PROTO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PROTOC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PROTOC;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_PROTOC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PROTOCO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_PROTOCO;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_PROTOCO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return 59;
+ }
+ goto SEC_WEBSOCKET_PROTOCOL;
+ case 'l':
+ if (last) {
+ return 59;
+ }
+ goto SEC_WEBSOCKET_PROTOCOL;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_V:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VE;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_VE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VER;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_VER:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VERS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VERS;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_VERS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VERSI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VERSI;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_VERSI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VERSIO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto SEC_WEBSOCKET_VERSIO;
+ default:
+ return -1;
+ }
+
+SEC_WEBSOCKET_VERSIO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return 60;
+ }
+ goto SEC_WEBSOCKET_VERSION;
+ case 'n':
+ if (last) {
+ return 60;
+ }
+ goto SEC_WEBSOCKET_VERSION;
+ default:
+ return -1;
+ }
+
+SER:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'V':
+ if (last) {
+ return -1;
+ }
+ goto SERV;
+ case 'v':
+ if (last) {
+ return -1;
+ }
+ goto SERV;
+ default:
+ return -1;
+ }
+
+SERV:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto SERVE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto SERVE;
+ default:
+ return -1;
+ }
+
+SERVE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return 61;
+ }
+ goto SERVER;
+ case 'r':
+ if (last) {
+ return 61;
+ }
+ goto SERVER;
+ default:
+ return -1;
+ }
+
+SET:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto SET_;
+ default:
+ return -1;
+ }
+
+SET_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto SET_C;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto SET_C;
+ default:
+ return -1;
+ }
+
+SET_C:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto SET_CO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto SET_CO;
+ default:
+ return -1;
+ }
+
+SET_CO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto SET_COO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto SET_COO;
+ default:
+ return -1;
+ }
+
+SET_COO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'K':
+ if (last) {
+ return -1;
+ }
+ goto SET_COOK;
+ case 'k':
+ if (last) {
+ return -1;
+ }
+ goto SET_COOK;
+ default:
+ return -1;
+ }
+
+SET_COOK:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto SET_COOKI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto SET_COOKI;
+ default:
+ return -1;
+ }
+
+SET_COOKI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 62;
+ }
+ goto SET_COOKIE;
+ case 'e':
+ if (last) {
+ return 62;
+ }
+ goto SET_COOKIE;
+ default:
+ return -1;
+ }
+
+T:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 63;
+ }
+ goto TE;
+ case 'e':
+ if (last) {
+ return 63;
+ }
+ goto TE;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto TR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto TR;
+ default:
+ return -1;
+ }
+
+TR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto TRA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto TRA;
+ default:
+ return -1;
+ }
+
+TRA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto TRAI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto TRAI;
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto TRAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto TRAN;
+ default:
+ return -1;
+ }
+
+TRAI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'L':
+ if (last) {
+ return -1;
+ }
+ goto TRAIL;
+ case 'l':
+ if (last) {
+ return -1;
+ }
+ goto TRAIL;
+ default:
+ return -1;
+ }
+
+TRAIL:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto TRAILE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto TRAILE;
+ default:
+ return -1;
+ }
+
+TRAILE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return 64;
+ }
+ goto TRAILER;
+ case 'r':
+ if (last) {
+ return 64;
+ }
+ goto TRAILER;
+ default:
+ return -1;
+ }
+
+TRAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto TRANS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto TRANS;
+ default:
+ return -1;
+ }
+
+TRANS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto TRANSF;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto TRANSF;
+ default:
+ return -1;
+ }
+
+TRANSF:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFE;
+ default:
+ return -1;
+ }
+
+TRANSFE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER;
+ default:
+ return -1;
+ }
+
+TRANSFER:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_;
+ default:
+ return -1;
+ }
+
+TRANSFER_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_E;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_E;
+ default:
+ return -1;
+ }
+
+TRANSFER_E:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_EN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_EN;
+ default:
+ return -1;
+ }
+
+TRANSFER_EN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENC;
+ default:
+ return -1;
+ }
+
+TRANSFER_ENC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENCO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENCO;
+ default:
+ return -1;
+ }
+
+TRANSFER_ENCO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENCOD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENCOD;
+ default:
+ return -1;
+ }
+
+TRANSFER_ENCOD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENCODI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENCODI;
+ default:
+ return -1;
+ }
+
+TRANSFER_ENCODI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENCODIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto TRANSFER_ENCODIN;
+ default:
+ return -1;
+ }
+
+TRANSFER_ENCODIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return 65;
+ }
+ goto TRANSFER_ENCODING;
+ case 'g':
+ if (last) {
+ return 65;
+ }
+ goto TRANSFER_ENCODING;
+ default:
+ return -1;
+ }
+
+U:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto UR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto UR;
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto UP;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto UP;
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto US;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto US;
+ default:
+ return -1;
+ }
+
+UR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return 66;
+ }
+ goto URI;
+ case 'i':
+ if (last) {
+ return 66;
+ }
+ goto URI;
+ default:
+ return -1;
+ }
+
+UP:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto UPG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto UPG;
+ default:
+ return -1;
+ }
+
+UPG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto UPGR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto UPGR;
+ default:
+ return -1;
+ }
+
+UPGR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto UPGRA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto UPGRA;
+ default:
+ return -1;
+ }
+
+UPGRA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto UPGRAD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto UPGRAD;
+ default:
+ return -1;
+ }
+
+UPGRAD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 67;
+ }
+ goto UPGRADE;
+ case 'e':
+ if (last) {
+ return 67;
+ }
+ goto UPGRADE;
+ default:
+ return -1;
+ }
+
+US:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto USE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto USE;
+ default:
+ return -1;
+ }
+
+USE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto USER;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto USER;
+ default:
+ return -1;
+ }
+
+USER:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto USER_;
+ default:
+ return -1;
+ }
+
+USER_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto USER_A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto USER_A;
+ default:
+ return -1;
+ }
+
+USER_A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto USER_AG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto USER_AG;
+ default:
+ return -1;
+ }
+
+USER_AG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto USER_AGE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto USER_AGE;
+ default:
+ return -1;
+ }
+
+USER_AGE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto USER_AGEN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto USER_AGEN;
+ default:
+ return -1;
+ }
+
+USER_AGEN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 68;
+ }
+ goto USER_AGENT;
+ case 't':
+ if (last) {
+ return 68;
+ }
+ goto USER_AGENT;
+ default:
+ return -1;
+ }
+
+V:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto VA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto VA;
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto VI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto VI;
+ default:
+ return -1;
+ }
+
+VA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto VAR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto VAR;
+ default:
+ return -1;
+ }
+
+VAR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'Y':
+ if (last) {
+ return 69;
+ }
+ goto VARY;
+ case 'y':
+ if (last) {
+ return 69;
+ }
+ goto VARY;
+ default:
+ return -1;
+ }
+
+VI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return 70;
+ }
+ goto VIA;
+ case 'a':
+ if (last) {
+ return 70;
+ }
+ goto VIA;
+ default:
+ return -1;
+ }
+
+W:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'W':
+ if (last) {
+ return -1;
+ }
+ goto WW;
+ case 'w':
+ if (last) {
+ return -1;
+ }
+ goto WW;
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto WA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto WA;
+ default:
+ return -1;
+ }
+
+WW:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'W':
+ if (last) {
+ return -1;
+ }
+ goto WWW;
+ case 'w':
+ if (last) {
+ return -1;
+ }
+ goto WWW;
+ default:
+ return -1;
+ }
+
+WWW:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto WWW_;
+ default:
+ return -1;
+ }
+
+WWW_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto WWW_A;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto WWW_A;
+ default:
+ return -1;
+ }
+
+WWW_A:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'U':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AU;
+ case 'u':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AU;
+ default:
+ return -1;
+ }
+
+WWW_AU:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUT;
+ default:
+ return -1;
+ }
+
+WWW_AUT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTH;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTH;
+ default:
+ return -1;
+ }
+
+WWW_AUTH:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHE;
+ default:
+ return -1;
+ }
+
+WWW_AUTHE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHEN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHEN;
+ default:
+ return -1;
+ }
+
+WWW_AUTHEN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENT;
+ default:
+ return -1;
+ }
+
+WWW_AUTHENT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENTI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENTI;
+ default:
+ return -1;
+ }
+
+WWW_AUTHENTI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'C':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENTIC;
+ case 'c':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENTIC;
+ default:
+ return -1;
+ }
+
+WWW_AUTHENTIC:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENTICA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENTICA;
+ default:
+ return -1;
+ }
+
+WWW_AUTHENTICA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENTICAT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto WWW_AUTHENTICAT;
+ default:
+ return -1;
+ }
+
+WWW_AUTHENTICAT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return 71;
+ }
+ goto WWW_AUTHENTICATE;
+ case 'e':
+ if (last) {
+ return 71;
+ }
+ goto WWW_AUTHENTICATE;
+ default:
+ return -1;
+ }
+
+WA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto WAN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto WAN;
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto WAR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto WAR;
+ default:
+ return -1;
+ }
+
+WAN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto WANT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto WANT;
+ default:
+ return -1;
+ }
+
+WANT:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto WANT_;
+ default:
+ return -1;
+ }
+
+WANT_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto WANT_D;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto WANT_D;
+ default:
+ return -1;
+ }
+
+WANT_D:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto WANT_DI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto WANT_DI;
+ default:
+ return -1;
+ }
+
+WANT_DI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return -1;
+ }
+ goto WANT_DIG;
+ case 'g':
+ if (last) {
+ return -1;
+ }
+ goto WANT_DIG;
+ default:
+ return -1;
+ }
+
+WANT_DIG:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto WANT_DIGE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto WANT_DIGE;
+ default:
+ return -1;
+ }
+
+WANT_DIGE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto WANT_DIGES;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto WANT_DIGES;
+ default:
+ return -1;
+ }
+
+WANT_DIGES:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 72;
+ }
+ goto WANT_DIGEST;
+ case 't':
+ if (last) {
+ return 72;
+ }
+ goto WANT_DIGEST;
+ default:
+ return -1;
+ }
+
+WAR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto WARN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto WARN;
+ default:
+ return -1;
+ }
+
+WARN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'I':
+ if (last) {
+ return -1;
+ }
+ goto WARNI;
+ case 'i':
+ if (last) {
+ return -1;
+ }
+ goto WARNI;
+ default:
+ return -1;
+ }
+
+WARNI:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'N':
+ if (last) {
+ return -1;
+ }
+ goto WARNIN;
+ case 'n':
+ if (last) {
+ return -1;
+ }
+ goto WARNIN;
+ default:
+ return -1;
+ }
+
+WARNIN:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'G':
+ if (last) {
+ return 73;
+ }
+ goto WARNING;
+ case 'g':
+ if (last) {
+ return 73;
+ }
+ goto WARNING;
+ default:
+ return -1;
+ }
+
+X:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto X_;
+ default:
+ return -1;
+ }
+
+X_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto X_F;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto X_F;
+ default:
+ return -1;
+ }
+
+X_F:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto X_FO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto X_FO;
+ default:
+ return -1;
+ }
+
+X_FO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto X_FOR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto X_FOR;
+ default:
+ return -1;
+ }
+
+X_FOR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'W':
+ if (last) {
+ return -1;
+ }
+ goto X_FORW;
+ case 'w':
+ if (last) {
+ return -1;
+ }
+ goto X_FORW;
+ default:
+ return -1;
+ }
+
+X_FORW:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'A':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWA;
+ case 'a':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWA;
+ default:
+ return -1;
+ }
+
+X_FORWA:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWAR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWAR;
+ default:
+ return -1;
+ }
+
+X_FORWAR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARD;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARD;
+ default:
+ return -1;
+ }
+
+X_FORWARD:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'E':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDE;
+ case 'e':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDE;
+ default:
+ return -1;
+ }
+
+X_FORWARDE:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'D':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED;
+ case 'd':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED;
+ default:
+ return -1;
+ }
+
+X_FORWARDED:
+ NEXT_CHAR();
+ switch (ch) {
+ case '-':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'F':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_F;
+ case 'f':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_F;
+ case 'H':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_H;
+ case 'h':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_H;
+ case 'P':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_P;
+ case 'p':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_P;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_F:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_FO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_FO;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_FO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return 74;
+ }
+ goto X_FORWARDED_FOR;
+ case 'r':
+ if (last) {
+ return 74;
+ }
+ goto X_FORWARDED_FOR;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_H:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_HO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_HO;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_HO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'S':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_HOS;
+ case 's':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_HOS;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_HOS:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return 75;
+ }
+ goto X_FORWARDED_HOST;
+ case 't':
+ if (last) {
+ return 75;
+ }
+ goto X_FORWARDED_HOST;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_P:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'R':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_PR;
+ case 'r':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_PR;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_PR:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_PRO;
+ case 'o':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_PRO;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_PRO:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'T':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_PROT;
+ case 't':
+ if (last) {
+ return -1;
+ }
+ goto X_FORWARDED_PROT;
+ default:
+ return -1;
+ }
+
+X_FORWARDED_PROT:
+ NEXT_CHAR();
+ switch (ch) {
+ case 'O':
+ if (last) {
+ return 76;
+ }
+ goto X_FORWARDED_PROTO;
+ case 'o':
+ if (last) {
+ return 76;
+ }
+ goto X_FORWARDED_PROTO;
+ default:
+ return -1;
+ }
+
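+/* Terminal states: the complete header name has already matched but
+   more input remains, so the token is not a known header; every
+   label falls through to `missing`. */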
+ACCEPT_CHARSET:
+ACCEPT_ENCODING:
+ACCEPT_LANGUAGE:
+ACCEPT_RANGES:
+ACCESS_CONTROL_ALLOW_CREDENTIALS:
+ACCESS_CONTROL_ALLOW_HEADERS:
+ACCESS_CONTROL_ALLOW_METHODS:
+ACCESS_CONTROL_ALLOW_ORIGIN:
+ACCESS_CONTROL_EXPOSE_HEADERS:
+ACCESS_CONTROL_MAX_AGE:
+ACCESS_CONTROL_REQUEST_HEADERS:
+ACCESS_CONTROL_REQUEST_METHOD:
+AGE:
+ALLOW:
+AUTHORIZATION:
+CACHE_CONTROL:
+CONNECTION:
+CONTENT_DISPOSITION:
+CONTENT_ENCODING:
+CONTENT_LANGUAGE:
+CONTENT_LENGTH:
+CONTENT_LOCATION:
+CONTENT_MD5:
+CONTENT_RANGE:
+CONTENT_TRANSFER_ENCODING:
+CONTENT_TYPE:
+COOKIE:
+DATE:
+DESTINATION:
+DIGEST:
+ETAG:
+EXPECT:
+EXPIRES:
+FORWARDED:
+FROM:
+HOST:
+IF_MATCH:
+IF_MODIFIED_SINCE:
+IF_NONE_MATCH:
+IF_RANGE:
+IF_UNMODIFIED_SINCE:
+KEEP_ALIVE:
+LAST_EVENT_ID:
+LAST_MODIFIED:
+LINK:
+LOCATION:
+MAX_FORWARDS:
+ORIGIN:
+PRAGMA:
+PROXY_AUTHENTICATE:
+PROXY_AUTHORIZATION:
+RANGE:
+REFERER:
+RETRY_AFTER:
+SEC_WEBSOCKET_ACCEPT:
+SEC_WEBSOCKET_EXTENSIONS:
+SEC_WEBSOCKET_KEY1:
+SEC_WEBSOCKET_PROTOCOL:
+SEC_WEBSOCKET_VERSION:
+SERVER:
+SET_COOKIE:
+TE:
+TRAILER:
+TRANSFER_ENCODING:
+UPGRADE:
+URI:
+USER_AGENT:
+VARY:
+VIA:
+WANT_DIGEST:
+WARNING:
+WWW_AUTHENTICATE:
+X_FORWARDED_FOR:
+X_FORWARDED_HOST:
+X_FORWARDED_PROTO:
+missing:
+ /* nothing found */
+ return -1;
+}
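A minimal caller sketch for the matcher above (illustrative only, not part of the vendored sources; it assumes the translation unit is linked against the compiled _find_header.c). find_header() walks the goto-based trie one character at a time, matching header names case-insensitively, and returns the header's table index or -1:

    #include <stdio.h>
    #include <string.h>

    int find_header(const char *str, int size);  /* from _find_header.h */

    int main(void) {
        const char *names[] = {"Vary", "vIa", "X-Forwarded-For", "X-Unknown"};
        for (int i = 0; i < 4; i++) {
            /* Expected per the states above: 69, 70, 74, -1. */
            printf("%-16s -> %d\n", names[i],
                   find_header(names[i], (int)strlen(names[i])));
        }
        return 0;
    }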
diff --git a/third_party/python/aiohttp/aiohttp/_find_header.h b/third_party/python/aiohttp/aiohttp/_find_header.h
new file mode 100644
index 0000000000..99b7b4f828
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_find_header.h
@@ -0,0 +1,14 @@
+#ifndef _FIND_HEADERS_H
+#define _FIND_HEADERS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int find_header(const char *str, int size);
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/third_party/python/aiohttp/aiohttp/_find_header.pxd b/third_party/python/aiohttp/aiohttp/_find_header.pxd
new file mode 100644
index 0000000000..37a6c37268
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_find_header.pxd
@@ -0,0 +1,2 @@
+cdef extern from "_find_header.h":
+ int find_header(char *, int)
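A hypothetical .pyx fragment sketching how the declaration in _find_header.pxd could be consumed; the function name below is an illustrative assumption, not aiohttp's actual calling code:

    from _find_header cimport find_header

    def header_index(bytes name):
        # -1 means the name is not one of the well-known headers;
        # any other value indexes the generated header table.
        return find_header(name, len(name))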
diff --git a/third_party/python/aiohttp/aiohttp/_frozenlist.c b/third_party/python/aiohttp/aiohttp/_frozenlist.c
new file mode 100644
index 0000000000..4a9d38237f
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_frozenlist.c
@@ -0,0 +1,7512 @@
+/* Generated by Cython 0.29.21 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+ #error Cython requires Python 2.6+ or Python 3.3+.
+#else
+#define CYTHON_ABI "0_29_21"
+#define CYTHON_HEX_VERSION 0x001D15F0
+#define CYTHON_FUTURE_DIVISION 1
+#include <stddef.h>
+#ifndef offsetof
+ #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef HAVE_LONG_LONG
+ #if PY_VERSION_HEX >= 0x02070000
+ #define HAVE_LONG_LONG
+ #endif
+#endif
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+ #define CYTHON_COMPILING_IN_PYPY 1
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #if PY_VERSION_HEX < 0x03050000
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#elif defined(PYSTON_VERSION)
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 1
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#else
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 1
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
+ #define CYTHON_USE_PYTYPE_LOOKUP 1
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
+ #define CYTHON_USE_PYLONG_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #if PY_VERSION_HEX < 0x030300F0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #elif !defined(CYTHON_USE_UNICODE_WRITER)
+ #define CYTHON_USE_UNICODE_WRITER 1
+ #endif
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #ifndef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 1
+ #endif
+ #ifndef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
+ #endif
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
+ #endif
+ #ifndef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
+ #endif
+ #ifndef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
+ #endif
+#endif
+#if !defined(CYTHON_FAST_PYCCALL)
+#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
+#endif
+#if CYTHON_USE_PYLONG_INTERNALS
+ #include "longintrepr.h"
+ #undef SHIFT
+ #undef BASE
+ #undef MASK
+ #ifdef SIZEOF_VOID_P
+ enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+ #endif
+#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+# if defined(__cplusplus)
+ template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include <stdint.h>
+#endif
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
+ #define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyClass_Type
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#endif
+ #define __Pyx_DefaultClassType PyType_Type
+#endif
+#ifndef Py_TPFLAGS_CHECKTYPES
+ #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#ifndef METH_STACKLESS
+ #define METH_STACKLESS 0
+#endif
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ #ifndef METH_FASTCALL
+ #define METH_FASTCALL 0x80
+ #endif
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
+ Py_ssize_t nargs, PyObject *kwnames);
+#else
+ #define __Pyx_PyCFunctionFast _PyCFunctionFast
+ #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
+#endif
+#if CYTHON_FAST_PYCCALL
+#define __Pyx_PyFastCFunction_Check(func)\
+ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
+#else
+#define __Pyx_PyFastCFunction_Check(func) 0
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
+ #define PyMem_RawMalloc(n) PyMem_Malloc(n)
+ #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
+ #define PyMem_RawFree(p) PyMem_Free(p)
+#endif
+#if CYTHON_COMPILING_IN_PYSTON
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x03060000
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
+#elif PY_VERSION_HEX >= 0x03000000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#else
+ #define __Pyx_PyThreadState_Current _PyThreadState_Current
+#endif
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+ *key = PyThread_create_key();
+ return 0;
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+ Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+ *key = Py_tss_NEEDS_INIT;
+ return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+ PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+ return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+ PyThread_delete_key(*key);
+ *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+ return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+ return PyThread_get_key_value(*key);
+}
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
+#else
+#define __Pyx_PyDict_NewPresized(n) PyDict_New()
+#endif
+#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+ #define CYTHON_PEP393_ENABLED 1
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
+ #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
+ #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
+ #else
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
+ #endif
+#else
+ #define CYTHON_PEP393_ENABLED 0
+ #define PyUnicode_1BYTE_KIND 1
+ #define PyUnicode_2BYTE_KIND 2
+ #define PyUnicode_4BYTE_KIND 4
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
+ #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+ #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
+ #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
+ #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
+#endif
+#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
+#else
+ #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
+ #define PyObject_ASCII(o) PyObject_Repr(o)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#ifndef PyObject_Unicode
+ #define PyObject_Unicode PyObject_Str
+#endif
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+#if PY_VERSION_HEX >= 0x030900A4
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
+#else
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
+#endif
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+ #define PyNumber_Int PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+ #ifndef PyUnicode_InternFromString
+ #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+ #endif
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
+#else
+ #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
+#if CYTHON_USE_ASYNC_SLOTS
+ #if PY_VERSION_HEX >= 0x030500B1
+ #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+ #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+ #else
+ #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+ #endif
+#else
+ #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+ typedef struct {
+ unaryfunc am_await;
+ unaryfunc am_aiter;
+ unaryfunc am_anext;
+ } __Pyx_PyAsyncMethodsStruct;
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+ #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+ float value;
+ memset(&value, 0xFF, sizeof(value));
+ return value;
+}
+#endif
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+#define __PYX_MARK_ERR_POS(f_index, lineno) \
+ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#define __PYX_HAVE__aiohttp___frozenlist
+#define __PYX_HAVE_API__aiohttp___frozenlist
+/* Early includes */
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+ (sizeof(type) < sizeof(Py_ssize_t)) ||\
+ (sizeof(type) > sizeof(Py_ssize_t) &&\
+ likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX) &&\
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+ v == (type)PY_SSIZE_T_MIN))) ||\
+ (sizeof(type) == sizeof(Py_ssize_t) &&\
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX))) )
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+ return (size_t) i < (size_t) limit;
+}
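+/* Casting both operands to size_t makes a negative index wrap to a huge
+   value, so the single unsigned comparison checks 0 <= i < limit. */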
+#if defined (__cplusplus) && __cplusplus >= 201103L
+ #include <cstdlib>
+ #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+ #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+ #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+ #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
+ const Py_UNICODE *u_end = u;
+ while (*u_end++) ;
+ return (size_t)(u_end - u - 1);
+}
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
+#define __Pyx_PySequence_Tuple(obj)\
+ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_ASSUME_SAFE_MACROS
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
+#else
+#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
+#endif
+#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ const char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ if (strcmp(default_encoding_c, "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (!ascii_chars_u) goto bad;
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ Py_DECREF(ascii_chars_u);
+ Py_DECREF(ascii_chars_b);
+ }
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
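+/* When compiled with c_string_encoding=ascii on Python 2, the check above
+   runs once at module init: it round-trips the 128 ASCII code points
+   through sys.getdefaultencoding() to verify the default encoding is an
+   ASCII superset, and fails the import with a ValueError otherwise. */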
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+
+static PyObject *__pyx_m = NULL;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_cython_runtime = NULL;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char *__pyx_cfilenm = __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "aiohttp/_frozenlist.pyx",
+ "stringsource",
+};
+
+/*--- Type declarations ---*/
+struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList;
+
+/* "aiohttp/_frozenlist.pyx":4
+ *
+ *
+ * cdef class FrozenList: # <<<<<<<<<<<<<<
+ *
+ * cdef readonly bint frozen
+ */
+struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *__pyx_vtab;
+ int frozen;
+ PyObject *_items;
+};
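+/* Instance layout for FrozenList: the standard PyObject_HEAD, a pointer
+   to the vtable holding the cdef methods, the `frozen` flag, and the
+   backing Python list `_items`. */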
+
+
+
+struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList {
+ PyObject *(*_check_frozen)(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *);
+ PyObject *(*_fast_len)(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *);
+};
+static struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *__pyx_vtabptr_7aiohttp_11_frozenlist_FrozenList;
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_11_frozenlist_10FrozenList__fast_len(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *);
+
+/* --- Runtime support code (head) --- */
+/* Refnanny.proto */
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ if (acquire_gil) {\
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ PyGILState_Release(__pyx_gilstate_save);\
+ } else {\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ }
+#else
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+ #define __Pyx_RefNannyFinishContext()\
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_DECREF(tmp);\
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
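+/* RefNanny is Cython's debug reference-count tracer: when CYTHON_REFNANNY
+   is enabled, every INCREF/DECREF/GOTREF/GIVEREF is reported to the
+   tracer with its source line; in normal builds the macros collapse to
+   plain Py_INCREF/Py_DECREF or to nothing. */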
+
+/* PyObjectGetAttrStr.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/* GetBuiltinName.proto */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+/* RaiseDoubleKeywords.proto */
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
+
+/* ParseKeywords.proto */
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
+ const char* function_name);
+
+/* RaiseArgTupleInvalid.proto */
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
+
+/* PyObjectCall.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+/* PyThreadStateGet.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
+#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
+#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
+#else
+#define __Pyx_PyThreadState_declare
+#define __Pyx_PyThreadState_assign
+#define __Pyx_PyErr_Occurred() PyErr_Occurred()
+#endif
+
+/* PyErrFetchRestore.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
+#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
+#else
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#endif
+#else
+#define __Pyx_PyErr_Clear() PyErr_Clear()
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
+#endif
+
+/* RaiseException.proto */
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
+
+/* GetItemInt.proto */
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
+ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
+ __Pyx_GetItemInt_Generic(o, to_py_func(i))))
+#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+ int is_list, int wraparound, int boundscheck);
+
+/* ObjectGetItem.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
+#else
+#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
+#endif
+
+/* PyFunctionFastCall.proto */
+#if CYTHON_FAST_PYCALL
+#define __Pyx_PyFunction_FastCall(func, args, nargs)\
+ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
+#else
+#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
+#endif
+#define __Pyx_BUILD_ASSERT_EXPR(cond)\
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+#ifndef Py_MEMBER_SIZE
+#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
+#endif
+ static size_t __pyx_pyframe_localsplus_offset = 0;
+ #include "frameobject.h"
+ #define __Pxy_PyFrame_Initialize_Offsets()\
+ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
+ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
+ #define __Pyx_PyFrame_GetLocalsplus(frame)\
+ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
+#endif
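+/* The frame-localsplus machinery computes, once, the offset of
+   f_localsplus inside PyFrameObject from PyFrame_Type.tp_basicsize; the
+   build-time assert guarantees f_localsplus is the struct's final
+   member, so that arithmetic stays valid. */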
+
+/* PyObjectCallMethO.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectCallNoArg.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+#else
+#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
+#endif
+
+/* PyCFunctionFastCall.proto */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
+#else
+#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
+#endif
+
+/* PyObjectCallOneArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+/* PyIntCompare.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, long intval, long inplace);
+
+/* PySequenceContains.proto */
+static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
+ int result = PySequence_Contains(seq, item);
+ return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
+}
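+/* Wraps PySequence_Contains for both `in` (eq == Py_EQ) and `not in`,
+   flipping the result as needed while propagating a negative error
+   return unchanged. */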
+
+/* PyObjectCall2Args.proto */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
+
+/* PyObjectGetMethod.proto */
+static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
+
+/* PyObjectCallMethod1.proto */
+static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg);
+
+/* pop_index.proto */
+static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix);
+static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix);
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix);
+#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) (\
+ (likely(PyList_CheckExact(L) && __Pyx_fits_Py_ssize_t(ix, type, is_signed))) ?\
+ __Pyx__PyList_PopIndex(L, py_ix, ix) : (\
+ (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) :\
+ __Pyx__PyObject_PopIndex(L, py_ix)))
+#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) (\
+ __Pyx_fits_Py_ssize_t(ix, type, is_signed) ?\
+ __Pyx__PyList_PopIndex(L, py_ix, ix) : (\
+ (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) :\
+ __Pyx__PyObject_PopIndex(L, py_ix)))
+#else
+#define __Pyx_PyList_PopIndex(L, py_ix, ix, is_signed, type, to_py_func)\
+ __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func)
+#define __Pyx_PyObject_PopIndex(L, py_ix, ix, is_signed, type, to_py_func) (\
+ (unlikely((py_ix) == Py_None)) ? __Pyx__PyObject_PopNewIndex(L, to_py_func(ix)) :\
+ __Pyx__PyObject_PopIndex(L, py_ix))
+#endif
+
+/* ListAppend.proto */
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
+ PyListObject* L = (PyListObject*) list;
+ Py_ssize_t len = Py_SIZE(list);
+ if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
+ Py_INCREF(x);
+ PyList_SET_ITEM(list, len, x);
+ __Pyx_SET_SIZE(list, len + 1);
+ return 0;
+ }
+ return PyList_Append(list, x);
+}
+#else
+#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
+#endif
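+/* Fast-path append: when the list still has spare capacity (and,
+   seemingly as a guard against CPython's resize heuristics, is more than
+   half full), the item is stored directly and ob_size bumped; otherwise
+   it falls back to PyList_Append. The single `&` instead of `&&` looks
+   deliberate, trading short-circuiting for branch-free evaluation. */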
+
+/* PyErrExceptionMatches.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
+#else
+#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
+#endif
+
+/* GetAttr.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
+
+/* GetAttr3.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
+
+/* PyDictVersioning.proto */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
+#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
+ (version_var) = __PYX_GET_DICT_VERSION(dict);\
+ (cache_var) = (value);
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
+ (VAR) = __pyx_dict_cached_value;\
+ } else {\
+ (VAR) = __pyx_dict_cached_value = (LOOKUP);\
+ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
+ }\
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
+#else
+#define __PYX_GET_DICT_VERSION(dict) (0)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
+#endif
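+/* Dict-version caching: ma_version_tag changes whenever a dict is
+   mutated, so a cached lookup result can be reused until the tag
+   differs, avoiding repeated module-global lookups on hot paths. */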
+
+/* GetModuleGlobalName.proto */
+#if CYTHON_USE_DICT_VERSIONS
+#define __Pyx_GetModuleGlobalName(var, name) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
+ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
+ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
+ PY_UINT64_T __pyx_dict_version;\
+ PyObject *__pyx_dict_cached_value;\
+ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
+#else
+#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
+#endif
+
+/* Import.proto */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
+/* ImportFrom.proto */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
+
+/* HasAttr.proto */
+static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
+
+/* PyObject_GenericGetAttrNoDict.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
+#endif
+
+/* PyObject_GenericGetAttr.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
+#endif
+
+/* SetVTable.proto */
+static int __Pyx_SetVtable(PyObject *dict, void *vtable);
+
+/* PyObjectGetAttrStrNoError.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
+
+/* SetupReduce.proto */
+static int __Pyx_setup_reduce(PyObject* type_obj);
+
+/* CLineInTraceback.proto */
+#ifdef CYTHON_CLINE_IN_TRACEBACK
+#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
+#else
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
+#endif
+
+/* CodeObjectCache.proto */
+typedef struct {
+ PyCodeObject* code_object;
+ int code_line;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+/* AddTraceback.proto */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+/* FastTypeChecks.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
+#else
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
+#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
+/* CheckBinaryVersion.proto */
+static int __Pyx_check_binary_version(void);
+
+/* InitStrings.proto */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
+
+static PyObject *__pyx_f_7aiohttp_11_frozenlist_10FrozenList__check_frozen(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto*/
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_11_frozenlist_10FrozenList__fast_len(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto*/
+
+/* Module declarations from 'aiohttp._frozenlist' */
+static PyTypeObject *__pyx_ptype_7aiohttp_11_frozenlist_FrozenList = 0;
+static PyObject *__pyx_f_7aiohttp_11_frozenlist___pyx_unpickle_FrozenList__set_state(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *, PyObject *); /*proto*/
+#define __Pyx_MODULE_NAME "aiohttp._frozenlist"
+extern int __pyx_module_is_main_aiohttp___frozenlist;
+int __pyx_module_is_main_aiohttp___frozenlist = 0;
+
+/* Implementation of 'aiohttp._frozenlist' */
+static PyObject *__pyx_builtin_RuntimeError;
+static const char __pyx_k_new[] = "__new__";
+static const char __pyx_k_pop[] = "pop";
+static const char __pyx_k_pos[] = "pos";
+static const char __pyx_k_dict[] = "__dict__";
+static const char __pyx_k_item[] = "item";
+static const char __pyx_k_iter[] = "__iter__";
+static const char __pyx_k_main[] = "__main__";
+static const char __pyx_k_name[] = "__name__";
+static const char __pyx_k_test[] = "__test__";
+static const char __pyx_k_clear[] = "clear";
+static const char __pyx_k_count[] = "count";
+static const char __pyx_k_index[] = "index";
+static const char __pyx_k_items[] = "items";
+static const char __pyx_k_format[] = "format";
+static const char __pyx_k_import[] = "__import__";
+static const char __pyx_k_pickle[] = "pickle";
+static const char __pyx_k_reduce[] = "__reduce__";
+static const char __pyx_k_remove[] = "remove";
+static const char __pyx_k_update[] = "update";
+static const char __pyx_k_getstate[] = "__getstate__";
+static const char __pyx_k_pyx_type[] = "__pyx_type";
+static const char __pyx_k_register[] = "register";
+static const char __pyx_k_reversed[] = "__reversed__";
+static const char __pyx_k_setstate[] = "__setstate__";
+static const char __pyx_k_pyx_state[] = "__pyx_state";
+static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
+static const char __pyx_k_FrozenList[] = "FrozenList";
+static const char __pyx_k_pyx_result[] = "__pyx_result";
+static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
+static const char __pyx_k_PickleError[] = "PickleError";
+static const char __pyx_k_RuntimeError[] = "RuntimeError";
+static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
+static const char __pyx_k_stringsource[] = "stringsource";
+static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
+static const char __pyx_k_MutableSequence[] = "MutableSequence";
+static const char __pyx_k_collections_abc[] = "collections.abc";
+static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
+static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
+static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
+static const char __pyx_k_FrozenList_frozen_r[] = "<FrozenList(frozen={}, {!r})>";
+static const char __pyx_k_aiohttp__frozenlist[] = "aiohttp._frozenlist";
+static const char __pyx_k_pyx_unpickle_FrozenList[] = "__pyx_unpickle_FrozenList";
+static const char __pyx_k_Cannot_modify_frozen_list[] = "Cannot modify frozen list.";
+static const char __pyx_k_Incompatible_checksums_s_vs_0x94[] = "Incompatible checksums (%s vs 0x949a143 = (_items, frozen))";
+static PyObject *__pyx_kp_u_Cannot_modify_frozen_list;
+static PyObject *__pyx_n_s_FrozenList;
+static PyObject *__pyx_kp_u_FrozenList_frozen_r;
+static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x94;
+static PyObject *__pyx_n_s_MutableSequence;
+static PyObject *__pyx_n_s_PickleError;
+static PyObject *__pyx_n_s_RuntimeError;
+static PyObject *__pyx_n_s_aiohttp__frozenlist;
+static PyObject *__pyx_n_s_clear;
+static PyObject *__pyx_n_s_cline_in_traceback;
+static PyObject *__pyx_n_s_collections_abc;
+static PyObject *__pyx_n_s_count;
+static PyObject *__pyx_n_s_dict;
+static PyObject *__pyx_n_s_format;
+static PyObject *__pyx_n_s_getstate;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_index;
+static PyObject *__pyx_n_s_item;
+static PyObject *__pyx_n_s_items;
+static PyObject *__pyx_n_s_iter;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_name;
+static PyObject *__pyx_n_s_new;
+static PyObject *__pyx_n_s_pickle;
+static PyObject *__pyx_n_s_pop;
+static PyObject *__pyx_n_s_pos;
+static PyObject *__pyx_n_s_pyx_PickleError;
+static PyObject *__pyx_n_s_pyx_checksum;
+static PyObject *__pyx_n_s_pyx_result;
+static PyObject *__pyx_n_s_pyx_state;
+static PyObject *__pyx_n_s_pyx_type;
+static PyObject *__pyx_n_s_pyx_unpickle_FrozenList;
+static PyObject *__pyx_n_s_pyx_vtable;
+static PyObject *__pyx_n_s_reduce;
+static PyObject *__pyx_n_s_reduce_cython;
+static PyObject *__pyx_n_s_reduce_ex;
+static PyObject *__pyx_n_s_register;
+static PyObject *__pyx_n_s_remove;
+static PyObject *__pyx_n_s_reversed;
+static PyObject *__pyx_n_s_setstate;
+static PyObject *__pyx_n_s_setstate_cython;
+static PyObject *__pyx_kp_s_stringsource;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_update;
+static int __pyx_pf_7aiohttp_11_frozenlist_10FrozenList___init__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_items); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_2freeze(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_4__getitem__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
+static int __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_6__setitem__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
+static int __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_8__delitem__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
+static Py_ssize_t __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_10__len__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_12__iter__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_14__reversed__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_16__richcmp__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_other, PyObject *__pyx_v_op); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_18insert(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_pos, PyObject *__pyx_v_item); /* proto */
+static int __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_20__contains__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_22__iadd__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_items); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_24index(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_26remove(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_28clear(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_30extend(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_items); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_32reverse(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_34pop(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_36append(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_38count(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_40__repr__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_6frozen___get__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_42__reduce_cython__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_44__setstate_cython__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist___pyx_unpickle_FrozenList(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_tp_new_7aiohttp_11_frozenlist_FrozenList(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_int_0;
+static PyObject *__pyx_int_1;
+static PyObject *__pyx_int_2;
+static PyObject *__pyx_int_3;
+static PyObject *__pyx_int_4;
+static PyObject *__pyx_int_5;
+static PyObject *__pyx_int_155820355;
+static PyObject *__pyx_int_neg_1;
+static PyObject *__pyx_tuple_;
+static PyObject *__pyx_tuple__2;
+static PyObject *__pyx_codeobj__3;
+/* Late includes */
+
+/* "aiohttp/_frozenlist.pyx":9
+ * cdef list _items
+ *
+ * def __init__(self, items=None): # <<<<<<<<<<<<<<
+ * self.frozen = False
+ * if items is not None:
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_items = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_items,0};
+ PyObject* values[1] = {0};
+ values[0] = ((PyObject *)Py_None);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_items);
+ if (value) { values[0] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 9, __pyx_L3_error)
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_items = values[0];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 9, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList___init__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), __pyx_v_items);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
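+/* The wrapper above implements CPython's calling convention by hand:
+   positional arguments are copied out of the tuple, `items` defaults to
+   None, and stray or duplicate keywords raise through
+   __Pyx_ParseOptionalKeywords / __Pyx_RaiseArgtupleInvalid. */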
+
+static int __pyx_pf_7aiohttp_11_frozenlist_10FrozenList___init__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_items) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__init__", 0);
+ __Pyx_INCREF(__pyx_v_items);
+
+ /* "aiohttp/_frozenlist.pyx":10
+ *
+ * def __init__(self, items=None):
+ * self.frozen = False # <<<<<<<<<<<<<<
+ * if items is not None:
+ * items = list(items)
+ */
+ __pyx_v_self->frozen = 0;
+
+ /* "aiohttp/_frozenlist.pyx":11
+ * def __init__(self, items=None):
+ * self.frozen = False
+ * if items is not None: # <<<<<<<<<<<<<<
+ * items = list(items)
+ * else:
+ */
+ __pyx_t_1 = (__pyx_v_items != Py_None);
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_frozenlist.pyx":12
+ * self.frozen = False
+ * if items is not None:
+ * items = list(items) # <<<<<<<<<<<<<<
+ * else:
+ * items = []
+ */
+ __pyx_t_3 = PySequence_List(__pyx_v_items); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_items, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":11
+ * def __init__(self, items=None):
+ * self.frozen = False
+ * if items is not None: # <<<<<<<<<<<<<<
+ * items = list(items)
+ * else:
+ */
+ goto __pyx_L3;
+ }
+
+ /* "aiohttp/_frozenlist.pyx":14
+ * items = list(items)
+ * else:
+ * items = [] # <<<<<<<<<<<<<<
+ * self._items = items
+ *
+ */
+ /*else*/ {
+ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_items, __pyx_t_3);
+ __pyx_t_3 = 0;
+ }
+ __pyx_L3:;
+
+ /* "aiohttp/_frozenlist.pyx":15
+ * else:
+ * items = []
+ * self._items = items # <<<<<<<<<<<<<<
+ *
+ * cdef object _check_frozen(self):
+ */
+ if (!(likely(PyList_CheckExact(__pyx_v_items))||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "list", Py_TYPE(__pyx_v_items)->tp_name), 0))) __PYX_ERR(0, 15, __pyx_L1_error)
+ __pyx_t_3 = __pyx_v_items;
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->_items);
+ __Pyx_DECREF(__pyx_v_self->_items);
+ __pyx_v_self->_items = ((PyObject*)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":9
+ * cdef list _items
+ *
+ * def __init__(self, items=None): # <<<<<<<<<<<<<<
+ * self.frozen = False
+ * if items is not None:
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_items);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":17
+ * self._items = items
+ *
+ * cdef object _check_frozen(self): # <<<<<<<<<<<<<<
+ * if self.frozen:
+ * raise RuntimeError("Cannot modify frozen list.")
+ */
+
+static PyObject *__pyx_f_7aiohttp_11_frozenlist_10FrozenList__check_frozen(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_check_frozen", 0);
+
+ /* "aiohttp/_frozenlist.pyx":18
+ *
+ * cdef object _check_frozen(self):
+ * if self.frozen: # <<<<<<<<<<<<<<
+ * raise RuntimeError("Cannot modify frozen list.")
+ *
+ */
+ __pyx_t_1 = (__pyx_v_self->frozen != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_frozenlist.pyx":19
+ * cdef object _check_frozen(self):
+ * if self.frozen:
+ * raise RuntimeError("Cannot modify frozen list.") # <<<<<<<<<<<<<<
+ *
+ * cdef inline object _fast_len(self):
+ */
+ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_Raise(__pyx_t_2, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __PYX_ERR(0, 19, __pyx_L1_error)
+
+ /* "aiohttp/_frozenlist.pyx":18
+ *
+ * cdef object _check_frozen(self):
+ * if self.frozen: # <<<<<<<<<<<<<<
+ * raise RuntimeError("Cannot modify frozen list.")
+ *
+ */
+ }
+
+ /* "aiohttp/_frozenlist.pyx":17
+ * self._items = items
+ *
+ * cdef object _check_frozen(self): # <<<<<<<<<<<<<<
+ * if self.frozen:
+ * raise RuntimeError("Cannot modify frozen list.")
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList._check_frozen", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":21
+ * raise RuntimeError("Cannot modify frozen list.")
+ *
+ * cdef inline object _fast_len(self): # <<<<<<<<<<<<<<
+ * return len(self._items)
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_11_frozenlist_10FrozenList__fast_len(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_fast_len", 0);
+
+ /* "aiohttp/_frozenlist.pyx":22
+ *
+ * cdef inline object _fast_len(self):
+ * return len(self._items) # <<<<<<<<<<<<<<
+ *
+ * def freeze(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __pyx_v_self->_items;
+ __Pyx_INCREF(__pyx_t_1);
+ if (unlikely(__pyx_t_1 == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(0, 22, __pyx_L1_error)
+ }
+ __pyx_t_2 = PyList_GET_SIZE(__pyx_t_1); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 22, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyInt_FromSsize_t(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":21
+ * raise RuntimeError("Cannot modify frozen list.")
+ *
+ * cdef inline object _fast_len(self): # <<<<<<<<<<<<<<
+ * return len(self._items)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList._fast_len", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":24
+ * return len(self._items)
+ *
+ * def freeze(self): # <<<<<<<<<<<<<<
+ * self.frozen = True
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_3freeze(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_3freeze(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("freeze (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_2freeze(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_2freeze(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("freeze", 0);
+
+ /* "aiohttp/_frozenlist.pyx":25
+ *
+ * def freeze(self):
+ * self.frozen = True # <<<<<<<<<<<<<<
+ *
+ * def __getitem__(self, index):
+ */
+ __pyx_v_self->frozen = 1;
+
+ /* "aiohttp/_frozenlist.pyx":24
+ * return len(self._items)
+ *
+ * def freeze(self): # <<<<<<<<<<<<<<
+ * self.frozen = True
+ *
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":27
+ * self.frozen = True
+ *
+ * def __getitem__(self, index): # <<<<<<<<<<<<<<
+ * return self._items[index]
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_5__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_5__getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_4__getitem__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_index));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_4__getitem__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_index) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__getitem__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":28
+ *
+ * def __getitem__(self, index):
+ * return self._items[index] # <<<<<<<<<<<<<<
+ *
+ * def __setitem__(self, index, value):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ if (unlikely(__pyx_v_self->_items == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 28, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_PyObject_GetItem(__pyx_v_self->_items, __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 28, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":27
+ * self.frozen = True
+ *
+ * def __getitem__(self, index): # <<<<<<<<<<<<<<
+ * return self._items[index]
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":30
+ * return self._items[index]
+ *
+ * def __setitem__(self, index, value): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items[index] = value
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_7__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
+static int __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_7__setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_6__setitem__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_6__setitem__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setitem__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":31
+ *
+ * def __setitem__(self, index, value):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * self._items[index] = value
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 31, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":32
+ * def __setitem__(self, index, value):
+ * self._check_frozen()
+ * self._items[index] = value # <<<<<<<<<<<<<<
+ *
+ * def __delitem__(self, index):
+ */
+ if (unlikely(__pyx_v_self->_items == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 32, __pyx_L1_error)
+ }
+ if (unlikely(PyObject_SetItem(__pyx_v_self->_items, __pyx_v_index, __pyx_v_value) < 0)) __PYX_ERR(0, 32, __pyx_L1_error)
+
+ /* "aiohttp/_frozenlist.pyx":30
+ * return self._items[index]
+ *
+ * def __setitem__(self, index, value): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items[index] = value
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":34
+ * self._items[index] = value
+ *
+ * def __delitem__(self, index): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * del self._items[index]
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_9__delitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
+static int __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_9__delitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__delitem__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_8__delitem__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_index));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_8__delitem__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_index) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__delitem__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":35
+ *
+ * def __delitem__(self, index):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * del self._items[index]
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":36
+ * def __delitem__(self, index):
+ * self._check_frozen()
+ * del self._items[index] # <<<<<<<<<<<<<<
+ *
+ * def __len__(self):
+ */
+ if (unlikely(__pyx_v_self->_items == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 36, __pyx_L1_error)
+ }
+ if (unlikely(PyObject_DelItem(__pyx_v_self->_items, __pyx_v_index) < 0)) __PYX_ERR(0, 36, __pyx_L1_error)
+
+ /* "aiohttp/_frozenlist.pyx":34
+ * self._items[index] = value
+ *
+ * def __delitem__(self, index): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * del self._items[index]
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__delitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":38
+ * del self._items[index]
+ *
+ * def __len__(self): # <<<<<<<<<<<<<<
+ * return self._fast_len()
+ *
+ */
+
+/* Python wrapper */
+static Py_ssize_t __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_11__len__(PyObject *__pyx_v_self); /*proto*/
+static Py_ssize_t __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_11__len__(PyObject *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_10__len__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static Py_ssize_t __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_10__len__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ Py_ssize_t __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__len__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":39
+ *
+ * def __len__(self):
+ * return self._fast_len() # <<<<<<<<<<<<<<
+ *
+ * def __iter__(self):
+ */
+ __pyx_t_1 = __pyx_f_7aiohttp_11_frozenlist_10FrozenList__fast_len(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 39, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_t_1); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 39, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_2;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":38
+ * del self._items[index]
+ *
+ * def __len__(self): # <<<<<<<<<<<<<<
+ * return self._fast_len()
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__len__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":41
+ * return self._fast_len()
+ *
+ * def __iter__(self): # <<<<<<<<<<<<<<
+ * return self._items.__iter__()
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_13__iter__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_13__iter__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__iter__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_12__iter__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_12__iter__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__iter__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":42
+ *
+ * def __iter__(self):
+ * return self._items.__iter__() # <<<<<<<<<<<<<<
+ *
+ * def __reversed__(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_items, __pyx_n_s_iter); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 42, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 42, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":41
+ * return self._fast_len()
+ *
+ * def __iter__(self): # <<<<<<<<<<<<<<
+ * return self._items.__iter__()
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__iter__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
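+
+/* NOTE (hand annotation, not Cython output): the block above is the generic
+ * method-call pattern Cython emits for `obj.method()`: fetch the attribute,
+ * and when CYTHON_UNPACK_METHODS is enabled and it is a bound method, unpack
+ * it into (function, self) so the call avoids the bound-method indirection.
+ * The same pattern recurs below in __reversed__, index(), remove(), clear()
+ * and count(). In plain Python terms it is just:
+ *
+ *     m = self._items.__iter__    # bound method
+ *     it = m()                    # fast path: type(items).__iter__(items)
+ */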
+
+/* "aiohttp/_frozenlist.pyx":44
+ * return self._items.__iter__()
+ *
+ * def __reversed__(self): # <<<<<<<<<<<<<<
+ * return self._items.__reversed__()
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_15__reversed__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_15__reversed__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reversed__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_14__reversed__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_14__reversed__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reversed__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":45
+ *
+ * def __reversed__(self):
+ * return self._items.__reversed__() # <<<<<<<<<<<<<<
+ *
+ * def __richcmp__(self, other, op):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_items, __pyx_n_s_reversed); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 45, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 45, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":44
+ * return self._items.__iter__()
+ *
+ * def __reversed__(self): # <<<<<<<<<<<<<<
+ * return self._items.__reversed__()
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__reversed__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":47
+ * return self._items.__reversed__()
+ *
+ * def __richcmp__(self, other, op): # <<<<<<<<<<<<<<
+ * if op == 0: # <
+ * return list(self) < other
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_17__richcmp__(PyObject *__pyx_v_self, PyObject *__pyx_v_other, int __pyx_arg_op); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_17__richcmp__(PyObject *__pyx_v_self, PyObject *__pyx_v_other, int __pyx_arg_op) {
+ PyObject *__pyx_v_op = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__richcmp__ (wrapper)", 0);
+ __pyx_v_op = __Pyx_PyInt_From_int(__pyx_arg_op); if (unlikely(!__pyx_v_op)) __PYX_ERR(0, 47, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_v_op);
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__richcmp__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_16__richcmp__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_other), ((PyObject *)__pyx_v_op));
+
+ /* function exit code */
+ __Pyx_XDECREF(__pyx_v_op);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_16__richcmp__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_other, PyObject *__pyx_v_op) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__richcmp__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":48
+ *
+ * def __richcmp__(self, other, op):
+ * if op == 0: # < # <<<<<<<<<<<<<<
+ * return list(self) < other
+ * if op == 1: # <=
+ */
+ __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_v_op, __pyx_int_0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 48, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 48, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_frozenlist.pyx":49
+ * def __richcmp__(self, other, op):
+ * if op == 0: # <
+ * return list(self) < other # <<<<<<<<<<<<<<
+ * if op == 1: # <=
+ * return list(self) <= other
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PySequence_List(((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 49, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_v_other, Py_LT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 49, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":48
+ *
+ * def __richcmp__(self, other, op):
+ * if op == 0: # < # <<<<<<<<<<<<<<
+ * return list(self) < other
+ * if op == 1: # <=
+ */
+ }
+
+ /* "aiohttp/_frozenlist.pyx":50
+ * if op == 0: # <
+ * return list(self) < other
+ * if op == 1: # <= # <<<<<<<<<<<<<<
+ * return list(self) <= other
+ * if op == 2: # ==
+ */
+ __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_v_op, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 50, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 50, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_frozenlist.pyx":51
+ * return list(self) < other
+ * if op == 1: # <=
+ * return list(self) <= other # <<<<<<<<<<<<<<
+ * if op == 2: # ==
+ * return list(self) == other
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PySequence_List(((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 51, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_RichCompare(__pyx_t_3, __pyx_v_other, Py_LE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":50
+ * if op == 0: # <
+ * return list(self) < other
+ * if op == 1: # <= # <<<<<<<<<<<<<<
+ * return list(self) <= other
+ * if op == 2: # ==
+ */
+ }
+
+ /* "aiohttp/_frozenlist.pyx":52
+ * if op == 1: # <=
+ * return list(self) <= other
+ * if op == 2: # == # <<<<<<<<<<<<<<
+ * return list(self) == other
+ * if op == 3: # !=
+ */
+ __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_v_op, __pyx_int_2, 2, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 52, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 52, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_frozenlist.pyx":53
+ * return list(self) <= other
+ * if op == 2: # ==
+ * return list(self) == other # <<<<<<<<<<<<<<
+ * if op == 3: # !=
+ * return list(self) != other
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PySequence_List(((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 53, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_v_other, Py_EQ); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 53, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":52
+ * if op == 1: # <=
+ * return list(self) <= other
+ * if op == 2: # == # <<<<<<<<<<<<<<
+ * return list(self) == other
+ * if op == 3: # !=
+ */
+ }
+
+ /* "aiohttp/_frozenlist.pyx":54
+ * if op == 2: # ==
+ * return list(self) == other
+ * if op == 3: # != # <<<<<<<<<<<<<<
+ * return list(self) != other
+ * if op == 4: # >
+ */
+ __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_v_op, __pyx_int_3, 3, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 54, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 54, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_frozenlist.pyx":55
+ * return list(self) == other
+ * if op == 3: # !=
+ * return list(self) != other # <<<<<<<<<<<<<<
+ * if op == 4: # >
+ * return list(self) > other
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PySequence_List(((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 55, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_RichCompare(__pyx_t_3, __pyx_v_other, Py_NE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 55, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":54
+ * if op == 2: # ==
+ * return list(self) == other
+ * if op == 3: # != # <<<<<<<<<<<<<<
+ * return list(self) != other
+ * if op == 4: # >
+ */
+ }
+
+ /* "aiohttp/_frozenlist.pyx":56
+ * if op == 3: # !=
+ * return list(self) != other
+ * if op == 4: # > # <<<<<<<<<<<<<<
+ * return list(self) > other
+ * if op == 5: # >=
+ */
+ __pyx_t_1 = __Pyx_PyInt_EqObjC(__pyx_v_op, __pyx_int_4, 4, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 56, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_frozenlist.pyx":57
+ * return list(self) != other
+ * if op == 4: # >
+ * return list(self) > other # <<<<<<<<<<<<<<
+ * if op == 5: # >=
+ * return list(self) >= other
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = PySequence_List(((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 57, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyObject_RichCompare(__pyx_t_1, __pyx_v_other, Py_GT); __Pyx_XGOTREF(__pyx_t_3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 57, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":56
+ * if op == 3: # !=
+ * return list(self) != other
+ * if op == 4: # > # <<<<<<<<<<<<<<
+ * return list(self) > other
+ * if op == 5: # =>
+ */
+ }
+
+ /* "aiohttp/_frozenlist.pyx":58
+ * if op == 4: # >
+ * return list(self) > other
+ * if op == 5: # >= # <<<<<<<<<<<<<<
+ * return list(self) >= other
+ *
+ */
+ __pyx_t_3 = __Pyx_PyInt_EqObjC(__pyx_v_op, __pyx_int_5, 5, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 58, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_3); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 58, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_frozenlist.pyx":59
+ * return list(self) > other
+ * if op == 5: # >=
+ * return list(self) >= other # <<<<<<<<<<<<<<
+ *
+ * def insert(self, pos, item):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PySequence_List(((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 59, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = PyObject_RichCompare(__pyx_t_3, __pyx_v_other, Py_GE); __Pyx_XGOTREF(__pyx_t_1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 59, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":58
+ * if op == 4: # >
+ * return list(self) > other
+ * if op == 5: # >= # <<<<<<<<<<<<<<
+ * return list(self) >= other
+ *
+ */
+ }
+
+ /* "aiohttp/_frozenlist.pyx":47
+ * return self._items.__reversed__()
+ *
+ * def __richcmp__(self, other, op): # <<<<<<<<<<<<<<
+ * if op == 0: # <
+ * return list(self) < other
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__richcmp__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
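+
+/* NOTE (hand annotation, not Cython output): __richcmp__ receives the
+ * comparison opcode as an int; the constants 0..5 tested above correspond to
+ * CPython's Py_LT, Py_LE, Py_EQ, Py_NE, Py_GT, Py_GE. Each branch
+ * materialises the FrozenList as a plain list (PySequence_List) and
+ * delegates via PyObject_RichCompare, so comparisons behave exactly like
+ * list comparisons; if no branch matches, the function returns None:
+ *
+ *     FrozenList([1, 2]) == [1, 2]    # True: list(self) == other
+ *     FrozenList([1, 2]) <  [1, 3]    # True: list(self) <  other
+ */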
+
+/* "aiohttp/_frozenlist.pyx":61
+ * return list(self) >= other
+ *
+ * def insert(self, pos, item): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items.insert(pos, item)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_19insert(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_19insert(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_pos = 0;
+ PyObject *__pyx_v_item = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("insert (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pos,&__pyx_n_s_item,0};
+ PyObject* values[2] = {0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pos)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_item)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("insert", 1, 2, 2, 1); __PYX_ERR(0, 61, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "insert") < 0)) __PYX_ERR(0, 61, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ __pyx_v_pos = values[0];
+ __pyx_v_item = values[1];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("insert", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 61, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.insert", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_18insert(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), __pyx_v_pos, __pyx_v_item);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
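+
+/* NOTE (hand annotation, not Cython output): the wrapper above is Cython's
+ * standard METH_VARARGS|METH_KEYWORDS argument unpacker: positional args are
+ * copied out of the tuple, missing ones are looked up by name in the keyword
+ * dict, and any mismatch funnels into __Pyx_RaiseArgtupleInvalid. It accepts
+ * the same spellings a def function would:
+ *
+ *     fl.insert(0, "x")
+ *     fl.insert(pos=0, item="x")
+ *     fl.insert(0, item="x")
+ */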
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_18insert(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_pos, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("insert", 0);
+
+ /* "aiohttp/_frozenlist.pyx":62
+ *
+ * def insert(self, pos, item):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * self._items.insert(pos, item)
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":63
+ * def insert(self, pos, item):
+ * self._check_frozen()
+ * self._items.insert(pos, item) # <<<<<<<<<<<<<<
+ *
+ * def __contains__(self, item):
+ */
+ if (unlikely(__pyx_v_self->_items == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "insert");
+ __PYX_ERR(0, 63, __pyx_L1_error)
+ }
+ __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_v_pos); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 63, __pyx_L1_error)
+ __pyx_t_3 = PyList_Insert(__pyx_v_self->_items, __pyx_t_2, __pyx_v_item); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 63, __pyx_L1_error)
+
+ /* "aiohttp/_frozenlist.pyx":61
+ * return list(self) >= other
+ *
+ * def insert(self, pos, item): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items.insert(pos, item)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.insert", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":65
+ * self._items.insert(pos, item)
+ *
+ * def __contains__(self, item): # <<<<<<<<<<<<<<
+ * return item in self._items
+ *
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_21__contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
+static int __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_21__contains__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__contains__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_20__contains__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_item));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_20__contains__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__contains__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":66
+ *
+ * def __contains__(self, item):
+ * return item in self._items # <<<<<<<<<<<<<<
+ *
+ * def __iadd__(self, items):
+ */
+ __pyx_t_1 = (__Pyx_PySequence_ContainsTF(__pyx_v_item, __pyx_v_self->_items, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 66, __pyx_L1_error)
+ __pyx_r = __pyx_t_1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":65
+ * self._items.insert(pos, item)
+ *
+ * def __contains__(self, item): # <<<<<<<<<<<<<<
+ * return item in self._items
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__contains__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":68
+ * return item in self._items
+ *
+ * def __iadd__(self, items): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items += list(items)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_23__iadd__(PyObject *__pyx_v_self, PyObject *__pyx_v_items); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_23__iadd__(PyObject *__pyx_v_self, PyObject *__pyx_v_items) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__iadd__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_22__iadd__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_items));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_22__iadd__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_items) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__iadd__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":69
+ *
+ * def __iadd__(self, items):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * self._items += list(items)
+ * return self
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":70
+ * def __iadd__(self, items):
+ * self._check_frozen()
+ * self._items += list(items) # <<<<<<<<<<<<<<
+ * return self
+ *
+ */
+ __pyx_t_1 = PySequence_List(__pyx_v_items); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_self->_items, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->_items);
+ __Pyx_DECREF(__pyx_v_self->_items);
+ __pyx_v_self->_items = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":71
+ * self._check_frozen()
+ * self._items += list(items)
+ * return self # <<<<<<<<<<<<<<
+ *
+ * def index(self, item):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_self));
+ __pyx_r = ((PyObject *)__pyx_v_self);
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":68
+ * return item in self._items
+ *
+ * def __iadd__(self, items): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items += list(items)
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__iadd__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
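+
+/* NOTE (hand annotation, not Cython output): `self._items += list(items)`
+ * compiles to PySequence_List + PyNumber_InPlaceAdd, and the result is
+ * stored back into the cdef slot with the GIVEREF/DECREF dance above because
+ * in-place add on a list returns the same object with a new reference.
+ * Returning self is what makes the Python-level statement work:
+ *
+ *     fl = FrozenList([1])
+ *     fl += (2, 3)        # calls __iadd__, rebinds fl to the returned self
+ *     list(fl)            # [1, 2, 3]
+ */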
+
+/* "aiohttp/_frozenlist.pyx":73
+ * return self
+ *
+ * def index(self, item): # <<<<<<<<<<<<<<
+ * return self._items.index(item)
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_25index(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_25index(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("index (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_24index(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_item));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_24index(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("index", 0);
+
+ /* "aiohttp/_frozenlist.pyx":74
+ *
+ * def index(self, item):
+ * return self._items.index(item) # <<<<<<<<<<<<<<
+ *
+ * def remove(self, item):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_items, __pyx_n_s_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 74, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_item) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_item);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":73
+ * return self
+ *
+ * def index(self, item): # <<<<<<<<<<<<<<
+ * return self._items.index(item)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.index", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":76
+ * return self._items.index(item)
+ *
+ * def remove(self, item): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items.remove(item)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_27remove(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_27remove(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("remove (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_26remove(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_item));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_26remove(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("remove", 0);
+
+ /* "aiohttp/_frozenlist.pyx":77
+ *
+ * def remove(self, item):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * self._items.remove(item)
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 77, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":78
+ * def remove(self, item):
+ * self._check_frozen()
+ * self._items.remove(item) # <<<<<<<<<<<<<<
+ *
+ * def clear(self):
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_items, __pyx_n_s_remove); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 78, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_item) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_item);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 78, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":76
+ * return self._items.index(item)
+ *
+ * def remove(self, item): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items.remove(item)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.remove", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":80
+ * self._items.remove(item)
+ *
+ * def clear(self): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items.clear()
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_29clear(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_29clear(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("clear (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_28clear(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_28clear(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("clear", 0);
+
+ /* "aiohttp/_frozenlist.pyx":81
+ *
+ * def clear(self):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * self._items.clear()
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 81, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":82
+ * def clear(self):
+ * self._check_frozen()
+ * self._items.clear() # <<<<<<<<<<<<<<
+ *
+ * def extend(self, items):
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_items, __pyx_n_s_clear); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 82, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 82, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":80
+ * self._items.remove(item)
+ *
+ * def clear(self): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items.clear()
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.clear", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":84
+ * self._items.clear()
+ *
+ * def extend(self, items): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items += list(items)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_31extend(PyObject *__pyx_v_self, PyObject *__pyx_v_items); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_31extend(PyObject *__pyx_v_self, PyObject *__pyx_v_items) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("extend (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_30extend(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_items));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_30extend(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_items) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("extend", 0);
+
+ /* "aiohttp/_frozenlist.pyx":85
+ *
+ * def extend(self, items):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * self._items += list(items)
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":86
+ * def extend(self, items):
+ * self._check_frozen()
+ * self._items += list(items) # <<<<<<<<<<<<<<
+ *
+ * def reverse(self):
+ */
+ __pyx_t_1 = PySequence_List(__pyx_v_items); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 86, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyNumber_InPlaceAdd(__pyx_v_self->_items, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 86, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->_items);
+ __Pyx_DECREF(__pyx_v_self->_items);
+ __pyx_v_self->_items = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":84
+ * self._items.clear()
+ *
+ * def extend(self, items): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items += list(items)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.extend", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":88
+ * self._items += list(items)
+ *
+ * def reverse(self): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items.reverse()
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_33reverse(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_33reverse(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("reverse (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_32reverse(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_32reverse(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("reverse", 0);
+
+ /* "aiohttp/_frozenlist.pyx":89
+ *
+ * def reverse(self):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * self._items.reverse()
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":90
+ * def reverse(self):
+ * self._check_frozen()
+ * self._items.reverse() # <<<<<<<<<<<<<<
+ *
+ * def pop(self, index=-1):
+ */
+ if (unlikely(__pyx_v_self->_items == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "reverse");
+ __PYX_ERR(0, 90, __pyx_L1_error)
+ }
+ __pyx_t_2 = PyList_Reverse(__pyx_v_self->_items); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 90, __pyx_L1_error)
+
+ /* "aiohttp/_frozenlist.pyx":88
+ * self._items += list(items)
+ *
+ * def reverse(self): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * self._items.reverse()
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.reverse", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
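+
+/* NOTE (hand annotation, not Cython output): because _items is declared
+ * `cdef list`, Cython specialises this call to PyList_Reverse() directly
+ * instead of the generic attribute-lookup pattern used for remove() and
+ * clear() above; insert(), append() and pop() get the same treatment via
+ * PyList_Insert, __Pyx_PyList_Append and __Pyx_PyList_PopIndex. The explicit
+ * Py_None check re-creates the AttributeError that the attribute lookup
+ * itself would have raised:
+ *
+ *     fl.reverse()        # dispatches to PyList_Reverse(self._items)
+ */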
+
+/* "aiohttp/_frozenlist.pyx":92
+ * self._items.reverse()
+ *
+ * def pop(self, index=-1): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * return self._items.pop(index)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_35pop(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_35pop(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_index = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("pop (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_index,0};
+ PyObject* values[1] = {0};
+ values[0] = ((PyObject *)__pyx_int_neg_1);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_index);
+ if (value) { values[0] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pop") < 0)) __PYX_ERR(0, 92, __pyx_L3_error)
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_index = values[0];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("pop", 0, 0, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 92, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.pop", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_34pop(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), __pyx_v_index);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_34pop(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_index) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("pop", 0);
+
+ /* "aiohttp/_frozenlist.pyx":93
+ *
+ * def pop(self, index=-1):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * return self._items.pop(index)
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":94
+ * def pop(self, index=-1):
+ * self._check_frozen()
+ * return self._items.pop(index) # <<<<<<<<<<<<<<
+ *
+ * def append(self, item):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ if (unlikely(__pyx_v_self->_items == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "pop");
+ __PYX_ERR(0, 94, __pyx_L1_error)
+ }
+ __pyx_t_2 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_2 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 94, __pyx_L1_error)
+ __pyx_t_1 = __Pyx_PyList_PopIndex(__pyx_v_self->_items, __pyx_v_index, __pyx_t_2, 1, Py_ssize_t, PyInt_FromSsize_t); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 94, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":92
+ * self._items.reverse()
+ *
+ * def pop(self, index=-1): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * return self._items.pop(index)
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.pop", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
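+
+/* NOTE (hand annotation, not Cython output): the pop() wrapper seeds
+ * values[0] with the interned constant __pyx_int_neg_1, which is how the
+ * Cython-level default `index=-1` survives into C; __Pyx_PyList_PopIndex
+ * then converts the index once and pops in place:
+ *
+ *     fl.pop()            # uses the -1 default, pops the last item
+ *     fl.pop(0)           # explicit index, positional or index=0
+ */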
+
+/* "aiohttp/_frozenlist.pyx":96
+ * return self._items.pop(index)
+ *
+ * def append(self, item): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * return self._items.append(item)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_37append(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_37append(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("append (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_36append(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_item));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_36append(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("append", 0);
+
+ /* "aiohttp/_frozenlist.pyx":97
+ *
+ * def append(self, item):
+ * self._check_frozen() # <<<<<<<<<<<<<<
+ * return self._items.append(item)
+ *
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self->__pyx_vtab)->_check_frozen(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 97, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":98
+ * def append(self, item):
+ * self._check_frozen()
+ * return self._items.append(item) # <<<<<<<<<<<<<<
+ *
+ * def count(self, item):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ if (unlikely(__pyx_v_self->_items == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "append");
+ __PYX_ERR(0, 98, __pyx_L1_error)
+ }
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_v_self->_items, __pyx_v_item); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 98, __pyx_L1_error)
+ __pyx_t_1 = __Pyx_Owned_Py_None(__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 98, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":96
+ * return self._items.pop(index)
+ *
+ * def append(self, item): # <<<<<<<<<<<<<<
+ * self._check_frozen()
+ * return self._items.append(item)
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.append", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
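+
+/* NOTE (hand annotation, not Cython output): `return self._items.append(item)`
+ * specialises to __Pyx_PyList_Append plus __Pyx_Owned_Py_None, i.e. append
+ * in place and hand back an owned None reference, matching list.append():
+ *
+ *     assert fl.append("x") is None
+ */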
+
+/* "aiohttp/_frozenlist.pyx":100
+ * return self._items.append(item)
+ *
+ * def count(self, item): # <<<<<<<<<<<<<<
+ * return self._items.count(item)
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_39count(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_39count(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("count (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_38count(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v_item));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_38count(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v_item) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("count", 0);
+
+ /* "aiohttp/_frozenlist.pyx":101
+ *
+ * def count(self, item):
+ * return self._items.count(item) # <<<<<<<<<<<<<<
+ *
+ * def __repr__(self):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_items, __pyx_n_s_count); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 101, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_v_item) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v_item);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 101, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":100
+ * return self._items.append(item)
+ *
+ * def count(self, item): # <<<<<<<<<<<<<<
+ * return self._items.count(item)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.count", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_frozenlist.pyx":103
+ * return self._items.count(item)
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * return '<FrozenList(frozen={}, {!r})>'.format(self.frozen,
+ * self._items)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_41__repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_41__repr__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_40__repr__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_40__repr__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__repr__", 0);
+
+ /* "aiohttp/_frozenlist.pyx":104
+ *
+ * def __repr__(self):
+ * return '<FrozenList(frozen={}, {!r})>'.format(self.frozen, # <<<<<<<<<<<<<<
+ * self._items)
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_FrozenList_frozen_r, __pyx_n_s_format); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyBool_FromLong(__pyx_v_self->frozen); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+
+ /* "aiohttp/_frozenlist.pyx":105
+ * def __repr__(self):
+ * return '<FrozenList(frozen={}, {!r})>'.format(self.frozen,
+ * self._items) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_4 = NULL;
+ __pyx_t_5 = 0;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ __pyx_t_5 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_2)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_v_self->_items};
+ __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_t_3, __pyx_v_self->_items};
+ __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ } else
+ #endif
+ {
+ __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ if (__pyx_t_4) {
+ __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL;
+ }
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_t_3);
+ __Pyx_INCREF(__pyx_v_self->_items);
+ __Pyx_GIVEREF(__pyx_v_self->_items);
+ PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_v_self->_items);
+ __pyx_t_3 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 104, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_frozenlist.pyx":103
+ * return self._items.count(item)
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * return '<FrozenList(frozen={}, {!r})>'.format(self.frozen,
+ * self._items)
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
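+
+/* NOTE (hand annotation, not Cython output): the three call paths above are
+ * Cython's fast-call dispatch for str.format: __Pyx_PyFunction_FastCall or
+ * __Pyx_PyCFunction_FastCall when the build supports them, otherwise a
+ * PyTuple_New + __Pyx_PyObject_Call fallback; __pyx_t_5 shifts the argument
+ * window by one slot when the bound method was unpacked. The observable
+ * result is simply:
+ *
+ *     repr(FrozenList([1]))   # "<FrozenList(frozen=False, [1])>"
+ */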
+
+/* "aiohttp/_frozenlist.pyx":6
+ * cdef class FrozenList:
+ *
+ * cdef readonly bint frozen # <<<<<<<<<<<<<<
+ * cdef list _items
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_6frozen_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_6frozen_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_6frozen___get__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_6frozen___get__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_self->frozen); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.frozen.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
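+
+/* Getter for the readonly `frozen` attribute declared in the .pyx class body:
+ * it simply boxes the C `bint` field as a Python bool. It is exposed through
+ * the tp_getset table further down. */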
+
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_43__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_43__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_42__reduce_cython__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_42__reduce_cython__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self) {
+ PyObject *__pyx_v_state = 0;
+ PyObject *__pyx_v__dict = 0;
+ int __pyx_v_use_setstate;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":5
+ * cdef object _dict
+ * cdef bint use_setstate
+ * state = (self._items, self.frozen) # <<<<<<<<<<<<<<
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_self->frozen); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_self->_items);
+ __Pyx_GIVEREF(__pyx_v_self->_items);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_self->_items);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_v_state = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "(tree fragment)":6
+ * cdef bint use_setstate
+ * state = (self._items, self.frozen)
+ * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
+ * if _dict is not None:
+ * state += (_dict,)
+ */
+ __pyx_t_2 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_v__dict = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "(tree fragment)":7
+ * state = (self._items, self.frozen)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ __pyx_t_3 = (__pyx_v__dict != Py_None);
+ __pyx_t_4 = (__pyx_t_3 != 0);
+ if (__pyx_t_4) {
+
+ /* "(tree fragment)":8
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ * state += (_dict,) # <<<<<<<<<<<<<<
+ * use_setstate = True
+ * else:
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v__dict);
+ __Pyx_GIVEREF(__pyx_v__dict);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v__dict);
+ __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_1));
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":9
+ * if _dict is not None:
+ * state += (_dict,)
+ * use_setstate = True # <<<<<<<<<<<<<<
+ * else:
+ * use_setstate = self._items is not None
+ */
+ __pyx_v_use_setstate = 1;
+
+ /* "(tree fragment)":7
+ * state = (self._items, self.frozen)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ goto __pyx_L3;
+ }
+
+ /* "(tree fragment)":11
+ * use_setstate = True
+ * else:
+ * use_setstate = self._items is not None # <<<<<<<<<<<<<<
+ * if use_setstate:
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, None), state
+ */
+ /*else*/ {
+ __pyx_t_4 = (__pyx_v_self->_items != ((PyObject*)Py_None));
+ __pyx_v_use_setstate = __pyx_t_4;
+ }
+ __pyx_L3:;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self._items is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, None), state
+ * else:
+ */
+ __pyx_t_4 = (__pyx_v_use_setstate != 0);
+ if (__pyx_t_4) {
+
+ /* "(tree fragment)":13
+ * use_setstate = self._items is not None
+ * if use_setstate:
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, None), state # <<<<<<<<<<<<<<
+ * else:
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, state)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle_FrozenList); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_155820355);
+ __Pyx_GIVEREF(__pyx_int_155820355);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_155820355);
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, Py_None);
+ __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
+ __pyx_t_1 = 0;
+ __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self._items is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, None), state
+ * else:
+ */
+ }
+
+ /* "(tree fragment)":15
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, None), state
+ * else:
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, state) # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle_FrozenList__set_state(self, __pyx_state)
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_FrozenList); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_155820355);
+ __Pyx_GIVEREF(__pyx_int_155820355);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_155820355);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_state);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2);
+ __pyx_t_5 = 0;
+ __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_state);
+ __Pyx_XDECREF(__pyx_v__dict);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
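+
+/* __reduce_cython__ implements pickling: state is (self._items, self.frozen),
+ * extended with the instance __dict__ when one exists. Depending on whether a
+ * __setstate__ step is needed, it returns either
+ *     (__pyx_unpickle_FrozenList, (type(self), 0x949a143, None), state)
+ * or
+ *     (__pyx_unpickle_FrozenList, (type(self), 0x949a143, state))
+ * so unpickling always funnels through __pyx_unpickle_FrozenList below. */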
+
+/* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle_FrozenList__set_state(self, __pyx_state)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_45__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_45__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist_10FrozenList_44__setstate_cython__(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist_10FrozenList_44__setstate_cython__(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":17
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, state)
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle_FrozenList__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
+ __pyx_t_1 = __pyx_f_7aiohttp_11_frozenlist___pyx_unpickle_FrozenList__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle_FrozenList, (type(self), 0x949a143, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle_FrozenList__set_state(self, __pyx_state)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._frozenlist.FrozenList.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
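+
+/* __setstate_cython__ is the companion of __reduce_cython__: it checks that
+ * the incoming state is a tuple (or None) and hands it to
+ * __pyx_unpickle_FrozenList__set_state, defined below. */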
+
+/* "(tree fragment)":1
+ * def __pyx_unpickle_FrozenList(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_1__pyx_unpickle_FrozenList(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_7aiohttp_11_frozenlist_1__pyx_unpickle_FrozenList = {"__pyx_unpickle_FrozenList", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_11_frozenlist_1__pyx_unpickle_FrozenList, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_7aiohttp_11_frozenlist_1__pyx_unpickle_FrozenList(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v___pyx_type = 0;
+ long __pyx_v___pyx_checksum;
+ PyObject *__pyx_v___pyx_state = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__pyx_unpickle_FrozenList (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
+ PyObject* values[3] = {0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_FrozenList", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_FrozenList", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_FrozenList") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ __pyx_v___pyx_type = values[0];
+ __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_v___pyx_state = values[2];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_FrozenList", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._frozenlist.__pyx_unpickle_FrozenList", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_11_frozenlist___pyx_unpickle_FrozenList(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
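+
+/* Standard Cython argument-unpacking prologue for a METH_VARARGS|METH_KEYWORDS
+ * function: positional arguments take the fast path, keywords are matched
+ * against __pyx_pyargnames, and missing or surplus arguments raise TypeError
+ * via __Pyx_RaiseArgtupleInvalid before the real implementation runs. */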
+
+static PyObject *__pyx_pf_7aiohttp_11_frozenlist___pyx_unpickle_FrozenList(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_v___pyx_PickleError = 0;
+ PyObject *__pyx_v___pyx_result = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle_FrozenList", 0);
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x949a143: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x949a143 = (_items, frozen))" % __pyx_checksum)
+ */
+ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0x949a143) != 0);
+ if (__pyx_t_1) {
+
+ /* "(tree fragment)":5
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x949a143:
+ * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x949a143 = (_items, frozen))" % __pyx_checksum)
+ * __pyx_result = FrozenList.__new__(__pyx_type)
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_PickleError);
+ __Pyx_GIVEREF(__pyx_n_s_PickleError);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
+ __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_v___pyx_PickleError = __pyx_t_2;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":6
+ * if __pyx_checksum != 0x949a143:
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x949a143 = (_items, frozen))" % __pyx_checksum) # <<<<<<<<<<<<<<
+ * __pyx_result = FrozenList.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ */
+ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x94, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_INCREF(__pyx_v___pyx_PickleError);
+ __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 6, __pyx_L1_error)
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x949a143: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x949a143 = (_items, frozen))" % __pyx_checksum)
+ */
+ }
+
+ /* "(tree fragment)":7
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x949a143 = (_items, frozen))" % __pyx_checksum)
+ * __pyx_result = FrozenList.__new__(__pyx_type) # <<<<<<<<<<<<<<
+ * if __pyx_state is not None:
+ * __pyx_unpickle_FrozenList__set_state(<FrozenList> __pyx_result, __pyx_state)
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_7aiohttp_11_frozenlist_FrozenList), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v___pyx_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x949a143 = (_items, frozen))" % __pyx_checksum)
+ * __pyx_result = FrozenList.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_FrozenList__set_state(<FrozenList> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
+ __pyx_t_6 = (__pyx_t_1 != 0);
+ if (__pyx_t_6) {
+
+ /* "(tree fragment)":9
+ * __pyx_result = FrozenList.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ * __pyx_unpickle_FrozenList__set_state(<FrozenList> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
+ * return __pyx_result
+ * cdef __pyx_unpickle_FrozenList__set_state(FrozenList __pyx_result, tuple __pyx_state):
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
+ __pyx_t_3 = __pyx_f_7aiohttp_11_frozenlist___pyx_unpickle_FrozenList__set_state(((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x949a143 = (_items, frozen))" % __pyx_checksum)
+ * __pyx_result = FrozenList.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_FrozenList__set_state(<FrozenList> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ }
+
+ /* "(tree fragment)":10
+ * if __pyx_state is not None:
+ * __pyx_unpickle_FrozenList__set_state(<FrozenList> __pyx_result, __pyx_state)
+ * return __pyx_result # <<<<<<<<<<<<<<
+ * cdef __pyx_unpickle_FrozenList__set_state(FrozenList __pyx_result, tuple __pyx_state):
+ * __pyx_result._items = __pyx_state[0]; __pyx_result.frozen = __pyx_state[1]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v___pyx_result);
+ __pyx_r = __pyx_v___pyx_result;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_FrozenList(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("aiohttp._frozenlist.__pyx_unpickle_FrozenList", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v___pyx_PickleError);
+ __Pyx_XDECREF(__pyx_v___pyx_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
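+
+/* The unpickler first validates the checksum baked into the pickle stream:
+ * 0x949a143 identifies the (_items, frozen) attribute layout, so a pickle
+ * produced by a build with a different layout raises pickle.PickleError
+ * instead of constructing a corrupt object. On success it allocates the
+ * instance with FrozenList.__new__(__pyx_type) and applies the state tuple,
+ * if any. */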
+
+/* "(tree fragment)":11
+ * __pyx_unpickle_FrozenList__set_state(<FrozenList> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_FrozenList__set_state(FrozenList __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result._items = __pyx_state[0]; __pyx_result.frozen = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'):
+ */
+
+static PyObject *__pyx_f_7aiohttp_11_frozenlist___pyx_unpickle_FrozenList__set_state(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ Py_ssize_t __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle_FrozenList__set_state", 0);
+
+ /* "(tree fragment)":12
+ * return __pyx_result
+ * cdef __pyx_unpickle_FrozenList__set_state(FrozenList __pyx_result, tuple __pyx_state):
+ * __pyx_result._items = __pyx_state[0]; __pyx_result.frozen = __pyx_state[1] # <<<<<<<<<<<<<<
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[2])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (!(likely(PyList_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "list", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->_items);
+ __Pyx_DECREF(__pyx_v___pyx_result->_items);
+ __pyx_v___pyx_result->_items = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v___pyx_result->frozen = __pyx_t_2;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_FrozenList__set_state(FrozenList __pyx_result, tuple __pyx_state):
+ * __pyx_result._items = __pyx_state[0]; __pyx_result.frozen = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[2])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(1, 13, __pyx_L1_error)
+ }
+ __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_4 = ((__pyx_t_3 > 2) != 0);
+ if (__pyx_t_4) {
+ } else {
+ __pyx_t_2 = __pyx_t_4;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_5 = (__pyx_t_4 != 0);
+ __pyx_t_2 = __pyx_t_5;
+ __pyx_L4_bool_binop_done:;
+ if (__pyx_t_2) {
+
+ /* "(tree fragment)":14
+ * __pyx_result._items = __pyx_state[0]; __pyx_result.frozen = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[2]) # <<<<<<<<<<<<<<
+ */
+ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 14, __pyx_L1_error)
+ }
+ __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_8 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
+ __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
+ if (likely(__pyx_t_8)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_8);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_7, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_FrozenList__set_state(FrozenList __pyx_result, tuple __pyx_state):
+ * __pyx_result._items = __pyx_state[0]; __pyx_result.frozen = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[2])
+ */
+ }
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle_FrozenList__set_state(<FrozenList> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_FrozenList__set_state(FrozenList __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result._items = __pyx_state[0]; __pyx_result.frozen = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'):
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_AddTraceback("aiohttp._frozenlist.__pyx_unpickle_FrozenList__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
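+
+/* __pyx_unpickle_FrozenList__set_state unpacks the state tuple produced by
+ * __reduce_cython__: state[0] must be a list and becomes _items, state[1] is
+ * coerced to the C `frozen` flag, and an optional third element updates the
+ * instance __dict__ when the object has one. */
+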
+static struct __pyx_vtabstruct_7aiohttp_11_frozenlist_FrozenList __pyx_vtable_7aiohttp_11_frozenlist_FrozenList;
+
+static PyObject *__pyx_tp_new_7aiohttp_11_frozenlist_FrozenList(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *p;
+ PyObject *o;
+ if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+ o = (*t->tp_alloc)(t, 0);
+ } else {
+ o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+ }
+ if (unlikely(!o)) return 0;
+ p = ((struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)o);
+ p->__pyx_vtab = __pyx_vtabptr_7aiohttp_11_frozenlist_FrozenList;
+ p->_items = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ return o;
+}
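+
+/* tp_new allocates the instance (routing abstract types through
+ * object.__new__ so the usual TypeError is produced), installs the vtable
+ * pointer for the cdef methods _check_frozen/_fast_len, and initialises
+ * _items to None. */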
+
+static void __pyx_tp_dealloc_7aiohttp_11_frozenlist_FrozenList(PyObject *o) {
+ struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *p = (struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)o;
+ #if CYTHON_USE_TP_FINALIZE
+ if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
+ if (PyObject_CallFinalizerFromDealloc(o)) return;
+ }
+ #endif
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->_items);
+ (*Py_TYPE(o)->tp_free)(o);
+}
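+
+/* tp_dealloc gives tp_finalize a chance to resurrect the object where
+ * supported, then untracks it from the cycle GC, drops the _items reference
+ * and releases the memory through tp_free. */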
+
+static int __pyx_tp_traverse_7aiohttp_11_frozenlist_FrozenList(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *p = (struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)o;
+ if (p->_items) {
+ e = (*v)(p->_items, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_7aiohttp_11_frozenlist_FrozenList(PyObject *o) {
+ PyObject* tmp;
+ struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *p = (struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *)o;
+ tmp = ((PyObject*)p->_items);
+ p->_items = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+static PyObject *__pyx_sq_item_7aiohttp_11_frozenlist_FrozenList(PyObject *o, Py_ssize_t i) {
+ PyObject *r;
+ PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
+ r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
+ Py_DECREF(x);
+ return r;
+}
+
+static int __pyx_mp_ass_subscript_7aiohttp_11_frozenlist_FrozenList(PyObject *o, PyObject *i, PyObject *v) {
+ if (v) {
+ return __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_7__setitem__(o, i, v);
+ }
+ else {
+ return __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_9__delitem__(o, i);
+ }
+}
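+
+/* Protocol shims: sq_item boxes the index and forwards to mp_subscript so the
+ * sequence protocol reuses __getitem__, and mp_ass_subscript dispatches to
+ * __setitem__ or, when the value is NULL, to __delitem__. */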
+
+static PyObject *__pyx_getprop_7aiohttp_11_frozenlist_10FrozenList_frozen(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_6frozen_1__get__(o);
+}
+
+static PyMethodDef __pyx_methods_7aiohttp_11_frozenlist_FrozenList[] = {
+ {"freeze", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_3freeze, METH_NOARGS, 0},
+ {"__reversed__", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_15__reversed__, METH_NOARGS, 0},
+ {"insert", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_19insert, METH_VARARGS|METH_KEYWORDS, 0},
+ {"index", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_25index, METH_O, 0},
+ {"remove", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_27remove, METH_O, 0},
+ {"clear", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_29clear, METH_NOARGS, 0},
+ {"extend", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_31extend, METH_O, 0},
+ {"reverse", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_33reverse, METH_NOARGS, 0},
+ {"pop", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_35pop, METH_VARARGS|METH_KEYWORDS, 0},
+ {"append", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_37append, METH_O, 0},
+ {"count", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_39count, METH_O, 0},
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_43__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_7aiohttp_11_frozenlist_10FrozenList_45__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_7aiohttp_11_frozenlist_FrozenList[] = {
+ {(char *)"frozen", __pyx_getprop_7aiohttp_11_frozenlist_10FrozenList_frozen, 0, (char *)0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyNumberMethods __pyx_tp_as_number_FrozenList = {
+ 0, /*nb_add*/
+ 0, /*nb_subtract*/
+ 0, /*nb_multiply*/
+ #if PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)
+ 0, /*nb_divide*/
+ #endif
+ 0, /*nb_remainder*/
+ 0, /*nb_divmod*/
+ 0, /*nb_power*/
+ 0, /*nb_negative*/
+ 0, /*nb_positive*/
+ 0, /*nb_absolute*/
+ 0, /*nb_nonzero*/
+ 0, /*nb_invert*/
+ 0, /*nb_lshift*/
+ 0, /*nb_rshift*/
+ 0, /*nb_and*/
+ 0, /*nb_xor*/
+ 0, /*nb_or*/
+ #if PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)
+ 0, /*nb_coerce*/
+ #endif
+ 0, /*nb_int*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*nb_long*/
+ #else
+ 0, /*reserved*/
+ #endif
+ 0, /*nb_float*/
+ #if PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)
+ 0, /*nb_oct*/
+ #endif
+ #if PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)
+ 0, /*nb_hex*/
+ #endif
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_23__iadd__, /*nb_inplace_add*/
+ 0, /*nb_inplace_subtract*/
+ 0, /*nb_inplace_multiply*/
+ #if PY_MAJOR_VERSION < 3 || (CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x03050000)
+ 0, /*nb_inplace_divide*/
+ #endif
+ 0, /*nb_inplace_remainder*/
+ 0, /*nb_inplace_power*/
+ 0, /*nb_inplace_lshift*/
+ 0, /*nb_inplace_rshift*/
+ 0, /*nb_inplace_and*/
+ 0, /*nb_inplace_xor*/
+ 0, /*nb_inplace_or*/
+ 0, /*nb_floor_divide*/
+ 0, /*nb_true_divide*/
+ 0, /*nb_inplace_floor_divide*/
+ 0, /*nb_inplace_true_divide*/
+ 0, /*nb_index*/
+ #if PY_VERSION_HEX >= 0x03050000
+ 0, /*nb_matrix_multiply*/
+ #endif
+ #if PY_VERSION_HEX >= 0x03050000
+ 0, /*nb_inplace_matrix_multiply*/
+ #endif
+};
+
+static PySequenceMethods __pyx_tp_as_sequence_FrozenList = {
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_11__len__, /*sq_length*/
+ 0, /*sq_concat*/
+ 0, /*sq_repeat*/
+ __pyx_sq_item_7aiohttp_11_frozenlist_FrozenList, /*sq_item*/
+ 0, /*sq_slice*/
+ 0, /*sq_ass_item*/
+ 0, /*sq_ass_slice*/
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_21__contains__, /*sq_contains*/
+ 0, /*sq_inplace_concat*/
+ 0, /*sq_inplace_repeat*/
+};
+
+static PyMappingMethods __pyx_tp_as_mapping_FrozenList = {
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_11__len__, /*mp_length*/
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_5__getitem__, /*mp_subscript*/
+ __pyx_mp_ass_subscript_7aiohttp_11_frozenlist_FrozenList, /*mp_ass_subscript*/
+};
+
+static PyTypeObject __pyx_type_7aiohttp_11_frozenlist_FrozenList = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._frozenlist.FrozenList", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_11_frozenlist_FrozenList, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_41__repr__, /*tp_repr*/
+ &__pyx_tp_as_number_FrozenList, /*tp_as_number*/
+ &__pyx_tp_as_sequence_FrozenList, /*tp_as_sequence*/
+ &__pyx_tp_as_mapping_FrozenList, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_11_frozenlist_FrozenList, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_11_frozenlist_FrozenList, /*tp_clear*/
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_17__richcmp__, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_13__iter__, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_7aiohttp_11_frozenlist_FrozenList, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_7aiohttp_11_frozenlist_FrozenList, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pw_7aiohttp_11_frozenlist_10FrozenList_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_11_frozenlist_FrozenList, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
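+
+/* The static type object ties everything together: __repr__, the number slot
+ * block (only nb_inplace_add is populated, for __iadd__), the sequence and
+ * mapping slots, GC traverse/clear, __richcmp__, __iter__, the method and
+ * getset tables, and __init__/__new__. Py_TPFLAGS_BASETYPE keeps FrozenList
+ * subclassable and Py_TPFLAGS_HAVE_GC enrols instances in the cycle
+ * collector. */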
+
+static PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
+static int __pyx_pymod_exec__frozenlist(PyObject* module); /*proto*/
+static PyModuleDef_Slot __pyx_moduledef_slots[] = {
+ {Py_mod_create, (void*)__pyx_pymod_create},
+ {Py_mod_exec, (void*)__pyx_pymod_exec__frozenlist},
+ {0, NULL}
+};
+#endif
+
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_frozenlist",
+ 0, /* m_doc */
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ 0, /* m_size */
+ #else
+ -1, /* m_size */
+ #endif
+ __pyx_methods /* m_methods */,
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_moduledef_slots, /* m_slots */
+ #else
+ NULL, /* m_reload */
+ #endif
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+ #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+ #define CYTHON_SMALL_CODE __attribute__((cold))
+#else
+ #define CYTHON_SMALL_CODE
+#endif
+#endif
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_u_Cannot_modify_frozen_list, __pyx_k_Cannot_modify_frozen_list, sizeof(__pyx_k_Cannot_modify_frozen_list), 0, 1, 0, 0},
+ {&__pyx_n_s_FrozenList, __pyx_k_FrozenList, sizeof(__pyx_k_FrozenList), 0, 0, 1, 1},
+ {&__pyx_kp_u_FrozenList_frozen_r, __pyx_k_FrozenList_frozen_r, sizeof(__pyx_k_FrozenList_frozen_r), 0, 1, 0, 0},
+ {&__pyx_kp_s_Incompatible_checksums_s_vs_0x94, __pyx_k_Incompatible_checksums_s_vs_0x94, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x94), 0, 0, 1, 0},
+ {&__pyx_n_s_MutableSequence, __pyx_k_MutableSequence, sizeof(__pyx_k_MutableSequence), 0, 0, 1, 1},
+ {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
+ {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1},
+ {&__pyx_n_s_aiohttp__frozenlist, __pyx_k_aiohttp__frozenlist, sizeof(__pyx_k_aiohttp__frozenlist), 0, 0, 1, 1},
+ {&__pyx_n_s_clear, __pyx_k_clear, sizeof(__pyx_k_clear), 0, 0, 1, 1},
+ {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
+ {&__pyx_n_s_collections_abc, __pyx_k_collections_abc, sizeof(__pyx_k_collections_abc), 0, 0, 1, 1},
+ {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1},
+ {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
+ {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
+ {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
+ {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
+ {&__pyx_n_s_index, __pyx_k_index, sizeof(__pyx_k_index), 0, 0, 1, 1},
+ {&__pyx_n_s_item, __pyx_k_item, sizeof(__pyx_k_item), 0, 0, 1, 1},
+ {&__pyx_n_s_items, __pyx_k_items, sizeof(__pyx_k_items), 0, 0, 1, 1},
+ {&__pyx_n_s_iter, __pyx_k_iter, sizeof(__pyx_k_iter), 0, 0, 1, 1},
+ {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+ {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
+ {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
+ {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
+ {&__pyx_n_s_pop, __pyx_k_pop, sizeof(__pyx_k_pop), 0, 0, 1, 1},
+ {&__pyx_n_s_pos, __pyx_k_pos, sizeof(__pyx_k_pos), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_unpickle_FrozenList, __pyx_k_pyx_unpickle_FrozenList, sizeof(__pyx_k_pyx_unpickle_FrozenList), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
+ {&__pyx_n_s_register, __pyx_k_register, sizeof(__pyx_k_register), 0, 0, 1, 1},
+ {&__pyx_n_s_remove, __pyx_k_remove, sizeof(__pyx_k_remove), 0, 0, 1, 1},
+ {&__pyx_n_s_reversed, __pyx_k_reversed, sizeof(__pyx_k_reversed), 0, 0, 1, 1},
+ {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
+ {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
+ {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
+ {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+ {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
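+
+/* Interned-string table: each entry pairs a C literal with a module-level
+ * PyObject* slot; __Pyx_InitStrings (called from __Pyx_InitGlobals below)
+ * creates the Python strings once so the code above can reference them as
+ * __pyx_n_s_* / __pyx_kp_u_* constants. */
+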
+static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(0, 19, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
+
+ /* "aiohttp/_frozenlist.pyx":19
+ * cdef object _check_frozen(self):
+ * if self.frozen:
+ * raise RuntimeError("Cannot modify frozen list.") # <<<<<<<<<<<<<<
+ *
+ * cdef inline object _fast_len(self):
+ */
+ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_Cannot_modify_frozen_list); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple_);
+ __Pyx_GIVEREF(__pyx_tuple_);
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_FrozenList(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+ __pyx_tuple__2 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__2);
+ __Pyx_GIVEREF(__pyx_tuple__2);
+ __pyx_codeobj__3 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__2, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_FrozenList, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__3)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_4 = PyInt_FromLong(4); if (unlikely(!__pyx_int_4)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_5 = PyInt_FromLong(5); if (unlikely(!__pyx_int_5)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_155820355 = PyInt_FromLong(155820355L); if (unlikely(!__pyx_int_155820355)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
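+
+/* Pre-built integer constants. Note __pyx_int_155820355 is simply the decimal
+ * form of the pickle checksum 0x949a143 used by __reduce_cython__ above. */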
+
+static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
+
+static int __Pyx_modinit_global_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
+ /*--- Global init code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_variable_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
+ /*--- Variable export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
+ /*--- Function export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_type_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
+ /*--- Type init code ---*/
+ __pyx_vtabptr_7aiohttp_11_frozenlist_FrozenList = &__pyx_vtable_7aiohttp_11_frozenlist_FrozenList;
+ __pyx_vtable_7aiohttp_11_frozenlist_FrozenList._check_frozen = (PyObject *(*)(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *))__pyx_f_7aiohttp_11_frozenlist_10FrozenList__check_frozen;
+ __pyx_vtable_7aiohttp_11_frozenlist_FrozenList._fast_len = (PyObject *(*)(struct __pyx_obj_7aiohttp_11_frozenlist_FrozenList *))__pyx_f_7aiohttp_11_frozenlist_10FrozenList__fast_len;
+ if (PyType_Ready(&__pyx_type_7aiohttp_11_frozenlist_FrozenList) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_11_frozenlist_FrozenList.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_11_frozenlist_FrozenList.tp_dictoffset && __pyx_type_7aiohttp_11_frozenlist_FrozenList.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_11_frozenlist_FrozenList.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (__Pyx_SetVtable(__pyx_type_7aiohttp_11_frozenlist_FrozenList.tp_dict, __pyx_vtabptr_7aiohttp_11_frozenlist_FrozenList) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_FrozenList, (PyObject *)&__pyx_type_7aiohttp_11_frozenlist_FrozenList) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7aiohttp_11_frozenlist_FrozenList) < 0) __PYX_ERR(0, 4, __pyx_L1_error)
+ __pyx_ptype_7aiohttp_11_frozenlist_FrozenList = &__pyx_type_7aiohttp_11_frozenlist_FrozenList;
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
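+
+/* Type init: publishes the vtable, runs PyType_Ready, optionally swaps in
+ * Cython's generic tp_getattro, stores the vtable in tp_dict, exposes the
+ * class as aiohttp._frozenlist.FrozenList, and lets __Pyx_setup_reduce route
+ * __reduce__/__setstate__ to the __reduce_cython__ machinery above. */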
+
+static int __Pyx_modinit_type_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
+ /*--- Type import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_variable_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
+ /*--- Variable import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
+ /*--- Function import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+
+#ifndef CYTHON_NO_PYINIT_EXPORT
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+#elif PY_MAJOR_VERSION < 3
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" void
+#else
+#define __Pyx_PyMODINIT_FUNC void
+#endif
+#else
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#endif
+#endif
+
+
+#if PY_MAJOR_VERSION < 3
+__Pyx_PyMODINIT_FUNC init_frozenlist(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC init_frozenlist(void)
+#else
+__Pyx_PyMODINIT_FUNC PyInit__frozenlist(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC PyInit__frozenlist(void)
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+{
+ return PyModuleDef_Init(&__pyx_moduledef);
+}
+static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
+ #if PY_VERSION_HEX >= 0x030700A1
+ static PY_INT64_T main_interpreter_id = -1;
+ PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
+ if (main_interpreter_id == -1) {
+ main_interpreter_id = current_id;
+ return (unlikely(current_id == -1)) ? -1 : 0;
+ } else if (unlikely(main_interpreter_id != current_id))
+ #else
+ static PyInterpreterState *main_interpreter = NULL;
+ PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
+ if (!main_interpreter) {
+ main_interpreter = current_interpreter;
+ } else if (unlikely(main_interpreter != current_interpreter))
+ #endif
+ {
+ PyErr_SetString(
+ PyExc_ImportError,
+ "Interpreter change detected - this module can only be loaded into one interpreter per process.");
+ return -1;
+ }
+ return 0;
+}
+static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
+ PyObject *value = PyObject_GetAttrString(spec, from_name);
+ int result = 0;
+ if (likely(value)) {
+ if (allow_none || value != Py_None) {
+ result = PyDict_SetItemString(moddict, to_name, value);
+ }
+ Py_DECREF(value);
+ } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Clear();
+ } else {
+ result = -1;
+ }
+ return result;
+}
+static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
+ PyObject *module = NULL, *moddict, *modname;
+ if (__Pyx_check_single_interpreter())
+ return NULL;
+ if (__pyx_m)
+ return __Pyx_NewRef(__pyx_m);
+ modname = PyObject_GetAttrString(spec, "name");
+ if (unlikely(!modname)) goto bad;
+ module = PyModule_NewObject(modname);
+ Py_DECREF(modname);
+ if (unlikely(!module)) goto bad;
+ moddict = PyModule_GetDict(module);
+ if (unlikely(!moddict)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
+ return module;
+bad:
+ Py_XDECREF(module);
+ return NULL;
+}
+
+
+static CYTHON_SMALL_CODE int __pyx_pymod_exec__frozenlist(PyObject *__pyx_pyinit_module)
+#endif
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannyDeclarations
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ if (__pyx_m) {
+ if (__pyx_m == __pyx_pyinit_module) return 0;
+ PyErr_SetString(PyExc_RuntimeError, "Module '_frozenlist' has already been imported. Re-initialisation is not supported.");
+ return -1;
+ }
+ #elif PY_MAJOR_VERSION >= 3
+ if (__pyx_m) return __Pyx_NewRef(__pyx_m);
+ #endif
+ #if CYTHON_REFNANNY
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+ if (!__Pyx_RefNanny) {
+   PyErr_Clear();
+   __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+   if (!__Pyx_RefNanny)
+     Py_FatalError("failed to import 'refnanny' module");
+ }
+ #endif
+ __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__frozenlist(void)", 0);
+ if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pxy_PyFrame_Initialize_Offsets
+ __Pxy_PyFrame_Initialize_Offsets();
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pyx_CyFunction_USED
+ if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_FusedFunction_USED
+ if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Generator_USED
+ if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_StopAsyncIteration_USED
+ if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_m = __pyx_pyinit_module;
+ Py_INCREF(__pyx_m);
+ #else
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4("_frozenlist", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_d);
+ __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_b);
+ __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_cython_runtime);
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ /*--- Initialize various global constants etc. ---*/
+ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+ if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ if (__pyx_module_is_main_aiohttp___frozenlist) {
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ #if PY_MAJOR_VERSION >= 3
+ {
+ PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (!PyDict_GetItemString(modules, "aiohttp._frozenlist")) {
+ if (unlikely(PyDict_SetItemString(modules, "aiohttp._frozenlist", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ }
+ #endif
+ /*--- Builtin init code ---*/
+ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Constants init code ---*/
+ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Global type/function init code ---*/
+ (void)__Pyx_modinit_global_init_code();
+ (void)__Pyx_modinit_variable_export_code();
+ (void)__Pyx_modinit_function_export_code();
+ if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ (void)__Pyx_modinit_type_import_code();
+ (void)__Pyx_modinit_variable_import_code();
+ (void)__Pyx_modinit_function_import_code();
+ /*--- Execution code ---*/
+ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+ if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+
+ /* "aiohttp/_frozenlist.pyx":1
+ * from collections.abc import MutableSequence # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_s_MutableSequence);
+ __Pyx_GIVEREF(__pyx_n_s_MutableSequence);
+ PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_MutableSequence);
+ __pyx_t_2 = __Pyx_Import(__pyx_n_s_collections_abc, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_MutableSequence); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_MutableSequence, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":108
+ *
+ *
+ * MutableSequence.register(FrozenList) # <<<<<<<<<<<<<<
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_MutableSequence); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_register); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 108, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_1, ((PyObject *)__pyx_ptype_7aiohttp_11_frozenlist_FrozenList)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 108, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_FrozenList(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7aiohttp_11_frozenlist_1__pyx_unpickle_FrozenList, NULL, __pyx_n_s_aiohttp__frozenlist); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_FrozenList, __pyx_t_2) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_frozenlist.pyx":1
+ * from collections.abc import MutableSequence # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /*--- Wrapped vars code ---*/
+
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ if (__pyx_m) {
+ if (__pyx_d) {
+ __Pyx_AddTraceback("init aiohttp._frozenlist", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ }
+ Py_CLEAR(__pyx_m);
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init aiohttp._frozenlist");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ return (__pyx_m != NULL) ? 0 : -1;
+ #elif PY_MAJOR_VERSION >= 3
+ return __pyx_m;
+ #else
+ return;
+ #endif
+}
+
+/* --- Runtime support code --- */
+/* Refnanny */
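+/* RefNanny is Cython's optional reference-count debugger.  Its C API
+   struct is passed across the module boundary as an integer: the refnanny
+   extension exposes a "RefNannyAPI" attribute holding the address of its
+   vtable, which PyLong_AsVoidPtr() converts back into a pointer. */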
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule(modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, "RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif
+
+/* PyObjectGetAttrStr */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro))
+ return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_getattr))
+ return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+ return PyObject_GetAttr(obj, attr_name);
+}
+#endif
+
+/* GetBuiltinName */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+ PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+ if (unlikely(!result)) {
+ PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+ "name '%U' is not defined", name);
+#else
+ "name '%.200s' is not defined", PyString_AS_STRING(name));
+#endif
+ }
+ return result;
+}
+
+/* RaiseDoubleKeywords */
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AsString(kw_name));
+ #endif
+}
+
+/* ParseKeywords */
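+/* Matches the keyword arguments in kwds against the expected names in
+   argnames, in two passes: first by pointer identity (argument names are
+   interned), then by string equality.  A name that matches an argument
+   already bound positionally triggers the double-keyword error; unknown
+   names are collected into kwds2 (**kwargs) or raise TypeError. */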
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ continue;
+ }
+ name = first_kw_arg;
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyString_Check(key))) {
+ while (*name) {
+ if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**name, key)) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ if ((**argname == key) || (
+ (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**argname, key))) {
+ goto arg_passed_twice;
+ }
+ argname++;
+ }
+ }
+ } else
+ #endif
+ if (likely(PyUnicode_Check(key))) {
+ while (*name) {
+ int cmp = (**name == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**name, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ int cmp = (**argname == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**argname, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) goto arg_passed_twice;
+ argname++;
+ }
+ }
+ } else
+ goto invalid_keyword_type;
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, key);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%.200s() got an unexpected keyword argument '%.200s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+/* RaiseArgTupleInvalid */
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *more_or_less;
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
+ func_name, more_or_less, num_expected,
+ (num_expected == 1) ? "" : "s", num_found);
+}
+
+/* PyObjectCall */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
+ PyObject *result;
+ ternaryfunc call = func->ob_type->tp_call;
+ if (unlikely(!call))
+ return PyObject_Call(func, arg, kw);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = (*call)(func, arg, kw);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyErrFetchRestore */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+#endif
+
+/* RaiseException */
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
+ CYTHON_UNUSED PyObject *cause) {
+ __Pyx_PyThreadState_declare
+ Py_XINCREF(type);
+ if (!value || value == Py_None)
+ value = NULL;
+ else
+ Py_INCREF(value);
+ if (!tb || tb == Py_None)
+ tb = NULL;
+ else {
+ Py_INCREF(tb);
+ if (!PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ }
+ if (PyType_Check(type)) {
+#if CYTHON_COMPILING_IN_PYPY
+ if (!value) {
+ Py_INCREF(Py_None);
+ value = Py_None;
+ }
+#endif
+ PyErr_NormalizeException(&type, &value, &tb);
+ } else {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ }
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+#else
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ PyObject* owned_instance = NULL;
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (PyExceptionClass_Check(type)) {
+ PyObject *instance_class = NULL;
+ if (value && PyExceptionInstance_Check(value)) {
+ instance_class = (PyObject*) Py_TYPE(value);
+ if (instance_class != type) {
+ int is_subclass = PyObject_IsSubclass(instance_class, type);
+ if (!is_subclass) {
+ instance_class = NULL;
+ } else if (unlikely(is_subclass == -1)) {
+ goto bad;
+ } else {
+ type = instance_class;
+ }
+ }
+ }
+ if (!instance_class) {
+ PyObject *args;
+ if (!value)
+ args = PyTuple_New(0);
+ else if (PyTuple_Check(value)) {
+ Py_INCREF(value);
+ args = value;
+ } else
+ args = PyTuple_Pack(1, value);
+ if (!args)
+ goto bad;
+ owned_instance = PyObject_Call(type, args, NULL);
+ Py_DECREF(args);
+ if (!owned_instance)
+ goto bad;
+ value = owned_instance;
+ if (!PyExceptionInstance_Check(value)) {
+ PyErr_Format(PyExc_TypeError,
+ "calling %R should have returned an instance of "
+ "BaseException, not %R",
+ type, Py_TYPE(value));
+ goto bad;
+ }
+ }
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+ if (cause) {
+ PyObject *fixed_cause;
+ if (cause == Py_None) {
+ fixed_cause = NULL;
+ } else if (PyExceptionClass_Check(cause)) {
+ fixed_cause = PyObject_CallObject(cause, NULL);
+ if (fixed_cause == NULL)
+ goto bad;
+ } else if (PyExceptionInstance_Check(cause)) {
+ fixed_cause = cause;
+ Py_INCREF(fixed_cause);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "exception causes must derive from "
+ "BaseException");
+ goto bad;
+ }
+ PyException_SetCause(value, fixed_cause);
+ }
+ PyErr_SetObject(type, value);
+ if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
+ Py_INCREF(tb);
+ PyErr_Restore(tmp_type, tmp_value, tb);
+ Py_XDECREF(tmp_tb);
+#else
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+#endif
+ }
+bad:
+ Py_XDECREF(owned_instance);
+ return;
+}
+#endif
+
+/* GetItemInt */
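+/* Fast path for integer indexing.  Exact lists and tuples are read
+   directly from their item arrays, with wraparound and bounds checking
+   compiled in per call site; other sequences go through sq_item, and the
+   generic fallback boxes the index and calls PyObject_GetItem. */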
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+ PyObject *r;
+ if (!j) return NULL;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ Py_ssize_t wrapped_i = i;
+ if (wraparound & unlikely(i < 0)) {
+ wrapped_i += PyList_GET_SIZE(o);
+ }
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
+ PyObject *r = PyList_GET_ITEM(o, wrapped_i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ Py_ssize_t wrapped_i = i;
+ if (wraparound & unlikely(i < 0)) {
+ wrapped_i += PyTuple_GET_SIZE(o);
+ }
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
+ if (is_list || PyList_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
+ if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
+ PyObject *r = PyList_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ else if (PyTuple_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ } else {
+ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+ if (likely(m && m->sq_item)) {
+ if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+ Py_ssize_t l = m->sq_length(o);
+ if (likely(l >= 0)) {
+ i += l;
+ } else {
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+ return NULL;
+ PyErr_Clear();
+ }
+ }
+ return m->sq_item(o, i);
+ }
+ }
+#else
+ if (is_list || PySequence_Check(o)) {
+ return PySequence_GetItem(o, i);
+ }
+#endif
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+/* ObjectGetItem */
+#if CYTHON_USE_TYPE_SLOTS
+static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
+ PyObject *runerr;
+ Py_ssize_t key_value;
+ PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
+ if (unlikely(!(m && m->sq_item))) {
+ PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
+ return NULL;
+ }
+ key_value = __Pyx_PyIndex_AsSsize_t(index);
+ if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
+ return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
+ }
+ if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
+ PyErr_Clear();
+ PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
+ }
+ return NULL;
+}
+static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
+ PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(m && m->mp_subscript)) {
+ return m->mp_subscript(obj, key);
+ }
+ return __Pyx_PyObject_GetIndex(obj, key);
+}
+#endif
+
+/* PyFunctionFastCall */
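+/* Calls a pure-Python function without building an argument tuple,
+   mirroring CPython's internal fast path: when the code object takes no
+   keyword-only arguments and has no *args/**kwargs or free variables, a
+   frame is created directly and the positional arguments are copied into
+   its fastlocals.  Anything fancier goes through PyEval_EvalCodeEx. */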
+#if CYTHON_FAST_PYCALL
+static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
+ PyObject *globals) {
+ PyFrameObject *f;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject **fastlocals;
+ Py_ssize_t i;
+ PyObject *result;
+ assert(globals != NULL);
+ /* XXX Perhaps we should create a specialized
+ PyFrame_New() that doesn't take locals, but does
+ take builtins without sanity checking them.
+ */
+ assert(tstate != NULL);
+ f = PyFrame_New(tstate, co, globals, NULL);
+ if (f == NULL) {
+ return NULL;
+ }
+ fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
+ for (i = 0; i < na; i++) {
+ Py_INCREF(*args);
+ fastlocals[i] = *args++;
+ }
+ result = PyEval_EvalFrameEx(f,0);
+ ++tstate->recursion_depth;
+ Py_DECREF(f);
+ --tstate->recursion_depth;
+ return result;
+}
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
+ PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
+ PyObject *globals = PyFunction_GET_GLOBALS(func);
+ PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
+ PyObject *closure;
+#if PY_MAJOR_VERSION >= 3
+ PyObject *kwdefs;
+#endif
+ PyObject *kwtuple, **k;
+ PyObject **d;
+ Py_ssize_t nd;
+ Py_ssize_t nk;
+ PyObject *result;
+ assert(kwargs == NULL || PyDict_Check(kwargs));
+ nk = kwargs ? PyDict_Size(kwargs) : 0;
+ if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
+ return NULL;
+ }
+ if (
+#if PY_MAJOR_VERSION >= 3
+ co->co_kwonlyargcount == 0 &&
+#endif
+ likely(kwargs == NULL || nk == 0) &&
+ co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
+ if (argdefs == NULL && co->co_argcount == nargs) {
+ result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
+ goto done;
+ }
+ else if (nargs == 0 && argdefs != NULL
+ && co->co_argcount == Py_SIZE(argdefs)) {
+        /* function called with no arguments, but all parameters have
+           a default value: use default values as arguments. */
+ args = &PyTuple_GET_ITEM(argdefs, 0);
+ result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
+ goto done;
+ }
+ }
+ if (kwargs != NULL) {
+ Py_ssize_t pos, i;
+ kwtuple = PyTuple_New(2 * nk);
+ if (kwtuple == NULL) {
+ result = NULL;
+ goto done;
+ }
+ k = &PyTuple_GET_ITEM(kwtuple, 0);
+ pos = i = 0;
+ while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
+ Py_INCREF(k[i]);
+ Py_INCREF(k[i+1]);
+ i += 2;
+ }
+ nk = i / 2;
+ }
+ else {
+ kwtuple = NULL;
+ k = NULL;
+ }
+ closure = PyFunction_GET_CLOSURE(func);
+#if PY_MAJOR_VERSION >= 3
+ kwdefs = PyFunction_GET_KW_DEFAULTS(func);
+#endif
+ if (argdefs != NULL) {
+ d = &PyTuple_GET_ITEM(argdefs, 0);
+ nd = Py_SIZE(argdefs);
+ }
+ else {
+ d = NULL;
+ nd = 0;
+ }
+#if PY_MAJOR_VERSION >= 3
+ result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, kwdefs, closure);
+#else
+ result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, closure);
+#endif
+ Py_XDECREF(kwtuple);
+done:
+ Py_LeaveRecursiveCall();
+ return result;
+}
+#endif
+#endif
+
+/* PyObjectCallMethO */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
+ PyObject *self, *result;
+ PyCFunction cfunc;
+ cfunc = PyCFunction_GET_FUNCTION(func);
+ self = PyCFunction_GET_SELF(func);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = cfunc(self, arg);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCallNoArg */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, NULL, 0);
+ }
+#endif
+#ifdef __Pyx_CyFunction_USED
+ if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
+#else
+ if (likely(PyCFunction_Check(func)))
+#endif
+ {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
+ return __Pyx_PyObject_CallMethO(func, NULL);
+ }
+ }
+ return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
+}
+#endif
+
+/* PyCFunctionFastCall */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
+ PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
+ PyCFunction meth = PyCFunction_GET_FUNCTION(func);
+ PyObject *self = PyCFunction_GET_SELF(func);
+ int flags = PyCFunction_GET_FLAGS(func);
+ assert(PyCFunction_Check(func));
+ assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
+ assert(nargs >= 0);
+ assert(nargs == 0 || args != NULL);
+ /* _PyCFunction_FastCallDict() must not be called with an exception set,
+ because it may clear it (directly or indirectly) and so the
+ caller loses its exception */
+ assert(!PyErr_Occurred());
+ if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
+ return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
+ } else {
+ return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
+ }
+}
+#endif
+
+/* PyObjectCallOneArg */
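+/* Single-argument call with a fastest-first dispatch ladder: Python
+   functions take the frame fast path above, METH_O C functions are
+   invoked directly, METH_FASTCALL functions receive the argument array,
+   and only the generic fallback pays for a one-element tuple. */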
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_New(1);
+ if (unlikely(!args)) return NULL;
+ Py_INCREF(arg);
+ PyTuple_SET_ITEM(args, 0, arg);
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, &arg, 1);
+ }
+#endif
+ if (likely(PyCFunction_Check(func))) {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+ return __Pyx_PyObject_CallMethO(func, arg);
+#if CYTHON_FAST_PYCCALL
+ } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
+ return __Pyx_PyCFunction_FastCall(func, &arg, 1);
+#endif
+ }
+ }
+ return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_Pack(1, arg);
+ if (unlikely(!args)) return NULL;
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+#endif
+
+/* PyIntCompare */
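+/* Specialized comparison of a Python object against a constant C long,
+   generated for expressions such as `x == 1`.  Exact ints are compared
+   limb by limb against the constant without allocating; exact floats are
+   compared by value; everything else falls back to rich comparison. */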
+static CYTHON_INLINE PyObject* __Pyx_PyInt_EqObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) {
+ if (op1 == op2) {
+ Py_RETURN_TRUE;
+ }
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_CheckExact(op1))) {
+ const long b = intval;
+ long a = PyInt_AS_LONG(op1);
+ if (a == b) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+ }
+ #endif
+ #if CYTHON_USE_PYLONG_INTERNALS
+ if (likely(PyLong_CheckExact(op1))) {
+ int unequal;
+ unsigned long uintval;
+ Py_ssize_t size = Py_SIZE(op1);
+ const digit* digits = ((PyLongObject*)op1)->ob_digit;
+ if (intval == 0) {
+ if (size == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+ } else if (intval < 0) {
+ if (size >= 0)
+ Py_RETURN_FALSE;
+ intval = -intval;
+ size = -size;
+ } else {
+ if (size <= 0)
+ Py_RETURN_FALSE;
+ }
+ uintval = (unsigned long) intval;
+#if PyLong_SHIFT * 4 < SIZEOF_LONG*8
+        if (uintval >> (PyLong_SHIFT * 4)) {
+            unequal = (size != 5)
+                || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
+                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
+                 | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
+                 | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
+                 | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
+        } else
+#endif
+#if PyLong_SHIFT * 3 < SIZEOF_LONG*8
+        if (uintval >> (PyLong_SHIFT * 3)) {
+            unequal = (size != 4)
+                || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
+                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
+                 | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
+                 | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
+        } else
+#endif
+#if PyLong_SHIFT * 2 < SIZEOF_LONG*8
+        if (uintval >> (PyLong_SHIFT * 2)) {
+            unequal = (size != 3)
+                || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
+                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
+                 | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
+        } else
+#endif
+#if PyLong_SHIFT * 1 < SIZEOF_LONG*8
+        if (uintval >> (PyLong_SHIFT * 1)) {
+            unequal = (size != 2)
+                || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
+                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
+        } else
+#endif
+        unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK));
+ if (unequal == 0) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+ }
+ #endif
+ if (PyFloat_CheckExact(op1)) {
+ const long b = intval;
+ double a = PyFloat_AS_DOUBLE(op1);
+ if ((double)a == (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+ }
+ return (
+ PyObject_RichCompare(op1, op2, Py_EQ));
+}
+
+/* PyObjectCall2Args */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
+ PyObject *args, *result = NULL;
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyFunction_FastCall(function, args, 2);
+ }
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyCFunction_FastCall(function, args, 2);
+ }
+ #endif
+ args = PyTuple_New(2);
+ if (unlikely(!args)) goto done;
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 0, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 1, arg2);
+ Py_INCREF(function);
+ result = __Pyx_PyObject_Call(function, args, NULL);
+ Py_DECREF(args);
+ Py_DECREF(function);
+done:
+ return result;
+}
+
+/* PyObjectGetMethod */
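+/* LOAD_METHOD-style lookup: resolves obj.name, but when the attribute is
+   a plain function found on the type (and not shadowed by the instance
+   dict or a data descriptor) it returns the unbound function and reports
+   1, letting the caller pass obj explicitly instead of allocating a bound
+   method object. */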
+static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) {
+ PyObject *attr;
+#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP
+ PyTypeObject *tp = Py_TYPE(obj);
+ PyObject *descr;
+ descrgetfunc f = NULL;
+ PyObject **dictptr, *dict;
+ int meth_found = 0;
+ assert (*method == NULL);
+ if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) {
+ attr = __Pyx_PyObject_GetAttrStr(obj, name);
+ goto try_unpack;
+ }
+ if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) {
+ return 0;
+ }
+ descr = _PyType_Lookup(tp, name);
+ if (likely(descr != NULL)) {
+ Py_INCREF(descr);
+#if PY_MAJOR_VERSION >= 3
+ #ifdef __Pyx_CyFunction_USED
+ if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr)))
+ #else
+ if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type)))
+ #endif
+#else
+ #ifdef __Pyx_CyFunction_USED
+ if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr)))
+ #else
+ if (likely(PyFunction_Check(descr)))
+ #endif
+#endif
+ {
+ meth_found = 1;
+ } else {
+ f = Py_TYPE(descr)->tp_descr_get;
+ if (f != NULL && PyDescr_IsData(descr)) {
+ attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
+ Py_DECREF(descr);
+ goto try_unpack;
+ }
+ }
+ }
+ dictptr = _PyObject_GetDictPtr(obj);
+ if (dictptr != NULL && (dict = *dictptr) != NULL) {
+ Py_INCREF(dict);
+ attr = __Pyx_PyDict_GetItemStr(dict, name);
+ if (attr != NULL) {
+ Py_INCREF(attr);
+ Py_DECREF(dict);
+ Py_XDECREF(descr);
+ goto try_unpack;
+ }
+ Py_DECREF(dict);
+ }
+ if (meth_found) {
+ *method = descr;
+ return 1;
+ }
+ if (f != NULL) {
+ attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
+ Py_DECREF(descr);
+ goto try_unpack;
+ }
+ if (descr != NULL) {
+ *method = descr;
+ return 0;
+ }
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(name));
+#endif
+ return 0;
+#else
+ attr = __Pyx_PyObject_GetAttrStr(obj, name);
+ goto try_unpack;
+#endif
+try_unpack:
+#if CYTHON_UNPACK_METHODS
+ if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) {
+ PyObject *function = PyMethod_GET_FUNCTION(attr);
+ Py_INCREF(function);
+ Py_DECREF(attr);
+ *method = function;
+ return 1;
+ }
+#endif
+ *method = attr;
+ return 0;
+}
+
+/* PyObjectCallMethod1 */
+static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) {
+ PyObject *result = __Pyx_PyObject_CallOneArg(method, arg);
+ Py_DECREF(method);
+ return result;
+}
+static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) {
+ PyObject *method = NULL, *result;
+ int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
+ if (likely(is_method)) {
+ result = __Pyx_PyObject_Call2Args(method, obj, arg);
+ Py_DECREF(method);
+ return result;
+ }
+ if (unlikely(!method)) return NULL;
+ return __Pyx__PyObject_CallMethod1(method, arg);
+}
+
+/* pop_index */
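+/* Specialization of list.pop(index).  When the list stays at least half
+   full (so CPython's list_resize would not shrink the allocation), the
+   item is removed in place with a memmove; otherwise, and for non-list
+   objects, the ordinary .pop(index) method is looked up and called. */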
+static PyObject* __Pyx__PyObject_PopNewIndex(PyObject* L, PyObject* py_ix) {
+ PyObject *r;
+ if (unlikely(!py_ix)) return NULL;
+ r = __Pyx__PyObject_PopIndex(L, py_ix);
+ Py_DECREF(py_ix);
+ return r;
+}
+static PyObject* __Pyx__PyObject_PopIndex(PyObject* L, PyObject* py_ix) {
+ return __Pyx_PyObject_CallMethod1(L, __pyx_n_s_pop, py_ix);
+}
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+static PyObject* __Pyx__PyList_PopIndex(PyObject* L, PyObject* py_ix, Py_ssize_t ix) {
+ Py_ssize_t size = PyList_GET_SIZE(L);
+ if (likely(size > (((PyListObject*)L)->allocated >> 1))) {
+ Py_ssize_t cix = ix;
+ if (cix < 0) {
+ cix += size;
+ }
+ if (likely(__Pyx_is_valid_index(cix, size))) {
+ PyObject* v = PyList_GET_ITEM(L, cix);
+ __Pyx_SET_SIZE(L, Py_SIZE(L) - 1);
+ size -= 1;
+ memmove(&PyList_GET_ITEM(L, cix), &PyList_GET_ITEM(L, cix+1), (size_t)(size-cix)*sizeof(PyObject*));
+ return v;
+ }
+ }
+ if (py_ix == Py_None) {
+ return __Pyx__PyObject_PopNewIndex(L, PyInt_FromSsize_t(ix));
+ } else {
+ return __Pyx__PyObject_PopIndex(L, py_ix);
+ }
+}
+#endif
+
+/* PyErrExceptionMatches */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+ PyObject *exc_type = tstate->curexc_type;
+ if (exc_type == err) return 1;
+ if (unlikely(!exc_type)) return 0;
+ if (unlikely(PyTuple_Check(err)))
+ return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+ return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetAttr */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
+#if CYTHON_USE_TYPE_SLOTS
+#if PY_MAJOR_VERSION >= 3
+ if (likely(PyUnicode_Check(n)))
+#else
+ if (likely(PyString_Check(n)))
+#endif
+ return __Pyx_PyObject_GetAttrStr(o, n);
+#endif
+ return PyObject_GetAttr(o, n);
+}
+
+/* GetAttr3 */
+static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ return NULL;
+ __Pyx_PyErr_Clear();
+ Py_INCREF(d);
+ return d;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
+ PyObject *r = __Pyx_GetAttr(o, n);
+ return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
+}
+
+/* PyDictVersioning */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
+ PyObject **dictptr = NULL;
+ Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
+ if (offset) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
+#else
+ dictptr = _PyObject_GetDictPtr(obj);
+#endif
+ }
+ return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
+}
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
+ return 0;
+ return obj_dict_version == __Pyx_get_object_dict_version(obj);
+}
+#endif
+
+/* GetModuleGlobalName */
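+/* Looks up a module-level name in __pyx_d, using the dict-versioning
+   cache above where available and a known-hash lookup on CPython >= 3.5;
+   names missing from the module namespace are resolved as builtins. */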
+#if CYTHON_USE_DICT_VERSIONS
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
+#else
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
+#endif
+{
+ PyObject *result;
+#if !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+ result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ } else if (unlikely(PyErr_Occurred())) {
+ return NULL;
+ }
+#else
+ result = PyDict_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+#endif
+#else
+ result = PyObject_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+ PyErr_Clear();
+#endif
+ return __Pyx_GetBuiltinName(name);
+}
+
+/* Import */
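+/* Implements the import statement.  On Python 3, a level of -1
+   (Python-2-style implicit relative import) first attempts a
+   package-relative import and falls back to an absolute one; all other
+   levels go straight to PyImport_ImportModuleLevelObject.  Python 2
+   dispatches through __builtin__.__import__ instead. */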
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_import;
+ py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+ if (!py_import)
+ goto bad;
+ #endif
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ {
+ #if PY_MAJOR_VERSION >= 3
+ if (level == -1) {
+ if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, 1);
+ if (!module) {
+ if (!PyErr_ExceptionMatches(PyExc_ImportError))
+ goto bad;
+ PyErr_Clear();
+ }
+ }
+ level = 0;
+ }
+ #endif
+ if (!module) {
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_level = PyInt_FromLong(level);
+ if (!py_level)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
+ Py_DECREF(py_level);
+ #else
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, level);
+ #endif
+ }
+ }
+bad:
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(py_import);
+ #endif
+ Py_XDECREF(empty_list);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+/* ImportFrom */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+ PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+ if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Format(PyExc_ImportError,
+ #if PY_MAJOR_VERSION < 3
+ "cannot import name %.230s", PyString_AS_STRING(name));
+ #else
+ "cannot import name %S", name);
+ #endif
+ }
+ return value;
+}
+
+/* HasAttr */
+static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
+ PyObject *r;
+ if (unlikely(!__Pyx_PyBaseString_Check(n))) {
+ PyErr_SetString(PyExc_TypeError,
+ "hasattr(): attribute name must be string");
+ return -1;
+ }
+ r = __Pyx_GetAttr(o, n);
+ if (unlikely(!r)) {
+ PyErr_Clear();
+ return 0;
+ } else {
+ Py_DECREF(r);
+ return 1;
+ }
+}
+
+/* PyObject_GenericGetAttrNoDict */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, attr_name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(attr_name));
+#endif
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
+ PyObject *descr;
+ PyTypeObject *tp = Py_TYPE(obj);
+ if (unlikely(!PyString_Check(attr_name))) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ assert(!tp->tp_dictoffset);
+ descr = _PyType_Lookup(tp, attr_name);
+ if (unlikely(!descr)) {
+ return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
+ }
+ Py_INCREF(descr);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
+ #endif
+ {
+ descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
+ if (unlikely(f)) {
+ PyObject *res = f(descr, obj, (PyObject *)tp);
+ Py_DECREF(descr);
+ return res;
+ }
+ }
+ return descr;
+}
+#endif
+
+/* PyObject_GenericGetAttr */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
+ if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
+}
+#endif
+
+/* SetVTable */
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+#if PY_VERSION_HEX >= 0x02070000
+ PyObject *ob = PyCapsule_New(vtable, 0, 0);
+#else
+ PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
+#endif
+ if (!ob)
+ goto bad;
+ if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
+ goto bad;
+ Py_DECREF(ob);
+ return 0;
+bad:
+ Py_XDECREF(ob);
+ return -1;
+}
+
+/* PyObjectGetAttrStrNoError */
+static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ __Pyx_PyErr_Clear();
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
+ PyObject *result;
+#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
+ return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
+ }
+#endif
+ result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
+ if (unlikely(!result)) {
+ __Pyx_PyObject_GetAttrStr_ClearAttributeError();
+ }
+ return result;
+}
+
+/* SetupReduce */
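+/* Makes the extension type picklable.  Unless the class defines
+   __getstate__, the __reduce_cython__/__setstate_cython__ methods that
+   Cython generated are renamed to __reduce__/__setstate__ in the type
+   dict, but only while the type still inherits object's default
+   __reduce_ex__/__reduce__. */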
+static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
+ int ret;
+ PyObject *name_attr;
+ name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name);
+ if (likely(name_attr)) {
+ ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
+ } else {
+ ret = -1;
+ }
+ if (unlikely(ret < 0)) {
+ PyErr_Clear();
+ ret = 0;
+ }
+ Py_XDECREF(name_attr);
+ return ret;
+}
+static int __Pyx_setup_reduce(PyObject* type_obj) {
+ int ret = 0;
+ PyObject *object_reduce = NULL;
+ PyObject *object_reduce_ex = NULL;
+ PyObject *reduce = NULL;
+ PyObject *reduce_ex = NULL;
+ PyObject *reduce_cython = NULL;
+ PyObject *setstate = NULL;
+ PyObject *setstate_cython = NULL;
+#if CYTHON_USE_PYTYPE_LOOKUP
+ if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
+#else
+ if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
+#endif
+#if CYTHON_USE_PYTYPE_LOOKUP
+ object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
+#else
+ object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
+#endif
+ reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
+ if (reduce_ex == object_reduce_ex) {
+#if CYTHON_USE_PYTYPE_LOOKUP
+ object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
+#else
+ object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
+#endif
+ reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
+ if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
+ reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
+ if (likely(reduce_cython)) {
+ ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ } else if (reduce == object_reduce || PyErr_Occurred()) {
+ goto __PYX_BAD;
+ }
+ setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
+ if (!setstate) PyErr_Clear();
+ if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
+ setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
+ if (likely(setstate_cython)) {
+ ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ } else if (!setstate || PyErr_Occurred()) {
+ goto __PYX_BAD;
+ }
+ }
+ PyType_Modified((PyTypeObject*)type_obj);
+ }
+ }
+ goto __PYX_GOOD;
+__PYX_BAD:
+ if (!PyErr_Occurred())
+ PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
+ ret = -1;
+__PYX_GOOD:
+#if !CYTHON_USE_PYTYPE_LOOKUP
+ Py_XDECREF(object_reduce);
+ Py_XDECREF(object_reduce_ex);
+#endif
+ Py_XDECREF(reduce);
+ Py_XDECREF(reduce_ex);
+ Py_XDECREF(reduce_cython);
+ Py_XDECREF(setstate);
+ Py_XDECREF(setstate_cython);
+ return ret;
+}
+
+/* CLineInTraceback */
+#ifndef CYTHON_CLINE_IN_TRACEBACK
+static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
+ PyObject *use_cline;
+ PyObject *ptype, *pvalue, *ptraceback;
+#if CYTHON_COMPILING_IN_CPYTHON
+ PyObject **cython_runtime_dict;
+#endif
+ if (unlikely(!__pyx_cython_runtime)) {
+ return c_line;
+ }
+ __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
+#if CYTHON_COMPILING_IN_CPYTHON
+ cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
+ if (likely(cython_runtime_dict)) {
+ __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
+ use_cline, *cython_runtime_dict,
+ __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
+ } else
+#endif
+ {
+ PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
+ if (use_cline_obj) {
+ use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
+ Py_DECREF(use_cline_obj);
+ } else {
+ PyErr_Clear();
+ use_cline = NULL;
+ }
+ }
+ if (!use_cline) {
+ c_line = 0;
+ PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
+ }
+ else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
+ c_line = 0;
+ }
+ __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
+ return c_line;
+}
+#endif
+
+/* CodeObjectCache */
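+/* Cache mapping a source line to the fabricated PyCodeObject used for
+   tracebacks.  Entries are kept sorted by line number;
+   __pyx_bisect_code_objects is a plain binary search returning either the
+   matching slot or the insertion position, and the entry array grows in
+   blocks of 64. */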
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+ int start = 0, mid = 0, end = count - 1;
+ if (end >= 0 && code_line > entries[end].code_line) {
+ return count;
+ }
+ while (start < end) {
+ mid = start + (end - start) / 2;
+ if (code_line < entries[mid].code_line) {
+ end = mid;
+ } else if (code_line > entries[mid].code_line) {
+ start = mid + 1;
+ } else {
+ return mid;
+ }
+ }
+ if (code_line <= entries[mid].code_line) {
+ return mid;
+ } else {
+ return mid + 1;
+ }
+}
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+ PyCodeObject* code_object;
+ int pos;
+ if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+ return NULL;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+ return NULL;
+ }
+ code_object = __pyx_code_cache.entries[pos].code_object;
+ Py_INCREF(code_object);
+ return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+ int pos, i;
+ __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+ if (unlikely(!code_line)) {
+ return;
+ }
+ if (unlikely(!entries)) {
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+ if (likely(entries)) {
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = 64;
+ __pyx_code_cache.count = 1;
+ entries[0].code_line = code_line;
+ entries[0].code_object = code_object;
+ Py_INCREF(code_object);
+ }
+ return;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+ PyCodeObject* tmp = entries[pos].code_object;
+ entries[pos].code_object = code_object;
+ Py_DECREF(tmp);
+ return;
+ }
+ if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+ int new_max = __pyx_code_cache.max_count + 64;
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+ __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
+ if (unlikely(!entries)) {
+ return;
+ }
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = new_max;
+ }
+ for (i=__pyx_code_cache.count; i>pos; i--) {
+ entries[i] = entries[i-1];
+ }
+ entries[pos].code_line = code_line;
+ entries[pos].code_object = code_object;
+ __pyx_code_cache.count++;
+ Py_INCREF(code_object);
+}
+
+/* AddTraceback */
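+/* Pushes a synthetic Python frame onto the current traceback so that
+   exceptions raised from C code still point at the .pyx source.  A dummy
+   code object (empty bytecode, but real filename, function name and line)
+   is built, cached under the line number (negated for C lines), and
+   reported via PyTraceBack_Here. */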
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+ const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(filename);
+ #else
+ py_srcfile = PyUnicode_FromString(filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (c_line) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_code = __Pyx_PyCode_New(
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ py_line,
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ Py_DECREF(py_srcfile);
+ Py_DECREF(py_funcname);
+ return py_code;
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ if (c_line) {
+ c_line = __Pyx_CLineForTraceback(tstate, c_line);
+ }
+ py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
+ if (!py_code) {
+ py_code = __Pyx_CreateCodeObjectForTraceback(
+ funcname, c_line, py_line, filename);
+ if (!py_code) goto bad;
+ __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
+ }
+ py_frame = PyFrame_New(
+ tstate, /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ __pyx_d, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(int) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(int) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(int) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(int),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntFromPyVerify */
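+/* Overflow guard shared by the CIntFromPy conversions: the fetched value
+   is round-tripped through the target type, and a mismatch jumps to the
+   raise_overflow/raise_neg_overflow labels defined in the enclosing
+   conversion function.  The _EXC variant first checks the -1 error
+   convention of the PyLong_As* getters. */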
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+ {\
+ func_type value = func_value;\
+ if (sizeof(target_type) < sizeof(func_type)) {\
+ if (unlikely(value != (func_type) (target_type) value)) {\
+ func_type zero = 0;\
+ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+ return (target_type) -1;\
+ if (is_unsigned && unlikely(value < zero))\
+ goto raise_neg_overflow;\
+ else\
+ goto raise_overflow;\
+ }\
+ }\
+ return (target_type) value;\
+ }
+
+/* CIntToPy */
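+/* Same conversion, instantiated for long.  Comparisons such as
+   sizeof(long) < sizeof(long) look odd but are an artifact of expanding
+   one template for every integer type; the compiler folds these
+   constant branches away. */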
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(long) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(long) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(long) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(long),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(long) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (long) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+ return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+ return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+ return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (long) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(long) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(long) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ long val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (long) -1;
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (long) -1;
+ val = __Pyx_PyInt_As_long(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to long");
+ return (long) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long) -1;
+}
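+/* Note on the strategy above: exact PyLong objects with few digits are read
+   straight from their ob_digit array (the CYTHON_USE_PYLONG_INTERNALS fast
+   path), larger values go through PyLong_AsLong / PyLong_AsUnsignedLong,
+   other objects are coerced via __Pyx_PyNumber_IntOrLong, and
+   _PyLong_AsByteArray is the last resort. The error convention is the
+   usual CPython one, sketched below (not compiled; names hypothetical): */
+#if 0
+    long n = __Pyx_PyInt_As_long(py_obj);
+    if (n == (long)-1 && PyErr_Occurred()) goto error;
+#endif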
+
+/* CIntFromPy */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(int) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (int) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+ return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+ return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+ return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (int) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(int) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(int) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ int val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (int) -1;
+ }
+ } else {
+ int val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (int) -1;
+ val = __Pyx_PyInt_As_int(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to int");
+ return (int) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to int");
+ return (int) -1;
+}
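+/* Note: this is the same conversion template as __Pyx_PyInt_As_long above,
+   instantiated for C int; only the target type and the overflow messages
+   differ. */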
+
+/* FastTypeChecks */
+#if CYTHON_COMPILING_IN_CPYTHON
+static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
+ while (a) {
+ a = a->tp_base;
+ if (a == b)
+ return 1;
+ }
+ return b == &PyBaseObject_Type;
+}
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
+ PyObject *mro;
+ if (a == b) return 1;
+ mro = a->tp_mro;
+ if (likely(mro)) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(mro);
+ for (i = 0; i < n; i++) {
+ if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+ return 1;
+ }
+ return 0;
+ }
+ return __Pyx_InBases(a, b);
+}
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+ PyObject *exception, *value, *tb;
+ int res;
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&exception, &value, &tb);
+ res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ if (!res) {
+ res = PyObject_IsSubclass(err, exc_type2);
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ }
+ __Pyx_ErrRestore(exception, value, tb);
+ return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+ int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+ if (!res) {
+ res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+ }
+ return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ assert(PyExceptionClass_Check(exc_type));
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ PyObject *t = PyTuple_GET_ITEM(tuple, i);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(exc_type == t)) return 1;
+ #endif
+ if (likely(PyExceptionClass_Check(t))) {
+ if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+ } else {
+ }
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+ if (likely(err == exc_type)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ if (likely(PyExceptionClass_Check(exc_type))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+ } else if (likely(PyTuple_Check(exc_type))) {
+ return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+ } else {
+ }
+ }
+ return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+ assert(PyExceptionClass_Check(exc_type1));
+ assert(PyExceptionClass_Check(exc_type2));
+ if (likely(err == exc_type1 || err == exc_type2)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+ }
+ return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
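+/* Note: these helpers avoid the overhead of PyErr_GivenExceptionMatches by
+   walking tp_mro (or the tp_base chain when no MRO is set) directly. The
+   Python 2 variant saves and restores the thread's error indicator around
+   PyObject_IsSubclass, since that call can run user code and clobber the
+   exception being matched. */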
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+ char ctversion[4], rtversion[4];
+ PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+ PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+ if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+ char message[200];
+ PyOS_snprintf(message, sizeof(message),
+ "compiletime version %s of module '%.100s' "
+ "does not match runtime version %s",
+ ctversion, __Pyx_MODULE_NAME, rtversion);
+ return PyErr_WarnEx(NULL, message, 1);
+ }
+ return 0;
+}
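+/* Note: only the first major and minor version digits are compared
+   (snprintf'd as "X.Y" into 4-byte buffers), and a mismatch triggers a
+   Python warning rather than a hard import failure. */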
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ if (PyObject_Hash(*t->p) == -1)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
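+/* Note: __Pyx_InitStrings runs once at module initialisation, materialising
+   each __Pyx_StringTabEntry as an interned str, a decoded unicode object,
+   or a bytes object as flagged; calling PyObject_Hash up front caches each
+   hash and surfaces any failure immediately. */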
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+ return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+ Py_ssize_t ignore;
+ return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ char* defenc_c;
+ PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+ if (!defenc) return NULL;
+ defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ {
+ char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+ char* c;
+ for (c = defenc_c; c < end; c++) {
+ if ((unsigned char) (*c) >= 128) {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+ }
+ }
+#endif
+ *length = PyBytes_GET_SIZE(defenc);
+ return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ if (likely(PyUnicode_IS_ASCII(o))) {
+ *length = PyUnicode_GET_LENGTH(o);
+ return PyUnicode_AsUTF8(o);
+ } else {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+#else
+ return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+ if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+ PyUnicode_Check(o)) {
+ return __Pyx_PyUnicode_AsStringAndSize(o, length);
+ } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+ if (PyByteArray_Check(o)) {
+ *length = PyByteArray_GET_SIZE(o);
+ return PyByteArray_AS_STRING(o);
+ } else
+#endif
+ {
+ char* result;
+ int r = PyBytes_AsStringAndSize(o, &result, length);
+ if (unlikely(r < 0)) {
+ return NULL;
+ } else {
+ return result;
+ }
+ }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
+ int retval;
+ if (unlikely(!x)) return -1;
+ retval = __Pyx_PyObject_IsTrue(x);
+ Py_DECREF(x);
+ return retval;
+}
+static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
+#if PY_MAJOR_VERSION >= 3
+ if (PyLong_Check(result)) {
+ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
+ "__int__ returned non-int (type %.200s). "
+ "The ability to return an instance of a strict subclass of int "
+ "is deprecated, and may be removed in a future version of Python.",
+ Py_TYPE(result)->tp_name)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ return result;
+ }
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%.4s__ returned non-%.4s (type %.200s)",
+ type_name, type_name, Py_TYPE(result)->tp_name);
+ Py_DECREF(result);
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
+#if CYTHON_USE_TYPE_SLOTS
+ PyNumberMethods *m;
+#endif
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x) || PyLong_Check(x)))
+#else
+ if (likely(PyLong_Check(x)))
+#endif
+ return __Pyx_NewRef(x);
+#if CYTHON_USE_TYPE_SLOTS
+ m = Py_TYPE(x)->tp_as_number;
+ #if PY_MAJOR_VERSION < 3
+ if (m && m->nb_int) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = m->nb_long(x);
+ }
+ #else
+ if (likely(m && m->nb_int)) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ #endif
+#else
+ if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
+ res = PyNumber_Int(x);
+ }
+#endif
+ if (likely(res)) {
+#if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
+#else
+ if (unlikely(!PyLong_CheckExact(res))) {
+#endif
+ return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
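+/* Note: __Pyx_PyNumber_IntOrLong coerces through the nb_int (and, on
+   Python 2, nb_long) type slots rather than a generic PyNumber_Long call;
+   exact int inputs are returned with just a new reference, a strict int
+   subclass returned from __int__ draws a DeprecationWarning, and any
+   non-int result raises TypeError. */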
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject *x;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_CheckExact(b))) {
+ if (sizeof(Py_ssize_t) >= sizeof(long))
+ return PyInt_AS_LONG(b);
+ else
+ return PyInt_AsSsize_t(b);
+ }
+#endif
+ if (likely(PyLong_CheckExact(b))) {
+ #if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)b)->ob_digit;
+ const Py_ssize_t size = Py_SIZE(b);
+ if (likely(__Pyx_sst_abs(size) <= 1)) {
+ ival = likely(size) ? digits[0] : 0;
+ if (size == -1) ival = -ival;
+ return ival;
+ } else {
+ switch (size) {
+ case 2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ }
+ }
+ #endif
+ return PyLong_AsSsize_t(b);
+ }
+ x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
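+/* Note: __Pyx_PyIndex_AsSsize_t mirrors the integer fast paths above (exact
+   ints of at most four digits are decoded in place) and routes every other
+   object through PyNumber_Index, so anything implementing __index__ can be
+   used as a sequence index. */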
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
+ return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+ return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/third_party/python/aiohttp/aiohttp/_frozenlist.pyx b/third_party/python/aiohttp/aiohttp/_frozenlist.pyx
new file mode 100644
index 0000000000..b1305772f4
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_frozenlist.pyx
@@ -0,0 +1,108 @@
+from collections.abc import MutableSequence
+
+
+cdef class FrozenList:
+
+ cdef readonly bint frozen
+ cdef list _items
+
+ def __init__(self, items=None):
+ self.frozen = False
+ if items is not None:
+ items = list(items)
+ else:
+ items = []
+ self._items = items
+
+ cdef object _check_frozen(self):
+ if self.frozen:
+ raise RuntimeError("Cannot modify frozen list.")
+
+ cdef inline object _fast_len(self):
+ return len(self._items)
+
+ def freeze(self):
+ self.frozen = True
+
+ def __getitem__(self, index):
+ return self._items[index]
+
+ def __setitem__(self, index, value):
+ self._check_frozen()
+ self._items[index] = value
+
+ def __delitem__(self, index):
+ self._check_frozen()
+ del self._items[index]
+
+ def __len__(self):
+ return self._fast_len()
+
+ def __iter__(self):
+ return self._items.__iter__()
+
+ def __reversed__(self):
+ return self._items.__reversed__()
+
+ def __richcmp__(self, other, op):
+ if op == 0: # <
+ return list(self) < other
+ if op == 1: # <=
+ return list(self) <= other
+ if op == 2: # ==
+ return list(self) == other
+ if op == 3: # !=
+ return list(self) != other
+ if op == 4: # >
+ return list(self) > other
+ if op == 5: # >=
+ return list(self) >= other
+
+ def insert(self, pos, item):
+ self._check_frozen()
+ self._items.insert(pos, item)
+
+ def __contains__(self, item):
+ return item in self._items
+
+ def __iadd__(self, items):
+ self._check_frozen()
+ self._items += list(items)
+ return self
+
+ def index(self, item):
+ return self._items.index(item)
+
+ def remove(self, item):
+ self._check_frozen()
+ self._items.remove(item)
+
+ def clear(self):
+ self._check_frozen()
+ self._items.clear()
+
+ def extend(self, items):
+ self._check_frozen()
+ self._items += list(items)
+
+ def reverse(self):
+ self._check_frozen()
+ self._items.reverse()
+
+ def pop(self, index=-1):
+ self._check_frozen()
+ return self._items.pop(index)
+
+ def append(self, item):
+ self._check_frozen()
+ return self._items.append(item)
+
+ def count(self, item):
+ return self._items.count(item)
+
+ def __repr__(self):
+ return '<FrozenList(frozen={}, {!r})>'.format(self.frozen,
+ self._items)
+
+
+MutableSequence.register(FrozenList)
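+# A minimal usage sketch (names are illustrative): instances behave like an
+# ordinary mutable sequence until freeze() is called, after which every
+# mutating method raises while reads keep working.
+#
+#     fl = FrozenList([1, 2])
+#     fl.append(3)   # ok -> [1, 2, 3]
+#     fl.freeze()
+#     fl.append(4)   # RuntimeError: Cannot modify frozen list.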
diff --git a/third_party/python/aiohttp/aiohttp/_headers.pxi b/third_party/python/aiohttp/aiohttp/_headers.pxi
new file mode 100644
index 0000000000..3744721d47
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_headers.pxi
@@ -0,0 +1,83 @@
+# This file is autogenerated from aiohttp/hdrs.py
+# Run ./tools/gen.py to update it whenever the origin file changes.
+
+from . import hdrs
+cdef tuple headers = (
+ hdrs.ACCEPT,
+ hdrs.ACCEPT_CHARSET,
+ hdrs.ACCEPT_ENCODING,
+ hdrs.ACCEPT_LANGUAGE,
+ hdrs.ACCEPT_RANGES,
+ hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
+ hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
+ hdrs.ACCESS_CONTROL_ALLOW_METHODS,
+ hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
+ hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
+ hdrs.ACCESS_CONTROL_MAX_AGE,
+ hdrs.ACCESS_CONTROL_REQUEST_HEADERS,
+ hdrs.ACCESS_CONTROL_REQUEST_METHOD,
+ hdrs.AGE,
+ hdrs.ALLOW,
+ hdrs.AUTHORIZATION,
+ hdrs.CACHE_CONTROL,
+ hdrs.CONNECTION,
+ hdrs.CONTENT_DISPOSITION,
+ hdrs.CONTENT_ENCODING,
+ hdrs.CONTENT_LANGUAGE,
+ hdrs.CONTENT_LENGTH,
+ hdrs.CONTENT_LOCATION,
+ hdrs.CONTENT_MD5,
+ hdrs.CONTENT_RANGE,
+ hdrs.CONTENT_TRANSFER_ENCODING,
+ hdrs.CONTENT_TYPE,
+ hdrs.COOKIE,
+ hdrs.DATE,
+ hdrs.DESTINATION,
+ hdrs.DIGEST,
+ hdrs.ETAG,
+ hdrs.EXPECT,
+ hdrs.EXPIRES,
+ hdrs.FORWARDED,
+ hdrs.FROM,
+ hdrs.HOST,
+ hdrs.IF_MATCH,
+ hdrs.IF_MODIFIED_SINCE,
+ hdrs.IF_NONE_MATCH,
+ hdrs.IF_RANGE,
+ hdrs.IF_UNMODIFIED_SINCE,
+ hdrs.KEEP_ALIVE,
+ hdrs.LAST_EVENT_ID,
+ hdrs.LAST_MODIFIED,
+ hdrs.LINK,
+ hdrs.LOCATION,
+ hdrs.MAX_FORWARDS,
+ hdrs.ORIGIN,
+ hdrs.PRAGMA,
+ hdrs.PROXY_AUTHENTICATE,
+ hdrs.PROXY_AUTHORIZATION,
+ hdrs.RANGE,
+ hdrs.REFERER,
+ hdrs.RETRY_AFTER,
+ hdrs.SEC_WEBSOCKET_ACCEPT,
+ hdrs.SEC_WEBSOCKET_EXTENSIONS,
+ hdrs.SEC_WEBSOCKET_KEY,
+ hdrs.SEC_WEBSOCKET_KEY1,
+ hdrs.SEC_WEBSOCKET_PROTOCOL,
+ hdrs.SEC_WEBSOCKET_VERSION,
+ hdrs.SERVER,
+ hdrs.SET_COOKIE,
+ hdrs.TE,
+ hdrs.TRAILER,
+ hdrs.TRANSFER_ENCODING,
+ hdrs.URI,
+ hdrs.UPGRADE,
+ hdrs.USER_AGENT,
+ hdrs.VARY,
+ hdrs.VIA,
+ hdrs.WWW_AUTHENTICATE,
+ hdrs.WANT_DIGEST,
+ hdrs.WARNING,
+ hdrs.X_FORWARDED_FOR,
+ hdrs.X_FORWARDED_HOST,
+ hdrs.X_FORWARDED_PROTO,
+)
diff --git a/third_party/python/aiohttp/aiohttp/_helpers.c b/third_party/python/aiohttp/aiohttp/_helpers.c
new file mode 100644
index 0000000000..764f998447
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_helpers.c
@@ -0,0 +1,5433 @@
+/* Generated by Cython 0.29.21 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+ #error Cython requires Python 2.6+ or Python 3.3+.
+#else
+#define CYTHON_ABI "0_29_21"
+#define CYTHON_HEX_VERSION 0x001D15F0
+#define CYTHON_FUTURE_DIVISION 1
+#include <stddef.h>
+#ifndef offsetof
+ #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef HAVE_LONG_LONG
+ #if PY_VERSION_HEX >= 0x02070000
+ #define HAVE_LONG_LONG
+ #endif
+#endif
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+ #define CYTHON_COMPILING_IN_PYPY 1
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #if PY_VERSION_HEX < 0x03050000
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#elif defined(PYSTON_VERSION)
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 1
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#else
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 1
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
+ #define CYTHON_USE_PYTYPE_LOOKUP 1
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
+ #define CYTHON_USE_PYLONG_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #if PY_VERSION_HEX < 0x030300F0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #elif !defined(CYTHON_USE_UNICODE_WRITER)
+ #define CYTHON_USE_UNICODE_WRITER 1
+ #endif
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #ifndef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 1
+ #endif
+ #ifndef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
+ #endif
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
+ #endif
+ #ifndef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
+ #endif
+ #ifndef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
+ #endif
+#endif
+#if !defined(CYTHON_FAST_PYCCALL)
+#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
+#endif
+#if CYTHON_USE_PYLONG_INTERNALS
+ #include "longintrepr.h"
+ #undef SHIFT
+ #undef BASE
+ #undef MASK
+ #ifdef SIZEOF_VOID_P
+ enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+ #endif
+#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+# if defined(__cplusplus)
+ template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include <stdint.h>
+#endif
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
+ #define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyClass_Type
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#endif
+ #define __Pyx_DefaultClassType PyType_Type
+#endif
+#ifndef Py_TPFLAGS_CHECKTYPES
+ #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#ifndef METH_STACKLESS
+ #define METH_STACKLESS 0
+#endif
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ #ifndef METH_FASTCALL
+ #define METH_FASTCALL 0x80
+ #endif
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
+ Py_ssize_t nargs, PyObject *kwnames);
+#else
+ #define __Pyx_PyCFunctionFast _PyCFunctionFast
+ #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
+#endif
+#if CYTHON_FAST_PYCCALL
+#define __Pyx_PyFastCFunction_Check(func)\
+ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
+#else
+#define __Pyx_PyFastCFunction_Check(func) 0
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
+ #define PyMem_RawMalloc(n) PyMem_Malloc(n)
+ #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
+ #define PyMem_RawFree(p) PyMem_Free(p)
+#endif
+#if CYTHON_COMPILING_IN_PYSTON
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x03060000
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
+#elif PY_VERSION_HEX >= 0x03000000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#else
+ #define __Pyx_PyThreadState_Current _PyThreadState_Current
+#endif
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+ *key = PyThread_create_key();
+ return 0;
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+ Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+ *key = Py_tss_NEEDS_INIT;
+ return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+ PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+ return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+ PyThread_delete_key(*key);
+ *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+ return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+ return PyThread_get_key_value(*key);
+}
+#endif
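+/* Note: the block above backports the PEP 539 thread-specific storage API
+   (PyThread_tss_*) onto the older PyThread_*_key_value functions for
+   CPython versions before 3.7, so later code can use the modern spelling
+   unconditionally. */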
+#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
+#else
+#define __Pyx_PyDict_NewPresized(n) PyDict_New()
+#endif
+#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+ #define CYTHON_PEP393_ENABLED 1
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
+ #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
+ #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
+ #else
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
+ #endif
+#else
+ #define CYTHON_PEP393_ENABLED 0
+ #define PyUnicode_1BYTE_KIND 1
+ #define PyUnicode_2BYTE_KIND 2
+ #define PyUnicode_4BYTE_KIND 4
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
+ #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+ #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
+ #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
+ #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
+#endif
+#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
+#else
+ #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
+ #define PyObject_ASCII(o) PyObject_Repr(o)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#ifndef PyObject_Unicode
+ #define PyObject_Unicode PyObject_Str
+#endif
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+#if PY_VERSION_HEX >= 0x030900A4
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
+#else
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
+#endif
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+ #define PyNumber_Int PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+ #ifndef PyUnicode_InternFromString
+ #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+ #endif
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
+#else
+ #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
+#if CYTHON_USE_ASYNC_SLOTS
+ #if PY_VERSION_HEX >= 0x030500B1
+ #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+ #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+ #else
+ #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+ #endif
+#else
+ #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+ typedef struct {
+ unaryfunc am_await;
+ unaryfunc am_aiter;
+ unaryfunc am_anext;
+ } __Pyx_PyAsyncMethodsStruct;
+#endif
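+/* Note: PyAsyncMethods (the am_await/am_aiter/am_anext slots) only exists
+   from CPython 3.5, so the struct is declared here for older builds and,
+   on early 3.5 pre-releases, read out of tp_reserved instead of
+   tp_as_async. */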
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+ #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+ float value;
+ memset(&value, 0xFF, sizeof(value));
+ return value;
+}
+#endif
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+#define __PYX_MARK_ERR_POS(f_index, lineno) \
+ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
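+/* Note: __PYX_ERR records the Python-level filename and line (plus the C
+   line via __LINE__) for traceback construction, then jumps to the
+   enclosing function's error label. */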
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#define __PYX_HAVE__aiohttp___helpers
+#define __PYX_HAVE_API__aiohttp___helpers
+/* Early includes */
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+ (sizeof(type) < sizeof(Py_ssize_t)) ||\
+ (sizeof(type) > sizeof(Py_ssize_t) &&\
+ likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX) &&\
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+ v == (type)PY_SSIZE_T_MIN))) ||\
+ (sizeof(type) == sizeof(Py_ssize_t) &&\
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX))) )
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+ return (size_t) i < (size_t) limit;
+}
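+/* Note: __Pyx_is_valid_index is the classic single-comparison bounds check:
+   casting a negative index to size_t wraps it to a huge value, so one
+   unsigned comparison covers both 0 <= i and i < limit. */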
+#if defined (__cplusplus) && __cplusplus >= 201103L
+ #include <cstdlib>
+ #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+ #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+ #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+ #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
+ const Py_UNICODE *u_end = u;
+ while (*u_end++) ;
+ return (size_t)(u_end - u - 1);
+}
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
+#define __Pyx_PySequence_Tuple(obj)\
+ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_ASSUME_SAFE_MACROS
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
+#else
+#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
+#endif
+#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ const char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ if (strcmp(default_encoding_c, "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (!ascii_chars_u) goto bad;
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ Py_DECREF(ascii_chars_u);
+ Py_DECREF(ascii_chars_b);
+ }
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
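+/* likely()/unlikely() wrap __builtin_expect() so the compiler can place the
+ * expected branch on the fall-through path; on compilers without the builtin
+ * they expand to the bare condition.
+ */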
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+
+static PyObject *__pyx_m = NULL;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_cython_runtime = NULL;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char *__pyx_cfilenm = __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "aiohttp/_helpers.pyx",
+ "stringsource",
+};
+
+/*--- Type declarations ---*/
+struct __pyx_obj_7aiohttp_8_helpers_reify;
+
+/* "aiohttp/_helpers.pyx":1
+ * cdef class reify: # <<<<<<<<<<<<<<
+ * """Use as a class method decorator. It operates almost exactly like
+ * the Python `@property` decorator, but it puts the result of the
+ */
+struct __pyx_obj_7aiohttp_8_helpers_reify {
+ PyObject_HEAD
+ PyObject *wrapped;
+ PyObject *name;
+};
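+/* C layout of the reify cdef class: PyObject_HEAD followed by one object
+ * slot per cdef attribute (wrapped, name) declared in _helpers.pyx.
+ */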
+
+
+/* --- Runtime support code (head) --- */
+/* Refnanny.proto */
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ if (acquire_gil) {\
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ PyGILState_Release(__pyx_gilstate_save);\
+ } else {\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ }
+#else
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+ #define __Pyx_RefNannyFinishContext()\
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_DECREF(tmp);\
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
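+/* RefNanny is Cython's debug refcount tracer: when CYTHON_REFNANNY is set,
+ * every INCREF/DECREF/GOTREF/GIVEREF is reported to the API struct imported
+ * via __Pyx_RefNannyImportAPI; otherwise the macros collapse to plain
+ * Py_INCREF/Py_DECREF or to nothing.
+ */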
+
+/* PyObjectGetAttrStr.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/* GetBuiltinName.proto */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+/* RaiseDoubleKeywords.proto */
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
+
+/* ParseKeywords.proto */
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
+ const char* function_name);
+
+/* RaiseArgTupleInvalid.proto */
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
+
+/* GetItemInt.proto */
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
+ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
+ __Pyx_GetItemInt_Generic(o, to_py_func(i))))
+#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+ int is_list, int wraparound, int boundscheck);
+
+/* ObjectGetItem.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
+#else
+#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
+#endif
+
+/* GetTopmostException.proto */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
+#endif
+
+/* PyThreadStateGet.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
+#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
+#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
+#else
+#define __Pyx_PyThreadState_declare
+#define __Pyx_PyThreadState_assign
+#define __Pyx_PyErr_Occurred() PyErr_Occurred()
+#endif
+
+/* SaveResetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+#else
+#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
+#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
+#endif
+
+/* PyErrExceptionMatches.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
+#else
+#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
+#endif
+
+/* GetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* PyCFunctionFastCall.proto */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
+#else
+#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
+#endif
+
+/* PyFunctionFastCall.proto */
+#if CYTHON_FAST_PYCALL
+#define __Pyx_PyFunction_FastCall(func, args, nargs)\
+ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
+#else
+#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
+#endif
+#define __Pyx_BUILD_ASSERT_EXPR(cond)\
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+#ifndef Py_MEMBER_SIZE
+#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
+#endif
+ static size_t __pyx_pyframe_localsplus_offset = 0;
+ #include "frameobject.h"
+ #define __Pxy_PyFrame_Initialize_Offsets()\
+ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
+ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
+ #define __Pyx_PyFrame_GetLocalsplus(frame)\
+ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
+#endif
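+/* __Pyx_BUILD_ASSERT_EXPR is a compile-time assertion: a false condition
+ * yields a negative array size and aborts compilation. The fast-call path
+ * uses it in __Pxy_PyFrame_Initialize_Offsets (the "Pxy" spelling is the
+ * upstream identifier) to locate f_localsplus inside PyFrameObject.
+ */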
+
+/* PyObjectCall.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+/* PyObjectCall2Args.proto */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
+
+/* PyObjectCallMethO.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectCallOneArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+/* PyErrFetchRestore.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
+#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
+#else
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#endif
+#else
+#define __Pyx_PyErr_Clear() PyErr_Clear()
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
+#endif
+
+/* RaiseException.proto */
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
+
+/* GetAttr.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
+
+/* GetAttr3.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
+
+/* PyDictVersioning.proto */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
+#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
+ (version_var) = __PYX_GET_DICT_VERSION(dict);\
+ (cache_var) = (value);
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
+ (VAR) = __pyx_dict_cached_value;\
+ } else {\
+ (VAR) = __pyx_dict_cached_value = (LOOKUP);\
+ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
+ }\
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
+#else
+#define __PYX_GET_DICT_VERSION(dict) (0)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
+#endif
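+/* Dict-version caching (PEP 509): CPython bumps ma_version_tag on every dict
+ * mutation, so a cached lookup result remains valid while the recorded tag
+ * still matches and the dict probe can be skipped entirely.
+ */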
+
+/* GetModuleGlobalName.proto */
+#if CYTHON_USE_DICT_VERSIONS
+#define __Pyx_GetModuleGlobalName(var, name) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
+ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
+ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
+ PY_UINT64_T __pyx_dict_version;\
+ PyObject *__pyx_dict_cached_value;\
+ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
+#else
+#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
+#endif
+
+/* Import.proto */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
+/* ImportFrom.proto */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
+
+/* HasAttr.proto */
+static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
+
+/* PyObject_GenericGetAttrNoDict.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
+#endif
+
+/* PyObject_GenericGetAttr.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
+#endif
+
+/* PyObjectGetAttrStrNoError.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
+
+/* SetupReduce.proto */
+static int __Pyx_setup_reduce(PyObject* type_obj);
+
+/* CLineInTraceback.proto */
+#ifdef CYTHON_CLINE_IN_TRACEBACK
+#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
+#else
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
+#endif
+
+/* CodeObjectCache.proto */
+typedef struct {
+ PyCodeObject* code_object;
+ int code_line;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+/* AddTraceback.proto */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+/* FastTypeChecks.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
+#else
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
+#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
+/* CheckBinaryVersion.proto */
+static int __Pyx_check_binary_version(void);
+
+/* InitStrings.proto */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
+
+
+/* Module declarations from 'aiohttp._helpers' */
+static PyTypeObject *__pyx_ptype_7aiohttp_8_helpers_reify = 0;
+static PyObject *__pyx_f_7aiohttp_8_helpers___pyx_unpickle_reify__set_state(struct __pyx_obj_7aiohttp_8_helpers_reify *, PyObject *); /*proto*/
+#define __Pyx_MODULE_NAME "aiohttp._helpers"
+extern int __pyx_module_is_main_aiohttp___helpers;
+int __pyx_module_is_main_aiohttp___helpers = 0;
+
+/* Implementation of 'aiohttp._helpers' */
+static PyObject *__pyx_builtin_KeyError;
+static PyObject *__pyx_builtin_AttributeError;
+static const char __pyx_k_doc[] = "__doc__";
+static const char __pyx_k_new[] = "__new__";
+static const char __pyx_k_dict[] = "__dict__";
+static const char __pyx_k_main[] = "__main__";
+static const char __pyx_k_name[] = "__name__";
+static const char __pyx_k_test[] = "__test__";
+static const char __pyx_k_cache[] = "_cache";
+static const char __pyx_k_reify[] = "reify";
+static const char __pyx_k_import[] = "__import__";
+static const char __pyx_k_pickle[] = "pickle";
+static const char __pyx_k_reduce[] = "__reduce__";
+static const char __pyx_k_update[] = "update";
+static const char __pyx_k_wrapped[] = "wrapped";
+static const char __pyx_k_KeyError[] = "KeyError";
+static const char __pyx_k_getstate[] = "__getstate__";
+static const char __pyx_k_pyx_type[] = "__pyx_type";
+static const char __pyx_k_setstate[] = "__setstate__";
+static const char __pyx_k_pyx_state[] = "__pyx_state";
+static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
+static const char __pyx_k_pyx_result[] = "__pyx_result";
+static const char __pyx_k_PickleError[] = "PickleError";
+static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
+static const char __pyx_k_stringsource[] = "stringsource";
+static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
+static const char __pyx_k_AttributeError[] = "AttributeError";
+static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
+static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
+static const char __pyx_k_aiohttp__helpers[] = "aiohttp._helpers";
+static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
+static const char __pyx_k_pyx_unpickle_reify[] = "__pyx_unpickle_reify";
+static const char __pyx_k_reified_property_is_read_only[] = "reified property is read-only";
+static const char __pyx_k_Incompatible_checksums_s_vs_0x77[] = "Incompatible checksums (%s vs 0x770cb8f = (name, wrapped))";
+static PyObject *__pyx_n_s_AttributeError;
+static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x77;
+static PyObject *__pyx_n_s_KeyError;
+static PyObject *__pyx_n_s_PickleError;
+static PyObject *__pyx_n_s_aiohttp__helpers;
+static PyObject *__pyx_n_s_cache;
+static PyObject *__pyx_n_s_cline_in_traceback;
+static PyObject *__pyx_n_s_dict;
+static PyObject *__pyx_n_s_doc;
+static PyObject *__pyx_n_s_getstate;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_name;
+static PyObject *__pyx_n_s_new;
+static PyObject *__pyx_n_s_pickle;
+static PyObject *__pyx_n_s_pyx_PickleError;
+static PyObject *__pyx_n_s_pyx_checksum;
+static PyObject *__pyx_n_s_pyx_result;
+static PyObject *__pyx_n_s_pyx_state;
+static PyObject *__pyx_n_s_pyx_type;
+static PyObject *__pyx_n_s_pyx_unpickle_reify;
+static PyObject *__pyx_n_s_reduce;
+static PyObject *__pyx_n_s_reduce_cython;
+static PyObject *__pyx_n_s_reduce_ex;
+static PyObject *__pyx_kp_u_reified_property_is_read_only;
+static PyObject *__pyx_n_s_reify;
+static PyObject *__pyx_n_s_setstate;
+static PyObject *__pyx_n_s_setstate_cython;
+static PyObject *__pyx_kp_s_stringsource;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_update;
+static PyObject *__pyx_n_s_wrapped;
+static int __pyx_pf_7aiohttp_8_helpers_5reify___init__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self, PyObject *__pyx_v_wrapped); /* proto */
+static PyObject *__pyx_pf_7aiohttp_8_helpers_5reify_7__doc_____get__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_8_helpers_5reify_2__get__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self, PyObject *__pyx_v_inst, CYTHON_UNUSED PyObject *__pyx_v_owner); /* proto */
+static int __pyx_pf_7aiohttp_8_helpers_5reify_4__set__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_inst, CYTHON_UNUSED PyObject *__pyx_v_value); /* proto */
+static PyObject *__pyx_pf_7aiohttp_8_helpers_5reify_6__reduce_cython__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_8_helpers_5reify_8__setstate_cython__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_pf_7aiohttp_8_helpers___pyx_unpickle_reify(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_tp_new_7aiohttp_8_helpers_reify(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
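+/* 124832655 == 0x770cb8f, the checksum __pyx_unpickle_reify uses below to
+ * detect layout changes in the reify type. */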
+static PyObject *__pyx_int_124832655;
+static PyObject *__pyx_tuple_;
+static PyObject *__pyx_tuple__2;
+static PyObject *__pyx_codeobj__3;
+/* Late includes */
+
+/* "aiohttp/_helpers.pyx":13
+ * cdef object name
+ *
+ * def __init__(self, wrapped): # <<<<<<<<<<<<<<
+ * self.wrapped = wrapped
+ * self.name = wrapped.__name__
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_8_helpers_5reify_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_7aiohttp_8_helpers_5reify_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_wrapped = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_wrapped,0};
+ PyObject* values[1] = {0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wrapped)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 13, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ }
+ __pyx_v_wrapped = values[0];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 13, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_8_helpers_5reify___init__(((struct __pyx_obj_7aiohttp_8_helpers_reify *)__pyx_v_self), __pyx_v_wrapped);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_7aiohttp_8_helpers_5reify___init__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self, PyObject *__pyx_v_wrapped) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__init__", 0);
+
+ /* "aiohttp/_helpers.pyx":14
+ *
+ * def __init__(self, wrapped):
+ * self.wrapped = wrapped # <<<<<<<<<<<<<<
+ * self.name = wrapped.__name__
+ *
+ */
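+  /* Balanced refcount swap: take a reference to the new value and give it
+     to self, then drop the reference held by the old value before the slot
+     is overwritten. */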
+ __Pyx_INCREF(__pyx_v_wrapped);
+ __Pyx_GIVEREF(__pyx_v_wrapped);
+ __Pyx_GOTREF(__pyx_v_self->wrapped);
+ __Pyx_DECREF(__pyx_v_self->wrapped);
+ __pyx_v_self->wrapped = __pyx_v_wrapped;
+
+ /* "aiohttp/_helpers.pyx":15
+ * def __init__(self, wrapped):
+ * self.wrapped = wrapped
+ * self.name = wrapped.__name__ # <<<<<<<<<<<<<<
+ *
+ * @property
+ */
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_wrapped, __pyx_n_s_name); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->name);
+ __Pyx_DECREF(__pyx_v_self->name);
+ __pyx_v_self->name = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_helpers.pyx":13
+ * cdef object name
+ *
+ * def __init__(self, wrapped): # <<<<<<<<<<<<<<
+ * self.wrapped = wrapped
+ * self.name = wrapped.__name__
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_helpers.pyx":18
+ *
+ * @property
+ * def __doc__(self): # <<<<<<<<<<<<<<
+ * return self.wrapped.__doc__
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_8_helpers_5reify_7__doc___1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_8_helpers_5reify_7__doc___1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_8_helpers_5reify_7__doc_____get__(((struct __pyx_obj_7aiohttp_8_helpers_reify *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_8_helpers_5reify_7__doc_____get__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__get__", 0);
+
+ /* "aiohttp/_helpers.pyx":19
+ * @property
+ * def __doc__(self):
+ * return self.wrapped.__doc__ # <<<<<<<<<<<<<<
+ *
+ * def __get__(self, inst, owner):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->wrapped, __pyx_n_s_doc); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_helpers.pyx":18
+ *
+ * @property
+ * def __doc__(self): # <<<<<<<<<<<<<<
+ * return self.wrapped.__doc__
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__doc__.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_helpers.pyx":21
+ * return self.wrapped.__doc__
+ *
+ * def __get__(self, inst, owner): # <<<<<<<<<<<<<<
+ * try:
+ * try:
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_8_helpers_5reify_3__get__(PyObject *__pyx_v_self, PyObject *__pyx_v_inst, PyObject *__pyx_v_owner); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_8_helpers_5reify_3__get__(PyObject *__pyx_v_self, PyObject *__pyx_v_inst, PyObject *__pyx_v_owner) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_8_helpers_5reify_2__get__(((struct __pyx_obj_7aiohttp_8_helpers_reify *)__pyx_v_self), ((PyObject *)__pyx_v_inst), ((PyObject *)__pyx_v_owner));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
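+/* Exception plumbing for the nested try/except in __get__: the inner try
+ * (labels L9/L11/L12/L13) turns KeyError into compute-cache-return, the
+ * outer one (labels L3/L5/L6/L7) returns self when inst is None on
+ * AttributeError and re-raises otherwise; each exit path restores the
+ * exception state saved by __Pyx_ExceptionSave.
+ */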
+static PyObject *__pyx_pf_7aiohttp_8_helpers_5reify_2__get__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self, PyObject *__pyx_v_inst, CYTHON_UNUSED PyObject *__pyx_v_owner) {
+ PyObject *__pyx_v_val = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ int __pyx_t_9;
+ PyObject *__pyx_t_10 = NULL;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ int __pyx_t_14;
+ int __pyx_t_15;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__get__", 0);
+
+ /* "aiohttp/_helpers.pyx":22
+ *
+ * def __get__(self, inst, owner):
+ * try: # <<<<<<<<<<<<<<
+ * try:
+ * return inst._cache[self.name]
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ /*try:*/ {
+
+ /* "aiohttp/_helpers.pyx":23
+ * def __get__(self, inst, owner):
+ * try:
+ * try: # <<<<<<<<<<<<<<
+ * return inst._cache[self.name]
+ * except KeyError:
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6);
+ __Pyx_XGOTREF(__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_5);
+ __Pyx_XGOTREF(__pyx_t_6);
+ /*try:*/ {
+
+ /* "aiohttp/_helpers.pyx":24
+ * try:
+ * try:
+ * return inst._cache[self.name] # <<<<<<<<<<<<<<
+ * except KeyError:
+ * val = self.wrapped(inst)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_inst, __pyx_n_s_cache); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 24, __pyx_L9_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = __Pyx_PyObject_GetItem(__pyx_t_7, __pyx_v_self->name); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 24, __pyx_L9_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_r = __pyx_t_8;
+ __pyx_t_8 = 0;
+ goto __pyx_L13_try_return;
+
+ /* "aiohttp/_helpers.pyx":23
+ * def __get__(self, inst, owner):
+ * try:
+ * try: # <<<<<<<<<<<<<<
+ * return inst._cache[self.name]
+ * except KeyError:
+ */
+ }
+ __pyx_L9_error:;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+
+ /* "aiohttp/_helpers.pyx":25
+ * try:
+ * return inst._cache[self.name]
+ * except KeyError: # <<<<<<<<<<<<<<
+ * val = self.wrapped(inst)
+ * inst._cache[self.name] = val
+ */
+ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_KeyError);
+ if (__pyx_t_9) {
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_8, &__pyx_t_7, &__pyx_t_10) < 0) __PYX_ERR(0, 25, __pyx_L11_except_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_GOTREF(__pyx_t_10);
+
+ /* "aiohttp/_helpers.pyx":26
+ * return inst._cache[self.name]
+ * except KeyError:
+ * val = self.wrapped(inst) # <<<<<<<<<<<<<<
+ * inst._cache[self.name] = val
+ * return val
+ */
+ __Pyx_INCREF(__pyx_v_self->wrapped);
+ __pyx_t_12 = __pyx_v_self->wrapped; __pyx_t_13 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_12))) {
+ __pyx_t_13 = PyMethod_GET_SELF(__pyx_t_12);
+ if (likely(__pyx_t_13)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_12);
+ __Pyx_INCREF(__pyx_t_13);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_12, function);
+ }
+ }
+ __pyx_t_11 = (__pyx_t_13) ? __Pyx_PyObject_Call2Args(__pyx_t_12, __pyx_t_13, __pyx_v_inst) : __Pyx_PyObject_CallOneArg(__pyx_t_12, __pyx_v_inst);
+ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 26, __pyx_L11_except_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_v_val = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_helpers.pyx":27
+ * except KeyError:
+ * val = self.wrapped(inst)
+ * inst._cache[self.name] = val # <<<<<<<<<<<<<<
+ * return val
+ * except AttributeError:
+ */
+ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_v_inst, __pyx_n_s_cache); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 27, __pyx_L11_except_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ if (unlikely(PyObject_SetItem(__pyx_t_11, __pyx_v_self->name, __pyx_v_val) < 0)) __PYX_ERR(0, 27, __pyx_L11_except_error)
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+
+ /* "aiohttp/_helpers.pyx":28
+ * val = self.wrapped(inst)
+ * inst._cache[self.name] = val
+ * return val # <<<<<<<<<<<<<<
+ * except AttributeError:
+ * if inst is None:
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_val);
+ __pyx_r = __pyx_v_val;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ goto __pyx_L12_except_return;
+ }
+ goto __pyx_L11_except_error;
+ __pyx_L11_except_error:;
+
+ /* "aiohttp/_helpers.pyx":23
+ * def __get__(self, inst, owner):
+ * try:
+ * try: # <<<<<<<<<<<<<<
+ * return inst._cache[self.name]
+ * except KeyError:
+ */
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_XGIVEREF(__pyx_t_5);
+ __Pyx_XGIVEREF(__pyx_t_6);
+ __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6);
+ goto __pyx_L3_error;
+ __pyx_L13_try_return:;
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_XGIVEREF(__pyx_t_5);
+ __Pyx_XGIVEREF(__pyx_t_6);
+ __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6);
+ goto __pyx_L7_try_return;
+ __pyx_L12_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_XGIVEREF(__pyx_t_5);
+ __Pyx_XGIVEREF(__pyx_t_6);
+ __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6);
+ goto __pyx_L7_try_return;
+ }
+
+ /* "aiohttp/_helpers.pyx":22
+ *
+ * def __get__(self, inst, owner):
+ * try: # <<<<<<<<<<<<<<
+ * try:
+ * return inst._cache[self.name]
+ */
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+
+ /* "aiohttp/_helpers.pyx":29
+ * inst._cache[self.name] = val
+ * return val
+ * except AttributeError: # <<<<<<<<<<<<<<
+ * if inst is None:
+ * return self
+ */
+ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_AttributeError);
+ if (__pyx_t_9) {
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_10, &__pyx_t_7, &__pyx_t_8) < 0) __PYX_ERR(0, 29, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_GOTREF(__pyx_t_8);
+
+ /* "aiohttp/_helpers.pyx":30
+ * return val
+ * except AttributeError:
+ * if inst is None: # <<<<<<<<<<<<<<
+ * return self
+ * raise
+ */
+ __pyx_t_14 = (__pyx_v_inst == Py_None);
+ __pyx_t_15 = (__pyx_t_14 != 0);
+ if (__pyx_t_15) {
+
+ /* "aiohttp/_helpers.pyx":31
+ * except AttributeError:
+ * if inst is None:
+ * return self # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_self));
+ __pyx_r = ((PyObject *)__pyx_v_self);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ goto __pyx_L6_except_return;
+
+ /* "aiohttp/_helpers.pyx":30
+ * return val
+ * except AttributeError:
+ * if inst is None: # <<<<<<<<<<<<<<
+ * return self
+ * raise
+ */
+ }
+
+ /* "aiohttp/_helpers.pyx":32
+ * if inst is None:
+ * return self
+ * raise # <<<<<<<<<<<<<<
+ *
+ * def __set__(self, inst, value):
+ */
+ __Pyx_GIVEREF(__pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_ErrRestoreWithState(__pyx_t_10, __pyx_t_7, __pyx_t_8);
+ __pyx_t_10 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0;
+ __PYX_ERR(0, 32, __pyx_L5_except_error)
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_helpers.pyx":22
+ *
+ * def __get__(self, inst, owner):
+ * try: # <<<<<<<<<<<<<<
+ * try:
+ * return inst._cache[self.name]
+ */
+ __Pyx_XGIVEREF(__pyx_t_1);
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
+ goto __pyx_L1_error;
+ __pyx_L7_try_return:;
+ __Pyx_XGIVEREF(__pyx_t_1);
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
+ goto __pyx_L0;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_1);
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_helpers.pyx":21
+ * return self.wrapped.__doc__
+ *
+ * def __get__(self, inst, owner): # <<<<<<<<<<<<<<
+ * try:
+ * try:
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_val);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_helpers.pyx":34
+ * raise
+ *
+ * def __set__(self, inst, value): # <<<<<<<<<<<<<<
+ * raise AttributeError("reified property is read-only")
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_8_helpers_5reify_5__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_inst, PyObject *__pyx_v_value); /*proto*/
+static int __pyx_pw_7aiohttp_8_helpers_5reify_5__set__(PyObject *__pyx_v_self, PyObject *__pyx_v_inst, PyObject *__pyx_v_value) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__set__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_8_helpers_5reify_4__set__(((struct __pyx_obj_7aiohttp_8_helpers_reify *)__pyx_v_self), ((PyObject *)__pyx_v_inst), ((PyObject *)__pyx_v_value));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_7aiohttp_8_helpers_5reify_4__set__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v_inst, CYTHON_UNUSED PyObject *__pyx_v_value) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__set__", 0);
+
+ /* "aiohttp/_helpers.pyx":35
+ *
+ * def __set__(self, inst, value):
+ * raise AttributeError("reified property is read-only") # <<<<<<<<<<<<<<
+ */
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_AttributeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(0, 35, __pyx_L1_error)
+
+ /* "aiohttp/_helpers.pyx":34
+ * raise
+ *
+ * def __set__(self, inst, value): # <<<<<<<<<<<<<<
+ * raise AttributeError("reified property is read-only")
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__set__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_8_helpers_5reify_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_8_helpers_5reify_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_8_helpers_5reify_6__reduce_cython__(((struct __pyx_obj_7aiohttp_8_helpers_reify *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
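+/* Pickle recipe built below: when extra state must be restored the result is
+ * (__pyx_unpickle_reify, (type(self), 0x770cb8f, None), state), so unpickling
+ * calls __setstate_cython__; otherwise the state tuple travels inside the
+ * argument triple and no setstate call is needed.
+ */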
+static PyObject *__pyx_pf_7aiohttp_8_helpers_5reify_6__reduce_cython__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self) {
+ PyObject *__pyx_v_state = 0;
+ PyObject *__pyx_v__dict = 0;
+ int __pyx_v_use_setstate;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":5
+ * cdef object _dict
+ * cdef bint use_setstate
+ * state = (self.name, self.wrapped) # <<<<<<<<<<<<<<
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_self->name);
+ __Pyx_GIVEREF(__pyx_v_self->name);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
+ __Pyx_INCREF(__pyx_v_self->wrapped);
+ __Pyx_GIVEREF(__pyx_v_self->wrapped);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->wrapped);
+ __pyx_v_state = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":6
+ * cdef bint use_setstate
+ * state = (self.name, self.wrapped)
+ * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
+ * if _dict is not None:
+ * state += (_dict,)
+ */
+ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v__dict = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":7
+ * state = (self.name, self.wrapped)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ __pyx_t_2 = (__pyx_v__dict != Py_None);
+ __pyx_t_3 = (__pyx_t_2 != 0);
+ if (__pyx_t_3) {
+
+ /* "(tree fragment)":8
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ * state += (_dict,) # <<<<<<<<<<<<<<
+ * use_setstate = True
+ * else:
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v__dict);
+ __Pyx_GIVEREF(__pyx_v__dict);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
+ __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
+ __pyx_t_4 = 0;
+
+ /* "(tree fragment)":9
+ * if _dict is not None:
+ * state += (_dict,)
+ * use_setstate = True # <<<<<<<<<<<<<<
+ * else:
+ * use_setstate = self.name is not None or self.wrapped is not None
+ */
+ __pyx_v_use_setstate = 1;
+
+ /* "(tree fragment)":7
+ * state = (self.name, self.wrapped)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ goto __pyx_L3;
+ }
+
+ /* "(tree fragment)":11
+ * use_setstate = True
+ * else:
+ * use_setstate = self.name is not None or self.wrapped is not None # <<<<<<<<<<<<<<
+ * if use_setstate:
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, None), state
+ */
+ /*else*/ {
+ __pyx_t_2 = (__pyx_v_self->name != Py_None);
+ __pyx_t_5 = (__pyx_t_2 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->wrapped != Py_None);
+ __pyx_t_2 = (__pyx_t_5 != 0);
+ __pyx_t_3 = __pyx_t_2;
+ __pyx_L4_bool_binop_done:;
+ __pyx_v_use_setstate = __pyx_t_3;
+ }
+ __pyx_L3:;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self.name is not None or self.wrapped is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, None), state
+ * else:
+ */
+ __pyx_t_3 = (__pyx_v_use_setstate != 0);
+ if (__pyx_t_3) {
+
+ /* "(tree fragment)":13
+ * use_setstate = self.name is not None or self.wrapped is not None
+ * if use_setstate:
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, None), state # <<<<<<<<<<<<<<
+ * else:
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, state)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_reify); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_124832655);
+ __Pyx_GIVEREF(__pyx_int_124832655);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_124832655);
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
+ __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_1);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_v_state);
+ __pyx_t_4 = 0;
+ __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_6;
+ __pyx_t_6 = 0;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self.name is not None or self.wrapped is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, None), state
+ * else:
+ */
+ }
+
+ /* "(tree fragment)":15
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, None), state
+ * else:
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, state) # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle_reify__set_state(self, __pyx_state)
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_pyx_unpickle_reify); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_124832655);
+ __Pyx_GIVEREF(__pyx_int_124832655);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_124832655);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
+ __pyx_t_6 = 0;
+ __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_state);
+ __Pyx_XDECREF(__pyx_v__dict);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle_reify__set_state(self, __pyx_state)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_8_helpers_5reify_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_8_helpers_5reify_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_8_helpers_5reify_8__setstate_cython__(((struct __pyx_obj_7aiohttp_8_helpers_reify *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_8_helpers_5reify_8__setstate_cython__(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":17
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, state)
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle_reify__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
+ __pyx_t_1 = __pyx_f_7aiohttp_8_helpers___pyx_unpickle_reify__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle_reify, (type(self), 0x770cb8f, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle_reify__set_state(self, __pyx_state)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._helpers.reify.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":1
+ * def __pyx_unpickle_reify(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_8_helpers_1__pyx_unpickle_reify(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_7aiohttp_8_helpers_1__pyx_unpickle_reify = {"__pyx_unpickle_reify", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_8_helpers_1__pyx_unpickle_reify, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_7aiohttp_8_helpers_1__pyx_unpickle_reify(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v___pyx_type = 0;
+ long __pyx_v___pyx_checksum;
+ PyObject *__pyx_v___pyx_state = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__pyx_unpickle_reify (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
+ PyObject* values[3] = {0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_reify", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_reify", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_reify") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ __pyx_v___pyx_type = values[0];
+ __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_v___pyx_state = values[2];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_reify", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._helpers.__pyx_unpickle_reify", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_8_helpers___pyx_unpickle_reify(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_8_helpers___pyx_unpickle_reify(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_v___pyx_PickleError = 0;
+ PyObject *__pyx_v___pyx_result = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle_reify", 0);
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x770cb8f: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x770cb8f = (name, wrapped))" % __pyx_checksum)
+ */
+ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0x770cb8f) != 0);
+ if (__pyx_t_1) {
+
+ /* "(tree fragment)":5
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x770cb8f:
+ * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x770cb8f = (name, wrapped))" % __pyx_checksum)
+ * __pyx_result = reify.__new__(__pyx_type)
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_PickleError);
+ __Pyx_GIVEREF(__pyx_n_s_PickleError);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
+ __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_v___pyx_PickleError = __pyx_t_2;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":6
+ * if __pyx_checksum != 0x770cb8f:
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x770cb8f = (name, wrapped))" % __pyx_checksum) # <<<<<<<<<<<<<<
+ * __pyx_result = reify.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ */
+ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x77, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_INCREF(__pyx_v___pyx_PickleError);
+ __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 6, __pyx_L1_error)
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x770cb8f: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x770cb8f = (name, wrapped))" % __pyx_checksum)
+ */
+ }
+
+ /* "(tree fragment)":7
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x770cb8f = (name, wrapped))" % __pyx_checksum)
+ * __pyx_result = reify.__new__(__pyx_type) # <<<<<<<<<<<<<<
+ * if __pyx_state is not None:
+ * __pyx_unpickle_reify__set_state(<reify> __pyx_result, __pyx_state)
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_7aiohttp_8_helpers_reify), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v___pyx_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x770cb8f = (name, wrapped))" % __pyx_checksum)
+ * __pyx_result = reify.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_reify__set_state(<reify> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
+ __pyx_t_6 = (__pyx_t_1 != 0);
+ if (__pyx_t_6) {
+
+ /* "(tree fragment)":9
+ * __pyx_result = reify.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ * __pyx_unpickle_reify__set_state(<reify> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
+ * return __pyx_result
+ * cdef __pyx_unpickle_reify__set_state(reify __pyx_result, tuple __pyx_state):
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
+ __pyx_t_3 = __pyx_f_7aiohttp_8_helpers___pyx_unpickle_reify__set_state(((struct __pyx_obj_7aiohttp_8_helpers_reify *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x770cb8f = (name, wrapped))" % __pyx_checksum)
+ * __pyx_result = reify.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_reify__set_state(<reify> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ }
+
+ /* "(tree fragment)":10
+ * if __pyx_state is not None:
+ * __pyx_unpickle_reify__set_state(<reify> __pyx_result, __pyx_state)
+ * return __pyx_result # <<<<<<<<<<<<<<
+ * cdef __pyx_unpickle_reify__set_state(reify __pyx_result, tuple __pyx_state):
+ * __pyx_result.name = __pyx_state[0]; __pyx_result.wrapped = __pyx_state[1]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v___pyx_result);
+ __pyx_r = __pyx_v___pyx_result;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_reify(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("aiohttp._helpers.__pyx_unpickle_reify", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v___pyx_PickleError);
+ __Pyx_XDECREF(__pyx_v___pyx_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":11
+ * __pyx_unpickle_reify__set_state(<reify> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_reify__set_state(reify __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.name = __pyx_state[0]; __pyx_result.wrapped = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'):
+ */
+
+static PyObject *__pyx_f_7aiohttp_8_helpers___pyx_unpickle_reify__set_state(struct __pyx_obj_7aiohttp_8_helpers_reify *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ Py_ssize_t __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle_reify__set_state", 0);
+
+ /* "(tree fragment)":12
+ * return __pyx_result
+ * cdef __pyx_unpickle_reify__set_state(reify __pyx_result, tuple __pyx_state):
+ * __pyx_result.name = __pyx_state[0]; __pyx_result.wrapped = __pyx_state[1] # <<<<<<<<<<<<<<
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[2])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->name);
+ __Pyx_DECREF(__pyx_v___pyx_result->name);
+ __pyx_v___pyx_result->name = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->wrapped);
+ __Pyx_DECREF(__pyx_v___pyx_result->wrapped);
+ __pyx_v___pyx_result->wrapped = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_reify__set_state(reify __pyx_result, tuple __pyx_state):
+ * __pyx_result.name = __pyx_state[0]; __pyx_result.wrapped = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[2])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(1, 13, __pyx_L1_error)
+ }
+ __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_4 = ((__pyx_t_3 > 2) != 0);
+ if (__pyx_t_4) {
+ } else {
+ __pyx_t_2 = __pyx_t_4;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_5 = (__pyx_t_4 != 0);
+ __pyx_t_2 = __pyx_t_5;
+ __pyx_L4_bool_binop_done:;
+ if (__pyx_t_2) {
+
+ /* "(tree fragment)":14
+ * __pyx_result.name = __pyx_state[0]; __pyx_result.wrapped = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[2]) # <<<<<<<<<<<<<<
+ */
+ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 14, __pyx_L1_error)
+ }
+ __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_8 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
+ __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
+ if (likely(__pyx_t_8)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_8);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_7, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_reify__set_state(reify __pyx_result, tuple __pyx_state):
+ * __pyx_result.name = __pyx_state[0]; __pyx_result.wrapped = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[2])
+ */
+ }
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle_reify__set_state(<reify> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_reify__set_state(reify __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.name = __pyx_state[0]; __pyx_result.wrapped = __pyx_state[1]
+ * if len(__pyx_state) > 2 and hasattr(__pyx_result, '__dict__'):
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_AddTraceback("aiohttp._helpers.__pyx_unpickle_reify__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
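+
+/* Taken together, the wrapper, __pyx_unpickle_reify and
+ * __pyx_unpickle_reify__set_state above implement the loading half of
+ * Cython's pickle protocol for `reify`: the checksum 0x770cb8f encodes the
+ * (name, wrapped) field layout, so unpickling data produced by a build with
+ * a different layout raises PickleError instead of corrupting the object.
+ * On a match, the instance is rebuilt via reify.__new__(type) and its two
+ * fields (plus an optional instance __dict__) are restored from the state
+ * tuple. */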
+
+static PyObject *__pyx_tp_new_7aiohttp_8_helpers_reify(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ struct __pyx_obj_7aiohttp_8_helpers_reify *p;
+ PyObject *o;
+ if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+ o = (*t->tp_alloc)(t, 0);
+ } else {
+ o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+ }
+ if (unlikely(!o)) return 0;
+ p = ((struct __pyx_obj_7aiohttp_8_helpers_reify *)o);
+ p->wrapped = Py_None; Py_INCREF(Py_None);
+ p->name = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_7aiohttp_8_helpers_reify(PyObject *o) {
+ struct __pyx_obj_7aiohttp_8_helpers_reify *p = (struct __pyx_obj_7aiohttp_8_helpers_reify *)o;
+ #if CYTHON_USE_TP_FINALIZE
+ if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
+ if (PyObject_CallFinalizerFromDealloc(o)) return;
+ }
+ #endif
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->wrapped);
+ Py_CLEAR(p->name);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_7aiohttp_8_helpers_reify(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_8_helpers_reify *p = (struct __pyx_obj_7aiohttp_8_helpers_reify *)o;
+ if (p->wrapped) {
+ e = (*v)(p->wrapped, a); if (e) return e;
+ }
+ if (p->name) {
+ e = (*v)(p->name, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_7aiohttp_8_helpers_reify(PyObject *o) {
+ PyObject* tmp;
+ struct __pyx_obj_7aiohttp_8_helpers_reify *p = (struct __pyx_obj_7aiohttp_8_helpers_reify *)o;
+ tmp = ((PyObject*)p->wrapped);
+ p->wrapped = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->name);
+ p->name = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyObject *__pyx_tp_descr_get_7aiohttp_8_helpers_reify(PyObject *o, PyObject *i, PyObject *c) {
+ PyObject *r = 0;
+ if (!i) i = Py_None;
+ if (!c) c = Py_None;
+ r = __pyx_pw_7aiohttp_8_helpers_5reify_3__get__(o, i, c);
+ return r;
+}
+
+static int __pyx_tp_descr_set_7aiohttp_8_helpers_reify(PyObject *o, PyObject *i, PyObject *v) {
+ if (v) {
+ return __pyx_pw_7aiohttp_8_helpers_5reify_5__set__(o, i, v);
+ }
+ else {
+ PyErr_SetString(PyExc_NotImplementedError, "__delete__");
+ return -1;
+ }
+}
+
+static PyObject *__pyx_getprop_7aiohttp_8_helpers_5reify___doc__(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_8_helpers_5reify_7__doc___1__get__(o);
+}
+
+static PyMethodDef __pyx_methods_7aiohttp_8_helpers_reify[] = {
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_7aiohttp_8_helpers_5reify_7__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_7aiohttp_8_helpers_5reify_9__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_7aiohttp_8_helpers_reify[] = {
+ {(char *)"__doc__", __pyx_getprop_7aiohttp_8_helpers_5reify___doc__, 0, (char *)0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyTypeObject __pyx_type_7aiohttp_8_helpers_reify = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._helpers.reify", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_8_helpers_reify), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_8_helpers_reify, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ "Use as a class method decorator. It operates almost exactly like\n the Python `@property` decorator, but it puts the result of the\n method it decorates into the instance dict after the first call,\n effectively replacing the function it decorates with an instance\n variable. It is, in Python parlance, a data descriptor.\n\n ", /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_8_helpers_reify, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_8_helpers_reify, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_7aiohttp_8_helpers_reify, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_7aiohttp_8_helpers_reify, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ __pyx_tp_descr_get_7aiohttp_8_helpers_reify, /*tp_descr_get*/
+ __pyx_tp_descr_set_7aiohttp_8_helpers_reify, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pw_7aiohttp_8_helpers_5reify_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_8_helpers_reify, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
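+
+/* Python-level behaviour implied by the slot table above (tp_descr_get /
+ * tp_descr_set make `reify` a data descriptor). A minimal usage sketch --
+ * `Request` and `_parse_headers` are hypothetical names, not part of
+ * aiohttp:
+ *
+ *     class Request:
+ *         @reify
+ *         def headers(self):
+ *             return _parse_headers(self._raw)   # runs once per instance
+ *
+ * The first attribute access computes and caches the value, as the tp_doc
+ * string describes; assignment goes through tp_descr_set and raises
+ * AttributeError("reified property is read-only"), while `del` is rejected
+ * with NotImplementedError("__delete__"). */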
+
+static PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
+static int __pyx_pymod_exec__helpers(PyObject* module); /*proto*/
+static PyModuleDef_Slot __pyx_moduledef_slots[] = {
+ {Py_mod_create, (void*)__pyx_pymod_create},
+ {Py_mod_exec, (void*)__pyx_pymod_exec__helpers},
+ {0, NULL}
+};
+#endif
+
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_helpers",
+ 0, /* m_doc */
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ 0, /* m_size */
+ #else
+ -1, /* m_size */
+ #endif
+ __pyx_methods /* m_methods */,
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_moduledef_slots, /* m_slots */
+ #else
+ NULL, /* m_reload */
+ #endif
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+ #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+ #define CYTHON_SMALL_CODE __attribute__((cold))
+#else
+ #define CYTHON_SMALL_CODE
+#endif
+#endif
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_s_AttributeError, __pyx_k_AttributeError, sizeof(__pyx_k_AttributeError), 0, 0, 1, 1},
+ {&__pyx_kp_s_Incompatible_checksums_s_vs_0x77, __pyx_k_Incompatible_checksums_s_vs_0x77, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x77), 0, 0, 1, 0},
+ {&__pyx_n_s_KeyError, __pyx_k_KeyError, sizeof(__pyx_k_KeyError), 0, 0, 1, 1},
+ {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
+ {&__pyx_n_s_aiohttp__helpers, __pyx_k_aiohttp__helpers, sizeof(__pyx_k_aiohttp__helpers), 0, 0, 1, 1},
+ {&__pyx_n_s_cache, __pyx_k_cache, sizeof(__pyx_k_cache), 0, 0, 1, 1},
+ {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
+ {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
+ {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1},
+ {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
+ {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
+ {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+ {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
+ {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
+ {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_unpickle_reify, __pyx_k_pyx_unpickle_reify, sizeof(__pyx_k_pyx_unpickle_reify), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
+ {&__pyx_kp_u_reified_property_is_read_only, __pyx_k_reified_property_is_read_only, sizeof(__pyx_k_reified_property_is_read_only), 0, 1, 0, 0},
+ {&__pyx_n_s_reify, __pyx_k_reify, sizeof(__pyx_k_reify), 0, 0, 1, 1},
+ {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
+ {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
+ {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
+ {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+ {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
+ {&__pyx_n_s_wrapped, __pyx_k_wrapped, sizeof(__pyx_k_wrapped), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
+static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_KeyError = __Pyx_GetBuiltinName(__pyx_n_s_KeyError); if (!__pyx_builtin_KeyError) __PYX_ERR(0, 25, __pyx_L1_error)
+ __pyx_builtin_AttributeError = __Pyx_GetBuiltinName(__pyx_n_s_AttributeError); if (!__pyx_builtin_AttributeError) __PYX_ERR(0, 29, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
+
+ /* "aiohttp/_helpers.pyx":35
+ *
+ * def __set__(self, inst, value):
+ * raise AttributeError("reified property is read-only") # <<<<<<<<<<<<<<
+ */
+ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_reified_property_is_read_only); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple_);
+ __Pyx_GIVEREF(__pyx_tuple_);
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_reify(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+ __pyx_tuple__2 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__2);
+ __Pyx_GIVEREF(__pyx_tuple__2);
+ __pyx_codeobj__3 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__2, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_reify, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__3)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ __pyx_int_124832655 = PyInt_FromLong(124832655L); if (unlikely(!__pyx_int_124832655)) __PYX_ERR(0, 1, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
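+
+/* 124832655 is decimal for 0x770cb8f, the same (name, wrapped) layout
+ * checksum tested in __pyx_unpickle_reify above. */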
+
+static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
+
+static int __Pyx_modinit_global_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
+ /*--- Global init code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_variable_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
+ /*--- Variable export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
+ /*--- Function export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_type_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
+ /*--- Type init code ---*/
+ if (PyType_Ready(&__pyx_type_7aiohttp_8_helpers_reify) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_8_helpers_reify.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_8_helpers_reify.tp_dictoffset && __pyx_type_7aiohttp_8_helpers_reify.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_8_helpers_reify.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_reify, (PyObject *)&__pyx_type_7aiohttp_8_helpers_reify) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7aiohttp_8_helpers_reify) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_ptype_7aiohttp_8_helpers_reify = &__pyx_type_7aiohttp_8_helpers_reify;
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static int __Pyx_modinit_type_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
+ /*--- Type import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_variable_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
+ /*--- Variable import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
+ /*--- Function import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+
+#ifndef CYTHON_NO_PYINIT_EXPORT
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+#elif PY_MAJOR_VERSION < 3
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" void
+#else
+#define __Pyx_PyMODINIT_FUNC void
+#endif
+#else
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#endif
+#endif
+
+
+#if PY_MAJOR_VERSION < 3
+__Pyx_PyMODINIT_FUNC init_helpers(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC init_helpers(void)
+#else
+__Pyx_PyMODINIT_FUNC PyInit__helpers(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC PyInit__helpers(void)
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+{
+ return PyModuleDef_Init(&__pyx_moduledef);
+}
+static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
+ #if PY_VERSION_HEX >= 0x030700A1
+ static PY_INT64_T main_interpreter_id = -1;
+ PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
+ if (main_interpreter_id == -1) {
+ main_interpreter_id = current_id;
+ return (unlikely(current_id == -1)) ? -1 : 0;
+ } else if (unlikely(main_interpreter_id != current_id))
+ #else
+ static PyInterpreterState *main_interpreter = NULL;
+ PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
+ if (!main_interpreter) {
+ main_interpreter = current_interpreter;
+ } else if (unlikely(main_interpreter != current_interpreter))
+ #endif
+ {
+ PyErr_SetString(
+ PyExc_ImportError,
+ "Interpreter change detected - this module can only be loaded into one interpreter per process.");
+ return -1;
+ }
+ return 0;
+}
+static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
+ PyObject *value = PyObject_GetAttrString(spec, from_name);
+ int result = 0;
+ if (likely(value)) {
+ if (allow_none || value != Py_None) {
+ result = PyDict_SetItemString(moddict, to_name, value);
+ }
+ Py_DECREF(value);
+ } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Clear();
+ } else {
+ result = -1;
+ }
+ return result;
+}
+static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
+ PyObject *module = NULL, *moddict, *modname;
+ if (__Pyx_check_single_interpreter())
+ return NULL;
+ if (__pyx_m)
+ return __Pyx_NewRef(__pyx_m);
+ modname = PyObject_GetAttrString(spec, "name");
+ if (unlikely(!modname)) goto bad;
+ module = PyModule_NewObject(modname);
+ Py_DECREF(modname);
+ if (unlikely(!module)) goto bad;
+ moddict = PyModule_GetDict(module);
+ if (unlikely(!moddict)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
+ return module;
+bad:
+ Py_XDECREF(module);
+ return NULL;
+}
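+
+/* Under PEP 489 multi-phase init, PyInit__helpers merely hands back
+ * __pyx_moduledef; the Py_mod_create slot above builds the module object
+ * from the import spec (mapping loader/origin/parent onto
+ * __loader__/__file__/__package__) after refusing to load into a second
+ * interpreter, and the Py_mod_exec slot below runs the module body. */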
+
+
+static CYTHON_SMALL_CODE int __pyx_pymod_exec__helpers(PyObject *__pyx_pyinit_module)
+#endif
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannyDeclarations
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ if (__pyx_m) {
+ if (__pyx_m == __pyx_pyinit_module) return 0;
+ PyErr_SetString(PyExc_RuntimeError, "Module '_helpers' has already been imported. Re-initialisation is not supported.");
+ return -1;
+ }
+ #elif PY_MAJOR_VERSION >= 3
+ if (__pyx_m) return __Pyx_NewRef(__pyx_m);
+ #endif
+ #if CYTHON_REFNANNY
+__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+}
+#endif
+ __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__helpers(void)", 0);
+ if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pxy_PyFrame_Initialize_Offsets
+ __Pxy_PyFrame_Initialize_Offsets();
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pyx_CyFunction_USED
+ if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_FusedFunction_USED
+ if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Generator_USED
+ if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_StopAsyncIteration_USED
+ if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_m = __pyx_pyinit_module;
+ Py_INCREF(__pyx_m);
+ #else
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4("_helpers", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_d);
+ __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_b);
+ __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_cython_runtime);
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ /*--- Initialize various global constants etc. ---*/
+ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+ if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ if (__pyx_module_is_main_aiohttp___helpers) {
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ #if PY_MAJOR_VERSION >= 3
+ {
+ PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (!PyDict_GetItemString(modules, "aiohttp._helpers")) {
+ if (unlikely(PyDict_SetItemString(modules, "aiohttp._helpers", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ }
+ #endif
+ /*--- Builtin init code ---*/
+ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Constants init code ---*/
+ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Global type/function init code ---*/
+ (void)__Pyx_modinit_global_init_code();
+ (void)__Pyx_modinit_variable_export_code();
+ (void)__Pyx_modinit_function_export_code();
+ if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ (void)__Pyx_modinit_type_import_code();
+ (void)__Pyx_modinit_variable_import_code();
+ (void)__Pyx_modinit_function_import_code();
+ /*--- Execution code ---*/
+ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+ if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_reify(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7aiohttp_8_helpers_1__pyx_unpickle_reify, NULL, __pyx_n_s_aiohttp__helpers); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_reify, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_helpers.pyx":1
+ * cdef class reify: # <<<<<<<<<<<<<<
+ * """Use as a class method decorator. It operates almost exactly like
+ * the Python `@property` decorator, but it puts the result of the
+ */
+ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /*--- Wrapped vars code ---*/
+
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ if (__pyx_m) {
+ if (__pyx_d) {
+ __Pyx_AddTraceback("init aiohttp._helpers", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ }
+ Py_CLEAR(__pyx_m);
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init aiohttp._helpers");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ return (__pyx_m != NULL) ? 0 : -1;
+ #elif PY_MAJOR_VERSION >= 3
+ return __pyx_m;
+ #else
+ return;
+ #endif
+}
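+
+/* Net effect of the exec pass above: the `reify` type (attached in
+ * __Pyx_modinit_type_init_code), the module-level __pyx_unpickle_reify
+ * helper, and an empty __test__ dict are published on aiohttp._helpers;
+ * on any failure the partially built module is cleared and ImportError
+ * is set. */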
+
+/* --- Runtime support code --- */
+/* Refnanny */
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule(modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, "RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif
+
+/* PyObjectGetAttrStr */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro))
+ return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_getattr))
+ return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+ return PyObject_GetAttr(obj, attr_name);
+}
+#endif
+
+/* GetBuiltinName */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+ PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+ if (unlikely(!result)) {
+ PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+ "name '%U' is not defined", name);
+#else
+ "name '%.200s' is not defined", PyString_AS_STRING(name));
+#endif
+ }
+ return result;
+}
+
+/* RaiseDoubleKeywords */
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AsString(kw_name));
+ #endif
+}
+
+/* ParseKeywords */
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ continue;
+ }
+ name = first_kw_arg;
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyString_Check(key))) {
+ while (*name) {
+ if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**name, key)) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ if ((**argname == key) || (
+ (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**argname, key))) {
+ goto arg_passed_twice;
+ }
+ argname++;
+ }
+ }
+ } else
+ #endif
+ if (likely(PyUnicode_Check(key))) {
+ while (*name) {
+ int cmp = (**name == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**name, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ int cmp = (**argname == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**argname, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) goto arg_passed_twice;
+ argname++;
+ }
+ }
+ } else
+ goto invalid_keyword_type;
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, key);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%.200s() got an unexpected keyword argument '%.200s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
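+
+/* Keyword matching above is tiered: expected names are first compared by
+ * pointer identity (cheap when the name constants are interned, as the
+ * string-table entries appear to request), then by length, and only then by
+ * a full string comparison; a keyword that names an already-filled
+ * positional slot is reported via "got multiple values". */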
+
+/* RaiseArgTupleInvalid */
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *more_or_less;
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
+ func_name, more_or_less, num_expected,
+ (num_expected == 1) ? "" : "s", num_found);
+}
+
+/* GetItemInt */
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+ PyObject *r;
+ if (!j) return NULL;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ Py_ssize_t wrapped_i = i;
+ if (wraparound & unlikely(i < 0)) {
+ wrapped_i += PyList_GET_SIZE(o);
+ }
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
+ PyObject *r = PyList_GET_ITEM(o, wrapped_i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ Py_ssize_t wrapped_i = i;
+ if (wraparound & unlikely(i < 0)) {
+ wrapped_i += PyTuple_GET_SIZE(o);
+ }
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
+ if (is_list || PyList_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
+ if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
+ PyObject *r = PyList_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ else if (PyTuple_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ } else {
+ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+ if (likely(m && m->sq_item)) {
+ if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+ Py_ssize_t l = m->sq_length(o);
+ if (likely(l >= 0)) {
+ i += l;
+ } else {
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+ return NULL;
+ PyErr_Clear();
+ }
+ }
+ return m->sq_item(o, i);
+ }
+ }
+#else
+ if (is_list || PySequence_Check(o)) {
+ return PySequence_GetItem(o, i);
+ }
+#endif
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+/* ObjectGetItem */
+#if CYTHON_USE_TYPE_SLOTS
+static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
+ PyObject *runerr;
+ Py_ssize_t key_value;
+ PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
+ if (unlikely(!(m && m->sq_item))) {
+ PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
+ return NULL;
+ }
+ key_value = __Pyx_PyIndex_AsSsize_t(index);
+ if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
+ return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
+ }
+ if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
+ PyErr_Clear();
+ PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
+ }
+ return NULL;
+}
+static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
+ PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(m && m->mp_subscript)) {
+ return m->mp_subscript(obj, key);
+ }
+ return __Pyx_PyObject_GetIndex(obj, key);
+}
+#endif
+
+/* GetTopmostException */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem *
+__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
+{
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
+ exc_info->previous_item != NULL)
+ {
+ exc_info = exc_info->previous_item;
+ }
+ return exc_info;
+}
+#endif
+
+/* SaveResetException */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
+ *type = exc_info->exc_type;
+ *value = exc_info->exc_value;
+ *tb = exc_info->exc_traceback;
+ #else
+ *type = tstate->exc_type;
+ *value = tstate->exc_value;
+ *tb = tstate->exc_traceback;
+ #endif
+ Py_XINCREF(*type);
+ Py_XINCREF(*value);
+ Py_XINCREF(*tb);
+}
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = type;
+ exc_info->exc_value = value;
+ exc_info->exc_traceback = tb;
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = type;
+ tstate->exc_value = value;
+ tstate->exc_traceback = tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+#endif
+
+/* PyErrExceptionMatches */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+ PyObject *exc_type = tstate->curexc_type;
+ if (exc_type == err) return 1;
+ if (unlikely(!exc_type)) return 0;
+ if (unlikely(PyTuple_Check(err)))
+ return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+ return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetException */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
+#endif
+{
+ PyObject *local_type, *local_value, *local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ local_type = tstate->curexc_type;
+ local_value = tstate->curexc_value;
+ local_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+#else
+ PyErr_Fetch(&local_type, &local_value, &local_tb);
+#endif
+ PyErr_NormalizeException(&local_type, &local_value, &local_tb);
+#if CYTHON_FAST_THREAD_STATE
+ if (unlikely(tstate->curexc_type))
+#else
+ if (unlikely(PyErr_Occurred()))
+#endif
+ goto bad;
+ #if PY_MAJOR_VERSION >= 3
+ if (local_tb) {
+ if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
+ goto bad;
+ }
+ #endif
+ Py_XINCREF(local_tb);
+ Py_XINCREF(local_type);
+ Py_XINCREF(local_value);
+ *type = local_type;
+ *value = local_value;
+ *tb = local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ #if CYTHON_USE_EXC_INFO_STACK
+ {
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = local_type;
+ exc_info->exc_value = local_value;
+ exc_info->exc_traceback = local_tb;
+ }
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = local_type;
+ tstate->exc_value = local_value;
+ tstate->exc_traceback = local_tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+#else
+ PyErr_SetExcInfo(local_type, local_value, local_tb);
+#endif
+ return 0;
+bad:
+ *type = 0;
+ *value = 0;
+ *tb = 0;
+ Py_XDECREF(local_type);
+ Py_XDECREF(local_value);
+ Py_XDECREF(local_tb);
+ return -1;
+}
+
+/* PyCFunctionFastCall */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
+ PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
+ PyCFunction meth = PyCFunction_GET_FUNCTION(func);
+ PyObject *self = PyCFunction_GET_SELF(func);
+ int flags = PyCFunction_GET_FLAGS(func);
+ assert(PyCFunction_Check(func));
+ assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
+ assert(nargs >= 0);
+ assert(nargs == 0 || args != NULL);
+ /* _PyCFunction_FastCallDict() must not be called with an exception set,
+ because it may clear it (directly or indirectly) and so the
+ caller loses its exception */
+ assert(!PyErr_Occurred());
+ if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
+ return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
+ } else {
+ return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
+ }
+}
+#endif
+
+/* PyFunctionFastCall */
+#if CYTHON_FAST_PYCALL
+static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
+ PyObject *globals) {
+ PyFrameObject *f;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject **fastlocals;
+ Py_ssize_t i;
+ PyObject *result;
+ assert(globals != NULL);
+ /* XXX Perhaps we should create a specialized
+ PyFrame_New() that doesn't take locals, but does
+ take builtins without sanity checking them.
+ */
+ assert(tstate != NULL);
+ f = PyFrame_New(tstate, co, globals, NULL);
+ if (f == NULL) {
+ return NULL;
+ }
+ fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
+ for (i = 0; i < na; i++) {
+ Py_INCREF(*args);
+ fastlocals[i] = *args++;
+ }
+ result = PyEval_EvalFrameEx(f,0);
+ ++tstate->recursion_depth;
+ Py_DECREF(f);
+ --tstate->recursion_depth;
+ return result;
+}
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
+ PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
+ PyObject *globals = PyFunction_GET_GLOBALS(func);
+ PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
+ PyObject *closure;
+#if PY_MAJOR_VERSION >= 3
+ PyObject *kwdefs;
+#endif
+ PyObject *kwtuple, **k;
+ PyObject **d;
+ Py_ssize_t nd;
+ Py_ssize_t nk;
+ PyObject *result;
+ assert(kwargs == NULL || PyDict_Check(kwargs));
+ nk = kwargs ? PyDict_Size(kwargs) : 0;
+ if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
+ return NULL;
+ }
+ if (
+#if PY_MAJOR_VERSION >= 3
+ co->co_kwonlyargcount == 0 &&
+#endif
+ likely(kwargs == NULL || nk == 0) &&
+ co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
+ if (argdefs == NULL && co->co_argcount == nargs) {
+ result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
+ goto done;
+ }
+ else if (nargs == 0 && argdefs != NULL
+ && co->co_argcount == Py_SIZE(argdefs)) {
+ /* function called with no arguments, but all parameters have
+ a default value: use default values as arguments .*/
+ args = &PyTuple_GET_ITEM(argdefs, 0);
+ result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
+ goto done;
+ }
+ }
+ if (kwargs != NULL) {
+ Py_ssize_t pos, i;
+ kwtuple = PyTuple_New(2 * nk);
+ if (kwtuple == NULL) {
+ result = NULL;
+ goto done;
+ }
+ k = &PyTuple_GET_ITEM(kwtuple, 0);
+ pos = i = 0;
+ while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
+ Py_INCREF(k[i]);
+ Py_INCREF(k[i+1]);
+ i += 2;
+ }
+ nk = i / 2;
+ }
+ else {
+ kwtuple = NULL;
+ k = NULL;
+ }
+ closure = PyFunction_GET_CLOSURE(func);
+#if PY_MAJOR_VERSION >= 3
+ kwdefs = PyFunction_GET_KW_DEFAULTS(func);
+#endif
+ if (argdefs != NULL) {
+ d = &PyTuple_GET_ITEM(argdefs, 0);
+ nd = Py_SIZE(argdefs);
+ }
+ else {
+ d = NULL;
+ nd = 0;
+ }
+#if PY_MAJOR_VERSION >= 3
+ result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, kwdefs, closure);
+#else
+ result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, closure);
+#endif
+ Py_XDECREF(kwtuple);
+done:
+ Py_LeaveRecursiveCall();
+ return result;
+}
+#endif
+#endif
+
+/* PyObjectCall */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
+ PyObject *result;
+ ternaryfunc call = func->ob_type->tp_call;
+ if (unlikely(!call))
+ return PyObject_Call(func, arg, kw);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = (*call)(func, arg, kw);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCall2Args */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
+ PyObject *args, *result = NULL;
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyFunction_FastCall(function, args, 2);
+ }
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyCFunction_FastCall(function, args, 2);
+ }
+ #endif
+ args = PyTuple_New(2);
+ if (unlikely(!args)) goto done;
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 0, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 1, arg2);
+ Py_INCREF(function);
+ result = __Pyx_PyObject_Call(function, args, NULL);
+ Py_DECREF(args);
+ Py_DECREF(function);
+done:
+ return result;
+}
+
+/* PyObjectCallMethO */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
+ PyObject *self, *result;
+ PyCFunction cfunc;
+ cfunc = PyCFunction_GET_FUNCTION(func);
+ self = PyCFunction_GET_SELF(func);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = cfunc(self, arg);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCallOneArg */
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_New(1);
+ if (unlikely(!args)) return NULL;
+ Py_INCREF(arg);
+ PyTuple_SET_ITEM(args, 0, arg);
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, &arg, 1);
+ }
+#endif
+ if (likely(PyCFunction_Check(func))) {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+ return __Pyx_PyObject_CallMethO(func, arg);
+#if CYTHON_FAST_PYCCALL
+ } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
+ return __Pyx_PyCFunction_FastCall(func, &arg, 1);
+#endif
+ }
+ }
+ return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_Pack(1, arg);
+ if (unlikely(!args)) return NULL;
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+#endif
+
+/* PyErrFetchRestore */
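+/* Swaps the current exception in and out of the thread state directly,
+   avoiding the overhead of PyErr_Fetch/PyErr_Restore. */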
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+#endif
+
+/* RaiseException */
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
+ CYTHON_UNUSED PyObject *cause) {
+ __Pyx_PyThreadState_declare
+ Py_XINCREF(type);
+ if (!value || value == Py_None)
+ value = NULL;
+ else
+ Py_INCREF(value);
+ if (!tb || tb == Py_None)
+ tb = NULL;
+ else {
+ Py_INCREF(tb);
+ if (!PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ }
+ if (PyType_Check(type)) {
+#if CYTHON_COMPILING_IN_PYPY
+ if (!value) {
+ Py_INCREF(Py_None);
+ value = Py_None;
+ }
+#endif
+ PyErr_NormalizeException(&type, &value, &tb);
+ } else {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ }
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+#else
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ PyObject* owned_instance = NULL;
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (PyExceptionClass_Check(type)) {
+ PyObject *instance_class = NULL;
+ if (value && PyExceptionInstance_Check(value)) {
+ instance_class = (PyObject*) Py_TYPE(value);
+ if (instance_class != type) {
+ int is_subclass = PyObject_IsSubclass(instance_class, type);
+ if (!is_subclass) {
+ instance_class = NULL;
+ } else if (unlikely(is_subclass == -1)) {
+ goto bad;
+ } else {
+ type = instance_class;
+ }
+ }
+ }
+ if (!instance_class) {
+ PyObject *args;
+ if (!value)
+ args = PyTuple_New(0);
+ else if (PyTuple_Check(value)) {
+ Py_INCREF(value);
+ args = value;
+ } else
+ args = PyTuple_Pack(1, value);
+ if (!args)
+ goto bad;
+ owned_instance = PyObject_Call(type, args, NULL);
+ Py_DECREF(args);
+ if (!owned_instance)
+ goto bad;
+ value = owned_instance;
+ if (!PyExceptionInstance_Check(value)) {
+ PyErr_Format(PyExc_TypeError,
+ "calling %R should have returned an instance of "
+ "BaseException, not %R",
+ type, Py_TYPE(value));
+ goto bad;
+ }
+ }
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+ if (cause) {
+ PyObject *fixed_cause;
+ if (cause == Py_None) {
+ fixed_cause = NULL;
+ } else if (PyExceptionClass_Check(cause)) {
+ fixed_cause = PyObject_CallObject(cause, NULL);
+ if (fixed_cause == NULL)
+ goto bad;
+ } else if (PyExceptionInstance_Check(cause)) {
+ fixed_cause = cause;
+ Py_INCREF(fixed_cause);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "exception causes must derive from "
+ "BaseException");
+ goto bad;
+ }
+ PyException_SetCause(value, fixed_cause);
+ }
+ PyErr_SetObject(type, value);
+ if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
+ Py_INCREF(tb);
+ PyErr_Restore(tmp_type, tmp_value, tb);
+ Py_XDECREF(tmp_tb);
+#else
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+#endif
+ }
+bad:
+ Py_XDECREF(owned_instance);
+ return;
+}
+#endif
+
+/* GetAttr */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
+#if CYTHON_USE_TYPE_SLOTS
+#if PY_MAJOR_VERSION >= 3
+ if (likely(PyUnicode_Check(n)))
+#else
+ if (likely(PyString_Check(n)))
+#endif
+ return __Pyx_PyObject_GetAttrStr(o, n);
+#endif
+ return PyObject_GetAttr(o, n);
+}
+
+/* GetAttr3 */
+static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ return NULL;
+ __Pyx_PyErr_Clear();
+ Py_INCREF(d);
+ return d;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
+ PyObject *r = __Pyx_GetAttr(o, n);
+ return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
+}
+
+/* PyDictVersioning */
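+/* Uses CPython dict version tags to detect when a cached lookup has gone
+   stale, i.e. when the underlying dict has been mutated. */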
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
+ PyObject **dictptr = NULL;
+ Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
+ if (offset) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
+#else
+ dictptr = _PyObject_GetDictPtr(obj);
+#endif
+ }
+ return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
+}
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
+ return 0;
+ return obj_dict_version == __Pyx_get_object_dict_version(obj);
+}
+#endif
+
+/* GetModuleGlobalName */
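+/* Resolves a module-level name against the module dict first, then falls
+   back to the builtins lookup. */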
+#if CYTHON_USE_DICT_VERSIONS
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
+#else
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
+#endif
+{
+ PyObject *result;
+#if !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+ result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ } else if (unlikely(PyErr_Occurred())) {
+ return NULL;
+ }
+#else
+ result = PyDict_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+#endif
+#else
+ result = PyObject_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+ PyErr_Clear();
+#endif
+ return __Pyx_GetBuiltinName(name);
+}
+
+/* Import */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_import;
+ py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+ if (!py_import)
+ goto bad;
+ #endif
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ {
+ #if PY_MAJOR_VERSION >= 3
+ if (level == -1) {
+ if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, 1);
+ if (!module) {
+ if (!PyErr_ExceptionMatches(PyExc_ImportError))
+ goto bad;
+ PyErr_Clear();
+ }
+ }
+ level = 0;
+ }
+ #endif
+ if (!module) {
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_level = PyInt_FromLong(level);
+ if (!py_level)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
+ Py_DECREF(py_level);
+ #else
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, level);
+ #endif
+ }
+ }
+bad:
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(py_import);
+ #endif
+ Py_XDECREF(empty_list);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+/* ImportFrom */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+ PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+ if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Format(PyExc_ImportError,
+ #if PY_MAJOR_VERSION < 3
+ "cannot import name %.230s", PyString_AS_STRING(name));
+ #else
+ "cannot import name %S", name);
+ #endif
+ }
+ return value;
+}
+
+/* HasAttr */
+static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
+ PyObject *r;
+ if (unlikely(!__Pyx_PyBaseString_Check(n))) {
+ PyErr_SetString(PyExc_TypeError,
+ "hasattr(): attribute name must be string");
+ return -1;
+ }
+ r = __Pyx_GetAttr(o, n);
+ if (unlikely(!r)) {
+ PyErr_Clear();
+ return 0;
+ } else {
+ Py_DECREF(r);
+ return 1;
+ }
+}
+
+/* PyObject_GenericGetAttrNoDict */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, attr_name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(attr_name));
+#endif
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
+ PyObject *descr;
+ PyTypeObject *tp = Py_TYPE(obj);
+ if (unlikely(!PyString_Check(attr_name))) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ assert(!tp->tp_dictoffset);
+ descr = _PyType_Lookup(tp, attr_name);
+ if (unlikely(!descr)) {
+ return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
+ }
+ Py_INCREF(descr);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
+ #endif
+ {
+ descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
+ if (unlikely(f)) {
+ PyObject *res = f(descr, obj, (PyObject *)tp);
+ Py_DECREF(descr);
+ return res;
+ }
+ }
+ return descr;
+}
+#endif
+
+/* PyObject_GenericGetAttr */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
+ if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
+}
+#endif
+
+/* PyObjectGetAttrStrNoError */
+static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ __Pyx_PyErr_Clear();
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
+ PyObject *result;
+#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
+ return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
+ }
+#endif
+ result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
+ if (unlikely(!result)) {
+ __Pyx_PyObject_GetAttrStr_ClearAttributeError();
+ }
+ return result;
+}
+
+/* SetupReduce */
+static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
+ int ret;
+ PyObject *name_attr;
+ name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name);
+ if (likely(name_attr)) {
+ ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
+ } else {
+ ret = -1;
+ }
+ if (unlikely(ret < 0)) {
+ PyErr_Clear();
+ ret = 0;
+ }
+ Py_XDECREF(name_attr);
+ return ret;
+}
+static int __Pyx_setup_reduce(PyObject* type_obj) {
+ int ret = 0;
+ PyObject *object_reduce = NULL;
+ PyObject *object_reduce_ex = NULL;
+ PyObject *reduce = NULL;
+ PyObject *reduce_ex = NULL;
+ PyObject *reduce_cython = NULL;
+ PyObject *setstate = NULL;
+ PyObject *setstate_cython = NULL;
+#if CYTHON_USE_PYTYPE_LOOKUP
+ if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
+#else
+ if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
+#endif
+#if CYTHON_USE_PYTYPE_LOOKUP
+ object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
+#else
+ object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
+#endif
+ reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
+ if (reduce_ex == object_reduce_ex) {
+#if CYTHON_USE_PYTYPE_LOOKUP
+ object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
+#else
+ object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
+#endif
+ reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
+ if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
+ reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
+ if (likely(reduce_cython)) {
+ ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ } else if (reduce == object_reduce || PyErr_Occurred()) {
+ goto __PYX_BAD;
+ }
+ setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
+ if (!setstate) PyErr_Clear();
+ if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
+ setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
+ if (likely(setstate_cython)) {
+ ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ } else if (!setstate || PyErr_Occurred()) {
+ goto __PYX_BAD;
+ }
+ }
+ PyType_Modified((PyTypeObject*)type_obj);
+ }
+ }
+ goto __PYX_GOOD;
+__PYX_BAD:
+ if (!PyErr_Occurred())
+ PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
+ ret = -1;
+__PYX_GOOD:
+#if !CYTHON_USE_PYTYPE_LOOKUP
+ Py_XDECREF(object_reduce);
+ Py_XDECREF(object_reduce_ex);
+#endif
+ Py_XDECREF(reduce);
+ Py_XDECREF(reduce_ex);
+ Py_XDECREF(reduce_cython);
+ Py_XDECREF(setstate);
+ Py_XDECREF(setstate_cython);
+ return ret;
+}
+
+/* CLineInTraceback */
+#ifndef CYTHON_CLINE_IN_TRACEBACK
+static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
+ PyObject *use_cline;
+ PyObject *ptype, *pvalue, *ptraceback;
+#if CYTHON_COMPILING_IN_CPYTHON
+ PyObject **cython_runtime_dict;
+#endif
+ if (unlikely(!__pyx_cython_runtime)) {
+ return c_line;
+ }
+ __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
+#if CYTHON_COMPILING_IN_CPYTHON
+ cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
+ if (likely(cython_runtime_dict)) {
+ __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
+ use_cline, *cython_runtime_dict,
+ __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
+ } else
+#endif
+ {
+ PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
+ if (use_cline_obj) {
+ use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
+ Py_DECREF(use_cline_obj);
+ } else {
+ PyErr_Clear();
+ use_cline = NULL;
+ }
+ }
+ if (!use_cline) {
+ c_line = 0;
+ PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
+ }
+ else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
+ c_line = 0;
+ }
+ __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
+ return c_line;
+}
+#endif
+
+/* CodeObjectCache */
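+/* Binary-searched cache mapping source lines to synthesized code objects,
+   reused when building tracebacks. */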
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+ int start = 0, mid = 0, end = count - 1;
+ if (end >= 0 && code_line > entries[end].code_line) {
+ return count;
+ }
+ while (start < end) {
+ mid = start + (end - start) / 2;
+ if (code_line < entries[mid].code_line) {
+ end = mid;
+ } else if (code_line > entries[mid].code_line) {
+ start = mid + 1;
+ } else {
+ return mid;
+ }
+ }
+ if (code_line <= entries[mid].code_line) {
+ return mid;
+ } else {
+ return mid + 1;
+ }
+}
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+ PyCodeObject* code_object;
+ int pos;
+ if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+ return NULL;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+ return NULL;
+ }
+ code_object = __pyx_code_cache.entries[pos].code_object;
+ Py_INCREF(code_object);
+ return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+ int pos, i;
+ __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+ if (unlikely(!code_line)) {
+ return;
+ }
+ if (unlikely(!entries)) {
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+ if (likely(entries)) {
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = 64;
+ __pyx_code_cache.count = 1;
+ entries[0].code_line = code_line;
+ entries[0].code_object = code_object;
+ Py_INCREF(code_object);
+ }
+ return;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+ PyCodeObject* tmp = entries[pos].code_object;
+ entries[pos].code_object = code_object;
+ Py_DECREF(tmp);
+ return;
+ }
+ if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+ int new_max = __pyx_code_cache.max_count + 64;
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+ __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
+ if (unlikely(!entries)) {
+ return;
+ }
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = new_max;
+ }
+ for (i=__pyx_code_cache.count; i>pos; i--) {
+ entries[i] = entries[i-1];
+ }
+ entries[pos].code_line = code_line;
+ entries[pos].code_object = code_object;
+ __pyx_code_cache.count++;
+ Py_INCREF(code_object);
+}
+
+/* AddTraceback */
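+/* Builds a throwaway code object and frame so that C-level errors show up
+   with Python file/line information in tracebacks. */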
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+ const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(filename);
+ #else
+ py_srcfile = PyUnicode_FromString(filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (c_line) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_code = __Pyx_PyCode_New(
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ py_line,
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ Py_DECREF(py_srcfile);
+ Py_DECREF(py_funcname);
+ return py_code;
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ if (c_line) {
+ c_line = __Pyx_CLineForTraceback(tstate, c_line);
+ }
+ py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
+ if (!py_code) {
+ py_code = __Pyx_CreateCodeObjectForTraceback(
+ funcname, c_line, py_line, filename);
+ if (!py_code) goto bad;
+ __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
+ }
+ py_frame = PyFrame_New(
+ tstate, /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ __pyx_d, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+/* CIntFromPyVerify */
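+/* Returns the converted value only if it round-trips through the target C
+   type; otherwise jumps to an overflow label. */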
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+ {\
+ func_type value = func_value;\
+ if (sizeof(target_type) < sizeof(func_type)) {\
+ if (unlikely(value != (func_type) (target_type) value)) {\
+ func_type zero = 0;\
+ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+ return (target_type) -1;\
+ if (is_unsigned && unlikely(value < zero))\
+ goto raise_neg_overflow;\
+ else\
+ goto raise_overflow;\
+ }\
+ }\
+ return (target_type) value;\
+ }
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(long) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(long) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(long) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(long),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(long) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (long) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+ return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+ return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+ return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (long) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(long) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(long) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ long val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (long) -1;
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (long) -1;
+ val = __Pyx_PyInt_As_long(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to long");
+ return (long) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long) -1;
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(int) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (int) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+ return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+ return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+ return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (int) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(int) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(int) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ int val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (int) -1;
+ }
+ } else {
+ int val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (int) -1;
+ val = __Pyx_PyInt_As_int(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to int");
+ return (int) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to int");
+ return (int) -1;
+}
+
+/* FastTypeChecks */
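+/* Exception matching that walks tp_mro/tp_base directly instead of going
+   through the generic subclass-check API. */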
+#if CYTHON_COMPILING_IN_CPYTHON
+static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
+ while (a) {
+ a = a->tp_base;
+ if (a == b)
+ return 1;
+ }
+ return b == &PyBaseObject_Type;
+}
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
+ PyObject *mro;
+ if (a == b) return 1;
+ mro = a->tp_mro;
+ if (likely(mro)) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(mro);
+ for (i = 0; i < n; i++) {
+ if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+ return 1;
+ }
+ return 0;
+ }
+ return __Pyx_InBases(a, b);
+}
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+ PyObject *exception, *value, *tb;
+ int res;
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&exception, &value, &tb);
+ res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ if (!res) {
+ res = PyObject_IsSubclass(err, exc_type2);
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ }
+ __Pyx_ErrRestore(exception, value, tb);
+ return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+ int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+ if (!res) {
+ res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+ }
+ return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ assert(PyExceptionClass_Check(exc_type));
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ PyObject *t = PyTuple_GET_ITEM(tuple, i);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(exc_type == t)) return 1;
+ #endif
+ if (likely(PyExceptionClass_Check(t))) {
+ if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+ } else {
+ }
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+ if (likely(err == exc_type)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ if (likely(PyExceptionClass_Check(exc_type))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+ } else if (likely(PyTuple_Check(exc_type))) {
+ return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+ } else {
+ }
+ }
+ return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+ assert(PyExceptionClass_Check(exc_type1));
+ assert(PyExceptionClass_Check(exc_type2));
+ if (likely(err == exc_type1 || err == exc_type2)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+ }
+ return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
+
+/* CheckBinaryVersion */
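+/* Warns at import time when the compile-time and runtime Python versions
+   disagree. */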
+static int __Pyx_check_binary_version(void) {
+ char ctversion[4], rtversion[4];
+ PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+ PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+ if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+ char message[200];
+ PyOS_snprintf(message, sizeof(message),
+ "compiletime version %s of module '%.100s' "
+ "does not match runtime version %s",
+ ctversion, __Pyx_MODULE_NAME, rtversion);
+ return PyErr_WarnEx(NULL, message, 1);
+ }
+ return 0;
+}
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ if (PyObject_Hash(*t->p) == -1)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+ return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+ Py_ssize_t ignore;
+ return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ char* defenc_c;
+ PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+ if (!defenc) return NULL;
+ defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ {
+ char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+ char* c;
+ for (c = defenc_c; c < end; c++) {
+ if ((unsigned char) (*c) >= 128) {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+ }
+ }
+#endif
+ *length = PyBytes_GET_SIZE(defenc);
+ return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ if (likely(PyUnicode_IS_ASCII(o))) {
+ *length = PyUnicode_GET_LENGTH(o);
+ return PyUnicode_AsUTF8(o);
+ } else {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+#else
+ return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+ if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+ PyUnicode_Check(o)) {
+ return __Pyx_PyUnicode_AsStringAndSize(o, length);
+ } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+ if (PyByteArray_Check(o)) {
+ *length = PyByteArray_GET_SIZE(o);
+ return PyByteArray_AS_STRING(o);
+ } else
+#endif
+ {
+ char* result;
+ int r = PyBytes_AsStringAndSize(o, &result, length);
+ if (unlikely(r < 0)) {
+ return NULL;
+ } else {
+ return result;
+ }
+ }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
+ int retval;
+ if (unlikely(!x)) return -1;
+ retval = __Pyx_PyObject_IsTrue(x);
+ Py_DECREF(x);
+ return retval;
+}
+static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
+#if PY_MAJOR_VERSION >= 3
+ if (PyLong_Check(result)) {
+ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
+ "__int__ returned non-int (type %.200s). "
+ "The ability to return an instance of a strict subclass of int "
+ "is deprecated, and may be removed in a future version of Python.",
+ Py_TYPE(result)->tp_name)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ return result;
+ }
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%.4s__ returned non-%.4s (type %.200s)",
+ type_name, type_name, Py_TYPE(result)->tp_name);
+ Py_DECREF(result);
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
+#if CYTHON_USE_TYPE_SLOTS
+ PyNumberMethods *m;
+#endif
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x) || PyLong_Check(x)))
+#else
+ if (likely(PyLong_Check(x)))
+#endif
+ return __Pyx_NewRef(x);
+#if CYTHON_USE_TYPE_SLOTS
+ m = Py_TYPE(x)->tp_as_number;
+ #if PY_MAJOR_VERSION < 3
+ if (m && m->nb_int) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = m->nb_long(x);
+ }
+ #else
+ if (likely(m && m->nb_int)) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ #endif
+#else
+ if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
+ res = PyNumber_Int(x);
+ }
+#endif
+ if (likely(res)) {
+#if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
+#else
+ if (unlikely(!PyLong_CheckExact(res))) {
+#endif
+ return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject *x;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_CheckExact(b))) {
+ if (sizeof(Py_ssize_t) >= sizeof(long))
+ return PyInt_AS_LONG(b);
+ else
+ return PyInt_AsSsize_t(b);
+ }
+#endif
+ if (likely(PyLong_CheckExact(b))) {
+ #if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)b)->ob_digit;
+ const Py_ssize_t size = Py_SIZE(b);
+ if (likely(__Pyx_sst_abs(size) <= 1)) {
+ ival = likely(size) ? digits[0] : 0;
+ if (size == -1) ival = -ival;
+ return ival;
+ } else {
+ switch (size) {
+ case 2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ }
+ }
+ #endif
+ return PyLong_AsSsize_t(b);
+ }
+ x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
+ return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+ return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/third_party/python/aiohttp/aiohttp/_helpers.pyi b/third_party/python/aiohttp/aiohttp/_helpers.pyi
new file mode 100644
index 0000000000..1e35893702
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_helpers.pyi
@@ -0,0 +1,6 @@
+from typing import Any
+
+class reify:
+ def __init__(self, wrapped: Any) -> None: ...
+ def __get__(self, inst: Any, owner: Any) -> Any: ...
+ def __set__(self, inst: Any, value: Any) -> None: ...
diff --git a/third_party/python/aiohttp/aiohttp/_helpers.pyx b/third_party/python/aiohttp/aiohttp/_helpers.pyx
new file mode 100644
index 0000000000..665f367c5d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_helpers.pyx
@@ -0,0 +1,35 @@
+cdef class reify:
+    """Use as a method decorator. It operates almost exactly like
+ the Python `@property` decorator, but it puts the result of the
+ method it decorates into the instance dict after the first call,
+ effectively replacing the function it decorates with an instance
+ variable. It is, in Python parlance, a data descriptor.
+
+ """
+
+ cdef object wrapped
+ cdef object name
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+ self.name = wrapped.__name__
+
+ @property
+ def __doc__(self):
+ return self.wrapped.__doc__
+
+ def __get__(self, inst, owner):
+ try:
+ try:
+ return inst._cache[self.name]
+ except KeyError:
+ val = self.wrapped(inst)
+ inst._cache[self.name] = val
+ return val
+ except AttributeError:
+ if inst is None:
+ return self
+ raise
+
+ def __set__(self, inst, value):
+ raise AttributeError("reified property is read-only")
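
For reference, a minimal pure-Python sketch of this descriptor and its intended use. The `Example` host class and its `_cache` dict are illustrative assumptions for this note, not aiohttp code; the only contract the descriptor relies on is that the instance exposes a `_cache` mapping.

    class reify:
        """Pure-Python equivalent of the cdef class above."""

        def __init__(self, wrapped):
            self.wrapped = wrapped
            self.name = wrapped.__name__

        def __get__(self, inst, owner):
            if inst is None:        # accessed on the class, not an instance
                return self
            try:
                return inst._cache[self.name]
            except KeyError:
                val = self.wrapped(inst)
                inst._cache[self.name] = val   # computed once, then cached
                return val

        def __set__(self, inst, value):
            raise AttributeError("reified property is read-only")

    class Example:
        def __init__(self):
            self._cache = {}        # host class must provide the cache dict

        @reify
        def answer(self):
            print("computed once")
            return 42

    e = Example()
    assert e.answer == 42           # prints "computed once"
    assert e.answer == 42           # served from e._cache, no recomputation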
diff --git a/third_party/python/aiohttp/aiohttp/_http_parser.c b/third_party/python/aiohttp/aiohttp/_http_parser.c
new file mode 100644
index 0000000000..096446e8d8
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_http_parser.c
@@ -0,0 +1,24607 @@
+/* Generated by Cython 0.29.21 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+ #error Cython requires Python 2.6+ or Python 3.3+.
+#else
+#define CYTHON_ABI "0_29_21"
+#define CYTHON_HEX_VERSION 0x001D15F0
+#define CYTHON_FUTURE_DIVISION 1
+#include <stddef.h>
+#ifndef offsetof
+ #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef HAVE_LONG_LONG
+ #if PY_VERSION_HEX >= 0x02070000
+ #define HAVE_LONG_LONG
+ #endif
+#endif
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+ #define CYTHON_COMPILING_IN_PYPY 1
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #if PY_VERSION_HEX < 0x03050000
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#elif defined(PYSTON_VERSION)
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 1
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#else
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 1
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
+ #define CYTHON_USE_PYTYPE_LOOKUP 1
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
+ #define CYTHON_USE_PYLONG_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #if PY_VERSION_HEX < 0x030300F0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #elif !defined(CYTHON_USE_UNICODE_WRITER)
+ #define CYTHON_USE_UNICODE_WRITER 1
+ #endif
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #ifndef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 1
+ #endif
+ #ifndef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
+ #endif
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
+ #endif
+ #ifndef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
+ #endif
+ #ifndef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
+ #endif
+#endif
+#if !defined(CYTHON_FAST_PYCCALL)
+#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
+#endif
+#if CYTHON_USE_PYLONG_INTERNALS
+ #include "longintrepr.h"
+ #undef SHIFT
+ #undef BASE
+ #undef MASK
+ #ifdef SIZEOF_VOID_P
+ enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+ #endif
+#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+# if defined(__cplusplus)
+ template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include <stdint.h>
+#endif
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
+ #define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyClass_Type
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#endif
+ #define __Pyx_DefaultClassType PyType_Type
+#endif
+#ifndef Py_TPFLAGS_CHECKTYPES
+ #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#ifndef METH_STACKLESS
+ #define METH_STACKLESS 0
+#endif
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ #ifndef METH_FASTCALL
+ #define METH_FASTCALL 0x80
+ #endif
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
+ Py_ssize_t nargs, PyObject *kwnames);
+#else
+ #define __Pyx_PyCFunctionFast _PyCFunctionFast
+ #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
+#endif
+#if CYTHON_FAST_PYCCALL
+#define __Pyx_PyFastCFunction_Check(func)\
+ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
+#else
+#define __Pyx_PyFastCFunction_Check(func) 0
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
+ #define PyMem_RawMalloc(n) PyMem_Malloc(n)
+ #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
+ #define PyMem_RawFree(p) PyMem_Free(p)
+#endif
+#if CYTHON_COMPILING_IN_PYSTON
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x03060000
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
+#elif PY_VERSION_HEX >= 0x03000000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#else
+ #define __Pyx_PyThreadState_Current _PyThreadState_Current
+#endif
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+ *key = PyThread_create_key();
+ return 0;
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+ Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+ *key = Py_tss_NEEDS_INIT;
+ return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+ PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+ return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+ PyThread_delete_key(*key);
+ *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+ return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+ return PyThread_get_key_value(*key);
+}
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
+#else
+#define __Pyx_PyDict_NewPresized(n) PyDict_New()
+#endif
+#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+ #define CYTHON_PEP393_ENABLED 1
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
+ #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
+ #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
+ #else
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
+ #endif
+#else
+ #define CYTHON_PEP393_ENABLED 0
+ #define PyUnicode_1BYTE_KIND 1
+ #define PyUnicode_2BYTE_KIND 2
+ #define PyUnicode_4BYTE_KIND 4
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
+ #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+ #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
+ #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
+ #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
+#endif
+#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
+#else
+ #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
+ #define PyObject_ASCII(o) PyObject_Repr(o)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#ifndef PyObject_Unicode
+ #define PyObject_Unicode PyObject_Str
+#endif
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+#if PY_VERSION_HEX >= 0x030900A4
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
+#else
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
+#endif
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+ #define PyNumber_Int PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+ #ifndef PyUnicode_InternFromString
+ #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+ #endif
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
+#else
+ #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
+#if CYTHON_USE_ASYNC_SLOTS
+ #if PY_VERSION_HEX >= 0x030500B1
+ #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+ #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+ #else
+ #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+ #endif
+#else
+ #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+ typedef struct {
+ unaryfunc am_await;
+ unaryfunc am_aiter;
+ unaryfunc am_anext;
+ } __Pyx_PyAsyncMethodsStruct;
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+ #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+ float value;
+ memset(&value, 0xFF, sizeof(value));
+ return value;
+}
+#endif
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+#define __PYX_MARK_ERR_POS(f_index, lineno) \
+ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#define __PYX_HAVE__aiohttp___http_parser
+#define __PYX_HAVE_API__aiohttp___http_parser
+/* Early includes */
+#include <string.h>
+#include <stdio.h>
+#include "pythread.h"
+#include <limits.h>
+#include <stdint.h>
+#include "../vendor/http-parser/http_parser.h"
+#include "_find_header.h"
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+ (sizeof(type) < sizeof(Py_ssize_t)) ||\
+ (sizeof(type) > sizeof(Py_ssize_t) &&\
+ likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX) &&\
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+ v == (type)PY_SSIZE_T_MIN))) ||\
+ (sizeof(type) == sizeof(Py_ssize_t) &&\
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX))) )
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+ return (size_t) i < (size_t) limit;
+}
+#if defined (__cplusplus) && __cplusplus >= 201103L
+ #include <cstdlib>
+ #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+ #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+ #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+ #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
+ const Py_UNICODE *u_end = u;
+ while (*u_end++) ;
+ return (size_t)(u_end - u - 1);
+}
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
+#define __Pyx_PySequence_Tuple(obj)\
+ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_ASSUME_SAFE_MACROS
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
+#else
+#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
+#endif
+#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ const char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ if (strcmp(default_encoding_c, "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (!ascii_chars_u) goto bad;
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ Py_DECREF(ascii_chars_u);
+ Py_DECREF(ascii_chars_b);
+ }
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+
+static PyObject *__pyx_m = NULL;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_cython_runtime = NULL;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "aiohttp/_http_parser.pyx",
+ "stringsource",
+ "type.pxd",
+ "bool.pxd",
+ "complex.pxd",
+ "aiohttp/_headers.pxi",
+};
+
+/*--- Type declarations ---*/
+struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage;
+struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage;
+struct __pyx_obj_7aiohttp_12_http_parser_HttpParser;
+struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser;
+struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser;
+struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__;
+struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr;
+struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__;
+struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr;
+struct __pyx_opt_args_7aiohttp_12_http_parser_10HttpParser__init;
+
+/* "aiohttp/_http_parser.pyx":327
+ * PyMem_Free(self._csettings)
+ *
+ * cdef _init(self, cparser.http_parser_type mode, # <<<<<<<<<<<<<<
+ * object protocol, object loop, int limit,
+ * object timer=None,
+ */
+struct __pyx_opt_args_7aiohttp_12_http_parser_10HttpParser__init {
+ int __pyx_n;
+ PyObject *timer;
+ size_t max_line_size;
+ size_t max_headers;
+ size_t max_field_size;
+ PyObject *payload_exception;
+ int response_with_body;
+ int read_until_eof;
+ int auto_decompress;
+};
+
+/* "aiohttp/_http_parser.pyx":110
+ *
+ * @cython.freelist(DEFAULT_FREELIST_SIZE)
+ * cdef class RawRequestMessage: # <<<<<<<<<<<<<<
+ * cdef readonly str method
+ * cdef readonly str path
+ */
+struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage {
+ PyObject_HEAD
+ PyObject *method;
+ PyObject *path;
+ PyObject *version;
+ PyObject *headers;
+ PyObject *raw_headers;
+ PyObject *should_close;
+ PyObject *compression;
+ PyObject *upgrade;
+ PyObject *chunked;
+ PyObject *url;
+};
+
+
+/* "aiohttp/_http_parser.pyx":210
+ *
+ * @cython.freelist(DEFAULT_FREELIST_SIZE)
+ * cdef class RawResponseMessage: # <<<<<<<<<<<<<<
+ * cdef readonly object version # HttpVersion
+ * cdef readonly int code
+ */
+struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage {
+ PyObject_HEAD
+ PyObject *version;
+ int code;
+ PyObject *reason;
+ PyObject *headers;
+ PyObject *raw_headers;
+ PyObject *should_close;
+ PyObject *compression;
+ PyObject *upgrade;
+ PyObject *chunked;
+};
+
+
+/* "aiohttp/_http_parser.pyx":272
+ *
+ * @cython.internal
+ * cdef class HttpParser: # <<<<<<<<<<<<<<
+ *
+ * cdef:
+ */
+struct __pyx_obj_7aiohttp_12_http_parser_HttpParser {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *__pyx_vtab;
+ struct http_parser *_cparser;
+ struct http_parser_settings *_csettings;
+ PyObject *_raw_name;
+ PyObject *_raw_value;
+ int _has_value;
+ PyObject *_protocol;
+ PyObject *_loop;
+ PyObject *_timer;
+ size_t _max_line_size;
+ size_t _max_field_size;
+ size_t _max_headers;
+ int _response_with_body;
+ int _read_until_eof;
+ int _started;
+ PyObject *_url;
+ PyObject *_buf;
+ PyObject *_path;
+ PyObject *_reason;
+ PyObject *_headers;
+ PyObject *_raw_headers;
+ int _upgraded;
+ PyObject *_messages;
+ PyObject *_payload;
+ int _payload_error;
+ PyObject *_payload_exception;
+ PyObject *_last_error;
+ int _auto_decompress;
+ int _limit;
+ PyObject *_content_encoding;
+ Py_buffer py_buf;
+};
+
+
+/* "aiohttp/_http_parser.pyx":563
+ *
+ *
+ * cdef class HttpRequestParser(HttpParser): # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None,
+ */
+struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser __pyx_base;
+};
+
+
+/* "aiohttp/_http_parser.pyx":591
+ *
+ *
+ * cdef class HttpResponseParser(HttpParser): # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None,
+ */
+struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser __pyx_base;
+};
+
+
+/* "aiohttp/_http_parser.pyx":135
+ * self.url = url
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * info = []
+ * info.append(("method", self.method))
+ */
+struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ {
+ PyObject_HEAD
+ PyObject *__pyx_v_info;
+};
+
+
+/* "aiohttp/_http_parser.pyx":147
+ * info.append(("chunked", self.chunked))
+ * info.append(("url", self.url))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info) # <<<<<<<<<<<<<<
+ * return '<RawRequestMessage(' + sinfo + ')>'
+ *
+ */
+struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr {
+ PyObject_HEAD
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *__pyx_outer_scope;
+ PyObject *__pyx_v_name;
+ PyObject *__pyx_v_val;
+};
+
+
+/* "aiohttp/_http_parser.pyx":233
+ * self.chunked = chunked
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * info = []
+ * info.append(("version", self.version))
+ */
+struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ {
+ PyObject_HEAD
+ PyObject *__pyx_v_info;
+};
+
+
+/* "aiohttp/_http_parser.pyx":244
+ * info.append(("upgrade", self.upgrade))
+ * info.append(("chunked", self.chunked))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info) # <<<<<<<<<<<<<<
+ * return '<RawResponseMessage(' + sinfo + ')>'
+ *
+ */
+struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr {
+ PyObject_HEAD
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *__pyx_outer_scope;
+ PyObject *__pyx_v_name;
+ PyObject *__pyx_v_val;
+};
+
+
+
+/* "aiohttp/_http_parser.pyx":272
+ *
+ * @cython.internal
+ * cdef class HttpParser: # <<<<<<<<<<<<<<
+ *
+ * cdef:
+ */
+
+struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser {
+ PyObject *(*_init)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *, enum http_parser_type, PyObject *, PyObject *, int, struct __pyx_opt_args_7aiohttp_12_http_parser_10HttpParser__init *__pyx_optional_args);
+ PyObject *(*_process_header)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *);
+ PyObject *(*_on_header_field)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *, char *, size_t);
+ PyObject *(*_on_header_value)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *, char *, size_t);
+ PyObject *(*_on_headers_complete)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *);
+ PyObject *(*_on_message_complete)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *);
+ PyObject *(*_on_chunk_header)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *);
+ PyObject *(*_on_chunk_complete)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *);
+ PyObject *(*_on_status_complete)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *);
+ PyObject *(*http_version)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *);
+};
+static struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *__pyx_vtabptr_7aiohttp_12_http_parser_HttpParser;
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser_http_version(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *);
+
+
+/* "aiohttp/_http_parser.pyx":563
+ *
+ *
+ * cdef class HttpRequestParser(HttpParser): # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None,
+ */
+
+struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpRequestParser {
+ struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser __pyx_base;
+};
+static struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpRequestParser *__pyx_vtabptr_7aiohttp_12_http_parser_HttpRequestParser;
+
+
+/* "aiohttp/_http_parser.pyx":591
+ *
+ *
+ * cdef class HttpResponseParser(HttpParser): # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None,
+ */
+
+struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpResponseParser {
+ struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser __pyx_base;
+};
+static struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpResponseParser *__pyx_vtabptr_7aiohttp_12_http_parser_HttpResponseParser;
+
+/* --- Runtime support code (head) --- */
+/* Refnanny.proto */
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ if (acquire_gil) {\
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ PyGILState_Release(__pyx_gilstate_save);\
+ } else {\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ }
+#else
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+ #define __Pyx_RefNannyFinishContext()\
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_DECREF(tmp);\
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
+
+/* PyObjectGetAttrStr.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/* GetBuiltinName.proto */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+/* GetItemInt.proto */
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
+ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
+ __Pyx_GetItemInt_Generic(o, to_py_func(i))))
+#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+ int is_list, int wraparound, int boundscheck);
+
+/* decode_c_string_utf16.proto */
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
+ int byteorder = 0;
+ return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+}
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
+ int byteorder = -1;
+ return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+}
+static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
+ int byteorder = 1;
+ return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
+}
+
+/* decode_c_bytes.proto */
+static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes(
+ const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop,
+ const char* encoding, const char* errors,
+ PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
+
+/* decode_bytes.proto */
+static CYTHON_INLINE PyObject* __Pyx_decode_bytes(
+ PyObject* string, Py_ssize_t start, Py_ssize_t stop,
+ const char* encoding, const char* errors,
+ PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
+ return __Pyx_decode_c_bytes(
+ PyBytes_AS_STRING(string), PyBytes_GET_SIZE(string),
+ start, stop, encoding, errors, decode_func);
+}
+
+/* RaiseArgTupleInvalid.proto */
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
+
+/* RaiseDoubleKeywords.proto */
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
+
+/* ParseKeywords.proto */
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
+ const char* function_name);
+
+/* None.proto */
+static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname);
+
+/* RaiseTooManyValuesToUnpack.proto */
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+/* RaiseNeedMoreValuesToUnpack.proto */
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+/* IterFinish.proto */
+static CYTHON_INLINE int __Pyx_IterFinish(void);
+
+/* UnpackItemEndCheck.proto */
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
+
+/* ListCompAppend.proto */
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
+ PyListObject* L = (PyListObject*) list;
+ Py_ssize_t len = Py_SIZE(list);
+ if (likely(L->allocated > len)) {
+ Py_INCREF(x);
+ PyList_SET_ITEM(list, len, x);
+ __Pyx_SET_SIZE(list, len + 1);
+ return 0;
+ }
+ return PyList_Append(list, x);
+}
+#else
+#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
+#endif
+
+/* ListAppend.proto */
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
+ PyListObject* L = (PyListObject*) list;
+ Py_ssize_t len = Py_SIZE(list);
+ if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
+ Py_INCREF(x);
+ PyList_SET_ITEM(list, len, x);
+ __Pyx_SET_SIZE(list, len + 1);
+ return 0;
+ }
+ return PyList_Append(list, x);
+}
+#else
+#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
+#endif
+
+/* KeywordStringCheck.proto */
+static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed);
+
+/* ExtTypeTest.proto */
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
+
+/* PyDictContains.proto */
+static CYTHON_INLINE int __Pyx_PyDict_ContainsTF(PyObject* item, PyObject* dict, int eq) {
+ int result = PyDict_Contains(dict, item);
+ return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
+}
+
+/* DictGetItem.proto */
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key);
+#define __Pyx_PyObject_Dict_GetItem(obj, name)\
+ (likely(PyDict_CheckExact(obj)) ?\
+ __Pyx_PyDict_GetItem(obj, name) : PyObject_GetItem(obj, name))
+#else
+#define __Pyx_PyDict_GetItem(d, key) PyObject_GetItem(d, key)
+#define __Pyx_PyObject_Dict_GetItem(obj, name) PyObject_GetItem(obj, name)
+#endif
+
+/* PyErrExceptionMatches.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
+#else
+#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
+#endif
+
+/* PyThreadStateGet.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
+#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
+#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
+#else
+#define __Pyx_PyThreadState_declare
+#define __Pyx_PyThreadState_assign
+#define __Pyx_PyErr_Occurred() PyErr_Occurred()
+#endif
+
+/* PyErrFetchRestore.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
+#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
+#else
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#endif
+#else
+#define __Pyx_PyErr_Clear() PyErr_Clear()
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
+#endif
+
+/* GetAttr.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
+
+/* GetAttr3.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
+
+/* PyDictVersioning.proto */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
+#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
+ (version_var) = __PYX_GET_DICT_VERSION(dict);\
+ (cache_var) = (value);
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
+ (VAR) = __pyx_dict_cached_value;\
+ } else {\
+ (VAR) = __pyx_dict_cached_value = (LOOKUP);\
+ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
+ }\
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
+#else
+#define __PYX_GET_DICT_VERSION(dict) (0)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
+#endif
+
+/* GetModuleGlobalName.proto */
+#if CYTHON_USE_DICT_VERSIONS
+#define __Pyx_GetModuleGlobalName(var, name) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
+ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
+ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
+ PY_UINT64_T __pyx_dict_version;\
+ PyObject *__pyx_dict_cached_value;\
+ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
+#else
+#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
+#endif
+
+/* PyFunctionFastCall.proto */
+#if CYTHON_FAST_PYCALL
+#define __Pyx_PyFunction_FastCall(func, args, nargs)\
+ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
+#else
+#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
+#endif
+#define __Pyx_BUILD_ASSERT_EXPR(cond)\
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+#ifndef Py_MEMBER_SIZE
+#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
+#endif
+ static size_t __pyx_pyframe_localsplus_offset = 0;
+ #include "frameobject.h"
+ #define __Pxy_PyFrame_Initialize_Offsets()\
+ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
+ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
+ #define __Pyx_PyFrame_GetLocalsplus(frame)\
+ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
+#endif
+
+/* PyObjectCall.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+/* PyObjectCallMethO.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectCallNoArg.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+#else
+#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
+#endif
+
+/* PyCFunctionFastCall.proto */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
+#else
+#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
+#endif
+
+/* PyObjectCallOneArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+/* PyObjectCall2Args.proto */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
+
+/* PySequenceContains.proto */
+static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) {
+ int result = PySequence_Contains(seq, item);
+ return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
+}
+
+/* RaiseException.proto */
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
+
+/* IncludeStringH.proto */
+#include <string.h>
+
+/* BytesEquals.proto */
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* UnicodeEquals.proto */
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* SliceObject.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+ PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
+ int has_cstart, int has_cstop, int wraparound);
+
+/* decode_bytearray.proto */
+static CYTHON_INLINE PyObject* __Pyx_decode_bytearray(
+ PyObject* string, Py_ssize_t start, Py_ssize_t stop,
+ const char* encoding, const char* errors,
+ PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
+ return __Pyx_decode_c_bytes(
+ PyByteArray_AS_STRING(string), PyByteArray_GET_SIZE(string),
+ start, stop, encoding, errors, decode_func);
+}
+
+/* GetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* SwapException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* GetTopmostException.proto */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
+#endif
+
+/* SaveResetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+#else
+#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
+#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
+#endif
+
+/* decode_c_string.proto */
+static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
+ const char* cstring, Py_ssize_t start, Py_ssize_t stop,
+ const char* encoding, const char* errors,
+ PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
+
+/* UnpackUnboundCMethod.proto */
+typedef struct {
+ PyObject *type;
+ PyObject **method_name;
+ PyCFunction func;
+ PyObject *method;
+ int flag;
+} __Pyx_CachedCFunction;
+
+/* CallUnboundCMethod1.proto */
+static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg);
+#else
+#define __Pyx_CallUnboundCMethod1(cfunc, self, arg) __Pyx__CallUnboundCMethod1(cfunc, self, arg)
+#endif
+
+/* Import.proto */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
+/* ImportFrom.proto */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
+
+/* HasAttr.proto */
+static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
+
+/* PyObject_GenericGetAttrNoDict.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
+#endif
+
+/* PyObject_GenericGetAttr.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
+#endif
+
+/* PyObjectGetAttrStrNoError.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
+
+/* SetupReduce.proto */
+static int __Pyx_setup_reduce(PyObject* type_obj);
+
+/* SetVTable.proto */
+static int __Pyx_SetVtable(PyObject *dict, void *vtable);
+
+/* TypeImport.proto */
+#ifndef __PYX_HAVE_RT_ImportType_proto
+#define __PYX_HAVE_RT_ImportType_proto
+enum __Pyx_ImportType_CheckSize {
+ __Pyx_ImportType_CheckSize_Error = 0,
+ __Pyx_ImportType_CheckSize_Warn = 1,
+ __Pyx_ImportType_CheckSize_Ignore = 2
+};
+static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
+#endif
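+
+/* Editor's sketch (hypothetical call shape, illustration only): during
+ * module init, __Pyx_ImportType is invoked with a CheckSize policy so a
+ * type whose struct grew since compile time can error, warn, or pass. */
+#if 0
+    PyTypeObject *tp = __Pyx_ImportType(module, "builtins", "type",
+        sizeof(PyTypeObject), __Pyx_ImportType_CheckSize_Warn);
+    if (!tp) goto bad;
+#endif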
+
+/* CLineInTraceback.proto */
+#ifdef CYTHON_CLINE_IN_TRACEBACK
+#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
+#else
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
+#endif
+
+/* CodeObjectCache.proto */
+typedef struct {
+ PyCodeObject* code_object;
+ int code_line;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
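+
+/* Editor's sketch (illustration only): the cache above stays sorted by
+ * code_line, so lookups are a plain binary search in the spirit of
+ * __pyx_bisect_code_objects; a standalone lower-bound version: */
+#if 0
+static int example_lower_bound(const __Pyx_CodeObjectCacheEntry *entries,
+                               int count, int code_line) {
+    int lo = 0, hi = count;
+    while (lo < hi) {
+        int mid = lo + (hi - lo) / 2;
+        if (entries[mid].code_line < code_line) lo = mid + 1;
+        else hi = mid;
+    }
+    return lo;  /* first index whose code_line is >= the query */
+}
+#endif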
+
+/* AddTraceback.proto */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_short(unsigned short value);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE enum http_method __Pyx_PyInt_As_enum__http_method(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
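+
+/* Editor's note (illustration only): the CIntToPy/CIntFromPy prototypes
+ * above are range-checked conversions between Python ints and the exact C
+ * integer types this module uses, including the http_method enum. */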
+
+/* FastTypeChecks.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
+#else
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
+#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
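+
+/* Editor's sketch (hypothetical usage, illustration only): on CPython the
+ * helpers above replace generic type checks with a direct subtype walk;
+ * e.g. matching either of two exception types in one call: */
+#if 0
+    if (__Pyx_PyErr_GivenExceptionMatches2(err, PyExc_StopIteration,
+                                           PyExc_GeneratorExit)) {
+        /* handle generator shutdown */
+    }
+#endif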
+
+/* FetchCommonType.proto */
+static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
+
+/* PyObjectGetMethod.proto */
+static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
+
+/* PyObjectCallMethod1.proto */
+static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg);
+
+/* CoroutineBase.proto */
+typedef PyObject *(*__pyx_coroutine_body_t)(PyObject *, PyThreadState *, PyObject *);
+#if CYTHON_USE_EXC_INFO_STACK
+#define __Pyx_ExcInfoStruct _PyErr_StackItem
+#else
+typedef struct {
+ PyObject *exc_type;
+ PyObject *exc_value;
+ PyObject *exc_traceback;
+} __Pyx_ExcInfoStruct;
+#endif
+typedef struct {
+ PyObject_HEAD
+ __pyx_coroutine_body_t body;
+ PyObject *closure;
+ __Pyx_ExcInfoStruct gi_exc_state;
+ PyObject *gi_weakreflist;
+ PyObject *classobj;
+ PyObject *yieldfrom;
+ PyObject *gi_name;
+ PyObject *gi_qualname;
+ PyObject *gi_modulename;
+ PyObject *gi_code;
+ int resume_label;
+ char is_running;
+} __pyx_CoroutineObject;
+static __pyx_CoroutineObject *__Pyx__Coroutine_New(
+ PyTypeObject *type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
+ PyObject *name, PyObject *qualname, PyObject *module_name);
+static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
+ __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
+ PyObject *name, PyObject *qualname, PyObject *module_name);
+static CYTHON_INLINE void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *self);
+static int __Pyx_Coroutine_clear(PyObject *self);
+static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value);
+static PyObject *__Pyx_Coroutine_Close(PyObject *self);
+static PyObject *__Pyx_Coroutine_Throw(PyObject *gen, PyObject *args);
+#if CYTHON_USE_EXC_INFO_STACK
+#define __Pyx_Coroutine_SwapException(self)
+#define __Pyx_Coroutine_ResetAndClearException(self) __Pyx_Coroutine_ExceptionClear(&(self)->gi_exc_state)
+#else
+#define __Pyx_Coroutine_SwapException(self) {\
+ __Pyx_ExceptionSwap(&(self)->gi_exc_state.exc_type, &(self)->gi_exc_state.exc_value, &(self)->gi_exc_state.exc_traceback);\
+ __Pyx_Coroutine_ResetFrameBackpointer(&(self)->gi_exc_state);\
+ }
+#define __Pyx_Coroutine_ResetAndClearException(self) {\
+ __Pyx_ExceptionReset((self)->gi_exc_state.exc_type, (self)->gi_exc_state.exc_value, (self)->gi_exc_state.exc_traceback);\
+ (self)->gi_exc_state.exc_type = (self)->gi_exc_state.exc_value = (self)->gi_exc_state.exc_traceback = NULL;\
+ }
+#endif
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyGen_FetchStopIterationValue(pvalue)\
+ __Pyx_PyGen__FetchStopIterationValue(__pyx_tstate, pvalue)
+#else
+#define __Pyx_PyGen_FetchStopIterationValue(pvalue)\
+ __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, pvalue)
+#endif
+static int __Pyx_PyGen__FetchStopIterationValue(PyThreadState *tstate, PyObject **pvalue);
+static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state);
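+
+/* Editor's sketch (hypothetical call shape, illustration only): after an
+ * exhausted (sub)generator raises StopIteration, the helper above pulls
+ * out its .value payload, the C-level analogue of `return x` inside a
+ * generator: */
+#if 0
+    PyObject *value;
+    if (__Pyx_PyGen_FetchStopIterationValue(&value) < 0) goto error;
+    /* on success, value owns the StopIteration payload (Py_None if bare) */
+#endif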
+
+/* PatchModuleWithCoroutine.proto */
+static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code);
+
+/* PatchGeneratorABC.proto */
+static int __Pyx_patch_abc(void);
+
+/* Generator.proto */
+#define __Pyx_Generator_USED
+static PyTypeObject *__pyx_GeneratorType = 0;
+#define __Pyx_Generator_CheckExact(obj) (Py_TYPE(obj) == __pyx_GeneratorType)
+#define __Pyx_Generator_New(body, code, closure, name, qualname, module_name)\
+ __Pyx__Coroutine_New(__pyx_GeneratorType, body, code, closure, name, qualname, module_name)
+static PyObject *__Pyx_Generator_Next(PyObject *self);
+static int __pyx_Generator_init(void);
+
+/* CheckBinaryVersion.proto */
+static int __Pyx_check_binary_version(void);
+
+/* InitStrings.proto */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__init(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, enum http_parser_type __pyx_v_mode, PyObject *__pyx_v_protocol, PyObject *__pyx_v_loop, int __pyx_v_limit, struct __pyx_opt_args_7aiohttp_12_http_parser_10HttpParser__init *__pyx_optional_args); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__process_header(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_header_field(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, char *__pyx_v_at, size_t __pyx_v_length); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_header_value(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, char *__pyx_v_at, size_t __pyx_v_length); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_headers_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_message_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_chunk_header(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_chunk_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_status_complete(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto*/
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser_http_version(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_17HttpRequestParser__on_status_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *__pyx_v_self); /* proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_18HttpResponseParser__on_status_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *__pyx_v_self); /* proto*/
+
+/* Module declarations from 'cpython.version' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.ref' */
+
+/* Module declarations from 'cpython.exc' */
+
+/* Module declarations from 'cpython.module' */
+
+/* Module declarations from 'cpython.mem' */
+
+/* Module declarations from 'cpython.tuple' */
+
+/* Module declarations from 'cpython.list' */
+
+/* Module declarations from 'cpython.sequence' */
+
+/* Module declarations from 'cpython.mapping' */
+
+/* Module declarations from 'cpython.iterator' */
+
+/* Module declarations from 'cpython.number' */
+
+/* Module declarations from 'cpython.int' */
+
+/* Module declarations from 'cpython.bool' */
+static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0;
+
+/* Module declarations from 'cpython.long' */
+
+/* Module declarations from 'cpython.float' */
+
+/* Module declarations from 'cpython.complex' */
+static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0;
+
+/* Module declarations from 'cpython.string' */
+
+/* Module declarations from 'cpython.unicode' */
+
+/* Module declarations from 'cpython.dict' */
+
+/* Module declarations from 'cpython.instance' */
+
+/* Module declarations from 'cpython.function' */
+
+/* Module declarations from 'cpython.method' */
+
+/* Module declarations from 'cpython.weakref' */
+
+/* Module declarations from 'cpython.getargs' */
+
+/* Module declarations from 'cpython.pythread' */
+
+/* Module declarations from 'cpython.pystate' */
+
+/* Module declarations from 'cpython.cobject' */
+
+/* Module declarations from 'cpython.oldbuffer' */
+
+/* Module declarations from 'cpython.set' */
+
+/* Module declarations from 'cpython.buffer' */
+
+/* Module declarations from 'cpython.bytes' */
+
+/* Module declarations from 'cpython.pycapsule' */
+
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'libc.limits' */
+
+/* Module declarations from 'cython' */
+
+/* Module declarations from 'aiohttp' */
+
+/* Module declarations from 'libc.stdint' */
+
+/* Module declarations from 'aiohttp._cparser' */
+
+/* Module declarations from 'aiohttp._find_header' */
+
+/* Module declarations from 'aiohttp._http_parser' */
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser_RawRequestMessage = 0;
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser_RawResponseMessage = 0;
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser_HttpParser = 0;
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser_HttpRequestParser = 0;
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser_HttpResponseParser = 0;
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct____repr__ = 0;
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr = 0;
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ = 0;
+static PyTypeObject *__pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_headers = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_URL = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_URL_build = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_CIMultiDict = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_CIMultiDictProxy = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_HttpVersion = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_HttpVersion10 = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_HttpVersion11 = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_SEC_WEBSOCKET_KEY1 = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_CONTENT_ENCODING = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_EMPTY_PAYLOAD = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_StreamReader = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser_DeflateBuffer = 0;
+static PyObject *__pyx_v_7aiohttp_12_http_parser__http_method = 0;
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_extend(PyObject *, char const *, size_t); /*proto*/
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_http_method_str(int); /*proto*/
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_find_header(PyObject *); /*proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser__new_request_message(PyObject *, PyObject *, PyObject *, PyObject *, PyObject *, int, PyObject *, int, int, PyObject *); /*proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser__new_response_message(PyObject *, int, PyObject *, PyObject *, PyObject *, int, PyObject *, int, int); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_message_begin(struct http_parser *); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_url(struct http_parser *, char const *, size_t); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_status(struct http_parser *, char const *, size_t); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_header_field(struct http_parser *, char const *, size_t); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_header_value(struct http_parser *, char const *, size_t); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_headers_complete(struct http_parser *); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_body(struct http_parser *, char const *, size_t); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_message_complete(struct http_parser *); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_chunk_header(struct http_parser *); /*proto*/
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_chunk_complete(struct http_parser *); /*proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser_parser_error_from_errno(enum http_errno); /*proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser__parse_url(char *, size_t); /*proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser___pyx_unpickle_RawRequestMessage__set_state(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *, PyObject *); /*proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_parser___pyx_unpickle_RawResponseMessage__set_state(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *, PyObject *); /*proto*/
+#define __Pyx_MODULE_NAME "aiohttp._http_parser"
+extern int __pyx_module_is_main_aiohttp___http_parser;
+int __pyx_module_is_main_aiohttp___http_parser = 0;
+
+/* Implementation of 'aiohttp._http_parser' */
+static PyObject *__pyx_builtin_range;
+static PyObject *__pyx_builtin_MemoryError;
+static PyObject *__pyx_builtin_TypeError;
+static PyObject *__pyx_builtin_BaseException;
+static const char __pyx_k_[] = "=";
+static const char __pyx_k_i[] = "i";
+static const char __pyx_k_TE[] = "TE";
+static const char __pyx_k__2[] = ", ";
+static const char __pyx_k__3[] = ")>";
+static const char __pyx_k__4[] = "";
+static const char __pyx_k_br[] = "br";
+static const char __pyx_k_AGE[] = "AGE";
+static const char __pyx_k_URI[] = "URI";
+static const char __pyx_k_URL[] = "URL";
+static const char __pyx_k_VIA[] = "VIA";
+static const char __pyx_k__11[] = ":";
+static const char __pyx_k_add[] = "add";
+static const char __pyx_k_all[] = "__all__";
+static const char __pyx_k_new[] = "__new__";
+static const char __pyx_k_url[] = "url";
+static const char __pyx_k_DATE[] = "DATE";
+static const char __pyx_k_ETAG[] = "ETAG";
+static const char __pyx_k_FROM[] = "FROM";
+static const char __pyx_k_HOST[] = "HOST";
+static const char __pyx_k_LINK[] = "LINK";
+static const char __pyx_k_VARY[] = "VARY";
+static const char __pyx_k_args[] = "args";
+static const char __pyx_k_code[] = "code";
+static const char __pyx_k_dict[] = "__dict__";
+static const char __pyx_k_gzip[] = "gzip";
+static const char __pyx_k_hdrs[] = "hdrs";
+static const char __pyx_k_host[] = "host";
+static const char __pyx_k_loop[] = "loop";
+static const char __pyx_k_main[] = "__main__";
+static const char __pyx_k_name[] = "__name__";
+static const char __pyx_k_path[] = "path";
+static const char __pyx_k_port[] = "port";
+static const char __pyx_k_send[] = "send";
+static const char __pyx_k_test[] = "__test__";
+static const char __pyx_k_user[] = "user";
+static const char __pyx_k_yarl[] = "yarl";
+static const char __pyx_k_ALLOW[] = "ALLOW";
+static const char __pyx_k_RANGE[] = "RANGE";
+static const char __pyx_k_URL_2[] = "_URL";
+static const char __pyx_k_build[] = "build";
+static const char __pyx_k_close[] = "close";
+static const char __pyx_k_limit[] = "limit";
+static const char __pyx_k_lower[] = "lower";
+static const char __pyx_k_range[] = "range";
+static const char __pyx_k_throw[] = "throw";
+static const char __pyx_k_timer[] = "timer";
+static const char __pyx_k_ACCEPT[] = "ACCEPT";
+static const char __pyx_k_COOKIE[] = "COOKIE";
+static const char __pyx_k_DIGEST[] = "DIGEST";
+static const char __pyx_k_EXPECT[] = "EXPECT";
+static const char __pyx_k_ORIGIN[] = "ORIGIN";
+static const char __pyx_k_PRAGMA[] = "PRAGMA";
+static const char __pyx_k_SERVER[] = "SERVER";
+static const char __pyx_k_format[] = "format";
+static const char __pyx_k_import[] = "__import__";
+static const char __pyx_k_method[] = "method";
+static const char __pyx_k_pickle[] = "pickle";
+static const char __pyx_k_py_buf[] = "py_buf";
+static const char __pyx_k_reason[] = "reason";
+static const char __pyx_k_reduce[] = "__reduce__";
+static const char __pyx_k_scheme[] = "scheme";
+static const char __pyx_k_update[] = "update";
+static const char __pyx_k_EXPIRES[] = "EXPIRES";
+static const char __pyx_k_REFERER[] = "REFERER";
+static const char __pyx_k_TRAILER[] = "TRAILER";
+static const char __pyx_k_UPGRADE[] = "UPGRADE";
+static const char __pyx_k_WARNING[] = "WARNING";
+static const char __pyx_k_aiohttp[] = "aiohttp";
+static const char __pyx_k_chunked[] = "chunked";
+static const char __pyx_k_deflate[] = "deflate";
+static const char __pyx_k_encoded[] = "encoded";
+static const char __pyx_k_genexpr[] = "genexpr";
+static const char __pyx_k_headers[] = "headers";
+static const char __pyx_k_streams[] = "streams";
+static const char __pyx_k_unknown[] = "<unknown>";
+static const char __pyx_k_upgrade[] = "upgrade";
+static const char __pyx_k_version[] = "version";
+static const char __pyx_k_IF_MATCH[] = "IF_MATCH";
+static const char __pyx_k_IF_RANGE[] = "IF_RANGE";
+static const char __pyx_k_LOCATION[] = "LOCATION";
+static const char __pyx_k_buf_data[] = "buf_data";
+static const char __pyx_k_feed_eof[] = "feed_eof";
+static const char __pyx_k_fragment[] = "fragment";
+static const char __pyx_k_getstate[] = "__getstate__";
+static const char __pyx_k_password[] = "password";
+static const char __pyx_k_protocol[] = "protocol";
+static const char __pyx_k_pyx_type[] = "__pyx_type";
+static const char __pyx_k_setstate[] = "__setstate__";
+static const char __pyx_k_FORWARDED[] = "FORWARDED";
+static const char __pyx_k_TypeError[] = "TypeError";
+static const char __pyx_k_feed_data[] = "feed_data";
+static const char __pyx_k_multidict[] = "multidict";
+static const char __pyx_k_parse_url[] = "parse_url";
+static const char __pyx_k_partition[] = "partition";
+static const char __pyx_k_pyx_state[] = "__pyx_state";
+static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
+static const char __pyx_k_CONNECTION[] = "CONNECTION";
+static const char __pyx_k_KEEP_ALIVE[] = "KEEP_ALIVE";
+static const char __pyx_k_SET_COOKIE[] = "SET_COOKIE";
+static const char __pyx_k_USER_AGENT[] = "USER_AGENT";
+static const char __pyx_k_pyx_result[] = "__pyx_result";
+static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
+static const char __pyx_k_CIMultiDict[] = "CIMultiDict";
+static const char __pyx_k_CONTENT_MD5[] = "CONTENT_MD5";
+static const char __pyx_k_DESTINATION[] = "DESTINATION";
+static const char __pyx_k_HttpVersion[] = "HttpVersion";
+static const char __pyx_k_LineTooLong[] = "LineTooLong";
+static const char __pyx_k_MemoryError[] = "MemoryError";
+static const char __pyx_k_PickleError[] = "PickleError";
+static const char __pyx_k_RETRY_AFTER[] = "RETRY_AFTER";
+static const char __pyx_k_WANT_DIGEST[] = "WANT_DIGEST";
+static const char __pyx_k_compression[] = "compression";
+static const char __pyx_k_http_parser[] = "http_parser";
+static const char __pyx_k_http_writer[] = "http_writer";
+static const char __pyx_k_max_headers[] = "max_headers";
+static const char __pyx_k_raw_headers[] = "raw_headers";
+static const char __pyx_k_CONTENT_TYPE[] = "CONTENT_TYPE";
+static const char __pyx_k_MAX_FORWARDS[] = "MAX_FORWARDS";
+static const char __pyx_k_StreamReader[] = "StreamReader";
+static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
+static const char __pyx_k_query_string[] = "query_string";
+static const char __pyx_k_should_close[] = "should_close";
+static const char __pyx_k_stringsource[] = "stringsource";
+static const char __pyx_k_ACCEPT_RANGES[] = "ACCEPT_RANGES";
+static const char __pyx_k_AUTHORIZATION[] = "AUTHORIZATION";
+static const char __pyx_k_BadStatusLine[] = "BadStatusLine";
+static const char __pyx_k_BaseException[] = "BaseException";
+static const char __pyx_k_CACHE_CONTROL[] = "CACHE_CONTROL";
+static const char __pyx_k_CIMultiDict_2[] = "_CIMultiDict";
+static const char __pyx_k_CONTENT_RANGE[] = "CONTENT_RANGE";
+static const char __pyx_k_DeflateBuffer[] = "DeflateBuffer";
+static const char __pyx_k_EMPTY_PAYLOAD[] = "EMPTY_PAYLOAD";
+static const char __pyx_k_HttpVersion10[] = "HttpVersion10";
+static const char __pyx_k_HttpVersion11[] = "HttpVersion11";
+static const char __pyx_k_HttpVersion_2[] = "_HttpVersion";
+static const char __pyx_k_IF_NONE_MATCH[] = "IF_NONE_MATCH";
+static const char __pyx_k_InvalidHeader[] = "InvalidHeader";
+static const char __pyx_k_LAST_EVENT_ID[] = "LAST_EVENT_ID";
+static const char __pyx_k_LAST_MODIFIED[] = "LAST_MODIFIED";
+static const char __pyx_k_invalid_url_r[] = "invalid url {!r}";
+static const char __pyx_k_max_line_size[] = "max_line_size";
+static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
+static const char __pyx_k_set_exception[] = "set_exception";
+static const char __pyx_k_ACCEPT_CHARSET[] = "ACCEPT_CHARSET";
+static const char __pyx_k_BadHttpMessage[] = "BadHttpMessage";
+static const char __pyx_k_CONTENT_LENGTH[] = "CONTENT_LENGTH";
+static const char __pyx_k_StreamReader_2[] = "_StreamReader";
+static const char __pyx_k_max_field_size[] = "max_field_size";
+static const char __pyx_k_read_until_eof[] = "read_until_eof";
+static const char __pyx_k_ACCEPT_ENCODING[] = "ACCEPT_ENCODING";
+static const char __pyx_k_ACCEPT_LANGUAGE[] = "ACCEPT_LANGUAGE";
+static const char __pyx_k_DeflateBuffer_2[] = "_DeflateBuffer";
+static const char __pyx_k_EMPTY_PAYLOAD_2[] = "_EMPTY_PAYLOAD";
+static const char __pyx_k_HttpVersion10_2[] = "_HttpVersion10";
+static const char __pyx_k_HttpVersion11_2[] = "_HttpVersion11";
+static const char __pyx_k_InvalidURLError[] = "InvalidURLError";
+static const char __pyx_k_X_FORWARDED_FOR[] = "X_FORWARDED_FOR";
+static const char __pyx_k_auto_decompress[] = "auto_decompress";
+static const char __pyx_k_http_exceptions[] = "http_exceptions";
+static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
+static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
+static const char __pyx_k_CIMultiDictProxy[] = "CIMultiDictProxy";
+static const char __pyx_k_CONTENT_ENCODING[] = "CONTENT_ENCODING";
+static const char __pyx_k_CONTENT_LANGUAGE[] = "CONTENT_LANGUAGE";
+static const char __pyx_k_CONTENT_LOCATION[] = "CONTENT_LOCATION";
+static const char __pyx_k_WWW_AUTHENTICATE[] = "WWW_AUTHENTICATE";
+static const char __pyx_k_X_FORWARDED_HOST[] = "X_FORWARDED_HOST";
+static const char __pyx_k_HttpRequestParser[] = "HttpRequestParser";
+static const char __pyx_k_IF_MODIFIED_SINCE[] = "IF_MODIFIED_SINCE";
+static const char __pyx_k_RawRequestMessage[] = "<RawRequestMessage(";
+static const char __pyx_k_SEC_WEBSOCKET_KEY[] = "SEC_WEBSOCKET_KEY";
+static const char __pyx_k_TRANSFER_ENCODING[] = "TRANSFER_ENCODING";
+static const char __pyx_k_X_FORWARDED_PROTO[] = "X_FORWARDED_PROTO";
+static const char __pyx_k_payload_exception[] = "payload_exception";
+static const char __pyx_k_CIMultiDictProxy_2[] = "_CIMultiDictProxy";
+static const char __pyx_k_ContentLengthError[] = "ContentLengthError";
+static const char __pyx_k_HttpResponseParser[] = "HttpResponseParser";
+static const char __pyx_k_PROXY_AUTHENTICATE[] = "PROXY_AUTHENTICATE";
+static const char __pyx_k_RawResponseMessage[] = "<RawResponseMessage(";
+static const char __pyx_k_SEC_WEBSOCKET_KEY1[] = "SEC_WEBSOCKET_KEY1";
+static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
+static const char __pyx_k_response_with_body[] = "response_with_body";
+static const char __pyx_k_CONTENT_DISPOSITION[] = "CONTENT_DISPOSITION";
+static const char __pyx_k_IF_UNMODIFIED_SINCE[] = "IF_UNMODIFIED_SINCE";
+static const char __pyx_k_PROXY_AUTHORIZATION[] = "PROXY_AUTHORIZATION";
+static const char __pyx_k_RawRequestMessage_2[] = "RawRequestMessage";
+static const char __pyx_k_PayloadEncodingError[] = "PayloadEncodingError";
+static const char __pyx_k_RawResponseMessage_2[] = "RawResponseMessage";
+static const char __pyx_k_SEC_WEBSOCKET_ACCEPT[] = "SEC_WEBSOCKET_ACCEPT";
+static const char __pyx_k_aiohttp__http_parser[] = "aiohttp._http_parser";
+static const char __pyx_k_SEC_WEBSOCKET_VERSION[] = "SEC_WEBSOCKET_VERSION";
+static const char __pyx_k_TransferEncodingError[] = "TransferEncodingError";
+static const char __pyx_k_repr___locals_genexpr[] = "__repr__.<locals>.genexpr";
+static const char __pyx_k_ACCESS_CONTROL_MAX_AGE[] = "ACCESS_CONTROL_MAX_AGE";
+static const char __pyx_k_SEC_WEBSOCKET_PROTOCOL[] = "SEC_WEBSOCKET_PROTOCOL";
+static const char __pyx_k_Header_name_is_too_long[] = "Header name is too long";
+static const char __pyx_k_Status_line_is_too_long[] = "Status line is too long";
+static const char __pyx_k_Header_value_is_too_long[] = "Header value is too long";
+static const char __pyx_k_SEC_WEBSOCKET_EXTENSIONS[] = "SEC_WEBSOCKET_EXTENSIONS";
+static const char __pyx_k_aiohttp__http_parser_pyx[] = "aiohttp/_http_parser.pyx";
+static const char __pyx_k_end_http_chunk_receiving[] = "end_http_chunk_receiving";
+static const char __pyx_k_CONTENT_TRANSFER_ENCODING[] = "CONTENT_TRANSFER_ENCODING";
+static const char __pyx_k_begin_http_chunk_receiving[] = "begin_http_chunk_receiving";
+static const char __pyx_k_ACCESS_CONTROL_ALLOW_ORIGIN[] = "ACCESS_CONTROL_ALLOW_ORIGIN";
+static const char __pyx_k_ACCESS_CONTROL_ALLOW_HEADERS[] = "ACCESS_CONTROL_ALLOW_HEADERS";
+static const char __pyx_k_ACCESS_CONTROL_ALLOW_METHODS[] = "ACCESS_CONTROL_ALLOW_METHODS";
+static const char __pyx_k_ACCESS_CONTROL_EXPOSE_HEADERS[] = "ACCESS_CONTROL_EXPOSE_HEADERS";
+static const char __pyx_k_ACCESS_CONTROL_REQUEST_METHOD[] = "ACCESS_CONTROL_REQUEST_METHOD";
+static const char __pyx_k_ACCESS_CONTROL_REQUEST_HEADERS[] = "ACCESS_CONTROL_REQUEST_HEADERS";
+static const char __pyx_k_pyx_unpickle_RawRequestMessage[] = "__pyx_unpickle_RawRequestMessage";
+static const char __pyx_k_pyx_unpickle_RawResponseMessag[] = "__pyx_unpickle_RawResponseMessage";
+static const char __pyx_k_ACCESS_CONTROL_ALLOW_CREDENTIALS[] = "ACCESS_CONTROL_ALLOW_CREDENTIALS";
+static const char __pyx_k_Incompatible_checksums_s_vs_0x14[] = "Incompatible checksums (%s vs 0x1408252 = (chunked, compression, headers, method, path, raw_headers, should_close, upgrade, url, version))";
+static const char __pyx_k_Incompatible_checksums_s_vs_0xc7[] = "Incompatible checksums (%s vs 0xc7706dc = (chunked, code, compression, headers, raw_headers, reason, should_close, upgrade, version))";
+static const char __pyx_k_Not_enough_data_for_satisfy_cont[] = "Not enough data for satisfy content length header.";
+static const char __pyx_k_Not_enough_data_for_satisfy_tran[] = "Not enough data for satisfy transfer length header.";
+static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
+static PyObject *__pyx_kp_u_;
+static PyObject *__pyx_n_s_ACCEPT;
+static PyObject *__pyx_n_s_ACCEPT_CHARSET;
+static PyObject *__pyx_n_s_ACCEPT_ENCODING;
+static PyObject *__pyx_n_s_ACCEPT_LANGUAGE;
+static PyObject *__pyx_n_s_ACCEPT_RANGES;
+static PyObject *__pyx_n_s_ACCESS_CONTROL_ALLOW_CREDENTIALS;
+static PyObject *__pyx_n_s_ACCESS_CONTROL_ALLOW_HEADERS;
+static PyObject *__pyx_n_s_ACCESS_CONTROL_ALLOW_METHODS;
+static PyObject *__pyx_n_s_ACCESS_CONTROL_ALLOW_ORIGIN;
+static PyObject *__pyx_n_s_ACCESS_CONTROL_EXPOSE_HEADERS;
+static PyObject *__pyx_n_s_ACCESS_CONTROL_MAX_AGE;
+static PyObject *__pyx_n_s_ACCESS_CONTROL_REQUEST_HEADERS;
+static PyObject *__pyx_n_s_ACCESS_CONTROL_REQUEST_METHOD;
+static PyObject *__pyx_n_s_AGE;
+static PyObject *__pyx_n_s_ALLOW;
+static PyObject *__pyx_n_s_AUTHORIZATION;
+static PyObject *__pyx_n_s_BadHttpMessage;
+static PyObject *__pyx_n_s_BadStatusLine;
+static PyObject *__pyx_n_s_BaseException;
+static PyObject *__pyx_n_s_CACHE_CONTROL;
+static PyObject *__pyx_n_s_CIMultiDict;
+static PyObject *__pyx_n_s_CIMultiDictProxy;
+static PyObject *__pyx_n_s_CIMultiDictProxy_2;
+static PyObject *__pyx_n_s_CIMultiDict_2;
+static PyObject *__pyx_n_s_CONNECTION;
+static PyObject *__pyx_n_s_CONTENT_DISPOSITION;
+static PyObject *__pyx_n_s_CONTENT_ENCODING;
+static PyObject *__pyx_n_s_CONTENT_LANGUAGE;
+static PyObject *__pyx_n_s_CONTENT_LENGTH;
+static PyObject *__pyx_n_s_CONTENT_LOCATION;
+static PyObject *__pyx_n_s_CONTENT_MD5;
+static PyObject *__pyx_n_s_CONTENT_RANGE;
+static PyObject *__pyx_n_s_CONTENT_TRANSFER_ENCODING;
+static PyObject *__pyx_n_s_CONTENT_TYPE;
+static PyObject *__pyx_n_s_COOKIE;
+static PyObject *__pyx_n_s_ContentLengthError;
+static PyObject *__pyx_n_s_DATE;
+static PyObject *__pyx_n_s_DESTINATION;
+static PyObject *__pyx_n_s_DIGEST;
+static PyObject *__pyx_n_s_DeflateBuffer;
+static PyObject *__pyx_n_s_DeflateBuffer_2;
+static PyObject *__pyx_n_s_EMPTY_PAYLOAD;
+static PyObject *__pyx_n_s_EMPTY_PAYLOAD_2;
+static PyObject *__pyx_n_s_ETAG;
+static PyObject *__pyx_n_s_EXPECT;
+static PyObject *__pyx_n_s_EXPIRES;
+static PyObject *__pyx_n_s_FORWARDED;
+static PyObject *__pyx_n_s_FROM;
+static PyObject *__pyx_n_s_HOST;
+static PyObject *__pyx_kp_u_Header_name_is_too_long;
+static PyObject *__pyx_kp_u_Header_value_is_too_long;
+static PyObject *__pyx_n_s_HttpRequestParser;
+static PyObject *__pyx_n_u_HttpRequestParser;
+static PyObject *__pyx_n_s_HttpResponseParser;
+static PyObject *__pyx_n_u_HttpResponseParser;
+static PyObject *__pyx_n_s_HttpVersion;
+static PyObject *__pyx_n_s_HttpVersion10;
+static PyObject *__pyx_n_s_HttpVersion10_2;
+static PyObject *__pyx_n_s_HttpVersion11;
+static PyObject *__pyx_n_s_HttpVersion11_2;
+static PyObject *__pyx_n_s_HttpVersion_2;
+static PyObject *__pyx_n_s_IF_MATCH;
+static PyObject *__pyx_n_s_IF_MODIFIED_SINCE;
+static PyObject *__pyx_n_s_IF_NONE_MATCH;
+static PyObject *__pyx_n_s_IF_RANGE;
+static PyObject *__pyx_n_s_IF_UNMODIFIED_SINCE;
+static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x14;
+static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xc7;
+static PyObject *__pyx_n_s_InvalidHeader;
+static PyObject *__pyx_n_s_InvalidURLError;
+static PyObject *__pyx_n_s_KEEP_ALIVE;
+static PyObject *__pyx_n_s_LAST_EVENT_ID;
+static PyObject *__pyx_n_s_LAST_MODIFIED;
+static PyObject *__pyx_n_s_LINK;
+static PyObject *__pyx_n_s_LOCATION;
+static PyObject *__pyx_n_s_LineTooLong;
+static PyObject *__pyx_n_s_MAX_FORWARDS;
+static PyObject *__pyx_n_s_MemoryError;
+static PyObject *__pyx_kp_u_Not_enough_data_for_satisfy_cont;
+static PyObject *__pyx_kp_u_Not_enough_data_for_satisfy_tran;
+static PyObject *__pyx_n_s_ORIGIN;
+static PyObject *__pyx_n_s_PRAGMA;
+static PyObject *__pyx_n_s_PROXY_AUTHENTICATE;
+static PyObject *__pyx_n_s_PROXY_AUTHORIZATION;
+static PyObject *__pyx_n_s_PayloadEncodingError;
+static PyObject *__pyx_n_s_PickleError;
+static PyObject *__pyx_n_s_RANGE;
+static PyObject *__pyx_n_s_REFERER;
+static PyObject *__pyx_n_s_RETRY_AFTER;
+static PyObject *__pyx_kp_u_RawRequestMessage;
+static PyObject *__pyx_n_s_RawRequestMessage_2;
+static PyObject *__pyx_n_u_RawRequestMessage_2;
+static PyObject *__pyx_kp_u_RawResponseMessage;
+static PyObject *__pyx_n_s_RawResponseMessage_2;
+static PyObject *__pyx_n_u_RawResponseMessage_2;
+static PyObject *__pyx_n_s_SEC_WEBSOCKET_ACCEPT;
+static PyObject *__pyx_n_s_SEC_WEBSOCKET_EXTENSIONS;
+static PyObject *__pyx_n_s_SEC_WEBSOCKET_KEY;
+static PyObject *__pyx_n_s_SEC_WEBSOCKET_KEY1;
+static PyObject *__pyx_n_s_SEC_WEBSOCKET_PROTOCOL;
+static PyObject *__pyx_n_s_SEC_WEBSOCKET_VERSION;
+static PyObject *__pyx_n_s_SERVER;
+static PyObject *__pyx_n_s_SET_COOKIE;
+static PyObject *__pyx_kp_u_Status_line_is_too_long;
+static PyObject *__pyx_n_s_StreamReader;
+static PyObject *__pyx_n_s_StreamReader_2;
+static PyObject *__pyx_n_s_TE;
+static PyObject *__pyx_n_s_TRAILER;
+static PyObject *__pyx_n_s_TRANSFER_ENCODING;
+static PyObject *__pyx_n_s_TransferEncodingError;
+static PyObject *__pyx_n_s_TypeError;
+static PyObject *__pyx_n_s_UPGRADE;
+static PyObject *__pyx_n_s_URI;
+static PyObject *__pyx_n_s_URL;
+static PyObject *__pyx_n_s_URL_2;
+static PyObject *__pyx_n_s_USER_AGENT;
+static PyObject *__pyx_n_s_VARY;
+static PyObject *__pyx_n_s_VIA;
+static PyObject *__pyx_n_s_WANT_DIGEST;
+static PyObject *__pyx_n_s_WARNING;
+static PyObject *__pyx_n_s_WWW_AUTHENTICATE;
+static PyObject *__pyx_n_s_X_FORWARDED_FOR;
+static PyObject *__pyx_n_s_X_FORWARDED_HOST;
+static PyObject *__pyx_n_s_X_FORWARDED_PROTO;
+static PyObject *__pyx_kp_u__11;
+static PyObject *__pyx_kp_u__2;
+static PyObject *__pyx_kp_u__3;
+static PyObject *__pyx_n_s__4;
+static PyObject *__pyx_kp_b__4;
+static PyObject *__pyx_kp_u__4;
+static PyObject *__pyx_n_s_add;
+static PyObject *__pyx_n_s_aiohttp;
+static PyObject *__pyx_n_s_aiohttp__http_parser;
+static PyObject *__pyx_kp_s_aiohttp__http_parser_pyx;
+static PyObject *__pyx_n_s_all;
+static PyObject *__pyx_n_s_args;
+static PyObject *__pyx_n_s_auto_decompress;
+static PyObject *__pyx_n_s_begin_http_chunk_receiving;
+static PyObject *__pyx_n_u_br;
+static PyObject *__pyx_n_s_buf_data;
+static PyObject *__pyx_n_s_build;
+static PyObject *__pyx_n_s_chunked;
+static PyObject *__pyx_n_u_chunked;
+static PyObject *__pyx_n_s_cline_in_traceback;
+static PyObject *__pyx_n_s_close;
+static PyObject *__pyx_n_s_code;
+static PyObject *__pyx_n_u_code;
+static PyObject *__pyx_n_s_compression;
+static PyObject *__pyx_n_u_compression;
+static PyObject *__pyx_n_u_deflate;
+static PyObject *__pyx_n_s_dict;
+static PyObject *__pyx_n_s_encoded;
+static PyObject *__pyx_n_s_end_http_chunk_receiving;
+static PyObject *__pyx_n_s_feed_data;
+static PyObject *__pyx_n_s_feed_eof;
+static PyObject *__pyx_n_s_format;
+static PyObject *__pyx_n_s_fragment;
+static PyObject *__pyx_n_s_genexpr;
+static PyObject *__pyx_n_s_getstate;
+static PyObject *__pyx_n_u_gzip;
+static PyObject *__pyx_n_s_hdrs;
+static PyObject *__pyx_n_s_headers;
+static PyObject *__pyx_n_u_headers;
+static PyObject *__pyx_n_s_host;
+static PyObject *__pyx_n_s_http_exceptions;
+static PyObject *__pyx_n_s_http_parser;
+static PyObject *__pyx_n_s_http_writer;
+static PyObject *__pyx_n_s_i;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_kp_u_invalid_url_r;
+static PyObject *__pyx_n_s_limit;
+static PyObject *__pyx_n_s_loop;
+static PyObject *__pyx_n_s_lower;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_max_field_size;
+static PyObject *__pyx_n_s_max_headers;
+static PyObject *__pyx_n_s_max_line_size;
+static PyObject *__pyx_n_s_method;
+static PyObject *__pyx_n_u_method;
+static PyObject *__pyx_n_s_multidict;
+static PyObject *__pyx_n_s_name;
+static PyObject *__pyx_n_s_new;
+static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
+static PyObject *__pyx_n_s_parse_url;
+static PyObject *__pyx_n_s_partition;
+static PyObject *__pyx_n_s_password;
+static PyObject *__pyx_n_s_path;
+static PyObject *__pyx_n_u_path;
+static PyObject *__pyx_n_s_payload_exception;
+static PyObject *__pyx_n_s_pickle;
+static PyObject *__pyx_n_s_port;
+static PyObject *__pyx_n_s_protocol;
+static PyObject *__pyx_n_s_py_buf;
+static PyObject *__pyx_n_s_pyx_PickleError;
+static PyObject *__pyx_n_s_pyx_checksum;
+static PyObject *__pyx_n_s_pyx_result;
+static PyObject *__pyx_n_s_pyx_state;
+static PyObject *__pyx_n_s_pyx_type;
+static PyObject *__pyx_n_s_pyx_unpickle_RawRequestMessage;
+static PyObject *__pyx_n_s_pyx_unpickle_RawResponseMessag;
+static PyObject *__pyx_n_s_pyx_vtable;
+static PyObject *__pyx_n_s_query_string;
+static PyObject *__pyx_n_s_range;
+static PyObject *__pyx_n_s_raw_headers;
+static PyObject *__pyx_n_u_raw_headers;
+static PyObject *__pyx_n_s_read_until_eof;
+static PyObject *__pyx_n_s_reason;
+static PyObject *__pyx_n_u_reason;
+static PyObject *__pyx_n_s_reduce;
+static PyObject *__pyx_n_s_reduce_cython;
+static PyObject *__pyx_n_s_reduce_ex;
+static PyObject *__pyx_n_s_repr___locals_genexpr;
+static PyObject *__pyx_n_s_response_with_body;
+static PyObject *__pyx_n_s_scheme;
+static PyObject *__pyx_n_s_send;
+static PyObject *__pyx_n_s_set_exception;
+static PyObject *__pyx_n_s_setstate;
+static PyObject *__pyx_n_s_setstate_cython;
+static PyObject *__pyx_n_s_should_close;
+static PyObject *__pyx_n_u_should_close;
+static PyObject *__pyx_n_s_streams;
+static PyObject *__pyx_kp_s_stringsource;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_throw;
+static PyObject *__pyx_n_s_timer;
+static PyObject *__pyx_kp_u_unknown;
+static PyObject *__pyx_n_s_update;
+static PyObject *__pyx_n_s_upgrade;
+static PyObject *__pyx_n_u_upgrade;
+static PyObject *__pyx_n_s_url;
+static PyObject *__pyx_n_u_url;
+static PyObject *__pyx_n_s_user;
+static PyObject *__pyx_n_s_version;
+static PyObject *__pyx_n_u_version;
+static PyObject *__pyx_n_s_yarl;
+static int __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage___init__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self, PyObject *__pyx_v_method, PyObject *__pyx_v_path, PyObject *__pyx_v_version, PyObject *__pyx_v_headers, PyObject *__pyx_v_raw_headers, PyObject *__pyx_v_should_close, PyObject *__pyx_v_compression, PyObject *__pyx_v_upgrade, PyObject *__pyx_v_chunked, PyObject *__pyx_v_url); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_8__repr___genexpr(PyObject *__pyx_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_2__repr__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_4_replace(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self, PyObject *__pyx_v_dct); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_6method___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_4path___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7version___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7headers___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_11raw_headers___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_12should_close___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_11compression___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7upgrade___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7chunked___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_3url___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_6__reduce_cython__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_8__setstate_cython__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
+static int __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage___init__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self, PyObject *__pyx_v_version, PyObject *__pyx_v_code, PyObject *__pyx_v_reason, PyObject *__pyx_v_headers, PyObject *__pyx_v_raw_headers, PyObject *__pyx_v_should_close, PyObject *__pyx_v_compression, PyObject *__pyx_v_upgrade, PyObject *__pyx_v_chunked); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_8__repr___genexpr(PyObject *__pyx_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_2__repr__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7version___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_4code___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_6reason___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7headers___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_11raw_headers___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_12should_close___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_11compression___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7upgrade___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7chunked___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_4__reduce_cython__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_6__setstate_cython__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
+static int __pyx_pf_7aiohttp_12_http_parser_10HttpParser___cinit__(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto */
+static void __pyx_pf_7aiohttp_12_http_parser_10HttpParser_2__dealloc__(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_4feed_eof(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_6feed_data(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_data); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_8set_upgraded(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
+static int __pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser___init__(struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *__pyx_v_self, PyObject *__pyx_v_protocol, PyObject *__pyx_v_loop, int __pyx_v_limit, PyObject *__pyx_v_timer, size_t __pyx_v_max_line_size, size_t __pyx_v_max_headers, size_t __pyx_v_max_field_size, PyObject *__pyx_v_payload_exception, int __pyx_v_response_with_body, int __pyx_v_read_until_eof); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser_2__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser_4__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
+static int __pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser___init__(struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *__pyx_v_self, PyObject *__pyx_v_protocol, PyObject *__pyx_v_loop, int __pyx_v_limit, PyObject *__pyx_v_timer, size_t __pyx_v_max_line_size, size_t __pyx_v_max_headers, size_t __pyx_v_max_field_size, PyObject *__pyx_v_payload_exception, int __pyx_v_response_with_body, int __pyx_v_read_until_eof, int __pyx_v_auto_decompress); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser_2__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser_4__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_parse_url(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_url); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_2__pyx_unpickle_RawRequestMessage(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_4__pyx_unpickle_RawResponseMessage(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_RawRequestMessage(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_RawResponseMessage(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_HttpParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_HttpRequestParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_HttpResponseParser(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct____repr__(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static __Pyx_CachedCFunction __pyx_umethod_PyUnicode_Type_partition = {0, &__pyx_n_s_partition, 0, 0, 0};
+static PyObject *__pyx_int_21004882;
+static PyObject *__pyx_int_209127132;
+static PyObject *__pyx_tuple__5;
+static PyObject *__pyx_tuple__6;
+static PyObject *__pyx_tuple__7;
+static PyObject *__pyx_tuple__8;
+static PyObject *__pyx_tuple__9;
+static PyObject *__pyx_tuple__10;
+static PyObject *__pyx_tuple__12;
+static PyObject *__pyx_tuple__13;
+static PyObject *__pyx_tuple__15;
+static PyObject *__pyx_tuple__17;
+static PyObject *__pyx_codeobj__14;
+static PyObject *__pyx_codeobj__16;
+static PyObject *__pyx_codeobj__18;
+/* Late includes */
+
+/* "aiohttp/_http_parser.pyx":74
+ *
+ *
+ * cdef inline object extend(object buf, const char* at, size_t length): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t s
+ * cdef char* ptr
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_extend(PyObject *__pyx_v_buf, char const *__pyx_v_at, size_t __pyx_v_length) {
+ Py_ssize_t __pyx_v_s;
+ char *__pyx_v_ptr;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ Py_ssize_t __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("extend", 0);
+
+ /* "aiohttp/_http_parser.pyx":77
+ * cdef Py_ssize_t s
+ * cdef char* ptr
+ * s = PyByteArray_Size(buf) # <<<<<<<<<<<<<<
+ * PyByteArray_Resize(buf, s + length)
+ * ptr = PyByteArray_AsString(buf)
+ */
+ __pyx_t_1 = PyByteArray_Size(__pyx_v_buf); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1L))) __PYX_ERR(0, 77, __pyx_L1_error)
+ __pyx_v_s = __pyx_t_1;
+
+ /* "aiohttp/_http_parser.pyx":78
+ * cdef char* ptr
+ * s = PyByteArray_Size(buf)
+ * PyByteArray_Resize(buf, s + length) # <<<<<<<<<<<<<<
+ * ptr = PyByteArray_AsString(buf)
+ * memcpy(ptr + s, at, length)
+ */
+ __pyx_t_2 = PyByteArray_Resize(__pyx_v_buf, (__pyx_v_s + __pyx_v_length)); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 78, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":79
+ * s = PyByteArray_Size(buf)
+ * PyByteArray_Resize(buf, s + length)
+ * ptr = PyByteArray_AsString(buf) # <<<<<<<<<<<<<<
+ * memcpy(ptr + s, at, length)
+ *
+ */
+ __pyx_v_ptr = PyByteArray_AsString(__pyx_v_buf);
+
+ /* "aiohttp/_http_parser.pyx":80
+ * PyByteArray_Resize(buf, s + length)
+ * ptr = PyByteArray_AsString(buf)
+ * memcpy(ptr + s, at, length) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ (void)(memcpy((__pyx_v_ptr + __pyx_v_s), __pyx_v_at, __pyx_v_length));
+
+ /* "aiohttp/_http_parser.pyx":74
+ *
+ *
+ * cdef inline object extend(object buf, const char* at, size_t length): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t s
+ * cdef char* ptr
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.extend", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
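+
+/* Editor's sketch (hypothetical names, illustration only): extend() above
+ * is the classic bytearray append via the CPython C-API: read the current
+ * size, grow the buffer, then memcpy the new bytes onto the end. */
+#if 0
+static int example_bytearray_extend(PyObject *buf, const char *at, size_t length) {
+    Py_ssize_t s = PyByteArray_Size(buf);               /* current length */
+    if (s < 0) return -1;                               /* not a bytearray */
+    if (PyByteArray_Resize(buf, s + (Py_ssize_t)length) < 0) return -1;
+    memcpy(PyByteArray_AsString(buf) + s, at, length);  /* append payload */
+    return 0;
+}
+#endif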
+
+/* "aiohttp/_http_parser.pyx":92
+ *
+ *
+ * cdef inline str http_method_str(int i): # <<<<<<<<<<<<<<
+ * if i < METHODS_COUNT:
+ * return <str>_http_method[i]
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_http_method_str(int __pyx_v_i) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("http_method_str", 0);
+
+ /* "aiohttp/_http_parser.pyx":93
+ *
+ * cdef inline str http_method_str(int i):
+ * if i < METHODS_COUNT: # <<<<<<<<<<<<<<
+ * return <str>_http_method[i]
+ * else:
+ */
+ __pyx_t_1 = ((__pyx_v_i < 34) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":94
+ * cdef inline str http_method_str(int i):
+ * if i < METHODS_COUNT:
+ * return <str>_http_method[i] # <<<<<<<<<<<<<<
+ * else:
+ * return "<unknown>"
+ */
+ __Pyx_XDECREF(__pyx_r);
+ if (unlikely(__pyx_v_7aiohttp_12_http_parser__http_method == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 94, __pyx_L1_error)
+ }
+ __pyx_t_2 = __Pyx_GetItemInt_List(__pyx_v_7aiohttp_12_http_parser__http_method, __pyx_v_i, int, 1, __Pyx_PyInt_From_int, 1, 1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 94, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject*)__pyx_t_2));
+ __pyx_r = ((PyObject*)__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":93
+ *
+ * cdef inline str http_method_str(int i):
+ * if i < METHODS_COUNT: # <<<<<<<<<<<<<<
+ * return <str>_http_method[i]
+ * else:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":96
+ * return <str>_http_method[i]
+ * else:
+ * return "<unknown>" # <<<<<<<<<<<<<<
+ *
+ * cdef inline object find_header(bytes raw_header):
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_kp_u_unknown);
+ __pyx_r = __pyx_kp_u_unknown;
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":92
+ *
+ *
+ * cdef inline str http_method_str(int i): # <<<<<<<<<<<<<<
+ * if i < METHODS_COUNT:
+ * return <str>_http_method[i]
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("aiohttp._http_parser.http_method_str", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
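+
+/* Editor's sketch (hypothetical table, illustration only): the helper above
+ * is a bounds-guarded list lookup (METHODS_COUNT is inlined as 34), falling
+ * back to "<unknown>" for out-of-range parser method ids; the same shape
+ * over a plain C string table: */
+#if 0
+static const char *example_method_str(int i) {
+    static const char *const table[] = { "DELETE", "GET", "HEAD", "POST", "PUT" };
+    int count = (int)(sizeof(table) / sizeof(table[0]));
+    return (i >= 0 && i < count) ? table[i] : "<unknown>";
+}
+#endif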
+
+/* "aiohttp/_http_parser.pyx":98
+ * return "<unknown>"
+ *
+ * cdef inline object find_header(bytes raw_header): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t size
+ * cdef char *buf
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_find_header(PyObject *__pyx_v_raw_header) {
+ Py_ssize_t __pyx_v_size;
+ char *__pyx_v_buf;
+ int __pyx_v_idx;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("find_header", 0);
+
+ /* "aiohttp/_http_parser.pyx":102
+ * cdef char *buf
+ * cdef int idx
+ * PyBytes_AsStringAndSize(raw_header, &buf, &size) # <<<<<<<<<<<<<<
+ * idx = _find_header.find_header(buf, size)
+ * if idx == -1:
+ */
+ __pyx_t_1 = PyBytes_AsStringAndSize(__pyx_v_raw_header, (&__pyx_v_buf), (&__pyx_v_size)); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 102, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":103
+ * cdef int idx
+ * PyBytes_AsStringAndSize(raw_header, &buf, &size)
+ * idx = _find_header.find_header(buf, size) # <<<<<<<<<<<<<<
+ * if idx == -1:
+ * return raw_header.decode('utf-8', 'surrogateescape')
+ */
+ __pyx_v_idx = find_header(__pyx_v_buf, __pyx_v_size);
+
+ /* "aiohttp/_http_parser.pyx":104
+ * PyBytes_AsStringAndSize(raw_header, &buf, &size)
+ * idx = _find_header.find_header(buf, size)
+ * if idx == -1: # <<<<<<<<<<<<<<
+ * return raw_header.decode('utf-8', 'surrogateescape')
+ * return headers[idx]
+ */
+ __pyx_t_2 = ((__pyx_v_idx == -1L) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":105
+ * idx = _find_header.find_header(buf, size)
+ * if idx == -1:
+ * return raw_header.decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ * return headers[idx]
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ if (unlikely(__pyx_v_raw_header == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "decode");
+ __PYX_ERR(0, 105, __pyx_L1_error)
+ }
+ __pyx_t_3 = __Pyx_decode_bytes(__pyx_v_raw_header, 0, PY_SSIZE_T_MAX, NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 105, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":104
+ * PyBytes_AsStringAndSize(raw_header, &buf, &size)
+ * idx = _find_header.find_header(buf, size)
+ * if idx == -1: # <<<<<<<<<<<<<<
+ * return raw_header.decode('utf-8', 'surrogateescape')
+ * return headers[idx]
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":106
+ * if idx == -1:
+ * return raw_header.decode('utf-8', 'surrogateescape')
+ * return headers[idx] # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ if (unlikely(__pyx_v_7aiohttp_12_http_parser_headers == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 106, __pyx_L1_error)
+ }
+ __pyx_t_3 = __Pyx_GetItemInt_Tuple(__pyx_v_7aiohttp_12_http_parser_headers, __pyx_v_idx, int, 1, __Pyx_PyInt_From_int, 0, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 106, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":98
+ * return "<unknown>"
+ *
+ * cdef inline object find_header(bytes raw_header): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t size
+ * cdef char *buf
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._http_parser.find_header", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":122
+ * cdef readonly object url # yarl.URL
+ *
+ * def __init__(self, method, path, version, headers, raw_headers, # <<<<<<<<<<<<<<
+ * should_close, compression, upgrade, chunked, url):
+ * self.method = method
+ */
+
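+/* The generated __init__ wrapper below unpacks the ten required arguments:
+ * positional values are copied into values[] through a fall-through switch,
+ * keywords are matched against __pyx_pyargnames, and any missing or surplus
+ * argument raises TypeError via __Pyx_RaiseArgtupleInvalid before the
+ * C-level impl is invoked. */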
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_method = 0;
+ PyObject *__pyx_v_path = 0;
+ PyObject *__pyx_v_version = 0;
+ PyObject *__pyx_v_headers = 0;
+ PyObject *__pyx_v_raw_headers = 0;
+ PyObject *__pyx_v_should_close = 0;
+ PyObject *__pyx_v_compression = 0;
+ PyObject *__pyx_v_upgrade = 0;
+ PyObject *__pyx_v_chunked = 0;
+ PyObject *__pyx_v_url = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_method,&__pyx_n_s_path,&__pyx_n_s_version,&__pyx_n_s_headers,&__pyx_n_s_raw_headers,&__pyx_n_s_should_close,&__pyx_n_s_compression,&__pyx_n_s_upgrade,&__pyx_n_s_chunked,&__pyx_n_s_url,0};
+ PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+ CYTHON_FALLTHROUGH;
+ case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+ CYTHON_FALLTHROUGH;
+ case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ CYTHON_FALLTHROUGH;
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ CYTHON_FALLTHROUGH;
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ CYTHON_FALLTHROUGH;
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ CYTHON_FALLTHROUGH;
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ CYTHON_FALLTHROUGH;
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_method)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_path)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 1); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_version)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 2); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 3:
+ if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_headers)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 3); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 4:
+ if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_raw_headers)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 4); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 5:
+ if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_should_close)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 5); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 6:
+ if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_compression)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 6); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 7:
+ if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_upgrade)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 7); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 8:
+ if (likely((values[8] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_chunked)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 8); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 9:
+ if (likely((values[9] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_url)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, 9); __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 122, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 10) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+ values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+ }
+ __pyx_v_method = values[0];
+ __pyx_v_path = values[1];
+ __pyx_v_version = values[2];
+ __pyx_v_headers = values[3];
+ __pyx_v_raw_headers = values[4];
+ __pyx_v_should_close = values[5];
+ __pyx_v_compression = values[6];
+ __pyx_v_upgrade = values[7];
+ __pyx_v_chunked = values[8];
+ __pyx_v_url = values[9];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 10, 10, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 122, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.RawRequestMessage.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage___init__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self), __pyx_v_method, __pyx_v_path, __pyx_v_version, __pyx_v_headers, __pyx_v_raw_headers, __pyx_v_should_close, __pyx_v_compression, __pyx_v_upgrade, __pyx_v_chunked, __pyx_v_url);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
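+/* The impl type-checks method and path as str (None is also accepted,
+ * as Cython str attributes admit None by default) and then stores each
+ * argument with the standard INCREF/GIVEREF/DECREF swap so the previous
+ * attribute value is released only after the new one is safely held. */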
+static int __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage___init__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self, PyObject *__pyx_v_method, PyObject *__pyx_v_path, PyObject *__pyx_v_version, PyObject *__pyx_v_headers, PyObject *__pyx_v_raw_headers, PyObject *__pyx_v_should_close, PyObject *__pyx_v_compression, PyObject *__pyx_v_upgrade, PyObject *__pyx_v_chunked, PyObject *__pyx_v_url) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__init__", 0);
+
+ /* "aiohttp/_http_parser.pyx":124
+ * def __init__(self, method, path, version, headers, raw_headers,
+ * should_close, compression, upgrade, chunked, url):
+ * self.method = method # <<<<<<<<<<<<<<
+ * self.path = path
+ * self.version = version
+ */
+ if (!(likely(PyUnicode_CheckExact(__pyx_v_method))||((__pyx_v_method) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_v_method)->tp_name), 0))) __PYX_ERR(0, 124, __pyx_L1_error)
+ __pyx_t_1 = __pyx_v_method;
+ __Pyx_INCREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->method);
+ __Pyx_DECREF(__pyx_v_self->method);
+ __pyx_v_self->method = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":125
+ * should_close, compression, upgrade, chunked, url):
+ * self.method = method
+ * self.path = path # <<<<<<<<<<<<<<
+ * self.version = version
+ * self.headers = headers
+ */
+ if (!(likely(PyUnicode_CheckExact(__pyx_v_path))||((__pyx_v_path) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_v_path)->tp_name), 0))) __PYX_ERR(0, 125, __pyx_L1_error)
+ __pyx_t_1 = __pyx_v_path;
+ __Pyx_INCREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->path);
+ __Pyx_DECREF(__pyx_v_self->path);
+ __pyx_v_self->path = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":126
+ * self.method = method
+ * self.path = path
+ * self.version = version # <<<<<<<<<<<<<<
+ * self.headers = headers
+ * self.raw_headers = raw_headers
+ */
+ __Pyx_INCREF(__pyx_v_version);
+ __Pyx_GIVEREF(__pyx_v_version);
+ __Pyx_GOTREF(__pyx_v_self->version);
+ __Pyx_DECREF(__pyx_v_self->version);
+ __pyx_v_self->version = __pyx_v_version;
+
+ /* "aiohttp/_http_parser.pyx":127
+ * self.path = path
+ * self.version = version
+ * self.headers = headers # <<<<<<<<<<<<<<
+ * self.raw_headers = raw_headers
+ * self.should_close = should_close
+ */
+ __Pyx_INCREF(__pyx_v_headers);
+ __Pyx_GIVEREF(__pyx_v_headers);
+ __Pyx_GOTREF(__pyx_v_self->headers);
+ __Pyx_DECREF(__pyx_v_self->headers);
+ __pyx_v_self->headers = __pyx_v_headers;
+
+ /* "aiohttp/_http_parser.pyx":128
+ * self.version = version
+ * self.headers = headers
+ * self.raw_headers = raw_headers # <<<<<<<<<<<<<<
+ * self.should_close = should_close
+ * self.compression = compression
+ */
+ __Pyx_INCREF(__pyx_v_raw_headers);
+ __Pyx_GIVEREF(__pyx_v_raw_headers);
+ __Pyx_GOTREF(__pyx_v_self->raw_headers);
+ __Pyx_DECREF(__pyx_v_self->raw_headers);
+ __pyx_v_self->raw_headers = __pyx_v_raw_headers;
+
+ /* "aiohttp/_http_parser.pyx":129
+ * self.headers = headers
+ * self.raw_headers = raw_headers
+ * self.should_close = should_close # <<<<<<<<<<<<<<
+ * self.compression = compression
+ * self.upgrade = upgrade
+ */
+ __Pyx_INCREF(__pyx_v_should_close);
+ __Pyx_GIVEREF(__pyx_v_should_close);
+ __Pyx_GOTREF(__pyx_v_self->should_close);
+ __Pyx_DECREF(__pyx_v_self->should_close);
+ __pyx_v_self->should_close = __pyx_v_should_close;
+
+ /* "aiohttp/_http_parser.pyx":130
+ * self.raw_headers = raw_headers
+ * self.should_close = should_close
+ * self.compression = compression # <<<<<<<<<<<<<<
+ * self.upgrade = upgrade
+ * self.chunked = chunked
+ */
+ __Pyx_INCREF(__pyx_v_compression);
+ __Pyx_GIVEREF(__pyx_v_compression);
+ __Pyx_GOTREF(__pyx_v_self->compression);
+ __Pyx_DECREF(__pyx_v_self->compression);
+ __pyx_v_self->compression = __pyx_v_compression;
+
+ /* "aiohttp/_http_parser.pyx":131
+ * self.should_close = should_close
+ * self.compression = compression
+ * self.upgrade = upgrade # <<<<<<<<<<<<<<
+ * self.chunked = chunked
+ * self.url = url
+ */
+ __Pyx_INCREF(__pyx_v_upgrade);
+ __Pyx_GIVEREF(__pyx_v_upgrade);
+ __Pyx_GOTREF(__pyx_v_self->upgrade);
+ __Pyx_DECREF(__pyx_v_self->upgrade);
+ __pyx_v_self->upgrade = __pyx_v_upgrade;
+
+ /* "aiohttp/_http_parser.pyx":132
+ * self.compression = compression
+ * self.upgrade = upgrade
+ * self.chunked = chunked # <<<<<<<<<<<<<<
+ * self.url = url
+ *
+ */
+ __Pyx_INCREF(__pyx_v_chunked);
+ __Pyx_GIVEREF(__pyx_v_chunked);
+ __Pyx_GOTREF(__pyx_v_self->chunked);
+ __Pyx_DECREF(__pyx_v_self->chunked);
+ __pyx_v_self->chunked = __pyx_v_chunked;
+
+ /* "aiohttp/_http_parser.pyx":133
+ * self.upgrade = upgrade
+ * self.chunked = chunked
+ * self.url = url # <<<<<<<<<<<<<<
+ *
+ * def __repr__(self):
+ */
+ __Pyx_INCREF(__pyx_v_url);
+ __Pyx_GIVEREF(__pyx_v_url);
+ __Pyx_GOTREF(__pyx_v_self->url);
+ __Pyx_DECREF(__pyx_v_self->url);
+ __pyx_v_self->url = __pyx_v_url;
+
+ /* "aiohttp/_http_parser.pyx":122
+ * cdef readonly object url # yarl.URL
+ *
+ * def __init__(self, method, path, version, headers, raw_headers, # <<<<<<<<<<<<<<
+ * should_close, compression, upgrade, chunked, url):
+ * self.method = method
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawRequestMessage.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":135
+ * self.url = url
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * info = []
+ * info.append(("method", self.method))
+ */
+
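+/* __repr__ is compiled into three pieces: this thin wrapper, a closure
+ * scope struct holding `info`, and a generator implementing the genexpr
+ * that formats each (name, value) pair. */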
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_3__repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_3__repr__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_2__repr__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+static PyObject *__pyx_gb_7aiohttp_12_http_parser_17RawRequestMessage_8__repr___2generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
+
+/* "aiohttp/_http_parser.pyx":147
+ * info.append(("chunked", self.chunked))
+ * info.append(("url", self.url))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info) # <<<<<<<<<<<<<<
+ * return '<RawRequestMessage(' + sinfo + ')>'
+ *
+ */
+
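+/* genexpr factory: allocates the inner scope, links it to the enclosing
+ * __repr__ scope so the generator body can see `info`, and returns a
+ * fresh generator object without running any of its body yet. */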
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_8__repr___genexpr(PyObject *__pyx_self) {
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *__pyx_cur_scope;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("genexpr", 0);
+ __pyx_cur_scope = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *)__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr(__pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr, __pyx_empty_tuple, NULL);
+ if (unlikely(!__pyx_cur_scope)) {
+ __pyx_cur_scope = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *)Py_None);
+ __Pyx_INCREF(Py_None);
+ __PYX_ERR(0, 147, __pyx_L1_error)
+ } else {
+ __Pyx_GOTREF(__pyx_cur_scope);
+ }
+ __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *) __pyx_self;
+ __Pyx_INCREF(((PyObject *)__pyx_cur_scope->__pyx_outer_scope));
+ __Pyx_GIVEREF(__pyx_cur_scope->__pyx_outer_scope);
+ {
+ __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_7aiohttp_12_http_parser_17RawRequestMessage_8__repr___2generator, NULL, (PyObject *) __pyx_cur_scope, __pyx_n_s_genexpr, __pyx_n_s_repr___locals_genexpr, __pyx_n_s_aiohttp__http_parser); if (unlikely(!gen)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_cur_scope);
+ __Pyx_RefNannyFinishContext();
+ return (PyObject *) gen;
+ }
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.RawRequestMessage.__repr__.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __Pyx_DECREF(((PyObject *)__pyx_cur_scope));
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
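+/* Generator body: because the genexpr feeds str.join() directly, Cython
+ * lowers it to a single-shot generator that materializes a list in one
+ * pass; each item of `info` is unpacked as a 2-tuple (with a fast path
+ * for exact tuples/lists and a generic-iterator fallback), formatted as
+ * name + '=' + repr(val), and appended to the result list. */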
+static PyObject *__pyx_gb_7aiohttp_12_http_parser_17RawRequestMessage_8__repr___2generator(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */
+{
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *__pyx_cur_scope = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *)__pyx_generator->closure);
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *(*__pyx_t_7)(PyObject *);
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("genexpr", 0);
+ switch (__pyx_generator->resume_label) {
+ case 0: goto __pyx_L3_first_run;
+ default: /* CPython raises the right error here */
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ }
+ __pyx_L3_first_run:;
+ if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __pyx_r = PyList_New(0); if (unlikely(!__pyx_r)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_r);
+ if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_info)) { __Pyx_RaiseClosureNameError("info"); __PYX_ERR(0, 147, __pyx_L1_error) }
+ if (unlikely(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_info == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+ __PYX_ERR(0, 147, __pyx_L1_error)
+ }
+ __pyx_t_1 = __pyx_cur_scope->__pyx_outer_scope->__pyx_v_info; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
+ for (;;) {
+ if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_3 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(0, 147, __pyx_L1_error)
+ #else
+ __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ #endif
+ if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) {
+ PyObject* sequence = __pyx_t_3;
+ Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
+ if (unlikely(size != 2)) {
+ if (size > 2) __Pyx_RaiseTooManyValuesError(2);
+ else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+ __PYX_ERR(0, 147, __pyx_L1_error)
+ }
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ if (likely(PyTuple_CheckExact(sequence))) {
+ __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
+ __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
+ } else {
+ __pyx_t_4 = PyList_GET_ITEM(sequence, 0);
+ __pyx_t_5 = PyList_GET_ITEM(sequence, 1);
+ }
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ #else
+ __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ #endif
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ } else {
+ Py_ssize_t index = -1;
+ __pyx_t_6 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_7 = Py_TYPE(__pyx_t_6)->tp_iternext;
+ index = 0; __pyx_t_4 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_4)) goto __pyx_L6_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_4);
+ index = 1; __pyx_t_5 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L6_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_5);
+ if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_6), 2) < 0) __PYX_ERR(0, 147, __pyx_L1_error)
+ __pyx_t_7 = NULL;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ goto __pyx_L7_unpacking_done;
+ __pyx_L6_unpacking_failed:;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_7 = NULL;
+ if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
+ __PYX_ERR(0, 147, __pyx_L1_error)
+ __pyx_L7_unpacking_done:;
+ }
+ __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_name);
+ __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_name, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_4 = 0;
+ __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_val);
+ __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_val, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+ __pyx_t_3 = PyNumber_Add(__pyx_cur_scope->__pyx_v_name, __pyx_kp_u_); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyObject_Repr(__pyx_cur_scope->__pyx_v_val); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_4 = PyNumber_Add(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(__Pyx_ListComp_Append(__pyx_r, (PyObject*)__pyx_t_4))) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
+
+ /* function exit code */
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_r); __pyx_r = 0;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ #if !CYTHON_USE_EXC_INFO_STACK
+ __Pyx_Coroutine_ResetAndClearException(__pyx_generator);
+ #endif
+ __pyx_generator->resume_label = -1;
+ __Pyx_Coroutine_clear((PyObject*)__pyx_generator);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":135
+ * self.url = url
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * info = []
+ * info.append(("method", self.method))
+ */
+
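+/* __repr__ impl: builds the closure-held `info` list of (name, value)
+ * tuples for all ten fields, drives the genexpr generator once to obtain
+ * the formatted list, joins it with ', ', and wraps the result in the
+ * '<RawRequestMessage(' ... ')>' delimiters. */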
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_2__repr__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *__pyx_cur_scope;
+ PyObject *__pyx_v_sinfo = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__repr__", 0);
+ __pyx_cur_scope = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *)__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct____repr__(__pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct____repr__, __pyx_empty_tuple, NULL);
+ if (unlikely(!__pyx_cur_scope)) {
+ __pyx_cur_scope = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *)Py_None);
+ __Pyx_INCREF(Py_None);
+ __PYX_ERR(0, 135, __pyx_L1_error)
+ } else {
+ __Pyx_GOTREF(__pyx_cur_scope);
+ }
+
+ /* "aiohttp/_http_parser.pyx":136
+ *
+ * def __repr__(self):
+ * info = [] # <<<<<<<<<<<<<<
+ * info.append(("method", self.method))
+ * info.append(("path", self.path))
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 136, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_cur_scope->__pyx_v_info = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":137
+ * def __repr__(self):
+ * info = []
+ * info.append(("method", self.method)) # <<<<<<<<<<<<<<
+ * info.append(("path", self.path))
+ * info.append(("version", self.version))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 137, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_method);
+ __Pyx_GIVEREF(__pyx_n_u_method);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_method);
+ __Pyx_INCREF(__pyx_v_self->method);
+ __Pyx_GIVEREF(__pyx_v_self->method);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->method);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 137, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":138
+ * info = []
+ * info.append(("method", self.method))
+ * info.append(("path", self.path)) # <<<<<<<<<<<<<<
+ * info.append(("version", self.version))
+ * info.append(("headers", self.headers))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 138, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_path);
+ __Pyx_GIVEREF(__pyx_n_u_path);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_path);
+ __Pyx_INCREF(__pyx_v_self->path);
+ __Pyx_GIVEREF(__pyx_v_self->path);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->path);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 138, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":139
+ * info.append(("method", self.method))
+ * info.append(("path", self.path))
+ * info.append(("version", self.version)) # <<<<<<<<<<<<<<
+ * info.append(("headers", self.headers))
+ * info.append(("raw_headers", self.raw_headers))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 139, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_version);
+ __Pyx_GIVEREF(__pyx_n_u_version);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_version);
+ __Pyx_INCREF(__pyx_v_self->version);
+ __Pyx_GIVEREF(__pyx_v_self->version);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->version);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 139, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":140
+ * info.append(("path", self.path))
+ * info.append(("version", self.version))
+ * info.append(("headers", self.headers)) # <<<<<<<<<<<<<<
+ * info.append(("raw_headers", self.raw_headers))
+ * info.append(("should_close", self.should_close))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 140, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_headers);
+ __Pyx_GIVEREF(__pyx_n_u_headers);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_headers);
+ __Pyx_INCREF(__pyx_v_self->headers);
+ __Pyx_GIVEREF(__pyx_v_self->headers);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->headers);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 140, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":141
+ * info.append(("version", self.version))
+ * info.append(("headers", self.headers))
+ * info.append(("raw_headers", self.raw_headers)) # <<<<<<<<<<<<<<
+ * info.append(("should_close", self.should_close))
+ * info.append(("compression", self.compression))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 141, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_raw_headers);
+ __Pyx_GIVEREF(__pyx_n_u_raw_headers);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_raw_headers);
+ __Pyx_INCREF(__pyx_v_self->raw_headers);
+ __Pyx_GIVEREF(__pyx_v_self->raw_headers);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->raw_headers);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 141, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":142
+ * info.append(("headers", self.headers))
+ * info.append(("raw_headers", self.raw_headers))
+ * info.append(("should_close", self.should_close)) # <<<<<<<<<<<<<<
+ * info.append(("compression", self.compression))
+ * info.append(("upgrade", self.upgrade))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 142, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_should_close);
+ __Pyx_GIVEREF(__pyx_n_u_should_close);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_should_close);
+ __Pyx_INCREF(__pyx_v_self->should_close);
+ __Pyx_GIVEREF(__pyx_v_self->should_close);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->should_close);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 142, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":143
+ * info.append(("raw_headers", self.raw_headers))
+ * info.append(("should_close", self.should_close))
+ * info.append(("compression", self.compression)) # <<<<<<<<<<<<<<
+ * info.append(("upgrade", self.upgrade))
+ * info.append(("chunked", self.chunked))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 143, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_compression);
+ __Pyx_GIVEREF(__pyx_n_u_compression);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_compression);
+ __Pyx_INCREF(__pyx_v_self->compression);
+ __Pyx_GIVEREF(__pyx_v_self->compression);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->compression);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 143, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":144
+ * info.append(("should_close", self.should_close))
+ * info.append(("compression", self.compression))
+ * info.append(("upgrade", self.upgrade)) # <<<<<<<<<<<<<<
+ * info.append(("chunked", self.chunked))
+ * info.append(("url", self.url))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 144, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_upgrade);
+ __Pyx_GIVEREF(__pyx_n_u_upgrade);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_upgrade);
+ __Pyx_INCREF(__pyx_v_self->upgrade);
+ __Pyx_GIVEREF(__pyx_v_self->upgrade);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->upgrade);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 144, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":145
+ * info.append(("compression", self.compression))
+ * info.append(("upgrade", self.upgrade))
+ * info.append(("chunked", self.chunked)) # <<<<<<<<<<<<<<
+ * info.append(("url", self.url))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 145, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_chunked);
+ __Pyx_GIVEREF(__pyx_n_u_chunked);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_chunked);
+ __Pyx_INCREF(__pyx_v_self->chunked);
+ __Pyx_GIVEREF(__pyx_v_self->chunked);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->chunked);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 145, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":146
+ * info.append(("upgrade", self.upgrade))
+ * info.append(("chunked", self.chunked))
+ * info.append(("url", self.url)) # <<<<<<<<<<<<<<
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+ * return '<RawRequestMessage(' + sinfo + ')>'
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 146, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_url);
+ __Pyx_GIVEREF(__pyx_n_u_url);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_url);
+ __Pyx_INCREF(__pyx_v_self->url);
+ __Pyx_GIVEREF(__pyx_v_self->url);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->url);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 146, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":147
+ * info.append(("chunked", self.chunked))
+ * info.append(("url", self.url))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info) # <<<<<<<<<<<<<<
+ * return '<RawRequestMessage(' + sinfo + ')>'
+ *
+ */
+ __pyx_t_1 = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_8__repr___genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_Generator_Next(__pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyUnicode_Join(__pyx_kp_u__2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 147, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_sinfo = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":148
+ * info.append(("url", self.url))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+ * return '<RawRequestMessage(' + sinfo + ')>' # <<<<<<<<<<<<<<
+ *
+ * def _replace(self, **dct):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyUnicode_ConcatSafe(__pyx_kp_u_RawRequestMessage, __pyx_v_sinfo); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 148, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyUnicode_Concat(__pyx_t_1, __pyx_kp_u__3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 148, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":135
+ * self.url = url
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * info = []
+ * info.append(("method", self.method))
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawRequestMessage.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_sinfo);
+ __Pyx_DECREF(((PyObject *)__pyx_cur_scope));
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":150
+ * return '<RawRequestMessage(' + sinfo + ')>'
+ *
+ * def _replace(self, **dct): # <<<<<<<<<<<<<<
+ * cdef RawRequestMessage ret
+ * ret = _new_request_message(self.method,
+ */
+
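+/* _replace wrapper: rejects any positional argument, verifies the keyword
+ * names are strings, and passes a private copy of the keyword dict to the
+ * impl so the caller's kwargs are never mutated. */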
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_5_replace(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_5_replace(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_dct = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("_replace (wrapper)", 0);
+ if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) {
+ __Pyx_RaiseArgtupleInvalid("_replace", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return NULL;}
+ if (__pyx_kwds && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "_replace", 1))) return NULL;
+ __pyx_v_dct = (__pyx_kwds) ? PyDict_Copy(__pyx_kwds) : PyDict_New(); if (unlikely(!__pyx_v_dct)) return NULL;
+ __Pyx_GOTREF(__pyx_v_dct);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_4_replace(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self), __pyx_v_dct);
+
+ /* function exit code */
+ __Pyx_XDECREF(__pyx_v_dct);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
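+/* _replace impl: first clones the message through _new_request_message
+ * (should_close, upgrade and chunked are coerced to C ints with
+ * __Pyx_PyObject_IsTrue on the way in), then overwrites each field that
+ * appears in dct, re-applying the str type check for method and path. */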
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_4_replace(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self, PyObject *__pyx_v_dct) {
+ struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_ret = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_t_8;
+ int __pyx_t_9;
+ PyObject *__pyx_t_10 = NULL;
+ PyObject *__pyx_t_11 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_replace", 0);
+
+ /* "aiohttp/_http_parser.pyx":152
+ * def _replace(self, **dct):
+ * cdef RawRequestMessage ret
+ * ret = _new_request_message(self.method, # <<<<<<<<<<<<<<
+ * self.path,
+ * self.version,
+ */
+ __pyx_t_1 = __pyx_v_self->method;
+ __Pyx_INCREF(__pyx_t_1);
+
+ /* "aiohttp/_http_parser.pyx":153
+ * cdef RawRequestMessage ret
+ * ret = _new_request_message(self.method,
+ * self.path, # <<<<<<<<<<<<<<
+ * self.version,
+ * self.headers,
+ */
+ __pyx_t_2 = __pyx_v_self->path;
+ __Pyx_INCREF(__pyx_t_2);
+
+ /* "aiohttp/_http_parser.pyx":154
+ * ret = _new_request_message(self.method,
+ * self.path,
+ * self.version, # <<<<<<<<<<<<<<
+ * self.headers,
+ * self.raw_headers,
+ */
+ __pyx_t_3 = __pyx_v_self->version;
+ __Pyx_INCREF(__pyx_t_3);
+
+ /* "aiohttp/_http_parser.pyx":155
+ * self.path,
+ * self.version,
+ * self.headers, # <<<<<<<<<<<<<<
+ * self.raw_headers,
+ * self.should_close,
+ */
+ __pyx_t_4 = __pyx_v_self->headers;
+ __Pyx_INCREF(__pyx_t_4);
+
+ /* "aiohttp/_http_parser.pyx":156
+ * self.version,
+ * self.headers,
+ * self.raw_headers, # <<<<<<<<<<<<<<
+ * self.should_close,
+ * self.compression,
+ */
+ __pyx_t_5 = __pyx_v_self->raw_headers;
+ __Pyx_INCREF(__pyx_t_5);
+
+ /* "aiohttp/_http_parser.pyx":157
+ * self.headers,
+ * self.raw_headers,
+ * self.should_close, # <<<<<<<<<<<<<<
+ * self.compression,
+ * self.upgrade,
+ */
+ __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_v_self->should_close); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 157, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":158
+ * self.raw_headers,
+ * self.should_close,
+ * self.compression, # <<<<<<<<<<<<<<
+ * self.upgrade,
+ * self.chunked,
+ */
+ __pyx_t_7 = __pyx_v_self->compression;
+ __Pyx_INCREF(__pyx_t_7);
+
+ /* "aiohttp/_http_parser.pyx":159
+ * self.should_close,
+ * self.compression,
+ * self.upgrade, # <<<<<<<<<<<<<<
+ * self.chunked,
+ * self.url)
+ */
+ __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_v_self->upgrade); if (unlikely((__pyx_t_8 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 159, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":160
+ * self.compression,
+ * self.upgrade,
+ * self.chunked, # <<<<<<<<<<<<<<
+ * self.url)
+ * if "method" in dct:
+ */
+ __pyx_t_9 = __Pyx_PyObject_IsTrue(__pyx_v_self->chunked); if (unlikely((__pyx_t_9 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 160, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":161
+ * self.upgrade,
+ * self.chunked,
+ * self.url) # <<<<<<<<<<<<<<
+ * if "method" in dct:
+ * ret.method = dct["method"]
+ */
+ __pyx_t_10 = __pyx_v_self->url;
+ __Pyx_INCREF(__pyx_t_10);
+
+ /* "aiohttp/_http_parser.pyx":152
+ * def _replace(self, **dct):
+ * cdef RawRequestMessage ret
+ * ret = _new_request_message(self.method, # <<<<<<<<<<<<<<
+ * self.path,
+ * self.version,
+ */
+ __pyx_t_11 = __pyx_f_7aiohttp_12_http_parser__new_request_message(((PyObject*)__pyx_t_1), ((PyObject*)__pyx_t_2), __pyx_t_3, __pyx_t_4, __pyx_t_5, __pyx_t_6, __pyx_t_7, __pyx_t_8, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 152, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
+ if (!(likely(((__pyx_t_11) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_11, __pyx_ptype_7aiohttp_12_http_parser_RawRequestMessage))))) __PYX_ERR(0, 152, __pyx_L1_error)
+ __pyx_v_ret = ((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_t_11);
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":162
+ * self.chunked,
+ * self.url)
+ * if "method" in dct: # <<<<<<<<<<<<<<
+ * ret.method = dct["method"]
+ * if "path" in dct:
+ */
+ __pyx_t_9 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_method, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 162, __pyx_L1_error)
+ __pyx_t_8 = (__pyx_t_9 != 0);
+ if (__pyx_t_8) {
+
+ /* "aiohttp/_http_parser.pyx":163
+ * self.url)
+ * if "method" in dct:
+ * ret.method = dct["method"] # <<<<<<<<<<<<<<
+ * if "path" in dct:
+ * ret.path = dct["path"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_method); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 163, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_11))||((__pyx_t_11) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_11)->tp_name), 0))) __PYX_ERR(0, 163, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->method);
+ __Pyx_DECREF(__pyx_v_ret->method);
+ __pyx_v_ret->method = ((PyObject*)__pyx_t_11);
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":162
+ * self.chunked,
+ * self.url)
+ * if "method" in dct: # <<<<<<<<<<<<<<
+ * ret.method = dct["method"]
+ * if "path" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":164
+ * if "method" in dct:
+ * ret.method = dct["method"]
+ * if "path" in dct: # <<<<<<<<<<<<<<
+ * ret.path = dct["path"]
+ * if "version" in dct:
+ */
+ __pyx_t_8 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_path, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 164, __pyx_L1_error)
+ __pyx_t_9 = (__pyx_t_8 != 0);
+ if (__pyx_t_9) {
+
+ /* "aiohttp/_http_parser.pyx":165
+ * ret.method = dct["method"]
+ * if "path" in dct:
+ * ret.path = dct["path"] # <<<<<<<<<<<<<<
+ * if "version" in dct:
+ * ret.version = dct["version"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_path); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 165, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_11))||((__pyx_t_11) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_11)->tp_name), 0))) __PYX_ERR(0, 165, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->path);
+ __Pyx_DECREF(__pyx_v_ret->path);
+ __pyx_v_ret->path = ((PyObject*)__pyx_t_11);
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":164
+ * if "method" in dct:
+ * ret.method = dct["method"]
+ * if "path" in dct: # <<<<<<<<<<<<<<
+ * ret.path = dct["path"]
+ * if "version" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":166
+ * if "path" in dct:
+ * ret.path = dct["path"]
+ * if "version" in dct: # <<<<<<<<<<<<<<
+ * ret.version = dct["version"]
+ * if "headers" in dct:
+ */
+ __pyx_t_9 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_version, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 166, __pyx_L1_error)
+ __pyx_t_8 = (__pyx_t_9 != 0);
+ if (__pyx_t_8) {
+
+ /* "aiohttp/_http_parser.pyx":167
+ * ret.path = dct["path"]
+ * if "version" in dct:
+ * ret.version = dct["version"] # <<<<<<<<<<<<<<
+ * if "headers" in dct:
+ * ret.headers = dct["headers"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_version); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 167, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->version);
+ __Pyx_DECREF(__pyx_v_ret->version);
+ __pyx_v_ret->version = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":166
+ * if "path" in dct:
+ * ret.path = dct["path"]
+ * if "version" in dct: # <<<<<<<<<<<<<<
+ * ret.version = dct["version"]
+ * if "headers" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":168
+ * if "version" in dct:
+ * ret.version = dct["version"]
+ * if "headers" in dct: # <<<<<<<<<<<<<<
+ * ret.headers = dct["headers"]
+ * if "raw_headers" in dct:
+ */
+ __pyx_t_8 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_headers, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 168, __pyx_L1_error)
+ __pyx_t_9 = (__pyx_t_8 != 0);
+ if (__pyx_t_9) {
+
+ /* "aiohttp/_http_parser.pyx":169
+ * ret.version = dct["version"]
+ * if "headers" in dct:
+ * ret.headers = dct["headers"] # <<<<<<<<<<<<<<
+ * if "raw_headers" in dct:
+ * ret.raw_headers = dct["raw_headers"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_headers); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 169, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->headers);
+ __Pyx_DECREF(__pyx_v_ret->headers);
+ __pyx_v_ret->headers = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":168
+ * if "version" in dct:
+ * ret.version = dct["version"]
+ * if "headers" in dct: # <<<<<<<<<<<<<<
+ * ret.headers = dct["headers"]
+ * if "raw_headers" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":170
+ * if "headers" in dct:
+ * ret.headers = dct["headers"]
+ * if "raw_headers" in dct: # <<<<<<<<<<<<<<
+ * ret.raw_headers = dct["raw_headers"]
+ * if "should_close" in dct:
+ */
+ __pyx_t_9 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_raw_headers, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 170, __pyx_L1_error)
+ __pyx_t_8 = (__pyx_t_9 != 0);
+ if (__pyx_t_8) {
+
+ /* "aiohttp/_http_parser.pyx":171
+ * ret.headers = dct["headers"]
+ * if "raw_headers" in dct:
+ * ret.raw_headers = dct["raw_headers"] # <<<<<<<<<<<<<<
+ * if "should_close" in dct:
+ * ret.should_close = dct["should_close"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_raw_headers); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 171, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->raw_headers);
+ __Pyx_DECREF(__pyx_v_ret->raw_headers);
+ __pyx_v_ret->raw_headers = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":170
+ * if "headers" in dct:
+ * ret.headers = dct["headers"]
+ * if "raw_headers" in dct: # <<<<<<<<<<<<<<
+ * ret.raw_headers = dct["raw_headers"]
+ * if "should_close" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":172
+ * if "raw_headers" in dct:
+ * ret.raw_headers = dct["raw_headers"]
+ * if "should_close" in dct: # <<<<<<<<<<<<<<
+ * ret.should_close = dct["should_close"]
+ * if "compression" in dct:
+ */
+ __pyx_t_8 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_should_close, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 172, __pyx_L1_error)
+ __pyx_t_9 = (__pyx_t_8 != 0);
+ if (__pyx_t_9) {
+
+ /* "aiohttp/_http_parser.pyx":173
+ * ret.raw_headers = dct["raw_headers"]
+ * if "should_close" in dct:
+ * ret.should_close = dct["should_close"] # <<<<<<<<<<<<<<
+ * if "compression" in dct:
+ * ret.compression = dct["compression"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_should_close); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 173, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->should_close);
+ __Pyx_DECREF(__pyx_v_ret->should_close);
+ __pyx_v_ret->should_close = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":172
+ * if "raw_headers" in dct:
+ * ret.raw_headers = dct["raw_headers"]
+ * if "should_close" in dct: # <<<<<<<<<<<<<<
+ * ret.should_close = dct["should_close"]
+ * if "compression" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":174
+ * if "should_close" in dct:
+ * ret.should_close = dct["should_close"]
+ * if "compression" in dct: # <<<<<<<<<<<<<<
+ * ret.compression = dct["compression"]
+ * if "upgrade" in dct:
+ */
+ __pyx_t_9 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_compression, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 174, __pyx_L1_error)
+ __pyx_t_8 = (__pyx_t_9 != 0);
+ if (__pyx_t_8) {
+
+ /* "aiohttp/_http_parser.pyx":175
+ * ret.should_close = dct["should_close"]
+ * if "compression" in dct:
+ * ret.compression = dct["compression"] # <<<<<<<<<<<<<<
+ * if "upgrade" in dct:
+ * ret.upgrade = dct["upgrade"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_compression); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 175, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->compression);
+ __Pyx_DECREF(__pyx_v_ret->compression);
+ __pyx_v_ret->compression = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":174
+ * if "should_close" in dct:
+ * ret.should_close = dct["should_close"]
+ * if "compression" in dct: # <<<<<<<<<<<<<<
+ * ret.compression = dct["compression"]
+ * if "upgrade" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":176
+ * if "compression" in dct:
+ * ret.compression = dct["compression"]
+ * if "upgrade" in dct: # <<<<<<<<<<<<<<
+ * ret.upgrade = dct["upgrade"]
+ * if "chunked" in dct:
+ */
+ __pyx_t_8 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_upgrade, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 176, __pyx_L1_error)
+ __pyx_t_9 = (__pyx_t_8 != 0);
+ if (__pyx_t_9) {
+
+ /* "aiohttp/_http_parser.pyx":177
+ * ret.compression = dct["compression"]
+ * if "upgrade" in dct:
+ * ret.upgrade = dct["upgrade"] # <<<<<<<<<<<<<<
+ * if "chunked" in dct:
+ * ret.chunked = dct["chunked"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_upgrade); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 177, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->upgrade);
+ __Pyx_DECREF(__pyx_v_ret->upgrade);
+ __pyx_v_ret->upgrade = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":176
+ * if "compression" in dct:
+ * ret.compression = dct["compression"]
+ * if "upgrade" in dct: # <<<<<<<<<<<<<<
+ * ret.upgrade = dct["upgrade"]
+ * if "chunked" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":178
+ * if "upgrade" in dct:
+ * ret.upgrade = dct["upgrade"]
+ * if "chunked" in dct: # <<<<<<<<<<<<<<
+ * ret.chunked = dct["chunked"]
+ * if "url" in dct:
+ */
+ __pyx_t_9 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_chunked, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_9 < 0)) __PYX_ERR(0, 178, __pyx_L1_error)
+ __pyx_t_8 = (__pyx_t_9 != 0);
+ if (__pyx_t_8) {
+
+ /* "aiohttp/_http_parser.pyx":179
+ * ret.upgrade = dct["upgrade"]
+ * if "chunked" in dct:
+ * ret.chunked = dct["chunked"] # <<<<<<<<<<<<<<
+ * if "url" in dct:
+ * ret.url = dct["url"]
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_chunked); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 179, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->chunked);
+ __Pyx_DECREF(__pyx_v_ret->chunked);
+ __pyx_v_ret->chunked = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":178
+ * if "upgrade" in dct:
+ * ret.upgrade = dct["upgrade"]
+ * if "chunked" in dct: # <<<<<<<<<<<<<<
+ * ret.chunked = dct["chunked"]
+ * if "url" in dct:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":180
+ * if "chunked" in dct:
+ * ret.chunked = dct["chunked"]
+ * if "url" in dct: # <<<<<<<<<<<<<<
+ * ret.url = dct["url"]
+ * return ret
+ */
+ __pyx_t_8 = (__Pyx_PyDict_ContainsTF(__pyx_n_u_url, __pyx_v_dct, Py_EQ)); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(0, 180, __pyx_L1_error)
+ __pyx_t_9 = (__pyx_t_8 != 0);
+ if (__pyx_t_9) {
+
+ /* "aiohttp/_http_parser.pyx":181
+ * ret.chunked = dct["chunked"]
+ * if "url" in dct:
+ * ret.url = dct["url"] # <<<<<<<<<<<<<<
+ * return ret
+ *
+ */
+ __pyx_t_11 = __Pyx_PyDict_GetItem(__pyx_v_dct, __pyx_n_u_url); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 181, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_v_ret->url);
+ __Pyx_DECREF(__pyx_v_ret->url);
+ __pyx_v_ret->url = __pyx_t_11;
+ __pyx_t_11 = 0;
+
+ /* "aiohttp/_http_parser.pyx":180
+ * if "chunked" in dct:
+ * ret.chunked = dct["chunked"]
+ * if "url" in dct: # <<<<<<<<<<<<<<
+ * ret.url = dct["url"]
+ * return ret
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":182
+ * if "url" in dct:
+ * ret.url = dct["url"]
+ * return ret # <<<<<<<<<<<<<<
+ *
+ * cdef _new_request_message(str method,
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_ret));
+ __pyx_r = ((PyObject *)__pyx_v_ret);
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":150
+ * return '<RawRequestMessage(' + sinfo + ')>'
+ *
+ * def _replace(self, **dct): # <<<<<<<<<<<<<<
+ * cdef RawRequestMessage ret
+ * ret = _new_request_message(self.method,
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawRequestMessage._replace", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_ret);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
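+/* Usage sketch (Python, illustrative only; the variable names here are
+ * assumptions, not part of the module):
+ *     msg2 = msg._replace(method="POST", path="/other")
+ * returns a new RawRequestMessage carrying the overridden fields while
+ * the original `msg` is left untouched. */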
+
+/* "aiohttp/_http_parser.pyx":111
+ * @cython.freelist(DEFAULT_FREELIST_SIZE)
+ * cdef class RawRequestMessage:
+ * cdef readonly str method # <<<<<<<<<<<<<<
+ * cdef readonly str path
+ * cdef readonly object version # HttpVersion
+ */
+
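+/* Each `cdef readonly` attribute compiles to a trivial property getter:
+ * a wrapper plus an impl that INCREFs the stored object and returns it.
+ * The same pattern repeats verbatim for every remaining field below. */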
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_6method_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_6method_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_6method___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_6method___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->method);
+ __pyx_r = __pyx_v_self->method;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":112
+ * cdef class RawRequestMessage:
+ * cdef readonly str method
+ * cdef readonly str path # <<<<<<<<<<<<<<
+ * cdef readonly object version # HttpVersion
+ * cdef readonly object headers # CIMultiDict
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_4path_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_4path_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_4path___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_4path___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->path);
+ __pyx_r = __pyx_v_self->path;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":113
+ * cdef readonly str method
+ * cdef readonly str path
+ * cdef readonly object version # HttpVersion # <<<<<<<<<<<<<<
+ * cdef readonly object headers # CIMultiDict
+ * cdef readonly object raw_headers # tuple
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7version_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7version___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7version___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->version);
+ __pyx_r = __pyx_v_self->version;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":114
+ * cdef readonly str path
+ * cdef readonly object version # HttpVersion
+ * cdef readonly object headers # CIMultiDict # <<<<<<<<<<<<<<
+ * cdef readonly object raw_headers # tuple
+ * cdef readonly object should_close
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7headers_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7headers_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7headers___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7headers___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->headers);
+ __pyx_r = __pyx_v_self->headers;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":115
+ * cdef readonly object version # HttpVersion
+ * cdef readonly object headers # CIMultiDict
+ * cdef readonly object raw_headers # tuple # <<<<<<<<<<<<<<
+ * cdef readonly object should_close
+ * cdef readonly object compression
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_11raw_headers_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_11raw_headers_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_11raw_headers___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_11raw_headers___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->raw_headers);
+ __pyx_r = __pyx_v_self->raw_headers;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":116
+ * cdef readonly object headers # CIMultiDict
+ * cdef readonly object raw_headers # tuple
+ * cdef readonly object should_close # <<<<<<<<<<<<<<
+ * cdef readonly object compression
+ * cdef readonly object upgrade
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_12should_close_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_12should_close_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_12should_close___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_12should_close___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->should_close);
+ __pyx_r = __pyx_v_self->should_close;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":117
+ * cdef readonly object raw_headers # tuple
+ * cdef readonly object should_close
+ * cdef readonly object compression # <<<<<<<<<<<<<<
+ * cdef readonly object upgrade
+ * cdef readonly object chunked
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_11compression_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_11compression_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_11compression___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_11compression___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->compression);
+ __pyx_r = __pyx_v_self->compression;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":118
+ * cdef readonly object should_close
+ * cdef readonly object compression
+ * cdef readonly object upgrade # <<<<<<<<<<<<<<
+ * cdef readonly object chunked
+ * cdef readonly object url # yarl.URL
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7upgrade_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7upgrade_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7upgrade___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7upgrade___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->upgrade);
+ __pyx_r = __pyx_v_self->upgrade;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":119
+ * cdef readonly object compression
+ * cdef readonly object upgrade
+ * cdef readonly object chunked # <<<<<<<<<<<<<<
+ * cdef readonly object url # yarl.URL
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7chunked_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7chunked_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7chunked___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_7chunked___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->chunked);
+ __pyx_r = __pyx_v_self->chunked;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":120
+ * cdef readonly object upgrade
+ * cdef readonly object chunked
+ * cdef readonly object url # yarl.URL # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, method, path, version, headers, raw_headers,
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_3url_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_3url_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_3url___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_3url___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->url);
+ __pyx_r = __pyx_v_self->url;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_6__reduce_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_6__reduce_cython__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self) {
+ PyObject *__pyx_v_state = 0;
+ PyObject *__pyx_v__dict = 0;
+ int __pyx_v_use_setstate;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":5
+ * cdef object _dict
+ * cdef bint use_setstate
+ * state = (self.chunked, self.compression, self.headers, self.method, self.path, self.raw_headers, self.should_close, self.upgrade, self.url, self.version) # <<<<<<<<<<<<<<
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ */
+ __pyx_t_1 = PyTuple_New(10); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v_self->chunked);
+ __Pyx_GIVEREF(__pyx_v_self->chunked);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->chunked);
+ __Pyx_INCREF(__pyx_v_self->compression);
+ __Pyx_GIVEREF(__pyx_v_self->compression);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->compression);
+ __Pyx_INCREF(__pyx_v_self->headers);
+ __Pyx_GIVEREF(__pyx_v_self->headers);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_self->headers);
+ __Pyx_INCREF(__pyx_v_self->method);
+ __Pyx_GIVEREF(__pyx_v_self->method);
+ PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_self->method);
+ __Pyx_INCREF(__pyx_v_self->path);
+ __Pyx_GIVEREF(__pyx_v_self->path);
+ PyTuple_SET_ITEM(__pyx_t_1, 4, __pyx_v_self->path);
+ __Pyx_INCREF(__pyx_v_self->raw_headers);
+ __Pyx_GIVEREF(__pyx_v_self->raw_headers);
+ PyTuple_SET_ITEM(__pyx_t_1, 5, __pyx_v_self->raw_headers);
+ __Pyx_INCREF(__pyx_v_self->should_close);
+ __Pyx_GIVEREF(__pyx_v_self->should_close);
+ PyTuple_SET_ITEM(__pyx_t_1, 6, __pyx_v_self->should_close);
+ __Pyx_INCREF(__pyx_v_self->upgrade);
+ __Pyx_GIVEREF(__pyx_v_self->upgrade);
+ PyTuple_SET_ITEM(__pyx_t_1, 7, __pyx_v_self->upgrade);
+ __Pyx_INCREF(__pyx_v_self->url);
+ __Pyx_GIVEREF(__pyx_v_self->url);
+ PyTuple_SET_ITEM(__pyx_t_1, 8, __pyx_v_self->url);
+ __Pyx_INCREF(__pyx_v_self->version);
+ __Pyx_GIVEREF(__pyx_v_self->version);
+ PyTuple_SET_ITEM(__pyx_t_1, 9, __pyx_v_self->version);
+ __pyx_v_state = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":6
+ * cdef bint use_setstate
+ * state = (self.chunked, self.compression, self.headers, self.method, self.path, self.raw_headers, self.should_close, self.upgrade, self.url, self.version)
+ * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
+ * if _dict is not None:
+ * state += (_dict,)
+ */
+ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v__dict = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":7
+ * state = (self.chunked, self.compression, self.headers, self.method, self.path, self.raw_headers, self.should_close, self.upgrade, self.url, self.version)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ __pyx_t_2 = (__pyx_v__dict != Py_None);
+ __pyx_t_3 = (__pyx_t_2 != 0);
+ if (__pyx_t_3) {
+
+ /* "(tree fragment)":8
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ * state += (_dict,) # <<<<<<<<<<<<<<
+ * use_setstate = True
+ * else:
+ */
+ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_v__dict);
+ __Pyx_GIVEREF(__pyx_v__dict);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
+ __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
+ __pyx_t_4 = 0;
+
+ /* "(tree fragment)":9
+ * if _dict is not None:
+ * state += (_dict,)
+ * use_setstate = True # <<<<<<<<<<<<<<
+ * else:
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.method is not None or self.path is not None or self.raw_headers is not None or self.should_close is not None or self.upgrade is not None or self.url is not None or self.version is not None
+ */
+ __pyx_v_use_setstate = 1;
+
+ /* "(tree fragment)":7
+ * state = (self.chunked, self.compression, self.headers, self.method, self.path, self.raw_headers, self.should_close, self.upgrade, self.url, self.version)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ goto __pyx_L3;
+ }
+
+ /* "(tree fragment)":11
+ * use_setstate = True
+ * else:
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.method is not None or self.path is not None or self.raw_headers is not None or self.should_close is not None or self.upgrade is not None or self.url is not None or self.version is not None # <<<<<<<<<<<<<<
+ * if use_setstate:
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, None), state
+ */
+ /*else*/ {
+ __pyx_t_2 = (__pyx_v_self->chunked != Py_None);
+ __pyx_t_5 = (__pyx_t_2 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->compression != Py_None);
+ __pyx_t_2 = (__pyx_t_5 != 0);
+ if (!__pyx_t_2) {
+ } else {
+ __pyx_t_3 = __pyx_t_2;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_2 = (__pyx_v_self->headers != Py_None);
+ __pyx_t_5 = (__pyx_t_2 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->method != ((PyObject*)Py_None));
+ __pyx_t_2 = (__pyx_t_5 != 0);
+ if (!__pyx_t_2) {
+ } else {
+ __pyx_t_3 = __pyx_t_2;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_2 = (__pyx_v_self->path != ((PyObject*)Py_None));
+ __pyx_t_5 = (__pyx_t_2 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->raw_headers != Py_None);
+ __pyx_t_2 = (__pyx_t_5 != 0);
+ if (!__pyx_t_2) {
+ } else {
+ __pyx_t_3 = __pyx_t_2;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_2 = (__pyx_v_self->should_close != Py_None);
+ __pyx_t_5 = (__pyx_t_2 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->upgrade != Py_None);
+ __pyx_t_2 = (__pyx_t_5 != 0);
+ if (!__pyx_t_2) {
+ } else {
+ __pyx_t_3 = __pyx_t_2;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_2 = (__pyx_v_self->url != Py_None);
+ __pyx_t_5 = (__pyx_t_2 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->version != Py_None);
+ __pyx_t_2 = (__pyx_t_5 != 0);
+ __pyx_t_3 = __pyx_t_2;
+ __pyx_L4_bool_binop_done:;
+ __pyx_v_use_setstate = __pyx_t_3;
+ }
+ __pyx_L3:;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.method is not None or self.path is not None or self.raw_headers is not None or self.should_close is not None or self.upgrade is not None or self.url is not None or self.version is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, None), state
+ * else:
+ */
+ __pyx_t_3 = (__pyx_v_use_setstate != 0);
+ if (__pyx_t_3) {
+
+ /* "(tree fragment)":13
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.method is not None or self.path is not None or self.raw_headers is not None or self.should_close is not None or self.upgrade is not None or self.url is not None or self.version is not None
+ * if use_setstate:
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, None), state # <<<<<<<<<<<<<<
+ * else:
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, state)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_RawRequestMessage); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_21004882);
+ __Pyx_GIVEREF(__pyx_int_21004882);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_21004882);
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
+ __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_1);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_v_state);
+ __pyx_t_4 = 0;
+ __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_6;
+ __pyx_t_6 = 0;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.method is not None or self.path is not None or self.raw_headers is not None or self.should_close is not None or self.upgrade is not None or self.url is not None or self.version is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, None), state
+ * else:
+ */
+ }
+
+ /* "(tree fragment)":15
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, None), state
+ * else:
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, state) # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle_RawRequestMessage__set_state(self, __pyx_state)
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_pyx_unpickle_RawRequestMessage); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_21004882);
+ __Pyx_GIVEREF(__pyx_int_21004882);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_21004882);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
+ __pyx_t_6 = 0;
+ __pyx_t_1 = 0;
+ __pyx_r = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawRequestMessage.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_state);
+ __Pyx_XDECREF(__pyx_v__dict);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
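+
+/* __reduce_cython__ implements pickling for RawRequestMessage: it packs the
+ * ten attributes into `state`, appends the instance __dict__ when one is
+ * present, and returns a (callable, args[, state]) reduce tuple naming
+ * __pyx_unpickle_RawRequestMessage. __pyx_int_21004882 is the decimal form
+ * of the 0x1408252 layout checksum quoted in the source comments; the
+ * unpickler compares it against the current class layout so that a stale
+ * pickle cannot be restored into a mismatched struct.
+ */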
+
+/* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle_RawRequestMessage__set_state(self, __pyx_state)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_9__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_8__setstate_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17RawRequestMessage_8__setstate_cython__(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":17
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, state)
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle_RawRequestMessage__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
+ __pyx_t_1 = __pyx_f_7aiohttp_12_http_parser___pyx_unpickle_RawRequestMessage__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle_RawRequestMessage, (type(self), 0x1408252, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle_RawRequestMessage__set_state(self, __pyx_state)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawRequestMessage.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
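+
+/* __setstate_cython__ is the companion entry point used by the three-item
+ * reduce tuple above: it type-checks __pyx_state as tuple-or-None and
+ * delegates to __pyx_unpickle_RawRequestMessage__set_state, which writes the
+ * fields back in the order they were packed. A round trip is then simply
+ * (a sketch, assuming a constructed instance msg):
+ *
+ *     import pickle
+ *     msg2 = pickle.loads(pickle.dumps(msg))
+ */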
+
+/* "aiohttp/_http_parser.pyx":184
+ * return ret
+ *
+ * cdef _new_request_message(str method, # <<<<<<<<<<<<<<
+ * str path,
+ * object version,
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser__new_request_message(PyObject *__pyx_v_method, PyObject *__pyx_v_path, PyObject *__pyx_v_version, PyObject *__pyx_v_headers, PyObject *__pyx_v_raw_headers, int __pyx_v_should_close, PyObject *__pyx_v_compression, int __pyx_v_upgrade, int __pyx_v_chunked, PyObject *__pyx_v_url) {
+ struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v_ret = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_new_request_message", 0);
+
+ /* "aiohttp/_http_parser.pyx":195
+ * object url):
+ * cdef RawRequestMessage ret
+ * ret = RawRequestMessage.__new__(RawRequestMessage) # <<<<<<<<<<<<<<
+ * ret.method = method
+ * ret.path = path
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_tp_new_7aiohttp_12_http_parser_RawRequestMessage(((PyTypeObject *)__pyx_ptype_7aiohttp_12_http_parser_RawRequestMessage), __pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 195, __pyx_L1_error)
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __pyx_v_ret = ((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":196
+ * cdef RawRequestMessage ret
+ * ret = RawRequestMessage.__new__(RawRequestMessage)
+ * ret.method = method # <<<<<<<<<<<<<<
+ * ret.path = path
+ * ret.version = version
+ */
+ __Pyx_INCREF(__pyx_v_method);
+ __Pyx_GIVEREF(__pyx_v_method);
+ __Pyx_GOTREF(__pyx_v_ret->method);
+ __Pyx_DECREF(__pyx_v_ret->method);
+ __pyx_v_ret->method = __pyx_v_method;
+
+ /* "aiohttp/_http_parser.pyx":197
+ * ret = RawRequestMessage.__new__(RawRequestMessage)
+ * ret.method = method
+ * ret.path = path # <<<<<<<<<<<<<<
+ * ret.version = version
+ * ret.headers = headers
+ */
+ __Pyx_INCREF(__pyx_v_path);
+ __Pyx_GIVEREF(__pyx_v_path);
+ __Pyx_GOTREF(__pyx_v_ret->path);
+ __Pyx_DECREF(__pyx_v_ret->path);
+ __pyx_v_ret->path = __pyx_v_path;
+
+ /* "aiohttp/_http_parser.pyx":198
+ * ret.method = method
+ * ret.path = path
+ * ret.version = version # <<<<<<<<<<<<<<
+ * ret.headers = headers
+ * ret.raw_headers = raw_headers
+ */
+ __Pyx_INCREF(__pyx_v_version);
+ __Pyx_GIVEREF(__pyx_v_version);
+ __Pyx_GOTREF(__pyx_v_ret->version);
+ __Pyx_DECREF(__pyx_v_ret->version);
+ __pyx_v_ret->version = __pyx_v_version;
+
+ /* "aiohttp/_http_parser.pyx":199
+ * ret.path = path
+ * ret.version = version
+ * ret.headers = headers # <<<<<<<<<<<<<<
+ * ret.raw_headers = raw_headers
+ * ret.should_close = should_close
+ */
+ __Pyx_INCREF(__pyx_v_headers);
+ __Pyx_GIVEREF(__pyx_v_headers);
+ __Pyx_GOTREF(__pyx_v_ret->headers);
+ __Pyx_DECREF(__pyx_v_ret->headers);
+ __pyx_v_ret->headers = __pyx_v_headers;
+
+ /* "aiohttp/_http_parser.pyx":200
+ * ret.version = version
+ * ret.headers = headers
+ * ret.raw_headers = raw_headers # <<<<<<<<<<<<<<
+ * ret.should_close = should_close
+ * ret.compression = compression
+ */
+ __Pyx_INCREF(__pyx_v_raw_headers);
+ __Pyx_GIVEREF(__pyx_v_raw_headers);
+ __Pyx_GOTREF(__pyx_v_ret->raw_headers);
+ __Pyx_DECREF(__pyx_v_ret->raw_headers);
+ __pyx_v_ret->raw_headers = __pyx_v_raw_headers;
+
+ /* "aiohttp/_http_parser.pyx":201
+ * ret.headers = headers
+ * ret.raw_headers = raw_headers
+ * ret.should_close = should_close # <<<<<<<<<<<<<<
+ * ret.compression = compression
+ * ret.upgrade = upgrade
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_should_close); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 201, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_ret->should_close);
+ __Pyx_DECREF(__pyx_v_ret->should_close);
+ __pyx_v_ret->should_close = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":202
+ * ret.raw_headers = raw_headers
+ * ret.should_close = should_close
+ * ret.compression = compression # <<<<<<<<<<<<<<
+ * ret.upgrade = upgrade
+ * ret.chunked = chunked
+ */
+ __Pyx_INCREF(__pyx_v_compression);
+ __Pyx_GIVEREF(__pyx_v_compression);
+ __Pyx_GOTREF(__pyx_v_ret->compression);
+ __Pyx_DECREF(__pyx_v_ret->compression);
+ __pyx_v_ret->compression = __pyx_v_compression;
+
+ /* "aiohttp/_http_parser.pyx":203
+ * ret.should_close = should_close
+ * ret.compression = compression
+ * ret.upgrade = upgrade # <<<<<<<<<<<<<<
+ * ret.chunked = chunked
+ * ret.url = url
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_upgrade); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 203, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_ret->upgrade);
+ __Pyx_DECREF(__pyx_v_ret->upgrade);
+ __pyx_v_ret->upgrade = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":204
+ * ret.compression = compression
+ * ret.upgrade = upgrade
+ * ret.chunked = chunked # <<<<<<<<<<<<<<
+ * ret.url = url
+ * return ret
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_chunked); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 204, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_ret->chunked);
+ __Pyx_DECREF(__pyx_v_ret->chunked);
+ __pyx_v_ret->chunked = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":205
+ * ret.upgrade = upgrade
+ * ret.chunked = chunked
+ * ret.url = url # <<<<<<<<<<<<<<
+ * return ret
+ *
+ */
+ __Pyx_INCREF(__pyx_v_url);
+ __Pyx_GIVEREF(__pyx_v_url);
+ __Pyx_GOTREF(__pyx_v_ret->url);
+ __Pyx_DECREF(__pyx_v_ret->url);
+ __pyx_v_ret->url = __pyx_v_url;
+
+ /* "aiohttp/_http_parser.pyx":206
+ * ret.chunked = chunked
+ * ret.url = url
+ * return ret # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_ret));
+ __pyx_r = ((PyObject *)__pyx_v_ret);
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":184
+ * return ret
+ *
+ * cdef _new_request_message(str method, # <<<<<<<<<<<<<<
+ * str path,
+ * object version,
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser._new_request_message", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_ret);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
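+
+/* _new_request_message is the C-level factory the parser uses instead of
+ * calling RawRequestMessage(...) through Python: the compiled
+ * `RawRequestMessage.__new__(RawRequestMessage)` becomes a direct
+ * __pyx_tp_new_* call that bypasses __init__, and each field is assigned
+ * with the usual INCREF-new / DECREF-old sequence. Note that should_close,
+ * upgrade and chunked arrive as C ints (bint parameters in the .pyx source)
+ * and are boxed to Python bools with __Pyx_PyBool_FromLong before being
+ * stored in the object's PyObject* slots.
+ */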
+
+/* "aiohttp/_http_parser.pyx":221
+ * cdef readonly object chunked
+ *
+ * def __init__(self, version, code, reason, headers, raw_headers, # <<<<<<<<<<<<<<
+ * should_close, compression, upgrade, chunked):
+ * self.version = version
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_version = 0;
+ PyObject *__pyx_v_code = 0;
+ PyObject *__pyx_v_reason = 0;
+ PyObject *__pyx_v_headers = 0;
+ PyObject *__pyx_v_raw_headers = 0;
+ PyObject *__pyx_v_should_close = 0;
+ PyObject *__pyx_v_compression = 0;
+ PyObject *__pyx_v_upgrade = 0;
+ PyObject *__pyx_v_chunked = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_version,&__pyx_n_s_code,&__pyx_n_s_reason,&__pyx_n_s_headers,&__pyx_n_s_raw_headers,&__pyx_n_s_should_close,&__pyx_n_s_compression,&__pyx_n_s_upgrade,&__pyx_n_s_chunked,0};
+ PyObject* values[9] = {0,0,0,0,0,0,0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+ CYTHON_FALLTHROUGH;
+ case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ CYTHON_FALLTHROUGH;
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ CYTHON_FALLTHROUGH;
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ CYTHON_FALLTHROUGH;
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ CYTHON_FALLTHROUGH;
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ CYTHON_FALLTHROUGH;
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_version)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_code)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, 1); __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_reason)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, 2); __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 3:
+ if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_headers)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, 3); __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 4:
+ if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_raw_headers)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, 4); __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 5:
+ if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_should_close)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, 5); __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 6:
+ if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_compression)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, 6); __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 7:
+ if (likely((values[7] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_upgrade)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, 7); __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 8:
+ if (likely((values[8] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_chunked)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, 8); __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 221, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 9) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+ }
+ __pyx_v_version = values[0];
+ __pyx_v_code = values[1];
+ __pyx_v_reason = values[2];
+ __pyx_v_headers = values[3];
+ __pyx_v_raw_headers = values[4];
+ __pyx_v_should_close = values[5];
+ __pyx_v_compression = values[6];
+ __pyx_v_upgrade = values[7];
+ __pyx_v_chunked = values[8];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 221, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.RawResponseMessage.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage___init__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self), __pyx_v_version, __pyx_v_code, __pyx_v_reason, __pyx_v_headers, __pyx_v_raw_headers, __pyx_v_should_close, __pyx_v_compression, __pyx_v_upgrade, __pyx_v_chunked);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
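+
+/* The wrapper above is Cython's generic argument unpacker: positional
+ * arguments fall through the CYTHON_FALLTHROUGH switch into values[],
+ * missing positionals are looked up by keyword, and
+ * __Pyx_RaiseArgtupleInvalid("__init__", 1, 9, 9, ...) enforces exactly
+ * nine arguments. Unexpected keywords are rejected by
+ * __Pyx_ParseOptionalKeywords before control reaches the typed
+ * implementation below.
+ */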
+
+static int __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage___init__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self, PyObject *__pyx_v_version, PyObject *__pyx_v_code, PyObject *__pyx_v_reason, PyObject *__pyx_v_headers, PyObject *__pyx_v_raw_headers, PyObject *__pyx_v_should_close, PyObject *__pyx_v_compression, PyObject *__pyx_v_upgrade, PyObject *__pyx_v_chunked) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__init__", 0);
+
+ /* "aiohttp/_http_parser.pyx":223
+ * def __init__(self, version, code, reason, headers, raw_headers,
+ * should_close, compression, upgrade, chunked):
+ * self.version = version # <<<<<<<<<<<<<<
+ * self.code = code
+ * self.reason = reason
+ */
+ __Pyx_INCREF(__pyx_v_version);
+ __Pyx_GIVEREF(__pyx_v_version);
+ __Pyx_GOTREF(__pyx_v_self->version);
+ __Pyx_DECREF(__pyx_v_self->version);
+ __pyx_v_self->version = __pyx_v_version;
+
+ /* "aiohttp/_http_parser.pyx":224
+ * should_close, compression, upgrade, chunked):
+ * self.version = version
+ * self.code = code # <<<<<<<<<<<<<<
+ * self.reason = reason
+ * self.headers = headers
+ */
+ __pyx_t_1 = __Pyx_PyInt_As_int(__pyx_v_code); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 224, __pyx_L1_error)
+ __pyx_v_self->code = __pyx_t_1;
+
+ /* "aiohttp/_http_parser.pyx":225
+ * self.version = version
+ * self.code = code
+ * self.reason = reason # <<<<<<<<<<<<<<
+ * self.headers = headers
+ * self.raw_headers = raw_headers
+ */
+ if (!(likely(PyUnicode_CheckExact(__pyx_v_reason))||((__pyx_v_reason) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_v_reason)->tp_name), 0))) __PYX_ERR(0, 225, __pyx_L1_error)
+ __pyx_t_2 = __pyx_v_reason;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->reason);
+ __Pyx_DECREF(__pyx_v_self->reason);
+ __pyx_v_self->reason = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":226
+ * self.code = code
+ * self.reason = reason
+ * self.headers = headers # <<<<<<<<<<<<<<
+ * self.raw_headers = raw_headers
+ * self.should_close = should_close
+ */
+ __Pyx_INCREF(__pyx_v_headers);
+ __Pyx_GIVEREF(__pyx_v_headers);
+ __Pyx_GOTREF(__pyx_v_self->headers);
+ __Pyx_DECREF(__pyx_v_self->headers);
+ __pyx_v_self->headers = __pyx_v_headers;
+
+ /* "aiohttp/_http_parser.pyx":227
+ * self.reason = reason
+ * self.headers = headers
+ * self.raw_headers = raw_headers # <<<<<<<<<<<<<<
+ * self.should_close = should_close
+ * self.compression = compression
+ */
+ __Pyx_INCREF(__pyx_v_raw_headers);
+ __Pyx_GIVEREF(__pyx_v_raw_headers);
+ __Pyx_GOTREF(__pyx_v_self->raw_headers);
+ __Pyx_DECREF(__pyx_v_self->raw_headers);
+ __pyx_v_self->raw_headers = __pyx_v_raw_headers;
+
+ /* "aiohttp/_http_parser.pyx":228
+ * self.headers = headers
+ * self.raw_headers = raw_headers
+ * self.should_close = should_close # <<<<<<<<<<<<<<
+ * self.compression = compression
+ * self.upgrade = upgrade
+ */
+ __Pyx_INCREF(__pyx_v_should_close);
+ __Pyx_GIVEREF(__pyx_v_should_close);
+ __Pyx_GOTREF(__pyx_v_self->should_close);
+ __Pyx_DECREF(__pyx_v_self->should_close);
+ __pyx_v_self->should_close = __pyx_v_should_close;
+
+ /* "aiohttp/_http_parser.pyx":229
+ * self.raw_headers = raw_headers
+ * self.should_close = should_close
+ * self.compression = compression # <<<<<<<<<<<<<<
+ * self.upgrade = upgrade
+ * self.chunked = chunked
+ */
+ __Pyx_INCREF(__pyx_v_compression);
+ __Pyx_GIVEREF(__pyx_v_compression);
+ __Pyx_GOTREF(__pyx_v_self->compression);
+ __Pyx_DECREF(__pyx_v_self->compression);
+ __pyx_v_self->compression = __pyx_v_compression;
+
+ /* "aiohttp/_http_parser.pyx":230
+ * self.should_close = should_close
+ * self.compression = compression
+ * self.upgrade = upgrade # <<<<<<<<<<<<<<
+ * self.chunked = chunked
+ *
+ */
+ __Pyx_INCREF(__pyx_v_upgrade);
+ __Pyx_GIVEREF(__pyx_v_upgrade);
+ __Pyx_GOTREF(__pyx_v_self->upgrade);
+ __Pyx_DECREF(__pyx_v_self->upgrade);
+ __pyx_v_self->upgrade = __pyx_v_upgrade;
+
+ /* "aiohttp/_http_parser.pyx":231
+ * self.compression = compression
+ * self.upgrade = upgrade
+ * self.chunked = chunked # <<<<<<<<<<<<<<
+ *
+ * def __repr__(self):
+ */
+ __Pyx_INCREF(__pyx_v_chunked);
+ __Pyx_GIVEREF(__pyx_v_chunked);
+ __Pyx_GOTREF(__pyx_v_self->chunked);
+ __Pyx_DECREF(__pyx_v_self->chunked);
+ __pyx_v_self->chunked = __pyx_v_chunked;
+
+ /* "aiohttp/_http_parser.pyx":221
+ * cdef readonly object chunked
+ *
+ * def __init__(self, version, code, reason, headers, raw_headers, # <<<<<<<<<<<<<<
+ * should_close, compression, upgrade, chunked):
+ * self.version = version
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawResponseMessage.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
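+
+/* RawResponseMessage.__init__ mirrors the RawRequestMessage constructor but
+ * adds two coercions: `code` is converted with __Pyx_PyInt_As_int, so
+ * self->code is a plain C int field rather than a PyObject*, and `reason`
+ * is checked to be unicode or None before being stored in its str-typed
+ * slot. The remaining arguments are stored by reference with the standard
+ * INCREF/GIVEREF/GOTREF/DECREF sequence.
+ */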
+
+/* "aiohttp/_http_parser.pyx":233
+ * self.chunked = chunked
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * info = []
+ * info.append(("version", self.version))
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_3__repr__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_3__repr__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_2__repr__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+static PyObject *__pyx_gb_7aiohttp_12_http_parser_18RawResponseMessage_8__repr___2generator1(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value); /* proto */
+
+/* "aiohttp/_http_parser.pyx":244
+ * info.append(("upgrade", self.upgrade))
+ * info.append(("chunked", self.chunked))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info) # <<<<<<<<<<<<<<
+ * return '<RawResponseMessage(' + sinfo + ')>'
+ *
+ */
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_8__repr___genexpr(PyObject *__pyx_self) {
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *__pyx_cur_scope;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("genexpr", 0);
+ __pyx_cur_scope = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *)__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr(__pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr, __pyx_empty_tuple, NULL);
+ if (unlikely(!__pyx_cur_scope)) {
+ __pyx_cur_scope = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *)Py_None);
+ __Pyx_INCREF(Py_None);
+ __PYX_ERR(0, 244, __pyx_L1_error)
+ } else {
+ __Pyx_GOTREF(__pyx_cur_scope);
+ }
+ __pyx_cur_scope->__pyx_outer_scope = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *) __pyx_self;
+ __Pyx_INCREF(((PyObject *)__pyx_cur_scope->__pyx_outer_scope));
+ __Pyx_GIVEREF(__pyx_cur_scope->__pyx_outer_scope);
+ {
+ __pyx_CoroutineObject *gen = __Pyx_Generator_New((__pyx_coroutine_body_t) __pyx_gb_7aiohttp_12_http_parser_18RawResponseMessage_8__repr___2generator1, NULL, (PyObject *) __pyx_cur_scope, __pyx_n_s_genexpr, __pyx_n_s_repr___locals_genexpr, __pyx_n_s_aiohttp__http_parser); if (unlikely(!gen)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_cur_scope);
+ __Pyx_RefNannyFinishContext();
+ return (PyObject *) gen;
+ }
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.RawResponseMessage.__repr__.genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __Pyx_DECREF(((PyObject *)__pyx_cur_scope));
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_gb_7aiohttp_12_http_parser_18RawResponseMessage_8__repr___2generator1(__pyx_CoroutineObject *__pyx_generator, CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject *__pyx_sent_value) /* generator body */
+{
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *__pyx_cur_scope = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *)__pyx_generator->closure);
+ PyObject *__pyx_r = NULL;
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *(*__pyx_t_7)(PyObject *);
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("genexpr", 0);
+ switch (__pyx_generator->resume_label) {
+ case 0: goto __pyx_L3_first_run;
+ default: /* CPython raises the right error here */
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ }
+ __pyx_L3_first_run:;
+ if (unlikely(!__pyx_sent_value)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __pyx_r = PyList_New(0); if (unlikely(!__pyx_r)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_r);
+ if (unlikely(!__pyx_cur_scope->__pyx_outer_scope->__pyx_v_info)) { __Pyx_RaiseClosureNameError("info"); __PYX_ERR(0, 244, __pyx_L1_error) }
+ if (unlikely(__pyx_cur_scope->__pyx_outer_scope->__pyx_v_info == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+ __PYX_ERR(0, 244, __pyx_L1_error)
+ }
+ __pyx_t_1 = __pyx_cur_scope->__pyx_outer_scope->__pyx_v_info; __Pyx_INCREF(__pyx_t_1); __pyx_t_2 = 0;
+ for (;;) {
+ if (__pyx_t_2 >= PyList_GET_SIZE(__pyx_t_1)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_3 = PyList_GET_ITEM(__pyx_t_1, __pyx_t_2); __Pyx_INCREF(__pyx_t_3); __pyx_t_2++; if (unlikely(0 < 0)) __PYX_ERR(0, 244, __pyx_L1_error)
+ #else
+ __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_2); __pyx_t_2++; if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ #endif
+ if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) {
+ PyObject* sequence = __pyx_t_3;
+ Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
+ if (unlikely(size != 2)) {
+ if (size > 2) __Pyx_RaiseTooManyValuesError(2);
+ else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+ __PYX_ERR(0, 244, __pyx_L1_error)
+ }
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ if (likely(PyTuple_CheckExact(sequence))) {
+ __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
+ __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
+ } else {
+ __pyx_t_4 = PyList_GET_ITEM(sequence, 0);
+ __pyx_t_5 = PyList_GET_ITEM(sequence, 1);
+ }
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ #else
+ __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ #endif
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ } else {
+ Py_ssize_t index = -1;
+ __pyx_t_6 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_7 = Py_TYPE(__pyx_t_6)->tp_iternext;
+ index = 0; __pyx_t_4 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_4)) goto __pyx_L6_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_4);
+ index = 1; __pyx_t_5 = __pyx_t_7(__pyx_t_6); if (unlikely(!__pyx_t_5)) goto __pyx_L6_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_5);
+ if (__Pyx_IternextUnpackEndCheck(__pyx_t_7(__pyx_t_6), 2) < 0) __PYX_ERR(0, 244, __pyx_L1_error)
+ __pyx_t_7 = NULL;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ goto __pyx_L7_unpacking_done;
+ __pyx_L6_unpacking_failed:;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_t_7 = NULL;
+ if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
+ __PYX_ERR(0, 244, __pyx_L1_error)
+ __pyx_L7_unpacking_done:;
+ }
+ __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_name);
+ __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_name, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __pyx_t_4 = 0;
+ __Pyx_XGOTREF(__pyx_cur_scope->__pyx_v_val);
+ __Pyx_XDECREF_SET(__pyx_cur_scope->__pyx_v_val, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_5);
+ __pyx_t_5 = 0;
+ __pyx_t_3 = PyNumber_Add(__pyx_cur_scope->__pyx_v_name, __pyx_kp_u_); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyObject_Repr(__pyx_cur_scope->__pyx_v_val); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_4 = PyNumber_Add(__pyx_t_3, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(__Pyx_ListComp_Append(__pyx_r, (PyObject*)__pyx_t_4))) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ CYTHON_MAYBE_UNUSED_VAR(__pyx_cur_scope);
+
+ /* function exit code */
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_r); __pyx_r = 0;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("genexpr", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ #if !CYTHON_USE_EXC_INFO_STACK
+ __Pyx_Coroutine_ResetAndClearException(__pyx_generator);
+ #endif
+ __pyx_generator->resume_label = -1;
+ __Pyx_Coroutine_clear((PyObject*)__pyx_generator);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
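+
+/* The generator expression inside `', '.join(...)` compiles to the pair of
+ * functions above: a factory (__pyx_pf_*___repr___genexpr) that allocates a
+ * closure scope capturing `info` and returns a generator object, plus a
+ * resume-label state machine for the body. Because the only consumer is
+ * str.join, the body runs to completion on its first resume, appending each
+ * `name + '=' + repr(val)` string to a list via __Pyx_ListComp_Append
+ * instead of yielding one value at a time.
+ */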
+
+/* "aiohttp/_http_parser.pyx":233
+ * self.chunked = chunked
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * info = []
+ * info.append(("version", self.version))
+ */
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_2__repr__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *__pyx_cur_scope;
+ PyObject *__pyx_v_sinfo = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__repr__", 0);
+ __pyx_cur_scope = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *)__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__(__pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__, __pyx_empty_tuple, NULL);
+ if (unlikely(!__pyx_cur_scope)) {
+ __pyx_cur_scope = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *)Py_None);
+ __Pyx_INCREF(Py_None);
+ __PYX_ERR(0, 233, __pyx_L1_error)
+ } else {
+ __Pyx_GOTREF(__pyx_cur_scope);
+ }
+
+ /* "aiohttp/_http_parser.pyx":234
+ *
+ * def __repr__(self):
+ * info = [] # <<<<<<<<<<<<<<
+ * info.append(("version", self.version))
+ * info.append(("code", self.code))
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 234, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_cur_scope->__pyx_v_info = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":235
+ * def __repr__(self):
+ * info = []
+ * info.append(("version", self.version)) # <<<<<<<<<<<<<<
+ * info.append(("code", self.code))
+ * info.append(("reason", self.reason))
+ */
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 235, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_u_version);
+ __Pyx_GIVEREF(__pyx_n_u_version);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_n_u_version);
+ __Pyx_INCREF(__pyx_v_self->version);
+ __Pyx_GIVEREF(__pyx_v_self->version);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_v_self->version);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 235, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":236
+ * info = []
+ * info.append(("version", self.version))
+ * info.append(("code", self.code)) # <<<<<<<<<<<<<<
+ * info.append(("reason", self.reason))
+ * info.append(("headers", self.headers))
+ */
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->code); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 236, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 236, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_n_u_code);
+ __Pyx_GIVEREF(__pyx_n_u_code);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_n_u_code);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
+ __pyx_t_1 = 0;
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_3); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 236, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":237
+ * info.append(("version", self.version))
+ * info.append(("code", self.code))
+ * info.append(("reason", self.reason)) # <<<<<<<<<<<<<<
+ * info.append(("headers", self.headers))
+ * info.append(("raw_headers", self.raw_headers))
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 237, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_n_u_reason);
+ __Pyx_GIVEREF(__pyx_n_u_reason);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_n_u_reason);
+ __Pyx_INCREF(__pyx_v_self->reason);
+ __Pyx_GIVEREF(__pyx_v_self->reason);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self->reason);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_3); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 237, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":238
+ * info.append(("code", self.code))
+ * info.append(("reason", self.reason))
+ * info.append(("headers", self.headers)) # <<<<<<<<<<<<<<
+ * info.append(("raw_headers", self.raw_headers))
+ * info.append(("should_close", self.should_close))
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 238, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_n_u_headers);
+ __Pyx_GIVEREF(__pyx_n_u_headers);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_n_u_headers);
+ __Pyx_INCREF(__pyx_v_self->headers);
+ __Pyx_GIVEREF(__pyx_v_self->headers);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self->headers);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_3); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 238, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":239
+ * info.append(("reason", self.reason))
+ * info.append(("headers", self.headers))
+ * info.append(("raw_headers", self.raw_headers)) # <<<<<<<<<<<<<<
+ * info.append(("should_close", self.should_close))
+ * info.append(("compression", self.compression))
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 239, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_n_u_raw_headers);
+ __Pyx_GIVEREF(__pyx_n_u_raw_headers);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_n_u_raw_headers);
+ __Pyx_INCREF(__pyx_v_self->raw_headers);
+ __Pyx_GIVEREF(__pyx_v_self->raw_headers);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self->raw_headers);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_3); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 239, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":240
+ * info.append(("headers", self.headers))
+ * info.append(("raw_headers", self.raw_headers))
+ * info.append(("should_close", self.should_close)) # <<<<<<<<<<<<<<
+ * info.append(("compression", self.compression))
+ * info.append(("upgrade", self.upgrade))
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 240, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_n_u_should_close);
+ __Pyx_GIVEREF(__pyx_n_u_should_close);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_n_u_should_close);
+ __Pyx_INCREF(__pyx_v_self->should_close);
+ __Pyx_GIVEREF(__pyx_v_self->should_close);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self->should_close);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_3); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 240, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":241
+ * info.append(("raw_headers", self.raw_headers))
+ * info.append(("should_close", self.should_close))
+ * info.append(("compression", self.compression)) # <<<<<<<<<<<<<<
+ * info.append(("upgrade", self.upgrade))
+ * info.append(("chunked", self.chunked))
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 241, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_n_u_compression);
+ __Pyx_GIVEREF(__pyx_n_u_compression);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_n_u_compression);
+ __Pyx_INCREF(__pyx_v_self->compression);
+ __Pyx_GIVEREF(__pyx_v_self->compression);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self->compression);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_3); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 241, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":242
+ * info.append(("should_close", self.should_close))
+ * info.append(("compression", self.compression))
+ * info.append(("upgrade", self.upgrade)) # <<<<<<<<<<<<<<
+ * info.append(("chunked", self.chunked))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 242, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_n_u_upgrade);
+ __Pyx_GIVEREF(__pyx_n_u_upgrade);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_n_u_upgrade);
+ __Pyx_INCREF(__pyx_v_self->upgrade);
+ __Pyx_GIVEREF(__pyx_v_self->upgrade);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self->upgrade);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_3); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 242, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":243
+ * info.append(("compression", self.compression))
+ * info.append(("upgrade", self.upgrade))
+ * info.append(("chunked", self.chunked)) # <<<<<<<<<<<<<<
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+ * return '<RawResponseMessage(' + sinfo + ')>'
+ */
+ __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 243, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_n_u_chunked);
+ __Pyx_GIVEREF(__pyx_n_u_chunked);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_n_u_chunked);
+ __Pyx_INCREF(__pyx_v_self->chunked);
+ __Pyx_GIVEREF(__pyx_v_self->chunked);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_v_self->chunked);
+ __pyx_t_2 = __Pyx_PyList_Append(__pyx_cur_scope->__pyx_v_info, __pyx_t_3); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(0, 243, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":244
+ * info.append(("upgrade", self.upgrade))
+ * info.append(("chunked", self.chunked))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info) # <<<<<<<<<<<<<<
+ * return '<RawResponseMessage(' + sinfo + ')>'
+ *
+ */
+ __pyx_t_3 = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_8__repr___genexpr(((PyObject*)__pyx_cur_scope)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = __Pyx_Generator_Next(__pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = PyUnicode_Join(__pyx_kp_u__2, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_sinfo = ((PyObject*)__pyx_t_3);
+ __pyx_t_3 = 0;
+
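+ /* `', '.join(...)` over a generator expression: Cython compiles the genexpr
+ * into an inlined helper (..._8__repr___genexpr) running over the closure's
+ * `info` list; a single __Pyx_Generator_Next call retrieves the fully built
+ * sequence of 'name=repr(val)' parts, and PyUnicode_Join then joins them
+ * with the ', ' separator constant (__pyx_kp_u__2). */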
+ /* "aiohttp/_http_parser.pyx":245
+ * info.append(("chunked", self.chunked))
+ * sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+ * return '<RawResponseMessage(' + sinfo + ')>' # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = __Pyx_PyUnicode_ConcatSafe(__pyx_kp_u_RawResponseMessage, __pyx_v_sinfo); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 245, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = __Pyx_PyUnicode_Concat(__pyx_t_3, __pyx_kp_u__3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 245, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":233
+ * self.chunked = chunked
+ *
+ * def __repr__(self): # <<<<<<<<<<<<<<
+ * info = []
+ * info.append(("version", self.version))
+ */
+
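+ /* Standard Cython epilogue: on error, live temporaries are XDECREF'd, a
+ * synthetic traceback frame is appended via __Pyx_AddTraceback, and NULL is
+ * returned; on success __pyx_r hands an owned reference back to the caller.
+ * The refnanny GOTREF/GIVEREF macros are debug instrumentation and compile
+ * to no-ops in normal builds. */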
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawResponseMessage.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_sinfo);
+ __Pyx_DECREF(((PyObject *)__pyx_cur_scope));
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
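+/* The blocks that follow are the attribute getters Cython generates for the
+ * `cdef readonly` fields of RawResponseMessage. Each consists of a Python
+ * wrapper (__pyx_pw_*) that casts `self` and delegates to an implementation
+ * (__pyx_pf_*) returning a new reference to the stored object; only the
+ * C-typed `int code` getter needs boxing (__Pyx_PyInt_From_int) and hence
+ * an error path. */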
+/* "aiohttp/_http_parser.pyx":211
+ * @cython.freelist(DEFAULT_FREELIST_SIZE)
+ * cdef class RawResponseMessage:
+ * cdef readonly object version # HttpVersion # <<<<<<<<<<<<<<
+ * cdef readonly int code
+ * cdef readonly str reason
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7version_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7version_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7version___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7version___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->version);
+ __pyx_r = __pyx_v_self->version;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":212
+ * cdef class RawResponseMessage:
+ * cdef readonly object version # HttpVersion
+ * cdef readonly int code # <<<<<<<<<<<<<<
+ * cdef readonly str reason
+ * cdef readonly object headers # CIMultiDict
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_4code_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_4code_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_4code___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_4code___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->code); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 212, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawResponseMessage.code.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":213
+ * cdef readonly object version # HttpVersion
+ * cdef readonly int code
+ * cdef readonly str reason # <<<<<<<<<<<<<<
+ * cdef readonly object headers # CIMultiDict
+ * cdef readonly object raw_headers # tuple
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_6reason_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_6reason_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_6reason___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_6reason___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->reason);
+ __pyx_r = __pyx_v_self->reason;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":214
+ * cdef readonly int code
+ * cdef readonly str reason
+ * cdef readonly object headers # CIMultiDict # <<<<<<<<<<<<<<
+ * cdef readonly object raw_headers # tuple
+ * cdef readonly object should_close
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7headers_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7headers_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7headers___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7headers___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->headers);
+ __pyx_r = __pyx_v_self->headers;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":215
+ * cdef readonly str reason
+ * cdef readonly object headers # CIMultiDict
+ * cdef readonly object raw_headers # tuple # <<<<<<<<<<<<<<
+ * cdef readonly object should_close
+ * cdef readonly object compression
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_11raw_headers_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_11raw_headers_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_11raw_headers___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_11raw_headers___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->raw_headers);
+ __pyx_r = __pyx_v_self->raw_headers;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":216
+ * cdef readonly object headers # CIMultiDict
+ * cdef readonly object raw_headers # tuple
+ * cdef readonly object should_close # <<<<<<<<<<<<<<
+ * cdef readonly object compression
+ * cdef readonly object upgrade
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_12should_close_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_12should_close_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_12should_close___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_12should_close___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->should_close);
+ __pyx_r = __pyx_v_self->should_close;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":217
+ * cdef readonly object raw_headers # tuple
+ * cdef readonly object should_close
+ * cdef readonly object compression # <<<<<<<<<<<<<<
+ * cdef readonly object upgrade
+ * cdef readonly object chunked
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_11compression_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_11compression_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_11compression___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_11compression___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->compression);
+ __pyx_r = __pyx_v_self->compression;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":218
+ * cdef readonly object should_close
+ * cdef readonly object compression
+ * cdef readonly object upgrade # <<<<<<<<<<<<<<
+ * cdef readonly object chunked
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7upgrade_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7upgrade_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7upgrade___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7upgrade___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->upgrade);
+ __pyx_r = __pyx_v_self->upgrade;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":219
+ * cdef readonly object compression
+ * cdef readonly object upgrade
+ * cdef readonly object chunked # <<<<<<<<<<<<<<
+ *
+ * def __init__(self, version, code, reason, headers, raw_headers,
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7chunked_1__get__(PyObject *__pyx_v_self); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7chunked_1__get__(PyObject *__pyx_v_self) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7chunked___get__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_7chunked___get__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__get__", 0);
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_self->chunked);
+ __pyx_r = __pyx_v_self->chunked;
+ goto __pyx_L0;
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
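+/* Pickle support synthesized by Cython (the "(tree fragment)" source): a
+ * __reduce_cython__/__setstate_cython__ pair built around the module-level
+ * __pyx_unpickle_RawResponseMessage helper. The constant 0xc7706dc
+ * (__pyx_int_209127132) is a checksum of the attribute layout, checked at
+ * unpickle time to reject state from an incompatible class version. */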
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_4__reduce_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_4__reduce_cython__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self) {
+ PyObject *__pyx_v_state = 0;
+ PyObject *__pyx_v__dict = 0;
+ int __pyx_v_use_setstate;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":5
+ * cdef object _dict
+ * cdef bint use_setstate
+ * state = (self.chunked, self.code, self.compression, self.headers, self.raw_headers, self.reason, self.should_close, self.upgrade, self.version) # <<<<<<<<<<<<<<
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ */
+ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->code); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(9); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_self->chunked);
+ __Pyx_GIVEREF(__pyx_v_self->chunked);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_self->chunked);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_1);
+ __Pyx_INCREF(__pyx_v_self->compression);
+ __Pyx_GIVEREF(__pyx_v_self->compression);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_self->compression);
+ __Pyx_INCREF(__pyx_v_self->headers);
+ __Pyx_GIVEREF(__pyx_v_self->headers);
+ PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_v_self->headers);
+ __Pyx_INCREF(__pyx_v_self->raw_headers);
+ __Pyx_GIVEREF(__pyx_v_self->raw_headers);
+ PyTuple_SET_ITEM(__pyx_t_2, 4, __pyx_v_self->raw_headers);
+ __Pyx_INCREF(__pyx_v_self->reason);
+ __Pyx_GIVEREF(__pyx_v_self->reason);
+ PyTuple_SET_ITEM(__pyx_t_2, 5, __pyx_v_self->reason);
+ __Pyx_INCREF(__pyx_v_self->should_close);
+ __Pyx_GIVEREF(__pyx_v_self->should_close);
+ PyTuple_SET_ITEM(__pyx_t_2, 6, __pyx_v_self->should_close);
+ __Pyx_INCREF(__pyx_v_self->upgrade);
+ __Pyx_GIVEREF(__pyx_v_self->upgrade);
+ PyTuple_SET_ITEM(__pyx_t_2, 7, __pyx_v_self->upgrade);
+ __Pyx_INCREF(__pyx_v_self->version);
+ __Pyx_GIVEREF(__pyx_v_self->version);
+ PyTuple_SET_ITEM(__pyx_t_2, 8, __pyx_v_self->version);
+ __pyx_t_1 = 0;
+ __pyx_v_state = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "(tree fragment)":6
+ * cdef bint use_setstate
+ * state = (self.chunked, self.code, self.compression, self.headers, self.raw_headers, self.reason, self.should_close, self.upgrade, self.version)
+ * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
+ * if _dict is not None:
+ * state += (_dict,)
+ */
+ __pyx_t_2 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_v__dict = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "(tree fragment)":7
+ * state = (self.chunked, self.code, self.compression, self.headers, self.raw_headers, self.reason, self.should_close, self.upgrade, self.version)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ __pyx_t_3 = (__pyx_v__dict != Py_None);
+ __pyx_t_4 = (__pyx_t_3 != 0);
+ if (__pyx_t_4) {
+
+ /* "(tree fragment)":8
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ * state += (_dict,) # <<<<<<<<<<<<<<
+ * use_setstate = True
+ * else:
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v__dict);
+ __Pyx_GIVEREF(__pyx_v__dict);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v__dict);
+ __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_1));
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":9
+ * if _dict is not None:
+ * state += (_dict,)
+ * use_setstate = True # <<<<<<<<<<<<<<
+ * else:
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.raw_headers is not None or self.reason is not None or self.should_close is not None or self.upgrade is not None or self.version is not None
+ */
+ __pyx_v_use_setstate = 1;
+
+ /* "(tree fragment)":7
+ * state = (self.chunked, self.code, self.compression, self.headers, self.raw_headers, self.reason, self.should_close, self.upgrade, self.version)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ goto __pyx_L3;
+ }
+
+ /* "(tree fragment)":11
+ * use_setstate = True
+ * else:
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.raw_headers is not None or self.reason is not None or self.should_close is not None or self.upgrade is not None or self.version is not None # <<<<<<<<<<<<<<
+ * if use_setstate:
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, None), state
+ */
+ /*else*/ {
+ __pyx_t_3 = (__pyx_v_self->chunked != Py_None);
+ __pyx_t_5 = (__pyx_t_3 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_4 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->compression != Py_None);
+ __pyx_t_3 = (__pyx_t_5 != 0);
+ if (!__pyx_t_3) {
+ } else {
+ __pyx_t_4 = __pyx_t_3;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_3 = (__pyx_v_self->headers != Py_None);
+ __pyx_t_5 = (__pyx_t_3 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_4 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->raw_headers != Py_None);
+ __pyx_t_3 = (__pyx_t_5 != 0);
+ if (!__pyx_t_3) {
+ } else {
+ __pyx_t_4 = __pyx_t_3;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_3 = (__pyx_v_self->reason != ((PyObject*)Py_None));
+ __pyx_t_5 = (__pyx_t_3 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_4 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->should_close != Py_None);
+ __pyx_t_3 = (__pyx_t_5 != 0);
+ if (!__pyx_t_3) {
+ } else {
+ __pyx_t_4 = __pyx_t_3;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_3 = (__pyx_v_self->upgrade != Py_None);
+ __pyx_t_5 = (__pyx_t_3 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_4 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->version != Py_None);
+ __pyx_t_3 = (__pyx_t_5 != 0);
+ __pyx_t_4 = __pyx_t_3;
+ __pyx_L4_bool_binop_done:;
+ __pyx_v_use_setstate = __pyx_t_4;
+ }
+ __pyx_L3:;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.raw_headers is not None or self.reason is not None or self.should_close is not None or self.upgrade is not None or self.version is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, None), state
+ * else:
+ */
+ __pyx_t_4 = (__pyx_v_use_setstate != 0);
+ if (__pyx_t_4) {
+
+ /* "(tree fragment)":13
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.raw_headers is not None or self.reason is not None or self.should_close is not None or self.upgrade is not None or self.version is not None
+ * if use_setstate:
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, None), state # <<<<<<<<<<<<<<
+ * else:
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, state)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle_RawResponseMessag); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_209127132);
+ __Pyx_GIVEREF(__pyx_int_209127132);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_209127132);
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, Py_None);
+ __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_2);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_v_state);
+ __pyx_t_1 = 0;
+ __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_6;
+ __pyx_t_6 = 0;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self.chunked is not None or self.compression is not None or self.headers is not None or self.raw_headers is not None or self.reason is not None or self.should_close is not None or self.upgrade is not None or self.version is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, None), state
+ * else:
+ */
+ }
+
+ /* "(tree fragment)":15
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, None), state
+ * else:
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, state) # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle_RawResponseMessage__set_state(self, __pyx_state)
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_pyx_unpickle_RawResponseMessag); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_209127132);
+ __Pyx_GIVEREF(__pyx_int_209127132);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_209127132);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_state);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2);
+ __pyx_t_6 = 0;
+ __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawResponseMessage.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_state);
+ __Pyx_XDECREF(__pyx_v__dict);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
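+/* __setstate_cython__ type-checks the incoming state (tuple or None) and
+ * delegates to the generated __pyx_unpickle_RawResponseMessage__set_state
+ * helper to restore the attributes. */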
+/* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle_RawResponseMessage__set_state(self, __pyx_state)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_6__setstate_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18RawResponseMessage_6__setstate_cython__(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":17
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, state)
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle_RawResponseMessage__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
+ __pyx_t_1 = __pyx_f_7aiohttp_12_http_parser___pyx_unpickle_RawResponseMessage__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle_RawResponseMessage, (type(self), 0xc7706dc, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle_RawResponseMessage__set_state(self, __pyx_state)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.RawResponseMessage.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
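+/* _new_response_message is a module-level cdef factory: it constructs a
+ * RawResponseMessage through the type's tp_new slot directly (bypassing the
+ * Python-level __init__), assigns each field in turn, and converts the C
+ * `bint` flags to Python bools with __Pyx_PyBool_FromLong. */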
+/* "aiohttp/_http_parser.pyx":248
+ *
+ *
+ * cdef _new_response_message(object version, # <<<<<<<<<<<<<<
+ * int code,
+ * str reason,
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser__new_response_message(PyObject *__pyx_v_version, int __pyx_v_code, PyObject *__pyx_v_reason, PyObject *__pyx_v_headers, PyObject *__pyx_v_raw_headers, int __pyx_v_should_close, PyObject *__pyx_v_compression, int __pyx_v_upgrade, int __pyx_v_chunked) {
+ struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v_ret = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_new_response_message", 0);
+
+ /* "aiohttp/_http_parser.pyx":258
+ * bint chunked):
+ * cdef RawResponseMessage ret
+ * ret = RawResponseMessage.__new__(RawResponseMessage) # <<<<<<<<<<<<<<
+ * ret.version = version
+ * ret.code = code
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_tp_new_7aiohttp_12_http_parser_RawResponseMessage(((PyTypeObject *)__pyx_ptype_7aiohttp_12_http_parser_RawResponseMessage), __pyx_empty_tuple, NULL)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 258, __pyx_L1_error)
+ __Pyx_GOTREF(((PyObject *)__pyx_t_1));
+ __pyx_v_ret = ((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":259
+ * cdef RawResponseMessage ret
+ * ret = RawResponseMessage.__new__(RawResponseMessage)
+ * ret.version = version # <<<<<<<<<<<<<<
+ * ret.code = code
+ * ret.reason = reason
+ */
+ __Pyx_INCREF(__pyx_v_version);
+ __Pyx_GIVEREF(__pyx_v_version);
+ __Pyx_GOTREF(__pyx_v_ret->version);
+ __Pyx_DECREF(__pyx_v_ret->version);
+ __pyx_v_ret->version = __pyx_v_version;
+
+ /* "aiohttp/_http_parser.pyx":260
+ * ret = RawResponseMessage.__new__(RawResponseMessage)
+ * ret.version = version
+ * ret.code = code # <<<<<<<<<<<<<<
+ * ret.reason = reason
+ * ret.headers = headers
+ */
+ __pyx_v_ret->code = __pyx_v_code;
+
+ /* "aiohttp/_http_parser.pyx":261
+ * ret.version = version
+ * ret.code = code
+ * ret.reason = reason # <<<<<<<<<<<<<<
+ * ret.headers = headers
+ * ret.raw_headers = raw_headers
+ */
+ __Pyx_INCREF(__pyx_v_reason);
+ __Pyx_GIVEREF(__pyx_v_reason);
+ __Pyx_GOTREF(__pyx_v_ret->reason);
+ __Pyx_DECREF(__pyx_v_ret->reason);
+ __pyx_v_ret->reason = __pyx_v_reason;
+
+ /* "aiohttp/_http_parser.pyx":262
+ * ret.code = code
+ * ret.reason = reason
+ * ret.headers = headers # <<<<<<<<<<<<<<
+ * ret.raw_headers = raw_headers
+ * ret.should_close = should_close
+ */
+ __Pyx_INCREF(__pyx_v_headers);
+ __Pyx_GIVEREF(__pyx_v_headers);
+ __Pyx_GOTREF(__pyx_v_ret->headers);
+ __Pyx_DECREF(__pyx_v_ret->headers);
+ __pyx_v_ret->headers = __pyx_v_headers;
+
+ /* "aiohttp/_http_parser.pyx":263
+ * ret.reason = reason
+ * ret.headers = headers
+ * ret.raw_headers = raw_headers # <<<<<<<<<<<<<<
+ * ret.should_close = should_close
+ * ret.compression = compression
+ */
+ __Pyx_INCREF(__pyx_v_raw_headers);
+ __Pyx_GIVEREF(__pyx_v_raw_headers);
+ __Pyx_GOTREF(__pyx_v_ret->raw_headers);
+ __Pyx_DECREF(__pyx_v_ret->raw_headers);
+ __pyx_v_ret->raw_headers = __pyx_v_raw_headers;
+
+ /* "aiohttp/_http_parser.pyx":264
+ * ret.headers = headers
+ * ret.raw_headers = raw_headers
+ * ret.should_close = should_close # <<<<<<<<<<<<<<
+ * ret.compression = compression
+ * ret.upgrade = upgrade
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_should_close); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 264, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_ret->should_close);
+ __Pyx_DECREF(__pyx_v_ret->should_close);
+ __pyx_v_ret->should_close = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":265
+ * ret.raw_headers = raw_headers
+ * ret.should_close = should_close
+ * ret.compression = compression # <<<<<<<<<<<<<<
+ * ret.upgrade = upgrade
+ * ret.chunked = chunked
+ */
+ __Pyx_INCREF(__pyx_v_compression);
+ __Pyx_GIVEREF(__pyx_v_compression);
+ __Pyx_GOTREF(__pyx_v_ret->compression);
+ __Pyx_DECREF(__pyx_v_ret->compression);
+ __pyx_v_ret->compression = __pyx_v_compression;
+
+ /* "aiohttp/_http_parser.pyx":266
+ * ret.should_close = should_close
+ * ret.compression = compression
+ * ret.upgrade = upgrade # <<<<<<<<<<<<<<
+ * ret.chunked = chunked
+ * return ret
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_upgrade); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 266, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_ret->upgrade);
+ __Pyx_DECREF(__pyx_v_ret->upgrade);
+ __pyx_v_ret->upgrade = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":267
+ * ret.compression = compression
+ * ret.upgrade = upgrade
+ * ret.chunked = chunked # <<<<<<<<<<<<<<
+ * return ret
+ *
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_chunked); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 267, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_ret->chunked);
+ __Pyx_DECREF(__pyx_v_ret->chunked);
+ __pyx_v_ret->chunked = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":268
+ * ret.upgrade = upgrade
+ * ret.chunked = chunked
+ * return ret # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject *)__pyx_v_ret));
+ __pyx_r = ((PyObject *)__pyx_v_ret);
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":248
+ *
+ *
+ * cdef _new_response_message(object version, # <<<<<<<<<<<<<<
+ * int code,
+ * str reason,
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser._new_response_message", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_ret);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
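+/* HttpParser.__cinit__ allocates the underlying C http_parser and
+ * http_parser_settings structs with PyMem_Malloc, raising MemoryError on
+ * failure. If __cinit__ raises, the half-built object is torn down and
+ * __dealloc__ still runs; since tp_new zero-fills the struct and
+ * PyMem_Free(NULL) is a no-op, that teardown is safe. */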
+/* "aiohttp/_http_parser.pyx":312
+ * Py_buffer py_buf
+ *
+ * def __cinit__(self): # <<<<<<<<<<<<<<
+ * self._cparser = <cparser.http_parser*> \
+ * PyMem_Malloc(sizeof(cparser.http_parser))
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_12_http_parser_10HttpParser_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_7aiohttp_12_http_parser_10HttpParser_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
+ if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) {
+ __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;}
+ if (unlikely(__pyx_kwds) && unlikely(PyDict_Size(__pyx_kwds) > 0) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__cinit__", 0))) return -1;
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_10HttpParser___cinit__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_7aiohttp_12_http_parser_10HttpParser___cinit__(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__cinit__", 0);
+
+ /* "aiohttp/_http_parser.pyx":313
+ *
+ * def __cinit__(self):
+ * self._cparser = <cparser.http_parser*> \ # <<<<<<<<<<<<<<
+ * PyMem_Malloc(sizeof(cparser.http_parser))
+ * if self._cparser is NULL:
+ */
+ __pyx_v_self->_cparser = ((struct http_parser *)PyMem_Malloc((sizeof(struct http_parser))));
+
+ /* "aiohttp/_http_parser.pyx":315
+ * self._cparser = <cparser.http_parser*> \
+ * PyMem_Malloc(sizeof(cparser.http_parser))
+ * if self._cparser is NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError()
+ *
+ */
+ __pyx_t_1 = ((__pyx_v_self->_cparser == NULL) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_parser.pyx":316
+ * PyMem_Malloc(sizeof(cparser.http_parser))
+ * if self._cparser is NULL:
+ * raise MemoryError() # <<<<<<<<<<<<<<
+ *
+ * self._csettings = <cparser.http_parser_settings*> \
+ */
+ PyErr_NoMemory(); __PYX_ERR(0, 316, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":315
+ * self._cparser = <cparser.http_parser*> \
+ * PyMem_Malloc(sizeof(cparser.http_parser))
+ * if self._cparser is NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError()
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":318
+ * raise MemoryError()
+ *
+ * self._csettings = <cparser.http_parser_settings*> \ # <<<<<<<<<<<<<<
+ * PyMem_Malloc(sizeof(cparser.http_parser_settings))
+ * if self._csettings is NULL:
+ */
+ __pyx_v_self->_csettings = ((struct http_parser_settings *)PyMem_Malloc((sizeof(struct http_parser_settings))));
+
+ /* "aiohttp/_http_parser.pyx":320
+ * self._csettings = <cparser.http_parser_settings*> \
+ * PyMem_Malloc(sizeof(cparser.http_parser_settings))
+ * if self._csettings is NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError()
+ *
+ */
+ __pyx_t_1 = ((__pyx_v_self->_csettings == NULL) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_parser.pyx":321
+ * PyMem_Malloc(sizeof(cparser.http_parser_settings))
+ * if self._csettings is NULL:
+ * raise MemoryError() # <<<<<<<<<<<<<<
+ *
+ * def __dealloc__(self):
+ */
+ PyErr_NoMemory(); __PYX_ERR(0, 321, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":320
+ * self._csettings = <cparser.http_parser_settings*> \
+ * PyMem_Malloc(sizeof(cparser.http_parser_settings))
+ * if self._csettings is NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError()
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":312
+ * Py_buffer py_buf
+ *
+ * def __cinit__(self): # <<<<<<<<<<<<<<
+ * self._cparser = <cparser.http_parser*> \
+ * PyMem_Malloc(sizeof(cparser.http_parser))
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":323
+ * raise MemoryError()
+ *
+ * def __dealloc__(self): # <<<<<<<<<<<<<<
+ * PyMem_Free(self._cparser)
+ * PyMem_Free(self._csettings)
+ */
+
+/* Python wrapper */
+static void __pyx_pw_7aiohttp_12_http_parser_10HttpParser_3__dealloc__(PyObject *__pyx_v_self); /*proto*/
+static void __pyx_pw_7aiohttp_12_http_parser_10HttpParser_3__dealloc__(PyObject *__pyx_v_self) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
+ __pyx_pf_7aiohttp_12_http_parser_10HttpParser_2__dealloc__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+static void __pyx_pf_7aiohttp_12_http_parser_10HttpParser_2__dealloc__(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__dealloc__", 0);
+
+ /* "aiohttp/_http_parser.pyx":324
+ *
+ * def __dealloc__(self):
+ * PyMem_Free(self._cparser) # <<<<<<<<<<<<<<
+ * PyMem_Free(self._csettings)
+ *
+ */
+ PyMem_Free(__pyx_v_self->_cparser);
+
+ /* "aiohttp/_http_parser.pyx":325
+ * def __dealloc__(self):
+ * PyMem_Free(self._cparser)
+ * PyMem_Free(self._csettings) # <<<<<<<<<<<<<<
+ *
+ * cdef _init(self, cparser.http_parser_type mode,
+ */
+ PyMem_Free(__pyx_v_self->_csettings);
+
+ /* "aiohttp/_http_parser.pyx":323
+ * raise MemoryError()
+ *
+ * def __dealloc__(self): # <<<<<<<<<<<<<<
+ * PyMem_Free(self._cparser)
+ * PyMem_Free(self._csettings)
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
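+/* _init is a cdef method, so its optional arguments arrive through the
+ * __pyx_opt_args struct: __pyx_n records how many were explicitly passed,
+ * and the nested unpacking below overrides the compiled-in defaults
+ * (8190/32768/8190 appear as 0x1FFE/0x8000/0x1FFE). The method then
+ * initializes the C parser, stores the Python-side configuration, and wires
+ * the callback table (_csettings) to the module's cb_on_* trampolines. */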
+/* "aiohttp/_http_parser.pyx":327
+ * PyMem_Free(self._csettings)
+ *
+ * cdef _init(self, cparser.http_parser_type mode, # <<<<<<<<<<<<<<
+ * object protocol, object loop, int limit,
+ * object timer=None,
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__init(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, enum http_parser_type __pyx_v_mode, PyObject *__pyx_v_protocol, PyObject *__pyx_v_loop, int __pyx_v_limit, struct __pyx_opt_args_7aiohttp_12_http_parser_10HttpParser__init *__pyx_optional_args) {
+
+ /* "aiohttp/_http_parser.pyx":329
+ * cdef _init(self, cparser.http_parser_type mode,
+ * object protocol, object loop, int limit,
+ * object timer=None, # <<<<<<<<<<<<<<
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ */
+ PyObject *__pyx_v_timer = ((PyObject *)Py_None);
+ size_t __pyx_v_max_line_size = ((size_t)0x1FFE);
+ size_t __pyx_v_max_headers = ((size_t)0x8000);
+ size_t __pyx_v_max_field_size = ((size_t)0x1FFE);
+
+ /* "aiohttp/_http_parser.pyx":331
+ * object timer=None,
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None, # <<<<<<<<<<<<<<
+ * bint response_with_body=True, bint read_until_eof=False,
+ * bint auto_decompress=True):
+ */
+ PyObject *__pyx_v_payload_exception = ((PyObject *)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":332
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ * bint response_with_body=True, bint read_until_eof=False, # <<<<<<<<<<<<<<
+ * bint auto_decompress=True):
+ * cparser.http_parser_init(self._cparser, mode)
+ */
+ int __pyx_v_response_with_body = ((int)1);
+ int __pyx_v_read_until_eof = ((int)0);
+
+ /* "aiohttp/_http_parser.pyx":333
+ * size_t max_field_size=8190, payload_exception=None,
+ * bint response_with_body=True, bint read_until_eof=False,
+ * bint auto_decompress=True): # <<<<<<<<<<<<<<
+ * cparser.http_parser_init(self._cparser, mode)
+ * self._cparser.data = <void*>self
+ */
+ int __pyx_v_auto_decompress = ((int)1);
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_init", 0);
+ if (__pyx_optional_args) {
+ if (__pyx_optional_args->__pyx_n > 0) {
+ __pyx_v_timer = __pyx_optional_args->timer;
+ if (__pyx_optional_args->__pyx_n > 1) {
+ __pyx_v_max_line_size = __pyx_optional_args->max_line_size;
+ if (__pyx_optional_args->__pyx_n > 2) {
+ __pyx_v_max_headers = __pyx_optional_args->max_headers;
+ if (__pyx_optional_args->__pyx_n > 3) {
+ __pyx_v_max_field_size = __pyx_optional_args->max_field_size;
+ if (__pyx_optional_args->__pyx_n > 4) {
+ __pyx_v_payload_exception = __pyx_optional_args->payload_exception;
+ if (__pyx_optional_args->__pyx_n > 5) {
+ __pyx_v_response_with_body = __pyx_optional_args->response_with_body;
+ if (__pyx_optional_args->__pyx_n > 6) {
+ __pyx_v_read_until_eof = __pyx_optional_args->read_until_eof;
+ if (__pyx_optional_args->__pyx_n > 7) {
+ __pyx_v_auto_decompress = __pyx_optional_args->auto_decompress;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /* "aiohttp/_http_parser.pyx":334
+ * bint response_with_body=True, bint read_until_eof=False,
+ * bint auto_decompress=True):
+ * cparser.http_parser_init(self._cparser, mode) # <<<<<<<<<<<<<<
+ * self._cparser.data = <void*>self
+ * self._cparser.content_length = 0
+ */
+ http_parser_init(__pyx_v_self->_cparser, __pyx_v_mode);
+
+ /* "aiohttp/_http_parser.pyx":335
+ * bint auto_decompress=True):
+ * cparser.http_parser_init(self._cparser, mode)
+ * self._cparser.data = <void*>self # <<<<<<<<<<<<<<
+ * self._cparser.content_length = 0
+ *
+ */
+ __pyx_v_self->_cparser->data = ((void *)__pyx_v_self);
+
+ /* "aiohttp/_http_parser.pyx":336
+ * cparser.http_parser_init(self._cparser, mode)
+ * self._cparser.data = <void*>self
+ * self._cparser.content_length = 0 # <<<<<<<<<<<<<<
+ *
+ * cparser.http_parser_settings_init(self._csettings)
+ */
+ __pyx_v_self->_cparser->content_length = 0;
+
+ /* "aiohttp/_http_parser.pyx":338
+ * self._cparser.content_length = 0
+ *
+ * cparser.http_parser_settings_init(self._csettings) # <<<<<<<<<<<<<<
+ *
+ * self._protocol = protocol
+ */
+ http_parser_settings_init(__pyx_v_self->_csettings);
+
+ /* "aiohttp/_http_parser.pyx":340
+ * cparser.http_parser_settings_init(self._csettings)
+ *
+ * self._protocol = protocol # <<<<<<<<<<<<<<
+ * self._loop = loop
+ * self._timer = timer
+ */
+ __Pyx_INCREF(__pyx_v_protocol);
+ __Pyx_GIVEREF(__pyx_v_protocol);
+ __Pyx_GOTREF(__pyx_v_self->_protocol);
+ __Pyx_DECREF(__pyx_v_self->_protocol);
+ __pyx_v_self->_protocol = __pyx_v_protocol;
+
+ /* "aiohttp/_http_parser.pyx":341
+ *
+ * self._protocol = protocol
+ * self._loop = loop # <<<<<<<<<<<<<<
+ * self._timer = timer
+ *
+ */
+ __Pyx_INCREF(__pyx_v_loop);
+ __Pyx_GIVEREF(__pyx_v_loop);
+ __Pyx_GOTREF(__pyx_v_self->_loop);
+ __Pyx_DECREF(__pyx_v_self->_loop);
+ __pyx_v_self->_loop = __pyx_v_loop;
+
+ /* "aiohttp/_http_parser.pyx":342
+ * self._protocol = protocol
+ * self._loop = loop
+ * self._timer = timer # <<<<<<<<<<<<<<
+ *
+ * self._buf = bytearray()
+ */
+ __Pyx_INCREF(__pyx_v_timer);
+ __Pyx_GIVEREF(__pyx_v_timer);
+ __Pyx_GOTREF(__pyx_v_self->_timer);
+ __Pyx_DECREF(__pyx_v_self->_timer);
+ __pyx_v_self->_timer = __pyx_v_timer;
+
+ /* "aiohttp/_http_parser.pyx":344
+ * self._timer = timer
+ *
+ * self._buf = bytearray() # <<<<<<<<<<<<<<
+ * self._payload = None
+ * self._payload_error = 0
+ */
+ __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)(&PyByteArray_Type))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 344, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->_buf);
+ __Pyx_DECREF(__pyx_v_self->_buf);
+ __pyx_v_self->_buf = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":345
+ *
+ * self._buf = bytearray()
+ * self._payload = None # <<<<<<<<<<<<<<
+ * self._payload_error = 0
+ * self._payload_exception = payload_exception
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->_payload);
+ __Pyx_DECREF(__pyx_v_self->_payload);
+ __pyx_v_self->_payload = Py_None;
+
+ /* "aiohttp/_http_parser.pyx":346
+ * self._buf = bytearray()
+ * self._payload = None
+ * self._payload_error = 0 # <<<<<<<<<<<<<<
+ * self._payload_exception = payload_exception
+ * self._messages = []
+ */
+ __pyx_v_self->_payload_error = 0;
+
+ /* "aiohttp/_http_parser.pyx":347
+ * self._payload = None
+ * self._payload_error = 0
+ * self._payload_exception = payload_exception # <<<<<<<<<<<<<<
+ * self._messages = []
+ *
+ */
+ __Pyx_INCREF(__pyx_v_payload_exception);
+ __Pyx_GIVEREF(__pyx_v_payload_exception);
+ __Pyx_GOTREF(__pyx_v_self->_payload_exception);
+ __Pyx_DECREF(__pyx_v_self->_payload_exception);
+ __pyx_v_self->_payload_exception = __pyx_v_payload_exception;
+
+ /* "aiohttp/_http_parser.pyx":348
+ * self._payload_error = 0
+ * self._payload_exception = payload_exception
+ * self._messages = [] # <<<<<<<<<<<<<<
+ *
+ * self._raw_name = bytearray()
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 348, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->_messages);
+ __Pyx_DECREF(__pyx_v_self->_messages);
+ __pyx_v_self->_messages = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":350
+ * self._messages = []
+ *
+ * self._raw_name = bytearray() # <<<<<<<<<<<<<<
+ * self._raw_value = bytearray()
+ * self._has_value = False
+ */
+ __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)(&PyByteArray_Type))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 350, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->_raw_name);
+ __Pyx_DECREF(__pyx_v_self->_raw_name);
+ __pyx_v_self->_raw_name = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":351
+ *
+ * self._raw_name = bytearray()
+ * self._raw_value = bytearray() # <<<<<<<<<<<<<<
+ * self._has_value = False
+ *
+ */
+ __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)(&PyByteArray_Type))); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 351, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->_raw_value);
+ __Pyx_DECREF(__pyx_v_self->_raw_value);
+ __pyx_v_self->_raw_value = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":352
+ * self._raw_name = bytearray()
+ * self._raw_value = bytearray()
+ * self._has_value = False # <<<<<<<<<<<<<<
+ *
+ * self._max_line_size = max_line_size
+ */
+ __pyx_v_self->_has_value = 0;
+
+ /* "aiohttp/_http_parser.pyx":354
+ * self._has_value = False
+ *
+ * self._max_line_size = max_line_size # <<<<<<<<<<<<<<
+ * self._max_headers = max_headers
+ * self._max_field_size = max_field_size
+ */
+ __pyx_v_self->_max_line_size = __pyx_v_max_line_size;
+
+ /* "aiohttp/_http_parser.pyx":355
+ *
+ * self._max_line_size = max_line_size
+ * self._max_headers = max_headers # <<<<<<<<<<<<<<
+ * self._max_field_size = max_field_size
+ * self._response_with_body = response_with_body
+ */
+ __pyx_v_self->_max_headers = __pyx_v_max_headers;
+
+ /* "aiohttp/_http_parser.pyx":356
+ * self._max_line_size = max_line_size
+ * self._max_headers = max_headers
+ * self._max_field_size = max_field_size # <<<<<<<<<<<<<<
+ * self._response_with_body = response_with_body
+ * self._read_until_eof = read_until_eof
+ */
+ __pyx_v_self->_max_field_size = __pyx_v_max_field_size;
+
+ /* "aiohttp/_http_parser.pyx":357
+ * self._max_headers = max_headers
+ * self._max_field_size = max_field_size
+ * self._response_with_body = response_with_body # <<<<<<<<<<<<<<
+ * self._read_until_eof = read_until_eof
+ * self._upgraded = False
+ */
+ __pyx_v_self->_response_with_body = __pyx_v_response_with_body;
+
+ /* "aiohttp/_http_parser.pyx":358
+ * self._max_field_size = max_field_size
+ * self._response_with_body = response_with_body
+ * self._read_until_eof = read_until_eof # <<<<<<<<<<<<<<
+ * self._upgraded = False
+ * self._auto_decompress = auto_decompress
+ */
+ __pyx_v_self->_read_until_eof = __pyx_v_read_until_eof;
+
+ /* "aiohttp/_http_parser.pyx":359
+ * self._response_with_body = response_with_body
+ * self._read_until_eof = read_until_eof
+ * self._upgraded = False # <<<<<<<<<<<<<<
+ * self._auto_decompress = auto_decompress
+ * self._content_encoding = None
+ */
+ __pyx_v_self->_upgraded = 0;
+
+ /* "aiohttp/_http_parser.pyx":360
+ * self._read_until_eof = read_until_eof
+ * self._upgraded = False
+ * self._auto_decompress = auto_decompress # <<<<<<<<<<<<<<
+ * self._content_encoding = None
+ *
+ */
+ __pyx_v_self->_auto_decompress = __pyx_v_auto_decompress;
+
+ /* "aiohttp/_http_parser.pyx":361
+ * self._upgraded = False
+ * self._auto_decompress = auto_decompress
+ * self._content_encoding = None # <<<<<<<<<<<<<<
+ *
+ * self._csettings.on_url = cb_on_url
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->_content_encoding);
+ __Pyx_DECREF(__pyx_v_self->_content_encoding);
+ __pyx_v_self->_content_encoding = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":363
+ * self._content_encoding = None
+ *
+ * self._csettings.on_url = cb_on_url # <<<<<<<<<<<<<<
+ * self._csettings.on_status = cb_on_status
+ * self._csettings.on_header_field = cb_on_header_field
+ */
+ __pyx_v_self->_csettings->on_url = __pyx_f_7aiohttp_12_http_parser_cb_on_url;
+
+ /* "aiohttp/_http_parser.pyx":364
+ *
+ * self._csettings.on_url = cb_on_url
+ * self._csettings.on_status = cb_on_status # <<<<<<<<<<<<<<
+ * self._csettings.on_header_field = cb_on_header_field
+ * self._csettings.on_header_value = cb_on_header_value
+ */
+ __pyx_v_self->_csettings->on_status = __pyx_f_7aiohttp_12_http_parser_cb_on_status;
+
+ /* "aiohttp/_http_parser.pyx":365
+ * self._csettings.on_url = cb_on_url
+ * self._csettings.on_status = cb_on_status
+ * self._csettings.on_header_field = cb_on_header_field # <<<<<<<<<<<<<<
+ * self._csettings.on_header_value = cb_on_header_value
+ * self._csettings.on_headers_complete = cb_on_headers_complete
+ */
+ __pyx_v_self->_csettings->on_header_field = __pyx_f_7aiohttp_12_http_parser_cb_on_header_field;
+
+ /* "aiohttp/_http_parser.pyx":366
+ * self._csettings.on_status = cb_on_status
+ * self._csettings.on_header_field = cb_on_header_field
+ * self._csettings.on_header_value = cb_on_header_value # <<<<<<<<<<<<<<
+ * self._csettings.on_headers_complete = cb_on_headers_complete
+ * self._csettings.on_body = cb_on_body
+ */
+ __pyx_v_self->_csettings->on_header_value = __pyx_f_7aiohttp_12_http_parser_cb_on_header_value;
+
+ /* "aiohttp/_http_parser.pyx":367
+ * self._csettings.on_header_field = cb_on_header_field
+ * self._csettings.on_header_value = cb_on_header_value
+ * self._csettings.on_headers_complete = cb_on_headers_complete # <<<<<<<<<<<<<<
+ * self._csettings.on_body = cb_on_body
+ * self._csettings.on_message_begin = cb_on_message_begin
+ */
+ __pyx_v_self->_csettings->on_headers_complete = __pyx_f_7aiohttp_12_http_parser_cb_on_headers_complete;
+
+ /* "aiohttp/_http_parser.pyx":368
+ * self._csettings.on_header_value = cb_on_header_value
+ * self._csettings.on_headers_complete = cb_on_headers_complete
+ * self._csettings.on_body = cb_on_body # <<<<<<<<<<<<<<
+ * self._csettings.on_message_begin = cb_on_message_begin
+ * self._csettings.on_message_complete = cb_on_message_complete
+ */
+ __pyx_v_self->_csettings->on_body = __pyx_f_7aiohttp_12_http_parser_cb_on_body;
+
+ /* "aiohttp/_http_parser.pyx":369
+ * self._csettings.on_headers_complete = cb_on_headers_complete
+ * self._csettings.on_body = cb_on_body
+ * self._csettings.on_message_begin = cb_on_message_begin # <<<<<<<<<<<<<<
+ * self._csettings.on_message_complete = cb_on_message_complete
+ * self._csettings.on_chunk_header = cb_on_chunk_header
+ */
+ __pyx_v_self->_csettings->on_message_begin = __pyx_f_7aiohttp_12_http_parser_cb_on_message_begin;
+
+ /* "aiohttp/_http_parser.pyx":370
+ * self._csettings.on_body = cb_on_body
+ * self._csettings.on_message_begin = cb_on_message_begin
+ * self._csettings.on_message_complete = cb_on_message_complete # <<<<<<<<<<<<<<
+ * self._csettings.on_chunk_header = cb_on_chunk_header
+ * self._csettings.on_chunk_complete = cb_on_chunk_complete
+ */
+ __pyx_v_self->_csettings->on_message_complete = __pyx_f_7aiohttp_12_http_parser_cb_on_message_complete;
+
+ /* "aiohttp/_http_parser.pyx":371
+ * self._csettings.on_message_begin = cb_on_message_begin
+ * self._csettings.on_message_complete = cb_on_message_complete
+ * self._csettings.on_chunk_header = cb_on_chunk_header # <<<<<<<<<<<<<<
+ * self._csettings.on_chunk_complete = cb_on_chunk_complete
+ *
+ */
+ __pyx_v_self->_csettings->on_chunk_header = __pyx_f_7aiohttp_12_http_parser_cb_on_chunk_header;
+
+ /* "aiohttp/_http_parser.pyx":372
+ * self._csettings.on_message_complete = cb_on_message_complete
+ * self._csettings.on_chunk_header = cb_on_chunk_header
+ * self._csettings.on_chunk_complete = cb_on_chunk_complete # <<<<<<<<<<<<<<
+ *
+ * self._last_error = None
+ */
+ __pyx_v_self->_csettings->on_chunk_complete = __pyx_f_7aiohttp_12_http_parser_cb_on_chunk_complete;
+
+ /* "aiohttp/_http_parser.pyx":374
+ * self._csettings.on_chunk_complete = cb_on_chunk_complete
+ *
+ * self._last_error = None # <<<<<<<<<<<<<<
+ * self._limit = limit
+ *
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->_last_error);
+ __Pyx_DECREF(__pyx_v_self->_last_error);
+ __pyx_v_self->_last_error = Py_None;
+
+ /* "aiohttp/_http_parser.pyx":375
+ *
+ * self._last_error = None
+ * self._limit = limit # <<<<<<<<<<<<<<
+ *
+ * cdef _process_header(self):
+ */
+ __pyx_v_self->_limit = __pyx_v_limit;
+
+ /* "aiohttp/_http_parser.pyx":327
+ * PyMem_Free(self._csettings)
+ *
+ * cdef _init(self, cparser.http_parser_type mode, # <<<<<<<<<<<<<<
+ * object protocol, object loop, int limit,
+ * object timer=None,
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser._init", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
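+
+/* The `_init` body above reduces to wiring the module-level cb_* callbacks
+ * into an `http_parser_settings` table. A minimal sketch of the equivalent
+ * wiring against the plain C http_parser API (simplified: the generated
+ * code stores the table behind `self._csettings` and keeps all Python-side
+ * state on the HttpParser instance):
+ *
+ *     http_parser_settings s;
+ *     http_parser_settings_init(&s);
+ *     s.on_url              = cb_on_url;
+ *     s.on_status           = cb_on_status;
+ *     s.on_header_field     = cb_on_header_field;
+ *     s.on_header_value     = cb_on_header_value;
+ *     s.on_headers_complete = cb_on_headers_complete;
+ *     s.on_body             = cb_on_body;
+ *     s.on_message_begin    = cb_on_message_begin;
+ *     s.on_message_complete = cb_on_message_complete;
+ *     s.on_chunk_header     = cb_on_chunk_header;
+ *     s.on_chunk_complete   = cb_on_chunk_complete;
+ *
+ * Each cb_* recovers the owning HttpParser from the parser's user-data
+ * pointer and forwards to the matching `_on_*` method below.
+ */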
+
+/* "aiohttp/_http_parser.pyx":377
+ * self._limit = limit
+ *
+ * cdef _process_header(self): # <<<<<<<<<<<<<<
+ * if self._raw_name:
+ * raw_name = bytes(self._raw_name)
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__process_header(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ PyObject *__pyx_v_raw_name = NULL;
+ PyObject *__pyx_v_raw_value = NULL;
+ PyObject *__pyx_v_name = NULL;
+ PyObject *__pyx_v_value = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_t_7;
+ int __pyx_t_8;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_process_header", 0);
+
+ /* "aiohttp/_http_parser.pyx":378
+ *
+ * cdef _process_header(self):
+ * if self._raw_name: # <<<<<<<<<<<<<<
+ * raw_name = bytes(self._raw_name)
+ * raw_value = bytes(self._raw_value)
+ */
+ __pyx_t_1 = (__pyx_v_self->_raw_name != Py_None)&&(PyByteArray_GET_SIZE(__pyx_v_self->_raw_name) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":379
+ * cdef _process_header(self):
+ * if self._raw_name:
+ * raw_name = bytes(self._raw_name) # <<<<<<<<<<<<<<
+ * raw_value = bytes(self._raw_value)
+ *
+ */
+ __pyx_t_2 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_self->_raw_name); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 379, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_v_raw_name = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":380
+ * if self._raw_name:
+ * raw_name = bytes(self._raw_name)
+ * raw_value = bytes(self._raw_value) # <<<<<<<<<<<<<<
+ *
+ * name = find_header(raw_name)
+ */
+ __pyx_t_2 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_self->_raw_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 380, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_v_raw_value = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":382
+ * raw_value = bytes(self._raw_value)
+ *
+ * name = find_header(raw_name) # <<<<<<<<<<<<<<
+ * value = raw_value.decode('utf-8', 'surrogateescape')
+ *
+ */
+ __pyx_t_2 = __pyx_f_7aiohttp_12_http_parser_find_header(__pyx_v_raw_name); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 382, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_v_name = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":383
+ *
+ * name = find_header(raw_name)
+ * value = raw_value.decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ *
+ * self._headers.add(name, value)
+ */
+ __pyx_t_2 = __Pyx_decode_bytes(__pyx_v_raw_value, 0, PY_SSIZE_T_MAX, NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 383, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_v_value = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":385
+ * value = raw_value.decode('utf-8', 'surrogateescape')
+ *
+ * self._headers.add(name, value) # <<<<<<<<<<<<<<
+ *
+ * if name is CONTENT_ENCODING:
+ */
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_headers, __pyx_n_s_add); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 385, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = NULL;
+ __pyx_t_5 = 0;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_3, function);
+ __pyx_t_5 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_3)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_name, __pyx_v_value};
+ __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 385, __pyx_L1_error)
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GOTREF(__pyx_t_2);
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_4, __pyx_v_name, __pyx_v_value};
+ __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-__pyx_t_5, 2+__pyx_t_5); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 385, __pyx_L1_error)
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GOTREF(__pyx_t_2);
+ } else
+ #endif
+ {
+ __pyx_t_6 = PyTuple_New(2+__pyx_t_5); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 385, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ if (__pyx_t_4) {
+ __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_4); __pyx_t_4 = NULL;
+ }
+ __Pyx_INCREF(__pyx_v_name);
+ __Pyx_GIVEREF(__pyx_v_name);
+ PyTuple_SET_ITEM(__pyx_t_6, 0+__pyx_t_5, __pyx_v_name);
+ __Pyx_INCREF(__pyx_v_value);
+ __Pyx_GIVEREF(__pyx_v_value);
+ PyTuple_SET_ITEM(__pyx_t_6, 1+__pyx_t_5, __pyx_v_value);
+ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 385, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
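+ /* The block above is Cython's generated calling convention for
+ * `self._headers.add(name, value)`: the bound method is unpacked into
+ * function + self where possible, then dispatched through the fastest
+ * available path (__Pyx_PyFunction_FastCall / __Pyx_PyCFunction_FastCall)
+ * before falling back to packing a 2-tuple for __Pyx_PyObject_Call. The
+ * same boilerplate recurs for every Python-level method call in this
+ * file. */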
+
+ /* "aiohttp/_http_parser.pyx":387
+ * self._headers.add(name, value)
+ *
+ * if name is CONTENT_ENCODING: # <<<<<<<<<<<<<<
+ * self._content_encoding = value
+ *
+ */
+ __pyx_t_1 = (__pyx_v_name == __pyx_v_7aiohttp_12_http_parser_CONTENT_ENCODING);
+ __pyx_t_7 = (__pyx_t_1 != 0);
+ if (__pyx_t_7) {
+
+ /* "aiohttp/_http_parser.pyx":388
+ *
+ * if name is CONTENT_ENCODING:
+ * self._content_encoding = value # <<<<<<<<<<<<<<
+ *
+ * PyByteArray_Resize(self._raw_name, 0)
+ */
+ if (!(likely(PyUnicode_CheckExact(__pyx_v_value))||((__pyx_v_value) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_v_value)->tp_name), 0))) __PYX_ERR(0, 388, __pyx_L1_error)
+ __pyx_t_2 = __pyx_v_value;
+ __Pyx_INCREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->_content_encoding);
+ __Pyx_DECREF(__pyx_v_self->_content_encoding);
+ __pyx_v_self->_content_encoding = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":387
+ * self._headers.add(name, value)
+ *
+ * if name is CONTENT_ENCODING: # <<<<<<<<<<<<<<
+ * self._content_encoding = value
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":390
+ * self._content_encoding = value
+ *
+ * PyByteArray_Resize(self._raw_name, 0) # <<<<<<<<<<<<<<
+ * PyByteArray_Resize(self._raw_value, 0)
+ * self._has_value = False
+ */
+ __pyx_t_2 = __pyx_v_self->_raw_name;
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_5 = PyByteArray_Resize(__pyx_t_2, 0); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 390, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":391
+ *
+ * PyByteArray_Resize(self._raw_name, 0)
+ * PyByteArray_Resize(self._raw_value, 0) # <<<<<<<<<<<<<<
+ * self._has_value = False
+ * self._raw_headers.append((raw_name, raw_value))
+ */
+ __pyx_t_2 = __pyx_v_self->_raw_value;
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_5 = PyByteArray_Resize(__pyx_t_2, 0); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(0, 391, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":392
+ * PyByteArray_Resize(self._raw_name, 0)
+ * PyByteArray_Resize(self._raw_value, 0)
+ * self._has_value = False # <<<<<<<<<<<<<<
+ * self._raw_headers.append((raw_name, raw_value))
+ *
+ */
+ __pyx_v_self->_has_value = 0;
+
+ /* "aiohttp/_http_parser.pyx":393
+ * PyByteArray_Resize(self._raw_value, 0)
+ * self._has_value = False
+ * self._raw_headers.append((raw_name, raw_value)) # <<<<<<<<<<<<<<
+ *
+ * cdef _on_header_field(self, char* at, size_t length):
+ */
+ if (unlikely(__pyx_v_self->_raw_headers == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "append");
+ __PYX_ERR(0, 393, __pyx_L1_error)
+ }
+ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 393, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_raw_name);
+ __Pyx_GIVEREF(__pyx_v_raw_name);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_raw_name);
+ __Pyx_INCREF(__pyx_v_raw_value);
+ __Pyx_GIVEREF(__pyx_v_raw_value);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_v_raw_value);
+ __pyx_t_8 = __Pyx_PyList_Append(__pyx_v_self->_raw_headers, __pyx_t_2); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 393, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":378
+ *
+ * cdef _process_header(self):
+ * if self._raw_name: # <<<<<<<<<<<<<<
+ * raw_name = bytes(self._raw_name)
+ * raw_value = bytes(self._raw_value)
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":377
+ * self._limit = limit
+ *
+ * cdef _process_header(self): # <<<<<<<<<<<<<<
+ * if self._raw_name:
+ * raw_name = bytes(self._raw_name)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser._process_header", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_raw_name);
+ __Pyx_XDECREF(__pyx_v_raw_value);
+ __Pyx_XDECREF(__pyx_v_name);
+ __Pyx_XDECREF(__pyx_v_value);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
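+
+/* `_process_header` flushes one accumulated header pair: both scratch
+ * bytearrays are snapshotted into bytes, the name is looked up through
+ * find_header(), and the value is decoded with the `surrogateescape` error
+ * handler so arbitrary (non-UTF-8) header bytes survive. The decode on
+ * line 383 expands to the CPython call sketched here (error handling
+ * elided):
+ *
+ *     PyObject *value = PyUnicode_DecodeUTF8(buf, len, "surrogateescape");
+ *
+ * Afterwards PyByteArray_Resize(..., 0) truncates both scratch buffers in
+ * place so their allocations can be reused for the next header line.
+ */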
+
+/* "aiohttp/_http_parser.pyx":395
+ * self._raw_headers.append((raw_name, raw_value))
+ *
+ * cdef _on_header_field(self, char* at, size_t length): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t size
+ * cdef char *buf
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_header_field(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, char *__pyx_v_at, size_t __pyx_v_length) {
+ Py_ssize_t __pyx_v_size;
+ char *__pyx_v_buf;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ Py_ssize_t __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_on_header_field", 0);
+
+ /* "aiohttp/_http_parser.pyx":398
+ * cdef Py_ssize_t size
+ * cdef char *buf
+ * if self._has_value: # <<<<<<<<<<<<<<
+ * self._process_header()
+ *
+ */
+ __pyx_t_1 = (__pyx_v_self->_has_value != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":399
+ * cdef char *buf
+ * if self._has_value:
+ * self._process_header() # <<<<<<<<<<<<<<
+ *
+ * size = PyByteArray_Size(self._raw_name)
+ */
+ __pyx_t_2 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self->__pyx_vtab)->_process_header(__pyx_v_self); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 399, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":398
+ * cdef Py_ssize_t size
+ * cdef char *buf
+ * if self._has_value: # <<<<<<<<<<<<<<
+ * self._process_header()
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":401
+ * self._process_header()
+ *
+ * size = PyByteArray_Size(self._raw_name) # <<<<<<<<<<<<<<
+ * PyByteArray_Resize(self._raw_name, size + length)
+ * buf = PyByteArray_AsString(self._raw_name)
+ */
+ __pyx_t_2 = __pyx_v_self->_raw_name;
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_3 = PyByteArray_Size(__pyx_t_2); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1L))) __PYX_ERR(0, 401, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v_size = __pyx_t_3;
+
+ /* "aiohttp/_http_parser.pyx":402
+ *
+ * size = PyByteArray_Size(self._raw_name)
+ * PyByteArray_Resize(self._raw_name, size + length) # <<<<<<<<<<<<<<
+ * buf = PyByteArray_AsString(self._raw_name)
+ * memcpy(buf + size, at, length)
+ */
+ __pyx_t_2 = __pyx_v_self->_raw_name;
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_4 = PyByteArray_Resize(__pyx_t_2, (__pyx_v_size + __pyx_v_length)); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 402, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":403
+ * size = PyByteArray_Size(self._raw_name)
+ * PyByteArray_Resize(self._raw_name, size + length)
+ * buf = PyByteArray_AsString(self._raw_name) # <<<<<<<<<<<<<<
+ * memcpy(buf + size, at, length)
+ *
+ */
+ __pyx_t_2 = __pyx_v_self->_raw_name;
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_v_buf = PyByteArray_AsString(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":404
+ * PyByteArray_Resize(self._raw_name, size + length)
+ * buf = PyByteArray_AsString(self._raw_name)
+ * memcpy(buf + size, at, length) # <<<<<<<<<<<<<<
+ *
+ * cdef _on_header_value(self, char* at, size_t length):
+ */
+ (void)(memcpy((__pyx_v_buf + __pyx_v_size), __pyx_v_at, __pyx_v_length));
+
+ /* "aiohttp/_http_parser.pyx":395
+ * self._raw_headers.append((raw_name, raw_value))
+ *
+ * cdef _on_header_field(self, char* at, size_t length): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t size
+ * cdef char *buf
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser._on_header_field", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":406
+ * memcpy(buf + size, at, length)
+ *
+ * cdef _on_header_value(self, char* at, size_t length): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t size
+ * cdef char *buf
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_header_value(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, char *__pyx_v_at, size_t __pyx_v_length) {
+ Py_ssize_t __pyx_v_size;
+ char *__pyx_v_buf;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ int __pyx_t_3;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_on_header_value", 0);
+
+ /* "aiohttp/_http_parser.pyx":410
+ * cdef char *buf
+ *
+ * size = PyByteArray_Size(self._raw_value) # <<<<<<<<<<<<<<
+ * PyByteArray_Resize(self._raw_value, size + length)
+ * buf = PyByteArray_AsString(self._raw_value)
+ */
+ __pyx_t_1 = __pyx_v_self->_raw_value;
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_t_2 = PyByteArray_Size(__pyx_t_1); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1L))) __PYX_ERR(0, 410, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_size = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":411
+ *
+ * size = PyByteArray_Size(self._raw_value)
+ * PyByteArray_Resize(self._raw_value, size + length) # <<<<<<<<<<<<<<
+ * buf = PyByteArray_AsString(self._raw_value)
+ * memcpy(buf + size, at, length)
+ */
+ __pyx_t_1 = __pyx_v_self->_raw_value;
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_t_3 = PyByteArray_Resize(__pyx_t_1, (__pyx_v_size + __pyx_v_length)); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 411, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":412
+ * size = PyByteArray_Size(self._raw_value)
+ * PyByteArray_Resize(self._raw_value, size + length)
+ * buf = PyByteArray_AsString(self._raw_value) # <<<<<<<<<<<<<<
+ * memcpy(buf + size, at, length)
+ * self._has_value = True
+ */
+ __pyx_t_1 = __pyx_v_self->_raw_value;
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_buf = PyByteArray_AsString(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":413
+ * PyByteArray_Resize(self._raw_value, size + length)
+ * buf = PyByteArray_AsString(self._raw_value)
+ * memcpy(buf + size, at, length) # <<<<<<<<<<<<<<
+ * self._has_value = True
+ *
+ */
+ (void)(memcpy((__pyx_v_buf + __pyx_v_size), __pyx_v_at, __pyx_v_length));
+
+ /* "aiohttp/_http_parser.pyx":414
+ * buf = PyByteArray_AsString(self._raw_value)
+ * memcpy(buf + size, at, length)
+ * self._has_value = True # <<<<<<<<<<<<<<
+ *
+ * cdef _on_headers_complete(self):
+ */
+ __pyx_v_self->_has_value = 1;
+
+ /* "aiohttp/_http_parser.pyx":406
+ * memcpy(buf + size, at, length)
+ *
+ * cdef _on_header_value(self, char* at, size_t length): # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t size
+ * cdef char *buf
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser._on_header_value", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
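+
+/* `_on_header_field` and `_on_header_value` share one amortized append
+ * pattern: grow the scratch bytearray, then memcpy the incoming fragment
+ * onto its tail, so header data split across several parser callbacks is
+ * reassembled without temporary bytes objects. Condensed (a sketch, error
+ * handling elided):
+ *
+ *     Py_ssize_t size = PyByteArray_Size(ba);
+ *     PyByteArray_Resize(ba, size + length);
+ *     memcpy(PyByteArray_AsString(ba) + size, at, length);
+ *
+ * `_on_header_field` additionally flushes the previous pair via
+ * `_process_header()` whenever a value has already been collected
+ * (`self._has_value`), which is what delimits consecutive headers.
+ */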
+
+/* "aiohttp/_http_parser.pyx":416
+ * self._has_value = True
+ *
+ * cdef _on_headers_complete(self): # <<<<<<<<<<<<<<
+ * self._process_header()
+ *
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_headers_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ PyObject *__pyx_v_method = NULL;
+ int __pyx_v_should_close;
+ unsigned int __pyx_v_upgrade;
+ unsigned int __pyx_v_chunked;
+ PyObject *__pyx_v_raw_headers = NULL;
+ PyObject *__pyx_v_headers = NULL;
+ PyObject *__pyx_v_encoding = NULL;
+ PyObject *__pyx_v_enc = NULL;
+ PyObject *__pyx_v_msg = NULL;
+ PyObject *__pyx_v_payload = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ unsigned int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_t_8;
+ int __pyx_t_9;
+ int __pyx_t_10;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_on_headers_complete", 0);
+
+ /* "aiohttp/_http_parser.pyx":417
+ *
+ * cdef _on_headers_complete(self):
+ * self._process_header() # <<<<<<<<<<<<<<
+ *
+ * method = http_method_str(self._cparser.method)
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self->__pyx_vtab)->_process_header(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 417, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":419
+ * self._process_header()
+ *
+ * method = http_method_str(self._cparser.method) # <<<<<<<<<<<<<<
+ * should_close = not cparser.http_should_keep_alive(self._cparser)
+ * upgrade = self._cparser.upgrade
+ */
+ __pyx_t_1 = __pyx_f_7aiohttp_12_http_parser_http_method_str(__pyx_v_self->_cparser->method); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 419, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_method = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":420
+ *
+ * method = http_method_str(self._cparser.method)
+ * should_close = not cparser.http_should_keep_alive(self._cparser) # <<<<<<<<<<<<<<
+ * upgrade = self._cparser.upgrade
+ * chunked = self._cparser.flags & cparser.F_CHUNKED
+ */
+ __pyx_v_should_close = (!(http_should_keep_alive(__pyx_v_self->_cparser) != 0));
+
+ /* "aiohttp/_http_parser.pyx":421
+ * method = http_method_str(self._cparser.method)
+ * should_close = not cparser.http_should_keep_alive(self._cparser)
+ * upgrade = self._cparser.upgrade # <<<<<<<<<<<<<<
+ * chunked = self._cparser.flags & cparser.F_CHUNKED
+ *
+ */
+ __pyx_t_2 = __pyx_v_self->_cparser->upgrade;
+ __pyx_v_upgrade = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":422
+ * should_close = not cparser.http_should_keep_alive(self._cparser)
+ * upgrade = self._cparser.upgrade
+ * chunked = self._cparser.flags & cparser.F_CHUNKED # <<<<<<<<<<<<<<
+ *
+ * raw_headers = tuple(self._raw_headers)
+ */
+ __pyx_v_chunked = (__pyx_v_self->_cparser->flags & F_CHUNKED);
+
+ /* "aiohttp/_http_parser.pyx":424
+ * chunked = self._cparser.flags & cparser.F_CHUNKED
+ *
+ * raw_headers = tuple(self._raw_headers) # <<<<<<<<<<<<<<
+ * headers = CIMultiDictProxy(self._headers)
+ *
+ */
+ if (unlikely(__pyx_v_self->_raw_headers == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+ __PYX_ERR(0, 424, __pyx_L1_error)
+ }
+ __pyx_t_1 = PyList_AsTuple(__pyx_v_self->_raw_headers); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 424, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_raw_headers = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":425
+ *
+ * raw_headers = tuple(self._raw_headers)
+ * headers = CIMultiDictProxy(self._headers) # <<<<<<<<<<<<<<
+ *
+ * if upgrade or self._cparser.method == 5: # cparser.CONNECT:
+ */
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_CIMultiDictProxy);
+ __pyx_t_3 = __pyx_v_7aiohttp_12_http_parser_CIMultiDictProxy; __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_3, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_v_self->_headers) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_self->_headers);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 425, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_v_headers = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":427
+ * headers = CIMultiDictProxy(self._headers)
+ *
+ * if upgrade or self._cparser.method == 5: # cparser.CONNECT: # <<<<<<<<<<<<<<
+ * self._upgraded = True
+ *
+ */
+ __pyx_t_6 = (__pyx_v_upgrade != 0);
+ if (!__pyx_t_6) {
+ } else {
+ __pyx_t_5 = __pyx_t_6;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_6 = ((__pyx_v_self->_cparser->method == 5) != 0);
+ __pyx_t_5 = __pyx_t_6;
+ __pyx_L4_bool_binop_done:;
+ if (__pyx_t_5) {
+
+ /* "aiohttp/_http_parser.pyx":428
+ *
+ * if upgrade or self._cparser.method == 5: # cparser.CONNECT:
+ * self._upgraded = True # <<<<<<<<<<<<<<
+ *
+ * # do not support old websocket spec
+ */
+ __pyx_v_self->_upgraded = 1;
+
+ /* "aiohttp/_http_parser.pyx":427
+ * headers = CIMultiDictProxy(self._headers)
+ *
+ * if upgrade or self._cparser.method == 5: # cparser.CONNECT: # <<<<<<<<<<<<<<
+ * self._upgraded = True
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":431
+ *
+ * # do not support old websocket spec
+ * if SEC_WEBSOCKET_KEY1 in headers: # <<<<<<<<<<<<<<
+ * raise InvalidHeader(SEC_WEBSOCKET_KEY1)
+ *
+ */
+ __pyx_t_5 = (__Pyx_PySequence_ContainsTF(__pyx_v_7aiohttp_12_http_parser_SEC_WEBSOCKET_KEY1, __pyx_v_headers, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 431, __pyx_L1_error)
+ __pyx_t_6 = (__pyx_t_5 != 0);
+ if (unlikely(__pyx_t_6)) {
+
+ /* "aiohttp/_http_parser.pyx":432
+ * # do not support old websocket spec
+ * if SEC_WEBSOCKET_KEY1 in headers:
+ * raise InvalidHeader(SEC_WEBSOCKET_KEY1) # <<<<<<<<<<<<<<
+ *
+ * encoding = None
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_InvalidHeader); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 432, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_3, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_v_7aiohttp_12_http_parser_SEC_WEBSOCKET_KEY1) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_7aiohttp_12_http_parser_SEC_WEBSOCKET_KEY1);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 432, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(0, 432, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":431
+ *
+ * # do not support old websocket spec
+ * if SEC_WEBSOCKET_KEY1 in headers: # <<<<<<<<<<<<<<
+ * raise InvalidHeader(SEC_WEBSOCKET_KEY1)
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":434
+ * raise InvalidHeader(SEC_WEBSOCKET_KEY1)
+ *
+ * encoding = None # <<<<<<<<<<<<<<
+ * enc = self._content_encoding
+ * if enc is not None:
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_encoding = Py_None;
+
+ /* "aiohttp/_http_parser.pyx":435
+ *
+ * encoding = None
+ * enc = self._content_encoding # <<<<<<<<<<<<<<
+ * if enc is not None:
+ * self._content_encoding = None
+ */
+ __pyx_t_1 = __pyx_v_self->_content_encoding;
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_enc = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":436
+ * encoding = None
+ * enc = self._content_encoding
+ * if enc is not None: # <<<<<<<<<<<<<<
+ * self._content_encoding = None
+ * enc = enc.lower()
+ */
+ __pyx_t_6 = (__pyx_v_enc != Py_None);
+ __pyx_t_5 = (__pyx_t_6 != 0);
+ if (__pyx_t_5) {
+
+ /* "aiohttp/_http_parser.pyx":437
+ * enc = self._content_encoding
+ * if enc is not None:
+ * self._content_encoding = None # <<<<<<<<<<<<<<
+ * enc = enc.lower()
+ * if enc in ('gzip', 'deflate', 'br'):
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->_content_encoding);
+ __Pyx_DECREF(__pyx_v_self->_content_encoding);
+ __pyx_v_self->_content_encoding = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":438
+ * if enc is not None:
+ * self._content_encoding = None
+ * enc = enc.lower() # <<<<<<<<<<<<<<
+ * if enc in ('gzip', 'deflate', 'br'):
+ * encoding = enc
+ */
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_enc, __pyx_n_s_lower); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 438, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_3, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 438, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF_SET(__pyx_v_enc, __pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":439
+ * self._content_encoding = None
+ * enc = enc.lower()
+ * if enc in ('gzip', 'deflate', 'br'): # <<<<<<<<<<<<<<
+ * encoding = enc
+ *
+ */
+ __Pyx_INCREF(__pyx_v_enc);
+ __pyx_t_1 = __pyx_v_enc;
+ __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_gzip, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 439, __pyx_L1_error)
+ if (!__pyx_t_6) {
+ } else {
+ __pyx_t_5 = __pyx_t_6;
+ goto __pyx_L9_bool_binop_done;
+ }
+ __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_deflate, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 439, __pyx_L1_error)
+ if (!__pyx_t_6) {
+ } else {
+ __pyx_t_5 = __pyx_t_6;
+ goto __pyx_L9_bool_binop_done;
+ }
+ __pyx_t_6 = (__Pyx_PyUnicode_Equals(__pyx_t_1, __pyx_n_u_br, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 439, __pyx_L1_error)
+ __pyx_t_5 = __pyx_t_6;
+ __pyx_L9_bool_binop_done:;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_6 = (__pyx_t_5 != 0);
+ if (__pyx_t_6) {
+
+ /* "aiohttp/_http_parser.pyx":440
+ * enc = enc.lower()
+ * if enc in ('gzip', 'deflate', 'br'):
+ * encoding = enc # <<<<<<<<<<<<<<
+ *
+ * if self._cparser.type == cparser.HTTP_REQUEST:
+ */
+ __Pyx_INCREF(__pyx_v_enc);
+ __Pyx_DECREF_SET(__pyx_v_encoding, __pyx_v_enc);
+
+ /* "aiohttp/_http_parser.pyx":439
+ * self._content_encoding = None
+ * enc = enc.lower()
+ * if enc in ('gzip', 'deflate', 'br'): # <<<<<<<<<<<<<<
+ * encoding = enc
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":436
+ * encoding = None
+ * enc = self._content_encoding
+ * if enc is not None: # <<<<<<<<<<<<<<
+ * self._content_encoding = None
+ * enc = enc.lower()
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":442
+ * encoding = enc
+ *
+ * if self._cparser.type == cparser.HTTP_REQUEST: # <<<<<<<<<<<<<<
+ * msg = _new_request_message(
+ * method, self._path,
+ */
+ __pyx_t_6 = ((__pyx_v_self->_cparser->type == HTTP_REQUEST) != 0);
+ if (__pyx_t_6) {
+
+ /* "aiohttp/_http_parser.pyx":444
+ * if self._cparser.type == cparser.HTTP_REQUEST:
+ * msg = _new_request_message(
+ * method, self._path, # <<<<<<<<<<<<<<
+ * self.http_version(), headers, raw_headers,
+ * should_close, encoding, upgrade, chunked, self._url)
+ */
+ __pyx_t_1 = __pyx_v_self->_path;
+ __Pyx_INCREF(__pyx_t_1);
+
+ /* "aiohttp/_http_parser.pyx":445
+ * msg = _new_request_message(
+ * method, self._path,
+ * self.http_version(), headers, raw_headers, # <<<<<<<<<<<<<<
+ * should_close, encoding, upgrade, chunked, self._url)
+ * else:
+ */
+ __pyx_t_3 = __pyx_f_7aiohttp_12_http_parser_10HttpParser_http_version(__pyx_v_self); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 445, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+
+ /* "aiohttp/_http_parser.pyx":446
+ * method, self._path,
+ * self.http_version(), headers, raw_headers,
+ * should_close, encoding, upgrade, chunked, self._url) # <<<<<<<<<<<<<<
+ * else:
+ * msg = _new_response_message(
+ */
+ __pyx_t_4 = __pyx_v_self->_url;
+ __Pyx_INCREF(__pyx_t_4);
+
+ /* "aiohttp/_http_parser.pyx":443
+ *
+ * if self._cparser.type == cparser.HTTP_REQUEST:
+ * msg = _new_request_message( # <<<<<<<<<<<<<<
+ * method, self._path,
+ * self.http_version(), headers, raw_headers,
+ */
+ __pyx_t_7 = __pyx_f_7aiohttp_12_http_parser__new_request_message(__pyx_v_method, ((PyObject*)__pyx_t_1), __pyx_t_3, __pyx_v_headers, __pyx_v_raw_headers, __pyx_v_should_close, __pyx_v_encoding, __pyx_v_upgrade, __pyx_v_chunked, __pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 443, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_v_msg = __pyx_t_7;
+ __pyx_t_7 = 0;
+
+ /* "aiohttp/_http_parser.pyx":442
+ * encoding = enc
+ *
+ * if self._cparser.type == cparser.HTTP_REQUEST: # <<<<<<<<<<<<<<
+ * msg = _new_request_message(
+ * method, self._path,
+ */
+ goto __pyx_L12;
+ }
+
+ /* "aiohttp/_http_parser.pyx":448
+ * should_close, encoding, upgrade, chunked, self._url)
+ * else:
+ * msg = _new_response_message( # <<<<<<<<<<<<<<
+ * self.http_version(), self._cparser.status_code, self._reason,
+ * headers, raw_headers, should_close, encoding,
+ */
+ /*else*/ {
+
+ /* "aiohttp/_http_parser.pyx":449
+ * else:
+ * msg = _new_response_message(
+ * self.http_version(), self._cparser.status_code, self._reason, # <<<<<<<<<<<<<<
+ * headers, raw_headers, should_close, encoding,
+ * upgrade, chunked)
+ */
+ __pyx_t_7 = __pyx_f_7aiohttp_12_http_parser_10HttpParser_http_version(__pyx_v_self); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 449, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_4 = __pyx_v_self->_reason;
+ __Pyx_INCREF(__pyx_t_4);
+
+ /* "aiohttp/_http_parser.pyx":448
+ * should_close, encoding, upgrade, chunked, self._url)
+ * else:
+ * msg = _new_response_message( # <<<<<<<<<<<<<<
+ * self.http_version(), self._cparser.status_code, self._reason,
+ * headers, raw_headers, should_close, encoding,
+ */
+ __pyx_t_3 = __pyx_f_7aiohttp_12_http_parser__new_response_message(__pyx_t_7, __pyx_v_self->_cparser->status_code, ((PyObject*)__pyx_t_4), __pyx_v_headers, __pyx_v_raw_headers, __pyx_v_should_close, __pyx_v_encoding, __pyx_v_upgrade, __pyx_v_chunked); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 448, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_v_msg = __pyx_t_3;
+ __pyx_t_3 = 0;
+ }
+ __pyx_L12:;
+
+ /* "aiohttp/_http_parser.pyx":453
+ * upgrade, chunked)
+ *
+ * if (ULLONG_MAX > self._cparser.content_length > 0 or chunked or # <<<<<<<<<<<<<<
+ * self._cparser.method == 5 or # CONNECT: 5
+ * (self._cparser.status_code >= 199 and
+ */
+ __pyx_t_5 = (ULLONG_MAX > __pyx_v_self->_cparser->content_length);
+ if (__pyx_t_5) {
+ __pyx_t_5 = (__pyx_v_self->_cparser->content_length > 0);
+ }
+ __pyx_t_8 = (__pyx_t_5 != 0);
+ if (!__pyx_t_8) {
+ } else {
+ __pyx_t_6 = __pyx_t_8;
+ goto __pyx_L14_bool_binop_done;
+ }
+ __pyx_t_8 = (__pyx_v_chunked != 0);
+ if (!__pyx_t_8) {
+ } else {
+ __pyx_t_6 = __pyx_t_8;
+ goto __pyx_L14_bool_binop_done;
+ }
+
+ /* "aiohttp/_http_parser.pyx":454
+ *
+ * if (ULLONG_MAX > self._cparser.content_length > 0 or chunked or
+ * self._cparser.method == 5 or # CONNECT: 5 # <<<<<<<<<<<<<<
+ * (self._cparser.status_code >= 199 and
+ * self._cparser.content_length == ULLONG_MAX and
+ */
+ __pyx_t_8 = ((__pyx_v_self->_cparser->method == 5) != 0);
+ if (!__pyx_t_8) {
+ } else {
+ __pyx_t_6 = __pyx_t_8;
+ goto __pyx_L14_bool_binop_done;
+ }
+
+ /* "aiohttp/_http_parser.pyx":455
+ * if (ULLONG_MAX > self._cparser.content_length > 0 or chunked or
+ * self._cparser.method == 5 or # CONNECT: 5
+ * (self._cparser.status_code >= 199 and # <<<<<<<<<<<<<<
+ * self._cparser.content_length == ULLONG_MAX and
+ * self._read_until_eof)
+ */
+ __pyx_t_8 = ((__pyx_v_self->_cparser->status_code >= 0xC7) != 0);
+ if (__pyx_t_8) {
+ } else {
+ __pyx_t_6 = __pyx_t_8;
+ goto __pyx_L14_bool_binop_done;
+ }
+
+ /* "aiohttp/_http_parser.pyx":456
+ * self._cparser.method == 5 or # CONNECT: 5
+ * (self._cparser.status_code >= 199 and
+ * self._cparser.content_length == ULLONG_MAX and # <<<<<<<<<<<<<<
+ * self._read_until_eof)
+ * ):
+ */
+ __pyx_t_8 = ((__pyx_v_self->_cparser->content_length == ULLONG_MAX) != 0);
+ if (__pyx_t_8) {
+ } else {
+ __pyx_t_6 = __pyx_t_8;
+ goto __pyx_L14_bool_binop_done;
+ }
+
+ /* "aiohttp/_http_parser.pyx":457
+ * (self._cparser.status_code >= 199 and
+ * self._cparser.content_length == ULLONG_MAX and
+ * self._read_until_eof) # <<<<<<<<<<<<<<
+ * ):
+ * payload = StreamReader(
+ */
+ __pyx_t_8 = (__pyx_v_self->_read_until_eof != 0);
+ __pyx_t_6 = __pyx_t_8;
+ __pyx_L14_bool_binop_done:;
+
+ /* "aiohttp/_http_parser.pyx":453
+ * upgrade, chunked)
+ *
+ * if (ULLONG_MAX > self._cparser.content_length > 0 or chunked or # <<<<<<<<<<<<<<
+ * self._cparser.method == 5 or # CONNECT: 5
+ * (self._cparser.status_code >= 199 and
+ */
+ if (__pyx_t_6) {
+
+ /* "aiohttp/_http_parser.pyx":459
+ * self._read_until_eof)
+ * ):
+ * payload = StreamReader( # <<<<<<<<<<<<<<
+ * self._protocol, timer=self._timer, loop=self._loop,
+ * limit=self._limit)
+ */
+ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 459, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_INCREF(__pyx_v_self->_protocol);
+ __Pyx_GIVEREF(__pyx_v_self->_protocol);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_self->_protocol);
+
+ /* "aiohttp/_http_parser.pyx":460
+ * ):
+ * payload = StreamReader(
+ * self._protocol, timer=self._timer, loop=self._loop, # <<<<<<<<<<<<<<
+ * limit=self._limit)
+ * else:
+ */
+ __pyx_t_4 = __Pyx_PyDict_NewPresized(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 460, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_timer, __pyx_v_self->_timer) < 0) __PYX_ERR(0, 460, __pyx_L1_error)
+ if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_loop, __pyx_v_self->_loop) < 0) __PYX_ERR(0, 460, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":461
+ * payload = StreamReader(
+ * self._protocol, timer=self._timer, loop=self._loop,
+ * limit=self._limit) # <<<<<<<<<<<<<<
+ * else:
+ * payload = EMPTY_PAYLOAD
+ */
+ __pyx_t_7 = __Pyx_PyInt_From_int(__pyx_v_self->_limit); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 461, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_limit, __pyx_t_7) < 0) __PYX_ERR(0, 460, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+
+ /* "aiohttp/_http_parser.pyx":459
+ * self._read_until_eof)
+ * ):
+ * payload = StreamReader( # <<<<<<<<<<<<<<
+ * self._protocol, timer=self._timer, loop=self._loop,
+ * limit=self._limit)
+ */
+ __pyx_t_7 = __Pyx_PyObject_Call(__pyx_v_7aiohttp_12_http_parser_StreamReader, __pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 459, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_v_payload = __pyx_t_7;
+ __pyx_t_7 = 0;
+
+ /* "aiohttp/_http_parser.pyx":453
+ * upgrade, chunked)
+ *
+ * if (ULLONG_MAX > self._cparser.content_length > 0 or chunked or # <<<<<<<<<<<<<<
+ * self._cparser.method == 5 or # CONNECT: 5
+ * (self._cparser.status_code >= 199 and
+ */
+ goto __pyx_L13;
+ }
+
+ /* "aiohttp/_http_parser.pyx":463
+ * limit=self._limit)
+ * else:
+ * payload = EMPTY_PAYLOAD # <<<<<<<<<<<<<<
+ *
+ * self._payload = payload
+ */
+ /*else*/ {
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_EMPTY_PAYLOAD);
+ __pyx_v_payload = __pyx_v_7aiohttp_12_http_parser_EMPTY_PAYLOAD;
+ }
+ __pyx_L13:;
+
+ /* "aiohttp/_http_parser.pyx":465
+ * payload = EMPTY_PAYLOAD
+ *
+ * self._payload = payload # <<<<<<<<<<<<<<
+ * if encoding is not None and self._auto_decompress:
+ * self._payload = DeflateBuffer(payload, encoding)
+ */
+ __Pyx_INCREF(__pyx_v_payload);
+ __Pyx_GIVEREF(__pyx_v_payload);
+ __Pyx_GOTREF(__pyx_v_self->_payload);
+ __Pyx_DECREF(__pyx_v_self->_payload);
+ __pyx_v_self->_payload = __pyx_v_payload;
+
+ /* "aiohttp/_http_parser.pyx":466
+ *
+ * self._payload = payload
+ * if encoding is not None and self._auto_decompress: # <<<<<<<<<<<<<<
+ * self._payload = DeflateBuffer(payload, encoding)
+ *
+ */
+ __pyx_t_8 = (__pyx_v_encoding != Py_None);
+ __pyx_t_5 = (__pyx_t_8 != 0);
+ if (__pyx_t_5) {
+ } else {
+ __pyx_t_6 = __pyx_t_5;
+ goto __pyx_L21_bool_binop_done;
+ }
+ __pyx_t_5 = (__pyx_v_self->_auto_decompress != 0);
+ __pyx_t_6 = __pyx_t_5;
+ __pyx_L21_bool_binop_done:;
+ if (__pyx_t_6) {
+
+ /* "aiohttp/_http_parser.pyx":467
+ * self._payload = payload
+ * if encoding is not None and self._auto_decompress:
+ * self._payload = DeflateBuffer(payload, encoding) # <<<<<<<<<<<<<<
+ *
+ * if not self._response_with_body:
+ */
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_DeflateBuffer);
+ __pyx_t_4 = __pyx_v_7aiohttp_12_http_parser_DeflateBuffer; __pyx_t_3 = NULL;
+ __pyx_t_9 = 0;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ __pyx_t_9 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_4)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_payload, __pyx_v_encoding};
+ __pyx_t_7 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 467, __pyx_L1_error)
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_GOTREF(__pyx_t_7);
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_payload, __pyx_v_encoding};
+ __pyx_t_7 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_9, 2+__pyx_t_9); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 467, __pyx_L1_error)
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_GOTREF(__pyx_t_7);
+ } else
+ #endif
+ {
+ __pyx_t_1 = PyTuple_New(2+__pyx_t_9); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 467, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (__pyx_t_3) {
+ __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_3); __pyx_t_3 = NULL;
+ }
+ __Pyx_INCREF(__pyx_v_payload);
+ __Pyx_GIVEREF(__pyx_v_payload);
+ PyTuple_SET_ITEM(__pyx_t_1, 0+__pyx_t_9, __pyx_v_payload);
+ __Pyx_INCREF(__pyx_v_encoding);
+ __Pyx_GIVEREF(__pyx_v_encoding);
+ PyTuple_SET_ITEM(__pyx_t_1, 1+__pyx_t_9, __pyx_v_encoding);
+ __pyx_t_7 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_1, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 467, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GIVEREF(__pyx_t_7);
+ __Pyx_GOTREF(__pyx_v_self->_payload);
+ __Pyx_DECREF(__pyx_v_self->_payload);
+ __pyx_v_self->_payload = __pyx_t_7;
+ __pyx_t_7 = 0;
+
+ /* "aiohttp/_http_parser.pyx":466
+ *
+ * self._payload = payload
+ * if encoding is not None and self._auto_decompress: # <<<<<<<<<<<<<<
+ * self._payload = DeflateBuffer(payload, encoding)
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":469
+ * self._payload = DeflateBuffer(payload, encoding)
+ *
+ * if not self._response_with_body: # <<<<<<<<<<<<<<
+ * payload = EMPTY_PAYLOAD
+ *
+ */
+ __pyx_t_6 = ((!(__pyx_v_self->_response_with_body != 0)) != 0);
+ if (__pyx_t_6) {
+
+ /* "aiohttp/_http_parser.pyx":470
+ *
+ * if not self._response_with_body:
+ * payload = EMPTY_PAYLOAD # <<<<<<<<<<<<<<
+ *
+ * self._messages.append((msg, payload))
+ */
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_EMPTY_PAYLOAD);
+ __Pyx_DECREF_SET(__pyx_v_payload, __pyx_v_7aiohttp_12_http_parser_EMPTY_PAYLOAD);
+
+ /* "aiohttp/_http_parser.pyx":469
+ * self._payload = DeflateBuffer(payload, encoding)
+ *
+ * if not self._response_with_body: # <<<<<<<<<<<<<<
+ * payload = EMPTY_PAYLOAD
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":472
+ * payload = EMPTY_PAYLOAD
+ *
+ * self._messages.append((msg, payload)) # <<<<<<<<<<<<<<
+ *
+ * cdef _on_message_complete(self):
+ */
+ if (unlikely(__pyx_v_self->_messages == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "append");
+ __PYX_ERR(0, 472, __pyx_L1_error)
+ }
+ __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 472, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_v_msg);
+ __Pyx_GIVEREF(__pyx_v_msg);
+ PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_v_msg);
+ __Pyx_INCREF(__pyx_v_payload);
+ __Pyx_GIVEREF(__pyx_v_payload);
+ PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_v_payload);
+ __pyx_t_10 = __Pyx_PyList_Append(__pyx_v_self->_messages, __pyx_t_7); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 472, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+
+ /* "aiohttp/_http_parser.pyx":416
+ * self._has_value = True
+ *
+ * cdef _on_headers_complete(self): # <<<<<<<<<<<<<<
+ * self._process_header()
+ *
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser._on_headers_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_method);
+ __Pyx_XDECREF(__pyx_v_raw_headers);
+ __Pyx_XDECREF(__pyx_v_headers);
+ __Pyx_XDECREF(__pyx_v_encoding);
+ __Pyx_XDECREF(__pyx_v_enc);
+ __Pyx_XDECREF(__pyx_v_msg);
+ __Pyx_XDECREF(__pyx_v_payload);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
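+
+/* Payload selection in `_on_headers_complete`, condensed: a StreamReader
+ * is attached whenever a body may follow, i.e. 0 < content_length <
+ * ULLONG_MAX, or the transfer is chunked, or the method is CONNECT (5), or
+ * the length is unset (ULLONG_MAX) with status_code >= 199 and
+ * read_until_eof configured; every other message gets the shared
+ * EMPTY_PAYLOAD sentinel. When a recognized Content-Encoding (gzip,
+ * deflate or br) was captured and auto-decompression is enabled, the
+ * payload is wrapped in a DeflateBuffer that inflates on the fly.
+ */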
+
+/* "aiohttp/_http_parser.pyx":474
+ * self._messages.append((msg, payload))
+ *
+ * cdef _on_message_complete(self): # <<<<<<<<<<<<<<
+ * self._payload.feed_eof()
+ * self._payload = None
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_message_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_on_message_complete", 0);
+
+ /* "aiohttp/_http_parser.pyx":475
+ *
+ * cdef _on_message_complete(self):
+ * self._payload.feed_eof() # <<<<<<<<<<<<<<
+ * self._payload = None
+ *
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_payload, __pyx_n_s_feed_eof); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 475, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 475, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":476
+ * cdef _on_message_complete(self):
+ * self._payload.feed_eof()
+ * self._payload = None # <<<<<<<<<<<<<<
+ *
+ * cdef _on_chunk_header(self):
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->_payload);
+ __Pyx_DECREF(__pyx_v_self->_payload);
+ __pyx_v_self->_payload = Py_None;
+
+ /* "aiohttp/_http_parser.pyx":474
+ * self._messages.append((msg, payload))
+ *
+ * cdef _on_message_complete(self): # <<<<<<<<<<<<<<
+ * self._payload.feed_eof()
+ * self._payload = None
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser._on_message_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":478
+ * self._payload = None
+ *
+ * cdef _on_chunk_header(self): # <<<<<<<<<<<<<<
+ * self._payload.begin_http_chunk_receiving()
+ *
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_chunk_header(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_on_chunk_header", 0);
+
+ /* "aiohttp/_http_parser.pyx":479
+ *
+ * cdef _on_chunk_header(self):
+ * self._payload.begin_http_chunk_receiving() # <<<<<<<<<<<<<<
+ *
+ * cdef _on_chunk_complete(self):
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_payload, __pyx_n_s_begin_http_chunk_receiving); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 479, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 479, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":478
+ * self._payload = None
+ *
+ * cdef _on_chunk_header(self): # <<<<<<<<<<<<<<
+ * self._payload.begin_http_chunk_receiving()
+ *
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser._on_chunk_header", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":481
+ * self._payload.begin_http_chunk_receiving()
+ *
+ * cdef _on_chunk_complete(self): # <<<<<<<<<<<<<<
+ * self._payload.end_http_chunk_receiving()
+ *
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_chunk_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_on_chunk_complete", 0);
+
+ /* "aiohttp/_http_parser.pyx":482
+ *
+ * cdef _on_chunk_complete(self):
+ * self._payload.end_http_chunk_receiving() # <<<<<<<<<<<<<<
+ *
+ * cdef object _on_status_complete(self):
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_payload, __pyx_n_s_end_http_chunk_receiving); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 482, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 482, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":481
+ * self._payload.begin_http_chunk_receiving()
+ *
+ * cdef _on_chunk_complete(self): # <<<<<<<<<<<<<<
+ * self._payload.end_http_chunk_receiving()
+ *
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser._on_chunk_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
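+
+/* The two chunk callbacks simply bracket each chunked-transfer chunk on
+ * the attached payload object: `begin_http_chunk_receiving()` at the chunk
+ * header and `end_http_chunk_receiving()` at its terminator, which lets
+ * the payload track chunk boundaries without re-parsing the framing
+ * itself.
+ */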
+
+/* "aiohttp/_http_parser.pyx":484
+ * self._payload.end_http_chunk_receiving()
+ *
+ * cdef object _on_status_complete(self): # <<<<<<<<<<<<<<
+ * pass
+ *
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_status_complete(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("_on_status_complete", 0);
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":487
+ * pass
+ *
+ * cdef inline http_version(self): # <<<<<<<<<<<<<<
+ * cdef cparser.http_parser* parser = self._cparser
+ *
+ */
+
+static CYTHON_INLINE PyObject *__pyx_f_7aiohttp_12_http_parser_10HttpParser_http_version(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ struct http_parser *__pyx_v_parser;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ struct http_parser *__pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_t_8;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("http_version", 0);
+
+ /* "aiohttp/_http_parser.pyx":488
+ *
+ * cdef inline http_version(self):
+ * cdef cparser.http_parser* parser = self._cparser # <<<<<<<<<<<<<<
+ *
+ * if parser.http_major == 1:
+ */
+ __pyx_t_1 = __pyx_v_self->_cparser;
+ __pyx_v_parser = __pyx_t_1;
+
+ /* "aiohttp/_http_parser.pyx":490
+ * cdef cparser.http_parser* parser = self._cparser
+ *
+ * if parser.http_major == 1: # <<<<<<<<<<<<<<
+ * if parser.http_minor == 0:
+ * return HttpVersion10
+ */
+ __pyx_t_2 = ((__pyx_v_parser->http_major == 1) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":491
+ *
+ * if parser.http_major == 1:
+ * if parser.http_minor == 0: # <<<<<<<<<<<<<<
+ * return HttpVersion10
+ * elif parser.http_minor == 1:
+ */
+ switch (__pyx_v_parser->http_minor) {
+ case 0:
+
+ /* "aiohttp/_http_parser.pyx":492
+ * if parser.http_major == 1:
+ * if parser.http_minor == 0:
+ * return HttpVersion10 # <<<<<<<<<<<<<<
+ * elif parser.http_minor == 1:
+ * return HttpVersion11
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_HttpVersion10);
+ __pyx_r = __pyx_v_7aiohttp_12_http_parser_HttpVersion10;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":491
+ *
+ * if parser.http_major == 1:
+ * if parser.http_minor == 0: # <<<<<<<<<<<<<<
+ * return HttpVersion10
+ * elif parser.http_minor == 1:
+ */
+ break;
+ case 1:
+
+ /* "aiohttp/_http_parser.pyx":494
+ * return HttpVersion10
+ * elif parser.http_minor == 1:
+ * return HttpVersion11 # <<<<<<<<<<<<<<
+ *
+ * return HttpVersion(parser.http_major, parser.http_minor)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_HttpVersion11);
+ __pyx_r = __pyx_v_7aiohttp_12_http_parser_HttpVersion11;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":493
+ * if parser.http_minor == 0:
+ * return HttpVersion10
+ * elif parser.http_minor == 1: # <<<<<<<<<<<<<<
+ * return HttpVersion11
+ *
+ */
+ break;
+ default: break;
+ }
+
+ /* "aiohttp/_http_parser.pyx":490
+ * cdef cparser.http_parser* parser = self._cparser
+ *
+ * if parser.http_major == 1: # <<<<<<<<<<<<<<
+ * if parser.http_minor == 0:
+ * return HttpVersion10
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":496
+ * return HttpVersion11
+ *
+ * return HttpVersion(parser.http_major, parser.http_minor) # <<<<<<<<<<<<<<
+ *
+ * ### Public API ###
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_4 = __Pyx_PyInt_From_unsigned_short(__pyx_v_parser->http_major); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 496, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = __Pyx_PyInt_From_unsigned_short(__pyx_v_parser->http_minor); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 496, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_HttpVersion);
+ __pyx_t_6 = __pyx_v_7aiohttp_12_http_parser_HttpVersion; __pyx_t_7 = NULL;
+ __pyx_t_8 = 0;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
+ __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6);
+ if (likely(__pyx_t_7)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
+ __Pyx_INCREF(__pyx_t_7);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_6, function);
+ __pyx_t_8 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_6)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_4, __pyx_t_5};
+ __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 496, __pyx_L1_error)
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_4, __pyx_t_5};
+ __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 496, __pyx_L1_error)
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ } else
+ #endif
+ {
+ __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 496, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_9);
+ if (__pyx_t_7) {
+ __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
+ }
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_t_5);
+ __pyx_t_4 = 0;
+ __pyx_t_5 = 0;
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_9, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 496, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":487
+ * pass
+ *
+ * cdef inline http_version(self): # <<<<<<<<<<<<<<
+ * cdef cparser.http_parser* parser = self._cparser
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser.http_version", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
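+/* Hedged aside (annotation, not Cython output): http_version() above is the
+ * compiled form of the quoted .pyx logic. The common HTTP/1.0 and HTTP/1.1
+ * cases return the cached HttpVersion10/HttpVersion11 module constants —
+ * note the C `switch` Cython emitted for the if/elif chain on http_minor —
+ * and only unusual versions allocate a fresh HttpVersion (aiohttp's
+ * (major, minor) namedtuple):
+ *
+ *     return HttpVersion(parser.http_major, parser.http_minor)
+ */
+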
+/* "aiohttp/_http_parser.pyx":500
+ * ### Public API ###
+ *
+ * def feed_eof(self): # <<<<<<<<<<<<<<
+ * cdef bytes desc
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_5feed_eof(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_5feed_eof(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("feed_eof (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_10HttpParser_4feed_eof(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_4feed_eof(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ PyObject *__pyx_v_desc = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("feed_eof", 0);
+
+ /* "aiohttp/_http_parser.pyx":503
+ * cdef bytes desc
+ *
+ * if self._payload is not None: # <<<<<<<<<<<<<<
+ * if self._cparser.flags & cparser.F_CHUNKED:
+ * raise TransferEncodingError(
+ */
+ __pyx_t_1 = (__pyx_v_self->_payload != Py_None);
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":504
+ *
+ * if self._payload is not None:
+ * if self._cparser.flags & cparser.F_CHUNKED: # <<<<<<<<<<<<<<
+ * raise TransferEncodingError(
+ * "Not enough data for satisfy transfer length header.")
+ */
+ __pyx_t_2 = ((__pyx_v_self->_cparser->flags & F_CHUNKED) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "aiohttp/_http_parser.pyx":505
+ * if self._payload is not None:
+ * if self._cparser.flags & cparser.F_CHUNKED:
+ * raise TransferEncodingError( # <<<<<<<<<<<<<<
+ * "Not enough data for satisfy transfer length header.")
+ * elif self._cparser.flags & cparser.F_CONTENTLENGTH:
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_TransferEncodingError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 505, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_kp_u_Not_enough_data_for_satisfy_tran) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_kp_u_Not_enough_data_for_satisfy_tran);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 505, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(0, 505, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":504
+ *
+ * if self._payload is not None:
+ * if self._cparser.flags & cparser.F_CHUNKED: # <<<<<<<<<<<<<<
+ * raise TransferEncodingError(
+ * "Not enough data for satisfy transfer length header.")
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":507
+ * raise TransferEncodingError(
+ * "Not enough data for satisfy transfer length header.")
+ * elif self._cparser.flags & cparser.F_CONTENTLENGTH: # <<<<<<<<<<<<<<
+ * raise ContentLengthError(
+ * "Not enough data for satisfy content length header.")
+ */
+ __pyx_t_2 = ((__pyx_v_self->_cparser->flags & F_CONTENTLENGTH) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "aiohttp/_http_parser.pyx":508
+ * "Not enough data for satisfy transfer length header.")
+ * elif self._cparser.flags & cparser.F_CONTENTLENGTH:
+ * raise ContentLengthError( # <<<<<<<<<<<<<<
+ * "Not enough data for satisfy content length header.")
+ * elif self._cparser.http_errno != cparser.HPE_OK:
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_ContentLengthError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 508, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_kp_u_Not_enough_data_for_satisfy_cont) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_kp_u_Not_enough_data_for_satisfy_cont);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 508, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(0, 508, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":507
+ * raise TransferEncodingError(
+ * "Not enough data for satisfy transfer length header.")
+ * elif self._cparser.flags & cparser.F_CONTENTLENGTH: # <<<<<<<<<<<<<<
+ * raise ContentLengthError(
+ * "Not enough data for satisfy content length header.")
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":510
+ * raise ContentLengthError(
+ * "Not enough data for satisfy content length header.")
+ * elif self._cparser.http_errno != cparser.HPE_OK: # <<<<<<<<<<<<<<
+ * desc = cparser.http_errno_description(
+ * <cparser.http_errno> self._cparser.http_errno)
+ */
+ __pyx_t_2 = ((__pyx_v_self->_cparser->http_errno != HPE_OK) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "aiohttp/_http_parser.pyx":511
+ * "Not enough data for satisfy content length header.")
+ * elif self._cparser.http_errno != cparser.HPE_OK:
+ * desc = cparser.http_errno_description( # <<<<<<<<<<<<<<
+ * <cparser.http_errno> self._cparser.http_errno)
+ * raise PayloadEncodingError(desc.decode('latin-1'))
+ */
+ __pyx_t_3 = __Pyx_PyBytes_FromString(http_errno_description(((enum http_errno)__pyx_v_self->_cparser->http_errno))); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 511, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_v_desc = ((PyObject*)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":513
+ * desc = cparser.http_errno_description(
+ * <cparser.http_errno> self._cparser.http_errno)
+ * raise PayloadEncodingError(desc.decode('latin-1')) # <<<<<<<<<<<<<<
+ * else:
+ * self._payload.feed_eof()
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_PayloadEncodingError); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 513, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = __Pyx_decode_bytes(__pyx_v_desc, 0, PY_SSIZE_T_MAX, NULL, NULL, PyUnicode_DecodeLatin1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 513, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_6)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_6);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_6, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 513, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(0, 513, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":510
+ * raise ContentLengthError(
+ * "Not enough data for satisfy content length header.")
+ * elif self._cparser.http_errno != cparser.HPE_OK: # <<<<<<<<<<<<<<
+ * desc = cparser.http_errno_description(
+ * <cparser.http_errno> self._cparser.http_errno)
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":515
+ * raise PayloadEncodingError(desc.decode('latin-1'))
+ * else:
+ * self._payload.feed_eof() # <<<<<<<<<<<<<<
+ * elif self._started:
+ * self._on_headers_complete()
+ */
+ /*else*/ {
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->_payload, __pyx_n_s_feed_eof); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 515, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallNoArg(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 515, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":503
+ * cdef bytes desc
+ *
+ * if self._payload is not None: # <<<<<<<<<<<<<<
+ * if self._cparser.flags & cparser.F_CHUNKED:
+ * raise TransferEncodingError(
+ */
+ goto __pyx_L3;
+ }
+
+ /* "aiohttp/_http_parser.pyx":516
+ * else:
+ * self._payload.feed_eof()
+ * elif self._started: # <<<<<<<<<<<<<<
+ * self._on_headers_complete()
+ * if self._messages:
+ */
+ __pyx_t_2 = (__pyx_v_self->_started != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":517
+ * self._payload.feed_eof()
+ * elif self._started:
+ * self._on_headers_complete() # <<<<<<<<<<<<<<
+ * if self._messages:
+ * return self._messages[-1][0]
+ */
+ __pyx_t_3 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self->__pyx_vtab)->_on_headers_complete(__pyx_v_self); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 517, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":518
+ * elif self._started:
+ * self._on_headers_complete()
+ * if self._messages: # <<<<<<<<<<<<<<
+ * return self._messages[-1][0]
+ *
+ */
+ __pyx_t_2 = (__pyx_v_self->_messages != Py_None)&&(PyList_GET_SIZE(__pyx_v_self->_messages) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":519
+ * self._on_headers_complete()
+ * if self._messages:
+ * return self._messages[-1][0] # <<<<<<<<<<<<<<
+ *
+ * def feed_data(self, data):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ if (unlikely(__pyx_v_self->_messages == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 519, __pyx_L1_error)
+ }
+ __pyx_t_3 = __Pyx_GetItemInt_List(__pyx_v_self->_messages, -1L, long, 1, __Pyx_PyInt_From_long, 1, 1, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 519, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_GetItemInt(__pyx_t_3, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 519, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":518
+ * elif self._started:
+ * self._on_headers_complete()
+ * if self._messages: # <<<<<<<<<<<<<<
+ * return self._messages[-1][0]
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":516
+ * else:
+ * self._payload.feed_eof()
+ * elif self._started: # <<<<<<<<<<<<<<
+ * self._on_headers_complete()
+ * if self._messages:
+ */
+ }
+ __pyx_L3:;
+
+ /* "aiohttp/_http_parser.pyx":500
+ * ### Public API ###
+ *
+ * def feed_eof(self): # <<<<<<<<<<<<<<
+ * cdef bytes desc
+ *
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser.feed_eof", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_desc);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
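+/* Hedged usage sketch (illustrative values, not from this file): feed_eof()
+ * raises if EOF arrives mid-message, and otherwise completes the payload
+ * (returning the last parsed message when headers had already started):
+ *
+ *     parser.feed_data(b"POST / HTTP/1.1\r\nContent-Length: 10\r\n\r\nabc")
+ *     parser.feed_eof()   # ContentLengthError: body is still 7 bytes short
+ */
+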
+/* "aiohttp/_http_parser.pyx":521
+ * return self._messages[-1][0]
+ *
+ * def feed_data(self, data): # <<<<<<<<<<<<<<
+ * cdef:
+ * size_t data_len
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_7feed_data(PyObject *__pyx_v_self, PyObject *__pyx_v_data); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_7feed_data(PyObject *__pyx_v_self, PyObject *__pyx_v_data) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("feed_data (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_10HttpParser_6feed_data(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self), ((PyObject *)__pyx_v_data));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_6feed_data(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_data) {
+ size_t __pyx_v_data_len;
+ size_t __pyx_v_nb;
+ PyObject *__pyx_v_ex = NULL;
+ PyObject *__pyx_v_messages = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("feed_data", 0);
+
+ /* "aiohttp/_http_parser.pyx":526
+ * size_t nb
+ *
+ * PyObject_GetBuffer(data, &self.py_buf, PyBUF_SIMPLE) # <<<<<<<<<<<<<<
+ * data_len = <size_t>self.py_buf.len
+ *
+ */
+ __pyx_t_1 = PyObject_GetBuffer(__pyx_v_data, (&__pyx_v_self->py_buf), PyBUF_SIMPLE); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 526, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":527
+ *
+ * PyObject_GetBuffer(data, &self.py_buf, PyBUF_SIMPLE)
+ * data_len = <size_t>self.py_buf.len # <<<<<<<<<<<<<<
+ *
+ * nb = cparser.http_parser_execute(
+ */
+ __pyx_v_data_len = ((size_t)__pyx_v_self->py_buf.len);
+
+ /* "aiohttp/_http_parser.pyx":529
+ * data_len = <size_t>self.py_buf.len
+ *
+ * nb = cparser.http_parser_execute( # <<<<<<<<<<<<<<
+ * self._cparser,
+ * self._csettings,
+ */
+ __pyx_v_nb = http_parser_execute(__pyx_v_self->_cparser, __pyx_v_self->_csettings, ((char *)__pyx_v_self->py_buf.buf), __pyx_v_data_len);
+
+ /* "aiohttp/_http_parser.pyx":535
+ * data_len)
+ *
+ * PyBuffer_Release(&self.py_buf) # <<<<<<<<<<<<<<
+ *
+ * if (self._cparser.http_errno != cparser.HPE_OK):
+ */
+ PyBuffer_Release((&__pyx_v_self->py_buf));
+
+ /* "aiohttp/_http_parser.pyx":537
+ * PyBuffer_Release(&self.py_buf)
+ *
+ * if (self._cparser.http_errno != cparser.HPE_OK): # <<<<<<<<<<<<<<
+ * if self._payload_error == 0:
+ * if self._last_error is not None:
+ */
+ __pyx_t_2 = ((__pyx_v_self->_cparser->http_errno != HPE_OK) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":538
+ *
+ * if (self._cparser.http_errno != cparser.HPE_OK):
+ * if self._payload_error == 0: # <<<<<<<<<<<<<<
+ * if self._last_error is not None:
+ * ex = self._last_error
+ */
+ __pyx_t_2 = ((__pyx_v_self->_payload_error == 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":539
+ * if (self._cparser.http_errno != cparser.HPE_OK):
+ * if self._payload_error == 0:
+ * if self._last_error is not None: # <<<<<<<<<<<<<<
+ * ex = self._last_error
+ * self._last_error = None
+ */
+ __pyx_t_2 = (__pyx_v_self->_last_error != Py_None);
+ __pyx_t_3 = (__pyx_t_2 != 0);
+ if (__pyx_t_3) {
+
+ /* "aiohttp/_http_parser.pyx":540
+ * if self._payload_error == 0:
+ * if self._last_error is not None:
+ * ex = self._last_error # <<<<<<<<<<<<<<
+ * self._last_error = None
+ * else:
+ */
+ __pyx_t_4 = __pyx_v_self->_last_error;
+ __Pyx_INCREF(__pyx_t_4);
+ __pyx_v_ex = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "aiohttp/_http_parser.pyx":541
+ * if self._last_error is not None:
+ * ex = self._last_error
+ * self._last_error = None # <<<<<<<<<<<<<<
+ * else:
+ * ex = parser_error_from_errno(
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->_last_error);
+ __Pyx_DECREF(__pyx_v_self->_last_error);
+ __pyx_v_self->_last_error = Py_None;
+
+ /* "aiohttp/_http_parser.pyx":539
+ * if (self._cparser.http_errno != cparser.HPE_OK):
+ * if self._payload_error == 0:
+ * if self._last_error is not None: # <<<<<<<<<<<<<<
+ * ex = self._last_error
+ * self._last_error = None
+ */
+ goto __pyx_L5;
+ }
+
+ /* "aiohttp/_http_parser.pyx":543
+ * self._last_error = None
+ * else:
+ * ex = parser_error_from_errno( # <<<<<<<<<<<<<<
+ * <cparser.http_errno> self._cparser.http_errno)
+ * self._payload = None
+ */
+ /*else*/ {
+
+ /* "aiohttp/_http_parser.pyx":544
+ * else:
+ * ex = parser_error_from_errno(
+ * <cparser.http_errno> self._cparser.http_errno) # <<<<<<<<<<<<<<
+ * self._payload = None
+ * raise ex
+ */
+ __pyx_t_4 = __pyx_f_7aiohttp_12_http_parser_parser_error_from_errno(((enum http_errno)__pyx_v_self->_cparser->http_errno)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 543, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_v_ex = __pyx_t_4;
+ __pyx_t_4 = 0;
+ }
+ __pyx_L5:;
+
+ /* "aiohttp/_http_parser.pyx":545
+ * ex = parser_error_from_errno(
+ * <cparser.http_errno> self._cparser.http_errno)
+ * self._payload = None # <<<<<<<<<<<<<<
+ * raise ex
+ *
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_self->_payload);
+ __Pyx_DECREF(__pyx_v_self->_payload);
+ __pyx_v_self->_payload = Py_None;
+
+ /* "aiohttp/_http_parser.pyx":546
+ * <cparser.http_errno> self._cparser.http_errno)
+ * self._payload = None
+ * raise ex # <<<<<<<<<<<<<<
+ *
+ * if self._messages:
+ */
+ __Pyx_Raise(__pyx_v_ex, 0, 0, 0);
+ __PYX_ERR(0, 546, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":538
+ *
+ * if (self._cparser.http_errno != cparser.HPE_OK):
+ * if self._payload_error == 0: # <<<<<<<<<<<<<<
+ * if self._last_error is not None:
+ * ex = self._last_error
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":537
+ * PyBuffer_Release(&self.py_buf)
+ *
+ * if (self._cparser.http_errno != cparser.HPE_OK): # <<<<<<<<<<<<<<
+ * if self._payload_error == 0:
+ * if self._last_error is not None:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":548
+ * raise ex
+ *
+ * if self._messages: # <<<<<<<<<<<<<<
+ * messages = self._messages
+ * self._messages = []
+ */
+ __pyx_t_3 = (__pyx_v_self->_messages != Py_None)&&(PyList_GET_SIZE(__pyx_v_self->_messages) != 0);
+ if (__pyx_t_3) {
+
+ /* "aiohttp/_http_parser.pyx":549
+ *
+ * if self._messages:
+ * messages = self._messages # <<<<<<<<<<<<<<
+ * self._messages = []
+ * else:
+ */
+ __pyx_t_4 = __pyx_v_self->_messages;
+ __Pyx_INCREF(__pyx_t_4);
+ __pyx_v_messages = __pyx_t_4;
+ __pyx_t_4 = 0;
+
+ /* "aiohttp/_http_parser.pyx":550
+ * if self._messages:
+ * messages = self._messages
+ * self._messages = [] # <<<<<<<<<<<<<<
+ * else:
+ * messages = ()
+ */
+ __pyx_t_4 = PyList_New(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 550, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_4);
+ __Pyx_GOTREF(__pyx_v_self->_messages);
+ __Pyx_DECREF(__pyx_v_self->_messages);
+ __pyx_v_self->_messages = ((PyObject*)__pyx_t_4);
+ __pyx_t_4 = 0;
+
+ /* "aiohttp/_http_parser.pyx":548
+ * raise ex
+ *
+ * if self._messages: # <<<<<<<<<<<<<<
+ * messages = self._messages
+ * self._messages = []
+ */
+ goto __pyx_L6;
+ }
+
+ /* "aiohttp/_http_parser.pyx":552
+ * self._messages = []
+ * else:
+ * messages = () # <<<<<<<<<<<<<<
+ *
+ * if self._upgraded:
+ */
+ /*else*/ {
+ __Pyx_INCREF(__pyx_empty_tuple);
+ __pyx_v_messages = __pyx_empty_tuple;
+ }
+ __pyx_L6:;
+
+ /* "aiohttp/_http_parser.pyx":554
+ * messages = ()
+ *
+ * if self._upgraded: # <<<<<<<<<<<<<<
+ * return messages, True, data[nb:]
+ * else:
+ */
+ __pyx_t_3 = (__pyx_v_self->_upgraded != 0);
+ if (__pyx_t_3) {
+
+ /* "aiohttp/_http_parser.pyx":555
+ *
+ * if self._upgraded:
+ * return messages, True, data[nb:] # <<<<<<<<<<<<<<
+ * else:
+ * return messages, False, b''
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_4 = __Pyx_PyObject_GetSlice(__pyx_v_data, __pyx_v_nb, 0, NULL, NULL, NULL, 1, 0, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 555, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 555, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_messages);
+ __Pyx_GIVEREF(__pyx_v_messages);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_messages);
+ __Pyx_INCREF(Py_True);
+ __Pyx_GIVEREF(Py_True);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, Py_True);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_4);
+ __pyx_t_4 = 0;
+ __pyx_r = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":554
+ * messages = ()
+ *
+ * if self._upgraded: # <<<<<<<<<<<<<<
+ * return messages, True, data[nb:]
+ * else:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":557
+ * return messages, True, data[nb:]
+ * else:
+ * return messages, False, b'' # <<<<<<<<<<<<<<
+ *
+ * def set_upgraded(self, val):
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 557, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v_messages);
+ __Pyx_GIVEREF(__pyx_v_messages);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_messages);
+ __Pyx_INCREF(Py_False);
+ __Pyx_GIVEREF(Py_False);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, Py_False);
+ __Pyx_INCREF(__pyx_kp_b__4);
+ __Pyx_GIVEREF(__pyx_kp_b__4);
+ PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_b__4);
+ __pyx_r = __pyx_t_5;
+ __pyx_t_5 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":521
+ * return self._messages[-1][0]
+ *
+ * def feed_data(self, data): # <<<<<<<<<<<<<<
+ * cdef:
+ * size_t data_len
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser.feed_data", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_ex);
+ __Pyx_XDECREF(__pyx_v_messages);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
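+/* Hedged sketch of the calling convention compiled above: feed_data()
+ * returns a (messages, upgraded, tail) triple, where `messages` is a list
+ * of (message, payload) pairs and `tail` is non-empty only after a protocol
+ * upgrade (e.g. a WebSocket handshake):
+ *
+ *     messages, upgraded, tail = parser.feed_data(raw_bytes)
+ *     for message, payload in messages:
+ *         ...              # headers are parsed; the body streams via `payload`
+ *     if upgraded:
+ *         ...              # hand `tail` to the post-upgrade protocol
+ */
+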
+/* "aiohttp/_http_parser.pyx":559
+ * return messages, False, b''
+ *
+ * def set_upgraded(self, val): # <<<<<<<<<<<<<<
+ * self._upgraded = val
+ *
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_9set_upgraded(PyObject *__pyx_v_self, PyObject *__pyx_v_val); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_9set_upgraded(PyObject *__pyx_v_self, PyObject *__pyx_v_val) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("set_upgraded (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_10HttpParser_8set_upgraded(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self), ((PyObject *)__pyx_v_val));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_8set_upgraded(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, PyObject *__pyx_v_val) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("set_upgraded", 0);
+
+ /* "aiohttp/_http_parser.pyx":560
+ *
+ * def set_upgraded(self, val):
+ * self._upgraded = val # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_val); if (unlikely((__pyx_t_1 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 560, __pyx_L1_error)
+ __pyx_v_self->_upgraded = __pyx_t_1;
+
+ /* "aiohttp/_http_parser.pyx":559
+ * return messages, False, b''
+ *
+ * def set_upgraded(self, val): # <<<<<<<<<<<<<<
+ * self._upgraded = val
+ *
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser.set_upgraded", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
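+/* Note: set_upgraded() funnels `val` through __Pyx_PyObject_IsTrue() into
+ * the C bint field `_upgraded`; that flag is what makes feed_data() above
+ * return the unconsumed tail slice instead of b''. */
+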
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_11__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_11__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_10HttpParser_10__reduce_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_10__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":2
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(1, 2, __pyx_L1_error)
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":3
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_13__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_10HttpParser_13__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_10HttpParser_12__setstate_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_10HttpParser_12__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":4
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ */
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(1, 4, __pyx_L1_error)
+
+ /* "(tree fragment)":3
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpParser.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":565
+ * cdef class HttpRequestParser(HttpParser):
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None, # <<<<<<<<<<<<<<
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_protocol = 0;
+ PyObject *__pyx_v_loop = 0;
+ int __pyx_v_limit;
+ PyObject *__pyx_v_timer = 0;
+ size_t __pyx_v_max_line_size;
+ size_t __pyx_v_max_headers;
+ size_t __pyx_v_max_field_size;
+ PyObject *__pyx_v_payload_exception = 0;
+ int __pyx_v_response_with_body;
+ int __pyx_v_read_until_eof;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_protocol,&__pyx_n_s_loop,&__pyx_n_s_limit,&__pyx_n_s_timer,&__pyx_n_s_max_line_size,&__pyx_n_s_max_headers,&__pyx_n_s_max_field_size,&__pyx_n_s_payload_exception,&__pyx_n_s_response_with_body,&__pyx_n_s_read_until_eof,0};
+ PyObject* values[10] = {0,0,0,0,0,0,0,0,0,0};
+ values[3] = ((PyObject *)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":567
+ * def __init__(self, protocol, loop, int limit, timer=None,
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None, # <<<<<<<<<<<<<<
+ * bint response_with_body=True, bint read_until_eof=False,
+ * ):
+ */
+ values[7] = ((PyObject *)Py_None);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+ CYTHON_FALLTHROUGH;
+ case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+ CYTHON_FALLTHROUGH;
+ case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ CYTHON_FALLTHROUGH;
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ CYTHON_FALLTHROUGH;
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ CYTHON_FALLTHROUGH;
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ CYTHON_FALLTHROUGH;
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ CYTHON_FALLTHROUGH;
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_protocol)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_loop)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 10, 1); __PYX_ERR(0, 565, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_limit)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 10, 2); __PYX_ERR(0, 565, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_timer);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 4:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_line_size);
+ if (value) { values[4] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 5:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_headers);
+ if (value) { values[5] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 6:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_field_size);
+ if (value) { values[6] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 7:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_payload_exception);
+ if (value) { values[7] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 8:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_response_with_body);
+ if (value) { values[8] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 9:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_read_until_eof);
+ if (value) { values[9] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 565, __pyx_L3_error)
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+ CYTHON_FALLTHROUGH;
+ case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+ CYTHON_FALLTHROUGH;
+ case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ CYTHON_FALLTHROUGH;
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ CYTHON_FALLTHROUGH;
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ CYTHON_FALLTHROUGH;
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ CYTHON_FALLTHROUGH;
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ CYTHON_FALLTHROUGH;
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_protocol = values[0];
+ __pyx_v_loop = values[1];
+ __pyx_v_limit = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_limit == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 565, __pyx_L3_error)
+ __pyx_v_timer = values[3];
+ if (values[4]) {
+ __pyx_v_max_line_size = __Pyx_PyInt_As_size_t(values[4]); if (unlikely((__pyx_v_max_line_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 566, __pyx_L3_error)
+ } else {
+ __pyx_v_max_line_size = ((size_t)0x1FFE);
+ }
+ if (values[5]) {
+ __pyx_v_max_headers = __Pyx_PyInt_As_size_t(values[5]); if (unlikely((__pyx_v_max_headers == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 566, __pyx_L3_error)
+ } else {
+ __pyx_v_max_headers = ((size_t)0x8000);
+ }
+ if (values[6]) {
+ __pyx_v_max_field_size = __Pyx_PyInt_As_size_t(values[6]); if (unlikely((__pyx_v_max_field_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 567, __pyx_L3_error)
+ } else {
+ __pyx_v_max_field_size = ((size_t)0x1FFE);
+ }
+ __pyx_v_payload_exception = values[7];
+ if (values[8]) {
+ __pyx_v_response_with_body = __Pyx_PyObject_IsTrue(values[8]); if (unlikely((__pyx_v_response_with_body == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 568, __pyx_L3_error)
+ } else {
+
+ /* "aiohttp/_http_parser.pyx":568
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ * bint response_with_body=True, bint read_until_eof=False, # <<<<<<<<<<<<<<
+ * ):
+ * self._init(cparser.HTTP_REQUEST, protocol, loop, limit, timer,
+ */
+ __pyx_v_response_with_body = ((int)1);
+ }
+ if (values[9]) {
+ __pyx_v_read_until_eof = __Pyx_PyObject_IsTrue(values[9]); if (unlikely((__pyx_v_read_until_eof == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 568, __pyx_L3_error)
+ } else {
+ __pyx_v_read_until_eof = ((int)0);
+ }
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 10, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 565, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpRequestParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser___init__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *)__pyx_v_self), __pyx_v_protocol, __pyx_v_loop, __pyx_v_limit, __pyx_v_timer, __pyx_v_max_line_size, __pyx_v_max_headers, __pyx_v_max_field_size, __pyx_v_payload_exception, __pyx_v_response_with_body, __pyx_v_read_until_eof);
+
+ /* "aiohttp/_http_parser.pyx":565
+ * cdef class HttpRequestParser(HttpParser):
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None, # <<<<<<<<<<<<<<
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser___init__(struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *__pyx_v_self, PyObject *__pyx_v_protocol, PyObject *__pyx_v_loop, int __pyx_v_limit, PyObject *__pyx_v_timer, size_t __pyx_v_max_line_size, size_t __pyx_v_max_headers, size_t __pyx_v_max_field_size, PyObject *__pyx_v_payload_exception, int __pyx_v_response_with_body, int __pyx_v_read_until_eof) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ struct __pyx_opt_args_7aiohttp_12_http_parser_10HttpParser__init __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__init__", 0);
+
+ /* "aiohttp/_http_parser.pyx":570
+ * bint response_with_body=True, bint read_until_eof=False,
+ * ):
+ * self._init(cparser.HTTP_REQUEST, protocol, loop, limit, timer, # <<<<<<<<<<<<<<
+ * max_line_size, max_headers, max_field_size,
+ * payload_exception, response_with_body, read_until_eof)
+ */
+ __pyx_t_2.__pyx_n = 7;
+ __pyx_t_2.timer = __pyx_v_timer;
+ __pyx_t_2.max_line_size = __pyx_v_max_line_size;
+ __pyx_t_2.max_headers = __pyx_v_max_headers;
+ __pyx_t_2.max_field_size = __pyx_v_max_field_size;
+ __pyx_t_2.payload_exception = __pyx_v_payload_exception;
+ __pyx_t_2.response_with_body = __pyx_v_response_with_body;
+ __pyx_t_2.read_until_eof = __pyx_v_read_until_eof;
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpRequestParser *)__pyx_v_self->__pyx_base.__pyx_vtab)->__pyx_base._init(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self), HTTP_REQUEST, __pyx_v_protocol, __pyx_v_loop, __pyx_v_limit, &__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 570, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":565
+ * cdef class HttpRequestParser(HttpParser):
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None, # <<<<<<<<<<<<<<
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpRequestParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
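+/* Hedged aside (annotation, not Cython output): the hex defaults above are
+ * the quoted .pyx keyword defaults — 0x1FFE == 8190 (max_line_size,
+ * max_field_size) and 0x8000 == 32768 (max_headers). Minimal construction
+ * sketch (`protocol` and `loop` are assumed caller-supplied objects):
+ *
+ *     parser = HttpRequestParser(protocol, loop, 2 ** 16, timer=None)
+ */
+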
+/* "aiohttp/_http_parser.pyx":574
+ * payload_exception, response_with_body, read_until_eof)
+ *
+ * cdef object _on_status_complete(self): # <<<<<<<<<<<<<<
+ * cdef Py_buffer py_buf
+ * if not self._buf:
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_17HttpRequestParser__on_status_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *__pyx_v_self) {
+ Py_buffer __pyx_v_py_buf;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ char const *__pyx_t_8;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ PyObject *__pyx_t_14 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_on_status_complete", 0);
+
+ /* "aiohttp/_http_parser.pyx":576
+ * cdef object _on_status_complete(self):
+ * cdef Py_buffer py_buf
+ * if not self._buf: # <<<<<<<<<<<<<<
+ * return
+ * self._path = self._buf.decode('utf-8', 'surrogateescape')
+ */
+ __pyx_t_1 = (__pyx_v_self->__pyx_base._buf != Py_None)&&(PyByteArray_GET_SIZE(__pyx_v_self->__pyx_base._buf) != 0);
+ __pyx_t_2 = ((!__pyx_t_1) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":577
+ * cdef Py_buffer py_buf
+ * if not self._buf:
+ * return # <<<<<<<<<<<<<<
+ * self._path = self._buf.decode('utf-8', 'surrogateescape')
+ * if self._cparser.method == 5: # CONNECT
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":576
+ * cdef object _on_status_complete(self):
+ * cdef Py_buffer py_buf
+ * if not self._buf: # <<<<<<<<<<<<<<
+ * return
+ * self._path = self._buf.decode('utf-8', 'surrogateescape')
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":578
+ * if not self._buf:
+ * return
+ * self._path = self._buf.decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ * if self._cparser.method == 5: # CONNECT
+ * self._url = URL(self._path)
+ */
+ if (unlikely(__pyx_v_self->__pyx_base._buf == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "decode");
+ __PYX_ERR(0, 578, __pyx_L1_error)
+ }
+ __pyx_t_3 = __Pyx_decode_bytearray(__pyx_v_self->__pyx_base._buf, 0, PY_SSIZE_T_MAX, NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 578, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->__pyx_base._path);
+ __Pyx_DECREF(__pyx_v_self->__pyx_base._path);
+ __pyx_v_self->__pyx_base._path = ((PyObject*)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":579
+ * return
+ * self._path = self._buf.decode('utf-8', 'surrogateescape')
+ * if self._cparser.method == 5: # CONNECT # <<<<<<<<<<<<<<
+ * self._url = URL(self._path)
+ * else:
+ */
+ __pyx_t_2 = ((__pyx_v_self->__pyx_base._cparser->method == 5) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_parser.pyx":580
+ * self._path = self._buf.decode('utf-8', 'surrogateescape')
+ * if self._cparser.method == 5: # CONNECT
+ * self._url = URL(self._path) # <<<<<<<<<<<<<<
+ * else:
+ * PyObject_GetBuffer(self._buf, &py_buf, PyBUF_SIMPLE)
+ */
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_URL);
+ __pyx_t_4 = __pyx_v_7aiohttp_12_http_parser_URL; __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_self->__pyx_base._path) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_self->__pyx_base._path);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 580, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->__pyx_base._url);
+ __Pyx_DECREF(__pyx_v_self->__pyx_base._url);
+ __pyx_v_self->__pyx_base._url = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":579
+ * return
+ * self._path = self._buf.decode('utf-8', 'surrogateescape')
+ * if self._cparser.method == 5: # CONNECT # <<<<<<<<<<<<<<
+ * self._url = URL(self._path)
+ * else:
+ */
+ goto __pyx_L4;
+ }
+
+ /* "aiohttp/_http_parser.pyx":582
+ * self._url = URL(self._path)
+ * else:
+ * PyObject_GetBuffer(self._buf, &py_buf, PyBUF_SIMPLE) # <<<<<<<<<<<<<<
+ * try:
+ * self._url = _parse_url(<char*>py_buf.buf,
+ */
+ /*else*/ {
+ __pyx_t_3 = __pyx_v_self->__pyx_base._buf;
+ __Pyx_INCREF(__pyx_t_3);
+ __pyx_t_6 = PyObject_GetBuffer(__pyx_t_3, (&__pyx_v_py_buf), PyBUF_SIMPLE); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(0, 582, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":583
+ * else:
+ * PyObject_GetBuffer(self._buf, &py_buf, PyBUF_SIMPLE)
+ * try: # <<<<<<<<<<<<<<
+ * self._url = _parse_url(<char*>py_buf.buf,
+ * py_buf.len)
+ */
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":584
+ * PyObject_GetBuffer(self._buf, &py_buf, PyBUF_SIMPLE)
+ * try:
+ * self._url = _parse_url(<char*>py_buf.buf, # <<<<<<<<<<<<<<
+ * py_buf.len)
+ * finally:
+ */
+ __pyx_t_3 = __pyx_f_7aiohttp_12_http_parser__parse_url(((char *)__pyx_v_py_buf.buf), __pyx_v_py_buf.len); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 584, __pyx_L6_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->__pyx_base._url);
+ __Pyx_DECREF(__pyx_v_self->__pyx_base._url);
+ __pyx_v_self->__pyx_base._url = __pyx_t_3;
+ __pyx_t_3 = 0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":587
+ * py_buf.len)
+ * finally:
+ * PyBuffer_Release(&py_buf) # <<<<<<<<<<<<<<
+ * PyByteArray_Resize(self._buf, 0)
+ *
+ */
+ /*finally:*/ {
+ /*normal exit:*/{
+ PyBuffer_Release((&__pyx_v_py_buf));
+ goto __pyx_L7;
+ }
+ __pyx_L6_error:;
+ /*exception exit:*/{
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14);
+ if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11) < 0)) __Pyx_ErrFetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11);
+ __Pyx_XGOTREF(__pyx_t_9);
+ __Pyx_XGOTREF(__pyx_t_10);
+ __Pyx_XGOTREF(__pyx_t_11);
+ __Pyx_XGOTREF(__pyx_t_12);
+ __Pyx_XGOTREF(__pyx_t_13);
+ __Pyx_XGOTREF(__pyx_t_14);
+ __pyx_t_6 = __pyx_lineno; __pyx_t_7 = __pyx_clineno; __pyx_t_8 = __pyx_filename;
+ {
+ PyBuffer_Release((&__pyx_v_py_buf));
+ }
+ if (PY_MAJOR_VERSION >= 3) {
+ __Pyx_XGIVEREF(__pyx_t_12);
+ __Pyx_XGIVEREF(__pyx_t_13);
+ __Pyx_XGIVEREF(__pyx_t_14);
+ __Pyx_ExceptionReset(__pyx_t_12, __pyx_t_13, __pyx_t_14);
+ }
+ __Pyx_XGIVEREF(__pyx_t_9);
+ __Pyx_XGIVEREF(__pyx_t_10);
+ __Pyx_XGIVEREF(__pyx_t_11);
+ __Pyx_ErrRestore(__pyx_t_9, __pyx_t_10, __pyx_t_11);
+ __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0;
+ __pyx_lineno = __pyx_t_6; __pyx_clineno = __pyx_t_7; __pyx_filename = __pyx_t_8;
+ goto __pyx_L1_error;
+ }
+ __pyx_L7:;
+ }
+ }
+ __pyx_L4:;
+
+ /* "aiohttp/_http_parser.pyx":588
+ * finally:
+ * PyBuffer_Release(&py_buf)
+ * PyByteArray_Resize(self._buf, 0) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_3 = __pyx_v_self->__pyx_base._buf;
+ __Pyx_INCREF(__pyx_t_3);
+ __pyx_t_7 = PyByteArray_Resize(__pyx_t_3, 0); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(0, 588, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":574
+ * payload_exception, response_with_body, read_until_eof)
+ *
+ * cdef object _on_status_complete(self): # <<<<<<<<<<<<<<
+ * cdef Py_buffer py_buf
+ * if not self._buf:
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpRequestParser._on_status_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
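+/* Hedged aside (annotation, not Cython output): in the request-line handler
+ * above, method 5 is http-parser's HTTP_CONNECT, whose target is in
+ * authority form (host:port), so it is wrapped directly with yarl's URL()
+ * rather than going through the module's _parse_url() fast path; the
+ * bytearray buffer is then truncated to 0 for the next message. */
+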
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_3__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_3__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser_2__reduce_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser_2__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":2
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(1, 2, __pyx_L1_error)
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpRequestParser.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":3
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_5__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_5__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser_4__setstate_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_17HttpRequestParser_4__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":4
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ */
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(1, 4, __pyx_L1_error)
+
+ /* "(tree fragment)":3
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpRequestParser.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
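+
+/* __reduce_cython__ and __setstate_cython__ above exist only to make pickling
+ * fail loudly: the type has a non-trivial __cinit__, so no safe default
+ * reduction exists. A hand-written equivalent of what each wrapper does
+ * (demo_reduce is a hypothetical name):
+ */
+#if 0  /* excluded from the build; for reading only */
+static PyObject *demo_reduce(PyObject *self, PyObject *unused)
+{
+    (void)self; (void)unused;
+    PyErr_SetString(PyExc_TypeError,
+                    "no default __reduce__ due to non-trivial __cinit__");
+    return NULL;                          /* NULL signals the raised error */
+}
+#endif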
+
+/* "aiohttp/_http_parser.pyx":593
+ * cdef class HttpResponseParser(HttpParser):
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None, # <<<<<<<<<<<<<<
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ */
+
+/* Python wrapper */
+static int __pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_protocol = 0;
+ PyObject *__pyx_v_loop = 0;
+ int __pyx_v_limit;
+ PyObject *__pyx_v_timer = 0;
+ size_t __pyx_v_max_line_size;
+ size_t __pyx_v_max_headers;
+ size_t __pyx_v_max_field_size;
+ PyObject *__pyx_v_payload_exception = 0;
+ int __pyx_v_response_with_body;
+ int __pyx_v_read_until_eof;
+ int __pyx_v_auto_decompress;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_protocol,&__pyx_n_s_loop,&__pyx_n_s_limit,&__pyx_n_s_timer,&__pyx_n_s_max_line_size,&__pyx_n_s_max_headers,&__pyx_n_s_max_field_size,&__pyx_n_s_payload_exception,&__pyx_n_s_response_with_body,&__pyx_n_s_read_until_eof,&__pyx_n_s_auto_decompress,0};
+ PyObject* values[11] = {0,0,0,0,0,0,0,0,0,0,0};
+ values[3] = ((PyObject *)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":595
+ * def __init__(self, protocol, loop, int limit, timer=None,
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None, # <<<<<<<<<<<<<<
+ * bint response_with_body=True, bint read_until_eof=False,
+ * bint auto_decompress=True
+ */
+ values[7] = ((PyObject *)Py_None);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
+ CYTHON_FALLTHROUGH;
+ case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+ CYTHON_FALLTHROUGH;
+ case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+ CYTHON_FALLTHROUGH;
+ case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ CYTHON_FALLTHROUGH;
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ CYTHON_FALLTHROUGH;
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ CYTHON_FALLTHROUGH;
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ CYTHON_FALLTHROUGH;
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ CYTHON_FALLTHROUGH;
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_protocol)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_loop)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 11, 1); __PYX_ERR(0, 593, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_limit)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 11, 2); __PYX_ERR(0, 593, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 3:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_timer);
+ if (value) { values[3] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 4:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_line_size);
+ if (value) { values[4] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 5:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_headers);
+ if (value) { values[5] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 6:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_field_size);
+ if (value) { values[6] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 7:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_payload_exception);
+ if (value) { values[7] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 8:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_response_with_body);
+ if (value) { values[8] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 9:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_read_until_eof);
+ if (value) { values[9] = value; kw_args--; }
+ }
+ CYTHON_FALLTHROUGH;
+ case 10:
+ if (kw_args > 0) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_auto_decompress);
+ if (value) { values[10] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 593, __pyx_L3_error)
+ }
+ } else {
+ switch (PyTuple_GET_SIZE(__pyx_args)) {
+ case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10);
+ CYTHON_FALLTHROUGH;
+ case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9);
+ CYTHON_FALLTHROUGH;
+ case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8);
+ CYTHON_FALLTHROUGH;
+ case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7);
+ CYTHON_FALLTHROUGH;
+ case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6);
+ CYTHON_FALLTHROUGH;
+ case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5);
+ CYTHON_FALLTHROUGH;
+ case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
+ CYTHON_FALLTHROUGH;
+ case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
+ CYTHON_FALLTHROUGH;
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ }
+ __pyx_v_protocol = values[0];
+ __pyx_v_loop = values[1];
+ __pyx_v_limit = __Pyx_PyInt_As_int(values[2]); if (unlikely((__pyx_v_limit == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 593, __pyx_L3_error)
+ __pyx_v_timer = values[3];
+ if (values[4]) {
+ __pyx_v_max_line_size = __Pyx_PyInt_As_size_t(values[4]); if (unlikely((__pyx_v_max_line_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 594, __pyx_L3_error)
+ } else {
+ __pyx_v_max_line_size = ((size_t)0x1FFE);
+ }
+ if (values[5]) {
+ __pyx_v_max_headers = __Pyx_PyInt_As_size_t(values[5]); if (unlikely((__pyx_v_max_headers == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 594, __pyx_L3_error)
+ } else {
+ __pyx_v_max_headers = ((size_t)0x8000);
+ }
+ if (values[6]) {
+ __pyx_v_max_field_size = __Pyx_PyInt_As_size_t(values[6]); if (unlikely((__pyx_v_max_field_size == (size_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 595, __pyx_L3_error)
+ } else {
+ __pyx_v_max_field_size = ((size_t)0x1FFE);
+ }
+ __pyx_v_payload_exception = values[7];
+ if (values[8]) {
+ __pyx_v_response_with_body = __Pyx_PyObject_IsTrue(values[8]); if (unlikely((__pyx_v_response_with_body == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 596, __pyx_L3_error)
+ } else {
+
+ /* "aiohttp/_http_parser.pyx":596
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ * bint response_with_body=True, bint read_until_eof=False, # <<<<<<<<<<<<<<
+ * bint auto_decompress=True
+ * ):
+ */
+ __pyx_v_response_with_body = ((int)1);
+ }
+ if (values[9]) {
+ __pyx_v_read_until_eof = __Pyx_PyObject_IsTrue(values[9]); if (unlikely((__pyx_v_read_until_eof == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 596, __pyx_L3_error)
+ } else {
+ __pyx_v_read_until_eof = ((int)0);
+ }
+ if (values[10]) {
+ __pyx_v_auto_decompress = __Pyx_PyObject_IsTrue(values[10]); if (unlikely((__pyx_v_auto_decompress == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 597, __pyx_L3_error)
+ } else {
+
+ /* "aiohttp/_http_parser.pyx":597
+ * size_t max_field_size=8190, payload_exception=None,
+ * bint response_with_body=True, bint read_until_eof=False,
+ * bint auto_decompress=True # <<<<<<<<<<<<<<
+ * ):
+ * self._init(cparser.HTTP_RESPONSE, protocol, loop, limit, timer,
+ */
+ __pyx_v_auto_decompress = ((int)1);
+ }
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 0, 3, 11, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 593, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpResponseParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser___init__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *)__pyx_v_self), __pyx_v_protocol, __pyx_v_loop, __pyx_v_limit, __pyx_v_timer, __pyx_v_max_line_size, __pyx_v_max_headers, __pyx_v_max_field_size, __pyx_v_payload_exception, __pyx_v_response_with_body, __pyx_v_read_until_eof, __pyx_v_auto_decompress);
+
+ /* "aiohttp/_http_parser.pyx":593
+ * cdef class HttpResponseParser(HttpParser):
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None, # <<<<<<<<<<<<<<
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
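+
+/* A minimal sketch (hypothetical names) of the argument-unpacking idiom in
+ * the wrapper above: positional arguments are copied into a fixed values[]
+ * array through a switch whose cases intentionally fall through, so a single
+ * jump on the tuple size fills every supplied slot; unset slots keep their
+ * preloaded defaults.
+ */
+#if 0  /* excluded from the build; for reading only */
+static void demo_unpack(PyObject *args, PyObject *values[3])
+{
+    switch (PyTuple_GET_SIZE(args)) {               /* borrowed references */
+    case 3: values[2] = PyTuple_GET_ITEM(args, 2);  /* fall through */
+    case 2: values[1] = PyTuple_GET_ITEM(args, 1);  /* fall through */
+    case 1: values[0] = PyTuple_GET_ITEM(args, 0);  /* fall through */
+    case 0: break;
+    }
+}
+#endif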
+
+static int __pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser___init__(struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *__pyx_v_self, PyObject *__pyx_v_protocol, PyObject *__pyx_v_loop, int __pyx_v_limit, PyObject *__pyx_v_timer, size_t __pyx_v_max_line_size, size_t __pyx_v_max_headers, size_t __pyx_v_max_field_size, PyObject *__pyx_v_payload_exception, int __pyx_v_response_with_body, int __pyx_v_read_until_eof, int __pyx_v_auto_decompress) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ struct __pyx_opt_args_7aiohttp_12_http_parser_10HttpParser__init __pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__init__", 0);
+
+ /* "aiohttp/_http_parser.pyx":599
+ * bint auto_decompress=True
+ * ):
+ * self._init(cparser.HTTP_RESPONSE, protocol, loop, limit, timer, # <<<<<<<<<<<<<<
+ * max_line_size, max_headers, max_field_size,
+ * payload_exception, response_with_body, read_until_eof,
+ */
+ __pyx_t_2.__pyx_n = 8;
+ __pyx_t_2.timer = __pyx_v_timer;
+ __pyx_t_2.max_line_size = __pyx_v_max_line_size;
+ __pyx_t_2.max_headers = __pyx_v_max_headers;
+ __pyx_t_2.max_field_size = __pyx_v_max_field_size;
+ __pyx_t_2.payload_exception = __pyx_v_payload_exception;
+ __pyx_t_2.response_with_body = __pyx_v_response_with_body;
+ __pyx_t_2.read_until_eof = __pyx_v_read_until_eof;
+ __pyx_t_2.auto_decompress = __pyx_v_auto_decompress;
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpResponseParser *)__pyx_v_self->__pyx_base.__pyx_vtab)->__pyx_base._init(((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_v_self), HTTP_RESPONSE, __pyx_v_protocol, __pyx_v_loop, __pyx_v_limit, &__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 599, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":593
+ * cdef class HttpResponseParser(HttpParser):
+ *
+ * def __init__(self, protocol, loop, int limit, timer=None, # <<<<<<<<<<<<<<
+ * size_t max_line_size=8190, size_t max_headers=32768,
+ * size_t max_field_size=8190, payload_exception=None,
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpResponseParser.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
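+
+/* A minimal sketch (hypothetical names) of how the optional cdef arguments
+ * are forwarded to _init above: the caller fills a struct and records in
+ * __pyx_n how many optional slots are valid; the callee falls back to the
+ * .pyx defaults for the rest.
+ */
+#if 0  /* excluded from the build; for reading only */
+struct demo_opt_args { int __pyx_n; size_t max_line_size; };
+static void demo_init(struct demo_opt_args *opt)
+{
+    size_t max_line_size = 8190;          /* default from the .pyx signature */
+    if (opt != NULL && opt->__pyx_n > 0)
+        max_line_size = opt->max_line_size;
+    (void)max_line_size;
+}
+#endif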
+
+/* "aiohttp/_http_parser.pyx":604
+ * auto_decompress)
+ *
+ * cdef object _on_status_complete(self): # <<<<<<<<<<<<<<
+ * if self._buf:
+ * self._reason = self._buf.decode('utf-8', 'surrogateescape')
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_18HttpResponseParser__on_status_complete(struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_on_status_complete", 0);
+
+ /* "aiohttp/_http_parser.pyx":605
+ *
+ * cdef object _on_status_complete(self):
+ * if self._buf: # <<<<<<<<<<<<<<
+ * self._reason = self._buf.decode('utf-8', 'surrogateescape')
+ * PyByteArray_Resize(self._buf, 0)
+ */
+ __pyx_t_1 = (__pyx_v_self->__pyx_base._buf != Py_None)&&(PyByteArray_GET_SIZE(__pyx_v_self->__pyx_base._buf) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":606
+ * cdef object _on_status_complete(self):
+ * if self._buf:
+ * self._reason = self._buf.decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ * PyByteArray_Resize(self._buf, 0)
+ * else:
+ */
+ if (unlikely(__pyx_v_self->__pyx_base._buf == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "decode");
+ __PYX_ERR(0, 606, __pyx_L1_error)
+ }
+ __pyx_t_2 = __Pyx_decode_bytearray(__pyx_v_self->__pyx_base._buf, 0, PY_SSIZE_T_MAX, NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 606, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->__pyx_base._reason);
+ __Pyx_DECREF(__pyx_v_self->__pyx_base._reason);
+ __pyx_v_self->__pyx_base._reason = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":607
+ * if self._buf:
+ * self._reason = self._buf.decode('utf-8', 'surrogateescape')
+ * PyByteArray_Resize(self._buf, 0) # <<<<<<<<<<<<<<
+ * else:
+ * self._reason = self._reason or ''
+ */
+ __pyx_t_2 = __pyx_v_self->__pyx_base._buf;
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_t_3 = PyByteArray_Resize(__pyx_t_2, 0); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 607, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":605
+ *
+ * cdef object _on_status_complete(self):
+ * if self._buf: # <<<<<<<<<<<<<<
+ * self._reason = self._buf.decode('utf-8', 'surrogateescape')
+ * PyByteArray_Resize(self._buf, 0)
+ */
+ goto __pyx_L3;
+ }
+
+ /* "aiohttp/_http_parser.pyx":609
+ * PyByteArray_Resize(self._buf, 0)
+ * else:
+ * self._reason = self._reason or '' # <<<<<<<<<<<<<<
+ *
+ * cdef int cb_on_message_begin(cparser.http_parser* parser) except -1:
+ */
+ /*else*/ {
+ __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_self->__pyx_base._reason); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 609, __pyx_L1_error)
+ if (!__pyx_t_1) {
+ } else {
+ __Pyx_INCREF(__pyx_v_self->__pyx_base._reason);
+ __pyx_t_2 = __pyx_v_self->__pyx_base._reason;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __Pyx_INCREF(__pyx_kp_u__4);
+ __pyx_t_2 = __pyx_kp_u__4;
+ __pyx_L4_bool_binop_done:;
+ __Pyx_GIVEREF(__pyx_t_2);
+ __Pyx_GOTREF(__pyx_v_self->__pyx_base._reason);
+ __Pyx_DECREF(__pyx_v_self->__pyx_base._reason);
+ __pyx_v_self->__pyx_base._reason = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+ }
+ __pyx_L3:;
+
+ /* "aiohttp/_http_parser.pyx":604
+ * auto_decompress)
+ *
+ * cdef object _on_status_complete(self): # <<<<<<<<<<<<<<
+ * if self._buf:
+ * self._reason = self._buf.decode('utf-8', 'surrogateescape')
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpResponseParser._on_status_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
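+
+/* A minimal sketch (illustration only) of the decode step above: the reason
+ * phrase is decoded as UTF-8 with the "surrogateescape" error handler, so
+ * arbitrary header bytes survive instead of raising UnicodeDecodeError;
+ * PyUnicode_DecodeUTF8 is the underlying CPython call.
+ */
+#if 0  /* excluded from the build; for reading only */
+static PyObject *demo_decode_reason(PyObject *buf)   /* buf: a bytearray */
+{
+    return PyUnicode_DecodeUTF8(PyByteArray_AS_STRING(buf),
+                                PyByteArray_GET_SIZE(buf),
+                                "surrogateescape");  /* NULL on error */
+}
+#endif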
+
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_3__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_3__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser_2__reduce_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser_2__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *__pyx_v_self) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":2
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(1, 2, __pyx_L1_error)
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpResponseParser.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":3
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_5__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_5__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser_4__setstate_cython__(((struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_18HttpResponseParser_4__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":4
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ */
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(1, 4, __pyx_L1_error)
+
+ /* "(tree fragment)":3
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("aiohttp._http_parser.HttpResponseParser.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_parser.pyx":611
+ * self._reason = self._reason or ''
+ *
+ * cdef int cb_on_message_begin(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ *
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_message_begin(struct http_parser *__pyx_v_parser) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_message_begin", 0);
+
+ /* "aiohttp/_http_parser.pyx":612
+ *
+ * cdef int cb_on_message_begin(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ *
+ * pyparser._started = True
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":614
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ *
+ * pyparser._started = True # <<<<<<<<<<<<<<
+ * pyparser._headers = CIMultiDict()
+ * pyparser._raw_headers = []
+ */
+ __pyx_v_pyparser->_started = 1;
+
+ /* "aiohttp/_http_parser.pyx":615
+ *
+ * pyparser._started = True
+ * pyparser._headers = CIMultiDict() # <<<<<<<<<<<<<<
+ * pyparser._raw_headers = []
+ * PyByteArray_Resize(pyparser._buf, 0)
+ */
+ __Pyx_INCREF(__pyx_v_7aiohttp_12_http_parser_CIMultiDict);
+ __pyx_t_2 = __pyx_v_7aiohttp_12_http_parser_CIMultiDict; __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 615, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_pyparser->_headers);
+ __Pyx_DECREF(__pyx_v_pyparser->_headers);
+ __pyx_v_pyparser->_headers = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":616
+ * pyparser._started = True
+ * pyparser._headers = CIMultiDict()
+ * pyparser._raw_headers = [] # <<<<<<<<<<<<<<
+ * PyByteArray_Resize(pyparser._buf, 0)
+ * pyparser._path = None
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 616, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_pyparser->_raw_headers);
+ __Pyx_DECREF(__pyx_v_pyparser->_raw_headers);
+ __pyx_v_pyparser->_raw_headers = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":617
+ * pyparser._headers = CIMultiDict()
+ * pyparser._raw_headers = []
+ * PyByteArray_Resize(pyparser._buf, 0) # <<<<<<<<<<<<<<
+ * pyparser._path = None
+ * pyparser._reason = None
+ */
+ __pyx_t_1 = __pyx_v_pyparser->_buf;
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_t_4 = PyByteArray_Resize(__pyx_t_1, 0); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(0, 617, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":618
+ * pyparser._raw_headers = []
+ * PyByteArray_Resize(pyparser._buf, 0)
+ * pyparser._path = None # <<<<<<<<<<<<<<
+ * pyparser._reason = None
+ * return 0
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_pyparser->_path);
+ __Pyx_DECREF(__pyx_v_pyparser->_path);
+ __pyx_v_pyparser->_path = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":619
+ * PyByteArray_Resize(pyparser._buf, 0)
+ * pyparser._path = None
+ * pyparser._reason = None # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ __Pyx_GOTREF(__pyx_v_pyparser->_reason);
+ __Pyx_DECREF(__pyx_v_pyparser->_reason);
+ __pyx_v_pyparser->_reason = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":620
+ * pyparser._path = None
+ * pyparser._reason = None
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":611
+ * self._reason = self._reason or ''
+ *
+ * cdef int cb_on_message_begin(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_message_begin", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
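+
+/* A minimal sketch (hypothetical names) of the wiring every cb_on_* callback
+ * here relies on: the C http_parser struct carries a void *data pointer that
+ * is set to the owning Python parser object, so each callback recovers it by
+ * a cast, which is exactly what <HttpParser>parser.data compiles to above.
+ */
+#if 0  /* excluded from the build; for reading only */
+struct demo_parser { void *data; };
+static int demo_cb(struct demo_parser *parser)
+{
+    PyObject *self = (PyObject *)parser->data;  /* borrowed back-pointer */
+    return self != NULL ? 0 : -1;               /* 0 = continue, -1 = abort */
+}
+#endif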
+
+/* "aiohttp/_http_parser.pyx":623
+ *
+ *
+ * cdef int cb_on_url(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_url(struct http_parser *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ PyObject *__pyx_v_ex = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_t_10;
+ PyObject *__pyx_t_11 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_url", 0);
+
+ /* "aiohttp/_http_parser.pyx":625
+ * cdef int cb_on_url(cparser.http_parser* parser,
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * try:
+ * if length > pyparser._max_line_size:
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":626
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong(
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":627
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ * if length > pyparser._max_line_size: # <<<<<<<<<<<<<<
+ * raise LineTooLong(
+ * 'Status line is too long', pyparser._max_line_size, length)
+ */
+ __pyx_t_5 = ((__pyx_v_length > __pyx_v_pyparser->_max_line_size) != 0);
+ if (unlikely(__pyx_t_5)) {
+
+ /* "aiohttp/_http_parser.pyx":628
+ * try:
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong( # <<<<<<<<<<<<<<
+ * 'Status line is too long', pyparser._max_line_size, length)
+ * extend(pyparser._buf, at, length)
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_LineTooLong); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 628, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_6);
+
+ /* "aiohttp/_http_parser.pyx":629
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong(
+ * 'Status line is too long', pyparser._max_line_size, length) # <<<<<<<<<<<<<<
+ * extend(pyparser._buf, at, length)
+ * except BaseException as ex:
+ */
+ __pyx_t_7 = __Pyx_PyInt_FromSize_t(__pyx_v_pyparser->_max_line_size); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 629, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = __Pyx_PyInt_FromSize_t(__pyx_v_length); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 629, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = NULL;
+ __pyx_t_10 = 0;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
+ __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6);
+ if (likely(__pyx_t_9)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_6, function);
+ __pyx_t_10 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_6)) {
+ PyObject *__pyx_temp[4] = {__pyx_t_9, __pyx_kp_u_Status_line_is_too_long, __pyx_t_7, __pyx_t_8};
+ __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_10, 3+__pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 628, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
+ PyObject *__pyx_temp[4] = {__pyx_t_9, __pyx_kp_u_Status_line_is_too_long, __pyx_t_7, __pyx_t_8};
+ __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_10, 3+__pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 628, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ } else
+ #endif
+ {
+ __pyx_t_11 = PyTuple_New(3+__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 628, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ if (__pyx_t_9) {
+ __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_9); __pyx_t_9 = NULL;
+ }
+ __Pyx_INCREF(__pyx_kp_u_Status_line_is_too_long);
+ __Pyx_GIVEREF(__pyx_kp_u_Status_line_is_too_long);
+ PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_10, __pyx_kp_u_Status_line_is_too_long);
+ __Pyx_GIVEREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_10, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_11, 2+__pyx_t_10, __pyx_t_8);
+ __pyx_t_7 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 628, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(0, 628, __pyx_L3_error)
+
+ /* "aiohttp/_http_parser.pyx":627
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ * if length > pyparser._max_line_size: # <<<<<<<<<<<<<<
+ * raise LineTooLong(
+ * 'Status line is too long', pyparser._max_line_size, length)
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":630
+ * raise LineTooLong(
+ * 'Status line is too long', pyparser._max_line_size, length)
+ * extend(pyparser._buf, at, length) # <<<<<<<<<<<<<<
+ * except BaseException as ex:
+ * pyparser._last_error = ex
+ */
+ __pyx_t_1 = __pyx_v_pyparser->_buf;
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_t_6 = __pyx_f_7aiohttp_12_http_parser_extend(__pyx_t_1, __pyx_v_at, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 630, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+ /* "aiohttp/_http_parser.pyx":626
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong(
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":635
+ * return -1
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else:*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "aiohttp/_http_parser.pyx":631
+ * 'Status line is too long', pyparser._max_line_size, length)
+ * extend(pyparser._buf, at, length)
+ * except BaseException as ex: # <<<<<<<<<<<<<<
+ * pyparser._last_error = ex
+ * return -1
+ */
+ __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_10) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_url", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_1, &__pyx_t_11) < 0) __PYX_ERR(0, 631, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_ex = __pyx_t_1;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":632
+ * extend(pyparser._buf, at, length)
+ * except BaseException as ex:
+ * pyparser._last_error = ex # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __Pyx_INCREF(__pyx_v_ex);
+ __Pyx_GIVEREF(__pyx_v_ex);
+ __Pyx_GOTREF(__pyx_v_pyparser->_last_error);
+ __Pyx_DECREF(__pyx_v_pyparser->_last_error);
+ __pyx_v_pyparser->_last_error = __pyx_v_ex;
+
+ /* "aiohttp/_http_parser.pyx":633
+ * except BaseException as ex:
+ * pyparser._last_error = ex
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ goto __pyx_L14_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":631
+ * 'Status line is too long', pyparser._max_line_size, length)
+ * extend(pyparser._buf, at, length)
+ * except BaseException as ex: # <<<<<<<<<<<<<<
+ * pyparser._last_error = ex
+ * return -1
+ */
+ /*finally:*/ {
+ __pyx_L14_return: {
+ __pyx_t_10 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_ex);
+ __pyx_v_ex = NULL;
+ __pyx_r = __pyx_t_10;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":626
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong(
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":623
+ *
+ *
+ * cdef int cb_on_url(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_url", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_ex);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
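+
+/* The try/except compiled above follows one contract for all callbacks: on
+ * BaseException the exception object is stored in pyparser._last_error and
+ * the callback returns -1 so the C parser stops; the Python layer re-raises
+ * it later. A minimal sketch with PyErr_Fetch (the generated helper also
+ * normalizes the exception; names here are hypothetical):
+ */
+#if 0  /* excluded from the build; for reading only */
+static int demo_guard(PyObject **last_error)
+{
+    if (PyErr_Occurred()) {
+        PyObject *type, *value, *tb;
+        PyErr_Fetch(&type, &value, &tb);  /* take ownership of the exception */
+        Py_XDECREF(*last_error);
+        *last_error = value;              /* keep the instance for later */
+        Py_XDECREF(type); Py_XDECREF(tb);
+        return -1;                        /* tell http_parser to bail out */
+    }
+    return 0;
+}
+#endif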
+
+/* "aiohttp/_http_parser.pyx":638
+ *
+ *
+ * cdef int cb_on_status(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_status(struct http_parser *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ PyObject *__pyx_v_ex = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_t_10;
+ PyObject *__pyx_t_11 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_status", 0);
+
+ /* "aiohttp/_http_parser.pyx":640
+ * cdef int cb_on_status(cparser.http_parser* parser,
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * cdef str reason
+ * try:
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":642
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef str reason
+ * try: # <<<<<<<<<<<<<<
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong(
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":643
+ * cdef str reason
+ * try:
+ * if length > pyparser._max_line_size: # <<<<<<<<<<<<<<
+ * raise LineTooLong(
+ * 'Status line is too long', pyparser._max_line_size, length)
+ */
+ __pyx_t_5 = ((__pyx_v_length > __pyx_v_pyparser->_max_line_size) != 0);
+ if (unlikely(__pyx_t_5)) {
+
+ /* "aiohttp/_http_parser.pyx":644
+ * try:
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong( # <<<<<<<<<<<<<<
+ * 'Status line is too long', pyparser._max_line_size, length)
+ * extend(pyparser._buf, at, length)
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_LineTooLong); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 644, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_6);
+
+ /* "aiohttp/_http_parser.pyx":645
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong(
+ * 'Status line is too long', pyparser._max_line_size, length) # <<<<<<<<<<<<<<
+ * extend(pyparser._buf, at, length)
+ * except BaseException as ex:
+ */
+ __pyx_t_7 = __Pyx_PyInt_FromSize_t(__pyx_v_pyparser->_max_line_size); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 645, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = __Pyx_PyInt_FromSize_t(__pyx_v_length); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 645, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = NULL;
+ __pyx_t_10 = 0;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) {
+ __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_6);
+ if (likely(__pyx_t_9)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_6, function);
+ __pyx_t_10 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_6)) {
+ PyObject *__pyx_temp[4] = {__pyx_t_9, __pyx_kp_u_Status_line_is_too_long, __pyx_t_7, __pyx_t_8};
+ __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_10, 3+__pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 644, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
+ PyObject *__pyx_temp[4] = {__pyx_t_9, __pyx_kp_u_Status_line_is_too_long, __pyx_t_7, __pyx_t_8};
+ __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_10, 3+__pyx_t_10); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 644, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ } else
+ #endif
+ {
+ __pyx_t_11 = PyTuple_New(3+__pyx_t_10); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 644, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ if (__pyx_t_9) {
+ __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_9); __pyx_t_9 = NULL;
+ }
+ __Pyx_INCREF(__pyx_kp_u_Status_line_is_too_long);
+ __Pyx_GIVEREF(__pyx_kp_u_Status_line_is_too_long);
+ PyTuple_SET_ITEM(__pyx_t_11, 0+__pyx_t_10, __pyx_kp_u_Status_line_is_too_long);
+ __Pyx_GIVEREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_11, 1+__pyx_t_10, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_11, 2+__pyx_t_10, __pyx_t_8);
+ __pyx_t_7 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_11, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 644, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(0, 644, __pyx_L3_error)
+
+ /* "aiohttp/_http_parser.pyx":643
+ * cdef str reason
+ * try:
+ * if length > pyparser._max_line_size: # <<<<<<<<<<<<<<
+ * raise LineTooLong(
+ * 'Status line is too long', pyparser._max_line_size, length)
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":646
+ * raise LineTooLong(
+ * 'Status line is too long', pyparser._max_line_size, length)
+ * extend(pyparser._buf, at, length) # <<<<<<<<<<<<<<
+ * except BaseException as ex:
+ * pyparser._last_error = ex
+ */
+ __pyx_t_1 = __pyx_v_pyparser->_buf;
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_t_6 = __pyx_f_7aiohttp_12_http_parser_extend(__pyx_t_1, __pyx_v_at, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 646, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+ /* "aiohttp/_http_parser.pyx":642
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef str reason
+ * try: # <<<<<<<<<<<<<<
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong(
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":651
+ * return -1
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else:*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "aiohttp/_http_parser.pyx":647
+ * 'Status line is too long', pyparser._max_line_size, length)
+ * extend(pyparser._buf, at, length)
+ * except BaseException as ex: # <<<<<<<<<<<<<<
+ * pyparser._last_error = ex
+ * return -1
+ */
+ __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_10) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_1, &__pyx_t_11) < 0) __PYX_ERR(0, 647, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_ex = __pyx_t_1;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":648
+ * extend(pyparser._buf, at, length)
+ * except BaseException as ex:
+ * pyparser._last_error = ex # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __Pyx_INCREF(__pyx_v_ex);
+ __Pyx_GIVEREF(__pyx_v_ex);
+ __Pyx_GOTREF(__pyx_v_pyparser->_last_error);
+ __Pyx_DECREF(__pyx_v_pyparser->_last_error);
+ __pyx_v_pyparser->_last_error = __pyx_v_ex;
+
+ /* "aiohttp/_http_parser.pyx":649
+ * except BaseException as ex:
+ * pyparser._last_error = ex
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ goto __pyx_L14_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":647
+ * 'Status line is too long', pyparser._max_line_size, length)
+ * extend(pyparser._buf, at, length)
+ * except BaseException as ex: # <<<<<<<<<<<<<<
+ * pyparser._last_error = ex
+ * return -1
+ */
+ /*finally:*/ {
+ __pyx_L14_return: {
+ __pyx_t_10 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_ex);
+ __pyx_v_ex = NULL;
+ __pyx_r = __pyx_t_10;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":642
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef str reason
+ * try: # <<<<<<<<<<<<<<
+ * if length > pyparser._max_line_size:
+ * raise LineTooLong(
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":638
+ *
+ *
+ * cdef int cb_on_status(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_status", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_ex);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
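+
+/* The raise above builds LineTooLong('Status line is too long', max, length)
+ * through Cython's fastcall helpers. A rough equivalent using the stable
+ * PyObject_CallFunction API (line_too_long is a hypothetical borrowed
+ * reference to the exception class):
+ */
+#if 0  /* excluded from the build; for reading only */
+static int demo_raise(PyObject *line_too_long, size_t max_size, size_t length)
+{
+    PyObject *exc = PyObject_CallFunction(line_too_long, "snn",
+                                          "Status line is too long",
+                                          (Py_ssize_t)max_size,
+                                          (Py_ssize_t)length);
+    if (exc == NULL) return -1;
+    PyErr_SetObject((PyObject *)Py_TYPE(exc), exc);  /* raise the instance */
+    Py_DECREF(exc);
+    return -1;
+}
+#endif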
+
+/* "aiohttp/_http_parser.pyx":654
+ *
+ *
+ * cdef int cb_on_header_field(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_header_field(struct http_parser *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ Py_ssize_t __pyx_v_size;
+ PyObject *__pyx_v_ex = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ Py_ssize_t __pyx_t_5;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ int __pyx_t_11;
+ PyObject *__pyx_t_12 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_header_field", 0);
+
+ /* "aiohttp/_http_parser.pyx":656
+ * cdef int cb_on_header_field(cparser.http_parser* parser,
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t size
+ * try:
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":658
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef Py_ssize_t size
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_status_complete()
+ * size = len(pyparser._raw_name) + length
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":659
+ * cdef Py_ssize_t size
+ * try:
+ * pyparser._on_status_complete() # <<<<<<<<<<<<<<
+ * size = len(pyparser._raw_name) + length
+ * if size > pyparser._max_field_size:
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_status_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 659, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":660
+ * try:
+ * pyparser._on_status_complete()
+ * size = len(pyparser._raw_name) + length # <<<<<<<<<<<<<<
+ * if size > pyparser._max_field_size:
+ * raise LineTooLong(
+ */
+ __pyx_t_1 = __pyx_v_pyparser->_raw_name;
+ __Pyx_INCREF(__pyx_t_1);
+ if (unlikely(__pyx_t_1 == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(0, 660, __pyx_L3_error)
+ }
+ __pyx_t_5 = PyByteArray_GET_SIZE(__pyx_t_1); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 660, __pyx_L3_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_size = (__pyx_t_5 + __pyx_v_length);
+
+ /* "aiohttp/_http_parser.pyx":661
+ * pyparser._on_status_complete()
+ * size = len(pyparser._raw_name) + length
+ * if size > pyparser._max_field_size: # <<<<<<<<<<<<<<
+ * raise LineTooLong(
+ * 'Header name is too long', pyparser._max_field_size, size)
+ */
+ __pyx_t_6 = ((__pyx_v_size > __pyx_v_pyparser->_max_field_size) != 0);
+ if (unlikely(__pyx_t_6)) {
+
+ /* "aiohttp/_http_parser.pyx":662
+ * size = len(pyparser._raw_name) + length
+ * if size > pyparser._max_field_size:
+ * raise LineTooLong( # <<<<<<<<<<<<<<
+ * 'Header name is too long', pyparser._max_field_size, size)
+ * pyparser._on_header_field(at, length)
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_LineTooLong); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 662, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_7);
+
+ /* "aiohttp/_http_parser.pyx":663
+ * if size > pyparser._max_field_size:
+ * raise LineTooLong(
+ * 'Header name is too long', pyparser._max_field_size, size) # <<<<<<<<<<<<<<
+ * pyparser._on_header_field(at, length)
+ * except BaseException as ex:
+ */
+ __pyx_t_8 = __Pyx_PyInt_FromSize_t(__pyx_v_pyparser->_max_field_size); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 663, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 663, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = NULL;
+ __pyx_t_11 = 0;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
+ __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_7);
+ if (likely(__pyx_t_10)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_10);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_7, function);
+ __pyx_t_11 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_7)) {
+ PyObject *__pyx_temp[4] = {__pyx_t_10, __pyx_kp_u_Header_name_is_too_long, __pyx_t_8, __pyx_t_9};
+ __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_11, 3+__pyx_t_11); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 662, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) {
+ PyObject *__pyx_temp[4] = {__pyx_t_10, __pyx_kp_u_Header_name_is_too_long, __pyx_t_8, __pyx_t_9};
+ __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_11, 3+__pyx_t_11); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 662, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ } else
+ #endif
+ {
+ __pyx_t_12 = PyTuple_New(3+__pyx_t_11); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 662, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ if (__pyx_t_10) {
+ __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_10); __pyx_t_10 = NULL;
+ }
+ __Pyx_INCREF(__pyx_kp_u_Header_name_is_too_long);
+ __Pyx_GIVEREF(__pyx_kp_u_Header_name_is_too_long);
+ PyTuple_SET_ITEM(__pyx_t_12, 0+__pyx_t_11, __pyx_kp_u_Header_name_is_too_long);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_12, 1+__pyx_t_11, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_12, 2+__pyx_t_11, __pyx_t_9);
+ __pyx_t_8 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 662, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(0, 662, __pyx_L3_error)
+
+ /* "aiohttp/_http_parser.pyx":661
+ * pyparser._on_status_complete()
+ * size = len(pyparser._raw_name) + length
+ * if size > pyparser._max_field_size: # <<<<<<<<<<<<<<
+ * raise LineTooLong(
+ * 'Header name is too long', pyparser._max_field_size, size)
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":664
+ * raise LineTooLong(
+ * 'Header name is too long', pyparser._max_field_size, size)
+ * pyparser._on_header_field(at, length) # <<<<<<<<<<<<<<
+ * except BaseException as ex:
+ * pyparser._last_error = ex
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_header_field(__pyx_v_pyparser, __pyx_v_at, __pyx_v_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 664, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":658
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef Py_ssize_t size
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_status_complete()
+ * size = len(pyparser._raw_name) + length
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":669
+ * return -1
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else:*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "aiohttp/_http_parser.pyx":665
+ * 'Header name is too long', pyparser._max_field_size, size)
+ * pyparser._on_header_field(at, length)
+ * except BaseException as ex: # <<<<<<<<<<<<<<
+ * pyparser._last_error = ex
+ * return -1
+ */
+ __pyx_t_11 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_11) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_header_field", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_7, &__pyx_t_12) < 0) __PYX_ERR(0, 665, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(__pyx_t_7);
+ __pyx_v_ex = __pyx_t_7;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":666
+ * pyparser._on_header_field(at, length)
+ * except BaseException as ex:
+ * pyparser._last_error = ex # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __Pyx_INCREF(__pyx_v_ex);
+ __Pyx_GIVEREF(__pyx_v_ex);
+ __Pyx_GOTREF(__pyx_v_pyparser->_last_error);
+ __Pyx_DECREF(__pyx_v_pyparser->_last_error);
+ __pyx_v_pyparser->_last_error = __pyx_v_ex;
+
+ /* "aiohttp/_http_parser.pyx":667
+ * except BaseException as ex:
+ * pyparser._last_error = ex
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ goto __pyx_L14_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":665
+ * 'Header name is too long', pyparser._max_field_size, size)
+ * pyparser._on_header_field(at, length)
+ * except BaseException as ex: # <<<<<<<<<<<<<<
+ * pyparser._last_error = ex
+ * return -1
+ */
+ /*finally:*/ {
+ __pyx_L14_return: {
+ __pyx_t_11 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_ex);
+ __pyx_v_ex = NULL;
+ __pyx_r = __pyx_t_11;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":658
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef Py_ssize_t size
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_status_complete()
+ * size = len(pyparser._raw_name) + length
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":654
+ *
+ *
+ * cdef int cb_on_header_field(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_header_field", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_ex);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
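+
+/* For reference, a sketch of the Cython callback this block compiles,
+ * reassembled from the "aiohttp/_http_parser.pyx" fragments quoted in the
+ * comments above (indentation is assumed; the fragments supply the lines).
+ * The /*finally:*/ section in the generated C implements Python 3's implicit
+ * deletion of the `ex` variable at the end of the except clause and has no
+ * explicit counterpart in the .pyx source:
+ *
+ *     cdef int cb_on_header_field(cparser.http_parser* parser,
+ *                                 const char *at, size_t length) except -1:
+ *         cdef HttpParser pyparser = <HttpParser>parser.data
+ *         cdef Py_ssize_t size
+ *         try:
+ *             pyparser._on_status_complete()
+ *             size = len(pyparser._raw_name) + length
+ *             if size > pyparser._max_field_size:
+ *                 raise LineTooLong(
+ *                     'Header name is too long', pyparser._max_field_size, size)
+ *             pyparser._on_header_field(at, length)
+ *         except BaseException as ex:
+ *             pyparser._last_error = ex
+ *             return -1
+ *         else:
+ *             return 0
+ */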
+
+/* "aiohttp/_http_parser.pyx":672
+ *
+ *
+ * cdef int cb_on_header_value(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_header_value(struct http_parser *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ Py_ssize_t __pyx_v_size;
+ PyObject *__pyx_v_ex = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ Py_ssize_t __pyx_t_5;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ int __pyx_t_11;
+ PyObject *__pyx_t_12 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_header_value", 0);
+
+ /* "aiohttp/_http_parser.pyx":674
+ * cdef int cb_on_header_value(cparser.http_parser* parser,
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t size
+ * try:
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":676
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef Py_ssize_t size
+ * try: # <<<<<<<<<<<<<<
+ * size = len(pyparser._raw_value) + length
+ * if size > pyparser._max_field_size:
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":677
+ * cdef Py_ssize_t size
+ * try:
+ * size = len(pyparser._raw_value) + length # <<<<<<<<<<<<<<
+ * if size > pyparser._max_field_size:
+ * raise LineTooLong(
+ */
+ __pyx_t_1 = __pyx_v_pyparser->_raw_value;
+ __Pyx_INCREF(__pyx_t_1);
+ if (unlikely(__pyx_t_1 == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(0, 677, __pyx_L3_error)
+ }
+ __pyx_t_5 = PyByteArray_GET_SIZE(__pyx_t_1); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(0, 677, __pyx_L3_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v_size = (__pyx_t_5 + __pyx_v_length);
+
+ /* "aiohttp/_http_parser.pyx":678
+ * try:
+ * size = len(pyparser._raw_value) + length
+ * if size > pyparser._max_field_size: # <<<<<<<<<<<<<<
+ * raise LineTooLong(
+ * 'Header value is too long', pyparser._max_field_size, size)
+ */
+ __pyx_t_6 = ((__pyx_v_size > __pyx_v_pyparser->_max_field_size) != 0);
+ if (unlikely(__pyx_t_6)) {
+
+ /* "aiohttp/_http_parser.pyx":679
+ * size = len(pyparser._raw_value) + length
+ * if size > pyparser._max_field_size:
+ * raise LineTooLong( # <<<<<<<<<<<<<<
+ * 'Header value is too long', pyparser._max_field_size, size)
+ * pyparser._on_header_value(at, length)
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_LineTooLong); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 679, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_7);
+
+ /* "aiohttp/_http_parser.pyx":680
+ * if size > pyparser._max_field_size:
+ * raise LineTooLong(
+ * 'Header value is too long', pyparser._max_field_size, size) # <<<<<<<<<<<<<<
+ * pyparser._on_header_value(at, length)
+ * except BaseException as ex:
+ */
+ __pyx_t_8 = __Pyx_PyInt_FromSize_t(__pyx_v_pyparser->_max_field_size); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 680, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_size); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 680, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_t_10 = NULL;
+ __pyx_t_11 = 0;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_7))) {
+ __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_7);
+ if (likely(__pyx_t_10)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_10);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_7, function);
+ __pyx_t_11 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_7)) {
+ PyObject *__pyx_temp[4] = {__pyx_t_10, __pyx_kp_u_Header_value_is_too_long, __pyx_t_8, __pyx_t_9};
+ __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_11, 3+__pyx_t_11); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 679, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) {
+ PyObject *__pyx_temp[4] = {__pyx_t_10, __pyx_kp_u_Header_value_is_too_long, __pyx_t_8, __pyx_t_9};
+ __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-__pyx_t_11, 3+__pyx_t_11); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 679, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ } else
+ #endif
+ {
+ __pyx_t_12 = PyTuple_New(3+__pyx_t_11); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 679, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ if (__pyx_t_10) {
+ __Pyx_GIVEREF(__pyx_t_10); PyTuple_SET_ITEM(__pyx_t_12, 0, __pyx_t_10); __pyx_t_10 = NULL;
+ }
+ __Pyx_INCREF(__pyx_kp_u_Header_value_is_too_long);
+ __Pyx_GIVEREF(__pyx_kp_u_Header_value_is_too_long);
+ PyTuple_SET_ITEM(__pyx_t_12, 0+__pyx_t_11, __pyx_kp_u_Header_value_is_too_long);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_12, 1+__pyx_t_11, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_12, 2+__pyx_t_11, __pyx_t_9);
+ __pyx_t_8 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 679, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_Raise(__pyx_t_1, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __PYX_ERR(0, 679, __pyx_L3_error)
+
+ /* "aiohttp/_http_parser.pyx":678
+ * try:
+ * size = len(pyparser._raw_value) + length
+ * if size > pyparser._max_field_size: # <<<<<<<<<<<<<<
+ * raise LineTooLong(
+ * 'Header value is too long', pyparser._max_field_size, size)
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":681
+ * raise LineTooLong(
+ * 'Header value is too long', pyparser._max_field_size, size)
+ * pyparser._on_header_value(at, length) # <<<<<<<<<<<<<<
+ * except BaseException as ex:
+ * pyparser._last_error = ex
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_header_value(__pyx_v_pyparser, __pyx_v_at, __pyx_v_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 681, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":676
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef Py_ssize_t size
+ * try: # <<<<<<<<<<<<<<
+ * size = len(pyparser._raw_value) + length
+ * if size > pyparser._max_field_size:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":686
+ * return -1
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else:*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "aiohttp/_http_parser.pyx":682
+ * 'Header value is too long', pyparser._max_field_size, size)
+ * pyparser._on_header_value(at, length)
+ * except BaseException as ex: # <<<<<<<<<<<<<<
+ * pyparser._last_error = ex
+ * return -1
+ */
+ __pyx_t_11 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_11) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_header_value", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_7, &__pyx_t_12) < 0) __PYX_ERR(0, 682, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_INCREF(__pyx_t_7);
+ __pyx_v_ex = __pyx_t_7;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":683
+ * pyparser._on_header_value(at, length)
+ * except BaseException as ex:
+ * pyparser._last_error = ex # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __Pyx_INCREF(__pyx_v_ex);
+ __Pyx_GIVEREF(__pyx_v_ex);
+ __Pyx_GOTREF(__pyx_v_pyparser->_last_error);
+ __Pyx_DECREF(__pyx_v_pyparser->_last_error);
+ __pyx_v_pyparser->_last_error = __pyx_v_ex;
+
+ /* "aiohttp/_http_parser.pyx":684
+ * except BaseException as ex:
+ * pyparser._last_error = ex
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ goto __pyx_L14_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":682
+ * 'Header value is too long', pyparser._max_field_size, size)
+ * pyparser._on_header_value(at, length)
+ * except BaseException as ex: # <<<<<<<<<<<<<<
+ * pyparser._last_error = ex
+ * return -1
+ */
+ /*finally:*/ {
+ __pyx_L14_return: {
+ __pyx_t_11 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_ex);
+ __pyx_v_ex = NULL;
+ __pyx_r = __pyx_t_11;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":676
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef Py_ssize_t size
+ * try: # <<<<<<<<<<<<<<
+ * size = len(pyparser._raw_value) + length
+ * if size > pyparser._max_field_size:
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":672
+ *
+ *
+ * cdef int cb_on_header_value(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_header_value", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_ex);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
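+
+/* For reference, the header-value callback compiled above, reassembled as a
+ * sketch from the quoted "aiohttp/_http_parser.pyx" fragments (layout
+ * assumed). It mirrors cb_on_header_field but accumulates into _raw_value
+ * and skips the _on_status_complete() call:
+ *
+ *     cdef int cb_on_header_value(cparser.http_parser* parser,
+ *                                 const char *at, size_t length) except -1:
+ *         cdef HttpParser pyparser = <HttpParser>parser.data
+ *         cdef Py_ssize_t size
+ *         try:
+ *             size = len(pyparser._raw_value) + length
+ *             if size > pyparser._max_field_size:
+ *                 raise LineTooLong(
+ *                     'Header value is too long', pyparser._max_field_size, size)
+ *             pyparser._on_header_value(at, length)
+ *         except BaseException as ex:
+ *             pyparser._last_error = ex
+ *             return -1
+ *         else:
+ *             return 0
+ */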
+
+/* "aiohttp/_http_parser.pyx":689
+ *
+ *
+ * cdef int cb_on_headers_complete(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_headers_complete(struct http_parser *__pyx_v_parser) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ PyObject *__pyx_v_exc = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_headers_complete", 0);
+
+ /* "aiohttp/_http_parser.pyx":690
+ *
+ * cdef int cb_on_headers_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * try:
+ * pyparser._on_status_complete()
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":691
+ * cdef int cb_on_headers_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_status_complete()
+ * pyparser._on_headers_complete()
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":692
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ * pyparser._on_status_complete() # <<<<<<<<<<<<<<
+ * pyparser._on_headers_complete()
+ * except BaseException as exc:
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_status_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 692, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":693
+ * try:
+ * pyparser._on_status_complete()
+ * pyparser._on_headers_complete() # <<<<<<<<<<<<<<
+ * except BaseException as exc:
+ * pyparser._last_error = exc
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_headers_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 693, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":691
+ * cdef int cb_on_headers_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_status_complete()
+ * pyparser._on_headers_complete()
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":698
+ * return -1
+ * else:
+ * if pyparser._cparser.upgrade or pyparser._cparser.method == 5: # CONNECT # <<<<<<<<<<<<<<
+ * return 2
+ * else:
+ */
+ /*else:*/ {
+ __pyx_t_6 = (__pyx_v_pyparser->_cparser->upgrade != 0);
+ if (!__pyx_t_6) {
+ } else {
+ __pyx_t_5 = __pyx_t_6;
+ goto __pyx_L10_bool_binop_done;
+ }
+ __pyx_t_6 = ((__pyx_v_pyparser->_cparser->method == 5) != 0);
+ __pyx_t_5 = __pyx_t_6;
+ __pyx_L10_bool_binop_done:;
+ if (__pyx_t_5) {
+
+ /* "aiohttp/_http_parser.pyx":699
+ * else:
+ * if pyparser._cparser.upgrade or pyparser._cparser.method == 5: # CONNECT
+ * return 2 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = 2;
+ goto __pyx_L6_except_return;
+
+ /* "aiohttp/_http_parser.pyx":698
+ * return -1
+ * else:
+ * if pyparser._cparser.upgrade or pyparser._cparser.method == 5: # CONNECT # <<<<<<<<<<<<<<
+ * return 2
+ * else:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":701
+ * return 2
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":694
+ * pyparser._on_status_complete()
+ * pyparser._on_headers_complete()
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * pyparser._last_error = exc
+ * return -1
+ */
+ __pyx_t_7 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_7) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_headers_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_8, &__pyx_t_9) < 0) __PYX_ERR(0, 694, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(__pyx_t_8);
+ __pyx_v_exc = __pyx_t_8;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":695
+ * pyparser._on_headers_complete()
+ * except BaseException as exc:
+ * pyparser._last_error = exc # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __Pyx_INCREF(__pyx_v_exc);
+ __Pyx_GIVEREF(__pyx_v_exc);
+ __Pyx_GOTREF(__pyx_v_pyparser->_last_error);
+ __Pyx_DECREF(__pyx_v_pyparser->_last_error);
+ __pyx_v_pyparser->_last_error = __pyx_v_exc;
+
+ /* "aiohttp/_http_parser.pyx":696
+ * except BaseException as exc:
+ * pyparser._last_error = exc
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * if pyparser._cparser.upgrade or pyparser._cparser.method == 5: # CONNECT
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ goto __pyx_L16_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":694
+ * pyparser._on_status_complete()
+ * pyparser._on_headers_complete()
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * pyparser._last_error = exc
+ * return -1
+ */
+ /*finally:*/ {
+ __pyx_L16_return: {
+ __pyx_t_7 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_exc);
+ __pyx_v_exc = NULL;
+ __pyx_r = __pyx_t_7;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":691
+ * cdef int cb_on_headers_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_status_complete()
+ * pyparser._on_headers_complete()
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":689
+ *
+ *
+ * cdef int cb_on_headers_complete(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_headers_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_exc);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
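+
+/* For reference, a sketch of the compiled callback, from the quoted .pyx
+ * fragments (layout assumed). Per the source comment, method == 5 is
+ * http_parser's HTTP_CONNECT; the non-zero return for upgrade/CONNECT
+ * signals the C parser that no message body follows:
+ *
+ *     cdef int cb_on_headers_complete(cparser.http_parser* parser) except -1:
+ *         cdef HttpParser pyparser = <HttpParser>parser.data
+ *         try:
+ *             pyparser._on_status_complete()
+ *             pyparser._on_headers_complete()
+ *         except BaseException as exc:
+ *             pyparser._last_error = exc
+ *             return -1
+ *         else:
+ *             if pyparser._cparser.upgrade or pyparser._cparser.method == 5:  # CONNECT
+ *                 return 2
+ *             else:
+ *                 return 0
+ */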
+
+/* "aiohttp/_http_parser.pyx":704
+ *
+ *
+ * cdef int cb_on_body(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_body(struct http_parser *__pyx_v_parser, char const *__pyx_v_at, size_t __pyx_v_length) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ PyObject *__pyx_v_body = 0;
+ PyObject *__pyx_v_exc = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_t_8;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_t_10;
+ int __pyx_t_11;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ PyObject *__pyx_t_14 = NULL;
+ PyObject *__pyx_t_15 = NULL;
+ int __pyx_t_16;
+ char const *__pyx_t_17;
+ PyObject *__pyx_t_18 = NULL;
+ PyObject *__pyx_t_19 = NULL;
+ PyObject *__pyx_t_20 = NULL;
+ PyObject *__pyx_t_21 = NULL;
+ PyObject *__pyx_t_22 = NULL;
+ PyObject *__pyx_t_23 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_body", 0);
+
+ /* "aiohttp/_http_parser.pyx":706
+ * cdef int cb_on_body(cparser.http_parser* parser,
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * cdef bytes body = at[:length]
+ * try:
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":707
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef bytes body = at[:length] # <<<<<<<<<<<<<<
+ * try:
+ * pyparser._payload.feed_data(body, length)
+ */
+ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_at + 0, __pyx_v_length - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 707, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_body = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":708
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef bytes body = at[:length]
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._payload.feed_data(body, length)
+ * except BaseException as exc:
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":709
+ * cdef bytes body = at[:length]
+ * try:
+ * pyparser._payload.feed_data(body, length) # <<<<<<<<<<<<<<
+ * except BaseException as exc:
+ * if pyparser._payload_exception is not None:
+ */
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_pyparser->_payload, __pyx_n_s_feed_data); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 709, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = __Pyx_PyInt_FromSize_t(__pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 709, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_7 = NULL;
+ __pyx_t_8 = 0;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
+ __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
+ if (likely(__pyx_t_7)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
+ __Pyx_INCREF(__pyx_t_7);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_5, function);
+ __pyx_t_8 = 1;
+ }
+ }
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(__pyx_t_5)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_v_body, __pyx_t_6};
+ __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 709, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ } else
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
+ PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_v_body, __pyx_t_6};
+ __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 709, __pyx_L3_error)
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ } else
+ #endif
+ {
+ __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 709, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_9);
+ if (__pyx_t_7) {
+ __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
+ }
+ __Pyx_INCREF(__pyx_v_body);
+ __Pyx_GIVEREF(__pyx_v_body);
+ PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_v_body);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_t_6);
+ __pyx_t_6 = 0;
+ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 709, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ }
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":708
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef bytes body = at[:length]
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._payload.feed_data(body, length)
+ * except BaseException as exc:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":718
+ * return -1
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else:*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+
+ /* "aiohttp/_http_parser.pyx":710
+ * try:
+ * pyparser._payload.feed_data(body, length)
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * if pyparser._payload_exception is not None:
+ * pyparser._payload.set_exception(pyparser._payload_exception(str(exc)))
+ */
+ __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_8) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_body", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(0, 710, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_INCREF(__pyx_t_5);
+ __pyx_v_exc = __pyx_t_5;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":711
+ * pyparser._payload.feed_data(body, length)
+ * except BaseException as exc:
+ * if pyparser._payload_exception is not None: # <<<<<<<<<<<<<<
+ * pyparser._payload.set_exception(pyparser._payload_exception(str(exc)))
+ * else:
+ */
+ __pyx_t_10 = (__pyx_v_pyparser->_payload_exception != Py_None);
+ __pyx_t_11 = (__pyx_t_10 != 0);
+ if (__pyx_t_11) {
+
+ /* "aiohttp/_http_parser.pyx":712
+ * except BaseException as exc:
+ * if pyparser._payload_exception is not None:
+ * pyparser._payload.set_exception(pyparser._payload_exception(str(exc))) # <<<<<<<<<<<<<<
+ * else:
+ * pyparser._payload.set_exception(exc)
+ */
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_pyparser->_payload, __pyx_n_s_set_exception); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 712, __pyx_L14_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_13 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyUnicode_Type)), __pyx_v_exc); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 712, __pyx_L14_error)
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_INCREF(__pyx_v_pyparser->_payload_exception);
+ __pyx_t_14 = __pyx_v_pyparser->_payload_exception; __pyx_t_15 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_14))) {
+ __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_14);
+ if (likely(__pyx_t_15)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_14);
+ __Pyx_INCREF(__pyx_t_15);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_14, function);
+ }
+ }
+ __pyx_t_12 = (__pyx_t_15) ? __Pyx_PyObject_Call2Args(__pyx_t_14, __pyx_t_15, __pyx_t_13) : __Pyx_PyObject_CallOneArg(__pyx_t_14, __pyx_t_13);
+ __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0;
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 712, __pyx_L14_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __pyx_t_14 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
+ __pyx_t_14 = PyMethod_GET_SELF(__pyx_t_7);
+ if (likely(__pyx_t_14)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_14);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_7, function);
+ }
+ }
+ __pyx_t_6 = (__pyx_t_14) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_14, __pyx_t_12) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 712, __pyx_L14_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+
+ /* "aiohttp/_http_parser.pyx":711
+ * pyparser._payload.feed_data(body, length)
+ * except BaseException as exc:
+ * if pyparser._payload_exception is not None: # <<<<<<<<<<<<<<
+ * pyparser._payload.set_exception(pyparser._payload_exception(str(exc)))
+ * else:
+ */
+ goto __pyx_L16;
+ }
+
+ /* "aiohttp/_http_parser.pyx":714
+ * pyparser._payload.set_exception(pyparser._payload_exception(str(exc)))
+ * else:
+ * pyparser._payload.set_exception(exc) # <<<<<<<<<<<<<<
+ * pyparser._payload_error = 1
+ * return -1
+ */
+ /*else*/ {
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_pyparser->_payload, __pyx_n_s_set_exception); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 714, __pyx_L14_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_12 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
+ __pyx_t_12 = PyMethod_GET_SELF(__pyx_t_7);
+ if (likely(__pyx_t_12)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_12);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_7, function);
+ }
+ }
+ __pyx_t_6 = (__pyx_t_12) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_12, __pyx_v_exc) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_v_exc);
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 714, __pyx_L14_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ }
+ __pyx_L16:;
+
+ /* "aiohttp/_http_parser.pyx":715
+ * else:
+ * pyparser._payload.set_exception(exc)
+ * pyparser._payload_error = 1 # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __pyx_v_pyparser->_payload_error = 1;
+
+ /* "aiohttp/_http_parser.pyx":716
+ * pyparser._payload.set_exception(exc)
+ * pyparser._payload_error = 1
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ goto __pyx_L13_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":710
+ * try:
+ * pyparser._payload.feed_data(body, length)
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * if pyparser._payload_exception is not None:
+ * pyparser._payload.set_exception(pyparser._payload_exception(str(exc)))
+ */
+ /*finally:*/ {
+ __pyx_L14_error:;
+ /*exception exit:*/{
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0;
+ __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_21, &__pyx_t_22, &__pyx_t_23);
+ if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20) < 0)) __Pyx_ErrFetch(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20);
+ __Pyx_XGOTREF(__pyx_t_18);
+ __Pyx_XGOTREF(__pyx_t_19);
+ __Pyx_XGOTREF(__pyx_t_20);
+ __Pyx_XGOTREF(__pyx_t_21);
+ __Pyx_XGOTREF(__pyx_t_22);
+ __Pyx_XGOTREF(__pyx_t_23);
+ __pyx_t_8 = __pyx_lineno; __pyx_t_16 = __pyx_clineno; __pyx_t_17 = __pyx_filename;
+ {
+ __Pyx_DECREF(__pyx_v_exc);
+ __pyx_v_exc = NULL;
+ }
+ if (PY_MAJOR_VERSION >= 3) {
+ __Pyx_XGIVEREF(__pyx_t_21);
+ __Pyx_XGIVEREF(__pyx_t_22);
+ __Pyx_XGIVEREF(__pyx_t_23);
+ __Pyx_ExceptionReset(__pyx_t_21, __pyx_t_22, __pyx_t_23);
+ }
+ __Pyx_XGIVEREF(__pyx_t_18);
+ __Pyx_XGIVEREF(__pyx_t_19);
+ __Pyx_XGIVEREF(__pyx_t_20);
+ __Pyx_ErrRestore(__pyx_t_18, __pyx_t_19, __pyx_t_20);
+ __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0;
+ __pyx_lineno = __pyx_t_8; __pyx_clineno = __pyx_t_16; __pyx_filename = __pyx_t_17;
+ goto __pyx_L5_except_error;
+ }
+ __pyx_L13_return: {
+ __pyx_t_16 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_exc);
+ __pyx_v_exc = NULL;
+ __pyx_r = __pyx_t_16;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":708
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * cdef bytes body = at[:length]
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._payload.feed_data(body, length)
+ * except BaseException as exc:
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":704
+ *
+ *
+ * cdef int cb_on_body(cparser.http_parser* parser, # <<<<<<<<<<<<<<
+ * const char *at, size_t length) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_XDECREF(__pyx_t_14);
+ __Pyx_XDECREF(__pyx_t_15);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_body", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_body);
+ __Pyx_XDECREF(__pyx_v_exc);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
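+
+/* For reference, a sketch of the body callback compiled above, reassembled
+ * from the quoted "aiohttp/_http_parser.pyx" fragments (layout assumed).
+ * The body bytes are fed to the payload stream; on failure the exception is
+ * attached to the payload (optionally wrapped in _payload_exception) and the
+ * callback reports failure to the C parser via -1:
+ *
+ *     cdef int cb_on_body(cparser.http_parser* parser,
+ *                         const char *at, size_t length) except -1:
+ *         cdef HttpParser pyparser = <HttpParser>parser.data
+ *         cdef bytes body = at[:length]
+ *         try:
+ *             pyparser._payload.feed_data(body, length)
+ *         except BaseException as exc:
+ *             if pyparser._payload_exception is not None:
+ *                 pyparser._payload.set_exception(pyparser._payload_exception(str(exc)))
+ *             else:
+ *                 pyparser._payload.set_exception(exc)
+ *             pyparser._payload_error = 1
+ *             return -1
+ *         else:
+ *             return 0
+ */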
+
+/* "aiohttp/_http_parser.pyx":721
+ *
+ *
+ * cdef int cb_on_message_complete(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_message_complete(struct http_parser *__pyx_v_parser) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ PyObject *__pyx_v_exc = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_message_complete", 0);
+
+ /* "aiohttp/_http_parser.pyx":722
+ *
+ * cdef int cb_on_message_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * try:
+ * pyparser._started = False
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":723
+ * cdef int cb_on_message_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._started = False
+ * pyparser._on_message_complete()
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":724
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ * pyparser._started = False # <<<<<<<<<<<<<<
+ * pyparser._on_message_complete()
+ * except BaseException as exc:
+ */
+ __pyx_v_pyparser->_started = 0;
+
+ /* "aiohttp/_http_parser.pyx":725
+ * try:
+ * pyparser._started = False
+ * pyparser._on_message_complete() # <<<<<<<<<<<<<<
+ * except BaseException as exc:
+ * pyparser._last_error = exc
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_message_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 725, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":723
+ * cdef int cb_on_message_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._started = False
+ * pyparser._on_message_complete()
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":730
+ * return -1
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else:*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":726
+ * pyparser._started = False
+ * pyparser._on_message_complete()
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * pyparser._last_error = exc
+ * return -1
+ */
+ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_5) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_message_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 726, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_6);
+ __pyx_v_exc = __pyx_t_6;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":727
+ * pyparser._on_message_complete()
+ * except BaseException as exc:
+ * pyparser._last_error = exc # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __Pyx_INCREF(__pyx_v_exc);
+ __Pyx_GIVEREF(__pyx_v_exc);
+ __Pyx_GOTREF(__pyx_v_pyparser->_last_error);
+ __Pyx_DECREF(__pyx_v_pyparser->_last_error);
+ __pyx_v_pyparser->_last_error = __pyx_v_exc;
+
+ /* "aiohttp/_http_parser.pyx":728
+ * except BaseException as exc:
+ * pyparser._last_error = exc
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L13_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":726
+ * pyparser._started = False
+ * pyparser._on_message_complete()
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * pyparser._last_error = exc
+ * return -1
+ */
+ /*finally:*/ {
+ __pyx_L13_return: {
+ __pyx_t_5 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_exc);
+ __pyx_v_exc = NULL;
+ __pyx_r = __pyx_t_5;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":723
+ * cdef int cb_on_message_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._started = False
+ * pyparser._on_message_complete()
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":721
+ *
+ *
+ * cdef int cb_on_message_complete(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_message_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_exc);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
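+
+/* For reference, a sketch of the compiled callback, from the quoted .pyx
+ * fragments (layout assumed):
+ *
+ *     cdef int cb_on_message_complete(cparser.http_parser* parser) except -1:
+ *         cdef HttpParser pyparser = <HttpParser>parser.data
+ *         try:
+ *             pyparser._started = False
+ *             pyparser._on_message_complete()
+ *         except BaseException as exc:
+ *             pyparser._last_error = exc
+ *             return -1
+ *         else:
+ *             return 0
+ */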
+
+/* "aiohttp/_http_parser.pyx":733
+ *
+ *
+ * cdef int cb_on_chunk_header(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_chunk_header(struct http_parser *__pyx_v_parser) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ PyObject *__pyx_v_exc = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_chunk_header", 0);
+
+ /* "aiohttp/_http_parser.pyx":734
+ *
+ * cdef int cb_on_chunk_header(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * try:
+ * pyparser._on_chunk_header()
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":735
+ * cdef int cb_on_chunk_header(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_chunk_header()
+ * except BaseException as exc:
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":736
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ * pyparser._on_chunk_header() # <<<<<<<<<<<<<<
+ * except BaseException as exc:
+ * pyparser._last_error = exc
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_chunk_header(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 736, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":735
+ * cdef int cb_on_chunk_header(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_chunk_header()
+ * except BaseException as exc:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":741
+ * return -1
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else:*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":737
+ * try:
+ * pyparser._on_chunk_header()
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * pyparser._last_error = exc
+ * return -1
+ */
+ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_5) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_chunk_header", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 737, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_6);
+ __pyx_v_exc = __pyx_t_6;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":738
+ * pyparser._on_chunk_header()
+ * except BaseException as exc:
+ * pyparser._last_error = exc # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __Pyx_INCREF(__pyx_v_exc);
+ __Pyx_GIVEREF(__pyx_v_exc);
+ __Pyx_GOTREF(__pyx_v_pyparser->_last_error);
+ __Pyx_DECREF(__pyx_v_pyparser->_last_error);
+ __pyx_v_pyparser->_last_error = __pyx_v_exc;
+
+ /* "aiohttp/_http_parser.pyx":739
+ * except BaseException as exc:
+ * pyparser._last_error = exc
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L13_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":737
+ * try:
+ * pyparser._on_chunk_header()
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * pyparser._last_error = exc
+ * return -1
+ */
+ /*finally:*/ {
+ __pyx_L13_return: {
+ __pyx_t_5 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_exc);
+ __pyx_v_exc = NULL;
+ __pyx_r = __pyx_t_5;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":735
+ * cdef int cb_on_chunk_header(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_chunk_header()
+ * except BaseException as exc:
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":733
+ *
+ *
+ * cdef int cb_on_chunk_header(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_chunk_header", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_exc);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
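+
+/* For reference, a sketch of the compiled callback, from the quoted .pyx
+ * fragments (layout assumed):
+ *
+ *     cdef int cb_on_chunk_header(cparser.http_parser* parser) except -1:
+ *         cdef HttpParser pyparser = <HttpParser>parser.data
+ *         try:
+ *             pyparser._on_chunk_header()
+ *         except BaseException as exc:
+ *             pyparser._last_error = exc
+ *             return -1
+ *         else:
+ *             return 0
+ */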
+
+/* "aiohttp/_http_parser.pyx":744
+ *
+ *
+ * cdef int cb_on_chunk_complete(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ */
+
+static int __pyx_f_7aiohttp_12_http_parser_cb_on_chunk_complete(struct http_parser *__pyx_v_parser) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *__pyx_v_pyparser = 0;
+ PyObject *__pyx_v_exc = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("cb_on_chunk_complete", 0);
+
+ /* "aiohttp/_http_parser.pyx":745
+ *
+ * cdef int cb_on_chunk_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data # <<<<<<<<<<<<<<
+ * try:
+ * pyparser._on_chunk_complete()
+ */
+ __pyx_t_1 = ((PyObject *)__pyx_v_parser->data);
+ __Pyx_INCREF(__pyx_t_1);
+ __pyx_v_pyparser = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":746
+ * cdef int cb_on_chunk_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_chunk_complete()
+ * except BaseException as exc:
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_4);
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":747
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ * pyparser._on_chunk_complete() # <<<<<<<<<<<<<<
+ * except BaseException as exc:
+ * pyparser._last_error = exc
+ */
+ __pyx_t_1 = ((struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser *)__pyx_v_pyparser->__pyx_vtab)->_on_chunk_complete(__pyx_v_pyparser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 747, __pyx_L3_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":746
+ * cdef int cb_on_chunk_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_chunk_complete()
+ * except BaseException as exc:
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":752
+ * return -1
+ * else:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else:*/ {
+ __pyx_r = 0;
+ goto __pyx_L6_except_return;
+ }
+ __pyx_L3_error:;
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":748
+ * try:
+ * pyparser._on_chunk_complete()
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * pyparser._last_error = exc
+ * return -1
+ */
+ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_BaseException);
+ if (__pyx_t_5) {
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_chunk_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(0, 748, __pyx_L5_except_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_6);
+ __pyx_v_exc = __pyx_t_6;
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":749
+ * pyparser._on_chunk_complete()
+ * except BaseException as exc:
+ * pyparser._last_error = exc # <<<<<<<<<<<<<<
+ * return -1
+ * else:
+ */
+ __Pyx_INCREF(__pyx_v_exc);
+ __Pyx_GIVEREF(__pyx_v_exc);
+ __Pyx_GOTREF(__pyx_v_pyparser->_last_error);
+ __Pyx_DECREF(__pyx_v_pyparser->_last_error);
+ __pyx_v_pyparser->_last_error = __pyx_v_exc;
+
+ /* "aiohttp/_http_parser.pyx":750
+ * except BaseException as exc:
+ * pyparser._last_error = exc
+ * return -1 # <<<<<<<<<<<<<<
+ * else:
+ * return 0
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L13_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":748
+ * try:
+ * pyparser._on_chunk_complete()
+ * except BaseException as exc: # <<<<<<<<<<<<<<
+ * pyparser._last_error = exc
+ * return -1
+ */
+ /*finally:*/ {
+ __pyx_L13_return: {
+ __pyx_t_5 = __pyx_r;
+ __Pyx_DECREF(__pyx_v_exc);
+ __pyx_v_exc = NULL;
+ __pyx_r = __pyx_t_5;
+ goto __pyx_L6_except_return;
+ }
+ }
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "aiohttp/_http_parser.pyx":746
+ * cdef int cb_on_chunk_complete(cparser.http_parser* parser) except -1:
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try: # <<<<<<<<<<<<<<
+ * pyparser._on_chunk_complete()
+ * except BaseException as exc:
+ */
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L1_error;
+ __pyx_L6_except_return:;
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_XGIVEREF(__pyx_t_4);
+ __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_parser.pyx":744
+ *
+ *
+ * cdef int cb_on_chunk_complete(cparser.http_parser* parser) except -1: # <<<<<<<<<<<<<<
+ * cdef HttpParser pyparser = <HttpParser>parser.data
+ * try:
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_AddTraceback("aiohttp._http_parser.cb_on_chunk_complete", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF((PyObject *)__pyx_v_pyparser);
+ __Pyx_XDECREF(__pyx_v_exc);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
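+
+/* For reference, a sketch of the compiled callback, from the quoted .pyx
+ * fragments (layout assumed):
+ *
+ *     cdef int cb_on_chunk_complete(cparser.http_parser* parser) except -1:
+ *         cdef HttpParser pyparser = <HttpParser>parser.data
+ *         try:
+ *             pyparser._on_chunk_complete()
+ *         except BaseException as exc:
+ *             pyparser._last_error = exc
+ *             return -1
+ *         else:
+ *             return 0
+ */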
+
+/* "aiohttp/_http_parser.pyx":755
+ *
+ *
+ * cdef parser_error_from_errno(cparser.http_errno errno): # <<<<<<<<<<<<<<
+ * cdef bytes desc = cparser.http_errno_description(errno)
+ *
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser_parser_error_from_errno(enum http_errno __pyx_v_errno) {
+ PyObject *__pyx_v_desc = 0;
+ PyObject *__pyx_v_cls = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("parser_error_from_errno", 0);
+
+ /* "aiohttp/_http_parser.pyx":756
+ *
+ * cdef parser_error_from_errno(cparser.http_errno errno):
+ * cdef bytes desc = cparser.http_errno_description(errno) # <<<<<<<<<<<<<<
+ *
+ * if errno in (cparser.HPE_CB_message_begin,
+ */
+ __pyx_t_1 = __Pyx_PyBytes_FromString(http_errno_description(__pyx_v_errno)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 756, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_desc = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":758
+ * cdef bytes desc = cparser.http_errno_description(errno)
+ *
+ * if errno in (cparser.HPE_CB_message_begin, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_url,
+ * cparser.HPE_CB_header_field,
+ */
+ switch (__pyx_v_errno) {
+ case HPE_CB_message_begin:
+ case HPE_CB_url:
+
+ /* "aiohttp/_http_parser.pyx":759
+ *
+ * if errno in (cparser.HPE_CB_message_begin,
+ * cparser.HPE_CB_url, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_header_field,
+ * cparser.HPE_CB_header_value,
+ */
+ case HPE_CB_header_field:
+
+ /* "aiohttp/_http_parser.pyx":760
+ * if errno in (cparser.HPE_CB_message_begin,
+ * cparser.HPE_CB_url,
+ * cparser.HPE_CB_header_field, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_header_value,
+ * cparser.HPE_CB_headers_complete,
+ */
+ case HPE_CB_header_value:
+
+ /* "aiohttp/_http_parser.pyx":761
+ * cparser.HPE_CB_url,
+ * cparser.HPE_CB_header_field,
+ * cparser.HPE_CB_header_value, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_headers_complete,
+ * cparser.HPE_CB_body,
+ */
+ case HPE_CB_headers_complete:
+
+ /* "aiohttp/_http_parser.pyx":762
+ * cparser.HPE_CB_header_field,
+ * cparser.HPE_CB_header_value,
+ * cparser.HPE_CB_headers_complete, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_body,
+ * cparser.HPE_CB_message_complete,
+ */
+ case HPE_CB_body:
+
+ /* "aiohttp/_http_parser.pyx":763
+ * cparser.HPE_CB_header_value,
+ * cparser.HPE_CB_headers_complete,
+ * cparser.HPE_CB_body, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_message_complete,
+ * cparser.HPE_CB_status,
+ */
+ case HPE_CB_message_complete:
+
+ /* "aiohttp/_http_parser.pyx":764
+ * cparser.HPE_CB_headers_complete,
+ * cparser.HPE_CB_body,
+ * cparser.HPE_CB_message_complete, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_status,
+ * cparser.HPE_CB_chunk_header,
+ */
+ case HPE_CB_status:
+
+ /* "aiohttp/_http_parser.pyx":765
+ * cparser.HPE_CB_body,
+ * cparser.HPE_CB_message_complete,
+ * cparser.HPE_CB_status, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_chunk_header,
+ * cparser.HPE_CB_chunk_complete):
+ */
+ case HPE_CB_chunk_header:
+
+ /* "aiohttp/_http_parser.pyx":766
+ * cparser.HPE_CB_message_complete,
+ * cparser.HPE_CB_status,
+ * cparser.HPE_CB_chunk_header, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_chunk_complete):
+ * cls = BadHttpMessage
+ */
+ case HPE_CB_chunk_complete:
+
+ /* "aiohttp/_http_parser.pyx":768
+ * cparser.HPE_CB_chunk_header,
+ * cparser.HPE_CB_chunk_complete):
+ * cls = BadHttpMessage # <<<<<<<<<<<<<<
+ *
+ * elif errno == cparser.HPE_INVALID_STATUS:
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_BadHttpMessage); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 768, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_cls = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":758
+ * cdef bytes desc = cparser.http_errno_description(errno)
+ *
+ * if errno in (cparser.HPE_CB_message_begin, # <<<<<<<<<<<<<<
+ * cparser.HPE_CB_url,
+ * cparser.HPE_CB_header_field,
+ */
+ break;
+ case HPE_INVALID_STATUS:
+
+ /* "aiohttp/_http_parser.pyx":771
+ *
+ * elif errno == cparser.HPE_INVALID_STATUS:
+ * cls = BadStatusLine # <<<<<<<<<<<<<<
+ *
+ * elif errno == cparser.HPE_INVALID_METHOD:
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_BadStatusLine); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 771, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_cls = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":770
+ * cls = BadHttpMessage
+ *
+ * elif errno == cparser.HPE_INVALID_STATUS: # <<<<<<<<<<<<<<
+ * cls = BadStatusLine
+ *
+ */
+ break;
+ case HPE_INVALID_METHOD:
+
+ /* "aiohttp/_http_parser.pyx":774
+ *
+ * elif errno == cparser.HPE_INVALID_METHOD:
+ * cls = BadStatusLine # <<<<<<<<<<<<<<
+ *
+ * elif errno == cparser.HPE_INVALID_URL:
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_BadStatusLine); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 774, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_cls = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":773
+ * cls = BadStatusLine
+ *
+ * elif errno == cparser.HPE_INVALID_METHOD: # <<<<<<<<<<<<<<
+ * cls = BadStatusLine
+ *
+ */
+ break;
+ case HPE_INVALID_URL:
+
+ /* "aiohttp/_http_parser.pyx":777
+ *
+ * elif errno == cparser.HPE_INVALID_URL:
+ * cls = InvalidURLError # <<<<<<<<<<<<<<
+ *
+ * else:
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_InvalidURLError); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 777, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_cls = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":776
+ * cls = BadStatusLine
+ *
+ * elif errno == cparser.HPE_INVALID_URL: # <<<<<<<<<<<<<<
+ * cls = InvalidURLError
+ *
+ */
+ break;
+ default:
+
+ /* "aiohttp/_http_parser.pyx":780
+ *
+ * else:
+ * cls = BadHttpMessage # <<<<<<<<<<<<<<
+ *
+ * return cls(desc.decode('latin-1'))
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_BadHttpMessage); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 780, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_v_cls = __pyx_t_1;
+ __pyx_t_1 = 0;
+ break;
+ }
+
+ /* "aiohttp/_http_parser.pyx":782
+ * cls = BadHttpMessage
+ *
+ * return cls(desc.decode('latin-1')) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __Pyx_decode_bytes(__pyx_v_desc, 0, PY_SSIZE_T_MAX, NULL, NULL, PyUnicode_DecodeLatin1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 782, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v_cls);
+ __pyx_t_3 = __pyx_v_cls; __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_3, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_2) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 782, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_parser.pyx":755
+ *
+ *
+ * cdef parser_error_from_errno(cparser.http_errno errno): # <<<<<<<<<<<<<<
+ * cdef bytes desc = cparser.http_errno_description(errno)
+ *
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("aiohttp._http_parser.parser_error_from_errno", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_desc);
+ __Pyx_XDECREF(__pyx_v_cls);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
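+
+/* Summary of the mapping implemented above (from the quoted .pyx source,
+ * lines 756-782): every HPE_CB_* callback error maps to BadHttpMessage,
+ * HPE_INVALID_STATUS and HPE_INVALID_METHOD both map to BadStatusLine,
+ * HPE_INVALID_URL maps to InvalidURLError, and any other http_errno falls
+ * back to BadHttpMessage.  The selected class is instantiated with the
+ * latin-1-decoded http_errno_description() text.
+ */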
+
+/* "aiohttp/_http_parser.pyx":785
+ *
+ *
+ * def parse_url(url): # <<<<<<<<<<<<<<
+ * cdef:
+ * Py_buffer py_buf
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_1parse_url(PyObject *__pyx_self, PyObject *__pyx_v_url); /*proto*/
+static PyMethodDef __pyx_mdef_7aiohttp_12_http_parser_1parse_url = {"parse_url", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_1parse_url, METH_O, 0};
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_1parse_url(PyObject *__pyx_self, PyObject *__pyx_v_url) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("parse_url (wrapper)", 0);
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_parse_url(__pyx_self, ((PyObject *)__pyx_v_url));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
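+
+/* parse_url is registered with METH_O (see __pyx_mdef above), so this
+ * wrapper receives its single positional argument directly and forwards it
+ * with no tuple or keyword unpacking.  Illustrative Python-level usage
+ * (hypothetical values; any object exporting a simple buffer works, e.g.
+ * bytes):
+ *
+ *     from aiohttp._http_parser import parse_url
+ *     u = parse_url(b"http://user:pw@example.com:8080/p?q=1#frag")
+ *
+ * The result is produced by URL_build(..., encoded=True), which upstream
+ * aiohttp binds to yarl's URL.build.
+ */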
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_parse_url(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_url) {
+ Py_buffer __pyx_v_py_buf;
+ char *__pyx_v_buf_data;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ char const *__pyx_t_4;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("parse_url", 0);
+
+ /* "aiohttp/_http_parser.pyx":790
+ * char* buf_data
+ *
+ * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE) # <<<<<<<<<<<<<<
+ * try:
+ * buf_data = <char*>py_buf.buf
+ */
+ __pyx_t_1 = PyObject_GetBuffer(__pyx_v_url, (&__pyx_v_py_buf), PyBUF_SIMPLE); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(0, 790, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":791
+ *
+ * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE)
+ * try: # <<<<<<<<<<<<<<
+ * buf_data = <char*>py_buf.buf
+ * return _parse_url(buf_data, py_buf.len)
+ */
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":792
+ * PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE)
+ * try:
+ * buf_data = <char*>py_buf.buf # <<<<<<<<<<<<<<
+ * return _parse_url(buf_data, py_buf.len)
+ * finally:
+ */
+ __pyx_v_buf_data = ((char *)__pyx_v_py_buf.buf);
+
+ /* "aiohttp/_http_parser.pyx":793
+ * try:
+ * buf_data = <char*>py_buf.buf
+ * return _parse_url(buf_data, py_buf.len) # <<<<<<<<<<<<<<
+ * finally:
+ * PyBuffer_Release(&py_buf)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = __pyx_f_7aiohttp_12_http_parser__parse_url(__pyx_v_buf_data, __pyx_v_py_buf.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 793, __pyx_L4_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L3_return;
+ }
+
+ /* "aiohttp/_http_parser.pyx":795
+ * return _parse_url(buf_data, py_buf.len)
+ * finally:
+ * PyBuffer_Release(&py_buf) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*finally:*/ {
+ __pyx_L4_error:;
+ /*exception exit:*/{
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+ if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_8, &__pyx_t_9, &__pyx_t_10);
+ if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0)) __Pyx_ErrFetch(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7);
+ __Pyx_XGOTREF(__pyx_t_5);
+ __Pyx_XGOTREF(__pyx_t_6);
+ __Pyx_XGOTREF(__pyx_t_7);
+ __Pyx_XGOTREF(__pyx_t_8);
+ __Pyx_XGOTREF(__pyx_t_9);
+ __Pyx_XGOTREF(__pyx_t_10);
+ __pyx_t_1 = __pyx_lineno; __pyx_t_3 = __pyx_clineno; __pyx_t_4 = __pyx_filename;
+ {
+ PyBuffer_Release((&__pyx_v_py_buf));
+ }
+ if (PY_MAJOR_VERSION >= 3) {
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_XGIVEREF(__pyx_t_9);
+ __Pyx_XGIVEREF(__pyx_t_10);
+ __Pyx_ExceptionReset(__pyx_t_8, __pyx_t_9, __pyx_t_10);
+ }
+ __Pyx_XGIVEREF(__pyx_t_5);
+ __Pyx_XGIVEREF(__pyx_t_6);
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_ErrRestore(__pyx_t_5, __pyx_t_6, __pyx_t_7);
+ __pyx_t_5 = 0; __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0;
+ __pyx_lineno = __pyx_t_1; __pyx_clineno = __pyx_t_3; __pyx_filename = __pyx_t_4;
+ goto __pyx_L1_error;
+ }
+ __pyx_L3_return: {
+ __pyx_t_10 = __pyx_r;
+ __pyx_r = 0;
+ PyBuffer_Release((&__pyx_v_py_buf));
+ __pyx_r = __pyx_t_10;
+ __pyx_t_10 = 0;
+ goto __pyx_L0;
+ }
+ }
+
+ /* "aiohttp/_http_parser.pyx":785
+ *
+ *
+ * def parse_url(url): # <<<<<<<<<<<<<<
+ * cdef:
+ * Py_buffer py_buf
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_AddTraceback("aiohttp._http_parser.parse_url", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
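+
+/* Note on the try/finally lowering in the body above: the `finally:` clause
+ * is emitted twice.  On the exception path (__pyx_L4_error) the live
+ * exception is stashed in __pyx_t_5..__pyx_t_10 via
+ * __Pyx_GetException/__Pyx_ExceptionSwap, PyBuffer_Release(&py_buf) runs,
+ * and the exception is restored before jumping to __pyx_L1_error.  On the
+ * return path (__pyx_L3_return) the result is parked in __pyx_t_10 across
+ * the same PyBuffer_Release call, so the buffer is released exactly once on
+ * every exit.
+ */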
+
+/* "aiohttp/_http_parser.pyx":798
+ *
+ *
+ * cdef _parse_url(char* buf_data, size_t length): # <<<<<<<<<<<<<<
+ * cdef:
+ * cparser.http_parser_url* parsed
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser__parse_url(char *__pyx_v_buf_data, size_t __pyx_v_length) {
+ struct http_parser_url *__pyx_v_parsed;
+ int __pyx_v_res;
+ PyObject *__pyx_v_schema = 0;
+ PyObject *__pyx_v_host = 0;
+ PyObject *__pyx_v_port = 0;
+ PyObject *__pyx_v_path = 0;
+ PyObject *__pyx_v_query = 0;
+ PyObject *__pyx_v_fragment = 0;
+ PyObject *__pyx_v_user = 0;
+ PyObject *__pyx_v_password = 0;
+ PyObject *__pyx_v_userinfo = 0;
+ CYTHON_UNUSED PyObject *__pyx_v_result = 0;
+ int __pyx_v_off;
+ int __pyx_v_ln;
+ CYTHON_UNUSED PyObject *__pyx_v_sep = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ uint16_t __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *(*__pyx_t_8)(PyObject *);
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_t_10;
+ int __pyx_t_11;
+ char const *__pyx_t_12;
+ PyObject *__pyx_t_13 = NULL;
+ PyObject *__pyx_t_14 = NULL;
+ PyObject *__pyx_t_15 = NULL;
+ PyObject *__pyx_t_16 = NULL;
+ PyObject *__pyx_t_17 = NULL;
+ PyObject *__pyx_t_18 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_parse_url", 0);
+
+ /* "aiohttp/_http_parser.pyx":802
+ * cparser.http_parser_url* parsed
+ * int res
+ * str schema = None # <<<<<<<<<<<<<<
+ * str host = None
+ * object port = None
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_schema = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":803
+ * int res
+ * str schema = None
+ * str host = None # <<<<<<<<<<<<<<
+ * object port = None
+ * str path = None
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_host = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":804
+ * str schema = None
+ * str host = None
+ * object port = None # <<<<<<<<<<<<<<
+ * str path = None
+ * str query = None
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_port = Py_None;
+
+ /* "aiohttp/_http_parser.pyx":805
+ * str host = None
+ * object port = None
+ * str path = None # <<<<<<<<<<<<<<
+ * str query = None
+ * str fragment = None
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_path = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":806
+ * object port = None
+ * str path = None
+ * str query = None # <<<<<<<<<<<<<<
+ * str fragment = None
+ * str user = None
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_query = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":807
+ * str path = None
+ * str query = None
+ * str fragment = None # <<<<<<<<<<<<<<
+ * str user = None
+ * str password = None
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_fragment = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":808
+ * str query = None
+ * str fragment = None
+ * str user = None # <<<<<<<<<<<<<<
+ * str password = None
+ * str userinfo = None
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_user = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":809
+ * str fragment = None
+ * str user = None
+ * str password = None # <<<<<<<<<<<<<<
+ * str userinfo = None
+ * object result = None
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_password = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":810
+ * str user = None
+ * str password = None
+ * str userinfo = None # <<<<<<<<<<<<<<
+ * object result = None
+ * int off
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_userinfo = ((PyObject*)Py_None);
+
+ /* "aiohttp/_http_parser.pyx":811
+ * str password = None
+ * str userinfo = None
+ * object result = None # <<<<<<<<<<<<<<
+ * int off
+ * int ln
+ */
+ __Pyx_INCREF(Py_None);
+ __pyx_v_result = Py_None;
+
+ /* "aiohttp/_http_parser.pyx":815
+ * int ln
+ *
+ * parsed = <cparser.http_parser_url*> \ # <<<<<<<<<<<<<<
+ * PyMem_Malloc(sizeof(cparser.http_parser_url))
+ * if parsed is NULL:
+ */
+ __pyx_v_parsed = ((struct http_parser_url *)PyMem_Malloc((sizeof(struct http_parser_url))));
+
+ /* "aiohttp/_http_parser.pyx":817
+ * parsed = <cparser.http_parser_url*> \
+ * PyMem_Malloc(sizeof(cparser.http_parser_url))
+ * if parsed is NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError()
+ * cparser.http_parser_url_init(parsed)
+ */
+ __pyx_t_1 = ((__pyx_v_parsed == NULL) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_parser.pyx":818
+ * PyMem_Malloc(sizeof(cparser.http_parser_url))
+ * if parsed is NULL:
+ * raise MemoryError() # <<<<<<<<<<<<<<
+ * cparser.http_parser_url_init(parsed)
+ * try:
+ */
+ PyErr_NoMemory(); __PYX_ERR(0, 818, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":817
+ * parsed = <cparser.http_parser_url*> \
+ * PyMem_Malloc(sizeof(cparser.http_parser_url))
+ * if parsed is NULL: # <<<<<<<<<<<<<<
+ * raise MemoryError()
+ * cparser.http_parser_url_init(parsed)
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":819
+ * if parsed is NULL:
+ * raise MemoryError()
+ * cparser.http_parser_url_init(parsed) # <<<<<<<<<<<<<<
+ * try:
+ * res = cparser.http_parser_parse_url(buf_data, length, 0, parsed)
+ */
+ http_parser_url_init(__pyx_v_parsed);
+
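+  /* `parsed` is heap-allocated with PyMem_Malloc and owned by this function;
+   * the matching PyMem_Free happens in the duplicated finally blocks near
+   * the end of the function, on both the return and the exception path.
+   */
+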
+ /* "aiohttp/_http_parser.pyx":820
+ * raise MemoryError()
+ * cparser.http_parser_url_init(parsed)
+ * try: # <<<<<<<<<<<<<<
+ * res = cparser.http_parser_parse_url(buf_data, length, 0, parsed)
+ *
+ */
+ /*try:*/ {
+
+ /* "aiohttp/_http_parser.pyx":821
+ * cparser.http_parser_url_init(parsed)
+ * try:
+ * res = cparser.http_parser_parse_url(buf_data, length, 0, parsed) # <<<<<<<<<<<<<<
+ *
+ * if res == 0:
+ */
+ __pyx_v_res = http_parser_parse_url(__pyx_v_buf_data, __pyx_v_length, 0, __pyx_v_parsed);
+
+ /* "aiohttp/_http_parser.pyx":823
+ * res = cparser.http_parser_parse_url(buf_data, length, 0, parsed)
+ *
+ * if res == 0: # <<<<<<<<<<<<<<
+ * if parsed.field_set & (1 << cparser.UF_SCHEMA):
+ * off = parsed.field_data[<int>cparser.UF_SCHEMA].off
+ */
+ __pyx_t_1 = ((__pyx_v_res == 0) != 0);
+ if (likely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_parser.pyx":824
+ *
+ * if res == 0:
+ * if parsed.field_set & (1 << cparser.UF_SCHEMA): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_SCHEMA].off
+ * ln = parsed.field_data[<int>cparser.UF_SCHEMA].len
+ */
+ __pyx_t_1 = ((__pyx_v_parsed->field_set & (1 << UF_SCHEMA)) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":825
+ * if res == 0:
+ * if parsed.field_set & (1 << cparser.UF_SCHEMA):
+ * off = parsed.field_data[<int>cparser.UF_SCHEMA].off # <<<<<<<<<<<<<<
+ * ln = parsed.field_data[<int>cparser.UF_SCHEMA].len
+ * schema = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_SCHEMA)]).off;
+ __pyx_v_off = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":826
+ * if parsed.field_set & (1 << cparser.UF_SCHEMA):
+ * off = parsed.field_data[<int>cparser.UF_SCHEMA].off
+ * ln = parsed.field_data[<int>cparser.UF_SCHEMA].len # <<<<<<<<<<<<<<
+ * schema = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_SCHEMA)]).len;
+ __pyx_v_ln = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":827
+ * off = parsed.field_data[<int>cparser.UF_SCHEMA].off
+ * ln = parsed.field_data[<int>cparser.UF_SCHEMA].len
+ * schema = buf_data[off:off+ln].decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ * else:
+ * schema = ''
+ */
+ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_buf_data, __pyx_v_off, (__pyx_v_off + __pyx_v_ln), NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 827, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_schema, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":824
+ *
+ * if res == 0:
+ * if parsed.field_set & (1 << cparser.UF_SCHEMA): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_SCHEMA].off
+ * ln = parsed.field_data[<int>cparser.UF_SCHEMA].len
+ */
+ goto __pyx_L8;
+ }
+
+ /* "aiohttp/_http_parser.pyx":829
+ * schema = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ * schema = '' # <<<<<<<<<<<<<<
+ *
+ * if parsed.field_set & (1 << cparser.UF_HOST):
+ */
+ /*else*/ {
+ __Pyx_INCREF(__pyx_kp_u__4);
+ __Pyx_DECREF_SET(__pyx_v_schema, __pyx_kp_u__4);
+ }
+ __pyx_L8:;
+
+ /* "aiohttp/_http_parser.pyx":831
+ * schema = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_HOST): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_HOST].off
+ * ln = parsed.field_data[<int>cparser.UF_HOST].len
+ */
+ __pyx_t_1 = ((__pyx_v_parsed->field_set & (1 << UF_HOST)) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":832
+ *
+ * if parsed.field_set & (1 << cparser.UF_HOST):
+ * off = parsed.field_data[<int>cparser.UF_HOST].off # <<<<<<<<<<<<<<
+ * ln = parsed.field_data[<int>cparser.UF_HOST].len
+ * host = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_HOST)]).off;
+ __pyx_v_off = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":833
+ * if parsed.field_set & (1 << cparser.UF_HOST):
+ * off = parsed.field_data[<int>cparser.UF_HOST].off
+ * ln = parsed.field_data[<int>cparser.UF_HOST].len # <<<<<<<<<<<<<<
+ * host = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_HOST)]).len;
+ __pyx_v_ln = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":834
+ * off = parsed.field_data[<int>cparser.UF_HOST].off
+ * ln = parsed.field_data[<int>cparser.UF_HOST].len
+ * host = buf_data[off:off+ln].decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ * else:
+ * host = ''
+ */
+ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_buf_data, __pyx_v_off, (__pyx_v_off + __pyx_v_ln), NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 834, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_host, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":831
+ * schema = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_HOST): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_HOST].off
+ * ln = parsed.field_data[<int>cparser.UF_HOST].len
+ */
+ goto __pyx_L9;
+ }
+
+ /* "aiohttp/_http_parser.pyx":836
+ * host = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ * host = '' # <<<<<<<<<<<<<<
+ *
+ * if parsed.field_set & (1 << cparser.UF_PORT):
+ */
+ /*else*/ {
+ __Pyx_INCREF(__pyx_kp_u__4);
+ __Pyx_DECREF_SET(__pyx_v_host, __pyx_kp_u__4);
+ }
+ __pyx_L9:;
+
+ /* "aiohttp/_http_parser.pyx":838
+ * host = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_PORT): # <<<<<<<<<<<<<<
+ * port = parsed.port
+ *
+ */
+ __pyx_t_1 = ((__pyx_v_parsed->field_set & (1 << UF_PORT)) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":839
+ *
+ * if parsed.field_set & (1 << cparser.UF_PORT):
+ * port = parsed.port # <<<<<<<<<<<<<<
+ *
+ * if parsed.field_set & (1 << cparser.UF_PATH):
+ */
+ __pyx_t_3 = __Pyx_PyInt_From_uint16_t(__pyx_v_parsed->port); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 839, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_port, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":838
+ * host = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_PORT): # <<<<<<<<<<<<<<
+ * port = parsed.port
+ *
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":841
+ * port = parsed.port
+ *
+ * if parsed.field_set & (1 << cparser.UF_PATH): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_PATH].off
+ * ln = parsed.field_data[<int>cparser.UF_PATH].len
+ */
+ __pyx_t_1 = ((__pyx_v_parsed->field_set & (1 << UF_PATH)) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":842
+ *
+ * if parsed.field_set & (1 << cparser.UF_PATH):
+ * off = parsed.field_data[<int>cparser.UF_PATH].off # <<<<<<<<<<<<<<
+ * ln = parsed.field_data[<int>cparser.UF_PATH].len
+ * path = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_PATH)]).off;
+ __pyx_v_off = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":843
+ * if parsed.field_set & (1 << cparser.UF_PATH):
+ * off = parsed.field_data[<int>cparser.UF_PATH].off
+ * ln = parsed.field_data[<int>cparser.UF_PATH].len # <<<<<<<<<<<<<<
+ * path = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_PATH)]).len;
+ __pyx_v_ln = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":844
+ * off = parsed.field_data[<int>cparser.UF_PATH].off
+ * ln = parsed.field_data[<int>cparser.UF_PATH].len
+ * path = buf_data[off:off+ln].decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ * else:
+ * path = ''
+ */
+ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_buf_data, __pyx_v_off, (__pyx_v_off + __pyx_v_ln), NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 844, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_path, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":841
+ * port = parsed.port
+ *
+ * if parsed.field_set & (1 << cparser.UF_PATH): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_PATH].off
+ * ln = parsed.field_data[<int>cparser.UF_PATH].len
+ */
+ goto __pyx_L11;
+ }
+
+ /* "aiohttp/_http_parser.pyx":846
+ * path = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ * path = '' # <<<<<<<<<<<<<<
+ *
+ * if parsed.field_set & (1 << cparser.UF_QUERY):
+ */
+ /*else*/ {
+ __Pyx_INCREF(__pyx_kp_u__4);
+ __Pyx_DECREF_SET(__pyx_v_path, __pyx_kp_u__4);
+ }
+ __pyx_L11:;
+
+ /* "aiohttp/_http_parser.pyx":848
+ * path = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_QUERY): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_QUERY].off
+ * ln = parsed.field_data[<int>cparser.UF_QUERY].len
+ */
+ __pyx_t_1 = ((__pyx_v_parsed->field_set & (1 << UF_QUERY)) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":849
+ *
+ * if parsed.field_set & (1 << cparser.UF_QUERY):
+ * off = parsed.field_data[<int>cparser.UF_QUERY].off # <<<<<<<<<<<<<<
+ * ln = parsed.field_data[<int>cparser.UF_QUERY].len
+ * query = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_QUERY)]).off;
+ __pyx_v_off = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":850
+ * if parsed.field_set & (1 << cparser.UF_QUERY):
+ * off = parsed.field_data[<int>cparser.UF_QUERY].off
+ * ln = parsed.field_data[<int>cparser.UF_QUERY].len # <<<<<<<<<<<<<<
+ * query = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_QUERY)]).len;
+ __pyx_v_ln = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":851
+ * off = parsed.field_data[<int>cparser.UF_QUERY].off
+ * ln = parsed.field_data[<int>cparser.UF_QUERY].len
+ * query = buf_data[off:off+ln].decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ * else:
+ * query = ''
+ */
+ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_buf_data, __pyx_v_off, (__pyx_v_off + __pyx_v_ln), NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 851, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_query, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":848
+ * path = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_QUERY): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_QUERY].off
+ * ln = parsed.field_data[<int>cparser.UF_QUERY].len
+ */
+ goto __pyx_L12;
+ }
+
+ /* "aiohttp/_http_parser.pyx":853
+ * query = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ * query = '' # <<<<<<<<<<<<<<
+ *
+ * if parsed.field_set & (1 << cparser.UF_FRAGMENT):
+ */
+ /*else*/ {
+ __Pyx_INCREF(__pyx_kp_u__4);
+ __Pyx_DECREF_SET(__pyx_v_query, __pyx_kp_u__4);
+ }
+ __pyx_L12:;
+
+ /* "aiohttp/_http_parser.pyx":855
+ * query = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_FRAGMENT): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_FRAGMENT].off
+ * ln = parsed.field_data[<int>cparser.UF_FRAGMENT].len
+ */
+ __pyx_t_1 = ((__pyx_v_parsed->field_set & (1 << UF_FRAGMENT)) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":856
+ *
+ * if parsed.field_set & (1 << cparser.UF_FRAGMENT):
+ * off = parsed.field_data[<int>cparser.UF_FRAGMENT].off # <<<<<<<<<<<<<<
+ * ln = parsed.field_data[<int>cparser.UF_FRAGMENT].len
+ * fragment = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_FRAGMENT)]).off;
+ __pyx_v_off = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":857
+ * if parsed.field_set & (1 << cparser.UF_FRAGMENT):
+ * off = parsed.field_data[<int>cparser.UF_FRAGMENT].off
+ * ln = parsed.field_data[<int>cparser.UF_FRAGMENT].len # <<<<<<<<<<<<<<
+ * fragment = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_FRAGMENT)]).len;
+ __pyx_v_ln = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":858
+ * off = parsed.field_data[<int>cparser.UF_FRAGMENT].off
+ * ln = parsed.field_data[<int>cparser.UF_FRAGMENT].len
+ * fragment = buf_data[off:off+ln].decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ * else:
+ * fragment = ''
+ */
+ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_buf_data, __pyx_v_off, (__pyx_v_off + __pyx_v_ln), NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 858, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_fragment, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":855
+ * query = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_FRAGMENT): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_FRAGMENT].off
+ * ln = parsed.field_data[<int>cparser.UF_FRAGMENT].len
+ */
+ goto __pyx_L13;
+ }
+
+ /* "aiohttp/_http_parser.pyx":860
+ * fragment = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ * else:
+ * fragment = '' # <<<<<<<<<<<<<<
+ *
+ * if parsed.field_set & (1 << cparser.UF_USERINFO):
+ */
+ /*else*/ {
+ __Pyx_INCREF(__pyx_kp_u__4);
+ __Pyx_DECREF_SET(__pyx_v_fragment, __pyx_kp_u__4);
+ }
+ __pyx_L13:;
+
+ /* "aiohttp/_http_parser.pyx":862
+ * fragment = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_USERINFO): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_USERINFO].off
+ * ln = parsed.field_data[<int>cparser.UF_USERINFO].len
+ */
+ __pyx_t_1 = ((__pyx_v_parsed->field_set & (1 << UF_USERINFO)) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_parser.pyx":863
+ *
+ * if parsed.field_set & (1 << cparser.UF_USERINFO):
+ * off = parsed.field_data[<int>cparser.UF_USERINFO].off # <<<<<<<<<<<<<<
+ * ln = parsed.field_data[<int>cparser.UF_USERINFO].len
+ * userinfo = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_USERINFO)]).off;
+ __pyx_v_off = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":864
+ * if parsed.field_set & (1 << cparser.UF_USERINFO):
+ * off = parsed.field_data[<int>cparser.UF_USERINFO].off
+ * ln = parsed.field_data[<int>cparser.UF_USERINFO].len # <<<<<<<<<<<<<<
+ * userinfo = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ *
+ */
+ __pyx_t_2 = (__pyx_v_parsed->field_data[((int)UF_USERINFO)]).len;
+ __pyx_v_ln = __pyx_t_2;
+
+ /* "aiohttp/_http_parser.pyx":865
+ * off = parsed.field_data[<int>cparser.UF_USERINFO].off
+ * ln = parsed.field_data[<int>cparser.UF_USERINFO].len
+ * userinfo = buf_data[off:off+ln].decode('utf-8', 'surrogateescape') # <<<<<<<<<<<<<<
+ *
+ * user, sep, password = userinfo.partition(':')
+ */
+ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_buf_data, __pyx_v_off, (__pyx_v_off + __pyx_v_ln), NULL, ((char const *)"surrogateescape"), PyUnicode_DecodeUTF8); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 865, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_userinfo, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "aiohttp/_http_parser.pyx":867
+ * userinfo = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ *
+ * user, sep, password = userinfo.partition(':') # <<<<<<<<<<<<<<
+ *
+ * return URL_build(scheme=schema,
+ */
+ __pyx_t_3 = __Pyx_CallUnboundCMethod1(&__pyx_umethod_PyUnicode_Type_partition, __pyx_v_userinfo, __pyx_kp_u__11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 867, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ if ((likely(PyTuple_CheckExact(__pyx_t_3))) || (PyList_CheckExact(__pyx_t_3))) {
+ PyObject* sequence = __pyx_t_3;
+ Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
+ if (unlikely(size != 3)) {
+ if (size > 3) __Pyx_RaiseTooManyValuesError(3);
+ else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
+ __PYX_ERR(0, 867, __pyx_L5_error)
+ }
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ if (likely(PyTuple_CheckExact(sequence))) {
+ __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
+ __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
+ __pyx_t_6 = PyTuple_GET_ITEM(sequence, 2);
+ } else {
+ __pyx_t_4 = PyList_GET_ITEM(sequence, 0);
+ __pyx_t_5 = PyList_GET_ITEM(sequence, 1);
+ __pyx_t_6 = PyList_GET_ITEM(sequence, 2);
+ }
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_t_6);
+ #else
+ __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 867, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 867, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_t_6 = PySequence_ITEM(sequence, 2); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 867, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ #endif
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ } else {
+ Py_ssize_t index = -1;
+ __pyx_t_7 = PyObject_GetIter(__pyx_t_3); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 867, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_8 = Py_TYPE(__pyx_t_7)->tp_iternext;
+ index = 0; __pyx_t_4 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_4)) goto __pyx_L15_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_4);
+ index = 1; __pyx_t_5 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_5)) goto __pyx_L15_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_5);
+ index = 2; __pyx_t_6 = __pyx_t_8(__pyx_t_7); if (unlikely(!__pyx_t_6)) goto __pyx_L15_unpacking_failed;
+ __Pyx_GOTREF(__pyx_t_6);
+ if (__Pyx_IternextUnpackEndCheck(__pyx_t_8(__pyx_t_7), 3) < 0) __PYX_ERR(0, 867, __pyx_L5_error)
+ __pyx_t_8 = NULL;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L16_unpacking_done;
+ __pyx_L15_unpacking_failed:;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __pyx_t_8 = NULL;
+ if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index);
+ __PYX_ERR(0, 867, __pyx_L5_error)
+ __pyx_L16_unpacking_done:;
+ }
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(0, 867, __pyx_L5_error)
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_6))||((__pyx_t_6) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_6)->tp_name), 0))) __PYX_ERR(0, 867, __pyx_L5_error)
+ __Pyx_DECREF_SET(__pyx_v_user, ((PyObject*)__pyx_t_4));
+ __pyx_t_4 = 0;
+ __pyx_v_sep = __pyx_t_5;
+ __pyx_t_5 = 0;
+ __Pyx_DECREF_SET(__pyx_v_password, ((PyObject*)__pyx_t_6));
+ __pyx_t_6 = 0;
+
+ /* "aiohttp/_http_parser.pyx":862
+ * fragment = ''
+ *
+ * if parsed.field_set & (1 << cparser.UF_USERINFO): # <<<<<<<<<<<<<<
+ * off = parsed.field_data[<int>cparser.UF_USERINFO].off
+ * ln = parsed.field_data[<int>cparser.UF_USERINFO].len
+ */
+ }
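+
+      /* Lowering note for `user, sep, password = userinfo.partition(':')`:
+       * when the result is exactly a 3-tuple (or list), the items are read
+       * directly with PyTuple_GET_ITEM/PyList_GET_ITEM; otherwise the
+       * generic iterator path (__pyx_L15_unpacking_failed /
+       * __pyx_L16_unpacking_done) is taken, and `user`/`password` are then
+       * type-checked against `str` as declared in the .pyx source.
+       */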
+
+ /* "aiohttp/_http_parser.pyx":869
+ * user, sep, password = userinfo.partition(':')
+ *
+ * return URL_build(scheme=schema, # <<<<<<<<<<<<<<
+ * user=user, password=password, host=host, port=port,
+ * path=path, query_string=query, fragment=fragment, encoded=True)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = __Pyx_PyDict_NewPresized(9); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 869, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_scheme, __pyx_v_schema) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+
+ /* "aiohttp/_http_parser.pyx":870
+ *
+ * return URL_build(scheme=schema,
+ * user=user, password=password, host=host, port=port, # <<<<<<<<<<<<<<
+ * path=path, query_string=query, fragment=fragment, encoded=True)
+ * else:
+ */
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_user, __pyx_v_user) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_password, __pyx_v_password) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_host, __pyx_v_host) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_port, __pyx_v_port) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+
+ /* "aiohttp/_http_parser.pyx":871
+ * return URL_build(scheme=schema,
+ * user=user, password=password, host=host, port=port,
+ * path=path, query_string=query, fragment=fragment, encoded=True) # <<<<<<<<<<<<<<
+ * else:
+ * raise InvalidURLError("invalid url {!r}".format(buf_data))
+ */
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_path, __pyx_v_path) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_query_string, __pyx_v_query) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_fragment, __pyx_v_fragment) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+ if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_encoded, Py_True) < 0) __PYX_ERR(0, 869, __pyx_L5_error)
+
+ /* "aiohttp/_http_parser.pyx":869
+ * user, sep, password = userinfo.partition(':')
+ *
+ * return URL_build(scheme=schema, # <<<<<<<<<<<<<<
+ * user=user, password=password, host=host, port=port,
+ * path=path, query_string=query, fragment=fragment, encoded=True)
+ */
+ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_v_7aiohttp_12_http_parser_URL_build, __pyx_empty_tuple, __pyx_t_3); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 869, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_r = __pyx_t_6;
+ __pyx_t_6 = 0;
+ goto __pyx_L4_return;
+
+ /* "aiohttp/_http_parser.pyx":823
+ * res = cparser.http_parser_parse_url(buf_data, length, 0, parsed)
+ *
+ * if res == 0: # <<<<<<<<<<<<<<
+ * if parsed.field_set & (1 << cparser.UF_SCHEMA):
+ * off = parsed.field_data[<int>cparser.UF_SCHEMA].off
+ */
+ }
+
+ /* "aiohttp/_http_parser.pyx":873
+ * path=path, query_string=query, fragment=fragment, encoded=True)
+ * else:
+ * raise InvalidURLError("invalid url {!r}".format(buf_data)) # <<<<<<<<<<<<<<
+ * finally:
+ * PyMem_Free(parsed)
+ */
+ /*else*/ {
+ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_InvalidURLError); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 873, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_invalid_url_r, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 873, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_7 = __Pyx_PyBytes_FromString(__pyx_v_buf_data); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 873, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_9 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_9)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ __pyx_t_5 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_9, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 873, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_3, function);
+ }
+ }
+ __pyx_t_6 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 873, __pyx_L5_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_6, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __PYX_ERR(0, 873, __pyx_L5_error)
+ }
+ }
+
+ /* "aiohttp/_http_parser.pyx":875
+ * raise InvalidURLError("invalid url {!r}".format(buf_data))
+ * finally:
+ * PyMem_Free(parsed) # <<<<<<<<<<<<<<
+ */
+ /*finally:*/ {
+ __pyx_L5_error:;
+ /*exception exit:*/{
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; __pyx_t_17 = 0; __pyx_t_18 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_16, &__pyx_t_17, &__pyx_t_18);
+ if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_13, &__pyx_t_14, &__pyx_t_15) < 0)) __Pyx_ErrFetch(&__pyx_t_13, &__pyx_t_14, &__pyx_t_15);
+ __Pyx_XGOTREF(__pyx_t_13);
+ __Pyx_XGOTREF(__pyx_t_14);
+ __Pyx_XGOTREF(__pyx_t_15);
+ __Pyx_XGOTREF(__pyx_t_16);
+ __Pyx_XGOTREF(__pyx_t_17);
+ __Pyx_XGOTREF(__pyx_t_18);
+ __pyx_t_10 = __pyx_lineno; __pyx_t_11 = __pyx_clineno; __pyx_t_12 = __pyx_filename;
+ {
+ PyMem_Free(__pyx_v_parsed);
+ }
+ if (PY_MAJOR_VERSION >= 3) {
+ __Pyx_XGIVEREF(__pyx_t_16);
+ __Pyx_XGIVEREF(__pyx_t_17);
+ __Pyx_XGIVEREF(__pyx_t_18);
+ __Pyx_ExceptionReset(__pyx_t_16, __pyx_t_17, __pyx_t_18);
+ }
+ __Pyx_XGIVEREF(__pyx_t_13);
+ __Pyx_XGIVEREF(__pyx_t_14);
+ __Pyx_XGIVEREF(__pyx_t_15);
+ __Pyx_ErrRestore(__pyx_t_13, __pyx_t_14, __pyx_t_15);
+ __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0; __pyx_t_16 = 0; __pyx_t_17 = 0; __pyx_t_18 = 0;
+ __pyx_lineno = __pyx_t_10; __pyx_clineno = __pyx_t_11; __pyx_filename = __pyx_t_12;
+ goto __pyx_L1_error;
+ }
+ __pyx_L4_return: {
+ __pyx_t_18 = __pyx_r;
+ __pyx_r = 0;
+ PyMem_Free(__pyx_v_parsed);
+ __pyx_r = __pyx_t_18;
+ __pyx_t_18 = 0;
+ goto __pyx_L0;
+ }
+ }
+
+ /* "aiohttp/_http_parser.pyx":798
+ *
+ *
+ * cdef _parse_url(char* buf_data, size_t length): # <<<<<<<<<<<<<<
+ * cdef:
+ * cparser.http_parser_url* parsed
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_AddTraceback("aiohttp._http_parser._parse_url", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_schema);
+ __Pyx_XDECREF(__pyx_v_host);
+ __Pyx_XDECREF(__pyx_v_port);
+ __Pyx_XDECREF(__pyx_v_path);
+ __Pyx_XDECREF(__pyx_v_query);
+ __Pyx_XDECREF(__pyx_v_fragment);
+ __Pyx_XDECREF(__pyx_v_user);
+ __Pyx_XDECREF(__pyx_v_password);
+ __Pyx_XDECREF(__pyx_v_userinfo);
+ __Pyx_XDECREF(__pyx_v_result);
+ __Pyx_XDECREF(__pyx_v_sep);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
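+
+/* A rough Python-level equivalent of the component extraction above
+ * (illustrative sketch only; `parsed`, `buf_data` and the decode arguments
+ * follow the quoted .pyx source):
+ *
+ *     def _field(idx):
+ *         off = parsed.field_data[idx].off
+ *         ln = parsed.field_data[idx].len
+ *         return buf_data[off:off + ln].decode('utf-8', 'surrogateescape')
+ *
+ * Each UF_* component is read only when its bit is set in parsed.field_set;
+ * otherwise it defaults to '' (or None for port, which is only replaced by
+ * parsed.port when UF_PORT is set).
+ */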
+
+/* "(tree fragment)":1
+ * def __pyx_unpickle_RawRequestMessage(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_3__pyx_unpickle_RawRequestMessage(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_7aiohttp_12_http_parser_3__pyx_unpickle_RawRequestMessage = {"__pyx_unpickle_RawRequestMessage", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_12_http_parser_3__pyx_unpickle_RawRequestMessage, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_3__pyx_unpickle_RawRequestMessage(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v___pyx_type = 0;
+ long __pyx_v___pyx_checksum;
+ PyObject *__pyx_v___pyx_state = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__pyx_unpickle_RawRequestMessage (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
+ PyObject* values[3] = {0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_RawRequestMessage", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_RawRequestMessage", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_RawRequestMessage") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ __pyx_v___pyx_type = values[0];
+ __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_v___pyx_state = values[2];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_RawRequestMessage", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.__pyx_unpickle_RawRequestMessage", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_2__pyx_unpickle_RawRequestMessage(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
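+
+/* The wrapper above accepts __pyx_type, __pyx_checksum and __pyx_state both
+ * positionally and by keyword: the switch over PyTuple_GET_SIZE fills
+ * `values` from the positional tuple (CYTHON_FALLTHROUGH marking the
+ * deliberate case fall-through), missing arguments are then looked up in
+ * __pyx_kwds (raising __Pyx_RaiseArgtupleInvalid for absent required ones),
+ * and the checksum is finally converted with __Pyx_PyInt_As_long.
+ */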
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_2__pyx_unpickle_RawRequestMessage(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_v___pyx_PickleError = 0;
+ PyObject *__pyx_v___pyx_result = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle_RawRequestMessage", 0);
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x1408252: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x1408252 = (chunked, compression, headers, method, path, raw_headers, should_close, upgrade, url, version))" % __pyx_checksum)
+ */
+ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0x1408252) != 0);
+ if (__pyx_t_1) {
+
+ /* "(tree fragment)":5
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x1408252:
+ * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x1408252 = (chunked, compression, headers, method, path, raw_headers, should_close, upgrade, url, version))" % __pyx_checksum)
+ * __pyx_result = RawRequestMessage.__new__(__pyx_type)
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_PickleError);
+ __Pyx_GIVEREF(__pyx_n_s_PickleError);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
+ __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_v___pyx_PickleError = __pyx_t_2;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":6
+ * if __pyx_checksum != 0x1408252:
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x1408252 = (chunked, compression, headers, method, path, raw_headers, should_close, upgrade, url, version))" % __pyx_checksum) # <<<<<<<<<<<<<<
+ * __pyx_result = RawRequestMessage.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ */
+ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x14, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_INCREF(__pyx_v___pyx_PickleError);
+ __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 6, __pyx_L1_error)
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x1408252: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x1408252 = (chunked, compression, headers, method, path, raw_headers, should_close, upgrade, url, version))" % __pyx_checksum)
+ */
+ }
+
+ /* "(tree fragment)":7
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x1408252 = (chunked, compression, headers, method, path, raw_headers, should_close, upgrade, url, version))" % __pyx_checksum)
+ * __pyx_result = RawRequestMessage.__new__(__pyx_type) # <<<<<<<<<<<<<<
+ * if __pyx_state is not None:
+ * __pyx_unpickle_RawRequestMessage__set_state(<RawRequestMessage> __pyx_result, __pyx_state)
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_7aiohttp_12_http_parser_RawRequestMessage), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v___pyx_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x1408252 = (chunked, compression, headers, method, path, raw_headers, should_close, upgrade, url, version))" % __pyx_checksum)
+ * __pyx_result = RawRequestMessage.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_RawRequestMessage__set_state(<RawRequestMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
+ __pyx_t_6 = (__pyx_t_1 != 0);
+ if (__pyx_t_6) {
+
+ /* "(tree fragment)":9
+ * __pyx_result = RawRequestMessage.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ * __pyx_unpickle_RawRequestMessage__set_state(<RawRequestMessage> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawRequestMessage__set_state(RawRequestMessage __pyx_result, tuple __pyx_state):
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
+ __pyx_t_3 = __pyx_f_7aiohttp_12_http_parser___pyx_unpickle_RawRequestMessage__set_state(((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x1408252 = (chunked, compression, headers, method, path, raw_headers, should_close, upgrade, url, version))" % __pyx_checksum)
+ * __pyx_result = RawRequestMessage.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_RawRequestMessage__set_state(<RawRequestMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ }
+
+ /* "(tree fragment)":10
+ * if __pyx_state is not None:
+ * __pyx_unpickle_RawRequestMessage__set_state(<RawRequestMessage> __pyx_result, __pyx_state)
+ * return __pyx_result # <<<<<<<<<<<<<<
+ * cdef __pyx_unpickle_RawRequestMessage__set_state(RawRequestMessage __pyx_result, tuple __pyx_state):
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.compression = __pyx_state[1]; __pyx_result.headers = __pyx_state[2]; __pyx_result.method = __pyx_state[3]; __pyx_result.path = __pyx_state[4]; __pyx_result.raw_headers = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.url = __pyx_state[8]; __pyx_result.version = __pyx_state[9]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v___pyx_result);
+ __pyx_r = __pyx_v___pyx_result;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_RawRequestMessage(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("aiohttp._http_parser.__pyx_unpickle_RawRequestMessage", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v___pyx_PickleError);
+ __Pyx_XDECREF(__pyx_v___pyx_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
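+
+/* The unpickling helper above guards on the checksum 0x1408252, which
+ * encodes the attribute layout (chunked, compression, headers, method, path,
+ * raw_headers, should_close, upgrade, url, version); a mismatch raises
+ * pickle.PickleError.  Otherwise RawRequestMessage.__new__(__pyx_type) is
+ * called and, when a state tuple is supplied, the __set_state helper below
+ * copies the ten slots back onto the new instance.
+ */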
+
+/* "(tree fragment)":11
+ * __pyx_unpickle_RawRequestMessage__set_state(<RawRequestMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawRequestMessage__set_state(RawRequestMessage __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.compression = __pyx_state[1]; __pyx_result.headers = __pyx_state[2]; __pyx_result.method = __pyx_state[3]; __pyx_result.path = __pyx_state[4]; __pyx_result.raw_headers = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.url = __pyx_state[8]; __pyx_result.version = __pyx_state[9]
+ * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'):
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser___pyx_unpickle_RawRequestMessage__set_state(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ Py_ssize_t __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle_RawRequestMessage__set_state", 0);
+
+ /* "(tree fragment)":12
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawRequestMessage__set_state(RawRequestMessage __pyx_result, tuple __pyx_state):
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.compression = __pyx_state[1]; __pyx_result.headers = __pyx_state[2]; __pyx_result.method = __pyx_state[3]; __pyx_result.path = __pyx_state[4]; __pyx_result.raw_headers = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.url = __pyx_state[8]; __pyx_result.version = __pyx_state[9] # <<<<<<<<<<<<<<
+ * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[10])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->chunked);
+ __Pyx_DECREF(__pyx_v___pyx_result->chunked);
+ __pyx_v___pyx_result->chunked = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->compression);
+ __Pyx_DECREF(__pyx_v___pyx_result->compression);
+ __pyx_v___pyx_result->compression = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->headers);
+ __Pyx_DECREF(__pyx_v___pyx_result->headers);
+ __pyx_v___pyx_result->headers = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->method);
+ __Pyx_DECREF(__pyx_v___pyx_result->method);
+ __pyx_v___pyx_result->method = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->path);
+ __Pyx_DECREF(__pyx_v___pyx_result->path);
+ __pyx_v___pyx_result->path = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 5, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->raw_headers);
+ __Pyx_DECREF(__pyx_v___pyx_result->raw_headers);
+ __pyx_v___pyx_result->raw_headers = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 6, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->should_close);
+ __Pyx_DECREF(__pyx_v___pyx_result->should_close);
+ __pyx_v___pyx_result->should_close = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 7, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->upgrade);
+ __Pyx_DECREF(__pyx_v___pyx_result->upgrade);
+ __pyx_v___pyx_result->upgrade = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 8, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->url);
+ __Pyx_DECREF(__pyx_v___pyx_result->url);
+ __pyx_v___pyx_result->url = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 9, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->version);
+ __Pyx_DECREF(__pyx_v___pyx_result->version);
+ __pyx_v___pyx_result->version = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_RawRequestMessage__set_state(RawRequestMessage __pyx_result, tuple __pyx_state):
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.compression = __pyx_state[1]; __pyx_result.headers = __pyx_state[2]; __pyx_result.method = __pyx_state[3]; __pyx_result.path = __pyx_state[4]; __pyx_result.raw_headers = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.url = __pyx_state[8]; __pyx_result.version = __pyx_state[9]
+ * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[10])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(1, 13, __pyx_L1_error)
+ }
+ __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_4 = ((__pyx_t_3 > 10) != 0);
+ if (__pyx_t_4) {
+ } else {
+ __pyx_t_2 = __pyx_t_4;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_5 = (__pyx_t_4 != 0);
+ __pyx_t_2 = __pyx_t_5;
+ __pyx_L4_bool_binop_done:;
+ if (__pyx_t_2) {
+
+ /* "(tree fragment)":14
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.compression = __pyx_state[1]; __pyx_result.headers = __pyx_state[2]; __pyx_result.method = __pyx_state[3]; __pyx_result.path = __pyx_state[4]; __pyx_result.raw_headers = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.url = __pyx_state[8]; __pyx_result.version = __pyx_state[9]
+ * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[10]) # <<<<<<<<<<<<<<
+ */
+ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 14, __pyx_L1_error)
+ }
+ __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 10, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_8 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
+ __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
+ if (likely(__pyx_t_8)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_8);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_7, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_RawRequestMessage__set_state(RawRequestMessage __pyx_result, tuple __pyx_state):
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.compression = __pyx_state[1]; __pyx_result.headers = __pyx_state[2]; __pyx_result.method = __pyx_state[3]; __pyx_result.path = __pyx_state[4]; __pyx_result.raw_headers = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.url = __pyx_state[8]; __pyx_result.version = __pyx_state[9]
+ * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[10])
+ */
+ }
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle_RawRequestMessage__set_state(<RawRequestMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawRequestMessage__set_state(RawRequestMessage __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.compression = __pyx_state[1]; __pyx_result.headers = __pyx_state[2]; __pyx_result.method = __pyx_state[3]; __pyx_result.path = __pyx_state[4]; __pyx_result.raw_headers = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.url = __pyx_state[8]; __pyx_result.version = __pyx_state[9]
+ * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'):
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_AddTraceback("aiohttp._http_parser.__pyx_unpickle_RawRequestMessage__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
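+
+/* The __set_state helper above restores the ten RawRequestMessage
+ * slots from the pickle state tuple in field order (chunked,
+ * compression, headers, method, path, raw_headers, should_close,
+ * upgrade, url, version). Only method and path are type-checked,
+ * matching their str declarations in the .pyx source; an optional
+ * eleventh entry, when present, is merged into the instance
+ * __dict__. */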
+
+/* "(tree fragment)":1
+ * def __pyx_unpickle_RawResponseMessage(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_5__pyx_unpickle_RawResponseMessage(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_7aiohttp_12_http_parser_5__pyx_unpickle_RawResponseMessage = {"__pyx_unpickle_RawResponseMessage", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_12_http_parser_5__pyx_unpickle_RawResponseMessage, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_7aiohttp_12_http_parser_5__pyx_unpickle_RawResponseMessage(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v___pyx_type = 0;
+ long __pyx_v___pyx_checksum;
+ PyObject *__pyx_v___pyx_state = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__pyx_unpickle_RawResponseMessage (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
+ PyObject* values[3] = {0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_RawResponseMessage", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_RawResponseMessage", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_RawResponseMessage") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ __pyx_v___pyx_type = values[0];
+ __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_v___pyx_state = values[2];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_RawResponseMessage", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._http_parser.__pyx_unpickle_RawResponseMessage", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_12_http_parser_4__pyx_unpickle_RawResponseMessage(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
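+
+/* The wrapper above is the METH_VARARGS|METH_KEYWORDS entry point for
+ * __pyx_unpickle_RawResponseMessage: it unpacks exactly three
+ * arguments (__pyx_type, __pyx_checksum, __pyx_state), accepted
+ * positionally or by keyword, coerces the checksum to a C long, and
+ * forwards to the implementation function below. */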
+
+static PyObject *__pyx_pf_7aiohttp_12_http_parser_4__pyx_unpickle_RawResponseMessage(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_v___pyx_PickleError = 0;
+ PyObject *__pyx_v___pyx_result = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle_RawResponseMessage", 0);
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0xc7706dc: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xc7706dc = (chunked, code, compression, headers, raw_headers, reason, should_close, upgrade, version))" % __pyx_checksum)
+ */
+ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xc7706dc) != 0);
+ if (__pyx_t_1) {
+
+ /* "(tree fragment)":5
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0xc7706dc:
+ * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xc7706dc = (chunked, code, compression, headers, raw_headers, reason, should_close, upgrade, version))" % __pyx_checksum)
+ * __pyx_result = RawResponseMessage.__new__(__pyx_type)
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_PickleError);
+ __Pyx_GIVEREF(__pyx_n_s_PickleError);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
+ __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_v___pyx_PickleError = __pyx_t_2;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":6
+ * if __pyx_checksum != 0xc7706dc:
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xc7706dc = (chunked, code, compression, headers, raw_headers, reason, should_close, upgrade, version))" % __pyx_checksum) # <<<<<<<<<<<<<<
+ * __pyx_result = RawResponseMessage.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ */
+ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xc7, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_INCREF(__pyx_v___pyx_PickleError);
+ __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 6, __pyx_L1_error)
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0xc7706dc: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xc7706dc = (chunked, code, compression, headers, raw_headers, reason, should_close, upgrade, version))" % __pyx_checksum)
+ */
+ }
+
+ /* "(tree fragment)":7
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xc7706dc = (chunked, code, compression, headers, raw_headers, reason, should_close, upgrade, version))" % __pyx_checksum)
+ * __pyx_result = RawResponseMessage.__new__(__pyx_type) # <<<<<<<<<<<<<<
+ * if __pyx_state is not None:
+ * __pyx_unpickle_RawResponseMessage__set_state(<RawResponseMessage> __pyx_result, __pyx_state)
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_7aiohttp_12_http_parser_RawResponseMessage), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v___pyx_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xc7706dc = (chunked, code, compression, headers, raw_headers, reason, should_close, upgrade, version))" % __pyx_checksum)
+ * __pyx_result = RawResponseMessage.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_RawResponseMessage__set_state(<RawResponseMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
+ __pyx_t_6 = (__pyx_t_1 != 0);
+ if (__pyx_t_6) {
+
+ /* "(tree fragment)":9
+ * __pyx_result = RawResponseMessage.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ * __pyx_unpickle_RawResponseMessage__set_state(<RawResponseMessage> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawResponseMessage__set_state(RawResponseMessage __pyx_result, tuple __pyx_state):
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
+ __pyx_t_3 = __pyx_f_7aiohttp_12_http_parser___pyx_unpickle_RawResponseMessage__set_state(((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xc7706dc = (chunked, code, compression, headers, raw_headers, reason, should_close, upgrade, version))" % __pyx_checksum)
+ * __pyx_result = RawResponseMessage.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle_RawResponseMessage__set_state(<RawResponseMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ }
+
+ /* "(tree fragment)":10
+ * if __pyx_state is not None:
+ * __pyx_unpickle_RawResponseMessage__set_state(<RawResponseMessage> __pyx_result, __pyx_state)
+ * return __pyx_result # <<<<<<<<<<<<<<
+ * cdef __pyx_unpickle_RawResponseMessage__set_state(RawResponseMessage __pyx_result, tuple __pyx_state):
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.code = __pyx_state[1]; __pyx_result.compression = __pyx_state[2]; __pyx_result.headers = __pyx_state[3]; __pyx_result.raw_headers = __pyx_state[4]; __pyx_result.reason = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.version = __pyx_state[8]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v___pyx_result);
+ __pyx_r = __pyx_v___pyx_result;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_RawResponseMessage(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("aiohttp._http_parser.__pyx_unpickle_RawResponseMessage", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v___pyx_PickleError);
+ __Pyx_XDECREF(__pyx_v___pyx_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
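+
+/* The constant 0xc7706dc is a hash of the nine-field layout; pickles
+ * produced by a build with a different layout fail fast with
+ * PickleError instead of silently mis-assigning slots. A rough
+ * Python-level sketch of the function above (illustrative only):
+ *
+ *     def __pyx_unpickle_RawResponseMessage(type_, checksum, state):
+ *         if checksum != 0xc7706dc:
+ *             from pickle import PickleError
+ *             raise PickleError("Incompatible checksums ...")
+ *         result = RawResponseMessage.__new__(type_)
+ *         if state is not None:
+ *             __pyx_unpickle_RawResponseMessage__set_state(result, state)
+ *         return result
+ */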
+
+/* "(tree fragment)":11
+ * __pyx_unpickle_RawResponseMessage__set_state(<RawResponseMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawResponseMessage__set_state(RawResponseMessage __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.code = __pyx_state[1]; __pyx_result.compression = __pyx_state[2]; __pyx_result.headers = __pyx_state[3]; __pyx_result.raw_headers = __pyx_state[4]; __pyx_result.reason = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.version = __pyx_state[8]
+ * if len(__pyx_state) > 9 and hasattr(__pyx_result, '__dict__'):
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_parser___pyx_unpickle_RawResponseMessage__set_state(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ Py_ssize_t __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle_RawResponseMessage__set_state", 0);
+
+ /* "(tree fragment)":12
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawResponseMessage__set_state(RawResponseMessage __pyx_result, tuple __pyx_state):
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.code = __pyx_state[1]; __pyx_result.compression = __pyx_state[2]; __pyx_result.headers = __pyx_state[3]; __pyx_result.raw_headers = __pyx_state[4]; __pyx_result.reason = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.version = __pyx_state[8] # <<<<<<<<<<<<<<
+ * if len(__pyx_state) > 9 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[9])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->chunked);
+ __Pyx_DECREF(__pyx_v___pyx_result->chunked);
+ __pyx_v___pyx_result->chunked = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v___pyx_result->code = __pyx_t_2;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->compression);
+ __Pyx_DECREF(__pyx_v___pyx_result->compression);
+ __pyx_v___pyx_result->compression = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->headers);
+ __Pyx_DECREF(__pyx_v___pyx_result->headers);
+ __pyx_v___pyx_result->headers = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->raw_headers);
+ __Pyx_DECREF(__pyx_v___pyx_result->raw_headers);
+ __pyx_v___pyx_result->raw_headers = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 5, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->reason);
+ __Pyx_DECREF(__pyx_v___pyx_result->reason);
+ __pyx_v___pyx_result->reason = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 6, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->should_close);
+ __Pyx_DECREF(__pyx_v___pyx_result->should_close);
+ __pyx_v___pyx_result->should_close = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 7, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->upgrade);
+ __Pyx_DECREF(__pyx_v___pyx_result->upgrade);
+ __pyx_v___pyx_result->upgrade = __pyx_t_1;
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 8, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->version);
+ __Pyx_DECREF(__pyx_v___pyx_result->version);
+ __pyx_v___pyx_result->version = __pyx_t_1;
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_RawResponseMessage__set_state(RawResponseMessage __pyx_result, tuple __pyx_state):
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.code = __pyx_state[1]; __pyx_result.compression = __pyx_state[2]; __pyx_result.headers = __pyx_state[3]; __pyx_result.raw_headers = __pyx_state[4]; __pyx_result.reason = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.version = __pyx_state[8]
+ * if len(__pyx_state) > 9 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[9])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(1, 13, __pyx_L1_error)
+ }
+ __pyx_t_4 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_5 = ((__pyx_t_4 > 9) != 0);
+ if (__pyx_t_5) {
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_6 = (__pyx_t_5 != 0);
+ __pyx_t_3 = __pyx_t_6;
+ __pyx_L4_bool_binop_done:;
+ if (__pyx_t_3) {
+
+ /* "(tree fragment)":14
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.code = __pyx_state[1]; __pyx_result.compression = __pyx_state[2]; __pyx_result.headers = __pyx_state[3]; __pyx_result.raw_headers = __pyx_state[4]; __pyx_result.reason = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.version = __pyx_state[8]
+ * if len(__pyx_state) > 9 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[9]) # <<<<<<<<<<<<<<
+ */
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_update); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 14, __pyx_L1_error)
+ }
+ __pyx_t_7 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 9, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_9 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) {
+ __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8);
+ if (likely(__pyx_t_9)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_8, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_9, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle_RawResponseMessage__set_state(RawResponseMessage __pyx_result, tuple __pyx_state):
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.code = __pyx_state[1]; __pyx_result.compression = __pyx_state[2]; __pyx_result.headers = __pyx_state[3]; __pyx_result.raw_headers = __pyx_state[4]; __pyx_result.reason = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.version = __pyx_state[8]
+ * if len(__pyx_state) > 9 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[9])
+ */
+ }
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle_RawResponseMessage__set_state(<RawResponseMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawResponseMessage__set_state(RawResponseMessage __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.code = __pyx_state[1]; __pyx_result.compression = __pyx_state[2]; __pyx_result.headers = __pyx_state[3]; __pyx_result.raw_headers = __pyx_state[4]; __pyx_result.reason = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.version = __pyx_state[8]
+ * if len(__pyx_state) > 9 and hasattr(__pyx_result, '__dict__'):
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_AddTraceback("aiohttp._http_parser.__pyx_unpickle_RawResponseMessage__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
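+
+/* As with the request variant, the helper above restores the nine
+ * RawResponseMessage slots in order (chunked, code, compression,
+ * headers, raw_headers, reason, should_close, upgrade, version):
+ * code is coerced to a C int, reason is checked to be str, and a
+ * tenth state entry, when present, updates the instance __dict__. */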
+
+static struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *__pyx_freelist_7aiohttp_12_http_parser_RawRequestMessage[250];
+static int __pyx_freecount_7aiohttp_12_http_parser_RawRequestMessage = 0;
+
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_RawRequestMessage(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *p;
+ PyObject *o;
+ if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_7aiohttp_12_http_parser_RawRequestMessage > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage)) & ((t->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0))) {
+ o = (PyObject*)__pyx_freelist_7aiohttp_12_http_parser_RawRequestMessage[--__pyx_freecount_7aiohttp_12_http_parser_RawRequestMessage];
+ memset(o, 0, sizeof(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage));
+ (void) PyObject_INIT(o, t);
+ PyObject_GC_Track(o);
+ } else {
+ if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+ o = (*t->tp_alloc)(t, 0);
+ } else {
+ o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+ }
+ if (unlikely(!o)) return 0;
+ }
+ p = ((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)o);
+ p->method = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->path = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->version = Py_None; Py_INCREF(Py_None);
+ p->headers = Py_None; Py_INCREF(Py_None);
+ p->raw_headers = Py_None; Py_INCREF(Py_None);
+ p->should_close = Py_None; Py_INCREF(Py_None);
+ p->compression = Py_None; Py_INCREF(Py_None);
+ p->upgrade = Py_None; Py_INCREF(Py_None);
+ p->chunked = Py_None; Py_INCREF(Py_None);
+ p->url = Py_None; Py_INCREF(Py_None);
+ return o;
+}
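+
+/* tp_new above implements Cython's freelist optimization (the
+ * @cython.freelist directive): up to 250 dead instances are parked in
+ * __pyx_freelist_7aiohttp_12_http_parser_RawRequestMessage and reused
+ * after a memset and PyObject_INIT, bypassing tp_alloc. Every object
+ * slot starts out as Py_None, never NULL. */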
+
+static void __pyx_tp_dealloc_7aiohttp_12_http_parser_RawRequestMessage(PyObject *o) {
+ struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *p = (struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)o;
+ #if CYTHON_USE_TP_FINALIZE
+ if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
+ if (PyObject_CallFinalizerFromDealloc(o)) return;
+ }
+ #endif
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->method);
+ Py_CLEAR(p->path);
+ Py_CLEAR(p->version);
+ Py_CLEAR(p->headers);
+ Py_CLEAR(p->raw_headers);
+ Py_CLEAR(p->should_close);
+ Py_CLEAR(p->compression);
+ Py_CLEAR(p->upgrade);
+ Py_CLEAR(p->chunked);
+ Py_CLEAR(p->url);
+ if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_7aiohttp_12_http_parser_RawRequestMessage < 250) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage)) & ((Py_TYPE(o)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0))) {
+ __pyx_freelist_7aiohttp_12_http_parser_RawRequestMessage[__pyx_freecount_7aiohttp_12_http_parser_RawRequestMessage++] = ((struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)o);
+ } else {
+ (*Py_TYPE(o)->tp_free)(o);
+ }
+}
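+
+/* tp_dealloc mirrors tp_new: after running any finalizer and clearing
+ * all object slots, the instance is pushed back onto the freelist
+ * when there is room and the type is the unmodified base type;
+ * otherwise it is released through tp_free as usual. */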
+
+static int __pyx_tp_traverse_7aiohttp_12_http_parser_RawRequestMessage(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *p = (struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)o;
+ if (p->version) {
+ e = (*v)(p->version, a); if (e) return e;
+ }
+ if (p->headers) {
+ e = (*v)(p->headers, a); if (e) return e;
+ }
+ if (p->raw_headers) {
+ e = (*v)(p->raw_headers, a); if (e) return e;
+ }
+ if (p->should_close) {
+ e = (*v)(p->should_close, a); if (e) return e;
+ }
+ if (p->compression) {
+ e = (*v)(p->compression, a); if (e) return e;
+ }
+ if (p->upgrade) {
+ e = (*v)(p->upgrade, a); if (e) return e;
+ }
+ if (p->chunked) {
+ e = (*v)(p->chunked, a); if (e) return e;
+ }
+ if (p->url) {
+ e = (*v)(p->url, a); if (e) return e;
+ }
+ return 0;
+}
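+
+/* tp_traverse visits every slot that could participate in a reference
+ * cycle. method and path are deliberately absent: they are typed str,
+ * and exact str objects cannot hold references back into a cycle, so
+ * Cython omits them from traversal. */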
+
+static int __pyx_tp_clear_7aiohttp_12_http_parser_RawRequestMessage(PyObject *o) {
+ PyObject* tmp;
+ struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *p = (struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage *)o;
+ tmp = ((PyObject*)p->version);
+ p->version = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->headers);
+ p->headers = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->raw_headers);
+ p->raw_headers = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->should_close);
+ p->should_close = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->compression);
+ p->compression = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->upgrade);
+ p->upgrade = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->chunked);
+ p->chunked = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->url);
+ p->url = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
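+
+/* tp_clear breaks reference cycles for the collector by swapping each
+ * slot to Py_None before dropping the old reference; resetting to
+ * None rather than NULL keeps the slots valid Python references if
+ * anything touches the half-cleared object during collection. */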
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_method(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_6method_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_path(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_4path_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_version(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7version_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_headers(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7headers_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_raw_headers(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_11raw_headers_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_should_close(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_12should_close_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_compression(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_11compression_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_upgrade(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7upgrade_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_chunked(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7chunked_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_url(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_3url_1__get__(o);
+}
+
+static PyMethodDef __pyx_methods_7aiohttp_12_http_parser_RawRequestMessage[] = {
+ {"_replace", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_5_replace, METH_VARARGS|METH_KEYWORDS, 0},
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_7__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_9__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_7aiohttp_12_http_parser_RawRequestMessage[] = {
+ {(char *)"method", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_method, 0, (char *)0, 0},
+ {(char *)"path", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_path, 0, (char *)0, 0},
+ {(char *)"version", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_version, 0, (char *)0, 0},
+ {(char *)"headers", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_headers, 0, (char *)0, 0},
+ {(char *)"raw_headers", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_raw_headers, 0, (char *)0, 0},
+ {(char *)"should_close", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_should_close, 0, (char *)0, 0},
+ {(char *)"compression", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_compression, 0, (char *)0, 0},
+ {(char *)"upgrade", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_upgrade, 0, (char *)0, 0},
+ {(char *)"chunked", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_chunked, 0, (char *)0, 0},
+ {(char *)"url", __pyx_getprop_7aiohttp_12_http_parser_17RawRequestMessage_url, 0, (char *)0, 0},
+ {0, 0, 0, 0, 0}
+};
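+
+/* The tables above define RawRequestMessage's Python surface:
+ * _replace for namedtuple-style copies, the __reduce_cython__ /
+ * __setstate_cython__ pair used by pickle, and one read-only property
+ * per field (the setter slot of every PyGetSetDef entry is 0). */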
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser_RawRequestMessage = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.RawRequestMessage", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser_RawRequestMessage), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser_RawRequestMessage, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_3__repr__, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser_RawRequestMessage, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_12_http_parser_RawRequestMessage, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_7aiohttp_12_http_parser_RawRequestMessage, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_7aiohttp_12_http_parser_RawRequestMessage, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pw_7aiohttp_12_http_parser_17RawRequestMessage_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser_RawRequestMessage, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
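+
+/* The static PyTypeObject above ties it all together; during module
+ * init Cython readies this type and publishes it as
+ * aiohttp._http_parser.RawRequestMessage. Py_TPFLAGS_HAVE_GC together
+ * with the traverse/clear pair lets instances hold arbitrary headers
+ * and url objects without leaking cycles. */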
+
+static struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *__pyx_freelist_7aiohttp_12_http_parser_RawResponseMessage[250];
+static int __pyx_freecount_7aiohttp_12_http_parser_RawResponseMessage = 0;
+
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_RawResponseMessage(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *p;
+ PyObject *o;
+ if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_7aiohttp_12_http_parser_RawResponseMessage > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage)) & ((t->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0))) {
+ o = (PyObject*)__pyx_freelist_7aiohttp_12_http_parser_RawResponseMessage[--__pyx_freecount_7aiohttp_12_http_parser_RawResponseMessage];
+ memset(o, 0, sizeof(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage));
+ (void) PyObject_INIT(o, t);
+ PyObject_GC_Track(o);
+ } else {
+ if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+ o = (*t->tp_alloc)(t, 0);
+ } else {
+ o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+ }
+ if (unlikely(!o)) return 0;
+ }
+ p = ((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)o);
+ p->version = Py_None; Py_INCREF(Py_None);
+ p->reason = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->headers = Py_None; Py_INCREF(Py_None);
+ p->raw_headers = Py_None; Py_INCREF(Py_None);
+ p->should_close = Py_None; Py_INCREF(Py_None);
+ p->compression = Py_None; Py_INCREF(Py_None);
+ p->upgrade = Py_None; Py_INCREF(Py_None);
+ p->chunked = Py_None; Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_7aiohttp_12_http_parser_RawResponseMessage(PyObject *o) {
+ struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *p = (struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)o;
+ #if CYTHON_USE_TP_FINALIZE
+ if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
+ if (PyObject_CallFinalizerFromDealloc(o)) return;
+ }
+ #endif
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->version);
+ Py_CLEAR(p->reason);
+ Py_CLEAR(p->headers);
+ Py_CLEAR(p->raw_headers);
+ Py_CLEAR(p->should_close);
+ Py_CLEAR(p->compression);
+ Py_CLEAR(p->upgrade);
+ Py_CLEAR(p->chunked);
+ if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_7aiohttp_12_http_parser_RawResponseMessage < 250) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage)) & ((Py_TYPE(o)->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)) == 0))) {
+ __pyx_freelist_7aiohttp_12_http_parser_RawResponseMessage[__pyx_freecount_7aiohttp_12_http_parser_RawResponseMessage++] = ((struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)o);
+ } else {
+ (*Py_TYPE(o)->tp_free)(o);
+ }
+}
+
+static int __pyx_tp_traverse_7aiohttp_12_http_parser_RawResponseMessage(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *p = (struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)o;
+ if (p->version) {
+ e = (*v)(p->version, a); if (e) return e;
+ }
+ if (p->headers) {
+ e = (*v)(p->headers, a); if (e) return e;
+ }
+ if (p->raw_headers) {
+ e = (*v)(p->raw_headers, a); if (e) return e;
+ }
+ if (p->should_close) {
+ e = (*v)(p->should_close, a); if (e) return e;
+ }
+ if (p->compression) {
+ e = (*v)(p->compression, a); if (e) return e;
+ }
+ if (p->upgrade) {
+ e = (*v)(p->upgrade, a); if (e) return e;
+ }
+ if (p->chunked) {
+ e = (*v)(p->chunked, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_7aiohttp_12_http_parser_RawResponseMessage(PyObject *o) {
+ PyObject* tmp;
+ struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *p = (struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage *)o;
+ tmp = ((PyObject*)p->version);
+ p->version = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->headers);
+ p->headers = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->raw_headers);
+ p->raw_headers = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->should_close);
+ p->should_close = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->compression);
+ p->compression = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->upgrade);
+ p->upgrade = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->chunked);
+ p->chunked = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_version(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7version_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_code(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_4code_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_reason(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_6reason_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_headers(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7headers_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_raw_headers(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_11raw_headers_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_should_close(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_12should_close_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_compression(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_11compression_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_upgrade(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7upgrade_1__get__(o);
+}
+
+static PyObject *__pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_chunked(PyObject *o, CYTHON_UNUSED void *x) {
+ return __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7chunked_1__get__(o);
+}
+
+static PyMethodDef __pyx_methods_7aiohttp_12_http_parser_RawResponseMessage[] = {
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_5__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_7__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static struct PyGetSetDef __pyx_getsets_7aiohttp_12_http_parser_RawResponseMessage[] = {
+ {(char *)"version", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_version, 0, (char *)0, 0},
+ {(char *)"code", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_code, 0, (char *)0, 0},
+ {(char *)"reason", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_reason, 0, (char *)0, 0},
+ {(char *)"headers", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_headers, 0, (char *)0, 0},
+ {(char *)"raw_headers", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_raw_headers, 0, (char *)0, 0},
+ {(char *)"should_close", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_should_close, 0, (char *)0, 0},
+ {(char *)"compression", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_compression, 0, (char *)0, 0},
+ {(char *)"upgrade", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_upgrade, 0, (char *)0, 0},
+ {(char *)"chunked", __pyx_getprop_7aiohttp_12_http_parser_18RawResponseMessage_chunked, 0, (char *)0, 0},
+ {0, 0, 0, 0, 0}
+};
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser_RawResponseMessage = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.RawResponseMessage", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser_RawResponseMessage), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser_RawResponseMessage, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_3__repr__, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser_RawResponseMessage, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_12_http_parser_RawResponseMessage, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_7aiohttp_12_http_parser_RawResponseMessage, /*tp_methods*/
+ 0, /*tp_members*/
+ __pyx_getsets_7aiohttp_12_http_parser_RawResponseMessage, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pw_7aiohttp_12_http_parser_18RawResponseMessage_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser_RawResponseMessage, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
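+
+/* RawResponseMessage is generated from the same template as
+ * RawRequestMessage: it differs only in its field set (code and
+ * reason in place of method, path and url) and in having no _replace
+ * entry in its method table. */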
+static struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser __pyx_vtable_7aiohttp_12_http_parser_HttpParser;
+
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_HttpParser(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *p;
+ PyObject *o;
+ if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+ o = (*t->tp_alloc)(t, 0);
+ } else {
+ o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+ }
+ if (unlikely(!o)) return 0;
+ p = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)o);
+ p->__pyx_vtab = __pyx_vtabptr_7aiohttp_12_http_parser_HttpParser;
+ p->_raw_name = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_raw_value = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_protocol = Py_None; Py_INCREF(Py_None);
+ p->_loop = Py_None; Py_INCREF(Py_None);
+ p->_timer = Py_None; Py_INCREF(Py_None);
+ p->_url = Py_None; Py_INCREF(Py_None);
+ p->_buf = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_path = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_reason = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_headers = Py_None; Py_INCREF(Py_None);
+ p->_raw_headers = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_messages = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_payload = Py_None; Py_INCREF(Py_None);
+ p->_payload_exception = Py_None; Py_INCREF(Py_None);
+ p->_last_error = Py_None; Py_INCREF(Py_None);
+ p->_content_encoding = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->py_buf.obj = NULL;
+ if (unlikely(__pyx_pw_7aiohttp_12_http_parser_10HttpParser_1__cinit__(o, __pyx_empty_tuple, NULL) < 0)) goto bad;
+ return o;
+ bad:
+ Py_DECREF(o); o = 0;
+ return NULL;
+}
+
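+/* tp_dealloc for HttpParser: let tp_finalize run first (it may resurrect
+ * the object), untrack from the GC, then invoke __dealloc__ with the
+ * refcount temporarily bumped and any in-flight exception saved, and
+ * finally drop every owned reference. */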
+static void __pyx_tp_dealloc_7aiohttp_12_http_parser_HttpParser(PyObject *o) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *p = (struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)o;
+ #if CYTHON_USE_TP_FINALIZE
+ if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
+ if (PyObject_CallFinalizerFromDealloc(o)) return;
+ }
+ #endif
+ PyObject_GC_UnTrack(o);
+ {
+ PyObject *etype, *eval, *etb;
+ PyErr_Fetch(&etype, &eval, &etb);
+ __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
+ __pyx_pw_7aiohttp_12_http_parser_10HttpParser_3__dealloc__(o);
+ __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
+ PyErr_Restore(etype, eval, etb);
+ }
+ Py_CLEAR(p->_raw_name);
+ Py_CLEAR(p->_raw_value);
+ Py_CLEAR(p->_protocol);
+ Py_CLEAR(p->_loop);
+ Py_CLEAR(p->_timer);
+ Py_CLEAR(p->_url);
+ Py_CLEAR(p->_buf);
+ Py_CLEAR(p->_path);
+ Py_CLEAR(p->_reason);
+ Py_CLEAR(p->_headers);
+ Py_CLEAR(p->_raw_headers);
+ Py_CLEAR(p->_messages);
+ Py_CLEAR(p->_payload);
+ Py_CLEAR(p->_payload_exception);
+ Py_CLEAR(p->_last_error);
+ Py_CLEAR(p->_content_encoding);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
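+/* GC traverse: visit each PyObject* the parser owns, including the object
+ * pinned by the held Py_buffer, so the cycle collector can reach them. */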
+static int __pyx_tp_traverse_7aiohttp_12_http_parser_HttpParser(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *p = (struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)o;
+ if (p->_protocol) {
+ e = (*v)(p->_protocol, a); if (e) return e;
+ }
+ if (p->_loop) {
+ e = (*v)(p->_loop, a); if (e) return e;
+ }
+ if (p->_timer) {
+ e = (*v)(p->_timer, a); if (e) return e;
+ }
+ if (p->_url) {
+ e = (*v)(p->_url, a); if (e) return e;
+ }
+ if (p->_headers) {
+ e = (*v)(p->_headers, a); if (e) return e;
+ }
+ if (p->_raw_headers) {
+ e = (*v)(p->_raw_headers, a); if (e) return e;
+ }
+ if (p->_messages) {
+ e = (*v)(p->_messages, a); if (e) return e;
+ }
+ if (p->_payload) {
+ e = (*v)(p->_payload, a); if (e) return e;
+ }
+ if (p->_payload_exception) {
+ e = (*v)(p->_payload_exception, a); if (e) return e;
+ }
+ if (p->_last_error) {
+ e = (*v)(p->_last_error, a); if (e) return e;
+ }
+ if (p->py_buf.obj) {
+ e = (*v)(p->py_buf.obj, a); if (e) return e;
+ }
+ return 0;
+}
+
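+/* GC clear: break reference cycles by resetting each field to Py_None
+ * before releasing the old value; the Py_buffer's owner is simply
+ * cleared. */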
+static int __pyx_tp_clear_7aiohttp_12_http_parser_HttpParser(PyObject *o) {
+ PyObject* tmp;
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *p = (struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *)o;
+ tmp = ((PyObject*)p->_protocol);
+ p->_protocol = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_loop);
+ p->_loop = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_timer);
+ p->_timer = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_url);
+ p->_url = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_headers);
+ p->_headers = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_raw_headers);
+ p->_raw_headers = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_messages);
+ p->_messages = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_payload);
+ p->_payload = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_payload_exception);
+ p->_payload_exception = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_last_error);
+ p->_last_error = Py_None; Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ Py_CLEAR(p->py_buf.obj);
+ return 0;
+}
+
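+/* Python-visible methods of HttpParser.  The __reduce_cython__ /
+ * __setstate_cython__ stubs only raise the "no default __reduce__ due to
+ * non-trivial __cinit__" TypeError prepared in __Pyx_InitCachedConstants
+ * below. */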
+static PyMethodDef __pyx_methods_7aiohttp_12_http_parser_HttpParser[] = {
+ {"feed_eof", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_10HttpParser_5feed_eof, METH_NOARGS, 0},
+ {"feed_data", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_10HttpParser_7feed_data, METH_O, 0},
+ {"set_upgraded", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_10HttpParser_9set_upgraded, METH_O, 0},
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_10HttpParser_11__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_10HttpParser_13__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser_HttpParser = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.HttpParser", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser_HttpParser, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser_HttpParser, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_12_http_parser_HttpParser, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_7aiohttp_12_http_parser_HttpParser, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser_HttpParser, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
+static struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpRequestParser __pyx_vtable_7aiohttp_12_http_parser_HttpRequestParser;
+
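+/* Subclass tp_new: delegate allocation and field setup to the HttpParser
+ * base, then swap in the HttpRequestParser vtable.  HttpResponseParser
+ * below repeats the same pattern. */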
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_HttpRequestParser(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *p;
+ PyObject *o = __pyx_tp_new_7aiohttp_12_http_parser_HttpParser(t, a, k);
+ if (unlikely(!o)) return 0;
+ p = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser *)o);
+ p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser*)__pyx_vtabptr_7aiohttp_12_http_parser_HttpRequestParser;
+ return o;
+}
+
+static PyMethodDef __pyx_methods_7aiohttp_12_http_parser_HttpRequestParser[] = {
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_3__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_5__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser_HttpRequestParser = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.HttpRequestParser", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser_HttpRequestParser), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser_HttpParser, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser_HttpParser, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_12_http_parser_HttpParser, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_7aiohttp_12_http_parser_HttpRequestParser, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pw_7aiohttp_12_http_parser_17HttpRequestParser_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser_HttpRequestParser, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
+static struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpResponseParser __pyx_vtable_7aiohttp_12_http_parser_HttpResponseParser;
+
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser_HttpResponseParser(PyTypeObject *t, PyObject *a, PyObject *k) {
+ struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *p;
+ PyObject *o = __pyx_tp_new_7aiohttp_12_http_parser_HttpParser(t, a, k);
+ if (unlikely(!o)) return 0;
+ p = ((struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser *)o);
+ p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_7aiohttp_12_http_parser_HttpParser*)__pyx_vtabptr_7aiohttp_12_http_parser_HttpResponseParser;
+ return o;
+}
+
+static PyMethodDef __pyx_methods_7aiohttp_12_http_parser_HttpResponseParser[] = {
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_3__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_5__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser_HttpResponseParser = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.HttpResponseParser", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser_HttpResponseParser), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser_HttpParser, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser_HttpParser, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_12_http_parser_HttpParser, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_7aiohttp_12_http_parser_HttpResponseParser, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pw_7aiohttp_12_http_parser_18HttpResponseParser_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser_HttpResponseParser, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
+
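+/* Closure scope for a __repr__ implementation; it captures the local
+ * "info" list consumed by the genexpr scope that follows.  Cython recycles
+ * up to 8 freed structs of exactly this size through a static freelist
+ * instead of returning them to the allocator. */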
+static struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *__pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct____repr__[8];
+static int __pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct____repr__ = 0;
+
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct____repr__(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ PyObject *o;
+ if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct____repr__ > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__)))) {
+ o = (PyObject*)__pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct____repr__[--__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct____repr__];
+ memset(o, 0, sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__));
+ (void) PyObject_INIT(o, t);
+ PyObject_GC_Track(o);
+ } else {
+ o = (*t->tp_alloc)(t, 0);
+ if (unlikely(!o)) return 0;
+ }
+ return o;
+}
+
+static void __pyx_tp_dealloc_7aiohttp_12_http_parser___pyx_scope_struct____repr__(PyObject *o) {
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *)o;
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->__pyx_v_info);
+ if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct____repr__ < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__)))) {
+ __pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct____repr__[__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct____repr__++] = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *)o);
+ } else {
+ (*Py_TYPE(o)->tp_free)(o);
+ }
+}
+
+static int __pyx_tp_traverse_7aiohttp_12_http_parser___pyx_scope_struct____repr__(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *)o;
+ if (p->__pyx_v_info) {
+ e = (*v)(p->__pyx_v_info, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_7aiohttp_12_http_parser___pyx_scope_struct____repr__(PyObject *o) {
+ PyObject* tmp;
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__ *)o;
+ tmp = ((PyObject*)p->__pyx_v_info);
+ p->__pyx_v_info = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct____repr__ = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.__pyx_scope_struct____repr__", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct____repr__), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser___pyx_scope_struct____repr__, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser___pyx_scope_struct____repr__, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_12_http_parser___pyx_scope_struct____repr__, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ 0, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct____repr__, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
+
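+/* Scope struct for the generator expression inside __repr__: it links back
+ * to the enclosing __repr__ scope and, judging by the field names, carries
+ * the per-iteration (name, val) pair drawn from "info". */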
+static struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *__pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr[8];
+static int __pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr = 0;
+
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ PyObject *o;
+ if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr)))) {
+ o = (PyObject*)__pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr[--__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr];
+ memset(o, 0, sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr));
+ (void) PyObject_INIT(o, t);
+ PyObject_GC_Track(o);
+ } else {
+ o = (*t->tp_alloc)(t, 0);
+ if (unlikely(!o)) return 0;
+ }
+ return o;
+}
+
+static void __pyx_tp_dealloc_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr(PyObject *o) {
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *)o;
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->__pyx_outer_scope);
+ Py_CLEAR(p->__pyx_v_name);
+ Py_CLEAR(p->__pyx_v_val);
+ if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr)))) {
+ __pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr[__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr++] = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *)o);
+ } else {
+ (*Py_TYPE(o)->tp_free)(o);
+ }
+}
+
+static int __pyx_tp_traverse_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr *)o;
+ if (p->__pyx_outer_scope) {
+ e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e;
+ }
+ if (p->__pyx_v_name) {
+ e = (*v)(p->__pyx_v_name, a); if (e) return e;
+ }
+ if (p->__pyx_v_val) {
+ e = (*v)(p->__pyx_v_val, a); if (e) return e;
+ }
+ return 0;
+}
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.__pyx_scope_struct_1_genexpr", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ 0, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
+
+static struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *__pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__[8];
+static int __pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ = 0;
+
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ PyObject *o;
+ if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__)))) {
+ o = (PyObject*)__pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__[--__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__];
+ memset(o, 0, sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__));
+ (void) PyObject_INIT(o, t);
+ PyObject_GC_Track(o);
+ } else {
+ o = (*t->tp_alloc)(t, 0);
+ if (unlikely(!o)) return 0;
+ }
+ return o;
+}
+
+static void __pyx_tp_dealloc_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__(PyObject *o) {
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *)o;
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->__pyx_v_info);
+ if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__)))) {
+ __pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__[__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__++] = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *)o);
+ } else {
+ (*Py_TYPE(o)->tp_free)(o);
+ }
+}
+
+static int __pyx_tp_traverse_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *)o;
+ if (p->__pyx_v_info) {
+ e = (*v)(p->__pyx_v_info, a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__(PyObject *o) {
+ PyObject* tmp;
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ *)o;
+ tmp = ((PyObject*)p->__pyx_v_info);
+ p->__pyx_v_info = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.__pyx_scope_struct_2___repr__", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__, /*tp_traverse*/
+ __pyx_tp_clear_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ 0, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
+
+static struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *__pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr[8];
+static int __pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr = 0;
+
+static PyObject *__pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ PyObject *o;
+ if (CYTHON_COMPILING_IN_CPYTHON && likely((__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr > 0) & (t->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr)))) {
+ o = (PyObject*)__pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr[--__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr];
+ memset(o, 0, sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr));
+ (void) PyObject_INIT(o, t);
+ PyObject_GC_Track(o);
+ } else {
+ o = (*t->tp_alloc)(t, 0);
+ if (unlikely(!o)) return 0;
+ }
+ return o;
+}
+
+static void __pyx_tp_dealloc_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr(PyObject *o) {
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *)o;
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->__pyx_outer_scope);
+ Py_CLEAR(p->__pyx_v_name);
+ Py_CLEAR(p->__pyx_v_val);
+ if (CYTHON_COMPILING_IN_CPYTHON && ((__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr < 8) & (Py_TYPE(o)->tp_basicsize == sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr)))) {
+ __pyx_freelist_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr[__pyx_freecount_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr++] = ((struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *)o);
+ } else {
+ (*Py_TYPE(o)->tp_free)(o);
+ }
+}
+
+static int __pyx_tp_traverse_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *p = (struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr *)o;
+ if (p->__pyx_outer_scope) {
+ e = (*v)(((PyObject *)p->__pyx_outer_scope), a); if (e) return e;
+ }
+ if (p->__pyx_v_name) {
+ e = (*v)(p->__pyx_v_name, a); if (e) return e;
+ }
+ if (p->__pyx_v_val) {
+ e = (*v)(p->__pyx_v_val, a); if (e) return e;
+ }
+ return 0;
+}
+
+static PyTypeObject __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "aiohttp._http_parser.__pyx_scope_struct_3_genexpr", /*tp_name*/
+ sizeof(struct __pyx_obj_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ 0, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ 0, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ 0, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
+
+static PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
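+/* Module definition.  With PEP 489 multi-phase init the create/exec slots
+ * drive setup and m_size is 0; otherwise this is a classic single-phase
+ * PyModuleDef with m_size = -1.  The empty __pyx_methods table above feeds
+ * m_methods. */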
+#if PY_MAJOR_VERSION >= 3
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
+static int __pyx_pymod_exec__http_parser(PyObject* module); /*proto*/
+static PyModuleDef_Slot __pyx_moduledef_slots[] = {
+ {Py_mod_create, (void*)__pyx_pymod_create},
+ {Py_mod_exec, (void*)__pyx_pymod_exec__http_parser},
+ {0, NULL}
+};
+#endif
+
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_http_parser",
+ 0, /* m_doc */
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ 0, /* m_size */
+ #else
+ -1, /* m_size */
+ #endif
+ __pyx_methods /* m_methods */,
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_moduledef_slots, /* m_slots */
+ #else
+ NULL, /* m_reload */
+ #endif
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+ #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+ #define CYTHON_SMALL_CODE __attribute__((cold))
+#else
+ #define CYTHON_SMALL_CODE
+#endif
+#endif
+
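+/* Interned-constant table: every name and literal the module uses is built
+ * once at init time.  Going by Cython's __Pyx_StringTabEntry layout, the
+ * trailing columns appear to be encoding / is_unicode / is_str / intern
+ * flags (kp_* entries are string constants, n_s/n_u interned
+ * identifiers). */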
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_u_, __pyx_k_, sizeof(__pyx_k_), 0, 1, 0, 0},
+ {&__pyx_n_s_ACCEPT, __pyx_k_ACCEPT, sizeof(__pyx_k_ACCEPT), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCEPT_CHARSET, __pyx_k_ACCEPT_CHARSET, sizeof(__pyx_k_ACCEPT_CHARSET), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCEPT_ENCODING, __pyx_k_ACCEPT_ENCODING, sizeof(__pyx_k_ACCEPT_ENCODING), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCEPT_LANGUAGE, __pyx_k_ACCEPT_LANGUAGE, sizeof(__pyx_k_ACCEPT_LANGUAGE), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCEPT_RANGES, __pyx_k_ACCEPT_RANGES, sizeof(__pyx_k_ACCEPT_RANGES), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCESS_CONTROL_ALLOW_CREDENTIALS, __pyx_k_ACCESS_CONTROL_ALLOW_CREDENTIALS, sizeof(__pyx_k_ACCESS_CONTROL_ALLOW_CREDENTIALS), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCESS_CONTROL_ALLOW_HEADERS, __pyx_k_ACCESS_CONTROL_ALLOW_HEADERS, sizeof(__pyx_k_ACCESS_CONTROL_ALLOW_HEADERS), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCESS_CONTROL_ALLOW_METHODS, __pyx_k_ACCESS_CONTROL_ALLOW_METHODS, sizeof(__pyx_k_ACCESS_CONTROL_ALLOW_METHODS), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCESS_CONTROL_ALLOW_ORIGIN, __pyx_k_ACCESS_CONTROL_ALLOW_ORIGIN, sizeof(__pyx_k_ACCESS_CONTROL_ALLOW_ORIGIN), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCESS_CONTROL_EXPOSE_HEADERS, __pyx_k_ACCESS_CONTROL_EXPOSE_HEADERS, sizeof(__pyx_k_ACCESS_CONTROL_EXPOSE_HEADERS), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCESS_CONTROL_MAX_AGE, __pyx_k_ACCESS_CONTROL_MAX_AGE, sizeof(__pyx_k_ACCESS_CONTROL_MAX_AGE), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCESS_CONTROL_REQUEST_HEADERS, __pyx_k_ACCESS_CONTROL_REQUEST_HEADERS, sizeof(__pyx_k_ACCESS_CONTROL_REQUEST_HEADERS), 0, 0, 1, 1},
+ {&__pyx_n_s_ACCESS_CONTROL_REQUEST_METHOD, __pyx_k_ACCESS_CONTROL_REQUEST_METHOD, sizeof(__pyx_k_ACCESS_CONTROL_REQUEST_METHOD), 0, 0, 1, 1},
+ {&__pyx_n_s_AGE, __pyx_k_AGE, sizeof(__pyx_k_AGE), 0, 0, 1, 1},
+ {&__pyx_n_s_ALLOW, __pyx_k_ALLOW, sizeof(__pyx_k_ALLOW), 0, 0, 1, 1},
+ {&__pyx_n_s_AUTHORIZATION, __pyx_k_AUTHORIZATION, sizeof(__pyx_k_AUTHORIZATION), 0, 0, 1, 1},
+ {&__pyx_n_s_BadHttpMessage, __pyx_k_BadHttpMessage, sizeof(__pyx_k_BadHttpMessage), 0, 0, 1, 1},
+ {&__pyx_n_s_BadStatusLine, __pyx_k_BadStatusLine, sizeof(__pyx_k_BadStatusLine), 0, 0, 1, 1},
+ {&__pyx_n_s_BaseException, __pyx_k_BaseException, sizeof(__pyx_k_BaseException), 0, 0, 1, 1},
+ {&__pyx_n_s_CACHE_CONTROL, __pyx_k_CACHE_CONTROL, sizeof(__pyx_k_CACHE_CONTROL), 0, 0, 1, 1},
+ {&__pyx_n_s_CIMultiDict, __pyx_k_CIMultiDict, sizeof(__pyx_k_CIMultiDict), 0, 0, 1, 1},
+ {&__pyx_n_s_CIMultiDictProxy, __pyx_k_CIMultiDictProxy, sizeof(__pyx_k_CIMultiDictProxy), 0, 0, 1, 1},
+ {&__pyx_n_s_CIMultiDictProxy_2, __pyx_k_CIMultiDictProxy_2, sizeof(__pyx_k_CIMultiDictProxy_2), 0, 0, 1, 1},
+ {&__pyx_n_s_CIMultiDict_2, __pyx_k_CIMultiDict_2, sizeof(__pyx_k_CIMultiDict_2), 0, 0, 1, 1},
+ {&__pyx_n_s_CONNECTION, __pyx_k_CONNECTION, sizeof(__pyx_k_CONNECTION), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_DISPOSITION, __pyx_k_CONTENT_DISPOSITION, sizeof(__pyx_k_CONTENT_DISPOSITION), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_ENCODING, __pyx_k_CONTENT_ENCODING, sizeof(__pyx_k_CONTENT_ENCODING), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_LANGUAGE, __pyx_k_CONTENT_LANGUAGE, sizeof(__pyx_k_CONTENT_LANGUAGE), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_LENGTH, __pyx_k_CONTENT_LENGTH, sizeof(__pyx_k_CONTENT_LENGTH), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_LOCATION, __pyx_k_CONTENT_LOCATION, sizeof(__pyx_k_CONTENT_LOCATION), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_MD5, __pyx_k_CONTENT_MD5, sizeof(__pyx_k_CONTENT_MD5), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_RANGE, __pyx_k_CONTENT_RANGE, sizeof(__pyx_k_CONTENT_RANGE), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_TRANSFER_ENCODING, __pyx_k_CONTENT_TRANSFER_ENCODING, sizeof(__pyx_k_CONTENT_TRANSFER_ENCODING), 0, 0, 1, 1},
+ {&__pyx_n_s_CONTENT_TYPE, __pyx_k_CONTENT_TYPE, sizeof(__pyx_k_CONTENT_TYPE), 0, 0, 1, 1},
+ {&__pyx_n_s_COOKIE, __pyx_k_COOKIE, sizeof(__pyx_k_COOKIE), 0, 0, 1, 1},
+ {&__pyx_n_s_ContentLengthError, __pyx_k_ContentLengthError, sizeof(__pyx_k_ContentLengthError), 0, 0, 1, 1},
+ {&__pyx_n_s_DATE, __pyx_k_DATE, sizeof(__pyx_k_DATE), 0, 0, 1, 1},
+ {&__pyx_n_s_DESTINATION, __pyx_k_DESTINATION, sizeof(__pyx_k_DESTINATION), 0, 0, 1, 1},
+ {&__pyx_n_s_DIGEST, __pyx_k_DIGEST, sizeof(__pyx_k_DIGEST), 0, 0, 1, 1},
+ {&__pyx_n_s_DeflateBuffer, __pyx_k_DeflateBuffer, sizeof(__pyx_k_DeflateBuffer), 0, 0, 1, 1},
+ {&__pyx_n_s_DeflateBuffer_2, __pyx_k_DeflateBuffer_2, sizeof(__pyx_k_DeflateBuffer_2), 0, 0, 1, 1},
+ {&__pyx_n_s_EMPTY_PAYLOAD, __pyx_k_EMPTY_PAYLOAD, sizeof(__pyx_k_EMPTY_PAYLOAD), 0, 0, 1, 1},
+ {&__pyx_n_s_EMPTY_PAYLOAD_2, __pyx_k_EMPTY_PAYLOAD_2, sizeof(__pyx_k_EMPTY_PAYLOAD_2), 0, 0, 1, 1},
+ {&__pyx_n_s_ETAG, __pyx_k_ETAG, sizeof(__pyx_k_ETAG), 0, 0, 1, 1},
+ {&__pyx_n_s_EXPECT, __pyx_k_EXPECT, sizeof(__pyx_k_EXPECT), 0, 0, 1, 1},
+ {&__pyx_n_s_EXPIRES, __pyx_k_EXPIRES, sizeof(__pyx_k_EXPIRES), 0, 0, 1, 1},
+ {&__pyx_n_s_FORWARDED, __pyx_k_FORWARDED, sizeof(__pyx_k_FORWARDED), 0, 0, 1, 1},
+ {&__pyx_n_s_FROM, __pyx_k_FROM, sizeof(__pyx_k_FROM), 0, 0, 1, 1},
+ {&__pyx_n_s_HOST, __pyx_k_HOST, sizeof(__pyx_k_HOST), 0, 0, 1, 1},
+ {&__pyx_kp_u_Header_name_is_too_long, __pyx_k_Header_name_is_too_long, sizeof(__pyx_k_Header_name_is_too_long), 0, 1, 0, 0},
+ {&__pyx_kp_u_Header_value_is_too_long, __pyx_k_Header_value_is_too_long, sizeof(__pyx_k_Header_value_is_too_long), 0, 1, 0, 0},
+ {&__pyx_n_s_HttpRequestParser, __pyx_k_HttpRequestParser, sizeof(__pyx_k_HttpRequestParser), 0, 0, 1, 1},
+ {&__pyx_n_u_HttpRequestParser, __pyx_k_HttpRequestParser, sizeof(__pyx_k_HttpRequestParser), 0, 1, 0, 1},
+ {&__pyx_n_s_HttpResponseParser, __pyx_k_HttpResponseParser, sizeof(__pyx_k_HttpResponseParser), 0, 0, 1, 1},
+ {&__pyx_n_u_HttpResponseParser, __pyx_k_HttpResponseParser, sizeof(__pyx_k_HttpResponseParser), 0, 1, 0, 1},
+ {&__pyx_n_s_HttpVersion, __pyx_k_HttpVersion, sizeof(__pyx_k_HttpVersion), 0, 0, 1, 1},
+ {&__pyx_n_s_HttpVersion10, __pyx_k_HttpVersion10, sizeof(__pyx_k_HttpVersion10), 0, 0, 1, 1},
+ {&__pyx_n_s_HttpVersion10_2, __pyx_k_HttpVersion10_2, sizeof(__pyx_k_HttpVersion10_2), 0, 0, 1, 1},
+ {&__pyx_n_s_HttpVersion11, __pyx_k_HttpVersion11, sizeof(__pyx_k_HttpVersion11), 0, 0, 1, 1},
+ {&__pyx_n_s_HttpVersion11_2, __pyx_k_HttpVersion11_2, sizeof(__pyx_k_HttpVersion11_2), 0, 0, 1, 1},
+ {&__pyx_n_s_HttpVersion_2, __pyx_k_HttpVersion_2, sizeof(__pyx_k_HttpVersion_2), 0, 0, 1, 1},
+ {&__pyx_n_s_IF_MATCH, __pyx_k_IF_MATCH, sizeof(__pyx_k_IF_MATCH), 0, 0, 1, 1},
+ {&__pyx_n_s_IF_MODIFIED_SINCE, __pyx_k_IF_MODIFIED_SINCE, sizeof(__pyx_k_IF_MODIFIED_SINCE), 0, 0, 1, 1},
+ {&__pyx_n_s_IF_NONE_MATCH, __pyx_k_IF_NONE_MATCH, sizeof(__pyx_k_IF_NONE_MATCH), 0, 0, 1, 1},
+ {&__pyx_n_s_IF_RANGE, __pyx_k_IF_RANGE, sizeof(__pyx_k_IF_RANGE), 0, 0, 1, 1},
+ {&__pyx_n_s_IF_UNMODIFIED_SINCE, __pyx_k_IF_UNMODIFIED_SINCE, sizeof(__pyx_k_IF_UNMODIFIED_SINCE), 0, 0, 1, 1},
+ {&__pyx_kp_s_Incompatible_checksums_s_vs_0x14, __pyx_k_Incompatible_checksums_s_vs_0x14, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x14), 0, 0, 1, 0},
+ {&__pyx_kp_s_Incompatible_checksums_s_vs_0xc7, __pyx_k_Incompatible_checksums_s_vs_0xc7, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xc7), 0, 0, 1, 0},
+ {&__pyx_n_s_InvalidHeader, __pyx_k_InvalidHeader, sizeof(__pyx_k_InvalidHeader), 0, 0, 1, 1},
+ {&__pyx_n_s_InvalidURLError, __pyx_k_InvalidURLError, sizeof(__pyx_k_InvalidURLError), 0, 0, 1, 1},
+ {&__pyx_n_s_KEEP_ALIVE, __pyx_k_KEEP_ALIVE, sizeof(__pyx_k_KEEP_ALIVE), 0, 0, 1, 1},
+ {&__pyx_n_s_LAST_EVENT_ID, __pyx_k_LAST_EVENT_ID, sizeof(__pyx_k_LAST_EVENT_ID), 0, 0, 1, 1},
+ {&__pyx_n_s_LAST_MODIFIED, __pyx_k_LAST_MODIFIED, sizeof(__pyx_k_LAST_MODIFIED), 0, 0, 1, 1},
+ {&__pyx_n_s_LINK, __pyx_k_LINK, sizeof(__pyx_k_LINK), 0, 0, 1, 1},
+ {&__pyx_n_s_LOCATION, __pyx_k_LOCATION, sizeof(__pyx_k_LOCATION), 0, 0, 1, 1},
+ {&__pyx_n_s_LineTooLong, __pyx_k_LineTooLong, sizeof(__pyx_k_LineTooLong), 0, 0, 1, 1},
+ {&__pyx_n_s_MAX_FORWARDS, __pyx_k_MAX_FORWARDS, sizeof(__pyx_k_MAX_FORWARDS), 0, 0, 1, 1},
+ {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
+ {&__pyx_kp_u_Not_enough_data_for_satisfy_cont, __pyx_k_Not_enough_data_for_satisfy_cont, sizeof(__pyx_k_Not_enough_data_for_satisfy_cont), 0, 1, 0, 0},
+ {&__pyx_kp_u_Not_enough_data_for_satisfy_tran, __pyx_k_Not_enough_data_for_satisfy_tran, sizeof(__pyx_k_Not_enough_data_for_satisfy_tran), 0, 1, 0, 0},
+ {&__pyx_n_s_ORIGIN, __pyx_k_ORIGIN, sizeof(__pyx_k_ORIGIN), 0, 0, 1, 1},
+ {&__pyx_n_s_PRAGMA, __pyx_k_PRAGMA, sizeof(__pyx_k_PRAGMA), 0, 0, 1, 1},
+ {&__pyx_n_s_PROXY_AUTHENTICATE, __pyx_k_PROXY_AUTHENTICATE, sizeof(__pyx_k_PROXY_AUTHENTICATE), 0, 0, 1, 1},
+ {&__pyx_n_s_PROXY_AUTHORIZATION, __pyx_k_PROXY_AUTHORIZATION, sizeof(__pyx_k_PROXY_AUTHORIZATION), 0, 0, 1, 1},
+ {&__pyx_n_s_PayloadEncodingError, __pyx_k_PayloadEncodingError, sizeof(__pyx_k_PayloadEncodingError), 0, 0, 1, 1},
+ {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
+ {&__pyx_n_s_RANGE, __pyx_k_RANGE, sizeof(__pyx_k_RANGE), 0, 0, 1, 1},
+ {&__pyx_n_s_REFERER, __pyx_k_REFERER, sizeof(__pyx_k_REFERER), 0, 0, 1, 1},
+ {&__pyx_n_s_RETRY_AFTER, __pyx_k_RETRY_AFTER, sizeof(__pyx_k_RETRY_AFTER), 0, 0, 1, 1},
+ {&__pyx_kp_u_RawRequestMessage, __pyx_k_RawRequestMessage, sizeof(__pyx_k_RawRequestMessage), 0, 1, 0, 0},
+ {&__pyx_n_s_RawRequestMessage_2, __pyx_k_RawRequestMessage_2, sizeof(__pyx_k_RawRequestMessage_2), 0, 0, 1, 1},
+ {&__pyx_n_u_RawRequestMessage_2, __pyx_k_RawRequestMessage_2, sizeof(__pyx_k_RawRequestMessage_2), 0, 1, 0, 1},
+ {&__pyx_kp_u_RawResponseMessage, __pyx_k_RawResponseMessage, sizeof(__pyx_k_RawResponseMessage), 0, 1, 0, 0},
+ {&__pyx_n_s_RawResponseMessage_2, __pyx_k_RawResponseMessage_2, sizeof(__pyx_k_RawResponseMessage_2), 0, 0, 1, 1},
+ {&__pyx_n_u_RawResponseMessage_2, __pyx_k_RawResponseMessage_2, sizeof(__pyx_k_RawResponseMessage_2), 0, 1, 0, 1},
+ {&__pyx_n_s_SEC_WEBSOCKET_ACCEPT, __pyx_k_SEC_WEBSOCKET_ACCEPT, sizeof(__pyx_k_SEC_WEBSOCKET_ACCEPT), 0, 0, 1, 1},
+ {&__pyx_n_s_SEC_WEBSOCKET_EXTENSIONS, __pyx_k_SEC_WEBSOCKET_EXTENSIONS, sizeof(__pyx_k_SEC_WEBSOCKET_EXTENSIONS), 0, 0, 1, 1},
+ {&__pyx_n_s_SEC_WEBSOCKET_KEY, __pyx_k_SEC_WEBSOCKET_KEY, sizeof(__pyx_k_SEC_WEBSOCKET_KEY), 0, 0, 1, 1},
+ {&__pyx_n_s_SEC_WEBSOCKET_KEY1, __pyx_k_SEC_WEBSOCKET_KEY1, sizeof(__pyx_k_SEC_WEBSOCKET_KEY1), 0, 0, 1, 1},
+ {&__pyx_n_s_SEC_WEBSOCKET_PROTOCOL, __pyx_k_SEC_WEBSOCKET_PROTOCOL, sizeof(__pyx_k_SEC_WEBSOCKET_PROTOCOL), 0, 0, 1, 1},
+ {&__pyx_n_s_SEC_WEBSOCKET_VERSION, __pyx_k_SEC_WEBSOCKET_VERSION, sizeof(__pyx_k_SEC_WEBSOCKET_VERSION), 0, 0, 1, 1},
+ {&__pyx_n_s_SERVER, __pyx_k_SERVER, sizeof(__pyx_k_SERVER), 0, 0, 1, 1},
+ {&__pyx_n_s_SET_COOKIE, __pyx_k_SET_COOKIE, sizeof(__pyx_k_SET_COOKIE), 0, 0, 1, 1},
+ {&__pyx_kp_u_Status_line_is_too_long, __pyx_k_Status_line_is_too_long, sizeof(__pyx_k_Status_line_is_too_long), 0, 1, 0, 0},
+ {&__pyx_n_s_StreamReader, __pyx_k_StreamReader, sizeof(__pyx_k_StreamReader), 0, 0, 1, 1},
+ {&__pyx_n_s_StreamReader_2, __pyx_k_StreamReader_2, sizeof(__pyx_k_StreamReader_2), 0, 0, 1, 1},
+ {&__pyx_n_s_TE, __pyx_k_TE, sizeof(__pyx_k_TE), 0, 0, 1, 1},
+ {&__pyx_n_s_TRAILER, __pyx_k_TRAILER, sizeof(__pyx_k_TRAILER), 0, 0, 1, 1},
+ {&__pyx_n_s_TRANSFER_ENCODING, __pyx_k_TRANSFER_ENCODING, sizeof(__pyx_k_TRANSFER_ENCODING), 0, 0, 1, 1},
+ {&__pyx_n_s_TransferEncodingError, __pyx_k_TransferEncodingError, sizeof(__pyx_k_TransferEncodingError), 0, 0, 1, 1},
+ {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
+ {&__pyx_n_s_UPGRADE, __pyx_k_UPGRADE, sizeof(__pyx_k_UPGRADE), 0, 0, 1, 1},
+ {&__pyx_n_s_URI, __pyx_k_URI, sizeof(__pyx_k_URI), 0, 0, 1, 1},
+ {&__pyx_n_s_URL, __pyx_k_URL, sizeof(__pyx_k_URL), 0, 0, 1, 1},
+ {&__pyx_n_s_URL_2, __pyx_k_URL_2, sizeof(__pyx_k_URL_2), 0, 0, 1, 1},
+ {&__pyx_n_s_USER_AGENT, __pyx_k_USER_AGENT, sizeof(__pyx_k_USER_AGENT), 0, 0, 1, 1},
+ {&__pyx_n_s_VARY, __pyx_k_VARY, sizeof(__pyx_k_VARY), 0, 0, 1, 1},
+ {&__pyx_n_s_VIA, __pyx_k_VIA, sizeof(__pyx_k_VIA), 0, 0, 1, 1},
+ {&__pyx_n_s_WANT_DIGEST, __pyx_k_WANT_DIGEST, sizeof(__pyx_k_WANT_DIGEST), 0, 0, 1, 1},
+ {&__pyx_n_s_WARNING, __pyx_k_WARNING, sizeof(__pyx_k_WARNING), 0, 0, 1, 1},
+ {&__pyx_n_s_WWW_AUTHENTICATE, __pyx_k_WWW_AUTHENTICATE, sizeof(__pyx_k_WWW_AUTHENTICATE), 0, 0, 1, 1},
+ {&__pyx_n_s_X_FORWARDED_FOR, __pyx_k_X_FORWARDED_FOR, sizeof(__pyx_k_X_FORWARDED_FOR), 0, 0, 1, 1},
+ {&__pyx_n_s_X_FORWARDED_HOST, __pyx_k_X_FORWARDED_HOST, sizeof(__pyx_k_X_FORWARDED_HOST), 0, 0, 1, 1},
+ {&__pyx_n_s_X_FORWARDED_PROTO, __pyx_k_X_FORWARDED_PROTO, sizeof(__pyx_k_X_FORWARDED_PROTO), 0, 0, 1, 1},
+ {&__pyx_kp_u__11, __pyx_k__11, sizeof(__pyx_k__11), 0, 1, 0, 0},
+ {&__pyx_kp_u__2, __pyx_k__2, sizeof(__pyx_k__2), 0, 1, 0, 0},
+ {&__pyx_kp_u__3, __pyx_k__3, sizeof(__pyx_k__3), 0, 1, 0, 0},
+ {&__pyx_n_s__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 0, 1, 1},
+ {&__pyx_kp_b__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 0, 0, 0},
+ {&__pyx_kp_u__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 1, 0, 0},
+ {&__pyx_n_s_add, __pyx_k_add, sizeof(__pyx_k_add), 0, 0, 1, 1},
+ {&__pyx_n_s_aiohttp, __pyx_k_aiohttp, sizeof(__pyx_k_aiohttp), 0, 0, 1, 1},
+ {&__pyx_n_s_aiohttp__http_parser, __pyx_k_aiohttp__http_parser, sizeof(__pyx_k_aiohttp__http_parser), 0, 0, 1, 1},
+ {&__pyx_kp_s_aiohttp__http_parser_pyx, __pyx_k_aiohttp__http_parser_pyx, sizeof(__pyx_k_aiohttp__http_parser_pyx), 0, 0, 1, 0},
+ {&__pyx_n_s_all, __pyx_k_all, sizeof(__pyx_k_all), 0, 0, 1, 1},
+ {&__pyx_n_s_args, __pyx_k_args, sizeof(__pyx_k_args), 0, 0, 1, 1},
+ {&__pyx_n_s_auto_decompress, __pyx_k_auto_decompress, sizeof(__pyx_k_auto_decompress), 0, 0, 1, 1},
+ {&__pyx_n_s_begin_http_chunk_receiving, __pyx_k_begin_http_chunk_receiving, sizeof(__pyx_k_begin_http_chunk_receiving), 0, 0, 1, 1},
+ {&__pyx_n_u_br, __pyx_k_br, sizeof(__pyx_k_br), 0, 1, 0, 1},
+ {&__pyx_n_s_buf_data, __pyx_k_buf_data, sizeof(__pyx_k_buf_data), 0, 0, 1, 1},
+ {&__pyx_n_s_build, __pyx_k_build, sizeof(__pyx_k_build), 0, 0, 1, 1},
+ {&__pyx_n_s_chunked, __pyx_k_chunked, sizeof(__pyx_k_chunked), 0, 0, 1, 1},
+ {&__pyx_n_u_chunked, __pyx_k_chunked, sizeof(__pyx_k_chunked), 0, 1, 0, 1},
+ {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
+ {&__pyx_n_s_close, __pyx_k_close, sizeof(__pyx_k_close), 0, 0, 1, 1},
+ {&__pyx_n_s_code, __pyx_k_code, sizeof(__pyx_k_code), 0, 0, 1, 1},
+ {&__pyx_n_u_code, __pyx_k_code, sizeof(__pyx_k_code), 0, 1, 0, 1},
+ {&__pyx_n_s_compression, __pyx_k_compression, sizeof(__pyx_k_compression), 0, 0, 1, 1},
+ {&__pyx_n_u_compression, __pyx_k_compression, sizeof(__pyx_k_compression), 0, 1, 0, 1},
+ {&__pyx_n_u_deflate, __pyx_k_deflate, sizeof(__pyx_k_deflate), 0, 1, 0, 1},
+ {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
+ {&__pyx_n_s_encoded, __pyx_k_encoded, sizeof(__pyx_k_encoded), 0, 0, 1, 1},
+ {&__pyx_n_s_end_http_chunk_receiving, __pyx_k_end_http_chunk_receiving, sizeof(__pyx_k_end_http_chunk_receiving), 0, 0, 1, 1},
+ {&__pyx_n_s_feed_data, __pyx_k_feed_data, sizeof(__pyx_k_feed_data), 0, 0, 1, 1},
+ {&__pyx_n_s_feed_eof, __pyx_k_feed_eof, sizeof(__pyx_k_feed_eof), 0, 0, 1, 1},
+ {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
+ {&__pyx_n_s_fragment, __pyx_k_fragment, sizeof(__pyx_k_fragment), 0, 0, 1, 1},
+ {&__pyx_n_s_genexpr, __pyx_k_genexpr, sizeof(__pyx_k_genexpr), 0, 0, 1, 1},
+ {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
+ {&__pyx_n_u_gzip, __pyx_k_gzip, sizeof(__pyx_k_gzip), 0, 1, 0, 1},
+ {&__pyx_n_s_hdrs, __pyx_k_hdrs, sizeof(__pyx_k_hdrs), 0, 0, 1, 1},
+ {&__pyx_n_s_headers, __pyx_k_headers, sizeof(__pyx_k_headers), 0, 0, 1, 1},
+ {&__pyx_n_u_headers, __pyx_k_headers, sizeof(__pyx_k_headers), 0, 1, 0, 1},
+ {&__pyx_n_s_host, __pyx_k_host, sizeof(__pyx_k_host), 0, 0, 1, 1},
+ {&__pyx_n_s_http_exceptions, __pyx_k_http_exceptions, sizeof(__pyx_k_http_exceptions), 0, 0, 1, 1},
+ {&__pyx_n_s_http_parser, __pyx_k_http_parser, sizeof(__pyx_k_http_parser), 0, 0, 1, 1},
+ {&__pyx_n_s_http_writer, __pyx_k_http_writer, sizeof(__pyx_k_http_writer), 0, 0, 1, 1},
+ {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
+ {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
+ {&__pyx_kp_u_invalid_url_r, __pyx_k_invalid_url_r, sizeof(__pyx_k_invalid_url_r), 0, 1, 0, 0},
+ {&__pyx_n_s_limit, __pyx_k_limit, sizeof(__pyx_k_limit), 0, 0, 1, 1},
+ {&__pyx_n_s_loop, __pyx_k_loop, sizeof(__pyx_k_loop), 0, 0, 1, 1},
+ {&__pyx_n_s_lower, __pyx_k_lower, sizeof(__pyx_k_lower), 0, 0, 1, 1},
+ {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+ {&__pyx_n_s_max_field_size, __pyx_k_max_field_size, sizeof(__pyx_k_max_field_size), 0, 0, 1, 1},
+ {&__pyx_n_s_max_headers, __pyx_k_max_headers, sizeof(__pyx_k_max_headers), 0, 0, 1, 1},
+ {&__pyx_n_s_max_line_size, __pyx_k_max_line_size, sizeof(__pyx_k_max_line_size), 0, 0, 1, 1},
+ {&__pyx_n_s_method, __pyx_k_method, sizeof(__pyx_k_method), 0, 0, 1, 1},
+ {&__pyx_n_u_method, __pyx_k_method, sizeof(__pyx_k_method), 0, 1, 0, 1},
+ {&__pyx_n_s_multidict, __pyx_k_multidict, sizeof(__pyx_k_multidict), 0, 0, 1, 1},
+ {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
+ {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
+ {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
+ {&__pyx_n_s_parse_url, __pyx_k_parse_url, sizeof(__pyx_k_parse_url), 0, 0, 1, 1},
+ {&__pyx_n_s_partition, __pyx_k_partition, sizeof(__pyx_k_partition), 0, 0, 1, 1},
+ {&__pyx_n_s_password, __pyx_k_password, sizeof(__pyx_k_password), 0, 0, 1, 1},
+ {&__pyx_n_s_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 0, 1, 1},
+ {&__pyx_n_u_path, __pyx_k_path, sizeof(__pyx_k_path), 0, 1, 0, 1},
+ {&__pyx_n_s_payload_exception, __pyx_k_payload_exception, sizeof(__pyx_k_payload_exception), 0, 0, 1, 1},
+ {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
+ {&__pyx_n_s_port, __pyx_k_port, sizeof(__pyx_k_port), 0, 0, 1, 1},
+ {&__pyx_n_s_protocol, __pyx_k_protocol, sizeof(__pyx_k_protocol), 0, 0, 1, 1},
+ {&__pyx_n_s_py_buf, __pyx_k_py_buf, sizeof(__pyx_k_py_buf), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_unpickle_RawRequestMessage, __pyx_k_pyx_unpickle_RawRequestMessage, sizeof(__pyx_k_pyx_unpickle_RawRequestMessage), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_unpickle_RawResponseMessag, __pyx_k_pyx_unpickle_RawResponseMessag, sizeof(__pyx_k_pyx_unpickle_RawResponseMessag), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
+ {&__pyx_n_s_query_string, __pyx_k_query_string, sizeof(__pyx_k_query_string), 0, 0, 1, 1},
+ {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
+ {&__pyx_n_s_raw_headers, __pyx_k_raw_headers, sizeof(__pyx_k_raw_headers), 0, 0, 1, 1},
+ {&__pyx_n_u_raw_headers, __pyx_k_raw_headers, sizeof(__pyx_k_raw_headers), 0, 1, 0, 1},
+ {&__pyx_n_s_read_until_eof, __pyx_k_read_until_eof, sizeof(__pyx_k_read_until_eof), 0, 0, 1, 1},
+ {&__pyx_n_s_reason, __pyx_k_reason, sizeof(__pyx_k_reason), 0, 0, 1, 1},
+ {&__pyx_n_u_reason, __pyx_k_reason, sizeof(__pyx_k_reason), 0, 1, 0, 1},
+ {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
+ {&__pyx_n_s_repr___locals_genexpr, __pyx_k_repr___locals_genexpr, sizeof(__pyx_k_repr___locals_genexpr), 0, 0, 1, 1},
+ {&__pyx_n_s_response_with_body, __pyx_k_response_with_body, sizeof(__pyx_k_response_with_body), 0, 0, 1, 1},
+ {&__pyx_n_s_scheme, __pyx_k_scheme, sizeof(__pyx_k_scheme), 0, 0, 1, 1},
+ {&__pyx_n_s_send, __pyx_k_send, sizeof(__pyx_k_send), 0, 0, 1, 1},
+ {&__pyx_n_s_set_exception, __pyx_k_set_exception, sizeof(__pyx_k_set_exception), 0, 0, 1, 1},
+ {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
+ {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
+ {&__pyx_n_s_should_close, __pyx_k_should_close, sizeof(__pyx_k_should_close), 0, 0, 1, 1},
+ {&__pyx_n_u_should_close, __pyx_k_should_close, sizeof(__pyx_k_should_close), 0, 1, 0, 1},
+ {&__pyx_n_s_streams, __pyx_k_streams, sizeof(__pyx_k_streams), 0, 0, 1, 1},
+ {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
+ {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+ {&__pyx_n_s_throw, __pyx_k_throw, sizeof(__pyx_k_throw), 0, 0, 1, 1},
+ {&__pyx_n_s_timer, __pyx_k_timer, sizeof(__pyx_k_timer), 0, 0, 1, 1},
+ {&__pyx_kp_u_unknown, __pyx_k_unknown, sizeof(__pyx_k_unknown), 0, 1, 0, 0},
+ {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
+ {&__pyx_n_s_upgrade, __pyx_k_upgrade, sizeof(__pyx_k_upgrade), 0, 0, 1, 1},
+ {&__pyx_n_u_upgrade, __pyx_k_upgrade, sizeof(__pyx_k_upgrade), 0, 1, 0, 1},
+ {&__pyx_n_s_url, __pyx_k_url, sizeof(__pyx_k_url), 0, 0, 1, 1},
+ {&__pyx_n_u_url, __pyx_k_url, sizeof(__pyx_k_url), 0, 1, 0, 1},
+ {&__pyx_n_s_user, __pyx_k_user, sizeof(__pyx_k_user), 0, 0, 1, 1},
+ {&__pyx_n_s_version, __pyx_k_version, sizeof(__pyx_k_version), 0, 0, 1, 1},
+ {&__pyx_n_u_version, __pyx_k_version, sizeof(__pyx_k_version), 0, 1, 0, 1},
+ {&__pyx_n_s_yarl, __pyx_k_yarl, sizeof(__pyx_k_yarl), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
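+
+/* Cache the builtins the module body references; the (file, line) pairs in
+ * __PYX_ERR presumably index _http_parser.pyx (0) and Cython's generated
+ * "(tree fragment)"/stringsource (1). */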
+static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 87, __pyx_L1_error)
+ __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(0, 316, __pyx_L1_error)
+ __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error)
+ __pyx_builtin_BaseException = __Pyx_GetBuiltinName(__pyx_n_s_BaseException); if (!__pyx_builtin_BaseException) __PYX_ERR(0, 631, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
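+/* Pre-build the constant argument tuples (the shared "no default
+ * __reduce__" message for each type's pickle stubs) plus the code objects
+ * behind parse_url and the two __pyx_unpickle_* helpers. */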
+static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
+
+ /* "(tree fragment)":2
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 2, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__5);
+ __Pyx_GIVEREF(__pyx_tuple__5);
+
+ /* "(tree fragment)":4
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ */
+ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__6);
+ __Pyx_GIVEREF(__pyx_tuple__6);
+
+ /* "(tree fragment)":2
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__7);
+ __Pyx_GIVEREF(__pyx_tuple__7);
+
+ /* "(tree fragment)":4
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ */
+ __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__8);
+ __Pyx_GIVEREF(__pyx_tuple__8);
+
+ /* "(tree fragment)":2
+ * def __reduce_cython__(self):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ */
+ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 2, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__9);
+ __Pyx_GIVEREF(__pyx_tuple__9);
+
+ /* "(tree fragment)":4
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__")
+ * def __setstate_cython__(self, __pyx_state):
+ * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
+ */
+ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__10);
+ __Pyx_GIVEREF(__pyx_tuple__10);
+
+ /* "aiohttp/_http_parser.pyx":57
+ * char* PyByteArray_AsString(object)
+ *
+ * __all__ = ('HttpRequestParser', 'HttpResponseParser', # <<<<<<<<<<<<<<
+ * 'RawRequestMessage', 'RawResponseMessage')
+ *
+ */
+ __pyx_tuple__12 = PyTuple_Pack(4, __pyx_n_u_HttpRequestParser, __pyx_n_u_HttpResponseParser, __pyx_n_u_RawRequestMessage_2, __pyx_n_u_RawResponseMessage_2); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 57, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__12);
+ __Pyx_GIVEREF(__pyx_tuple__12);
+
+ /* "aiohttp/_http_parser.pyx":785
+ *
+ *
+ * def parse_url(url): # <<<<<<<<<<<<<<
+ * cdef:
+ * Py_buffer py_buf
+ */
+ __pyx_tuple__13 = PyTuple_Pack(3, __pyx_n_s_url, __pyx_n_s_py_buf, __pyx_n_s_buf_data); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(0, 785, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__13);
+ __Pyx_GIVEREF(__pyx_tuple__13);
+ __pyx_codeobj__14 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__13, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_aiohttp__http_parser_pyx, __pyx_n_s_parse_url, 785, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__14)) __PYX_ERR(0, 785, __pyx_L1_error)
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_RawRequestMessage(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+ __pyx_tuple__15 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__15);
+ __Pyx_GIVEREF(__pyx_tuple__15);
+ __pyx_codeobj__16 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__15, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_RawRequestMessage, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__16)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __pyx_tuple__17 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__17);
+ __Pyx_GIVEREF(__pyx_tuple__17);
+ __pyx_codeobj__18 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__17, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_RawResponseMessag, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__18)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
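+/* Initialize the interned-string table plus two int constants; the ints
+   are presumably the __pyx_checksum values that the __pyx_unpickle_*
+   helpers compare against when restoring RawRequestMessage and
+   RawResponseMessage instances. */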
+static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
+ __pyx_umethod_PyUnicode_Type_partition.type = (PyObject*)&PyUnicode_Type;
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ __pyx_int_21004882 = PyInt_FromLong(21004882L); if (unlikely(!__pyx_int_21004882)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_209127132 = PyInt_FromLong(209127132L); if (unlikely(!__pyx_int_209127132)) __PYX_ERR(0, 1, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
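+/* The module-init work is split into small helper phases (global init,
+   variable/function export, type init, type import, variable/function
+   import); the module exec function below calls each exactly once. */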
+static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
+
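+/* Module-level cdef globals start out here as Py_None placeholders; they
+   are expected to be rebound later in this file's execution code to the
+   objects imported from multidict, yarl, aiohttp.hdrs and friends. */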
+static int __Pyx_modinit_global_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
+ /*--- Global init code ---*/
+ __pyx_v_7aiohttp_12_http_parser_headers = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_URL = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_URL_build = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_CIMultiDict = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_CIMultiDictProxy = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_HttpVersion = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_HttpVersion10 = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_HttpVersion11 = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_SEC_WEBSOCKET_KEY1 = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_CONTENT_ENCODING = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_EMPTY_PAYLOAD = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_StreamReader = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser_DeflateBuffer = Py_None; Py_INCREF(Py_None);
+ __pyx_v_7aiohttp_12_http_parser__http_method = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_variable_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
+ /*--- Variable export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
+ /*--- Function export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
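+/* Type init: call PyType_Ready on each extension type, wire up the
+   C-level vtables for the HttpParser hierarchy, expose the public types
+   as module attributes, and zero tp_print on CPython < 3.8.0b1, where
+   the slot still exists but is unused. */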
+static int __Pyx_modinit_type_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
+ /*--- Type init code ---*/
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser_RawRequestMessage) < 0) __PYX_ERR(0, 110, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser_RawRequestMessage.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser_RawRequestMessage.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser_RawRequestMessage.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser_RawRequestMessage.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_RawRequestMessage_2, (PyObject *)&__pyx_type_7aiohttp_12_http_parser_RawRequestMessage) < 0) __PYX_ERR(0, 110, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7aiohttp_12_http_parser_RawRequestMessage) < 0) __PYX_ERR(0, 110, __pyx_L1_error)
+ __pyx_ptype_7aiohttp_12_http_parser_RawRequestMessage = &__pyx_type_7aiohttp_12_http_parser_RawRequestMessage;
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser_RawResponseMessage) < 0) __PYX_ERR(0, 210, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser_RawResponseMessage.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser_RawResponseMessage.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser_RawResponseMessage.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser_RawResponseMessage.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_RawResponseMessage_2, (PyObject *)&__pyx_type_7aiohttp_12_http_parser_RawResponseMessage) < 0) __PYX_ERR(0, 210, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7aiohttp_12_http_parser_RawResponseMessage) < 0) __PYX_ERR(0, 210, __pyx_L1_error)
+ __pyx_ptype_7aiohttp_12_http_parser_RawResponseMessage = &__pyx_type_7aiohttp_12_http_parser_RawResponseMessage;
+ __pyx_vtabptr_7aiohttp_12_http_parser_HttpParser = &__pyx_vtable_7aiohttp_12_http_parser_HttpParser;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._init = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *, enum http_parser_type, PyObject *, PyObject *, int, struct __pyx_opt_args_7aiohttp_12_http_parser_10HttpParser__init *__pyx_optional_args))__pyx_f_7aiohttp_12_http_parser_10HttpParser__init;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._process_header = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_10HttpParser__process_header;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._on_header_field = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *, char *, size_t))__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_header_field;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._on_header_value = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *, char *, size_t))__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_header_value;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._on_headers_complete = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_headers_complete;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._on_message_complete = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_message_complete;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._on_chunk_header = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_chunk_header;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._on_chunk_complete = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_chunk_complete;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser._on_status_complete = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_10HttpParser__on_status_complete;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpParser.http_version = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_10HttpParser_http_version;
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser_HttpParser) < 0) __PYX_ERR(0, 272, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser_HttpParser.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser_HttpParser.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser_HttpParser.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser_HttpParser.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (__Pyx_SetVtable(__pyx_type_7aiohttp_12_http_parser_HttpParser.tp_dict, __pyx_vtabptr_7aiohttp_12_http_parser_HttpParser) < 0) __PYX_ERR(0, 272, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7aiohttp_12_http_parser_HttpParser) < 0) __PYX_ERR(0, 272, __pyx_L1_error)
+ __pyx_ptype_7aiohttp_12_http_parser_HttpParser = &__pyx_type_7aiohttp_12_http_parser_HttpParser;
+ __pyx_vtabptr_7aiohttp_12_http_parser_HttpRequestParser = &__pyx_vtable_7aiohttp_12_http_parser_HttpRequestParser;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpRequestParser.__pyx_base = *__pyx_vtabptr_7aiohttp_12_http_parser_HttpParser;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpRequestParser.__pyx_base._on_status_complete = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_17HttpRequestParser__on_status_complete;
+ __pyx_type_7aiohttp_12_http_parser_HttpRequestParser.tp_base = __pyx_ptype_7aiohttp_12_http_parser_HttpParser;
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser_HttpRequestParser) < 0) __PYX_ERR(0, 563, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser_HttpRequestParser.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser_HttpRequestParser.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser_HttpRequestParser.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser_HttpRequestParser.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (__Pyx_SetVtable(__pyx_type_7aiohttp_12_http_parser_HttpRequestParser.tp_dict, __pyx_vtabptr_7aiohttp_12_http_parser_HttpRequestParser) < 0) __PYX_ERR(0, 563, __pyx_L1_error)
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_HttpRequestParser, (PyObject *)&__pyx_type_7aiohttp_12_http_parser_HttpRequestParser) < 0) __PYX_ERR(0, 563, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7aiohttp_12_http_parser_HttpRequestParser) < 0) __PYX_ERR(0, 563, __pyx_L1_error)
+ __pyx_ptype_7aiohttp_12_http_parser_HttpRequestParser = &__pyx_type_7aiohttp_12_http_parser_HttpRequestParser;
+ __pyx_vtabptr_7aiohttp_12_http_parser_HttpResponseParser = &__pyx_vtable_7aiohttp_12_http_parser_HttpResponseParser;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpResponseParser.__pyx_base = *__pyx_vtabptr_7aiohttp_12_http_parser_HttpParser;
+ __pyx_vtable_7aiohttp_12_http_parser_HttpResponseParser.__pyx_base._on_status_complete = (PyObject *(*)(struct __pyx_obj_7aiohttp_12_http_parser_HttpParser *))__pyx_f_7aiohttp_12_http_parser_18HttpResponseParser__on_status_complete;
+ __pyx_type_7aiohttp_12_http_parser_HttpResponseParser.tp_base = __pyx_ptype_7aiohttp_12_http_parser_HttpParser;
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser_HttpResponseParser) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser_HttpResponseParser.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser_HttpResponseParser.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser_HttpResponseParser.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser_HttpResponseParser.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (__Pyx_SetVtable(__pyx_type_7aiohttp_12_http_parser_HttpResponseParser.tp_dict, __pyx_vtabptr_7aiohttp_12_http_parser_HttpResponseParser) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_HttpResponseParser, (PyObject *)&__pyx_type_7aiohttp_12_http_parser_HttpResponseParser) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7aiohttp_12_http_parser_HttpResponseParser) < 0) __PYX_ERR(0, 591, __pyx_L1_error)
+ __pyx_ptype_7aiohttp_12_http_parser_HttpResponseParser = &__pyx_type_7aiohttp_12_http_parser_HttpResponseParser;
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct____repr__) < 0) __PYX_ERR(0, 135, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct____repr__.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct____repr__.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct____repr__.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct____repr__.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ }
+ __pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct____repr__ = &__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct____repr__;
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr) < 0) __PYX_ERR(0, 147, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ }
+ __pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr = &__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_1_genexpr;
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__) < 0) __PYX_ERR(0, 233, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ }
+ __pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__ = &__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_2___repr__;
+ if (PyType_Ready(&__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr) < 0) __PYX_ERR(0, 244, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr.tp_dictoffset && __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ }
+ __pyx_ptype_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr = &__pyx_type_7aiohttp_12_http_parser___pyx_scope_struct_3_genexpr;
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
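+/* Type import: fetch the builtin type, bool and complex type objects
+   from the builtins module, warning (rather than failing) if their
+   struct sizes differ from what this module was compiled against. */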
+static int __Pyx_modinit_type_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
+ /*--- Type import code ---*/
+ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
+ #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
+ sizeof(PyTypeObject),
+ #else
+ sizeof(PyHeapTypeObject),
+ #endif
+ __Pyx_ImportType_CheckSize_Warn);
+ if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn);
+ if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(3, 8, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn);
+ if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(4, 15, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static int __Pyx_modinit_variable_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
+ /*--- Variable import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
+ /*--- Function import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+
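+/* Pick the right linkage for the module-init entry point: PyMODINIT_FUNC
+   when init symbols should be exported, otherwise plain "void" (Py2) or
+   "PyObject *" (Py3), with extern "C" under C++. */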
+#ifndef CYTHON_NO_PYINIT_EXPORT
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+#elif PY_MAJOR_VERSION < 3
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" void
+#else
+#define __Pyx_PyMODINIT_FUNC void
+#endif
+#else
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#endif
+#endif
+
+
+#if PY_MAJOR_VERSION < 3
+__Pyx_PyMODINIT_FUNC init_http_parser(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC init_http_parser(void)
+#else
+__Pyx_PyMODINIT_FUNC PyInit__http_parser(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC PyInit__http_parser(void)
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+{
+ return PyModuleDef_Init(&__pyx_moduledef);
+}
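+/* Under PEP 489 multi-phase init, PyInit__http_parser only hands back
+   the module definition; module creation and execution happen later via
+   the __pyx_pymod_create / __pyx_pymod_exec__http_parser slots below. */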
+static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
+ #if PY_VERSION_HEX >= 0x030700A1
+ static PY_INT64_T main_interpreter_id = -1;
+ PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
+ if (main_interpreter_id == -1) {
+ main_interpreter_id = current_id;
+ return (unlikely(current_id == -1)) ? -1 : 0;
+ } else if (unlikely(main_interpreter_id != current_id))
+ #else
+ static PyInterpreterState *main_interpreter = NULL;
+ PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
+ if (!main_interpreter) {
+ main_interpreter = current_interpreter;
+ } else if (unlikely(main_interpreter != current_interpreter))
+ #endif
+ {
+ PyErr_SetString(
+ PyExc_ImportError,
+ "Interpreter change detected - this module can only be loaded into one interpreter per process.");
+ return -1;
+ }
+ return 0;
+}
+static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
+ PyObject *value = PyObject_GetAttrString(spec, from_name);
+ int result = 0;
+ if (likely(value)) {
+ if (allow_none || value != Py_None) {
+ result = PyDict_SetItemString(moddict, to_name, value);
+ }
+ Py_DECREF(value);
+ } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Clear();
+ } else {
+ result = -1;
+ }
+ return result;
+}
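+/* Py_mod_create slot: refuse a second interpreter, reuse the module if
+   it already exists, and copy loader/origin/parent/path metadata from
+   the import spec into the new module's dict. */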
+static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
+ PyObject *module = NULL, *moddict, *modname;
+ if (__Pyx_check_single_interpreter())
+ return NULL;
+ if (__pyx_m)
+ return __Pyx_NewRef(__pyx_m);
+ modname = PyObject_GetAttrString(spec, "name");
+ if (unlikely(!modname)) goto bad;
+ module = PyModule_NewObject(modname);
+ Py_DECREF(modname);
+ if (unlikely(!module)) goto bad;
+ moddict = PyModule_GetDict(module);
+ if (unlikely(!moddict)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
+ return module;
+bad:
+ Py_XDECREF(module);
+ return NULL;
+}
+
+
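+/* Py_mod_exec slot: this is where the module body actually runs.  The
+   long run of __pyx_t_* locals below is Cython's flattened pool of
+   temporaries, reused across statements for every intermediate value
+   the module-level code needs. */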
+static CYTHON_SMALL_CODE int __pyx_pymod_exec__http_parser(PyObject *__pyx_pyinit_module)
+#endif
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ PyObject *__pyx_t_14 = NULL;
+ PyObject *__pyx_t_15 = NULL;
+ PyObject *__pyx_t_16 = NULL;
+ PyObject *__pyx_t_17 = NULL;
+ PyObject *__pyx_t_18 = NULL;
+ PyObject *__pyx_t_19 = NULL;
+ PyObject *__pyx_t_20 = NULL;
+ PyObject *__pyx_t_21 = NULL;
+ PyObject *__pyx_t_22 = NULL;
+ PyObject *__pyx_t_23 = NULL;
+ PyObject *__pyx_t_24 = NULL;
+ PyObject *__pyx_t_25 = NULL;
+ PyObject *__pyx_t_26 = NULL;
+ PyObject *__pyx_t_27 = NULL;
+ PyObject *__pyx_t_28 = NULL;
+ PyObject *__pyx_t_29 = NULL;
+ PyObject *__pyx_t_30 = NULL;
+ PyObject *__pyx_t_31 = NULL;
+ PyObject *__pyx_t_32 = NULL;
+ PyObject *__pyx_t_33 = NULL;
+ PyObject *__pyx_t_34 = NULL;
+ PyObject *__pyx_t_35 = NULL;
+ PyObject *__pyx_t_36 = NULL;
+ PyObject *__pyx_t_37 = NULL;
+ PyObject *__pyx_t_38 = NULL;
+ PyObject *__pyx_t_39 = NULL;
+ PyObject *__pyx_t_40 = NULL;
+ PyObject *__pyx_t_41 = NULL;
+ PyObject *__pyx_t_42 = NULL;
+ PyObject *__pyx_t_43 = NULL;
+ PyObject *__pyx_t_44 = NULL;
+ PyObject *__pyx_t_45 = NULL;
+ PyObject *__pyx_t_46 = NULL;
+ PyObject *__pyx_t_47 = NULL;
+ PyObject *__pyx_t_48 = NULL;
+ PyObject *__pyx_t_49 = NULL;
+ PyObject *__pyx_t_50 = NULL;
+ PyObject *__pyx_t_51 = NULL;
+ PyObject *__pyx_t_52 = NULL;
+ PyObject *__pyx_t_53 = NULL;
+ PyObject *__pyx_t_54 = NULL;
+ PyObject *__pyx_t_55 = NULL;
+ PyObject *__pyx_t_56 = NULL;
+ PyObject *__pyx_t_57 = NULL;
+ PyObject *__pyx_t_58 = NULL;
+ PyObject *__pyx_t_59 = NULL;
+ PyObject *__pyx_t_60 = NULL;
+ PyObject *__pyx_t_61 = NULL;
+ PyObject *__pyx_t_62 = NULL;
+ PyObject *__pyx_t_63 = NULL;
+ PyObject *__pyx_t_64 = NULL;
+ PyObject *__pyx_t_65 = NULL;
+ PyObject *__pyx_t_66 = NULL;
+ PyObject *__pyx_t_67 = NULL;
+ PyObject *__pyx_t_68 = NULL;
+ PyObject *__pyx_t_69 = NULL;
+ PyObject *__pyx_t_70 = NULL;
+ PyObject *__pyx_t_71 = NULL;
+ PyObject *__pyx_t_72 = NULL;
+ PyObject *__pyx_t_73 = NULL;
+ PyObject *__pyx_t_74 = NULL;
+ PyObject *__pyx_t_75 = NULL;
+ PyObject *__pyx_t_76 = NULL;
+ PyObject *__pyx_t_77 = NULL;
+ PyObject *__pyx_t_78 = NULL;
+ long __pyx_t_79;
+ enum http_method __pyx_t_80;
+ char const *__pyx_t_81;
+ int __pyx_t_82;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannyDeclarations
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ if (__pyx_m) {
+ if (__pyx_m == __pyx_pyinit_module) return 0;
+ PyErr_SetString(PyExc_RuntimeError, "Module '_http_parser' has already been imported. Re-initialisation is not supported.");
+ return -1;
+ }
+ #elif PY_MAJOR_VERSION >= 3
+ if (__pyx_m) return __Pyx_NewRef(__pyx_m);
+ #endif
+ #if CYTHON_REFNANNY
+__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+}
+#endif
+ __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__http_parser(void)", 0);
+ if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pxy_PyFrame_Initialize_Offsets
+ __Pxy_PyFrame_Initialize_Offsets();
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pyx_CyFunction_USED
+ if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_FusedFunction_USED
+ if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Generator_USED
+ if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_StopAsyncIteration_USED
+ if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_m = __pyx_pyinit_module;
+ Py_INCREF(__pyx_m);
+ #else
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4("_http_parser", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_d);
+ __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_b);
+ __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_cython_runtime);
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ /*--- Initialize various global constants etc. ---*/
+ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+ if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ if (__pyx_module_is_main_aiohttp___http_parser) {
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ #if PY_MAJOR_VERSION >= 3
+ {
+ PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (!PyDict_GetItemString(modules, "aiohttp._http_parser")) {
+ if (unlikely(PyDict_SetItemString(modules, "aiohttp._http_parser", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ }
+ #endif
+ /*--- Builtin init code ---*/
+ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Constants init code ---*/
+ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Global type/function init code ---*/
+ (void)__Pyx_modinit_global_init_code();
+ (void)__Pyx_modinit_variable_export_code();
+ (void)__Pyx_modinit_function_export_code();
+ if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ (void)__Pyx_modinit_variable_import_code();
+ (void)__Pyx_modinit_function_import_code();
+ /*--- Execution code ---*/
+ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+ if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+
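+/* Execution code: each quoted "aiohttp/_http_parser.pyx" fragment below
+   replays one statement of the module body.  Imports go through
+   __Pyx_Import/__Pyx_ImportFrom and are bound into the module dict
+   __pyx_d, with the "_2"-suffixed string constants holding the
+   "_"-prefixed aliases from the original import statements. */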
+ /* "aiohttp/_http_parser.pyx":19
+ * from libc.string cimport memcpy
+ *
+ * from multidict import CIMultiDict as _CIMultiDict, CIMultiDictProxy as _CIMultiDictProxy # <<<<<<<<<<<<<<
+ * from yarl import URL as _URL
+ *
+ */
+ __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_s_CIMultiDict);
+ __Pyx_GIVEREF(__pyx_n_s_CIMultiDict);
+ PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_CIMultiDict);
+ __Pyx_INCREF(__pyx_n_s_CIMultiDictProxy);
+ __Pyx_GIVEREF(__pyx_n_s_CIMultiDictProxy);
+ PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_CIMultiDictProxy);
+ __pyx_t_2 = __Pyx_Import(__pyx_n_s_multidict, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_CIMultiDict); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_CIMultiDict_2, __pyx_t_1) < 0) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_CIMultiDictProxy); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_CIMultiDictProxy_2, __pyx_t_1) < 0) __PYX_ERR(0, 19, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":20
+ *
+ * from multidict import CIMultiDict as _CIMultiDict, CIMultiDictProxy as _CIMultiDictProxy
+ * from yarl import URL as _URL # <<<<<<<<<<<<<<
+ *
+ * from aiohttp import hdrs
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_URL);
+ __Pyx_GIVEREF(__pyx_n_s_URL);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_URL);
+ __pyx_t_1 = __Pyx_Import(__pyx_n_s_yarl, __pyx_t_2, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_URL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 20, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_URL_2, __pyx_t_2) < 0) __PYX_ERR(0, 20, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":22
+ * from yarl import URL as _URL
+ *
+ * from aiohttp import hdrs # <<<<<<<<<<<<<<
+ *
+ * from .http_exceptions import (
+ */
+ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_s_hdrs);
+ __Pyx_GIVEREF(__pyx_n_s_hdrs);
+ PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_hdrs);
+ __pyx_t_2 = __Pyx_Import(__pyx_n_s_aiohttp, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 22, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_hdrs, __pyx_t_1) < 0) __PYX_ERR(0, 22, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":25
+ *
+ * from .http_exceptions import (
+ * BadHttpMessage, # <<<<<<<<<<<<<<
+ * BadStatusLine,
+ * ContentLengthError,
+ */
+ __pyx_t_2 = PyList_New(8); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 25, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_BadHttpMessage);
+ __Pyx_GIVEREF(__pyx_n_s_BadHttpMessage);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_BadHttpMessage);
+ __Pyx_INCREF(__pyx_n_s_BadStatusLine);
+ __Pyx_GIVEREF(__pyx_n_s_BadStatusLine);
+ PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_BadStatusLine);
+ __Pyx_INCREF(__pyx_n_s_ContentLengthError);
+ __Pyx_GIVEREF(__pyx_n_s_ContentLengthError);
+ PyList_SET_ITEM(__pyx_t_2, 2, __pyx_n_s_ContentLengthError);
+ __Pyx_INCREF(__pyx_n_s_InvalidHeader);
+ __Pyx_GIVEREF(__pyx_n_s_InvalidHeader);
+ PyList_SET_ITEM(__pyx_t_2, 3, __pyx_n_s_InvalidHeader);
+ __Pyx_INCREF(__pyx_n_s_InvalidURLError);
+ __Pyx_GIVEREF(__pyx_n_s_InvalidURLError);
+ PyList_SET_ITEM(__pyx_t_2, 4, __pyx_n_s_InvalidURLError);
+ __Pyx_INCREF(__pyx_n_s_LineTooLong);
+ __Pyx_GIVEREF(__pyx_n_s_LineTooLong);
+ PyList_SET_ITEM(__pyx_t_2, 5, __pyx_n_s_LineTooLong);
+ __Pyx_INCREF(__pyx_n_s_PayloadEncodingError);
+ __Pyx_GIVEREF(__pyx_n_s_PayloadEncodingError);
+ PyList_SET_ITEM(__pyx_t_2, 6, __pyx_n_s_PayloadEncodingError);
+ __Pyx_INCREF(__pyx_n_s_TransferEncodingError);
+ __Pyx_GIVEREF(__pyx_n_s_TransferEncodingError);
+ PyList_SET_ITEM(__pyx_t_2, 7, __pyx_n_s_TransferEncodingError);
+
+ /* "aiohttp/_http_parser.pyx":24
+ * from aiohttp import hdrs
+ *
+ * from .http_exceptions import ( # <<<<<<<<<<<<<<
+ * BadHttpMessage,
+ * BadStatusLine,
+ */
+ __pyx_t_1 = __Pyx_Import(__pyx_n_s_http_exceptions, __pyx_t_2, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_BadHttpMessage); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_BadHttpMessage, __pyx_t_2) < 0) __PYX_ERR(0, 25, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_BadStatusLine); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_BadStatusLine, __pyx_t_2) < 0) __PYX_ERR(0, 26, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_ContentLengthError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_ContentLengthError, __pyx_t_2) < 0) __PYX_ERR(0, 27, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_InvalidHeader); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_InvalidHeader, __pyx_t_2) < 0) __PYX_ERR(0, 28, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_InvalidURLError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_InvalidURLError, __pyx_t_2) < 0) __PYX_ERR(0, 29, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_LineTooLong); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_LineTooLong, __pyx_t_2) < 0) __PYX_ERR(0, 30, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_PayloadEncodingError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_PayloadEncodingError, __pyx_t_2) < 0) __PYX_ERR(0, 31, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_TransferEncodingError); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_TransferEncodingError, __pyx_t_2) < 0) __PYX_ERR(0, 32, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":34
+ * TransferEncodingError,
+ * )
+ * from .http_parser import DeflateBuffer as _DeflateBuffer # <<<<<<<<<<<<<<
+ * from .http_writer import (
+ * HttpVersion as _HttpVersion,
+ */
+ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 34, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_s_DeflateBuffer);
+ __Pyx_GIVEREF(__pyx_n_s_DeflateBuffer);
+ PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_DeflateBuffer);
+ __pyx_t_2 = __Pyx_Import(__pyx_n_s_http_parser, __pyx_t_1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 34, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_DeflateBuffer); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 34, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_DeflateBuffer_2, __pyx_t_1) < 0) __PYX_ERR(0, 34, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_parser.pyx":36
+ * from .http_parser import DeflateBuffer as _DeflateBuffer
+ * from .http_writer import (
+ * HttpVersion as _HttpVersion, # <<<<<<<<<<<<<<
+ * HttpVersion10 as _HttpVersion10,
+ * HttpVersion11 as _HttpVersion11,
+ */
+ __pyx_t_2 = PyList_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 36, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_HttpVersion);
+ __Pyx_GIVEREF(__pyx_n_s_HttpVersion);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_HttpVersion);
+ __Pyx_INCREF(__pyx_n_s_HttpVersion10);
+ __Pyx_GIVEREF(__pyx_n_s_HttpVersion10);
+ PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_HttpVersion10);
+ __Pyx_INCREF(__pyx_n_s_HttpVersion11);
+ __Pyx_GIVEREF(__pyx_n_s_HttpVersion11);
+ PyList_SET_ITEM(__pyx_t_2, 2, __pyx_n_s_HttpVersion11);
+
+ /* "aiohttp/_http_parser.pyx":35
+ * )
+ * from .http_parser import DeflateBuffer as _DeflateBuffer
+ * from .http_writer import ( # <<<<<<<<<<<<<<
+ * HttpVersion as _HttpVersion,
+ * HttpVersion10 as _HttpVersion10,
+ */
+ __pyx_t_1 = __Pyx_Import(__pyx_n_s_http_writer, __pyx_t_2, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_HttpVersion); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpVersion_2, __pyx_t_2) < 0) __PYX_ERR(0, 36, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_HttpVersion10); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpVersion10_2, __pyx_t_2) < 0) __PYX_ERR(0, 37, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_HttpVersion11); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_HttpVersion11_2, __pyx_t_2) < 0) __PYX_ERR(0, 38, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":40
+ * HttpVersion11 as _HttpVersion11,
+ * )
+ * from .streams import EMPTY_PAYLOAD as _EMPTY_PAYLOAD, StreamReader as _StreamReader # <<<<<<<<<<<<<<
+ *
+ * cimport cython
+ */
+ __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_s_EMPTY_PAYLOAD);
+ __Pyx_GIVEREF(__pyx_n_s_EMPTY_PAYLOAD);
+ PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_EMPTY_PAYLOAD);
+ __Pyx_INCREF(__pyx_n_s_StreamReader);
+ __Pyx_GIVEREF(__pyx_n_s_StreamReader);
+ PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_StreamReader);
+ __pyx_t_2 = __Pyx_Import(__pyx_n_s_streams, __pyx_t_1, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_EMPTY_PAYLOAD); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_EMPTY_PAYLOAD_2, __pyx_t_1) < 0) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_StreamReader); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_StreamReader_2, __pyx_t_1) < 0) __PYX_ERR(0, 40, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
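+/* From here on the included aiohttp/_headers.pxi runs: it re-imports
+   hdrs and builds the cdef "headers" tuple one well-known header
+   constant per statement (ACCEPT, ACCEPT_CHARSET, ...), each fetched
+   into its own __pyx_t_N temporary until the tuple is packed further
+   down. */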
+ /* "aiohttp/_headers.pxi":4
+ * # Run ./tools/gen.py to update it after the origin changing.
+ *
+ * from . import hdrs # <<<<<<<<<<<<<<
+ * cdef tuple headers = (
+ * hdrs.ACCEPT,
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(5, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_hdrs);
+ __Pyx_GIVEREF(__pyx_n_s_hdrs);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_hdrs);
+ __pyx_t_1 = __Pyx_Import(__pyx_n_s__4, __pyx_t_2, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_2)) __PYX_ERR(5, 4, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_hdrs, __pyx_t_2) < 0) __PYX_ERR(5, 4, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":6
+ * from . import hdrs
+ * cdef tuple headers = (
+ * hdrs.ACCEPT, # <<<<<<<<<<<<<<
+ * hdrs.ACCEPT_CHARSET,
+ * hdrs.ACCEPT_ENCODING,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCEPT); if (unlikely(!__pyx_t_2)) __PYX_ERR(5, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":7
+ * cdef tuple headers = (
+ * hdrs.ACCEPT,
+ * hdrs.ACCEPT_CHARSET, # <<<<<<<<<<<<<<
+ * hdrs.ACCEPT_ENCODING,
+ * hdrs.ACCEPT_LANGUAGE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCEPT_CHARSET); if (unlikely(!__pyx_t_3)) __PYX_ERR(5, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":8
+ * hdrs.ACCEPT,
+ * hdrs.ACCEPT_CHARSET,
+ * hdrs.ACCEPT_ENCODING, # <<<<<<<<<<<<<<
+ * hdrs.ACCEPT_LANGUAGE,
+ * hdrs.ACCEPT_RANGES,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCEPT_ENCODING); if (unlikely(!__pyx_t_4)) __PYX_ERR(5, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":9
+ * hdrs.ACCEPT_CHARSET,
+ * hdrs.ACCEPT_ENCODING,
+ * hdrs.ACCEPT_LANGUAGE, # <<<<<<<<<<<<<<
+ * hdrs.ACCEPT_RANGES,
+ * hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCEPT_LANGUAGE); if (unlikely(!__pyx_t_5)) __PYX_ERR(5, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":10
+ * hdrs.ACCEPT_ENCODING,
+ * hdrs.ACCEPT_LANGUAGE,
+ * hdrs.ACCEPT_RANGES, # <<<<<<<<<<<<<<
+ * hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
+ * hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 10, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCEPT_RANGES); if (unlikely(!__pyx_t_6)) __PYX_ERR(5, 10, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":11
+ * hdrs.ACCEPT_LANGUAGE,
+ * hdrs.ACCEPT_RANGES,
+ * hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS, # <<<<<<<<<<<<<<
+ * hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
+ * hdrs.ACCESS_CONTROL_ALLOW_METHODS,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 11, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCESS_CONTROL_ALLOW_CREDENTIALS); if (unlikely(!__pyx_t_7)) __PYX_ERR(5, 11, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":12
+ * hdrs.ACCEPT_RANGES,
+ * hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
+ * hdrs.ACCESS_CONTROL_ALLOW_HEADERS, # <<<<<<<<<<<<<<
+ * hdrs.ACCESS_CONTROL_ALLOW_METHODS,
+ * hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCESS_CONTROL_ALLOW_HEADERS); if (unlikely(!__pyx_t_8)) __PYX_ERR(5, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":13
+ * hdrs.ACCESS_CONTROL_ALLOW_CREDENTIALS,
+ * hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
+ * hdrs.ACCESS_CONTROL_ALLOW_METHODS, # <<<<<<<<<<<<<<
+ * hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
+ * hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCESS_CONTROL_ALLOW_METHODS); if (unlikely(!__pyx_t_9)) __PYX_ERR(5, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":14
+ * hdrs.ACCESS_CONTROL_ALLOW_HEADERS,
+ * hdrs.ACCESS_CONTROL_ALLOW_METHODS,
+ * hdrs.ACCESS_CONTROL_ALLOW_ORIGIN, # <<<<<<<<<<<<<<
+ * hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
+ * hdrs.ACCESS_CONTROL_MAX_AGE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCESS_CONTROL_ALLOW_ORIGIN); if (unlikely(!__pyx_t_10)) __PYX_ERR(5, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_10);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":15
+ * hdrs.ACCESS_CONTROL_ALLOW_METHODS,
+ * hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
+ * hdrs.ACCESS_CONTROL_EXPOSE_HEADERS, # <<<<<<<<<<<<<<
+ * hdrs.ACCESS_CONTROL_MAX_AGE,
+ * hdrs.ACCESS_CONTROL_REQUEST_HEADERS,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCESS_CONTROL_EXPOSE_HEADERS); if (unlikely(!__pyx_t_11)) __PYX_ERR(5, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":16
+ * hdrs.ACCESS_CONTROL_ALLOW_ORIGIN,
+ * hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
+ * hdrs.ACCESS_CONTROL_MAX_AGE, # <<<<<<<<<<<<<<
+ * hdrs.ACCESS_CONTROL_REQUEST_HEADERS,
+ * hdrs.ACCESS_CONTROL_REQUEST_METHOD,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 16, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_12 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCESS_CONTROL_MAX_AGE); if (unlikely(!__pyx_t_12)) __PYX_ERR(5, 16, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":17
+ * hdrs.ACCESS_CONTROL_EXPOSE_HEADERS,
+ * hdrs.ACCESS_CONTROL_MAX_AGE,
+ * hdrs.ACCESS_CONTROL_REQUEST_HEADERS, # <<<<<<<<<<<<<<
+ * hdrs.ACCESS_CONTROL_REQUEST_METHOD,
+ * hdrs.AGE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_13 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCESS_CONTROL_REQUEST_HEADERS); if (unlikely(!__pyx_t_13)) __PYX_ERR(5, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":18
+ * hdrs.ACCESS_CONTROL_MAX_AGE,
+ * hdrs.ACCESS_CONTROL_REQUEST_HEADERS,
+ * hdrs.ACCESS_CONTROL_REQUEST_METHOD, # <<<<<<<<<<<<<<
+ * hdrs.AGE,
+ * hdrs.ALLOW,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 18, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_14 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ACCESS_CONTROL_REQUEST_METHOD); if (unlikely(!__pyx_t_14)) __PYX_ERR(5, 18, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_14);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":19
+ * hdrs.ACCESS_CONTROL_REQUEST_HEADERS,
+ * hdrs.ACCESS_CONTROL_REQUEST_METHOD,
+ * hdrs.AGE, # <<<<<<<<<<<<<<
+ * hdrs.ALLOW,
+ * hdrs.AUTHORIZATION,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_15 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_AGE); if (unlikely(!__pyx_t_15)) __PYX_ERR(5, 19, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_15);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":20
+ * hdrs.ACCESS_CONTROL_REQUEST_METHOD,
+ * hdrs.AGE,
+ * hdrs.ALLOW, # <<<<<<<<<<<<<<
+ * hdrs.AUTHORIZATION,
+ * hdrs.CACHE_CONTROL,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 20, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_16 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ALLOW); if (unlikely(!__pyx_t_16)) __PYX_ERR(5, 20, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_16);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":21
+ * hdrs.AGE,
+ * hdrs.ALLOW,
+ * hdrs.AUTHORIZATION, # <<<<<<<<<<<<<<
+ * hdrs.CACHE_CONTROL,
+ * hdrs.CONNECTION,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 21, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_17 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_AUTHORIZATION); if (unlikely(!__pyx_t_17)) __PYX_ERR(5, 21, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_17);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":22
+ * hdrs.ALLOW,
+ * hdrs.AUTHORIZATION,
+ * hdrs.CACHE_CONTROL, # <<<<<<<<<<<<<<
+ * hdrs.CONNECTION,
+ * hdrs.CONTENT_DISPOSITION,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 22, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_18 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CACHE_CONTROL); if (unlikely(!__pyx_t_18)) __PYX_ERR(5, 22, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_18);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":23
+ * hdrs.AUTHORIZATION,
+ * hdrs.CACHE_CONTROL,
+ * hdrs.CONNECTION, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_DISPOSITION,
+ * hdrs.CONTENT_ENCODING,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 23, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONNECTION); if (unlikely(!__pyx_t_19)) __PYX_ERR(5, 23, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_19);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":24
+ * hdrs.CACHE_CONTROL,
+ * hdrs.CONNECTION,
+ * hdrs.CONTENT_DISPOSITION, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_ENCODING,
+ * hdrs.CONTENT_LANGUAGE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_20 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_DISPOSITION); if (unlikely(!__pyx_t_20)) __PYX_ERR(5, 24, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_20);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":25
+ * hdrs.CONNECTION,
+ * hdrs.CONTENT_DISPOSITION,
+ * hdrs.CONTENT_ENCODING, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_LANGUAGE,
+ * hdrs.CONTENT_LENGTH,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 25, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_21 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_ENCODING); if (unlikely(!__pyx_t_21)) __PYX_ERR(5, 25, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_21);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":26
+ * hdrs.CONTENT_DISPOSITION,
+ * hdrs.CONTENT_ENCODING,
+ * hdrs.CONTENT_LANGUAGE, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_LENGTH,
+ * hdrs.CONTENT_LOCATION,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 26, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_22 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_LANGUAGE); if (unlikely(!__pyx_t_22)) __PYX_ERR(5, 26, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_22);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":27
+ * hdrs.CONTENT_ENCODING,
+ * hdrs.CONTENT_LANGUAGE,
+ * hdrs.CONTENT_LENGTH, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_LOCATION,
+ * hdrs.CONTENT_MD5,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 27, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_23 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_LENGTH); if (unlikely(!__pyx_t_23)) __PYX_ERR(5, 27, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_23);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":28
+ * hdrs.CONTENT_LANGUAGE,
+ * hdrs.CONTENT_LENGTH,
+ * hdrs.CONTENT_LOCATION, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_MD5,
+ * hdrs.CONTENT_RANGE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 28, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_24 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_LOCATION); if (unlikely(!__pyx_t_24)) __PYX_ERR(5, 28, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_24);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":29
+ * hdrs.CONTENT_LENGTH,
+ * hdrs.CONTENT_LOCATION,
+ * hdrs.CONTENT_MD5, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_RANGE,
+ * hdrs.CONTENT_TRANSFER_ENCODING,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 29, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_25 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_MD5); if (unlikely(!__pyx_t_25)) __PYX_ERR(5, 29, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_25);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":30
+ * hdrs.CONTENT_LOCATION,
+ * hdrs.CONTENT_MD5,
+ * hdrs.CONTENT_RANGE, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_TRANSFER_ENCODING,
+ * hdrs.CONTENT_TYPE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 30, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_26 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_RANGE); if (unlikely(!__pyx_t_26)) __PYX_ERR(5, 30, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_26);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":31
+ * hdrs.CONTENT_MD5,
+ * hdrs.CONTENT_RANGE,
+ * hdrs.CONTENT_TRANSFER_ENCODING, # <<<<<<<<<<<<<<
+ * hdrs.CONTENT_TYPE,
+ * hdrs.COOKIE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 31, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_27 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_TRANSFER_ENCODING); if (unlikely(!__pyx_t_27)) __PYX_ERR(5, 31, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_27);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":32
+ * hdrs.CONTENT_RANGE,
+ * hdrs.CONTENT_TRANSFER_ENCODING,
+ * hdrs.CONTENT_TYPE, # <<<<<<<<<<<<<<
+ * hdrs.COOKIE,
+ * hdrs.DATE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 32, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_28 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_CONTENT_TYPE); if (unlikely(!__pyx_t_28)) __PYX_ERR(5, 32, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_28);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":33
+ * hdrs.CONTENT_TRANSFER_ENCODING,
+ * hdrs.CONTENT_TYPE,
+ * hdrs.COOKIE, # <<<<<<<<<<<<<<
+ * hdrs.DATE,
+ * hdrs.DESTINATION,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 33, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_29 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_COOKIE); if (unlikely(!__pyx_t_29)) __PYX_ERR(5, 33, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_29);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":34
+ * hdrs.CONTENT_TYPE,
+ * hdrs.COOKIE,
+ * hdrs.DATE, # <<<<<<<<<<<<<<
+ * hdrs.DESTINATION,
+ * hdrs.DIGEST,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 34, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_30 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_DATE); if (unlikely(!__pyx_t_30)) __PYX_ERR(5, 34, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_30);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":35
+ * hdrs.COOKIE,
+ * hdrs.DATE,
+ * hdrs.DESTINATION, # <<<<<<<<<<<<<<
+ * hdrs.DIGEST,
+ * hdrs.ETAG,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_31 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_DESTINATION); if (unlikely(!__pyx_t_31)) __PYX_ERR(5, 35, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_31);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":36
+ * hdrs.DATE,
+ * hdrs.DESTINATION,
+ * hdrs.DIGEST, # <<<<<<<<<<<<<<
+ * hdrs.ETAG,
+ * hdrs.EXPECT,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 36, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_32 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_DIGEST); if (unlikely(!__pyx_t_32)) __PYX_ERR(5, 36, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_32);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":37
+ * hdrs.DESTINATION,
+ * hdrs.DIGEST,
+ * hdrs.ETAG, # <<<<<<<<<<<<<<
+ * hdrs.EXPECT,
+ * hdrs.EXPIRES,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 37, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_33 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ETAG); if (unlikely(!__pyx_t_33)) __PYX_ERR(5, 37, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_33);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":38
+ * hdrs.DIGEST,
+ * hdrs.ETAG,
+ * hdrs.EXPECT, # <<<<<<<<<<<<<<
+ * hdrs.EXPIRES,
+ * hdrs.FORWARDED,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 38, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_34 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_EXPECT); if (unlikely(!__pyx_t_34)) __PYX_ERR(5, 38, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_34);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":39
+ * hdrs.ETAG,
+ * hdrs.EXPECT,
+ * hdrs.EXPIRES, # <<<<<<<<<<<<<<
+ * hdrs.FORWARDED,
+ * hdrs.FROM,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 39, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_35 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_EXPIRES); if (unlikely(!__pyx_t_35)) __PYX_ERR(5, 39, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_35);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":40
+ * hdrs.EXPECT,
+ * hdrs.EXPIRES,
+ * hdrs.FORWARDED, # <<<<<<<<<<<<<<
+ * hdrs.FROM,
+ * hdrs.HOST,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 40, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_36 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_FORWARDED); if (unlikely(!__pyx_t_36)) __PYX_ERR(5, 40, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_36);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":41
+ * hdrs.EXPIRES,
+ * hdrs.FORWARDED,
+ * hdrs.FROM, # <<<<<<<<<<<<<<
+ * hdrs.HOST,
+ * hdrs.IF_MATCH,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 41, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_37 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_FROM); if (unlikely(!__pyx_t_37)) __PYX_ERR(5, 41, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_37);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":42
+ * hdrs.FORWARDED,
+ * hdrs.FROM,
+ * hdrs.HOST, # <<<<<<<<<<<<<<
+ * hdrs.IF_MATCH,
+ * hdrs.IF_MODIFIED_SINCE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 42, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_38 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_HOST); if (unlikely(!__pyx_t_38)) __PYX_ERR(5, 42, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_38);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":43
+ * hdrs.FROM,
+ * hdrs.HOST,
+ * hdrs.IF_MATCH, # <<<<<<<<<<<<<<
+ * hdrs.IF_MODIFIED_SINCE,
+ * hdrs.IF_NONE_MATCH,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 43, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_39 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_IF_MATCH); if (unlikely(!__pyx_t_39)) __PYX_ERR(5, 43, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_39);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":44
+ * hdrs.HOST,
+ * hdrs.IF_MATCH,
+ * hdrs.IF_MODIFIED_SINCE, # <<<<<<<<<<<<<<
+ * hdrs.IF_NONE_MATCH,
+ * hdrs.IF_RANGE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 44, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_40 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_IF_MODIFIED_SINCE); if (unlikely(!__pyx_t_40)) __PYX_ERR(5, 44, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_40);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":45
+ * hdrs.IF_MATCH,
+ * hdrs.IF_MODIFIED_SINCE,
+ * hdrs.IF_NONE_MATCH, # <<<<<<<<<<<<<<
+ * hdrs.IF_RANGE,
+ * hdrs.IF_UNMODIFIED_SINCE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 45, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_41 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_IF_NONE_MATCH); if (unlikely(!__pyx_t_41)) __PYX_ERR(5, 45, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_41);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":46
+ * hdrs.IF_MODIFIED_SINCE,
+ * hdrs.IF_NONE_MATCH,
+ * hdrs.IF_RANGE, # <<<<<<<<<<<<<<
+ * hdrs.IF_UNMODIFIED_SINCE,
+ * hdrs.KEEP_ALIVE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 46, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_42 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_IF_RANGE); if (unlikely(!__pyx_t_42)) __PYX_ERR(5, 46, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_42);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":47
+ * hdrs.IF_NONE_MATCH,
+ * hdrs.IF_RANGE,
+ * hdrs.IF_UNMODIFIED_SINCE, # <<<<<<<<<<<<<<
+ * hdrs.KEEP_ALIVE,
+ * hdrs.LAST_EVENT_ID,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 47, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_43 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_IF_UNMODIFIED_SINCE); if (unlikely(!__pyx_t_43)) __PYX_ERR(5, 47, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_43);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":48
+ * hdrs.IF_RANGE,
+ * hdrs.IF_UNMODIFIED_SINCE,
+ * hdrs.KEEP_ALIVE, # <<<<<<<<<<<<<<
+ * hdrs.LAST_EVENT_ID,
+ * hdrs.LAST_MODIFIED,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 48, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_44 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_KEEP_ALIVE); if (unlikely(!__pyx_t_44)) __PYX_ERR(5, 48, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_44);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":49
+ * hdrs.IF_UNMODIFIED_SINCE,
+ * hdrs.KEEP_ALIVE,
+ * hdrs.LAST_EVENT_ID, # <<<<<<<<<<<<<<
+ * hdrs.LAST_MODIFIED,
+ * hdrs.LINK,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 49, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_45 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_LAST_EVENT_ID); if (unlikely(!__pyx_t_45)) __PYX_ERR(5, 49, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_45);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":50
+ * hdrs.KEEP_ALIVE,
+ * hdrs.LAST_EVENT_ID,
+ * hdrs.LAST_MODIFIED, # <<<<<<<<<<<<<<
+ * hdrs.LINK,
+ * hdrs.LOCATION,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 50, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_46 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_LAST_MODIFIED); if (unlikely(!__pyx_t_46)) __PYX_ERR(5, 50, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_46);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":51
+ * hdrs.LAST_EVENT_ID,
+ * hdrs.LAST_MODIFIED,
+ * hdrs.LINK, # <<<<<<<<<<<<<<
+ * hdrs.LOCATION,
+ * hdrs.MAX_FORWARDS,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 51, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_47 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_LINK); if (unlikely(!__pyx_t_47)) __PYX_ERR(5, 51, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_47);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":52
+ * hdrs.LAST_MODIFIED,
+ * hdrs.LINK,
+ * hdrs.LOCATION, # <<<<<<<<<<<<<<
+ * hdrs.MAX_FORWARDS,
+ * hdrs.ORIGIN,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 52, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_48 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_LOCATION); if (unlikely(!__pyx_t_48)) __PYX_ERR(5, 52, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_48);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":53
+ * hdrs.LINK,
+ * hdrs.LOCATION,
+ * hdrs.MAX_FORWARDS, # <<<<<<<<<<<<<<
+ * hdrs.ORIGIN,
+ * hdrs.PRAGMA,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 53, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_49 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_MAX_FORWARDS); if (unlikely(!__pyx_t_49)) __PYX_ERR(5, 53, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_49);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":54
+ * hdrs.LOCATION,
+ * hdrs.MAX_FORWARDS,
+ * hdrs.ORIGIN, # <<<<<<<<<<<<<<
+ * hdrs.PRAGMA,
+ * hdrs.PROXY_AUTHENTICATE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 54, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_50 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ORIGIN); if (unlikely(!__pyx_t_50)) __PYX_ERR(5, 54, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_50);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":55
+ * hdrs.MAX_FORWARDS,
+ * hdrs.ORIGIN,
+ * hdrs.PRAGMA, # <<<<<<<<<<<<<<
+ * hdrs.PROXY_AUTHENTICATE,
+ * hdrs.PROXY_AUTHORIZATION,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 55, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_51 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_PRAGMA); if (unlikely(!__pyx_t_51)) __PYX_ERR(5, 55, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_51);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":56
+ * hdrs.ORIGIN,
+ * hdrs.PRAGMA,
+ * hdrs.PROXY_AUTHENTICATE, # <<<<<<<<<<<<<<
+ * hdrs.PROXY_AUTHORIZATION,
+ * hdrs.RANGE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 56, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_52 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_PROXY_AUTHENTICATE); if (unlikely(!__pyx_t_52)) __PYX_ERR(5, 56, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_52);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":57
+ * hdrs.PRAGMA,
+ * hdrs.PROXY_AUTHENTICATE,
+ * hdrs.PROXY_AUTHORIZATION, # <<<<<<<<<<<<<<
+ * hdrs.RANGE,
+ * hdrs.REFERER,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 57, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_53 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_PROXY_AUTHORIZATION); if (unlikely(!__pyx_t_53)) __PYX_ERR(5, 57, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_53);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":58
+ * hdrs.PROXY_AUTHENTICATE,
+ * hdrs.PROXY_AUTHORIZATION,
+ * hdrs.RANGE, # <<<<<<<<<<<<<<
+ * hdrs.REFERER,
+ * hdrs.RETRY_AFTER,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 58, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_54 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_RANGE); if (unlikely(!__pyx_t_54)) __PYX_ERR(5, 58, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_54);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":59
+ * hdrs.PROXY_AUTHORIZATION,
+ * hdrs.RANGE,
+ * hdrs.REFERER, # <<<<<<<<<<<<<<
+ * hdrs.RETRY_AFTER,
+ * hdrs.SEC_WEBSOCKET_ACCEPT,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 59, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_55 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_REFERER); if (unlikely(!__pyx_t_55)) __PYX_ERR(5, 59, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_55);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":60
+ * hdrs.RANGE,
+ * hdrs.REFERER,
+ * hdrs.RETRY_AFTER, # <<<<<<<<<<<<<<
+ * hdrs.SEC_WEBSOCKET_ACCEPT,
+ * hdrs.SEC_WEBSOCKET_EXTENSIONS,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 60, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_56 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_RETRY_AFTER); if (unlikely(!__pyx_t_56)) __PYX_ERR(5, 60, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_56);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":61
+ * hdrs.REFERER,
+ * hdrs.RETRY_AFTER,
+ * hdrs.SEC_WEBSOCKET_ACCEPT, # <<<<<<<<<<<<<<
+ * hdrs.SEC_WEBSOCKET_EXTENSIONS,
+ * hdrs.SEC_WEBSOCKET_KEY,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 61, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_57 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SEC_WEBSOCKET_ACCEPT); if (unlikely(!__pyx_t_57)) __PYX_ERR(5, 61, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_57);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":62
+ * hdrs.RETRY_AFTER,
+ * hdrs.SEC_WEBSOCKET_ACCEPT,
+ * hdrs.SEC_WEBSOCKET_EXTENSIONS, # <<<<<<<<<<<<<<
+ * hdrs.SEC_WEBSOCKET_KEY,
+ * hdrs.SEC_WEBSOCKET_KEY1,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 62, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_58 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SEC_WEBSOCKET_EXTENSIONS); if (unlikely(!__pyx_t_58)) __PYX_ERR(5, 62, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_58);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":63
+ * hdrs.SEC_WEBSOCKET_ACCEPT,
+ * hdrs.SEC_WEBSOCKET_EXTENSIONS,
+ * hdrs.SEC_WEBSOCKET_KEY, # <<<<<<<<<<<<<<
+ * hdrs.SEC_WEBSOCKET_KEY1,
+ * hdrs.SEC_WEBSOCKET_PROTOCOL,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 63, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_59 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SEC_WEBSOCKET_KEY); if (unlikely(!__pyx_t_59)) __PYX_ERR(5, 63, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_59);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":64
+ * hdrs.SEC_WEBSOCKET_EXTENSIONS,
+ * hdrs.SEC_WEBSOCKET_KEY,
+ * hdrs.SEC_WEBSOCKET_KEY1, # <<<<<<<<<<<<<<
+ * hdrs.SEC_WEBSOCKET_PROTOCOL,
+ * hdrs.SEC_WEBSOCKET_VERSION,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 64, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_60 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SEC_WEBSOCKET_KEY1); if (unlikely(!__pyx_t_60)) __PYX_ERR(5, 64, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_60);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":65
+ * hdrs.SEC_WEBSOCKET_KEY,
+ * hdrs.SEC_WEBSOCKET_KEY1,
+ * hdrs.SEC_WEBSOCKET_PROTOCOL, # <<<<<<<<<<<<<<
+ * hdrs.SEC_WEBSOCKET_VERSION,
+ * hdrs.SERVER,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 65, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_61 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SEC_WEBSOCKET_PROTOCOL); if (unlikely(!__pyx_t_61)) __PYX_ERR(5, 65, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_61);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":66
+ * hdrs.SEC_WEBSOCKET_KEY1,
+ * hdrs.SEC_WEBSOCKET_PROTOCOL,
+ * hdrs.SEC_WEBSOCKET_VERSION, # <<<<<<<<<<<<<<
+ * hdrs.SERVER,
+ * hdrs.SET_COOKIE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 66, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_62 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SEC_WEBSOCKET_VERSION); if (unlikely(!__pyx_t_62)) __PYX_ERR(5, 66, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_62);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":67
+ * hdrs.SEC_WEBSOCKET_PROTOCOL,
+ * hdrs.SEC_WEBSOCKET_VERSION,
+ * hdrs.SERVER, # <<<<<<<<<<<<<<
+ * hdrs.SET_COOKIE,
+ * hdrs.TE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 67, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_63 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SERVER); if (unlikely(!__pyx_t_63)) __PYX_ERR(5, 67, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_63);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":68
+ * hdrs.SEC_WEBSOCKET_VERSION,
+ * hdrs.SERVER,
+ * hdrs.SET_COOKIE, # <<<<<<<<<<<<<<
+ * hdrs.TE,
+ * hdrs.TRAILER,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 68, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_64 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SET_COOKIE); if (unlikely(!__pyx_t_64)) __PYX_ERR(5, 68, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_64);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":69
+ * hdrs.SERVER,
+ * hdrs.SET_COOKIE,
+ * hdrs.TE, # <<<<<<<<<<<<<<
+ * hdrs.TRAILER,
+ * hdrs.TRANSFER_ENCODING,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 69, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_65 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_TE); if (unlikely(!__pyx_t_65)) __PYX_ERR(5, 69, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_65);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":70
+ * hdrs.SET_COOKIE,
+ * hdrs.TE,
+ * hdrs.TRAILER, # <<<<<<<<<<<<<<
+ * hdrs.TRANSFER_ENCODING,
+ * hdrs.URI,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_66 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_TRAILER); if (unlikely(!__pyx_t_66)) __PYX_ERR(5, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_66);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":71
+ * hdrs.TE,
+ * hdrs.TRAILER,
+ * hdrs.TRANSFER_ENCODING, # <<<<<<<<<<<<<<
+ * hdrs.URI,
+ * hdrs.UPGRADE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 71, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_67 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_TRANSFER_ENCODING); if (unlikely(!__pyx_t_67)) __PYX_ERR(5, 71, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_67);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":72
+ * hdrs.TRAILER,
+ * hdrs.TRANSFER_ENCODING,
+ * hdrs.URI, # <<<<<<<<<<<<<<
+ * hdrs.UPGRADE,
+ * hdrs.USER_AGENT,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 72, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_68 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_URI); if (unlikely(!__pyx_t_68)) __PYX_ERR(5, 72, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_68);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":73
+ * hdrs.TRANSFER_ENCODING,
+ * hdrs.URI,
+ * hdrs.UPGRADE, # <<<<<<<<<<<<<<
+ * hdrs.USER_AGENT,
+ * hdrs.VARY,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 73, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_69 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_UPGRADE); if (unlikely(!__pyx_t_69)) __PYX_ERR(5, 73, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_69);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":74
+ * hdrs.URI,
+ * hdrs.UPGRADE,
+ * hdrs.USER_AGENT, # <<<<<<<<<<<<<<
+ * hdrs.VARY,
+ * hdrs.VIA,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 74, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_70 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_USER_AGENT); if (unlikely(!__pyx_t_70)) __PYX_ERR(5, 74, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_70);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":75
+ * hdrs.UPGRADE,
+ * hdrs.USER_AGENT,
+ * hdrs.VARY, # <<<<<<<<<<<<<<
+ * hdrs.VIA,
+ * hdrs.WWW_AUTHENTICATE,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 75, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_71 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_VARY); if (unlikely(!__pyx_t_71)) __PYX_ERR(5, 75, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_71);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":76
+ * hdrs.USER_AGENT,
+ * hdrs.VARY,
+ * hdrs.VIA, # <<<<<<<<<<<<<<
+ * hdrs.WWW_AUTHENTICATE,
+ * hdrs.WANT_DIGEST,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 76, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_72 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_VIA); if (unlikely(!__pyx_t_72)) __PYX_ERR(5, 76, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_72);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":77
+ * hdrs.VARY,
+ * hdrs.VIA,
+ * hdrs.WWW_AUTHENTICATE, # <<<<<<<<<<<<<<
+ * hdrs.WANT_DIGEST,
+ * hdrs.WARNING,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 77, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_73 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_WWW_AUTHENTICATE); if (unlikely(!__pyx_t_73)) __PYX_ERR(5, 77, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_73);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":78
+ * hdrs.VIA,
+ * hdrs.WWW_AUTHENTICATE,
+ * hdrs.WANT_DIGEST, # <<<<<<<<<<<<<<
+ * hdrs.WARNING,
+ * hdrs.X_FORWARDED_FOR,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 78, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_74 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_WANT_DIGEST); if (unlikely(!__pyx_t_74)) __PYX_ERR(5, 78, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_74);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":79
+ * hdrs.WWW_AUTHENTICATE,
+ * hdrs.WANT_DIGEST,
+ * hdrs.WARNING, # <<<<<<<<<<<<<<
+ * hdrs.X_FORWARDED_FOR,
+ * hdrs.X_FORWARDED_HOST,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 79, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_75 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_WARNING); if (unlikely(!__pyx_t_75)) __PYX_ERR(5, 79, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_75);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":80
+ * hdrs.WANT_DIGEST,
+ * hdrs.WARNING,
+ * hdrs.X_FORWARDED_FOR, # <<<<<<<<<<<<<<
+ * hdrs.X_FORWARDED_HOST,
+ * hdrs.X_FORWARDED_PROTO,
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 80, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_76 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_X_FORWARDED_FOR); if (unlikely(!__pyx_t_76)) __PYX_ERR(5, 80, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_76);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":81
+ * hdrs.WARNING,
+ * hdrs.X_FORWARDED_FOR,
+ * hdrs.X_FORWARDED_HOST, # <<<<<<<<<<<<<<
+ * hdrs.X_FORWARDED_PROTO,
+ * )
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 81, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_77 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_X_FORWARDED_HOST); if (unlikely(!__pyx_t_77)) __PYX_ERR(5, 81, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_77);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_headers.pxi":82
+ * hdrs.X_FORWARDED_FOR,
+ * hdrs.X_FORWARDED_HOST,
+ * hdrs.X_FORWARDED_PROTO, # <<<<<<<<<<<<<<
+ * )
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 82, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_78 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_X_FORWARDED_PROTO); if (unlikely(!__pyx_t_78)) __PYX_ERR(5, 82, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_78);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
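+
+ /* Each stanza above resolves one `hdrs.<NAME>` constant from the hdrs module
+  * and parks a strong reference in its own temporary (__pyx_t_2 .. __pyx_t_78),
+  * so that all 77 header names are live before the tuple is assembled below. */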
+
+ /* "aiohttp/_headers.pxi":6
+ * from . import hdrs
+ * cdef tuple headers = (
+ * hdrs.ACCEPT, # <<<<<<<<<<<<<<
+ * hdrs.ACCEPT_CHARSET,
+ * hdrs.ACCEPT_ENCODING,
+ */
+ __pyx_t_1 = PyTuple_New(77); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_1, 4, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_7);
+ PyTuple_SET_ITEM(__pyx_t_1, 5, __pyx_t_7);
+ __Pyx_GIVEREF(__pyx_t_8);
+ PyTuple_SET_ITEM(__pyx_t_1, 6, __pyx_t_8);
+ __Pyx_GIVEREF(__pyx_t_9);
+ PyTuple_SET_ITEM(__pyx_t_1, 7, __pyx_t_9);
+ __Pyx_GIVEREF(__pyx_t_10);
+ PyTuple_SET_ITEM(__pyx_t_1, 8, __pyx_t_10);
+ __Pyx_GIVEREF(__pyx_t_11);
+ PyTuple_SET_ITEM(__pyx_t_1, 9, __pyx_t_11);
+ __Pyx_GIVEREF(__pyx_t_12);
+ PyTuple_SET_ITEM(__pyx_t_1, 10, __pyx_t_12);
+ __Pyx_GIVEREF(__pyx_t_13);
+ PyTuple_SET_ITEM(__pyx_t_1, 11, __pyx_t_13);
+ __Pyx_GIVEREF(__pyx_t_14);
+ PyTuple_SET_ITEM(__pyx_t_1, 12, __pyx_t_14);
+ __Pyx_GIVEREF(__pyx_t_15);
+ PyTuple_SET_ITEM(__pyx_t_1, 13, __pyx_t_15);
+ __Pyx_GIVEREF(__pyx_t_16);
+ PyTuple_SET_ITEM(__pyx_t_1, 14, __pyx_t_16);
+ __Pyx_GIVEREF(__pyx_t_17);
+ PyTuple_SET_ITEM(__pyx_t_1, 15, __pyx_t_17);
+ __Pyx_GIVEREF(__pyx_t_18);
+ PyTuple_SET_ITEM(__pyx_t_1, 16, __pyx_t_18);
+ __Pyx_GIVEREF(__pyx_t_19);
+ PyTuple_SET_ITEM(__pyx_t_1, 17, __pyx_t_19);
+ __Pyx_GIVEREF(__pyx_t_20);
+ PyTuple_SET_ITEM(__pyx_t_1, 18, __pyx_t_20);
+ __Pyx_GIVEREF(__pyx_t_21);
+ PyTuple_SET_ITEM(__pyx_t_1, 19, __pyx_t_21);
+ __Pyx_GIVEREF(__pyx_t_22);
+ PyTuple_SET_ITEM(__pyx_t_1, 20, __pyx_t_22);
+ __Pyx_GIVEREF(__pyx_t_23);
+ PyTuple_SET_ITEM(__pyx_t_1, 21, __pyx_t_23);
+ __Pyx_GIVEREF(__pyx_t_24);
+ PyTuple_SET_ITEM(__pyx_t_1, 22, __pyx_t_24);
+ __Pyx_GIVEREF(__pyx_t_25);
+ PyTuple_SET_ITEM(__pyx_t_1, 23, __pyx_t_25);
+ __Pyx_GIVEREF(__pyx_t_26);
+ PyTuple_SET_ITEM(__pyx_t_1, 24, __pyx_t_26);
+ __Pyx_GIVEREF(__pyx_t_27);
+ PyTuple_SET_ITEM(__pyx_t_1, 25, __pyx_t_27);
+ __Pyx_GIVEREF(__pyx_t_28);
+ PyTuple_SET_ITEM(__pyx_t_1, 26, __pyx_t_28);
+ __Pyx_GIVEREF(__pyx_t_29);
+ PyTuple_SET_ITEM(__pyx_t_1, 27, __pyx_t_29);
+ __Pyx_GIVEREF(__pyx_t_30);
+ PyTuple_SET_ITEM(__pyx_t_1, 28, __pyx_t_30);
+ __Pyx_GIVEREF(__pyx_t_31);
+ PyTuple_SET_ITEM(__pyx_t_1, 29, __pyx_t_31);
+ __Pyx_GIVEREF(__pyx_t_32);
+ PyTuple_SET_ITEM(__pyx_t_1, 30, __pyx_t_32);
+ __Pyx_GIVEREF(__pyx_t_33);
+ PyTuple_SET_ITEM(__pyx_t_1, 31, __pyx_t_33);
+ __Pyx_GIVEREF(__pyx_t_34);
+ PyTuple_SET_ITEM(__pyx_t_1, 32, __pyx_t_34);
+ __Pyx_GIVEREF(__pyx_t_35);
+ PyTuple_SET_ITEM(__pyx_t_1, 33, __pyx_t_35);
+ __Pyx_GIVEREF(__pyx_t_36);
+ PyTuple_SET_ITEM(__pyx_t_1, 34, __pyx_t_36);
+ __Pyx_GIVEREF(__pyx_t_37);
+ PyTuple_SET_ITEM(__pyx_t_1, 35, __pyx_t_37);
+ __Pyx_GIVEREF(__pyx_t_38);
+ PyTuple_SET_ITEM(__pyx_t_1, 36, __pyx_t_38);
+ __Pyx_GIVEREF(__pyx_t_39);
+ PyTuple_SET_ITEM(__pyx_t_1, 37, __pyx_t_39);
+ __Pyx_GIVEREF(__pyx_t_40);
+ PyTuple_SET_ITEM(__pyx_t_1, 38, __pyx_t_40);
+ __Pyx_GIVEREF(__pyx_t_41);
+ PyTuple_SET_ITEM(__pyx_t_1, 39, __pyx_t_41);
+ __Pyx_GIVEREF(__pyx_t_42);
+ PyTuple_SET_ITEM(__pyx_t_1, 40, __pyx_t_42);
+ __Pyx_GIVEREF(__pyx_t_43);
+ PyTuple_SET_ITEM(__pyx_t_1, 41, __pyx_t_43);
+ __Pyx_GIVEREF(__pyx_t_44);
+ PyTuple_SET_ITEM(__pyx_t_1, 42, __pyx_t_44);
+ __Pyx_GIVEREF(__pyx_t_45);
+ PyTuple_SET_ITEM(__pyx_t_1, 43, __pyx_t_45);
+ __Pyx_GIVEREF(__pyx_t_46);
+ PyTuple_SET_ITEM(__pyx_t_1, 44, __pyx_t_46);
+ __Pyx_GIVEREF(__pyx_t_47);
+ PyTuple_SET_ITEM(__pyx_t_1, 45, __pyx_t_47);
+ __Pyx_GIVEREF(__pyx_t_48);
+ PyTuple_SET_ITEM(__pyx_t_1, 46, __pyx_t_48);
+ __Pyx_GIVEREF(__pyx_t_49);
+ PyTuple_SET_ITEM(__pyx_t_1, 47, __pyx_t_49);
+ __Pyx_GIVEREF(__pyx_t_50);
+ PyTuple_SET_ITEM(__pyx_t_1, 48, __pyx_t_50);
+ __Pyx_GIVEREF(__pyx_t_51);
+ PyTuple_SET_ITEM(__pyx_t_1, 49, __pyx_t_51);
+ __Pyx_GIVEREF(__pyx_t_52);
+ PyTuple_SET_ITEM(__pyx_t_1, 50, __pyx_t_52);
+ __Pyx_GIVEREF(__pyx_t_53);
+ PyTuple_SET_ITEM(__pyx_t_1, 51, __pyx_t_53);
+ __Pyx_GIVEREF(__pyx_t_54);
+ PyTuple_SET_ITEM(__pyx_t_1, 52, __pyx_t_54);
+ __Pyx_GIVEREF(__pyx_t_55);
+ PyTuple_SET_ITEM(__pyx_t_1, 53, __pyx_t_55);
+ __Pyx_GIVEREF(__pyx_t_56);
+ PyTuple_SET_ITEM(__pyx_t_1, 54, __pyx_t_56);
+ __Pyx_GIVEREF(__pyx_t_57);
+ PyTuple_SET_ITEM(__pyx_t_1, 55, __pyx_t_57);
+ __Pyx_GIVEREF(__pyx_t_58);
+ PyTuple_SET_ITEM(__pyx_t_1, 56, __pyx_t_58);
+ __Pyx_GIVEREF(__pyx_t_59);
+ PyTuple_SET_ITEM(__pyx_t_1, 57, __pyx_t_59);
+ __Pyx_GIVEREF(__pyx_t_60);
+ PyTuple_SET_ITEM(__pyx_t_1, 58, __pyx_t_60);
+ __Pyx_GIVEREF(__pyx_t_61);
+ PyTuple_SET_ITEM(__pyx_t_1, 59, __pyx_t_61);
+ __Pyx_GIVEREF(__pyx_t_62);
+ PyTuple_SET_ITEM(__pyx_t_1, 60, __pyx_t_62);
+ __Pyx_GIVEREF(__pyx_t_63);
+ PyTuple_SET_ITEM(__pyx_t_1, 61, __pyx_t_63);
+ __Pyx_GIVEREF(__pyx_t_64);
+ PyTuple_SET_ITEM(__pyx_t_1, 62, __pyx_t_64);
+ __Pyx_GIVEREF(__pyx_t_65);
+ PyTuple_SET_ITEM(__pyx_t_1, 63, __pyx_t_65);
+ __Pyx_GIVEREF(__pyx_t_66);
+ PyTuple_SET_ITEM(__pyx_t_1, 64, __pyx_t_66);
+ __Pyx_GIVEREF(__pyx_t_67);
+ PyTuple_SET_ITEM(__pyx_t_1, 65, __pyx_t_67);
+ __Pyx_GIVEREF(__pyx_t_68);
+ PyTuple_SET_ITEM(__pyx_t_1, 66, __pyx_t_68);
+ __Pyx_GIVEREF(__pyx_t_69);
+ PyTuple_SET_ITEM(__pyx_t_1, 67, __pyx_t_69);
+ __Pyx_GIVEREF(__pyx_t_70);
+ PyTuple_SET_ITEM(__pyx_t_1, 68, __pyx_t_70);
+ __Pyx_GIVEREF(__pyx_t_71);
+ PyTuple_SET_ITEM(__pyx_t_1, 69, __pyx_t_71);
+ __Pyx_GIVEREF(__pyx_t_72);
+ PyTuple_SET_ITEM(__pyx_t_1, 70, __pyx_t_72);
+ __Pyx_GIVEREF(__pyx_t_73);
+ PyTuple_SET_ITEM(__pyx_t_1, 71, __pyx_t_73);
+ __Pyx_GIVEREF(__pyx_t_74);
+ PyTuple_SET_ITEM(__pyx_t_1, 72, __pyx_t_74);
+ __Pyx_GIVEREF(__pyx_t_75);
+ PyTuple_SET_ITEM(__pyx_t_1, 73, __pyx_t_75);
+ __Pyx_GIVEREF(__pyx_t_76);
+ PyTuple_SET_ITEM(__pyx_t_1, 74, __pyx_t_76);
+ __Pyx_GIVEREF(__pyx_t_77);
+ PyTuple_SET_ITEM(__pyx_t_1, 75, __pyx_t_77);
+ __Pyx_GIVEREF(__pyx_t_78);
+ PyTuple_SET_ITEM(__pyx_t_1, 76, __pyx_t_78);
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_4 = 0;
+ __pyx_t_5 = 0;
+ __pyx_t_6 = 0;
+ __pyx_t_7 = 0;
+ __pyx_t_8 = 0;
+ __pyx_t_9 = 0;
+ __pyx_t_10 = 0;
+ __pyx_t_11 = 0;
+ __pyx_t_12 = 0;
+ __pyx_t_13 = 0;
+ __pyx_t_14 = 0;
+ __pyx_t_15 = 0;
+ __pyx_t_16 = 0;
+ __pyx_t_17 = 0;
+ __pyx_t_18 = 0;
+ __pyx_t_19 = 0;
+ __pyx_t_20 = 0;
+ __pyx_t_21 = 0;
+ __pyx_t_22 = 0;
+ __pyx_t_23 = 0;
+ __pyx_t_24 = 0;
+ __pyx_t_25 = 0;
+ __pyx_t_26 = 0;
+ __pyx_t_27 = 0;
+ __pyx_t_28 = 0;
+ __pyx_t_29 = 0;
+ __pyx_t_30 = 0;
+ __pyx_t_31 = 0;
+ __pyx_t_32 = 0;
+ __pyx_t_33 = 0;
+ __pyx_t_34 = 0;
+ __pyx_t_35 = 0;
+ __pyx_t_36 = 0;
+ __pyx_t_37 = 0;
+ __pyx_t_38 = 0;
+ __pyx_t_39 = 0;
+ __pyx_t_40 = 0;
+ __pyx_t_41 = 0;
+ __pyx_t_42 = 0;
+ __pyx_t_43 = 0;
+ __pyx_t_44 = 0;
+ __pyx_t_45 = 0;
+ __pyx_t_46 = 0;
+ __pyx_t_47 = 0;
+ __pyx_t_48 = 0;
+ __pyx_t_49 = 0;
+ __pyx_t_50 = 0;
+ __pyx_t_51 = 0;
+ __pyx_t_52 = 0;
+ __pyx_t_53 = 0;
+ __pyx_t_54 = 0;
+ __pyx_t_55 = 0;
+ __pyx_t_56 = 0;
+ __pyx_t_57 = 0;
+ __pyx_t_58 = 0;
+ __pyx_t_59 = 0;
+ __pyx_t_60 = 0;
+ __pyx_t_61 = 0;
+ __pyx_t_62 = 0;
+ __pyx_t_63 = 0;
+ __pyx_t_64 = 0;
+ __pyx_t_65 = 0;
+ __pyx_t_66 = 0;
+ __pyx_t_67 = 0;
+ __pyx_t_68 = 0;
+ __pyx_t_69 = 0;
+ __pyx_t_70 = 0;
+ __pyx_t_71 = 0;
+ __pyx_t_72 = 0;
+ __pyx_t_73 = 0;
+ __pyx_t_74 = 0;
+ __pyx_t_75 = 0;
+ __pyx_t_76 = 0;
+ __pyx_t_77 = 0;
+ __pyx_t_78 = 0;
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_headers);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_headers, ((PyObject*)__pyx_t_1));
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
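+
+ /* Ownership of the 77 temporaries is transferred into the new tuple via
+  * __Pyx_GIVEREF + PyTuple_SET_ITEM (no extra incref), and each temporary is
+  * then zeroed so the error path cannot decref it a second time. The finished
+  * tuple is stored in the C global backing `cdef tuple headers`
+  * (__pyx_v_7aiohttp_12_http_parser_headers). */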
+
+ /* "aiohttp/_http_parser.pyx":57
+ * char* PyByteArray_AsString(object)
+ *
+ * __all__ = ('HttpRequestParser', 'HttpResponseParser', # <<<<<<<<<<<<<<
+ * 'RawRequestMessage', 'RawResponseMessage')
+ *
+ */
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_all, __pyx_tuple__12) < 0) __PYX_ERR(0, 57, __pyx_L1_error)
+
+ /* "aiohttp/_http_parser.pyx":60
+ * 'RawRequestMessage', 'RawResponseMessage')
+ *
+ * cdef object URL = _URL # <<<<<<<<<<<<<<
+ * cdef object URL_build = URL.build
+ * cdef object CIMultiDict = _CIMultiDict
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_URL_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_URL);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_URL, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":61
+ *
+ * cdef object URL = _URL
+ * cdef object URL_build = URL.build # <<<<<<<<<<<<<<
+ * cdef object CIMultiDict = _CIMultiDict
+ * cdef object CIMultiDictProxy = _CIMultiDictProxy
+ */
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_7aiohttp_12_http_parser_URL, __pyx_n_s_build); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_URL_build);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_URL_build, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":62
+ * cdef object URL = _URL
+ * cdef object URL_build = URL.build
+ * cdef object CIMultiDict = _CIMultiDict # <<<<<<<<<<<<<<
+ * cdef object CIMultiDictProxy = _CIMultiDictProxy
+ * cdef object HttpVersion = _HttpVersion
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_CIMultiDict_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_CIMultiDict);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_CIMultiDict, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":63
+ * cdef object URL_build = URL.build
+ * cdef object CIMultiDict = _CIMultiDict
+ * cdef object CIMultiDictProxy = _CIMultiDictProxy # <<<<<<<<<<<<<<
+ * cdef object HttpVersion = _HttpVersion
+ * cdef object HttpVersion10 = _HttpVersion10
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_CIMultiDictProxy_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 63, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_CIMultiDictProxy);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_CIMultiDictProxy, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":64
+ * cdef object CIMultiDict = _CIMultiDict
+ * cdef object CIMultiDictProxy = _CIMultiDictProxy
+ * cdef object HttpVersion = _HttpVersion # <<<<<<<<<<<<<<
+ * cdef object HttpVersion10 = _HttpVersion10
+ * cdef object HttpVersion11 = _HttpVersion11
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpVersion_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_HttpVersion);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_HttpVersion, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":65
+ * cdef object CIMultiDictProxy = _CIMultiDictProxy
+ * cdef object HttpVersion = _HttpVersion
+ * cdef object HttpVersion10 = _HttpVersion10 # <<<<<<<<<<<<<<
+ * cdef object HttpVersion11 = _HttpVersion11
+ * cdef object SEC_WEBSOCKET_KEY1 = hdrs.SEC_WEBSOCKET_KEY1
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpVersion10_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_HttpVersion10);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_HttpVersion10, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":66
+ * cdef object HttpVersion = _HttpVersion
+ * cdef object HttpVersion10 = _HttpVersion10
+ * cdef object HttpVersion11 = _HttpVersion11 # <<<<<<<<<<<<<<
+ * cdef object SEC_WEBSOCKET_KEY1 = hdrs.SEC_WEBSOCKET_KEY1
+ * cdef object CONTENT_ENCODING = hdrs.CONTENT_ENCODING
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_HttpVersion11_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 66, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_HttpVersion11);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_HttpVersion11, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":67
+ * cdef object HttpVersion10 = _HttpVersion10
+ * cdef object HttpVersion11 = _HttpVersion11
+ * cdef object SEC_WEBSOCKET_KEY1 = hdrs.SEC_WEBSOCKET_KEY1 # <<<<<<<<<<<<<<
+ * cdef object CONTENT_ENCODING = hdrs.CONTENT_ENCODING
+ * cdef object EMPTY_PAYLOAD = _EMPTY_PAYLOAD
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 67, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_78 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_SEC_WEBSOCKET_KEY1); if (unlikely(!__pyx_t_78)) __PYX_ERR(0, 67, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_78);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_SEC_WEBSOCKET_KEY1);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_SEC_WEBSOCKET_KEY1, __pyx_t_78);
+ __Pyx_GIVEREF(__pyx_t_78);
+ __pyx_t_78 = 0;
+
+ /* "aiohttp/_http_parser.pyx":68
+ * cdef object HttpVersion11 = _HttpVersion11
+ * cdef object SEC_WEBSOCKET_KEY1 = hdrs.SEC_WEBSOCKET_KEY1
+ * cdef object CONTENT_ENCODING = hdrs.CONTENT_ENCODING # <<<<<<<<<<<<<<
+ * cdef object EMPTY_PAYLOAD = _EMPTY_PAYLOAD
+ * cdef object StreamReader = _StreamReader
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_78, __pyx_n_s_hdrs); if (unlikely(!__pyx_t_78)) __PYX_ERR(0, 68, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_78);
+ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_78, __pyx_n_s_CONTENT_ENCODING); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 68, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_78); __pyx_t_78 = 0;
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_CONTENT_ENCODING);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_CONTENT_ENCODING, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":69
+ * cdef object SEC_WEBSOCKET_KEY1 = hdrs.SEC_WEBSOCKET_KEY1
+ * cdef object CONTENT_ENCODING = hdrs.CONTENT_ENCODING
+ * cdef object EMPTY_PAYLOAD = _EMPTY_PAYLOAD # <<<<<<<<<<<<<<
+ * cdef object StreamReader = _StreamReader
+ * cdef object DeflateBuffer = _DeflateBuffer
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_EMPTY_PAYLOAD_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 69, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_EMPTY_PAYLOAD);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_EMPTY_PAYLOAD, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":70
+ * cdef object CONTENT_ENCODING = hdrs.CONTENT_ENCODING
+ * cdef object EMPTY_PAYLOAD = _EMPTY_PAYLOAD
+ * cdef object StreamReader = _StreamReader # <<<<<<<<<<<<<<
+ * cdef object DeflateBuffer = _DeflateBuffer
+ *
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_StreamReader_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_StreamReader);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_StreamReader, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":71
+ * cdef object EMPTY_PAYLOAD = _EMPTY_PAYLOAD
+ * cdef object StreamReader = _StreamReader
+ * cdef object DeflateBuffer = _DeflateBuffer # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_DeflateBuffer_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser_DeflateBuffer);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser_DeflateBuffer, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
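+ /* The assignments above copy the `_`-prefixed Python-level names (_URL,
+  * _CIMultiDict, _CIMultiDictProxy, _HttpVersion*, _EMPTY_PAYLOAD,
+  * _StreamReader, _DeflateBuffer) and two hdrs constants into `cdef object`
+  * module globals, so the parser's hot paths read a C variable instead of
+  * performing a module-dict lookup each time. */
+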
+ /* "aiohttp/_http_parser.pyx":85
+ * DEF METHODS_COUNT = 34;
+ *
+ * cdef list _http_method = [] # <<<<<<<<<<<<<<
+ *
+ * for i in range(METHODS_COUNT):
+ */
+ __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 85, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_parser__http_method);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_parser__http_method, ((PyObject*)__pyx_t_1));
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":87
+ * cdef list _http_method = []
+ *
+ * for i in range(METHODS_COUNT): # <<<<<<<<<<<<<<
+ * _http_method.append(
+ * cparser.http_method_str(<cparser.http_method> i).decode('ascii'))
+ */
+ for (__pyx_t_79 = 0; __pyx_t_79 < 34; __pyx_t_79+=1) {
+ __pyx_t_1 = __Pyx_PyInt_From_long(__pyx_t_79); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 87, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_i, __pyx_t_1) < 0) __PYX_ERR(0, 87, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":88
+ *
+ * for i in range(METHODS_COUNT):
+ * _http_method.append( # <<<<<<<<<<<<<<
+ * cparser.http_method_str(<cparser.http_method> i).decode('ascii'))
+ *
+ */
+ if (unlikely(__pyx_v_7aiohttp_12_http_parser__http_method == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "append");
+ __PYX_ERR(0, 88, __pyx_L1_error)
+ }
+
+ /* "aiohttp/_http_parser.pyx":89
+ * for i in range(METHODS_COUNT):
+ * _http_method.append(
+ * cparser.http_method_str(<cparser.http_method> i).decode('ascii')) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_80 = ((enum http_method)__Pyx_PyInt_As_enum__http_method(__pyx_t_1)); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 89, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_81 = http_method_str(((enum http_method)__pyx_t_80));
+ __pyx_t_1 = __Pyx_decode_c_string(__pyx_t_81, 0, strlen(__pyx_t_81), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 89, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+
+ /* "aiohttp/_http_parser.pyx":88
+ *
+ * for i in range(METHODS_COUNT):
+ * _http_method.append( # <<<<<<<<<<<<<<
+ * cparser.http_method_str(<cparser.http_method> i).decode('ascii'))
+ *
+ */
+ __pyx_t_82 = __Pyx_PyList_Append(__pyx_v_7aiohttp_12_http_parser__http_method, __pyx_t_1); if (unlikely(__pyx_t_82 == ((int)-1))) __PYX_ERR(0, 88, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ }
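+
+ /* The loop mirrors the quoted Cython source: each of the 34 method codes is
+  * converted to its ASCII name with http_method_str() and appended to the
+  * module-level `_http_method` list; the loop variable `i` is also published
+  * into the module dict on every pass, matching Python module-scope semantics. */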
+
+ /* "aiohttp/_http_parser.pyx":785
+ *
+ *
+ * def parse_url(url): # <<<<<<<<<<<<<<
+ * cdef:
+ * Py_buffer py_buf
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7aiohttp_12_http_parser_1parse_url, NULL, __pyx_n_s_aiohttp__http_parser); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 785, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_parse_url, __pyx_t_1) < 0) __PYX_ERR(0, 785, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
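+
+ /* Unlike the `cdef` globals above, `parse_url` is a plain `def`, so it is
+  * wrapped as a PyCFunction and published in the module dict where Python
+  * callers can see it. */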
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle_RawRequestMessage(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7aiohttp_12_http_parser_3__pyx_unpickle_RawRequestMessage, NULL, __pyx_n_s_aiohttp__http_parser); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_RawRequestMessage, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle_RawRequestMessage__set_state(<RawRequestMessage> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle_RawRequestMessage__set_state(RawRequestMessage __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result.chunked = __pyx_state[0]; __pyx_result.compression = __pyx_state[1]; __pyx_result.headers = __pyx_state[2]; __pyx_result.method = __pyx_state[3]; __pyx_result.path = __pyx_state[4]; __pyx_result.raw_headers = __pyx_state[5]; __pyx_result.should_close = __pyx_state[6]; __pyx_result.upgrade = __pyx_state[7]; __pyx_result.url = __pyx_state[8]; __pyx_result.version = __pyx_state[9]
+ * if len(__pyx_state) > 10 and hasattr(__pyx_result, '__dict__'):
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7aiohttp_12_http_parser_5__pyx_unpickle_RawResponseMessage, NULL, __pyx_n_s_aiohttp__http_parser); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_RawResponseMessag, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_parser.pyx":1
+ * #cython: language_level=3 # <<<<<<<<<<<<<<
+ * #
+ * # Based on https://github.com/MagicStack/httptools
+ */
+ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /*--- Wrapped vars code ---*/
+
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_XDECREF(__pyx_t_14);
+ __Pyx_XDECREF(__pyx_t_15);
+ __Pyx_XDECREF(__pyx_t_16);
+ __Pyx_XDECREF(__pyx_t_17);
+ __Pyx_XDECREF(__pyx_t_18);
+ __Pyx_XDECREF(__pyx_t_19);
+ __Pyx_XDECREF(__pyx_t_20);
+ __Pyx_XDECREF(__pyx_t_21);
+ __Pyx_XDECREF(__pyx_t_22);
+ __Pyx_XDECREF(__pyx_t_23);
+ __Pyx_XDECREF(__pyx_t_24);
+ __Pyx_XDECREF(__pyx_t_25);
+ __Pyx_XDECREF(__pyx_t_26);
+ __Pyx_XDECREF(__pyx_t_27);
+ __Pyx_XDECREF(__pyx_t_28);
+ __Pyx_XDECREF(__pyx_t_29);
+ __Pyx_XDECREF(__pyx_t_30);
+ __Pyx_XDECREF(__pyx_t_31);
+ __Pyx_XDECREF(__pyx_t_32);
+ __Pyx_XDECREF(__pyx_t_33);
+ __Pyx_XDECREF(__pyx_t_34);
+ __Pyx_XDECREF(__pyx_t_35);
+ __Pyx_XDECREF(__pyx_t_36);
+ __Pyx_XDECREF(__pyx_t_37);
+ __Pyx_XDECREF(__pyx_t_38);
+ __Pyx_XDECREF(__pyx_t_39);
+ __Pyx_XDECREF(__pyx_t_40);
+ __Pyx_XDECREF(__pyx_t_41);
+ __Pyx_XDECREF(__pyx_t_42);
+ __Pyx_XDECREF(__pyx_t_43);
+ __Pyx_XDECREF(__pyx_t_44);
+ __Pyx_XDECREF(__pyx_t_45);
+ __Pyx_XDECREF(__pyx_t_46);
+ __Pyx_XDECREF(__pyx_t_47);
+ __Pyx_XDECREF(__pyx_t_48);
+ __Pyx_XDECREF(__pyx_t_49);
+ __Pyx_XDECREF(__pyx_t_50);
+ __Pyx_XDECREF(__pyx_t_51);
+ __Pyx_XDECREF(__pyx_t_52);
+ __Pyx_XDECREF(__pyx_t_53);
+ __Pyx_XDECREF(__pyx_t_54);
+ __Pyx_XDECREF(__pyx_t_55);
+ __Pyx_XDECREF(__pyx_t_56);
+ __Pyx_XDECREF(__pyx_t_57);
+ __Pyx_XDECREF(__pyx_t_58);
+ __Pyx_XDECREF(__pyx_t_59);
+ __Pyx_XDECREF(__pyx_t_60);
+ __Pyx_XDECREF(__pyx_t_61);
+ __Pyx_XDECREF(__pyx_t_62);
+ __Pyx_XDECREF(__pyx_t_63);
+ __Pyx_XDECREF(__pyx_t_64);
+ __Pyx_XDECREF(__pyx_t_65);
+ __Pyx_XDECREF(__pyx_t_66);
+ __Pyx_XDECREF(__pyx_t_67);
+ __Pyx_XDECREF(__pyx_t_68);
+ __Pyx_XDECREF(__pyx_t_69);
+ __Pyx_XDECREF(__pyx_t_70);
+ __Pyx_XDECREF(__pyx_t_71);
+ __Pyx_XDECREF(__pyx_t_72);
+ __Pyx_XDECREF(__pyx_t_73);
+ __Pyx_XDECREF(__pyx_t_74);
+ __Pyx_XDECREF(__pyx_t_75);
+ __Pyx_XDECREF(__pyx_t_76);
+ __Pyx_XDECREF(__pyx_t_77);
+ __Pyx_XDECREF(__pyx_t_78);
+ if (__pyx_m) {
+ if (__pyx_d) {
+ __Pyx_AddTraceback("init aiohttp._http_parser", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ }
+ Py_CLEAR(__pyx_m);
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init aiohttp._http_parser");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ return (__pyx_m != NULL) ? 0 : -1;
+ #elif PY_MAJOR_VERSION >= 3
+ return __pyx_m;
+ #else
+ return;
+ #endif
+}
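+
+/* End of the generated module initializer: success flows to __pyx_L0 and
+ * returns the module (or 0 under PEP 489 multi-phase init); on failure every
+ * temporary is XDECREF'd, a traceback frame naming "init aiohttp._http_parser"
+ * is appended, and an ImportError is raised if no exception is already set. */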
+
+/* --- Runtime support code --- */
+/* Refnanny */
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule(modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, "RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif
+
+/* PyObjectGetAttrStr */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro))
+ return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_getattr))
+ return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+ return PyObject_GetAttr(obj, attr_name);
+}
+#endif
+
+/* GetBuiltinName */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+ PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+ if (unlikely(!result)) {
+ PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+ "name '%U' is not defined", name);
+#else
+ "name '%.200s' is not defined", PyString_AS_STRING(name));
+#endif
+ }
+ return result;
+}
+
+/* GetItemInt */
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+ PyObject *r;
+ if (!j) return NULL;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ Py_ssize_t wrapped_i = i;
+ if (wraparound & unlikely(i < 0)) {
+ wrapped_i += PyList_GET_SIZE(o);
+ }
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
+ PyObject *r = PyList_GET_ITEM(o, wrapped_i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ Py_ssize_t wrapped_i = i;
+ if (wraparound & unlikely(i < 0)) {
+ wrapped_i += PyTuple_GET_SIZE(o);
+ }
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
+ if (is_list || PyList_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
+ if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
+ PyObject *r = PyList_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ else if (PyTuple_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ } else {
+ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+ if (likely(m && m->sq_item)) {
+ if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+ Py_ssize_t l = m->sq_length(o);
+ if (likely(l >= 0)) {
+ i += l;
+ } else {
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+ return NULL;
+ PyErr_Clear();
+ }
+ }
+ return m->sq_item(o, i);
+ }
+ }
+#else
+ if (is_list || PySequence_Check(o)) {
+ return PySequence_GetItem(o, i);
+ }
+#endif
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
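+
+/* The GetItemInt helpers special-case exact lists and tuples with the
+ * PyList_GET_ITEM / PyTuple_GET_ITEM macros (honoring the wraparound and
+ * boundscheck flags), route other sequences through their sq_item slot, and
+ * fall back to the generic PyObject_GetItem path for everything else. */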
+
+/* decode_c_bytes */
+static CYTHON_INLINE PyObject* __Pyx_decode_c_bytes(
+ const char* cstring, Py_ssize_t length, Py_ssize_t start, Py_ssize_t stop,
+ const char* encoding, const char* errors,
+ PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
+ if (unlikely((start < 0) | (stop < 0))) {
+ if (start < 0) {
+ start += length;
+ if (start < 0)
+ start = 0;
+ }
+ if (stop < 0)
+ stop += length;
+ }
+ if (stop > length)
+ stop = length;
+ if (unlikely(stop <= start))
+ return __Pyx_NewRef(__pyx_empty_unicode);
+ length = stop - start;
+ cstring += start;
+ if (decode_func) {
+ return decode_func(cstring, length, errors);
+ } else {
+ return PyUnicode_Decode(cstring, length, encoding, errors);
+ }
+}
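+
+/* __Pyx_decode_c_bytes applies Python slice semantics to the C buffer
+ * (negative indices wrap, stop is clamped to the length), returns the shared
+ * empty unicode for empty slices, and prefers a specialized decoder such as
+ * PyUnicode_DecodeASCII over generic PyUnicode_Decode when one is supplied. */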
+
+/* RaiseArgTupleInvalid */
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *more_or_less;
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
+ func_name, more_or_less, num_expected,
+ (num_expected == 1) ? "" : "s", num_found);
+}
+
+/* RaiseDoubleKeywords */
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AsString(kw_name));
+ #endif
+}
+
+/* ParseKeywords */
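+/* Match each entry of the keyword dict against the expected argument names:
+   first by pointer identity (interned strings), then by string comparison.
+   Matches are stored into values[]; unknown keywords go into kwds2 (**kwargs)
+   when given, otherwise a TypeError is raised. A keyword that duplicates an
+   already-filled positional slot raises the "multiple values" error. */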
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ continue;
+ }
+ name = first_kw_arg;
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyString_Check(key))) {
+ while (*name) {
+ if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**name, key)) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ if ((**argname == key) || (
+ (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**argname, key))) {
+ goto arg_passed_twice;
+ }
+ argname++;
+ }
+ }
+ } else
+ #endif
+ if (likely(PyUnicode_Check(key))) {
+ while (*name) {
+ int cmp = (**name == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**name, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ int cmp = (**argname == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**argname, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) goto arg_passed_twice;
+ argname++;
+ }
+ }
+ } else
+ goto invalid_keyword_type;
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, key);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%.200s() got an unexpected keyword argument '%.200s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+/* RaiseClosureNameError */
+static CYTHON_INLINE void __Pyx_RaiseClosureNameError(const char *varname) {
+ PyErr_Format(PyExc_NameError, "free variable '%s' referenced before assignment in enclosing scope", varname);
+}
+
+/* RaiseTooManyValuesToUnpack */
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+ PyErr_Format(PyExc_ValueError,
+ "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
+}
+
+/* RaiseNeedMoreValuesToUnpack */
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+ PyErr_Format(PyExc_ValueError,
+ "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
+ index, (index == 1) ? "" : "s");
+}
+
+/* IterFinish */
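+/* Called when an iterator is exhausted: swallow a pending StopIteration and
+   return 0; return -1 if any other exception is set. */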
+static CYTHON_INLINE int __Pyx_IterFinish(void) {
+#if CYTHON_FAST_THREAD_STATE
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject* exc_type = tstate->curexc_type;
+ if (unlikely(exc_type)) {
+ if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
+ PyObject *exc_value, *exc_tb;
+ exc_value = tstate->curexc_value;
+ exc_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+ Py_DECREF(exc_type);
+ Py_XDECREF(exc_value);
+ Py_XDECREF(exc_tb);
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+#else
+ if (unlikely(PyErr_Occurred())) {
+ if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
+ PyErr_Clear();
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+#endif
+}
+
+/* UnpackItemEndCheck */
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
+ if (unlikely(retval)) {
+ Py_DECREF(retval);
+ __Pyx_RaiseTooManyValuesError(expected);
+ return -1;
+ } else {
+ return __Pyx_IterFinish();
+ }
+ return 0;
+}
+
+/* KeywordStringCheck */
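+/* Verify that every key in kwdict is a string; when kw_allowed is false, the
+   mere presence of a keyword argument raises the "unexpected keyword" error. */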
+static int __Pyx_CheckKeywordStrings(
+ PyObject *kwdict,
+ const char* function_name,
+ int kw_allowed)
+{
+ PyObject* key = 0;
+ Py_ssize_t pos = 0;
+#if CYTHON_COMPILING_IN_PYPY
+ if (!kw_allowed && PyDict_Next(kwdict, &pos, &key, 0))
+ goto invalid_keyword;
+ return 1;
+#else
+ while (PyDict_Next(kwdict, &pos, &key, 0)) {
+ #if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyString_Check(key)))
+ #endif
+ if (unlikely(!PyUnicode_Check(key)))
+ goto invalid_keyword_type;
+ }
+ if ((!kw_allowed) && unlikely(key))
+ goto invalid_keyword;
+ return 1;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() keywords must be strings", function_name);
+ return 0;
+#endif
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%.200s() got an unexpected keyword argument '%.200s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+ return 0;
+}
+
+/* ExtTypeTest */
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+ if (unlikely(!type)) {
+ PyErr_SetString(PyExc_SystemError, "Missing type object");
+ return 0;
+ }
+ if (likely(__Pyx_TypeCheck(obj, type)))
+ return 1;
+ PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
+ Py_TYPE(obj)->tp_name, type->tp_name);
+ return 0;
+}
+
+/* DictGetItem */
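+/* Variant of PyDict_GetItemWithError that raises KeyError on a missing key
+   (packing tuple keys into a 1-tuple so they report as a single key) and
+   returns a new reference instead of a borrowed one. */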
+#if PY_MAJOR_VERSION >= 3 && !CYTHON_COMPILING_IN_PYPY
+static PyObject *__Pyx_PyDict_GetItem(PyObject *d, PyObject* key) {
+ PyObject *value;
+ value = PyDict_GetItemWithError(d, key);
+ if (unlikely(!value)) {
+ if (!PyErr_Occurred()) {
+ if (unlikely(PyTuple_Check(key))) {
+ PyObject* args = PyTuple_Pack(1, key);
+ if (likely(args)) {
+ PyErr_SetObject(PyExc_KeyError, args);
+ Py_DECREF(args);
+ }
+ } else {
+ PyErr_SetObject(PyExc_KeyError, key);
+ }
+ }
+ return NULL;
+ }
+ Py_INCREF(value);
+ return value;
+}
+#endif
+
+/* PyErrExceptionMatches */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+ PyObject *exc_type = tstate->curexc_type;
+ if (exc_type == err) return 1;
+ if (unlikely(!exc_type)) return 0;
+ if (unlikely(PyTuple_Check(err)))
+ return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+ return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* PyErrFetchRestore */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+#endif
+
+/* GetAttr */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
+#if CYTHON_USE_TYPE_SLOTS
+#if PY_MAJOR_VERSION >= 3
+ if (likely(PyUnicode_Check(n)))
+#else
+ if (likely(PyString_Check(n)))
+#endif
+ return __Pyx_PyObject_GetAttrStr(o, n);
+#endif
+ return PyObject_GetAttr(o, n);
+}
+
+/* GetAttr3 */
+static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ return NULL;
+ __Pyx_PyErr_Clear();
+ Py_INCREF(d);
+ return d;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
+ PyObject *r = __Pyx_GetAttr(o, n);
+ return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
+}
+
+/* PyDictVersioning */
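+/* Helpers around CPython's dict version tags (ma_version_tag), used to cache
+   global and attribute lookups; a version of 0 means "no dict, do not cache". */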
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
+ PyObject **dictptr = NULL;
+ Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
+ if (offset) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
+#else
+ dictptr = _PyObject_GetDictPtr(obj);
+#endif
+ }
+ return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
+}
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
+ return 0;
+ return obj_dict_version == __Pyx_get_object_dict_version(obj);
+}
+#endif
+
+/* GetModuleGlobalName */
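+/* Look up a name in the module's globals dict, updating the dict-version
+   cache where enabled, and fall back to the builtins on a miss. */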
+#if CYTHON_USE_DICT_VERSIONS
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
+#else
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
+#endif
+{
+ PyObject *result;
+#if !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+ result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ } else if (unlikely(PyErr_Occurred())) {
+ return NULL;
+ }
+#else
+ result = PyDict_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+#endif
+#else
+ result = PyObject_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+ PyErr_Clear();
+#endif
+ return __Pyx_GetBuiltinName(name);
+}
+
+/* PyFunctionFastCall */
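+/* Fast path for calling pure-Python functions, modelled on CPython's own
+   fastcall: for simple code objects a frame is created and evaluated
+   directly, avoiding the argument-tuple round trip; all other cases are
+   funnelled through PyEval_EvalCodeEx with defaults, kwdefaults and closure. */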
+#if CYTHON_FAST_PYCALL
+static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
+ PyObject *globals) {
+ PyFrameObject *f;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject **fastlocals;
+ Py_ssize_t i;
+ PyObject *result;
+ assert(globals != NULL);
+ /* XXX Perhaps we should create a specialized
+ PyFrame_New() that doesn't take locals, but does
+ take builtins without sanity checking them.
+ */
+ assert(tstate != NULL);
+ f = PyFrame_New(tstate, co, globals, NULL);
+ if (f == NULL) {
+ return NULL;
+ }
+ fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
+ for (i = 0; i < na; i++) {
+ Py_INCREF(*args);
+ fastlocals[i] = *args++;
+ }
+ result = PyEval_EvalFrameEx(f,0);
+ ++tstate->recursion_depth;
+ Py_DECREF(f);
+ --tstate->recursion_depth;
+ return result;
+}
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
+ PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
+ PyObject *globals = PyFunction_GET_GLOBALS(func);
+ PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
+ PyObject *closure;
+#if PY_MAJOR_VERSION >= 3
+ PyObject *kwdefs;
+#endif
+ PyObject *kwtuple, **k;
+ PyObject **d;
+ Py_ssize_t nd;
+ Py_ssize_t nk;
+ PyObject *result;
+ assert(kwargs == NULL || PyDict_Check(kwargs));
+ nk = kwargs ? PyDict_Size(kwargs) : 0;
+ if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
+ return NULL;
+ }
+ if (
+#if PY_MAJOR_VERSION >= 3
+ co->co_kwonlyargcount == 0 &&
+#endif
+ likely(kwargs == NULL || nk == 0) &&
+ co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
+ if (argdefs == NULL && co->co_argcount == nargs) {
+ result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
+ goto done;
+ }
+ else if (nargs == 0 && argdefs != NULL
+ && co->co_argcount == Py_SIZE(argdefs)) {
+ /* function called with no arguments, but all parameters have
+ a default value: use default values as arguments. */
+ args = &PyTuple_GET_ITEM(argdefs, 0);
+ result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
+ goto done;
+ }
+ }
+ if (kwargs != NULL) {
+ Py_ssize_t pos, i;
+ kwtuple = PyTuple_New(2 * nk);
+ if (kwtuple == NULL) {
+ result = NULL;
+ goto done;
+ }
+ k = &PyTuple_GET_ITEM(kwtuple, 0);
+ pos = i = 0;
+ while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
+ Py_INCREF(k[i]);
+ Py_INCREF(k[i+1]);
+ i += 2;
+ }
+ nk = i / 2;
+ }
+ else {
+ kwtuple = NULL;
+ k = NULL;
+ }
+ closure = PyFunction_GET_CLOSURE(func);
+#if PY_MAJOR_VERSION >= 3
+ kwdefs = PyFunction_GET_KW_DEFAULTS(func);
+#endif
+ if (argdefs != NULL) {
+ d = &PyTuple_GET_ITEM(argdefs, 0);
+ nd = Py_SIZE(argdefs);
+ }
+ else {
+ d = NULL;
+ nd = 0;
+ }
+#if PY_MAJOR_VERSION >= 3
+ result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, kwdefs, closure);
+#else
+ result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, closure);
+#endif
+ Py_XDECREF(kwtuple);
+done:
+ Py_LeaveRecursiveCall();
+ return result;
+}
+#endif
+#endif
+
+/* PyObjectCall */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
+ PyObject *result;
+ ternaryfunc call = func->ob_type->tp_call;
+ if (unlikely(!call))
+ return PyObject_Call(func, arg, kw);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = (*call)(func, arg, kw);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCallMethO */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
+ PyObject *self, *result;
+ PyCFunction cfunc;
+ cfunc = PyCFunction_GET_FUNCTION(func);
+ self = PyCFunction_GET_SELF(func);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = cfunc(self, arg);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCallNoArg */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, NULL, 0);
+ }
+#endif
+#ifdef __Pyx_CyFunction_USED
+ if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
+#else
+ if (likely(PyCFunction_Check(func)))
+#endif
+ {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
+ return __Pyx_PyObject_CallMethO(func, NULL);
+ }
+ }
+ return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
+}
+#endif
+
+/* PyCFunctionFastCall */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
+ PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
+ PyCFunction meth = PyCFunction_GET_FUNCTION(func);
+ PyObject *self = PyCFunction_GET_SELF(func);
+ int flags = PyCFunction_GET_FLAGS(func);
+ assert(PyCFunction_Check(func));
+ assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
+ assert(nargs >= 0);
+ assert(nargs == 0 || args != NULL);
+ /* _PyCFunction_FastCallDict() must not be called with an exception set,
+ because it may clear it (directly or indirectly) and so the
+ caller loses its exception */
+ assert(!PyErr_Occurred());
+ if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
+ return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
+ } else {
+ return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
+ }
+}
+#endif
+
+/* PyObjectCallOneArg */
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_New(1);
+ if (unlikely(!args)) return NULL;
+ Py_INCREF(arg);
+ PyTuple_SET_ITEM(args, 0, arg);
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, &arg, 1);
+ }
+#endif
+ if (likely(PyCFunction_Check(func))) {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+ return __Pyx_PyObject_CallMethO(func, arg);
+#if CYTHON_FAST_PYCCALL
+ } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
+ return __Pyx_PyCFunction_FastCall(func, &arg, 1);
+#endif
+ }
+ }
+ return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_Pack(1, arg);
+ if (unlikely(!args)) return NULL;
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+#endif
+
+/* PyObjectCall2Args */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
+ PyObject *args, *result = NULL;
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyFunction_FastCall(function, args, 2);
+ }
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyCFunction_FastCall(function, args, 2);
+ }
+ #endif
+ args = PyTuple_New(2);
+ if (unlikely(!args)) goto done;
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 0, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 1, arg2);
+ Py_INCREF(function);
+ result = __Pyx_PyObject_Call(function, args, NULL);
+ Py_DECREF(args);
+ Py_DECREF(function);
+done:
+ return result;
+}
+
+/* RaiseException */
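+/* Implement the semantics of the "raise" statement: normalize type/value/
+   traceback on Python 2; on Python 3, instantiate exception classes,
+   validate the cause, chain it via PyException_SetCause and attach the
+   given traceback. */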
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
+ CYTHON_UNUSED PyObject *cause) {
+ __Pyx_PyThreadState_declare
+ Py_XINCREF(type);
+ if (!value || value == Py_None)
+ value = NULL;
+ else
+ Py_INCREF(value);
+ if (!tb || tb == Py_None)
+ tb = NULL;
+ else {
+ Py_INCREF(tb);
+ if (!PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ }
+ if (PyType_Check(type)) {
+#if CYTHON_COMPILING_IN_PYPY
+ if (!value) {
+ Py_INCREF(Py_None);
+ value = Py_None;
+ }
+#endif
+ PyErr_NormalizeException(&type, &value, &tb);
+ } else {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ }
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+#else
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ PyObject* owned_instance = NULL;
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (PyExceptionClass_Check(type)) {
+ PyObject *instance_class = NULL;
+ if (value && PyExceptionInstance_Check(value)) {
+ instance_class = (PyObject*) Py_TYPE(value);
+ if (instance_class != type) {
+ int is_subclass = PyObject_IsSubclass(instance_class, type);
+ if (!is_subclass) {
+ instance_class = NULL;
+ } else if (unlikely(is_subclass == -1)) {
+ goto bad;
+ } else {
+ type = instance_class;
+ }
+ }
+ }
+ if (!instance_class) {
+ PyObject *args;
+ if (!value)
+ args = PyTuple_New(0);
+ else if (PyTuple_Check(value)) {
+ Py_INCREF(value);
+ args = value;
+ } else
+ args = PyTuple_Pack(1, value);
+ if (!args)
+ goto bad;
+ owned_instance = PyObject_Call(type, args, NULL);
+ Py_DECREF(args);
+ if (!owned_instance)
+ goto bad;
+ value = owned_instance;
+ if (!PyExceptionInstance_Check(value)) {
+ PyErr_Format(PyExc_TypeError,
+ "calling %R should have returned an instance of "
+ "BaseException, not %R",
+ type, Py_TYPE(value));
+ goto bad;
+ }
+ }
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+ if (cause) {
+ PyObject *fixed_cause;
+ if (cause == Py_None) {
+ fixed_cause = NULL;
+ } else if (PyExceptionClass_Check(cause)) {
+ fixed_cause = PyObject_CallObject(cause, NULL);
+ if (fixed_cause == NULL)
+ goto bad;
+ } else if (PyExceptionInstance_Check(cause)) {
+ fixed_cause = cause;
+ Py_INCREF(fixed_cause);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "exception causes must derive from "
+ "BaseException");
+ goto bad;
+ }
+ PyException_SetCause(value, fixed_cause);
+ }
+ PyErr_SetObject(type, value);
+ if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
+ Py_INCREF(tb);
+ PyErr_Restore(tmp_type, tmp_value, tb);
+ Py_XDECREF(tmp_tb);
+#else
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+#endif
+ }
+bad:
+ Py_XDECREF(owned_instance);
+ return;
+}
+#endif
+
+/* BytesEquals */
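+/* Equality of bytes objects with cheap short-circuits (identity, length,
+   first byte, cached hash) before falling back to memcmp; non-bytes operands
+   are handed to PyObject_RichCompare. */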
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
+#if CYTHON_COMPILING_IN_PYPY
+ return PyObject_RichCompareBool(s1, s2, equals);
+#else
+ if (s1 == s2) {
+ return (equals == Py_EQ);
+ } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
+ const char *ps1, *ps2;
+ Py_ssize_t length = PyBytes_GET_SIZE(s1);
+ if (length != PyBytes_GET_SIZE(s2))
+ return (equals == Py_NE);
+ ps1 = PyBytes_AS_STRING(s1);
+ ps2 = PyBytes_AS_STRING(s2);
+ if (ps1[0] != ps2[0]) {
+ return (equals == Py_NE);
+ } else if (length == 1) {
+ return (equals == Py_EQ);
+ } else {
+ int result;
+#if CYTHON_USE_UNICODE_INTERNALS
+ Py_hash_t hash1, hash2;
+ hash1 = ((PyBytesObject*)s1)->ob_shash;
+ hash2 = ((PyBytesObject*)s2)->ob_shash;
+ if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
+ return (equals == Py_NE);
+ }
+#endif
+ result = memcmp(ps1, ps2, (size_t)length);
+ return (equals == Py_EQ) ? (result == 0) : (result != 0);
+ }
+ } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
+ return (equals == Py_NE);
+ } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
+ return (equals == Py_NE);
+ } else {
+ int result;
+ PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
+ if (!py_result)
+ return -1;
+ result = __Pyx_PyObject_IsTrue(py_result);
+ Py_DECREF(py_result);
+ return result;
+ }
+#endif
+}
+
+/* UnicodeEquals */
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
+#if CYTHON_COMPILING_IN_PYPY
+ return PyObject_RichCompareBool(s1, s2, equals);
+#else
+#if PY_MAJOR_VERSION < 3
+ PyObject* owned_ref = NULL;
+#endif
+ int s1_is_unicode, s2_is_unicode;
+ if (s1 == s2) {
+ goto return_eq;
+ }
+ s1_is_unicode = PyUnicode_CheckExact(s1);
+ s2_is_unicode = PyUnicode_CheckExact(s2);
+#if PY_MAJOR_VERSION < 3
+ if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
+ owned_ref = PyUnicode_FromObject(s2);
+ if (unlikely(!owned_ref))
+ return -1;
+ s2 = owned_ref;
+ s2_is_unicode = 1;
+ } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
+ owned_ref = PyUnicode_FromObject(s1);
+ if (unlikely(!owned_ref))
+ return -1;
+ s1 = owned_ref;
+ s1_is_unicode = 1;
+ } else if (((!s2_is_unicode) & (!s1_is_unicode))) {
+ return __Pyx_PyBytes_Equals(s1, s2, equals);
+ }
+#endif
+ if (s1_is_unicode & s2_is_unicode) {
+ Py_ssize_t length;
+ int kind;
+ void *data1, *data2;
+ if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
+ return -1;
+ length = __Pyx_PyUnicode_GET_LENGTH(s1);
+ if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
+ goto return_ne;
+ }
+#if CYTHON_USE_UNICODE_INTERNALS
+ {
+ Py_hash_t hash1, hash2;
+ #if CYTHON_PEP393_ENABLED
+ hash1 = ((PyASCIIObject*)s1)->hash;
+ hash2 = ((PyASCIIObject*)s2)->hash;
+ #else
+ hash1 = ((PyUnicodeObject*)s1)->hash;
+ hash2 = ((PyUnicodeObject*)s2)->hash;
+ #endif
+ if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
+ goto return_ne;
+ }
+ }
+#endif
+ kind = __Pyx_PyUnicode_KIND(s1);
+ if (kind != __Pyx_PyUnicode_KIND(s2)) {
+ goto return_ne;
+ }
+ data1 = __Pyx_PyUnicode_DATA(s1);
+ data2 = __Pyx_PyUnicode_DATA(s2);
+ if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
+ goto return_ne;
+ } else if (length == 1) {
+ goto return_eq;
+ } else {
+ int result = memcmp(data1, data2, (size_t)(length * kind));
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(owned_ref);
+ #endif
+ return (equals == Py_EQ) ? (result == 0) : (result != 0);
+ }
+ } else if ((s1 == Py_None) & s2_is_unicode) {
+ goto return_ne;
+ } else if ((s2 == Py_None) & s1_is_unicode) {
+ goto return_ne;
+ } else {
+ int result;
+ PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(owned_ref);
+ #endif
+ if (!py_result)
+ return -1;
+ result = __Pyx_PyObject_IsTrue(py_result);
+ Py_DECREF(py_result);
+ return result;
+ }
+return_eq:
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(owned_ref);
+ #endif
+ return (equals == Py_EQ);
+return_ne:
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(owned_ref);
+ #endif
+ return (equals == Py_NE);
+#endif
+}
+
+/* SliceObject */
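+/* Slice an object using C integer bounds: on Python 2 the sq_slice slot is
+   used directly (wrapping negative bounds via sq_length); otherwise a slice
+   object is built and passed to mp_subscript / PyObject_GetItem. */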
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
+ Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
+ int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
+#if CYTHON_USE_TYPE_SLOTS
+ PyMappingMethods* mp;
+#if PY_MAJOR_VERSION < 3
+ PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
+ if (likely(ms && ms->sq_slice)) {
+ if (!has_cstart) {
+ if (_py_start && (*_py_start != Py_None)) {
+ cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
+ if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+ } else
+ cstart = 0;
+ }
+ if (!has_cstop) {
+ if (_py_stop && (*_py_stop != Py_None)) {
+ cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
+ if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+ } else
+ cstop = PY_SSIZE_T_MAX;
+ }
+ if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
+ Py_ssize_t l = ms->sq_length(obj);
+ if (likely(l >= 0)) {
+ if (cstop < 0) {
+ cstop += l;
+ if (cstop < 0) cstop = 0;
+ }
+ if (cstart < 0) {
+ cstart += l;
+ if (cstart < 0) cstart = 0;
+ }
+ } else {
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+ goto bad;
+ PyErr_Clear();
+ }
+ }
+ return ms->sq_slice(obj, cstart, cstop);
+ }
+#endif
+ mp = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(mp && mp->mp_subscript))
+#endif
+ {
+ PyObject* result;
+ PyObject *py_slice, *py_start, *py_stop;
+ if (_py_slice) {
+ py_slice = *_py_slice;
+ } else {
+ PyObject* owned_start = NULL;
+ PyObject* owned_stop = NULL;
+ if (_py_start) {
+ py_start = *_py_start;
+ } else {
+ if (has_cstart) {
+ owned_start = py_start = PyInt_FromSsize_t(cstart);
+ if (unlikely(!py_start)) goto bad;
+ } else
+ py_start = Py_None;
+ }
+ if (_py_stop) {
+ py_stop = *_py_stop;
+ } else {
+ if (has_cstop) {
+ owned_stop = py_stop = PyInt_FromSsize_t(cstop);
+ if (unlikely(!py_stop)) {
+ Py_XDECREF(owned_start);
+ goto bad;
+ }
+ } else
+ py_stop = Py_None;
+ }
+ py_slice = PySlice_New(py_start, py_stop, Py_None);
+ Py_XDECREF(owned_start);
+ Py_XDECREF(owned_stop);
+ if (unlikely(!py_slice)) goto bad;
+ }
+#if CYTHON_USE_TYPE_SLOTS
+ result = mp->mp_subscript(obj, py_slice);
+#else
+ result = PyObject_GetItem(obj, py_slice);
+#endif
+ if (!_py_slice) {
+ Py_DECREF(py_slice);
+ }
+ return result;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
+bad:
+ return NULL;
+}
+
+/* GetException */
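+/* Fetch and normalize the currently raised exception into owned references,
+   and make it the "current handled exception" (sys.exc_info()) by swapping
+   it into the thread state or the exc-info stack, depending on the CPython
+   version. */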
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
+#endif
+{
+ PyObject *local_type, *local_value, *local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ local_type = tstate->curexc_type;
+ local_value = tstate->curexc_value;
+ local_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+#else
+ PyErr_Fetch(&local_type, &local_value, &local_tb);
+#endif
+ PyErr_NormalizeException(&local_type, &local_value, &local_tb);
+#if CYTHON_FAST_THREAD_STATE
+ if (unlikely(tstate->curexc_type))
+#else
+ if (unlikely(PyErr_Occurred()))
+#endif
+ goto bad;
+ #if PY_MAJOR_VERSION >= 3
+ if (local_tb) {
+ if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
+ goto bad;
+ }
+ #endif
+ Py_XINCREF(local_tb);
+ Py_XINCREF(local_type);
+ Py_XINCREF(local_value);
+ *type = local_type;
+ *value = local_value;
+ *tb = local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ #if CYTHON_USE_EXC_INFO_STACK
+ {
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = local_type;
+ exc_info->exc_value = local_value;
+ exc_info->exc_traceback = local_tb;
+ }
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = local_type;
+ tstate->exc_value = local_value;
+ tstate->exc_traceback = local_tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+#else
+ PyErr_SetExcInfo(local_type, local_value, local_tb);
+#endif
+ return 0;
+bad:
+ *type = 0;
+ *value = 0;
+ *tb = 0;
+ Py_XDECREF(local_type);
+ Py_XDECREF(local_value);
+ Py_XDECREF(local_tb);
+ return -1;
+}
+
+/* SwapException */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = *type;
+ exc_info->exc_value = *value;
+ exc_info->exc_traceback = *tb;
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = *type;
+ tstate->exc_value = *value;
+ tstate->exc_traceback = *tb;
+ #endif
+ *type = tmp_type;
+ *value = tmp_value;
+ *tb = tmp_tb;
+}
+#else
+static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
+ PyErr_SetExcInfo(*type, *value, *tb);
+ *type = tmp_type;
+ *value = tmp_value;
+ *tb = tmp_tb;
+}
+#endif
+
+/* GetTopmostException */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem *
+__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
+{
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
+ exc_info->previous_item != NULL)
+ {
+ exc_info = exc_info->previous_item;
+ }
+ return exc_info;
+}
+#endif
+
+/* SaveResetException */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
+ *type = exc_info->exc_type;
+ *value = exc_info->exc_value;
+ *tb = exc_info->exc_traceback;
+ #else
+ *type = tstate->exc_type;
+ *value = tstate->exc_value;
+ *tb = tstate->exc_traceback;
+ #endif
+ Py_XINCREF(*type);
+ Py_XINCREF(*value);
+ Py_XINCREF(*tb);
+}
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = type;
+ exc_info->exc_value = value;
+ exc_info->exc_traceback = tb;
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = type;
+ tstate->exc_value = value;
+ tstate->exc_traceback = tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+#endif
+
+/* decode_c_string */
+static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
+ const char* cstring, Py_ssize_t start, Py_ssize_t stop,
+ const char* encoding, const char* errors,
+ PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
+ Py_ssize_t length;
+ if (unlikely((start < 0) | (stop < 0))) {
+ size_t slen = strlen(cstring);
+ if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "c-string too long to convert to Python");
+ return NULL;
+ }
+ length = (Py_ssize_t) slen;
+ if (start < 0) {
+ start += length;
+ if (start < 0)
+ start = 0;
+ }
+ if (stop < 0)
+ stop += length;
+ }
+ if (unlikely(stop <= start))
+ return __Pyx_NewRef(__pyx_empty_unicode);
+ length = stop - start;
+ cstring += start;
+ if (decode_func) {
+ return decode_func(cstring, length, errors);
+ } else {
+ return PyUnicode_Decode(cstring, length, encoding, errors);
+ }
+}
+
+/* UnpackUnboundCMethod */
+static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) {
+ PyObject *method;
+ method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name);
+ if (unlikely(!method))
+ return -1;
+ target->method = method;
+#if CYTHON_COMPILING_IN_CPYTHON
+ #if PY_MAJOR_VERSION >= 3
+ if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type)))
+ #endif
+ {
+ PyMethodDescrObject *descr = (PyMethodDescrObject*) method;
+ target->func = descr->d_method->ml_meth;
+ target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS);
+ }
+#endif
+ return 0;
+}
+
+/* CallUnboundCMethod1 */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg) {
+ if (likely(cfunc->func)) {
+ int flag = cfunc->flag;
+ if (flag == METH_O) {
+ return (*(cfunc->func))(self, arg);
+ } else if (PY_VERSION_HEX >= 0x030600B1 && flag == METH_FASTCALL) {
+ if (PY_VERSION_HEX >= 0x030700A0) {
+ return (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)cfunc->func)(self, &arg, 1);
+ } else {
+ return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL);
+ }
+ } else if (PY_VERSION_HEX >= 0x030700A0 && flag == (METH_FASTCALL | METH_KEYWORDS)) {
+ return (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)cfunc->func)(self, &arg, 1, NULL);
+ }
+ }
+ return __Pyx__CallUnboundCMethod1(cfunc, self, arg);
+}
+#endif
+static PyObject* __Pyx__CallUnboundCMethod1(__Pyx_CachedCFunction* cfunc, PyObject* self, PyObject* arg){
+ PyObject *args, *result = NULL;
+ if (unlikely(!cfunc->func && !cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL;
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (cfunc->func && (cfunc->flag & METH_VARARGS)) {
+ args = PyTuple_New(1);
+ if (unlikely(!args)) goto bad;
+ Py_INCREF(arg);
+ PyTuple_SET_ITEM(args, 0, arg);
+ if (cfunc->flag & METH_KEYWORDS)
+ result = (*(PyCFunctionWithKeywords)(void*)(PyCFunction)cfunc->func)(self, args, NULL);
+ else
+ result = (*cfunc->func)(self, args);
+ } else {
+ args = PyTuple_New(2);
+ if (unlikely(!args)) goto bad;
+ Py_INCREF(self);
+ PyTuple_SET_ITEM(args, 0, self);
+ Py_INCREF(arg);
+ PyTuple_SET_ITEM(args, 1, arg);
+ result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
+ }
+#else
+ args = PyTuple_Pack(2, self, arg);
+ if (unlikely(!args)) goto bad;
+ result = __Pyx_PyObject_Call(cfunc->method, args, NULL);
+#endif
+bad:
+ Py_XDECREF(args);
+ return result;
+}
+
+/* Import */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_import;
+ py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+ if (!py_import)
+ goto bad;
+ #endif
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ {
+ #if PY_MAJOR_VERSION >= 3
+ if (level == -1) {
+ if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, 1);
+ if (!module) {
+ if (!PyErr_ExceptionMatches(PyExc_ImportError))
+ goto bad;
+ PyErr_Clear();
+ }
+ }
+ level = 0;
+ }
+ #endif
+ if (!module) {
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_level = PyInt_FromLong(level);
+ if (!py_level)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
+ Py_DECREF(py_level);
+ #else
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, level);
+ #endif
+ }
+ }
+bad:
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(py_import);
+ #endif
+ Py_XDECREF(empty_list);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+/* ImportFrom */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+ PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+ if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Format(PyExc_ImportError,
+ #if PY_MAJOR_VERSION < 3
+ "cannot import name %.230s", PyString_AS_STRING(name));
+ #else
+ "cannot import name %S", name);
+ #endif
+ }
+ return value;
+}
+
+/* HasAttr */
+static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
+ PyObject *r;
+ if (unlikely(!__Pyx_PyBaseString_Check(n))) {
+ PyErr_SetString(PyExc_TypeError,
+ "hasattr(): attribute name must be string");
+ return -1;
+ }
+ r = __Pyx_GetAttr(o, n);
+ if (unlikely(!r)) {
+ PyErr_Clear();
+ return 0;
+ } else {
+ Py_DECREF(r);
+ return 1;
+ }
+}
+
+/* PyObject_GenericGetAttrNoDict */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, attr_name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(attr_name));
+#endif
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
+ PyObject *descr;
+ PyTypeObject *tp = Py_TYPE(obj);
+ if (unlikely(!PyString_Check(attr_name))) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ assert(!tp->tp_dictoffset);
+ descr = _PyType_Lookup(tp, attr_name);
+ if (unlikely(!descr)) {
+ return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
+ }
+ Py_INCREF(descr);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
+ #endif
+ {
+ descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
+ if (unlikely(f)) {
+ PyObject *res = f(descr, obj, (PyObject *)tp);
+ Py_DECREF(descr);
+ return res;
+ }
+ }
+ return descr;
+}
+#endif
+
+/* PyObject_GenericGetAttr */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
+ if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
+}
+#endif
+
+/* PyObjectGetAttrStrNoError */
+static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ __Pyx_PyErr_Clear();
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
+ PyObject *result;
+#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
+ return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
+ }
+#endif
+ result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
+ if (unlikely(!result)) {
+ __Pyx_PyObject_GetAttrStr_ClearAttributeError();
+ }
+ return result;
+}
+
+/* SetupReduce */
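+/* Wire up pickling for an extension type: unless the type defines
+   __getstate__ or a custom __reduce__/__reduce_ex__, install the generated
+   __reduce_cython__/__setstate_cython__ helpers as __reduce__/__setstate__
+   and remove the temporary names from the type dict. */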
+static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
+ int ret;
+ PyObject *name_attr;
+ name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name);
+ if (likely(name_attr)) {
+ ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
+ } else {
+ ret = -1;
+ }
+ if (unlikely(ret < 0)) {
+ PyErr_Clear();
+ ret = 0;
+ }
+ Py_XDECREF(name_attr);
+ return ret;
+}
+static int __Pyx_setup_reduce(PyObject* type_obj) {
+ int ret = 0;
+ PyObject *object_reduce = NULL;
+ PyObject *object_reduce_ex = NULL;
+ PyObject *reduce = NULL;
+ PyObject *reduce_ex = NULL;
+ PyObject *reduce_cython = NULL;
+ PyObject *setstate = NULL;
+ PyObject *setstate_cython = NULL;
+#if CYTHON_USE_PYTYPE_LOOKUP
+ if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
+#else
+ if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
+#endif
+#if CYTHON_USE_PYTYPE_LOOKUP
+ object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
+#else
+ object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
+#endif
+ reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
+ if (reduce_ex == object_reduce_ex) {
+#if CYTHON_USE_PYTYPE_LOOKUP
+ object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
+#else
+ object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
+#endif
+ reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
+ if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
+ reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
+ if (likely(reduce_cython)) {
+ ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ } else if (reduce == object_reduce || PyErr_Occurred()) {
+ goto __PYX_BAD;
+ }
+ setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
+ if (!setstate) PyErr_Clear();
+ if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
+ setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
+ if (likely(setstate_cython)) {
+ ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ } else if (!setstate || PyErr_Occurred()) {
+ goto __PYX_BAD;
+ }
+ }
+ PyType_Modified((PyTypeObject*)type_obj);
+ }
+ }
+ goto __PYX_GOOD;
+__PYX_BAD:
+ if (!PyErr_Occurred())
+ PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
+ ret = -1;
+__PYX_GOOD:
+#if !CYTHON_USE_PYTYPE_LOOKUP
+ Py_XDECREF(object_reduce);
+ Py_XDECREF(object_reduce_ex);
+#endif
+ Py_XDECREF(reduce);
+ Py_XDECREF(reduce_ex);
+ Py_XDECREF(reduce_cython);
+ Py_XDECREF(setstate);
+ Py_XDECREF(setstate_cython);
+ return ret;
+}
+
+/* SetVTable */
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+#if PY_VERSION_HEX >= 0x02070000
+ PyObject *ob = PyCapsule_New(vtable, 0, 0);
+#else
+ PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
+#endif
+ if (!ob)
+ goto bad;
+ if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
+ goto bad;
+ Py_DECREF(ob);
+ return 0;
+bad:
+ Py_XDECREF(ob);
+ return -1;
+}
+
+/* TypeImport */
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
+ size_t size, enum __Pyx_ImportType_CheckSize check_size)
+{
+ PyObject *result = 0;
+ char warning[200];
+ Py_ssize_t basicsize;
+#ifdef Py_LIMITED_API
+ PyObject *py_basicsize;
+#endif
+ result = PyObject_GetAttrString(module, class_name);
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%.200s.%.200s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+#ifndef Py_LIMITED_API
+ basicsize = ((PyTypeObject *)result)->tp_basicsize;
+#else
+ py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
+ if (!py_basicsize)
+ goto bad;
+ basicsize = PyLong_AsSsize_t(py_basicsize);
+ Py_DECREF(py_basicsize);
+ py_basicsize = 0;
+ if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
+ goto bad;
+#endif
+ if ((size_t)basicsize < size) {
+ PyErr_Format(PyExc_ValueError,
+ "%.200s.%.200s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ goto bad;
+ }
+ if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%.200s.%.200s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ goto bad;
+ }
+ else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
+ PyOS_snprintf(warning, sizeof(warning),
+ "%s.%s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(result);
+ return NULL;
+}
+#endif
+
+/* CLineInTraceback */
+#ifndef CYTHON_CLINE_IN_TRACEBACK
+static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
+ PyObject *use_cline;
+ PyObject *ptype, *pvalue, *ptraceback;
+#if CYTHON_COMPILING_IN_CPYTHON
+ PyObject **cython_runtime_dict;
+#endif
+ if (unlikely(!__pyx_cython_runtime)) {
+ return c_line;
+ }
+ __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
+#if CYTHON_COMPILING_IN_CPYTHON
+ cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
+ if (likely(cython_runtime_dict)) {
+ __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
+ use_cline, *cython_runtime_dict,
+ __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
+ } else
+#endif
+ {
+ PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
+ if (use_cline_obj) {
+ use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
+ Py_DECREF(use_cline_obj);
+ } else {
+ PyErr_Clear();
+ use_cline = NULL;
+ }
+ }
+ if (!use_cline) {
+ c_line = 0;
+ PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
+ }
+ else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
+ c_line = 0;
+ }
+ __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
+ return c_line;
+}
+#endif
+
+/* CodeObjectCache */
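+/* A sorted array of (line, code object) pairs with binary search, caching
+   the synthetic code objects created for tracebacks (C lines are stored
+   negated to keep them distinct from Python line numbers). */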
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+ int start = 0, mid = 0, end = count - 1;
+ if (end >= 0 && code_line > entries[end].code_line) {
+ return count;
+ }
+ while (start < end) {
+ mid = start + (end - start) / 2;
+ if (code_line < entries[mid].code_line) {
+ end = mid;
+ } else if (code_line > entries[mid].code_line) {
+ start = mid + 1;
+ } else {
+ return mid;
+ }
+ }
+ if (code_line <= entries[mid].code_line) {
+ return mid;
+ } else {
+ return mid + 1;
+ }
+}
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+ PyCodeObject* code_object;
+ int pos;
+ if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+ return NULL;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+ return NULL;
+ }
+ code_object = __pyx_code_cache.entries[pos].code_object;
+ Py_INCREF(code_object);
+ return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+ int pos, i;
+ __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+ if (unlikely(!code_line)) {
+ return;
+ }
+ if (unlikely(!entries)) {
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+ if (likely(entries)) {
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = 64;
+ __pyx_code_cache.count = 1;
+ entries[0].code_line = code_line;
+ entries[0].code_object = code_object;
+ Py_INCREF(code_object);
+ }
+ return;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+ PyCodeObject* tmp = entries[pos].code_object;
+ entries[pos].code_object = code_object;
+ Py_DECREF(tmp);
+ return;
+ }
+ if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+ int new_max = __pyx_code_cache.max_count + 64;
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+ __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
+ if (unlikely(!entries)) {
+ return;
+ }
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = new_max;
+ }
+ for (i=__pyx_code_cache.count; i>pos; i--) {
+ entries[i] = entries[i-1];
+ }
+ entries[pos].code_line = code_line;
+ entries[pos].code_object = code_object;
+ __pyx_code_cache.count++;
+ Py_INCREF(code_object);
+}
+
+/* AddTraceback */
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+ const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(filename);
+ #else
+ py_srcfile = PyUnicode_FromString(filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (c_line) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_code = __Pyx_PyCode_New(
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ py_line,
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ Py_DECREF(py_srcfile);
+ Py_DECREF(py_funcname);
+ return py_code;
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ if (c_line) {
+ c_line = __Pyx_CLineForTraceback(tstate, c_line);
+ }
+ py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
+ if (!py_code) {
+ py_code = __Pyx_CreateCodeObjectForTraceback(
+ funcname, c_line, py_line, filename);
+ if (!py_code) goto bad;
+ __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
+ }
+ py_frame = PyFrame_New(
+ tstate, /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ __pyx_d, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+/* CIntToPy */
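+/* Convert a C integer to a Python integer, choosing the smallest suitable
+   CPython constructor for the platform's integer sizes; the byte-array
+   fallback handles any remaining exotic width. */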
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(int) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(int) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(int) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(int),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntFromPyVerify */
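+/* Helper macros for the C-int-from-Python conversions: verify that a value
+   obtained through a wider C type round-trips into the target type, jumping
+   to the overflow handlers otherwise. */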
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+ {\
+ func_type value = func_value;\
+ if (sizeof(target_type) < sizeof(func_type)) {\
+ if (unlikely(value != (func_type) (target_type) value)) {\
+ func_type zero = 0;\
+ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+ return (target_type) -1;\
+ if (is_unsigned && unlikely(value < zero))\
+ goto raise_neg_overflow;\
+ else\
+ goto raise_overflow;\
+ }\
+ }\
+ return (target_type) value;\
+ }
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_int(unsigned int value) {
+ const unsigned int neg_one = (unsigned int) ((unsigned int) 0 - (unsigned int) 1), const_zero = (unsigned int) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(unsigned int) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(unsigned int) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(unsigned int) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(unsigned int) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(unsigned int) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(unsigned int),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_short(unsigned short value) {
+ const unsigned short neg_one = (unsigned short) ((unsigned short) 0 - (unsigned short) 1), const_zero = (unsigned short) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(unsigned short) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(unsigned short) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(unsigned short) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(unsigned short) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(unsigned short) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(unsigned short),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(long) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(long) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(long) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(long),
+ little, !is_unsigned);
+ }
+}
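+/* Note: comparisons such as sizeof(long) < sizeof(long) above are always
+   false; they are artifacts of instantiating one conversion template per C
+   type, and the compiler folds the dead branches away. */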
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_uint16_t(uint16_t value) {
+ const uint16_t neg_one = (uint16_t) ((uint16_t) 0 - (uint16_t) 1), const_zero = (uint16_t) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(uint16_t) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(uint16_t) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(uint16_t) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(uint16_t) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(uint16_t) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(uint16_t),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(int) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (int) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+ return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+ return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+ return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (int) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(int) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(int) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ int val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (int) -1;
+ }
+ } else {
+ int val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (int) -1;
+ val = __Pyx_PyInt_As_int(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to int");
+ return (int) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to int");
+ return (int) -1;
+}
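+/* Note: the CIntFromPy converters try three strategies in order: read small
+   PyLong values digit-by-digit through CYTHON_USE_PYLONG_INTERNALS (the switch
+   on Py_SIZE covers up to four 15- or 30-bit digits), then PyLong_AsLong[Long]
+   guarded by the verify macros, and finally _PyLong_AsByteArray for anything
+   larger. The same template is instantiated below for enum http_method,
+   size_t and long. */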
+
+/* CIntFromPy */
+static CYTHON_INLINE enum http_method __Pyx_PyInt_As_enum__http_method(PyObject *x) {
+ const enum http_method neg_one = (enum http_method) ((enum http_method) 0 - (enum http_method) 1), const_zero = (enum http_method) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(enum http_method) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (enum http_method) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (enum http_method) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(enum http_method, digit, digits[0])
+ case 2:
+ if (8 * sizeof(enum http_method) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) >= 2 * PyLong_SHIFT) {
+ return (enum http_method) (((((enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(enum http_method) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) >= 3 * PyLong_SHIFT) {
+ return (enum http_method) (((((((enum http_method)digits[2]) << PyLong_SHIFT) | (enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(enum http_method) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) >= 4 * PyLong_SHIFT) {
+ return (enum http_method) (((((((((enum http_method)digits[3]) << PyLong_SHIFT) | (enum http_method)digits[2]) << PyLong_SHIFT) | (enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (enum http_method) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(enum http_method) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(enum http_method, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(enum http_method) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(enum http_method, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (enum http_method) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(enum http_method, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(enum http_method, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(enum http_method) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) - 1 > 2 * PyLong_SHIFT) {
+ return (enum http_method) (((enum http_method)-1)*(((((enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(enum http_method) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) - 1 > 2 * PyLong_SHIFT) {
+ return (enum http_method) ((((((enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(enum http_method) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) - 1 > 3 * PyLong_SHIFT) {
+ return (enum http_method) (((enum http_method)-1)*(((((((enum http_method)digits[2]) << PyLong_SHIFT) | (enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(enum http_method) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) - 1 > 3 * PyLong_SHIFT) {
+ return (enum http_method) ((((((((enum http_method)digits[2]) << PyLong_SHIFT) | (enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(enum http_method) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) - 1 > 4 * PyLong_SHIFT) {
+ return (enum http_method) (((enum http_method)-1)*(((((((((enum http_method)digits[3]) << PyLong_SHIFT) | (enum http_method)digits[2]) << PyLong_SHIFT) | (enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(enum http_method) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(enum http_method, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(enum http_method) - 1 > 4 * PyLong_SHIFT) {
+ return (enum http_method) ((((((((((enum http_method)digits[3]) << PyLong_SHIFT) | (enum http_method)digits[2]) << PyLong_SHIFT) | (enum http_method)digits[1]) << PyLong_SHIFT) | (enum http_method)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(enum http_method) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(enum http_method, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(enum http_method) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(enum http_method, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ enum http_method val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (enum http_method) -1;
+ }
+ } else {
+ enum http_method val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (enum http_method) -1;
+ val = __Pyx_PyInt_As_enum__http_method(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to enum http_method");
+ return (enum http_method) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to enum http_method");
+ return (enum http_method) -1;
+}
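+/* Note: the enum http_method instantiation suggests this generated module is
+   Cython glue around a C HTTP parser; the converter treats the enum like any
+   other integral type. */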
+
+/* CIntFromPy */
+static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) {
+ const size_t neg_one = (size_t) ((size_t) 0 - (size_t) 1), const_zero = (size_t) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(size_t) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(size_t, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (size_t) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (size_t) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0])
+ case 2:
+ if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) {
+ return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) {
+ return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) {
+ return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (size_t) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(size_t) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (size_t) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
+ return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
+ return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
+ return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
+ return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) {
+ return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) {
+ return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(size_t) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ size_t val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (size_t) -1;
+ }
+ } else {
+ size_t val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (size_t) -1;
+ val = __Pyx_PyInt_As_size_t(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to size_t");
+ return (size_t) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to size_t");
+ return (size_t) -1;
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(long) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (long) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+ return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+ return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+ return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (long) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(long) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(long) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ long val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (long) -1;
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (long) -1;
+ val = __Pyx_PyInt_As_long(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to long");
+ return (long) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long) -1;
+}
+
+/* FastTypeChecks */
+#if CYTHON_COMPILING_IN_CPYTHON
+static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
+ while (a) {
+ a = a->tp_base;
+ if (a == b)
+ return 1;
+ }
+ return b == &PyBaseObject_Type;
+}
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
+ PyObject *mro;
+ if (a == b) return 1;
+ mro = a->tp_mro;
+ if (likely(mro)) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(mro);
+ for (i = 0; i < n; i++) {
+ if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+ return 1;
+ }
+ return 0;
+ }
+ return __Pyx_InBases(a, b);
+}
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+ PyObject *exception, *value, *tb;
+ int res;
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&exception, &value, &tb);
+ res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ if (!res) {
+ res = PyObject_IsSubclass(err, exc_type2);
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ }
+ __Pyx_ErrRestore(exception, value, tb);
+ return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+ int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+ if (!res) {
+ res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+ }
+ return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ assert(PyExceptionClass_Check(exc_type));
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ PyObject *t = PyTuple_GET_ITEM(tuple, i);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(exc_type == t)) return 1;
+ #endif
+ if (likely(PyExceptionClass_Check(t))) {
+ if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+    } else {
+      /* non-exception-class entries in the tuple are skipped */
+    }
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+ if (likely(err == exc_type)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ if (likely(PyExceptionClass_Check(exc_type))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+ } else if (likely(PyTuple_Check(exc_type))) {
+ return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+    } else {
+      /* fall through to the generic CPython check below */
+    }
+ }
+ return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+ assert(PyExceptionClass_Check(exc_type1));
+ assert(PyExceptionClass_Check(exc_type2));
+ if (likely(err == exc_type1 || err == exc_type2)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+ }
+ return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
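+
+/* Note: these helpers reimplement PyErr_GivenExceptionMatches with direct
+   tp_mro/tp_base scans, avoiding the slower generic subclass protocol. The
+   Python 2 variant saves and restores the current exception around
+   PyObject_IsSubclass, which can itself fail and would otherwise clobber the
+   exception being matched. */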
+
+/* FetchCommonType */
+static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) {
+ PyObject* fake_module;
+ PyTypeObject* cached_type = NULL;
+ fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI);
+ if (!fake_module) return NULL;
+ Py_INCREF(fake_module);
+ cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name);
+ if (cached_type) {
+ if (!PyType_Check((PyObject*)cached_type)) {
+ PyErr_Format(PyExc_TypeError,
+ "Shared Cython type %.200s is not a type object",
+ type->tp_name);
+ goto bad;
+ }
+ if (cached_type->tp_basicsize != type->tp_basicsize) {
+ PyErr_Format(PyExc_TypeError,
+ "Shared Cython type %.200s has the wrong size, try recompiling",
+ type->tp_name);
+ goto bad;
+ }
+ } else {
+ if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad;
+ PyErr_Clear();
+ if (PyType_Ready(type) < 0) goto bad;
+ if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0)
+ goto bad;
+ Py_INCREF(type);
+ cached_type = type;
+ }
+done:
+ Py_DECREF(fake_module);
+ return cached_type;
+bad:
+ Py_XDECREF(cached_type);
+ cached_type = NULL;
+ goto done;
+}
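+/* Note: __Pyx_FetchCommonType interns shared helper types in a fake module
+   named "_cython_" CYTHON_ABI so that multiple Cython-compiled extensions in
+   one process reuse a single type object; the tp_basicsize check rejects a
+   cached type built by an incompatible ABI. */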
+
+/* PyObjectGetMethod */
+static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) {
+ PyObject *attr;
+#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP
+ PyTypeObject *tp = Py_TYPE(obj);
+ PyObject *descr;
+ descrgetfunc f = NULL;
+ PyObject **dictptr, *dict;
+ int meth_found = 0;
+ assert (*method == NULL);
+ if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) {
+ attr = __Pyx_PyObject_GetAttrStr(obj, name);
+ goto try_unpack;
+ }
+ if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) {
+ return 0;
+ }
+ descr = _PyType_Lookup(tp, name);
+ if (likely(descr != NULL)) {
+ Py_INCREF(descr);
+#if PY_MAJOR_VERSION >= 3
+ #ifdef __Pyx_CyFunction_USED
+ if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr)))
+ #else
+ if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type)))
+ #endif
+#else
+ #ifdef __Pyx_CyFunction_USED
+ if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr)))
+ #else
+ if (likely(PyFunction_Check(descr)))
+ #endif
+#endif
+ {
+ meth_found = 1;
+ } else {
+ f = Py_TYPE(descr)->tp_descr_get;
+ if (f != NULL && PyDescr_IsData(descr)) {
+ attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
+ Py_DECREF(descr);
+ goto try_unpack;
+ }
+ }
+ }
+ dictptr = _PyObject_GetDictPtr(obj);
+ if (dictptr != NULL && (dict = *dictptr) != NULL) {
+ Py_INCREF(dict);
+ attr = __Pyx_PyDict_GetItemStr(dict, name);
+ if (attr != NULL) {
+ Py_INCREF(attr);
+ Py_DECREF(dict);
+ Py_XDECREF(descr);
+ goto try_unpack;
+ }
+ Py_DECREF(dict);
+ }
+ if (meth_found) {
+ *method = descr;
+ return 1;
+ }
+ if (f != NULL) {
+ attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
+ Py_DECREF(descr);
+ goto try_unpack;
+ }
+ if (descr != NULL) {
+ *method = descr;
+ return 0;
+ }
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(name));
+#endif
+ return 0;
+#else
+ attr = __Pyx_PyObject_GetAttrStr(obj, name);
+ goto try_unpack;
+#endif
+try_unpack:
+#if CYTHON_UNPACK_METHODS
+ if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) {
+ PyObject *function = PyMethod_GET_FUNCTION(attr);
+ Py_INCREF(function);
+ Py_DECREF(attr);
+ *method = function;
+ return 1;
+ }
+#endif
+ *method = attr;
+ return 0;
+}
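+/* Note: __Pyx_PyObject_GetMethod mirrors CPython's private _PyObject_GetMethod:
+   when the attribute resolves to a plain function on the type that is not
+   shadowed by a data descriptor or an instance __dict__ entry, it returns the
+   unbound function (result 1) so the caller can pass obj explicitly and skip
+   allocating a bound-method object. */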
+
+/* PyObjectCallMethod1 */
+static PyObject* __Pyx__PyObject_CallMethod1(PyObject* method, PyObject* arg) {
+ PyObject *result = __Pyx_PyObject_CallOneArg(method, arg);
+ Py_DECREF(method);
+ return result;
+}
+static PyObject* __Pyx_PyObject_CallMethod1(PyObject* obj, PyObject* method_name, PyObject* arg) {
+ PyObject *method = NULL, *result;
+ int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
+ if (likely(is_method)) {
+ result = __Pyx_PyObject_Call2Args(method, obj, arg);
+ Py_DECREF(method);
+ return result;
+ }
+ if (unlikely(!method)) return NULL;
+ return __Pyx__PyObject_CallMethod1(method, arg);
+}
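+/* Note: CallMethod1 builds on GetMethod above: in the common case it performs
+   the equivalent of obj.method_name(arg) as a two-argument call of the unbound
+   function, and otherwise falls back to a normal bound-method call. */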
+
+/* CoroutineBase */
+#include <structmember.h>
+#include <frameobject.h>
+#define __Pyx_Coroutine_Undelegate(gen) Py_CLEAR((gen)->yieldfrom)
+static int __Pyx_PyGen__FetchStopIterationValue(CYTHON_UNUSED PyThreadState *__pyx_tstate, PyObject **pvalue) {
+ PyObject *et, *ev, *tb;
+ PyObject *value = NULL;
+ __Pyx_ErrFetch(&et, &ev, &tb);
+ if (!et) {
+ Py_XDECREF(tb);
+ Py_XDECREF(ev);
+ Py_INCREF(Py_None);
+ *pvalue = Py_None;
+ return 0;
+ }
+ if (likely(et == PyExc_StopIteration)) {
+ if (!ev) {
+ Py_INCREF(Py_None);
+ value = Py_None;
+ }
+#if PY_VERSION_HEX >= 0x030300A0
+ else if (Py_TYPE(ev) == (PyTypeObject*)PyExc_StopIteration) {
+ value = ((PyStopIterationObject *)ev)->value;
+ Py_INCREF(value);
+ Py_DECREF(ev);
+ }
+#endif
+ else if (unlikely(PyTuple_Check(ev))) {
+ if (PyTuple_GET_SIZE(ev) >= 1) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ value = PyTuple_GET_ITEM(ev, 0);
+ Py_INCREF(value);
+#else
+ value = PySequence_ITEM(ev, 0);
+#endif
+ } else {
+ Py_INCREF(Py_None);
+ value = Py_None;
+ }
+ Py_DECREF(ev);
+ }
+ else if (!__Pyx_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration)) {
+ value = ev;
+ }
+ if (likely(value)) {
+ Py_XDECREF(tb);
+ Py_DECREF(et);
+ *pvalue = value;
+ return 0;
+ }
+ } else if (!__Pyx_PyErr_GivenExceptionMatches(et, PyExc_StopIteration)) {
+ __Pyx_ErrRestore(et, ev, tb);
+ return -1;
+ }
+ PyErr_NormalizeException(&et, &ev, &tb);
+ if (unlikely(!PyObject_TypeCheck(ev, (PyTypeObject*)PyExc_StopIteration))) {
+ __Pyx_ErrRestore(et, ev, tb);
+ return -1;
+ }
+ Py_XDECREF(tb);
+ Py_DECREF(et);
+#if PY_VERSION_HEX >= 0x030300A0
+ value = ((PyStopIterationObject *)ev)->value;
+ Py_INCREF(value);
+ Py_DECREF(ev);
+#else
+ {
+ PyObject* args = __Pyx_PyObject_GetAttrStr(ev, __pyx_n_s_args);
+ Py_DECREF(ev);
+ if (likely(args)) {
+ value = PySequence_GetItem(args, 0);
+ Py_DECREF(args);
+ }
+ if (unlikely(!value)) {
+ __Pyx_ErrRestore(NULL, NULL, NULL);
+ Py_INCREF(Py_None);
+ value = Py_None;
+ }
+ }
+#endif
+ *pvalue = value;
+ return 0;
+}
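+/* Note: __Pyx_PyGen__FetchStopIterationValue extracts StopIteration.value from
+   the pending exception, handling the non-normalized tuple form and falling
+   back to exc.args[0] on older Pythons; it returns 0 with *pvalue set, or -1
+   (restoring the exception) when the pending exception is not a
+   StopIteration. */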
+static CYTHON_INLINE
+void __Pyx_Coroutine_ExceptionClear(__Pyx_ExcInfoStruct *exc_state) {
+ PyObject *t, *v, *tb;
+ t = exc_state->exc_type;
+ v = exc_state->exc_value;
+ tb = exc_state->exc_traceback;
+ exc_state->exc_type = NULL;
+ exc_state->exc_value = NULL;
+ exc_state->exc_traceback = NULL;
+ Py_XDECREF(t);
+ Py_XDECREF(v);
+ Py_XDECREF(tb);
+}
+#define __Pyx_Coroutine_AlreadyRunningError(gen) (__Pyx__Coroutine_AlreadyRunningError(gen), (PyObject*)NULL)
+static void __Pyx__Coroutine_AlreadyRunningError(CYTHON_UNUSED __pyx_CoroutineObject *gen) {
+ const char *msg;
+  if ((0)) {  /* dead branch; keeps the chain valid whichever #ifdef blocks are compiled */
+ #ifdef __Pyx_Coroutine_USED
+ } else if (__Pyx_Coroutine_Check((PyObject*)gen)) {
+ msg = "coroutine already executing";
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ } else if (__Pyx_AsyncGen_CheckExact((PyObject*)gen)) {
+ msg = "async generator already executing";
+ #endif
+ } else {
+ msg = "generator already executing";
+ }
+ PyErr_SetString(PyExc_ValueError, msg);
+}
+#define __Pyx_Coroutine_NotStartedError(gen) (__Pyx__Coroutine_NotStartedError(gen), (PyObject*)NULL)
+static void __Pyx__Coroutine_NotStartedError(CYTHON_UNUSED PyObject *gen) {
+ const char *msg;
+ if ((0)) {
+ #ifdef __Pyx_Coroutine_USED
+ } else if (__Pyx_Coroutine_Check(gen)) {
+ msg = "can't send non-None value to a just-started coroutine";
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ } else if (__Pyx_AsyncGen_CheckExact(gen)) {
+ msg = "can't send non-None value to a just-started async generator";
+ #endif
+ } else {
+ msg = "can't send non-None value to a just-started generator";
+ }
+ PyErr_SetString(PyExc_TypeError, msg);
+}
+#define __Pyx_Coroutine_AlreadyTerminatedError(gen, value, closing) (__Pyx__Coroutine_AlreadyTerminatedError(gen, value, closing), (PyObject*)NULL)
+static void __Pyx__Coroutine_AlreadyTerminatedError(CYTHON_UNUSED PyObject *gen, PyObject *value, CYTHON_UNUSED int closing) {
+ #ifdef __Pyx_Coroutine_USED
+ if (!closing && __Pyx_Coroutine_Check(gen)) {
+ PyErr_SetString(PyExc_RuntimeError, "cannot reuse already awaited coroutine");
+ } else
+ #endif
+ if (value) {
+ #ifdef __Pyx_AsyncGen_USED
+ if (__Pyx_AsyncGen_CheckExact(gen))
+ PyErr_SetNone(__Pyx_PyExc_StopAsyncIteration);
+ else
+ #endif
+ PyErr_SetNone(PyExc_StopIteration);
+ }
+}
+static
+PyObject *__Pyx_Coroutine_SendEx(__pyx_CoroutineObject *self, PyObject *value, int closing) {
+ __Pyx_PyThreadState_declare
+ PyThreadState *tstate;
+ __Pyx_ExcInfoStruct *exc_state;
+ PyObject *retval;
+ assert(!self->is_running);
+ if (unlikely(self->resume_label == 0)) {
+ if (unlikely(value && value != Py_None)) {
+ return __Pyx_Coroutine_NotStartedError((PyObject*)self);
+ }
+ }
+ if (unlikely(self->resume_label == -1)) {
+ return __Pyx_Coroutine_AlreadyTerminatedError((PyObject*)self, value, closing);
+ }
+#if CYTHON_FAST_THREAD_STATE
+ __Pyx_PyThreadState_assign
+ tstate = __pyx_tstate;
+#else
+ tstate = __Pyx_PyThreadState_Current;
+#endif
+ exc_state = &self->gi_exc_state;
+ if (exc_state->exc_type) {
+    #if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
+    /* traceback frame linking is CPython-specific; skipped on PyPy/Pyston */
+    #else
+ if (exc_state->exc_traceback) {
+ PyTracebackObject *tb = (PyTracebackObject *) exc_state->exc_traceback;
+ PyFrameObject *f = tb->tb_frame;
+ Py_XINCREF(tstate->frame);
+ assert(f->f_back == NULL);
+ f->f_back = tstate->frame;
+ }
+ #endif
+ }
+#if CYTHON_USE_EXC_INFO_STACK
+ exc_state->previous_item = tstate->exc_info;
+ tstate->exc_info = exc_state;
+#else
+ if (exc_state->exc_type) {
+ __Pyx_ExceptionSwap(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback);
+ } else {
+ __Pyx_Coroutine_ExceptionClear(exc_state);
+ __Pyx_ExceptionSave(&exc_state->exc_type, &exc_state->exc_value, &exc_state->exc_traceback);
+ }
+#endif
+ self->is_running = 1;
+ retval = self->body((PyObject *) self, tstate, value);
+ self->is_running = 0;
+#if CYTHON_USE_EXC_INFO_STACK
+ exc_state = &self->gi_exc_state;
+ tstate->exc_info = exc_state->previous_item;
+ exc_state->previous_item = NULL;
+ __Pyx_Coroutine_ResetFrameBackpointer(exc_state);
+#endif
+ return retval;
+}
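+/* Note: SendEx is the core resume step. resume_label 0 means not yet started
+   (sending a non-None value is a TypeError) and -1 means already terminated.
+   Around the call into self->body it installs the coroutine's saved exception
+   state, via tstate->exc_info on CYTHON_USE_EXC_INFO_STACK builds and via an
+   explicit swap otherwise, and links the saved traceback's frame to the
+   current thread frame so tracebacks chain correctly. */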
+static CYTHON_INLINE void __Pyx_Coroutine_ResetFrameBackpointer(__Pyx_ExcInfoStruct *exc_state) {
+ PyObject *exc_tb = exc_state->exc_traceback;
+ if (likely(exc_tb)) {
+#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_PYSTON
+#else
+ PyTracebackObject *tb = (PyTracebackObject *) exc_tb;
+ PyFrameObject *f = tb->tb_frame;
+ Py_CLEAR(f->f_back);
+#endif
+ }
+}
+static CYTHON_INLINE
+PyObject *__Pyx_Coroutine_MethodReturn(CYTHON_UNUSED PyObject* gen, PyObject *retval) {
+ if (unlikely(!retval)) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (!__Pyx_PyErr_Occurred()) {
+ PyObject *exc = PyExc_StopIteration;
+ #ifdef __Pyx_AsyncGen_USED
+ if (__Pyx_AsyncGen_CheckExact(gen))
+ exc = __Pyx_PyExc_StopAsyncIteration;
+ #endif
+ __Pyx_PyErr_SetNone(exc);
+ }
+ }
+ return retval;
+}
+static CYTHON_INLINE
+PyObject *__Pyx_Coroutine_FinishDelegation(__pyx_CoroutineObject *gen) {
+ PyObject *ret;
+ PyObject *val = NULL;
+ __Pyx_Coroutine_Undelegate(gen);
+ __Pyx_PyGen__FetchStopIterationValue(__Pyx_PyThreadState_Current, &val);
+ ret = __Pyx_Coroutine_SendEx(gen, val, 0);
+ Py_XDECREF(val);
+ return ret;
+}
+static PyObject *__Pyx_Coroutine_Send(PyObject *self, PyObject *value) {
+ PyObject *retval;
+ __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
+ PyObject *yf = gen->yieldfrom;
+ if (unlikely(gen->is_running))
+ return __Pyx_Coroutine_AlreadyRunningError(gen);
+ if (yf) {
+ PyObject *ret;
+ gen->is_running = 1;
+ #ifdef __Pyx_Generator_USED
+ if (__Pyx_Generator_CheckExact(yf)) {
+ ret = __Pyx_Coroutine_Send(yf, value);
+ } else
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__Pyx_Coroutine_Check(yf)) {
+ ret = __Pyx_Coroutine_Send(yf, value);
+ } else
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ if (__pyx_PyAsyncGenASend_CheckExact(yf)) {
+ ret = __Pyx_async_gen_asend_send(yf, value);
+ } else
+ #endif
+ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
+ if (PyGen_CheckExact(yf)) {
+ ret = _PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value);
+ } else
+ #endif
+ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03050000 && defined(PyCoro_CheckExact) && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
+ if (PyCoro_CheckExact(yf)) {
+ ret = _PyGen_Send((PyGenObject*)yf, value == Py_None ? NULL : value);
+ } else
+ #endif
+ {
+ if (value == Py_None)
+ ret = Py_TYPE(yf)->tp_iternext(yf);
+ else
+ ret = __Pyx_PyObject_CallMethod1(yf, __pyx_n_s_send, value);
+ }
+ gen->is_running = 0;
+ if (likely(ret)) {
+ return ret;
+ }
+ retval = __Pyx_Coroutine_FinishDelegation(gen);
+ } else {
+ retval = __Pyx_Coroutine_SendEx(gen, value, 0);
+ }
+ return __Pyx_Coroutine_MethodReturn(self, retval);
+}
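+/* Note: Send special-cases every delegation target it can see through
+   (yield-from/await on other Cython generators and coroutines, async-gen
+   asend objects, and native CPython generators where calling _PyGen_Send
+   directly is known to be safe) and only falls back to the generic
+   tp_iternext / .send() protocol for foreign iterables. */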
+static int __Pyx_Coroutine_CloseIter(__pyx_CoroutineObject *gen, PyObject *yf) {
+ PyObject *retval = NULL;
+ int err = 0;
+ #ifdef __Pyx_Generator_USED
+ if (__Pyx_Generator_CheckExact(yf)) {
+ retval = __Pyx_Coroutine_Close(yf);
+ if (!retval)
+ return -1;
+ } else
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__Pyx_Coroutine_Check(yf)) {
+ retval = __Pyx_Coroutine_Close(yf);
+ if (!retval)
+ return -1;
+ } else
+ if (__Pyx_CoroutineAwait_CheckExact(yf)) {
+ retval = __Pyx_CoroutineAwait_Close((__pyx_CoroutineAwaitObject*)yf, NULL);
+ if (!retval)
+ return -1;
+ } else
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ if (__pyx_PyAsyncGenASend_CheckExact(yf)) {
+ retval = __Pyx_async_gen_asend_close(yf, NULL);
+ } else
+ if (__pyx_PyAsyncGenAThrow_CheckExact(yf)) {
+ retval = __Pyx_async_gen_athrow_close(yf, NULL);
+ } else
+ #endif
+ {
+ PyObject *meth;
+ gen->is_running = 1;
+ meth = __Pyx_PyObject_GetAttrStr(yf, __pyx_n_s_close);
+ if (unlikely(!meth)) {
+ if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_WriteUnraisable(yf);
+ }
+ PyErr_Clear();
+ } else {
+ retval = PyObject_CallFunction(meth, NULL);
+ Py_DECREF(meth);
+ if (!retval)
+ err = -1;
+ }
+ gen->is_running = 0;
+ }
+ Py_XDECREF(retval);
+ return err;
+}
+static PyObject *__Pyx_Generator_Next(PyObject *self) {
+ __pyx_CoroutineObject *gen = (__pyx_CoroutineObject*) self;
+ PyObject *yf = gen->yieldfrom;
+ if (unlikely(gen->is_running))
+ return __Pyx_Coroutine_AlreadyRunningError(gen);
+ if (yf) {
+ PyObject *ret;
+ gen->is_running = 1;
+ #ifdef __Pyx_Generator_USED
+ if (__Pyx_Generator_CheckExact(yf)) {
+ ret = __Pyx_Generator_Next(yf);
+ } else
+ #endif
+ #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03030000 && (defined(__linux__) || PY_VERSION_HEX >= 0x030600B3)
+ if (PyGen_CheckExact(yf)) {
+ ret = _PyGen_Send((PyGenObject*)yf, NULL);
+ } else
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__Pyx_Coroutine_Check(yf)) {
+ ret = __Pyx_Coroutine_Send(yf, Py_None);
+ } else
+ #endif
+ ret = Py_TYPE(yf)->tp_iternext(yf);
+ gen->is_running = 0;
+ if (likely(ret)) {
+ return ret;
+ }
+ return __Pyx_Coroutine_FinishDelegation(gen);
+ }
+ return __Pyx_Coroutine_SendEx(gen, Py_None, 0);
+}
+static PyObject *__Pyx_Coroutine_Close_Method(PyObject *self, CYTHON_UNUSED PyObject *arg) {
+ return __Pyx_Coroutine_Close(self);
+}
+static PyObject *__Pyx_Coroutine_Close(PyObject *self) {
+ __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
+ PyObject *retval, *raised_exception;
+ PyObject *yf = gen->yieldfrom;
+ int err = 0;
+ if (unlikely(gen->is_running))
+ return __Pyx_Coroutine_AlreadyRunningError(gen);
+ if (yf) {
+ Py_INCREF(yf);
+ err = __Pyx_Coroutine_CloseIter(gen, yf);
+ __Pyx_Coroutine_Undelegate(gen);
+ Py_DECREF(yf);
+ }
+ if (err == 0)
+ PyErr_SetNone(PyExc_GeneratorExit);
+ retval = __Pyx_Coroutine_SendEx(gen, NULL, 1);
+ if (unlikely(retval)) {
+ const char *msg;
+ Py_DECREF(retval);
+ if ((0)) {
+ #ifdef __Pyx_Coroutine_USED
+ } else if (__Pyx_Coroutine_Check(self)) {
+ msg = "coroutine ignored GeneratorExit";
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ } else if (__Pyx_AsyncGen_CheckExact(self)) {
+#if PY_VERSION_HEX < 0x03060000
+ msg = "async generator ignored GeneratorExit - might require Python 3.6+ finalisation (PEP 525)";
+#else
+ msg = "async generator ignored GeneratorExit";
+#endif
+ #endif
+ } else {
+ msg = "generator ignored GeneratorExit";
+ }
+ PyErr_SetString(PyExc_RuntimeError, msg);
+ return NULL;
+ }
+ raised_exception = PyErr_Occurred();
+ if (likely(!raised_exception || __Pyx_PyErr_GivenExceptionMatches2(raised_exception, PyExc_GeneratorExit, PyExc_StopIteration))) {
+ if (raised_exception) PyErr_Clear();
+ Py_INCREF(Py_None);
+ return Py_None;
+ }
+ return NULL;
+}
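+/* Note: Close injects GeneratorExit via SendEx(gen, NULL, 1) after first
+   closing any delegated sub-iterator. A generator that swallows the exception
+   and yields again is reported as RuntimeError ("... ignored GeneratorExit"),
+   matching CPython's generator.close() semantics. */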
+static PyObject *__Pyx__Coroutine_Throw(PyObject *self, PyObject *typ, PyObject *val, PyObject *tb,
+ PyObject *args, int close_on_genexit) {
+ __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
+ PyObject *yf = gen->yieldfrom;
+ if (unlikely(gen->is_running))
+ return __Pyx_Coroutine_AlreadyRunningError(gen);
+ if (yf) {
+ PyObject *ret;
+ Py_INCREF(yf);
+ if (__Pyx_PyErr_GivenExceptionMatches(typ, PyExc_GeneratorExit) && close_on_genexit) {
+ int err = __Pyx_Coroutine_CloseIter(gen, yf);
+ Py_DECREF(yf);
+ __Pyx_Coroutine_Undelegate(gen);
+ if (err < 0)
+ return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0));
+ goto throw_here;
+ }
+ gen->is_running = 1;
+ if (0
+ #ifdef __Pyx_Generator_USED
+ || __Pyx_Generator_CheckExact(yf)
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ || __Pyx_Coroutine_Check(yf)
+ #endif
+ ) {
+ ret = __Pyx__Coroutine_Throw(yf, typ, val, tb, args, close_on_genexit);
+ #ifdef __Pyx_Coroutine_USED
+ } else if (__Pyx_CoroutineAwait_CheckExact(yf)) {
+ ret = __Pyx__Coroutine_Throw(((__pyx_CoroutineAwaitObject*)yf)->coroutine, typ, val, tb, args, close_on_genexit);
+ #endif
+ } else {
+ PyObject *meth = __Pyx_PyObject_GetAttrStr(yf, __pyx_n_s_throw);
+ if (unlikely(!meth)) {
+ Py_DECREF(yf);
+ if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ gen->is_running = 0;
+ return NULL;
+ }
+ PyErr_Clear();
+ __Pyx_Coroutine_Undelegate(gen);
+ gen->is_running = 0;
+ goto throw_here;
+ }
+ if (likely(args)) {
+ ret = PyObject_CallObject(meth, args);
+ } else {
+ ret = PyObject_CallFunctionObjArgs(meth, typ, val, tb, NULL);
+ }
+ Py_DECREF(meth);
+ }
+ gen->is_running = 0;
+ Py_DECREF(yf);
+ if (!ret) {
+ ret = __Pyx_Coroutine_FinishDelegation(gen);
+ }
+ return __Pyx_Coroutine_MethodReturn(self, ret);
+ }
+throw_here:
+ __Pyx_Raise(typ, val, tb, NULL);
+ return __Pyx_Coroutine_MethodReturn(self, __Pyx_Coroutine_SendEx(gen, NULL, 0));
+}
+static PyObject *__Pyx_Coroutine_Throw(PyObject *self, PyObject *args) {
+ PyObject *typ;
+ PyObject *val = NULL;
+ PyObject *tb = NULL;
+ if (!PyArg_UnpackTuple(args, (char *)"throw", 1, 3, &typ, &val, &tb))
+ return NULL;
+ return __Pyx__Coroutine_Throw(self, typ, val, tb, args, 1);
+}
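+/* Note: Throw forwards the exception into a delegated sub-iterator when one
+   exists, treating GeneratorExit specially (close the sub-iterator, then raise
+   in this frame); targets without a .throw() method are undelegated and the
+   exception is raised here instead. */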
+static CYTHON_INLINE int __Pyx_Coroutine_traverse_excstate(__Pyx_ExcInfoStruct *exc_state, visitproc visit, void *arg) {
+ Py_VISIT(exc_state->exc_type);
+ Py_VISIT(exc_state->exc_value);
+ Py_VISIT(exc_state->exc_traceback);
+ return 0;
+}
+static int __Pyx_Coroutine_traverse(__pyx_CoroutineObject *gen, visitproc visit, void *arg) {
+ Py_VISIT(gen->closure);
+ Py_VISIT(gen->classobj);
+ Py_VISIT(gen->yieldfrom);
+ return __Pyx_Coroutine_traverse_excstate(&gen->gi_exc_state, visit, arg);
+}
+static int __Pyx_Coroutine_clear(PyObject *self) {
+ __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
+ Py_CLEAR(gen->closure);
+ Py_CLEAR(gen->classobj);
+ Py_CLEAR(gen->yieldfrom);
+ __Pyx_Coroutine_ExceptionClear(&gen->gi_exc_state);
+#ifdef __Pyx_AsyncGen_USED
+ if (__Pyx_AsyncGen_CheckExact(self)) {
+ Py_CLEAR(((__pyx_PyAsyncGenObject*)gen)->ag_finalizer);
+ }
+#endif
+ Py_CLEAR(gen->gi_code);
+ Py_CLEAR(gen->gi_name);
+ Py_CLEAR(gen->gi_qualname);
+ Py_CLEAR(gen->gi_modulename);
+ return 0;
+}
+static void __Pyx_Coroutine_dealloc(PyObject *self) {
+ __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
+ PyObject_GC_UnTrack(gen);
+ if (gen->gi_weakreflist != NULL)
+ PyObject_ClearWeakRefs(self);
+ if (gen->resume_label >= 0) {
+ PyObject_GC_Track(self);
+#if PY_VERSION_HEX >= 0x030400a1 && CYTHON_USE_TP_FINALIZE
+ if (PyObject_CallFinalizerFromDealloc(self))
+#else
+ Py_TYPE(gen)->tp_del(self);
+ if (self->ob_refcnt > 0)
+#endif
+ {
+ return;
+ }
+ PyObject_GC_UnTrack(self);
+ }
+#ifdef __Pyx_AsyncGen_USED
+ if (__Pyx_AsyncGen_CheckExact(self)) {
+ /* We have to handle this case for asynchronous generators
+ right here, because this code has to be between UNTRACK
+ and GC_Del. */
+ Py_CLEAR(((__pyx_PyAsyncGenObject*)self)->ag_finalizer);
+ }
+#endif
+ __Pyx_Coroutine_clear(self);
+ PyObject_GC_Del(gen);
+}
+static void __Pyx_Coroutine_del(PyObject *self) {
+ PyObject *error_type, *error_value, *error_traceback;
+ __pyx_CoroutineObject *gen = (__pyx_CoroutineObject *) self;
+ __Pyx_PyThreadState_declare
+ if (gen->resume_label < 0) {
+ return;
+ }
+#if !CYTHON_USE_TP_FINALIZE
+ assert(self->ob_refcnt == 0);
+ self->ob_refcnt = 1;
+#endif
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&error_type, &error_value, &error_traceback);
+#ifdef __Pyx_AsyncGen_USED
+ if (__Pyx_AsyncGen_CheckExact(self)) {
+ __pyx_PyAsyncGenObject *agen = (__pyx_PyAsyncGenObject*)self;
+ PyObject *finalizer = agen->ag_finalizer;
+ if (finalizer && !agen->ag_closed) {
+ PyObject *res = __Pyx_PyObject_CallOneArg(finalizer, self);
+ if (unlikely(!res)) {
+ PyErr_WriteUnraisable(self);
+ } else {
+ Py_DECREF(res);
+ }
+ __Pyx_ErrRestore(error_type, error_value, error_traceback);
+ return;
+ }
+ }
+#endif
+ if (unlikely(gen->resume_label == 0 && !error_value)) {
+#ifdef __Pyx_Coroutine_USED
+#ifdef __Pyx_Generator_USED
+ if (!__Pyx_Generator_CheckExact(self))
+#endif
+ {
+ PyObject_GC_UnTrack(self);
+#if PY_MAJOR_VERSION >= 3 || defined(PyErr_WarnFormat)
+ if (unlikely(PyErr_WarnFormat(PyExc_RuntimeWarning, 1, "coroutine '%.50S' was never awaited", gen->gi_qualname) < 0))
+ PyErr_WriteUnraisable(self);
+#else
+ {PyObject *msg;
+ char *cmsg;
+ #if CYTHON_COMPILING_IN_PYPY
+ msg = NULL;
+ cmsg = (char*) "coroutine was never awaited";
+ #else
+ char *cname;
+ PyObject *qualname;
+ qualname = gen->gi_qualname;
+ cname = PyString_AS_STRING(qualname);
+ msg = PyString_FromFormat("coroutine '%.50s' was never awaited", cname);
+ if (unlikely(!msg)) {
+ PyErr_Clear();
+ cmsg = (char*) "coroutine was never awaited";
+ } else {
+ cmsg = PyString_AS_STRING(msg);
+ }
+ #endif
+ if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, cmsg, 1) < 0))
+ PyErr_WriteUnraisable(self);
+ Py_XDECREF(msg);}
+#endif
+ PyObject_GC_Track(self);
+ }
+#endif
+ } else {
+ PyObject *res = __Pyx_Coroutine_Close(self);
+ if (unlikely(!res)) {
+ if (PyErr_Occurred())
+ PyErr_WriteUnraisable(self);
+ } else {
+ Py_DECREF(res);
+ }
+ }
+ __Pyx_ErrRestore(error_type, error_value, error_traceback);
+#if !CYTHON_USE_TP_FINALIZE
+ assert(self->ob_refcnt > 0);
+ if (--self->ob_refcnt == 0) {
+ return;
+ }
+ {
+ Py_ssize_t refcnt = self->ob_refcnt;
+ _Py_NewReference(self);
+ self->ob_refcnt = refcnt;
+ }
+#if CYTHON_COMPILING_IN_CPYTHON
+ assert(PyType_IS_GC(self->ob_type) &&
+ _Py_AS_GC(self)->gc.gc_refs != _PyGC_REFS_UNTRACKED);
+ _Py_DEC_REFTOTAL;
+#endif
+#ifdef COUNT_ALLOCS
+ --Py_TYPE(self)->tp_frees;
+ --Py_TYPE(self)->tp_allocs;
+#endif
+#endif
+}
+static PyObject *
+__Pyx_Coroutine_get_name(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context)
+{
+ PyObject *name = self->gi_name;
+ if (unlikely(!name)) name = Py_None;
+ Py_INCREF(name);
+ return name;
+}
+static int
+__Pyx_Coroutine_set_name(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context)
+{
+ PyObject *tmp;
+#if PY_MAJOR_VERSION >= 3
+ if (unlikely(value == NULL || !PyUnicode_Check(value)))
+#else
+ if (unlikely(value == NULL || !PyString_Check(value)))
+#endif
+ {
+ PyErr_SetString(PyExc_TypeError,
+ "__name__ must be set to a string object");
+ return -1;
+ }
+ tmp = self->gi_name;
+ Py_INCREF(value);
+ self->gi_name = value;
+ Py_XDECREF(tmp);
+ return 0;
+}
+static PyObject *
+__Pyx_Coroutine_get_qualname(__pyx_CoroutineObject *self, CYTHON_UNUSED void *context)
+{
+ PyObject *name = self->gi_qualname;
+ if (unlikely(!name)) name = Py_None;
+ Py_INCREF(name);
+ return name;
+}
+static int
+__Pyx_Coroutine_set_qualname(__pyx_CoroutineObject *self, PyObject *value, CYTHON_UNUSED void *context)
+{
+ PyObject *tmp;
+#if PY_MAJOR_VERSION >= 3
+ if (unlikely(value == NULL || !PyUnicode_Check(value)))
+#else
+ if (unlikely(value == NULL || !PyString_Check(value)))
+#endif
+ {
+ PyErr_SetString(PyExc_TypeError,
+ "__qualname__ must be set to a string object");
+ return -1;
+ }
+ tmp = self->gi_qualname;
+ Py_INCREF(value);
+ self->gi_qualname = value;
+ Py_XDECREF(tmp);
+ return 0;
+}
+static __pyx_CoroutineObject *__Pyx__Coroutine_New(
+ PyTypeObject* type, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
+ PyObject *name, PyObject *qualname, PyObject *module_name) {
+ __pyx_CoroutineObject *gen = PyObject_GC_New(__pyx_CoroutineObject, type);
+ if (unlikely(!gen))
+ return NULL;
+ return __Pyx__Coroutine_NewInit(gen, body, code, closure, name, qualname, module_name);
+}
+static __pyx_CoroutineObject *__Pyx__Coroutine_NewInit(
+ __pyx_CoroutineObject *gen, __pyx_coroutine_body_t body, PyObject *code, PyObject *closure,
+ PyObject *name, PyObject *qualname, PyObject *module_name) {
+ gen->body = body;
+ gen->closure = closure;
+ Py_XINCREF(closure);
+ gen->is_running = 0;
+ gen->resume_label = 0;
+ gen->classobj = NULL;
+ gen->yieldfrom = NULL;
+ gen->gi_exc_state.exc_type = NULL;
+ gen->gi_exc_state.exc_value = NULL;
+ gen->gi_exc_state.exc_traceback = NULL;
+#if CYTHON_USE_EXC_INFO_STACK
+ gen->gi_exc_state.previous_item = NULL;
+#endif
+ gen->gi_weakreflist = NULL;
+ Py_XINCREF(qualname);
+ gen->gi_qualname = qualname;
+ Py_XINCREF(name);
+ gen->gi_name = name;
+ Py_XINCREF(module_name);
+ gen->gi_modulename = module_name;
+ Py_XINCREF(code);
+ gen->gi_code = code;
+ PyObject_GC_Track(gen);
+ return gen;
+}
+
+/* PatchModuleWithCoroutine */
+static PyObject* __Pyx_Coroutine_patch_module(PyObject* module, const char* py_code) {
+#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+ int result;
+ PyObject *globals, *result_obj;
+ globals = PyDict_New(); if (unlikely(!globals)) goto ignore;
+ result = PyDict_SetItemString(globals, "_cython_coroutine_type",
+ #ifdef __Pyx_Coroutine_USED
+ (PyObject*)__pyx_CoroutineType);
+ #else
+ Py_None);
+ #endif
+ if (unlikely(result < 0)) goto ignore;
+ result = PyDict_SetItemString(globals, "_cython_generator_type",
+ #ifdef __Pyx_Generator_USED
+ (PyObject*)__pyx_GeneratorType);
+ #else
+ Py_None);
+ #endif
+ if (unlikely(result < 0)) goto ignore;
+ if (unlikely(PyDict_SetItemString(globals, "_module", module) < 0)) goto ignore;
+ if (unlikely(PyDict_SetItemString(globals, "__builtins__", __pyx_b) < 0)) goto ignore;
+ result_obj = PyRun_String(py_code, Py_file_input, globals, globals);
+ if (unlikely(!result_obj)) goto ignore;
+ Py_DECREF(result_obj);
+ Py_DECREF(globals);
+ return module;
+ignore:
+ Py_XDECREF(globals);
+ PyErr_WriteUnraisable(module);
+ if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning, "Cython module failed to patch module with custom type", 1) < 0)) {
+ Py_DECREF(module);
+ module = NULL;
+ }
+#else
+ py_code++;
+#endif
+ return module;
+}
+
+/* PatchGeneratorABC */
+#ifndef CYTHON_REGISTER_ABCS
+#define CYTHON_REGISTER_ABCS 1
+#endif
+#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+static PyObject* __Pyx_patch_abc_module(PyObject *module);
+static PyObject* __Pyx_patch_abc_module(PyObject *module) {
+ module = __Pyx_Coroutine_patch_module(
+ module, ""
+"if _cython_generator_type is not None:\n"
+" try: Generator = _module.Generator\n"
+" except AttributeError: pass\n"
+" else: Generator.register(_cython_generator_type)\n"
+"if _cython_coroutine_type is not None:\n"
+" try: Coroutine = _module.Coroutine\n"
+" except AttributeError: pass\n"
+" else: Coroutine.register(_cython_coroutine_type)\n"
+ );
+ return module;
+}
+#endif
+static int __Pyx_patch_abc(void) {
+#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+ static int abc_patched = 0;
+ if (CYTHON_REGISTER_ABCS && !abc_patched) {
+ PyObject *module;
+ module = PyImport_ImportModule((PY_MAJOR_VERSION >= 3) ? "collections.abc" : "collections");
+ if (!module) {
+ PyErr_WriteUnraisable(NULL);
+ if (unlikely(PyErr_WarnEx(PyExc_RuntimeWarning,
+ ((PY_MAJOR_VERSION >= 3) ?
+ "Cython module failed to register with collections.abc module" :
+ "Cython module failed to register with collections module"), 1) < 0)) {
+ return -1;
+ }
+ } else {
+ module = __Pyx_patch_abc_module(module);
+ abc_patched = 1;
+ if (unlikely(!module))
+ return -1;
+ Py_DECREF(module);
+ }
+ module = PyImport_ImportModule("backports_abc");
+ if (module) {
+ module = __Pyx_patch_abc_module(module);
+ Py_XDECREF(module);
+ }
+ if (!module) {
+ PyErr_Clear();
+ }
+ }
+#else
+ if ((0)) __Pyx_Coroutine_patch_module(NULL, NULL);
+#endif
+ return 0;
+}
+
+/* Generator */
+static PyMethodDef __pyx_Generator_methods[] = {
+ {"send", (PyCFunction) __Pyx_Coroutine_Send, METH_O,
+ (char*) PyDoc_STR("send(arg) -> send 'arg' into generator,\nreturn next yielded value or raise StopIteration.")},
+ {"throw", (PyCFunction) __Pyx_Coroutine_Throw, METH_VARARGS,
+ (char*) PyDoc_STR("throw(typ[,val[,tb]]) -> raise exception in generator,\nreturn next yielded value or raise StopIteration.")},
+ {"close", (PyCFunction) __Pyx_Coroutine_Close_Method, METH_NOARGS,
+ (char*) PyDoc_STR("close() -> raise GeneratorExit inside generator.")},
+ {0, 0, 0, 0}
+};
+static PyMemberDef __pyx_Generator_memberlist[] = {
+ {(char *) "gi_running", T_BOOL, offsetof(__pyx_CoroutineObject, is_running), READONLY, NULL},
+ {(char*) "gi_yieldfrom", T_OBJECT, offsetof(__pyx_CoroutineObject, yieldfrom), READONLY,
+ (char*) PyDoc_STR("object being iterated by 'yield from', or None")},
+ {(char*) "gi_code", T_OBJECT, offsetof(__pyx_CoroutineObject, gi_code), READONLY, NULL},
+ {0, 0, 0, 0, 0}
+};
+static PyGetSetDef __pyx_Generator_getsets[] = {
+ {(char *) "__name__", (getter)__Pyx_Coroutine_get_name, (setter)__Pyx_Coroutine_set_name,
+ (char*) PyDoc_STR("name of the generator"), 0},
+ {(char *) "__qualname__", (getter)__Pyx_Coroutine_get_qualname, (setter)__Pyx_Coroutine_set_qualname,
+ (char*) PyDoc_STR("qualified name of the generator"), 0},
+ {0, 0, 0, 0, 0}
+};
+static PyTypeObject __pyx_GeneratorType_type = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "generator",
+ sizeof(__pyx_CoroutineObject),
+ 0,
+ (destructor) __Pyx_Coroutine_dealloc,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_HAVE_FINALIZE,
+ 0,
+ (traverseproc) __Pyx_Coroutine_traverse,
+ 0,
+ 0,
+ offsetof(__pyx_CoroutineObject, gi_weakreflist),
+ 0,
+ (iternextfunc) __Pyx_Generator_Next,
+ __pyx_Generator_methods,
+ __pyx_Generator_memberlist,
+ __pyx_Generator_getsets,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+#if CYTHON_USE_TP_FINALIZE
+ 0,
+#else
+ __Pyx_Coroutine_del,
+#endif
+ 0,
+#if CYTHON_USE_TP_FINALIZE
+ __Pyx_Coroutine_del,
+#elif PY_VERSION_HEX >= 0x030400a1
+ 0,
+#endif
+#if PY_VERSION_HEX >= 0x030800b1
+ 0,
+#endif
+#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0,
+#endif
+};
+static int __pyx_Generator_init(void) {
+ __pyx_GeneratorType_type.tp_getattro = __Pyx_PyObject_GenericGetAttrNoDict;
+ __pyx_GeneratorType_type.tp_iter = PyObject_SelfIter;
+ __pyx_GeneratorType = __Pyx_FetchCommonType(&__pyx_GeneratorType_type);
+ if (unlikely(!__pyx_GeneratorType)) {
+ return -1;
+ }
+ return 0;
+}
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+ char ctversion[4], rtversion[4];
+ PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+ PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+ if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+ char message[200];
+ PyOS_snprintf(message, sizeof(message),
+ "compiletime version %s of module '%.100s' "
+ "does not match runtime version %s",
+ ctversion, __Pyx_MODULE_NAME, rtversion);
+ return PyErr_WarnEx(NULL, message, 1);
+ }
+ return 0;
+}
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ if (PyObject_Hash(*t->p) == -1)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+ return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+ Py_ssize_t ignore;
+ return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ char* defenc_c;
+ PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+ if (!defenc) return NULL;
+ defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ {
+ char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+ char* c;
+ for (c = defenc_c; c < end; c++) {
+ if ((unsigned char) (*c) >= 128) {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+ }
+ }
+#endif
+ *length = PyBytes_GET_SIZE(defenc);
+ return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ if (likely(PyUnicode_IS_ASCII(o))) {
+ *length = PyUnicode_GET_LENGTH(o);
+ return PyUnicode_AsUTF8(o);
+ } else {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+#else
+ return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+ if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+ PyUnicode_Check(o)) {
+ return __Pyx_PyUnicode_AsStringAndSize(o, length);
+ } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+ if (PyByteArray_Check(o)) {
+ *length = PyByteArray_GET_SIZE(o);
+ return PyByteArray_AS_STRING(o);
+ } else
+#endif
+ {
+ char* result;
+ int r = PyBytes_AsStringAndSize(o, &result, length);
+ if (unlikely(r < 0)) {
+ return NULL;
+ } else {
+ return result;
+ }
+ }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
+ int retval;
+ if (unlikely(!x)) return -1;
+ retval = __Pyx_PyObject_IsTrue(x);
+ Py_DECREF(x);
+ return retval;
+}
+static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
+#if PY_MAJOR_VERSION >= 3
+ if (PyLong_Check(result)) {
+ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
+ "__int__ returned non-int (type %.200s). "
+ "The ability to return an instance of a strict subclass of int "
+ "is deprecated, and may be removed in a future version of Python.",
+ Py_TYPE(result)->tp_name)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ return result;
+ }
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%.4s__ returned non-%.4s (type %.200s)",
+ type_name, type_name, Py_TYPE(result)->tp_name);
+ Py_DECREF(result);
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
+#if CYTHON_USE_TYPE_SLOTS
+ PyNumberMethods *m;
+#endif
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x) || PyLong_Check(x)))
+#else
+ if (likely(PyLong_Check(x)))
+#endif
+ return __Pyx_NewRef(x);
+#if CYTHON_USE_TYPE_SLOTS
+ m = Py_TYPE(x)->tp_as_number;
+ #if PY_MAJOR_VERSION < 3
+ if (m && m->nb_int) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = m->nb_long(x);
+ }
+ #else
+ if (likely(m && m->nb_int)) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ #endif
+#else
+ if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
+ res = PyNumber_Int(x);
+ }
+#endif
+ if (likely(res)) {
+#if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
+#else
+ if (unlikely(!PyLong_CheckExact(res))) {
+#endif
+ return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject *x;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_CheckExact(b))) {
+ if (sizeof(Py_ssize_t) >= sizeof(long))
+ return PyInt_AS_LONG(b);
+ else
+ return PyInt_AsSsize_t(b);
+ }
+#endif
+ if (likely(PyLong_CheckExact(b))) {
+ #if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)b)->ob_digit;
+ const Py_ssize_t size = Py_SIZE(b);
+ if (likely(__Pyx_sst_abs(size) <= 1)) {
+ ival = likely(size) ? digits[0] : 0;
+ if (size == -1) ival = -ival;
+ return ival;
+ } else {
+ switch (size) {
+ case 2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ }
+ }
+ #endif
+ return PyLong_AsSsize_t(b);
+ }
+ x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
+ return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+ return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/third_party/python/aiohttp/aiohttp/_http_parser.pyx b/third_party/python/aiohttp/aiohttp/_http_parser.pyx
new file mode 100644
index 0000000000..c24e31057a
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_http_parser.pyx
@@ -0,0 +1,875 @@
+#cython: language_level=3
+#
+# Based on https://github.com/MagicStack/httptools
+#
+from __future__ import absolute_import, print_function
+
+from cpython cimport (
+ Py_buffer,
+ PyBUF_SIMPLE,
+ PyBuffer_Release,
+ PyBytes_AsString,
+ PyBytes_AsStringAndSize,
+ PyObject_GetBuffer,
+)
+from cpython.mem cimport PyMem_Free, PyMem_Malloc
+from libc.limits cimport ULLONG_MAX
+from libc.string cimport memcpy
+
+from multidict import CIMultiDict as _CIMultiDict, CIMultiDictProxy as _CIMultiDictProxy
+from yarl import URL as _URL
+
+from aiohttp import hdrs
+
+from .http_exceptions import (
+ BadHttpMessage,
+ BadStatusLine,
+ ContentLengthError,
+ InvalidHeader,
+ InvalidURLError,
+ LineTooLong,
+ PayloadEncodingError,
+ TransferEncodingError,
+)
+from .http_parser import DeflateBuffer as _DeflateBuffer
+from .http_writer import (
+ HttpVersion as _HttpVersion,
+ HttpVersion10 as _HttpVersion10,
+ HttpVersion11 as _HttpVersion11,
+)
+from .streams import EMPTY_PAYLOAD as _EMPTY_PAYLOAD, StreamReader as _StreamReader
+
+cimport cython
+
+from aiohttp cimport _cparser as cparser
+
+include "_headers.pxi"
+
+from aiohttp cimport _find_header
+
+DEF DEFAULT_FREELIST_SIZE = 250
+
+cdef extern from "Python.h":
+ int PyByteArray_Resize(object, Py_ssize_t) except -1
+ Py_ssize_t PyByteArray_Size(object) except -1
+ char* PyByteArray_AsString(object)
+
+__all__ = ('HttpRequestParser', 'HttpResponseParser',
+ 'RawRequestMessage', 'RawResponseMessage')
+
+cdef object URL = _URL
+cdef object URL_build = URL.build
+cdef object CIMultiDict = _CIMultiDict
+cdef object CIMultiDictProxy = _CIMultiDictProxy
+cdef object HttpVersion = _HttpVersion
+cdef object HttpVersion10 = _HttpVersion10
+cdef object HttpVersion11 = _HttpVersion11
+cdef object SEC_WEBSOCKET_KEY1 = hdrs.SEC_WEBSOCKET_KEY1
+cdef object CONTENT_ENCODING = hdrs.CONTENT_ENCODING
+cdef object EMPTY_PAYLOAD = _EMPTY_PAYLOAD
+cdef object StreamReader = _StreamReader
+cdef object DeflateBuffer = _DeflateBuffer
+
+
+cdef inline object extend(object buf, const char* at, size_t length):
+ cdef Py_ssize_t s
+ cdef char* ptr
+ s = PyByteArray_Size(buf)
+ PyByteArray_Resize(buf, s + length)
+ ptr = PyByteArray_AsString(buf)
+ memcpy(ptr + s, at, length)
+
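+# `extend` grows the target bytearray in place (resize, then memcpy) so the
+# parser callbacks can append C buffer slices without creating intermediate
+# bytes objects. A rough pure-Python sketch of the same operation, with
+# `chunk` standing in for the C-level `at`/`length` pair, would be:
+#
+#     def extend_py(buf: bytearray, chunk: bytes) -> None:
+#         buf.extend(chunk)  # one extra copy compared to the C path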
+
+DEF METHODS_COUNT = 34
+
+cdef list _http_method = []
+
+for i in range(METHODS_COUNT):
+ _http_method.append(
+ cparser.http_method_str(<cparser.http_method> i).decode('ascii'))
+
+
+cdef inline str http_method_str(int i):
+ if i < METHODS_COUNT:
+ return <str>_http_method[i]
+ else:
+ return "<unknown>"
+
+cdef inline object find_header(bytes raw_header):
+ cdef Py_ssize_t size
+ cdef char *buf
+ cdef int idx
+ PyBytes_AsStringAndSize(raw_header, &buf, &size)
+ idx = _find_header.find_header(buf, size)
+ if idx == -1:
+ return raw_header.decode('utf-8', 'surrogateescape')
+ return headers[idx]
+
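+# `find_header` interns well-known header names: a successful lookup returns
+# the shared constant from the generated `headers` table (see _headers.pxi),
+# while unknown names fall back to a surrogateescape-decoded str. A hedged
+# usage sketch (the exact table contents come from _headers.pxi, not here):
+#
+#     find_header(b'Content-Type')   # likely the interned hdrs.CONTENT_TYPE
+#     find_header(b'X-Unknown')      # plain str 'X-Unknown'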
+
+@cython.freelist(DEFAULT_FREELIST_SIZE)
+cdef class RawRequestMessage:
+ cdef readonly str method
+ cdef readonly str path
+ cdef readonly object version # HttpVersion
+ cdef readonly object headers # CIMultiDict
+ cdef readonly object raw_headers # tuple
+ cdef readonly object should_close
+ cdef readonly object compression
+ cdef readonly object upgrade
+ cdef readonly object chunked
+ cdef readonly object url # yarl.URL
+
+ def __init__(self, method, path, version, headers, raw_headers,
+ should_close, compression, upgrade, chunked, url):
+ self.method = method
+ self.path = path
+ self.version = version
+ self.headers = headers
+ self.raw_headers = raw_headers
+ self.should_close = should_close
+ self.compression = compression
+ self.upgrade = upgrade
+ self.chunked = chunked
+ self.url = url
+
+ def __repr__(self):
+ info = []
+ info.append(("method", self.method))
+ info.append(("path", self.path))
+ info.append(("version", self.version))
+ info.append(("headers", self.headers))
+ info.append(("raw_headers", self.raw_headers))
+ info.append(("should_close", self.should_close))
+ info.append(("compression", self.compression))
+ info.append(("upgrade", self.upgrade))
+ info.append(("chunked", self.chunked))
+ info.append(("url", self.url))
+ sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+ return '<RawRequestMessage(' + sinfo + ')>'
+
+ def _replace(self, **dct):
+ cdef RawRequestMessage ret
+ ret = _new_request_message(self.method,
+ self.path,
+ self.version,
+ self.headers,
+ self.raw_headers,
+ self.should_close,
+ self.compression,
+ self.upgrade,
+ self.chunked,
+ self.url)
+ if "method" in dct:
+ ret.method = dct["method"]
+ if "path" in dct:
+ ret.path = dct["path"]
+ if "version" in dct:
+ ret.version = dct["version"]
+ if "headers" in dct:
+ ret.headers = dct["headers"]
+ if "raw_headers" in dct:
+ ret.raw_headers = dct["raw_headers"]
+ if "should_close" in dct:
+ ret.should_close = dct["should_close"]
+ if "compression" in dct:
+ ret.compression = dct["compression"]
+ if "upgrade" in dct:
+ ret.upgrade = dct["upgrade"]
+ if "chunked" in dct:
+ ret.chunked = dct["chunked"]
+ if "url" in dct:
+ ret.url = dct["url"]
+ return ret
+
+cdef _new_request_message(str method,
+ str path,
+ object version,
+ object headers,
+ object raw_headers,
+ bint should_close,
+ object compression,
+ bint upgrade,
+ bint chunked,
+ object url):
+ cdef RawRequestMessage ret
+ ret = RawRequestMessage.__new__(RawRequestMessage)
+ ret.method = method
+ ret.path = path
+ ret.version = version
+ ret.headers = headers
+ ret.raw_headers = raw_headers
+ ret.should_close = should_close
+ ret.compression = compression
+ ret.upgrade = upgrade
+ ret.chunked = chunked
+ ret.url = url
+ return ret
+
+
+@cython.freelist(DEFAULT_FREELIST_SIZE)
+cdef class RawResponseMessage:
+ cdef readonly object version # HttpVersion
+ cdef readonly int code
+ cdef readonly str reason
+ cdef readonly object headers # CIMultiDict
+ cdef readonly object raw_headers # tuple
+ cdef readonly object should_close
+ cdef readonly object compression
+ cdef readonly object upgrade
+ cdef readonly object chunked
+
+ def __init__(self, version, code, reason, headers, raw_headers,
+ should_close, compression, upgrade, chunked):
+ self.version = version
+ self.code = code
+ self.reason = reason
+ self.headers = headers
+ self.raw_headers = raw_headers
+ self.should_close = should_close
+ self.compression = compression
+ self.upgrade = upgrade
+ self.chunked = chunked
+
+ def __repr__(self):
+ info = []
+ info.append(("version", self.version))
+ info.append(("code", self.code))
+ info.append(("reason", self.reason))
+ info.append(("headers", self.headers))
+ info.append(("raw_headers", self.raw_headers))
+ info.append(("should_close", self.should_close))
+ info.append(("compression", self.compression))
+ info.append(("upgrade", self.upgrade))
+ info.append(("chunked", self.chunked))
+ sinfo = ', '.join(name + '=' + repr(val) for name, val in info)
+ return '<RawResponseMessage(' + sinfo + ')>'
+
+
+cdef _new_response_message(object version,
+ int code,
+ str reason,
+ object headers,
+ object raw_headers,
+ bint should_close,
+ object compression,
+ bint upgrade,
+ bint chunked):
+ cdef RawResponseMessage ret
+ ret = RawResponseMessage.__new__(RawResponseMessage)
+ ret.version = version
+ ret.code = code
+ ret.reason = reason
+ ret.headers = headers
+ ret.raw_headers = raw_headers
+ ret.should_close = should_close
+ ret.compression = compression
+ ret.upgrade = upgrade
+ ret.chunked = chunked
+ return ret
+
+
+@cython.internal
+cdef class HttpParser:
+
+ cdef:
+ cparser.http_parser* _cparser
+ cparser.http_parser_settings* _csettings
+
+ bytearray _raw_name
+ bytearray _raw_value
+ bint _has_value
+
+ object _protocol
+ object _loop
+ object _timer
+
+ size_t _max_line_size
+ size_t _max_field_size
+ size_t _max_headers
+ bint _response_with_body
+ bint _read_until_eof
+
+ bint _started
+ object _url
+ bytearray _buf
+ str _path
+ str _reason
+ object _headers
+ list _raw_headers
+ bint _upgraded
+ list _messages
+ object _payload
+ bint _payload_error
+ object _payload_exception
+ object _last_error
+ bint _auto_decompress
+ int _limit
+
+ str _content_encoding
+
+ Py_buffer py_buf
+
+ def __cinit__(self):
+ self._cparser = <cparser.http_parser*> \
+ PyMem_Malloc(sizeof(cparser.http_parser))
+ if self._cparser is NULL:
+ raise MemoryError()
+
+ self._csettings = <cparser.http_parser_settings*> \
+ PyMem_Malloc(sizeof(cparser.http_parser_settings))
+ if self._csettings is NULL:
+ raise MemoryError()
+
+ def __dealloc__(self):
+ PyMem_Free(self._cparser)
+ PyMem_Free(self._csettings)
+
+ cdef _init(self, cparser.http_parser_type mode,
+ object protocol, object loop, int limit,
+ object timer=None,
+ size_t max_line_size=8190, size_t max_headers=32768,
+ size_t max_field_size=8190, payload_exception=None,
+ bint response_with_body=True, bint read_until_eof=False,
+ bint auto_decompress=True):
+ cparser.http_parser_init(self._cparser, mode)
+ self._cparser.data = <void*>self
+ self._cparser.content_length = 0
+
+ cparser.http_parser_settings_init(self._csettings)
+
+ self._protocol = protocol
+ self._loop = loop
+ self._timer = timer
+
+ self._buf = bytearray()
+ self._payload = None
+ self._payload_error = 0
+ self._payload_exception = payload_exception
+ self._messages = []
+
+ self._raw_name = bytearray()
+ self._raw_value = bytearray()
+ self._has_value = False
+
+ self._max_line_size = max_line_size
+ self._max_headers = max_headers
+ self._max_field_size = max_field_size
+ self._response_with_body = response_with_body
+ self._read_until_eof = read_until_eof
+ self._upgraded = False
+ self._auto_decompress = auto_decompress
+ self._content_encoding = None
+
+ self._csettings.on_url = cb_on_url
+ self._csettings.on_status = cb_on_status
+ self._csettings.on_header_field = cb_on_header_field
+ self._csettings.on_header_value = cb_on_header_value
+ self._csettings.on_headers_complete = cb_on_headers_complete
+ self._csettings.on_body = cb_on_body
+ self._csettings.on_message_begin = cb_on_message_begin
+ self._csettings.on_message_complete = cb_on_message_complete
+ self._csettings.on_chunk_header = cb_on_chunk_header
+ self._csettings.on_chunk_complete = cb_on_chunk_complete
+
+ self._last_error = None
+ self._limit = limit
+
+ cdef _process_header(self):
+ if self._raw_name:
+ raw_name = bytes(self._raw_name)
+ raw_value = bytes(self._raw_value)
+
+ name = find_header(raw_name)
+ value = raw_value.decode('utf-8', 'surrogateescape')
+
+ self._headers.add(name, value)
+
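+            # the `is` check below is safe: for known names find_header
+            # returns the interned constant shared with hdrs.CONTENT_ENCODING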
+ if name is CONTENT_ENCODING:
+ self._content_encoding = value
+
+ PyByteArray_Resize(self._raw_name, 0)
+ PyByteArray_Resize(self._raw_value, 0)
+ self._has_value = False
+ self._raw_headers.append((raw_name, raw_value))
+
+ cdef _on_header_field(self, char* at, size_t length):
+ cdef Py_ssize_t size
+ cdef char *buf
+ if self._has_value:
+ self._process_header()
+
+ size = PyByteArray_Size(self._raw_name)
+ PyByteArray_Resize(self._raw_name, size + length)
+ buf = PyByteArray_AsString(self._raw_name)
+ memcpy(buf + size, at, length)
+
+ cdef _on_header_value(self, char* at, size_t length):
+ cdef Py_ssize_t size
+ cdef char *buf
+
+ size = PyByteArray_Size(self._raw_value)
+ PyByteArray_Resize(self._raw_value, size + length)
+ buf = PyByteArray_AsString(self._raw_value)
+ memcpy(buf + size, at, length)
+ self._has_value = True
+
+ cdef _on_headers_complete(self):
+ self._process_header()
+
+ method = http_method_str(self._cparser.method)
+ should_close = not cparser.http_should_keep_alive(self._cparser)
+ upgrade = self._cparser.upgrade
+ chunked = self._cparser.flags & cparser.F_CHUNKED
+
+ raw_headers = tuple(self._raw_headers)
+ headers = CIMultiDictProxy(self._headers)
+
+ if upgrade or self._cparser.method == 5: # cparser.CONNECT:
+ self._upgraded = True
+
+ # do not support old websocket spec
+ if SEC_WEBSOCKET_KEY1 in headers:
+ raise InvalidHeader(SEC_WEBSOCKET_KEY1)
+
+ encoding = None
+ enc = self._content_encoding
+ if enc is not None:
+ self._content_encoding = None
+ enc = enc.lower()
+ if enc in ('gzip', 'deflate', 'br'):
+ encoding = enc
+
+ if self._cparser.type == cparser.HTTP_REQUEST:
+ msg = _new_request_message(
+ method, self._path,
+ self.http_version(), headers, raw_headers,
+ should_close, encoding, upgrade, chunked, self._url)
+ else:
+ msg = _new_response_message(
+ self.http_version(), self._cparser.status_code, self._reason,
+ headers, raw_headers, should_close, encoding,
+ upgrade, chunked)
+
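+        # A real payload stream is only attached when a body can actually
+        # follow: a known non-zero Content-Length, chunked framing, CONNECT
+        # (method == 5), or a response of unknown length that we were asked
+        # to read until EOF; everything else gets the shared EMPTY_PAYLOAD.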
+ if (ULLONG_MAX > self._cparser.content_length > 0 or chunked or
+ self._cparser.method == 5 or # CONNECT: 5
+ (self._cparser.status_code >= 199 and
+ self._cparser.content_length == ULLONG_MAX and
+ self._read_until_eof)
+ ):
+ payload = StreamReader(
+ self._protocol, timer=self._timer, loop=self._loop,
+ limit=self._limit)
+ else:
+ payload = EMPTY_PAYLOAD
+
+ self._payload = payload
+ if encoding is not None and self._auto_decompress:
+ self._payload = DeflateBuffer(payload, encoding)
+
+ if not self._response_with_body:
+ payload = EMPTY_PAYLOAD
+
+ self._messages.append((msg, payload))
+
+ cdef _on_message_complete(self):
+ self._payload.feed_eof()
+ self._payload = None
+
+ cdef _on_chunk_header(self):
+ self._payload.begin_http_chunk_receiving()
+
+ cdef _on_chunk_complete(self):
+ self._payload.end_http_chunk_receiving()
+
+ cdef object _on_status_complete(self):
+ pass
+
+ cdef inline http_version(self):
+ cdef cparser.http_parser* parser = self._cparser
+
+ if parser.http_major == 1:
+ if parser.http_minor == 0:
+ return HttpVersion10
+ elif parser.http_minor == 1:
+ return HttpVersion11
+
+ return HttpVersion(parser.http_major, parser.http_minor)
+
+ ### Public API ###
+
+ def feed_eof(self):
+ cdef bytes desc
+
+ if self._payload is not None:
+ if self._cparser.flags & cparser.F_CHUNKED:
+ raise TransferEncodingError(
+                    "Not enough data to satisfy transfer length header.")
+ elif self._cparser.flags & cparser.F_CONTENTLENGTH:
+ raise ContentLengthError(
+                    "Not enough data to satisfy content length header.")
+ elif self._cparser.http_errno != cparser.HPE_OK:
+ desc = cparser.http_errno_description(
+ <cparser.http_errno> self._cparser.http_errno)
+ raise PayloadEncodingError(desc.decode('latin-1'))
+ else:
+ self._payload.feed_eof()
+ elif self._started:
+ self._on_headers_complete()
+ if self._messages:
+ return self._messages[-1][0]
+
+ def feed_data(self, data):
+ cdef:
+ size_t data_len
+ size_t nb
+
+ PyObject_GetBuffer(data, &self.py_buf, PyBUF_SIMPLE)
+ data_len = <size_t>self.py_buf.len
+
+ nb = cparser.http_parser_execute(
+ self._cparser,
+ self._csettings,
+ <char*>self.py_buf.buf,
+ data_len)
+
+ PyBuffer_Release(&self.py_buf)
+
+ if (self._cparser.http_errno != cparser.HPE_OK):
+ if self._payload_error == 0:
+ if self._last_error is not None:
+ ex = self._last_error
+ self._last_error = None
+ else:
+ ex = parser_error_from_errno(
+ <cparser.http_errno> self._cparser.http_errno)
+ self._payload = None
+ raise ex
+
+ if self._messages:
+ messages = self._messages
+ self._messages = []
+ else:
+ messages = ()
+
+ if self._upgraded:
+ return messages, True, data[nb:]
+ else:
+ return messages, False, b''
+
+ def set_upgraded(self, val):
+ self._upgraded = val
+
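+# A hedged sketch of how the public feed API is typically driven (`protocol`
+# and `loop` are placeholders for the caller's transport protocol and event
+# loop, not objects defined in this module):
+#
+#     parser = HttpRequestParser(protocol, loop, 2 ** 16)
+#     messages, upgraded, tail = parser.feed_data(b'GET / HTTP/1.1\r\n\r\n')
+#     for msg, payload in messages:
+#         ...              # msg is a RawRequestMessage, payload a stream
+#     if upgraded:
+#         ...              # `tail` holds the bytes received after the upgrade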
+
+cdef class HttpRequestParser(HttpParser):
+
+ def __init__(self, protocol, loop, int limit, timer=None,
+ size_t max_line_size=8190, size_t max_headers=32768,
+ size_t max_field_size=8190, payload_exception=None,
+ bint response_with_body=True, bint read_until_eof=False,
+ ):
+ self._init(cparser.HTTP_REQUEST, protocol, loop, limit, timer,
+ max_line_size, max_headers, max_field_size,
+ payload_exception, response_with_body, read_until_eof)
+
+ cdef object _on_status_complete(self):
+ cdef Py_buffer py_buf
+ if not self._buf:
+ return
+ self._path = self._buf.decode('utf-8', 'surrogateescape')
+ if self._cparser.method == 5: # CONNECT
+ self._url = URL(self._path)
+ else:
+ PyObject_GetBuffer(self._buf, &py_buf, PyBUF_SIMPLE)
+ try:
+ self._url = _parse_url(<char*>py_buf.buf,
+ py_buf.len)
+ finally:
+ PyBuffer_Release(&py_buf)
+ PyByteArray_Resize(self._buf, 0)
+
+
+cdef class HttpResponseParser(HttpParser):
+
+ def __init__(self, protocol, loop, int limit, timer=None,
+ size_t max_line_size=8190, size_t max_headers=32768,
+ size_t max_field_size=8190, payload_exception=None,
+ bint response_with_body=True, bint read_until_eof=False,
+ bint auto_decompress=True
+ ):
+ self._init(cparser.HTTP_RESPONSE, protocol, loop, limit, timer,
+ max_line_size, max_headers, max_field_size,
+ payload_exception, response_with_body, read_until_eof,
+ auto_decompress)
+
+ cdef object _on_status_complete(self):
+ if self._buf:
+ self._reason = self._buf.decode('utf-8', 'surrogateescape')
+ PyByteArray_Resize(self._buf, 0)
+ else:
+ self._reason = self._reason or ''
+
+cdef int cb_on_message_begin(cparser.http_parser* parser) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+
+ pyparser._started = True
+ pyparser._headers = CIMultiDict()
+ pyparser._raw_headers = []
+ PyByteArray_Resize(pyparser._buf, 0)
+ pyparser._path = None
+ pyparser._reason = None
+ return 0
+
+
+cdef int cb_on_url(cparser.http_parser* parser,
+ const char *at, size_t length) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ try:
+ if length > pyparser._max_line_size:
+ raise LineTooLong(
+ 'Status line is too long', pyparser._max_line_size, length)
+ extend(pyparser._buf, at, length)
+ except BaseException as ex:
+ pyparser._last_error = ex
+ return -1
+ else:
+ return 0
+
+
+cdef int cb_on_status(cparser.http_parser* parser,
+ const char *at, size_t length) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ cdef str reason
+ try:
+ if length > pyparser._max_line_size:
+ raise LineTooLong(
+ 'Status line is too long', pyparser._max_line_size, length)
+ extend(pyparser._buf, at, length)
+ except BaseException as ex:
+ pyparser._last_error = ex
+ return -1
+ else:
+ return 0
+
+
+cdef int cb_on_header_field(cparser.http_parser* parser,
+ const char *at, size_t length) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ cdef Py_ssize_t size
+ try:
+ pyparser._on_status_complete()
+ size = len(pyparser._raw_name) + length
+ if size > pyparser._max_field_size:
+ raise LineTooLong(
+ 'Header name is too long', pyparser._max_field_size, size)
+ pyparser._on_header_field(at, length)
+ except BaseException as ex:
+ pyparser._last_error = ex
+ return -1
+ else:
+ return 0
+
+
+cdef int cb_on_header_value(cparser.http_parser* parser,
+ const char *at, size_t length) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ cdef Py_ssize_t size
+ try:
+ size = len(pyparser._raw_value) + length
+ if size > pyparser._max_field_size:
+ raise LineTooLong(
+ 'Header value is too long', pyparser._max_field_size, size)
+ pyparser._on_header_value(at, length)
+ except BaseException as ex:
+ pyparser._last_error = ex
+ return -1
+ else:
+ return 0
+
+
+cdef int cb_on_headers_complete(cparser.http_parser* parser) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ try:
+ pyparser._on_status_complete()
+ pyparser._on_headers_complete()
+ except BaseException as exc:
+ pyparser._last_error = exc
+ return -1
+ else:
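+        # http_parser treats a return of 2 from on_headers_complete as
+        # "expect neither a body nor further responses", which is how
+        # upgrade and CONNECT requests hand the connection over.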
+ if pyparser._cparser.upgrade or pyparser._cparser.method == 5: # CONNECT
+ return 2
+ else:
+ return 0
+
+
+cdef int cb_on_body(cparser.http_parser* parser,
+ const char *at, size_t length) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ cdef bytes body = at[:length]
+ try:
+ pyparser._payload.feed_data(body, length)
+ except BaseException as exc:
+ if pyparser._payload_exception is not None:
+ pyparser._payload.set_exception(pyparser._payload_exception(str(exc)))
+ else:
+ pyparser._payload.set_exception(exc)
+ pyparser._payload_error = 1
+ return -1
+ else:
+ return 0
+
+
+cdef int cb_on_message_complete(cparser.http_parser* parser) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ try:
+ pyparser._started = False
+ pyparser._on_message_complete()
+ except BaseException as exc:
+ pyparser._last_error = exc
+ return -1
+ else:
+ return 0
+
+
+cdef int cb_on_chunk_header(cparser.http_parser* parser) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ try:
+ pyparser._on_chunk_header()
+ except BaseException as exc:
+ pyparser._last_error = exc
+ return -1
+ else:
+ return 0
+
+
+cdef int cb_on_chunk_complete(cparser.http_parser* parser) except -1:
+ cdef HttpParser pyparser = <HttpParser>parser.data
+ try:
+ pyparser._on_chunk_complete()
+ except BaseException as exc:
+ pyparser._last_error = exc
+ return -1
+ else:
+ return 0
+
+
+cdef parser_error_from_errno(cparser.http_errno errno):
+ cdef bytes desc = cparser.http_errno_description(errno)
+
+ if errno in (cparser.HPE_CB_message_begin,
+ cparser.HPE_CB_url,
+ cparser.HPE_CB_header_field,
+ cparser.HPE_CB_header_value,
+ cparser.HPE_CB_headers_complete,
+ cparser.HPE_CB_body,
+ cparser.HPE_CB_message_complete,
+ cparser.HPE_CB_status,
+ cparser.HPE_CB_chunk_header,
+ cparser.HPE_CB_chunk_complete):
+ cls = BadHttpMessage
+
+ elif errno == cparser.HPE_INVALID_STATUS:
+ cls = BadStatusLine
+
+ elif errno == cparser.HPE_INVALID_METHOD:
+ cls = BadStatusLine
+
+ elif errno == cparser.HPE_INVALID_URL:
+ cls = InvalidURLError
+
+ else:
+ cls = BadHttpMessage
+
+ return cls(desc.decode('latin-1'))
+
+
+def parse_url(url):
+ cdef:
+ Py_buffer py_buf
+ char* buf_data
+
+ PyObject_GetBuffer(url, &py_buf, PyBUF_SIMPLE)
+ try:
+ buf_data = <char*>py_buf.buf
+ return _parse_url(buf_data, py_buf.len)
+ finally:
+ PyBuffer_Release(&py_buf)
+
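+# A hedged example of `parse_url` output (the components mirror the field
+# offsets extracted in _parse_url below; yarl builds the final URL):
+#
+#     parse_url(b'http://u:p@example.com:8080/path?q=1#frag')
+#     # ~ URL.build(scheme='http', user='u', password='p',
+#     #             host='example.com', port=8080, path='/path',
+#     #             query_string='q=1', fragment='frag', encoded=True)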
+
+cdef _parse_url(char* buf_data, size_t length):
+ cdef:
+ cparser.http_parser_url* parsed
+ int res
+ str schema = None
+ str host = None
+ object port = None
+ str path = None
+ str query = None
+ str fragment = None
+ str user = None
+ str password = None
+ str userinfo = None
+ object result = None
+ int off
+ int ln
+
+ parsed = <cparser.http_parser_url*> \
+ PyMem_Malloc(sizeof(cparser.http_parser_url))
+ if parsed is NULL:
+ raise MemoryError()
+ cparser.http_parser_url_init(parsed)
+ try:
+ res = cparser.http_parser_parse_url(buf_data, length, 0, parsed)
+
+ if res == 0:
+ if parsed.field_set & (1 << cparser.UF_SCHEMA):
+ off = parsed.field_data[<int>cparser.UF_SCHEMA].off
+ ln = parsed.field_data[<int>cparser.UF_SCHEMA].len
+ schema = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ else:
+ schema = ''
+
+ if parsed.field_set & (1 << cparser.UF_HOST):
+ off = parsed.field_data[<int>cparser.UF_HOST].off
+ ln = parsed.field_data[<int>cparser.UF_HOST].len
+ host = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ else:
+ host = ''
+
+ if parsed.field_set & (1 << cparser.UF_PORT):
+ port = parsed.port
+
+ if parsed.field_set & (1 << cparser.UF_PATH):
+ off = parsed.field_data[<int>cparser.UF_PATH].off
+ ln = parsed.field_data[<int>cparser.UF_PATH].len
+ path = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ else:
+ path = ''
+
+ if parsed.field_set & (1 << cparser.UF_QUERY):
+ off = parsed.field_data[<int>cparser.UF_QUERY].off
+ ln = parsed.field_data[<int>cparser.UF_QUERY].len
+ query = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ else:
+ query = ''
+
+ if parsed.field_set & (1 << cparser.UF_FRAGMENT):
+ off = parsed.field_data[<int>cparser.UF_FRAGMENT].off
+ ln = parsed.field_data[<int>cparser.UF_FRAGMENT].len
+ fragment = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+ else:
+ fragment = ''
+
+ if parsed.field_set & (1 << cparser.UF_USERINFO):
+ off = parsed.field_data[<int>cparser.UF_USERINFO].off
+ ln = parsed.field_data[<int>cparser.UF_USERINFO].len
+ userinfo = buf_data[off:off+ln].decode('utf-8', 'surrogateescape')
+
+ user, sep, password = userinfo.partition(':')
+
+ return URL_build(scheme=schema,
+ user=user, password=password, host=host, port=port,
+ path=path, query_string=query, fragment=fragment, encoded=True)
+ else:
+ raise InvalidURLError("invalid url {!r}".format(buf_data))
+ finally:
+ PyMem_Free(parsed)
diff --git a/third_party/python/aiohttp/aiohttp/_http_writer.c b/third_party/python/aiohttp/aiohttp/_http_writer.c
new file mode 100644
index 0000000000..09e3efa5b0
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_http_writer.c
@@ -0,0 +1,5840 @@
+/* Generated by Cython 0.29.21 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+ #error Cython requires Python 2.6+ or Python 3.3+.
+#else
+#define CYTHON_ABI "0_29_21"
+#define CYTHON_HEX_VERSION 0x001D15F0
+#define CYTHON_FUTURE_DIVISION 1
+#include <stddef.h>
+#ifndef offsetof
+ #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef HAVE_LONG_LONG
+ #if PY_VERSION_HEX >= 0x02070000
+ #define HAVE_LONG_LONG
+ #endif
+#endif
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+ #define CYTHON_COMPILING_IN_PYPY 1
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #if PY_VERSION_HEX < 0x03050000
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#elif defined(PYSTON_VERSION)
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 1
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#else
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 1
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
+ #define CYTHON_USE_PYTYPE_LOOKUP 1
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
+ #define CYTHON_USE_PYLONG_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #if PY_VERSION_HEX < 0x030300F0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #elif !defined(CYTHON_USE_UNICODE_WRITER)
+ #define CYTHON_USE_UNICODE_WRITER 1
+ #endif
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #ifndef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 1
+ #endif
+ #ifndef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
+ #endif
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
+ #endif
+ #ifndef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
+ #endif
+ #ifndef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
+ #endif
+#endif
+#if !defined(CYTHON_FAST_PYCCALL)
+#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
+#endif
+#if CYTHON_USE_PYLONG_INTERNALS
+ #include "longintrepr.h"
+ #undef SHIFT
+ #undef BASE
+ #undef MASK
+ #ifdef SIZEOF_VOID_P
+ enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+ #endif
+#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+# if defined(__cplusplus)
+ template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include <stdint.h>
+#endif
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
+ #define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyClass_Type
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#endif
+ #define __Pyx_DefaultClassType PyType_Type
+#endif
+#ifndef Py_TPFLAGS_CHECKTYPES
+ #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#ifndef METH_STACKLESS
+ #define METH_STACKLESS 0
+#endif
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ #ifndef METH_FASTCALL
+ #define METH_FASTCALL 0x80
+ #endif
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
+ Py_ssize_t nargs, PyObject *kwnames);
+#else
+ #define __Pyx_PyCFunctionFast _PyCFunctionFast
+ #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
+#endif
+#if CYTHON_FAST_PYCCALL
+#define __Pyx_PyFastCFunction_Check(func)\
+ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
+#else
+#define __Pyx_PyFastCFunction_Check(func) 0
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
+ #define PyMem_RawMalloc(n) PyMem_Malloc(n)
+ #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
+ #define PyMem_RawFree(p) PyMem_Free(p)
+#endif
+#if CYTHON_COMPILING_IN_PYSTON
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x03060000
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
+#elif PY_VERSION_HEX >= 0x03000000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#else
+ #define __Pyx_PyThreadState_Current _PyThreadState_Current
+#endif
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+ *key = PyThread_create_key();
+ return 0;
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+ Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+ *key = Py_tss_NEEDS_INIT;
+ return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+ PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+ return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+ PyThread_delete_key(*key);
+ *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+ return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+ return PyThread_get_key_value(*key);
+}
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
+#else
+#define __Pyx_PyDict_NewPresized(n) PyDict_New()
+#endif
+#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
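+/* PEP 393 (Python 3.3+) exposes compact unicode internals via
+   PyUnicode_KIND/DATA/READ; the fallback branch maps the same macros onto
+   the legacy Py_UNICODE representation. */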
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+ #define CYTHON_PEP393_ENABLED 1
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
+ #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
+ #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
+ #else
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
+ #endif
+#else
+ #define CYTHON_PEP393_ENABLED 0
+ #define PyUnicode_1BYTE_KIND 1
+ #define PyUnicode_2BYTE_KIND 2
+ #define PyUnicode_4BYTE_KIND 4
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
+ #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+ #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
+ #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
+ #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
+#endif
+#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
+#else
+ #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
+ #define PyObject_ASCII(o) PyObject_Repr(o)
+#endif
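+/* On Python 3, alias the Python 2 string C-API names to their unicode
+   equivalents so Py2-style generated code compiles unchanged; the PyInt_*
+   names are mapped to PyLong_* further below for the same reason. */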
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#ifndef PyObject_Unicode
+ #define PyObject_Unicode PyObject_Str
+#endif
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+#if PY_VERSION_HEX >= 0x030900A4
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
+#else
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
+#endif
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+ #define PyNumber_Int PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+ #ifndef PyUnicode_InternFromString
+ #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+ #endif
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
+#else
+ #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
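+/* tp_as_async (PEP 492) exists from Python 3.5.0b1; on older 3.x the async
+   methods pointer is stored in the spare tp_reserved slot instead. */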
+#if CYTHON_USE_ASYNC_SLOTS
+ #if PY_VERSION_HEX >= 0x030500B1
+ #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+ #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+ #else
+ #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+ #endif
+#else
+ #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+ typedef struct {
+ unaryfunc am_await;
+ unaryfunc am_aiter;
+ unaryfunc am_anext;
+ } __Pyx_PyAsyncMethodsStruct;
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+ #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+ float value;
+ memset(&value, 0xFF, sizeof(value));
+ return value;
+}
+#endif
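+/* The fallback above relies on IEEE 754: a float with every bit set has an
+   all-ones exponent and a nonzero mantissa, i.e. it is a (quiet) NaN. */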
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+#define __PYX_MARK_ERR_POS(f_index, lineno) \
+ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#define __PYX_HAVE__aiohttp___http_writer
+#define __PYX_HAVE_API__aiohttp___http_writer
+/* Early includes */
+#include <string.h>
+#include <stdio.h>
+#include <stdint.h>
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+ (sizeof(type) < sizeof(Py_ssize_t)) ||\
+ (sizeof(type) > sizeof(Py_ssize_t) &&\
+ likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX) &&\
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+ v == (type)PY_SSIZE_T_MIN))) ||\
+ (sizeof(type) == sizeof(Py_ssize_t) &&\
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX))) )
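+/* __Pyx_fits_Py_ssize_t checks, mostly at compile time via sizeof, whether a
+   value of the given C integer type can be represented as a Py_ssize_t. */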
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+ return (size_t) i < (size_t) limit;
+}
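+/* A single unsigned comparison covers both bounds: a negative i wraps to a
+   huge size_t, so this is equivalent to (0 <= i && i < limit) for limit >= 0. */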
+#if defined (__cplusplus) && __cplusplus >= 201103L
+ #include <cstdlib>
+ #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+ #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+ #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+ #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
+ const Py_UNICODE *u_end = u;
+ while (*u_end++) ;
+ return (size_t)(u_end - u - 1);
+}
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
+#define __Pyx_PySequence_Tuple(obj)\
+ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_ASSUME_SAFE_MACROS
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
+#else
+#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
+#endif
+#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ const char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ if (strcmp(default_encoding_c, "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (!ascii_chars_u) goto bad;
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ Py_DECREF(ascii_chars_u);
+ Py_DECREF(ascii_chars_b);
+ }
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+
+static PyObject *__pyx_m = NULL;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_cython_runtime = NULL;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm = __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "aiohttp/_http_writer.pyx",
+ "type.pxd",
+};
+
+/*--- Type declarations ---*/
+struct __pyx_t_7aiohttp_12_http_writer_Writer;
+
+/* "aiohttp/_http_writer.pyx":18
+ * # ----------------- writer ---------------------------
+ *
+ * cdef struct Writer: # <<<<<<<<<<<<<<
+ * char *buf
+ * Py_ssize_t size
+ */
+struct __pyx_t_7aiohttp_12_http_writer_Writer {
+ char *buf;
+ Py_ssize_t size;
+ Py_ssize_t pos;
+};
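+/* Writer is a growable byte buffer: buf initially points at the static
+   16 KiB BUFFER and moves to heap memory on overflow; size is the current
+   capacity and pos the write cursor. */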
+
+/* --- Runtime support code (head) --- */
+/* Refnanny.proto */
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ if (acquire_gil) {\
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ PyGILState_Release(__pyx_gilstate_save);\
+ } else {\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ }
+#else
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+ #define __Pyx_RefNannyFinishContext()\
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_DECREF(tmp);\
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
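+/* RefNanny is Cython's optional refcount-debugging layer; with
+   CYTHON_REFNANNY disabled the macros above collapse to plain
+   Py_INCREF/Py_DECREF or to no-ops. */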
+
+/* PyObjectGetAttrStr.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/* GetBuiltinName.proto */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+/* PyThreadStateGet.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
+#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
+#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
+#else
+#define __Pyx_PyThreadState_declare
+#define __Pyx_PyThreadState_assign
+#define __Pyx_PyErr_Occurred() PyErr_Occurred()
+#endif
+
+/* PyErrFetchRestore.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
+#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
+#else
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#endif
+#else
+#define __Pyx_PyErr_Clear() PyErr_Clear()
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
+#endif
+
+/* WriteUnraisableException.proto */
+static void __Pyx_WriteUnraisable(const char *name, int clineno,
+ int lineno, const char *filename,
+ int full_traceback, int nogil);
+
+/* unicode_iter.proto */
+static CYTHON_INLINE int __Pyx_init_unicode_iteration(
+ PyObject* ustring, Py_ssize_t *length, void** data, int *kind);
+
+/* PyCFunctionFastCall.proto */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
+#else
+#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
+#endif
+
+/* PyFunctionFastCall.proto */
+#if CYTHON_FAST_PYCALL
+#define __Pyx_PyFunction_FastCall(func, args, nargs)\
+ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
+#else
+#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
+#endif
+#define __Pyx_BUILD_ASSERT_EXPR(cond)\
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+#ifndef Py_MEMBER_SIZE
+#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
+#endif
+ static size_t __pyx_pyframe_localsplus_offset = 0;
+ #include "frameobject.h"
+ #define __Pxy_PyFrame_Initialize_Offsets()\
+ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
+ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
+ #define __Pyx_PyFrame_GetLocalsplus(frame)\
+ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
+#endif
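+/* The offset computation above locates f_localsplus relative to the frame
+   object's tp_basicsize, with a build-time assertion that f_localsplus is
+   the final member of PyFrameObject. */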
+
+/* PyObjectCall.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+/* PyObjectCall2Args.proto */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
+
+/* PyObjectCallMethO.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectCallOneArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+/* RaiseException.proto */
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
+
+/* RaiseArgTupleInvalid.proto */
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
+
+/* RaiseDoubleKeywords.proto */
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
+
+/* ParseKeywords.proto */
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
+ const char* function_name);
+
+/* ArgTypeTest.proto */
+#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
+ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
+ __Pyx__ArgTypeTest(obj, type, name, exact))
+static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
+
+/* GetTopmostException.proto */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
+#endif
+
+/* ReRaiseException.proto */
+static CYTHON_INLINE void __Pyx_ReraiseException(void);
+
+/* IterFinish.proto */
+static CYTHON_INLINE int __Pyx_IterFinish(void);
+
+/* PyObjectCallNoArg.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+#else
+#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
+#endif
+
+/* PyObjectGetMethod.proto */
+static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
+
+/* PyObjectCallMethod0.proto */
+static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name);
+
+/* RaiseNeedMoreValuesToUnpack.proto */
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
+
+/* RaiseTooManyValuesToUnpack.proto */
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
+
+/* UnpackItemEndCheck.proto */
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected);
+
+/* RaiseNoneIterError.proto */
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
+
+/* UnpackTupleError.proto */
+static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index);
+
+/* UnpackTuple2.proto */
+#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple)\
+ (likely(is_tuple || PyTuple_Check(tuple)) ?\
+ (likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ?\
+ __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) :\
+ (__Pyx_UnpackTupleError(tuple, 2), -1)) :\
+ __Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple))
+static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
+ PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple);
+static int __Pyx_unpack_tuple2_generic(
+ PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple);
+
+/* dict_iter.proto */
+static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name,
+ Py_ssize_t* p_orig_length, int* p_is_dict);
+static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos,
+ PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict);
+
+/* GetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* SwapException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* SaveResetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+#else
+#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
+#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
+#endif
+
+/* TypeImport.proto */
+#ifndef __PYX_HAVE_RT_ImportType_proto
+#define __PYX_HAVE_RT_ImportType_proto
+enum __Pyx_ImportType_CheckSize {
+ __Pyx_ImportType_CheckSize_Error = 0,
+ __Pyx_ImportType_CheckSize_Warn = 1,
+ __Pyx_ImportType_CheckSize_Ignore = 2
+};
+static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
+#endif
+
+/* Import.proto */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
+/* ImportFrom.proto */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
+
+/* PyDictVersioning.proto */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
+#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
+ (version_var) = __PYX_GET_DICT_VERSION(dict);\
+ (cache_var) = (value);
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
+ (VAR) = __pyx_dict_cached_value;\
+ } else {\
+ (VAR) = __pyx_dict_cached_value = (LOOKUP);\
+ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
+ }\
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
+#else
+#define __PYX_GET_DICT_VERSION(dict) (0)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
+#endif
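+/* PEP 509 (CPython 3.6+) gives every dict a ma_version_tag; the macros above
+   cache a lookup result and revalidate it with one integer comparison. */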
+
+/* GetModuleGlobalName.proto */
+#if CYTHON_USE_DICT_VERSIONS
+#define __Pyx_GetModuleGlobalName(var, name) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
+ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
+ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
+ PY_UINT64_T __pyx_dict_version;\
+ PyObject *__pyx_dict_cached_value;\
+ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
+#else
+#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
+#endif
+
+/* CLineInTraceback.proto */
+#ifdef CYTHON_CLINE_IN_TRACEBACK
+#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
+#else
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
+#endif
+
+/* CodeObjectCache.proto */
+typedef struct {
+ PyCodeObject* code_object;
+ int code_line;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+/* AddTraceback.proto */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+/* FastTypeChecks.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
+#else
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
+#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
+/* CheckBinaryVersion.proto */
+static int __Pyx_check_binary_version(void);
+
+/* InitStrings.proto */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
+
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.bytes' */
+
+/* Module declarations from 'cpython.exc' */
+
+/* Module declarations from 'cpython.mem' */
+
+/* Module declarations from 'libc.stdint' */
+
+/* Module declarations from 'aiohttp._http_writer' */
+static char __pyx_v_7aiohttp_12_http_writer_BUFFER[0x4000];
+static PyObject *__pyx_v_7aiohttp_12_http_writer__istr = 0;
+static CYTHON_INLINE void __pyx_f_7aiohttp_12_http_writer__init_writer(struct __pyx_t_7aiohttp_12_http_writer_Writer *); /*proto*/
+static CYTHON_INLINE void __pyx_f_7aiohttp_12_http_writer__release_writer(struct __pyx_t_7aiohttp_12_http_writer_Writer *); /*proto*/
+static CYTHON_INLINE int __pyx_f_7aiohttp_12_http_writer__write_byte(struct __pyx_t_7aiohttp_12_http_writer_Writer *, uint8_t); /*proto*/
+static CYTHON_INLINE int __pyx_f_7aiohttp_12_http_writer__write_utf8(struct __pyx_t_7aiohttp_12_http_writer_Writer *, Py_UCS4); /*proto*/
+static CYTHON_INLINE int __pyx_f_7aiohttp_12_http_writer__write_str(struct __pyx_t_7aiohttp_12_http_writer_Writer *, PyObject *); /*proto*/
+static PyObject *__pyx_f_7aiohttp_12_http_writer_to_str(PyObject *); /*proto*/
+#define __Pyx_MODULE_NAME "aiohttp._http_writer"
+extern int __pyx_module_is_main_aiohttp___http_writer;
+int __pyx_module_is_main_aiohttp___http_writer = 0;
+
+/* Implementation of 'aiohttp._http_writer' */
+static PyObject *__pyx_builtin_TypeError;
+static const char __pyx_k_key[] = "key";
+static const char __pyx_k_ret[] = "ret";
+static const char __pyx_k_val[] = "val";
+static const char __pyx_k_istr[] = "istr";
+static const char __pyx_k_main[] = "__main__";
+static const char __pyx_k_name[] = "__name__";
+static const char __pyx_k_test[] = "__test__";
+static const char __pyx_k_items[] = "items";
+static const char __pyx_k_format[] = "format";
+static const char __pyx_k_import[] = "__import__";
+static const char __pyx_k_writer[] = "writer";
+static const char __pyx_k_headers[] = "headers";
+static const char __pyx_k_TypeError[] = "TypeError";
+static const char __pyx_k_multidict[] = "multidict";
+static const char __pyx_k_status_line[] = "status_line";
+static const char __pyx_k_serialize_headers[] = "_serialize_headers";
+static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
+static const char __pyx_k_aiohttp__http_writer[] = "aiohttp._http_writer";
+static const char __pyx_k_aiohttp__http_writer_pyx[] = "aiohttp/_http_writer.pyx";
+static const char __pyx_k_Cannot_serialize_non_str_key_r[] = "Cannot serialize non-str key {!r}";
+static PyObject *__pyx_kp_u_Cannot_serialize_non_str_key_r;
+static PyObject *__pyx_n_s_TypeError;
+static PyObject *__pyx_n_s_aiohttp__http_writer;
+static PyObject *__pyx_kp_s_aiohttp__http_writer_pyx;
+static PyObject *__pyx_n_s_cline_in_traceback;
+static PyObject *__pyx_n_s_format;
+static PyObject *__pyx_n_s_headers;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_istr;
+static PyObject *__pyx_n_s_items;
+static PyObject *__pyx_n_s_key;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_multidict;
+static PyObject *__pyx_n_s_name;
+static PyObject *__pyx_n_s_ret;
+static PyObject *__pyx_n_s_serialize_headers;
+static PyObject *__pyx_n_s_status_line;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_val;
+static PyObject *__pyx_n_s_writer;
+static PyObject *__pyx_pf_7aiohttp_12_http_writer__serialize_headers(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_status_line, PyObject *__pyx_v_headers); /* proto */
+static PyObject *__pyx_tuple_;
+static PyObject *__pyx_codeobj__2;
+/* Late includes */
+
+/* "aiohttp/_http_writer.pyx":24
+ *
+ *
+ * cdef inline void _init_writer(Writer* writer): # <<<<<<<<<<<<<<
+ * writer.buf = &BUFFER[0]
+ * writer.size = BUF_SIZE
+ */
+
+static CYTHON_INLINE void __pyx_f_7aiohttp_12_http_writer__init_writer(struct __pyx_t_7aiohttp_12_http_writer_Writer *__pyx_v_writer) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("_init_writer", 0);
+
+ /* "aiohttp/_http_writer.pyx":25
+ *
+ * cdef inline void _init_writer(Writer* writer):
+ * writer.buf = &BUFFER[0] # <<<<<<<<<<<<<<
+ * writer.size = BUF_SIZE
+ * writer.pos = 0
+ */
+ __pyx_v_writer->buf = (&(__pyx_v_7aiohttp_12_http_writer_BUFFER[0]));
+
+ /* "aiohttp/_http_writer.pyx":26
+ * cdef inline void _init_writer(Writer* writer):
+ * writer.buf = &BUFFER[0]
+ * writer.size = BUF_SIZE # <<<<<<<<<<<<<<
+ * writer.pos = 0
+ *
+ */
+ __pyx_v_writer->size = 0x4000;
+
+ /* "aiohttp/_http_writer.pyx":27
+ * writer.buf = &BUFFER[0]
+ * writer.size = BUF_SIZE
+ * writer.pos = 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_v_writer->pos = 0;
+
+ /* "aiohttp/_http_writer.pyx":24
+ *
+ *
+ * cdef inline void _init_writer(Writer* writer): # <<<<<<<<<<<<<<
+ * writer.buf = &BUFFER[0]
+ * writer.size = BUF_SIZE
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "aiohttp/_http_writer.pyx":30
+ *
+ *
+ * cdef inline void _release_writer(Writer* writer): # <<<<<<<<<<<<<<
+ * if writer.buf != BUFFER:
+ * PyMem_Free(writer.buf)
+ */
+
+static CYTHON_INLINE void __pyx_f_7aiohttp_12_http_writer__release_writer(struct __pyx_t_7aiohttp_12_http_writer_Writer *__pyx_v_writer) {
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_release_writer", 0);
+
+ /* "aiohttp/_http_writer.pyx":31
+ *
+ * cdef inline void _release_writer(Writer* writer):
+ * if writer.buf != BUFFER: # <<<<<<<<<<<<<<
+ * PyMem_Free(writer.buf)
+ *
+ */
+ __pyx_t_1 = ((__pyx_v_writer->buf != __pyx_v_7aiohttp_12_http_writer_BUFFER) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":32
+ * cdef inline void _release_writer(Writer* writer):
+ * if writer.buf != BUFFER:
+ * PyMem_Free(writer.buf) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ PyMem_Free(__pyx_v_writer->buf);
+
+ /* "aiohttp/_http_writer.pyx":31
+ *
+ * cdef inline void _release_writer(Writer* writer):
+ * if writer.buf != BUFFER: # <<<<<<<<<<<<<<
+ * PyMem_Free(writer.buf)
+ *
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":30
+ *
+ *
+ * cdef inline void _release_writer(Writer* writer): # <<<<<<<<<<<<<<
+ * if writer.buf != BUFFER:
+ * PyMem_Free(writer.buf)
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "aiohttp/_http_writer.pyx":35
+ *
+ *
+ * cdef inline int _write_byte(Writer* writer, uint8_t ch): # <<<<<<<<<<<<<<
+ * cdef char * buf
+ * cdef Py_ssize_t size
+ */
+
+static CYTHON_INLINE int __pyx_f_7aiohttp_12_http_writer__write_byte(struct __pyx_t_7aiohttp_12_http_writer_Writer *__pyx_v_writer, uint8_t __pyx_v_ch) {
+ char *__pyx_v_buf;
+ Py_ssize_t __pyx_v_size;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_write_byte", 0);
+
+ /* "aiohttp/_http_writer.pyx":39
+ * cdef Py_ssize_t size
+ *
+ * if writer.pos == writer.size: # <<<<<<<<<<<<<<
+ * # reallocate
+ * size = writer.size + BUF_SIZE
+ */
+ __pyx_t_1 = ((__pyx_v_writer->pos == __pyx_v_writer->size) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":41
+ * if writer.pos == writer.size:
+ * # reallocate
+ * size = writer.size + BUF_SIZE # <<<<<<<<<<<<<<
+ * if writer.buf == BUFFER:
+ * buf = <char*>PyMem_Malloc(size)
+ */
+ __pyx_v_size = (__pyx_v_writer->size + 0x4000);
+
+ /* "aiohttp/_http_writer.pyx":42
+ * # reallocate
+ * size = writer.size + BUF_SIZE
+ * if writer.buf == BUFFER: # <<<<<<<<<<<<<<
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL:
+ */
+ __pyx_t_1 = ((__pyx_v_writer->buf == __pyx_v_7aiohttp_12_http_writer_BUFFER) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":43
+ * size = writer.size + BUF_SIZE
+ * if writer.buf == BUFFER:
+ * buf = <char*>PyMem_Malloc(size) # <<<<<<<<<<<<<<
+ * if buf == NULL:
+ * PyErr_NoMemory()
+ */
+ __pyx_v_buf = ((char *)PyMem_Malloc(__pyx_v_size));
+
+ /* "aiohttp/_http_writer.pyx":44
+ * if writer.buf == BUFFER:
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ * return -1
+ */
+ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":45
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL:
+ * PyErr_NoMemory() # <<<<<<<<<<<<<<
+ * return -1
+ * memcpy(buf, writer.buf, writer.size)
+ */
+ __pyx_t_2 = PyErr_NoMemory(); if (unlikely(__pyx_t_2 == ((PyObject *)NULL))) __PYX_ERR(0, 45, __pyx_L1_error)
+
+ /* "aiohttp/_http_writer.pyx":46
+ * if buf == NULL:
+ * PyErr_NoMemory()
+ * return -1 # <<<<<<<<<<<<<<
+ * memcpy(buf, writer.buf, writer.size)
+ * else:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":44
+ * if writer.buf == BUFFER:
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ * return -1
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":47
+ * PyErr_NoMemory()
+ * return -1
+ * memcpy(buf, writer.buf, writer.size) # <<<<<<<<<<<<<<
+ * else:
+ * buf = <char*>PyMem_Realloc(writer.buf, size)
+ */
+ (void)(memcpy(__pyx_v_buf, __pyx_v_writer->buf, __pyx_v_writer->size));
+
+ /* "aiohttp/_http_writer.pyx":42
+ * # reallocate
+ * size = writer.size + BUF_SIZE
+ * if writer.buf == BUFFER: # <<<<<<<<<<<<<<
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL:
+ */
+ goto __pyx_L4;
+ }
+
+ /* "aiohttp/_http_writer.pyx":49
+ * memcpy(buf, writer.buf, writer.size)
+ * else:
+ * buf = <char*>PyMem_Realloc(writer.buf, size) # <<<<<<<<<<<<<<
+ * if buf == NULL:
+ * PyErr_NoMemory()
+ */
+ /*else*/ {
+ __pyx_v_buf = ((char *)PyMem_Realloc(__pyx_v_writer->buf, __pyx_v_size));
+
+ /* "aiohttp/_http_writer.pyx":50
+ * else:
+ * buf = <char*>PyMem_Realloc(writer.buf, size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ * return -1
+ */
+ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":51
+ * buf = <char*>PyMem_Realloc(writer.buf, size)
+ * if buf == NULL:
+ * PyErr_NoMemory() # <<<<<<<<<<<<<<
+ * return -1
+ * writer.buf = buf
+ */
+ __pyx_t_2 = PyErr_NoMemory(); if (unlikely(__pyx_t_2 == ((PyObject *)NULL))) __PYX_ERR(0, 51, __pyx_L1_error)
+
+ /* "aiohttp/_http_writer.pyx":52
+ * if buf == NULL:
+ * PyErr_NoMemory()
+ * return -1 # <<<<<<<<<<<<<<
+ * writer.buf = buf
+ * writer.size = size
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":50
+ * else:
+ * buf = <char*>PyMem_Realloc(writer.buf, size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ * return -1
+ */
+ }
+ }
+ __pyx_L4:;
+
+ /* "aiohttp/_http_writer.pyx":53
+ * PyErr_NoMemory()
+ * return -1
+ * writer.buf = buf # <<<<<<<<<<<<<<
+ * writer.size = size
+ * writer.buf[writer.pos] = <char>ch
+ */
+ __pyx_v_writer->buf = __pyx_v_buf;
+
+ /* "aiohttp/_http_writer.pyx":54
+ * return -1
+ * writer.buf = buf
+ * writer.size = size # <<<<<<<<<<<<<<
+ * writer.buf[writer.pos] = <char>ch
+ * writer.pos += 1
+ */
+ __pyx_v_writer->size = __pyx_v_size;
+
+ /* "aiohttp/_http_writer.pyx":39
+ * cdef Py_ssize_t size
+ *
+ * if writer.pos == writer.size: # <<<<<<<<<<<<<<
+ * # reallocate
+ * size = writer.size + BUF_SIZE
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":55
+ * writer.buf = buf
+ * writer.size = size
+ * writer.buf[writer.pos] = <char>ch # <<<<<<<<<<<<<<
+ * writer.pos += 1
+ * return 0
+ */
+ (__pyx_v_writer->buf[__pyx_v_writer->pos]) = ((char)__pyx_v_ch);
+
+ /* "aiohttp/_http_writer.pyx":56
+ * writer.size = size
+ * writer.buf[writer.pos] = <char>ch
+ * writer.pos += 1 # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_v_writer->pos = (__pyx_v_writer->pos + 1);
+
+ /* "aiohttp/_http_writer.pyx":57
+ * writer.buf[writer.pos] = <char>ch
+ * writer.pos += 1
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":35
+ *
+ *
+ * cdef inline int _write_byte(Writer* writer, uint8_t ch): # <<<<<<<<<<<<<<
+ * cdef char * buf
+ * cdef Py_ssize_t size
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_WriteUnraisable("aiohttp._http_writer._write_byte", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
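+/* Buffer growth in _write_byte is linear: capacity increases by BUF_SIZE
+   (16 KiB) per overflow. The first spill copies the static BUFFER into fresh
+   PyMem_Malloc'ed memory; later spills use PyMem_Realloc. On allocation
+   failure the function sets MemoryError and returns -1. */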
+
+/* "aiohttp/_http_writer.pyx":60
+ *
+ *
+ * cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol): # <<<<<<<<<<<<<<
+ * cdef uint64_t utf = <uint64_t> symbol
+ *
+ */
+
+static CYTHON_INLINE int __pyx_f_7aiohttp_12_http_writer__write_utf8(struct __pyx_t_7aiohttp_12_http_writer_Writer *__pyx_v_writer, Py_UCS4 __pyx_v_symbol) {
+ uint64_t __pyx_v_utf;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("_write_utf8", 0);
+
+ /* "aiohttp/_http_writer.pyx":61
+ *
+ * cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol):
+ * cdef uint64_t utf = <uint64_t> symbol # <<<<<<<<<<<<<<
+ *
+ * if utf < 0x80:
+ */
+ __pyx_v_utf = ((uint64_t)__pyx_v_symbol);
+
+ /* "aiohttp/_http_writer.pyx":63
+ * cdef uint64_t utf = <uint64_t> symbol
+ *
+ * if utf < 0x80: # <<<<<<<<<<<<<<
+ * return _write_byte(writer, <uint8_t>utf)
+ * elif utf < 0x800:
+ */
+ __pyx_t_1 = ((__pyx_v_utf < 0x80) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":64
+ *
+ * if utf < 0x80:
+ * return _write_byte(writer, <uint8_t>utf) # <<<<<<<<<<<<<<
+ * elif utf < 0x800:
+ * if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0:
+ */
+ __pyx_r = __pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)__pyx_v_utf));
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":63
+ * cdef uint64_t utf = <uint64_t> symbol
+ *
+ * if utf < 0x80: # <<<<<<<<<<<<<<
+ * return _write_byte(writer, <uint8_t>utf)
+ * elif utf < 0x800:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":65
+ * if utf < 0x80:
+ * return _write_byte(writer, <uint8_t>utf)
+ * elif utf < 0x800: # <<<<<<<<<<<<<<
+ * if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0:
+ * return -1
+ */
+ __pyx_t_1 = ((__pyx_v_utf < 0x800) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":66
+ * return _write_byte(writer, <uint8_t>utf)
+ * elif utf < 0x800:
+ * if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0xc0 | (__pyx_v_utf >> 6)))) < 0) != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":67
+ * elif utf < 0x800:
+ * if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ * elif 0xD800 <= utf <= 0xDFFF:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":66
+ * return _write_byte(writer, <uint8_t>utf)
+ * elif utf < 0x800:
+ * if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":68
+ * if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0:
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f))) # <<<<<<<<<<<<<<
+ * elif 0xD800 <= utf <= 0xDFFF:
+ * # surrogate pair, ignored
+ */
+ __pyx_r = __pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0x80 | (__pyx_v_utf & 0x3f))));
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":65
+ * if utf < 0x80:
+ * return _write_byte(writer, <uint8_t>utf)
+ * elif utf < 0x800: # <<<<<<<<<<<<<<
+ * if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0:
+ * return -1
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":69
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ * elif 0xD800 <= utf <= 0xDFFF: # <<<<<<<<<<<<<<
+ * # surrogate pair, ignored
+ * return 0
+ */
+ __pyx_t_1 = (0xD800 <= __pyx_v_utf);
+ if (__pyx_t_1) {
+ __pyx_t_1 = (__pyx_v_utf <= 0xDFFF);
+ }
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":71
+ * elif 0xD800 <= utf <= 0xDFFF:
+ * # surrogate pair, ignored
+ * return 0 # <<<<<<<<<<<<<<
+ * elif utf < 0x10000:
+ * if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0:
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":69
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ * elif 0xD800 <= utf <= 0xDFFF: # <<<<<<<<<<<<<<
+ * # surrogate pair, ignored
+ * return 0
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":72
+ * # surrogate pair, ignored
+ * return 0
+ * elif utf < 0x10000: # <<<<<<<<<<<<<<
+ * if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0:
+ * return -1
+ */
+ __pyx_t_2 = ((__pyx_v_utf < 0x10000) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":73
+ * return 0
+ * elif utf < 0x10000:
+ * if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ */
+ __pyx_t_2 = ((__pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0xe0 | (__pyx_v_utf >> 12)))) < 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":74
+ * elif utf < 0x10000:
+ * if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ * return -1
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":73
+ * return 0
+ * elif utf < 0x10000:
+ * if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":75
+ * if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0:
+ * return -1
+ * if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ */
+ __pyx_t_2 = ((__pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0x80 | ((__pyx_v_utf >> 6) & 0x3f)))) < 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":76
+ * return -1
+ * if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ * elif utf > 0x10FFFF:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":75
+ * if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0:
+ * return -1
+ * if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":77
+ * if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f))) # <<<<<<<<<<<<<<
+ * elif utf > 0x10FFFF:
+ * # symbol is too large
+ */
+ __pyx_r = __pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0x80 | (__pyx_v_utf & 0x3f))));
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":72
+ * # surrogate pair, ignored
+ * return 0
+ * elif utf < 0x10000: # <<<<<<<<<<<<<<
+ * if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0:
+ * return -1
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":78
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ * elif utf > 0x10FFFF: # <<<<<<<<<<<<<<
+ * # symbol is too large
+ * return 0
+ */
+ __pyx_t_2 = ((__pyx_v_utf > 0x10FFFF) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":80
+ * elif utf > 0x10FFFF:
+ * # symbol is too large
+ * return 0 # <<<<<<<<<<<<<<
+ * else:
+ * if _write_byte(writer, <uint8_t>(0xf0 | (utf >> 18))) < 0:
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":78
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ * elif utf > 0x10FFFF: # <<<<<<<<<<<<<<
+ * # symbol is too large
+ * return 0
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":82
+ * return 0
+ * else:
+ * if _write_byte(writer, <uint8_t>(0xf0 | (utf >> 18))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_byte(writer,
+ */
+ /*else*/ {
+ __pyx_t_2 = ((__pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0xf0 | (__pyx_v_utf >> 18)))) < 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":83
+ * else:
+ * if _write_byte(writer, <uint8_t>(0xf0 | (utf >> 18))) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * if _write_byte(writer,
+ * <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":82
+ * return 0
+ * else:
+ * if _write_byte(writer, <uint8_t>(0xf0 | (utf >> 18))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_byte(writer,
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":85
+ * return -1
+ * if _write_byte(writer,
+ * <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_byte(writer,
+ */
+ __pyx_t_2 = ((__pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0x80 | ((__pyx_v_utf >> 12) & 0x3f)))) < 0) != 0);
+
+ /* "aiohttp/_http_writer.pyx":84
+ * if _write_byte(writer, <uint8_t>(0xf0 | (utf >> 18))) < 0:
+ * return -1
+ * if _write_byte(writer, # <<<<<<<<<<<<<<
+ * <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0:
+ * return -1
+ */
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":86
+ * if _write_byte(writer,
+ * <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * if _write_byte(writer,
+ * <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":84
+ * if _write_byte(writer, <uint8_t>(0xf0 | (utf >> 18))) < 0:
+ * return -1
+ * if _write_byte(writer, # <<<<<<<<<<<<<<
+ * <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0:
+ * return -1
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":88
+ * return -1
+ * if _write_byte(writer,
+ * <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ */
+ __pyx_t_2 = ((__pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0x80 | ((__pyx_v_utf >> 6) & 0x3f)))) < 0) != 0);
+
+ /* "aiohttp/_http_writer.pyx":87
+ * <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0:
+ * return -1
+ * if _write_byte(writer, # <<<<<<<<<<<<<<
+ * <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ * return -1
+ */
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":89
+ * if _write_byte(writer,
+ * <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ *
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":87
+ * <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0:
+ * return -1
+ * if _write_byte(writer, # <<<<<<<<<<<<<<
+ * <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ * return -1
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":90
+ * <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ * return -1
+ * return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f))) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_f_7aiohttp_12_http_writer__write_byte(__pyx_v_writer, ((uint8_t)(0x80 | (__pyx_v_utf & 0x3f))));
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_writer.pyx":60
+ *
+ *
+ * cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol): # <<<<<<<<<<<<<<
+ * cdef uint64_t utf = <uint64_t> symbol
+ *
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_writer.pyx":93
+ *
+ *
+ * cdef inline int _write_str(Writer* writer, str s): # <<<<<<<<<<<<<<
+ * cdef Py_UCS4 ch
+ * for ch in s:
+ */
+
+static CYTHON_INLINE int __pyx_f_7aiohttp_12_http_writer__write_str(struct __pyx_t_7aiohttp_12_http_writer_Writer *__pyx_v_writer, PyObject *__pyx_v_s) {
+ Py_UCS4 __pyx_v_ch;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ Py_ssize_t __pyx_t_2;
+ Py_ssize_t __pyx_t_3;
+ void *__pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ Py_ssize_t __pyx_t_7;
+ int __pyx_t_8;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_write_str", 0);
+
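+ /* Iterate the string's canonical (PEP 393) storage directly:
+ * __Pyx_init_unicode_iteration exposes kind/data/length, and
+ * __Pyx_PyUnicode_READ fetches each code point without creating
+ * intermediate one-character str objects. */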
+ /* "aiohttp/_http_writer.pyx":95
+ * cdef inline int _write_str(Writer* writer, str s):
+ * cdef Py_UCS4 ch
+ * for ch in s: # <<<<<<<<<<<<<<
+ * if _write_utf8(writer, ch) < 0:
+ * return -1
+ */
+ if (unlikely(__pyx_v_s == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
+ __PYX_ERR(0, 95, __pyx_L1_error)
+ }
+ __Pyx_INCREF(__pyx_v_s);
+ __pyx_t_1 = __pyx_v_s;
+ __pyx_t_6 = __Pyx_init_unicode_iteration(__pyx_t_1, (&__pyx_t_3), (&__pyx_t_4), (&__pyx_t_5)); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(0, 95, __pyx_L1_error)
+ for (__pyx_t_7 = 0; __pyx_t_7 < __pyx_t_3; __pyx_t_7++) {
+ __pyx_t_2 = __pyx_t_7;
+ __pyx_v_ch = __Pyx_PyUnicode_READ(__pyx_t_5, __pyx_t_4, __pyx_t_2);
+
+ /* "aiohttp/_http_writer.pyx":96
+ * cdef Py_UCS4 ch
+ * for ch in s:
+ * if _write_utf8(writer, ch) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ *
+ */
+ __pyx_t_8 = ((__pyx_f_7aiohttp_12_http_writer__write_utf8(__pyx_v_writer, __pyx_v_ch) < 0) != 0);
+ if (__pyx_t_8) {
+
+ /* "aiohttp/_http_writer.pyx":97
+ * for ch in s:
+ * if _write_utf8(writer, ch) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = -1;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":96
+ * cdef Py_UCS4 ch
+ * for ch in s:
+ * if _write_utf8(writer, ch) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ *
+ */
+ }
+ }
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_http_writer.pyx":93
+ *
+ *
+ * cdef inline int _write_str(Writer* writer, str s): # <<<<<<<<<<<<<<
+ * cdef Py_UCS4 ch
+ * for ch in s:
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_WriteUnraisable("aiohttp._http_writer._write_str", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_writer.pyx":102
+ * # --------------- _serialize_headers ----------------------
+ *
+ * cdef str to_str(object s): # <<<<<<<<<<<<<<
+ * typ = type(s)
+ * if typ is str:
+ */
+
+static PyObject *__pyx_f_7aiohttp_12_http_writer_to_str(PyObject *__pyx_v_s) {
+ PyTypeObject *__pyx_v_typ = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("to_str", 0);
+
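+ /* Type dispatch: an exact str is returned unchanged; a
+ * multidict.istr (a str subclass) is normalised via PyObject_Str;
+ * any other non-str value raises TypeError; remaining str
+ * subclasses fall through to plain str(s). */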
+ /* "aiohttp/_http_writer.pyx":103
+ *
+ * cdef str to_str(object s):
+ * typ = type(s) # <<<<<<<<<<<<<<
+ * if typ is str:
+ * return <str>s
+ */
+ __Pyx_INCREF(((PyObject *)Py_TYPE(__pyx_v_s)));
+ __pyx_v_typ = ((PyTypeObject*)((PyObject *)Py_TYPE(__pyx_v_s)));
+
+ /* "aiohttp/_http_writer.pyx":104
+ * cdef str to_str(object s):
+ * typ = type(s)
+ * if typ is str: # <<<<<<<<<<<<<<
+ * return <str>s
+ * elif typ is _istr:
+ */
+ __pyx_t_1 = (__pyx_v_typ == (&PyUnicode_Type));
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_http_writer.pyx":105
+ * typ = type(s)
+ * if typ is str:
+ * return <str>s # <<<<<<<<<<<<<<
+ * elif typ is _istr:
+ * return PyObject_Str(s)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(((PyObject*)__pyx_v_s));
+ __pyx_r = ((PyObject*)__pyx_v_s);
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":104
+ * cdef str to_str(object s):
+ * typ = type(s)
+ * if typ is str: # <<<<<<<<<<<<<<
+ * return <str>s
+ * elif typ is _istr:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":106
+ * if typ is str:
+ * return <str>s
+ * elif typ is _istr: # <<<<<<<<<<<<<<
+ * return PyObject_Str(s)
+ * elif not isinstance(s, str):
+ */
+ __pyx_t_2 = (__pyx_v_typ == ((PyTypeObject*)__pyx_v_7aiohttp_12_http_writer__istr));
+ __pyx_t_1 = (__pyx_t_2 != 0);
+ if (__pyx_t_1) {
+
+ /* "aiohttp/_http_writer.pyx":107
+ * return <str>s
+ * elif typ is _istr:
+ * return PyObject_Str(s) # <<<<<<<<<<<<<<
+ * elif not isinstance(s, str):
+ * raise TypeError("Cannot serialize non-str key {!r}".format(s))
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = PyObject_Str(__pyx_v_s); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 107, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_3)->tp_name), 0))) __PYX_ERR(0, 107, __pyx_L1_error)
+ __pyx_r = ((PyObject*)__pyx_t_3);
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "aiohttp/_http_writer.pyx":106
+ * if typ is str:
+ * return <str>s
+ * elif typ is _istr: # <<<<<<<<<<<<<<
+ * return PyObject_Str(s)
+ * elif not isinstance(s, str):
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":108
+ * elif typ is _istr:
+ * return PyObject_Str(s)
+ * elif not isinstance(s, str): # <<<<<<<<<<<<<<
+ * raise TypeError("Cannot serialize non-str key {!r}".format(s))
+ * else:
+ */
+ __pyx_t_1 = PyUnicode_Check(__pyx_v_s);
+ __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "aiohttp/_http_writer.pyx":109
+ * return PyObject_Str(s)
+ * elif not isinstance(s, str):
+ * raise TypeError("Cannot serialize non-str key {!r}".format(s)) # <<<<<<<<<<<<<<
+ * else:
+ * return str(s)
+ */
+ __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_kp_u_Cannot_serialize_non_str_key_r, __pyx_n_s_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 109, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_4))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_4, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_v_s) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_v_s);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 109, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 109, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_Raise(__pyx_t_4, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ __PYX_ERR(0, 109, __pyx_L1_error)
+
+ /* "aiohttp/_http_writer.pyx":108
+ * elif typ is _istr:
+ * return PyObject_Str(s)
+ * elif not isinstance(s, str): # <<<<<<<<<<<<<<
+ * raise TypeError("Cannot serialize non-str key {!r}".format(s))
+ * else:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":111
+ * raise TypeError("Cannot serialize non-str key {!r}".format(s))
+ * else:
+ * return str(s) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_4 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyUnicode_Type)), __pyx_v_s); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 111, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_r = ((PyObject*)__pyx_t_4);
+ __pyx_t_4 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "aiohttp/_http_writer.pyx":102
+ * # --------------- _serialize_headers ----------------------
+ *
+ * cdef str to_str(object s): # <<<<<<<<<<<<<<
+ * typ = type(s)
+ * if typ is str:
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("aiohttp._http_writer.to_str", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_typ);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "aiohttp/_http_writer.pyx":114
+ *
+ *
+ * def _serialize_headers(str status_line, headers): # <<<<<<<<<<<<<<
+ * cdef Writer writer
+ * cdef object key
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_12_http_writer_1_serialize_headers(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_7aiohttp_12_http_writer_1_serialize_headers = {"_serialize_headers", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_12_http_writer_1_serialize_headers, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_7aiohttp_12_http_writer_1_serialize_headers(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_status_line = 0;
+ PyObject *__pyx_v_headers = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("_serialize_headers (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_status_line,&__pyx_n_s_headers,0};
+ PyObject* values[2] = {0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_status_line)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_headers)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_serialize_headers", 1, 2, 2, 1); __PYX_ERR(0, 114, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_serialize_headers") < 0)) __PYX_ERR(0, 114, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ __pyx_v_status_line = ((PyObject*)values[0]);
+ __pyx_v_headers = values[1];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_serialize_headers", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 114, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._http_writer._serialize_headers", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_status_line), (&PyUnicode_Type), 1, "status_line", 1))) __PYX_ERR(0, 114, __pyx_L1_error)
+ __pyx_r = __pyx_pf_7aiohttp_12_http_writer__serialize_headers(__pyx_self, __pyx_v_status_line, __pyx_v_headers);
+
+ /* function exit code */
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_7aiohttp_12_http_writer__serialize_headers(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_status_line, PyObject *__pyx_v_headers) {
+ struct __pyx_t_7aiohttp_12_http_writer_Writer __pyx_v_writer;
+ PyObject *__pyx_v_key = 0;
+ PyObject *__pyx_v_val = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ Py_ssize_t __pyx_t_3;
+ Py_ssize_t __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ int __pyx_t_8;
+ char const *__pyx_t_9;
+ PyObject *__pyx_t_10 = NULL;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ PyObject *__pyx_t_14 = NULL;
+ PyObject *__pyx_t_15 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_serialize_headers", 0);
+
+ /* "aiohttp/_http_writer.pyx":120
+ * cdef bytes ret
+ *
+ * _init_writer(&writer) # <<<<<<<<<<<<<<
+ *
+ * try:
+ */
+ __pyx_f_7aiohttp_12_http_writer__init_writer((&__pyx_v_writer));
+
+ /* "aiohttp/_http_writer.pyx":122
+ * _init_writer(&writer)
+ *
+ * try: # <<<<<<<<<<<<<<
+ * if _write_str(&writer, status_line) < 0:
+ * raise
+ */
+ /*try:*/ {
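+ /* The .pyx try/finally guarantees that _release_writer() frees the
+ * heap buffer on every exit path; each failed write re-raises the
+ * exception already set by the writer helpers (typically the
+ * MemoryError set when growing the buffer fails). */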
+
+ /* "aiohttp/_http_writer.pyx":123
+ *
+ * try:
+ * if _write_str(&writer, status_line) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\r') < 0:
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_str((&__pyx_v_writer), __pyx_v_status_line) < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":124
+ * try:
+ * if _write_str(&writer, status_line) < 0:
+ * raise # <<<<<<<<<<<<<<
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 124, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":123
+ *
+ * try:
+ * if _write_str(&writer, status_line) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\r') < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":125
+ * if _write_str(&writer, status_line) < 0:
+ * raise
+ * if _write_byte(&writer, b'\r') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte((&__pyx_v_writer), '\r') < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":126
+ * raise
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise # <<<<<<<<<<<<<<
+ * if _write_byte(&writer, b'\n') < 0:
+ * raise
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 126, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":125
+ * if _write_str(&writer, status_line) < 0:
+ * raise
+ * if _write_byte(&writer, b'\r') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":127
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise
+ * if _write_byte(&writer, b'\n') < 0: # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte((&__pyx_v_writer), '\n') < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":128
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ * raise # <<<<<<<<<<<<<<
+ *
+ * for key, val in headers.items():
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 128, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":127
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise
+ * if _write_byte(&writer, b'\n') < 0: # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":130
+ * raise
+ *
+ * for key, val in headers.items(): # <<<<<<<<<<<<<<
+ * if _write_str(&writer, to_str(key)) < 0:
+ * raise
+ */
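+ /* Cython's dict-iteration fast path: an exact dict is walked in
+ * place; any other mapping (e.g. a CIMultiDict) falls back to
+ * calling headers.items() and iterating the result generically. */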
+ __pyx_t_3 = 0;
+ if (unlikely(__pyx_v_headers == Py_None)) {
+ PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "items");
+ __PYX_ERR(0, 130, __pyx_L4_error)
+ }
+ __pyx_t_6 = __Pyx_dict_iterator(__pyx_v_headers, 0, __pyx_n_s_items, (&__pyx_t_4), (&__pyx_t_5)); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 130, __pyx_L4_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_2);
+ __pyx_t_2 = __pyx_t_6;
+ __pyx_t_6 = 0;
+ while (1) {
+ __pyx_t_8 = __Pyx_dict_iter_next(__pyx_t_2, __pyx_t_4, &__pyx_t_3, &__pyx_t_6, &__pyx_t_7, NULL, __pyx_t_5);
+ if (unlikely(__pyx_t_8 == 0)) break;
+ if (unlikely(__pyx_t_8 == -1)) __PYX_ERR(0, 130, __pyx_L4_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_XDECREF_SET(__pyx_v_key, __pyx_t_6);
+ __pyx_t_6 = 0;
+ __Pyx_XDECREF_SET(__pyx_v_val, __pyx_t_7);
+ __pyx_t_7 = 0;
+
+ /* "aiohttp/_http_writer.pyx":131
+ *
+ * for key, val in headers.items():
+ * if _write_str(&writer, to_str(key)) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b':') < 0:
+ */
+ __pyx_t_7 = __pyx_f_7aiohttp_12_http_writer_to_str(__pyx_v_key); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 131, __pyx_L4_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_str((&__pyx_v_writer), ((PyObject*)__pyx_t_7)) < 0) != 0);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":132
+ * for key, val in headers.items():
+ * if _write_str(&writer, to_str(key)) < 0:
+ * raise # <<<<<<<<<<<<<<
+ * if _write_byte(&writer, b':') < 0:
+ * raise
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 132, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":131
+ *
+ * for key, val in headers.items():
+ * if _write_str(&writer, to_str(key)) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b':') < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":133
+ * if _write_str(&writer, to_str(key)) < 0:
+ * raise
+ * if _write_byte(&writer, b':') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b' ') < 0:
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte((&__pyx_v_writer), ':') < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":134
+ * raise
+ * if _write_byte(&writer, b':') < 0:
+ * raise # <<<<<<<<<<<<<<
+ * if _write_byte(&writer, b' ') < 0:
+ * raise
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 134, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":133
+ * if _write_str(&writer, to_str(key)) < 0:
+ * raise
+ * if _write_byte(&writer, b':') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b' ') < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":135
+ * if _write_byte(&writer, b':') < 0:
+ * raise
+ * if _write_byte(&writer, b' ') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_str(&writer, to_str(val)) < 0:
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte((&__pyx_v_writer), ' ') < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":136
+ * raise
+ * if _write_byte(&writer, b' ') < 0:
+ * raise # <<<<<<<<<<<<<<
+ * if _write_str(&writer, to_str(val)) < 0:
+ * raise
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 136, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":135
+ * if _write_byte(&writer, b':') < 0:
+ * raise
+ * if _write_byte(&writer, b' ') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_str(&writer, to_str(val)) < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":137
+ * if _write_byte(&writer, b' ') < 0:
+ * raise
+ * if _write_str(&writer, to_str(val)) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\r') < 0:
+ */
+ __pyx_t_7 = __pyx_f_7aiohttp_12_http_writer_to_str(__pyx_v_val); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 137, __pyx_L4_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_str((&__pyx_v_writer), ((PyObject*)__pyx_t_7)) < 0) != 0);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":138
+ * raise
+ * if _write_str(&writer, to_str(val)) < 0:
+ * raise # <<<<<<<<<<<<<<
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 138, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":137
+ * if _write_byte(&writer, b' ') < 0:
+ * raise
+ * if _write_str(&writer, to_str(val)) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\r') < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":139
+ * if _write_str(&writer, to_str(val)) < 0:
+ * raise
+ * if _write_byte(&writer, b'\r') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte((&__pyx_v_writer), '\r') < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":140
+ * raise
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise # <<<<<<<<<<<<<<
+ * if _write_byte(&writer, b'\n') < 0:
+ * raise
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 140, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":139
+ * if _write_str(&writer, to_str(val)) < 0:
+ * raise
+ * if _write_byte(&writer, b'\r') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":141
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise
+ * if _write_byte(&writer, b'\n') < 0: # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte((&__pyx_v_writer), '\n') < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":142
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ * raise # <<<<<<<<<<<<<<
+ *
+ * if _write_byte(&writer, b'\r') < 0:
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 142, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":141
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise
+ * if _write_byte(&writer, b'\n') < 0: # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ }
+ }
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_writer.pyx":144
+ * raise
+ *
+ * if _write_byte(&writer, b'\r') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte((&__pyx_v_writer), '\r') < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":145
+ *
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise # <<<<<<<<<<<<<<
+ * if _write_byte(&writer, b'\n') < 0:
+ * raise
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 145, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":144
+ * raise
+ *
+ * if _write_byte(&writer, b'\r') < 0: # <<<<<<<<<<<<<<
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":146
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise
+ * if _write_byte(&writer, b'\n') < 0: # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ __pyx_t_1 = ((__pyx_f_7aiohttp_12_http_writer__write_byte((&__pyx_v_writer), '\n') < 0) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "aiohttp/_http_writer.pyx":147
+ * raise
+ * if _write_byte(&writer, b'\n') < 0:
+ * raise # <<<<<<<<<<<<<<
+ *
+ * return PyBytes_FromStringAndSize(writer.buf, writer.pos)
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 147, __pyx_L4_error)
+
+ /* "aiohttp/_http_writer.pyx":146
+ * if _write_byte(&writer, b'\r') < 0:
+ * raise
+ * if _write_byte(&writer, b'\n') < 0: # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ }
+
+ /* "aiohttp/_http_writer.pyx":149
+ * raise
+ *
+ * return PyBytes_FromStringAndSize(writer.buf, writer.pos) # <<<<<<<<<<<<<<
+ * finally:
+ * _release_writer(&writer)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_2 = PyBytes_FromStringAndSize(__pyx_v_writer.buf, __pyx_v_writer.pos); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L4_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_r = __pyx_t_2;
+ __pyx_t_2 = 0;
+ goto __pyx_L3_return;
+ }
+
+ /* "aiohttp/_http_writer.pyx":151
+ * return PyBytes_FromStringAndSize(writer.buf, writer.pos)
+ * finally:
+ * _release_writer(&writer) # <<<<<<<<<<<<<<
+ */
+ /*finally:*/ {
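+ /* Finally-clause bookkeeping: any live exception is fetched into
+ * temporaries so _release_writer() runs with a clean error
+ * indicator, then restored so the original traceback propagates;
+ * __pyx_L3_return below parks the return value the same way. */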
+ __pyx_L4_error:;
+ /*exception exit:*/{
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_13, &__pyx_t_14, &__pyx_t_15);
+ if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12) < 0)) __Pyx_ErrFetch(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
+ __Pyx_XGOTREF(__pyx_t_10);
+ __Pyx_XGOTREF(__pyx_t_11);
+ __Pyx_XGOTREF(__pyx_t_12);
+ __Pyx_XGOTREF(__pyx_t_13);
+ __Pyx_XGOTREF(__pyx_t_14);
+ __Pyx_XGOTREF(__pyx_t_15);
+ __pyx_t_5 = __pyx_lineno; __pyx_t_8 = __pyx_clineno; __pyx_t_9 = __pyx_filename;
+ {
+ __pyx_f_7aiohttp_12_http_writer__release_writer((&__pyx_v_writer));
+ }
+ if (PY_MAJOR_VERSION >= 3) {
+ __Pyx_XGIVEREF(__pyx_t_13);
+ __Pyx_XGIVEREF(__pyx_t_14);
+ __Pyx_XGIVEREF(__pyx_t_15);
+ __Pyx_ExceptionReset(__pyx_t_13, __pyx_t_14, __pyx_t_15);
+ }
+ __Pyx_XGIVEREF(__pyx_t_10);
+ __Pyx_XGIVEREF(__pyx_t_11);
+ __Pyx_XGIVEREF(__pyx_t_12);
+ __Pyx_ErrRestore(__pyx_t_10, __pyx_t_11, __pyx_t_12);
+ __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; __pyx_t_15 = 0;
+ __pyx_lineno = __pyx_t_5; __pyx_clineno = __pyx_t_8; __pyx_filename = __pyx_t_9;
+ goto __pyx_L1_error;
+ }
+ __pyx_L3_return: {
+ __pyx_t_15 = __pyx_r;
+ __pyx_r = 0;
+ __pyx_f_7aiohttp_12_http_writer__release_writer((&__pyx_v_writer));
+ __pyx_r = __pyx_t_15;
+ __pyx_t_15 = 0;
+ goto __pyx_L0;
+ }
+ }
+
+ /* "aiohttp/_http_writer.pyx":114
+ *
+ *
+ * def _serialize_headers(str status_line, headers): # <<<<<<<<<<<<<<
+ * cdef Writer writer
+ * cdef object key
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_AddTraceback("aiohttp._http_writer._serialize_headers", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_key);
+ __Pyx_XDECREF(__pyx_v_val);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
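+/* With CYTHON_PEP489_MULTI_PHASE_INIT the module is initialised in two
+ * phases (PEP 489): Py_mod_create builds the module object from the
+ * import spec and Py_mod_exec runs the module body. Otherwise the
+ * classic single-phase PyModule_Create path below is used. */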
+#if PY_MAJOR_VERSION >= 3
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
+static int __pyx_pymod_exec__http_writer(PyObject* module); /*proto*/
+static PyModuleDef_Slot __pyx_moduledef_slots[] = {
+ {Py_mod_create, (void*)__pyx_pymod_create},
+ {Py_mod_exec, (void*)__pyx_pymod_exec__http_writer},
+ {0, NULL}
+};
+#endif
+
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_http_writer",
+ 0, /* m_doc */
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ 0, /* m_size */
+ #else
+ -1, /* m_size */
+ #endif
+ __pyx_methods /* m_methods */,
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_moduledef_slots, /* m_slots */
+ #else
+ NULL, /* m_reload */
+ #endif
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+ #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+ #define CYTHON_SMALL_CODE __attribute__((cold))
+#else
+ #define CYTHON_SMALL_CODE
+#endif
+#endif
+
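+/* Literal table, filled in by __Pyx_InitStrings at module init; each
+ * entry is {slot, literal, size, encoding, is_unicode, is_str, intern},
+ * and identifier names (the n_s_* entries) are interned for fast
+ * dictionary lookups. */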
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_u_Cannot_serialize_non_str_key_r, __pyx_k_Cannot_serialize_non_str_key_r, sizeof(__pyx_k_Cannot_serialize_non_str_key_r), 0, 1, 0, 0},
+ {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
+ {&__pyx_n_s_aiohttp__http_writer, __pyx_k_aiohttp__http_writer, sizeof(__pyx_k_aiohttp__http_writer), 0, 0, 1, 1},
+ {&__pyx_kp_s_aiohttp__http_writer_pyx, __pyx_k_aiohttp__http_writer_pyx, sizeof(__pyx_k_aiohttp__http_writer_pyx), 0, 0, 1, 0},
+ {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
+ {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
+ {&__pyx_n_s_headers, __pyx_k_headers, sizeof(__pyx_k_headers), 0, 0, 1, 1},
+ {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
+ {&__pyx_n_s_istr, __pyx_k_istr, sizeof(__pyx_k_istr), 0, 0, 1, 1},
+ {&__pyx_n_s_items, __pyx_k_items, sizeof(__pyx_k_items), 0, 0, 1, 1},
+ {&__pyx_n_s_key, __pyx_k_key, sizeof(__pyx_k_key), 0, 0, 1, 1},
+ {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+ {&__pyx_n_s_multidict, __pyx_k_multidict, sizeof(__pyx_k_multidict), 0, 0, 1, 1},
+ {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
+ {&__pyx_n_s_ret, __pyx_k_ret, sizeof(__pyx_k_ret), 0, 0, 1, 1},
+ {&__pyx_n_s_serialize_headers, __pyx_k_serialize_headers, sizeof(__pyx_k_serialize_headers), 0, 0, 1, 1},
+ {&__pyx_n_s_status_line, __pyx_k_status_line, sizeof(__pyx_k_status_line), 0, 0, 1, 1},
+ {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+ {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1},
+ {&__pyx_n_s_writer, __pyx_k_writer, sizeof(__pyx_k_writer), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
+static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 109, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
+
+ /* "aiohttp/_http_writer.pyx":114
+ *
+ *
+ * def _serialize_headers(str status_line, headers): # <<<<<<<<<<<<<<
+ * cdef Writer writer
+ * cdef object key
+ */
+ __pyx_tuple_ = PyTuple_Pack(6, __pyx_n_s_status_line, __pyx_n_s_headers, __pyx_n_s_writer, __pyx_n_s_key, __pyx_n_s_val, __pyx_n_s_ret); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 114, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple_);
+ __Pyx_GIVEREF(__pyx_tuple_);
+ __pyx_codeobj__2 = (PyObject*)__Pyx_PyCode_New(2, 0, 6, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple_, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_aiohttp__http_writer_pyx, __pyx_n_s_serialize_headers, 114, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__2)) __PYX_ERR(0, 114, __pyx_L1_error)
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
+
+static int __Pyx_modinit_global_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
+ /*--- Global init code ---*/
+ __pyx_v_7aiohttp_12_http_writer__istr = Py_None; Py_INCREF(Py_None);
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_variable_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
+ /*--- Variable export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
+ /*--- Function export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_type_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
+ /*--- Type init code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_type_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
+ /*--- Type import code ---*/
+ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
+ #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
+ sizeof(PyTypeObject),
+ #else
+ sizeof(PyHeapTypeObject),
+ #endif
+ __Pyx_ImportType_CheckSize_Warn);
+ if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static int __Pyx_modinit_variable_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
+ /*--- Variable import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
+ /*--- Function import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+
+#ifndef CYTHON_NO_PYINIT_EXPORT
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+#elif PY_MAJOR_VERSION < 3
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" void
+#else
+#define __Pyx_PyMODINIT_FUNC void
+#endif
+#else
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#endif
+#endif
+
+
+#if PY_MAJOR_VERSION < 3
+__Pyx_PyMODINIT_FUNC init_http_writer(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC init_http_writer(void)
+#else
+__Pyx_PyMODINIT_FUNC PyInit__http_writer(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC PyInit__http_writer(void)
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+{
+ return PyModuleDef_Init(&__pyx_moduledef);
+}
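+/* This module keeps C-level global state (the cached _istr type and
+ * the static BUFFER), so loading it into more than one interpreter in
+ * the same process is refused. */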
+static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
+ #if PY_VERSION_HEX >= 0x030700A1
+ static PY_INT64_T main_interpreter_id = -1;
+ PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
+ if (main_interpreter_id == -1) {
+ main_interpreter_id = current_id;
+ return (unlikely(current_id == -1)) ? -1 : 0;
+ } else if (unlikely(main_interpreter_id != current_id))
+ #else
+ static PyInterpreterState *main_interpreter = NULL;
+ PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
+ if (!main_interpreter) {
+ main_interpreter = current_interpreter;
+ } else if (unlikely(main_interpreter != current_interpreter))
+ #endif
+ {
+ PyErr_SetString(
+ PyExc_ImportError,
+ "Interpreter change detected - this module can only be loaded into one interpreter per process.");
+ return -1;
+ }
+ return 0;
+}
+static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
+ PyObject *value = PyObject_GetAttrString(spec, from_name);
+ int result = 0;
+ if (likely(value)) {
+ if (allow_none || value != Py_None) {
+ result = PyDict_SetItemString(moddict, to_name, value);
+ }
+ Py_DECREF(value);
+ } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Clear();
+ } else {
+ result = -1;
+ }
+ return result;
+}
+static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
+ PyObject *module = NULL, *moddict, *modname;
+ if (__Pyx_check_single_interpreter())
+ return NULL;
+ if (__pyx_m)
+ return __Pyx_NewRef(__pyx_m);
+ modname = PyObject_GetAttrString(spec, "name");
+ if (unlikely(!modname)) goto bad;
+ module = PyModule_NewObject(modname);
+ Py_DECREF(modname);
+ if (unlikely(!module)) goto bad;
+ moddict = PyModule_GetDict(module);
+ if (unlikely(!moddict)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
+ return module;
+bad:
+ Py_XDECREF(module);
+ return NULL;
+}
+
+
+static CYTHON_SMALL_CODE int __pyx_pymod_exec__http_writer(PyObject *__pyx_pyinit_module)
+#endif
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannyDeclarations
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ if (__pyx_m) {
+ if (__pyx_m == __pyx_pyinit_module) return 0;
+ PyErr_SetString(PyExc_RuntimeError, "Module '_http_writer' has already been imported. Re-initialisation is not supported.");
+ return -1;
+ }
+ #elif PY_MAJOR_VERSION >= 3
+ if (__pyx_m) return __Pyx_NewRef(__pyx_m);
+ #endif
+ #if CYTHON_REFNANNY
+__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+}
+#endif
+ __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__http_writer(void)", 0);
+ if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pxy_PyFrame_Initialize_Offsets
+ __Pxy_PyFrame_Initialize_Offsets();
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pyx_CyFunction_USED
+ if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_FusedFunction_USED
+ if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Generator_USED
+ if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_StopAsyncIteration_USED
+ if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_m = __pyx_pyinit_module;
+ Py_INCREF(__pyx_m);
+ #else
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4("_http_writer", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_d);
+ __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_b);
+ __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_cython_runtime);
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ /*--- Initialize various global constants etc. ---*/
+ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+ if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ if (__pyx_module_is_main_aiohttp___http_writer) {
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ #if PY_MAJOR_VERSION >= 3
+ {
+ PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (!PyDict_GetItemString(modules, "aiohttp._http_writer")) {
+ if (unlikely(PyDict_SetItemString(modules, "aiohttp._http_writer", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ }
+ #endif
+ /*--- Builtin init code ---*/
+ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Constants init code ---*/
+ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Global type/function init code ---*/
+ (void)__Pyx_modinit_global_init_code();
+ (void)__Pyx_modinit_variable_export_code();
+ (void)__Pyx_modinit_function_export_code();
+ (void)__Pyx_modinit_type_init_code();
+ if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ (void)__Pyx_modinit_variable_import_code();
+ (void)__Pyx_modinit_function_import_code();
+ /*--- Execution code ---*/
+ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+ if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+
+ /* "aiohttp/_http_writer.pyx":8
+ * from libc.string cimport memcpy
+ *
+ * from multidict import istr # <<<<<<<<<<<<<<
+ *
+ * DEF BUF_SIZE = 16 * 1024 # 16KiB
+ */
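+ /* Module body: `from multidict import istr`; the name is stored in
+ * the module dict here and cached in the C global _istr just below,
+ * which enables the pointer-identity type check in to_str(). */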
+ __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_s_istr);
+ __Pyx_GIVEREF(__pyx_n_s_istr);
+ PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_istr);
+ __pyx_t_2 = __Pyx_Import(__pyx_n_s_multidict, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_istr); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_istr, __pyx_t_1) < 0) __PYX_ERR(0, 8, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_writer.pyx":13
+ * cdef char BUFFER[BUF_SIZE]
+ *
+ * cdef object _istr = istr # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_istr); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_v_7aiohttp_12_http_writer__istr);
+ __Pyx_DECREF_SET(__pyx_v_7aiohttp_12_http_writer__istr, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_writer.pyx":114
+ *
+ *
+ * def _serialize_headers(str status_line, headers): # <<<<<<<<<<<<<<
+ * cdef Writer writer
+ * cdef object key
+ */
+ __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7aiohttp_12_http_writer_1_serialize_headers, NULL, __pyx_n_s_aiohttp__http_writer); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 114, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_serialize_headers, __pyx_t_2) < 0) __PYX_ERR(0, 114, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "aiohttp/_http_writer.pyx":1
+ * from cpython.bytes cimport PyBytes_FromStringAndSize # <<<<<<<<<<<<<<
+ * from cpython.exc cimport PyErr_NoMemory
+ * from cpython.mem cimport PyMem_Free, PyMem_Malloc, PyMem_Realloc
+ */
+ __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /*--- Wrapped vars code ---*/
+
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ if (__pyx_m) {
+ if (__pyx_d) {
+ __Pyx_AddTraceback("init aiohttp._http_writer", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ }
+ Py_CLEAR(__pyx_m);
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init aiohttp._http_writer");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ return (__pyx_m != NULL) ? 0 : -1;
+ #elif PY_MAJOR_VERSION >= 3
+ return __pyx_m;
+ #else
+ return;
+ #endif
+}
+
+/* --- Runtime support code --- */
+/* Refnanny */
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule(modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, "RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif
+
+/* PyObjectGetAttrStr */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro))
+ return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_getattr))
+ return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+ return PyObject_GetAttr(obj, attr_name);
+}
+#endif
+
+/* GetBuiltinName */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+ PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+ if (unlikely(!result)) {
+ PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+ "name '%U' is not defined", name);
+#else
+ "name '%.200s' is not defined", PyString_AS_STRING(name));
+#endif
+ }
+ return result;
+}
+
+/* PyErrFetchRestore */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+#endif
+
+/* WriteUnraisableException */
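+/* Reports an exception from a context that cannot propagate it:
+ * _write_str above is a `cdef inline int` function without an `except`
+ * clause, so an error raised inside it is printed via
+ * PyErr_WriteUnraisable and discarded instead of being raised. */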
+static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
+ CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
+ int full_traceback, CYTHON_UNUSED int nogil) {
+ PyObject *old_exc, *old_val, *old_tb;
+ PyObject *ctx;
+ __Pyx_PyThreadState_declare
+#ifdef WITH_THREAD
+ PyGILState_STATE state;
+ if (nogil)
+ state = PyGILState_Ensure();
+#ifdef _MSC_VER
+ else state = (PyGILState_STATE)-1;
+#endif
+#endif
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
+ if (full_traceback) {
+ Py_XINCREF(old_exc);
+ Py_XINCREF(old_val);
+ Py_XINCREF(old_tb);
+ __Pyx_ErrRestore(old_exc, old_val, old_tb);
+ PyErr_PrintEx(1);
+ }
+ #if PY_MAJOR_VERSION < 3
+ ctx = PyString_FromString(name);
+ #else
+ ctx = PyUnicode_FromString(name);
+ #endif
+ __Pyx_ErrRestore(old_exc, old_val, old_tb);
+ if (!ctx) {
+ PyErr_WriteUnraisable(Py_None);
+ } else {
+ PyErr_WriteUnraisable(ctx);
+ Py_DECREF(ctx);
+ }
+#ifdef WITH_THREAD
+ if (nogil)
+ PyGILState_Release(state);
+#endif
+}
+
+/* unicode_iter */
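+/* PEP 393 helper: readies the string and exposes its kind, length and
+ * raw data pointer so callers can read code points in place; on
+ * pre-PEP-393 builds it falls back to the Py_UNICODE API. */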
+static CYTHON_INLINE int __Pyx_init_unicode_iteration(
+ PyObject* ustring, Py_ssize_t *length, void** data, int *kind) {
+#if CYTHON_PEP393_ENABLED
+ if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return -1;
+ *kind = PyUnicode_KIND(ustring);
+ *length = PyUnicode_GET_LENGTH(ustring);
+ *data = PyUnicode_DATA(ustring);
+#else
+ *kind = 0;
+ *length = PyUnicode_GET_SIZE(ustring);
+ *data = (void*)PyUnicode_AS_UNICODE(ustring);
+#endif
+ return 0;
+}
+
+/* PyCFunctionFastCall */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
+ PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
+ PyCFunction meth = PyCFunction_GET_FUNCTION(func);
+ PyObject *self = PyCFunction_GET_SELF(func);
+ int flags = PyCFunction_GET_FLAGS(func);
+ assert(PyCFunction_Check(func));
+ assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
+ assert(nargs >= 0);
+ assert(nargs == 0 || args != NULL);
+ /* _PyCFunction_FastCallDict() must not be called with an exception set,
+ because it may clear it (directly or indirectly) and so the
+ caller loses its exception */
+ assert(!PyErr_Occurred());
+ if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
+ return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
+ } else {
+ return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
+ }
+}
+#endif
+
+/* PyFunctionFastCall */
+#if CYTHON_FAST_PYCALL
+static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
+ PyObject *globals) {
+ PyFrameObject *f;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject **fastlocals;
+ Py_ssize_t i;
+ PyObject *result;
+ assert(globals != NULL);
+ /* XXX Perhaps we should create a specialized
+ PyFrame_New() that doesn't take locals, but does
+ take builtins without sanity checking them.
+ */
+ assert(tstate != NULL);
+ f = PyFrame_New(tstate, co, globals, NULL);
+ if (f == NULL) {
+ return NULL;
+ }
+ fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
+ for (i = 0; i < na; i++) {
+ Py_INCREF(*args);
+ fastlocals[i] = *args++;
+ }
+ result = PyEval_EvalFrameEx(f,0);
+ ++tstate->recursion_depth;
+ Py_DECREF(f);
+ --tstate->recursion_depth;
+ return result;
+}
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
+ PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
+ PyObject *globals = PyFunction_GET_GLOBALS(func);
+ PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
+ PyObject *closure;
+#if PY_MAJOR_VERSION >= 3
+ PyObject *kwdefs;
+#endif
+ PyObject *kwtuple, **k;
+ PyObject **d;
+ Py_ssize_t nd;
+ Py_ssize_t nk;
+ PyObject *result;
+ assert(kwargs == NULL || PyDict_Check(kwargs));
+ nk = kwargs ? PyDict_Size(kwargs) : 0;
+ if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
+ return NULL;
+ }
+ if (
+#if PY_MAJOR_VERSION >= 3
+ co->co_kwonlyargcount == 0 &&
+#endif
+ likely(kwargs == NULL || nk == 0) &&
+ co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
+ if (argdefs == NULL && co->co_argcount == nargs) {
+ result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
+ goto done;
+ }
+ else if (nargs == 0 && argdefs != NULL
+ && co->co_argcount == Py_SIZE(argdefs)) {
+ /* function called with no arguments, but all parameters have
+ a default value: use default values as arguments. */
+ args = &PyTuple_GET_ITEM(argdefs, 0);
+ result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
+ goto done;
+ }
+ }
+ if (kwargs != NULL) {
+ Py_ssize_t pos, i;
+ kwtuple = PyTuple_New(2 * nk);
+ if (kwtuple == NULL) {
+ result = NULL;
+ goto done;
+ }
+ k = &PyTuple_GET_ITEM(kwtuple, 0);
+ pos = i = 0;
+ while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
+ Py_INCREF(k[i]);
+ Py_INCREF(k[i+1]);
+ i += 2;
+ }
+ nk = i / 2;
+ }
+ else {
+ kwtuple = NULL;
+ k = NULL;
+ }
+ closure = PyFunction_GET_CLOSURE(func);
+#if PY_MAJOR_VERSION >= 3
+ kwdefs = PyFunction_GET_KW_DEFAULTS(func);
+#endif
+ if (argdefs != NULL) {
+ d = &PyTuple_GET_ITEM(argdefs, 0);
+ nd = Py_SIZE(argdefs);
+ }
+ else {
+ d = NULL;
+ nd = 0;
+ }
+#if PY_MAJOR_VERSION >= 3
+ result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, kwdefs, closure);
+#else
+ result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, closure);
+#endif
+ Py_XDECREF(kwtuple);
+done:
+ Py_LeaveRecursiveCall();
+ return result;
+}
+#endif
+#endif
+
+/* PyObjectCall */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
+ PyObject *result;
+ ternaryfunc call = func->ob_type->tp_call;
+ if (unlikely(!call))
+ return PyObject_Call(func, arg, kw);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = (*call)(func, arg, kw);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCall2Args */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
+ PyObject *args, *result = NULL;
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyFunction_FastCall(function, args, 2);
+ }
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyCFunction_FastCall(function, args, 2);
+ }
+ #endif
+ args = PyTuple_New(2);
+ if (unlikely(!args)) goto done;
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 0, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 1, arg2);
+ Py_INCREF(function);
+ result = __Pyx_PyObject_Call(function, args, NULL);
+ Py_DECREF(args);
+ Py_DECREF(function);
+done:
+ return result;
+}
+
+/* PyObjectCallMethO */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
+ PyObject *self, *result;
+ PyCFunction cfunc;
+ cfunc = PyCFunction_GET_FUNCTION(func);
+ self = PyCFunction_GET_SELF(func);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = cfunc(self, arg);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCallOneArg */
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_New(1);
+ if (unlikely(!args)) return NULL;
+ Py_INCREF(arg);
+ PyTuple_SET_ITEM(args, 0, arg);
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, &arg, 1);
+ }
+#endif
+ if (likely(PyCFunction_Check(func))) {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+ return __Pyx_PyObject_CallMethO(func, arg);
+#if CYTHON_FAST_PYCCALL
+ } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
+ return __Pyx_PyCFunction_FastCall(func, &arg, 1);
+#endif
+ }
+ }
+ return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_Pack(1, arg);
+ if (unlikely(!args)) return NULL;
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+#endif
+
+/* RaiseException */
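+/* Implements the semantics of the Python raise statement: normalizes class
+   vs. instance, validates the traceback argument, and (on Python 3) attaches
+   the __cause__ for "raise ... from ...". */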
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
+ CYTHON_UNUSED PyObject *cause) {
+ __Pyx_PyThreadState_declare
+ Py_XINCREF(type);
+ if (!value || value == Py_None)
+ value = NULL;
+ else
+ Py_INCREF(value);
+ if (!tb || tb == Py_None)
+ tb = NULL;
+ else {
+ Py_INCREF(tb);
+ if (!PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ }
+ if (PyType_Check(type)) {
+#if CYTHON_COMPILING_IN_PYPY
+ if (!value) {
+ Py_INCREF(Py_None);
+ value = Py_None;
+ }
+#endif
+ PyErr_NormalizeException(&type, &value, &tb);
+ } else {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ }
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+#else
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ PyObject* owned_instance = NULL;
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (PyExceptionClass_Check(type)) {
+ PyObject *instance_class = NULL;
+ if (value && PyExceptionInstance_Check(value)) {
+ instance_class = (PyObject*) Py_TYPE(value);
+ if (instance_class != type) {
+ int is_subclass = PyObject_IsSubclass(instance_class, type);
+ if (!is_subclass) {
+ instance_class = NULL;
+ } else if (unlikely(is_subclass == -1)) {
+ goto bad;
+ } else {
+ type = instance_class;
+ }
+ }
+ }
+ if (!instance_class) {
+ PyObject *args;
+ if (!value)
+ args = PyTuple_New(0);
+ else if (PyTuple_Check(value)) {
+ Py_INCREF(value);
+ args = value;
+ } else
+ args = PyTuple_Pack(1, value);
+ if (!args)
+ goto bad;
+ owned_instance = PyObject_Call(type, args, NULL);
+ Py_DECREF(args);
+ if (!owned_instance)
+ goto bad;
+ value = owned_instance;
+ if (!PyExceptionInstance_Check(value)) {
+ PyErr_Format(PyExc_TypeError,
+ "calling %R should have returned an instance of "
+ "BaseException, not %R",
+ type, Py_TYPE(value));
+ goto bad;
+ }
+ }
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+ if (cause) {
+ PyObject *fixed_cause;
+ if (cause == Py_None) {
+ fixed_cause = NULL;
+ } else if (PyExceptionClass_Check(cause)) {
+ fixed_cause = PyObject_CallObject(cause, NULL);
+ if (fixed_cause == NULL)
+ goto bad;
+ } else if (PyExceptionInstance_Check(cause)) {
+ fixed_cause = cause;
+ Py_INCREF(fixed_cause);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "exception causes must derive from "
+ "BaseException");
+ goto bad;
+ }
+ PyException_SetCause(value, fixed_cause);
+ }
+ PyErr_SetObject(type, value);
+ if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
+ Py_INCREF(tb);
+ PyErr_Restore(tmp_type, tmp_value, tb);
+ Py_XDECREF(tmp_tb);
+#else
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+#endif
+ }
+bad:
+ Py_XDECREF(owned_instance);
+ return;
+}
+#endif
+
+/* RaiseArgTupleInvalid */
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *more_or_less;
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
+ func_name, more_or_less, num_expected,
+ (num_expected == 1) ? "" : "s", num_found);
+}
+
+/* RaiseDoubleKeywords */
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AsString(kw_name));
+ #endif
+}
+
+/* ParseKeywords */
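+/* Matches the keywords in kwds against the expected argument names (by
+   pointer identity first, then by string comparison), stores hits into
+   values[], routes unknown keywords into kwds2 when given, and raises on
+   duplicates, unexpected keywords, or non-string keys. */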
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ continue;
+ }
+ name = first_kw_arg;
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyString_Check(key))) {
+ while (*name) {
+ if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**name, key)) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ if ((**argname == key) || (
+ (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**argname, key))) {
+ goto arg_passed_twice;
+ }
+ argname++;
+ }
+ }
+ } else
+ #endif
+ if (likely(PyUnicode_Check(key))) {
+ while (*name) {
+ int cmp = (**name == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**name, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ int cmp = (**argname == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**argname, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) goto arg_passed_twice;
+ argname++;
+ }
+ }
+ } else
+ goto invalid_keyword_type;
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, key);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%.200s() got an unexpected keyword argument '%.200s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+/* ArgTypeTest */
+static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
+{
+ if (unlikely(!type)) {
+ PyErr_SetString(PyExc_SystemError, "Missing type object");
+ return 0;
+ }
+ else if (exact) {
+ #if PY_MAJOR_VERSION == 2
+ if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
+ #endif
+ }
+ else {
+ if (likely(__Pyx_TypeCheck(obj, type))) return 1;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
+ name, type->tp_name, Py_TYPE(obj)->tp_name);
+ return 0;
+}
+
+/* GetTopmostException */
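+/* Walks the exc_info stack down to the innermost entry that still holds a
+   live exception, skipping entries that have already been cleared. */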
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem *
+__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
+{
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
+ exc_info->previous_item != NULL)
+ {
+ exc_info = exc_info->previous_item;
+ }
+ return exc_info;
+}
+#endif
+
+/* ReRaiseException */
+static CYTHON_INLINE void __Pyx_ReraiseException(void) {
+ PyObject *type = NULL, *value = NULL, *tb = NULL;
+#if CYTHON_FAST_THREAD_STATE
+ PyThreadState *tstate = PyThreadState_GET();
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
+ type = exc_info->exc_type;
+ value = exc_info->exc_value;
+ tb = exc_info->exc_traceback;
+ #else
+ type = tstate->exc_type;
+ value = tstate->exc_value;
+ tb = tstate->exc_traceback;
+ #endif
+#else
+ PyErr_GetExcInfo(&type, &value, &tb);
+#endif
+ if (!type || type == Py_None) {
+#if !CYTHON_FAST_THREAD_STATE
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(tb);
+#endif
+ PyErr_SetString(PyExc_RuntimeError,
+ "No active exception to reraise");
+ } else {
+#if CYTHON_FAST_THREAD_STATE
+ Py_INCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+#endif
+ PyErr_Restore(type, value, tb);
+ }
+}
+
+/* IterFinish */
+static CYTHON_INLINE int __Pyx_IterFinish(void) {
+#if CYTHON_FAST_THREAD_STATE
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject* exc_type = tstate->curexc_type;
+ if (unlikely(exc_type)) {
+ if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) {
+ PyObject *exc_value, *exc_tb;
+ exc_value = tstate->curexc_value;
+ exc_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+ Py_DECREF(exc_type);
+ Py_XDECREF(exc_value);
+ Py_XDECREF(exc_tb);
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+#else
+ if (unlikely(PyErr_Occurred())) {
+ if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) {
+ PyErr_Clear();
+ return 0;
+ } else {
+ return -1;
+ }
+ }
+ return 0;
+#endif
+}
+
+/* PyObjectCallNoArg */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, NULL, 0);
+ }
+#endif
+#ifdef __Pyx_CyFunction_USED
+ if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
+#else
+ if (likely(PyCFunction_Check(func)))
+#endif
+ {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
+ return __Pyx_PyObject_CallMethO(func, NULL);
+ }
+ }
+ return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
+}
+#endif
+
+/* PyObjectGetMethod */
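+/* Analogue of CPython's _PyObject_GetMethod: returns 1 and the unbound
+   function when the attribute is a plain method that can be called with obj
+   prepended, avoiding the allocation of a bound-method object; otherwise
+   returns 0 and the regular attribute. */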
+static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) {
+ PyObject *attr;
+#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP
+ PyTypeObject *tp = Py_TYPE(obj);
+ PyObject *descr;
+ descrgetfunc f = NULL;
+ PyObject **dictptr, *dict;
+ int meth_found = 0;
+ assert (*method == NULL);
+ if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) {
+ attr = __Pyx_PyObject_GetAttrStr(obj, name);
+ goto try_unpack;
+ }
+ if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) {
+ return 0;
+ }
+ descr = _PyType_Lookup(tp, name);
+ if (likely(descr != NULL)) {
+ Py_INCREF(descr);
+#if PY_MAJOR_VERSION >= 3
+ #ifdef __Pyx_CyFunction_USED
+ if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr)))
+ #else
+ if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type)))
+ #endif
+#else
+ #ifdef __Pyx_CyFunction_USED
+ if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr)))
+ #else
+ if (likely(PyFunction_Check(descr)))
+ #endif
+#endif
+ {
+ meth_found = 1;
+ } else {
+ f = Py_TYPE(descr)->tp_descr_get;
+ if (f != NULL && PyDescr_IsData(descr)) {
+ attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
+ Py_DECREF(descr);
+ goto try_unpack;
+ }
+ }
+ }
+ dictptr = _PyObject_GetDictPtr(obj);
+ if (dictptr != NULL && (dict = *dictptr) != NULL) {
+ Py_INCREF(dict);
+ attr = __Pyx_PyDict_GetItemStr(dict, name);
+ if (attr != NULL) {
+ Py_INCREF(attr);
+ Py_DECREF(dict);
+ Py_XDECREF(descr);
+ goto try_unpack;
+ }
+ Py_DECREF(dict);
+ }
+ if (meth_found) {
+ *method = descr;
+ return 1;
+ }
+ if (f != NULL) {
+ attr = f(descr, obj, (PyObject *)Py_TYPE(obj));
+ Py_DECREF(descr);
+ goto try_unpack;
+ }
+ if (descr != NULL) {
+ *method = descr;
+ return 0;
+ }
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(name));
+#endif
+ return 0;
+#else
+ attr = __Pyx_PyObject_GetAttrStr(obj, name);
+ goto try_unpack;
+#endif
+try_unpack:
+#if CYTHON_UNPACK_METHODS
+ if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) {
+ PyObject *function = PyMethod_GET_FUNCTION(attr);
+ Py_INCREF(function);
+ Py_DECREF(attr);
+ *method = function;
+ return 1;
+ }
+#endif
+ *method = attr;
+ return 0;
+}
+
+/* PyObjectCallMethod0 */
+static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) {
+ PyObject *method = NULL, *result = NULL;
+ int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method);
+ if (likely(is_method)) {
+ result = __Pyx_PyObject_CallOneArg(method, obj);
+ Py_DECREF(method);
+ return result;
+ }
+ if (unlikely(!method)) goto bad;
+ result = __Pyx_PyObject_CallNoArg(method);
+ Py_DECREF(method);
+bad:
+ return result;
+}
+
+/* RaiseNeedMoreValuesToUnpack */
+static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
+ PyErr_Format(PyExc_ValueError,
+ "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
+ index, (index == 1) ? "" : "s");
+}
+
+/* RaiseTooManyValuesToUnpack */
+static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
+ PyErr_Format(PyExc_ValueError,
+ "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
+}
+
+/* UnpackItemEndCheck */
+static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) {
+ if (unlikely(retval)) {
+ Py_DECREF(retval);
+ __Pyx_RaiseTooManyValuesError(expected);
+ return -1;
+ } else {
+ return __Pyx_IterFinish();
+ }
+ return 0;
+}
+
+/* RaiseNoneIterError */
+static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+}
+
+/* UnpackTupleError */
+static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) {
+ if (t == Py_None) {
+ __Pyx_RaiseNoneNotIterableError();
+ } else if (PyTuple_GET_SIZE(t) < index) {
+ __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t));
+ } else {
+ __Pyx_RaiseTooManyValuesError(index);
+ }
+}
+
+/* UnpackTuple2 */
+static CYTHON_INLINE int __Pyx_unpack_tuple2_exact(
+ PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) {
+ PyObject *value1 = NULL, *value2 = NULL;
+#if CYTHON_COMPILING_IN_PYPY
+ value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad;
+ value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad;
+#else
+ value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1);
+ value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2);
+#endif
+ if (decref_tuple) {
+ Py_DECREF(tuple);
+ }
+ *pvalue1 = value1;
+ *pvalue2 = value2;
+ return 0;
+#if CYTHON_COMPILING_IN_PYPY
+bad:
+ Py_XDECREF(value1);
+ Py_XDECREF(value2);
+ if (decref_tuple) { Py_XDECREF(tuple); }
+ return -1;
+#endif
+}
+static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2,
+ int has_known_size, int decref_tuple) {
+ Py_ssize_t index;
+ PyObject *value1 = NULL, *value2 = NULL, *iter = NULL;
+ iternextfunc iternext;
+ iter = PyObject_GetIter(tuple);
+ if (unlikely(!iter)) goto bad;
+ if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; }
+ iternext = Py_TYPE(iter)->tp_iternext;
+ value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; }
+ value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; }
+ if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad;
+ Py_DECREF(iter);
+ *pvalue1 = value1;
+ *pvalue2 = value2;
+ return 0;
+unpacking_failed:
+ if (!has_known_size && __Pyx_IterFinish() == 0)
+ __Pyx_RaiseNeedMoreValuesError(index);
+bad:
+ Py_XDECREF(iter);
+ Py_XDECREF(value1);
+ Py_XDECREF(value2);
+ if (decref_tuple) { Py_XDECREF(tuple); }
+ return -1;
+}
+
+/* dict_iter */
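+/* Iteration helper: real dicts are iterated in place via PyDict_Next with a
+   guard against size changes; other objects fall back to the named method
+   (e.g. iteritems) or the generic iterator protocol. */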
+static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name,
+ Py_ssize_t* p_orig_length, int* p_source_is_dict) {
+ is_dict = is_dict || likely(PyDict_CheckExact(iterable));
+ *p_source_is_dict = is_dict;
+ if (is_dict) {
+#if !CYTHON_COMPILING_IN_PYPY
+ *p_orig_length = PyDict_Size(iterable);
+ Py_INCREF(iterable);
+ return iterable;
+#elif PY_MAJOR_VERSION >= 3
+ static PyObject *py_items = NULL, *py_keys = NULL, *py_values = NULL;
+ PyObject **pp = NULL;
+ if (method_name) {
+ const char *name = PyUnicode_AsUTF8(method_name);
+ if (strcmp(name, "iteritems") == 0) pp = &py_items;
+ else if (strcmp(name, "iterkeys") == 0) pp = &py_keys;
+ else if (strcmp(name, "itervalues") == 0) pp = &py_values;
+ if (pp) {
+ if (!*pp) {
+ *pp = PyUnicode_FromString(name + 4);
+ if (!*pp)
+ return NULL;
+ }
+ method_name = *pp;
+ }
+ }
+#endif
+ }
+ *p_orig_length = 0;
+ if (method_name) {
+ PyObject* iter;
+ iterable = __Pyx_PyObject_CallMethod0(iterable, method_name);
+ if (!iterable)
+ return NULL;
+#if !CYTHON_COMPILING_IN_PYPY
+ if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable))
+ return iterable;
+#endif
+ iter = PyObject_GetIter(iterable);
+ Py_DECREF(iterable);
+ return iter;
+ }
+ return PyObject_GetIter(iterable);
+}
+static CYTHON_INLINE int __Pyx_dict_iter_next(
+ PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos,
+ PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) {
+ PyObject* next_item;
+#if !CYTHON_COMPILING_IN_PYPY
+ if (source_is_dict) {
+ PyObject *key, *value;
+ if (unlikely(orig_length != PyDict_Size(iter_obj))) {
+ PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration");
+ return -1;
+ }
+ if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) {
+ return 0;
+ }
+ if (pitem) {
+ PyObject* tuple = PyTuple_New(2);
+ if (unlikely(!tuple)) {
+ return -1;
+ }
+ Py_INCREF(key);
+ Py_INCREF(value);
+ PyTuple_SET_ITEM(tuple, 0, key);
+ PyTuple_SET_ITEM(tuple, 1, value);
+ *pitem = tuple;
+ } else {
+ if (pkey) {
+ Py_INCREF(key);
+ *pkey = key;
+ }
+ if (pvalue) {
+ Py_INCREF(value);
+ *pvalue = value;
+ }
+ }
+ return 1;
+ } else if (PyTuple_CheckExact(iter_obj)) {
+ Py_ssize_t pos = *ppos;
+ if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0;
+ *ppos = pos + 1;
+ next_item = PyTuple_GET_ITEM(iter_obj, pos);
+ Py_INCREF(next_item);
+ } else if (PyList_CheckExact(iter_obj)) {
+ Py_ssize_t pos = *ppos;
+ if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0;
+ *ppos = pos + 1;
+ next_item = PyList_GET_ITEM(iter_obj, pos);
+ Py_INCREF(next_item);
+ } else
+#endif
+ {
+ next_item = PyIter_Next(iter_obj);
+ if (unlikely(!next_item)) {
+ return __Pyx_IterFinish();
+ }
+ }
+ if (pitem) {
+ *pitem = next_item;
+ } else if (pkey && pvalue) {
+ if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1))
+ return -1;
+ } else if (pkey) {
+ *pkey = next_item;
+ } else {
+ *pvalue = next_item;
+ }
+ return 1;
+}
+
+/* GetException */
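+/* Fetches and normalizes the current exception, records it as the currently
+   handled exception (sys.exc_info), and hands owned references to the caller. */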
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
+#endif
+{
+ PyObject *local_type, *local_value, *local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ local_type = tstate->curexc_type;
+ local_value = tstate->curexc_value;
+ local_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+#else
+ PyErr_Fetch(&local_type, &local_value, &local_tb);
+#endif
+ PyErr_NormalizeException(&local_type, &local_value, &local_tb);
+#if CYTHON_FAST_THREAD_STATE
+ if (unlikely(tstate->curexc_type))
+#else
+ if (unlikely(PyErr_Occurred()))
+#endif
+ goto bad;
+ #if PY_MAJOR_VERSION >= 3
+ if (local_tb) {
+ if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
+ goto bad;
+ }
+ #endif
+ Py_XINCREF(local_tb);
+ Py_XINCREF(local_type);
+ Py_XINCREF(local_value);
+ *type = local_type;
+ *value = local_value;
+ *tb = local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ #if CYTHON_USE_EXC_INFO_STACK
+ {
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = local_type;
+ exc_info->exc_value = local_value;
+ exc_info->exc_traceback = local_tb;
+ }
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = local_type;
+ tstate->exc_value = local_value;
+ tstate->exc_traceback = local_tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+#else
+ PyErr_SetExcInfo(local_type, local_value, local_tb);
+#endif
+ return 0;
+bad:
+ *type = 0;
+ *value = 0;
+ *tb = 0;
+ Py_XDECREF(local_type);
+ Py_XDECREF(local_value);
+ Py_XDECREF(local_tb);
+ return -1;
+}
+
+/* SwapException */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = *type;
+ exc_info->exc_value = *value;
+ exc_info->exc_traceback = *tb;
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = *type;
+ tstate->exc_value = *value;
+ tstate->exc_traceback = *tb;
+ #endif
+ *type = tmp_type;
+ *value = tmp_value;
+ *tb = tmp_tb;
+}
+#else
+static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
+ PyErr_SetExcInfo(*type, *value, *tb);
+ *type = tmp_type;
+ *value = tmp_value;
+ *tb = tmp_tb;
+}
+#endif
+
+/* SaveResetException */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
+ *type = exc_info->exc_type;
+ *value = exc_info->exc_value;
+ *tb = exc_info->exc_traceback;
+ #else
+ *type = tstate->exc_type;
+ *value = tstate->exc_value;
+ *tb = tstate->exc_traceback;
+ #endif
+ Py_XINCREF(*type);
+ Py_XINCREF(*value);
+ Py_XINCREF(*tb);
+}
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = type;
+ exc_info->exc_value = value;
+ exc_info->exc_traceback = tb;
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = type;
+ tstate->exc_value = value;
+ tstate->exc_traceback = tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+#endif
+
+/* TypeImport */
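+/* Imports a type from a module and compares its tp_basicsize against the
+   size the C header was compiled with, raising or warning on mismatch
+   according to check_size. */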
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
+ size_t size, enum __Pyx_ImportType_CheckSize check_size)
+{
+ PyObject *result = 0;
+ char warning[200];
+ Py_ssize_t basicsize;
+#ifdef Py_LIMITED_API
+ PyObject *py_basicsize;
+#endif
+ result = PyObject_GetAttrString(module, class_name);
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%.200s.%.200s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+#ifndef Py_LIMITED_API
+ basicsize = ((PyTypeObject *)result)->tp_basicsize;
+#else
+ py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
+ if (!py_basicsize)
+ goto bad;
+ basicsize = PyLong_AsSsize_t(py_basicsize);
+ Py_DECREF(py_basicsize);
+ py_basicsize = 0;
+ if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
+ goto bad;
+#endif
+ if ((size_t)basicsize < size) {
+ PyErr_Format(PyExc_ValueError,
+ "%.200s.%.200s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ goto bad;
+ }
+ if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%.200s.%.200s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ goto bad;
+ }
+ else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
+ PyOS_snprintf(warning, sizeof(warning),
+ "%s.%s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(result);
+ return NULL;
+}
+#endif
+
+/* Import */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_import;
+ py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+ if (!py_import)
+ goto bad;
+ #endif
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ {
+ #if PY_MAJOR_VERSION >= 3
+ if (level == -1) {
+ if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, 1);
+ if (!module) {
+ if (!PyErr_ExceptionMatches(PyExc_ImportError))
+ goto bad;
+ PyErr_Clear();
+ }
+ }
+ level = 0;
+ }
+ #endif
+ if (!module) {
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_level = PyInt_FromLong(level);
+ if (!py_level)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
+ Py_DECREF(py_level);
+ #else
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, level);
+ #endif
+ }
+ }
+bad:
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(py_import);
+ #endif
+ Py_XDECREF(empty_list);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+/* ImportFrom */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+ PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+ if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Format(PyExc_ImportError,
+ #if PY_MAJOR_VERSION < 3
+ "cannot import name %.230s", PyString_AS_STRING(name));
+ #else
+ "cannot import name %S", name);
+ #endif
+ }
+ return value;
+}
+
+/* PyDictVersioning */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
+ PyObject **dictptr = NULL;
+ Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
+ if (offset) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
+#else
+ dictptr = _PyObject_GetDictPtr(obj);
+#endif
+ }
+ return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
+}
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
+ return 0;
+ return obj_dict_version == __Pyx_get_object_dict_version(obj);
+}
+#endif
+
+/* GetModuleGlobalName */
+#if CYTHON_USE_DICT_VERSIONS
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
+#else
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
+#endif
+{
+ PyObject *result;
+#if !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+ result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ } else if (unlikely(PyErr_Occurred())) {
+ return NULL;
+ }
+#else
+ result = PyDict_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+#endif
+#else
+ result = PyObject_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+ PyErr_Clear();
+#endif
+ return __Pyx_GetBuiltinName(name);
+}
+
+/* CLineInTraceback */
+#ifndef CYTHON_CLINE_IN_TRACEBACK
+static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
+ PyObject *use_cline;
+ PyObject *ptype, *pvalue, *ptraceback;
+#if CYTHON_COMPILING_IN_CPYTHON
+ PyObject **cython_runtime_dict;
+#endif
+ if (unlikely(!__pyx_cython_runtime)) {
+ return c_line;
+ }
+ __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
+#if CYTHON_COMPILING_IN_CPYTHON
+ cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
+ if (likely(cython_runtime_dict)) {
+ __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
+ use_cline, *cython_runtime_dict,
+ __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
+ } else
+#endif
+ {
+ PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
+ if (use_cline_obj) {
+ use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
+ Py_DECREF(use_cline_obj);
+ } else {
+ PyErr_Clear();
+ use_cline = NULL;
+ }
+ }
+ if (!use_cline) {
+ c_line = 0;
+ PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
+ }
+ else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
+ c_line = 0;
+ }
+ __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
+ return c_line;
+}
+#endif
+
+/* CodeObjectCache */
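+/* Cache of synthesized code objects kept in an array sorted by source line
+   and looked up by binary search. */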
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+ int start = 0, mid = 0, end = count - 1;
+ if (end >= 0 && code_line > entries[end].code_line) {
+ return count;
+ }
+ while (start < end) {
+ mid = start + (end - start) / 2;
+ if (code_line < entries[mid].code_line) {
+ end = mid;
+ } else if (code_line > entries[mid].code_line) {
+ start = mid + 1;
+ } else {
+ return mid;
+ }
+ }
+ if (code_line <= entries[mid].code_line) {
+ return mid;
+ } else {
+ return mid + 1;
+ }
+}
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+ PyCodeObject* code_object;
+ int pos;
+ if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+ return NULL;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+ return NULL;
+ }
+ code_object = __pyx_code_cache.entries[pos].code_object;
+ Py_INCREF(code_object);
+ return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+ int pos, i;
+ __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+ if (unlikely(!code_line)) {
+ return;
+ }
+ if (unlikely(!entries)) {
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+ if (likely(entries)) {
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = 64;
+ __pyx_code_cache.count = 1;
+ entries[0].code_line = code_line;
+ entries[0].code_object = code_object;
+ Py_INCREF(code_object);
+ }
+ return;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+ PyCodeObject* tmp = entries[pos].code_object;
+ entries[pos].code_object = code_object;
+ Py_DECREF(tmp);
+ return;
+ }
+ if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+ int new_max = __pyx_code_cache.max_count + 64;
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+ __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
+ if (unlikely(!entries)) {
+ return;
+ }
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = new_max;
+ }
+ for (i=__pyx_code_cache.count; i>pos; i--) {
+ entries[i] = entries[i-1];
+ }
+ entries[pos].code_line = code_line;
+ entries[pos].code_object = code_object;
+ __pyx_code_cache.count++;
+ Py_INCREF(code_object);
+}
+
+/* AddTraceback */
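+/* Fabricates a minimal code object and frame for the current Cython source
+   position so the Python traceback points at the original .pyx file and line. */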
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+ const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(filename);
+ #else
+ py_srcfile = PyUnicode_FromString(filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (c_line) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_code = __Pyx_PyCode_New(
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ py_line,
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ Py_DECREF(py_srcfile);
+ Py_DECREF(py_funcname);
+ return py_code;
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ if (c_line) {
+ c_line = __Pyx_CLineForTraceback(tstate, c_line);
+ }
+ py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
+ if (!py_code) {
+ py_code = __Pyx_CreateCodeObjectForTraceback(
+ funcname, c_line, py_line, filename);
+ if (!py_code) goto bad;
+ __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
+ }
+ py_frame = PyFrame_New(
+ tstate, /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ __pyx_d, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+/* CIntToPy */
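+/* Converts a C long to a Python int, picking the cheapest converter for the
+   type's size and signedness and falling back to _PyLong_FromByteArray. */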
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(long) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(long) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(long) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(long),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntFromPyVerify */
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+ {\
+ func_type value = func_value;\
+ if (sizeof(target_type) < sizeof(func_type)) {\
+ if (unlikely(value != (func_type) (target_type) value)) {\
+ func_type zero = 0;\
+ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+ return (target_type) -1;\
+ if (is_unsigned && unlikely(value < zero))\
+ goto raise_neg_overflow;\
+ else\
+ goto raise_overflow;\
+ }\
+ }\
+ return (target_type) value;\
+ }
+
+/* CIntFromPy */
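+/* Converts a Python int to a C long with overflow checking; the digit switch
+   reads small values straight out of the PyLong internals when available. */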
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(long) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (long) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+ return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+ return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+ return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (long) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(long) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(long) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ long val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (long) -1;
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (long) -1;
+ val = __Pyx_PyInt_As_long(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to long");
+ return (long) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long) -1;
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(int) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (int) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+ return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+ return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+ return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (int) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(int) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(int) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ int val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (int) -1;
+ }
+ } else {
+ int val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (int) -1;
+ val = __Pyx_PyInt_As_int(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to int");
+ return (int) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to int");
+ return (int) -1;
+}
+
+/* FastTypeChecks */
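+/* Subtype and exception-matching checks that walk tp_mro/tp_base directly,
+   avoiding the slower generic PyObject_IsSubclass machinery. */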
+#if CYTHON_COMPILING_IN_CPYTHON
+static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
+ while (a) {
+ a = a->tp_base;
+ if (a == b)
+ return 1;
+ }
+ return b == &PyBaseObject_Type;
+}
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
+ PyObject *mro;
+ if (a == b) return 1;
+ mro = a->tp_mro;
+ if (likely(mro)) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(mro);
+ for (i = 0; i < n; i++) {
+ if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+ return 1;
+ }
+ return 0;
+ }
+ return __Pyx_InBases(a, b);
+}
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+ PyObject *exception, *value, *tb;
+ int res;
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&exception, &value, &tb);
+ res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ if (!res) {
+ res = PyObject_IsSubclass(err, exc_type2);
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ }
+ __Pyx_ErrRestore(exception, value, tb);
+ return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+ int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+ if (!res) {
+ res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+ }
+ return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ assert(PyExceptionClass_Check(exc_type));
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ PyObject *t = PyTuple_GET_ITEM(tuple, i);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(exc_type == t)) return 1;
+ #endif
+ if (likely(PyExceptionClass_Check(t))) {
+ if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+ } else {
+ }
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+ if (likely(err == exc_type)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ if (likely(PyExceptionClass_Check(exc_type))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+ } else if (likely(PyTuple_Check(exc_type))) {
+ return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+ } else {
+ }
+ }
+ return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+ assert(PyExceptionClass_Check(exc_type1));
+ assert(PyExceptionClass_Check(exc_type2));
+ if (likely(err == exc_type1 || err == exc_type2)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+ }
+ return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
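In CPython builds, the block above replaces calls to PyErr_GivenExceptionMatches with a direct walk of the exception type's MRO. A hedged pure-Python sketch of the matching rule it implements (an exception class matches a handler when the handler appears in the class's MRO; tuples match if any member does):

    def given_exception_matches(err_type, handler):
        if isinstance(handler, tuple):          # except (A, B):
            return any(given_exception_matches(err_type, h) for h in handler)
        return handler in err_type.__mro__      # subclass test via the MRO

    assert given_exception_matches(KeyError, LookupError)
    assert not given_exception_matches(KeyError, OSError)
    assert given_exception_matches(KeyError, (OSError, LookupError))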
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+ char ctversion[4], rtversion[4];
+ PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+ PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+ if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+ char message[200];
+ PyOS_snprintf(message, sizeof(message),
+ "compiletime version %s of module '%.100s' "
+ "does not match runtime version %s",
+ ctversion, __Pyx_MODULE_NAME, rtversion);
+ return PyErr_WarnEx(NULL, message, 1);
+ }
+ return 0;
+}
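Note that the 4-byte buffers hold only "X.Y" plus a terminator, so "%d.%d" truncates two-digit minor versions: 3.10 and 3.11 both become "3.1", and the ctversion[0]/ctversion[2] comparison cannot tell them apart (a known limitation of Cython 0.29-era generated code). A hedged restatement of the check in Python:

    import platform
    import warnings

    compiletime = "3.8"                      # hypothetical value fixed at build time
    runtime = platform.python_version()[:3]  # same three-character truncation
    if (compiletime[0], compiletime[2]) != (runtime[0], runtime[2]):
        warnings.warn("compiletime version %s does not match runtime version %s"
                      % (compiletime, runtime))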
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ if (PyObject_Hash(*t->p) == -1)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+ return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+ Py_ssize_t ignore;
+ return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ char* defenc_c;
+ PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+ if (!defenc) return NULL;
+ defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ {
+ char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+ char* c;
+ for (c = defenc_c; c < end; c++) {
+ if ((unsigned char) (*c) >= 128) {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+ }
+ }
+#endif
+ *length = PyBytes_GET_SIZE(defenc);
+ return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ if (likely(PyUnicode_IS_ASCII(o))) {
+ *length = PyUnicode_GET_LENGTH(o);
+ return PyUnicode_AsUTF8(o);
+ } else {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+#else
+ return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+ if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+ PyUnicode_Check(o)) {
+ return __Pyx_PyUnicode_AsStringAndSize(o, length);
+ } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+ if (PyByteArray_Check(o)) {
+ *length = PyByteArray_GET_SIZE(o);
+ return PyByteArray_AS_STRING(o);
+ } else
+#endif
+ {
+ char* result;
+ int r = PyBytes_AsStringAndSize(o, &result, length);
+ if (unlikely(r < 0)) {
+ return NULL;
+ } else {
+ return result;
+ }
+ }
+}
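A hedged Python-level summary of the dispatch in __Pyx_PyObject_AsStringAndSize above (the C helper hands back a borrowed char* into the object rather than a new bytes; the function name here is illustrative):

    def as_string_and_size(o):
        if isinstance(o, str):
            b = o.encode()   # subject to the module's configured c_string_encoding
            return b, len(b)
        if isinstance(o, (bytes, bytearray)):
            return bytes(o), len(o)
        raise TypeError("expected bytes, bytearray or str, got %s" % type(o).__name__)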
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
+ int retval;
+ if (unlikely(!x)) return -1;
+ retval = __Pyx_PyObject_IsTrue(x);
+ Py_DECREF(x);
+ return retval;
+}
+static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
+#if PY_MAJOR_VERSION >= 3
+ if (PyLong_Check(result)) {
+ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
+ "__int__ returned non-int (type %.200s). "
+ "The ability to return an instance of a strict subclass of int "
+ "is deprecated, and may be removed in a future version of Python.",
+ Py_TYPE(result)->tp_name)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ return result;
+ }
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%.4s__ returned non-%.4s (type %.200s)",
+ type_name, type_name, Py_TYPE(result)->tp_name);
+ Py_DECREF(result);
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
+#if CYTHON_USE_TYPE_SLOTS
+ PyNumberMethods *m;
+#endif
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x) || PyLong_Check(x)))
+#else
+ if (likely(PyLong_Check(x)))
+#endif
+ return __Pyx_NewRef(x);
+#if CYTHON_USE_TYPE_SLOTS
+ m = Py_TYPE(x)->tp_as_number;
+ #if PY_MAJOR_VERSION < 3
+ if (m && m->nb_int) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = m->nb_long(x);
+ }
+ #else
+ if (likely(m && m->nb_int)) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ #endif
+#else
+ if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
+ res = PyNumber_Int(x);
+ }
+#endif
+ if (likely(res)) {
+#if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
+#else
+ if (unlikely(!PyLong_CheckExact(res))) {
+#endif
+ return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
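A hedged Python model of the coercion rule __Pyx_PyNumber_IntOrLong applies on Python 3: ints (including subclasses) pass through, other objects are coerced via the type's __int__ slot, and an __int__ that returns a strict int subclass only triggers a DeprecationWarning:

    import warnings

    def int_or_long(x):
        if isinstance(x, int):
            return x
        nb_int = getattr(type(x), "__int__", None)
        if nb_int is None:
            raise TypeError("an integer is required")
        res = nb_int(x)
        if type(res) is int:
            return res
        if isinstance(res, int):
            warnings.warn("__int__ returned non-int", DeprecationWarning)
            return res
        raise TypeError("__int__ returned non-int (type %s)" % type(res).__name__)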
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject *x;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_CheckExact(b))) {
+ if (sizeof(Py_ssize_t) >= sizeof(long))
+ return PyInt_AS_LONG(b);
+ else
+ return PyInt_AsSsize_t(b);
+ }
+#endif
+ if (likely(PyLong_CheckExact(b))) {
+ #if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)b)->ob_digit;
+ const Py_ssize_t size = Py_SIZE(b);
+ if (likely(__Pyx_sst_abs(size) <= 1)) {
+ ival = likely(size) ? digits[0] : 0;
+ if (size == -1) ival = -ival;
+ return ival;
+ } else {
+ switch (size) {
+ case 2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ }
+ }
+ #endif
+ return PyLong_AsSsize_t(b);
+ }
+ x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
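The switch above reassembles small ints directly from CPython's internal digit array instead of calling PyLong_AsSsize_t. The composition rule, checked in plain Python (SHIFT is assumed to be PyLong_SHIFT = 30, its value on common 64-bit builds):

    SHIFT = 30
    digits = (123456, 789)                 # least-significant digit first
    assert (digits[1] << SHIFT) | digits[0] == 789 * 2**SHIFT + 123456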
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
+ return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+ return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/third_party/python/aiohttp/aiohttp/_http_writer.pyx b/third_party/python/aiohttp/aiohttp/_http_writer.pyx
new file mode 100644
index 0000000000..84b42fa1c3
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_http_writer.pyx
@@ -0,0 +1,151 @@
+from cpython.bytes cimport PyBytes_FromStringAndSize
+from cpython.exc cimport PyErr_NoMemory
+from cpython.mem cimport PyMem_Free, PyMem_Malloc, PyMem_Realloc
+from cpython.object cimport PyObject_Str
+from libc.stdint cimport uint8_t, uint64_t
+from libc.string cimport memcpy
+
+from multidict import istr
+
+DEF BUF_SIZE = 16 * 1024 # 16KiB
+cdef char BUFFER[BUF_SIZE]
+
+cdef object _istr = istr
+
+
+# ----------------- writer ---------------------------
+
+cdef struct Writer:
+ char *buf
+ Py_ssize_t size
+ Py_ssize_t pos
+
+
+cdef inline void _init_writer(Writer* writer):
+ writer.buf = &BUFFER[0]
+ writer.size = BUF_SIZE
+ writer.pos = 0
+
+
+cdef inline void _release_writer(Writer* writer):
+ if writer.buf != BUFFER:
+ PyMem_Free(writer.buf)
+
+
+cdef inline int _write_byte(Writer* writer, uint8_t ch):
+ cdef char * buf
+ cdef Py_ssize_t size
+
+ if writer.pos == writer.size:
+ # reallocate
+ size = writer.size + BUF_SIZE
+ if writer.buf == BUFFER:
+ buf = <char*>PyMem_Malloc(size)
+ if buf == NULL:
+ PyErr_NoMemory()
+ return -1
+ memcpy(buf, writer.buf, writer.size)
+ else:
+ buf = <char*>PyMem_Realloc(writer.buf, size)
+ if buf == NULL:
+ PyErr_NoMemory()
+ return -1
+ writer.buf = buf
+ writer.size = size
+ writer.buf[writer.pos] = <char>ch
+ writer.pos += 1
+ return 0
+
+
+cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol):
+ cdef uint64_t utf = <uint64_t> symbol
+
+ if utf < 0x80:
+ return _write_byte(writer, <uint8_t>utf)
+ elif utf < 0x800:
+ if _write_byte(writer, <uint8_t>(0xc0 | (utf >> 6))) < 0:
+ return -1
+ return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ elif 0xD800 <= utf <= 0xDFFF:
+        # lone surrogate code point, ignored
+ return 0
+ elif utf < 0x10000:
+ if _write_byte(writer, <uint8_t>(0xe0 | (utf >> 12))) < 0:
+ return -1
+ if _write_byte(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ return -1
+ return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+ elif utf > 0x10FFFF:
+        # code point above U+10FFFF, ignored
+ return 0
+ else:
+ if _write_byte(writer, <uint8_t>(0xf0 | (utf >> 18))) < 0:
+ return -1
+ if _write_byte(writer,
+ <uint8_t>(0x80 | ((utf >> 12) & 0x3f))) < 0:
+ return -1
+ if _write_byte(writer,
+ <uint8_t>(0x80 | ((utf >> 6) & 0x3f))) < 0:
+ return -1
+ return _write_byte(writer, <uint8_t>(0x80 | (utf & 0x3f)))
+
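A hedged pure-Python model of _write_utf8's branch structure above, including its policy of silently dropping lone surrogates and out-of-range code points (names are illustrative):

    def utf8_bytes(cp: int) -> bytes:
        if cp < 0x80:
            return bytes([cp])
        if cp < 0x800:
            return bytes([0xC0 | (cp >> 6), 0x80 | (cp & 0x3F)])
        if 0xD800 <= cp <= 0xDFFF or cp > 0x10FFFF:
            return b""                     # skipped, as in _write_utf8
        if cp < 0x10000:
            return bytes([0xE0 | (cp >> 12),
                          0x80 | ((cp >> 6) & 0x3F),
                          0x80 | (cp & 0x3F)])
        return bytes([0xF0 | (cp >> 18),
                      0x80 | ((cp >> 12) & 0x3F),
                      0x80 | ((cp >> 6) & 0x3F),
                      0x80 | (cp & 0x3F)])

    assert utf8_bytes(ord("€")) == "€".encode("utf-8")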
+
+cdef inline int _write_str(Writer* writer, str s):
+ cdef Py_UCS4 ch
+ for ch in s:
+ if _write_utf8(writer, ch) < 0:
+            return -1
+    return 0
+
+
+# --------------- _serialize_headers ----------------------
+
+cdef str to_str(object s):
+ typ = type(s)
+ if typ is str:
+ return <str>s
+ elif typ is _istr:
+ return PyObject_Str(s)
+ elif not isinstance(s, str):
+ raise TypeError("Cannot serialize non-str key {!r}".format(s))
+ else:
+ return str(s)
+
+
+def _serialize_headers(str status_line, headers):
+ cdef Writer writer
+ cdef object key
+ cdef object val
+ cdef bytes ret
+
+ _init_writer(&writer)
+
+ try:
+ if _write_str(&writer, status_line) < 0:
+ raise
+ if _write_byte(&writer, b'\r') < 0:
+ raise
+ if _write_byte(&writer, b'\n') < 0:
+ raise
+
+ for key, val in headers.items():
+ if _write_str(&writer, to_str(key)) < 0:
+ raise
+ if _write_byte(&writer, b':') < 0:
+ raise
+ if _write_byte(&writer, b' ') < 0:
+ raise
+ if _write_str(&writer, to_str(val)) < 0:
+ raise
+ if _write_byte(&writer, b'\r') < 0:
+ raise
+ if _write_byte(&writer, b'\n') < 0:
+ raise
+
+ if _write_byte(&writer, b'\r') < 0:
+ raise
+ if _write_byte(&writer, b'\n') < 0:
+ raise
+
+ return PyBytes_FromStringAndSize(writer.buf, writer.pos)
+ finally:
+ _release_writer(&writer)
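For comparison, a hedged pure-Python equivalent of _serialize_headers (illustrative only; errors="ignore" approximates the silent dropping of unencodable surrogates performed by _write_utf8):

    def py_serialize_headers(status_line: str, headers) -> bytes:
        out = status_line + "\r\n"
        out += "".join("%s: %s\r\n" % (key, val) for key, val in headers.items())
        out += "\r\n"
        return out.encode("utf-8", errors="ignore")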
diff --git a/third_party/python/aiohttp/aiohttp/_websocket.c b/third_party/python/aiohttp/aiohttp/_websocket.c
new file mode 100644
index 0000000000..4891c24a84
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_websocket.c
@@ -0,0 +1,3588 @@
+/* Generated by Cython 0.29.21 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+ #error Cython requires Python 2.6+ or Python 3.3+.
+#else
+#define CYTHON_ABI "0_29_21"
+#define CYTHON_HEX_VERSION 0x001D15F0
+#define CYTHON_FUTURE_DIVISION 1
+#include <stddef.h>
+#ifndef offsetof
+ #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef HAVE_LONG_LONG
+ #if PY_VERSION_HEX >= 0x02070000
+ #define HAVE_LONG_LONG
+ #endif
+#endif
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+ #define CYTHON_COMPILING_IN_PYPY 1
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #if PY_VERSION_HEX < 0x03050000
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#elif defined(PYSTON_VERSION)
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 1
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#else
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 1
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
+ #define CYTHON_USE_PYTYPE_LOOKUP 1
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
+ #define CYTHON_USE_PYLONG_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #if PY_VERSION_HEX < 0x030300F0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #elif !defined(CYTHON_USE_UNICODE_WRITER)
+ #define CYTHON_USE_UNICODE_WRITER 1
+ #endif
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #ifndef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 1
+ #endif
+ #ifndef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
+ #endif
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
+ #endif
+ #ifndef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
+ #endif
+ #ifndef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
+ #endif
+#endif
+#if !defined(CYTHON_FAST_PYCCALL)
+#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
+#endif
+#if CYTHON_USE_PYLONG_INTERNALS
+ #include "longintrepr.h"
+ #undef SHIFT
+ #undef BASE
+ #undef MASK
+ #ifdef SIZEOF_VOID_P
+ enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+ #endif
+#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+# if defined(__cplusplus)
+ template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include <stdint.h>
+#endif
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
+ #define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyClass_Type
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#endif
+ #define __Pyx_DefaultClassType PyType_Type
+#endif
+#ifndef Py_TPFLAGS_CHECKTYPES
+ #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#ifndef METH_STACKLESS
+ #define METH_STACKLESS 0
+#endif
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ #ifndef METH_FASTCALL
+ #define METH_FASTCALL 0x80
+ #endif
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
+ Py_ssize_t nargs, PyObject *kwnames);
+#else
+ #define __Pyx_PyCFunctionFast _PyCFunctionFast
+ #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
+#endif
+#if CYTHON_FAST_PYCCALL
+#define __Pyx_PyFastCFunction_Check(func)\
+ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
+#else
+#define __Pyx_PyFastCFunction_Check(func) 0
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
+ #define PyMem_RawMalloc(n) PyMem_Malloc(n)
+ #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
+ #define PyMem_RawFree(p) PyMem_Free(p)
+#endif
+#if CYTHON_COMPILING_IN_PYSTON
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x03060000
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
+#elif PY_VERSION_HEX >= 0x03000000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#else
+ #define __Pyx_PyThreadState_Current _PyThreadState_Current
+#endif
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+ *key = PyThread_create_key();
+ return 0;
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+ Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+ *key = Py_tss_NEEDS_INIT;
+ return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+ PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+ return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+ PyThread_delete_key(*key);
+ *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+ return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+ return PyThread_get_key_value(*key);
+}
+#endif
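The block above backfills the PEP 539 thread-specific storage API (PyThread_tss_*) on top of the legacy key-based API for older Pythons. The behaviour it preserves is per-thread storage, roughly what threading.local exposes at the Python level:

    import threading

    slot = threading.local()                 # stands in for an allocated Py_tss_t
    slot.value = "per-thread data"           # PyThread_tss_set
    assert slot.value == "per-thread data"   # PyThread_tss_get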
+#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
+#else
+#define __Pyx_PyDict_NewPresized(n) PyDict_New()
+#endif
+#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+ #define CYTHON_PEP393_ENABLED 1
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
+ #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
+ #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
+ #else
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
+ #endif
+#else
+ #define CYTHON_PEP393_ENABLED 0
+ #define PyUnicode_1BYTE_KIND 1
+ #define PyUnicode_2BYTE_KIND 2
+ #define PyUnicode_4BYTE_KIND 4
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
+ #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+ #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
+ #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
+ #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
+#endif
+#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
+#else
+ #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
+ #define PyObject_ASCII(o) PyObject_Repr(o)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#ifndef PyObject_Unicode
+ #define PyObject_Unicode PyObject_Str
+#endif
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+#if PY_VERSION_HEX >= 0x030900A4
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
+#else
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
+#endif
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+ #define PyNumber_Int PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+ #ifndef PyUnicode_InternFromString
+ #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+ #endif
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
+#else
+ #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
+#if CYTHON_USE_ASYNC_SLOTS
+ #if PY_VERSION_HEX >= 0x030500B1
+ #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+ #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+ #else
+ #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+ #endif
+#else
+ #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+ typedef struct {
+ unaryfunc am_await;
+ unaryfunc am_aiter;
+ unaryfunc am_anext;
+ } __Pyx_PyAsyncMethodsStruct;
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+ #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+ float value;
+ memset(&value, 0xFF, sizeof(value));
+ return value;
+}
+#endif
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+#define __PYX_MARK_ERR_POS(f_index, lineno) \
+ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#define __PYX_HAVE__aiohttp___websocket
+#define __PYX_HAVE_API__aiohttp___websocket
+/* Early includes */
+#include <string.h>
+#include <stdio.h>
+#include "pythread.h"
+#include <stdint.h>
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+ (sizeof(type) < sizeof(Py_ssize_t)) ||\
+ (sizeof(type) > sizeof(Py_ssize_t) &&\
+ likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX) &&\
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+ v == (type)PY_SSIZE_T_MIN))) ||\
+ (sizeof(type) == sizeof(Py_ssize_t) &&\
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX))) )
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+ return (size_t) i < (size_t) limit;
+}
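__Pyx_is_valid_index folds the usual two bounds checks (0 <= i and i < limit) into a single unsigned comparison: casting a negative i to size_t wraps it to a huge value that necessarily fails the < limit test. The same trick modelled in Python with explicit 64-bit wraparound:

    def is_valid_index(i: int, limit: int, bits: int = 64) -> bool:
        return (i % 2**bits) < limit   # negative i wraps far above any limit

    assert is_valid_index(3, 10)
    assert not is_valid_index(-1, 10)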
+#if defined (__cplusplus) && __cplusplus >= 201103L
+ #include <cstdlib>
+ #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+ #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+ #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+ #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
+ const Py_UNICODE *u_end = u;
+ while (*u_end++) ;
+ return (size_t)(u_end - u - 1);
+}
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
+#define __Pyx_PySequence_Tuple(obj)\
+ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_ASSUME_SAFE_MACROS
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
+#else
+#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
+#endif
+#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ const char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ if (strcmp(default_encoding_c, "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (!ascii_chars_u) goto bad;
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ Py_DECREF(ascii_chars_u);
+ Py_DECREF(ascii_chars_b);
+ }
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
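The function above is only compiled when c_string_encoding=ascii is in effect (it is not for this module, since __PYX_DEFAULT_STRING_ENCODING_IS_ASCII is 0). What it verifies, restated in Python: the interpreter's default encoding must map all 128 ASCII characters to their own byte values:

    import sys

    enc = sys.getdefaultencoding()
    assert "".join(map(chr, range(128))).encode(enc) == bytes(range(128))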
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+
+static PyObject *__pyx_m = NULL;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_cython_runtime = NULL;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "aiohttp/_websocket.pyx",
+ "type.pxd",
+ "bool.pxd",
+ "complex.pxd",
+};
+
+/*--- Type declarations ---*/
+
+/* --- Runtime support code (head) --- */
+/* Refnanny.proto */
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ if (acquire_gil) {\
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ PyGILState_Release(__pyx_gilstate_save);\
+ } else {\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ }
+#else
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+ #define __Pyx_RefNannyFinishContext()\
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_DECREF(tmp);\
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
+
+/* PyObjectGetAttrStr.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/* GetBuiltinName.proto */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+/* RaiseArgTupleInvalid.proto */
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
+
+/* RaiseDoubleKeywords.proto */
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
+
+/* ParseKeywords.proto */
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
+ const char* function_name);
+
+/* PyCFunctionFastCall.proto */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
+#else
+#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
+#endif
+
+/* PyFunctionFastCall.proto */
+#if CYTHON_FAST_PYCALL
+#define __Pyx_PyFunction_FastCall(func, args, nargs)\
+ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
+#else
+#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
+#endif
+#define __Pyx_BUILD_ASSERT_EXPR(cond)\
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+#ifndef Py_MEMBER_SIZE
+#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
+#endif
+ static size_t __pyx_pyframe_localsplus_offset = 0;
+ #include "frameobject.h"
+ #define __Pxy_PyFrame_Initialize_Offsets()\
+ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
+ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
+ #define __Pyx_PyFrame_GetLocalsplus(frame)\
+ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
+#endif
+
+/* PyObjectCall.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+/* PyObjectCallMethO.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectCallOneArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+/* TypeImport.proto */
+#ifndef __PYX_HAVE_RT_ImportType_proto
+#define __PYX_HAVE_RT_ImportType_proto
+enum __Pyx_ImportType_CheckSize {
+ __Pyx_ImportType_CheckSize_Error = 0,
+ __Pyx_ImportType_CheckSize_Warn = 1,
+ __Pyx_ImportType_CheckSize_Ignore = 2
+};
+static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
+#endif
+
+/* PyDictVersioning.proto */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
+#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
+ (version_var) = __PYX_GET_DICT_VERSION(dict);\
+ (cache_var) = (value);
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
+ (VAR) = __pyx_dict_cached_value;\
+ } else {\
+ (VAR) = __pyx_dict_cached_value = (LOOKUP);\
+ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
+ }\
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
+#else
+#define __PYX_GET_DICT_VERSION(dict) (0)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
+#endif
+
+/* PyThreadStateGet.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
+#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
+#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
+#else
+#define __Pyx_PyThreadState_declare
+#define __Pyx_PyThreadState_assign
+#define __Pyx_PyErr_Occurred() PyErr_Occurred()
+#endif
+
+/* PyErrFetchRestore.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
+#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
+#else
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#endif
+#else
+#define __Pyx_PyErr_Clear() PyErr_Clear()
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
+#endif
+
+/* CLineInTraceback.proto */
+#ifdef CYTHON_CLINE_IN_TRACEBACK
+#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
+#else
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
+#endif
+
+/* CodeObjectCache.proto */
+typedef struct {
+ PyCodeObject* code_object;
+ int code_line;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+/* AddTraceback.proto */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+/* FastTypeChecks.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
+#else
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
+#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
+/* CheckBinaryVersion.proto */
+static int __Pyx_check_binary_version(void);
+
+/* InitStrings.proto */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
+
+
+/* Module declarations from 'cpython.version' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.ref' */
+
+/* Module declarations from 'cpython.exc' */
+
+/* Module declarations from 'cpython.module' */
+
+/* Module declarations from 'cpython.mem' */
+
+/* Module declarations from 'cpython.tuple' */
+
+/* Module declarations from 'cpython.list' */
+
+/* Module declarations from 'cpython.sequence' */
+
+/* Module declarations from 'cpython.mapping' */
+
+/* Module declarations from 'cpython.iterator' */
+
+/* Module declarations from 'cpython.number' */
+
+/* Module declarations from 'cpython.int' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.bool' */
+static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0;
+
+/* Module declarations from 'cpython.long' */
+
+/* Module declarations from 'cpython.float' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.complex' */
+static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0;
+
+/* Module declarations from 'cpython.string' */
+
+/* Module declarations from 'cpython.unicode' */
+
+/* Module declarations from 'cpython.dict' */
+
+/* Module declarations from 'cpython.instance' */
+
+/* Module declarations from 'cpython.function' */
+
+/* Module declarations from 'cpython.method' */
+
+/* Module declarations from 'cpython.weakref' */
+
+/* Module declarations from 'cpython.getargs' */
+
+/* Module declarations from 'cpython.pythread' */
+
+/* Module declarations from 'cpython.pystate' */
+
+/* Module declarations from 'cpython.cobject' */
+
+/* Module declarations from 'cpython.oldbuffer' */
+
+/* Module declarations from 'cpython.set' */
+
+/* Module declarations from 'cpython.buffer' */
+
+/* Module declarations from 'cpython.bytes' */
+
+/* Module declarations from 'cpython.pycapsule' */
+
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'libc.stdint' */
+
+/* Module declarations from 'aiohttp._websocket' */
+#define __Pyx_MODULE_NAME "aiohttp._websocket"
+extern int __pyx_module_is_main_aiohttp___websocket;
+int __pyx_module_is_main_aiohttp___websocket = 0;
+
+/* Implementation of 'aiohttp._websocket' */
+static PyObject *__pyx_builtin_range;
+static const char __pyx_k_i[] = "i";
+static const char __pyx_k_data[] = "data";
+static const char __pyx_k_main[] = "__main__";
+static const char __pyx_k_mask[] = "mask";
+static const char __pyx_k_name[] = "__name__";
+static const char __pyx_k_test[] = "__test__";
+static const char __pyx_k_range[] = "range";
+static const char __pyx_k_in_buf[] = "in_buf";
+static const char __pyx_k_data_len[] = "data_len";
+static const char __pyx_k_mask_buf[] = "mask_buf";
+static const char __pyx_k_uint32_msk[] = "uint32_msk";
+static const char __pyx_k_uint64_msk[] = "uint64_msk";
+static const char __pyx_k_aiohttp__websocket[] = "aiohttp._websocket";
+static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
+static const char __pyx_k_websocket_mask_cython[] = "_websocket_mask_cython";
+static const char __pyx_k_aiohttp__websocket_pyx[] = "aiohttp/_websocket.pyx";
+static PyObject *__pyx_n_s_aiohttp__websocket;
+static PyObject *__pyx_kp_s_aiohttp__websocket_pyx;
+static PyObject *__pyx_n_s_cline_in_traceback;
+static PyObject *__pyx_n_s_data;
+static PyObject *__pyx_n_s_data_len;
+static PyObject *__pyx_n_s_i;
+static PyObject *__pyx_n_s_in_buf;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_mask;
+static PyObject *__pyx_n_s_mask_buf;
+static PyObject *__pyx_n_s_name;
+static PyObject *__pyx_n_s_range;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_uint32_msk;
+static PyObject *__pyx_n_s_uint64_msk;
+static PyObject *__pyx_n_s_websocket_mask_cython;
+static PyObject *__pyx_pf_7aiohttp_10_websocket__websocket_mask_cython(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask, PyObject *__pyx_v_data); /* proto */
+static PyObject *__pyx_tuple_;
+static PyObject *__pyx_codeobj__2;
+/* Late includes */
+
+/* "aiohttp/_websocket.pyx":11
+ *
+ *
+ * def _websocket_mask_cython(object mask, object data): # <<<<<<<<<<<<<<
+ * """Note, this function mutates its `data` argument
+ * """
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_7aiohttp_10_websocket_1_websocket_mask_cython(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static char __pyx_doc_7aiohttp_10_websocket__websocket_mask_cython[] = "Note, this function mutates its `data` argument\n ";
+static PyMethodDef __pyx_mdef_7aiohttp_10_websocket_1_websocket_mask_cython = {"_websocket_mask_cython", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7aiohttp_10_websocket_1_websocket_mask_cython, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7aiohttp_10_websocket__websocket_mask_cython};
+static PyObject *__pyx_pw_7aiohttp_10_websocket_1_websocket_mask_cython(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_mask = 0;
+ PyObject *__pyx_v_data = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("_websocket_mask_cython (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_mask,&__pyx_n_s_data,0};
+ PyObject* values[2] = {0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mask)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("_websocket_mask_cython", 1, 2, 2, 1); __PYX_ERR(0, 11, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "_websocket_mask_cython") < 0)) __PYX_ERR(0, 11, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ }
+ __pyx_v_mask = values[0];
+ __pyx_v_data = values[1];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("_websocket_mask_cython", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 11, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("aiohttp._websocket._websocket_mask_cython", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_7aiohttp_10_websocket__websocket_mask_cython(__pyx_self, __pyx_v_mask, __pyx_v_data);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
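+
+/* The wrapper above implements the standard METH_VARARGS|METH_KEYWORDS
+ * calling convention generated for a `def` function: purely positional
+ * calls take the fast path that only checks PyTuple_GET_SIZE(__pyx_args),
+ * while calls passing keywords go through __Pyx_PyDict_GetItemStr and
+ * __Pyx_ParseOptionalKeywords. Both paths fill the same values[] array
+ * before dispatching to the implementation function below. */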
+
+static PyObject *__pyx_pf_7aiohttp_10_websocket__websocket_mask_cython(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_mask, PyObject *__pyx_v_data) {
+ Py_ssize_t __pyx_v_data_len;
+ Py_ssize_t __pyx_v_i;
+ unsigned char *__pyx_v_in_buf;
+ unsigned char const *__pyx_v_mask_buf;
+ uint32_t __pyx_v_uint32_msk;
+ uint64_t __pyx_v_uint64_msk;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ Py_ssize_t __pyx_t_1;
+ int __pyx_t_2;
+ int __pyx_t_3;
+ PyObject *__pyx_t_4 = NULL;
+ char *__pyx_t_5;
+ uint64_t *__pyx_t_6;
+ long __pyx_t_7;
+ uint32_t *__pyx_t_8;
+ Py_ssize_t __pyx_t_9;
+ Py_ssize_t __pyx_t_10;
+ Py_ssize_t __pyx_t_11;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_websocket_mask_cython", 0);
+ __Pyx_INCREF(__pyx_v_mask);
+ __Pyx_INCREF(__pyx_v_data);
+
+ /* "aiohttp/_websocket.pyx":22
+ * uint64_t uint64_msk
+ *
+ * assert len(mask) == 4 # <<<<<<<<<<<<<<
+ *
+ * if not isinstance(mask, bytes):
+ */
+ #ifndef CYTHON_WITHOUT_ASSERTIONS
+ if (unlikely(!Py_OptimizeFlag)) {
+ __pyx_t_1 = PyObject_Length(__pyx_v_mask); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 22, __pyx_L1_error)
+ if (unlikely(!((__pyx_t_1 == 4) != 0))) {
+ PyErr_SetNone(PyExc_AssertionError);
+ __PYX_ERR(0, 22, __pyx_L1_error)
+ }
+ }
+ #endif
+
+ /* "aiohttp/_websocket.pyx":24
+ * assert len(mask) == 4
+ *
+ * if not isinstance(mask, bytes): # <<<<<<<<<<<<<<
+ * mask = bytes(mask)
+ *
+ */
+ __pyx_t_2 = PyBytes_Check(__pyx_v_mask);
+ __pyx_t_3 = ((!(__pyx_t_2 != 0)) != 0);
+ if (__pyx_t_3) {
+
+ /* "aiohttp/_websocket.pyx":25
+ *
+ * if not isinstance(mask, bytes):
+ * mask = bytes(mask) # <<<<<<<<<<<<<<
+ *
+ * if isinstance(data, bytearray):
+ */
+ __pyx_t_4 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyBytes_Type)), __pyx_v_mask); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 25, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF_SET(__pyx_v_mask, __pyx_t_4);
+ __pyx_t_4 = 0;
+
+ /* "aiohttp/_websocket.pyx":24
+ * assert len(mask) == 4
+ *
+ * if not isinstance(mask, bytes): # <<<<<<<<<<<<<<
+ * mask = bytes(mask)
+ *
+ */
+ }
+
+ /* "aiohttp/_websocket.pyx":27
+ * mask = bytes(mask)
+ *
+ * if isinstance(data, bytearray): # <<<<<<<<<<<<<<
+ * data = <bytearray>data
+ * else:
+ */
+ __pyx_t_3 = PyByteArray_Check(__pyx_v_data);
+ __pyx_t_2 = (__pyx_t_3 != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_websocket.pyx":28
+ *
+ * if isinstance(data, bytearray):
+ * data = <bytearray>data # <<<<<<<<<<<<<<
+ * else:
+ * data = bytearray(data)
+ */
+ __pyx_t_4 = __pyx_v_data;
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_DECREF_SET(__pyx_v_data, __pyx_t_4);
+ __pyx_t_4 = 0;
+
+ /* "aiohttp/_websocket.pyx":27
+ * mask = bytes(mask)
+ *
+ * if isinstance(data, bytearray): # <<<<<<<<<<<<<<
+ * data = <bytearray>data
+ * else:
+ */
+ goto __pyx_L4;
+ }
+
+ /* "aiohttp/_websocket.pyx":30
+ * data = <bytearray>data
+ * else:
+ * data = bytearray(data) # <<<<<<<<<<<<<<
+ *
+ * data_len = len(data)
+ */
+ /*else*/ {
+ __pyx_t_4 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyByteArray_Type)), __pyx_v_data); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 30, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF_SET(__pyx_v_data, __pyx_t_4);
+ __pyx_t_4 = 0;
+ }
+ __pyx_L4:;
+
+ /* "aiohttp/_websocket.pyx":32
+ * data = bytearray(data)
+ *
+ * data_len = len(data) # <<<<<<<<<<<<<<
+ * in_buf = <unsigned char*>PyByteArray_AsString(data)
+ * mask_buf = <const unsigned char*>PyBytes_AsString(mask)
+ */
+ __pyx_t_1 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 32, __pyx_L1_error)
+ __pyx_v_data_len = __pyx_t_1;
+
+ /* "aiohttp/_websocket.pyx":33
+ *
+ * data_len = len(data)
+ * in_buf = <unsigned char*>PyByteArray_AsString(data) # <<<<<<<<<<<<<<
+ * mask_buf = <const unsigned char*>PyBytes_AsString(mask)
+ * uint32_msk = (<uint32_t*>mask_buf)[0]
+ */
+ if (!(likely(PyByteArray_CheckExact(__pyx_v_data))||((__pyx_v_data) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytearray", Py_TYPE(__pyx_v_data)->tp_name), 0))) __PYX_ERR(0, 33, __pyx_L1_error)
+ __pyx_t_5 = PyByteArray_AsString(((PyObject*)__pyx_v_data)); if (unlikely(__pyx_t_5 == ((char *)NULL))) __PYX_ERR(0, 33, __pyx_L1_error)
+ __pyx_v_in_buf = ((unsigned char *)__pyx_t_5);
+
+ /* "aiohttp/_websocket.pyx":34
+ * data_len = len(data)
+ * in_buf = <unsigned char*>PyByteArray_AsString(data)
+ * mask_buf = <const unsigned char*>PyBytes_AsString(mask) # <<<<<<<<<<<<<<
+ * uint32_msk = (<uint32_t*>mask_buf)[0]
+ *
+ */
+ __pyx_t_5 = PyBytes_AsString(__pyx_v_mask); if (unlikely(__pyx_t_5 == ((char *)NULL))) __PYX_ERR(0, 34, __pyx_L1_error)
+ __pyx_v_mask_buf = ((unsigned char const *)__pyx_t_5);
+
+ /* "aiohttp/_websocket.pyx":35
+ * in_buf = <unsigned char*>PyByteArray_AsString(data)
+ * mask_buf = <const unsigned char*>PyBytes_AsString(mask)
+ * uint32_msk = (<uint32_t*>mask_buf)[0] # <<<<<<<<<<<<<<
+ *
+ * # TODO: align in_data ptr to achieve even faster speeds
+ */
+ __pyx_v_uint32_msk = (((uint32_t *)__pyx_v_mask_buf)[0]);
+
+ /* "aiohttp/_websocket.pyx":40
+ * # is this needed in Python?! malloc() always aligns to sizeof(long) bytes
+ *
+ * if sizeof(size_t) >= 8: # <<<<<<<<<<<<<<
+ * uint64_msk = uint32_msk
+ * uint64_msk = (uint64_msk << 32) | uint32_msk
+ */
+ __pyx_t_2 = (((sizeof(size_t)) >= 8) != 0);
+ if (__pyx_t_2) {
+
+ /* "aiohttp/_websocket.pyx":41
+ *
+ * if sizeof(size_t) >= 8:
+ * uint64_msk = uint32_msk # <<<<<<<<<<<<<<
+ * uint64_msk = (uint64_msk << 32) | uint32_msk
+ *
+ */
+ __pyx_v_uint64_msk = __pyx_v_uint32_msk;
+
+ /* "aiohttp/_websocket.pyx":42
+ * if sizeof(size_t) >= 8:
+ * uint64_msk = uint32_msk
+ * uint64_msk = (uint64_msk << 32) | uint32_msk # <<<<<<<<<<<<<<
+ *
+ * while data_len >= 8:
+ */
+ __pyx_v_uint64_msk = ((__pyx_v_uint64_msk << 32) | __pyx_v_uint32_msk);
+
+ /* "aiohttp/_websocket.pyx":44
+ * uint64_msk = (uint64_msk << 32) | uint32_msk
+ *
+ * while data_len >= 8: # <<<<<<<<<<<<<<
+ * (<uint64_t*>in_buf)[0] ^= uint64_msk
+ * in_buf += 8
+ */
+ while (1) {
+ __pyx_t_2 = ((__pyx_v_data_len >= 8) != 0);
+ if (!__pyx_t_2) break;
+
+ /* "aiohttp/_websocket.pyx":45
+ *
+ * while data_len >= 8:
+ * (<uint64_t*>in_buf)[0] ^= uint64_msk # <<<<<<<<<<<<<<
+ * in_buf += 8
+ * data_len -= 8
+ */
+ __pyx_t_6 = ((uint64_t *)__pyx_v_in_buf);
+ __pyx_t_7 = 0;
+ (__pyx_t_6[__pyx_t_7]) = ((__pyx_t_6[__pyx_t_7]) ^ __pyx_v_uint64_msk);
+
+ /* "aiohttp/_websocket.pyx":46
+ * while data_len >= 8:
+ * (<uint64_t*>in_buf)[0] ^= uint64_msk
+ * in_buf += 8 # <<<<<<<<<<<<<<
+ * data_len -= 8
+ *
+ */
+ __pyx_v_in_buf = (__pyx_v_in_buf + 8);
+
+ /* "aiohttp/_websocket.pyx":47
+ * (<uint64_t*>in_buf)[0] ^= uint64_msk
+ * in_buf += 8
+ * data_len -= 8 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_v_data_len = (__pyx_v_data_len - 8);
+ }
+
+ /* "aiohttp/_websocket.pyx":40
+ * # is this needed in Python?! malloc() always aligns to sizeof(long) bytes
+ *
+ * if sizeof(size_t) >= 8: # <<<<<<<<<<<<<<
+ * uint64_msk = uint32_msk
+ * uint64_msk = (uint64_msk << 32) | uint32_msk
+ */
+ }
+
+ /* "aiohttp/_websocket.pyx":50
+ *
+ *
+ * while data_len >= 4: # <<<<<<<<<<<<<<
+ * (<uint32_t*>in_buf)[0] ^= uint32_msk
+ * in_buf += 4
+ */
+ while (1) {
+ __pyx_t_2 = ((__pyx_v_data_len >= 4) != 0);
+ if (!__pyx_t_2) break;
+
+ /* "aiohttp/_websocket.pyx":51
+ *
+ * while data_len >= 4:
+ * (<uint32_t*>in_buf)[0] ^= uint32_msk # <<<<<<<<<<<<<<
+ * in_buf += 4
+ * data_len -= 4
+ */
+ __pyx_t_8 = ((uint32_t *)__pyx_v_in_buf);
+ __pyx_t_7 = 0;
+ (__pyx_t_8[__pyx_t_7]) = ((__pyx_t_8[__pyx_t_7]) ^ __pyx_v_uint32_msk);
+
+ /* "aiohttp/_websocket.pyx":52
+ * while data_len >= 4:
+ * (<uint32_t*>in_buf)[0] ^= uint32_msk
+ * in_buf += 4 # <<<<<<<<<<<<<<
+ * data_len -= 4
+ *
+ */
+ __pyx_v_in_buf = (__pyx_v_in_buf + 4);
+
+ /* "aiohttp/_websocket.pyx":53
+ * (<uint32_t*>in_buf)[0] ^= uint32_msk
+ * in_buf += 4
+ * data_len -= 4 # <<<<<<<<<<<<<<
+ *
+ * for i in range(0, data_len):
+ */
+ __pyx_v_data_len = (__pyx_v_data_len - 4);
+ }
+
+ /* "aiohttp/_websocket.pyx":55
+ * data_len -= 4
+ *
+ * for i in range(0, data_len): # <<<<<<<<<<<<<<
+ * in_buf[i] ^= mask_buf[i]
+ */
+ __pyx_t_1 = __pyx_v_data_len;
+ __pyx_t_9 = __pyx_t_1;
+ for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) {
+ __pyx_v_i = __pyx_t_10;
+
+ /* "aiohttp/_websocket.pyx":56
+ *
+ * for i in range(0, data_len):
+ * in_buf[i] ^= mask_buf[i] # <<<<<<<<<<<<<<
+ */
+ __pyx_t_11 = __pyx_v_i;
+ (__pyx_v_in_buf[__pyx_t_11]) = ((__pyx_v_in_buf[__pyx_t_11]) ^ (__pyx_v_mask_buf[__pyx_v_i]));
+ }
+
+ /* "aiohttp/_websocket.pyx":11
+ *
+ *
+ * def _websocket_mask_cython(object mask, object data): # <<<<<<<<<<<<<<
+ * """Note, this function mutates its `data` argument
+ * """
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_AddTraceback("aiohttp._websocket._websocket_mask_cython", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_mask);
+ __Pyx_XDECREF(__pyx_v_data);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
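+
+/* The function above is the hot path of RFC 6455 WebSocket frame
+ * (un)masking: the payload is XORed in place with a repeating 4-byte key.
+ * On 64-bit platforms the key is first widened to a 64-bit word so the
+ * bulk of the buffer is processed eight bytes per XOR, then four, then
+ * byte-by-byte for the tail; because the word loops consume multiples of
+ * four bytes, the mask phase always restarts at key byte 0 for the tail.
+ * A minimal self-contained sketch of the same technique is kept below
+ * under `#if 0` for reference (never compiled; the name ws_mask is
+ * illustrative, not part of this module). It uses memcpy instead of the
+ * pointer casts above to stay free of alignment and strict-aliasing
+ * assumptions. */
+#if 0
+#include <stdint.h>
+#include <stddef.h>
+#include <string.h>
+
+static void ws_mask(unsigned char *buf, size_t len, const unsigned char key[4])
+{
+    uint32_t m32;
+    memcpy(&m32, key, 4);                 /* native-endian copy of the key */
+    if (sizeof(size_t) >= 8) {
+        uint64_t m64 = ((uint64_t)m32 << 32) | m32;  /* key repeated twice */
+        while (len >= 8) {                /* 8-byte strides */
+            uint64_t w;
+            memcpy(&w, buf, 8);
+            w ^= m64;
+            memcpy(buf, &w, 8);
+            buf += 8; len -= 8;
+        }
+    }
+    while (len >= 4) {                    /* 4-byte strides */
+        uint32_t w;
+        memcpy(&w, buf, 4);
+        w ^= m32;
+        memcpy(buf, &w, 4);
+        buf += 4; len -= 4;
+    }
+    for (size_t i = 0; i < len; i++)      /* at most 3 trailing bytes */
+        buf[i] ^= key[i];
+}
+#endif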
+
+static PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
+#if PY_MAJOR_VERSION >= 3
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
+static int __pyx_pymod_exec__websocket(PyObject* module); /*proto*/
+static PyModuleDef_Slot __pyx_moduledef_slots[] = {
+ {Py_mod_create, (void*)__pyx_pymod_create},
+ {Py_mod_exec, (void*)__pyx_pymod_exec__websocket},
+ {0, NULL}
+};
+#endif
+
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_websocket",
+ 0, /* m_doc */
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ 0, /* m_size */
+ #else
+ -1, /* m_size */
+ #endif
+ __pyx_methods /* m_methods */,
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_moduledef_slots, /* m_slots */
+ #else
+ NULL, /* m_reload */
+ #endif
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+ #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+ #define CYTHON_SMALL_CODE __attribute__((cold))
+#else
+ #define CYTHON_SMALL_CODE
+#endif
+#endif
+
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_n_s_aiohttp__websocket, __pyx_k_aiohttp__websocket, sizeof(__pyx_k_aiohttp__websocket), 0, 0, 1, 1},
+ {&__pyx_kp_s_aiohttp__websocket_pyx, __pyx_k_aiohttp__websocket_pyx, sizeof(__pyx_k_aiohttp__websocket_pyx), 0, 0, 1, 0},
+ {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
+ {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1},
+ {&__pyx_n_s_data_len, __pyx_k_data_len, sizeof(__pyx_k_data_len), 0, 0, 1, 1},
+ {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
+ {&__pyx_n_s_in_buf, __pyx_k_in_buf, sizeof(__pyx_k_in_buf), 0, 0, 1, 1},
+ {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+ {&__pyx_n_s_mask, __pyx_k_mask, sizeof(__pyx_k_mask), 0, 0, 1, 1},
+ {&__pyx_n_s_mask_buf, __pyx_k_mask_buf, sizeof(__pyx_k_mask_buf), 0, 0, 1, 1},
+ {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
+ {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
+ {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+ {&__pyx_n_s_uint32_msk, __pyx_k_uint32_msk, sizeof(__pyx_k_uint32_msk), 0, 0, 1, 1},
+ {&__pyx_n_s_uint64_msk, __pyx_k_uint64_msk, sizeof(__pyx_k_uint64_msk), 0, 0, 1, 1},
+ {&__pyx_n_s_websocket_mask_cython, __pyx_k_websocket_mask_cython, sizeof(__pyx_k_websocket_mask_cython), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
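+
+/* Each __Pyx_StringTabEntry above pairs a global PyObject* slot with the C
+ * literal it is created from; __Pyx_InitStrings materialises them once at
+ * module init, interning the identifier-like entries so that later checks
+ * such as the `**name == key` test in __Pyx_ParseOptionalKeywords can
+ * succeed on pointer identity alone. The trailing integer fields are the
+ * encoding/str/unicode/intern flags consumed by __Pyx_InitStrings. */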
+static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 55, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
+
+ /* "aiohttp/_websocket.pyx":11
+ *
+ *
+ * def _websocket_mask_cython(object mask, object data): # <<<<<<<<<<<<<<
+ * """Note, this function mutates its `data` argument
+ * """
+ */
+ __pyx_tuple_ = PyTuple_Pack(8, __pyx_n_s_mask, __pyx_n_s_data, __pyx_n_s_data_len, __pyx_n_s_i, __pyx_n_s_in_buf, __pyx_n_s_mask_buf, __pyx_n_s_uint32_msk, __pyx_n_s_uint64_msk); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 11, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple_);
+ __Pyx_GIVEREF(__pyx_tuple_);
+ __pyx_codeobj__2 = (PyObject*)__Pyx_PyCode_New(2, 0, 8, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple_, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_aiohttp__websocket_pyx, __pyx_n_s_websocket_mask_cython, 11, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__2)) __PYX_ERR(0, 11, __pyx_L1_error)
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
+
+static int __Pyx_modinit_global_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
+ /*--- Global init code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_variable_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
+ /*--- Variable export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
+ /*--- Function export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_type_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
+ /*--- Type init code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_type_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
+ /*--- Type import code ---*/
+ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
+ #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
+ sizeof(PyTypeObject),
+ #else
+ sizeof(PyHeapTypeObject),
+ #endif
+ __Pyx_ImportType_CheckSize_Warn);
+ if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn);
+ if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(2, 8, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn);
+ if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(3, 15, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static int __Pyx_modinit_variable_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
+ /*--- Variable import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
+ /*--- Function import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+
+#ifndef CYTHON_NO_PYINIT_EXPORT
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+#elif PY_MAJOR_VERSION < 3
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" void
+#else
+#define __Pyx_PyMODINIT_FUNC void
+#endif
+#else
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#endif
+#endif
+
+
+#if PY_MAJOR_VERSION < 3
+__Pyx_PyMODINIT_FUNC init_websocket(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC init_websocket(void)
+#else
+__Pyx_PyMODINIT_FUNC PyInit__websocket(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC PyInit__websocket(void)
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+{
+ return PyModuleDef_Init(&__pyx_moduledef);
+}
+static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
+ #if PY_VERSION_HEX >= 0x030700A1
+ static PY_INT64_T main_interpreter_id = -1;
+ PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
+ if (main_interpreter_id == -1) {
+ main_interpreter_id = current_id;
+ return (unlikely(current_id == -1)) ? -1 : 0;
+ } else if (unlikely(main_interpreter_id != current_id))
+ #else
+ static PyInterpreterState *main_interpreter = NULL;
+ PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
+ if (!main_interpreter) {
+ main_interpreter = current_interpreter;
+ } else if (unlikely(main_interpreter != current_interpreter))
+ #endif
+ {
+ PyErr_SetString(
+ PyExc_ImportError,
+ "Interpreter change detected - this module can only be loaded into one interpreter per process.");
+ return -1;
+ }
+ return 0;
+}
+static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
+ PyObject *value = PyObject_GetAttrString(spec, from_name);
+ int result = 0;
+ if (likely(value)) {
+ if (allow_none || value != Py_None) {
+ result = PyDict_SetItemString(moddict, to_name, value);
+ }
+ Py_DECREF(value);
+ } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Clear();
+ } else {
+ result = -1;
+ }
+ return result;
+}
+static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
+ PyObject *module = NULL, *moddict, *modname;
+ if (__Pyx_check_single_interpreter())
+ return NULL;
+ if (__pyx_m)
+ return __Pyx_NewRef(__pyx_m);
+ modname = PyObject_GetAttrString(spec, "name");
+ if (unlikely(!modname)) goto bad;
+ module = PyModule_NewObject(modname);
+ Py_DECREF(modname);
+ if (unlikely(!module)) goto bad;
+ moddict = PyModule_GetDict(module);
+ if (unlikely(!moddict)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
+ return module;
+bad:
+ Py_XDECREF(module);
+ return NULL;
+}
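+
+/* Under PEP 489 multi-phase initialisation, PyInit__websocket above only
+ * returns the module definition; the interpreter then calls
+ * __pyx_pymod_create (which builds the module object and copies the
+ * loader/origin/parent/submodule_search_locations attributes of the
+ * import spec into __loader__/__file__/__package__/__path__) followed by
+ * __pyx_pymod_exec__websocket below, which runs the module body.
+ * __Pyx_check_single_interpreter refuses to load the module into a second
+ * (sub)interpreter because it keeps process-wide static state. */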
+
+
+static CYTHON_SMALL_CODE int __pyx_pymod_exec__websocket(PyObject *__pyx_pyinit_module)
+#endif
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannyDeclarations
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ if (__pyx_m) {
+ if (__pyx_m == __pyx_pyinit_module) return 0;
+ PyErr_SetString(PyExc_RuntimeError, "Module '_websocket' has already been imported. Re-initialisation is not supported.");
+ return -1;
+ }
+ #elif PY_MAJOR_VERSION >= 3
+ if (__pyx_m) return __Pyx_NewRef(__pyx_m);
+ #endif
+ #if CYTHON_REFNANNY
+  __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+  if (!__Pyx_RefNanny) {
+    PyErr_Clear();
+    __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+    if (!__Pyx_RefNanny)
+      Py_FatalError("failed to import 'refnanny' module");
+  }
+#endif
+ __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__websocket(void)", 0);
+ if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pxy_PyFrame_Initialize_Offsets
+ __Pxy_PyFrame_Initialize_Offsets();
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pyx_CyFunction_USED
+ if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_FusedFunction_USED
+ if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Generator_USED
+ if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_StopAsyncIteration_USED
+ if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_m = __pyx_pyinit_module;
+ Py_INCREF(__pyx_m);
+ #else
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4("_websocket", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_d);
+ __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_b);
+ __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_cython_runtime);
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ /*--- Initialize various global constants etc. ---*/
+ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+ if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ if (__pyx_module_is_main_aiohttp___websocket) {
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ #if PY_MAJOR_VERSION >= 3
+ {
+ PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (!PyDict_GetItemString(modules, "aiohttp._websocket")) {
+ if (unlikely(PyDict_SetItemString(modules, "aiohttp._websocket", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ }
+ #endif
+ /*--- Builtin init code ---*/
+ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Constants init code ---*/
+ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Global type/function init code ---*/
+ (void)__Pyx_modinit_global_init_code();
+ (void)__Pyx_modinit_variable_export_code();
+ (void)__Pyx_modinit_function_export_code();
+ (void)__Pyx_modinit_type_init_code();
+ if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ (void)__Pyx_modinit_variable_import_code();
+ (void)__Pyx_modinit_function_import_code();
+ /*--- Execution code ---*/
+ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+ if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+
+ /* "aiohttp/_websocket.pyx":11
+ *
+ *
+ * def _websocket_mask_cython(object mask, object data): # <<<<<<<<<<<<<<
+ * """Note, this function mutates its `data` argument
+ * """
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_7aiohttp_10_websocket_1_websocket_mask_cython, NULL, __pyx_n_s_aiohttp__websocket); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 11, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_websocket_mask_cython, __pyx_t_1) < 0) __PYX_ERR(0, 11, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "aiohttp/_websocket.pyx":1
+ * from cpython cimport PyBytes_AsString # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /*--- Wrapped vars code ---*/
+
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ if (__pyx_m) {
+ if (__pyx_d) {
+ __Pyx_AddTraceback("init aiohttp._websocket", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ }
+ Py_CLEAR(__pyx_m);
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init aiohttp._websocket");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ return (__pyx_m != NULL) ? 0 : -1;
+ #elif PY_MAJOR_VERSION >= 3
+ return __pyx_m;
+ #else
+ return;
+ #endif
+}
+
+/* --- Runtime support code --- */
+/* Refnanny */
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule(modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, "RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif
+
+/* PyObjectGetAttrStr */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro))
+ return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_getattr))
+ return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+ return PyObject_GetAttr(obj, attr_name);
+}
+#endif
+
+/* GetBuiltinName */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+ PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+ if (unlikely(!result)) {
+ PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+ "name '%U' is not defined", name);
+#else
+ "name '%.200s' is not defined", PyString_AS_STRING(name));
+#endif
+ }
+ return result;
+}
+
+/* RaiseArgTupleInvalid */
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *more_or_less;
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
+ func_name, more_or_less, num_expected,
+ (num_expected == 1) ? "" : "s", num_found);
+}
+
+/* RaiseDoubleKeywords */
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AsString(kw_name));
+ #endif
+}
+
+/* ParseKeywords */
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ continue;
+ }
+ name = first_kw_arg;
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyString_Check(key))) {
+ while (*name) {
+ if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**name, key)) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ if ((**argname == key) || (
+ (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**argname, key))) {
+ goto arg_passed_twice;
+ }
+ argname++;
+ }
+ }
+ } else
+ #endif
+ if (likely(PyUnicode_Check(key))) {
+ while (*name) {
+ int cmp = (**name == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**name, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ int cmp = (**argname == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**argname, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) goto arg_passed_twice;
+ argname++;
+ }
+ }
+ } else
+ goto invalid_keyword_type;
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, key);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%.200s() got an unexpected keyword argument '%.200s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
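+
+/* __Pyx_ParseOptionalKeywords leans on CPython interning identifier
+ * strings: the `**name == key` test is a pointer-identity fast path that
+ * normally hits, and only on a miss does it fall back to a length check
+ * plus PyUnicode_Compare. The second scan over the names already consumed
+ * positionally is what turns "same argument given both positionally and
+ * by keyword" into the arg_passed_twice TypeError. */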
+
+/* PyCFunctionFastCall */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
+ PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
+ PyCFunction meth = PyCFunction_GET_FUNCTION(func);
+ PyObject *self = PyCFunction_GET_SELF(func);
+ int flags = PyCFunction_GET_FLAGS(func);
+ assert(PyCFunction_Check(func));
+ assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
+ assert(nargs >= 0);
+ assert(nargs == 0 || args != NULL);
+ /* _PyCFunction_FastCallDict() must not be called with an exception set,
+ because it may clear it (directly or indirectly) and so the
+ caller loses its exception */
+ assert(!PyErr_Occurred());
+ if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
+ return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
+ } else {
+ return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
+ }
+}
+#endif
+
+/* PyFunctionFastCall */
+#if CYTHON_FAST_PYCALL
+static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
+ PyObject *globals) {
+ PyFrameObject *f;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject **fastlocals;
+ Py_ssize_t i;
+ PyObject *result;
+ assert(globals != NULL);
+ /* XXX Perhaps we should create a specialized
+ PyFrame_New() that doesn't take locals, but does
+ take builtins without sanity checking them.
+ */
+ assert(tstate != NULL);
+ f = PyFrame_New(tstate, co, globals, NULL);
+ if (f == NULL) {
+ return NULL;
+ }
+ fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
+ for (i = 0; i < na; i++) {
+ Py_INCREF(*args);
+ fastlocals[i] = *args++;
+ }
+ result = PyEval_EvalFrameEx(f,0);
+ ++tstate->recursion_depth;
+ Py_DECREF(f);
+ --tstate->recursion_depth;
+ return result;
+}
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
+ PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
+ PyObject *globals = PyFunction_GET_GLOBALS(func);
+ PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
+ PyObject *closure;
+#if PY_MAJOR_VERSION >= 3
+ PyObject *kwdefs;
+#endif
+ PyObject *kwtuple, **k;
+ PyObject **d;
+ Py_ssize_t nd;
+ Py_ssize_t nk;
+ PyObject *result;
+ assert(kwargs == NULL || PyDict_Check(kwargs));
+ nk = kwargs ? PyDict_Size(kwargs) : 0;
+ if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
+ return NULL;
+ }
+ if (
+#if PY_MAJOR_VERSION >= 3
+ co->co_kwonlyargcount == 0 &&
+#endif
+ likely(kwargs == NULL || nk == 0) &&
+ co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
+ if (argdefs == NULL && co->co_argcount == nargs) {
+ result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
+ goto done;
+ }
+ else if (nargs == 0 && argdefs != NULL
+ && co->co_argcount == Py_SIZE(argdefs)) {
+            /* function called with no arguments, but all parameters have
+               a default value: use default values as arguments. */
+ args = &PyTuple_GET_ITEM(argdefs, 0);
+            result = __Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
+ goto done;
+ }
+ }
+ if (kwargs != NULL) {
+ Py_ssize_t pos, i;
+ kwtuple = PyTuple_New(2 * nk);
+ if (kwtuple == NULL) {
+ result = NULL;
+ goto done;
+ }
+ k = &PyTuple_GET_ITEM(kwtuple, 0);
+ pos = i = 0;
+ while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
+ Py_INCREF(k[i]);
+ Py_INCREF(k[i+1]);
+ i += 2;
+ }
+ nk = i / 2;
+ }
+ else {
+ kwtuple = NULL;
+ k = NULL;
+ }
+ closure = PyFunction_GET_CLOSURE(func);
+#if PY_MAJOR_VERSION >= 3
+ kwdefs = PyFunction_GET_KW_DEFAULTS(func);
+#endif
+ if (argdefs != NULL) {
+ d = &PyTuple_GET_ITEM(argdefs, 0);
+ nd = Py_SIZE(argdefs);
+ }
+ else {
+ d = NULL;
+ nd = 0;
+ }
+#if PY_MAJOR_VERSION >= 3
+ result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, kwdefs, closure);
+#else
+ result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, closure);
+#endif
+ Py_XDECREF(kwtuple);
+done:
+ Py_LeaveRecursiveCall();
+ return result;
+}
+#endif
+#endif
+
+/* PyObjectCall */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
+ PyObject *result;
+ ternaryfunc call = func->ob_type->tp_call;
+ if (unlikely(!call))
+ return PyObject_Call(func, arg, kw);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = (*call)(func, arg, kw);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCallMethO */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
+ PyObject *self, *result;
+ PyCFunction cfunc;
+ cfunc = PyCFunction_GET_FUNCTION(func);
+ self = PyCFunction_GET_SELF(func);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = cfunc(self, arg);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCallOneArg */
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_New(1);
+ if (unlikely(!args)) return NULL;
+ Py_INCREF(arg);
+ PyTuple_SET_ITEM(args, 0, arg);
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, &arg, 1);
+ }
+#endif
+ if (likely(PyCFunction_Check(func))) {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+ return __Pyx_PyObject_CallMethO(func, arg);
+#if CYTHON_FAST_PYCCALL
+ } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
+ return __Pyx_PyCFunction_FastCall(func, &arg, 1);
+#endif
+ }
+ }
+ return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_Pack(1, arg);
+ if (unlikely(!args)) return NULL;
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+#endif
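+
+/* __Pyx_PyObject_CallOneArg picks the cheapest calling convention
+ * available: pure-Python functions go through the frame-based fast call,
+ * METH_O C functions are invoked directly with the bare argument, and
+ * METH_FASTCALL functions receive it as a one-element C array; only the
+ * generic fallback pays for allocating a one-element argument tuple. */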
+
+/* TypeImport */
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
+ size_t size, enum __Pyx_ImportType_CheckSize check_size)
+{
+ PyObject *result = 0;
+ char warning[200];
+ Py_ssize_t basicsize;
+#ifdef Py_LIMITED_API
+ PyObject *py_basicsize;
+#endif
+ result = PyObject_GetAttrString(module, class_name);
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%.200s.%.200s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+#ifndef Py_LIMITED_API
+ basicsize = ((PyTypeObject *)result)->tp_basicsize;
+#else
+ py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
+ if (!py_basicsize)
+ goto bad;
+ basicsize = PyLong_AsSsize_t(py_basicsize);
+ Py_DECREF(py_basicsize);
+ py_basicsize = 0;
+ if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
+ goto bad;
+#endif
+ if ((size_t)basicsize < size) {
+ PyErr_Format(PyExc_ValueError,
+ "%.200s.%.200s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ goto bad;
+ }
+ if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%.200s.%.200s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ goto bad;
+ }
+ else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
+ PyOS_snprintf(warning, sizeof(warning),
+ "%s.%s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(result);
+ return NULL;
+}
+#endif
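+
+/* __Pyx_ImportType is the runtime half of cimporting an extension type:
+ * it fetches the type object from its defining module and compares the
+ * struct size this module was compiled against with the live
+ * tp_basicsize. A smaller live object is always a hard error (field
+ * offsets would be invalid); under __Pyx_ImportType_CheckSize_Warn a
+ * larger live object merely warns, since the known prefix of the struct
+ * still lines up. */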
+
+/* PyDictVersioning */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
+ PyObject **dictptr = NULL;
+ Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
+ if (offset) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
+#else
+ dictptr = _PyObject_GetDictPtr(obj);
+#endif
+ }
+ return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
+}
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
+ return 0;
+ return obj_dict_version == __Pyx_get_object_dict_version(obj);
+}
+#endif
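+
+/* PyDictVersioning exploits CPython's per-dict version tag (a 64-bit
+ * counter bumped on every mutation): by remembering the versions of a
+ * type dict and an instance dict, a cached lookup can be revalidated with
+ * two integer comparisons instead of repeating the hash lookup. */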
+
+/* PyErrFetchRestore */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+#endif
+
+/* CLineInTraceback */
+#ifndef CYTHON_CLINE_IN_TRACEBACK
+static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
+ PyObject *use_cline;
+ PyObject *ptype, *pvalue, *ptraceback;
+#if CYTHON_COMPILING_IN_CPYTHON
+ PyObject **cython_runtime_dict;
+#endif
+ if (unlikely(!__pyx_cython_runtime)) {
+ return c_line;
+ }
+ __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
+#if CYTHON_COMPILING_IN_CPYTHON
+ cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
+ if (likely(cython_runtime_dict)) {
+ __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
+ use_cline, *cython_runtime_dict,
+ __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
+ } else
+#endif
+ {
+ PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
+ if (use_cline_obj) {
+ use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
+ Py_DECREF(use_cline_obj);
+ } else {
+ PyErr_Clear();
+ use_cline = NULL;
+ }
+ }
+ if (!use_cline) {
+ c_line = 0;
+ PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
+ }
+ else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
+ c_line = 0;
+ }
+ __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
+ return c_line;
+}
+#endif
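+
+/* Whether C source lines appear in tracebacks is controllable at run time
+ * through cython_runtime.cline_in_traceback. The lookup above goes through
+ * the dict-version cache where possible, falls back to a plain attribute
+ * read otherwise, and on first use defaults the flag to False. */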
+
+/* CodeObjectCache */
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+ int start = 0, mid = 0, end = count - 1;
+ if (end >= 0 && code_line > entries[end].code_line) {
+ return count;
+ }
+ while (start < end) {
+ mid = start + (end - start) / 2;
+ if (code_line < entries[mid].code_line) {
+ end = mid;
+ } else if (code_line > entries[mid].code_line) {
+ start = mid + 1;
+ } else {
+ return mid;
+ }
+ }
+ if (code_line <= entries[mid].code_line) {
+ return mid;
+ } else {
+ return mid + 1;
+ }
+}
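+
+/* The code-object cache is a sorted array keyed by line number, so
+ * __pyx_bisect_code_objects is a plain binary search that returns either
+ * the matching slot or the insertion point; __pyx_find_code_object and
+ * __pyx_insert_code_object below share it for lookup and ordered insert.
+ * C-line entries are stored under the negated line number (see
+ * __Pyx_AddTraceback) so they cannot collide with Python line numbers. */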
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+ PyCodeObject* code_object;
+ int pos;
+ if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+ return NULL;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+ return NULL;
+ }
+ code_object = __pyx_code_cache.entries[pos].code_object;
+ Py_INCREF(code_object);
+ return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+ int pos, i;
+ __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+ if (unlikely(!code_line)) {
+ return;
+ }
+ if (unlikely(!entries)) {
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+ if (likely(entries)) {
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = 64;
+ __pyx_code_cache.count = 1;
+ entries[0].code_line = code_line;
+ entries[0].code_object = code_object;
+ Py_INCREF(code_object);
+ }
+ return;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+ PyCodeObject* tmp = entries[pos].code_object;
+ entries[pos].code_object = code_object;
+ Py_DECREF(tmp);
+ return;
+ }
+ if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+ int new_max = __pyx_code_cache.max_count + 64;
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+ __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
+ if (unlikely(!entries)) {
+ return;
+ }
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = new_max;
+ }
+ for (i=__pyx_code_cache.count; i>pos; i--) {
+ entries[i] = entries[i-1];
+ }
+ entries[pos].code_line = code_line;
+ entries[pos].code_object = code_object;
+ __pyx_code_cache.count++;
+ Py_INCREF(code_object);
+}
+
+/* AddTraceback */
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+ const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(filename);
+ #else
+ py_srcfile = PyUnicode_FromString(filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (c_line) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_code = __Pyx_PyCode_New(
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ py_line,
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ Py_DECREF(py_srcfile);
+ Py_DECREF(py_funcname);
+ return py_code;
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ if (c_line) {
+ c_line = __Pyx_CLineForTraceback(tstate, c_line);
+ }
+ py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
+ if (!py_code) {
+ py_code = __Pyx_CreateCodeObjectForTraceback(
+ funcname, c_line, py_line, filename);
+ if (!py_code) goto bad;
+ __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
+ }
+ py_frame = PyFrame_New(
+ tstate, /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ __pyx_d, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
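+
+/* __Pyx_AddTraceback makes C-level failures visible from Python: it
+ * synthesises an empty PyCodeObject carrying only the filename, function
+ * name and line number, wraps it in a throwaway frame, and hands that
+ * frame to PyTraceBack_Here so the failing location shows up in the
+ * traceback. The synthetic code objects are memoised in the cache above
+ * because the same raise site tends to fire repeatedly. */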
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(long) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(long) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(long) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(long),
+ little, !is_unsigned);
+ }
+}
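+
+/* The fallback at the end of __Pyx_PyInt_From_long detects byte order at
+ * run time: writing 1 into an int and reading back its first byte yields
+ * 1 exactly on little-endian targets, and the result is passed straight
+ * to _PyLong_FromByteArray along with the signedness flag. The sizeof
+ * comparisons above it are compile-time constants, so the optimiser
+ * collapses the whole ladder into a single PyInt/PyLong constructor. */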
+
+/* CIntFromPyVerify */
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+ {\
+ func_type value = func_value;\
+ if (sizeof(target_type) < sizeof(func_type)) {\
+ if (unlikely(value != (func_type) (target_type) value)) {\
+ func_type zero = 0;\
+ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+ return (target_type) -1;\
+ if (is_unsigned && unlikely(value < zero))\
+ goto raise_neg_overflow;\
+ else\
+ goto raise_overflow;\
+ }\
+ }\
+ return (target_type) value;\
+ }
+
+/* CIntFromPy */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(long) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (long) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+ return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+ return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+ return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (long) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(long) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(long) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ long val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (long) -1;
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (long) -1;
+ val = __Pyx_PyInt_As_long(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to long");
+ return (long) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long) -1;
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(int) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (int) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+ return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+ return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+ return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (int) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(int) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(int) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ int val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (int) -1;
+ }
+ } else {
+ int val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (int) -1;
+ val = __Pyx_PyInt_As_int(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to int");
+ return (int) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to int");
+ return (int) -1;
+}
+
+/* FastTypeChecks */
+#if CYTHON_COMPILING_IN_CPYTHON
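+/* Fallback subtype check that walks the single-inheritance tp_base chain;
+   used by __Pyx_IsSubtype when the type's MRO tuple is not available. */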
+static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
+ while (a) {
+ a = a->tp_base;
+ if (a == b)
+ return 1;
+ }
+ return b == &PyBaseObject_Type;
+}
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
+ PyObject *mro;
+ if (a == b) return 1;
+ mro = a->tp_mro;
+ if (likely(mro)) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(mro);
+ for (i = 0; i < n; i++) {
+ if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+ return 1;
+ }
+ return 0;
+ }
+ return __Pyx_InBases(a, b);
+}
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+ PyObject *exception, *value, *tb;
+ int res;
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&exception, &value, &tb);
+ res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ if (!res) {
+ res = PyObject_IsSubclass(err, exc_type2);
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ }
+ __Pyx_ErrRestore(exception, value, tb);
+ return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+ int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+ if (!res) {
+ res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+ }
+ return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ assert(PyExceptionClass_Check(exc_type));
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ PyObject *t = PyTuple_GET_ITEM(tuple, i);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(exc_type == t)) return 1;
+ #endif
+ if (likely(PyExceptionClass_Check(t))) {
+ if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+ } else {
+ }
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+ if (likely(err == exc_type)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ if (likely(PyExceptionClass_Check(exc_type))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+ } else if (likely(PyTuple_Check(exc_type))) {
+ return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+ } else {
+ }
+ }
+ return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+ assert(PyExceptionClass_Check(exc_type1));
+ assert(PyExceptionClass_Check(exc_type2));
+ if (likely(err == exc_type1 || err == exc_type2)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+ }
+ return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+ char ctversion[4], rtversion[4];
+ PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+ PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
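+    /* Both buffers hold "X.Y"; index 0 is the major digit and index 2 the
+       minor digit, so this check assumes single-digit version components. */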
+ if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+ char message[200];
+ PyOS_snprintf(message, sizeof(message),
+ "compiletime version %s of module '%.100s' "
+ "does not match runtime version %s",
+ ctversion, __Pyx_MODULE_NAME, rtversion);
+ return PyErr_WarnEx(NULL, message, 1);
+ }
+ return 0;
+}
+
+/* InitStrings */
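+/* Materialises the module's string-constant table (interned or decoded as
+   requested) and eagerly hashes each entry so hash failures surface at
+   import time rather than at first use. */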
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ if (PyObject_Hash(*t->p) == -1)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+ return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+ Py_ssize_t ignore;
+ return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ char* defenc_c;
+ PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+ if (!defenc) return NULL;
+ defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ {
+ char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+ char* c;
+ for (c = defenc_c; c < end; c++) {
+ if ((unsigned char) (*c) >= 128) {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+ }
+ }
+#endif
+ *length = PyBytes_GET_SIZE(defenc);
+ return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ if (likely(PyUnicode_IS_ASCII(o))) {
+ *length = PyUnicode_GET_LENGTH(o);
+ return PyUnicode_AsUTF8(o);
+ } else {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+#else
+ return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+ if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+ PyUnicode_Check(o)) {
+ return __Pyx_PyUnicode_AsStringAndSize(o, length);
+ } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+ if (PyByteArray_Check(o)) {
+ *length = PyByteArray_GET_SIZE(o);
+ return PyByteArray_AS_STRING(o);
+ } else
+#endif
+ {
+ char* result;
+ int r = PyBytes_AsStringAndSize(o, &result, length);
+ if (unlikely(r < 0)) {
+ return NULL;
+ } else {
+ return result;
+ }
+ }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
+ int retval;
+ if (unlikely(!x)) return -1;
+ retval = __Pyx_PyObject_IsTrue(x);
+ Py_DECREF(x);
+ return retval;
+}
+static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
+#if PY_MAJOR_VERSION >= 3
+ if (PyLong_Check(result)) {
+ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
+ "__int__ returned non-int (type %.200s). "
+ "The ability to return an instance of a strict subclass of int "
+ "is deprecated, and may be removed in a future version of Python.",
+ Py_TYPE(result)->tp_name)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ return result;
+ }
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%.4s__ returned non-%.4s (type %.200s)",
+ type_name, type_name, Py_TYPE(result)->tp_name);
+ Py_DECREF(result);
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
+#if CYTHON_USE_TYPE_SLOTS
+ PyNumberMethods *m;
+#endif
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x) || PyLong_Check(x)))
+#else
+ if (likely(PyLong_Check(x)))
+#endif
+ return __Pyx_NewRef(x);
+#if CYTHON_USE_TYPE_SLOTS
+ m = Py_TYPE(x)->tp_as_number;
+ #if PY_MAJOR_VERSION < 3
+ if (m && m->nb_int) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = m->nb_long(x);
+ }
+ #else
+ if (likely(m && m->nb_int)) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ #endif
+#else
+ if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
+ res = PyNumber_Int(x);
+ }
+#endif
+ if (likely(res)) {
+#if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
+#else
+ if (unlikely(!PyLong_CheckExact(res))) {
+#endif
+ return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject *x;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_CheckExact(b))) {
+ if (sizeof(Py_ssize_t) >= sizeof(long))
+ return PyInt_AS_LONG(b);
+ else
+ return PyInt_AsSsize_t(b);
+ }
+#endif
+ if (likely(PyLong_CheckExact(b))) {
+ #if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)b)->ob_digit;
+ const Py_ssize_t size = Py_SIZE(b);
+ if (likely(__Pyx_sst_abs(size) <= 1)) {
+ ival = likely(size) ? digits[0] : 0;
+ if (size == -1) ival = -ival;
+ return ival;
+ } else {
+ switch (size) {
+ case 2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ }
+ }
+ #endif
+ return PyLong_AsSsize_t(b);
+ }
+ x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
+ return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+ return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/third_party/python/aiohttp/aiohttp/_websocket.pyx b/third_party/python/aiohttp/aiohttp/_websocket.pyx
new file mode 100644
index 0000000000..94318d2b1b
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/_websocket.pyx
@@ -0,0 +1,56 @@
+from cpython cimport PyBytes_AsString
+
+
+# from cpython cimport PyByteArray_AsString  # Cython still does not export that
+cdef extern from "Python.h":
+ char* PyByteArray_AsString(bytearray ba) except NULL
+
+from libc.stdint cimport uint32_t, uint64_t, uintmax_t
+
+
+def _websocket_mask_cython(object mask, object data):
+ """Note, this function mutates its `data` argument
+ """
+ cdef:
+ Py_ssize_t data_len, i
+ # bit operations on signed integers are implementation-specific
+ unsigned char * in_buf
+ const unsigned char * mask_buf
+ uint32_t uint32_msk
+ uint64_t uint64_msk
+
+ assert len(mask) == 4
+
+ if not isinstance(mask, bytes):
+ mask = bytes(mask)
+
+ if isinstance(data, bytearray):
+ data = <bytearray>data
+ else:
+ data = bytearray(data)
+
+ data_len = len(data)
+ in_buf = <unsigned char*>PyByteArray_AsString(data)
+ mask_buf = <const unsigned char*>PyBytes_AsString(mask)
+ uint32_msk = (<uint32_t*>mask_buf)[0]
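+    # RFC 6455 masking XORs payload byte i with mask[i % 4]; replicating the
+    # 4-byte mask into a machine word lets the loops below work word-at-a-time.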
+
+    # TODO: align the in_buf pointer to achieve even faster speeds
+    # (is that needed in Python? malloc() always aligns to sizeof(long) bytes)
+
+ if sizeof(size_t) >= 8:
+ uint64_msk = uint32_msk
+ uint64_msk = (uint64_msk << 32) | uint32_msk
+
+ while data_len >= 8:
+ (<uint64_t*>in_buf)[0] ^= uint64_msk
+ in_buf += 8
+ data_len -= 8
+
+
+ while data_len >= 4:
+ (<uint32_t*>in_buf)[0] ^= uint32_msk
+ in_buf += 4
+ data_len -= 4
+
+ for i in range(0, data_len):
+ in_buf[i] ^= mask_buf[i]
diff --git a/third_party/python/aiohttp/aiohttp/abc.py b/third_party/python/aiohttp/aiohttp/abc.py
new file mode 100644
index 0000000000..4abfd798d7
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/abc.py
@@ -0,0 +1,200 @@
+import asyncio
+import logging
+from abc import ABC, abstractmethod
+from collections.abc import Sized
+from http.cookies import BaseCookie, Morsel
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ Dict,
+ Generator,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+)
+
+from multidict import CIMultiDict
+from yarl import URL
+
+from .helpers import get_running_loop
+from .typedefs import LooseCookies
+
+if TYPE_CHECKING: # pragma: no cover
+ from .web_app import Application
+ from .web_exceptions import HTTPException
+ from .web_request import BaseRequest, Request
+ from .web_response import StreamResponse
+else:
+ BaseRequest = Request = Application = StreamResponse = None
+ HTTPException = None
+
+
+class AbstractRouter(ABC):
+ def __init__(self) -> None:
+ self._frozen = False
+
+ def post_init(self, app: Application) -> None:
+ """Post init stage.
+
+        Not an abstract method for the sake of backward compatibility,
+        but a router can override it if it wants to be aware
+        of the application.
+ """
+
+ @property
+ def frozen(self) -> bool:
+ return self._frozen
+
+ def freeze(self) -> None:
+ """Freeze router."""
+ self._frozen = True
+
+ @abstractmethod
+ async def resolve(self, request: Request) -> "AbstractMatchInfo":
+ """Return MATCH_INFO for given request"""
+
+
+class AbstractMatchInfo(ABC):
+ @property # pragma: no branch
+ @abstractmethod
+ def handler(self) -> Callable[[Request], Awaitable[StreamResponse]]:
+ """Execute matched request handler"""
+
+ @property
+ @abstractmethod
+ def expect_handler(self) -> Callable[[Request], Awaitable[None]]:
+ """Expect handler for 100-continue processing"""
+
+ @property # pragma: no branch
+ @abstractmethod
+ def http_exception(self) -> Optional[HTTPException]:
+ """HTTPException instance raised on router's resolving, or None"""
+
+ @abstractmethod # pragma: no branch
+ def get_info(self) -> Dict[str, Any]:
+ """Return a dict with additional info useful for introspection"""
+
+ @property # pragma: no branch
+ @abstractmethod
+ def apps(self) -> Tuple[Application, ...]:
+ """Stack of nested applications.
+
+        The top-level application is the left-most element.
+
+ """
+
+ @abstractmethod
+ def add_app(self, app: Application) -> None:
+ """Add application to the nested apps stack."""
+
+ @abstractmethod
+ def freeze(self) -> None:
+ """Freeze the match info.
+
+ The method is called after route resolution.
+
+ After the call .add_app() is forbidden.
+
+ """
+
+
+class AbstractView(ABC):
+ """Abstract class based view."""
+
+ def __init__(self, request: Request) -> None:
+ self._request = request
+
+ @property
+ def request(self) -> Request:
+ """Request instance."""
+ return self._request
+
+ @abstractmethod
+ def __await__(self) -> Generator[Any, None, StreamResponse]:
+ """Execute the view handler."""
+
+
+class AbstractResolver(ABC):
+ """Abstract DNS resolver."""
+
+ @abstractmethod
+ async def resolve(self, host: str, port: int, family: int) -> List[Dict[str, Any]]:
+ """Return IP address for given hostname"""
+
+ @abstractmethod
+ async def close(self) -> None:
+ """Release resolver"""
+
+
+if TYPE_CHECKING: # pragma: no cover
+ IterableBase = Iterable[Morsel[str]]
+else:
+ IterableBase = Iterable
+
+
+class AbstractCookieJar(Sized, IterableBase):
+ """Abstract Cookie Jar."""
+
+ def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
+ self._loop = get_running_loop(loop)
+
+ @abstractmethod
+ def clear(self) -> None:
+ """Clear all cookies."""
+
+ @abstractmethod
+ def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
+ """Update cookies."""
+
+ @abstractmethod
+ def filter_cookies(self, request_url: URL) -> "BaseCookie[str]":
+ """Return the jar's cookies filtered by their attributes."""
+
+
+class AbstractStreamWriter(ABC):
+ """Abstract stream writer."""
+
+ buffer_size = 0
+ output_size = 0
+ length = 0 # type: Optional[int]
+
+ @abstractmethod
+ async def write(self, chunk: bytes) -> None:
+ """Write chunk into stream."""
+
+ @abstractmethod
+ async def write_eof(self, chunk: bytes = b"") -> None:
+ """Write last chunk."""
+
+ @abstractmethod
+ async def drain(self) -> None:
+ """Flush the write buffer."""
+
+ @abstractmethod
+ def enable_compression(self, encoding: str = "deflate") -> None:
+ """Enable HTTP body compression"""
+
+ @abstractmethod
+ def enable_chunking(self) -> None:
+ """Enable HTTP chunked mode"""
+
+ @abstractmethod
+ async def write_headers(
+ self, status_line: str, headers: "CIMultiDict[str]"
+ ) -> None:
+ """Write HTTP headers"""
+
+
+class AbstractAccessLogger(ABC):
+ """Abstract writer to access log."""
+
+ def __init__(self, logger: logging.Logger, log_format: str) -> None:
+ self.logger = logger
+ self.log_format = log_format
+
+ @abstractmethod
+ def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None:
+ """Emit log to logger."""
diff --git a/third_party/python/aiohttp/aiohttp/base_protocol.py b/third_party/python/aiohttp/aiohttp/base_protocol.py
new file mode 100644
index 0000000000..01e18310b4
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/base_protocol.py
@@ -0,0 +1,87 @@
+import asyncio
+from typing import Optional, cast
+
+from .tcp_helpers import tcp_nodelay
+
+
+class BaseProtocol(asyncio.Protocol):
+ __slots__ = (
+ "_loop",
+ "_paused",
+ "_drain_waiter",
+ "_connection_lost",
+ "_reading_paused",
+ "transport",
+ )
+
+ def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop # type: asyncio.AbstractEventLoop
+ self._paused = False
+ self._drain_waiter = None # type: Optional[asyncio.Future[None]]
+ self._connection_lost = False
+ self._reading_paused = False
+
+ self.transport = None # type: Optional[asyncio.Transport]
+
+ def pause_writing(self) -> None:
+ assert not self._paused
+ self._paused = True
+
+ def resume_writing(self) -> None:
+ assert self._paused
+ self._paused = False
+
+ waiter = self._drain_waiter
+ if waiter is not None:
+ self._drain_waiter = None
+ if not waiter.done():
+ waiter.set_result(None)
+
+ def pause_reading(self) -> None:
+ if not self._reading_paused and self.transport is not None:
+ try:
+ self.transport.pause_reading()
+ except (AttributeError, NotImplementedError, RuntimeError):
+ pass
+ self._reading_paused = True
+
+ def resume_reading(self) -> None:
+ if self._reading_paused and self.transport is not None:
+ try:
+ self.transport.resume_reading()
+ except (AttributeError, NotImplementedError, RuntimeError):
+ pass
+ self._reading_paused = False
+
+ def connection_made(self, transport: asyncio.BaseTransport) -> None:
+ tr = cast(asyncio.Transport, transport)
+ tcp_nodelay(tr, True)
+ self.transport = tr
+
+ def connection_lost(self, exc: Optional[BaseException]) -> None:
+ self._connection_lost = True
+ # Wake up the writer if currently paused.
+ self.transport = None
+ if not self._paused:
+ return
+ waiter = self._drain_waiter
+ if waiter is None:
+ return
+ self._drain_waiter = None
+ if waiter.done():
+ return
+ if exc is None:
+ waiter.set_result(None)
+ else:
+ waiter.set_exception(exc)
+
+ async def _drain_helper(self) -> None:
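+        # Flow-control helper: once pause_writing() has fired, park on a
+        # future that resume_writing() or connection_lost() will resolve.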
+ if self._connection_lost:
+ raise ConnectionResetError("Connection lost")
+ if not self._paused:
+ return
+ waiter = self._drain_waiter
+ assert waiter is None or waiter.cancelled()
+ waiter = self._loop.create_future()
+ self._drain_waiter = waiter
+ await waiter
diff --git a/third_party/python/aiohttp/aiohttp/client.py b/third_party/python/aiohttp/aiohttp/client.py
new file mode 100644
index 0000000000..a9da8e155d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/client.py
@@ -0,0 +1,1275 @@
+"""HTTP Client for asyncio."""
+
+import asyncio
+import base64
+import hashlib
+import json
+import os
+import sys
+import traceback
+import warnings
+from types import SimpleNamespace, TracebackType
+from typing import (
+ Any,
+ Awaitable,
+ Callable,
+ Coroutine,
+ FrozenSet,
+ Generator,
+ Generic,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Set,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+)
+
+import attr
+from multidict import CIMultiDict, MultiDict, MultiDictProxy, istr
+from yarl import URL
+
+from . import hdrs, http, payload
+from .abc import AbstractCookieJar
+from .client_exceptions import (
+ ClientConnectionError as ClientConnectionError,
+ ClientConnectorCertificateError as ClientConnectorCertificateError,
+ ClientConnectorError as ClientConnectorError,
+ ClientConnectorSSLError as ClientConnectorSSLError,
+ ClientError as ClientError,
+ ClientHttpProxyError as ClientHttpProxyError,
+ ClientOSError as ClientOSError,
+ ClientPayloadError as ClientPayloadError,
+ ClientProxyConnectionError as ClientProxyConnectionError,
+ ClientResponseError as ClientResponseError,
+ ClientSSLError as ClientSSLError,
+ ContentTypeError as ContentTypeError,
+ InvalidURL as InvalidURL,
+ ServerConnectionError as ServerConnectionError,
+ ServerDisconnectedError as ServerDisconnectedError,
+ ServerFingerprintMismatch as ServerFingerprintMismatch,
+ ServerTimeoutError as ServerTimeoutError,
+ TooManyRedirects as TooManyRedirects,
+ WSServerHandshakeError as WSServerHandshakeError,
+)
+from .client_reqrep import (
+ ClientRequest as ClientRequest,
+ ClientResponse as ClientResponse,
+ Fingerprint as Fingerprint,
+ RequestInfo as RequestInfo,
+ _merge_ssl_params,
+)
+from .client_ws import ClientWebSocketResponse as ClientWebSocketResponse
+from .connector import (
+ BaseConnector as BaseConnector,
+ NamedPipeConnector as NamedPipeConnector,
+ TCPConnector as TCPConnector,
+ UnixConnector as UnixConnector,
+)
+from .cookiejar import CookieJar
+from .helpers import (
+ DEBUG,
+ PY_36,
+ BasicAuth,
+ CeilTimeout,
+ TimeoutHandle,
+ get_running_loop,
+ proxies_from_env,
+ sentinel,
+ strip_auth_from_url,
+)
+from .http import WS_KEY, HttpVersion, WebSocketReader, WebSocketWriter
+from .http_websocket import WSHandshakeError, WSMessage, ws_ext_gen, ws_ext_parse
+from .streams import FlowControlDataQueue
+from .tracing import Trace, TraceConfig
+from .typedefs import JSONEncoder, LooseCookies, LooseHeaders, StrOrURL
+
+__all__ = (
+ # client_exceptions
+ "ClientConnectionError",
+ "ClientConnectorCertificateError",
+ "ClientConnectorError",
+ "ClientConnectorSSLError",
+ "ClientError",
+ "ClientHttpProxyError",
+ "ClientOSError",
+ "ClientPayloadError",
+ "ClientProxyConnectionError",
+ "ClientResponseError",
+ "ClientSSLError",
+ "ContentTypeError",
+ "InvalidURL",
+ "ServerConnectionError",
+ "ServerDisconnectedError",
+ "ServerFingerprintMismatch",
+ "ServerTimeoutError",
+ "TooManyRedirects",
+ "WSServerHandshakeError",
+ # client_reqrep
+ "ClientRequest",
+ "ClientResponse",
+ "Fingerprint",
+ "RequestInfo",
+ # connector
+ "BaseConnector",
+ "TCPConnector",
+ "UnixConnector",
+ "NamedPipeConnector",
+ # client_ws
+ "ClientWebSocketResponse",
+ # client
+ "ClientSession",
+ "ClientTimeout",
+ "request",
+)
+
+
+try:
+ from ssl import SSLContext
+except ImportError: # pragma: no cover
+ SSLContext = object # type: ignore
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class ClientTimeout:
+ total: Optional[float] = None
+ connect: Optional[float] = None
+ sock_read: Optional[float] = None
+ sock_connect: Optional[float] = None
+
+ # pool_queue_timeout: Optional[float] = None
+ # dns_resolution_timeout: Optional[float] = None
+ # socket_connect_timeout: Optional[float] = None
+ # connection_acquiring_timeout: Optional[float] = None
+ # new_connection_timeout: Optional[float] = None
+ # http_header_timeout: Optional[float] = None
+ # response_body_timeout: Optional[float] = None
+
+    # To create a timeout specific to a single request, either
+    # - create a completely new one to override the default, or
+    # - use http://www.attrs.org/en/stable/api.html#attr.evolve
+    #   to override only some of the defaults
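+    #
+    # e.g. (values are illustrative):
+    #   per_request_timeout = attr.evolve(DEFAULT_TIMEOUT, connect=10)
+    #   session.get(url, timeout=per_request_timeout)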
+
+
+# 5 Minute default read timeout
+DEFAULT_TIMEOUT = ClientTimeout(total=5 * 60)
+
+_RetType = TypeVar("_RetType")
+
+
+class ClientSession:
+ """First-class interface for making HTTP requests."""
+
+ ATTRS = frozenset(
+ [
+ "_source_traceback",
+ "_connector",
+ "requote_redirect_url",
+ "_loop",
+ "_cookie_jar",
+ "_connector_owner",
+ "_default_auth",
+ "_version",
+ "_json_serialize",
+ "_requote_redirect_url",
+ "_timeout",
+ "_raise_for_status",
+ "_auto_decompress",
+ "_trust_env",
+ "_default_headers",
+ "_skip_auto_headers",
+ "_request_class",
+ "_response_class",
+ "_ws_response_class",
+ "_trace_configs",
+ "_read_bufsize",
+ ]
+ )
+
+ _source_traceback = None
+
+ def __init__(
+ self,
+ *,
+ connector: Optional[BaseConnector] = None,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ cookies: Optional[LooseCookies] = None,
+ headers: Optional[LooseHeaders] = None,
+ skip_auto_headers: Optional[Iterable[str]] = None,
+ auth: Optional[BasicAuth] = None,
+ json_serialize: JSONEncoder = json.dumps,
+ request_class: Type[ClientRequest] = ClientRequest,
+ response_class: Type[ClientResponse] = ClientResponse,
+ ws_response_class: Type[ClientWebSocketResponse] = ClientWebSocketResponse,
+ version: HttpVersion = http.HttpVersion11,
+ cookie_jar: Optional[AbstractCookieJar] = None,
+ connector_owner: bool = True,
+ raise_for_status: bool = False,
+ read_timeout: Union[float, object] = sentinel,
+ conn_timeout: Optional[float] = None,
+ timeout: Union[object, ClientTimeout] = sentinel,
+ auto_decompress: bool = True,
+ trust_env: bool = False,
+ requote_redirect_url: bool = True,
+ trace_configs: Optional[List[TraceConfig]] = None,
+ read_bufsize: int = 2 ** 16,
+ ) -> None:
+
+ if loop is None:
+ if connector is not None:
+ loop = connector._loop
+
+ loop = get_running_loop(loop)
+
+ if connector is None:
+ connector = TCPConnector(loop=loop)
+
+ if connector._loop is not loop:
+ raise RuntimeError("Session and connector has to use same event loop")
+
+ self._loop = loop
+
+ if loop.get_debug():
+ self._source_traceback = traceback.extract_stack(sys._getframe(1))
+
+ if cookie_jar is None:
+ cookie_jar = CookieJar(loop=loop)
+ self._cookie_jar = cookie_jar
+
+ if cookies is not None:
+ self._cookie_jar.update_cookies(cookies)
+
+ self._connector = connector # type: Optional[BaseConnector]
+ self._connector_owner = connector_owner
+ self._default_auth = auth
+ self._version = version
+ self._json_serialize = json_serialize
+ if timeout is sentinel:
+ self._timeout = DEFAULT_TIMEOUT
+ if read_timeout is not sentinel:
+ warnings.warn(
+ "read_timeout is deprecated, " "use timeout argument instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self._timeout = attr.evolve(self._timeout, total=read_timeout)
+ if conn_timeout is not None:
+ self._timeout = attr.evolve(self._timeout, connect=conn_timeout)
+ warnings.warn(
+ "conn_timeout is deprecated, " "use timeout argument instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ else:
+ self._timeout = timeout # type: ignore
+ if read_timeout is not sentinel:
+ raise ValueError(
+ "read_timeout and timeout parameters "
+ "conflict, please setup "
+ "timeout.read"
+ )
+ if conn_timeout is not None:
+ raise ValueError(
+ "conn_timeout and timeout parameters "
+ "conflict, please setup "
+ "timeout.connect"
+ )
+ self._raise_for_status = raise_for_status
+ self._auto_decompress = auto_decompress
+ self._trust_env = trust_env
+ self._requote_redirect_url = requote_redirect_url
+ self._read_bufsize = read_bufsize
+
+ # Convert to list of tuples
+ if headers:
+ real_headers = CIMultiDict(headers) # type: CIMultiDict[str]
+ else:
+ real_headers = CIMultiDict()
+ self._default_headers = real_headers # type: CIMultiDict[str]
+ if skip_auto_headers is not None:
+ self._skip_auto_headers = frozenset([istr(i) for i in skip_auto_headers])
+ else:
+ self._skip_auto_headers = frozenset()
+
+ self._request_class = request_class
+ self._response_class = response_class
+ self._ws_response_class = ws_response_class
+
+ self._trace_configs = trace_configs or []
+ for trace_config in self._trace_configs:
+ trace_config.freeze()
+
+ def __init_subclass__(cls: Type["ClientSession"]) -> None:
+ warnings.warn(
+ "Inheritance class {} from ClientSession "
+ "is discouraged".format(cls.__name__),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ if DEBUG:
+
+ def __setattr__(self, name: str, val: Any) -> None:
+ if name not in self.ATTRS:
+ warnings.warn(
+ "Setting custom ClientSession.{} attribute "
+ "is discouraged".format(name),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ super().__setattr__(name, val)
+
+ def __del__(self, _warnings: Any = warnings) -> None:
+ if not self.closed:
+ if PY_36:
+ kwargs = {"source": self}
+ else:
+ kwargs = {}
+ _warnings.warn(
+ f"Unclosed client session {self!r}", ResourceWarning, **kwargs
+ )
+ context = {"client_session": self, "message": "Unclosed client session"}
+ if self._source_traceback is not None:
+ context["source_traceback"] = self._source_traceback
+ self._loop.call_exception_handler(context)
+
+ def request(
+ self, method: str, url: StrOrURL, **kwargs: Any
+ ) -> "_RequestContextManager":
+ """Perform HTTP request."""
+ return _RequestContextManager(self._request(method, url, **kwargs))
+
+ async def _request(
+ self,
+ method: str,
+ str_or_url: StrOrURL,
+ *,
+ params: Optional[Mapping[str, str]] = None,
+ data: Any = None,
+ json: Any = None,
+ cookies: Optional[LooseCookies] = None,
+ headers: Optional[LooseHeaders] = None,
+ skip_auto_headers: Optional[Iterable[str]] = None,
+ auth: Optional[BasicAuth] = None,
+ allow_redirects: bool = True,
+ max_redirects: int = 10,
+ compress: Optional[str] = None,
+ chunked: Optional[bool] = None,
+ expect100: bool = False,
+ raise_for_status: Optional[bool] = None,
+ read_until_eof: bool = True,
+ proxy: Optional[StrOrURL] = None,
+ proxy_auth: Optional[BasicAuth] = None,
+ timeout: Union[ClientTimeout, object] = sentinel,
+ verify_ssl: Optional[bool] = None,
+ fingerprint: Optional[bytes] = None,
+ ssl_context: Optional[SSLContext] = None,
+ ssl: Optional[Union[SSLContext, bool, Fingerprint]] = None,
+ proxy_headers: Optional[LooseHeaders] = None,
+ trace_request_ctx: Optional[SimpleNamespace] = None,
+ read_bufsize: Optional[int] = None,
+ ) -> ClientResponse:
+
+ # NOTE: timeout clamps existing connect and read timeouts. We cannot
+ # set the default to None because we need to detect if the user wants
+ # to use the existing timeouts by setting timeout to None.
+
+ if self.closed:
+ raise RuntimeError("Session is closed")
+
+ ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint)
+
+ if data is not None and json is not None:
+ raise ValueError(
+ "data and json parameters can not be used at the same time"
+ )
+ elif json is not None:
+ data = payload.JsonPayload(json, dumps=self._json_serialize)
+
+ if not isinstance(chunked, bool) and chunked is not None:
+ warnings.warn("Chunk size is deprecated #1615", DeprecationWarning)
+
+ redirects = 0
+ history = []
+ version = self._version
+
+ # Merge with default headers and transform to CIMultiDict
+ headers = self._prepare_headers(headers)
+ proxy_headers = self._prepare_headers(proxy_headers)
+
+ try:
+ url = URL(str_or_url)
+ except ValueError as e:
+ raise InvalidURL(str_or_url) from e
+
+ skip_headers = set(self._skip_auto_headers)
+ if skip_auto_headers is not None:
+ for i in skip_auto_headers:
+ skip_headers.add(istr(i))
+
+ if proxy is not None:
+ try:
+ proxy = URL(proxy)
+ except ValueError as e:
+ raise InvalidURL(proxy) from e
+
+ if timeout is sentinel:
+ real_timeout = self._timeout # type: ClientTimeout
+ else:
+ if not isinstance(timeout, ClientTimeout):
+ real_timeout = ClientTimeout(total=timeout) # type: ignore
+ else:
+ real_timeout = timeout
+ # timeout is cumulative for all request operations
+ # (request, redirects, responses, data consuming)
+ tm = TimeoutHandle(self._loop, real_timeout.total)
+ handle = tm.start()
+
+ if read_bufsize is None:
+ read_bufsize = self._read_bufsize
+
+ traces = [
+ Trace(
+ self,
+ trace_config,
+ trace_config.trace_config_ctx(trace_request_ctx=trace_request_ctx),
+ )
+ for trace_config in self._trace_configs
+ ]
+
+ for trace in traces:
+ await trace.send_request_start(method, url, headers)
+
+ timer = tm.timer()
+ try:
+ with timer:
+ while True:
+ url, auth_from_url = strip_auth_from_url(url)
+ if auth and auth_from_url:
+ raise ValueError(
+ "Cannot combine AUTH argument with "
+ "credentials encoded in URL"
+ )
+
+ if auth is None:
+ auth = auth_from_url
+ if auth is None:
+ auth = self._default_auth
+ # It would be confusing if we support explicit
+ # Authorization header with auth argument
+ if (
+ headers is not None
+ and auth is not None
+ and hdrs.AUTHORIZATION in headers
+ ):
+ raise ValueError(
+ "Cannot combine AUTHORIZATION header "
+ "with AUTH argument or credentials "
+ "encoded in URL"
+ )
+
+ all_cookies = self._cookie_jar.filter_cookies(url)
+
+ if cookies is not None:
+ tmp_cookie_jar = CookieJar()
+ tmp_cookie_jar.update_cookies(cookies)
+ req_cookies = tmp_cookie_jar.filter_cookies(url)
+ if req_cookies:
+ all_cookies.load(req_cookies)
+
+ if proxy is not None:
+ proxy = URL(proxy)
+ elif self._trust_env:
+ for scheme, proxy_info in proxies_from_env().items():
+ if scheme == url.scheme:
+ proxy = proxy_info.proxy
+ proxy_auth = proxy_info.proxy_auth
+ break
+
+ req = self._request_class(
+ method,
+ url,
+ params=params,
+ headers=headers,
+ skip_auto_headers=skip_headers,
+ data=data,
+ cookies=all_cookies,
+ auth=auth,
+ version=version,
+ compress=compress,
+ chunked=chunked,
+ expect100=expect100,
+ loop=self._loop,
+ response_class=self._response_class,
+ proxy=proxy,
+ proxy_auth=proxy_auth,
+ timer=timer,
+ session=self,
+ ssl=ssl,
+ proxy_headers=proxy_headers,
+ traces=traces,
+ )
+
+ # connection timeout
+ try:
+ with CeilTimeout(real_timeout.connect, loop=self._loop):
+ assert self._connector is not None
+ conn = await self._connector.connect(
+ req, traces=traces, timeout=real_timeout
+ )
+ except asyncio.TimeoutError as exc:
+ raise ServerTimeoutError(
+ "Connection timeout " "to host {}".format(url)
+ ) from exc
+
+ assert conn.transport is not None
+
+ assert conn.protocol is not None
+ conn.protocol.set_response_params(
+ timer=timer,
+ skip_payload=method.upper() == "HEAD",
+ read_until_eof=read_until_eof,
+ auto_decompress=self._auto_decompress,
+ read_timeout=real_timeout.sock_read,
+ read_bufsize=read_bufsize,
+ )
+
+ try:
+ try:
+ resp = await req.send(conn)
+ try:
+ await resp.start(conn)
+ except BaseException:
+ resp.close()
+ raise
+ except BaseException:
+ conn.close()
+ raise
+ except ClientError:
+ raise
+ except OSError as exc:
+ raise ClientOSError(*exc.args) from exc
+
+ self._cookie_jar.update_cookies(resp.cookies, resp.url)
+
+ # redirects
+ if resp.status in (301, 302, 303, 307, 308) and allow_redirects:
+
+ for trace in traces:
+ await trace.send_request_redirect(
+ method, url, headers, resp
+ )
+
+ redirects += 1
+ history.append(resp)
+ if max_redirects and redirects >= max_redirects:
+ resp.close()
+ raise TooManyRedirects(
+ history[0].request_info, tuple(history)
+ )
+
+                    # For 301 and 302, mimic IE behaviour: resend POST as GET
+                    # (RFC 7231 now permits this)
+ # https://github.com/kennethreitz/requests/pull/269
+ if (resp.status == 303 and resp.method != hdrs.METH_HEAD) or (
+ resp.status in (301, 302) and resp.method == hdrs.METH_POST
+ ):
+ method = hdrs.METH_GET
+ data = None
+ if headers.get(hdrs.CONTENT_LENGTH):
+ headers.pop(hdrs.CONTENT_LENGTH)
+
+ r_url = resp.headers.get(hdrs.LOCATION) or resp.headers.get(
+ hdrs.URI
+ )
+ if r_url is None:
+ # see github.com/aio-libs/aiohttp/issues/2022
+ break
+ else:
+ # reading from correct redirection
+ # response is forbidden
+ resp.release()
+
+ try:
+ parsed_url = URL(
+ r_url, encoded=not self._requote_redirect_url
+ )
+
+ except ValueError as e:
+ raise InvalidURL(r_url) from e
+
+ scheme = parsed_url.scheme
+ if scheme not in ("http", "https", ""):
+ resp.close()
+ raise ValueError("Can redirect only to http or https")
+ elif not scheme:
+ parsed_url = url.join(parsed_url)
+
+ if url.origin() != parsed_url.origin():
+ auth = None
+ headers.pop(hdrs.AUTHORIZATION, None)
+
+ url = parsed_url
+ params = None
+ resp.release()
+ continue
+
+ break
+
+ # check response status
+ if raise_for_status is None:
+ raise_for_status = self._raise_for_status
+ if raise_for_status:
+ resp.raise_for_status()
+
+ # register connection
+ if handle is not None:
+ if resp.connection is not None:
+ resp.connection.add_callback(handle.cancel)
+ else:
+ handle.cancel()
+
+ resp._history = tuple(history)
+
+ for trace in traces:
+ await trace.send_request_end(method, url, headers, resp)
+ return resp
+
+ except BaseException as e:
+ # cleanup timer
+ tm.close()
+ if handle:
+ handle.cancel()
+ handle = None
+
+ for trace in traces:
+ await trace.send_request_exception(method, url, headers, e)
+ raise
+
+ def ws_connect(
+ self,
+ url: StrOrURL,
+ *,
+ method: str = hdrs.METH_GET,
+ protocols: Iterable[str] = (),
+ timeout: float = 10.0,
+ receive_timeout: Optional[float] = None,
+ autoclose: bool = True,
+ autoping: bool = True,
+ heartbeat: Optional[float] = None,
+ auth: Optional[BasicAuth] = None,
+ origin: Optional[str] = None,
+ headers: Optional[LooseHeaders] = None,
+ proxy: Optional[StrOrURL] = None,
+ proxy_auth: Optional[BasicAuth] = None,
+ ssl: Union[SSLContext, bool, None, Fingerprint] = None,
+ verify_ssl: Optional[bool] = None,
+ fingerprint: Optional[bytes] = None,
+ ssl_context: Optional[SSLContext] = None,
+ proxy_headers: Optional[LooseHeaders] = None,
+ compress: int = 0,
+ max_msg_size: int = 4 * 1024 * 1024,
+ ) -> "_WSRequestContextManager":
+ """Initiate websocket connection."""
+ return _WSRequestContextManager(
+ self._ws_connect(
+ url,
+ method=method,
+ protocols=protocols,
+ timeout=timeout,
+ receive_timeout=receive_timeout,
+ autoclose=autoclose,
+ autoping=autoping,
+ heartbeat=heartbeat,
+ auth=auth,
+ origin=origin,
+ headers=headers,
+ proxy=proxy,
+ proxy_auth=proxy_auth,
+ ssl=ssl,
+ verify_ssl=verify_ssl,
+ fingerprint=fingerprint,
+ ssl_context=ssl_context,
+ proxy_headers=proxy_headers,
+ compress=compress,
+ max_msg_size=max_msg_size,
+ )
+ )
+
+ async def _ws_connect(
+ self,
+ url: StrOrURL,
+ *,
+ method: str = hdrs.METH_GET,
+ protocols: Iterable[str] = (),
+ timeout: float = 10.0,
+ receive_timeout: Optional[float] = None,
+ autoclose: bool = True,
+ autoping: bool = True,
+ heartbeat: Optional[float] = None,
+ auth: Optional[BasicAuth] = None,
+ origin: Optional[str] = None,
+ headers: Optional[LooseHeaders] = None,
+ proxy: Optional[StrOrURL] = None,
+ proxy_auth: Optional[BasicAuth] = None,
+ ssl: Union[SSLContext, bool, None, Fingerprint] = None,
+ verify_ssl: Optional[bool] = None,
+ fingerprint: Optional[bytes] = None,
+ ssl_context: Optional[SSLContext] = None,
+ proxy_headers: Optional[LooseHeaders] = None,
+ compress: int = 0,
+ max_msg_size: int = 4 * 1024 * 1024,
+ ) -> ClientWebSocketResponse:
+
+ if headers is None:
+ real_headers = CIMultiDict() # type: CIMultiDict[str]
+ else:
+ real_headers = CIMultiDict(headers)
+
+ default_headers = {
+ hdrs.UPGRADE: "websocket",
+ hdrs.CONNECTION: "upgrade",
+ hdrs.SEC_WEBSOCKET_VERSION: "13",
+ }
+
+ for key, value in default_headers.items():
+ real_headers.setdefault(key, value)
+
+ sec_key = base64.b64encode(os.urandom(16))
+ real_headers[hdrs.SEC_WEBSOCKET_KEY] = sec_key.decode()
+
+ if protocols:
+ real_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = ",".join(protocols)
+ if origin is not None:
+ real_headers[hdrs.ORIGIN] = origin
+ if compress:
+ extstr = ws_ext_gen(compress=compress)
+ real_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = extstr
+
+ ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint)
+
+ # send request
+ resp = await self.request(
+ method,
+ url,
+ headers=real_headers,
+ read_until_eof=False,
+ auth=auth,
+ proxy=proxy,
+ proxy_auth=proxy_auth,
+ ssl=ssl,
+ proxy_headers=proxy_headers,
+ )
+
+ try:
+ # check handshake
+ if resp.status != 101:
+ raise WSServerHandshakeError(
+ resp.request_info,
+ resp.history,
+ message="Invalid response status",
+ status=resp.status,
+ headers=resp.headers,
+ )
+
+ if resp.headers.get(hdrs.UPGRADE, "").lower() != "websocket":
+ raise WSServerHandshakeError(
+ resp.request_info,
+ resp.history,
+ message="Invalid upgrade header",
+ status=resp.status,
+ headers=resp.headers,
+ )
+
+ if resp.headers.get(hdrs.CONNECTION, "").lower() != "upgrade":
+ raise WSServerHandshakeError(
+ resp.request_info,
+ resp.history,
+ message="Invalid connection header",
+ status=resp.status,
+ headers=resp.headers,
+ )
+
+ # key calculation
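+            # Per RFC 6455 the server echoes
+            # base64(sha1(Sec-WebSocket-Key + GUID)); WS_KEY is that GUID.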
+ r_key = resp.headers.get(hdrs.SEC_WEBSOCKET_ACCEPT, "")
+ match = base64.b64encode(hashlib.sha1(sec_key + WS_KEY).digest()).decode()
+ if r_key != match:
+ raise WSServerHandshakeError(
+ resp.request_info,
+ resp.history,
+ message="Invalid challenge response",
+ status=resp.status,
+ headers=resp.headers,
+ )
+
+ # websocket protocol
+ protocol = None
+ if protocols and hdrs.SEC_WEBSOCKET_PROTOCOL in resp.headers:
+ resp_protocols = [
+ proto.strip()
+ for proto in resp.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
+ ]
+
+ for proto in resp_protocols:
+ if proto in protocols:
+ protocol = proto
+ break
+
+ # websocket compress
+ notakeover = False
+ if compress:
+ compress_hdrs = resp.headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)
+ if compress_hdrs:
+ try:
+ compress, notakeover = ws_ext_parse(compress_hdrs)
+ except WSHandshakeError as exc:
+ raise WSServerHandshakeError(
+ resp.request_info,
+ resp.history,
+ message=exc.args[0],
+ status=resp.status,
+ headers=resp.headers,
+ ) from exc
+ else:
+ compress = 0
+ notakeover = False
+
+ conn = resp.connection
+ assert conn is not None
+ conn_proto = conn.protocol
+ assert conn_proto is not None
+ transport = conn.transport
+ assert transport is not None
+ reader = FlowControlDataQueue(
+ conn_proto, 2 ** 16, loop=self._loop
+ ) # type: FlowControlDataQueue[WSMessage]
+ conn_proto.set_parser(WebSocketReader(reader, max_msg_size), reader)
+ writer = WebSocketWriter(
+ conn_proto,
+ transport,
+ use_mask=True,
+ compress=compress,
+ notakeover=notakeover,
+ )
+ except BaseException:
+ resp.close()
+ raise
+ else:
+ return self._ws_response_class(
+ reader,
+ writer,
+ protocol,
+ resp,
+ timeout,
+ autoclose,
+ autoping,
+ self._loop,
+ receive_timeout=receive_timeout,
+ heartbeat=heartbeat,
+ compress=compress,
+ client_notakeover=notakeover,
+ )
+
+ def _prepare_headers(self, headers: Optional[LooseHeaders]) -> "CIMultiDict[str]":
+ """Add default headers and transform it to CIMultiDict"""
+ # Convert headers to MultiDict
+ result = CIMultiDict(self._default_headers)
+ if headers:
+ if not isinstance(headers, (MultiDictProxy, MultiDict)):
+ headers = CIMultiDict(headers)
+ added_names = set() # type: Set[str]
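+            # The first occurrence of a key overrides the session default;
+            # repeated occurrences are added as extra values for that key.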
+ for key, value in headers.items():
+ if key in added_names:
+ result.add(key, value)
+ else:
+ result[key] = value
+ added_names.add(key)
+ return result
+
+ def get(
+ self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any
+ ) -> "_RequestContextManager":
+ """Perform HTTP GET request."""
+ return _RequestContextManager(
+ self._request(hdrs.METH_GET, url, allow_redirects=allow_redirects, **kwargs)
+ )
+
+ def options(
+ self, url: StrOrURL, *, allow_redirects: bool = True, **kwargs: Any
+ ) -> "_RequestContextManager":
+ """Perform HTTP OPTIONS request."""
+ return _RequestContextManager(
+ self._request(
+ hdrs.METH_OPTIONS, url, allow_redirects=allow_redirects, **kwargs
+ )
+ )
+
+ def head(
+ self, url: StrOrURL, *, allow_redirects: bool = False, **kwargs: Any
+ ) -> "_RequestContextManager":
+ """Perform HTTP HEAD request."""
+ return _RequestContextManager(
+ self._request(
+ hdrs.METH_HEAD, url, allow_redirects=allow_redirects, **kwargs
+ )
+ )
+
+ def post(
+ self, url: StrOrURL, *, data: Any = None, **kwargs: Any
+ ) -> "_RequestContextManager":
+ """Perform HTTP POST request."""
+ return _RequestContextManager(
+ self._request(hdrs.METH_POST, url, data=data, **kwargs)
+ )
+
+ def put(
+ self, url: StrOrURL, *, data: Any = None, **kwargs: Any
+ ) -> "_RequestContextManager":
+ """Perform HTTP PUT request."""
+ return _RequestContextManager(
+ self._request(hdrs.METH_PUT, url, data=data, **kwargs)
+ )
+
+ def patch(
+ self, url: StrOrURL, *, data: Any = None, **kwargs: Any
+ ) -> "_RequestContextManager":
+ """Perform HTTP PATCH request."""
+ return _RequestContextManager(
+ self._request(hdrs.METH_PATCH, url, data=data, **kwargs)
+ )
+
+ def delete(self, url: StrOrURL, **kwargs: Any) -> "_RequestContextManager":
+ """Perform HTTP DELETE request."""
+ return _RequestContextManager(self._request(hdrs.METH_DELETE, url, **kwargs))
+
+ async def close(self) -> None:
+ """Close underlying connector.
+
+ Release all acquired resources.
+ """
+ if not self.closed:
+ if self._connector is not None and self._connector_owner:
+ await self._connector.close()
+ self._connector = None
+
+ @property
+ def closed(self) -> bool:
+ """Is client session closed.
+
+ A readonly property.
+ """
+ return self._connector is None or self._connector.closed
+
+ @property
+ def connector(self) -> Optional[BaseConnector]:
+ """Connector instance used for the session."""
+ return self._connector
+
+ @property
+ def cookie_jar(self) -> AbstractCookieJar:
+ """The session cookies."""
+ return self._cookie_jar
+
+ @property
+ def version(self) -> Tuple[int, int]:
+ """The session HTTP protocol version."""
+ return self._version
+
+ @property
+ def requote_redirect_url(self) -> bool:
+ """Do URL requoting on redirection handling."""
+ return self._requote_redirect_url
+
+ @requote_redirect_url.setter
+ def requote_redirect_url(self, val: bool) -> None:
+ """Do URL requoting on redirection handling."""
+ warnings.warn(
+ "session.requote_redirect_url modification " "is deprecated #2778",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self._requote_redirect_url = val
+
+ @property
+ def loop(self) -> asyncio.AbstractEventLoop:
+ """Session's loop."""
+ warnings.warn(
+ "client.loop property is deprecated", DeprecationWarning, stacklevel=2
+ )
+ return self._loop
+
+ @property
+ def timeout(self) -> Union[object, ClientTimeout]:
+ """Timeout for the session."""
+ return self._timeout
+
+ @property
+ def headers(self) -> "CIMultiDict[str]":
+ """The default headers of the client session."""
+ return self._default_headers
+
+ @property
+ def skip_auto_headers(self) -> FrozenSet[istr]:
+ """Headers for which autogeneration should be skipped"""
+ return self._skip_auto_headers
+
+ @property
+ def auth(self) -> Optional[BasicAuth]:
+ """An object that represents HTTP Basic Authorization"""
+ return self._default_auth
+
+ @property
+ def json_serialize(self) -> JSONEncoder:
+ """Json serializer callable"""
+ return self._json_serialize
+
+ @property
+ def connector_owner(self) -> bool:
+ """Should connector be closed on session closing"""
+ return self._connector_owner
+
+ @property
+ def raise_for_status(
+ self,
+ ) -> Union[bool, Callable[[ClientResponse], Awaitable[None]]]:
+ """
+ Should `ClientResponse.raise_for_status()`
+ be called for each response
+ """
+ return self._raise_for_status
+
+ @property
+ def auto_decompress(self) -> bool:
+ """Should the body response be automatically decompressed"""
+ return self._auto_decompress
+
+ @property
+ def trust_env(self) -> bool:
+ """
+        Should proxy information be read from the
+        HTTP_PROXY / HTTPS_PROXY environment variables
+        or the ~/.netrc file if present.
+ """
+ return self._trust_env
+
+ @property
+ def trace_configs(self) -> List[TraceConfig]:
+ """A list of TraceConfig instances used for client tracing"""
+ return self._trace_configs
+
+ def detach(self) -> None:
+ """Detach connector from session without closing the former.
+
+ Session is switched to closed state anyway.
+ """
+ self._connector = None
+
+ def __enter__(self) -> None:
+ raise TypeError("Use async with instead")
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+        # __exit__ should exist in pair with __enter__ but is never executed
+ pass # pragma: no cover
+
+ async def __aenter__(self) -> "ClientSession":
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ await self.close()
+
+
+class _BaseRequestContextManager(Coroutine[Any, Any, _RetType], Generic[_RetType]):
+
+ __slots__ = ("_coro", "_resp")
+
+ def __init__(self, coro: Coroutine["asyncio.Future[Any]", None, _RetType]) -> None:
+ self._coro = coro
+
+ def send(self, arg: None) -> "asyncio.Future[Any]":
+ return self._coro.send(arg)
+
+ def throw(self, arg: BaseException) -> None: # type: ignore
+ self._coro.throw(arg)
+
+ def close(self) -> None:
+ return self._coro.close()
+
+ def __await__(self) -> Generator[Any, None, _RetType]:
+ ret = self._coro.__await__()
+ return ret
+
+ def __iter__(self) -> Generator[Any, None, _RetType]:
+ return self.__await__()
+
+ async def __aenter__(self) -> _RetType:
+ self._resp = await self._coro
+ return self._resp
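+
+    # Both invocation styles are supported by this wrapper (illustrative):
+    #
+    #     resp = await session.get(url)         # plain await
+    #
+    #     async with session.get(url) as resp:  # context manager; the
+    #         ...                               # response is released or
+    #                                           # closed in the subclasses'
+    #                                           # __aexit__ implementations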
+
+
+class _RequestContextManager(_BaseRequestContextManager[ClientResponse]):
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> None:
+ # We're basing behavior on the exception as it can be caused by
+ # user code unrelated to the status of the connection. If you
+ # would like to close a connection you must do that
+ # explicitly. Otherwise connection error handling should kick in
+ # and close/recycle the connection as required.
+ self._resp.release()
+
+
+class _WSRequestContextManager(_BaseRequestContextManager[ClientWebSocketResponse]):
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> None:
+ await self._resp.close()
+
+
+class _SessionRequestContextManager:
+
+ __slots__ = ("_coro", "_resp", "_session")
+
+ def __init__(
+ self,
+ coro: Coroutine["asyncio.Future[Any]", None, ClientResponse],
+ session: ClientSession,
+ ) -> None:
+ self._coro = coro
+ self._resp = None # type: Optional[ClientResponse]
+ self._session = session
+
+ async def __aenter__(self) -> ClientResponse:
+ try:
+ self._resp = await self._coro
+ except BaseException:
+ await self._session.close()
+ raise
+ else:
+ return self._resp
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> None:
+ assert self._resp is not None
+ self._resp.close()
+ await self._session.close()
+
+
+def request(
+ method: str,
+ url: StrOrURL,
+ *,
+ params: Optional[Mapping[str, str]] = None,
+ data: Any = None,
+ json: Any = None,
+ headers: Optional[LooseHeaders] = None,
+ skip_auto_headers: Optional[Iterable[str]] = None,
+ auth: Optional[BasicAuth] = None,
+ allow_redirects: bool = True,
+ max_redirects: int = 10,
+ compress: Optional[str] = None,
+ chunked: Optional[bool] = None,
+ expect100: bool = False,
+ raise_for_status: Optional[bool] = None,
+ read_until_eof: bool = True,
+ proxy: Optional[StrOrURL] = None,
+ proxy_auth: Optional[BasicAuth] = None,
+ timeout: Union[ClientTimeout, object] = sentinel,
+ cookies: Optional[LooseCookies] = None,
+ version: HttpVersion = http.HttpVersion11,
+ connector: Optional[BaseConnector] = None,
+ read_bufsize: Optional[int] = None,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+) -> _SessionRequestContextManager:
+ """Constructs and sends a request. Returns response object.
+ method - HTTP method
+ url - request url
+ params - (optional) Dictionary or bytes to be sent in the query
+ string of the new request
+ data - (optional) Dictionary, bytes, or file-like object to
+ send in the body of the request
+ json - (optional) Any json compatible python object
+ headers - (optional) Dictionary of HTTP Headers to send with
+ the request
+ cookies - (optional) Dict object to send with the request
+    auth - (optional) aiohttp.helpers.BasicAuth named tuple representing HTTP Basic Auth
+ allow_redirects - (optional) If set to False, do not follow
+ redirects
+ version - Request HTTP version.
+ compress - Set to True if request has to be compressed
+ with deflate encoding.
+    chunked - Set to True to use chunked transfer encoding.
+ expect100 - Expect 100-continue response from server.
+ connector - BaseConnector sub-class instance to support
+ connection pooling.
+ read_until_eof - Read response until eof if response
+ does not have Content-Length header.
+ loop - Optional event loop.
+ timeout - Optional ClientTimeout settings structure, 5min
+ total timeout by default.
+ Usage::
+ >>> import aiohttp
+ >>> resp = await aiohttp.request('GET', 'http://python.org/')
+ >>> resp
+ <ClientResponse(python.org/) [200]>
+ >>> data = await resp.read()
+ """
+ connector_owner = False
+ if connector is None:
+ connector_owner = True
+ connector = TCPConnector(loop=loop, force_close=True)
+
+ session = ClientSession(
+ loop=loop,
+ cookies=cookies,
+ version=version,
+ timeout=timeout,
+ connector=connector,
+ connector_owner=connector_owner,
+ )
+
+ return _SessionRequestContextManager(
+ session._request(
+ method,
+ url,
+ params=params,
+ data=data,
+ json=json,
+ headers=headers,
+ skip_auto_headers=skip_auto_headers,
+ auth=auth,
+ allow_redirects=allow_redirects,
+ max_redirects=max_redirects,
+ compress=compress,
+ chunked=chunked,
+ expect100=expect100,
+ raise_for_status=raise_for_status,
+ read_until_eof=read_until_eof,
+ proxy=proxy,
+ proxy_auth=proxy_auth,
+ read_bufsize=read_bufsize,
+ ),
+ session,
+ )
diff --git a/third_party/python/aiohttp/aiohttp/client_exceptions.py b/third_party/python/aiohttp/aiohttp/client_exceptions.py
new file mode 100644
index 0000000000..f4be3bfb5e
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/client_exceptions.py
@@ -0,0 +1,317 @@
+"""HTTP related errors."""
+
+import asyncio
+import warnings
+from typing import TYPE_CHECKING, Any, Optional, Tuple, Union
+
+from .typedefs import LooseHeaders
+
+try:
+ import ssl
+
+ SSLContext = ssl.SSLContext
+except ImportError: # pragma: no cover
+ ssl = SSLContext = None # type: ignore
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from .client_reqrep import ClientResponse, ConnectionKey, Fingerprint, RequestInfo
+else:
+ RequestInfo = ClientResponse = ConnectionKey = None
+
+__all__ = (
+ "ClientError",
+ "ClientConnectionError",
+ "ClientOSError",
+ "ClientConnectorError",
+ "ClientProxyConnectionError",
+ "ClientSSLError",
+ "ClientConnectorSSLError",
+ "ClientConnectorCertificateError",
+ "ServerConnectionError",
+ "ServerTimeoutError",
+ "ServerDisconnectedError",
+ "ServerFingerprintMismatch",
+ "ClientResponseError",
+ "ClientHttpProxyError",
+ "WSServerHandshakeError",
+ "ContentTypeError",
+ "ClientPayloadError",
+ "InvalidURL",
+)
+
+
+class ClientError(Exception):
+ """Base class for client connection errors."""
+
+
+class ClientResponseError(ClientError):
+ """Connection error during reading response.
+
+ request_info: instance of RequestInfo
+ """
+
+ def __init__(
+ self,
+ request_info: RequestInfo,
+ history: Tuple[ClientResponse, ...],
+ *,
+ code: Optional[int] = None,
+ status: Optional[int] = None,
+ message: str = "",
+ headers: Optional[LooseHeaders] = None,
+ ) -> None:
+ self.request_info = request_info
+ if code is not None:
+ if status is not None:
+ raise ValueError(
+ "Both code and status arguments are provided; "
+ "code is deprecated, use status instead"
+ )
+ warnings.warn(
+ "code argument is deprecated, use status instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if status is not None:
+ self.status = status
+ elif code is not None:
+ self.status = code
+ else:
+ self.status = 0
+ self.message = message
+ self.headers = headers
+ self.history = history
+ self.args = (request_info, history)
+
+ def __str__(self) -> str:
+ return "{}, message={!r}, url={!r}".format(
+ self.status,
+ self.message,
+ self.request_info.real_url,
+ )
+
+ def __repr__(self) -> str:
+ args = f"{self.request_info!r}, {self.history!r}"
+ if self.status != 0:
+ args += f", status={self.status!r}"
+ if self.message != "":
+ args += f", message={self.message!r}"
+ if self.headers is not None:
+ args += f", headers={self.headers!r}"
+ return "{}({})".format(type(self).__name__, args)
+
+ @property
+ def code(self) -> int:
+ warnings.warn(
+ "code property is deprecated, use status instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return self.status
+
+ @code.setter
+ def code(self, value: int) -> None:
+ warnings.warn(
+ "code property is deprecated, use status instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self.status = value
+
+
+class ContentTypeError(ClientResponseError):
+ """ContentType found is not valid."""
+
+
+class WSServerHandshakeError(ClientResponseError):
+ """websocket server handshake error."""
+
+
+class ClientHttpProxyError(ClientResponseError):
+ """HTTP proxy error.
+
+ Raised in :class:`aiohttp.connector.TCPConnector` if
+ proxy responds with status other than ``200 OK``
+ on ``CONNECT`` request.
+ """
+
+
+class TooManyRedirects(ClientResponseError):
+ """Client was redirected too many times."""
+
+
+class ClientConnectionError(ClientError):
+ """Base class for client socket errors."""
+
+
+class ClientOSError(ClientConnectionError, OSError):
+ """OSError error."""
+
+
+class ClientConnectorError(ClientOSError):
+ """Client connector error.
+
+ Raised in :class:`aiohttp.connector.TCPConnector` if
+    connection to the host cannot be established.
+ """
+
+ def __init__(self, connection_key: ConnectionKey, os_error: OSError) -> None:
+ self._conn_key = connection_key
+ self._os_error = os_error
+ super().__init__(os_error.errno, os_error.strerror)
+ self.args = (connection_key, os_error)
+
+ @property
+ def os_error(self) -> OSError:
+ return self._os_error
+
+ @property
+ def host(self) -> str:
+ return self._conn_key.host
+
+ @property
+ def port(self) -> Optional[int]:
+ return self._conn_key.port
+
+ @property
+ def ssl(self) -> Union[SSLContext, None, bool, "Fingerprint"]:
+ return self._conn_key.ssl
+
+ def __str__(self) -> str:
+ return "Cannot connect to host {0.host}:{0.port} ssl:{1} [{2}]".format(
+ self, self.ssl if self.ssl is not None else "default", self.strerror
+ )
+
+    # OSError.__reduce__ does too much black magic
+ __reduce__ = BaseException.__reduce__
+
+
+class ClientProxyConnectionError(ClientConnectorError):
+ """Proxy connection error.
+
+ Raised in :class:`aiohttp.connector.TCPConnector` if
+ connection to proxy can not be established.
+ """
+
+
+class ServerConnectionError(ClientConnectionError):
+ """Server connection errors."""
+
+
+class ServerDisconnectedError(ServerConnectionError):
+ """Server disconnected."""
+
+ def __init__(self, message: Optional[str] = None) -> None:
+ if message is None:
+ message = "Server disconnected"
+
+ self.args = (message,)
+ self.message = message
+
+
+class ServerTimeoutError(ServerConnectionError, asyncio.TimeoutError):
+ """Server timeout error."""
+
+
+class ServerFingerprintMismatch(ServerConnectionError):
+ """SSL certificate does not match expected fingerprint."""
+
+ def __init__(self, expected: bytes, got: bytes, host: str, port: int) -> None:
+ self.expected = expected
+ self.got = got
+ self.host = host
+ self.port = port
+ self.args = (expected, got, host, port)
+
+ def __repr__(self) -> str:
+ return "<{} expected={!r} got={!r} host={!r} port={!r}>".format(
+ self.__class__.__name__, self.expected, self.got, self.host, self.port
+ )
+
+
+class ClientPayloadError(ClientError):
+ """Response payload error."""
+
+
+class InvalidURL(ClientError, ValueError):
+ """Invalid URL.
+
+    The URL used for fetching is malformed, e.g. it doesn't contain a
+    host part."""
+
+ # Derive from ValueError for backward compatibility
+
+ def __init__(self, url: Any) -> None:
+ # The type of url is not yarl.URL because the exception can be raised
+ # on URL(url) call
+ super().__init__(url)
+
+ @property
+ def url(self) -> Any:
+ return self.args[0]
+
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__} {self.url}>"
+
+
+class ClientSSLError(ClientConnectorError):
+ """Base error for ssl.*Errors."""
+
+
+if ssl is not None:
+ cert_errors = (ssl.CertificateError,)
+ cert_errors_bases = (
+ ClientSSLError,
+ ssl.CertificateError,
+ )
+
+ ssl_errors = (ssl.SSLError,)
+ ssl_error_bases = (ClientSSLError, ssl.SSLError)
+else: # pragma: no cover
+ cert_errors = tuple()
+ cert_errors_bases = (
+ ClientSSLError,
+ ValueError,
+ )
+
+ ssl_errors = tuple()
+ ssl_error_bases = (ClientSSLError,)
+
+
+class ClientConnectorSSLError(*ssl_error_bases): # type: ignore
+ """Response ssl error."""
+
+
+class ClientConnectorCertificateError(*cert_errors_bases): # type: ignore
+ """Response certificate error."""
+
+ def __init__(
+ self, connection_key: ConnectionKey, certificate_error: Exception
+ ) -> None:
+ self._conn_key = connection_key
+ self._certificate_error = certificate_error
+ self.args = (connection_key, certificate_error)
+
+ @property
+ def certificate_error(self) -> Exception:
+ return self._certificate_error
+
+ @property
+ def host(self) -> str:
+ return self._conn_key.host
+
+ @property
+ def port(self) -> Optional[int]:
+ return self._conn_key.port
+
+ @property
+ def ssl(self) -> bool:
+ return self._conn_key.is_ssl
+
+ def __str__(self) -> str:
+ return (
+ "Cannot connect to host {0.host}:{0.port} ssl:{0.ssl} "
+ "[{0.certificate_error.__class__.__name__}: "
+ "{0.certificate_error.args}]".format(self)
+ )
diff --git a/third_party/python/aiohttp/aiohttp/client_proto.py b/third_party/python/aiohttp/aiohttp/client_proto.py
new file mode 100644
index 0000000000..2973342e44
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/client_proto.py
@@ -0,0 +1,251 @@
+import asyncio
+from contextlib import suppress
+from typing import Any, Optional, Tuple
+
+from .base_protocol import BaseProtocol
+from .client_exceptions import (
+ ClientOSError,
+ ClientPayloadError,
+ ServerDisconnectedError,
+ ServerTimeoutError,
+)
+from .helpers import BaseTimerContext
+from .http import HttpResponseParser, RawResponseMessage
+from .streams import EMPTY_PAYLOAD, DataQueue, StreamReader
+
+
+class ResponseHandler(BaseProtocol, DataQueue[Tuple[RawResponseMessage, StreamReader]]):
+ """Helper class to adapt between Protocol and StreamReader."""
+
+ def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
+ BaseProtocol.__init__(self, loop=loop)
+ DataQueue.__init__(self, loop)
+
+ self._should_close = False
+
+ self._payload = None
+ self._skip_payload = False
+ self._payload_parser = None
+
+ self._timer = None
+
+ self._tail = b""
+ self._upgraded = False
+ self._parser = None # type: Optional[HttpResponseParser]
+
+ self._read_timeout = None # type: Optional[float]
+ self._read_timeout_handle = None # type: Optional[asyncio.TimerHandle]
+
+ @property
+ def upgraded(self) -> bool:
+ return self._upgraded
+
+ @property
+ def should_close(self) -> bool:
+        if (
+            self._payload is not None and not self._payload.is_eof()
+        ) or self._upgraded:
+ return True
+
+ return (
+ self._should_close
+ or self._upgraded
+ or self.exception() is not None
+ or self._payload_parser is not None
+ or len(self) > 0
+ or bool(self._tail)
+ )
+
+ def force_close(self) -> None:
+ self._should_close = True
+
+ def close(self) -> None:
+ transport = self.transport
+ if transport is not None:
+ transport.close()
+ self.transport = None
+ self._payload = None
+ self._drop_timeout()
+
+ def is_connected(self) -> bool:
+ return self.transport is not None and not self.transport.is_closing()
+
+ def connection_lost(self, exc: Optional[BaseException]) -> None:
+ self._drop_timeout()
+
+ if self._payload_parser is not None:
+ with suppress(Exception):
+ self._payload_parser.feed_eof()
+
+ uncompleted = None
+ if self._parser is not None:
+ try:
+ uncompleted = self._parser.feed_eof()
+ except Exception:
+ if self._payload is not None:
+ self._payload.set_exception(
+ ClientPayloadError("Response payload is not completed")
+ )
+
+ if not self.is_eof():
+ if isinstance(exc, OSError):
+ exc = ClientOSError(*exc.args)
+ if exc is None:
+ exc = ServerDisconnectedError(uncompleted)
+                # set_exception() assigns self._should_close = True as a
+                # side effect; we set it explicitly below anyway
+ self.set_exception(exc)
+
+ self._should_close = True
+ self._parser = None
+ self._payload = None
+ self._payload_parser = None
+ self._reading_paused = False
+
+ super().connection_lost(exc)
+
+ def eof_received(self) -> None:
+ # should call parser.feed_eof() most likely
+ self._drop_timeout()
+
+ def pause_reading(self) -> None:
+ super().pause_reading()
+ self._drop_timeout()
+
+ def resume_reading(self) -> None:
+ super().resume_reading()
+ self._reschedule_timeout()
+
+ def set_exception(self, exc: BaseException) -> None:
+ self._should_close = True
+ self._drop_timeout()
+ super().set_exception(exc)
+
+ def set_parser(self, parser: Any, payload: Any) -> None:
+ # TODO: actual types are:
+ # parser: WebSocketReader
+ # payload: FlowControlDataQueue
+        # but they are not generic enough
+ # Need an ABC for both types
+ self._payload = payload
+ self._payload_parser = parser
+
+ self._drop_timeout()
+
+ if self._tail:
+ data, self._tail = self._tail, b""
+ self.data_received(data)
+
+ def set_response_params(
+ self,
+ *,
+ timer: Optional[BaseTimerContext] = None,
+ skip_payload: bool = False,
+ read_until_eof: bool = False,
+ auto_decompress: bool = True,
+ read_timeout: Optional[float] = None,
+ read_bufsize: int = 2 ** 16
+ ) -> None:
+ self._skip_payload = skip_payload
+
+ self._read_timeout = read_timeout
+ self._reschedule_timeout()
+
+ self._parser = HttpResponseParser(
+ self,
+ self._loop,
+ read_bufsize,
+ timer=timer,
+ payload_exception=ClientPayloadError,
+ response_with_body=not skip_payload,
+ read_until_eof=read_until_eof,
+ auto_decompress=auto_decompress,
+ )
+
+ if self._tail:
+ data, self._tail = self._tail, b""
+ self.data_received(data)
+
+ def _drop_timeout(self) -> None:
+ if self._read_timeout_handle is not None:
+ self._read_timeout_handle.cancel()
+ self._read_timeout_handle = None
+
+ def _reschedule_timeout(self) -> None:
+ timeout = self._read_timeout
+ if self._read_timeout_handle is not None:
+ self._read_timeout_handle.cancel()
+
+ if timeout:
+ self._read_timeout_handle = self._loop.call_later(
+ timeout, self._on_read_timeout
+ )
+ else:
+ self._read_timeout_handle = None
+
+ def _on_read_timeout(self) -> None:
+ exc = ServerTimeoutError("Timeout on reading data from socket")
+ self.set_exception(exc)
+ if self._payload is not None:
+ self._payload.set_exception(exc)
+
+ def data_received(self, data: bytes) -> None:
+ self._reschedule_timeout()
+
+ if not data:
+ return
+
+ # custom payload parser
+ if self._payload_parser is not None:
+ eof, tail = self._payload_parser.feed_data(data)
+ if eof:
+ self._payload = None
+ self._payload_parser = None
+
+ if tail:
+ self.data_received(tail)
+ return
+ else:
+ if self._upgraded or self._parser is None:
+ # i.e. websocket connection, websocket parser is not set yet
+ self._tail += data
+ else:
+ # parse http messages
+ try:
+ messages, upgraded, tail = self._parser.feed_data(data)
+ except BaseException as exc:
+ if self.transport is not None:
+ # connection.release() could be called BEFORE
+ # data_received(), the transport is already
+ # closed in this case
+ self.transport.close()
+ # should_close is True after the call
+ self.set_exception(exc)
+ return
+
+ self._upgraded = upgraded
+
+ payload = None
+ for message, payload in messages:
+ if message.should_close:
+ self._should_close = True
+
+ self._payload = payload
+
+ if self._skip_payload or message.code in (204, 304):
+ self.feed_data((message, EMPTY_PAYLOAD), 0) # type: ignore
+ else:
+ self.feed_data((message, payload), 0)
+ if payload is not None:
+                        # new message(s) were processed;
+                        # unsubscribe the timeout handler either on
+                        # end-of-stream or immediately for EMPTY_PAYLOAD
+ if payload is not EMPTY_PAYLOAD:
+ payload.on_eof(self._drop_timeout)
+ else:
+ self._drop_timeout()
+
+ if tail:
+ if upgraded:
+ self.data_received(tail)
+ else:
+ self._tail = tail
diff --git a/third_party/python/aiohttp/aiohttp/client_reqrep.py b/third_party/python/aiohttp/aiohttp/client_reqrep.py
new file mode 100644
index 0000000000..d826bfeb7e
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/client_reqrep.py
@@ -0,0 +1,1127 @@
+import asyncio
+import codecs
+import functools
+import io
+import re
+import sys
+import traceback
+import warnings
+from hashlib import md5, sha1, sha256
+from http.cookies import CookieError, Morsel, SimpleCookie
+from types import MappingProxyType, TracebackType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+
+import attr
+from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
+from yarl import URL
+
+from . import hdrs, helpers, http, multipart, payload
+from .abc import AbstractStreamWriter
+from .client_exceptions import (
+ ClientConnectionError,
+ ClientOSError,
+ ClientResponseError,
+ ContentTypeError,
+ InvalidURL,
+ ServerFingerprintMismatch,
+)
+from .formdata import FormData
+from .helpers import (
+ PY_36,
+ BaseTimerContext,
+ BasicAuth,
+ HeadersMixin,
+ TimerNoop,
+ noop,
+ reify,
+ set_result,
+)
+from .http import SERVER_SOFTWARE, HttpVersion10, HttpVersion11, StreamWriter
+from .log import client_logger
+from .streams import StreamReader
+from .typedefs import (
+ DEFAULT_JSON_DECODER,
+ JSONDecoder,
+ LooseCookies,
+ LooseHeaders,
+ RawHeaders,
+)
+
+try:
+ import ssl
+ from ssl import SSLContext
+except ImportError: # pragma: no cover
+ ssl = None # type: ignore
+ SSLContext = object # type: ignore
+
+try:
+ import cchardet as chardet
+except ImportError: # pragma: no cover
+ import chardet # type: ignore
+
+
+__all__ = ("ClientRequest", "ClientResponse", "RequestInfo", "Fingerprint")
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from .client import ClientSession
+ from .connector import Connection
+ from .tracing import Trace
+
+
+json_re = re.compile(r"^application/(?:[\w.+-]+?\+)?json")
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class ContentDisposition:
+ type: Optional[str]
+ parameters: "MappingProxyType[str, str]"
+ filename: Optional[str]
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class RequestInfo:
+ url: URL
+ method: str
+ headers: "CIMultiDictProxy[str]"
+ real_url: URL = attr.ib()
+
+ @real_url.default
+ def real_url_default(self) -> URL:
+ return self.url
+
+
+class Fingerprint:
+ HASHFUNC_BY_DIGESTLEN = {
+ 16: md5,
+ 20: sha1,
+ 32: sha256,
+ }
+
+ def __init__(self, fingerprint: bytes) -> None:
+ digestlen = len(fingerprint)
+ hashfunc = self.HASHFUNC_BY_DIGESTLEN.get(digestlen)
+ if not hashfunc:
+ raise ValueError("fingerprint has invalid length")
+ elif hashfunc is md5 or hashfunc is sha1:
+ raise ValueError(
+ "md5 and sha1 are insecure and " "not supported. Use sha256."
+ )
+ self._hashfunc = hashfunc
+ self._fingerprint = fingerprint
+
+ @property
+ def fingerprint(self) -> bytes:
+ return self._fingerprint
+
+ def check(self, transport: asyncio.Transport) -> None:
+ if not transport.get_extra_info("sslcontext"):
+ return
+ sslobj = transport.get_extra_info("ssl_object")
+ cert = sslobj.getpeercert(binary_form=True)
+ got = self._hashfunc(cert).digest()
+ if got != self._fingerprint:
+ host, port, *_ = transport.get_extra_info("peername")
+ raise ServerFingerprintMismatch(self._fingerprint, got, host, port)
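+
+    # Usage sketch (illustrative; ``cert_der`` is a hypothetical DER-encoded
+    # certificate): pin a server certificate by its SHA-256 digest and pass
+    # the result via the ``ssl`` argument of a request:
+    #
+    #     >>> import hashlib
+    #     >>> fp = Fingerprint(hashlib.sha256(cert_der).digest())
+    #     >>> await session.get(url, ssl=fp)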
+
+
+if ssl is not None:
+ SSL_ALLOWED_TYPES = (ssl.SSLContext, bool, Fingerprint, type(None))
+else: # pragma: no cover
+ SSL_ALLOWED_TYPES = type(None)
+
+
+def _merge_ssl_params(
+ ssl: Union["SSLContext", bool, Fingerprint, None],
+ verify_ssl: Optional[bool],
+ ssl_context: Optional["SSLContext"],
+ fingerprint: Optional[bytes],
+) -> Union["SSLContext", bool, Fingerprint, None]:
+ if verify_ssl is not None and not verify_ssl:
+ warnings.warn(
+ "verify_ssl is deprecated, use ssl=False instead",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ if ssl is not None:
+ raise ValueError(
+ "verify_ssl, ssl_context, fingerprint and ssl "
+ "parameters are mutually exclusive"
+ )
+ else:
+ ssl = False
+ if ssl_context is not None:
+ warnings.warn(
+ "ssl_context is deprecated, use ssl=context instead",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ if ssl is not None:
+ raise ValueError(
+ "verify_ssl, ssl_context, fingerprint and ssl "
+ "parameters are mutually exclusive"
+ )
+ else:
+ ssl = ssl_context
+ if fingerprint is not None:
+ warnings.warn(
+ "fingerprint is deprecated, " "use ssl=Fingerprint(fingerprint) instead",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ if ssl is not None:
+ raise ValueError(
+ "verify_ssl, ssl_context, fingerprint and ssl "
+ "parameters are mutually exclusive"
+ )
+ else:
+ ssl = Fingerprint(fingerprint)
+ if not isinstance(ssl, SSL_ALLOWED_TYPES):
+ raise TypeError(
+ "ssl should be SSLContext, bool, Fingerprint or None, "
+ "got {!r} instead.".format(ssl)
+ )
+ return ssl
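+
+# Equivalences applied by _merge_ssl_params above (illustrative); the legacy
+# parameters are deprecated in favour of the single ``ssl`` argument:
+#   verify_ssl=False    -> ssl=False
+#   ssl_context=ctx     -> ssl=ctx
+#   fingerprint=digest  -> ssl=Fingerprint(digest)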
+
+
+@attr.s(auto_attribs=True, slots=True, frozen=True)
+class ConnectionKey:
+    # the key should contain information about the proxy / TLS used
+    # to prevent reusing the wrong connection from a pool
+ host: str
+ port: Optional[int]
+ is_ssl: bool
+ ssl: Union[SSLContext, None, bool, Fingerprint]
+ proxy: Optional[URL]
+ proxy_auth: Optional[BasicAuth]
+ proxy_headers_hash: Optional[int] # hash(CIMultiDict)
+
+
+def _is_expected_content_type(
+ response_content_type: str, expected_content_type: str
+) -> bool:
+ if expected_content_type == "application/json":
+ return json_re.match(response_content_type) is not None
+ return expected_content_type in response_content_type
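+
+# Illustrative behaviour of the matcher above:
+#   _is_expected_content_type("application/json", "application/json")      -> True
+#   _is_expected_content_type("application/hal+json", "application/json")  -> True
+#   _is_expected_content_type("text/json", "application/json")             -> False
+#   _is_expected_content_type("text/html; charset=utf-8", "text/html")     -> True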
+
+
+class ClientRequest:
+ GET_METHODS = {
+ hdrs.METH_GET,
+ hdrs.METH_HEAD,
+ hdrs.METH_OPTIONS,
+ hdrs.METH_TRACE,
+ }
+ POST_METHODS = {hdrs.METH_PATCH, hdrs.METH_POST, hdrs.METH_PUT}
+ ALL_METHODS = GET_METHODS.union(POST_METHODS).union({hdrs.METH_DELETE})
+
+ DEFAULT_HEADERS = {
+ hdrs.ACCEPT: "*/*",
+ hdrs.ACCEPT_ENCODING: "gzip, deflate",
+ }
+
+ body = b""
+ auth = None
+ response = None
+
+ _writer = None # async task for streaming data
+ _continue = None # waiter future for '100 Continue' response
+
+    # N.B.
+    # Adding a __del__ method that closes self._writer doesn't make sense
+    # because _writer wraps an instance method and thus keeps a reference
+    # to self; the finalizer will not be called until the writer has finished.
+
+ def __init__(
+ self,
+ method: str,
+ url: URL,
+ *,
+ params: Optional[Mapping[str, str]] = None,
+ headers: Optional[LooseHeaders] = None,
+ skip_auto_headers: Iterable[str] = frozenset(),
+ data: Any = None,
+ cookies: Optional[LooseCookies] = None,
+ auth: Optional[BasicAuth] = None,
+ version: http.HttpVersion = http.HttpVersion11,
+ compress: Optional[str] = None,
+ chunked: Optional[bool] = None,
+ expect100: bool = False,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ response_class: Optional[Type["ClientResponse"]] = None,
+ proxy: Optional[URL] = None,
+ proxy_auth: Optional[BasicAuth] = None,
+ timer: Optional[BaseTimerContext] = None,
+ session: Optional["ClientSession"] = None,
+ ssl: Union[SSLContext, bool, Fingerprint, None] = None,
+ proxy_headers: Optional[LooseHeaders] = None,
+ traces: Optional[List["Trace"]] = None,
+ ):
+
+ if loop is None:
+ loop = asyncio.get_event_loop()
+
+ assert isinstance(url, URL), url
+ assert isinstance(proxy, (URL, type(None))), proxy
+ # FIXME: session is None in tests only, need to fix tests
+ # assert session is not None
+ self._session = cast("ClientSession", session)
+ if params:
+ q = MultiDict(url.query)
+ url2 = url.with_query(params)
+ q.extend(url2.query)
+ url = url.with_query(q)
+ self.original_url = url
+ self.url = url.with_fragment(None)
+ self.method = method.upper()
+ self.chunked = chunked
+ self.compress = compress
+ self.loop = loop
+ self.length = None
+ if response_class is None:
+ real_response_class = ClientResponse
+ else:
+ real_response_class = response_class
+ self.response_class = real_response_class # type: Type[ClientResponse]
+ self._timer = timer if timer is not None else TimerNoop()
+ self._ssl = ssl
+
+ if loop.get_debug():
+ self._source_traceback = traceback.extract_stack(sys._getframe(1))
+
+ self.update_version(version)
+ self.update_host(url)
+ self.update_headers(headers)
+ self.update_auto_headers(skip_auto_headers)
+ self.update_cookies(cookies)
+ self.update_content_encoding(data)
+ self.update_auth(auth)
+ self.update_proxy(proxy, proxy_auth, proxy_headers)
+
+ self.update_body_from_data(data)
+ if data or self.method not in self.GET_METHODS:
+ self.update_transfer_encoding()
+ self.update_expect_continue(expect100)
+ if traces is None:
+ traces = []
+ self._traces = traces
+
+ def is_ssl(self) -> bool:
+ return self.url.scheme in ("https", "wss")
+
+ @property
+ def ssl(self) -> Union["SSLContext", None, bool, Fingerprint]:
+ return self._ssl
+
+ @property
+ def connection_key(self) -> ConnectionKey:
+ proxy_headers = self.proxy_headers
+ if proxy_headers:
+ h = hash(
+ tuple((k, v) for k, v in proxy_headers.items())
+ ) # type: Optional[int]
+ else:
+ h = None
+ return ConnectionKey(
+ self.host,
+ self.port,
+ self.is_ssl(),
+ self.ssl,
+ self.proxy,
+ self.proxy_auth,
+ h,
+ )
+
+ @property
+ def host(self) -> str:
+ ret = self.url.raw_host
+ assert ret is not None
+ return ret
+
+ @property
+ def port(self) -> Optional[int]:
+ return self.url.port
+
+ @property
+ def request_info(self) -> RequestInfo:
+ headers = CIMultiDictProxy(self.headers) # type: CIMultiDictProxy[str]
+ return RequestInfo(self.url, self.method, headers, self.original_url)
+
+ def update_host(self, url: URL) -> None:
+ """Update destination host, port and connection type (ssl)."""
+ # get host/port
+ if not url.raw_host:
+ raise InvalidURL(url)
+
+ # basic auth info
+ username, password = url.user, url.password
+ if username:
+ self.auth = helpers.BasicAuth(username, password or "")
+
+ def update_version(self, version: Union[http.HttpVersion, str]) -> None:
+ """Convert request version to two elements tuple.
+
+ parser HTTP version '1.1' => (1, 1)
+ """
+ if isinstance(version, str):
+ v = [part.strip() for part in version.split(".", 1)]
+ try:
+ version = http.HttpVersion(int(v[0]), int(v[1]))
+ except ValueError:
+ raise ValueError(
+ f"Can not parse http version number: {version}"
+ ) from None
+ self.version = version
+
+ def update_headers(self, headers: Optional[LooseHeaders]) -> None:
+ """Update request headers."""
+ self.headers = CIMultiDict() # type: CIMultiDict[str]
+
+ # add host
+ netloc = cast(str, self.url.raw_host)
+ if helpers.is_ipv6_address(netloc):
+ netloc = f"[{netloc}]"
+ if self.url.port is not None and not self.url.is_default_port():
+ netloc += ":" + str(self.url.port)
+ self.headers[hdrs.HOST] = netloc
+
+ if headers:
+ if isinstance(headers, (dict, MultiDictProxy, MultiDict)):
+ headers = headers.items() # type: ignore
+
+ for key, value in headers: # type: ignore
+ # A special case for Host header
+ if key.lower() == "host":
+ self.headers[key] = value
+ else:
+ self.headers.add(key, value)
+
+ def update_auto_headers(self, skip_auto_headers: Iterable[str]) -> None:
+ self.skip_auto_headers = CIMultiDict(
+ (hdr, None) for hdr in sorted(skip_auto_headers)
+ )
+ used_headers = self.headers.copy()
+ used_headers.extend(self.skip_auto_headers) # type: ignore
+
+ for hdr, val in self.DEFAULT_HEADERS.items():
+ if hdr not in used_headers:
+ self.headers.add(hdr, val)
+
+ if hdrs.USER_AGENT not in used_headers:
+ self.headers[hdrs.USER_AGENT] = SERVER_SOFTWARE
+
+ def update_cookies(self, cookies: Optional[LooseCookies]) -> None:
+ """Update request cookies header."""
+ if not cookies:
+ return
+
+ c = SimpleCookie() # type: SimpleCookie[str]
+ if hdrs.COOKIE in self.headers:
+ c.load(self.headers.get(hdrs.COOKIE, ""))
+ del self.headers[hdrs.COOKIE]
+
+ if isinstance(cookies, Mapping):
+ iter_cookies = cookies.items()
+ else:
+ iter_cookies = cookies # type: ignore
+ for name, value in iter_cookies:
+ if isinstance(value, Morsel):
+ # Preserve coded_value
+ mrsl_val = value.get(value.key, Morsel())
+ mrsl_val.set(value.key, value.value, value.coded_value)
+ c[name] = mrsl_val
+ else:
+ c[name] = value # type: ignore
+
+ self.headers[hdrs.COOKIE] = c.output(header="", sep=";").strip()
+
+ def update_content_encoding(self, data: Any) -> None:
+ """Set request content encoding."""
+ if not data:
+ return
+
+ enc = self.headers.get(hdrs.CONTENT_ENCODING, "").lower()
+ if enc:
+ if self.compress:
+ raise ValueError(
+ "compress can not be set " "if Content-Encoding header is set"
+ )
+ elif self.compress:
+ if not isinstance(self.compress, str):
+ self.compress = "deflate"
+ self.headers[hdrs.CONTENT_ENCODING] = self.compress
+ self.chunked = True # enable chunked, no need to deal with length
+
+ def update_transfer_encoding(self) -> None:
+ """Analyze transfer-encoding header."""
+ te = self.headers.get(hdrs.TRANSFER_ENCODING, "").lower()
+
+ if "chunked" in te:
+ if self.chunked:
+ raise ValueError(
+ "chunked can not be set "
+ 'if "Transfer-Encoding: chunked" header is set'
+ )
+
+ elif self.chunked:
+ if hdrs.CONTENT_LENGTH in self.headers:
+ raise ValueError(
+ "chunked can not be set " "if Content-Length header is set"
+ )
+
+ self.headers[hdrs.TRANSFER_ENCODING] = "chunked"
+ else:
+ if hdrs.CONTENT_LENGTH not in self.headers:
+ self.headers[hdrs.CONTENT_LENGTH] = str(len(self.body))
+
+ def update_auth(self, auth: Optional[BasicAuth]) -> None:
+ """Set basic auth."""
+ if auth is None:
+ auth = self.auth
+ if auth is None:
+ return
+
+ if not isinstance(auth, helpers.BasicAuth):
+ raise TypeError("BasicAuth() tuple is required instead")
+
+ self.headers[hdrs.AUTHORIZATION] = auth.encode()
+
+ def update_body_from_data(self, body: Any) -> None:
+ if not body:
+ return
+
+ # FormData
+ if isinstance(body, FormData):
+ body = body()
+
+ try:
+ body = payload.PAYLOAD_REGISTRY.get(body, disposition=None)
+ except payload.LookupError:
+ body = FormData(body)()
+
+ self.body = body
+
+ # enable chunked encoding if needed
+ if not self.chunked:
+ if hdrs.CONTENT_LENGTH not in self.headers:
+ size = body.size
+ if size is None:
+ self.chunked = True
+ else:
+ if hdrs.CONTENT_LENGTH not in self.headers:
+ self.headers[hdrs.CONTENT_LENGTH] = str(size)
+
+ # copy payload headers
+ assert body.headers
+ for (key, value) in body.headers.items():
+ if key in self.headers:
+ continue
+ if key in self.skip_auto_headers:
+ continue
+ self.headers[key] = value
+
+ def update_expect_continue(self, expect: bool = False) -> None:
+ if expect:
+ self.headers[hdrs.EXPECT] = "100-continue"
+ elif self.headers.get(hdrs.EXPECT, "").lower() == "100-continue":
+ expect = True
+
+ if expect:
+ self._continue = self.loop.create_future()
+
+ def update_proxy(
+ self,
+ proxy: Optional[URL],
+ proxy_auth: Optional[BasicAuth],
+ proxy_headers: Optional[LooseHeaders],
+ ) -> None:
+        if proxy and proxy.scheme != "http":
+ raise ValueError("Only http proxies are supported")
+ if proxy_auth and not isinstance(proxy_auth, helpers.BasicAuth):
+ raise ValueError("proxy_auth must be None or BasicAuth() tuple")
+ self.proxy = proxy
+ self.proxy_auth = proxy_auth
+ self.proxy_headers = proxy_headers
+
+ def keep_alive(self) -> bool:
+ if self.version < HttpVersion10:
+ # keep alive not supported at all
+ return False
+ if self.version == HttpVersion10:
+ if self.headers.get(hdrs.CONNECTION) == "keep-alive":
+ return True
+ else: # no headers means we close for Http 1.0
+ return False
+ elif self.headers.get(hdrs.CONNECTION) == "close":
+ return False
+
+ return True
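+
+    # Keep-alive decision summary for keep_alive() above (illustrative):
+    #   HTTP/0.9  -> never
+    #   HTTP/1.0  -> only with "Connection: keep-alive"
+    #   HTTP/1.1+ -> unless "Connection: close"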
+
+ async def write_bytes(
+ self, writer: AbstractStreamWriter, conn: "Connection"
+ ) -> None:
+ """Support coroutines that yields bytes objects."""
+ # 100 response
+ if self._continue is not None:
+ await writer.drain()
+ await self._continue
+
+ protocol = conn.protocol
+ assert protocol is not None
+ try:
+ if isinstance(self.body, payload.Payload):
+ await self.body.write(writer)
+ else:
+ if isinstance(self.body, (bytes, bytearray)):
+ self.body = (self.body,) # type: ignore
+
+ for chunk in self.body:
+ await writer.write(chunk) # type: ignore
+
+ await writer.write_eof()
+ except OSError as exc:
+ new_exc = ClientOSError(
+ exc.errno, "Can not write request body for %s" % self.url
+ )
+ new_exc.__context__ = exc
+ new_exc.__cause__ = exc
+ protocol.set_exception(new_exc)
+ except asyncio.CancelledError as exc:
+ if not conn.closed:
+ protocol.set_exception(exc)
+ except Exception as exc:
+ protocol.set_exception(exc)
+ finally:
+ self._writer = None
+
+ async def send(self, conn: "Connection") -> "ClientResponse":
+        # Specify the request target:
+        # - a CONNECT request must send an authority-form URI
+        # - a non-CONNECT request via a proxy must send an absolute-form URI
+        # - the most common case is an origin-form URI
+ if self.method == hdrs.METH_CONNECT:
+ connect_host = self.url.raw_host
+ assert connect_host is not None
+ if helpers.is_ipv6_address(connect_host):
+ connect_host = f"[{connect_host}]"
+ path = f"{connect_host}:{self.url.port}"
+ elif self.proxy and not self.is_ssl():
+ path = str(self.url)
+ else:
+ path = self.url.raw_path
+ if self.url.raw_query_string:
+ path += "?" + self.url.raw_query_string
+
+ protocol = conn.protocol
+ assert protocol is not None
+ writer = StreamWriter(
+ protocol,
+ self.loop,
+ on_chunk_sent=functools.partial(
+ self._on_chunk_request_sent, self.method, self.url
+ ),
+ )
+
+ if self.compress:
+ writer.enable_compression(self.compress)
+
+ if self.chunked is not None:
+ writer.enable_chunking()
+
+ # set default content-type
+ if (
+ self.method in self.POST_METHODS
+ and hdrs.CONTENT_TYPE not in self.skip_auto_headers
+ and hdrs.CONTENT_TYPE not in self.headers
+ ):
+ self.headers[hdrs.CONTENT_TYPE] = "application/octet-stream"
+
+ # set the connection header
+ connection = self.headers.get(hdrs.CONNECTION)
+ if not connection:
+ if self.keep_alive():
+ if self.version == HttpVersion10:
+ connection = "keep-alive"
+ else:
+ if self.version == HttpVersion11:
+ connection = "close"
+
+ if connection is not None:
+ self.headers[hdrs.CONNECTION] = connection
+
+ # status + headers
+ status_line = "{0} {1} HTTP/{2[0]}.{2[1]}".format(
+ self.method, path, self.version
+ )
+ await writer.write_headers(status_line, self.headers)
+
+ self._writer = self.loop.create_task(self.write_bytes(writer, conn))
+
+ response_class = self.response_class
+ assert response_class is not None
+ self.response = response_class(
+ self.method,
+ self.original_url,
+ writer=self._writer,
+ continue100=self._continue,
+ timer=self._timer,
+ request_info=self.request_info,
+ traces=self._traces,
+ loop=self.loop,
+ session=self._session,
+ )
+ return self.response
+
+ async def close(self) -> None:
+ if self._writer is not None:
+ try:
+ await self._writer
+ finally:
+ self._writer = None
+
+ def terminate(self) -> None:
+ if self._writer is not None:
+ if not self.loop.is_closed():
+ self._writer.cancel()
+ self._writer = None
+
+ async def _on_chunk_request_sent(self, method: str, url: URL, chunk: bytes) -> None:
+ for trace in self._traces:
+ await trace.send_request_chunk_sent(method, url, chunk)
+
+
+class ClientResponse(HeadersMixin):
+
+ # from the Status-Line of the response
+ version = None # HTTP-Version
+ status = None # type: int # Status-Code
+ reason = None # Reason-Phrase
+
+ content = None # type: StreamReader # Payload stream
+ _headers = None # type: CIMultiDictProxy[str] # Response headers
+ _raw_headers = None # type: RawHeaders # Response raw headers
+
+ _connection = None # current connection
+ _source_traceback = None
+    # set up by ClientRequest after the ClientResponse object is created;
+    # a post-init stage allows keeping the ctor signature unchanged
+    _closed = True  # to allow __del__ for a response that was not properly initialized
+ _released = False
+
+ def __init__(
+ self,
+ method: str,
+ url: URL,
+ *,
+ writer: "asyncio.Task[None]",
+ continue100: Optional["asyncio.Future[bool]"],
+ timer: BaseTimerContext,
+ request_info: RequestInfo,
+ traces: List["Trace"],
+ loop: asyncio.AbstractEventLoop,
+ session: "ClientSession",
+ ) -> None:
+ assert isinstance(url, URL)
+
+ self.method = method
+ self.cookies = SimpleCookie() # type: SimpleCookie[str]
+
+ self._real_url = url
+ self._url = url.with_fragment(None)
+ self._body = None # type: Any
+ self._writer = writer # type: Optional[asyncio.Task[None]]
+ self._continue = continue100 # None by default
+ self._closed = True
+ self._history = () # type: Tuple[ClientResponse, ...]
+ self._request_info = request_info
+ self._timer = timer if timer is not None else TimerNoop()
+ self._cache = {} # type: Dict[str, Any]
+ self._traces = traces
+ self._loop = loop
+ # store a reference to session #1985
+ self._session = session # type: Optional[ClientSession]
+ if loop.get_debug():
+ self._source_traceback = traceback.extract_stack(sys._getframe(1))
+
+ @reify
+ def url(self) -> URL:
+ return self._url
+
+ @reify
+ def url_obj(self) -> URL:
+ warnings.warn("Deprecated, use .url #1654", DeprecationWarning, stacklevel=2)
+ return self._url
+
+ @reify
+ def real_url(self) -> URL:
+ return self._real_url
+
+ @reify
+ def host(self) -> str:
+ assert self._url.host is not None
+ return self._url.host
+
+ @reify
+ def headers(self) -> "CIMultiDictProxy[str]":
+ return self._headers
+
+ @reify
+ def raw_headers(self) -> RawHeaders:
+ return self._raw_headers
+
+ @reify
+ def request_info(self) -> RequestInfo:
+ return self._request_info
+
+ @reify
+ def content_disposition(self) -> Optional[ContentDisposition]:
+ raw = self._headers.get(hdrs.CONTENT_DISPOSITION)
+ if raw is None:
+ return None
+ disposition_type, params_dct = multipart.parse_content_disposition(raw)
+ params = MappingProxyType(params_dct)
+ filename = multipart.content_disposition_filename(params)
+ return ContentDisposition(disposition_type, params, filename)
+
+ def __del__(self, _warnings: Any = warnings) -> None:
+ if self._closed:
+ return
+
+ if self._connection is not None:
+ self._connection.release()
+ self._cleanup_writer()
+
+ if self._loop.get_debug():
+ if PY_36:
+ kwargs = {"source": self}
+ else:
+ kwargs = {}
+ _warnings.warn(f"Unclosed response {self!r}", ResourceWarning, **kwargs)
+ context = {"client_response": self, "message": "Unclosed response"}
+ if self._source_traceback:
+ context["source_traceback"] = self._source_traceback
+ self._loop.call_exception_handler(context)
+
+ def __repr__(self) -> str:
+ out = io.StringIO()
+ ascii_encodable_url = str(self.url)
+ if self.reason:
+ ascii_encodable_reason = self.reason.encode(
+ "ascii", "backslashreplace"
+ ).decode("ascii")
+ else:
+ ascii_encodable_reason = self.reason
+ print(
+ "<ClientResponse({}) [{} {}]>".format(
+ ascii_encodable_url, self.status, ascii_encodable_reason
+ ),
+ file=out,
+ )
+ print(self.headers, file=out)
+ return out.getvalue()
+
+ @property
+ def connection(self) -> Optional["Connection"]:
+ return self._connection
+
+ @reify
+ def history(self) -> Tuple["ClientResponse", ...]:
+ """A sequence of of responses, if redirects occurred."""
+ return self._history
+
+ @reify
+ def links(self) -> "MultiDictProxy[MultiDictProxy[Union[str, URL]]]":
+ links_str = ", ".join(self.headers.getall("link", []))
+
+ if not links_str:
+ return MultiDictProxy(MultiDict())
+
+ links = MultiDict() # type: MultiDict[MultiDictProxy[Union[str, URL]]]
+
+ for val in re.split(r",(?=\s*<)", links_str):
+ match = re.match(r"\s*<(.*)>(.*)", val)
+ if match is None: # pragma: no cover
+ # the check exists to suppress mypy error
+ continue
+ url, params_str = match.groups()
+ params = params_str.split(";")[1:]
+
+ link = MultiDict() # type: MultiDict[Union[str, URL]]
+
+ for param in params:
+ match = re.match(r"^\s*(\S*)\s*=\s*(['\"]?)(.*?)(\2)\s*$", param, re.M)
+ if match is None: # pragma: no cover
+ # the check exists to suppress mypy error
+ continue
+ key, _, value, _ = match.groups()
+
+ link.add(key, value)
+
+ key = link.get("rel", url) # type: ignore
+
+ link.add("url", self.url.join(URL(url)))
+
+ links.add(key, MultiDictProxy(link))
+
+ return MultiDictProxy(links)
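+
+    # Illustrative example: a response header
+    #   Link: <http://example.com/page/2>; rel="next"
+    # is exposed as resp.links["next"]["url"] == URL("http://example.com/page/2")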
+
+ async def start(self, connection: "Connection") -> "ClientResponse":
+ """Start response processing."""
+ self._closed = False
+ self._protocol = connection.protocol
+ self._connection = connection
+
+ with self._timer:
+ while True:
+ # read response
+ try:
+ message, payload = await self._protocol.read() # type: ignore
+ except http.HttpProcessingError as exc:
+ raise ClientResponseError(
+ self.request_info,
+ self.history,
+ status=exc.code,
+ message=exc.message,
+ headers=exc.headers,
+ ) from exc
+
+ if message.code < 100 or message.code > 199 or message.code == 101:
+ break
+
+ if self._continue is not None:
+ set_result(self._continue, True)
+ self._continue = None
+
+ # payload eof handler
+ payload.on_eof(self._response_eof)
+
+ # response status
+ self.version = message.version
+ self.status = message.code
+ self.reason = message.reason
+
+ # headers
+ self._headers = message.headers # type is CIMultiDictProxy
+ self._raw_headers = message.raw_headers # type is Tuple[bytes, bytes]
+
+ # payload
+ self.content = payload
+
+ # cookies
+ for hdr in self.headers.getall(hdrs.SET_COOKIE, ()):
+ try:
+ self.cookies.load(hdr)
+ except CookieError as exc:
+ client_logger.warning("Can not load response cookies: %s", exc)
+ return self
+
+ def _response_eof(self) -> None:
+ if self._closed:
+ return
+
+ if self._connection is not None:
+            # for websockets the protocol could be None because
+            # the connection could be detached
+ if (
+ self._connection.protocol is not None
+ and self._connection.protocol.upgraded
+ ):
+ return
+
+ self._connection.release()
+ self._connection = None
+
+ self._closed = True
+ self._cleanup_writer()
+
+ @property
+ def closed(self) -> bool:
+ return self._closed
+
+ def close(self) -> None:
+ if not self._released:
+ self._notify_content()
+ if self._closed:
+ return
+
+ self._closed = True
+ if self._loop is None or self._loop.is_closed():
+ return
+
+ if self._connection is not None:
+ self._connection.close()
+ self._connection = None
+ self._cleanup_writer()
+
+ def release(self) -> Any:
+ if not self._released:
+ self._notify_content()
+ if self._closed:
+ return noop()
+
+ self._closed = True
+ if self._connection is not None:
+ self._connection.release()
+ self._connection = None
+
+ self._cleanup_writer()
+ return noop()
+
+ @property
+ def ok(self) -> bool:
+ """Returns ``True`` if ``status`` is less than ``400``, ``False`` if not.
+
+ This is **not** a check for ``200 OK`` but a check that the response
+ status is under 400.
+ """
+ try:
+ self.raise_for_status()
+ except ClientResponseError:
+ return False
+ return True
+
+ def raise_for_status(self) -> None:
+ if 400 <= self.status:
+            # reason should never be None for a started response
+ assert self.reason is not None
+ self.release()
+ raise ClientResponseError(
+ self.request_info,
+ self.history,
+ status=self.status,
+ message=self.reason,
+ headers=self.headers,
+ )
+
+ def _cleanup_writer(self) -> None:
+ if self._writer is not None:
+ self._writer.cancel()
+ self._writer = None
+ self._session = None
+
+ def _notify_content(self) -> None:
+ content = self.content
+ if content and content.exception() is None:
+ content.set_exception(ClientConnectionError("Connection closed"))
+ self._released = True
+
+ async def wait_for_close(self) -> None:
+ if self._writer is not None:
+ try:
+ await self._writer
+ finally:
+ self._writer = None
+ self.release()
+
+ async def read(self) -> bytes:
+ """Read response payload."""
+ if self._body is None:
+ try:
+ self._body = await self.content.read()
+ for trace in self._traces:
+ await trace.send_response_chunk_received(
+ self.method, self.url, self._body
+ )
+ except BaseException:
+ self.close()
+ raise
+ elif self._released:
+ raise ClientConnectionError("Connection closed")
+
+ return self._body
+
+ def get_encoding(self) -> str:
+ ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
+ mimetype = helpers.parse_mimetype(ctype)
+
+ encoding = mimetype.parameters.get("charset")
+ if encoding:
+ try:
+ codecs.lookup(encoding)
+ except LookupError:
+ encoding = None
+ if not encoding:
+ if mimetype.type == "application" and (
+ mimetype.subtype == "json" or mimetype.subtype == "rdap"
+ ):
+ # RFC 7159 states that the default encoding is UTF-8.
+ # RFC 7483 defines application/rdap+json
+ encoding = "utf-8"
+ elif self._body is None:
+ raise RuntimeError(
+ "Cannot guess the encoding of " "a not yet read body"
+ )
+ else:
+ encoding = chardet.detect(self._body)["encoding"]
+ if not encoding:
+ encoding = "utf-8"
+
+ return encoding
+
+ async def text(self, encoding: Optional[str] = None, errors: str = "strict") -> str:
+ """Read response payload and decode."""
+ if self._body is None:
+ await self.read()
+
+ if encoding is None:
+ encoding = self.get_encoding()
+
+ return self._body.decode(encoding, errors=errors) # type: ignore
+
+ async def json(
+ self,
+ *,
+ encoding: Optional[str] = None,
+ loads: JSONDecoder = DEFAULT_JSON_DECODER,
+ content_type: Optional[str] = "application/json",
+ ) -> Any:
+ """Read and decodes JSON response."""
+ if self._body is None:
+ await self.read()
+
+ if content_type:
+ ctype = self.headers.get(hdrs.CONTENT_TYPE, "").lower()
+ if not _is_expected_content_type(ctype, content_type):
+ raise ContentTypeError(
+ self.request_info,
+ self.history,
+ message=(
+ "Attempt to decode JSON with " "unexpected mimetype: %s" % ctype
+ ),
+ headers=self.headers,
+ )
+
+ stripped = self._body.strip() # type: ignore
+ if not stripped:
+ return None
+
+ if encoding is None:
+ encoding = self.get_encoding()
+
+ return loads(stripped.decode(encoding))
+
+ async def __aenter__(self) -> "ClientResponse":
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ # similar to _RequestContextManager, we do not need to check
+ # for exceptions, response object can close connection
+ # if state is broken
+ self.release()
diff --git a/third_party/python/aiohttp/aiohttp/client_ws.py b/third_party/python/aiohttp/aiohttp/client_ws.py
new file mode 100644
index 0000000000..28fa371cce
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/client_ws.py
@@ -0,0 +1,301 @@
+"""WebSocket client for asyncio."""
+
+import asyncio
+from typing import Any, Optional
+
+import async_timeout
+
+from .client_exceptions import ClientError
+from .client_reqrep import ClientResponse
+from .helpers import call_later, set_result
+from .http import (
+ WS_CLOSED_MESSAGE,
+ WS_CLOSING_MESSAGE,
+ WebSocketError,
+ WSMessage,
+ WSMsgType,
+)
+from .http_websocket import WebSocketWriter # WSMessage
+from .streams import EofStream, FlowControlDataQueue
+from .typedefs import (
+ DEFAULT_JSON_DECODER,
+ DEFAULT_JSON_ENCODER,
+ JSONDecoder,
+ JSONEncoder,
+)
+
+
+class ClientWebSocketResponse:
+ def __init__(
+ self,
+ reader: "FlowControlDataQueue[WSMessage]",
+ writer: WebSocketWriter,
+ protocol: Optional[str],
+ response: ClientResponse,
+ timeout: float,
+ autoclose: bool,
+ autoping: bool,
+ loop: asyncio.AbstractEventLoop,
+ *,
+ receive_timeout: Optional[float] = None,
+ heartbeat: Optional[float] = None,
+ compress: int = 0,
+ client_notakeover: bool = False,
+ ) -> None:
+ self._response = response
+ self._conn = response.connection
+
+ self._writer = writer
+ self._reader = reader
+ self._protocol = protocol
+ self._closed = False
+ self._closing = False
+ self._close_code = None # type: Optional[int]
+ self._timeout = timeout
+ self._receive_timeout = receive_timeout
+ self._autoclose = autoclose
+ self._autoping = autoping
+ self._heartbeat = heartbeat
+ self._heartbeat_cb = None
+ if heartbeat is not None:
+ self._pong_heartbeat = heartbeat / 2.0
+ self._pong_response_cb = None
+ self._loop = loop
+ self._waiting = None # type: Optional[asyncio.Future[bool]]
+ self._exception = None # type: Optional[BaseException]
+ self._compress = compress
+ self._client_notakeover = client_notakeover
+
+ self._reset_heartbeat()
+
+ def _cancel_heartbeat(self) -> None:
+ if self._pong_response_cb is not None:
+ self._pong_response_cb.cancel()
+ self._pong_response_cb = None
+
+ if self._heartbeat_cb is not None:
+ self._heartbeat_cb.cancel()
+ self._heartbeat_cb = None
+
+ def _reset_heartbeat(self) -> None:
+ self._cancel_heartbeat()
+
+ if self._heartbeat is not None:
+ self._heartbeat_cb = call_later(
+ self._send_heartbeat, self._heartbeat, self._loop
+ )
+
+ def _send_heartbeat(self) -> None:
+ if self._heartbeat is not None and not self._closed:
+ # Fire-and-forget a task; not perfect but may be OK for
+ # sending a ping. Otherwise we would need a long-lived
+ # heartbeat task in the class.
+ self._loop.create_task(self._writer.ping())
+
+ if self._pong_response_cb is not None:
+ self._pong_response_cb.cancel()
+ self._pong_response_cb = call_later(
+ self._pong_not_received, self._pong_heartbeat, self._loop
+ )
+
+ def _pong_not_received(self) -> None:
+ if not self._closed:
+ self._closed = True
+ self._close_code = 1006
+ self._exception = asyncio.TimeoutError()
+ self._response.close()
+
+ @property
+ def closed(self) -> bool:
+ return self._closed
+
+ @property
+ def close_code(self) -> Optional[int]:
+ return self._close_code
+
+ @property
+ def protocol(self) -> Optional[str]:
+ return self._protocol
+
+ @property
+ def compress(self) -> int:
+ return self._compress
+
+ @property
+ def client_notakeover(self) -> bool:
+ return self._client_notakeover
+
+ def get_extra_info(self, name: str, default: Any = None) -> Any:
+ """extra info from connection transport"""
+ conn = self._response.connection
+ if conn is None:
+ return default
+ transport = conn.transport
+ if transport is None:
+ return default
+ return transport.get_extra_info(name, default)
+
+ def exception(self) -> Optional[BaseException]:
+ return self._exception
+
+ async def ping(self, message: bytes = b"") -> None:
+ await self._writer.ping(message)
+
+ async def pong(self, message: bytes = b"") -> None:
+ await self._writer.pong(message)
+
+ async def send_str(self, data: str, compress: Optional[int] = None) -> None:
+ if not isinstance(data, str):
+ raise TypeError("data argument must be str (%r)" % type(data))
+ await self._writer.send(data, binary=False, compress=compress)
+
+ async def send_bytes(self, data: bytes, compress: Optional[int] = None) -> None:
+ if not isinstance(data, (bytes, bytearray, memoryview)):
+ raise TypeError("data argument must be byte-ish (%r)" % type(data))
+ await self._writer.send(data, binary=True, compress=compress)
+
+ async def send_json(
+ self,
+ data: Any,
+ compress: Optional[int] = None,
+ *,
+ dumps: JSONEncoder = DEFAULT_JSON_ENCODER,
+ ) -> None:
+ await self.send_str(dumps(data), compress=compress)
+
+ async def close(self, *, code: int = 1000, message: bytes = b"") -> bool:
+ # we need to break the `receive()` cycle first, since
+ # `close()` may be called from a different task
+ if self._waiting is not None and not self._closed:
+ self._reader.feed_data(WS_CLOSING_MESSAGE, 0)
+ await self._waiting
+
+ if not self._closed:
+ self._cancel_heartbeat()
+ self._closed = True
+ try:
+ await self._writer.close(code, message)
+ except asyncio.CancelledError:
+ self._close_code = 1006
+ self._response.close()
+ raise
+ except Exception as exc:
+ self._close_code = 1006
+ self._exception = exc
+ self._response.close()
+ return True
+
+ if self._closing:
+ self._response.close()
+ return True
+
+ while True:
+ try:
+ with async_timeout.timeout(self._timeout, loop=self._loop):
+ msg = await self._reader.read()
+ except asyncio.CancelledError:
+ self._close_code = 1006
+ self._response.close()
+ raise
+ except Exception as exc:
+ self._close_code = 1006
+ self._exception = exc
+ self._response.close()
+ return True
+
+ if msg.type == WSMsgType.CLOSE:
+ self._close_code = msg.data
+ self._response.close()
+ return True
+ else:
+ return False
+
+ async def receive(self, timeout: Optional[float] = None) -> WSMessage:
+ while True:
+ if self._waiting is not None:
+ raise RuntimeError("Concurrent call to receive() is not allowed")
+
+ if self._closed:
+ return WS_CLOSED_MESSAGE
+ elif self._closing:
+ await self.close()
+ return WS_CLOSED_MESSAGE
+
+ try:
+ self._waiting = self._loop.create_future()
+ try:
+ with async_timeout.timeout(
+ timeout or self._receive_timeout, loop=self._loop
+ ):
+ msg = await self._reader.read()
+ self._reset_heartbeat()
+ finally:
+ waiter = self._waiting
+ self._waiting = None
+ set_result(waiter, True)
+ except (asyncio.CancelledError, asyncio.TimeoutError):
+ self._close_code = 1006
+ raise
+ except EofStream:
+ self._close_code = 1000
+ await self.close()
+ return WSMessage(WSMsgType.CLOSED, None, None)
+ except ClientError:
+ self._closed = True
+ self._close_code = 1006
+ return WS_CLOSED_MESSAGE
+ except WebSocketError as exc:
+ self._close_code = exc.code
+ await self.close(code=exc.code)
+ return WSMessage(WSMsgType.ERROR, exc, None)
+ except Exception as exc:
+ self._exception = exc
+ self._closing = True
+ self._close_code = 1006
+ await self.close()
+ return WSMessage(WSMsgType.ERROR, exc, None)
+
+ if msg.type == WSMsgType.CLOSE:
+ self._closing = True
+ self._close_code = msg.data
+ if not self._closed and self._autoclose:
+ await self.close()
+ elif msg.type == WSMsgType.CLOSING:
+ self._closing = True
+ elif msg.type == WSMsgType.PING and self._autoping:
+ await self.pong(msg.data)
+ continue
+ elif msg.type == WSMsgType.PONG and self._autoping:
+ continue
+
+ return msg
+
+ async def receive_str(self, *, timeout: Optional[float] = None) -> str:
+ msg = await self.receive(timeout)
+ if msg.type != WSMsgType.TEXT:
+ raise TypeError(f"Received message {msg.type}:{msg.data!r} is not str")
+ return msg.data
+
+ async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes:
+ msg = await self.receive(timeout)
+ if msg.type != WSMsgType.BINARY:
+ raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes")
+ return msg.data
+
+ async def receive_json(
+ self,
+ *,
+ loads: JSONDecoder = DEFAULT_JSON_DECODER,
+ timeout: Optional[float] = None,
+ ) -> Any:
+ data = await self.receive_str(timeout=timeout)
+ return loads(data)
+
+ def __aiter__(self) -> "ClientWebSocketResponse":
+ return self
+
+ async def __anext__(self) -> WSMessage:
+ msg = await self.receive()
+ if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED):
+ raise StopAsyncIteration
+ return msg
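
ClientWebSocketResponse instances are normally obtained from ClientSession.ws_connect(), which wires up the reader, writer and heartbeat machinery shown above. A minimal sketch, assuming a reachable echo endpoint (the URL and interval are placeholders):

import asyncio

import aiohttp

async def main() -> None:
    async with aiohttp.ClientSession() as session:
        # heartbeat drives the ping/pong logic above; a missed pong
        # closes the socket with close code 1006.
        async with session.ws_connect("wss://example.com/ws", heartbeat=30.0) as ws:
            await ws.send_str("hello")
            async for msg in ws:  # __aiter__/__anext__ stop on CLOSE/CLOSING/CLOSED
                if msg.type == aiohttp.WSMsgType.TEXT:
                    print(msg.data)
                elif msg.type == aiohttp.WSMsgType.ERROR:
                    break

asyncio.run(main())
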
diff --git a/third_party/python/aiohttp/aiohttp/connector.py b/third_party/python/aiohttp/aiohttp/connector.py
new file mode 100644
index 0000000000..748b22a422
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/connector.py
@@ -0,0 +1,1262 @@
+import asyncio
+import functools
+import random
+import sys
+import traceback
+import warnings
+from collections import defaultdict, deque
+from contextlib import suppress
+from http.cookies import SimpleCookie
+from itertools import cycle, islice
+from time import monotonic
+from types import TracebackType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ DefaultDict,
+ Dict,
+ Iterator,
+ List,
+ Optional,
+ Set,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+
+import attr
+
+from . import hdrs, helpers
+from .abc import AbstractResolver
+from .client_exceptions import (
+ ClientConnectionError,
+ ClientConnectorCertificateError,
+ ClientConnectorError,
+ ClientConnectorSSLError,
+ ClientHttpProxyError,
+ ClientProxyConnectionError,
+ ServerFingerprintMismatch,
+ cert_errors,
+ ssl_errors,
+)
+from .client_proto import ResponseHandler
+from .client_reqrep import ClientRequest, Fingerprint, _merge_ssl_params
+from .helpers import PY_36, CeilTimeout, get_running_loop, is_ip_address, noop, sentinel
+from .http import RESPONSES
+from .locks import EventResultOrError
+from .resolver import DefaultResolver
+
+try:
+ import ssl
+
+ SSLContext = ssl.SSLContext
+except ImportError: # pragma: no cover
+ ssl = None # type: ignore
+ SSLContext = object # type: ignore
+
+
+__all__ = ("BaseConnector", "TCPConnector", "UnixConnector", "NamedPipeConnector")
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from .client import ClientTimeout
+ from .client_reqrep import ConnectionKey
+ from .tracing import Trace
+
+
+class _DeprecationWaiter:
+ __slots__ = ("_awaitable", "_awaited")
+
+ def __init__(self, awaitable: Awaitable[Any]) -> None:
+ self._awaitable = awaitable
+ self._awaited = False
+
+ def __await__(self) -> Any:
+ self._awaited = True
+ return self._awaitable.__await__()
+
+ def __del__(self) -> None:
+ if not self._awaited:
+ warnings.warn(
+ "Connector.close() is a coroutine, "
+ "please use await connector.close()",
+ DeprecationWarning,
+ )
+
+
+class Connection:
+
+ _source_traceback = None
+ _transport = None
+
+ def __init__(
+ self,
+ connector: "BaseConnector",
+ key: "ConnectionKey",
+ protocol: ResponseHandler,
+ loop: asyncio.AbstractEventLoop,
+ ) -> None:
+ self._key = key
+ self._connector = connector
+ self._loop = loop
+ self._protocol = protocol # type: Optional[ResponseHandler]
+ self._callbacks = [] # type: List[Callable[[], None]]
+
+ if loop.get_debug():
+ self._source_traceback = traceback.extract_stack(sys._getframe(1))
+
+ def __repr__(self) -> str:
+ return f"Connection<{self._key}>"
+
+ def __del__(self, _warnings: Any = warnings) -> None:
+ if self._protocol is not None:
+ if PY_36:
+ kwargs = {"source": self}
+ else:
+ kwargs = {}
+ _warnings.warn(f"Unclosed connection {self!r}", ResourceWarning, **kwargs)
+ if self._loop.is_closed():
+ return
+
+ self._connector._release(self._key, self._protocol, should_close=True)
+
+ context = {"client_connection": self, "message": "Unclosed connection"}
+ if self._source_traceback is not None:
+ context["source_traceback"] = self._source_traceback
+ self._loop.call_exception_handler(context)
+
+ @property
+ def loop(self) -> asyncio.AbstractEventLoop:
+ warnings.warn(
+ "connector.loop property is deprecated", DeprecationWarning, stacklevel=2
+ )
+ return self._loop
+
+ @property
+ def transport(self) -> Optional[asyncio.Transport]:
+ if self._protocol is None:
+ return None
+ return self._protocol.transport
+
+ @property
+ def protocol(self) -> Optional[ResponseHandler]:
+ return self._protocol
+
+ def add_callback(self, callback: Callable[[], None]) -> None:
+ if callback is not None:
+ self._callbacks.append(callback)
+
+ def _notify_release(self) -> None:
+ callbacks, self._callbacks = self._callbacks[:], []
+
+ for cb in callbacks:
+ with suppress(Exception):
+ cb()
+
+ def close(self) -> None:
+ self._notify_release()
+
+ if self._protocol is not None:
+ self._connector._release(self._key, self._protocol, should_close=True)
+ self._protocol = None
+
+ def release(self) -> None:
+ self._notify_release()
+
+ if self._protocol is not None:
+ self._connector._release(
+ self._key, self._protocol, should_close=self._protocol.should_close
+ )
+ self._protocol = None
+
+ @property
+ def closed(self) -> bool:
+ return self._protocol is None or not self._protocol.is_connected()
+
+
+class _TransportPlaceholder:
+ """ placeholder for BaseConnector.connect function """
+
+ def close(self) -> None:
+ pass
+
+
+class BaseConnector:
+ """Base connector class.
+
+ keepalive_timeout - (optional) Keep-alive timeout.
+ force_close - Set to True to force close and do reconnect
+ after each request (and between redirects).
+ limit - The total number of simultaneous connections.
+ limit_per_host - Number of simultaneous connections to one host.
+ enable_cleanup_closed - Enables cleanup of closed SSL transports.
+ Disabled by default.
+ loop - Optional event loop.
+ """
+
+ _closed = True  # prevent AttributeError in __del__ if the constructor failed
+ _source_traceback = None
+
+ # abort transport after 2 seconds (cleanup broken connections)
+ _cleanup_closed_period = 2.0
+
+ def __init__(
+ self,
+ *,
+ keepalive_timeout: Union[object, None, float] = sentinel,
+ force_close: bool = False,
+ limit: int = 100,
+ limit_per_host: int = 0,
+ enable_cleanup_closed: bool = False,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ ) -> None:
+
+ if force_close:
+ if keepalive_timeout is not None and keepalive_timeout is not sentinel:
+ raise ValueError(
+ "keepalive_timeout cannot " "be set if force_close is True"
+ )
+ else:
+ if keepalive_timeout is sentinel:
+ keepalive_timeout = 15.0
+
+ loop = get_running_loop(loop)
+
+ self._closed = False
+ if loop.get_debug():
+ self._source_traceback = traceback.extract_stack(sys._getframe(1))
+
+ self._conns = (
+ {}
+ ) # type: Dict[ConnectionKey, List[Tuple[ResponseHandler, float]]]
+ self._limit = limit
+ self._limit_per_host = limit_per_host
+ self._acquired = set() # type: Set[ResponseHandler]
+ self._acquired_per_host = defaultdict(
+ set
+ ) # type: DefaultDict[ConnectionKey, Set[ResponseHandler]]
+ self._keepalive_timeout = cast(float, keepalive_timeout)
+ self._force_close = force_close
+
+ # {host_key: FIFO list of waiters}
+ self._waiters = defaultdict(deque) # type: ignore
+
+ self._loop = loop
+ self._factory = functools.partial(ResponseHandler, loop=loop)
+
+ self.cookies = SimpleCookie() # type: SimpleCookie[str]
+
+ # start keep-alive connection cleanup task
+ self._cleanup_handle = None
+
+ # start cleanup closed transports task
+ self._cleanup_closed_handle = None
+ self._cleanup_closed_disabled = not enable_cleanup_closed
+ self._cleanup_closed_transports = [] # type: List[Optional[asyncio.Transport]]
+ self._cleanup_closed()
+
+ def __del__(self, _warnings: Any = warnings) -> None:
+ if self._closed:
+ return
+ if not self._conns:
+ return
+
+ conns = [repr(c) for c in self._conns.values()]
+
+ self._close()
+
+ if PY_36:
+ kwargs = {"source": self}
+ else:
+ kwargs = {}
+ _warnings.warn(f"Unclosed connector {self!r}", ResourceWarning, **kwargs)
+ context = {
+ "connector": self,
+ "connections": conns,
+ "message": "Unclosed connector",
+ }
+ if self._source_traceback is not None:
+ context["source_traceback"] = self._source_traceback
+ self._loop.call_exception_handler(context)
+
+ def __enter__(self) -> "BaseConnector":
+ warnings.warn(
+ '"witn Connector():" is deprecated, '
+ 'use "async with Connector():" instead',
+ DeprecationWarning,
+ )
+ return self
+
+ def __exit__(self, *exc: Any) -> None:
+ self.close()
+
+ async def __aenter__(self) -> "BaseConnector":
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]] = None,
+ exc_value: Optional[BaseException] = None,
+ exc_traceback: Optional[TracebackType] = None,
+ ) -> None:
+ await self.close()
+
+ @property
+ def force_close(self) -> bool:
+ """Ultimately close connection on releasing if True."""
+ return self._force_close
+
+ @property
+ def limit(self) -> int:
+ """The total number for simultaneous connections.
+
+ If limit is 0 the connector has no limit.
+ The default limit size is 100.
+ """
+ return self._limit
+
+ @property
+ def limit_per_host(self) -> int:
+ """The limit_per_host for simultaneous connections
+ to the same endpoint.
+
+ Endpoints are the same if they have an equal
+ (host, port, is_ssl) triple.
+
+ """
+ return self._limit_per_host
+
+ def _cleanup(self) -> None:
+ """Cleanup unused transports."""
+ if self._cleanup_handle:
+ self._cleanup_handle.cancel()
+ # _cleanup_handle should be unset, otherwise _release() will not
+ # recreate it ever!
+ self._cleanup_handle = None
+
+ now = self._loop.time()
+ timeout = self._keepalive_timeout
+
+ if self._conns:
+ connections = {}
+ deadline = now - timeout
+ for key, conns in self._conns.items():
+ alive = []
+ for proto, use_time in conns:
+ if proto.is_connected():
+ if use_time - deadline < 0:
+ transport = proto.transport
+ proto.close()
+ if key.is_ssl and not self._cleanup_closed_disabled:
+ self._cleanup_closed_transports.append(transport)
+ else:
+ alive.append((proto, use_time))
+ else:
+ transport = proto.transport
+ proto.close()
+ if key.is_ssl and not self._cleanup_closed_disabled:
+ self._cleanup_closed_transports.append(transport)
+
+ if alive:
+ connections[key] = alive
+
+ self._conns = connections
+
+ if self._conns:
+ self._cleanup_handle = helpers.weakref_handle(
+ self, "_cleanup", timeout, self._loop
+ )
+
+ def _drop_acquired_per_host(
+ self, key: "ConnectionKey", val: ResponseHandler
+ ) -> None:
+ acquired_per_host = self._acquired_per_host
+ if key not in acquired_per_host:
+ return
+ conns = acquired_per_host[key]
+ conns.remove(val)
+ if not conns:
+ del self._acquired_per_host[key]
+
+ def _cleanup_closed(self) -> None:
+ """Double confirmation for transport close.
+ Some broken SSL servers may leave a socket open without a proper close.
+ """
+ if self._cleanup_closed_handle:
+ self._cleanup_closed_handle.cancel()
+
+ for transport in self._cleanup_closed_transports:
+ if transport is not None:
+ transport.abort()
+
+ self._cleanup_closed_transports = []
+
+ if not self._cleanup_closed_disabled:
+ self._cleanup_closed_handle = helpers.weakref_handle(
+ self, "_cleanup_closed", self._cleanup_closed_period, self._loop
+ )
+
+ def close(self) -> Awaitable[None]:
+ """Close all opened transports."""
+ self._close()
+ return _DeprecationWaiter(noop())
+
+ def _close(self) -> None:
+ if self._closed:
+ return
+
+ self._closed = True
+
+ try:
+ if self._loop.is_closed():
+ return
+
+ # cancel cleanup task
+ if self._cleanup_handle:
+ self._cleanup_handle.cancel()
+
+ # cancel cleanup close task
+ if self._cleanup_closed_handle:
+ self._cleanup_closed_handle.cancel()
+
+ for data in self._conns.values():
+ for proto, t0 in data:
+ proto.close()
+
+ for proto in self._acquired:
+ proto.close()
+
+ for transport in self._cleanup_closed_transports:
+ if transport is not None:
+ transport.abort()
+
+ finally:
+ self._conns.clear()
+ self._acquired.clear()
+ self._waiters.clear()
+ self._cleanup_handle = None
+ self._cleanup_closed_transports.clear()
+ self._cleanup_closed_handle = None
+
+ @property
+ def closed(self) -> bool:
+ """Is connector closed.
+
+ A readonly property.
+ """
+ return self._closed
+
+ def _available_connections(self, key: "ConnectionKey") -> int:
+ """
+ Return number of available connections taking into account
+ the limit, limit_per_host and the connection key.
+
+ If it returns less than 1 means that there is no connections
+ availables.
+ """
+
+ if self._limit:
+ # calculate the total number of available connections
+ available = self._limit - len(self._acquired)
+
+ # check limit per host
+ if (
+ self._limit_per_host
+ and available > 0
+ and key in self._acquired_per_host
+ ):
+ acquired = self._acquired_per_host.get(key)
+ assert acquired is not None
+ available = self._limit_per_host - len(acquired)
+
+ elif self._limit_per_host and key in self._acquired_per_host:
+ # check limit per host
+ acquired = self._acquired_per_host.get(key)
+ assert acquired is not None
+ available = self._limit_per_host - len(acquired)
+ else:
+ available = 1
+
+ return available
+
+ async def connect(
+ self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
+ ) -> Connection:
+ """Get from pool or create new connection."""
+ key = req.connection_key
+ available = self._available_connections(key)
+
+ # Wait if there are no available connections or if there are/were
+ # waiters (i.e. don't steal connection from a waiter about to wake up)
+ if available <= 0 or key in self._waiters:
+ fut = self._loop.create_future()
+
+ # This connection will now count towards the limit.
+ self._waiters[key].append(fut)
+
+ if traces:
+ for trace in traces:
+ await trace.send_connection_queued_start()
+
+ try:
+ await fut
+ except BaseException as e:
+ if key in self._waiters:
+ # remove a waiter even if it was cancelled, normally it's
+ # removed when it's notified
+ try:
+ self._waiters[key].remove(fut)
+ except ValueError: # fut may no longer be in list
+ pass
+
+ raise e
+ finally:
+ if key in self._waiters and not self._waiters[key]:
+ del self._waiters[key]
+
+ if traces:
+ for trace in traces:
+ await trace.send_connection_queued_end()
+
+ proto = self._get(key)
+ if proto is None:
+ placeholder = cast(ResponseHandler, _TransportPlaceholder())
+ self._acquired.add(placeholder)
+ self._acquired_per_host[key].add(placeholder)
+
+ if traces:
+ for trace in traces:
+ await trace.send_connection_create_start()
+
+ try:
+ proto = await self._create_connection(req, traces, timeout)
+ if self._closed:
+ proto.close()
+ raise ClientConnectionError("Connector is closed.")
+ except BaseException:
+ if not self._closed:
+ self._acquired.remove(placeholder)
+ self._drop_acquired_per_host(key, placeholder)
+ self._release_waiter()
+ raise
+ else:
+ if not self._closed:
+ self._acquired.remove(placeholder)
+ self._drop_acquired_per_host(key, placeholder)
+
+ if traces:
+ for trace in traces:
+ await trace.send_connection_create_end()
+ else:
+ if traces:
+ for trace in traces:
+ await trace.send_connection_reuseconn()
+
+ self._acquired.add(proto)
+ self._acquired_per_host[key].add(proto)
+ return Connection(self, key, proto, self._loop)
+
+ def _get(self, key: "ConnectionKey") -> Optional[ResponseHandler]:
+ try:
+ conns = self._conns[key]
+ except KeyError:
+ return None
+
+ t1 = self._loop.time()
+ while conns:
+ proto, t0 = conns.pop()
+ if proto.is_connected():
+ if t1 - t0 > self._keepalive_timeout:
+ transport = proto.transport
+ proto.close()
+ # only for SSL transports
+ if key.is_ssl and not self._cleanup_closed_disabled:
+ self._cleanup_closed_transports.append(transport)
+ else:
+ if not conns:
+ # The very last connection was reclaimed: drop the key
+ del self._conns[key]
+ return proto
+ else:
+ transport = proto.transport
+ proto.close()
+ if key.is_ssl and not self._cleanup_closed_disabled:
+ self._cleanup_closed_transports.append(transport)
+
+ # No more connections: drop the key
+ del self._conns[key]
+ return None
+
+ def _release_waiter(self) -> None:
+ """
+ Iterates over all waiters until it finds one that is not finished and
+ belongs to a host that has available connections.
+ """
+ if not self._waiters:
+ return
+
+ # Shuffle the dict keys so we do not iterate over them
+ # in the same order on each call.
+ queues = list(self._waiters.keys())
+ random.shuffle(queues)
+
+ for key in queues:
+ if self._available_connections(key) < 1:
+ continue
+
+ waiters = self._waiters[key]
+ while waiters:
+ waiter = waiters.popleft()
+ if not waiter.done():
+ waiter.set_result(None)
+ return
+
+ def _release_acquired(self, key: "ConnectionKey", proto: ResponseHandler) -> None:
+ if self._closed:
+ # acquired connection is already released on connector closing
+ return
+
+ try:
+ self._acquired.remove(proto)
+ self._drop_acquired_per_host(key, proto)
+ except KeyError: # pragma: no cover
+ # this may be the result of a nondeterministic order of object
+ # finalization due to garbage collection.
+ pass
+ else:
+ self._release_waiter()
+
+ def _release(
+ self,
+ key: "ConnectionKey",
+ protocol: ResponseHandler,
+ *,
+ should_close: bool = False,
+ ) -> None:
+ if self._closed:
+ # acquired connection is already released on connector closing
+ return
+
+ self._release_acquired(key, protocol)
+
+ if self._force_close:
+ should_close = True
+
+ if should_close or protocol.should_close:
+ transport = protocol.transport
+ protocol.close()
+
+ if key.is_ssl and not self._cleanup_closed_disabled:
+ self._cleanup_closed_transports.append(transport)
+ else:
+ conns = self._conns.get(key)
+ if conns is None:
+ conns = self._conns[key] = []
+ conns.append((protocol, self._loop.time()))
+
+ if self._cleanup_handle is None:
+ self._cleanup_handle = helpers.weakref_handle(
+ self, "_cleanup", self._keepalive_timeout, self._loop
+ )
+
+ async def _create_connection(
+ self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
+ ) -> ResponseHandler:
+ raise NotImplementedError()
+
+
+class _DNSCacheTable:
+ def __init__(self, ttl: Optional[float] = None) -> None:
+ self._addrs_rr = (
+ {}
+ ) # type: Dict[Tuple[str, int], Tuple[Iterator[Dict[str, Any]], int]]
+ self._timestamps = {} # type: Dict[Tuple[str, int], float]
+ self._ttl = ttl
+
+ def __contains__(self, host: object) -> bool:
+ return host in self._addrs_rr
+
+ def add(self, key: Tuple[str, int], addrs: List[Dict[str, Any]]) -> None:
+ self._addrs_rr[key] = (cycle(addrs), len(addrs))
+
+ if self._ttl:
+ self._timestamps[key] = monotonic()
+
+ def remove(self, key: Tuple[str, int]) -> None:
+ self._addrs_rr.pop(key, None)
+
+ if self._ttl:
+ self._timestamps.pop(key, None)
+
+ def clear(self) -> None:
+ self._addrs_rr.clear()
+ self._timestamps.clear()
+
+ def next_addrs(self, key: Tuple[str, int]) -> List[Dict[str, Any]]:
+ loop, length = self._addrs_rr[key]
+ addrs = list(islice(loop, length))
+ # Consume one more element to shift internal state of `cycle`
+ next(loop)
+ return addrs
+
+ def expired(self, key: Tuple[str, int]) -> bool:
+ if self._ttl is None:
+ return False
+
+ return self._timestamps[key] + self._ttl < monotonic()
+
+
+class TCPConnector(BaseConnector):
+ """TCP connector.
+
+ verify_ssl - Set to True to check SSL certificates.
+ fingerprint - Pass the binary sha256
+ digest of the expected certificate in DER format to verify
+ that the certificate the server presents matches. See also
+ https://en.wikipedia.org/wiki/Transport_Layer_Security#Certificate_pinning
+ resolver - Enable DNS lookups and use this resolver.
+ use_dns_cache - Use memory cache for DNS lookups.
+ ttl_dns_cache - Max seconds to cache a resolved DNS entry; None means forever.
+ family - socket address family
+ local_addr - local tuple of (host, port) to bind socket to
+
+ keepalive_timeout - (optional) Keep-alive timeout.
+ force_close - Set to True to force close and do reconnect
+ after each request (and between redirects).
+ limit - The total number of simultaneous connections.
+ limit_per_host - Number of simultaneous connections to one host.
+ enable_cleanup_closed - Enables cleanup of closed SSL transports.
+ Disabled by default.
+ loop - Optional event loop.
+ """
+
+ def __init__(
+ self,
+ *,
+ verify_ssl: bool = True,
+ fingerprint: Optional[bytes] = None,
+ use_dns_cache: bool = True,
+ ttl_dns_cache: Optional[int] = 10,
+ family: int = 0,
+ ssl_context: Optional[SSLContext] = None,
+ ssl: Union[None, bool, Fingerprint, SSLContext] = None,
+ local_addr: Optional[Tuple[str, int]] = None,
+ resolver: Optional[AbstractResolver] = None,
+ keepalive_timeout: Union[None, float, object] = sentinel,
+ force_close: bool = False,
+ limit: int = 100,
+ limit_per_host: int = 0,
+ enable_cleanup_closed: bool = False,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ ):
+ super().__init__(
+ keepalive_timeout=keepalive_timeout,
+ force_close=force_close,
+ limit=limit,
+ limit_per_host=limit_per_host,
+ enable_cleanup_closed=enable_cleanup_closed,
+ loop=loop,
+ )
+
+ self._ssl = _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint)
+ if resolver is None:
+ resolver = DefaultResolver(loop=self._loop)
+ self._resolver = resolver
+
+ self._use_dns_cache = use_dns_cache
+ self._cached_hosts = _DNSCacheTable(ttl=ttl_dns_cache)
+ self._throttle_dns_events = (
+ {}
+ ) # type: Dict[Tuple[str, int], EventResultOrError]
+ self._family = family
+ self._local_addr = local_addr
+
+ def close(self) -> Awaitable[None]:
+ """Close all ongoing DNS calls."""
+ for ev in self._throttle_dns_events.values():
+ ev.cancel()
+
+ return super().close()
+
+ @property
+ def family(self) -> int:
+ """Socket family like AF_INET."""
+ return self._family
+
+ @property
+ def use_dns_cache(self) -> bool:
+ """True if local DNS caching is enabled."""
+ return self._use_dns_cache
+
+ def clear_dns_cache(
+ self, host: Optional[str] = None, port: Optional[int] = None
+ ) -> None:
+ """Remove specified host/port or clear all dns local cache."""
+ if host is not None and port is not None:
+ self._cached_hosts.remove((host, port))
+ elif host is not None or port is not None:
+ raise ValueError("either both host and port " "or none of them are allowed")
+ else:
+ self._cached_hosts.clear()
+
+ async def _resolve_host(
+ self, host: str, port: int, traces: Optional[List["Trace"]] = None
+ ) -> List[Dict[str, Any]]:
+ if is_ip_address(host):
+ return [
+ {
+ "hostname": host,
+ "host": host,
+ "port": port,
+ "family": self._family,
+ "proto": 0,
+ "flags": 0,
+ }
+ ]
+
+ if not self._use_dns_cache:
+
+ if traces:
+ for trace in traces:
+ await trace.send_dns_resolvehost_start(host)
+
+ res = await self._resolver.resolve(host, port, family=self._family)
+
+ if traces:
+ for trace in traces:
+ await trace.send_dns_resolvehost_end(host)
+
+ return res
+
+ key = (host, port)
+
+ if (key in self._cached_hosts) and (not self._cached_hosts.expired(key)):
+ # get result early, before any await (#4014)
+ result = self._cached_hosts.next_addrs(key)
+
+ if traces:
+ for trace in traces:
+ await trace.send_dns_cache_hit(host)
+ return result
+
+ if key in self._throttle_dns_events:
+ # get event early, before any await (#4014)
+ event = self._throttle_dns_events[key]
+ if traces:
+ for trace in traces:
+ await trace.send_dns_cache_hit(host)
+ await event.wait()
+ else:
+ # update dict early, before any await (#4014)
+ self._throttle_dns_events[key] = EventResultOrError(self._loop)
+ if traces:
+ for trace in traces:
+ await trace.send_dns_cache_miss(host)
+ try:
+
+ if traces:
+ for trace in traces:
+ await trace.send_dns_resolvehost_start(host)
+
+ addrs = await self._resolver.resolve(host, port, family=self._family)
+ if traces:
+ for trace in traces:
+ await trace.send_dns_resolvehost_end(host)
+
+ self._cached_hosts.add(key, addrs)
+ self._throttle_dns_events[key].set()
+ except BaseException as e:
+ # any DNS exception, regardless of the implementation,
+ # is set for the waiters so they raise the same exception.
+ self._throttle_dns_events[key].set(exc=e)
+ raise
+ finally:
+ self._throttle_dns_events.pop(key)
+
+ return self._cached_hosts.next_addrs(key)
+
+ async def _create_connection(
+ self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
+ ) -> ResponseHandler:
+ """Create connection.
+
+ Has the same keyword arguments as BaseEventLoop.create_connection.
+ """
+ if req.proxy:
+ _, proto = await self._create_proxy_connection(req, traces, timeout)
+ else:
+ _, proto = await self._create_direct_connection(req, traces, timeout)
+
+ return proto
+
+ @staticmethod
+ @functools.lru_cache(None)
+ def _make_ssl_context(verified: bool) -> SSLContext:
+ if verified:
+ return ssl.create_default_context()
+ else:
+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext.options |= ssl.OP_NO_SSLv2
+ sslcontext.options |= ssl.OP_NO_SSLv3
+ try:
+ sslcontext.options |= ssl.OP_NO_COMPRESSION
+ except AttributeError as attr_err:
+ warnings.warn(
+ "{!s}: The Python interpreter is compiled "
+ "against OpenSSL < 1.0.0. Ref: "
+ "https://docs.python.org/3/library/ssl.html"
+ "#ssl.OP_NO_COMPRESSION".format(attr_err),
+ )
+ sslcontext.set_default_verify_paths()
+ return sslcontext
+
+ def _get_ssl_context(self, req: "ClientRequest") -> Optional[SSLContext]:
+ """Logic to get the correct SSL context
+
+ 0. if req.ssl is false, return None
+
+ 1. if ssl_context is specified in req, use it
+ 2. if _ssl_context is specified in self, use it
+ 3. otherwise:
+ 1. if verify_ssl is not specified in req, use self.ssl_context
+ (will generate a default context according to self.verify_ssl)
+ 2. if verify_ssl is True in req, generate a default SSL context
+ 3. if verify_ssl is False in req, generate a SSL context that
+ won't verify
+ """
+ if req.is_ssl():
+ if ssl is None: # pragma: no cover
+ raise RuntimeError("SSL is not supported.")
+ sslcontext = req.ssl
+ if isinstance(sslcontext, ssl.SSLContext):
+ return sslcontext
+ if sslcontext is not None:
+ # not verified or fingerprinted
+ return self._make_ssl_context(False)
+ sslcontext = self._ssl
+ if isinstance(sslcontext, ssl.SSLContext):
+ return sslcontext
+ if sslcontext is not None:
+ # not verified or fingerprinted
+ return self._make_ssl_context(False)
+ return self._make_ssl_context(True)
+ else:
+ return None
+
+ def _get_fingerprint(self, req: "ClientRequest") -> Optional["Fingerprint"]:
+ ret = req.ssl
+ if isinstance(ret, Fingerprint):
+ return ret
+ ret = self._ssl
+ if isinstance(ret, Fingerprint):
+ return ret
+ return None
+
+ async def _wrap_create_connection(
+ self,
+ *args: Any,
+ req: "ClientRequest",
+ timeout: "ClientTimeout",
+ client_error: Type[Exception] = ClientConnectorError,
+ **kwargs: Any,
+ ) -> Tuple[asyncio.Transport, ResponseHandler]:
+ try:
+ with CeilTimeout(timeout.sock_connect):
+ return await self._loop.create_connection(*args, **kwargs) # type: ignore # noqa
+ except cert_errors as exc:
+ raise ClientConnectorCertificateError(req.connection_key, exc) from exc
+ except ssl_errors as exc:
+ raise ClientConnectorSSLError(req.connection_key, exc) from exc
+ except OSError as exc:
+ raise client_error(req.connection_key, exc) from exc
+
+ async def _create_direct_connection(
+ self,
+ req: "ClientRequest",
+ traces: List["Trace"],
+ timeout: "ClientTimeout",
+ *,
+ client_error: Type[Exception] = ClientConnectorError,
+ ) -> Tuple[asyncio.Transport, ResponseHandler]:
+ sslcontext = self._get_ssl_context(req)
+ fingerprint = self._get_fingerprint(req)
+
+ host = req.url.raw_host
+ assert host is not None
+ port = req.port
+ assert port is not None
+ host_resolved = asyncio.ensure_future(
+ self._resolve_host(host, port, traces=traces), loop=self._loop
+ )
+ try:
+ # Cancelling this lookup should not cancel the underlying lookup
+ # or else the cancel event will get broadcast to all the waiters
+ # across all connections.
+ hosts = await asyncio.shield(host_resolved)
+ except asyncio.CancelledError:
+
+ def drop_exception(fut: "asyncio.Future[List[Dict[str, Any]]]") -> None:
+ with suppress(Exception, asyncio.CancelledError):
+ fut.result()
+
+ host_resolved.add_done_callback(drop_exception)
+ raise
+ except OSError as exc:
+ # in case of proxy it is not ClientProxyConnectionError
+ # it is problem of resolving proxy ip itself
+ raise ClientConnectorError(req.connection_key, exc) from exc
+
+ last_exc = None # type: Optional[Exception]
+
+ for hinfo in hosts:
+ host = hinfo["host"]
+ port = hinfo["port"]
+
+ try:
+ transp, proto = await self._wrap_create_connection(
+ self._factory,
+ host,
+ port,
+ timeout=timeout,
+ ssl=sslcontext,
+ family=hinfo["family"],
+ proto=hinfo["proto"],
+ flags=hinfo["flags"],
+ server_hostname=hinfo["hostname"] if sslcontext else None,
+ local_addr=self._local_addr,
+ req=req,
+ client_error=client_error,
+ )
+ except ClientConnectorError as exc:
+ last_exc = exc
+ continue
+
+ if req.is_ssl() and fingerprint:
+ try:
+ fingerprint.check(transp)
+ except ServerFingerprintMismatch as exc:
+ transp.close()
+ if not self._cleanup_closed_disabled:
+ self._cleanup_closed_transports.append(transp)
+ last_exc = exc
+ continue
+
+ return transp, proto
+ else:
+ assert last_exc is not None
+ raise last_exc
+
+ async def _create_proxy_connection(
+ self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
+ ) -> Tuple[asyncio.Transport, ResponseHandler]:
+ headers = {} # type: Dict[str, str]
+ if req.proxy_headers is not None:
+ headers = req.proxy_headers # type: ignore
+ headers[hdrs.HOST] = req.headers[hdrs.HOST]
+
+ url = req.proxy
+ assert url is not None
+ proxy_req = ClientRequest(
+ hdrs.METH_GET,
+ url,
+ headers=headers,
+ auth=req.proxy_auth,
+ loop=self._loop,
+ ssl=req.ssl,
+ )
+
+ # create connection to proxy server
+ transport, proto = await self._create_direct_connection(
+ proxy_req, [], timeout, client_error=ClientProxyConnectionError
+ )
+
+ # Many HTTP proxies have buggy keepalive support. Do not
+ # reuse the connection; close it after processing every
+ # response.
+ proto.force_close()
+
+ auth = proxy_req.headers.pop(hdrs.AUTHORIZATION, None)
+ if auth is not None:
+ if not req.is_ssl():
+ req.headers[hdrs.PROXY_AUTHORIZATION] = auth
+ else:
+ proxy_req.headers[hdrs.PROXY_AUTHORIZATION] = auth
+
+ if req.is_ssl():
+ sslcontext = self._get_ssl_context(req)
+ # For HTTPS requests over an HTTP proxy
+ # we must ask the proxy to tunnel the connection,
+ # so we send a CONNECT command:
+ # CONNECT www.python.org:443 HTTP/1.1
+ # Host: www.python.org
+ #
+ # next we must do the TLS handshake and so on;
+ # to do this we must wrap the raw socket in a secure one.
+ # asyncio handles this perfectly
+ proxy_req.method = hdrs.METH_CONNECT
+ proxy_req.url = req.url
+ key = attr.evolve(
+ req.connection_key, proxy=None, proxy_auth=None, proxy_headers_hash=None
+ )
+ conn = Connection(self, key, proto, self._loop)
+ proxy_resp = await proxy_req.send(conn)
+ try:
+ protocol = conn._protocol
+ assert protocol is not None
+ protocol.set_response_params()
+ resp = await proxy_resp.start(conn)
+ except BaseException:
+ proxy_resp.close()
+ conn.close()
+ raise
+ else:
+ conn._protocol = None
+ conn._transport = None
+ try:
+ if resp.status != 200:
+ message = resp.reason
+ if message is None:
+ message = RESPONSES[resp.status][0]
+ raise ClientHttpProxyError(
+ proxy_resp.request_info,
+ resp.history,
+ status=resp.status,
+ message=message,
+ headers=resp.headers,
+ )
+ rawsock = transport.get_extra_info("socket", default=None)
+ if rawsock is None:
+ raise RuntimeError("Transport does not expose socket instance")
+ # Duplicate the socket, so now we can close proxy transport
+ rawsock = rawsock.dup()
+ finally:
+ transport.close()
+
+ transport, proto = await self._wrap_create_connection(
+ self._factory,
+ timeout=timeout,
+ ssl=sslcontext,
+ sock=rawsock,
+ server_hostname=req.host,
+ req=req,
+ )
+ finally:
+ proxy_resp.close()
+
+ return transport, proto
+
+
+class UnixConnector(BaseConnector):
+ """Unix socket connector.
+
+ path - Unix socket path.
+ keepalive_timeout - (optional) Keep-alive timeout.
+ force_close - Set to True to force close and do reconnect
+ after each request (and between redirects).
+ limit - The total number of simultaneous connections.
+ limit_per_host - Number of simultaneous connections to one host.
+ loop - Optional event loop.
+ """
+
+ def __init__(
+ self,
+ path: str,
+ force_close: bool = False,
+ keepalive_timeout: Union[object, float, None] = sentinel,
+ limit: int = 100,
+ limit_per_host: int = 0,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ ) -> None:
+ super().__init__(
+ force_close=force_close,
+ keepalive_timeout=keepalive_timeout,
+ limit=limit,
+ limit_per_host=limit_per_host,
+ loop=loop,
+ )
+ self._path = path
+
+ @property
+ def path(self) -> str:
+ """Path to unix socket."""
+ return self._path
+
+ async def _create_connection(
+ self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
+ ) -> ResponseHandler:
+ try:
+ with CeilTimeout(timeout.sock_connect):
+ _, proto = await self._loop.create_unix_connection(
+ self._factory, self._path
+ )
+ except OSError as exc:
+ raise ClientConnectorError(req.connection_key, exc) from exc
+
+ return cast(ResponseHandler, proto)
+
+
+class NamedPipeConnector(BaseConnector):
+ """Named pipe connector.
+
+ Only supported by the proactor event loop.
+ See also: https://docs.python.org/3.7/library/asyncio-eventloop.html
+
+ path - Windows named pipe path.
+ keepalive_timeout - (optional) Keep-alive timeout.
+ force_close - Set to True to force close and do reconnect
+ after each request (and between redirects).
+ limit - The total number of simultaneous connections.
+ limit_per_host - Number of simultaneous connections to one host.
+ loop - Optional event loop.
+ """
+
+ def __init__(
+ self,
+ path: str,
+ force_close: bool = False,
+ keepalive_timeout: Union[object, float, None] = sentinel,
+ limit: int = 100,
+ limit_per_host: int = 0,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ ) -> None:
+ super().__init__(
+ force_close=force_close,
+ keepalive_timeout=keepalive_timeout,
+ limit=limit,
+ limit_per_host=limit_per_host,
+ loop=loop,
+ )
+ if not isinstance(self._loop, asyncio.ProactorEventLoop): # type: ignore
+ raise RuntimeError(
+ "Named Pipes only available in proactor " "loop under windows"
+ )
+ self._path = path
+
+ @property
+ def path(self) -> str:
+ """Path to the named pipe."""
+ return self._path
+
+ async def _create_connection(
+ self, req: "ClientRequest", traces: List["Trace"], timeout: "ClientTimeout"
+ ) -> ResponseHandler:
+ try:
+ with CeilTimeout(timeout.sock_connect):
+ _, proto = await self._loop.create_pipe_connection( # type: ignore
+ self._factory, self._path
+ )
+ # the drain is required so that connection_made() is called
+ # and the transport is set; otherwise it is not set before the
+ # `assert conn.transport is not None`
+ # in client.py's _request method
+ await asyncio.sleep(0)
+ # other option is to manually set transport like
+ # `proto.transport = trans`
+ except OSError as exc:
+ raise ClientConnectorError(req.connection_key, exc) from exc
+
+ return cast(ResponseHandler, proto)
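
The connector classes above manage the connection pool for a session. A short sketch of configuring TCPConnector with the pooling and DNS-cache knobs documented in its docstring (the URL and limits are illustrative):

import asyncio

import aiohttp

async def main() -> None:
    connector = aiohttp.TCPConnector(
        limit=100,                   # total pool size; 0 disables the limit
        limit_per_host=8,            # per-(host, port, is_ssl) cap
        ttl_dns_cache=10,            # seconds to keep resolved addresses
        enable_cleanup_closed=True,  # abort half-closed SSL transports
    )
    async with aiohttp.ClientSession(connector=connector) as session:
        async with session.get("https://example.com/") as resp:
            print(resp.status)

asyncio.run(main())
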
diff --git a/third_party/python/aiohttp/aiohttp/cookiejar.py b/third_party/python/aiohttp/aiohttp/cookiejar.py
new file mode 100644
index 0000000000..b6b59d6289
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/cookiejar.py
@@ -0,0 +1,382 @@
+import asyncio
+import datetime
+import os # noqa
+import pathlib
+import pickle
+import re
+from collections import defaultdict
+from http.cookies import BaseCookie, Morsel, SimpleCookie
+from typing import ( # noqa
+ DefaultDict,
+ Dict,
+ Iterable,
+ Iterator,
+ Mapping,
+ Optional,
+ Set,
+ Tuple,
+ Union,
+ cast,
+)
+
+from yarl import URL
+
+from .abc import AbstractCookieJar
+from .helpers import is_ip_address, next_whole_second
+from .typedefs import LooseCookies, PathLike
+
+__all__ = ("CookieJar", "DummyCookieJar")
+
+
+CookieItem = Union[str, "Morsel[str]"]
+
+
+class CookieJar(AbstractCookieJar):
+ """Implements cookie storage adhering to RFC 6265."""
+
+ DATE_TOKENS_RE = re.compile(
+ r"[\x09\x20-\x2F\x3B-\x40\x5B-\x60\x7B-\x7E]*"
+ r"(?P<token>[\x00-\x08\x0A-\x1F\d:a-zA-Z\x7F-\xFF]+)"
+ )
+
+ DATE_HMS_TIME_RE = re.compile(r"(\d{1,2}):(\d{1,2}):(\d{1,2})")
+
+ DATE_DAY_OF_MONTH_RE = re.compile(r"(\d{1,2})")
+
+ DATE_MONTH_RE = re.compile(
+ "(jan)|(feb)|(mar)|(apr)|(may)|(jun)|(jul)|" "(aug)|(sep)|(oct)|(nov)|(dec)",
+ re.I,
+ )
+
+ DATE_YEAR_RE = re.compile(r"(\d{2,4})")
+
+ MAX_TIME = datetime.datetime.max.replace(tzinfo=datetime.timezone.utc)
+
+ MAX_32BIT_TIME = datetime.datetime.utcfromtimestamp(2 ** 31 - 1)
+
+ def __init__(
+ self,
+ *,
+ unsafe: bool = False,
+ quote_cookie: bool = True,
+ loop: Optional[asyncio.AbstractEventLoop] = None
+ ) -> None:
+ super().__init__(loop=loop)
+ self._cookies = defaultdict(
+ SimpleCookie
+ ) # type: DefaultDict[str, SimpleCookie[str]]
+ self._host_only_cookies = set() # type: Set[Tuple[str, str]]
+ self._unsafe = unsafe
+ self._quote_cookie = quote_cookie
+ self._next_expiration = next_whole_second()
+ self._expirations = {} # type: Dict[Tuple[str, str], datetime.datetime]
+ # #4515: datetime.max may not be representable on 32-bit platforms
+ self._max_time = self.MAX_TIME
+ try:
+ self._max_time.timestamp()
+ except OverflowError:
+ self._max_time = self.MAX_32BIT_TIME
+
+ def save(self, file_path: PathLike) -> None:
+ file_path = pathlib.Path(file_path)
+ with file_path.open(mode="wb") as f:
+ pickle.dump(self._cookies, f, pickle.HIGHEST_PROTOCOL)
+
+ def load(self, file_path: PathLike) -> None:
+ file_path = pathlib.Path(file_path)
+ with file_path.open(mode="rb") as f:
+ self._cookies = pickle.load(f)
+
+ def clear(self) -> None:
+ self._cookies.clear()
+ self._host_only_cookies.clear()
+ self._next_expiration = next_whole_second()
+ self._expirations.clear()
+
+ def __iter__(self) -> "Iterator[Morsel[str]]":
+ self._do_expiration()
+ for val in self._cookies.values():
+ yield from val.values()
+
+ def __len__(self) -> int:
+ return sum(1 for i in self)
+
+ def _do_expiration(self) -> None:
+ now = datetime.datetime.now(datetime.timezone.utc)
+ if self._next_expiration > now:
+ return
+ if not self._expirations:
+ return
+ next_expiration = self._max_time
+ to_del = []
+ cookies = self._cookies
+ expirations = self._expirations
+ for (domain, name), when in expirations.items():
+ if when <= now:
+ cookies[domain].pop(name, None)
+ to_del.append((domain, name))
+ self._host_only_cookies.discard((domain, name))
+ else:
+ next_expiration = min(next_expiration, when)
+ for key in to_del:
+ del expirations[key]
+
+ try:
+ self._next_expiration = next_expiration.replace(
+ microsecond=0
+ ) + datetime.timedelta(seconds=1)
+ except OverflowError:
+ self._next_expiration = self._max_time
+
+ def _expire_cookie(self, when: datetime.datetime, domain: str, name: str) -> None:
+ self._next_expiration = min(self._next_expiration, when)
+ self._expirations[(domain, name)] = when
+
+ def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
+ """Update cookies."""
+ hostname = response_url.raw_host
+
+ if not self._unsafe and is_ip_address(hostname):
+ # Don't accept cookies from IPs
+ return
+
+ if isinstance(cookies, Mapping):
+ cookies = cookies.items()
+
+ for name, cookie in cookies:
+ if not isinstance(cookie, Morsel):
+ tmp = SimpleCookie() # type: SimpleCookie[str]
+ tmp[name] = cookie # type: ignore
+ cookie = tmp[name]
+
+ domain = cookie["domain"]
+
+ # ignore domains with trailing dots
+ if domain.endswith("."):
+ domain = ""
+ del cookie["domain"]
+
+ if not domain and hostname is not None:
+ # Set the cookie's domain to the response hostname
+ # and set its host-only-flag
+ self._host_only_cookies.add((hostname, name))
+ domain = cookie["domain"] = hostname
+
+ if domain.startswith("."):
+ # Remove leading dot
+ domain = domain[1:]
+ cookie["domain"] = domain
+
+ if hostname and not self._is_domain_match(domain, hostname):
+ # Setting cookies for different domains is not allowed
+ continue
+
+ path = cookie["path"]
+ if not path or not path.startswith("/"):
+ # Set the cookie's path to the response path
+ path = response_url.path
+ if not path.startswith("/"):
+ path = "/"
+ else:
+ # Cut everything from the last slash to the end
+ path = "/" + path[1 : path.rfind("/")]
+ cookie["path"] = path
+
+ max_age = cookie["max-age"]
+ if max_age:
+ try:
+ delta_seconds = int(max_age)
+ try:
+ max_age_expiration = datetime.datetime.now(
+ datetime.timezone.utc
+ ) + datetime.timedelta(seconds=delta_seconds)
+ except OverflowError:
+ max_age_expiration = self._max_time
+ self._expire_cookie(max_age_expiration, domain, name)
+ except ValueError:
+ cookie["max-age"] = ""
+
+ else:
+ expires = cookie["expires"]
+ if expires:
+ expire_time = self._parse_date(expires)
+ if expire_time:
+ self._expire_cookie(expire_time, domain, name)
+ else:
+ cookie["expires"] = ""
+
+ self._cookies[domain][name] = cookie
+
+ self._do_expiration()
+
+ def filter_cookies(
+ self, request_url: URL = URL()
+ ) -> Union["BaseCookie[str]", "SimpleCookie[str]"]:
+ """Returns this jar's cookies filtered by their attributes."""
+ self._do_expiration()
+ request_url = URL(request_url)
+ filtered: Union["SimpleCookie[str]", "BaseCookie[str]"] = (
+ SimpleCookie() if self._quote_cookie else BaseCookie()
+ )
+ hostname = request_url.raw_host or ""
+ is_not_secure = request_url.scheme not in ("https", "wss")
+
+ for cookie in self:
+ name = cookie.key
+ domain = cookie["domain"]
+
+ # Send shared cookies
+ if not domain:
+ filtered[name] = cookie.value
+ continue
+
+ if not self._unsafe and is_ip_address(hostname):
+ continue
+
+ if (domain, name) in self._host_only_cookies:
+ if domain != hostname:
+ continue
+ elif not self._is_domain_match(domain, hostname):
+ continue
+
+ if not self._is_path_match(request_url.path, cookie["path"]):
+ continue
+
+ if is_not_secure and cookie["secure"]:
+ continue
+
+ # It's critical we use the Morsel so the coded_value
+ # (based on cookie version) is preserved
+ mrsl_val = cast("Morsel[str]", cookie.get(cookie.key, Morsel()))
+ mrsl_val.set(cookie.key, cookie.value, cookie.coded_value)
+ filtered[name] = mrsl_val
+
+ return filtered
+
+ @staticmethod
+ def _is_domain_match(domain: str, hostname: str) -> bool:
+ """Implements domain matching adhering to RFC 6265."""
+ if hostname == domain:
+ return True
+
+ if not hostname.endswith(domain):
+ return False
+
+ non_matching = hostname[: -len(domain)]
+
+ if not non_matching.endswith("."):
+ return False
+
+ return not is_ip_address(hostname)
+
+ @staticmethod
+ def _is_path_match(req_path: str, cookie_path: str) -> bool:
+ """Implements path matching adhering to RFC 6265."""
+ if not req_path.startswith("/"):
+ req_path = "/"
+
+ if req_path == cookie_path:
+ return True
+
+ if not req_path.startswith(cookie_path):
+ return False
+
+ if cookie_path.endswith("/"):
+ return True
+
+ non_matching = req_path[len(cookie_path) :]
+
+ return non_matching.startswith("/")
+
+ @classmethod
+ def _parse_date(cls, date_str: str) -> Optional[datetime.datetime]:
+ """Implements date string parsing adhering to RFC 6265."""
+ if not date_str:
+ return None
+
+ found_time = False
+ found_day = False
+ found_month = False
+ found_year = False
+
+ hour = minute = second = 0
+ day = 0
+ month = 0
+ year = 0
+
+ for token_match in cls.DATE_TOKENS_RE.finditer(date_str):
+
+ token = token_match.group("token")
+
+ if not found_time:
+ time_match = cls.DATE_HMS_TIME_RE.match(token)
+ if time_match:
+ found_time = True
+ hour, minute, second = [int(s) for s in time_match.groups()]
+ continue
+
+ if not found_day:
+ day_match = cls.DATE_DAY_OF_MONTH_RE.match(token)
+ if day_match:
+ found_day = True
+ day = int(day_match.group())
+ continue
+
+ if not found_month:
+ month_match = cls.DATE_MONTH_RE.match(token)
+ if month_match:
+ found_month = True
+ assert month_match.lastindex is not None
+ month = month_match.lastindex
+ continue
+
+ if not found_year:
+ year_match = cls.DATE_YEAR_RE.match(token)
+ if year_match:
+ found_year = True
+ year = int(year_match.group())
+
+ if 70 <= year <= 99:
+ year += 1900
+ elif 0 <= year <= 69:
+ year += 2000
+
+ if False in (found_day, found_month, found_year, found_time):
+ return None
+
+ if not 1 <= day <= 31:
+ return None
+
+ if year < 1601 or hour > 23 or minute > 59 or second > 59:
+ return None
+
+ return datetime.datetime(
+ year, month, day, hour, minute, second, tzinfo=datetime.timezone.utc
+ )
+
+
+class DummyCookieJar(AbstractCookieJar):
+ """Implements a dummy cookie storage.
+
+ It can be used with the ClientSession when no cookie processing is needed.
+
+ """
+
+ def __init__(self, *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
+ super().__init__(loop=loop)
+
+ def __iter__(self) -> "Iterator[Morsel[str]]":
+ while False:
+ yield None
+
+ def __len__(self) -> int:
+ return 0
+
+ def clear(self) -> None:
+ pass
+
+ def update_cookies(self, cookies: LooseCookies, response_url: URL = URL()) -> None:
+ pass
+
+ def filter_cookies(self, request_url: URL) -> "BaseCookie[str]":
+ return SimpleCookie()
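
CookieJar stores Morsel objects per domain and expires them per RFC 6265, while DummyCookieJar simply drops everything. A small sketch of plugging a jar into a session and persisting it with the pickle-based save()/load() defined above (the URL and file name are placeholders):

import asyncio

import aiohttp

async def main() -> None:
    jar = aiohttp.CookieJar(unsafe=True)  # unsafe=True also accepts cookies from IPs
    async with aiohttp.ClientSession(cookie_jar=jar) as session:
        await session.get("https://example.com/")
    jar.save("cookies.pickle")  # pickles the per-domain SimpleCookie store
    jar.load("cookies.pickle")

asyncio.run(main())
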
diff --git a/third_party/python/aiohttp/aiohttp/formdata.py b/third_party/python/aiohttp/aiohttp/formdata.py
new file mode 100644
index 0000000000..900716b72a
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/formdata.py
@@ -0,0 +1,170 @@
+import io
+from typing import Any, Iterable, List, Optional
+from urllib.parse import urlencode
+
+from multidict import MultiDict, MultiDictProxy
+
+from . import hdrs, multipart, payload
+from .helpers import guess_filename
+from .payload import Payload
+
+__all__ = ("FormData",)
+
+
+class FormData:
+ """Helper class for multipart/form-data and
+ application/x-www-form-urlencoded body generation."""
+
+ def __init__(
+ self,
+ fields: Iterable[Any] = (),
+ quote_fields: bool = True,
+ charset: Optional[str] = None,
+ ) -> None:
+ self._writer = multipart.MultipartWriter("form-data")
+ self._fields = [] # type: List[Any]
+ self._is_multipart = False
+ self._is_processed = False
+ self._quote_fields = quote_fields
+ self._charset = charset
+
+ if isinstance(fields, dict):
+ fields = list(fields.items())
+ elif not isinstance(fields, (list, tuple)):
+ fields = (fields,)
+ self.add_fields(*fields)
+
+ @property
+ def is_multipart(self) -> bool:
+ return self._is_multipart
+
+ def add_field(
+ self,
+ name: str,
+ value: Any,
+ *,
+ content_type: Optional[str] = None,
+ filename: Optional[str] = None,
+ content_transfer_encoding: Optional[str] = None
+ ) -> None:
+
+ if isinstance(value, io.IOBase):
+ self._is_multipart = True
+ elif isinstance(value, (bytes, bytearray, memoryview)):
+ if filename is None and content_transfer_encoding is None:
+ filename = name
+
+ type_options = MultiDict({"name": name}) # type: MultiDict[str]
+ if filename is not None and not isinstance(filename, str):
+ raise TypeError(
+ "filename must be an instance of str. " "Got: %s" % filename
+ )
+ if filename is None and isinstance(value, io.IOBase):
+ filename = guess_filename(value, name)
+ if filename is not None:
+ type_options["filename"] = filename
+ self._is_multipart = True
+
+ headers = {}
+ if content_type is not None:
+ if not isinstance(content_type, str):
+ raise TypeError(
+ "content_type must be an instance of str. " "Got: %s" % content_type
+ )
+ headers[hdrs.CONTENT_TYPE] = content_type
+ self._is_multipart = True
+ if content_transfer_encoding is not None:
+ if not isinstance(content_transfer_encoding, str):
+ raise TypeError(
+ "content_transfer_encoding must be an instance"
+ " of str. Got: %s" % content_transfer_encoding
+ )
+ headers[hdrs.CONTENT_TRANSFER_ENCODING] = content_transfer_encoding
+ self._is_multipart = True
+
+ self._fields.append((type_options, headers, value))
+
+ def add_fields(self, *fields: Any) -> None:
+ to_add = list(fields)
+
+ while to_add:
+ rec = to_add.pop(0)
+
+ if isinstance(rec, io.IOBase):
+ k = guess_filename(rec, "unknown")
+ self.add_field(k, rec) # type: ignore
+
+ elif isinstance(rec, (MultiDictProxy, MultiDict)):
+ to_add.extend(rec.items())
+
+ elif isinstance(rec, (list, tuple)) and len(rec) == 2:
+ k, fp = rec
+ self.add_field(k, fp) # type: ignore
+
+ else:
+ raise TypeError(
+ "Only io.IOBase, multidict and (name, file) "
+ "pairs allowed, use .add_field() for passing "
+ "more complex parameters, got {!r}".format(rec)
+ )
+
+ def _gen_form_urlencoded(self) -> payload.BytesPayload:
+ # form data (x-www-form-urlencoded)
+ data = []
+ for type_options, _, value in self._fields:
+ data.append((type_options["name"], value))
+
+ charset = self._charset if self._charset is not None else "utf-8"
+
+ if charset == "utf-8":
+ content_type = "application/x-www-form-urlencoded"
+ else:
+ content_type = "application/x-www-form-urlencoded; " "charset=%s" % charset
+
+ return payload.BytesPayload(
+ urlencode(data, doseq=True, encoding=charset).encode(),
+ content_type=content_type,
+ )
+
+ def _gen_form_data(self) -> multipart.MultipartWriter:
+ """Encode a list of fields using the multipart/form-data MIME format"""
+ if self._is_processed:
+ raise RuntimeError("Form data has been processed already")
+ for dispparams, headers, value in self._fields:
+ try:
+ if hdrs.CONTENT_TYPE in headers:
+ part = payload.get_payload(
+ value,
+ content_type=headers[hdrs.CONTENT_TYPE],
+ headers=headers,
+ encoding=self._charset,
+ )
+ else:
+ part = payload.get_payload(
+ value, headers=headers, encoding=self._charset
+ )
+ except Exception as exc:
+ raise TypeError(
+ "Can not serialize value type: %r\n "
+ "headers: %r\n value: %r" % (type(value), headers, value)
+ ) from exc
+
+ if dispparams:
+ part.set_content_disposition(
+ "form-data", quote_fields=self._quote_fields, **dispparams
+ )
+ # FIXME: cgi.FieldStorage doesn't like body parts with
+ # Content-Length which were sent via chunked transfer encoding
+ assert part.headers is not None
+ part.headers.popall(hdrs.CONTENT_LENGTH, None)
+
+ self._writer.append_payload(part)
+
+ self._is_processed = True
+ return self._writer
+
+ def __call__(self) -> Payload:
+ if self._is_multipart:
+ return self._gen_form_data()
+ else:
+ return self._gen_form_urlencoded()
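
FormData switches to multipart as soon as any field carries a file object, filename or explicit content type; otherwise __call__() yields a urlencoded BytesPayload. A sketch using only the class above (field names and file are illustrative):

import aiohttp

form = aiohttp.FormData()
form.add_field("user", "alice")   # plain string field: stays urlencoded
form.add_field(
    "avatar",
    open("avatar.png", "rb"),     # an IO value flips is_multipart
    filename="avatar.png",
    content_type="image/png",
)
payload = form()  # MultipartWriter here; pass form as data= to session.post()
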
diff --git a/third_party/python/aiohttp/aiohttp/frozenlist.py b/third_party/python/aiohttp/aiohttp/frozenlist.py
new file mode 100644
index 0000000000..46b26108cf
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/frozenlist.py
@@ -0,0 +1,72 @@
+from collections.abc import MutableSequence
+from functools import total_ordering
+
+from .helpers import NO_EXTENSIONS
+
+
+@total_ordering
+class FrozenList(MutableSequence):
+
+ __slots__ = ("_frozen", "_items")
+
+ def __init__(self, items=None):
+ self._frozen = False
+ if items is not None:
+ items = list(items)
+ else:
+ items = []
+ self._items = items
+
+ @property
+ def frozen(self):
+ return self._frozen
+
+ def freeze(self):
+ self._frozen = True
+
+ def __getitem__(self, index):
+ return self._items[index]
+
+ def __setitem__(self, index, value):
+ if self._frozen:
+ raise RuntimeError("Cannot modify frozen list.")
+ self._items[index] = value
+
+ def __delitem__(self, index):
+ if self._frozen:
+ raise RuntimeError("Cannot modify frozen list.")
+ del self._items[index]
+
+ def __len__(self):
+ return self._items.__len__()
+
+ def __iter__(self):
+ return self._items.__iter__()
+
+ def __reversed__(self):
+ return self._items.__reversed__()
+
+ def __eq__(self, other):
+ return list(self) == other
+
+ def __le__(self, other):
+ return list(self) <= other
+
+ def insert(self, pos, item):
+ if self._frozen:
+ raise RuntimeError("Cannot modify frozen list.")
+ self._items.insert(pos, item)
+
+ def __repr__(self):
+ return f"<FrozenList(frozen={self._frozen}, {self._items!r})>"
+
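+# Illustrative behaviour (sketch): mutation is allowed until freeze().
+#
+#     fl = FrozenList([1, 2])
+#     fl.append(3)   # ok while mutable (append comes from MutableSequence)
+#     fl.freeze()
+#     fl.append(4)   # raises RuntimeError("Cannot modify frozen list.")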
+
+PyFrozenList = FrozenList
+
+try:
+ from aiohttp._frozenlist import FrozenList as CFrozenList # type: ignore
+
+ if not NO_EXTENSIONS:
+ FrozenList = CFrozenList # type: ignore
+except ImportError: # pragma: no cover
+ pass
diff --git a/third_party/python/aiohttp/aiohttp/frozenlist.pyi b/third_party/python/aiohttp/aiohttp/frozenlist.pyi
new file mode 100644
index 0000000000..72ab086715
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/frozenlist.pyi
@@ -0,0 +1,46 @@
+from typing import (
+ Generic,
+ Iterable,
+ Iterator,
+ List,
+ MutableSequence,
+ Optional,
+ TypeVar,
+ Union,
+ overload,
+)
+
+_T = TypeVar("_T")
+_Arg = Union[List[_T], Iterable[_T]]
+
+class FrozenList(MutableSequence[_T], Generic[_T]):
+ def __init__(self, items: Optional[_Arg[_T]] = ...) -> None: ...
+ @property
+ def frozen(self) -> bool: ...
+ def freeze(self) -> None: ...
+ @overload
+ def __getitem__(self, i: int) -> _T: ...
+ @overload
+ def __getitem__(self, s: slice) -> FrozenList[_T]: ...
+ @overload
+ def __setitem__(self, i: int, o: _T) -> None: ...
+ @overload
+ def __setitem__(self, s: slice, o: Iterable[_T]) -> None: ...
+ @overload
+ def __delitem__(self, i: int) -> None: ...
+ @overload
+ def __delitem__(self, i: slice) -> None: ...
+ def __len__(self) -> int: ...
+ def __iter__(self) -> Iterator[_T]: ...
+ def __reversed__(self) -> Iterator[_T]: ...
+ def __eq__(self, other: object) -> bool: ...
+ def __le__(self, other: FrozenList[_T]) -> bool: ...
+ def __ne__(self, other: object) -> bool: ...
+ def __lt__(self, other: FrozenList[_T]) -> bool: ...
+ def __ge__(self, other: FrozenList[_T]) -> bool: ...
+ def __gt__(self, other: FrozenList[_T]) -> bool: ...
+ def insert(self, pos: int, item: _T) -> None: ...
+ def __repr__(self) -> str: ...
+
+# types for C accelerators are the same
+CFrozenList = PyFrozenList = FrozenList
diff --git a/third_party/python/aiohttp/aiohttp/hdrs.py b/third_party/python/aiohttp/aiohttp/hdrs.py
new file mode 100644
index 0000000000..f04a5457f9
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/hdrs.py
@@ -0,0 +1,108 @@
+"""HTTP Headers constants."""
+
+# After changing the file content call ./tools/gen.py
+# to regenerate the headers parser
+
+from multidict import istr
+
+METH_ANY = "*"
+METH_CONNECT = "CONNECT"
+METH_HEAD = "HEAD"
+METH_GET = "GET"
+METH_DELETE = "DELETE"
+METH_OPTIONS = "OPTIONS"
+METH_PATCH = "PATCH"
+METH_POST = "POST"
+METH_PUT = "PUT"
+METH_TRACE = "TRACE"
+
+METH_ALL = {
+ METH_CONNECT,
+ METH_HEAD,
+ METH_GET,
+ METH_DELETE,
+ METH_OPTIONS,
+ METH_PATCH,
+ METH_POST,
+ METH_PUT,
+ METH_TRACE,
+}
+
+
+ACCEPT = istr("Accept")
+ACCEPT_CHARSET = istr("Accept-Charset")
+ACCEPT_ENCODING = istr("Accept-Encoding")
+ACCEPT_LANGUAGE = istr("Accept-Language")
+ACCEPT_RANGES = istr("Accept-Ranges")
+ACCESS_CONTROL_MAX_AGE = istr("Access-Control-Max-Age")
+ACCESS_CONTROL_ALLOW_CREDENTIALS = istr("Access-Control-Allow-Credentials")
+ACCESS_CONTROL_ALLOW_HEADERS = istr("Access-Control-Allow-Headers")
+ACCESS_CONTROL_ALLOW_METHODS = istr("Access-Control-Allow-Methods")
+ACCESS_CONTROL_ALLOW_ORIGIN = istr("Access-Control-Allow-Origin")
+ACCESS_CONTROL_EXPOSE_HEADERS = istr("Access-Control-Expose-Headers")
+ACCESS_CONTROL_REQUEST_HEADERS = istr("Access-Control-Request-Headers")
+ACCESS_CONTROL_REQUEST_METHOD = istr("Access-Control-Request-Method")
+AGE = istr("Age")
+ALLOW = istr("Allow")
+AUTHORIZATION = istr("Authorization")
+CACHE_CONTROL = istr("Cache-Control")
+CONNECTION = istr("Connection")
+CONTENT_DISPOSITION = istr("Content-Disposition")
+CONTENT_ENCODING = istr("Content-Encoding")
+CONTENT_LANGUAGE = istr("Content-Language")
+CONTENT_LENGTH = istr("Content-Length")
+CONTENT_LOCATION = istr("Content-Location")
+CONTENT_MD5 = istr("Content-MD5")
+CONTENT_RANGE = istr("Content-Range")
+CONTENT_TRANSFER_ENCODING = istr("Content-Transfer-Encoding")
+CONTENT_TYPE = istr("Content-Type")
+COOKIE = istr("Cookie")
+DATE = istr("Date")
+DESTINATION = istr("Destination")
+DIGEST = istr("Digest")
+ETAG = istr("Etag")
+EXPECT = istr("Expect")
+EXPIRES = istr("Expires")
+FORWARDED = istr("Forwarded")
+FROM = istr("From")
+HOST = istr("Host")
+IF_MATCH = istr("If-Match")
+IF_MODIFIED_SINCE = istr("If-Modified-Since")
+IF_NONE_MATCH = istr("If-None-Match")
+IF_RANGE = istr("If-Range")
+IF_UNMODIFIED_SINCE = istr("If-Unmodified-Since")
+KEEP_ALIVE = istr("Keep-Alive")
+LAST_EVENT_ID = istr("Last-Event-ID")
+LAST_MODIFIED = istr("Last-Modified")
+LINK = istr("Link")
+LOCATION = istr("Location")
+MAX_FORWARDS = istr("Max-Forwards")
+ORIGIN = istr("Origin")
+PRAGMA = istr("Pragma")
+PROXY_AUTHENTICATE = istr("Proxy-Authenticate")
+PROXY_AUTHORIZATION = istr("Proxy-Authorization")
+RANGE = istr("Range")
+REFERER = istr("Referer")
+RETRY_AFTER = istr("Retry-After")
+SEC_WEBSOCKET_ACCEPT = istr("Sec-WebSocket-Accept")
+SEC_WEBSOCKET_VERSION = istr("Sec-WebSocket-Version")
+SEC_WEBSOCKET_PROTOCOL = istr("Sec-WebSocket-Protocol")
+SEC_WEBSOCKET_EXTENSIONS = istr("Sec-WebSocket-Extensions")
+SEC_WEBSOCKET_KEY = istr("Sec-WebSocket-Key")
+SEC_WEBSOCKET_KEY1 = istr("Sec-WebSocket-Key1")
+SERVER = istr("Server")
+SET_COOKIE = istr("Set-Cookie")
+TE = istr("TE")
+TRAILER = istr("Trailer")
+TRANSFER_ENCODING = istr("Transfer-Encoding")
+UPGRADE = istr("Upgrade")
+URI = istr("URI")
+USER_AGENT = istr("User-Agent")
+VARY = istr("Vary")
+VIA = istr("Via")
+WANT_DIGEST = istr("Want-Digest")
+WARNING = istr("Warning")
+WWW_AUTHENTICATE = istr("WWW-Authenticate")
+X_FORWARDED_FOR = istr("X-Forwarded-For")
+X_FORWARDED_HOST = istr("X-Forwarded-Host")
+X_FORWARDED_PROTO = istr("X-Forwarded-Proto")
diff --git a/third_party/python/aiohttp/aiohttp/helpers.py b/third_party/python/aiohttp/aiohttp/helpers.py
new file mode 100644
index 0000000000..bbf5f1298f
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/helpers.py
@@ -0,0 +1,780 @@
+"""Various helper functions"""
+
+import asyncio
+import base64
+import binascii
+import cgi
+import datetime
+import functools
+import inspect
+import netrc
+import os
+import platform
+import re
+import sys
+import time
+import warnings
+import weakref
+from collections import namedtuple
+from contextlib import suppress
+from math import ceil
+from pathlib import Path
+from types import TracebackType
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generator,
+ Generic,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Pattern,
+ Set,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ cast,
+)
+from urllib.parse import quote
+from urllib.request import getproxies
+
+import async_timeout
+import attr
+from multidict import MultiDict, MultiDictProxy
+from typing_extensions import Protocol
+from yarl import URL
+
+from . import hdrs
+from .log import client_logger, internal_logger
+from .typedefs import PathLike # noqa
+
+__all__ = ("BasicAuth", "ChainMapProxy")
+
+PY_36 = sys.version_info >= (3, 6)
+PY_37 = sys.version_info >= (3, 7)
+PY_38 = sys.version_info >= (3, 8)
+
+if not PY_37:
+ import idna_ssl
+
+ idna_ssl.patch_match_hostname()
+
+try:
+ from typing import ContextManager
+except ImportError:
+ from typing_extensions import ContextManager
+
+
+def all_tasks(
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+) -> Set["asyncio.Task[Any]"]:
+ tasks = list(asyncio.Task.all_tasks(loop))
+ return {t for t in tasks if not t.done()}
+
+
+if PY_37:
+ all_tasks = getattr(asyncio, "all_tasks")
+
+
+_T = TypeVar("_T")
+_S = TypeVar("_S")
+
+
+sentinel = object() # type: Any
+NO_EXTENSIONS = bool(os.environ.get("AIOHTTP_NO_EXTENSIONS")) # type: bool
+
+# N.B. sys.flags.dev_mode is available on Python 3.7+, use getattr
+# for compatibility with older versions
+DEBUG = getattr(sys.flags, "dev_mode", False) or (
+ not sys.flags.ignore_environment and bool(os.environ.get("PYTHONASYNCIODEBUG"))
+) # type: bool
+
+
+CHAR = {chr(i) for i in range(0, 128)}
+CTL = {chr(i) for i in range(0, 32)} | {
+ chr(127),
+}
+SEPARATORS = {
+ "(",
+ ")",
+ "<",
+ ">",
+ "@",
+ ",",
+ ";",
+ ":",
+ "\\",
+ '"',
+ "/",
+ "[",
+ "]",
+ "?",
+ "=",
+ "{",
+ "}",
+ " ",
+ chr(9),
+}
+TOKEN = CHAR ^ CTL ^ SEPARATORS
+
+
+class noop:
+ def __await__(self) -> Generator[None, None, None]:
+ yield
+
+
+class BasicAuth(namedtuple("BasicAuth", ["login", "password", "encoding"])):
+ """Http basic authentication helper."""
+
+ def __new__(
+ cls, login: str, password: str = "", encoding: str = "latin1"
+ ) -> "BasicAuth":
+ if login is None:
+ raise ValueError("None is not allowed as login value")
+
+ if password is None:
+ raise ValueError("None is not allowed as password value")
+
+ if ":" in login:
+ raise ValueError('A ":" is not allowed in login (RFC 1945#section-11.1)')
+
+ return super().__new__(cls, login, password, encoding)
+
+ @classmethod
+ def decode(cls, auth_header: str, encoding: str = "latin1") -> "BasicAuth":
+ """Create a BasicAuth object from an Authorization HTTP header."""
+ try:
+ auth_type, encoded_credentials = auth_header.split(" ", 1)
+ except ValueError:
+ raise ValueError("Could not parse authorization header.")
+
+ if auth_type.lower() != "basic":
+ raise ValueError("Unknown authorization method %s" % auth_type)
+
+ try:
+ decoded = base64.b64decode(
+ encoded_credentials.encode("ascii"), validate=True
+ ).decode(encoding)
+ except binascii.Error:
+ raise ValueError("Invalid base64 encoding.")
+
+ try:
+ # RFC 2617 HTTP Authentication
+ # https://www.ietf.org/rfc/rfc2617.txt
+ # the colon must be present, but the username and password may be
+ # otherwise blank.
+ username, password = decoded.split(":", 1)
+ except ValueError:
+ raise ValueError("Invalid credentials.")
+
+ return cls(username, password, encoding=encoding)
+
+ @classmethod
+ def from_url(cls, url: URL, *, encoding: str = "latin1") -> Optional["BasicAuth"]:
+ """Create BasicAuth from url."""
+ if not isinstance(url, URL):
+ raise TypeError("url should be yarl.URL instance")
+ if url.user is None:
+ return None
+ return cls(url.user, url.password or "", encoding=encoding)
+
+ def encode(self) -> str:
+ """Encode credentials."""
+ creds = (f"{self.login}:{self.password}").encode(self.encoding)
+ return "Basic %s" % base64.b64encode(creds).decode(self.encoding)
+
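+# Round-trip example (sketch):
+#
+#     auth = BasicAuth("user", "pass")
+#     auth.encode()                            # 'Basic dXNlcjpwYXNz'
+#     BasicAuth.decode(auth.encode()) == auth  # True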
+
+def strip_auth_from_url(url: URL) -> Tuple[URL, Optional[BasicAuth]]:
+ auth = BasicAuth.from_url(url)
+ if auth is None:
+ return url, None
+ else:
+ return url.with_user(None), auth
+
+
+def netrc_from_env() -> Optional[netrc.netrc]:
+ """Attempt to load the netrc file from the path specified by the env-var
+ NETRC or in the default location in the user's home directory.
+
+ Returns None if it couldn't be found or fails to parse.
+ """
+ netrc_env = os.environ.get("NETRC")
+
+ if netrc_env is not None:
+ netrc_path = Path(netrc_env)
+ else:
+ try:
+ home_dir = Path.home()
+ except RuntimeError as e: # pragma: no cover
+ # if pathlib can't resolve home, it may raise a RuntimeError
+ client_logger.debug(
+ "Could not resolve home directory when "
+ "trying to look for .netrc file: %s",
+ e,
+ )
+ return None
+
+ netrc_path = home_dir / (
+ "_netrc" if platform.system() == "Windows" else ".netrc"
+ )
+
+ try:
+ return netrc.netrc(str(netrc_path))
+ except netrc.NetrcParseError as e:
+ client_logger.warning("Could not parse .netrc file: %s", e)
+ except OSError as e:
+ # we couldn't read the file (doesn't exist, permissions, etc.)
+ if netrc_env or netrc_path.is_file():
+ # only warn if the environment wanted us to load it,
+ # or it appears like the default file does actually exist
+ client_logger.warning("Could not read .netrc file: %s", e)
+
+ return None
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class ProxyInfo:
+ proxy: URL
+ proxy_auth: Optional[BasicAuth]
+
+
+def proxies_from_env() -> Dict[str, ProxyInfo]:
+ proxy_urls = {k: URL(v) for k, v in getproxies().items() if k in ("http", "https")}
+ netrc_obj = netrc_from_env()
+ stripped = {k: strip_auth_from_url(v) for k, v in proxy_urls.items()}
+ ret = {}
+ for proto, val in stripped.items():
+ proxy, auth = val
+ if proxy.scheme == "https":
+ client_logger.warning("HTTPS proxies %s are not supported, ignoring", proxy)
+ continue
+ if netrc_obj and auth is None:
+ auth_from_netrc = None
+ if proxy.host is not None:
+ auth_from_netrc = netrc_obj.authenticators(proxy.host)
+ if auth_from_netrc is not None:
+ # auth_from_netrc is a (`user`, `account`, `password`) tuple;
+ # both `user` and `account` may hold the username:
+ # if `user` is None, fall back to `account`
+ *logins, password = auth_from_netrc
+ login = logins[0] if logins[0] else logins[-1]
+ auth = BasicAuth(cast(str, login), cast(str, password))
+ ret[proto] = ProxyInfo(proxy, auth)
+ return ret
+
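+# Sketch: with HTTP_PROXY=http://user:pw@proxy.local:3128 in the
+# environment, proxies_from_env() maps "http" to a ProxyInfo whose
+# proxy URL is http://proxy.local:3128 and whose auth is
+# BasicAuth("user", "pw"), pulled from the URL's userinfo.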
+
+def current_task(
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+) -> "Optional[asyncio.Task[Any]]":
+ if PY_37:
+ return asyncio.current_task(loop=loop)
+ else:
+ return asyncio.Task.current_task(loop=loop)
+
+
+def get_running_loop(
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+) -> asyncio.AbstractEventLoop:
+ if loop is None:
+ loop = asyncio.get_event_loop()
+ if not loop.is_running():
+ warnings.warn(
+ "The object should be created within an async function",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ if loop.get_debug():
+ internal_logger.warning(
+ "The object should be created within an async function", stack_info=True
+ )
+ return loop
+
+
+def isasyncgenfunction(obj: Any) -> bool:
+ func = getattr(inspect, "isasyncgenfunction", None)
+ if func is not None:
+ return func(obj)
+ else:
+ return False
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class MimeType:
+ type: str
+ subtype: str
+ suffix: str
+ parameters: "MultiDictProxy[str]"
+
+
+@functools.lru_cache(maxsize=56)
+def parse_mimetype(mimetype: str) -> MimeType:
+ """Parses a MIME type into its components.
+
+ mimetype is a MIME type string.
+
+ Returns a MimeType object.
+
+ Example:
+
+ >>> parse_mimetype('text/html; charset=utf-8')
+ MimeType(type='text', subtype='html', suffix='',
+ parameters={'charset': 'utf-8'})
+
+ """
+ if not mimetype:
+ return MimeType(
+ type="", subtype="", suffix="", parameters=MultiDictProxy(MultiDict())
+ )
+
+ parts = mimetype.split(";")
+ params = MultiDict() # type: MultiDict[str]
+ for item in parts[1:]:
+ if not item:
+ continue
+ key, value = cast(
+ Tuple[str, str], item.split("=", 1) if "=" in item else (item, "")
+ )
+ params.add(key.lower().strip(), value.strip(' "'))
+
+ fulltype = parts[0].strip().lower()
+ if fulltype == "*":
+ fulltype = "*/*"
+
+ mtype, stype = (
+ cast(Tuple[str, str], fulltype.split("/", 1))
+ if "/" in fulltype
+ else (fulltype, "")
+ )
+ stype, suffix = (
+ cast(Tuple[str, str], stype.split("+", 1)) if "+" in stype else (stype, "")
+ )
+
+ return MimeType(
+ type=mtype, subtype=stype, suffix=suffix, parameters=MultiDictProxy(params)
+ )
+
+
+def guess_filename(obj: Any, default: Optional[str] = None) -> Optional[str]:
+ name = getattr(obj, "name", None)
+ if name and isinstance(name, str) and name[0] != "<" and name[-1] != ">":
+ return Path(name).name
+ return default
+
+
+def content_disposition_header(
+ disptype: str, quote_fields: bool = True, **params: str
+) -> str:
+ """Sets ``Content-Disposition`` header.
+
+ disptype is a disposition type: inline, attachment, form-data.
+ Should be valid extension token (see RFC 2183)
+
+ params is a dict with disposition params.
+ """
+ if not disptype or not (TOKEN > set(disptype)):
+ raise ValueError("bad content disposition type {!r}" "".format(disptype))
+
+ value = disptype
+ if params:
+ lparams = []
+ for key, val in params.items():
+ if not key or not (TOKEN > set(key)):
+ raise ValueError(
+ "bad content disposition parameter {!r}={!r}".format(key, val)
+ )
+ qval = quote(val, "") if quote_fields else val
+ lparams.append((key, '"%s"' % qval))
+ if key == "filename":
+ lparams.append(("filename*", "utf-8''" + qval))
+ sparams = "; ".join("=".join(pair) for pair in lparams)
+ value = "; ".join((value, sparams))
+ return value
+
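+# Example (sketch):
+#     content_disposition_header("attachment", filename="report.txt")
+# yields: attachment; filename="report.txt"; filename*=utf-8''report.txt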
+
+class _TSelf(Protocol):
+ _cache: Dict[str, Any]
+
+
+class reify(Generic[_T]):
+ """Use as a class method decorator. It operates almost exactly like
+ the Python `@property` decorator, but it puts the result of the
+ method it decorates into the instance dict after the first call,
+ effectively replacing the function it decorates with an instance
+ variable. It is, in Python parlance, a data descriptor.
+
+ """
+
+ def __init__(self, wrapped: Callable[..., _T]) -> None:
+ self.wrapped = wrapped
+ self.__doc__ = wrapped.__doc__
+ self.name = wrapped.__name__
+
+ def __get__(self, inst: _TSelf, owner: Optional[Type[Any]] = None) -> _T:
+ try:
+ try:
+ return inst._cache[self.name]
+ except KeyError:
+ val = self.wrapped(inst)
+ inst._cache[self.name] = val
+ return val
+ except AttributeError:
+ if inst is None:
+ return self
+ raise
+
+ def __set__(self, inst: _TSelf, value: _T) -> None:
+ raise AttributeError("reified property is read-only")
+
+
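+# Typical use (sketch; `parse_headers` here is a hypothetical helper):
+#
+#     class Request:
+#         def __init__(self):
+#             self._cache = {}           # reify stores results here
+#
+#         @reify
+#         def headers(self):
+#             return parse_headers(...)  # runs once per instance
+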
+reify_py = reify
+
+try:
+ from ._helpers import reify as reify_c
+
+ if not NO_EXTENSIONS:
+ reify = reify_c # type: ignore
+except ImportError:
+ pass
+
+_ipv4_pattern = (
+ r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}"
+ r"(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$"
+)
+_ipv6_pattern = (
+ r"^(?:(?:(?:[A-F0-9]{1,4}:){6}|(?=(?:[A-F0-9]{0,4}:){0,6}"
+ r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}$)(([0-9A-F]{1,4}:){0,5}|:)"
+ r"((:[0-9A-F]{1,4}){1,5}:|:)|::(?:[A-F0-9]{1,4}:){5})"
+ r"(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\.){3}"
+ r"(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])|(?:[A-F0-9]{1,4}:){7}"
+ r"[A-F0-9]{1,4}|(?=(?:[A-F0-9]{0,4}:){0,7}[A-F0-9]{0,4}$)"
+ r"(([0-9A-F]{1,4}:){1,7}|:)((:[0-9A-F]{1,4}){1,7}|:)|(?:[A-F0-9]{1,4}:){7}"
+ r":|:(:[A-F0-9]{1,4}){7})$"
+)
+_ipv4_regex = re.compile(_ipv4_pattern)
+_ipv6_regex = re.compile(_ipv6_pattern, flags=re.IGNORECASE)
+_ipv4_regexb = re.compile(_ipv4_pattern.encode("ascii"))
+_ipv6_regexb = re.compile(_ipv6_pattern.encode("ascii"), flags=re.IGNORECASE)
+
+
+def _is_ip_address(
+ regex: Pattern[str], regexb: Pattern[bytes], host: Optional[Union[str, bytes]]
+) -> bool:
+ if host is None:
+ return False
+ if isinstance(host, str):
+ return bool(regex.match(host))
+ elif isinstance(host, (bytes, bytearray, memoryview)):
+ return bool(regexb.match(host))
+ else:
+ raise TypeError("{} [{}] is not a str or bytes".format(host, type(host)))
+
+
+is_ipv4_address = functools.partial(_is_ip_address, _ipv4_regex, _ipv4_regexb)
+is_ipv6_address = functools.partial(_is_ip_address, _ipv6_regex, _ipv6_regexb)
+
+
+def is_ip_address(host: Optional[Union[str, bytes, bytearray, memoryview]]) -> bool:
+ return is_ipv4_address(host) or is_ipv6_address(host)
+
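+# e.g. is_ip_address("127.0.0.1") and is_ip_address("::1") are True,
+# while is_ip_address("example.com") is False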
+
+def next_whole_second() -> datetime.datetime:
+ """Return current time rounded up to the next whole second."""
+ return datetime.datetime.now(datetime.timezone.utc).replace(
+ microsecond=0
+ ) + datetime.timedelta(seconds=0)
+
+
+_cached_current_datetime = None # type: Optional[int]
+_cached_formatted_datetime = ""
+
+
+def rfc822_formatted_time() -> str:
+ global _cached_current_datetime
+ global _cached_formatted_datetime
+
+ now = int(time.time())
+ if now != _cached_current_datetime:
+ # Weekday and month names for HTTP date/time formatting;
+ # always English!
+ # Tuples are constants stored in codeobject!
+ _weekdayname = ("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun")
+ _monthname = (
+ "", # Dummy so we can use 1-based month numbers
+ "Jan",
+ "Feb",
+ "Mar",
+ "Apr",
+ "May",
+ "Jun",
+ "Jul",
+ "Aug",
+ "Sep",
+ "Oct",
+ "Nov",
+ "Dec",
+ )
+
+ year, month, day, hh, mm, ss, wd, *tail = time.gmtime(now)
+ _cached_formatted_datetime = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
+ _weekdayname[wd],
+ day,
+ _monthname[month],
+ year,
+ hh,
+ mm,
+ ss,
+ )
+ _cached_current_datetime = now
+ return _cached_formatted_datetime
+
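+# The formatted value looks like 'Mon, 01 Jan 2024 00:00:00 GMT' and is
+# recomputed at most once per second via the cache above.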
+
+def _weakref_handle(info): # type: ignore
+ ref, name = info
+ ob = ref()
+ if ob is not None:
+ with suppress(Exception):
+ getattr(ob, name)()
+
+
+def weakref_handle(ob, name, timeout, loop): # type: ignore
+ if timeout is not None and timeout > 0:
+ when = loop.time() + timeout
+ if timeout >= 5:
+ when = ceil(when)
+
+ return loop.call_at(when, _weakref_handle, (weakref.ref(ob), name))
+
+
+def call_later(cb, timeout, loop): # type: ignore
+ if timeout is not None and timeout > 0:
+ when = loop.time() + timeout
+ if timeout > 5:
+ when = ceil(when)
+ return loop.call_at(when, cb)
+
+
+class TimeoutHandle:
+ """ Timeout handle """
+
+ def __init__(
+ self, loop: asyncio.AbstractEventLoop, timeout: Optional[float]
+ ) -> None:
+ self._timeout = timeout
+ self._loop = loop
+ self._callbacks = (
+ []
+ ) # type: List[Tuple[Callable[..., None], Tuple[Any, ...], Dict[str, Any]]]
+
+ def register(
+ self, callback: Callable[..., None], *args: Any, **kwargs: Any
+ ) -> None:
+ self._callbacks.append((callback, args, kwargs))
+
+ def close(self) -> None:
+ self._callbacks.clear()
+
+ def start(self) -> Optional[asyncio.Handle]:
+ timeout = self._timeout
+ if timeout is not None and timeout > 0:
+ when = self._loop.time() + timeout
+ if timeout >= 5:
+ when = ceil(when)
+ return self._loop.call_at(when, self.__call__)
+ else:
+ return None
+
+ def timer(self) -> "BaseTimerContext":
+ if self._timeout is not None and self._timeout > 0:
+ timer = TimerContext(self._loop)
+ self.register(timer.timeout)
+ return timer
+ else:
+ return TimerNoop()
+
+ def __call__(self) -> None:
+ for cb, args, kwargs in self._callbacks:
+ with suppress(Exception):
+ cb(*args, **kwargs)
+
+ self._callbacks.clear()
+
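+# Intended use (sketch; `fetch` is hypothetical): arm the handle once,
+# then guard awaits with the timer context so they are cancelled when
+# the deadline fires.
+#
+#     handle = TimeoutHandle(loop, timeout=10.0)
+#     handle.start()
+#     with handle.timer():
+#         await fetch()   # raises asyncio.TimeoutError on expiry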
+
+class BaseTimerContext(ContextManager["BaseTimerContext"]):
+ pass
+
+
+class TimerNoop(BaseTimerContext):
+ def __enter__(self) -> BaseTimerContext:
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ return
+
+
+class TimerContext(BaseTimerContext):
+ """ Low resolution timeout context manager """
+
+ def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop
+ self._tasks = [] # type: List[asyncio.Task[Any]]
+ self._cancelled = False
+
+ def __enter__(self) -> BaseTimerContext:
+ task = current_task(loop=self._loop)
+
+ if task is None:
+ raise RuntimeError(
+ "Timeout context manager should be used inside a task"
+ )
+
+ if self._cancelled:
+ task.cancel()
+ raise asyncio.TimeoutError from None
+
+ self._tasks.append(task)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> Optional[bool]:
+ if self._tasks:
+ self._tasks.pop()
+
+ if exc_type is asyncio.CancelledError and self._cancelled:
+ raise asyncio.TimeoutError from None
+ return None
+
+ def timeout(self) -> None:
+ if not self._cancelled:
+ for task in set(self._tasks):
+ task.cancel()
+
+ self._cancelled = True
+
+
+class CeilTimeout(async_timeout.timeout):
+ def __enter__(self) -> async_timeout.timeout:
+ if self._timeout is not None:
+ self._task = current_task(loop=self._loop)
+ if self._task is None:
+ raise RuntimeError(
+ "Timeout context manager should be used inside a task"
+ )
+ now = self._loop.time()
+ delay = self._timeout
+ when = now + delay
+ if delay > 5:
+ when = ceil(when)
+ self._cancel_handler = self._loop.call_at(when, self._cancel_task)
+ return self
+
+
+class HeadersMixin:
+
+ ATTRS = frozenset(["_content_type", "_content_dict", "_stored_content_type"])
+
+ _content_type = None # type: Optional[str]
+ _content_dict = None # type: Optional[Dict[str, str]]
+ _stored_content_type = sentinel
+
+ def _parse_content_type(self, raw: str) -> None:
+ self._stored_content_type = raw
+ if raw is None:
+ # default value according to RFC 2616
+ self._content_type = "application/octet-stream"
+ self._content_dict = {}
+ else:
+ self._content_type, self._content_dict = cgi.parse_header(raw)
+
+ @property
+ def content_type(self) -> str:
+ """The value of content part for Content-Type HTTP header."""
+ raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore
+ if self._stored_content_type != raw:
+ self._parse_content_type(raw)
+ return self._content_type # type: ignore
+
+ @property
+ def charset(self) -> Optional[str]:
+ """The value of charset part for Content-Type HTTP header."""
+ raw = self._headers.get(hdrs.CONTENT_TYPE) # type: ignore
+ if self._stored_content_type != raw:
+ self._parse_content_type(raw)
+ return self._content_dict.get("charset") # type: ignore
+
+ @property
+ def content_length(self) -> Optional[int]:
+ """The value of Content-Length HTTP header."""
+ content_length = self._headers.get(hdrs.CONTENT_LENGTH) # type: ignore
+
+ if content_length is not None:
+ return int(content_length)
+ else:
+ return None
+
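+# e.g. for "Content-Type: text/html; charset=utf-8" a subclass exposing
+# self._headers reports content_type == "text/html" and charset == "utf-8".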
+
+def set_result(fut: "asyncio.Future[_T]", result: _T) -> None:
+ if not fut.done():
+ fut.set_result(result)
+
+
+def set_exception(fut: "asyncio.Future[_T]", exc: BaseException) -> None:
+ if not fut.done():
+ fut.set_exception(exc)
+
+
+class ChainMapProxy(Mapping[str, Any]):
+ __slots__ = ("_maps",)
+
+ def __init__(self, maps: Iterable[Mapping[str, Any]]) -> None:
+ self._maps = tuple(maps)
+
+ def __init_subclass__(cls) -> None:
+ raise TypeError(
+ "Inheritance of class {} from ChainMapProxy "
+ "is forbidden".format(cls.__name__)
+ )
+
+ def __getitem__(self, key: str) -> Any:
+ for mapping in self._maps:
+ try:
+ return mapping[key]
+ except KeyError:
+ pass
+ raise KeyError(key)
+
+ def get(self, key: str, default: Any = None) -> Any:
+ return self[key] if key in self else default
+
+ def __len__(self) -> int:
+ # reuses stored hash values if possible
+ return len(set().union(*self._maps)) # type: ignore
+
+ def __iter__(self) -> Iterator[str]:
+ d = {} # type: Dict[str, Any]
+ for mapping in reversed(self._maps):
+ # reuses stored hash values if possible
+ d.update(mapping)
+ return iter(d)
+
+ def __contains__(self, key: object) -> bool:
+ return any(key in m for m in self._maps)
+
+ def __bool__(self) -> bool:
+ return any(self._maps)
+
+ def __repr__(self) -> str:
+ content = ", ".join(map(repr, self._maps))
+ return f"ChainMapProxy({content})"
diff --git a/third_party/python/aiohttp/aiohttp/http.py b/third_party/python/aiohttp/aiohttp/http.py
new file mode 100644
index 0000000000..415ffbf563
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/http.py
@@ -0,0 +1,72 @@
+import http.server
+import sys
+from typing import Mapping, Tuple
+
+from . import __version__
+from .http_exceptions import HttpProcessingError as HttpProcessingError
+from .http_parser import (
+ HeadersParser as HeadersParser,
+ HttpParser as HttpParser,
+ HttpRequestParser as HttpRequestParser,
+ HttpResponseParser as HttpResponseParser,
+ RawRequestMessage as RawRequestMessage,
+ RawResponseMessage as RawResponseMessage,
+)
+from .http_websocket import (
+ WS_CLOSED_MESSAGE as WS_CLOSED_MESSAGE,
+ WS_CLOSING_MESSAGE as WS_CLOSING_MESSAGE,
+ WS_KEY as WS_KEY,
+ WebSocketError as WebSocketError,
+ WebSocketReader as WebSocketReader,
+ WebSocketWriter as WebSocketWriter,
+ WSCloseCode as WSCloseCode,
+ WSMessage as WSMessage,
+ WSMsgType as WSMsgType,
+ ws_ext_gen as ws_ext_gen,
+ ws_ext_parse as ws_ext_parse,
+)
+from .http_writer import (
+ HttpVersion as HttpVersion,
+ HttpVersion10 as HttpVersion10,
+ HttpVersion11 as HttpVersion11,
+ StreamWriter as StreamWriter,
+)
+
+__all__ = (
+ "HttpProcessingError",
+ "RESPONSES",
+ "SERVER_SOFTWARE",
+ # .http_writer
+ "StreamWriter",
+ "HttpVersion",
+ "HttpVersion10",
+ "HttpVersion11",
+ # .http_parser
+ "HeadersParser",
+ "HttpParser",
+ "HttpRequestParser",
+ "HttpResponseParser",
+ "RawRequestMessage",
+ "RawResponseMessage",
+ # .http_websocket
+ "WS_CLOSED_MESSAGE",
+ "WS_CLOSING_MESSAGE",
+ "WS_KEY",
+ "WebSocketReader",
+ "WebSocketWriter",
+ "ws_ext_gen",
+ "ws_ext_parse",
+ "WSMessage",
+ "WebSocketError",
+ "WSMsgType",
+ "WSCloseCode",
+)
+
+
+SERVER_SOFTWARE = "Python/{0[0]}.{0[1]} aiohttp/{1}".format(
+ sys.version_info, __version__
+) # type: str
+
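+# e.g. SERVER_SOFTWARE == "Python/3.8 aiohttp/3.7.4" (the exact values
+# depend on the running interpreter and the vendored aiohttp version)
+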
+RESPONSES = (
+ http.server.BaseHTTPRequestHandler.responses
+) # type: Mapping[int, Tuple[str, str]]
diff --git a/third_party/python/aiohttp/aiohttp/http_exceptions.py b/third_party/python/aiohttp/aiohttp/http_exceptions.py
new file mode 100644
index 0000000000..c885f80f32
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/http_exceptions.py
@@ -0,0 +1,105 @@
+"""Low-level http related exceptions."""
+
+
+from typing import Optional, Union
+
+from .typedefs import _CIMultiDict
+
+__all__ = ("HttpProcessingError",)
+
+
+class HttpProcessingError(Exception):
+ """HTTP error.
+
+ Shortcut for raising HTTP errors with custom code, message and headers.
+
+ code: HTTP Error code.
+ message: (optional) Error message.
+ headers: (optional) Headers to be sent in response, a list of pairs
+ """
+
+ code = 0
+ message = ""
+ headers = None
+
+ def __init__(
+ self,
+ *,
+ code: Optional[int] = None,
+ message: str = "",
+ headers: Optional[_CIMultiDict] = None,
+ ) -> None:
+ if code is not None:
+ self.code = code
+ self.headers = headers
+ self.message = message
+
+ def __str__(self) -> str:
+ return f"{self.code}, message={self.message!r}"
+
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__}: {self}>"
+
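+# e.g. str(HttpProcessingError(code=404, message="Not Found"))
+# evaluates to "404, message='Not Found'"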
+
+class BadHttpMessage(HttpProcessingError):
+
+ code = 400
+ message = "Bad Request"
+
+ def __init__(self, message: str, *, headers: Optional[_CIMultiDict] = None) -> None:
+ super().__init__(message=message, headers=headers)
+ self.args = (message,)
+
+
+class HttpBadRequest(BadHttpMessage):
+
+ code = 400
+ message = "Bad Request"
+
+
+class PayloadEncodingError(BadHttpMessage):
+ """Base class for payload errors"""
+
+
+class ContentEncodingError(PayloadEncodingError):
+ """Content encoding error."""
+
+
+class TransferEncodingError(PayloadEncodingError):
+ """transfer encoding error."""
+
+
+class ContentLengthError(PayloadEncodingError):
+ """Not enough data for satisfy content length header."""
+
+
+class LineTooLong(BadHttpMessage):
+ def __init__(
+ self, line: str, limit: str = "Unknown", actual_size: str = "Unknown"
+ ) -> None:
+ super().__init__(
+ f"Got more than {limit} bytes ({actual_size}) when reading {line}."
+ )
+ self.args = (line, limit, actual_size)
+
+
+class InvalidHeader(BadHttpMessage):
+ def __init__(self, hdr: Union[bytes, str]) -> None:
+ if isinstance(hdr, bytes):
+ hdr = hdr.decode("utf-8", "surrogateescape")
+ super().__init__(f"Invalid HTTP Header: {hdr}")
+ self.hdr = hdr
+ self.args = (hdr,)
+
+
+class BadStatusLine(BadHttpMessage):
+ def __init__(self, line: str = "") -> None:
+ if not isinstance(line, str):
+ line = repr(line)
+ super().__init__(f"Bad status line {line!r}")
+ self.args = (line,)
+ self.line = line
+
+
+class InvalidURLError(BadHttpMessage):
+ pass
diff --git a/third_party/python/aiohttp/aiohttp/http_parser.py b/third_party/python/aiohttp/aiohttp/http_parser.py
new file mode 100644
index 0000000000..71ba815ae6
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/http_parser.py
@@ -0,0 +1,901 @@
+import abc
+import asyncio
+import collections
+import re
+import string
+import zlib
+from enum import IntEnum
+from typing import Any, List, Optional, Tuple, Type, Union
+
+from multidict import CIMultiDict, CIMultiDictProxy, istr
+from yarl import URL
+
+from . import hdrs
+from .base_protocol import BaseProtocol
+from .helpers import NO_EXTENSIONS, BaseTimerContext
+from .http_exceptions import (
+ BadStatusLine,
+ ContentEncodingError,
+ ContentLengthError,
+ InvalidHeader,
+ LineTooLong,
+ TransferEncodingError,
+)
+from .http_writer import HttpVersion, HttpVersion10
+from .log import internal_logger
+from .streams import EMPTY_PAYLOAD, StreamReader
+from .typedefs import RawHeaders
+
+try:
+ import brotli
+
+ HAS_BROTLI = True
+except ImportError: # pragma: no cover
+ HAS_BROTLI = False
+
+
+__all__ = (
+ "HeadersParser",
+ "HttpParser",
+ "HttpRequestParser",
+ "HttpResponseParser",
+ "RawRequestMessage",
+ "RawResponseMessage",
+)
+
+ASCIISET = set(string.printable)
+
+# See https://tools.ietf.org/html/rfc7230#section-3.1.1
+# and https://tools.ietf.org/html/rfc7230#appendix-B
+#
+# method = token
+# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+# "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+# token = 1*tchar
+METHRE = re.compile(r"[!#$%&'*+\-.^_`|~0-9A-Za-z]+")
+VERSRE = re.compile(r"HTTP/(\d+).(\d+)")
+HDRRE = re.compile(rb"[\x00-\x1F\x7F()<>@,;:\[\]={} \t\\\\\"]")
+
+RawRequestMessage = collections.namedtuple(
+ "RawRequestMessage",
+ [
+ "method",
+ "path",
+ "version",
+ "headers",
+ "raw_headers",
+ "should_close",
+ "compression",
+ "upgrade",
+ "chunked",
+ "url",
+ ],
+)
+
+RawResponseMessage = collections.namedtuple(
+ "RawResponseMessage",
+ [
+ "version",
+ "code",
+ "reason",
+ "headers",
+ "raw_headers",
+ "should_close",
+ "compression",
+ "upgrade",
+ "chunked",
+ ],
+)
+
+
+class ParseState(IntEnum):
+
+ PARSE_NONE = 0
+ PARSE_LENGTH = 1
+ PARSE_CHUNKED = 2
+ PARSE_UNTIL_EOF = 3
+
+
+class ChunkState(IntEnum):
+ PARSE_CHUNKED_SIZE = 0
+ PARSE_CHUNKED_CHUNK = 1
+ PARSE_CHUNKED_CHUNK_EOF = 2
+ PARSE_MAYBE_TRAILERS = 3
+ PARSE_TRAILERS = 4
+
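+# A chunked body such as b"4\r\nWiki\r\n0\r\n\r\n" walks the states:
+# CHUNKED_SIZE ("4") -> CHUNKED_CHUNK ("Wiki") -> CHUNKED_CHUNK_EOF (CRLF)
+# -> CHUNKED_SIZE ("0") -> MAYBE_TRAILERS -> final CRLF ends the payload.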
+
+class HeadersParser:
+ def __init__(
+ self,
+ max_line_size: int = 8190,
+ max_headers: int = 32768,
+ max_field_size: int = 8190,
+ ) -> None:
+ self.max_line_size = max_line_size
+ self.max_headers = max_headers
+ self.max_field_size = max_field_size
+
+ def parse_headers(
+ self, lines: List[bytes]
+ ) -> Tuple["CIMultiDictProxy[str]", RawHeaders]:
+ headers = CIMultiDict() # type: CIMultiDict[str]
+ raw_headers = []
+
+ lines_idx = 1
+ line = lines[1]
+ line_count = len(lines)
+
+ while line:
+ # Parse initial header name : value pair.
+ try:
+ bname, bvalue = line.split(b":", 1)
+ except ValueError:
+ raise InvalidHeader(line) from None
+
+ bname = bname.strip(b" \t")
+ bvalue = bvalue.lstrip()
+ if HDRRE.search(bname):
+ raise InvalidHeader(bname)
+ if len(bname) > self.max_field_size:
+ raise LineTooLong(
+ "request header name {}".format(
+ bname.decode("utf8", "xmlcharrefreplace")
+ ),
+ str(self.max_field_size),
+ str(len(bname)),
+ )
+
+ header_length = len(bvalue)
+
+ # next line
+ lines_idx += 1
+ line = lines[lines_idx]
+
+ # consume continuation lines
+ continuation = line and line[0] in (32, 9) # (' ', '\t')
+
+ if continuation:
+ bvalue_lst = [bvalue]
+ while continuation:
+ header_length += len(line)
+ if header_length > self.max_field_size:
+ raise LineTooLong(
+ "request header field {}".format(
+ bname.decode("utf8", "xmlcharrefreplace")
+ ),
+ str(self.max_field_size),
+ str(header_length),
+ )
+ bvalue_lst.append(line)
+
+ # next line
+ lines_idx += 1
+ if lines_idx < line_count:
+ line = lines[lines_idx]
+ if line:
+ continuation = line[0] in (32, 9) # (' ', '\t')
+ else:
+ line = b""
+ break
+ bvalue = b"".join(bvalue_lst)
+ else:
+ if header_length > self.max_field_size:
+ raise LineTooLong(
+ "request header field {}".format(
+ bname.decode("utf8", "xmlcharrefreplace")
+ ),
+ str(self.max_field_size),
+ str(header_length),
+ )
+
+ bvalue = bvalue.strip()
+ name = bname.decode("utf-8", "surrogateescape")
+ value = bvalue.decode("utf-8", "surrogateescape")
+
+ headers.add(name, value)
+ raw_headers.append((bname, bvalue))
+
+ return (CIMultiDictProxy(headers), tuple(raw_headers))
+
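+# e.g. HeadersParser().parse_headers([b"GET / HTTP/1.1",
+#                                     b"Host: example.com", b""])
+# returns (CIMultiDictProxy({'Host': 'example.com'}),
+#          ((b"Host", b"example.com"),)); lines[0] is skipped as the
+# start-line and the trailing empty line stops the loop.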
+
+class HttpParser(abc.ABC):
+ def __init__(
+ self,
+ protocol: Optional[BaseProtocol] = None,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ limit: int = 2 ** 16,
+ max_line_size: int = 8190,
+ max_headers: int = 32768,
+ max_field_size: int = 8190,
+ timer: Optional[BaseTimerContext] = None,
+ code: Optional[int] = None,
+ method: Optional[str] = None,
+ readall: bool = False,
+ payload_exception: Optional[Type[BaseException]] = None,
+ response_with_body: bool = True,
+ read_until_eof: bool = False,
+ auto_decompress: bool = True,
+ ) -> None:
+ self.protocol = protocol
+ self.loop = loop
+ self.max_line_size = max_line_size
+ self.max_headers = max_headers
+ self.max_field_size = max_field_size
+ self.timer = timer
+ self.code = code
+ self.method = method
+ self.readall = readall
+ self.payload_exception = payload_exception
+ self.response_with_body = response_with_body
+ self.read_until_eof = read_until_eof
+
+ self._lines = [] # type: List[bytes]
+ self._tail = b""
+ self._upgraded = False
+ self._payload = None
+ self._payload_parser = None # type: Optional[HttpPayloadParser]
+ self._auto_decompress = auto_decompress
+ self._limit = limit
+ self._headers_parser = HeadersParser(max_line_size, max_headers, max_field_size)
+
+ @abc.abstractmethod
+ def parse_message(self, lines: List[bytes]) -> Any:
+ pass
+
+ def feed_eof(self) -> Any:
+ if self._payload_parser is not None:
+ self._payload_parser.feed_eof()
+ self._payload_parser = None
+ else:
+ # try to extract partial message
+ if self._tail:
+ self._lines.append(self._tail)
+
+ if self._lines:
+ if self._lines[-1] != b"\r\n":  # compare bytes to bytes
+ self._lines.append(b"")
+ try:
+ return self.parse_message(self._lines)
+ except Exception:
+ return None
+
+ def feed_data(
+ self,
+ data: bytes,
+ SEP: bytes = b"\r\n",
+ EMPTY: bytes = b"",
+ CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH,
+ METH_CONNECT: str = hdrs.METH_CONNECT,
+ SEC_WEBSOCKET_KEY1: istr = hdrs.SEC_WEBSOCKET_KEY1,
+ ) -> Tuple[List[Any], bool, bytes]:
+
+ messages = []
+
+ if self._tail:
+ data, self._tail = self._tail + data, b""
+
+ data_len = len(data)
+ start_pos = 0
+ loop = self.loop
+
+ while start_pos < data_len:
+
+ # read HTTP message (request/response line + headers), \r\n\r\n
+ # and split by lines
+ if self._payload_parser is None and not self._upgraded:
+ pos = data.find(SEP, start_pos)
+ # consume \r\n
+ if pos == start_pos and not self._lines:
+ start_pos = pos + 2
+ continue
+
+ if pos >= start_pos:
+ # line found
+ self._lines.append(data[start_pos:pos])
+ start_pos = pos + 2
+
+ # \r\n\r\n found
+ if self._lines[-1] == EMPTY:
+ try:
+ msg = self.parse_message(self._lines)
+ finally:
+ self._lines.clear()
+
+ # payload length
+ length = msg.headers.get(CONTENT_LENGTH)
+ if length is not None:
+ try:
+ length = int(length)
+ except ValueError:
+ raise InvalidHeader(CONTENT_LENGTH)
+ if length < 0:
+ raise InvalidHeader(CONTENT_LENGTH)
+
+ # do not support old websocket spec
+ if SEC_WEBSOCKET_KEY1 in msg.headers:
+ raise InvalidHeader(SEC_WEBSOCKET_KEY1)
+
+ self._upgraded = msg.upgrade
+
+ method = getattr(msg, "method", self.method)
+
+ assert self.protocol is not None
+ # calculate payload
+ if (
+ (length is not None and length > 0)
+ or msg.chunked
+ and not msg.upgrade
+ ):
+ payload = StreamReader(
+ self.protocol,
+ timer=self.timer,
+ loop=loop,
+ limit=self._limit,
+ )
+ payload_parser = HttpPayloadParser(
+ payload,
+ length=length,
+ chunked=msg.chunked,
+ method=method,
+ compression=msg.compression,
+ code=self.code,
+ readall=self.readall,
+ response_with_body=self.response_with_body,
+ auto_decompress=self._auto_decompress,
+ )
+ if not payload_parser.done:
+ self._payload_parser = payload_parser
+ elif method == METH_CONNECT:
+ payload = StreamReader(
+ self.protocol,
+ timer=self.timer,
+ loop=loop,
+ limit=self._limit,
+ )
+ self._upgraded = True
+ self._payload_parser = HttpPayloadParser(
+ payload,
+ method=msg.method,
+ compression=msg.compression,
+ readall=True,
+ auto_decompress=self._auto_decompress,
+ )
+ else:
+ if (
+ getattr(msg, "code", 100) >= 199
+ and length is None
+ and self.read_until_eof
+ ):
+ payload = StreamReader(
+ self.protocol,
+ timer=self.timer,
+ loop=loop,
+ limit=self._limit,
+ )
+ payload_parser = HttpPayloadParser(
+ payload,
+ length=length,
+ chunked=msg.chunked,
+ method=method,
+ compression=msg.compression,
+ code=self.code,
+ readall=True,
+ response_with_body=self.response_with_body,
+ auto_decompress=self._auto_decompress,
+ )
+ if not payload_parser.done:
+ self._payload_parser = payload_parser
+ else:
+ payload = EMPTY_PAYLOAD # type: ignore
+
+ messages.append((msg, payload))
+ else:
+ self._tail = data[start_pos:]
+ data = EMPTY
+ break
+
+ # no parser, just store
+ elif self._payload_parser is None and self._upgraded:
+ assert not self._lines
+ break
+
+ # feed payload
+ elif data and start_pos < data_len:
+ assert not self._lines
+ assert self._payload_parser is not None
+ try:
+ eof, data = self._payload_parser.feed_data(data[start_pos:])
+ except BaseException as exc:
+ if self.payload_exception is not None:
+ self._payload_parser.payload.set_exception(
+ self.payload_exception(str(exc))
+ )
+ else:
+ self._payload_parser.payload.set_exception(exc)
+
+ eof = True
+ data = b""
+
+ if eof:
+ start_pos = 0
+ data_len = len(data)
+ self._payload_parser = None
+ continue
+ else:
+ break
+
+ if data and start_pos < data_len:
+ data = data[start_pos:]
+ else:
+ data = EMPTY
+
+ return messages, self._upgraded, data
+
+ def parse_headers(
+ self, lines: List[bytes]
+ ) -> Tuple[
+ "CIMultiDictProxy[str]", RawHeaders, Optional[bool], Optional[str], bool, bool
+ ]:
+ """Parses RFC 5322 headers from a stream.
+
+ Line continuations are supported. Returns list of header name
+ and value pairs. Header name is in upper case.
+ """
+ headers, raw_headers = self._headers_parser.parse_headers(lines)
+ close_conn = None
+ encoding = None
+ upgrade = False
+ chunked = False
+
+ # keep-alive
+ conn = headers.get(hdrs.CONNECTION)
+ if conn:
+ v = conn.lower()
+ if v == "close":
+ close_conn = True
+ elif v == "keep-alive":
+ close_conn = False
+ elif v == "upgrade":
+ upgrade = True
+
+ # encoding
+ enc = headers.get(hdrs.CONTENT_ENCODING)
+ if enc:
+ enc = enc.lower()
+ if enc in ("gzip", "deflate", "br"):
+ encoding = enc
+
+ # chunking
+ te = headers.get(hdrs.TRANSFER_ENCODING)
+ if te and "chunked" in te.lower():
+ chunked = True
+
+ return (headers, raw_headers, close_conn, encoding, upgrade, chunked)
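+
+ # For example, headers carrying "Connection: keep-alive",
+ # "Content-Encoding: gzip" and "Transfer-Encoding: chunked" yield
+ # close_conn=False, encoding="gzip", upgrade=False and chunked=True.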
+
+ def set_upgraded(self, val: bool) -> None:
+ """Set connection upgraded (to websocket) mode.
+ :param bool val: new state.
+ """
+ self._upgraded = val
+
+
+class HttpRequestParser(HttpParser):
+ """Read request status line. Exception .http_exceptions.BadStatusLine
+ could be raised in case of any errors in status line.
+ Returns RawRequestMessage.
+ """
+
+ def parse_message(self, lines: List[bytes]) -> Any:
+ # request line
+ line = lines[0].decode("utf-8", "surrogateescape")
+ try:
+ method, path, version = line.split(None, 2)
+ except ValueError:
+ raise BadStatusLine(line) from None
+
+ if len(path) > self.max_line_size:
+ raise LineTooLong(
+ "Status line is too long", str(self.max_line_size), str(len(path))
+ )
+
+ path_part, _hash_separator, url_fragment = path.partition("#")
+ path_part, _question_mark_separator, qs_part = path_part.partition("?")
+
+ # method
+ if not METHRE.match(method):
+ raise BadStatusLine(method)
+
+ # version
+ try:
+ if version.startswith("HTTP/"):
+ n1, n2 = version[5:].split(".", 1)
+ version_o = HttpVersion(int(n1), int(n2))
+ else:
+ raise BadStatusLine(version)
+ except Exception:
+ raise BadStatusLine(version)
+
+ # read headers
+ (
+ headers,
+ raw_headers,
+ close,
+ compression,
+ upgrade,
+ chunked,
+ ) = self.parse_headers(lines)
+
+ if close is None: # no Connection header in the request
+ if version_o <= HttpVersion10: # HTTP/1.0 closes by default
+ close = True
+ else: # HTTP/1.1 keeps alive by default
+ close = False
+
+ return RawRequestMessage(
+ method,
+ path,
+ version_o,
+ headers,
+ raw_headers,
+ close,
+ compression,
+ upgrade,
+ chunked,
+ # NOTE: `yarl.URL.build()` is used to mimic what the Cython-based
+ # NOTE: parser does, otherwise it results into the same
+ # NOTE: HTTP Request-Line input producing different
+ # NOTE: `yarl.URL()` objects
+ URL.build(
+ path=path_part,
+ query_string=qs_part,
+ fragment=url_fragment,
+ encoded=True,
+ ),
+ )
+
+
+class HttpResponseParser(HttpParser):
+ """Read response status line and headers.
+
+ BadStatusLine could be raised in case of any errors in status line.
+ Returns RawResponseMessage"""
+
+ def parse_message(self, lines: List[bytes]) -> Any:
+ line = lines[0].decode("utf-8", "surrogateescape")
+ try:
+ version, status = line.split(None, 1)
+ except ValueError:
+ raise BadStatusLine(line) from None
+
+ try:
+ status, reason = status.split(None, 1)
+ except ValueError:
+ reason = ""
+
+ if len(reason) > self.max_line_size:
+ raise LineTooLong(
+ "Status line is too long", str(self.max_line_size), str(len(reason))
+ )
+
+ # version
+ match = VERSRE.match(version)
+ if match is None:
+ raise BadStatusLine(line)
+ version_o = HttpVersion(int(match.group(1)), int(match.group(2)))
+
+ # The status code is a three-digit number
+ try:
+ status_i = int(status)
+ except ValueError:
+ raise BadStatusLine(line) from None
+
+ if status_i > 999:
+ raise BadStatusLine(line)
+
+ # read headers
+ (
+ headers,
+ raw_headers,
+ close,
+ compression,
+ upgrade,
+ chunked,
+ ) = self.parse_headers(lines)
+
+ if close is None:
+ close = version_o <= HttpVersion10
+
+ return RawResponseMessage(
+ version_o,
+ status_i,
+ reason.strip(),
+ headers,
+ raw_headers,
+ close,
+ compression,
+ upgrade,
+ chunked,
+ )
+
+
+class HttpPayloadParser:
+ def __init__(
+ self,
+ payload: StreamReader,
+ length: Optional[int] = None,
+ chunked: bool = False,
+ compression: Optional[str] = None,
+ code: Optional[int] = None,
+ method: Optional[str] = None,
+ readall: bool = False,
+ response_with_body: bool = True,
+ auto_decompress: bool = True,
+ ) -> None:
+ self._length = 0
+ self._type = ParseState.PARSE_NONE
+ self._chunk = ChunkState.PARSE_CHUNKED_SIZE
+ self._chunk_size = 0
+ self._chunk_tail = b""
+ self._auto_decompress = auto_decompress
+ self.done = False
+
+ # payload decompression wrapper
+ if response_with_body and compression and self._auto_decompress:
+ real_payload = DeflateBuffer(
+ payload, compression
+ ) # type: Union[StreamReader, DeflateBuffer]
+ else:
+ real_payload = payload
+
+ # payload parser
+ if not response_with_body:
+ # don't parse payload if it's not expected to be received
+ self._type = ParseState.PARSE_NONE
+ real_payload.feed_eof()
+ self.done = True
+
+ elif chunked:
+ self._type = ParseState.PARSE_CHUNKED
+ elif length is not None:
+ self._type = ParseState.PARSE_LENGTH
+ self._length = length
+ if self._length == 0:
+ real_payload.feed_eof()
+ self.done = True
+ else:
+ if readall and code != 204:
+ self._type = ParseState.PARSE_UNTIL_EOF
+ elif method in ("PUT", "POST"):
+ internal_logger.warning( # pragma: no cover
+ "Content-Length or Transfer-Encoding header is required"
+ )
+ self._type = ParseState.PARSE_NONE
+ real_payload.feed_eof()
+ self.done = True
+
+ self.payload = real_payload
+
+ def feed_eof(self) -> None:
+ if self._type == ParseState.PARSE_UNTIL_EOF:
+ self.payload.feed_eof()
+ elif self._type == ParseState.PARSE_LENGTH:
+ raise ContentLengthError(
+ "Not enough data to satisfy the Content-Length header."
+ )
+ elif self._type == ParseState.PARSE_CHUNKED:
+ raise TransferEncodingError(
+ "Not enough data to satisfy the chunked Transfer-Encoding."
+ )
+
+ def feed_data(
+ self, chunk: bytes, SEP: bytes = b"\r\n", CHUNK_EXT: bytes = b";"
+ ) -> Tuple[bool, bytes]:
+ # Read specified amount of bytes
+ if self._type == ParseState.PARSE_LENGTH:
+ required = self._length
+ chunk_len = len(chunk)
+
+ if required >= chunk_len:
+ self._length = required - chunk_len
+ self.payload.feed_data(chunk, chunk_len)
+ if self._length == 0:
+ self.payload.feed_eof()
+ return True, b""
+ else:
+ self._length = 0
+ self.payload.feed_data(chunk[:required], required)
+ self.payload.feed_eof()
+ return True, chunk[required:]
+
+ # Chunked transfer encoding parser
+ elif self._type == ParseState.PARSE_CHUNKED:
+ if self._chunk_tail:
+ chunk = self._chunk_tail + chunk
+ self._chunk_tail = b""
+
+ while chunk:
+
+ # read next chunk size
+ if self._chunk == ChunkState.PARSE_CHUNKED_SIZE:
+ pos = chunk.find(SEP)
+ if pos >= 0:
+ i = chunk.find(CHUNK_EXT, 0, pos)
+ if i >= 0:
+ size_b = chunk[:i] # strip chunk-extensions
+ else:
+ size_b = chunk[:pos]
+
+ try:
+ size = int(bytes(size_b), 16)
+ except ValueError:
+ exc = TransferEncodingError(
+ chunk[:pos].decode("ascii", "surrogateescape")
+ )
+ self.payload.set_exception(exc)
+ raise exc from None
+
+ chunk = chunk[pos + 2 :]
+ if size == 0: # eof marker
+ self._chunk = ChunkState.PARSE_MAYBE_TRAILERS
+ else:
+ self._chunk = ChunkState.PARSE_CHUNKED_CHUNK
+ self._chunk_size = size
+ self.payload.begin_http_chunk_receiving()
+ else:
+ self._chunk_tail = chunk
+ return False, b""
+
+ # read chunk and feed buffer
+ if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK:
+ required = self._chunk_size
+ chunk_len = len(chunk)
+
+ if required > chunk_len:
+ self._chunk_size = required - chunk_len
+ self.payload.feed_data(chunk, chunk_len)
+ return False, b""
+ else:
+ self._chunk_size = 0
+ self.payload.feed_data(chunk[:required], required)
+ chunk = chunk[required:]
+ self._chunk = ChunkState.PARSE_CHUNKED_CHUNK_EOF
+ self.payload.end_http_chunk_receiving()
+
+ # toss the CRLF at the end of the chunk
+ if self._chunk == ChunkState.PARSE_CHUNKED_CHUNK_EOF:
+ if chunk[:2] == SEP:
+ chunk = chunk[2:]
+ self._chunk = ChunkState.PARSE_CHUNKED_SIZE
+ else:
+ self._chunk_tail = chunk
+ return False, b""
+
+ # if the stream does not contain trailers, after 0\r\n
+ # we should get another \r\n; otherwise
+ # trailers need to be skipped until \r\n\r\n
+ if self._chunk == ChunkState.PARSE_MAYBE_TRAILERS:
+ head = chunk[:2]
+ if head == SEP:
+ # end of stream
+ self.payload.feed_eof()
+ return True, chunk[2:]
+ # The CRLF (or just its LF) may not have been received yet.
+ # CRLF is expected at the very start of the next read;
+ # otherwise trailers follow. The final CRLF that marks the
+ # end of the response may not be contained in the same TCP
+ # segment that delivered the size indicator.
+ if not head:
+ return False, b""
+ if head == SEP[:1]:
+ self._chunk_tail = head
+ return False, b""
+ self._chunk = ChunkState.PARSE_TRAILERS
+
+ # read and discard trailer up to the CRLF terminator
+ if self._chunk == ChunkState.PARSE_TRAILERS:
+ pos = chunk.find(SEP)
+ if pos >= 0:
+ chunk = chunk[pos + 2 :]
+ self._chunk = ChunkState.PARSE_MAYBE_TRAILERS
+ else:
+ self._chunk_tail = chunk
+ return False, b""
+
+ # Read all bytes until eof
+ elif self._type == ParseState.PARSE_UNTIL_EOF:
+ self.payload.feed_data(chunk, len(chunk))
+
+ return False, b""
+
+
+class DeflateBuffer:
+ """DeflateStream decompress stream and feed data into specified stream."""
+
+ def __init__(self, out: StreamReader, encoding: Optional[str]) -> None:
+ self.out = out
+ self.size = 0
+ self.encoding = encoding
+ self._started_decoding = False
+
+ if encoding == "br":
+ if not HAS_BROTLI: # pragma: no cover
+ raise ContentEncodingError(
+ "Can not decode content-encoding: brotli (br). "
+ "Please install `brotlipy`"
+ )
+ self.decompressor = brotli.Decompressor()
+ else:
+ zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else zlib.MAX_WBITS
+ self.decompressor = zlib.decompressobj(wbits=zlib_mode)
+
+ def set_exception(self, exc: BaseException) -> None:
+ self.out.set_exception(exc)
+
+ def feed_data(self, chunk: bytes, size: int) -> None:
+ if not size:
+ return
+
+ self.size += size
+
+ # RFC1950
+ # bits 0..3 = CM = 0b1000 = 8 = "deflate"
+ # bits 4..7 = CINFO = 1..7 = window size.
+ if (
+ not self._started_decoding
+ and self.encoding == "deflate"
+ and chunk[0] & 0xF != 8
+ ):
+ # Change the decoder to decompress incorrectly compressed data
+ # Actually we should issue a warning about non-RFC-compliant data.
+ self.decompressor = zlib.decompressobj(wbits=-zlib.MAX_WBITS)
+
+ try:
+ chunk = self.decompressor.decompress(chunk)
+ except Exception:
+ raise ContentEncodingError(
+ "Can not decode content-encoding: %s" % self.encoding
+ )
+
+ self._started_decoding = True
+
+ if chunk:
+ self.out.feed_data(chunk, len(chunk))
+
+ def feed_eof(self) -> None:
+ chunk = self.decompressor.flush()
+
+ if chunk or self.size > 0:
+ self.out.feed_data(chunk, len(chunk))
+ if self.encoding == "deflate" and not self.decompressor.eof:
+ raise ContentEncodingError("deflate")
+
+ self.out.feed_eof()
+
+ def begin_http_chunk_receiving(self) -> None:
+ self.out.begin_http_chunk_receiving()
+
+ def end_http_chunk_receiving(self) -> None:
+ self.out.end_http_chunk_receiving()
+
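+# Sketch of the intended wiring: a DeflateBuffer wraps a StreamReader so
+# a gzip response body is transparently inflated.
+#
+#     buf = DeflateBuffer(stream_reader, "gzip")
+#     buf.feed_data(compressed_chunk, len(compressed_chunk))
+#     buf.feed_eof()   # flush the decompressor into the wrapped stream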
+
+HttpRequestParserPy = HttpRequestParser
+HttpResponseParserPy = HttpResponseParser
+RawRequestMessagePy = RawRequestMessage
+RawResponseMessagePy = RawResponseMessage
+
+try:
+ if not NO_EXTENSIONS:
+ from ._http_parser import ( # type: ignore
+ HttpRequestParser,
+ HttpResponseParser,
+ RawRequestMessage,
+ RawResponseMessage,
+ )
+
+ HttpRequestParserC = HttpRequestParser
+ HttpResponseParserC = HttpResponseParser
+ RawRequestMessageC = RawRequestMessage
+ RawResponseMessageC = RawResponseMessage
+except ImportError: # pragma: no cover
+ pass
diff --git a/third_party/python/aiohttp/aiohttp/http_websocket.py b/third_party/python/aiohttp/aiohttp/http_websocket.py
new file mode 100644
index 0000000000..5cdaeea43c
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/http_websocket.py
@@ -0,0 +1,698 @@
+"""WebSocket protocol versions 13 and 8."""
+
+import asyncio
+import collections
+import json
+import random
+import re
+import sys
+import zlib
+from enum import IntEnum
+from struct import Struct
+from typing import Any, Callable, List, Optional, Tuple, Union
+
+from .base_protocol import BaseProtocol
+from .helpers import NO_EXTENSIONS
+from .streams import DataQueue
+
+__all__ = (
+ "WS_CLOSED_MESSAGE",
+ "WS_CLOSING_MESSAGE",
+ "WS_KEY",
+ "WebSocketReader",
+ "WebSocketWriter",
+ "WSMessage",
+ "WebSocketError",
+ "WSMsgType",
+ "WSCloseCode",
+)
+
+
+class WSCloseCode(IntEnum):
+ OK = 1000
+ GOING_AWAY = 1001
+ PROTOCOL_ERROR = 1002
+ UNSUPPORTED_DATA = 1003
+ INVALID_TEXT = 1007
+ POLICY_VIOLATION = 1008
+ MESSAGE_TOO_BIG = 1009
+ MANDATORY_EXTENSION = 1010
+ INTERNAL_ERROR = 1011
+ SERVICE_RESTART = 1012
+ TRY_AGAIN_LATER = 1013
+
+
+ALLOWED_CLOSE_CODES = {int(i) for i in WSCloseCode}
+
+
+class WSMsgType(IntEnum):
+ # websocket spec types
+ CONTINUATION = 0x0
+ TEXT = 0x1
+ BINARY = 0x2
+ PING = 0x9
+ PONG = 0xA
+ CLOSE = 0x8
+
+ # aiohttp specific types
+ CLOSING = 0x100
+ CLOSED = 0x101
+ ERROR = 0x102
+
+ text = TEXT
+ binary = BINARY
+ ping = PING
+ pong = PONG
+ close = CLOSE
+ closing = CLOSING
+ closed = CLOSED
+ error = ERROR
+
+
+WS_KEY = b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
+
+
+UNPACK_LEN2 = Struct("!H").unpack_from
+UNPACK_LEN3 = Struct("!Q").unpack_from
+UNPACK_CLOSE_CODE = Struct("!H").unpack
+PACK_LEN1 = Struct("!BB").pack
+PACK_LEN2 = Struct("!BBH").pack
+PACK_LEN3 = Struct("!BBQ").pack
+PACK_CLOSE_CODE = Struct("!H").pack
+MSG_SIZE = 2 ** 14
+DEFAULT_LIMIT = 2 ** 16
+
+
+_WSMessageBase = collections.namedtuple("_WSMessageBase", ["type", "data", "extra"])
+
+
+class WSMessage(_WSMessageBase):
+ def json(self, *, loads: Callable[[Any], Any] = json.loads) -> Any:
+ """Return parsed JSON data.
+
+ .. versionadded:: 0.22
+ """
+ return loads(self.data)
+
+
+WS_CLOSED_MESSAGE = WSMessage(WSMsgType.CLOSED, None, None)
+WS_CLOSING_MESSAGE = WSMessage(WSMsgType.CLOSING, None, None)
+
+
+class WebSocketError(Exception):
+ """WebSocket protocol parser error."""
+
+ def __init__(self, code: int, message: str) -> None:
+ self.code = code
+ super().__init__(code, message)
+
+ def __str__(self) -> str:
+ return self.args[1]
+
+
+class WSHandshakeError(Exception):
+ """WebSocket protocol handshake error."""
+
+
+native_byteorder = sys.byteorder
+
+
+# Used by _websocket_mask_python
+_XOR_TABLE = [bytes(a ^ b for a in range(256)) for b in range(256)]
+
+
+def _websocket_mask_python(mask: bytes, data: bytearray) -> None:
+ """Websocket masking function.
+
+ `mask` is a `bytes` object of length 4; `data` is a `bytearray`
+ object of any length. The contents of `data` are masked with `mask`,
+ as specified in section 5.3 of RFC 6455.
+
+ Note that this function mutates the `data` argument.
+
+ This pure-python implementation may be replaced by an optimized
+ version when available.
+
+ """
+ assert isinstance(data, bytearray), data
+ assert len(mask) == 4, mask
+
+ if data:
+ a, b, c, d = (_XOR_TABLE[n] for n in mask)
+ data[::4] = data[::4].translate(a)
+ data[1::4] = data[1::4].translate(b)
+ data[2::4] = data[2::4].translate(c)
+ data[3::4] = data[3::4].translate(d)
+
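+# Masking is an involution: applying the same 4-byte mask twice restores
+# the original payload.
+#
+#     buf = bytearray(b"hello")
+#     _websocket_mask_python(b"\x01\x02\x03\x04", buf)   # masked in place
+#     _websocket_mask_python(b"\x01\x02\x03\x04", buf)   # buf == b"hello"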
+
+if NO_EXTENSIONS: # pragma: no cover
+ _websocket_mask = _websocket_mask_python
+else:
+ try:
+ from ._websocket import _websocket_mask_cython # type: ignore
+
+ _websocket_mask = _websocket_mask_cython
+ except ImportError: # pragma: no cover
+ _websocket_mask = _websocket_mask_python
+
+_WS_DEFLATE_TRAILING = bytes([0x00, 0x00, 0xFF, 0xFF])
+
+
+_WS_EXT_RE = re.compile(
+ r"^(?:;\s*(?:"
+ r"(server_no_context_takeover)|"
+ r"(client_no_context_takeover)|"
+ r"(server_max_window_bits(?:=(\d+))?)|"
+ r"(client_max_window_bits(?:=(\d+))?)))*$"
+)
+
+_WS_EXT_RE_SPLIT = re.compile(r"permessage-deflate([^,]+)?")
+
+
+def ws_ext_parse(extstr: Optional[str], isserver: bool = False) -> Tuple[int, bool]:
+ if not extstr:
+ return 0, False
+
+ compress = 0
+ notakeover = False
+ for ext in _WS_EXT_RE_SPLIT.finditer(extstr):
+ defext = ext.group(1)
+        # Return compress = 15 when we get a bare `permessage-deflate`
+ if not defext:
+ compress = 15
+ break
+ match = _WS_EXT_RE.match(defext)
+ if match:
+ compress = 15
+ if isserver:
+                # The server never fails to detect the compress handshake.
+                # It does not need to send max window bits to the client.
+ if match.group(4):
+ compress = int(match.group(4))
+                    # Group 3 must match if group 4 matches.
+                    # zlib does not support window bits 8; if the
+                    # compression level is unsupported, CONTINUE
+                    # to the next extension.
+ if compress > 15 or compress < 9:
+ compress = 0
+ continue
+ if match.group(1):
+ notakeover = True
+ # Ignore regex group 5 & 6 for client_max_window_bits
+ break
+ else:
+ if match.group(6):
+ compress = int(match.group(6))
+                    # Group 5 must match if group 6 matches.
+                    # zlib does not support window bits 8; if the
+                    # compression level is unsupported, FAIL the
+                    # parse process.
+ if compress > 15 or compress < 9:
+ raise WSHandshakeError("Invalid window size")
+ if match.group(2):
+ notakeover = True
+ # Ignore regex group 5 & 6 for client_max_window_bits
+ break
+        # Fail if we are on the client side and the extension did not match.
+ elif not isserver:
+ raise WSHandshakeError("Extension for deflate not supported" + ext.group(1))
+
+ return compress, notakeover
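+
+# Illustrative examples (assumption, not upstream code):
+#     ws_ext_parse("permessage-deflate")
+#     # -> (15, False)
+#     ws_ext_parse("permessage-deflate; server_no_context_takeover", isserver=True)
+#     # -> (15, True)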
+
+
+def ws_ext_gen(
+ compress: int = 15, isserver: bool = False, server_notakeover: bool = False
+) -> str:
+    # client_notakeover=False is not used for the server side.
+    # zlib does not support window bits 8.
+ if compress < 9 or compress > 15:
+        raise ValueError(
+            "Compress wbits must be between 9 and 15, "
+            "zlib does not support wbits=8"
+        )
+ enabledext = ["permessage-deflate"]
+ if not isserver:
+ enabledext.append("client_max_window_bits")
+
+ if compress < 15:
+ enabledext.append("server_max_window_bits=" + str(compress))
+ if server_notakeover:
+ enabledext.append("server_no_context_takeover")
+ # if client_notakeover:
+ # enabledext.append('client_no_context_takeover')
+ return "; ".join(enabledext)
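+
+# Illustrative example (assumption, not upstream code): a server response with
+# a reduced window and no context takeover:
+#     ws_ext_gen(compress=10, isserver=True, server_notakeover=True)
+#     # -> 'permessage-deflate; server_max_window_bits=10; server_no_context_takeover'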
+
+
+class WSParserState(IntEnum):
+ READ_HEADER = 1
+ READ_PAYLOAD_LENGTH = 2
+ READ_PAYLOAD_MASK = 3
+ READ_PAYLOAD = 4
+
+
+class WebSocketReader:
+ def __init__(
+ self, queue: DataQueue[WSMessage], max_msg_size: int, compress: bool = True
+ ) -> None:
+ self.queue = queue
+ self._max_msg_size = max_msg_size
+
+ self._exc = None # type: Optional[BaseException]
+ self._partial = bytearray()
+ self._state = WSParserState.READ_HEADER
+
+ self._opcode = None # type: Optional[int]
+ self._frame_fin = False
+ self._frame_opcode = None # type: Optional[int]
+ self._frame_payload = bytearray()
+
+ self._tail = b""
+ self._has_mask = False
+ self._frame_mask = None # type: Optional[bytes]
+ self._payload_length = 0
+ self._payload_length_flag = 0
+ self._compressed = None # type: Optional[bool]
+ self._decompressobj = None # type: Any # zlib.decompressobj actually
+ self._compress = compress
+
+ def feed_eof(self) -> None:
+ self.queue.feed_eof()
+
+ def feed_data(self, data: bytes) -> Tuple[bool, bytes]:
+ if self._exc:
+ return True, data
+
+ try:
+ return self._feed_data(data)
+ except Exception as exc:
+ self._exc = exc
+ self.queue.set_exception(exc)
+ return True, b""
+
+ def _feed_data(self, data: bytes) -> Tuple[bool, bytes]:
+ for fin, opcode, payload, compressed in self.parse_frame(data):
+ if compressed and not self._decompressobj:
+ self._decompressobj = zlib.decompressobj(wbits=-zlib.MAX_WBITS)
+ if opcode == WSMsgType.CLOSE:
+ if len(payload) >= 2:
+ close_code = UNPACK_CLOSE_CODE(payload[:2])[0]
+ if close_code < 3000 and close_code not in ALLOWED_CLOSE_CODES:
+ raise WebSocketError(
+ WSCloseCode.PROTOCOL_ERROR,
+ f"Invalid close code: {close_code}",
+ )
+ try:
+ close_message = payload[2:].decode("utf-8")
+ except UnicodeDecodeError as exc:
+ raise WebSocketError(
+ WSCloseCode.INVALID_TEXT, "Invalid UTF-8 text message"
+ ) from exc
+ msg = WSMessage(WSMsgType.CLOSE, close_code, close_message)
+ elif payload:
+ raise WebSocketError(
+ WSCloseCode.PROTOCOL_ERROR,
+ f"Invalid close frame: {fin} {opcode} {payload!r}",
+ )
+ else:
+ msg = WSMessage(WSMsgType.CLOSE, 0, "")
+
+ self.queue.feed_data(msg, 0)
+
+ elif opcode == WSMsgType.PING:
+ self.queue.feed_data(
+ WSMessage(WSMsgType.PING, payload, ""), len(payload)
+ )
+
+ elif opcode == WSMsgType.PONG:
+ self.queue.feed_data(
+ WSMessage(WSMsgType.PONG, payload, ""), len(payload)
+ )
+
+ elif (
+ opcode not in (WSMsgType.TEXT, WSMsgType.BINARY)
+ and self._opcode is None
+ ):
+ raise WebSocketError(
+ WSCloseCode.PROTOCOL_ERROR, f"Unexpected opcode={opcode!r}"
+ )
+ else:
+ # load text/binary
+ if not fin:
+ # got partial frame payload
+ if opcode != WSMsgType.CONTINUATION:
+ self._opcode = opcode
+ self._partial.extend(payload)
+ if self._max_msg_size and len(self._partial) >= self._max_msg_size:
+ raise WebSocketError(
+ WSCloseCode.MESSAGE_TOO_BIG,
+ "Message size {} exceeds limit {}".format(
+ len(self._partial), self._max_msg_size
+ ),
+ )
+ else:
+                    # the previous frame was not finished, so we
+                    # expect a continuation opcode
+ if self._partial:
+ if opcode != WSMsgType.CONTINUATION:
+ raise WebSocketError(
+ WSCloseCode.PROTOCOL_ERROR,
+ "The opcode in non-fin frame is expected "
+ "to be zero, got {!r}".format(opcode),
+ )
+
+ if opcode == WSMsgType.CONTINUATION:
+ assert self._opcode is not None
+ opcode = self._opcode
+ self._opcode = None
+
+ self._partial.extend(payload)
+ if self._max_msg_size and len(self._partial) >= self._max_msg_size:
+ raise WebSocketError(
+ WSCloseCode.MESSAGE_TOO_BIG,
+ "Message size {} exceeds limit {}".format(
+ len(self._partial), self._max_msg_size
+ ),
+ )
+
+                    # Decompression must be done after all frames of the
+                    # message have been received.
+ if compressed:
+ self._partial.extend(_WS_DEFLATE_TRAILING)
+ payload_merged = self._decompressobj.decompress(
+ self._partial, self._max_msg_size
+ )
+ if self._decompressobj.unconsumed_tail:
+ left = len(self._decompressobj.unconsumed_tail)
+ raise WebSocketError(
+ WSCloseCode.MESSAGE_TOO_BIG,
+ "Decompressed message size {} exceeds limit {}".format(
+ self._max_msg_size + left, self._max_msg_size
+ ),
+ )
+ else:
+ payload_merged = bytes(self._partial)
+
+ self._partial.clear()
+
+ if opcode == WSMsgType.TEXT:
+ try:
+ text = payload_merged.decode("utf-8")
+ self.queue.feed_data(
+ WSMessage(WSMsgType.TEXT, text, ""), len(text)
+ )
+ except UnicodeDecodeError as exc:
+ raise WebSocketError(
+ WSCloseCode.INVALID_TEXT, "Invalid UTF-8 text message"
+ ) from exc
+ else:
+ self.queue.feed_data(
+ WSMessage(WSMsgType.BINARY, payload_merged, ""),
+ len(payload_merged),
+ )
+
+ return False, b""
+
+ def parse_frame(
+ self, buf: bytes
+ ) -> List[Tuple[bool, Optional[int], bytearray, Optional[bool]]]:
+ """Return the next frame from the socket."""
+ frames = []
+ if self._tail:
+ buf, self._tail = self._tail + buf, b""
+
+ start_pos = 0
+ buf_length = len(buf)
+
+ while True:
+ # read header
+ if self._state == WSParserState.READ_HEADER:
+ if buf_length - start_pos >= 2:
+ data = buf[start_pos : start_pos + 2]
+ start_pos += 2
+ first_byte, second_byte = data
+
+ fin = (first_byte >> 7) & 1
+ rsv1 = (first_byte >> 6) & 1
+ rsv2 = (first_byte >> 5) & 1
+ rsv3 = (first_byte >> 4) & 1
+ opcode = first_byte & 0xF
+
+ # frame-fin = %x0 ; more frames of this message follow
+ # / %x1 ; final frame of this message
+ # frame-rsv1 = %x0 ;
+ # 1 bit, MUST be 0 unless negotiated otherwise
+ # frame-rsv2 = %x0 ;
+ # 1 bit, MUST be 0 unless negotiated otherwise
+ # frame-rsv3 = %x0 ;
+ # 1 bit, MUST be 0 unless negotiated otherwise
+ #
+ # Remove rsv1 from this test for deflate development
+ if rsv2 or rsv3 or (rsv1 and not self._compress):
+ raise WebSocketError(
+ WSCloseCode.PROTOCOL_ERROR,
+ "Received frame with non-zero reserved bits",
+ )
+
+ if opcode > 0x7 and fin == 0:
+ raise WebSocketError(
+ WSCloseCode.PROTOCOL_ERROR,
+ "Received fragmented control frame",
+ )
+
+ has_mask = (second_byte >> 7) & 1
+ length = second_byte & 0x7F
+
+ # Control frames MUST have a payload
+ # length of 125 bytes or less
+ if opcode > 0x7 and length > 125:
+ raise WebSocketError(
+ WSCloseCode.PROTOCOL_ERROR,
+ "Control frame payload cannot be " "larger than 125 bytes",
+ )
+
+                    # Set the compress status if the previous frame was final (FIN)
+                    # or if this is the first fragment of a message.
+                    # Raise an error if a non-first fragment arrives with rsv1 = 0x1.
+                    if self._frame_fin or self._compressed is None:
+                        self._compressed = bool(rsv1)
+ elif rsv1:
+ raise WebSocketError(
+ WSCloseCode.PROTOCOL_ERROR,
+ "Received frame with non-zero reserved bits",
+ )
+
+ self._frame_fin = bool(fin)
+ self._frame_opcode = opcode
+ self._has_mask = bool(has_mask)
+ self._payload_length_flag = length
+ self._state = WSParserState.READ_PAYLOAD_LENGTH
+ else:
+ break
+
+ # read payload length
+ if self._state == WSParserState.READ_PAYLOAD_LENGTH:
+ length = self._payload_length_flag
+ if length == 126:
+ if buf_length - start_pos >= 2:
+ data = buf[start_pos : start_pos + 2]
+ start_pos += 2
+ length = UNPACK_LEN2(data)[0]
+ self._payload_length = length
+ self._state = (
+ WSParserState.READ_PAYLOAD_MASK
+ if self._has_mask
+ else WSParserState.READ_PAYLOAD
+ )
+ else:
+ break
+ elif length > 126:
+ if buf_length - start_pos >= 8:
+ data = buf[start_pos : start_pos + 8]
+ start_pos += 8
+ length = UNPACK_LEN3(data)[0]
+ self._payload_length = length
+ self._state = (
+ WSParserState.READ_PAYLOAD_MASK
+ if self._has_mask
+ else WSParserState.READ_PAYLOAD
+ )
+ else:
+ break
+ else:
+ self._payload_length = length
+ self._state = (
+ WSParserState.READ_PAYLOAD_MASK
+ if self._has_mask
+ else WSParserState.READ_PAYLOAD
+ )
+
+ # read payload mask
+ if self._state == WSParserState.READ_PAYLOAD_MASK:
+ if buf_length - start_pos >= 4:
+ self._frame_mask = buf[start_pos : start_pos + 4]
+ start_pos += 4
+ self._state = WSParserState.READ_PAYLOAD
+ else:
+ break
+
+ if self._state == WSParserState.READ_PAYLOAD:
+ length = self._payload_length
+ payload = self._frame_payload
+
+ chunk_len = buf_length - start_pos
+ if length >= chunk_len:
+ self._payload_length = length - chunk_len
+ payload.extend(buf[start_pos:])
+ start_pos = buf_length
+ else:
+ self._payload_length = 0
+ payload.extend(buf[start_pos : start_pos + length])
+ start_pos = start_pos + length
+
+ if self._payload_length == 0:
+ if self._has_mask:
+ assert self._frame_mask is not None
+ _websocket_mask(self._frame_mask, payload)
+
+ frames.append(
+ (self._frame_fin, self._frame_opcode, payload, self._compressed)
+ )
+
+ self._frame_payload = bytearray()
+ self._state = WSParserState.READ_HEADER
+ else:
+ break
+
+ self._tail = buf[start_pos:]
+
+ return frames
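+
+    # Illustrative example (assumption, not upstream code): feeding the single
+    # unmasked text frame b"\x81\x05hello" into parse_frame() yields one
+    # (fin=True, opcode=WSMsgType.TEXT, payload=bytearray(b"hello"),
+    # compressed=False) tuple.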
+
+
+class WebSocketWriter:
+ def __init__(
+ self,
+ protocol: BaseProtocol,
+ transport: asyncio.Transport,
+ *,
+ use_mask: bool = False,
+ limit: int = DEFAULT_LIMIT,
+ random: Any = random.Random(),
+ compress: int = 0,
+ notakeover: bool = False,
+ ) -> None:
+ self.protocol = protocol
+ self.transport = transport
+ self.use_mask = use_mask
+ self.randrange = random.randrange
+ self.compress = compress
+ self.notakeover = notakeover
+ self._closing = False
+ self._limit = limit
+ self._output_size = 0
+ self._compressobj = None # type: Any # actually compressobj
+
+ async def _send_frame(
+ self, message: bytes, opcode: int, compress: Optional[int] = None
+ ) -> None:
+ """Send a frame over the websocket with message as its payload."""
+ if self._closing and not (opcode & WSMsgType.CLOSE):
+ raise ConnectionResetError("Cannot write to closing transport")
+
+ rsv = 0
+
+        # Only compress larger packets (currently disabled).
+        # Do small packets need to be compressed?
+        # if self.compress and opcode < 8 and len(message) > 124:
+ if (compress or self.compress) and opcode < 8:
+ if compress:
+                # Do not set self._compressobj when compression applies to this frame only
+ compressobj = zlib.compressobj(level=zlib.Z_BEST_SPEED, wbits=-compress)
+ else: # self.compress
+ if not self._compressobj:
+ self._compressobj = zlib.compressobj(
+ level=zlib.Z_BEST_SPEED, wbits=-self.compress
+ )
+ compressobj = self._compressobj
+
+ message = compressobj.compress(message)
+ message = message + compressobj.flush(
+ zlib.Z_FULL_FLUSH if self.notakeover else zlib.Z_SYNC_FLUSH
+ )
+ if message.endswith(_WS_DEFLATE_TRAILING):
+ message = message[:-4]
+ rsv = rsv | 0x40
+
+ msg_length = len(message)
+
+ use_mask = self.use_mask
+ if use_mask:
+ mask_bit = 0x80
+ else:
+ mask_bit = 0
+
+ if msg_length < 126:
+ header = PACK_LEN1(0x80 | rsv | opcode, msg_length | mask_bit)
+ elif msg_length < (1 << 16):
+ header = PACK_LEN2(0x80 | rsv | opcode, 126 | mask_bit, msg_length)
+ else:
+ header = PACK_LEN3(0x80 | rsv | opcode, 127 | mask_bit, msg_length)
+ if use_mask:
+ mask = self.randrange(0, 0xFFFFFFFF)
+ mask = mask.to_bytes(4, "big")
+ message = bytearray(message)
+ _websocket_mask(mask, message)
+ self._write(header + mask + message)
+ self._output_size += len(header) + len(mask) + len(message)
+ else:
+ if len(message) > MSG_SIZE:
+ self._write(header)
+ self._write(message)
+ else:
+ self._write(header + message)
+
+ self._output_size += len(header) + len(message)
+
+ if self._output_size > self._limit:
+ self._output_size = 0
+ await self.protocol._drain_helper()
+
+ def _write(self, data: bytes) -> None:
+ if self.transport is None or self.transport.is_closing():
+ raise ConnectionResetError("Cannot write to closing transport")
+ self.transport.write(data)
+
+ async def pong(self, message: bytes = b"") -> None:
+ """Send pong message."""
+ if isinstance(message, str):
+ message = message.encode("utf-8")
+ await self._send_frame(message, WSMsgType.PONG)
+
+ async def ping(self, message: bytes = b"") -> None:
+ """Send ping message."""
+ if isinstance(message, str):
+ message = message.encode("utf-8")
+ await self._send_frame(message, WSMsgType.PING)
+
+ async def send(
+ self,
+ message: Union[str, bytes],
+ binary: bool = False,
+ compress: Optional[int] = None,
+ ) -> None:
+ """Send a frame over the websocket with message as its payload."""
+ if isinstance(message, str):
+ message = message.encode("utf-8")
+ if binary:
+ await self._send_frame(message, WSMsgType.BINARY, compress)
+ else:
+ await self._send_frame(message, WSMsgType.TEXT, compress)
+
+ async def close(self, code: int = 1000, message: bytes = b"") -> None:
+ """Close the websocket, sending the specified code and message."""
+ if isinstance(message, str):
+ message = message.encode("utf-8")
+ try:
+ await self._send_frame(
+ PACK_CLOSE_CODE(code) + message, opcode=WSMsgType.CLOSE
+ )
+ finally:
+ self._closing = True
diff --git a/third_party/python/aiohttp/aiohttp/http_writer.py b/third_party/python/aiohttp/aiohttp/http_writer.py
new file mode 100644
index 0000000000..d261fc4e8d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/http_writer.py
@@ -0,0 +1,182 @@
+"""Http related parsers and protocol."""
+
+import asyncio
+import collections
+import zlib
+from typing import Any, Awaitable, Callable, Optional, Union # noqa
+
+from multidict import CIMultiDict
+
+from .abc import AbstractStreamWriter
+from .base_protocol import BaseProtocol
+from .helpers import NO_EXTENSIONS
+
+__all__ = ("StreamWriter", "HttpVersion", "HttpVersion10", "HttpVersion11")
+
+HttpVersion = collections.namedtuple("HttpVersion", ["major", "minor"])
+HttpVersion10 = HttpVersion(1, 0)
+HttpVersion11 = HttpVersion(1, 1)
+
+
+_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]]
+
+
+class StreamWriter(AbstractStreamWriter):
+ def __init__(
+ self,
+ protocol: BaseProtocol,
+ loop: asyncio.AbstractEventLoop,
+ on_chunk_sent: _T_OnChunkSent = None,
+ ) -> None:
+ self._protocol = protocol
+ self._transport = protocol.transport
+
+ self.loop = loop
+ self.length = None
+ self.chunked = False
+ self.buffer_size = 0
+ self.output_size = 0
+
+ self._eof = False
+ self._compress = None # type: Any
+ self._drain_waiter = None
+
+ self._on_chunk_sent = on_chunk_sent # type: _T_OnChunkSent
+
+ @property
+ def transport(self) -> Optional[asyncio.Transport]:
+ return self._transport
+
+ @property
+ def protocol(self) -> BaseProtocol:
+ return self._protocol
+
+ def enable_chunking(self) -> None:
+ self.chunked = True
+
+ def enable_compression(self, encoding: str = "deflate") -> None:
+ zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else zlib.MAX_WBITS
+ self._compress = zlib.compressobj(wbits=zlib_mode)
+
+ def _write(self, chunk: bytes) -> None:
+ size = len(chunk)
+ self.buffer_size += size
+ self.output_size += size
+
+ if self._transport is None or self._transport.is_closing():
+ raise ConnectionResetError("Cannot write to closing transport")
+ self._transport.write(chunk)
+
+ async def write(
+ self, chunk: bytes, *, drain: bool = True, LIMIT: int = 0x10000
+ ) -> None:
+ """Writes chunk of data to a stream.
+
+ write_eof() indicates end of stream.
+ writer can't be used after write_eof() method being called.
+ write() return drain future.
+ """
+ if self._on_chunk_sent is not None:
+ await self._on_chunk_sent(chunk)
+
+ if isinstance(chunk, memoryview):
+ if chunk.nbytes != len(chunk):
+ # just reshape it
+ chunk = chunk.cast("c")
+
+ if self._compress is not None:
+ chunk = self._compress.compress(chunk)
+ if not chunk:
+ return
+
+ if self.length is not None:
+ chunk_len = len(chunk)
+ if self.length >= chunk_len:
+ self.length = self.length - chunk_len
+ else:
+ chunk = chunk[: self.length]
+ self.length = 0
+ if not chunk:
+ return
+
+ if chunk:
+ if self.chunked:
+ chunk_len_pre = ("%x\r\n" % len(chunk)).encode("ascii")
+ chunk = chunk_len_pre + chunk + b"\r\n"
+
+ self._write(chunk)
+
+ if self.buffer_size > LIMIT and drain:
+ self.buffer_size = 0
+ await self.drain()
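+
+    # Illustrative framing note (assumption, not upstream code): with chunked
+    # encoding enabled, a 5-byte chunk goes on the wire as
+    # b"5\r\n" + b"hello" + b"\r\n", and write_eof() later terminates the
+    # stream with b"0\r\n\r\n".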
+
+ async def write_headers(
+ self, status_line: str, headers: "CIMultiDict[str]"
+ ) -> None:
+ """Write request/response status and headers."""
+ # status + headers
+ buf = _serialize_headers(status_line, headers)
+ self._write(buf)
+
+ async def write_eof(self, chunk: bytes = b"") -> None:
+ if self._eof:
+ return
+
+ if chunk and self._on_chunk_sent is not None:
+ await self._on_chunk_sent(chunk)
+
+ if self._compress:
+ if chunk:
+ chunk = self._compress.compress(chunk)
+
+ chunk = chunk + self._compress.flush()
+ if chunk and self.chunked:
+ chunk_len = ("%x\r\n" % len(chunk)).encode("ascii")
+ chunk = chunk_len + chunk + b"\r\n0\r\n\r\n"
+ else:
+ if self.chunked:
+ if chunk:
+ chunk_len = ("%x\r\n" % len(chunk)).encode("ascii")
+ chunk = chunk_len + chunk + b"\r\n0\r\n\r\n"
+ else:
+ chunk = b"0\r\n\r\n"
+
+ if chunk:
+ self._write(chunk)
+
+ await self.drain()
+
+ self._eof = True
+ self._transport = None
+
+ async def drain(self) -> None:
+ """Flush the write buffer.
+
+ The intended use is to write
+
+ await w.write(data)
+ await w.drain()
+ """
+ if self._protocol.transport is not None:
+ await self._protocol._drain_helper()
+
+
+def _py_serialize_headers(status_line: str, headers: "CIMultiDict[str]") -> bytes:
+ line = (
+ status_line
+ + "\r\n"
+ + "".join([k + ": " + v + "\r\n" for k, v in headers.items()])
+ )
+ return line.encode("utf-8") + b"\r\n"
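+
+# Illustrative example (assumption, not upstream code):
+#     _py_serialize_headers("HTTP/1.1 200 OK", CIMultiDict({"Content-Length": "0"}))
+#     # -> b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n'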
+
+
+_serialize_headers = _py_serialize_headers
+
+try:
+ import aiohttp._http_writer as _http_writer # type: ignore
+
+ _c_serialize_headers = _http_writer._serialize_headers
+ if not NO_EXTENSIONS:
+ _serialize_headers = _c_serialize_headers
+except ImportError:
+ pass
diff --git a/third_party/python/aiohttp/aiohttp/locks.py b/third_party/python/aiohttp/aiohttp/locks.py
new file mode 100644
index 0000000000..ce5b9c6f73
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/locks.py
@@ -0,0 +1,45 @@
+import asyncio
+import collections
+from typing import Any, Optional
+
+try:
+ from typing import Deque
+except ImportError:
+ from typing_extensions import Deque
+
+
+class EventResultOrError:
+ """
+    This class wraps the asyncio Event primitive, allowing the waiting
+    tasks to be woken either normally or with an exception.
+
+    Thanks to @vorpalsmith for the simple design.
+ """
+
+ def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop
+ self._exc = None # type: Optional[BaseException]
+ self._event = asyncio.Event()
+ self._waiters = collections.deque() # type: Deque[asyncio.Future[Any]]
+
+ def set(self, exc: Optional[BaseException] = None) -> None:
+ self._exc = exc
+ self._event.set()
+
+ async def wait(self) -> Any:
+ waiter = self._loop.create_task(self._event.wait())
+ self._waiters.append(waiter)
+ try:
+ val = await waiter
+ finally:
+ self._waiters.remove(waiter)
+
+ if self._exc is not None:
+ raise self._exc
+
+ return val
+
+ def cancel(self) -> None:
+ """ Cancel all waiters """
+ for waiter in self._waiters:
+ waiter.cancel()
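+
+
+# Illustrative usage (assumption, not upstream code): one task awaits
+# ev.wait() while another calls ev.set() to wake it normally, or
+# ev.set(exc) to raise `exc` in every waiter; ev.cancel() cancels all
+# pending waiters.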
diff --git a/third_party/python/aiohttp/aiohttp/log.py b/third_party/python/aiohttp/aiohttp/log.py
new file mode 100644
index 0000000000..3cecea2bac
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/log.py
@@ -0,0 +1,8 @@
+import logging
+
+access_logger = logging.getLogger("aiohttp.access")
+client_logger = logging.getLogger("aiohttp.client")
+internal_logger = logging.getLogger("aiohttp.internal")
+server_logger = logging.getLogger("aiohttp.server")
+web_logger = logging.getLogger("aiohttp.web")
+ws_logger = logging.getLogger("aiohttp.websocket")
diff --git a/third_party/python/aiohttp/aiohttp/multipart.py b/third_party/python/aiohttp/aiohttp/multipart.py
new file mode 100644
index 0000000000..9e1ca92d23
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/multipart.py
@@ -0,0 +1,957 @@
+import base64
+import binascii
+import json
+import re
+import uuid
+import warnings
+import zlib
+from collections import deque
+from types import TracebackType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ AsyncIterator,
+ Dict,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+from urllib.parse import parse_qsl, unquote, urlencode
+
+from multidict import CIMultiDict, CIMultiDictProxy, MultiMapping
+
+from .hdrs import (
+ CONTENT_DISPOSITION,
+ CONTENT_ENCODING,
+ CONTENT_LENGTH,
+ CONTENT_TRANSFER_ENCODING,
+ CONTENT_TYPE,
+)
+from .helpers import CHAR, TOKEN, parse_mimetype, reify
+from .http import HeadersParser
+from .payload import (
+ JsonPayload,
+ LookupError,
+ Order,
+ Payload,
+ StringPayload,
+ get_payload,
+ payload_type,
+)
+from .streams import StreamReader
+
+__all__ = (
+ "MultipartReader",
+ "MultipartWriter",
+ "BodyPartReader",
+ "BadContentDispositionHeader",
+ "BadContentDispositionParam",
+ "parse_content_disposition",
+ "content_disposition_filename",
+)
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from .client_reqrep import ClientResponse
+
+
+class BadContentDispositionHeader(RuntimeWarning):
+ pass
+
+
+class BadContentDispositionParam(RuntimeWarning):
+ pass
+
+
+def parse_content_disposition(
+ header: Optional[str],
+) -> Tuple[Optional[str], Dict[str, str]]:
+ def is_token(string: str) -> bool:
+ return bool(string) and TOKEN >= set(string)
+
+ def is_quoted(string: str) -> bool:
+ return string[0] == string[-1] == '"'
+
+ def is_rfc5987(string: str) -> bool:
+ return is_token(string) and string.count("'") == 2
+
+ def is_extended_param(string: str) -> bool:
+ return string.endswith("*")
+
+ def is_continuous_param(string: str) -> bool:
+ pos = string.find("*") + 1
+ if not pos:
+ return False
+ substring = string[pos:-1] if string.endswith("*") else string[pos:]
+ return substring.isdigit()
+
+ def unescape(text: str, *, chars: str = "".join(map(re.escape, CHAR))) -> str:
+ return re.sub(f"\\\\([{chars}])", "\\1", text)
+
+ if not header:
+ return None, {}
+
+ disptype, *parts = header.split(";")
+ if not is_token(disptype):
+ warnings.warn(BadContentDispositionHeader(header))
+ return None, {}
+
+ params = {} # type: Dict[str, str]
+ while parts:
+ item = parts.pop(0)
+
+ if "=" not in item:
+ warnings.warn(BadContentDispositionHeader(header))
+ return None, {}
+
+ key, value = item.split("=", 1)
+ key = key.lower().strip()
+ value = value.lstrip()
+
+ if key in params:
+ warnings.warn(BadContentDispositionHeader(header))
+ return None, {}
+
+ if not is_token(key):
+ warnings.warn(BadContentDispositionParam(item))
+ continue
+
+ elif is_continuous_param(key):
+ if is_quoted(value):
+ value = unescape(value[1:-1])
+ elif not is_token(value):
+ warnings.warn(BadContentDispositionParam(item))
+ continue
+
+ elif is_extended_param(key):
+ if is_rfc5987(value):
+ encoding, _, value = value.split("'", 2)
+ encoding = encoding or "utf-8"
+ else:
+ warnings.warn(BadContentDispositionParam(item))
+ continue
+
+ try:
+ value = unquote(value, encoding, "strict")
+ except UnicodeDecodeError: # pragma: nocover
+ warnings.warn(BadContentDispositionParam(item))
+ continue
+
+ else:
+ failed = True
+ if is_quoted(value):
+ failed = False
+ value = unescape(value[1:-1].lstrip("\\/"))
+ elif is_token(value):
+ failed = False
+ elif parts:
+                # maybe there is just a ';' in the filename; in any case this
+                # is a one-case fix, a proper fix requires redesigning the parser
+ _value = "{};{}".format(value, parts[0])
+ if is_quoted(_value):
+ parts.pop(0)
+ value = unescape(_value[1:-1].lstrip("\\/"))
+ failed = False
+
+ if failed:
+ warnings.warn(BadContentDispositionHeader(header))
+ return None, {}
+
+ params[key] = value
+
+ return disptype.lower(), params
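+
+# Illustrative example (assumption, not upstream code):
+#     parse_content_disposition('attachment; filename="report.pdf"')
+#     # -> ('attachment', {'filename': 'report.pdf'})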
+
+
+def content_disposition_filename(
+ params: Mapping[str, str], name: str = "filename"
+) -> Optional[str]:
+ name_suf = "%s*" % name
+ if not params:
+ return None
+ elif name_suf in params:
+ return params[name_suf]
+ elif name in params:
+ return params[name]
+ else:
+ parts = []
+ fnparams = sorted(
+ (key, value) for key, value in params.items() if key.startswith(name_suf)
+ )
+ for num, (key, value) in enumerate(fnparams):
+ _, tail = key.split("*", 1)
+ if tail.endswith("*"):
+ tail = tail[:-1]
+ if tail == str(num):
+ parts.append(value)
+ else:
+ break
+ if not parts:
+ return None
+ value = "".join(parts)
+ if "'" in value:
+ encoding, _, value = value.split("'", 2)
+ encoding = encoding or "utf-8"
+ return unquote(value, encoding, "strict")
+ return value
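+
+# Illustrative example (assumption, not upstream code): RFC 2231 continuations
+# are reassembled in order:
+#     content_disposition_filename({"filename*0": "rep", "filename*1": "ort.pdf"})
+#     # -> 'report.pdf'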
+
+
+class MultipartResponseWrapper:
+ """Wrapper around the MultipartReader.
+
+ It takes care about
+ underlying connection and close it when it needs in.
+ """
+
+ def __init__(
+ self,
+ resp: "ClientResponse",
+ stream: "MultipartReader",
+ ) -> None:
+ self.resp = resp
+ self.stream = stream
+
+ def __aiter__(self) -> "MultipartResponseWrapper":
+ return self
+
+ async def __anext__(
+ self,
+ ) -> Union["MultipartReader", "BodyPartReader"]:
+ part = await self.next()
+ if part is None:
+ raise StopAsyncIteration
+ return part
+
+ def at_eof(self) -> bool:
+ """Returns True when all response data had been read."""
+ return self.resp.content.at_eof()
+
+ async def next(
+ self,
+ ) -> Optional[Union["MultipartReader", "BodyPartReader"]]:
+ """Emits next multipart reader object."""
+ item = await self.stream.next()
+ if self.stream.at_eof():
+ await self.release()
+ return item
+
+ async def release(self) -> None:
+ """Releases the connection gracefully, reading all the content
+ to the void."""
+ await self.resp.release()
+
+
+class BodyPartReader:
+ """Multipart reader for single body part."""
+
+ chunk_size = 8192
+
+ def __init__(
+ self, boundary: bytes, headers: "CIMultiDictProxy[str]", content: StreamReader
+ ) -> None:
+ self.headers = headers
+ self._boundary = boundary
+ self._content = content
+ self._at_eof = False
+ length = self.headers.get(CONTENT_LENGTH, None)
+ self._length = int(length) if length is not None else None
+ self._read_bytes = 0
+        # TODO: typing.Deque is not supported by Python 3.5
+ self._unread = deque() # type: Any
+ self._prev_chunk = None # type: Optional[bytes]
+ self._content_eof = 0
+ self._cache = {} # type: Dict[str, Any]
+
+ def __aiter__(self) -> AsyncIterator["BodyPartReader"]:
+ return self # type: ignore
+
+ async def __anext__(self) -> bytes:
+ part = await self.next()
+ if part is None:
+ raise StopAsyncIteration
+ return part
+
+ async def next(self) -> Optional[bytes]:
+ item = await self.read()
+ if not item:
+ return None
+ return item
+
+ async def read(self, *, decode: bool = False) -> bytes:
+ """Reads body part data.
+
+        decode: if True, decode the data according to the
+        Content-Encoding header. If the header is missing,
+        the data remains untouched.
+ """
+ if self._at_eof:
+ return b""
+ data = bytearray()
+ while not self._at_eof:
+ data.extend(await self.read_chunk(self.chunk_size))
+ if decode:
+ return self.decode(data)
+ return data
+
+ async def read_chunk(self, size: int = chunk_size) -> bytes:
+ """Reads body part content chunk of the specified size.
+
+ size: chunk size
+ """
+ if self._at_eof:
+ return b""
+ if self._length:
+ chunk = await self._read_chunk_from_length(size)
+ else:
+ chunk = await self._read_chunk_from_stream(size)
+
+ self._read_bytes += len(chunk)
+ if self._read_bytes == self._length:
+ self._at_eof = True
+ if self._at_eof:
+ clrf = await self._content.readline()
+ assert (
+ b"\r\n" == clrf
+ ), "reader did not read all the data or it is malformed"
+ return chunk
+
+ async def _read_chunk_from_length(self, size: int) -> bytes:
+        # Reads a body part content chunk of the specified size.
+        # The body part must have a Content-Length header with a proper value.
+ assert self._length is not None, "Content-Length required for chunked read"
+ chunk_size = min(size, self._length - self._read_bytes)
+ chunk = await self._content.read(chunk_size)
+ return chunk
+
+ async def _read_chunk_from_stream(self, size: int) -> bytes:
+        # Reads a content chunk of a body part with unknown length.
+        # The Content-Length header is not required for the body part.
+ assert (
+ size >= len(self._boundary) + 2
+ ), "Chunk size must be greater or equal than boundary length + 2"
+ first_chunk = self._prev_chunk is None
+ if first_chunk:
+ self._prev_chunk = await self._content.read(size)
+
+ chunk = await self._content.read(size)
+ self._content_eof += int(self._content.at_eof())
+ assert self._content_eof < 3, "Reading after EOF"
+ assert self._prev_chunk is not None
+ window = self._prev_chunk + chunk
+ sub = b"\r\n" + self._boundary
+ if first_chunk:
+ idx = window.find(sub)
+ else:
+ idx = window.find(sub, max(0, len(self._prev_chunk) - len(sub)))
+ if idx >= 0:
+ # pushing boundary back to content
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", category=DeprecationWarning)
+ self._content.unread_data(window[idx:])
+ if size > idx:
+ self._prev_chunk = self._prev_chunk[:idx]
+ chunk = window[len(self._prev_chunk) : idx]
+ if not chunk:
+ self._at_eof = True
+ result = self._prev_chunk
+ self._prev_chunk = chunk
+ return result
+
+ async def readline(self) -> bytes:
+ """Reads body part by line by line."""
+ if self._at_eof:
+ return b""
+
+ if self._unread:
+ line = self._unread.popleft()
+ else:
+ line = await self._content.readline()
+
+ if line.startswith(self._boundary):
+            # the very last boundary may not come with \r\n,
+            # so apply a single rule for all of them
+ sline = line.rstrip(b"\r\n")
+ boundary = self._boundary
+ last_boundary = self._boundary + b"--"
+ # ensure that we read exactly the boundary, not something alike
+ if sline == boundary or sline == last_boundary:
+ self._at_eof = True
+ self._unread.append(line)
+ return b""
+ else:
+ next_line = await self._content.readline()
+ if next_line.startswith(self._boundary):
+ line = line[:-2] # strip CRLF but only once
+ self._unread.append(next_line)
+
+ return line
+
+ async def release(self) -> None:
+ """Like read(), but reads all the data to the void."""
+ if self._at_eof:
+ return
+ while not self._at_eof:
+ await self.read_chunk(self.chunk_size)
+
+ async def text(self, *, encoding: Optional[str] = None) -> str:
+ """Like read(), but assumes that body part contains text data."""
+ data = await self.read(decode=True)
+ # see https://www.w3.org/TR/html5/forms.html#multipart/form-data-encoding-algorithm # NOQA
+ # and https://dvcs.w3.org/hg/xhr/raw-file/tip/Overview.html#dom-xmlhttprequest-send # NOQA
+ encoding = encoding or self.get_charset(default="utf-8")
+ return data.decode(encoding)
+
+ async def json(self, *, encoding: Optional[str] = None) -> Optional[Dict[str, Any]]:
+ """Like read(), but assumes that body parts contains JSON data."""
+ data = await self.read(decode=True)
+ if not data:
+ return None
+ encoding = encoding or self.get_charset(default="utf-8")
+ return json.loads(data.decode(encoding))
+
+ async def form(self, *, encoding: Optional[str] = None) -> List[Tuple[str, str]]:
+ """Like read(), but assumes that body parts contains form
+ urlencoded data.
+ """
+ data = await self.read(decode=True)
+ if not data:
+ return []
+ if encoding is not None:
+ real_encoding = encoding
+ else:
+ real_encoding = self.get_charset(default="utf-8")
+ return parse_qsl(
+ data.rstrip().decode(real_encoding),
+ keep_blank_values=True,
+ encoding=real_encoding,
+ )
+
+ def at_eof(self) -> bool:
+ """Returns True if the boundary was reached or False otherwise."""
+ return self._at_eof
+
+ def decode(self, data: bytes) -> bytes:
+ """Decodes data according the specified Content-Encoding
+ or Content-Transfer-Encoding headers value.
+ """
+ if CONTENT_TRANSFER_ENCODING in self.headers:
+ data = self._decode_content_transfer(data)
+ if CONTENT_ENCODING in self.headers:
+ return self._decode_content(data)
+ return data
+
+ def _decode_content(self, data: bytes) -> bytes:
+ encoding = self.headers.get(CONTENT_ENCODING, "").lower()
+
+ if encoding == "deflate":
+ return zlib.decompress(data, -zlib.MAX_WBITS)
+ elif encoding == "gzip":
+ return zlib.decompress(data, 16 + zlib.MAX_WBITS)
+ elif encoding == "identity":
+ return data
+ else:
+ raise RuntimeError(f"unknown content encoding: {encoding}")
+
+ def _decode_content_transfer(self, data: bytes) -> bytes:
+ encoding = self.headers.get(CONTENT_TRANSFER_ENCODING, "").lower()
+
+ if encoding == "base64":
+ return base64.b64decode(data)
+ elif encoding == "quoted-printable":
+ return binascii.a2b_qp(data)
+ elif encoding in ("binary", "8bit", "7bit"):
+ return data
+ else:
+            raise RuntimeError(f"unknown content transfer encoding: {encoding}")
+
+ def get_charset(self, default: str) -> str:
+ """Returns charset parameter from Content-Type header or default."""
+ ctype = self.headers.get(CONTENT_TYPE, "")
+ mimetype = parse_mimetype(ctype)
+ return mimetype.parameters.get("charset", default)
+
+ @reify
+ def name(self) -> Optional[str]:
+ """Returns name specified in Content-Disposition header or None
+ if missed or header is malformed.
+ """
+
+ _, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION))
+ return content_disposition_filename(params, "name")
+
+ @reify
+ def filename(self) -> Optional[str]:
+ """Returns filename specified in Content-Disposition header or None
+ if missed or header is malformed.
+ """
+ _, params = parse_content_disposition(self.headers.get(CONTENT_DISPOSITION))
+ return content_disposition_filename(params, "filename")
+
+
+@payload_type(BodyPartReader, order=Order.try_first)
+class BodyPartReaderPayload(Payload):
+ def __init__(self, value: BodyPartReader, *args: Any, **kwargs: Any) -> None:
+ super().__init__(value, *args, **kwargs)
+
+ params = {} # type: Dict[str, str]
+ if value.name is not None:
+ params["name"] = value.name
+ if value.filename is not None:
+ params["filename"] = value.filename
+
+ if params:
+ self.set_content_disposition("attachment", True, **params)
+
+ async def write(self, writer: Any) -> None:
+ field = self._value
+ chunk = await field.read_chunk(size=2 ** 16)
+ while chunk:
+ await writer.write(field.decode(chunk))
+ chunk = await field.read_chunk(size=2 ** 16)
+
+
+class MultipartReader:
+ """Multipart body reader."""
+
+    #: Response wrapper, used when a multipart reader is constructed from a response.
+ response_wrapper_cls = MultipartResponseWrapper
+ #: Multipart reader class, used to handle multipart/* body parts.
+ #: None points to type(self)
+ multipart_reader_cls = None
+ #: Body part reader class for non multipart/* content types.
+ part_reader_cls = BodyPartReader
+
+ def __init__(self, headers: Mapping[str, str], content: StreamReader) -> None:
+ self.headers = headers
+ self._boundary = ("--" + self._get_boundary()).encode()
+ self._content = content
+ self._last_part = (
+ None
+ ) # type: Optional[Union['MultipartReader', BodyPartReader]]
+ self._at_eof = False
+ self._at_bof = True
+ self._unread = [] # type: List[bytes]
+
+ def __aiter__(
+ self,
+ ) -> AsyncIterator["BodyPartReader"]:
+ return self # type: ignore
+
+ async def __anext__(
+ self,
+ ) -> Optional[Union["MultipartReader", BodyPartReader]]:
+ part = await self.next()
+ if part is None:
+ raise StopAsyncIteration
+ return part
+
+ @classmethod
+ def from_response(
+ cls,
+ response: "ClientResponse",
+ ) -> MultipartResponseWrapper:
+ """Constructs reader instance from HTTP response.
+
+ :param response: :class:`~aiohttp.client.ClientResponse` instance
+ """
+ obj = cls.response_wrapper_cls(
+ response, cls(response.headers, response.content)
+ )
+ return obj
+
+ def at_eof(self) -> bool:
+ """Returns True if the final boundary was reached or
+ False otherwise.
+ """
+ return self._at_eof
+
+ async def next(
+ self,
+ ) -> Optional[Union["MultipartReader", BodyPartReader]]:
+ """Emits the next multipart body part."""
+ # So, if we're at BOF, we need to skip till the boundary.
+ if self._at_eof:
+ return None
+ await self._maybe_release_last_part()
+ if self._at_bof:
+ await self._read_until_first_boundary()
+ self._at_bof = False
+ else:
+ await self._read_boundary()
+ if self._at_eof: # we just read the last boundary, nothing to do there
+ return None
+ self._last_part = await self.fetch_next_part()
+ return self._last_part
+
+ async def release(self) -> None:
+ """Reads all the body parts to the void till the final boundary."""
+ while not self._at_eof:
+ item = await self.next()
+ if item is None:
+ break
+ await item.release()
+
+ async def fetch_next_part(
+ self,
+ ) -> Union["MultipartReader", BodyPartReader]:
+ """Returns the next body part reader."""
+ headers = await self._read_headers()
+ return self._get_part_reader(headers)
+
+ def _get_part_reader(
+ self,
+ headers: "CIMultiDictProxy[str]",
+ ) -> Union["MultipartReader", BodyPartReader]:
+ """Dispatches the response by the `Content-Type` header, returning
+ suitable reader instance.
+
+ :param dict headers: Response headers
+ """
+ ctype = headers.get(CONTENT_TYPE, "")
+ mimetype = parse_mimetype(ctype)
+
+ if mimetype.type == "multipart":
+ if self.multipart_reader_cls is None:
+ return type(self)(headers, self._content)
+ return self.multipart_reader_cls(headers, self._content)
+ else:
+ return self.part_reader_cls(self._boundary, headers, self._content)
+
+ def _get_boundary(self) -> str:
+ mimetype = parse_mimetype(self.headers[CONTENT_TYPE])
+
+ assert mimetype.type == "multipart", "multipart/* content type expected"
+
+ if "boundary" not in mimetype.parameters:
+ raise ValueError(
+ "boundary missed for Content-Type: %s" % self.headers[CONTENT_TYPE]
+ )
+
+ boundary = mimetype.parameters["boundary"]
+ if len(boundary) > 70:
+ raise ValueError("boundary %r is too long (70 chars max)" % boundary)
+
+ return boundary
+
+ async def _readline(self) -> bytes:
+ if self._unread:
+ return self._unread.pop()
+ return await self._content.readline()
+
+ async def _read_until_first_boundary(self) -> None:
+ while True:
+ chunk = await self._readline()
+ if chunk == b"":
+ raise ValueError(
+ "Could not find starting boundary %r" % (self._boundary)
+ )
+ chunk = chunk.rstrip()
+ if chunk == self._boundary:
+ return
+ elif chunk == self._boundary + b"--":
+ self._at_eof = True
+ return
+
+ async def _read_boundary(self) -> None:
+ chunk = (await self._readline()).rstrip()
+ if chunk == self._boundary:
+ pass
+ elif chunk == self._boundary + b"--":
+ self._at_eof = True
+ epilogue = await self._readline()
+ next_line = await self._readline()
+
+ # the epilogue is expected and then either the end of input or the
+ # parent multipart boundary, if the parent boundary is found then
+ # it should be marked as unread and handed to the parent for
+ # processing
+ if next_line[:2] == b"--":
+ self._unread.append(next_line)
+ # otherwise the request is likely missing an epilogue and both
+ # lines should be passed to the parent for processing
+ # (this handles the old behavior gracefully)
+ else:
+ self._unread.extend([next_line, epilogue])
+ else:
+ raise ValueError(f"Invalid boundary {chunk!r}, expected {self._boundary!r}")
+
+ async def _read_headers(self) -> "CIMultiDictProxy[str]":
+ lines = [b""]
+ while True:
+ chunk = await self._content.readline()
+ chunk = chunk.strip()
+ lines.append(chunk)
+ if not chunk:
+ break
+ parser = HeadersParser()
+ headers, raw_headers = parser.parse_headers(lines)
+ return headers
+
+ async def _maybe_release_last_part(self) -> None:
+ """Ensures that the last read body part is read completely."""
+ if self._last_part is not None:
+ if not self._last_part.at_eof():
+ await self._last_part.release()
+ self._unread.extend(self._last_part._unread)
+ self._last_part = None
+
+
+_Part = Tuple[Payload, str, str]
+
+
+class MultipartWriter(Payload):
+ """Multipart body writer."""
+
+ def __init__(self, subtype: str = "mixed", boundary: Optional[str] = None) -> None:
+ boundary = boundary if boundary is not None else uuid.uuid4().hex
+        # The underlying Payload API demands a str (utf-8), not bytes,
+        # so we need to ensure we don't lose anything during conversion.
+        # As a result, the boundary is required to be ASCII only.
+
+ try:
+ self._boundary = boundary.encode("ascii")
+ except UnicodeEncodeError:
+ raise ValueError("boundary should contain ASCII only chars") from None
+ ctype = f"multipart/{subtype}; boundary={self._boundary_value}"
+
+ super().__init__(None, content_type=ctype)
+
+ self._parts = [] # type: List[_Part]
+
+ def __enter__(self) -> "MultipartWriter":
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ pass
+
+ def __iter__(self) -> Iterator[_Part]:
+ return iter(self._parts)
+
+ def __len__(self) -> int:
+ return len(self._parts)
+
+ def __bool__(self) -> bool:
+ return True
+
+ _valid_tchar_regex = re.compile(br"\A[!#$%&'*+\-.^_`|~\w]+\Z")
+ _invalid_qdtext_char_regex = re.compile(br"[\x00-\x08\x0A-\x1F\x7F]")
+
+ @property
+ def _boundary_value(self) -> str:
+ """Wrap boundary parameter value in quotes, if necessary.
+
+        Reads self.boundary and returns a unicode string.
+ """
+ # Refer to RFCs 7231, 7230, 5234.
+ #
+ # parameter = token "=" ( token / quoted-string )
+ # token = 1*tchar
+ # quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
+ # qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text
+ # obs-text = %x80-FF
+ # quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
+ # tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+ # / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+ # / DIGIT / ALPHA
+ # ; any VCHAR, except delimiters
+ # VCHAR = %x21-7E
+ value = self._boundary
+ if re.match(self._valid_tchar_regex, value):
+ return value.decode("ascii") # cannot fail
+
+ if re.search(self._invalid_qdtext_char_regex, value):
+ raise ValueError("boundary value contains invalid characters")
+
+ # escape %x5C and %x22
+ quoted_value_content = value.replace(b"\\", b"\\\\")
+ quoted_value_content = quoted_value_content.replace(b'"', b'\\"')
+
+ return '"' + quoted_value_content.decode("ascii") + '"'
+
+ @property
+ def boundary(self) -> str:
+ return self._boundary.decode("ascii")
+
+ def append(self, obj: Any, headers: Optional[MultiMapping[str]] = None) -> Payload:
+ if headers is None:
+ headers = CIMultiDict()
+
+ if isinstance(obj, Payload):
+ obj.headers.update(headers)
+ return self.append_payload(obj)
+ else:
+ try:
+ payload = get_payload(obj, headers=headers)
+ except LookupError:
+ raise TypeError("Cannot create payload from %r" % obj)
+ else:
+ return self.append_payload(payload)
+
+ def append_payload(self, payload: Payload) -> Payload:
+ """Adds a new body part to multipart writer."""
+ # compression
+ encoding = payload.headers.get(
+ CONTENT_ENCODING,
+ "",
+ ).lower() # type: Optional[str]
+ if encoding and encoding not in ("deflate", "gzip", "identity"):
+ raise RuntimeError(f"unknown content encoding: {encoding}")
+ if encoding == "identity":
+ encoding = None
+
+ # te encoding
+ te_encoding = payload.headers.get(
+ CONTENT_TRANSFER_ENCODING,
+ "",
+ ).lower() # type: Optional[str]
+ if te_encoding not in ("", "base64", "quoted-printable", "binary"):
+            raise RuntimeError(f"unknown content transfer encoding: {te_encoding}")
+ if te_encoding == "binary":
+ te_encoding = None
+
+ # size
+ size = payload.size
+ if size is not None and not (encoding or te_encoding):
+ payload.headers[CONTENT_LENGTH] = str(size)
+
+ self._parts.append((payload, encoding, te_encoding)) # type: ignore
+ return payload
+
+ def append_json(
+ self, obj: Any, headers: Optional[MultiMapping[str]] = None
+ ) -> Payload:
+ """Helper to append JSON part."""
+ if headers is None:
+ headers = CIMultiDict()
+
+ return self.append_payload(JsonPayload(obj, headers=headers))
+
+ def append_form(
+ self,
+ obj: Union[Sequence[Tuple[str, str]], Mapping[str, str]],
+ headers: Optional[MultiMapping[str]] = None,
+ ) -> Payload:
+ """Helper to append form urlencoded part."""
+ assert isinstance(obj, (Sequence, Mapping))
+
+ if headers is None:
+ headers = CIMultiDict()
+
+ if isinstance(obj, Mapping):
+ obj = list(obj.items())
+ data = urlencode(obj, doseq=True)
+
+ return self.append_payload(
+ StringPayload(
+ data, headers=headers, content_type="application/x-www-form-urlencoded"
+ )
+ )
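+
+    # Illustrative usage (assumption, not upstream code):
+    #     with MultipartWriter("form-data") as mpwriter:
+    #         mpwriter.append("plain body text")
+    #         mpwriter.append_json({"ok": True})
+    #         mpwriter.append_form([("key", "value")])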
+
+ @property
+ def size(self) -> Optional[int]:
+ """Size of the payload."""
+ total = 0
+ for part, encoding, te_encoding in self._parts:
+ if encoding or te_encoding or part.size is None:
+ return None
+
+ total += int(
+ 2
+ + len(self._boundary)
+ + 2
+ + part.size # b'--'+self._boundary+b'\r\n'
+ + len(part._binary_headers)
+ + 2 # b'\r\n'
+ )
+
+ total += 2 + len(self._boundary) + 4 # b'--'+self._boundary+b'--\r\n'
+ return total
+
+ async def write(self, writer: Any, close_boundary: bool = True) -> None:
+ """Write body."""
+ for part, encoding, te_encoding in self._parts:
+ await writer.write(b"--" + self._boundary + b"\r\n")
+ await writer.write(part._binary_headers)
+
+ if encoding or te_encoding:
+ w = MultipartPayloadWriter(writer)
+ if encoding:
+ w.enable_compression(encoding)
+ if te_encoding:
+ w.enable_encoding(te_encoding)
+ await part.write(w) # type: ignore
+ await w.write_eof()
+ else:
+ await part.write(writer)
+
+ await writer.write(b"\r\n")
+
+ if close_boundary:
+ await writer.write(b"--" + self._boundary + b"--\r\n")
+
+
+class MultipartPayloadWriter:
+ def __init__(self, writer: Any) -> None:
+ self._writer = writer
+ self._encoding = None # type: Optional[str]
+ self._compress = None # type: Any
+ self._encoding_buffer = None # type: Optional[bytearray]
+
+ def enable_encoding(self, encoding: str) -> None:
+ if encoding == "base64":
+ self._encoding = encoding
+ self._encoding_buffer = bytearray()
+ elif encoding == "quoted-printable":
+ self._encoding = "quoted-printable"
+
+ def enable_compression(self, encoding: str = "deflate") -> None:
+ zlib_mode = 16 + zlib.MAX_WBITS if encoding == "gzip" else -zlib.MAX_WBITS
+ self._compress = zlib.compressobj(wbits=zlib_mode)
+
+ async def write_eof(self) -> None:
+ if self._compress is not None:
+ chunk = self._compress.flush()
+ if chunk:
+ self._compress = None
+ await self.write(chunk)
+
+ if self._encoding == "base64":
+ if self._encoding_buffer:
+ await self._writer.write(base64.b64encode(self._encoding_buffer))
+
+ async def write(self, chunk: bytes) -> None:
+ if self._compress is not None:
+ if chunk:
+ chunk = self._compress.compress(chunk)
+ if not chunk:
+ return
+
+ if self._encoding == "base64":
+ buf = self._encoding_buffer
+ assert buf is not None
+ buf.extend(chunk)
+
+ if buf:
+ div, mod = divmod(len(buf), 3)
+ enc_chunk, self._encoding_buffer = (buf[: div * 3], buf[div * 3 :])
+ if enc_chunk:
+ b64chunk = base64.b64encode(enc_chunk)
+ await self._writer.write(b64chunk)
+ elif self._encoding == "quoted-printable":
+ await self._writer.write(binascii.b2a_qp(chunk))
+ else:
+ await self._writer.write(chunk)
diff --git a/third_party/python/aiohttp/aiohttp/payload.py b/third_party/python/aiohttp/aiohttp/payload.py
new file mode 100644
index 0000000000..c63dd2204c
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/payload.py
@@ -0,0 +1,448 @@
+import asyncio
+import enum
+import io
+import json
+import mimetypes
+import os
+import warnings
+from abc import ABC, abstractmethod
+from itertools import chain
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ ByteString,
+ Dict,
+ Iterable,
+ Optional,
+ Text,
+ TextIO,
+ Tuple,
+ Type,
+ Union,
+)
+
+from multidict import CIMultiDict
+
+from . import hdrs
+from .abc import AbstractStreamWriter
+from .helpers import (
+ PY_36,
+ content_disposition_header,
+ guess_filename,
+ parse_mimetype,
+ sentinel,
+)
+from .streams import StreamReader
+from .typedefs import JSONEncoder, _CIMultiDict
+
+__all__ = (
+ "PAYLOAD_REGISTRY",
+ "get_payload",
+ "payload_type",
+ "Payload",
+ "BytesPayload",
+ "StringPayload",
+ "IOBasePayload",
+ "BytesIOPayload",
+ "BufferedReaderPayload",
+ "TextIOPayload",
+ "StringIOPayload",
+ "JsonPayload",
+ "AsyncIterablePayload",
+)
+
+TOO_LARGE_BYTES_BODY = 2 ** 20 # 1 MB
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import List
+
+
+class LookupError(Exception):
+ pass
+
+
+class Order(str, enum.Enum):
+ normal = "normal"
+ try_first = "try_first"
+ try_last = "try_last"
+
+
+def get_payload(data: Any, *args: Any, **kwargs: Any) -> "Payload":
+ return PAYLOAD_REGISTRY.get(data, *args, **kwargs)
+
+
+def register_payload(
+ factory: Type["Payload"], type: Any, *, order: Order = Order.normal
+) -> None:
+ PAYLOAD_REGISTRY.register(factory, type, order=order)
+
+
+class payload_type:
+ def __init__(self, type: Any, *, order: Order = Order.normal) -> None:
+ self.type = type
+ self.order = order
+
+ def __call__(self, factory: Type["Payload"]) -> Type["Payload"]:
+ register_payload(factory, self.type, order=self.order)
+ return factory
+
+
+class PayloadRegistry:
+ """Payload registry.
+
+ note: we need zope.interface for more efficient adapter search
+ """
+
+ def __init__(self) -> None:
+ self._first = [] # type: List[Tuple[Type[Payload], Any]]
+ self._normal = [] # type: List[Tuple[Type[Payload], Any]]
+ self._last = [] # type: List[Tuple[Type[Payload], Any]]
+
+ def get(
+ self, data: Any, *args: Any, _CHAIN: Any = chain, **kwargs: Any
+ ) -> "Payload":
+ if isinstance(data, Payload):
+ return data
+ for factory, type in _CHAIN(self._first, self._normal, self._last):
+ if isinstance(data, type):
+ return factory(data, *args, **kwargs)
+
+ raise LookupError()
+
+ def register(
+ self, factory: Type["Payload"], type: Any, *, order: Order = Order.normal
+ ) -> None:
+ if order is Order.try_first:
+ self._first.append((factory, type))
+ elif order is Order.normal:
+ self._normal.append((factory, type))
+ elif order is Order.try_last:
+ self._last.append((factory, type))
+ else:
+ raise ValueError(f"Unsupported order {order!r}")
+
+
+class Payload(ABC):
+
+ _default_content_type = "application/octet-stream" # type: str
+ _size = None # type: Optional[int]
+
+ def __init__(
+ self,
+ value: Any,
+ headers: Optional[
+ Union[_CIMultiDict, Dict[str, str], Iterable[Tuple[str, str]]]
+ ] = None,
+ content_type: Optional[str] = sentinel,
+ filename: Optional[str] = None,
+ encoding: Optional[str] = None,
+ **kwargs: Any,
+ ) -> None:
+ self._encoding = encoding
+ self._filename = filename
+ self._headers = CIMultiDict() # type: _CIMultiDict
+ self._value = value
+ if content_type is not sentinel and content_type is not None:
+ self._headers[hdrs.CONTENT_TYPE] = content_type
+ elif self._filename is not None:
+ content_type = mimetypes.guess_type(self._filename)[0]
+ if content_type is None:
+ content_type = self._default_content_type
+ self._headers[hdrs.CONTENT_TYPE] = content_type
+ else:
+ self._headers[hdrs.CONTENT_TYPE] = self._default_content_type
+ self._headers.update(headers or {})
+
+ @property
+ def size(self) -> Optional[int]:
+ """Size of the payload."""
+ return self._size
+
+ @property
+ def filename(self) -> Optional[str]:
+ """Filename of the payload."""
+ return self._filename
+
+ @property
+ def headers(self) -> _CIMultiDict:
+ """Custom item headers"""
+ return self._headers
+
+ @property
+ def _binary_headers(self) -> bytes:
+ return (
+ "".join([k + ": " + v + "\r\n" for k, v in self.headers.items()]).encode(
+ "utf-8"
+ )
+ + b"\r\n"
+ )
+
+ @property
+ def encoding(self) -> Optional[str]:
+ """Payload encoding"""
+ return self._encoding
+
+ @property
+ def content_type(self) -> str:
+ """Content type"""
+ return self._headers[hdrs.CONTENT_TYPE]
+
+ def set_content_disposition(
+ self, disptype: str, quote_fields: bool = True, **params: Any
+ ) -> None:
+ """Sets ``Content-Disposition`` header."""
+ self._headers[hdrs.CONTENT_DISPOSITION] = content_disposition_header(
+ disptype, quote_fields=quote_fields, **params
+ )
+
+ @abstractmethod
+ async def write(self, writer: AbstractStreamWriter) -> None:
+ """Write payload.
+
+ writer is an AbstractStreamWriter instance:
+ """
+
+
+class BytesPayload(Payload):
+ def __init__(self, value: ByteString, *args: Any, **kwargs: Any) -> None:
+ if not isinstance(value, (bytes, bytearray, memoryview)):
+ raise TypeError(
+ "value argument must be byte-ish, not {!r}".format(type(value))
+ )
+
+ if "content_type" not in kwargs:
+ kwargs["content_type"] = "application/octet-stream"
+
+ super().__init__(value, *args, **kwargs)
+
+ if isinstance(value, memoryview):
+ self._size = value.nbytes
+ else:
+ self._size = len(value)
+
+ if self._size > TOO_LARGE_BYTES_BODY:
+ if PY_36:
+ kwargs = {"source": self}
+ else:
+ kwargs = {}
+ warnings.warn(
+ "Sending a large body directly with raw bytes might"
+ " lock the event loop. You should probably pass an "
+ "io.BytesIO object instead",
+ ResourceWarning,
+ **kwargs,
+ )
+
+ async def write(self, writer: AbstractStreamWriter) -> None:
+ await writer.write(self._value)
+
+
+class StringPayload(BytesPayload):
+ def __init__(
+ self,
+ value: Text,
+ *args: Any,
+ encoding: Optional[str] = None,
+ content_type: Optional[str] = None,
+ **kwargs: Any,
+ ) -> None:
+
+ if encoding is None:
+ if content_type is None:
+ real_encoding = "utf-8"
+ content_type = "text/plain; charset=utf-8"
+ else:
+ mimetype = parse_mimetype(content_type)
+ real_encoding = mimetype.parameters.get("charset", "utf-8")
+ else:
+ if content_type is None:
+ content_type = "text/plain; charset=%s" % encoding
+ real_encoding = encoding
+
+ super().__init__(
+ value.encode(real_encoding),
+ encoding=real_encoding,
+ content_type=content_type,
+ *args,
+ **kwargs,
+ )
+
+
+class StringIOPayload(StringPayload):
+ def __init__(self, value: IO[str], *args: Any, **kwargs: Any) -> None:
+ super().__init__(value.read(), *args, **kwargs)
+
+
+class IOBasePayload(Payload):
+ def __init__(
+ self, value: IO[Any], disposition: str = "attachment", *args: Any, **kwargs: Any
+ ) -> None:
+ if "filename" not in kwargs:
+ kwargs["filename"] = guess_filename(value)
+
+ super().__init__(value, *args, **kwargs)
+
+ if self._filename is not None and disposition is not None:
+ if hdrs.CONTENT_DISPOSITION not in self.headers:
+ self.set_content_disposition(disposition, filename=self._filename)
+
+ async def write(self, writer: AbstractStreamWriter) -> None:
+ loop = asyncio.get_event_loop()
+ try:
+ chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
+ while chunk:
+ await writer.write(chunk)
+ chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
+ finally:
+ await loop.run_in_executor(None, self._value.close)
+
+
+class TextIOPayload(IOBasePayload):
+ def __init__(
+ self,
+ value: TextIO,
+ *args: Any,
+ encoding: Optional[str] = None,
+ content_type: Optional[str] = None,
+ **kwargs: Any,
+ ) -> None:
+
+ if encoding is None:
+ if content_type is None:
+ encoding = "utf-8"
+ content_type = "text/plain; charset=utf-8"
+ else:
+ mimetype = parse_mimetype(content_type)
+ encoding = mimetype.parameters.get("charset", "utf-8")
+ else:
+ if content_type is None:
+ content_type = "text/plain; charset=%s" % encoding
+
+ super().__init__(
+ value,
+ content_type=content_type,
+ encoding=encoding,
+ *args,
+ **kwargs,
+ )
+
+ @property
+ def size(self) -> Optional[int]:
+ try:
+ return os.fstat(self._value.fileno()).st_size - self._value.tell()
+ except OSError:
+ return None
+
+ async def write(self, writer: AbstractStreamWriter) -> None:
+ loop = asyncio.get_event_loop()
+ try:
+ chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
+ while chunk:
+ await writer.write(chunk.encode(self._encoding))
+ chunk = await loop.run_in_executor(None, self._value.read, 2 ** 16)
+ finally:
+ await loop.run_in_executor(None, self._value.close)
+
+
+class BytesIOPayload(IOBasePayload):
+ @property
+ def size(self) -> int:
+ position = self._value.tell()
+ end = self._value.seek(0, os.SEEK_END)
+ self._value.seek(position)
+ return end - position
+
+
+class BufferedReaderPayload(IOBasePayload):
+ @property
+ def size(self) -> Optional[int]:
+ try:
+ return os.fstat(self._value.fileno()).st_size - self._value.tell()
+ except OSError:
+ # data.fileno() is not supported, e.g.
+ # io.BufferedReader(io.BytesIO(b'data'))
+ return None
+
+
+class JsonPayload(BytesPayload):
+ def __init__(
+ self,
+ value: Any,
+ encoding: str = "utf-8",
+ content_type: str = "application/json",
+ dumps: JSONEncoder = json.dumps,
+ *args: Any,
+ **kwargs: Any,
+ ) -> None:
+
+ super().__init__(
+ dumps(value).encode(encoding),
+ content_type=content_type,
+ encoding=encoding,
+ *args,
+ **kwargs,
+ )
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import AsyncIterable, AsyncIterator
+
+ _AsyncIterator = AsyncIterator[bytes]
+ _AsyncIterable = AsyncIterable[bytes]
+else:
+ from collections.abc import AsyncIterable, AsyncIterator
+
+ _AsyncIterator = AsyncIterator
+ _AsyncIterable = AsyncIterable
+
+
+class AsyncIterablePayload(Payload):
+
+ _iter = None # type: Optional[_AsyncIterator]
+
+ def __init__(self, value: _AsyncIterable, *args: Any, **kwargs: Any) -> None:
+ if not isinstance(value, AsyncIterable):
+ raise TypeError(
+ "value argument must support "
+ "collections.abc.AsyncIterablebe interface, "
+ "got {!r}".format(type(value))
+ )
+
+ if "content_type" not in kwargs:
+ kwargs["content_type"] = "application/octet-stream"
+
+ super().__init__(value, *args, **kwargs)
+
+ self._iter = value.__aiter__()
+
+ async def write(self, writer: AbstractStreamWriter) -> None:
+ if self._iter:
+ try:
+                # the iter-is-not-None check prevents rare cases
+                # where the same iterable is used twice
+ while True:
+ chunk = await self._iter.__anext__()
+ await writer.write(chunk)
+ except StopAsyncIteration:
+ self._iter = None
+
+
+class StreamReaderPayload(AsyncIterablePayload):
+ def __init__(self, value: StreamReader, *args: Any, **kwargs: Any) -> None:
+ super().__init__(value.iter_any(), *args, **kwargs)
+
+
+PAYLOAD_REGISTRY = PayloadRegistry()
+PAYLOAD_REGISTRY.register(BytesPayload, (bytes, bytearray, memoryview))
+PAYLOAD_REGISTRY.register(StringPayload, str)
+PAYLOAD_REGISTRY.register(StringIOPayload, io.StringIO)
+PAYLOAD_REGISTRY.register(TextIOPayload, io.TextIOBase)
+PAYLOAD_REGISTRY.register(BytesIOPayload, io.BytesIO)
+PAYLOAD_REGISTRY.register(BufferedReaderPayload, (io.BufferedReader, io.BufferedRandom))
+PAYLOAD_REGISTRY.register(IOBasePayload, io.IOBase)
+PAYLOAD_REGISTRY.register(StreamReaderPayload, StreamReader)
+# try_last gives more specialized async iterables, such as
+# aiohttp.multipart.BodyPartReaderPayload, a chance to override the default
+PAYLOAD_REGISTRY.register(AsyncIterablePayload, AsyncIterable, order=Order.try_last)
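+
+# A minimal sketch of how the registry resolves a payload class via
+# PayloadRegistry.get, the factory lookup defined earlier in this module
+# (the inputs below are illustrative):
+#
+#     payload = PAYLOAD_REGISTRY.get(b"raw bytes")         # BytesPayload
+#     payload = PAYLOAD_REGISTRY.get(io.BytesIO(b"data"))  # BytesIOPayload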
diff --git a/third_party/python/aiohttp/aiohttp/payload_streamer.py b/third_party/python/aiohttp/aiohttp/payload_streamer.py
new file mode 100644
index 0000000000..3b2de15164
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/payload_streamer.py
@@ -0,0 +1,74 @@
+""" Payload implemenation for coroutines as data provider.
+
+As a simple case, you can upload data from file::
+
+ @aiohttp.streamer
+ async def file_sender(writer, file_name=None):
+ with open(file_name, 'rb') as f:
+ chunk = f.read(2**16)
+ while chunk:
+ await writer.write(chunk)
+
+ chunk = f.read(2**16)
+
+Then you can use ``file_sender`` like this::
+
+ async with session.post('http://httpbin.org/post',
+ data=file_sender(file_name='huge_file')) as resp:
+ print(await resp.text())
+
+.. note:: The coroutine must accept ``writer`` as its first argument.
+
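+An async generator achieves the same result without the deprecated
+decorator (a sketch; ``session`` and ``huge_file`` are assumed as above)::
+
+    async def file_sender(file_name):
+        with open(file_name, 'rb') as f:
+            chunk = f.read(2**16)
+            while chunk:
+                yield chunk
+                chunk = f.read(2**16)
+
+    async with session.post('http://httpbin.org/post',
+                            data=file_sender('huge_file')) as resp:
+        print(await resp.text())
+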
+"""
+
+import types
+import warnings
+from typing import Any, Awaitable, Callable, Dict, Tuple
+
+from .abc import AbstractStreamWriter
+from .payload import Payload, payload_type
+
+__all__ = ("streamer",)
+
+
+class _stream_wrapper:
+ def __init__(
+ self,
+ coro: Callable[..., Awaitable[None]],
+ args: Tuple[Any, ...],
+ kwargs: Dict[str, Any],
+ ) -> None:
+ self.coro = types.coroutine(coro)
+ self.args = args
+ self.kwargs = kwargs
+
+ async def __call__(self, writer: AbstractStreamWriter) -> None:
+ await self.coro(writer, *self.args, **self.kwargs) # type: ignore
+
+
+class streamer:
+ def __init__(self, coro: Callable[..., Awaitable[None]]) -> None:
+ warnings.warn(
+ "@streamer is deprecated, use async generators instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self.coro = coro
+
+ def __call__(self, *args: Any, **kwargs: Any) -> _stream_wrapper:
+ return _stream_wrapper(self.coro, args, kwargs)
+
+
+@payload_type(_stream_wrapper)
+class StreamWrapperPayload(Payload):
+ async def write(self, writer: AbstractStreamWriter) -> None:
+ await self._value(writer)
+
+
+@payload_type(streamer)
+class StreamPayload(StreamWrapperPayload):
+ def __init__(self, value: Any, *args: Any, **kwargs: Any) -> None:
+ super().__init__(value(), *args, **kwargs)
+
+ async def write(self, writer: AbstractStreamWriter) -> None:
+ await self._value(writer)
diff --git a/third_party/python/aiohttp/aiohttp/py.typed b/third_party/python/aiohttp/aiohttp/py.typed
new file mode 100644
index 0000000000..f5642f79f2
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/py.typed
@@ -0,0 +1 @@
+Marker
diff --git a/third_party/python/aiohttp/aiohttp/pytest_plugin.py b/third_party/python/aiohttp/aiohttp/pytest_plugin.py
new file mode 100644
index 0000000000..5204293410
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/pytest_plugin.py
@@ -0,0 +1,380 @@
+import asyncio
+import contextlib
+import warnings
+from collections.abc import Callable
+
+import pytest
+
+from aiohttp.helpers import PY_37, isasyncgenfunction
+from aiohttp.web import Application
+
+from .test_utils import (
+ BaseTestServer,
+ RawTestServer,
+ TestClient,
+ TestServer,
+ loop_context,
+ setup_test_loop,
+ teardown_test_loop,
+ unused_port as _unused_port,
+)
+
+try:
+ import uvloop
+except ImportError: # pragma: no cover
+ uvloop = None
+
+try:
+ import tokio
+except ImportError: # pragma: no cover
+ tokio = None
+
+
+def pytest_addoption(parser): # type: ignore
+ parser.addoption(
+ "--aiohttp-fast",
+ action="store_true",
+ default=False,
+ help="run tests faster by disabling extra checks",
+ )
+ parser.addoption(
+ "--aiohttp-loop",
+ action="store",
+ default="pyloop",
+ help="run tests with specific loop: pyloop, uvloop, tokio or all",
+ )
+ parser.addoption(
+ "--aiohttp-enable-loop-debug",
+ action="store_true",
+ default=False,
+ help="enable event loop debug mode",
+ )
+
+
+def pytest_fixture_setup(fixturedef): # type: ignore
+ """
+ Allow fixtures to be coroutines. Run coroutine fixtures in an event loop.
+ """
+ func = fixturedef.func
+
+ if isasyncgenfunction(func):
+ # async generator fixture
+ is_async_gen = True
+ elif asyncio.iscoroutinefunction(func):
+ # regular async fixture
+ is_async_gen = False
+ else:
+ # not an async fixture, nothing to do
+ return
+
+ strip_request = False
+ if "request" not in fixturedef.argnames:
+ fixturedef.argnames += ("request",)
+ strip_request = True
+
+ def wrapper(*args, **kwargs): # type: ignore
+ request = kwargs["request"]
+ if strip_request:
+ del kwargs["request"]
+
+ # if neither the fixture nor the test use the 'loop' fixture,
+ # 'getfixturevalue' will fail because the test is not parameterized
+ # (this can be removed someday if 'loop' is no longer parameterized)
+ if "loop" not in request.fixturenames:
+ raise Exception(
+ "Asynchronous fixtures must depend on the 'loop' fixture or "
+ "be used in tests depending from it."
+ )
+
+ _loop = request.getfixturevalue("loop")
+
+ if is_async_gen:
+ # for async generators, we need to advance the generator once,
+ # then advance it again in a finalizer
+ gen = func(*args, **kwargs)
+
+ def finalizer(): # type: ignore
+ try:
+ return _loop.run_until_complete(gen.__anext__())
+ except StopAsyncIteration:
+ pass
+
+ request.addfinalizer(finalizer)
+ return _loop.run_until_complete(gen.__anext__())
+ else:
+ return _loop.run_until_complete(func(*args, **kwargs))
+
+ fixturedef.func = wrapper
+
+
+@pytest.fixture
+def fast(request): # type: ignore
+ """--fast config option"""
+ return request.config.getoption("--aiohttp-fast")
+
+
+@pytest.fixture
+def loop_debug(request): # type: ignore
+ """--enable-loop-debug config option"""
+ return request.config.getoption("--aiohttp-enable-loop-debug")
+
+
+@contextlib.contextmanager
+def _runtime_warning_context(): # type: ignore
+ """
+ Context manager which checks for RuntimeWarnings, specifically to
+ avoid "coroutine 'X' was never awaited" warnings being missed.
+
+ If RuntimeWarnings occur in the context a RuntimeError is raised.
+ """
+ with warnings.catch_warnings(record=True) as _warnings:
+ yield
+ rw = [
+ "{w.filename}:{w.lineno}:{w.message}".format(w=w)
+ for w in _warnings
+ if w.category == RuntimeWarning
+ ]
+ if rw:
+ raise RuntimeError(
+ "{} Runtime Warning{},\n{}".format(
+ len(rw), "" if len(rw) == 1 else "s", "\n".join(rw)
+ )
+ )
+
+
+@contextlib.contextmanager
+def _passthrough_loop_context(loop, fast=False): # type: ignore
+ """
+    Sets up and tears down a loop unless one is passed in via the loop
+    argument, in which case it is passed straight through.
+ """
+ if loop:
+ # loop already exists, pass it straight through
+ yield loop
+ else:
+ # this shadows loop_context's standard behavior
+ loop = setup_test_loop()
+ yield loop
+ teardown_test_loop(loop, fast=fast)
+
+
+def pytest_pycollect_makeitem(collector, name, obj): # type: ignore
+ """
+ Fix pytest collecting for coroutines.
+ """
+ if collector.funcnamefilter(name) and asyncio.iscoroutinefunction(obj):
+ return list(collector._genfunctions(name, obj))
+
+
+def pytest_pyfunc_call(pyfuncitem): # type: ignore
+ """
+ Run coroutines in an event loop instead of a normal function call.
+ """
+ fast = pyfuncitem.config.getoption("--aiohttp-fast")
+ if asyncio.iscoroutinefunction(pyfuncitem.function):
+ existing_loop = pyfuncitem.funcargs.get(
+ "proactor_loop"
+ ) or pyfuncitem.funcargs.get("loop", None)
+ with _runtime_warning_context():
+ with _passthrough_loop_context(existing_loop, fast=fast) as _loop:
+ testargs = {
+ arg: pyfuncitem.funcargs[arg]
+ for arg in pyfuncitem._fixtureinfo.argnames
+ }
+ _loop.run_until_complete(pyfuncitem.obj(**testargs))
+
+ return True
+
+
+def pytest_generate_tests(metafunc): # type: ignore
+ if "loop_factory" not in metafunc.fixturenames:
+ return
+
+ loops = metafunc.config.option.aiohttp_loop
+ avail_factories = {"pyloop": asyncio.DefaultEventLoopPolicy}
+
+ if uvloop is not None: # pragma: no cover
+ avail_factories["uvloop"] = uvloop.EventLoopPolicy
+
+ if tokio is not None: # pragma: no cover
+ avail_factories["tokio"] = tokio.EventLoopPolicy
+
+ if loops == "all":
+ loops = "pyloop,uvloop?,tokio?"
+
+ factories = {} # type: ignore
+ for name in loops.split(","):
+ required = not name.endswith("?")
+ name = name.strip(" ?")
+ if name not in avail_factories: # pragma: no cover
+ if required:
+ raise ValueError(
+ "Unknown loop '%s', available loops: %s"
+ % (name, list(factories.keys()))
+ )
+ else:
+ continue
+ factories[name] = avail_factories[name]
+ metafunc.parametrize(
+ "loop_factory", list(factories.values()), ids=list(factories.keys())
+ )
+
+
+@pytest.fixture
+def loop(loop_factory, fast, loop_debug): # type: ignore
+ """Return an instance of the event loop."""
+ policy = loop_factory()
+ asyncio.set_event_loop_policy(policy)
+ with loop_context(fast=fast) as _loop:
+ if loop_debug:
+ _loop.set_debug(True) # pragma: no cover
+ asyncio.set_event_loop(_loop)
+ yield _loop
+
+
+@pytest.fixture
+def proactor_loop(): # type: ignore
+ if not PY_37:
+ policy = asyncio.get_event_loop_policy()
+ policy._loop_factory = asyncio.ProactorEventLoop # type: ignore
+ else:
+ policy = asyncio.WindowsProactorEventLoopPolicy() # type: ignore
+ asyncio.set_event_loop_policy(policy)
+
+ with loop_context(policy.new_event_loop) as _loop:
+ asyncio.set_event_loop(_loop)
+ yield _loop
+
+
+@pytest.fixture
+def unused_port(aiohttp_unused_port): # type: ignore # pragma: no cover
+ warnings.warn(
+ "Deprecated, use aiohttp_unused_port fixture instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return aiohttp_unused_port
+
+
+@pytest.fixture
+def aiohttp_unused_port(): # type: ignore
+ """Return a port that is unused on the current host."""
+ return _unused_port
+
+
+@pytest.fixture
+def aiohttp_server(loop): # type: ignore
+ """Factory to create a TestServer instance, given an app.
+
+ aiohttp_server(app, **kwargs)
+ """
+ servers = []
+
+ async def go(app, *, port=None, **kwargs): # type: ignore
+ server = TestServer(app, port=port)
+ await server.start_server(loop=loop, **kwargs)
+ servers.append(server)
+ return server
+
+ yield go
+
+ async def finalize(): # type: ignore
+ while servers:
+ await servers.pop().close()
+
+ loop.run_until_complete(finalize())
+
+
+@pytest.fixture
+def test_server(aiohttp_server): # type: ignore # pragma: no cover
+ warnings.warn(
+ "Deprecated, use aiohttp_server fixture instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return aiohttp_server
+
+
+@pytest.fixture
+def aiohttp_raw_server(loop): # type: ignore
+ """Factory to create a RawTestServer instance, given a web handler.
+
+ aiohttp_raw_server(handler, **kwargs)
+ """
+ servers = []
+
+ async def go(handler, *, port=None, **kwargs): # type: ignore
+ server = RawTestServer(handler, port=port)
+ await server.start_server(loop=loop, **kwargs)
+ servers.append(server)
+ return server
+
+ yield go
+
+ async def finalize(): # type: ignore
+ while servers:
+ await servers.pop().close()
+
+ loop.run_until_complete(finalize())
+
+
+@pytest.fixture
+def raw_test_server(aiohttp_raw_server): # type: ignore # pragma: no cover
+ warnings.warn(
+ "Deprecated, use aiohttp_raw_server fixture instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return aiohttp_raw_server
+
+
+@pytest.fixture
+def aiohttp_client(loop): # type: ignore
+ """Factory to create a TestClient instance.
+
+ aiohttp_client(app, **kwargs)
+ aiohttp_client(server, **kwargs)
+ aiohttp_client(raw_server, **kwargs)
+ """
+ clients = []
+
+ async def go(__param, *args, server_kwargs=None, **kwargs): # type: ignore
+
+ if isinstance(__param, Callable) and not isinstance( # type: ignore
+ __param, (Application, BaseTestServer)
+ ):
+ __param = __param(loop, *args, **kwargs)
+ kwargs = {}
+ else:
+ assert not args, "args should be empty"
+
+ if isinstance(__param, Application):
+ server_kwargs = server_kwargs or {}
+ server = TestServer(__param, loop=loop, **server_kwargs)
+ client = TestClient(server, loop=loop, **kwargs)
+ elif isinstance(__param, BaseTestServer):
+ client = TestClient(__param, loop=loop, **kwargs)
+ else:
+ raise ValueError("Unknown argument type: %r" % type(__param))
+
+ await client.start_server()
+ clients.append(client)
+ return client
+
+ yield go
+
+ async def finalize(): # type: ignore
+ while clients:
+ await clients.pop().close()
+
+ loop.run_until_complete(finalize())
+
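+# A hedged example of a test that uses the aiohttp_client fixture
+# (the ``hello`` handler and the ``web`` import are assumptions for
+# illustration):
+#
+#     from aiohttp import web
+#
+#     async def test_hello(aiohttp_client):
+#         async def hello(request):
+#             return web.Response(text="Hello")
+#
+#         app = web.Application()
+#         app.router.add_get("/", hello)
+#         client = await aiohttp_client(app)
+#         resp = await client.get("/")
+#         assert resp.status == 200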
+
+@pytest.fixture
+def test_client(aiohttp_client): # type: ignore # pragma: no cover
+ warnings.warn(
+ "Deprecated, use aiohttp_client fixture instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return aiohttp_client
diff --git a/third_party/python/aiohttp/aiohttp/resolver.py b/third_party/python/aiohttp/aiohttp/resolver.py
new file mode 100644
index 0000000000..2974bcad7a
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/resolver.py
@@ -0,0 +1,149 @@
+import asyncio
+import socket
+from typing import Any, Dict, List, Optional
+
+from .abc import AbstractResolver
+from .helpers import get_running_loop
+
+__all__ = ("ThreadedResolver", "AsyncResolver", "DefaultResolver")
+
+try:
+ import aiodns
+
+ # aiodns_default = hasattr(aiodns.DNSResolver, 'gethostbyname')
+except ImportError: # pragma: no cover
+ aiodns = None
+
+aiodns_default = False
+
+
+class ThreadedResolver(AbstractResolver):
+ """Use Executor for synchronous getaddrinfo() calls, which defaults to
+ concurrent.futures.ThreadPoolExecutor.
+ """
+
+ def __init__(self, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
+ self._loop = get_running_loop(loop)
+
+ async def resolve(
+ self, hostname: str, port: int = 0, family: int = socket.AF_INET
+ ) -> List[Dict[str, Any]]:
+ infos = await self._loop.getaddrinfo(
+ hostname,
+ port,
+ type=socket.SOCK_STREAM,
+ family=family,
+ flags=socket.AI_ADDRCONFIG,
+ )
+
+ hosts = []
+ for family, _, proto, _, address in infos:
+ if family == socket.AF_INET6 and address[3]: # type: ignore
+ # This is essential for link-local IPv6 addresses.
+                # Link-local IPv6 is a very rare case. Strictly speaking, we
+                # should use getnameinfo() unconditionally, but for performance
+                # we only do so when it is actually needed.
+ host, _port = socket.getnameinfo(
+ address, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV
+ )
+ port = int(_port)
+ else:
+ host, port = address[:2]
+ hosts.append(
+ {
+ "hostname": hostname,
+ "host": host,
+ "port": port,
+ "family": family,
+ "proto": proto,
+ "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
+ }
+ )
+
+ return hosts
+
+ async def close(self) -> None:
+ pass
+
+
+class AsyncResolver(AbstractResolver):
+ """Use the `aiodns` package to make asynchronous DNS lookups"""
+
+ def __init__(
+ self,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ *args: Any,
+ **kwargs: Any
+ ) -> None:
+ if aiodns is None:
+ raise RuntimeError("Resolver requires aiodns library")
+
+ self._loop = get_running_loop(loop)
+ self._resolver = aiodns.DNSResolver(*args, loop=loop, **kwargs)
+
+ if not hasattr(self._resolver, "gethostbyname"):
+ # aiodns 1.1 is not available, fallback to DNSResolver.query
+ self.resolve = self._resolve_with_query # type: ignore
+
+ async def resolve(
+ self, host: str, port: int = 0, family: int = socket.AF_INET
+ ) -> List[Dict[str, Any]]:
+ try:
+ resp = await self._resolver.gethostbyname(host, family)
+ except aiodns.error.DNSError as exc:
+ msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
+ raise OSError(msg) from exc
+ hosts = []
+ for address in resp.addresses:
+ hosts.append(
+ {
+ "hostname": host,
+ "host": address,
+ "port": port,
+ "family": family,
+ "proto": 0,
+ "flags": socket.AI_NUMERICHOST | socket.AI_NUMERICSERV,
+ }
+ )
+
+ if not hosts:
+ raise OSError("DNS lookup failed")
+
+ return hosts
+
+ async def _resolve_with_query(
+ self, host: str, port: int = 0, family: int = socket.AF_INET
+ ) -> List[Dict[str, Any]]:
+ if family == socket.AF_INET6:
+ qtype = "AAAA"
+ else:
+ qtype = "A"
+
+ try:
+ resp = await self._resolver.query(host, qtype)
+ except aiodns.error.DNSError as exc:
+ msg = exc.args[1] if len(exc.args) >= 1 else "DNS lookup failed"
+ raise OSError(msg) from exc
+
+ hosts = []
+ for rr in resp:
+ hosts.append(
+ {
+ "hostname": host,
+ "host": rr.host,
+ "port": port,
+ "family": family,
+ "proto": 0,
+ "flags": socket.AI_NUMERICHOST,
+ }
+ )
+
+ if not hosts:
+ raise OSError("DNS lookup failed")
+
+ return hosts
+
+ async def close(self) -> None:
+ return self._resolver.cancel()
+
+
+DefaultResolver = AsyncResolver if aiodns_default else ThreadedResolver
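+
+# A minimal usage sketch (run from inside a coroutine; the host name is
+# illustrative):
+#
+#     resolver = DefaultResolver()
+#     hosts = await resolver.resolve("example.com", 80)
+#     await resolver.close()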
diff --git a/third_party/python/aiohttp/aiohttp/signals.py b/third_party/python/aiohttp/aiohttp/signals.py
new file mode 100644
index 0000000000..d406c02423
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/signals.py
@@ -0,0 +1,34 @@
+from aiohttp.frozenlist import FrozenList
+
+__all__ = ("Signal",)
+
+
+class Signal(FrozenList):
+ """Coroutine-based signal implementation.
+
+ To connect a callback to a signal, use any list method.
+
+ Signals are fired using the send() coroutine, which takes named
+ arguments.
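+
+    A minimal usage sketch (``owner`` and the async callable ``receiver``
+    are assumptions for illustration)::
+
+        signal = Signal(owner)
+        signal.append(receiver)
+        signal.freeze()
+        await signal.send("data", key="value")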
+ """
+
+ __slots__ = ("_owner",)
+
+ def __init__(self, owner):
+ super().__init__()
+ self._owner = owner
+
+ def __repr__(self):
+ return "<Signal owner={}, frozen={}, {!r}>".format(
+ self._owner, self.frozen, list(self)
+ )
+
+ async def send(self, *args, **kwargs):
+ """
+ Sends data to all registered receivers.
+ """
+ if not self.frozen:
+ raise RuntimeError("Cannot send non-frozen signal.")
+
+ for receiver in self:
+ await receiver(*args, **kwargs) # type: ignore
diff --git a/third_party/python/aiohttp/aiohttp/signals.pyi b/third_party/python/aiohttp/aiohttp/signals.pyi
new file mode 100644
index 0000000000..455f8e2f22
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/signals.pyi
@@ -0,0 +1,12 @@
+from typing import Any, Generic, TypeVar
+
+from aiohttp.frozenlist import FrozenList
+
+__all__ = ("Signal",)
+
+_T = TypeVar("_T")
+
+class Signal(FrozenList[_T], Generic[_T]):
+ def __init__(self, owner: Any) -> None: ...
+ def __repr__(self) -> str: ...
+ async def send(self, *args: Any, **kwargs: Any) -> None: ...
diff --git a/third_party/python/aiohttp/aiohttp/streams.py b/third_party/python/aiohttp/aiohttp/streams.py
new file mode 100644
index 0000000000..42970b531d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/streams.py
@@ -0,0 +1,647 @@
+import asyncio
+import collections
+import warnings
+from typing import Awaitable, Callable, Generic, List, Optional, Tuple, TypeVar
+
+from .base_protocol import BaseProtocol
+from .helpers import BaseTimerContext, set_exception, set_result
+from .log import internal_logger
+
+try: # pragma: no cover
+ from typing import Deque
+except ImportError:
+ from typing_extensions import Deque
+
+__all__ = (
+ "EMPTY_PAYLOAD",
+ "EofStream",
+ "StreamReader",
+ "DataQueue",
+ "FlowControlDataQueue",
+)
+
+_T = TypeVar("_T")
+
+
+class EofStream(Exception):
+ """eof stream indication."""
+
+
+class AsyncStreamIterator(Generic[_T]):
+ def __init__(self, read_func: Callable[[], Awaitable[_T]]) -> None:
+ self.read_func = read_func
+
+ def __aiter__(self) -> "AsyncStreamIterator[_T]":
+ return self
+
+ async def __anext__(self) -> _T:
+ try:
+ rv = await self.read_func()
+ except EofStream:
+ raise StopAsyncIteration
+ if rv == b"":
+ raise StopAsyncIteration
+ return rv
+
+
+class ChunkTupleAsyncStreamIterator:
+ def __init__(self, stream: "StreamReader") -> None:
+ self._stream = stream
+
+ def __aiter__(self) -> "ChunkTupleAsyncStreamIterator":
+ return self
+
+ async def __anext__(self) -> Tuple[bytes, bool]:
+ rv = await self._stream.readchunk()
+ if rv == (b"", False):
+ raise StopAsyncIteration
+ return rv
+
+
+class AsyncStreamReaderMixin:
+ def __aiter__(self) -> AsyncStreamIterator[bytes]:
+ return AsyncStreamIterator(self.readline) # type: ignore
+
+ def iter_chunked(self, n: int) -> AsyncStreamIterator[bytes]:
+ """Returns an asynchronous iterator that yields chunks of size n.
+
+ Python-3.5 available for Python 3.5+ only
+ """
+ return AsyncStreamIterator(lambda: self.read(n)) # type: ignore
+
+ def iter_any(self) -> AsyncStreamIterator[bytes]:
+ """Returns an asynchronous iterator that yields all the available
+ data as soon as it is received
+
+ Python-3.5 available for Python 3.5+ only
+ """
+ return AsyncStreamIterator(self.readany) # type: ignore
+
+ def iter_chunks(self) -> ChunkTupleAsyncStreamIterator:
+ """Returns an asynchronous iterator that yields chunks of data
+ as they are received by the server. The yielded objects are tuples
+ of (bytes, bool) as returned by the StreamReader.readchunk method.
+
+ Python-3.5 available for Python 3.5+ only
+ """
+ return ChunkTupleAsyncStreamIterator(self) # type: ignore
+
+
+class StreamReader(AsyncStreamReaderMixin):
+ """An enhancement of asyncio.StreamReader.
+
+ Supports asynchronous iteration by line, chunk or as available::
+
+ async for line in reader:
+ ...
+ async for chunk in reader.iter_chunked(1024):
+ ...
+ async for slice in reader.iter_any():
+ ...
+
+ """
+
+ total_bytes = 0
+
+ def __init__(
+ self,
+ protocol: BaseProtocol,
+ limit: int,
+ *,
+ timer: Optional[BaseTimerContext] = None,
+ loop: Optional[asyncio.AbstractEventLoop] = None
+ ) -> None:
+ self._protocol = protocol
+ self._low_water = limit
+ self._high_water = limit * 2
+ if loop is None:
+ loop = asyncio.get_event_loop()
+ self._loop = loop
+ self._size = 0
+ self._cursor = 0
+ self._http_chunk_splits = None # type: Optional[List[int]]
+ self._buffer = collections.deque() # type: Deque[bytes]
+ self._buffer_offset = 0
+ self._eof = False
+ self._waiter = None # type: Optional[asyncio.Future[None]]
+ self._eof_waiter = None # type: Optional[asyncio.Future[None]]
+ self._exception = None # type: Optional[BaseException]
+ self._timer = timer
+ self._eof_callbacks = [] # type: List[Callable[[], None]]
+
+ def __repr__(self) -> str:
+ info = [self.__class__.__name__]
+ if self._size:
+ info.append("%d bytes" % self._size)
+ if self._eof:
+ info.append("eof")
+ if self._low_water != 2 ** 16: # default limit
+ info.append("low=%d high=%d" % (self._low_water, self._high_water))
+ if self._waiter:
+ info.append("w=%r" % self._waiter)
+ if self._exception:
+ info.append("e=%r" % self._exception)
+ return "<%s>" % " ".join(info)
+
+ def get_read_buffer_limits(self) -> Tuple[int, int]:
+ return (self._low_water, self._high_water)
+
+ def exception(self) -> Optional[BaseException]:
+ return self._exception
+
+ def set_exception(self, exc: BaseException) -> None:
+ self._exception = exc
+ self._eof_callbacks.clear()
+
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ set_exception(waiter, exc)
+
+ waiter = self._eof_waiter
+ if waiter is not None:
+ self._eof_waiter = None
+ set_exception(waiter, exc)
+
+ def on_eof(self, callback: Callable[[], None]) -> None:
+ if self._eof:
+ try:
+ callback()
+ except Exception:
+ internal_logger.exception("Exception in eof callback")
+ else:
+ self._eof_callbacks.append(callback)
+
+ def feed_eof(self) -> None:
+ self._eof = True
+
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ set_result(waiter, None)
+
+ waiter = self._eof_waiter
+ if waiter is not None:
+ self._eof_waiter = None
+ set_result(waiter, None)
+
+ for cb in self._eof_callbacks:
+ try:
+ cb()
+ except Exception:
+ internal_logger.exception("Exception in eof callback")
+
+ self._eof_callbacks.clear()
+
+ def is_eof(self) -> bool:
+ """Return True if 'feed_eof' was called."""
+ return self._eof
+
+ def at_eof(self) -> bool:
+ """Return True if the buffer is empty and 'feed_eof' was called."""
+ return self._eof and not self._buffer
+
+ async def wait_eof(self) -> None:
+ if self._eof:
+ return
+
+ assert self._eof_waiter is None
+ self._eof_waiter = self._loop.create_future()
+ try:
+ await self._eof_waiter
+ finally:
+ self._eof_waiter = None
+
+ def unread_data(self, data: bytes) -> None:
+ """rollback reading some data from stream, inserting it to buffer head."""
+ warnings.warn(
+ "unread_data() is deprecated "
+ "and will be removed in future releases (#3260)",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ if not data:
+ return
+
+ if self._buffer_offset:
+ self._buffer[0] = self._buffer[0][self._buffer_offset :]
+ self._buffer_offset = 0
+ self._size += len(data)
+ self._cursor -= len(data)
+ self._buffer.appendleft(data)
+ self._eof_counter = 0
+
+ # TODO: size is ignored, remove the param later
+ def feed_data(self, data: bytes, size: int = 0) -> None:
+ assert not self._eof, "feed_data after feed_eof"
+
+ if not data:
+ return
+
+ self._size += len(data)
+ self._buffer.append(data)
+ self.total_bytes += len(data)
+
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ set_result(waiter, None)
+
+ if self._size > self._high_water and not self._protocol._reading_paused:
+ self._protocol.pause_reading()
+
+ def begin_http_chunk_receiving(self) -> None:
+ if self._http_chunk_splits is None:
+ if self.total_bytes:
+ raise RuntimeError(
+ "Called begin_http_chunk_receiving when" "some data was already fed"
+ )
+ self._http_chunk_splits = []
+
+ def end_http_chunk_receiving(self) -> None:
+ if self._http_chunk_splits is None:
+ raise RuntimeError(
+ "Called end_chunk_receiving without calling "
+ "begin_chunk_receiving first"
+ )
+
+ # self._http_chunk_splits contains logical byte offsets from start of
+ # the body transfer. Each offset is the offset of the end of a chunk.
+ # "Logical" means bytes, accessible for a user.
+ # If no chunks containig logical data were received, current position
+ # is difinitely zero.
+ pos = self._http_chunk_splits[-1] if self._http_chunk_splits else 0
+
+ if self.total_bytes == pos:
+ # We should not add empty chunks here. So we check for that.
+ # Note, when chunked + gzip is used, we can receive a chunk
+ # of compressed data, but that data may not be enough for gzip FSM
+ # to yield any uncompressed data. That's why current position may
+ # not change after receiving a chunk.
+ return
+
+ self._http_chunk_splits.append(self.total_bytes)
+
+ # wake up readchunk when end of http chunk received
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ set_result(waiter, None)
+
+ async def _wait(self, func_name: str) -> None:
+ # StreamReader uses a future to link the protocol feed_data() method
+ # to a read coroutine. Running two read coroutines at the same time
+        # would have unexpected behaviour: it would not be possible to know
+        # which coroutine would get the next data.
+ if self._waiter is not None:
+ raise RuntimeError(
+ "%s() called while another coroutine is "
+ "already waiting for incoming data" % func_name
+ )
+
+ waiter = self._waiter = self._loop.create_future()
+ try:
+ if self._timer:
+ with self._timer:
+ await waiter
+ else:
+ await waiter
+ finally:
+ self._waiter = None
+
+ async def readline(self) -> bytes:
+ if self._exception is not None:
+ raise self._exception
+
+ line = []
+ line_size = 0
+ not_enough = True
+
+ while not_enough:
+ while self._buffer and not_enough:
+ offset = self._buffer_offset
+ ichar = self._buffer[0].find(b"\n", offset) + 1
+ # Read from current offset to found b'\n' or to the end.
+ data = self._read_nowait_chunk(ichar - offset if ichar else -1)
+ line.append(data)
+ line_size += len(data)
+ if ichar:
+ not_enough = False
+
+ if line_size > self._high_water:
+ raise ValueError("Line is too long")
+
+ if self._eof:
+ break
+
+ if not_enough:
+ await self._wait("readline")
+
+ return b"".join(line)
+
+ async def read(self, n: int = -1) -> bytes:
+ if self._exception is not None:
+ raise self._exception
+
+        # Migration problem: with DataQueue you have to catch the EofStream
+        # exception, so the common pattern is to run payload.read() inside an
+        # infinite loop, which can turn into a real infinite loop with
+        # StreamReader. Let's keep this check for one major release.
+ if __debug__:
+ if self._eof and not self._buffer:
+ self._eof_counter = getattr(self, "_eof_counter", 0) + 1
+ if self._eof_counter > 5:
+ internal_logger.warning(
+ "Multiple access to StreamReader in eof state, "
+ "might be infinite loop.",
+ stack_info=True,
+ )
+
+ if not n:
+ return b""
+
+ if n < 0:
+ # This used to just loop creating a new waiter hoping to
+ # collect everything in self._buffer, but that would
+ # deadlock if the subprocess sends more than self.limit
+ # bytes. So just call self.readany() until EOF.
+ blocks = []
+ while True:
+ block = await self.readany()
+ if not block:
+ break
+ blocks.append(block)
+ return b"".join(blocks)
+
+ # TODO: should be `if` instead of `while`
+ # because waiter maybe triggered on chunk end,
+ # without feeding any data
+ while not self._buffer and not self._eof:
+ await self._wait("read")
+
+ return self._read_nowait(n)
+
+ async def readany(self) -> bytes:
+ if self._exception is not None:
+ raise self._exception
+
+ # TODO: should be `if` instead of `while`
+ # because waiter maybe triggered on chunk end,
+ # without feeding any data
+ while not self._buffer and not self._eof:
+ await self._wait("readany")
+
+ return self._read_nowait(-1)
+
+ async def readchunk(self) -> Tuple[bytes, bool]:
+ """Returns a tuple of (data, end_of_http_chunk). When chunked transfer
+ encoding is used, end_of_http_chunk is a boolean indicating if the end
+        of the data corresponds to the end of an HTTP chunk; otherwise it is
+ always False.
+ """
+ while True:
+ if self._exception is not None:
+ raise self._exception
+
+ while self._http_chunk_splits:
+ pos = self._http_chunk_splits.pop(0)
+ if pos == self._cursor:
+ return (b"", True)
+ if pos > self._cursor:
+ return (self._read_nowait(pos - self._cursor), True)
+ internal_logger.warning(
+ "Skipping HTTP chunk end due to data "
+ "consumption beyond chunk boundary"
+ )
+
+ if self._buffer:
+ return (self._read_nowait_chunk(-1), False)
+ # return (self._read_nowait(-1), False)
+
+ if self._eof:
+ # Special case for signifying EOF.
+ # (b'', True) is not a final return value actually.
+ return (b"", False)
+
+ await self._wait("readchunk")
+
+ async def readexactly(self, n: int) -> bytes:
+ if self._exception is not None:
+ raise self._exception
+
+ blocks = [] # type: List[bytes]
+ while n > 0:
+ block = await self.read(n)
+ if not block:
+ partial = b"".join(blocks)
+ raise asyncio.IncompleteReadError(partial, len(partial) + n)
+ blocks.append(block)
+ n -= len(block)
+
+ return b"".join(blocks)
+
+ def read_nowait(self, n: int = -1) -> bytes:
+ # default was changed to be consistent with .read(-1)
+ #
+        # I believe most users don't know about the method
+        # and are not affected.
+ if self._exception is not None:
+ raise self._exception
+
+ if self._waiter and not self._waiter.done():
+ raise RuntimeError(
+ "Called while some coroutine is waiting for incoming data."
+ )
+
+ return self._read_nowait(n)
+
+ def _read_nowait_chunk(self, n: int) -> bytes:
+ first_buffer = self._buffer[0]
+ offset = self._buffer_offset
+ if n != -1 and len(first_buffer) - offset > n:
+ data = first_buffer[offset : offset + n]
+ self._buffer_offset += n
+
+ elif offset:
+ self._buffer.popleft()
+ data = first_buffer[offset:]
+ self._buffer_offset = 0
+
+ else:
+ data = self._buffer.popleft()
+
+ self._size -= len(data)
+ self._cursor += len(data)
+
+ chunk_splits = self._http_chunk_splits
+ # Prevent memory leak: drop useless chunk splits
+ while chunk_splits and chunk_splits[0] < self._cursor:
+ chunk_splits.pop(0)
+
+ if self._size < self._low_water and self._protocol._reading_paused:
+ self._protocol.resume_reading()
+ return data
+
+ def _read_nowait(self, n: int) -> bytes:
+ """ Read not more than n bytes, or whole buffer if n == -1 """
+ chunks = []
+
+ while self._buffer:
+ chunk = self._read_nowait_chunk(n)
+ chunks.append(chunk)
+ if n != -1:
+ n -= len(chunk)
+ if n == 0:
+ break
+
+ return b"".join(chunks) if chunks else b""
+
+
+class EmptyStreamReader(AsyncStreamReaderMixin):
+ def exception(self) -> Optional[BaseException]:
+ return None
+
+ def set_exception(self, exc: BaseException) -> None:
+ pass
+
+ def on_eof(self, callback: Callable[[], None]) -> None:
+ try:
+ callback()
+ except Exception:
+ internal_logger.exception("Exception in eof callback")
+
+ def feed_eof(self) -> None:
+ pass
+
+ def is_eof(self) -> bool:
+ return True
+
+ def at_eof(self) -> bool:
+ return True
+
+ async def wait_eof(self) -> None:
+ return
+
+ def feed_data(self, data: bytes, n: int = 0) -> None:
+ pass
+
+ async def readline(self) -> bytes:
+ return b""
+
+ async def read(self, n: int = -1) -> bytes:
+ return b""
+
+ async def readany(self) -> bytes:
+ return b""
+
+ async def readchunk(self) -> Tuple[bytes, bool]:
+ return (b"", True)
+
+ async def readexactly(self, n: int) -> bytes:
+ raise asyncio.IncompleteReadError(b"", n)
+
+ def read_nowait(self) -> bytes:
+ return b""
+
+
+EMPTY_PAYLOAD = EmptyStreamReader()
+
+
+class DataQueue(Generic[_T]):
+ """DataQueue is a general-purpose blocking queue with one reader."""
+
+ def __init__(self, loop: asyncio.AbstractEventLoop) -> None:
+ self._loop = loop
+ self._eof = False
+ self._waiter = None # type: Optional[asyncio.Future[None]]
+ self._exception = None # type: Optional[BaseException]
+ self._size = 0
+ self._buffer = collections.deque() # type: Deque[Tuple[_T, int]]
+
+ def __len__(self) -> int:
+ return len(self._buffer)
+
+ def is_eof(self) -> bool:
+ return self._eof
+
+ def at_eof(self) -> bool:
+ return self._eof and not self._buffer
+
+ def exception(self) -> Optional[BaseException]:
+ return self._exception
+
+ def set_exception(self, exc: BaseException) -> None:
+ self._eof = True
+ self._exception = exc
+
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ set_exception(waiter, exc)
+
+ def feed_data(self, data: _T, size: int = 0) -> None:
+ self._size += size
+ self._buffer.append((data, size))
+
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ set_result(waiter, None)
+
+ def feed_eof(self) -> None:
+ self._eof = True
+
+ waiter = self._waiter
+ if waiter is not None:
+ self._waiter = None
+ set_result(waiter, None)
+
+ async def read(self) -> _T:
+ if not self._buffer and not self._eof:
+ assert not self._waiter
+ self._waiter = self._loop.create_future()
+ try:
+ await self._waiter
+ except (asyncio.CancelledError, asyncio.TimeoutError):
+ self._waiter = None
+ raise
+
+ if self._buffer:
+ data, size = self._buffer.popleft()
+ self._size -= size
+ return data
+ else:
+ if self._exception is not None:
+ raise self._exception
+ else:
+ raise EofStream
+
+ def __aiter__(self) -> AsyncStreamIterator[_T]:
+ return AsyncStreamIterator(self.read)
+
+
+class FlowControlDataQueue(DataQueue[_T]):
+ """FlowControlDataQueue resumes and pauses an underlying stream.
+
+ It is a destination for parsed data."""
+
+ def __init__(
+ self, protocol: BaseProtocol, limit: int, *, loop: asyncio.AbstractEventLoop
+ ) -> None:
+ super().__init__(loop=loop)
+
+ self._protocol = protocol
+ self._limit = limit * 2
+
+ def feed_data(self, data: _T, size: int = 0) -> None:
+ super().feed_data(data, size)
+
+ if self._size > self._limit and not self._protocol._reading_paused:
+ self._protocol.pause_reading()
+
+ async def read(self) -> _T:
+ try:
+ return await super().read()
+ finally:
+ if self._size < self._limit and self._protocol._reading_paused:
+ self._protocol.resume_reading()
diff --git a/third_party/python/aiohttp/aiohttp/tcp_helpers.py b/third_party/python/aiohttp/aiohttp/tcp_helpers.py
new file mode 100644
index 0000000000..0e1dbf1655
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/tcp_helpers.py
@@ -0,0 +1,38 @@
+"""Helper methods to tune a TCP connection"""
+
+import asyncio
+import socket
+from contextlib import suppress
+from typing import Optional # noqa
+
+__all__ = ("tcp_keepalive", "tcp_nodelay")
+
+
+if hasattr(socket, "SO_KEEPALIVE"):
+
+ def tcp_keepalive(transport: asyncio.Transport) -> None:
+ sock = transport.get_extra_info("socket")
+ if sock is not None:
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
+
+
+else:
+
+ def tcp_keepalive(transport: asyncio.Transport) -> None: # pragma: no cover
+ pass
+
+
+def tcp_nodelay(transport: asyncio.Transport, value: bool) -> None:
+ sock = transport.get_extra_info("socket")
+
+ if sock is None:
+ return
+
+ if sock.family not in (socket.AF_INET, socket.AF_INET6):
+ return
+
+ value = bool(value)
+
+    # the socket may already be closed; on Windows an OSError is raised
+ with suppress(OSError):
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, value)
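+
+# A minimal usage sketch (assuming ``transport`` is a connected
+# asyncio.Transport):
+#
+#     tcp_keepalive(transport)
+#     tcp_nodelay(transport, True)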
diff --git a/third_party/python/aiohttp/aiohttp/test_utils.py b/third_party/python/aiohttp/aiohttp/test_utils.py
new file mode 100644
index 0000000000..7a9ca7ddf3
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/test_utils.py
@@ -0,0 +1,676 @@
+"""Utilities shared by tests."""
+
+import asyncio
+import contextlib
+import functools
+import gc
+import inspect
+import os
+import socket
+import sys
+import unittest
+from abc import ABC, abstractmethod
+from types import TracebackType
+from typing import TYPE_CHECKING, Any, Callable, Iterator, List, Optional, Type, Union
+from unittest import mock
+
+from multidict import CIMultiDict, CIMultiDictProxy
+from yarl import URL
+
+import aiohttp
+from aiohttp.client import (
+ ClientResponse,
+ _RequestContextManager,
+ _WSRequestContextManager,
+)
+
+from . import ClientSession, hdrs
+from .abc import AbstractCookieJar
+from .client_reqrep import ClientResponse
+from .client_ws import ClientWebSocketResponse
+from .helpers import sentinel
+from .http import HttpVersion, RawRequestMessage
+from .signals import Signal
+from .web import (
+ Application,
+ AppRunner,
+ BaseRunner,
+ Request,
+ Server,
+ ServerRunner,
+ SockSite,
+ UrlMappingMatchInfo,
+)
+from .web_protocol import _RequestHandler
+
+if TYPE_CHECKING: # pragma: no cover
+ from ssl import SSLContext
+else:
+ SSLContext = None
+
+
+REUSE_ADDRESS = os.name == "posix" and sys.platform != "cygwin"
+
+
+def get_unused_port_socket(host: str) -> socket.socket:
+ return get_port_socket(host, 0)
+
+
+def get_port_socket(host: str, port: int) -> socket.socket:
+ s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ if REUSE_ADDRESS:
+ # Windows has different semantics for SO_REUSEADDR,
+ # so don't set it. Ref:
+ # https://docs.microsoft.com/en-us/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
+ s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ s.bind((host, port))
+ return s
+
+
+def unused_port() -> int:
+ """Return a port that is unused on the current host."""
+ with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+ s.bind(("127.0.0.1", 0))
+ return s.getsockname()[1]
+
+
+class BaseTestServer(ABC):
+ __test__ = False
+
+ def __init__(
+ self,
+ *,
+ scheme: Union[str, object] = sentinel,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ host: str = "127.0.0.1",
+ port: Optional[int] = None,
+ skip_url_asserts: bool = False,
+ **kwargs: Any,
+ ) -> None:
+ self._loop = loop
+ self.runner = None # type: Optional[BaseRunner]
+ self._root = None # type: Optional[URL]
+ self.host = host
+ self.port = port
+ self._closed = False
+ self.scheme = scheme
+ self.skip_url_asserts = skip_url_asserts
+
+ async def start_server(
+ self, loop: Optional[asyncio.AbstractEventLoop] = None, **kwargs: Any
+ ) -> None:
+ if self.runner:
+ return
+ self._loop = loop
+ self._ssl = kwargs.pop("ssl", None)
+ self.runner = await self._make_runner(**kwargs)
+ await self.runner.setup()
+ if not self.port:
+ self.port = 0
+ _sock = get_port_socket(self.host, self.port)
+ self.host, self.port = _sock.getsockname()[:2]
+ site = SockSite(self.runner, sock=_sock, ssl_context=self._ssl)
+ await site.start()
+ server = site._server
+ assert server is not None
+ sockets = server.sockets
+ assert sockets is not None
+ self.port = sockets[0].getsockname()[1]
+ if self.scheme is sentinel:
+ if self._ssl:
+ scheme = "https"
+ else:
+ scheme = "http"
+ self.scheme = scheme
+ self._root = URL(f"{self.scheme}://{self.host}:{self.port}")
+
+ @abstractmethod # pragma: no cover
+ async def _make_runner(self, **kwargs: Any) -> BaseRunner:
+ pass
+
+ def make_url(self, path: str) -> URL:
+ assert self._root is not None
+ url = URL(path)
+ if not self.skip_url_asserts:
+ assert not url.is_absolute()
+ return self._root.join(url)
+ else:
+ return URL(str(self._root) + path)
+
+ @property
+ def started(self) -> bool:
+ return self.runner is not None
+
+ @property
+ def closed(self) -> bool:
+ return self._closed
+
+ @property
+ def handler(self) -> Server:
+ # for backward compatibility
+ # web.Server instance
+ runner = self.runner
+ assert runner is not None
+ assert runner.server is not None
+ return runner.server
+
+ async def close(self) -> None:
+ """Close all fixtures created by the test client.
+
+ After that point, the TestClient is no longer usable.
+
+ This is an idempotent function: running close multiple times
+ will not have any additional effects.
+
+ close is also run when the object is garbage collected, and on
+ exit when used as a context manager.
+
+ """
+ if self.started and not self.closed:
+ assert self.runner is not None
+ await self.runner.cleanup()
+ self._root = None
+ self.port = None
+ self._closed = True
+
+ def __enter__(self) -> None:
+ raise TypeError("Use async with instead")
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_value: Optional[BaseException],
+ traceback: Optional[TracebackType],
+ ) -> None:
+ # __exit__ should exist in pair with __enter__ but never executed
+ pass # pragma: no cover
+
+ async def __aenter__(self) -> "BaseTestServer":
+ await self.start_server(loop=self._loop)
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_value: Optional[BaseException],
+ traceback: Optional[TracebackType],
+ ) -> None:
+ await self.close()
+
+
+class TestServer(BaseTestServer):
+ def __init__(
+ self,
+ app: Application,
+ *,
+ scheme: Union[str, object] = sentinel,
+ host: str = "127.0.0.1",
+ port: Optional[int] = None,
+ **kwargs: Any,
+ ):
+ self.app = app
+ super().__init__(scheme=scheme, host=host, port=port, **kwargs)
+
+ async def _make_runner(self, **kwargs: Any) -> BaseRunner:
+ return AppRunner(self.app, **kwargs)
+
+
+class RawTestServer(BaseTestServer):
+ def __init__(
+ self,
+ handler: _RequestHandler,
+ *,
+ scheme: Union[str, object] = sentinel,
+ host: str = "127.0.0.1",
+ port: Optional[int] = None,
+ **kwargs: Any,
+ ) -> None:
+ self._handler = handler
+ super().__init__(scheme=scheme, host=host, port=port, **kwargs)
+
+ async def _make_runner(self, debug: bool = True, **kwargs: Any) -> ServerRunner:
+ srv = Server(self._handler, loop=self._loop, debug=debug, **kwargs)
+ return ServerRunner(srv, debug=debug, **kwargs)
+
+
+class TestClient:
+ """
+    A test client implementation for writing functional tests
+    of aiohttp-based servers.
+
+ """
+
+ __test__ = False
+
+ def __init__(
+ self,
+ server: BaseTestServer,
+ *,
+ cookie_jar: Optional[AbstractCookieJar] = None,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ **kwargs: Any,
+ ) -> None:
+ if not isinstance(server, BaseTestServer):
+ raise TypeError(
+ "server must be TestServer " "instance, found type: %r" % type(server)
+ )
+ self._server = server
+ self._loop = loop
+ if cookie_jar is None:
+ cookie_jar = aiohttp.CookieJar(unsafe=True, loop=loop)
+ self._session = ClientSession(loop=loop, cookie_jar=cookie_jar, **kwargs)
+ self._closed = False
+ self._responses = [] # type: List[ClientResponse]
+ self._websockets = [] # type: List[ClientWebSocketResponse]
+
+ async def start_server(self) -> None:
+ await self._server.start_server(loop=self._loop)
+
+ @property
+ def host(self) -> str:
+ return self._server.host
+
+ @property
+ def port(self) -> Optional[int]:
+ return self._server.port
+
+ @property
+ def server(self) -> BaseTestServer:
+ return self._server
+
+ @property
+ def app(self) -> Application:
+ return getattr(self._server, "app", None)
+
+ @property
+ def session(self) -> ClientSession:
+ """An internal aiohttp.ClientSession.
+
+ Unlike the methods on the TestClient, client session requests
+ do not automatically include the host in the url queried, and
+ will require an absolute path to the resource.
+
+ """
+ return self._session
+
+ def make_url(self, path: str) -> URL:
+ return self._server.make_url(path)
+
+ async def _request(self, method: str, path: str, **kwargs: Any) -> ClientResponse:
+ resp = await self._session.request(method, self.make_url(path), **kwargs)
+ # save it to close later
+ self._responses.append(resp)
+ return resp
+
+ def request(self, method: str, path: str, **kwargs: Any) -> _RequestContextManager:
+ """Routes a request to tested http server.
+
+ The interface is identical to aiohttp.ClientSession.request,
+ except the loop kwarg is overridden by the instance used by the
+ test server.
+
+ """
+ return _RequestContextManager(self._request(method, path, **kwargs))
+
+ def get(self, path: str, **kwargs: Any) -> _RequestContextManager:
+ """Perform an HTTP GET request."""
+ return _RequestContextManager(self._request(hdrs.METH_GET, path, **kwargs))
+
+ def post(self, path: str, **kwargs: Any) -> _RequestContextManager:
+ """Perform an HTTP POST request."""
+ return _RequestContextManager(self._request(hdrs.METH_POST, path, **kwargs))
+
+ def options(self, path: str, **kwargs: Any) -> _RequestContextManager:
+ """Perform an HTTP OPTIONS request."""
+ return _RequestContextManager(self._request(hdrs.METH_OPTIONS, path, **kwargs))
+
+ def head(self, path: str, **kwargs: Any) -> _RequestContextManager:
+ """Perform an HTTP HEAD request."""
+ return _RequestContextManager(self._request(hdrs.METH_HEAD, path, **kwargs))
+
+ def put(self, path: str, **kwargs: Any) -> _RequestContextManager:
+ """Perform an HTTP PUT request."""
+ return _RequestContextManager(self._request(hdrs.METH_PUT, path, **kwargs))
+
+ def patch(self, path: str, **kwargs: Any) -> _RequestContextManager:
+ """Perform an HTTP PATCH request."""
+ return _RequestContextManager(self._request(hdrs.METH_PATCH, path, **kwargs))
+
+ def delete(self, path: str, **kwargs: Any) -> _RequestContextManager:
+ """Perform an HTTP PATCH request."""
+ return _RequestContextManager(self._request(hdrs.METH_DELETE, path, **kwargs))
+
+ def ws_connect(self, path: str, **kwargs: Any) -> _WSRequestContextManager:
+ """Initiate websocket connection.
+
+ The api corresponds to aiohttp.ClientSession.ws_connect.
+
+ """
+ return _WSRequestContextManager(self._ws_connect(path, **kwargs))
+
+ async def _ws_connect(self, path: str, **kwargs: Any) -> ClientWebSocketResponse:
+ ws = await self._session.ws_connect(self.make_url(path), **kwargs)
+ self._websockets.append(ws)
+ return ws
+
+ async def close(self) -> None:
+ """Close all fixtures created by the test client.
+
+ After that point, the TestClient is no longer usable.
+
+ This is an idempotent function: running close multiple times
+ will not have any additional effects.
+
+ close is also run on exit when used as a(n) (asynchronous)
+ context manager.
+
+ """
+ if not self._closed:
+ for resp in self._responses:
+ resp.close()
+ for ws in self._websockets:
+ await ws.close()
+ await self._session.close()
+ await self._server.close()
+ self._closed = True
+
+ def __enter__(self) -> None:
+ raise TypeError("Use async with instead")
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> None:
+ # __exit__ should exist in pair with __enter__ but never executed
+ pass # pragma: no cover
+
+ async def __aenter__(self) -> "TestClient":
+ await self.start_server()
+ return self
+
+ async def __aexit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc: Optional[BaseException],
+ tb: Optional[TracebackType],
+ ) -> None:
+ await self.close()
+
+
+class AioHTTPTestCase(unittest.TestCase):
+ """A base class to allow for unittest web applications using
+ aiohttp.
+
+ Provides the following:
+
+ * self.client (aiohttp.test_utils.TestClient): an aiohttp test client.
+ * self.loop (asyncio.BaseEventLoop): the event loop in which the
+ application and server are running.
+ * self.app (aiohttp.web.Application): the application returned by
+ self.get_application()
+
+ Note that the TestClient's methods are asynchronous: you have to
+    execute functions on the test client using asynchronous methods.
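+
+    A minimal sketch of a subclass (the handler wiring is an assumption
+    for illustration)::
+
+        class MyAppTestCase(AioHTTPTestCase):
+            async def get_application(self):
+                app = web.Application()
+                app.router.add_get("/", index_handler)
+                return app
+
+            @unittest_run_loop
+            async def test_index(self):
+                resp = await self.client.get("/")
+                assert resp.status == 200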
+ """
+
+ async def get_application(self) -> Application:
+ """
+ This method should be overridden
+ to return the aiohttp.web.Application
+ object to test.
+
+ """
+ return self.get_app()
+
+ def get_app(self) -> Application:
+ """Obsolete method used to constructing web application.
+
+ Use .get_application() coroutine instead
+
+ """
+ raise RuntimeError("Did you forget to define get_application()?")
+
+ def setUp(self) -> None:
+ self.loop = setup_test_loop()
+
+ self.app = self.loop.run_until_complete(self.get_application())
+ self.server = self.loop.run_until_complete(self.get_server(self.app))
+ self.client = self.loop.run_until_complete(self.get_client(self.server))
+
+ self.loop.run_until_complete(self.client.start_server())
+
+ self.loop.run_until_complete(self.setUpAsync())
+
+ async def setUpAsync(self) -> None:
+ pass
+
+ def tearDown(self) -> None:
+ self.loop.run_until_complete(self.tearDownAsync())
+ self.loop.run_until_complete(self.client.close())
+ teardown_test_loop(self.loop)
+
+ async def tearDownAsync(self) -> None:
+ pass
+
+ async def get_server(self, app: Application) -> TestServer:
+ """Return a TestServer instance."""
+ return TestServer(app, loop=self.loop)
+
+ async def get_client(self, server: TestServer) -> TestClient:
+ """Return a TestClient instance."""
+ return TestClient(server, loop=self.loop)
+
+
+def unittest_run_loop(func: Any, *args: Any, **kwargs: Any) -> Any:
+ """A decorator dedicated to use with asynchronous methods of an
+ AioHTTPTestCase.
+
+ Handles executing an asynchronous function, using
+ the self.loop of the AioHTTPTestCase.
+ """
+
+ @functools.wraps(func, *args, **kwargs)
+ def new_func(self: Any, *inner_args: Any, **inner_kwargs: Any) -> Any:
+ return self.loop.run_until_complete(func(self, *inner_args, **inner_kwargs))
+
+ return new_func
+
+
+_LOOP_FACTORY = Callable[[], asyncio.AbstractEventLoop]
+
+
+@contextlib.contextmanager
+def loop_context(
+ loop_factory: _LOOP_FACTORY = asyncio.new_event_loop, fast: bool = False
+) -> Iterator[asyncio.AbstractEventLoop]:
+ """A contextmanager that creates an event_loop, for test purposes.
+
+ Handles the creation and cleanup of a test loop.
+ """
+ loop = setup_test_loop(loop_factory)
+ yield loop
+ teardown_test_loop(loop, fast=fast)
+
+
+def setup_test_loop(
+ loop_factory: _LOOP_FACTORY = asyncio.new_event_loop,
+) -> asyncio.AbstractEventLoop:
+ """Create and return an asyncio.BaseEventLoop
+ instance.
+
+ The caller should also call teardown_test_loop,
+ once they are done with the loop.
+ """
+ loop = loop_factory()
+ try:
+ module = loop.__class__.__module__
+ skip_watcher = "uvloop" in module
+ except AttributeError: # pragma: no cover
+ # Just in case
+ skip_watcher = True
+ asyncio.set_event_loop(loop)
+ if sys.platform != "win32" and not skip_watcher:
+ policy = asyncio.get_event_loop_policy()
+ watcher = asyncio.SafeChildWatcher()
+ watcher.attach_loop(loop)
+ with contextlib.suppress(NotImplementedError):
+ policy.set_child_watcher(watcher)
+ return loop
+
+
+def teardown_test_loop(loop: asyncio.AbstractEventLoop, fast: bool = False) -> None:
+ """Teardown and cleanup an event_loop created
+ by setup_test_loop.
+
+ """
+ closed = loop.is_closed()
+ if not closed:
+ loop.call_soon(loop.stop)
+ loop.run_forever()
+ loop.close()
+
+ if not fast:
+ gc.collect()
+
+ asyncio.set_event_loop(None)
+
+
+def _create_app_mock() -> mock.MagicMock:
+ def get_dict(app: Any, key: str) -> Any:
+ return app.__app_dict[key]
+
+ def set_dict(app: Any, key: str, value: Any) -> None:
+ app.__app_dict[key] = value
+
+ app = mock.MagicMock()
+ app.__app_dict = {}
+ app.__getitem__ = get_dict
+ app.__setitem__ = set_dict
+
+ app._debug = False
+ app.on_response_prepare = Signal(app)
+ app.on_response_prepare.freeze()
+ return app
+
+
+def _create_transport(sslcontext: Optional[SSLContext] = None) -> mock.Mock:
+ transport = mock.Mock()
+
+ def get_extra_info(key: str) -> Optional[SSLContext]:
+ if key == "sslcontext":
+ return sslcontext
+ else:
+ return None
+
+ transport.get_extra_info.side_effect = get_extra_info
+ return transport
+
+
+def make_mocked_request(
+ method: str,
+ path: str,
+ headers: Any = None,
+ *,
+ match_info: Any = sentinel,
+ version: HttpVersion = HttpVersion(1, 1),
+ closing: bool = False,
+ app: Any = None,
+ writer: Any = sentinel,
+ protocol: Any = sentinel,
+ transport: Any = sentinel,
+ payload: Any = sentinel,
+ sslcontext: Optional[SSLContext] = None,
+ client_max_size: int = 1024 ** 2,
+ loop: Any = ...,
+) -> Request:
+ """Creates mocked web.Request testing purposes.
+
+ Useful in unit tests, when spinning full web server is overkill or
+ specific conditions and errors are hard to trigger.
+
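+    A minimal sketch (the header name and value are illustrative)::
+
+        req = make_mocked_request("GET", "/", headers={"token": "x"})
+        assert req.method == "GET"
+        assert req.headers["token"] == "x"
+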
+ """
+
+ task = mock.Mock()
+ if loop is ...:
+ loop = mock.Mock()
+ loop.create_future.return_value = ()
+
+ if version < HttpVersion(1, 1):
+ closing = True
+
+ if headers:
+ headers = CIMultiDictProxy(CIMultiDict(headers))
+ raw_hdrs = tuple(
+ (k.encode("utf-8"), v.encode("utf-8")) for k, v in headers.items()
+ )
+ else:
+ headers = CIMultiDictProxy(CIMultiDict())
+ raw_hdrs = ()
+
+ chunked = "chunked" in headers.get(hdrs.TRANSFER_ENCODING, "").lower()
+
+ message = RawRequestMessage(
+ method,
+ path,
+ version,
+ headers,
+ raw_hdrs,
+ closing,
+ False,
+ False,
+ chunked,
+ URL(path),
+ )
+ if app is None:
+ app = _create_app_mock()
+
+ if transport is sentinel:
+ transport = _create_transport(sslcontext)
+
+ if protocol is sentinel:
+ protocol = mock.Mock()
+ protocol.transport = transport
+
+ if writer is sentinel:
+ writer = mock.Mock()
+ writer.write_headers = make_mocked_coro(None)
+ writer.write = make_mocked_coro(None)
+ writer.write_eof = make_mocked_coro(None)
+ writer.drain = make_mocked_coro(None)
+ writer.transport = transport
+
+ protocol.transport = transport
+ protocol.writer = writer
+
+ if payload is sentinel:
+ payload = mock.Mock()
+
+ req = Request(
+ message, payload, protocol, writer, task, loop, client_max_size=client_max_size
+ )
+
+ match_info = UrlMappingMatchInfo(
+ {} if match_info is sentinel else match_info, mock.Mock()
+ )
+ match_info.add_app(app)
+ req._match_info = match_info
+
+ return req
+
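+# Illustrative usage (a sketch; the header value is arbitrary):
+#
+#     req = make_mocked_request("GET", "/", headers={"token": "x"})
+#     assert req.method == "GET"
+#     assert req.headers["token"] == "x"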
+
+def make_mocked_coro(
+ return_value: Any = sentinel, raise_exception: Any = sentinel
+) -> Any:
+ """Creates a coroutine mock."""
+
+ async def mock_coro(*args: Any, **kwargs: Any) -> Any:
+ if raise_exception is not sentinel:
+ raise raise_exception
+ if not inspect.isawaitable(return_value):
+ return return_value
+ await return_value
+
+ return mock.Mock(wraps=mock_coro)
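+
+
+# Illustrative usage (a sketch; loop is an event loop such as one from
+# loop_context):
+#
+#     fetch = make_mocked_coro(return_value=b"payload")
+#     assert loop.run_until_complete(fetch()) == b"payload"
+#     fetch.assert_called_once_with()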
diff --git a/third_party/python/aiohttp/aiohttp/tracing.py b/third_party/python/aiohttp/aiohttp/tracing.py
new file mode 100644
index 0000000000..7ae7948f9a
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/tracing.py
@@ -0,0 +1,442 @@
+from types import SimpleNamespace
+from typing import TYPE_CHECKING, Awaitable, Optional, Type, TypeVar
+
+import attr
+from multidict import CIMultiDict
+from yarl import URL
+
+from .client_reqrep import ClientResponse
+from .signals import Signal
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing_extensions import Protocol
+
+ from .client import ClientSession
+
+ _ParamT_contra = TypeVar("_ParamT_contra", contravariant=True)
+
+ class _SignalCallback(Protocol[_ParamT_contra]):
+ def __call__(
+ self,
+ __client_session: ClientSession,
+ __trace_config_ctx: SimpleNamespace,
+ __params: _ParamT_contra,
+ ) -> Awaitable[None]:
+ ...
+
+
+__all__ = (
+ "TraceConfig",
+ "TraceRequestStartParams",
+ "TraceRequestEndParams",
+ "TraceRequestExceptionParams",
+ "TraceConnectionQueuedStartParams",
+ "TraceConnectionQueuedEndParams",
+ "TraceConnectionCreateStartParams",
+ "TraceConnectionCreateEndParams",
+ "TraceConnectionReuseconnParams",
+ "TraceDnsResolveHostStartParams",
+ "TraceDnsResolveHostEndParams",
+ "TraceDnsCacheHitParams",
+ "TraceDnsCacheMissParams",
+ "TraceRequestRedirectParams",
+ "TraceRequestChunkSentParams",
+ "TraceResponseChunkReceivedParams",
+)
+
+
+class TraceConfig:
+ """First-class used to trace requests launched via ClientSession
+ objects."""
+
+ def __init__(
+ self, trace_config_ctx_factory: Type[SimpleNamespace] = SimpleNamespace
+ ) -> None:
+ self._on_request_start = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceRequestStartParams]]
+ self._on_request_chunk_sent = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceRequestChunkSentParams]]
+ self._on_response_chunk_received = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceResponseChunkReceivedParams]]
+ self._on_request_end = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceRequestEndParams]]
+ self._on_request_exception = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceRequestExceptionParams]]
+ self._on_request_redirect = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceRequestRedirectParams]]
+ self._on_connection_queued_start = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceConnectionQueuedStartParams]]
+ self._on_connection_queued_end = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceConnectionQueuedEndParams]]
+ self._on_connection_create_start = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceConnectionCreateStartParams]]
+ self._on_connection_create_end = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceConnectionCreateEndParams]]
+ self._on_connection_reuseconn = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceConnectionReuseconnParams]]
+ self._on_dns_resolvehost_start = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceDnsResolveHostStartParams]]
+ self._on_dns_resolvehost_end = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceDnsResolveHostEndParams]]
+ self._on_dns_cache_hit = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceDnsCacheHitParams]]
+ self._on_dns_cache_miss = Signal(
+ self
+ ) # type: Signal[_SignalCallback[TraceDnsCacheMissParams]]
+
+ self._trace_config_ctx_factory = trace_config_ctx_factory
+
+ def trace_config_ctx(
+ self, trace_request_ctx: Optional[SimpleNamespace] = None
+ ) -> SimpleNamespace:
+ """ Return a new trace_config_ctx instance """
+ return self._trace_config_ctx_factory(trace_request_ctx=trace_request_ctx)
+
+ def freeze(self) -> None:
+ self._on_request_start.freeze()
+ self._on_request_chunk_sent.freeze()
+ self._on_response_chunk_received.freeze()
+ self._on_request_end.freeze()
+ self._on_request_exception.freeze()
+ self._on_request_redirect.freeze()
+ self._on_connection_queued_start.freeze()
+ self._on_connection_queued_end.freeze()
+ self._on_connection_create_start.freeze()
+ self._on_connection_create_end.freeze()
+ self._on_connection_reuseconn.freeze()
+ self._on_dns_resolvehost_start.freeze()
+ self._on_dns_resolvehost_end.freeze()
+ self._on_dns_cache_hit.freeze()
+ self._on_dns_cache_miss.freeze()
+
+ @property
+ def on_request_start(self) -> "Signal[_SignalCallback[TraceRequestStartParams]]":
+ return self._on_request_start
+
+ @property
+ def on_request_chunk_sent(
+ self,
+ ) -> "Signal[_SignalCallback[TraceRequestChunkSentParams]]":
+ return self._on_request_chunk_sent
+
+ @property
+ def on_response_chunk_received(
+ self,
+ ) -> "Signal[_SignalCallback[TraceResponseChunkReceivedParams]]":
+ return self._on_response_chunk_received
+
+ @property
+ def on_request_end(self) -> "Signal[_SignalCallback[TraceRequestEndParams]]":
+ return self._on_request_end
+
+ @property
+ def on_request_exception(
+ self,
+ ) -> "Signal[_SignalCallback[TraceRequestExceptionParams]]":
+ return self._on_request_exception
+
+ @property
+ def on_request_redirect(
+ self,
+ ) -> "Signal[_SignalCallback[TraceRequestRedirectParams]]":
+ return self._on_request_redirect
+
+ @property
+ def on_connection_queued_start(
+ self,
+ ) -> "Signal[_SignalCallback[TraceConnectionQueuedStartParams]]":
+ return self._on_connection_queued_start
+
+ @property
+ def on_connection_queued_end(
+ self,
+ ) -> "Signal[_SignalCallback[TraceConnectionQueuedEndParams]]":
+ return self._on_connection_queued_end
+
+ @property
+ def on_connection_create_start(
+ self,
+ ) -> "Signal[_SignalCallback[TraceConnectionCreateStartParams]]":
+ return self._on_connection_create_start
+
+ @property
+ def on_connection_create_end(
+ self,
+ ) -> "Signal[_SignalCallback[TraceConnectionCreateEndParams]]":
+ return self._on_connection_create_end
+
+ @property
+ def on_connection_reuseconn(
+ self,
+ ) -> "Signal[_SignalCallback[TraceConnectionReuseconnParams]]":
+ return self._on_connection_reuseconn
+
+ @property
+ def on_dns_resolvehost_start(
+ self,
+ ) -> "Signal[_SignalCallback[TraceDnsResolveHostStartParams]]":
+ return self._on_dns_resolvehost_start
+
+ @property
+ def on_dns_resolvehost_end(
+ self,
+ ) -> "Signal[_SignalCallback[TraceDnsResolveHostEndParams]]":
+ return self._on_dns_resolvehost_end
+
+ @property
+ def on_dns_cache_hit(self) -> "Signal[_SignalCallback[TraceDnsCacheHitParams]]":
+ return self._on_dns_cache_hit
+
+ @property
+ def on_dns_cache_miss(self) -> "Signal[_SignalCallback[TraceDnsCacheMissParams]]":
+ return self._on_dns_cache_miss
+
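+# Illustrative usage (a sketch; ClientSession(trace_configs=...) is the
+# documented way to install a TraceConfig):
+#
+#     async def on_start(session, ctx, params):
+#         print("request started:", params.url)
+#
+#     trace_config = TraceConfig()
+#     trace_config.on_request_start.append(on_start)
+#     client = ClientSession(trace_configs=[trace_config])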
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestStartParams:
+ """ Parameters sent by the `on_request_start` signal"""
+
+ method: str
+ url: URL
+ headers: "CIMultiDict[str]"
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestChunkSentParams:
+ """ Parameters sent by the `on_request_chunk_sent` signal"""
+
+ method: str
+ url: URL
+ chunk: bytes
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceResponseChunkReceivedParams:
+ """ Parameters sent by the `on_response_chunk_received` signal"""
+
+ method: str
+ url: URL
+ chunk: bytes
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestEndParams:
+ """ Parameters sent by the `on_request_end` signal"""
+
+ method: str
+ url: URL
+ headers: "CIMultiDict[str]"
+ response: ClientResponse
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestExceptionParams:
+ """ Parameters sent by the `on_request_exception` signal"""
+
+ method: str
+ url: URL
+ headers: "CIMultiDict[str]"
+ exception: BaseException
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceRequestRedirectParams:
+ """ Parameters sent by the `on_request_redirect` signal"""
+
+ method: str
+ url: URL
+ headers: "CIMultiDict[str]"
+ response: ClientResponse
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionQueuedStartParams:
+    """Parameters sent by the `on_connection_queued_start` signal."""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionQueuedEndParams:
+    """Parameters sent by the `on_connection_queued_end` signal."""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionCreateStartParams:
+    """Parameters sent by the `on_connection_create_start` signal."""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionCreateEndParams:
+    """Parameters sent by the `on_connection_create_end` signal."""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceConnectionReuseconnParams:
+    """Parameters sent by the `on_connection_reuseconn` signal."""
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceDnsResolveHostStartParams:
+ """ Parameters sent by the `on_dns_resolvehost_start` signal"""
+
+ host: str
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceDnsResolveHostEndParams:
+ """ Parameters sent by the `on_dns_resolvehost_end` signal"""
+
+ host: str
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceDnsCacheHitParams:
+ """ Parameters sent by the `on_dns_cache_hit` signal"""
+
+ host: str
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class TraceDnsCacheMissParams:
+ """ Parameters sent by the `on_dns_cache_miss` signal"""
+
+ host: str
+
+
+class Trace:
+ """Internal class used to keep together the main dependencies used
+ at the moment of send a signal."""
+
+ def __init__(
+ self,
+ session: "ClientSession",
+ trace_config: TraceConfig,
+ trace_config_ctx: SimpleNamespace,
+ ) -> None:
+ self._trace_config = trace_config
+ self._trace_config_ctx = trace_config_ctx
+ self._session = session
+
+ async def send_request_start(
+ self, method: str, url: URL, headers: "CIMultiDict[str]"
+ ) -> None:
+ return await self._trace_config.on_request_start.send(
+ self._session,
+ self._trace_config_ctx,
+ TraceRequestStartParams(method, url, headers),
+ )
+
+ async def send_request_chunk_sent(
+ self, method: str, url: URL, chunk: bytes
+ ) -> None:
+ return await self._trace_config.on_request_chunk_sent.send(
+ self._session,
+ self._trace_config_ctx,
+ TraceRequestChunkSentParams(method, url, chunk),
+ )
+
+ async def send_response_chunk_received(
+ self, method: str, url: URL, chunk: bytes
+ ) -> None:
+ return await self._trace_config.on_response_chunk_received.send(
+ self._session,
+ self._trace_config_ctx,
+ TraceResponseChunkReceivedParams(method, url, chunk),
+ )
+
+ async def send_request_end(
+ self,
+ method: str,
+ url: URL,
+ headers: "CIMultiDict[str]",
+ response: ClientResponse,
+ ) -> None:
+ return await self._trace_config.on_request_end.send(
+ self._session,
+ self._trace_config_ctx,
+ TraceRequestEndParams(method, url, headers, response),
+ )
+
+ async def send_request_exception(
+ self,
+ method: str,
+ url: URL,
+ headers: "CIMultiDict[str]",
+ exception: BaseException,
+ ) -> None:
+ return await self._trace_config.on_request_exception.send(
+ self._session,
+ self._trace_config_ctx,
+ TraceRequestExceptionParams(method, url, headers, exception),
+ )
+
+ async def send_request_redirect(
+ self,
+ method: str,
+ url: URL,
+ headers: "CIMultiDict[str]",
+ response: ClientResponse,
+ ) -> None:
+        return await self._trace_config.on_request_redirect.send(
+ self._session,
+ self._trace_config_ctx,
+ TraceRequestRedirectParams(method, url, headers, response),
+ )
+
+ async def send_connection_queued_start(self) -> None:
+ return await self._trace_config.on_connection_queued_start.send(
+ self._session, self._trace_config_ctx, TraceConnectionQueuedStartParams()
+ )
+
+ async def send_connection_queued_end(self) -> None:
+ return await self._trace_config.on_connection_queued_end.send(
+ self._session, self._trace_config_ctx, TraceConnectionQueuedEndParams()
+ )
+
+ async def send_connection_create_start(self) -> None:
+ return await self._trace_config.on_connection_create_start.send(
+ self._session, self._trace_config_ctx, TraceConnectionCreateStartParams()
+ )
+
+ async def send_connection_create_end(self) -> None:
+ return await self._trace_config.on_connection_create_end.send(
+ self._session, self._trace_config_ctx, TraceConnectionCreateEndParams()
+ )
+
+ async def send_connection_reuseconn(self) -> None:
+ return await self._trace_config.on_connection_reuseconn.send(
+ self._session, self._trace_config_ctx, TraceConnectionReuseconnParams()
+ )
+
+ async def send_dns_resolvehost_start(self, host: str) -> None:
+ return await self._trace_config.on_dns_resolvehost_start.send(
+ self._session, self._trace_config_ctx, TraceDnsResolveHostStartParams(host)
+ )
+
+ async def send_dns_resolvehost_end(self, host: str) -> None:
+ return await self._trace_config.on_dns_resolvehost_end.send(
+ self._session, self._trace_config_ctx, TraceDnsResolveHostEndParams(host)
+ )
+
+ async def send_dns_cache_hit(self, host: str) -> None:
+ return await self._trace_config.on_dns_cache_hit.send(
+ self._session, self._trace_config_ctx, TraceDnsCacheHitParams(host)
+ )
+
+ async def send_dns_cache_miss(self, host: str) -> None:
+ return await self._trace_config.on_dns_cache_miss.send(
+ self._session, self._trace_config_ctx, TraceDnsCacheMissParams(host)
+ )
diff --git a/third_party/python/aiohttp/aiohttp/typedefs.py b/third_party/python/aiohttp/aiohttp/typedefs.py
new file mode 100644
index 0000000000..1b68a242af
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/typedefs.py
@@ -0,0 +1,46 @@
+import json
+import os
+import pathlib
+import sys
+from typing import TYPE_CHECKING, Any, Callable, Iterable, Mapping, Tuple, Union
+
+from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy, istr
+from yarl import URL
+
+DEFAULT_JSON_ENCODER = json.dumps
+DEFAULT_JSON_DECODER = json.loads
+
+if TYPE_CHECKING: # pragma: no cover
+ _CIMultiDict = CIMultiDict[str]
+ _CIMultiDictProxy = CIMultiDictProxy[str]
+ _MultiDict = MultiDict[str]
+ _MultiDictProxy = MultiDictProxy[str]
+ from http.cookies import BaseCookie, Morsel
+else:
+ _CIMultiDict = CIMultiDict
+ _CIMultiDictProxy = CIMultiDictProxy
+ _MultiDict = MultiDict
+ _MultiDictProxy = MultiDictProxy
+
+Byteish = Union[bytes, bytearray, memoryview]
+JSONEncoder = Callable[[Any], str]
+JSONDecoder = Callable[[str], Any]
+LooseHeaders = Union[Mapping[Union[str, istr], str], _CIMultiDict, _CIMultiDictProxy]
+RawHeaders = Tuple[Tuple[bytes, bytes], ...]
+StrOrURL = Union[str, URL]
+
+LooseCookiesMappings = Mapping[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]]
+LooseCookiesIterables = Iterable[
+ Tuple[str, Union[str, "BaseCookie[str]", "Morsel[Any]"]]
+]
+LooseCookies = Union[
+ LooseCookiesMappings,
+ LooseCookiesIterables,
+ "BaseCookie[str]",
+]
+
+
+if sys.version_info >= (3, 6):
+ PathLike = Union[str, "os.PathLike[str]"]
+else:
+ PathLike = Union[str, pathlib.PurePath]
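+
+
+# Illustrative usage (a sketch; as_url is a hypothetical helper):
+#
+#     def as_url(url: StrOrURL) -> URL:
+#         return url if isinstance(url, URL) else URL(url)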
diff --git a/third_party/python/aiohttp/aiohttp/web.py b/third_party/python/aiohttp/aiohttp/web.py
new file mode 100644
index 0000000000..557e3c3b4d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web.py
@@ -0,0 +1,581 @@
+import asyncio
+import logging
+import socket
+import sys
+from argparse import ArgumentParser
+from collections.abc import Iterable
+from importlib import import_module
+from typing import (
+ Any as Any,
+ Awaitable as Awaitable,
+ Callable as Callable,
+ Iterable as TypingIterable,
+ List as List,
+ Optional as Optional,
+ Set as Set,
+ Type as Type,
+ Union as Union,
+ cast as cast,
+)
+
+from .abc import AbstractAccessLogger
+from .helpers import all_tasks
+from .log import access_logger
+from .web_app import Application as Application, CleanupError as CleanupError
+from .web_exceptions import (
+ HTTPAccepted as HTTPAccepted,
+ HTTPBadGateway as HTTPBadGateway,
+ HTTPBadRequest as HTTPBadRequest,
+ HTTPClientError as HTTPClientError,
+ HTTPConflict as HTTPConflict,
+ HTTPCreated as HTTPCreated,
+ HTTPError as HTTPError,
+ HTTPException as HTTPException,
+ HTTPExpectationFailed as HTTPExpectationFailed,
+ HTTPFailedDependency as HTTPFailedDependency,
+ HTTPForbidden as HTTPForbidden,
+ HTTPFound as HTTPFound,
+ HTTPGatewayTimeout as HTTPGatewayTimeout,
+ HTTPGone as HTTPGone,
+ HTTPInsufficientStorage as HTTPInsufficientStorage,
+ HTTPInternalServerError as HTTPInternalServerError,
+ HTTPLengthRequired as HTTPLengthRequired,
+ HTTPMethodNotAllowed as HTTPMethodNotAllowed,
+ HTTPMisdirectedRequest as HTTPMisdirectedRequest,
+ HTTPMovedPermanently as HTTPMovedPermanently,
+ HTTPMultipleChoices as HTTPMultipleChoices,
+ HTTPNetworkAuthenticationRequired as HTTPNetworkAuthenticationRequired,
+ HTTPNoContent as HTTPNoContent,
+ HTTPNonAuthoritativeInformation as HTTPNonAuthoritativeInformation,
+ HTTPNotAcceptable as HTTPNotAcceptable,
+ HTTPNotExtended as HTTPNotExtended,
+ HTTPNotFound as HTTPNotFound,
+ HTTPNotImplemented as HTTPNotImplemented,
+ HTTPNotModified as HTTPNotModified,
+ HTTPOk as HTTPOk,
+ HTTPPartialContent as HTTPPartialContent,
+ HTTPPaymentRequired as HTTPPaymentRequired,
+ HTTPPermanentRedirect as HTTPPermanentRedirect,
+ HTTPPreconditionFailed as HTTPPreconditionFailed,
+ HTTPPreconditionRequired as HTTPPreconditionRequired,
+ HTTPProxyAuthenticationRequired as HTTPProxyAuthenticationRequired,
+ HTTPRedirection as HTTPRedirection,
+ HTTPRequestEntityTooLarge as HTTPRequestEntityTooLarge,
+ HTTPRequestHeaderFieldsTooLarge as HTTPRequestHeaderFieldsTooLarge,
+ HTTPRequestRangeNotSatisfiable as HTTPRequestRangeNotSatisfiable,
+ HTTPRequestTimeout as HTTPRequestTimeout,
+ HTTPRequestURITooLong as HTTPRequestURITooLong,
+ HTTPResetContent as HTTPResetContent,
+ HTTPSeeOther as HTTPSeeOther,
+ HTTPServerError as HTTPServerError,
+ HTTPServiceUnavailable as HTTPServiceUnavailable,
+ HTTPSuccessful as HTTPSuccessful,
+ HTTPTemporaryRedirect as HTTPTemporaryRedirect,
+ HTTPTooManyRequests as HTTPTooManyRequests,
+ HTTPUnauthorized as HTTPUnauthorized,
+ HTTPUnavailableForLegalReasons as HTTPUnavailableForLegalReasons,
+ HTTPUnprocessableEntity as HTTPUnprocessableEntity,
+ HTTPUnsupportedMediaType as HTTPUnsupportedMediaType,
+ HTTPUpgradeRequired as HTTPUpgradeRequired,
+ HTTPUseProxy as HTTPUseProxy,
+ HTTPVariantAlsoNegotiates as HTTPVariantAlsoNegotiates,
+ HTTPVersionNotSupported as HTTPVersionNotSupported,
+)
+from .web_fileresponse import FileResponse as FileResponse
+from .web_log import AccessLogger
+from .web_middlewares import (
+ middleware as middleware,
+ normalize_path_middleware as normalize_path_middleware,
+)
+from .web_protocol import (
+ PayloadAccessError as PayloadAccessError,
+ RequestHandler as RequestHandler,
+ RequestPayloadError as RequestPayloadError,
+)
+from .web_request import (
+ BaseRequest as BaseRequest,
+ FileField as FileField,
+ Request as Request,
+)
+from .web_response import (
+ ContentCoding as ContentCoding,
+ Response as Response,
+ StreamResponse as StreamResponse,
+ json_response as json_response,
+)
+from .web_routedef import (
+ AbstractRouteDef as AbstractRouteDef,
+ RouteDef as RouteDef,
+ RouteTableDef as RouteTableDef,
+ StaticDef as StaticDef,
+ delete as delete,
+ get as get,
+ head as head,
+ options as options,
+ patch as patch,
+ post as post,
+ put as put,
+ route as route,
+ static as static,
+ view as view,
+)
+from .web_runner import (
+ AppRunner as AppRunner,
+ BaseRunner as BaseRunner,
+ BaseSite as BaseSite,
+ GracefulExit as GracefulExit,
+ NamedPipeSite as NamedPipeSite,
+ ServerRunner as ServerRunner,
+ SockSite as SockSite,
+ TCPSite as TCPSite,
+ UnixSite as UnixSite,
+)
+from .web_server import Server as Server
+from .web_urldispatcher import (
+ AbstractResource as AbstractResource,
+ AbstractRoute as AbstractRoute,
+ DynamicResource as DynamicResource,
+ PlainResource as PlainResource,
+ Resource as Resource,
+ ResourceRoute as ResourceRoute,
+ StaticResource as StaticResource,
+ UrlDispatcher as UrlDispatcher,
+ UrlMappingMatchInfo as UrlMappingMatchInfo,
+ View as View,
+)
+from .web_ws import (
+ WebSocketReady as WebSocketReady,
+ WebSocketResponse as WebSocketResponse,
+ WSMsgType as WSMsgType,
+)
+
+__all__ = (
+ # web_app
+ "Application",
+ "CleanupError",
+ # web_exceptions
+ "HTTPAccepted",
+ "HTTPBadGateway",
+ "HTTPBadRequest",
+ "HTTPClientError",
+ "HTTPConflict",
+ "HTTPCreated",
+ "HTTPError",
+ "HTTPException",
+ "HTTPExpectationFailed",
+ "HTTPFailedDependency",
+ "HTTPForbidden",
+ "HTTPFound",
+ "HTTPGatewayTimeout",
+ "HTTPGone",
+ "HTTPInsufficientStorage",
+ "HTTPInternalServerError",
+ "HTTPLengthRequired",
+ "HTTPMethodNotAllowed",
+ "HTTPMisdirectedRequest",
+ "HTTPMovedPermanently",
+ "HTTPMultipleChoices",
+ "HTTPNetworkAuthenticationRequired",
+ "HTTPNoContent",
+ "HTTPNonAuthoritativeInformation",
+ "HTTPNotAcceptable",
+ "HTTPNotExtended",
+ "HTTPNotFound",
+ "HTTPNotImplemented",
+ "HTTPNotModified",
+ "HTTPOk",
+ "HTTPPartialContent",
+ "HTTPPaymentRequired",
+ "HTTPPermanentRedirect",
+ "HTTPPreconditionFailed",
+ "HTTPPreconditionRequired",
+ "HTTPProxyAuthenticationRequired",
+ "HTTPRedirection",
+ "HTTPRequestEntityTooLarge",
+ "HTTPRequestHeaderFieldsTooLarge",
+ "HTTPRequestRangeNotSatisfiable",
+ "HTTPRequestTimeout",
+ "HTTPRequestURITooLong",
+ "HTTPResetContent",
+ "HTTPSeeOther",
+ "HTTPServerError",
+ "HTTPServiceUnavailable",
+ "HTTPSuccessful",
+ "HTTPTemporaryRedirect",
+ "HTTPTooManyRequests",
+ "HTTPUnauthorized",
+ "HTTPUnavailableForLegalReasons",
+ "HTTPUnprocessableEntity",
+ "HTTPUnsupportedMediaType",
+ "HTTPUpgradeRequired",
+ "HTTPUseProxy",
+ "HTTPVariantAlsoNegotiates",
+ "HTTPVersionNotSupported",
+ # web_fileresponse
+ "FileResponse",
+ # web_middlewares
+ "middleware",
+ "normalize_path_middleware",
+ # web_protocol
+ "PayloadAccessError",
+ "RequestHandler",
+ "RequestPayloadError",
+ # web_request
+ "BaseRequest",
+ "FileField",
+ "Request",
+ # web_response
+ "ContentCoding",
+ "Response",
+ "StreamResponse",
+ "json_response",
+ # web_routedef
+ "AbstractRouteDef",
+ "RouteDef",
+ "RouteTableDef",
+ "StaticDef",
+ "delete",
+ "get",
+ "head",
+ "options",
+ "patch",
+ "post",
+ "put",
+ "route",
+ "static",
+ "view",
+ # web_runner
+ "AppRunner",
+ "BaseRunner",
+ "BaseSite",
+ "GracefulExit",
+ "ServerRunner",
+ "SockSite",
+ "TCPSite",
+ "UnixSite",
+ "NamedPipeSite",
+ # web_server
+ "Server",
+ # web_urldispatcher
+ "AbstractResource",
+ "AbstractRoute",
+ "DynamicResource",
+ "PlainResource",
+ "Resource",
+ "ResourceRoute",
+ "StaticResource",
+ "UrlDispatcher",
+ "UrlMappingMatchInfo",
+ "View",
+ # web_ws
+ "WebSocketReady",
+ "WebSocketResponse",
+ "WSMsgType",
+ # web
+ "run_app",
+)
+
+
+try:
+ from ssl import SSLContext
+except ImportError: # pragma: no cover
+ SSLContext = Any # type: ignore
+
+HostSequence = TypingIterable[str]
+
+
+async def _run_app(
+ app: Union[Application, Awaitable[Application]],
+ *,
+ host: Optional[Union[str, HostSequence]] = None,
+ port: Optional[int] = None,
+ path: Optional[str] = None,
+ sock: Optional[socket.socket] = None,
+ shutdown_timeout: float = 60.0,
+ ssl_context: Optional[SSLContext] = None,
+ print: Callable[..., None] = print,
+ backlog: int = 128,
+ access_log_class: Type[AbstractAccessLogger] = AccessLogger,
+ access_log_format: str = AccessLogger.LOG_FORMAT,
+ access_log: Optional[logging.Logger] = access_logger,
+ handle_signals: bool = True,
+ reuse_address: Optional[bool] = None,
+ reuse_port: Optional[bool] = None,
+) -> None:
+    # An internal function that does the dirty work of running the application
+ if asyncio.iscoroutine(app):
+ app = await app # type: ignore
+
+ app = cast(Application, app)
+
+ runner = AppRunner(
+ app,
+ handle_signals=handle_signals,
+ access_log_class=access_log_class,
+ access_log_format=access_log_format,
+ access_log=access_log,
+ )
+
+ await runner.setup()
+
+ sites = [] # type: List[BaseSite]
+
+ try:
+ if host is not None:
+ if isinstance(host, (str, bytes, bytearray, memoryview)):
+ sites.append(
+ TCPSite(
+ runner,
+ host,
+ port,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ reuse_address=reuse_address,
+ reuse_port=reuse_port,
+ )
+ )
+ else:
+ for h in host:
+ sites.append(
+ TCPSite(
+ runner,
+ h,
+ port,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ reuse_address=reuse_address,
+ reuse_port=reuse_port,
+ )
+ )
+        elif (path is None and sock is None) or port is not None:
+ sites.append(
+ TCPSite(
+ runner,
+ port=port,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ reuse_address=reuse_address,
+ reuse_port=reuse_port,
+ )
+ )
+
+ if path is not None:
+ if isinstance(path, (str, bytes, bytearray, memoryview)):
+ sites.append(
+ UnixSite(
+ runner,
+ path,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ )
+ )
+ else:
+ for p in path:
+ sites.append(
+ UnixSite(
+ runner,
+ p,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ )
+ )
+
+ if sock is not None:
+ if not isinstance(sock, Iterable):
+ sites.append(
+ SockSite(
+ runner,
+ sock,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ )
+ )
+ else:
+ for s in sock:
+ sites.append(
+ SockSite(
+ runner,
+ s,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ )
+ )
+ for site in sites:
+ await site.start()
+
+ if print: # pragma: no branch
+ names = sorted(str(s.name) for s in runner.sites)
+ print(
+ "======== Running on {} ========\n"
+ "(Press CTRL+C to quit)".format(", ".join(names))
+ )
+
+        # Sleep forever in one-hour intervals.
+        # On Windows before Python 3.8, wake up every second instead, to
+        # handle Ctrl+C smoothly.
+ if sys.platform == "win32" and sys.version_info < (3, 8):
+ delay = 1
+ else:
+ delay = 3600
+
+ while True:
+ await asyncio.sleep(delay)
+ finally:
+ await runner.cleanup()
+
+
+def _cancel_tasks(
+ to_cancel: Set["asyncio.Task[Any]"], loop: asyncio.AbstractEventLoop
+) -> None:
+ if not to_cancel:
+ return
+
+ for task in to_cancel:
+ task.cancel()
+
+ loop.run_until_complete(
+ asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)
+ )
+
+ for task in to_cancel:
+ if task.cancelled():
+ continue
+ if task.exception() is not None:
+ loop.call_exception_handler(
+ {
+ "message": "unhandled exception during asyncio.run() shutdown",
+ "exception": task.exception(),
+ "task": task,
+ }
+ )
+
+
+def run_app(
+ app: Union[Application, Awaitable[Application]],
+ *,
+ host: Optional[Union[str, HostSequence]] = None,
+ port: Optional[int] = None,
+ path: Optional[str] = None,
+ sock: Optional[socket.socket] = None,
+ shutdown_timeout: float = 60.0,
+ ssl_context: Optional[SSLContext] = None,
+ print: Callable[..., None] = print,
+ backlog: int = 128,
+ access_log_class: Type[AbstractAccessLogger] = AccessLogger,
+ access_log_format: str = AccessLogger.LOG_FORMAT,
+ access_log: Optional[logging.Logger] = access_logger,
+ handle_signals: bool = True,
+ reuse_address: Optional[bool] = None,
+ reuse_port: Optional[bool] = None,
+) -> None:
+ """Run an app locally"""
+ loop = asyncio.get_event_loop()
+
+ # Configure if and only if in debugging mode and using the default logger
+ if loop.get_debug() and access_log and access_log.name == "aiohttp.access":
+ if access_log.level == logging.NOTSET:
+ access_log.setLevel(logging.DEBUG)
+ if not access_log.hasHandlers():
+ access_log.addHandler(logging.StreamHandler())
+
+ try:
+ main_task = loop.create_task(
+ _run_app(
+ app,
+ host=host,
+ port=port,
+ path=path,
+ sock=sock,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ print=print,
+ backlog=backlog,
+ access_log_class=access_log_class,
+ access_log_format=access_log_format,
+ access_log=access_log,
+ handle_signals=handle_signals,
+ reuse_address=reuse_address,
+ reuse_port=reuse_port,
+ )
+ )
+ loop.run_until_complete(main_task)
+ except (GracefulExit, KeyboardInterrupt): # pragma: no cover
+ pass
+ finally:
+ _cancel_tasks({main_task}, loop)
+ _cancel_tasks(all_tasks(loop), loop)
+ if sys.version_info >= (3, 6): # don't use PY_36 to pass mypy
+ loop.run_until_complete(loop.shutdown_asyncgens())
+ loop.close()
+
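+# Illustrative usage (a sketch; the handler and route are hypothetical):
+#
+#     async def hello(request: Request) -> Response:
+#         return Response(text="Hello, world")
+#
+#     app = Application()
+#     app.add_routes([get("/", hello)])
+#     run_app(app)
+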
+
+def main(argv: List[str]) -> None:
+ arg_parser = ArgumentParser(
+ description="aiohttp.web Application server", prog="aiohttp.web"
+ )
+ arg_parser.add_argument(
+ "entry_func",
+ help=(
+ "Callable returning the `aiohttp.web.Application` instance to "
+ "run. Should be specified in the 'module:function' syntax."
+ ),
+ metavar="entry-func",
+ )
+ arg_parser.add_argument(
+ "-H",
+ "--hostname",
+ help="TCP/IP hostname to serve on (default: %(default)r)",
+ default="localhost",
+ )
+ arg_parser.add_argument(
+ "-P",
+ "--port",
+ help="TCP/IP port to serve on (default: %(default)r)",
+ type=int,
+ default="8080",
+ )
+ arg_parser.add_argument(
+ "-U",
+ "--path",
+ help="Unix file system path to serve on. Specifying a path will cause "
+ "hostname and port arguments to be ignored.",
+ )
+ args, extra_argv = arg_parser.parse_known_args(argv)
+
+ # Import logic
+ mod_str, _, func_str = args.entry_func.partition(":")
+ if not func_str or not mod_str:
+ arg_parser.error("'entry-func' not in 'module:function' syntax")
+ if mod_str.startswith("."):
+ arg_parser.error("relative module names not supported")
+ try:
+ module = import_module(mod_str)
+ except ImportError as ex:
+ arg_parser.error(f"unable to import {mod_str}: {ex}")
+ try:
+ func = getattr(module, func_str)
+ except AttributeError:
+ arg_parser.error(f"module {mod_str!r} has no attribute {func_str!r}")
+
+ # Compatibility logic
+ if args.path is not None and not hasattr(socket, "AF_UNIX"):
+ arg_parser.error(
+ "file system paths not supported by your operating" " environment"
+ )
+
+ logging.basicConfig(level=logging.DEBUG)
+
+ app = func(extra_argv)
+ run_app(app, host=args.hostname, port=args.port, path=args.path)
+ arg_parser.exit(message="Stopped\n")
+
+
+if __name__ == "__main__": # pragma: no branch
+ main(sys.argv[1:]) # pragma: no cover
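+
+# Illustrative command-line usage (mypackage.main:init_app is a
+# hypothetical entry point):
+#
+#     python -m aiohttp.web -H 0.0.0.0 -P 8080 mypackage.main:init_app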
diff --git a/third_party/python/aiohttp/aiohttp/web_app.py b/third_party/python/aiohttp/aiohttp/web_app.py
new file mode 100644
index 0000000000..14f2937ae5
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_app.py
@@ -0,0 +1,552 @@
+import asyncio
+import logging
+import warnings
+from functools import partial, update_wrapper
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ AsyncIterator,
+ Awaitable,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ MutableMapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+
+from . import hdrs
+from .abc import (
+ AbstractAccessLogger,
+ AbstractMatchInfo,
+ AbstractRouter,
+ AbstractStreamWriter,
+)
+from .frozenlist import FrozenList
+from .helpers import DEBUG
+from .http_parser import RawRequestMessage
+from .log import web_logger
+from .signals import Signal
+from .streams import StreamReader
+from .web_log import AccessLogger
+from .web_middlewares import _fix_request_current_app
+from .web_protocol import RequestHandler
+from .web_request import Request
+from .web_response import StreamResponse
+from .web_routedef import AbstractRouteDef
+from .web_server import Server
+from .web_urldispatcher import (
+ AbstractResource,
+ AbstractRoute,
+ Domain,
+ MaskDomain,
+ MatchedSubAppResource,
+ PrefixedSubAppResource,
+ UrlDispatcher,
+)
+
+__all__ = ("Application", "CleanupError")
+
+
+if TYPE_CHECKING: # pragma: no cover
+ _AppSignal = Signal[Callable[["Application"], Awaitable[None]]]
+ _RespPrepareSignal = Signal[Callable[[Request, StreamResponse], Awaitable[None]]]
+ _Handler = Callable[[Request], Awaitable[StreamResponse]]
+ _Middleware = Union[
+ Callable[[Request, _Handler], Awaitable[StreamResponse]],
+ Callable[["Application", _Handler], Awaitable[_Handler]], # old-style
+ ]
+ _Middlewares = FrozenList[_Middleware]
+ _MiddlewaresHandlers = Optional[Sequence[Tuple[_Middleware, bool]]]
+ _Subapps = List["Application"]
+else:
+ # No type checker mode, skip types
+ _AppSignal = Signal
+ _RespPrepareSignal = Signal
+ _Handler = Callable
+ _Middleware = Callable
+ _Middlewares = FrozenList
+ _MiddlewaresHandlers = Optional[Sequence]
+ _Subapps = List
+
+
+class Application(MutableMapping[str, Any]):
+ ATTRS = frozenset(
+ [
+ "logger",
+ "_debug",
+ "_router",
+ "_loop",
+ "_handler_args",
+ "_middlewares",
+ "_middlewares_handlers",
+ "_run_middlewares",
+ "_state",
+ "_frozen",
+ "_pre_frozen",
+ "_subapps",
+ "_on_response_prepare",
+ "_on_startup",
+ "_on_shutdown",
+ "_on_cleanup",
+ "_client_max_size",
+ "_cleanup_ctx",
+ ]
+ )
+
+ def __init__(
+ self,
+ *,
+ logger: logging.Logger = web_logger,
+ router: Optional[UrlDispatcher] = None,
+ middlewares: Iterable[_Middleware] = (),
+ handler_args: Optional[Mapping[str, Any]] = None,
+ client_max_size: int = 1024 ** 2,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ debug: Any = ..., # mypy doesn't support ellipsis
+ ) -> None:
+ if router is None:
+ router = UrlDispatcher()
+ else:
+ warnings.warn(
+ "router argument is deprecated", DeprecationWarning, stacklevel=2
+ )
+ assert isinstance(router, AbstractRouter), router
+
+ if loop is not None:
+ warnings.warn(
+ "loop argument is deprecated", DeprecationWarning, stacklevel=2
+ )
+
+ if debug is not ...:
+ warnings.warn(
+ "debug argument is deprecated", DeprecationWarning, stacklevel=2
+ )
+ self._debug = debug
+ self._router = router # type: UrlDispatcher
+ self._loop = loop
+ self._handler_args = handler_args
+ self.logger = logger
+
+ self._middlewares = FrozenList(middlewares) # type: _Middlewares
+
+ # initialized on freezing
+ self._middlewares_handlers = None # type: _MiddlewaresHandlers
+ # initialized on freezing
+ self._run_middlewares = None # type: Optional[bool]
+
+ self._state = {} # type: Dict[str, Any]
+ self._frozen = False
+ self._pre_frozen = False
+ self._subapps = [] # type: _Subapps
+
+ self._on_response_prepare = Signal(self) # type: _RespPrepareSignal
+ self._on_startup = Signal(self) # type: _AppSignal
+ self._on_shutdown = Signal(self) # type: _AppSignal
+ self._on_cleanup = Signal(self) # type: _AppSignal
+ self._cleanup_ctx = CleanupContext()
+ self._on_startup.append(self._cleanup_ctx._on_startup)
+ self._on_cleanup.append(self._cleanup_ctx._on_cleanup)
+ self._client_max_size = client_max_size
+
+ def __init_subclass__(cls: Type["Application"]) -> None:
+ warnings.warn(
+ "Inheritance class {} from web.Application "
+ "is discouraged".format(cls.__name__),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ if DEBUG: # pragma: no cover
+
+ def __setattr__(self, name: str, val: Any) -> None:
+ if name not in self.ATTRS:
+ warnings.warn(
+ "Setting custom web.Application.{} attribute "
+ "is discouraged".format(name),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ super().__setattr__(name, val)
+
+ # MutableMapping API
+
+ def __eq__(self, other: object) -> bool:
+ return self is other
+
+ def __getitem__(self, key: str) -> Any:
+ return self._state[key]
+
+ def _check_frozen(self) -> None:
+ if self._frozen:
+ warnings.warn(
+ "Changing state of started or joined " "application is deprecated",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+
+ def __setitem__(self, key: str, value: Any) -> None:
+ self._check_frozen()
+ self._state[key] = value
+
+ def __delitem__(self, key: str) -> None:
+ self._check_frozen()
+ del self._state[key]
+
+ def __len__(self) -> int:
+ return len(self._state)
+
+ def __iter__(self) -> Iterator[str]:
+ return iter(self._state)
+
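+    # Illustrative usage of the mapping API (a sketch; "config" is an
+    # arbitrary key):
+    #
+    #     app = Application()
+    #     app["config"] = {"debug": False}
+    #     assert app["config"]["debug"] is False
+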
+ ########
+ @property
+ def loop(self) -> asyncio.AbstractEventLoop:
+        # Technically the loop can be None,
+        # but we mask it with an explicit type cast
+        # to provide a more convenient type annotation.
+ warnings.warn("loop property is deprecated", DeprecationWarning, stacklevel=2)
+ return cast(asyncio.AbstractEventLoop, self._loop)
+
+ def _set_loop(self, loop: Optional[asyncio.AbstractEventLoop]) -> None:
+ if loop is None:
+ loop = asyncio.get_event_loop()
+ if self._loop is not None and self._loop is not loop:
+ raise RuntimeError(
+ "web.Application instance initialized with different loop"
+ )
+
+ self._loop = loop
+
+ # set loop debug
+ if self._debug is ...:
+ self._debug = loop.get_debug()
+
+ # set loop to sub applications
+ for subapp in self._subapps:
+ subapp._set_loop(loop)
+
+ @property
+ def pre_frozen(self) -> bool:
+ return self._pre_frozen
+
+ def pre_freeze(self) -> None:
+ if self._pre_frozen:
+ return
+
+ self._pre_frozen = True
+ self._middlewares.freeze()
+ self._router.freeze()
+ self._on_response_prepare.freeze()
+ self._cleanup_ctx.freeze()
+ self._on_startup.freeze()
+ self._on_shutdown.freeze()
+ self._on_cleanup.freeze()
+ self._middlewares_handlers = tuple(self._prepare_middleware())
+
+        # If neither the current app nor any subapp has middlewares, avoid
+        # running all of the code that middleware support implies: a
+        # middleware hardcoded per app that sets up the current_app
+        # attribute. If no middlewares are configured, the handler receives
+        # the proper current_app without needing any of this code.
+        self._run_middlewares = bool(self.middlewares)
+
+ for subapp in self._subapps:
+ subapp.pre_freeze()
+ self._run_middlewares = self._run_middlewares or subapp._run_middlewares
+
+ @property
+ def frozen(self) -> bool:
+ return self._frozen
+
+ def freeze(self) -> None:
+ if self._frozen:
+ return
+
+ self.pre_freeze()
+ self._frozen = True
+ for subapp in self._subapps:
+ subapp.freeze()
+
+ @property
+ def debug(self) -> bool:
+ warnings.warn("debug property is deprecated", DeprecationWarning, stacklevel=2)
+ return self._debug
+
+ def _reg_subapp_signals(self, subapp: "Application") -> None:
+ def reg_handler(signame: str) -> None:
+ subsig = getattr(subapp, signame)
+
+ async def handler(app: "Application") -> None:
+ await subsig.send(subapp)
+
+ appsig = getattr(self, signame)
+ appsig.append(handler)
+
+ reg_handler("on_startup")
+ reg_handler("on_shutdown")
+ reg_handler("on_cleanup")
+
+ def add_subapp(self, prefix: str, subapp: "Application") -> AbstractResource:
+ if not isinstance(prefix, str):
+ raise TypeError("Prefix must be str")
+ prefix = prefix.rstrip("/")
+ if not prefix:
+ raise ValueError("Prefix cannot be empty")
+ factory = partial(PrefixedSubAppResource, prefix, subapp)
+ return self._add_subapp(factory, subapp)
+
+ def _add_subapp(
+ self, resource_factory: Callable[[], AbstractResource], subapp: "Application"
+ ) -> AbstractResource:
+ if self.frozen:
+ raise RuntimeError("Cannot add sub application to frozen application")
+ if subapp.frozen:
+ raise RuntimeError("Cannot add frozen application")
+ resource = resource_factory()
+ self.router.register_resource(resource)
+ self._reg_subapp_signals(subapp)
+ self._subapps.append(subapp)
+ subapp.pre_freeze()
+ if self._loop is not None:
+ subapp._set_loop(self._loop)
+ return resource
+
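+    # Illustrative usage (a sketch; the admin sub-application is
+    # hypothetical):
+    #
+    #     admin = Application()
+    #     app.add_subapp("/admin/", admin)
+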
+ def add_domain(self, domain: str, subapp: "Application") -> AbstractResource:
+ if not isinstance(domain, str):
+ raise TypeError("Domain must be str")
+ elif "*" in domain:
+ rule = MaskDomain(domain) # type: Domain
+ else:
+ rule = Domain(domain)
+ factory = partial(MatchedSubAppResource, rule, subapp)
+ return self._add_subapp(factory, subapp)
+
+ def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]:
+ return self.router.add_routes(routes)
+
+ @property
+ def on_response_prepare(self) -> _RespPrepareSignal:
+ return self._on_response_prepare
+
+ @property
+ def on_startup(self) -> _AppSignal:
+ return self._on_startup
+
+ @property
+ def on_shutdown(self) -> _AppSignal:
+ return self._on_shutdown
+
+ @property
+ def on_cleanup(self) -> _AppSignal:
+ return self._on_cleanup
+
+ @property
+ def cleanup_ctx(self) -> "CleanupContext":
+ return self._cleanup_ctx
+
+ @property
+ def router(self) -> UrlDispatcher:
+ return self._router
+
+ @property
+ def middlewares(self) -> _Middlewares:
+ return self._middlewares
+
+ def _make_handler(
+ self,
+ *,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ access_log_class: Type[AbstractAccessLogger] = AccessLogger,
+ **kwargs: Any,
+ ) -> Server:
+
+ if not issubclass(access_log_class, AbstractAccessLogger):
+ raise TypeError(
+ "access_log_class must be subclass of "
+ "aiohttp.abc.AbstractAccessLogger, got {}".format(access_log_class)
+ )
+
+ self._set_loop(loop)
+ self.freeze()
+
+ kwargs["debug"] = self._debug
+ kwargs["access_log_class"] = access_log_class
+ if self._handler_args:
+ for k, v in self._handler_args.items():
+ kwargs[k] = v
+
+ return Server(
+ self._handle, # type: ignore
+ request_factory=self._make_request,
+ loop=self._loop,
+ **kwargs,
+ )
+
+ def make_handler(
+ self,
+ *,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ access_log_class: Type[AbstractAccessLogger] = AccessLogger,
+ **kwargs: Any,
+ ) -> Server:
+
+ warnings.warn(
+ "Application.make_handler(...) is deprecated, " "use AppRunner API instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ return self._make_handler(
+ loop=loop, access_log_class=access_log_class, **kwargs
+ )
+
+ async def startup(self) -> None:
+ """Causes on_startup signal
+
+ Should be called in the event loop along with the request handler.
+ """
+ await self.on_startup.send(self)
+
+ async def shutdown(self) -> None:
+ """Causes on_shutdown signal
+
+ Should be called before cleanup()
+ """
+ await self.on_shutdown.send(self)
+
+ async def cleanup(self) -> None:
+ """Causes on_cleanup signal
+
+ Should be called after shutdown()
+ """
+ await self.on_cleanup.send(self)
+
+ def _make_request(
+ self,
+ message: RawRequestMessage,
+ payload: StreamReader,
+ protocol: RequestHandler,
+ writer: AbstractStreamWriter,
+ task: "asyncio.Task[None]",
+ _cls: Type[Request] = Request,
+ ) -> Request:
+ return _cls(
+ message,
+ payload,
+ protocol,
+ writer,
+ task,
+ self._loop,
+ client_max_size=self._client_max_size,
+ )
+
+ def _prepare_middleware(self) -> Iterator[Tuple[_Middleware, bool]]:
+ for m in reversed(self._middlewares):
+ if getattr(m, "__middleware_version__", None) == 1:
+ yield m, True
+ else:
+ warnings.warn(
+ 'old-style middleware "{!r}" deprecated, ' "see #2252".format(m),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ yield m, False
+
+ yield _fix_request_current_app(self), True
+
+ async def _handle(self, request: Request) -> StreamResponse:
+ loop = asyncio.get_event_loop()
+ debug = loop.get_debug()
+ match_info = await self._router.resolve(request)
+ if debug: # pragma: no cover
+ if not isinstance(match_info, AbstractMatchInfo):
+ raise TypeError(
+ "match_info should be AbstractMatchInfo "
+ "instance, not {!r}".format(match_info)
+ )
+ match_info.add_app(self)
+
+ match_info.freeze()
+
+ resp = None
+ request._match_info = match_info # type: ignore
+ expect = request.headers.get(hdrs.EXPECT)
+ if expect:
+ resp = await match_info.expect_handler(request)
+ await request.writer.drain()
+
+ if resp is None:
+ handler = match_info.handler
+
+ if self._run_middlewares:
+ for app in match_info.apps[::-1]:
+ for m, new_style in app._middlewares_handlers: # type: ignore
+ if new_style:
+ handler = update_wrapper(
+ partial(m, handler=handler), handler
+ )
+ else:
+ handler = await m(app, handler) # type: ignore
+
+ resp = await handler(request)
+
+ return resp
+
+ def __call__(self) -> "Application":
+ """gunicorn compatibility"""
+ return self
+
+ def __repr__(self) -> str:
+ return "<Application 0x{:x}>".format(id(self))
+
+ def __bool__(self) -> bool:
+ return True
+
+
+class CleanupError(RuntimeError):
+ @property
+ def exceptions(self) -> List[BaseException]:
+ return self.args[1]
+
+
+if TYPE_CHECKING: # pragma: no cover
+ _CleanupContextBase = FrozenList[Callable[[Application], AsyncIterator[None]]]
+else:
+ _CleanupContextBase = FrozenList
+
+
+class CleanupContext(_CleanupContextBase):
+ def __init__(self) -> None:
+ super().__init__()
+ self._exits = [] # type: List[AsyncIterator[None]]
+
+ async def _on_startup(self, app: Application) -> None:
+ for cb in self:
+ it = cb(app).__aiter__()
+ await it.__anext__()
+ self._exits.append(it)
+
+ async def _on_cleanup(self, app: Application) -> None:
+ errors = []
+ for it in reversed(self._exits):
+ try:
+ await it.__anext__()
+ except StopAsyncIteration:
+ pass
+ except Exception as exc:
+ errors.append(exc)
+ else:
+ errors.append(RuntimeError(f"{it!r} has more than one 'yield'"))
+ if errors:
+ if len(errors) == 1:
+ raise errors[0]
+ else:
+ raise CleanupError("Multiple errors on cleanup stage", errors)
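+
+
+# Illustrative cleanup-context usage (a sketch; connect_db and the "db"
+# key are hypothetical):
+#
+#     async def db_context(app: Application) -> AsyncIterator[None]:
+#         app["db"] = await connect_db()
+#         yield
+#         await app["db"].close()
+#
+#     app.cleanup_ctx.append(db_context)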
diff --git a/third_party/python/aiohttp/aiohttp/web_exceptions.py b/third_party/python/aiohttp/aiohttp/web_exceptions.py
new file mode 100644
index 0000000000..2eadca0386
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_exceptions.py
@@ -0,0 +1,441 @@
+import warnings
+from typing import Any, Dict, Iterable, List, Optional, Set # noqa
+
+from yarl import URL
+
+from .typedefs import LooseHeaders, StrOrURL
+from .web_response import Response
+
+__all__ = (
+ "HTTPException",
+ "HTTPError",
+ "HTTPRedirection",
+ "HTTPSuccessful",
+ "HTTPOk",
+ "HTTPCreated",
+ "HTTPAccepted",
+ "HTTPNonAuthoritativeInformation",
+ "HTTPNoContent",
+ "HTTPResetContent",
+ "HTTPPartialContent",
+ "HTTPMultipleChoices",
+ "HTTPMovedPermanently",
+ "HTTPFound",
+ "HTTPSeeOther",
+ "HTTPNotModified",
+ "HTTPUseProxy",
+ "HTTPTemporaryRedirect",
+ "HTTPPermanentRedirect",
+ "HTTPClientError",
+ "HTTPBadRequest",
+ "HTTPUnauthorized",
+ "HTTPPaymentRequired",
+ "HTTPForbidden",
+ "HTTPNotFound",
+ "HTTPMethodNotAllowed",
+ "HTTPNotAcceptable",
+ "HTTPProxyAuthenticationRequired",
+ "HTTPRequestTimeout",
+ "HTTPConflict",
+ "HTTPGone",
+ "HTTPLengthRequired",
+ "HTTPPreconditionFailed",
+ "HTTPRequestEntityTooLarge",
+ "HTTPRequestURITooLong",
+ "HTTPUnsupportedMediaType",
+ "HTTPRequestRangeNotSatisfiable",
+ "HTTPExpectationFailed",
+ "HTTPMisdirectedRequest",
+ "HTTPUnprocessableEntity",
+ "HTTPFailedDependency",
+ "HTTPUpgradeRequired",
+ "HTTPPreconditionRequired",
+ "HTTPTooManyRequests",
+ "HTTPRequestHeaderFieldsTooLarge",
+ "HTTPUnavailableForLegalReasons",
+ "HTTPServerError",
+ "HTTPInternalServerError",
+ "HTTPNotImplemented",
+ "HTTPBadGateway",
+ "HTTPServiceUnavailable",
+ "HTTPGatewayTimeout",
+ "HTTPVersionNotSupported",
+ "HTTPVariantAlsoNegotiates",
+ "HTTPInsufficientStorage",
+ "HTTPNotExtended",
+ "HTTPNetworkAuthenticationRequired",
+)
+
+
+############################################################
+# HTTP Exceptions
+############################################################
+
+
+class HTTPException(Response, Exception):
+
+    # Subclasses should set:
+    # status_code = 200
+
+ status_code = -1
+ empty_body = False
+
+ __http_exception__ = True
+
+ def __init__(
+ self,
+ *,
+ headers: Optional[LooseHeaders] = None,
+ reason: Optional[str] = None,
+ body: Any = None,
+ text: Optional[str] = None,
+ content_type: Optional[str] = None,
+ ) -> None:
+ if body is not None:
+ warnings.warn(
+ "body argument is deprecated for http web exceptions",
+ DeprecationWarning,
+ )
+ Response.__init__(
+ self,
+ status=self.status_code,
+ headers=headers,
+ reason=reason,
+ body=body,
+ text=text,
+ content_type=content_type,
+ )
+ Exception.__init__(self, self.reason)
+ if self.body is None and not self.empty_body:
+ self.text = f"{self.status}: {self.reason}"
+
+ def __bool__(self) -> bool:
+ return True
+
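+# Illustrative usage (a sketch; the handler is hypothetical):
+#
+#     async def handler(request):
+#         if "item" not in request.query:
+#             raise HTTPBadRequest(text="item parameter is required")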
+
+class HTTPError(HTTPException):
+ """Base class for exceptions with status codes in the 400s and 500s."""
+
+
+class HTTPRedirection(HTTPException):
+ """Base class for exceptions with status codes in the 300s."""
+
+
+class HTTPSuccessful(HTTPException):
+ """Base class for exceptions with status codes in the 200s."""
+
+
+class HTTPOk(HTTPSuccessful):
+ status_code = 200
+
+
+class HTTPCreated(HTTPSuccessful):
+ status_code = 201
+
+
+class HTTPAccepted(HTTPSuccessful):
+ status_code = 202
+
+
+class HTTPNonAuthoritativeInformation(HTTPSuccessful):
+ status_code = 203
+
+
+class HTTPNoContent(HTTPSuccessful):
+ status_code = 204
+ empty_body = True
+
+
+class HTTPResetContent(HTTPSuccessful):
+ status_code = 205
+ empty_body = True
+
+
+class HTTPPartialContent(HTTPSuccessful):
+ status_code = 206
+
+
+############################################################
+# 3xx redirection
+############################################################
+
+
+class _HTTPMove(HTTPRedirection):
+ def __init__(
+ self,
+ location: StrOrURL,
+ *,
+ headers: Optional[LooseHeaders] = None,
+ reason: Optional[str] = None,
+ body: Any = None,
+ text: Optional[str] = None,
+ content_type: Optional[str] = None,
+ ) -> None:
+ if not location:
+ raise ValueError("HTTP redirects need a location to redirect to.")
+ super().__init__(
+ headers=headers,
+ reason=reason,
+ body=body,
+ text=text,
+ content_type=content_type,
+ )
+ self.headers["Location"] = str(URL(location))
+ self.location = location
+
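+# Illustrative usage (a sketch; "/login" is an arbitrary redirect target):
+#
+#     raise HTTPFound(location="/login")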
+
+class HTTPMultipleChoices(_HTTPMove):
+ status_code = 300
+
+
+class HTTPMovedPermanently(_HTTPMove):
+ status_code = 301
+
+
+class HTTPFound(_HTTPMove):
+ status_code = 302
+
+
+# This one is safe after a POST (the redirected location will be
+# retrieved with GET):
+class HTTPSeeOther(_HTTPMove):
+ status_code = 303
+
+
+class HTTPNotModified(HTTPRedirection):
+ # FIXME: this should include a date or etag header
+ status_code = 304
+ empty_body = True
+
+
+class HTTPUseProxy(_HTTPMove):
+ # Not a move, but looks a little like one
+ status_code = 305
+
+
+class HTTPTemporaryRedirect(_HTTPMove):
+ status_code = 307
+
+
+class HTTPPermanentRedirect(_HTTPMove):
+ status_code = 308
+
+
+############################################################
+# 4xx client error
+############################################################
+
+
+class HTTPClientError(HTTPError):
+ pass
+
+
+class HTTPBadRequest(HTTPClientError):
+ status_code = 400
+
+
+class HTTPUnauthorized(HTTPClientError):
+ status_code = 401
+
+
+class HTTPPaymentRequired(HTTPClientError):
+ status_code = 402
+
+
+class HTTPForbidden(HTTPClientError):
+ status_code = 403
+
+
+class HTTPNotFound(HTTPClientError):
+ status_code = 404
+
+
+class HTTPMethodNotAllowed(HTTPClientError):
+ status_code = 405
+
+ def __init__(
+ self,
+ method: str,
+ allowed_methods: Iterable[str],
+ *,
+ headers: Optional[LooseHeaders] = None,
+ reason: Optional[str] = None,
+ body: Any = None,
+ text: Optional[str] = None,
+ content_type: Optional[str] = None,
+ ) -> None:
+ allow = ",".join(sorted(allowed_methods))
+ super().__init__(
+ headers=headers,
+ reason=reason,
+ body=body,
+ text=text,
+ content_type=content_type,
+ )
+ self.headers["Allow"] = allow
+ self.allowed_methods = set(allowed_methods) # type: Set[str]
+ self.method = method.upper()
+
+
+class HTTPNotAcceptable(HTTPClientError):
+ status_code = 406
+
+
+class HTTPProxyAuthenticationRequired(HTTPClientError):
+ status_code = 407
+
+
+class HTTPRequestTimeout(HTTPClientError):
+ status_code = 408
+
+
+class HTTPConflict(HTTPClientError):
+ status_code = 409
+
+
+class HTTPGone(HTTPClientError):
+ status_code = 410
+
+
+class HTTPLengthRequired(HTTPClientError):
+ status_code = 411
+
+
+class HTTPPreconditionFailed(HTTPClientError):
+ status_code = 412
+
+
+class HTTPRequestEntityTooLarge(HTTPClientError):
+ status_code = 413
+
+ def __init__(self, max_size: float, actual_size: float, **kwargs: Any) -> None:
+ kwargs.setdefault(
+ "text",
+ "Maximum request body size {} exceeded, "
+ "actual body size {}".format(max_size, actual_size),
+ )
+ super().__init__(**kwargs)
+
+
+class HTTPRequestURITooLong(HTTPClientError):
+ status_code = 414
+
+
+class HTTPUnsupportedMediaType(HTTPClientError):
+ status_code = 415
+
+
+class HTTPRequestRangeNotSatisfiable(HTTPClientError):
+ status_code = 416
+
+
+class HTTPExpectationFailed(HTTPClientError):
+ status_code = 417
+
+
+class HTTPMisdirectedRequest(HTTPClientError):
+ status_code = 421
+
+
+class HTTPUnprocessableEntity(HTTPClientError):
+ status_code = 422
+
+
+class HTTPFailedDependency(HTTPClientError):
+ status_code = 424
+
+
+class HTTPUpgradeRequired(HTTPClientError):
+ status_code = 426
+
+
+class HTTPPreconditionRequired(HTTPClientError):
+ status_code = 428
+
+
+class HTTPTooManyRequests(HTTPClientError):
+ status_code = 429
+
+
+class HTTPRequestHeaderFieldsTooLarge(HTTPClientError):
+ status_code = 431
+
+
+class HTTPUnavailableForLegalReasons(HTTPClientError):
+ status_code = 451
+
+ def __init__(
+ self,
+ link: str,
+ *,
+ headers: Optional[LooseHeaders] = None,
+ reason: Optional[str] = None,
+ body: Any = None,
+ text: Optional[str] = None,
+ content_type: Optional[str] = None,
+ ) -> None:
+ super().__init__(
+ headers=headers,
+ reason=reason,
+ body=body,
+ text=text,
+ content_type=content_type,
+ )
+ self.headers["Link"] = '<%s>; rel="blocked-by"' % link
+ self.link = link
+
+
+############################################################
+# 5xx Server Error
+############################################################
+# Response status codes beginning with the digit "5" indicate cases in
+# which the server is aware that it has erred or is incapable of
+# performing the request. Except when responding to a HEAD request, the
+# server SHOULD include an entity containing an explanation of the error
+# situation, and whether it is a temporary or permanent condition. User
+# agents SHOULD display any included entity to the user. These response
+# codes are applicable to any request method.
+
+
+class HTTPServerError(HTTPError):
+ pass
+
+
+class HTTPInternalServerError(HTTPServerError):
+ status_code = 500
+
+
+class HTTPNotImplemented(HTTPServerError):
+ status_code = 501
+
+
+class HTTPBadGateway(HTTPServerError):
+ status_code = 502
+
+
+class HTTPServiceUnavailable(HTTPServerError):
+ status_code = 503
+
+
+class HTTPGatewayTimeout(HTTPServerError):
+ status_code = 504
+
+
+class HTTPVersionNotSupported(HTTPServerError):
+ status_code = 505
+
+
+class HTTPVariantAlsoNegotiates(HTTPServerError):
+ status_code = 506
+
+
+class HTTPInsufficientStorage(HTTPServerError):
+ status_code = 507
+
+
+class HTTPNotExtended(HTTPServerError):
+ status_code = 510
+
+
+class HTTPNetworkAuthenticationRequired(HTTPServerError):
+ status_code = 511
diff --git a/third_party/python/aiohttp/aiohttp/web_fileresponse.py b/third_party/python/aiohttp/aiohttp/web_fileresponse.py
new file mode 100644
index 0000000000..0737c4f42d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_fileresponse.py
@@ -0,0 +1,243 @@
+import asyncio
+import mimetypes
+import os
+import pathlib
+import sys
+from typing import ( # noqa
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ List,
+ Optional,
+ Union,
+ cast,
+)
+
+from . import hdrs
+from .abc import AbstractStreamWriter
+from .typedefs import LooseHeaders
+from .web_exceptions import (
+ HTTPNotModified,
+ HTTPPartialContent,
+ HTTPPreconditionFailed,
+ HTTPRequestRangeNotSatisfiable,
+)
+from .web_response import StreamResponse
+
+__all__ = ("FileResponse",)
+
+if TYPE_CHECKING: # pragma: no cover
+ from .web_request import BaseRequest
+
+
+_T_OnChunkSent = Optional[Callable[[bytes], Awaitable[None]]]
+
+
+NOSENDFILE = bool(os.environ.get("AIOHTTP_NOSENDFILE"))
+
+
+class FileResponse(StreamResponse):
+ """A response object can be used to send files."""
+
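+    # Illustrative usage (a sketch; the path is hypothetical):
+    #
+    #     async def handler(request):
+    #         return FileResponse("static/index.html")
+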
+ def __init__(
+ self,
+ path: Union[str, pathlib.Path],
+ chunk_size: int = 256 * 1024,
+ status: int = 200,
+ reason: Optional[str] = None,
+ headers: Optional[LooseHeaders] = None,
+ ) -> None:
+ super().__init__(status=status, reason=reason, headers=headers)
+
+ if isinstance(path, str):
+ path = pathlib.Path(path)
+
+ self._path = path
+ self._chunk_size = chunk_size
+
+ async def _sendfile_fallback(
+ self, writer: AbstractStreamWriter, fobj: IO[Any], offset: int, count: int
+ ) -> AbstractStreamWriter:
+        # To keep memory usage low, fobj is transferred in chunks
+ # controlled by the constructor's chunk_size argument.
+
+ chunk_size = self._chunk_size
+ loop = asyncio.get_event_loop()
+
+ await loop.run_in_executor(None, fobj.seek, offset)
+
+ chunk = await loop.run_in_executor(None, fobj.read, chunk_size)
+ while chunk:
+ await writer.write(chunk)
+ count = count - chunk_size
+ if count <= 0:
+ break
+ chunk = await loop.run_in_executor(None, fobj.read, min(chunk_size, count))
+
+ await writer.drain()
+ return writer
+
+ async def _sendfile(
+ self, request: "BaseRequest", fobj: IO[Any], offset: int, count: int
+ ) -> AbstractStreamWriter:
+ writer = await super().prepare(request)
+ assert writer is not None
+
+ if NOSENDFILE or sys.version_info < (3, 7) or self.compression:
+ return await self._sendfile_fallback(writer, fobj, offset, count)
+
+ loop = request._loop
+ transport = request.transport
+ assert transport is not None
+
+ try:
+ await loop.sendfile(transport, fobj, offset, count)
+ except NotImplementedError:
+ return await self._sendfile_fallback(writer, fobj, offset, count)
+
+ await super().write_eof()
+ return writer
+
+ async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]:
+ filepath = self._path
+
+ gzip = False
+ if "gzip" in request.headers.get(hdrs.ACCEPT_ENCODING, ""):
+ gzip_path = filepath.with_name(filepath.name + ".gz")
+
+ if gzip_path.is_file():
+ filepath = gzip_path
+ gzip = True
+
+ loop = asyncio.get_event_loop()
+ st = await loop.run_in_executor(None, filepath.stat)
+
+ modsince = request.if_modified_since
+ if modsince is not None and st.st_mtime <= modsince.timestamp():
+ self.set_status(HTTPNotModified.status_code)
+ self._length_check = False
+            # Delete any Content-Length headers provided by the user. HTTP 304
+            # should always have an empty response body.
+ return await super().prepare(request)
+
+ unmodsince = request.if_unmodified_since
+ if unmodsince is not None and st.st_mtime > unmodsince.timestamp():
+ self.set_status(HTTPPreconditionFailed.status_code)
+ return await super().prepare(request)
+
+ if hdrs.CONTENT_TYPE not in self.headers:
+ ct, encoding = mimetypes.guess_type(str(filepath))
+ if not ct:
+ ct = "application/octet-stream"
+ should_set_ct = True
+ else:
+ encoding = "gzip" if gzip else None
+ should_set_ct = False
+
+ status = self._status
+ file_size = st.st_size
+ count = file_size
+
+ start = None
+
+ ifrange = request.if_range
+ if ifrange is None or st.st_mtime <= ifrange.timestamp():
+            # If-Range header check:
+            #   condition = cached date >= last modification date.
+            #   If the condition holds, the Range header is processed and
+            #   206 is returned (200 if the Range header is missing).
+            #   If the condition fails, the Range header is not processed
+            #   and 200 is returned.
+ try:
+ rng = request.http_range
+ start = rng.start
+ end = rng.stop
+ except ValueError:
+ # https://tools.ietf.org/html/rfc7233:
+ # A server generating a 416 (Range Not Satisfiable) response to
+ # a byte-range request SHOULD send a Content-Range header field
+ # with an unsatisfied-range value.
+ # The complete-length in a 416 response indicates the current
+ # length of the selected representation.
+ #
+ # Will do the same below. Many servers ignore this and do not
+ # send a Content-Range header with HTTP 416
+ self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}"
+ self.set_status(HTTPRequestRangeNotSatisfiable.status_code)
+ return await super().prepare(request)
+
+ # If a range request has been made, convert start, end slice
+ # notation into file pointer offset and count
+ if start is not None or end is not None:
+ if start < 0 and end is None: # return tail of file
+ start += file_size
+ if start < 0:
+                    # if the request header is Range: bytes=-1000 but the
+                    # file size is only 200, clamp start to 0 to avoid a
+                    # negative offset
+ count = file_size - start
+ else:
+                # RFC 7233: If the last-byte-pos value is
+ # absent, or if the value is greater than or equal to
+ # the current length of the representation data,
+ # the byte range is interpreted as the remainder
+ # of the representation (i.e., the server replaces the
+ # value of last-byte-pos with a value that is one less than
+ # the current length of the selected representation).
+ count = (
+ min(end if end is not None else file_size, file_size) - start
+ )
+
+ if start >= file_size:
+ # HTTP 416 should be returned in this case.
+ #
+ # According to https://tools.ietf.org/html/rfc7233:
+ # If a valid byte-range-set includes at least one
+ # byte-range-spec with a first-byte-pos that is less than
+ # the current length of the representation, or at least one
+ # suffix-byte-range-spec with a non-zero suffix-length,
+ # then the byte-range-set is satisfiable. Otherwise, the
+ # byte-range-set is unsatisfiable.
+ self.headers[hdrs.CONTENT_RANGE] = f"bytes */{file_size}"
+ self.set_status(HTTPRequestRangeNotSatisfiable.status_code)
+ return await super().prepare(request)
+
+ status = HTTPPartialContent.status_code
+ # Even though you are sending the whole file, you should still
+            # return an HTTP 206 for a Range request.
+ self.set_status(status)
+
+ if should_set_ct:
+ self.content_type = ct # type: ignore
+ if encoding:
+ self.headers[hdrs.CONTENT_ENCODING] = encoding
+ if gzip:
+ self.headers[hdrs.VARY] = hdrs.ACCEPT_ENCODING
+ self.last_modified = st.st_mtime # type: ignore
+ self.content_length = count
+
+ self.headers[hdrs.ACCEPT_RANGES] = "bytes"
+
+ real_start = cast(int, start)
+
+ if status == HTTPPartialContent.status_code:
+ self.headers[hdrs.CONTENT_RANGE] = "bytes {}-{}/{}".format(
+ real_start, real_start + count - 1, file_size
+ )
+
+ if request.method == hdrs.METH_HEAD or self.status in [204, 304]:
+ return await super().prepare(request)
+
+ fobj = await loop.run_in_executor(None, filepath.open, "rb")
+        if start:  # note that start could be None or 0 here.
+ offset = start
+ else:
+ offset = 0
+
+ try:
+ return await self._sendfile(request, fobj, offset, count)
+ finally:
+ await loop.run_in_executor(None, fobj.close)
diff --git a/third_party/python/aiohttp/aiohttp/web_log.py b/third_party/python/aiohttp/aiohttp/web_log.py
new file mode 100644
index 0000000000..4cfa57929a
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_log.py
@@ -0,0 +1,208 @@
+import datetime
+import functools
+import logging
+import os
+import re
+from collections import namedtuple
+from typing import Any, Callable, Dict, Iterable, List, Tuple # noqa
+
+from .abc import AbstractAccessLogger
+from .web_request import BaseRequest
+from .web_response import StreamResponse
+
+KeyMethod = namedtuple("KeyMethod", "key method")
+
+
+class AccessLogger(AbstractAccessLogger):
+ """Helper object to log access.
+
+ Usage:
+ log = logging.getLogger("spam")
+ log_format = "%a %{User-Agent}i"
+ access_logger = AccessLogger(log, log_format)
+ access_logger.log(request, response, time)
+
+ Format:
+ %% The percent sign
+ %a Remote IP-address (IP-address of proxy if using reverse proxy)
+ %t Time when the request was started to process
+ %P The process ID of the child that serviced the request
+ %r First line of request
+ %s Response status code
+ %b Size of response in bytes, including HTTP headers
+ %T Time taken to serve the request, in seconds
+ %Tf Time taken to serve the request, in seconds with floating fraction
+ in .06f format
+ %D Time taken to serve the request, in microseconds
+ %{FOO}i request.headers['FOO']
+ %{FOO}o response.headers['FOO']
+ %{FOO}e os.environ['FOO']
+
+ """
+
+ LOG_FORMAT_MAP = {
+ "a": "remote_address",
+ "t": "request_start_time",
+ "P": "process_id",
+ "r": "first_request_line",
+ "s": "response_status",
+ "b": "response_size",
+ "T": "request_time",
+ "Tf": "request_time_frac",
+ "D": "request_time_micro",
+ "i": "request_header",
+ "o": "response_header",
+ }
+
+ LOG_FORMAT = '%a %t "%r" %s %b "%{Referer}i" "%{User-Agent}i"'
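+    # With the default LOG_FORMAT above, a logged line looks roughly like
+    # this (all values illustrative):
+    #   127.0.0.1 [01/Jan/2024:00:00:00 +0000] "GET / HTTP/1.1" 200 1024 "-" "curl/8.4.0"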
+ FORMAT_RE = re.compile(r"%(\{([A-Za-z0-9\-_]+)\}([ioe])|[atPrsbOD]|Tf?)")
+ CLEANUP_RE = re.compile(r"(%[^s])")
+ _FORMAT_CACHE = {} # type: Dict[str, Tuple[str, List[KeyMethod]]]
+
+ def __init__(self, logger: logging.Logger, log_format: str = LOG_FORMAT) -> None:
+ """Initialise the logger.
+
+ logger is a logger object to be used for logging.
+        log_format is a string with an Apache-compatible log format description.
+
+ """
+ super().__init__(logger, log_format=log_format)
+
+ _compiled_format = AccessLogger._FORMAT_CACHE.get(log_format)
+ if not _compiled_format:
+ _compiled_format = self.compile_format(log_format)
+ AccessLogger._FORMAT_CACHE[log_format] = _compiled_format
+
+ self._log_format, self._methods = _compiled_format
+
+ def compile_format(self, log_format: str) -> Tuple[str, List[KeyMethod]]:
+ """Translate log_format into form usable by modulo formatting
+
+ All known atoms will be replaced with %s
+ Also methods for formatting of those atoms will be added to
+ _methods in appropriate order
+
+ For example we have log_format = "%a %t"
+ This format will be translated to "%s %s"
+ Also contents of _methods will be
+ [self._format_a, self._format_t]
+ These method will be called and results will be passed
+ to translated string format.
+
+ Each _format_* method receive 'args' which is list of arguments
+ given to self.log
+
+ Exceptions are _format_e, _format_i and _format_o methods which
+ also receive key name (by functools.partial)
+
+ """
+        # list of (key, method) tuples; we don't use an OrderedDict because
+        # users can repeat the same key more than once
+ methods = list()
+
+ for atom in self.FORMAT_RE.findall(log_format):
+ if atom[1] == "":
+ format_key1 = self.LOG_FORMAT_MAP[atom[0]]
+ m = getattr(AccessLogger, "_format_%s" % atom[0])
+ key_method = KeyMethod(format_key1, m)
+ else:
+ format_key2 = (self.LOG_FORMAT_MAP[atom[2]], atom[1])
+ m = getattr(AccessLogger, "_format_%s" % atom[2])
+ key_method = KeyMethod(format_key2, functools.partial(m, atom[1]))
+
+ methods.append(key_method)
+
+ log_format = self.FORMAT_RE.sub(r"%s", log_format)
+ log_format = self.CLEANUP_RE.sub(r"%\1", log_format)
+ return log_format, methods
+
+ @staticmethod
+ def _format_i(
+ key: str, request: BaseRequest, response: StreamResponse, time: float
+ ) -> str:
+ if request is None:
+ return "(no headers)"
+
+ # suboptimal, make istr(key) once
+ return request.headers.get(key, "-")
+
+ @staticmethod
+ def _format_o(
+ key: str, request: BaseRequest, response: StreamResponse, time: float
+ ) -> str:
+ # suboptimal, make istr(key) once
+ return response.headers.get(key, "-")
+
+ @staticmethod
+ def _format_a(request: BaseRequest, response: StreamResponse, time: float) -> str:
+ if request is None:
+ return "-"
+ ip = request.remote
+ return ip if ip is not None else "-"
+
+ @staticmethod
+ def _format_t(request: BaseRequest, response: StreamResponse, time: float) -> str:
+ now = datetime.datetime.utcnow()
+ start_time = now - datetime.timedelta(seconds=time)
+ return start_time.strftime("[%d/%b/%Y:%H:%M:%S +0000]")
+
+ @staticmethod
+ def _format_P(request: BaseRequest, response: StreamResponse, time: float) -> str:
+ return "<%s>" % os.getpid()
+
+ @staticmethod
+ def _format_r(request: BaseRequest, response: StreamResponse, time: float) -> str:
+ if request is None:
+ return "-"
+ return "{} {} HTTP/{}.{}".format(
+ request.method,
+ request.path_qs,
+ request.version.major,
+ request.version.minor,
+ )
+
+ @staticmethod
+ def _format_s(request: BaseRequest, response: StreamResponse, time: float) -> int:
+ return response.status
+
+ @staticmethod
+ def _format_b(request: BaseRequest, response: StreamResponse, time: float) -> int:
+ return response.body_length
+
+ @staticmethod
+ def _format_T(request: BaseRequest, response: StreamResponse, time: float) -> str:
+ return str(round(time))
+
+ @staticmethod
+ def _format_Tf(request: BaseRequest, response: StreamResponse, time: float) -> str:
+ return "%06f" % time
+
+ @staticmethod
+ def _format_D(request: BaseRequest, response: StreamResponse, time: float) -> str:
+ return str(round(time * 1000000))
+
+ def _format_line(
+ self, request: BaseRequest, response: StreamResponse, time: float
+ ) -> Iterable[Tuple[str, Callable[[BaseRequest, StreamResponse, float], str]]]:
+ return [(key, method(request, response, time)) for key, method in self._methods]
+
+ def log(self, request: BaseRequest, response: StreamResponse, time: float) -> None:
+ try:
+ fmt_info = self._format_line(request, response, time)
+
+ values = list()
+ extra = dict()
+ for key, value in fmt_info:
+ values.append(value)
+
+ if key.__class__ is str:
+ extra[key] = value
+ else:
+ k1, k2 = key # type: ignore
+ dct = extra.get(k1, {}) # type: ignore
+ dct[k2] = value # type: ignore
+ extra[k1] = dct # type: ignore
+
+ self.logger.info(self._log_format % tuple(values), extra=extra)
+ except Exception:
+ self.logger.exception("Error in logging")
diff --git a/third_party/python/aiohttp/aiohttp/web_middlewares.py b/third_party/python/aiohttp/aiohttp/web_middlewares.py
new file mode 100644
index 0000000000..8a8967e813
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_middlewares.py
@@ -0,0 +1,121 @@
+import re
+from typing import TYPE_CHECKING, Awaitable, Callable, Tuple, Type, TypeVar
+
+from .web_exceptions import HTTPPermanentRedirect, _HTTPMove
+from .web_request import Request
+from .web_response import StreamResponse
+from .web_urldispatcher import SystemRoute
+
+__all__ = (
+ "middleware",
+ "normalize_path_middleware",
+)
+
+if TYPE_CHECKING: # pragma: no cover
+ from .web_app import Application
+
+_Func = TypeVar("_Func")
+
+
+async def _check_request_resolves(request: Request, path: str) -> Tuple[bool, Request]:
+ alt_request = request.clone(rel_url=path)
+
+ match_info = await request.app.router.resolve(alt_request)
+ alt_request._match_info = match_info # type: ignore
+
+ if match_info.http_exception is None:
+ return True, alt_request
+
+ return False, request
+
+
+def middleware(f: _Func) -> _Func:
+ f.__middleware_version__ = 1 # type: ignore
+ return f
+
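+# Usage sketch for the decorator above (hypothetical middleware; the
+# handler body is illustrative only):
+#
+#     @middleware
+#     async def example_middleware(request, handler):
+#         # code before the handler runs could go here
+#         response = await handler(request)
+#         # code after the handler runs could go here
+#         return response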
+
+_Handler = Callable[[Request], Awaitable[StreamResponse]]
+_Middleware = Callable[[Request, _Handler], Awaitable[StreamResponse]]
+
+
+def normalize_path_middleware(
+ *,
+ append_slash: bool = True,
+ remove_slash: bool = False,
+ merge_slashes: bool = True,
+ redirect_class: Type[_HTTPMove] = HTTPPermanentRedirect
+) -> _Middleware:
+ """
+ Middleware factory which produces a middleware that normalizes
+ the path of a request. By normalizing it means:
+
+ - Add or remove a trailing slash to the path.
+ - Double slashes are replaced by one.
+
+ The middleware returns as soon as it finds a path that resolves
+ correctly. The order if both merge and append/remove are enabled is
+ 1) merge slashes
+ 2) append/remove slash
+ 3) both merge slashes and append/remove slash.
+ If the path resolves with at least one of those conditions, it will
+ redirect to the new path.
+
+ Only one of `append_slash` and `remove_slash` can be enabled. If both
+ are `True` the factory will raise an assertion error
+
+ If `append_slash` is `True` the middleware will append a slash when
+ needed. If a resource is defined with trailing slash and the request
+ comes without it, it will append it automatically.
+
+ If `remove_slash` is `True`, `append_slash` must be `False`. When enabled
+ the middleware will remove trailing slashes and redirect if the resource
+ is defined
+
+ If merge_slashes is True, merge multiple consecutive slashes in the
+ path into one.
+ """
+
+ correct_configuration = not (append_slash and remove_slash)
+ assert correct_configuration, "Cannot both remove and append slash"
+
+ @middleware
+ async def impl(request: Request, handler: _Handler) -> StreamResponse:
+ if isinstance(request.match_info.route, SystemRoute):
+ paths_to_check = []
+ if "?" in request.raw_path:
+ path, query = request.raw_path.split("?", 1)
+ query = "?" + query
+ else:
+ query = ""
+ path = request.raw_path
+
+ if merge_slashes:
+ paths_to_check.append(re.sub("//+", "/", path))
+ if append_slash and not request.path.endswith("/"):
+ paths_to_check.append(path + "/")
+ if remove_slash and request.path.endswith("/"):
+ paths_to_check.append(path[:-1])
+ if merge_slashes and append_slash:
+ paths_to_check.append(re.sub("//+", "/", path + "/"))
+ if merge_slashes and remove_slash:
+ merged_slashes = re.sub("//+", "/", path)
+ paths_to_check.append(merged_slashes[:-1])
+
+ for path in paths_to_check:
+ path = re.sub("^//+", "/", path) # SECURITY: GHSA-v6wp-4m6f-gcjg
+ resolves, request = await _check_request_resolves(request, path)
+ if resolves:
+ raise redirect_class(request.raw_path + query)
+
+ return await handler(request)
+
+ return impl
+
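+# Usage sketch (hypothetical application setup; assumes the aiohttp
+# `web.Application` entry point, which is not defined in this module):
+#
+#     app = web.Application(middlewares=[
+#         normalize_path_middleware(append_slash=True, merge_slashes=True),
+#     ])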
+
+def _fix_request_current_app(app: "Application") -> _Middleware:
+ @middleware
+ async def impl(request: Request, handler: _Handler) -> StreamResponse:
+ with request.match_info.set_current_app(app):
+ return await handler(request)
+
+ return impl
diff --git a/third_party/python/aiohttp/aiohttp/web_protocol.py b/third_party/python/aiohttp/aiohttp/web_protocol.py
new file mode 100644
index 0000000000..8e02bc4aab
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_protocol.py
@@ -0,0 +1,667 @@
+import asyncio
+import asyncio.streams
+import traceback
+import warnings
+from collections import deque
+from contextlib import suppress
+from html import escape as html_escape
+from http import HTTPStatus
+from logging import Logger
+from typing import TYPE_CHECKING, Any, Awaitable, Callable, Optional, Tuple, Type, cast
+
+import yarl
+
+from .abc import AbstractAccessLogger, AbstractStreamWriter
+from .base_protocol import BaseProtocol
+from .helpers import CeilTimeout, current_task
+from .http import (
+ HttpProcessingError,
+ HttpRequestParser,
+ HttpVersion10,
+ RawRequestMessage,
+ StreamWriter,
+)
+from .log import access_logger, server_logger
+from .streams import EMPTY_PAYLOAD, StreamReader
+from .tcp_helpers import tcp_keepalive
+from .web_exceptions import HTTPException
+from .web_log import AccessLogger
+from .web_request import BaseRequest
+from .web_response import Response, StreamResponse
+
+__all__ = ("RequestHandler", "RequestPayloadError", "PayloadAccessError")
+
+if TYPE_CHECKING: # pragma: no cover
+ from .web_server import Server
+
+
+_RequestFactory = Callable[
+ [
+ RawRequestMessage,
+ StreamReader,
+ "RequestHandler",
+ AbstractStreamWriter,
+ "asyncio.Task[None]",
+ ],
+ BaseRequest,
+]
+
+_RequestHandler = Callable[[BaseRequest], Awaitable[StreamResponse]]
+
+
+ERROR = RawRequestMessage(
+ "UNKNOWN", "/", HttpVersion10, {}, {}, True, False, False, False, yarl.URL("/")
+)
+
+
+class RequestPayloadError(Exception):
+ """Payload parsing error."""
+
+
+class PayloadAccessError(Exception):
+ """Payload was accessed after response was sent."""
+
+
+class RequestHandler(BaseProtocol):
+ """HTTP protocol implementation.
+
+    RequestHandler handles an incoming HTTP request. It reads the request
+    line, request headers and request payload, then calls the
+    handle_request() method. By default it always returns a 404 response.
+
+    RequestHandler also handles errors in the incoming request, such as a
+    bad status line, bad headers or an incomplete payload. If any error
+    occurs, the connection is closed.
+
+ :param keepalive_timeout: number of seconds before closing
+ keep-alive connection
+ :type keepalive_timeout: int or None
+
+ :param bool tcp_keepalive: TCP keep-alive is on, default is on
+
+ :param bool debug: enable debug mode
+
+ :param logger: custom logger object
+ :type logger: aiohttp.log.server_logger
+
+ :param access_log_class: custom class for access_logger
+ :type access_log_class: aiohttp.abc.AbstractAccessLogger
+
+ :param access_log: custom logging object
+ :type access_log: aiohttp.log.server_logger
+
+ :param str access_log_format: access log format string
+
+ :param loop: Optional event loop
+
+ :param int max_line_size: Optional maximum header line size
+
+ :param int max_field_size: Optional maximum header field size
+
+ :param int max_headers: Optional maximum header size
+
+ """
+
+ KEEPALIVE_RESCHEDULE_DELAY = 1
+
+ __slots__ = (
+ "_request_count",
+ "_keepalive",
+ "_manager",
+ "_request_handler",
+ "_request_factory",
+ "_tcp_keepalive",
+ "_keepalive_time",
+ "_keepalive_handle",
+ "_keepalive_timeout",
+ "_lingering_time",
+ "_messages",
+ "_message_tail",
+ "_waiter",
+ "_error_handler",
+ "_task_handler",
+ "_upgrade",
+ "_payload_parser",
+ "_request_parser",
+ "_reading_paused",
+ "logger",
+ "debug",
+ "access_log",
+ "access_logger",
+ "_close",
+ "_force_close",
+ "_current_request",
+ )
+
+ def __init__(
+ self,
+ manager: "Server",
+ *,
+ loop: asyncio.AbstractEventLoop,
+ keepalive_timeout: float = 75.0, # NGINX default is 75 secs
+ tcp_keepalive: bool = True,
+ logger: Logger = server_logger,
+ access_log_class: Type[AbstractAccessLogger] = AccessLogger,
+ access_log: Logger = access_logger,
+ access_log_format: str = AccessLogger.LOG_FORMAT,
+ debug: bool = False,
+ max_line_size: int = 8190,
+ max_headers: int = 32768,
+ max_field_size: int = 8190,
+ lingering_time: float = 10.0,
+ read_bufsize: int = 2 ** 16,
+ ):
+
+ super().__init__(loop)
+
+ self._request_count = 0
+ self._keepalive = False
+ self._current_request = None # type: Optional[BaseRequest]
+ self._manager = manager # type: Optional[Server]
+ self._request_handler = (
+ manager.request_handler
+ ) # type: Optional[_RequestHandler]
+ self._request_factory = (
+ manager.request_factory
+ ) # type: Optional[_RequestFactory]
+
+ self._tcp_keepalive = tcp_keepalive
+ # placeholder to be replaced on keepalive timeout setup
+ self._keepalive_time = 0.0
+ self._keepalive_handle = None # type: Optional[asyncio.Handle]
+ self._keepalive_timeout = keepalive_timeout
+ self._lingering_time = float(lingering_time)
+
+ self._messages = deque() # type: Any # Python 3.5 has no typing.Deque
+ self._message_tail = b""
+
+ self._waiter = None # type: Optional[asyncio.Future[None]]
+ self._error_handler = None # type: Optional[asyncio.Task[None]]
+ self._task_handler = None # type: Optional[asyncio.Task[None]]
+
+ self._upgrade = False
+ self._payload_parser = None # type: Any
+ self._request_parser = HttpRequestParser(
+ self,
+ loop,
+ read_bufsize,
+ max_line_size=max_line_size,
+ max_field_size=max_field_size,
+ max_headers=max_headers,
+ payload_exception=RequestPayloadError,
+ ) # type: Optional[HttpRequestParser]
+
+ self.logger = logger
+ self.debug = debug
+ self.access_log = access_log
+ if access_log:
+ self.access_logger = access_log_class(
+ access_log, access_log_format
+ ) # type: Optional[AbstractAccessLogger]
+ else:
+ self.access_logger = None
+
+ self._close = False
+ self._force_close = False
+
+ def __repr__(self) -> str:
+ return "<{} {}>".format(
+ self.__class__.__name__,
+ "connected" if self.transport is not None else "disconnected",
+ )
+
+ @property
+ def keepalive_timeout(self) -> float:
+ return self._keepalive_timeout
+
+ async def shutdown(self, timeout: Optional[float] = 15.0) -> None:
+ """Worker process is about to exit, we need cleanup everything and
+ stop accepting requests. It is especially important for keep-alive
+ connections."""
+ self._force_close = True
+
+ if self._keepalive_handle is not None:
+ self._keepalive_handle.cancel()
+
+ if self._waiter:
+ self._waiter.cancel()
+
+ # wait for handlers
+ with suppress(asyncio.CancelledError, asyncio.TimeoutError):
+ with CeilTimeout(timeout, loop=self._loop):
+ if self._error_handler is not None and not self._error_handler.done():
+ await self._error_handler
+
+ if self._current_request is not None:
+ self._current_request._cancel(asyncio.CancelledError())
+
+ if self._task_handler is not None and not self._task_handler.done():
+ await self._task_handler
+
+ # force-close non-idle handler
+ if self._task_handler is not None:
+ self._task_handler.cancel()
+
+ if self.transport is not None:
+ self.transport.close()
+ self.transport = None
+
+ def connection_made(self, transport: asyncio.BaseTransport) -> None:
+ super().connection_made(transport)
+
+ real_transport = cast(asyncio.Transport, transport)
+ if self._tcp_keepalive:
+ tcp_keepalive(real_transport)
+
+ self._task_handler = self._loop.create_task(self.start())
+ assert self._manager is not None
+ self._manager.connection_made(self, real_transport)
+
+ def connection_lost(self, exc: Optional[BaseException]) -> None:
+ if self._manager is None:
+ return
+ self._manager.connection_lost(self, exc)
+
+ super().connection_lost(exc)
+
+ self._manager = None
+ self._force_close = True
+ self._request_factory = None
+ self._request_handler = None
+ self._request_parser = None
+
+ if self._keepalive_handle is not None:
+ self._keepalive_handle.cancel()
+
+ if self._current_request is not None:
+ if exc is None:
+ exc = ConnectionResetError("Connection lost")
+ self._current_request._cancel(exc)
+
+ if self._error_handler is not None:
+ self._error_handler.cancel()
+ if self._task_handler is not None:
+ self._task_handler.cancel()
+ if self._waiter is not None:
+ self._waiter.cancel()
+
+ self._task_handler = None
+
+ if self._payload_parser is not None:
+ self._payload_parser.feed_eof()
+ self._payload_parser = None
+
+ def set_parser(self, parser: Any) -> None:
+ # Actual type is WebReader
+ assert self._payload_parser is None
+
+ self._payload_parser = parser
+
+ if self._message_tail:
+ self._payload_parser.feed_data(self._message_tail)
+ self._message_tail = b""
+
+ def eof_received(self) -> None:
+ pass
+
+ def data_received(self, data: bytes) -> None:
+ if self._force_close or self._close:
+ return
+ # parse http messages
+ if self._payload_parser is None and not self._upgrade:
+ assert self._request_parser is not None
+ try:
+ messages, upgraded, tail = self._request_parser.feed_data(data)
+ except HttpProcessingError as exc:
+ # something happened during parsing
+ self._error_handler = self._loop.create_task(
+ self.handle_parse_error(
+ StreamWriter(self, self._loop), 400, exc, exc.message
+ )
+ )
+ self.close()
+ except Exception as exc:
+ # 500: internal error
+ self._error_handler = self._loop.create_task(
+ self.handle_parse_error(StreamWriter(self, self._loop), 500, exc)
+ )
+ self.close()
+ else:
+ if messages:
+ # sometimes the parser returns no messages
+ for (msg, payload) in messages:
+ self._request_count += 1
+ self._messages.append((msg, payload))
+
+ waiter = self._waiter
+ if waiter is not None:
+ if not waiter.done():
+ # don't set result twice
+ waiter.set_result(None)
+
+ self._upgrade = upgraded
+ if upgraded and tail:
+ self._message_tail = tail
+
+ # no parser, just store
+ elif self._payload_parser is None and self._upgrade and data:
+ self._message_tail += data
+
+ # feed payload
+ elif data:
+ eof, tail = self._payload_parser.feed_data(data)
+ if eof:
+ self.close()
+
+ def keep_alive(self, val: bool) -> None:
+ """Set keep-alive connection mode.
+
+ :param bool val: new state.
+ """
+ self._keepalive = val
+ if self._keepalive_handle:
+ self._keepalive_handle.cancel()
+ self._keepalive_handle = None
+
+ def close(self) -> None:
+ """Stop accepting new pipelinig messages and close
+ connection when handlers done processing messages"""
+ self._close = True
+ if self._waiter:
+ self._waiter.cancel()
+
+ def force_close(self) -> None:
+ """Force close connection"""
+ self._force_close = True
+ if self._waiter:
+ self._waiter.cancel()
+ if self.transport is not None:
+ self.transport.close()
+ self.transport = None
+
+ def log_access(
+ self, request: BaseRequest, response: StreamResponse, time: float
+ ) -> None:
+ if self.access_logger is not None:
+ self.access_logger.log(request, response, self._loop.time() - time)
+
+ def log_debug(self, *args: Any, **kw: Any) -> None:
+ if self.debug:
+ self.logger.debug(*args, **kw)
+
+ def log_exception(self, *args: Any, **kw: Any) -> None:
+ self.logger.exception(*args, **kw)
+
+ def _process_keepalive(self) -> None:
+ if self._force_close or not self._keepalive:
+ return
+
+ next = self._keepalive_time + self._keepalive_timeout
+
+ # handler in idle state
+ if self._waiter:
+ if self._loop.time() > next:
+ self.force_close()
+ return
+
+ # not all request handlers are done,
+ # reschedule itself to next second
+ self._keepalive_handle = self._loop.call_later(
+ self.KEEPALIVE_RESCHEDULE_DELAY, self._process_keepalive
+ )
+
+ async def _handle_request(
+ self,
+ request: BaseRequest,
+ start_time: float,
+ ) -> Tuple[StreamResponse, bool]:
+ assert self._request_handler is not None
+ try:
+ try:
+ self._current_request = request
+ resp = await self._request_handler(request)
+ finally:
+ self._current_request = None
+ except HTTPException as exc:
+ resp = Response(
+ status=exc.status, reason=exc.reason, text=exc.text, headers=exc.headers
+ )
+ reset = await self.finish_response(request, resp, start_time)
+ except asyncio.CancelledError:
+ raise
+ except asyncio.TimeoutError as exc:
+ self.log_debug("Request handler timed out.", exc_info=exc)
+ resp = self.handle_error(request, 504)
+ reset = await self.finish_response(request, resp, start_time)
+ except Exception as exc:
+ resp = self.handle_error(request, 500, exc)
+ reset = await self.finish_response(request, resp, start_time)
+ else:
+ reset = await self.finish_response(request, resp, start_time)
+
+ return resp, reset
+
+ async def start(self) -> None:
+ """Process incoming request.
+
+        It reads the request line, request headers and request payload,
+        then calls the handle_request() method. Subclasses have to override
+        handle_request(). start() handles various exceptions in request
+        or response handling. The connection is always closed unless
+        keep_alive(True) is specified.
+ """
+ loop = self._loop
+ handler = self._task_handler
+ assert handler is not None
+ manager = self._manager
+ assert manager is not None
+ keepalive_timeout = self._keepalive_timeout
+ resp = None
+ assert self._request_factory is not None
+ assert self._request_handler is not None
+
+ while not self._force_close:
+ if not self._messages:
+ try:
+ # wait for next request
+ self._waiter = loop.create_future()
+ await self._waiter
+ except asyncio.CancelledError:
+ break
+ finally:
+ self._waiter = None
+
+ message, payload = self._messages.popleft()
+
+ start = loop.time()
+
+ manager.requests_count += 1
+ writer = StreamWriter(self, loop)
+ request = self._request_factory(message, payload, self, writer, handler)
+ try:
+ # a new task is used for copy context vars (#3406)
+ task = self._loop.create_task(self._handle_request(request, start))
+ try:
+ resp, reset = await task
+ except (asyncio.CancelledError, ConnectionError):
+ self.log_debug("Ignored premature client disconnection")
+ break
+ # Deprecation warning (See #2415)
+ if getattr(resp, "__http_exception__", False):
+ warnings.warn(
+ "returning HTTPException object is deprecated "
+ "(#2415) and will be removed, "
+ "please raise the exception instead",
+ DeprecationWarning,
+ )
+
+ # Drop the processed task from asyncio.Task.all_tasks() early
+ del task
+ if reset:
+ self.log_debug("Ignored premature client disconnection 2")
+ break
+
+ # notify server about keep-alive
+ self._keepalive = bool(resp.keep_alive)
+
+ # check payload
+ if not payload.is_eof():
+ lingering_time = self._lingering_time
+ if not self._force_close and lingering_time:
+ self.log_debug(
+ "Start lingering close timer for %s sec.", lingering_time
+ )
+
+ now = loop.time()
+ end_t = now + lingering_time
+
+ with suppress(asyncio.TimeoutError, asyncio.CancelledError):
+ while not payload.is_eof() and now < end_t:
+ with CeilTimeout(end_t - now, loop=loop):
+ # read and ignore
+ await payload.readany()
+ now = loop.time()
+
+ # if payload still uncompleted
+ if not payload.is_eof() and not self._force_close:
+ self.log_debug("Uncompleted request.")
+ self.close()
+
+ payload.set_exception(PayloadAccessError())
+
+ except asyncio.CancelledError:
+ self.log_debug("Ignored premature client disconnection ")
+ break
+ except RuntimeError as exc:
+ if self.debug:
+ self.log_exception("Unhandled runtime exception", exc_info=exc)
+ self.force_close()
+ except Exception as exc:
+ self.log_exception("Unhandled exception", exc_info=exc)
+ self.force_close()
+ finally:
+ if self.transport is None and resp is not None:
+ self.log_debug("Ignored premature client disconnection.")
+ elif not self._force_close:
+ if self._keepalive and not self._close:
+ # start keep-alive timer
+ if keepalive_timeout is not None:
+ now = self._loop.time()
+ self._keepalive_time = now
+ if self._keepalive_handle is None:
+ self._keepalive_handle = loop.call_at(
+ now + keepalive_timeout, self._process_keepalive
+ )
+ else:
+ break
+
+ # remove handler, close transport if no handlers left
+ if not self._force_close:
+ self._task_handler = None
+ if self.transport is not None and self._error_handler is None:
+ self.transport.close()
+
+ async def finish_response(
+ self, request: BaseRequest, resp: StreamResponse, start_time: float
+ ) -> bool:
+ """
+ Prepare the response and write_eof, then log access. This has to
+ be called within the context of any exception so the access logger
+ can get exception information. Returns True if the client disconnects
+ prematurely.
+ """
+ if self._request_parser is not None:
+ self._request_parser.set_upgraded(False)
+ self._upgrade = False
+ if self._message_tail:
+ self._request_parser.feed_data(self._message_tail)
+ self._message_tail = b""
+ try:
+ prepare_meth = resp.prepare
+ except AttributeError:
+ if resp is None:
+ raise RuntimeError("Missing return " "statement on request handler")
+ else:
+ raise RuntimeError(
+ "Web-handler should return "
+ "a response instance, "
+ "got {!r}".format(resp)
+ )
+ try:
+ await prepare_meth(request)
+ await resp.write_eof()
+ except ConnectionError:
+ self.log_access(request, resp, start_time)
+ return True
+ else:
+ self.log_access(request, resp, start_time)
+ return False
+
+ def handle_error(
+ self,
+ request: BaseRequest,
+ status: int = 500,
+ exc: Optional[BaseException] = None,
+ message: Optional[str] = None,
+ ) -> StreamResponse:
+ """Handle errors.
+
+        Returns an HTTP response with a specific status code. Logs
+        additional information. It always closes the current connection."""
+ self.log_exception("Error handling request", exc_info=exc)
+
+ ct = "text/plain"
+ if status == HTTPStatus.INTERNAL_SERVER_ERROR:
+ title = "{0.value} {0.phrase}".format(HTTPStatus.INTERNAL_SERVER_ERROR)
+ msg = HTTPStatus.INTERNAL_SERVER_ERROR.description
+ tb = None
+ if self.debug:
+ with suppress(Exception):
+ tb = traceback.format_exc()
+
+ if "text/html" in request.headers.get("Accept", ""):
+ if tb:
+ tb = html_escape(tb)
+ msg = f"<h2>Traceback:</h2>\n<pre>{tb}</pre>"
+ message = (
+ "<html><head>"
+ "<title>{title}</title>"
+ "</head><body>\n<h1>{title}</h1>"
+ "\n{msg}\n</body></html>\n"
+ ).format(title=title, msg=msg)
+ ct = "text/html"
+ else:
+ if tb:
+ msg = tb
+ message = title + "\n\n" + msg
+
+ resp = Response(status=status, text=message, content_type=ct)
+ resp.force_close()
+
+ # some data already got sent, connection is broken
+ if request.writer.output_size > 0 or self.transport is None:
+ self.force_close()
+
+ return resp
+
+ async def handle_parse_error(
+ self,
+ writer: AbstractStreamWriter,
+ status: int,
+ exc: Optional[BaseException] = None,
+ message: Optional[str] = None,
+ ) -> None:
+ task = current_task()
+ assert task is not None
+ request = BaseRequest(
+ ERROR, EMPTY_PAYLOAD, self, writer, task, self._loop # type: ignore
+ )
+
+ resp = self.handle_error(request, status, exc, message)
+ await resp.prepare(request)
+ await resp.write_eof()
+
+ if self.transport is not None:
+ self.transport.close()
+
+ self._error_handler = None
diff --git a/third_party/python/aiohttp/aiohttp/web_request.py b/third_party/python/aiohttp/aiohttp/web_request.py
new file mode 100644
index 0000000000..f11e7be44b
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_request.py
@@ -0,0 +1,824 @@
+import asyncio
+import datetime
+import io
+import re
+import socket
+import string
+import tempfile
+import types
+import warnings
+from email.utils import parsedate
+from http.cookies import SimpleCookie
+from types import MappingProxyType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Iterator,
+ Mapping,
+ MutableMapping,
+ Optional,
+ Tuple,
+ Union,
+ cast,
+)
+from urllib.parse import parse_qsl
+
+import attr
+from multidict import CIMultiDict, CIMultiDictProxy, MultiDict, MultiDictProxy
+from yarl import URL
+
+from . import hdrs
+from .abc import AbstractStreamWriter
+from .helpers import DEBUG, ChainMapProxy, HeadersMixin, reify, sentinel
+from .http_parser import RawRequestMessage
+from .http_writer import HttpVersion
+from .multipart import BodyPartReader, MultipartReader
+from .streams import EmptyStreamReader, StreamReader
+from .typedefs import (
+ DEFAULT_JSON_DECODER,
+ JSONDecoder,
+ LooseHeaders,
+ RawHeaders,
+ StrOrURL,
+)
+from .web_exceptions import HTTPRequestEntityTooLarge
+from .web_response import StreamResponse
+
+__all__ = ("BaseRequest", "FileField", "Request")
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from .web_app import Application
+ from .web_protocol import RequestHandler
+ from .web_urldispatcher import UrlMappingMatchInfo
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class FileField:
+ name: str
+ filename: str
+ file: io.BufferedReader
+ content_type: str
+ headers: "CIMultiDictProxy[str]"
+
+
+_TCHAR = string.digits + string.ascii_letters + r"!#$%&'*+.^_`|~-"
+# '-' at the end to prevent interpretation as range in a char class
+
+_TOKEN = fr"[{_TCHAR}]+"
+
+_QDTEXT = r"[{}]".format(
+ r"".join(chr(c) for c in (0x09, 0x20, 0x21) + tuple(range(0x23, 0x7F)))
+)
+# qdtext includes 0x5C to escape 0x5D ('\]')
+# qdtext excludes obs-text (because obsoleted, and encoding not specified)
+
+_QUOTED_PAIR = r"\\[\t !-~]"
+
+_QUOTED_STRING = r'"(?:{quoted_pair}|{qdtext})*"'.format(
+ qdtext=_QDTEXT, quoted_pair=_QUOTED_PAIR
+)
+
+_FORWARDED_PAIR = r"({token})=({token}|{quoted_string})(:\d{{1,4}})?".format(
+ token=_TOKEN, quoted_string=_QUOTED_STRING
+)
+
+_QUOTED_PAIR_REPLACE_RE = re.compile(r"\\([\t !-~])")
+# same pattern as _QUOTED_PAIR but contains a capture group
+
+_FORWARDED_PAIR_RE = re.compile(_FORWARDED_PAIR)
+
+############################################################
+# HTTP Request
+############################################################
+
+
+class BaseRequest(MutableMapping[str, Any], HeadersMixin):
+
+ POST_METHODS = {
+ hdrs.METH_PATCH,
+ hdrs.METH_POST,
+ hdrs.METH_PUT,
+ hdrs.METH_TRACE,
+ hdrs.METH_DELETE,
+ }
+
+ ATTRS = HeadersMixin.ATTRS | frozenset(
+ [
+ "_message",
+ "_protocol",
+ "_payload_writer",
+ "_payload",
+ "_headers",
+ "_method",
+ "_version",
+ "_rel_url",
+ "_post",
+ "_read_bytes",
+ "_state",
+ "_cache",
+ "_task",
+ "_client_max_size",
+ "_loop",
+ "_transport_sslcontext",
+ "_transport_peername",
+ ]
+ )
+
+ def __init__(
+ self,
+ message: RawRequestMessage,
+ payload: StreamReader,
+ protocol: "RequestHandler",
+ payload_writer: AbstractStreamWriter,
+ task: "asyncio.Task[None]",
+ loop: asyncio.AbstractEventLoop,
+ *,
+ client_max_size: int = 1024 ** 2,
+ state: Optional[Dict[str, Any]] = None,
+ scheme: Optional[str] = None,
+ host: Optional[str] = None,
+ remote: Optional[str] = None,
+ ) -> None:
+ if state is None:
+ state = {}
+ self._message = message
+ self._protocol = protocol
+ self._payload_writer = payload_writer
+
+ self._payload = payload
+ self._headers = message.headers
+ self._method = message.method
+ self._version = message.version
+ self._rel_url = message.url
+ self._post = (
+ None
+ ) # type: Optional[MultiDictProxy[Union[str, bytes, FileField]]]
+ self._read_bytes = None # type: Optional[bytes]
+
+ self._state = state
+ self._cache = {} # type: Dict[str, Any]
+ self._task = task
+ self._client_max_size = client_max_size
+ self._loop = loop
+
+ transport = self._protocol.transport
+ assert transport is not None
+ self._transport_sslcontext = transport.get_extra_info("sslcontext")
+ self._transport_peername = transport.get_extra_info("peername")
+
+ if scheme is not None:
+ self._cache["scheme"] = scheme
+ if host is not None:
+ self._cache["host"] = host
+ if remote is not None:
+ self._cache["remote"] = remote
+
+ def clone(
+ self,
+ *,
+ method: str = sentinel,
+ rel_url: StrOrURL = sentinel,
+ headers: LooseHeaders = sentinel,
+ scheme: str = sentinel,
+ host: str = sentinel,
+ remote: str = sentinel,
+ ) -> "BaseRequest":
+ """Clone itself with replacement some attributes.
+
+ Creates and returns a new instance of Request object. If no parameters
+ are given, an exact copy is returned. If a parameter is not passed, it
+ will reuse the one from the current request object.
+
+ """
+
+ if self._read_bytes:
+ raise RuntimeError("Cannot clone request " "after reading its content")
+
+ dct = {} # type: Dict[str, Any]
+ if method is not sentinel:
+ dct["method"] = method
+ if rel_url is not sentinel:
+ new_url = URL(rel_url)
+ dct["url"] = new_url
+ dct["path"] = str(new_url)
+ if headers is not sentinel:
+ # a copy semantic
+ dct["headers"] = CIMultiDictProxy(CIMultiDict(headers))
+ dct["raw_headers"] = tuple(
+ (k.encode("utf-8"), v.encode("utf-8")) for k, v in headers.items()
+ )
+
+ message = self._message._replace(**dct)
+
+ kwargs = {}
+ if scheme is not sentinel:
+ kwargs["scheme"] = scheme
+ if host is not sentinel:
+ kwargs["host"] = host
+ if remote is not sentinel:
+ kwargs["remote"] = remote
+
+ return self.__class__(
+ message,
+ self._payload,
+ self._protocol,
+ self._payload_writer,
+ self._task,
+ self._loop,
+ client_max_size=self._client_max_size,
+ state=self._state.copy(),
+ **kwargs,
+ )
+
+ @property
+ def task(self) -> "asyncio.Task[None]":
+ return self._task
+
+ @property
+ def protocol(self) -> "RequestHandler":
+ return self._protocol
+
+ @property
+ def transport(self) -> Optional[asyncio.Transport]:
+ if self._protocol is None:
+ return None
+ return self._protocol.transport
+
+ @property
+ def writer(self) -> AbstractStreamWriter:
+ return self._payload_writer
+
+ @reify
+ def message(self) -> RawRequestMessage:
+ warnings.warn("Request.message is deprecated", DeprecationWarning, stacklevel=3)
+ return self._message
+
+ @reify
+ def rel_url(self) -> URL:
+ return self._rel_url
+
+ @reify
+ def loop(self) -> asyncio.AbstractEventLoop:
+ warnings.warn(
+ "request.loop property is deprecated", DeprecationWarning, stacklevel=2
+ )
+ return self._loop
+
+ # MutableMapping API
+
+ def __getitem__(self, key: str) -> Any:
+ return self._state[key]
+
+ def __setitem__(self, key: str, value: Any) -> None:
+ self._state[key] = value
+
+ def __delitem__(self, key: str) -> None:
+ del self._state[key]
+
+ def __len__(self) -> int:
+ return len(self._state)
+
+ def __iter__(self) -> Iterator[str]:
+ return iter(self._state)
+
+ ########
+
+ @reify
+ def secure(self) -> bool:
+ """A bool indicating if the request is handled with SSL."""
+ return self.scheme == "https"
+
+ @reify
+ def forwarded(self) -> Tuple[Mapping[str, str], ...]:
+ """A tuple containing all parsed Forwarded header(s).
+
+ Makes an effort to parse Forwarded headers as specified by RFC 7239:
+
+ - It adds one (immutable) dictionary per Forwarded 'field-value', ie
+ per proxy. The element corresponds to the data in the Forwarded
+ field-value added by the first proxy encountered by the client. Each
+ subsequent item corresponds to those added by later proxies.
+ - It checks that every value has valid syntax in general as specified
+ in section 4: either a 'token' or a 'quoted-string'.
+ - It un-escapes found escape sequences.
+ - It does NOT validate 'by' and 'for' contents as specified in section
+ 6.
+ - It does NOT validate 'host' contents (Host ABNF).
+ - It does NOT validate 'proto' contents for valid URI scheme names.
+
+ Returns a tuple containing one or more immutable dicts
+ """
+ elems = []
+ for field_value in self._message.headers.getall(hdrs.FORWARDED, ()):
+ length = len(field_value)
+ pos = 0
+ need_separator = False
+ elem = {} # type: Dict[str, str]
+ elems.append(types.MappingProxyType(elem))
+ while 0 <= pos < length:
+ match = _FORWARDED_PAIR_RE.match(field_value, pos)
+ if match is not None: # got a valid forwarded-pair
+ if need_separator:
+ # bad syntax here, skip to next comma
+ pos = field_value.find(",", pos)
+ else:
+ name, value, port = match.groups()
+ if value[0] == '"':
+ # quoted string: remove quotes and unescape
+ value = _QUOTED_PAIR_REPLACE_RE.sub(r"\1", value[1:-1])
+ if port:
+ value += port
+ elem[name.lower()] = value
+ pos += len(match.group(0))
+ need_separator = True
+ elif field_value[pos] == ",": # next forwarded-element
+ need_separator = False
+ elem = {}
+ elems.append(types.MappingProxyType(elem))
+ pos += 1
+ elif field_value[pos] == ";": # next forwarded-pair
+ need_separator = False
+ pos += 1
+ elif field_value[pos] in " \t":
+ # Allow whitespace even between forwarded-pairs, though
+ # RFC 7239 doesn't. This simplifies code and is in line
+ # with Postel's law.
+ pos += 1
+ else:
+ # bad syntax here, skip to next comma
+ pos = field_value.find(",", pos)
+ return tuple(elems)
+
+ @reify
+ def scheme(self) -> str:
+ """A string representing the scheme of the request.
+
+        The scheme is resolved in this order:
+
+ - overridden value by .clone(scheme=new_scheme) call.
+ - type of connection to peer: HTTPS if socket is SSL, HTTP otherwise.
+
+ 'http' or 'https'.
+ """
+ if self._transport_sslcontext:
+ return "https"
+ else:
+ return "http"
+
+ @reify
+ def method(self) -> str:
+ """Read only property for getting HTTP method.
+
+ The value is upper-cased str like 'GET', 'POST', 'PUT' etc.
+ """
+ return self._method
+
+ @reify
+ def version(self) -> HttpVersion:
+ """Read only property for getting HTTP version of request.
+
+ Returns aiohttp.protocol.HttpVersion instance.
+ """
+ return self._version
+
+ @reify
+ def host(self) -> str:
+ """Hostname of the request.
+
+ Hostname is resolved in this order:
+
+ - overridden value by .clone(host=new_host) call.
+ - HOST HTTP header
+ - socket.getfqdn() value
+ """
+ host = self._message.headers.get(hdrs.HOST)
+ if host is not None:
+ return host
+ else:
+ return socket.getfqdn()
+
+ @reify
+ def remote(self) -> Optional[str]:
+ """Remote IP of client initiated HTTP request.
+
+ The IP is resolved in this order:
+
+ - overridden value by .clone(remote=new_remote) call.
+ - peername of opened socket
+ """
+ if isinstance(self._transport_peername, (list, tuple)):
+ return self._transport_peername[0]
+ else:
+ return self._transport_peername
+
+ @reify
+ def url(self) -> URL:
+ url = URL.build(scheme=self.scheme, host=self.host)
+ return url.join(self._rel_url)
+
+ @reify
+ def path(self) -> str:
+ """The URL including *PATH INFO* without the host or scheme.
+
+ E.g., ``/app/blog``
+ """
+ return self._rel_url.path
+
+ @reify
+ def path_qs(self) -> str:
+ """The URL including PATH_INFO and the query string.
+
+ E.g, /app/blog?id=10
+ """
+ return str(self._rel_url)
+
+ @reify
+ def raw_path(self) -> str:
+ """The URL including raw *PATH INFO* without the host or scheme.
+        Warning: the path is unquoted and may contain invalid URL characters.
+
+ E.g., ``/my%2Fpath%7Cwith%21some%25strange%24characters``
+ """
+ return self._message.path
+
+ @reify
+ def query(self) -> "MultiDictProxy[str]":
+ """A multidict with all the variables in the query string."""
+ return self._rel_url.query
+
+ @reify
+ def query_string(self) -> str:
+ """The query string in the URL.
+
+ E.g., id=10
+ """
+ return self._rel_url.query_string
+
+ @reify
+ def headers(self) -> "CIMultiDictProxy[str]":
+ """A case-insensitive multidict proxy with all headers."""
+ return self._headers
+
+ @reify
+ def raw_headers(self) -> RawHeaders:
+ """A sequence of pairs for all headers."""
+ return self._message.raw_headers
+
+ @staticmethod
+ def _http_date(_date_str: Optional[str]) -> Optional[datetime.datetime]:
+ """Process a date string, return a datetime object"""
+ if _date_str is not None:
+ timetuple = parsedate(_date_str)
+ if timetuple is not None:
+ return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc)
+ return None
+
+ @reify
+ def if_modified_since(self) -> Optional[datetime.datetime]:
+ """The value of If-Modified-Since HTTP header, or None.
+
+ This header is represented as a `datetime` object.
+ """
+ return self._http_date(self.headers.get(hdrs.IF_MODIFIED_SINCE))
+
+ @reify
+ def if_unmodified_since(self) -> Optional[datetime.datetime]:
+ """The value of If-Unmodified-Since HTTP header, or None.
+
+ This header is represented as a `datetime` object.
+ """
+ return self._http_date(self.headers.get(hdrs.IF_UNMODIFIED_SINCE))
+
+ @reify
+ def if_range(self) -> Optional[datetime.datetime]:
+ """The value of If-Range HTTP header, or None.
+
+ This header is represented as a `datetime` object.
+ """
+ return self._http_date(self.headers.get(hdrs.IF_RANGE))
+
+ @reify
+ def keep_alive(self) -> bool:
+ """Is keepalive enabled by client?"""
+ return not self._message.should_close
+
+ @reify
+ def cookies(self) -> Mapping[str, str]:
+ """Return request cookies.
+
+ A read-only dictionary-like object.
+ """
+ raw = self.headers.get(hdrs.COOKIE, "")
+ parsed = SimpleCookie(raw) # type: SimpleCookie[str]
+ return MappingProxyType({key: val.value for key, val in parsed.items()})
+
+ @reify
+ def http_range(self) -> slice:
+ """The content of Range HTTP header.
+
+ Return a slice instance.
+
+ """
+ rng = self._headers.get(hdrs.RANGE)
+ start, end = None, None
+ if rng is not None:
+ try:
+ pattern = r"^bytes=(\d*)-(\d*)$"
+ start, end = re.findall(pattern, rng)[0]
+ except IndexError: # pattern was not found in header
+ raise ValueError("range not in acceptable format")
+
+ end = int(end) if end else None
+ start = int(start) if start else None
+
+ if start is None and end is not None:
+ # end with no start is to return tail of content
+ start = -end
+ end = None
+
+ if start is not None and end is not None:
+ # end is inclusive in range header, exclusive for slice
+ end += 1
+
+ if start >= end:
+ raise ValueError("start cannot be after end")
+
+ if start is end is None: # No valid range supplied
+ raise ValueError("No start or end of range specified")
+
+ return slice(start, end, 1)
+
+ @reify
+ def content(self) -> StreamReader:
+ """Return raw payload stream."""
+ return self._payload
+
+ @property
+ def has_body(self) -> bool:
+ """Return True if request's HTTP BODY can be read, False otherwise."""
+ warnings.warn(
+ "Deprecated, use .can_read_body #2005", DeprecationWarning, stacklevel=2
+ )
+ return not self._payload.at_eof()
+
+ @property
+ def can_read_body(self) -> bool:
+ """Return True if request's HTTP BODY can be read, False otherwise."""
+ return not self._payload.at_eof()
+
+ @reify
+ def body_exists(self) -> bool:
+ """Return True if request has HTTP BODY, False otherwise."""
+ return type(self._payload) is not EmptyStreamReader
+
+ async def release(self) -> None:
+ """Release request.
+
+ Eat unread part of HTTP BODY if present.
+ """
+ while not self._payload.at_eof():
+ await self._payload.readany()
+
+ async def read(self) -> bytes:
+ """Read request body if present.
+
+ Returns bytes object with full request content.
+ """
+ if self._read_bytes is None:
+ body = bytearray()
+ while True:
+ chunk = await self._payload.readany()
+ body.extend(chunk)
+ if self._client_max_size:
+ body_size = len(body)
+ if body_size >= self._client_max_size:
+ raise HTTPRequestEntityTooLarge(
+ max_size=self._client_max_size, actual_size=body_size
+ )
+ if not chunk:
+ break
+ self._read_bytes = bytes(body)
+ return self._read_bytes
+
+ async def text(self) -> str:
+ """Return BODY as text using encoding from .charset."""
+ bytes_body = await self.read()
+ encoding = self.charset or "utf-8"
+ return bytes_body.decode(encoding)
+
+ async def json(self, *, loads: JSONDecoder = DEFAULT_JSON_DECODER) -> Any:
+ """Return BODY as JSON."""
+ body = await self.text()
+ return loads(body)
+
+ async def multipart(self) -> MultipartReader:
+ """Return async iterator to process BODY as multipart."""
+ return MultipartReader(self._headers, self._payload)
+
+ async def post(self) -> "MultiDictProxy[Union[str, bytes, FileField]]":
+ """Return POST parameters."""
+ if self._post is not None:
+ return self._post
+ if self._method not in self.POST_METHODS:
+ self._post = MultiDictProxy(MultiDict())
+ return self._post
+
+ content_type = self.content_type
+ if content_type not in (
+ "",
+ "application/x-www-form-urlencoded",
+ "multipart/form-data",
+ ):
+ self._post = MultiDictProxy(MultiDict())
+ return self._post
+
+ out = MultiDict() # type: MultiDict[Union[str, bytes, FileField]]
+
+ if content_type == "multipart/form-data":
+ multipart = await self.multipart()
+ max_size = self._client_max_size
+
+ field = await multipart.next()
+ while field is not None:
+ size = 0
+ field_ct = field.headers.get(hdrs.CONTENT_TYPE)
+
+ if isinstance(field, BodyPartReader):
+ assert field.name is not None
+
+ # Note that according to RFC 7578, the Content-Type header
+ # is optional, even for files, so we can't assume it's
+ # present.
+ # https://tools.ietf.org/html/rfc7578#section-4.4
+ if field.filename:
+ # store file in temp file
+ tmp = tempfile.TemporaryFile()
+ chunk = await field.read_chunk(size=2 ** 16)
+ while chunk:
+ chunk = field.decode(chunk)
+ tmp.write(chunk)
+ size += len(chunk)
+ if 0 < max_size < size:
+ raise HTTPRequestEntityTooLarge(
+ max_size=max_size, actual_size=size
+ )
+ chunk = await field.read_chunk(size=2 ** 16)
+ tmp.seek(0)
+
+ if field_ct is None:
+ field_ct = "application/octet-stream"
+
+ ff = FileField(
+ field.name,
+ field.filename,
+ cast(io.BufferedReader, tmp),
+ field_ct,
+ field.headers,
+ )
+ out.add(field.name, ff)
+ else:
+ # deal with ordinary data
+ value = await field.read(decode=True)
+ if field_ct is None or field_ct.startswith("text/"):
+ charset = field.get_charset(default="utf-8")
+ out.add(field.name, value.decode(charset))
+ else:
+ out.add(field.name, value)
+ size += len(value)
+ if 0 < max_size < size:
+ raise HTTPRequestEntityTooLarge(
+ max_size=max_size, actual_size=size
+ )
+ else:
+ raise ValueError(
+ "To decode nested multipart you need " "to use custom reader",
+ )
+
+ field = await multipart.next()
+ else:
+ data = await self.read()
+ if data:
+ charset = self.charset or "utf-8"
+ out.extend(
+ parse_qsl(
+ data.rstrip().decode(charset),
+ keep_blank_values=True,
+ encoding=charset,
+ )
+ )
+
+ self._post = MultiDictProxy(out)
+ return self._post
+
+ def get_extra_info(self, name: str, default: Any = None) -> Any:
+ """Extra info from protocol transport"""
+ protocol = self._protocol
+ if protocol is None:
+ return default
+
+ transport = protocol.transport
+ if transport is None:
+ return default
+
+ return transport.get_extra_info(name, default)
+
+ def __repr__(self) -> str:
+ ascii_encodable_path = self.path.encode("ascii", "backslashreplace").decode(
+ "ascii"
+ )
+ return "<{} {} {} >".format(
+ self.__class__.__name__, self._method, ascii_encodable_path
+ )
+
+ def __eq__(self, other: object) -> bool:
+ return id(self) == id(other)
+
+ def __bool__(self) -> bool:
+ return True
+
+ async def _prepare_hook(self, response: StreamResponse) -> None:
+ return
+
+ def _cancel(self, exc: BaseException) -> None:
+ self._payload.set_exception(exc)
+
+
+class Request(BaseRequest):
+
+ ATTRS = BaseRequest.ATTRS | frozenset(["_match_info"])
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ super().__init__(*args, **kwargs)
+
+ # matchdict, route_name, handler
+ # or information about traversal lookup
+
+ # initialized after route resolving
+ self._match_info = None # type: Optional[UrlMappingMatchInfo]
+
+ if DEBUG:
+
+ def __setattr__(self, name: str, val: Any) -> None:
+ if name not in self.ATTRS:
+ warnings.warn(
+ "Setting custom {}.{} attribute "
+ "is discouraged".format(self.__class__.__name__, name),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ super().__setattr__(name, val)
+
+ def clone(
+ self,
+ *,
+ method: str = sentinel,
+ rel_url: StrOrURL = sentinel,
+ headers: LooseHeaders = sentinel,
+ scheme: str = sentinel,
+ host: str = sentinel,
+ remote: str = sentinel,
+ ) -> "Request":
+ ret = super().clone(
+ method=method,
+ rel_url=rel_url,
+ headers=headers,
+ scheme=scheme,
+ host=host,
+ remote=remote,
+ )
+ new_ret = cast(Request, ret)
+ new_ret._match_info = self._match_info
+ return new_ret
+
+ @reify
+ def match_info(self) -> "UrlMappingMatchInfo":
+ """Result of route resolving."""
+ match_info = self._match_info
+ assert match_info is not None
+ return match_info
+
+ @property
+ def app(self) -> "Application":
+ """Application instance."""
+ match_info = self._match_info
+ assert match_info is not None
+ return match_info.current_app
+
+ @property
+ def config_dict(self) -> ChainMapProxy:
+ match_info = self._match_info
+ assert match_info is not None
+ lst = match_info.apps
+ app = self.app
+ idx = lst.index(app)
+ sublist = list(reversed(lst[: idx + 1]))
+ return ChainMapProxy(sublist)
+
+ async def _prepare_hook(self, response: StreamResponse) -> None:
+ match_info = self._match_info
+ if match_info is None:
+ return
+ for app in match_info._apps:
+ await app.on_response_prepare.send(self, response)
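A minimal usage sketch for the post() machinery above. Hedged: the handler,
route, and form-field names ("name", "avatar") are illustrative, not part of
the module; uploads arrive as FileField objects backed by the temporary file
created in post().

    from aiohttp import web

    async def handle_form(request: web.Request) -> web.Response:
        data = await request.post()            # urlencoded or multipart body
        name = data.get("name", "anonymous")   # plain text field
        avatar = data.get("avatar")            # FileField for an upload
        if avatar is not None:
            blob = avatar.file.read()          # read the buffered temp file
            return web.Response(text=f"{name}: {len(blob)} bytes received")
        return web.Response(text=f"hello, {name}")

    # client_max_size feeds the HTTPRequestEntityTooLarge check in post()
    app = web.Application(client_max_size=2 ** 20)
    app.router.add_post("/upload", handle_form)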
diff --git a/third_party/python/aiohttp/aiohttp/web_response.py b/third_party/python/aiohttp/aiohttp/web_response.py
new file mode 100644
index 0000000000..f34b00e2d9
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_response.py
@@ -0,0 +1,781 @@
+import asyncio
+import collections.abc
+import datetime
+import enum
+import json
+import math
+import time
+import warnings
+import zlib
+from concurrent.futures import Executor
+from email.utils import parsedate
+from http.cookies import Morsel, SimpleCookie
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Iterator,
+ Mapping,
+ MutableMapping,
+ Optional,
+ Tuple,
+ Union,
+ cast,
+)
+
+from multidict import CIMultiDict, istr
+
+from . import hdrs, payload
+from .abc import AbstractStreamWriter
+from .helpers import PY_38, HeadersMixin, rfc822_formatted_time, sentinel
+from .http import RESPONSES, SERVER_SOFTWARE, HttpVersion10, HttpVersion11
+from .payload import Payload
+from .typedefs import JSONEncoder, LooseHeaders
+
+__all__ = ("ContentCoding", "StreamResponse", "Response", "json_response")
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from .web_request import BaseRequest
+
+ BaseClass = MutableMapping[str, Any]
+else:
+ BaseClass = collections.abc.MutableMapping
+
+
+if not PY_38:
+ # allow samesite to be used in python < 3.8
+ # already permitted in python 3.8, see https://bugs.python.org/issue29613
+ Morsel._reserved["samesite"] = "SameSite" # type: ignore
+
+
+class ContentCoding(enum.Enum):
+ # The content codings that we have support for.
+ #
+ # Additional registered codings are listed at:
+ # https://www.iana.org/assignments/http-parameters/http-parameters.xhtml#content-coding
+ deflate = "deflate"
+ gzip = "gzip"
+ identity = "identity"
+
+
+############################################################
+# HTTP Response classes
+############################################################
+
+
+class StreamResponse(BaseClass, HeadersMixin):
+
+ _length_check = True
+
+ def __init__(
+ self,
+ *,
+ status: int = 200,
+ reason: Optional[str] = None,
+ headers: Optional[LooseHeaders] = None,
+ ) -> None:
+ self._body = None
+ self._keep_alive = None # type: Optional[bool]
+ self._chunked = False
+ self._compression = False
+ self._compression_force = None # type: Optional[ContentCoding]
+ self._cookies = SimpleCookie() # type: SimpleCookie[str]
+
+ self._req = None # type: Optional[BaseRequest]
+ self._payload_writer = None # type: Optional[AbstractStreamWriter]
+ self._eof_sent = False
+ self._body_length = 0
+ self._state = {} # type: Dict[str, Any]
+
+ if headers is not None:
+ self._headers = CIMultiDict(headers) # type: CIMultiDict[str]
+ else:
+ self._headers = CIMultiDict()
+
+ self.set_status(status, reason)
+
+ @property
+ def prepared(self) -> bool:
+ return self._payload_writer is not None
+
+ @property
+    def task(self) -> Optional["asyncio.Task[None]"]:
+ return getattr(self._req, "task", None)
+
+ @property
+ def status(self) -> int:
+ return self._status
+
+ @property
+ def chunked(self) -> bool:
+ return self._chunked
+
+ @property
+ def compression(self) -> bool:
+ return self._compression
+
+ @property
+ def reason(self) -> str:
+ return self._reason
+
+ def set_status(
+ self,
+ status: int,
+ reason: Optional[str] = None,
+ _RESPONSES: Mapping[int, Tuple[str, str]] = RESPONSES,
+ ) -> None:
+ assert not self.prepared, (
+            "Cannot change the response status code after the headers have been sent"
+ )
+ self._status = int(status)
+ if reason is None:
+ try:
+ reason = _RESPONSES[self._status][0]
+ except Exception:
+ reason = ""
+ self._reason = reason
+
+ @property
+ def keep_alive(self) -> Optional[bool]:
+ return self._keep_alive
+
+ def force_close(self) -> None:
+ self._keep_alive = False
+
+ @property
+ def body_length(self) -> int:
+ return self._body_length
+
+ @property
+ def output_length(self) -> int:
+ warnings.warn("output_length is deprecated", DeprecationWarning)
+ assert self._payload_writer
+ return self._payload_writer.buffer_size
+
+ def enable_chunked_encoding(self, chunk_size: Optional[int] = None) -> None:
+ """Enables automatic chunked transfer encoding."""
+ self._chunked = True
+
+ if hdrs.CONTENT_LENGTH in self._headers:
+ raise RuntimeError(
+                "You can't enable chunked encoding when a content length is set"
+ )
+ if chunk_size is not None:
+ warnings.warn("Chunk size is deprecated #1615", DeprecationWarning)
+
+ def enable_compression(
+ self, force: Optional[Union[bool, ContentCoding]] = None
+ ) -> None:
+ """Enables response compression encoding."""
+ # Backwards compatibility for when force was a bool <0.17.
+ if type(force) == bool:
+ force = ContentCoding.deflate if force else ContentCoding.identity
+ warnings.warn(
+ "Using boolean for force is deprecated #3318", DeprecationWarning
+ )
+ elif force is not None:
+ assert isinstance(force, ContentCoding), (
+                "force should be one of None, bool or ContentCoding"
+ )
+
+ self._compression = True
+ self._compression_force = force
+
+ @property
+ def headers(self) -> "CIMultiDict[str]":
+ return self._headers
+
+ @property
+ def cookies(self) -> "SimpleCookie[str]":
+ return self._cookies
+
+ def set_cookie(
+ self,
+ name: str,
+ value: str,
+ *,
+ expires: Optional[str] = None,
+ domain: Optional[str] = None,
+ max_age: Optional[Union[int, str]] = None,
+ path: str = "/",
+ secure: Optional[bool] = None,
+ httponly: Optional[bool] = None,
+ version: Optional[str] = None,
+ samesite: Optional[str] = None,
+ ) -> None:
+ """Set or update response cookie.
+
+        Sets a new cookie or updates an existing one with the new value.
+        Only parameters that are not None are updated.
+ """
+
+ old = self._cookies.get(name)
+ if old is not None and old.coded_value == "":
+ # deleted cookie
+ self._cookies.pop(name, None)
+
+ self._cookies[name] = value
+ c = self._cookies[name]
+
+ if expires is not None:
+ c["expires"] = expires
+ elif c.get("expires") == "Thu, 01 Jan 1970 00:00:00 GMT":
+ del c["expires"]
+
+ if domain is not None:
+ c["domain"] = domain
+
+ if max_age is not None:
+ c["max-age"] = str(max_age)
+ elif "max-age" in c:
+ del c["max-age"]
+
+ c["path"] = path
+
+ if secure is not None:
+ c["secure"] = secure
+ if httponly is not None:
+ c["httponly"] = httponly
+ if version is not None:
+ c["version"] = version
+ if samesite is not None:
+ c["samesite"] = samesite
+
+ def del_cookie(
+ self, name: str, *, domain: Optional[str] = None, path: str = "/"
+ ) -> None:
+ """Delete cookie.
+
+ Creates new empty expired cookie.
+ """
+ # TODO: do we need domain/path here?
+ self._cookies.pop(name, None)
+ self.set_cookie(
+ name,
+ "",
+ max_age=0,
+ expires="Thu, 01 Jan 1970 00:00:00 GMT",
+ domain=domain,
+ path=path,
+ )
+
+ @property
+ def content_length(self) -> Optional[int]:
+ # Just a placeholder for adding setter
+ return super().content_length
+
+ @content_length.setter
+ def content_length(self, value: Optional[int]) -> None:
+ if value is not None:
+ value = int(value)
+ if self._chunked:
+ raise RuntimeError(
+                    "You can't set content length when chunked encoding is enabled"
+ )
+ self._headers[hdrs.CONTENT_LENGTH] = str(value)
+ else:
+ self._headers.pop(hdrs.CONTENT_LENGTH, None)
+
+ @property
+ def content_type(self) -> str:
+ # Just a placeholder for adding setter
+ return super().content_type
+
+ @content_type.setter
+ def content_type(self, value: str) -> None:
+ self.content_type # read header values if needed
+ self._content_type = str(value)
+ self._generate_content_type_header()
+
+ @property
+ def charset(self) -> Optional[str]:
+ # Just a placeholder for adding setter
+ return super().charset
+
+ @charset.setter
+ def charset(self, value: Optional[str]) -> None:
+ ctype = self.content_type # read header values if needed
+ if ctype == "application/octet-stream":
+ raise RuntimeError(
+ "Setting charset for application/octet-stream "
+                "doesn't make sense, set up content_type first"
+ )
+ assert self._content_dict is not None
+ if value is None:
+ self._content_dict.pop("charset", None)
+ else:
+ self._content_dict["charset"] = str(value).lower()
+ self._generate_content_type_header()
+
+ @property
+ def last_modified(self) -> Optional[datetime.datetime]:
+ """The value of Last-Modified HTTP header, or None.
+
+ This header is represented as a `datetime` object.
+ """
+ httpdate = self._headers.get(hdrs.LAST_MODIFIED)
+ if httpdate is not None:
+ timetuple = parsedate(httpdate)
+ if timetuple is not None:
+ return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc)
+ return None
+
+ @last_modified.setter
+ def last_modified(
+ self, value: Optional[Union[int, float, datetime.datetime, str]]
+ ) -> None:
+ if value is None:
+ self._headers.pop(hdrs.LAST_MODIFIED, None)
+ elif isinstance(value, (int, float)):
+ self._headers[hdrs.LAST_MODIFIED] = time.strftime(
+ "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(math.ceil(value))
+ )
+ elif isinstance(value, datetime.datetime):
+ self._headers[hdrs.LAST_MODIFIED] = time.strftime(
+ "%a, %d %b %Y %H:%M:%S GMT", value.utctimetuple()
+ )
+ elif isinstance(value, str):
+ self._headers[hdrs.LAST_MODIFIED] = value
+
+ def _generate_content_type_header(
+ self, CONTENT_TYPE: istr = hdrs.CONTENT_TYPE
+ ) -> None:
+ assert self._content_dict is not None
+ assert self._content_type is not None
+ params = "; ".join(f"{k}={v}" for k, v in self._content_dict.items())
+ if params:
+ ctype = self._content_type + "; " + params
+ else:
+ ctype = self._content_type
+ self._headers[CONTENT_TYPE] = ctype
+
+ async def _do_start_compression(self, coding: ContentCoding) -> None:
+ if coding != ContentCoding.identity:
+ assert self._payload_writer is not None
+ self._headers[hdrs.CONTENT_ENCODING] = coding.value
+ self._payload_writer.enable_compression(coding.value)
+ # Compressed payload may have different content length,
+ # remove the header
+ self._headers.popall(hdrs.CONTENT_LENGTH, None)
+
+ async def _start_compression(self, request: "BaseRequest") -> None:
+ if self._compression_force:
+ await self._do_start_compression(self._compression_force)
+ else:
+ accept_encoding = request.headers.get(hdrs.ACCEPT_ENCODING, "").lower()
+ for coding in ContentCoding:
+ if coding.value in accept_encoding:
+ await self._do_start_compression(coding)
+ return
+
+ async def prepare(self, request: "BaseRequest") -> Optional[AbstractStreamWriter]:
+ if self._eof_sent:
+ return None
+ if self._payload_writer is not None:
+ return self._payload_writer
+
+ return await self._start(request)
+
+ async def _start(self, request: "BaseRequest") -> AbstractStreamWriter:
+ self._req = request
+ writer = self._payload_writer = request._payload_writer
+
+ await self._prepare_headers()
+ await request._prepare_hook(self)
+ await self._write_headers()
+
+ return writer
+
+ async def _prepare_headers(self) -> None:
+ request = self._req
+ assert request is not None
+ writer = self._payload_writer
+ assert writer is not None
+ keep_alive = self._keep_alive
+ if keep_alive is None:
+ keep_alive = request.keep_alive
+ self._keep_alive = keep_alive
+
+ version = request.version
+
+ headers = self._headers
+ for cookie in self._cookies.values():
+ value = cookie.output(header="")[1:]
+ headers.add(hdrs.SET_COOKIE, value)
+
+ if self._compression:
+ await self._start_compression(request)
+
+ if self._chunked:
+ if version != HttpVersion11:
+ raise RuntimeError(
+ "Using chunked encoding is forbidden "
+ "for HTTP/{0.major}.{0.minor}".format(request.version)
+ )
+ writer.enable_chunking()
+ headers[hdrs.TRANSFER_ENCODING] = "chunked"
+ if hdrs.CONTENT_LENGTH in headers:
+ del headers[hdrs.CONTENT_LENGTH]
+ elif self._length_check:
+ writer.length = self.content_length
+ if writer.length is None:
+ if version >= HttpVersion11:
+ writer.enable_chunking()
+ headers[hdrs.TRANSFER_ENCODING] = "chunked"
+ if hdrs.CONTENT_LENGTH in headers:
+ del headers[hdrs.CONTENT_LENGTH]
+ else:
+ keep_alive = False
+ # HTTP 1.1: https://tools.ietf.org/html/rfc7230#section-3.3.2
+ # HTTP 1.0: https://tools.ietf.org/html/rfc1945#section-10.4
+ elif version >= HttpVersion11 and self.status in (100, 101, 102, 103, 204):
+ del headers[hdrs.CONTENT_LENGTH]
+
+ headers.setdefault(hdrs.CONTENT_TYPE, "application/octet-stream")
+ headers.setdefault(hdrs.DATE, rfc822_formatted_time())
+ headers.setdefault(hdrs.SERVER, SERVER_SOFTWARE)
+
+ # connection header
+ if hdrs.CONNECTION not in headers:
+ if keep_alive:
+ if version == HttpVersion10:
+ headers[hdrs.CONNECTION] = "keep-alive"
+ else:
+ if version == HttpVersion11:
+ headers[hdrs.CONNECTION] = "close"
+
+ async def _write_headers(self) -> None:
+ request = self._req
+ assert request is not None
+ writer = self._payload_writer
+ assert writer is not None
+ # status line
+ version = request.version
+ status_line = "HTTP/{}.{} {} {}".format(
+ version[0], version[1], self._status, self._reason
+ )
+ await writer.write_headers(status_line, self._headers)
+
+ async def write(self, data: bytes) -> None:
+ assert isinstance(
+ data, (bytes, bytearray, memoryview)
+ ), "data argument must be byte-ish (%r)" % type(data)
+
+ if self._eof_sent:
+ raise RuntimeError("Cannot call write() after write_eof()")
+ if self._payload_writer is None:
+ raise RuntimeError("Cannot call write() before prepare()")
+
+ await self._payload_writer.write(data)
+
+ async def drain(self) -> None:
+ assert not self._eof_sent, "EOF has already been sent"
+ assert self._payload_writer is not None, "Response has not been started"
+ warnings.warn(
+ "drain method is deprecated, use await resp.write()",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ await self._payload_writer.drain()
+
+ async def write_eof(self, data: bytes = b"") -> None:
+ assert isinstance(
+ data, (bytes, bytearray, memoryview)
+ ), "data argument must be byte-ish (%r)" % type(data)
+
+ if self._eof_sent:
+ return
+
+ assert self._payload_writer is not None, "Response has not been started"
+
+ await self._payload_writer.write_eof(data)
+ self._eof_sent = True
+ self._req = None
+ self._body_length = self._payload_writer.output_size
+ self._payload_writer = None
+
+ def __repr__(self) -> str:
+ if self._eof_sent:
+ info = "eof"
+ elif self.prepared:
+ assert self._req is not None
+ info = f"{self._req.method} {self._req.path} "
+ else:
+ info = "not prepared"
+ return f"<{self.__class__.__name__} {self.reason} {info}>"
+
+ def __getitem__(self, key: str) -> Any:
+ return self._state[key]
+
+ def __setitem__(self, key: str, value: Any) -> None:
+ self._state[key] = value
+
+ def __delitem__(self, key: str) -> None:
+ del self._state[key]
+
+ def __len__(self) -> int:
+ return len(self._state)
+
+ def __iter__(self) -> Iterator[str]:
+ return iter(self._state)
+
+ def __hash__(self) -> int:
+ return hash(id(self))
+
+ def __eq__(self, other: object) -> bool:
+ return self is other
+
+
+class Response(StreamResponse):
+ def __init__(
+ self,
+ *,
+ body: Any = None,
+ status: int = 200,
+ reason: Optional[str] = None,
+ text: Optional[str] = None,
+ headers: Optional[LooseHeaders] = None,
+ content_type: Optional[str] = None,
+ charset: Optional[str] = None,
+ zlib_executor_size: Optional[int] = None,
+ zlib_executor: Optional[Executor] = None,
+ ) -> None:
+ if body is not None and text is not None:
+ raise ValueError("body and text are not allowed together")
+
+ if headers is None:
+ real_headers = CIMultiDict() # type: CIMultiDict[str]
+ elif not isinstance(headers, CIMultiDict):
+ real_headers = CIMultiDict(headers)
+ else:
+ real_headers = headers # = cast('CIMultiDict[str]', headers)
+
+ if content_type is not None and "charset" in content_type:
+            raise ValueError("charset must not be in content_type argument")
+
+ if text is not None:
+ if hdrs.CONTENT_TYPE in real_headers:
+ if content_type or charset:
+ raise ValueError(
+ "passing both Content-Type header and "
+ "content_type or charset params "
+ "is forbidden"
+ )
+ else:
+ # fast path for filling headers
+ if not isinstance(text, str):
+ raise TypeError("text argument must be str (%r)" % type(text))
+ if content_type is None:
+ content_type = "text/plain"
+ if charset is None:
+ charset = "utf-8"
+ real_headers[hdrs.CONTENT_TYPE] = content_type + "; charset=" + charset
+ body = text.encode(charset)
+ text = None
+ else:
+ if hdrs.CONTENT_TYPE in real_headers:
+ if content_type is not None or charset is not None:
+ raise ValueError(
+ "passing both Content-Type header and "
+ "content_type or charset params "
+ "is forbidden"
+ )
+ else:
+ if content_type is not None:
+ if charset is not None:
+ content_type += "; charset=" + charset
+ real_headers[hdrs.CONTENT_TYPE] = content_type
+
+ super().__init__(status=status, reason=reason, headers=real_headers)
+
+ if text is not None:
+ self.text = text
+ else:
+ self.body = body
+
+ self._compressed_body = None # type: Optional[bytes]
+ self._zlib_executor_size = zlib_executor_size
+ self._zlib_executor = zlib_executor
+
+ @property
+ def body(self) -> Optional[Union[bytes, Payload]]:
+ return self._body
+
+ @body.setter
+ def body(
+ self,
+ body: bytes,
+ CONTENT_TYPE: istr = hdrs.CONTENT_TYPE,
+ CONTENT_LENGTH: istr = hdrs.CONTENT_LENGTH,
+ ) -> None:
+ if body is None:
+ self._body = None # type: Optional[bytes]
+ self._body_payload = False # type: bool
+ elif isinstance(body, (bytes, bytearray)):
+ self._body = body
+ self._body_payload = False
+ else:
+ try:
+ self._body = body = payload.PAYLOAD_REGISTRY.get(body)
+ except payload.LookupError:
+ raise ValueError("Unsupported body type %r" % type(body))
+
+ self._body_payload = True
+
+ headers = self._headers
+
+ # set content-length header if needed
+ if not self._chunked and CONTENT_LENGTH not in headers:
+ size = body.size
+ if size is not None:
+ headers[CONTENT_LENGTH] = str(size)
+
+ # set content-type
+ if CONTENT_TYPE not in headers:
+ headers[CONTENT_TYPE] = body.content_type
+
+ # copy payload headers
+ if body.headers:
+ for (key, value) in body.headers.items():
+ if key not in headers:
+ headers[key] = value
+
+ self._compressed_body = None
+
+ @property
+ def text(self) -> Optional[str]:
+ if self._body is None:
+ return None
+ return self._body.decode(self.charset or "utf-8")
+
+ @text.setter
+ def text(self, text: str) -> None:
+ assert text is None or isinstance(
+ text, str
+ ), "text argument must be str (%r)" % type(text)
+
+ if self.content_type == "application/octet-stream":
+ self.content_type = "text/plain"
+ if self.charset is None:
+ self.charset = "utf-8"
+
+ self._body = text.encode(self.charset)
+ self._body_payload = False
+ self._compressed_body = None
+
+ @property
+ def content_length(self) -> Optional[int]:
+ if self._chunked:
+ return None
+
+ if hdrs.CONTENT_LENGTH in self._headers:
+ return super().content_length
+
+ if self._compressed_body is not None:
+ # Return length of the compressed body
+ return len(self._compressed_body)
+ elif self._body_payload:
+ # A payload without content length, or a compressed payload
+ return None
+ elif self._body is not None:
+ return len(self._body)
+ else:
+ return 0
+
+ @content_length.setter
+ def content_length(self, value: Optional[int]) -> None:
+ raise RuntimeError("Content length is set automatically")
+
+ async def write_eof(self, data: bytes = b"") -> None:
+ if self._eof_sent:
+ return
+ if self._compressed_body is None:
+ body = self._body # type: Optional[Union[bytes, Payload]]
+ else:
+ body = self._compressed_body
+ assert not data, f"data arg is not supported, got {data!r}"
+ assert self._req is not None
+ assert self._payload_writer is not None
+ if body is not None:
+ if self._req._method == hdrs.METH_HEAD or self._status in [204, 304]:
+ await super().write_eof()
+ elif self._body_payload:
+ payload = cast(Payload, body)
+ await payload.write(self._payload_writer)
+ await super().write_eof()
+ else:
+ await super().write_eof(cast(bytes, body))
+ else:
+ await super().write_eof()
+
+ async def _start(self, request: "BaseRequest") -> AbstractStreamWriter:
+ if not self._chunked and hdrs.CONTENT_LENGTH not in self._headers:
+ if not self._body_payload:
+ if self._body is not None:
+ self._headers[hdrs.CONTENT_LENGTH] = str(len(self._body))
+ else:
+ self._headers[hdrs.CONTENT_LENGTH] = "0"
+
+ return await super()._start(request)
+
+ def _compress_body(self, zlib_mode: int) -> None:
+ assert zlib_mode > 0
+ compressobj = zlib.compressobj(wbits=zlib_mode)
+ body_in = self._body
+ assert body_in is not None
+ self._compressed_body = compressobj.compress(body_in) + compressobj.flush()
+
+ async def _do_start_compression(self, coding: ContentCoding) -> None:
+ if self._body_payload or self._chunked:
+ return await super()._do_start_compression(coding)
+
+ if coding != ContentCoding.identity:
+ # Instead of using _payload_writer.enable_compression,
+ # compress the whole body
+ zlib_mode = (
+ 16 + zlib.MAX_WBITS if coding == ContentCoding.gzip else zlib.MAX_WBITS
+ )
+ body_in = self._body
+ assert body_in is not None
+ if (
+ self._zlib_executor_size is not None
+ and len(body_in) > self._zlib_executor_size
+ ):
+ await asyncio.get_event_loop().run_in_executor(
+ self._zlib_executor, self._compress_body, zlib_mode
+ )
+ else:
+ self._compress_body(zlib_mode)
+
+ body_out = self._compressed_body
+ assert body_out is not None
+
+ self._headers[hdrs.CONTENT_ENCODING] = coding.value
+ self._headers[hdrs.CONTENT_LENGTH] = str(len(body_out))
+
+
+def json_response(
+ data: Any = sentinel,
+ *,
+ text: Optional[str] = None,
+ body: Optional[bytes] = None,
+ status: int = 200,
+ reason: Optional[str] = None,
+ headers: Optional[LooseHeaders] = None,
+ content_type: str = "application/json",
+ dumps: JSONEncoder = json.dumps,
+) -> Response:
+ if data is not sentinel:
+ if text or body:
+ raise ValueError("only one of data, text, or body should be specified")
+ else:
+ text = dumps(data)
+ return Response(
+ text=text,
+ body=body,
+ status=status,
+ reason=reason,
+ headers=headers,
+ content_type=content_type,
+ )
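A hedged sketch of the two response flavors defined above: a StreamResponse
must be prepare()d before write(), while json_response is a thin shortcut
over Response. Handler names and the route path are illustrative.

    from aiohttp import web

    async def stream_numbers(request: web.Request) -> web.StreamResponse:
        resp = web.StreamResponse(status=200)
        resp.enable_chunked_encoding()           # excludes Content-Length
        await resp.prepare(request)              # headers are written here
        for i in range(3):
            await resp.write(f"{i}\n".encode())  # write() needs prepare() first
        await resp.write_eof()
        return resp

    async def whoami(request: web.Request) -> web.Response:
        # json_response serializes via json.dumps and sets application/json
        return web.json_response({"path": request.path})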
diff --git a/third_party/python/aiohttp/aiohttp/web_routedef.py b/third_party/python/aiohttp/aiohttp/web_routedef.py
new file mode 100644
index 0000000000..188525103d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_routedef.py
@@ -0,0 +1,215 @@
+import abc
+import os # noqa
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ Dict,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Type,
+ Union,
+ overload,
+)
+
+import attr
+
+from . import hdrs
+from .abc import AbstractView
+from .typedefs import PathLike
+
+if TYPE_CHECKING: # pragma: no cover
+ from .web_request import Request
+ from .web_response import StreamResponse
+ from .web_urldispatcher import AbstractRoute, UrlDispatcher
+else:
+ Request = StreamResponse = UrlDispatcher = AbstractRoute = None
+
+
+__all__ = (
+ "AbstractRouteDef",
+ "RouteDef",
+ "StaticDef",
+ "RouteTableDef",
+ "head",
+ "options",
+ "get",
+ "post",
+ "patch",
+ "put",
+ "delete",
+ "route",
+ "view",
+ "static",
+)
+
+
+class AbstractRouteDef(abc.ABC):
+ @abc.abstractmethod
+ def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
+ pass # pragma: no cover
+
+
+_SimpleHandler = Callable[[Request], Awaitable[StreamResponse]]
+_HandlerType = Union[Type[AbstractView], _SimpleHandler]
+
+
+@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
+class RouteDef(AbstractRouteDef):
+ method: str
+ path: str
+ handler: _HandlerType
+ kwargs: Dict[str, Any]
+
+ def __repr__(self) -> str:
+ info = []
+ for name, value in sorted(self.kwargs.items()):
+ info.append(f", {name}={value!r}")
+        return "<RouteDef {method} {path} -> {handler.__name__!r}{info}>".format(
+ method=self.method, path=self.path, handler=self.handler, info="".join(info)
+ )
+
+ def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
+ if self.method in hdrs.METH_ALL:
+ reg = getattr(router, "add_" + self.method.lower())
+ return [reg(self.path, self.handler, **self.kwargs)]
+ else:
+ return [
+ router.add_route(self.method, self.path, self.handler, **self.kwargs)
+ ]
+
+
+@attr.s(auto_attribs=True, frozen=True, repr=False, slots=True)
+class StaticDef(AbstractRouteDef):
+ prefix: str
+ path: PathLike
+ kwargs: Dict[str, Any]
+
+ def __repr__(self) -> str:
+ info = []
+ for name, value in sorted(self.kwargs.items()):
+ info.append(f", {name}={value!r}")
+        return "<StaticDef {prefix} -> {path}{info}>".format(
+ prefix=self.prefix, path=self.path, info="".join(info)
+ )
+
+ def register(self, router: UrlDispatcher) -> List[AbstractRoute]:
+ resource = router.add_static(self.prefix, self.path, **self.kwargs)
+ routes = resource.get_info().get("routes", {})
+ return list(routes.values())
+
+
+def route(method: str, path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+ return RouteDef(method, path, handler, kwargs)
+
+
+def head(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+ return route(hdrs.METH_HEAD, path, handler, **kwargs)
+
+
+def options(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+ return route(hdrs.METH_OPTIONS, path, handler, **kwargs)
+
+
+def get(
+ path: str,
+ handler: _HandlerType,
+ *,
+ name: Optional[str] = None,
+ allow_head: bool = True,
+ **kwargs: Any,
+) -> RouteDef:
+ return route(
+ hdrs.METH_GET, path, handler, name=name, allow_head=allow_head, **kwargs
+ )
+
+
+def post(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+ return route(hdrs.METH_POST, path, handler, **kwargs)
+
+
+def put(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+ return route(hdrs.METH_PUT, path, handler, **kwargs)
+
+
+def patch(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+ return route(hdrs.METH_PATCH, path, handler, **kwargs)
+
+
+def delete(path: str, handler: _HandlerType, **kwargs: Any) -> RouteDef:
+ return route(hdrs.METH_DELETE, path, handler, **kwargs)
+
+
+def view(path: str, handler: Type[AbstractView], **kwargs: Any) -> RouteDef:
+ return route(hdrs.METH_ANY, path, handler, **kwargs)
+
+
+def static(prefix: str, path: PathLike, **kwargs: Any) -> StaticDef:
+ return StaticDef(prefix, path, kwargs)
+
+
+_Deco = Callable[[_HandlerType], _HandlerType]
+
+
+class RouteTableDef(Sequence[AbstractRouteDef]):
+ """Route definition table"""
+
+ def __init__(self) -> None:
+ self._items = [] # type: List[AbstractRouteDef]
+
+ def __repr__(self) -> str:
+ return "<RouteTableDef count={}>".format(len(self._items))
+
+ @overload
+ def __getitem__(self, index: int) -> AbstractRouteDef:
+ ...
+
+ @overload
+ def __getitem__(self, index: slice) -> List[AbstractRouteDef]:
+ ...
+
+ def __getitem__(self, index): # type: ignore
+ return self._items[index]
+
+ def __iter__(self) -> Iterator[AbstractRouteDef]:
+ return iter(self._items)
+
+ def __len__(self) -> int:
+ return len(self._items)
+
+ def __contains__(self, item: object) -> bool:
+ return item in self._items
+
+ def route(self, method: str, path: str, **kwargs: Any) -> _Deco:
+ def inner(handler: _HandlerType) -> _HandlerType:
+ self._items.append(RouteDef(method, path, handler, kwargs))
+ return handler
+
+ return inner
+
+ def head(self, path: str, **kwargs: Any) -> _Deco:
+ return self.route(hdrs.METH_HEAD, path, **kwargs)
+
+ def get(self, path: str, **kwargs: Any) -> _Deco:
+ return self.route(hdrs.METH_GET, path, **kwargs)
+
+ def post(self, path: str, **kwargs: Any) -> _Deco:
+ return self.route(hdrs.METH_POST, path, **kwargs)
+
+ def put(self, path: str, **kwargs: Any) -> _Deco:
+ return self.route(hdrs.METH_PUT, path, **kwargs)
+
+ def patch(self, path: str, **kwargs: Any) -> _Deco:
+ return self.route(hdrs.METH_PATCH, path, **kwargs)
+
+ def delete(self, path: str, **kwargs: Any) -> _Deco:
+ return self.route(hdrs.METH_DELETE, path, **kwargs)
+
+ def view(self, path: str, **kwargs: Any) -> _Deco:
+ return self.route(hdrs.METH_ANY, path, **kwargs)
+
+ def static(self, prefix: str, path: PathLike, **kwargs: Any) -> None:
+ self._items.append(StaticDef(prefix, path, kwargs))
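RouteTableDef above only records RouteDef/StaticDef entries; nothing touches a
router until the table is passed to add_routes(), which calls each entry's
register(). A small sketch; the path, handler, and static directory are
illustrative.

    from aiohttp import web

    routes = web.RouteTableDef()

    @routes.get("/items/{id}")        # stored as a RouteDef, not yet registered
    async def get_item(request: web.Request) -> web.Response:
        return web.json_response({"id": request.match_info["id"]})

    routes.static("/assets", "./static")   # "./static" must exist at add time

    app = web.Application()
    app.add_routes(routes)            # each entry's register() runs here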
diff --git a/third_party/python/aiohttp/aiohttp/web_runner.py b/third_party/python/aiohttp/aiohttp/web_runner.py
new file mode 100644
index 0000000000..25ac28a7a8
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_runner.py
@@ -0,0 +1,381 @@
+import asyncio
+import signal
+import socket
+from abc import ABC, abstractmethod
+from typing import Any, List, Optional, Set
+
+from yarl import URL
+
+from .web_app import Application
+from .web_server import Server
+
+try:
+ from ssl import SSLContext
+except ImportError:
+ SSLContext = object # type: ignore
+
+
+__all__ = (
+ "BaseSite",
+ "TCPSite",
+ "UnixSite",
+ "NamedPipeSite",
+ "SockSite",
+ "BaseRunner",
+ "AppRunner",
+ "ServerRunner",
+ "GracefulExit",
+)
+
+
+class GracefulExit(SystemExit):
+ code = 1
+
+
+def _raise_graceful_exit() -> None:
+ raise GracefulExit()
+
+
+class BaseSite(ABC):
+ __slots__ = ("_runner", "_shutdown_timeout", "_ssl_context", "_backlog", "_server")
+
+ def __init__(
+ self,
+ runner: "BaseRunner",
+ *,
+ shutdown_timeout: float = 60.0,
+ ssl_context: Optional[SSLContext] = None,
+ backlog: int = 128,
+ ) -> None:
+ if runner.server is None:
+ raise RuntimeError("Call runner.setup() before making a site")
+ self._runner = runner
+ self._shutdown_timeout = shutdown_timeout
+ self._ssl_context = ssl_context
+ self._backlog = backlog
+ self._server = None # type: Optional[asyncio.AbstractServer]
+
+ @property
+ @abstractmethod
+ def name(self) -> str:
+ pass # pragma: no cover
+
+ @abstractmethod
+ async def start(self) -> None:
+ self._runner._reg_site(self)
+
+ async def stop(self) -> None:
+ self._runner._check_site(self)
+ if self._server is None:
+ self._runner._unreg_site(self)
+ return # not started yet
+ self._server.close()
+ # named pipes do not have wait_closed property
+ if hasattr(self._server, "wait_closed"):
+ await self._server.wait_closed()
+ await self._runner.shutdown()
+ assert self._runner.server
+ await self._runner.server.shutdown(self._shutdown_timeout)
+ self._runner._unreg_site(self)
+
+
+class TCPSite(BaseSite):
+ __slots__ = ("_host", "_port", "_reuse_address", "_reuse_port")
+
+ def __init__(
+ self,
+ runner: "BaseRunner",
+ host: Optional[str] = None,
+ port: Optional[int] = None,
+ *,
+ shutdown_timeout: float = 60.0,
+ ssl_context: Optional[SSLContext] = None,
+ backlog: int = 128,
+ reuse_address: Optional[bool] = None,
+ reuse_port: Optional[bool] = None,
+ ) -> None:
+ super().__init__(
+ runner,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ )
+ self._host = host
+ if port is None:
+ port = 8443 if self._ssl_context else 8080
+ self._port = port
+ self._reuse_address = reuse_address
+ self._reuse_port = reuse_port
+
+ @property
+ def name(self) -> str:
+ scheme = "https" if self._ssl_context else "http"
+ host = "0.0.0.0" if self._host is None else self._host
+ return str(URL.build(scheme=scheme, host=host, port=self._port))
+
+ async def start(self) -> None:
+ await super().start()
+ loop = asyncio.get_event_loop()
+ server = self._runner.server
+ assert server is not None
+ self._server = await loop.create_server(
+ server,
+ self._host,
+ self._port,
+ ssl=self._ssl_context,
+ backlog=self._backlog,
+ reuse_address=self._reuse_address,
+ reuse_port=self._reuse_port,
+ )
+
+
+class UnixSite(BaseSite):
+ __slots__ = ("_path",)
+
+ def __init__(
+ self,
+ runner: "BaseRunner",
+ path: str,
+ *,
+ shutdown_timeout: float = 60.0,
+ ssl_context: Optional[SSLContext] = None,
+ backlog: int = 128,
+ ) -> None:
+ super().__init__(
+ runner,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ )
+ self._path = path
+
+ @property
+ def name(self) -> str:
+ scheme = "https" if self._ssl_context else "http"
+ return f"{scheme}://unix:{self._path}:"
+
+ async def start(self) -> None:
+ await super().start()
+ loop = asyncio.get_event_loop()
+ server = self._runner.server
+ assert server is not None
+ self._server = await loop.create_unix_server(
+ server, self._path, ssl=self._ssl_context, backlog=self._backlog
+ )
+
+
+class NamedPipeSite(BaseSite):
+ __slots__ = ("_path",)
+
+ def __init__(
+ self, runner: "BaseRunner", path: str, *, shutdown_timeout: float = 60.0
+ ) -> None:
+ loop = asyncio.get_event_loop()
+ if not isinstance(loop, asyncio.ProactorEventLoop): # type: ignore
+ raise RuntimeError(
+                "Named Pipes only available in proactor loop under Windows"
+ )
+ super().__init__(runner, shutdown_timeout=shutdown_timeout)
+ self._path = path
+
+ @property
+ def name(self) -> str:
+ return self._path
+
+ async def start(self) -> None:
+ await super().start()
+ loop = asyncio.get_event_loop()
+ server = self._runner.server
+ assert server is not None
+ _server = await loop.start_serving_pipe(server, self._path) # type: ignore
+ self._server = _server[0]
+
+
+class SockSite(BaseSite):
+ __slots__ = ("_sock", "_name")
+
+ def __init__(
+ self,
+ runner: "BaseRunner",
+ sock: socket.socket,
+ *,
+ shutdown_timeout: float = 60.0,
+ ssl_context: Optional[SSLContext] = None,
+ backlog: int = 128,
+ ) -> None:
+ super().__init__(
+ runner,
+ shutdown_timeout=shutdown_timeout,
+ ssl_context=ssl_context,
+ backlog=backlog,
+ )
+ self._sock = sock
+ scheme = "https" if self._ssl_context else "http"
+ if hasattr(socket, "AF_UNIX") and sock.family == socket.AF_UNIX:
+ name = f"{scheme}://unix:{sock.getsockname()}:"
+ else:
+ host, port = sock.getsockname()[:2]
+ name = str(URL.build(scheme=scheme, host=host, port=port))
+ self._name = name
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+ async def start(self) -> None:
+ await super().start()
+ loop = asyncio.get_event_loop()
+ server = self._runner.server
+ assert server is not None
+ self._server = await loop.create_server(
+ server, sock=self._sock, ssl=self._ssl_context, backlog=self._backlog
+ )
+
+
+class BaseRunner(ABC):
+ __slots__ = ("_handle_signals", "_kwargs", "_server", "_sites")
+
+ def __init__(self, *, handle_signals: bool = False, **kwargs: Any) -> None:
+ self._handle_signals = handle_signals
+ self._kwargs = kwargs
+ self._server = None # type: Optional[Server]
+ self._sites = [] # type: List[BaseSite]
+
+ @property
+ def server(self) -> Optional[Server]:
+ return self._server
+
+ @property
+ def addresses(self) -> List[Any]:
+ ret = [] # type: List[Any]
+ for site in self._sites:
+ server = site._server
+ if server is not None:
+ sockets = server.sockets
+ if sockets is not None:
+ for sock in sockets:
+ ret.append(sock.getsockname())
+ return ret
+
+ @property
+ def sites(self) -> Set[BaseSite]:
+ return set(self._sites)
+
+ async def setup(self) -> None:
+ loop = asyncio.get_event_loop()
+
+ if self._handle_signals:
+ try:
+ loop.add_signal_handler(signal.SIGINT, _raise_graceful_exit)
+ loop.add_signal_handler(signal.SIGTERM, _raise_graceful_exit)
+ except NotImplementedError: # pragma: no cover
+ # add_signal_handler is not implemented on Windows
+ pass
+
+ self._server = await self._make_server()
+
+ @abstractmethod
+ async def shutdown(self) -> None:
+ pass # pragma: no cover
+
+ async def cleanup(self) -> None:
+ loop = asyncio.get_event_loop()
+
+ if self._server is None:
+            # not started yet, do nothing
+ return
+
+        # The loop over sites is intentional: an exception from gather()
+        # would leave self._sites in an unpredictable state.
+        # The loop guarantees that a site is either removed on success or
+        # still present on failure.
+ for site in list(self._sites):
+ await site.stop()
+ await self._cleanup_server()
+ self._server = None
+ if self._handle_signals:
+ try:
+ loop.remove_signal_handler(signal.SIGINT)
+ loop.remove_signal_handler(signal.SIGTERM)
+ except NotImplementedError: # pragma: no cover
+ # remove_signal_handler is not implemented on Windows
+ pass
+
+ @abstractmethod
+ async def _make_server(self) -> Server:
+ pass # pragma: no cover
+
+ @abstractmethod
+ async def _cleanup_server(self) -> None:
+ pass # pragma: no cover
+
+ def _reg_site(self, site: BaseSite) -> None:
+ if site in self._sites:
+ raise RuntimeError(f"Site {site} is already registered in runner {self}")
+ self._sites.append(site)
+
+ def _check_site(self, site: BaseSite) -> None:
+ if site not in self._sites:
+ raise RuntimeError(f"Site {site} is not registered in runner {self}")
+
+ def _unreg_site(self, site: BaseSite) -> None:
+ if site not in self._sites:
+ raise RuntimeError(f"Site {site} is not registered in runner {self}")
+ self._sites.remove(site)
+
+
+class ServerRunner(BaseRunner):
+ """Low-level web server runner"""
+
+ __slots__ = ("_web_server",)
+
+ def __init__(
+ self, web_server: Server, *, handle_signals: bool = False, **kwargs: Any
+ ) -> None:
+ super().__init__(handle_signals=handle_signals, **kwargs)
+ self._web_server = web_server
+
+ async def shutdown(self) -> None:
+ pass
+
+ async def _make_server(self) -> Server:
+ return self._web_server
+
+ async def _cleanup_server(self) -> None:
+ pass
+
+
+class AppRunner(BaseRunner):
+ """Web Application runner"""
+
+ __slots__ = ("_app",)
+
+ def __init__(
+ self, app: Application, *, handle_signals: bool = False, **kwargs: Any
+ ) -> None:
+ super().__init__(handle_signals=handle_signals, **kwargs)
+ if not isinstance(app, Application):
+ raise TypeError(
+ "The first argument should be web.Application "
+ "instance, got {!r}".format(app)
+ )
+ self._app = app
+
+ @property
+ def app(self) -> Application:
+ return self._app
+
+ async def shutdown(self) -> None:
+ await self._app.shutdown()
+
+ async def _make_server(self) -> Server:
+ loop = asyncio.get_event_loop()
+ self._app._set_loop(loop)
+ self._app.on_startup.freeze()
+ await self._app.startup()
+ self._app.freeze()
+
+ return self._app._make_handler(loop=loop, **self._kwargs)
+
+ async def _cleanup_server(self) -> None:
+ await self._app.cleanup()
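The runner/site split above decouples application lifecycle (AppRunner) from
listening endpoints (TCPSite and friends). A minimal manual start/stop sketch;
host, port, and the one-hour lifetime are illustrative.

    import asyncio

    from aiohttp import web

    async def main() -> None:
        app = web.Application()
        runner = web.AppRunner(app)
        await runner.setup()                    # builds and freezes the Server
        site = web.TCPSite(runner, "127.0.0.1", 8080)
        await site.start()                      # registers the site and listens
        try:
            await asyncio.sleep(3600)           # serve for a while
        finally:
            await runner.cleanup()              # stops sites, then the server

    asyncio.run(main())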
diff --git a/third_party/python/aiohttp/aiohttp/web_server.py b/third_party/python/aiohttp/aiohttp/web_server.py
new file mode 100644
index 0000000000..5657ed9c80
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_server.py
@@ -0,0 +1,62 @@
+"""Low level HTTP server."""
+import asyncio
+from typing import Any, Awaitable, Callable, Dict, List, Optional # noqa
+
+from .abc import AbstractStreamWriter
+from .helpers import get_running_loop
+from .http_parser import RawRequestMessage
+from .streams import StreamReader
+from .web_protocol import RequestHandler, _RequestFactory, _RequestHandler
+from .web_request import BaseRequest
+
+__all__ = ("Server",)
+
+
+class Server:
+ def __init__(
+ self,
+ handler: _RequestHandler,
+ *,
+ request_factory: Optional[_RequestFactory] = None,
+ loop: Optional[asyncio.AbstractEventLoop] = None,
+ **kwargs: Any
+ ) -> None:
+ self._loop = get_running_loop(loop)
+ self._connections = {} # type: Dict[RequestHandler, asyncio.Transport]
+ self._kwargs = kwargs
+ self.requests_count = 0
+ self.request_handler = handler
+ self.request_factory = request_factory or self._make_request
+
+ @property
+ def connections(self) -> List[RequestHandler]:
+ return list(self._connections.keys())
+
+ def connection_made(
+ self, handler: RequestHandler, transport: asyncio.Transport
+ ) -> None:
+ self._connections[handler] = transport
+
+ def connection_lost(
+ self, handler: RequestHandler, exc: Optional[BaseException] = None
+ ) -> None:
+ if handler in self._connections:
+ del self._connections[handler]
+
+ def _make_request(
+ self,
+ message: RawRequestMessage,
+ payload: StreamReader,
+ protocol: RequestHandler,
+ writer: AbstractStreamWriter,
+ task: "asyncio.Task[None]",
+ ) -> BaseRequest:
+ return BaseRequest(message, payload, protocol, writer, task, self._loop)
+
+ async def shutdown(self, timeout: Optional[float] = None) -> None:
+ coros = [conn.shutdown(timeout) for conn in self._connections]
+ await asyncio.gather(*coros)
+ self._connections.clear()
+
+ def __call__(self) -> RequestHandler:
+ return RequestHandler(self, loop=self._loop, **self._kwargs)
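Server is the protocol factory the runners build on: calling the instance
yields a fresh RequestHandler per connection. A hedged low-level sketch wiring
it straight into the event loop, bypassing Application; the handler and port
are illustrative.

    import asyncio

    from aiohttp import web

    async def handler(request: web.BaseRequest) -> web.Response:
        return web.Response(text="OK")

    async def main() -> None:
        server = web.Server(handler)
        loop = asyncio.get_running_loop()
        await loop.create_server(server, "127.0.0.1", 8080)
        await asyncio.sleep(3600)               # keep serving

    asyncio.run(main())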
diff --git a/third_party/python/aiohttp/aiohttp/web_urldispatcher.py b/third_party/python/aiohttp/aiohttp/web_urldispatcher.py
new file mode 100644
index 0000000000..2afd72f13d
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_urldispatcher.py
@@ -0,0 +1,1233 @@
+import abc
+import asyncio
+import base64
+import hashlib
+import inspect
+import keyword
+import os
+import re
+import warnings
+from contextlib import contextmanager
+from functools import wraps
+from pathlib import Path
+from types import MappingProxyType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Awaitable,
+ Callable,
+ Container,
+ Dict,
+ Generator,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Pattern,
+ Set,
+ Sized,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+
+from typing_extensions import TypedDict
+from yarl import URL, __version__ as yarl_version # type: ignore
+
+from . import hdrs
+from .abc import AbstractMatchInfo, AbstractRouter, AbstractView
+from .helpers import DEBUG
+from .http import HttpVersion11
+from .typedefs import PathLike
+from .web_exceptions import (
+ HTTPException,
+ HTTPExpectationFailed,
+ HTTPForbidden,
+ HTTPMethodNotAllowed,
+ HTTPNotFound,
+)
+from .web_fileresponse import FileResponse
+from .web_request import Request
+from .web_response import Response, StreamResponse
+from .web_routedef import AbstractRouteDef
+
+__all__ = (
+ "UrlDispatcher",
+ "UrlMappingMatchInfo",
+ "AbstractResource",
+ "Resource",
+ "PlainResource",
+ "DynamicResource",
+ "AbstractRoute",
+ "ResourceRoute",
+ "StaticResource",
+ "View",
+)
+
+
+if TYPE_CHECKING: # pragma: no cover
+ from .web_app import Application
+
+ BaseDict = Dict[str, str]
+else:
+ BaseDict = dict
+
+YARL_VERSION = tuple(map(int, yarl_version.split(".")[:2]))
+
+HTTP_METHOD_RE = re.compile(r"^[0-9A-Za-z!#\$%&'\*\+\-\.\^_`\|~]+$")
+ROUTE_RE = re.compile(r"(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})")
+PATH_SEP = re.escape("/")
+
+
+_WebHandler = Callable[[Request], Awaitable[StreamResponse]]
+_ExpectHandler = Callable[[Request], Awaitable[None]]
+_Resolve = Tuple[Optional[AbstractMatchInfo], Set[str]]
+
+
+class _InfoDict(TypedDict, total=False):
+ path: str
+
+ formatter: str
+ pattern: Pattern[str]
+
+ directory: Path
+ prefix: str
+ routes: Mapping[str, "AbstractRoute"]
+
+ app: "Application"
+
+ domain: str
+
+ rule: "AbstractRuleMatching"
+
+ http_exception: HTTPException
+
+
+class AbstractResource(Sized, Iterable["AbstractRoute"]):
+ def __init__(self, *, name: Optional[str] = None) -> None:
+ self._name = name
+
+ @property
+ def name(self) -> Optional[str]:
+ return self._name
+
+ @property
+ @abc.abstractmethod
+ def canonical(self) -> str:
+ """Exposes the resource's canonical path.
+
+ For example '/foo/bar/{name}'
+
+ """
+
+ @abc.abstractmethod # pragma: no branch
+ def url_for(self, **kwargs: str) -> URL:
+ """Construct url for resource with additional params."""
+
+ @abc.abstractmethod # pragma: no branch
+ async def resolve(self, request: Request) -> _Resolve:
+        """Resolve the resource.
+
+ Return (UrlMappingMatchInfo, allowed_methods) pair."""
+
+ @abc.abstractmethod
+ def add_prefix(self, prefix: str) -> None:
+ """Add a prefix to processed URLs.
+
+ Required for subapplications support.
+
+ """
+
+ @abc.abstractmethod
+ def get_info(self) -> _InfoDict:
+ """Return a dict with additional info useful for introspection"""
+
+ def freeze(self) -> None:
+ pass
+
+ @abc.abstractmethod
+ def raw_match(self, path: str) -> bool:
+ """Perform a raw match against path"""
+
+
+class AbstractRoute(abc.ABC):
+ def __init__(
+ self,
+ method: str,
+ handler: Union[_WebHandler, Type[AbstractView]],
+ *,
+ expect_handler: Optional[_ExpectHandler] = None,
+ resource: Optional[AbstractResource] = None,
+ ) -> None:
+
+ if expect_handler is None:
+ expect_handler = _default_expect_handler
+
+ assert asyncio.iscoroutinefunction(
+ expect_handler
+ ), f"Coroutine is expected, got {expect_handler!r}"
+
+ method = method.upper()
+ if not HTTP_METHOD_RE.match(method):
+            raise ValueError(f"{method} is not an allowed HTTP method")
+
+ assert callable(handler), handler
+ if asyncio.iscoroutinefunction(handler):
+ pass
+ elif inspect.isgeneratorfunction(handler):
+ warnings.warn(
+                "Bare generators are deprecated, use @coroutine wrapper",
+ DeprecationWarning,
+ )
+ elif isinstance(handler, type) and issubclass(handler, AbstractView):
+ pass
+ else:
+ warnings.warn(
+                "Bare functions are deprecated, use async ones", DeprecationWarning
+ )
+
+ @wraps(handler)
+ async def handler_wrapper(request: Request) -> StreamResponse:
+ result = old_handler(request)
+ if asyncio.iscoroutine(result):
+ return await result
+ return result # type: ignore
+
+ old_handler = handler
+ handler = handler_wrapper
+
+ self._method = method
+ self._handler = handler
+ self._expect_handler = expect_handler
+ self._resource = resource
+
+ @property
+ def method(self) -> str:
+ return self._method
+
+ @property
+ def handler(self) -> _WebHandler:
+ return self._handler
+
+ @property
+ @abc.abstractmethod
+ def name(self) -> Optional[str]:
+        """Optional route name; always equal to the resource's name."""
+
+ @property
+ def resource(self) -> Optional[AbstractResource]:
+ return self._resource
+
+ @abc.abstractmethod
+ def get_info(self) -> _InfoDict:
+ """Return a dict with additional info useful for introspection"""
+
+ @abc.abstractmethod # pragma: no branch
+ def url_for(self, *args: str, **kwargs: str) -> URL:
+ """Construct url for route with additional params."""
+
+ async def handle_expect_header(self, request: Request) -> None:
+ await self._expect_handler(request)
+
+
+class UrlMappingMatchInfo(BaseDict, AbstractMatchInfo):
+ def __init__(self, match_dict: Dict[str, str], route: AbstractRoute):
+ super().__init__(match_dict)
+ self._route = route
+ self._apps = [] # type: List[Application]
+ self._current_app = None # type: Optional[Application]
+ self._frozen = False
+
+ @property
+ def handler(self) -> _WebHandler:
+ return self._route.handler
+
+ @property
+ def route(self) -> AbstractRoute:
+ return self._route
+
+ @property
+ def expect_handler(self) -> _ExpectHandler:
+ return self._route.handle_expect_header
+
+ @property
+ def http_exception(self) -> Optional[HTTPException]:
+ return None
+
+ def get_info(self) -> _InfoDict: # type: ignore
+ return self._route.get_info()
+
+ @property
+ def apps(self) -> Tuple["Application", ...]:
+ return tuple(self._apps)
+
+ def add_app(self, app: "Application") -> None:
+ if self._frozen:
+ raise RuntimeError("Cannot change apps stack after .freeze() call")
+ if self._current_app is None:
+ self._current_app = app
+ self._apps.insert(0, app)
+
+ @property
+ def current_app(self) -> "Application":
+ app = self._current_app
+ assert app is not None
+ return app
+
+ @contextmanager
+ def set_current_app(self, app: "Application") -> Generator[None, None, None]:
+ if DEBUG: # pragma: no cover
+ if app not in self._apps:
+ raise RuntimeError(
+ "Expected one of the following apps {!r}, got {!r}".format(
+ self._apps, app
+ )
+ )
+ prev = self._current_app
+ self._current_app = app
+ try:
+ yield
+ finally:
+ self._current_app = prev
+
+ def freeze(self) -> None:
+ self._frozen = True
+
+ def __repr__(self) -> str:
+ return f"<MatchInfo {super().__repr__()}: {self._route}>"
+
+
+class MatchInfoError(UrlMappingMatchInfo):
+ def __init__(self, http_exception: HTTPException) -> None:
+ self._exception = http_exception
+ super().__init__({}, SystemRoute(self._exception))
+
+ @property
+ def http_exception(self) -> HTTPException:
+ return self._exception
+
+ def __repr__(self) -> str:
+ return "<MatchInfoError {}: {}>".format(
+ self._exception.status, self._exception.reason
+ )
+
+
+async def _default_expect_handler(request: Request) -> None:
+ """Default handler for Expect header.
+
+    Just send "100 Continue" to the client.
+    Raise HTTPExpectationFailed if the header value is not "100-continue".
+ """
+ expect = request.headers.get(hdrs.EXPECT, "")
+ if request.version == HttpVersion11:
+ if expect.lower() == "100-continue":
+ await request.writer.write(b"HTTP/1.1 100 Continue\r\n\r\n")
+ else:
+ raise HTTPExpectationFailed(text="Unknown Expect: %s" % expect)
+
+
+class Resource(AbstractResource):
+ def __init__(self, *, name: Optional[str] = None) -> None:
+ super().__init__(name=name)
+ self._routes = [] # type: List[ResourceRoute]
+
+ def add_route(
+ self,
+ method: str,
+ handler: Union[Type[AbstractView], _WebHandler],
+ *,
+ expect_handler: Optional[_ExpectHandler] = None,
+ ) -> "ResourceRoute":
+
+ for route_obj in self._routes:
+ if route_obj.method == method or route_obj.method == hdrs.METH_ANY:
+ raise RuntimeError(
+ "Added route will never be executed, "
+ "method {route.method} is already "
+ "registered".format(route=route_obj)
+ )
+
+ route_obj = ResourceRoute(method, handler, self, expect_handler=expect_handler)
+ self.register_route(route_obj)
+ return route_obj
+
+ def register_route(self, route: "ResourceRoute") -> None:
+ assert isinstance(
+ route, ResourceRoute
+ ), f"Instance of Route class is required, got {route!r}"
+ self._routes.append(route)
+
+ async def resolve(self, request: Request) -> _Resolve:
+ allowed_methods = set() # type: Set[str]
+
+ match_dict = self._match(request.rel_url.raw_path)
+ if match_dict is None:
+ return None, allowed_methods
+
+ for route_obj in self._routes:
+ route_method = route_obj.method
+ allowed_methods.add(route_method)
+
+ if route_method == request.method or route_method == hdrs.METH_ANY:
+ return (UrlMappingMatchInfo(match_dict, route_obj), allowed_methods)
+ else:
+ return None, allowed_methods
+
+ @abc.abstractmethod
+ def _match(self, path: str) -> Optional[Dict[str, str]]:
+ pass # pragma: no cover
+
+ def __len__(self) -> int:
+ return len(self._routes)
+
+ def __iter__(self) -> Iterator[AbstractRoute]:
+ return iter(self._routes)
+
+ # TODO: implement all abstract methods
+
+
+class PlainResource(Resource):
+ def __init__(self, path: str, *, name: Optional[str] = None) -> None:
+ super().__init__(name=name)
+ assert not path or path.startswith("/")
+ self._path = path
+
+ @property
+ def canonical(self) -> str:
+ return self._path
+
+ def freeze(self) -> None:
+ if not self._path:
+ self._path = "/"
+
+ def add_prefix(self, prefix: str) -> None:
+ assert prefix.startswith("/")
+ assert not prefix.endswith("/")
+ assert len(prefix) > 1
+ self._path = prefix + self._path
+
+ def _match(self, path: str) -> Optional[Dict[str, str]]:
+ # string comparison is about 10 times faster than regexp matching
+ if self._path == path:
+ return {}
+ else:
+ return None
+
+ def raw_match(self, path: str) -> bool:
+ return self._path == path
+
+ def get_info(self) -> _InfoDict:
+ return {"path": self._path}
+
+ def url_for(self) -> URL: # type: ignore
+ return URL.build(path=self._path, encoded=True)
+
+ def __repr__(self) -> str:
+ name = "'" + self.name + "' " if self.name is not None else ""
+ return f"<PlainResource {name} {self._path}>"
+
+
+class DynamicResource(Resource):
+
+ DYN = re.compile(r"\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*)\}")
+ DYN_WITH_RE = re.compile(r"\{(?P<var>[_a-zA-Z][_a-zA-Z0-9]*):(?P<re>.+)\}")
+ GOOD = r"[^{}/]+"
+
+ def __init__(self, path: str, *, name: Optional[str] = None) -> None:
+ super().__init__(name=name)
+ pattern = ""
+ formatter = ""
+ for part in ROUTE_RE.split(path):
+ match = self.DYN.fullmatch(part)
+ if match:
+ pattern += "(?P<{}>{})".format(match.group("var"), self.GOOD)
+ formatter += "{" + match.group("var") + "}"
+ continue
+
+ match = self.DYN_WITH_RE.fullmatch(part)
+ if match:
+ pattern += "(?P<{var}>{re})".format(**match.groupdict())
+ formatter += "{" + match.group("var") + "}"
+ continue
+
+ if "{" in part or "}" in part:
+ raise ValueError(f"Invalid path '{path}'['{part}']")
+
+ part = _requote_path(part)
+ formatter += part
+ pattern += re.escape(part)
+
+ try:
+ compiled = re.compile(pattern)
+ except re.error as exc:
+ raise ValueError(f"Bad pattern '{pattern}': {exc}") from None
+ assert compiled.pattern.startswith(PATH_SEP)
+ assert formatter.startswith("/")
+ self._pattern = compiled
+ self._formatter = formatter
+
+ @property
+ def canonical(self) -> str:
+ return self._formatter
+
+ def add_prefix(self, prefix: str) -> None:
+ assert prefix.startswith("/")
+ assert not prefix.endswith("/")
+ assert len(prefix) > 1
+ self._pattern = re.compile(re.escape(prefix) + self._pattern.pattern)
+ self._formatter = prefix + self._formatter
+
+ def _match(self, path: str) -> Optional[Dict[str, str]]:
+ match = self._pattern.fullmatch(path)
+ if match is None:
+ return None
+ else:
+ return {
+ key: _unquote_path(value) for key, value in match.groupdict().items()
+ }
+
+ def raw_match(self, path: str) -> bool:
+ return self._formatter == path
+
+ def get_info(self) -> _InfoDict:
+ return {"formatter": self._formatter, "pattern": self._pattern}
+
+ def url_for(self, **parts: str) -> URL:
+ url = self._formatter.format_map({k: _quote_path(v) for k, v in parts.items()})
+ return URL.build(path=url, encoded=True)
+
+ def __repr__(self) -> str:
+ name = "'" + self.name + "' " if self.name is not None else ""
+ return "<DynamicResource {name} {formatter}>".format(
+ name=name, formatter=self._formatter
+ )
+
+
+class PrefixResource(AbstractResource):
+ def __init__(self, prefix: str, *, name: Optional[str] = None) -> None:
+ assert not prefix or prefix.startswith("/"), prefix
+ assert prefix in ("", "/") or not prefix.endswith("/"), prefix
+ super().__init__(name=name)
+ self._prefix = _requote_path(prefix)
+
+ @property
+ def canonical(self) -> str:
+ return self._prefix
+
+ def add_prefix(self, prefix: str) -> None:
+ assert prefix.startswith("/")
+ assert not prefix.endswith("/")
+ assert len(prefix) > 1
+ self._prefix = prefix + self._prefix
+
+ def raw_match(self, prefix: str) -> bool:
+ return False
+
+ # TODO: impl missing abstract methods
+
+
+class StaticResource(PrefixResource):
+ VERSION_KEY = "v"
+
+ def __init__(
+ self,
+ prefix: str,
+ directory: PathLike,
+ *,
+ name: Optional[str] = None,
+ expect_handler: Optional[_ExpectHandler] = None,
+ chunk_size: int = 256 * 1024,
+ show_index: bool = False,
+ follow_symlinks: bool = False,
+ append_version: bool = False,
+ ) -> None:
+ super().__init__(prefix, name=name)
+ try:
+ directory = Path(directory)
+ if str(directory).startswith("~"):
+ directory = Path(os.path.expanduser(str(directory)))
+ directory = directory.resolve()
+ if not directory.is_dir():
+ raise ValueError("Not a directory")
+ except (FileNotFoundError, ValueError) as error:
+ raise ValueError(f"No directory exists at '{directory}'") from error
+ self._directory = directory
+ self._show_index = show_index
+ self._chunk_size = chunk_size
+ self._follow_symlinks = follow_symlinks
+ self._expect_handler = expect_handler
+ self._append_version = append_version
+
+ self._routes = {
+ "GET": ResourceRoute(
+ "GET", self._handle, self, expect_handler=expect_handler
+ ),
+ "HEAD": ResourceRoute(
+ "HEAD", self._handle, self, expect_handler=expect_handler
+ ),
+ }
+
+ def url_for( # type: ignore
+ self,
+ *,
+ filename: Union[str, Path],
+ append_version: Optional[bool] = None,
+ ) -> URL:
+ if append_version is None:
+ append_version = self._append_version
+ if isinstance(filename, Path):
+ filename = str(filename)
+ filename = filename.lstrip("/")
+
+ url = URL.build(path=self._prefix, encoded=True)
+ # filename is not encoded
+ if YARL_VERSION < (1, 6):
+ url = url / filename.replace("%", "%25")
+ else:
+ url = url / filename
+
+ if append_version:
+ try:
+ filepath = self._directory.joinpath(filename).resolve()
+ if not self._follow_symlinks:
+ filepath.relative_to(self._directory)
+ except (ValueError, FileNotFoundError):
+                # ValueError is raised when the path points to a symlink
+                # and follow_symlinks is False
+ return url # relatively safe
+ if filepath.is_file():
+ # TODO cache file content
+ # with file watcher for cache invalidation
+ with filepath.open("rb") as f:
+ file_bytes = f.read()
+ h = self._get_file_hash(file_bytes)
+ url = url.with_query({self.VERSION_KEY: h})
+ return url
+ return url
+
+ @staticmethod
+ def _get_file_hash(byte_array: bytes) -> str:
+        m = hashlib.sha256()  # TODO: the hash algorithm could be made configurable
+ m.update(byte_array)
+ b64 = base64.urlsafe_b64encode(m.digest())
+ return b64.decode("ascii")
+
+ def get_info(self) -> _InfoDict:
+ return {
+ "directory": self._directory,
+ "prefix": self._prefix,
+ "routes": self._routes,
+ }
+
+ def set_options_route(self, handler: _WebHandler) -> None:
+ if "OPTIONS" in self._routes:
+ raise RuntimeError("OPTIONS route was set already")
+ self._routes["OPTIONS"] = ResourceRoute(
+ "OPTIONS", handler, self, expect_handler=self._expect_handler
+ )
+
+ async def resolve(self, request: Request) -> _Resolve:
+ path = request.rel_url.raw_path
+ method = request.method
+ allowed_methods = set(self._routes)
+ if not path.startswith(self._prefix):
+ return None, set()
+
+ if method not in allowed_methods:
+ return None, allowed_methods
+
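+        # Strip the prefix plus its following "/" so "filename" is relative
+        # to the static root.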
+ match_dict = {"filename": _unquote_path(path[len(self._prefix) + 1 :])}
+ return (UrlMappingMatchInfo(match_dict, self._routes[method]), allowed_methods)
+
+ def __len__(self) -> int:
+ return len(self._routes)
+
+ def __iter__(self) -> Iterator[AbstractRoute]:
+ return iter(self._routes.values())
+
+ async def _handle(self, request: Request) -> StreamResponse:
+ rel_url = request.match_info["filename"]
+ try:
+ filename = Path(rel_url)
+ if filename.anchor:
+                # rel_url is an absolute name like
+                # /static/\\machine_name\c$ or /static/D:\path,
+                # pointing outside the static directory entirely
+ raise HTTPForbidden()
+ filepath = self._directory.joinpath(filename).resolve()
+ if not self._follow_symlinks:
+ filepath.relative_to(self._directory)
+ except (ValueError, FileNotFoundError) as error:
+ # relatively safe
+ raise HTTPNotFound() from error
+ except HTTPForbidden:
+ raise
+ except Exception as error:
+            # permission error or some other unexpected error
+ request.app.logger.exception(error)
+ raise HTTPNotFound() from error
+
+ # on opening a dir, load its contents if allowed
+ if filepath.is_dir():
+ if self._show_index:
+ try:
+ return Response(
+ text=self._directory_as_html(filepath), content_type="text/html"
+ )
+ except PermissionError:
+ raise HTTPForbidden()
+ else:
+ raise HTTPForbidden()
+ elif filepath.is_file():
+ return FileResponse(filepath, chunk_size=self._chunk_size)
+ else:
+ raise HTTPNotFound
+
+ def _directory_as_html(self, filepath: Path) -> str:
+ # returns directory's index as html
+
+ # sanity check
+ assert filepath.is_dir()
+
+ relative_path_to_dir = filepath.relative_to(self._directory).as_posix()
+ index_of = f"Index of /{relative_path_to_dir}"
+ h1 = f"<h1>{index_of}</h1>"
+
+ index_list = []
+ dir_index = filepath.iterdir()
+ for _file in sorted(dir_index):
+ # show file url as relative to static path
+ rel_path = _file.relative_to(self._directory).as_posix()
+ file_url = self._prefix + "/" + rel_path
+
+ # if file is a directory, add '/' to the end of the name
+ if _file.is_dir():
+ file_name = f"{_file.name}/"
+ else:
+ file_name = _file.name
+
+ index_list.append(
+ '<li><a href="{url}">{name}</a></li>'.format(
+ url=file_url, name=file_name
+ )
+ )
+ ul = "<ul>\n{}\n</ul>".format("\n".join(index_list))
+ body = f"<body>\n{h1}\n{ul}\n</body>"
+
+ head_str = f"<head>\n<title>{index_of}</title>\n</head>"
+ html = f"<html>\n{head_str}\n{body}\n</html>"
+
+ return html
+
+ def __repr__(self) -> str:
+ name = "'" + self.name + "'" if self.name is not None else ""
+ return "<StaticResource {name} {path} -> {directory!r}>".format(
+ name=name, path=self._prefix, directory=self._directory
+ )
+
+
+class PrefixedSubAppResource(PrefixResource):
+ def __init__(self, prefix: str, app: "Application") -> None:
+ super().__init__(prefix)
+ self._app = app
+ for resource in app.router.resources():
+ resource.add_prefix(prefix)
+
+ def add_prefix(self, prefix: str) -> None:
+ super().add_prefix(prefix)
+ for resource in self._app.router.resources():
+ resource.add_prefix(prefix)
+
+ def url_for(self, *args: str, **kwargs: str) -> URL:
+ raise RuntimeError(".url_for() is not supported " "by sub-application root")
+
+ def get_info(self) -> _InfoDict:
+ return {"app": self._app, "prefix": self._prefix}
+
+ async def resolve(self, request: Request) -> _Resolve:
+ if (
+ not request.url.raw_path.startswith(self._prefix + "/")
+ and request.url.raw_path != self._prefix
+ ):
+ return None, set()
+ match_info = await self._app.router.resolve(request)
+ match_info.add_app(self._app)
+ if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
+ methods = match_info.http_exception.allowed_methods
+ else:
+ methods = set()
+ return match_info, methods
+
+ def __len__(self) -> int:
+ return len(self._app.router.routes())
+
+ def __iter__(self) -> Iterator[AbstractRoute]:
+ return iter(self._app.router.routes())
+
+ def __repr__(self) -> str:
+ return "<PrefixedSubAppResource {prefix} -> {app!r}>".format(
+ prefix=self._prefix, app=self._app
+ )
+
+
+class AbstractRuleMatching(abc.ABC):
+ @abc.abstractmethod # pragma: no branch
+ async def match(self, request: Request) -> bool:
+ """Return bool if the request satisfies the criteria"""
+
+ @abc.abstractmethod # pragma: no branch
+ def get_info(self) -> _InfoDict:
+ """Return a dict with additional info useful for introspection"""
+
+ @property
+ @abc.abstractmethod # pragma: no branch
+ def canonical(self) -> str:
+ """Return a str"""
+
+
+class Domain(AbstractRuleMatching):
+ re_part = re.compile(r"(?!-)[a-z\d-]{1,63}(?<!-)")
+
+ def __init__(self, domain: str) -> None:
+ super().__init__()
+ self._domain = self.validation(domain)
+
+ @property
+ def canonical(self) -> str:
+ return self._domain
+
+ def validation(self, domain: str) -> str:
+ if not isinstance(domain, str):
+ raise TypeError("Domain must be str")
+ domain = domain.rstrip(".").lower()
+ if not domain:
+ raise ValueError("Domain cannot be empty")
+ elif "://" in domain:
+ raise ValueError("Scheme not supported")
+ url = URL("http://" + domain)
+ assert url.raw_host is not None
+ if not all(self.re_part.fullmatch(x) for x in url.raw_host.split(".")):
+ raise ValueError("Domain not valid")
+ if url.port == 80:
+ return url.raw_host
+ return f"{url.raw_host}:{url.port}"
+
+ async def match(self, request: Request) -> bool:
+ host = request.headers.get(hdrs.HOST)
+ if not host:
+ return False
+ return self.match_domain(host)
+
+ def match_domain(self, host: str) -> bool:
+ return host.lower() == self._domain
+
+ def get_info(self) -> _InfoDict:
+ return {"domain": self._domain}
+
+
+class MaskDomain(Domain):
+ re_part = re.compile(r"(?!-)[a-z\d\*-]{1,63}(?<!-)")
+
+ def __init__(self, domain: str) -> None:
+ super().__init__(domain)
+ mask = self._domain.replace(".", r"\.").replace("*", ".*")
+ self._mask = re.compile(mask)
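+        # e.g. "*.example.com" compiles to the regex ".*\.example\.com"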
+
+ @property
+ def canonical(self) -> str:
+ return self._mask.pattern
+
+ def match_domain(self, host: str) -> bool:
+ return self._mask.fullmatch(host) is not None
+
+
+class MatchedSubAppResource(PrefixedSubAppResource):
+ def __init__(self, rule: AbstractRuleMatching, app: "Application") -> None:
+ AbstractResource.__init__(self)
+ self._prefix = ""
+ self._app = app
+ self._rule = rule
+
+ @property
+ def canonical(self) -> str:
+ return self._rule.canonical
+
+ def get_info(self) -> _InfoDict:
+ return {"app": self._app, "rule": self._rule}
+
+ async def resolve(self, request: Request) -> _Resolve:
+ if not await self._rule.match(request):
+ return None, set()
+ match_info = await self._app.router.resolve(request)
+ match_info.add_app(self._app)
+ if isinstance(match_info.http_exception, HTTPMethodNotAllowed):
+ methods = match_info.http_exception.allowed_methods
+ else:
+ methods = set()
+ return match_info, methods
+
+ def __repr__(self) -> str:
+ return "<MatchedSubAppResource -> {app!r}>" "".format(app=self._app)
+
+
+class ResourceRoute(AbstractRoute):
+ """A route with resource"""
+
+ def __init__(
+ self,
+ method: str,
+ handler: Union[_WebHandler, Type[AbstractView]],
+ resource: AbstractResource,
+ *,
+ expect_handler: Optional[_ExpectHandler] = None,
+ ) -> None:
+ super().__init__(
+ method, handler, expect_handler=expect_handler, resource=resource
+ )
+
+ def __repr__(self) -> str:
+ return "<ResourceRoute [{method}] {resource} -> {handler!r}".format(
+ method=self.method, resource=self._resource, handler=self.handler
+ )
+
+ @property
+ def name(self) -> Optional[str]:
+ if self._resource is None:
+ return None
+ return self._resource.name
+
+ def url_for(self, *args: str, **kwargs: str) -> URL:
+ """Construct url for route with additional params."""
+ assert self._resource is not None
+ return self._resource.url_for(*args, **kwargs)
+
+ def get_info(self) -> _InfoDict:
+ assert self._resource is not None
+ return self._resource.get_info()
+
+
+class SystemRoute(AbstractRoute):
+ def __init__(self, http_exception: HTTPException) -> None:
+ super().__init__(hdrs.METH_ANY, self._handle)
+ self._http_exception = http_exception
+
+ def url_for(self, *args: str, **kwargs: str) -> URL:
+ raise RuntimeError(".url_for() is not allowed for SystemRoute")
+
+ @property
+ def name(self) -> Optional[str]:
+ return None
+
+ def get_info(self) -> _InfoDict:
+ return {"http_exception": self._http_exception}
+
+ async def _handle(self, request: Request) -> StreamResponse:
+ raise self._http_exception
+
+ @property
+ def status(self) -> int:
+ return self._http_exception.status
+
+ @property
+ def reason(self) -> str:
+ return self._http_exception.reason
+
+ def __repr__(self) -> str:
+ return "<SystemRoute {self.status}: {self.reason}>".format(self=self)
+
+
+class View(AbstractView):
+ async def _iter(self) -> StreamResponse:
+ if self.request.method not in hdrs.METH_ALL:
+ self._raise_allowed_methods()
+ method = getattr(self, self.request.method.lower(), None)
+ if method is None:
+ self._raise_allowed_methods()
+ resp = await method()
+ return resp
+
+ def __await__(self) -> Generator[Any, None, StreamResponse]:
+ return self._iter().__await__()
+
+ def _raise_allowed_methods(self) -> None:
+ allowed_methods = {m for m in hdrs.METH_ALL if hasattr(self, m.lower())}
+ raise HTTPMethodNotAllowed(self.request.method, allowed_methods)
+
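+# Usage sketch (illustrative, not part of this module): subclass View and
+# define async methods named after HTTP verbs, then register the class with
+# UrlDispatcher.add_view().
+#
+#     class MyView(View):
+#         async def get(self) -> StreamResponse:
+#             return Response(text="GET handled")
+#
+#         async def post(self) -> StreamResponse:
+#             return Response(text="POST handled")
+#
+#     app.router.add_view("/resource", MyView)
+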
+
+class ResourcesView(Sized, Iterable[AbstractResource], Container[AbstractResource]):
+ def __init__(self, resources: List[AbstractResource]) -> None:
+ self._resources = resources
+
+ def __len__(self) -> int:
+ return len(self._resources)
+
+ def __iter__(self) -> Iterator[AbstractResource]:
+ yield from self._resources
+
+ def __contains__(self, resource: object) -> bool:
+ return resource in self._resources
+
+
+class RoutesView(Sized, Iterable[AbstractRoute], Container[AbstractRoute]):
+ def __init__(self, resources: List[AbstractResource]):
+ self._routes = [] # type: List[AbstractRoute]
+ for resource in resources:
+ for route in resource:
+ self._routes.append(route)
+
+ def __len__(self) -> int:
+ return len(self._routes)
+
+ def __iter__(self) -> Iterator[AbstractRoute]:
+ yield from self._routes
+
+ def __contains__(self, route: object) -> bool:
+ return route in self._routes
+
+
+class UrlDispatcher(AbstractRouter, Mapping[str, AbstractResource]):
+
+ NAME_SPLIT_RE = re.compile(r"[.:-]")
+
+ def __init__(self) -> None:
+ super().__init__()
+ self._resources = [] # type: List[AbstractResource]
+ self._named_resources = {} # type: Dict[str, AbstractResource]
+
+ async def resolve(self, request: Request) -> AbstractMatchInfo:
+ method = request.method
+ allowed_methods = set() # type: Set[str]
+
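+        # Resources are tried in registration order; the first match wins.
+        # If nothing matches, the collected allowed_methods decides between
+        # 405 (some resource knows the path) and 404 (nothing does).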
+ for resource in self._resources:
+ match_dict, allowed = await resource.resolve(request)
+ if match_dict is not None:
+ return match_dict
+ else:
+ allowed_methods |= allowed
+ else:
+ if allowed_methods:
+ return MatchInfoError(HTTPMethodNotAllowed(method, allowed_methods))
+ else:
+ return MatchInfoError(HTTPNotFound())
+
+ def __iter__(self) -> Iterator[str]:
+ return iter(self._named_resources)
+
+ def __len__(self) -> int:
+ return len(self._named_resources)
+
+ def __contains__(self, resource: object) -> bool:
+ return resource in self._named_resources
+
+ def __getitem__(self, name: str) -> AbstractResource:
+ return self._named_resources[name]
+
+ def resources(self) -> ResourcesView:
+ return ResourcesView(self._resources)
+
+ def routes(self) -> RoutesView:
+ return RoutesView(self._resources)
+
+ def named_resources(self) -> Mapping[str, AbstractResource]:
+ return MappingProxyType(self._named_resources)
+
+ def register_resource(self, resource: AbstractResource) -> None:
+ assert isinstance(
+ resource, AbstractResource
+ ), f"Instance of AbstractResource class is required, got {resource!r}"
+ if self.frozen:
+ raise RuntimeError("Cannot register a resource into frozen router.")
+
+ name = resource.name
+
+ if name is not None:
+ parts = self.NAME_SPLIT_RE.split(name)
+ for part in parts:
+ if keyword.iskeyword(part):
+ raise ValueError(
+ f"Incorrect route name {name!r}, "
+ "python keywords cannot be used "
+ "for route name"
+ )
+                if not part.isidentifier():
+                    raise ValueError(
+                        "Incorrect route name {!r}, "
+                        "the name should be a sequence of "
+                        "python identifiers separated "
+                        "by dash, dot or colon".format(name)
+                    )
+ if name in self._named_resources:
+ raise ValueError(
+ "Duplicate {!r}, "
+ "already handled by {!r}".format(name, self._named_resources[name])
+ )
+ self._named_resources[name] = resource
+ self._resources.append(resource)
+
+ def add_resource(self, path: str, *, name: Optional[str] = None) -> Resource:
+ if path and not path.startswith("/"):
+ raise ValueError("path should be started with / or be empty")
+ # Reuse last added resource if path and name are the same
+ if self._resources:
+ resource = self._resources[-1]
+ if resource.name == name and resource.raw_match(path):
+ return cast(Resource, resource)
+ if not ("{" in path or "}" in path or ROUTE_RE.search(path)):
+ resource = PlainResource(_requote_path(path), name=name)
+ self.register_resource(resource)
+ return resource
+ resource = DynamicResource(path, name=name)
+ self.register_resource(resource)
+ return resource
+
+ def add_route(
+ self,
+ method: str,
+ path: str,
+ handler: Union[_WebHandler, Type[AbstractView]],
+ *,
+ name: Optional[str] = None,
+ expect_handler: Optional[_ExpectHandler] = None,
+ ) -> AbstractRoute:
+ resource = self.add_resource(path, name=name)
+ return resource.add_route(method, handler, expect_handler=expect_handler)
+
+ def add_static(
+ self,
+ prefix: str,
+ path: PathLike,
+ *,
+ name: Optional[str] = None,
+ expect_handler: Optional[_ExpectHandler] = None,
+ chunk_size: int = 256 * 1024,
+ show_index: bool = False,
+ follow_symlinks: bool = False,
+ append_version: bool = False,
+ ) -> AbstractResource:
+ """Add static files view.
+
+ prefix - url prefix
+ path - folder with files
+
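+        Usage sketch (illustrative):
+
+            app.router.add_static("/static", "path/to/static")
+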
+ """
+ assert prefix.startswith("/")
+ if prefix.endswith("/"):
+ prefix = prefix[:-1]
+ resource = StaticResource(
+ prefix,
+ path,
+ name=name,
+ expect_handler=expect_handler,
+ chunk_size=chunk_size,
+ show_index=show_index,
+ follow_symlinks=follow_symlinks,
+ append_version=append_version,
+ )
+ self.register_resource(resource)
+ return resource
+
+ def add_head(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute:
+ """
+ Shortcut for add_route with method HEAD
+ """
+ return self.add_route(hdrs.METH_HEAD, path, handler, **kwargs)
+
+ def add_options(
+ self, path: str, handler: _WebHandler, **kwargs: Any
+ ) -> AbstractRoute:
+ """
+ Shortcut for add_route with method OPTIONS
+ """
+ return self.add_route(hdrs.METH_OPTIONS, path, handler, **kwargs)
+
+ def add_get(
+ self,
+ path: str,
+ handler: _WebHandler,
+ *,
+ name: Optional[str] = None,
+ allow_head: bool = True,
+ **kwargs: Any,
+ ) -> AbstractRoute:
+ """
+        Shortcut for add_route with method GET. If allow_head is true,
+        another route is added to allow HEAD requests to the same endpoint.
+ """
+ resource = self.add_resource(path, name=name)
+ if allow_head:
+ resource.add_route(hdrs.METH_HEAD, handler, **kwargs)
+ return resource.add_route(hdrs.METH_GET, handler, **kwargs)
+
+ def add_post(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute:
+ """
+ Shortcut for add_route with method POST
+ """
+ return self.add_route(hdrs.METH_POST, path, handler, **kwargs)
+
+ def add_put(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute:
+ """
+ Shortcut for add_route with method PUT
+ """
+ return self.add_route(hdrs.METH_PUT, path, handler, **kwargs)
+
+ def add_patch(
+ self, path: str, handler: _WebHandler, **kwargs: Any
+ ) -> AbstractRoute:
+ """
+ Shortcut for add_route with method PATCH
+ """
+ return self.add_route(hdrs.METH_PATCH, path, handler, **kwargs)
+
+ def add_delete(
+ self, path: str, handler: _WebHandler, **kwargs: Any
+ ) -> AbstractRoute:
+ """
+ Shortcut for add_route with method DELETE
+ """
+ return self.add_route(hdrs.METH_DELETE, path, handler, **kwargs)
+
+ def add_view(
+ self, path: str, handler: Type[AbstractView], **kwargs: Any
+ ) -> AbstractRoute:
+ """
+ Shortcut for add_route with ANY methods for a class-based view
+ """
+ return self.add_route(hdrs.METH_ANY, path, handler, **kwargs)
+
+ def freeze(self) -> None:
+ super().freeze()
+ for resource in self._resources:
+ resource.freeze()
+
+ def add_routes(self, routes: Iterable[AbstractRouteDef]) -> List[AbstractRoute]:
+ """Append routes to route table.
+
+ Parameter should be a sequence of RouteDef objects.
+
+ Returns a list of registered AbstractRoute instances.
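+
+        Usage sketch (illustrative; handler names are assumptions):
+
+            routes = [web.get("/", index), web.post("/data", handle_post)]
+            app.router.add_routes(routes)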
+ """
+ registered_routes = []
+ for route_def in routes:
+ registered_routes.extend(route_def.register(self))
+ return registered_routes
+
+
+def _quote_path(value: str) -> str:
+ if YARL_VERSION < (1, 6):
+ value = value.replace("%", "%25")
+ return URL.build(path=value, encoded=False).raw_path
+
+
+def _unquote_path(value: str) -> str:
+ return URL.build(path=value, encoded=True).path
+
+
+def _requote_path(value: str) -> str:
+ # Quote non-ascii characters and other characters which must be quoted,
+ # but preserve existing %-sequences.
+ result = _quote_path(value)
+ if "%" in value:
+ result = result.replace("%25", "%")
+ return result
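+
+
+# Behavior sketch (derived from the helpers above; values are illustrative):
+#   _quote_path("/a b")      -> "/a%20b"    (unsafe characters get quoted)
+#   _requote_path("/a%20b")  -> "/a%20b"    (existing %-escapes are preserved)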
diff --git a/third_party/python/aiohttp/aiohttp/web_ws.py b/third_party/python/aiohttp/aiohttp/web_ws.py
new file mode 100644
index 0000000000..da7ce6df1c
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/web_ws.py
@@ -0,0 +1,481 @@
+import asyncio
+import base64
+import binascii
+import hashlib
+import json
+from typing import Any, Iterable, Optional, Tuple
+
+import async_timeout
+import attr
+from multidict import CIMultiDict
+
+from . import hdrs
+from .abc import AbstractStreamWriter
+from .helpers import call_later, set_result
+from .http import (
+ WS_CLOSED_MESSAGE,
+ WS_CLOSING_MESSAGE,
+ WS_KEY,
+ WebSocketError,
+ WebSocketReader,
+ WebSocketWriter,
+ WSMessage,
+ WSMsgType as WSMsgType,
+ ws_ext_gen,
+ ws_ext_parse,
+)
+from .log import ws_logger
+from .streams import EofStream, FlowControlDataQueue
+from .typedefs import JSONDecoder, JSONEncoder
+from .web_exceptions import HTTPBadRequest, HTTPException
+from .web_request import BaseRequest
+from .web_response import StreamResponse
+
+__all__ = (
+ "WebSocketResponse",
+ "WebSocketReady",
+ "WSMsgType",
+)
+
+THRESHOLD_CONNLOST_ACCESS = 5
+
+
+@attr.s(auto_attribs=True, frozen=True, slots=True)
+class WebSocketReady:
+ ok: bool
+ protocol: Optional[str]
+
+ def __bool__(self) -> bool:
+ return self.ok
+
+
+class WebSocketResponse(StreamResponse):
+
+ _length_check = False
+
+ def __init__(
+ self,
+ *,
+ timeout: float = 10.0,
+ receive_timeout: Optional[float] = None,
+ autoclose: bool = True,
+ autoping: bool = True,
+ heartbeat: Optional[float] = None,
+ protocols: Iterable[str] = (),
+ compress: bool = True,
+ max_msg_size: int = 4 * 1024 * 1024,
+ ) -> None:
+ super().__init__(status=101)
+ self._protocols = protocols
+ self._ws_protocol = None # type: Optional[str]
+ self._writer = None # type: Optional[WebSocketWriter]
+ self._reader = None # type: Optional[FlowControlDataQueue[WSMessage]]
+ self._closed = False
+ self._closing = False
+ self._conn_lost = 0
+ self._close_code = None # type: Optional[int]
+ self._loop = None # type: Optional[asyncio.AbstractEventLoop]
+ self._waiting = None # type: Optional[asyncio.Future[bool]]
+ self._exception = None # type: Optional[BaseException]
+ self._timeout = timeout
+ self._receive_timeout = receive_timeout
+ self._autoclose = autoclose
+ self._autoping = autoping
+ self._heartbeat = heartbeat
+ self._heartbeat_cb = None
+ if heartbeat is not None:
+ self._pong_heartbeat = heartbeat / 2.0
+ self._pong_response_cb = None
+ self._compress = compress
+ self._max_msg_size = max_msg_size
+
+ def _cancel_heartbeat(self) -> None:
+ if self._pong_response_cb is not None:
+ self._pong_response_cb.cancel()
+ self._pong_response_cb = None
+
+ if self._heartbeat_cb is not None:
+ self._heartbeat_cb.cancel()
+ self._heartbeat_cb = None
+
+ def _reset_heartbeat(self) -> None:
+ self._cancel_heartbeat()
+
+ if self._heartbeat is not None:
+ self._heartbeat_cb = call_later(
+ self._send_heartbeat, self._heartbeat, self._loop
+ )
+
+ def _send_heartbeat(self) -> None:
+ if self._heartbeat is not None and not self._closed:
+            # Firing and forgetting a task is not perfect, but it is
+            # acceptable for sending a ping; the alternative would be a
+            # long-lived heartbeat task on the class.
+ self._loop.create_task(self._writer.ping()) # type: ignore
+
+ if self._pong_response_cb is not None:
+ self._pong_response_cb.cancel()
+ self._pong_response_cb = call_later(
+ self._pong_not_received, self._pong_heartbeat, self._loop
+ )
+
+ def _pong_not_received(self) -> None:
+ if self._req is not None and self._req.transport is not None:
+ self._closed = True
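+            # 1006 is the "abnormal closure" code (RFC 6455): the connection
+            # dropped without a proper close frame.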
+ self._close_code = 1006
+ self._exception = asyncio.TimeoutError()
+ self._req.transport.close()
+
+ async def prepare(self, request: BaseRequest) -> AbstractStreamWriter:
+        # run the pre-check first so its errors are not hidden by do_handshake() exceptions
+ if self._payload_writer is not None:
+ return self._payload_writer
+
+ protocol, writer = self._pre_start(request)
+ payload_writer = await super().prepare(request)
+ assert payload_writer is not None
+ self._post_start(request, protocol, writer)
+ await payload_writer.drain()
+ return payload_writer
+
+ def _handshake(
+ self, request: BaseRequest
+ ) -> Tuple["CIMultiDict[str]", str, bool, bool]:
+ headers = request.headers
+ if "websocket" != headers.get(hdrs.UPGRADE, "").lower().strip():
+ raise HTTPBadRequest(
+ text=(
+ "No WebSocket UPGRADE hdr: {}\n Can "
+ '"Upgrade" only to "WebSocket".'
+ ).format(headers.get(hdrs.UPGRADE))
+ )
+
+ if "upgrade" not in headers.get(hdrs.CONNECTION, "").lower():
+ raise HTTPBadRequest(
+ text="No CONNECTION upgrade hdr: {}".format(
+ headers.get(hdrs.CONNECTION)
+ )
+ )
+
+ # find common sub-protocol between client and server
+ protocol = None
+ if hdrs.SEC_WEBSOCKET_PROTOCOL in headers:
+ req_protocols = [
+ str(proto.strip())
+ for proto in headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
+ ]
+
+ for proto in req_protocols:
+ if proto in self._protocols:
+ protocol = proto
+ break
+ else:
+ # No overlap found: Return no protocol as per spec
+ ws_logger.warning(
+ "Client protocols %r don’t overlap server-known ones %r",
+ req_protocols,
+ self._protocols,
+ )
+
+ # check supported version
+ version = headers.get(hdrs.SEC_WEBSOCKET_VERSION, "")
+ if version not in ("13", "8", "7"):
+ raise HTTPBadRequest(text=f"Unsupported version: {version}")
+
+ # check client handshake for validity
+ key = headers.get(hdrs.SEC_WEBSOCKET_KEY)
+ try:
+ if not key or len(base64.b64decode(key)) != 16:
+ raise HTTPBadRequest(text=f"Handshake error: {key!r}")
+ except binascii.Error:
+ raise HTTPBadRequest(text=f"Handshake error: {key!r}") from None
+
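+        # Per RFC 6455, the accept token is base64(SHA-1(client key + WS GUID));
+        # WS_KEY holds the fixed GUID bytes.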
+ accept_val = base64.b64encode(
+ hashlib.sha1(key.encode() + WS_KEY).digest()
+ ).decode()
+ response_headers = CIMultiDict( # type: ignore
+ {
+ hdrs.UPGRADE: "websocket", # type: ignore
+ hdrs.CONNECTION: "upgrade",
+ hdrs.SEC_WEBSOCKET_ACCEPT: accept_val,
+ }
+ )
+
+ notakeover = False
+ compress = 0
+ if self._compress:
+ extensions = headers.get(hdrs.SEC_WEBSOCKET_EXTENSIONS)
+            # The server-side call always returns without raising an exception;
+            # if anything goes wrong, the compress extension is simply dropped.
+ compress, notakeover = ws_ext_parse(extensions, isserver=True)
+ if compress:
+ enabledext = ws_ext_gen(
+ compress=compress, isserver=True, server_notakeover=notakeover
+ )
+ response_headers[hdrs.SEC_WEBSOCKET_EXTENSIONS] = enabledext
+
+ if protocol:
+ response_headers[hdrs.SEC_WEBSOCKET_PROTOCOL] = protocol
+ return (response_headers, protocol, compress, notakeover) # type: ignore
+
+ def _pre_start(self, request: BaseRequest) -> Tuple[str, WebSocketWriter]:
+ self._loop = request._loop
+
+ headers, protocol, compress, notakeover = self._handshake(request)
+
+ self.set_status(101)
+ self.headers.update(headers)
+ self.force_close()
+ self._compress = compress
+ transport = request._protocol.transport
+ assert transport is not None
+ writer = WebSocketWriter(
+ request._protocol, transport, compress=compress, notakeover=notakeover
+ )
+
+ return protocol, writer
+
+ def _post_start(
+ self, request: BaseRequest, protocol: str, writer: WebSocketWriter
+ ) -> None:
+ self._ws_protocol = protocol
+ self._writer = writer
+
+ self._reset_heartbeat()
+
+ loop = self._loop
+ assert loop is not None
+ self._reader = FlowControlDataQueue(request._protocol, 2 ** 16, loop=loop)
+ request.protocol.set_parser(
+ WebSocketReader(self._reader, self._max_msg_size, compress=self._compress)
+ )
+ # disable HTTP keepalive for WebSocket
+ request.protocol.keep_alive(False)
+
+ def can_prepare(self, request: BaseRequest) -> WebSocketReady:
+ if self._writer is not None:
+ raise RuntimeError("Already started")
+ try:
+ _, protocol, _, _ = self._handshake(request)
+ except HTTPException:
+ return WebSocketReady(False, None)
+ else:
+ return WebSocketReady(True, protocol)
+
+ @property
+ def closed(self) -> bool:
+ return self._closed
+
+ @property
+ def close_code(self) -> Optional[int]:
+ return self._close_code
+
+ @property
+ def ws_protocol(self) -> Optional[str]:
+ return self._ws_protocol
+
+ @property
+ def compress(self) -> bool:
+ return self._compress
+
+ def exception(self) -> Optional[BaseException]:
+ return self._exception
+
+ async def ping(self, message: bytes = b"") -> None:
+ if self._writer is None:
+ raise RuntimeError("Call .prepare() first")
+ await self._writer.ping(message)
+
+ async def pong(self, message: bytes = b"") -> None:
+ # unsolicited pong
+ if self._writer is None:
+ raise RuntimeError("Call .prepare() first")
+ await self._writer.pong(message)
+
+ async def send_str(self, data: str, compress: Optional[bool] = None) -> None:
+ if self._writer is None:
+ raise RuntimeError("Call .prepare() first")
+ if not isinstance(data, str):
+ raise TypeError("data argument must be str (%r)" % type(data))
+ await self._writer.send(data, binary=False, compress=compress)
+
+ async def send_bytes(self, data: bytes, compress: Optional[bool] = None) -> None:
+ if self._writer is None:
+ raise RuntimeError("Call .prepare() first")
+ if not isinstance(data, (bytes, bytearray, memoryview)):
+ raise TypeError("data argument must be byte-ish (%r)" % type(data))
+ await self._writer.send(data, binary=True, compress=compress)
+
+ async def send_json(
+ self,
+ data: Any,
+ compress: Optional[bool] = None,
+ *,
+ dumps: JSONEncoder = json.dumps,
+ ) -> None:
+ await self.send_str(dumps(data), compress=compress)
+
+ async def write_eof(self) -> None: # type: ignore
+ if self._eof_sent:
+ return
+ if self._payload_writer is None:
+ raise RuntimeError("Response has not been started")
+
+ await self.close()
+ self._eof_sent = True
+
+ async def close(self, *, code: int = 1000, message: bytes = b"") -> bool:
+ if self._writer is None:
+ raise RuntimeError("Call .prepare() first")
+
+ self._cancel_heartbeat()
+ reader = self._reader
+ assert reader is not None
+
+        # we need to break the `receive()` cycle first,
+        # since `close()` may be called from a different task
+ if self._waiting is not None and not self._closed:
+ reader.feed_data(WS_CLOSING_MESSAGE, 0)
+ await self._waiting
+
+ if not self._closed:
+ self._closed = True
+ try:
+ await self._writer.close(code, message)
+ writer = self._payload_writer
+ assert writer is not None
+ await writer.drain()
+ except (asyncio.CancelledError, asyncio.TimeoutError):
+ self._close_code = 1006
+ raise
+ except Exception as exc:
+ self._close_code = 1006
+ self._exception = exc
+ return True
+
+ if self._closing:
+ return True
+
+ reader = self._reader
+ assert reader is not None
+ try:
+ with async_timeout.timeout(self._timeout, loop=self._loop):
+ msg = await reader.read()
+ except asyncio.CancelledError:
+ self._close_code = 1006
+ raise
+ except Exception as exc:
+ self._close_code = 1006
+ self._exception = exc
+ return True
+
+ if msg.type == WSMsgType.CLOSE:
+ self._close_code = msg.data
+ return True
+
+ self._close_code = 1006
+ self._exception = asyncio.TimeoutError()
+ return True
+ else:
+ return False
+
+ async def receive(self, timeout: Optional[float] = None) -> WSMessage:
+ if self._reader is None:
+ raise RuntimeError("Call .prepare() first")
+
+ loop = self._loop
+ assert loop is not None
+ while True:
+ if self._waiting is not None:
+ raise RuntimeError("Concurrent call to receive() is not allowed")
+
+ if self._closed:
+ self._conn_lost += 1
+ if self._conn_lost >= THRESHOLD_CONNLOST_ACCESS:
+ raise RuntimeError("WebSocket connection is closed.")
+ return WS_CLOSED_MESSAGE
+ elif self._closing:
+ return WS_CLOSING_MESSAGE
+
+ try:
+ self._waiting = loop.create_future()
+ try:
+ with async_timeout.timeout(
+ timeout or self._receive_timeout, loop=self._loop
+ ):
+ msg = await self._reader.read()
+ self._reset_heartbeat()
+ finally:
+ waiter = self._waiting
+ set_result(waiter, True)
+ self._waiting = None
+ except (asyncio.CancelledError, asyncio.TimeoutError):
+ self._close_code = 1006
+ raise
+ except EofStream:
+ self._close_code = 1000
+ await self.close()
+ return WSMessage(WSMsgType.CLOSED, None, None)
+ except WebSocketError as exc:
+ self._close_code = exc.code
+ await self.close(code=exc.code)
+ return WSMessage(WSMsgType.ERROR, exc, None)
+ except Exception as exc:
+ self._exception = exc
+ self._closing = True
+ self._close_code = 1006
+ await self.close()
+ return WSMessage(WSMsgType.ERROR, exc, None)
+
+ if msg.type == WSMsgType.CLOSE:
+ self._closing = True
+ self._close_code = msg.data
+ if not self._closed and self._autoclose:
+ await self.close()
+ elif msg.type == WSMsgType.CLOSING:
+ self._closing = True
+ elif msg.type == WSMsgType.PING and self._autoping:
+ await self.pong(msg.data)
+ continue
+ elif msg.type == WSMsgType.PONG and self._autoping:
+ continue
+
+ return msg
+
+ async def receive_str(self, *, timeout: Optional[float] = None) -> str:
+ msg = await self.receive(timeout)
+ if msg.type != WSMsgType.TEXT:
+ raise TypeError(
+ "Received message {}:{!r} is not WSMsgType.TEXT".format(
+ msg.type, msg.data
+ )
+ )
+ return msg.data
+
+ async def receive_bytes(self, *, timeout: Optional[float] = None) -> bytes:
+ msg = await self.receive(timeout)
+ if msg.type != WSMsgType.BINARY:
+ raise TypeError(f"Received message {msg.type}:{msg.data!r} is not bytes")
+ return msg.data
+
+ async def receive_json(
+ self, *, loads: JSONDecoder = json.loads, timeout: Optional[float] = None
+ ) -> Any:
+ data = await self.receive_str(timeout=timeout)
+ return loads(data)
+
+ async def write(self, data: bytes) -> None:
+ raise RuntimeError("Cannot call .write() for websocket")
+
+ def __aiter__(self) -> "WebSocketResponse":
+ return self
+
+ async def __anext__(self) -> WSMessage:
+ msg = await self.receive()
+ if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING, WSMsgType.CLOSED):
+ raise StopAsyncIteration
+ return msg
+
+ def _cancel(self, exc: BaseException) -> None:
+ if self._reader is not None:
+ self._reader.set_exception(exc)
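+
+
+# Usage sketch (illustrative, not part of this module): a minimal echo handler.
+#
+#     async def websocket_echo(request: BaseRequest) -> WebSocketResponse:
+#         ws = WebSocketResponse()
+#         await ws.prepare(request)
+#         async for msg in ws:
+#             if msg.type == WSMsgType.TEXT:
+#                 await ws.send_str(msg.data)
+#         return ws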
diff --git a/third_party/python/aiohttp/aiohttp/worker.py b/third_party/python/aiohttp/aiohttp/worker.py
new file mode 100644
index 0000000000..67b244bbd3
--- /dev/null
+++ b/third_party/python/aiohttp/aiohttp/worker.py
@@ -0,0 +1,252 @@
+"""Async gunicorn worker for aiohttp.web"""
+
+import asyncio
+import os
+import re
+import signal
+import sys
+from types import FrameType
+from typing import Any, Awaitable, Callable, Optional, Union # noqa
+
+from gunicorn.config import AccessLogFormat as GunicornAccessLogFormat
+from gunicorn.workers import base
+
+from aiohttp import web
+
+from .helpers import set_result
+from .web_app import Application
+from .web_log import AccessLogger
+
+try:
+ import ssl
+
+ SSLContext = ssl.SSLContext
+except ImportError: # pragma: no cover
+ ssl = None # type: ignore
+ SSLContext = object # type: ignore
+
+
+__all__ = ("GunicornWebWorker", "GunicornUVLoopWebWorker", "GunicornTokioWebWorker")
+
+
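+# Usage sketch (illustrative; module and app names are assumptions):
+#
+#     gunicorn myapp:app --bind localhost:8080 \
+#         --worker-class aiohttp.GunicornWebWorker
+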
+class GunicornWebWorker(base.Worker):
+
+ DEFAULT_AIOHTTP_LOG_FORMAT = AccessLogger.LOG_FORMAT
+ DEFAULT_GUNICORN_LOG_FORMAT = GunicornAccessLogFormat.default
+
+ def __init__(self, *args: Any, **kw: Any) -> None: # pragma: no cover
+ super().__init__(*args, **kw)
+
+ self._task = None # type: Optional[asyncio.Task[None]]
+ self.exit_code = 0
+ self._notify_waiter = None # type: Optional[asyncio.Future[bool]]
+
+ def init_process(self) -> None:
+ # create new event_loop after fork
+ asyncio.get_event_loop().close()
+
+ self.loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(self.loop)
+
+ super().init_process()
+
+ def run(self) -> None:
+ self._task = self.loop.create_task(self._run())
+
+ try: # ignore all finalization problems
+ self.loop.run_until_complete(self._task)
+ except Exception:
+ self.log.exception("Exception in gunicorn worker")
+ if sys.version_info >= (3, 6):
+ self.loop.run_until_complete(self.loop.shutdown_asyncgens())
+ self.loop.close()
+
+ sys.exit(self.exit_code)
+
+ async def _run(self) -> None:
+ if isinstance(self.wsgi, Application):
+ app = self.wsgi
+ elif asyncio.iscoroutinefunction(self.wsgi):
+ app = await self.wsgi()
+ else:
+ raise RuntimeError(
+ "wsgi app should be either Application or "
+ "async function returning Application, got {}".format(self.wsgi)
+ )
+ access_log = self.log.access_log if self.cfg.accesslog else None
+ runner = web.AppRunner(
+ app,
+ logger=self.log,
+ keepalive_timeout=self.cfg.keepalive,
+ access_log=access_log,
+ access_log_format=self._get_valid_log_format(self.cfg.access_log_format),
+ )
+ await runner.setup()
+
+ ctx = self._create_ssl_context(self.cfg) if self.cfg.is_ssl else None
+
+ assert runner is not None
+ server = runner.server
+ assert server is not None
+ for sock in self.sockets:
+ site = web.SockSite(
+ runner,
+ sock,
+ ssl_context=ctx,
+ shutdown_timeout=self.cfg.graceful_timeout / 100 * 95,
+ )
+ await site.start()
+
+ # If our parent changed then we shut down.
+ pid = os.getpid()
+ try:
+ while self.alive: # type: ignore
+ self.notify()
+
+ cnt = server.requests_count
+ if self.cfg.max_requests and cnt > self.cfg.max_requests:
+ self.alive = False
+ self.log.info("Max requests, shutting down: %s", self)
+
+ elif pid == os.getpid() and self.ppid != os.getppid():
+ self.alive = False
+ self.log.info("Parent changed, shutting down: %s", self)
+ else:
+ await self._wait_next_notify()
+ except BaseException:
+ pass
+
+ await runner.cleanup()
+
+ def _wait_next_notify(self) -> "asyncio.Future[bool]":
+ self._notify_waiter_done()
+
+ loop = self.loop
+ assert loop is not None
+ self._notify_waiter = waiter = loop.create_future()
+ self.loop.call_later(1.0, self._notify_waiter_done, waiter)
+
+ return waiter
+
+ def _notify_waiter_done(
+ self, waiter: Optional["asyncio.Future[bool]"] = None
+ ) -> None:
+ if waiter is None:
+ waiter = self._notify_waiter
+ if waiter is not None:
+ set_result(waiter, True)
+
+ if waiter is self._notify_waiter:
+ self._notify_waiter = None
+
+ def init_signals(self) -> None:
+ # Set up signals through the event loop API.
+
+ self.loop.add_signal_handler(
+ signal.SIGQUIT, self.handle_quit, signal.SIGQUIT, None
+ )
+
+ self.loop.add_signal_handler(
+ signal.SIGTERM, self.handle_exit, signal.SIGTERM, None
+ )
+
+ self.loop.add_signal_handler(
+ signal.SIGINT, self.handle_quit, signal.SIGINT, None
+ )
+
+ self.loop.add_signal_handler(
+ signal.SIGWINCH, self.handle_winch, signal.SIGWINCH, None
+ )
+
+ self.loop.add_signal_handler(
+ signal.SIGUSR1, self.handle_usr1, signal.SIGUSR1, None
+ )
+
+ self.loop.add_signal_handler(
+ signal.SIGABRT, self.handle_abort, signal.SIGABRT, None
+ )
+
+ # Don't let SIGTERM and SIGUSR1 disturb active requests
+ # by interrupting system calls
+ signal.siginterrupt(signal.SIGTERM, False)
+ signal.siginterrupt(signal.SIGUSR1, False)
+
+ def handle_quit(self, sig: int, frame: FrameType) -> None:
+ self.alive = False
+
+ # worker_int callback
+ self.cfg.worker_int(self)
+
+ # wakeup closing process
+ self._notify_waiter_done()
+
+ def handle_abort(self, sig: int, frame: FrameType) -> None:
+ self.alive = False
+ self.exit_code = 1
+ self.cfg.worker_abort(self)
+ sys.exit(1)
+
+ @staticmethod
+ def _create_ssl_context(cfg: Any) -> "SSLContext":
+ """Creates SSLContext instance for usage in asyncio.create_server.
+
+ See ssl.SSLSocket.__init__ for more details.
+ """
+ if ssl is None: # pragma: no cover
+ raise RuntimeError("SSL is not supported.")
+
+ ctx = ssl.SSLContext(cfg.ssl_version)
+ ctx.load_cert_chain(cfg.certfile, cfg.keyfile)
+ ctx.verify_mode = cfg.cert_reqs
+ if cfg.ca_certs:
+ ctx.load_verify_locations(cfg.ca_certs)
+ if cfg.ciphers:
+ ctx.set_ciphers(cfg.ciphers)
+ return ctx
+
+ def _get_valid_log_format(self, source_format: str) -> str:
+ if source_format == self.DEFAULT_GUNICORN_LOG_FORMAT:
+ return self.DEFAULT_AIOHTTP_LOG_FORMAT
+ elif re.search(r"%\([^\)]+\)", source_format):
+ raise ValueError(
+ "Gunicorn's style options in form of `%(name)s` are not "
+ "supported for the log formatting. Please use aiohttp's "
+ "format specification to configure access log formatting: "
+ "http://docs.aiohttp.org/en/stable/logging.html"
+ "#format-specification"
+ )
+ else:
+ return source_format
+
+
+class GunicornUVLoopWebWorker(GunicornWebWorker):
+ def init_process(self) -> None:
+ import uvloop
+
+ # Close any existing event loop before setting a
+ # new policy.
+ asyncio.get_event_loop().close()
+
+ # Setup uvloop policy, so that every
+ # asyncio.get_event_loop() will create an instance
+ # of uvloop event loop.
+ asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
+
+ super().init_process()
+
+
+class GunicornTokioWebWorker(GunicornWebWorker):
+ def init_process(self) -> None: # pragma: no cover
+ import tokio
+
+ # Close any existing event loop before setting a
+ # new policy.
+ asyncio.get_event_loop().close()
+
+ # Setup tokio policy, so that every
+ # asyncio.get_event_loop() will create an instance
+ # of tokio event loop.
+ asyncio.set_event_loop_policy(tokio.EventLoopPolicy())
+
+ super().init_process()
diff --git a/third_party/python/aiohttp/examples/background_tasks.py b/third_party/python/aiohttp/examples/background_tasks.py
new file mode 100755
index 0000000000..2a1ec12afa
--- /dev/null
+++ b/third_party/python/aiohttp/examples/background_tasks.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python3
+"""Example of aiohttp.web.Application.on_startup signal handler"""
+import asyncio
+
+import aioredis
+
+from aiohttp import web
+
+
+async def websocket_handler(request):
+ ws = web.WebSocketResponse()
+ await ws.prepare(request)
+ request.app["websockets"].append(ws)
+ try:
+ async for msg in ws:
+ print(msg)
+ await asyncio.sleep(1)
+ finally:
+ request.app["websockets"].remove(ws)
+ return ws
+
+
+async def on_shutdown(app):
+ for ws in app["websockets"]:
+        await ws.close(code=1001, message="Server shutdown")  # 1001 = "going away" (RFC 6455)
+
+
+async def listen_to_redis(app):
+ try:
+ sub = await aioredis.create_redis(("localhost", 6379), loop=app.loop)
+ ch, *_ = await sub.subscribe("news")
+ async for msg in ch.iter(encoding="utf-8"):
+ # Forward message to all connected websockets:
+ for ws in app["websockets"]:
+ await ws.send_str(f"{ch.name}: {msg}")
+ print(f"message in {ch.name}: {msg}")
+ except asyncio.CancelledError:
+ pass
+ finally:
+ print("Cancel Redis listener: close connection...")
+ await sub.unsubscribe(ch.name)
+ await sub.quit()
+ print("Redis connection closed.")
+
+
+async def start_background_tasks(app):
+ app["redis_listener"] = app.loop.create_task(listen_to_redis(app))
+
+
+async def cleanup_background_tasks(app):
+ print("cleanup background tasks...")
+ app["redis_listener"].cancel()
+ await app["redis_listener"]
+
+
+def init():
+ app = web.Application()
+ app["websockets"] = []
+ app.router.add_get("/news", websocket_handler)
+ app.on_startup.append(start_background_tasks)
+ app.on_cleanup.append(cleanup_background_tasks)
+ app.on_shutdown.append(on_shutdown)
+ return app
+
+
+web.run_app(init())
diff --git a/third_party/python/aiohttp/examples/cli_app.py b/third_party/python/aiohttp/examples/cli_app.py
new file mode 100755
index 0000000000..9fbd3b7604
--- /dev/null
+++ b/third_party/python/aiohttp/examples/cli_app.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python3
+"""
+Example of serving an Application using the `aiohttp.web` CLI.
+
+Serve this app using::
+
+ $ python -m aiohttp.web -H localhost -P 8080 --repeat 10 cli_app:init \
+ > "Hello World"
+
+Here ``--repeat`` & ``"Hello World"`` are application specific command-line
+arguments. `aiohttp.web` only parses & consumes the command-line arguments it
+needs (i.e. ``-H``, ``-P`` & ``entry-func``) and passes on any additional
+arguments to the `cli_app:init` function for processing.
+"""
+
+from argparse import ArgumentParser
+
+from aiohttp import web
+
+
+def display_message(req):
+ args = req.app["args"]
+ text = "\n".join([args.message] * args.repeat)
+ return web.Response(text=text)
+
+
+def init(argv):
+ arg_parser = ArgumentParser(
+ prog="aiohttp.web ...", description="Application CLI", add_help=False
+ )
+
+ # Positional argument
+ arg_parser.add_argument("message", help="message to print")
+
+ # Optional argument
+ arg_parser.add_argument(
+ "--repeat", help="number of times to repeat message", type=int, default="1"
+ )
+
+ # Avoid conflict with -h from `aiohttp.web` CLI parser
+ arg_parser.add_argument(
+ "--app-help", help="show this message and exit", action="help"
+ )
+
+ args = arg_parser.parse_args(argv)
+
+ app = web.Application()
+ app["args"] = args
+ app.router.add_get("/", display_message)
+
+ return app
diff --git a/third_party/python/aiohttp/examples/client_auth.py b/third_party/python/aiohttp/examples/client_auth.py
new file mode 100755
index 0000000000..6513de20e5
--- /dev/null
+++ b/third_party/python/aiohttp/examples/client_auth.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+import asyncio
+
+import aiohttp
+
+
+async def fetch(session):
+ print("Query http://httpbin.org/basic-auth/andrew/password")
+ async with session.get("http://httpbin.org/basic-auth/andrew/password") as resp:
+ print(resp.status)
+ body = await resp.text()
+ print(body)
+
+
+async def go(loop):
+ async with aiohttp.ClientSession(
+ auth=aiohttp.BasicAuth("andrew", "password"), loop=loop
+ ) as session:
+ await fetch(session)
+
+
+loop = asyncio.get_event_loop()
+loop.run_until_complete(go(loop))
diff --git a/third_party/python/aiohttp/examples/client_json.py b/third_party/python/aiohttp/examples/client_json.py
new file mode 100755
index 0000000000..e54edeaddb
--- /dev/null
+++ b/third_party/python/aiohttp/examples/client_json.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python3
+import asyncio
+
+import aiohttp
+
+
+async def fetch(session):
+ print("Query http://httpbin.org/get")
+ async with session.get("http://httpbin.org/get") as resp:
+ print(resp.status)
+ data = await resp.json()
+ print(data)
+
+
+async def go(loop):
+ async with aiohttp.ClientSession(loop=loop) as session:
+ await fetch(session)
+
+
+loop = asyncio.get_event_loop()
+loop.run_until_complete(go(loop))
+loop.close()
diff --git a/third_party/python/aiohttp/examples/client_ws.py b/third_party/python/aiohttp/examples/client_ws.py
new file mode 100755
index 0000000000..ec48eccc9a
--- /dev/null
+++ b/third_party/python/aiohttp/examples/client_ws.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+"""websocket cmd client for wssrv.py example."""
+import argparse
+import asyncio
+import signal
+import sys
+
+import aiohttp
+
+
+async def start_client(loop, url):
+ name = input("Please enter your name: ")
+
+ # input reader
+ def stdin_callback():
+ line = sys.stdin.buffer.readline().decode("utf-8")
+ if not line:
+ loop.stop()
+ else:
+            # send_str() is a coroutine; schedule it since this callback is sync
+            loop.create_task(ws.send_str(name + ": " + line))
+
+ loop.add_reader(sys.stdin.fileno(), stdin_callback)
+
+ async def dispatch():
+ while True:
+ msg = await ws.receive()
+
+ if msg.type == aiohttp.WSMsgType.TEXT:
+ print("Text: ", msg.data.strip())
+ elif msg.type == aiohttp.WSMsgType.BINARY:
+ print("Binary: ", msg.data)
+ elif msg.type == aiohttp.WSMsgType.PING:
+                await ws.pong()  # pong() is a coroutine and must be awaited
+ elif msg.type == aiohttp.WSMsgType.PONG:
+ print("Pong received")
+ else:
+ if msg.type == aiohttp.WSMsgType.CLOSE:
+ await ws.close()
+ elif msg.type == aiohttp.WSMsgType.ERROR:
+ print("Error during receive %s" % ws.exception())
+ elif msg.type == aiohttp.WSMsgType.CLOSED:
+ pass
+
+ break
+
+ # send request
+ async with aiohttp.ws_connect(url, autoclose=False, autoping=False) as ws:
+ await dispatch()
+
+
+ARGS = argparse.ArgumentParser(
+ description="websocket console client for wssrv.py example."
+)
+ARGS.add_argument(
+ "--host", action="store", dest="host", default="127.0.0.1", help="Host name"
+)
+ARGS.add_argument(
+ "--port", action="store", dest="port", default=8080, type=int, help="Port number"
+)
+
+if __name__ == "__main__":
+ args = ARGS.parse_args()
+ if ":" in args.host:
+ args.host, port = args.host.split(":", 1)
+ args.port = int(port)
+
+ url = f"http://{args.host}:{args.port}"
+
+ loop = asyncio.get_event_loop()
+
+ loop.add_signal_handler(signal.SIGINT, loop.stop)
+ loop.create_task(start_client(loop, url))
+ loop.run_forever()
diff --git a/third_party/python/aiohttp/examples/curl.py b/third_party/python/aiohttp/examples/curl.py
new file mode 100755
index 0000000000..a39639af34
--- /dev/null
+++ b/third_party/python/aiohttp/examples/curl.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+
+import argparse
+import asyncio
+
+import aiohttp
+
+
+async def curl(url):
+ async with aiohttp.ClientSession() as session:
+ async with session.request("GET", url) as response:
+ print(repr(response))
+ chunk = await response.content.read()
+ print("Downloaded: %s" % len(chunk))
+
+
+if __name__ == "__main__":
+ ARGS = argparse.ArgumentParser(description="GET url example")
+ ARGS.add_argument("url", nargs=1, metavar="URL", help="URL to download")
+ ARGS.add_argument(
+ "--iocp",
+ default=False,
+ action="store_true",
+ help="Use ProactorEventLoop on Windows",
+ )
+ options = ARGS.parse_args()
+
+ if options.iocp:
+ from asyncio import events, windows_events
+
+ el = windows_events.ProactorEventLoop()
+ events.set_event_loop(el)
+
+ loop = asyncio.get_event_loop()
+ loop.run_until_complete(curl(options.url[0]))
diff --git a/third_party/python/aiohttp/examples/fake_server.py b/third_party/python/aiohttp/examples/fake_server.py
new file mode 100755
index 0000000000..007d96ba02
--- /dev/null
+++ b/third_party/python/aiohttp/examples/fake_server.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python3
+import asyncio
+import pathlib
+import socket
+import ssl
+
+import aiohttp
+from aiohttp import web
+from aiohttp.resolver import DefaultResolver
+from aiohttp.test_utils import unused_port
+
+
+class FakeResolver:
+ _LOCAL_HOST = {0: "127.0.0.1", socket.AF_INET: "127.0.0.1", socket.AF_INET6: "::1"}
+
+ def __init__(self, fakes, *, loop):
+ """fakes -- dns -> port dict"""
+ self._fakes = fakes
+ self._resolver = DefaultResolver(loop=loop)
+
+ async def resolve(self, host, port=0, family=socket.AF_INET):
+ fake_port = self._fakes.get(host)
+ if fake_port is not None:
+ return [
+ {
+ "hostname": host,
+ "host": self._LOCAL_HOST[family],
+ "port": fake_port,
+ "family": family,
+ "proto": 0,
+ "flags": socket.AI_NUMERICHOST,
+ }
+ ]
+ else:
+ return await self._resolver.resolve(host, port, family)
+
+
+class FakeFacebook:
+ def __init__(self, *, loop):
+ self.loop = loop
+ self.app = web.Application(loop=loop)
+ self.app.router.add_routes(
+ [
+ web.get("/v2.7/me", self.on_me),
+ web.get("/v2.7/me/friends", self.on_my_friends),
+ ]
+ )
+ self.runner = None
+ here = pathlib.Path(__file__)
+ ssl_cert = here.parent / "server.crt"
+ ssl_key = here.parent / "server.key"
+ self.ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
+ self.ssl_context.load_cert_chain(str(ssl_cert), str(ssl_key))
+
+ async def start(self):
+ port = unused_port()
+ self.runner = web.AppRunner(self.app)
+ await self.runner.setup()
+ site = web.TCPSite(self.runner, "127.0.0.1", port, ssl_context=self.ssl_context)
+ await site.start()
+ return {"graph.facebook.com": port}
+
+ async def stop(self):
+ await self.runner.cleanup()
+
+ async def on_me(self, request):
+ return web.json_response({"name": "John Doe", "id": "12345678901234567"})
+
+ async def on_my_friends(self, request):
+ return web.json_response(
+ {
+ "data": [
+ {"name": "Bill Doe", "id": "233242342342"},
+ {"name": "Mary Doe", "id": "2342342343222"},
+ {"name": "Alex Smith", "id": "234234234344"},
+ ],
+ "paging": {
+ "cursors": {
+ "before": "QVFIUjRtc2c5NEl0ajN",
+ "after": "QVFIUlpFQWM0TmVuaDRad0dt",
+ },
+ "next": (
+ "https://graph.facebook.com/v2.7/12345678901234567/"
+ "friends?access_token=EAACEdEose0cB"
+ ),
+ },
+ "summary": {"total_count": 3},
+ }
+ )
+
+
+async def main(loop):
+ token = "ER34gsSGGS34XCBKd7u"
+
+ fake_facebook = FakeFacebook(loop=loop)
+ info = await fake_facebook.start()
+ resolver = FakeResolver(info, loop=loop)
+ connector = aiohttp.TCPConnector(loop=loop, resolver=resolver, verify_ssl=False)
+
+ async with aiohttp.ClientSession(connector=connector, loop=loop) as session:
+ async with session.get(
+ "https://graph.facebook.com/v2.7/me", params={"access_token": token}
+ ) as resp:
+ print(await resp.json())
+
+ async with session.get(
+ "https://graph.facebook.com/v2.7/me/friends", params={"access_token": token}
+ ) as resp:
+ print(await resp.json())
+
+ await fake_facebook.stop()
+
+
+loop = asyncio.get_event_loop()
+loop.run_until_complete(main(loop))
diff --git a/third_party/python/aiohttp/examples/legacy/crawl.py b/third_party/python/aiohttp/examples/legacy/crawl.py
new file mode 100755
index 0000000000..c8029b4854
--- /dev/null
+++ b/third_party/python/aiohttp/examples/legacy/crawl.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python3
+
+import asyncio
+import logging
+import re
+import signal
+import sys
+import urllib.parse
+
+import aiohttp
+
+
+class Crawler:
+ def __init__(self, rooturl, loop, maxtasks=100):
+ self.rooturl = rooturl
+ self.loop = loop
+ self.todo = set()
+ self.busy = set()
+ self.done = {}
+ self.tasks = set()
+ self.sem = asyncio.Semaphore(maxtasks, loop=loop)
+
+ # connector stores cookies between requests and uses connection pool
+ self.session = aiohttp.ClientSession(loop=loop)
+
+ async def run(self):
+ t = asyncio.ensure_future(self.addurls([(self.rooturl, "")]), loop=self.loop)
+ await asyncio.sleep(1, loop=self.loop)
+ while self.busy:
+ await asyncio.sleep(1, loop=self.loop)
+
+ await t
+ await self.session.close()
+ self.loop.stop()
+
+ async def addurls(self, urls):
+ for url, parenturl in urls:
+ url = urllib.parse.urljoin(parenturl, url)
+ url, frag = urllib.parse.urldefrag(url)
+ if (
+ url.startswith(self.rooturl)
+ and url not in self.busy
+ and url not in self.done
+ and url not in self.todo
+ ):
+ self.todo.add(url)
+ await self.sem.acquire()
+ task = asyncio.ensure_future(self.process(url), loop=self.loop)
+ task.add_done_callback(lambda t: self.sem.release())
+ task.add_done_callback(self.tasks.remove)
+ self.tasks.add(task)
+
+ async def process(self, url):
+ print("processing:", url)
+
+ self.todo.remove(url)
+ self.busy.add(url)
+ try:
+ resp = await self.session.get(url)
+ except Exception as exc:
+ print("...", url, "has error", repr(str(exc)))
+ self.done[url] = False
+ else:
+            content_type = resp.headers.get("content-type", "")
+            if resp.status == 200 and "text/html" in content_type:
+ data = (await resp.read()).decode("utf-8", "replace")
+ urls = re.findall(r'(?i)href=["\']?([^\s"\'<>]+)', data)
+ asyncio.Task(self.addurls([(u, url) for u in urls]))
+
+ resp.close()
+ self.done[url] = True
+
+ self.busy.remove(url)
+ print(
+ len(self.done),
+ "completed tasks,",
+ len(self.tasks),
+ "still pending, todo",
+ len(self.todo),
+ )
+
+
+def main():
+ loop = asyncio.get_event_loop()
+
+ c = Crawler(sys.argv[1], loop)
+ asyncio.ensure_future(c.run(), loop=loop)
+
+ try:
+ loop.add_signal_handler(signal.SIGINT, loop.stop)
+ except RuntimeError:
+ pass
+ loop.run_forever()
+ print("todo:", len(c.todo))
+ print("busy:", len(c.busy))
+ print("done:", len(c.done), "; ok:", sum(c.done.values()))
+ print("tasks:", len(c.tasks))
+
+
+if __name__ == "__main__":
+ if "--iocp" in sys.argv:
+ from asyncio import events, windows_events
+
+ sys.argv.remove("--iocp")
+ logging.info("using iocp")
+ el = windows_events.ProactorEventLoop()
+ events.set_event_loop(el)
+
+ main()
diff --git a/third_party/python/aiohttp/examples/legacy/srv.py b/third_party/python/aiohttp/examples/legacy/srv.py
new file mode 100755
index 0000000000..628b6f332f
--- /dev/null
+++ b/third_party/python/aiohttp/examples/legacy/srv.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python3
+"""Simple server written using an event loop."""
+
+import argparse
+import asyncio
+import logging
+import os
+import sys
+
+import aiohttp
+import aiohttp.server
+
+try:
+ import ssl
+except ImportError: # pragma: no cover
+ ssl = None
+
+
+class HttpRequestHandler(aiohttp.server.ServerHttpProtocol):
+ async def handle_request(self, message, payload):
+ print(
+ "method = {!r}; path = {!r}; version = {!r}".format(
+ message.method, message.path, message.version
+ )
+ )
+
+ path = message.path
+
+ if not (path.isprintable() and path.startswith("/")) or "/." in path:
+ print("bad path", repr(path))
+ path = None
+ else:
+ path = "." + path
+ if not os.path.exists(path):
+ print("no file", repr(path))
+ path = None
+ else:
+ isdir = os.path.isdir(path)
+
+ if not path:
+ raise aiohttp.HttpProcessingError(code=404)
+
+ for hdr, val in message.headers.items():
+ print(hdr, val)
+
+ if isdir and not path.endswith("/"):
+ path = path + "/"
+ raise aiohttp.HttpProcessingError(
+ code=302, headers=(("URI", path), ("Location", path))
+ )
+
+ response = aiohttp.Response(self.writer, 200, http_version=message.version)
+ response.add_header("Transfer-Encoding", "chunked")
+
+ # content encoding
+ accept_encoding = message.headers.get("accept-encoding", "").lower()
+ if "deflate" in accept_encoding:
+ response.add_header("Content-Encoding", "deflate")
+ response.add_compression_filter("deflate")
+ elif "gzip" in accept_encoding:
+ response.add_header("Content-Encoding", "gzip")
+ response.add_compression_filter("gzip")
+
+ response.add_chunking_filter(1025)
+
+ if isdir:
+ response.add_header("Content-type", "text/html")
+ response.send_headers()
+
+ response.write(b"<ul>\r\n")
+ for name in sorted(os.listdir(path)):
+ if name.isprintable() and not name.startswith("."):
+ try:
+ bname = name.encode("ascii")
+ except UnicodeError:
+ pass
+ else:
+ if os.path.isdir(os.path.join(path, name)):
+ response.write(
+ b'<li><a href="'
+ + bname
+ + b'/">'
+ + bname
+ + b"/</a></li>\r\n"
+ )
+ else:
+ response.write(
+ b'<li><a href="'
+ + bname
+ + b'">'
+ + bname
+ + b"</a></li>\r\n"
+ )
+ response.write(b"</ul>")
+ else:
+ response.add_header("Content-type", "text/plain")
+ response.send_headers()
+
+ try:
+ with open(path, "rb") as fp:
+ chunk = fp.read(8192)
+ while chunk:
+ response.write(chunk)
+ chunk = fp.read(8192)
+ except OSError:
+ response.write(b"Cannot open")
+
+ await response.write_eof()
+ if response.keep_alive():
+ self.keep_alive(True)
+
+
+ARGS = argparse.ArgumentParser(description="Run simple HTTP server.")
+ARGS.add_argument(
+ "--host", action="store", dest="host", default="127.0.0.1", help="Host name"
+)
+ARGS.add_argument(
+ "--port", action="store", dest="port", default=8080, type=int, help="Port number"
+)
+# make iocp and ssl mutually exclusive because ProactorEventLoop is
+# incompatible with SSL
+group = ARGS.add_mutually_exclusive_group()
+group.add_argument(
+ "--iocp", action="store_true", dest="iocp", help="Windows IOCP event loop"
+)
+group.add_argument("--ssl", action="store_true", dest="ssl", help="Run ssl mode.")
+ARGS.add_argument("--sslcert", action="store", dest="certfile", help="SSL cert file.")
+ARGS.add_argument("--sslkey", action="store", dest="keyfile", help="SSL key file.")
+
+
+def main():
+ args = ARGS.parse_args()
+
+ if ":" in args.host:
+ args.host, port = args.host.split(":", 1)
+ args.port = int(port)
+
+ if args.iocp:
+ from asyncio import windows_events
+
+ sys.argv.remove("--iocp")
+ logging.info("using iocp")
+ el = windows_events.ProactorEventLoop()
+ asyncio.set_event_loop(el)
+
+ if args.ssl:
+ here = os.path.join(os.path.dirname(__file__), "tests")
+
+        certfile = args.certfile or os.path.join(here, "sample.crt")
+        keyfile = args.keyfile or os.path.join(here, "sample.key")
+
+ sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ sslcontext.load_cert_chain(certfile, keyfile)
+ else:
+ sslcontext = None
+
+ loop = asyncio.get_event_loop()
+ f = loop.create_server(
+ lambda: HttpRequestHandler(debug=True, keep_alive=75),
+ args.host,
+ args.port,
+ ssl=sslcontext,
+ )
+ svr = loop.run_until_complete(f)
+ socks = svr.sockets
+ print("serving on", socks[0].getsockname())
+ try:
+ loop.run_forever()
+ except KeyboardInterrupt:
+ pass
+
+
+if __name__ == "__main__":
+ main()
diff --git a/third_party/python/aiohttp/examples/legacy/tcp_protocol_parser.py b/third_party/python/aiohttp/examples/legacy/tcp_protocol_parser.py
new file mode 100755
index 0000000000..ca49db7d8f
--- /dev/null
+++ b/third_party/python/aiohttp/examples/legacy/tcp_protocol_parser.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python3
+"""Protocol parser example."""
+import argparse
+import asyncio
+import collections
+
+import aiohttp
+
+try:
+ import signal
+except ImportError:
+ signal = None
+
+
+MSG_TEXT = b"text:"
+MSG_PING = b"ping:"
+MSG_PONG = b"pong:"
+MSG_STOP = b"stop:"
+
+Message = collections.namedtuple("Message", ("tp", "data"))
+
+
+def my_protocol_parser(out, buf):
+    """Parser used with StreamParser for incremental protocol parsing.
+
+    The parser is a generator function, not a coroutine. Parsers are
+    usually implemented as state machines.
+
+    More details are in asyncio/parsers.py.
+    Existing parsers:
+    * HTTP protocol parser: asyncio/http/protocol.py
+    * websocket parser: asyncio/http/websocket.py
+    """
+ while True:
+ tp = yield from buf.read(5)
+ if tp in (MSG_PING, MSG_PONG):
+ # skip line
+ yield from buf.skipuntil(b"\r\n")
+ out.feed_data(Message(tp, None))
+ elif tp == MSG_STOP:
+ out.feed_data(Message(tp, None))
+ elif tp == MSG_TEXT:
+ # read text
+ text = yield from buf.readuntil(b"\r\n")
+ out.feed_data(Message(tp, text.strip().decode("utf-8")))
+ else:
+ raise ValueError("Unknown protocol prefix.")
+
+
+class MyProtocolWriter:
+ def __init__(self, transport):
+ self.transport = transport
+
+ def ping(self):
+ self.transport.write(b"ping:\r\n")
+
+ def pong(self):
+ self.transport.write(b"pong:\r\n")
+
+ def stop(self):
+ self.transport.write(b"stop:\r\n")
+
+ def send_text(self, text):
+ self.transport.write(f"text:{text.strip()}\r\n".encode("utf-8"))
+
+
+class EchoServer(asyncio.Protocol):
+ def connection_made(self, transport):
+ print("Connection made")
+ self.transport = transport
+ self.stream = aiohttp.StreamParser()
+ asyncio.Task(self.dispatch())
+
+ def data_received(self, data):
+ self.stream.feed_data(data)
+
+ def eof_received(self):
+ self.stream.feed_eof()
+
+ def connection_lost(self, exc):
+ print("Connection lost")
+
+ async def dispatch(self):
+ reader = self.stream.set_parser(my_protocol_parser)
+ writer = MyProtocolWriter(self.transport)
+
+ while True:
+ try:
+ msg = await reader.read()
+ except aiohttp.ConnectionError:
+ # client has been disconnected
+ break
+
+ print(f"Message received: {msg}")
+
+            if msg.tp == MSG_PING:
+                writer.pong()
+            elif msg.tp == MSG_TEXT:
+                writer.send_text("Re: " + msg.data)
+            elif msg.tp == MSG_STOP:
+ self.transport.close()
+ break
+
+
+async def start_client(loop, host, port):
+ transport, stream = await loop.create_connection(aiohttp.StreamProtocol, host, port)
+ reader = stream.reader.set_parser(my_protocol_parser)
+ writer = MyProtocolWriter(transport)
+ writer.ping()
+
+ message = "This is the message. It will be echoed."
+
+ while True:
+ try:
+ msg = await reader.read()
+ except aiohttp.ConnectionError:
+ print("Server has been disconnected.")
+ break
+
+ print(f"Message received: {msg}")
+        if msg.tp == MSG_PONG:
+            writer.send_text(message)
+            print("data sent:", message)
+        elif msg.tp == MSG_TEXT:
+ writer.stop()
+ print("stop sent")
+ break
+
+ transport.close()
+
+
+def start_server(loop, host, port):
+ f = loop.create_server(EchoServer, host, port)
+ srv = loop.run_until_complete(f)
+ x = srv.sockets[0]
+ print("serving on", x.getsockname())
+ loop.run_forever()
+
+
+ARGS = argparse.ArgumentParser(description="Protocol parser example.")
+ARGS.add_argument(
+ "--server", action="store_true", dest="server", default=False, help="Run tcp server"
+)
+ARGS.add_argument(
+ "--client", action="store_true", dest="client", default=False, help="Run tcp client"
+)
+ARGS.add_argument(
+ "--host", action="store", dest="host", default="127.0.0.1", help="Host name"
+)
+ARGS.add_argument(
+ "--port", action="store", dest="port", default=9999, type=int, help="Port number"
+)
+
+
+if __name__ == "__main__":
+ args = ARGS.parse_args()
+
+ if ":" in args.host:
+ args.host, port = args.host.split(":", 1)
+ args.port = int(port)
+
+ if (not (args.server or args.client)) or (args.server and args.client):
+ print("Please specify --server or --client\n")
+ ARGS.print_help()
+ else:
+ loop = asyncio.get_event_loop()
+ if signal is not None:
+ loop.add_signal_handler(signal.SIGINT, loop.stop)
+
+ if args.server:
+ start_server(loop, args.host, args.port)
+ else:
+ loop.run_until_complete(start_client(loop, args.host, args.port))
diff --git a/third_party/python/aiohttp/examples/lowlevel_srv.py b/third_party/python/aiohttp/examples/lowlevel_srv.py
new file mode 100644
index 0000000000..5a003f40f4
--- /dev/null
+++ b/third_party/python/aiohttp/examples/lowlevel_srv.py
@@ -0,0 +1,26 @@
+import asyncio
+
+from aiohttp import web
+
+
+async def handler(request):
+ return web.Response(text="OK")
+
+
+async def main(loop):
+ server = web.Server(handler)
+ await loop.create_server(server, "127.0.0.1", 8080)
+ print("======= Serving on http://127.0.0.1:8080/ ======")
+
+    # Pause here for a very long time, serving HTTP requests until
+    # a keyboard interrupt stops the loop.
+ await asyncio.sleep(100 * 3600)
+
+
+loop = asyncio.get_event_loop()
+
+try:
+ loop.run_until_complete(main(loop))
+except KeyboardInterrupt:
+ pass
+loop.close()
diff --git a/third_party/python/aiohttp/examples/server.crt b/third_party/python/aiohttp/examples/server.crt
new file mode 100644
index 0000000000..708971a376
--- /dev/null
+++ b/third_party/python/aiohttp/examples/server.crt
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDADCCAegCCQCgevpPMuTTLzANBgkqhkiG9w0BAQsFADBCMQswCQYDVQQGEwJV
+QTEQMA4GA1UECAwHVWtyYWluZTEhMB8GA1UECgwYSW50ZXJuZXQgV2lkZ2l0cyBQ
+dHkgTHRkMB4XDTE2MDgwNzIzMTMwOFoXDTI2MDgwNTIzMTMwOFowQjELMAkGA1UE
+BhMCVUExEDAOBgNVBAgMB1VrcmFpbmUxITAfBgNVBAoMGEludGVybmV0IFdpZGdp
+dHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOUgkn3j
+X/sdg6GGueGDHCM+snIUVY3fM6D4jXjyBhnT3TqKG1lJwCGYR11AD+2SJYppU+w4
+QaF6YZwMeZBKy+mVQ9+CrVYyKQE7j9H8XgNEHV9BQzoragT8lia8eC5aOQzUeX8A
+xCSSbsnyT/X+S1IKdd0txLOeZOD6pWwJoc3dpDELglk2b1tzhyN2GjQv3aRHj55P
+x7127MeZyRXwODFpXrpbnwih4OqkA4EYtmqFbZttGEzMhd4Y5mkbyuRbGM+IE99o
+QJMvnIkjAfUo0aKnDrcAIkWCkwLIci9TIG6u3R1P2Tn+HYVntzQZ4BnxanbFNQ5S
+9ARd3529EmO3BzUCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAXyiw1+YUnTEDI3C/
+vq1Vn9pnwZALVQPiPlTqEGkl/nbq0suMmeZZG7pwrOJp3wr+sGwRAv9sPTro6srf
+Vj12wTo4LrTRKEDuS+AUJl0Mut7cPGIUKo+MGeZmmnDjMqcjljN3AO47ef4eWYo5
+XGe4r4NDABEk5auOD/vQW5IiIMdmWsaMJ+0mZNpAV2NhAD/6ia28VvSL/yuaNqDW
+TYTUYHWLH08H6M6qrQ7FdoIDyYR5siqBukQzeqlnuq45bQ3ViYttNIkzZN4jbWJV
+/MFYLuJQ/fNoalDIC+ec0EIa9NbrfpoocJ8h6HlmWOqkES4QpBSOrkVid64Cdy3P
+JgiEWg==
+-----END CERTIFICATE-----
diff --git a/third_party/python/aiohttp/examples/server.csr b/third_party/python/aiohttp/examples/server.csr
new file mode 100644
index 0000000000..1df3087b91
--- /dev/null
+++ b/third_party/python/aiohttp/examples/server.csr
@@ -0,0 +1,16 @@
+-----BEGIN CERTIFICATE REQUEST-----
+MIIChzCCAW8CAQAwQjELMAkGA1UEBhMCVUExEDAOBgNVBAgMB1VrcmFpbmUxITAf
+BgNVBAoMGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAOUgkn3jX/sdg6GGueGDHCM+snIUVY3fM6D4jXjyBhnT
+3TqKG1lJwCGYR11AD+2SJYppU+w4QaF6YZwMeZBKy+mVQ9+CrVYyKQE7j9H8XgNE
+HV9BQzoragT8lia8eC5aOQzUeX8AxCSSbsnyT/X+S1IKdd0txLOeZOD6pWwJoc3d
+pDELglk2b1tzhyN2GjQv3aRHj55Px7127MeZyRXwODFpXrpbnwih4OqkA4EYtmqF
+bZttGEzMhd4Y5mkbyuRbGM+IE99oQJMvnIkjAfUo0aKnDrcAIkWCkwLIci9TIG6u
+3R1P2Tn+HYVntzQZ4BnxanbFNQ5S9ARd3529EmO3BzUCAwEAAaAAMA0GCSqGSIb3
+DQEBCwUAA4IBAQDO/PSd29KgisTdGXhntg7yBEhBAjsDW7uQCrdrPSZtFyN6wUHy
+/1yrrWe56ZuW8jpuP5tG0eTZ+0bT2RXIRot8a2Cc3eBhpoe8M3d84yXjKAoHutGE
+5IK+TViQdvT3pT3a7pTmjlf8Ojq9tx+U2ckiz8Ccnjd9yM47M9NgMhrS1aBpVZSt
+gOD+zzrqMML4xks9id94H7bi9Tgs3AbEJIyDpBpoK6i4OvK7KTidCngCg80qmdTy
+bcScLapoy1Ped2BKKuxWdOOlP+mDJatc/pcfBLE13AncQjJgMerS9M5RWCBjmRow
+A+aB6fBEU8bOTrqCryfBeTiV6xzyDDcIXtc6
+-----END CERTIFICATE REQUEST-----
diff --git a/third_party/python/aiohttp/examples/server.key b/third_party/python/aiohttp/examples/server.key
new file mode 100644
index 0000000000..37dae99e67
--- /dev/null
+++ b/third_party/python/aiohttp/examples/server.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEA5SCSfeNf+x2DoYa54YMcIz6ychRVjd8zoPiNePIGGdPdOoob
+WUnAIZhHXUAP7ZIlimlT7DhBoXphnAx5kErL6ZVD34KtVjIpATuP0fxeA0QdX0FD
+OitqBPyWJrx4Llo5DNR5fwDEJJJuyfJP9f5LUgp13S3Es55k4PqlbAmhzd2kMQuC
+WTZvW3OHI3YaNC/dpEePnk/HvXbsx5nJFfA4MWleulufCKHg6qQDgRi2aoVtm20Y
+TMyF3hjmaRvK5FsYz4gT32hAky+ciSMB9SjRoqcOtwAiRYKTAshyL1Mgbq7dHU/Z
+Of4dhWe3NBngGfFqdsU1DlL0BF3fnb0SY7cHNQIDAQABAoIBAG9BJ6B03VADfrzZ
+vDwh+3Gpqd/2u6wNqvYIejk123yDATLBiJIMW3x0goJm7tT+V7gjeJqEnmmYEPlC
+nWxQxT6AOdq3iw8FgB+XGjhuAAA5/MEZ4VjHZ81QEGBytzBaosT2DqB6cMMJTz5D
+qEvb1Brb9WsWJCLLUFRloBkbfDOG9lMvt34ixYTTmqjsVj5WByD5BhzKH51OJ72L
+00IYpvrsEOtSev1hNV4199CHPYE90T/YQVooRBiHtTcfN+/KNVJu6Rf/zcaJ3WMS
+1l3MBI8HwMimjKKkbddpoMHyFMtSNmS9Yq+4a9w7XZo1F5rt88hYSCtAF8HRAarX
+0VBCJmkCgYEA9HenBBnmfDoN857femzoTHdWQQrZQ4YPAKHvKPlcgudizE5tQbs0
+iTpwm+IsecgJS2Rio7zY+P7A5nKFz3N5c0IX3smYo0J2PoakkLAm25KMxFZYBuz4
+MFWVdfByAU7d28BdNfyOVbA2kU2eal9lJ0yPLpMLbH8+bbvw5uBS808CgYEA7++p
+ftwib3DvKWMpl6G5eA1C2xprdbE0jm2fSr3LYp/vZ4QN2V6kK2YIlyUqQvhYCnxX
+oIP3v2MWDRHKKwJtBWR4+t23PaDaSXS2Ifm0qhRxwSm/oqpAJQXbR7VzxXp4/4FP
+1SgkLe51bubc4h+cDngqBLcplCanvj52CqhqzDsCgYAEIhG8zANNjl22BLWaiETV
+Jh9bMifCMH4IcLRuaOjbfbX55kmKlvOobkiBGi3OUUd28teIFSVF8GiqfL0uaLFg
+9XkZ1yaxe+or3HLjz1aY171xhFQwqcj4aDoCqHIE+6Rclr/8raxqXnRNuJY5DivT
+okO5cdr7lpsjl83W2WwNmQKBgCPXi1xWChbXqgJmu8nY8NnMMVaFpdPY+t7j5U3G
++GDtP1gZU/BKwP9yqInblWqXqp82X+isjg/a/2pIZAj0vdB2Z9Qh1sOwCau7cZG1
+uZVGpI+UavojsJ1XOKCHrJmtZ/HTIVfYPT9XRdehSRHGYwuOS8iUi/ODqr8ymXOS
+IRINAoGBAMEmhTihgFz6Y8ezRK3QTubguehHZG1zIvtgVhOk+8hRUTSJPI9nBJPC
+4gOZsPx4g2oLK6PiudPR79bhxRxPACCMnXkdwZ/8FaIdmvRHsWVs8T80wID0wthI
+r5hW4uqi9CcKZrGWH7mx9cVJktspeGUczvKyzNMfCaojwzA/49Z1
+-----END RSA PRIVATE KEY-----
diff --git a/third_party/python/aiohttp/examples/server_simple.py b/third_party/python/aiohttp/examples/server_simple.py
new file mode 100644
index 0000000000..d464383d26
--- /dev/null
+++ b/third_party/python/aiohttp/examples/server_simple.py
@@ -0,0 +1,31 @@
+# server_simple.py
+from aiohttp import web
+
+
+async def handle(request):
+ name = request.match_info.get("name", "Anonymous")
+ text = "Hello, " + name
+ return web.Response(text=text)
+
+
+async def wshandle(request):
+ ws = web.WebSocketResponse()
+ await ws.prepare(request)
+
+ async for msg in ws:
+ if msg.type == web.WSMsgType.text:
+ await ws.send_str(f"Hello, {msg.data}")
+ elif msg.type == web.WSMsgType.binary:
+ await ws.send_bytes(msg.data)
+ elif msg.type == web.WSMsgType.close:
+ break
+
+ return ws
+
+
+app = web.Application()
+app.add_routes(
+ [web.get("/", handle), web.get("/echo", wshandle), web.get("/{name}", handle)]
+)
+
+web.run_app(app)
diff --git a/third_party/python/aiohttp/examples/static_files.py b/third_party/python/aiohttp/examples/static_files.py
new file mode 100755
index 0000000000..65f6bb9c76
--- /dev/null
+++ b/third_party/python/aiohttp/examples/static_files.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python3
+import pathlib
+
+from aiohttp import web
+
+app = web.Application()
+app.router.add_static("/", pathlib.Path(__file__).parent, show_index=True)
+
+web.run_app(app)
diff --git a/third_party/python/aiohttp/examples/web_classview.py b/third_party/python/aiohttp/examples/web_classview.py
new file mode 100755
index 0000000000..0f65f7d7f4
--- /dev/null
+++ b/third_party/python/aiohttp/examples/web_classview.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python3
+"""Example for aiohttp.web class based views
+"""
+
+
+import functools
+import json
+
+from aiohttp import web
+
+
+class MyView(web.View):
+ async def get(self):
+ return web.json_response(
+ {
+ "method": "get",
+                "args": dict(self.request.query),
+ "headers": dict(self.request.headers),
+ },
+ dumps=functools.partial(json.dumps, indent=4),
+ )
+
+ async def post(self):
+ data = await self.request.post()
+ return web.json_response(
+ {
+ "method": "post",
+                "args": dict(self.request.query),
+ "data": dict(data),
+ "headers": dict(self.request.headers),
+ },
+ dumps=functools.partial(json.dumps, indent=4),
+ )
+
+
+async def index(request):
+ txt = """
+ <html>
+ <head>
+ <title>Class based view example</title>
+ </head>
+ <body>
+ <h1>Class based view example</h1>
+ <ul>
+ <li><a href="/">/</a> This page
+ <li><a href="/get">/get</a> Returns GET data.
+ <li><a href="/post">/post</a> Returns POST data.
+ </ul>
+ </body>
+ </html>
+ """
+ return web.Response(text=txt, content_type="text/html")
+
+
+def init():
+ app = web.Application()
+ app.router.add_get("/", index)
+ app.router.add_get("/get", MyView)
+ app.router.add_post("/post", MyView)
+ return app
+
+
+web.run_app(init())
diff --git a/third_party/python/aiohttp/examples/web_cookies.py b/third_party/python/aiohttp/examples/web_cookies.py
new file mode 100755
index 0000000000..e7a4a595d7
--- /dev/null
+++ b/third_party/python/aiohttp/examples/web_cookies.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+"""Example for aiohttp.web basic server with cookies.
+"""
+
+from pprint import pformat
+
+from aiohttp import web
+
+tmpl = """\
+<html>
+ <body>
+ <a href="/login">Login</a><br/>
+ <a href="/logout">Logout</a><br/>
+ <pre>{}</pre>
+ </body>
+</html>"""
+
+
+async def root(request):
+ resp = web.Response(content_type="text/html")
+ resp.text = tmpl.format(pformat(request.cookies))
+ return resp
+
+
+async def login(request):
+ resp = web.HTTPFound(location="/")
+ resp.set_cookie("AUTH", "secret")
+ return resp
+
+
+async def logout(request):
+ resp = web.HTTPFound(location="/")
+ resp.del_cookie("AUTH")
+ return resp
+
+
+def init():
+    app = web.Application()
+ app.router.add_get("/", root)
+ app.router.add_get("/login", login)
+ app.router.add_get("/logout", logout)
+ return app
+
+
+web.run_app(init())
diff --git a/third_party/python/aiohttp/examples/web_rewrite_headers_middleware.py b/third_party/python/aiohttp/examples/web_rewrite_headers_middleware.py
new file mode 100755
index 0000000000..20799a3a7c
--- /dev/null
+++ b/third_party/python/aiohttp/examples/web_rewrite_headers_middleware.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python3
+"""
+Example for rewriting response headers by middleware.
+"""
+
+from aiohttp import web
+
+
+async def handler(request):
+ return web.Response(text="Everything is fine")
+
+
+@web.middleware
+async def middleware(request, handler):
+    response = await handler(request)
+ if not response.prepared:
+ response.headers["SERVER"] = "Secured Server Software"
+ return response
+
+
+def init():
+ app = web.Application(middlewares=[middleware])
+ app.router.add_get("/", handler)
+ return app
+
+
+web.run_app(init())
diff --git a/third_party/python/aiohttp/examples/web_srv.py b/third_party/python/aiohttp/examples/web_srv.py
new file mode 100755
index 0000000000..b572326a3a
--- /dev/null
+++ b/third_party/python/aiohttp/examples/web_srv.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+"""Example for aiohttp.web basic server
+"""
+
+import textwrap
+
+from aiohttp import web
+
+
+async def intro(request):
+ txt = textwrap.dedent(
+ """\
+        Type {url}/hello/John, {url}/simple, or {url}/change_body
+        in the browser URL bar
+ """
+ ).format(url="127.0.0.1:8080")
+ binary = txt.encode("utf8")
+ resp = web.StreamResponse()
+ resp.content_length = len(binary)
+ resp.content_type = "text/plain"
+ await resp.prepare(request)
+ await resp.write(binary)
+ return resp
+
+
+async def simple(request):
+ return web.Response(text="Simple answer")
+
+
+async def change_body(request):
+ resp = web.Response()
+ resp.body = b"Body changed"
+ resp.content_type = "text/plain"
+ return resp
+
+
+async def hello(request):
+ resp = web.StreamResponse()
+ name = request.match_info.get("name", "Anonymous")
+ answer = ("Hello, " + name).encode("utf8")
+ resp.content_length = len(answer)
+ resp.content_type = "text/plain"
+ await resp.prepare(request)
+ await resp.write(answer)
+ await resp.write_eof()
+ return resp
+
+
+def init():
+ app = web.Application()
+ app.router.add_get("/", intro)
+ app.router.add_get("/simple", simple)
+ app.router.add_get("/change_body", change_body)
+ app.router.add_get("/hello/{name}", hello)
+ app.router.add_get("/hello", hello)
+ return app
+
+
+web.run_app(init())
diff --git a/third_party/python/aiohttp/examples/web_srv_route_deco.py b/third_party/python/aiohttp/examples/web_srv_route_deco.py
new file mode 100644
index 0000000000..332990362c
--- /dev/null
+++ b/third_party/python/aiohttp/examples/web_srv_route_deco.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python3
+"""Example for aiohttp.web basic server
+with decorator definition for routes
+"""
+
+import textwrap
+
+from aiohttp import web
+
+routes = web.RouteTableDef()
+
+
+@routes.get("/")
+async def intro(request):
+ txt = textwrap.dedent(
+ """\
+        Type {url}/hello/John, {url}/simple, or {url}/change_body
+        in the browser URL bar
+ """
+ ).format(url="127.0.0.1:8080")
+ binary = txt.encode("utf8")
+ resp = web.StreamResponse()
+ resp.content_length = len(binary)
+ resp.content_type = "text/plain"
+ await resp.prepare(request)
+ await resp.write(binary)
+ return resp
+
+
+@routes.get("/simple")
+async def simple(request):
+ return web.Response(text="Simple answer")
+
+
+@routes.get("/change_body")
+async def change_body(request):
+ resp = web.Response()
+ resp.body = b"Body changed"
+ resp.content_type = "text/plain"
+ return resp
+
+
+@routes.get("/hello/{name}")
+@routes.get("/hello")
+async def hello(request):
+ resp = web.StreamResponse()
+ name = request.match_info.get("name", "Anonymous")
+ answer = ("Hello, " + name).encode("utf8")
+ resp.content_length = len(answer)
+ resp.content_type = "text/plain"
+ await resp.prepare(request)
+ await resp.write(answer)
+ await resp.write_eof()
+ return resp
+
+
+def init():
+ app = web.Application()
+ app.router.add_routes(routes)
+ return app
+
+
+web.run_app(init())
diff --git a/third_party/python/aiohttp/examples/web_srv_route_table.py b/third_party/python/aiohttp/examples/web_srv_route_table.py
new file mode 100644
index 0000000000..f53142adad
--- /dev/null
+++ b/third_party/python/aiohttp/examples/web_srv_route_table.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python3
+"""Example for aiohttp.web basic server
+with table definition for routes
+"""
+
+import textwrap
+
+from aiohttp import web
+
+
+async def intro(request):
+ txt = textwrap.dedent(
+ """\
+        Type {url}/hello/John, {url}/simple, or {url}/change_body
+        in the browser URL bar
+ """
+ ).format(url="127.0.0.1:8080")
+ binary = txt.encode("utf8")
+ resp = web.StreamResponse()
+ resp.content_length = len(binary)
+ resp.content_type = "text/plain"
+ await resp.prepare(request)
+ await resp.write(binary)
+ return resp
+
+
+async def simple(request):
+ return web.Response(text="Simple answer")
+
+
+async def change_body(request):
+ resp = web.Response()
+ resp.body = b"Body changed"
+ resp.content_type = "text/plain"
+ return resp
+
+
+async def hello(request):
+ resp = web.StreamResponse()
+ name = request.match_info.get("name", "Anonymous")
+ answer = ("Hello, " + name).encode("utf8")
+ resp.content_length = len(answer)
+ resp.content_type = "text/plain"
+ await resp.prepare(request)
+ await resp.write(answer)
+ await resp.write_eof()
+ return resp
+
+
+def init():
+ app = web.Application()
+ app.router.add_routes(
+ [
+ web.get("/", intro),
+ web.get("/simple", simple),
+ web.get("/change_body", change_body),
+ web.get("/hello/{name}", hello),
+ web.get("/hello", hello),
+ ]
+ )
+ return app
+
+
+web.run_app(init())
diff --git a/third_party/python/aiohttp/examples/web_ws.py b/third_party/python/aiohttp/examples/web_ws.py
new file mode 100755
index 0000000000..970f1506be
--- /dev/null
+++ b/third_party/python/aiohttp/examples/web_ws.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python3
+"""Example for aiohttp.web websocket server
+"""
+
+import os
+
+from aiohttp import web
+
+WS_FILE = os.path.join(os.path.dirname(__file__), "websocket.html")
+
+
+async def wshandler(request):
+ resp = web.WebSocketResponse()
+ available = resp.can_prepare(request)
+ if not available:
+ with open(WS_FILE, "rb") as fp:
+ return web.Response(body=fp.read(), content_type="text/html")
+
+ await resp.prepare(request)
+
+ await resp.send_str("Welcome!!!")
+
+ try:
+ print("Someone joined.")
+ for ws in request.app["sockets"]:
+ await ws.send_str("Someone joined")
+ request.app["sockets"].append(resp)
+
+ async for msg in resp:
+ if msg.type == web.WSMsgType.TEXT:
+ for ws in request.app["sockets"]:
+ if ws is not resp:
+ await ws.send_str(msg.data)
+ else:
+ return resp
+ return resp
+
+ finally:
+ request.app["sockets"].remove(resp)
+ print("Someone disconnected.")
+ for ws in request.app["sockets"]:
+ await ws.send_str("Someone disconnected.")
+
+
+async def on_shutdown(app):
+ for ws in app["sockets"]:
+ await ws.close()
+
+
+def init():
+ app = web.Application()
+ app["sockets"] = []
+ app.router.add_get("/", wshandler)
+ app.on_shutdown.append(on_shutdown)
+ return app
+
+
+web.run_app(init())
diff --git a/third_party/python/aiohttp/examples/websocket.html b/third_party/python/aiohttp/examples/websocket.html
new file mode 100644
index 0000000000..2ba9ff367d
--- /dev/null
+++ b/third_party/python/aiohttp/examples/websocket.html
@@ -0,0 +1,89 @@
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="utf-8" />
+<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.4.2/jquery.min.js">
+</script>
+ <script language="javascript" type="text/javascript">
+ $(function() {
+ var conn = null;
+ function log(msg) {
+ var control = $('#log');
+ control.html(control.html() + msg + '<br/>');
+ control.scrollTop(control.scrollTop() + 1000);
+ }
+ function connect() {
+ disconnect();
+ var wsUri = (window.location.protocol=='https:'&&'wss://'||'ws://')+window.location.host;
+ conn = new WebSocket(wsUri);
+ log('Connecting...');
+ conn.onopen = function() {
+ log('Connected.');
+ update_ui();
+ };
+ conn.onmessage = function(e) {
+ log('Received: ' + e.data);
+ };
+ conn.onclose = function() {
+ log('Disconnected.');
+ conn = null;
+ update_ui();
+ };
+ }
+ function disconnect() {
+ if (conn != null) {
+ log('Disconnecting...');
+ conn.close();
+ conn = null;
+ update_ui();
+ }
+ }
+ function update_ui() {
+ if (conn == null) {
+ $('#status').text('disconnected');
+ $('#connect').html('Connect');
+ } else {
+ $('#status').text('connected (' + conn.protocol + ')');
+ $('#connect').html('Disconnect');
+ }
+ }
+ $('#connect').click(function() {
+ if (conn == null) {
+ connect();
+ } else {
+ disconnect();
+ }
+ update_ui();
+ return false;
+ });
+ $('#send').click(function() {
+ var text = $('#text').val();
+ log('Sending: ' + text);
+ conn.send(text);
+ $('#text').val('').focus();
+ return false;
+ });
+ $('#text').keyup(function(e) {
+ if (e.keyCode === 13) {
+ $('#send').click();
+ return false;
+ }
+ });
+ });
+</script>
+</head>
+<body>
+<h3>Chat!</h3>
+<div>
+ <button id="connect">Connect</button>&nbsp;|&nbsp;Status:
+ <span id="status">disconnected</span>
+</div>
+<div id="log"
+ style="width:20em;height:15em;overflow:auto;border:1px solid black">
+</div>
+<form id="chatform" onsubmit="return false;">
+ <input id="text" type="text" />
+ <input id="send" type="button" value="Send" />
+</form>
+</body>
+</html>
diff --git a/third_party/python/aiohttp/pyproject.toml b/third_party/python/aiohttp/pyproject.toml
new file mode 100644
index 0000000000..e666dfc174
--- /dev/null
+++ b/third_party/python/aiohttp/pyproject.toml
@@ -0,0 +1,7 @@
+[tool.towncrier]
+package = "aiohttp"
+filename = "CHANGES.rst"
+directory = "CHANGES/"
+title_format = "{version} ({project_date})"
+template = "CHANGES/.TEMPLATE.rst"
+issue_format = "`#{issue} <https://github.com/aio-libs/aiohttp/issues/{issue}>`_"
diff --git a/third_party/python/aiohttp/setup.cfg b/third_party/python/aiohttp/setup.cfg
new file mode 100644
index 0000000000..2f528bc49f
--- /dev/null
+++ b/third_party/python/aiohttp/setup.cfg
@@ -0,0 +1,93 @@
+[aliases]
+test = pytest
+
+[metadata]
+license_file = LICENSE.txt
+
+[pep8]
+max-line-length = 79
+
+[easy_install]
+zip_ok = false
+
+[flake8]
+ignore = N801,N802,N803,E203,E226,E305,W504,E252,E301,E302,E704,W503,W504,F811
+max-line-length = 88
+
+[isort]
+line_length = 88
+include_trailing_comma = True
+multi_line_output = 3
+force_grid_wrap = 0
+combine_as_imports = True
+known_third_party = jinja2,pytest,multidict,yarl,gunicorn,freezegun,async_generator
+known_first_party = aiohttp,aiohttp_jinja2,aiopg
+
+[report]
+exclude_lines =
+ @abc.abstractmethod
+ @abstractmethod
+
+[coverage:run]
+branch = True
+source = aiohttp, tests
+omit = site-packages
+
+[tool:pytest]
+addopts = --cov=aiohttp -v -rxXs --durations 10
+filterwarnings =
+ error
+ ignore:module 'ssl' has no attribute 'OP_NO_COMPRESSION'. The Python interpreter is compiled against OpenSSL < 1.0.0. Ref. https.//docs.python.org/3/library/ssl.html#ssl.OP_NO_COMPRESSION:UserWarning
+junit_suite_name = aiohttp_test_suite
+norecursedirs = dist docs build .tox .eggs
+minversion = 3.8.2
+testpaths = tests/
+junit_family = xunit2
+xfail_strict = true
+
+[mypy]
+follow_imports = silent
+strict_optional = True
+warn_redundant_casts = True
+warn_unused_ignores = True
+check_untyped_defs = True
+disallow_any_generics = True
+disallow_untyped_defs = True
+
+[mypy-pytest]
+ignore_missing_imports = true
+
+[mypy-uvloop]
+ignore_missing_imports = true
+
+[mypy-tokio]
+ignore_missing_imports = true
+
+[mypy-async_generator]
+ignore_missing_imports = true
+
+[mypy-aiodns]
+ignore_missing_imports = true
+
+[mypy-gunicorn.config]
+ignore_missing_imports = true
+
+[mypy-gunicorn.workers]
+ignore_missing_imports = true
+
+[mypy-brotli]
+ignore_missing_imports = true
+
+[mypy-chardet]
+ignore_missing_imports = true
+
+[mypy-cchardet]
+ignore_missing_imports = true
+
+[mypy-idna_ssl]
+ignore_missing_imports = true
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/third_party/python/aiohttp/setup.py b/third_party/python/aiohttp/setup.py
new file mode 100644
index 0000000000..54462ba71c
--- /dev/null
+++ b/third_party/python/aiohttp/setup.py
@@ -0,0 +1,159 @@
+import pathlib
+import re
+import sys
+from distutils.command.build_ext import build_ext
+from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
+
+from setuptools import Extension, setup
+
+if sys.version_info < (3, 6):
+ raise RuntimeError("aiohttp 3.7+ requires Python 3.6+")
+
+here = pathlib.Path(__file__).parent
+
+
+if (here / ".git").exists() and not (here / "vendor/http-parser/README.md").exists():
+ print("Install submodules when building from git clone", file=sys.stderr)
+ print("Hint:", file=sys.stderr)
+ print(" git submodule update --init", file=sys.stderr)
+ sys.exit(2)
+
+
+# NOTE: makefile cythonizes all Cython modules
+
+extensions = [
+ Extension("aiohttp._websocket", ["aiohttp/_websocket.c"]),
+ Extension(
+ "aiohttp._http_parser",
+ [
+ "aiohttp/_http_parser.c",
+ "vendor/http-parser/http_parser.c",
+ "aiohttp/_find_header.c",
+ ],
+ define_macros=[("HTTP_PARSER_STRICT", 0)],
+ ),
+ Extension("aiohttp._frozenlist", ["aiohttp/_frozenlist.c"]),
+ Extension("aiohttp._helpers", ["aiohttp/_helpers.c"]),
+ Extension("aiohttp._http_writer", ["aiohttp/_http_writer.c"]),
+]
+
+
+class BuildFailed(Exception):
+ pass
+
+
+class ve_build_ext(build_ext):
+ # This class allows C extension building to fail.
+
+ def run(self):
+ try:
+ build_ext.run(self)
+ except (DistutilsPlatformError, FileNotFoundError):
+ raise BuildFailed()
+
+ def build_extension(self, ext):
+ try:
+ build_ext.build_extension(self, ext)
+ except (CCompilerError, DistutilsExecError, DistutilsPlatformError, ValueError):
+ raise BuildFailed()
+
+
+txt = (here / "aiohttp" / "__init__.py").read_text("utf-8")
+try:
+ version = re.findall(r'^__version__ = "([^"]+)"\r?$', txt, re.M)[0]
+except IndexError:
+ raise RuntimeError("Unable to determine version.")
+
+install_requires = [
+ "attrs>=17.3.0",
+ "chardet>=2.0,<5.0",
+ "multidict>=4.5,<7.0",
+ "async_timeout>=3.0,<4.0",
+ "yarl>=1.0,<2.0",
+ 'idna-ssl>=1.0; python_version<"3.7"',
+ "typing_extensions>=3.6.5",
+]
+
+
+def read(f):
+ return (here / f).read_text("utf-8").strip()
+
+
+NEEDS_PYTEST = {"pytest", "test"}.intersection(sys.argv)
+pytest_runner = ["pytest-runner"] if NEEDS_PYTEST else []
+
+tests_require = [
+ "pytest",
+ "gunicorn",
+ "pytest-timeout",
+ "async-generator",
+ "pytest-xdist",
+]
+
+
+args = dict(
+ name="aiohttp",
+ version=version,
+ description="Async http client/server framework (asyncio)",
+ long_description="\n\n".join((read("README.rst"), read("CHANGES.rst"))),
+ classifiers=[
+ "License :: OSI Approved :: Apache Software License",
+ "Intended Audience :: Developers",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Development Status :: 5 - Production/Stable",
+ "Operating System :: POSIX",
+ "Operating System :: MacOS :: MacOS X",
+ "Operating System :: Microsoft :: Windows",
+ "Topic :: Internet :: WWW/HTTP",
+ "Framework :: AsyncIO",
+ ],
+ author="Nikolay Kim",
+ author_email="fafhrd91@gmail.com",
+ maintainer=", ".join(
+ (
+ "Nikolay Kim <fafhrd91@gmail.com>",
+ "Andrew Svetlov <andrew.svetlov@gmail.com>",
+ )
+ ),
+ maintainer_email="aio-libs@googlegroups.com",
+ url="https://github.com/aio-libs/aiohttp",
+ project_urls={
+ "Chat: Gitter": "https://gitter.im/aio-libs/Lobby",
+ "CI: Azure Pipelines": "https://dev.azure.com/aio-libs/aiohttp/_build",
+ "Coverage: codecov": "https://codecov.io/github/aio-libs/aiohttp",
+ "Docs: RTD": "https://docs.aiohttp.org",
+ "GitHub: issues": "https://github.com/aio-libs/aiohttp/issues",
+ "GitHub: repo": "https://github.com/aio-libs/aiohttp",
+ },
+ license="Apache 2",
+ packages=["aiohttp"],
+ python_requires=">=3.6",
+ install_requires=install_requires,
+ extras_require={
+ "speedups": [
+ "aiodns",
+ "brotlipy",
+ "cchardet",
+ ],
+ },
+ tests_require=tests_require,
+ setup_requires=pytest_runner,
+ include_package_data=True,
+ ext_modules=extensions,
+ cmdclass=dict(build_ext=ve_build_ext),
+)
+
+try:
+ setup(**args)
+except BuildFailed:
+ print("************************************************************")
+ print("Cannot compile C accelerator module, use pure python version")
+ print("************************************************************")
+ del args["ext_modules"]
+ del args["cmdclass"]
+ setup(**args)
diff --git a/third_party/python/aiohttp/vendor/http-parser/.gitignore b/third_party/python/aiohttp/vendor/http-parser/.gitignore
new file mode 100644
index 0000000000..c122e76fb9
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/.gitignore
@@ -0,0 +1,30 @@
+/out/
+core
+tags
+*.o
+test
+test_g
+test_fast
+bench
+url_parser
+parsertrace
+parsertrace_g
+*.mk
+*.Makefile
+*.so.*
+*.exe.*
+*.exe
+*.a
+
+
+# Visual Studio uglies
+*.suo
+*.sln
+*.vcxproj
+*.vcxproj.filters
+*.vcxproj.user
+*.opensdf
+*.ncrunchsolution*
+*.sdf
+*.vsp
+*.psess
diff --git a/third_party/python/aiohttp/vendor/http-parser/.mailmap b/third_party/python/aiohttp/vendor/http-parser/.mailmap
new file mode 100644
index 0000000000..278d141263
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/.mailmap
@@ -0,0 +1,8 @@
+# update AUTHORS with:
+# git log --all --reverse --format='%aN <%aE>' | perl -ne 'BEGIN{print "# Authors ordered by first contribution.\n"} print unless $h{$_}; $h{$_} = 1' > AUTHORS
+Ryan Dahl <ry@tinyclouds.org>
+Salman Haq <salman.haq@asti-usa.com>
+Simon Zimmermann <simonz05@gmail.com>
+Thomas LE ROUX <thomas@november-eleven.fr> LE ROUX Thomas <thomas@procheo.fr>
+Thomas LE ROUX <thomas@november-eleven.fr> Thomas LE ROUX <thomas@procheo.fr>
+Fedor Indutny <fedor@indutny.com>
diff --git a/third_party/python/aiohttp/vendor/http-parser/.travis.yml b/third_party/python/aiohttp/vendor/http-parser/.travis.yml
new file mode 100644
index 0000000000..4b038e6e62
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/.travis.yml
@@ -0,0 +1,13 @@
+language: c
+
+compiler:
+ - clang
+ - gcc
+
+script:
+ - "make"
+
+notifications:
+ email: false
+ irc:
+ - "irc.freenode.net#node-ci"
diff --git a/third_party/python/aiohttp/vendor/http-parser/AUTHORS b/third_party/python/aiohttp/vendor/http-parser/AUTHORS
new file mode 100644
index 0000000000..5323b685ca
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/AUTHORS
@@ -0,0 +1,68 @@
+# Authors ordered by first contribution.
+Ryan Dahl <ry@tinyclouds.org>
+Jeremy Hinegardner <jeremy@hinegardner.org>
+Sergey Shepelev <temotor@gmail.com>
+Joe Damato <ice799@gmail.com>
+tomika <tomika_nospam@freemail.hu>
+Phoenix Sol <phoenix@burninglabs.com>
+Cliff Frey <cliff@meraki.com>
+Ewen Cheslack-Postava <ewencp@cs.stanford.edu>
+Santiago Gala <sgala@apache.org>
+Tim Becker <tim.becker@syngenio.de>
+Jeff Terrace <jterrace@gmail.com>
+Ben Noordhuis <info@bnoordhuis.nl>
+Nathan Rajlich <nathan@tootallnate.net>
+Mark Nottingham <mnot@mnot.net>
+Aman Gupta <aman@tmm1.net>
+Tim Becker <tim.becker@kuriositaet.de>
+Sean Cunningham <sean.cunningham@mandiant.com>
+Peter Griess <pg@std.in>
+Salman Haq <salman.haq@asti-usa.com>
+Cliff Frey <clifffrey@gmail.com>
+Jon Kolb <jon@b0g.us>
+Fouad Mardini <f.mardini@gmail.com>
+Paul Querna <pquerna@apache.org>
+Felix Geisendörfer <felix@debuggable.com>
+koichik <koichik@improvement.jp>
+Andre Caron <andre.l.caron@gmail.com>
+Ivo Raisr <ivosh@ivosh.net>
+James McLaughlin <jamie@lacewing-project.org>
+David Gwynne <loki@animata.net>
+Thomas LE ROUX <thomas@november-eleven.fr>
+Randy Rizun <rrizun@ortivawireless.com>
+Andre Louis Caron <andre.louis.caron@usherbrooke.ca>
+Simon Zimmermann <simonz05@gmail.com>
+Erik Dubbelboer <erik@dubbelboer.com>
+Martell Malone <martellmalone@gmail.com>
+Bertrand Paquet <bpaquet@octo.com>
+BogDan Vatra <bogdan@kde.org>
+Peter Faiman <peter@thepicard.org>
+Corey Richardson <corey@octayn.net>
+Tóth Tamás <tomika_nospam@freemail.hu>
+Cam Swords <cam.swords@gmail.com>
+Chris Dickinson <christopher.s.dickinson@gmail.com>
+Uli Köhler <ukoehler@btronik.de>
+Charlie Somerville <charlie@charliesomerville.com>
+Patrik Stutz <patrik.stutz@gmail.com>
+Fedor Indutny <fedor.indutny@gmail.com>
+runner <runner.mei@gmail.com>
+Alexis Campailla <alexis@janeasystems.com>
+David Wragg <david@wragg.org>
+Vinnie Falco <vinnie.falco@gmail.com>
+Alex Butum <alexbutum@linux.com>
+Rex Feng <rexfeng@gmail.com>
+Alex Kocharin <alex@kocharin.ru>
+Mark Koopman <markmontymark@yahoo.com>
+Helge Heß <me@helgehess.eu>
+Alexis La Goutte <alexis.lagoutte@gmail.com>
+George Miroshnykov <george.miroshnykov@gmail.com>
+Maciej Małecki <me@mmalecki.com>
+Marc O'Morain <github.com@marcomorain.com>
+Jeff Pinner <jpinner@twitter.com>
+Timothy J Fontaine <tjfontaine@gmail.com>
+Akagi201 <akagi201@gmail.com>
+Romain Giraud <giraud.romain@gmail.com>
+Jay Satiro <raysatiro@yahoo.com>
+Arne Steen <Arne.Steen@gmx.de>
+Kjell Schubert <kjell.schubert@gmail.com>
+Olivier Mengué <dolmen@cpan.org>
diff --git a/third_party/python/aiohttp/vendor/http-parser/LICENSE-MIT b/third_party/python/aiohttp/vendor/http-parser/LICENSE-MIT
new file mode 100644
index 0000000000..1ec0ab4e17
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/LICENSE-MIT
@@ -0,0 +1,19 @@
+Copyright Joyent, Inc. and other Node contributors.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/third_party/python/aiohttp/vendor/http-parser/Makefile b/third_party/python/aiohttp/vendor/http-parser/Makefile
new file mode 100644
index 0000000000..5d21221504
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/Makefile
@@ -0,0 +1,160 @@
+# Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to
+# deal in the Software without restriction, including without limitation the
+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+# sell copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+# IN THE SOFTWARE.
+
+PLATFORM ?= $(shell sh -c 'uname -s | tr "[A-Z]" "[a-z]"')
+HELPER ?=
+BINEXT ?=
+SOLIBNAME = libhttp_parser
+SOMAJOR = 2
+SOMINOR = 9
+SOREV = 4
+ifeq (darwin,$(PLATFORM))
+SOEXT ?= dylib
+SONAME ?= $(SOLIBNAME).$(SOMAJOR).$(SOMINOR).$(SOEXT)
+LIBNAME ?= $(SOLIBNAME).$(SOMAJOR).$(SOMINOR).$(SOREV).$(SOEXT)
+else ifeq (wine,$(PLATFORM))
+CC = winegcc
+BINEXT = .exe.so
+HELPER = wine
+else
+SOEXT ?= so
+SONAME ?= $(SOLIBNAME).$(SOEXT).$(SOMAJOR).$(SOMINOR)
+LIBNAME ?= $(SOLIBNAME).$(SOEXT).$(SOMAJOR).$(SOMINOR).$(SOREV)
+endif
+
+CC?=gcc
+AR?=ar
+
+CPPFLAGS ?=
+LDFLAGS ?=
+
+CPPFLAGS += -I.
+CPPFLAGS_DEBUG = $(CPPFLAGS) -DHTTP_PARSER_STRICT=1
+CPPFLAGS_DEBUG += $(CPPFLAGS_DEBUG_EXTRA)
+CPPFLAGS_FAST = $(CPPFLAGS) -DHTTP_PARSER_STRICT=0
+CPPFLAGS_FAST += $(CPPFLAGS_FAST_EXTRA)
+CPPFLAGS_BENCH = $(CPPFLAGS_FAST)
+
+CFLAGS += -Wall -Wextra -Werror
+CFLAGS_DEBUG = $(CFLAGS) -O0 -g $(CFLAGS_DEBUG_EXTRA)
+CFLAGS_FAST = $(CFLAGS) -O3 $(CFLAGS_FAST_EXTRA)
+CFLAGS_BENCH = $(CFLAGS_FAST) -Wno-unused-parameter
+CFLAGS_LIB = $(CFLAGS_FAST) -fPIC
+
+LDFLAGS_LIB = $(LDFLAGS) -shared
+
+INSTALL ?= install
+PREFIX ?= /usr/local
+LIBDIR = $(PREFIX)/lib
+INCLUDEDIR = $(PREFIX)/include
+
+ifeq (darwin,$(PLATFORM))
+LDFLAGS_LIB += -Wl,-install_name,$(LIBDIR)/$(SONAME)
+else
+# TODO(bnoordhuis) The native SunOS linker expects -h rather than -soname...
+LDFLAGS_LIB += -Wl,-soname=$(SONAME)
+endif
+
+test: test_g test_fast
+ $(HELPER) ./test_g$(BINEXT)
+ $(HELPER) ./test_fast$(BINEXT)
+
+test_g: http_parser_g.o test_g.o
+ $(CC) $(CFLAGS_DEBUG) $(LDFLAGS) http_parser_g.o test_g.o -o $@
+
+test_g.o: test.c http_parser.h Makefile
+ $(CC) $(CPPFLAGS_DEBUG) $(CFLAGS_DEBUG) -c test.c -o $@
+
+http_parser_g.o: http_parser.c http_parser.h Makefile
+ $(CC) $(CPPFLAGS_DEBUG) $(CFLAGS_DEBUG) -c http_parser.c -o $@
+
+test_fast: http_parser.o test.o http_parser.h
+ $(CC) $(CFLAGS_FAST) $(LDFLAGS) http_parser.o test.o -o $@
+
+test.o: test.c http_parser.h Makefile
+ $(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) -c test.c -o $@
+
+bench: http_parser.o bench.o
+ $(CC) $(CFLAGS_BENCH) $(LDFLAGS) http_parser.o bench.o -o $@
+
+bench.o: bench.c http_parser.h Makefile
+ $(CC) $(CPPFLAGS_BENCH) $(CFLAGS_BENCH) -c bench.c -o $@
+
+http_parser.o: http_parser.c http_parser.h Makefile
+ $(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) -c http_parser.c
+
+test-run-timed: test_fast
+ while(true) do time $(HELPER) ./test_fast$(BINEXT) > /dev/null; done
+
+test-valgrind: test_g
+ valgrind ./test_g
+
+libhttp_parser.o: http_parser.c http_parser.h Makefile
+ $(CC) $(CPPFLAGS_FAST) $(CFLAGS_LIB) -c http_parser.c -o libhttp_parser.o
+
+library: libhttp_parser.o
+ $(CC) $(LDFLAGS_LIB) -o $(LIBNAME) $<
+
+package: http_parser.o
+ $(AR) rcs libhttp_parser.a http_parser.o
+
+url_parser: http_parser.o contrib/url_parser.c
+ $(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) $^ -o $@
+
+url_parser_g: http_parser_g.o contrib/url_parser.c
+ $(CC) $(CPPFLAGS_DEBUG) $(CFLAGS_DEBUG) $^ -o $@
+
+parsertrace: http_parser.o contrib/parsertrace.c
+ $(CC) $(CPPFLAGS_FAST) $(CFLAGS_FAST) $^ -o parsertrace$(BINEXT)
+
+parsertrace_g: http_parser_g.o contrib/parsertrace.c
+ $(CC) $(CPPFLAGS_DEBUG) $(CFLAGS_DEBUG) $^ -o parsertrace_g$(BINEXT)
+
+tags: http_parser.c http_parser.h test.c
+ ctags $^
+
+install: library
+ $(INSTALL) -D http_parser.h $(DESTDIR)$(INCLUDEDIR)/http_parser.h
+ $(INSTALL) -D $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(LIBNAME)
+ ln -sf $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(SONAME)
+ ln -sf $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(SOLIBNAME).$(SOEXT)
+
+install-strip: library
+ $(INSTALL) -D http_parser.h $(DESTDIR)$(INCLUDEDIR)/http_parser.h
+ $(INSTALL) -D -s $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(LIBNAME)
+ ln -sf $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(SONAME)
+ ln -sf $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(SOLIBNAME).$(SOEXT)
+
+uninstall:
+ rm $(DESTDIR)$(INCLUDEDIR)/http_parser.h
+ rm $(DESTDIR)$(LIBDIR)/$(SOLIBNAME).$(SOEXT)
+ rm $(DESTDIR)$(LIBDIR)/$(SONAME)
+ rm $(DESTDIR)$(LIBDIR)/$(LIBNAME)
+
+clean:
+ rm -f *.o *.a tags test test_fast test_g \
+ http_parser.tar libhttp_parser.so.* \
+ url_parser url_parser_g parsertrace parsertrace_g \
+ *.exe *.exe.so
+
+contrib/url_parser.c: http_parser.h
+contrib/parsertrace.c: http_parser.h
+
+.PHONY: clean package test-run test-run-timed test-valgrind install install-strip uninstall
diff --git a/third_party/python/aiohttp/vendor/http-parser/README.md b/third_party/python/aiohttp/vendor/http-parser/README.md
new file mode 100644
index 0000000000..b265d71715
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/README.md
@@ -0,0 +1,246 @@
+HTTP Parser
+===========
+
+[![Build Status](https://api.travis-ci.org/nodejs/http-parser.svg?branch=master)](https://travis-ci.org/nodejs/http-parser)
+
+This is a parser for HTTP messages written in C. It parses both requests and
+responses. The parser is designed to be used in high-performance HTTP
+applications. It does not make any syscalls or allocations, it does not
+buffer data, and it can be interrupted at any time. Depending on your
+architecture, it only requires about 40 bytes of data per message
+stream (in a web server, that is per connection).
+
+Features:
+
+ * No dependencies
+ * Handles persistent streams (keep-alive)
+ * Decodes chunked encoding
+ * Upgrade support
+ * Defends against buffer overflow attacks
+
+The parser extracts the following information from HTTP messages:
+
+ * Header fields and values
+ * Content-Length
+ * Request method
+ * Response status code
+ * Transfer-Encoding
+ * HTTP version
+ * Request URL
+ * Message body
+
+
+Usage
+-----
+
+One `http_parser` object is used per TCP connection. Initialize the struct
+using `http_parser_init()` and set the callbacks. That might look something
+like this for a request parser:
+```c
+http_parser_settings settings;
+settings.on_url = my_url_callback;
+settings.on_header_field = my_header_field_callback;
+/* ... */
+
+http_parser *parser = malloc(sizeof(http_parser));
+http_parser_init(parser, HTTP_REQUEST);
+parser->data = my_socket;
+```
+
+When data is received on the socket, execute the parser and check for errors.
+
+```c
+size_t len = 80*1024, nparsed;
+char buf[len];
+ssize_t recved;
+
+recved = recv(fd, buf, len, 0);
+
+if (recved < 0) {
+ /* Handle error. */
+}
+
+/* Start up / continue the parser.
+ * Note we pass recved==0 to signal that EOF has been received.
+ */
+nparsed = http_parser_execute(parser, &settings, buf, recved);
+
+if (parser->upgrade) {
+ /* handle new protocol */
+} else if (nparsed != recved) {
+ /* Handle error. Usually just close the connection. */
+}
+```
+
+`http_parser` needs to know where the end of the stream is. For example, sometimes
+servers send responses without Content-Length and expect the client to
+consume input (for the body) until EOF. To tell `http_parser` about EOF, give
+`0` as the fourth parameter to `http_parser_execute()`. Callbacks and errors
+can still be encountered during an EOF, so one must still be prepared
+to receive them.
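+
+For example, once `recv()` reports that the peer closed its side:
+
+```c
+/* recv() returned 0: pass zero length to signal EOF to the parser. */
+nparsed = http_parser_execute(parser, &settings, buf, 0);
+```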
+
+Scalar valued message information such as `status_code`, `method`, and the
+HTTP version are stored in the parser structure. This data is only
+temporarily stored in `http_parser` and gets reset on each new message. If
+this information is needed later, copy it out of the structure during the
+`headers_complete` callback.
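+
+A sketch of copying the scalar data out in that callback (printing stands in
+for copying into your own struct):
+
+```c
+int my_headers_complete(http_parser *parser) {
+  /* These fields are reset when the next message starts, so read them now. */
+  printf("%s HTTP/%hu.%hu\n",
+         http_method_str((enum http_method)parser->method),
+         parser->http_major, parser->http_minor);
+  return 0;
+}
+```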
+
+The parser decodes the transfer-encoding for both requests and responses
+transparently. That is, a chunked encoding is decoded before being sent to
+the on_body callback.
+
+
+The Special Problem of Upgrade
+------------------------------
+
+`http_parser` supports upgrading the connection to a different protocol. An
+increasingly common example of this is the WebSocket protocol which sends
+a request like
+
+ GET /demo HTTP/1.1
+ Upgrade: WebSocket
+ Connection: Upgrade
+ Host: example.com
+ Origin: http://example.com
+ WebSocket-Protocol: sample
+
+followed by non-HTTP data.
+
+(See [RFC6455](https://tools.ietf.org/html/rfc6455) for more information on
+the WebSocket protocol.)
+
+To support this, the parser treats such a message as a normal HTTP message
+without a body, issuing both on_headers_complete and on_message_complete
+callbacks. However, http_parser_execute() will stop parsing at the end of the
+headers and return.
+
+The user is expected to check if `parser->upgrade` has been set to 1 after
+`http_parser_execute()` returns. Non-HTTP data begins in the supplied buffer
+at the offset given by the return value of `http_parser_execute()`.
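+
+A sketch of that check (continuing the `buf`, `recved`, and `nparsed`
+variables from the usage example above):
+
+```c
+nparsed = http_parser_execute(parser, &settings, buf, recved);
+
+if (parser->upgrade) {
+  /* Everything past the parsed headers belongs to the new protocol. */
+  const char *extra = buf + nparsed;
+  size_t extra_len = recved - nparsed;
+  /* hand (extra, extra_len) over to, e.g., the WebSocket implementation */
+} else if (nparsed != (size_t)recved) {
+  /* Handle error. Usually just close the connection. */
+}
+```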
+
+
+Callbacks
+---------
+
+During the `http_parser_execute()` call, the callbacks set in
+`http_parser_settings` will be executed. The parser maintains state and
+never looks behind, so buffering the data is not necessary. If you need to
+save certain data for later usage, you can do that from the callbacks.
+
+There are two types of callbacks:
+
+* notification `typedef int (*http_cb) (http_parser*);`
+ Callbacks: on_message_begin, on_headers_complete, on_message_complete.
+* data `typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);`
+ Callbacks: (requests only) on_url,
+ (common) on_header_field, on_header_value, on_body;
+
+Callbacks must return 0 on success. Returning a non-zero value indicates an
+error to the parser, causing it to exit immediately.
+
+For cases where it is necessary to pass local information to/from a callback,
+the `http_parser` object's `data` field can be used.
+An example of such a case is when using threads to handle a socket connection,
+parse a request, and then give a response over that socket. By instantiating
+a thread-local struct containing relevant data (e.g. the accepted socket,
+memory allocated for callbacks to write into, etc.), a parser's callbacks can
+pass data between the scope of the thread and the scope of the callback in a
+thread-safe manner. This allows `http_parser` to be used in multi-threaded
+contexts.
+
+Example:
+```c
+ typedef struct {
+ socket_t sock;
+ void* buffer;
+ int buf_len;
+ } custom_data_t;
+
+
+int my_url_callback(http_parser* parser, const char *at, size_t length) {
+  /* Access the thread-local custom_data_t struct through parser->data.
+     Use it to save parsed data into the thread-local buffer for later
+     use, or to communicate over the socket.
+   */
+  parser->data;
+ ...
+ return 0;
+}
+
+...
+
+void http_parser_thread(socket_t sock) {
+ int nparsed = 0;
+ /* allocate memory for user data */
+ custom_data_t *my_data = malloc(sizeof(custom_data_t));
+
+ /* some information for use by callbacks.
+ * achieves thread -> callback information flow */
+ my_data->sock = sock;
+
+ /* instantiate a thread-local parser */
+ http_parser *parser = malloc(sizeof(http_parser));
+ http_parser_init(parser, HTTP_REQUEST); /* initialise parser */
+ /* this custom data reference is accessible through the reference to the
+ parser supplied to callback functions */
+ parser->data = my_data;
+
+ http_parser_settings settings; /* set up callbacks */
+ settings.on_url = my_url_callback;
+
+ /* execute parser */
+ nparsed = http_parser_execute(parser, &settings, buf, recved);
+
+ ...
+ /* parsed information copied from callback.
+ can now perform action on data copied into thread-local memory from callbacks.
+ achieves callback -> thread information flow */
+ my_data->buffer;
+ ...
+}
+
+```
+
+If you parse an HTTP message in chunks (i.e. `read()` the request line
+from the socket, parse it, read half the headers, parse again, etc.), your data
+callbacks may be called more than once. `http_parser` guarantees that the data
+pointer is only valid for the lifetime of the callback. You can also `read()`
+into a heap-allocated buffer to avoid copying memory around if this fits your
+application.
+
+Reading headers can be tricky if you read/parse them partially.
+Basically, you need to remember whether the last header callback was a field or
+a value and apply the following logic:
+
+ (on_header_field and on_header_value shortened to on_h_*)
+ ------------------------ ------------ --------------------------------------------
+ | State (prev. callback) | Callback | Description/action |
+ ------------------------ ------------ --------------------------------------------
+ | nothing (first call) | on_h_field | Allocate new buffer and copy callback data |
+ | | | into it |
+ ------------------------ ------------ --------------------------------------------
+ | value | on_h_field | New header started. |
+ | | | Copy current name,value buffers to headers |
+ | | | list and allocate new buffer for new name |
+ ------------------------ ------------ --------------------------------------------
+ | field | on_h_field | Previous name continues. Reallocate name |
+ | | | buffer and append callback data to it |
+ ------------------------ ------------ --------------------------------------------
+ | field | on_h_value | Value for current header started. Allocate |
+ | | | new buffer and copy callback data to it |
+ ------------------------ ------------ --------------------------------------------
+ | value | on_h_value | Value continues. Reallocate value buffer |
+ | | | and append callback data to it |
+ ------------------------ ------------ --------------------------------------------
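+
+A compact sketch of that logic (`grow_buf()`, `reset_buf()`, `commit_header()`,
+`name`, and `value` are hypothetical helpers/buffers, not part of this
+library):
+
+```c
+static int last_cb_was_value = 0;
+
+int on_h_field(http_parser *p, const char *at, size_t length) {
+  (void)p;
+  if (last_cb_was_value) {
+    commit_header();             /* name/value pair finished: store it */
+    reset_buf(&name);            /* new header: start a fresh name buffer */
+  }
+  grow_buf(&name, at, length);   /* first call or name continuation */
+  last_cb_was_value = 0;
+  return 0;
+}
+
+int on_h_value(http_parser *p, const char *at, size_t length) {
+  (void)p;
+  if (!last_cb_was_value)
+    reset_buf(&value);           /* value for the current header started */
+  grow_buf(&value, at, length);  /* value continuation appends */
+  last_cb_was_value = 1;
+  return 0;
+}
+```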
+
+
+Parsing URLs
+------------
+
+A simplistic zero-copy URL parser is provided as `http_parser_parse_url()`.
+Users of this library may wish to use it to parse URLs constructed from
+consecutive `on_url` callbacks.
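+
+For example (a sketch; `url` is assumed to be a NUL-terminated URL assembled
+from those callbacks):
+
+```c
+struct http_parser_url u;
+http_parser_url_init(&u);
+if (http_parser_parse_url(url, strlen(url), 0, &u) == 0 &&
+    (u.field_set & (1 << UF_PATH))) {
+  printf("path: %.*s\n", (int)u.field_data[UF_PATH].len,
+         url + u.field_data[UF_PATH].off);
+}
+```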
+
+See examples of reading in headers:
+
+* [partial example](http://gist.github.com/155877) in C
+* [from http-parser tests](http://github.com/joyent/http-parser/blob/37a0ff8/test.c#L403) in C
+* [from Node library](http://github.com/joyent/node/blob/842eaf4/src/http.js#L284) in Javascript
diff --git a/third_party/python/aiohttp/vendor/http-parser/bench.c b/third_party/python/aiohttp/vendor/http-parser/bench.c
new file mode 100644
index 0000000000..678f5556c5
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/bench.c
@@ -0,0 +1,128 @@
+/* Copyright Fedor Indutny. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include "http_parser.h"
+#include <assert.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/time.h>
+
+/* 8 gb */
+static const int64_t kBytes = 8LL << 30;
+
+static const char data[] =
+ "POST /joyent/http-parser HTTP/1.1\r\n"
+ "Host: github.com\r\n"
+ "DNT: 1\r\n"
+ "Accept-Encoding: gzip, deflate, sdch\r\n"
+ "Accept-Language: ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4\r\n"
+ "User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) "
+ "AppleWebKit/537.36 (KHTML, like Gecko) "
+ "Chrome/39.0.2171.65 Safari/537.36\r\n"
+ "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,"
+ "image/webp,*/*;q=0.8\r\n"
+ "Referer: https://github.com/joyent/http-parser\r\n"
+ "Connection: keep-alive\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "Cache-Control: max-age=0\r\n\r\nb\r\nhello world\r\n0\r\n";
+static const size_t data_len = sizeof(data) - 1;
+
+static int on_info(http_parser* p) {
+ return 0;
+}
+
+
+static int on_data(http_parser* p, const char *at, size_t length) {
+ return 0;
+}
+
+static http_parser_settings settings = {
+ .on_message_begin = on_info,
+ .on_headers_complete = on_info,
+ .on_message_complete = on_info,
+ .on_header_field = on_data,
+ .on_header_value = on_data,
+ .on_url = on_data,
+ .on_status = on_data,
+ .on_body = on_data
+};
+
+int bench(int iter_count, int silent) {
+ struct http_parser parser;
+ int i;
+ int err;
+ struct timeval start;
+ struct timeval end;
+
+ if (!silent) {
+ err = gettimeofday(&start, NULL);
+ assert(err == 0);
+ }
+
+ fprintf(stderr, "req_len=%d\n", (int) data_len);
+ for (i = 0; i < iter_count; i++) {
+ size_t parsed;
+ http_parser_init(&parser, HTTP_REQUEST);
+
+ parsed = http_parser_execute(&parser, &settings, data, data_len);
+ assert(parsed == data_len);
+ }
+
+ if (!silent) {
+ double elapsed;
+ double bw;
+ double total;
+
+ err = gettimeofday(&end, NULL);
+ assert(err == 0);
+
+ fprintf(stdout, "Benchmark result:\n");
+
+ elapsed = (double) (end.tv_sec - start.tv_sec) +
+ (end.tv_usec - start.tv_usec) * 1e-6f;
+
+ total = (double) iter_count * data_len;
+ bw = (double) total / elapsed;
+
+ fprintf(stdout, "%.2f mb | %.2f mb/s | %.2f req/sec | %.2f s\n",
+ (double) total / (1024 * 1024),
+ bw / (1024 * 1024),
+ (double) iter_count / elapsed,
+ elapsed);
+
+ fflush(stdout);
+ }
+
+ return 0;
+}
+
+int main(int argc, char** argv) {
+ int64_t iterations;
+
+ iterations = kBytes / (int64_t) data_len;
+ if (argc == 2 && strcmp(argv[1], "infinite") == 0) {
+ for (;;)
+ bench(iterations, 1);
+ return 0;
+ } else {
+ return bench(iterations, 0);
+ }
+}
diff --git a/third_party/python/aiohttp/vendor/http-parser/contrib/parsertrace.c b/third_party/python/aiohttp/vendor/http-parser/contrib/parsertrace.c
new file mode 100644
index 0000000000..3daa7f46a1
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/contrib/parsertrace.c
@@ -0,0 +1,157 @@
+/* Copyright Joyent, Inc. and other Node contributors.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+/* Dump what the parser finds to stdout as it happens */
+
+#include "http_parser.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int on_message_begin(http_parser* _) {
+ (void)_;
+ printf("\n***MESSAGE BEGIN***\n\n");
+ return 0;
+}
+
+int on_headers_complete(http_parser* _) {
+ (void)_;
+ printf("\n***HEADERS COMPLETE***\n\n");
+ return 0;
+}
+
+int on_message_complete(http_parser* _) {
+ (void)_;
+ printf("\n***MESSAGE COMPLETE***\n\n");
+ return 0;
+}
+
+int on_url(http_parser* _, const char* at, size_t length) {
+ (void)_;
+ printf("Url: %.*s\n", (int)length, at);
+ return 0;
+}
+
+int on_header_field(http_parser* _, const char* at, size_t length) {
+ (void)_;
+ printf("Header field: %.*s\n", (int)length, at);
+ return 0;
+}
+
+int on_header_value(http_parser* _, const char* at, size_t length) {
+ (void)_;
+ printf("Header value: %.*s\n", (int)length, at);
+ return 0;
+}
+
+int on_body(http_parser* _, const char* at, size_t length) {
+ (void)_;
+ printf("Body: %.*s\n", (int)length, at);
+ return 0;
+}
+
+void usage(const char* name) {
+ fprintf(stderr,
+ "Usage: %s $type $filename\n"
+ " type: -x, where x is one of {r,b,q}\n"
+ " parses file as a Response, reQuest, or Both\n",
+ name);
+ exit(EXIT_FAILURE);
+}
+
+int main(int argc, char* argv[]) {
+ enum http_parser_type file_type;
+
+ if (argc != 3) {
+ usage(argv[0]);
+ }
+
+ char* type = argv[1];
+ if (type[0] != '-') {
+ usage(argv[0]);
+ }
+
+ switch (type[1]) {
+ /* in the case of "-", type[1] will be NUL */
+ case 'r':
+ file_type = HTTP_RESPONSE;
+ break;
+ case 'q':
+ file_type = HTTP_REQUEST;
+ break;
+ case 'b':
+ file_type = HTTP_BOTH;
+ break;
+ default:
+ usage(argv[0]);
+ }
+
+ char* filename = argv[2];
+ FILE* file = fopen(filename, "r");
+  if (file == NULL) {
+    perror("fopen");
+    return EXIT_FAILURE;  /* nothing to clean up yet; fclose(NULL) is undefined */
+  }
+
+ fseek(file, 0, SEEK_END);
+ long file_length = ftell(file);
+ if (file_length == -1) {
+ perror("ftell");
+ goto fail;
+ }
+ fseek(file, 0, SEEK_SET);
+
+  char* data = malloc(file_length);
+  if (data == NULL) {
+    perror("malloc");
+    goto fail;
+  }
+ if (fread(data, 1, file_length, file) != (size_t)file_length) {
+ fprintf(stderr, "couldn't read entire file\n");
+ free(data);
+ goto fail;
+ }
+
+ http_parser_settings settings;
+ memset(&settings, 0, sizeof(settings));
+ settings.on_message_begin = on_message_begin;
+ settings.on_url = on_url;
+ settings.on_header_field = on_header_field;
+ settings.on_header_value = on_header_value;
+ settings.on_headers_complete = on_headers_complete;
+ settings.on_body = on_body;
+ settings.on_message_complete = on_message_complete;
+
+ http_parser parser;
+ http_parser_init(&parser, file_type);
+ size_t nparsed = http_parser_execute(&parser, &settings, data, file_length);
+ free(data);
+
+ if (nparsed != (size_t)file_length) {
+ fprintf(stderr,
+ "Error: %s (%s)\n",
+ http_errno_description(HTTP_PARSER_ERRNO(&parser)),
+ http_errno_name(HTTP_PARSER_ERRNO(&parser)));
+ goto fail;
+ }
+
+ return EXIT_SUCCESS;
+
+fail:
+ fclose(file);
+ return EXIT_FAILURE;
+}
diff --git a/third_party/python/aiohttp/vendor/http-parser/contrib/url_parser.c b/third_party/python/aiohttp/vendor/http-parser/contrib/url_parser.c
new file mode 100644
index 0000000000..f235bed9e4
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/contrib/url_parser.c
@@ -0,0 +1,47 @@
+#include "http_parser.h"
+#include <stdio.h>
+#include <string.h>
+
+void
+dump_url (const char *url, const struct http_parser_url *u)
+{
+ unsigned int i;
+
+ printf("\tfield_set: 0x%x, port: %u\n", u->field_set, u->port);
+ for (i = 0; i < UF_MAX; i++) {
+ if ((u->field_set & (1 << i)) == 0) {
+ printf("\tfield_data[%u]: unset\n", i);
+ continue;
+ }
+
+ printf("\tfield_data[%u]: off: %u, len: %u, part: %.*s\n",
+ i,
+ u->field_data[i].off,
+ u->field_data[i].len,
+ u->field_data[i].len,
+ url + u->field_data[i].off);
+ }
+}
+
+int main(int argc, char ** argv) {
+ struct http_parser_url u;
+ int len, connect, result;
+
+ if (argc != 3) {
+ printf("Syntax : %s connect|get url\n", argv[0]);
+ return 1;
+ }
+ len = strlen(argv[2]);
+ connect = strcmp("connect", argv[1]) == 0 ? 1 : 0;
+ printf("Parsing %s, connect %d\n", argv[2], connect);
+
+ http_parser_url_init(&u);
+ result = http_parser_parse_url(argv[2], len, connect, &u);
+ if (result != 0) {
+ printf("Parse error : %d\n", result);
+ return result;
+ }
+ printf("Parse ok, result : \n");
+ dump_url(argv[2], &u);
+ return 0;
+}
diff --git a/third_party/python/aiohttp/vendor/http-parser/http_parser.c b/third_party/python/aiohttp/vendor/http-parser/http_parser.c
new file mode 100644
index 0000000000..95ff42f783
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/http_parser.c
@@ -0,0 +1,2568 @@
+/* Copyright Joyent, Inc. and other Node contributors.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include "http_parser.h"
+#include <assert.h>
+#include <stddef.h>
+#include <ctype.h>
+#include <string.h>
+#include <limits.h>
+
+static uint32_t max_header_size = HTTP_MAX_HEADER_SIZE;
+
+#ifndef ULLONG_MAX
+# define ULLONG_MAX ((uint64_t) -1) /* 2^64-1 */
+#endif
+
+#ifndef MIN
+# define MIN(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+#ifndef ARRAY_SIZE
+# define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+#ifndef BIT_AT
+# define BIT_AT(a, i) \
+ (!!((unsigned int) (a)[(unsigned int) (i) >> 3] & \
+ (1 << ((unsigned int) (i) & 7))))
+#endif
+
+#ifndef ELEM_AT
+# define ELEM_AT(a, i, v) ((unsigned int) (i) < ARRAY_SIZE(a) ? (a)[(i)] : (v))
+#endif
+
+#define SET_ERRNO(e) \
+do { \
+ parser->nread = nread; \
+ parser->http_errno = (e); \
+} while(0)
+
+#define CURRENT_STATE() p_state
+#define UPDATE_STATE(V) p_state = (enum state) (V);
+#define RETURN(V) \
+do { \
+ parser->nread = nread; \
+ parser->state = CURRENT_STATE(); \
+ return (V); \
+} while (0);
+#define REEXECUTE() \
+ goto reexecute; \
+
+
+#ifdef __GNUC__
+# define LIKELY(X) __builtin_expect(!!(X), 1)
+# define UNLIKELY(X) __builtin_expect(!!(X), 0)
+#else
+# define LIKELY(X) (X)
+# define UNLIKELY(X) (X)
+#endif
+
+
+/* Run the notify callback FOR, returning ER if it fails */
+#define CALLBACK_NOTIFY_(FOR, ER) \
+do { \
+ assert(HTTP_PARSER_ERRNO(parser) == HPE_OK); \
+ \
+ if (LIKELY(settings->on_##FOR)) { \
+ parser->state = CURRENT_STATE(); \
+ if (UNLIKELY(0 != settings->on_##FOR(parser))) { \
+ SET_ERRNO(HPE_CB_##FOR); \
+ } \
+ UPDATE_STATE(parser->state); \
+ \
+ /* We either errored above or got paused; get out */ \
+ if (UNLIKELY(HTTP_PARSER_ERRNO(parser) != HPE_OK)) { \
+ return (ER); \
+ } \
+ } \
+} while (0)
+
+/* Run the notify callback FOR and consume the current byte */
+#define CALLBACK_NOTIFY(FOR) CALLBACK_NOTIFY_(FOR, p - data + 1)
+
+/* Run the notify callback FOR and don't consume the current byte */
+#define CALLBACK_NOTIFY_NOADVANCE(FOR) CALLBACK_NOTIFY_(FOR, p - data)
+
+/* Run data callback FOR with LEN bytes, returning ER if it fails */
+#define CALLBACK_DATA_(FOR, LEN, ER) \
+do { \
+ assert(HTTP_PARSER_ERRNO(parser) == HPE_OK); \
+ \
+ if (FOR##_mark) { \
+ if (LIKELY(settings->on_##FOR)) { \
+ parser->state = CURRENT_STATE(); \
+ if (UNLIKELY(0 != \
+ settings->on_##FOR(parser, FOR##_mark, (LEN)))) { \
+ SET_ERRNO(HPE_CB_##FOR); \
+ } \
+ UPDATE_STATE(parser->state); \
+ \
+ /* We either errored above or got paused; get out */ \
+ if (UNLIKELY(HTTP_PARSER_ERRNO(parser) != HPE_OK)) { \
+ return (ER); \
+ } \
+ } \
+ FOR##_mark = NULL; \
+ } \
+} while (0)
+
+/* Run the data callback FOR and consume the current byte */
+#define CALLBACK_DATA(FOR) \
+ CALLBACK_DATA_(FOR, p - FOR##_mark, p - data + 1)
+
+/* Run the data callback FOR and don't consume the current byte */
+#define CALLBACK_DATA_NOADVANCE(FOR) \
+ CALLBACK_DATA_(FOR, p - FOR##_mark, p - data)
+
+/* Set the mark FOR; non-destructive if mark is already set */
+#define MARK(FOR) \
+do { \
+ if (!FOR##_mark) { \
+ FOR##_mark = p; \
+ } \
+} while (0)
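+
+/* A minimal sketch of what the mark machinery buys us: MARK() records where
+ * a token started, and CALLBACK_DATA() later hands the user the
+ * (pointer, length) span. Spans are not NUL-terminated, and a single logical
+ * field may be delivered in pieces if the input is fed to
+ * http_parser_execute() in multiple chunks:
+ *
+ *   int on_url(http_parser *p, const char *at, size_t length) {
+ *     printf("url: %.*s\n", (int) length, at);
+ *     return 0;   // returning non-zero aborts the parse
+ *   }
+ */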
+
+/* Don't allow the total size of the HTTP headers (including the status
+ * line) to exceed max_header_size. This check is here to protect
+ * embedders against denial-of-service attacks where the attacker feeds
+ * us a never-ending header that the embedder keeps buffering.
+ *
+ * This check is arguably the responsibility of embedders but we're doing
+ * it on the embedder's behalf because most won't bother and this way we
+ * make the web a little safer. max_header_size is still far bigger
+ * than any reasonable request or response so this should never affect
+ * day-to-day operation.
+ */
+#define COUNT_HEADER_SIZE(V) \
+do { \
+ nread += (uint32_t)(V); \
+ if (UNLIKELY(nread > max_header_size)) { \
+ SET_ERRNO(HPE_HEADER_OVERFLOW); \
+ goto error; \
+ } \
+} while (0)
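+
+/* Embedders that legitimately need larger headers can raise the limit at
+ * runtime; a sketch, assuming the http_parser_set_max_header_size() setter
+ * declared in http_parser.h:
+ *
+ *   http_parser_set_max_header_size(160 * 1024);   // allow up to 160 KiB
+ */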
+
+
+#define PROXY_CONNECTION "proxy-connection"
+#define CONNECTION "connection"
+#define CONTENT_LENGTH "content-length"
+#define TRANSFER_ENCODING "transfer-encoding"
+#define UPGRADE "upgrade"
+#define CHUNKED "chunked"
+#define KEEP_ALIVE "keep-alive"
+#define CLOSE "close"
+
+
+static const char *method_strings[] =
+ {
+#define XX(num, name, string) #string,
+ HTTP_METHOD_MAP(XX)
+#undef XX
+ };
+
+
+/* Tokens as defined by RFC 2616. The table also lowercases them.
+ * token = 1*<any CHAR except CTLs or separators>
+ * separators = "(" | ")" | "<" | ">" | "@"
+ * | "," | ";" | ":" | "\" | <">
+ * | "/" | "[" | "]" | "?" | "="
+ * | "{" | "}" | SP | HT
+ */
+static const char tokens[256] = {
+/* 0 nul 1 soh 2 stx 3 etx 4 eot 5 enq 6 ack 7 bel */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+/* 8 bs 9 ht 10 nl 11 vt 12 np 13 cr 14 so 15 si */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+/* 16 dle 17 dc1 18 dc2 19 dc3 20 dc4 21 nak 22 syn 23 etb */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+/* 24 can 25 em 26 sub 27 esc 28 fs 29 gs 30 rs 31 us */
+ 0, 0, 0, 0, 0, 0, 0, 0,
+/* 32 sp 33 ! 34 " 35 # 36 $ 37 % 38 & 39 ' */
+ ' ', '!', 0, '#', '$', '%', '&', '\'',
+/* 40 ( 41 ) 42 * 43 + 44 , 45 - 46 . 47 / */
+ 0, 0, '*', '+', 0, '-', '.', 0,
+/* 48 0 49 1 50 2 51 3 52 4 53 5 54 6 55 7 */
+ '0', '1', '2', '3', '4', '5', '6', '7',
+/* 56 8 57 9 58 : 59 ; 60 < 61 = 62 > 63 ? */
+ '8', '9', 0, 0, 0, 0, 0, 0,
+/* 64 @ 65 A 66 B 67 C 68 D 69 E 70 F 71 G */
+ 0, 'a', 'b', 'c', 'd', 'e', 'f', 'g',
+/* 72 H 73 I 74 J 75 K 76 L 77 M 78 N 79 O */
+ 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
+/* 80 P 81 Q 82 R 83 S 84 T 85 U 86 V 87 W */
+ 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
+/* 88 X 89 Y 90 Z 91 [ 92 \ 93 ] 94 ^ 95 _ */
+ 'x', 'y', 'z', 0, 0, 0, '^', '_',
+/* 96 ` 97 a 98 b 99 c 100 d 101 e 102 f 103 g */
+ '`', 'a', 'b', 'c', 'd', 'e', 'f', 'g',
+/* 104 h 105 i 106 j 107 k 108 l 109 m 110 n 111 o */
+ 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
+/* 112 p 113 q 114 r 115 s 116 t 117 u 118 v 119 w */
+ 'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
+/* 120 x 121 y 122 z 123 { 124 | 125 } 126 ~ 127 del */
+ 'x', 'y', 'z', 0, '|', 0, '~', 0 };
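+
+/* For example, tokens['A'] == 'a' (the lookup lowercases token characters),
+ * tokens['-'] == '-', and tokens[':'] == 0 because ':' is a separator and
+ * therefore not a valid token character. */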
+
+
+static const int8_t unhex[256] =
+ {-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
+ ,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
+ ,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
+ , 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,-1,-1,-1,-1,-1,-1
+ ,-1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1
+ ,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
+ ,-1,10,11,12,13,14,15,-1,-1,-1,-1,-1,-1,-1,-1,-1
+ ,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1
+ };
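+
+/* For example, unhex['0'] == 0, unhex['a'] == unhex['A'] == 10, and
+ * unhex['g'] == -1, which the chunk-size parser treats as "not a hex
+ * digit". */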
+
+
+#if HTTP_PARSER_STRICT
+# define T(v) 0
+#else
+# define T(v) v
+#endif
+
+
+static const uint8_t normal_url_char[32] = {
+/* 0 nul 1 soh 2 stx 3 etx 4 eot 5 enq 6 ack 7 bel */
+ 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0,
+/* 8 bs 9 ht 10 nl 11 vt 12 np 13 cr 14 so 15 si */
+ 0 | T(2) | 0 | 0 | T(16) | 0 | 0 | 0,
+/* 16 dle 17 dc1 18 dc2 19 dc3 20 dc4 21 nak 22 syn 23 etb */
+ 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0,
+/* 24 can 25 em 26 sub 27 esc 28 fs 29 gs 30 rs 31 us */
+ 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0,
+/* 32 sp 33 ! 34 " 35 # 36 $ 37 % 38 & 39 ' */
+ 0 | 2 | 4 | 0 | 16 | 32 | 64 | 128,
+/* 40 ( 41 ) 42 * 43 + 44 , 45 - 46 . 47 / */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 48 0 49 1 50 2 51 3 52 4 53 5 54 6 55 7 */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 56 8 57 9 58 : 59 ; 60 < 61 = 62 > 63 ? */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 0,
+/* 64 @ 65 A 66 B 67 C 68 D 69 E 70 F 71 G */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 72 H 73 I 74 J 75 K 76 L 77 M 78 N 79 O */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 80 P 81 Q 82 R 83 S 84 T 85 U 86 V 87 W */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 88 X 89 Y 90 Z 91 [ 92 \ 93 ] 94 ^ 95 _ */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 96 ` 97 a 98 b 99 c 100 d 101 e 102 f 103 g */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 104 h 105 i 106 j 107 k 108 l 109 m 110 n 111 o */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 112 p 113 q 114 r 115 s 116 t 117 u 118 v 119 w */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 128,
+/* 120 x 121 y 122 z 123 { 124 | 125 } 126 ~ 127 del */
+ 1 | 2 | 4 | 8 | 16 | 32 | 64 | 0, };
+
+#undef T
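+
+/* normal_url_char packs a 256-bit lookup table into 32 bytes; BIT_AT() tests
+ * the bit for one character. A sketch of the equivalent check:
+ *
+ *   if (BIT_AT(normal_url_char, (unsigned char) ch)) { ... URL char ... }
+ *
+ * e.g. BIT_AT(normal_url_char, 'a') is non-zero while
+ * BIT_AT(normal_url_char, ' ') is 0. */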
+
+enum state
+ { s_dead = 1 /* important that this is > 0 */
+
+ , s_start_req_or_res
+ , s_res_or_resp_H
+ , s_start_res
+ , s_res_H
+ , s_res_HT
+ , s_res_HTT
+ , s_res_HTTP
+ , s_res_http_major
+ , s_res_http_dot
+ , s_res_http_minor
+ , s_res_http_end
+ , s_res_first_status_code
+ , s_res_status_code
+ , s_res_status_start
+ , s_res_status
+ , s_res_line_almost_done
+
+ , s_start_req
+
+ , s_req_method
+ , s_req_spaces_before_url
+ , s_req_schema
+ , s_req_schema_slash
+ , s_req_schema_slash_slash
+ , s_req_server_start
+ , s_req_server
+ , s_req_server_with_at
+ , s_req_path
+ , s_req_query_string_start
+ , s_req_query_string
+ , s_req_fragment_start
+ , s_req_fragment
+ , s_req_http_start
+ , s_req_http_H
+ , s_req_http_HT
+ , s_req_http_HTT
+ , s_req_http_HTTP
+ , s_req_http_I
+ , s_req_http_IC
+ , s_req_http_major
+ , s_req_http_dot
+ , s_req_http_minor
+ , s_req_http_end
+ , s_req_line_almost_done
+
+ , s_header_field_start
+ , s_header_field
+ , s_header_value_discard_ws
+ , s_header_value_discard_ws_almost_done
+ , s_header_value_discard_lws
+ , s_header_value_start
+ , s_header_value
+ , s_header_value_lws
+
+ , s_header_almost_done
+
+ , s_chunk_size_start
+ , s_chunk_size
+ , s_chunk_parameters
+ , s_chunk_size_almost_done
+
+ , s_headers_almost_done
+ , s_headers_done
+
+ /* Important: 's_headers_done' must be the last 'header' state. All
+ * states beyond this must be 'body' states. It is used for overflow
+ * checking. See the PARSING_HEADER() macro.
+ */
+
+ , s_chunk_data
+ , s_chunk_data_almost_done
+ , s_chunk_data_done
+
+ , s_body_identity
+ , s_body_identity_eof
+
+ , s_message_done
+ };
+
+
+#define PARSING_HEADER(state) (state <= s_headers_done)
+
+
+enum header_states
+ { h_general = 0
+ , h_C
+ , h_CO
+ , h_CON
+
+ , h_matching_connection
+ , h_matching_proxy_connection
+ , h_matching_content_length
+ , h_matching_transfer_encoding
+ , h_matching_upgrade
+
+ , h_connection
+ , h_content_length
+ , h_content_length_num
+ , h_content_length_ws
+ , h_transfer_encoding
+ , h_upgrade
+
+ , h_matching_transfer_encoding_token_start
+ , h_matching_transfer_encoding_chunked
+ , h_matching_transfer_encoding_token
+
+ , h_matching_connection_token_start
+ , h_matching_connection_keep_alive
+ , h_matching_connection_close
+ , h_matching_connection_upgrade
+ , h_matching_connection_token
+
+ , h_transfer_encoding_chunked
+ , h_connection_keep_alive
+ , h_connection_close
+ , h_connection_upgrade
+ };
+
+enum http_host_state
+ {
+ s_http_host_dead = 1
+ , s_http_userinfo_start
+ , s_http_userinfo
+ , s_http_host_start
+ , s_http_host_v6_start
+ , s_http_host
+ , s_http_host_v6
+ , s_http_host_v6_end
+ , s_http_host_v6_zone_start
+ , s_http_host_v6_zone
+ , s_http_host_port_start
+ , s_http_host_port
+};
+
+/* Macros for character classes; depends on strict-mode */
+#define CR '\r'
+#define LF '\n'
+#define LOWER(c) (unsigned char)(c | 0x20)
+#define IS_ALPHA(c) (LOWER(c) >= 'a' && LOWER(c) <= 'z')
+#define IS_NUM(c) ((c) >= '0' && (c) <= '9')
+#define IS_ALPHANUM(c) (IS_ALPHA(c) || IS_NUM(c))
+#define IS_HEX(c) (IS_NUM(c) || (LOWER(c) >= 'a' && LOWER(c) <= 'f'))
+#define IS_MARK(c) ((c) == '-' || (c) == '_' || (c) == '.' || \
+ (c) == '!' || (c) == '~' || (c) == '*' || (c) == '\'' || (c) == '(' || \
+ (c) == ')')
+#define IS_USERINFO_CHAR(c) (IS_ALPHANUM(c) || IS_MARK(c) || (c) == '%' || \
+ (c) == ';' || (c) == ':' || (c) == '&' || (c) == '=' || (c) == '+' || \
+ (c) == '$' || (c) == ',')
+
+#define STRICT_TOKEN(c) ((c == ' ') ? 0 : tokens[(unsigned char)c])
+
+#if HTTP_PARSER_STRICT
+#define TOKEN(c) STRICT_TOKEN(c)
+#define IS_URL_CHAR(c) (BIT_AT(normal_url_char, (unsigned char)c))
+#define IS_HOST_CHAR(c) (IS_ALPHANUM(c) || (c) == '.' || (c) == '-')
+#else
+#define TOKEN(c) tokens[(unsigned char)c]
+#define IS_URL_CHAR(c) \
+ (BIT_AT(normal_url_char, (unsigned char)c) || ((c) & 0x80))
+#define IS_HOST_CHAR(c) \
+ (IS_ALPHANUM(c) || (c) == '.' || (c) == '-' || (c) == '_')
+#endif
+
+/**
+ * Verify that a char is a valid visible (printable) US-ASCII
+ * character or %x80-FF
+ **/
+#define IS_HEADER_CHAR(ch) \
+ (ch == CR || ch == LF || ch == 9 || ((unsigned char)ch > 31 && ch != 127))
+
+#define start_state (parser->type == HTTP_REQUEST ? s_start_req : s_start_res)
+
+
+#if HTTP_PARSER_STRICT
+# define STRICT_CHECK(cond) \
+do { \
+ if (cond) { \
+ SET_ERRNO(HPE_STRICT); \
+ goto error; \
+ } \
+} while (0)
+# define NEW_MESSAGE() (http_should_keep_alive(parser) ? start_state : s_dead)
+#else
+# define STRICT_CHECK(cond)
+# define NEW_MESSAGE() start_state
+#endif
+
+
+/* Map errno values to strings for human-readable output */
+#define HTTP_STRERROR_GEN(n, s) { "HPE_" #n, s },
+static struct {
+ const char *name;
+ const char *description;
+} http_strerror_tab[] = {
+ HTTP_ERRNO_MAP(HTTP_STRERROR_GEN)
+};
+#undef HTTP_STRERROR_GEN
+
+int http_message_needs_eof(const http_parser *parser);
+
+/* Our URL parser.
+ *
+ * This is designed to be shared by http_parser_execute() for URL validation,
+ * hence it has a state transition + byte-for-byte interface. In addition, it
+ * is meant to be embedded in http_parser_parse_url(), which does the dirty
+ * work of turning state transitions into URL components for its API.
+ *
+ * This function should only be invoked with non-space characters. It is
+ * assumed that the caller cares about (and can detect) the transition between
+ * URL and non-URL states by looking for these.
+ */
+static enum state
+parse_url_char(enum state s, const char ch)
+{
+ if (ch == ' ' || ch == '\r' || ch == '\n') {
+ return s_dead;
+ }
+
+#if HTTP_PARSER_STRICT
+ if (ch == '\t' || ch == '\f') {
+ return s_dead;
+ }
+#endif
+
+ switch (s) {
+ case s_req_spaces_before_url:
+ /* Proxied requests are followed by scheme of an absolute URI (alpha).
+ * All methods except CONNECT are followed by '/' or '*'.
+ */
+
+ if (ch == '/' || ch == '*') {
+ return s_req_path;
+ }
+
+ if (IS_ALPHA(ch)) {
+ return s_req_schema;
+ }
+
+ break;
+
+ case s_req_schema:
+ if (IS_ALPHA(ch)) {
+ return s;
+ }
+
+ if (ch == ':') {
+ return s_req_schema_slash;
+ }
+
+ break;
+
+ case s_req_schema_slash:
+ if (ch == '/') {
+ return s_req_schema_slash_slash;
+ }
+
+ break;
+
+ case s_req_schema_slash_slash:
+ if (ch == '/') {
+ return s_req_server_start;
+ }
+
+ break;
+
+ case s_req_server_with_at:
+ if (ch == '@') {
+ return s_dead;
+ }
+
+ /* fall through */
+ case s_req_server_start:
+ case s_req_server:
+ if (ch == '/') {
+ return s_req_path;
+ }
+
+ if (ch == '?') {
+ return s_req_query_string_start;
+ }
+
+ if (ch == '@') {
+ return s_req_server_with_at;
+ }
+
+ if (IS_USERINFO_CHAR(ch) || ch == '[' || ch == ']') {
+ return s_req_server;
+ }
+
+ break;
+
+ case s_req_path:
+ if (IS_URL_CHAR(ch)) {
+ return s;
+ }
+
+ switch (ch) {
+ case '?':
+ return s_req_query_string_start;
+
+ case '#':
+ return s_req_fragment_start;
+ }
+
+ break;
+
+ case s_req_query_string_start:
+ case s_req_query_string:
+ if (IS_URL_CHAR(ch)) {
+ return s_req_query_string;
+ }
+
+ switch (ch) {
+ case '?':
+ /* allow extra '?' in query string */
+ return s_req_query_string;
+
+ case '#':
+ return s_req_fragment_start;
+ }
+
+ break;
+
+ case s_req_fragment_start:
+ if (IS_URL_CHAR(ch)) {
+ return s_req_fragment;
+ }
+
+ switch (ch) {
+ case '?':
+ return s_req_fragment;
+
+ case '#':
+ return s;
+ }
+
+ break;
+
+ case s_req_fragment:
+ if (IS_URL_CHAR(ch)) {
+ return s;
+ }
+
+ switch (ch) {
+ case '?':
+ case '#':
+ return s;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+
+ /* We should never fall out of the switch above unless there's an error */
+ return s_dead;
+}
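+
+/* A minimal sketch of driving the URL state machine directly (normally only
+ * http_parser_execute() and http_parser_parse_url() do this):
+ *
+ *   enum state s = s_req_spaces_before_url;
+ *   const char *q;
+ *   for (q = "/index.html?x=1"; *q != '\0'; q++) {
+ *     s = parse_url_char(s, *q);
+ *     if (s == s_dead) break;   // invalid character for the current state
+ *   }
+ */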
+
+size_t http_parser_execute (http_parser *parser,
+ const http_parser_settings *settings,
+ const char *data,
+ size_t len)
+{
+ char c, ch;
+ int8_t unhex_val;
+ const char *p = data;
+ const char *header_field_mark = 0;
+ const char *header_value_mark = 0;
+ const char *url_mark = 0;
+ const char *body_mark = 0;
+ const char *status_mark = 0;
+ enum state p_state = (enum state) parser->state;
+ const unsigned int lenient = parser->lenient_http_headers;
+ uint32_t nread = parser->nread;
+
+ /* We're in an error state. Don't bother doing anything. */
+ if (HTTP_PARSER_ERRNO(parser) != HPE_OK) {
+ return 0;
+ }
+
+ if (len == 0) {
+ switch (CURRENT_STATE()) {
+ case s_body_identity_eof:
+ /* Use of CALLBACK_NOTIFY() here would erroneously return 1 byte read if
+ * we got paused.
+ */
+ CALLBACK_NOTIFY_NOADVANCE(message_complete);
+ return 0;
+
+ case s_dead:
+ case s_start_req_or_res:
+ case s_start_res:
+ case s_start_req:
+ return 0;
+
+ default:
+ SET_ERRNO(HPE_INVALID_EOF_STATE);
+ return 1;
+ }
+ }
+
+
+ if (CURRENT_STATE() == s_header_field)
+ header_field_mark = data;
+ if (CURRENT_STATE() == s_header_value)
+ header_value_mark = data;
+ switch (CURRENT_STATE()) {
+ case s_req_path:
+ case s_req_schema:
+ case s_req_schema_slash:
+ case s_req_schema_slash_slash:
+ case s_req_server_start:
+ case s_req_server:
+ case s_req_server_with_at:
+ case s_req_query_string_start:
+ case s_req_query_string:
+ case s_req_fragment_start:
+ case s_req_fragment:
+ url_mark = data;
+ break;
+ case s_res_status:
+ status_mark = data;
+ break;
+ default:
+ break;
+ }
+
+ for (p=data; p != data + len; p++) {
+ ch = *p;
+
+ if (PARSING_HEADER(CURRENT_STATE()))
+ COUNT_HEADER_SIZE(1);
+
+reexecute:
+ switch (CURRENT_STATE()) {
+
+ case s_dead:
+        /* this state is used after a 'Connection: close' message;
+         * the parser will error out if it reads another message
+         */
+ if (LIKELY(ch == CR || ch == LF))
+ break;
+
+ SET_ERRNO(HPE_CLOSED_CONNECTION);
+ goto error;
+
+ case s_start_req_or_res:
+ {
+ if (ch == CR || ch == LF)
+ break;
+ parser->flags = 0;
+ parser->extra_flags = 0;
+ parser->content_length = ULLONG_MAX;
+
+ if (ch == 'H') {
+ UPDATE_STATE(s_res_or_resp_H);
+
+ CALLBACK_NOTIFY(message_begin);
+ } else {
+ parser->type = HTTP_REQUEST;
+ UPDATE_STATE(s_start_req);
+ REEXECUTE();
+ }
+
+ break;
+ }
+
+ case s_res_or_resp_H:
+ if (ch == 'T') {
+ parser->type = HTTP_RESPONSE;
+ UPDATE_STATE(s_res_HT);
+ } else {
+ if (UNLIKELY(ch != 'E')) {
+ SET_ERRNO(HPE_INVALID_CONSTANT);
+ goto error;
+ }
+
+ parser->type = HTTP_REQUEST;
+ parser->method = HTTP_HEAD;
+ parser->index = 2;
+ UPDATE_STATE(s_req_method);
+ }
+ break;
+
+ case s_start_res:
+ {
+ if (ch == CR || ch == LF)
+ break;
+ parser->flags = 0;
+ parser->extra_flags = 0;
+ parser->content_length = ULLONG_MAX;
+
+ if (ch == 'H') {
+ UPDATE_STATE(s_res_H);
+ } else {
+ SET_ERRNO(HPE_INVALID_CONSTANT);
+ goto error;
+ }
+
+ CALLBACK_NOTIFY(message_begin);
+ break;
+ }
+
+ case s_res_H:
+ STRICT_CHECK(ch != 'T');
+ UPDATE_STATE(s_res_HT);
+ break;
+
+ case s_res_HT:
+ STRICT_CHECK(ch != 'T');
+ UPDATE_STATE(s_res_HTT);
+ break;
+
+ case s_res_HTT:
+ STRICT_CHECK(ch != 'P');
+ UPDATE_STATE(s_res_HTTP);
+ break;
+
+ case s_res_HTTP:
+ STRICT_CHECK(ch != '/');
+ UPDATE_STATE(s_res_http_major);
+ break;
+
+ case s_res_http_major:
+ if (UNLIKELY(!IS_NUM(ch))) {
+ SET_ERRNO(HPE_INVALID_VERSION);
+ goto error;
+ }
+
+ parser->http_major = ch - '0';
+ UPDATE_STATE(s_res_http_dot);
+ break;
+
+ case s_res_http_dot:
+ {
+ if (UNLIKELY(ch != '.')) {
+ SET_ERRNO(HPE_INVALID_VERSION);
+ goto error;
+ }
+
+ UPDATE_STATE(s_res_http_minor);
+ break;
+ }
+
+ case s_res_http_minor:
+ if (UNLIKELY(!IS_NUM(ch))) {
+ SET_ERRNO(HPE_INVALID_VERSION);
+ goto error;
+ }
+
+ parser->http_minor = ch - '0';
+ UPDATE_STATE(s_res_http_end);
+ break;
+
+ case s_res_http_end:
+ {
+ if (UNLIKELY(ch != ' ')) {
+ SET_ERRNO(HPE_INVALID_VERSION);
+ goto error;
+ }
+
+ UPDATE_STATE(s_res_first_status_code);
+ break;
+ }
+
+ case s_res_first_status_code:
+ {
+ if (!IS_NUM(ch)) {
+ if (ch == ' ') {
+ break;
+ }
+
+ SET_ERRNO(HPE_INVALID_STATUS);
+ goto error;
+ }
+ parser->status_code = ch - '0';
+ UPDATE_STATE(s_res_status_code);
+ break;
+ }
+
+ case s_res_status_code:
+ {
+ if (!IS_NUM(ch)) {
+ switch (ch) {
+ case ' ':
+ UPDATE_STATE(s_res_status_start);
+ break;
+ case CR:
+ case LF:
+ UPDATE_STATE(s_res_status_start);
+ REEXECUTE();
+ break;
+ default:
+ SET_ERRNO(HPE_INVALID_STATUS);
+ goto error;
+ }
+ break;
+ }
+
+ parser->status_code *= 10;
+ parser->status_code += ch - '0';
+
+ if (UNLIKELY(parser->status_code > 999)) {
+ SET_ERRNO(HPE_INVALID_STATUS);
+ goto error;
+ }
+
+ break;
+ }
+
+ case s_res_status_start:
+ {
+ MARK(status);
+ UPDATE_STATE(s_res_status);
+ parser->index = 0;
+
+ if (ch == CR || ch == LF)
+ REEXECUTE();
+
+ break;
+ }
+
+ case s_res_status:
+ if (ch == CR) {
+ UPDATE_STATE(s_res_line_almost_done);
+ CALLBACK_DATA(status);
+ break;
+ }
+
+ if (ch == LF) {
+ UPDATE_STATE(s_header_field_start);
+ CALLBACK_DATA(status);
+ break;
+ }
+
+ break;
+
+ case s_res_line_almost_done:
+ STRICT_CHECK(ch != LF);
+ UPDATE_STATE(s_header_field_start);
+ break;
+
+ case s_start_req:
+ {
+ if (ch == CR || ch == LF)
+ break;
+ parser->flags = 0;
+ parser->extra_flags = 0;
+ parser->content_length = ULLONG_MAX;
+
+ if (UNLIKELY(!IS_ALPHA(ch))) {
+ SET_ERRNO(HPE_INVALID_METHOD);
+ goto error;
+ }
+
+ parser->method = (enum http_method) 0;
+ parser->index = 1;
+ switch (ch) {
+ case 'A': parser->method = HTTP_ACL; break;
+ case 'B': parser->method = HTTP_BIND; break;
+ case 'C': parser->method = HTTP_CONNECT; /* or COPY, CHECKOUT */ break;
+ case 'D': parser->method = HTTP_DELETE; break;
+ case 'G': parser->method = HTTP_GET; break;
+ case 'H': parser->method = HTTP_HEAD; break;
+ case 'L': parser->method = HTTP_LOCK; /* or LINK */ break;
+ case 'M': parser->method = HTTP_MKCOL; /* or MOVE, MKACTIVITY, MERGE, M-SEARCH, MKCALENDAR */ break;
+ case 'N': parser->method = HTTP_NOTIFY; break;
+ case 'O': parser->method = HTTP_OPTIONS; break;
+ case 'P': parser->method = HTTP_POST;
+ /* or PROPFIND|PROPPATCH|PUT|PATCH|PURGE */
+ break;
+ case 'R': parser->method = HTTP_REPORT; /* or REBIND */ break;
+ case 'S': parser->method = HTTP_SUBSCRIBE; /* or SEARCH, SOURCE */ break;
+ case 'T': parser->method = HTTP_TRACE; break;
+ case 'U': parser->method = HTTP_UNLOCK; /* or UNSUBSCRIBE, UNBIND, UNLINK */ break;
+ default:
+ SET_ERRNO(HPE_INVALID_METHOD);
+ goto error;
+ }
+ UPDATE_STATE(s_req_method);
+
+ CALLBACK_NOTIFY(message_begin);
+
+ break;
+ }
+
+ case s_req_method:
+ {
+ const char *matcher;
+ if (UNLIKELY(ch == '\0')) {
+ SET_ERRNO(HPE_INVALID_METHOD);
+ goto error;
+ }
+
+ matcher = method_strings[parser->method];
+ if (ch == ' ' && matcher[parser->index] == '\0') {
+ UPDATE_STATE(s_req_spaces_before_url);
+ } else if (ch == matcher[parser->index]) {
+ ; /* nada */
+ } else if ((ch >= 'A' && ch <= 'Z') || ch == '-') {
+
+ switch (parser->method << 16 | parser->index << 8 | ch) {
+#define XX(meth, pos, ch, new_meth) \
+ case (HTTP_##meth << 16 | pos << 8 | ch): \
+ parser->method = HTTP_##new_meth; break;
+
+ XX(POST, 1, 'U', PUT)
+ XX(POST, 1, 'A', PATCH)
+ XX(POST, 1, 'R', PROPFIND)
+ XX(PUT, 2, 'R', PURGE)
+ XX(CONNECT, 1, 'H', CHECKOUT)
+ XX(CONNECT, 2, 'P', COPY)
+ XX(MKCOL, 1, 'O', MOVE)
+ XX(MKCOL, 1, 'E', MERGE)
+ XX(MKCOL, 1, '-', MSEARCH)
+ XX(MKCOL, 2, 'A', MKACTIVITY)
+ XX(MKCOL, 3, 'A', MKCALENDAR)
+ XX(SUBSCRIBE, 1, 'E', SEARCH)
+ XX(SUBSCRIBE, 1, 'O', SOURCE)
+ XX(REPORT, 2, 'B', REBIND)
+ XX(PROPFIND, 4, 'P', PROPPATCH)
+ XX(LOCK, 1, 'I', LINK)
+ XX(UNLOCK, 2, 'S', UNSUBSCRIBE)
+ XX(UNLOCK, 2, 'B', UNBIND)
+ XX(UNLOCK, 3, 'I', UNLINK)
+#undef XX
+ default:
+ SET_ERRNO(HPE_INVALID_METHOD);
+ goto error;
+ }
+ } else {
+ SET_ERRNO(HPE_INVALID_METHOD);
+ goto error;
+ }
+
+ ++parser->index;
+ break;
+ }
+
+ case s_req_spaces_before_url:
+ {
+ if (ch == ' ') break;
+
+ MARK(url);
+ if (parser->method == HTTP_CONNECT) {
+ UPDATE_STATE(s_req_server_start);
+ }
+
+ UPDATE_STATE(parse_url_char(CURRENT_STATE(), ch));
+ if (UNLIKELY(CURRENT_STATE() == s_dead)) {
+ SET_ERRNO(HPE_INVALID_URL);
+ goto error;
+ }
+
+ break;
+ }
+
+ case s_req_schema:
+ case s_req_schema_slash:
+ case s_req_schema_slash_slash:
+ case s_req_server_start:
+ {
+ switch (ch) {
+ /* No whitespace allowed here */
+ case ' ':
+ case CR:
+ case LF:
+ SET_ERRNO(HPE_INVALID_URL);
+ goto error;
+ default:
+ UPDATE_STATE(parse_url_char(CURRENT_STATE(), ch));
+ if (UNLIKELY(CURRENT_STATE() == s_dead)) {
+ SET_ERRNO(HPE_INVALID_URL);
+ goto error;
+ }
+ }
+
+ break;
+ }
+
+ case s_req_server:
+ case s_req_server_with_at:
+ case s_req_path:
+ case s_req_query_string_start:
+ case s_req_query_string:
+ case s_req_fragment_start:
+ case s_req_fragment:
+ {
+ switch (ch) {
+ case ' ':
+ UPDATE_STATE(s_req_http_start);
+ CALLBACK_DATA(url);
+ break;
+ case CR:
+ case LF:
+ parser->http_major = 0;
+ parser->http_minor = 9;
+ UPDATE_STATE((ch == CR) ?
+ s_req_line_almost_done :
+ s_header_field_start);
+ CALLBACK_DATA(url);
+ break;
+ default:
+ UPDATE_STATE(parse_url_char(CURRENT_STATE(), ch));
+ if (UNLIKELY(CURRENT_STATE() == s_dead)) {
+ SET_ERRNO(HPE_INVALID_URL);
+ goto error;
+ }
+ }
+ break;
+ }
+
+ case s_req_http_start:
+ switch (ch) {
+ case ' ':
+ break;
+ case 'H':
+ UPDATE_STATE(s_req_http_H);
+ break;
+ case 'I':
+ if (parser->method == HTTP_SOURCE) {
+ UPDATE_STATE(s_req_http_I);
+ break;
+ }
+ /* fall through */
+ default:
+ SET_ERRNO(HPE_INVALID_CONSTANT);
+ goto error;
+ }
+ break;
+
+ case s_req_http_H:
+ STRICT_CHECK(ch != 'T');
+ UPDATE_STATE(s_req_http_HT);
+ break;
+
+ case s_req_http_HT:
+ STRICT_CHECK(ch != 'T');
+ UPDATE_STATE(s_req_http_HTT);
+ break;
+
+ case s_req_http_HTT:
+ STRICT_CHECK(ch != 'P');
+ UPDATE_STATE(s_req_http_HTTP);
+ break;
+
+ case s_req_http_I:
+ STRICT_CHECK(ch != 'C');
+ UPDATE_STATE(s_req_http_IC);
+ break;
+
+ case s_req_http_IC:
+ STRICT_CHECK(ch != 'E');
+ UPDATE_STATE(s_req_http_HTTP); /* Treat "ICE" as "HTTP". */
+ break;
+
+ case s_req_http_HTTP:
+ STRICT_CHECK(ch != '/');
+ UPDATE_STATE(s_req_http_major);
+ break;
+
+ case s_req_http_major:
+ if (UNLIKELY(!IS_NUM(ch))) {
+ SET_ERRNO(HPE_INVALID_VERSION);
+ goto error;
+ }
+
+ parser->http_major = ch - '0';
+ UPDATE_STATE(s_req_http_dot);
+ break;
+
+ case s_req_http_dot:
+ {
+ if (UNLIKELY(ch != '.')) {
+ SET_ERRNO(HPE_INVALID_VERSION);
+ goto error;
+ }
+
+ UPDATE_STATE(s_req_http_minor);
+ break;
+ }
+
+ case s_req_http_minor:
+ if (UNLIKELY(!IS_NUM(ch))) {
+ SET_ERRNO(HPE_INVALID_VERSION);
+ goto error;
+ }
+
+ parser->http_minor = ch - '0';
+ UPDATE_STATE(s_req_http_end);
+ break;
+
+ case s_req_http_end:
+ {
+ if (ch == CR) {
+ UPDATE_STATE(s_req_line_almost_done);
+ break;
+ }
+
+ if (ch == LF) {
+ UPDATE_STATE(s_header_field_start);
+ break;
+ }
+
+ SET_ERRNO(HPE_INVALID_VERSION);
+ goto error;
+ break;
+ }
+
+ /* end of request line */
+ case s_req_line_almost_done:
+ {
+ if (UNLIKELY(ch != LF)) {
+ SET_ERRNO(HPE_LF_EXPECTED);
+ goto error;
+ }
+
+ UPDATE_STATE(s_header_field_start);
+ break;
+ }
+
+ case s_header_field_start:
+ {
+ if (ch == CR) {
+ UPDATE_STATE(s_headers_almost_done);
+ break;
+ }
+
+ if (ch == LF) {
+          /* they might just be sending \n instead of \r\n, so this would be
+           * the second \n to denote the end of headers */
+ UPDATE_STATE(s_headers_almost_done);
+ REEXECUTE();
+ }
+
+ c = TOKEN(ch);
+
+ if (UNLIKELY(!c)) {
+ SET_ERRNO(HPE_INVALID_HEADER_TOKEN);
+ goto error;
+ }
+
+ MARK(header_field);
+
+ parser->index = 0;
+ UPDATE_STATE(s_header_field);
+
+ switch (c) {
+ case 'c':
+ parser->header_state = h_C;
+ break;
+
+ case 'p':
+ parser->header_state = h_matching_proxy_connection;
+ break;
+
+ case 't':
+ parser->header_state = h_matching_transfer_encoding;
+ break;
+
+ case 'u':
+ parser->header_state = h_matching_upgrade;
+ break;
+
+ default:
+ parser->header_state = h_general;
+ break;
+ }
+ break;
+ }
+
+ case s_header_field:
+ {
+ const char* start = p;
+ for (; p != data + len; p++) {
+ ch = *p;
+ c = TOKEN(ch);
+
+ if (!c)
+ break;
+
+ switch (parser->header_state) {
+ case h_general: {
+ size_t left = data + len - p;
+ const char* pe = p + MIN(left, max_header_size);
+ while (p+1 < pe && TOKEN(p[1])) {
+ p++;
+ }
+ break;
+ }
+
+ case h_C:
+ parser->index++;
+ parser->header_state = (c == 'o' ? h_CO : h_general);
+ break;
+
+ case h_CO:
+ parser->index++;
+ parser->header_state = (c == 'n' ? h_CON : h_general);
+ break;
+
+ case h_CON:
+ parser->index++;
+ switch (c) {
+ case 'n':
+ parser->header_state = h_matching_connection;
+ break;
+ case 't':
+ parser->header_state = h_matching_content_length;
+ break;
+ default:
+ parser->header_state = h_general;
+ break;
+ }
+ break;
+
+ /* connection */
+
+ case h_matching_connection:
+ parser->index++;
+ if (parser->index > sizeof(CONNECTION)-1
+ || c != CONNECTION[parser->index]) {
+ parser->header_state = h_general;
+ } else if (parser->index == sizeof(CONNECTION)-2) {
+ parser->header_state = h_connection;
+ }
+ break;
+
+ /* proxy-connection */
+
+ case h_matching_proxy_connection:
+ parser->index++;
+ if (parser->index > sizeof(PROXY_CONNECTION)-1
+ || c != PROXY_CONNECTION[parser->index]) {
+ parser->header_state = h_general;
+ } else if (parser->index == sizeof(PROXY_CONNECTION)-2) {
+ parser->header_state = h_connection;
+ }
+ break;
+
+ /* content-length */
+
+ case h_matching_content_length:
+ parser->index++;
+ if (parser->index > sizeof(CONTENT_LENGTH)-1
+ || c != CONTENT_LENGTH[parser->index]) {
+ parser->header_state = h_general;
+ } else if (parser->index == sizeof(CONTENT_LENGTH)-2) {
+ parser->header_state = h_content_length;
+ }
+ break;
+
+ /* transfer-encoding */
+
+ case h_matching_transfer_encoding:
+ parser->index++;
+ if (parser->index > sizeof(TRANSFER_ENCODING)-1
+ || c != TRANSFER_ENCODING[parser->index]) {
+ parser->header_state = h_general;
+ } else if (parser->index == sizeof(TRANSFER_ENCODING)-2) {
+ parser->header_state = h_transfer_encoding;
+ parser->extra_flags |= F_TRANSFER_ENCODING >> 8;
+ }
+ break;
+
+ /* upgrade */
+
+ case h_matching_upgrade:
+ parser->index++;
+ if (parser->index > sizeof(UPGRADE)-1
+ || c != UPGRADE[parser->index]) {
+ parser->header_state = h_general;
+ } else if (parser->index == sizeof(UPGRADE)-2) {
+ parser->header_state = h_upgrade;
+ }
+ break;
+
+ case h_connection:
+ case h_content_length:
+ case h_transfer_encoding:
+ case h_upgrade:
+ if (ch != ' ') parser->header_state = h_general;
+ break;
+
+ default:
+ assert(0 && "Unknown header_state");
+ break;
+ }
+ }
+
+ if (p == data + len) {
+ --p;
+ COUNT_HEADER_SIZE(p - start);
+ break;
+ }
+
+ COUNT_HEADER_SIZE(p - start);
+
+ if (ch == ':') {
+ UPDATE_STATE(s_header_value_discard_ws);
+ CALLBACK_DATA(header_field);
+ break;
+ }
+
+ SET_ERRNO(HPE_INVALID_HEADER_TOKEN);
+ goto error;
+ }
+
+ case s_header_value_discard_ws:
+ if (ch == ' ' || ch == '\t') break;
+
+ if (ch == CR) {
+ UPDATE_STATE(s_header_value_discard_ws_almost_done);
+ break;
+ }
+
+ if (ch == LF) {
+ UPDATE_STATE(s_header_value_discard_lws);
+ break;
+ }
+
+ /* fall through */
+
+ case s_header_value_start:
+ {
+ MARK(header_value);
+
+ UPDATE_STATE(s_header_value);
+ parser->index = 0;
+
+ c = LOWER(ch);
+
+ switch (parser->header_state) {
+ case h_upgrade:
+ parser->flags |= F_UPGRADE;
+ parser->header_state = h_general;
+ break;
+
+ case h_transfer_encoding:
+ /* looking for 'Transfer-Encoding: chunked' */
+ if ('c' == c) {
+ parser->header_state = h_matching_transfer_encoding_chunked;
+ } else {
+ parser->header_state = h_matching_transfer_encoding_token;
+ }
+ break;
+
+ /* Multi-value `Transfer-Encoding` header */
+ case h_matching_transfer_encoding_token_start:
+ break;
+
+ case h_content_length:
+ if (UNLIKELY(!IS_NUM(ch))) {
+ SET_ERRNO(HPE_INVALID_CONTENT_LENGTH);
+ goto error;
+ }
+
+ if (parser->flags & F_CONTENTLENGTH) {
+ SET_ERRNO(HPE_UNEXPECTED_CONTENT_LENGTH);
+ goto error;
+ }
+
+ parser->flags |= F_CONTENTLENGTH;
+ parser->content_length = ch - '0';
+ parser->header_state = h_content_length_num;
+ break;
+
+ /* when obsolete line folding is encountered for content length
+ * continue to the s_header_value state */
+ case h_content_length_ws:
+ break;
+
+ case h_connection:
+ /* looking for 'Connection: keep-alive' */
+ if (c == 'k') {
+ parser->header_state = h_matching_connection_keep_alive;
+ /* looking for 'Connection: close' */
+ } else if (c == 'c') {
+ parser->header_state = h_matching_connection_close;
+ } else if (c == 'u') {
+ parser->header_state = h_matching_connection_upgrade;
+ } else {
+ parser->header_state = h_matching_connection_token;
+ }
+ break;
+
+ /* Multi-value `Connection` header */
+ case h_matching_connection_token_start:
+ break;
+
+ default:
+ parser->header_state = h_general;
+ break;
+ }
+ break;
+ }
+
+ case s_header_value:
+ {
+ const char* start = p;
+ enum header_states h_state = (enum header_states) parser->header_state;
+ for (; p != data + len; p++) {
+ ch = *p;
+ if (ch == CR) {
+ UPDATE_STATE(s_header_almost_done);
+ parser->header_state = h_state;
+ CALLBACK_DATA(header_value);
+ break;
+ }
+
+ if (ch == LF) {
+ UPDATE_STATE(s_header_almost_done);
+ COUNT_HEADER_SIZE(p - start);
+ parser->header_state = h_state;
+ CALLBACK_DATA_NOADVANCE(header_value);
+ REEXECUTE();
+ }
+
+ if (!lenient && !IS_HEADER_CHAR(ch)) {
+ SET_ERRNO(HPE_INVALID_HEADER_TOKEN);
+ goto error;
+ }
+
+ c = LOWER(ch);
+
+ switch (h_state) {
+ case h_general:
+ {
+ size_t left = data + len - p;
+ const char* pe = p + MIN(left, max_header_size);
+
+ for (; p != pe; p++) {
+ ch = *p;
+ if (ch == CR || ch == LF) {
+ --p;
+ break;
+ }
+ if (!lenient && !IS_HEADER_CHAR(ch)) {
+ SET_ERRNO(HPE_INVALID_HEADER_TOKEN);
+ goto error;
+ }
+ }
+ if (p == data + len)
+ --p;
+ break;
+ }
+
+ case h_connection:
+ case h_transfer_encoding:
+ assert(0 && "Shouldn't get here.");
+ break;
+
+ case h_content_length:
+ if (ch == ' ') break;
+ h_state = h_content_length_num;
+ /* fall through */
+
+ case h_content_length_num:
+ {
+ uint64_t t;
+
+ if (ch == ' ') {
+ h_state = h_content_length_ws;
+ break;
+ }
+
+ if (UNLIKELY(!IS_NUM(ch))) {
+ SET_ERRNO(HPE_INVALID_CONTENT_LENGTH);
+ parser->header_state = h_state;
+ goto error;
+ }
+
+ t = parser->content_length;
+ t *= 10;
+ t += ch - '0';
+
+ /* Overflow? Test against a conservative limit for simplicity. */
+ if (UNLIKELY((ULLONG_MAX - 10) / 10 < parser->content_length)) {
+ SET_ERRNO(HPE_INVALID_CONTENT_LENGTH);
+ parser->header_state = h_state;
+ goto error;
+ }
+
+ parser->content_length = t;
+ break;
+ }
+
+ case h_content_length_ws:
+ if (ch == ' ') break;
+ SET_ERRNO(HPE_INVALID_CONTENT_LENGTH);
+ parser->header_state = h_state;
+ goto error;
+
+ /* Transfer-Encoding: chunked */
+ case h_matching_transfer_encoding_token_start:
+ /* looking for 'Transfer-Encoding: chunked' */
+ if ('c' == c) {
+ h_state = h_matching_transfer_encoding_chunked;
+ } else if (STRICT_TOKEN(c)) {
+ /* TODO(indutny): similar code below does this, but why?
+ * At the very least it seems to be inconsistent given that
+ * h_matching_transfer_encoding_token does not check for
+ * `STRICT_TOKEN`
+ */
+ h_state = h_matching_transfer_encoding_token;
+ } else if (c == ' ' || c == '\t') {
+ /* Skip lws */
+ } else {
+ h_state = h_general;
+ }
+ break;
+
+ case h_matching_transfer_encoding_chunked:
+ parser->index++;
+ if (parser->index > sizeof(CHUNKED)-1
+ || c != CHUNKED[parser->index]) {
+ h_state = h_matching_transfer_encoding_token;
+ } else if (parser->index == sizeof(CHUNKED)-2) {
+ h_state = h_transfer_encoding_chunked;
+ }
+ break;
+
+ case h_matching_transfer_encoding_token:
+ if (ch == ',') {
+ h_state = h_matching_transfer_encoding_token_start;
+ parser->index = 0;
+ }
+ break;
+
+ case h_matching_connection_token_start:
+ /* looking for 'Connection: keep-alive' */
+ if (c == 'k') {
+ h_state = h_matching_connection_keep_alive;
+ /* looking for 'Connection: close' */
+ } else if (c == 'c') {
+ h_state = h_matching_connection_close;
+ } else if (c == 'u') {
+ h_state = h_matching_connection_upgrade;
+ } else if (STRICT_TOKEN(c)) {
+ h_state = h_matching_connection_token;
+ } else if (c == ' ' || c == '\t') {
+ /* Skip lws */
+ } else {
+ h_state = h_general;
+ }
+ break;
+
+ /* looking for 'Connection: keep-alive' */
+ case h_matching_connection_keep_alive:
+ parser->index++;
+ if (parser->index > sizeof(KEEP_ALIVE)-1
+ || c != KEEP_ALIVE[parser->index]) {
+ h_state = h_matching_connection_token;
+ } else if (parser->index == sizeof(KEEP_ALIVE)-2) {
+ h_state = h_connection_keep_alive;
+ }
+ break;
+
+ /* looking for 'Connection: close' */
+ case h_matching_connection_close:
+ parser->index++;
+ if (parser->index > sizeof(CLOSE)-1 || c != CLOSE[parser->index]) {
+ h_state = h_matching_connection_token;
+ } else if (parser->index == sizeof(CLOSE)-2) {
+ h_state = h_connection_close;
+ }
+ break;
+
+ /* looking for 'Connection: upgrade' */
+ case h_matching_connection_upgrade:
+ parser->index++;
+ if (parser->index > sizeof(UPGRADE) - 1 ||
+ c != UPGRADE[parser->index]) {
+ h_state = h_matching_connection_token;
+ } else if (parser->index == sizeof(UPGRADE)-2) {
+ h_state = h_connection_upgrade;
+ }
+ break;
+
+ case h_matching_connection_token:
+ if (ch == ',') {
+ h_state = h_matching_connection_token_start;
+ parser->index = 0;
+ }
+ break;
+
+ case h_transfer_encoding_chunked:
+ if (ch != ' ') h_state = h_matching_transfer_encoding_token;
+ break;
+
+ case h_connection_keep_alive:
+ case h_connection_close:
+ case h_connection_upgrade:
+ if (ch == ',') {
+ if (h_state == h_connection_keep_alive) {
+ parser->flags |= F_CONNECTION_KEEP_ALIVE;
+ } else if (h_state == h_connection_close) {
+ parser->flags |= F_CONNECTION_CLOSE;
+ } else if (h_state == h_connection_upgrade) {
+ parser->flags |= F_CONNECTION_UPGRADE;
+ }
+ h_state = h_matching_connection_token_start;
+ parser->index = 0;
+ } else if (ch != ' ') {
+ h_state = h_matching_connection_token;
+ }
+ break;
+
+ default:
+ UPDATE_STATE(s_header_value);
+ h_state = h_general;
+ break;
+ }
+ }
+ parser->header_state = h_state;
+
+ if (p == data + len)
+ --p;
+
+ COUNT_HEADER_SIZE(p - start);
+ break;
+ }
+
+ case s_header_almost_done:
+ {
+ if (UNLIKELY(ch != LF)) {
+ SET_ERRNO(HPE_LF_EXPECTED);
+ goto error;
+ }
+
+ UPDATE_STATE(s_header_value_lws);
+ break;
+ }
+
+ case s_header_value_lws:
+ {
+ if (ch == ' ' || ch == '\t') {
+ if (parser->header_state == h_content_length_num) {
+ /* treat obsolete line folding as space */
+ parser->header_state = h_content_length_ws;
+ }
+ UPDATE_STATE(s_header_value_start);
+ REEXECUTE();
+ }
+
+ /* finished the header */
+ switch (parser->header_state) {
+ case h_connection_keep_alive:
+ parser->flags |= F_CONNECTION_KEEP_ALIVE;
+ break;
+ case h_connection_close:
+ parser->flags |= F_CONNECTION_CLOSE;
+ break;
+ case h_transfer_encoding_chunked:
+ parser->flags |= F_CHUNKED;
+ break;
+ case h_connection_upgrade:
+ parser->flags |= F_CONNECTION_UPGRADE;
+ break;
+ default:
+ break;
+ }
+
+ UPDATE_STATE(s_header_field_start);
+ REEXECUTE();
+ }
+
+ case s_header_value_discard_ws_almost_done:
+ {
+ STRICT_CHECK(ch != LF);
+ UPDATE_STATE(s_header_value_discard_lws);
+ break;
+ }
+
+ case s_header_value_discard_lws:
+ {
+ if (ch == ' ' || ch == '\t') {
+ UPDATE_STATE(s_header_value_discard_ws);
+ break;
+ } else {
+ switch (parser->header_state) {
+ case h_connection_keep_alive:
+ parser->flags |= F_CONNECTION_KEEP_ALIVE;
+ break;
+ case h_connection_close:
+ parser->flags |= F_CONNECTION_CLOSE;
+ break;
+ case h_connection_upgrade:
+ parser->flags |= F_CONNECTION_UPGRADE;
+ break;
+ case h_transfer_encoding_chunked:
+ parser->flags |= F_CHUNKED;
+ break;
+ case h_content_length:
+ /* do not allow empty content length */
+ SET_ERRNO(HPE_INVALID_CONTENT_LENGTH);
+ goto error;
+ break;
+ default:
+ break;
+ }
+
+ /* header value was empty */
+ MARK(header_value);
+ UPDATE_STATE(s_header_field_start);
+ CALLBACK_DATA_NOADVANCE(header_value);
+ REEXECUTE();
+ }
+ }
+
+ case s_headers_almost_done:
+ {
+ STRICT_CHECK(ch != LF);
+
+ if (parser->flags & F_TRAILING) {
+ /* End of a chunked request */
+ UPDATE_STATE(s_message_done);
+ CALLBACK_NOTIFY_NOADVANCE(chunk_complete);
+ REEXECUTE();
+ }
+
+        /* Cannot use transfer-encoding and a content-length header together,
+           per the HTTP specification. (RFC 7230 Section 3.3.3) */
+ if ((parser->extra_flags & (F_TRANSFER_ENCODING >> 8)) &&
+ (parser->flags & F_CONTENTLENGTH)) {
+ /* Allow it for lenient parsing as long as `Transfer-Encoding` is
+ * not `chunked`
+ */
+ if (!lenient || (parser->flags & F_CHUNKED)) {
+ SET_ERRNO(HPE_UNEXPECTED_CONTENT_LENGTH);
+ goto error;
+ }
+ }
+
+ UPDATE_STATE(s_headers_done);
+
+ /* Set this here so that on_headers_complete() callbacks can see it */
+ if ((parser->flags & F_UPGRADE) &&
+ (parser->flags & F_CONNECTION_UPGRADE)) {
+ /* For responses, "Upgrade: foo" and "Connection: upgrade" are
+ * mandatory only when it is a 101 Switching Protocols response,
+ * otherwise it is purely informational, to announce support.
+ */
+ parser->upgrade =
+ (parser->type == HTTP_REQUEST || parser->status_code == 101);
+ } else {
+ parser->upgrade = (parser->method == HTTP_CONNECT);
+ }
+
+ /* Here we call the headers_complete callback. This is somewhat
+ * different than other callbacks because if the user returns 1, we
+ * will interpret that as saying that this message has no body. This
+       * is needed for the annoying case of receiving a response to a HEAD
+ * request.
+ *
+ * We'd like to use CALLBACK_NOTIFY_NOADVANCE() here but we cannot, so
+ * we have to simulate it by handling a change in errno below.
+ */
+ if (settings->on_headers_complete) {
+ switch (settings->on_headers_complete(parser)) {
+ case 0:
+ break;
+
+ case 2:
+ parser->upgrade = 1;
+
+ /* fall through */
+ case 1:
+ parser->flags |= F_SKIPBODY;
+ break;
+
+ default:
+ SET_ERRNO(HPE_CB_headers_complete);
+ RETURN(p - data); /* Error */
+ }
+ }
+
+ if (HTTP_PARSER_ERRNO(parser) != HPE_OK) {
+ RETURN(p - data);
+ }
+
+ REEXECUTE();
+ }
+
+ case s_headers_done:
+ {
+ int hasBody;
+ STRICT_CHECK(ch != LF);
+
+ parser->nread = 0;
+ nread = 0;
+
+ hasBody = parser->flags & F_CHUNKED ||
+ (parser->content_length > 0 && parser->content_length != ULLONG_MAX);
+ if (parser->upgrade && (parser->method == HTTP_CONNECT ||
+ (parser->flags & F_SKIPBODY) || !hasBody)) {
+ /* Exit, the rest of the message is in a different protocol. */
+ UPDATE_STATE(NEW_MESSAGE());
+ CALLBACK_NOTIFY(message_complete);
+ RETURN((p - data) + 1);
+ }
+
+ if (parser->flags & F_SKIPBODY) {
+ UPDATE_STATE(NEW_MESSAGE());
+ CALLBACK_NOTIFY(message_complete);
+ } else if (parser->flags & F_CHUNKED) {
+ /* chunked encoding - ignore Content-Length header,
+ * prepare for a chunk */
+ UPDATE_STATE(s_chunk_size_start);
+ } else if (parser->extra_flags & (F_TRANSFER_ENCODING >> 8)) {
+ if (parser->type == HTTP_REQUEST && !lenient) {
+ /* RFC 7230 3.3.3 */
+
+ /* If a Transfer-Encoding header field
+ * is present in a request and the chunked transfer coding is not
+ * the final encoding, the message body length cannot be determined
+ * reliably; the server MUST respond with the 400 (Bad Request)
+ * status code and then close the connection.
+ */
+ SET_ERRNO(HPE_INVALID_TRANSFER_ENCODING);
+ RETURN(p - data); /* Error */
+ } else {
+ /* RFC 7230 3.3.3 */
+
+ /* If a Transfer-Encoding header field is present in a response and
+ * the chunked transfer coding is not the final encoding, the
+ * message body length is determined by reading the connection until
+ * it is closed by the server.
+ */
+ UPDATE_STATE(s_body_identity_eof);
+ }
+ } else {
+ if (parser->content_length == 0) {
+ /* Content-Length header given but zero: Content-Length: 0\r\n */
+ UPDATE_STATE(NEW_MESSAGE());
+ CALLBACK_NOTIFY(message_complete);
+ } else if (parser->content_length != ULLONG_MAX) {
+ /* Content-Length header given and non-zero */
+ UPDATE_STATE(s_body_identity);
+ } else {
+ if (!http_message_needs_eof(parser)) {
+          /* Assume content-length 0 - read the next message */
+ UPDATE_STATE(NEW_MESSAGE());
+ CALLBACK_NOTIFY(message_complete);
+ } else {
+ /* Read body until EOF */
+ UPDATE_STATE(s_body_identity_eof);
+ }
+ }
+ }
+
+ break;
+ }
+
+ case s_body_identity:
+ {
+ uint64_t to_read = MIN(parser->content_length,
+ (uint64_t) ((data + len) - p));
+
+ assert(parser->content_length != 0
+ && parser->content_length != ULLONG_MAX);
+
+ /* The difference between advancing content_length and p is because
+         * the latter will automatically advance on the next loop iteration.
+ * Further, if content_length ends up at 0, we want to see the last
+ * byte again for our message complete callback.
+ */
+ MARK(body);
+ parser->content_length -= to_read;
+ p += to_read - 1;
+
+ if (parser->content_length == 0) {
+ UPDATE_STATE(s_message_done);
+
+ /* Mimic CALLBACK_DATA_NOADVANCE() but with one extra byte.
+ *
+ * The alternative to doing this is to wait for the next byte to
+ * trigger the data callback, just as in every other case. The
+ * problem with this is that this makes it difficult for the test
+ * harness to distinguish between complete-on-EOF and
+ * complete-on-length. It's not clear that this distinction is
+ * important for applications, but let's keep it for now.
+ */
+ CALLBACK_DATA_(body, p - body_mark + 1, p - data);
+ REEXECUTE();
+ }
+
+ break;
+ }
+
+ /* read until EOF */
+ case s_body_identity_eof:
+ MARK(body);
+ p = data + len - 1;
+
+ break;
+
+ case s_message_done:
+ UPDATE_STATE(NEW_MESSAGE());
+ CALLBACK_NOTIFY(message_complete);
+ if (parser->upgrade) {
+ /* Exit, the rest of the message is in a different protocol. */
+ RETURN((p - data) + 1);
+ }
+ break;
+
+ case s_chunk_size_start:
+ {
+ assert(nread == 1);
+ assert(parser->flags & F_CHUNKED);
+
+ unhex_val = unhex[(unsigned char)ch];
+ if (UNLIKELY(unhex_val == -1)) {
+ SET_ERRNO(HPE_INVALID_CHUNK_SIZE);
+ goto error;
+ }
+
+ parser->content_length = unhex_val;
+ UPDATE_STATE(s_chunk_size);
+ break;
+ }
+
+ case s_chunk_size:
+ {
+ uint64_t t;
+
+ assert(parser->flags & F_CHUNKED);
+
+ if (ch == CR) {
+ UPDATE_STATE(s_chunk_size_almost_done);
+ break;
+ }
+
+ unhex_val = unhex[(unsigned char)ch];
+
+ if (unhex_val == -1) {
+ if (ch == ';' || ch == ' ') {
+ UPDATE_STATE(s_chunk_parameters);
+ break;
+ }
+
+ SET_ERRNO(HPE_INVALID_CHUNK_SIZE);
+ goto error;
+ }
+
+ t = parser->content_length;
+ t *= 16;
+ t += unhex_val;
+
+ /* Overflow? Test against a conservative limit for simplicity. */
+ if (UNLIKELY((ULLONG_MAX - 16) / 16 < parser->content_length)) {
+ SET_ERRNO(HPE_INVALID_CONTENT_LENGTH);
+ goto error;
+ }
+
+ parser->content_length = t;
+ break;
+ }
+
+ case s_chunk_parameters:
+ {
+ assert(parser->flags & F_CHUNKED);
+        /* chunk extensions are simply ignored. TODO: check for overflow */
+ if (ch == CR) {
+ UPDATE_STATE(s_chunk_size_almost_done);
+ break;
+ }
+ break;
+ }
+
+ case s_chunk_size_almost_done:
+ {
+ assert(parser->flags & F_CHUNKED);
+ STRICT_CHECK(ch != LF);
+
+ parser->nread = 0;
+ nread = 0;
+
+ if (parser->content_length == 0) {
+ parser->flags |= F_TRAILING;
+ UPDATE_STATE(s_header_field_start);
+ } else {
+ UPDATE_STATE(s_chunk_data);
+ }
+ CALLBACK_NOTIFY(chunk_header);
+ break;
+ }
+
+ case s_chunk_data:
+ {
+ uint64_t to_read = MIN(parser->content_length,
+ (uint64_t) ((data + len) - p));
+
+ assert(parser->flags & F_CHUNKED);
+ assert(parser->content_length != 0
+ && parser->content_length != ULLONG_MAX);
+
+ /* See the explanation in s_body_identity for why the content
+ * length and data pointers are managed this way.
+ */
+ MARK(body);
+ parser->content_length -= to_read;
+ p += to_read - 1;
+
+ if (parser->content_length == 0) {
+ UPDATE_STATE(s_chunk_data_almost_done);
+ }
+
+ break;
+ }
+
+ case s_chunk_data_almost_done:
+ assert(parser->flags & F_CHUNKED);
+ assert(parser->content_length == 0);
+ STRICT_CHECK(ch != CR);
+ UPDATE_STATE(s_chunk_data_done);
+ CALLBACK_DATA(body);
+ break;
+
+ case s_chunk_data_done:
+ assert(parser->flags & F_CHUNKED);
+ STRICT_CHECK(ch != LF);
+ parser->nread = 0;
+ nread = 0;
+ UPDATE_STATE(s_chunk_size_start);
+ CALLBACK_NOTIFY(chunk_complete);
+ break;
+
+ default:
+ assert(0 && "unhandled state");
+ SET_ERRNO(HPE_INVALID_INTERNAL_STATE);
+ goto error;
+ }
+ }
+
+ /* Run callbacks for any marks that we have leftover after we ran out of
+ * bytes. There should be at most one of these set, so it's OK to invoke
+ * them in series (unset marks will not result in callbacks).
+ *
+ * We use the NOADVANCE() variety of callbacks here because 'p' has already
+ * overflowed 'data' and this allows us to correct for the off-by-one that
+ * we'd otherwise have (since CALLBACK_DATA() is meant to be run with a 'p'
+ * value that's in-bounds).
+ */
+
+ assert(((header_field_mark ? 1 : 0) +
+ (header_value_mark ? 1 : 0) +
+ (url_mark ? 1 : 0) +
+ (body_mark ? 1 : 0) +
+ (status_mark ? 1 : 0)) <= 1);
+
+ CALLBACK_DATA_NOADVANCE(header_field);
+ CALLBACK_DATA_NOADVANCE(header_value);
+ CALLBACK_DATA_NOADVANCE(url);
+ CALLBACK_DATA_NOADVANCE(body);
+ CALLBACK_DATA_NOADVANCE(status);
+
+ RETURN(len);
+
+error:
+ if (HTTP_PARSER_ERRNO(parser) == HPE_OK) {
+ SET_ERRNO(HPE_UNKNOWN);
+ }
+
+ RETURN(p - data);
+}
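+
+/* A minimal embedding sketch (error handling trimmed): feed bytes as they
+ * arrive and compare the return value with the input length to detect a
+ * parse error or an upgraded connection:
+ *
+ *   http_parser parser;
+ *   http_parser_settings settings;
+ *   http_parser_settings_init(&settings);
+ *   settings.on_url = on_url;   // register any subset of callbacks
+ *   http_parser_init(&parser, HTTP_REQUEST);
+ *   nparsed = http_parser_execute(&parser, &settings, buf, nread);
+ *   if (parser.upgrade) {
+ *     // hand buf + nparsed .. buf + nread to the upgraded protocol
+ *   } else if (nparsed != nread) {
+ *     // consult HTTP_PARSER_ERRNO(&parser)
+ *   }
+ */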
+
+
+/* Does the parser need to see an EOF to find the end of the message? */
+int
+http_message_needs_eof (const http_parser *parser)
+{
+ if (parser->type == HTTP_REQUEST) {
+ return 0;
+ }
+
+ /* See RFC 2616 section 4.4 */
+ if (parser->status_code / 100 == 1 || /* 1xx e.g. Continue */
+ parser->status_code == 204 || /* No Content */
+ parser->status_code == 304 || /* Not Modified */
+ parser->flags & F_SKIPBODY) { /* response to a HEAD request */
+ return 0;
+ }
+
+ /* RFC 7230 3.3.3, see `s_headers_almost_done` */
+ if ((parser->extra_flags & (F_TRANSFER_ENCODING >> 8)) &&
+ (parser->flags & F_CHUNKED) == 0) {
+ return 1;
+ }
+
+ if ((parser->flags & F_CHUNKED) || parser->content_length != ULLONG_MAX) {
+ return 0;
+ }
+
+ return 1;
+}
+
+
+int
+http_should_keep_alive (const http_parser *parser)
+{
+ if (parser->http_major > 0 && parser->http_minor > 0) {
+ /* HTTP/1.1 */
+ if (parser->flags & F_CONNECTION_CLOSE) {
+ return 0;
+ }
+ } else {
+ /* HTTP/1.0 or earlier */
+ if (!(parser->flags & F_CONNECTION_KEEP_ALIVE)) {
+ return 0;
+ }
+ }
+
+ return !http_message_needs_eof(parser);
+}
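+
+/* Typically consulted from on_message_complete(); a sketch, where
+ * close_connection() is a hypothetical embedder helper:
+ *
+ *   if (!http_should_keep_alive(&parser))
+ *     close_connection();
+ */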
+
+
+const char *
+http_method_str (enum http_method m)
+{
+ return ELEM_AT(method_strings, m, "<unknown>");
+}
+
+const char *
+http_status_str (enum http_status s)
+{
+ switch (s) {
+#define XX(num, name, string) case HTTP_STATUS_##name: return #string;
+ HTTP_STATUS_MAP(XX)
+#undef XX
+ default: return "<unknown>";
+ }
+}
+
+void
+http_parser_init (http_parser *parser, enum http_parser_type t)
+{
+ void *data = parser->data; /* preserve application data */
+ memset(parser, 0, sizeof(*parser));
+ parser->data = data;
+ parser->type = t;
+ parser->state = (t == HTTP_REQUEST ? s_start_req : (t == HTTP_RESPONSE ? s_start_res : s_start_req_or_res));
+ parser->http_errno = HPE_OK;
+}
+
+void
+http_parser_settings_init(http_parser_settings *settings)
+{
+ memset(settings, 0, sizeof(*settings));
+}
+
+const char *
+http_errno_name(enum http_errno err) {
+ assert(((size_t) err) < ARRAY_SIZE(http_strerror_tab));
+ return http_strerror_tab[err].name;
+}
+
+const char *
+http_errno_description(enum http_errno err) {
+ assert(((size_t) err) < ARRAY_SIZE(http_strerror_tab));
+ return http_strerror_tab[err].description;
+}
+
+static enum http_host_state
+http_parse_host_char(enum http_host_state s, const char ch) {
+ switch(s) {
+ case s_http_userinfo:
+ case s_http_userinfo_start:
+ if (ch == '@') {
+ return s_http_host_start;
+ }
+
+ if (IS_USERINFO_CHAR(ch)) {
+ return s_http_userinfo;
+ }
+ break;
+
+ case s_http_host_start:
+ if (ch == '[') {
+ return s_http_host_v6_start;
+ }
+
+ if (IS_HOST_CHAR(ch)) {
+ return s_http_host;
+ }
+
+ break;
+
+ case s_http_host:
+ if (IS_HOST_CHAR(ch)) {
+ return s_http_host;
+ }
+
+ /* fall through */
+ case s_http_host_v6_end:
+ if (ch == ':') {
+ return s_http_host_port_start;
+ }
+
+ break;
+
+ case s_http_host_v6:
+ if (ch == ']') {
+ return s_http_host_v6_end;
+ }
+
+ /* fall through */
+ case s_http_host_v6_start:
+ if (IS_HEX(ch) || ch == ':' || ch == '.') {
+ return s_http_host_v6;
+ }
+
+ if (s == s_http_host_v6 && ch == '%') {
+ return s_http_host_v6_zone_start;
+ }
+ break;
+
+ case s_http_host_v6_zone:
+ if (ch == ']') {
+ return s_http_host_v6_end;
+ }
+
+ /* fall through */
+ case s_http_host_v6_zone_start:
+ /* RFC 6874 Zone ID consists of 1*( unreserved / pct-encoded) */
+ if (IS_ALPHANUM(ch) || ch == '%' || ch == '.' || ch == '-' || ch == '_' ||
+ ch == '~') {
+ return s_http_host_v6_zone;
+ }
+ break;
+
+ case s_http_host_port:
+ case s_http_host_port_start:
+ if (IS_NUM(ch)) {
+ return s_http_host_port;
+ }
+
+ break;
+
+ default:
+ break;
+ }
+ return s_http_host_dead;
+}
+
+static int
+http_parse_host(const char * buf, struct http_parser_url *u, int found_at) {
+ enum http_host_state s;
+
+ const char *p;
+ size_t buflen = u->field_data[UF_HOST].off + u->field_data[UF_HOST].len;
+
+ assert(u->field_set & (1 << UF_HOST));
+
+ u->field_data[UF_HOST].len = 0;
+
+ s = found_at ? s_http_userinfo_start : s_http_host_start;
+
+ for (p = buf + u->field_data[UF_HOST].off; p < buf + buflen; p++) {
+ enum http_host_state new_s = http_parse_host_char(s, *p);
+
+ if (new_s == s_http_host_dead) {
+ return 1;
+ }
+
+ switch(new_s) {
+ case s_http_host:
+ if (s != s_http_host) {
+ u->field_data[UF_HOST].off = (uint16_t)(p - buf);
+ }
+ u->field_data[UF_HOST].len++;
+ break;
+
+ case s_http_host_v6:
+ if (s != s_http_host_v6) {
+ u->field_data[UF_HOST].off = (uint16_t)(p - buf);
+ }
+ u->field_data[UF_HOST].len++;
+ break;
+
+ case s_http_host_v6_zone_start:
+ case s_http_host_v6_zone:
+ u->field_data[UF_HOST].len++;
+ break;
+
+ case s_http_host_port:
+ if (s != s_http_host_port) {
+ u->field_data[UF_PORT].off = (uint16_t)(p - buf);
+ u->field_data[UF_PORT].len = 0;
+ u->field_set |= (1 << UF_PORT);
+ }
+ u->field_data[UF_PORT].len++;
+ break;
+
+ case s_http_userinfo:
+ if (s != s_http_userinfo) {
+ u->field_data[UF_USERINFO].off = (uint16_t)(p - buf);
+ u->field_data[UF_USERINFO].len = 0;
+ u->field_set |= (1 << UF_USERINFO);
+ }
+ u->field_data[UF_USERINFO].len++;
+ break;
+
+ default:
+ break;
+ }
+ s = new_s;
+ }
+
+ /* Make sure we don't end somewhere unexpected */
+ switch (s) {
+ case s_http_host_start:
+ case s_http_host_v6_start:
+ case s_http_host_v6:
+ case s_http_host_v6_zone_start:
+ case s_http_host_v6_zone:
+ case s_http_host_port_start:
+ case s_http_userinfo:
+ case s_http_userinfo_start:
+ return 1;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+void
+http_parser_url_init(struct http_parser_url *u) {
+ memset(u, 0, sizeof(*u));
+}
+
+int
+http_parser_parse_url(const char *buf, size_t buflen, int is_connect,
+ struct http_parser_url *u)
+{
+ enum state s;
+ const char *p;
+ enum http_parser_url_fields uf, old_uf;
+ int found_at = 0;
+
+ if (buflen == 0) {
+ return 1;
+ }
+
+ u->port = u->field_set = 0;
+ s = is_connect ? s_req_server_start : s_req_spaces_before_url;
+ old_uf = UF_MAX;
+
+ for (p = buf; p < buf + buflen; p++) {
+ s = parse_url_char(s, *p);
+
+ /* Figure out the next field that we're operating on */
+ switch (s) {
+ case s_dead:
+ return 1;
+
+ /* Skip delimiters */
+ case s_req_schema_slash:
+ case s_req_schema_slash_slash:
+ case s_req_server_start:
+ case s_req_query_string_start:
+ case s_req_fragment_start:
+ continue;
+
+ case s_req_schema:
+ uf = UF_SCHEMA;
+ break;
+
+ case s_req_server_with_at:
+ found_at = 1;
+
+ /* fall through */
+ case s_req_server:
+ uf = UF_HOST;
+ break;
+
+ case s_req_path:
+ uf = UF_PATH;
+ break;
+
+ case s_req_query_string:
+ uf = UF_QUERY;
+ break;
+
+ case s_req_fragment:
+ uf = UF_FRAGMENT;
+ break;
+
+ default:
+ assert(!"Unexpected state");
+ return 1;
+ }
+
+ /* Nothing's changed; soldier on */
+ if (uf == old_uf) {
+ u->field_data[uf].len++;
+ continue;
+ }
+
+ u->field_data[uf].off = (uint16_t)(p - buf);
+ u->field_data[uf].len = 1;
+
+ u->field_set |= (1 << uf);
+ old_uf = uf;
+ }
+
+ /* A host must be present if there is a schema;
+ * parsing "http:///toto" will fail. */
+ if ((u->field_set & (1 << UF_SCHEMA)) &&
+ (u->field_set & (1 << UF_HOST)) == 0) {
+ return 1;
+ }
+
+ if (u->field_set & (1 << UF_HOST)) {
+ if (http_parse_host(buf, u, found_at) != 0) {
+ return 1;
+ }
+ }
+
+ /* CONNECT requests can only contain "hostname:port" */
+ if (is_connect && u->field_set != ((1 << UF_HOST)|(1 << UF_PORT))) {
+ return 1;
+ }
+
+ if (u->field_set & (1 << UF_PORT)) {
+ uint16_t off;
+ uint16_t len;
+ const char* p;
+ const char* end;
+ unsigned long v;
+
+ off = u->field_data[UF_PORT].off;
+ len = u->field_data[UF_PORT].len;
+ end = buf + off + len;
+
+ /* NOTE: The characters are already validated and are in the [0-9] range */
+ assert(off + len <= buflen && "Port number overflow");
+ v = 0;
+ for (p = buf + off; p < end; p++) {
+ v *= 10;
+ v += *p - '0';
+
+ /* Ports have a maximum value of 2^16 - 1 (65535) */
+ if (v > 0xffff) {
+ return 1;
+ }
+ }
+
+ u->port = (uint16_t) v;
+ }
+
+ return 0;
+}
+
+void
+http_parser_pause(http_parser *parser, int paused) {
+ /* Users should only be pausing/unpausing a parser that is not in an error
+ * state. In non-debug builds, there's not much that we can do about this
+ * other than ignore it.
+ */
+ if (HTTP_PARSER_ERRNO(parser) == HPE_OK ||
+ HTTP_PARSER_ERRNO(parser) == HPE_PAUSED) {
+ uint32_t nread = parser->nread; /* used by the SET_ERRNO macro */
+ SET_ERRNO((paused) ? HPE_PAUSED : HPE_OK);
+ } else {
+ assert(0 && "Attempting to pause parser in error state");
+ }
+}
+
+int
+http_body_is_final(const struct http_parser *parser) {
+ return parser->state == s_message_done;
+}
+
+unsigned long
+http_parser_version(void) {
+ return HTTP_PARSER_VERSION_MAJOR * 0x10000 |
+ HTTP_PARSER_VERSION_MINOR * 0x00100 |
+ HTTP_PARSER_VERSION_PATCH * 0x00001;
+}
+
+void
+http_parser_set_max_header_size(uint32_t size) {
+ max_header_size = size;
+}
diff --git a/third_party/python/aiohttp/vendor/http-parser/http_parser.gyp b/third_party/python/aiohttp/vendor/http-parser/http_parser.gyp
new file mode 100644
index 0000000000..ef34ecaeae
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/http_parser.gyp
@@ -0,0 +1,111 @@
+# This file is used with the GYP meta build system.
+# http://code.google.com/p/gyp/
+# To build, try this:
+# svn co http://gyp.googlecode.com/svn/trunk gyp
+# ./gyp/gyp -f make --depth=`pwd` http_parser.gyp
+# ./out/Debug/test
+{
+ 'target_defaults': {
+ 'default_configuration': 'Debug',
+ 'configurations': {
+ # TODO: hoist these out and put them somewhere common, because
+ # RuntimeLibrary MUST MATCH across the entire project
+ 'Debug': {
+ 'defines': [ 'DEBUG', '_DEBUG' ],
+ 'cflags': [ '-Wall', '-Wextra', '-O0', '-g', '-ftrapv' ],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeLibrary': 1, # static debug
+ },
+ },
+ },
+ 'Release': {
+ 'defines': [ 'NDEBUG' ],
+ 'cflags': [ '-Wall', '-Wextra', '-O3' ],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeLibrary': 0, # static release
+ },
+ },
+ }
+ },
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ },
+ 'VCLibrarianTool': {
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ 'conditions': [
+ ['OS == "win"', {
+ 'defines': [
+ 'WIN32'
+ ],
+ }]
+ ],
+ },
+
+ 'targets': [
+ {
+ 'target_name': 'http_parser',
+ 'type': 'static_library',
+ 'include_dirs': [ '.' ],
+ 'direct_dependent_settings': {
+ 'defines': [ 'HTTP_PARSER_STRICT=0' ],
+ 'include_dirs': [ '.' ],
+ },
+ 'defines': [ 'HTTP_PARSER_STRICT=0' ],
+ 'sources': [ './http_parser.c', ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ # Compile as C++. http_parser.c is actually C99, but C++ is
+ # close enough in this case.
+ 'CompileAs': 2,
+ },
+ },
+ }]
+ ],
+ },
+
+ {
+ 'target_name': 'http_parser_strict',
+ 'type': 'static_library',
+ 'include_dirs': [ '.' ],
+ 'direct_dependent_settings': {
+ 'defines': [ 'HTTP_PARSER_STRICT=1' ],
+ 'include_dirs': [ '.' ],
+ },
+ 'defines': [ 'HTTP_PARSER_STRICT=1' ],
+ 'sources': [ './http_parser.c', ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ # Compile as C++. http_parser.c is actually C99, but C++ is
+ # close enough in this case.
+ 'CompileAs': 2,
+ },
+ },
+ }]
+ ],
+ },
+
+ {
+ 'target_name': 'test-nonstrict',
+ 'type': 'executable',
+ 'dependencies': [ 'http_parser' ],
+ 'sources': [ 'test.c' ]
+ },
+
+ {
+ 'target_name': 'test-strict',
+ 'type': 'executable',
+ 'dependencies': [ 'http_parser_strict' ],
+ 'sources': [ 'test.c' ]
+ }
+ ]
+}
diff --git a/third_party/python/aiohttp/vendor/http-parser/http_parser.h b/third_party/python/aiohttp/vendor/http-parser/http_parser.h
new file mode 100644
index 0000000000..df8825260d
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/http_parser.h
@@ -0,0 +1,443 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#ifndef http_parser_h
+#define http_parser_h
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Also update SONAME in the Makefile whenever you change these. */
+#define HTTP_PARSER_VERSION_MAJOR 2
+#define HTTP_PARSER_VERSION_MINOR 9
+#define HTTP_PARSER_VERSION_PATCH 4
+
+#include <stddef.h>
+#if defined(_WIN32) && !defined(__MINGW32__) && \
+ (!defined(_MSC_VER) || _MSC_VER<1600) && !defined(__WINE__)
+#include <BaseTsd.h>
+typedef __int8 int8_t;
+typedef unsigned __int8 uint8_t;
+typedef __int16 int16_t;
+typedef unsigned __int16 uint16_t;
+typedef __int32 int32_t;
+typedef unsigned __int32 uint32_t;
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+#else
+#include <stdint.h>
+#endif
+
+/* Compile with -DHTTP_PARSER_STRICT=0 to perform fewer checks, but run
+ * faster
+ */
+#ifndef HTTP_PARSER_STRICT
+# define HTTP_PARSER_STRICT 1
+#endif
+
+/* Maximum header size allowed. If the macro is not defined
+ * before including this header then the default is used. To
+ * change the maximum header size, define the macro in the build
+ * environment (e.g. -DHTTP_MAX_HEADER_SIZE=<value>). To remove
+ * the effective limit on the size of the header, define the macro
+ * to a very large number (e.g. -DHTTP_MAX_HEADER_SIZE=0x7fffffff)
+ */
+#ifndef HTTP_MAX_HEADER_SIZE
+# define HTTP_MAX_HEADER_SIZE (80*1024)
+#endif
+
+typedef struct http_parser http_parser;
+typedef struct http_parser_settings http_parser_settings;
+
+
+/* Callbacks should return non-zero to indicate an error. The parser will
+ * then halt execution.
+ *
+ * The one exception is on_headers_complete. In an HTTP_RESPONSE parser,
+ * returning '1' from on_headers_complete will tell the parser that it
+ * should not expect a body. This is used when receiving a response to a
+ * HEAD request, which may contain 'Content-Length' or 'Transfer-Encoding:
+ * chunked' headers that indicate the presence of a body.
+ *
+ * Returning `2` from on_headers_complete will tell the parser that it should
+ * expect neither a body nor any further responses on this connection. This
+ * is useful for handling responses to a CONNECT request, which may not
+ * contain `Upgrade` or `Connection: upgrade` headers.
+ *
+ * http_data_cb does not return data chunks. It will be called arbitrarily
+ * many times for each string. E.g. you might get 10 "on_url" callbacks,
+ * each providing just a few more characters of data.
+ */
+typedef int (*http_data_cb) (http_parser*, const char *at, size_t length);
+typedef int (*http_cb) (http_parser*);
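+
+/* A minimal illustrative sketch (not part of the API itself): a client
+ * that knows it issued a HEAD request can return 1 from its
+ * on_headers_complete callback so the parser does not expect a body.
+ * The `my_connection` type and `sent_head` field are hypothetical:
+ *
+ *   static int on_headers_complete_cb(http_parser *p) {
+ *     my_connection *conn = (my_connection *) p->data;
+ *     return conn->sent_head ? 1 : 0;
+ *   }
+ */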
+
+
+/* Status Codes */
+#define HTTP_STATUS_MAP(XX) \
+ XX(100, CONTINUE, Continue) \
+ XX(101, SWITCHING_PROTOCOLS, Switching Protocols) \
+ XX(102, PROCESSING, Processing) \
+ XX(200, OK, OK) \
+ XX(201, CREATED, Created) \
+ XX(202, ACCEPTED, Accepted) \
+ XX(203, NON_AUTHORITATIVE_INFORMATION, Non-Authoritative Information) \
+ XX(204, NO_CONTENT, No Content) \
+ XX(205, RESET_CONTENT, Reset Content) \
+ XX(206, PARTIAL_CONTENT, Partial Content) \
+ XX(207, MULTI_STATUS, Multi-Status) \
+ XX(208, ALREADY_REPORTED, Already Reported) \
+ XX(226, IM_USED, IM Used) \
+ XX(300, MULTIPLE_CHOICES, Multiple Choices) \
+ XX(301, MOVED_PERMANENTLY, Moved Permanently) \
+ XX(302, FOUND, Found) \
+ XX(303, SEE_OTHER, See Other) \
+ XX(304, NOT_MODIFIED, Not Modified) \
+ XX(305, USE_PROXY, Use Proxy) \
+ XX(307, TEMPORARY_REDIRECT, Temporary Redirect) \
+ XX(308, PERMANENT_REDIRECT, Permanent Redirect) \
+ XX(400, BAD_REQUEST, Bad Request) \
+ XX(401, UNAUTHORIZED, Unauthorized) \
+ XX(402, PAYMENT_REQUIRED, Payment Required) \
+ XX(403, FORBIDDEN, Forbidden) \
+ XX(404, NOT_FOUND, Not Found) \
+ XX(405, METHOD_NOT_ALLOWED, Method Not Allowed) \
+ XX(406, NOT_ACCEPTABLE, Not Acceptable) \
+ XX(407, PROXY_AUTHENTICATION_REQUIRED, Proxy Authentication Required) \
+ XX(408, REQUEST_TIMEOUT, Request Timeout) \
+ XX(409, CONFLICT, Conflict) \
+ XX(410, GONE, Gone) \
+ XX(411, LENGTH_REQUIRED, Length Required) \
+ XX(412, PRECONDITION_FAILED, Precondition Failed) \
+ XX(413, PAYLOAD_TOO_LARGE, Payload Too Large) \
+ XX(414, URI_TOO_LONG, URI Too Long) \
+ XX(415, UNSUPPORTED_MEDIA_TYPE, Unsupported Media Type) \
+ XX(416, RANGE_NOT_SATISFIABLE, Range Not Satisfiable) \
+ XX(417, EXPECTATION_FAILED, Expectation Failed) \
+ XX(421, MISDIRECTED_REQUEST, Misdirected Request) \
+ XX(422, UNPROCESSABLE_ENTITY, Unprocessable Entity) \
+ XX(423, LOCKED, Locked) \
+ XX(424, FAILED_DEPENDENCY, Failed Dependency) \
+ XX(426, UPGRADE_REQUIRED, Upgrade Required) \
+ XX(428, PRECONDITION_REQUIRED, Precondition Required) \
+ XX(429, TOO_MANY_REQUESTS, Too Many Requests) \
+ XX(431, REQUEST_HEADER_FIELDS_TOO_LARGE, Request Header Fields Too Large) \
+ XX(451, UNAVAILABLE_FOR_LEGAL_REASONS, Unavailable For Legal Reasons) \
+ XX(500, INTERNAL_SERVER_ERROR, Internal Server Error) \
+ XX(501, NOT_IMPLEMENTED, Not Implemented) \
+ XX(502, BAD_GATEWAY, Bad Gateway) \
+ XX(503, SERVICE_UNAVAILABLE, Service Unavailable) \
+ XX(504, GATEWAY_TIMEOUT, Gateway Timeout) \
+ XX(505, HTTP_VERSION_NOT_SUPPORTED, HTTP Version Not Supported) \
+ XX(506, VARIANT_ALSO_NEGOTIATES, Variant Also Negotiates) \
+ XX(507, INSUFFICIENT_STORAGE, Insufficient Storage) \
+ XX(508, LOOP_DETECTED, Loop Detected) \
+ XX(510, NOT_EXTENDED, Not Extended) \
+ XX(511, NETWORK_AUTHENTICATION_REQUIRED, Network Authentication Required) \
+
+enum http_status
+ {
+#define XX(num, name, string) HTTP_STATUS_##name = num,
+ HTTP_STATUS_MAP(XX)
+#undef XX
+ };
+
+
+/* Request Methods */
+#define HTTP_METHOD_MAP(XX) \
+ XX(0, DELETE, DELETE) \
+ XX(1, GET, GET) \
+ XX(2, HEAD, HEAD) \
+ XX(3, POST, POST) \
+ XX(4, PUT, PUT) \
+ /* pathological */ \
+ XX(5, CONNECT, CONNECT) \
+ XX(6, OPTIONS, OPTIONS) \
+ XX(7, TRACE, TRACE) \
+ /* WebDAV */ \
+ XX(8, COPY, COPY) \
+ XX(9, LOCK, LOCK) \
+ XX(10, MKCOL, MKCOL) \
+ XX(11, MOVE, MOVE) \
+ XX(12, PROPFIND, PROPFIND) \
+ XX(13, PROPPATCH, PROPPATCH) \
+ XX(14, SEARCH, SEARCH) \
+ XX(15, UNLOCK, UNLOCK) \
+ XX(16, BIND, BIND) \
+ XX(17, REBIND, REBIND) \
+ XX(18, UNBIND, UNBIND) \
+ XX(19, ACL, ACL) \
+ /* subversion */ \
+ XX(20, REPORT, REPORT) \
+ XX(21, MKACTIVITY, MKACTIVITY) \
+ XX(22, CHECKOUT, CHECKOUT) \
+ XX(23, MERGE, MERGE) \
+ /* upnp */ \
+ XX(24, MSEARCH, M-SEARCH) \
+ XX(25, NOTIFY, NOTIFY) \
+ XX(26, SUBSCRIBE, SUBSCRIBE) \
+ XX(27, UNSUBSCRIBE, UNSUBSCRIBE) \
+ /* RFC-5789 */ \
+ XX(28, PATCH, PATCH) \
+ XX(29, PURGE, PURGE) \
+ /* CalDAV */ \
+ XX(30, MKCALENDAR, MKCALENDAR) \
+ /* RFC-2068, section 19.6.1.2 */ \
+ XX(31, LINK, LINK) \
+ XX(32, UNLINK, UNLINK) \
+ /* icecast */ \
+ XX(33, SOURCE, SOURCE) \
+
+enum http_method
+ {
+#define XX(num, name, string) HTTP_##name = num,
+ HTTP_METHOD_MAP(XX)
+#undef XX
+ };
+
+
+enum http_parser_type { HTTP_REQUEST, HTTP_RESPONSE, HTTP_BOTH };
+
+
+/* Flag values for http_parser.flags field */
+enum flags
+ { F_CHUNKED = 1 << 0
+ , F_CONNECTION_KEEP_ALIVE = 1 << 1
+ , F_CONNECTION_CLOSE = 1 << 2
+ , F_CONNECTION_UPGRADE = 1 << 3
+ , F_TRAILING = 1 << 4
+ , F_UPGRADE = 1 << 5
+ , F_SKIPBODY = 1 << 6
+ , F_CONTENTLENGTH = 1 << 7
+ , F_TRANSFER_ENCODING = 1 << 8 /* Never set in http_parser.flags */
+ };
+
+
+/* Map for errno-related constants
+ *
+ * The provided argument should be a macro that takes 2 arguments.
+ */
+#define HTTP_ERRNO_MAP(XX) \
+ /* No error */ \
+ XX(OK, "success") \
+ \
+ /* Callback-related errors */ \
+ XX(CB_message_begin, "the on_message_begin callback failed") \
+ XX(CB_url, "the on_url callback failed") \
+ XX(CB_header_field, "the on_header_field callback failed") \
+ XX(CB_header_value, "the on_header_value callback failed") \
+ XX(CB_headers_complete, "the on_headers_complete callback failed") \
+ XX(CB_body, "the on_body callback failed") \
+ XX(CB_message_complete, "the on_message_complete callback failed") \
+ XX(CB_status, "the on_status callback failed") \
+ XX(CB_chunk_header, "the on_chunk_header callback failed") \
+ XX(CB_chunk_complete, "the on_chunk_complete callback failed") \
+ \
+ /* Parsing-related errors */ \
+ XX(INVALID_EOF_STATE, "stream ended at an unexpected time") \
+ XX(HEADER_OVERFLOW, \
+ "too many header bytes seen; overflow detected") \
+ XX(CLOSED_CONNECTION, \
+ "data received after completed connection: close message") \
+ XX(INVALID_VERSION, "invalid HTTP version") \
+ XX(INVALID_STATUS, "invalid HTTP status code") \
+ XX(INVALID_METHOD, "invalid HTTP method") \
+ XX(INVALID_URL, "invalid URL") \
+ XX(INVALID_HOST, "invalid host") \
+ XX(INVALID_PORT, "invalid port") \
+ XX(INVALID_PATH, "invalid path") \
+ XX(INVALID_QUERY_STRING, "invalid query string") \
+ XX(INVALID_FRAGMENT, "invalid fragment") \
+ XX(LF_EXPECTED, "LF character expected") \
+ XX(INVALID_HEADER_TOKEN, "invalid character in header") \
+ XX(INVALID_CONTENT_LENGTH, \
+ "invalid character in content-length header") \
+ XX(UNEXPECTED_CONTENT_LENGTH, \
+ "unexpected content-length header") \
+ XX(INVALID_CHUNK_SIZE, \
+ "invalid character in chunk size header") \
+ XX(INVALID_CONSTANT, "invalid constant string") \
+ XX(INVALID_INTERNAL_STATE, "encountered unexpected internal state")\
+ XX(STRICT, "strict mode assertion failed") \
+ XX(PAUSED, "parser is paused") \
+ XX(UNKNOWN, "an unknown error occurred") \
+ XX(INVALID_TRANSFER_ENCODING, \
+ "request has invalid transfer-encoding") \
+
+
+/* Define HPE_* values for each errno value above */
+#define HTTP_ERRNO_GEN(n, s) HPE_##n,
+enum http_errno {
+ HTTP_ERRNO_MAP(HTTP_ERRNO_GEN)
+};
+#undef HTTP_ERRNO_GEN
+
+
+/* Get an http_errno value from an http_parser */
+#define HTTP_PARSER_ERRNO(p) ((enum http_errno) (p)->http_errno)
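+
+/* Illustrative error-reporting sketch, assuming `parser` has just been run
+ * through http_parser_execute(); logging to stderr is an example choice,
+ * not a library requirement:
+ *
+ *   enum http_errno err = HTTP_PARSER_ERRNO(&parser);
+ *   if (err != HPE_OK) {
+ *     fprintf(stderr, "parse error: %s (%s)\n",
+ *             http_errno_name(err), http_errno_description(err));
+ *   }
+ */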
+
+
+struct http_parser {
+ /** PRIVATE **/
+ unsigned int type : 2; /* enum http_parser_type */
+ unsigned int flags : 8; /* F_* values from 'flags' enum; semi-public */
+ unsigned int state : 7; /* enum state from http_parser.c */
+ unsigned int header_state : 7; /* enum header_state from http_parser.c */
+ unsigned int index : 5; /* index into current matcher */
+ unsigned int extra_flags : 2;
+ unsigned int lenient_http_headers : 1;
+
+ uint32_t nread; /* # bytes read in various scenarios */
+ uint64_t content_length; /* # bytes in body (0 if no Content-Length header) */
+
+ /** READ-ONLY **/
+ unsigned short http_major;
+ unsigned short http_minor;
+ unsigned int status_code : 16; /* responses only */
+ unsigned int method : 8; /* requests only */
+ unsigned int http_errno : 7;
+
+ /* 1 = Upgrade header was present and the parser has exited because of that.
+ * 0 = No upgrade header present.
+ * Should be checked when http_parser_execute() returns in addition to
+ * error checking.
+ */
+ unsigned int upgrade : 1;
+
+ /** PUBLIC **/
+ void *data; /* A pointer to the application's "connection" or "socket" object */
+};
+
+
+struct http_parser_settings {
+ http_cb on_message_begin;
+ http_data_cb on_url;
+ http_data_cb on_status;
+ http_data_cb on_header_field;
+ http_data_cb on_header_value;
+ http_cb on_headers_complete;
+ http_data_cb on_body;
+ http_cb on_message_complete;
+ /* When on_chunk_header is called, the current chunk length is stored
+ * in parser->content_length.
+ */
+ http_cb on_chunk_header;
+ http_cb on_chunk_complete;
+};
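+
+/* Illustrative setup sketch: zero the settings with
+ * http_parser_settings_init() (declared below) and install only the
+ * callbacks you need. The `my_on_*` handler names are hypothetical:
+ *
+ *   http_parser_settings settings;
+ *   http_parser_settings_init(&settings);
+ *   settings.on_url = my_on_url;
+ *   settings.on_body = my_on_body;
+ *   settings.on_message_complete = my_on_message_complete;
+ */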
+
+
+enum http_parser_url_fields
+ { UF_SCHEMA = 0
+ , UF_HOST = 1
+ , UF_PORT = 2
+ , UF_PATH = 3
+ , UF_QUERY = 4
+ , UF_FRAGMENT = 5
+ , UF_USERINFO = 6
+ , UF_MAX = 7
+ };
+
+
+/* Result structure for http_parser_parse_url().
+ *
+ * Callers should index into field_data[] with UF_* values iff field_set
+ * has the relevant (1 << UF_*) bit set. As a courtesy to clients (and
+ * because we probably have padding left over), we convert any port to
+ * a uint16_t.
+ */
+struct http_parser_url {
+ uint16_t field_set; /* Bitmask of (1 << UF_*) values */
+ uint16_t port; /* Converted UF_PORT string */
+
+ struct {
+ uint16_t off; /* Offset into buffer in which field starts */
+ uint16_t len; /* Length of run in buffer */
+ } field_data[UF_MAX];
+};
+
+
+/* Returns the library version. Bits 16-23 contain the major version number,
+ * bits 8-15 the minor version number and bits 0-7 the patch level.
+ * Usage example:
+ *
+ * unsigned long version = http_parser_version();
+ * unsigned major = (version >> 16) & 255;
+ * unsigned minor = (version >> 8) & 255;
+ * unsigned patch = version & 255;
+ * printf("http_parser v%u.%u.%u\n", major, minor, patch);
+ */
+unsigned long http_parser_version(void);
+
+void http_parser_init(http_parser *parser, enum http_parser_type type);
+
+
+/* Initialize http_parser_settings members to 0
+ */
+void http_parser_settings_init(http_parser_settings *settings);
+
+
+/* Executes the parser. Returns number of parsed bytes. Sets
+ * `parser->http_errno` on error. */
+size_t http_parser_execute(http_parser *parser,
+ const http_parser_settings *settings,
+ const char *data,
+ size_t len);
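+
+/* A minimal driving-loop sketch, assuming a `settings` struct prepared as
+ * in the sketch above and hypothetical `recv_data`/`recv_len` coming from
+ * the application's socket layer:
+ *
+ *   http_parser parser;
+ *   http_parser_init(&parser, HTTP_REQUEST);
+ *   parser.data = my_connection;            // hypothetical user pointer
+ *
+ *   size_t nparsed = http_parser_execute(&parser, &settings,
+ *                                        recv_data, recv_len);
+ *   if (parser.upgrade) {
+ *     // handle the upgraded protocol starting at recv_data + nparsed
+ *   } else if (nparsed != recv_len) {
+ *     // HTTP_PARSER_ERRNO(&parser) describes the failure
+ *   }
+ */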
+
+
+/* If http_should_keep_alive() in the on_headers_complete or
+ * on_message_complete callback returns 0, then this should be
+ * the last message on the connection.
+ * If you are the server, respond with the "Connection: close" header.
+ * If you are the client, close the connection.
+ */
+int http_should_keep_alive(const http_parser *parser);
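+
+/* Illustrative sketch inside an on_message_complete callback; the
+ * `mark_connection_for_close` helper and `my_connection` type are
+ * hypothetical:
+ *
+ *   static int on_message_complete_cb(http_parser *p) {
+ *     if (!http_should_keep_alive(p))
+ *       mark_connection_for_close((my_connection *) p->data);
+ *     return 0;
+ *   }
+ */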
+
+/* Returns a string version of the HTTP method. */
+const char *http_method_str(enum http_method m);
+
+/* Returns a string version of the HTTP status code. */
+const char *http_status_str(enum http_status s);
+
+/* Return a string name of the given error */
+const char *http_errno_name(enum http_errno err);
+
+/* Return a string description of the given error */
+const char *http_errno_description(enum http_errno err);
+
+/* Initialize all http_parser_url members to 0 */
+void http_parser_url_init(struct http_parser_url *u);
+
+/* Parse a URL; return nonzero on failure */
+int http_parser_parse_url(const char *buf, size_t buflen,
+ int is_connect,
+ struct http_parser_url *u);
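+
+/* Illustrative URL-parsing sketch: on success, each (1 << UF_*) bit in
+ * u.field_set guards the corresponding off/len pair in u.field_data:
+ *
+ *   const char *url = "http://example.com:8080/path?q=1";
+ *   struct http_parser_url u;
+ *   http_parser_url_init(&u);
+ *   if (http_parser_parse_url(url, strlen(url), 0, &u) == 0 &&
+ *       (u.field_set & (1 << UF_HOST))) {
+ *     printf("host: %.*s port: %u\n",
+ *            (int) u.field_data[UF_HOST].len,
+ *            url + u.field_data[UF_HOST].off,
+ *            u.port);
+ *   }
+ */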
+
+/* Pause or un-pause the parser; a nonzero value pauses */
+void http_parser_pause(http_parser *parser, int paused);
+
+/* Checks if this is the final chunk of the body. */
+int http_body_is_final(const http_parser *parser);
+
+/* Change the maximum header size provided at compile time. */
+void http_parser_set_max_header_size(uint32_t size);
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/third_party/python/aiohttp/vendor/http-parser/test.c b/third_party/python/aiohttp/vendor/http-parser/test.c
new file mode 100644
index 0000000000..798342451e
--- /dev/null
+++ b/third_party/python/aiohttp/vendor/http-parser/test.c
@@ -0,0 +1,4600 @@
+/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+#include "http_parser.h"
+#include <stdlib.h> /* rand */
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+
+#if defined(__APPLE__)
+# undef strlncpy
+#endif /* defined(__APPLE__) */
+
+#undef TRUE
+#define TRUE 1
+#undef FALSE
+#define FALSE 0
+
+#define MAX_HEADERS 13
+#define MAX_ELEMENT_SIZE 2048
+#define MAX_CHUNKS 16
+
+#define MIN(a,b) ((a) < (b) ? (a) : (b))
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*x))
+
+static http_parser parser;
+
+struct message {
+ const char *name; // for debugging purposes
+ const char *raw;
+ enum http_parser_type type;
+ enum http_method method;
+ int status_code;
+ char response_status[MAX_ELEMENT_SIZE];
+ char request_path[MAX_ELEMENT_SIZE];
+ char request_url[MAX_ELEMENT_SIZE];
+ char fragment[MAX_ELEMENT_SIZE];
+ char query_string[MAX_ELEMENT_SIZE];
+ char body[MAX_ELEMENT_SIZE];
+ size_t body_size;
+ const char *host;
+ const char *userinfo;
+ uint16_t port;
+ int num_headers;
+ enum { NONE=0, FIELD, VALUE } last_header_element;
+ char headers [MAX_HEADERS][2][MAX_ELEMENT_SIZE];
+ int should_keep_alive;
+
+ int num_chunks;
+ int num_chunks_complete;
+ int chunk_lengths[MAX_CHUNKS];
+
+ const char *upgrade; // upgraded body
+
+ unsigned short http_major;
+ unsigned short http_minor;
+
+ int message_begin_cb_called;
+ int headers_complete_cb_called;
+ int message_complete_cb_called;
+ int status_cb_called;
+ int message_complete_on_eof;
+ int body_is_final;
+};
+
+static int currently_parsing_eof;
+
+static struct message messages[5];
+static int num_messages;
+static http_parser_settings *current_pause_parser;
+
+/* * R E Q U E S T S * */
+const struct message requests[] =
+#define CURL_GET 0
+{ {.name= "curl get"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /test HTTP/1.1\r\n"
+ "User-Agent: curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1\r\n"
+ "Host: 0.0.0.0=5000\r\n"
+ "Accept: */*\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/test"
+ ,.request_url= "/test"
+ ,.num_headers= 3
+ ,.headers=
+ { { "User-Agent", "curl/7.18.0 (i486-pc-linux-gnu) libcurl/7.18.0 OpenSSL/0.9.8g zlib/1.2.3.3 libidn/1.1" }
+ , { "Host", "0.0.0.0=5000" }
+ , { "Accept", "*/*" }
+ }
+ ,.body= ""
+ }
+
+#define FIREFOX_GET 1
+, {.name= "firefox get"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /favicon.ico HTTP/1.1\r\n"
+ "Host: 0.0.0.0=5000\r\n"
+ "User-Agent: Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9) Gecko/2008061015 Firefox/3.0\r\n"
+ "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
+ "Accept-Language: en-us,en;q=0.5\r\n"
+ "Accept-Encoding: gzip,deflate\r\n"
+ "Accept-Charset: ISO-8859-1,utf-8;q=0.7,*;q=0.7\r\n"
+ "Keep-Alive: 300\r\n"
+ "Connection: keep-alive\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/favicon.ico"
+ ,.request_url= "/favicon.ico"
+ ,.num_headers= 8
+ ,.headers=
+ { { "Host", "0.0.0.0=5000" }
+ , { "User-Agent", "Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9) Gecko/2008061015 Firefox/3.0" }
+ , { "Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8" }
+ , { "Accept-Language", "en-us,en;q=0.5" }
+ , { "Accept-Encoding", "gzip,deflate" }
+ , { "Accept-Charset", "ISO-8859-1,utf-8;q=0.7,*;q=0.7" }
+ , { "Keep-Alive", "300" }
+ , { "Connection", "keep-alive" }
+ }
+ ,.body= ""
+ }
+
+#define DUMBLUCK 2
+, {.name= "dumbluck"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /dumbluck HTTP/1.1\r\n"
+ "aaaaaaaaaaaaa:++++++++++\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/dumbluck"
+ ,.request_url= "/dumbluck"
+ ,.num_headers= 1
+ ,.headers=
+ { { "aaaaaaaaaaaaa", "++++++++++" }
+ }
+ ,.body= ""
+ }
+
+#define FRAGMENT_IN_URI 3
+, {.name= "fragment in url"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /forums/1/topics/2375?page=1#posts-17408 HTTP/1.1\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= "page=1"
+ ,.fragment= "posts-17408"
+ ,.request_path= "/forums/1/topics/2375"
+ /* XXX request url does include fragment? */
+ ,.request_url= "/forums/1/topics/2375?page=1#posts-17408"
+ ,.num_headers= 0
+ ,.body= ""
+ }
+
+#define GET_NO_HEADERS_NO_BODY 4
+, {.name= "get no headers no body"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /get_no_headers_no_body/world HTTP/1.1\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE /* would need Connection: close */
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/get_no_headers_no_body/world"
+ ,.request_url= "/get_no_headers_no_body/world"
+ ,.num_headers= 0
+ ,.body= ""
+ }
+
+#define GET_ONE_HEADER_NO_BODY 5
+, {.name= "get one header no body"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /get_one_header_no_body HTTP/1.1\r\n"
+ "Accept: */*\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE /* would need Connection: close */
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/get_one_header_no_body"
+ ,.request_url= "/get_one_header_no_body"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Accept" , "*/*" }
+ }
+ ,.body= ""
+ }
+
+#define GET_FUNKY_CONTENT_LENGTH 6
+, {.name= "get funky content length body hello"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /get_funky_content_length_body_hello HTTP/1.0\r\n"
+ "conTENT-Length: 5\r\n"
+ "\r\n"
+ "HELLO"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/get_funky_content_length_body_hello"
+ ,.request_url= "/get_funky_content_length_body_hello"
+ ,.num_headers= 1
+ ,.headers=
+ { { "conTENT-Length" , "5" }
+ }
+ ,.body= "HELLO"
+ }
+
+#define POST_IDENTITY_BODY_WORLD 7
+, {.name= "post identity body world"
+ ,.type= HTTP_REQUEST
+ ,.raw= "POST /post_identity_body_world?q=search#hey HTTP/1.1\r\n"
+ "Accept: */*\r\n"
+ "Content-Length: 5\r\n"
+ "\r\n"
+ "World"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= "q=search"
+ ,.fragment= "hey"
+ ,.request_path= "/post_identity_body_world"
+ ,.request_url= "/post_identity_body_world?q=search#hey"
+ ,.num_headers= 2
+ ,.headers=
+ { { "Accept", "*/*" }
+ , { "Content-Length", "5" }
+ }
+ ,.body= "World"
+ }
+
+#define POST_CHUNKED_ALL_YOUR_BASE 8
+, {.name= "post - chunked body: all your base are belong to us"
+ ,.type= HTTP_REQUEST
+ ,.raw= "POST /post_chunked_all_your_base HTTP/1.1\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "1e\r\nall your base are belong to us\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/post_chunked_all_your_base"
+ ,.request_url= "/post_chunked_all_your_base"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Transfer-Encoding" , "chunked" }
+ }
+ ,.body= "all your base are belong to us"
+ ,.num_chunks_complete= 2
+ ,.chunk_lengths= { 0x1e }
+ }
+
+#define TWO_CHUNKS_MULT_ZERO_END 9
+, {.name= "two chunks ; triple zero ending"
+ ,.type= HTTP_REQUEST
+ ,.raw= "POST /two_chunks_mult_zero_end HTTP/1.1\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "5\r\nhello\r\n"
+ "6\r\n world\r\n"
+ "000\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/two_chunks_mult_zero_end"
+ ,.request_url= "/two_chunks_mult_zero_end"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Transfer-Encoding", "chunked" }
+ }
+ ,.body= "hello world"
+ ,.num_chunks_complete= 3
+ ,.chunk_lengths= { 5, 6 }
+ }
+
+#define CHUNKED_W_TRAILING_HEADERS 10
+, {.name= "chunked with trailing headers. blech."
+ ,.type= HTTP_REQUEST
+ ,.raw= "POST /chunked_w_trailing_headers HTTP/1.1\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "5\r\nhello\r\n"
+ "6\r\n world\r\n"
+ "0\r\n"
+ "Vary: *\r\n"
+ "Content-Type: text/plain\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/chunked_w_trailing_headers"
+ ,.request_url= "/chunked_w_trailing_headers"
+ ,.num_headers= 3
+ ,.headers=
+ { { "Transfer-Encoding", "chunked" }
+ , { "Vary", "*" }
+ , { "Content-Type", "text/plain" }
+ }
+ ,.body= "hello world"
+ ,.num_chunks_complete= 3
+ ,.chunk_lengths= { 5, 6 }
+ }
+
+#define CHUNKED_W_NONSENSE_AFTER_LENGTH 11
+, {.name= "with nonsense after the length"
+ ,.type= HTTP_REQUEST
+ ,.raw= "POST /chunked_w_nonsense_after_length HTTP/1.1\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "5; ilovew3;whattheluck=aretheseparametersfor\r\nhello\r\n"
+ "6; blahblah; blah\r\n world\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/chunked_w_nonsense_after_length"
+ ,.request_url= "/chunked_w_nonsense_after_length"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Transfer-Encoding", "chunked" }
+ }
+ ,.body= "hello world"
+ ,.num_chunks_complete= 3
+ ,.chunk_lengths= { 5, 6 }
+ }
+
+#define WITH_QUOTES 12
+, {.name= "with quotes"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /with_\"stupid\"_quotes?foo=\"bar\" HTTP/1.1\r\n\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= "foo=\"bar\""
+ ,.fragment= ""
+ ,.request_path= "/with_\"stupid\"_quotes"
+ ,.request_url= "/with_\"stupid\"_quotes?foo=\"bar\""
+ ,.num_headers= 0
+ ,.headers= { }
+ ,.body= ""
+ }
+
+#define APACHEBENCH_GET 13
+/* The server receiving this request SHOULD NOT wait for EOF
+ * to know that content-length == 0.
+ * How do we represent this in a unit test? With message_complete_on_eof.
+ * Compare with NO_CONTENT_LENGTH_RESPONSE.
+ */
+, {.name = "apachebench get"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /test HTTP/1.0\r\n"
+ "Host: 0.0.0.0:5000\r\n"
+ "User-Agent: ApacheBench/2.3\r\n"
+ "Accept: */*\r\n\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/test"
+ ,.request_url= "/test"
+ ,.num_headers= 3
+ ,.headers= { { "Host", "0.0.0.0:5000" }
+ , { "User-Agent", "ApacheBench/2.3" }
+ , { "Accept", "*/*" }
+ }
+ ,.body= ""
+ }
+
+#define QUERY_URL_WITH_QUESTION_MARK_GET 14
+/* Some clients include '?' characters in query strings.
+ */
+, {.name = "query url with question mark"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /test.cgi?foo=bar?baz HTTP/1.1\r\n\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= "foo=bar?baz"
+ ,.fragment= ""
+ ,.request_path= "/test.cgi"
+ ,.request_url= "/test.cgi?foo=bar?baz"
+ ,.num_headers= 0
+ ,.headers= {}
+ ,.body= ""
+ }
+
+#define PREFIX_NEWLINE_GET 15
+/* Some clients, especially after a POST in a keep-alive connection,
+ * will send an extra CRLF before the next request
+ */
+, {.name = "newline prefix get"
+ ,.type= HTTP_REQUEST
+ ,.raw= "\r\nGET /test HTTP/1.1\r\n\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/test"
+ ,.request_url= "/test"
+ ,.num_headers= 0
+ ,.headers= { }
+ ,.body= ""
+ }
+
+#define UPGRADE_REQUEST 16
+, {.name = "upgrade request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /demo HTTP/1.1\r\n"
+ "Host: example.com\r\n"
+ "Connection: Upgrade\r\n"
+ "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00\r\n"
+ "Sec-WebSocket-Protocol: sample\r\n"
+ "Upgrade: WebSocket\r\n"
+ "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5\r\n"
+ "Origin: http://example.com\r\n"
+ "\r\n"
+ "Hot diggity dogg"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/demo"
+ ,.request_url= "/demo"
+ ,.num_headers= 7
+ ,.upgrade="Hot diggity dogg"
+ ,.headers= { { "Host", "example.com" }
+ , { "Connection", "Upgrade" }
+ , { "Sec-WebSocket-Key2", "12998 5 Y3 1 .P00" }
+ , { "Sec-WebSocket-Protocol", "sample" }
+ , { "Upgrade", "WebSocket" }
+ , { "Sec-WebSocket-Key1", "4 @1 46546xW%0l 1 5" }
+ , { "Origin", "http://example.com" }
+ }
+ ,.body= ""
+ }
+
+#define CONNECT_REQUEST 17
+, {.name = "connect request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "CONNECT 0-home0.netscape.com:443 HTTP/1.0\r\n"
+ "User-agent: Mozilla/1.1N\r\n"
+ "Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n"
+ "\r\n"
+ "some data\r\n"
+ "and yet even more data"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.method= HTTP_CONNECT
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= ""
+ ,.request_url= "0-home0.netscape.com:443"
+ ,.num_headers= 2
+ ,.upgrade="some data\r\nand yet even more data"
+ ,.headers= { { "User-agent", "Mozilla/1.1N" }
+ , { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" }
+ }
+ ,.body= ""
+ }
+
+#define REPORT_REQ 18
+, {.name= "report request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "REPORT /test HTTP/1.1\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_REPORT
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/test"
+ ,.request_url= "/test"
+ ,.num_headers= 0
+ ,.headers= {}
+ ,.body= ""
+ }
+
+#define NO_HTTP_VERSION 19
+, {.name= "request with no http version"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 0
+ ,.http_minor= 9
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/"
+ ,.request_url= "/"
+ ,.num_headers= 0
+ ,.headers= {}
+ ,.body= ""
+ }
+
+#define MSEARCH_REQ 20
+, {.name= "m-search request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "M-SEARCH * HTTP/1.1\r\n"
+ "HOST: 239.255.255.250:1900\r\n"
+ "MAN: \"ssdp:discover\"\r\n"
+ "ST: \"ssdp:all\"\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_MSEARCH
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "*"
+ ,.request_url= "*"
+ ,.num_headers= 3
+ ,.headers= { { "HOST", "239.255.255.250:1900" }
+ , { "MAN", "\"ssdp:discover\"" }
+ , { "ST", "\"ssdp:all\"" }
+ }
+ ,.body= ""
+ }
+
+#define LINE_FOLDING_IN_HEADER 21
+, {.name= "line folding in header value"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET / HTTP/1.1\r\n"
+ "Line1: abc\r\n"
+ "\tdef\r\n"
+ " ghi\r\n"
+ "\t\tjkl\r\n"
+ " mno \r\n"
+ "\t \tqrs\r\n"
+ "Line2: \t line2\t\r\n"
+ "Line3:\r\n"
+ " line3\r\n"
+ "Line4: \r\n"
+ " \r\n"
+ "Connection:\r\n"
+ " close\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/"
+ ,.request_url= "/"
+ ,.num_headers= 5
+ ,.headers= { { "Line1", "abc\tdef ghi\t\tjkl mno \t \tqrs" }
+ , { "Line2", "line2\t" }
+ , { "Line3", "line3" }
+ , { "Line4", "" }
+ , { "Connection", "close" },
+ }
+ ,.body= ""
+ }
+
+
+#define QUERY_TERMINATED_HOST 22
+, {.name= "host terminated by a query string"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET http://hypnotoad.org?hail=all HTTP/1.1\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= "hail=all"
+ ,.fragment= ""
+ ,.request_path= ""
+ ,.request_url= "http://hypnotoad.org?hail=all"
+ ,.host= "hypnotoad.org"
+ ,.num_headers= 0
+ ,.headers= { }
+ ,.body= ""
+ }
+
+#define QUERY_TERMINATED_HOSTPORT 23
+, {.name= "host:port terminated by a query string"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET http://hypnotoad.org:1234?hail=all HTTP/1.1\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= "hail=all"
+ ,.fragment= ""
+ ,.request_path= ""
+ ,.request_url= "http://hypnotoad.org:1234?hail=all"
+ ,.host= "hypnotoad.org"
+ ,.port= 1234
+ ,.num_headers= 0
+ ,.headers= { }
+ ,.body= ""
+ }
+
+#define SPACE_TERMINATED_HOSTPORT 24
+, {.name= "host:port terminated by a space"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET http://hypnotoad.org:1234 HTTP/1.1\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= ""
+ ,.request_url= "http://hypnotoad.org:1234"
+ ,.host= "hypnotoad.org"
+ ,.port= 1234
+ ,.num_headers= 0
+ ,.headers= { }
+ ,.body= ""
+ }
+
+#define PATCH_REQ 25
+, {.name = "PATCH request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "PATCH /file.txt HTTP/1.1\r\n"
+ "Host: www.example.com\r\n"
+ "Content-Type: application/example\r\n"
+ "If-Match: \"e0023aa4e\"\r\n"
+ "Content-Length: 10\r\n"
+ "\r\n"
+ "cccccccccc"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_PATCH
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/file.txt"
+ ,.request_url= "/file.txt"
+ ,.num_headers= 4
+ ,.headers= { { "Host", "www.example.com" }
+ , { "Content-Type", "application/example" }
+ , { "If-Match", "\"e0023aa4e\"" }
+ , { "Content-Length", "10" }
+ }
+ ,.body= "cccccccccc"
+ }
+
+#define CONNECT_CAPS_REQUEST 26
+, {.name = "connect caps request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "CONNECT HOME0.NETSCAPE.COM:443 HTTP/1.0\r\n"
+ "User-agent: Mozilla/1.1N\r\n"
+ "Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.method= HTTP_CONNECT
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= ""
+ ,.request_url= "HOME0.NETSCAPE.COM:443"
+ ,.num_headers= 2
+ ,.upgrade=""
+ ,.headers= { { "User-agent", "Mozilla/1.1N" }
+ , { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" }
+ }
+ ,.body= ""
+ }
+
+#if !HTTP_PARSER_STRICT
+#define UTF8_PATH_REQ 27
+, {.name= "utf-8 path request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /δ¶/δt/pope?q=1#narf HTTP/1.1\r\n"
+ "Host: github.com\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= "q=1"
+ ,.fragment= "narf"
+ ,.request_path= "/δ¶/δt/pope"
+ ,.request_url= "/δ¶/δt/pope?q=1#narf"
+ ,.num_headers= 1
+ ,.headers= { {"Host", "github.com" }
+ }
+ ,.body= ""
+ }
+
+#define HOSTNAME_UNDERSCORE 28
+, {.name = "hostname underscore"
+ ,.type= HTTP_REQUEST
+ ,.raw= "CONNECT home_0.netscape.com:443 HTTP/1.0\r\n"
+ "User-agent: Mozilla/1.1N\r\n"
+ "Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.method= HTTP_CONNECT
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= ""
+ ,.request_url= "home_0.netscape.com:443"
+ ,.num_headers= 2
+ ,.upgrade=""
+ ,.headers= { { "User-agent", "Mozilla/1.1N" }
+ , { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" }
+ }
+ ,.body= ""
+ }
+#endif /* !HTTP_PARSER_STRICT */
+
+/* see https://github.com/ry/http-parser/issues/47 */
+#define EAT_TRAILING_CRLF_NO_CONNECTION_CLOSE 29
+, {.name = "eat CRLF between requests, no \"Connection: close\" header"
+ ,.raw= "POST / HTTP/1.1\r\n"
+ "Host: www.example.com\r\n"
+ "Content-Type: application/x-www-form-urlencoded\r\n"
+ "Content-Length: 4\r\n"
+ "\r\n"
+ "q=42\r\n" /* note the trailing CRLF */
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/"
+ ,.request_url= "/"
+ ,.num_headers= 3
+ ,.upgrade= 0
+ ,.headers= { { "Host", "www.example.com" }
+ , { "Content-Type", "application/x-www-form-urlencoded" }
+ , { "Content-Length", "4" }
+ }
+ ,.body= "q=42"
+ }
+
+/* see https://github.com/ry/http-parser/issues/47 */
+#define EAT_TRAILING_CRLF_WITH_CONNECTION_CLOSE 30
+, {.name = "eat CRLF between requests even if \"Connection: close\" is set"
+ ,.raw= "POST / HTTP/1.1\r\n"
+ "Host: www.example.com\r\n"
+ "Content-Type: application/x-www-form-urlencoded\r\n"
+ "Content-Length: 4\r\n"
+ "Connection: close\r\n"
+ "\r\n"
+ "q=42\r\n" /* note the trailing CRLF */
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE /* input buffer isn't empty when on_message_complete is called */
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/"
+ ,.request_url= "/"
+ ,.num_headers= 4
+ ,.upgrade= 0
+ ,.headers= { { "Host", "www.example.com" }
+ , { "Content-Type", "application/x-www-form-urlencoded" }
+ , { "Content-Length", "4" }
+ , { "Connection", "close" }
+ }
+ ,.body= "q=42"
+ }
+
+#define PURGE_REQ 31
+, {.name = "PURGE request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "PURGE /file.txt HTTP/1.1\r\n"
+ "Host: www.example.com\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_PURGE
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/file.txt"
+ ,.request_url= "/file.txt"
+ ,.num_headers= 1
+ ,.headers= { { "Host", "www.example.com" } }
+ ,.body= ""
+ }
+
+#define SEARCH_REQ 32
+, {.name = "SEARCH request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "SEARCH / HTTP/1.1\r\n"
+ "Host: www.example.com\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_SEARCH
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/"
+ ,.request_url= "/"
+ ,.num_headers= 1
+ ,.headers= { { "Host", "www.example.com" } }
+ ,.body= ""
+ }
+
+#define PROXY_WITH_BASIC_AUTH 33
+, {.name= "host:port and basic_auth"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET http://a%12:b!&*$@hypnotoad.org:1234/toto HTTP/1.1\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.fragment= ""
+ ,.request_path= "/toto"
+ ,.request_url= "http://a%12:b!&*$@hypnotoad.org:1234/toto"
+ ,.host= "hypnotoad.org"
+ ,.userinfo= "a%12:b!&*$"
+ ,.port= 1234
+ ,.num_headers= 0
+ ,.headers= { }
+ ,.body= ""
+ }
+
+#define LINE_FOLDING_IN_HEADER_WITH_LF 34
+, {.name= "line folding in header value"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET / HTTP/1.1\n"
+ "Line1: abc\n"
+ "\tdef\n"
+ " ghi\n"
+ "\t\tjkl\n"
+ " mno \n"
+ "\t \tqrs\n"
+ "Line2: \t line2\t\n"
+ "Line3:\n"
+ " line3\n"
+ "Line4: \n"
+ " \n"
+ "Connection:\n"
+ " close\n"
+ "\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/"
+ ,.request_url= "/"
+ ,.num_headers= 5
+ ,.headers= { { "Line1", "abc\tdef ghi\t\tjkl mno \t \tqrs" }
+ , { "Line2", "line2\t" }
+ , { "Line3", "line3" }
+ , { "Line4", "" }
+ , { "Connection", "close" },
+ }
+ ,.body= ""
+ }
+
+#define CONNECTION_MULTI 35
+, {.name = "multiple connection header values with folding"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /demo HTTP/1.1\r\n"
+ "Host: example.com\r\n"
+ "Connection: Something,\r\n"
+ " Upgrade, ,Keep-Alive\r\n"
+ "Sec-WebSocket-Key2: 12998 5 Y3 1 .P00\r\n"
+ "Sec-WebSocket-Protocol: sample\r\n"
+ "Upgrade: WebSocket\r\n"
+ "Sec-WebSocket-Key1: 4 @1 46546xW%0l 1 5\r\n"
+ "Origin: http://example.com\r\n"
+ "\r\n"
+ "Hot diggity dogg"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/demo"
+ ,.request_url= "/demo"
+ ,.num_headers= 7
+ ,.upgrade="Hot diggity dogg"
+ ,.headers= { { "Host", "example.com" }
+ , { "Connection", "Something, Upgrade, ,Keep-Alive" }
+ , { "Sec-WebSocket-Key2", "12998 5 Y3 1 .P00" }
+ , { "Sec-WebSocket-Protocol", "sample" }
+ , { "Upgrade", "WebSocket" }
+ , { "Sec-WebSocket-Key1", "4 @1 46546xW%0l 1 5" }
+ , { "Origin", "http://example.com" }
+ }
+ ,.body= ""
+ }
+
+#define CONNECTION_MULTI_LWS 36
+, {.name = "multiple connection header values with folding and lws"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /demo HTTP/1.1\r\n"
+ "Connection: keep-alive, upgrade\r\n"
+ "Upgrade: WebSocket\r\n"
+ "\r\n"
+ "Hot diggity dogg"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/demo"
+ ,.request_url= "/demo"
+ ,.num_headers= 2
+ ,.upgrade="Hot diggity dogg"
+ ,.headers= { { "Connection", "keep-alive, upgrade" }
+ , { "Upgrade", "WebSocket" }
+ }
+ ,.body= ""
+ }
+
+#define CONNECTION_MULTI_LWS_CRLF 37
+, {.name = "multiple connection header values with folding and lws"
+ ,.type= HTTP_REQUEST
+ ,.raw= "GET /demo HTTP/1.1\r\n"
+ "Connection: keep-alive, \r\n upgrade\r\n"
+ "Upgrade: WebSocket\r\n"
+ "\r\n"
+ "Hot diggity dogg"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_GET
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/demo"
+ ,.request_url= "/demo"
+ ,.num_headers= 2
+ ,.upgrade="Hot diggity dogg"
+ ,.headers= { { "Connection", "keep-alive, upgrade" }
+ , { "Upgrade", "WebSocket" }
+ }
+ ,.body= ""
+ }
+
+#define UPGRADE_POST_REQUEST 38
+, {.name = "upgrade post request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "POST /demo HTTP/1.1\r\n"
+ "Host: example.com\r\n"
+ "Connection: Upgrade\r\n"
+ "Upgrade: HTTP/2.0\r\n"
+ "Content-Length: 15\r\n"
+ "\r\n"
+ "sweet post body"
+ "Hot diggity dogg"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.request_path= "/demo"
+ ,.request_url= "/demo"
+ ,.num_headers= 4
+ ,.upgrade="Hot diggity dogg"
+ ,.headers= { { "Host", "example.com" }
+ , { "Connection", "Upgrade" }
+ , { "Upgrade", "HTTP/2.0" }
+ , { "Content-Length", "15" }
+ }
+ ,.body= "sweet post body"
+ }
+
+#define CONNECT_WITH_BODY_REQUEST 39
+, {.name = "connect with body request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "CONNECT foo.bar.com:443 HTTP/1.0\r\n"
+ "User-agent: Mozilla/1.1N\r\n"
+ "Proxy-authorization: basic aGVsbG86d29ybGQ=\r\n"
+ "Content-Length: 10\r\n"
+ "\r\n"
+ "blarfcicle"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.method= HTTP_CONNECT
+ ,.request_url= "foo.bar.com:443"
+ ,.num_headers= 3
+ ,.upgrade="blarfcicle"
+ ,.headers= { { "User-agent", "Mozilla/1.1N" }
+ , { "Proxy-authorization", "basic aGVsbG86d29ybGQ=" }
+ , { "Content-Length", "10" }
+ }
+ ,.body= ""
+ }
+
+/* Examples from the Internet draft for LINK/UNLINK methods:
+ * https://tools.ietf.org/id/draft-snell-link-method-01.html#rfc.section.5
+ */
+
+#define LINK_REQUEST 40
+, {.name = "link request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "LINK /images/my_dog.jpg HTTP/1.1\r\n"
+ "Host: example.com\r\n"
+ "Link: <http://example.com/profiles/joe>; rel=\"tag\"\r\n"
+ "Link: <http://example.com/profiles/sally>; rel=\"tag\"\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_LINK
+ ,.request_path= "/images/my_dog.jpg"
+ ,.request_url= "/images/my_dog.jpg"
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.num_headers= 3
+ ,.headers= { { "Host", "example.com" }
+ , { "Link", "<http://example.com/profiles/joe>; rel=\"tag\"" }
+ , { "Link", "<http://example.com/profiles/sally>; rel=\"tag\"" }
+ }
+ ,.body= ""
+ }
+
+#define UNLINK_REQUEST 41
+, {.name = "unlink request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "UNLINK /images/my_dog.jpg HTTP/1.1\r\n"
+ "Host: example.com\r\n"
+ "Link: <http://example.com/profiles/sally>; rel=\"tag\"\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_UNLINK
+ ,.request_path= "/images/my_dog.jpg"
+ ,.request_url= "/images/my_dog.jpg"
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.num_headers= 2
+ ,.headers= { { "Host", "example.com" }
+ , { "Link", "<http://example.com/profiles/sally>; rel=\"tag\"" }
+ }
+ ,.body= ""
+ }
+
+#define SOURCE_REQUEST 42
+, {.name = "source request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "SOURCE /music/sweet/music HTTP/1.1\r\n"
+ "Host: example.com\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_SOURCE
+ ,.request_path= "/music/sweet/music"
+ ,.request_url= "/music/sweet/music"
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.num_headers= 1
+ ,.headers= { { "Host", "example.com" } }
+ ,.body= ""
+ }
+
+#define SOURCE_ICE_REQUEST 43
+, {.name = "source (ICE) request"
+ ,.type= HTTP_REQUEST
+ ,.raw= "SOURCE /music/sweet/music ICE/1.0\r\n"
+ "Host: example.com\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.method= HTTP_SOURCE
+ ,.request_path= "/music/sweet/music"
+ ,.request_url= "/music/sweet/music"
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.num_headers= 1
+ ,.headers= { { "Host", "example.com" } }
+ ,.body= ""
+ }
+
+#define POST_MULTI_TE_LAST_CHUNKED 44
+, {.name= "post - multi coding transfer-encoding chunked body"
+ ,.type= HTTP_REQUEST
+ ,.raw= "POST / HTTP/1.1\r\n"
+ "Transfer-Encoding: deflate, chunked\r\n"
+ "\r\n"
+ "1e\r\nall your base are belong to us\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/"
+ ,.request_url= "/"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Transfer-Encoding" , "deflate, chunked" }
+ }
+ ,.body= "all your base are belong to us"
+ ,.num_chunks_complete= 2
+ ,.chunk_lengths= { 0x1e }
+ }
+
+#define POST_MULTI_LINE_TE_LAST_CHUNKED 45
+, {.name= "post - multi line coding transfer-encoding chunked body"
+ ,.type= HTTP_REQUEST
+ ,.raw= "POST / HTTP/1.1\r\n"
+ "Transfer-Encoding: deflate,\r\n"
+ " chunked\r\n"
+ "\r\n"
+ "1e\r\nall your base are belong to us\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.method= HTTP_POST
+ ,.query_string= ""
+ ,.fragment= ""
+ ,.request_path= "/"
+ ,.request_url= "/"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Transfer-Encoding" , "deflate, chunked" }
+ }
+ ,.body= "all your base are belong to us"
+ ,.num_chunks_complete= 2
+ ,.chunk_lengths= { 0x1e }
+ }
+};
+
+/* * R E S P O N S E S * */
+const struct message responses[] =
+#define GOOGLE_301 0
+{ {.name= "google 301"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 301 Moved Permanently\r\n"
+ "Location: http://www.google.com/\r\n"
+ "Content-Type: text/html; charset=UTF-8\r\n"
+ "Date: Sun, 26 Apr 2009 11:11:49 GMT\r\n"
+ "Expires: Tue, 26 May 2009 11:11:49 GMT\r\n"
+ "X-$PrototypeBI-Version: 1.6.0.3\r\n" /* $ char in header field */
+ "Cache-Control: public, max-age=2592000\r\n"
+ "Server: gws\r\n"
+ "Content-Length: 219 \r\n"
+ "\r\n"
+ "<HTML><HEAD><meta http-equiv=\"content-type\" content=\"text/html;charset=utf-8\">\n"
+ "<TITLE>301 Moved</TITLE></HEAD><BODY>\n"
+ "<H1>301 Moved</H1>\n"
+ "The document has moved\n"
+ "<A HREF=\"http://www.google.com/\">here</A>.\r\n"
+ "</BODY></HTML>\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 301
+ ,.response_status= "Moved Permanently"
+ ,.num_headers= 8
+ ,.headers=
+ { { "Location", "http://www.google.com/" }
+ , { "Content-Type", "text/html; charset=UTF-8" }
+ , { "Date", "Sun, 26 Apr 2009 11:11:49 GMT" }
+ , { "Expires", "Tue, 26 May 2009 11:11:49 GMT" }
+ , { "X-$PrototypeBI-Version", "1.6.0.3" }
+ , { "Cache-Control", "public, max-age=2592000" }
+ , { "Server", "gws" }
+ , { "Content-Length", "219 " }
+ }
+ ,.body= "<HTML><HEAD><meta http-equiv=\"content-type\" content=\"text/html;charset=utf-8\">\n"
+ "<TITLE>301 Moved</TITLE></HEAD><BODY>\n"
+ "<H1>301 Moved</H1>\n"
+ "The document has moved\n"
+ "<A HREF=\"http://www.google.com/\">here</A>.\r\n"
+ "</BODY></HTML>\r\n"
+ }
+
+#define NO_CONTENT_LENGTH_RESPONSE 1
+/* The client should wait for the server's EOF. That is, when Content-Length
+ * is not specified and the server sends "Connection: close", the end of the
+ * body is marked by EOF.
+ * Compare with APACHEBENCH_GET.
+ */
+, {.name= "no content-length response"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Date: Tue, 04 Aug 2009 07:59:32 GMT\r\n"
+ "Server: Apache\r\n"
+ "X-Powered-By: Servlet/2.5 JSP/2.1\r\n"
+ "Content-Type: text/xml; charset=utf-8\r\n"
+ "Connection: close\r\n"
+ "\r\n"
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\">\n"
+ " <SOAP-ENV:Body>\n"
+ " <SOAP-ENV:Fault>\n"
+ " <faultcode>SOAP-ENV:Client</faultcode>\n"
+ " <faultstring>Client Error</faultstring>\n"
+ " </SOAP-ENV:Fault>\n"
+ " </SOAP-ENV:Body>\n"
+ "</SOAP-ENV:Envelope>"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 5
+ ,.headers=
+ { { "Date", "Tue, 04 Aug 2009 07:59:32 GMT" }
+ , { "Server", "Apache" }
+ , { "X-Powered-By", "Servlet/2.5 JSP/2.1" }
+ , { "Content-Type", "text/xml; charset=utf-8" }
+ , { "Connection", "close" }
+ }
+ ,.body= "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<SOAP-ENV:Envelope xmlns:SOAP-ENV=\"http://schemas.xmlsoap.org/soap/envelope/\">\n"
+ " <SOAP-ENV:Body>\n"
+ " <SOAP-ENV:Fault>\n"
+ " <faultcode>SOAP-ENV:Client</faultcode>\n"
+ " <faultstring>Client Error</faultstring>\n"
+ " </SOAP-ENV:Fault>\n"
+ " </SOAP-ENV:Body>\n"
+ "</SOAP-ENV:Envelope>"
+ }
+
+#define NO_HEADERS_NO_BODY_404 2
+, {.name= "404 no headers no body"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 404 Not Found\r\n\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 404
+ ,.response_status= "Not Found"
+ ,.num_headers= 0
+ ,.headers= {}
+ ,.body_size= 0
+ ,.body= ""
+ }
+
+#define NO_REASON_PHRASE 3
+, {.name= "301 no response phrase"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 301\r\n\r\n"
+ ,.should_keep_alive = FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 301
+ ,.response_status= ""
+ ,.num_headers= 0
+ ,.headers= {}
+ ,.body= ""
+ }
+
+#define TRAILING_SPACE_ON_CHUNKED_BODY 4
+, {.name="200 trailing space on chunked body"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Content-Type: text/plain\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "25 \r\n"
+ "This is the data in the first chunk\r\n"
+ "\r\n"
+ "1C\r\n"
+ "and this is the second one\r\n"
+ "\r\n"
+ "0 \r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 2
+ ,.headers=
+ { {"Content-Type", "text/plain" }
+ , {"Transfer-Encoding", "chunked" }
+ }
+ ,.body_size = 37+28
+ ,.body =
+ "This is the data in the first chunk\r\n"
+ "and this is the second one\r\n"
+ ,.num_chunks_complete= 3
+ ,.chunk_lengths= { 0x25, 0x1c }
+ }
+
+#define NO_CARRIAGE_RET 5
+, {.name="no carriage ret"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\n"
+ "Content-Type: text/html; charset=utf-8\n"
+ "Connection: close\n"
+ "\n"
+ "these headers are from http://news.ycombinator.com/"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 2
+ ,.headers=
+ { {"Content-Type", "text/html; charset=utf-8" }
+ , {"Connection", "close" }
+ }
+ ,.body= "these headers are from http://news.ycombinator.com/"
+ }
+
+#define PROXY_CONNECTION 6
+, {.name="proxy connection"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Content-Type: text/html; charset=UTF-8\r\n"
+ "Content-Length: 11\r\n"
+ "Proxy-Connection: close\r\n"
+ "Date: Thu, 31 Dec 2009 20:55:48 +0000\r\n"
+ "\r\n"
+ "hello world"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 4
+ ,.headers=
+ { {"Content-Type", "text/html; charset=UTF-8" }
+ , {"Content-Length", "11" }
+ , {"Proxy-Connection", "close" }
+ , {"Date", "Thu, 31 Dec 2009 20:55:48 +0000"}
+ }
+ ,.body= "hello world"
+ }
+
+#define UNDERSCORE_HEADER_KEY 7
+ // shown by
+ // curl -o /dev/null -v "http://ad.doubleclick.net/pfadx/DARTSHELLCONFIGXML;dcmt=text/xml;"
+, {.name="underscore header key"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Server: DCLK-AdSvr\r\n"
+ "Content-Type: text/xml\r\n"
+ "Content-Length: 0\r\n"
+ "DCLK_imp: v7;x;114750856;0-0;0;17820020;0/0;21603567/21621457/1;;~okv=;dcmt=text/xml;;~cs=o\r\n\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 4
+ ,.headers=
+ { {"Server", "DCLK-AdSvr" }
+ , {"Content-Type", "text/xml" }
+ , {"Content-Length", "0" }
+ , {"DCLK_imp", "v7;x;114750856;0-0;0;17820020;0/0;21603567/21621457/1;;~okv=;dcmt=text/xml;;~cs=o" }
+ }
+ ,.body= ""
+ }
+
+#define BONJOUR_MADAME_FR 8
+/* The client should not merge two header fields when the first one doesn't
+ * have a value.
+ */
+, {.name= "bonjourmadame.fr"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.0 301 Moved Permanently\r\n"
+ "Date: Thu, 03 Jun 2010 09:56:32 GMT\r\n"
+ "Server: Apache/2.2.3 (Red Hat)\r\n"
+ "Cache-Control: public\r\n"
+ "Pragma: \r\n"
+ "Location: http://www.bonjourmadame.fr/\r\n"
+ "Vary: Accept-Encoding\r\n"
+ "Content-Length: 0\r\n"
+ "Content-Type: text/html; charset=UTF-8\r\n"
+ "Connection: keep-alive\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.status_code= 301
+ ,.response_status= "Moved Permanently"
+ ,.num_headers= 9
+ ,.headers=
+ { { "Date", "Thu, 03 Jun 2010 09:56:32 GMT" }
+ , { "Server", "Apache/2.2.3 (Red Hat)" }
+ , { "Cache-Control", "public" }
+ , { "Pragma", "" }
+ , { "Location", "http://www.bonjourmadame.fr/" }
+ , { "Vary", "Accept-Encoding" }
+ , { "Content-Length", "0" }
+ , { "Content-Type", "text/html; charset=UTF-8" }
+ , { "Connection", "keep-alive" }
+ }
+ ,.body= ""
+ }
+
+#define RES_FIELD_UNDERSCORE 9
+/* Should handle underscores (and other unusual bytes) in header field names */
+, {.name= "field underscore"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Date: Tue, 28 Sep 2010 01:14:13 GMT\r\n"
+ "Server: Apache\r\n"
+ "Cache-Control: no-cache, must-revalidate\r\n"
+ "Expires: Mon, 26 Jul 1997 05:00:00 GMT\r\n"
+ ".et-Cookie: PlaxoCS=1274804622353690521; path=/; domain=.plaxo.com\r\n"
+ "Vary: Accept-Encoding\r\n"
+ "_eep-Alive: timeout=45\r\n" /* semantic value ignored */
+ "_onnection: Keep-Alive\r\n" /* semantic value ignored */
+ "Transfer-Encoding: chunked\r\n"
+ "Content-Type: text/html\r\n"
+ "Connection: close\r\n"
+ "\r\n"
+ "0\r\n\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 11
+ ,.headers=
+ { { "Date", "Tue, 28 Sep 2010 01:14:13 GMT" }
+ , { "Server", "Apache" }
+ , { "Cache-Control", "no-cache, must-revalidate" }
+ , { "Expires", "Mon, 26 Jul 1997 05:00:00 GMT" }
+ , { ".et-Cookie", "PlaxoCS=1274804622353690521; path=/; domain=.plaxo.com" }
+ , { "Vary", "Accept-Encoding" }
+ , { "_eep-Alive", "timeout=45" }
+ , { "_onnection", "Keep-Alive" }
+ , { "Transfer-Encoding", "chunked" }
+ , { "Content-Type", "text/html" }
+ , { "Connection", "close" }
+ }
+ ,.body= ""
+ ,.num_chunks_complete= 1
+ ,.chunk_lengths= {}
+ }
+
+#define NON_ASCII_IN_STATUS_LINE 10
+/* Should handle non-ASCII in status line */
+, {.name= "non-ASCII in status line"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 500 Oriëntatieprobleem\r\n"
+ "Date: Fri, 5 Nov 2010 23:07:12 GMT+2\r\n"
+ "Content-Length: 0\r\n"
+ "Connection: close\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 500
+ ,.response_status= "Oriëntatieprobleem"
+ ,.num_headers= 3
+ ,.headers=
+ { { "Date", "Fri, 5 Nov 2010 23:07:12 GMT+2" }
+ , { "Content-Length", "0" }
+ , { "Connection", "close" }
+ }
+ ,.body= ""
+ }
+
+#define HTTP_VERSION_0_9 11
+/* Should handle HTTP/0.9 */
+, {.name= "http version 0.9"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/0.9 200 OK\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 0
+ ,.http_minor= 9
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 0
+ ,.headers=
+ {}
+ ,.body= ""
+ }
+
+#define NO_CONTENT_LENGTH_NO_TRANSFER_ENCODING_RESPONSE 12
+/* The client should wait for the server's EOF. That is, when neither
+ * content-length nor transfer-encoding is specified, the end of body
+ * is specified by the EOF.
+ */
+, {.name= "neither content-length nor transfer-encoding response"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Content-Type: text/plain\r\n"
+ "\r\n"
+ "hello world"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Content-Type", "text/plain" }
+ }
+ ,.body= "hello world"
+ }
+
+#define NO_BODY_HTTP10_KA_200 13
+, {.name= "HTTP/1.0 with keep-alive and EOF-terminated 200 status"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.0 200 OK\r\n"
+ "Connection: keep-alive\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Connection", "keep-alive" }
+ }
+ ,.body_size= 0
+ ,.body= ""
+ }
+
+#define NO_BODY_HTTP10_KA_204 14
+, {.name= "HTTP/1.0 with keep-alive and a 204 status"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.0 204 No content\r\n"
+ "Connection: keep-alive\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.status_code= 204
+ ,.response_status= "No content"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Connection", "keep-alive" }
+ }
+ ,.body_size= 0
+ ,.body= ""
+ }
+
+#define NO_BODY_HTTP11_KA_200 15
+, {.name= "HTTP/1.1 with an EOF-terminated 200 status"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 0
+ ,.headers={}
+ ,.body_size= 0
+ ,.body= ""
+ }
+
+#define NO_BODY_HTTP11_KA_204 16
+, {.name= "HTTP/1.1 with a 204 status"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 204 No content\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 204
+ ,.response_status= "No content"
+ ,.num_headers= 0
+ ,.headers={}
+ ,.body_size= 0
+ ,.body= ""
+ }
+
+#define NO_BODY_HTTP11_NOKA_204 17
+, {.name= "HTTP/1.1 with a 204 status and keep-alive disabled"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 204 No content\r\n"
+ "Connection: close\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 204
+ ,.response_status= "No content"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Connection", "close" }
+ }
+ ,.body_size= 0
+ ,.body= ""
+ }
+
+#define NO_BODY_HTTP11_KA_CHUNKED_200 18
+, {.name= "HTTP/1.1 with chunked endocing and a 200 response"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 1
+ ,.headers=
+ { { "Transfer-Encoding", "chunked" }
+ }
+ ,.body_size= 0
+ ,.body= ""
+ ,.num_chunks_complete= 1
+ }
+
+#if !HTTP_PARSER_STRICT
+#define SPACE_IN_FIELD_RES 19
+/* Should handle spaces in header fields */
+, {.name= "field space"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Server: Microsoft-IIS/6.0\r\n"
+ "X-Powered-By: ASP.NET\r\n"
+ "en-US Content-Type: text/xml\r\n" /* this is the problem */
+ "Content-Type: text/xml\r\n"
+ "Content-Length: 16\r\n"
+ "Date: Fri, 23 Jul 2010 18:45:38 GMT\r\n"
+ "Connection: keep-alive\r\n"
+ "\r\n"
+ "<xml>hello</xml>" /* fake body */
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 7
+ ,.headers=
+ { { "Server", "Microsoft-IIS/6.0" }
+ , { "X-Powered-By", "ASP.NET" }
+ , { "en-US Content-Type", "text/xml" }
+ , { "Content-Type", "text/xml" }
+ , { "Content-Length", "16" }
+ , { "Date", "Fri, 23 Jul 2010 18:45:38 GMT" }
+ , { "Connection", "keep-alive" }
+ }
+ ,.body= "<xml>hello</xml>"
+ }
+#endif /* !HTTP_PARSER_STRICT */
+
+#define AMAZON_COM 20
+, {.name= "amazon.com"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 301 MovedPermanently\r\n"
+ "Date: Wed, 15 May 2013 17:06:33 GMT\r\n"
+ "Server: Server\r\n"
+ "x-amz-id-1: 0GPHKXSJQ826RK7GZEB2\r\n"
+ "p3p: policyref=\"http://www.amazon.com/w3c/p3p.xml\",CP=\"CAO DSP LAW CUR ADM IVAo IVDo CONo OTPo OUR DELi PUBi OTRi BUS PHY ONL UNI PUR FIN COM NAV INT DEM CNT STA HEA PRE LOC GOV OTC \"\r\n"
+ "x-amz-id-2: STN69VZxIFSz9YJLbz1GDbxpbjG6Qjmmq5E3DxRhOUw+Et0p4hr7c/Q8qNcx4oAD\r\n"
+ "Location: http://www.amazon.com/Dan-Brown/e/B000AP9DSU/ref=s9_pop_gw_al1?_encoding=UTF8&refinementId=618073011&pf_rd_m=ATVPDKIKX0DER&pf_rd_s=center-2&pf_rd_r=0SHYY5BZXN3KR20BNFAY&pf_rd_t=101&pf_rd_p=1263340922&pf_rd_i=507846\r\n"
+ "Vary: Accept-Encoding,User-Agent\r\n"
+ "Content-Type: text/html; charset=ISO-8859-1\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "1\r\n"
+ "\n\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 301
+ ,.response_status= "MovedPermanently"
+ ,.num_headers= 9
+ ,.headers= { { "Date", "Wed, 15 May 2013 17:06:33 GMT" }
+ , { "Server", "Server" }
+ , { "x-amz-id-1", "0GPHKXSJQ826RK7GZEB2" }
+ , { "p3p", "policyref=\"http://www.amazon.com/w3c/p3p.xml\",CP=\"CAO DSP LAW CUR ADM IVAo IVDo CONo OTPo OUR DELi PUBi OTRi BUS PHY ONL UNI PUR FIN COM NAV INT DEM CNT STA HEA PRE LOC GOV OTC \"" }
+ , { "x-amz-id-2", "STN69VZxIFSz9YJLbz1GDbxpbjG6Qjmmq5E3DxRhOUw+Et0p4hr7c/Q8qNcx4oAD" }
+ , { "Location", "http://www.amazon.com/Dan-Brown/e/B000AP9DSU/ref=s9_pop_gw_al1?_encoding=UTF8&refinementId=618073011&pf_rd_m=ATVPDKIKX0DER&pf_rd_s=center-2&pf_rd_r=0SHYY5BZXN3KR20BNFAY&pf_rd_t=101&pf_rd_p=1263340922&pf_rd_i=507846" }
+ , { "Vary", "Accept-Encoding,User-Agent" }
+ , { "Content-Type", "text/html; charset=ISO-8859-1" }
+ , { "Transfer-Encoding", "chunked" }
+ }
+ ,.body= "\n"
+ ,.num_chunks_complete= 2
+ ,.chunk_lengths= { 1 }
+ }
+
+#define EMPTY_REASON_PHRASE_AFTER_SPACE 21
+, {.name= "empty reason phrase after space"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 \r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= ""
+ ,.num_headers= 0
+ ,.headers= {}
+ ,.body= ""
+ }
+
+#define CONTENT_LENGTH_X 22
+, {.name= "Content-Length-X"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Content-Length-X: 0\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "2\r\n"
+ "OK\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 2
+ ,.headers= { { "Content-Length-X", "0" }
+ , { "Transfer-Encoding", "chunked" }
+ }
+ ,.body= "OK"
+ ,.num_chunks_complete= 2
+ ,.chunk_lengths= { 2 }
+ }
+
+#define HTTP_101_RESPONSE_WITH_UPGRADE_HEADER 23
+, {.name= "HTTP 101 response with Upgrade header"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 101 Switching Protocols\r\n"
+ "Connection: upgrade\r\n"
+ "Upgrade: h2c\r\n"
+ "\r\n"
+ "proto"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 101
+ ,.response_status= "Switching Protocols"
+ ,.upgrade= "proto"
+ ,.num_headers= 2
+ ,.headers=
+ { { "Connection", "upgrade" }
+ , { "Upgrade", "h2c" }
+ }
+ }
+
+#define HTTP_101_RESPONSE_WITH_UPGRADE_HEADER_AND_CONTENT_LENGTH 24
+, {.name= "HTTP 101 response with Upgrade and Content-Length header"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 101 Switching Protocols\r\n"
+ "Connection: upgrade\r\n"
+ "Upgrade: h2c\r\n"
+ "Content-Length: 4\r\n"
+ "\r\n"
+ "body"
+ "proto"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 101
+ ,.response_status= "Switching Protocols"
+ ,.body= "body"
+ ,.upgrade= "proto"
+ ,.num_headers= 3
+ ,.headers=
+ { { "Connection", "upgrade" }
+ , { "Upgrade", "h2c" }
+ , { "Content-Length", "4" }
+ }
+ }
+
+#define HTTP_101_RESPONSE_WITH_UPGRADE_HEADER_AND_TRANSFER_ENCODING 25
+, {.name= "HTTP 101 response with Upgrade and Transfer-Encoding header"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 101 Switching Protocols\r\n"
+ "Connection: upgrade\r\n"
+ "Upgrade: h2c\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "2\r\n"
+ "bo\r\n"
+ "2\r\n"
+ "dy\r\n"
+ "0\r\n"
+ "\r\n"
+ "proto"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 101
+ ,.response_status= "Switching Protocols"
+ ,.body= "body"
+ ,.upgrade= "proto"
+ ,.num_headers= 3
+ ,.headers=
+ { { "Connection", "upgrade" }
+ , { "Upgrade", "h2c" }
+ , { "Transfer-Encoding", "chunked" }
+ }
+ ,.num_chunks_complete= 3
+ ,.chunk_lengths= { 2, 2 }
+ }
+
+#define HTTP_200_RESPONSE_WITH_UPGRADE_HEADER 26
+, {.name= "HTTP 200 response with Upgrade header"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Connection: upgrade\r\n"
+ "Upgrade: h2c\r\n"
+ "\r\n"
+ "body"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.body= "body"
+ ,.upgrade= NULL
+ ,.num_headers= 2
+ ,.headers=
+ { { "Connection", "upgrade" }
+ , { "Upgrade", "h2c" }
+ }
+ }
+
+#define HTTP_200_RESPONSE_WITH_UPGRADE_HEADER_AND_CONTENT_LENGTH 27
+, {.name= "HTTP 200 response with Upgrade and Content-Length header"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Connection: upgrade\r\n"
+ "Upgrade: h2c\r\n"
+ "Content-Length: 4\r\n"
+ "\r\n"
+ "body"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 3
+ ,.body= "body"
+ ,.upgrade= NULL
+ ,.headers=
+ { { "Connection", "upgrade" }
+ , { "Upgrade", "h2c" }
+ , { "Content-Length", "4" }
+ }
+ }
+
+#define HTTP_200_RESPONSE_WITH_UPGRADE_HEADER_AND_TRANSFER_ENCODING 28
+, {.name= "HTTP 200 response with Upgrade and Transfer-Encoding header"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Connection: upgrade\r\n"
+ "Upgrade: h2c\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "\r\n"
+ "2\r\n"
+ "bo\r\n"
+ "2\r\n"
+ "dy\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= TRUE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 3
+ ,.body= "body"
+ ,.upgrade= NULL
+ ,.headers=
+ { { "Connection", "upgrade" }
+ , { "Upgrade", "h2c" }
+ , { "Transfer-Encoding", "chunked" }
+ }
+ ,.num_chunks_complete= 3
+ ,.chunk_lengths= { 2, 2 }
+ }
+
+#define HTTP_200_MULTI_TE_NOT_LAST_CHUNKED 29
+, {.name= "HTTP 200 response with `chunked` being *not last* Transfer-Encoding"
+ ,.type= HTTP_RESPONSE
+ ,.raw= "HTTP/1.1 200 OK\r\n"
+ "Transfer-Encoding: chunked, identity\r\n"
+ "\r\n"
+ "2\r\n"
+ "OK\r\n"
+ "0\r\n"
+ "\r\n"
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= TRUE
+ ,.http_major= 1
+ ,.http_minor= 1
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 1
+ ,.headers= { { "Transfer-Encoding", "chunked, identity" }
+ }
+ ,.body= "2\r\nOK\r\n0\r\n\r\n"
+ ,.num_chunks_complete= 0
+ }
+};
+
+/* strnlen() is a POSIX.1-2008 addition. Can't rely on it being available, so
+ * define it ourselves.
+ */
+size_t
+strnlen(const char *s, size_t maxlen)
+{
+ const char *p;
+
+ p = memchr(s, '\0', maxlen);
+ if (p == NULL)
+ return maxlen;
+
+ return p - s;
+}
+
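+/* Bounded concatenation in the spirit of strlcat(): append at most n bytes
+ * of src to dst, where len is the total capacity of dst, always
+ * NUL-terminating the result. Returns the length the combined string would
+ * have had without truncation.
+ */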
+size_t
+strlncat(char *dst, size_t len, const char *src, size_t n)
+{
+ size_t slen;
+ size_t dlen;
+ size_t rlen;
+ size_t ncpy;
+
+ slen = strnlen(src, n);
+ dlen = strnlen(dst, len);
+
+ if (dlen < len) {
+ rlen = len - dlen;
+ ncpy = slen < rlen ? slen : (rlen - 1);
+ memcpy(dst + dlen, src, ncpy);
+ dst[dlen + ncpy] = '\0';
+ }
+
+ assert(len > slen + dlen);
+ return slen + dlen;
+}
+
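+/* Bounded copy in the spirit of strlcpy(): copy at most n bytes of src into
+ * dst of capacity len, always NUL-terminating when len > 0. Returns
+ * strnlen(src, n), i.e. the untruncated source length.
+ */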
+size_t
+strlncpy(char *dst, size_t len, const char *src, size_t n)
+{
+ size_t slen;
+ size_t ncpy;
+
+ slen = strnlen(src, n);
+
+ if (len > 0) {
+ ncpy = slen < len ? slen : (len - 1);
+ memcpy(dst, src, ncpy);
+ dst[ncpy] = '\0';
+ }
+
+ assert(len > slen);
+ return slen;
+}
+
+int
+request_url_cb (http_parser *p, const char *buf, size_t len)
+{
+ assert(p == &parser);
+ strlncat(messages[num_messages].request_url,
+ sizeof(messages[num_messages].request_url),
+ buf,
+ len);
+ return 0;
+}
+
+int
+header_field_cb (http_parser *p, const char *buf, size_t len)
+{
+ assert(p == &parser);
+ struct message *m = &messages[num_messages];
+
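+  /* Field data can arrive split across several on_header_field calls; only
+   * start a new header when the previous callback was not also a field
+   * callback.
+   */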
+ if (m->last_header_element != FIELD)
+ m->num_headers++;
+
+ strlncat(m->headers[m->num_headers-1][0],
+ sizeof(m->headers[m->num_headers-1][0]),
+ buf,
+ len);
+
+ m->last_header_element = FIELD;
+
+ return 0;
+}
+
+int
+header_value_cb (http_parser *p, const char *buf, size_t len)
+{
+ assert(p == &parser);
+ struct message *m = &messages[num_messages];
+
+ strlncat(m->headers[m->num_headers-1][1],
+ sizeof(m->headers[m->num_headers-1][1]),
+ buf,
+ len);
+
+ m->last_header_element = VALUE;
+
+ return 0;
+}
+
+void
+check_body_is_final (const http_parser *p)
+{
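+  /* http_body_is_final() may return 1 only while the parser delivers the
+   * final on_body chunk of a message; if it was already 1 on an earlier
+   * call, fail loudly.
+   */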
+ if (messages[num_messages].body_is_final) {
+ fprintf(stderr, "\n\n *** Error http_body_is_final() should return 1 "
+ "on last on_body callback call "
+ "but it doesn't! ***\n\n");
+ assert(0);
+ abort();
+ }
+ messages[num_messages].body_is_final = http_body_is_final(p);
+}
+
+int
+body_cb (http_parser *p, const char *buf, size_t len)
+{
+ assert(p == &parser);
+ strlncat(messages[num_messages].body,
+ sizeof(messages[num_messages].body),
+ buf,
+ len);
+ messages[num_messages].body_size += len;
+ check_body_is_final(p);
+ // printf("body_cb: '%s'\n", requests[num_messages].body);
+ return 0;
+}
+
+int
+count_body_cb (http_parser *p, const char *buf, size_t len)
+{
+ assert(p == &parser);
+ assert(buf);
+ messages[num_messages].body_size += len;
+ check_body_is_final(p);
+ return 0;
+}
+
+int
+message_begin_cb (http_parser *p)
+{
+ assert(p == &parser);
+ assert(!messages[num_messages].message_begin_cb_called);
+ messages[num_messages].message_begin_cb_called = TRUE;
+ return 0;
+}
+
+int
+headers_complete_cb (http_parser *p)
+{
+ assert(p == &parser);
+ messages[num_messages].method = parser.method;
+ messages[num_messages].status_code = parser.status_code;
+ messages[num_messages].http_major = parser.http_major;
+ messages[num_messages].http_minor = parser.http_minor;
+ messages[num_messages].headers_complete_cb_called = TRUE;
+ messages[num_messages].should_keep_alive = http_should_keep_alive(&parser);
+ return 0;
+}
+
+int
+message_complete_cb (http_parser *p)
+{
+ assert(p == &parser);
+ if (messages[num_messages].should_keep_alive !=
+ http_should_keep_alive(&parser))
+ {
+ fprintf(stderr, "\n\n *** Error http_should_keep_alive() should have same "
+ "value in both on_message_complete and on_headers_complete "
+ "but it doesn't! ***\n\n");
+ assert(0);
+ abort();
+ }
+
+ if (messages[num_messages].body_size &&
+ http_body_is_final(p) &&
+ !messages[num_messages].body_is_final)
+ {
+ fprintf(stderr, "\n\n *** Error http_body_is_final() should return 1 "
+ "on last on_body callback call "
+ "but it doesn't! ***\n\n");
+ assert(0);
+ abort();
+ }
+
+ messages[num_messages].message_complete_cb_called = TRUE;
+
+ messages[num_messages].message_complete_on_eof = currently_parsing_eof;
+
+ num_messages++;
+ return 0;
+}
+
+int
+response_status_cb (http_parser *p, const char *buf, size_t len)
+{
+ assert(p == &parser);
+
+ messages[num_messages].status_cb_called = TRUE;
+
+ strlncat(messages[num_messages].response_status,
+ sizeof(messages[num_messages].response_status),
+ buf,
+ len);
+ return 0;
+}
+
+int
+chunk_header_cb (http_parser *p)
+{
+ assert(p == &parser);
+ int chunk_idx = messages[num_messages].num_chunks;
+ messages[num_messages].num_chunks++;
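+  /* While on_chunk_header runs, parser->content_length holds the size of
+   * the chunk that is about to be delivered.
+   */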
+ if (chunk_idx < MAX_CHUNKS) {
+ messages[num_messages].chunk_lengths[chunk_idx] = p->content_length;
+ }
+
+ return 0;
+}
+
+int
+chunk_complete_cb (http_parser *p)
+{
+ assert(p == &parser);
+
+ /* Here we want to verify that each chunk_header_cb is matched by a
+ * chunk_complete_cb, so not only should the total number of calls to
+ * both callbacks be the same, but they also should be interleaved
+ * properly */
+ assert(messages[num_messages].num_chunks ==
+ messages[num_messages].num_chunks_complete + 1);
+
+ messages[num_messages].num_chunks_complete++;
+ return 0;
+}
+
+/* These dontcall_* callbacks exist so that we can verify that when we're
+ * paused, no additional callbacks are invoked */
+int
+dontcall_message_begin_cb (http_parser *p)
+{
+ if (p) { } // gcc
+ fprintf(stderr, "\n\n*** on_message_begin() called on paused parser ***\n\n");
+ abort();
+}
+
+int
+dontcall_header_field_cb (http_parser *p, const char *buf, size_t len)
+{
+ if (p || buf || len) { } // gcc
+ fprintf(stderr, "\n\n*** on_header_field() called on paused parser ***\n\n");
+ abort();
+}
+
+int
+dontcall_header_value_cb (http_parser *p, const char *buf, size_t len)
+{
+ if (p || buf || len) { } // gcc
+ fprintf(stderr, "\n\n*** on_header_value() called on paused parser ***\n\n");
+ abort();
+}
+
+int
+dontcall_request_url_cb (http_parser *p, const char *buf, size_t len)
+{
+ if (p || buf || len) { } // gcc
+ fprintf(stderr, "\n\n*** on_request_url() called on paused parser ***\n\n");
+ abort();
+}
+
+int
+dontcall_body_cb (http_parser *p, const char *buf, size_t len)
+{
+ if (p || buf || len) { } // gcc
+ fprintf(stderr, "\n\n*** on_body_cb() called on paused parser ***\n\n");
+ abort();
+}
+
+int
+dontcall_headers_complete_cb (http_parser *p)
+{
+ if (p) { } // gcc
+ fprintf(stderr, "\n\n*** on_headers_complete() called on paused "
+ "parser ***\n\n");
+ abort();
+}
+
+int
+dontcall_message_complete_cb (http_parser *p)
+{
+ if (p) { } // gcc
+ fprintf(stderr, "\n\n*** on_message_complete() called on paused "
+ "parser ***\n\n");
+ abort();
+}
+
+int
+dontcall_response_status_cb (http_parser *p, const char *buf, size_t len)
+{
+ if (p || buf || len) { } // gcc
+ fprintf(stderr, "\n\n*** on_status() called on paused parser ***\n\n");
+ abort();
+}
+
+int
+dontcall_chunk_header_cb (http_parser *p)
+{
+ if (p) { } // gcc
+ fprintf(stderr, "\n\n*** on_chunk_header() called on paused parser ***\n\n");
+ exit(1);
+}
+
+int
+dontcall_chunk_complete_cb (http_parser *p)
+{
+ if (p) { } // gcc
+ fprintf(stderr, "\n\n*** on_chunk_complete() "
+ "called on paused parser ***\n\n");
+ exit(1);
+}
+
+static http_parser_settings settings_dontcall =
+ {.on_message_begin = dontcall_message_begin_cb
+ ,.on_header_field = dontcall_header_field_cb
+ ,.on_header_value = dontcall_header_value_cb
+ ,.on_url = dontcall_request_url_cb
+ ,.on_status = dontcall_response_status_cb
+ ,.on_body = dontcall_body_cb
+ ,.on_headers_complete = dontcall_headers_complete_cb
+ ,.on_message_complete = dontcall_message_complete_cb
+ ,.on_chunk_header = dontcall_chunk_header_cb
+ ,.on_chunk_complete = dontcall_chunk_complete_cb
+ };
+
+/* These pause_* callbacks always pause the parser and just invoke the regular
+ * callback that tracks content. Before returning, we overwrite the parser
+ * settings to point to the _dontcall variety so that we can verify that
+ * the pause actually did, you know, pause. */
+int
+pause_message_begin_cb (http_parser *p)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return message_begin_cb(p);
+}
+
+int
+pause_header_field_cb (http_parser *p, const char *buf, size_t len)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return header_field_cb(p, buf, len);
+}
+
+int
+pause_header_value_cb (http_parser *p, const char *buf, size_t len)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return header_value_cb(p, buf, len);
+}
+
+int
+pause_request_url_cb (http_parser *p, const char *buf, size_t len)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return request_url_cb(p, buf, len);
+}
+
+int
+pause_body_cb (http_parser *p, const char *buf, size_t len)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return body_cb(p, buf, len);
+}
+
+int
+pause_headers_complete_cb (http_parser *p)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return headers_complete_cb(p);
+}
+
+int
+pause_message_complete_cb (http_parser *p)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return message_complete_cb(p);
+}
+
+int
+pause_response_status_cb (http_parser *p, const char *buf, size_t len)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return response_status_cb(p, buf, len);
+}
+
+int
+pause_chunk_header_cb (http_parser *p)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return chunk_header_cb(p);
+}
+
+int
+pause_chunk_complete_cb (http_parser *p)
+{
+ http_parser_pause(p, 1);
+ *current_pause_parser = settings_dontcall;
+ return chunk_complete_cb(p);
+}
+
+int
+connect_headers_complete_cb (http_parser *p)
+{
+ headers_complete_cb(p);
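+  /* Returning 1 from on_headers_complete tells the parser the message has
+   * no body, which is what a client wants after sending CONNECT. */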
+ return 1;
+}
+
+int
+connect_message_complete_cb (http_parser *p)
+{
+ messages[num_messages].should_keep_alive = http_should_keep_alive(&parser);
+ return message_complete_cb(p);
+}
+
+static http_parser_settings settings_pause =
+ {.on_message_begin = pause_message_begin_cb
+ ,.on_header_field = pause_header_field_cb
+ ,.on_header_value = pause_header_value_cb
+ ,.on_url = pause_request_url_cb
+ ,.on_status = pause_response_status_cb
+ ,.on_body = pause_body_cb
+ ,.on_headers_complete = pause_headers_complete_cb
+ ,.on_message_complete = pause_message_complete_cb
+ ,.on_chunk_header = pause_chunk_header_cb
+ ,.on_chunk_complete = pause_chunk_complete_cb
+ };
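+
+/* Illustrative sketch (hypothetical helper, not used by this suite): how a
+ * caller would resume after one of the pause_* callbacks above pauses the
+ * parser. http_parser_execute() stops early with HPE_PAUSED; clearing the
+ * pause with http_parser_pause(p, 0) lets parsing continue from the first
+ * unconsumed byte.
+ */
+static size_t
+parse_resume_sketch (http_parser *p, const http_parser_settings *s,
+                     const char *buf, size_t len)
+{
+  size_t off = 0;
+
+  while (off < len) {
+    off += http_parser_execute(p, s, buf + off, len - off);
+
+    if (HTTP_PARSER_ERRNO(p) != HPE_PAUSED)
+      break; /* done (or upgrade), or a real parse error */
+
+    http_parser_pause(p, 0); /* un-pause; next iteration resumes */
+  }
+
+  return off;
+}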
+
+static http_parser_settings settings =
+ {.on_message_begin = message_begin_cb
+ ,.on_header_field = header_field_cb
+ ,.on_header_value = header_value_cb
+ ,.on_url = request_url_cb
+ ,.on_status = response_status_cb
+ ,.on_body = body_cb
+ ,.on_headers_complete = headers_complete_cb
+ ,.on_message_complete = message_complete_cb
+ ,.on_chunk_header = chunk_header_cb
+ ,.on_chunk_complete = chunk_complete_cb
+ };
+
+static http_parser_settings settings_count_body =
+ {.on_message_begin = message_begin_cb
+ ,.on_header_field = header_field_cb
+ ,.on_header_value = header_value_cb
+ ,.on_url = request_url_cb
+ ,.on_status = response_status_cb
+ ,.on_body = count_body_cb
+ ,.on_headers_complete = headers_complete_cb
+ ,.on_message_complete = message_complete_cb
+ ,.on_chunk_header = chunk_header_cb
+ ,.on_chunk_complete = chunk_complete_cb
+ };
+
+static http_parser_settings settings_connect =
+ {.on_message_begin = message_begin_cb
+ ,.on_header_field = header_field_cb
+ ,.on_header_value = header_value_cb
+ ,.on_url = request_url_cb
+ ,.on_status = response_status_cb
+ ,.on_body = dontcall_body_cb
+ ,.on_headers_complete = connect_headers_complete_cb
+ ,.on_message_complete = connect_message_complete_cb
+ ,.on_chunk_header = chunk_header_cb
+ ,.on_chunk_complete = chunk_complete_cb
+ };
+
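+/* All-NULL callbacks: http_parser_execute() skips unset callbacks, so
+ * parsing still advances and errors still surface via HTTP_PARSER_ERRNO()
+ * without any per-event work. */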
+static http_parser_settings settings_null =
+ {.on_message_begin = 0
+ ,.on_header_field = 0
+ ,.on_header_value = 0
+ ,.on_url = 0
+ ,.on_status = 0
+ ,.on_body = 0
+ ,.on_headers_complete = 0
+ ,.on_message_complete = 0
+ ,.on_chunk_header = 0
+ ,.on_chunk_complete = 0
+ };
+
+void
+parser_init (enum http_parser_type type)
+{
+ num_messages = 0;
+ http_parser_init(&parser, type);
+ memset(&messages, 0, sizeof messages);
+}
+
+size_t parse (const char *buf, size_t len)
+{
+ size_t nparsed;
+ currently_parsing_eof = (len == 0);
+ nparsed = http_parser_execute(&parser, &settings, buf, len);
+ return nparsed;
+}
+
+size_t parse_count_body (const char *buf, size_t len)
+{
+ size_t nparsed;
+ currently_parsing_eof = (len == 0);
+ nparsed = http_parser_execute(&parser, &settings_count_body, buf, len);
+ return nparsed;
+}
+
+size_t parse_pause (const char *buf, size_t len)
+{
+ size_t nparsed;
+ http_parser_settings s = settings_pause;
+
+ currently_parsing_eof = (len == 0);
+ current_pause_parser = &s;
+ nparsed = http_parser_execute(&parser, current_pause_parser, buf, len);
+ return nparsed;
+}
+
+size_t parse_connect (const char *buf, size_t len)
+{
+ size_t nparsed;
+ currently_parsing_eof = (len == 0);
+ nparsed = http_parser_execute(&parser, &settings_connect, buf, len);
+ return nparsed;
+}
+
+static inline int
+check_str_eq (const struct message *m,
+ const char *prop,
+ const char *expected,
+ const char *found) {
+ if ((expected == NULL) != (found == NULL)) {
+ printf("\n*** Error: %s in '%s' ***\n\n", prop, m->name);
+ printf("expected %s\n", (expected == NULL) ? "NULL" : expected);
+ printf(" found %s\n", (found == NULL) ? "NULL" : found);
+ return 0;
+ }
+ if (expected != NULL && 0 != strcmp(expected, found)) {
+ printf("\n*** Error: %s in '%s' ***\n\n", prop, m->name);
+ printf("expected '%s'\n", expected);
+ printf(" found '%s'\n", found);
+ return 0;
+ }
+ return 1;
+}
+
+static inline int
+check_num_eq (const struct message *m,
+ const char *prop,
+ int expected,
+ int found) {
+ if (expected != found) {
+ printf("\n*** Error: %s in '%s' ***\n\n", prop, m->name);
+ printf("expected %d\n", expected);
+ printf(" found %d\n", found);
+ return 0;
+ }
+ return 1;
+}
+
+#define MESSAGE_CHECK_STR_EQ(expected, found, prop) \
+ if (!check_str_eq(expected, #prop, expected->prop, found->prop)) return 0
+
+#define MESSAGE_CHECK_NUM_EQ(expected, found, prop) \
+ if (!check_num_eq(expected, #prop, expected->prop, found->prop)) return 0
+
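+/* Compare one URL component: copy the (off, len) slice that
+ * http_parser_parse_url() recorded for field fn out of the parsed URL into
+ * ubuf, then check it against the expected string. */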
+#define MESSAGE_CHECK_URL_EQ(u, expected, found, prop, fn) \
+do { \
+ char ubuf[256]; \
+ \
+ if ((u)->field_set & (1 << (fn))) { \
+ memcpy(ubuf, (found)->request_url + (u)->field_data[(fn)].off, \
+ (u)->field_data[(fn)].len); \
+ ubuf[(u)->field_data[(fn)].len] = '\0'; \
+ } else { \
+ ubuf[0] = '\0'; \
+ } \
+ \
+ check_str_eq(expected, #prop, expected->prop, ubuf); \
+} while(0)
+
+int
+message_eq (int index, int connect, const struct message *expected)
+{
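+  /* connect != 0 relaxes the checks that don't apply after CONNECT: the
+   * parser stops at the end of the headers, so body and chunk counts are
+   * expected to be zero. */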
+ int i;
+ struct message *m = &messages[index];
+
+ MESSAGE_CHECK_NUM_EQ(expected, m, http_major);
+ MESSAGE_CHECK_NUM_EQ(expected, m, http_minor);
+
+ if (expected->type == HTTP_REQUEST) {
+ MESSAGE_CHECK_NUM_EQ(expected, m, method);
+ } else {
+ MESSAGE_CHECK_NUM_EQ(expected, m, status_code);
+ MESSAGE_CHECK_STR_EQ(expected, m, response_status);
+ assert(m->status_cb_called);
+ }
+
+ if (!connect) {
+ MESSAGE_CHECK_NUM_EQ(expected, m, should_keep_alive);
+ MESSAGE_CHECK_NUM_EQ(expected, m, message_complete_on_eof);
+ }
+
+ assert(m->message_begin_cb_called);
+ assert(m->headers_complete_cb_called);
+ assert(m->message_complete_cb_called);
+
+ MESSAGE_CHECK_STR_EQ(expected, m, request_url);
+
+ /* Check URL components; we can't do this w/ CONNECT since it doesn't
+ * send us a well-formed URL.
+ */
+ if (*m->request_url && m->method != HTTP_CONNECT) {
+ struct http_parser_url u;
+
+ if (http_parser_parse_url(m->request_url, strlen(m->request_url), 0, &u)) {
+ fprintf(stderr, "\n\n*** failed to parse URL %s ***\n\n",
+ m->request_url);
+ abort();
+ }
+
+ if (expected->host) {
+ MESSAGE_CHECK_URL_EQ(&u, expected, m, host, UF_HOST);
+ }
+
+ if (expected->userinfo) {
+ MESSAGE_CHECK_URL_EQ(&u, expected, m, userinfo, UF_USERINFO);
+ }
+
+ m->port = (u.field_set & (1 << UF_PORT)) ?
+ u.port : 0;
+
+ MESSAGE_CHECK_URL_EQ(&u, expected, m, query_string, UF_QUERY);
+ MESSAGE_CHECK_URL_EQ(&u, expected, m, fragment, UF_FRAGMENT);
+ MESSAGE_CHECK_URL_EQ(&u, expected, m, request_path, UF_PATH);
+ MESSAGE_CHECK_NUM_EQ(expected, m, port);
+ }
+
+ if (connect) {
+ check_num_eq(m, "body_size", 0, m->body_size);
+ } else if (expected->body_size) {
+ MESSAGE_CHECK_NUM_EQ(expected, m, body_size);
+ } else {
+ MESSAGE_CHECK_STR_EQ(expected, m, body);
+ }
+
+ if (connect) {
+ check_num_eq(m, "num_chunks_complete", 0, m->num_chunks_complete);
+ } else {
+ assert(m->num_chunks == m->num_chunks_complete);
+ MESSAGE_CHECK_NUM_EQ(expected, m, num_chunks_complete);
+ for (i = 0; i < m->num_chunks && i < MAX_CHUNKS; i++) {
+ MESSAGE_CHECK_NUM_EQ(expected, m, chunk_lengths[i]);
+ }
+ }
+
+ MESSAGE_CHECK_NUM_EQ(expected, m, num_headers);
+
+ int r;
+ for (i = 0; i < m->num_headers; i++) {
+ r = check_str_eq(expected, "header field", expected->headers[i][0], m->headers[i][0]);
+ if (!r) return 0;
+ r = check_str_eq(expected, "header value", expected->headers[i][1], m->headers[i][1]);
+ if (!r) return 0;
+ }
+
+ if (!connect) {
+ MESSAGE_CHECK_STR_EQ(expected, m, upgrade);
+ }
+
+ return 1;
+}
+
+/* Given a sequence of varargs messages, return the number of them that the
+ * parser should successfully parse, taking into account that upgraded
+ * messages prevent all subsequent messages from being parsed.
+ */
+size_t
+count_parsed_messages(const size_t nmsgs, ...) {
+ size_t i;
+ va_list ap;
+
+ va_start(ap, nmsgs);
+
+ for (i = 0; i < nmsgs; i++) {
+ struct message *m = va_arg(ap, struct message *);
+
+ if (m->upgrade) {
+ va_end(ap);
+ return i + 1;
+ }
+ }
+
+ va_end(ap);
+ return nmsgs;
+}
+
+/* Given a sequence of bytes and the number of these that we were able to
+ * parse, verify that upgrade bodies are correct.
+ */
+void
+upgrade_message_fix(char *body, const size_t nread, const size_t nmsgs, ...) {
+ va_list ap;
+ size_t i;
+ size_t off = 0;
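+  /* off accumulates the raw length of each message, i.e. the offset where
+   * the next message would begin in the concatenated byte stream; for the
+   * upgraded message it is rolled back to the start of the upgrade data. */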
+
+ va_start(ap, nmsgs);
+
+ for (i = 0; i < nmsgs; i++) {
+ struct message *m = va_arg(ap, struct message *);
+
+ off += strlen(m->raw);
+
+ if (m->upgrade) {
+ off -= strlen(m->upgrade);
+
+ /* Check the portion of the response after its specified upgrade */
+ if (!check_str_eq(m, "upgrade", body + off, body + nread)) {
+ abort();
+ }
+
+ /* Fix up the response so that message_eq() will verify the beginning
+ * of the upgrade */
+ *(body + nread + strlen(m->upgrade)) = '\0';
+      messages[num_messages - 1].upgrade = body + nread;
+
+ va_end(ap);
+ return;
+ }
+ }
+
+ va_end(ap);
+ printf("\n\n*** Error: expected a message with upgrade ***\n");
+
+ abort();
+}
+
+static void
+print_error (const char *raw, size_t error_location)
+{
+ fprintf(stderr, "\n*** %s ***\n\n",
+ http_errno_description(HTTP_PARSER_ERRNO(&parser)));
+
+ int this_line = 0, char_len = 0;
+ size_t i, j, len = strlen(raw), error_location_line = 0;
+ for (i = 0; i < len; i++) {
+ if (i == error_location) this_line = 1;
+ switch (raw[i]) {
+ case '\r':
+ char_len = 2;
+ fprintf(stderr, "\\r");
+ break;
+
+ case '\n':
+ fprintf(stderr, "\\n\n");
+
+ if (this_line) goto print;
+
+ error_location_line = 0;
+ continue;
+
+ default:
+ char_len = 1;
+ fputc(raw[i], stderr);
+ break;
+ }
+ if (!this_line) error_location_line += char_len;
+ }
+
+ fprintf(stderr, "[eof]\n");
+
+ print:
+ for (j = 0; j < error_location_line; j++) {
+ fputc(' ', stderr);
+ }
+ fprintf(stderr, "^\n\nerror location: %u\n", (unsigned int)error_location);
+}
+
+void
+test_preserve_data (void)
+{
+ char my_data[] = "application-specific data";
+ http_parser parser;
+ parser.data = my_data;
+ http_parser_init(&parser, HTTP_REQUEST);
+ if (parser.data != my_data) {
+ printf("\n*** parser.data not preserved accross http_parser_init ***\n\n");
+ abort();
+ }
+}
+
+struct url_test {
+ const char *name;
+ const char *url;
+ int is_connect;
+ struct http_parser_url u;
+ int rv;
+};
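+/* In url_tests below, u.field_data[] holds (offset, length) pairs into url,
+ * indexed by UF_*; e.g. for "http://hostname/", UF_SCHEMA is { 0, 4 }
+ * ("http") and UF_HOST is { 7, 8 } ("hostname"). */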
+
+const struct url_test url_tests[] =
+{ {.name="proxy request"
+ ,.url="http://hostname/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PATH)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 7, 8 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 15, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="proxy request with port"
+ ,.url="http://hostname:444/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PORT) | (1 << UF_PATH)
+ ,.port=444
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 7, 8 } /* UF_HOST */
+ ,{ 16, 3 } /* UF_PORT */
+ ,{ 19, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="CONNECT request"
+ ,.url="hostname:443"
+ ,.is_connect=1
+ ,.u=
+ {.field_set=(1 << UF_HOST) | (1 << UF_PORT)
+ ,.port=443
+ ,.field_data=
+ {{ 0, 0 } /* UF_SCHEMA */
+ ,{ 0, 8 } /* UF_HOST */
+ ,{ 9, 3 } /* UF_PORT */
+ ,{ 0, 0 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="CONNECT request but not connect"
+ ,.url="hostname:443"
+ ,.is_connect=0
+ ,.rv=1
+ }
+
+, {.name="proxy ipv6 request"
+ ,.url="http://[1:2::3:4]/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PATH)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 8, 8 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 17, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="proxy ipv6 request with port"
+ ,.url="http://[1:2::3:4]:67/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PORT) | (1 << UF_PATH)
+ ,.port=67
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 8, 8 } /* UF_HOST */
+ ,{ 18, 2 } /* UF_PORT */
+ ,{ 20, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="CONNECT ipv6 address"
+ ,.url="[1:2::3:4]:443"
+ ,.is_connect=1
+ ,.u=
+ {.field_set=(1 << UF_HOST) | (1 << UF_PORT)
+ ,.port=443
+ ,.field_data=
+ {{ 0, 0 } /* UF_SCHEMA */
+ ,{ 1, 8 } /* UF_HOST */
+ ,{ 11, 3 } /* UF_PORT */
+ ,{ 0, 0 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="ipv4 in ipv6 address"
+ ,.url="http://[2001:0000:0000:0000:0000:0000:1.9.1.1]/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set=(1 << UF_SCHEMA) | (1 << UF_HOST) | (1 << UF_PATH)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 8, 37 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 46, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="extra ? in query string"
+ ,.url="http://a.tbcdn.cn/p/fp/2010c/??fp-header-min.css,fp-base-min.css,"
+ "fp-channel-min.css,fp-product-min.css,fp-mall-min.css,fp-category-min.css,"
+ "fp-sub-min.css,fp-gdp4p-min.css,fp-css3-min.css,fp-misc-min.css?t=20101022.css"
+ ,.is_connect=0
+ ,.u=
+ {.field_set=(1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH) | (1<<UF_QUERY)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 7, 10 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 17, 12 } /* UF_PATH */
+ ,{ 30,187 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="space URL encoded"
+ ,.url="/toto.html?toto=a%20b"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_PATH) | (1<<UF_QUERY)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 0 } /* UF_SCHEMA */
+ ,{ 0, 0 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 0, 10 } /* UF_PATH */
+ ,{ 11, 10 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="URL fragment"
+ ,.url="/toto.html#titi"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_PATH) | (1<<UF_FRAGMENT)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 0 } /* UF_SCHEMA */
+ ,{ 0, 0 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 0, 10 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 11, 4 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="complex URL fragment"
+ ,.url="http://www.webmasterworld.com/r.cgi?f=21&d=8405&url="
+ "http://www.example.com/index.html?foo=bar&hello=world#midpage"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH) | (1<<UF_QUERY) |\
+ (1<<UF_FRAGMENT)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 7, 22 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 29, 6 } /* UF_PATH */
+ ,{ 36, 69 } /* UF_QUERY */
+ ,{106, 7 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="complex URL from node js url parser doc"
+ ,.url="http://host.com:8080/p/a/t/h?query=string#hash"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PORT) | (1<<UF_PATH) |\
+ (1<<UF_QUERY) | (1<<UF_FRAGMENT)
+ ,.port=8080
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 7, 8 } /* UF_HOST */
+ ,{ 16, 4 } /* UF_PORT */
+ ,{ 20, 8 } /* UF_PATH */
+ ,{ 29, 12 } /* UF_QUERY */
+ ,{ 42, 4 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="complex URL with basic auth from node js url parser doc"
+ ,.url="http://a:b@host.com:8080/p/a/t/h?query=string#hash"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PORT) | (1<<UF_PATH) |\
+ (1<<UF_QUERY) | (1<<UF_FRAGMENT) | (1<<UF_USERINFO)
+ ,.port=8080
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 11, 8 } /* UF_HOST */
+ ,{ 20, 4 } /* UF_PORT */
+ ,{ 24, 8 } /* UF_PATH */
+ ,{ 33, 12 } /* UF_QUERY */
+ ,{ 46, 4 } /* UF_FRAGMENT */
+ ,{ 7, 3 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="double @"
+ ,.url="http://a:b@@hostname:443/"
+ ,.is_connect=0
+ ,.rv=1
+ }
+
+, {.name="proxy empty host"
+ ,.url="http://:443/"
+ ,.is_connect=0
+ ,.rv=1
+ }
+
+, {.name="proxy empty port"
+ ,.url="http://hostname:/"
+ ,.is_connect=0
+ ,.rv=1
+ }
+
+, {.name="CONNECT with basic auth"
+ ,.url="a:b@hostname:443"
+ ,.is_connect=1
+ ,.rv=1
+ }
+
+, {.name="CONNECT empty host"
+ ,.url=":443"
+ ,.is_connect=1
+ ,.rv=1
+ }
+
+, {.name="CONNECT empty port"
+ ,.url="hostname:"
+ ,.is_connect=1
+ ,.rv=1
+ }
+
+, {.name="CONNECT with extra bits"
+ ,.url="hostname:443/"
+ ,.is_connect=1
+ ,.rv=1
+ }
+
+, {.name="space in URL"
+ ,.url="/foo bar/"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy basic auth with space url encoded"
+ ,.url="http://a%20:b@host.com/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH) | (1<<UF_USERINFO)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 14, 8 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 22, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 7, 6 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="carriage return in URL"
+ ,.url="/foo\rbar/"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy double : in URL"
+ ,.url="http://hostname::443/"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy basic auth with double :"
+ ,.url="http://a::b@host.com/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH) | (1<<UF_USERINFO)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 12, 8 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 20, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 7, 4 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="line feed in URL"
+ ,.url="/foo\nbar/"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy empty basic auth"
+ ,.url="http://@hostname/fo"
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 8, 8 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 16, 3 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+, {.name="proxy line feed in hostname"
+ ,.url="http://host\name/fo"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy % in hostname"
+ ,.url="http://host%name/fo"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy ; in hostname"
+ ,.url="http://host;ame/fo"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy basic auth with unreservedchars"
+ ,.url="http://a!;-_!=+$@host.com/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH) | (1<<UF_USERINFO)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 17, 8 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 25, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 7, 9 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="proxy only empty basic auth"
+ ,.url="http://@/fo"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy only basic auth"
+ ,.url="http://toto@/fo"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy emtpy hostname"
+ ,.url="http:///fo"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="proxy = in URL"
+ ,.url="http://host=ame/fo"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="ipv6 address with Zone ID"
+ ,.url="http://[fe80::a%25eth0]/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 8, 14 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 23, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="ipv6 address with Zone ID, but '%' is not percent-encoded"
+ ,.url="http://[fe80::a%eth0]/"
+ ,.is_connect=0
+ ,.u=
+ {.field_set= (1<<UF_SCHEMA) | (1<<UF_HOST) | (1<<UF_PATH)
+ ,.port=0
+ ,.field_data=
+ {{ 0, 4 } /* UF_SCHEMA */
+ ,{ 8, 12 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 21, 1 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="ipv6 address ending with '%'"
+ ,.url="http://[fe80::a%]/"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="ipv6 address with Zone ID including bad character"
+ ,.url="http://[fe80::a%$HOME]/"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="just ipv6 Zone ID"
+ ,.url="http://[%eth0]/"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="empty url"
+ ,.url=""
+ ,.is_connect=0
+ ,.rv=1
+ }
+
+, {.name="NULL url"
+ ,.url=NULL
+ ,.is_connect=0
+ ,.rv=1
+ }
+
+, {.name="full of spaces url"
+ ,.url=" "
+ ,.is_connect=0
+ ,.rv=1
+ }
+
+#if HTTP_PARSER_STRICT
+
+, {.name="tab in URL"
+ ,.url="/foo\tbar/"
+ ,.rv=1 /* s_dead */
+ }
+
+, {.name="form feed in URL"
+ ,.url="/foo\fbar/"
+ ,.rv=1 /* s_dead */
+ }
+
+#else /* !HTTP_PARSER_STRICT */
+
+, {.name="tab in URL"
+ ,.url="/foo\tbar/"
+ ,.u=
+ {.field_set=(1 << UF_PATH)
+ ,.field_data=
+ {{ 0, 0 } /* UF_SCHEMA */
+ ,{ 0, 0 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 0, 9 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+
+, {.name="form feed in URL"
+ ,.url="/foo\fbar/"
+ ,.u=
+ {.field_set=(1 << UF_PATH)
+ ,.field_data=
+ {{ 0, 0 } /* UF_SCHEMA */
+ ,{ 0, 0 } /* UF_HOST */
+ ,{ 0, 0 } /* UF_PORT */
+ ,{ 0, 9 } /* UF_PATH */
+ ,{ 0, 0 } /* UF_QUERY */
+ ,{ 0, 0 } /* UF_FRAGMENT */
+ ,{ 0, 0 } /* UF_USERINFO */
+ }
+ }
+ ,.rv=0
+ }
+#endif
+};
+
+void
+dump_url (const char *url, const struct http_parser_url *u)
+{
+ unsigned int i;
+
+ printf("\tfield_set: 0x%x, port: %u\n", u->field_set, u->port);
+ for (i = 0; i < UF_MAX; i++) {
+ if ((u->field_set & (1 << i)) == 0) {
+ printf("\tfield_data[%u]: unset\n", i);
+ continue;
+ }
+
+ printf("\tfield_data[%u]: off: %u len: %u part: \"%.*s\n\"",
+ i,
+ u->field_data[i].off,
+ u->field_data[i].len,
+ u->field_data[i].len,
+ url + u->field_data[i].off);
+ }
+}
+
+void
+test_parse_url (void)
+{
+ struct http_parser_url u;
+ const struct url_test *test;
+ unsigned int i;
+ int rv;
+
+ for (i = 0; i < (sizeof(url_tests) / sizeof(url_tests[0])); i++) {
+ test = &url_tests[i];
+ memset(&u, 0, sizeof(u));
+
+ rv = http_parser_parse_url(test->url,
+ test->url ? strlen(test->url) : 0,
+ test->is_connect,
+ &u);
+
+ if (test->rv == 0) {
+ if (rv != 0) {
+ printf("\n*** http_parser_parse_url(\"%s\") \"%s\" test failed, "
+ "unexpected rv %d ***\n\n", test->url, test->name, rv);
+ abort();
+ }
+
+ if (memcmp(&u, &test->u, sizeof(u)) != 0) {
+ printf("\n*** http_parser_parse_url(\"%s\") \"%s\" failed ***\n",
+ test->url, test->name);
+
+ printf("target http_parser_url:\n");
+ dump_url(test->url, &test->u);
+ printf("result http_parser_url:\n");
+ dump_url(test->url, &u);
+
+ abort();
+ }
+ } else {
+ /* test->rv != 0 */
+ if (rv == 0) {
+ printf("\n*** http_parser_parse_url(\"%s\") \"%s\" test failed, "
+ "unexpected rv %d ***\n\n", test->url, test->name, rv);
+ abort();
+ }
+ }
+ }
+}
+
+void
+test_method_str (void)
+{
+ assert(0 == strcmp("GET", http_method_str(HTTP_GET)));
+ assert(0 == strcmp("<unknown>", http_method_str(1337)));
+}
+
+void
+test_status_str (void)
+{
+ assert(0 == strcmp("OK", http_status_str(HTTP_STATUS_OK)));
+ assert(0 == strcmp("Not Found", http_status_str(HTTP_STATUS_NOT_FOUND)));
+ assert(0 == strcmp("<unknown>", http_status_str(1337)));
+}
+
+void
+test_message (const struct message *message)
+{
+ size_t raw_len = strlen(message->raw);
+ size_t msg1len;
+ for (msg1len = 0; msg1len < raw_len; msg1len++) {
+ parser_init(message->type);
+
+ size_t read;
+ const char *msg1 = message->raw;
+ const char *msg2 = msg1 + msg1len;
+ size_t msg2len = raw_len - msg1len;
+
+ if (msg1len) {
+ assert(num_messages == 0);
+ messages[0].headers_complete_cb_called = FALSE;
+
+ read = parse(msg1, msg1len);
+
+ if (!messages[0].headers_complete_cb_called && parser.nread != read) {
+ assert(parser.nread == read);
+ print_error(msg1, read);
+ abort();
+ }
+
+ if (message->upgrade && parser.upgrade && num_messages > 0) {
+ messages[num_messages - 1].upgrade = msg1 + read;
+ goto test;
+ }
+
+ if (read != msg1len) {
+ print_error(msg1, read);
+ abort();
+ }
+ }
+
+ read = parse(msg2, msg2len);
+
+ if (message->upgrade && parser.upgrade) {
+ messages[num_messages - 1].upgrade = msg2 + read;
+ goto test;
+ }
+
+ if (read != msg2len) {
+ print_error(msg2, read);
+ abort();
+ }
+
+ read = parse(NULL, 0);
+
+ if (read != 0) {
+ print_error(message->raw, read);
+ abort();
+ }
+
+ test:
+
+ if (num_messages != 1) {
+ printf("\n*** num_messages != 1 after testing '%s' ***\n\n", message->name);
+ abort();
+ }
+
+    if (!message_eq(0, 0, message)) abort();
+ }
+}
+
+void
+test_message_count_body (const struct message *message)
+{
+ parser_init(message->type);
+
+ size_t read;
+ size_t l = strlen(message->raw);
+ size_t i, toread;
+ size_t chunk = 4024;
+
+ for (i = 0; i < l; i+= chunk) {
+ toread = MIN(l-i, chunk);
+ read = parse_count_body(message->raw + i, toread);
+ if (read != toread) {
+ print_error(message->raw, read);
+ abort();
+ }
+ }
+
+
+ read = parse_count_body(NULL, 0);
+ if (read != 0) {
+ print_error(message->raw, read);
+ abort();
+ }
+
+ if (num_messages != 1) {
+ printf("\n*** num_messages != 1 after testing '%s' ***\n\n", message->name);
+ abort();
+ }
+
+  if (!message_eq(0, 0, message)) abort();
+}
+
+void
+test_simple_type (const char *buf,
+ enum http_errno err_expected,
+ enum http_parser_type type)
+{
+ parser_init(type);
+
+ enum http_errno err;
+
+ parse(buf, strlen(buf));
+ err = HTTP_PARSER_ERRNO(&parser);
+ parse(NULL, 0);
+
+ /* In strict mode, allow us to pass with an unexpected HPE_STRICT as
+ * long as the caller isn't expecting success.
+ */
+#if HTTP_PARSER_STRICT
+ if (err_expected != err && err_expected != HPE_OK && err != HPE_STRICT) {
+#else
+ if (err_expected != err) {
+#endif
+ fprintf(stderr, "\n*** test_simple expected %s, but saw %s ***\n\n%s\n",
+ http_errno_name(err_expected), http_errno_name(err), buf);
+ abort();
+ }
+}
+
+void
+test_simple (const char *buf, enum http_errno err_expected)
+{
+ test_simple_type(buf, err_expected, HTTP_REQUEST);
+}
+
+void
+test_invalid_header_content (int req, const char* str)
+{
+ http_parser parser;
+ http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
+ size_t parsed;
+ const char *buf;
+ buf = req ?
+ "GET / HTTP/1.1\r\n" :
+ "HTTP/1.1 200 OK\r\n";
+ parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
+ assert(parsed == strlen(buf));
+
+ buf = str;
+ size_t buflen = strlen(buf);
+
+ parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
+ if (parsed != buflen) {
+ assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_HEADER_TOKEN);
+ return;
+ }
+
+ fprintf(stderr,
+ "\n*** Error expected but none in invalid header content test ***\n");
+ abort();
+}
+
+void
+test_invalid_header_field_content_error (int req)
+{
+ test_invalid_header_content(req, "Foo: F\01ailure");
+ test_invalid_header_content(req, "Foo: B\02ar");
+}
+
+void
+test_invalid_header_field (int req, const char* str)
+{
+ http_parser parser;
+ http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
+ size_t parsed;
+ const char *buf;
+ buf = req ?
+ "GET / HTTP/1.1\r\n" :
+ "HTTP/1.1 200 OK\r\n";
+ parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
+ assert(parsed == strlen(buf));
+
+ buf = str;
+ size_t buflen = strlen(buf);
+
+ parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
+ if (parsed != buflen) {
+ assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_HEADER_TOKEN);
+ return;
+ }
+
+ fprintf(stderr,
+ "\n*** Error expected but none in invalid header token test ***\n");
+ abort();
+}
+
+void
+test_invalid_header_field_token_error (int req)
+{
+ test_invalid_header_field(req, "Fo@: Failure");
+ test_invalid_header_field(req, "Foo\01\test: Bar");
+}
+
+void
+test_double_content_length_error (int req)
+{
+ http_parser parser;
+ http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
+ size_t parsed;
+ const char *buf;
+ buf = req ?
+ "GET / HTTP/1.1\r\n" :
+ "HTTP/1.1 200 OK\r\n";
+ parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
+ assert(parsed == strlen(buf));
+
+ buf = "Content-Length: 0\r\nContent-Length: 1\r\n\r\n";
+ size_t buflen = strlen(buf);
+
+ parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
+ if (parsed != buflen) {
+ assert(HTTP_PARSER_ERRNO(&parser) == HPE_UNEXPECTED_CONTENT_LENGTH);
+ return;
+ }
+
+ fprintf(stderr,
+ "\n*** Error expected but none in double content-length test ***\n");
+ abort();
+}
+
+void
+test_chunked_content_length_error (int req)
+{
+ http_parser parser;
+ http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
+ size_t parsed;
+ const char *buf;
+ buf = req ?
+ "GET / HTTP/1.1\r\n" :
+ "HTTP/1.1 200 OK\r\n";
+ parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
+ assert(parsed == strlen(buf));
+
+ buf = "Transfer-Encoding: anything\r\nContent-Length: 1\r\n\r\n";
+ size_t buflen = strlen(buf);
+
+ parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
+ if (parsed != buflen) {
+ assert(HTTP_PARSER_ERRNO(&parser) == HPE_UNEXPECTED_CONTENT_LENGTH);
+ return;
+ }
+
+ fprintf(stderr,
+ "\n*** Error expected but none in chunked content-length test ***\n");
+ abort();
+}
+
+void
+test_header_cr_no_lf_error (int req)
+{
+ http_parser parser;
+ http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
+ size_t parsed;
+ const char *buf;
+ buf = req ?
+ "GET / HTTP/1.1\r\n" :
+ "HTTP/1.1 200 OK\r\n";
+ parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
+ assert(parsed == strlen(buf));
+
+ buf = "Foo: 1\rBar: 1\r\n\r\n";
+ size_t buflen = strlen(buf);
+
+ parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
+ if (parsed != buflen) {
+ assert(HTTP_PARSER_ERRNO(&parser) == HPE_LF_EXPECTED);
+ return;
+ }
+
+ fprintf(stderr,
+ "\n*** Error expected but none in header whitespace test ***\n");
+ abort();
+}
+
+void
+test_no_overflow_parse_url (void)
+{
+ int rv;
+ struct http_parser_url u;
+
+ http_parser_url_init(&u);
+ rv = http_parser_parse_url("http://example.com:8001", 22, 0, &u);
+
+ if (rv != 0) {
+ fprintf(stderr,
+ "\n*** test_no_overflow_parse_url invalid return value=%d\n",
+ rv);
+ abort();
+ }
+
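+  /* Only 22 of the URL's 23 bytes are passed in, so ":8001" is truncated to
+   * ":800"; a parser that read past the given length would report 8001. */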
+ if (u.port != 800) {
+ fprintf(stderr,
+ "\n*** test_no_overflow_parse_url invalid port number=%d\n",
+ u.port);
+ abort();
+ }
+}
+
+void
+test_header_overflow_error (int req)
+{
+ http_parser parser;
+ http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
+ size_t parsed;
+ const char *buf;
+ buf = req ? "GET / HTTP/1.1\r\n" : "HTTP/1.0 200 OK\r\n";
+ parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
+ assert(parsed == strlen(buf));
+
+ buf = "header-key: header-value\r\n";
+ size_t buflen = strlen(buf);
+
+ int i;
+ for (i = 0; i < 10000; i++) {
+ parsed = http_parser_execute(&parser, &settings_null, buf, buflen);
+ if (parsed != buflen) {
+ //fprintf(stderr, "error found on iter %d\n", i);
+ assert(HTTP_PARSER_ERRNO(&parser) == HPE_HEADER_OVERFLOW);
+ return;
+ }
+ }
+
+ fprintf(stderr, "\n*** Error expected but none in header overflow test ***\n");
+ abort();
+}
+
+
+void
+test_header_nread_value (void)
+{
+ http_parser parser;
+ http_parser_init(&parser, HTTP_REQUEST);
+ size_t parsed;
+ const char *buf;
+ buf = "GET / HTTP/1.1\r\nheader: value\nhdr: value\r\n";
+ parsed = http_parser_execute(&parser, &settings_null, buf, strlen(buf));
+ assert(parsed == strlen(buf));
+
+ assert(parser.nread == strlen(buf));
+}
+
+
+static void
+test_content_length_overflow (const char *buf, size_t buflen, int expect_ok)
+{
+ http_parser parser;
+ http_parser_init(&parser, HTTP_RESPONSE);
+ http_parser_execute(&parser, &settings_null, buf, buflen);
+
+ if (expect_ok)
+ assert(HTTP_PARSER_ERRNO(&parser) == HPE_OK);
+ else
+ assert(HTTP_PARSER_ERRNO(&parser) == HPE_INVALID_CONTENT_LENGTH);
+}
+
+void
+test_header_content_length_overflow_error (void)
+{
+#define X(size) \
+ "HTTP/1.1 200 OK\r\n" \
+ "Content-Length: " #size "\r\n" \
+ "\r\n"
+ const char a[] = X(1844674407370955160); /* 2^64 / 10 - 1 */
+ const char b[] = X(18446744073709551615); /* 2^64-1 */
+ const char c[] = X(18446744073709551616); /* 2^64 */
+#undef X
+ test_content_length_overflow(a, sizeof(a) - 1, 1); /* expect ok */
+ test_content_length_overflow(b, sizeof(b) - 1, 0); /* expect failure */
+ test_content_length_overflow(c, sizeof(c) - 1, 0); /* expect failure */
+}
+
+void
+test_chunk_content_length_overflow_error (void)
+{
+#define X(size) \
+ "HTTP/1.1 200 OK\r\n" \
+ "Transfer-Encoding: chunked\r\n" \
+ "\r\n" \
+ #size "\r\n" \
+ "..."
+ const char a[] = X(FFFFFFFFFFFFFFE); /* 2^64 / 16 - 1 */
+ const char b[] = X(FFFFFFFFFFFFFFFF); /* 2^64-1 */
+ const char c[] = X(10000000000000000); /* 2^64 */
+#undef X
+ test_content_length_overflow(a, sizeof(a) - 1, 1); /* expect ok */
+ test_content_length_overflow(b, sizeof(b) - 1, 0); /* expect failure */
+ test_content_length_overflow(c, sizeof(c) - 1, 0); /* expect failure */
+}
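+
+/* A minimal sketch (illustrative only, not the parser's actual code) of the
+ * digit-accumulation overflow check the two tests above exercise: before
+ * committing value * base + digit, make sure the result still fits in 64
+ * bits.  Assumes <stdint.h> for uint64_t and UINT64_MAX. */
+#if 0
+static int
+accumulate_length (uint64_t *value, unsigned base, unsigned digit)
+{
+  if (*value > (UINT64_MAX - digit) / base)
+    return -1;               /* would overflow: cases b and c fail here */
+  *value = *value * base + digit;
+  return 0;                  /* still representable: case a stays HPE_OK */
+}
+#endif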
+
+void
+test_no_overflow_long_body (int req, size_t length)
+{
+ http_parser parser;
+ http_parser_init(&parser, req ? HTTP_REQUEST : HTTP_RESPONSE);
+ size_t parsed;
+ size_t i;
+ char buf1[3000];
+ size_t buf1len = sprintf(buf1, "%s\r\nConnection: Keep-Alive\r\nContent-Length: %lu\r\n\r\n",
+ req ? "POST / HTTP/1.0" : "HTTP/1.0 200 OK", (unsigned long)length);
+ parsed = http_parser_execute(&parser, &settings_null, buf1, buf1len);
+ if (parsed != buf1len)
+ goto err;
+
+ for (i = 0; i < length; i++) {
+ char foo = 'a';
+ parsed = http_parser_execute(&parser, &settings_null, &foo, 1);
+ if (parsed != 1)
+ goto err;
+ }
+
+ parsed = http_parser_execute(&parser, &settings_null, buf1, buf1len);
+ if (parsed != buf1len) goto err;
+ return;
+
+ err:
+ fprintf(stderr,
+ "\n*** error in test_no_overflow_long_body %s of length %lu ***\n",
+ req ? "REQUEST" : "RESPONSE",
+ (unsigned long)length);
+ abort();
+}
+
+void
+test_multiple3 (const struct message *r1, const struct message *r2, const struct message *r3)
+{
+ int message_count = count_parsed_messages(3, r1, r2, r3);
+
+ char total[ strlen(r1->raw)
+ + strlen(r2->raw)
+ + strlen(r3->raw)
+ + 1
+ ];
+ total[0] = '\0';
+
+ strcat(total, r1->raw);
+ strcat(total, r2->raw);
+ strcat(total, r3->raw);
+
+ parser_init(r1->type);
+
+ size_t read;
+
+ read = parse(total, strlen(total));
+
+ if (parser.upgrade) {
+ upgrade_message_fix(total, read, 3, r1, r2, r3);
+ goto test;
+ }
+
+ if (read != strlen(total)) {
+ print_error(total, read);
+ abort();
+ }
+
+ read = parse(NULL, 0);
+
+ if (read != 0) {
+ print_error(total, read);
+ abort();
+ }
+
+test:
+
+ if (message_count != num_messages) {
+ fprintf(stderr, "\n\n*** Parser didn't see 3 messages only %d *** \n", num_messages);
+ abort();
+ }
+
+ if (!message_eq(0, 0, r1)) abort();
+ if (message_count > 1 && !message_eq(1, 0, r2)) abort();
+ if (message_count > 2 && !message_eq(2, 0, r3)) abort();
+}
+
+/* SCAN through every possible breaking to make sure the
+ * parser can handle getting the content in any chunks that
+ * might come from the socket
+ */
+void
+test_scan (const struct message *r1, const struct message *r2, const struct message *r3)
+{
+ char total[80*1024] = "\0";
+ char buf1[80*1024] = "\0";
+ char buf2[80*1024] = "\0";
+ char buf3[80*1024] = "\0";
+
+ strcat(total, r1->raw);
+ strcat(total, r2->raw);
+ strcat(total, r3->raw);
+
+ size_t read;
+
+ int total_len = strlen(total);
+
+ int total_ops = 2 * (total_len - 1) * (total_len - 2) / 2;
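+  /* Split points (i, j) with 1 <= i < j < total_len give
+   * (total_len-1)(total_len-2)/2 pairs, doubled for the two parser types. */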
+  int ops = 0;
+
+ size_t buf1_len, buf2_len, buf3_len;
+ int message_count = count_parsed_messages(3, r1, r2, r3);
+
+ int i,j,type_both;
+ for (type_both = 0; type_both < 2; type_both ++ ) {
+ for (j = 2; j < total_len; j ++ ) {
+ for (i = 1; i < j; i ++ ) {
+
+ if (ops % 1000 == 0) {
+ printf("\b\b\b\b%3.0f%%", 100 * (float)ops /(float)total_ops);
+ fflush(stdout);
+ }
+ ops += 1;
+
+ parser_init(type_both ? HTTP_BOTH : r1->type);
+
+ buf1_len = i;
+ strlncpy(buf1, sizeof(buf1), total, buf1_len);
+ buf1[buf1_len] = 0;
+
+ buf2_len = j - i;
+      strlncpy(buf2, sizeof(buf2), total+i, buf2_len);
+ buf2[buf2_len] = 0;
+
+ buf3_len = total_len - j;
+      strlncpy(buf3, sizeof(buf3), total+j, buf3_len);
+ buf3[buf3_len] = 0;
+
+ assert(num_messages == 0);
+ messages[0].headers_complete_cb_called = FALSE;
+
+ read = parse(buf1, buf1_len);
+
+ if (!messages[0].headers_complete_cb_called && parser.nread != read) {
+ print_error(buf1, read);
+ goto error;
+ }
+
+ if (parser.upgrade) goto test;
+
+ if (read != buf1_len) {
+ print_error(buf1, read);
+ goto error;
+ }
+
+ read += parse(buf2, buf2_len);
+
+ if (parser.upgrade) goto test;
+
+ if (read != buf1_len + buf2_len) {
+ print_error(buf2, read);
+ goto error;
+ }
+
+ read += parse(buf3, buf3_len);
+
+ if (parser.upgrade) goto test;
+
+ if (read != buf1_len + buf2_len + buf3_len) {
+ print_error(buf3, read);
+ goto error;
+ }
+
+ parse(NULL, 0);
+
+test:
+ if (parser.upgrade) {
+ upgrade_message_fix(total, read, 3, r1, r2, r3);
+ }
+
+ if (message_count != num_messages) {
+ fprintf(stderr, "\n\nParser didn't see %d messages only %d\n",
+ message_count, num_messages);
+ goto error;
+ }
+
+ if (!message_eq(0, 0, r1)) {
+ fprintf(stderr, "\n\nError matching messages[0] in test_scan.\n");
+ goto error;
+ }
+
+ if (message_count > 1 && !message_eq(1, 0, r2)) {
+ fprintf(stderr, "\n\nError matching messages[1] in test_scan.\n");
+ goto error;
+ }
+
+ if (message_count > 2 && !message_eq(2, 0, r3)) {
+ fprintf(stderr, "\n\nError matching messages[2] in test_scan.\n");
+ goto error;
+ }
+ }
+ }
+ }
+ puts("\b\b\b\b100%");
+ return;
+
+ error:
+ fprintf(stderr, "i=%d j=%d\n", i, j);
+ fprintf(stderr, "buf1 (%u) %s\n\n", (unsigned int)buf1_len, buf1);
+ fprintf(stderr, "buf2 (%u) %s\n\n", (unsigned int)buf2_len , buf2);
+ fprintf(stderr, "buf3 (%u) %s\n", (unsigned int)buf3_len, buf3);
+ abort();
+}
+
+// user required to free the result
+// string terminated by \0
+char *
+create_large_chunked_message (int body_size_in_kb, const char* headers)
+{
+ int i;
+ size_t wrote = 0;
+ size_t headers_len = strlen(headers);
+ size_t bufsize = headers_len + (5+1024+2)*body_size_in_kb + 6;
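+  /* Each 1 KiB chunk costs "400\r\n" (5 bytes; 0x400 == 1024) + 1024 data
+   * bytes + CRLF (2 bytes); the final "0\r\n\r\n" plus its NUL adds 6. */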
+ char * buf = malloc(bufsize);
+
+ memcpy(buf, headers, headers_len);
+ wrote += headers_len;
+
+ for (i = 0; i < body_size_in_kb; i++) {
+ // write 1kb chunk into the body.
+ memcpy(buf + wrote, "400\r\n", 5);
+ wrote += 5;
+ memset(buf + wrote, 'C', 1024);
+ wrote += 1024;
+ strcpy(buf + wrote, "\r\n");
+ wrote += 2;
+ }
+
+ memcpy(buf + wrote, "0\r\n\r\n", 6);
+ wrote += 6;
+ assert(wrote == bufsize);
+
+ return buf;
+}
+
+/* Verify that we can pause parsing at any of the bytes in the
+ * message and still get the result that we're expecting. */
+void
+test_message_pause (const struct message *msg)
+{
+ char *buf = (char*) msg->raw;
+ size_t buflen = strlen(msg->raw);
+ size_t nread;
+
+ parser_init(msg->type);
+
+ do {
+ nread = parse_pause(buf, buflen);
+
+ // We can only set the upgrade buffer once we've gotten our message
+ // completion callback.
+ if (messages[0].message_complete_cb_called &&
+ msg->upgrade &&
+ parser.upgrade) {
+ messages[0].upgrade = buf + nread;
+ goto test;
+ }
+
+ if (nread < buflen) {
+
+ // Not much do to if we failed a strict-mode check
+ if (HTTP_PARSER_ERRNO(&parser) == HPE_STRICT) {
+ return;
+ }
+
+ assert (HTTP_PARSER_ERRNO(&parser) == HPE_PAUSED);
+ }
+
+ buf += nread;
+ buflen -= nread;
+ http_parser_pause(&parser, 0);
+ } while (buflen > 0);
+
+ nread = parse_pause(NULL, 0);
+ assert (nread == 0);
+
+test:
+ if (num_messages != 1) {
+ printf("\n*** num_messages != 1 after testing '%s' ***\n\n", msg->name);
+ abort();
+ }
+
+  if (!message_eq(0, 0, msg)) abort();
+}
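+
+/* Illustrative only: parse_pause() (defined earlier in this file) is expected
+ * to pause the parser from its callbacks.  A hypothetical pausing callback
+ * built on the real http_parser_pause() API, which the loop above clears
+ * with a 0 argument, might look like this: */
+#if 0
+int
+on_message_begin_pause (http_parser *p)
+{
+  http_parser_pause(p, 1);  /* http_parser_execute() stops with HPE_PAUSED */
+  return 0;
+}
+#endif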
+
+/* Verify that body and next message won't be parsed in responses to CONNECT */
+void
+test_message_connect (const struct message *msg)
+{
+ char *buf = (char*) msg->raw;
+ size_t buflen = strlen(msg->raw);
+
+ parser_init(msg->type);
+
+ parse_connect(buf, buflen);
+
+ if (num_messages != 1) {
+ printf("\n*** num_messages != 1 after testing '%s' ***\n\n", msg->name);
+ abort();
+ }
+
+  if (!message_eq(0, 1, msg)) abort();
+}
+
+int
+main (void)
+{
+ unsigned i, j, k;
+ unsigned long version;
+ unsigned major;
+ unsigned minor;
+ unsigned patch;
+
+ version = http_parser_version();
+ major = (version >> 16) & 255;
+ minor = (version >> 8) & 255;
+ patch = version & 255;
+ printf("http_parser v%u.%u.%u (0x%06lx)\n", major, minor, patch, version);
+
+ printf("sizeof(http_parser) = %u\n", (unsigned int)sizeof(http_parser));
+ assert(sizeof(http_parser) == 4 + 4 + 8 + 2 + 2 + 4 + sizeof(void *));
+
+ //// API
+ test_preserve_data();
+ test_parse_url();
+ test_method_str();
+ test_status_str();
+
+ //// NREAD
+ test_header_nread_value();
+
+ //// OVERFLOW CONDITIONS
+ test_no_overflow_parse_url();
+
+ test_header_overflow_error(HTTP_REQUEST);
+ test_no_overflow_long_body(HTTP_REQUEST, 1000);
+ test_no_overflow_long_body(HTTP_REQUEST, 100000);
+
+ test_header_overflow_error(HTTP_RESPONSE);
+ test_no_overflow_long_body(HTTP_RESPONSE, 1000);
+ test_no_overflow_long_body(HTTP_RESPONSE, 100000);
+
+ test_header_content_length_overflow_error();
+ test_chunk_content_length_overflow_error();
+
+ //// HEADER FIELD CONDITIONS
+ test_double_content_length_error(HTTP_REQUEST);
+ test_chunked_content_length_error(HTTP_REQUEST);
+ test_header_cr_no_lf_error(HTTP_REQUEST);
+ test_invalid_header_field_token_error(HTTP_REQUEST);
+ test_invalid_header_field_content_error(HTTP_REQUEST);
+ test_double_content_length_error(HTTP_RESPONSE);
+ test_chunked_content_length_error(HTTP_RESPONSE);
+ test_header_cr_no_lf_error(HTTP_RESPONSE);
+ test_invalid_header_field_token_error(HTTP_RESPONSE);
+ test_invalid_header_field_content_error(HTTP_RESPONSE);
+
+ test_simple_type(
+ "POST / HTTP/1.1\r\n"
+ "Content-Length:\r\n" // empty
+ "\r\n",
+ HPE_INVALID_CONTENT_LENGTH,
+ HTTP_REQUEST);
+
+ test_simple_type(
+ "POST / HTTP/1.1\r\n"
+ "Content-Length: 42 \r\n" // Note the surrounding whitespace.
+ "\r\n",
+ HPE_OK,
+ HTTP_REQUEST);
+
+ test_simple_type(
+ "POST / HTTP/1.1\r\n"
+ "Content-Length: 4 2\r\n"
+ "\r\n",
+ HPE_INVALID_CONTENT_LENGTH,
+ HTTP_REQUEST);
+
+ test_simple_type(
+ "POST / HTTP/1.1\r\n"
+ "Content-Length: 13 37\r\n"
+ "\r\n",
+ HPE_INVALID_CONTENT_LENGTH,
+ HTTP_REQUEST);
+
+ test_simple_type(
+ "POST / HTTP/1.1\r\n"
+ "Content-Length: 42\r\n"
+ " Hello world!\r\n",
+ HPE_INVALID_CONTENT_LENGTH,
+ HTTP_REQUEST);
+
+ test_simple_type(
+ "POST / HTTP/1.1\r\n"
+ "Content-Length: 42\r\n"
+ " \r\n",
+ HPE_OK,
+ HTTP_REQUEST);
+
+ //// RESPONSES
+
+ test_simple_type("HTP/1.1 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE);
+ test_simple_type("HTTP/01.1 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE);
+ test_simple_type("HTTP/11.1 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE);
+ test_simple_type("HTTP/1.01 200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE);
+ test_simple_type("HTTP/1.1\t200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE);
+ test_simple_type("\rHTTP/1.1\t200 OK\r\n\r\n", HPE_INVALID_VERSION, HTTP_RESPONSE);
+
+ for (i = 0; i < ARRAY_SIZE(responses); i++) {
+ test_message(&responses[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(responses); i++) {
+ test_message_pause(&responses[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(responses); i++) {
+ test_message_connect(&responses[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(responses); i++) {
+ if (!responses[i].should_keep_alive) continue;
+ for (j = 0; j < ARRAY_SIZE(responses); j++) {
+ if (!responses[j].should_keep_alive) continue;
+ for (k = 0; k < ARRAY_SIZE(responses); k++) {
+ test_multiple3(&responses[i], &responses[j], &responses[k]);
+ }
+ }
+ }
+
+ test_message_count_body(&responses[NO_HEADERS_NO_BODY_404]);
+ test_message_count_body(&responses[TRAILING_SPACE_ON_CHUNKED_BODY]);
+
+ // test very large chunked response
+ {
+ char * msg = create_large_chunked_message(31337,
+ "HTTP/1.0 200 OK\r\n"
+ "Transfer-Encoding: chunked\r\n"
+ "Content-Type: text/plain\r\n"
+ "\r\n");
+ struct message large_chunked =
+ {.name= "large chunked"
+ ,.type= HTTP_RESPONSE
+ ,.raw= msg
+ ,.should_keep_alive= FALSE
+ ,.message_complete_on_eof= FALSE
+ ,.http_major= 1
+ ,.http_minor= 0
+ ,.status_code= 200
+ ,.response_status= "OK"
+ ,.num_headers= 2
+ ,.headers=
+ { { "Transfer-Encoding", "chunked" }
+ , { "Content-Type", "text/plain" }
+ }
+ ,.body_size= 31337*1024
+ ,.num_chunks_complete= 31338
+ };
+ for (i = 0; i < MAX_CHUNKS; i++) {
+ large_chunked.chunk_lengths[i] = 1024;
+ }
+ test_message_count_body(&large_chunked);
+ free(msg);
+ }
+
+
+
+ printf("response scan 1/2 ");
+ test_scan( &responses[TRAILING_SPACE_ON_CHUNKED_BODY]
+ , &responses[NO_BODY_HTTP10_KA_204]
+ , &responses[NO_REASON_PHRASE]
+ );
+
+ printf("response scan 2/2 ");
+ test_scan( &responses[BONJOUR_MADAME_FR]
+ , &responses[UNDERSTORE_HEADER_KEY]
+ , &responses[NO_CARRIAGE_RET]
+ );
+
+ puts("responses okay");
+
+
+ /// REQUESTS
+
+ test_simple("GET / IHTTP/1.0\r\n\r\n", HPE_INVALID_CONSTANT);
+ test_simple("GET / ICE/1.0\r\n\r\n", HPE_INVALID_CONSTANT);
+ test_simple("GET / HTP/1.1\r\n\r\n", HPE_INVALID_VERSION);
+ test_simple("GET / HTTP/01.1\r\n\r\n", HPE_INVALID_VERSION);
+ test_simple("GET / HTTP/11.1\r\n\r\n", HPE_INVALID_VERSION);
+ test_simple("GET / HTTP/1.01\r\n\r\n", HPE_INVALID_VERSION);
+
+ test_simple("GET / HTTP/1.0\r\nHello: w\1rld\r\n\r\n", HPE_INVALID_HEADER_TOKEN);
+ test_simple("GET / HTTP/1.0\r\nHello: woooo\2rld\r\n\r\n", HPE_INVALID_HEADER_TOKEN);
+
+ // Extended characters - see nodejs/test/parallel/test-http-headers-obstext.js
+ test_simple("GET / HTTP/1.1\r\n"
+ "Test: Düsseldorf\r\n",
+ HPE_OK);
+
+ // Well-formed but incomplete
+ test_simple("GET / HTTP/1.1\r\n"
+ "Content-Type: text/plain\r\n"
+ "Content-Length: 6\r\n"
+ "\r\n"
+ "fooba",
+ HPE_OK);
+
+ // Unknown Transfer-Encoding in request
+ test_simple("GET / HTTP/1.1\r\n"
+ "Transfer-Encoding: unknown\r\n"
+ "\r\n",
+ HPE_INVALID_TRANSFER_ENCODING);
+
+ static const char *all_methods[] = {
+ "DELETE",
+ "GET",
+ "HEAD",
+ "POST",
+ "PUT",
+ //"CONNECT", //CONNECT can't be tested like other methods, it's a tunnel
+ "OPTIONS",
+ "TRACE",
+ "COPY",
+ "LOCK",
+ "MKCOL",
+ "MOVE",
+ "PROPFIND",
+ "PROPPATCH",
+ "SEARCH",
+ "UNLOCK",
+ "BIND",
+ "REBIND",
+ "UNBIND",
+ "ACL",
+ "REPORT",
+ "MKACTIVITY",
+ "CHECKOUT",
+ "MERGE",
+ "M-SEARCH",
+ "NOTIFY",
+ "SUBSCRIBE",
+ "UNSUBSCRIBE",
+ "PATCH",
+ "PURGE",
+ "MKCALENDAR",
+ "LINK",
+ "UNLINK",
+ 0 };
+ const char **this_method;
+ for (this_method = all_methods; *this_method; this_method++) {
+ char buf[200];
+ sprintf(buf, "%s / HTTP/1.1\r\n\r\n", *this_method);
+ test_simple(buf, HPE_OK);
+ }
+
+ static const char *bad_methods[] = {
+ "ASDF",
+ "C******",
+ "COLA",
+ "GEM",
+ "GETA",
+ "M****",
+ "MKCOLA",
+ "PROPPATCHA",
+ "PUN",
+ "PX",
+ "SA",
+ "hello world",
+ 0 };
+ for (this_method = bad_methods; *this_method; this_method++) {
+ char buf[200];
+ sprintf(buf, "%s / HTTP/1.1\r\n\r\n", *this_method);
+ test_simple(buf, HPE_INVALID_METHOD);
+ }
+
+ // illegal header field name line folding
+ test_simple("GET / HTTP/1.1\r\n"
+ "name\r\n"
+ " : value\r\n"
+ "\r\n",
+ HPE_INVALID_HEADER_TOKEN);
+
+ const char *dumbluck2 =
+ "GET / HTTP/1.1\r\n"
+ "X-SSL-Nonsense: -----BEGIN CERTIFICATE-----\r\n"
+ "\tMIIFbTCCBFWgAwIBAgICH4cwDQYJKoZIhvcNAQEFBQAwcDELMAkGA1UEBhMCVUsx\r\n"
+ "\tETAPBgNVBAoTCGVTY2llbmNlMRIwEAYDVQQLEwlBdXRob3JpdHkxCzAJBgNVBAMT\r\n"
+ "\tAkNBMS0wKwYJKoZIhvcNAQkBFh5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMu\r\n"
+ "\tdWswHhcNMDYwNzI3MTQxMzI4WhcNMDcwNzI3MTQxMzI4WjBbMQswCQYDVQQGEwJV\r\n"
+ "\tSzERMA8GA1UEChMIZVNjaWVuY2UxEzARBgNVBAsTCk1hbmNoZXN0ZXIxCzAJBgNV\r\n"
+ "\tBAcTmrsogriqMWLAk1DMRcwFQYDVQQDEw5taWNoYWVsIHBhcmQYJKoZIhvcNAQEB\r\n"
+ "\tBQADggEPADCCAQoCggEBANPEQBgl1IaKdSS1TbhF3hEXSl72G9J+WC/1R64fAcEF\r\n"
+ "\tW51rEyFYiIeZGx/BVzwXbeBoNUK41OK65sxGuflMo5gLflbwJtHBRIEKAfVVp3YR\r\n"
+ "\tgW7cMA/s/XKgL1GEC7rQw8lIZT8RApukCGqOVHSi/F1SiFlPDxuDfmdiNzL31+sL\r\n"
+ "\t0iwHDdNkGjy5pyBSB8Y79dsSJtCW/iaLB0/n8Sj7HgvvZJ7x0fr+RQjYOUUfrePP\r\n"
+ "\tu2MSpFyf+9BbC/aXgaZuiCvSR+8Snv3xApQY+fULK/xY8h8Ua51iXoQ5jrgu2SqR\r\n"
+ "\twgA7BUi3G8LFzMBl8FRCDYGUDy7M6QaHXx1ZWIPWNKsCAwEAAaOCAiQwggIgMAwG\r\n"
+ "\tA1UdEwEB/wQCMAAwEQYJYIZIAYb4QgHTTPAQDAgWgMA4GA1UdDwEB/wQEAwID6DAs\r\n"
+ "\tBglghkgBhvhCAQ0EHxYdVUsgZS1TY2llbmNlIFVzZXIgQ2VydGlmaWNhdGUwHQYD\r\n"
+ "\tVR0OBBYEFDTt/sf9PeMaZDHkUIldrDYMNTBZMIGaBgNVHSMEgZIwgY+AFAI4qxGj\r\n"
+ "\tloCLDdMVKwiljjDastqooXSkcjBwMQswCQYDVQQGEwJVSzERMA8GA1UEChMIZVNj\r\n"
+ "\taWVuY2UxEjAQBgNVBAsTCUF1dGhvcml0eTELMAkGA1UEAxMCQ0ExLTArBgkqhkiG\r\n"
+ "\t9w0BCQEWHmNhLW9wZXJhdG9yQGdyaWQtc3VwcG9ydC5hYy51a4IBADApBgNVHRIE\r\n"
+ "\tIjAggR5jYS1vcGVyYXRvckBncmlkLXN1cHBvcnQuYWMudWswGQYDVR0gBBIwEDAO\r\n"
+ "\tBgwrBgEEAdkvAQEBAQYwPQYJYIZIAYb4QgEEBDAWLmh0dHA6Ly9jYS5ncmlkLXN1\r\n"
+ "\tcHBvcnQuYWMudmT4sopwqlBWsvcHViL2NybC9jYWNybC5jcmwwPQYJYIZIAYb4QgEDBDAWLmh0\r\n"
+ "\tdHA6Ly9jYS5ncmlkLXN1cHBvcnQuYWMudWsvcHViL2NybC9jYWNybC5jcmwwPwYD\r\n"
+ "\tVR0fBDgwNjA0oDKgMIYuaHR0cDovL2NhLmdyaWQt5hYy51ay9wdWIv\r\n"
+ "\tY3JsL2NhY3JsLmNybDANBgkqhkiG9w0BAQUFAAOCAQEAS/U4iiooBENGW/Hwmmd3\r\n"
+ "\tXCy6Zrt08YjKCzGNjorT98g8uGsqYjSxv/hmi0qlnlHs+k/3Iobc3LjS5AMYr5L8\r\n"
+ "\tUO7OSkgFFlLHQyC9JzPfmLCAugvzEbyv4Olnsr8hbxF1MbKZoQxUZtMVu29wjfXk\r\n"
+ "\thTeApBv7eaKCWpSp7MCbvgzm74izKhu3vlDk9w6qVrxePfGgpKPqfHiOoGhFnbTK\r\n"
+ "\twTC6o2xq5y0qZ03JonF7OJspEd3I5zKY3E+ov7/ZhW6DqT8UFvsAdjvQbXyhV8Eu\r\n"
+ "\tYhixw1aKEPzNjNowuIseVogKOLXxWI5vAi5HgXdS0/ES5gDGsABo4fqovUKlgop3\r\n"
+ "\tRA==\r\n"
+ "\t-----END CERTIFICATE-----\r\n"
+ "\r\n";
+ test_simple(dumbluck2, HPE_OK);
+
+ const char *corrupted_connection =
+ "GET / HTTP/1.1\r\n"
+ "Host: www.example.com\r\n"
+ "Connection\r\033\065\325eep-Alive\r\n"
+ "Accept-Encoding: gzip\r\n"
+ "\r\n";
+ test_simple(corrupted_connection, HPE_INVALID_HEADER_TOKEN);
+
+ const char *corrupted_header_name =
+ "GET / HTTP/1.1\r\n"
+ "Host: www.example.com\r\n"
+ "X-Some-Header\r\033\065\325eep-Alive\r\n"
+ "Accept-Encoding: gzip\r\n"
+ "\r\n";
+ test_simple(corrupted_header_name, HPE_INVALID_HEADER_TOKEN);
+
+#if 0
+ // NOTE(Wed Nov 18 11:57:27 CET 2009) this seems okay. we just read body
+ // until EOF.
+ //
+ // no content-length
+ // error if there is a body without content length
+ const char *bad_get_no_headers_no_body = "GET /bad_get_no_headers_no_body/world HTTP/1.1\r\n"
+ "Accept: */*\r\n"
+ "\r\n"
+ "HELLO";
+ test_simple(bad_get_no_headers_no_body, 0);
+#endif
+ /* TODO sending junk and large headers gets rejected */
+
+
+ /* check to make sure our predefined requests are okay */
+ for (i = 0; i < ARRAY_SIZE(requests); i++) {
+ test_message(&requests[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(requests); i++) {
+ test_message_pause(&requests[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(requests); i++) {
+ if (!requests[i].should_keep_alive) continue;
+ for (j = 0; j < ARRAY_SIZE(requests); j++) {
+ if (!requests[j].should_keep_alive) continue;
+ for (k = 0; k < ARRAY_SIZE(requests); k++) {
+ test_multiple3(&requests[i], &requests[j], &requests[k]);
+ }
+ }
+ }
+
+ printf("request scan 1/4 ");
+ test_scan( &requests[GET_NO_HEADERS_NO_BODY]
+ , &requests[GET_ONE_HEADER_NO_BODY]
+ , &requests[GET_NO_HEADERS_NO_BODY]
+ );
+
+ printf("request scan 2/4 ");
+ test_scan( &requests[POST_CHUNKED_ALL_YOUR_BASE]
+ , &requests[POST_IDENTITY_BODY_WORLD]
+ , &requests[GET_FUNKY_CONTENT_LENGTH]
+ );
+
+ printf("request scan 3/4 ");
+ test_scan( &requests[TWO_CHUNKS_MULT_ZERO_END]
+ , &requests[CHUNKED_W_TRAILING_HEADERS]
+ , &requests[CHUNKED_W_NONSENSE_AFTER_LENGTH]
+ );
+
+ printf("request scan 4/4 ");
+ test_scan( &requests[QUERY_URL_WITH_QUESTION_MARK_GET]
+ , &requests[PREFIX_NEWLINE_GET ]
+ , &requests[CONNECT_REQUEST]
+ );
+
+ puts("requests okay");
+
+ return 0;
+}
diff --git a/third_party/python/ansicon/ansicon-1.89.0.dist-info/LICENSE.txt b/third_party/python/ansicon/ansicon-1.89.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/ansicon/ansicon-1.89.0.dist-info/LICENSE.txt
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/ansicon/ansicon-1.89.0.dist-info/METADATA b/third_party/python/ansicon/ansicon-1.89.0.dist-info/METADATA
new file mode 100644
index 0000000000..2cf0615fb7
--- /dev/null
+++ b/third_party/python/ansicon/ansicon-1.89.0.dist-info/METADATA
@@ -0,0 +1,2 @@
+Name: ansicon
+Version: 1.89.0
diff --git a/third_party/python/ansicon/ansicon-1.89.0.dist-info/RECORD b/third_party/python/ansicon/ansicon-1.89.0.dist-info/RECORD
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/ansicon/ansicon-1.89.0.dist-info/RECORD
diff --git a/third_party/python/ansicon/ansicon-1.89.0.dist-info/WHEEL b/third_party/python/ansicon/ansicon-1.89.0.dist-info/WHEEL
new file mode 100644
index 0000000000..1316c41d07
--- /dev/null
+++ b/third_party/python/ansicon/ansicon-1.89.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.31.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/ansicon/ansicon-1.89.0.dist-info/top_level.txt b/third_party/python/ansicon/ansicon-1.89.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..f32baf242b
--- /dev/null
+++ b/third_party/python/ansicon/ansicon-1.89.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+ansicon
diff --git a/third_party/python/ansicon/ansicon/__init__.py b/third_party/python/ansicon/ansicon/__init__.py
new file mode 100644
index 0000000000..f56c6cfcdf
--- /dev/null
+++ b/third_party/python/ansicon/ansicon/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
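+# This vendored copy is a no-op stub: Dummy.load() does nothing, so callers
+# can import ansicon and call load() unconditionally on any platform.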
+class Dummy(object):
+
+ def __init__(self):
+ pass
+
+ def load(self):
+ pass
+
+
+_ANSICON = Dummy()
+load = _ANSICON.load
diff --git a/third_party/python/appdirs/appdirs-1.4.4.dist-info/LICENSE.txt b/third_party/python/appdirs/appdirs-1.4.4.dist-info/LICENSE.txt
new file mode 100644
index 0000000000..107c61405e
--- /dev/null
+++ b/third_party/python/appdirs/appdirs-1.4.4.dist-info/LICENSE.txt
@@ -0,0 +1,23 @@
+# This is the MIT license
+
+Copyright (c) 2010 ActiveState Software Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/third_party/python/appdirs/appdirs-1.4.4.dist-info/METADATA b/third_party/python/appdirs/appdirs-1.4.4.dist-info/METADATA
new file mode 100644
index 0000000000..f950731044
--- /dev/null
+++ b/third_party/python/appdirs/appdirs-1.4.4.dist-info/METADATA
@@ -0,0 +1,264 @@
+Metadata-Version: 2.1
+Name: appdirs
+Version: 1.4.4
+Summary: A small Python module for determining appropriate platform-specific dirs, e.g. a "user data dir".
+Home-page: http://github.com/ActiveState/appdirs
+Author: Trent Mick
+Author-email: trentm@gmail.com
+Maintainer: Jeff Rouse
+Maintainer-email: jr@its.to
+License: MIT
+Keywords: application directory log cache user
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+
+.. image:: https://secure.travis-ci.org/ActiveState/appdirs.png
+ :target: http://travis-ci.org/ActiveState/appdirs
+
+the problem
+===========
+
+What directory should your app use for storing user data? If running on Mac OS X, you
+should use::
+
+ ~/Library/Application Support/<AppName>
+
+If on Windows (at least English Win XP) that should be::
+
+ C:\Documents and Settings\<User>\Application Data\Local Settings\<AppAuthor>\<AppName>
+
+or possibly::
+
+ C:\Documents and Settings\<User>\Application Data\<AppAuthor>\<AppName>
+
+for `roaming profiles <http://bit.ly/9yl3b6>`_ but that is another story.
+
+On Linux (and other Unices) the dir, according to the `XDG
+spec <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_, is::
+
+ ~/.local/share/<AppName>
+
+
+``appdirs`` to the rescue
+=========================
+
+This kind of thing is what the ``appdirs`` module is for. ``appdirs`` will
+help you choose an appropriate:
+
+- user data dir (``user_data_dir``)
+- user config dir (``user_config_dir``)
+- user cache dir (``user_cache_dir``)
+- site data dir (``site_data_dir``)
+- site config dir (``site_config_dir``)
+- user log dir (``user_log_dir``)
+
+and also:
+
+- is a single module so other Python packages can include their own private copy
+- is slightly opinionated on the directory names used. Look for "OPINION" in
+ documentation and code for when an opinion is being applied.
+
+
+some example output
+===================
+
+On Mac OS X::
+
+ >>> from appdirs import *
+ >>> appname = "SuperApp"
+ >>> appauthor = "Acme"
+ >>> user_data_dir(appname, appauthor)
+ '/Users/trentm/Library/Application Support/SuperApp'
+ >>> site_data_dir(appname, appauthor)
+ '/Library/Application Support/SuperApp'
+ >>> user_cache_dir(appname, appauthor)
+ '/Users/trentm/Library/Caches/SuperApp'
+ >>> user_log_dir(appname, appauthor)
+ '/Users/trentm/Library/Logs/SuperApp'
+
+On Windows 7::
+
+ >>> from appdirs import *
+ >>> appname = "SuperApp"
+ >>> appauthor = "Acme"
+ >>> user_data_dir(appname, appauthor)
+ 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp'
+ >>> user_data_dir(appname, appauthor, roaming=True)
+ 'C:\\Users\\trentm\\AppData\\Roaming\\Acme\\SuperApp'
+ >>> user_cache_dir(appname, appauthor)
+ 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Cache'
+ >>> user_log_dir(appname, appauthor)
+ 'C:\\Users\\trentm\\AppData\\Local\\Acme\\SuperApp\\Logs'
+
+On Linux::
+
+ >>> from appdirs import *
+ >>> appname = "SuperApp"
+ >>> appauthor = "Acme"
+ >>> user_data_dir(appname, appauthor)
+ '/home/trentm/.local/share/SuperApp'
+ >>> site_data_dir(appname, appauthor)
+ '/usr/local/share/SuperApp'
+ >>> site_data_dir(appname, appauthor, multipath=True)
+ '/usr/local/share/SuperApp:/usr/share/SuperApp'
+ >>> user_cache_dir(appname, appauthor)
+ '/home/trentm/.cache/SuperApp'
+ >>> user_log_dir(appname, appauthor)
+ '/home/trentm/.cache/SuperApp/log'
+ >>> user_config_dir(appname)
+ '/home/trentm/.config/SuperApp'
+ >>> site_config_dir(appname)
+ '/etc/xdg/SuperApp'
+ >>> os.environ['XDG_CONFIG_DIRS'] = '/etc:/usr/local/etc'
+ >>> site_config_dir(appname, multipath=True)
+ '/etc/SuperApp:/usr/local/etc/SuperApp'
+
+
+``AppDirs`` for convenience
+===========================
+
+::
+
+ >>> from appdirs import AppDirs
+ >>> dirs = AppDirs("SuperApp", "Acme")
+ >>> dirs.user_data_dir
+ '/Users/trentm/Library/Application Support/SuperApp'
+ >>> dirs.site_data_dir
+ '/Library/Application Support/SuperApp'
+ >>> dirs.user_cache_dir
+ '/Users/trentm/Library/Caches/SuperApp'
+ >>> dirs.user_log_dir
+ '/Users/trentm/Library/Logs/SuperApp'
+
+
+
+Per-version isolation
+=====================
+
+If you have multiple versions of your app in use that you want to be
+able to run side-by-side, then you may want version-isolation for these
+dirs::
+
+ >>> from appdirs import AppDirs
+ >>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
+ >>> dirs.user_data_dir
+ '/Users/trentm/Library/Application Support/SuperApp/1.0'
+ >>> dirs.site_data_dir
+ '/Library/Application Support/SuperApp/1.0'
+ >>> dirs.user_cache_dir
+ '/Users/trentm/Library/Caches/SuperApp/1.0'
+ >>> dirs.user_log_dir
+ '/Users/trentm/Library/Logs/SuperApp/1.0'
+
+
+
+appdirs Changelog
+=================
+
+appdirs 1.4.4
+-------------
+- [PR #92] Don't import appdirs from setup.py
+
+Project officially classified as Stable which is important
+for inclusion in other distros such as ActivePython.
+
+First of several incremental releases to catch up on maintenance.
+
+appdirs 1.4.3
+-------------
+- [PR #76] Python 3.6 invalid escape sequence deprecation fixes
+- Fix for Python 3.6 support
+
+appdirs 1.4.2
+-------------
+- [PR #84] Allow installing without setuptools
+- [PR #86] Fix string delimiters in setup.py description
+- Add Python 3.6 support
+
+appdirs 1.4.1
+-------------
+- [issue #38] Fix _winreg import on Windows Py3
+- [issue #55] Make appname optional
+
+appdirs 1.4.0
+-------------
+- [PR #42] AppAuthor is now optional on Windows
+- [issue 41] Support Jython on Windows, Mac, and Unix-like platforms. Windows
+ support requires `JNA <https://github.com/twall/jna>`_.
+- [PR #44] Fix incorrect behaviour of the site_config_dir method
+
+appdirs 1.3.0
+-------------
+- [Unix, issue 16] Conform to XDG standard, instead of breaking it for
+ everybody
+- [Unix] Removes gratuitous case mangling of the case, since \*nix-es are
+ usually case sensitive, so mangling is not wise
+- [Unix] Fixes the utterly wrong behaviour in ``site_data_dir``, return result
+ based on XDG_DATA_DIRS and make room for respecting the standard which
+ specifies XDG_DATA_DIRS is a multiple-value variable
+- [Issue 6] Add ``*_config_dir`` which are distinct on nix-es, according to
+ XDG specs; on Windows and Mac return the corresponding ``*_data_dir``
+
+appdirs 1.2.0
+-------------
+
+- [Unix] Put ``user_log_dir`` under the *cache* dir on Unix. Seems to be more
+ typical.
+- [issue 9] Make ``unicode`` work on py3k.
+
+appdirs 1.1.0
+-------------
+
+- [issue 4] Add ``AppDirs.user_log_dir``.
+- [Unix, issue 2, issue 7] appdirs now conforms to `XDG base directory spec
+ <http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_.
+- [Mac, issue 5] Fix ``site_data_dir()`` on Mac.
+- [Mac] Drop use of 'Carbon' module in favour of hardcoded paths; supports
+ Python3 now.
+- [Windows] Append "Cache" to ``user_cache_dir`` on Windows by default. Use
+ ``opinion=False`` option to disable this.
+- Add ``appdirs.AppDirs`` convenience class. Usage:
+
+ >>> dirs = AppDirs("SuperApp", "Acme", version="1.0")
+ >>> dirs.user_data_dir
+ '/Users/trentm/Library/Application Support/SuperApp/1.0'
+
+- [Windows] Cherry-pick Komodo's change to downgrade paths to the Windows short
+ paths if there are high bit chars.
+- [Linux] Change default ``user_cache_dir()`` on Linux to be singular, e.g.
+ "~/.superapp/cache".
+- [Windows] Add ``roaming`` option to ``user_data_dir()`` (for use on Windows only)
+ and change the default ``user_data_dir`` behaviour to use a *non*-roaming
+ profile dir (``CSIDL_LOCAL_APPDATA`` instead of ``CSIDL_APPDATA``). Why? Because
+ a large roaming profile can cause login speed issues. The "only syncs on
+ logout" behaviour can cause surprises in appdata info.
+
+
+appdirs 1.0.1 (never released)
+------------------------------
+
+Started this changelog 27 July 2010. Before that this module originated in the
+`Komodo <http://www.activestate.com/komodo>`_ product as ``applib.py`` and then
+as `applib/location.py
+<http://github.com/ActiveState/applib/blob/master/applib/location.py>`_ (used by
+`PyPM <http://code.activestate.com/pypm/>`_ in `ActivePython
+<http://www.activestate.com/activepython>`_). This is basically a fork of
+applib.py 1.0.1 and applib/location.py 1.0.1.
+
+
+
diff --git a/third_party/python/appdirs/appdirs-1.4.4.dist-info/RECORD b/third_party/python/appdirs/appdirs-1.4.4.dist-info/RECORD
new file mode 100644
index 0000000000..9cbb30620e
--- /dev/null
+++ b/third_party/python/appdirs/appdirs-1.4.4.dist-info/RECORD
@@ -0,0 +1,6 @@
+appdirs.py,sha256=g99s2sXhnvTEm79oj4bWI0Toapc-_SmKKNXvOXHkVic,24720
+appdirs-1.4.4.dist-info/LICENSE.txt,sha256=Nt200KdFqTqyAyA9cZCBSxuJcn0lTK_0jHp6-71HAAs,1097
+appdirs-1.4.4.dist-info/METADATA,sha256=k5TVfXMNKGHTfp2wm6EJKTuGwGNuoQR5TqQgH8iwG8M,8981
+appdirs-1.4.4.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+appdirs-1.4.4.dist-info/top_level.txt,sha256=nKncE8CUqZERJ6VuQWL4_bkunSPDNfn7KZqb4Tr5YEM,8
+appdirs-1.4.4.dist-info/RECORD,,
diff --git a/third_party/python/appdirs/appdirs-1.4.4.dist-info/WHEEL b/third_party/python/appdirs/appdirs-1.4.4.dist-info/WHEEL
new file mode 100644
index 0000000000..ef99c6cf32
--- /dev/null
+++ b/third_party/python/appdirs/appdirs-1.4.4.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/appdirs/appdirs-1.4.4.dist-info/top_level.txt b/third_party/python/appdirs/appdirs-1.4.4.dist-info/top_level.txt
new file mode 100644
index 0000000000..d64bc321a1
--- /dev/null
+++ b/third_party/python/appdirs/appdirs-1.4.4.dist-info/top_level.txt
@@ -0,0 +1 @@
+appdirs
diff --git a/third_party/python/appdirs/appdirs.py b/third_party/python/appdirs/appdirs.py
new file mode 100644
index 0000000000..2acd1debeb
--- /dev/null
+++ b/third_party/python/appdirs/appdirs.py
@@ -0,0 +1,608 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+"""Utilities for determining application-specific dirs.
+
+See <http://github.com/ActiveState/appdirs> for details and usage.
+"""
+# Dev Notes:
+# - MSDN on where to store app data files:
+# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version__ = "1.4.4"
+__version_info__ = tuple(int(segment) for segment in __version__.split("."))
+
+
+import sys
+import os
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ unicode = str
+
+if sys.platform.startswith('java'):
+ import platform
+ os_name = platform.java_ver()[3][0]
+ if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
+ system = 'win32'
+ elif os_name.startswith('Mac'): # "Mac OS X", etc.
+ system = 'darwin'
+ else: # "Linux", "SunOS", "FreeBSD", etc.
+ # Setting this to "linux2" is not ideal, but only Windows or Mac
+ # are actually checked for and the rest of the module expects
+ # *sys.platform* style strings.
+ system = 'linux2'
+else:
+ system = sys.platform
+
+
+
+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user data directories are:
+ Mac OS X: ~/Library/Application Support/<AppName>
+ Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
+        Win XP (not roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
+        Win XP (roaming):     C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
+ Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
+ Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
+
+ For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+ That means, by default "~/.local/share/<AppName>".
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
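+        # "x and a or b" is the pre-Python-2.5 conditional idiom:
+        # roaming selects CSIDL_APPDATA, otherwise CSIDL_LOCAL_APPDATA.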
+ const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
+ path = os.path.normpath(_get_win_folder(const))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ elif system == 'darwin':
+ path = os.path.expanduser('~/Library/Application Support/')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
+ r"""Return full path to the user-shared data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "multipath" is an optional parameter only applicable to *nix
+ which indicates that the entire list of data dirs should be
+ returned. By default, the first item from XDG_DATA_DIRS is
+ returned, or '/usr/local/share/<AppName>',
+ if XDG_DATA_DIRS is not set
+
+ Typical site data directories are:
+ Mac OS X: /Library/Application Support/<AppName>
+ Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
+ Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+ Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
+
+    For Unix, this uses the $XDG_DATA_DIRS[0] default.
+
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ elif system == 'darwin':
+ path = os.path.expanduser('/Library/Application Support')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ # XDG default for $XDG_DATA_DIRS
+ # only first, if multipath is False
+ path = os.getenv('XDG_DATA_DIRS',
+ os.pathsep.join(['/usr/local/share', '/usr/share']))
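+        # Split the search path on os.pathsep, expand any '~', and drop
+        # trailing path separators.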
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+ if appname:
+ if version:
+ appname = os.path.join(appname, version)
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+ if multipath:
+ path = os.pathsep.join(pathlist)
+ else:
+ path = pathlist[0]
+ return path
+
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific config dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user config directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+ That means, by default "~/.config/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
+ r"""Return full path to the user-shared data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "multipath" is an optional parameter only applicable to *nix
+ which indicates that the entire list of config dirs should be
+ returned. By default, the first item from XDG_CONFIG_DIRS is
+ returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
+
+ Typical site config directories are:
+ Mac OS X: same as site_data_dir
+ Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
+ $XDG_CONFIG_DIRS
+ Win *: same as site_data_dir
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+
+    For Unix, this uses the $XDG_CONFIG_DIRS[0] default, if multipath=False
+
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+ """
+ if system in ["win32", "darwin"]:
+ path = site_data_dir(appname, appauthor)
+ if appname and version:
+ path = os.path.join(path, version)
+ else:
+ # XDG default for $XDG_CONFIG_DIRS
+ # only first, if multipath is False
+ path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+ if appname:
+ if version:
+ appname = os.path.join(appname, version)
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+ if multipath:
+ path = os.pathsep.join(pathlist)
+ else:
+ path = pathlist[0]
+ return path
+
+
+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
+ r"""Return full path to the user-specific cache dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "opinion" (boolean) can be False to disable the appending of
+ "Cache" to the base app data dir for Windows. See
+ discussion below.
+
+ Typical user cache directories are:
+ Mac OS X: ~/Library/Caches/<AppName>
+ Unix: ~/.cache/<AppName> (XDG default)
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
+
+ On Windows the only suggestion in the MSDN docs is that local settings go in
+ the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
+ app data dir (the default returned by `user_data_dir` above). Apps typically
+ put cache data somewhere *under* the given dir here. Some examples:
+ ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+ ...\Acme\SuperApp\Cache\1.0
+ OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+ This can be disabled with the `opinion=False` option.
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ if opinion:
+ path = os.path.join(path, "Cache")
+ elif system == 'darwin':
+ path = os.path.expanduser('~/Library/Caches')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific state dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user state directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
+ to extend the XDG spec and support $XDG_STATE_HOME.
+
+ That means, by default "~/.local/state/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
+ r"""Return full path to the user-specific log dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "opinion" (boolean) can be False to disable the appending of
+ "Logs" to the base app data dir for Windows, and "log" to the
+ base cache dir for Unix. See discussion below.
+
+ Typical user log directories are:
+ Mac OS X: ~/Library/Logs/<AppName>
+ Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
+
+ On Windows the only suggestion in the MSDN docs is that local settings
+ go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
+ examples of what some windows apps use for a logs dir.)
+
+ OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
+ value for Windows and appends "log" to the user cache dir for Unix.
+ This can be disabled with the `opinion=False` option.
+ """
+ if system == "darwin":
+ path = os.path.join(
+ os.path.expanduser('~/Library/Logs'),
+ appname)
+ elif system == "win32":
+ path = user_data_dir(appname, appauthor, version)
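+        # user_data_dir has already appended the version (if any); clear it
+        # so the generic version handling below does not append it twice.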
+ version = False
+ if opinion:
+ path = os.path.join(path, "Logs")
+ else:
+ path = user_cache_dir(appname, appauthor, version)
+ version = False
+ if opinion:
+ path = os.path.join(path, "log")
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+class AppDirs(object):
+ """Convenience wrapper for getting application dirs."""
+ def __init__(self, appname=None, appauthor=None, version=None,
+ roaming=False, multipath=False):
+ self.appname = appname
+ self.appauthor = appauthor
+ self.version = version
+ self.roaming = roaming
+ self.multipath = multipath
+
+ @property
+ def user_data_dir(self):
+ return user_data_dir(self.appname, self.appauthor,
+ version=self.version, roaming=self.roaming)
+
+ @property
+ def site_data_dir(self):
+ return site_data_dir(self.appname, self.appauthor,
+ version=self.version, multipath=self.multipath)
+
+ @property
+ def user_config_dir(self):
+ return user_config_dir(self.appname, self.appauthor,
+ version=self.version, roaming=self.roaming)
+
+ @property
+ def site_config_dir(self):
+ return site_config_dir(self.appname, self.appauthor,
+ version=self.version, multipath=self.multipath)
+
+ @property
+ def user_cache_dir(self):
+ return user_cache_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
+ def user_state_dir(self):
+ return user_state_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
+ def user_log_dir(self):
+ return user_log_dir(self.appname, self.appauthor,
+ version=self.version)
+
+
+#---- internal support stuff
+
+def _get_win_folder_from_registry(csidl_name):
+ """This is a fallback technique at best. I'm not sure if using the
+ registry for this guarantees us the correct answer for all CSIDL_*
+ names.
+ """
+ if PY3:
+ import winreg as _winreg
+ else:
+ import _winreg
+
+ shell_folder_name = {
+ "CSIDL_APPDATA": "AppData",
+ "CSIDL_COMMON_APPDATA": "Common AppData",
+ "CSIDL_LOCAL_APPDATA": "Local AppData",
+ }[csidl_name]
+
+ key = _winreg.OpenKey(
+ _winreg.HKEY_CURRENT_USER,
+ r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+ )
+ dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+ return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+ from win32com.shell import shellcon, shell
+ dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+ # Try to make this a unicode path because SHGetFolderPath does
+ # not return unicode strings when there is unicode data in the
+ # path.
+ try:
+ dir = unicode(dir)
+
+        # Downgrade to short path name if it has high-bit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in dir:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ try:
+ import win32api
+ dir = win32api.GetShortPathName(dir)
+ except ImportError:
+ pass
+ except UnicodeError:
+ pass
+ return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+ import ctypes
+
+ csidl_const = {
+ "CSIDL_APPDATA": 26,
+ "CSIDL_COMMON_APPDATA": 35,
+ "CSIDL_LOCAL_APPDATA": 28,
+ }[csidl_name]
+
+ buf = ctypes.create_unicode_buffer(1024)
+ ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+    # Downgrade to short path name if it has high-bit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in buf:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ buf2 = ctypes.create_unicode_buffer(1024)
+ if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+ buf = buf2
+
+ return buf.value
+
+def _get_win_folder_with_jna(csidl_name):
+ import array
+ from com.sun import jna
+ from com.sun.jna.platform import win32
+
+ buf_size = win32.WinDef.MAX_PATH * 2
+ buf = array.zeros('c', buf_size)
+ shell = win32.Shell32.INSTANCE
+ shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+    # Downgrade to short path name if it has high-bit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in dir:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ buf = array.zeros('c', buf_size)
+ kernel = win32.Kernel32.INSTANCE
+ if kernel.GetShortPathName(dir, buf, buf_size):
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+ return dir
+
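+# Choose the best available helper for resolving Windows special folders:
+# pywin32 if installed, then ctypes, then JNA (for Jython), falling back to
+# the registry as a last resort.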
+if system == "win32":
+ try:
+ import win32com.shell
+ _get_win_folder = _get_win_folder_with_pywin32
+ except ImportError:
+ try:
+ from ctypes import windll
+ _get_win_folder = _get_win_folder_with_ctypes
+ except ImportError:
+ try:
+ import com.sun.jna
+ _get_win_folder = _get_win_folder_with_jna
+ except ImportError:
+ _get_win_folder = _get_win_folder_from_registry
+
+
+#---- self test code
+
+if __name__ == "__main__":
+ appname = "MyApp"
+ appauthor = "MyCompany"
+
+ props = ("user_data_dir",
+ "user_config_dir",
+ "user_cache_dir",
+ "user_state_dir",
+ "user_log_dir",
+ "site_data_dir",
+ "site_config_dir")
+
+ print("-- app dirs %s --" % __version__)
+
+ print("-- app dirs (with optional 'version')")
+ dirs = AppDirs(appname, appauthor, version="1.0")
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (without optional 'version')")
+ dirs = AppDirs(appname, appauthor)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (without optional 'appauthor')")
+ dirs = AppDirs(appname)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (with disabled 'appauthor')")
+ dirs = AppDirs(appname, appauthor=False)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/LICENSE b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/LICENSE
new file mode 100644
index 0000000000..8dada3edaf
--- /dev/null
+++ b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/METADATA b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/METADATA
new file mode 100644
index 0000000000..5ec05a2785
--- /dev/null
+++ b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/METADATA
@@ -0,0 +1,165 @@
+Metadata-Version: 2.1
+Name: async-timeout
+Version: 3.0.1
+Summary: Timeout context manager for asyncio programs
+Home-page: https://github.com/aio-libs/async_timeout/
+Author: Andrew Svetlov
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Framework :: AsyncIO
+Requires-Python: >=3.5.3
+
+async-timeout
+=============
+.. image:: https://travis-ci.org/aio-libs/async-timeout.svg?branch=master
+ :target: https://travis-ci.org/aio-libs/async-timeout
+.. image:: https://codecov.io/gh/aio-libs/async-timeout/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/async-timeout
+.. image:: https://img.shields.io/pypi/v/async-timeout.svg
+ :target: https://pypi.python.org/pypi/async-timeout
+.. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+asyncio-compatible timeout context manager.
+
+
+Usage example
+-------------
+
+
+The context manager is useful in cases when you want to apply timeout
+logic around a block of code or in cases when ``asyncio.wait_for()`` is
+not suitable. It is also much faster than ``asyncio.wait_for()``
+because ``timeout`` doesn't create a new task.
+
+The ``timeout(timeout, *, loop=None)`` call returns a context manager
+that cancels a block on *timeout* expiring::
+
+ async with timeout(1.5):
+ await inner()
+
+1. If ``inner()`` finishes in under ``1.5`` seconds, nothing
+   happens.
+2. Otherwise ``inner()`` is cancelled internally by sending
+   ``asyncio.CancelledError`` into it, but ``asyncio.TimeoutError`` is
+   raised outside of the context manager scope.
+
+The *timeout* parameter may be ``None`` to skip the timeout functionality.
+
+
+The context manager has an ``.expired`` property for checking whether the
+timeout happened inside the context manager::
+
+ async with timeout(1.5) as cm:
+ await inner()
+ print(cm.expired)
+
+The property is ``True`` if ``inner()``'s execution was cancelled by the
+timeout context manager.
+
+If the ``inner()`` call explicitly raises ``TimeoutError``, ``cm.expired``
+is ``False``.
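+
+For example, a minimal sketch (``inner()`` is a placeholder coroutine, and
+``asyncio`` is assumed to be imported) that catches the ``asyncio.TimeoutError``
+raised outside the block and then inspects ``cm.expired``::
+
+    try:
+        async with timeout(1.5) as cm:
+            await inner()
+    except asyncio.TimeoutError:
+        pass
+    print(cm.expired)  # True if cancelled by the timeout, False otherwise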
+
+Installation
+------------
+
+::
+
+ $ pip install async-timeout
+
+The library is Python 3 only!
+
+
+
+Authors and License
+-------------------
+
+The module is written by Andrew Svetlov.
+
+It's *Apache 2* licensed and freely available.
+
+
+CHANGES
+=======
+
+3.0.1 (2018-10-09)
+------------------
+
+- More aggressive typing (#48)
+
+3.0.0 (2018-05-05)
+------------------
+
+- Drop Python 3.4, the minimal supported version is Python 3.5.3
+
+- Provide type annotations
+
+2.0.1 (2018-03-13)
+------------------
+
+* Fix ``PendingDeprecationWarning`` on Python 3.7 (#33)
+
+
+2.0.0 (2017-10-09)
+------------------
+
+* Changed `timeout <= 0` behaviour
+
+  * Backwards-incompatible change; prior to this version, `0` was a
+    shortcut for `None`
+  * When timeout <= 0, `TimeoutError` is raised faster
+
+1.4.0 (2017-09-09)
+------------------
+
+* Implement `remaining` property (#20)
+
+ * If timeout is not started yet or started unconstrained:
+ `remaining` is `None`
+ * If timeout is expired: `remaining` is `0.0`
+  * All others: roughly the amount of time before `TimeoutError` is triggered
+
+1.3.0 (2017-08-23)
+------------------
+
+* Don't suppress nested exception on timeout. Exception context points
+  to the cancelled line with the suspended `await` (#13)
+
+* Introduce `.timeout` property (#16)
+
+* Add methods for using as async context manager (#9)
+
+1.2.1 (2017-05-02)
+------------------
+
+* Support the unpublished event loop "current_task" API.
+
+
+1.2.0 (2017-03-11)
+------------------
+
+* Extra check on context manager exit
+
+* 0 is no-op timeout
+
+
+1.1.0 (2016-10-20)
+------------------
+
+* Rename to `async-timeout`
+
+1.0.0 (2016-09-09)
+------------------
+
+* The first release.
+
+
diff --git a/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/RECORD b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/RECORD
new file mode 100644
index 0000000000..8979fc35ac
--- /dev/null
+++ b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/RECORD
@@ -0,0 +1,7 @@
+async_timeout/__init__.py,sha256=mGvWOoRqLtScEU3kmzqtTSH7EQsHvu8zhgHxOTXCn7c,3654
+async_timeout/py.typed,sha256=9LJP7QJ0oxYYrBtmXuFirzMbS3D9_3Tz-d3tyUtNp0U,11
+async_timeout-3.0.1.dist-info/LICENSE,sha256=tAkwu8-AdEyGxGoSvJ2gVmQdcicWw3j1ZZueVV74M-E,11357
+async_timeout-3.0.1.dist-info/METADATA,sha256=_3ByJ8L0-cU5wWu75_Rl8n0ZkbSCgW15fMAu_DzwTm0,4013
+async_timeout-3.0.1.dist-info/WHEEL,sha256=-ZFxwj8mZJPIVcZGLrsQ8UGRcxVAOExzPLVBGR7u7bE,92
+async_timeout-3.0.1.dist-info/top_level.txt,sha256=9oM4e7Twq8iD_7_Q3Mz0E6GPIB6vJvRFo-UBwUQtBDU,14
+async_timeout-3.0.1.dist-info/RECORD,,
diff --git a/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/WHEEL b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/WHEEL
new file mode 100644
index 0000000000..f87af075c0
--- /dev/null
+++ b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.32.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/top_level.txt b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..ad29955ef9
--- /dev/null
+++ b/third_party/python/async_timeout/async_timeout-3.0.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+async_timeout
diff --git a/third_party/python/async_timeout/async_timeout/__init__.py b/third_party/python/async_timeout/async_timeout/__init__.py
new file mode 100644
index 0000000000..dcc55f0cea
--- /dev/null
+++ b/third_party/python/async_timeout/async_timeout/__init__.py
@@ -0,0 +1,115 @@
+import asyncio
+import sys
+
+from types import TracebackType
+from typing import Optional, Type, Any # noqa
+
+
+__version__ = '3.0.1'
+
+PY_37 = sys.version_info >= (3, 7)
+
+
+class timeout:
+ """timeout context manager.
+
+    Useful in cases when you want to apply timeout logic around a block
+    of code or in cases when asyncio.wait_for is not suitable. For example:
+
+ >>> with timeout(0.001):
+ ... async with aiohttp.get('https://github.com') as r:
+ ... await r.text()
+
+
+ timeout - value in seconds or None to disable timeout logic
+ loop - asyncio compatible event loop
+ """
+ def __init__(self, timeout: Optional[float],
+ *, loop: Optional[asyncio.AbstractEventLoop] = None) -> None:
+ self._timeout = timeout
+ if loop is None:
+ loop = asyncio.get_event_loop()
+ self._loop = loop
+ self._task = None # type: Optional[asyncio.Task[Any]]
+ self._cancelled = False
+ self._cancel_handler = None # type: Optional[asyncio.Handle]
+ self._cancel_at = None # type: Optional[float]
+
+ def __enter__(self) -> 'timeout':
+ return self._do_enter()
+
+ def __exit__(self,
+ exc_type: Type[BaseException],
+ exc_val: BaseException,
+ exc_tb: TracebackType) -> Optional[bool]:
+ self._do_exit(exc_type)
+ return None
+
+ async def __aenter__(self) -> 'timeout':
+ return self._do_enter()
+
+ async def __aexit__(self,
+ exc_type: Type[BaseException],
+ exc_val: BaseException,
+ exc_tb: TracebackType) -> None:
+ self._do_exit(exc_type)
+
+ @property
+ def expired(self) -> bool:
+ return self._cancelled
+
+ @property
+ def remaining(self) -> Optional[float]:
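+        # None until a deadline has been scheduled, e.g. before entering the
+        # context or when the timeout is disabled with None.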
+ if self._cancel_at is not None:
+ return max(self._cancel_at - self._loop.time(), 0.0)
+ else:
+ return None
+
+ def _do_enter(self) -> 'timeout':
+        # Support Tornado <5 without a timeout
+ # Details: https://github.com/python/asyncio/issues/392
+ if self._timeout is None:
+ return self
+
+ self._task = current_task(self._loop)
+ if self._task is None:
+ raise RuntimeError('Timeout context manager should be used '
+ 'inside a task')
+
+ if self._timeout <= 0:
+ self._loop.call_soon(self._cancel_task)
+ return self
+
+ self._cancel_at = self._loop.time() + self._timeout
+ self._cancel_handler = self._loop.call_at(
+ self._cancel_at, self._cancel_task)
+ return self
+
+ def _do_exit(self, exc_type: Type[BaseException]) -> None:
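+        # If our own cancellation reached __exit__, translate the
+        # CancelledError into TimeoutError for the caller.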
+ if exc_type is asyncio.CancelledError and self._cancelled:
+ self._cancel_handler = None
+ self._task = None
+ raise asyncio.TimeoutError
+ if self._timeout is not None and self._cancel_handler is not None:
+ self._cancel_handler.cancel()
+ self._cancel_handler = None
+ self._task = None
+ return None
+
+ def _cancel_task(self) -> None:
+ if self._task is not None:
+ self._task.cancel()
+ self._cancelled = True
+
+
+def current_task(loop: asyncio.AbstractEventLoop) -> 'asyncio.Task[Any]':
+ if PY_37:
+ task = asyncio.current_task(loop=loop) # type: ignore
+ else:
+ task = asyncio.Task.current_task(loop=loop)
+ if task is None:
+ # this should be removed, tokio must use register_task and family API
+ if hasattr(loop, 'current_task'):
+ task = loop.current_task() # type: ignore
+
+ return task
diff --git a/third_party/python/async_timeout/async_timeout/py.typed b/third_party/python/async_timeout/async_timeout/py.typed
new file mode 100644
index 0000000000..f6e0339af6
--- /dev/null
+++ b/third_party/python/async_timeout/async_timeout/py.typed
@@ -0,0 +1 @@
+Placeholder \ No newline at end of file
diff --git a/third_party/python/attrs/attr/__init__.py b/third_party/python/attrs/attr/__init__.py
new file mode 100644
index 0000000000..7cfa792f74
--- /dev/null
+++ b/third_party/python/attrs/attr/__init__.py
@@ -0,0 +1,132 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Classes Without Boilerplate
+"""
+
+from functools import partial
+from typing import Callable
+
+from . import converters, exceptions, filters, setters, validators
+from ._cmp import cmp_using
+from ._config import get_run_validators, set_run_validators
+from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types
+from ._make import (
+ NOTHING,
+ Attribute,
+ Factory,
+ attrib,
+ attrs,
+ fields,
+ fields_dict,
+ make_class,
+ validate,
+)
+from ._next_gen import define, field, frozen, mutable
+from ._version_info import VersionInfo
+
+
+s = attributes = attrs
+ib = attr = attrib
+dataclass = partial(attrs, auto_attribs=True) # happy Easter ;)
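+# ``attr.dataclass`` is thus equivalent to ``@attr.s(auto_attribs=True)``:
+#
+#     @attr.dataclass
+#     class Point:
+#         x: int
+#         y: int = 0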
+
+
+class AttrsInstance:
+ pass
+
+
+__all__ = [
+ "Attribute",
+ "AttrsInstance",
+ "Factory",
+ "NOTHING",
+ "asdict",
+ "assoc",
+ "astuple",
+ "attr",
+ "attrib",
+ "attributes",
+ "attrs",
+ "cmp_using",
+ "converters",
+ "define",
+ "evolve",
+ "exceptions",
+ "field",
+ "fields",
+ "fields_dict",
+ "filters",
+ "frozen",
+ "get_run_validators",
+ "has",
+ "ib",
+ "make_class",
+ "mutable",
+ "resolve_types",
+ "s",
+ "set_run_validators",
+ "setters",
+ "validate",
+ "validators",
+]
+
+
+def _make_getattr(mod_name: str) -> Callable:
+ """
+ Create a metadata proxy for packaging information that uses *mod_name* in
+ its warnings and errors.
+ """
+
+ def __getattr__(name: str) -> str:
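+        # Map dunder names to importlib.metadata field names; entries with an
+        # empty value are resolved by the special cases further below.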
+ dunder_to_metadata = {
+ "__title__": "Name",
+ "__copyright__": "",
+ "__version__": "version",
+ "__version_info__": "version",
+ "__description__": "summary",
+ "__uri__": "",
+ "__url__": "",
+ "__author__": "",
+ "__email__": "",
+ "__license__": "license",
+ }
+ if name not in dunder_to_metadata.keys():
+ raise AttributeError(f"module {mod_name} has no attribute {name}")
+
+ import sys
+ import warnings
+
+ if sys.version_info < (3, 8):
+ from importlib_metadata import metadata
+ else:
+ from importlib.metadata import metadata
+
+ if name != "__version_info__":
+ warnings.warn(
+ f"Accessing {mod_name}.{name} is deprecated and will be "
+ "removed in a future release. Use importlib.metadata directly "
+ "to query for attrs's packaging metadata.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ meta = metadata("attrs")
+ if name == "__license__":
+ return "MIT"
+ elif name == "__copyright__":
+ return "Copyright (c) 2015 Hynek Schlawack"
+ elif name in ("__uri__", "__url__"):
+ return meta["Project-URL"].split(" ", 1)[-1]
+ elif name == "__version_info__":
+ return VersionInfo._from_version_string(meta["version"])
+ elif name == "__author__":
+ return meta["Author-email"].rsplit(" ", 1)[0]
+ elif name == "__email__":
+ return meta["Author-email"].rsplit("<", 1)[1][:-1]
+
+ return meta[dunder_to_metadata[name]]
+
+ return __getattr__
+
+
+__getattr__ = _make_getattr(__name__)
diff --git a/third_party/python/attrs/attr/__init__.pyi b/third_party/python/attrs/attr/__init__.pyi
new file mode 100644
index 0000000000..ced5a3fd40
--- /dev/null
+++ b/third_party/python/attrs/attr/__init__.pyi
@@ -0,0 +1,571 @@
+import enum
+import sys
+
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generic,
+ List,
+ Mapping,
+ Optional,
+ Protocol,
+ Sequence,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ overload,
+)
+
+# `import X as X` is required to make these public
+from . import converters as converters
+from . import exceptions as exceptions
+from . import filters as filters
+from . import setters as setters
+from . import validators as validators
+from ._cmp import cmp_using as cmp_using
+from ._typing_compat import AttrsInstance_
+from ._version_info import VersionInfo
+
+if sys.version_info >= (3, 10):
+ from typing import TypeGuard
+else:
+ from typing_extensions import TypeGuard
+
+__version__: str
+__version_info__: VersionInfo
+__title__: str
+__description__: str
+__url__: str
+__uri__: str
+__author__: str
+__email__: str
+__license__: str
+__copyright__: str
+
+_T = TypeVar("_T")
+_C = TypeVar("_C", bound=type)
+
+_EqOrderType = Union[bool, Callable[[Any], Any]]
+_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any]
+_ConverterType = Callable[[Any], Any]
+_FilterType = Callable[["Attribute[_T]", _T], bool]
+_ReprType = Callable[[Any], str]
+_ReprArgType = Union[bool, _ReprType]
+_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any]
+_OnSetAttrArgType = Union[
+ _OnSetAttrType, List[_OnSetAttrType], setters._NoOpType
+]
+_FieldTransformer = Callable[
+ [type, List["Attribute[Any]"]], List["Attribute[Any]"]
+]
+# FIXME: in reality, if multiple validators are passed they must be in a list
+# or tuple, but those are invariant and so would prevent subtypes of
+# _ValidatorType from working when passed in a list or tuple.
+_ValidatorArgType = Union[_ValidatorType[_T], Sequence[_ValidatorType[_T]]]
+
+# We subclass this here to keep the protocol's qualified name clean.
+class AttrsInstance(AttrsInstance_, Protocol):
+ pass
+
+_A = TypeVar("_A", bound=AttrsInstance)
+# _make --
+
+class _Nothing(enum.Enum):
+ NOTHING = enum.auto()
+
+NOTHING = _Nothing.NOTHING
+
+# NOTE: Factory lies about its return type to make this possible:
+# `x: List[int] # = Factory(list)`
+# Work around mypy issue #4554 in the common case by using an overload.
+if sys.version_info >= (3, 8):
+ from typing import Literal
+ @overload
+ def Factory(factory: Callable[[], _T]) -> _T: ...
+ @overload
+ def Factory(
+ factory: Callable[[Any], _T],
+ takes_self: Literal[True],
+ ) -> _T: ...
+ @overload
+ def Factory(
+ factory: Callable[[], _T],
+ takes_self: Literal[False],
+ ) -> _T: ...
+
+else:
+ @overload
+ def Factory(factory: Callable[[], _T]) -> _T: ...
+ @overload
+ def Factory(
+ factory: Union[Callable[[Any], _T], Callable[[], _T]],
+ takes_self: bool = ...,
+ ) -> _T: ...
+
+# Static type inference support via __dataclass_transform__ implemented as per:
+# https://github.com/microsoft/pyright/blob/1.1.135/specs/dataclass_transforms.md
+# This annotation must be applied to all overloads of "define" and "attrs"
+#
+# NOTE: This is a typing construct and does not exist at runtime. Extensions
+# wrapping attrs decorators should declare a separate __dataclass_transform__
+# signature in the extension module using the specification linked above to
+# provide pyright support.
+def __dataclass_transform__(
+ *,
+ eq_default: bool = True,
+ order_default: bool = False,
+ kw_only_default: bool = False,
+ frozen_default: bool = False,
+    field_descriptors: Tuple[Union[type, Callable[..., Any]], ...] = (),
+) -> Callable[[_T], _T]: ...
+
+class Attribute(Generic[_T]):
+ name: str
+ default: Optional[_T]
+ validator: Optional[_ValidatorType[_T]]
+ repr: _ReprArgType
+ cmp: _EqOrderType
+ eq: _EqOrderType
+ order: _EqOrderType
+ hash: Optional[bool]
+ init: bool
+ converter: Optional[_ConverterType]
+ metadata: Dict[Any, Any]
+ type: Optional[Type[_T]]
+ kw_only: bool
+ on_setattr: _OnSetAttrType
+ alias: Optional[str]
+
+ def evolve(self, **changes: Any) -> "Attribute[Any]": ...
+
+# NOTE: We had several choices for the annotation to use for type arg:
+# 1) Type[_T]
+# - Pros: Handles simple cases correctly
+# - Cons: Might produce less informative errors in the case of conflicting
+# TypeVars e.g. `attr.ib(default='bad', type=int)`
+# 2) Callable[..., _T]
+# - Pros: Better error messages than #1 for conflicting TypeVars
+# - Cons: Terrible error messages for validator checks.
+# e.g. attr.ib(type=int, validator=validate_str)
+# -> error: Cannot infer function type argument
+# 3) type (and do all of the work in the mypy plugin)
+# - Pros: Simple here, and we could customize the plugin with our own errors.
+# - Cons: Would need to write mypy plugin code to handle all the cases.
+# We chose option #1.
+
+# `attr` lies about its return type to make the following possible:
+# attr() -> Any
+# attr(8) -> int
+# attr(validator=<some callable>) -> Whatever the callable expects.
+# This makes this type of assignments possible:
+# x: int = attr(8)
+#
+# This form catches explicit None or no default but with no other arguments
+# returns Any.
+@overload
+def attrib(
+ default: None = ...,
+ validator: None = ...,
+ repr: _ReprArgType = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ type: None = ...,
+ converter: None = ...,
+ factory: None = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ alias: Optional[str] = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def attrib(
+ default: None = ...,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ type: Optional[Type[_T]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ alias: Optional[str] = ...,
+) -> _T: ...
+
+# This form catches an explicit default argument.
+@overload
+def attrib(
+ default: _T,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ type: Optional[Type[_T]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ alias: Optional[str] = ...,
+) -> _T: ...
+
+# This form covers type=non-Type: e.g. forward references (str), Any
+@overload
+def attrib(
+ default: Optional[_T] = ...,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ type: object = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ alias: Optional[str] = ...,
+) -> Any: ...
+@overload
+def field(
+ *,
+ default: None = ...,
+ validator: None = ...,
+ repr: _ReprArgType = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ converter: None = ...,
+ factory: None = ...,
+ kw_only: bool = ...,
+ eq: Optional[bool] = ...,
+ order: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ alias: Optional[str] = ...,
+ type: Optional[type] = ...,
+) -> Any: ...
+
+# This form catches an explicit None or no default and infers the type from the
+# other arguments.
+@overload
+def field(
+ *,
+ default: None = ...,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ alias: Optional[str] = ...,
+ type: Optional[type] = ...,
+) -> _T: ...
+
+# This form catches an explicit default argument.
+@overload
+def field(
+ *,
+ default: _T,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ alias: Optional[str] = ...,
+ type: Optional[type] = ...,
+) -> _T: ...
+
+# This form covers type=non-Type: e.g. forward references (str), Any
+@overload
+def field(
+ *,
+ default: Optional[_T] = ...,
+ validator: Optional[_ValidatorArgType[_T]] = ...,
+ repr: _ReprArgType = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ metadata: Optional[Mapping[Any, Any]] = ...,
+ converter: Optional[_ConverterType] = ...,
+ factory: Optional[Callable[[], _T]] = ...,
+ kw_only: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ alias: Optional[str] = ...,
+ type: Optional[type] = ...,
+) -> Any: ...
+@overload
+@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
+def attrs(
+ maybe_cls: _C,
+ these: Optional[Dict[str, Any]] = ...,
+ repr_ns: Optional[str] = ...,
+ repr: bool = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ auto_detect: bool = ...,
+ collect_by_mro: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+ unsafe_hash: Optional[bool] = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(order_default=True, field_descriptors=(attrib, field))
+def attrs(
+ maybe_cls: None = ...,
+ these: Optional[Dict[str, Any]] = ...,
+ repr_ns: Optional[str] = ...,
+ repr: bool = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ auto_detect: bool = ...,
+ collect_by_mro: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+ unsafe_hash: Optional[bool] = ...,
+) -> Callable[[_C], _C]: ...
+@overload
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(
+ maybe_cls: _C,
+ *,
+ these: Optional[Dict[str, Any]] = ...,
+ repr: bool = ...,
+ unsafe_hash: Optional[bool] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[bool] = ...,
+ order: Optional[bool] = ...,
+ auto_detect: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(field_descriptors=(attrib, field))
+def define(
+ maybe_cls: None = ...,
+ *,
+ these: Optional[Dict[str, Any]] = ...,
+ repr: bool = ...,
+ unsafe_hash: Optional[bool] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[bool] = ...,
+ order: Optional[bool] = ...,
+ auto_detect: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+
+mutable = define
+
+@overload
+@__dataclass_transform__(
+ frozen_default=True, field_descriptors=(attrib, field)
+)
+def frozen(
+ maybe_cls: _C,
+ *,
+ these: Optional[Dict[str, Any]] = ...,
+ repr: bool = ...,
+ unsafe_hash: Optional[bool] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[bool] = ...,
+ order: Optional[bool] = ...,
+ auto_detect: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+) -> _C: ...
+@overload
+@__dataclass_transform__(
+ frozen_default=True, field_descriptors=(attrib, field)
+)
+def frozen(
+ maybe_cls: None = ...,
+ *,
+ these: Optional[Dict[str, Any]] = ...,
+ repr: bool = ...,
+ unsafe_hash: Optional[bool] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[bool] = ...,
+ order: Optional[bool] = ...,
+ auto_detect: bool = ...,
+ getstate_setstate: Optional[bool] = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+ match_args: bool = ...,
+) -> Callable[[_C], _C]: ...
+def fields(cls: Type[AttrsInstance]) -> Any: ...
+def fields_dict(cls: Type[AttrsInstance]) -> Dict[str, Attribute[Any]]: ...
+def validate(inst: AttrsInstance) -> None: ...
+def resolve_types(
+ cls: _A,
+ globalns: Optional[Dict[str, Any]] = ...,
+ localns: Optional[Dict[str, Any]] = ...,
+ attribs: Optional[List[Attribute[Any]]] = ...,
+ include_extras: bool = ...,
+) -> _A: ...
+
+# TODO: add support for returning a proper attrs class from the mypy plugin
+# we use Any instead of _CountingAttr so that e.g. `make_class('Foo',
+# [attr.ib()])` is valid
+def make_class(
+ name: str,
+ attrs: Union[List[str], Tuple[str, ...], Dict[str, Any]],
+ bases: Tuple[type, ...] = ...,
+ repr_ns: Optional[str] = ...,
+ repr: bool = ...,
+ cmp: Optional[_EqOrderType] = ...,
+ hash: Optional[bool] = ...,
+ init: bool = ...,
+ slots: bool = ...,
+ frozen: bool = ...,
+ weakref_slot: bool = ...,
+ str: bool = ...,
+ auto_attribs: bool = ...,
+ kw_only: bool = ...,
+ cache_hash: bool = ...,
+ auto_exc: bool = ...,
+ eq: Optional[_EqOrderType] = ...,
+ order: Optional[_EqOrderType] = ...,
+ collect_by_mro: bool = ...,
+ on_setattr: Optional[_OnSetAttrArgType] = ...,
+ field_transformer: Optional[_FieldTransformer] = ...,
+) -> type: ...
+
+# _funcs --
+
+# TODO: add support for returning TypedDict from the mypy plugin
+# FIXME: asdict/astuple do not honor their factory args. Waiting on one of
+# these:
+# https://github.com/python/mypy/issues/4236
+# https://github.com/python/typing/issues/253
+# XXX: remember to fix attrs.asdict/astuple too!
+def asdict(
+ inst: AttrsInstance,
+ recurse: bool = ...,
+ filter: Optional[_FilterType[Any]] = ...,
+ dict_factory: Type[Mapping[Any, Any]] = ...,
+ retain_collection_types: bool = ...,
+ value_serializer: Optional[
+ Callable[[type, Attribute[Any], Any], Any]
+ ] = ...,
+ tuple_keys: Optional[bool] = ...,
+) -> Dict[str, Any]: ...
+
+# TODO: add support for returning NamedTuple from the mypy plugin
+def astuple(
+ inst: AttrsInstance,
+ recurse: bool = ...,
+ filter: Optional[_FilterType[Any]] = ...,
+ tuple_factory: Type[Sequence[Any]] = ...,
+ retain_collection_types: bool = ...,
+) -> Tuple[Any, ...]: ...
+def has(cls: type) -> TypeGuard[Type[AttrsInstance]]: ...
+def assoc(inst: _T, **changes: Any) -> _T: ...
+def evolve(inst: _T, **changes: Any) -> _T: ...
+
+# _config --
+
+def set_run_validators(run: bool) -> None: ...
+def get_run_validators() -> bool: ...
+
+# aliases --
+
+s = attributes = attrs
+ib = attr = attrib
+dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;)
diff --git a/third_party/python/attrs/attr/_cmp.py b/third_party/python/attrs/attr/_cmp.py
new file mode 100644
index 0000000000..d9cbe22cde
--- /dev/null
+++ b/third_party/python/attrs/attr/_cmp.py
@@ -0,0 +1,155 @@
+# SPDX-License-Identifier: MIT
+
+
+import functools
+import types
+
+from ._make import _make_ne
+
+
+_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
+
+
+def cmp_using(
+ eq=None,
+ lt=None,
+ le=None,
+ gt=None,
+ ge=None,
+ require_same_type=True,
+ class_name="Comparable",
+):
+ """
+ Create a class that can be passed into `attrs.field`'s ``eq``, ``order``,
+ and ``cmp`` arguments to customize field comparison.
+
+    The resulting class will have a full set of ordering methods if ``eq``
+    and at least one of ``{lt, le, gt, ge}`` are provided.
+
+ :param Optional[callable] eq: `callable` used to evaluate equality of two
+ objects.
+ :param Optional[callable] lt: `callable` used to evaluate whether one
+ object is less than another object.
+ :param Optional[callable] le: `callable` used to evaluate whether one
+ object is less than or equal to another object.
+ :param Optional[callable] gt: `callable` used to evaluate whether one
+ object is greater than another object.
+ :param Optional[callable] ge: `callable` used to evaluate whether one
+ object is greater than or equal to another object.
+
+ :param bool require_same_type: When `True`, equality and ordering methods
+ will return `NotImplemented` if objects are not of the same type.
+
+ :param Optional[str] class_name: Name of class. Defaults to 'Comparable'.
+
+ See `comparison` for more details.
+
+ .. versionadded:: 21.1.0
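+
+    For example, a minimal illustrative sketch that compares floats
+    approximately (the ``math.isclose`` tolerance is an arbitrary choice for
+    demonstration, not part of this API)::
+
+        >>> import math
+        >>> ApproxFloat = cmp_using(eq=lambda a, b: math.isclose(a, b))
+        >>> ApproxFloat(1.0) == ApproxFloat(1.0 + 1e-12)
+        True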
+ """
+
+ body = {
+ "__slots__": ["value"],
+ "__init__": _make_init(),
+ "_requirements": [],
+ "_is_comparable_to": _is_comparable_to,
+ }
+
+ # Add operations.
+ num_order_functions = 0
+ has_eq_function = False
+
+ if eq is not None:
+ has_eq_function = True
+ body["__eq__"] = _make_operator("eq", eq)
+ body["__ne__"] = _make_ne()
+
+ if lt is not None:
+ num_order_functions += 1
+ body["__lt__"] = _make_operator("lt", lt)
+
+ if le is not None:
+ num_order_functions += 1
+ body["__le__"] = _make_operator("le", le)
+
+ if gt is not None:
+ num_order_functions += 1
+ body["__gt__"] = _make_operator("gt", gt)
+
+ if ge is not None:
+ num_order_functions += 1
+ body["__ge__"] = _make_operator("ge", ge)
+
+ type_ = types.new_class(
+ class_name, (object,), {}, lambda ns: ns.update(body)
+ )
+
+ # Add same type requirement.
+ if require_same_type:
+ type_._requirements.append(_check_same_type)
+
+ # Add total ordering if at least one operation was defined.
+ if 0 < num_order_functions < 4:
+ if not has_eq_function:
+            # functools.total_ordering requires __eq__ to be defined,
+            # so raise an early error here to keep a nice stack.
+            raise ValueError(
+                "eq must be defined in order to complete ordering from "
+                "lt, le, gt, ge."
+            )
+ type_ = functools.total_ordering(type_)
+
+ return type_
+
+
+def _make_init():
+ """
+ Create __init__ method.
+ """
+
+ def __init__(self, value):
+ """
+ Initialize object with *value*.
+ """
+ self.value = value
+
+ return __init__
+
+
+def _make_operator(name, func):
+ """
+ Create operator method.
+ """
+
+ def method(self, other):
+ if not self._is_comparable_to(other):
+ return NotImplemented
+
+ result = func(self.value, other.value)
+ if result is NotImplemented:
+ return NotImplemented
+
+ return result
+
+ method.__name__ = f"__{name}__"
+ method.__doc__ = (
+ f"Return a {_operation_names[name]} b. Computed by attrs."
+ )
+
+ return method
+
+
+def _is_comparable_to(self, other):
+ """
+ Check whether `other` is comparable to `self`.
+ """
+ for func in self._requirements:
+ if not func(self, other):
+ return False
+ return True
+
+
+def _check_same_type(self, other):
+ """
+ Return True if *self* and *other* are of the same type, False otherwise.
+ """
+ return other.value.__class__ is self.value.__class__
diff --git a/third_party/python/attrs/attr/_cmp.pyi b/third_party/python/attrs/attr/_cmp.pyi
new file mode 100644
index 0000000000..f3dcdc1a75
--- /dev/null
+++ b/third_party/python/attrs/attr/_cmp.pyi
@@ -0,0 +1,13 @@
+from typing import Any, Callable, Optional, Type
+
+_CompareWithType = Callable[[Any, Any], bool]
+
+def cmp_using(
+ eq: Optional[_CompareWithType] = ...,
+ lt: Optional[_CompareWithType] = ...,
+ le: Optional[_CompareWithType] = ...,
+ gt: Optional[_CompareWithType] = ...,
+ ge: Optional[_CompareWithType] = ...,
+ require_same_type: bool = ...,
+ class_name: str = ...,
+) -> Type: ...
diff --git a/third_party/python/attrs/attr/_compat.py b/third_party/python/attrs/attr/_compat.py
new file mode 100644
index 0000000000..c3bf5e33ba
--- /dev/null
+++ b/third_party/python/attrs/attr/_compat.py
@@ -0,0 +1,185 @@
+# SPDX-License-Identifier: MIT
+
+
+import inspect
+import platform
+import sys
+import threading
+import types
+import warnings
+
+from collections.abc import Mapping, Sequence # noqa
+from typing import _GenericAlias
+
+
+PYPY = platform.python_implementation() == "PyPy"
+PY_3_9_PLUS = sys.version_info[:2] >= (3, 9)
+PY310 = sys.version_info[:2] >= (3, 10)
+PY_3_12_PLUS = sys.version_info[:2] >= (3, 12)
+
+
+def just_warn(*args, **kw):
+ warnings.warn(
+ "Running interpreter doesn't sufficiently support code object "
+ "introspection. Some features like bare super() or accessing "
+ "__class__ will not work with slotted classes.",
+ RuntimeWarning,
+ stacklevel=2,
+ )
+
+
+class _AnnotationExtractor:
+ """
+ Extract type annotations from a callable, returning None whenever there
+ is none.
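+
+    A small illustrative example (``conv`` is a hypothetical converter, not
+    part of this module)::
+
+        >>> def conv(x: str) -> int:
+        ...     return int(x)
+        >>> _AnnotationExtractor(conv).get_first_param_type()
+        <class 'str'>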
+ """
+
+ __slots__ = ["sig"]
+
+ def __init__(self, callable):
+ try:
+ self.sig = inspect.signature(callable)
+ except (ValueError, TypeError): # inspect failed
+ self.sig = None
+
+ def get_first_param_type(self):
+ """
+ Return the type annotation of the first argument if it's not empty.
+ """
+ if not self.sig:
+ return None
+
+ params = list(self.sig.parameters.values())
+ if params and params[0].annotation is not inspect.Parameter.empty:
+ return params[0].annotation
+
+ return None
+
+ def get_return_type(self):
+ """
+ Return the return type if it's not empty.
+ """
+ if (
+ self.sig
+ and self.sig.return_annotation is not inspect.Signature.empty
+ ):
+ return self.sig.return_annotation
+
+ return None
+
+
+def make_set_closure_cell():
+ """Return a function of two arguments (cell, value) which sets
+ the value stored in the closure cell `cell` to `value`.
+ """
+ # pypy makes this easy. (It also supports the logic below, but
+ # why not do the easy/fast thing?)
+ if PYPY:
+
+ def set_closure_cell(cell, value):
+ cell.__setstate__((value,))
+
+ return set_closure_cell
+
+ # Otherwise gotta do it the hard way.
+
+ try:
+ if sys.version_info >= (3, 8):
+
+ def set_closure_cell(cell, value):
+ cell.cell_contents = value
+
+ else:
+ # Create a function that will set its first cellvar to `value`.
+ def set_first_cellvar_to(value):
+ x = value
+ return
+
+ # This function will be eliminated as dead code, but
+ # not before its reference to `x` forces `x` to be
+ # represented as a closure cell rather than a local.
+ def force_x_to_be_a_cell(): # pragma: no cover
+ return x
+
+ # Extract the code object and make sure our assumptions about
+ # the closure behavior are correct.
+ co = set_first_cellvar_to.__code__
+ if co.co_cellvars != ("x",) or co.co_freevars != ():
+ raise AssertionError # pragma: no cover
+
+ # Convert this code object to a code object that sets the
+ # function's first _freevar_ (not cellvar) to the argument.
+ args = [co.co_argcount]
+ args.append(co.co_kwonlyargcount)
+ args.extend(
+ [
+ co.co_nlocals,
+ co.co_stacksize,
+ co.co_flags,
+ co.co_code,
+ co.co_consts,
+ co.co_names,
+ co.co_varnames,
+ co.co_filename,
+ co.co_name,
+ co.co_firstlineno,
+ co.co_lnotab,
+ # These two arguments are reversed:
+ co.co_cellvars,
+ co.co_freevars,
+ ]
+ )
+ set_first_freevar_code = types.CodeType(*args)
+
+ def set_closure_cell(cell, value):
+ # Create a function using the set_first_freevar_code,
+ # whose first closure cell is `cell`. Calling it will
+ # change the value of that cell.
+ setter = types.FunctionType(
+ set_first_freevar_code, {}, "setter", (), (cell,)
+ )
+ # And call it to set the cell.
+ setter(value)
+
+ # Make sure it works on this interpreter:
+ def make_func_with_cell():
+ x = None
+
+ def func():
+ return x # pragma: no cover
+
+ return func
+
+ cell = make_func_with_cell().__closure__[0]
+ set_closure_cell(cell, 100)
+ if cell.cell_contents != 100:
+ raise AssertionError # pragma: no cover
+
+ except Exception:
+ return just_warn
+ else:
+ return set_closure_cell
+
+
+set_closure_cell = make_set_closure_cell()
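+
+# Example (illustrative): given a closure cell such as
+# ``cell = (lambda x: (lambda: x))(1).__closure__[0]``, calling
+# ``set_closure_cell(cell, 2)`` rebinds the closed-over value to 2 (or merely
+# warns if the interpreter doesn't support the technique).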
+
+# Thread-local global to track attrs instances which are already being repr'd.
+# This is needed because there is no other (thread-safe) way to pass info
+# about the instances that are already being repr'd through the call stack
+# in order to ensure we don't perform infinite recursion.
+#
+# For instance, if an instance contains a dict which contains that instance,
+# we need to know that we're already repr'ing the outside instance from within
+# the dict's repr() call.
+#
+# This lives here rather than in _make.py so that the functions in _make.py
+# don't have a direct reference to the thread-local in their globals dict.
+# If they have such a reference, it breaks cloudpickle.
+repr_context = threading.local()
+
+
+def get_generic_base(cl):
+ """If this is a generic class (A[str]), return the generic base for it."""
+ if cl.__class__ is _GenericAlias:
+ return cl.__origin__
+ return None
diff --git a/third_party/python/attrs/attr/_config.py b/third_party/python/attrs/attr/_config.py
new file mode 100644
index 0000000000..96d4200773
--- /dev/null
+++ b/third_party/python/attrs/attr/_config.py
@@ -0,0 +1,31 @@
+# SPDX-License-Identifier: MIT
+
+
+__all__ = ["set_run_validators", "get_run_validators"]
+
+_run_validators = True
+
+
+def set_run_validators(run):
+ """
+ Set whether or not validators are run. By default, they are run.
+
+ .. deprecated:: 21.3.0 It will not be removed, but it also will not be
+        moved to the new ``attrs`` namespace. Use
+        `attrs.validators.set_disabled()` instead.
+ """
+ if not isinstance(run, bool):
+ raise TypeError("'run' must be bool.")
+ global _run_validators
+ _run_validators = run
+
+
+def get_run_validators():
+ """
+ Return whether or not validators are run.
+
+ .. deprecated:: 21.3.0 It will not be removed, but it also will not be
+        moved to the new ``attrs`` namespace. Use
+        `attrs.validators.get_disabled()` instead.
+ """
+ return _run_validators
diff --git a/third_party/python/attrs/attr/_funcs.py b/third_party/python/attrs/attr/_funcs.py
new file mode 100644
index 0000000000..7f5d9610f3
--- /dev/null
+++ b/third_party/python/attrs/attr/_funcs.py
@@ -0,0 +1,477 @@
+# SPDX-License-Identifier: MIT
+
+
+import copy
+
+from ._compat import PY_3_9_PLUS, get_generic_base
+from ._make import NOTHING, _obj_setattr, fields
+from .exceptions import AttrsAttributeNotFoundError
+
+
+def asdict(
+ inst,
+ recurse=True,
+ filter=None,
+ dict_factory=dict,
+ retain_collection_types=False,
+ value_serializer=None,
+):
+ """
+ Return the *attrs* attribute values of *inst* as a dict.
+
+ Optionally recurse into other *attrs*-decorated classes.
+
+ :param inst: Instance of an *attrs*-decorated class.
+ :param bool recurse: Recurse into classes that are also
+ *attrs*-decorated.
+    :param callable filter: A callable whose return value determines whether an
+ attribute or element is included (``True``) or dropped (``False``). Is
+ called with the `attrs.Attribute` as the first argument and the
+ value as the second argument.
+ :param callable dict_factory: A callable to produce dictionaries from. For
+ example, to produce ordered dictionaries instead of normal Python
+ dictionaries, pass in ``collections.OrderedDict``.
+ :param bool retain_collection_types: Do not convert to ``list`` when
+ encountering an attribute whose type is ``tuple`` or ``set``. Only
+ meaningful if ``recurse`` is ``True``.
+ :param Optional[callable] value_serializer: A hook that is called for every
+ attribute or dict key/value. It receives the current instance, field
+ and value and must return the (updated) value. The hook is run *after*
+ the optional *filter* has been applied.
+
+ :rtype: return type of *dict_factory*
+
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ .. versionadded:: 16.0.0 *dict_factory*
+ .. versionadded:: 16.1.0 *retain_collection_types*
+ .. versionadded:: 20.3.0 *value_serializer*
+ .. versionadded:: 21.3.0 If a dict has a collection for a key, it is
+ serialized as a tuple.
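+
+    For example, with a minimal illustrative class (``Point`` is a
+    hypothetical stand-in, not part of *attrs*)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class Point:
+        ...     x = attr.ib()
+        ...     y = attr.ib()
+        >>> asdict(Point(1, 2))
+        {'x': 1, 'y': 2}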
+ """
+ attrs = fields(inst.__class__)
+ rv = dict_factory()
+ for a in attrs:
+ v = getattr(inst, a.name)
+ if filter is not None and not filter(a, v):
+ continue
+
+ if value_serializer is not None:
+ v = value_serializer(inst, a, v)
+
+ if recurse is True:
+ if has(v.__class__):
+ rv[a.name] = asdict(
+ v,
+ recurse=True,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ elif isinstance(v, (tuple, list, set, frozenset)):
+ cf = v.__class__ if retain_collection_types is True else list
+ rv[a.name] = cf(
+ [
+ _asdict_anything(
+ i,
+ is_key=False,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ for i in v
+ ]
+ )
+ elif isinstance(v, dict):
+ df = dict_factory
+ rv[a.name] = df(
+ (
+ _asdict_anything(
+ kk,
+ is_key=True,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ _asdict_anything(
+ vv,
+ is_key=False,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ )
+ for kk, vv in v.items()
+ )
+ else:
+ rv[a.name] = v
+ else:
+ rv[a.name] = v
+ return rv
+
+
+def _asdict_anything(
+ val,
+ is_key,
+ filter,
+ dict_factory,
+ retain_collection_types,
+ value_serializer,
+):
+ """
+    ``asdict`` only works on attrs instances; this works on anything.
+ """
+ if getattr(val.__class__, "__attrs_attrs__", None) is not None:
+ # Attrs class.
+ rv = asdict(
+ val,
+ recurse=True,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ elif isinstance(val, (tuple, list, set, frozenset)):
+ if retain_collection_types is True:
+ cf = val.__class__
+ elif is_key:
+ cf = tuple
+ else:
+ cf = list
+
+ rv = cf(
+ [
+ _asdict_anything(
+ i,
+ is_key=False,
+ filter=filter,
+ dict_factory=dict_factory,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ )
+ for i in val
+ ]
+ )
+ elif isinstance(val, dict):
+ df = dict_factory
+ rv = df(
+ (
+ _asdict_anything(
+ kk,
+ is_key=True,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ _asdict_anything(
+ vv,
+ is_key=False,
+ filter=filter,
+ dict_factory=df,
+ retain_collection_types=retain_collection_types,
+ value_serializer=value_serializer,
+ ),
+ )
+ for kk, vv in val.items()
+ )
+ else:
+ rv = val
+ if value_serializer is not None:
+ rv = value_serializer(None, None, rv)
+
+ return rv
+
+
+def astuple(
+ inst,
+ recurse=True,
+ filter=None,
+ tuple_factory=tuple,
+ retain_collection_types=False,
+):
+ """
+ Return the *attrs* attribute values of *inst* as a tuple.
+
+ Optionally recurse into other *attrs*-decorated classes.
+
+ :param inst: Instance of an *attrs*-decorated class.
+ :param bool recurse: Recurse into classes that are also
+ *attrs*-decorated.
+    :param callable filter: A callable whose return value determines whether an
+ attribute or element is included (``True``) or dropped (``False``). Is
+ called with the `attrs.Attribute` as the first argument and the
+ value as the second argument.
+ :param callable tuple_factory: A callable to produce tuples from. For
+ example, to produce lists instead of tuples.
+ :param bool retain_collection_types: Do not convert to ``list``
+        or ``dict`` when encountering an attribute whose type is
+ ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is
+ ``True``.
+
+ :rtype: return type of *tuple_factory*
+
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ .. versionadded:: 16.2.0
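+
+    For example, reusing the same kind of minimal illustrative class as in
+    `asdict` (``Point`` is a hypothetical stand-in)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class Point:
+        ...     x = attr.ib()
+        ...     y = attr.ib()
+        >>> astuple(Point(1, 2))
+        (1, 2)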
+ """
+ attrs = fields(inst.__class__)
+ rv = []
+ retain = retain_collection_types # Very long. :/
+ for a in attrs:
+ v = getattr(inst, a.name)
+ if filter is not None and not filter(a, v):
+ continue
+ if recurse is True:
+ if has(v.__class__):
+ rv.append(
+ astuple(
+ v,
+ recurse=True,
+ filter=filter,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ )
+ elif isinstance(v, (tuple, list, set, frozenset)):
+ cf = v.__class__ if retain is True else list
+ rv.append(
+ cf(
+ [
+ astuple(
+ j,
+ recurse=True,
+ filter=filter,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(j.__class__)
+ else j
+ for j in v
+ ]
+ )
+ )
+ elif isinstance(v, dict):
+ df = v.__class__ if retain is True else dict
+ rv.append(
+ df(
+ (
+ astuple(
+ kk,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(kk.__class__)
+ else kk,
+ astuple(
+ vv,
+ tuple_factory=tuple_factory,
+ retain_collection_types=retain,
+ )
+ if has(vv.__class__)
+ else vv,
+ )
+ for kk, vv in v.items()
+ )
+ )
+ else:
+ rv.append(v)
+ else:
+ rv.append(v)
+
+ return rv if tuple_factory is list else tuple_factory(rv)
+
+
+def has(cls):
+ """
+ Check whether *cls* is a class with *attrs* attributes.
+
+ :param type cls: Class to introspect.
+ :raise TypeError: If *cls* is not a class.
+
+ :rtype: bool
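+
+    For example (``C`` is an illustrative stand-in)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C:
+        ...     pass
+        >>> has(C), has(int)
+        (True, False)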
+ """
+ attrs = getattr(cls, "__attrs_attrs__", None)
+ if attrs is not None:
+ return True
+
+ # No attrs, maybe it's a specialized generic (A[str])?
+ generic_base = get_generic_base(cls)
+ if generic_base is not None:
+ generic_attrs = getattr(generic_base, "__attrs_attrs__", None)
+ if generic_attrs is not None:
+ # Stick it on here for speed next time.
+ cls.__attrs_attrs__ = generic_attrs
+ return generic_attrs is not None
+ return False
+
+
+def assoc(inst, **changes):
+ """
+ Copy *inst* and apply *changes*.
+
+    This is different from `evolve`, which applies the changes to the
+    arguments that create the new instance.
+
+ `evolve`'s behavior is preferable, but there are `edge cases`_ where it
+ doesn't work. Therefore `assoc` is deprecated, but will not be removed.
+
+ .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251
+
+ :param inst: Instance of a class with *attrs* attributes.
+ :param changes: Keyword changes in the new copy.
+
+ :return: A copy of inst with *changes* incorporated.
+
+ :raise attrs.exceptions.AttrsAttributeNotFoundError: If *attr_name*
+ couldn't be found on *cls*.
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ .. deprecated:: 17.1.0
+ Use `attrs.evolve` instead if you can.
+        This function will not be removed due to the slightly different approach
+ compared to `attrs.evolve`.
+ """
+ new = copy.copy(inst)
+ attrs = fields(inst.__class__)
+ for k, v in changes.items():
+ a = getattr(attrs, k, NOTHING)
+ if a is NOTHING:
+ raise AttrsAttributeNotFoundError(
+ f"{k} is not an attrs attribute on {new.__class__}."
+ )
+ _obj_setattr(new, k, v)
+ return new
+
+
+def evolve(*args, **changes):
+ """
+ Create a new instance, based on the first positional argument with
+ *changes* applied.
+
+ :param inst: Instance of a class with *attrs* attributes.
+ :param changes: Keyword changes in the new copy.
+
+ :return: A copy of inst with *changes* incorporated.
+
+ :raise TypeError: If *attr_name* couldn't be found in the class
+ ``__init__``.
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ .. versionadded:: 17.1.0
+ .. deprecated:: 23.1.0
+ It is now deprecated to pass the instance using the keyword argument
+ *inst*. It will raise a warning until at least April 2024, after which
+ it will become an error. Always pass the instance as a positional
+ argument.
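+
+    For example (``C`` is an illustrative stand-in)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C:
+        ...     x = attr.ib()
+        >>> evolve(C(1), x=2)
+        C(x=2)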
+ """
+ # Try to get instance by positional argument first.
+ # Use changes otherwise and warn it'll break.
+ if args:
+ try:
+ (inst,) = args
+ except ValueError:
+ raise TypeError(
+ f"evolve() takes 1 positional argument, but {len(args)} "
+ "were given"
+ ) from None
+ else:
+ try:
+ inst = changes.pop("inst")
+ except KeyError:
+ raise TypeError(
+ "evolve() missing 1 required positional argument: 'inst'"
+ ) from None
+
+ import warnings
+
+ warnings.warn(
+ "Passing the instance per keyword argument is deprecated and "
+ "will stop working in, or after, April 2024.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ cls = inst.__class__
+ attrs = fields(cls)
+ for a in attrs:
+ if not a.init:
+ continue
+ attr_name = a.name # To deal with private attributes.
+ init_name = a.alias
+ if init_name not in changes:
+ changes[init_name] = getattr(inst, attr_name)
+
+ return cls(**changes)
+
+
+def resolve_types(
+ cls, globalns=None, localns=None, attribs=None, include_extras=True
+):
+ """
+ Resolve any strings and forward annotations in type annotations.
+
+ This is only required if you need concrete types in `Attribute`'s *type*
+ field. In other words, you don't need to resolve your types if you only
+ use them for static type checking.
+
+ With no arguments, names will be looked up in the module in which the class
+ was created. If this is not what you want, e.g. if the name only exists
+ inside a method, you may pass *globalns* or *localns* to specify other
+ dictionaries in which to look up these names. See the docs of
+ `typing.get_type_hints` for more details.
+
+ :param type cls: Class to resolve.
+ :param Optional[dict] globalns: Dictionary containing global variables.
+ :param Optional[dict] localns: Dictionary containing local variables.
+ :param Optional[list] attribs: List of attribs for the given class.
+ This is necessary when calling from inside a ``field_transformer``
+ since *cls* is not an *attrs* class yet.
+ :param bool include_extras: Resolve more accurately, if possible.
+        Pass ``include_extras`` to ``typing.get_type_hints``, if supported by
+        the typing module. On supported Python versions (3.9+), this resolves
+        the types more accurately.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class and you didn't pass any attribs.
+ :raise NameError: If types cannot be resolved because of missing variables.
+
+ :returns: *cls* so you can use this function also as a class decorator.
+ Please note that you have to apply it **after** `attrs.define`. That
+ means the decorator has to come in the line **before** `attrs.define`.
+
+ .. versionadded:: 20.1.0
+ .. versionadded:: 21.1.0 *attribs*
+ .. versionadded:: 23.1.0 *include_extras*
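+
+    For example, resolving a string annotation (``A`` is an illustrative
+    stand-in)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class A:
+        ...     x: 'int' = attr.ib()
+        >>> fields(resolve_types(A)).x.type
+        <class 'int'>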
+
+ """
+ # Since calling get_type_hints is expensive we cache whether we've
+ # done it already.
+ if getattr(cls, "__attrs_types_resolved__", None) != cls:
+ import typing
+
+ kwargs = {"globalns": globalns, "localns": localns}
+
+ if PY_3_9_PLUS:
+ kwargs["include_extras"] = include_extras
+
+ hints = typing.get_type_hints(cls, **kwargs)
+ for field in fields(cls) if attribs is None else attribs:
+ if field.name in hints:
+ # Since fields have been frozen we must work around it.
+ _obj_setattr(field, "type", hints[field.name])
+ # We store the class we resolved so that subclasses know they haven't
+ # been resolved.
+ cls.__attrs_types_resolved__ = cls
+
+ # Return the class so you can use it as a decorator too.
+ return cls
diff --git a/third_party/python/attrs/attr/_make.py b/third_party/python/attrs/attr/_make.py
new file mode 100644
index 0000000000..d72f738eec
--- /dev/null
+++ b/third_party/python/attrs/attr/_make.py
@@ -0,0 +1,2987 @@
+# SPDX-License-Identifier: MIT
+
+import copy
+import enum
+import linecache
+import sys
+import types
+import typing
+
+from operator import itemgetter
+
+# We need to import _compat itself in addition to the _compat members to avoid
+# having the thread-local in the globals here.
+from . import _compat, _config, setters
+from ._compat import (
+ PY310,
+ _AnnotationExtractor,
+ get_generic_base,
+ set_closure_cell,
+)
+from .exceptions import (
+ DefaultAlreadySetError,
+ FrozenInstanceError,
+ NotAnAttrsClassError,
+ UnannotatedAttributeError,
+)
+
+
+# This is used at least twice, so cache it here.
+_obj_setattr = object.__setattr__
+_init_converter_pat = "__attr_converter_%s"
+_init_factory_pat = "__attr_factory_%s"
+_classvar_prefixes = (
+ "typing.ClassVar",
+ "t.ClassVar",
+ "ClassVar",
+ "typing_extensions.ClassVar",
+)
+# we don't use a double-underscore prefix because that triggers
+# name mangling when trying to create a slot for the field
+# (when slots=True)
+_hash_cache_field = "_attrs_cached_hash"
+
+_empty_metadata_singleton = types.MappingProxyType({})
+
+# Unique object for unequivocal getattr() defaults.
+_sentinel = object()
+
+_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate)
+
+
+class _Nothing(enum.Enum):
+ """
+ Sentinel to indicate the lack of a value when ``None`` is ambiguous.
+
+ If extending attrs, you can use ``typing.Literal[NOTHING]`` to show
+ that a value may be ``NOTHING``.
+
+ .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False.
+ .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant.
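+
+    For example (illustrative)::
+
+        >>> bool(NOTHING)
+        False
+        >>> NOTHING
+        NOTHING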
+ """
+
+ NOTHING = enum.auto()
+
+ def __repr__(self):
+ return "NOTHING"
+
+ def __bool__(self):
+ return False
+
+
+NOTHING = _Nothing.NOTHING
+"""
+Sentinel to indicate the lack of a value when ``None`` is ambiguous.
+"""
+
+
+class _CacheHashWrapper(int):
+ """
+ An integer subclass that pickles / copies as None
+
+ This is used for non-slots classes with ``cache_hash=True``, to avoid
+ serializing a potentially (even likely) invalid hash value. Since ``None``
+ is the default value for uncalculated hashes, whenever this is copied,
+ the copy's value for the hash should automatically reset.
+
+ See GH #613 for more details.
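+
+    For example (illustrative)::
+
+        >>> import copy
+        >>> copy.copy(_CacheHashWrapper(42)) is None
+        True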
+ """
+
+ def __reduce__(self, _none_constructor=type(None), _args=()):
+ return _none_constructor, _args
+
+
+def attrib(
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=None,
+ init=True,
+ metadata=None,
+ type=None,
+ converter=None,
+ factory=None,
+ kw_only=False,
+ eq=None,
+ order=None,
+ on_setattr=None,
+ alias=None,
+):
+ """
+ Create a new attribute on a class.
+
+ .. warning::
+
+ Does *not* do anything unless the class is also decorated with
+ `attr.s` / `attrs.define` / et cetera!
+
+ Please consider using `attrs.field` in new code (``attr.ib`` will *never*
+ go away, though).
+
+ :param default: A value that is used if an *attrs*-generated ``__init__``
+ is used and no value is passed while instantiating or the attribute is
+ excluded using ``init=False``.
+
+ If the value is an instance of `attrs.Factory`, its callable will be
+ used to construct a new value (useful for mutable data types like lists
+ or dicts).
+
+ If a default is not set (or set manually to `attrs.NOTHING`), a value
+ *must* be supplied when instantiating; otherwise a `TypeError`
+ will be raised.
+
+ The default can also be set using decorator notation as shown below.
+
+ :type default: Any value
+
+ :param callable factory: Syntactic sugar for
+ ``default=attr.Factory(factory)``.
+
+ :param validator: `callable` that is called by *attrs*-generated
+ ``__init__`` methods after the instance has been initialized. They
+ receive the initialized instance, the :func:`~attrs.Attribute`, and the
+ passed value.
+
+ The return value is *not* inspected so the validator has to throw an
+ exception itself.
+
+ If a `list` is passed, its items are treated as validators and must
+ all pass.
+
+ Validators can be globally disabled and re-enabled using
+ `attrs.validators.get_disabled` / `attrs.validators.set_disabled`.
+
+ The validator can also be set using decorator notation as shown below.
+
+ :type validator: `callable` or a `list` of `callable`\\ s.
+
+ :param repr: Include this attribute in the generated ``__repr__``
+ method. If ``True``, include the attribute; if ``False``, omit it. By
+ default, the built-in ``repr()`` function is used. To override how the
+ attribute value is formatted, pass a ``callable`` that takes a single
+ value and returns a string. Note that the resulting string is used
+ as-is, i.e. it will be used directly *instead* of calling ``repr()``
+ (the default).
+ :type repr: a `bool` or a `callable` to use a custom function.
+
+ :param eq: If ``True`` (default), include this attribute in the
+ generated ``__eq__`` and ``__ne__`` methods that check two instances
+ for equality. To override how the attribute value is compared,
+ pass a ``callable`` that takes a single value and returns the value
+ to be compared.
+ :type eq: a `bool` or a `callable`.
+
+    :param order: If ``True`` (default), include this attribute in the
+ generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods.
+ To override how the attribute value is ordered,
+ pass a ``callable`` that takes a single value and returns the value
+ to be ordered.
+ :type order: a `bool` or a `callable`.
+
+ :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the
+ same value. Must not be mixed with *eq* or *order*.
+ :type cmp: a `bool` or a `callable`.
+
+ :param Optional[bool] hash: Include this attribute in the generated
+ ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This
+        is the correct behavior according to the Python spec. Setting this
+        value to anything other than ``None`` is *discouraged*.
+ :param bool init: Include this attribute in the generated ``__init__``
+ method. It is possible to set this to ``False`` and set a default
+        value. In that case this attribute is unconditionally initialized
+ with the specified default value or factory.
+ :param callable converter: `callable` that is called by
+ *attrs*-generated ``__init__`` methods to convert attribute's value
+ to the desired format. It is given the passed-in value, and the
+ returned value will be used as the new value of the attribute. The
+ value is converted before being passed to the validator, if any.
+ :param metadata: An arbitrary mapping, to be used by third-party
+ components. See `extending-metadata`.
+
+ :param type: The type of the attribute. Nowadays, the preferred method to
+ specify the type is using a variable annotation (see :pep:`526`).
+ This argument is provided for backward compatibility.
+ Regardless of the approach used, the type will be stored on
+ ``Attribute.type``.
+
+ Please note that *attrs* doesn't do anything with this metadata by
+ itself. You can use it as part of your own code or for
+ `static type checking <types>`.
+ :param kw_only: Make this attribute keyword-only in the generated
+ ``__init__`` (if ``init`` is ``False``, this parameter is ignored).
+    :param on_setattr: Allows overwriting the *on_setattr* setting from
+ `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used.
+ Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this
+ attribute -- regardless of the setting in `attr.s`.
+ :type on_setattr: `callable`, or a list of callables, or `None`, or
+ `attrs.setters.NO_OP`
+ :param Optional[str] alias: Override this attribute's parameter name in the
+ generated ``__init__`` method. If left `None`, default to ``name``
+ stripped of leading underscores. See `private-attributes`.
+
+ .. versionadded:: 15.2.0 *convert*
+ .. versionadded:: 16.3.0 *metadata*
+ .. versionchanged:: 17.1.0 *validator* can be a ``list`` now.
+ .. versionchanged:: 17.1.0
+ *hash* is ``None`` and therefore mirrors *eq* by default.
+ .. versionadded:: 17.3.0 *type*
+ .. deprecated:: 17.4.0 *convert*
+ .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated
+ *convert* to achieve consistency with other noun-based arguments.
+ .. versionadded:: 18.1.0
+ ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``.
+ .. versionadded:: 18.2.0 *kw_only*
+ .. versionchanged:: 19.2.0 *convert* keyword argument removed.
+ .. versionchanged:: 19.2.0 *repr* also accepts a custom callable.
+ .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+ .. versionadded:: 19.2.0 *eq* and *order*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionchanged:: 20.3.0 *kw_only* backported to Python 2
+ .. versionchanged:: 21.1.0
+ *eq*, *order*, and *cmp* also accept a custom callable
+ .. versionchanged:: 21.1.0 *cmp* undeprecated
+ .. versionadded:: 22.2.0 *alias*
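+
+    For example, setting the default using decorator notation, as mentioned
+    above (``C`` is an illustrative stand-in)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C:
+        ...     x = attr.ib()
+        ...     @x.default
+        ...     def _x_default(self):
+        ...         return 42
+        >>> C()
+        C(x=42)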
+ """
+ eq, eq_key, order, order_key = _determine_attrib_eq_order(
+ cmp, eq, order, True
+ )
+
+ if hash is not None and hash is not True and hash is not False:
+ raise TypeError(
+ "Invalid value for hash. Must be True, False, or None."
+ )
+
+ if factory is not None:
+ if default is not NOTHING:
+ raise ValueError(
+ "The `default` and `factory` arguments are mutually "
+ "exclusive."
+ )
+ if not callable(factory):
+ raise ValueError("The `factory` argument must be a callable.")
+ default = Factory(factory)
+
+ if metadata is None:
+ metadata = {}
+
+ # Apply syntactic sugar by auto-wrapping.
+ if isinstance(on_setattr, (list, tuple)):
+ on_setattr = setters.pipe(*on_setattr)
+
+ if validator and isinstance(validator, (list, tuple)):
+ validator = and_(*validator)
+
+ if converter and isinstance(converter, (list, tuple)):
+ converter = pipe(*converter)
+
+ return _CountingAttr(
+ default=default,
+ validator=validator,
+ repr=repr,
+ cmp=None,
+ hash=hash,
+ init=init,
+ converter=converter,
+ metadata=metadata,
+ type=type,
+ kw_only=kw_only,
+ eq=eq,
+ eq_key=eq_key,
+ order=order,
+ order_key=order_key,
+ on_setattr=on_setattr,
+ alias=alias,
+ )
+
+
+def _compile_and_eval(script, globs, locs=None, filename=""):
+ """
+ "Exec" the script with the given global (globs) and local (locs) variables.
+ """
+ bytecode = compile(script, filename, "exec")
+ eval(bytecode, globs, locs)
+
+
+def _make_method(name, script, filename, globs):
+ """
+ Create the method with the script given and return the method object.
+ """
+ locs = {}
+
+    # In order for debuggers like PDB to be able to step through the code,
+ # we add a fake linecache entry.
+ count = 1
+ base_filename = filename
+ while True:
+ linecache_tuple = (
+ len(script),
+ None,
+ script.splitlines(True),
+ filename,
+ )
+ old_val = linecache.cache.setdefault(filename, linecache_tuple)
+ if old_val == linecache_tuple:
+ break
+ else:
+ filename = f"{base_filename[:-1]}-{count}>"
+ count += 1
+
+ _compile_and_eval(script, globs, locs, filename)
+
+ return locs[name]
+
+
+def _make_attr_tuple_class(cls_name, attr_names):
+ """
+ Create a tuple subclass to hold `Attribute`s for an `attrs` class.
+
+ The subclass is a bare tuple with properties for names.
+
+ class MyClassAttributes(tuple):
+ __slots__ = ()
+ x = property(itemgetter(0))
+ """
+ attr_class_name = f"{cls_name}Attributes"
+ attr_class_template = [
+ f"class {attr_class_name}(tuple):",
+ " __slots__ = ()",
+ ]
+ if attr_names:
+ for i, attr_name in enumerate(attr_names):
+ attr_class_template.append(
+ f" {attr_name} = _attrs_property(_attrs_itemgetter({i}))"
+ )
+ else:
+ attr_class_template.append(" pass")
+ globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property}
+ _compile_and_eval("\n".join(attr_class_template), globs)
+ return globs[attr_class_name]
+
+
+# Tuple class for extracted attributes from a class definition.
+# `base_attrs` is a subset of `attrs`.
+_Attributes = _make_attr_tuple_class(
+ "_Attributes",
+ [
+ # all attributes to build dunder methods for
+ "attrs",
+ # attributes that have been inherited
+ "base_attrs",
+ # map inherited attributes to their originating classes
+ "base_attrs_map",
+ ],
+)
+
+
+def _is_class_var(annot):
+ """
+ Check whether *annot* is a typing.ClassVar.
+
+ The string comparison hack is used to avoid evaluating all string
+ annotations which would put attrs-based classes at a performance
+ disadvantage compared to plain old classes.
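+
+    For example (illustrative)::
+
+        >>> _is_class_var("typing.ClassVar[int]")
+        True
+        >>> _is_class_var("int")
+        False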
+ """
+ annot = str(annot)
+
+ # Annotation can be quoted.
+ if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
+ annot = annot[1:-1]
+
+ return annot.startswith(_classvar_prefixes)
+
+
+def _has_own_attribute(cls, attrib_name):
+ """
+ Check whether *cls* defines *attrib_name* (and doesn't just inherit it).
+ """
+ attr = getattr(cls, attrib_name, _sentinel)
+ if attr is _sentinel:
+ return False
+
+ for base_cls in cls.__mro__[1:]:
+ a = getattr(base_cls, attrib_name, None)
+ if attr is a:
+ return False
+
+ return True
+
+
+def _get_annotations(cls):
+ """
+ Get annotations for *cls*.
+ """
+ if _has_own_attribute(cls, "__annotations__"):
+ return cls.__annotations__
+
+ return {}
+
+
+def _collect_base_attrs(cls, taken_attr_names):
+ """
+ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+ """
+ base_attrs = []
+ base_attr_map = {} # A dictionary of base attrs to their classes.
+
+ # Traverse the MRO and collect attributes.
+ for base_cls in reversed(cls.__mro__[1:-1]):
+ for a in getattr(base_cls, "__attrs_attrs__", []):
+ if a.inherited or a.name in taken_attr_names:
+ continue
+
+ a = a.evolve(inherited=True)
+ base_attrs.append(a)
+ base_attr_map[a.name] = base_cls
+
+    # For each name, only keep the freshest definition, i.e. the furthest
+    # at the back. base_attr_map is fine because it gets overwritten with
+    # every new instance.
+ filtered = []
+ seen = set()
+ for a in reversed(base_attrs):
+ if a.name in seen:
+ continue
+ filtered.insert(0, a)
+ seen.add(a.name)
+
+ return filtered, base_attr_map
+
+
+def _collect_base_attrs_broken(cls, taken_attr_names):
+ """
+ Collect attr.ibs from base classes of *cls*, except *taken_attr_names*.
+
+ N.B. *taken_attr_names* will be mutated.
+
+ Adhere to the old incorrect behavior.
+
+ Notably it collects from the front and considers inherited attributes which
+ leads to the buggy behavior reported in #428.
+ """
+ base_attrs = []
+ base_attr_map = {} # A dictionary of base attrs to their classes.
+
+ # Traverse the MRO and collect attributes.
+ for base_cls in cls.__mro__[1:-1]:
+ for a in getattr(base_cls, "__attrs_attrs__", []):
+ if a.name in taken_attr_names:
+ continue
+
+ a = a.evolve(inherited=True)
+ taken_attr_names.add(a.name)
+ base_attrs.append(a)
+ base_attr_map[a.name] = base_cls
+
+ return base_attrs, base_attr_map
+
+
+def _transform_attrs(
+ cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer
+):
+ """
+ Transform all `_CountingAttr`s on a class into `Attribute`s.
+
+ If *these* is passed, use that and don't look for them on the class.
+
+    If *collect_by_mro* is True, collect them in the correct MRO order;
+ use the old -- incorrect -- order. See #428.
+
+ Return an `_Attributes`.
+ """
+ cd = cls.__dict__
+ anns = _get_annotations(cls)
+
+ if these is not None:
+ ca_list = [(name, ca) for name, ca in these.items()]
+ elif auto_attribs is True:
+ ca_names = {
+ name
+ for name, attr in cd.items()
+ if isinstance(attr, _CountingAttr)
+ }
+ ca_list = []
+ annot_names = set()
+ for attr_name, type in anns.items():
+ if _is_class_var(type):
+ continue
+ annot_names.add(attr_name)
+ a = cd.get(attr_name, NOTHING)
+
+ if not isinstance(a, _CountingAttr):
+ if a is NOTHING:
+ a = attrib()
+ else:
+ a = attrib(default=a)
+ ca_list.append((attr_name, a))
+
+ unannotated = ca_names - annot_names
+ if len(unannotated) > 0:
+ raise UnannotatedAttributeError(
+ "The following `attr.ib`s lack a type annotation: "
+ + ", ".join(
+ sorted(unannotated, key=lambda n: cd.get(n).counter)
+ )
+ + "."
+ )
+ else:
+ ca_list = sorted(
+ (
+ (name, attr)
+ for name, attr in cd.items()
+ if isinstance(attr, _CountingAttr)
+ ),
+ key=lambda e: e[1].counter,
+ )
+
+ own_attrs = [
+ Attribute.from_counting_attr(
+ name=attr_name, ca=ca, type=anns.get(attr_name)
+ )
+ for attr_name, ca in ca_list
+ ]
+
+ if collect_by_mro:
+ base_attrs, base_attr_map = _collect_base_attrs(
+ cls, {a.name for a in own_attrs}
+ )
+ else:
+ base_attrs, base_attr_map = _collect_base_attrs_broken(
+ cls, {a.name for a in own_attrs}
+ )
+
+ if kw_only:
+ own_attrs = [a.evolve(kw_only=True) for a in own_attrs]
+ base_attrs = [a.evolve(kw_only=True) for a in base_attrs]
+
+ attrs = base_attrs + own_attrs
+
+ # Mandatory vs non-mandatory attr order only matters when they are part of
+ # the __init__ signature and when they aren't kw_only (which are moved to
+ # the end and can be mandatory or non-mandatory in any order, as they will
+ # be specified as keyword args anyway). Check the order of those attrs:
+ had_default = False
+ for a in (a for a in attrs if a.init is not False and a.kw_only is False):
+ if had_default is True and a.default is NOTHING:
+ raise ValueError(
+ "No mandatory attributes allowed after an attribute with a "
+ f"default value or factory. Attribute in question: {a!r}"
+ )
+
+ if had_default is False and a.default is not NOTHING:
+ had_default = True
+
+ if field_transformer is not None:
+ attrs = field_transformer(cls, attrs)
+
+ # Resolve default field alias after executing field_transformer.
+ # This allows field_transformer to differentiate between explicit vs
+ # default aliases and supply their own defaults.
+ attrs = [
+ a.evolve(alias=_default_init_alias_for(a.name)) if not a.alias else a
+ for a in attrs
+ ]
+
+ # Create AttrsClass *after* applying the field_transformer since it may
+ # add or remove attributes!
+ attr_names = [a.name for a in attrs]
+ AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names)
+
+ return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map))
+
+
+def _frozen_setattrs(self, name, value):
+ """
+ Attached to frozen classes as __setattr__.
+ """
+ if isinstance(self, BaseException) and name in (
+ "__cause__",
+ "__context__",
+ "__traceback__",
+ ):
+ BaseException.__setattr__(self, name, value)
+ return
+
+ raise FrozenInstanceError()
+
+
+def _frozen_delattrs(self, name):
+ """
+ Attached to frozen classes as __delattr__.
+ """
+ raise FrozenInstanceError()
+
+
+class _ClassBuilder:
+ """
+ Iteratively build *one* class.
+ """
+
+ __slots__ = (
+ "_attr_names",
+ "_attrs",
+ "_base_attr_map",
+ "_base_names",
+ "_cache_hash",
+ "_cls",
+ "_cls_dict",
+ "_delete_attribs",
+ "_frozen",
+ "_has_pre_init",
+ "_has_post_init",
+ "_is_exc",
+ "_on_setattr",
+ "_slots",
+ "_weakref_slot",
+ "_wrote_own_setattr",
+ "_has_custom_setattr",
+ )
+
+ def __init__(
+ self,
+ cls,
+ these,
+ slots,
+ frozen,
+ weakref_slot,
+ getstate_setstate,
+ auto_attribs,
+ kw_only,
+ cache_hash,
+ is_exc,
+ collect_by_mro,
+ on_setattr,
+ has_custom_setattr,
+ field_transformer,
+ ):
+ attrs, base_attrs, base_map = _transform_attrs(
+ cls,
+ these,
+ auto_attribs,
+ kw_only,
+ collect_by_mro,
+ field_transformer,
+ )
+
+ self._cls = cls
+ self._cls_dict = dict(cls.__dict__) if slots else {}
+ self._attrs = attrs
+ self._base_names = {a.name for a in base_attrs}
+ self._base_attr_map = base_map
+ self._attr_names = tuple(a.name for a in attrs)
+ self._slots = slots
+ self._frozen = frozen
+ self._weakref_slot = weakref_slot
+ self._cache_hash = cache_hash
+ self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False))
+ self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False))
+ self._delete_attribs = not bool(these)
+ self._is_exc = is_exc
+ self._on_setattr = on_setattr
+
+ self._has_custom_setattr = has_custom_setattr
+ self._wrote_own_setattr = False
+
+ self._cls_dict["__attrs_attrs__"] = self._attrs
+
+ if frozen:
+ self._cls_dict["__setattr__"] = _frozen_setattrs
+ self._cls_dict["__delattr__"] = _frozen_delattrs
+
+ self._wrote_own_setattr = True
+ elif on_setattr in (
+ _ng_default_on_setattr,
+ setters.validate,
+ setters.convert,
+ ):
+ has_validator = has_converter = False
+ for a in attrs:
+ if a.validator is not None:
+ has_validator = True
+ if a.converter is not None:
+ has_converter = True
+
+ if has_validator and has_converter:
+ break
+ if (
+ (
+ on_setattr == _ng_default_on_setattr
+ and not (has_validator or has_converter)
+ )
+ or (on_setattr == setters.validate and not has_validator)
+ or (on_setattr == setters.convert and not has_converter)
+ ):
+ # If class-level on_setattr is set to convert + validate, but
+ # there's no field to convert or validate, pretend like there's
+ # no on_setattr.
+ self._on_setattr = None
+
+ if getstate_setstate:
+ (
+ self._cls_dict["__getstate__"],
+ self._cls_dict["__setstate__"],
+ ) = self._make_getstate_setstate()
+
+ def __repr__(self):
+ return f"<_ClassBuilder(cls={self._cls.__name__})>"
+
+ if PY310:
+ import abc
+
+ def build_class(self):
+ """
+ Finalize class based on the accumulated configuration.
+
+ Builder cannot be used after calling this method.
+ """
+ if self._slots is True:
+ return self._create_slots_class()
+
+ return self.abc.update_abstractmethods(
+ self._patch_original_class()
+ )
+
+ else:
+
+ def build_class(self):
+ """
+ Finalize class based on the accumulated configuration.
+
+ Builder cannot be used after calling this method.
+ """
+ if self._slots is True:
+ return self._create_slots_class()
+
+ return self._patch_original_class()
+
+ def _patch_original_class(self):
+ """
+ Apply accumulated methods and return the class.
+ """
+ cls = self._cls
+ base_names = self._base_names
+
+ # Clean class of attribute definitions (`attr.ib()`s).
+ if self._delete_attribs:
+ for name in self._attr_names:
+ if (
+ name not in base_names
+ and getattr(cls, name, _sentinel) is not _sentinel
+ ):
+ try:
+ delattr(cls, name)
+ except AttributeError:
+ # This can happen if a base class defines a class
+ # variable and we want to set an attribute with the
+ # same name by using only a type annotation.
+ pass
+
+ # Attach our dunder methods.
+ for name, value in self._cls_dict.items():
+ setattr(cls, name, value)
+
+ # If we've inherited an attrs __setattr__ and don't write our own,
+ # reset it to object's.
+ if not self._wrote_own_setattr and getattr(
+ cls, "__attrs_own_setattr__", False
+ ):
+ cls.__attrs_own_setattr__ = False
+
+ if not self._has_custom_setattr:
+ cls.__setattr__ = _obj_setattr
+
+ return cls
+
+ def _create_slots_class(self):
+ """
+ Build and return a new class with a `__slots__` attribute.
+ """
+ cd = {
+ k: v
+ for k, v in self._cls_dict.items()
+ if k not in tuple(self._attr_names) + ("__dict__", "__weakref__")
+ }
+
+ # If our class doesn't have its own implementation of __setattr__
+        # (either from the user or by us), check the bases: if one of them has
+ # an attrs-made __setattr__, that needs to be reset. We don't walk the
+ # MRO because we only care about our immediate base classes.
+ # XXX: This can be confused by subclassing a slotted attrs class with
+        # XXX: a non-attrs class and subclassing the resulting class with an
+        # XXX: attrs class. See `test_slotted_confused` for details. For now
+        # XXX: that's OK with us.
+ if not self._wrote_own_setattr:
+ cd["__attrs_own_setattr__"] = False
+
+ if not self._has_custom_setattr:
+ for base_cls in self._cls.__bases__:
+ if base_cls.__dict__.get("__attrs_own_setattr__", False):
+ cd["__setattr__"] = _obj_setattr
+ break
+
+ # Traverse the MRO to collect existing slots
+ # and check for an existing __weakref__.
+ existing_slots = dict()
+ weakref_inherited = False
+ for base_cls in self._cls.__mro__[1:-1]:
+ if base_cls.__dict__.get("__weakref__", None) is not None:
+ weakref_inherited = True
+ existing_slots.update(
+ {
+ name: getattr(base_cls, name)
+ for name in getattr(base_cls, "__slots__", [])
+ }
+ )
+
+ base_names = set(self._base_names)
+
+ names = self._attr_names
+ if (
+ self._weakref_slot
+ and "__weakref__" not in getattr(self._cls, "__slots__", ())
+ and "__weakref__" not in names
+ and not weakref_inherited
+ ):
+ names += ("__weakref__",)
+
+ # We only add the names of attributes that aren't inherited.
+ # Setting __slots__ to inherited attributes wastes memory.
+ slot_names = [name for name in names if name not in base_names]
+        # There are slots for attributes from the current class
+        # that are defined in parent classes.
+        # As their descriptors may be overridden by a child class,
+        # we collect them here and update the class dict.
+ reused_slots = {
+ slot: slot_descriptor
+ for slot, slot_descriptor in existing_slots.items()
+ if slot in slot_names
+ }
+ slot_names = [name for name in slot_names if name not in reused_slots]
+ cd.update(reused_slots)
+ if self._cache_hash:
+ slot_names.append(_hash_cache_field)
+ cd["__slots__"] = tuple(slot_names)
+
+ cd["__qualname__"] = self._cls.__qualname__
+
+ # Create new class based on old class and our methods.
+ cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd)
+
+ # The following is a fix for
+ # <https://github.com/python-attrs/attrs/issues/102>.
+ # If a method mentions `__class__` or uses the no-arg super(), the
+ # compiler will bake a reference to the class in the method itself
+ # as `method.__closure__`. Since we replace the class with a
+ # clone, we rewrite these references so it keeps working.
+ for item in cls.__dict__.values():
+ if isinstance(item, (classmethod, staticmethod)):
+ # Class- and staticmethods hide their functions inside.
+ # These might need to be rewritten as well.
+ closure_cells = getattr(item.__func__, "__closure__", None)
+ elif isinstance(item, property):
+ # Workaround for property `super()` shortcut (PY3-only).
+ # There is no universal way for other descriptors.
+ closure_cells = getattr(item.fget, "__closure__", None)
+ else:
+ closure_cells = getattr(item, "__closure__", None)
+
+ if not closure_cells: # Catch None or the empty list.
+ continue
+ for cell in closure_cells:
+ try:
+ match = cell.cell_contents is self._cls
+ except ValueError: # ValueError: Cell is empty
+ pass
+ else:
+ if match:
+ set_closure_cell(cell, cls)
+
+ return cls
+
+ def add_repr(self, ns):
+ self._cls_dict["__repr__"] = self._add_method_dunders(
+ _make_repr(self._attrs, ns, self._cls)
+ )
+ return self
+
+ def add_str(self):
+ repr = self._cls_dict.get("__repr__")
+ if repr is None:
+ raise ValueError(
+ "__str__ can only be generated if a __repr__ exists."
+ )
+
+ def __str__(self):
+ return self.__repr__()
+
+ self._cls_dict["__str__"] = self._add_method_dunders(__str__)
+ return self
+
+ def _make_getstate_setstate(self):
+ """
+ Create custom __setstate__ and __getstate__ methods.
+ """
+ # __weakref__ is not writable.
+ state_attr_names = tuple(
+ an for an in self._attr_names if an != "__weakref__"
+ )
+
+ def slots_getstate(self):
+ """
+ Automatically created by attrs.
+ """
+ return {name: getattr(self, name) for name in state_attr_names}
+
+ hash_caching_enabled = self._cache_hash
+
+ def slots_setstate(self, state):
+ """
+ Automatically created by attrs.
+ """
+ __bound_setattr = _obj_setattr.__get__(self)
+ if isinstance(state, tuple):
+ # Backward compatibility with attrs instances pickled with
+ # attrs versions before v22.2.0 which stored tuples.
+ for name, value in zip(state_attr_names, state):
+ __bound_setattr(name, value)
+ else:
+ for name in state_attr_names:
+ if name in state:
+ __bound_setattr(name, state[name])
+
+ # The hash code cache is not included when the object is
+ # serialized, but it still needs to be initialized to None to
+ # indicate that the first call to __hash__ should be a cache
+ # miss.
+ if hash_caching_enabled:
+ __bound_setattr(_hash_cache_field, None)
+
+ return slots_getstate, slots_setstate
+
+ def make_unhashable(self):
+ self._cls_dict["__hash__"] = None
+ return self
+
+ def add_hash(self):
+ self._cls_dict["__hash__"] = self._add_method_dunders(
+ _make_hash(
+ self._cls,
+ self._attrs,
+ frozen=self._frozen,
+ cache_hash=self._cache_hash,
+ )
+ )
+
+ return self
+
+ def add_init(self):
+ self._cls_dict["__init__"] = self._add_method_dunders(
+ _make_init(
+ self._cls,
+ self._attrs,
+ self._has_pre_init,
+ self._has_post_init,
+ self._frozen,
+ self._slots,
+ self._cache_hash,
+ self._base_attr_map,
+ self._is_exc,
+ self._on_setattr,
+ attrs_init=False,
+ )
+ )
+
+ return self
+
+ def add_match_args(self):
+ self._cls_dict["__match_args__"] = tuple(
+ field.name
+ for field in self._attrs
+ if field.init and not field.kw_only
+ )
+
+ def add_attrs_init(self):
+ self._cls_dict["__attrs_init__"] = self._add_method_dunders(
+ _make_init(
+ self._cls,
+ self._attrs,
+ self._has_pre_init,
+ self._has_post_init,
+ self._frozen,
+ self._slots,
+ self._cache_hash,
+ self._base_attr_map,
+ self._is_exc,
+ self._on_setattr,
+ attrs_init=True,
+ )
+ )
+
+ return self
+
+ def add_eq(self):
+ cd = self._cls_dict
+
+ cd["__eq__"] = self._add_method_dunders(
+ _make_eq(self._cls, self._attrs)
+ )
+ cd["__ne__"] = self._add_method_dunders(_make_ne())
+
+ return self
+
+ def add_order(self):
+ cd = self._cls_dict
+
+ cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = (
+ self._add_method_dunders(meth)
+ for meth in _make_order(self._cls, self._attrs)
+ )
+
+ return self
+
+ def add_setattr(self):
+ if self._frozen:
+ return self
+
+ sa_attrs = {}
+ for a in self._attrs:
+ on_setattr = a.on_setattr or self._on_setattr
+ if on_setattr and on_setattr is not setters.NO_OP:
+ sa_attrs[a.name] = a, on_setattr
+
+ if not sa_attrs:
+ return self
+
+ if self._has_custom_setattr:
+ # We need to write a __setattr__ but there already is one!
+ raise ValueError(
+ "Can't combine custom __setattr__ with on_setattr hooks."
+ )
+
+ # docstring comes from _add_method_dunders
+ def __setattr__(self, name, val):
+ try:
+ a, hook = sa_attrs[name]
+ except KeyError:
+ nval = val
+ else:
+ nval = hook(self, a, val)
+
+ _obj_setattr(self, name, nval)
+
+ self._cls_dict["__attrs_own_setattr__"] = True
+ self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__)
+ self._wrote_own_setattr = True
+
+ return self
+
+ def _add_method_dunders(self, method):
+ """
+ Add __module__ and __qualname__ to a *method* if possible.
+ """
+ try:
+ method.__module__ = self._cls.__module__
+ except AttributeError:
+ pass
+
+ try:
+ method.__qualname__ = ".".join(
+ (self._cls.__qualname__, method.__name__)
+ )
+ except AttributeError:
+ pass
+
+ try:
+ method.__doc__ = (
+ "Method generated by attrs for class "
+ f"{self._cls.__qualname__}."
+ )
+ except AttributeError:
+ pass
+
+ return method
+
+
+def _determine_attrs_eq_order(cmp, eq, order, default_eq):
+ """
+ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+ values of eq and order. If *eq* is None, set it to *default_eq*.
+ """
+ if cmp is not None and any((eq is not None, order is not None)):
+        raise ValueError("Don't mix `cmp` with `eq` and `order`.")
+
+ # cmp takes precedence due to bw-compatibility.
+ if cmp is not None:
+ return cmp, cmp
+
+ # If left None, equality is set to the specified default and ordering
+ # mirrors equality.
+ if eq is None:
+ eq = default_eq
+
+ if order is None:
+ order = eq
+
+ if eq is False and order is True:
+ raise ValueError("`order` can only be True if `eq` is True too.")
+
+ return eq, order
+
+
+def _determine_attrib_eq_order(cmp, eq, order, default_eq):
+ """
+ Validate the combination of *cmp*, *eq*, and *order*. Derive the effective
+ values of eq and order. If *eq* is None, set it to *default_eq*.
+ """
+ if cmp is not None and any((eq is not None, order is not None)):
+        raise ValueError("Don't mix `cmp` with `eq` and `order`.")
+
+ def decide_callable_or_boolean(value):
+ """
+ Decide whether a key function is used.
+ """
+ if callable(value):
+ value, key = True, value
+ else:
+ key = None
+ return value, key
+
+ # cmp takes precedence due to bw-compatibility.
+ if cmp is not None:
+ cmp, cmp_key = decide_callable_or_boolean(cmp)
+ return cmp, cmp_key, cmp, cmp_key
+
+ # If left None, equality is set to the specified default and ordering
+ # mirrors equality.
+ if eq is None:
+ eq, eq_key = default_eq, None
+ else:
+ eq, eq_key = decide_callable_or_boolean(eq)
+
+ if order is None:
+ order, order_key = eq, eq_key
+ else:
+ order, order_key = decide_callable_or_boolean(order)
+
+ if eq is False and order is True:
+ raise ValueError("`order` can only be True if `eq` is True too.")
+
+ return eq, eq_key, order, order_key
+
+
+def _determine_whether_to_implement(
+ cls, flag, auto_detect, dunders, default=True
+):
+ """
+ Check whether we should implement a set of methods for *cls*.
+
+    *flag* is the argument passed into @attr.s like 'init', *auto_detect* is
+    the same as passed into @attr.s, and *dunders* is a tuple of attribute
+    names whose presence signals that the user has implemented the methods
+    themselves.
+
+    Return *default* if no reason either for or against is found.
+ """
+ if flag is True or flag is False:
+ return flag
+
+ if flag is None and auto_detect is False:
+ return default
+
+ # Logically, flag is None and auto_detect is True here.
+ for dunder in dunders:
+ if _has_own_attribute(cls, dunder):
+ return False
+
+ return default
+
+
+def attrs(
+ maybe_cls=None,
+ these=None,
+ repr_ns=None,
+ repr=None,
+ cmp=None,
+ hash=None,
+ init=None,
+ slots=False,
+ frozen=False,
+ weakref_slot=True,
+ str=False,
+ auto_attribs=False,
+ kw_only=False,
+ cache_hash=False,
+ auto_exc=False,
+ eq=None,
+ order=None,
+ auto_detect=False,
+ collect_by_mro=False,
+ getstate_setstate=None,
+ on_setattr=None,
+ field_transformer=None,
+ match_args=True,
+ unsafe_hash=None,
+):
+ r"""
+ A class decorator that adds :term:`dunder methods` according to the
+ specified attributes using `attr.ib` or the *these* argument.
+
+ Please consider using `attrs.define` / `attrs.frozen` in new code
+ (``attr.s`` will *never* go away, though).
+
+ :param these: A dictionary of name to `attr.ib` mappings. This is
+ useful to avoid the definition of your attributes within the class body
+ because you can't (e.g. if you want to add ``__repr__`` methods to
+ Django models) or don't want to.
+
+ If *these* is not ``None``, *attrs* will *not* search the class body
+ for attributes and will *not* remove any attributes from it.
+
+ The order is deduced from the order of the attributes inside *these*.
+
+ :type these: `dict` of `str` to `attr.ib`
+
+ :param str repr_ns: When using nested classes, there's no way in Python 2
+ to automatically detect that. Therefore it's possible to set the
+ namespace explicitly for a more meaningful ``repr`` output.
+ :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*,
+ *order*, and *hash* arguments explicitly, assume they are set to
+ ``True`` **unless any** of the involved methods for one of the
+ arguments is implemented in the *current* class (i.e. it is *not*
+ inherited from some base class).
+
+ So for example by implementing ``__eq__`` on a class yourself,
+ *attrs* will deduce ``eq=False`` and will create *neither*
+ ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible
+ ``__ne__`` by default, so it *should* be enough to only implement
+ ``__eq__`` in most cases).
+
+ .. warning::
+
+ If you prevent *attrs* from creating the ordering methods for you
+ (``order=False``, e.g. by implementing ``__le__``), it becomes
+ *your* responsibility to make sure its ordering is sound. The best
+ way is to use the `functools.total_ordering` decorator.
+
+
+ Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*,
+ *cmp*, or *hash* overrides whatever *auto_detect* would determine.
+
+ :param bool repr: Create a ``__repr__`` method with a human readable
+        representation of *attrs* attributes.
+ :param bool str: Create a ``__str__`` method that is identical to
+ ``__repr__``. This is usually not necessary except for
+ `Exception`\ s.
+ :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__``
+ and ``__ne__`` methods that check two instances for equality.
+
+ They compare the instances as if they were tuples of their *attrs*
+ attributes if and only if the types of both classes are *identical*!
+ :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``,
+ ``__gt__``, and ``__ge__`` methods that behave like *eq* above and
+ allow instances to be ordered. If ``None`` (default) mirror value of
+ *eq*.
+ :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq*
+ and *order* to the same value. Must not be mixed with *eq* or *order*.
+ :param Optional[bool] unsafe_hash: If ``None`` (default), the ``__hash__``
+ method is generated according how *eq* and *frozen* are set.
+
+ 1. If *both* are True, *attrs* will generate a ``__hash__`` for you.
+ 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to
+ None, marking it unhashable (which it is).
+        3. If *eq* is False, ``__hash__`` will be left untouched, meaning the
+           ``__hash__`` method of the base class will be used (if the base
+           class is ``object``, this means it will fall back to id-based
+           hashing).
+
+        Although not recommended, you can decide for yourself and force
+        *attrs* to create one (e.g. if the class is immutable even though you
+        didn't freeze it programmatically) by passing ``True``, or prevent
+        one from being created by passing ``False``. Both of these cases are
+        rather special and should be used carefully.
+
+ See our documentation on `hashing`, Python's documentation on
+ `object.__hash__`, and the `GitHub issue that led to the default \
+ behavior <https://github.com/python-attrs/attrs/issues/136>`_ for more
+ details.
+ :param Optional[bool] hash: Alias for *unsafe_hash*. *unsafe_hash* takes
+ precedence.
+ :param bool init: Create a ``__init__`` method that initializes the
+ *attrs* attributes. Leading underscores are stripped for the argument
+ name. If a ``__attrs_pre_init__`` method exists on the class, it will
+ be called before the class is initialized. If a ``__attrs_post_init__``
+ method exists on the class, it will be called after the class is fully
+ initialized.
+
+ If ``init`` is ``False``, an ``__attrs_init__`` method will be
+ injected instead. This allows you to define a custom ``__init__``
+ method that can do pre-init work such as ``super().__init__()``,
+ and then call ``__attrs_init__()`` and ``__attrs_post_init__()``.
+ :param bool slots: Create a :term:`slotted class <slotted classes>` that's
+ more memory-efficient. Slotted classes are generally superior to the
+ default dict classes, but have some gotchas you should know about, so
+ we encourage you to read the :term:`glossary entry <slotted classes>`.
+ :param bool frozen: Make instances immutable after initialization. If
+ someone attempts to modify a frozen instance,
+ `attrs.exceptions.FrozenInstanceError` is raised.
+
+ .. note::
+
+ 1. This is achieved by installing a custom ``__setattr__`` method
+ on your class, so you can't implement your own.
+
+ 2. True immutability is impossible in Python.
+
+            3. This *does* have a minor runtime performance `impact
+ <how-frozen>` when initializing new instances. In other words:
+ ``__init__`` is slightly slower with ``frozen=True``.
+
+ 4. If a class is frozen, you cannot modify ``self`` in
+ ``__attrs_post_init__`` or a self-written ``__init__``. You can
+ circumvent that limitation by using
+ ``object.__setattr__(self, "attribute_name", value)``.
+
+ 5. Subclasses of a frozen class are frozen too.
+
+ :param bool weakref_slot: Make instances weak-referenceable. This has no
+ effect unless ``slots`` is also enabled.
+ :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated
+ attributes from the class body.
+
+ In this case, you **must** annotate every field. If *attrs*
+ encounters a field that is set to an `attr.ib` but lacks a type
+ annotation, an `attr.exceptions.UnannotatedAttributeError` is
+ raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't
+ want to set a type.
+
+ If you assign a value to those attributes (e.g. ``x: int = 42``), that
+ value becomes the default value like if it were passed using
+ ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also
+ works as expected in most cases (see warning below).
+
+ Attributes annotated as `typing.ClassVar`, and attributes that are
+ neither annotated nor set to an `attr.ib` are **ignored**.
+
+ .. warning::
+ For features that use the attribute name to create decorators (e.g.
+ :ref:`validators <validators>`), you still *must* assign `attr.ib`
+ to them. Otherwise Python will either not find the name or try to
+ use the default value to call e.g. ``validator`` on it.
+
+ These errors can be quite confusing and probably the most common bug
+ report on our bug tracker.
+
+ :param bool kw_only: Make all attributes keyword-only
+ in the generated ``__init__`` (if ``init`` is ``False``, this
+ parameter is ignored).
+ :param bool cache_hash: Ensure that the object's hash code is computed
+ only once and stored on the object. If this is set to ``True``,
+ hashing must be either explicitly or implicitly enabled for this
+ class. If the hash code is cached, avoid any reassignments of
+ fields involved in hash code computation or mutations of the objects
+ those fields point to after object creation. If such changes occur,
+ the behavior of the object's hash code is undefined.
+ :param bool auto_exc: If the class subclasses `BaseException`
+ (which implicitly includes any subclass of any exception), the
+        following happens so that it behaves like a well-behaved Python
+        exception class:
+
+ - the values for *eq*, *order*, and *hash* are ignored and the
+ instances compare and hash by the instance's ids (N.B. *attrs* will
+ *not* remove existing implementations of ``__hash__`` or the equality
+          methods. It just won't add its own.),
+ - all attributes that are either passed into ``__init__`` or have a
+ default value are additionally available as a tuple in the ``args``
+ attribute,
+ - the value of *str* is ignored leaving ``__str__`` to base classes.
+ :param bool collect_by_mro: Setting this to `True` fixes the way *attrs*
+ collects attributes from base classes. The default behavior is
+ incorrect in certain cases of multiple inheritance. It should be on by
+ default but is kept off for backward-compatibility.
+
+ See issue `#428 <https://github.com/python-attrs/attrs/issues/428>`_ for
+ more details.
+
+ :param Optional[bool] getstate_setstate:
+ .. note::
+ This is usually only interesting for slotted classes and you should
+ probably just set *auto_detect* to `True`.
+
+ If `True`, ``__getstate__`` and
+ ``__setstate__`` are generated and attached to the class. This is
+ necessary for slotted classes to be pickleable. If left `None`, it's
+ `True` by default for slotted classes and ``False`` for dict classes.
+
+ If *auto_detect* is `True`, and *getstate_setstate* is left `None`,
+ and **either** ``__getstate__`` or ``__setstate__`` is detected directly
+ on the class (i.e. not inherited), it is set to `False` (this is usually
+ what you want).
+
+ :param on_setattr: A callable that is run whenever the user attempts to set
+ an attribute (either by assignment like ``i.x = 42`` or by using
+ `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments
+ as validators: the instance, the attribute that is being modified, and
+ the new value.
+
+ If no exception is raised, the attribute is set to the return value of
+ the callable.
+
+ If a list of callables is passed, they're automatically wrapped in an
+ `attrs.setters.pipe`.
+ :type on_setattr: `callable`, or a list of callables, or `None`, or
+ `attrs.setters.NO_OP`
+
+ :param Optional[callable] field_transformer:
+ A function that is called with the original class object and all
+ fields right before *attrs* finalizes the class. You can use
+ this, e.g., to automatically add converters or validators to
+ fields based on their types. See `transform-fields` for more details.
+
+ :param bool match_args:
+ If `True` (default), set ``__match_args__`` on the class to support
+ :pep:`634` (Structural Pattern Matching). It is a tuple of all
+ non-keyword-only ``__init__`` parameter names on Python 3.10 and later.
+ Ignored on older Python versions.
+
+ .. versionadded:: 16.0.0 *slots*
+ .. versionadded:: 16.1.0 *frozen*
+ .. versionadded:: 16.3.0 *str*
+ .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``.
+ .. versionchanged:: 17.1.0
+ *hash* supports ``None`` as value which is also the default now.
+ .. versionadded:: 17.3.0 *auto_attribs*
+ .. versionchanged:: 18.1.0
+ If *these* is passed, no attributes are deleted from the class body.
+ .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained.
+ .. versionadded:: 18.2.0 *weakref_slot*
+ .. deprecated:: 18.2.0
+ ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a
+ `DeprecationWarning` if the classes compared are subclasses of
+        each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses
+ to each other.
+ .. versionchanged:: 19.2.0
+ ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider
+ subclasses comparable anymore.
+ .. versionadded:: 18.2.0 *kw_only*
+ .. versionadded:: 18.2.0 *cache_hash*
+ .. versionadded:: 19.1.0 *auto_exc*
+ .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01.
+ .. versionadded:: 19.2.0 *eq* and *order*
+ .. versionadded:: 20.1.0 *auto_detect*
+ .. versionadded:: 20.1.0 *collect_by_mro*
+ .. versionadded:: 20.1.0 *getstate_setstate*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionadded:: 20.3.0 *field_transformer*
+ .. versionchanged:: 21.1.0
+ ``init=False`` injects ``__attrs_init__``
+ .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__``
+ .. versionchanged:: 21.1.0 *cmp* undeprecated
+ .. versionadded:: 21.3.0 *match_args*
+ .. versionadded:: 22.2.0
+ *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
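+
+    A minimal usage sketch (``Point`` is a hypothetical example class)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class Point:
+        ...     x = attr.ib()
+        ...     y = attr.ib(default=0)
+        >>> Point(1)
+        Point(x=1, y=0)
+        >>> Point(1) == Point(1, 0)
+        True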
+ """
+ eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None)
+
+ # unsafe_hash takes precedence due to PEP 681.
+ if unsafe_hash is not None:
+ hash = unsafe_hash
+
+ if isinstance(on_setattr, (list, tuple)):
+ on_setattr = setters.pipe(*on_setattr)
+
+ def wrap(cls):
+ is_frozen = frozen or _has_frozen_base_class(cls)
+ is_exc = auto_exc is True and issubclass(cls, BaseException)
+ has_own_setattr = auto_detect and _has_own_attribute(
+ cls, "__setattr__"
+ )
+
+ if has_own_setattr and is_frozen:
+ raise ValueError("Can't freeze a class with a custom __setattr__.")
+
+ builder = _ClassBuilder(
+ cls,
+ these,
+ slots,
+ is_frozen,
+ weakref_slot,
+ _determine_whether_to_implement(
+ cls,
+ getstate_setstate,
+ auto_detect,
+ ("__getstate__", "__setstate__"),
+ default=slots,
+ ),
+ auto_attribs,
+ kw_only,
+ cache_hash,
+ is_exc,
+ collect_by_mro,
+ on_setattr,
+ has_own_setattr,
+ field_transformer,
+ )
+ if _determine_whether_to_implement(
+ cls, repr, auto_detect, ("__repr__",)
+ ):
+ builder.add_repr(repr_ns)
+ if str is True:
+ builder.add_str()
+
+ eq = _determine_whether_to_implement(
+ cls, eq_, auto_detect, ("__eq__", "__ne__")
+ )
+ if not is_exc and eq is True:
+ builder.add_eq()
+ if not is_exc and _determine_whether_to_implement(
+ cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__")
+ ):
+ builder.add_order()
+
+ builder.add_setattr()
+
+ nonlocal hash
+ if (
+ hash is None
+ and auto_detect is True
+ and _has_own_attribute(cls, "__hash__")
+ ):
+ hash = False
+
+ if hash is not True and hash is not False and hash is not None:
+            # Can't use `hash in (True, False, None)` because e.g. 1 == True.
+ raise TypeError(
+ "Invalid value for hash. Must be True, False, or None."
+ )
+ elif hash is False or (hash is None and eq is False) or is_exc:
+ # Don't do anything. Should fall back to __object__'s __hash__
+ # which is by id.
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " hashing must be either explicitly or implicitly "
+ "enabled."
+ )
+ elif hash is True or (
+ hash is None and eq is True and is_frozen is True
+ ):
+ # Build a __hash__ if told so, or if it's safe.
+ builder.add_hash()
+ else:
+ # Raise TypeError on attempts to hash.
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " hashing must be either explicitly or implicitly "
+ "enabled."
+ )
+ builder.make_unhashable()
+
+ if _determine_whether_to_implement(
+ cls, init, auto_detect, ("__init__",)
+ ):
+ builder.add_init()
+ else:
+ builder.add_attrs_init()
+ if cache_hash:
+ raise TypeError(
+ "Invalid value for cache_hash. To use hash caching,"
+ " init must be True."
+ )
+
+ if (
+ PY310
+ and match_args
+ and not _has_own_attribute(cls, "__match_args__")
+ ):
+ builder.add_match_args()
+
+ return builder.build_class()
+
+ # maybe_cls's type depends on the usage of the decorator. It's a class
+ # if it's used as `@attrs` but ``None`` if used as `@attrs()`.
+ if maybe_cls is None:
+ return wrap
+ else:
+ return wrap(maybe_cls)
+
+
+_attrs = attrs
+"""
+Internal alias so we can use it in functions that take an argument called
+*attrs*.
+"""
+
+
+def _has_frozen_base_class(cls):
+ """
+ Check whether *cls* has a frozen ancestor by looking at its
+ __setattr__.
+ """
+ return cls.__setattr__ is _frozen_setattrs
+
+
+def _generate_unique_filename(cls, func_name):
+ """
+ Create a "filename" suitable for a function being generated.
+ """
+ return (
+ f"<attrs generated {func_name} {cls.__module__}."
+ f"{getattr(cls, '__qualname__', cls.__name__)}>"
+ )
+
+
+def _make_hash(cls, attrs, frozen, cache_hash):
+ attrs = tuple(
+ a for a in attrs if a.hash is True or (a.hash is None and a.eq is True)
+ )
+
+ tab = " "
+
+ unique_filename = _generate_unique_filename(cls, "hash")
+ type_hash = hash(unique_filename)
+ # If eq is custom generated, we need to include the functions in globs
+ globs = {}
+
+ hash_def = "def __hash__(self"
+ hash_func = "hash(("
+ closing_braces = "))"
+ if not cache_hash:
+ hash_def += "):"
+ else:
+ hash_def += ", *"
+
+ hash_def += (
+ ", _cache_wrapper="
+ + "__import__('attr._make')._make._CacheHashWrapper):"
+ )
+ hash_func = "_cache_wrapper(" + hash_func
+ closing_braces += ")"
+
+ method_lines = [hash_def]
+
+ def append_hash_computation_lines(prefix, indent):
+ """
+ Generate the code for actually computing the hash code.
+        Below, this will either be returned directly or used to compute
+        a value which is then cached, depending on the value of cache_hash.
+ """
+
+ method_lines.extend(
+ [
+ indent + prefix + hash_func,
+ indent + f" {type_hash},",
+ ]
+ )
+
+ for a in attrs:
+ if a.eq_key:
+ cmp_name = f"_{a.name}_key"
+ globs[cmp_name] = a.eq_key
+ method_lines.append(
+ indent + f" {cmp_name}(self.{a.name}),"
+ )
+ else:
+ method_lines.append(indent + f" self.{a.name},")
+
+ method_lines.append(indent + " " + closing_braces)
+
+ if cache_hash:
+ method_lines.append(tab + f"if self.{_hash_cache_field} is None:")
+ if frozen:
+ append_hash_computation_lines(
+ f"object.__setattr__(self, '{_hash_cache_field}', ", tab * 2
+ )
+ method_lines.append(tab * 2 + ")") # close __setattr__
+ else:
+ append_hash_computation_lines(
+ f"self.{_hash_cache_field} = ", tab * 2
+ )
+ method_lines.append(tab + f"return self.{_hash_cache_field}")
+ else:
+ append_hash_computation_lines("return ", tab)
+
+ script = "\n".join(method_lines)
+ return _make_method("__hash__", script, unique_filename, globs)
+
+
+def _add_hash(cls, attrs):
+ """
+ Add a hash method to *cls*.
+ """
+ cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False)
+ return cls
+
+
+def _make_ne():
+ """
+ Create __ne__ method.
+ """
+
+ def __ne__(self, other):
+ """
+ Check equality and either forward a NotImplemented or
+ return the result negated.
+ """
+ result = self.__eq__(other)
+ if result is NotImplemented:
+ return NotImplemented
+
+ return not result
+
+ return __ne__
+
+
+def _make_eq(cls, attrs):
+ """
+ Create __eq__ method for *cls* with *attrs*.
+ """
+ attrs = [a for a in attrs if a.eq]
+
+ unique_filename = _generate_unique_filename(cls, "eq")
+ lines = [
+ "def __eq__(self, other):",
+ " if other.__class__ is not self.__class__:",
+ " return NotImplemented",
+ ]
+
+    # We can't just do a big `self.x == other.x and ...` clause due to
+    # irregularities like `nan == nan` being false but `(nan,) == (nan,)`
+    # being true.
+ globs = {}
+ if attrs:
+ lines.append(" return (")
+ others = [" ) == ("]
+ for a in attrs:
+ if a.eq_key:
+ cmp_name = f"_{a.name}_key"
+ # Add the key function to the global namespace
+ # of the evaluated function.
+ globs[cmp_name] = a.eq_key
+ lines.append(f" {cmp_name}(self.{a.name}),")
+ others.append(f" {cmp_name}(other.{a.name}),")
+ else:
+ lines.append(f" self.{a.name},")
+ others.append(f" other.{a.name},")
+
+ lines += others + [" )"]
+ else:
+ lines.append(" return True")
+
+ script = "\n".join(lines)
+
+ return _make_method("__eq__", script, unique_filename, globs)
+
+
+def _make_order(cls, attrs):
+ """
+ Create ordering methods for *cls* with *attrs*.
+ """
+ attrs = [a for a in attrs if a.order]
+
+ def attrs_to_tuple(obj):
+ """
+ Save us some typing.
+ """
+ return tuple(
+ key(value) if key else value
+ for value, key in (
+ (getattr(obj, a.name), a.order_key) for a in attrs
+ )
+ )
+
+ def __lt__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) < attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __le__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) <= attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __gt__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) > attrs_to_tuple(other)
+
+ return NotImplemented
+
+ def __ge__(self, other):
+ """
+ Automatically created by attrs.
+ """
+ if other.__class__ is self.__class__:
+ return attrs_to_tuple(self) >= attrs_to_tuple(other)
+
+ return NotImplemented
+
+ return __lt__, __le__, __gt__, __ge__
+
+
+def _add_eq(cls, attrs=None):
+ """
+ Add equality methods to *cls* with *attrs*.
+ """
+ if attrs is None:
+ attrs = cls.__attrs_attrs__
+
+ cls.__eq__ = _make_eq(cls, attrs)
+ cls.__ne__ = _make_ne()
+
+ return cls
+
+
+def _make_repr(attrs, ns, cls):
+ unique_filename = _generate_unique_filename(cls, "repr")
+ # Figure out which attributes to include, and which function to use to
+ # format them. The a.repr value can be either bool or a custom
+ # callable.
+ attr_names_with_reprs = tuple(
+ (a.name, (repr if a.repr is True else a.repr), a.init)
+ for a in attrs
+ if a.repr is not False
+ )
+ globs = {
+ name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr
+ }
+ globs["_compat"] = _compat
+ globs["AttributeError"] = AttributeError
+ globs["NOTHING"] = NOTHING
+ attribute_fragments = []
+ for name, r, i in attr_names_with_reprs:
+ accessor = (
+ "self." + name if i else 'getattr(self, "' + name + '", NOTHING)'
+ )
+ fragment = (
+ "%s={%s!r}" % (name, accessor)
+ if r == repr
+ else "%s={%s_repr(%s)}" % (name, name, accessor)
+ )
+ attribute_fragments.append(fragment)
+ repr_fragment = ", ".join(attribute_fragments)
+
+ if ns is None:
+ cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}'
+ else:
+ cls_name_fragment = ns + ".{self.__class__.__name__}"
+
+ lines = [
+ "def __repr__(self):",
+ " try:",
+ " already_repring = _compat.repr_context.already_repring",
+ " except AttributeError:",
+ " already_repring = {id(self),}",
+ " _compat.repr_context.already_repring = already_repring",
+ " else:",
+ " if id(self) in already_repring:",
+ " return '...'",
+ " else:",
+ " already_repring.add(id(self))",
+ " try:",
+ f" return f'{cls_name_fragment}({repr_fragment})'",
+ " finally:",
+ " already_repring.remove(id(self))",
+ ]
+
+ return _make_method(
+ "__repr__", "\n".join(lines), unique_filename, globs=globs
+ )
+
+
+def _add_repr(cls, ns=None, attrs=None):
+ """
+ Add a repr method to *cls*.
+ """
+ if attrs is None:
+ attrs = cls.__attrs_attrs__
+
+ cls.__repr__ = _make_repr(attrs, ns, cls)
+ return cls
+
+
+def fields(cls):
+ """
+ Return the tuple of *attrs* attributes for a class.
+
+ The tuple also allows accessing the fields by their names (see below for
+ examples).
+
+ :param type cls: Class to introspect.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ :rtype: tuple (with name accessors) of `attrs.Attribute`
+
+ .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields
+ by name.
+ .. versionchanged:: 23.1.0 Add support for generic classes.
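+
+    A minimal usage sketch (``Coordinates`` is a hypothetical example
+    class)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class Coordinates:
+        ...     x = attr.ib()
+        ...     y = attr.ib()
+        >>> fields(Coordinates).x is fields(Coordinates)[0]
+        True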
+ """
+ generic_base = get_generic_base(cls)
+
+ if generic_base is None and not isinstance(cls, type):
+ raise TypeError("Passed object must be a class.")
+
+ attrs = getattr(cls, "__attrs_attrs__", None)
+
+ if attrs is None:
+ if generic_base is not None:
+ attrs = getattr(generic_base, "__attrs_attrs__", None)
+ if attrs is not None:
+ # Even though this is global state, stick it on here to speed
+ # it up. We rely on `cls` being cached for this to be
+ # efficient.
+ cls.__attrs_attrs__ = attrs
+ return attrs
+ raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.")
+
+ return attrs
+
+
+def fields_dict(cls):
+ """
+ Return an ordered dictionary of *attrs* attributes for a class, whose
+ keys are the attribute names.
+
+ :param type cls: Class to introspect.
+
+ :raise TypeError: If *cls* is not a class.
+ :raise attrs.exceptions.NotAnAttrsClassError: If *cls* is not an *attrs*
+ class.
+
+ :rtype: dict
+
+ .. versionadded:: 18.1.0
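+
+    A minimal usage sketch (``Coordinates`` is a hypothetical example
+    class)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class Coordinates:
+        ...     x = attr.ib()
+        >>> fields_dict(Coordinates)["x"].name
+        'x'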
+ """
+ if not isinstance(cls, type):
+ raise TypeError("Passed object must be a class.")
+ attrs = getattr(cls, "__attrs_attrs__", None)
+ if attrs is None:
+ raise NotAnAttrsClassError(f"{cls!r} is not an attrs-decorated class.")
+ return {a.name: a for a in attrs}
+
+
+def validate(inst):
+ """
+ Validate all attributes on *inst* that have a validator.
+
+    Lets all exceptions through.
+
+ :param inst: Instance of a class with *attrs* attributes.
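+
+    A minimal usage sketch (``C`` is a hypothetical example class; note that
+    plain ``attr.s`` classes do not re-validate on assignment)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C:
+        ...     x = attr.ib(validator=attr.validators.instance_of(int))
+        >>> i = C(42)
+        >>> i.x = "not an int"  # plain assignment is not validated
+        >>> validate(i)  # doctest: +IGNORE_EXCEPTION_DETAIL
+        Traceback (most recent call last):
+        ...
+        TypeError: ...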
+ """
+ if _config._run_validators is False:
+ return
+
+ for a in fields(inst.__class__):
+ v = a.validator
+ if v is not None:
+ v(inst, a, getattr(inst, a.name))
+
+
+def _is_slot_cls(cls):
+ return "__slots__" in cls.__dict__
+
+
+def _is_slot_attr(a_name, base_attr_map):
+ """
+ Check if the attribute name comes from a slot class.
+ """
+ return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name])
+
+
+def _make_init(
+ cls,
+ attrs,
+ pre_init,
+ post_init,
+ frozen,
+ slots,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ cls_on_setattr,
+ attrs_init,
+):
+ has_cls_on_setattr = (
+ cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP
+ )
+
+ if frozen and has_cls_on_setattr:
+ raise ValueError("Frozen classes can't use on_setattr.")
+
+ needs_cached_setattr = cache_hash or frozen
+ filtered_attrs = []
+ attr_dict = {}
+ for a in attrs:
+ if not a.init and a.default is NOTHING:
+ continue
+
+ filtered_attrs.append(a)
+ attr_dict[a.name] = a
+
+ if a.on_setattr is not None:
+ if frozen is True:
+ raise ValueError("Frozen classes can't use on_setattr.")
+
+ needs_cached_setattr = True
+ elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP:
+ needs_cached_setattr = True
+
+ unique_filename = _generate_unique_filename(cls, "init")
+
+ script, globs, annotations = _attrs_to_init_script(
+ filtered_attrs,
+ frozen,
+ slots,
+ pre_init,
+ post_init,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ needs_cached_setattr,
+ has_cls_on_setattr,
+ attrs_init,
+ )
+ if cls.__module__ in sys.modules:
+ # This makes typing.get_type_hints(CLS.__init__) resolve string types.
+ globs.update(sys.modules[cls.__module__].__dict__)
+
+ globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict})
+
+ if needs_cached_setattr:
+ # Save the lookup overhead in __init__ if we need to circumvent
+ # setattr hooks.
+ globs["_cached_setattr_get"] = _obj_setattr.__get__
+
+ init = _make_method(
+ "__attrs_init__" if attrs_init else "__init__",
+ script,
+ unique_filename,
+ globs,
+ )
+ init.__annotations__ = annotations
+
+ return init
+
+
+def _setattr(attr_name, value_var, has_on_setattr):
+ """
+ Use the cached object.setattr to set *attr_name* to *value_var*.
+ """
+ return f"_setattr('{attr_name}', {value_var})"
+
+
+def _setattr_with_converter(attr_name, value_var, has_on_setattr):
+ """
+ Use the cached object.setattr to set *attr_name* to *value_var*, but run
+ its converter first.
+ """
+ return "_setattr('%s', %s(%s))" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+
+def _assign(attr_name, value, has_on_setattr):
+ """
+    Unless *attr_name* has an on_setattr hook, use normal assignment.
+    Otherwise delegate to _setattr.
+ """
+ if has_on_setattr:
+ return _setattr(attr_name, value, True)
+
+ return f"self.{attr_name} = {value}"
+
+
+def _assign_with_converter(attr_name, value_var, has_on_setattr):
+ """
+ Unless *attr_name* has an on_setattr hook, use normal assignment after
+    conversion. Otherwise delegate to _setattr_with_converter.
+ """
+ if has_on_setattr:
+ return _setattr_with_converter(attr_name, value_var, True)
+
+ return "self.%s = %s(%s)" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+
+def _attrs_to_init_script(
+ attrs,
+ frozen,
+ slots,
+ pre_init,
+ post_init,
+ cache_hash,
+ base_attr_map,
+ is_exc,
+ needs_cached_setattr,
+ has_cls_on_setattr,
+ attrs_init,
+):
+ """
+ Return a script of an initializer for *attrs* and a dict of globals.
+
+ The globals are expected by the generated script.
+
+ If *frozen* is True, we cannot set the attributes directly so we use
+ a cached ``object.__setattr__``.
+ """
+ lines = []
+ if pre_init:
+ lines.append("self.__attrs_pre_init__()")
+
+ if needs_cached_setattr:
+ lines.append(
+ # Circumvent the __setattr__ descriptor to save one lookup per
+ # assignment.
+ # Note _setattr will be used again below if cache_hash is True
+ "_setattr = _cached_setattr_get(self)"
+ )
+
+ if frozen is True:
+ if slots is True:
+ fmt_setter = _setattr
+ fmt_setter_with_converter = _setattr_with_converter
+ else:
+ # Dict frozen classes assign directly to __dict__.
+ # But only if the attribute doesn't come from an ancestor slot
+ # class.
+ # Note _inst_dict will be used again below if cache_hash is True
+ lines.append("_inst_dict = self.__dict__")
+
+ def fmt_setter(attr_name, value_var, has_on_setattr):
+ if _is_slot_attr(attr_name, base_attr_map):
+ return _setattr(attr_name, value_var, has_on_setattr)
+
+ return f"_inst_dict['{attr_name}'] = {value_var}"
+
+ def fmt_setter_with_converter(
+ attr_name, value_var, has_on_setattr
+ ):
+ if has_on_setattr or _is_slot_attr(attr_name, base_attr_map):
+ return _setattr_with_converter(
+ attr_name, value_var, has_on_setattr
+ )
+
+ return "_inst_dict['%s'] = %s(%s)" % (
+ attr_name,
+ _init_converter_pat % (attr_name,),
+ value_var,
+ )
+
+ else:
+ # Not frozen.
+ fmt_setter = _assign
+ fmt_setter_with_converter = _assign_with_converter
+
+ args = []
+ kw_only_args = []
+ attrs_to_validate = []
+
+ # This is a dictionary of names to validator and converter callables.
+ # Injecting this into __init__ globals lets us avoid lookups.
+ names_for_globals = {}
+ annotations = {"return": None}
+
+ for a in attrs:
+ if a.validator:
+ attrs_to_validate.append(a)
+
+ attr_name = a.name
+ has_on_setattr = a.on_setattr is not None or (
+ a.on_setattr is not setters.NO_OP and has_cls_on_setattr
+ )
+ # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not
+ # explicitly provided
+ arg_name = a.alias
+
+ has_factory = isinstance(a.default, Factory)
+ if has_factory and a.default.takes_self:
+ maybe_self = "self"
+ else:
+ maybe_self = ""
+
+ if a.init is False:
+ if has_factory:
+ init_factory_name = _init_factory_pat % (a.name,)
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name,
+ init_factory_name + f"({maybe_self})",
+ has_on_setattr,
+ )
+ )
+ conv_name = _init_converter_pat % (a.name,)
+ names_for_globals[conv_name] = a.converter
+ else:
+ lines.append(
+ fmt_setter(
+ attr_name,
+ init_factory_name + f"({maybe_self})",
+ has_on_setattr,
+ )
+ )
+ names_for_globals[init_factory_name] = a.default.factory
+ else:
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name,
+ f"attr_dict['{attr_name}'].default",
+ has_on_setattr,
+ )
+ )
+ conv_name = _init_converter_pat % (a.name,)
+ names_for_globals[conv_name] = a.converter
+ else:
+ lines.append(
+ fmt_setter(
+ attr_name,
+ f"attr_dict['{attr_name}'].default",
+ has_on_setattr,
+ )
+ )
+ elif a.default is not NOTHING and not has_factory:
+ arg = f"{arg_name}=attr_dict['{attr_name}'].default"
+ if a.kw_only:
+ kw_only_args.append(arg)
+ else:
+ args.append(arg)
+
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
+ elif has_factory:
+ arg = f"{arg_name}=NOTHING"
+ if a.kw_only:
+ kw_only_args.append(arg)
+ else:
+ args.append(arg)
+ lines.append(f"if {arg_name} is not NOTHING:")
+
+ init_factory_name = _init_factory_pat % (a.name,)
+ if a.converter is not None:
+ lines.append(
+ " "
+ + fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ lines.append("else:")
+ lines.append(
+ " "
+ + fmt_setter_with_converter(
+ attr_name,
+ init_factory_name + "(" + maybe_self + ")",
+ has_on_setattr,
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(
+ " " + fmt_setter(attr_name, arg_name, has_on_setattr)
+ )
+ lines.append("else:")
+ lines.append(
+ " "
+ + fmt_setter(
+ attr_name,
+ init_factory_name + "(" + maybe_self + ")",
+ has_on_setattr,
+ )
+ )
+ names_for_globals[init_factory_name] = a.default.factory
+ else:
+ if a.kw_only:
+ kw_only_args.append(arg_name)
+ else:
+ args.append(arg_name)
+
+ if a.converter is not None:
+ lines.append(
+ fmt_setter_with_converter(
+ attr_name, arg_name, has_on_setattr
+ )
+ )
+ names_for_globals[
+ _init_converter_pat % (a.name,)
+ ] = a.converter
+ else:
+ lines.append(fmt_setter(attr_name, arg_name, has_on_setattr))
+
+ if a.init is True:
+ if a.type is not None and a.converter is None:
+ annotations[arg_name] = a.type
+ elif a.converter is not None:
+ # Try to get the type from the converter.
+ t = _AnnotationExtractor(a.converter).get_first_param_type()
+ if t:
+ annotations[arg_name] = t
+
+ if attrs_to_validate: # we can skip this if there are no validators.
+ names_for_globals["_config"] = _config
+ lines.append("if _config._run_validators is True:")
+ for a in attrs_to_validate:
+ val_name = "__attr_validator_" + a.name
+ attr_name = "__attr_" + a.name
+ lines.append(f" {val_name}(self, {attr_name}, self.{a.name})")
+ names_for_globals[val_name] = a.validator
+ names_for_globals[attr_name] = a
+
+ if post_init:
+ lines.append("self.__attrs_post_init__()")
+
+    # Because this is set only after __attrs_post_init__ is called, a crash
+ # will result if post-init tries to access the hash code. This seemed
+ # preferable to setting this beforehand, in which case alteration to
+ # field values during post-init combined with post-init accessing the
+ # hash code would result in silent bugs.
+ if cache_hash:
+ if frozen:
+ if slots:
+ # if frozen and slots, then _setattr defined above
+ init_hash_cache = "_setattr('%s', %s)"
+ else:
+ # if frozen and not slots, then _inst_dict defined above
+ init_hash_cache = "_inst_dict['%s'] = %s"
+ else:
+ init_hash_cache = "self.%s = %s"
+ lines.append(init_hash_cache % (_hash_cache_field, "None"))
+
+ # For exceptions we rely on BaseException.__init__ for proper
+ # initialization.
+ if is_exc:
+ vals = ",".join(f"self.{a.name}" for a in attrs if a.init)
+
+ lines.append(f"BaseException.__init__(self, {vals})")
+
+ args = ", ".join(args)
+ if kw_only_args:
+ args += "%s*, %s" % (
+ ", " if args else "", # leading comma
+ ", ".join(kw_only_args), # kw_only args
+ )
+
+ return (
+ "def %s(self, %s):\n %s\n"
+ % (
+ ("__attrs_init__" if attrs_init else "__init__"),
+ args,
+ "\n ".join(lines) if lines else "pass",
+ ),
+ names_for_globals,
+ annotations,
+ )
+
+
+def _default_init_alias_for(name: str) -> str:
+ """
+ The default __init__ parameter name for a field.
+
+    This performs private-name adjustment via leading-underscore stripping,
+ and is the default value of Attribute.alias if not provided.
+ """
+
+ return name.lstrip("_")
+
+
+class Attribute:
+ """
+ *Read-only* representation of an attribute.
+
+ .. warning::
+
+ You should never instantiate this class yourself.
+
+ The class has *all* arguments of `attr.ib` (except for ``factory``
+    which is only syntactic sugar for ``default=Factory(...)``) plus the
+ following:
+
+ - ``name`` (`str`): The name of the attribute.
+ - ``alias`` (`str`): The __init__ parameter name of the attribute, after
+ any explicit overrides and default private-attribute-name handling.
+ - ``inherited`` (`bool`): Whether or not that attribute has been inherited
+ from a base class.
+ - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables
+ that are used for comparing and ordering objects by this attribute,
+ respectively. These are set by passing a callable to `attr.ib`'s ``eq``,
+ ``order``, or ``cmp`` arguments. See also :ref:`comparison customization
+ <custom-comparison>`.
+
+ Instances of this class are frequently used for introspection purposes
+ like:
+
+ - `fields` returns a tuple of them.
+ - Validators get them passed as the first argument.
+ - The :ref:`field transformer <transform-fields>` hook receives a list of
+ them.
+ - The ``alias`` property exposes the __init__ parameter name of the field,
+ with any overrides and default private-attribute handling applied.
+
+
+ .. versionadded:: 20.1.0 *inherited*
+ .. versionadded:: 20.1.0 *on_setattr*
+ .. versionchanged:: 20.2.0 *inherited* is not taken into account for
+ equality checks and hashing anymore.
+ .. versionadded:: 21.1.0 *eq_key* and *order_key*
+ .. versionadded:: 22.2.0 *alias*
+
+ For the full version history of the fields, see `attr.ib`.
+ """
+
+ __slots__ = (
+ "name",
+ "default",
+ "validator",
+ "repr",
+ "eq",
+ "eq_key",
+ "order",
+ "order_key",
+ "hash",
+ "init",
+ "metadata",
+ "type",
+ "converter",
+ "kw_only",
+ "inherited",
+ "on_setattr",
+ "alias",
+ )
+
+ def __init__(
+ self,
+ name,
+ default,
+ validator,
+ repr,
+ cmp, # XXX: unused, remove along with other cmp code.
+ hash,
+ init,
+ inherited,
+ metadata=None,
+ type=None,
+ converter=None,
+ kw_only=False,
+ eq=None,
+ eq_key=None,
+ order=None,
+ order_key=None,
+ on_setattr=None,
+ alias=None,
+ ):
+ eq, eq_key, order, order_key = _determine_attrib_eq_order(
+ cmp, eq_key or eq, order_key or order, True
+ )
+
+ # Cache this descriptor here to speed things up later.
+ bound_setattr = _obj_setattr.__get__(self)
+
+ # Despite the big red warning, people *do* instantiate `Attribute`
+ # themselves.
+ bound_setattr("name", name)
+ bound_setattr("default", default)
+ bound_setattr("validator", validator)
+ bound_setattr("repr", repr)
+ bound_setattr("eq", eq)
+ bound_setattr("eq_key", eq_key)
+ bound_setattr("order", order)
+ bound_setattr("order_key", order_key)
+ bound_setattr("hash", hash)
+ bound_setattr("init", init)
+ bound_setattr("converter", converter)
+ bound_setattr(
+ "metadata",
+ (
+ types.MappingProxyType(dict(metadata)) # Shallow copy
+ if metadata
+ else _empty_metadata_singleton
+ ),
+ )
+ bound_setattr("type", type)
+ bound_setattr("kw_only", kw_only)
+ bound_setattr("inherited", inherited)
+ bound_setattr("on_setattr", on_setattr)
+ bound_setattr("alias", alias)
+
+ def __setattr__(self, name, value):
+ raise FrozenInstanceError()
+
+ @classmethod
+ def from_counting_attr(cls, name, ca, type=None):
+ # type holds the annotated value. deal with conflicts:
+ if type is None:
+ type = ca.type
+ elif ca.type is not None:
+ raise ValueError(
+ "Type annotation and type argument cannot both be present"
+ )
+ inst_dict = {
+ k: getattr(ca, k)
+ for k in Attribute.__slots__
+ if k
+ not in (
+ "name",
+ "validator",
+ "default",
+ "type",
+ "inherited",
+ ) # exclude methods and deprecated alias
+ }
+ return cls(
+ name=name,
+ validator=ca._validator,
+ default=ca._default,
+ type=type,
+ cmp=None,
+ inherited=False,
+ **inst_dict,
+ )
+
+ # Don't use attrs.evolve since fields(Attribute) doesn't work
+ def evolve(self, **changes):
+ """
+ Copy *self* and apply *changes*.
+
+ This works similarly to `attrs.evolve` but that function does not work
+ with `Attribute`.
+
+ It is mainly meant to be used for `transform-fields`.
+
+ .. versionadded:: 20.3.0
+ """
+ new = copy.copy(self)
+
+ new._setattrs(changes.items())
+
+ return new
+
+ # Don't use _add_pickle since fields(Attribute) doesn't work
+ def __getstate__(self):
+ """
+ Play nice with pickle.
+ """
+ return tuple(
+ getattr(self, name) if name != "metadata" else dict(self.metadata)
+ for name in self.__slots__
+ )
+
+ def __setstate__(self, state):
+ """
+ Play nice with pickle.
+ """
+ self._setattrs(zip(self.__slots__, state))
+
+ def _setattrs(self, name_values_pairs):
+ bound_setattr = _obj_setattr.__get__(self)
+ for name, value in name_values_pairs:
+ if name != "metadata":
+ bound_setattr(name, value)
+ else:
+ bound_setattr(
+ name,
+ types.MappingProxyType(dict(value))
+ if value
+ else _empty_metadata_singleton,
+ )
+
+
+_a = [
+ Attribute(
+ name=name,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ eq=True,
+ order=False,
+ hash=(name != "metadata"),
+ init=True,
+ inherited=False,
+ alias=_default_init_alias_for(name),
+ )
+ for name in Attribute.__slots__
+]
+
+Attribute = _add_hash(
+ _add_eq(
+ _add_repr(Attribute, attrs=_a),
+ attrs=[a for a in _a if a.name != "inherited"],
+ ),
+ attrs=[a for a in _a if a.hash and a.name != "inherited"],
+)
+
+
+class _CountingAttr:
+ """
+ Intermediate representation of attributes that uses a counter to preserve
+ the order in which the attributes have been defined.
+
+    *Internal* data structure of the attrs library. Running into one is most
+    likely the result of a bug like a forgotten `@attr.s` decorator.
+ """
+
+ __slots__ = (
+ "counter",
+ "_default",
+ "repr",
+ "eq",
+ "eq_key",
+ "order",
+ "order_key",
+ "hash",
+ "init",
+ "metadata",
+ "_validator",
+ "converter",
+ "type",
+ "kw_only",
+ "on_setattr",
+ "alias",
+ )
+ __attrs_attrs__ = tuple(
+ Attribute(
+ name=name,
+ alias=_default_init_alias_for(name),
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=True,
+ init=True,
+ kw_only=False,
+ eq=True,
+ eq_key=None,
+ order=False,
+ order_key=None,
+ inherited=False,
+ on_setattr=None,
+ )
+ for name in (
+ "counter",
+ "_default",
+ "repr",
+ "eq",
+ "order",
+ "hash",
+ "init",
+ "on_setattr",
+ "alias",
+ )
+ ) + (
+ Attribute(
+ name="metadata",
+ alias="metadata",
+ default=None,
+ validator=None,
+ repr=True,
+ cmp=None,
+ hash=False,
+ init=True,
+ kw_only=False,
+ eq=True,
+ eq_key=None,
+ order=False,
+ order_key=None,
+ inherited=False,
+ on_setattr=None,
+ ),
+ )
+ cls_counter = 0
+
+ def __init__(
+ self,
+ default,
+ validator,
+ repr,
+ cmp,
+ hash,
+ init,
+ converter,
+ metadata,
+ type,
+ kw_only,
+ eq,
+ eq_key,
+ order,
+ order_key,
+ on_setattr,
+ alias,
+ ):
+ _CountingAttr.cls_counter += 1
+ self.counter = _CountingAttr.cls_counter
+ self._default = default
+ self._validator = validator
+ self.converter = converter
+ self.repr = repr
+ self.eq = eq
+ self.eq_key = eq_key
+ self.order = order
+ self.order_key = order_key
+ self.hash = hash
+ self.init = init
+ self.metadata = metadata
+ self.type = type
+ self.kw_only = kw_only
+ self.on_setattr = on_setattr
+ self.alias = alias
+
+ def validator(self, meth):
+ """
+ Decorator that adds *meth* to the list of validators.
+
+ Returns *meth* unchanged.
+
+ .. versionadded:: 17.1.0
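+
+        A minimal usage sketch (``C`` is a hypothetical example class)::
+
+            >>> import attr
+            >>> @attr.s
+            ... class C:
+            ...     x = attr.ib()
+            ...     @x.validator
+            ...     def _check_x(self, attribute, value):
+            ...         if value < 0:
+            ...             raise ValueError("x must be non-negative")
+            >>> C(1).x
+            1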
+ """
+ if self._validator is None:
+ self._validator = meth
+ else:
+ self._validator = and_(self._validator, meth)
+ return meth
+
+ def default(self, meth):
+ """
+        Decorator that allows setting the default for an attribute.
+
+ Returns *meth* unchanged.
+
+ :raises DefaultAlreadySetError: If default has been set before.
+
+ .. versionadded:: 17.1.0
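+
+        A minimal usage sketch (``C`` is a hypothetical example class)::
+
+            >>> import attr
+            >>> @attr.s
+            ... class C:
+            ...     x = attr.ib()
+            ...     @x.default
+            ...     def _x_default(self):
+            ...         return 42
+            >>> C().x
+            42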
+ """
+ if self._default is not NOTHING:
+ raise DefaultAlreadySetError()
+
+ self._default = Factory(meth, takes_self=True)
+
+ return meth
+
+
+_CountingAttr = _add_eq(_add_repr(_CountingAttr))
+
+
+class Factory:
+ """
+ Stores a factory callable.
+
+ If passed as the default value to `attrs.field`, the factory is used to
+ generate a new value.
+
+    :param callable factory: A callable that takes either no arguments or
+        exactly one mandatory positional argument, depending on *takes_self*.
+ :param bool takes_self: Pass the partially initialized instance that is
+ being initialized as a positional argument.
+
+ .. versionadded:: 17.1.0 *takes_self*
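+
+    A minimal usage sketch (``C`` is a hypothetical example class)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C:
+        ...     items = attr.ib(default=attr.Factory(list))
+        >>> C().items is C().items  # each instance gets a fresh list
+        False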
+ """
+
+ __slots__ = ("factory", "takes_self")
+
+ def __init__(self, factory, takes_self=False):
+ self.factory = factory
+ self.takes_self = takes_self
+
+ def __getstate__(self):
+ """
+ Play nice with pickle.
+ """
+ return tuple(getattr(self, name) for name in self.__slots__)
+
+ def __setstate__(self, state):
+ """
+ Play nice with pickle.
+ """
+ for name, value in zip(self.__slots__, state):
+ setattr(self, name, value)
+
+
+_f = [
+ Attribute(
+ name=name,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ cmp=None,
+ eq=True,
+ order=False,
+ hash=True,
+ init=True,
+ inherited=False,
+ )
+ for name in Factory.__slots__
+]
+
+Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f)
+
+
+def make_class(name, attrs, bases=(object,), **attributes_arguments):
+ r"""
+ A quick way to create a new class called *name* with *attrs*.
+
+ :param str name: The name for the new class.
+
+ :param attrs: A list of names or a dictionary of mappings of names to
+ `attr.ib`\ s / `attrs.field`\ s.
+
+ The order is deduced from the order of the names or attributes inside
+ *attrs*. Otherwise the order of the definition of the attributes is
+ used.
+ :type attrs: `list` or `dict`
+
+ :param tuple bases: Classes that the new class will subclass.
+
+ :param attributes_arguments: Passed unmodified to `attr.s`.
+
+ :return: A new class with *attrs*.
+ :rtype: type
+
+ .. versionadded:: 17.1.0 *bases*
+ .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
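+
+    A minimal usage sketch::
+
+        >>> import attr
+        >>> C = attr.make_class("C", ["x", "y"])
+        >>> C(1, 2)
+        C(x=1, y=2)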
+ """
+ if isinstance(attrs, dict):
+ cls_dict = attrs
+ elif isinstance(attrs, (list, tuple)):
+ cls_dict = {a: attrib() for a in attrs}
+ else:
+ raise TypeError("attrs argument must be a dict or a list.")
+
+ pre_init = cls_dict.pop("__attrs_pre_init__", None)
+ post_init = cls_dict.pop("__attrs_post_init__", None)
+ user_init = cls_dict.pop("__init__", None)
+
+ body = {}
+ if pre_init is not None:
+ body["__attrs_pre_init__"] = pre_init
+ if post_init is not None:
+ body["__attrs_post_init__"] = post_init
+ if user_init is not None:
+ body["__init__"] = user_init
+
+ type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body))
+
+ # For pickling to work, the __module__ variable needs to be set to the
+ # frame where the class is created. Bypass this step in environments where
+ # sys._getframe is not defined (Jython for example) or sys._getframe is not
+ # defined for arguments greater than 0 (IronPython).
+ try:
+ type_.__module__ = sys._getframe(1).f_globals.get(
+ "__name__", "__main__"
+ )
+ except (AttributeError, ValueError):
+ pass
+
+ # We do it here for proper warnings with meaningful stacklevel.
+ cmp = attributes_arguments.pop("cmp", None)
+ (
+ attributes_arguments["eq"],
+ attributes_arguments["order"],
+ ) = _determine_attrs_eq_order(
+ cmp,
+ attributes_arguments.get("eq"),
+ attributes_arguments.get("order"),
+ True,
+ )
+
+ return _attrs(these=cls_dict, **attributes_arguments)(type_)
+
+
+# These are required within this module, so we define them here and merely
+# import them into .validators / .converters.
+
+
+@attrs(slots=True, hash=True)
+class _AndValidator:
+ """
+ Compose many validators to a single one.
+ """
+
+ _validators = attrib()
+
+ def __call__(self, inst, attr, value):
+ for v in self._validators:
+ v(inst, attr, value)
+
+
+def and_(*validators):
+ """
+ A validator that composes multiple validators into one.
+
+ When called on a value, it runs all wrapped validators.
+
+ :param callables validators: Arbitrary number of validators.
+
+ .. versionadded:: 17.1.0
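+
+    A minimal usage sketch (``C`` is a hypothetical example class)::
+
+        >>> import attr
+        >>> @attr.s
+        ... class C:
+        ...     x = attr.ib(
+        ...         validator=attr.validators.and_(
+        ...             attr.validators.instance_of(int),
+        ...             attr.validators.in_(range(10)),
+        ...         )
+        ...     )
+        >>> C(5).x
+        5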
+ """
+ vals = []
+ for validator in validators:
+ vals.extend(
+ validator._validators
+ if isinstance(validator, _AndValidator)
+ else [validator]
+ )
+
+ return _AndValidator(tuple(vals))
+
+
+def pipe(*converters):
+ """
+ A converter that composes multiple converters into one.
+
+ When called on a value, it runs all wrapped converters, returning the
+ *last* value.
+
+    Type annotations will be inferred from the wrapped converters, if
+    they have any.
+
+ :param callables converters: Arbitrary number of converters.
+
+ .. versionadded:: 20.1.0
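+
+    A minimal usage sketch using plain builtins as converters::
+
+        >>> import attr
+        >>> to_title = attr.converters.pipe(str.strip, str.title)
+        >>> to_title("  hello world  ")
+        'Hello World'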
+ """
+
+ def pipe_converter(val):
+ for converter in converters:
+ val = converter(val)
+
+ return val
+
+ if not converters:
+ # If the converter list is empty, pipe_converter is the identity.
+ A = typing.TypeVar("A")
+ pipe_converter.__annotations__ = {"val": A, "return": A}
+ else:
+ # Get parameter type from first converter.
+ t = _AnnotationExtractor(converters[0]).get_first_param_type()
+ if t:
+ pipe_converter.__annotations__["val"] = t
+
+ # Get return type from last converter.
+ rt = _AnnotationExtractor(converters[-1]).get_return_type()
+ if rt:
+ pipe_converter.__annotations__["return"] = rt
+
+ return pipe_converter
diff --git a/third_party/python/attrs/attr/_next_gen.py b/third_party/python/attrs/attr/_next_gen.py
new file mode 100644
index 0000000000..8f7c0b9a46
--- /dev/null
+++ b/third_party/python/attrs/attr/_next_gen.py
@@ -0,0 +1,232 @@
+# SPDX-License-Identifier: MIT
+
+"""
+These are keyword-only APIs that call `attr.s` and `attr.ib` with different
+default values.
+"""
+
+
+from functools import partial
+
+from . import setters
+from ._funcs import asdict as _asdict
+from ._funcs import astuple as _astuple
+from ._make import (
+ NOTHING,
+ _frozen_setattrs,
+ _ng_default_on_setattr,
+ attrib,
+ attrs,
+)
+from .exceptions import UnannotatedAttributeError
+
+
+def define(
+ maybe_cls=None,
+ *,
+ these=None,
+ repr=None,
+ unsafe_hash=None,
+ hash=None,
+ init=None,
+ slots=True,
+ frozen=False,
+ weakref_slot=True,
+ str=False,
+ auto_attribs=None,
+ kw_only=False,
+ cache_hash=False,
+ auto_exc=True,
+ eq=None,
+ order=False,
+ auto_detect=True,
+ getstate_setstate=None,
+ on_setattr=None,
+ field_transformer=None,
+ match_args=True,
+):
+ r"""
+ Define an *attrs* class.
+
+    Differences from the classic `attr.s` that it uses underneath:
+
+ - Automatically detect whether or not *auto_attribs* should be `True` (c.f.
+ *auto_attribs* parameter).
+ - If *frozen* is `False`, run converters and validators when setting an
+ attribute by default.
+ - *slots=True*
+
+ .. caution::
+
+ Usually this has only upsides and few visible effects in everyday
+       programming. But it *can* lead to some surprising behaviors, so please
+ make sure to read :term:`slotted classes`.
+ - *auto_exc=True*
+ - *auto_detect=True*
+ - *order=False*
+ - Some options that were only relevant on Python 2 or were kept around for
+ backwards-compatibility have been removed.
+
+ Please note that these are all defaults and you can change them as you
+ wish.
+
+ :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves
+ exactly like `attr.s`. If left `None`, `attr.s` will try to guess:
+
+        1. If any attributes are annotated and no unannotated `attrs.field`\ s
+           are found, it assumes *auto_attribs=True*.
+        2. Otherwise it assumes *auto_attribs=False* and tries to collect
+           `attrs.field`\ s.
+
+ For now, please refer to `attr.s` for the rest of the parameters.
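+
+    For example (an illustrative sketch; ``Point`` is a hypothetical class):
+
+    >>> import attrs
+    >>> @attrs.define
+    ... class Point:
+    ...     x: int
+    ...     y: int = 0
+    >>> Point(1)
+    Point(x=1, y=0)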
+
+ .. versionadded:: 20.1.0
+ .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``.
+ .. versionadded:: 22.2.0
+ *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance).
+ """
+
+ def do_it(cls, auto_attribs):
+ return attrs(
+ maybe_cls=cls,
+ these=these,
+ repr=repr,
+ hash=hash,
+ unsafe_hash=unsafe_hash,
+ init=init,
+ slots=slots,
+ frozen=frozen,
+ weakref_slot=weakref_slot,
+ str=str,
+ auto_attribs=auto_attribs,
+ kw_only=kw_only,
+ cache_hash=cache_hash,
+ auto_exc=auto_exc,
+ eq=eq,
+ order=order,
+ auto_detect=auto_detect,
+ collect_by_mro=True,
+ getstate_setstate=getstate_setstate,
+ on_setattr=on_setattr,
+ field_transformer=field_transformer,
+ match_args=match_args,
+ )
+
+ def wrap(cls):
+ """
+ Making this a wrapper ensures this code runs during class creation.
+
+ We also ensure that frozen-ness of classes is inherited.
+ """
+ nonlocal frozen, on_setattr
+
+ had_on_setattr = on_setattr not in (None, setters.NO_OP)
+
+ # By default, mutable classes convert & validate on setattr.
+ if frozen is False and on_setattr is None:
+ on_setattr = _ng_default_on_setattr
+
+ # However, if we subclass a frozen class, we inherit the immutability
+ # and disable on_setattr.
+ for base_cls in cls.__bases__:
+ if base_cls.__setattr__ is _frozen_setattrs:
+ if had_on_setattr:
+ raise ValueError(
+ "Frozen classes can't use on_setattr "
+ "(frozen-ness was inherited)."
+ )
+
+ on_setattr = setters.NO_OP
+ break
+
+ if auto_attribs is not None:
+ return do_it(cls, auto_attribs)
+
+ try:
+ return do_it(cls, True)
+ except UnannotatedAttributeError:
+ return do_it(cls, False)
+
+ # maybe_cls's type depends on the usage of the decorator. It's a class
+    # if it's used as `@define` but ``None`` if used as `@define()`.
+ if maybe_cls is None:
+ return wrap
+ else:
+ return wrap(maybe_cls)
+
+
+mutable = define
+frozen = partial(define, frozen=True, on_setattr=None)
+
+
+def field(
+ *,
+ default=NOTHING,
+ validator=None,
+ repr=True,
+ hash=None,
+ init=True,
+ metadata=None,
+ type=None,
+ converter=None,
+ factory=None,
+ kw_only=False,
+ eq=None,
+ order=None,
+ on_setattr=None,
+ alias=None,
+):
+ """
+ Identical to `attr.ib`, except keyword-only and with some arguments
+ removed.
+
+ .. versionadded:: 23.1.0
+       The *type* parameter has been re-added; mostly for
+       `attrs.make_class`. Please note that type checkers ignore this
+       metadata.
+ .. versionadded:: 20.1.0
+ """
+ return attrib(
+ default=default,
+ validator=validator,
+ repr=repr,
+ hash=hash,
+ init=init,
+ metadata=metadata,
+ type=type,
+ converter=converter,
+ factory=factory,
+ kw_only=kw_only,
+ eq=eq,
+ order=order,
+ on_setattr=on_setattr,
+ alias=alias,
+ )
+
+
+def asdict(inst, *, recurse=True, filter=None, value_serializer=None):
+ """
+    Same as `attr.asdict`, except that collection types are always retained
+ and dict is always used as *dict_factory*.
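+
+    For example (an illustrative sketch; note the tuple being retained):
+
+    >>> import attrs
+    >>> @attrs.define
+    ... class C:
+    ...     xs: tuple
+    >>> attrs.asdict(C((1, 2)))
+    {'xs': (1, 2)}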
+
+ .. versionadded:: 21.3.0
+ """
+ return _asdict(
+ inst=inst,
+ recurse=recurse,
+ filter=filter,
+ value_serializer=value_serializer,
+ retain_collection_types=True,
+ )
+
+
+def astuple(inst, *, recurse=True, filter=None):
+ """
+    Same as `attr.astuple`, except that collection types are always retained
+ and `tuple` is always used as the *tuple_factory*.
+
+ .. versionadded:: 21.3.0
+ """
+ return _astuple(
+ inst=inst, recurse=recurse, filter=filter, retain_collection_types=True
+ )
diff --git a/third_party/python/attrs/attr/_typing_compat.pyi b/third_party/python/attrs/attr/_typing_compat.pyi
new file mode 100644
index 0000000000..ca7b71e906
--- /dev/null
+++ b/third_party/python/attrs/attr/_typing_compat.pyi
@@ -0,0 +1,15 @@
+from typing import Any, ClassVar, Protocol
+
+# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`.
+MYPY = False
+
+if MYPY:
+ # A protocol to be able to statically accept an attrs class.
+ class AttrsInstance_(Protocol):
+ __attrs_attrs__: ClassVar[Any]
+
+else:
+    # For type checkers without plug-in support, use an empty protocol that
+ # will (hopefully) be combined into a union.
+ class AttrsInstance_(Protocol):
+ pass
diff --git a/third_party/python/attrs/attr/_version_info.py b/third_party/python/attrs/attr/_version_info.py
new file mode 100644
index 0000000000..51a1312f97
--- /dev/null
+++ b/third_party/python/attrs/attr/_version_info.py
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: MIT
+
+
+from functools import total_ordering
+
+from ._funcs import astuple
+from ._make import attrib, attrs
+
+
+@total_ordering
+@attrs(eq=False, order=False, slots=True, frozen=True)
+class VersionInfo:
+ """
+    A version object that can be compared to a tuple of length 1--4:
+
+ >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2)
+ True
+ >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1)
+ True
+ >>> vi = attr.VersionInfo(19, 2, 0, "final")
+ >>> vi < (19, 1, 1)
+ False
+ >>> vi < (19,)
+ False
+ >>> vi == (19, 2,)
+ True
+ >>> vi == (19, 2, 1)
+ False
+
+ .. versionadded:: 19.2
+ """
+
+ year = attrib(type=int)
+ minor = attrib(type=int)
+ micro = attrib(type=int)
+ releaselevel = attrib(type=str)
+
+ @classmethod
+ def _from_version_string(cls, s):
+ """
+        Parse *s* and return a `VersionInfo`.
+ """
+ v = s.split(".")
+ if len(v) == 3:
+ v.append("final")
+
+ return cls(
+ year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3]
+ )
+
+ def _ensure_tuple(self, other):
+ """
+ Ensure *other* is a tuple of a valid length.
+
+ Returns a possibly transformed *other* and ourselves as a tuple of
+ the same length as *other*.
+ """
+
+ if self.__class__ is other.__class__:
+ other = astuple(other)
+
+ if not isinstance(other, tuple):
+ raise NotImplementedError
+
+ if not (1 <= len(other) <= 4):
+ raise NotImplementedError
+
+ return astuple(self)[: len(other)], other
+
+ def __eq__(self, other):
+ try:
+ us, them = self._ensure_tuple(other)
+ except NotImplementedError:
+ return NotImplemented
+
+ return us == them
+
+ def __lt__(self, other):
+ try:
+ us, them = self._ensure_tuple(other)
+ except NotImplementedError:
+ return NotImplemented
+
+ # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
+ # have to do anything special with releaselevel for now.
+ return us < them
diff --git a/third_party/python/attrs/attr/_version_info.pyi b/third_party/python/attrs/attr/_version_info.pyi
new file mode 100644
index 0000000000..45ced08633
--- /dev/null
+++ b/third_party/python/attrs/attr/_version_info.pyi
@@ -0,0 +1,9 @@
+class VersionInfo:
+ @property
+ def year(self) -> int: ...
+ @property
+ def minor(self) -> int: ...
+ @property
+ def micro(self) -> int: ...
+ @property
+ def releaselevel(self) -> str: ...
diff --git a/third_party/python/attrs/attr/converters.py b/third_party/python/attrs/attr/converters.py
new file mode 100644
index 0000000000..4cada106b0
--- /dev/null
+++ b/third_party/python/attrs/attr/converters.py
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful converters.
+"""
+
+
+import typing
+
+from ._compat import _AnnotationExtractor
+from ._make import NOTHING, Factory, pipe
+
+
+__all__ = [
+ "default_if_none",
+ "optional",
+ "pipe",
+ "to_bool",
+]
+
+
+def optional(converter):
+ """
+ A converter that allows an attribute to be optional. An optional attribute
+ is one which can be set to ``None``.
+
+ Type annotations will be inferred from the wrapped converter's, if it
+ has any.
+
+ :param callable converter: the converter that is used for non-``None``
+ values.
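+
+    For example (an illustrative sketch; ``int`` stands in for any
+    converter):
+
+    >>> from attr.converters import optional
+    >>> conv = optional(int)
+    >>> conv("42")
+    42
+    >>> conv(None) is None
+    True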
+
+ .. versionadded:: 17.1.0
+ """
+
+ def optional_converter(val):
+ if val is None:
+ return None
+ return converter(val)
+
+ xtr = _AnnotationExtractor(converter)
+
+ t = xtr.get_first_param_type()
+ if t:
+ optional_converter.__annotations__["val"] = typing.Optional[t]
+
+ rt = xtr.get_return_type()
+ if rt:
+ optional_converter.__annotations__["return"] = typing.Optional[rt]
+
+ return optional_converter
+
+
+def default_if_none(default=NOTHING, factory=None):
+ """
+    A converter that replaces ``None`` values with *default* or the
+    result of *factory*.
+
+ :param default: Value to be used if ``None`` is passed. Passing an instance
+ of `attrs.Factory` is supported, however the ``takes_self`` option
+ is *not*.
+ :param callable factory: A callable that takes no parameters whose result
+ is used if ``None`` is passed.
+
+    :raises TypeError: If **neither** *default* nor *factory* is passed.
+ :raises TypeError: If **both** *default* and *factory* are passed.
+ :raises ValueError: If an instance of `attrs.Factory` is passed with
+ ``takes_self=True``.
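+
+    For example (an illustrative sketch):
+
+    >>> from attr.converters import default_if_none
+    >>> default_if_none("")(None)
+    ''
+    >>> default_if_none(factory=list)(None)
+    []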
+
+ .. versionadded:: 18.2.0
+ """
+ if default is NOTHING and factory is None:
+ raise TypeError("Must pass either `default` or `factory`.")
+
+ if default is not NOTHING and factory is not None:
+ raise TypeError(
+ "Must pass either `default` or `factory` but not both."
+ )
+
+ if factory is not None:
+ default = Factory(factory)
+
+ if isinstance(default, Factory):
+ if default.takes_self:
+ raise ValueError(
+ "`takes_self` is not supported by default_if_none."
+ )
+
+ def default_if_none_converter(val):
+ if val is not None:
+ return val
+
+ return default.factory()
+
+ else:
+
+ def default_if_none_converter(val):
+ if val is not None:
+ return val
+
+ return default
+
+ return default_if_none_converter
+
+
+def to_bool(val):
+ """
+ Convert "boolean" strings (e.g., from env. vars.) to real booleans.
+
+ Values mapping to :code:`True`:
+
+ - :code:`True`
+ - :code:`"true"` / :code:`"t"`
+ - :code:`"yes"` / :code:`"y"`
+ - :code:`"on"`
+ - :code:`"1"`
+ - :code:`1`
+
+ Values mapping to :code:`False`:
+
+ - :code:`False`
+ - :code:`"false"` / :code:`"f"`
+ - :code:`"no"` / :code:`"n"`
+ - :code:`"off"`
+ - :code:`"0"`
+ - :code:`0`
+
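+    For example (an illustrative sketch):
+
+    >>> from attr.converters import to_bool
+    >>> to_bool("yes")
+    True
+    >>> to_bool(0)
+    False
+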
+ :raises ValueError: for any other value.
+
+ .. versionadded:: 21.3.0
+ """
+ if isinstance(val, str):
+ val = val.lower()
+ truthy = {True, "true", "t", "yes", "y", "on", "1", 1}
+ falsy = {False, "false", "f", "no", "n", "off", "0", 0}
+ try:
+ if val in truthy:
+ return True
+ if val in falsy:
+ return False
+ except TypeError:
+ # Raised when "val" is not hashable (e.g., lists)
+ pass
+ raise ValueError(f"Cannot convert value to bool: {val}")
diff --git a/third_party/python/attrs/attr/converters.pyi b/third_party/python/attrs/attr/converters.pyi
new file mode 100644
index 0000000000..5abb49f6d5
--- /dev/null
+++ b/third_party/python/attrs/attr/converters.pyi
@@ -0,0 +1,13 @@
+from typing import Callable, TypeVar, overload
+
+from . import _ConverterType
+
+_T = TypeVar("_T")
+
+def pipe(*validators: _ConverterType) -> _ConverterType: ...
+def optional(converter: _ConverterType) -> _ConverterType: ...
+@overload
+def default_if_none(default: _T) -> _ConverterType: ...
+@overload
+def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ...
+def to_bool(val: str) -> bool: ...
diff --git a/third_party/python/attrs/attr/exceptions.py b/third_party/python/attrs/attr/exceptions.py
new file mode 100644
index 0000000000..2883493085
--- /dev/null
+++ b/third_party/python/attrs/attr/exceptions.py
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: MIT
+
+
+class FrozenError(AttributeError):
+ """
+    An attempt has been made to modify a frozen/immutable instance or
+    attribute.
+
+ It mirrors the behavior of ``namedtuples`` by using the same error message
+ and subclassing `AttributeError`.
+
+ .. versionadded:: 20.1.0
+ """
+
+ msg = "can't set attribute"
+ args = [msg]
+
+
+class FrozenInstanceError(FrozenError):
+ """
+    An attempt has been made to modify a frozen instance.
+
+ .. versionadded:: 16.1.0
+ """
+
+
+class FrozenAttributeError(FrozenError):
+ """
+    An attempt has been made to modify a frozen attribute.
+
+ .. versionadded:: 20.1.0
+ """
+
+
+class AttrsAttributeNotFoundError(ValueError):
+ """
+ An *attrs* function couldn't find an attribute that the user asked for.
+
+ .. versionadded:: 16.2.0
+ """
+
+
+class NotAnAttrsClassError(ValueError):
+ """
+ A non-*attrs* class has been passed into an *attrs* function.
+
+ .. versionadded:: 16.2.0
+ """
+
+
+class DefaultAlreadySetError(RuntimeError):
+ """
+    A default has been set when defining the field and an attempt has been
+    made to reset it using the decorator.
+
+ .. versionadded:: 17.1.0
+ """
+
+
+class UnannotatedAttributeError(RuntimeError):
+ """
+ A class with ``auto_attribs=True`` has a field without a type annotation.
+
+ .. versionadded:: 17.3.0
+ """
+
+
+class PythonTooOldError(RuntimeError):
+ """
+    An attempt has been made to use an *attrs* feature that requires a newer
+    Python version.
+
+ .. versionadded:: 18.2.0
+ """
+
+
+class NotCallableError(TypeError):
+ """
+ A field requiring a callable has been set with a value that is not
+ callable.
+
+ .. versionadded:: 19.2.0
+ """
+
+ def __init__(self, msg, value):
+ super(TypeError, self).__init__(msg, value)
+ self.msg = msg
+ self.value = value
+
+ def __str__(self):
+ return str(self.msg)
diff --git a/third_party/python/attrs/attr/exceptions.pyi b/third_party/python/attrs/attr/exceptions.pyi
new file mode 100644
index 0000000000..f2680118b4
--- /dev/null
+++ b/third_party/python/attrs/attr/exceptions.pyi
@@ -0,0 +1,17 @@
+from typing import Any
+
+class FrozenError(AttributeError):
+ msg: str = ...
+
+class FrozenInstanceError(FrozenError): ...
+class FrozenAttributeError(FrozenError): ...
+class AttrsAttributeNotFoundError(ValueError): ...
+class NotAnAttrsClassError(ValueError): ...
+class DefaultAlreadySetError(RuntimeError): ...
+class UnannotatedAttributeError(RuntimeError): ...
+class PythonTooOldError(RuntimeError): ...
+
+class NotCallableError(TypeError):
+ msg: str = ...
+ value: Any = ...
+ def __init__(self, msg: str, value: Any) -> None: ...
diff --git a/third_party/python/attrs/attr/filters.py b/third_party/python/attrs/attr/filters.py
new file mode 100644
index 0000000000..a1e40c98db
--- /dev/null
+++ b/third_party/python/attrs/attr/filters.py
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful filters for `attr.asdict`.
+"""
+
+from ._make import Attribute
+
+
+def _split_what(what):
+ """
+    Returns a tuple of `frozenset`s of classes, field names, and attributes.
+ """
+ return (
+ frozenset(cls for cls in what if isinstance(cls, type)),
+ frozenset(cls for cls in what if isinstance(cls, str)),
+ frozenset(cls for cls in what if isinstance(cls, Attribute)),
+ )
+
+
+def include(*what):
+ """
+ Include *what*.
+
+ :param what: What to include.
+ :type what: `list` of classes `type`, field names `str` or
+ `attrs.Attribute`\\ s
+
+ :rtype: `callable`
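+
+    For example (an illustrative sketch; ``C`` is a hypothetical class):
+
+    >>> import attr
+    >>> from attr.filters import include
+    >>> @attr.s
+    ... class C:
+    ...     a = attr.ib()
+    ...     b = attr.ib()
+    >>> attr.asdict(C(1, 2), filter=include("a"))
+    {'a': 1}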
+
+ .. versionchanged:: 23.1.0 Accept strings with field names.
+ """
+ cls, names, attrs = _split_what(what)
+
+ def include_(attribute, value):
+ return (
+ value.__class__ in cls
+ or attribute.name in names
+ or attribute in attrs
+ )
+
+ return include_
+
+
+def exclude(*what):
+ """
+ Exclude *what*.
+
+ :param what: What to exclude.
+ :type what: `list` of classes `type`, field names `str` or
+ `attrs.Attribute`\\ s.
+
+ :rtype: `callable`
+
+    .. versionchanged:: 23.1.0 Accept strings with field names.
+ """
+ cls, names, attrs = _split_what(what)
+
+ def exclude_(attribute, value):
+ return not (
+ value.__class__ in cls
+ or attribute.name in names
+ or attribute in attrs
+ )
+
+ return exclude_
diff --git a/third_party/python/attrs/attr/filters.pyi b/third_party/python/attrs/attr/filters.pyi
new file mode 100644
index 0000000000..8a02fa0fc0
--- /dev/null
+++ b/third_party/python/attrs/attr/filters.pyi
@@ -0,0 +1,6 @@
+from typing import Any, Union
+
+from . import Attribute, _FilterType
+
+def include(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ...
+def exclude(*what: Union[type, str, Attribute[Any]]) -> _FilterType[Any]: ...
diff --git a/third_party/python/attrs/attr/py.typed b/third_party/python/attrs/attr/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/attrs/attr/py.typed
diff --git a/third_party/python/attrs/attr/setters.py b/third_party/python/attrs/attr/setters.py
new file mode 100644
index 0000000000..12ed6750df
--- /dev/null
+++ b/third_party/python/attrs/attr/setters.py
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly used hooks for on_setattr.
+"""
+
+
+from . import _config
+from .exceptions import FrozenAttributeError
+
+
+def pipe(*setters):
+ """
+ Run all *setters* and return the return value of the last one.
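+
+    For example (an illustrative sketch; ``C`` is a hypothetical class that
+    converts and validates on every assignment):
+
+    >>> import attr
+    >>> from attr import setters
+    >>> @attr.s
+    ... class C:
+    ...     x = attr.ib(
+    ...         converter=int,
+    ...         on_setattr=setters.pipe(setters.convert, setters.validate),
+    ...     )
+    >>> c = C("1")
+    >>> c.x = "2"
+    >>> c.x
+    2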
+
+ .. versionadded:: 20.1.0
+ """
+
+ def wrapped_pipe(instance, attrib, new_value):
+ rv = new_value
+
+ for setter in setters:
+ rv = setter(instance, attrib, rv)
+
+ return rv
+
+ return wrapped_pipe
+
+
+def frozen(_, __, ___):
+ """
+    Prevent an attribute from being modified.
+
+ .. versionadded:: 20.1.0
+ """
+ raise FrozenAttributeError()
+
+
+def validate(instance, attrib, new_value):
+ """
+ Run *attrib*'s validator on *new_value* if it has one.
+
+ .. versionadded:: 20.1.0
+ """
+ if _config._run_validators is False:
+ return new_value
+
+ v = attrib.validator
+ if not v:
+ return new_value
+
+ v(instance, attrib, new_value)
+
+ return new_value
+
+
+def convert(instance, attrib, new_value):
+ """
+ Run *attrib*'s converter -- if it has one -- on *new_value* and return the
+ result.
+
+ .. versionadded:: 20.1.0
+ """
+ c = attrib.converter
+ if c:
+ return c(new_value)
+
+ return new_value
+
+
+# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes.
+# autodata stopped working, so the docstring is inlined in the API docs.
+NO_OP = object()
diff --git a/third_party/python/attrs/attr/setters.pyi b/third_party/python/attrs/attr/setters.pyi
new file mode 100644
index 0000000000..72f7ce4761
--- /dev/null
+++ b/third_party/python/attrs/attr/setters.pyi
@@ -0,0 +1,19 @@
+from typing import Any, NewType, NoReturn, TypeVar
+
+from . import Attribute, _OnSetAttrType
+
+_T = TypeVar("_T")
+
+def frozen(
+ instance: Any, attribute: Attribute[Any], new_value: Any
+) -> NoReturn: ...
+def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ...
+def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ...
+
+# convert is allowed to return Any, because converters can be chained
+# using pipe.
+def convert(
+ instance: Any, attribute: Attribute[Any], new_value: Any
+) -> Any: ...
+
+_NoOpType = NewType("_NoOpType", object)
+NO_OP: _NoOpType
diff --git a/third_party/python/attrs/attr/validators.py b/third_party/python/attrs/attr/validators.py
new file mode 100644
index 0000000000..1488554f78
--- /dev/null
+++ b/third_party/python/attrs/attr/validators.py
@@ -0,0 +1,720 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful validators.
+"""
+
+
+import operator
+import re
+
+from contextlib import contextmanager
+from re import Pattern
+
+from ._config import get_run_validators, set_run_validators
+from ._make import _AndValidator, and_, attrib, attrs
+from .converters import default_if_none
+from .exceptions import NotCallableError
+
+
+__all__ = [
+ "and_",
+ "deep_iterable",
+ "deep_mapping",
+ "disabled",
+ "ge",
+ "get_disabled",
+ "gt",
+ "in_",
+ "instance_of",
+ "is_callable",
+ "le",
+ "lt",
+ "matches_re",
+ "max_len",
+ "min_len",
+ "not_",
+ "optional",
+ "provides",
+ "set_disabled",
+]
+
+
+def set_disabled(disabled):
+ """
+ Globally disable or enable running validators.
+
+ By default, they are run.
+
+ :param disabled: If ``True``, disable running all validators.
+ :type disabled: bool
+
+ .. warning::
+
+ This function is not thread-safe!
+
+ .. versionadded:: 21.3.0
+ """
+ set_run_validators(not disabled)
+
+
+def get_disabled():
+ """
+ Return a bool indicating whether validators are currently disabled or not.
+
+ :return: ``True`` if validators are currently disabled.
+ :rtype: bool
+
+ .. versionadded:: 21.3.0
+ """
+ return not get_run_validators()
+
+
+@contextmanager
+def disabled():
+ """
+ Context manager that disables running validators within its context.
+
+ .. warning::
+
+ This context manager is not thread-safe!
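+
+    For example (an illustrative sketch; ``C`` is a hypothetical class):
+
+    >>> import attr
+    >>> from attr.validators import disabled, instance_of
+    >>> @attr.s
+    ... class C:
+    ...     x = attr.ib(validator=instance_of(int))
+    >>> with disabled():
+    ...     C("not an int").x
+    'not an int'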
+
+ .. versionadded:: 21.3.0
+ """
+ set_run_validators(False)
+ try:
+ yield
+ finally:
+ set_run_validators(True)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InstanceOfValidator:
+ type = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not isinstance(value, self.type):
+ raise TypeError(
+ "'{name}' must be {type!r} (got {value!r} that is a "
+ "{actual!r}).".format(
+ name=attr.name,
+ type=self.type,
+ actual=value.__class__,
+ value=value,
+ ),
+ attr,
+ self.type,
+ value,
+ )
+
+ def __repr__(self):
+ return "<instance_of validator for type {type!r}>".format(
+ type=self.type
+ )
+
+
+def instance_of(type):
+ """
+ A validator that raises a `TypeError` if the initializer is called
+ with a wrong type for this particular attribute (checks are performed using
+    `isinstance`, therefore it's also valid to pass a tuple of types).
+
+ :param type: The type to check for.
+ :type type: type or tuple of type
+
+ :raises TypeError: With a human readable error message, the attribute
+ (of type `attrs.Attribute`), the expected type, and the value it
+ got.
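+
+    For example (an illustrative sketch; ``C`` is a hypothetical class):
+
+    >>> import attr
+    >>> @attr.s
+    ... class C:
+    ...     x = attr.ib(validator=attr.validators.instance_of(int))
+    >>> C(42).x
+    42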
+ """
+ return _InstanceOfValidator(type)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MatchesReValidator:
+ pattern = attrib()
+ match_func = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.match_func(value):
+ raise ValueError(
+ "'{name}' must match regex {pattern!r}"
+ " ({value!r} doesn't)".format(
+ name=attr.name, pattern=self.pattern.pattern, value=value
+ ),
+ attr,
+ self.pattern,
+ value,
+ )
+
+ def __repr__(self):
+ return "<matches_re validator for pattern {pattern!r}>".format(
+ pattern=self.pattern
+ )
+
+
+def matches_re(regex, flags=0, func=None):
+ r"""
+ A validator that raises `ValueError` if the initializer is called
+ with a string that doesn't match *regex*.
+
+ :param regex: a regex string or precompiled pattern to match against
+ :param int flags: flags that will be passed to the underlying re function
+ (default 0)
+ :param callable func: which underlying `re` function to call. Valid options
+ are `re.fullmatch`, `re.search`, and `re.match`; the default ``None``
+ means `re.fullmatch`. For performance reasons, the pattern is always
+ precompiled using `re.compile`.
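+
+    For example (an illustrative sketch; ``User`` and the pattern are
+    hypothetical):
+
+    >>> import attr
+    >>> @attr.s
+    ... class User:
+    ...     email = attr.ib(
+    ...         validator=attr.validators.matches_re(r"[^@]+@[^@]+")
+    ...     )
+    >>> User("jane@example.com").email
+    'jane@example.com'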
+
+ .. versionadded:: 19.2.0
+ .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
+ """
+ valid_funcs = (re.fullmatch, None, re.search, re.match)
+ if func not in valid_funcs:
+ raise ValueError(
+ "'func' must be one of {}.".format(
+ ", ".join(
+ sorted(
+ e and e.__name__ or "None" for e in set(valid_funcs)
+ )
+ )
+ )
+ )
+
+ if isinstance(regex, Pattern):
+ if flags:
+ raise TypeError(
+ "'flags' can only be used with a string pattern; "
+ "pass flags to re.compile() instead"
+ )
+ pattern = regex
+ else:
+ pattern = re.compile(regex, flags)
+
+ if func is re.match:
+ match_func = pattern.match
+ elif func is re.search:
+ match_func = pattern.search
+ else:
+ match_func = pattern.fullmatch
+
+ return _MatchesReValidator(pattern, match_func)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _ProvidesValidator:
+ interface = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.interface.providedBy(value):
+ raise TypeError(
+ "'{name}' must provide {interface!r} which {value!r} "
+ "doesn't.".format(
+ name=attr.name, interface=self.interface, value=value
+ ),
+ attr,
+ self.interface,
+ value,
+ )
+
+ def __repr__(self):
+ return "<provides validator for interface {interface!r}>".format(
+ interface=self.interface
+ )
+
+
+def provides(interface):
+ """
+ A validator that raises a `TypeError` if the initializer is called
+    with an object that does not provide the requested *interface*. Checks
+    are performed using ``interface.providedBy(value)`` (see `zope.interface
+    <https://zopeinterface.readthedocs.io/en/latest/>`_).
+
+ :param interface: The interface to check for.
+ :type interface: ``zope.interface.Interface``
+
+ :raises TypeError: With a human readable error message, the attribute
+ (of type `attrs.Attribute`), the expected interface, and the
+ value it got.
+
+ .. deprecated:: 23.1.0
+ """
+ import warnings
+
+ warnings.warn(
+ "attrs's zope-interface support is deprecated and will be removed in, "
+ "or after, April 2024.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _ProvidesValidator(interface)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _OptionalValidator:
+ validator = attrib()
+
+ def __call__(self, inst, attr, value):
+ if value is None:
+ return
+
+ self.validator(inst, attr, value)
+
+ def __repr__(self):
+ return "<optional validator for {what} or None>".format(
+ what=repr(self.validator)
+ )
+
+
+def optional(validator):
+ """
+ A validator that makes an attribute optional. An optional attribute is one
+ which can be set to ``None`` in addition to satisfying the requirements of
+ the sub-validator.
+
+ :param Callable | tuple[Callable] | list[Callable] validator: A validator
+ (or validators) that is used for non-``None`` values.
+
+ .. versionadded:: 15.1.0
+ .. versionchanged:: 17.1.0 *validator* can be a list of validators.
+ .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators.
+ """
+ if isinstance(validator, (list, tuple)):
+ return _OptionalValidator(_AndValidator(validator))
+
+ return _OptionalValidator(validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _InValidator:
+ options = attrib()
+
+ def __call__(self, inst, attr, value):
+ try:
+ in_options = value in self.options
+ except TypeError: # e.g. `1 in "abc"`
+ in_options = False
+
+ if not in_options:
+ raise ValueError(
+ "'{name}' must be in {options!r} (got {value!r})".format(
+ name=attr.name, options=self.options, value=value
+ ),
+ attr,
+ self.options,
+ value,
+ )
+
+ def __repr__(self):
+ return "<in_ validator with options {options!r}>".format(
+ options=self.options
+ )
+
+
+def in_(options):
+ """
+ A validator that raises a `ValueError` if the initializer is called
+ with a value that does not belong in the options provided. The check is
+ performed using ``value in options``.
+
+ :param options: Allowed options.
+ :type options: list, tuple, `enum.Enum`, ...
+
+ :raises ValueError: With a human readable error message, the attribute (of
+ type `attrs.Attribute`), the expected options, and the value it
+ got.
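+
+    For example (an illustrative sketch; ``C`` is a hypothetical class):
+
+    >>> import attr
+    >>> @attr.s
+    ... class C:
+    ...     state = attr.ib(validator=attr.validators.in_(["on", "off"]))
+    >>> C("on").state
+    'on'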
+
+ .. versionadded:: 17.1.0
+ .. versionchanged:: 22.1.0
+ The ValueError was incomplete until now and only contained the human
+ readable error message. Now it contains all the information that has
+ been promised since 17.1.0.
+ """
+ return _InValidator(options)
+
+
+@attrs(repr=False, slots=False, hash=True)
+class _IsCallableValidator:
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not callable(value):
+ message = (
+ "'{name}' must be callable "
+ "(got {value!r} that is a {actual!r})."
+ )
+ raise NotCallableError(
+ msg=message.format(
+ name=attr.name, value=value, actual=value.__class__
+ ),
+ value=value,
+ )
+
+ def __repr__(self):
+ return "<is_callable validator>"
+
+
+def is_callable():
+ """
+ A validator that raises a `attrs.exceptions.NotCallableError` if the
+ initializer is called with a value for this particular attribute
+ that is not callable.
+
+ .. versionadded:: 19.1.0
+
+ :raises attrs.exceptions.NotCallableError: With a human readable error
+ message containing the attribute (`attrs.Attribute`) name,
+ and the value it got.
+ """
+ return _IsCallableValidator()
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepIterable:
+ member_validator = attrib(validator=is_callable())
+ iterable_validator = attrib(
+ default=None, validator=optional(is_callable())
+ )
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if self.iterable_validator is not None:
+ self.iterable_validator(inst, attr, value)
+
+ for member in value:
+ self.member_validator(inst, attr, member)
+
+ def __repr__(self):
+ iterable_identifier = (
+ ""
+ if self.iterable_validator is None
+ else f" {self.iterable_validator!r}"
+ )
+ return (
+ "<deep_iterable validator for{iterable_identifier}"
+ " iterables of {member!r}>"
+ ).format(
+ iterable_identifier=iterable_identifier,
+ member=self.member_validator,
+ )
+
+
+def deep_iterable(member_validator, iterable_validator=None):
+ """
+ A validator that performs deep validation of an iterable.
+
+ :param member_validator: Validator(s) to apply to iterable members
+ :param iterable_validator: Validator to apply to iterable itself
+ (optional)
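+
+    For example (an illustrative sketch; ``C`` is a hypothetical class):
+
+    >>> import attr
+    >>> @attr.s
+    ... class C:
+    ...     xs = attr.ib(
+    ...         validator=attr.validators.deep_iterable(
+    ...             member_validator=attr.validators.instance_of(int),
+    ...             iterable_validator=attr.validators.instance_of(list),
+    ...         )
+    ...     )
+    >>> C([1, 2, 3]).xs
+    [1, 2, 3]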
+
+ .. versionadded:: 19.1.0
+
+ :raises TypeError: if any sub-validators fail
+ """
+ if isinstance(member_validator, (list, tuple)):
+ member_validator = and_(*member_validator)
+ return _DeepIterable(member_validator, iterable_validator)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _DeepMapping:
+ key_validator = attrib(validator=is_callable())
+ value_validator = attrib(validator=is_callable())
+ mapping_validator = attrib(default=None, validator=optional(is_callable()))
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if self.mapping_validator is not None:
+ self.mapping_validator(inst, attr, value)
+
+ for key in value:
+ self.key_validator(inst, attr, key)
+ self.value_validator(inst, attr, value[key])
+
+ def __repr__(self):
+ return (
+ "<deep_mapping validator for objects mapping {key!r} to {value!r}>"
+ ).format(key=self.key_validator, value=self.value_validator)
+
+
+def deep_mapping(key_validator, value_validator, mapping_validator=None):
+ """
+ A validator that performs deep validation of a dictionary.
+
+ :param key_validator: Validator to apply to dictionary keys
+ :param value_validator: Validator to apply to dictionary values
+ :param mapping_validator: Validator to apply to top-level mapping
+ attribute (optional)
+
+ .. versionadded:: 19.1.0
+
+ :raises TypeError: if any sub-validators fail
+ """
+ return _DeepMapping(key_validator, value_validator, mapping_validator)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _NumberValidator:
+ bound = attrib()
+ compare_op = attrib()
+ compare_func = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not self.compare_func(value, self.bound):
+ raise ValueError(
+ "'{name}' must be {op} {bound}: {value}".format(
+ name=attr.name,
+ op=self.compare_op,
+ bound=self.bound,
+ value=value,
+ )
+ )
+
+ def __repr__(self):
+ return "<Validator for x {op} {bound}>".format(
+ op=self.compare_op, bound=self.bound
+ )
+
+
+def lt(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+    with a number larger than or equal to *val*.
+
+ :param val: Exclusive upper bound for values
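+
+    For example (an illustrative sketch; ``C`` is a hypothetical class, and
+    `le`, `ge`, and `gt` work analogously):
+
+    >>> import attr
+    >>> @attr.s
+    ... class C:
+    ...     x = attr.ib(validator=attr.validators.lt(42))
+    >>> C(41).x
+    41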
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, "<", operator.lt)
+
+
+def le(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a number greater than *val*.
+
+ :param val: Inclusive upper bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, "<=", operator.le)
+
+
+def ge(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a number smaller than *val*.
+
+ :param val: Inclusive lower bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, ">=", operator.ge)
+
+
+def gt(val):
+ """
+ A validator that raises `ValueError` if the initializer is called
+    with a number smaller than or equal to *val*.
+
+ :param val: Exclusive lower bound for values
+
+ .. versionadded:: 21.3.0
+ """
+ return _NumberValidator(val, ">", operator.gt)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MaxLengthValidator:
+ max_length = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if len(value) > self.max_length:
+ raise ValueError(
+ "Length of '{name}' must be <= {max}: {len}".format(
+ name=attr.name, max=self.max_length, len=len(value)
+ )
+ )
+
+ def __repr__(self):
+ return f"<max_len validator for {self.max_length}>"
+
+
+def max_len(length):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a string or iterable that is longer than *length*.
+
+ :param int length: Maximum length of the string or iterable
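+
+    For example (an illustrative sketch; ``C`` is a hypothetical class, and
+    `min_len` works analogously):
+
+    >>> import attr
+    >>> @attr.s
+    ... class C:
+    ...     x = attr.ib(validator=attr.validators.max_len(4))
+    >>> C("spam").x
+    'spam'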
+
+ .. versionadded:: 21.3.0
+ """
+ return _MaxLengthValidator(length)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MinLengthValidator:
+ min_length = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if len(value) < self.min_length:
+ raise ValueError(
+ "Length of '{name}' must be => {min}: {len}".format(
+ name=attr.name, min=self.min_length, len=len(value)
+ )
+ )
+
+ def __repr__(self):
+ return f"<min_len validator for {self.min_length}>"
+
+
+def min_len(length):
+ """
+ A validator that raises `ValueError` if the initializer is called
+ with a string or iterable that is shorter than *length*.
+
+ :param int length: Minimum length of the string or iterable
+
+ .. versionadded:: 22.1.0
+ """
+ return _MinLengthValidator(length)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _SubclassOfValidator:
+ type = attrib()
+
+ def __call__(self, inst, attr, value):
+ """
+ We use a callable class to be able to change the ``__repr__``.
+ """
+ if not issubclass(value, self.type):
+ raise TypeError(
+ "'{name}' must be a subclass of {type!r} "
+ "(got {value!r}).".format(
+ name=attr.name,
+ type=self.type,
+ value=value,
+ ),
+ attr,
+ self.type,
+ value,
+ )
+
+ def __repr__(self):
+ return "<subclass_of validator for type {type!r}>".format(
+ type=self.type
+ )
+
+
+def _subclass_of(type):
+ """
+ A validator that raises a `TypeError` if the initializer is called
+ with a wrong type for this particular attribute (checks are performed using
+    `issubclass`, therefore it's also valid to pass a tuple of types).
+
+ :param type: The type to check for.
+ :type type: type or tuple of types
+
+ :raises TypeError: With a human readable error message, the attribute
+ (of type `attrs.Attribute`), the expected type, and the value it
+ got.
+ """
+ return _SubclassOfValidator(type)
+
+
+@attrs(repr=False, slots=True, hash=True)
+class _NotValidator:
+ validator = attrib()
+ msg = attrib(
+ converter=default_if_none(
+ "not_ validator child '{validator!r}' "
+ "did not raise a captured error"
+ )
+ )
+ exc_types = attrib(
+ validator=deep_iterable(
+ member_validator=_subclass_of(Exception),
+ iterable_validator=instance_of(tuple),
+ ),
+ )
+
+ def __call__(self, inst, attr, value):
+ try:
+ self.validator(inst, attr, value)
+ except self.exc_types:
+ pass # suppress error to invert validity
+ else:
+ raise ValueError(
+ self.msg.format(
+ validator=self.validator,
+ exc_types=self.exc_types,
+ ),
+ attr,
+ self.validator,
+ value,
+ self.exc_types,
+ )
+
+ def __repr__(self):
+ return (
+ "<not_ validator wrapping {what!r}, " "capturing {exc_types!r}>"
+ ).format(
+ what=self.validator,
+ exc_types=self.exc_types,
+ )
+
+
+def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)):
+ """
+ A validator that wraps and logically 'inverts' the validator passed to it.
+ It will raise a `ValueError` if the provided validator *doesn't* raise a
+ `ValueError` or `TypeError` (by default), and will suppress the exception
+ if the provided validator *does*.
+
+ Intended to be used with existing validators to compose logic without
+ needing to create inverted variants, for example, ``not_(in_(...))``.
+
+ :param validator: A validator to be logically inverted.
+ :param msg: Message to raise if validator fails.
+ Formatted with keys ``exc_types`` and ``validator``.
+ :type msg: str
+ :param exc_types: Exception type(s) to capture.
+ Other types raised by child validators will not be intercepted and
+ pass through.
+
+ :raises ValueError: With a human readable error message,
+ the attribute (of type `attrs.Attribute`),
+ the validator that failed to raise an exception,
+ the value it got,
+ and the expected exception types.
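+
+    For example (an illustrative sketch; ``C`` is a hypothetical class):
+
+    >>> import attr
+    >>> @attr.s
+    ... class C:
+    ...     x = attr.ib(
+    ...         validator=attr.validators.not_(
+    ...             attr.validators.in_(["reserved"])
+    ...         )
+    ...     )
+    >>> C("mid").x
+    'mid'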
+
+ .. versionadded:: 22.2.0
+ """
+ try:
+ exc_types = tuple(exc_types)
+ except TypeError:
+ exc_types = (exc_types,)
+ return _NotValidator(validator, msg, exc_types)
diff --git a/third_party/python/attrs/attr/validators.pyi b/third_party/python/attrs/attr/validators.pyi
new file mode 100644
index 0000000000..d194a75abc
--- /dev/null
+++ b/third_party/python/attrs/attr/validators.pyi
@@ -0,0 +1,88 @@
+from typing import (
+ Any,
+ AnyStr,
+ Callable,
+ Container,
+ ContextManager,
+ Iterable,
+ List,
+ Mapping,
+ Match,
+ Optional,
+ Pattern,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ overload,
+)
+
+from . import _ValidatorType
+from . import _ValidatorArgType
+
+_T = TypeVar("_T")
+_T1 = TypeVar("_T1")
+_T2 = TypeVar("_T2")
+_T3 = TypeVar("_T3")
+_I = TypeVar("_I", bound=Iterable)
+_K = TypeVar("_K")
+_V = TypeVar("_V")
+_M = TypeVar("_M", bound=Mapping)
+
+def set_disabled(disabled: bool) -> None: ...
+def get_disabled() -> bool: ...
+def disabled() -> ContextManager[None]: ...
+
+# To be more precise on instance_of, we use some overloads.
+# If there are more than 3 items in the tuple, then we fall back to Any.
+@overload
+def instance_of(type: Type[_T]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(type: Tuple[Type[_T]]) -> _ValidatorType[_T]: ...
+@overload
+def instance_of(
+ type: Tuple[Type[_T1], Type[_T2]]
+) -> _ValidatorType[Union[_T1, _T2]]: ...
+@overload
+def instance_of(
+ type: Tuple[Type[_T1], Type[_T2], Type[_T3]]
+) -> _ValidatorType[Union[_T1, _T2, _T3]]: ...
+@overload
+def instance_of(type: Tuple[type, ...]) -> _ValidatorType[Any]: ...
+def provides(interface: Any) -> _ValidatorType[Any]: ...
+def optional(
+ validator: Union[
+ _ValidatorType[_T], List[_ValidatorType[_T]], Tuple[_ValidatorType[_T]]
+ ]
+) -> _ValidatorType[Optional[_T]]: ...
+def in_(options: Container[_T]) -> _ValidatorType[_T]: ...
+def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ...
+def matches_re(
+ regex: Union[Pattern[AnyStr], AnyStr],
+ flags: int = ...,
+ func: Optional[
+ Callable[[AnyStr, AnyStr, int], Optional[Match[AnyStr]]]
+ ] = ...,
+) -> _ValidatorType[AnyStr]: ...
+def deep_iterable(
+ member_validator: _ValidatorArgType[_T],
+ iterable_validator: Optional[_ValidatorType[_I]] = ...,
+) -> _ValidatorType[_I]: ...
+def deep_mapping(
+ key_validator: _ValidatorType[_K],
+ value_validator: _ValidatorType[_V],
+ mapping_validator: Optional[_ValidatorType[_M]] = ...,
+) -> _ValidatorType[_M]: ...
+def is_callable() -> _ValidatorType[_T]: ...
+def lt(val: _T) -> _ValidatorType[_T]: ...
+def le(val: _T) -> _ValidatorType[_T]: ...
+def ge(val: _T) -> _ValidatorType[_T]: ...
+def gt(val: _T) -> _ValidatorType[_T]: ...
+def max_len(length: int) -> _ValidatorType[_T]: ...
+def min_len(length: int) -> _ValidatorType[_T]: ...
+def not_(
+ validator: _ValidatorType[_T],
+ *,
+ msg: Optional[str] = None,
+ exc_types: Union[Type[Exception], Iterable[Type[Exception]]] = ...,
+) -> _ValidatorType[_T]: ...
diff --git a/third_party/python/attrs/attrs-23.1.0.dist-info/METADATA b/third_party/python/attrs/attrs-23.1.0.dist-info/METADATA
new file mode 100644
index 0000000000..4a986f007f
--- /dev/null
+++ b/third_party/python/attrs/attrs-23.1.0.dist-info/METADATA
@@ -0,0 +1,243 @@
+Metadata-Version: 2.1
+Name: attrs
+Version: 23.1.0
+Summary: Classes Without Boilerplate
+Project-URL: Documentation, https://www.attrs.org/
+Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html
+Project-URL: Bug Tracker, https://github.com/python-attrs/attrs/issues
+Project-URL: Source Code, https://github.com/python-attrs/attrs
+Project-URL: Funding, https://github.com/sponsors/hynek
+Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi
+Author-email: Hynek Schlawack <hs@ox.cx>
+License-Expression: MIT
+License-File: LICENSE
+Keywords: attribute,boilerplate,class
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Typing :: Typed
+Requires-Python: >=3.7
+Requires-Dist: importlib-metadata; python_version < '3.8'
+Provides-Extra: cov
+Requires-Dist: attrs[tests]; extra == 'cov'
+Requires-Dist: coverage[toml]>=5.3; extra == 'cov'
+Provides-Extra: dev
+Requires-Dist: attrs[docs,tests]; extra == 'dev'
+Requires-Dist: pre-commit; extra == 'dev'
+Provides-Extra: docs
+Requires-Dist: furo; extra == 'docs'
+Requires-Dist: myst-parser; extra == 'docs'
+Requires-Dist: sphinx; extra == 'docs'
+Requires-Dist: sphinx-notfound-page; extra == 'docs'
+Requires-Dist: sphinxcontrib-towncrier; extra == 'docs'
+Requires-Dist: towncrier; extra == 'docs'
+Requires-Dist: zope-interface; extra == 'docs'
+Provides-Extra: tests
+Requires-Dist: attrs[tests-no-zope]; extra == 'tests'
+Requires-Dist: zope-interface; extra == 'tests'
+Provides-Extra: tests-no-zope
+Requires-Dist: cloudpickle; platform_python_implementation == 'CPython' and extra == 'tests-no-zope'
+Requires-Dist: hypothesis; extra == 'tests-no-zope'
+Requires-Dist: mypy>=1.1.1; platform_python_implementation == 'CPython' and extra == 'tests-no-zope'
+Requires-Dist: pympler; extra == 'tests-no-zope'
+Requires-Dist: pytest-mypy-plugins; platform_python_implementation == 'CPython' and python_version < '3.11' and extra == 'tests-no-zope'
+Requires-Dist: pytest-xdist[psutil]; extra == 'tests-no-zope'
+Requires-Dist: pytest>=4.3.0; extra == 'tests-no-zope'
+Description-Content-Type: text/markdown
+
+<p align="center">
+ <a href="https://www.attrs.org/">
+ <img src="https://raw.githubusercontent.com/python-attrs/attrs/main/docs/_static/attrs_logo.svg" width="35%" alt="attrs" />
+ </a>
+</p>
+
+
+*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)).
+[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020!
+
+Its main goal is to help you to write **concise** and **correct** software without slowing down your code.
+
+
+## Sponsors
+
+*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek).
+Especially those generously supporting us at the *The Organization* tier and higher:
+
+<p align="center">
+ <a href="https://www.variomedia.de/">
+ <img src="https://raw.githubusercontent.com/python-attrs/attrs/main/.github/sponsors/Variomedia.svg" width="200" height="60"></img>
+ </a>
+
+ <a href="https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=referral&utm_campaign=enterprise&utm_term=repo">
+ <img src="https://raw.githubusercontent.com/python-attrs/attrs/main/.github/sponsors/Tidelift.svg" width="200" height="60"></img>
+ </a>
+
+ <a href="https://sentry.io/">
+ <img src="https://raw.githubusercontent.com/python-attrs/attrs/main/.github/sponsors/Sentry.svg" width="200" height="60"></img>
+ </a>
+
+ <a href="https://filepreviews.io/">
+ <img src="https://raw.githubusercontent.com/python-attrs/attrs/main/.github/sponsors/FilePreviews.svg" width="200" height="60"></img>
+ </a>
+</p>
+
+<p align="center">
+ <strong>Please consider <a href="https://github.com/sponsors/hynek">joining them</a> to help make <em>attrs</em>’s maintenance more sustainable!</strong>
+</p>
+
+<!-- teaser-end -->
+
+## Example
+
+*attrs* gives you a class decorator and a way to declaratively define the attributes on that class:
+
+<!-- code-begin -->
+
+```pycon
+>>> from attrs import asdict, define, make_class, Factory
+
+>>> @define
+... class SomeClass:
+... a_number: int = 42
+... list_of_numbers: list[int] = Factory(list)
+...
+... def hard_math(self, another_number):
+... return self.a_number + sum(self.list_of_numbers) * another_number
+
+
+>>> sc = SomeClass(1, [1, 2, 3])
+>>> sc
+SomeClass(a_number=1, list_of_numbers=[1, 2, 3])
+
+>>> sc.hard_math(3)
+19
+>>> sc == SomeClass(1, [1, 2, 3])
+True
+>>> sc != SomeClass(2, [3, 2, 1])
+True
+
+>>> asdict(sc)
+{'a_number': 1, 'list_of_numbers': [1, 2, 3]}
+
+>>> SomeClass()
+SomeClass(a_number=42, list_of_numbers=[])
+
+>>> C = make_class("C", ["a", "b"])
+>>> C("foo", "bar")
+C(a='foo', b='bar')
+```
+
+After *declaring* your attributes, *attrs* gives you:
+
+- a concise and explicit overview of the class's attributes,
+- a nice human-readable `__repr__`,
+- equality-checking methods,
+- an initializer,
+- and much more,
+
+*without* writing dull boilerplate code again and again and *without* runtime performance penalties.
+
+**Hate type annotations**!?
+No problem!
+Types are entirely **optional** with *attrs*.
+Simply assign `attrs.field()` to the attributes instead of annotating them with types.
+
+---
+
+This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0.
+The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**.
+
+Please check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for a more in-depth explanation.
+
+
+## Data Classes
+
+On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*).
+In practice it does a lot more and is more flexible.
+For instance it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), or allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization).
+
+For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes).
+
+
+## Project Information
+
+- [**Changelog**](https://www.attrs.org/en/stable/changelog.html)
+- [**Documentation**](https://www.attrs.org/)
+- [**PyPI**](https://pypi.org/project/attrs/)
+- [**Source Code**](https://github.com/python-attrs/attrs)
+- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md)
+- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs)
+- **License**: [MIT](https://www.attrs.org/en/latest/license.html)
+- **Get Help**: please use the `python-attrs` tag on [StackOverflow](https://stackoverflow.com/questions/tagged/python-attrs)
+- **Supported Python Versions**: 3.7 and later
+
+
+### *attrs* for Enterprise
+
+Available as part of the Tidelift Subscription.
+
+The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications.
+Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use.
+[Learn more.](https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=referral&utm_campaign=enterprise&utm_term=repo)
+
+## Release Information
+
+### Backwards-incompatible Changes
+
+- Python 3.6 has been dropped and packaging switched to static package data using [Hatch](https://hatch.pypa.io/latest/).
+ [#993](https://github.com/python-attrs/attrs/issues/993)
+
+
+### Deprecations
+
+- The support for *zope-interface* via the `attrs.validators.provides` validator is now deprecated and will be removed in, or after, April 2024.
+
+  The presence of a C-based package in our development dependencies has caused headaches and we're not under the impression it's used a lot.
+
+ Let us know if you're using it and we might publish it as a separate package.
+ [#1120](https://github.com/python-attrs/attrs/issues/1120)
+
+
+### Changes
+
+- `attrs.filters.exclude()` and `attrs.filters.include()` now support the passing of attribute names as strings.
+ [#1068](https://github.com/python-attrs/attrs/issues/1068)
+- `attrs.has()` and `attrs.fields()` now handle generic classes correctly.
+ [#1079](https://github.com/python-attrs/attrs/issues/1079)
+- Fix frozen exception classes when raised within e.g. `contextlib.contextmanager`, which mutates their `__traceback__` attributes.
+ [#1081](https://github.com/python-attrs/attrs/issues/1081)
+- `@frozen` now works with type checkers that implement [PEP-681](https://peps.python.org/pep-0681/) (ex. [pyright](https://github.com/microsoft/pyright/)).
+ [#1084](https://github.com/python-attrs/attrs/issues/1084)
+- Restored ability to unpickle instances pickled before 22.2.0.
+ [#1085](https://github.com/python-attrs/attrs/issues/1085)
+- `attrs.asdict()`'s and `attrs.astuple()`'s type stubs now accept the `attrs.AttrsInstance` protocol.
+ [#1090](https://github.com/python-attrs/attrs/issues/1090)
+- Fix slots class cellvar updating closure in CPython 3.8+ even when `__code__` introspection is unavailable.
+ [#1092](https://github.com/python-attrs/attrs/issues/1092)
+- `attrs.resolve_types()` can now pass `include_extras` to `typing.get_type_hints()` on Python 3.9+, and does so by default.
+ [#1099](https://github.com/python-attrs/attrs/issues/1099)
+- Added instructions for pull request workflow to `CONTRIBUTING.md`.
+ [#1105](https://github.com/python-attrs/attrs/issues/1105)
+- Added *type* parameter to `attrs.field()` function for use with `attrs.make_class()`.
+
+ Please note that type checkers ignore type metadata passed into `make_class()`, but it can be useful if you're wrapping _attrs_.
+ [#1107](https://github.com/python-attrs/attrs/issues/1107)
+- It is now possible for `attrs.evolve()` (and `attr.evolve()`) to change fields named `inst` if the instance is passed as a positional argument.
+
+ Passing the instance using the `inst` keyword argument is now deprecated and will be removed in, or after, April 2024.
+ [#1117](https://github.com/python-attrs/attrs/issues/1117)
+- `attrs.validators.optional()` now also accepts a tuple of validators (in addition to lists of validators).
+ [#1122](https://github.com/python-attrs/attrs/issues/1122)
+
+
+
+---
+
+[Full changelog](https://www.attrs.org/en/stable/changelog.html)
diff --git a/third_party/python/attrs/attrs-23.1.0.dist-info/RECORD b/third_party/python/attrs/attrs-23.1.0.dist-info/RECORD
new file mode 100644
index 0000000000..a6eb2ceb38
--- /dev/null
+++ b/third_party/python/attrs/attrs-23.1.0.dist-info/RECORD
@@ -0,0 +1,35 @@
+attr/__init__.py,sha256=dSRUBxRVTh-dXMrMR_oQ3ZISu2QSfhSZlik03Mjbu30,3241
+attr/__init__.pyi,sha256=rIK-2IakIoehVtqXK5l5rs9_fJNCbnYtKTS3cOAVJD8,17609
+attr/_cmp.py,sha256=diMUQV-BIg7IjIb6-o1hswtnjrR4qdAUz_tE8gxS96w,4098
+attr/_cmp.pyi,sha256=sGQmOM0w3_K4-X8cTXR7g0Hqr290E8PTObA9JQxWQqc,399
+attr/_compat.py,sha256=d3cpIu60IbKrLywPni17RUEQY7MvkqqKifyzJ5H3zRU,5803
+attr/_config.py,sha256=5W8lgRePuIOWu1ZuqF1899e2CmXGc95-ipwTpF1cEU4,826
+attr/_funcs.py,sha256=YMtzHRSOnFvOVJ7at3E0K95A2lW26HDjby96TMTDbc0,16730
+attr/_make.py,sha256=JIyKV-HRh3IcHi-EvOj2dw6tRoqATlx2kBHFrrxZpk0,96979
+attr/_next_gen.py,sha256=8lB_S5SFgX2KsflksK8Zygk6XDXToQYtIlmgd37I9aY,6271
+attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469
+attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121
+attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209
+attr/converters.py,sha256=xfGVSPRgWGcym6N5FZM9fyfvCQePqFyApWeC5BXKvoM,3602
+attr/converters.pyi,sha256=jKlpHBEt6HVKJvgrMFJRrHq8p61GXg4-Nd5RZWKJX7M,406
+attr/exceptions.py,sha256=0ZTyH_mHmI9utwTTbBWrdS_ck5jps9R2M_fYJPXxH_U,1890
+attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539
+attr/filters.py,sha256=9pYvXqdg6mtLvKIIb56oALRMoHFnQTcGCO4EXTc1qyM,1470
+attr/filters.pyi,sha256=0mRCjLKxdcvAo0vD-Cr81HfRXXCp9j_cAXjOoAHtPGM,225
+attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+attr/setters.py,sha256=pbCZQ-pE6ZxjDqZfWWUhUFefXtpekIU4qS_YDMLPQ50,1400
+attr/setters.pyi,sha256=pyY8TVNBu8TWhOldv_RxHzmGvdgFQH981db70r0fn5I,567
+attr/validators.py,sha256=C2MQgX7ubL_cs5YzibWa8m0YxdMq5_3Ch3dVIzsLO-Y,20702
+attr/validators.pyi,sha256=167Dl9nt7NUhE9wht1I-buo039qyUT1nEUT_nKjSWr4,2580
+attrs/__init__.py,sha256=9_5waVbFs7rLqtXZ73tNDrxhezyZ8VZeX4BbvQ3EeJw,1039
+attrs/__init__.pyi,sha256=s_ajQ_U14DOsOz0JbmAKDOi46B3v2PcdO0UAV1MY6Ek,2168
+attrs/converters.py,sha256=fCBEdlYWcmI3sCnpUk2pz22GYtXzqTkp6NeOpdI64PY,70
+attrs/exceptions.py,sha256=SlDli6AY77f6ny-H7oy98OkQjsrw-D_supEuErIVYkE,70
+attrs/filters.py,sha256=dc_dNey29kH6KLU1mT2Dakq7tZ3kBfzEGwzOmDzw1F8,67
+attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+attrs/setters.py,sha256=oKw51C72Hh45wTwYvDHJP9kbicxiMhMR4Y5GvdpKdHQ,67
+attrs/validators.py,sha256=4ag1SyVD2Hm3PYKiNG_NOtR_e7f81Hr6GiNl4YvXo4Q,70
+attrs-23.1.0.dist-info/METADATA,sha256=yglwUXko75Q-IJ6LmPVQ4Y99KJS3CPK0NW8ovXFYsDg,11348
+attrs-23.1.0.dist-info/WHEEL,sha256=EI2JsGydwUL5GP9t6kzZv7G3HDPi7FuZDDf9In6amRM,87
+attrs-23.1.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109
+attrs-23.1.0.dist-info/RECORD,,
diff --git a/third_party/python/attrs/attrs-23.1.0.dist-info/WHEEL b/third_party/python/attrs/attrs-23.1.0.dist-info/WHEEL
new file mode 100644
index 0000000000..58d0071fa2
--- /dev/null
+++ b/third_party/python/attrs/attrs-23.1.0.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.14.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/third_party/python/attrs/attrs-23.1.0.dist-info/licenses/LICENSE b/third_party/python/attrs/attrs-23.1.0.dist-info/licenses/LICENSE
new file mode 100644
index 0000000000..2bd6453d25
--- /dev/null
+++ b/third_party/python/attrs/attrs-23.1.0.dist-info/licenses/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Hynek Schlawack and the attrs contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/python/attrs/attrs/__init__.py b/third_party/python/attrs/attrs/__init__.py
new file mode 100644
index 0000000000..0c2481561a
--- /dev/null
+++ b/third_party/python/attrs/attrs/__init__.py
@@ -0,0 +1,65 @@
+# SPDX-License-Identifier: MIT
+
+from attr import (
+ NOTHING,
+ Attribute,
+ AttrsInstance,
+ Factory,
+ _make_getattr,
+ assoc,
+ cmp_using,
+ define,
+ evolve,
+ field,
+ fields,
+ fields_dict,
+ frozen,
+ has,
+ make_class,
+ mutable,
+ resolve_types,
+ validate,
+)
+from attr._next_gen import asdict, astuple
+
+from . import converters, exceptions, filters, setters, validators
+
+
+__all__ = [
+ "__author__",
+ "__copyright__",
+ "__description__",
+ "__doc__",
+ "__email__",
+ "__license__",
+ "__title__",
+ "__url__",
+ "__version__",
+ "__version_info__",
+ "asdict",
+ "assoc",
+ "astuple",
+ "Attribute",
+ "AttrsInstance",
+ "cmp_using",
+ "converters",
+ "define",
+ "evolve",
+ "exceptions",
+ "Factory",
+ "field",
+ "fields_dict",
+ "fields",
+ "filters",
+ "frozen",
+ "has",
+ "make_class",
+ "mutable",
+ "NOTHING",
+ "resolve_types",
+ "setters",
+ "validate",
+ "validators",
+]
+
+__getattr__ = _make_getattr(__name__)
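+
+# Usage sketch (illustrative, not part of the upstream module): the ``attrs``
+# package simply re-exports the modern ``attr`` API under its newer name, so:
+#
+#     import attrs
+#
+#     @attrs.define
+#     class Point:
+#         x: int
+#         y: int
+#
+#     assert attrs.asdict(Point(1, 2)) == {"x": 1, "y": 2}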
diff --git a/third_party/python/attrs/attrs/__init__.pyi b/third_party/python/attrs/attrs/__init__.pyi
new file mode 100644
index 0000000000..9372cfea16
--- /dev/null
+++ b/third_party/python/attrs/attrs/__init__.pyi
@@ -0,0 +1,67 @@
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+)
+
+# Because we need to type our own stuff, we have to make everything from
+# attr explicitly public too.
+from attr import __author__ as __author__
+from attr import __copyright__ as __copyright__
+from attr import __description__ as __description__
+from attr import __email__ as __email__
+from attr import __license__ as __license__
+from attr import __title__ as __title__
+from attr import __url__ as __url__
+from attr import __version__ as __version__
+from attr import __version_info__ as __version_info__
+from attr import _FilterType
+from attr import assoc as assoc
+from attr import Attribute as Attribute
+from attr import AttrsInstance as AttrsInstance
+from attr import cmp_using as cmp_using
+from attr import converters as converters
+from attr import define as define
+from attr import evolve as evolve
+from attr import exceptions as exceptions
+from attr import Factory as Factory
+from attr import field as field
+from attr import fields as fields
+from attr import fields_dict as fields_dict
+from attr import filters as filters
+from attr import frozen as frozen
+from attr import has as has
+from attr import make_class as make_class
+from attr import mutable as mutable
+from attr import NOTHING as NOTHING
+from attr import resolve_types as resolve_types
+from attr import setters as setters
+from attr import validate as validate
+from attr import validators as validators
+
+# TODO: see definition of attr.asdict/astuple
+def asdict(
+ inst: AttrsInstance,
+ recurse: bool = ...,
+ filter: Optional[_FilterType[Any]] = ...,
+ dict_factory: Type[Mapping[Any, Any]] = ...,
+ retain_collection_types: bool = ...,
+ value_serializer: Optional[
+ Callable[[type, Attribute[Any], Any], Any]
+ ] = ...,
+ tuple_keys: bool = ...,
+) -> Dict[str, Any]: ...
+
+# TODO: add support for returning NamedTuple from the mypy plugin
+def astuple(
+ inst: AttrsInstance,
+ recurse: bool = ...,
+ filter: Optional[_FilterType[Any]] = ...,
+ tuple_factory: Type[Sequence[Any]] = ...,
+ retain_collection_types: bool = ...,
+) -> Tuple[Any, ...]: ...
diff --git a/third_party/python/attrs/attrs/converters.py b/third_party/python/attrs/attrs/converters.py
new file mode 100644
index 0000000000..edfa8d3c16
--- /dev/null
+++ b/third_party/python/attrs/attrs/converters.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.converters import * # noqa
diff --git a/third_party/python/attrs/attrs/exceptions.py b/third_party/python/attrs/attrs/exceptions.py
new file mode 100644
index 0000000000..bd9efed202
--- /dev/null
+++ b/third_party/python/attrs/attrs/exceptions.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.exceptions import * # noqa
diff --git a/third_party/python/attrs/attrs/filters.py b/third_party/python/attrs/attrs/filters.py
new file mode 100644
index 0000000000..52959005b0
--- /dev/null
+++ b/third_party/python/attrs/attrs/filters.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.filters import * # noqa
diff --git a/third_party/python/attrs/attrs/py.typed b/third_party/python/attrs/attrs/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/attrs/attrs/py.typed
diff --git a/third_party/python/attrs/attrs/setters.py b/third_party/python/attrs/attrs/setters.py
new file mode 100644
index 0000000000..9b50770804
--- /dev/null
+++ b/third_party/python/attrs/attrs/setters.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.setters import * # noqa
diff --git a/third_party/python/attrs/attrs/validators.py b/third_party/python/attrs/attrs/validators.py
new file mode 100644
index 0000000000..ab2c9b3024
--- /dev/null
+++ b/third_party/python/attrs/attrs/validators.py
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: MIT
+
+from attr.validators import * # noqa
diff --git a/third_party/python/blessed/blessed-1.19.1.dist-info/LICENSE b/third_party/python/blessed/blessed-1.19.1.dist-info/LICENSE
new file mode 100644
index 0000000000..4b0713283e
--- /dev/null
+++ b/third_party/python/blessed/blessed-1.19.1.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2014 Jeff Quast
+Copyright (c) 2011 Erik Rose
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/python/blessed/blessed-1.19.1.dist-info/METADATA b/third_party/python/blessed/blessed-1.19.1.dist-info/METADATA
new file mode 100644
index 0000000000..c0e3c98cdd
--- /dev/null
+++ b/third_party/python/blessed/blessed-1.19.1.dist-info/METADATA
@@ -0,0 +1,269 @@
+Metadata-Version: 2.1
+Name: blessed
+Version: 1.19.1
+Summary: Easy, practical library for making terminal apps, by providing an elegant, well-documented interface to Colors, Keyboard input, and screen Positioning capabilities.
+Home-page: https://github.com/jquast/blessed
+Author: Jeff Quast, Erik Rose, Avram Lubkin
+Author-email: contact@jeffquast.com
+License: MIT
+Project-URL: Documentation, https://blessed.readthedocs.io
+Keywords: terminal,sequences,tty,curses,ncurses,formatting,style,color,console,keyboard,ansi,xterm
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Environment :: Console :: Curses
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: User Interfaces
+Classifier: Topic :: Terminals
+Classifier: Typing :: Typed
+Requires-Python: >=2.7
+License-File: LICENSE
+Requires-Dist: wcwidth (>=0.1.4)
+Requires-Dist: six (>=1.9.0)
+Requires-Dist: jinxed (>=1.1.0) ; platform_system == "Windows"
+Requires-Dist: ordereddict (==1.1) ; python_version < "2.7"
+Requires-Dist: backports.functools-lru-cache (>=1.2.1) ; python_version < "3.2"
+
+| |pypi_downloads| |codecov| |windows| |linux| |mac| |bsd|
+
+Introduction
+============
+
+Blessed is an easy, practical *library* for making *terminal* apps, by providing an elegant,
+well-documented interface to Colors_, Keyboard_ input, and screen position and Location_
+capabilities.
+
+.. code-block:: python
+
+ from blessed import Terminal
+
+ term = Terminal()
+
+ print(term.home + term.clear + term.move_y(term.height // 2))
+ print(term.black_on_darkkhaki(term.center('press any key to continue.')))
+
+ with term.cbreak(), term.hidden_cursor():
+ inp = term.inkey()
+
+ print(term.move_down(2) + 'You pressed ' + term.bold(repr(inp)))
+
+.. figure:: https://dxtz6bzwq9sxx.cloudfront.net/demo_basic_intro.gif
+ :alt: Animation of running the code example
+
+It's meant to be *fun* and *easy* to do basic terminal graphics and styling with Python using
+*blessed*. Terminal_ is the only class you need to import and the only object you should need for
+terminal capabilities.
+
+Whether you want to improve CLI apps with colors, or make fullscreen applications or games,
+*blessed* should help get you started quickly. Your users will love it because it works on Windows,
+Mac, and Linux, and you will love it because it has plenty of documentation and examples!
+
+Full documentation at https://blessed.readthedocs.io/en/latest/
+
+Examples
+--------
+
+.. figure:: https://dxtz6bzwq9sxx.cloudfront.net/blessed_demo_intro.gif
+ :alt: Animations of x11-colorpicker.py, bounce.py, worms.py, and plasma.py
+
+ x11-colorpicker.py_, bounce.py_, worms.py_, and plasma.py_, from our repository.
+
+Exemplary 3rd-party projects which use *blessed*:
+
+.. figure:: https://dxtz6bzwq9sxx.cloudfront.net/demo_3rdparty_voltron.png
+ :alt: Screenshot of 'Voltron' (By the author of Voltron, from their README).
+
+   Voltron_ is an extensible debugger UI toolkit written in Python.
+
+.. figure:: https://dxtz6bzwq9sxx.cloudfront.net/demo_3rdparty_cursewords.gif
+ :alt: Animation of 'cursewords' (By the author of cursewords, from their README).
+
+   cursewords_ is a "graphical" command line program for solving crossword puzzles in the terminal.
+
+.. figure:: https://dxtz6bzwq9sxx.cloudfront.net/demo_3rdparty_githeat.gif
+ :alt: Animation of 'githeat.interactive', using blessed repository at the time of capture.
+
+ GitHeat_ builds an interactive heatmap of git history.
+
+.. figure:: https://dxtz6bzwq9sxx.cloudfront.net/demo_3rdparty_dashing.gif
+ :alt: Animations from 'Dashing' (By the author of Dashing, from their README)
+
+ Dashing_ is a library to quickly create terminal-based dashboards.
+
+.. figure:: https://dxtz6bzwq9sxx.cloudfront.net/demo_3rdparty_enlighten.gif
+ :alt: Animations from 'Enlighten' (By the author of Enlighten, from their README)
+
+ Enlighten_ is a console progress bar library that allows simultaneous output without redirection.
+
+.. figure:: https://dxtz6bzwq9sxx.cloudfront.net/blessed_3rdparty_macht.gif
+ :alt: Demonstration of 'macht', a 2048 clone
+
+ macht_ is a clone of the (briefly popular) puzzle game, 2048.
+
+Requirements
+------------
+
+*Blessed* works with Windows, Mac, Linux, and BSDs, on Python 2.7, 3.4, 3.5, 3.6, 3.7, and 3.8.
+
+Brief Overview
+--------------
+
+*Blessed* is more than just a Python wrapper around curses_:
+
+* Styles_, Colors_, and maybe a little positioning without necessarily clearing the whole screen
+ first.
+* Works great with Python's new f-strings_ or any other kind of string formatting.
+* Provides up-to-the-moment Location_ and terminal height and width, so you can respond to terminal
+ size changes.
+* Avoids making a mess if the output gets piped to a non-terminal: you can output sequences to any
+  file-like object such as *StringIO*, files, pipes or sockets.
+* Uses `terminfo(5)`_ so it works with any terminal type and capability: No more C-like calls to
+ tigetstr_ and tparm_.
+* Non-obtrusive calls to only the capabilities database ensure that you are free to mix and match
+  with calls to any other curses application code or library you like.
+* Provides context managers `Terminal.fullscreen()`_ and `Terminal.hidden_cursor()`_ to safely
+  express terminal modes; curses development will no longer fudge up your shell.
+* Acts intelligently when somebody redirects your output to a file, omitting all of the special
+  sequences and colors but still emitting all of the text.
+
+*Blessed* is a fork of `blessings <https://github.com/erikrose/blessings>`_, which supports all
+of the above with the same API, as well as the following **enhancements** (a short sketch
+follows the list):
+
+* Windows support, new since Dec. 2019!
+* Dead-simple keyboard handling: safely decodes unicode input in your system's preferred locale
+  and supports application/arrow keys.
+* 24-bit color support, using `Terminal.color_rgb()`_ and `Terminal.on_color_rgb()`_ and all X11
+ Colors_ by name, and not by number.
+* Determine cursor location using `Terminal.get_location()`_, enter key-at-a-time input mode using
+ `Terminal.cbreak()`_ or `Terminal.raw()`_ context managers, and read timed key presses using
+ `Terminal.inkey()`_.
+* Allows the *printable length* of strings that contain sequences to be determined by
+ `Terminal.length()`_, supporting additional methods `Terminal.wrap()`_ and `Terminal.center()`_,
+ terminal-aware variants of the built-in function `textwrap.wrap()`_ and method `str.center()`_,
+ respectively.
+* Allows sequences to be removed from strings that contain them, using `Terminal.strip_seqs()`_ or
+ sequences and whitespace using `Terminal.strip()`_.
+
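+A quick sketch (illustrative, using the stable calls named above) of the measurement and
+24-bit color helpers:
+
+.. code-block:: python
+
+    from blessed import Terminal
+
+    term = Terminal()
+    text = term.bold_red('hello world')
+    print(term.length(text))   # 11 -- styling sequences are not counted
+    print(term.center(text))   # centered to the current terminal width
+    print(term.color_rgb(255, 85, 0) + 'tangerine' + term.normal)
+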
+Before And After
+----------------
+
+With the built-in curses_ module, this is how you would typically
+print some underlined text at the bottom of the screen:
+
+.. code-block:: python
+
+ from curses import tigetstr, setupterm, tparm
+ from fcntl import ioctl
+ from os import isatty
+ import struct
+ import sys
+ from termios import TIOCGWINSZ
+
+ # If we want to tolerate having our output piped to other commands or
+ # files without crashing, we need to do all this branching:
+ if hasattr(sys.stdout, 'fileno') and isatty(sys.stdout.fileno()):
+ setupterm()
+ sc = tigetstr('sc')
+ cup = tigetstr('cup')
+ rc = tigetstr('rc')
+ underline = tigetstr('smul')
+ normal = tigetstr('sgr0')
+ else:
+ sc = cup = rc = underline = normal = ''
+
+ # Save cursor position.
+ print(sc)
+
+ if cup:
+ # tigetnum('lines') doesn't always update promptly, hence this:
+ height = struct.unpack('hhhh', ioctl(0, TIOCGWINSZ, '\000' * 8))[0]
+
+ # Move cursor to bottom.
+ print(tparm(cup, height - 1, 0))
+
+ print('This is {under}underlined{normal}!'
+ .format(under=underline, normal=normal))
+
+ # Restore cursor position.
+ print(rc)
+
+The same program with *Blessed* is simply:
+
+.. code-block:: python
+
+ from blessed import Terminal
+
+ term = Terminal()
+ with term.location(0, term.height - 1):
+ print('This is ' + term.underline('underlined') + '!', end='')
+
+.. _curses: https://docs.python.org/3/library/curses.html
+.. _tigetstr: http://man.openbsd.org/cgi-bin/man.cgi/OpenBSD-current/man3/tigetstr.3
+.. _tparm: http://man.openbsd.org/cgi-bin/man.cgi/OpenBSD-current/man3/tparm.3
+.. _`terminfo(5)`: https://invisible-island.net/ncurses/man/terminfo.5.html
+.. _str.center(): https://docs.python.org/3/library/stdtypes.html#str.center
+.. _textwrap.wrap(): https://docs.python.org/3/library/textwrap.html#textwrap.wrap
+.. _Terminal: https://blessed.readthedocs.io/en/stable/terminal.html
+.. _`Terminal.fullscreen()`: https://blessed.readthedocs.io/en/latest/api/terminal.html#blessed.terminal.Terminal.fullscreen
+.. _`Terminal.get_location()`: https://blessed.readthedocs.io/en/latest/location.html#finding-the-cursor
+.. _`Terminal.color_rgb()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.color_rgb
+.. _`Terminal.hidden_cursor()`: https://blessed.readthedocs.io/en/latest/api/terminal.html#blessed.terminal.Terminal.hidden_cursor
+.. _`Terminal.on_color_rgb()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.on_color_rgb
+.. _`Terminal.length()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.length
+.. _`Terminal.strip()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.strip
+.. _`Terminal.rstrip()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.rstrip
+.. _`Terminal.lstrip()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.lstrip
+.. _`Terminal.strip_seqs()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.strip_seqs
+.. _`Terminal.wrap()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.wrap
+.. _`Terminal.center()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.center
+.. _`Terminal.rjust()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.rjust
+.. _`Terminal.ljust()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.ljust
+.. _`Terminal.cbreak()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.cbreak
+.. _`Terminal.raw()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.raw
+.. _`Terminal.inkey()`: https://blessed.readthedocs.io/en/stable/api/terminal.html#blessed.terminal.Terminal.inkey
+.. _Colors: https://blessed.readthedocs.io/en/stable/colors.html
+.. _Styles: https://blessed.readthedocs.io/en/stable/terminal.html#styles
+.. _Location: https://blessed.readthedocs.io/en/stable/location.html
+.. _Keyboard: https://blessed.readthedocs.io/en/stable/keyboard.html
+.. _Examples: https://blessed.readthedocs.io/en/stable/examples.html
+.. _x11-colorpicker.py: https://blessed.readthedocs.io/en/stable/examples.html#x11-colorpicker-py
+.. _bounce.py: https://blessed.readthedocs.io/en/stable/examples.html#bounce-py
+.. _worms.py: https://blessed.readthedocs.io/en/stable/examples.html#worms-py
+.. _plasma.py: https://blessed.readthedocs.io/en/stable/examples.html#plasma-py
+.. _Voltron: https://github.com/snare/voltron
+.. _cursewords: https://github.com/thisisparker/cursewords
+.. _GitHeat: https://github.com/AmmsA/Githeat
+.. _Dashing: https://github.com/FedericoCeratto/dashing
+.. _Enlighten: https://github.com/Rockhopper-Technologies/enlighten
+.. _macht: https://github.com/rolfmorel/macht
+.. _f-strings: https://docs.python.org/3/reference/lexical_analysis.html#f-strings
+.. |pypi_downloads| image:: https://img.shields.io/pypi/dm/blessed.svg?logo=pypi
+ :alt: Downloads
+ :target: https://pypi.org/project/blessed/
+.. |codecov| image:: https://codecov.io/gh/jquast/blessed/branch/master/graph/badge.svg
+ :alt: codecov.io Code Coverage
+ :target: https://codecov.io/gh/jquast/blessed/
+.. |linux| image:: https://img.shields.io/badge/Linux-yes-success?logo=linux
+ :alt: Linux supported
+.. |windows| image:: https://img.shields.io/badge/Windows-NEW-success?logo=windows
+ :alt: Windows supported
+.. |mac| image:: https://img.shields.io/badge/MacOS-yes-success?logo=apple
+ :alt: MacOS supported
+.. |bsd| image:: https://img.shields.io/badge/BSD-yes-success?logo=freebsd
+ :alt: BSD supported
+
+
diff --git a/third_party/python/blessed/blessed-1.19.1.dist-info/RECORD b/third_party/python/blessed/blessed-1.19.1.dist-info/RECORD
new file mode 100644
index 0000000000..ddbe360887
--- /dev/null
+++ b/third_party/python/blessed/blessed-1.19.1.dist-info/RECORD
@@ -0,0 +1,23 @@
+blessed/__init__.py,sha256=cVCRzlNO_XNDjs-hlDkzv5m3o5BMXxD-hVm30bbWQ1w,687
+blessed/_capabilities.py,sha256=Thj8lgDvhfM6TttvziDu0mabqZqYnwAwC3NTtSMntxc,6292
+blessed/_capabilities.pyi,sha256=If5dG9LhrIyTxUuCuV44bNxnWMk3S7Xvry3oGOBHp2k,265
+blessed/color.py,sha256=D5VmWAsZxSYIERKmkJ7FVhZgNssEtUkV8GcMbDnoxVk,7502
+blessed/color.pyi,sha256=4DNLFe-SMCVKbsvuMFLWUzNCdy45bgtZuk3wP51HlKQ,690
+blessed/colorspace.py,sha256=GEoKL18C9VrBlt-LqbhFirBuhqUmS-tyy0sb9VNJS5U,35322
+blessed/colorspace.pyi,sha256=zwo_F4rf0GSPJsW2irvxHQ3SqNlGgt7VQOFN2hXnTnw,234
+blessed/formatters.py,sha256=3QQiwMC51EdXy6-ZbCPRfw87X49jErvk2-k1yIBwk9g,19416
+blessed/formatters.pyi,sha256=gDgcWIk3pqif7SZgL5DG--GjO7QXwzen5UNgqQ_XHsw,2091
+blessed/keyboard.py,sha256=2WNuDJp_bb5tyRho6cGUup6Hqvp7bFgGDC5D5HbJ2-I,17687
+blessed/keyboard.pyi,sha256=9ibu_A44OkWKcdy_36EeHVhK4ybnurig5arrXxwVOeM,796
+blessed/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+blessed/sequences.py,sha256=Hjs2ddTcXFWqxdL_PKuMoPvaJ3gRRjsZxdy9dByXvLU,17126
+blessed/sequences.pyi,sha256=_zJkZm8S015g242mRUoEp12Qs27DS348vFqkSRsYfHc,1852
+blessed/terminal.py,sha256=QA7yt-E7U72WF7RGkfR7g_HcGAuGFalF3aUP6Zm1L6Y,59889
+blessed/terminal.pyi,sha256=ujihEHr4Ii6r8xw2UBOnAjcY1Wm7dlGbGwfLBqyQQiU,4078
+blessed/win_terminal.py,sha256=uGl52EiEq4K3udsZJHn2tlnESUHg_77fxWrEuFD77WY,5804
+blessed/win_terminal.pyi,sha256=GoS67cnj927_SXZRr1WCLD21ie_w7zlA20V33clpV7E,333
+blessed-1.19.1.dist-info/LICENSE,sha256=YBSQ1biC0QDEeC-dqb_dI_lg5reeNazESZQ5XBj01X0,1083
+blessed-1.19.1.dist-info/METADATA,sha256=H1PBstJVFYedNvC-3vOFQE0PItwv5f6wtmlKy0P5hYQ,13155
+blessed-1.19.1.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+blessed-1.19.1.dist-info/top_level.txt,sha256=2lUIfLwFZtAucvesS5UE8_MxXID5rSx_3gJ2-1JGckA,8
+blessed-1.19.1.dist-info/RECORD,,
diff --git a/third_party/python/blessed/blessed-1.19.1.dist-info/WHEEL b/third_party/python/blessed/blessed-1.19.1.dist-info/WHEEL
new file mode 100644
index 0000000000..01b8fc7d4a
--- /dev/null
+++ b/third_party/python/blessed/blessed-1.19.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/blessed/blessed-1.19.1.dist-info/top_level.txt b/third_party/python/blessed/blessed-1.19.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..d43de1b8be
--- /dev/null
+++ b/third_party/python/blessed/blessed-1.19.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+blessed
diff --git a/third_party/python/blessed/blessed/__init__.py b/third_party/python/blessed/blessed/__init__.py
new file mode 100644
index 0000000000..d1a89c1af7
--- /dev/null
+++ b/third_party/python/blessed/blessed/__init__.py
@@ -0,0 +1,23 @@
+"""
+A thin, practical wrapper around terminal capabilities in Python.
+
+http://pypi.python.org/pypi/blessed
+"""
+# std imports
+import sys as _sys
+import platform as _platform
+
+# isort: off
+if _platform.system() == 'Windows':
+ from blessed.win_terminal import Terminal
+else:
+ from blessed.terminal import Terminal # type: ignore
+
+if (3, 0, 0) <= _sys.version_info[:3] < (3, 2, 3):
+ # Good till 3.2.10
+ # Python 3.x < 3.2.3 has a bug in which tparm() erroneously takes a string.
+ raise ImportError('Blessed needs Python 3.2.3 or greater for Python 3 '
+ 'support due to http://bugs.python.org/issue10570.')
+
+__all__ = ('Terminal',)
+__version__ = "1.19.1"
diff --git a/third_party/python/blessed/blessed/_capabilities.py b/third_party/python/blessed/blessed/_capabilities.py
new file mode 100644
index 0000000000..c4df54bca6
--- /dev/null
+++ b/third_party/python/blessed/blessed/_capabilities.py
@@ -0,0 +1,168 @@
+"""Terminal capability builder patterns."""
+# std imports
+import re
+from collections import OrderedDict
+
+__all__ = (
+ 'CAPABILITY_DATABASE',
+ 'CAPABILITIES_RAW_MIXIN',
+ 'CAPABILITIES_ADDITIVES',
+ 'CAPABILITIES_CAUSE_MOVEMENT',
+)
+
+CAPABILITY_DATABASE = OrderedDict((
+ ('bell', ('bel', {})),
+ ('carriage_return', ('cr', {})),
+ ('change_scroll_region', ('csr', {'nparams': 2})),
+ ('clear_all_tabs', ('tbc', {})),
+ ('clear_screen', ('clear', {})),
+ ('clr_bol', ('el1', {})),
+ ('clr_eol', ('el', {})),
+ ('clr_eos', ('clear_eos', {})),
+ ('column_address', ('hpa', {'nparams': 1})),
+ ('cursor_address', ('cup', {'nparams': 2, 'match_grouped': True})),
+ ('cursor_down', ('cud1', {})),
+ ('cursor_home', ('home', {})),
+ ('cursor_invisible', ('civis', {})),
+ ('cursor_left', ('cub1', {})),
+ ('cursor_normal', ('cnorm', {})),
+ ('cursor_report', ('u6', {'nparams': 2, 'match_grouped': True})),
+ ('cursor_right', ('cuf1', {})),
+ ('cursor_up', ('cuu1', {})),
+ ('cursor_visible', ('cvvis', {})),
+ ('delete_character', ('dch1', {})),
+ ('delete_line', ('dl1', {})),
+ ('enter_blink_mode', ('blink', {})),
+ ('enter_bold_mode', ('bold', {})),
+ ('enter_dim_mode', ('dim', {})),
+ ('enter_fullscreen', ('smcup', {})),
+ ('enter_standout_mode', ('standout', {})),
+ ('enter_superscript_mode', ('superscript', {})),
+    ('enter_subscript_mode', ('subscript', {})),
+ ('enter_underline_mode', ('underline', {})),
+ ('erase_chars', ('ech', {'nparams': 1})),
+ ('exit_alt_charset_mode', ('rmacs', {})),
+ ('exit_am_mode', ('rmam', {})),
+ ('exit_attribute_mode', ('sgr0', {})),
+ ('exit_ca_mode', ('rmcup', {})),
+ ('exit_fullscreen', ('rmcup', {})),
+ ('exit_insert_mode', ('rmir', {})),
+ ('exit_standout_mode', ('rmso', {})),
+ ('exit_underline_mode', ('rmul', {})),
+ ('flash_hook', ('hook', {})),
+ ('flash_screen', ('flash', {})),
+ ('insert_line', ('il1', {})),
+ ('keypad_local', ('rmkx', {})),
+ ('keypad_xmit', ('smkx', {})),
+ ('meta_off', ('rmm', {})),
+ ('meta_on', ('smm', {})),
+ ('orig_pair', ('op', {})),
+ ('parm_down_cursor', ('cud', {'nparams': 1})),
+ ('parm_left_cursor', ('cub', {'nparams': 1, 'match_grouped': True})),
+ ('parm_dch', ('dch', {'nparams': 1})),
+ ('parm_delete_line', ('dl', {'nparams': 1})),
+ ('parm_ich', ('ich', {'nparams': 1})),
+ ('parm_index', ('indn', {'nparams': 1})),
+ ('parm_insert_line', ('il', {'nparams': 1})),
+ ('parm_right_cursor', ('cuf', {'nparams': 1, 'match_grouped': True})),
+ ('parm_rindex', ('rin', {'nparams': 1})),
+ ('parm_up_cursor', ('cuu', {'nparams': 1})),
+ ('print_screen', ('mc0', {})),
+ ('prtr_off', ('mc4', {})),
+ ('prtr_on', ('mc5', {})),
+ ('reset_1string', ('r1', {})),
+ ('reset_2string', ('r2', {})),
+ ('reset_3string', ('r3', {})),
+ ('restore_cursor', ('rc', {})),
+ ('row_address', ('vpa', {'nparams': 1})),
+ ('save_cursor', ('sc', {})),
+ ('scroll_forward', ('ind', {})),
+ ('scroll_reverse', ('rev', {})),
+ ('set0_des_seq', ('s0ds', {})),
+ ('set1_des_seq', ('s1ds', {})),
+ ('set2_des_seq', ('s2ds', {})),
+ ('set3_des_seq', ('s3ds', {})),
+ # this 'color' is deceiving, but often matching, and a better match
+ # than set_a_attributes1 or set_a_foreground.
+ ('color', ('_foreground_color', {'nparams': 1, 'match_any': True,
+ 'numeric': 1})),
+ ('set_a_foreground', ('color', {'nparams': 1, 'match_any': True,
+ 'numeric': 1})),
+ ('set_a_background', ('on_color', {'nparams': 1, 'match_any': True,
+ 'numeric': 1})),
+ ('set_tab', ('hts', {})),
+ ('tab', ('ht', {})),
+ ('italic', ('sitm', {})),
+ ('no_italic', ('sitm', {})),
+))
+
+CAPABILITIES_RAW_MIXIN = {
+ 'bell': re.escape('\a'),
+ 'carriage_return': re.escape('\r'),
+ 'cursor_left': re.escape('\b'),
+ 'cursor_report': re.escape('\x1b') + r'\[(\d+)\;(\d+)R',
+ 'cursor_right': re.escape('\x1b') + r'\[C',
+ 'exit_attribute_mode': re.escape('\x1b') + r'\[m',
+ 'parm_left_cursor': re.escape('\x1b') + r'\[(\d+)D',
+ 'parm_right_cursor': re.escape('\x1b') + r'\[(\d+)C',
+    'restore_cursor': re.escape('\x1b') + r'\[u',
+    'save_cursor': re.escape('\x1b') + r'\[s',
+ 'scroll_forward': re.escape('\n'),
+ 'set0_des_seq': re.escape('\x1b(B'),
+ 'tab': re.escape('\t'),
+}
+_ANY_NOTESC = '[^' + re.escape('\x1b') + ']*'
+
+CAPABILITIES_ADDITIVES = {
+ 'link': ('link',
+ re.escape('\x1b') + r'\]8;' + _ANY_NOTESC + ';' +
+ _ANY_NOTESC + re.escape('\x1b') + '\\\\'),
+ 'color256': ('color', re.escape('\x1b') + r'\[38;5;\d+m'),
+ 'on_color256': ('on_color', re.escape('\x1b') + r'\[48;5;\d+m'),
+ 'color_rgb': ('color_rgb', re.escape('\x1b') + r'\[38;2;\d+;\d+;\d+m'),
+ 'on_color_rgb': ('on_color_rgb', re.escape('\x1b') + r'\[48;2;\d+;\d+;\d+m'),
+ 'shift_in': ('', re.escape('\x0f')),
+ 'shift_out': ('', re.escape('\x0e')),
+    # sgr(...) outputs strangely, use the basic ANSI/ECMA-48 codes here.
+ 'set_a_attributes1': (
+ 'sgr', re.escape('\x1b') + r'\[\d+m'),
+ 'set_a_attributes2': (
+ 'sgr', re.escape('\x1b') + r'\[\d+\;\d+m'),
+ 'set_a_attributes3': (
+ 'sgr', re.escape('\x1b') + r'\[\d+\;\d+\;\d+m'),
+ 'set_a_attributes4': (
+ 'sgr', re.escape('\x1b') + r'\[\d+\;\d+\;\d+\;\d+m'),
+ # this helps where xterm's sgr0 includes set0_des_seq, we'd
+ # rather like to also match this immediate substring.
+ 'sgr0': ('sgr0', re.escape('\x1b') + r'\[m'),
+ 'backspace': ('', re.escape('\b')),
+ 'ascii_tab': ('', re.escape('\t')),
+ 'clr_eol': ('', re.escape('\x1b[K')),
+ 'clr_eol0': ('', re.escape('\x1b[0K')),
+ 'clr_bol': ('', re.escape('\x1b[1K')),
+ 'clr_eosK': ('', re.escape('\x1b[2K')),
+}
+
+CAPABILITIES_CAUSE_MOVEMENT = (
+ 'ascii_tab',
+ 'backspace',
+ 'carriage_return',
+ 'clear_screen',
+ 'column_address',
+ 'cursor_address',
+ 'cursor_down',
+ 'cursor_home',
+ 'cursor_left',
+ 'cursor_right',
+ 'cursor_up',
+ 'enter_fullscreen',
+ 'exit_fullscreen',
+ 'parm_down_cursor',
+ 'parm_left_cursor',
+ 'parm_right_cursor',
+ 'parm_up_cursor',
+ 'restore_cursor',
+ 'row_address',
+ 'scroll_forward',
+ 'tab',
+)
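+
+# Reading the database (illustrative sketch, not upstream code): each entry
+# pairs a capability's long name with the Terminal attribute used to build
+# its matching pattern, plus keyword hints for pattern construction, e.g.
+#
+#     attr, kwds = CAPABILITY_DATABASE['cursor_address']
+#     assert (attr, kwds) == ('cup', {'nparams': 2, 'match_grouped': True})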
diff --git a/third_party/python/blessed/blessed/_capabilities.pyi b/third_party/python/blessed/blessed/_capabilities.pyi
new file mode 100644
index 0000000000..04c59c35fa
--- /dev/null
+++ b/third_party/python/blessed/blessed/_capabilities.pyi
@@ -0,0 +1,7 @@
+# std imports
+from typing import Any, Dict, Tuple, OrderedDict
+
+CAPABILITY_DATABASE: OrderedDict[str, Tuple[str, Dict[str, Any]]]
+CAPABILITIES_RAW_MIXIN: Dict[str, str]
+CAPABILITIES_ADDITIVES: Dict[str, Tuple[str, str]]
+CAPABILITIES_CAUSE_MOVEMENT: Tuple[str, ...]
diff --git a/third_party/python/blessed/blessed/color.py b/third_party/python/blessed/blessed/color.py
new file mode 100644
index 0000000000..482fc0e11e
--- /dev/null
+++ b/third_party/python/blessed/blessed/color.py
@@ -0,0 +1,258 @@
+# -*- coding: utf-8 -*-
+"""
+Sub-module providing color functions.
+
+References,
+
+- https://en.wikipedia.org/wiki/Color_difference
+- http://www.easyrgb.com/en/math.php
+- Measuring Colour by R.W.G. Hunt and M.R. Pointer
+"""
+
+# std imports
+from math import cos, exp, sin, sqrt, atan2
+
+# isort: off
+try:
+ from functools import lru_cache
+except ImportError:
+ # lru_cache was added in Python 3.2
+ from backports.functools_lru_cache import lru_cache
+
+
+def rgb_to_xyz(red, green, blue):
+ """
+ Convert standard RGB color to XYZ color.
+
+ :arg int red: RGB value of Red.
+ :arg int green: RGB value of Green.
+ :arg int blue: RGB value of Blue.
+ :returns: Tuple (X, Y, Z) representing XYZ color
+ :rtype: tuple
+
+ D65/2° standard illuminant
+ """
+ rgb = []
+ for val in red, green, blue:
+ val /= 255.0
+ if val > 0.04045:
+ val = pow((val + 0.055) / 1.055, 2.4)
+ else:
+ val /= 12.92
+ val *= 100
+ rgb.append(val)
+
+ red, green, blue = rgb # pylint: disable=unbalanced-tuple-unpacking
+ x_val = red * 0.4124 + green * 0.3576 + blue * 0.1805
+ y_val = red * 0.2126 + green * 0.7152 + blue * 0.0722
+ z_val = red * 0.0193 + green * 0.1192 + blue * 0.9505
+
+ return x_val, y_val, z_val
+
+
+def xyz_to_lab(x_val, y_val, z_val):
+ """
+ Convert XYZ color to CIE-Lab color.
+
+ :arg float x_val: XYZ value of X.
+ :arg float y_val: XYZ value of Y.
+ :arg float z_val: XYZ value of Z.
+ :returns: Tuple (L, a, b) representing CIE-Lab color
+ :rtype: tuple
+
+ D65/2° standard illuminant
+ """
+ xyz = []
+ for val, ref in (x_val, 95.047), (y_val, 100.0), (z_val, 108.883):
+ val /= ref
+ val = pow(val, 1 / 3.0) if val > 0.008856 else 7.787 * val + 16 / 116.0
+ xyz.append(val)
+
+ x_val, y_val, z_val = xyz # pylint: disable=unbalanced-tuple-unpacking
+ cie_l = 116 * y_val - 16
+ cie_a = 500 * (x_val - y_val)
+ cie_b = 200 * (y_val - z_val)
+
+ return cie_l, cie_a, cie_b
+
+
+@lru_cache(maxsize=256)
+def rgb_to_lab(red, green, blue):
+ """
+ Convert RGB color to CIE-Lab color.
+
+ :arg int red: RGB value of Red.
+ :arg int green: RGB value of Green.
+ :arg int blue: RGB value of Blue.
+ :returns: Tuple (L, a, b) representing CIE-Lab color
+ :rtype: tuple
+
+ D65/2° standard illuminant
+ """
+ return xyz_to_lab(*rgb_to_xyz(red, green, blue))
+
+
+def dist_rgb(rgb1, rgb2):
+ """
+ Determine distance between two rgb colors.
+
+ :arg tuple rgb1: RGB color definition
+ :arg tuple rgb2: RGB color definition
+ :returns: Square of the distance between provided colors
+ :rtype: float
+
+ This works by treating RGB colors as coordinates in three dimensional
+ space and finding the closest point within the configured color range
+ using the formula::
+
+ d^2 = (r2 - r1)^2 + (g2 - g1)^2 + (b2 - b1)^2
+
+ For efficiency, the square of the distance is returned
+ which is sufficient for comparisons
+ """
+ return sum(pow(rgb1[idx] - rgb2[idx], 2) for idx in (0, 1, 2))
+
+
+def dist_rgb_weighted(rgb1, rgb2):
+ """
+ Determine the weighted distance between two rgb colors.
+
+ :arg tuple rgb1: RGB color definition
+ :arg tuple rgb2: RGB color definition
+ :returns: Square of the distance between provided colors
+ :rtype: float
+
+ Similar to a standard distance formula, the values are weighted
+ to approximate human perception of color differences
+
+ For efficiency, the square of the distance is returned
+ which is sufficient for comparisons
+ """
+ red_mean = (rgb1[0] + rgb2[0]) / 2.0
+
+ return ((2 + red_mean / 256) * pow(rgb1[0] - rgb2[0], 2) +
+ 4 * pow(rgb1[1] - rgb2[1], 2) +
+ (2 + (255 - red_mean) / 256) * pow(rgb1[2] - rgb2[2], 2))
+
+
+def dist_cie76(rgb1, rgb2):
+ """
+    Determine distance between two rgb colors using the CIE76 algorithm.
+
+ :arg tuple rgb1: RGB color definition
+ :arg tuple rgb2: RGB color definition
+ :returns: Square of the distance between provided colors
+ :rtype: float
+
+ For efficiency, the square of the distance is returned
+ which is sufficient for comparisons
+ """
+ l_1, a_1, b_1 = rgb_to_lab(*rgb1)
+ l_2, a_2, b_2 = rgb_to_lab(*rgb2)
+ return pow(l_1 - l_2, 2) + pow(a_1 - a_2, 2) + pow(b_1 - b_2, 2)
+
+
+def dist_cie94(rgb1, rgb2):
+ # pylint: disable=too-many-locals
+ """
+ Determine distance between two rgb colors using the CIE94 algorithm.
+
+ :arg tuple rgb1: RGB color definition
+ :arg tuple rgb2: RGB color definition
+ :returns: Square of the distance between provided colors
+ :rtype: float
+
+ For efficiency, the square of the distance is returned
+ which is sufficient for comparisons
+ """
+ l_1, a_1, b_1 = rgb_to_lab(*rgb1)
+ l_2, a_2, b_2 = rgb_to_lab(*rgb2)
+
+ s_l = k_l = k_c = k_h = 1
+ k_1 = 0.045
+ k_2 = 0.015
+
+ delta_l = l_1 - l_2
+ delta_a = a_1 - a_2
+ delta_b = b_1 - b_2
+ c_1 = sqrt(a_1 ** 2 + b_1 ** 2)
+ c_2 = sqrt(a_2 ** 2 + b_2 ** 2)
+ delta_c = c_1 - c_2
+ delta_h = sqrt(delta_a ** 2 + delta_b ** 2 + delta_c ** 2)
+ s_c = 1 + k_1 * c_1
+ s_h = 1 + k_2 * c_1
+
+ return ((delta_l / (k_l * s_l)) ** 2 + # pylint: disable=superfluous-parens
+ (delta_c / (k_c * s_c)) ** 2 +
+ (delta_h / (k_h * s_h)) ** 2)
+
+
+def dist_cie2000(rgb1, rgb2):
+ # pylint: disable=too-many-locals
+ """
+ Determine distance between two rgb colors using the CIE2000 algorithm.
+
+ :arg tuple rgb1: RGB color definition
+ :arg tuple rgb2: RGB color definition
+ :returns: Square of the distance between provided colors
+ :rtype: float
+
+ For efficiency, the square of the distance is returned
+ which is sufficient for comparisons
+ """
+ s_l = k_l = k_c = k_h = 1
+
+ l_1, a_1, b_1 = rgb_to_lab(*rgb1)
+ l_2, a_2, b_2 = rgb_to_lab(*rgb2)
+
+ delta_l = l_2 - l_1
+ l_mean = (l_1 + l_2) / 2
+
+ c_1 = sqrt(a_1 ** 2 + b_1 ** 2)
+ c_2 = sqrt(a_2 ** 2 + b_2 ** 2)
+ c_mean = (c_1 + c_2) / 2
+ delta_c = c_1 - c_2
+
+ g_x = sqrt(c_mean ** 7 / (c_mean ** 7 + 25 ** 7))
+ h_1 = atan2(b_1, a_1 + (a_1 / 2) * (1 - g_x)) % 360
+ h_2 = atan2(b_2, a_2 + (a_2 / 2) * (1 - g_x)) % 360
+
+ if 0 in (c_1, c_2):
+ delta_h_prime = 0
+ h_mean = h_1 + h_2
+ else:
+ delta_h_prime = h_2 - h_1
+ if abs(delta_h_prime) <= 180:
+ h_mean = (h_1 + h_2) / 2
+ else:
+ if h_2 <= h_1:
+ delta_h_prime += 360
+ else:
+ delta_h_prime -= 360
+ h_mean = (h_1 + h_2 + 360) / 2 if h_1 + h_2 < 360 else (h_1 + h_2 - 360) / 2
+
+ delta_h = 2 * sqrt(c_1 * c_2) * sin(delta_h_prime / 2)
+
+ t_x = (1 -
+ 0.17 * cos(h_mean - 30) +
+ 0.24 * cos(2 * h_mean) +
+ 0.32 * cos(3 * h_mean + 6) -
+ 0.20 * cos(4 * h_mean - 63))
+
+ s_l = 1 + (0.015 * (l_mean - 50) ** 2) / sqrt(20 + (l_mean - 50) ** 2)
+ s_c = 1 + 0.045 * c_mean
+ s_h = 1 + 0.015 * c_mean * t_x
+ r_t = -2 * g_x * sin(abs(60 * exp(-1 * abs((delta_h - 275) / 25) ** 2)))
+
+ delta_l = delta_l / (k_l * s_l)
+ delta_c = delta_c / (k_c * s_c)
+ delta_h = delta_h / (k_h * s_h)
+
+ return delta_l ** 2 + delta_c ** 2 + delta_h ** 2 + r_t * delta_c * delta_h
+
+
+COLOR_DISTANCE_ALGORITHMS = {'rgb': dist_rgb,
+ 'rgb-weighted': dist_rgb_weighted,
+ 'cie76': dist_cie76,
+ 'cie94': dist_cie94,
+ 'cie2000': dist_cie2000}
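+
+# Usage sketch (illustrative): pick an algorithm by name and compare two RGB
+# tuples; each function returns a squared distance, so values are only
+# meaningful relative to one another.
+#
+#     dist = COLOR_DISTANCE_ALGORITHMS['cie2000']((255, 0, 0), (250, 5, 5))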
diff --git a/third_party/python/blessed/blessed/color.pyi b/third_party/python/blessed/blessed/color.pyi
new file mode 100644
index 0000000000..ece82e3e98
--- /dev/null
+++ b/third_party/python/blessed/blessed/color.pyi
@@ -0,0 +1,17 @@
+# std imports
+from typing import Dict, Tuple, Callable
+
+_RGB = Tuple[int, int, int]
+
+def rgb_to_xyz(red: int, green: int, blue: int) -> Tuple[float, float, float]: ...
+def xyz_to_lab(
+ x_val: float, y_val: float, z_val: float
+) -> Tuple[float, float, float]: ...
+def rgb_to_lab(red: int, green: int, blue: int) -> Tuple[float, float, float]: ...
+def dist_rgb(rgb1: _RGB, rgb2: _RGB) -> float: ...
+def dist_rgb_weighted(rgb1: _RGB, rgb2: _RGB) -> float: ...
+def dist_cie76(rgb1: _RGB, rgb2: _RGB) -> float: ...
+def dist_cie94(rgb1: _RGB, rgb2: _RGB) -> float: ...
+def dist_cie2000(rgb1: _RGB, rgb2: _RGB) -> float: ...
+
+COLOR_DISTANCE_ALGORITHMS: Dict[str, Callable[[_RGB, _RGB], float]]
diff --git a/third_party/python/blessed/blessed/colorspace.py b/third_party/python/blessed/blessed/colorspace.py
new file mode 100644
index 0000000000..36fe646b35
--- /dev/null
+++ b/third_party/python/blessed/blessed/colorspace.py
@@ -0,0 +1,973 @@
+"""
+Color reference data.
+
+References,
+
+- https://github.com/freedesktop/xorg-rgb/blob/master/rgb.txt
+- https://github.com/ThomasDickey/xterm-snapshots/blob/master/256colres.h
+- https://github.com/ThomasDickey/xterm-snapshots/blob/master/XTerm-col.ad
+- https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
+- https://gist.github.com/XVilka/8346728
+- https://devblogs.microsoft.com/commandline/24-bit-color-in-the-windows-console/
+- http://jdebp.uk/Softwares/nosh/guide/TerminalCapabilities.html
+"""
+# std imports
+import collections
+
+__all__ = (
+ 'CGA_COLORS',
+ 'RGBColor',
+ 'RGB_256TABLE',
+ 'X11_COLORNAMES_TO_RGB',
+)
+
+CGA_COLORS = set(
+ ('black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'))
+
+
+class RGBColor(collections.namedtuple("RGBColor", ["red", "green", "blue"])):
+ """Named tuple for an RGB color definition."""
+
+ def __str__(self):
+ return '#{0:02x}{1:02x}{2:02x}'.format(*self)
+
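+# Example (illustrative): str(RGBColor(255, 105, 180)) == '#ff69b4'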
+
+#: X11 Color names to (XTerm-defined) RGB values from xorg-rgb/rgb.txt
+X11_COLORNAMES_TO_RGB = {
+ 'aliceblue': RGBColor(240, 248, 255),
+ 'antiquewhite': RGBColor(250, 235, 215),
+ 'antiquewhite1': RGBColor(255, 239, 219),
+ 'antiquewhite2': RGBColor(238, 223, 204),
+ 'antiquewhite3': RGBColor(205, 192, 176),
+ 'antiquewhite4': RGBColor(139, 131, 120),
+ 'aqua': RGBColor(0, 255, 255),
+ 'aquamarine': RGBColor(127, 255, 212),
+ 'aquamarine1': RGBColor(127, 255, 212),
+ 'aquamarine2': RGBColor(118, 238, 198),
+ 'aquamarine3': RGBColor(102, 205, 170),
+ 'aquamarine4': RGBColor(69, 139, 116),
+ 'azure': RGBColor(240, 255, 255),
+ 'azure1': RGBColor(240, 255, 255),
+ 'azure2': RGBColor(224, 238, 238),
+ 'azure3': RGBColor(193, 205, 205),
+ 'azure4': RGBColor(131, 139, 139),
+ 'beige': RGBColor(245, 245, 220),
+ 'bisque': RGBColor(255, 228, 196),
+ 'bisque1': RGBColor(255, 228, 196),
+ 'bisque2': RGBColor(238, 213, 183),
+ 'bisque3': RGBColor(205, 183, 158),
+ 'bisque4': RGBColor(139, 125, 107),
+ 'black': RGBColor(0, 0, 0),
+ 'blanchedalmond': RGBColor(255, 235, 205),
+ 'blue': RGBColor(0, 0, 255),
+ 'blue1': RGBColor(0, 0, 255),
+ 'blue2': RGBColor(0, 0, 238),
+ 'blue3': RGBColor(0, 0, 205),
+ 'blue4': RGBColor(0, 0, 139),
+ 'blueviolet': RGBColor(138, 43, 226),
+ 'brown': RGBColor(165, 42, 42),
+ 'brown1': RGBColor(255, 64, 64),
+ 'brown2': RGBColor(238, 59, 59),
+ 'brown3': RGBColor(205, 51, 51),
+ 'brown4': RGBColor(139, 35, 35),
+ 'burlywood': RGBColor(222, 184, 135),
+ 'burlywood1': RGBColor(255, 211, 155),
+ 'burlywood2': RGBColor(238, 197, 145),
+ 'burlywood3': RGBColor(205, 170, 125),
+ 'burlywood4': RGBColor(139, 115, 85),
+ 'cadetblue': RGBColor(95, 158, 160),
+ 'cadetblue1': RGBColor(152, 245, 255),
+ 'cadetblue2': RGBColor(142, 229, 238),
+ 'cadetblue3': RGBColor(122, 197, 205),
+ 'cadetblue4': RGBColor(83, 134, 139),
+ 'chartreuse': RGBColor(127, 255, 0),
+ 'chartreuse1': RGBColor(127, 255, 0),
+ 'chartreuse2': RGBColor(118, 238, 0),
+ 'chartreuse3': RGBColor(102, 205, 0),
+ 'chartreuse4': RGBColor(69, 139, 0),
+ 'chocolate': RGBColor(210, 105, 30),
+ 'chocolate1': RGBColor(255, 127, 36),
+ 'chocolate2': RGBColor(238, 118, 33),
+ 'chocolate3': RGBColor(205, 102, 29),
+ 'chocolate4': RGBColor(139, 69, 19),
+ 'coral': RGBColor(255, 127, 80),
+ 'coral1': RGBColor(255, 114, 86),
+ 'coral2': RGBColor(238, 106, 80),
+ 'coral3': RGBColor(205, 91, 69),
+ 'coral4': RGBColor(139, 62, 47),
+ 'cornflowerblue': RGBColor(100, 149, 237),
+ 'cornsilk': RGBColor(255, 248, 220),
+ 'cornsilk1': RGBColor(255, 248, 220),
+ 'cornsilk2': RGBColor(238, 232, 205),
+ 'cornsilk3': RGBColor(205, 200, 177),
+ 'cornsilk4': RGBColor(139, 136, 120),
+ 'crimson': RGBColor(220, 20, 60),
+ 'cyan': RGBColor(0, 255, 255),
+ 'cyan1': RGBColor(0, 255, 255),
+ 'cyan2': RGBColor(0, 238, 238),
+ 'cyan3': RGBColor(0, 205, 205),
+ 'cyan4': RGBColor(0, 139, 139),
+ 'darkblue': RGBColor(0, 0, 139),
+ 'darkcyan': RGBColor(0, 139, 139),
+ 'darkgoldenrod': RGBColor(184, 134, 11),
+ 'darkgoldenrod1': RGBColor(255, 185, 15),
+ 'darkgoldenrod2': RGBColor(238, 173, 14),
+ 'darkgoldenrod3': RGBColor(205, 149, 12),
+ 'darkgoldenrod4': RGBColor(139, 101, 8),
+ 'darkgray': RGBColor(169, 169, 169),
+ 'darkgreen': RGBColor(0, 100, 0),
+ 'darkgrey': RGBColor(169, 169, 169),
+ 'darkkhaki': RGBColor(189, 183, 107),
+ 'darkmagenta': RGBColor(139, 0, 139),
+ 'darkolivegreen': RGBColor(85, 107, 47),
+ 'darkolivegreen1': RGBColor(202, 255, 112),
+ 'darkolivegreen2': RGBColor(188, 238, 104),
+ 'darkolivegreen3': RGBColor(162, 205, 90),
+ 'darkolivegreen4': RGBColor(110, 139, 61),
+ 'darkorange': RGBColor(255, 140, 0),
+ 'darkorange1': RGBColor(255, 127, 0),
+ 'darkorange2': RGBColor(238, 118, 0),
+ 'darkorange3': RGBColor(205, 102, 0),
+ 'darkorange4': RGBColor(139, 69, 0),
+ 'darkorchid': RGBColor(153, 50, 204),
+ 'darkorchid1': RGBColor(191, 62, 255),
+ 'darkorchid2': RGBColor(178, 58, 238),
+ 'darkorchid3': RGBColor(154, 50, 205),
+ 'darkorchid4': RGBColor(104, 34, 139),
+ 'darkred': RGBColor(139, 0, 0),
+ 'darksalmon': RGBColor(233, 150, 122),
+ 'darkseagreen': RGBColor(143, 188, 143),
+ 'darkseagreen1': RGBColor(193, 255, 193),
+ 'darkseagreen2': RGBColor(180, 238, 180),
+ 'darkseagreen3': RGBColor(155, 205, 155),
+ 'darkseagreen4': RGBColor(105, 139, 105),
+ 'darkslateblue': RGBColor(72, 61, 139),
+ 'darkslategray': RGBColor(47, 79, 79),
+ 'darkslategray1': RGBColor(151, 255, 255),
+ 'darkslategray2': RGBColor(141, 238, 238),
+ 'darkslategray3': RGBColor(121, 205, 205),
+ 'darkslategray4': RGBColor(82, 139, 139),
+ 'darkslategrey': RGBColor(47, 79, 79),
+ 'darkturquoise': RGBColor(0, 206, 209),
+ 'darkviolet': RGBColor(148, 0, 211),
+ 'deeppink': RGBColor(255, 20, 147),
+ 'deeppink1': RGBColor(255, 20, 147),
+ 'deeppink2': RGBColor(238, 18, 137),
+ 'deeppink3': RGBColor(205, 16, 118),
+ 'deeppink4': RGBColor(139, 10, 80),
+ 'deepskyblue': RGBColor(0, 191, 255),
+ 'deepskyblue1': RGBColor(0, 191, 255),
+ 'deepskyblue2': RGBColor(0, 178, 238),
+ 'deepskyblue3': RGBColor(0, 154, 205),
+ 'deepskyblue4': RGBColor(0, 104, 139),
+ 'dimgray': RGBColor(105, 105, 105),
+ 'dimgrey': RGBColor(105, 105, 105),
+ 'dodgerblue': RGBColor(30, 144, 255),
+ 'dodgerblue1': RGBColor(30, 144, 255),
+ 'dodgerblue2': RGBColor(28, 134, 238),
+ 'dodgerblue3': RGBColor(24, 116, 205),
+ 'dodgerblue4': RGBColor(16, 78, 139),
+ 'firebrick': RGBColor(178, 34, 34),
+ 'firebrick1': RGBColor(255, 48, 48),
+ 'firebrick2': RGBColor(238, 44, 44),
+ 'firebrick3': RGBColor(205, 38, 38),
+ 'firebrick4': RGBColor(139, 26, 26),
+ 'floralwhite': RGBColor(255, 250, 240),
+ 'forestgreen': RGBColor(34, 139, 34),
+ 'fuchsia': RGBColor(255, 0, 255),
+ 'gainsboro': RGBColor(220, 220, 220),
+ 'ghostwhite': RGBColor(248, 248, 255),
+ 'gold': RGBColor(255, 215, 0),
+ 'gold1': RGBColor(255, 215, 0),
+ 'gold2': RGBColor(238, 201, 0),
+ 'gold3': RGBColor(205, 173, 0),
+ 'gold4': RGBColor(139, 117, 0),
+ 'goldenrod': RGBColor(218, 165, 32),
+ 'goldenrod1': RGBColor(255, 193, 37),
+ 'goldenrod2': RGBColor(238, 180, 34),
+ 'goldenrod3': RGBColor(205, 155, 29),
+ 'goldenrod4': RGBColor(139, 105, 20),
+ 'gray': RGBColor(190, 190, 190),
+ 'gray0': RGBColor(0, 0, 0),
+ 'gray1': RGBColor(3, 3, 3),
+ 'gray10': RGBColor(26, 26, 26),
+ 'gray100': RGBColor(255, 255, 255),
+ 'gray11': RGBColor(28, 28, 28),
+ 'gray12': RGBColor(31, 31, 31),
+ 'gray13': RGBColor(33, 33, 33),
+ 'gray14': RGBColor(36, 36, 36),
+ 'gray15': RGBColor(38, 38, 38),
+ 'gray16': RGBColor(41, 41, 41),
+ 'gray17': RGBColor(43, 43, 43),
+ 'gray18': RGBColor(46, 46, 46),
+ 'gray19': RGBColor(48, 48, 48),
+ 'gray2': RGBColor(5, 5, 5),
+ 'gray20': RGBColor(51, 51, 51),
+ 'gray21': RGBColor(54, 54, 54),
+ 'gray22': RGBColor(56, 56, 56),
+ 'gray23': RGBColor(59, 59, 59),
+ 'gray24': RGBColor(61, 61, 61),
+ 'gray25': RGBColor(64, 64, 64),
+ 'gray26': RGBColor(66, 66, 66),
+ 'gray27': RGBColor(69, 69, 69),
+ 'gray28': RGBColor(71, 71, 71),
+ 'gray29': RGBColor(74, 74, 74),
+ 'gray3': RGBColor(8, 8, 8),
+ 'gray30': RGBColor(77, 77, 77),
+ 'gray31': RGBColor(79, 79, 79),
+ 'gray32': RGBColor(82, 82, 82),
+ 'gray33': RGBColor(84, 84, 84),
+ 'gray34': RGBColor(87, 87, 87),
+ 'gray35': RGBColor(89, 89, 89),
+ 'gray36': RGBColor(92, 92, 92),
+ 'gray37': RGBColor(94, 94, 94),
+ 'gray38': RGBColor(97, 97, 97),
+ 'gray39': RGBColor(99, 99, 99),
+ 'gray4': RGBColor(10, 10, 10),
+ 'gray40': RGBColor(102, 102, 102),
+ 'gray41': RGBColor(105, 105, 105),
+ 'gray42': RGBColor(107, 107, 107),
+ 'gray43': RGBColor(110, 110, 110),
+ 'gray44': RGBColor(112, 112, 112),
+ 'gray45': RGBColor(115, 115, 115),
+ 'gray46': RGBColor(117, 117, 117),
+ 'gray47': RGBColor(120, 120, 120),
+ 'gray48': RGBColor(122, 122, 122),
+ 'gray49': RGBColor(125, 125, 125),
+ 'gray5': RGBColor(13, 13, 13),
+ 'gray50': RGBColor(127, 127, 127),
+ 'gray51': RGBColor(130, 130, 130),
+ 'gray52': RGBColor(133, 133, 133),
+ 'gray53': RGBColor(135, 135, 135),
+ 'gray54': RGBColor(138, 138, 138),
+ 'gray55': RGBColor(140, 140, 140),
+ 'gray56': RGBColor(143, 143, 143),
+ 'gray57': RGBColor(145, 145, 145),
+ 'gray58': RGBColor(148, 148, 148),
+ 'gray59': RGBColor(150, 150, 150),
+ 'gray6': RGBColor(15, 15, 15),
+ 'gray60': RGBColor(153, 153, 153),
+ 'gray61': RGBColor(156, 156, 156),
+ 'gray62': RGBColor(158, 158, 158),
+ 'gray63': RGBColor(161, 161, 161),
+ 'gray64': RGBColor(163, 163, 163),
+ 'gray65': RGBColor(166, 166, 166),
+ 'gray66': RGBColor(168, 168, 168),
+ 'gray67': RGBColor(171, 171, 171),
+ 'gray68': RGBColor(173, 173, 173),
+ 'gray69': RGBColor(176, 176, 176),
+ 'gray7': RGBColor(18, 18, 18),
+ 'gray70': RGBColor(179, 179, 179),
+ 'gray71': RGBColor(181, 181, 181),
+ 'gray72': RGBColor(184, 184, 184),
+ 'gray73': RGBColor(186, 186, 186),
+ 'gray74': RGBColor(189, 189, 189),
+ 'gray75': RGBColor(191, 191, 191),
+ 'gray76': RGBColor(194, 194, 194),
+ 'gray77': RGBColor(196, 196, 196),
+ 'gray78': RGBColor(199, 199, 199),
+ 'gray79': RGBColor(201, 201, 201),
+ 'gray8': RGBColor(20, 20, 20),
+ 'gray80': RGBColor(204, 204, 204),
+ 'gray81': RGBColor(207, 207, 207),
+ 'gray82': RGBColor(209, 209, 209),
+ 'gray83': RGBColor(212, 212, 212),
+ 'gray84': RGBColor(214, 214, 214),
+ 'gray85': RGBColor(217, 217, 217),
+ 'gray86': RGBColor(219, 219, 219),
+ 'gray87': RGBColor(222, 222, 222),
+ 'gray88': RGBColor(224, 224, 224),
+ 'gray89': RGBColor(227, 227, 227),
+ 'gray9': RGBColor(23, 23, 23),
+ 'gray90': RGBColor(229, 229, 229),
+ 'gray91': RGBColor(232, 232, 232),
+ 'gray92': RGBColor(235, 235, 235),
+ 'gray93': RGBColor(237, 237, 237),
+ 'gray94': RGBColor(240, 240, 240),
+ 'gray95': RGBColor(242, 242, 242),
+ 'gray96': RGBColor(245, 245, 245),
+ 'gray97': RGBColor(247, 247, 247),
+ 'gray98': RGBColor(250, 250, 250),
+ 'gray99': RGBColor(252, 252, 252),
+ 'green': RGBColor(0, 255, 0),
+ 'green1': RGBColor(0, 255, 0),
+ 'green2': RGBColor(0, 238, 0),
+ 'green3': RGBColor(0, 205, 0),
+ 'green4': RGBColor(0, 139, 0),
+ 'greenyellow': RGBColor(173, 255, 47),
+ 'grey': RGBColor(190, 190, 190),
+ 'grey0': RGBColor(0, 0, 0),
+ 'grey1': RGBColor(3, 3, 3),
+ 'grey10': RGBColor(26, 26, 26),
+ 'grey100': RGBColor(255, 255, 255),
+ 'grey11': RGBColor(28, 28, 28),
+ 'grey12': RGBColor(31, 31, 31),
+ 'grey13': RGBColor(33, 33, 33),
+ 'grey14': RGBColor(36, 36, 36),
+ 'grey15': RGBColor(38, 38, 38),
+ 'grey16': RGBColor(41, 41, 41),
+ 'grey17': RGBColor(43, 43, 43),
+ 'grey18': RGBColor(46, 46, 46),
+ 'grey19': RGBColor(48, 48, 48),
+ 'grey2': RGBColor(5, 5, 5),
+ 'grey20': RGBColor(51, 51, 51),
+ 'grey21': RGBColor(54, 54, 54),
+ 'grey22': RGBColor(56, 56, 56),
+ 'grey23': RGBColor(59, 59, 59),
+ 'grey24': RGBColor(61, 61, 61),
+ 'grey25': RGBColor(64, 64, 64),
+ 'grey26': RGBColor(66, 66, 66),
+ 'grey27': RGBColor(69, 69, 69),
+ 'grey28': RGBColor(71, 71, 71),
+ 'grey29': RGBColor(74, 74, 74),
+ 'grey3': RGBColor(8, 8, 8),
+ 'grey30': RGBColor(77, 77, 77),
+ 'grey31': RGBColor(79, 79, 79),
+ 'grey32': RGBColor(82, 82, 82),
+ 'grey33': RGBColor(84, 84, 84),
+ 'grey34': RGBColor(87, 87, 87),
+ 'grey35': RGBColor(89, 89, 89),
+ 'grey36': RGBColor(92, 92, 92),
+ 'grey37': RGBColor(94, 94, 94),
+ 'grey38': RGBColor(97, 97, 97),
+ 'grey39': RGBColor(99, 99, 99),
+ 'grey4': RGBColor(10, 10, 10),
+ 'grey40': RGBColor(102, 102, 102),
+ 'grey41': RGBColor(105, 105, 105),
+ 'grey42': RGBColor(107, 107, 107),
+ 'grey43': RGBColor(110, 110, 110),
+ 'grey44': RGBColor(112, 112, 112),
+ 'grey45': RGBColor(115, 115, 115),
+ 'grey46': RGBColor(117, 117, 117),
+ 'grey47': RGBColor(120, 120, 120),
+ 'grey48': RGBColor(122, 122, 122),
+ 'grey49': RGBColor(125, 125, 125),
+ 'grey5': RGBColor(13, 13, 13),
+ 'grey50': RGBColor(127, 127, 127),
+ 'grey51': RGBColor(130, 130, 130),
+ 'grey52': RGBColor(133, 133, 133),
+ 'grey53': RGBColor(135, 135, 135),
+ 'grey54': RGBColor(138, 138, 138),
+ 'grey55': RGBColor(140, 140, 140),
+ 'grey56': RGBColor(143, 143, 143),
+ 'grey57': RGBColor(145, 145, 145),
+ 'grey58': RGBColor(148, 148, 148),
+ 'grey59': RGBColor(150, 150, 150),
+ 'grey6': RGBColor(15, 15, 15),
+ 'grey60': RGBColor(153, 153, 153),
+ 'grey61': RGBColor(156, 156, 156),
+ 'grey62': RGBColor(158, 158, 158),
+ 'grey63': RGBColor(161, 161, 161),
+ 'grey64': RGBColor(163, 163, 163),
+ 'grey65': RGBColor(166, 166, 166),
+ 'grey66': RGBColor(168, 168, 168),
+ 'grey67': RGBColor(171, 171, 171),
+ 'grey68': RGBColor(173, 173, 173),
+ 'grey69': RGBColor(176, 176, 176),
+ 'grey7': RGBColor(18, 18, 18),
+ 'grey70': RGBColor(179, 179, 179),
+ 'grey71': RGBColor(181, 181, 181),
+ 'grey72': RGBColor(184, 184, 184),
+ 'grey73': RGBColor(186, 186, 186),
+ 'grey74': RGBColor(189, 189, 189),
+ 'grey75': RGBColor(191, 191, 191),
+ 'grey76': RGBColor(194, 194, 194),
+ 'grey77': RGBColor(196, 196, 196),
+ 'grey78': RGBColor(199, 199, 199),
+ 'grey79': RGBColor(201, 201, 201),
+ 'grey8': RGBColor(20, 20, 20),
+ 'grey80': RGBColor(204, 204, 204),
+ 'grey81': RGBColor(207, 207, 207),
+ 'grey82': RGBColor(209, 209, 209),
+ 'grey83': RGBColor(212, 212, 212),
+ 'grey84': RGBColor(214, 214, 214),
+ 'grey85': RGBColor(217, 217, 217),
+ 'grey86': RGBColor(219, 219, 219),
+ 'grey87': RGBColor(222, 222, 222),
+ 'grey88': RGBColor(224, 224, 224),
+ 'grey89': RGBColor(227, 227, 227),
+ 'grey9': RGBColor(23, 23, 23),
+ 'grey90': RGBColor(229, 229, 229),
+ 'grey91': RGBColor(232, 232, 232),
+ 'grey92': RGBColor(235, 235, 235),
+ 'grey93': RGBColor(237, 237, 237),
+ 'grey94': RGBColor(240, 240, 240),
+ 'grey95': RGBColor(242, 242, 242),
+ 'grey96': RGBColor(245, 245, 245),
+ 'grey97': RGBColor(247, 247, 247),
+ 'grey98': RGBColor(250, 250, 250),
+ 'grey99': RGBColor(252, 252, 252),
+ 'honeydew': RGBColor(240, 255, 240),
+ 'honeydew1': RGBColor(240, 255, 240),
+ 'honeydew2': RGBColor(224, 238, 224),
+ 'honeydew3': RGBColor(193, 205, 193),
+ 'honeydew4': RGBColor(131, 139, 131),
+ 'hotpink': RGBColor(255, 105, 180),
+ 'hotpink1': RGBColor(255, 110, 180),
+ 'hotpink2': RGBColor(238, 106, 167),
+ 'hotpink3': RGBColor(205, 96, 144),
+ 'hotpink4': RGBColor(139, 58, 98),
+ 'indianred': RGBColor(205, 92, 92),
+ 'indianred1': RGBColor(255, 106, 106),
+ 'indianred2': RGBColor(238, 99, 99),
+ 'indianred3': RGBColor(205, 85, 85),
+ 'indianred4': RGBColor(139, 58, 58),
+ 'indigo': RGBColor(75, 0, 130),
+ 'ivory': RGBColor(255, 255, 240),
+ 'ivory1': RGBColor(255, 255, 240),
+ 'ivory2': RGBColor(238, 238, 224),
+ 'ivory3': RGBColor(205, 205, 193),
+ 'ivory4': RGBColor(139, 139, 131),
+ 'khaki': RGBColor(240, 230, 140),
+ 'khaki1': RGBColor(255, 246, 143),
+ 'khaki2': RGBColor(238, 230, 133),
+ 'khaki3': RGBColor(205, 198, 115),
+ 'khaki4': RGBColor(139, 134, 78),
+ 'lavender': RGBColor(230, 230, 250),
+ 'lavenderblush': RGBColor(255, 240, 245),
+ 'lavenderblush1': RGBColor(255, 240, 245),
+ 'lavenderblush2': RGBColor(238, 224, 229),
+ 'lavenderblush3': RGBColor(205, 193, 197),
+ 'lavenderblush4': RGBColor(139, 131, 134),
+ 'lawngreen': RGBColor(124, 252, 0),
+ 'lemonchiffon': RGBColor(255, 250, 205),
+ 'lemonchiffon1': RGBColor(255, 250, 205),
+ 'lemonchiffon2': RGBColor(238, 233, 191),
+ 'lemonchiffon3': RGBColor(205, 201, 165),
+ 'lemonchiffon4': RGBColor(139, 137, 112),
+ 'lightblue': RGBColor(173, 216, 230),
+ 'lightblue1': RGBColor(191, 239, 255),
+ 'lightblue2': RGBColor(178, 223, 238),
+ 'lightblue3': RGBColor(154, 192, 205),
+ 'lightblue4': RGBColor(104, 131, 139),
+ 'lightcoral': RGBColor(240, 128, 128),
+ 'lightcyan': RGBColor(224, 255, 255),
+ 'lightcyan1': RGBColor(224, 255, 255),
+ 'lightcyan2': RGBColor(209, 238, 238),
+ 'lightcyan3': RGBColor(180, 205, 205),
+ 'lightcyan4': RGBColor(122, 139, 139),
+ 'lightgoldenrod': RGBColor(238, 221, 130),
+ 'lightgoldenrod1': RGBColor(255, 236, 139),
+ 'lightgoldenrod2': RGBColor(238, 220, 130),
+ 'lightgoldenrod3': RGBColor(205, 190, 112),
+ 'lightgoldenrod4': RGBColor(139, 129, 76),
+ 'lightgoldenrodyellow': RGBColor(250, 250, 210),
+ 'lightgray': RGBColor(211, 211, 211),
+ 'lightgreen': RGBColor(144, 238, 144),
+ 'lightgrey': RGBColor(211, 211, 211),
+ 'lightpink': RGBColor(255, 182, 193),
+ 'lightpink1': RGBColor(255, 174, 185),
+ 'lightpink2': RGBColor(238, 162, 173),
+ 'lightpink3': RGBColor(205, 140, 149),
+ 'lightpink4': RGBColor(139, 95, 101),
+ 'lightsalmon': RGBColor(255, 160, 122),
+ 'lightsalmon1': RGBColor(255, 160, 122),
+ 'lightsalmon2': RGBColor(238, 149, 114),
+ 'lightsalmon3': RGBColor(205, 129, 98),
+ 'lightsalmon4': RGBColor(139, 87, 66),
+ 'lightseagreen': RGBColor(32, 178, 170),
+ 'lightskyblue': RGBColor(135, 206, 250),
+ 'lightskyblue1': RGBColor(176, 226, 255),
+ 'lightskyblue2': RGBColor(164, 211, 238),
+ 'lightskyblue3': RGBColor(141, 182, 205),
+ 'lightskyblue4': RGBColor(96, 123, 139),
+ 'lightslateblue': RGBColor(132, 112, 255),
+ 'lightslategray': RGBColor(119, 136, 153),
+ 'lightslategrey': RGBColor(119, 136, 153),
+ 'lightsteelblue': RGBColor(176, 196, 222),
+ 'lightsteelblue1': RGBColor(202, 225, 255),
+ 'lightsteelblue2': RGBColor(188, 210, 238),
+ 'lightsteelblue3': RGBColor(162, 181, 205),
+ 'lightsteelblue4': RGBColor(110, 123, 139),
+ 'lightyellow': RGBColor(255, 255, 224),
+ 'lightyellow1': RGBColor(255, 255, 224),
+ 'lightyellow2': RGBColor(238, 238, 209),
+ 'lightyellow3': RGBColor(205, 205, 180),
+ 'lightyellow4': RGBColor(139, 139, 122),
+ 'lime': RGBColor(0, 255, 0),
+ 'limegreen': RGBColor(50, 205, 50),
+ 'linen': RGBColor(250, 240, 230),
+ 'magenta': RGBColor(255, 0, 255),
+ 'magenta1': RGBColor(255, 0, 255),
+ 'magenta2': RGBColor(238, 0, 238),
+ 'magenta3': RGBColor(205, 0, 205),
+ 'magenta4': RGBColor(139, 0, 139),
+ 'maroon': RGBColor(176, 48, 96),
+ 'maroon1': RGBColor(255, 52, 179),
+ 'maroon2': RGBColor(238, 48, 167),
+ 'maroon3': RGBColor(205, 41, 144),
+ 'maroon4': RGBColor(139, 28, 98),
+ 'mediumaquamarine': RGBColor(102, 205, 170),
+ 'mediumblue': RGBColor(0, 0, 205),
+ 'mediumorchid': RGBColor(186, 85, 211),
+ 'mediumorchid1': RGBColor(224, 102, 255),
+ 'mediumorchid2': RGBColor(209, 95, 238),
+ 'mediumorchid3': RGBColor(180, 82, 205),
+ 'mediumorchid4': RGBColor(122, 55, 139),
+ 'mediumpurple': RGBColor(147, 112, 219),
+ 'mediumpurple1': RGBColor(171, 130, 255),
+ 'mediumpurple2': RGBColor(159, 121, 238),
+ 'mediumpurple3': RGBColor(137, 104, 205),
+ 'mediumpurple4': RGBColor(93, 71, 139),
+ 'mediumseagreen': RGBColor(60, 179, 113),
+ 'mediumslateblue': RGBColor(123, 104, 238),
+ 'mediumspringgreen': RGBColor(0, 250, 154),
+ 'mediumturquoise': RGBColor(72, 209, 204),
+ 'mediumvioletred': RGBColor(199, 21, 133),
+ 'midnightblue': RGBColor(25, 25, 112),
+ 'mintcream': RGBColor(245, 255, 250),
+ 'mistyrose': RGBColor(255, 228, 225),
+ 'mistyrose1': RGBColor(255, 228, 225),
+ 'mistyrose2': RGBColor(238, 213, 210),
+ 'mistyrose3': RGBColor(205, 183, 181),
+ 'mistyrose4': RGBColor(139, 125, 123),
+ 'moccasin': RGBColor(255, 228, 181),
+ 'navajowhite': RGBColor(255, 222, 173),
+ 'navajowhite1': RGBColor(255, 222, 173),
+ 'navajowhite2': RGBColor(238, 207, 161),
+ 'navajowhite3': RGBColor(205, 179, 139),
+ 'navajowhite4': RGBColor(139, 121, 94),
+ 'navy': RGBColor(0, 0, 128),
+ 'navyblue': RGBColor(0, 0, 128),
+ 'oldlace': RGBColor(253, 245, 230),
+ 'olive': RGBColor(128, 128, 0),
+ 'olivedrab': RGBColor(107, 142, 35),
+ 'olivedrab1': RGBColor(192, 255, 62),
+ 'olivedrab2': RGBColor(179, 238, 58),
+ 'olivedrab3': RGBColor(154, 205, 50),
+ 'olivedrab4': RGBColor(105, 139, 34),
+ 'orange': RGBColor(255, 165, 0),
+ 'orange1': RGBColor(255, 165, 0),
+ 'orange2': RGBColor(238, 154, 0),
+ 'orange3': RGBColor(205, 133, 0),
+ 'orange4': RGBColor(139, 90, 0),
+ 'orangered': RGBColor(255, 69, 0),
+ 'orangered1': RGBColor(255, 69, 0),
+ 'orangered2': RGBColor(238, 64, 0),
+ 'orangered3': RGBColor(205, 55, 0),
+ 'orangered4': RGBColor(139, 37, 0),
+ 'orchid': RGBColor(218, 112, 214),
+ 'orchid1': RGBColor(255, 131, 250),
+ 'orchid2': RGBColor(238, 122, 233),
+ 'orchid3': RGBColor(205, 105, 201),
+ 'orchid4': RGBColor(139, 71, 137),
+ 'palegoldenrod': RGBColor(238, 232, 170),
+ 'palegreen': RGBColor(152, 251, 152),
+ 'palegreen1': RGBColor(154, 255, 154),
+ 'palegreen2': RGBColor(144, 238, 144),
+ 'palegreen3': RGBColor(124, 205, 124),
+ 'palegreen4': RGBColor(84, 139, 84),
+ 'paleturquoise': RGBColor(175, 238, 238),
+ 'paleturquoise1': RGBColor(187, 255, 255),
+ 'paleturquoise2': RGBColor(174, 238, 238),
+ 'paleturquoise3': RGBColor(150, 205, 205),
+ 'paleturquoise4': RGBColor(102, 139, 139),
+ 'palevioletred': RGBColor(219, 112, 147),
+ 'palevioletred1': RGBColor(255, 130, 171),
+ 'palevioletred2': RGBColor(238, 121, 159),
+ 'palevioletred3': RGBColor(205, 104, 137),
+ 'palevioletred4': RGBColor(139, 71, 93),
+ 'papayawhip': RGBColor(255, 239, 213),
+ 'peachpuff': RGBColor(255, 218, 185),
+ 'peachpuff1': RGBColor(255, 218, 185),
+ 'peachpuff2': RGBColor(238, 203, 173),
+ 'peachpuff3': RGBColor(205, 175, 149),
+ 'peachpuff4': RGBColor(139, 119, 101),
+ 'peru': RGBColor(205, 133, 63),
+ 'pink': RGBColor(255, 192, 203),
+ 'pink1': RGBColor(255, 181, 197),
+ 'pink2': RGBColor(238, 169, 184),
+ 'pink3': RGBColor(205, 145, 158),
+ 'pink4': RGBColor(139, 99, 108),
+ 'plum': RGBColor(221, 160, 221),
+ 'plum1': RGBColor(255, 187, 255),
+ 'plum2': RGBColor(238, 174, 238),
+ 'plum3': RGBColor(205, 150, 205),
+ 'plum4': RGBColor(139, 102, 139),
+ 'powderblue': RGBColor(176, 224, 230),
+ 'purple': RGBColor(160, 32, 240),
+ 'purple1': RGBColor(155, 48, 255),
+ 'purple2': RGBColor(145, 44, 238),
+ 'purple3': RGBColor(125, 38, 205),
+ 'purple4': RGBColor(85, 26, 139),
+ 'rebeccapurple': RGBColor(102, 51, 153),
+ 'red': RGBColor(255, 0, 0),
+ 'red1': RGBColor(255, 0, 0),
+ 'red2': RGBColor(238, 0, 0),
+ 'red3': RGBColor(205, 0, 0),
+ 'red4': RGBColor(139, 0, 0),
+ 'rosybrown': RGBColor(188, 143, 143),
+ 'rosybrown1': RGBColor(255, 193, 193),
+ 'rosybrown2': RGBColor(238, 180, 180),
+ 'rosybrown3': RGBColor(205, 155, 155),
+ 'rosybrown4': RGBColor(139, 105, 105),
+ 'royalblue': RGBColor(65, 105, 225),
+ 'royalblue1': RGBColor(72, 118, 255),
+ 'royalblue2': RGBColor(67, 110, 238),
+ 'royalblue3': RGBColor(58, 95, 205),
+ 'royalblue4': RGBColor(39, 64, 139),
+ 'saddlebrown': RGBColor(139, 69, 19),
+ 'salmon': RGBColor(250, 128, 114),
+ 'salmon1': RGBColor(255, 140, 105),
+ 'salmon2': RGBColor(238, 130, 98),
+ 'salmon3': RGBColor(205, 112, 84),
+ 'salmon4': RGBColor(139, 76, 57),
+ 'sandybrown': RGBColor(244, 164, 96),
+ 'seagreen': RGBColor(46, 139, 87),
+ 'seagreen1': RGBColor(84, 255, 159),
+ 'seagreen2': RGBColor(78, 238, 148),
+ 'seagreen3': RGBColor(67, 205, 128),
+ 'seagreen4': RGBColor(46, 139, 87),
+ 'seashell': RGBColor(255, 245, 238),
+ 'seashell1': RGBColor(255, 245, 238),
+ 'seashell2': RGBColor(238, 229, 222),
+ 'seashell3': RGBColor(205, 197, 191),
+ 'seashell4': RGBColor(139, 134, 130),
+ 'sienna': RGBColor(160, 82, 45),
+ 'sienna1': RGBColor(255, 130, 71),
+ 'sienna2': RGBColor(238, 121, 66),
+ 'sienna3': RGBColor(205, 104, 57),
+ 'sienna4': RGBColor(139, 71, 38),
+ 'silver': RGBColor(192, 192, 192),
+ 'skyblue': RGBColor(135, 206, 235),
+ 'skyblue1': RGBColor(135, 206, 255),
+ 'skyblue2': RGBColor(126, 192, 238),
+ 'skyblue3': RGBColor(108, 166, 205),
+ 'skyblue4': RGBColor(74, 112, 139),
+ 'slateblue': RGBColor(106, 90, 205),
+ 'slateblue1': RGBColor(131, 111, 255),
+ 'slateblue2': RGBColor(122, 103, 238),
+ 'slateblue3': RGBColor(105, 89, 205),
+ 'slateblue4': RGBColor(71, 60, 139),
+ 'slategray': RGBColor(112, 128, 144),
+ 'slategray1': RGBColor(198, 226, 255),
+ 'slategray2': RGBColor(185, 211, 238),
+ 'slategray3': RGBColor(159, 182, 205),
+ 'slategray4': RGBColor(108, 123, 139),
+ 'slategrey': RGBColor(112, 128, 144),
+ 'snow': RGBColor(255, 250, 250),
+ 'snow1': RGBColor(255, 250, 250),
+ 'snow2': RGBColor(238, 233, 233),
+ 'snow3': RGBColor(205, 201, 201),
+ 'snow4': RGBColor(139, 137, 137),
+ 'springgreen': RGBColor(0, 255, 127),
+ 'springgreen1': RGBColor(0, 255, 127),
+ 'springgreen2': RGBColor(0, 238, 118),
+ 'springgreen3': RGBColor(0, 205, 102),
+ 'springgreen4': RGBColor(0, 139, 69),
+ 'steelblue': RGBColor(70, 130, 180),
+ 'steelblue1': RGBColor(99, 184, 255),
+ 'steelblue2': RGBColor(92, 172, 238),
+ 'steelblue3': RGBColor(79, 148, 205),
+ 'steelblue4': RGBColor(54, 100, 139),
+ 'tan': RGBColor(210, 180, 140),
+ 'tan1': RGBColor(255, 165, 79),
+ 'tan2': RGBColor(238, 154, 73),
+ 'tan3': RGBColor(205, 133, 63),
+ 'tan4': RGBColor(139, 90, 43),
+ 'teal': RGBColor(0, 128, 128),
+ 'thistle': RGBColor(216, 191, 216),
+ 'thistle1': RGBColor(255, 225, 255),
+ 'thistle2': RGBColor(238, 210, 238),
+ 'thistle3': RGBColor(205, 181, 205),
+ 'thistle4': RGBColor(139, 123, 139),
+ 'tomato': RGBColor(255, 99, 71),
+ 'tomato1': RGBColor(255, 99, 71),
+ 'tomato2': RGBColor(238, 92, 66),
+ 'tomato3': RGBColor(205, 79, 57),
+ 'tomato4': RGBColor(139, 54, 38),
+ 'turquoise': RGBColor(64, 224, 208),
+ 'turquoise1': RGBColor(0, 245, 255),
+ 'turquoise2': RGBColor(0, 229, 238),
+ 'turquoise3': RGBColor(0, 197, 205),
+ 'turquoise4': RGBColor(0, 134, 139),
+ 'violet': RGBColor(238, 130, 238),
+ 'violetred': RGBColor(208, 32, 144),
+ 'violetred1': RGBColor(255, 62, 150),
+ 'violetred2': RGBColor(238, 58, 140),
+ 'violetred3': RGBColor(205, 50, 120),
+ 'violetred4': RGBColor(139, 34, 82),
+ 'webgray': RGBColor(128, 128, 128),
+ 'webgreen': RGBColor(0, 128, 0),
+ 'webgrey': RGBColor(128, 128, 128),
+ 'webmaroon': RGBColor(128, 0, 0),
+ 'webpurple': RGBColor(128, 0, 128),
+ 'wheat': RGBColor(245, 222, 179),
+ 'wheat1': RGBColor(255, 231, 186),
+ 'wheat2': RGBColor(238, 216, 174),
+ 'wheat3': RGBColor(205, 186, 150),
+ 'wheat4': RGBColor(139, 126, 102),
+ 'white': RGBColor(255, 255, 255),
+ 'whitesmoke': RGBColor(245, 245, 245),
+ 'x11gray': RGBColor(190, 190, 190),
+ 'x11green': RGBColor(0, 255, 0),
+ 'x11grey': RGBColor(190, 190, 190),
+ 'x11maroon': RGBColor(176, 48, 96),
+ 'x11purple': RGBColor(160, 32, 240),
+ 'yellow': RGBColor(255, 255, 0),
+ 'yellow1': RGBColor(255, 255, 0),
+ 'yellow2': RGBColor(238, 238, 0),
+ 'yellow3': RGBColor(205, 205, 0),
+ 'yellow4': RGBColor(139, 139, 0),
+ 'yellowgreen': RGBColor(154, 205, 50)
+}
+
+#: RGB values of the curses color indices of 8-, 16-, and 256-color terminals
+RGB_256TABLE = (
+ RGBColor(0, 0, 0),
+ RGBColor(205, 0, 0),
+ RGBColor(0, 205, 0),
+ RGBColor(205, 205, 0),
+ RGBColor(0, 0, 238),
+ RGBColor(205, 0, 205),
+ RGBColor(0, 205, 205),
+ RGBColor(229, 229, 229),
+ RGBColor(127, 127, 127),
+ RGBColor(255, 0, 0),
+ RGBColor(0, 255, 0),
+ RGBColor(255, 255, 0),
+ RGBColor(92, 92, 255),
+ RGBColor(255, 0, 255),
+ RGBColor(0, 255, 255),
+ RGBColor(255, 255, 255),
+ RGBColor(0, 0, 0),
+ RGBColor(0, 0, 95),
+ RGBColor(0, 0, 135),
+ RGBColor(0, 0, 175),
+ RGBColor(0, 0, 215),
+ RGBColor(0, 0, 255),
+ RGBColor(0, 95, 0),
+ RGBColor(0, 95, 95),
+ RGBColor(0, 95, 135),
+ RGBColor(0, 95, 175),
+ RGBColor(0, 95, 215),
+ RGBColor(0, 95, 255),
+ RGBColor(0, 135, 0),
+ RGBColor(0, 135, 95),
+ RGBColor(0, 135, 135),
+ RGBColor(0, 135, 175),
+ RGBColor(0, 135, 215),
+ RGBColor(0, 135, 255),
+ RGBColor(0, 175, 0),
+ RGBColor(0, 175, 95),
+ RGBColor(0, 175, 135),
+ RGBColor(0, 175, 175),
+ RGBColor(0, 175, 215),
+ RGBColor(0, 175, 255),
+ RGBColor(0, 215, 0),
+ RGBColor(0, 215, 95),
+ RGBColor(0, 215, 135),
+ RGBColor(0, 215, 175),
+ RGBColor(0, 215, 215),
+ RGBColor(0, 215, 255),
+ RGBColor(0, 255, 0),
+ RGBColor(0, 255, 95),
+ RGBColor(0, 255, 135),
+ RGBColor(0, 255, 175),
+ RGBColor(0, 255, 215),
+ RGBColor(0, 255, 255),
+ RGBColor(95, 0, 0),
+ RGBColor(95, 0, 95),
+ RGBColor(95, 0, 135),
+ RGBColor(95, 0, 175),
+ RGBColor(95, 0, 215),
+ RGBColor(95, 0, 255),
+ RGBColor(95, 95, 0),
+ RGBColor(95, 95, 95),
+ RGBColor(95, 95, 135),
+ RGBColor(95, 95, 175),
+ RGBColor(95, 95, 215),
+ RGBColor(95, 95, 255),
+ RGBColor(95, 135, 0),
+ RGBColor(95, 135, 95),
+ RGBColor(95, 135, 135),
+ RGBColor(95, 135, 175),
+ RGBColor(95, 135, 215),
+ RGBColor(95, 135, 255),
+ RGBColor(95, 175, 0),
+ RGBColor(95, 175, 95),
+ RGBColor(95, 175, 135),
+ RGBColor(95, 175, 175),
+ RGBColor(95, 175, 215),
+ RGBColor(95, 175, 255),
+ RGBColor(95, 215, 0),
+ RGBColor(95, 215, 95),
+ RGBColor(95, 215, 135),
+ RGBColor(95, 215, 175),
+ RGBColor(95, 215, 215),
+ RGBColor(95, 215, 255),
+ RGBColor(95, 255, 0),
+ RGBColor(95, 255, 95),
+ RGBColor(95, 255, 135),
+ RGBColor(95, 255, 175),
+ RGBColor(95, 255, 215),
+ RGBColor(95, 255, 255),
+ RGBColor(135, 0, 0),
+ RGBColor(135, 0, 95),
+ RGBColor(135, 0, 135),
+ RGBColor(135, 0, 175),
+ RGBColor(135, 0, 215),
+ RGBColor(135, 0, 255),
+ RGBColor(135, 95, 0),
+ RGBColor(135, 95, 95),
+ RGBColor(135, 95, 135),
+ RGBColor(135, 95, 175),
+ RGBColor(135, 95, 215),
+ RGBColor(135, 95, 255),
+ RGBColor(135, 135, 0),
+ RGBColor(135, 135, 95),
+ RGBColor(135, 135, 135),
+ RGBColor(135, 135, 175),
+ RGBColor(135, 135, 215),
+ RGBColor(135, 135, 255),
+ RGBColor(135, 175, 0),
+ RGBColor(135, 175, 95),
+ RGBColor(135, 175, 135),
+ RGBColor(135, 175, 175),
+ RGBColor(135, 175, 215),
+ RGBColor(135, 175, 255),
+ RGBColor(135, 215, 0),
+ RGBColor(135, 215, 95),
+ RGBColor(135, 215, 135),
+ RGBColor(135, 215, 175),
+ RGBColor(135, 215, 215),
+ RGBColor(135, 215, 255),
+ RGBColor(135, 255, 0),
+ RGBColor(135, 255, 95),
+ RGBColor(135, 255, 135),
+ RGBColor(135, 255, 175),
+ RGBColor(135, 255, 215),
+ RGBColor(135, 255, 255),
+ RGBColor(175, 0, 0),
+ RGBColor(175, 0, 95),
+ RGBColor(175, 0, 135),
+ RGBColor(175, 0, 175),
+ RGBColor(175, 0, 215),
+ RGBColor(175, 0, 255),
+ RGBColor(175, 95, 0),
+ RGBColor(175, 95, 95),
+ RGBColor(175, 95, 135),
+ RGBColor(175, 95, 175),
+ RGBColor(175, 95, 215),
+ RGBColor(175, 95, 255),
+ RGBColor(175, 135, 0),
+ RGBColor(175, 135, 95),
+ RGBColor(175, 135, 135),
+ RGBColor(175, 135, 175),
+ RGBColor(175, 135, 215),
+ RGBColor(175, 135, 255),
+ RGBColor(175, 175, 0),
+ RGBColor(175, 175, 95),
+ RGBColor(175, 175, 135),
+ RGBColor(175, 175, 175),
+ RGBColor(175, 175, 215),
+ RGBColor(175, 175, 255),
+ RGBColor(175, 215, 0),
+ RGBColor(175, 215, 95),
+ RGBColor(175, 215, 135),
+ RGBColor(175, 215, 175),
+ RGBColor(175, 215, 215),
+ RGBColor(175, 215, 255),
+ RGBColor(175, 255, 0),
+ RGBColor(175, 255, 95),
+ RGBColor(175, 255, 135),
+ RGBColor(175, 255, 175),
+ RGBColor(175, 255, 215),
+ RGBColor(175, 255, 255),
+ RGBColor(215, 0, 0),
+ RGBColor(215, 0, 95),
+ RGBColor(215, 0, 135),
+ RGBColor(215, 0, 175),
+ RGBColor(215, 0, 215),
+ RGBColor(215, 0, 255),
+ RGBColor(215, 95, 0),
+ RGBColor(215, 95, 95),
+ RGBColor(215, 95, 135),
+ RGBColor(215, 95, 175),
+ RGBColor(215, 95, 215),
+ RGBColor(215, 95, 255),
+ RGBColor(215, 135, 0),
+ RGBColor(215, 135, 95),
+ RGBColor(215, 135, 135),
+ RGBColor(215, 135, 175),
+ RGBColor(215, 135, 215),
+ RGBColor(215, 135, 255),
+ RGBColor(215, 175, 0),
+ RGBColor(215, 175, 95),
+ RGBColor(215, 175, 135),
+ RGBColor(215, 175, 175),
+ RGBColor(215, 175, 215),
+ RGBColor(215, 175, 255),
+ RGBColor(215, 215, 0),
+ RGBColor(215, 215, 95),
+ RGBColor(215, 215, 135),
+ RGBColor(215, 215, 175),
+ RGBColor(215, 215, 215),
+ RGBColor(215, 215, 255),
+ RGBColor(215, 255, 0),
+ RGBColor(215, 255, 95),
+ RGBColor(215, 255, 135),
+ RGBColor(215, 255, 175),
+ RGBColor(215, 255, 215),
+ RGBColor(215, 255, 255),
+ RGBColor(255, 0, 0),
+    RGBColor(255, 0, 95),
+    RGBColor(255, 0, 135),
+ RGBColor(255, 0, 175),
+ RGBColor(255, 0, 215),
+ RGBColor(255, 0, 255),
+ RGBColor(255, 95, 0),
+ RGBColor(255, 95, 95),
+ RGBColor(255, 95, 135),
+ RGBColor(255, 95, 175),
+ RGBColor(255, 95, 215),
+ RGBColor(255, 95, 255),
+ RGBColor(255, 135, 0),
+ RGBColor(255, 135, 95),
+ RGBColor(255, 135, 135),
+ RGBColor(255, 135, 175),
+ RGBColor(255, 135, 215),
+ RGBColor(255, 135, 255),
+ RGBColor(255, 175, 0),
+ RGBColor(255, 175, 95),
+ RGBColor(255, 175, 135),
+ RGBColor(255, 175, 175),
+ RGBColor(255, 175, 215),
+ RGBColor(255, 175, 255),
+ RGBColor(255, 215, 0),
+ RGBColor(255, 215, 95),
+ RGBColor(255, 215, 135),
+ RGBColor(255, 215, 175),
+ RGBColor(255, 215, 215),
+ RGBColor(255, 215, 255),
+ RGBColor(255, 255, 0),
+ RGBColor(255, 255, 95),
+ RGBColor(255, 255, 135),
+ RGBColor(255, 255, 175),
+ RGBColor(255, 255, 215),
+ RGBColor(255, 255, 255),
+ RGBColor(8, 8, 8),
+ RGBColor(18, 18, 18),
+ RGBColor(28, 28, 28),
+ RGBColor(38, 38, 38),
+ RGBColor(48, 48, 48),
+ RGBColor(58, 58, 58),
+ RGBColor(68, 68, 68),
+ RGBColor(78, 78, 78),
+ RGBColor(88, 88, 88),
+ RGBColor(98, 98, 98),
+ RGBColor(108, 108, 108),
+ RGBColor(118, 118, 118),
+ RGBColor(128, 128, 128),
+ RGBColor(138, 138, 138),
+ RGBColor(148, 148, 148),
+ RGBColor(158, 158, 158),
+ RGBColor(168, 168, 168),
+ RGBColor(178, 178, 178),
+ RGBColor(188, 188, 188),
+ RGBColor(198, 198, 198),
+ RGBColor(208, 208, 208),
+ RGBColor(218, 218, 218),
+ RGBColor(228, 228, 228),
+ RGBColor(238, 238, 238),
+)
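+
+# A hedged sketch of how the bulk of the table above is derived (an assumed
+# convention matching xterm): indices 16-231 form a 6x6x6 color cube with
+# channel levels (0, 95, 135, 175, 215, 255), and indices 232-255 are a
+# grayscale ramp of ``8 + 10 * n``. ``_LEVELS`` and ``_cube_color`` below
+# are illustrative names, not part of this module:
+#
+#     >>> _LEVELS = (0, 95, 135, 175, 215, 255)
+#     >>> def _cube_color(idx):
+#     ...     r, rem = divmod(idx - 16, 36)
+#     ...     g, b = divmod(rem, 6)
+#     ...     return RGBColor(_LEVELS[r], _LEVELS[g], _LEVELS[b])
+#     >>> _cube_color(196) == RGB_256TABLE[196] == RGBColor(255, 0, 0)
+#     True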
diff --git a/third_party/python/blessed/blessed/colorspace.pyi b/third_party/python/blessed/blessed/colorspace.pyi
new file mode 100644
index 0000000000..a799cd01cf
--- /dev/null
+++ b/third_party/python/blessed/blessed/colorspace.pyi
@@ -0,0 +1,12 @@
+# std imports
+from typing import Set, Dict, Tuple, NamedTuple
+
+CGA_COLORS: Set[str]
+
+class RGBColor(NamedTuple):
+ red: int
+ green: int
+ blue: int
+
+X11_COLORNAMES_TO_RGB: Dict[str, RGBColor]
+RGB_256TABLE: Tuple[RGBColor, ...]
diff --git a/third_party/python/blessed/blessed/formatters.py b/third_party/python/blessed/blessed/formatters.py
new file mode 100644
index 0000000000..ed1badc5ad
--- /dev/null
+++ b/third_party/python/blessed/blessed/formatters.py
@@ -0,0 +1,498 @@
+"""Sub-module providing sequence-formatting functions."""
+# std imports
+import platform
+
+# 3rd party
+import six
+
+# local
+from blessed.colorspace import CGA_COLORS, X11_COLORNAMES_TO_RGB
+
+# isort: off
+# curses
+if platform.system() == 'Windows':
+ import jinxed as curses # pylint: disable=import-error
+else:
+ import curses
+
+
+def _make_colors():
+ """
+ Return set of valid colors and their derivatives.
+
+ :rtype: set
+ :returns: Color names with prefixes
+ """
+ colors = set()
+ # basic CGA foreground color, background, high intensity, and bold
+ # background ('iCE colors' in my day).
+ for cga_color in CGA_COLORS:
+ colors.add(cga_color)
+ colors.add('on_' + cga_color)
+ colors.add('bright_' + cga_color)
+ colors.add('on_bright_' + cga_color)
+
+ # foreground and background VGA color
+ for vga_color in X11_COLORNAMES_TO_RGB:
+ colors.add(vga_color)
+ colors.add('on_' + vga_color)
+ return colors
+
+
+#: Valid colors and their background (on), bright, and bright-background
+#: derivatives.
+COLORS = _make_colors()
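+# For example, a CGA color such as 'red' yields 'red', 'on_red', 'bright_red',
+# and 'on_bright_red', while an X11 color such as 'khaki' yields only 'khaki'
+# and 'on_khaki'.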
+
+#: Attributes that may be compounded with colors, by underscore, such as
+#: 'reverse_indigo'.
+COMPOUNDABLES = set('bold underline reverse blink italic standout'.split())
+
+
+class ParameterizingString(six.text_type):
+ r"""
+ A Unicode string which can be called as a parameterizing termcap.
+
+ For example::
+
+ >>> from blessed import Terminal
+ >>> term = Terminal()
+ >>> color = ParameterizingString(term.color, term.normal, 'color')
+ >>> color(9)('color #9')
+ u'\x1b[91mcolor #9\x1b(B\x1b[m'
+ """
+
+ def __new__(cls, cap, normal=u'', name=u'<not specified>'):
+ # pylint: disable = missing-return-doc, missing-return-type-doc
+ """
+ Class constructor accepting 3 positional arguments.
+
+ :arg str cap: parameterized string suitable for curses.tparm()
+ :arg str normal: terminating sequence for this capability (optional).
+ :arg str name: name of this terminal capability (optional).
+ """
+ new = six.text_type.__new__(cls, cap)
+ new._normal = normal
+ new._name = name
+ return new
+
+ def __call__(self, *args):
+ """
+ Returning :class:`FormattingString` instance for given parameters.
+
+ Return evaluated terminal capability (self), receiving arguments
+ ``*args``, followed by the terminating sequence (self.normal) into
+ a :class:`FormattingString` capable of being called.
+
+ :raises TypeError: Mismatch between capability and arguments
+ :raises curses.error: :func:`curses.tparm` raised an exception
+ :rtype: :class:`FormattingString` or :class:`NullCallableString`
+ :returns: Callable string for given parameters
+ """
+ try:
+ # Re-encode the cap, because tparm() takes a bytestring in Python
+            # 3. However, it remains a plain Unicode string otherwise, so
+            # that concatenation works.
+ attr = curses.tparm(self.encode('latin1'), *args).decode('latin1')
+ return FormattingString(attr, self._normal)
+ except TypeError as err:
+ # If the first non-int (i.e. incorrect) arg was a string, suggest
+ # something intelligent:
+ if args and isinstance(args[0], six.string_types):
+ raise TypeError(
+ "Unknown terminal capability, %r, or, TypeError "
+ "for arguments %r: %s" % (self._name, args, err))
+ # Somebody passed a non-string; I don't feel confident
+ # guessing what they were trying to do.
+ raise
+ except curses.error as err:
+ # ignore 'tparm() returned NULL', you won't get any styling,
+ # even if does_styling is True. This happens on win32 platforms
+ # with http://www.lfd.uci.edu/~gohlke/pythonlibs/#curses installed
+ if "tparm() returned NULL" not in six.text_type(err):
+ raise
+ return NullCallableString()
+
+
+class ParameterizingProxyString(six.text_type):
+ r"""
+ A Unicode string which can be called to proxy missing termcap entries.
+
+ This class supports the function :func:`get_proxy_string`, and mirrors
+ the behavior of :class:`ParameterizingString`, except that instead of
+ a capability name, receives a format string, and callable to filter the
+ given positional ``*args`` of :meth:`ParameterizingProxyString.__call__`
+ into a terminal sequence.
+
+ For example::
+
+ >>> from blessed import Terminal
+ >>> term = Terminal('screen')
+ >>> hpa = ParameterizingString(term.hpa, term.normal, 'hpa')
+ >>> hpa(9)
+ u''
+ >>> fmt = u'\x1b[{0}G'
+ >>> fmt_arg = lambda *arg: (arg[0] + 1,)
+ >>> hpa = ParameterizingProxyString((fmt, fmt_arg), term.normal, 'hpa')
+ >>> hpa(9)
+ u'\x1b[10G'
+ """
+
+ def __new__(cls, fmt_pair, normal=u'', name=u'<not specified>'):
+ # pylint: disable = missing-return-doc, missing-return-type-doc
+ """
+        Class constructor accepting 3 positional arguments.
+
+ :arg tuple fmt_pair: Two element tuple containing:
+ - format string suitable for displaying terminal sequences
+ - callable suitable for receiving __call__ arguments for formatting string
+ :arg str normal: terminating sequence for this capability (optional).
+ :arg str name: name of this terminal capability (optional).
+ """
+ assert isinstance(fmt_pair, tuple), fmt_pair
+ assert callable(fmt_pair[1]), fmt_pair[1]
+ new = six.text_type.__new__(cls, fmt_pair[0])
+ new._fmt_args = fmt_pair[1]
+ new._normal = normal
+ new._name = name
+ return new
+
+ def __call__(self, *args):
+ """
+ Returning :class:`FormattingString` instance for given parameters.
+
+ Arguments are determined by the capability. For example, ``hpa``
+ (move_x) receives only a single integer, whereas ``cup`` (move)
+ receives two integers. See documentation in terminfo(5) for the
+ given capability.
+
+ :rtype: FormattingString
+ :returns: Callable string for given parameters
+ """
+ return FormattingString(self.format(*self._fmt_args(*args)),
+ self._normal)
+
+
+class FormattingString(six.text_type):
+ r"""
+ A Unicode string which doubles as a callable.
+
+ This is used for terminal attributes, so that it may be used both
+ directly, or as a callable. When used directly, it simply emits
+ the given terminal sequence. When used as a callable, it wraps the
+ given (string) argument with the 2nd argument used by the class
+ constructor::
+
+ >>> from blessed import Terminal
+ >>> term = Terminal()
+ >>> style = FormattingString(term.bright_blue, term.normal)
+ >>> print(repr(style))
+ u'\x1b[94m'
+ >>> style('Big Blue')
+ u'\x1b[94mBig Blue\x1b(B\x1b[m'
+ """
+
+ def __new__(cls, sequence, normal=u''):
+ # pylint: disable = missing-return-doc, missing-return-type-doc
+ """
+ Class constructor accepting 2 positional arguments.
+
+ :arg str sequence: terminal attribute sequence.
+ :arg str normal: terminating sequence for this attribute (optional).
+ """
+ new = six.text_type.__new__(cls, sequence)
+ new._normal = normal
+ return new
+
+ def __call__(self, *args):
+ """
+ Return ``text`` joined by ``sequence`` and ``normal``.
+
+ :raises TypeError: Not a string type
+ :rtype: str
+ :returns: Arguments wrapped in sequence and normal
+ """
+ # Jim Allman brings us this convenience of allowing existing
+ # unicode strings to be joined as a call parameter to a formatting
+        # string result, allowing nesting:
+ #
+ # >>> t.red('This is ', t.bold('extremely'), ' dangerous!')
+ for idx, ucs_part in enumerate(args):
+ if not isinstance(ucs_part, six.string_types):
+ expected_types = ', '.join(_type.__name__ for _type in six.string_types)
+ raise TypeError(
+ "TypeError for FormattingString argument, "
+ "%r, at position %s: expected type %s, "
+ "got %s" % (ucs_part, idx, expected_types,
+ type(ucs_part).__name__))
+ postfix = u''
+ if self and self._normal:
+ postfix = self._normal
+ _refresh = self._normal + self
+ args = [_refresh.join(ucs_part.split(self._normal))
+ for ucs_part in args]
+
+ return self + u''.join(args) + postfix
+
+
+class FormattingOtherString(six.text_type):
+ r"""
+ A Unicode string which doubles as a callable for another sequence when called.
+
+ This is used for the :meth:`~.Terminal.move_up`, ``down``, ``left``, and ``right()``
+ family of functions::
+
+ >>> from blessed import Terminal
+ >>> term = Terminal()
+ >>> move_right = FormattingOtherString(term.cuf1, term.cuf)
+ >>> print(repr(move_right))
+ u'\x1b[C'
+ >>> print(repr(move_right(666)))
+ u'\x1b[666C'
+ >>> print(repr(move_right()))
+ u'\x1b[C'
+ """
+
+ def __new__(cls, direct, target):
+ # pylint: disable = missing-return-doc, missing-return-type-doc
+ """
+ Class constructor accepting 2 positional arguments.
+
+        :arg str direct: capability used when called directly, e.g. ``('x' + term.right)``.
+        :arg str target: capability used when called with arguments, e.g. ``('x' + term.right(99))``.
+ """
+ new = six.text_type.__new__(cls, direct)
+ new._callable = target
+ return new
+
+ def __getnewargs__(self):
+ # return arguments used for the __new__ method upon unpickling.
+ return six.text_type.__new__(six.text_type, self), self._callable
+
+ def __call__(self, *args):
+ """Return ``text`` by ``target``."""
+ if args:
+ return self._callable(*args)
+ return self
+
+
+class NullCallableString(six.text_type):
+ """
+ A dummy callable Unicode alternative to :class:`FormattingString`.
+
+    This is used for colors on terminals that do not support colors; it is just a basic form of
+ unicode that may also act as a callable.
+ """
+
+ def __new__(cls):
+ """Class constructor."""
+ return six.text_type.__new__(cls, u'')
+
+ def __call__(self, *args):
+ """
+ Allow empty string to be callable, returning given string, if any.
+
+ When called with an int as the first arg, return an empty Unicode. An
+ int is a good hint that I am a :class:`ParameterizingString`, as there
+ are only about half a dozen string-returning capabilities listed in
+        terminfo(5) which accept non-int arguments, and they are seldom used.
+
+        When called with a non-int as the first arg (or no args at all), return
+ the first arg, acting in place of :class:`FormattingString` without
+ any attributes.
+ """
+ if not args or isinstance(args[0], int):
+ # As a NullCallableString, even when provided with a parameter,
+        # such as t.color(5), we must also still be callable, e.g.:
+ #
+ # >>> t.color(5)('shmoo')
+ #
+ # is actually simplified result of NullCallable()() on terminals
+ # without color support, so turtles all the way down: we return
+ # another instance.
+ return NullCallableString()
+ return u''.join(args)
+
+
+def get_proxy_string(term, attr):
+ """
+ Proxy and return callable string for proxied attributes.
+
+ :arg Terminal term: :class:`~.Terminal` instance.
+ :arg str attr: terminal capability name that may be proxied.
+ :rtype: None or :class:`ParameterizingProxyString`.
+ :returns: :class:`ParameterizingProxyString` for some attributes
+ of some terminal types that support it, where the terminfo(5)
+ database would otherwise come up empty, such as ``move_x``
+ attribute for ``term.kind`` of ``screen``. Otherwise, None.
+ """
+ # normalize 'screen-256color', or 'ansi.sys' to its basic names
+ term_kind = next(iter(_kind for _kind in ('screen', 'ansi',)
+                          if term.kind.startswith(_kind)), term.kind)
+ _proxy_table = { # pragma: no cover
+ 'screen': {
+ # proxy move_x/move_y for 'screen' terminal type, used by tmux(1).
+ 'hpa': ParameterizingProxyString(
+ (u'\x1b[{0}G', lambda *arg: (arg[0] + 1,)), term.normal, attr),
+ 'vpa': ParameterizingProxyString(
+ (u'\x1b[{0}d', lambda *arg: (arg[0] + 1,)), term.normal, attr),
+ },
+ 'ansi': {
+ # proxy show/hide cursor for 'ansi' terminal type. There is some
+ # demand for a richly working ANSI terminal type for some reason.
+ 'civis': ParameterizingProxyString(
+ (u'\x1b[?25l', lambda *arg: ()), term.normal, attr),
+ 'cnorm': ParameterizingProxyString(
+ (u'\x1b[?25h', lambda *arg: ()), term.normal, attr),
+ 'hpa': ParameterizingProxyString(
+ (u'\x1b[{0}G', lambda *arg: (arg[0] + 1,)), term.normal, attr),
+ 'vpa': ParameterizingProxyString(
+ (u'\x1b[{0}d', lambda *arg: (arg[0] + 1,)), term.normal, attr),
+ 'sc': '\x1b[s',
+ 'rc': '\x1b[u',
+ }
+ }
+ return _proxy_table.get(term_kind, {}).get(attr, None)
+
+
+def split_compound(compound):
+ """
+    Split a compound formatting string into segments.
+
+ >>> split_compound('bold_underline_bright_blue_on_red')
+ ['bold', 'underline', 'bright_blue', 'on_red']
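+
+    The prefixes ``on``, ``bright``, and ``on_bright`` are merged into the
+    segment that follows them, for example:
+
+    >>> split_compound('on_bright_green_underline')
+    ['on_bright_green', 'underline']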
+
+ :arg str compound: a string that may contain compounds, separated by
+ underline (``_``).
+ :rtype: list
+    :returns: List of formatting string segments
+ """
+ merged_segs = []
+ # These occur only as prefixes, so they can always be merged:
+ mergeable_prefixes = ['on', 'bright', 'on_bright']
+ for segment in compound.split('_'):
+ if merged_segs and merged_segs[-1] in mergeable_prefixes:
+ merged_segs[-1] += '_' + segment
+ else:
+ merged_segs.append(segment)
+ return merged_segs
+
+
+def resolve_capability(term, attr):
+ """
+ Resolve a raw terminal capability using :func:`tigetstr`.
+
+ :arg Terminal term: :class:`~.Terminal` instance.
+ :arg str attr: terminal capability name.
+ :returns: string of the given terminal capability named by ``attr``,
+ which may be empty (u'') if not found or not supported by the
+ given :attr:`~.Terminal.kind`.
+ :rtype: str
+ """
+ if not term.does_styling:
+ return u''
+ val = curses.tigetstr(term._sugar.get(attr, attr)) # pylint: disable=protected-access
+ # Decode sequences as latin1, as they are always 8-bit bytes, so when
+ # b'\xff' is returned, this is decoded as u'\xff'.
+ return u'' if val is None else val.decode('latin1')
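+
+
+# For example (a sketch), resolve_capability(term, 'bold') typically returns
+# u'\x1b[1m' on xterm-like terminals, and u'' when styling is unsupported or
+# disabled.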
+
+
+def resolve_color(term, color):
+ """
+ Resolve a simple color name to a callable capability.
+
+ This function supports :func:`resolve_attribute`.
+
+ :arg Terminal term: :class:`~.Terminal` instance.
+ :arg str color: any string found in set :const:`COLORS`.
+ :returns: a string class instance which emits the terminal sequence
+ for the given color, and may be used as a callable to wrap the
+ given string with such sequence.
+ :returns: :class:`NullCallableString` when
+ :attr:`~.Terminal.number_of_colors` is 0,
+ otherwise :class:`FormattingString`.
+ :rtype: :class:`NullCallableString` or :class:`FormattingString`
+ """
+ # pylint: disable=protected-access
+ if term.number_of_colors == 0:
+ return NullCallableString()
+
+    # fg/bg capabilities for terminals that support 0-256+ colors.
+ vga_color_cap = (term._background_color if 'on_' in color else
+ term._foreground_color)
+
+ base_color = color.rsplit('_', 1)[-1]
+ if base_color in CGA_COLORS:
+ # curses constants go up to only 7, so add an offset to get at the
+ # bright colors at 8-15:
+ offset = 8 if 'bright_' in color else 0
+ attr = 'COLOR_%s' % (base_color.upper(),)
+ fmt_attr = vga_color_cap(getattr(curses, attr) + offset)
+ return FormattingString(fmt_attr, term.normal)
+
+ assert base_color in X11_COLORNAMES_TO_RGB, (
+ 'color not known', base_color)
+ rgb = X11_COLORNAMES_TO_RGB[base_color]
+
+ # downconvert X11 colors to CGA, EGA, or VGA color spaces
+ if term.number_of_colors <= 256:
+ fmt_attr = vga_color_cap(term.rgb_downconvert(*rgb))
+ return FormattingString(fmt_attr, term.normal)
+
+ # Modern 24-bit color terminals are written pretty basically. The
+ # foreground and background sequences are:
+ # - ^[38;2;<r>;<g>;<b>m
+ # - ^[48;2;<r>;<g>;<b>m
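+    # For example (a sketch), 'on_darkorange' with RGB(255, 140, 0) emits
+    # u'\x1b[48;2;255;140;0m'.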
+ fgbg_seq = ('48' if 'on_' in color else '38')
+ assert term.number_of_colors == 1 << 24
+ fmt_attr = u'\x1b[' + fgbg_seq + ';2;{0};{1};{2}m'
+ return FormattingString(fmt_attr.format(*rgb), term.normal)
+
+
+def resolve_attribute(term, attr):
+ """
+ Resolve a terminal attribute name into a capability class.
+
+ :arg Terminal term: :class:`~.Terminal` instance.
+ :arg str attr: Sugary, ordinary, or compound formatted terminal
+ capability, such as "red_on_white", "normal", "red", or
+ "bold_on_black".
+ :returns: a string class instance which emits the terminal sequence
+ for the given terminal capability, or may be used as a callable to
+ wrap the given string with such sequence.
+ :returns: :class:`NullCallableString` when
+ :attr:`~.Terminal.number_of_colors` is 0,
+ otherwise :class:`FormattingString`.
+ :rtype: :class:`NullCallableString` or :class:`FormattingString`
+ """
+ if attr in COLORS:
+ return resolve_color(term, attr)
+
+ # A direct compoundable, such as `bold' or `on_red'.
+ if attr in COMPOUNDABLES:
+ sequence = resolve_capability(term, attr)
+ return FormattingString(sequence, term.normal)
+
+ # Given `bold_on_red', resolve to ('bold', 'on_red'), RECURSIVE
+ # call for each compounding section, joined and returned as
+    # a completed FormattingString.
+ formatters = split_compound(attr)
+ if all((fmt in COLORS or fmt in COMPOUNDABLES) for fmt in formatters):
+ resolution = (resolve_attribute(term, fmt) for fmt in formatters)
+ return FormattingString(u''.join(resolution), term.normal)
+
+ # otherwise, this is our end-game: given a sequence such as 'csr'
+ # (change scrolling region), return a ParameterizingString instance,
+ # that when called, performs and returns the final string after curses
+ # capability lookup is performed.
+ tparm_capseq = resolve_capability(term, attr)
+ if not tparm_capseq:
+ # and, for special terminals, such as 'screen', provide a Proxy
+ # ParameterizingString for attributes they do not claim to support,
+ # but actually do! (such as 'hpa' and 'vpa').
+ proxy = get_proxy_string(term,
+ term._sugar.get(attr, attr)) # pylint: disable=protected-access
+ if proxy is not None:
+ return proxy
+
+ return ParameterizingString(tparm_capseq, term.normal, attr)
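+
+
+# A hedged usage sketch of the resolvers above ('term' is assumed to be a
+# blessed.Terminal instance on an xterm-like, color-capable terminal; the
+# exact escape sequences vary by terminal type):
+#
+#     >>> resolve_attribute(term, 'bold_on_red')('Warning!')
+#     u'\x1b[1m\x1b[41mWarning!\x1b(B\x1b[m'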
diff --git a/third_party/python/blessed/blessed/formatters.pyi b/third_party/python/blessed/blessed/formatters.pyi
new file mode 100644
index 0000000000..32a3dc2df3
--- /dev/null
+++ b/third_party/python/blessed/blessed/formatters.pyi
@@ -0,0 +1,70 @@
+# std imports
+from typing import (Any,
+ Set,
+ List,
+ Type,
+ Tuple,
+ Union,
+ TypeVar,
+ Callable,
+ NoReturn,
+ Optional,
+ overload)
+
+# local
+from .terminal import Terminal
+
+COLORS: Set[str]
+COMPOUNDABLES: Set[str]
+
+_T = TypeVar("_T")
+
+class ParameterizingString(str):
+ def __new__(cls: Type[_T], cap: str, normal: str = ..., name: str = ...) -> _T: ...
+ @overload
+ def __call__(
+ self, *args: int
+ ) -> Union["FormattingString", "NullCallableString"]: ...
+ @overload
+ def __call__(self, *args: str) -> NoReturn: ...
+
+class ParameterizingProxyString(str):
+ def __new__(
+ cls: Type[_T],
+ fmt_pair: Tuple[str, Callable[..., Tuple[object, ...]]],
+ normal: str = ...,
+ name: str = ...,
+ ) -> _T: ...
+ def __call__(self, *args: Any) -> "FormattingString": ...
+
+class FormattingString(str):
+ def __new__(cls: Type[_T], sequence: str, normal: str = ...) -> _T: ...
+ @overload
+ def __call__(self, *args: int) -> NoReturn: ...
+ @overload
+ def __call__(self, *args: str) -> str: ...
+
+class FormattingOtherString(str):
+ def __new__(
+ cls: Type[_T], direct: ParameterizingString, target: ParameterizingString = ...
+ ) -> _T: ...
+ def __call__(self, *args: Union[int, str]) -> str: ...
+
+class NullCallableString(str):
+ def __new__(cls: Type[_T]) -> _T: ...
+ @overload
+ def __call__(self, *args: int) -> "NullCallableString": ...
+ @overload
+ def __call__(self, *args: str) -> str: ...
+
+def get_proxy_string(
+ term: Terminal, attr: str
+) -> Optional[ParameterizingProxyString]: ...
+def split_compound(compound: str) -> List[str]: ...
+def resolve_capability(term: Terminal, attr: str) -> str: ...
+def resolve_color(
+ term: Terminal, color: str
+) -> Union[NullCallableString, FormattingString]: ...
+def resolve_attribute(
+ term: Terminal, attr: str
+) -> Union[ParameterizingString, FormattingString]: ...
diff --git a/third_party/python/blessed/blessed/keyboard.py b/third_party/python/blessed/blessed/keyboard.py
new file mode 100644
index 0000000000..2736da160f
--- /dev/null
+++ b/third_party/python/blessed/blessed/keyboard.py
@@ -0,0 +1,449 @@
+"""Sub-module providing 'keyboard awareness'."""
+
+# std imports
+import re
+import time
+import platform
+from collections import OrderedDict
+
+# 3rd party
+import six
+
+# isort: off
+# curses
+if platform.system() == 'Windows':
+ # pylint: disable=import-error
+ import jinxed as curses
+ from jinxed.has_key import _capability_names as capability_names
+else:
+ import curses
+ from curses.has_key import _capability_names as capability_names
+
+
+class Keystroke(six.text_type):
+ """
+ A unicode-derived class for describing a single keystroke.
+
+ A class instance describes a single keystroke received on input,
+ which may contain multiple characters as a multibyte sequence,
+ which is indicated by properties :attr:`is_sequence` returning
+ ``True``.
+
+ When the string is a known sequence, :attr:`code` matches terminal
+ class attributes for comparison, such as ``term.KEY_LEFT``.
+
+ The string-name of the sequence, such as ``u'KEY_LEFT'`` is accessed
+ by property :attr:`name`, and is used by the :meth:`__repr__` method
+ to display a human-readable form of the Keystroke this class
+    instance represents. It may otherwise be joined, split, or evaluated
+    just as any other unicode string.
+ """
+
+ def __new__(cls, ucs='', code=None, name=None):
+ """Class constructor."""
+ new = six.text_type.__new__(cls, ucs)
+ new._name = name
+ new._code = code
+ return new
+
+ @property
+ def is_sequence(self):
+ """Whether the value represents a multibyte sequence (bool)."""
+ return self._code is not None
+
+ def __repr__(self):
+ """Docstring overwritten."""
+ return (six.text_type.__repr__(self) if self._name is None else
+ self._name)
+ __repr__.__doc__ = six.text_type.__doc__
+
+ @property
+ def name(self):
+ """String-name of key sequence, such as ``u'KEY_LEFT'`` (str)."""
+ return self._name
+
+ @property
+ def code(self):
+ """Integer keycode value of multibyte sequence (int)."""
+ return self._code
+
+
+def get_curses_keycodes():
+ """
+ Return mapping of curses key-names paired by their keycode integer value.
+
+ :rtype: dict
+ :returns: Dictionary of (name, code) pairs for curses keyboard constant
+ values and their mnemonic name. Such as code ``260``, with the value of
+ its key-name identity, ``u'KEY_LEFT'``.
+ """
+ _keynames = [attr for attr in dir(curses)
+ if attr.startswith('KEY_')]
+ return {keyname: getattr(curses, keyname) for keyname in _keynames}
+
+
+def get_keyboard_codes():
+ """
+ Return mapping of keycode integer values paired by their curses key-name.
+
+ :rtype: dict
+ :returns: Dictionary of (code, name) pairs for curses keyboard constant
+ values and their mnemonic name. Such as key ``260``, with the value of
+ its identity, ``u'KEY_LEFT'``.
+
+    These keys are derived from the attributes of the same name in the
+    curses module, with the following exceptions:
+
+ * ``KEY_DELETE`` in place of ``KEY_DC``
+ * ``KEY_INSERT`` in place of ``KEY_IC``
+ * ``KEY_PGUP`` in place of ``KEY_PPAGE``
+ * ``KEY_PGDOWN`` in place of ``KEY_NPAGE``
+ * ``KEY_ESCAPE`` in place of ``KEY_EXIT``
+ * ``KEY_SUP`` in place of ``KEY_SR``
+ * ``KEY_SDOWN`` in place of ``KEY_SF``
+
+ This function is the inverse of :func:`get_curses_keycodes`. With the
+ given override "mixins" listed above, the keycode for the delete key will
+ map to our imaginary ``KEY_DELETE`` mnemonic, effectively erasing the
+ phrase ``KEY_DC`` from our code vocabulary for anyone that wishes to use
+ the return value to determine the key-name by keycode.
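+
+    For example (a sketch; the exact integer value is curses- and
+    platform-dependent)::
+
+        >>> get_keyboard_codes()[curses.KEY_DC]
+        'KEY_DELETE'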
+ """
+ keycodes = OrderedDict(get_curses_keycodes())
+ keycodes.update(CURSES_KEYCODE_OVERRIDE_MIXIN)
+ # merge _CURSES_KEYCODE_ADDINS added to our module space
+ keycodes.update((name, value) for name, value in globals().items() if name.startswith('KEY_'))
+
+ # invert dictionary (key, values) => (values, key), preferring the
+ # last-most inserted value ('KEY_DELETE' over 'KEY_DC').
+ return dict(zip(keycodes.values(), keycodes.keys()))
+
+
+def _alternative_left_right(term):
+ r"""
+ Determine and return mapping of left and right arrow keys sequences.
+
+ :arg blessed.Terminal term: :class:`~.Terminal` instance.
+ :rtype: dict
+ :returns: Dictionary of sequences ``term._cuf1``, and ``term._cub1``,
+ valued as ``KEY_RIGHT``, ``KEY_LEFT`` (when appropriate).
+
+ This function supports :func:`get_terminal_sequences` to discover
+ the preferred input sequence for the left and right application keys.
+
+ It is necessary to check the value of these sequences to ensure we do not
+ use ``u' '`` and ``u'\b'`` for ``KEY_RIGHT`` and ``KEY_LEFT``,
+ preferring their true application key sequence, instead.
+ """
+ # pylint: disable=protected-access
+ keymap = {}
+ if term._cuf1 and term._cuf1 != u' ':
+ keymap[term._cuf1] = curses.KEY_RIGHT
+ if term._cub1 and term._cub1 != u'\b':
+ keymap[term._cub1] = curses.KEY_LEFT
+ return keymap
+
+
+def get_keyboard_sequences(term):
+ r"""
+ Return mapping of keyboard sequences paired by keycodes.
+
+ :arg blessed.Terminal term: :class:`~.Terminal` instance.
+ :returns: mapping of keyboard unicode sequences paired by keycodes
+ as integer. This is used as the argument ``mapper`` to
+ the supporting function :func:`resolve_sequence`.
+ :rtype: OrderedDict
+
+ Initialize and return a keyboard map and sequence lookup table,
+ (sequence, keycode) from :class:`~.Terminal` instance ``term``,
+ where ``sequence`` is a multibyte input sequence of unicode
+ characters, such as ``u'\x1b[D'``, and ``keycode`` is an integer
+ value, matching curses constant such as term.KEY_LEFT.
+
+ The return value is an OrderedDict instance, with their keys
+ sorted longest-first.
+ """
+ # A small gem from curses.has_key that makes this all possible,
+ # _capability_names: a lookup table of terminal capability names for
+    # keyboard sequences (e.g. kcub1, key_left), keyed by the values of
+ # constants found beginning with KEY_ in the main curses module
+ # (such as KEY_LEFT).
+ #
+ # latin1 encoding is used so that bytes in 8-bit range of 127-255
+ # have equivalent chr() and unichr() values, so that the sequence
+ # of a kermit or avatar terminal, for example, remains unchanged
+ # in its byte sequence values even when represented by unicode.
+ #
+ sequence_map = dict((
+ (seq.decode('latin1'), val)
+ for (seq, val) in (
+ (curses.tigetstr(cap), val)
+ for (val, cap) in capability_names.items()
+ ) if seq
+ ) if term.does_styling else ())
+
+ sequence_map.update(_alternative_left_right(term))
+ sequence_map.update(DEFAULT_SEQUENCE_MIXIN)
+
+ # This is for fast lookup matching of sequences, preferring
+ # full-length sequence such as ('\x1b[D', KEY_LEFT)
+ # over simple sequences such as ('\x1b', KEY_EXIT).
+ return OrderedDict((
+ (seq, sequence_map[seq]) for seq in sorted(
+ sequence_map.keys(), key=len, reverse=True)))
+
+
+def get_leading_prefixes(sequences):
+ """
+ Return a set of proper prefixes for given sequence of strings.
+
+    :arg iterable sequences: iterable of string sequences to derive prefixes from.
+ :rtype: set
+ :return: Set of all string prefixes
+
+    Given an iterable of strings, all proper prefixes leading up to each
+    full string are returned as a unique set. This function supports the
+ :meth:`~.Terminal.inkey` method by determining whether the given
+ input is a sequence that **may** lead to a final matching pattern.
+
+    >>> get_leading_prefixes(['abc', 'abdf', 'e', 'jkl'])
+ set([u'a', u'ab', u'abd', u'j', u'jk'])
+ """
+ return {seq[:i] for seq in sequences for i in range(1, len(seq))}
+
+
+def resolve_sequence(text, mapper, codes):
+ r"""
+ Return a single :class:`Keystroke` instance for given sequence ``text``.
+
+ :arg str text: string of characters received from terminal input stream.
+ :arg OrderedDict mapper: unicode multibyte sequences, such as ``u'\x1b[D'``
+ paired by their integer value (260)
+    :arg dict codes: a ``dict`` of integer values (such as 260) paired
+ by their mnemonic name, such as ``'KEY_LEFT'``.
+ :rtype: Keystroke
+ :returns: Keystroke instance for the given sequence
+
+    The given ``text`` may extend beyond a matching sequence; for example,
+    ``u'\x1b[Dxxx'`` returns a :class:`Keystroke` instance valued only
+    ``u'\x1b[D'``. It is up to callers to determine that ``xxx`` remains
+    unresolved.
+ """
+ for sequence, code in mapper.items():
+ if text.startswith(sequence):
+ return Keystroke(ucs=sequence, code=code, name=codes[code])
+ return Keystroke(ucs=text and text[0] or u'')
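+
+
+# A hedged usage sketch ('term' is assumed to be a blessed.Terminal instance;
+# u'\x1b[D' is mapped to KEY_LEFT by DEFAULT_SEQUENCE_MIXIN below):
+#
+#     >>> mapper = get_keyboard_sequences(term)
+#     >>> codes = get_keyboard_codes()
+#     >>> resolve_sequence(u'\x1b[Dxxx', mapper, codes).name
+#     'KEY_LEFT'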
+
+
+def _time_left(stime, timeout):
+ """
+ Return time remaining since ``stime`` before given ``timeout``.
+
+ This function assists determining the value of ``timeout`` for
+ class method :meth:`~.Terminal.kbhit` and similar functions.
+
+ :arg float stime: starting time for measurement
+ :arg float timeout: timeout period, may be set to None to
+ indicate no timeout (where None is always returned).
+ :rtype: float or int
+ :returns: time remaining as float. If no time is remaining,
+ then the integer ``0`` is returned.
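+
+    For example (a sketch), with ``timeout=1.0`` and 0.25 seconds elapsed
+    since ``stime``, roughly ``0.75`` is returned; with ``timeout=None``,
+    ``None`` is returned.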
+ """
+ return max(0, timeout - (time.time() - stime)) if timeout else timeout
+
+
+def _read_until(term, pattern, timeout):
+ """
+ Convenience read-until-pattern function, supporting :meth:`~.get_location`.
+
+ :arg blessed.Terminal term: :class:`~.Terminal` instance.
+ :arg float timeout: timeout period, may be set to None to indicate no
+        timeout (block indefinitely until ``pattern`` is matched).
+ :arg str pattern: target regular expression pattern to seek.
+ :rtype: tuple
+ :returns: tuple in form of ``(match, str)``, *match*
+ may be :class:`re.MatchObject` if pattern is discovered
+ in input stream before timeout has elapsed, otherwise
+        None. ``str`` is any remaining text received, exclusive
+        of the matching pattern.
+
+ The reason a tuple containing non-matching data is returned, is that the
+ consumer should push such data back into the input buffer by
+ :meth:`~.Terminal.ungetch` if any was received.
+
+    For example, when a user is performing rapid input keystrokes while their
+ terminal emulator surreptitiously responds to this in-band sequence, we
+ must ensure any such keyboard data is well-received by the next call to
+ term.inkey() without delay.
+ """
+ stime = time.time()
+ match, buf = None, u''
+
+ # first, buffer all pending data. pexpect library provides a
+ # 'searchwindowsize' attribute that limits this memory region. We're not
+ # concerned about OOM conditions: only (human) keyboard input and terminal
+ # response sequences are expected.
+
+ while True: # pragma: no branch
+ # block as long as necessary to ensure at least one character is
+ # received on input or remaining timeout has elapsed.
+ ucs = term.inkey(timeout=_time_left(stime, timeout))
+ # while the keyboard buffer is "hot" (has input), we continue to
+ # aggregate all awaiting data. We do this to ensure slow I/O
+ # calls do not unnecessarily give up within the first 'while' loop
+ # for short timeout periods.
+ while ucs:
+ buf += ucs
+ ucs = term.inkey(timeout=0)
+
+ match = re.search(pattern=pattern, string=buf)
+ if match is not None:
+ # match
+ break
+
+ if timeout is not None and not _time_left(stime, timeout):
+ # timeout
+ break
+
+ return match, buf
+
+
+#: Though we may determine *keynames* and codes for keyboard input that
+#: generates multibyte sequences, it is also especially useful to alias
+#: a few basic ASCII characters such as ``KEY_TAB`` instead of ``u'\t'`` for
+#: uniformity.
+#:
+#: Furthermore, many key-names for application keys enabled only by context
+#: manager :meth:`~.Terminal.keypad` are surprisingly absent. We inject them
+#: here directly into the curses module.
+_CURSES_KEYCODE_ADDINS = (
+ 'TAB',
+ 'KP_MULTIPLY',
+ 'KP_ADD',
+ 'KP_SEPARATOR',
+ 'KP_SUBTRACT',
+ 'KP_DECIMAL',
+ 'KP_DIVIDE',
+ 'KP_EQUAL',
+ 'KP_0',
+ 'KP_1',
+ 'KP_2',
+ 'KP_3',
+ 'KP_4',
+ 'KP_5',
+ 'KP_6',
+ 'KP_7',
+ 'KP_8',
+ 'KP_9')
+
+_LASTVAL = max(get_curses_keycodes().values())
+for keycode_name in _CURSES_KEYCODE_ADDINS:
+ _LASTVAL += 1
+ globals()['KEY_' + keycode_name] = _LASTVAL
+
+#: In a perfect world, terminal emulators would always send exactly what
+#: the terminfo(5) capability database plans for them, accordingly by the
+#: value of the ``TERM`` name they declare.
+#:
+#: But this isn't a perfect world. Many vt220-derived terminals, such as
+#: those declaring 'xterm', will continue to send vt220 codes instead of
+#: their native-declared codes, for backwards-compatibility.
+#:
+#: This goes for many: rxvt, putty, iTerm.
+#:
+#: These "mixins" are used for *all* terminals, regardless of their type.
+#:
+#: Furthermore, curses does not provide sequences sent by the keypad,
+#: at least, it does not provide a way to distinguish between keypad 0
+#: and numeric 0.
+DEFAULT_SEQUENCE_MIXIN = (
+ # these common control characters (and 127, ctrl+'?') mapped to
+ # an application key definition.
+ (six.unichr(10), curses.KEY_ENTER),
+ (six.unichr(13), curses.KEY_ENTER),
+ (six.unichr(8), curses.KEY_BACKSPACE),
+ (six.unichr(9), KEY_TAB), # noqa # pylint: disable=undefined-variable
+ (six.unichr(27), curses.KEY_EXIT),
+ (six.unichr(127), curses.KEY_BACKSPACE),
+
+ (u"\x1b[A", curses.KEY_UP),
+ (u"\x1b[B", curses.KEY_DOWN),
+ (u"\x1b[C", curses.KEY_RIGHT),
+ (u"\x1b[D", curses.KEY_LEFT),
+ (u"\x1b[1;2A", curses.KEY_SR),
+ (u"\x1b[1;2B", curses.KEY_SF),
+ (u"\x1b[1;2C", curses.KEY_SRIGHT),
+ (u"\x1b[1;2D", curses.KEY_SLEFT),
+ (u"\x1b[F", curses.KEY_END),
+ (u"\x1b[H", curses.KEY_HOME),
+ # not sure where these are from .. please report
+ (u"\x1b[K", curses.KEY_END),
+ (u"\x1b[U", curses.KEY_NPAGE),
+ (u"\x1b[V", curses.KEY_PPAGE),
+
+ # keys sent after term.smkx (keypad_xmit) is emitted, source:
+ # http://www.xfree86.org/current/ctlseqs.html#PC-Style%20Function%20Keys
+ # http://fossies.org/linux/rxvt/doc/rxvtRef.html#KeyCodes
+ #
+ # keypad, numlock on
+ (u"\x1bOM", curses.KEY_ENTER), # noqa return
+ (u"\x1bOj", KEY_KP_MULTIPLY), # noqa * # pylint: disable=undefined-variable
+ (u"\x1bOk", KEY_KP_ADD), # noqa + # pylint: disable=undefined-variable
+ (u"\x1bOl", KEY_KP_SEPARATOR), # noqa , # pylint: disable=undefined-variable
+ (u"\x1bOm", KEY_KP_SUBTRACT), # noqa - # pylint: disable=undefined-variable
+ (u"\x1bOn", KEY_KP_DECIMAL), # noqa . # pylint: disable=undefined-variable
+ (u"\x1bOo", KEY_KP_DIVIDE), # noqa / # pylint: disable=undefined-variable
+ (u"\x1bOX", KEY_KP_EQUAL), # noqa = # pylint: disable=undefined-variable
+ (u"\x1bOp", KEY_KP_0), # noqa 0 # pylint: disable=undefined-variable
+ (u"\x1bOq", KEY_KP_1), # noqa 1 # pylint: disable=undefined-variable
+ (u"\x1bOr", KEY_KP_2), # noqa 2 # pylint: disable=undefined-variable
+ (u"\x1bOs", KEY_KP_3), # noqa 3 # pylint: disable=undefined-variable
+ (u"\x1bOt", KEY_KP_4), # noqa 4 # pylint: disable=undefined-variable
+ (u"\x1bOu", KEY_KP_5), # noqa 5 # pylint: disable=undefined-variable
+ (u"\x1bOv", KEY_KP_6), # noqa 6 # pylint: disable=undefined-variable
+ (u"\x1bOw", KEY_KP_7), # noqa 7 # pylint: disable=undefined-variable
+ (u"\x1bOx", KEY_KP_8), # noqa 8 # pylint: disable=undefined-variable
+ (u"\x1bOy", KEY_KP_9), # noqa 9 # pylint: disable=undefined-variable
+
+ # keypad, numlock off
+ (u"\x1b[1~", curses.KEY_FIND), # find
+ (u"\x1b[2~", curses.KEY_IC), # insert (0)
+ (u"\x1b[3~", curses.KEY_DC), # delete (.), "Execute"
+ (u"\x1b[4~", curses.KEY_SELECT), # select
+ (u"\x1b[5~", curses.KEY_PPAGE), # pgup (9)
+ (u"\x1b[6~", curses.KEY_NPAGE), # pgdown (3)
+ (u"\x1b[7~", curses.KEY_HOME), # home
+ (u"\x1b[8~", curses.KEY_END), # end
+ (u"\x1b[OA", curses.KEY_UP), # up (8)
+ (u"\x1b[OB", curses.KEY_DOWN), # down (2)
+ (u"\x1b[OC", curses.KEY_RIGHT), # right (6)
+ (u"\x1b[OD", curses.KEY_LEFT), # left (4)
+ (u"\x1b[OF", curses.KEY_END), # end (1)
+ (u"\x1b[OH", curses.KEY_HOME), # home (7)
+
+ # The vt220 placed F1-F4 above the keypad, in place of actual
+ # F1-F4 were local functions (hold screen, print screen,
+ # set up, data/talk, break).
+ (u"\x1bOP", curses.KEY_F1),
+ (u"\x1bOQ", curses.KEY_F2),
+ (u"\x1bOR", curses.KEY_F3),
+ (u"\x1bOS", curses.KEY_F4),
+)
+
+#: Override mixins for a few curses constants with easier
+#: mnemonics: there may only be a 1:1 mapping when only a
+#: keycode (int) is given, where these phrases are preferred.
+CURSES_KEYCODE_OVERRIDE_MIXIN = (
+ ('KEY_DELETE', curses.KEY_DC),
+ ('KEY_INSERT', curses.KEY_IC),
+ ('KEY_PGUP', curses.KEY_PPAGE),
+ ('KEY_PGDOWN', curses.KEY_NPAGE),
+ ('KEY_ESCAPE', curses.KEY_EXIT),
+ ('KEY_SUP', curses.KEY_SR),
+ ('KEY_SDOWN', curses.KEY_SF),
+ ('KEY_UP_LEFT', curses.KEY_A1),
+ ('KEY_UP_RIGHT', curses.KEY_A3),
+ ('KEY_CENTER', curses.KEY_B2),
+ ('KEY_BEGIN', curses.KEY_BEG),
+)
+
+__all__ = ('Keystroke', 'get_keyboard_codes', 'get_keyboard_sequences',)
diff --git a/third_party/python/blessed/blessed/keyboard.pyi b/third_party/python/blessed/blessed/keyboard.pyi
new file mode 100644
index 0000000000..ae76393f14
--- /dev/null
+++ b/third_party/python/blessed/blessed/keyboard.pyi
@@ -0,0 +1,28 @@
+# std imports
+from typing import Set, Dict, Type, Mapping, TypeVar, Iterable, Optional, OrderedDict
+
+# local
+from .terminal import Terminal
+
+_T = TypeVar("_T")
+
+class Keystroke(str):
+ def __new__(
+ cls: Type[_T],
+ ucs: str = ...,
+ code: Optional[int] = ...,
+ name: Optional[str] = ...,
+ ) -> _T: ...
+ @property
+ def is_sequence(self) -> bool: ...
+ @property
+ def name(self) -> Optional[str]: ...
+ @property
+ def code(self) -> Optional[int]: ...
+
+def get_keyboard_codes() -> Dict[int, str]: ...
+def get_keyboard_sequences(term: Terminal) -> OrderedDict[str, int]: ...
+def get_leading_prefixes(sequences: Iterable[str]) -> Set[str]: ...
+def resolve_sequence(
+ text: str, mapper: Mapping[str, int], codes: Mapping[int, str]
+) -> Keystroke: ...
diff --git a/third_party/python/blessed/blessed/py.typed b/third_party/python/blessed/blessed/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/blessed/blessed/py.typed
diff --git a/third_party/python/blessed/blessed/sequences.py b/third_party/python/blessed/blessed/sequences.py
new file mode 100644
index 0000000000..6735ab8504
--- /dev/null
+++ b/third_party/python/blessed/blessed/sequences.py
@@ -0,0 +1,461 @@
+# -*- coding: utf-8 -*-
+"""Module providing 'sequence awareness'."""
+# std imports
+import re
+import math
+import textwrap
+
+# 3rd party
+import six
+from wcwidth import wcwidth
+
+# local
+from blessed._capabilities import CAPABILITIES_CAUSE_MOVEMENT
+
+__all__ = ('Sequence', 'SequenceTextWrapper', 'iter_parse', 'measure_length')
+
+
+class Termcap(object):
+ """Terminal capability of given variable name and pattern."""
+
+ def __init__(self, name, pattern, attribute):
+ """
+ Class initializer.
+
+ :arg str name: name describing capability.
+ :arg str pattern: regular expression string.
+ :arg str attribute: :class:`~.Terminal` attribute used to build
+ this terminal capability.
+ """
+ self.name = name
+ self.pattern = pattern
+ self.attribute = attribute
+ self._re_compiled = None
+
+ def __repr__(self):
+ # pylint: disable=redundant-keyword-arg
+ return '<Termcap {self.name}:{self.pattern!r}>'.format(self=self)
+
+ @property
+ def named_pattern(self):
+ """Regular expression pattern for capability with named group."""
+ # pylint: disable=redundant-keyword-arg
+ return '(?P<{self.name}>{self.pattern})'.format(self=self)
+
+ @property
+ def re_compiled(self):
+ """Compiled regular expression pattern for capability."""
+ if self._re_compiled is None:
+ self._re_compiled = re.compile(self.pattern)
+ return self._re_compiled
+
+ @property
+ def will_move(self):
+ """Whether capability causes cursor movement."""
+ return self.name in CAPABILITIES_CAUSE_MOVEMENT
+
+ def horizontal_distance(self, text):
+ """
+ Horizontal carriage adjusted by capability, may be negative.
+
+ :rtype: int
+ :arg str text: for capabilities *parm_left_cursor*,
+ *parm_right_cursor*, provide the matching sequence
+ text, its interpreted distance is returned.
+
+        :returns: horizontal distance in cells adjusted by this
+            capability; 0 when the capability does not adjust the
+            horizontal position.
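+
+        For example (a sketch), a capability built for *parm_right_cursor*
+        returns 3 for matching text ``u'\x1b[3C'``, while *cursor_left*
+        returns -1 for any matching text.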
+ """
+ value = {
+ 'cursor_left': -1,
+ 'backspace': -1,
+ 'cursor_right': 1,
+ 'tab': 8,
+ 'ascii_tab': 8,
+ }.get(self.name, None)
+ if value is not None:
+ return value
+
+ unit = {
+ 'parm_left_cursor': -1,
+ 'parm_right_cursor': 1
+ }.get(self.name, None)
+ if unit is not None:
+ value = int(self.re_compiled.match(text).group(1))
+ return unit * value
+
+ return 0
+
+ # pylint: disable=too-many-arguments
+ @classmethod
+ def build(cls, name, capability, attribute, nparams=0,
+ numeric=99, match_grouped=False, match_any=False,
+ match_optional=False):
+ r"""
+ Class factory builder for given capability definition.
+
+ :arg str name: Variable name given for this pattern.
+ :arg str capability: A unicode string representing a terminal
+ capability to build for. When ``nparams`` is non-zero, it
+ must be a callable unicode string (such as the result from
+ ``getattr(term, 'bold')``.
+ :arg str attribute: The terminfo(5) capability name by which this
+ pattern is known.
+ :arg int nparams: number of positional arguments for callable.
+ :arg int numeric: Value to substitute into capability to when generating pattern
+ :arg bool match_grouped: If the numeric pattern should be
+ grouped, ``(\d+)`` when ``True``, ``\d+`` default.
+ :arg bool match_any: When keyword argument ``nparams`` is given,
+ *any* numeric found in output is suitable for building as
+ pattern ``(\d+)``. Otherwise, only the first matching value of
+ range *(numeric - 1)* through *(numeric + 1)* will be replaced by
+ pattern ``(\d+)`` in builder.
+ :arg bool match_optional: When ``True``, building of numeric patterns
+ containing ``(\d+)`` will be built as optional, ``(\d+)?``.
+ :rtype: blessed.sequences.Termcap
+ :returns: Terminal capability instance for given capability definition
+ """
+ _numeric_regex = r'\d+'
+ if match_grouped:
+ _numeric_regex = r'(\d+)'
+ if match_optional:
+ _numeric_regex = r'(\d+)?'
+ numeric = 99 if numeric is None else numeric
+
+ # basic capability attribute, not used as a callable
+ if nparams == 0:
+ return cls(name, re.escape(capability), attribute)
+
+ # a callable capability accepting numeric argument
+ _outp = re.escape(capability(*(numeric,) * nparams))
+ if not match_any:
+ for num in range(numeric - 1, numeric + 2):
+ if str(num) in _outp:
+ pattern = _outp.replace(str(num), _numeric_regex)
+ return cls(name, pattern, attribute)
+
+ if match_grouped:
+ pattern = re.sub(r'(\d+)', lambda x: _numeric_regex, _outp)
+ else:
+ pattern = re.sub(r'\d+', lambda x: _numeric_regex, _outp)
+ return cls(name, pattern, attribute)
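+
+# A minimal usage sketch (illustrative, not part of upstream blessed): given a
+# styling-capable Terminal instance ``term``, a Termcap built for the
+# parameterized capability ``cuf`` ("parm_right_cursor") matches any numeric
+# variant of the sequence and reports its horizontal distance:
+#
+#     cap = Termcap.build('parm_right_cursor', term.cuf, 'cuf',
+#                         nparams=1, match_grouped=True)
+#     seq = term.cuf(5)                        # e.g. '\x1b[5C'
+#     assert cap.re_compiled.match(seq)        # pattern matches any count
+#     assert cap.horizontal_distance(seq) == 5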
+
+
+class SequenceTextWrapper(textwrap.TextWrapper):
+ """Docstring overridden."""
+
+ def __init__(self, width, term, **kwargs):
+ """
+ Class initializer.
+
+ This class supports the :meth:`~.Terminal.wrap` method.
+ """
+ self.term = term
+ textwrap.TextWrapper.__init__(self, width, **kwargs)
+
+ def _wrap_chunks(self, chunks):
+ """
+ Sequence-aware variant of :meth:`textwrap.TextWrapper._wrap_chunks`.
+
+ :raises ValueError: ``self.width`` is not a positive integer
+ :rtype: list
+ :returns: text chunks adjusted for width
+
+        This simply ensures that word boundaries are not broken mid-sequence, as standard python
+        textwrap would incorrectly determine the length of a string containing sequences, and may
+        also consider sequences part of a "word" that may be broken by hyphen (``-``);
+        this implementation corrects both.
+ """
+ lines = []
+ if self.width <= 0 or not isinstance(self.width, int):
+ raise ValueError(
+ "invalid width {0!r}({1!r}) (must be integer > 0)"
+ .format(self.width, type(self.width)))
+
+ term = self.term
+ drop_whitespace = not hasattr(self, 'drop_whitespace'
+ ) or self.drop_whitespace
+ chunks.reverse()
+ while chunks:
+ cur_line = []
+ cur_len = 0
+ indent = self.subsequent_indent if lines else self.initial_indent
+ width = self.width - len(indent)
+ if drop_whitespace and (
+ Sequence(chunks[-1], term).strip() == '' and lines):
+ del chunks[-1]
+ while chunks:
+ chunk_len = Sequence(chunks[-1], term).length()
+ if cur_len + chunk_len > width:
+ break
+ cur_line.append(chunks.pop())
+ cur_len += chunk_len
+ if chunks and Sequence(chunks[-1], term).length() > width:
+ self._handle_long_word(chunks, cur_line, cur_len, width)
+ if drop_whitespace and (
+ cur_line and Sequence(cur_line[-1], term).strip() == ''):
+ del cur_line[-1]
+ if cur_line:
+ lines.append(indent + u''.join(cur_line))
+ return lines
+
+ def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ """
+ Sequence-aware :meth:`textwrap.TextWrapper._handle_long_word`.
+
+        This simply ensures that word boundaries are not broken mid-sequence, as standard python
+        textwrap would incorrectly determine the length of a string containing sequences, and may
+        also consider sequences part of a "word" that may be broken by hyphen (``-``);
+        this implementation corrects both.
+ """
+ # Figure out when indent is larger than the specified width, and make
+ # sure at least one character is stripped off on every pass
+ space_left = 1 if width < 1 else width - cur_len
+ # If we're allowed to break long words, then do so: put as much
+ # of the next chunk onto the current line as will fit.
+
+ if self.break_long_words:
+ term = self.term
+ chunk = reversed_chunks[-1]
+ idx = nxt = 0
+ for text, _ in iter_parse(term, chunk):
+ nxt += len(text)
+ if Sequence(chunk[:nxt], term).length() > space_left:
+ break
+ idx = nxt
+ cur_line.append(chunk[:idx])
+ reversed_chunks[-1] = chunk[idx:]
+
+ # Otherwise, we have to preserve the long word intact. Only add
+ # it to the current line if there's nothing already there --
+ # that minimizes how much we violate the width constraint.
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ # If we're not allowed to break long words, and there's already
+ # text on the current line, do nothing. Next time through the
+ # main loop of _wrap_chunks(), we'll wind up here again, but
+ # cur_len will be zero, so the next line will be entirely
+ # devoted to the long word that we can't handle right now.
+
+
+SequenceTextWrapper.__doc__ = textwrap.TextWrapper.__doc__
+
+
+class Sequence(six.text_type):
+ """
+ A "sequence-aware" version of the base :class:`str` class.
+
+ This unicode-derived class understands the effect of escape sequences
+ of printable length, allowing a properly implemented :meth:`rjust`,
+ :meth:`ljust`, :meth:`center`, and :meth:`length`.
+ """
+
+ def __new__(cls, sequence_text, term):
+ # pylint: disable = missing-return-doc, missing-return-type-doc
+ """
+ Class constructor.
+
+ :arg str sequence_text: A string that may contain sequences.
+ :arg blessed.Terminal term: :class:`~.Terminal` instance.
+ """
+ new = six.text_type.__new__(cls, sequence_text)
+ new._term = term
+ return new
+
+ def ljust(self, width, fillchar=u' '):
+ """
+ Return string containing sequences, left-adjusted.
+
+        :arg int width: Total width given to left-adjust ``text``.
+ :arg str fillchar: String for padding right-of ``text``.
+ :returns: String of ``text``, left-aligned by ``width``.
+ :rtype: str
+ """
+ rightside = fillchar * int(
+ (max(0.0, float(width.__index__() - self.length()))) / float(len(fillchar)))
+ return u''.join((self, rightside))
+
+ def rjust(self, width, fillchar=u' '):
+ """
+ Return string containing sequences, right-adjusted.
+
+        :arg int width: Total width given to right-adjust ``text``.
+ :arg str fillchar: String for padding left-of ``text``.
+ :returns: String of ``text``, right-aligned by ``width``.
+ :rtype: str
+ """
+ leftside = fillchar * int(
+ (max(0.0, float(width.__index__() - self.length()))) / float(len(fillchar)))
+ return u''.join((leftside, self))
+
+ def center(self, width, fillchar=u' '):
+ """
+ Return string containing sequences, centered.
+
+        :arg int width: Total width given to center ``text``.
+ :arg str fillchar: String for padding left and right-of ``text``.
+ :returns: String of ``text``, centered by ``width``.
+ :rtype: str
+ """
+ split = max(0.0, float(width.__index__()) - self.length()) / 2
+ leftside = fillchar * int(
+ (max(0.0, math.floor(split))) / float(len(fillchar)))
+ rightside = fillchar * int(
+ (max(0.0, math.ceil(split))) / float(len(fillchar)))
+ return u''.join((leftside, self, rightside))
+
+ def truncate(self, width):
+ """
+ Truncate a string in a sequence-aware manner.
+
+ Any printable characters beyond ``width`` are removed, while all
+        sequences remain in place. Horizontal sequences are first expanded
+ by :meth:`padd`.
+
+ :arg int width: The printable width to truncate the string to.
+ :rtype: str
+ :returns: String truncated to at most ``width`` printable characters.
+ """
+ output = ""
+ current_width = 0
+ target_width = width.__index__()
+ parsed_seq = iter_parse(self._term, self.padd())
+
+ # Retain all text until non-cap width reaches desired width
+ for text, cap in parsed_seq:
+ if not cap:
+ # use wcwidth clipped to 0 because it can sometimes return -1
+ current_width += max(wcwidth(text), 0)
+ if current_width > target_width:
+ break
+ output += text
+
+ # Return with remaining caps appended
+ return output + ''.join(text for text, cap in parsed_seq if cap)
+
+ def length(self):
+ r"""
+ Return the printable length of string containing sequences.
+
+ Strings containing ``term.left`` or ``\b`` will cause "overstrike",
+        but a length less than 0 is never returned. So ``_\b+`` is a
+ length of 1 (displays as ``+``), but ``\b`` alone is simply a
+ length of 0.
+
+ Some characters may consume more than one cell, mainly those CJK
+ Unified Ideographs (Chinese, Japanese, Korean) defined by Unicode
+ as half or full-width characters.
+
+ For example:
+
+ >>> from blessed import Terminal
+ >>> from blessed.sequences import Sequence
+ >>> term = Terminal()
+ >>> msg = term.clear + term.red(u'コンニチハ')
+ >>> Sequence(msg, term).length()
+ 10
+
+        .. note:: Although accounted for, strings containing sequences such
+            as ``term.clear`` will not give accurate returns; such a sequence
+            is not considered lengthy (it measures a length of 0).
+ """
+ # because control characters may return -1, "clip" their length to 0.
+ return sum(max(wcwidth(w_char), 0) for w_char in self.padd(strip=True))
+
+ def strip(self, chars=None):
+ """
+ Return string of sequences, leading and trailing whitespace removed.
+
+ :arg str chars: Remove characters in chars instead of whitespace.
+ :rtype: str
+        :returns: string with sequences, leading, and trailing whitespace removed.
+ """
+ return self.strip_seqs().strip(chars)
+
+ def lstrip(self, chars=None):
+ """
+ Return string of all sequences and leading whitespace removed.
+
+ :arg str chars: Remove characters in chars instead of whitespace.
+ :rtype: str
+        :returns: string with sequences and leading whitespace removed.
+ """
+ return self.strip_seqs().lstrip(chars)
+
+ def rstrip(self, chars=None):
+ """
+ Return string of all sequences and trailing whitespace removed.
+
+ :arg str chars: Remove characters in chars instead of whitespace.
+ :rtype: str
+        :returns: string with sequences and trailing whitespace removed.
+ """
+ return self.strip_seqs().rstrip(chars)
+
+ def strip_seqs(self):
+ """
+ Return ``text`` stripped of only its terminal sequences.
+
+ :rtype: str
+ :returns: Text with terminal sequences removed
+ """
+ return self.padd(strip=True)
+
+ def padd(self, strip=False):
+ """
+ Return non-destructive horizontal movement as destructive spacing.
+
+ :arg bool strip: Strip terminal sequences
+ :rtype: str
+ :returns: Text adjusted for horizontal movement
+ """
+ outp = ''
+ for text, cap in iter_parse(self._term, self):
+ if not cap:
+ outp += text
+ continue
+
+ value = cap.horizontal_distance(text)
+ if value > 0:
+ outp += ' ' * value
+ elif value < 0:
+ outp = outp[:value]
+ elif not strip:
+ outp += text
+ return outp
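+
+# A brief sketch of Sequence in use (illustrative; assumes a Terminal
+# instance ``term``):
+#
+#     msg = Sequence(term.bold(u'ok'), term)
+#     msg.length()        # -> 2: the bold/normal sequences are zero-width
+#     msg.ljust(10)       # pads with 8 trailing spaces, sequences kept intact
+#     msg.strip_seqs()    # -> u'ok'
+#     Sequence(u'x' + term.move_right + u'y', term).padd()   # -> u'x y'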
+
+
+def iter_parse(term, text):
+ """
+ Generator yields (text, capability) for characters of ``text``.
+
+ value for ``capability`` may be ``None``, where ``text`` is
+ :class:`str` of length 1. Otherwise, ``text`` is a full
+ matching sequence of given capability.
+ """
+ for match in term._caps_compiled_any.finditer(text): # pylint: disable=protected-access
+ name = match.lastgroup
+ value = match.group(name)
+ if name == 'MISMATCH':
+ yield (value, None)
+ else:
+ yield value, term.caps[name]
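+
+# A short usage sketch (illustrative; assumes a Terminal instance ``term``):
+# plain characters arrive as (char, None), full sequences as (seq, Termcap):
+#
+#     for text, cap in iter_parse(term, term.red(u'hi')):
+#         print(repr(text), cap.name if cap else None)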
+
+
+def measure_length(text, term):
+ """
+    .. deprecated:: 1.12.0
+
+ :rtype: int
+ :returns: Length of the first sequence in the string
+ """
+ try:
+ text, capability = next(iter_parse(term, text))
+ if capability:
+ return len(text)
+ except StopIteration:
+ return 0
+ return 0
diff --git a/third_party/python/blessed/blessed/sequences.pyi b/third_party/python/blessed/blessed/sequences.pyi
new file mode 100644
index 0000000000..4460b7a466
--- /dev/null
+++ b/third_party/python/blessed/blessed/sequences.pyi
@@ -0,0 +1,55 @@
+# std imports
+import textwrap
+from typing import Any, Type, Tuple, Pattern, TypeVar, Iterator, Optional, SupportsIndex
+
+# local
+from .terminal import Terminal
+
+_T = TypeVar("_T")
+
+class Termcap:
+ name: str = ...
+ pattern: str = ...
+ attribute: str = ...
+ def __init__(self, name: str, pattern: str, attribute: str) -> None: ...
+ @property
+ def named_pattern(self) -> str: ...
+ @property
+ def re_compiled(self) -> Pattern[str]: ...
+ @property
+ def will_move(self) -> bool: ...
+ def horizontal_distance(self, text: str) -> int: ...
+ @classmethod
+ def build(
+ cls,
+ name: str,
+ capability: str,
+ attribute: str,
+ nparams: int = ...,
+ numeric: int = ...,
+ match_grouped: bool = ...,
+ match_any: bool = ...,
+ match_optional: bool = ...,
+ ) -> "Termcap": ...
+
+class SequenceTextWrapper(textwrap.TextWrapper):
+ term: Terminal = ...
+ def __init__(self, width: int, term: Terminal, **kwargs: Any) -> None: ...
+
+class Sequence(str):
+ def __new__(cls: Type[_T], sequence_text: str, term: Terminal) -> _T: ...
+ def ljust(self, width: SupportsIndex, fillchar: str = ...) -> str: ...
+ def rjust(self, width: SupportsIndex, fillchar: str = ...) -> str: ...
+ def center(self, width: SupportsIndex, fillchar: str = ...) -> str: ...
+ def truncate(self, width: SupportsIndex) -> str: ...
+ def length(self) -> int: ...
+ def strip(self, chars: Optional[str] = ...) -> str: ...
+ def lstrip(self, chars: Optional[str] = ...) -> str: ...
+ def rstrip(self, chars: Optional[str] = ...) -> str: ...
+ def strip_seqs(self) -> str: ...
+ def padd(self, strip: bool = ...) -> str: ...
+
+def iter_parse(
+ term: Terminal, text: str
+) -> Iterator[Tuple[str, Optional[Termcap]]]: ...
+def measure_length(text: str, term: Terminal) -> int: ...
diff --git a/third_party/python/blessed/blessed/terminal.py b/third_party/python/blessed/blessed/terminal.py
new file mode 100644
index 0000000000..38bd2bb66b
--- /dev/null
+++ b/third_party/python/blessed/blessed/terminal.py
@@ -0,0 +1,1502 @@
+# -*- coding: utf-8 -*-
+# pylint: disable=too-many-lines
+"""Module containing :class:`Terminal`, the primary API entry point."""
+# std imports
+import os
+import re
+import sys
+import time
+import codecs
+import locale
+import select
+import struct
+import platform
+import warnings
+import functools
+import contextlib
+import collections
+
+# local
+from .color import COLOR_DISTANCE_ALGORITHMS
+from .keyboard import (_time_left,
+ _read_until,
+ resolve_sequence,
+ get_keyboard_codes,
+ get_leading_prefixes,
+ get_keyboard_sequences)
+from .sequences import Termcap, Sequence, SequenceTextWrapper
+from .colorspace import RGB_256TABLE
+from .formatters import (COLORS,
+ COMPOUNDABLES,
+ FormattingString,
+ NullCallableString,
+ ParameterizingString,
+ FormattingOtherString,
+ split_compound,
+ resolve_attribute,
+ resolve_capability)
+from ._capabilities import CAPABILITY_DATABASE, CAPABILITIES_ADDITIVES, CAPABILITIES_RAW_MIXIN
+
+# isort: off
+
+try:
+ InterruptedError
+except NameError:
+ # alias py2 exception to py3
+ # pylint: disable=redefined-builtin
+ InterruptedError = select.error
+
+
+HAS_TTY = True
+if platform.system() == 'Windows':
+ IS_WINDOWS = True
+ import jinxed as curses # pylint: disable=import-error
+ from jinxed.win32 import get_console_input_encoding # pylint: disable=import-error
+else:
+ IS_WINDOWS = False
+ import curses
+
+ try:
+ import fcntl
+ import termios
+ import tty
+ except ImportError:
+ _TTY_METHODS = ('setraw', 'cbreak', 'kbhit', 'height', 'width')
+ _MSG_NOSUPPORT = (
+ "One or more of the modules: 'termios', 'fcntl', and 'tty' "
+ "are not found on your platform '{platform}'. "
+ "The following methods of Terminal are dummy/no-op "
+ "unless a deriving class overrides them: {tty_methods}."
+ .format(platform=platform.system(),
+ tty_methods=', '.join(_TTY_METHODS)))
+ warnings.warn(_MSG_NOSUPPORT)
+ HAS_TTY = False
+
+_CUR_TERM = None # See comments at end of file
+
+
+class Terminal(object):
+ """
+ An abstraction for color, style, positioning, and input in the terminal.
+
+ This keeps the endless calls to ``tigetstr()`` and ``tparm()`` out of your code, acts
+ intelligently when somebody pipes your output to a non-terminal, and abstracts over the
+ complexity of unbuffered keyboard input. It uses the terminfo database to remain portable across
+ terminal types.
+ """
+ # pylint: disable=too-many-instance-attributes,too-many-public-methods
+ # Too many public methods (28/20)
+ # Too many instance attributes (12/7)
+
+ #: Sugary names for commonly-used capabilities
+ _sugar = dict(
+ save='sc',
+ restore='rc',
+ clear_eol='el',
+ clear_bol='el1',
+ clear_eos='ed',
+ enter_fullscreen='smcup',
+ exit_fullscreen='rmcup',
+ move='cup',
+ move_yx='cup',
+ move_x='hpa',
+ move_y='vpa',
+ hide_cursor='civis',
+ normal_cursor='cnorm',
+ reset_colors='op',
+ normal='sgr0',
+ reverse='rev',
+ italic='sitm',
+ no_italic='ritm',
+ shadow='sshm',
+ no_shadow='rshm',
+ standout='smso',
+ no_standout='rmso',
+ subscript='ssubm',
+ no_subscript='rsubm',
+ superscript='ssupm',
+ no_superscript='rsupm',
+ underline='smul',
+ no_underline='rmul',
+ cursor_report='u6',
+ cursor_request='u7',
+ terminal_answerback='u8',
+ terminal_enquire='u9',
+ )
+
+ def __init__(self, kind=None, stream=None, force_styling=False):
+ """
+ Initialize the terminal.
+
+ :arg str kind: A terminal string as taken by :func:`curses.setupterm`.
+ Defaults to the value of the ``TERM`` environment variable.
+
+        .. note:: Terminals within a single process must share a common
+ ``kind``. See :obj:`_CUR_TERM`.
+
+ :arg file stream: A file-like object representing the Terminal output.
+ Defaults to the original value of :obj:`sys.__stdout__`, like
+ :func:`curses.initscr` does.
+
+            If ``stream`` is not a tty, empty Unicode strings are returned for
+            all capability values, so that piping your program's output to a
+            pipe or file does not emit terminal sequences.
+
+ :arg bool force_styling: Whether to force the emission of capabilities
+ even if :obj:`sys.__stdout__` does not seem to be connected to a
+ terminal. If you want to force styling to not happen, use
+ ``force_styling=None``.
+
+ This comes in handy if users are trying to pipe your output through
+ something like ``less -r`` or build systems which support decoding
+ of terminal sequences.
+ """
+ # pylint: disable=global-statement,too-many-branches
+ global _CUR_TERM
+ self.errors = ['parameters: kind=%r, stream=%r, force_styling=%r' %
+ (kind, stream, force_styling)]
+ self._normal = None # cache normal attr, preventing recursive lookups
+ # we assume our input stream to be line-buffered until either the
+        # cbreak or raw context manager methods are entered with an attached tty.
+ self._line_buffered = True
+
+ self._stream = stream
+ self._keyboard_fd = None
+ self._init_descriptor = None
+ self._is_a_tty = False
+ self.__init__streams()
+
+ if IS_WINDOWS and self._init_descriptor is not None:
+ self._kind = kind or curses.get_term(self._init_descriptor)
+ else:
+ self._kind = kind or os.environ.get('TERM', 'dumb') or 'dumb'
+
+ self._does_styling = False
+ if force_styling is None and self.is_a_tty:
+ self.errors.append('force_styling is None')
+ elif force_styling or self.is_a_tty:
+ self._does_styling = True
+
+ if self.does_styling:
+ # Initialize curses (call setupterm), so things like tigetstr() work.
+ try:
+ curses.setupterm(self._kind, self._init_descriptor)
+ except curses.error as err:
+ msg = 'Failed to setupterm(kind={0!r}): {1}'.format(self._kind, err)
+ warnings.warn(msg)
+ self.errors.append(msg)
+ self._kind = None
+ self._does_styling = False
+ else:
+ if _CUR_TERM is None or self._kind == _CUR_TERM:
+ _CUR_TERM = self._kind
+ else:
+ # termcap 'kind' is immutable in a python process! Once
+ # initialized by setupterm, it is unsupported by the
+ # 'curses' module to change the terminal type again. If you
+ # are a downstream developer and you need this
+ # functionality, consider sub-processing, instead.
+ warnings.warn(
+ 'A terminal of kind "%s" has been requested; due to an'
+ ' internal python curses bug, terminal capabilities'
+ ' for a terminal of kind "%s" will continue to be'
+ ' returned for the remainder of this process.' % (
+ self._kind, _CUR_TERM,))
+
+ self.__init__color_capabilities()
+ self.__init__capabilities()
+ self.__init__keycodes()
+
+ def __init__streams(self):
+ # pylint: disable=too-complex,too-many-branches
+ # Agree to disagree !
+ stream_fd = None
+
+ # Default stream is stdout
+ if self._stream is None:
+ self._stream = sys.__stdout__
+
+ if not hasattr(self._stream, 'fileno'):
+ self.errors.append('stream has no fileno method')
+ elif not callable(self._stream.fileno):
+ self.errors.append('stream.fileno is not callable')
+ else:
+ try:
+ stream_fd = self._stream.fileno()
+ except ValueError as err:
+ # The stream is not a file, such as the case of StringIO, or, when it has been
+ # "detached", such as might be the case of stdout in some test scenarios.
+ self.errors.append('Unable to determine output stream file descriptor: %s' % err)
+ else:
+ self._is_a_tty = os.isatty(stream_fd)
+ if not self._is_a_tty:
+ self.errors.append('stream not a TTY')
+
+ # Keyboard valid as stdin only when output stream is stdout or stderr and is a tty.
+ if self._stream in (sys.__stdout__, sys.__stderr__):
+ try:
+ self._keyboard_fd = sys.__stdin__.fileno()
+ except (AttributeError, ValueError) as err:
+ self.errors.append('Unable to determine input stream file descriptor: %s' % err)
+ else:
+            # _keyboard_fd only non-None if both stdin and stdout are a tty.
+ if not self.is_a_tty:
+ self.errors.append('Output stream is not a TTY')
+ self._keyboard_fd = None
+ elif not os.isatty(self._keyboard_fd):
+ self.errors.append('Input stream is not a TTY')
+ self._keyboard_fd = None
+ else:
+ self.errors.append('Output stream is not a default stream')
+
+ # The descriptor to direct terminal initialization sequences to.
+ self._init_descriptor = stream_fd
+ if stream_fd is None:
+ try:
+ self._init_descriptor = sys.__stdout__.fileno()
+ except ValueError as err:
+ self.errors.append('Unable to determine __stdout__ file descriptor: %s' % err)
+
+ def __init__color_capabilities(self):
+ self._color_distance_algorithm = 'cie2000'
+ if not self.does_styling:
+ self.number_of_colors = 0
+ elif IS_WINDOWS or os.environ.get('COLORTERM') in ('truecolor', '24bit'):
+ self.number_of_colors = 1 << 24
+ else:
+ self.number_of_colors = max(0, curses.tigetnum('colors') or -1)
+
+ def __clear_color_capabilities(self):
+ for cached_color_cap in set(dir(self)) & COLORS:
+ delattr(self, cached_color_cap)
+
+ def __init__capabilities(self):
+        # It is important that we lay these in their ordered direction, so
+        # that our preferred capability is matched first: 'color' over
+        # 'set_a_attributes1', for example.
+ self.caps = collections.OrderedDict()
+
+ # some static injected patterns, esp. without named attribute access.
+ for name, (attribute, pattern) in CAPABILITIES_ADDITIVES.items():
+ self.caps[name] = Termcap(name, pattern, attribute)
+
+ for name, (attribute, kwds) in CAPABILITY_DATABASE.items():
+ if self.does_styling:
+ # attempt dynamic lookup
+ cap = getattr(self, attribute)
+ if cap:
+ self.caps[name] = Termcap.build(
+ name, cap, attribute, **kwds)
+ continue
+
+ # fall-back
+ pattern = CAPABILITIES_RAW_MIXIN.get(name)
+ if pattern:
+ self.caps[name] = Termcap(name, pattern, attribute)
+
+ # make a compiled named regular expression table
+ self.caps_compiled = re.compile(
+ '|'.join(cap.pattern for name, cap in self.caps.items()))
+
+ # for tokenizer, the '.lastgroup' is the primary lookup key for
+ # 'self.caps', unless 'MISMATCH'; then it is an unmatched character.
+ self._caps_compiled_any = re.compile('|'.join(
+ cap.named_pattern for name, cap in self.caps.items()
+ ) + '|(?P<MISMATCH>.)')
+ self._caps_unnamed_any = re.compile('|'.join(
+ '({0})'.format(cap.pattern) for name, cap in self.caps.items()
+ ) + '|(.)')
+
+ def __init__keycodes(self):
+ # Initialize keyboard data determined by capability.
+ # Build database of int code <=> KEY_NAME.
+ self._keycodes = get_keyboard_codes()
+
+ # Store attributes as: self.KEY_NAME = code.
+ for key_code, key_name in self._keycodes.items():
+ setattr(self, key_name, key_code)
+
+ # Build database of sequence <=> KEY_NAME.
+ self._keymap = get_keyboard_sequences(self)
+
+ # build set of prefixes of sequences
+ self._keymap_prefixes = get_leading_prefixes(self._keymap)
+
+ # keyboard stream buffer
+ self._keyboard_buf = collections.deque()
+
+ if self._keyboard_fd is not None:
+ # set input encoding and initialize incremental decoder
+
+ if IS_WINDOWS:
+ self._encoding = get_console_input_encoding() \
+ or locale.getpreferredencoding() or 'UTF-8'
+ else:
+ self._encoding = locale.getpreferredencoding() or 'UTF-8'
+
+ try:
+ self._keyboard_decoder = codecs.getincrementaldecoder(self._encoding)()
+ except LookupError as err:
+ # encoding is illegal or unsupported, use 'UTF-8'
+ warnings.warn('LookupError: {0}, defaulting to UTF-8 for keyboard.'.format(err))
+ self._encoding = 'UTF-8'
+ self._keyboard_decoder = codecs.getincrementaldecoder(self._encoding)()
+
+ def __getattr__(self, attr):
+ r"""
+ Return a terminal capability as Unicode string.
+
+ For example, ``term.bold`` is a unicode string that may be prepended
+ to text to set the video attribute for bold, which should also be
+ terminated with the pairing :attr:`normal`. This capability
+ returns a callable, so you can use ``term.bold("hi")`` which
+ results in the joining of ``(term.bold, "hi", term.normal)``.
+
+ Compound formatters may also be used. For example::
+
+ >>> term.bold_blink_red_on_green("merry x-mas!")
+
+ For a parameterized capability such as ``move`` (or ``cup``), pass the
+ parameters as positional arguments::
+
+ >>> term.move(line, column)
+
+ See the manual page `terminfo(5)
+ <https://invisible-island.net/ncurses/man/terminfo.5.html>`_ for a
+ complete list of capabilities and their arguments.
+ """
+ if not self._does_styling:
+ return NullCallableString()
+ # Fetch the missing 'attribute' into some kind of curses-resolved
+ # capability, and cache by attaching to this Terminal class instance.
+ #
+ # Note that this will prevent future calls to __getattr__(), but
+ # that's precisely the idea of the cache!
+ val = resolve_attribute(self, attr)
+ setattr(self, attr, val)
+ return val
+
+ @property
+ def kind(self):
+ """
+ Read-only property: Terminal kind determined on class initialization.
+
+ :rtype: str
+ """
+ return self._kind
+
+ @property
+ def does_styling(self):
+ """
+ Read-only property: Whether this class instance may emit sequences.
+
+ :rtype: bool
+ """
+ return self._does_styling
+
+ @property
+ def is_a_tty(self):
+ """
+ Read-only property: Whether :attr:`~.stream` is a terminal.
+
+ :rtype: bool
+ """
+ return self._is_a_tty
+
+ @property
+ def height(self):
+ """
+ Read-only property: Height of the terminal (in number of lines).
+
+ :rtype: int
+ """
+ return self._height_and_width().ws_row
+
+ @property
+ def width(self):
+ """
+ Read-only property: Width of the terminal (in number of columns).
+
+ :rtype: int
+ """
+ return self._height_and_width().ws_col
+
+ @property
+ def pixel_height(self):
+ """
+        Read-only property: Height of the terminal (in pixels).
+
+ :rtype: int
+ """
+ return self._height_and_width().ws_ypixel
+
+ @property
+ def pixel_width(self):
+ """
+        Read-only property: Width of the terminal (in pixels).
+
+ :rtype: int
+ """
+ return self._height_and_width().ws_xpixel
+
+ @staticmethod
+ def _winsize(fd):
+ """
+ Return named tuple describing size of the terminal by ``fd``.
+
+ If the given platform does not have modules :mod:`termios`,
+ :mod:`fcntl`, or :mod:`tty`, window size of 80 columns by 25
+ rows is always returned.
+
+        :arg int fd: file descriptor queried for its window size.
+ :raises IOError: the file descriptor ``fd`` is not a terminal.
+ :rtype: WINSZ
+ :returns: named tuple describing size of the terminal
+
+ WINSZ is a :class:`collections.namedtuple` instance, whose structure
+ directly maps to the return value of the :const:`termios.TIOCGWINSZ`
+ ioctl return value. The return parameters are:
+
+        - ``ws_row``: height of terminal by its number of character cells.
+        - ``ws_col``: width of terminal by its number of character cells.
+ - ``ws_xpixel``: width of terminal by pixels (not accurate).
+ - ``ws_ypixel``: height of terminal by pixels (not accurate).
+ """
+ if HAS_TTY:
+ # pylint: disable=protected-access
+ data = fcntl.ioctl(fd, termios.TIOCGWINSZ, WINSZ._BUF)
+ return WINSZ(*struct.unpack(WINSZ._FMT, data))
+ return WINSZ(ws_row=25, ws_col=80, ws_xpixel=0, ws_ypixel=0)
+
+ def _height_and_width(self):
+ """
+ Return a tuple of (terminal height, terminal width).
+
+ If :attr:`stream` or :obj:`sys.__stdout__` is not a tty or does not
+ support :func:`fcntl.ioctl` of :const:`termios.TIOCGWINSZ`, a window
+ size of 80 columns by 25 rows is returned for any values not
+ represented by environment variables ``LINES`` and ``COLUMNS``, which
+ is the default text mode of IBM PC compatibles.
+
+ :rtype: WINSZ
+ :returns: Named tuple specifying the terminal size
+
+ WINSZ is a :class:`collections.namedtuple` instance, whose structure
+ directly maps to the return value of the :const:`termios.TIOCGWINSZ`
+ ioctl return value. The return parameters are:
+
+ - ``ws_row``: height of terminal by its number of cell rows.
+ - ``ws_col``: width of terminal by its number of cell columns.
+ - ``ws_xpixel``: width of terminal by pixels (not accurate).
+ - ``ws_ypixel``: height of terminal by pixels (not accurate).
+
+ .. note:: the peculiar (height, width, width, height) order, which
+ matches the return order of TIOCGWINSZ!
+ """
+ for fd in (self._init_descriptor, sys.__stdout__):
+ try:
+ if fd is not None:
+ return self._winsize(fd)
+ except (IOError, OSError, ValueError, TypeError): # pylint: disable=overlapping-except
+ pass
+
+ return WINSZ(ws_row=int(os.getenv('LINES', '25')),
+ ws_col=int(os.getenv('COLUMNS', '80')),
+ ws_xpixel=None,
+ ws_ypixel=None)
+
+ @contextlib.contextmanager
+ def location(self, x=None, y=None):
+ """
+ Context manager for temporarily moving the cursor.
+
+ :arg int x: horizontal position, from left, *0*, to right edge of screen, *self.width - 1*.
+ :arg int y: vertical position, from top, *0*, to bottom of screen, *self.height - 1*.
+ :return: a context manager.
+ :rtype: Iterator
+
+        Move the cursor to a certain position on entry, let you do any kind of
+        I/O there, and upon exit return the cursor to its original position:
+
+
+ .. code-block:: python
+
+ term = Terminal()
+ with term.location(y=0, x=0):
+ for row_num in range(term.height-1):
+                    print('Row #{0}'.format(row_num))
+ print(term.clear_eol + 'Back to original location.')
+
+ Specify ``x`` to move to a certain column, ``y`` to move to a certain
+ row, both, or neither. If you specify neither, only the saving and
+ restoration of cursor position will happen. This can be useful if you
+ simply want to restore your place after doing some manual cursor
+ movement.
+
+ Calls cannot be nested: only one should be entered at a time.
+
+ .. note:: The argument order *(x, y)* differs from the return value order *(y, x)*
+ of :meth:`get_location`, or argument order *(y, x)* of :meth:`move`. This is
+            for API compatibility with the blessings library, sorry for the trouble!
+ """
+ # pylint: disable=invalid-name
+ # Invalid argument name "x"
+
+ # Save position and move to the requested column, row, or both:
+ self.stream.write(self.save)
+ if x is not None and y is not None:
+ self.stream.write(self.move(y, x))
+ elif x is not None:
+ self.stream.write(self.move_x(x))
+ elif y is not None:
+ self.stream.write(self.move_y(y))
+ try:
+ self.stream.flush()
+ yield
+ finally:
+ # Restore original cursor position:
+ self.stream.write(self.restore)
+ self.stream.flush()
+
+ def get_location(self, timeout=None):
+ r"""
+ Return tuple (row, column) of cursor position.
+
+        :arg float timeout: Return after ``timeout`` seconds have elapsed, with value
+            ``(-1, -1)`` indicating that the remote end did not respond.
+ :rtype: tuple
+ :returns: cursor position as tuple in form of ``(y, x)``. When a timeout is specified,
+ always ensure the return value is checked for ``(-1, -1)``.
+
+ The location of the cursor is determined by emitting the ``u7`` terminal capability, or
+ VT100 `Query Cursor Position
+ <https://www2.ccs.neu.edu/research/gpc/VonaUtils/vona/terminal/vtansi.htm#status>`_
+ when such capability is undefined, which elicits a response from a reply string described by
+ capability ``u6``, or again VT100's definition of ``\x1b[%i%d;%dR`` when undefined.
+
+ The ``(y, x)`` return value matches the parameter order of the :meth:`move_xy` capability.
+ The following sequence should cause the cursor to not move at all::
+
+ >>> term = Terminal()
+            >>> term.move_yx(*term.get_location())
+
+ And the following should assert True with a terminal:
+
+ >>> term = Terminal()
+ >>> given_y, given_x = 10, 20
+ >>> with term.location(y=given_y, x=given_x):
+ ... result_y, result_x = term.get_location()
+ ...
+ >>> assert given_x == result_x, (given_x, result_x)
+ >>> assert given_y == result_y, (given_y, result_y)
+
+ """
+ # Local lines attached by termios and remote login protocols such as
+ # ssh and telnet both provide a means to determine the window
+ # dimensions of a connected client, but **no means to determine the
+ # location of the cursor**.
+ #
+ # from https://invisible-island.net/ncurses/terminfo.src.html,
+ #
+ # > The System V Release 4 and XPG4 terminfo format defines ten string
+ # > capabilities for use by applications, <u0>...<u9>. In this file,
+ # > we use certain of these capabilities to describe functions which
+ # > are not covered by terminfo. The mapping is as follows:
+ # >
+ # > u9 terminal enquire string (equiv. to ANSI/ECMA-48 DA)
+ # > u8 terminal answerback description
+ # > u7 cursor position request (equiv. to VT100/ANSI/ECMA-48 DSR 6)
+ # > u6 cursor position report (equiv. to ANSI/ECMA-48 CPR)
+ query_str = self.u7 or u'\x1b[6n'
+ response_str = getattr(self, self.caps['cursor_report'].attribute) or u'\x1b[%i%d;%dR'
+
+ # determine response format as a regular expression
+ response_re = self.caps['cursor_report'].re_compiled
+
+ # Avoid changing user's desired raw or cbreak mode if already entered,
+ # by entering cbreak mode ourselves. This is necessary to receive user
+ # input without awaiting a human to press the return key. This mode
+        # also disables echo, which we should also hide, as our input is a
+        # sequence that is not meaningful for display as an output sequence.
+
+ ctx = None
+ try:
+ if self._line_buffered:
+ ctx = self.cbreak()
+ ctx.__enter__() # pylint: disable=no-member
+
+ # emit the 'query cursor position' sequence,
+ self.stream.write(query_str)
+ self.stream.flush()
+
+ # expect a response,
+ match, data = _read_until(term=self,
+ pattern=response_re,
+ timeout=timeout)
+
+ # ensure response sequence is excluded from subsequent input,
+ if match:
+ data = (data[:match.start()] + data[match.end():])
+
+ # re-buffer keyboard data, if any
+ self.ungetch(data)
+
+ if match:
+ # return matching sequence response, the cursor location.
+ row, col = (int(val) for val in match.groups())
+
+ # Per https://invisible-island.net/ncurses/terminfo.src.html
+ # The cursor position report (<u6>) string must contain two
+ # scanf(3)-style %d format elements. The first of these must
+            # correspond to the Y coordinate and the second to the X coordinate.
+ # If the string contains the sequence %i, it is taken as an
+ # instruction to decrement each value after reading it (this is
+ # the inverse sense from the cup string).
+ if u'%i' in response_str:
+ row -= 1
+ col -= 1
+ return row, col
+
+ finally:
+ if ctx is not None:
+ ctx.__exit__(None, None, None) # pylint: disable=no-member
+
+ # We chose to return an illegal value rather than an exception,
+ # favoring that users author function filters, such as max(0, y),
+ # rather than crowbarring such logic into an exception handler.
+ return -1, -1
+
+ @contextlib.contextmanager
+ def fullscreen(self):
+ """
+ Context manager that switches to secondary screen, restoring on exit.
+
+ Under the hood, this switches between the primary screen buffer and
+ the secondary one. The primary one is saved on entry and restored on
+ exit. Likewise, the secondary contents are also stable and are
+ faithfully restored on the next entry::
+
+ with term.fullscreen():
+ main()
+
+ .. note:: There is only one primary and one secondary screen buffer.
+ :meth:`fullscreen` calls cannot be nested, only one should be
+ entered at a time.
+ """
+ self.stream.write(self.enter_fullscreen)
+ self.stream.flush()
+ try:
+ yield
+ finally:
+ self.stream.write(self.exit_fullscreen)
+ self.stream.flush()
+
+ @contextlib.contextmanager
+ def hidden_cursor(self):
+ """
+ Context manager that hides the cursor, setting visibility on exit.
+
+ with term.hidden_cursor():
+ main()
+
+ .. note:: :meth:`hidden_cursor` calls cannot be nested: only one
+ should be entered at a time.
+ """
+ self.stream.write(self.hide_cursor)
+ self.stream.flush()
+ try:
+ yield
+ finally:
+ self.stream.write(self.normal_cursor)
+ self.stream.flush()
+
+ def move_xy(self, x, y):
+ """
+ A callable string that moves the cursor to the given ``(x, y)`` screen coordinates.
+
+ :arg int x: horizontal position, from left, *0*, to right edge of screen, *self.width - 1*.
+ :arg int y: vertical position, from top, *0*, to bottom of screen, *self.height - 1*.
+ :rtype: ParameterizingString
+ :returns: Callable string that moves the cursor to the given coordinates
+ """
+ # this is just a convenience alias to the built-in, but hidden 'move'
+ # attribute -- we encourage folks to use only (x, y) positional
+ # arguments, or, if they must use (y, x), then use the 'move_yx'
+ # alias.
+ return self.move(y, x)
+
+ def move_yx(self, y, x):
+ """
+ A callable string that moves the cursor to the given ``(y, x)`` screen coordinates.
+
+ :arg int y: vertical position, from top, *0*, to bottom of screen, *self.height - 1*.
+ :arg int x: horizontal position, from left, *0*, to right edge of screen, *self.width - 1*.
+ :rtype: ParameterizingString
+ :returns: Callable string that moves the cursor to the given coordinates
+ """
+ return self.move(y, x)
+
+ @property
+ def move_left(self):
+ """Move cursor 1 cells to the left, or callable string for n>1 cells."""
+ return FormattingOtherString(self.cub1, ParameterizingString(self.cub))
+
+ @property
+ def move_right(self):
+ """Move cursor 1 or more cells to the right, or callable string for n>1 cells."""
+ return FormattingOtherString(self.cuf1, ParameterizingString(self.cuf))
+
+ @property
+ def move_up(self):
+ """Move cursor 1 or more cells upwards, or callable string for n>1 cells."""
+ return FormattingOtherString(self.cuu1, ParameterizingString(self.cuu))
+
+ @property
+ def move_down(self):
+ """Move cursor 1 or more cells downwards, or callable string for n>1 cells."""
+ return FormattingOtherString(self.cud1, ParameterizingString(self.cud))
+
+ @property
+ def color(self):
+ """
+ A callable string that sets the foreground color.
+
+ :rtype: ParameterizingString
+
+ The capability is unparameterized until called and passed a number, at which point it
+ returns another string which represents a specific color change. This second string can
+ further be called to color a piece of text and set everything back to normal afterward.
+
+ This should not be used directly, but rather a specific color by name or
+ :meth:`~.Terminal.color_rgb` value.
+ """
+ if not self.does_styling:
+ return NullCallableString()
+ return ParameterizingString(self._foreground_color,
+ self.normal, 'color')
+
+ def color_rgb(self, red, green, blue):
+ """
+ Provides callable formatting string to set foreground color to the specified RGB color.
+
+ :arg int red: RGB value of Red.
+ :arg int green: RGB value of Green.
+ :arg int blue: RGB value of Blue.
+ :rtype: FormattingString
+ :returns: Callable string that sets the foreground color
+
+ If the terminal does not support RGB color, the nearest supported
+ color will be determined using :py:attr:`color_distance_algorithm`.
+ """
+ if self.number_of_colors == 1 << 24:
+ # "truecolor" 24-bit
+ fmt_attr = u'\x1b[38;2;{0};{1};{2}m'.format(red, green, blue)
+ return FormattingString(fmt_attr, self.normal)
+
+ # color by approximation to 256 or 16-color terminals
+ color_idx = self.rgb_downconvert(red, green, blue)
+ return FormattingString(self._foreground_color(color_idx), self.normal)
+
+ @property
+ def on_color(self):
+ """
+ A callable capability that sets the background color.
+
+ :rtype: ParameterizingString
+ """
+ if not self.does_styling:
+ return NullCallableString()
+ return ParameterizingString(self._background_color,
+ self.normal, 'on_color')
+
+ def on_color_rgb(self, red, green, blue):
+ """
+ Provides callable formatting string to set background color to the specified RGB color.
+
+ :arg int red: RGB value of Red.
+ :arg int green: RGB value of Green.
+ :arg int blue: RGB value of Blue.
+ :rtype: FormattingString
+        :returns: Callable string that sets the background color
+
+ If the terminal does not support RGB color, the nearest supported
+ color will be determined using :py:attr:`color_distance_algorithm`.
+ """
+ if self.number_of_colors == 1 << 24:
+ fmt_attr = u'\x1b[48;2;{0};{1};{2}m'.format(red, green, blue)
+ return FormattingString(fmt_attr, self.normal)
+
+ color_idx = self.rgb_downconvert(red, green, blue)
+ return FormattingString(self._background_color(color_idx), self.normal)
+
+ def formatter(self, value):
+ """
+ Provides callable formatting string to set color and other text formatting options.
+
+ :arg str value: Sugary, ordinary, or compound formatted terminal capability,
+ such as "red_on_white", "normal", "red", or "bold_on_black".
+ :rtype: :class:`FormattingString` or :class:`NullCallableString`
+ :returns: Callable string that sets color and other text formatting options
+
+ Calling ``term.formatter('bold_on_red')`` is equivalent to ``term.bold_on_red``, but a
+ string that is not a valid text formatter will return a :class:`NullCallableString`.
+ This is intended to allow validation of text formatters without the possibility of
+ inadvertently returning another terminal capability.
+ """
+ formatters = split_compound(value)
+ if all((fmt in COLORS or fmt in COMPOUNDABLES) for fmt in formatters):
+ return getattr(self, value)
+
+ return NullCallableString()
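+
+    # A hedged sketch of formatter() in use (illustrative): validating a
+    # user-supplied style name without risking arbitrary attribute access;
+    # an invalid name yields a NullCallableString, printing unstyled text:
+    #
+    #     style = term.formatter('bold_on_red')
+    #     print(style('alert!'))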
+
+ def rgb_downconvert(self, red, green, blue):
+ """
+ Translate an RGB color to a color code of the terminal's color depth.
+
+ :arg int red: RGB value of Red (0-255).
+ :arg int green: RGB value of Green (0-255).
+ :arg int blue: RGB value of Blue (0-255).
+ :rtype: int
+ :returns: Color code of downconverted RGB color
+ """
+        # Though pre-computing all 1 << 24 options is memory-intensive, a
+        # pre-computed "k-d tree" of 256 (x, y, z) vectors of a colorspace in
+        # 3 dimensions, such as a cone of HSV or simply the 255x255x255 RGB
+        # cube, would reduce any given rgb value to a nearest-neighbor search
+        # of 256 points. A k-d tree should be much faster, by sub-dividing /
+        # culling search points, than our "search all 256 points always"
+        # approach.
+ fn_distance = COLOR_DISTANCE_ALGORITHMS[self.color_distance_algorithm]
+ color_idx = 7
+ shortest_distance = None
+ for cmp_depth, cmp_rgb in enumerate(RGB_256TABLE):
+ cmp_distance = fn_distance(cmp_rgb, (red, green, blue))
+ if shortest_distance is None or cmp_distance < shortest_distance:
+ shortest_distance = cmp_distance
+ color_idx = cmp_depth
+ if cmp_depth >= self.number_of_colors:
+ break
+ return color_idx
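+
+    # A brief sketch (illustrative; assumes a color-capable Terminal ``term``):
+    #
+    #     idx = term.rgb_downconvert(255, 0, 0)   # nearest palette index to red
+    #     print(term.color(idx)(u'red-ish text'))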
+
+ @property
+ def normal(self):
+ """
+ A capability that resets all video attributes.
+
+ :rtype: str
+
+ ``normal`` is an alias for ``sgr0`` or ``exit_attribute_mode``. Any
+ styling attributes previously applied, such as foreground or
+ background colors, reverse video, or bold are reset to defaults.
+ """
+ if self._normal:
+ return self._normal
+ self._normal = resolve_capability(self, 'normal')
+ return self._normal
+
+ def link(self, url, text, url_id=''):
+ """
+        Display ``text`` that, when touched or clicked, navigates to ``url``.
+
+        Optional ``url_id`` may be specified, so that non-adjacent cells can reference a single
+        target; all cells painted with the same "id" will highlight on hover, rather than any
+        individual one, as described in "Hovering and underlining the id parameter" of gist
+        https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda.
+
+ :param str url: Hyperlink URL.
+ :param str text: Clickable text.
+ :param str url_id: Optional 'id'.
+ :rtype: str
+ :returns: String of ``text`` as a hyperlink to ``url``.
+ """
+ assert len(url) < 2000, (len(url), url)
+ if url_id:
+ assert len(str(url_id)) < 250, (len(str(url_id)), url_id)
+ params = 'id={0}'.format(url_id)
+ else:
+ params = ''
+ if not self.does_styling:
+ return text
+ return ('\x1b]8;{0};{1}\x1b\\{2}'
+ '\x1b]8;;\x1b\\'.format(params, url, text))
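+
+    # For example (illustrative): most modern emulators render this as
+    # clickable text, while older ones print only the plain text:
+    #
+    #     print(term.link('https://example.com', 'example'))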
+
+ @property
+ def stream(self):
+ """
+ Read-only property: stream the terminal outputs to.
+
+ This is a convenience attribute. It is used internally for implied
+ writes performed by context managers :meth:`~.hidden_cursor`,
+ :meth:`~.fullscreen`, :meth:`~.location`, and :meth:`~.keypad`.
+ """
+ return self._stream
+
+ @property
+ def number_of_colors(self):
+ """
+ Number of colors supported by terminal.
+
+ Common return values are 0, 8, 16, 256, or 1 << 24.
+
+ This may be used to test whether the terminal supports colors,
+ and at what depth, if that's a concern.
+
+        If this property is assigned a value of 88, the value 16 will be saved. This is due to
+        the rarity of 88 color support and the inconsistency of behavior between implementations.
+
+ Assigning this property to a value other than 0, 4, 8, 16, 88, 256, or 1 << 24 will
+ raise an :py:exc:`AssertionError`.
+ """
+ return self._number_of_colors
+
+ @number_of_colors.setter
+ def number_of_colors(self, value):
+ assert value in (0, 4, 8, 16, 88, 256, 1 << 24)
+ # Because 88 colors is rare and we can't guarantee consistent behavior,
+ # when 88 colors is detected, it is treated as 16 colors
+ self._number_of_colors = 16 if value == 88 else value
+ self.__clear_color_capabilities()
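+
+    # For example (illustrative): forcing 256-color behavior regardless of
+    # the terminfo-reported depth; an assignment of 88 is stored as 16:
+    #
+    #     term.number_of_colors = 256
+    #     term.number_of_colors = 88
+    #     assert term.number_of_colors == 16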
+
+ @property
+ def color_distance_algorithm(self):
+ """
+ Color distance algorithm used by :meth:`rgb_downconvert`.
+
+ The slowest, but most accurate, 'cie2000', is default. Other available options are 'rgb',
+ 'rgb-weighted', 'cie76', and 'cie94'.
+ """
+ return self._color_distance_algorithm
+
+ @color_distance_algorithm.setter
+ def color_distance_algorithm(self, value):
+ assert value in COLOR_DISTANCE_ALGORITHMS
+ self._color_distance_algorithm = value
+ self.__clear_color_capabilities()
+
+ @property
+ def _foreground_color(self):
+ """
+        Convenience capability to support :attr:`~.color`.
+
+        Prefers returning the sequence for capability ``setaf``, "Set foreground color
+        to #1, using ANSI escape". If the given terminal does not support such a
+        sequence, falls back to returning attribute ``setf``, "Set foreground color #1".
+ """
+ return self.setaf or self.setf
+
+ @property
+ def _background_color(self):
+ """
+ Convenience capability to support :attr:`~.on_color`.
+
+        Prefers returning the sequence for capability ``setab``, "Set background color
+        to #1, using ANSI escape". If the given terminal does not support such a
+        sequence, falls back to returning attribute ``setb``, "Set background color #1".
+ """
+ return self.setab or self.setb
+
+ def ljust(self, text, width=None, fillchar=u' '):
+ """
+ Left-align ``text``, which may contain terminal sequences.
+
+ :arg str text: String to be aligned
+ :arg int width: Total width to fill with aligned text. If
+ unspecified, the whole width of the terminal is filled.
+ :arg str fillchar: String for padding the right of ``text``
+ :rtype: str
+ :returns: String of ``text``, left-aligned by ``width``.
+ """
+ # Left justification is different from left alignment, but we continue
+ # the vocabulary error of the str method for polymorphism.
+ if width is None:
+ width = self.width
+ return Sequence(text, self).ljust(width, fillchar)
+
+ def rjust(self, text, width=None, fillchar=u' '):
+ """
+ Right-align ``text``, which may contain terminal sequences.
+
+ :arg str text: String to be aligned
+ :arg int width: Total width to fill with aligned text. If
+ unspecified, the whole width of the terminal is used.
+ :arg str fillchar: String for padding the left of ``text``
+ :rtype: str
+ :returns: String of ``text``, right-aligned by ``width``.
+ """
+ if width is None:
+ width = self.width
+ return Sequence(text, self).rjust(width, fillchar)
+
+ def center(self, text, width=None, fillchar=u' '):
+ """
+ Center ``text``, which may contain terminal sequences.
+
+ :arg str text: String to be centered
+ :arg int width: Total width in which to center text. If
+ unspecified, the whole width of the terminal is used.
+ :arg str fillchar: String for padding the left and right of ``text``
+ :rtype: str
+ :returns: String of ``text``, centered by ``width``
+ """
+ if width is None:
+ width = self.width
+ return Sequence(text, self).center(width, fillchar)
+
+ def truncate(self, text, width=None):
+ r"""
+ Truncate ``text`` to maximum ``width`` printable characters, retaining terminal sequences.
+
+ :arg str text: Text to truncate
+ :arg int width: The maximum width to truncate it to
+ :rtype: str
+ :returns: ``text`` truncated to at most ``width`` printable characters
+
+ >>> term.truncate(u'xyz\x1b[0;3m', 2)
+ u'xy\x1b[0;3m'
+ """
+ if width is None:
+ width = self.width
+ return Sequence(text, self).truncate(width)
+
+ def length(self, text):
+ u"""
+ Return printable length of a string containing sequences.
+
+ :arg str text: String to measure. May contain terminal sequences.
+ :rtype: int
+ :returns: The number of terminal character cells the string will occupy
+ when printed
+
+ Wide characters that consume 2 character cells are supported:
+
+ >>> term = Terminal()
+ >>> term.length(term.clear + term.red(u'コンニチハ'))
+ 10
+
+        .. note:: Sequences such as 'clear', which are considered
+            "movement sequences" because they would move the cursor to
+            position (y, x) of (0, 0), are evaluated as a printable length of
+            *0*.
+ """
+ return Sequence(text, self).length()
+
+ def strip(self, text, chars=None):
+ r"""
+ Return ``text`` without sequences and leading or trailing whitespace.
+
+ :rtype: str
+ :returns: Text with leading and trailing whitespace removed
+
+ >>> term.strip(u' \x1b[0;3m xyz ')
+ u'xyz'
+ """
+ return Sequence(text, self).strip(chars)
+
+ def rstrip(self, text, chars=None):
+ r"""
+ Return ``text`` without terminal sequences or trailing whitespace.
+
+ :rtype: str
+ :returns: Text with terminal sequences and trailing whitespace removed
+
+ >>> term.rstrip(u' \x1b[0;3m xyz ')
+ u' xyz'
+ """
+ return Sequence(text, self).rstrip(chars)
+
+ def lstrip(self, text, chars=None):
+ r"""
+ Return ``text`` without terminal sequences or leading whitespace.
+
+ :rtype: str
+ :returns: Text with terminal sequences and leading whitespace removed
+
+ >>> term.lstrip(u' \x1b[0;3m xyz ')
+ u'xyz '
+ """
+ return Sequence(text, self).lstrip(chars)
+
+ def strip_seqs(self, text):
+ r"""
+ Return ``text`` stripped of only its terminal sequences.
+
+ :rtype: str
+ :returns: Text with terminal sequences removed
+
+ >>> term.strip_seqs(u'\x1b[0;3mxyz')
+ u'xyz'
+ >>> term.strip_seqs(term.cuf(5) + term.red(u'test'))
+        u'     test'
+
+ .. note:: Non-destructive sequences that adjust horizontal distance
+ (such as ``\b`` or ``term.cuf(5)``) are replaced by destructive
+ space or erasing.
+ """
+ return Sequence(text, self).strip_seqs()
+
+ def split_seqs(self, text, maxsplit=0):
+ r"""
+ Return ``text`` split by individual character elements and sequences.
+
+ :arg str text: String containing sequences
+        :arg int maxsplit: When maxsplit is nonzero, at most maxsplit splits
+            occur, and the remainder of the string is returned as the final element
+            of the list (same meaning as the argument of :func:`re.split`).
+ :rtype: list[str]
+ :returns: List of sequences and individual characters
+
+ >>> term.split_seqs(term.underline(u'xyz'))
+ ['\x1b[4m', 'x', 'y', 'z', '\x1b(B', '\x1b[m']
+
+ >>> term.split_seqs(term.underline(u'xyz'), 1)
+ ['\x1b[4m', r'xyz\x1b(B\x1b[m']
+ """
+ pattern = self._caps_unnamed_any
+ result = []
+ for idx, match in enumerate(re.finditer(pattern, text)):
+ result.append(match.group())
+ if maxsplit and idx == maxsplit:
+ remaining = text[match.end():]
+ if remaining:
+ result[-1] += remaining
+ break
+ return result
+
+ def wrap(self, text, width=None, **kwargs):
+ r"""
+ Text-wrap a string, returning a list of wrapped lines.
+
+ :arg str text: Unlike :func:`textwrap.wrap`, ``text`` may contain
+ terminal sequences, such as colors, bold, or underline. By
+ default, tabs in ``text`` are expanded by
+ :func:`string.expandtabs`.
+ :arg int width: Unlike :func:`textwrap.wrap`, ``width`` will
+ default to the width of the attached terminal.
+ :arg \**kwargs: See :py:class:`textwrap.TextWrapper`
+ :rtype: list
+ :returns: List of wrapped lines
+
+ See :class:`textwrap.TextWrapper` for keyword arguments that can
+ customize wrapping behaviour.
+ """
+ width = self.width if width is None else width
+ wrapper = SequenceTextWrapper(width=width, term=self, **kwargs)
+ lines = []
+ for line in text.splitlines():
+ lines.extend(iter(wrapper.wrap(line)) if line.strip() else (u'',))
+
+ return lines
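+
+    # A short sketch of wrap() in use (illustrative; assumes a Terminal
+    # instance ``term``): wrapped lines keep their sequences, so styled
+    # text re-flows correctly at the terminal's width:
+    #
+    #     for line in term.wrap(term.bold(u'a long, styled paragraph ...')):
+    #         print(line)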
+
+ def getch(self):
+ """
+ Read, decode, and return the next byte from the keyboard stream.
+
+ :rtype: unicode
+ :returns: a single unicode character, or ``u''`` if a multi-byte
+ sequence has not yet been fully received.
+
+        This method name and behavior mimic curses ``getch(void)``; it
+        supports :meth:`inkey`, reading only one byte from
+        the keyboard stream at a time. This method should always return
+ without blocking if called after :meth:`kbhit` has returned True.
+
+ Implementors of alternate input stream methods should override
+ this method.
+ """
+ assert self._keyboard_fd is not None
+ byte = os.read(self._keyboard_fd, 1)
+ return self._keyboard_decoder.decode(byte, final=False)
+
+ def ungetch(self, text):
+ """
+ Buffer input data to be discovered by next call to :meth:`~.inkey`.
+
+ :arg str text: String to be buffered as keyboard input.
+ """
+ self._keyboard_buf.extendleft(text)
+
+ def kbhit(self, timeout=None):
+ """
+ Return whether a keypress has been detected on the keyboard.
+
+ This method is used by :meth:`inkey` to determine if a byte may
+ be read using :meth:`getch` without blocking. The standard
+ implementation simply uses the :func:`select.select` call on stdin.
+
+        :arg float timeout: When ``timeout`` is 0, this call is
+            non-blocking; when None (default), it blocks indefinitely
+            until a keypress is detected. When ``timeout`` is a
+            positive number, it returns after ``timeout`` seconds have
+            elapsed (float).
+ :rtype: bool
+        :returns: True if a keypress is waiting to be read on the keyboard
+ attached to this terminal. When input is not a terminal, False is
+ always returned.
+ """
+ stime = time.time()
+ ready_r = [None, ]
+ check_r = [self._keyboard_fd] if self._keyboard_fd is not None else []
+
+ while HAS_TTY:
+ try:
+ ready_r, _, _ = select.select(check_r, [], [], timeout)
+ except InterruptedError:
+                # Beginning with python3.5, InterruptedError is no longer thrown
+ # https://www.python.org/dev/peps/pep-0475/
+ #
+ # For previous versions of python, we take special care to
+ # retry select on InterruptedError exception, namely to handle
+ # a custom SIGWINCH handler. When installed, it would cause
+ # select() to be interrupted with errno 4 (EAGAIN).
+ #
+                # Just as in python3.5, it is ignored, and a new timeout value
+                # is derived from the previous one unless that timeout becomes
+                # negative; because the signal handler has then blocked beyond
+                # the timeout, False is returned. Otherwise, when timeout is
+                # None, we continue to block indefinitely (default).
+ if timeout is not None:
+ # subtract time already elapsed,
+ timeout -= time.time() - stime
+ if timeout > 0:
+ continue
+ # no time remains after handling exception (rare)
+ ready_r = [] # pragma: no cover
+ break # pragma: no cover
+ else:
+ break
+
+ return False if self._keyboard_fd is None else check_r == ready_r
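+
+    # A polling sketch (illustrative): non-blocking keyboard checks while
+    # doing background work; cbreak mode makes keystrokes readable at once:
+    #
+    #     with term.cbreak():
+    #         while not term.kbhit(timeout=0.1):
+    #             pass                    # background work here
+    #         key = term.inkey()          # returns without blocking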
+
+ @contextlib.contextmanager
+ def cbreak(self):
+ """
+ Allow each keystroke to be read immediately after it is pressed.
+
+ This is a context manager for :func:`tty.setcbreak`.
+
+ This context manager activates 'rare' mode, the opposite of 'cooked'
+ mode: On entry, :func:`tty.setcbreak` mode is activated disabling
+ line-buffering of keyboard input and turning off automatic echo of
+ input as output.
+
+ .. note:: You must explicitly print any user input you would like
+ displayed. If you provide any kind of editing, you must handle
+ backspace and other line-editing control functions in this mode
+ as well!
+
+ **Normally**, characters received from the keyboard cannot be read
+ by Python until the *Return* key is pressed. Also known as *cooked* or
+ *canonical input* mode, it allows the tty driver to provide
+ line-editing before shuttling the input to your program and is the
+ (implicit) default terminal mode set by most unix shells before
+ executing programs.
+
+ Technically, this context manager sets the :mod:`termios` attributes
+ of the terminal attached to :obj:`sys.__stdin__`.
+
+ .. note:: :func:`tty.setcbreak` sets ``VMIN = 1`` and ``VTIME = 0``,
+ see http://www.unixwiz.net/techtips/termios-vmin-vtime.html
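+
+ A simple example of use (a sketch)::
+
+ with term.cbreak():
+ ks = term.inkey()
+ print('You pressed ' + repr(ks))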
+ """
+ if HAS_TTY and self._keyboard_fd is not None:
+ # Save current terminal mode:
+ save_mode = termios.tcgetattr(self._keyboard_fd)
+ save_line_buffered = self._line_buffered
+ tty.setcbreak(self._keyboard_fd, termios.TCSANOW)
+ try:
+ self._line_buffered = False
+ yield
+ finally:
+ # Restore prior mode:
+ termios.tcsetattr(self._keyboard_fd,
+ termios.TCSAFLUSH,
+ save_mode)
+ self._line_buffered = save_line_buffered
+ else:
+ yield
+
+ @contextlib.contextmanager
+ def raw(self):
+ r"""
+ A context manager for :func:`tty.setraw`.
+
+ Although both :meth:`cbreak` and :meth:`raw` modes allow each keystroke
+ to be read immediately after it is pressed, Raw mode disables
+ processing of input and output.
+
+ In cbreak mode, special input characters such as ``^C`` or ``^S`` are
+ interpreted by the terminal driver and excluded from the stdin stream.
+ In raw mode these values are received by the :meth:`inkey` method.
+
+ Because output processing is not done, the newline ``'\n'`` alone is
+ not enough; you must also print a carriage return ``'\r'`` to ensure
+ that the cursor is returned to the first column::
+
+ with term.raw():
+ print("printing in raw mode", end="\r\n")
+ """
+ if HAS_TTY and self._keyboard_fd is not None:
+ # Save current terminal mode:
+ save_mode = termios.tcgetattr(self._keyboard_fd)
+ save_line_buffered = self._line_buffered
+ tty.setraw(self._keyboard_fd, termios.TCSANOW)
+ try:
+ self._line_buffered = False
+ yield
+ finally:
+ # Restore prior mode:
+ termios.tcsetattr(self._keyboard_fd,
+ termios.TCSAFLUSH,
+ save_mode)
+ self._line_buffered = save_line_buffered
+ else:
+ yield
+
+ @contextlib.contextmanager
+ def keypad(self):
+ r"""
+ Context manager that enables directional keypad input.
+
+ On entry, this puts the terminal into "keyboard_transmit" mode by
+ emitting the keypad_xmit (smkx) capability. On exit, it emits
+ keypad_local (rmkx).
+
+ On an IBM-PC keyboard with numeric keypad of terminal-type *xterm*,
+ with numlock off, the lower-left diagonal key transmits sequence
+ ``\\x1b[F``, translated to :class:`~.Terminal` attribute
+ ``KEY_END``.
+
+ However, upon entering :meth:`keypad`, ``\\x1bOF`` is transmitted,
+ translating to ``KEY_LL`` (lower-left key), allowing you to determine
+ diagonal direction keys.
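+
+ A minimal example of use (a sketch, assuming ``term`` is a
+ :class:`~.Terminal` instance)::
+
+ with term.cbreak(), term.keypad():
+ ks = term.inkey()
+ if ks.code == term.KEY_LL:
+ print('lower-left diagonal key')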
+ """
+ try:
+ self.stream.write(self.smkx)
+ self.stream.flush()
+ yield
+ finally:
+ self.stream.write(self.rmkx)
+ self.stream.flush()
+
+ def inkey(self, timeout=None, esc_delay=0.35):
+ """
+ Read and return the next keyboard event within given timeout.
+
+ Generally, this should be used inside the :meth:`raw` context manager.
+
+ :arg float timeout: Number of seconds to wait for a keystroke before
+ returning. When ``None`` (default), this method may block
+ indefinitely.
+ :arg float esc_delay: To distinguish between the keystroke of
+ ``KEY_ESCAPE`` and sequences beginning with escape, the parameter
+ ``esc_delay`` specifies the amount of time after receiving escape
+ (``chr(27)``) to await the completion of an application key
+ before returning a :class:`~.Keystroke` instance for
+ ``KEY_ESCAPE``.
+ :rtype: :class:`~.Keystroke`.
+ :returns: :class:`~.Keystroke`, which may be empty (``u''``) if
+ ``timeout`` is specified and keystroke is not received.
+
+ .. note:: When used without the context manager :meth:`cbreak`, or
+ :meth:`raw`, :obj:`sys.__stdin__` remains line-buffered, and this
+ function will block until the return key is pressed!
+
+ .. note:: On Windows, a 10 ms sleep is added to the key press detection loop to reduce CPU
+ load. Due to the behavior of :py:func:`time.sleep` on Windows, this will actually
+ result in a 15.6 ms delay when using the default `time resolution
+ <https://docs.microsoft.com/en-us/windows/win32/api/timeapi/nf-timeapi-timebeginperiod>`_.
+ Decreasing the time resolution will reduce this to 10 ms, while
+ increasing it, which is rarely done, will have a perceptible impact
+ on the behavior.
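+
+ A minimal polling example (a sketch)::
+
+ with term.cbreak():
+ ks = term.inkey(timeout=5)
+ if not ks:
+ print('no input within 5 seconds')
+ elif ks.is_sequence:
+ print('application key: {0}'.format(ks.name))
+ else:
+ print('key: {0}'.format(ks))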
+ """
+ resolve = functools.partial(resolve_sequence,
+ mapper=self._keymap,
+ codes=self._keycodes)
+
+ stime = time.time()
+
+ # re-buffer previously received keystrokes,
+ ucs = u''
+ while self._keyboard_buf:
+ ucs += self._keyboard_buf.pop()
+
+ # receive all immediately available bytes
+ while self.kbhit(timeout=0):
+ ucs += self.getch()
+
+ # decode keystroke, if any
+ ks = resolve(text=ucs)
+
+ # so long as the most immediately received or buffered keystroke is
+ # incomplete (which may be a multibyte encoding), block until one
+ # is received.
+ while not ks and self.kbhit(timeout=_time_left(stime, timeout)):
+ ucs += self.getch()
+ ks = resolve(text=ucs)
+
+ # handle escape key (KEY_ESCAPE) vs. escape sequence (like those
+ # that begin with \x1b[ or \x1bO) up to esc_delay when
+ # received. This is not optimal, but causes least delay when
+ # "meta sends escape" is used, or when an unsupported sequence is
+ # sent.
+ #
+ # The check "ucs in self._keymap_prefixes" has an effect on
+ # keystrokes such as Alt + Z ("\x1bz" with metaSendsEscape): because
+ # no known input sequence begins with such phrasing, it may be
+ # returned immediately, rather than blocking for the full esc_delay.
+ if ks.code == self.KEY_ESCAPE:
+ esctime = time.time()
+ while (ks.code == self.KEY_ESCAPE and
+ ucs in self._keymap_prefixes and
+ self.kbhit(timeout=_time_left(esctime, esc_delay))):
+ ucs += self.getch()
+ ks = resolve(text=ucs)
+
+ # buffer any remaining text received
+ self.ungetch(ucs[len(ks):])
+ return ks
+
+
+class WINSZ(collections.namedtuple('WINSZ', (
+ 'ws_row', 'ws_col', 'ws_xpixel', 'ws_ypixel'))):
+ """
+ Structure represents return value of :const:`termios.TIOCGWINSZ`.
+
+ .. py:attribute:: ws_row
+
+ rows, in characters
+
+ .. py:attribute:: ws_col
+
+ columns, in characters
+
+ .. py:attribute:: ws_xpixel
+
+ horizontal size, pixels
+
+ .. py:attribute:: ws_ypixel
+
+ vertical size, pixels
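+
+ For example, the window size of the terminal attached to stdout may
+ be unpacked directly (a sketch, POSIX-only)::
+
+ import sys, struct, fcntl, termios
+
+ data = fcntl.ioctl(sys.__stdout__.fileno(),
+ termios.TIOCGWINSZ, WINSZ._BUF)
+ winsz = WINSZ(*struct.unpack(WINSZ._FMT, data))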
+ """
+ #: format of termios structure
+ _FMT = 'hhhh'
+ #: buffer of termios structure appropriate for ioctl argument
+ _BUF = '\x00' * struct.calcsize(_FMT)
+
+
+#: _CUR_TERM = None
+#: From libcurses/doc/ncurses-intro.html (ESR, Thomas Dickey, et al.)::
+#:
+#: "After the call to setupterm(), the global variable cur_term is set to
+#: point to the current structure of terminal capabilities. By calling
+#: setupterm() for each terminal, and saving and restoring cur_term, it
+#: is possible for a program to use two or more terminals at once."
+#:
+#: However, if you study Python's ``./Modules/_cursesmodule.c``, you'll find::
+#:
+#: if (!initialised_setupterm && setupterm(termstr,fd,&err) == ERR) {
+#:
+#: Python - perhaps wrongly - will not allow for re-initialisation of new
+#: terminals through :func:`curses.setupterm`, so the value of cur_term cannot
+#: be changed once set: subsequent calls to :func:`curses.setupterm` have no
+#: effect.
+#:
+#: Therefore, the :attr:`Terminal.kind` of each :class:`Terminal` is
+#: essentially a singleton. This global variable reflects that, and a warning
+#: is emitted if somebody expects otherwise.
diff --git a/third_party/python/blessed/blessed/terminal.pyi b/third_party/python/blessed/blessed/terminal.pyi
new file mode 100644
index 0000000000..3d8eea4db7
--- /dev/null
+++ b/third_party/python/blessed/blessed/terminal.pyi
@@ -0,0 +1,106 @@
+# std imports
+from typing import IO, Any, List, Tuple, Union, Optional, OrderedDict, ContextManager
+
+# local
+from .keyboard import Keystroke
+from .sequences import Termcap
+from .formatters import (FormattingString,
+ NullCallableString,
+ ParameterizingString,
+ FormattingOtherString)
+
+HAS_TTY: bool
+
+class Terminal:
+ caps: OrderedDict[str, Termcap]
+ errors: List[str] = ...
+ def __init__(
+ self,
+ kind: Optional[str] = ...,
+ stream: Optional[IO[str]] = ...,
+ force_styling: bool = ...,
+ ) -> None: ...
+ def __getattr__(
+ self, attr: str
+ ) -> Union[NullCallableString, ParameterizingString, FormattingString]: ...
+ @property
+ def kind(self) -> str: ...
+ @property
+ def does_styling(self) -> bool: ...
+ @property
+ def is_a_tty(self) -> bool: ...
+ @property
+ def height(self) -> int: ...
+ @property
+ def width(self) -> int: ...
+ @property
+ def pixel_height(self) -> int: ...
+ @property
+ def pixel_width(self) -> int: ...
+ def location(
+ self, x: Optional[int] = ..., y: Optional[int] = ...
+ ) -> ContextManager[None]: ...
+ def get_location(self, timeout: Optional[float] = ...) -> Tuple[int, int]: ...
+ def fullscreen(self) -> ContextManager[None]: ...
+ def hidden_cursor(self) -> ContextManager[None]: ...
+ def move_xy(self, x: int, y: int) -> ParameterizingString: ...
+ def move_yx(self, y: int, x: int) -> ParameterizingString: ...
+ @property
+ def move_left(self) -> FormattingOtherString: ...
+ @property
+ def move_right(self) -> FormattingOtherString: ...
+ @property
+ def move_up(self) -> FormattingOtherString: ...
+ @property
+ def move_down(self) -> FormattingOtherString: ...
+ @property
+ def color(self) -> Union[NullCallableString, ParameterizingString]: ...
+ def color_rgb(self, red: int, green: int, blue: int) -> FormattingString: ...
+ @property
+ def on_color(self) -> Union[NullCallableString, ParameterizingString]: ...
+ def on_color_rgb(self, red: int, green: int, blue: int) -> FormattingString: ...
+ def formatter(self, value: str) -> Union[NullCallableString, FormattingString]: ...
+ def rgb_downconvert(self, red: int, green: int, blue: int) -> int: ...
+ @property
+ def normal(self) -> str: ...
+ def link(self, url: str, text: str, url_id: str = ...) -> str: ...
+ @property
+ def stream(self) -> IO[str]: ...
+ @property
+ def number_of_colors(self) -> int: ...
+ @number_of_colors.setter
+ def number_of_colors(self, value: int) -> None: ...
+ @property
+ def color_distance_algorithm(self) -> str: ...
+ @color_distance_algorithm.setter
+ def color_distance_algorithm(self, value: str) -> None: ...
+ def ljust(
+ self, text: str, width: Optional[int] = ..., fillchar: str = ...
+ ) -> str: ...
+ def rjust(
+ self, text: str, width: Optional[int] = ..., fillchar: str = ...
+ ) -> str: ...
+ def center(
+ self, text: str, width: Optional[int] = ..., fillchar: str = ...
+ ) -> str: ...
+ def truncate(self, text: str, width: Optional[int] = ...) -> str: ...
+ def length(self, text: str) -> int: ...
+ def strip(self, text: str, chars: Optional[str] = ...) -> str: ...
+ def rstrip(self, text: str, chars: Optional[str] = ...) -> str: ...
+ def lstrip(self, text: str, chars: Optional[str] = ...) -> str: ...
+ def strip_seqs(self, text: str) -> str: ...
+ def split_seqs(self, text: str, maxsplit: int) -> List[str]: ...
+ def wrap(
+ self, text: str, width: Optional[int] = ..., **kwargs: Any
+ ) -> List[str]: ...
+ def getch(self) -> str: ...
+ def ungetch(self, text: str) -> None: ...
+ def kbhit(self, timeout: Optional[float] = ...) -> bool: ...
+ def cbreak(self) -> ContextManager[None]: ...
+ def raw(self) -> ContextManager[None]: ...
+ def keypad(self) -> ContextManager[None]: ...
+ def inkey(
+ self, timeout: Optional[float] = ..., esc_delay: float = ...
+ ) -> Keystroke: ...
+
+class WINSZ: ...
diff --git a/third_party/python/blessed/blessed/win_terminal.py b/third_party/python/blessed/blessed/win_terminal.py
new file mode 100644
index 0000000000..267e028b96
--- /dev/null
+++ b/third_party/python/blessed/blessed/win_terminal.py
@@ -0,0 +1,163 @@
+# -*- coding: utf-8 -*-
+"""Module containing Windows version of :class:`Terminal`."""
+
+from __future__ import absolute_import
+
+# std imports
+import time
+import msvcrt # pylint: disable=import-error
+import contextlib
+
+# 3rd party
+from jinxed import win32 # pylint: disable=import-error
+
+# local
+from .terminal import WINSZ
+from .terminal import Terminal as _Terminal
+
+
+class Terminal(_Terminal):
+ """Windows subclass of :class:`Terminal`."""
+
+ def getch(self):
+ r"""
+ Read, decode, and return the next byte from the keyboard stream.
+
+ :rtype: unicode
+ :returns: a single unicode character, or ``u''`` if a multi-byte
+ sequence has not yet been fully received.
+
+ For versions of Windows 10.0.10586 and later, the console is expected
+ to be in ENABLE_VIRTUAL_TERMINAL_INPUT mode and the default method is
+ called.
+
+ For older versions of Windows, msvcrt.getwch() is used. If the received
+ character is ``\x00`` or ``\xe0``, the next character is
+ automatically retrieved.
+ """
+ if win32.VTMODE_SUPPORTED:
+ return super(Terminal, self).getch()
+
+ rtn = msvcrt.getwch()
+ if rtn in ('\x00', '\xe0'):
+ rtn += msvcrt.getwch()
+ return rtn
+
+ def kbhit(self, timeout=None):
+ """
+ Return whether a keypress has been detected on the keyboard.
+
+ This method is used by :meth:`inkey` to determine if a byte may
+ be read using :meth:`getch` without blocking. This is implemented
+ by wrapping msvcrt.kbhit() in a timeout.
+
+ :arg float timeout: When ``timeout`` is 0, this call is
+ non-blocking; when ``None`` (default), it blocks indefinitely
+ until a keypress is detected; when a positive number, it
+ returns after ``timeout`` seconds have elapsed (float).
+ :rtype: bool
+ :returns: True if a keypress is waiting to be read on the keyboard
+ attached to this terminal.
+ """
+ end = time.time() + (timeout or 0)
+ while True:
+
+ if msvcrt.kbhit():
+ return True
+
+ if timeout is not None and end < time.time():
+ break
+
+ time.sleep(0.01) # Sleep to reduce CPU load
+ return False
+
+ @staticmethod
+ def _winsize(fd):
+ """
+ Return named tuple describing size of the terminal by ``fd``.
+
+ :arg int fd: file descriptor to query for its window size.
+ :rtype: WINSZ
+ :returns: named tuple describing size of the terminal
+
+ WINSZ is a :class:`collections.namedtuple` instance, whose structure
+ directly maps to the return value of the :const:`termios.TIOCGWINSZ`
+ ioctl return value. The return parameters are:
+
+ - ``ws_row``: height of terminal by its number of character cells.
+ - ``ws_col``: width of terminal by its number of character cells.
+ - ``ws_xpixel``: width of terminal by pixels (not accurate).
+ - ``ws_ypixel``: height of terminal by pixels (not accurate).
+ """
+ window = win32.get_terminal_size(fd)
+ return WINSZ(ws_row=window.lines, ws_col=window.columns,
+ ws_xpixel=0, ws_ypixel=0)
+
+ @contextlib.contextmanager
+ def cbreak(self):
+ """
+ Allow each keystroke to be read immediately after it is pressed.
+
+ This is a context manager for ``jinxed.win32.setcbreak()``.
+
+ .. note:: You must explicitly print any user input you would like
+ displayed. If you provide any kind of editing, you must handle
+ backspace and other line-editing control functions in this mode
+ as well!
+
+ **Normally**, characters received from the keyboard cannot be read
+ by Python until the *Return* key is pressed. Also known as *cooked* or
+ *canonical input* mode, it allows the tty driver to provide
+ line-editing before shuttling the input to your program and is the
+ (implicit) default terminal mode set by most unix shells before
+ executing programs.
+ """
+ if self._keyboard_fd is not None:
+
+ filehandle = msvcrt.get_osfhandle(self._keyboard_fd)
+
+ # Save current terminal mode:
+ save_mode = win32.get_console_mode(filehandle)
+ save_line_buffered = self._line_buffered
+ win32.setcbreak(filehandle)
+ try:
+ self._line_buffered = False
+ yield
+ finally:
+ win32.set_console_mode(filehandle, save_mode)
+ self._line_buffered = save_line_buffered
+
+ else:
+ yield
+
+ @contextlib.contextmanager
+ def raw(self):
+ """
+ A context manager for ``jinxed.win32.setraw()``.
+
+ Although both :meth:`cbreak` and :meth:`raw` modes allow each keystroke
+ to be read immediately after it is pressed, Raw mode disables
+ processing of input and output.
+
+ In cbreak mode, special input characters such as ``^C`` are
+ interpreted by the terminal driver and excluded from the stdin stream.
+ In raw mode these values are received by the :meth:`inkey` method.
+ """
+ if self._keyboard_fd is not None:
+
+ filehandle = msvcrt.get_osfhandle(self._keyboard_fd)
+
+ # Save current terminal mode:
+ save_mode = win32.get_console_mode(filehandle)
+ save_line_buffered = self._line_buffered
+ win32.setraw(filehandle)
+ try:
+ self._line_buffered = False
+ yield
+ finally:
+ win32.set_console_mode(filehandle, save_mode)
+ self._line_buffered = save_line_buffered
+
+ else:
+ yield
diff --git a/third_party/python/blessed/blessed/win_terminal.pyi b/third_party/python/blessed/blessed/win_terminal.pyi
new file mode 100644
index 0000000000..275f16f9ee
--- /dev/null
+++ b/third_party/python/blessed/blessed/win_terminal.pyi
@@ -0,0 +1,11 @@
+# std imports
+from typing import Optional, ContextManager
+
+# local
+from .terminal import Terminal as _Terminal
+
+class Terminal(_Terminal):
+ def getch(self) -> str: ...
+ def kbhit(self, timeout: Optional[float] = ...) -> bool: ...
+ def cbreak(self) -> ContextManager[None]: ...
+ def raw(self) -> ContextManager[None]: ...
diff --git a/third_party/python/cbor2/cbor2-4.0.1.dist-info/DESCRIPTION.rst b/third_party/python/cbor2/cbor2-4.0.1.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000..734481b638
--- /dev/null
+++ b/third_party/python/cbor2/cbor2-4.0.1.dist-info/DESCRIPTION.rst
@@ -0,0 +1,26 @@
+.. image:: https://travis-ci.org/agronholm/cbor2.svg?branch=master
+ :target: https://travis-ci.org/agronholm/cbor2
+ :alt: Build Status
+.. image:: https://coveralls.io/repos/github/agronholm/cbor2/badge.svg?branch=master
+ :target: https://coveralls.io/github/agronholm/cbor2?branch=master
+ :alt: Code Coverage
+
+This library provides encoding and decoding for the Concise Binary Object Representation (CBOR)
+(`RFC 7049`_) serialization format.
+
+There exists another Python CBOR implementation (cbor) which is faster on CPython due to its C
+extensions. On PyPy, cbor2 and cbor are almost identical in performance. The other implementation
+also lacks documentation and a comprehensive test suite, does not support most standard extension
+tags and is known to crash (segfault) when passed a cyclic structure (say, a list containing
+itself).
+
+.. _RFC 7049: https://tools.ietf.org/html/rfc7049
+
+Project links
+-------------
+
+* `Documentation <http://cbor2.readthedocs.org/>`_
+* `Source code <https://github.com/agronholm/cbor2>`_
+* `Issue tracker <https://github.com/agronholm/cbor2/issues>`_
+
+
diff --git a/third_party/python/cbor2/cbor2-4.0.1.dist-info/METADATA b/third_party/python/cbor2/cbor2-4.0.1.dist-info/METADATA
new file mode 100644
index 0000000000..c7f42ac60f
--- /dev/null
+++ b/third_party/python/cbor2/cbor2-4.0.1.dist-info/METADATA
@@ -0,0 +1,50 @@
+Metadata-Version: 2.0
+Name: cbor2
+Version: 4.0.1
+Summary: Pure Python CBOR (de)serializer with extensive tag support
+Home-page: https://github.com/agronholm/cbor2
+Author: Alex Grönholm
+Author-email: alex.gronholm@nextday.fi
+License: MIT
+Keywords: serialization cbor
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Provides-Extra: testing
+Requires-Dist: pytest; extra == 'testing'
+Requires-Dist: pytest-cov; extra == 'testing'
+
+.. image:: https://travis-ci.org/agronholm/cbor2.svg?branch=master
+ :target: https://travis-ci.org/agronholm/cbor2
+ :alt: Build Status
+.. image:: https://coveralls.io/repos/github/agronholm/cbor2/badge.svg?branch=master
+ :target: https://coveralls.io/github/agronholm/cbor2?branch=master
+ :alt: Code Coverage
+
+This library provides encoding and decoding for the Concise Binary Object Representation (CBOR)
+(`RFC 7049`_) serialization format.
+
+There exists another Python CBOR implementation (cbor) which is faster on CPython due to its C
+extensions. On PyPy, cbor2 and cbor are almost identical in performance. The other implementation
+also lacks documentation and a comprehensive test suite, does not support most standard extension
+tags and is known to crash (segfault) when passed a cyclic structure (say, a list containing
+itself).
+
+.. _RFC 7049: https://tools.ietf.org/html/rfc7049
+
+Project links
+-------------
+
+* `Documentation <http://cbor2.readthedocs.org/>`_
+* `Source code <https://github.com/agronholm/cbor2>`_
+* `Issue tracker <https://github.com/agronholm/cbor2/issues>`_
+
+
diff --git a/third_party/python/cbor2/cbor2-4.0.1.dist-info/RECORD b/third_party/python/cbor2/cbor2-4.0.1.dist-info/RECORD
new file mode 100644
index 0000000000..e29279b8df
--- /dev/null
+++ b/third_party/python/cbor2/cbor2-4.0.1.dist-info/RECORD
@@ -0,0 +1,11 @@
+cbor2/__init__.py,sha256=Si4l50bD5McrzpgQ6bEmhla2w2U910scs0lCqHzwxOo,239
+cbor2/compat.py,sha256=aBzyMrGwl061zdmlFPQrk4U1rqZQcVNl5ojRsQdG5d0,1033
+cbor2/decoder.py,sha256=6bJMq6fC8RRe5uJFrvKy9T-J3VLYKIkSF9UUmmlYj2A,11936
+cbor2/encoder.py,sha256=OimwLht642jK61Vl2X5FeIv3rHL0hd5yjQ7ajoO2hko,11496
+cbor2/types.py,sha256=I2lpvqktj8Nm8MJtUwdhOYXAUJw-UctYTQlKg0qZ9pc,1302
+cbor2-4.0.1.dist-info/DESCRIPTION.rst,sha256=1Lg57ktrF2XHHyDuGfWtKY5VZd4ydp3-7Ptr27cbWrE,1091
+cbor2-4.0.1.dist-info/METADATA,sha256=h1mC4t8mFZcyJc3cHWJFUf5wUWYVPAqh4Q4DRe0ajQg,1981
+cbor2-4.0.1.dist-info/RECORD,,
+cbor2-4.0.1.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
+cbor2-4.0.1.dist-info/metadata.json,sha256=lHkH6x7w_MNrQqe5ZNu9kihQi3Gg-XOQpYTTElRtKe8,1006
+cbor2-4.0.1.dist-info/top_level.txt,sha256=4Z7JYs5_QM6eqOa2Ew1n_2-uKm2SYl76j2NWTtfCChs,6
diff --git a/third_party/python/cbor2/cbor2-4.0.1.dist-info/WHEEL b/third_party/python/cbor2/cbor2-4.0.1.dist-info/WHEEL
new file mode 100644
index 0000000000..8b6dd1b5a8
--- /dev/null
+++ b/third_party/python/cbor2/cbor2-4.0.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.29.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/cbor2/cbor2-4.0.1.dist-info/metadata.json b/third_party/python/cbor2/cbor2-4.0.1.dist-info/metadata.json
new file mode 100644
index 0000000000..85d36a4496
--- /dev/null
+++ b/third_party/python/cbor2/cbor2-4.0.1.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6"], "extensions": {"python.details": {"contacts": [{"email": "alex.gronholm@nextday.fi", "name": "Alex Gr\u00f6nholm", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/agronholm/cbor2"}}}, "extras": ["testing"], "generator": "bdist_wheel (0.29.0)", "keywords": ["serialization", "cbor"], "license": "MIT", "metadata_version": "2.0", "name": "cbor2", "run_requires": [{"extra": "testing", "requires": ["pytest-cov", "pytest"]}], "summary": "Pure Python CBOR (de)serializer with extensive tag support", "version": "4.0.1"} \ No newline at end of file
diff --git a/third_party/python/cbor2/cbor2-4.0.1.dist-info/top_level.txt b/third_party/python/cbor2/cbor2-4.0.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..615ca8aeba
--- /dev/null
+++ b/third_party/python/cbor2/cbor2-4.0.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+cbor2
diff --git a/third_party/python/cbor2/cbor2/__init__.py b/third_party/python/cbor2/cbor2/__init__.py
new file mode 100644
index 0000000000..474841ace4
--- /dev/null
+++ b/third_party/python/cbor2/cbor2/__init__.py
@@ -0,0 +1,3 @@
+from cbor2.decoder import load, loads, CBORDecoder, CBORDecodeError # noqa
+from cbor2.encoder import dump, dumps, CBOREncoder, CBOREncodeError, shareable_encoder # noqa
+from cbor2.types import CBORTag, CBORSimpleValue, undefined # noqa
diff --git a/third_party/python/cbor2/cbor2/compat.py b/third_party/python/cbor2/cbor2/compat.py
new file mode 100644
index 0000000000..983efda59b
--- /dev/null
+++ b/third_party/python/cbor2/cbor2/compat.py
@@ -0,0 +1,49 @@
+import sys
+
+
+if sys.version_info.major < 3:
+ from datetime import tzinfo, timedelta
+
+ class timezone(tzinfo):
+ def __init__(self, offset):
+ self.offset = offset
+
+ def utcoffset(self, dt):
+ return self.offset
+
+ def dst(self, dt):
+ return timedelta(0)
+
+ def tzname(self, dt):
+ return 'UTC+00:00'
+
+ def as_unicode(string):
+ return string.decode('utf-8')
+
+ def iteritems(self):
+ return self.iteritems()
+
+ def bytes_from_list(values):
+ return bytes(bytearray(values))
+
+ byte_as_integer = ord
+ timezone.utc = timezone(timedelta(0))
+ xrange = xrange # noqa
+ long = long # noqa
+ unicode = unicode # noqa
+else:
+ from datetime import timezone
+
+ def byte_as_integer(bytestr):
+ return bytestr[0]
+
+ def as_unicode(string):
+ return string
+
+ def iteritems(self):
+ return self.items()
+
+ xrange = range # noqa
+ long = int # noqa
+ unicode = str # noqa
+ bytes_from_list = bytes
diff --git a/third_party/python/cbor2/cbor2/decoder.py b/third_party/python/cbor2/cbor2/decoder.py
new file mode 100644
index 0000000000..5833d9e9f4
--- /dev/null
+++ b/third_party/python/cbor2/cbor2/decoder.py
@@ -0,0 +1,411 @@
+import re
+import struct
+from datetime import datetime, timedelta
+from io import BytesIO
+
+from cbor2.compat import timezone, xrange, byte_as_integer
+from cbor2.types import CBORTag, undefined, break_marker, CBORSimpleValue
+
+timestamp_re = re.compile(r'^(\d{4})-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)'
+ r'(?:\.(\d+))?(?:Z|([+-]\d\d):(\d\d))$')
+
+
+class CBORDecodeError(Exception):
+ """Raised when an error occurs deserializing a CBOR datastream."""
+
+
+def decode_uint(decoder, subtype, shareable_index=None, allow_infinite=False):
+ # Major tag 0
+ if subtype < 24:
+ return subtype
+ elif subtype == 24:
+ return struct.unpack('>B', decoder.read(1))[0]
+ elif subtype == 25:
+ return struct.unpack('>H', decoder.read(2))[0]
+ elif subtype == 26:
+ return struct.unpack('>L', decoder.read(4))[0]
+ elif subtype == 27:
+ return struct.unpack('>Q', decoder.read(8))[0]
+ elif subtype == 31 and allow_infinite:
+ return None
+ else:
+ raise CBORDecodeError('unknown unsigned integer subtype 0x%x' % subtype)
+
+
+def decode_negint(decoder, subtype, shareable_index=None):
+ # Major tag 1
+ uint = decode_uint(decoder, subtype)
+ return -uint - 1
+
+
+def decode_bytestring(decoder, subtype, shareable_index=None):
+ # Major tag 2
+ length = decode_uint(decoder, subtype, allow_infinite=True)
+ if length is None:
+ # Indefinite length
+ buf = bytearray()
+ while True:
+ initial_byte = byte_as_integer(decoder.read(1))
+ if initial_byte == 255:
+ return buf
+ else:
+ length = decode_uint(decoder, initial_byte & 31)
+ value = decoder.read(length)
+ buf.extend(value)
+ else:
+ return decoder.read(length)
+
+
+def decode_string(decoder, subtype, shareable_index=None):
+ # Major tag 3
+ return decode_bytestring(decoder, subtype).decode('utf-8')
+
+
+def decode_array(decoder, subtype, shareable_index=None):
+ # Major tag 4
+ items = []
+ decoder.set_shareable(shareable_index, items)
+ length = decode_uint(decoder, subtype, allow_infinite=True)
+ if length is None:
+ # Indefinite length
+ while True:
+ value = decoder.decode()
+ if value is break_marker:
+ break
+ else:
+ items.append(value)
+ else:
+ for _ in xrange(length):
+ item = decoder.decode()
+ items.append(item)
+
+ return items
+
+
+def decode_map(decoder, subtype, shareable_index=None):
+ # Major tag 5
+ dictionary = {}
+ decoder.set_shareable(shareable_index, dictionary)
+ length = decode_uint(decoder, subtype, allow_infinite=True)
+ if length is None:
+ # Indefinite length
+ while True:
+ key = decoder.decode()
+ if key is break_marker:
+ break
+ else:
+ value = decoder.decode()
+ dictionary[key] = value
+ else:
+ for _ in xrange(length):
+ key = decoder.decode()
+ value = decoder.decode()
+ dictionary[key] = value
+
+ if decoder.object_hook:
+ return decoder.object_hook(decoder, dictionary)
+ else:
+ return dictionary
+
+
+def decode_semantic(decoder, subtype, shareable_index=None):
+ # Major tag 6
+ tagnum = decode_uint(decoder, subtype)
+
+ # Special handling for the "shareable" tag
+ if tagnum == 28:
+ shareable_index = decoder._allocate_shareable()
+ return decoder.decode(shareable_index)
+
+ value = decoder.decode()
+ semantic_decoder = semantic_decoders.get(tagnum)
+ if semantic_decoder:
+ return semantic_decoder(decoder, value, shareable_index)
+
+ tag = CBORTag(tagnum, value)
+ if decoder.tag_hook:
+ return decoder.tag_hook(decoder, tag, shareable_index)
+ else:
+ return tag
+
+
+def decode_special(decoder, subtype, shareable_index=None):
+ # Simple value
+ if subtype < 20:
+ return CBORSimpleValue(subtype)
+
+ # Major tag 7
+ return special_decoders[subtype](decoder)
+
+
+#
+# Semantic decoders (major tag 6)
+#
+
+def decode_datetime_string(decoder, value, shareable_index=None):
+ # Semantic tag 0
+ match = timestamp_re.match(value)
+ if match:
+ year, month, day, hour, minute, second, micro, offset_h, offset_m = match.groups()
+ if offset_h:
+ tz = timezone(timedelta(hours=int(offset_h), minutes=int(offset_m)))
+ else:
+ tz = timezone.utc
+
+ return datetime(int(year), int(month), int(day), int(hour), int(minute), int(second),
+ int(micro or 0), tz)
+ else:
+ raise CBORDecodeError('invalid datetime string: {}'.format(value))
+
+
+def decode_epoch_datetime(decoder, value, shareable_index=None):
+ # Semantic tag 1
+ return datetime.fromtimestamp(value, timezone.utc)
+
+
+def decode_positive_bignum(decoder, value, shareable_index=None):
+ # Semantic tag 2
+ from binascii import hexlify
+ return int(hexlify(value), 16)
+
+
+def decode_negative_bignum(decoder, value, shareable_index=None):
+ # Semantic tag 3
+ return -decode_positive_bignum(decoder, value) - 1
+
+
+def decode_fraction(decoder, value, shareable_index=None):
+ # Semantic tag 4
+ from decimal import Decimal
+ exp = Decimal(value[0])
+ mantissa = Decimal(value[1])
+ return mantissa * (10 ** exp)
+
+
+def decode_bigfloat(decoder, value, shareable_index=None):
+ # Semantic tag 5
+ from decimal import Decimal
+ exp = Decimal(value[0])
+ mantissa = Decimal(value[1])
+ return mantissa * (2 ** exp)
+
+
+def decode_sharedref(decoder, value, shareable_index=None):
+ # Semantic tag 29
+ try:
+ shared = decoder._shareables[value]
+ except IndexError:
+ raise CBORDecodeError('shared reference %d not found' % value)
+
+ if shared is None:
+ raise CBORDecodeError('shared value %d has not been initialized' % value)
+ else:
+ return shared
+
+
+def decode_rational(decoder, value, shareable_index=None):
+ # Semantic tag 30
+ from fractions import Fraction
+ return Fraction(*value)
+
+
+def decode_regexp(decoder, value, shareable_index=None):
+ # Semantic tag 35
+ return re.compile(value)
+
+
+def decode_mime(decoder, value, shareable_index=None):
+ # Semantic tag 36
+ from email.parser import Parser
+ return Parser().parsestr(value)
+
+
+def decode_uuid(decoder, value, shareable_index=None):
+ # Semantic tag 37
+ from uuid import UUID
+ return UUID(bytes=value)
+
+
+#
+# Special decoders (major tag 7)
+#
+
+def decode_simple_value(decoder, shareable_index=None):
+ return CBORSimpleValue(struct.unpack('>B', decoder.read(1))[0])
+
+
+def decode_float16(decoder, shareable_index=None):
+ # Code adapted from RFC 7049, appendix D
+ from math import ldexp
+
+ def decode_single(single):
+ return struct.unpack("!f", struct.pack("!I", single))[0]
+
+ payload = struct.unpack('>H', decoder.read(2))[0]
+ value = (payload & 0x7fff) << 13 | (payload & 0x8000) << 16
+ if payload & 0x7c00 != 0x7c00:
+ return ldexp(decode_single(value), 112)
+
+ return decode_single(value | 0x7f800000)
+
+
+def decode_float32(decoder, shareable_index=None):
+ return struct.unpack('>f', decoder.read(4))[0]
+
+
+def decode_float64(decoder, shareable_index=None):
+ return struct.unpack('>d', decoder.read(8))[0]
+
+
+major_decoders = {
+ 0: decode_uint,
+ 1: decode_negint,
+ 2: decode_bytestring,
+ 3: decode_string,
+ 4: decode_array,
+ 5: decode_map,
+ 6: decode_semantic,
+ 7: decode_special
+}
+
+special_decoders = {
+ 20: lambda self: False,
+ 21: lambda self: True,
+ 22: lambda self: None,
+ 23: lambda self: undefined,
+ 24: decode_simple_value,
+ 25: decode_float16,
+ 26: decode_float32,
+ 27: decode_float64,
+ 31: lambda self: break_marker
+}
+
+semantic_decoders = {
+ 0: decode_datetime_string,
+ 1: decode_epoch_datetime,
+ 2: decode_positive_bignum,
+ 3: decode_negative_bignum,
+ 4: decode_fraction,
+ 5: decode_bigfloat,
+ 29: decode_sharedref,
+ 30: decode_rational,
+ 35: decode_regexp,
+ 36: decode_mime,
+ 37: decode_uuid
+}
+
+
+class CBORDecoder(object):
+ """
+ Deserializes a CBOR encoded byte stream.
+
+ :param tag_hook: Callable that takes 3 arguments: the decoder instance, the
+ :class:`~cbor2.types.CBORTag` and the shareable index for the resulting object, if any.
+ This callback is called for any tags for which there is no built-in decoder.
+ The return value is substituted for the CBORTag object in the deserialized output.
+ :param object_hook: Callable that takes 2 arguments: the decoder instance and the dictionary.
+ This callback is called for each deserialized :class:`dict` object.
+ The return value is substituted for the dict in the deserialized output.
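+
+ For example, an ``object_hook`` that wraps each decoded map in an
+ :class:`~collections.OrderedDict` (a sketch, assuming ``fp`` is an
+ open binary file)::
+
+ from collections import OrderedDict
+
+ def object_hook(decoder, data):
+ return OrderedDict(data)
+
+ obj = CBORDecoder(fp, object_hook=object_hook).decode()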
+ """
+
+ __slots__ = ('fp', 'tag_hook', 'object_hook', '_shareables')
+
+ def __init__(self, fp, tag_hook=None, object_hook=None):
+ self.fp = fp
+ self.tag_hook = tag_hook
+ self.object_hook = object_hook
+ self._shareables = []
+
+ def _allocate_shareable(self):
+ self._shareables.append(None)
+ return len(self._shareables) - 1
+
+ def set_shareable(self, index, value):
+ """
+ Set the shareable value for the last encountered shared value marker, if any.
+
+ If the given index is ``None``, nothing is done.
+
+ :param index: the value of the ``shared_index`` argument to the decoder
+ :param value: the shared value
+
+ """
+ if index is not None:
+ self._shareables[index] = value
+
+ def read(self, amount):
+ """
+ Read bytes from the data stream.
+
+ :param int amount: the number of bytes to read
+
+ """
+ data = self.fp.read(amount)
+ if len(data) < amount:
+ raise CBORDecodeError('premature end of stream (expected to read {} bytes, got {} '
+ 'instead)'.format(amount, len(data)))
+
+ return data
+
+ def decode(self, shareable_index=None):
+ """
+ Decode the next value from the stream.
+
+ :raises CBORDecodeError: if there is any problem decoding the stream
+
+ """
+ try:
+ initial_byte = byte_as_integer(self.fp.read(1))
+ major_type = initial_byte >> 5
+ subtype = initial_byte & 31
+ except Exception as e:
+ raise CBORDecodeError('error reading major type at index {}: {}'
+ .format(self.fp.tell(), e))
+
+ decoder = major_decoders[major_type]
+ try:
+ return decoder(self, subtype, shareable_index)
+ except CBORDecodeError:
+ raise
+ except Exception as e:
+ raise CBORDecodeError('error decoding value at index {}: {}'.format(self.fp.tell(), e))
+
+ def decode_from_bytes(self, buf):
+ """
+ Wrap the given bytestring as a file and call :meth:`decode` with it as the argument.
+
+ This method is intended to be used from the ``tag_hook`` hook when an object needs to be
+ decoded separately from the rest, while still taking advantage of the shared value
+ registry.
+
+ """
+ old_fp = self.fp
+ self.fp = BytesIO(buf)
+ retval = self.decode()
+ self.fp = old_fp
+ return retval
+
+
+def loads(payload, **kwargs):
+ """
+ Deserialize an object from a bytestring.
+
+ :param bytes payload: the bytestring to deserialize
+ :param kwargs: keyword arguments passed to :class:`~.CBORDecoder`
+ :return: the deserialized object
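+
+ For example (a sketch; ``0x83`` introduces a three-element array)::
+
+ loads(b'\x83\x01\x02\x03') # returns [1, 2, 3]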
+
+ """
+ fp = BytesIO(payload)
+ return CBORDecoder(fp, **kwargs).decode()
+
+
+def load(fp, **kwargs):
+ """
+ Deserialize an object from an open file.
+
+ :param fp: the input file (any file-like object)
+ :param kwargs: keyword arguments passed to :class:`~.CBORDecoder`
+ :return: the deserialized object
+
+ """
+ return CBORDecoder(fp, **kwargs).decode()
diff --git a/third_party/python/cbor2/cbor2/encoder.py b/third_party/python/cbor2/cbor2/encoder.py
new file mode 100644
index 0000000000..adcb2722e5
--- /dev/null
+++ b/third_party/python/cbor2/cbor2/encoder.py
@@ -0,0 +1,362 @@
+import re
+import struct
+from collections import OrderedDict, defaultdict
+from contextlib import contextmanager
+from functools import wraps
+from datetime import datetime, date, time
+from io import BytesIO
+
+from cbor2.compat import iteritems, timezone, long, unicode, as_unicode, bytes_from_list
+from cbor2.types import CBORTag, undefined, CBORSimpleValue
+
+
+class CBOREncodeError(Exception):
+ """Raised when an error occurs while serializing an object into a CBOR datastream."""
+
+
+def shareable_encoder(func):
+ """
+ Wrap the given encoder function to gracefully handle cyclic data structures.
+
+ If value sharing is enabled, this marks the given value shared in the datastream on the
+ first call. If the value has already been passed to this method, a reference marker is
+ instead written to the data stream and the wrapped function is not called.
+
+ If value sharing is disabled, only infinite recursion protection is done.
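+
+ For example, a cyclic list round-trips only with value sharing enabled
+ (a sketch, assuming ``dumps`` and ``loads`` are imported from this
+ package)::
+
+ lst = [1, 2]
+ lst.append(lst)
+ decoded = loads(dumps(lst, value_sharing=True))
+ assert decoded[2] is decoded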
+
+ """
+ @wraps(func)
+ def wrapper(encoder, value, *args, **kwargs):
+ value_id = id(value)
+ container, container_index = encoder._shared_containers.get(value_id, (None, None))
+ if encoder.value_sharing:
+ if container is value:
+ # Generate a reference to the previous index instead of encoding this again
+ encoder.write(encode_length(0xd8, 0x1d))
+ encode_int(encoder, container_index)
+ else:
+ # Mark the container as shareable
+ encoder._shared_containers[value_id] = (value, len(encoder._shared_containers))
+ encoder.write(encode_length(0xd8, 0x1c))
+ func(encoder, value, *args, **kwargs)
+ else:
+ if container is value:
+ raise CBOREncodeError('cyclic data structure detected but value sharing is '
+ 'disabled')
+ else:
+ encoder._shared_containers[value_id] = (value, None)
+ func(encoder, value, *args, **kwargs)
+ del encoder._shared_containers[value_id]
+
+ return wrapper
+
+
+def encode_length(major_tag, length):
+ if length < 24:
+ return struct.pack('>B', major_tag | length)
+ elif length < 256:
+ return struct.pack('>BB', major_tag | 24, length)
+ elif length < 65536:
+ return struct.pack('>BH', major_tag | 25, length)
+ elif length < 4294967296:
+ return struct.pack('>BL', major_tag | 26, length)
+ else:
+ return struct.pack('>BQ', major_tag | 27, length)
+
+
+def encode_int(encoder, value):
+ # Big integers (2 ** 64 and over)
+ if value >= 18446744073709551616 or value < -18446744073709551616:
+ if value >= 0:
+ major_type = 0x02
+ else:
+ major_type = 0x03
+ value = -value - 1
+
+ values = []
+ while value > 0:
+ value, remainder = divmod(value, 256)
+ values.insert(0, remainder)
+
+ payload = bytes_from_list(values)
+ encode_semantic(encoder, CBORTag(major_type, payload))
+ elif value >= 0:
+ encoder.write(encode_length(0, value))
+ else:
+ encoder.write(encode_length(0x20, abs(value) - 1))
+
+
+def encode_bytestring(encoder, value):
+ encoder.write(encode_length(0x40, len(value)) + value)
+
+
+def encode_bytearray(encoder, value):
+ encode_bytestring(encoder, bytes(value))
+
+
+def encode_string(encoder, value):
+ encoded = value.encode('utf-8')
+ encoder.write(encode_length(0x60, len(encoded)) + encoded)
+
+
+@shareable_encoder
+def encode_array(encoder, value):
+ encoder.write(encode_length(0x80, len(value)))
+ for item in value:
+ encoder.encode(item)
+
+
+@shareable_encoder
+def encode_map(encoder, value):
+ encoder.write(encode_length(0xa0, len(value)))
+ for key, val in iteritems(value):
+ encoder.encode(key)
+ encoder.encode(val)
+
+
+def encode_semantic(encoder, value):
+ encoder.write(encode_length(0xc0, value.tag))
+ encoder.encode(value.value)
+
+
+#
+# Semantic decoders (major tag 6)
+#
+
+def encode_datetime(encoder, value):
+ # Semantic tag 0
+ if not value.tzinfo:
+ if encoder.timezone:
+ value = value.replace(tzinfo=encoder.timezone)
+ else:
+ raise CBOREncodeError(
+ 'naive datetime encountered and no default timezone has been set')
+
+ if encoder.datetime_as_timestamp:
+ from calendar import timegm
+ # avoid integer division, which would silently drop the microseconds
+ timestamp = timegm(value.utctimetuple())
+ if value.microsecond:
+ timestamp += value.microsecond / 1000000.0
+ encode_semantic(encoder, CBORTag(1, timestamp))
+ else:
+ datestring = as_unicode(value.isoformat().replace('+00:00', 'Z'))
+ encode_semantic(encoder, CBORTag(0, datestring))
+
+
+def encode_date(encoder, value):
+ value = datetime.combine(value, time()).replace(tzinfo=timezone.utc)
+ encode_datetime(encoder, value)
+
+
+def encode_decimal(encoder, value):
+ # Semantic tag 4
+ if value.is_nan():
+ encoder.write(b'\xf9\x7e\x00')
+ elif value.is_infinite():
+ encoder.write(b'\xf9\x7c\x00' if value > 0 else b'\xf9\xfc\x00')
+ else:
+ dt = value.as_tuple()
+ mantissa = sum(d * 10 ** i for i, d in enumerate(reversed(dt.digits)))
+ with encoder.disable_value_sharing():
+ encode_semantic(encoder, CBORTag(4, [dt.exponent, mantissa]))
+
+
+def encode_rational(encoder, value):
+ # Semantic tag 30
+ with encoder.disable_value_sharing():
+ encode_semantic(encoder, CBORTag(30, [value.numerator, value.denominator]))
+
+
+def encode_regexp(encoder, value):
+ # Semantic tag 35
+ encode_semantic(encoder, CBORTag(35, as_unicode(value.pattern)))
+
+
+def encode_mime(encoder, value):
+ # Semantic tag 36
+ encode_semantic(encoder, CBORTag(36, as_unicode(value.as_string())))
+
+
+def encode_uuid(encoder, value):
+ # Semantic tag 37
+ encode_semantic(encoder, CBORTag(37, value.bytes))
+
+
+#
+# Special encoders (major tag 7)
+#
+
+def encode_simple_value(encoder, value):
+ if value.value < 20:
+ encoder.write(struct.pack('>B', 0xe0 | value.value))
+ else:
+ encoder.write(struct.pack('>BB', 0xf8, value.value))
+
+
+def encode_float(encoder, value):
+ # Handle special values efficiently
+ import math
+ if math.isnan(value):
+ encoder.write(b'\xf9\x7e\x00')
+ elif math.isinf(value):
+ encoder.write(b'\xf9\x7c\x00' if value > 0 else b'\xf9\xfc\x00')
+ else:
+ encoder.write(struct.pack('>Bd', 0xfb, value))
+
+
+def encode_boolean(encoder, value):
+ encoder.write(b'\xf5' if value else b'\xf4')
+
+
+def encode_none(encoder, value):
+ encoder.write(b'\xf6')
+
+
+def encode_undefined(encoder, value):
+ encoder.write(b'\xf7')
+
+
+default_encoders = OrderedDict([
+ (bytes, encode_bytestring),
+ (bytearray, encode_bytearray),
+ (unicode, encode_string),
+ (int, encode_int),
+ (long, encode_int),
+ (float, encode_float),
+ (('decimal', 'Decimal'), encode_decimal),
+ (bool, encode_boolean),
+ (type(None), encode_none),
+ (tuple, encode_array),
+ (list, encode_array),
+ (dict, encode_map),
+ (defaultdict, encode_map),
+ (OrderedDict, encode_map),
+ (type(undefined), encode_undefined),
+ (datetime, encode_datetime),
+ (date, encode_date),
+ (type(re.compile('')), encode_regexp),
+ (('fractions', 'Fraction'), encode_rational),
+ (('email.message', 'Message'), encode_mime),
+ (('uuid', 'UUID'), encode_uuid),
+ (CBORSimpleValue, encode_simple_value),
+ (CBORTag, encode_semantic)
+])
+
+
+class CBOREncoder(object):
+ """
+ Serializes objects to a byte stream using Concise Binary Object Representation.
+
+ :param datetime_as_timestamp: set to ``True`` to serialize datetimes as UNIX timestamps
+ (this makes datetimes more concise on the wire but loses the time zone information)
+ :param datetime.tzinfo timezone: the default timezone to use for serializing naive datetimes
+ :param value_sharing: if ``True``, allows more efficient serializing of repeated values and,
+ more importantly, cyclic data structures, at the cost of extra line overhead
+ :param default: a callable that is called by the encoder with two arguments
+ (the encoder instance and the value being encoded) when no suitable encoder
+ has been found, and should use the methods on the encoder to encode any
+ objects it wants to add to the data stream
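+
+ For example, a ``default`` hook that serializes sets as lists (a
+ sketch)::
+
+ def default_encoder(encoder, value):
+ encoder.encode(list(value))
+
+ payload = dumps({1, 2, 3}, default=default_encoder)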
+ """
+
+ __slots__ = ('fp', 'datetime_as_timestamp', 'timezone', 'default', 'value_sharing',
+ 'json_compatible', '_shared_containers', '_encoders')
+
+ def __init__(self, fp, datetime_as_timestamp=False, timezone=None, value_sharing=False,
+ default=None):
+ self.fp = fp
+ self.datetime_as_timestamp = datetime_as_timestamp
+ self.timezone = timezone
+ self.value_sharing = value_sharing
+ self.default = default
+ self._shared_containers = {} # indexes used for value sharing
+ self._encoders = default_encoders.copy()
+
+ def _find_encoder(self, obj_type):
+ from sys import modules
+
+ for type_, enc in list(iteritems(self._encoders)):
+ if type(type_) is tuple:
+ modname, typename = type_
+ imported_type = getattr(modules.get(modname), typename, None)
+ if imported_type is not None:
+ del self._encoders[type_]
+ self._encoders[imported_type] = enc
+ type_ = imported_type
+ else: # pragma: nocover
+ continue
+
+ if issubclass(obj_type, type_):
+ self._encoders[obj_type] = enc
+ return enc
+
+ return None
+
+ @contextmanager
+ def disable_value_sharing(self):
+ """Disable value sharing in the encoder for the duration of the context block."""
+ old_value_sharing = self.value_sharing
+ self.value_sharing = False
+ yield
+ self.value_sharing = old_value_sharing
+
+ def write(self, data):
+ """
+ Write bytes to the data stream.
+
+ :param data: the bytes to write
+
+ """
+ self.fp.write(data)
+
+ def encode(self, obj):
+ """
+ Encode the given object using CBOR.
+
+ :param obj: the object to encode
+
+ """
+ obj_type = obj.__class__
+ encoder = self._encoders.get(obj_type) or self._find_encoder(obj_type) or self.default
+ if not encoder:
+ raise CBOREncodeError('cannot serialize type %s' % obj_type.__name__)
+
+ encoder(self, obj)
+
+ def encode_to_bytes(self, obj):
+ """
+ Encode the given object to a byte buffer and return its value as bytes.
+
+ This method is intended to be used from the ``default`` hook when an object needs to be
+ encoded separately from the rest, while still taking advantage of the shared value
+ registry.
+
+ """
+ old_fp = self.fp
+ self.fp = fp = BytesIO()
+ self.encode(obj)
+ self.fp = old_fp
+ return fp.getvalue()
+
+
+def dumps(obj, **kwargs):
+ """
+ Serialize an object to a bytestring.
+
+ :param obj: the object to serialize
+ :param kwargs: keyword arguments passed to :class:`~.CBOREncoder`
+ :return: the serialized output
+ :rtype: bytes
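+
+ For example (a sketch)::
+
+ dumps([1, 2, 3]) # returns b'\x83\x01\x02\x03'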
+
+ """
+ fp = BytesIO()
+ dump(obj, fp, **kwargs)
+ return fp.getvalue()
+
+
+def dump(obj, fp, **kwargs):
+ """
+ Serialize an object to a file.
+
+ :param obj: the object to serialize
+ :param fp: a file-like object
+ :param kwargs: keyword arguments passed to :class:`~.CBOREncoder`
+
+ """
+ CBOREncoder(fp, **kwargs).encode(obj)
diff --git a/third_party/python/cbor2/cbor2/types.py b/third_party/python/cbor2/cbor2/types.py
new file mode 100644
index 0000000000..1d3afb0601
--- /dev/null
+++ b/third_party/python/cbor2/cbor2/types.py
@@ -0,0 +1,55 @@
+class CBORTag(object):
+ """
+ Represents a CBOR semantic tag.
+
+ :param int tag: tag number
+ :param value: encapsulated value (any object)
+ """
+
+ __slots__ = 'tag', 'value'
+
+ def __init__(self, tag, value):
+ self.tag = tag
+ self.value = value
+
+ def __eq__(self, other):
+ if isinstance(other, CBORTag):
+ return self.tag == other.tag and self.value == other.value
+ return NotImplemented
+
+ def __repr__(self):
+ return 'CBORTag({self.tag}, {self.value!r})'.format(self=self)
+
+
+class CBORSimpleValue(object):
+ """
+ Represents a CBOR "simple value".
+
+ :param int value: the value (0-255)
+ """
+
+ __slots__ = 'value'
+
+ def __init__(self, value):
+ if value < 0 or value > 255:
+ raise TypeError('simple value out of range (0..255)')
+ self.value = value
+
+ def __eq__(self, other):
+ if isinstance(other, CBORSimpleValue):
+ return self.value == other.value
+ elif isinstance(other, int):
+ return self.value == other
+ return NotImplemented
+
+ def __repr__(self):
+ return 'CBORSimpleValue({self.value})'.format(self=self)
+
+
+class UndefinedType(object):
+ __slots__ = ()
+
+
+#: Represents the "undefined" value.
+undefined = UndefinedType()
+break_marker = object()
diff --git a/third_party/python/certifi/certifi-2022.12.7.dist-info/LICENSE b/third_party/python/certifi/certifi-2022.12.7.dist-info/LICENSE
new file mode 100644
index 0000000000..0a64774eab
--- /dev/null
+++ b/third_party/python/certifi/certifi-2022.12.7.dist-info/LICENSE
@@ -0,0 +1,21 @@
+This package contains a modified version of ca-bundle.crt:
+
+ca-bundle.crt -- Bundle of CA Root Certificates
+
+Certificate data from Mozilla as of: Thu Nov 3 19:04:19 2011#
+This is a bundle of X.509 certificates of public Certificate Authorities
+(CA). These were automatically extracted from Mozilla's root certificates
+file (certdata.txt). This file can be found in the mozilla source tree:
+https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt
+It contains the certificates in PEM format and therefore
+can be directly used with curl / libcurl / php_curl, or with
+an Apache+mod_ssl webserver for SSL client authentication.
+Just configure this file as the SSLCACertificateFile.#
+
+***** BEGIN LICENSE BLOCK *****
+This Source Code Form is subject to the terms of the Mozilla Public License,
+v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain
+one at http://mozilla.org/MPL/2.0/.
+
+***** END LICENSE BLOCK *****
+@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $
diff --git a/third_party/python/certifi/certifi-2022.12.7.dist-info/METADATA b/third_party/python/certifi/certifi-2022.12.7.dist-info/METADATA
new file mode 100644
index 0000000000..aeb1991aee
--- /dev/null
+++ b/third_party/python/certifi/certifi-2022.12.7.dist-info/METADATA
@@ -0,0 +1,83 @@
+Metadata-Version: 2.1
+Name: certifi
+Version: 2022.12.7
+Summary: Python package for providing Mozilla's CA Bundle.
+Home-page: https://github.com/certifi/python-certifi
+Author: Kenneth Reitz
+Author-email: me@kennethreitz.com
+License: MPL-2.0
+Project-URL: Source, https://github.com/certifi/python-certifi
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Requires-Python: >=3.6
+License-File: LICENSE
+
+Certifi: Python SSL Certificates
+================================
+
+Certifi provides Mozilla's carefully curated collection of Root Certificates for
+validating the trustworthiness of SSL certificates while verifying the identity
+of TLS hosts. It has been extracted from the `Requests`_ project.
+
+Installation
+------------
+
+``certifi`` is available on PyPI. Simply install it with ``pip``::
+
+ $ pip install certifi
+
+Usage
+-----
+
+To reference the installed certificate authority (CA) bundle, you can use the
+built-in function::
+
+ >>> import certifi
+
+ >>> certifi.where()
+ '/usr/local/lib/python3.7/site-packages/certifi/cacert.pem'
+
+Or from the command line::
+
+ $ python -m certifi
+ /usr/local/lib/python3.7/site-packages/certifi/cacert.pem
+
+Enjoy!
+
+1024-bit Root Certificates
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Browsers and certificate authorities have concluded that 1024-bit keys are
+unacceptably weak for certificates, particularly root certificates. For this
+reason, Mozilla has removed any weak (i.e. 1024-bit key) certificate from its
+bundle, replacing it with an equivalent strong (i.e. 2048-bit or greater key)
+certificate from the same CA. Because Mozilla removed these certificates from
+its bundle, ``certifi`` removed them as well.
+
+In previous versions, ``certifi`` provided the ``certifi.old_where()`` function
+to intentionally re-add the 1024-bit roots back into your bundle. This was not
+recommended in production and therefore was removed at the end of 2018.
+
+.. _`Requests`: https://requests.readthedocs.io/en/master/
+
+Addition/Removal of Certificates
+--------------------------------
+
+Certifi does not support any addition/removal or other modification of the
+CA trust store content. This project is intended to provide a reliable and
+highly portable root of trust to python deployments. Look to upstream projects
+for methods to use alternate trust.
+
+
diff --git a/third_party/python/certifi/certifi-2022.12.7.dist-info/RECORD b/third_party/python/certifi/certifi-2022.12.7.dist-info/RECORD
new file mode 100644
index 0000000000..444a05eba2
--- /dev/null
+++ b/third_party/python/certifi/certifi-2022.12.7.dist-info/RECORD
@@ -0,0 +1,10 @@
+certifi/__init__.py,sha256=bK_nm9bLJzNvWZc2oZdiTwg2KWD4HSPBWGaM0zUDvMw,94
+certifi/__main__.py,sha256=xBBoj905TUWBLRGANOcf7oi6e-3dMP4cEoG9OyMs11g,243
+certifi/cacert.pem,sha256=LBHDzgj_xA05AxnHK8ENT5COnGNElNZe0svFUHMf1SQ,275233
+certifi/core.py,sha256=lhewz0zFb2b4ULsQurElmloYwQoecjWzPqY67P8T7iM,4219
+certifi/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+certifi-2022.12.7.dist-info/LICENSE,sha256=oC9sY4-fuE0G93ZMOrCF2K9-2luTwWbaVDEkeQd8b7A,1052
+certifi-2022.12.7.dist-info/METADATA,sha256=chFpcxKhCPEQ3d8-Vz36zr2Micf1eQhKkFFk7_JvJNo,2911
+certifi-2022.12.7.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+certifi-2022.12.7.dist-info/top_level.txt,sha256=KMu4vUCfsjLrkPbSNdgdekS-pVJzBAJFO__nI8NF6-U,8
+certifi-2022.12.7.dist-info/RECORD,,
diff --git a/third_party/python/certifi/certifi-2022.12.7.dist-info/WHEEL b/third_party/python/certifi/certifi-2022.12.7.dist-info/WHEEL
new file mode 100644
index 0000000000..5bad85fdc1
--- /dev/null
+++ b/third_party/python/certifi/certifi-2022.12.7.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/certifi/certifi-2022.12.7.dist-info/top_level.txt b/third_party/python/certifi/certifi-2022.12.7.dist-info/top_level.txt
new file mode 100644
index 0000000000..963eac530b
--- /dev/null
+++ b/third_party/python/certifi/certifi-2022.12.7.dist-info/top_level.txt
@@ -0,0 +1 @@
+certifi
diff --git a/third_party/python/certifi/certifi/__init__.py b/third_party/python/certifi/certifi/__init__.py
new file mode 100644
index 0000000000..a3546f1255
--- /dev/null
+++ b/third_party/python/certifi/certifi/__init__.py
@@ -0,0 +1,4 @@
+from .core import contents, where
+
+__all__ = ["contents", "where"]
+__version__ = "2022.12.07"
diff --git a/third_party/python/certifi/certifi/__main__.py b/third_party/python/certifi/certifi/__main__.py
new file mode 100644
index 0000000000..8945b5da85
--- /dev/null
+++ b/third_party/python/certifi/certifi/__main__.py
@@ -0,0 +1,12 @@
+import argparse
+
+from certifi import contents, where
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--contents", action="store_true")
+args = parser.parse_args()
+
+if args.contents:
+ print(contents())
+else:
+ print(where())
diff --git a/third_party/python/certifi/certifi/cacert.pem b/third_party/python/certifi/certifi/cacert.pem
new file mode 100644
index 0000000000..df9e4e3c75
--- /dev/null
+++ b/third_party/python/certifi/certifi/cacert.pem
@@ -0,0 +1,4527 @@
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946069240
+# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
+# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
+# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3"
+# Serial: 1478
+# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
+# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
+# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
+# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
+# Label: "Security Communication Root CA"
+# Serial: 0
+# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
+# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
+# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
+MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
+dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
+WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
+VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
+DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
+Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
+QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
+xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
+A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
+Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
+Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
+JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
+RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Label: "SwissSign Gold CA - G2"
+# Serial: 13492815561806991280
+# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
+# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
+# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Label: "SwissSign Silver CA - G2"
+# Serial: 5700383053117599563
+# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
+# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
+# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny O=NetLock Kft. OU=Tan\xfas\xedtv\xe1nykiad\xf3k (Certification Services)
+# Label: "NetLock Arany (Class Gold) F\u0151tan\xfas\xedtv\xe1ny"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Label: "Hongkong Post Root CA 1"
+# Serial: 1000
+# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
+# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
+# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
+FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
+Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
+A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
+b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
+jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
+PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
+ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
+nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
+q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
+MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
+mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
+7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
+oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
+EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
+fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
+AmvZWg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Label: "SecureSign RootCA11"
+# Serial: 1
+# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
+# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
+# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
+MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
+A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
+MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
+Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
+QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
+i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
+h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
+MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
+UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
+8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
+KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
+X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
+QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
+pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
+QSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Label: "Microsec e-Szigno Root CA 2009"
+# Serial: 14014712776195784473
+# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
+# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
+# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 6047274297262753887
+# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
+# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
+# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
+MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
+VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
+ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
+AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
+661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
+ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
+PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
+3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
+SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
+3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
+ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
+StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
+Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
+jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+
+# Issuer: CN=Izenpe.com O=IZENPE S.A.
+# Subject: CN=Izenpe.com O=IZENPE S.A.
+# Label: "Izenpe.com"
+# Serial: 917563065490389241595536686991402621
+# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
+# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
+# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Label: "Go Daddy Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
+# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
+# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
+# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
+# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
+pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
+X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
+fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
+K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
+ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
+LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
+LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 2 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
+# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
+# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
+L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
+1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
+MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
+QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
+arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
+Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
+FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
+P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
+9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
+OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
+KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
+DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
+H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
+I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
+5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
+3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
+Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 3 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
+# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
+# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
+ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
+N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
+tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
+0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
+K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
+Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
+QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
+IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
+HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
+O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
+dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
+kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
+3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
+u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
+4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 3"
+# Serial: 1
+# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
+# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
+# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 2009"
+# Serial: 623603
+# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
+# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
+# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
+HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
+tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
+ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
+lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
+/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
+A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
+A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
+MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
+cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
+L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
+BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
+acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
+zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
+Johw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
+# Serial: 623604
+# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
+# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
+# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
+NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
+ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
+3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
+qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
+p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
+HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R2 O=Disig a.s.
+# Subject: CN=CA Disig Root R2 O=Disig a.s.
+# Label: "CA Disig Root R2"
+# Serial: 10572350602393338211
+# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
+# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
+# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Label: "ACCVRAIZ1"
+# Serial: 6828503384748696800
+# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
+# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
+# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
+AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
+CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
+BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
+VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
+qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
+HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
+G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
+lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
+IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
+0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
+m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
+cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
+uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
+KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
+ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
+VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
+CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
+cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
+QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
+7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
+cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
+QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
+czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
+aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
+aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
+DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
+BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
+D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
+JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
+AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
+vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
+tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
+7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
+h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
+d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
+pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Global Root CA"
+# Serial: 3262
+# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
+# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
+# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
+NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
+B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
+10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
+0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
+MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
+zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
+46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
+yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
+laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
+oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
+BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
+1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
+H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
+RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
+6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
+nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
+wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
+aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
+KwbQBM0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Label: "TeliaSonera Root CA v1"
+# Serial: 199041966741090107964904287217786801558
+# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
+# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
+# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Label: "E-Tugra Certification Authority"
+# Serial: 7667447206703254355
+# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
+# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
+# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
+BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
+aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
+BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
+Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
+MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
+em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
+B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
+D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
+Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
+q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
+k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
+fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
+dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
+ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
+zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
+U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
+Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
+XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
+Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
+HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
+77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
+FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
+yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
+AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
+y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
+NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 2"
+# Serial: 1
+# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
+# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
+# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot 2011 O=Atos
+# Subject: CN=Atos TrustedRoot 2011 O=Atos
+# Label: "Atos TrustedRoot 2011"
+# Serial: 6643877497813316402
+# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
+# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
+# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
+AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
+FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
+REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
+Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
+VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
+SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 1 G3"
+# Serial: 687049649626669250736271037606554624078720034195
+# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
+# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
+# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2 G3"
+# Serial: 390156079458959257446133169266079962026824725800
+# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
+# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
+# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3 G3"
+# Serial: 268090761170461462463995952157327242137089239581
+# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
+# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
+# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
+0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
+3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
+DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
+PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
+ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G2"
+# Serial: 15385348160840213938643033620894905419
+# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
+# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
+# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
+n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
+biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
+bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
+YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
+QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
+0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
+lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
+B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
+ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+IhNzbM8m9Yop5w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G3"
+# Serial: 15459312981008553731928384953135426796
+# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
+# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
+# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
+Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
+RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
+AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
+6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G2"
+# Serial: 4293743540046975378534879503202253541
+# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
+# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
+# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
+MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
+2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
+tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
+vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
+5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
+1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
+NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
+Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
+pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+MrY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G3"
+# Serial: 7089244469030293291760083333884364146
+# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
+# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
+# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
+Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
+EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
+IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
+fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
+BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
+sycX
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Trusted Root G4"
+# Serial: 7451500558977370777930084869016614236
+# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
+# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
+# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
+RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
+ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
+ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
+jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
+CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
+EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
+fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
+uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
+chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
+SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
+fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
+sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
+cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
+0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
+4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
+r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
+/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
+gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Label: "COMODO RSA Certification Authority"
+# Serial: 101909084537582093308941363524873193117
+# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
+# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
+# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
+6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
+pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
+9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
+/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
+Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
+qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
+SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
+u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
+Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
+crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
+/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
+wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
+4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
+2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
+FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
+boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
+jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
+0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
+NVOFBkpdn627G190
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Label: "USERTrust RSA Certification Authority"
+# Serial: 2645093764781058787591871645665788717
+# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
+# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
+# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Label: "USERTrust ECC Certification Authority"
+# Serial: 123013823720199481456569720443997572134
+# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
+# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
+# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
+eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
+JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
+Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
+VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
+o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
+A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
+zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
+RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Label: "GlobalSign ECC Root CA - R5"
+# Serial: 32785792099990507226680698011560947931244
+# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
+# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
+# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
+hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
+KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
+xwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Label: "IdenTrust Commercial Root CA 1"
+# Serial: 13298821034946342390520003877796839426
+# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
+# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
+# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
+VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
+JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
+3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
+bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
+T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
+vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
+Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
+dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
+c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
+l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
+iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
+ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
+LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
+nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
+AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
+l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
+mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
+7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Label: "IdenTrust Public Sector Root CA 1"
+# Serial: 13298821034946342390521976156843933698
+# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
+# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
+# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
+VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
+xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
+Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
+5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
+AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
+5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GB CA"
+# Serial: 157768595616588414422159278966750757568
+# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
+# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
+# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
+MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
+Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
+YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
+CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
+b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
+HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
+WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
+1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
+u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
+99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
+cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
+ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
+aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Label: "SZAFIR ROOT CA2"
+# Serial: 357043034767186914217277344587386743377558296292
+# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
+# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
+# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
+-----BEGIN CERTIFICATE-----
+MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
+BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
+ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
+NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
+Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
+QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
+3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
+3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
+3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
+BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
+XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
+AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
+8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
+nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
+oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
+d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
+LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA 2"
+# Serial: 44979900017204383099463764357512596969
+# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
+# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
+# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
+gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
+QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
+A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
+OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
+VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
+b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
+DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
+0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
+OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
+fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
+Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
+o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
+sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
+OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
+Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
+3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
+CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
+djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
+WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
+AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
+P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
+b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
+XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
+5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
+DrW5viSP
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
+# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
+# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
+-----BEGIN CERTIFICATE-----
+MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
+DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
+IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
+N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
+dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
+A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
+ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
+QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
+4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
+AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
+4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
+ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
+9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
+gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
+Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
+NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
+LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
+Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
+ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
+XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
+M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
+9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
+Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
+j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
+X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
+l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
+bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
+pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
+e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
+vm9qp/UsQu0yrbYhnr68
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
+# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
+# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
+-----BEGIN CERTIFICATE-----
+MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
+BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
+bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
+b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
+BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
+YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
+MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
+dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
+QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
+jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
+C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
+lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
+TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
+# Subject: CN=ISRG Root X1 O=Internet Security Research Group
+# Label: "ISRG Root X1"
+# Serial: 172886928669790476064670243504169061120
+# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
+# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
+# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+
+# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Label: "AC RAIZ FNMT-RCM"
+# Serial: 485876308206448804701554682760554759
+# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d
+# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20
+# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx
+CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ
+WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ
+BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG
+Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/
+yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf
+BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz
+WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF
+tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z
+374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC
+IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL
+mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7
+wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS
+MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2
+ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet
+UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H
+YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3
+LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
+nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1
+RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM
+LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf
+77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N
+JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm
+fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp
+6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp
+1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B
+9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok
+RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv
+uu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 1 O=Amazon
+# Subject: CN=Amazon Root CA 1 O=Amazon
+# Label: "Amazon Root CA 1"
+# Serial: 143266978916655856878034712317230054538369994
+# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6
+# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16
+# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 2 O=Amazon
+# Subject: CN=Amazon Root CA 2 O=Amazon
+# Label: "Amazon Root CA 2"
+# Serial: 143266982885963551818349160658925006970653239
+# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
+# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
+# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
+gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
+W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
+1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
+8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
+2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
+z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
+8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
+mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
+7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
+0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
+UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
+LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
+k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
+7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
+btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
+urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
+n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
+9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
+4PsJYGw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 3 O=Amazon
+# Subject: CN=Amazon Root CA 3 O=Amazon
+# Label: "Amazon Root CA 3"
+# Serial: 143266986699090766294700635381230934788665930
+# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
+# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
+# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
+ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
+ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
+BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
+YyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 4 O=Amazon
+# Subject: CN=Amazon Root CA 4 O=Amazon
+# Label: "Amazon Root CA 4"
+# Serial: 143266989758080763974105200630763877849284878
+# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
+# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
+# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
+9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
+M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
+MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
+CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
+1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
+# Serial: 1
+# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
+# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
+# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
+-----BEGIN CERTIFICATE-----
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
+bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
+KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
+BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
+dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
+IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
+QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
+TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
+LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
+a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
+LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
+YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
+iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
+AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
+V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
+AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
+IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
+lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
+8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
+lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Label: "GDCA TrustAUTH R5 ROOT"
+# Serial: 9009899650740120186
+# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4
+# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4
+# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93
+-----BEGIN CERTIFICATE-----
+MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE
+BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
+IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0
+MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV
+BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w
+HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj
+Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj
+TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u
+KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj
+qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm
+MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12
+ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP
+zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk
+L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC
+jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA
+HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC
+AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
+p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm
+DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5
+COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry
+L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf
+JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg
+IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io
+2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV
+09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq
+T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
+MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Label: "SSL.com Root Certification Authority RSA"
+# Serial: 8875640296558310041
+# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29
+# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb
+# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69
+-----BEGIN CERTIFICATE-----
+MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
+BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
+DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
+OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
+bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
+xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
+qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
+C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
+6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
+/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
+YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
+JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
+US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
+ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
+M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
+A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
+cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
+Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
+PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
+q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
+cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
+a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
+H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
+K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
+nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
+oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
+Ic2wBlX7Jz9TkHCpBB5XJ7k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com Root Certification Authority ECC"
+# Serial: 8495723813297216424
+# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e
+# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a
+# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65
+-----BEGIN CERTIFICATE-----
+MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
+WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
+b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
+b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
+7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
+CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD
+VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T
+kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+
+gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority RSA R2"
+# Serial: 6248227494352943350
+# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95
+# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a
+# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c
+-----BEGIN CERTIFICATE-----
+MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV
+BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE
+CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy
+MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G
+A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD
+DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf
+OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa
+4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9
+HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR
+aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA
+b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ
+Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV
+PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO
+pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu
+UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY
+MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
+HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4
+9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW
+s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5
+Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg
+cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM
+79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz
+/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt
+ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm
+Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK
+QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ
+w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi
+S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07
+mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority ECC"
+# Serial: 3182246526754555285
+# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90
+# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d
+# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8
+-----BEGIN CERTIFICATE-----
+MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx
+NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv
+bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA
+VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku
+WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX
+5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ
+ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg
+h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Label: "GlobalSign Root CA - R6"
+# Serial: 1417766617973444989252670301619537
+# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae
+# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1
+# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg
+MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx
+MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET
+MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI
+xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k
+ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD
+aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw
+LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw
+1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX
+k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2
+SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h
+bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n
+WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY
+rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce
+MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu
+bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN
+nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt
+Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61
+55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj
+vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf
+cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz
+oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp
+nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs
+pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v
+JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R
+8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4
+5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GC CA"
+# Serial: 44084345621038548146064804565436152554
+# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23
+# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31
+# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d
+-----BEGIN CERTIFICATE-----
+MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw
+CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91
+bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg
+Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ
+BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu
+ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS
+b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni
+eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W
+p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T
+rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV
+57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg
+Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Global G2 Root O=UniTrust
+# Subject: CN=UCA Global G2 Root O=UniTrust
+# Label: "UCA Global G2 Root"
+# Serial: 124779693093741543919145257850076631279
+# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8
+# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a
+# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH
+bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x
+CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds
+b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr
+b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9
+kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm
+VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R
+VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc
+C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj
+tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY
+D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv
+j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl
+NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6
+iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP
+O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV
+ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj
+L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5
+1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl
+1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU
+b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV
+PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj
+y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb
+EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg
+DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI
++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX
+UB+K+wb1whnw0A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Extended Validation Root O=UniTrust
+# Subject: CN=UCA Extended Validation Root O=UniTrust
+# Label: "UCA Extended Validation Root"
+# Serial: 106100277556486529736699587978573607008
+# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2
+# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a
+# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF
+eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx
+MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV
+BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog
+D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop
+O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk
+sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi
+c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj
+VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz
+KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/
+TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G
+sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs
+1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD
+fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T
+AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN
+l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR
+ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ
+VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5
+c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp
+4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s
+t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj
+2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO
+vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C
+xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx
+cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM
+fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Label: "Certigna Root CA"
+# Serial: 269714418870597844693661054334862075617
+# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77
+# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43
+# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68
+-----BEGIN CERTIFICATE-----
+MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw
+WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw
+MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x
+MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD
+VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX
+BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M
+CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu
+I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm
+TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh
+C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf
+ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz
+IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT
+Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k
+JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5
+hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB
+GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of
+1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov
+L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo
+dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr
+aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq
+hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L
+6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG
+HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6
+0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB
+lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi
+o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1
+gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v
+faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63
+Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh
+jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw
+3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign Root CA - G1"
+# Serial: 235931866688319308814040
+# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac
+# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c
+# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67
+-----BEGIN CERTIFICATE-----
+MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD
+VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU
+ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH
+MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO
+MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv
+Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz
+f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO
+8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq
+d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM
+tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt
+Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB
+o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD
+AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x
+PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM
+wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d
+GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH
+6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx
+iN66zB+Afko=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign ECC Root CA - G3"
+# Serial: 287880440101571086945156
+# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40
+# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1
+# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b
+-----BEGIN CERTIFICATE-----
+MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG
+EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo
+bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g
+RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ
+TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s
+b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0
+WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS
+fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB
+zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq
+hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB
+CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD
++JbNR6iC8hZVdyR+EhCVBCyj
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign Root CA - C1"
+# Serial: 825510296613316004955058
+# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68
+# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01
+# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG
+A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg
+SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v
+dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ
+BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ
+HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH
+3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH
+GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c
+xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1
+aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq
+TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87
+/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4
+kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG
+YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT
++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo
+WXzhriKi4gp6D/piq1JM4fHfyr6DDUI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign ECC Root CA - C3"
+# Serial: 582948710642506000014504
+# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5
+# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66
+# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3
+-----BEGIN CERTIFICATE-----
+MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG
+EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx
+IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND
+IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci
+MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti
+sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O
+BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c
+3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J
+0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Label: "Hongkong Post Root CA 3"
+# Serial: 46170865288971385588281144162979347873371282084
+# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0
+# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02
+# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6
+-----BEGIN CERTIFICATE-----
+MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL
+BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ
+SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n
+a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5
+NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT
+CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u
+Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO
+dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI
+VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV
+9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY
+2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY
+vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt
+bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb
+x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+
+l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK
+TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj
+Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e
+i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw
+DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG
+7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk
+MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr
+gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk
+GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS
+3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm
+Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+
+l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c
+JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP
+L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG
+mpv0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G4"
+# Serial: 289383649854506086828220374796556676440
+# MD5 Fingerprint: 89:53:f1:83:23:b7:7c:8e:05:f1:8c:71:38:4e:1f:88
+# SHA1 Fingerprint: 14:88:4e:86:26:37:b0:26:af:59:62:5c:40:77:ec:35:29:ba:96:01
+# SHA256 Fingerprint: db:35:17:d1:f6:73:2a:2d:5a:b9:7c:53:3e:c7:07:79:ee:32:70:a6:2f:b4:ac:42:38:37:24:60:e6:f0:1e:88
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw
+gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL
+Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg
+MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw
+BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0
+MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1
+c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ
+bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg
+Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B
+AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ
+2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E
+T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j
+5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM
+C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T
+DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX
+wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A
+2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm
+nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8
+dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl
+N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj
+c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS
+5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS
+Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr
+hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/
+B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI
+AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw
+H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+
+b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk
+2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol
+IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk
+5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY
+n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft ECC Root Certificate Authority 2017"
+# Serial: 136839042543790627607696632466672567020
+# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67
+# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5
+# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02
+-----BEGIN CERTIFICATE-----
+MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD
+VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw
+MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV
+UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy
+b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR
+ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb
+hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3
+FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV
+L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB
+iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft RSA Root Certificate Authority 2017"
+# Serial: 40975477897264996090493496164228220339
+# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47
+# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74
+# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0
+-----BEGIN CERTIFICATE-----
+MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl
+MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw
+NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5
+IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG
+EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N
+aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ
+Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0
+ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1
+HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm
+gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ
+jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc
+aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG
+YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6
+W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K
+UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH
++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q
+W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC
+LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC
+gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6
+tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh
+SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2
+TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3
+pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR
+xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp
+GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9
+dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN
+AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB
+RA+GsCyRxj3qrg+E
+-----END CERTIFICATE-----
+
+# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Label: "e-Szigno Root CA 2017"
+# Serial: 411379200276854331539784714
+# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98
+# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1
+# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99
+-----BEGIN CERTIFICATE-----
+MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV
+BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk
+LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv
+b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ
+BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg
+THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v
+IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv
+xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H
+Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB
+eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo
+jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ
++efcMQ==
+-----END CERTIFICATE-----
+
+# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Label: "certSIGN Root CA G2"
+# Serial: 313609486401300475190
+# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7
+# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32
+# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05
+-----BEGIN CERTIFICATE-----
+MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV
+BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g
+Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ
+BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ
+R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF
+dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw
+vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ
+uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp
+n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs
+cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW
+xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P
+rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF
+DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx
+DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy
+LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C
+eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ
+d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq
+kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC
+b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl
+qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0
+OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c
+NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk
+ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO
+pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj
+03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk
+PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE
+1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX
+QRBdJ3NghVdJIgc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global Certification Authority"
+# Serial: 1846098327275375458322922162
+# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e
+# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5
+# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8
+-----BEGIN CERTIFICATE-----
+MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw
+CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x
+ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1
+c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx
+OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI
+SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI
+b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn
+swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu
+7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8
+1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW
+80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP
+JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l
+RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw
+hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10
+coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc
+BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n
+twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud
+EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud
+DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W
+0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe
+uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q
+lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB
+aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE
+sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT
+MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe
+qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh
+VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8
+h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9
+EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK
+yeC2nOnOcXHebD8WpHk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global ECC P256 Certification Authority"
+# Serial: 4151900041497450638097112925
+# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54
+# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf
+# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4
+-----BEGIN CERTIFICATE-----
+MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
+BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
+YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
+NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G
+A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
+d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
+Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG
+SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN
+FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w
+DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw
+CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh
+DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global ECC P384 Certification Authority"
+# Serial: 2704997926503831671788816187
+# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6
+# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2
+# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97
+-----BEGIN CERTIFICATE-----
+MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
+BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
+YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
+NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G
+A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
+d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
+Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ
+j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF
+1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G
+A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3
+AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC
+MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu
+Sw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
+# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
+# Label: "NAVER Global Root Certification Authority"
+# Serial: 9013692873798656336226253319739695165984492813
+# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b
+# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1
+# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65
+-----BEGIN CERTIFICATE-----
+MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM
+BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG
+T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx
+CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD
+b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA
+iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH
+38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE
+HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz
+kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP
+szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq
+vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf
+nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG
+YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo
+0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a
+CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K
+AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I
+36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
+Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN
+qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj
+cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm
++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL
+hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe
+lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7
+p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8
+piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR
+LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX
+5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO
+dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul
+9XXeifdy
+-----END CERTIFICATE-----
+
+# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
+# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
+# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS"
+# Serial: 131542671362353147877283741781055151509
+# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb
+# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a
+# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb
+-----BEGIN CERTIFICATE-----
+MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw
+CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw
+FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S
+Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5
+MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL
+DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS
+QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK
+Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu
+SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC
+MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy
+v+c=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa
+# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa
+# Label: "GlobalSign Root R46"
+# Serial: 1552617688466950547958867513931858518042577
+# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef
+# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90
+# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA
+MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD
+VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy
+MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt
+c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ
+OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG
+vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud
+316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo
+0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE
+y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF
+zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE
++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN
+I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs
+x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa
+ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC
+4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4
+7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg
+JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti
+2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk
+pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF
+FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt
+rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk
+ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5
+u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP
+4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6
+N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3
+vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa
+# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa
+# Label: "GlobalSign Root E46"
+# Serial: 1552617690338932563915843282459653771421763
+# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f
+# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84
+# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58
+-----BEGIN CERTIFICATE-----
+MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx
+CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD
+ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw
+MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq
+R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd
+yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ
+7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8
++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
+# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
+# Label: "GLOBALTRUST 2020"
+# Serial: 109160994242082918454945253
+# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8
+# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2
+# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a
+-----BEGIN CERTIFICATE-----
+MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG
+A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw
+FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx
+MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u
+aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq
+hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b
+RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z
+YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3
+QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw
+yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+
+BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ
+SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH
+r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0
+4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me
+dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw
+q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2
+nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu
+H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA
+VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC
+XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd
+6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf
++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi
+kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7
+wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB
+TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C
+MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn
+4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I
+aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy
+qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
+# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
+# Label: "ANF Secure Server Root CA"
+# Serial: 996390341000653745
+# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96
+# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74
+# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99
+-----BEGIN CERTIFICATE-----
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV
+BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk
+YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV
+BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN
+MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF
+UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD
+VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v
+dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj
+cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q
+yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH
+2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX
+H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL
+zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR
+p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz
+W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/
+SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn
+LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3
+n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B
+u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj
+o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
+AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L
+9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej
+rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK
+pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0
+vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq
+OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ
+/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9
+2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI
++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2
+MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo
+tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum EC-384 CA"
+# Serial: 160250656287871593594747141429395092468
+# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1
+# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed
+# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6
+-----BEGIN CERTIFICATE-----
+MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw
+CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw
+JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT
+EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0
+WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT
+LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX
+BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE
+KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm
+Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8
+EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J
+UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn
+nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Root CA"
+# Serial: 40870380103424195783807378461123655149
+# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29
+# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5
+# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd
+-----BEGIN CERTIFICATE-----
+MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6
+MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu
+MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV
+BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw
+MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg
+U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo
+b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ
+n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q
+p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq
+NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF
+8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3
+HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa
+mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi
+7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF
+ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P
+qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ
+v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6
+Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1
+vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD
+ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4
+WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo
+zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR
+5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ
+GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq
+0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D
+P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM
+qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP
+0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf
+E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb
+-----END CERTIFICATE-----
+
+# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
+# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
+# Label: "TunTrust Root CA"
+# Serial: 108534058042236574382096126452369648152337120275
+# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4
+# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb
+# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41
+-----BEGIN CERTIFICATE-----
+MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL
+BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg
+Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv
+b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG
+EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u
+IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ
+n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd
+2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF
+VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ
+GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF
+li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU
+r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2
+eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb
+MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg
+jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB
+7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW
+5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE
+ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0
+90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z
+xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu
+QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4
+FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH
+22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP
+xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn
+dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5
+Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b
+nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ
+CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH
+u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj
+d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o=
+-----END CERTIFICATE-----
+
+# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Label: "HARICA TLS RSA Root CA 2021"
+# Serial: 76817823531813593706434026085292783742
+# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91
+# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d
+# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d
+-----BEGIN CERTIFICATE-----
+MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs
+MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg
+Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL
+MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl
+YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv
+b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l
+mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE
+4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv
+a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M
+pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw
+Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b
+LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY
+AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB
+AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq
+E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr
+W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ
+CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE
+AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU
+X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3
+f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja
+H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP
+JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P
+zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt
+jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0
+/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT
+BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79
+aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW
+xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU
+63ZTGI0RmLo=
+-----END CERTIFICATE-----
+
+# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Label: "HARICA TLS ECC Root CA 2021"
+# Serial: 137515985548005187474074462014555733966
+# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0
+# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48
+# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01
+-----BEGIN CERTIFICATE-----
+MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw
+CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh
+cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v
+dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG
+A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj
+aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg
+Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7
+KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y
+STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD
+AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw
+SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN
+nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 1977337328857672817
+# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3
+# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe
+# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1
+MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc
+tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd
+IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j
+b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC
+AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw
+ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m
+iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF
+Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ
+hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P
+Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE
+EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV
+1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t
+CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR
+5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw
+f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9
+ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK
+GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV
+-----END CERTIFICATE-----
+
+# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
+# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
+# Label: "vTrus ECC Root CA"
+# Serial: 630369271402956006249506845124680065938238527194
+# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85
+# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1
+# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3
+-----BEGIN CERTIFICATE-----
+MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw
+RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY
+BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz
+MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u
+LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0
+v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd
+e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw
+V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA
+AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG
+GJTO
+-----END CERTIFICATE-----
+
+# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd.
+# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd.
+# Label: "vTrus Root CA"
+# Serial: 387574501246983434957692974888460947164905180485
+# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc
+# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7
+# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87
+-----BEGIN CERTIFICATE-----
+MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL
+BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x
+FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx
+MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s
+THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc
+IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU
+AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+
+GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9
+8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH
+flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt
+J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim
+0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN
+pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ
+UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW
+OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB
+AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet
+8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd
+nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j
+bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM
+Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv
+TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS
+S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr
+I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9
+b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB
+UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P
+Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven
+sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X2 O=Internet Security Research Group
+# Subject: CN=ISRG Root X2 O=Internet Security Research Group
+# Label: "ISRG Root X2"
+# Serial: 87493402998870891108772069816698636114
+# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5
+# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af
+# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70
+-----BEGIN CERTIFICATE-----
+MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw
+CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg
+R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00
+MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT
+ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw
+EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW
++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9
+ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI
+zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW
+tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1
+/q4AaOeMSQ+2b1tbFfLn
+-----END CERTIFICATE-----
+
+# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd.
+# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd.
+# Label: "HiPKI Root CA - G1"
+# Serial: 60966262342023497858655262305426234976
+# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3
+# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60
+# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc
+-----BEGIN CERTIFICATE-----
+MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa
+Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3
+YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw
+qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv
+Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6
+lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz
+Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ
+KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK
+FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj
+HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr
+y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ
+/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM
+a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6
+fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG
+SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi
+7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc
+SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza
+ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc
+XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg
+iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho
+L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF
+Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+
+vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU
+YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Label: "GlobalSign ECC Root CA - R4"
+# Serial: 159662223612894884239637590694
+# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc
+# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28
+# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2
+-----BEGIN CERTIFICATE-----
+MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD
+VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw
+MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g
+UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT
+BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx
+uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV
+HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/
++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147
+bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R1 O=Google Trust Services LLC
+# Subject: CN=GTS Root R1 O=Google Trust Services LLC
+# Label: "GTS Root R1"
+# Serial: 159662320309726417404178440727
+# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40
+# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a
+# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf
+-----BEGIN CERTIFICATE-----
+MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
+27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
+Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
+TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
+qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
+szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
+Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
+MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
+wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
+aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
+VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
+AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
+C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
+QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
+h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
+7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
+ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
+MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
+6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
+0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
+2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
+bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R2 O=Google Trust Services LLC
+# Subject: CN=GTS Root R2 O=Google Trust Services LLC
+# Label: "GTS Root R2"
+# Serial: 159662449406622349769042896298
+# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc
+# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94
+# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8
+-----BEGIN CERTIFICATE-----
+MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt
+nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY
+6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu
+MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k
+RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg
+f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV
++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo
+dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW
+Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa
+G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq
+gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID
+AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H
+vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8
+0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC
+B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u
+NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg
+yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev
+HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6
+xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR
+TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg
+JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV
+7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl
+6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R3 O=Google Trust Services LLC
+# Subject: CN=GTS Root R3 O=Google Trust Services LLC
+# Label: "GTS Root R3"
+# Serial: 159662495401136852707857743206
+# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73
+# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46
+# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48
+-----BEGIN CERTIFICATE-----
+MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD
+VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG
+A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw
+WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz
+IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G
+jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2
+4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7
+VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm
+ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R4 O=Google Trust Services LLC
+# Subject: CN=GTS Root R4 O=Google Trust Services LLC
+# Label: "GTS Root R4"
+# Serial: 159662532700760215368942768210
+# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8
+# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47
+# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d
+-----BEGIN CERTIFICATE-----
+MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD
+VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG
+A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw
+WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz
+IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi
+QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR
+HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D
+9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8
+p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD
+-----END CERTIFICATE-----
+
+# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj
+# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj
+# Label: "Telia Root CA v2"
+# Serial: 7288924052977061235122729490515358
+# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48
+# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd
+# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx
+CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE
+AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1
+NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ
+MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq
+AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9
+vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9
+lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD
+n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT
+7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o
+6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC
+TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6
+WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R
+DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI
+pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj
+YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy
+rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw
+AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ
+8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi
+0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS
+SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K
+TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF
+6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er
+3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt
+Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT
+VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW
+ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA
+rBPuUBQemMc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH
+# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH
+# Label: "D-TRUST BR Root CA 1 2020"
+# Serial: 165870826978392376648679885835942448534
+# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed
+# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67
+# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44
+-----BEGIN CERTIFICATE-----
+MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw
+CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS
+VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5
+NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG
+A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS
+zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0
+QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/
+VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g
+PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf
+Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l
+dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1
+c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO
+PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW
+wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV
+dWNbFJWcHwHP2NVypw87
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH
+# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH
+# Label: "D-TRUST EV Root CA 1 2020"
+# Serial: 126288379621884218666039612629459926992
+# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e
+# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07
+# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db
+-----BEGIN CERTIFICATE-----
+MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw
+CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS
+VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5
+NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG
+A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC
+/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD
+wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3
+OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g
+PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf
+Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l
+dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1
+c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO
+PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb
+gfM0agPnIjhQW+0ZT0MW
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc.
+# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc.
+# Label: "DigiCert TLS ECC P384 Root G5"
+# Serial: 13129116028163249804115411775095713523
+# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed
+# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee
+# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05
+-----BEGIN CERTIFICATE-----
+MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp
+Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2
+MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ
+bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG
+ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS
+7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp
+0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS
+B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49
+BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ
+LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4
+DXZDjC5Ty3zfDBeWUA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc.
+# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc.
+# Label: "DigiCert TLS RSA4096 Root G5"
+# Serial: 11930366277458970227240571539258396554
+# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1
+# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35
+# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN
+MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT
+HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN
+NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs
+IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+
+ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0
+2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp
+wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM
+pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD
+nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po
+sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx
+Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd
+Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX
+KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe
+XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL
+tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv
+TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN
+AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw
+GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H
+PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF
+O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ
+REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik
+AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+
+p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw
+MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF
+qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK
+ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certainly Root R1 O=Certainly
+# Subject: CN=Certainly Root R1 O=Certainly
+# Label: "Certainly Root R1"
+# Serial: 188833316161142517227353805653483829216
+# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12
+# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af
+# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0
+-----BEGIN CERTIFICATE-----
+MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw
+PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy
+dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0
+YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2
+1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT
+vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed
+aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0
+1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5
+r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5
+cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ
+wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ
+6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA
+2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH
+Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR
+eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB
+/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u
+d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr
+PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d
+8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi
+1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd
+rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di
+taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7
+lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj
+yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn
+Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy
+yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n
+wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6
+OV+KmalBWQewLK8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certainly Root E1 O=Certainly
+# Subject: CN=Certainly Root E1 O=Certainly
+# Label: "Certainly Root E1"
+# Serial: 8168531406727139161245376702891150584
+# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9
+# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b
+# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2
+-----BEGIN CERTIFICATE-----
+MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw
+CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu
+bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ
+BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s
+eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2
+QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4
+hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm
+ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG
+BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
+# Subject: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
+# Label: "E-Tugra Global Root CA RSA v3"
+# Serial: 75951268308633135324246244059508261641472512052
+# MD5 Fingerprint: 22:be:10:f6:c2:f8:03:88:73:5f:33:29:47:28:47:a4
+# SHA1 Fingerprint: e9:a8:5d:22:14:52:1c:5b:aa:0a:b4:be:24:6a:23:8a:c9:ba:e2:a9
+# SHA256 Fingerprint: ef:66:b0:b1:0a:3c:db:9f:2e:36:48:c7:6b:d2:af:18:ea:d2:bf:e6:f1:17:65:5e:28:c4:06:0d:a1:a3:f4:c2
+-----BEGIN CERTIFICATE-----
+MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQEL
+BQAwgYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUt
+VHVncmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYw
+JAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIFJTQSB2MzAeFw0yMDAzMTgw
+OTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMG
+QW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1
+Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBD
+QSBSU0EgdjMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J7
+7gnJY9LTQ91ew6aEOErxjYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscx
+uj7X/iWpKo429NEvx7epXTPcMHD4QGxLsqYxYdE0PD0xesevxKenhOGXpOhL9hd8
+7jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF/YP9f4RtNGx/ardLAQO/
+rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8qQedmCeFL
+l+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bG
+wzrwbMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4
+znKS4iicvObpCdg604nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBO
+M/J+JjKsBY04pOZ2PJ8QaQ5tndLBeSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK
+5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiMbIedBi3x7+PmBvrFZhNb/FAH
+nnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbgh3cXTJ2w2Amo
+DVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD
+AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSy
+tK7mLfcm1ap1LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEL
+BQADggIBAImocn+M684uGMQQgC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ
+6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN438o2Fi+CiJ+8EUdPdk3ILY7r3y18
+Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/qln0F7psTpURs+APQ
+3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3sSdPk
+vmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn9
+9t2HVhjYsCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQ
+mhty3QUBjYZgv6Rn7rWlDdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YA
+VSgU7NbHEqIbZULpkejLPoeJVF3Zr52XnGnnCv8PWniLYypMfUeUP95L6VPQMPHF
+9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFHIK+WEj5jlB0E5y67hscM
+moi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiXYY60MGo8
+bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
+# Subject: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
+# Label: "E-Tugra Global Root CA ECC v3"
+# Serial: 218504919822255052842371958738296604628416471745
+# MD5 Fingerprint: 46:bc:81:bb:f1:b5:1e:f7:4b:96:bc:14:e2:e7:27:64
+# SHA1 Fingerprint: 8a:2f:af:57:53:b1:b0:e6:a1:04:ec:5b:6a:69:71:6d:f6:1c:e2:84
+# SHA256 Fingerprint: 87:3f:46:85:fa:7f:56:36:25:25:2e:6d:36:bc:d7:f1:6f:c2:49:51:f2:64:e4:7e:1b:95:4f:49:08:cd:ca:13
+-----BEGIN CERTIFICATE-----
+MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMw
+gYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVn
+cmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYD
+VQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIEVDQyB2MzAeFw0yMDAzMTgwOTQ2
+NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMGQW5r
+YXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1Z3Jh
+IFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBF
+Q0MgdjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQ
+KczLWYHMjLiSF4mDKpL2w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YK
+fWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMB
+Af8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQzPUwHQYDVR0OBBYEFP+C
+MXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNp
+ADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/6
+7W4WAie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFx
+vmjkI6TZraE3
+-----END CERTIFICATE-----
+
+# Issuer: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD.
+# Subject: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD.
+# Label: "Security Communication RootCA3"
+# Serial: 16247922307909811815
+# MD5 Fingerprint: 1c:9a:16:ff:9e:5c:e0:4d:8a:14:01:f4:35:5d:29:26
+# SHA1 Fingerprint: c3:03:c8:22:74:92:e5:61:a2:9c:5f:79:91:2b:1e:44:13:91:30:3a
+# SHA256 Fingerprint: 24:a5:5c:2a:b0:51:44:2d:06:17:76:65:41:23:9a:4a:d0:32:d7:c5:51:75:aa:34:ff:de:2f:bc:4f:5c:52:94
+-----BEGIN CERTIFICATE-----
+MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNV
+BAYTAkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScw
+JQYDVQQDEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2
+MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc
+U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UEAxMeU2VjdXJpdHkg
+Q29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4r
+CmDvu20rhvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzA
+lrenfna84xtSGc4RHwsENPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MG
+TfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF7
+9+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGmnpjKIG58u4iFW/vAEGK7
+8vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtYXLVqAvO4
+g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3we
+GVPKp7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst
++3A7caoreyYn8xrC3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M
+0V9hvqG8OmpI6iZVIhZdXw3/JzOfGAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQ
+T9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0VcwCBEF/VfR2ccCAwEAAaNCMEAw
+HQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS
+YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PA
+FNr0Y/Dq9HHuTofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd
+9XbXv8S2gVj/yP9kaWJ5rW4OH3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQI
+UYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASxYfQAW0q3nHE3GYV5v4GwxxMOdnE+
+OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZXSEIx2C/pHF7uNke
+gr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml+LLf
+iAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUV
+nuiZIesnKwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD
+2NCcnWXL0CsnMQMeNuE9dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI//
+1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm6Vwdp6POXiUyK+OVrCoHzrQoeIY8Laad
+TdJ0MN1kURXbg4NR16/9M51NZg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
+# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
+# Label: "Security Communication ECC RootCA1"
+# Serial: 15446673492073852651
+# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86
+# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41
+# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11
+-----BEGIN CERTIFICATE-----
+MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT
+AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD
+VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx
+NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT
+HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5
+IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl
+dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK
+ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu
+9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O
+be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k=
+-----END CERTIFICATE-----
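Each certificate entry above pairs a PEM block with the fingerprints of its DER encoding. As a rough sketch of how the `SHA256 Fingerprint` header lines can be recomputed with only the standard library — the `pem` argument stands in for any single BEGIN/END block from this bundle:

    import hashlib
    import ssl

    def sha256_fingerprint(pem: str) -> str:
        # Decode the base64 PEM body back to DER, then hash the raw bytes;
        # the headers above print the digest as colon-separated lowercase hex.
        der = ssl.PEM_cert_to_DER_cert(pem)
        return ":".join(f"{b:02x}" for b in hashlib.sha256(der).digest())

The MD5 and SHA1 lines follow the same scheme with hashlib.md5 and hashlib.sha1 over the same DER bytes.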
diff --git a/third_party/python/certifi/certifi/core.py b/third_party/python/certifi/certifi/core.py
new file mode 100644
index 0000000000..de028981b9
--- /dev/null
+++ b/third_party/python/certifi/certifi/core.py
@@ -0,0 +1,108 @@
+"""
+certifi/core.py
+~~~~~~~~~~~~~~~
+
+This module returns the installation location of cacert.pem or its contents.
+"""
+import sys
+
+
+if sys.version_info >= (3, 11):
+
+ from importlib.resources import as_file, files
+
+ _CACERT_CTX = None
+ _CACERT_PATH = None
+
+ def where() -> str:
+ # This is slightly terrible, but we want to delay extracting the file
+ # in cases where we're inside of a zipimport situation until someone
+ # actually calls where(), but we don't want to re-extract the file
+ # on every call of where(), so we'll do it once then store it in a
+ # global variable.
+ global _CACERT_CTX
+ global _CACERT_PATH
+ if _CACERT_PATH is None:
+ # This is slightly janky, the importlib.resources API wants you to
+ # manage the cleanup of this file, so it doesn't actually return a
+ # path, it returns a context manager that will give you the path
+ # when you enter it and will do any cleanup when you leave it. In
+ # the common case of not needing a temporary file, it will just
+ # return the file system location and the __exit__() is a no-op.
+ #
+ # We also have to hold onto the actual context manager, because
+ # it will do the cleanup whenever it gets garbage collected, so
+ # we will also store that at the global level as well.
+ _CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
+
+ return _CACERT_PATH
+
+ def contents() -> str:
+ return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")
+
+elif sys.version_info >= (3, 7):
+
+ from importlib.resources import path as get_path, read_text
+
+ _CACERT_CTX = None
+ _CACERT_PATH = None
+
+ def where() -> str:
+ # This is slightly terrible, but we want to delay extracting the
+ # file in cases where we're inside of a zipimport situation until
+ # someone actually calls where(), but we don't want to re-extract
+ # the file on every call of where(), so we'll do it once then store
+ # it in a global variable.
+ global _CACERT_CTX
+ global _CACERT_PATH
+ if _CACERT_PATH is None:
+ # This is slightly janky, the importlib.resources API wants you
+ # to manage the cleanup of this file, so it doesn't actually
+ # return a path, it returns a context manager that will give
+ # you the path when you enter it and will do any cleanup when
+ # you leave it. In the common case of not needing a temporary
+ # file, it will just return the file system location and the
+ # __exit__() is a no-op.
+ #
+ # We also have to hold onto the actual context manager, because
+ # it will do the cleanup whenever it gets garbage collected, so
+ # we will also store that at the global level as well.
+ _CACERT_CTX = get_path("certifi", "cacert.pem")
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
+
+ return _CACERT_PATH
+
+ def contents() -> str:
+ return read_text("certifi", "cacert.pem", encoding="ascii")
+
+else:
+ import os
+ import types
+ from typing import Union
+
+ Package = Union[types.ModuleType, str]
+ Resource = Union[str, "os.PathLike"]
+
+ # This fallback will work for Python versions prior to 3.7 that lack the
+ # importlib.resources module but relies on the existing `where` function
+ # so won't address issues with environments like PyOxidizer that don't set
+ # __file__ on modules.
+ def read_text(
+ package: Package,
+ resource: Resource,
+ encoding: str = 'utf-8',
+ errors: str = 'strict'
+ ) -> str:
+ with open(where(), encoding=encoding) as data:
+ return data.read()
+
+ # If we don't have importlib.resources, then we will just do the old logic
+ # of assuming we're on the filesystem and munge the path directly.
+ def where() -> str:
+ f = os.path.dirname(__file__)
+
+ return os.path.join(f, "cacert.pem")
+
+ def contents() -> str:
+ return read_text("certifi", "cacert.pem", encoding="ascii")
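Whichever branch above is selected at import time, callers see the same two helpers: where() yields a filesystem path to the bundled cacert.pem and contents() yields its PEM text. A minimal sketch of typical consumption — the URL is only a placeholder host — wires where() into a TLS context:

    import ssl
    import urllib.request

    import certifi

    # Trust the bundled Mozilla roots rather than whatever the host OS ships.
    context = ssl.create_default_context(cafile=certifi.where())
    with urllib.request.urlopen("https://example.org/", context=context) as resp:
        print(resp.status)

The indirection through where() matters in zipimport setups: the first call may extract cacert.pem to a temporary location, and the module-level context manager keeps that file alive for the life of the process.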
diff --git a/third_party/python/certifi/certifi/py.typed b/third_party/python/certifi/certifi/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/certifi/certifi/py.typed
diff --git a/third_party/python/chardet/chardet-4.0.0.dist-info/LICENSE b/third_party/python/chardet/chardet-4.0.0.dist-info/LICENSE
new file mode 100644
index 0000000000..8add30ad59
--- /dev/null
+++ b/third_party/python/chardet/chardet-4.0.0.dist-info/LICENSE
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/third_party/python/chardet/chardet-4.0.0.dist-info/METADATA b/third_party/python/chardet/chardet-4.0.0.dist-info/METADATA
new file mode 100644
index 0000000000..590bcc32a7
--- /dev/null
+++ b/third_party/python/chardet/chardet-4.0.0.dist-info/METADATA
@@ -0,0 +1,101 @@
+Metadata-Version: 2.1
+Name: chardet
+Version: 4.0.0
+Summary: Universal encoding detector for Python 2 and 3
+Home-page: https://github.com/chardet/chardet
+Author: Mark Pilgrim
+Author-email: mark@diveintomark.org
+Maintainer: Daniel Blanchard
+Maintainer-email: dan.blanchard@gmail.com
+License: LGPL
+Keywords: encoding,i18n,xml
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Linguistic
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+
+Chardet: The Universal Character Encoding Detector
+--------------------------------------------------
+
+.. image:: https://img.shields.io/travis/chardet/chardet/stable.svg
+ :alt: Build status
+ :target: https://travis-ci.org/chardet/chardet
+
+.. image:: https://img.shields.io/coveralls/chardet/chardet/stable.svg
+ :target: https://coveralls.io/r/chardet/chardet
+
+.. image:: https://img.shields.io/pypi/v/chardet.svg
+ :target: https://warehouse.python.org/project/chardet/
+ :alt: Latest version on PyPI
+
+.. image:: https://img.shields.io/pypi/l/chardet.svg
+ :alt: License
+
+
+Detects
+ - ASCII, UTF-8, UTF-16 (2 variants), UTF-32 (4 variants)
+ - Big5, GB2312, EUC-TW, HZ-GB-2312, ISO-2022-CN (Traditional and Simplified Chinese)
+ - EUC-JP, SHIFT_JIS, CP932, ISO-2022-JP (Japanese)
+ - EUC-KR, ISO-2022-KR (Korean)
+ - KOI8-R, MacCyrillic, IBM855, IBM866, ISO-8859-5, windows-1251 (Cyrillic)
+ - ISO-8859-5, windows-1251 (Bulgarian)
+ - ISO-8859-1, windows-1252 (Western European languages)
+ - ISO-8859-7, windows-1253 (Greek)
+ - ISO-8859-8, windows-1255 (Visual and Logical Hebrew)
+ - TIS-620 (Thai)
+
+.. note::
+ Our ISO-8859-2 and windows-1250 (Hungarian) probers have been temporarily
+ disabled until we can retrain the models.
+
+Requires Python 2.7 or 3.5+.
+
+Installation
+------------
+
+Install from `PyPI <https://pypi.org/project/chardet/>`_::
+
+ pip install chardet
+
+Documentation
+-------------
+
+For users, docs are now available at https://chardet.readthedocs.io/.
+
+Command-line Tool
+-----------------
+
+chardet comes with a command-line script which reports on the encodings of one
+or more files::
+
+ % chardetect somefile someotherfile
+ somefile: windows-1252 with confidence 0.5
+ someotherfile: ascii with confidence 1.0
+
+About
+-----
+
+This is a continuation of Mark Pilgrim's excellent chardet. Previously, two
+versions needed to be maintained: one that supported Python 2.x and one that
+supported Python 3.x. We've recently merged with `Ian Cordasco <https://github.com/sigmavirus24>`_'s
+`charade <https://github.com/sigmavirus24/charade>`_ fork, so now we have one
+coherent version that works for Python 2.7 and 3.5+.
+
+:maintainer: Dan Blanchard
+
+
diff --git a/third_party/python/chardet/chardet-4.0.0.dist-info/RECORD b/third_party/python/chardet/chardet-4.0.0.dist-info/RECORD
new file mode 100644
index 0000000000..d471390d2f
--- /dev/null
+++ b/third_party/python/chardet/chardet-4.0.0.dist-info/RECORD
@@ -0,0 +1,49 @@
+chardet/__init__.py,sha256=mWZaWmvZkhwfBEAT9O1Y6nRTfKzhT7FHhQTTAujbqUA,3271
+chardet/big5freq.py,sha256=D_zK5GyzoVsRes0HkLJziltFQX0bKCLOrFe9_xDvO_8,31254
+chardet/big5prober.py,sha256=kBxHbdetBpPe7xrlb-e990iot64g_eGSLd32lB7_h3M,1757
+chardet/chardistribution.py,sha256=3woWS62KrGooKyqz4zQSnjFbJpa6V7g02daAibTwcl8,9411
+chardet/charsetgroupprober.py,sha256=GZLReHP6FRRn43hvSOoGCxYamErKzyp6RgOQxVeC3kg,3839
+chardet/charsetprober.py,sha256=KSmwJErjypyj0bRZmC5F5eM7c8YQgLYIjZXintZNstg,5110
+chardet/codingstatemachine.py,sha256=VYp_6cyyki5sHgXDSZnXW4q1oelHc3cu9AyQTX7uug8,3590
+chardet/compat.py,sha256=40zr6wICZwknxyuLGGcIOPyve8DTebBCbbvttvnmp5Q,1200
+chardet/cp949prober.py,sha256=TZ434QX8zzBsnUvL_8wm4AQVTZ2ZkqEEQL_lNw9f9ow,1855
+chardet/enums.py,sha256=Aimwdb9as1dJKZaFNUH2OhWIVBVd6ZkJJ_WK5sNY8cU,1661
+chardet/escprober.py,sha256=kkyqVg1Yw3DIOAMJ2bdlyQgUFQhuHAW8dUGskToNWSc,3950
+chardet/escsm.py,sha256=RuXlgNvTIDarndvllNCk5WZBIpdCxQ0kcd9EAuxUh84,10510
+chardet/eucjpprober.py,sha256=iD8Jdp0ISRjgjiVN7f0e8xGeQJ5GM2oeZ1dA8nbSeUw,3749
+chardet/euckrfreq.py,sha256=-7GdmvgWez4-eO4SuXpa7tBiDi5vRXQ8WvdFAzVaSfo,13546
+chardet/euckrprober.py,sha256=MqFMTQXxW4HbzIpZ9lKDHB3GN8SP4yiHenTmf8g_PxY,1748
+chardet/euctwfreq.py,sha256=No1WyduFOgB5VITUA7PLyC5oJRNzRyMbBxaKI1l16MA,31621
+chardet/euctwprober.py,sha256=13p6EP4yRaxqnP4iHtxHOJ6R2zxHq1_m8hTRjzVZ95c,1747
+chardet/gb2312freq.py,sha256=JX8lsweKLmnCwmk8UHEQsLgkr_rP_kEbvivC4qPOrlc,20715
+chardet/gb2312prober.py,sha256=gGvIWi9WhDjE-xQXHvNIyrnLvEbMAYgyUSZ65HUfylw,1754
+chardet/hebrewprober.py,sha256=c3SZ-K7hvyzGY6JRAZxJgwJ_sUS9k0WYkvMY00YBYFo,13838
+chardet/jisfreq.py,sha256=vpmJv2Bu0J8gnMVRPHMFefTRvo_ha1mryLig8CBwgOg,25777
+chardet/jpcntx.py,sha256=PYlNqRUQT8LM3cT5FmHGP0iiscFlTWED92MALvBungo,19643
+chardet/langbulgarianmodel.py,sha256=r6tvOtO8FqhnbWBB5V4czcl1fWM4pB9lGiWQU-8gvsw,105685
+chardet/langgreekmodel.py,sha256=1cMu2wUgPB8bQ2RbVjR4LNwCCETgQ-Dwo0Eg2_uB11s,99559
+chardet/langhebrewmodel.py,sha256=urMmJHHIXtCwaWAqy1zEY_4SmwwNzt730bDOtjXzRjs,98764
+chardet/langhungarianmodel.py,sha256=ODAisvqCfes8B4FeyM_Pg9HY3ZDnEyaCiT4Bxyzoc6w,102486
+chardet/langrussianmodel.py,sha256=sPqkrBbX0QVwwy6oqRl-x7ERv2J4-zaMoCvLpkSsSJI,131168
+chardet/langthaimodel.py,sha256=ppoKOGL9OPdj9A4CUyG8R48zbnXt9MN1WXeCYepa6sc,103300
+chardet/langturkishmodel.py,sha256=H3ldicI_rhlv0r3VFpVWtUL6X30Wy596v7_YHz2sEdg,95934
+chardet/latin1prober.py,sha256=S2IoORhFk39FEFOlSFWtgVybRiP6h7BlLldHVclNkU8,5370
+chardet/mbcharsetprober.py,sha256=AR95eFH9vuqSfvLQZN-L5ijea25NOBCoXqw8s5O9xLQ,3413
+chardet/mbcsgroupprober.py,sha256=h6TRnnYq2OxG1WdD5JOyxcdVpn7dG0q-vB8nWr5mbh4,2012
+chardet/mbcssm.py,sha256=SY32wVIF3HzcjY3BaEspy9metbNSKxIIB0RKPn7tjpI,25481
+chardet/sbcharsetprober.py,sha256=nmyMyuxzG87DN6K3Rk2MUzJLMLR69MrWpdnHzOwVUwQ,6136
+chardet/sbcsgroupprober.py,sha256=hqefQuXmiFyDBArOjujH6hd6WFXlOD1kWCsxDhjx5Vc,4309
+chardet/sjisprober.py,sha256=IIt-lZj0WJqK4rmUZzKZP4GJlE8KUEtFYVuY96ek5MQ,3774
+chardet/universaldetector.py,sha256=DpZTXCX0nUHXxkQ9sr4GZxGB_hveZ6hWt3uM94cgWKs,12503
+chardet/utf8prober.py,sha256=IdD8v3zWOsB8OLiyPi-y_fqwipRFxV9Nc1eKBLSuIEw,2766
+chardet/version.py,sha256=A4CILFAd8MRVG1HoXPp45iK9RLlWyV73a1EtwE8Tvn8,242
+chardet/cli/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+chardet/cli/chardetect.py,sha256=kUPeQCi-olObXpOq5MtlKuBn1EU19rkeenAMwxl7URY,2711
+chardet/metadata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+chardet/metadata/languages.py,sha256=41tLq3eLSrBEbEVVQpVGFq9K7o1ln9b1HpY1l0hCUQo,19474
+chardet-4.0.0.dist-info/LICENSE,sha256=YJXp_6d33SKDn3gBqoRbMcntB_PWv4om3F0t7IzMDvM,26432
+chardet-4.0.0.dist-info/METADATA,sha256=ySYQAE7NPm3LwxgMqFi1zdLQ48mmwMbrJwqAWCtcbH8,3526
+chardet-4.0.0.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110
+chardet-4.0.0.dist-info/entry_points.txt,sha256=fAMmhu5eJ-zAJ-smfqQwRClQ3-nozOCmvJ6-E8lgGJo,60
+chardet-4.0.0.dist-info/top_level.txt,sha256=AowzBbZy4x8EirABDdJSLJZMkJ_53iIag8xfKR6D7kI,8
+chardet-4.0.0.dist-info/RECORD,,
diff --git a/third_party/python/chardet/chardet-4.0.0.dist-info/WHEEL b/third_party/python/chardet/chardet-4.0.0.dist-info/WHEEL
new file mode 100644
index 0000000000..6d38aa0601
--- /dev/null
+++ b/third_party/python/chardet/chardet-4.0.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.35.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/chardet/chardet-4.0.0.dist-info/entry_points.txt b/third_party/python/chardet/chardet-4.0.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000..a884269e7f
--- /dev/null
+++ b/third_party/python/chardet/chardet-4.0.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+chardetect = chardet.cli.chardetect:main
+
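The [console_scripts] section above is what makes a `chardetect` executable
appear on PATH at install time; pip generates a launcher roughly equivalent to
this sketch (an approximation, not the exact generated wrapper):

    import sys
    from chardet.cli.chardetect import main

    if __name__ == "__main__":
        sys.exit(main())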
diff --git a/third_party/python/chardet/chardet-4.0.0.dist-info/top_level.txt b/third_party/python/chardet/chardet-4.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..79236f25cd
--- /dev/null
+++ b/third_party/python/chardet/chardet-4.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+chardet
diff --git a/third_party/python/chardet/chardet/__init__.py b/third_party/python/chardet/chardet/__init__.py
new file mode 100644
index 0000000000..80ad2546d7
--- /dev/null
+++ b/third_party/python/chardet/chardet/__init__.py
@@ -0,0 +1,83 @@
+######################## BEGIN LICENSE BLOCK ########################
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+
+from .universaldetector import UniversalDetector
+from .enums import InputState
+from .version import __version__, VERSION
+
+
+__all__ = ['UniversalDetector', 'detect', 'detect_all', '__version__', 'VERSION']
+
+
+def detect(byte_str):
+ """
+ Detect the encoding of the given byte string.
+
+ :param byte_str: The byte sequence to examine.
+ :type byte_str: ``bytes`` or ``bytearray``
+ """
+ if not isinstance(byte_str, bytearray):
+ if not isinstance(byte_str, bytes):
+ raise TypeError('Expected object of type bytes or bytearray, got: '
+ '{}'.format(type(byte_str)))
+ else:
+ byte_str = bytearray(byte_str)
+ detector = UniversalDetector()
+ detector.feed(byte_str)
+ return detector.close()
+
+
+def detect_all(byte_str):
+ """
+ Detect all the possible encodings of the given byte string.
+
+ :param byte_str: The byte sequence to examine.
+ :type byte_str: ``bytes`` or ``bytearray``
+ """
+ if not isinstance(byte_str, bytearray):
+ if not isinstance(byte_str, bytes):
+ raise TypeError('Expected object of type bytes or bytearray, got: '
+ '{}'.format(type(byte_str)))
+ else:
+ byte_str = bytearray(byte_str)
+
+ detector = UniversalDetector()
+ detector.feed(byte_str)
+ detector.close()
+
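+    # Only input that reached the HIGH_BYTE state exercises the per-charset
+    # probers; pure-ASCII or escape-sequence input falls through to the
+    # detector's single final result below.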
+ if detector._input_state == InputState.HIGH_BYTE:
+ results = []
+ for prober in detector._charset_probers:
+ if prober.get_confidence() > detector.MINIMUM_THRESHOLD:
+ charset_name = prober.charset_name
+ lower_charset_name = prober.charset_name.lower()
+ # Use Windows encoding name instead of ISO-8859 if we saw any
+ # extra Windows-specific bytes
+ if lower_charset_name.startswith('iso-8859'):
+ if detector._has_win_bytes:
+ charset_name = detector.ISO_WIN_MAP.get(lower_charset_name,
+ charset_name)
+ results.append({
+ 'encoding': charset_name,
+ 'confidence': prober.get_confidence(),
+ 'language': prober.language,
+ })
+ if len(results) > 0:
+ return sorted(results, key=lambda result: -result['confidence'])
+
+ return [detector.result]
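A short usage sketch of the two functions defined above, assuming the vendored
package is importable as chardet; the sample bytes and the printed result are
illustrative only:

    import chardet

    raw = "каждый охотник желает знать".encode("windows-1251")
    print(chardet.detect(raw))
    # e.g. {'encoding': 'windows-1251', 'confidence': 0.95, 'language': 'Russian'}
    print(chardet.detect_all(raw))  # every prober above MINIMUM_THRESHOLD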
diff --git a/third_party/python/chardet/chardet/big5freq.py b/third_party/python/chardet/chardet/big5freq.py
new file mode 100644
index 0000000000..38f32517aa
--- /dev/null
+++ b/third_party/python/chardet/chardet/big5freq.py
@@ -0,0 +1,386 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Big5 frequency table
+# by Taiwan's Mandarin Promotion Council
+# <http://www.edu.tw:81/mandr/>
+#
+# 128 --> 0.42261
+# 256 --> 0.57851
+# 512 --> 0.74851
+# 1024 --> 0.89384
+# 2048 --> 0.97583
+#
+# Ideal Distribution Ratio = 0.74851 / (1 - 0.74851) = 2.98
+# Random Distribution Ratio = 512 / (5401 - 512) = 0.105
+#
+# The Typical Distribution Ratio is about 25% of the Ideal one, still much
+# higher than the Random Distribution Ratio
+
+BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
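+
+# Editorial sanity check of the ratios quoted above (a sketch, not part of
+# upstream chardet): 5401 is the total character count stated in the comment,
+# 512 the most-frequent subset.
+assert abs(0.74851 / (1 - 0.74851) - 2.98) < 0.01          # ideal ratio
+assert abs(512.0 / (5401 - 512) - 0.105) < 0.001           # random ratio
+assert abs(BIG5_TYPICAL_DISTRIBUTION_RATIO / 2.98 - 0.25) < 0.01  # ~25% of ideal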
+
+# Char to FreqOrder table
+BIG5_TABLE_SIZE = 5376
+
+BIG5_CHAR_TO_FREQ_ORDER = (
+ 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
+3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
+1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
+ 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
+3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
+4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
+5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
+ 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
+ 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
+ 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
+2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
+1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
+3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
+ 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
+1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
+3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
+2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
+ 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
+3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
+1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
+5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
+ 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
+5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
+1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
+ 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
+ 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
+3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
+3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
+ 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
+2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
+2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
+ 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
+ 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
+3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
+1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
+1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
+1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
+2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
+ 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
+4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
+1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
+5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
+2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
+ 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
+ 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
+ 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
+ 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
+5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
+ 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
+1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
+ 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
+ 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
+5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
+1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
+ 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
+3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
+4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
+3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
+ 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
+ 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
+1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
+4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
+3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
+3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
+2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
+5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
+3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
+5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
+1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
+2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
+1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
+ 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
+1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
+4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
+3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
+ 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
+ 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
+ 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
+2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
+5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
+1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
+2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
+1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
+1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
+5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
+5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
+5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
+3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
+4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
+4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
+2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
+5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
+3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
+ 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
+5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
+5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
+1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
+2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
+3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
+4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
+5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
+3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
+4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
+1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
+1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
+4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
+1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
+ 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
+1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
+1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
+3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
+ 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
+5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
+2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
+1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
+1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
+5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
+ 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
+4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
+ 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
+2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
+ 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
+1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
+1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
+ 730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
+4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
+4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
+1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
+3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
+5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
+5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
+1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
+2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
+1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
+3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
+2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
+3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
+2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
+4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
+4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
+3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
+ 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
+3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
+ 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
+3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
+4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
+3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
+1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
+5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
+ 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
+5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
+1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
+ 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
+4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
+4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
+ 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
+2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
+2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
+3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
+1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
+4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
+2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
+1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
+1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
+2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
+3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
+1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
+5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
+1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
+4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
+1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
+ 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
+1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
+4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
+4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
+2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
+1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
+4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
+ 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
+5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
+2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
+3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
+4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
+ 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
+5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
+5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
+1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
+4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
+4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
+2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
+3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
+3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
+2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
+1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
+4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
+3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
+3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
+2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
+4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
+5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
+3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
+2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
+3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
+1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
+2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
+3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
+4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
+2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
+2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
+5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
+1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
+2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
+1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
+3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
+4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
+2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
+3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
+3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
+2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
+4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
+2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
+3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
+4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
+5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
+3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
+ 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
+1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
+4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
+1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
+4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
+5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
+ 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
+5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
+5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
+2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
+3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
+2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
+2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
+ 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
+1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
+4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
+3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
+3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
+ 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
+2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
+ 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
+2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
+4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
+1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
+4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
+1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
+3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
+ 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
+3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
+5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
+5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
+3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
+3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
+1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
+2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
+5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
+1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
+1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
+3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
+ 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
+1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
+4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
+5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
+2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
+3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
+ 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
+1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
+2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
+2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
+5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
+5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
+5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
+2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
+2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
+1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
+4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
+3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
+3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
+4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
+4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
+2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
+2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
+5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
+4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
+5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
+4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
+ 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
+ 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
+1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
+3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
+4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
+1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
+5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
+2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
+2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
+3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
+5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
+1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
+3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
+5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
+1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
+5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
+2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
+3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
+2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
+3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
+3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
+3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
+4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
+ 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
+2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
+4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
+3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
+5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
+1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
+5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
+ 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
+1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
+ 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
+4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
+1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
+4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
+1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
+ 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
+3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
+4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
+5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
+ 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
+3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
+ 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
+2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376
+)
+
diff --git a/third_party/python/chardet/chardet/big5prober.py b/third_party/python/chardet/chardet/big5prober.py
new file mode 100644
index 0000000000..98f9970122
--- /dev/null
+++ b/third_party/python/chardet/chardet/big5prober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import Big5DistributionAnalysis
+from .mbcssm import BIG5_SM_MODEL
+
+
+class Big5Prober(MultiByteCharSetProber):
+ def __init__(self):
+ super(Big5Prober, self).__init__()
+ self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
+ self.distribution_analyzer = Big5DistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "Big5"
+
+ @property
+ def language(self):
+ return "Chinese"
diff --git a/third_party/python/chardet/chardet/chardistribution.py b/third_party/python/chardet/chardet/chardistribution.py
new file mode 100644
index 0000000000..c0395f4a45
--- /dev/null
+++ b/third_party/python/chardet/chardet/chardistribution.py
@@ -0,0 +1,233 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .euctwfreq import (EUCTW_CHAR_TO_FREQ_ORDER, EUCTW_TABLE_SIZE,
+ EUCTW_TYPICAL_DISTRIBUTION_RATIO)
+from .euckrfreq import (EUCKR_CHAR_TO_FREQ_ORDER, EUCKR_TABLE_SIZE,
+ EUCKR_TYPICAL_DISTRIBUTION_RATIO)
+from .gb2312freq import (GB2312_CHAR_TO_FREQ_ORDER, GB2312_TABLE_SIZE,
+ GB2312_TYPICAL_DISTRIBUTION_RATIO)
+from .big5freq import (BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE,
+ BIG5_TYPICAL_DISTRIBUTION_RATIO)
+from .jisfreq import (JIS_CHAR_TO_FREQ_ORDER, JIS_TABLE_SIZE,
+ JIS_TYPICAL_DISTRIBUTION_RATIO)
+
+
+class CharDistributionAnalysis(object):
+ ENOUGH_DATA_THRESHOLD = 1024
+ SURE_YES = 0.99
+ SURE_NO = 0.01
+ MINIMUM_DATA_THRESHOLD = 3
+
+ def __init__(self):
+        # Mapping table to get frequency order from char order (obtained
+        # from get_order())
+ self._char_to_freq_order = None
+ self._table_size = None # Size of above table
+        # This ratio is constant for a given language but varies from
+        # language to language; it is used in calculating confidence. See
+ # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
+ # for further detail.
+ self.typical_distribution_ratio = None
+ self._done = None
+ self._total_chars = None
+ self._freq_chars = None
+ self.reset()
+
+ def reset(self):
+ """reset analyser, clear any state"""
+        # If this flag is set to True, detection is done and a conclusion
+        # has been made
+ self._done = False
+ self._total_chars = 0 # Total characters encountered
+ # The number of characters whose frequency order is less than 512
+ self._freq_chars = 0
+
+ def feed(self, char, char_len):
+ """feed a character with known length"""
+ if char_len == 2:
+            # we only care about 2-byte characters in our distribution analysis
+ order = self.get_order(char)
+ else:
+ order = -1
+ if order >= 0:
+ self._total_chars += 1
+ # order is valid
+ if order < self._table_size:
+                if self._char_to_freq_order[order] < 512:
+ self._freq_chars += 1
+
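+    # Worked example (added note, hypothetical numbers): with 1000 two-byte
+    # characters seen and 800 of them among the 512 most frequent orders, a
+    # typical_distribution_ratio of 0.75 gives
+    # r = 800 / ((1000 - 800) * 0.75) = 5.33..., which is capped at SURE_YES.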
+ def get_confidence(self):
+ """return confidence based on existing data"""
+        # if we didn't receive any characters in our consideration range,
+        # return a negative answer
+ if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
+ return self.SURE_NO
+
+ if self._total_chars != self._freq_chars:
+ r = (self._freq_chars / ((self._total_chars - self._freq_chars)
+ * self.typical_distribution_ratio))
+ if r < self.SURE_YES:
+ return r
+
+ # normalize confidence (we don't want to be 100% sure)
+ return self.SURE_YES
+
+ def got_enough_data(self):
+        # It is not necessary to receive all data to draw a conclusion.
+        # For charset detection, a certain amount of data is enough
+ return self._total_chars > self.ENOUGH_DATA_THRESHOLD
+
+ def get_order(self, byte_str):
+        # We do not handle characters based on their original encoded bytes,
+        # but convert those bytes to a number, here called order.
+ # This allows multiple encodings of a language to share one frequency
+ # table.
+ return -1
+
+
+class EUCTWDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(EUCTWDistributionAnalysis, self).__init__()
+ self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
+ self._table_size = EUCTW_TABLE_SIZE
+ self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for euc-TW encoding, we are interested
+ # first byte range: 0xc4 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
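+        # Worked example (added note): the pair (0xC5, 0xA2) maps to
+        # 94 * (0xC5 - 0xC4) + (0xA2 - 0xA1) = 95, i.e. contiguous rows of
+        # 94 second-byte slots.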
+ first_char = byte_str[0]
+ if first_char >= 0xC4:
+ return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
+ else:
+ return -1
+
+
+class EUCKRDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(EUCKRDistributionAnalysis, self).__init__()
+ self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
+ self._table_size = EUCKR_TABLE_SIZE
+ self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for euc-KR encoding, we are interested
+ # first byte range: 0xb0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char = byte_str[0]
+ if first_char >= 0xB0:
+ return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
+ else:
+ return -1
+
+
+class GB2312DistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(GB2312DistributionAnalysis, self).__init__()
+ self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
+ self._table_size = GB2312_TABLE_SIZE
+ self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for GB2312 encoding, we are interested
+ # first byte range: 0xb0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char, second_char = byte_str[0], byte_str[1]
+ if (first_char >= 0xB0) and (second_char >= 0xA1):
+ return 94 * (first_char - 0xB0) + second_char - 0xA1
+ else:
+ return -1
+
+
+class Big5DistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(Big5DistributionAnalysis, self).__init__()
+ self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
+ self._table_size = BIG5_TABLE_SIZE
+ self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for big5 encoding, we are interested
+ # first byte range: 0xa4 -- 0xfe
+ # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
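+        # Worked example (added note): the low second-byte range 0x40--0x7e
+        # fills orders 0--62 of each 157-wide row, so the high range starting
+        # at 0xa1 is offset by 63; e.g. (0xA4, 0xA1) -> 157*0 + 0 + 63 = 63.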
+ first_char, second_char = byte_str[0], byte_str[1]
+ if first_char >= 0xA4:
+ if second_char >= 0xA1:
+ return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
+ else:
+ return 157 * (first_char - 0xA4) + second_char - 0x40
+ else:
+ return -1
+
+
+class SJISDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(SJISDistributionAnalysis, self).__init__()
+ self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+ self._table_size = JIS_TABLE_SIZE
+ self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for sjis encoding, we are interested
+ # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
+        # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
+ # no validation needed here. State machine has done that
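+        # Worked example (added note): lead bytes 0x81--0x9f occupy rows
+        # 0--30, so lead byte 0xE0 continues at row 31, e.g.
+        # (0xE0, 0x40) -> 188 * 31 + 0 = 5828. Second bytes above 0x7f are
+        # rejected by the check below despite the range comment above.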
+ first_char, second_char = byte_str[0], byte_str[1]
+ if (first_char >= 0x81) and (first_char <= 0x9F):
+ order = 188 * (first_char - 0x81)
+ elif (first_char >= 0xE0) and (first_char <= 0xEF):
+ order = 188 * (first_char - 0xE0 + 31)
+ else:
+ return -1
+ order = order + second_char - 0x40
+ if second_char > 0x7F:
+ order = -1
+ return order
+
+
+class EUCJPDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self):
+ super(EUCJPDistributionAnalysis, self).__init__()
+ self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+ self._table_size = JIS_TABLE_SIZE
+ self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str):
+ # for euc-JP encoding, we are interested
+ # first byte range: 0xa0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ char = byte_str[0]
+ if char >= 0xA0:
+            return 94 * (char - 0xA1) + byte_str[1] - 0xA1
+ else:
+ return -1
diff --git a/third_party/python/chardet/chardet/charsetgroupprober.py b/third_party/python/chardet/chardet/charsetgroupprober.py
new file mode 100644
index 0000000000..5812cef0b5
--- /dev/null
+++ b/third_party/python/chardet/chardet/charsetgroupprober.py
@@ -0,0 +1,107 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import ProbingState
+from .charsetprober import CharSetProber
+
+
+class CharSetGroupProber(CharSetProber):
+ def __init__(self, lang_filter=None):
+ super(CharSetGroupProber, self).__init__(lang_filter=lang_filter)
+ self._active_num = 0
+ self.probers = []
+ self._best_guess_prober = None
+
+ def reset(self):
+ super(CharSetGroupProber, self).reset()
+ self._active_num = 0
+ for prober in self.probers:
+ if prober:
+ prober.reset()
+ prober.active = True
+ self._active_num += 1
+ self._best_guess_prober = None
+
+ @property
+ def charset_name(self):
+ if not self._best_guess_prober:
+ self.get_confidence()
+ if not self._best_guess_prober:
+ return None
+ return self._best_guess_prober.charset_name
+
+ @property
+ def language(self):
+ if not self._best_guess_prober:
+ self.get_confidence()
+ if not self._best_guess_prober:
+ return None
+ return self._best_guess_prober.language
+
+ def feed(self, byte_str):
+ for prober in self.probers:
+ if not prober:
+ continue
+ if not prober.active:
+ continue
+ state = prober.feed(byte_str)
+ if not state:
+ continue
+ if state == ProbingState.FOUND_IT:
+ self._best_guess_prober = prober
+ self._state = ProbingState.FOUND_IT
+ return self.state
+ elif state == ProbingState.NOT_ME:
+ prober.active = False
+ self._active_num -= 1
+ if self._active_num <= 0:
+ self._state = ProbingState.NOT_ME
+ return self.state
+ return self.state
+
+ def get_confidence(self):
+ state = self.state
+ if state == ProbingState.FOUND_IT:
+ return 0.99
+ elif state == ProbingState.NOT_ME:
+ return 0.01
+ best_conf = 0.0
+ self._best_guess_prober = None
+ for prober in self.probers:
+ if not prober:
+ continue
+ if not prober.active:
+ self.logger.debug('%s not active', prober.charset_name)
+ continue
+ conf = prober.get_confidence()
+ self.logger.debug('%s %s confidence = %s', prober.charset_name, prober.language, conf)
+ if best_conf < conf:
+ best_conf = conf
+ self._best_guess_prober = prober
+ if not self._best_guess_prober:
+ return 0.0
+ return best_conf
diff --git a/third_party/python/chardet/chardet/charsetprober.py b/third_party/python/chardet/chardet/charsetprober.py
new file mode 100644
index 0000000000..eac4e59865
--- /dev/null
+++ b/third_party/python/chardet/chardet/charsetprober.py
@@ -0,0 +1,145 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import logging
+import re
+
+from .enums import ProbingState
+
+
+class CharSetProber(object):
+
+ SHORTCUT_THRESHOLD = 0.95
+
+ def __init__(self, lang_filter=None):
+ self._state = None
+ self.lang_filter = lang_filter
+ self.logger = logging.getLogger(__name__)
+
+ def reset(self):
+ self._state = ProbingState.DETECTING
+
+ @property
+ def charset_name(self):
+ return None
+
+ def feed(self, buf):
+ pass
+
+ @property
+ def state(self):
+ return self._state
+
+ def get_confidence(self):
+ return 0.0
+
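+    # (Added note) filter_high_byte_only collapses every run of ASCII bytes
+    # to a single space, e.g. b'ab\xc3\xa9cd' becomes b' \xc3\xa9 '.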
+ @staticmethod
+ def filter_high_byte_only(buf):
+ buf = re.sub(b'([\x00-\x7F])+', b' ', buf)
+ return buf
+
+ @staticmethod
+ def filter_international_words(buf):
+ """
+ We define three types of bytes:
+          alphabet: English letters [a-zA-Z]
+          international: international characters [\x80-\xFF]
+          marker: everything else [^a-zA-Z\x80-\xFF]
+
+        The input buffer can be thought of as a series of words delimited
+        by markers. This function keeps only the words that contain at
+        least one international character. All contiguous sequences of markers
+        are replaced by a single ASCII space character.
+
+ This filter applies to all scripts which do not use English characters.
+ """
+ filtered = bytearray()
+
+        # This regex matches only words that have at least one
+        # international character. The word may include one marker character at
+ # the end.
+ words = re.findall(b'[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?',
+ buf)
+
+ for word in words:
+ filtered.extend(word[:-1])
+
+ # If the last character in the word is a marker, replace it with a
+ # space as markers shouldn't affect our analysis (they are used
+ # similarly across all languages and may thus have similar
+ # frequencies).
+ last_char = word[-1:]
+ if not last_char.isalpha() and last_char < b'\x80':
+ last_char = b' '
+ filtered.extend(last_char)
+
+ return filtered
+
+ @staticmethod
+ def filter_with_english_letters(buf):
+ """
+ Returns a copy of ``buf`` that retains only the sequences of English
+ alphabet and high byte characters that are not between <> characters.
+ Also retains English alphabet and high byte characters immediately
+ before occurrences of >.
+
+ This filter can be applied to all scripts which contain both English
+ characters and extended ASCII characters, but is currently only used by
+ ``Latin1Prober``.
+ """
+ filtered = bytearray()
+ in_tag = False
+ prev = 0
+
+ for curr in range(len(buf)):
+ # Slice here to get bytes instead of an int with Python 3
+ buf_char = buf[curr:curr + 1]
+ # Check if we're coming out of or entering an HTML tag
+ if buf_char == b'>':
+ in_tag = False
+ elif buf_char == b'<':
+ in_tag = True
+
+ # If current character is not extended-ASCII and not alphabetic...
+ if buf_char < b'\x80' and not buf_char.isalpha():
+ # ...and we're not in a tag
+ if curr > prev and not in_tag:
+ # Keep everything after last non-extended-ASCII,
+ # non-alphabetic character
+ filtered.extend(buf[prev:curr])
+ # Output a space to delimit stretch we kept
+ filtered.extend(b' ')
+ prev = curr + 1
+
+ # If we're not in a tag...
+ if not in_tag:
+ # Keep everything after last non-extended-ASCII, non-alphabetic
+ # character
+ filtered.extend(buf[prev:])
+
+ return filtered
diff --git a/third_party/python/chardet/chardet/cli/__init__.py b/third_party/python/chardet/chardet/cli/__init__.py
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/chardet/chardet/cli/__init__.py
@@ -0,0 +1 @@
+
diff --git a/third_party/python/chardet/chardet/cli/chardetect.py b/third_party/python/chardet/chardet/cli/chardetect.py
new file mode 100644
index 0000000000..e1d8cd69ac
--- /dev/null
+++ b/third_party/python/chardet/chardet/cli/chardetect.py
@@ -0,0 +1,84 @@
+"""
+Script which takes one or more file paths and reports on their detected
+encodings
+
+Example::
+
+ % chardetect somefile someotherfile
+ somefile: windows-1252 with confidence 0.5
+ someotherfile: ascii with confidence 1.0
+
+If no paths are provided, it takes its input from stdin.
+
+"""
+
+from __future__ import absolute_import, print_function, unicode_literals
+
+import argparse
+import sys
+
+from chardet import __version__
+from chardet.compat import PY2
+from chardet.universaldetector import UniversalDetector
+
+
+def description_of(lines, name='stdin'):
+ """
+ Return a string describing the probable encoding of a file or
+ list of strings.
+
+ :param lines: The lines to get the encoding of.
+ :type lines: Iterable of bytes
+ :param name: Name of file or collection of lines
+ :type name: str
+ """
+ u = UniversalDetector()
+ for line in lines:
+ line = bytearray(line)
+ u.feed(line)
+ # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
+ if u.done:
+ break
+ u.close()
+ result = u.result
+ if PY2:
+ name = name.decode(sys.getfilesystemencoding(), 'ignore')
+ if result['encoding']:
+ return '{}: {} with confidence {}'.format(name, result['encoding'],
+ result['confidence'])
+ else:
+ return '{}: no result'.format(name)
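+# Example usage (added illustration; 'some.bin' is a hypothetical path):
+#     with open('some.bin', 'rb') as f:
+#         print(description_of(f, f.name))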
+
+
+def main(argv=None):
+ """
+ Handles command line arguments and gets things started.
+
+ :param argv: List of arguments, as if specified on the command-line.
+ If None, ``sys.argv[1:]`` is used instead.
+ :type argv: list of str
+ """
+ # Get command line arguments
+ parser = argparse.ArgumentParser(
+ description="Takes one or more file paths and reports their detected \
+ encodings")
+ parser.add_argument('input',
+ help='File whose encoding we would like to determine. \
+ (default: stdin)',
+ type=argparse.FileType('rb'), nargs='*',
+ default=[sys.stdin if PY2 else sys.stdin.buffer])
+ parser.add_argument('--version', action='version',
+ version='%(prog)s {}'.format(__version__))
+ args = parser.parse_args(argv)
+
+ for f in args.input:
+ if f.isatty():
+ print("You are running chardetect interactively. Press " +
+ "CTRL-D twice at the start of a blank line to signal the " +
+ "end of your input. If you want help, run chardetect " +
+ "--help\n", file=sys.stderr)
+ print(description_of(f, f.name))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/python/chardet/chardet/codingstatemachine.py b/third_party/python/chardet/chardet/codingstatemachine.py
new file mode 100644
index 0000000000..68fba44f14
--- /dev/null
+++ b/third_party/python/chardet/chardet/codingstatemachine.py
@@ -0,0 +1,88 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import logging
+
+from .enums import MachineState
+
+
+class CodingStateMachine(object):
+ """
+ A state machine to verify a byte sequence for a particular encoding. For
+ each byte the detector receives, it will feed that byte to every active
+ state machine available, one byte at a time. The state machine changes its
+ state based on its previous state and the byte it receives. There are 3
+ states in a state machine that are of interest to an auto-detector:
+
+    START state: This is the state to start with, or the state reached after
+                 a legal byte sequence (i.e. a valid code point) for a
+                 character has been identified.
+
+ ME state: This indicates that the state machine identified a byte sequence
+ that is specific to the charset it is designed for and that
+ there is no other possible encoding which can contain this byte
+                 sequence. This will lead to an immediate positive answer for
+ the detector.
+
+ ERROR state: This indicates the state machine identified an illegal byte
+ sequence for that encoding. This will lead to an immediate
+                 negative answer for this encoding. The detector will exclude this
+ encoding from consideration from here on.
+ """
+ def __init__(self, sm):
+ self._model = sm
+ self._curr_byte_pos = 0
+ self._curr_char_len = 0
+ self._curr_state = None
+ self.logger = logging.getLogger(__name__)
+ self.reset()
+
+ def reset(self):
+ self._curr_state = MachineState.START
+
+ def next_state(self, c):
+ # for each byte we get its class
+        # if it is the first byte, we also get the byte length
+ byte_class = self._model['class_table'][c]
+ if self._curr_state == MachineState.START:
+ self._curr_byte_pos = 0
+ self._curr_char_len = self._model['char_len_table'][byte_class]
+ # from byte's class and state_table, we get its next state
+ curr_state = (self._curr_state * self._model['class_factor']
+ + byte_class)
+ self._curr_state = self._model['state_table'][curr_state]
+ self._curr_byte_pos += 1
+ return self._curr_state
+
+ def get_current_charlen(self):
+ return self._curr_char_len
+
+ def get_coding_state_machine(self):
+ return self._model['name']
+
+ @property
+ def language(self):
+ return self._model['language']
diff --git a/third_party/python/chardet/chardet/compat.py b/third_party/python/chardet/chardet/compat.py
new file mode 100644
index 0000000000..8941572b3e
--- /dev/null
+++ b/third_party/python/chardet/chardet/compat.py
@@ -0,0 +1,36 @@
+######################## BEGIN LICENSE BLOCK ########################
+# Contributor(s):
+# Dan Blanchard
+# Ian Cordasco
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import sys
+
+
+if sys.version_info < (3, 0):
+ PY2 = True
+ PY3 = False
+ string_types = (str, unicode)
+ text_type = unicode
+ iteritems = dict.iteritems
+else:
+ PY2 = False
+ PY3 = True
+ string_types = (bytes, str)
+ text_type = str
+ iteritems = dict.items
diff --git a/third_party/python/chardet/chardet/cp949prober.py b/third_party/python/chardet/chardet/cp949prober.py
new file mode 100644
index 0000000000..efd793abca
--- /dev/null
+++ b/third_party/python/chardet/chardet/cp949prober.py
@@ -0,0 +1,49 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import EUCKRDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import CP949_SM_MODEL
+
+
+class CP949Prober(MultiByteCharSetProber):
+ def __init__(self):
+ super(CP949Prober, self).__init__()
+ self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
+        # NOTE: CP949 is a superset of EUC-KR, so the distribution should
+        # not differ.
+ self.distribution_analyzer = EUCKRDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "CP949"
+
+ @property
+ def language(self):
+ return "Korean"
diff --git a/third_party/python/chardet/chardet/enums.py b/third_party/python/chardet/chardet/enums.py
new file mode 100644
index 0000000000..0451207225
--- /dev/null
+++ b/third_party/python/chardet/chardet/enums.py
@@ -0,0 +1,76 @@
+"""
+All of the Enums that are used throughout the chardet package.
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+
+class InputState(object):
+ """
+ This enum represents the different states a universal detector can be in.
+ """
+ PURE_ASCII = 0
+ ESC_ASCII = 1
+ HIGH_BYTE = 2
+
+
+class LanguageFilter(object):
+ """
+ This enum represents the different language filters we can apply to a
+ ``UniversalDetector``.
+ """
+ CHINESE_SIMPLIFIED = 0x01
+ CHINESE_TRADITIONAL = 0x02
+ JAPANESE = 0x04
+ KOREAN = 0x08
+ NON_CJK = 0x10
+ ALL = 0x1F
+ CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
+ CJK = CHINESE | JAPANESE | KOREAN
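+    # (Added note) The filters are bit flags and compose with "|":
+    # CHINESE == 0x03, CJK == 0x0f, and ALL == CJK | NON_CJK == 0x1f.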
+
+
+class ProbingState(object):
+ """
+ This enum represents the different states a prober can be in.
+ """
+ DETECTING = 0
+ FOUND_IT = 1
+ NOT_ME = 2
+
+
+class MachineState(object):
+ """
+ This enum represents the different states a state machine can be in.
+ """
+ START = 0
+ ERROR = 1
+ ITS_ME = 2
+
+
+class SequenceLikelihood(object):
+ """
+ This enum represents the likelihood of a character following the previous one.
+ """
+ NEGATIVE = 0
+ UNLIKELY = 1
+ LIKELY = 2
+ POSITIVE = 3
+
+ @classmethod
+ def get_num_categories(cls):
+ """:returns: The number of likelihood categories in the enum."""
+ return 4
+
+
+class CharacterCategory(object):
+ """
+ This enum represents the different categories language models for
+ ``SingleByteCharsetProber`` put characters into.
+
+ Anything less than CONTROL is considered a letter.
+ """
+ UNDEFINED = 255
+ LINE_BREAK = 254
+ SYMBOL = 253
+ DIGIT = 252
+ CONTROL = 251
diff --git a/third_party/python/chardet/chardet/escprober.py b/third_party/python/chardet/chardet/escprober.py
new file mode 100644
index 0000000000..c70493f2b1
--- /dev/null
+++ b/third_party/python/chardet/chardet/escprober.py
@@ -0,0 +1,101 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .codingstatemachine import CodingStateMachine
+from .enums import LanguageFilter, ProbingState, MachineState
+from .escsm import (HZ_SM_MODEL, ISO2022CN_SM_MODEL, ISO2022JP_SM_MODEL,
+ ISO2022KR_SM_MODEL)
+
+
+class EscCharSetProber(CharSetProber):
+ """
+ This CharSetProber uses a "code scheme" approach for detecting encodings,
+ whereby easily recognizable escape or shift sequences are relied on to
+ identify these encodings.
+ """
+
+ def __init__(self, lang_filter=None):
+ super(EscCharSetProber, self).__init__(lang_filter=lang_filter)
+ self.coding_sm = []
+ if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
+ self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
+ self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL))
+ if self.lang_filter & LanguageFilter.JAPANESE:
+ self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
+ if self.lang_filter & LanguageFilter.KOREAN:
+ self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
+ self.active_sm_count = None
+ self._detected_charset = None
+ self._detected_language = None
+ self._state = None
+ self.reset()
+
+ def reset(self):
+ super(EscCharSetProber, self).reset()
+ for coding_sm in self.coding_sm:
+ if not coding_sm:
+ continue
+ coding_sm.active = True
+ coding_sm.reset()
+ self.active_sm_count = len(self.coding_sm)
+ self._detected_charset = None
+ self._detected_language = None
+
+ @property
+ def charset_name(self):
+ return self._detected_charset
+
+ @property
+ def language(self):
+ return self._detected_language
+
+ def get_confidence(self):
+ if self._detected_charset:
+ return 0.99
+ else:
+ return 0.00
+
+ def feed(self, byte_str):
+ for c in byte_str:
+ for coding_sm in self.coding_sm:
+ if not coding_sm or not coding_sm.active:
+ continue
+ coding_state = coding_sm.next_state(c)
+ if coding_state == MachineState.ERROR:
+ coding_sm.active = False
+ self.active_sm_count -= 1
+ if self.active_sm_count <= 0:
+ self._state = ProbingState.NOT_ME
+ return self.state
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ self._detected_charset = coding_sm.get_coding_state_machine()
+ self._detected_language = coding_sm.language
+ return self.state
+
+ return self.state
diff --git a/third_party/python/chardet/chardet/escsm.py b/third_party/python/chardet/chardet/escsm.py
new file mode 100644
index 0000000000..0069523a04
--- /dev/null
+++ b/third_party/python/chardet/chardet/escsm.py
@@ -0,0 +1,246 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import MachineState
+
+HZ_CLS = (
+1,0,0,0,0,0,0,0, # 00 - 07
+0,0,0,0,0,0,0,0, # 08 - 0f
+0,0,0,0,0,0,0,0, # 10 - 17
+0,0,0,1,0,0,0,0, # 18 - 1f
+0,0,0,0,0,0,0,0, # 20 - 27
+0,0,0,0,0,0,0,0, # 28 - 2f
+0,0,0,0,0,0,0,0, # 30 - 37
+0,0,0,0,0,0,0,0, # 38 - 3f
+0,0,0,0,0,0,0,0, # 40 - 47
+0,0,0,0,0,0,0,0, # 48 - 4f
+0,0,0,0,0,0,0,0, # 50 - 57
+0,0,0,0,0,0,0,0, # 58 - 5f
+0,0,0,0,0,0,0,0, # 60 - 67
+0,0,0,0,0,0,0,0, # 68 - 6f
+0,0,0,0,0,0,0,0, # 70 - 77
+0,0,0,4,0,5,2,0, # 78 - 7f
+1,1,1,1,1,1,1,1, # 80 - 87
+1,1,1,1,1,1,1,1, # 88 - 8f
+1,1,1,1,1,1,1,1, # 90 - 97
+1,1,1,1,1,1,1,1, # 98 - 9f
+1,1,1,1,1,1,1,1, # a0 - a7
+1,1,1,1,1,1,1,1, # a8 - af
+1,1,1,1,1,1,1,1, # b0 - b7
+1,1,1,1,1,1,1,1, # b8 - bf
+1,1,1,1,1,1,1,1, # c0 - c7
+1,1,1,1,1,1,1,1, # c8 - cf
+1,1,1,1,1,1,1,1, # d0 - d7
+1,1,1,1,1,1,1,1, # d8 - df
+1,1,1,1,1,1,1,1, # e0 - e7
+1,1,1,1,1,1,1,1, # e8 - ef
+1,1,1,1,1,1,1,1, # f0 - f7
+1,1,1,1,1,1,1,1, # f8 - ff
+)
+
+HZ_ST = (
+MachineState.START,MachineState.ERROR, 3,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START, 4,MachineState.ERROR,# 10-17
+ 5,MachineState.ERROR, 6,MachineState.ERROR, 5, 5, 4,MachineState.ERROR,# 18-1f
+ 4,MachineState.ERROR, 4, 4, 4,MachineState.ERROR, 4,MachineState.ERROR,# 20-27
+ 4,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 28-2f
+)
+
+HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
+
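+# Illustrative trace (added note, not upstream): feeding the HZ escape "~{"
+# (0x7e then 0x7b) from MachineState.START walks HZ_ST[0*6 + 2] == 3 and then
+# HZ_ST[3*6 + 4] == 4, the state for two-byte GB2312 text, in the model below.
+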
+HZ_SM_MODEL = {'class_table': HZ_CLS,
+ 'class_factor': 6,
+ 'state_table': HZ_ST,
+ 'char_len_table': HZ_CHAR_LEN_TABLE,
+ 'name': "HZ-GB-2312",
+ 'language': 'Chinese'}
+
+ISO2022CN_CLS = (
+2,0,0,0,0,0,0,0, # 00 - 07
+0,0,0,0,0,0,0,0, # 08 - 0f
+0,0,0,0,0,0,0,0, # 10 - 17
+0,0,0,1,0,0,0,0, # 18 - 1f
+0,0,0,0,0,0,0,0, # 20 - 27
+0,3,0,0,0,0,0,0, # 28 - 2f
+0,0,0,0,0,0,0,0, # 30 - 37
+0,0,0,0,0,0,0,0, # 38 - 3f
+0,0,0,4,0,0,0,0, # 40 - 47
+0,0,0,0,0,0,0,0, # 48 - 4f
+0,0,0,0,0,0,0,0, # 50 - 57
+0,0,0,0,0,0,0,0, # 58 - 5f
+0,0,0,0,0,0,0,0, # 60 - 67
+0,0,0,0,0,0,0,0, # 68 - 6f
+0,0,0,0,0,0,0,0, # 70 - 77
+0,0,0,0,0,0,0,0, # 78 - 7f
+2,2,2,2,2,2,2,2, # 80 - 87
+2,2,2,2,2,2,2,2, # 88 - 8f
+2,2,2,2,2,2,2,2, # 90 - 97
+2,2,2,2,2,2,2,2, # 98 - 9f
+2,2,2,2,2,2,2,2, # a0 - a7
+2,2,2,2,2,2,2,2, # a8 - af
+2,2,2,2,2,2,2,2, # b0 - b7
+2,2,2,2,2,2,2,2, # b8 - bf
+2,2,2,2,2,2,2,2, # c0 - c7
+2,2,2,2,2,2,2,2, # c8 - cf
+2,2,2,2,2,2,2,2, # d0 - d7
+2,2,2,2,2,2,2,2, # d8 - df
+2,2,2,2,2,2,2,2, # e0 - e7
+2,2,2,2,2,2,2,2, # e8 - ef
+2,2,2,2,2,2,2,2, # f0 - f7
+2,2,2,2,2,2,2,2, # f8 - ff
+)
+
+ISO2022CN_ST = (
+MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
+MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
+MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,# 18-1f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 20-27
+ 5, 6,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 28-2f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 30-37
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,# 38-3f
+)
+
+ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+ISO2022CN_SM_MODEL = {'class_table': ISO2022CN_CLS,
+ 'class_factor': 9,
+ 'state_table': ISO2022CN_ST,
+ 'char_len_table': ISO2022CN_CHAR_LEN_TABLE,
+ 'name': "ISO-2022-CN",
+ 'language': 'Chinese'}
+
+ISO2022JP_CLS = (
+2,0,0,0,0,0,0,0, # 00 - 07
+0,0,0,0,0,0,2,2, # 08 - 0f
+0,0,0,0,0,0,0,0, # 10 - 17
+0,0,0,1,0,0,0,0, # 18 - 1f
+0,0,0,0,7,0,0,0, # 20 - 27
+3,0,0,0,0,0,0,0, # 28 - 2f
+0,0,0,0,0,0,0,0, # 30 - 37
+0,0,0,0,0,0,0,0, # 38 - 3f
+6,0,4,0,8,0,0,0, # 40 - 47
+0,9,5,0,0,0,0,0, # 48 - 4f
+0,0,0,0,0,0,0,0, # 50 - 57
+0,0,0,0,0,0,0,0, # 58 - 5f
+0,0,0,0,0,0,0,0, # 60 - 67
+0,0,0,0,0,0,0,0, # 68 - 6f
+0,0,0,0,0,0,0,0, # 70 - 77
+0,0,0,0,0,0,0,0, # 78 - 7f
+2,2,2,2,2,2,2,2, # 80 - 87
+2,2,2,2,2,2,2,2, # 88 - 8f
+2,2,2,2,2,2,2,2, # 90 - 97
+2,2,2,2,2,2,2,2, # 98 - 9f
+2,2,2,2,2,2,2,2, # a0 - a7
+2,2,2,2,2,2,2,2, # a8 - af
+2,2,2,2,2,2,2,2, # b0 - b7
+2,2,2,2,2,2,2,2, # b8 - bf
+2,2,2,2,2,2,2,2, # c0 - c7
+2,2,2,2,2,2,2,2, # c8 - cf
+2,2,2,2,2,2,2,2, # d0 - d7
+2,2,2,2,2,2,2,2, # d8 - df
+2,2,2,2,2,2,2,2, # e0 - e7
+2,2,2,2,2,2,2,2, # e8 - ef
+2,2,2,2,2,2,2,2, # f0 - f7
+2,2,2,2,2,2,2,2, # f8 - ff
+)
+
+ISO2022JP_ST = (
+MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 00-07
+MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 08-0f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 10-17
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,# 18-1f
+MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 20-27
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 6,MachineState.ITS_ME,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,# 28-2f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,# 30-37
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 38-3f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.START,# 40-47
+)
+
+ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
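+# Illustrative trace (added note, not upstream): the escape sequence ESC $ B
+# (0x1b, 0x24, 0x42) walks ISO2022JP_ST[0*10 + 1] == 3, then [3*10 + 7] == 4,
+# then [4*10 + 4] == MachineState.ITS_ME, identifying ISO-2022-JP below.
+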
+ISO2022JP_SM_MODEL = {'class_table': ISO2022JP_CLS,
+ 'class_factor': 10,
+ 'state_table': ISO2022JP_ST,
+ 'char_len_table': ISO2022JP_CHAR_LEN_TABLE,
+ 'name': "ISO-2022-JP",
+ 'language': 'Japanese'}
+
+ISO2022KR_CLS = (
+2,0,0,0,0,0,0,0, # 00 - 07
+0,0,0,0,0,0,0,0, # 08 - 0f
+0,0,0,0,0,0,0,0, # 10 - 17
+0,0,0,1,0,0,0,0, # 18 - 1f
+0,0,0,0,3,0,0,0, # 20 - 27
+0,4,0,0,0,0,0,0, # 28 - 2f
+0,0,0,0,0,0,0,0, # 30 - 37
+0,0,0,0,0,0,0,0, # 38 - 3f
+0,0,0,5,0,0,0,0, # 40 - 47
+0,0,0,0,0,0,0,0, # 48 - 4f
+0,0,0,0,0,0,0,0, # 50 - 57
+0,0,0,0,0,0,0,0, # 58 - 5f
+0,0,0,0,0,0,0,0, # 60 - 67
+0,0,0,0,0,0,0,0, # 68 - 6f
+0,0,0,0,0,0,0,0, # 70 - 77
+0,0,0,0,0,0,0,0, # 78 - 7f
+2,2,2,2,2,2,2,2, # 80 - 87
+2,2,2,2,2,2,2,2, # 88 - 8f
+2,2,2,2,2,2,2,2, # 90 - 97
+2,2,2,2,2,2,2,2, # 98 - 9f
+2,2,2,2,2,2,2,2, # a0 - a7
+2,2,2,2,2,2,2,2, # a8 - af
+2,2,2,2,2,2,2,2, # b0 - b7
+2,2,2,2,2,2,2,2, # b8 - bf
+2,2,2,2,2,2,2,2, # c0 - c7
+2,2,2,2,2,2,2,2, # c8 - cf
+2,2,2,2,2,2,2,2, # d0 - d7
+2,2,2,2,2,2,2,2, # d8 - df
+2,2,2,2,2,2,2,2, # e0 - e7
+2,2,2,2,2,2,2,2, # e8 - ef
+2,2,2,2,2,2,2,2, # f0 - f7
+2,2,2,2,2,2,2,2, # f8 - ff
+)
+
+ISO2022KR_ST = (
+MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,# 00-07
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,# 08-0f
+MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 4,MachineState.ERROR,MachineState.ERROR,# 10-17
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,# 18-1f
+MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.START,MachineState.START,MachineState.START,MachineState.START,# 20-27
+)
+
+ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
+
+ISO2022KR_SM_MODEL = {'class_table': ISO2022KR_CLS,
+ 'class_factor': 6,
+ 'state_table': ISO2022KR_ST,
+ 'char_len_table': ISO2022KR_CHAR_LEN_TABLE,
+ 'name': "ISO-2022-KR",
+ 'language': 'Korean'}
+
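+# Illustrative sketch (not part of upstream chardet): a *_SM_MODEL dict
+# like the ones above is consumed by CodingStateMachine roughly as
+# follows. Each input byte is mapped to a character class through
+# 'class_table', and the transition is looked up in the flattened
+# 'state_table' with 'class_factor' as the row width. Bare integers
+# (3, 4, 5, ...) in the state tables are intermediate states beyond the
+# named START/ERROR/ITS_ME constants, and 'char_len_table' is all zeros
+# because the ISO-2022 charsets are identified purely by their escape
+# sequences rather than by multi-byte character lengths.
+def _next_state_sketch(model, state, byte_value):
+    byte_class = model['class_table'][byte_value]
+    return model['state_table'][state * model['class_factor'] + byte_class]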
+
diff --git a/third_party/python/chardet/chardet/eucjpprober.py b/third_party/python/chardet/chardet/eucjpprober.py
new file mode 100644
index 0000000000..20ce8f7d15
--- /dev/null
+++ b/third_party/python/chardet/chardet/eucjpprober.py
@@ -0,0 +1,92 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import ProbingState, MachineState
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCJPDistributionAnalysis
+from .jpcntx import EUCJPContextAnalysis
+from .mbcssm import EUCJP_SM_MODEL
+
+
+class EUCJPProber(MultiByteCharSetProber):
+ def __init__(self):
+ super(EUCJPProber, self).__init__()
+ self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
+ self.distribution_analyzer = EUCJPDistributionAnalysis()
+ self.context_analyzer = EUCJPContextAnalysis()
+ self.reset()
+
+ def reset(self):
+ super(EUCJPProber, self).reset()
+ self.context_analyzer.reset()
+
+ @property
+ def charset_name(self):
+ return "EUC-JP"
+
+ @property
+ def language(self):
+ return "Japanese"
+
+ def feed(self, byte_str):
+ for i in range(len(byte_str)):
+ # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
+ coding_state = self.coding_sm.next_state(byte_str[i])
+ if coding_state == MachineState.ERROR:
+ self.logger.debug('%s %s prober hit error at byte %s',
+ self.charset_name, self.language, i)
+ self._state = ProbingState.NOT_ME
+ break
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ elif coding_state == MachineState.START:
+ char_len = self.coding_sm.get_current_charlen()
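+                # First byte of this chunk: pair it with the final byte
+                # kept from the previous feed() call (self._last_char) so
+                # characters that span chunk boundaries are still fed to
+                # the analyzers.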
+ if i == 0:
+ self._last_char[1] = byte_str[0]
+ self.context_analyzer.feed(self._last_char, char_len)
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.context_analyzer.feed(byte_str[i - 1:i + 1],
+ char_len)
+ self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+ char_len)
+
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if (self.context_analyzer.got_enough_data() and
+ (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self):
+ context_conf = self.context_analyzer.get_confidence()
+ distrib_conf = self.distribution_analyzer.get_confidence()
+ return max(context_conf, distrib_conf)
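+
+
+if __name__ == '__main__':
+    # Minimal usage sketch (illustrative; not part of upstream chardet):
+    # probe a short EUC-JP byte string directly. The bytearray mirrors
+    # what UniversalDetector normally passes to feed().
+    _prober = EUCJPProber()
+    _prober.feed(bytearray(u'\u65e5\u672c\u8a9e'.encode('euc-jp')))
+    print('%s %s %.3f' % (_prober.charset_name, _prober.language,
+                          _prober.get_confidence()))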
diff --git a/third_party/python/chardet/chardet/euckrfreq.py b/third_party/python/chardet/chardet/euckrfreq.py
new file mode 100644
index 0000000000..b68078cb96
--- /dev/null
+++ b/third_party/python/chardet/chardet/euckrfreq.py
@@ -0,0 +1,195 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Sampled from about 20M of text material, including literature and computer technology
+
+# N most frequent chars --> cumulative coverage of the sampled text:
+#  128 --> 0.79
+#  256 --> 0.92
+#  512 --> 0.986
+# 1024 --> 0.99944
+# 2048 --> 0.99999
+#
+# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
+# Random Distribution Ratio = 512 / (2350-512) = 0.279
+#
+# Typical Distribution Ratio:
+
+EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
+
+EUCKR_TABLE_SIZE = 2352
+
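+# Illustrative sketch (not part of upstream chardet): the "ideal
+# distribution ratio" quoted above is simply coverage/(1 - coverage),
+# i.e. the odds that a character drawn from real text is one of the 512
+# most frequent ones.
+def _ideal_distribution_ratio_sketch(coverage):
+    # _ideal_distribution_ratio_sketch(0.98653) -> ~73.24
+    return coverage / (1.0 - coverage)
+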
+# Char to FreqOrder table
+EUCKR_CHAR_TO_FREQ_ORDER = (
+ 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
+1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
+1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
+ 945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
+ 116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
+ 708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
+1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
+ 344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
+ 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
+1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
+1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
+1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
+1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
+1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
+ 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
+1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
+1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
+1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
+1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
+ 544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
+1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
+ 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
+ 893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
+1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
+ 282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
+1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
+ 127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
+ 0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
+1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
+1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
+1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
+1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
+ 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
+1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
+ 887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
+ 217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
+1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
+1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
+1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
+1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
+1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
+1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
+ 50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
+ 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
+ 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
+1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
+ 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
+1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
+ 423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
+ 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
+2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
+ 619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
+ 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
+2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
+2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
+2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
+ 719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
+ 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
+2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
+ 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
+1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
+2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
+1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
+2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
+2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
+1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
+ 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
+2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
+2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
+ 22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
+ 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
+2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
+1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
+2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
+2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
+2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
+2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
+2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
+2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
+1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
+2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
+2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
+2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
+2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
+2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
+1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
+1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
+2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
+1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
+2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
+1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
+ 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
+2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
+ 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
+2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
+ 808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
+2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
+2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
+ 501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
+2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
+1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
+ 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
+1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
+2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
+1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
+2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
+ 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
+2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
+1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
+2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
+1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
+2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
+1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
+ 593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
+2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
+2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
+ 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
+ 915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
+1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
+1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
+ 291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
+2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
+2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
+ 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
+ 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
+ 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
+2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
+ 95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
+ 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
+2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
+2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
+ 704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
+2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
+1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
+ 249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
+2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
+2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
+2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
+ 3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
+ 202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
+ 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
+2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
+2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
+2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
+1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
+2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
+ 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
+)
+
diff --git a/third_party/python/chardet/chardet/euckrprober.py b/third_party/python/chardet/chardet/euckrprober.py
new file mode 100644
index 0000000000..345a060d02
--- /dev/null
+++ b/third_party/python/chardet/chardet/euckrprober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCKRDistributionAnalysis
+from .mbcssm import EUCKR_SM_MODEL
+
+
+class EUCKRProber(MultiByteCharSetProber):
+ def __init__(self):
+ super(EUCKRProber, self).__init__()
+ self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
+ self.distribution_analyzer = EUCKRDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "EUC-KR"
+
+ @property
+ def language(self):
+ return "Korean"
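+
+    # Note: unlike EUCJPProber, this prober overrides nothing beyond
+    # __init__ and the two properties above; feed() and get_confidence()
+    # come straight from MultiByteCharSetProber, driven by the EUC-KR
+    # state machine and distribution table wired up in the constructor.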
diff --git a/third_party/python/chardet/chardet/euctwfreq.py b/third_party/python/chardet/chardet/euctwfreq.py
new file mode 100644
index 0000000000..ed7a995a3a
--- /dev/null
+++ b/third_party/python/chardet/chardet/euctwfreq.py
@@ -0,0 +1,387 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# EUCTW frequency table
+# Converted from the Big5 work
+# by Taiwan's Mandarin Promotion Council
+# <http://www.edu.tw:81/mandr/>
+
+# N most frequent chars --> cumulative coverage of the sampled text:
+#  128 --> 0.42261
+#  256 --> 0.57851
+#  512 --> 0.74851
+# 1024 --> 0.89384
+# 2048 --> 0.97583
+#
+# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
+# Random Distribution Ratio = 512/(5401-512) = 0.105
+#
+# Typical Distribution Ratio is about 25% of the ideal one, still much higher than the RDR
+
+EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
+
+# Char to FreqOrder table
+EUCTW_TABLE_SIZE = 5376
+
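+# Illustrative sketch (not part of upstream chardet): roughly how the
+# distribution analysis consumes a table like the one below. Characters
+# whose frequency order lands in the top 512 count as "frequent"; the
+# observed frequent-to-infrequent ratio, scaled by the typical
+# distribution ratio above, becomes the confidence (capped below 1.0).
+def _distribution_confidence_sketch(freq_orders, typical_ratio):
+    freq_chars = sum(1 for order in freq_orders if 0 <= order < 512)
+    total_chars = sum(1 for order in freq_orders if order >= 0)
+    if total_chars == 0:
+        return 0.01  # not enough data to say anything
+    if total_chars == freq_chars:
+        return 0.99  # every character seen was a frequent one
+    r = freq_chars / ((total_chars - freq_chars) * typical_ratio)
+    return min(r, 0.99)
+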
+EUCTW_CHAR_TO_FREQ_ORDER = (
+ 1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
+3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
+1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
+ 63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
+3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
+4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
+7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
+ 630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
+ 179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
+ 995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
+2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
+1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
+3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
+ 706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
+1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
+3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
+2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
+ 437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
+3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
+1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
+7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
+ 266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
+7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
+1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
+ 32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
+ 188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
+3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
+3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
+ 324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
+2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
+2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
+ 314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
+ 287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
+3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
+1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
+1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
+1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
+2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
+ 265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
+4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
+1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
+7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
+2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
+ 383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
+ 98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
+ 523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
+ 710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
+7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
+ 379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
+1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
+ 585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
+ 690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
+7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
+1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
+ 544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
+3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
+4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
+3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
+ 279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
+ 610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
+1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
+4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
+3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
+3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
+2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
+7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
+3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
+7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
+1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
+2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
+1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
+ 78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
+1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
+4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
+3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
+ 534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
+ 165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
+ 626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
+2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
+7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
+1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
+2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
+1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
+1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
+7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
+7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
+7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
+3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
+4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
+1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
+7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
+2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
+7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
+3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
+3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
+7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
+2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
+7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
+ 862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
+4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
+2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
+7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
+3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
+2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
+2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
+ 294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
+2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
+1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
+1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
+2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
+1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
+7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
+7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
+2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
+4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
+1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
+7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
+ 829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
+4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
+ 375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
+2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
+ 444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
+1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
+1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
+ 730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
+3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
+3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
+1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
+3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
+7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
+7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
+1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
+2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
+1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
+3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
+2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
+3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
+2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
+4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
+4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
+3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
+ 97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
+3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
+ 424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
+3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
+3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
+3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
+1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
+7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
+ 199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
+7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
+1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
+ 391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
+4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
+3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
+ 397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
+2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
+2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
+3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
+1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
+4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
+2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
+1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
+1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
+2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
+3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
+1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
+7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
+1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
+4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
+1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
+ 135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
+1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
+3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
+3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
+2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
+1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
+4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
+ 660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
+7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
+2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
+3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
+4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
+ 790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
+7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
+7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
+1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
+4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
+3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
+2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
+3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
+3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
+2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
+1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
+4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
+3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
+3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
+2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
+4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
+7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
+3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
+2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
+3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
+1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
+2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
+3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
+4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
+2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
+2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
+7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
+1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
+2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
+1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
+3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
+4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
+2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
+3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
+3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
+2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
+4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
+2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
+3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
+4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
+7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
+3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
+ 194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
+1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
+4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
+1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
+4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
+7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
+ 510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
+7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
+2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
+1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
+1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
+3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
+ 509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
+ 552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
+ 478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
+3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
+2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
+ 751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
+7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
+1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
+3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
+7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
+1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
+7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
+4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
+1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
+2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
+2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
+4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
+ 802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
+ 809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
+3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
+3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
+1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
+2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
+7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
+1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
+1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
+3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
+ 919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
+1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
+4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
+7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
+2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
+3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
+ 516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
+1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
+2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
+2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
+7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
+7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
+7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
+2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
+2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
+1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
+4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
+3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
+3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
+4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
+4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
+2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
+2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
+7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
+4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
+7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
+2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
+1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
+3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
+4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
+2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
+ 120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
+2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
+1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
+2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
+2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
+4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
+7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
+1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
+3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
+7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
+1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
+8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
+2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
+8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
+2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
+2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
+8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
+8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
+8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
+ 408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
+8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
+4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
+3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
+8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
+1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
+8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
+ 425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
+1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
+ 479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
+4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
+1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
+4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
+1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
+ 433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
+3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
+4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
+8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
+ 938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
+3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
+ 890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
+2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
+)
+
diff --git a/third_party/python/chardet/chardet/euctwprober.py b/third_party/python/chardet/chardet/euctwprober.py
new file mode 100644
index 0000000000..35669cc4dd
--- /dev/null
+++ b/third_party/python/chardet/chardet/euctwprober.py
@@ -0,0 +1,46 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import EUCTWDistributionAnalysis
+from .mbcssm import EUCTW_SM_MODEL
+
+
+class EUCTWProber(MultiByteCharSetProber):
+ def __init__(self):
+ super(EUCTWProber, self).__init__()
+ self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
+ self.distribution_analyzer = EUCTWDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "EUC-TW"
+
+ @property
+ def language(self):
+ return "Taiwan"
diff --git a/third_party/python/chardet/chardet/gb2312freq.py b/third_party/python/chardet/chardet/gb2312freq.py
new file mode 100644
index 0000000000..697837bd9a
--- /dev/null
+++ b/third_party/python/chardet/chardet/gb2312freq.py
@@ -0,0 +1,283 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# GB2312 most frequently used character table
+#
+# Char to FreqOrder table, from hz6763
+
+# N most frequent chars --> cumulative coverage -- gain over previous row:
+#  512 --> 0.79 -- 0.79
+# 1024 --> 0.92 -- 0.13
+# 2048 --> 0.98 -- 0.06
+# 6768 --> 1.00 -- 0.02
+#
+# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
+# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
+#
+# Typical Distribution Ratio is about 25% of the ideal one, still much higher than the RDR
+
+GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
+
+GB2312_TABLE_SIZE = 3760
+
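+# Illustrative sketch (not part of upstream chardet): indices into the
+# table below are derived from raw two-byte GB2312 sequences. Rows start
+# at lead byte 0xB0 and each row holds 94 cells starting at trail byte
+# 0xA1; indices at or beyond GB2312_TABLE_SIZE are simply skipped by the
+# distribution analysis.
+def _gb2312_order_sketch(first_byte, second_byte):
+    if first_byte >= 0xB0 and second_byte >= 0xA1:
+        return 94 * (first_byte - 0xB0) + (second_byte - 0xA1)
+    return -1
+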
+GB2312_CHAR_TO_FREQ_ORDER = (
+1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
+2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
+2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
+ 249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
+1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
+1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
+ 152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
+1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
+2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
+3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
+ 544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
+1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
+ 927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
+2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
+ 360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
+2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
+1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
+3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
+ 198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
+1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
+ 253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
+2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
+1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
+3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
+1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
+2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
+1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
+ 585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
+3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
+3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
+ 252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
+3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
+ 836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
+1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
+3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
+2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
+1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
+ 755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
+1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
+4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
+ 887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
+3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
+3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
+ 509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
+1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
+2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
+1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
+1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
+ 389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
+3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
+3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
+4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
+ 296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
+3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
+1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
+1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
+4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
+ 215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
+ 814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
+3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
+1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
+ 602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
+1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
+2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
+ 930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
+ 432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
+ 396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
+3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
+4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
+3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
+ 750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
+2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
+2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
+2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
+ 776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
+2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
+ 968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
+ 163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
+ 220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
+3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
+2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
+2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
+1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
+ 18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
+2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
+ 90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
+ 286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
+1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
+1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
+ 915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
+ 681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
+1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
+2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
+3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
+2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
+2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
+2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
+3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
+1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
+1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
+2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
+1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
+3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
+1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
+1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
+3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
+ 795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
+2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
+1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
+4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
+1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
+1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
+3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
+1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
+ 47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
+ 504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
+1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
+ 160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
+1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
+1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
+ 744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
+3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
+4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
+3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
+2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
+2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
+1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
+3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
+2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
+1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
+1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
+ 125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
+2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
+2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
+3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
+4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
+3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
+ 180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
+3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
+2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
+1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
+ 259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
+ 774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
+3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
+4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
+2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
+1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
+1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
+ 766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
+1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
+3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
+ 955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
+ 642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
+1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
+ 57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
+1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
+ 193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
+2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
+ 158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
+2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
+2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
+1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
+1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
+2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
+ 819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
+1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
+1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
+2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
+2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
+3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
+1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
+4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
+ 571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
+ 845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
+3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
+1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
+ 470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
+3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
+1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
+4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
+1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
+2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
+1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
+ 498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
+1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
+3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
+ 448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
+2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
+ 136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
+1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
+1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
+1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
+3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
+2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
+3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
+3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
+3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
+ 996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
+2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
+ 786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
+2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
+ 12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
+1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
+ 475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
+ 233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
+1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
+3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
+3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
+1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
+1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
+3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
+2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
+2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
+1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
+3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
+ 451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
+4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
+1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
+2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
+3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
+3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
+1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
+ 768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
+ 391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
+2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
+ 931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
+1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
+ 386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
+1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
+1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
+1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
+1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
+1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
+ 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
+ 852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512
+)
+
diff --git a/third_party/python/chardet/chardet/gb2312prober.py b/third_party/python/chardet/chardet/gb2312prober.py
new file mode 100644
index 0000000000..8446d2dd95
--- /dev/null
+++ b/third_party/python/chardet/chardet/gb2312prober.py
@@ -0,0 +1,46 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import GB2312DistributionAnalysis
+from .mbcssm import GB2312_SM_MODEL
+
+class GB2312Prober(MultiByteCharSetProber):
+ def __init__(self):
+ super(GB2312Prober, self).__init__()
+ self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
+ self.distribution_analyzer = GB2312DistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self):
+ return "GB2312"
+
+ @property
+ def language(self):
+ return "Chinese"
diff --git a/third_party/python/chardet/chardet/hebrewprober.py b/third_party/python/chardet/chardet/hebrewprober.py
new file mode 100644
index 0000000000..b0e1bf4926
--- /dev/null
+++ b/third_party/python/chardet/chardet/hebrewprober.py
@@ -0,0 +1,292 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Shy Shalom
+# Portions created by the Initial Developer are Copyright (C) 2005
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+# This prober doesn't actually recognize a language or a charset.
+# It is a helper prober for use by the Hebrew model probers
+
+### General ideas of the Hebrew charset recognition ###
+#
+# Four main charsets exist in Hebrew:
+# "ISO-8859-8" - Visual Hebrew
+# "windows-1255" - Logical Hebrew
+# "ISO-8859-8-I" - Logical Hebrew
+# "x-mac-hebrew" - ?? Logical Hebrew ??
+#
+# Both "ISO" charsets use a completely identical set of code points, whereas
+# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
+# these code points. windows-1255 defines additional characters in the range
+# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
+# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
+# x-mac-hebrew defines similar additional code points but with a different
+# mapping.
+#
+# As far as an average Hebrew text with no diacritics is concerned, all four
+# charsets are identical with respect to code points. Meaning that for the
+# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
+# (including final letters).
+#
+# The dominant difference between these charsets is their directionality.
+# "Visual" directionality means that the text is ordered as if the renderer is
+# not aware of a BIDI rendering algorithm. The renderer sees the text and
+# draws it from left to right. The text itself when ordered naturally is read
+# backwards. A buffer of Visual Hebrew generally looks like so:
+# "[last word of first line spelled backwards] [whole line ordered backwards
+# and spelled backwards] [first word of first line spelled backwards]
+# [end of line] [last word of second line] ... etc' "
+# Adding punctuation marks, numbers and English text to visual text is
+# naturally also "visual", i.e. ordered from left to right.
+#
+# "Logical" directionality means the text is ordered "naturally" according to
+# the order it is read. It is the responsibility of the renderer to display
+# the text from right to left. A BIDI algorithm is used to place general
+# punctuation marks, numbers and English text in the text.
+#
+# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
+# what little evidence I could find, it seems that its general directionality
+# is Logical.
+#
+# To sum up all of the above, the Hebrew probing mechanism knows about two
+# charsets:
+# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
+# backwards while line order is natural. For charset recognition purposes
+# the line order is unimportant (In fact, for this implementation, even
+# word order is unimportant).
+# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
+#
+# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
+# specifically identified.
+# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
+# that contains special punctuation marks or diacritics is displayed with
+# some unconverted characters showing as question marks. This problem might
+# be corrected using another model prober for x-mac-hebrew. Due to the fact
+# that x-mac-hebrew texts are so rare, writing another model prober isn't
+# worth the effort and performance hit.
+#
+#### The Prober ####
+#
+# The prober is divided between two SBCharSetProbers and a HebrewProber,
+# all of which are managed, created, fed data, inquired and deleted by the
+# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
+# fact some kind of Hebrew, Logical or Visual. The final decision about which
+# one it is falls to the HebrewProber, which combines the final-letter scores
+# with the scores of the two SBCharSetProbers to produce a final answer.
+#
+# The SBCSGroupProber is responsible for stripping the original text of HTML
+# tags, English characters, numbers, low-ASCII punctuation characters, spaces
+# and new lines. It reduces any sequence of such characters to a single space.
+# The buffer fed to each prober in the SBCS group prober is pure text in
+# high-ASCII.
+# The two SBCharSetProbers (model probers) share the same language model:
+# Win1255Model.
+# The first SBCharSetProber uses the model normally as any other
+# SBCharSetProber does, to recognize windows-1255, upon which this model was
+# built. The second SBCharSetProber is told to make the pair-of-letter
+# lookup in the language model backwards. This in practice exactly simulates
+# a visual Hebrew model using the windows-1255 logical Hebrew model.
+#
+# The HebrewProber does not use any language model. All it does is look for
+# final-letter evidence suggesting the text is either logical Hebrew or visual
+# Hebrew. Taken apart from the model probers, the results of the HebrewProber
+# alone are meaningless. HebrewProber always returns 0.00 as confidence
+# since it never identifies a charset by itself. Instead, the pointer to the
+# HebrewProber is passed to the model probers as a helper "Name Prober".
+# When the Group prober receives a positive identification from any prober,
+# it asks for the name of the charset identified. If the prober queried is a
+# Hebrew model prober, the model prober forwards the call to the
+# HebrewProber to make the final decision. In the HebrewProber, the
+# decision is made according to the maintained final-letter scores and both
+# model probers' scores. The answer is returned in the form of the name of the
+# charset identified, either "windows-1255" or "ISO-8859-8". (An illustrative
+# wiring sketch appears at the end of this file.)
+
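+# Editor's note -- an illustrative toy, not part of upstream chardet: what
+# the "visual" ordering described above means, demonstrated with a Latin
+# placeholder (real input would be Hebrew bytes). A renderer-unaware buffer
+# stores each line with word order and spelling reversed, e.g.
+# _example_visual_order('THE QUICK FOX') == 'XOF KCIUQ EHT'.
+def _example_visual_order(logical_line):
+    """Hypothetical helper: visually order a logically ordered line."""
+    return ' '.join(word[::-1] for word in logical_line.split()[::-1])
+
+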
+class HebrewProber(CharSetProber):
+ # windows-1255 / ISO-8859-8 code points of interest
+ FINAL_KAF = 0xea
+ NORMAL_KAF = 0xeb
+ FINAL_MEM = 0xed
+ NORMAL_MEM = 0xee
+ FINAL_NUN = 0xef
+ NORMAL_NUN = 0xf0
+ FINAL_PE = 0xf3
+ NORMAL_PE = 0xf4
+ FINAL_TSADI = 0xf5
+ NORMAL_TSADI = 0xf6
+
+ # Minimum Visual vs Logical final letter score difference.
+ # If the difference is below this, don't rely solely on the final letter score
+ # distance.
+ MIN_FINAL_CHAR_DISTANCE = 5
+
+ # Minimum Visual vs Logical model score difference.
+ # If the difference is below this, don't rely at all on the model score
+ # distance.
+ MIN_MODEL_DISTANCE = 0.01
+
+ VISUAL_HEBREW_NAME = "ISO-8859-8"
+ LOGICAL_HEBREW_NAME = "windows-1255"
+
+ def __init__(self):
+ super(HebrewProber, self).__init__()
+ self._final_char_logical_score = None
+ self._final_char_visual_score = None
+ self._prev = None
+ self._before_prev = None
+ self._logical_prober = None
+ self._visual_prober = None
+ self.reset()
+
+ def reset(self):
+ self._final_char_logical_score = 0
+ self._final_char_visual_score = 0
+        # The last two characters seen in the previous buffer,
+        # self._prev and self._before_prev, are initialized to a space
+        # (0x20) in order to simulate a word delimiter at the beginning of
+        # the data. They are stored as integer byte values to match the
+        # integers produced by iterating the buffer in feed().
+        self._prev = 0x20
+        self._before_prev = 0x20
+ # These probers are owned by the group prober.
+
+ def set_model_probers(self, logicalProber, visualProber):
+ self._logical_prober = logicalProber
+ self._visual_prober = visualProber
+
+ def is_final(self, c):
+ return c in [self.FINAL_KAF, self.FINAL_MEM, self.FINAL_NUN,
+ self.FINAL_PE, self.FINAL_TSADI]
+
+ def is_non_final(self, c):
+ # The normal Tsadi is not a good Non-Final letter due to words like
+ # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
+ # apostrophe is converted to a space in FilterWithoutEnglishLetters
+ # causing the Non-Final tsadi to appear at an end of a word even
+ # though this is not the case in the original text.
+        # The letters Pe and Kaf occasionally display a related behavior of
+        # not being good Non-Final letters: words like 'Pop', 'Winamp' and
+        # 'Mubarak', for example, legally end with a Non-Final Pe or Kaf.
+        # However, the benefit of these letters as Non-Final letters
+        # outweighs the damage, since such words are quite rare.
+ return c in [self.NORMAL_KAF, self.NORMAL_MEM,
+ self.NORMAL_NUN, self.NORMAL_PE]
+
+ def feed(self, byte_str):
+ # Final letter analysis for logical-visual decision.
+ # Look for evidence that the received buffer is either logical Hebrew
+ # or visual Hebrew.
+ # The following cases are checked:
+ # 1) A word longer than 1 letter, ending with a final letter. This is
+ # an indication that the text is laid out "naturally" since the
+ # final letter really appears at the end. +1 for logical score.
+ # 2) A word longer than 1 letter, ending with a Non-Final letter. In
+ # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
+ # should not end with the Non-Final form of that letter. Exceptions
+        #    to this rule are mentioned above in is_non_final(). This is an
+ # indication that the text is laid out backwards. +1 for visual
+ # score
+ # 3) A word longer than 1 letter, starting with a final letter. Final
+ # letters should not appear at the beginning of a word. This is an
+ # indication that the text is laid out backwards. +1 for visual
+ # score.
+ #
+        # The visual score and logical score are accumulated throughout the
+        # text and are finally checked against each other in the
+        # charset_name property.
+ # No checking for final letters in the middle of words is done since
+ # that case is not an indication for either Logical or Visual text.
+ #
+ # We automatically filter out all 7-bit characters (replace them with
+ # spaces) so the word boundary detection works properly. [MAP]
+
+ if self.state == ProbingState.NOT_ME:
+ # Both model probers say it's not them. No reason to continue.
+ return ProbingState.NOT_ME
+
+ byte_str = self.filter_high_byte_only(byte_str)
+
+        # Note: iterating a bytes object yields integers on Python 3 (and
+        # 1-char strings on Python 2), so wrap the buffer in a bytearray and
+        # compare against the integer value of a space (0x20) to get the
+        # same behaviour on both.
+        for cur in bytearray(byte_str):
+            if cur == 0x20:
+                # We stand on a space - a word just ended
+                if self._before_prev != 0x20:
+                    # next-to-last char was not a space so self._prev is not a
+                    # 1 letter word
+                    if self.is_final(self._prev):
+                        # case (1) [-2:not space][-1:final letter][cur:space]
+                        self._final_char_logical_score += 1
+                    elif self.is_non_final(self._prev):
+                        # case (2) [-2:not space][-1:Non-Final letter][
+                        # cur:space]
+                        self._final_char_visual_score += 1
+            else:
+                # Not standing on a space
+                if ((self._before_prev == 0x20) and
+                    (self.is_final(self._prev)) and (cur != 0x20)):
+                    # case (3) [-2:space][-1:final letter][cur:not space]
+                    self._final_char_visual_score += 1
+            self._before_prev = self._prev
+            self._prev = cur
+
+ # Forever detecting, till the end or until both model probers return
+ # ProbingState.NOT_ME (handled above)
+ return ProbingState.DETECTING
+
+ @property
+ def charset_name(self):
+ # Make the decision: is it Logical or Visual?
+ # If the final letter score distance is dominant enough, rely on it.
+ finalsub = self._final_char_logical_score - self._final_char_visual_score
+ if finalsub >= self.MIN_FINAL_CHAR_DISTANCE:
+ return self.LOGICAL_HEBREW_NAME
+ if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE:
+ return self.VISUAL_HEBREW_NAME
+
+ # It's not dominant enough, try to rely on the model scores instead.
+ modelsub = (self._logical_prober.get_confidence()
+ - self._visual_prober.get_confidence())
+ if modelsub > self.MIN_MODEL_DISTANCE:
+ return self.LOGICAL_HEBREW_NAME
+ if modelsub < -self.MIN_MODEL_DISTANCE:
+ return self.VISUAL_HEBREW_NAME
+
+ # Still no good, back to final letter distance, maybe it'll save the
+ # day.
+ if finalsub < 0.0:
+ return self.VISUAL_HEBREW_NAME
+
+ # (finalsub > 0 - Logical) or (don't know what to do) default to
+ # Logical.
+ return self.LOGICAL_HEBREW_NAME
+
+ @property
+ def language(self):
+ return 'Hebrew'
+
+ @property
+ def state(self):
+ # Remain active as long as any of the model probers are active.
+ if (self._logical_prober.state == ProbingState.NOT_ME) and \
+ (self._visual_prober.state == ProbingState.NOT_ME):
+ return ProbingState.NOT_ME
+ return ProbingState.DETECTING
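+
+
+# Editor's note -- an illustrative sketch, not upstream chardet code: the
+# wiring described in the header comment is performed by SBCSGroupProber.
+# The model name WINDOWS_1255_HEBREW_MODEL and the SingleByteCharSetProber
+# argument order below are assumptions based on the other modules in this
+# package; see sbcsgroupprober.py for the real wiring.
+def _example_hebrew_wiring():
+    from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL
+    from .sbcharsetprober import SingleByteCharSetProber
+
+    hebrew_prober = HebrewProber()
+    # Normal lookup direction recognizes logical Hebrew (windows-1255)...
+    logical = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
+                                      False, hebrew_prober)
+    # ...while reversed pair-of-letter lookup simulates visual Hebrew.
+    visual = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
+                                     True, hebrew_prober)
+    hebrew_prober.set_model_probers(logical, visual)
+    return hebrew_prober, logical, visual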
diff --git a/third_party/python/chardet/chardet/jisfreq.py b/third_party/python/chardet/chardet/jisfreq.py
new file mode 100644
index 0000000000..83fc082b54
--- /dev/null
+++ b/third_party/python/chardet/chardet/jisfreq.py
@@ -0,0 +1,325 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Sampled from about 20M of text material, including literature and computer
+# technology.
+#
+# Japanese frequency table, applied to both S-JIS and EUC-JP.
+# Characters are sorted by frequency order.
+
+# Top-N most frequent chars --> cumulative text coverage:
+#
+#  128 --> 0.77094
+#  256 --> 0.85710
+#  512 --> 0.92635
+# 1024 --> 0.97130
+# 2048 --> 0.99431
+#
+# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
+# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
+#
+# Typical Distribution Ratio is about 25% of the IDR
+
+JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
+
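+# Editor's note -- an illustrative sketch, not upstream chardet code: a
+# quick derivation showing the constant above is consistent with the header
+# figures (roughly 25% of the Ideal Distribution Ratio). The helper and its
+# defaults are assumptions based only on the comments above.
+def _example_typical_ratio(ideal_coverage=0.92635, fraction=0.25):
+    """Hypothetical helper: derive a typical ratio from the cumulative
+    coverage of the top-512 characters quoted above."""
+    ideal_ratio = ideal_coverage / (1.0 - ideal_coverage)  # ~= 12.58
+    return fraction * ideal_ratio  # ~= 3.15; upstream rounds down to 3.0
+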
+# Char to FreqOrder table
+JIS_TABLE_SIZE = 4368
+
+JIS_CHAR_TO_FREQ_ORDER = (
+ 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
+3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
+1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
+2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
+2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
+5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
+1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
+5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
+5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
+5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
+5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
+5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
+5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
+1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
+1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
+1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
+2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
+3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
+3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
+ 4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
+ 12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
+1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
+ 109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
+5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
+ 271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
+ 32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
+ 43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
+ 280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
+ 54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
+5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
+5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
+5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
+4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
+5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
+5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
+5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
+5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
+5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
+5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
+5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
+5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
+5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
+3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
+5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
+5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
+5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
+5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
+5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
+5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
+5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
+5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
+5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
+5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
+5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
+5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
+5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
+5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
+5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
+5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
+5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
+5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
+5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
+5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
+5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
+5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
+5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
+5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
+5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
+5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
+5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
+5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
+5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
+5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
+5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
+5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
+5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
+5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
+5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
+5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
+5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
+5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
+6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
+6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
+6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
+6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
+6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
+6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
+6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
+6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
+4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
+ 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
+ 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
+1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
+1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
+ 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
+3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
+3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
+ 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
+3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
+3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
+ 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
+2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
+ 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
+3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
+1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
+ 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
+1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
+ 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
+2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
+2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
+2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
+2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
+1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
+1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
+1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
+1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
+2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
+1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
+2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
+1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
+1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
+1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
+1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
+1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
+1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
+ 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
+ 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
+1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
+2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
+2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
+2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
+3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
+3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
+ 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
+3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
+1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
+ 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
+2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
+1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
+ 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
+3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
+4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
+2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
+1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
+2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
+1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
+ 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
+ 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
+1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
+2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
+2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
+2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
+3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
+1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
+2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
+ 359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
+ 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
+ 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
+1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
+2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
+ 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
+1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
+1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
+ 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
+1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
+1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
+1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
+ 764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
+2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
+ 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
+2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
+3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
+2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
+1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
+6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
+1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
+2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
+1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
+ 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
+ 72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
+3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
+3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
+1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
+1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
+1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
+1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
+ 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
+ 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
+2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
+ 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
+3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
+2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
+ 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
+1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
+2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
+ 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
+1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
+ 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
+4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
+2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
+1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
+ 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
+1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
+2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
+ 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
+6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
+1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
+1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
+2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
+3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
+ 914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
+3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
+1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
+ 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
+1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
+ 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
+3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
+ 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
+2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
+ 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
+4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
+2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
+1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
+1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
+1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
+ 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
+1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
+3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
+1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
+3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
+ 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
+ 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
+ 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
+2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
+1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
+ 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
+1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
+ 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
+1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
+ 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
+ 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
+ 480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
+1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
+1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
+2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
+4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
+ 227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
+1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
+ 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
+1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
+3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
+1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
+2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
+2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
+1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
+1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
+2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
+ 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
+2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
+1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
+1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
+1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
+1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
+3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
+2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
+2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
+ 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
+3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
+3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
+1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
+2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
+1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
+2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
+)
+
+
diff --git a/third_party/python/chardet/chardet/jpcntx.py b/third_party/python/chardet/chardet/jpcntx.py
new file mode 100644
index 0000000000..20044e4bc8
--- /dev/null
+++ b/third_party/python/chardet/chardet/jpcntx.py
@@ -0,0 +1,233 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+
+# This is the hiragana 2-char sequence table; the number in each cell gives
+# the frequency category of that two-character sequence (a usage sketch
+# follows the table).
+jp2CharContext = (
+(0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1),
+(2,4,0,4,0,3,0,4,0,3,4,4,4,2,4,3,3,4,3,2,3,3,4,2,3,3,3,2,4,1,4,3,3,1,5,4,3,4,3,4,3,5,3,0,3,5,4,2,0,3,1,0,3,3,0,3,3,0,1,1,0,4,3,0,3,3,0,4,0,2,0,3,5,5,5,5,4,0,4,1,0,3,4),
+(0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2),
+(0,4,0,5,0,5,0,4,0,4,5,4,4,3,5,3,5,1,5,3,4,3,4,4,3,4,3,3,4,3,5,4,4,3,5,5,3,5,5,5,3,5,5,3,4,5,5,3,1,3,2,0,3,4,0,4,2,0,4,2,1,5,3,2,3,5,0,4,0,2,0,5,4,4,5,4,5,0,4,0,0,4,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,4,0,3,0,3,0,4,5,4,3,3,3,3,4,3,5,4,4,3,5,4,4,3,4,3,4,4,4,4,5,3,4,4,3,4,5,5,4,5,5,1,4,5,4,3,0,3,3,1,3,3,0,4,4,0,3,3,1,5,3,3,3,5,0,4,0,3,0,4,4,3,4,3,3,0,4,1,1,3,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,4,0,3,0,3,0,4,0,3,4,4,3,2,2,1,2,1,3,1,3,3,3,3,3,4,3,1,3,3,5,3,3,0,4,3,0,5,4,3,3,5,4,4,3,4,4,5,0,1,2,0,1,2,0,2,2,0,1,0,0,5,2,2,1,4,0,3,0,1,0,4,4,3,5,4,3,0,2,1,0,4,3),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,5,0,4,0,2,1,4,4,2,4,1,4,2,4,2,4,3,3,3,4,3,3,3,3,1,4,2,3,3,3,1,4,4,1,1,1,4,3,3,2,0,2,4,3,2,0,3,3,0,3,1,1,0,0,0,3,3,0,4,2,2,3,4,0,4,0,3,0,4,4,5,3,4,4,0,3,0,0,1,4),
+(1,4,0,4,0,4,0,4,0,3,5,4,4,3,4,3,5,4,3,3,4,3,5,4,4,4,4,3,4,2,4,3,3,1,5,4,3,2,4,5,4,5,5,4,4,5,4,4,0,3,2,2,3,3,0,4,3,1,3,2,1,4,3,3,4,5,0,3,0,2,0,4,5,5,4,5,4,0,4,0,0,5,4),
+(0,5,0,5,0,4,0,3,0,4,4,3,4,3,3,3,4,0,4,4,4,3,4,3,4,3,3,1,4,2,4,3,4,0,5,4,1,4,5,4,4,5,3,2,4,3,4,3,2,4,1,3,3,3,2,3,2,0,4,3,3,4,3,3,3,4,0,4,0,3,0,4,5,4,4,4,3,0,4,1,0,1,3),
+(0,3,1,4,0,3,0,2,0,3,4,4,3,1,4,2,3,3,4,3,4,3,4,3,4,4,3,2,3,1,5,4,4,1,4,4,3,5,4,4,3,5,5,4,3,4,4,3,1,2,3,1,2,2,0,3,2,0,3,1,0,5,3,3,3,4,3,3,3,3,4,4,4,4,5,4,2,0,3,3,2,4,3),
+(0,2,0,3,0,1,0,1,0,0,3,2,0,0,2,0,1,0,2,1,3,3,3,1,2,3,1,0,1,0,4,2,1,1,3,3,0,4,3,3,1,4,3,3,0,3,3,2,0,0,0,0,1,0,0,2,0,0,0,0,0,4,1,0,2,3,2,2,2,1,3,3,3,4,4,3,2,0,3,1,0,3,3),
+(0,4,0,4,0,3,0,3,0,4,4,4,3,3,3,3,3,3,4,3,4,2,4,3,4,3,3,2,4,3,4,5,4,1,4,5,3,5,4,5,3,5,4,0,3,5,5,3,1,3,3,2,2,3,0,3,4,1,3,3,2,4,3,3,3,4,0,4,0,3,0,4,5,4,4,5,3,0,4,1,0,3,4),
+(0,2,0,3,0,3,0,0,0,2,2,2,1,0,1,0,0,0,3,0,3,0,3,0,1,3,1,0,3,1,3,3,3,1,3,3,3,0,1,3,1,3,4,0,0,3,1,1,0,3,2,0,0,0,0,1,3,0,1,0,0,3,3,2,0,3,0,0,0,0,0,3,4,3,4,3,3,0,3,0,0,2,3),
+(2,3,0,3,0,2,0,1,0,3,3,4,3,1,3,1,1,1,3,1,4,3,4,3,3,3,0,0,3,1,5,4,3,1,4,3,2,5,5,4,4,4,4,3,3,4,4,4,0,2,1,1,3,2,0,1,2,0,0,1,0,4,1,3,3,3,0,3,0,1,0,4,4,4,5,5,3,0,2,0,0,4,4),
+(0,2,0,1,0,3,1,3,0,2,3,3,3,0,3,1,0,0,3,0,3,2,3,1,3,2,1,1,0,0,4,2,1,0,2,3,1,4,3,2,0,4,4,3,1,3,1,3,0,1,0,0,1,0,0,0,1,0,0,0,0,4,1,1,1,2,0,3,0,0,0,3,4,2,4,3,2,0,1,0,0,3,3),
+(0,1,0,4,0,5,0,4,0,2,4,4,2,3,3,2,3,3,5,3,3,3,4,3,4,2,3,0,4,3,3,3,4,1,4,3,2,1,5,5,3,4,5,1,3,5,4,2,0,3,3,0,1,3,0,4,2,0,1,3,1,4,3,3,3,3,0,3,0,1,0,3,4,4,4,5,5,0,3,0,1,4,5),
+(0,2,0,3,0,3,0,0,0,2,3,1,3,0,4,0,1,1,3,0,3,4,3,2,3,1,0,3,3,2,3,1,3,0,2,3,0,2,1,4,1,2,2,0,0,3,3,0,0,2,0,0,0,1,0,0,0,0,2,2,0,3,2,1,3,3,0,2,0,2,0,0,3,3,1,2,4,0,3,0,2,2,3),
+(2,4,0,5,0,4,0,4,0,2,4,4,4,3,4,3,3,3,1,2,4,3,4,3,4,4,5,0,3,3,3,3,2,0,4,3,1,4,3,4,1,4,4,3,3,4,4,3,1,2,3,0,4,2,0,4,1,0,3,3,0,4,3,3,3,4,0,4,0,2,0,3,5,3,4,5,2,0,3,0,0,4,5),
+(0,3,0,4,0,1,0,1,0,1,3,2,2,1,3,0,3,0,2,0,2,0,3,0,2,0,0,0,1,0,1,1,0,0,3,1,0,0,0,4,0,3,1,0,2,1,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,4,2,2,3,1,0,3,0,0,0,1,4,4,4,3,0,0,4,0,0,1,4),
+(1,4,1,5,0,3,0,3,0,4,5,4,4,3,5,3,3,4,4,3,4,1,3,3,3,3,2,1,4,1,5,4,3,1,4,4,3,5,4,4,3,5,4,3,3,4,4,4,0,3,3,1,2,3,0,3,1,0,3,3,0,5,4,4,4,4,4,4,3,3,5,4,4,3,3,5,4,0,3,2,0,4,4),
+(0,2,0,3,0,1,0,0,0,1,3,3,3,2,4,1,3,0,3,1,3,0,2,2,1,1,0,0,2,0,4,3,1,0,4,3,0,4,4,4,1,4,3,1,1,3,3,1,0,2,0,0,1,3,0,0,0,0,2,0,0,4,3,2,4,3,5,4,3,3,3,4,3,3,4,3,3,0,2,1,0,3,3),
+(0,2,0,4,0,3,0,2,0,2,5,5,3,4,4,4,4,1,4,3,3,0,4,3,4,3,1,3,3,2,4,3,0,3,4,3,0,3,4,4,2,4,4,0,4,5,3,3,2,2,1,1,1,2,0,1,5,0,3,3,2,4,3,3,3,4,0,3,0,2,0,4,4,3,5,5,0,0,3,0,2,3,3),
+(0,3,0,4,0,3,0,1,0,3,4,3,3,1,3,3,3,0,3,1,3,0,4,3,3,1,1,0,3,0,3,3,0,0,4,4,0,1,5,4,3,3,5,0,3,3,4,3,0,2,0,1,1,1,0,1,3,0,1,2,1,3,3,2,3,3,0,3,0,1,0,1,3,3,4,4,1,0,1,2,2,1,3),
+(0,1,0,4,0,4,0,3,0,1,3,3,3,2,3,1,1,0,3,0,3,3,4,3,2,4,2,0,1,0,4,3,2,0,4,3,0,5,3,3,2,4,4,4,3,3,3,4,0,1,3,0,0,1,0,0,1,0,0,0,0,4,2,3,3,3,0,3,0,0,0,4,4,4,5,3,2,0,3,3,0,3,5),
+(0,2,0,3,0,0,0,3,0,1,3,0,2,0,0,0,1,0,3,1,1,3,3,0,0,3,0,0,3,0,2,3,1,0,3,1,0,3,3,2,0,4,2,2,0,2,0,0,0,4,0,0,0,0,0,0,0,0,0,0,0,2,1,2,0,1,0,1,0,0,0,1,3,1,2,0,0,0,1,0,0,1,4),
+(0,3,0,3,0,5,0,1,0,2,4,3,1,3,3,2,1,1,5,2,1,0,5,1,2,0,0,0,3,3,2,2,3,2,4,3,0,0,3,3,1,3,3,0,2,5,3,4,0,3,3,0,1,2,0,2,2,0,3,2,0,2,2,3,3,3,0,2,0,1,0,3,4,4,2,5,4,0,3,0,0,3,5),
+(0,3,0,3,0,3,0,1,0,3,3,3,3,0,3,0,2,0,2,1,1,0,2,0,1,0,0,0,2,1,0,0,1,0,3,2,0,0,3,3,1,2,3,1,0,3,3,0,0,1,0,0,0,0,0,2,0,0,0,0,0,2,3,1,2,3,0,3,0,1,0,3,2,1,0,4,3,0,1,1,0,3,3),
+(0,4,0,5,0,3,0,3,0,4,5,5,4,3,5,3,4,3,5,3,3,2,5,3,4,4,4,3,4,3,4,5,5,3,4,4,3,4,4,5,4,4,4,3,4,5,5,4,2,3,4,2,3,4,0,3,3,1,4,3,2,4,3,3,5,5,0,3,0,3,0,5,5,5,5,4,4,0,4,0,1,4,4),
+(0,4,0,4,0,3,0,3,0,3,5,4,4,2,3,2,5,1,3,2,5,1,4,2,3,2,3,3,4,3,3,3,3,2,5,4,1,3,3,5,3,4,4,0,4,4,3,1,1,3,1,0,2,3,0,2,3,0,3,0,0,4,3,1,3,4,0,3,0,2,0,4,4,4,3,4,5,0,4,0,0,3,4),
+(0,3,0,3,0,3,1,2,0,3,4,4,3,3,3,0,2,2,4,3,3,1,3,3,3,1,1,0,3,1,4,3,2,3,4,4,2,4,4,4,3,4,4,3,2,4,4,3,1,3,3,1,3,3,0,4,1,0,2,2,1,4,3,2,3,3,5,4,3,3,5,4,4,3,3,0,4,0,3,2,2,4,4),
+(0,2,0,1,0,0,0,0,0,1,2,1,3,0,0,0,0,0,2,0,1,2,1,0,0,1,0,0,0,0,3,0,0,1,0,1,1,3,1,0,0,0,1,1,0,1,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,1,2,2,0,3,4,0,0,0,1,1,0,0,1,0,0,0,0,0,1,1),
+(0,1,0,0,0,1,0,0,0,0,4,0,4,1,4,0,3,0,4,0,3,0,4,0,3,0,3,0,4,1,5,1,4,0,0,3,0,5,0,5,2,0,1,0,0,0,2,1,4,0,1,3,0,0,3,0,0,3,1,1,4,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0),
+(1,4,0,5,0,3,0,2,0,3,5,4,4,3,4,3,5,3,4,3,3,0,4,3,3,3,3,3,3,2,4,4,3,1,3,4,4,5,4,4,3,4,4,1,3,5,4,3,3,3,1,2,2,3,3,1,3,1,3,3,3,5,3,3,4,5,0,3,0,3,0,3,4,3,4,4,3,0,3,0,2,4,3),
+(0,1,0,4,0,0,0,0,0,1,4,0,4,1,4,2,4,0,3,0,1,0,1,0,0,0,0,0,2,0,3,1,1,1,0,3,0,0,0,1,2,1,0,0,1,1,1,1,0,1,0,0,0,1,0,0,3,0,0,0,0,3,2,0,2,2,0,1,0,0,0,2,3,2,3,3,0,0,0,0,2,1,0),
+(0,5,1,5,0,3,0,3,0,5,4,4,5,1,5,3,3,0,4,3,4,3,5,3,4,3,3,2,4,3,4,3,3,0,3,3,1,4,4,3,4,4,4,3,4,5,5,3,2,3,1,1,3,3,1,3,1,1,3,3,2,4,5,3,3,5,0,4,0,3,0,4,4,3,5,3,3,0,3,4,0,4,3),
+(0,5,0,5,0,3,0,2,0,4,4,3,5,2,4,3,3,3,4,4,4,3,5,3,5,3,3,1,4,0,4,3,3,0,3,3,0,4,4,4,4,5,4,3,3,5,5,3,2,3,1,2,3,2,0,1,0,0,3,2,2,4,4,3,1,5,0,4,0,3,0,4,3,1,3,2,1,0,3,3,0,3,3),
+(0,4,0,5,0,5,0,4,0,4,5,5,5,3,4,3,3,2,5,4,4,3,5,3,5,3,4,0,4,3,4,4,3,2,4,4,3,4,5,4,4,5,5,0,3,5,5,4,1,3,3,2,3,3,1,3,1,0,4,3,1,4,4,3,4,5,0,4,0,2,0,4,3,4,4,3,3,0,4,0,0,5,5),
+(0,4,0,4,0,5,0,1,1,3,3,4,4,3,4,1,3,0,5,1,3,0,3,1,3,1,1,0,3,0,3,3,4,0,4,3,0,4,4,4,3,4,4,0,3,5,4,1,0,3,0,0,2,3,0,3,1,0,3,1,0,3,2,1,3,5,0,3,0,1,0,3,2,3,3,4,4,0,2,2,0,4,4),
+(2,4,0,5,0,4,0,3,0,4,5,5,4,3,5,3,5,3,5,3,5,2,5,3,4,3,3,4,3,4,5,3,2,1,5,4,3,2,3,4,5,3,4,1,2,5,4,3,0,3,3,0,3,2,0,2,3,0,4,1,0,3,4,3,3,5,0,3,0,1,0,4,5,5,5,4,3,0,4,2,0,3,5),
+(0,5,0,4,0,4,0,2,0,5,4,3,4,3,4,3,3,3,4,3,4,2,5,3,5,3,4,1,4,3,4,4,4,0,3,5,0,4,4,4,4,5,3,1,3,4,5,3,3,3,3,3,3,3,0,2,2,0,3,3,2,4,3,3,3,5,3,4,1,3,3,5,3,2,0,0,0,0,4,3,1,3,3),
+(0,1,0,3,0,3,0,1,0,1,3,3,3,2,3,3,3,0,3,0,0,0,3,1,3,0,0,0,2,2,2,3,0,0,3,2,0,1,2,4,1,3,3,0,0,3,3,3,0,1,0,0,2,1,0,0,3,0,3,1,0,3,0,0,1,3,0,2,0,1,0,3,3,1,3,3,0,0,1,1,0,3,3),
+(0,2,0,3,0,2,1,4,0,2,2,3,1,1,3,1,1,0,2,0,3,1,2,3,1,3,0,0,1,0,4,3,2,3,3,3,1,4,2,3,3,3,3,1,0,3,1,4,0,1,1,0,1,2,0,1,1,0,1,1,0,3,1,3,2,2,0,1,0,0,0,2,3,3,3,1,0,0,0,0,0,2,3),
+(0,5,0,4,0,5,0,2,0,4,5,5,3,3,4,3,3,1,5,4,4,2,4,4,4,3,4,2,4,3,5,5,4,3,3,4,3,3,5,5,4,5,5,1,3,4,5,3,1,4,3,1,3,3,0,3,3,1,4,3,1,4,5,3,3,5,0,4,0,3,0,5,3,3,1,4,3,0,4,0,1,5,3),
+(0,5,0,5,0,4,0,2,0,4,4,3,4,3,3,3,3,3,5,4,4,4,4,4,4,5,3,3,5,2,4,4,4,3,4,4,3,3,4,4,5,5,3,3,4,3,4,3,3,4,3,3,3,3,1,2,2,1,4,3,3,5,4,4,3,4,0,4,0,3,0,4,4,4,4,4,1,0,4,2,0,2,4),
+(0,4,0,4,0,3,0,1,0,3,5,2,3,0,3,0,2,1,4,2,3,3,4,1,4,3,3,2,4,1,3,3,3,0,3,3,0,0,3,3,3,5,3,3,3,3,3,2,0,2,0,0,2,0,0,2,0,0,1,0,0,3,1,2,2,3,0,3,0,2,0,4,4,3,3,4,1,0,3,0,0,2,4),
+(0,0,0,4,0,0,0,0,0,0,1,0,1,0,2,0,0,0,0,0,1,0,2,0,1,0,0,0,0,0,3,1,3,0,3,2,0,0,0,1,0,3,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,4,0,2,0,0,0,0,0,0,2),
+(0,2,1,3,0,2,0,2,0,3,3,3,3,1,3,1,3,3,3,3,3,3,4,2,2,1,2,1,4,0,4,3,1,3,3,3,2,4,3,5,4,3,3,3,3,3,3,3,0,1,3,0,2,0,0,1,0,0,1,0,0,4,2,0,2,3,0,3,3,0,3,3,4,2,3,1,4,0,1,2,0,2,3),
+(0,3,0,3,0,1,0,3,0,2,3,3,3,0,3,1,2,0,3,3,2,3,3,2,3,2,3,1,3,0,4,3,2,0,3,3,1,4,3,3,2,3,4,3,1,3,3,1,1,0,1,1,0,1,0,1,0,1,0,0,0,4,1,1,0,3,0,3,1,0,2,3,3,3,3,3,1,0,0,2,0,3,3),
+(0,0,0,0,0,0,0,0,0,0,3,0,2,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,3,0,3,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,2,0,2,3,0,0,0,0,0,0,0,0,3),
+(0,2,0,3,1,3,0,3,0,2,3,3,3,1,3,1,3,1,3,1,3,3,3,1,3,0,2,3,1,1,4,3,3,2,3,3,1,2,2,4,1,3,3,0,1,4,2,3,0,1,3,0,3,0,0,1,3,0,2,0,0,3,3,2,1,3,0,3,0,2,0,3,4,4,4,3,1,0,3,0,0,3,3),
+(0,2,0,1,0,2,0,0,0,1,3,2,2,1,3,0,1,1,3,0,3,2,3,1,2,0,2,0,1,1,3,3,3,0,3,3,1,1,2,3,2,3,3,1,2,3,2,0,0,1,0,0,0,0,0,0,3,0,1,0,0,2,1,2,1,3,0,3,0,0,0,3,4,4,4,3,2,0,2,0,0,2,4),
+(0,0,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,3,1,0,0,0,0,0,0,0,3),
+(0,3,0,3,0,2,0,3,0,3,3,3,2,3,2,2,2,0,3,1,3,3,3,2,3,3,0,0,3,0,3,2,2,0,2,3,1,4,3,4,3,3,2,3,1,5,4,4,0,3,1,2,1,3,0,3,1,1,2,0,2,3,1,3,1,3,0,3,0,1,0,3,3,4,4,2,1,0,2,1,0,2,4),
+(0,1,0,3,0,1,0,2,0,1,4,2,5,1,4,0,2,0,2,1,3,1,4,0,2,1,0,0,2,1,4,1,1,0,3,3,0,5,1,3,2,3,3,1,0,3,2,3,0,1,0,0,0,0,0,0,1,0,0,0,0,4,0,1,0,3,0,2,0,1,0,3,3,3,4,3,3,0,0,0,0,2,3),
+(0,0,0,1,0,0,0,0,0,0,2,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,1,0,0,1,0,0,0,0,0,3),
+(0,1,0,3,0,4,0,3,0,2,4,3,1,0,3,2,2,1,3,1,2,2,3,1,1,1,2,1,3,0,1,2,0,1,3,2,1,3,0,5,5,1,0,0,1,3,2,1,0,3,0,0,1,0,0,0,0,0,3,4,0,1,1,1,3,2,0,2,0,1,0,2,3,3,1,2,3,0,1,0,1,0,4),
+(0,0,0,1,0,3,0,3,0,2,2,1,0,0,4,0,3,0,3,1,3,0,3,0,3,0,1,0,3,0,3,1,3,0,3,3,0,0,1,2,1,1,1,0,1,2,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,2,2,1,2,0,0,2,0,0,0,0,2,3,3,3,3,0,0,0,0,1,4),
+(0,0,0,3,0,3,0,0,0,0,3,1,1,0,3,0,1,0,2,0,1,0,0,0,0,0,0,0,1,0,3,0,2,0,2,3,0,0,2,2,3,1,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,2,3),
+(2,4,0,5,0,5,0,4,0,3,4,3,3,3,4,3,3,3,4,3,4,4,5,4,5,5,5,2,3,0,5,5,4,1,5,4,3,1,5,4,3,4,4,3,3,4,3,3,0,3,2,0,2,3,0,3,0,0,3,3,0,5,3,2,3,3,0,3,0,3,0,3,4,5,4,5,3,0,4,3,0,3,4),
+(0,3,0,3,0,3,0,3,0,3,3,4,3,2,3,2,3,0,4,3,3,3,3,3,3,3,3,0,3,2,4,3,3,1,3,4,3,4,4,4,3,4,4,3,2,4,4,1,0,2,0,0,1,1,0,2,0,0,3,1,0,5,3,2,1,3,0,3,0,1,2,4,3,2,4,3,3,0,3,2,0,4,4),
+(0,3,0,3,0,1,0,0,0,1,4,3,3,2,3,1,3,1,4,2,3,2,4,2,3,4,3,0,2,2,3,3,3,0,3,3,3,0,3,4,1,3,3,0,3,4,3,3,0,1,1,0,1,0,0,0,4,0,3,0,0,3,1,2,1,3,0,4,0,1,0,4,3,3,4,3,3,0,2,0,0,3,3),
+(0,3,0,4,0,1,0,3,0,3,4,3,3,0,3,3,3,1,3,1,3,3,4,3,3,3,0,0,3,1,5,3,3,1,3,3,2,5,4,3,3,4,5,3,2,5,3,4,0,1,0,0,0,0,0,2,0,0,1,1,0,4,2,2,1,3,0,3,0,2,0,4,4,3,5,3,2,0,1,1,0,3,4),
+(0,5,0,4,0,5,0,2,0,4,4,3,3,2,3,3,3,1,4,3,4,1,5,3,4,3,4,0,4,2,4,3,4,1,5,4,0,4,4,4,4,5,4,1,3,5,4,2,1,4,1,1,3,2,0,3,1,0,3,2,1,4,3,3,3,4,0,4,0,3,0,4,4,4,3,3,3,0,4,2,0,3,4),
+(1,4,0,4,0,3,0,1,0,3,3,3,1,1,3,3,2,2,3,3,1,0,3,2,2,1,2,0,3,1,2,1,2,0,3,2,0,2,2,3,3,4,3,0,3,3,1,2,0,1,1,3,1,2,0,0,3,0,1,1,0,3,2,2,3,3,0,3,0,0,0,2,3,3,4,3,3,0,1,0,0,1,4),
+(0,4,0,4,0,4,0,0,0,3,4,4,3,1,4,2,3,2,3,3,3,1,4,3,4,0,3,0,4,2,3,3,2,2,5,4,2,1,3,4,3,4,3,1,3,3,4,2,0,2,1,0,3,3,0,0,2,0,3,1,0,4,4,3,4,3,0,4,0,1,0,2,4,4,4,4,4,0,3,2,0,3,3),
+(0,0,0,1,0,4,0,0,0,0,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,3,2,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,2),
+(0,2,0,3,0,4,0,4,0,1,3,3,3,0,4,0,2,1,2,1,1,1,2,0,3,1,1,0,1,0,3,1,0,0,3,3,2,0,1,1,0,0,0,0,0,1,0,2,0,2,2,0,3,1,0,0,1,0,1,1,0,1,2,0,3,0,0,0,0,1,0,0,3,3,4,3,1,0,1,0,3,0,2),
+(0,0,0,3,0,5,0,0,0,0,1,0,2,0,3,1,0,1,3,0,0,0,2,0,0,0,1,0,0,0,1,1,0,0,4,0,0,0,2,3,0,1,4,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,1,0,0,0,0,0,0,0,2,0,0,3,0,0,0,0,0,3),
+(0,2,0,5,0,5,0,1,0,2,4,3,3,2,5,1,3,2,3,3,3,0,4,1,2,0,3,0,4,0,2,2,1,1,5,3,0,0,1,4,2,3,2,0,3,3,3,2,0,2,4,1,1,2,0,1,1,0,3,1,0,1,3,1,2,3,0,2,0,0,0,1,3,5,4,4,4,0,3,0,0,1,3),
+(0,4,0,5,0,4,0,4,0,4,5,4,3,3,4,3,3,3,4,3,4,4,5,3,4,5,4,2,4,2,3,4,3,1,4,4,1,3,5,4,4,5,5,4,4,5,5,5,2,3,3,1,4,3,1,3,3,0,3,3,1,4,3,4,4,4,0,3,0,4,0,3,3,4,4,5,0,0,4,3,0,4,5),
+(0,4,0,4,0,3,0,3,0,3,4,4,4,3,3,2,4,3,4,3,4,3,5,3,4,3,2,1,4,2,4,4,3,1,3,4,2,4,5,5,3,4,5,4,1,5,4,3,0,3,2,2,3,2,1,3,1,0,3,3,3,5,3,3,3,5,4,4,2,3,3,4,3,3,3,2,1,0,3,2,1,4,3),
+(0,4,0,5,0,4,0,3,0,3,5,5,3,2,4,3,4,0,5,4,4,1,4,4,4,3,3,3,4,3,5,5,2,3,3,4,1,2,5,5,3,5,5,2,3,5,5,4,0,3,2,0,3,3,1,1,5,1,4,1,0,4,3,2,3,5,0,4,0,3,0,5,4,3,4,3,0,0,4,1,0,4,4),
+(1,3,0,4,0,2,0,2,0,2,5,5,3,3,3,3,3,0,4,2,3,4,4,4,3,4,0,0,3,4,5,4,3,3,3,3,2,5,5,4,5,5,5,4,3,5,5,5,1,3,1,0,1,0,0,3,2,0,4,2,0,5,2,3,2,4,1,3,0,3,0,4,5,4,5,4,3,0,4,2,0,5,4),
+(0,3,0,4,0,5,0,3,0,3,4,4,3,2,3,2,3,3,3,3,3,2,4,3,3,2,2,0,3,3,3,3,3,1,3,3,3,0,4,4,3,4,4,1,1,4,4,2,0,3,1,0,1,1,0,4,1,0,2,3,1,3,3,1,3,4,0,3,0,1,0,3,1,3,0,0,1,0,2,0,0,4,4),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0),
+(0,3,0,3,0,2,0,3,0,1,5,4,3,3,3,1,4,2,1,2,3,4,4,2,4,4,5,0,3,1,4,3,4,0,4,3,3,3,2,3,2,5,3,4,3,2,2,3,0,0,3,0,2,1,0,1,2,0,0,0,0,2,1,1,3,1,0,2,0,4,0,3,4,4,4,5,2,0,2,0,0,1,3),
+(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,1,0,0,1,1,0,0,0,4,2,1,1,0,1,0,3,2,0,0,3,1,1,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,3,0,1,0,0,0,2,0,0,0,1,4,0,4,2,1,0,0,0,0,0,1),
+(0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,1,0,1,0,0,0,0,3,1,0,0,0,2,0,2,1,0,0,1,2,1,0,1,1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,1,3,1,0,0,0,0,0,1,0,0,2,1,0,0,0,0,0,0,0,0,2),
+(0,4,0,4,0,4,0,3,0,4,4,3,4,2,4,3,2,0,4,4,4,3,5,3,5,3,3,2,4,2,4,3,4,3,1,4,0,2,3,4,4,4,3,3,3,4,4,4,3,4,1,3,4,3,2,1,2,1,3,3,3,4,4,3,3,5,0,4,0,3,0,4,3,3,3,2,1,0,3,0,0,3,3),
+(0,4,0,3,0,3,0,3,0,3,5,5,3,3,3,3,4,3,4,3,3,3,4,4,4,3,3,3,3,4,3,5,3,3,1,3,2,4,5,5,5,5,4,3,4,5,5,3,2,2,3,3,3,3,2,3,3,1,2,3,2,4,3,3,3,4,0,4,0,2,0,4,3,2,2,1,2,0,3,0,0,4,1),
+)
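+
+# Illustrative usage sketch (not part of the original module): given the
+# hiragana orders of two consecutive characters (each 0..82, as returned by
+# the get_order() methods below), the table yields the frequency category of
+# that pair:
+#   category = jp2CharContext[prev_order][cur_order]   # 0..5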
+
+class JapaneseContextAnalysis(object):
+ NUM_OF_CATEGORY = 6  # frequency categories used in jp2CharContext (0..5)
+ DONT_KNOW = -1
+ ENOUGH_REL_THRESHOLD = 100  # enough 2-char sequences to trust the verdict
+ MAX_REL_THRESHOLD = 1000  # stop analysing once this many sequences are seen
+ MINIMUM_DATA_THRESHOLD = 4  # at or below this, get_confidence() returns DONT_KNOW
+
+ def __init__(self):
+ self._total_rel = None
+ self._rel_sample = None
+ self._need_to_skip_char_num = None
+ self._last_char_order = None
+ self._done = None
+ self.reset()
+
+ def reset(self):
+ self._total_rel = 0  # total sequences received
+ # category counters; each integer counts sequences in its category
+ self._rel_sample = [0] * self.NUM_OF_CATEGORY
+ # if the last byte in the current buffer is not the last byte of a
+ # character, we need to know how many bytes to skip in the next buffer
+ self._need_to_skip_char_num = 0
+ self._last_char_order = -1  # the order of the previous char
+ # If this flag is set to True, detection is done and a conclusion has
+ # been made
+ self._done = False
+
+ def feed(self, byte_str, num_bytes):
+ if self._done:
+ return
+
+ # The buffer we got is byte oriented, and a character may span more than
+ # one buffer. In case the last one or two bytes in the previous buffer
+ # were not complete, we record how many bytes are needed to complete that
+ # character and skip them here. We could record those bytes as well and
+ # analyse the character once it is complete, but since a single character
+ # will not make much difference, simply skipping it simplifies our logic
+ # and improves performance.
+ i = self._need_to_skip_char_num
+ while i < num_bytes:
+ order, char_len = self.get_order(byte_str[i:i + 2])
+ i += char_len
+ if i > num_bytes:
+ self._need_to_skip_char_num = i - num_bytes
+ self._last_char_order = -1
+ else:
+ if (order != -1) and (self._last_char_order != -1):
+ self._total_rel += 1
+ if self._total_rel > self.MAX_REL_THRESHOLD:
+ self._done = True
+ break
+ self._rel_sample[jp2CharContext[self._last_char_order][order]] += 1
+ self._last_char_order = order
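+
+ # Illustrative sketch (not part of the original module): feeding a
+ # Shift_JIS buffer holding the hiragana 'あい' (bytes 82 A0 82 A2)
+ # registers one adjacent-hiragana pair in _rel_sample:
+ #   analyser = SJISContextAnalysis()  # defined further below
+ #   analyser.feed(b'\x82\xa0\x82\xa2', 4)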
+
+ def got_enough_data(self):
+ return self._total_rel > self.ENOUGH_REL_THRESHOLD
+
+ def get_confidence(self):
+ # This is just one way to calculate confidence. It works well for me.
+ if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
+ return (self._total_rel - self._rel_sample[0]) / self._total_rel
+ else:
+ return self.DONT_KNOW
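+
+ # Worked example (illustrative): if 100 sequences were seen and 10 of them
+ # fell in category 0, the confidence is (100 - 10) / 100 = 0.9, i.e. the
+ # fraction of observed pairs that landed in a plausible frequency category.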
+
+ def get_order(self, byte_str):
+ # overridden by the encoding-specific subclasses below
+ return -1, 1
+
+class SJISContextAnalysis(JapaneseContextAnalysis):
+ def __init__(self):
+ super(SJISContextAnalysis, self).__init__()
+ self._charset_name = "SHIFT_JIS"
+
+ @property
+ def charset_name(self):
+ return self._charset_name
+
+ def get_order(self, byte_str):
+ if not byte_str:
+ return -1, 1
+ # find out current char's byte length
+ first_char = byte_str[0]
+ if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
+ char_len = 2
+ if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
+ self._charset_name = "CP932"
+ else:
+ char_len = 1
+
+ # return its order if it is hiragana
+ if len(byte_str) > 1:
+ second_char = byte_str[1]
+ # hiragana lead byte in Shift_JIS is 0x82 (octal '\202' in the original
+ # Mozilla code; the upstream Python port used decimal 202 by mistake)
+ if (first_char == 0x82) and (0x9F <= second_char <= 0xF1):
+ return second_char - 0x9F, char_len
+
+ return -1, char_len
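+
+ # Illustrative sketch (not part of the original module), relying on the
+ # 0x82 hiragana check above: b'\x82\xa0' is Shift_JIS 'あ', so
+ #   SJISContextAnalysis().get_order(b'\x82\xa0')  ->  (1, 2)
+ # i.e. order 0xA0 - 0x9F = 1 and a two-byte character.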
+
+class EUCJPContextAnalysis(JapaneseContextAnalysis):
+ def get_order(self, byte_str):
+ if not byte_str:
+ return -1, 1
+ # find out current char's byte length
+ first_char = byte_str[0]
+ if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
+ char_len = 2
+ elif first_char == 0x8F:
+ char_len = 3
+ else:
+ char_len = 1
+
+ # return its order if it is hiragana
+ if len(byte_str) > 1:
+ second_char = byte_str[1]
+ if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
+ return second_char - 0xA1, char_len
+
+ return -1, char_len
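+
+ # Illustrative sketch (not part of the original module): b'\xa4\xa2' is
+ # EUC-JP 'あ', so
+ #   EUCJPContextAnalysis().get_order(b'\xa4\xa2')  ->  (1, 2)
+ # i.e. order 0xA2 - 0xA1 = 1 and a two-byte character (lead byte 0xA4).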
+
+
diff --git a/third_party/python/chardet/chardet/langbulgarianmodel.py b/third_party/python/chardet/chardet/langbulgarianmodel.py
new file mode 100644
index 0000000000..561bfd9051
--- /dev/null
+++ b/third_party/python/chardet/chardet/langbulgarianmodel.py
@@ -0,0 +1,4650 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from chardet.sbcharsetprober import SingleByteCharSetModel
+
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
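+#
+# Illustrative usage sketch (not part of the original module): the outer key
+# is a character's order, the inner dict rates each possible follower, e.g.
+#   BULGARIAN_LANG_MODEL[1][18]  ->  3   # 'а' followed by 'б': Positive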
+
+BULGARIAN_LANG_MODEL = {
+ 63: { # 'e'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 1, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 0, # 'и'
+ 26: 1, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 1, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 0, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 45: { # '\xad'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 1, # 'М'
+ 36: 0, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 0, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 31: { # 'А'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 2, # 'З'
+ 40: 1, # 'И'
+ 59: 1, # 'Й'
+ 33: 1, # 'К'
+ 46: 2, # 'Л'
+ 38: 1, # 'М'
+ 36: 2, # 'Н'
+ 41: 1, # 'О'
+ 30: 2, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 2, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 2, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 1, # 'а'
+ 18: 2, # 'б'
+ 9: 2, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 1, # 'е'
+ 23: 1, # 'ж'
+ 15: 2, # 'з'
+ 2: 0, # 'и'
+ 26: 2, # 'й'
+ 12: 2, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 0, # 'о'
+ 13: 2, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 1, # 'у'
+ 29: 2, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 32: { # 'Б'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 2, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 1, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 2, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 2, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 2, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 2, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 1, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 35: { # 'В'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 2, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 2, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 2, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 43: { # 'Г'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 1, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 37: { # 'Д'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 2, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 2, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 44: { # 'Е'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 1, # 'Й'
+ 33: 2, # 'К'
+ 46: 2, # 'Л'
+ 38: 1, # 'М'
+ 36: 2, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 2, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 2, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 0, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 0, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 0, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 2, # 'н'
+ 4: 0, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 1, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 55: { # 'Ж'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 47: { # 'З'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 2, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 1, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 1, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 40: { # 'И'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 2, # 'З'
+ 40: 1, # 'И'
+ 59: 1, # 'Й'
+ 33: 2, # 'К'
+ 46: 2, # 'Л'
+ 38: 2, # 'М'
+ 36: 2, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 0, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 2, # 'Я'
+ 1: 1, # 'а'
+ 18: 1, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 1, # 'д'
+ 3: 1, # 'е'
+ 23: 0, # 'ж'
+ 15: 3, # 'з'
+ 2: 0, # 'и'
+ 26: 1, # 'й'
+ 12: 1, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 2, # 'н'
+ 4: 0, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 0, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 59: { # 'Й'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 1, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 33: { # 'К'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 2, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 1, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 3, # 'р'
+ 8: 1, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 46: { # 'Л'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 2, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 0, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 38: { # 'М'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 0, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 36: { # 'Н'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 2, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 1, # 'Й'
+ 33: 2, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 1, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 1, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 2, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 41: { # 'О'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 1, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 1, # 'Й'
+ 33: 2, # 'К'
+ 46: 2, # 'Л'
+ 38: 2, # 'М'
+ 36: 2, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 1, # 'а'
+ 18: 2, # 'б'
+ 9: 2, # 'в'
+ 20: 2, # 'г'
+ 11: 1, # 'д'
+ 3: 1, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 0, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 0, # 'о'
+ 13: 2, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 2, # 'ч'
+ 27: 0, # 'ш'
+ 24: 2, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 30: { # 'П'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 2, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 3, # 'л'
+ 14: 0, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 3, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 2, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 39: { # 'Р'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 2, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 1, # 'с'
+ 5: 0, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 28: { # 'С'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 3, # 'А'
+ 32: 2, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 2, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 2, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 34: { # 'Т'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 2, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 2, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 3, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 51: { # 'У'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 0, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 2, # 'Т'
+ 51: 0, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 2, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 2, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 2, # 'с'
+ 5: 1, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 48: { # 'Ф'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 49: { # 'Х'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 53: { # 'Ц'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 2, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 2, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 1, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 50: { # 'Ч'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 1, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 54: { # 'Ш'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 2, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 57: { # 'Щ'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 1, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 1, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 61: { # 'Ъ'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 0, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 2, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 0, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 1, # 'л'
+ 14: 0, # 'м'
+ 6: 1, # 'н'
+ 4: 0, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 60: { # 'Ю'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 0, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 0, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 2, # 'г'
+ 11: 1, # 'д'
+ 3: 0, # 'е'
+ 23: 2, # 'ж'
+ 15: 1, # 'з'
+ 2: 1, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 0, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 56: { # 'Я'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 1, # 'С'
+ 34: 2, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 1, # 'и'
+ 26: 1, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 2, # 'м'
+ 6: 2, # 'н'
+ 4: 0, # 'о'
+ 13: 2, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 1: { # 'а'
+ 63: 1, # 'e'
+ 45: 1, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 3, # 'и'
+ 26: 3, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 3, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 3, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 18: { # 'б'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 3, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 0, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 2, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 3, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 9: { # 'в'
+ 63: 1, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 1, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 0, # 'в'
+ 20: 2, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 3, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 2, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 3, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 20: { # 'г'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 3, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 11: { # 'д'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 2, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 1, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 3: { # 'е'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 2, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 2, # 'и'
+ 26: 3, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 3, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 3, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 23: { # 'ж'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 15: { # 'з'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 2: { # 'и'
+ 63: 1, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 1, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 1, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 3, # 'и'
+ 26: 3, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 3, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 3, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 26: { # 'й'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 2, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 2, # 'з'
+ 2: 1, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 2, # 'ф'
+ 25: 1, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 12: { # 'к'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 3, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 10: { # 'л'
+ 63: 1, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 1, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 1, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 2, # 'п'
+ 7: 2, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 2, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 3, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 14: { # 'м'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 3, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 1, # 'т'
+ 19: 3, # 'у'
+ 29: 2, # 'ф'
+ 25: 1, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 6: { # 'н'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 1, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 2, # 'б'
+ 9: 2, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 2, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 3, # 'ф'
+ 25: 2, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 4: { # 'о'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 3, # 'и'
+ 26: 3, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 3, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 3, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 13: { # 'п'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 3, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 7: { # 'р'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 2, # 'п'
+ 7: 1, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 2, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 2, # 'ч'
+ 27: 3, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 8: { # 'с'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 2, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 1, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 2, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 2, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 5: { # 'т'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 2, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 19: { # 'у'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 2, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 2, # 'и'
+ 26: 2, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 2, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 2, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 29: { # 'ф'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 1, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 25: { # 'х'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 3, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 1, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 22: { # 'ц'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 2, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 0, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 21: { # 'ч'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 3, # 'в'
+ 20: 1, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 27: { # 'ш'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 2, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 2, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 1, # 'т'
+ 19: 2, # 'у'
+ 29: 1, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 24: { # 'щ'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 1, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 17: { # 'ъ'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 2, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 1, # 'и'
+ 26: 2, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 3, # 'ч'
+ 27: 2, # 'ш'
+ 24: 3, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 2, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 52: { # 'ь'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 1, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 1, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 1, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 42: { # 'ю'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 2, # 'б'
+ 9: 1, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 1, # 'е'
+ 23: 2, # 'ж'
+ 15: 2, # 'з'
+ 2: 1, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 2, # 'н'
+ 4: 1, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 2, # 'ц'
+ 21: 3, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 16: { # 'я'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 3, # 'д'
+ 3: 2, # 'е'
+ 23: 1, # 'ж'
+ 15: 2, # 'з'
+ 2: 1, # 'и'
+ 26: 2, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 1, # 'о'
+ 13: 2, # 'п'
+ 7: 2, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 3, # 'х'
+ 22: 2, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 2, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 58: { # 'є'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 0, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 62: { # '№'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 0, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+}
+
+# 255: Undefined characters that did not exist in training text
+# 254: Carriage Return ('\r') and Line Feed ('\n')
+# 253: symbols (punctuation) that do not belong to words
+# 252: 0 - 9
+# 251: Control characters
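+# Illustrative sketch (editor's addition, not part of upstream chardet): the
+# char-to-order maps below translate raw byte values into frequency-rank
+# "orders". Only orders below the sampled alphabet size (64 in these tables)
+# index the bigram language model; the special categories listed above are
+# skipped. The helper name `iter_model_orders` is hypothetical:
+def iter_model_orders(data, char_to_order_map, sample_size=64):
+    """Yield language-model orders for bytes that belong to the alphabet."""
+    for byte in data:
+        order = char_to_order_map.get(byte, 255)  # 255 = undefined character
+        if order < sample_size:  # drop digits/symbols/control/line breaks
+            yield order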
+
+# Character Mapping Table(s):
+ISO_8859_5_BULGARIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 77, # 'A'
+ 66: 90, # 'B'
+ 67: 99, # 'C'
+ 68: 100, # 'D'
+ 69: 72, # 'E'
+ 70: 109, # 'F'
+ 71: 107, # 'G'
+ 72: 101, # 'H'
+ 73: 79, # 'I'
+ 74: 185, # 'J'
+ 75: 81, # 'K'
+ 76: 102, # 'L'
+ 77: 76, # 'M'
+ 78: 94, # 'N'
+ 79: 82, # 'O'
+ 80: 110, # 'P'
+ 81: 186, # 'Q'
+ 82: 108, # 'R'
+ 83: 91, # 'S'
+ 84: 74, # 'T'
+ 85: 119, # 'U'
+ 86: 84, # 'V'
+ 87: 96, # 'W'
+ 88: 111, # 'X'
+ 89: 187, # 'Y'
+ 90: 115, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 65, # 'a'
+ 98: 69, # 'b'
+ 99: 70, # 'c'
+ 100: 66, # 'd'
+ 101: 63, # 'e'
+ 102: 68, # 'f'
+ 103: 112, # 'g'
+ 104: 103, # 'h'
+ 105: 92, # 'i'
+ 106: 194, # 'j'
+ 107: 104, # 'k'
+ 108: 95, # 'l'
+ 109: 86, # 'm'
+ 110: 87, # 'n'
+ 111: 71, # 'o'
+ 112: 116, # 'p'
+ 113: 195, # 'q'
+ 114: 85, # 'r'
+ 115: 93, # 's'
+ 116: 97, # 't'
+ 117: 113, # 'u'
+ 118: 196, # 'v'
+ 119: 197, # 'w'
+ 120: 198, # 'x'
+ 121: 199, # 'y'
+ 122: 200, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 194, # '\x80'
+ 129: 195, # '\x81'
+ 130: 196, # '\x82'
+ 131: 197, # '\x83'
+ 132: 198, # '\x84'
+ 133: 199, # '\x85'
+ 134: 200, # '\x86'
+ 135: 201, # '\x87'
+ 136: 202, # '\x88'
+ 137: 203, # '\x89'
+ 138: 204, # '\x8a'
+ 139: 205, # '\x8b'
+ 140: 206, # '\x8c'
+ 141: 207, # '\x8d'
+ 142: 208, # '\x8e'
+ 143: 209, # '\x8f'
+ 144: 210, # '\x90'
+ 145: 211, # '\x91'
+ 146: 212, # '\x92'
+ 147: 213, # '\x93'
+ 148: 214, # '\x94'
+ 149: 215, # '\x95'
+ 150: 216, # '\x96'
+ 151: 217, # '\x97'
+ 152: 218, # '\x98'
+ 153: 219, # '\x99'
+ 154: 220, # '\x9a'
+ 155: 221, # '\x9b'
+ 156: 222, # '\x9c'
+ 157: 223, # '\x9d'
+ 158: 224, # '\x9e'
+ 159: 225, # '\x9f'
+ 160: 81, # '\xa0'
+ 161: 226, # 'Ё'
+ 162: 227, # 'Ђ'
+ 163: 228, # 'Ѓ'
+ 164: 229, # 'Є'
+ 165: 230, # 'Ѕ'
+ 166: 105, # 'І'
+ 167: 231, # 'Ї'
+ 168: 232, # 'Ј'
+ 169: 233, # 'Љ'
+ 170: 234, # 'Њ'
+ 171: 235, # 'Ћ'
+ 172: 236, # 'Ќ'
+ 173: 45, # '\xad'
+ 174: 237, # 'Ў'
+ 175: 238, # 'Џ'
+ 176: 31, # 'А'
+ 177: 32, # 'Б'
+ 178: 35, # 'В'
+ 179: 43, # 'Г'
+ 180: 37, # 'Д'
+ 181: 44, # 'Е'
+ 182: 55, # 'Ж'
+ 183: 47, # 'З'
+ 184: 40, # 'И'
+ 185: 59, # 'Й'
+ 186: 33, # 'К'
+ 187: 46, # 'Л'
+ 188: 38, # 'М'
+ 189: 36, # 'Н'
+ 190: 41, # 'О'
+ 191: 30, # 'П'
+ 192: 39, # 'Р'
+ 193: 28, # 'С'
+ 194: 34, # 'Т'
+ 195: 51, # 'У'
+ 196: 48, # 'Ф'
+ 197: 49, # 'Х'
+ 198: 53, # 'Ц'
+ 199: 50, # 'Ч'
+ 200: 54, # 'Ш'
+ 201: 57, # 'Щ'
+ 202: 61, # 'Ъ'
+ 203: 239, # 'Ы'
+ 204: 67, # 'Ь'
+ 205: 240, # 'Э'
+ 206: 60, # 'Ю'
+ 207: 56, # 'Я'
+ 208: 1, # 'а'
+ 209: 18, # 'б'
+ 210: 9, # 'в'
+ 211: 20, # 'г'
+ 212: 11, # 'д'
+ 213: 3, # 'е'
+ 214: 23, # 'ж'
+ 215: 15, # 'з'
+ 216: 2, # 'и'
+ 217: 26, # 'й'
+ 218: 12, # 'к'
+ 219: 10, # 'л'
+ 220: 14, # 'м'
+ 221: 6, # 'н'
+ 222: 4, # 'о'
+ 223: 13, # 'п'
+ 224: 7, # 'р'
+ 225: 8, # 'с'
+ 226: 5, # 'т'
+ 227: 19, # 'у'
+ 228: 29, # 'ф'
+ 229: 25, # 'х'
+ 230: 22, # 'ц'
+ 231: 21, # 'ч'
+ 232: 27, # 'ш'
+ 233: 24, # 'щ'
+ 234: 17, # 'ъ'
+ 235: 75, # 'ы'
+ 236: 52, # 'ь'
+ 237: 241, # 'э'
+ 238: 42, # 'ю'
+ 239: 16, # 'я'
+ 240: 62, # '№'
+ 241: 242, # 'ё'
+ 242: 243, # 'ђ'
+ 243: 244, # 'ѓ'
+ 244: 58, # 'є'
+ 245: 245, # 'ѕ'
+ 246: 98, # 'і'
+ 247: 246, # 'ї'
+ 248: 247, # 'ј'
+ 249: 248, # 'љ'
+ 250: 249, # 'њ'
+ 251: 250, # 'ћ'
+ 252: 251, # 'ќ'
+ 253: 91, # '§'
+ 254: 252, # 'ў'
+ 255: 253, # 'џ'
+}
+
+ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
+ language='Bulgarian',
+ char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER,
+ language_model=BULGARIAN_LANG_MODEL,
+ typical_positive_ratio=0.969392,
+ keep_ascii_letters=False,
+ alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя')
+
+WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 77, # 'A'
+ 66: 90, # 'B'
+ 67: 99, # 'C'
+ 68: 100, # 'D'
+ 69: 72, # 'E'
+ 70: 109, # 'F'
+ 71: 107, # 'G'
+ 72: 101, # 'H'
+ 73: 79, # 'I'
+ 74: 185, # 'J'
+ 75: 81, # 'K'
+ 76: 102, # 'L'
+ 77: 76, # 'M'
+ 78: 94, # 'N'
+ 79: 82, # 'O'
+ 80: 110, # 'P'
+ 81: 186, # 'Q'
+ 82: 108, # 'R'
+ 83: 91, # 'S'
+ 84: 74, # 'T'
+ 85: 119, # 'U'
+ 86: 84, # 'V'
+ 87: 96, # 'W'
+ 88: 111, # 'X'
+ 89: 187, # 'Y'
+ 90: 115, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 65, # 'a'
+ 98: 69, # 'b'
+ 99: 70, # 'c'
+ 100: 66, # 'd'
+ 101: 63, # 'e'
+ 102: 68, # 'f'
+ 103: 112, # 'g'
+ 104: 103, # 'h'
+ 105: 92, # 'i'
+ 106: 194, # 'j'
+ 107: 104, # 'k'
+ 108: 95, # 'l'
+ 109: 86, # 'm'
+ 110: 87, # 'n'
+ 111: 71, # 'o'
+ 112: 116, # 'p'
+ 113: 195, # 'q'
+ 114: 85, # 'r'
+ 115: 93, # 's'
+ 116: 97, # 't'
+ 117: 113, # 'u'
+ 118: 196, # 'v'
+ 119: 197, # 'w'
+ 120: 198, # 'x'
+ 121: 199, # 'y'
+ 122: 200, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 206, # 'Ђ'
+ 129: 207, # 'Ѓ'
+ 130: 208, # '‚'
+ 131: 209, # 'ѓ'
+ 132: 210, # '„'
+ 133: 211, # '…'
+ 134: 212, # '†'
+ 135: 213, # '‡'
+ 136: 120, # '€'
+ 137: 214, # '‰'
+ 138: 215, # 'Љ'
+ 139: 216, # '‹'
+ 140: 217, # 'Њ'
+ 141: 218, # 'Ќ'
+ 142: 219, # 'Ћ'
+ 143: 220, # 'Џ'
+ 144: 221, # 'ђ'
+ 145: 78, # '‘'
+ 146: 64, # '’'
+ 147: 83, # '“'
+ 148: 121, # '”'
+ 149: 98, # '•'
+ 150: 117, # '–'
+ 151: 105, # '—'
+ 152: 222, # None
+ 153: 223, # '™'
+ 154: 224, # 'љ'
+ 155: 225, # '›'
+ 156: 226, # 'њ'
+ 157: 227, # 'ќ'
+ 158: 228, # 'ћ'
+ 159: 229, # 'џ'
+ 160: 88, # '\xa0'
+ 161: 230, # 'Ў'
+ 162: 231, # 'ў'
+ 163: 232, # 'Ј'
+ 164: 233, # '¤'
+ 165: 122, # 'Ґ'
+ 166: 89, # '¦'
+ 167: 106, # '§'
+ 168: 234, # 'Ё'
+ 169: 235, # '©'
+ 170: 236, # 'Є'
+ 171: 237, # '«'
+ 172: 238, # '¬'
+ 173: 45, # '\xad'
+ 174: 239, # '®'
+ 175: 240, # 'Ї'
+ 176: 73, # '°'
+ 177: 80, # '±'
+ 178: 118, # 'І'
+ 179: 114, # 'і'
+ 180: 241, # 'ґ'
+ 181: 242, # 'µ'
+ 182: 243, # '¶'
+ 183: 244, # '·'
+ 184: 245, # 'ё'
+ 185: 62, # '№'
+ 186: 58, # 'є'
+ 187: 246, # '»'
+ 188: 247, # 'ј'
+ 189: 248, # 'Ѕ'
+ 190: 249, # 'ѕ'
+ 191: 250, # 'ї'
+ 192: 31, # 'А'
+ 193: 32, # 'Б'
+ 194: 35, # 'В'
+ 195: 43, # 'Г'
+ 196: 37, # 'Д'
+ 197: 44, # 'Е'
+ 198: 55, # 'Ж'
+ 199: 47, # 'З'
+ 200: 40, # 'И'
+ 201: 59, # 'Й'
+ 202: 33, # 'К'
+ 203: 46, # 'Л'
+ 204: 38, # 'М'
+ 205: 36, # 'Н'
+ 206: 41, # 'О'
+ 207: 30, # 'П'
+ 208: 39, # 'Р'
+ 209: 28, # 'С'
+ 210: 34, # 'Т'
+ 211: 51, # 'У'
+ 212: 48, # 'Ф'
+ 213: 49, # 'Х'
+ 214: 53, # 'Ц'
+ 215: 50, # 'Ч'
+ 216: 54, # 'Ш'
+ 217: 57, # 'Щ'
+ 218: 61, # 'Ъ'
+ 219: 251, # 'Ы'
+ 220: 67, # 'Ь'
+ 221: 252, # 'Э'
+ 222: 60, # 'Ю'
+ 223: 56, # 'Я'
+ 224: 1, # 'а'
+ 225: 18, # 'б'
+ 226: 9, # 'в'
+ 227: 20, # 'г'
+ 228: 11, # 'д'
+ 229: 3, # 'е'
+ 230: 23, # 'ж'
+ 231: 15, # 'з'
+ 232: 2, # 'и'
+ 233: 26, # 'й'
+ 234: 12, # 'к'
+ 235: 10, # 'л'
+ 236: 14, # 'м'
+ 237: 6, # 'н'
+ 238: 4, # 'о'
+ 239: 13, # 'п'
+ 240: 7, # 'р'
+ 241: 8, # 'с'
+ 242: 5, # 'т'
+ 243: 19, # 'у'
+ 244: 29, # 'ф'
+ 245: 25, # 'х'
+ 246: 22, # 'ц'
+ 247: 21, # 'ч'
+ 248: 27, # 'ш'
+ 249: 24, # 'щ'
+ 250: 17, # 'ъ'
+ 251: 75, # 'ы'
+ 252: 52, # 'ь'
+ 253: 253, # 'э'
+ 254: 42, # 'ю'
+ 255: 16, # 'я'
+}
+
+WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
+ language='Bulgarian',
+ char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER,
+ language_model=BULGARIAN_LANG_MODEL,
+ typical_positive_ratio=0.969392,
+ keep_ascii_letters=False,
+ alphabet='АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя')
+
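+# Usage sketch (editor's addition, not part of upstream chardet): the models
+# defined in this file are consumed by chardet's SingleByteCharSetProber (see
+# sbcsgroupprober.py). Running this module directly exercises one of them on a
+# hypothetical windows-1251 sample:
+if __name__ == "__main__":
+    from chardet.sbcharsetprober import SingleByteCharSetProber
+
+    prober = SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL)
+    prober.feed("Това е проба за откриване на кодиране.".encode("windows-1251"))
+    print(prober.charset_name, prober.get_confidence())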
diff --git a/third_party/python/chardet/chardet/langgreekmodel.py b/third_party/python/chardet/chardet/langgreekmodel.py
new file mode 100644
index 0000000000..02b94de655
--- /dev/null
+++ b/third_party/python/chardet/chardet/langgreekmodel.py
@@ -0,0 +1,4398 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from chardet.sbcharsetprober import SingleByteCharSetModel
+
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
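+# Illustrative sketch (editor's addition, not part of upstream chardet): the
+# nested mapping below scores consecutive character orders. Scoring one bigram
+# is a two-level dict lookup, defaulting to 0 (Negative) for pairs that never
+# appeared in the training text; `bigram_score` is a hypothetical helper:
+def bigram_score(lang_model, prev_order, cur_order):
+    """Return the 0-3 likelihood class for the pair (prev_order, cur_order)."""
+    return lang_model.get(prev_order, {}).get(cur_order, 0)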
+
+GREEK_LANG_MODEL = {
+ 60: { # 'e'
+ 60: 2, # 'e'
+ 55: 1, # 'o'
+ 58: 2, # 't'
+ 36: 1, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 55: { # 'o'
+ 60: 0, # 'e'
+ 55: 2, # 'o'
+ 58: 2, # 't'
+ 36: 1, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 1, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 1, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 58: { # 't'
+ 60: 2, # 'e'
+ 55: 1, # 'o'
+ 58: 1, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 1, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 36: { # '·'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 61: { # 'Ά'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 1, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 1, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 46: { # 'Έ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 2, # 'β'
+ 20: 2, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 2, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 1, # 'σ'
+ 2: 2, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 54: { # 'Ό'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 2, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 31: { # 'Α'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 2, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 1, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 2, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 1, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 2, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 2, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 1, # 'θ'
+ 5: 0, # 'ι'
+ 11: 2, # 'κ'
+ 16: 3, # 'λ'
+ 10: 2, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 2, # 'ς'
+ 7: 2, # 'σ'
+ 2: 0, # 'τ'
+ 12: 3, # 'υ'
+ 28: 2, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 51: { # 'Β'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 1, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 1, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 2, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 43: { # 'Γ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 1, # 'Α'
+ 51: 0, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 1, # 'Κ'
+ 53: 1, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 1, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 41: { # 'Δ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 2, # 'ή'
+ 15: 2, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 1, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 34: { # 'Ε'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 2, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 1, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 2, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 3, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 1, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 1, # 'θ'
+ 5: 2, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 2, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 2, # 'τ'
+ 12: 2, # 'υ'
+ 28: 2, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 1, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 40: { # 'Η'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 1, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 2, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 1, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 1, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 1, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 52: { # 'Θ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 1, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 1, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 47: { # 'Ι'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 1, # 'Β'
+ 43: 1, # 'Γ'
+ 41: 2, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 2, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 1, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 1, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 1, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 44: { # 'Κ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 1, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 1, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 1, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 1, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 53: { # 'Λ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 1, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 38: { # 'Μ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 2, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 2, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 49: { # 'Ν'
+ 60: 2, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 1, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 1, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 59: { # 'Ξ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 1, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 39: { # 'Ο'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 1, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 2, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 2, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 2, # 'Φ'
+ 50: 2, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 2, # 'κ'
+ 16: 2, # 'λ'
+ 10: 2, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 2, # 'τ'
+ 12: 2, # 'υ'
+ 28: 1, # 'φ'
+ 23: 1, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 35: { # 'Π'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 1, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 1, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 1, # 'έ'
+ 22: 1, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 48: { # 'Ρ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 1, # 'Γ'
+ 41: 1, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 1, # 'Τ'
+ 45: 1, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 1, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 1, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 37: { # 'Σ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 1, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 2, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 2, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 2, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 2, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 2, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 33: { # 'Τ'
+ 60: 0, # 'e'
+ 55: 1, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 1, # 'Τ'
+ 45: 1, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 2, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 45: { # 'Υ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 2, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 1, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 1, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 56: { # 'Φ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 1, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 2, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 1, # 'ύ'
+ 27: 1, # 'ώ'
+ },
+ 50: { # 'Χ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 1, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 1, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 1, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 1, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 2, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 57: { # 'Ω'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 1, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 1, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 2, # 'ς'
+ 7: 2, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 1, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 17: { # 'ά'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 3, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 2, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 3, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 18: { # 'έ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 3, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 3, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 22: { # 'ή'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 1, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 15: { # 'ί'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 3, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 1, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 1: { # 'α'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 3, # 'ί'
+ 1: 0, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 2, # 'ε'
+ 32: 3, # 'ζ'
+ 13: 1, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 29: { # 'β'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 2, # 'γ'
+ 21: 2, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 3, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 20: { # 'γ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 3, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 3, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 21: { # 'δ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 3: { # 'ε'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 3, # 'ί'
+ 1: 2, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 2, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 2, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 32: { # 'ζ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 2, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 1, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 13: { # 'η'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 25: { # 'θ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 1, # 'λ'
+ 10: 3, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 5: { # 'ι'
+ 60: 0, # 'e'
+ 55: 1, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 11: { # 'κ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 2, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 2, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 2, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 16: { # 'λ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 1, # 'β'
+ 20: 2, # 'γ'
+ 21: 1, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 2, # 'θ'
+ 5: 3, # 'ι'
+ 11: 2, # 'κ'
+ 16: 3, # 'λ'
+ 10: 2, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 2, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 10: { # 'μ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 3, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 3, # 'φ'
+ 23: 0, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 6: { # 'ν'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 1, # 'λ'
+ 10: 0, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 30: { # 'ξ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 2, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 1, # 'ώ'
+ },
+ 4: { # 'ο'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 2, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 1, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 9: { # 'π'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 3, # 'λ'
+ 10: 0, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 2, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 8: { # 'ρ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 1, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 2, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 14: { # 'ς'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 7: { # 'σ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 3, # 'β'
+ 20: 0, # 'γ'
+ 21: 2, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 2: { # 'τ'
+ 60: 0, # 'e'
+ 55: 2, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 2, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 2, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 12: { # 'υ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 2, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 2, # 'η'
+ 25: 3, # 'θ'
+ 5: 2, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 28: { # 'φ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 2, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 1, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 1, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 23: { # 'χ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 2, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 2, # 'μ'
+ 6: 3, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 42: { # 'ψ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 1, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 2, # 'τ'
+ 12: 1, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 24: { # 'ω'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 1, # 'ά'
+ 18: 0, # 'έ'
+ 22: 2, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 19: { # 'ό'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 1, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 2, # 'η'
+ 25: 2, # 'θ'
+ 5: 2, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 1, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 26: { # 'ύ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 2, # 'β'
+ 20: 2, # 'γ'
+ 21: 1, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 2, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 27: { # 'ώ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 1, # 'β'
+ 20: 0, # 'γ'
+ 21: 3, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 1, # 'η'
+ 25: 2, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 1, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 1, # 'φ'
+ 23: 1, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+}
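+
+# --- Editorial sketch (not part of the vendored chardet source) ---
+# GREEK_LANG_MODEL maps an order pair (previous character's order, current
+# character's order) to a likelihood bucket. A minimal, hypothetical helper
+# showing how such a table can be consulted; chardet's real scoring lives in
+# sbcharsetprober.py and is more involved.
+def _positive_pair_ratio(orders, model=GREEK_LANG_MODEL):
+    """Share of adjacent order pairs the model rates as likelihood 3."""
+    pairs = list(zip(orders, orders[1:]))
+    if not pairs:
+        return 0.0
+    positive = sum(1 for prev, cur in pairs
+                   if model.get(prev, {}).get(cur, 0) == 3)
+    return positive / len(pairs)
+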
+
+# 255: Undefined characters that did not exist in the training text
+# 254: Carriage Return/Line Feed
+# 253: symbols (punctuation) that do not belong to a word
+# 252: 0 - 9
+# 251: Control characters
+
+# Character Mapping Table(s):
+WINDOWS_1253_GREEK_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 82, # 'A'
+ 66: 100, # 'B'
+ 67: 104, # 'C'
+ 68: 94, # 'D'
+ 69: 98, # 'E'
+ 70: 101, # 'F'
+ 71: 116, # 'G'
+ 72: 102, # 'H'
+ 73: 111, # 'I'
+ 74: 187, # 'J'
+ 75: 117, # 'K'
+ 76: 92, # 'L'
+ 77: 88, # 'M'
+ 78: 113, # 'N'
+ 79: 85, # 'O'
+ 80: 79, # 'P'
+ 81: 118, # 'Q'
+ 82: 105, # 'R'
+ 83: 83, # 'S'
+ 84: 67, # 'T'
+ 85: 114, # 'U'
+ 86: 119, # 'V'
+ 87: 95, # 'W'
+ 88: 99, # 'X'
+ 89: 109, # 'Y'
+ 90: 188, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 72, # 'a'
+ 98: 70, # 'b'
+ 99: 80, # 'c'
+ 100: 81, # 'd'
+ 101: 60, # 'e'
+ 102: 96, # 'f'
+ 103: 93, # 'g'
+ 104: 89, # 'h'
+ 105: 68, # 'i'
+ 106: 120, # 'j'
+ 107: 97, # 'k'
+ 108: 77, # 'l'
+ 109: 86, # 'm'
+ 110: 69, # 'n'
+ 111: 55, # 'o'
+ 112: 78, # 'p'
+ 113: 115, # 'q'
+ 114: 65, # 'r'
+ 115: 66, # 's'
+ 116: 58, # 't'
+ 117: 76, # 'u'
+ 118: 106, # 'v'
+ 119: 103, # 'w'
+ 120: 87, # 'x'
+ 121: 107, # 'y'
+ 122: 112, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 255, # '€'
+ 129: 255, # None
+ 130: 255, # '‚'
+ 131: 255, # 'ƒ'
+ 132: 255, # '„'
+ 133: 255, # '…'
+ 134: 255, # '†'
+ 135: 255, # '‡'
+ 136: 255, # None
+ 137: 255, # '‰'
+ 138: 255, # None
+ 139: 255, # '‹'
+ 140: 255, # None
+ 141: 255, # None
+ 142: 255, # None
+ 143: 255, # None
+ 144: 255, # None
+ 145: 255, # '‘'
+ 146: 255, # '’'
+ 147: 255, # '“'
+ 148: 255, # '”'
+ 149: 255, # '•'
+ 150: 255, # '–'
+ 151: 255, # '—'
+ 152: 255, # None
+ 153: 255, # '™'
+ 154: 255, # None
+ 155: 255, # '›'
+ 156: 255, # None
+ 157: 255, # None
+ 158: 255, # None
+ 159: 255, # None
+ 160: 253, # '\xa0'
+ 161: 233, # '΅'
+ 162: 61, # 'Ά'
+ 163: 253, # '£'
+ 164: 253, # '¤'
+ 165: 253, # '¥'
+ 166: 253, # '¦'
+ 167: 253, # '§'
+ 168: 253, # '¨'
+ 169: 253, # '©'
+ 170: 253, # None
+ 171: 253, # '«'
+ 172: 253, # '¬'
+ 173: 74, # '\xad'
+ 174: 253, # '®'
+ 175: 253, # '―'
+ 176: 253, # '°'
+ 177: 253, # '±'
+ 178: 253, # '²'
+ 179: 253, # '³'
+ 180: 247, # '΄'
+ 181: 253, # 'µ'
+ 182: 253, # '¶'
+ 183: 36, # '·'
+ 184: 46, # 'Έ'
+ 185: 71, # 'Ή'
+ 186: 73, # 'Ί'
+ 187: 253, # '»'
+ 188: 54, # 'Ό'
+ 189: 253, # '½'
+ 190: 108, # 'Ύ'
+ 191: 123, # 'Ώ'
+ 192: 110, # 'ΐ'
+ 193: 31, # 'Α'
+ 194: 51, # 'Β'
+ 195: 43, # 'Γ'
+ 196: 41, # 'Δ'
+ 197: 34, # 'Ε'
+ 198: 91, # 'Ζ'
+ 199: 40, # 'Η'
+ 200: 52, # 'Θ'
+ 201: 47, # 'Ι'
+ 202: 44, # 'Κ'
+ 203: 53, # 'Λ'
+ 204: 38, # 'Μ'
+ 205: 49, # 'Ν'
+ 206: 59, # 'Ξ'
+ 207: 39, # 'Ο'
+ 208: 35, # 'Π'
+ 209: 48, # 'Ρ'
+ 210: 250, # None
+ 211: 37, # 'Σ'
+ 212: 33, # 'Τ'
+ 213: 45, # 'Υ'
+ 214: 56, # 'Φ'
+ 215: 50, # 'Χ'
+ 216: 84, # 'Ψ'
+ 217: 57, # 'Ω'
+ 218: 120, # 'Ϊ'
+ 219: 121, # 'Ϋ'
+ 220: 17, # 'ά'
+ 221: 18, # 'έ'
+ 222: 22, # 'ή'
+ 223: 15, # 'ί'
+ 224: 124, # 'ΰ'
+ 225: 1, # 'α'
+ 226: 29, # 'β'
+ 227: 20, # 'γ'
+ 228: 21, # 'δ'
+ 229: 3, # 'ε'
+ 230: 32, # 'ζ'
+ 231: 13, # 'η'
+ 232: 25, # 'θ'
+ 233: 5, # 'ι'
+ 234: 11, # 'κ'
+ 235: 16, # 'λ'
+ 236: 10, # 'μ'
+ 237: 6, # 'ν'
+ 238: 30, # 'ξ'
+ 239: 4, # 'ο'
+ 240: 9, # 'π'
+ 241: 8, # 'ρ'
+ 242: 14, # 'ς'
+ 243: 7, # 'σ'
+ 244: 2, # 'τ'
+ 245: 12, # 'υ'
+ 246: 28, # 'φ'
+ 247: 23, # 'χ'
+ 248: 42, # 'ψ'
+ 249: 24, # 'ω'
+ 250: 64, # 'ϊ'
+ 251: 75, # 'ϋ'
+ 252: 19, # 'ό'
+ 253: 26, # 'ύ'
+ 254: 27, # 'ώ'
+ 255: 253, # None
+}
+
+WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(charset_name='windows-1253',
+ language='Greek',
+ char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER,
+ language_model=GREEK_LANG_MODEL,
+ typical_positive_ratio=0.982851,
+ keep_ascii_letters=False,
+ alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ')
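+
+# --- Editorial sketch (assumption, not chardet's verbatim API) ---
+# The char-to-order map turns raw windows-1253 bytes into the order space
+# that GREEK_LANG_MODEL is indexed by. Orders >= 250 are the special classes
+# from the legend above (undefined, CR/LF, punctuation, digits, controls)
+# and are filtered out rather than looked up in the bigram table.
+def _orders_from_bytes(data, order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER):
+    """Hypothetical helper: map raw bytes to model orders, dropping specials."""
+    return [order_map[b] for b in bytearray(data) if order_map[b] < 250]
+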
+
+ISO_8859_7_GREEK_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 82, # 'A'
+ 66: 100, # 'B'
+ 67: 104, # 'C'
+ 68: 94, # 'D'
+ 69: 98, # 'E'
+ 70: 101, # 'F'
+ 71: 116, # 'G'
+ 72: 102, # 'H'
+ 73: 111, # 'I'
+ 74: 187, # 'J'
+ 75: 117, # 'K'
+ 76: 92, # 'L'
+ 77: 88, # 'M'
+ 78: 113, # 'N'
+ 79: 85, # 'O'
+ 80: 79, # 'P'
+ 81: 118, # 'Q'
+ 82: 105, # 'R'
+ 83: 83, # 'S'
+ 84: 67, # 'T'
+ 85: 114, # 'U'
+ 86: 119, # 'V'
+ 87: 95, # 'W'
+ 88: 99, # 'X'
+ 89: 109, # 'Y'
+ 90: 188, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 72, # 'a'
+ 98: 70, # 'b'
+ 99: 80, # 'c'
+ 100: 81, # 'd'
+ 101: 60, # 'e'
+ 102: 96, # 'f'
+ 103: 93, # 'g'
+ 104: 89, # 'h'
+ 105: 68, # 'i'
+ 106: 120, # 'j'
+ 107: 97, # 'k'
+ 108: 77, # 'l'
+ 109: 86, # 'm'
+ 110: 69, # 'n'
+ 111: 55, # 'o'
+ 112: 78, # 'p'
+ 113: 115, # 'q'
+ 114: 65, # 'r'
+ 115: 66, # 's'
+ 116: 58, # 't'
+ 117: 76, # 'u'
+ 118: 106, # 'v'
+ 119: 103, # 'w'
+ 120: 87, # 'x'
+ 121: 107, # 'y'
+ 122: 112, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 255, # '\x80'
+ 129: 255, # '\x81'
+ 130: 255, # '\x82'
+ 131: 255, # '\x83'
+ 132: 255, # '\x84'
+ 133: 255, # '\x85'
+ 134: 255, # '\x86'
+ 135: 255, # '\x87'
+ 136: 255, # '\x88'
+ 137: 255, # '\x89'
+ 138: 255, # '\x8a'
+ 139: 255, # '\x8b'
+ 140: 255, # '\x8c'
+ 141: 255, # '\x8d'
+ 142: 255, # '\x8e'
+ 143: 255, # '\x8f'
+ 144: 255, # '\x90'
+ 145: 255, # '\x91'
+ 146: 255, # '\x92'
+ 147: 255, # '\x93'
+ 148: 255, # '\x94'
+ 149: 255, # '\x95'
+ 150: 255, # '\x96'
+ 151: 255, # '\x97'
+ 152: 255, # '\x98'
+ 153: 255, # '\x99'
+ 154: 255, # '\x9a'
+ 155: 255, # '\x9b'
+ 156: 255, # '\x9c'
+ 157: 255, # '\x9d'
+ 158: 255, # '\x9e'
+ 159: 255, # '\x9f'
+ 160: 253, # '\xa0'
+ 161: 233, # '‘'
+ 162: 90, # '’'
+ 163: 253, # '£'
+ 164: 253, # '€'
+ 165: 253, # '₯'
+ 166: 253, # '¦'
+ 167: 253, # '§'
+ 168: 253, # '¨'
+ 169: 253, # '©'
+ 170: 253, # 'ͺ'
+ 171: 253, # '«'
+ 172: 253, # '¬'
+ 173: 74, # '\xad'
+ 174: 253, # None
+ 175: 253, # '―'
+ 176: 253, # '°'
+ 177: 253, # '±'
+ 178: 253, # '²'
+ 179: 253, # '³'
+ 180: 247, # '΄'
+ 181: 248, # '΅'
+ 182: 61, # 'Ά'
+ 183: 36, # '·'
+ 184: 46, # 'Έ'
+ 185: 71, # 'Ή'
+ 186: 73, # 'Ί'
+ 187: 253, # '»'
+ 188: 54, # 'Ό'
+ 189: 253, # '½'
+ 190: 108, # 'Ύ'
+ 191: 123, # 'Ώ'
+ 192: 110, # 'ΐ'
+ 193: 31, # 'Α'
+ 194: 51, # 'Β'
+ 195: 43, # 'Γ'
+ 196: 41, # 'Δ'
+ 197: 34, # 'Ε'
+ 198: 91, # 'Ζ'
+ 199: 40, # 'Η'
+ 200: 52, # 'Θ'
+ 201: 47, # 'Ι'
+ 202: 44, # 'Κ'
+ 203: 53, # 'Λ'
+ 204: 38, # 'Μ'
+ 205: 49, # 'Ν'
+ 206: 59, # 'Ξ'
+ 207: 39, # 'Ο'
+ 208: 35, # 'Π'
+ 209: 48, # 'Ρ'
+ 210: 250, # None
+ 211: 37, # 'Σ'
+ 212: 33, # 'Τ'
+ 213: 45, # 'Υ'
+ 214: 56, # 'Φ'
+ 215: 50, # 'Χ'
+ 216: 84, # 'Ψ'
+ 217: 57, # 'Ω'
+ 218: 120, # 'Ϊ'
+ 219: 121, # 'Ϋ'
+ 220: 17, # 'ά'
+ 221: 18, # 'έ'
+ 222: 22, # 'ή'
+ 223: 15, # 'ί'
+ 224: 124, # 'ΰ'
+ 225: 1, # 'α'
+ 226: 29, # 'β'
+ 227: 20, # 'γ'
+ 228: 21, # 'δ'
+ 229: 3, # 'ε'
+ 230: 32, # 'ζ'
+ 231: 13, # 'η'
+ 232: 25, # 'θ'
+ 233: 5, # 'ι'
+ 234: 11, # 'κ'
+ 235: 16, # 'λ'
+ 236: 10, # 'μ'
+ 237: 6, # 'ν'
+ 238: 30, # 'ξ'
+ 239: 4, # 'ο'
+ 240: 9, # 'π'
+ 241: 8, # 'ρ'
+ 242: 14, # 'ς'
+ 243: 7, # 'σ'
+ 244: 2, # 'τ'
+ 245: 12, # 'υ'
+ 246: 28, # 'φ'
+ 247: 23, # 'χ'
+ 248: 42, # 'ψ'
+ 249: 24, # 'ω'
+ 250: 64, # 'ϊ'
+ 251: 75, # 'ϋ'
+ 252: 19, # 'ό'
+ 253: 26, # 'ύ'
+ 254: 27, # 'ώ'
+ 255: 253, # None
+}
+
+ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-7',
+ language='Greek',
+ char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER,
+ language_model=GREEK_LANG_MODEL,
+ typical_positive_ratio=0.982851,
+ keep_ascii_letters=False,
+ alphabet='ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ')
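+
+# --- Editorial demo (assumption, not part of the vendored file) ---
+# Two mapping tables, one language model: windows-1253 and ISO-8859-7 place
+# the Greek letters at the same byte values, so both tables project the same
+# text onto an identical order sequence and can share GREEK_LANG_MODEL.
+def _demo_shared_order_space():
+    text = u'καλημέρα'  # 'good morning'
+    cp1253 = [WINDOWS_1253_GREEK_CHAR_TO_ORDER[b]
+              for b in bytearray(text.encode('windows-1253'))]
+    iso = [ISO_8859_7_GREEK_CHAR_TO_ORDER[b]
+           for b in bytearray(text.encode('iso-8859-7'))]
+    assert cp1253 == iso
+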
+
diff --git a/third_party/python/chardet/chardet/langhebrewmodel.py b/third_party/python/chardet/chardet/langhebrewmodel.py
new file mode 100644
index 0000000000..40fd674c4a
--- /dev/null
+++ b/third_party/python/chardet/chardet/langhebrewmodel.py
@@ -0,0 +1,4383 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from chardet.sbcharsetprober import SingleByteCharSetModel
+
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
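+
+# Editorial note (assumption about usage, not chardet's verbatim logic): a
+# single-byte prober tallies how often adjacent orders score 3 ('Positive')
+# and roughly compares that share against the model's typical_positive_ratio,
+# e.g. confidence ~ (positive_pairs / total_pairs) / typical_positive_ratio.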
+
+HEBREW_LANG_MODEL = {
+ 50: { # 'a'
+ 50: 0, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 2, # 'l'
+ 54: 2, # 'n'
+ 49: 0, # 'o'
+ 51: 2, # 'r'
+ 43: 1, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 1, # 'ק'
+ 7: 0, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 60: { # 'c'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 0, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 0, # 'n'
+ 49: 1, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 61: { # 'd'
+ 50: 1, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 2, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 0, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 1, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 42: { # 'e'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 2, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 2, # 'l'
+ 54: 2, # 'n'
+ 49: 1, # 'o'
+ 51: 2, # 'r'
+ 43: 2, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 1, # '–'
+ 52: 2, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 53: { # 'i'
+ 50: 1, # 'a'
+ 60: 2, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 0, # 'i'
+ 56: 1, # 'l'
+ 54: 2, # 'n'
+ 49: 2, # 'o'
+ 51: 1, # 'r'
+ 43: 2, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 56: { # 'l'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 2, # 'e'
+ 53: 2, # 'i'
+ 56: 2, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 0, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 54: { # 'n'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 0, # 'r'
+ 43: 1, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 2, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 49: { # 'o'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 2, # 'n'
+ 49: 1, # 'o'
+ 51: 2, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 51: { # 'r'
+ 50: 2, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 2, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 2, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 2, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 43: { # 's'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 0, # 'd'
+ 42: 2, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 2, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 44: { # 't'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 0, # 'd'
+ 42: 2, # 'e'
+ 53: 2, # 'i'
+ 56: 1, # 'l'
+ 54: 0, # 'n'
+ 49: 1, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 2, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 63: { # 'u'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 0, # 'o'
+ 51: 1, # 'r'
+ 43: 2, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 34: { # '\xa0'
+ 50: 1, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 0, # 'e'
+ 53: 1, # 'i'
+ 56: 0, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 0, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 2, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 2, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 2, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 55: { # '´'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 1, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 2, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 1, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 48: { # '¼'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 39: { # '½'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 57: { # '¾'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 30: { # 'ְ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 2, # 'ו'
+ 24: 2, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 2, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 1, # 'ם'
+ 6: 2, # 'מ'
+ 23: 0, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 2, # 'ע'
+ 26: 0, # 'ף'
+ 18: 2, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 59: { # 'ֱ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 1, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 0, # 'ם'
+ 6: 2, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 41: { # 'ֲ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 1, # 'י'
+ 25: 1, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 0, # 'ם'
+ 6: 2, # 'מ'
+ 23: 0, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 1, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 33: { # 'ִ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 1, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 1, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 2, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 37: { # 'ֵ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 2, # 'ח'
+ 22: 1, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 1, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 36: { # 'ֶ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 2, # 'ח'
+ 22: 1, # 'ט'
+ 1: 2, # 'י'
+ 25: 2, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 1, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 31: { # 'ַ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 1, # 'ו'
+ 24: 2, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 2, # 'ע'
+ 26: 2, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 29: { # 'ָ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 1, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 3, # 'ה'
+ 2: 2, # 'ו'
+ 24: 2, # 'ז'
+ 14: 2, # 'ח'
+ 22: 1, # 'ט'
+ 1: 2, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 35: { # 'ֹ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 1, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 62: { # 'ֻ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 1, # 'ם'
+ 6: 1, # 'מ'
+ 23: 1, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 28: { # 'ּ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 3, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 1, # 'ֲ'
+ 33: 3, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 3, # 'ַ'
+ 29: 3, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 2, # 'ׁ'
+ 45: 1, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 1, # 'ה'
+ 2: 2, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 2, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 1, # 'ם'
+ 6: 2, # 'מ'
+ 23: 1, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 38: { # 'ׁ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 2, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 45: { # 'ׂ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 1, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 2, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 1, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 0, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 9: { # 'א'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 2, # 'ֱ'
+ 41: 2, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 2, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 8: { # 'ב'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 3, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 1, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 20: { # 'ג'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 2, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 1, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 1, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 16: { # 'ד'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 1, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 3: { # 'ה'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 1, # 'ֱ'
+ 41: 2, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 3, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 0, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 2: { # 'ו'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 3, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 3, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 3, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 24: { # 'ז'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 1, # 'ֲ'
+ 33: 1, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 2, # 'ח'
+ 22: 1, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 1, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 14: { # 'ח'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 1, # 'ֱ'
+ 41: 2, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 1, # 'ע'
+ 26: 2, # 'ף'
+ 18: 2, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 22: { # 'ט'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 1, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 1, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 3, # 'ר'
+ 10: 2, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 1: { # 'י'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 3, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 25: { # 'ך'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 1, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 15: { # 'כ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 3, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 2, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 4: { # 'ל'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 3, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 11: { # 'ם'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 1, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 6: { # 'מ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 0, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 23: { # 'ן'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 1, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 12: { # 'נ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 19: { # 'ס'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 2, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 1, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 1, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 13: { # 'ע'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 1, # 'ֱ'
+ 41: 2, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 1, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 2, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 26: { # 'ף'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 1, # 'ס'
+ 13: 0, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 18: { # 'פ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 2, # 'ב'
+ 20: 3, # 'ג'
+ 16: 2, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 2, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 27: { # 'ץ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 1, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 0, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 21: { # 'צ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 1, # 'ז'
+ 14: 3, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 1, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 1, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 0, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 17: { # 'ק'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 1, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 2, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 7: { # 'ר'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 2, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 1, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 3, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 10: { # 'ש'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 1, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 3, # 'ׁ'
+ 45: 2, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 5: { # 'ת'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 1, # '¼'
+ 39: 1, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 2, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 32: { # '–'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 1, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 52: { # '’'
+ 50: 1, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 1, # 'r'
+ 43: 2, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 47: { # '“'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 46: { # '”'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 58: { # '†'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 2, # '†'
+ 40: 0, # '…'
+ },
+ 40: { # '…'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 0, # 'l'
+ 54: 1, # 'n'
+ 49: 0, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+}
+
+# 255: Undefined characters that did not exist in the training text
+# 254: Carriage return / line feed
+# 253: Symbols and punctuation that do not belong to words
+# 252: Digits 0-9
+# 251: Control characters
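+#
+# Orders below that reserved range index rows of HEBREW_LANG_MODEL above;
+# bytes mapped to 251-255 carry no bigram statistics and are skipped when
+# scoring (a hedged reading of how chardet's single-byte probers consume
+# these tables, not upstream documentation).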
+
+# Character Mapping Table(s):
+WINDOWS_1255_HEBREW_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 69, # 'A'
+ 66: 91, # 'B'
+ 67: 79, # 'C'
+ 68: 80, # 'D'
+ 69: 92, # 'E'
+ 70: 89, # 'F'
+ 71: 97, # 'G'
+ 72: 90, # 'H'
+ 73: 68, # 'I'
+ 74: 111, # 'J'
+ 75: 112, # 'K'
+ 76: 82, # 'L'
+ 77: 73, # 'M'
+ 78: 95, # 'N'
+ 79: 85, # 'O'
+ 80: 78, # 'P'
+ 81: 121, # 'Q'
+ 82: 86, # 'R'
+ 83: 71, # 'S'
+ 84: 67, # 'T'
+ 85: 102, # 'U'
+ 86: 107, # 'V'
+ 87: 84, # 'W'
+ 88: 114, # 'X'
+ 89: 103, # 'Y'
+ 90: 115, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 50, # 'a'
+ 98: 74, # 'b'
+ 99: 60, # 'c'
+ 100: 61, # 'd'
+ 101: 42, # 'e'
+ 102: 76, # 'f'
+ 103: 70, # 'g'
+ 104: 64, # 'h'
+ 105: 53, # 'i'
+ 106: 105, # 'j'
+ 107: 93, # 'k'
+ 108: 56, # 'l'
+ 109: 65, # 'm'
+ 110: 54, # 'n'
+ 111: 49, # 'o'
+ 112: 66, # 'p'
+ 113: 110, # 'q'
+ 114: 51, # 'r'
+ 115: 43, # 's'
+ 116: 44, # 't'
+ 117: 63, # 'u'
+ 118: 81, # 'v'
+ 119: 77, # 'w'
+ 120: 98, # 'x'
+ 121: 75, # 'y'
+ 122: 108, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 124, # '€'
+ 129: 202, # None
+ 130: 203, # '‚'
+ 131: 204, # 'ƒ'
+ 132: 205, # '„'
+ 133: 40, # '…'
+ 134: 58, # '†'
+ 135: 206, # '‡'
+ 136: 207, # 'ˆ'
+ 137: 208, # '‰'
+ 138: 209, # None
+ 139: 210, # '‹'
+ 140: 211, # None
+ 141: 212, # None
+ 142: 213, # None
+ 143: 214, # None
+ 144: 215, # None
+ 145: 83, # '‘'
+ 146: 52, # '’'
+ 147: 47, # '“'
+ 148: 46, # '”'
+ 149: 72, # '•'
+ 150: 32, # '–'
+ 151: 94, # '—'
+ 152: 216, # '˜'
+ 153: 113, # '™'
+ 154: 217, # None
+ 155: 109, # '›'
+ 156: 218, # None
+ 157: 219, # None
+ 158: 220, # None
+ 159: 221, # None
+ 160: 34, # '\xa0'
+ 161: 116, # '¡'
+ 162: 222, # '¢'
+ 163: 118, # '£'
+ 164: 100, # '₪'
+ 165: 223, # '¥'
+ 166: 224, # '¦'
+ 167: 117, # '§'
+ 168: 119, # '¨'
+ 169: 104, # '©'
+ 170: 125, # '×'
+ 171: 225, # '«'
+ 172: 226, # '¬'
+ 173: 87, # '\xad'
+ 174: 99, # '®'
+ 175: 227, # '¯'
+ 176: 106, # '°'
+ 177: 122, # '±'
+ 178: 123, # '²'
+ 179: 228, # '³'
+ 180: 55, # '´'
+ 181: 229, # 'µ'
+ 182: 230, # '¶'
+ 183: 101, # '·'
+ 184: 231, # '¸'
+ 185: 232, # '¹'
+ 186: 120, # '÷'
+ 187: 233, # '»'
+ 188: 48, # '¼'
+ 189: 39, # '½'
+ 190: 57, # '¾'
+ 191: 234, # '¿'
+ 192: 30, # 'ְ'
+ 193: 59, # 'ֱ'
+ 194: 41, # 'ֲ'
+ 195: 88, # 'ֳ'
+ 196: 33, # 'ִ'
+ 197: 37, # 'ֵ'
+ 198: 36, # 'ֶ'
+ 199: 31, # 'ַ'
+ 200: 29, # 'ָ'
+ 201: 35, # 'ֹ'
+ 202: 235, # None
+ 203: 62, # 'ֻ'
+ 204: 28, # 'ּ'
+ 205: 236, # 'ֽ'
+ 206: 126, # '־'
+ 207: 237, # 'ֿ'
+ 208: 238, # '׀'
+ 209: 38, # 'ׁ'
+ 210: 45, # 'ׂ'
+ 211: 239, # '׃'
+ 212: 240, # 'װ'
+ 213: 241, # 'ױ'
+ 214: 242, # 'ײ'
+ 215: 243, # '׳'
+ 216: 127, # '״'
+ 217: 244, # None
+ 218: 245, # None
+ 219: 246, # None
+ 220: 247, # None
+ 221: 248, # None
+ 222: 249, # None
+ 223: 250, # None
+ 224: 9, # 'א'
+ 225: 8, # 'ב'
+ 226: 20, # 'ג'
+ 227: 16, # 'ד'
+ 228: 3, # 'ה'
+ 229: 2, # 'ו'
+ 230: 24, # 'ז'
+ 231: 14, # 'ח'
+ 232: 22, # 'ט'
+ 233: 1, # 'י'
+ 234: 25, # 'ך'
+ 235: 15, # 'כ'
+ 236: 4, # 'ל'
+ 237: 11, # 'ם'
+ 238: 6, # 'מ'
+ 239: 23, # 'ן'
+ 240: 12, # 'נ'
+ 241: 19, # 'ס'
+ 242: 13, # 'ע'
+ 243: 26, # 'ף'
+ 244: 18, # 'פ'
+ 245: 27, # 'ץ'
+ 246: 21, # 'צ'
+ 247: 17, # 'ק'
+ 248: 7, # 'ר'
+ 249: 10, # 'ש'
+ 250: 5, # 'ת'
+ 251: 251, # None
+ 252: 252, # None
+ 253: 128, # '\u200e'
+ 254: 96, # '\u200f'
+ 255: 253, # None
+}
+
+WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(charset_name='windows-1255',
+ language='Hebrew',
+ char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER,
+ language_model=HEBREW_LANG_MODEL,
+ typical_positive_ratio=0.984004,
+ keep_ascii_letters=False,
+ alphabet='אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ')
+
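+# A minimal usage sketch, assuming the tables above are consumed as bigram
+# lookups. This helper is illustrative and not part of upstream chardet;
+# the name _demo_bigram_score and the order-64 cutoff are assumptions:
+def _demo_bigram_score(data: bytes) -> int:
+    """Sum the 0-3 bigram likelihoods for a windows-1255 byte string."""
+    score = 0
+    prev_order = 255  # sentinel: no preceding letter yet
+    for byte in data:
+        order = WINDOWS_1255_HEBREW_CHAR_TO_ORDER.get(byte, 255)
+        # Only frequent letters (low orders) have rows in the model;
+        # 251-255 mark control bytes, digits, and symbols.
+        if prev_order < 64 and order < 64:
+            score += HEBREW_LANG_MODEL.get(prev_order, {}).get(order, 0)
+        prev_order = order
+    return score
+
+# Example: _demo_bigram_score('שלום'.encode('windows-1255')) scores 3 for each
+# of its three letter pairs, whereas random bytes mostly hit 0 rows; that gap
+# is the signal the detector ultimately thresholds.
+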
diff --git a/third_party/python/chardet/chardet/langhungarianmodel.py b/third_party/python/chardet/chardet/langhungarianmodel.py
new file mode 100644
index 0000000000..24a097f520
--- /dev/null
+++ b/third_party/python/chardet/chardet/langhungarianmodel.py
@@ -0,0 +1,4650 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from chardet.sbcharsetprober import SingleByteCharSetModel
+
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
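+#
+# As in the other single-byte models, these categories are tallied per bigram
+# while probing; roughly, the share of "positive" (3) pairs relative to the
+# model's typical_positive_ratio drives confidence (a hedged summary, not a
+# precise description of chardet's scoring).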
+
+HUNGARIAN_LANG_MODEL = {
+ 28: { # 'A'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 2, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 2, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 2, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 2, # 'N'
+ 47: 1, # 'O'
+ 46: 2, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 2, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 2, # 'p'
+ 10: 2, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 1, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 1, # 'Á'
+ 44: 0, # 'É'
+ 61: 1, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 40: { # 'B'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 0, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 1, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 3, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 54: { # 'C'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 0, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 0, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 1, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 3, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 45: { # 'D'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 0, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 0, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 1, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 32: { # 'E'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 2, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 2, # 'K'
+ 41: 2, # 'L'
+ 34: 2, # 'M'
+ 35: 2, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 1, # 't'
+ 21: 2, # 'u'
+ 19: 1, # 'v'
+ 62: 1, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 50: { # 'F'
+ 28: 1, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 0, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 0, # 'V'
+ 55: 1, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 1, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 1, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 49: { # 'G'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 2, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 38: { # 'H'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 0, # 'D'
+ 32: 1, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 1, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 1, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 1, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 0, # 'V'
+ 55: 1, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 1, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 0, # 'n'
+ 8: 3, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 2, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 2, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 39: { # 'I'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 2, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 2, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 2, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 0, # 'e'
+ 27: 1, # 'f'
+ 12: 2, # 'g'
+ 20: 1, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 53: { # 'J'
+ 28: 2, # 'A'
+ 40: 0, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 1, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 0, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 36: { # 'K'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 1, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 3, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 2, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 41: { # 'L'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 1, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 34: { # 'M'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 3, # 'a'
+ 18: 0, # 'b'
+ 26: 1, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 3, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 35: { # 'N'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 2, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 2, # 'Y'
+ 52: 1, # 'Z'
+ 2: 3, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 47: { # 'O'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 2, # 'K'
+ 41: 2, # 'L'
+ 34: 2, # 'M'
+ 35: 2, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 1, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 1, # 's'
+ 3: 2, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 1, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 46: { # 'P'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 0, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 1, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 1, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 3, # 'á'
+ 15: 2, # 'é'
+ 30: 0, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 43: { # 'R'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 2, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 2, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 33: { # 'S'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 3, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 1, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 1, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 1, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 37: { # 'T'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 1, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 2, # 'Á'
+ 44: 2, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 57: { # 'U'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 0, # 'f'
+ 12: 2, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 1, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 48: { # 'V'
+ 28: 2, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 0, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 2, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 55: { # 'Y'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 2, # 'Z'
+ 2: 1, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 1, # 'o'
+ 23: 1, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 52: { # 'Z'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 0, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 1, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 1, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 1, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 2, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 2: { # 'a'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 2, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 2, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 2, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 18: { # 'b'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 2, # 's'
+ 3: 1, # 't'
+ 21: 3, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 3, # 'ó'
+ 24: 2, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 26: { # 'c'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 1, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 1, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 1, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 2, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 2, # 't'
+ 21: 2, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 2, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 17: { # 'd'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 2, # 'k'
+ 6: 1, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 1: { # 'e'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 2, # 'e'
+ 27: 3, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 2, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 2, # 'u'
+ 19: 3, # 'v'
+ 62: 2, # 'x'
+ 16: 2, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 27: { # 'f'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 0, # 'p'
+ 10: 3, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 2, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 3, # 'ö'
+ 31: 1, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 12: { # 'g'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 2, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 2, # 'k'
+ 6: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 3, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 3, # 'ó'
+ 24: 2, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 20: { # 'h'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 2, # 's'
+ 3: 1, # 't'
+ 21: 3, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 2, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 9: { # 'i'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 3, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 2, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 3, # 'ó'
+ 24: 1, # 'ö'
+ 31: 2, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 22: { # 'j'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 1, # 'i'
+ 22: 2, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 1, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 3, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 7: { # 'k'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 2, # 'ó'
+ 24: 3, # 'ö'
+ 31: 1, # 'ú'
+ 29: 3, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 6: { # 'l'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 1, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 3, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 3, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 3, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 13: { # 'm'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 1, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 8: 3, # 'o'
+ 23: 3, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 3, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 2, # 'ű'
+ },
+ 4: { # 'n'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 1, # 'x'
+ 16: 3, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 3, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 8: { # 'o'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 1, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 2, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 2, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 23: { # 'p'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 1, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 2, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 3, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 10: { # 'r'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 2, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 3, # 'ú'
+ 29: 3, # 'ü'
+ 42: 2, # 'ő'
+ 56: 2, # 'ű'
+ },
+ 5: { # 's'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 2, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 2, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 3, # 'k'
+ 6: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 3, # 'ú'
+ 29: 3, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 3: { # 't'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 1, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 3, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 3, # 'ú'
+ 29: 3, # 'ü'
+ 42: 3, # 'ő'
+ 56: 2, # 'ű'
+ },
+ 21: { # 'u'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 2, # 'b'
+ 26: 2, # 'c'
+ 17: 3, # 'd'
+ 1: 2, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 2, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 1, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 1, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 0, # 'ö'
+ 31: 1, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 19: { # 'v'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 2, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 62: { # 'x'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 0, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 1, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 16: { # 'y'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 2, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 2, # 'ű'
+ },
+ 11: { # 'z'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 2, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 3, # 'k'
+ 6: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 3, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 51: { # 'Á'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 1, # 'F'
+ 49: 2, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 2, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 1, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 44: { # 'É'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 0, # 'F'
+ 49: 2, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 2, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 61: { # 'Í'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 1, # 'J'
+ 36: 0, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 2, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 0, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 0, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 58: { # 'Ó'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 2, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 0, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 59: { # 'Ö'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 0, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 0, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 60: { # 'Ú'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 2, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 2, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 63: { # 'Ü'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 0, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 0, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 0, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 1, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 14: { # 'á'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 1, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 2, # 'h'
+ 9: 2, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 2, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 15: { # 'é'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 3, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 0, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 30: { # 'í'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 2, # 's'
+ 3: 3, # 't'
+ 21: 0, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 25: { # 'ó'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 3, # 'd'
+ 1: 1, # 'e'
+ 27: 2, # 'f'
+ 12: 2, # 'g'
+ 20: 2, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 1, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 24: { # 'ö'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 0, # 'a'
+ 18: 3, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 0, # 'e'
+ 27: 1, # 'f'
+ 12: 2, # 'g'
+ 20: 1, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 0, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 0, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 31: { # 'ú'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 3, # 'j'
+ 7: 1, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 2, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 29: { # 'ü'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 2, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 0, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 42: { # 'ő'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 2, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 1, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 56: { # 'ű'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 0, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+}
+
+# 255: Undefined characters that did not exist in the training text
+# 254: Carriage return ('\r') and line feed ('\n')
+# 253: Symbols and punctuation that do not belong to a word
+# 252: Digits 0-9
+# 251: Control characters
+
+# Character Mapping Table(s):
+WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 28, # 'A'
+ 66: 40, # 'B'
+ 67: 54, # 'C'
+ 68: 45, # 'D'
+ 69: 32, # 'E'
+ 70: 50, # 'F'
+ 71: 49, # 'G'
+ 72: 38, # 'H'
+ 73: 39, # 'I'
+ 74: 53, # 'J'
+ 75: 36, # 'K'
+ 76: 41, # 'L'
+ 77: 34, # 'M'
+ 78: 35, # 'N'
+ 79: 47, # 'O'
+ 80: 46, # 'P'
+ 81: 72, # 'Q'
+ 82: 43, # 'R'
+ 83: 33, # 'S'
+ 84: 37, # 'T'
+ 85: 57, # 'U'
+ 86: 48, # 'V'
+ 87: 64, # 'W'
+ 88: 68, # 'X'
+ 89: 55, # 'Y'
+ 90: 52, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 2, # 'a'
+ 98: 18, # 'b'
+ 99: 26, # 'c'
+ 100: 17, # 'd'
+ 101: 1, # 'e'
+ 102: 27, # 'f'
+ 103: 12, # 'g'
+ 104: 20, # 'h'
+ 105: 9, # 'i'
+ 106: 22, # 'j'
+ 107: 7, # 'k'
+ 108: 6, # 'l'
+ 109: 13, # 'm'
+ 110: 4, # 'n'
+ 111: 8, # 'o'
+ 112: 23, # 'p'
+ 113: 67, # 'q'
+ 114: 10, # 'r'
+ 115: 5, # 's'
+ 116: 3, # 't'
+ 117: 21, # 'u'
+ 118: 19, # 'v'
+ 119: 65, # 'w'
+ 120: 62, # 'x'
+ 121: 16, # 'y'
+ 122: 11, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 161, # '€'
+ 129: 162, # None
+ 130: 163, # '‚'
+ 131: 164, # None
+ 132: 165, # '„'
+ 133: 166, # '…'
+ 134: 167, # '†'
+ 135: 168, # '‡'
+ 136: 169, # None
+ 137: 170, # '‰'
+ 138: 171, # 'Š'
+ 139: 172, # '‹'
+ 140: 173, # 'Ś'
+ 141: 174, # 'Ť'
+ 142: 175, # 'Ž'
+ 143: 176, # 'Ź'
+ 144: 177, # None
+ 145: 178, # '‘'
+ 146: 179, # '’'
+ 147: 180, # '“'
+ 148: 78, # '”'
+ 149: 181, # '•'
+ 150: 69, # '–'
+ 151: 182, # '—'
+ 152: 183, # None
+ 153: 184, # '™'
+ 154: 185, # 'š'
+ 155: 186, # '›'
+ 156: 187, # 'ś'
+ 157: 188, # 'ť'
+ 158: 189, # 'ž'
+ 159: 190, # 'ź'
+ 160: 191, # '\xa0'
+ 161: 192, # 'ˇ'
+ 162: 193, # '˘'
+ 163: 194, # 'Ł'
+ 164: 195, # '¤'
+ 165: 196, # 'Ą'
+ 166: 197, # '¦'
+ 167: 76, # '§'
+ 168: 198, # '¨'
+ 169: 199, # '©'
+ 170: 200, # 'Ş'
+ 171: 201, # '«'
+ 172: 202, # '¬'
+ 173: 203, # '\xad'
+ 174: 204, # '®'
+ 175: 205, # 'Ż'
+ 176: 81, # '°'
+ 177: 206, # '±'
+ 178: 207, # '˛'
+ 179: 208, # 'ł'
+ 180: 209, # '´'
+ 181: 210, # 'µ'
+ 182: 211, # '¶'
+ 183: 212, # '·'
+ 184: 213, # '¸'
+ 185: 214, # 'ą'
+ 186: 215, # 'ş'
+ 187: 216, # '»'
+ 188: 217, # 'Ľ'
+ 189: 218, # '˝'
+ 190: 219, # 'ľ'
+ 191: 220, # 'ż'
+ 192: 221, # 'Ŕ'
+ 193: 51, # 'Á'
+ 194: 83, # 'Â'
+ 195: 222, # 'Ă'
+ 196: 80, # 'Ä'
+ 197: 223, # 'Ĺ'
+ 198: 224, # 'Ć'
+ 199: 225, # 'Ç'
+ 200: 226, # 'Č'
+ 201: 44, # 'É'
+ 202: 227, # 'Ę'
+ 203: 228, # 'Ë'
+ 204: 229, # 'Ě'
+ 205: 61, # 'Í'
+ 206: 230, # 'Î'
+ 207: 231, # 'Ď'
+ 208: 232, # 'Đ'
+ 209: 233, # 'Ń'
+ 210: 234, # 'Ň'
+ 211: 58, # 'Ó'
+ 212: 235, # 'Ô'
+ 213: 66, # 'Ő'
+ 214: 59, # 'Ö'
+ 215: 236, # '×'
+ 216: 237, # 'Ř'
+ 217: 238, # 'Ů'
+ 218: 60, # 'Ú'
+ 219: 70, # 'Ű'
+ 220: 63, # 'Ü'
+ 221: 239, # 'Ý'
+ 222: 240, # 'Ţ'
+ 223: 241, # 'ß'
+ 224: 84, # 'ŕ'
+ 225: 14, # 'á'
+ 226: 75, # 'â'
+ 227: 242, # 'ă'
+ 228: 71, # 'ä'
+ 229: 82, # 'ĺ'
+ 230: 243, # 'ć'
+ 231: 73, # 'ç'
+ 232: 244, # 'č'
+ 233: 15, # 'é'
+ 234: 85, # 'ę'
+ 235: 79, # 'ë'
+ 236: 86, # 'ě'
+ 237: 30, # 'í'
+ 238: 77, # 'î'
+ 239: 87, # 'ď'
+ 240: 245, # 'đ'
+ 241: 246, # 'ń'
+ 242: 247, # 'ň'
+ 243: 25, # 'ó'
+ 244: 74, # 'ô'
+ 245: 42, # 'ő'
+ 246: 24, # 'ö'
+ 247: 248, # '÷'
+ 248: 249, # 'ř'
+ 249: 250, # 'ů'
+ 250: 31, # 'ú'
+ 251: 56, # 'ű'
+ 252: 29, # 'ü'
+ 253: 251, # 'ý'
+ 254: 252, # 'ţ'
+ 255: 253, # '˙'
+}
+
+WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1250',
+ language='Hungarian',
+ char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER,
+ language_model=HUNGARIAN_LANG_MODEL,
+ typical_positive_ratio=0.947368,
+ keep_ascii_letters=True,
+ alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű')
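+
+# Editorial sketch, not part of upstream chardet: mapping raw
+# windows-1250 bytes through the table above yields the frequency
+# orders that index HUNGARIAN_LANG_MODEL; orders of 251-255 flag the
+# special classes documented above and carry no language information.
+# Under Python 3, where iterating bytes yields ints:
+#
+#   >>> data = 'szöveg'.encode('windows-1250')
+#   >>> [WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER[b] for b in data]
+#   [5, 11, 24, 19, 1, 12]  # 's' 'z' 'ö' 'v' 'e' 'g'
+#   >>> HUNGARIAN_LANG_MODEL[31][12]  # 'ú' followed by 'g', as in 'úgy'
+#   3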
+
+ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 28, # 'A'
+ 66: 40, # 'B'
+ 67: 54, # 'C'
+ 68: 45, # 'D'
+ 69: 32, # 'E'
+ 70: 50, # 'F'
+ 71: 49, # 'G'
+ 72: 38, # 'H'
+ 73: 39, # 'I'
+ 74: 53, # 'J'
+ 75: 36, # 'K'
+ 76: 41, # 'L'
+ 77: 34, # 'M'
+ 78: 35, # 'N'
+ 79: 47, # 'O'
+ 80: 46, # 'P'
+ 81: 71, # 'Q'
+ 82: 43, # 'R'
+ 83: 33, # 'S'
+ 84: 37, # 'T'
+ 85: 57, # 'U'
+ 86: 48, # 'V'
+ 87: 64, # 'W'
+ 88: 68, # 'X'
+ 89: 55, # 'Y'
+ 90: 52, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 2, # 'a'
+ 98: 18, # 'b'
+ 99: 26, # 'c'
+ 100: 17, # 'd'
+ 101: 1, # 'e'
+ 102: 27, # 'f'
+ 103: 12, # 'g'
+ 104: 20, # 'h'
+ 105: 9, # 'i'
+ 106: 22, # 'j'
+ 107: 7, # 'k'
+ 108: 6, # 'l'
+ 109: 13, # 'm'
+ 110: 4, # 'n'
+ 111: 8, # 'o'
+ 112: 23, # 'p'
+ 113: 67, # 'q'
+ 114: 10, # 'r'
+ 115: 5, # 's'
+ 116: 3, # 't'
+ 117: 21, # 'u'
+ 118: 19, # 'v'
+ 119: 65, # 'w'
+ 120: 62, # 'x'
+ 121: 16, # 'y'
+ 122: 11, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 159, # '\x80'
+ 129: 160, # '\x81'
+ 130: 161, # '\x82'
+ 131: 162, # '\x83'
+ 132: 163, # '\x84'
+ 133: 164, # '\x85'
+ 134: 165, # '\x86'
+ 135: 166, # '\x87'
+ 136: 167, # '\x88'
+ 137: 168, # '\x89'
+ 138: 169, # '\x8a'
+ 139: 170, # '\x8b'
+ 140: 171, # '\x8c'
+ 141: 172, # '\x8d'
+ 142: 173, # '\x8e'
+ 143: 174, # '\x8f'
+ 144: 175, # '\x90'
+ 145: 176, # '\x91'
+ 146: 177, # '\x92'
+ 147: 178, # '\x93'
+ 148: 179, # '\x94'
+ 149: 180, # '\x95'
+ 150: 181, # '\x96'
+ 151: 182, # '\x97'
+ 152: 183, # '\x98'
+ 153: 184, # '\x99'
+ 154: 185, # '\x9a'
+ 155: 186, # '\x9b'
+ 156: 187, # '\x9c'
+ 157: 188, # '\x9d'
+ 158: 189, # '\x9e'
+ 159: 190, # '\x9f'
+ 160: 191, # '\xa0'
+ 161: 192, # 'Ą'
+ 162: 193, # '˘'
+ 163: 194, # 'Ł'
+ 164: 195, # '¤'
+ 165: 196, # 'Ľ'
+ 166: 197, # 'Ś'
+ 167: 75, # '§'
+ 168: 198, # '¨'
+ 169: 199, # 'Š'
+ 170: 200, # 'Ş'
+ 171: 201, # 'Ť'
+ 172: 202, # 'Ź'
+ 173: 203, # '\xad'
+ 174: 204, # 'Ž'
+ 175: 205, # 'Ż'
+ 176: 79, # '°'
+ 177: 206, # 'ą'
+ 178: 207, # '˛'
+ 179: 208, # 'ł'
+ 180: 209, # '´'
+ 181: 210, # 'ľ'
+ 182: 211, # 'ś'
+ 183: 212, # 'ˇ'
+ 184: 213, # '¸'
+ 185: 214, # 'š'
+ 186: 215, # 'ş'
+ 187: 216, # 'ť'
+ 188: 217, # 'ź'
+ 189: 218, # '˝'
+ 190: 219, # 'ž'
+ 191: 220, # 'ż'
+ 192: 221, # 'Ŕ'
+ 193: 51, # 'Á'
+ 194: 81, # 'Â'
+ 195: 222, # 'Ă'
+ 196: 78, # 'Ä'
+ 197: 223, # 'Ĺ'
+ 198: 224, # 'Ć'
+ 199: 225, # 'Ç'
+ 200: 226, # 'Č'
+ 201: 44, # 'É'
+ 202: 227, # 'Ę'
+ 203: 228, # 'Ë'
+ 204: 229, # 'Ě'
+ 205: 61, # 'Í'
+ 206: 230, # 'Î'
+ 207: 231, # 'Ď'
+ 208: 232, # 'Đ'
+ 209: 233, # 'Ń'
+ 210: 234, # 'Ň'
+ 211: 58, # 'Ó'
+ 212: 235, # 'Ô'
+ 213: 66, # 'Ő'
+ 214: 59, # 'Ö'
+ 215: 236, # '×'
+ 216: 237, # 'Ř'
+ 217: 238, # 'Ů'
+ 218: 60, # 'Ú'
+ 219: 69, # 'Ű'
+ 220: 63, # 'Ü'
+ 221: 239, # 'Ý'
+ 222: 240, # 'Ţ'
+ 223: 241, # 'ß'
+ 224: 82, # 'ŕ'
+ 225: 14, # 'á'
+ 226: 74, # 'â'
+ 227: 242, # 'ă'
+ 228: 70, # 'ä'
+ 229: 80, # 'ĺ'
+ 230: 243, # 'ć'
+ 231: 72, # 'ç'
+ 232: 244, # 'č'
+ 233: 15, # 'é'
+ 234: 83, # 'ę'
+ 235: 77, # 'ë'
+ 236: 84, # 'ě'
+ 237: 30, # 'í'
+ 238: 76, # 'î'
+ 239: 85, # 'ď'
+ 240: 245, # 'đ'
+ 241: 246, # 'ń'
+ 242: 247, # 'ň'
+ 243: 25, # 'ó'
+ 244: 73, # 'ô'
+ 245: 42, # 'ő'
+ 246: 24, # 'ö'
+ 247: 248, # '÷'
+ 248: 249, # 'ř'
+ 249: 250, # 'ů'
+ 250: 31, # 'ú'
+ 251: 56, # 'ű'
+ 252: 29, # 'ü'
+ 253: 251, # 'ý'
+ 254: 252, # 'ţ'
+ 255: 253, # '˙'
+}
+
+ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-2',
+ language='Hungarian',
+ char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER,
+ language_model=HUNGARIAN_LANG_MODEL,
+ typical_positive_ratio=0.947368,
+ keep_ascii_letters=True,
+ alphabet='ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű')
+
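+# Editorial usage sketch, not part of upstream chardet: both Hungarian
+# models are consumed indirectly through the package's top-level API, so
+# for sufficiently long Hungarian input one would expect something like:
+#
+#   >>> import chardet
+#   >>> chardet.detect('árvíztűrő tükörfúrógép'.encode('iso-8859-2'))
+#   {'encoding': 'ISO-8859-2', 'confidence': ..., 'language': 'Hungarian'}
+#
+# where the exact confidence depends on the sampled bigram statistics.
+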
diff --git a/third_party/python/chardet/chardet/langrussianmodel.py b/third_party/python/chardet/chardet/langrussianmodel.py
new file mode 100644
index 0000000000..569689d0f5
--- /dev/null
+++ b/third_party/python/chardet/chardet/langrussianmodel.py
@@ -0,0 +1,5718 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from chardet.sbcharsetprober import SingleByteCharSetModel
+
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
+
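+# Editorial note, not part of upstream chardet: the nested dict below is
+# indexed first by the frequency order of the preceding character and then
+# by that of the current one, yielding the 0-3 bigram likelihood defined
+# above; for example, with the order values used throughout this file:
+#
+#   >>> RUSSIAN_LANG_MODEL[3][21]  # 'а' followed by 'б'
+#   3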
+RUSSIAN_LANG_MODEL = {
+ 37: { # 'А'
+ 37: 0, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 2, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 1, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 0, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 1, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 0, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 44: { # 'Б'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 1, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 2, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 33: { # 'В'
+ 37: 2, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 2, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 1, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 0, # 'ю'
+ 16: 1, # 'я'
+ },
+ 46: { # 'Г'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 2, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 41: { # 'Д'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 2, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 2, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 3, # 'ж'
+ 20: 1, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 48: { # 'Е'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 2, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 2, # 'Р'
+ 32: 2, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 2, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 2, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 1, # 'н'
+ 1: 0, # 'о'
+ 15: 1, # 'п'
+ 9: 1, # 'р'
+ 7: 3, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 2, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 56: { # 'Ж'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 1, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 2, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 1, # 'м'
+ 5: 0, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 2, # 'ю'
+ 16: 0, # 'я'
+ },
+ 51: { # 'З'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 0, # 'г'
+ 13: 2, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 1, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 1, # 'я'
+ },
+ 42: { # 'И'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 2, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 2, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 1, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 2, # 'з'
+ 4: 1, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 1, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 1, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 60: { # 'Й'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 1, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 36: { # 'К'
+ 37: 2, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 2, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 0, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 49: { # 'Л'
+ 37: 2, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 1, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 0, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 0, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 1, # 'л'
+ 12: 0, # 'м'
+ 5: 1, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 2, # 'ю'
+ 16: 1, # 'я'
+ },
+ 38: { # 'М'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 1, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 1, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 31: { # 'Н'
+ 37: 2, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 1, # 'З'
+ 42: 2, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 3, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 34: { # 'О'
+ 37: 0, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 2, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 2, # 'Л'
+ 38: 1, # 'М'
+ 31: 2, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 2, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 1, # 'а'
+ 21: 2, # 'б'
+ 10: 1, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 0, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 1, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 0, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 1, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 35: { # 'П'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 2, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 0, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 3, # 'р'
+ 7: 1, # 'с'
+ 6: 1, # 'т'
+ 14: 2, # 'у'
+ 39: 1, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 0, # 'ю'
+ 16: 2, # 'я'
+ },
+ 45: { # 'Р'
+ 37: 2, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 2, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 0, # 'З'
+ 42: 2, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 2, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 2, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 2, # 'я'
+ },
+ 32: { # 'С'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 2, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 2, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 1, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 1, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 40: { # 'Т'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 2, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 1, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 52: { # 'У'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 1, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 2, # 'и'
+ 23: 1, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 1, # 'н'
+ 1: 2, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 0, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 53: { # 'Ф'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 1, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 55: { # 'Х'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 2, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 0, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 58: { # 'Ц'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 0, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 50: { # 'Ч'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 1, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 1, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 1, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 57: { # 'Ш'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 1, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 2, # 'о'
+ 15: 2, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 63: { # 'Щ'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 1, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 1, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 1, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 62: { # 'Ы'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 0, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 0, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 61: { # 'Ь'
+ 37: 0, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 1, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 1, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 0, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 0, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 47: { # 'Э'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 0, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 2, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 59: { # 'Ю'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 1, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 0, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 1, # 'п'
+ 9: 1, # 'р'
+ 7: 1, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 43: { # 'Я'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 0, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 0, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 1, # 'й'
+ 11: 1, # 'к'
+ 8: 1, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 1, # 'п'
+ 9: 1, # 'р'
+ 7: 1, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 3: { # 'а'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 3, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 3, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 21: { # 'б'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 1, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 0, # 'ф'
+ 26: 2, # 'х'
+ 28: 1, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 3, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 10: { # 'в'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 3, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 3, # 'ш'
+ 29: 2, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 3, # 'я'
+ },
+ 19: { # 'г'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 3, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 13: { # 'д'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 3, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 2: { # 'е'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 2, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 3, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 24: { # 'ж'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 1, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 1, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 0, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 20: { # 'з'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 3, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 3, # 'я'
+ },
+ 4: { # 'и'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 3, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 3, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 23: { # 'й'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 2, # 'з'
+ 4: 1, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 2, # 'ф'
+ 26: 1, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 2, # 'я'
+ },
+ 11: { # 'к'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 3, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 8: { # 'л'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 3, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 1, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 1, # 'ц'
+ 22: 3, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 12: { # 'м'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 1, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 2, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 3, # 'я'
+ },
+ 5: { # 'н'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 3, # 'ц'
+ 22: 3, # 'ч'
+ 25: 2, # 'ш'
+ 29: 2, # 'щ'
+ 54: 1, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 1: { # 'о'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 3, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 15: { # 'п'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 3, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 0, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 3, # 'я'
+ },
+ 9: { # 'р'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 2, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 3, # 'ш'
+ 29: 2, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 2, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 7: { # 'с'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 1, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 6: { # 'т'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 2, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 2, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 14: { # 'у'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 2, # 'и'
+ 23: 2, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 2, # 'я'
+ },
+ 39: { # 'ф'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 1, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 2, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 26: { # 'х'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 3, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 1, # 'п'
+ 9: 3, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 0, # 'щ'
+ 54: 1, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 28: { # 'ц'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 1, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 1, # 'т'
+ 14: 3, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 1, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 1, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 22: { # 'ч'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 3, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 25: { # 'ш'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 1, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 29: { # 'щ'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 1, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 2, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 54: { # 'ъ'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 0, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 2, # 'я'
+ },
+ 18: { # 'ы'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 2, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 1, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 2, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 2, # 'я'
+ },
+ 17: { # 'ь'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 3, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 0, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 2, # 'п'
+ 9: 1, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 0, # 'у'
+ 39: 2, # 'ф'
+ 26: 1, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 3, # 'ш'
+ 29: 2, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 30: { # 'э'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 1, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 1, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 2, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 2, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 27: { # 'ю'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 1, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 1, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 1, # 'и'
+ 23: 1, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 1, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 0, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 2, # 'ю'
+ 16: 1, # 'я'
+ },
+ 16: { # 'я'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 2, # 'и'
+ 23: 2, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 0, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 1, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 2, # 'ю'
+ 16: 2, # 'я'
+ },
+}
+
+# 255: Undefined characters that did not exist in the training text
+# 254: Carriage return / line feed
+# 253: Symbols (punctuation) that do not belong to words
+# 252: Digits 0-9
+# 251: Control characters
+
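+# The mapping tables below translate every raw byte to one of these orders.
+# As a minimal illustrative sketch (this helper is ours, not part of
+# chardet's API), a prober built on such a table can reduce a byte stream
+# to just the letter orders that the language model above scores:
+def _letter_orders(raw, char_to_order_map, max_order=250):
+    """Yield only the orders for letters seen in training.
+
+    ``raw`` is a byte string; ``char_to_order_map`` stands for any of the
+    ``*_CHAR_TO_ORDER`` tables below.  Orders above ``max_order`` (controls,
+    digits, punctuation, CR/LF, undefined bytes) delimit words instead of
+    contributing to bigram scoring.
+    """
+    for byte in raw:
+        order = char_to_order_map[byte]
+        if order <= max_order:
+            yield order
+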
+# Character Mapping Table(s):
+IBM866_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 37, # 'А'
+ 129: 44, # 'Б'
+ 130: 33, # 'В'
+ 131: 46, # 'Г'
+ 132: 41, # 'Д'
+ 133: 48, # 'Е'
+ 134: 56, # 'Ж'
+ 135: 51, # 'З'
+ 136: 42, # 'И'
+ 137: 60, # 'Й'
+ 138: 36, # 'К'
+ 139: 49, # 'Л'
+ 140: 38, # 'М'
+ 141: 31, # 'Н'
+ 142: 34, # 'О'
+ 143: 35, # 'П'
+ 144: 45, # 'Р'
+ 145: 32, # 'С'
+ 146: 40, # 'Т'
+ 147: 52, # 'У'
+ 148: 53, # 'Ф'
+ 149: 55, # 'Х'
+ 150: 58, # 'Ц'
+ 151: 50, # 'Ч'
+ 152: 57, # 'Ш'
+ 153: 63, # 'Щ'
+ 154: 70, # 'Ъ'
+ 155: 62, # 'Ы'
+ 156: 61, # 'Ь'
+ 157: 47, # 'Э'
+ 158: 59, # 'Ю'
+ 159: 43, # 'Я'
+ 160: 3, # 'а'
+ 161: 21, # 'б'
+ 162: 10, # 'в'
+ 163: 19, # 'г'
+ 164: 13, # 'д'
+ 165: 2, # 'е'
+ 166: 24, # 'ж'
+ 167: 20, # 'з'
+ 168: 4, # 'и'
+ 169: 23, # 'й'
+ 170: 11, # 'к'
+ 171: 8, # 'л'
+ 172: 12, # 'м'
+ 173: 5, # 'н'
+ 174: 1, # 'о'
+ 175: 15, # 'п'
+ 176: 191, # '░'
+ 177: 192, # '▒'
+ 178: 193, # '▓'
+ 179: 194, # '│'
+ 180: 195, # '┤'
+ 181: 196, # '╡'
+ 182: 197, # '╢'
+ 183: 198, # '╖'
+ 184: 199, # '╕'
+ 185: 200, # '╣'
+ 186: 201, # '║'
+ 187: 202, # '╗'
+ 188: 203, # '╝'
+ 189: 204, # '╜'
+ 190: 205, # '╛'
+ 191: 206, # '┐'
+ 192: 207, # '└'
+ 193: 208, # '┴'
+ 194: 209, # '┬'
+ 195: 210, # '├'
+ 196: 211, # '─'
+ 197: 212, # '┼'
+ 198: 213, # '╞'
+ 199: 214, # '╟'
+ 200: 215, # '╚'
+ 201: 216, # '╔'
+ 202: 217, # '╩'
+ 203: 218, # '╦'
+ 204: 219, # '╠'
+ 205: 220, # '═'
+ 206: 221, # '╬'
+ 207: 222, # '╧'
+ 208: 223, # '╨'
+ 209: 224, # '╤'
+ 210: 225, # '╥'
+ 211: 226, # '╙'
+ 212: 227, # '╘'
+ 213: 228, # '╒'
+ 214: 229, # '╓'
+ 215: 230, # '╫'
+ 216: 231, # '╪'
+ 217: 232, # '┘'
+ 218: 233, # '┌'
+ 219: 234, # '█'
+ 220: 235, # '▄'
+ 221: 236, # '▌'
+ 222: 237, # '▐'
+ 223: 238, # '▀'
+ 224: 9, # 'р'
+ 225: 7, # 'с'
+ 226: 6, # 'т'
+ 227: 14, # 'у'
+ 228: 39, # 'ф'
+ 229: 26, # 'х'
+ 230: 28, # 'ц'
+ 231: 22, # 'ч'
+ 232: 25, # 'ш'
+ 233: 29, # 'щ'
+ 234: 54, # 'ъ'
+ 235: 18, # 'ы'
+ 236: 17, # 'ь'
+ 237: 30, # 'э'
+ 238: 27, # 'ю'
+ 239: 16, # 'я'
+ 240: 239, # 'Ё'
+ 241: 68, # 'ё'
+ 242: 240, # 'Є'
+ 243: 241, # 'є'
+ 244: 242, # 'Ї'
+ 245: 243, # 'ї'
+ 246: 244, # 'Ў'
+ 247: 245, # 'ў'
+ 248: 246, # '°'
+ 249: 247, # '∙'
+ 250: 248, # '·'
+ 251: 249, # '√'
+ 252: 250, # '№'
+ 253: 251, # '¤'
+ 254: 252, # '■'
+ 255: 255, # '\xa0'
+}
+
+IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM866',
+ language='Russian',
+ char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+
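+# A hedged sketch of how a mapping table and RUSSIAN_LANG_MODEL combine
+# (the real scoring lives in chardet's SingleByteCharSetProber; this helper
+# and its name are illustrative only):
+def _bigram_scores(raw, char_to_order_map, language_model=None):
+    """Yield the 0-3 likelihood rating of each adjacent letter pair."""
+    if language_model is None:
+        language_model = RUSSIAN_LANG_MODEL
+    prev = None
+    for byte in raw:
+        order = char_to_order_map[byte]
+        if order > 250:  # delimiter codes documented above
+            prev = None
+            continue
+        if prev is not None:
+            # Orders absent from the model (rare letters) default to 0.
+            yield language_model.get(prev, {}).get(order, 0)
+        prev = order
+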
+WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 191, # 'Ђ'
+ 129: 192, # 'Ѓ'
+ 130: 193, # '‚'
+ 131: 194, # 'ѓ'
+ 132: 195, # '„'
+ 133: 196, # '…'
+ 134: 197, # '†'
+ 135: 198, # '‡'
+ 136: 199, # '€'
+ 137: 200, # '‰'
+ 138: 201, # 'Љ'
+ 139: 202, # '‹'
+ 140: 203, # 'Њ'
+ 141: 204, # 'Ќ'
+ 142: 205, # 'Ћ'
+ 143: 206, # 'Џ'
+ 144: 207, # 'ђ'
+ 145: 208, # '‘'
+ 146: 209, # '’'
+ 147: 210, # '“'
+ 148: 211, # '”'
+ 149: 212, # '•'
+ 150: 213, # '–'
+ 151: 214, # '—'
+ 152: 215, # None
+ 153: 216, # '™'
+ 154: 217, # 'љ'
+ 155: 218, # '›'
+ 156: 219, # 'њ'
+ 157: 220, # 'ќ'
+ 158: 221, # 'ћ'
+ 159: 222, # 'џ'
+ 160: 223, # '\xa0'
+ 161: 224, # 'Ў'
+ 162: 225, # 'ў'
+ 163: 226, # 'Ј'
+ 164: 227, # '¤'
+ 165: 228, # 'Ґ'
+ 166: 229, # '¦'
+ 167: 230, # '§'
+ 168: 231, # 'Ё'
+ 169: 232, # '©'
+ 170: 233, # 'Є'
+ 171: 234, # '«'
+ 172: 235, # '¬'
+ 173: 236, # '\xad'
+ 174: 237, # '®'
+ 175: 238, # 'Ї'
+ 176: 239, # '°'
+ 177: 240, # '±'
+ 178: 241, # 'І'
+ 179: 242, # 'і'
+ 180: 243, # 'ґ'
+ 181: 244, # 'µ'
+ 182: 245, # '¶'
+ 183: 246, # '·'
+ 184: 68, # 'ё'
+ 185: 247, # '№'
+ 186: 248, # 'є'
+ 187: 249, # '»'
+ 188: 250, # 'ј'
+ 189: 251, # 'Ѕ'
+ 190: 252, # 'ѕ'
+ 191: 253, # 'ї'
+ 192: 37, # 'А'
+ 193: 44, # 'Б'
+ 194: 33, # 'В'
+ 195: 46, # 'Г'
+ 196: 41, # 'Д'
+ 197: 48, # 'Е'
+ 198: 56, # 'Ж'
+ 199: 51, # 'З'
+ 200: 42, # 'И'
+ 201: 60, # 'Й'
+ 202: 36, # 'К'
+ 203: 49, # 'Л'
+ 204: 38, # 'М'
+ 205: 31, # 'Н'
+ 206: 34, # 'О'
+ 207: 35, # 'П'
+ 208: 45, # 'Р'
+ 209: 32, # 'С'
+ 210: 40, # 'Т'
+ 211: 52, # 'У'
+ 212: 53, # 'Ф'
+ 213: 55, # 'Х'
+ 214: 58, # 'Ц'
+ 215: 50, # 'Ч'
+ 216: 57, # 'Ш'
+ 217: 63, # 'Щ'
+ 218: 70, # 'Ъ'
+ 219: 62, # 'Ы'
+ 220: 61, # 'Ь'
+ 221: 47, # 'Э'
+ 222: 59, # 'Ю'
+ 223: 43, # 'Я'
+ 224: 3, # 'а'
+ 225: 21, # 'б'
+ 226: 10, # 'в'
+ 227: 19, # 'г'
+ 228: 13, # 'д'
+ 229: 2, # 'е'
+ 230: 24, # 'ж'
+ 231: 20, # 'з'
+ 232: 4, # 'и'
+ 233: 23, # 'й'
+ 234: 11, # 'к'
+ 235: 8, # 'л'
+ 236: 12, # 'м'
+ 237: 5, # 'н'
+ 238: 1, # 'о'
+ 239: 15, # 'п'
+ 240: 9, # 'р'
+ 241: 7, # 'с'
+ 242: 6, # 'т'
+ 243: 14, # 'у'
+ 244: 39, # 'ф'
+ 245: 26, # 'х'
+ 246: 28, # 'ц'
+ 247: 22, # 'ч'
+ 248: 25, # 'ш'
+ 249: 29, # 'щ'
+ 250: 54, # 'ъ'
+ 251: 18, # 'ы'
+ 252: 17, # 'ь'
+ 253: 30, # 'э'
+ 254: 27, # 'ю'
+ 255: 16, # 'я'
+}
+
+WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='windows-1251',
+ language='Russian',
+ char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+
+IBM855_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 191, # 'ђ'
+ 129: 192, # 'Ђ'
+ 130: 193, # 'ѓ'
+ 131: 194, # 'Ѓ'
+ 132: 68, # 'ё'
+ 133: 195, # 'Ё'
+ 134: 196, # 'є'
+ 135: 197, # 'Є'
+ 136: 198, # 'ѕ'
+ 137: 199, # 'Ѕ'
+ 138: 200, # 'і'
+ 139: 201, # 'І'
+ 140: 202, # 'ї'
+ 141: 203, # 'Ї'
+ 142: 204, # 'ј'
+ 143: 205, # 'Ј'
+ 144: 206, # 'љ'
+ 145: 207, # 'Љ'
+ 146: 208, # 'њ'
+ 147: 209, # 'Њ'
+ 148: 210, # 'ћ'
+ 149: 211, # 'Ћ'
+ 150: 212, # 'ќ'
+ 151: 213, # 'Ќ'
+ 152: 214, # 'ў'
+ 153: 215, # 'Ў'
+ 154: 216, # 'џ'
+ 155: 217, # 'Џ'
+ 156: 27, # 'ю'
+ 157: 59, # 'Ю'
+ 158: 54, # 'ъ'
+ 159: 70, # 'Ъ'
+ 160: 3, # 'а'
+ 161: 37, # 'А'
+ 162: 21, # 'б'
+ 163: 44, # 'Б'
+ 164: 28, # 'ц'
+ 165: 58, # 'Ц'
+ 166: 13, # 'д'
+ 167: 41, # 'Д'
+ 168: 2, # 'е'
+ 169: 48, # 'Е'
+ 170: 39, # 'ф'
+ 171: 53, # 'Ф'
+ 172: 19, # 'г'
+ 173: 46, # 'Г'
+ 174: 218, # '«'
+ 175: 219, # '»'
+ 176: 220, # '░'
+ 177: 221, # '▒'
+ 178: 222, # '▓'
+ 179: 223, # '│'
+ 180: 224, # '┤'
+ 181: 26, # 'х'
+ 182: 55, # 'Х'
+ 183: 4, # 'и'
+ 184: 42, # 'И'
+ 185: 225, # '╣'
+ 186: 226, # '║'
+ 187: 227, # '╗'
+ 188: 228, # '╝'
+ 189: 23, # 'й'
+ 190: 60, # 'Й'
+ 191: 229, # '┐'
+ 192: 230, # '└'
+ 193: 231, # '┴'
+ 194: 232, # '┬'
+ 195: 233, # '├'
+ 196: 234, # '─'
+ 197: 235, # '┼'
+ 198: 11, # 'к'
+ 199: 36, # 'К'
+ 200: 236, # '╚'
+ 201: 237, # '╔'
+ 202: 238, # '╩'
+ 203: 239, # '╦'
+ 204: 240, # '╠'
+ 205: 241, # '═'
+ 206: 242, # '╬'
+ 207: 243, # '¤'
+ 208: 8, # 'л'
+ 209: 49, # 'Л'
+ 210: 12, # 'м'
+ 211: 38, # 'М'
+ 212: 5, # 'н'
+ 213: 31, # 'Н'
+ 214: 1, # 'о'
+ 215: 34, # 'О'
+ 216: 15, # 'п'
+ 217: 244, # '┘'
+ 218: 245, # '┌'
+ 219: 246, # '█'
+ 220: 247, # '▄'
+ 221: 35, # 'П'
+ 222: 16, # 'я'
+ 223: 248, # '▀'
+ 224: 43, # 'Я'
+ 225: 9, # 'р'
+ 226: 45, # 'Р'
+ 227: 7, # 'с'
+ 228: 32, # 'С'
+ 229: 6, # 'т'
+ 230: 40, # 'Т'
+ 231: 14, # 'у'
+ 232: 52, # 'У'
+ 233: 24, # 'ж'
+ 234: 56, # 'Ж'
+ 235: 10, # 'в'
+ 236: 33, # 'В'
+ 237: 17, # 'ь'
+ 238: 61, # 'Ь'
+ 239: 249, # '№'
+ 240: 250, # '\xad'
+ 241: 18, # 'ы'
+ 242: 62, # 'Ы'
+ 243: 20, # 'з'
+ 244: 51, # 'З'
+ 245: 25, # 'ш'
+ 246: 57, # 'Ш'
+ 247: 30, # 'э'
+ 248: 47, # 'Э'
+ 249: 29, # 'щ'
+ 250: 63, # 'Щ'
+ 251: 22, # 'ч'
+ 252: 50, # 'Ч'
+ 253: 251, # '§'
+ 254: 252, # '■'
+ 255: 255, # '\xa0'
+}
+
+IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='IBM855',
+ language='Russian',
+ char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+
+KOI8_R_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 191, # '─'
+ 129: 192, # '│'
+ 130: 193, # '┌'
+ 131: 194, # '┐'
+ 132: 195, # '└'
+ 133: 196, # '┘'
+ 134: 197, # '├'
+ 135: 198, # '┤'
+ 136: 199, # '┬'
+ 137: 200, # '┴'
+ 138: 201, # '┼'
+ 139: 202, # '▀'
+ 140: 203, # '▄'
+ 141: 204, # '█'
+ 142: 205, # '▌'
+ 143: 206, # '▐'
+ 144: 207, # '░'
+ 145: 208, # '▒'
+ 146: 209, # '▓'
+ 147: 210, # '⌠'
+ 148: 211, # '■'
+ 149: 212, # '∙'
+ 150: 213, # '√'
+ 151: 214, # '≈'
+ 152: 215, # '≤'
+ 153: 216, # '≥'
+ 154: 217, # '\xa0'
+ 155: 218, # '⌡'
+ 156: 219, # '°'
+ 157: 220, # '²'
+ 158: 221, # '·'
+ 159: 222, # '÷'
+ 160: 223, # '═'
+ 161: 224, # '║'
+ 162: 225, # '╒'
+ 163: 68, # 'ё'
+ 164: 226, # '╓'
+ 165: 227, # '╔'
+ 166: 228, # '╕'
+ 167: 229, # '╖'
+ 168: 230, # '╗'
+ 169: 231, # '╘'
+ 170: 232, # '╙'
+ 171: 233, # '╚'
+ 172: 234, # '╛'
+ 173: 235, # '╜'
+ 174: 236, # '╝'
+ 175: 237, # '╞'
+ 176: 238, # '╟'
+ 177: 239, # '╠'
+ 178: 240, # '╡'
+ 179: 241, # 'Ё'
+ 180: 242, # '╢'
+ 181: 243, # '╣'
+ 182: 244, # '╤'
+ 183: 245, # '╥'
+ 184: 246, # '╦'
+ 185: 247, # '╧'
+ 186: 248, # '╨'
+ 187: 249, # '╩'
+ 188: 250, # '╪'
+ 189: 251, # '╫'
+ 190: 252, # '╬'
+ 191: 253, # '©'
+ 192: 27, # 'ю'
+ 193: 3, # 'а'
+ 194: 21, # 'б'
+ 195: 28, # 'ц'
+ 196: 13, # 'д'
+ 197: 2, # 'е'
+ 198: 39, # 'ф'
+ 199: 19, # 'г'
+ 200: 26, # 'х'
+ 201: 4, # 'и'
+ 202: 23, # 'й'
+ 203: 11, # 'к'
+ 204: 8, # 'л'
+ 205: 12, # 'м'
+ 206: 5, # 'н'
+ 207: 1, # 'о'
+ 208: 15, # 'п'
+ 209: 16, # 'я'
+ 210: 9, # 'р'
+ 211: 7, # 'с'
+ 212: 6, # 'т'
+ 213: 14, # 'у'
+ 214: 24, # 'ж'
+ 215: 10, # 'в'
+ 216: 17, # 'ь'
+ 217: 18, # 'ы'
+ 218: 20, # 'з'
+ 219: 25, # 'ш'
+ 220: 30, # 'э'
+ 221: 29, # 'щ'
+ 222: 22, # 'ч'
+ 223: 54, # 'ъ'
+ 224: 59, # 'Ю'
+ 225: 37, # 'А'
+ 226: 44, # 'Б'
+ 227: 58, # 'Ц'
+ 228: 41, # 'Д'
+ 229: 48, # 'Е'
+ 230: 53, # 'Ф'
+ 231: 46, # 'Г'
+ 232: 55, # 'Х'
+ 233: 42, # 'И'
+ 234: 60, # 'Й'
+ 235: 36, # 'К'
+ 236: 49, # 'Л'
+ 237: 38, # 'М'
+ 238: 31, # 'Н'
+ 239: 34, # 'О'
+ 240: 35, # 'П'
+ 241: 43, # 'Я'
+ 242: 45, # 'Р'
+ 243: 32, # 'С'
+ 244: 40, # 'Т'
+ 245: 52, # 'У'
+ 246: 56, # 'Ж'
+ 247: 33, # 'В'
+ 248: 61, # 'Ь'
+ 249: 62, # 'Ы'
+ 250: 51, # 'З'
+ 251: 57, # 'Ш'
+ 252: 47, # 'Э'
+ 253: 63, # 'Щ'
+ 254: 50, # 'Ч'
+ 255: 70, # 'Ъ'
+}
+
+KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='KOI8-R',
+ language='Russian',
+ char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+
+MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 37, # 'А'
+ 129: 44, # 'Б'
+ 130: 33, # 'В'
+ 131: 46, # 'Г'
+ 132: 41, # 'Д'
+ 133: 48, # 'Е'
+ 134: 56, # 'Ж'
+ 135: 51, # 'З'
+ 136: 42, # 'И'
+ 137: 60, # 'Й'
+ 138: 36, # 'К'
+ 139: 49, # 'Л'
+ 140: 38, # 'М'
+ 141: 31, # 'Н'
+ 142: 34, # 'О'
+ 143: 35, # 'П'
+ 144: 45, # 'Р'
+ 145: 32, # 'С'
+ 146: 40, # 'Т'
+ 147: 52, # 'У'
+ 148: 53, # 'Ф'
+ 149: 55, # 'Х'
+ 150: 58, # 'Ц'
+ 151: 50, # 'Ч'
+ 152: 57, # 'Ш'
+ 153: 63, # 'Щ'
+ 154: 70, # 'Ъ'
+ 155: 62, # 'Ы'
+ 156: 61, # 'Ь'
+ 157: 47, # 'Э'
+ 158: 59, # 'Ю'
+ 159: 43, # 'Я'
+ 160: 191, # '†'
+ 161: 192, # '°'
+ 162: 193, # 'Ґ'
+ 163: 194, # '£'
+ 164: 195, # '§'
+ 165: 196, # '•'
+ 166: 197, # '¶'
+ 167: 198, # 'І'
+ 168: 199, # '®'
+ 169: 200, # '©'
+ 170: 201, # '™'
+ 171: 202, # 'Ђ'
+ 172: 203, # 'ђ'
+ 173: 204, # '≠'
+ 174: 205, # 'Ѓ'
+ 175: 206, # 'ѓ'
+ 176: 207, # '∞'
+ 177: 208, # '±'
+ 178: 209, # '≤'
+ 179: 210, # '≥'
+ 180: 211, # 'і'
+ 181: 212, # 'µ'
+ 182: 213, # 'ґ'
+ 183: 214, # 'Ј'
+ 184: 215, # 'Є'
+ 185: 216, # 'є'
+ 186: 217, # 'Ї'
+ 187: 218, # 'ї'
+ 188: 219, # 'Љ'
+ 189: 220, # 'љ'
+ 190: 221, # 'Њ'
+ 191: 222, # 'њ'
+ 192: 223, # 'ј'
+ 193: 224, # 'Ѕ'
+ 194: 225, # '¬'
+ 195: 226, # '√'
+ 196: 227, # 'ƒ'
+ 197: 228, # '≈'
+ 198: 229, # '∆'
+ 199: 230, # '«'
+ 200: 231, # '»'
+ 201: 232, # '…'
+ 202: 233, # '\xa0'
+ 203: 234, # 'Ћ'
+ 204: 235, # 'ћ'
+ 205: 236, # 'Ќ'
+ 206: 237, # 'ќ'
+ 207: 238, # 'ѕ'
+ 208: 239, # '–'
+ 209: 240, # '—'
+ 210: 241, # '“'
+ 211: 242, # '”'
+ 212: 243, # '‘'
+ 213: 244, # '’'
+ 214: 245, # '÷'
+ 215: 246, # '„'
+ 216: 247, # 'Ў'
+ 217: 248, # 'ў'
+ 218: 249, # 'Џ'
+ 219: 250, # 'џ'
+ 220: 251, # '№'
+ 221: 252, # 'Ё'
+ 222: 68, # 'ё'
+ 223: 16, # 'я'
+ 224: 3, # 'а'
+ 225: 21, # 'б'
+ 226: 10, # 'в'
+ 227: 19, # 'г'
+ 228: 13, # 'д'
+ 229: 2, # 'е'
+ 230: 24, # 'ж'
+ 231: 20, # 'з'
+ 232: 4, # 'и'
+ 233: 23, # 'й'
+ 234: 11, # 'к'
+ 235: 8, # 'л'
+ 236: 12, # 'м'
+ 237: 5, # 'н'
+ 238: 1, # 'о'
+ 239: 15, # 'п'
+ 240: 9, # 'р'
+ 241: 7, # 'с'
+ 242: 6, # 'т'
+ 243: 14, # 'у'
+ 244: 39, # 'ф'
+ 245: 26, # 'х'
+ 246: 28, # 'ц'
+ 247: 22, # 'ч'
+ 248: 25, # 'ш'
+ 249: 29, # 'щ'
+ 250: 54, # 'ъ'
+ 251: 18, # 'ы'
+ 252: 17, # 'ь'
+ 253: 30, # 'э'
+ 254: 27, # 'ю'
+ 255: 255, # '€'
+}
+
+MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='MacCyrillic',
+ language='Russian',
+ char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+
+ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 191, # '\x80'
+ 129: 192, # '\x81'
+ 130: 193, # '\x82'
+ 131: 194, # '\x83'
+ 132: 195, # '\x84'
+ 133: 196, # '\x85'
+ 134: 197, # '\x86'
+ 135: 198, # '\x87'
+ 136: 199, # '\x88'
+ 137: 200, # '\x89'
+ 138: 201, # '\x8a'
+ 139: 202, # '\x8b'
+ 140: 203, # '\x8c'
+ 141: 204, # '\x8d'
+ 142: 205, # '\x8e'
+ 143: 206, # '\x8f'
+ 144: 207, # '\x90'
+ 145: 208, # '\x91'
+ 146: 209, # '\x92'
+ 147: 210, # '\x93'
+ 148: 211, # '\x94'
+ 149: 212, # '\x95'
+ 150: 213, # '\x96'
+ 151: 214, # '\x97'
+ 152: 215, # '\x98'
+ 153: 216, # '\x99'
+ 154: 217, # '\x9a'
+ 155: 218, # '\x9b'
+ 156: 219, # '\x9c'
+ 157: 220, # '\x9d'
+ 158: 221, # '\x9e'
+ 159: 222, # '\x9f'
+ 160: 223, # '\xa0'
+ 161: 224, # 'Ё'
+ 162: 225, # 'Ђ'
+ 163: 226, # 'Ѓ'
+ 164: 227, # 'Є'
+ 165: 228, # 'Ѕ'
+ 166: 229, # 'І'
+ 167: 230, # 'Ї'
+ 168: 231, # 'Ј'
+ 169: 232, # 'Љ'
+ 170: 233, # 'Њ'
+ 171: 234, # 'Ћ'
+ 172: 235, # 'Ќ'
+ 173: 236, # '\xad'
+ 174: 237, # 'Ў'
+ 175: 238, # 'Џ'
+ 176: 37, # 'А'
+ 177: 44, # 'Б'
+ 178: 33, # 'В'
+ 179: 46, # 'Г'
+ 180: 41, # 'Д'
+ 181: 48, # 'Е'
+ 182: 56, # 'Ж'
+ 183: 51, # 'З'
+ 184: 42, # 'И'
+ 185: 60, # 'Й'
+ 186: 36, # 'К'
+ 187: 49, # 'Л'
+ 188: 38, # 'М'
+ 189: 31, # 'Н'
+ 190: 34, # 'О'
+ 191: 35, # 'П'
+ 192: 45, # 'Р'
+ 193: 32, # 'С'
+ 194: 40, # 'Т'
+ 195: 52, # 'У'
+ 196: 53, # 'Ф'
+ 197: 55, # 'Х'
+ 198: 58, # 'Ц'
+ 199: 50, # 'Ч'
+ 200: 57, # 'Ш'
+ 201: 63, # 'Щ'
+ 202: 70, # 'Ъ'
+ 203: 62, # 'Ы'
+ 204: 61, # 'Ь'
+ 205: 47, # 'Э'
+ 206: 59, # 'Ю'
+ 207: 43, # 'Я'
+ 208: 3, # 'а'
+ 209: 21, # 'б'
+ 210: 10, # 'в'
+ 211: 19, # 'г'
+ 212: 13, # 'д'
+ 213: 2, # 'е'
+ 214: 24, # 'ж'
+ 215: 20, # 'з'
+ 216: 4, # 'и'
+ 217: 23, # 'й'
+ 218: 11, # 'к'
+ 219: 8, # 'л'
+ 220: 12, # 'м'
+ 221: 5, # 'н'
+ 222: 1, # 'о'
+ 223: 15, # 'п'
+ 224: 9, # 'р'
+ 225: 7, # 'с'
+ 226: 6, # 'т'
+ 227: 14, # 'у'
+ 228: 39, # 'ф'
+ 229: 26, # 'х'
+ 230: 28, # 'ц'
+ 231: 22, # 'ч'
+ 232: 25, # 'ш'
+ 233: 29, # 'щ'
+ 234: 54, # 'ъ'
+ 235: 18, # 'ы'
+ 236: 17, # 'ь'
+ 237: 30, # 'э'
+ 238: 27, # 'ю'
+ 239: 16, # 'я'
+ 240: 239, # '№'
+ 241: 68, # 'ё'
+ 242: 240, # 'ђ'
+ 243: 241, # 'ѓ'
+ 244: 242, # 'є'
+ 245: 243, # 'ѕ'
+ 246: 244, # 'і'
+ 247: 245, # 'ї'
+ 248: 246, # 'ј'
+ 249: 247, # 'љ'
+ 250: 248, # 'њ'
+ 251: 249, # 'ћ'
+ 252: 250, # 'ќ'
+ 253: 251, # '§'
+ 254: 252, # 'ў'
+ 255: 255, # 'џ'
+}
+
+ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-5',
+ language='Russian',
+ char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet='ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё')
+
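+# All six Russian models above share RUSSIAN_LANG_MODEL and the same
+# typical_positive_ratio; only char_to_order_map differs, so detection asks
+# which byte-to-order translation makes the input look most like trained
+# Russian.  A hedged usage sketch (SingleByteCharSetProber is chardet's
+# real consumer of these models; this helper function itself is ours):
+def _best_russian_charset(raw):
+    """Return the charset name whose model scores ``raw`` highest."""
+    from chardet.sbcharsetprober import SingleByteCharSetProber
+    probers = [SingleByteCharSetProber(model) for model in (
+        IBM866_RUSSIAN_MODEL, WINDOWS_1251_RUSSIAN_MODEL,
+        IBM855_RUSSIAN_MODEL, KOI8_R_RUSSIAN_MODEL,
+        MACCYRILLIC_RUSSIAN_MODEL, ISO_8859_5_RUSSIAN_MODEL)]
+    for prober in probers:
+        prober.feed(raw)
+    return max(probers, key=lambda p: p.get_confidence()).charset_name
+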
diff --git a/third_party/python/chardet/chardet/langthaimodel.py b/third_party/python/chardet/chardet/langthaimodel.py
new file mode 100644
index 0000000000..d0191f241d
--- /dev/null
+++ b/third_party/python/chardet/chardet/langthaimodel.py
@@ -0,0 +1,4383 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from chardet.sbcharsetprober import SingleByteCharSetModel
+
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
+
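+# A hedged sketch of what these 0-3 ratings feed into: chardet's prober
+# counts how many scored bigrams rate Positive (3) and compares that share
+# against the model's typical_positive_ratio field (this confidence math is
+# approximate, and the helper below is illustrative, not upstream API):
+def _positive_ratio(scores):
+    """Fraction of bigram ratings that are Positive (3); 0.0 if none."""
+    scores = list(scores)
+    return scores.count(3) / float(len(scores)) if scores else 0.0
+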
+THAI_LANG_MODEL = {
+ 5: { # 'ก'
+ 5: 2, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 2, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 3, # 'ฎ'
+ 57: 2, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 2, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 3, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 1, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 3, # 'ร'
+ 61: 2, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 3, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 3, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 30: { # 'ข'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 1, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 2, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 2, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 2, # '่'
+ 7: 3, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 24: { # 'ค'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 2, # 'ค'
+ 8: 2, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 2, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 0, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 3, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 2, # 'า'
+ 36: 3, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 3, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 8: { # 'ง'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 3, # 'ค'
+ 8: 2, # 'ง'
+ 26: 2, # 'จ'
+ 52: 1, # 'ฉ'
+ 34: 2, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 1, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 3, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 26: { # 'จ'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 0, # 'ค'
+ 8: 2, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 1, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 1, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 2, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 52: { # 'ฉ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 3, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 1, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 1, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 34: { # 'ช'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 1, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 1, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 1, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 51: { # 'ซ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 1, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 1, # '่'
+ 7: 2, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 47: { # 'ญ'
+ 5: 1, # 'ก'
+ 30: 1, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 3, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 2, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 58: { # 'ฎ'
+ 5: 2, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 1, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 57: { # 'ฏ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 49: { # 'ฐ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 53: { # 'ฑ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 55: { # 'ฒ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 43: { # 'ณ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 3, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 3, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 20: { # 'ด'
+ 5: 2, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 3, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 2, # 'า'
+ 36: 2, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 1, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 2, # 'ๆ'
+ 37: 2, # '็'
+ 6: 1, # '่'
+ 7: 3, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 19: { # 'ต'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 1, # 'ต'
+ 44: 2, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 2, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 1, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 0, # 'ห'
+ 4: 3, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 1, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 2, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 44: { # 'ถ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 14: { # 'ท'
+ 5: 1, # 'ก'
+ 30: 1, # 'ข'
+ 24: 3, # 'ค'
+ 8: 1, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 3, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 3, # 'ย'
+ 2: 3, # 'ร'
+ 61: 1, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 2, # 'ว'
+ 42: 3, # 'ศ'
+ 46: 1, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 0, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 48: { # 'ธ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 1, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 2, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 3: { # 'น'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 3, # 'ค'
+ 8: 1, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 1, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 2, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 3, # 'ธ'
+ 3: 2, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 1, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 3, # 'โ'
+ 29: 3, # 'ใ'
+ 33: 3, # 'ไ'
+ 50: 2, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 17: { # 'บ'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 1, # 'ง'
+ 26: 1, # 'จ'
+ 52: 1, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 2, # '่'
+ 7: 2, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 25: { # 'ป'
+ 5: 2, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 1, # 'ฎ'
+ 57: 3, # 'ฏ'
+ 49: 1, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 0, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 1, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 1, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 2, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 3, # '็'
+ 6: 1, # '่'
+ 7: 2, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 39: { # 'ผ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 1, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 2, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 1, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 62: { # 'ฝ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 1, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 31: { # 'พ'
+ 5: 1, # 'ก'
+ 30: 1, # 'ข'
+ 24: 1, # 'ค'
+ 8: 1, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 2, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 1, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 0, # '่'
+ 7: 1, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 54: { # 'ฟ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 2, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 2, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 45: { # 'ภ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 9: { # 'ม'
+ 5: 2, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 2, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 2, # 'ร'
+ 61: 2, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 1, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 2, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 2, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 16: { # 'ย'
+ 5: 3, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 2, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 3, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 1, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 2, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 2, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 2, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 2: { # 'ร'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 2, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 3, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 3, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 2, # 'น'
+ 17: 2, # 'บ'
+ 25: 3, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 3, # 'เ'
+ 28: 3, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 3, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 61: { # 'ฤ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 15: { # 'ล'
+ 5: 2, # 'ก'
+ 30: 3, # 'ข'
+ 24: 1, # 'ค'
+ 8: 3, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 3, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 3, # 'อ'
+ 63: 2, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 2, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 2, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 12: { # 'ว'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 1, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 42: { # 'ศ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 1, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 2, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 0, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 46: { # 'ษ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 2, # 'ฎ'
+ 57: 1, # 'ฏ'
+ 49: 2, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 18: { # 'ส'
+ 5: 2, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 2, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 3, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 2, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 1, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 2, # 'เ'
+ 28: 0, # 'แ'
+ 41: 1, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 1, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 21: { # 'ห'
+ 5: 3, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 1, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 3, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 0, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 3, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 4: { # 'อ'
+ 5: 3, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 1, # '็'
+ 6: 2, # '่'
+ 7: 2, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 63: { # 'ฯ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 22: { # 'ะ'
+ 5: 3, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 1, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 2, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 10: { # 'ั'
+ 5: 3, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 3, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 3, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 2, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 3, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 1: { # 'า'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 3, # 'ค'
+ 8: 3, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 2, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 3, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 36: { # 'ำ'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 3, # 'ค'
+ 8: 2, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 1, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 3, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 23: { # 'ิ'
+ 5: 3, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 3, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 2, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 3, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 3, # 'ห'
+ 4: 1, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 2, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 13: { # 'ี'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 3, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 2, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 40: { # 'ึ'
+ 5: 3, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 3, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 27: { # 'ื'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 3, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 32: { # 'ุ'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 3, # 'ค'
+ 8: 3, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 1, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 2, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 1, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 1, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 1, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 2, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 35: { # 'ู'
+ 5: 3, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 2, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 0, # 'บ'
+ 25: 3, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 11: { # 'เ'
+ 5: 3, # 'ก'
+ 30: 3, # 'ข'
+ 24: 3, # 'ค'
+ 8: 2, # 'ง'
+ 26: 3, # 'จ'
+ 52: 3, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 2, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 3, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 3, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 28: { # 'แ'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 1, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 3, # 'ต'
+ 44: 2, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 3, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 2, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 41: { # 'โ'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 1, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 1, # 'บ'
+ 25: 3, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 0, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 29: { # 'ใ'
+ 5: 2, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 1, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 33: { # 'ไ'
+ 5: 1, # 'ก'
+ 30: 2, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 1, # 'บ'
+ 25: 3, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 2, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 0, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 3, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 2, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 50: { # 'ๆ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 37: { # '็'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 2, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 1, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 6: { # '่'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 1, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 1, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 7: { # '้'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 3, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 38: { # '์'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 1, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 56: { # '๑'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 2, # '๑'
+ 59: 1, # '๒'
+ 60: 1, # '๕'
+ },
+ 59: { # '๒'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 1, # '๑'
+ 59: 1, # '๒'
+ 60: 3, # '๕'
+ },
+ 60: { # '๕'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 2, # '๑'
+ 59: 1, # '๒'
+ 60: 0, # '๕'
+ },
+}
+
+# 255: Undefined characters that did not exist in the training text
+# 254: Carriage Return / Line Feed
+# 253: symbols (punctuation) that do not belong to words
+# 252: 0 - 9
+# 251: Control characters
+
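+# (Orders below 64 identify the most frequent characters and are the only
+# values that index into THAI_LANG_MODEL above; the sentinel codes listed
+# here are skipped when character pairs are scored.)
+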
+# Character Mapping Table(s):
+TIS_620_THAI_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 182, # 'A'
+ 66: 106, # 'B'
+ 67: 107, # 'C'
+ 68: 100, # 'D'
+ 69: 183, # 'E'
+ 70: 184, # 'F'
+ 71: 185, # 'G'
+ 72: 101, # 'H'
+ 73: 94, # 'I'
+ 74: 186, # 'J'
+ 75: 187, # 'K'
+ 76: 108, # 'L'
+ 77: 109, # 'M'
+ 78: 110, # 'N'
+ 79: 111, # 'O'
+ 80: 188, # 'P'
+ 81: 189, # 'Q'
+ 82: 190, # 'R'
+ 83: 89, # 'S'
+ 84: 95, # 'T'
+ 85: 112, # 'U'
+ 86: 113, # 'V'
+ 87: 191, # 'W'
+ 88: 192, # 'X'
+ 89: 193, # 'Y'
+ 90: 194, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 64, # 'a'
+ 98: 72, # 'b'
+ 99: 73, # 'c'
+ 100: 114, # 'd'
+ 101: 74, # 'e'
+ 102: 115, # 'f'
+ 103: 116, # 'g'
+ 104: 102, # 'h'
+ 105: 81, # 'i'
+ 106: 201, # 'j'
+ 107: 117, # 'k'
+ 108: 90, # 'l'
+ 109: 103, # 'm'
+ 110: 78, # 'n'
+ 111: 82, # 'o'
+ 112: 96, # 'p'
+ 113: 202, # 'q'
+ 114: 91, # 'r'
+ 115: 79, # 's'
+ 116: 84, # 't'
+ 117: 104, # 'u'
+ 118: 105, # 'v'
+ 119: 97, # 'w'
+ 120: 98, # 'x'
+ 121: 92, # 'y'
+ 122: 203, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 209, # '\x80'
+ 129: 210, # '\x81'
+ 130: 211, # '\x82'
+ 131: 212, # '\x83'
+ 132: 213, # '\x84'
+ 133: 88, # '\x85'
+ 134: 214, # '\x86'
+ 135: 215, # '\x87'
+ 136: 216, # '\x88'
+ 137: 217, # '\x89'
+ 138: 218, # '\x8a'
+ 139: 219, # '\x8b'
+ 140: 220, # '\x8c'
+ 141: 118, # '\x8d'
+ 142: 221, # '\x8e'
+ 143: 222, # '\x8f'
+ 144: 223, # '\x90'
+ 145: 224, # '\x91'
+ 146: 99, # '\x92'
+ 147: 85, # '\x93'
+ 148: 83, # '\x94'
+ 149: 225, # '\x95'
+ 150: 226, # '\x96'
+ 151: 227, # '\x97'
+ 152: 228, # '\x98'
+ 153: 229, # '\x99'
+ 154: 230, # '\x9a'
+ 155: 231, # '\x9b'
+ 156: 232, # '\x9c'
+ 157: 233, # '\x9d'
+ 158: 234, # '\x9e'
+ 159: 235, # '\x9f'
+ 160: 236, # None
+ 161: 5, # 'ก'
+ 162: 30, # 'ข'
+ 163: 237, # 'ฃ'
+ 164: 24, # 'ค'
+ 165: 238, # 'ฅ'
+ 166: 75, # 'ฆ'
+ 167: 8, # 'ง'
+ 168: 26, # 'จ'
+ 169: 52, # 'ฉ'
+ 170: 34, # 'ช'
+ 171: 51, # 'ซ'
+ 172: 119, # 'ฌ'
+ 173: 47, # 'ญ'
+ 174: 58, # 'ฎ'
+ 175: 57, # 'ฏ'
+ 176: 49, # 'ฐ'
+ 177: 53, # 'ฑ'
+ 178: 55, # 'ฒ'
+ 179: 43, # 'ณ'
+ 180: 20, # 'ด'
+ 181: 19, # 'ต'
+ 182: 44, # 'ถ'
+ 183: 14, # 'ท'
+ 184: 48, # 'ธ'
+ 185: 3, # 'น'
+ 186: 17, # 'บ'
+ 187: 25, # 'ป'
+ 188: 39, # 'ผ'
+ 189: 62, # 'ฝ'
+ 190: 31, # 'พ'
+ 191: 54, # 'ฟ'
+ 192: 45, # 'ภ'
+ 193: 9, # 'ม'
+ 194: 16, # 'ย'
+ 195: 2, # 'ร'
+ 196: 61, # 'ฤ'
+ 197: 15, # 'ล'
+ 198: 239, # 'ฦ'
+ 199: 12, # 'ว'
+ 200: 42, # 'ศ'
+ 201: 46, # 'ษ'
+ 202: 18, # 'ส'
+ 203: 21, # 'ห'
+ 204: 76, # 'ฬ'
+ 205: 4, # 'อ'
+ 206: 66, # 'ฮ'
+ 207: 63, # 'ฯ'
+ 208: 22, # 'ะ'
+ 209: 10, # 'ั'
+ 210: 1, # 'า'
+ 211: 36, # 'ำ'
+ 212: 23, # 'ิ'
+ 213: 13, # 'ี'
+ 214: 40, # 'ึ'
+ 215: 27, # 'ื'
+ 216: 32, # 'ุ'
+ 217: 35, # 'ู'
+ 218: 86, # 'ฺ'
+ 219: 240, # None
+ 220: 241, # None
+ 221: 242, # None
+ 222: 243, # None
+ 223: 244, # '฿'
+ 224: 11, # 'เ'
+ 225: 28, # 'แ'
+ 226: 41, # 'โ'
+ 227: 29, # 'ใ'
+ 228: 33, # 'ไ'
+ 229: 245, # 'ๅ'
+ 230: 50, # 'ๆ'
+ 231: 37, # '็'
+ 232: 6, # '่'
+ 233: 7, # '้'
+ 234: 67, # '๊'
+ 235: 77, # '๋'
+ 236: 38, # '์'
+ 237: 93, # 'ํ'
+ 238: 246, # '๎'
+ 239: 247, # '๏'
+ 240: 68, # '๐'
+ 241: 56, # '๑'
+ 242: 59, # '๒'
+ 243: 65, # '๓'
+ 244: 69, # '๔'
+ 245: 60, # '๕'
+ 246: 70, # '๖'
+ 247: 80, # '๗'
+ 248: 71, # '๘'
+ 249: 87, # '๙'
+ 250: 248, # '๚'
+ 251: 249, # '๛'
+ 252: 250, # None
+ 253: 251, # None
+ 254: 252, # None
+ 255: 253, # None
+}
+
+TIS_620_THAI_MODEL = SingleByteCharSetModel(charset_name='TIS-620',
+ language='Thai',
+ char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER,
+ language_model=THAI_LANG_MODEL,
+ typical_positive_ratio=0.926386,
+ keep_ascii_letters=False,
+ alphabet='กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛')
+
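Taken together, the two tables drive chardet's single-byte detection: raw bytes
are first mapped through TIS_620_THAI_CHAR_TO_ORDER, and consecutive in-range
orders are then rated by THAI_LANG_MODEL. The sketch below shows the idea with
a hypothetical helper (score_tis620 is illustrative only, not chardet API, and
it omits the real prober's state tracking and confidence arithmetic):

    # Minimal sketch: rate a TIS-620 byte string against the Thai model.
    def score_tis620(raw: bytes) -> float:
        orders = [TIS_620_THAI_CHAR_TO_ORDER[b] for b in raw]
        # Keep only frequent-character orders; 251-255 are sentinel codes.
        usable = [o for o in orders if o < 64]
        if len(usable) < 2:
            return 0.0
        # Count bigrams the model rates 3 ("positive").
        positive = sum(
            1
            for prev, cur in zip(usable, usable[1:])
            if THAI_LANG_MODEL.get(prev, {}).get(cur, 0) == 3
        )
        return positive / (len(usable) - 1)

A ratio near typical_positive_ratio (0.926386 above) is, roughly speaking, what
the real prober treats as strong evidence that the input is Thai text encoded
as TIS-620.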
diff --git a/third_party/python/chardet/chardet/langturkishmodel.py b/third_party/python/chardet/chardet/langturkishmodel.py
new file mode 100644
index 0000000000..8ba93224de
--- /dev/null
+++ b/third_party/python/chardet/chardet/langturkishmodel.py
@@ -0,0 +1,4383 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+from chardet.sbcharsetprober import SingleByteCharSetModel
+
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
+
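+# For example, in the table below TURKISH_LANG_MODEL[23][1] == 3, meaning the
+# pair 'A' followed by 'a' was a frequent ("positive") sequence in the
+# training text, while 0 marks pairs that effectively never occurred.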
+TURKISH_LANG_MODEL = {
+ 23: { # 'A'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 1, # 'i'
+ 24: 0, # 'j'
+ 10: 2, # 'k'
+ 5: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 37: { # 'B'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 2, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 47: { # 'C'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 1, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 2, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 39: { # 'D'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 1, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 29: { # 'E'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 1, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 1, # 'j'
+ 10: 0, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 1, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 52: { # 'F'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 1, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 2, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 1, # 'b'
+ 28: 1, # 'c'
+ 12: 1, # 'd'
+ 2: 0, # 'e'
+ 18: 1, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 1, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 2, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 2, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 36: { # 'G'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 2, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 2, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 1, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 1, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 0, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 1, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 45: { # 'H'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 2, # 'G'
+ 45: 1, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 1, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 2, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 1, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 0, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 53: { # 'I'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 60: { # 'J'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 0, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 1, # 's'
+ 9: 0, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 16: { # 'K'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 1, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 0, # 'u'
+ 32: 3, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 49: { # 'L'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 2, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 2, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 0, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 1, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 2, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 1, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 20: { # 'M'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 2, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 0, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 46: { # 'N'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 1, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 1, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 1, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 42: { # 'O'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 1, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 2, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 2, # 'İ'
+ 6: 1, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 48: { # 'P'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 2, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 0, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 44: { # 'R'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 1, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 1, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 35: { # 'S'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 1, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 1, # 'l'
+ 13: 2, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 1, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 2, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 31: { # 'T'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 2, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 1, # 'j'
+ 10: 2, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 2, # 'r'
+ 8: 0, # 's'
+ 9: 2, # 't'
+ 14: 2, # 'u'
+ 32: 1, # 'v'
+ 57: 1, # 'w'
+ 58: 1, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 51: { # 'U'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 1, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 1, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 38: { # 'V'
+ 23: 1, # 'A'
+ 37: 1, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 1, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 1, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 62: { # 'W'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 0, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 43: { # 'Y'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 1, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 2, # 'N'
+ 42: 0, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 1, # 'j'
+ 10: 1, # 'k'
+ 5: 1, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 1, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 0, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 56: { # 'Z'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 2, # 'Z'
+ 1: 2, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 1, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 1, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 1: { # 'a'
+ 23: 3, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 2, # 'Z'
+ 1: 2, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 2, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 3, # 'v'
+ 57: 2, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 1, # 'î'
+ 34: 1, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 21: { # 'b'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 3, # 'g'
+ 25: 1, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 1, # 'r'
+ 8: 2, # 's'
+ 9: 2, # 't'
+ 14: 2, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 28: { # 'c'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 2, # 'E'
+ 52: 0, # 'F'
+ 36: 2, # 'G'
+ 45: 2, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 2, # 'T'
+ 51: 2, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 3, # 'Y'
+ 56: 0, # 'Z'
+ 1: 1, # 'a'
+ 21: 1, # 'b'
+ 28: 2, # 'c'
+ 12: 2, # 'd'
+ 2: 1, # 'e'
+ 18: 1, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 1, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 2, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 1, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 1, # 'î'
+ 34: 2, # 'ö'
+ 17: 2, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 12: { # 'd'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 2, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 1, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 2, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 1, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 2: { # 'e'
+ 23: 2, # 'A'
+ 37: 0, # 'B'
+ 47: 2, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 2, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 3, # 'v'
+ 57: 2, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 1, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 18: { # 'f'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 1, # 'i'
+ 24: 1, # 'j'
+ 10: 1, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 1, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 1, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 27: { # 'g'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 2, # 'r'
+ 8: 2, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 25: { # 'h'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 3: { # 'i'
+ 23: 2, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 1, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 3, # 'g'
+ 25: 1, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 1, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 1, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 24: { # 'j'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 1, # 'j'
+ 10: 2, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 2, # 'r'
+ 8: 3, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 10: { # 'k'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 1, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 2, # 'r'
+ 8: 2, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 3, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 5: { # 'l'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 1, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 2, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 13: { # 'm'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 2, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 2, # 'u'
+ 32: 2, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 4: { # 'n'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 2, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 1, # 'f'
+ 27: 2, # 'g'
+ 25: 3, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 3, # 'p'
+ 7: 2, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 15: { # 'o'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 2, # 'L'
+ 20: 0, # 'M'
+ 46: 2, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 2, # 'İ'
+ 6: 3, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 26: { # 'p'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 1, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 2, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 7: { # 'r'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 2, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 1, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 3, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 8: { # 's'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 2, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 9: { # 't'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 3, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 14: { # 'u'
+ 23: 3, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 2, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 2, # 'Z'
+ 1: 2, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 2, # 'e'
+ 18: 2, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 2, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 32: { # 'v'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 1, # 'j'
+ 10: 1, # 'k'
+ 5: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 1, # 'r'
+ 8: 2, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 57: { # 'w'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 1, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 1, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 1, # 's'
+ 9: 0, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 2, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 58: { # 'x'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 1, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 1, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 2, # 's'
+ 9: 1, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 11: { # 'y'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 1, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 2, # 'r'
+ 8: 1, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 22: { # 'z'
+ 23: 2, # 'A'
+ 37: 2, # 'B'
+ 47: 1, # 'C'
+ 39: 2, # 'D'
+ 29: 3, # 'E'
+ 52: 1, # 'F'
+ 36: 2, # 'G'
+ 45: 2, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 2, # 'N'
+ 42: 2, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 3, # 'T'
+ 51: 2, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 1, # 'Z'
+ 1: 1, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 2, # 'd'
+ 2: 2, # 'e'
+ 18: 3, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 2, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 0, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 2, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 2, # 'Ü'
+ 59: 1, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 2, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 63: { # '·'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 1, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 54: { # 'Ç'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 0, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 0, # 'h'
+ 3: 3, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 2, # 'r'
+ 8: 0, # 's'
+ 9: 1, # 't'
+ 14: 0, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 2, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 50: { # 'Ö'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 2, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 2, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 1, # 'N'
+ 42: 2, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 2, # 'd'
+ 2: 0, # 'e'
+ 18: 1, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 0, # 'j'
+ 10: 2, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 3, # 'n'
+ 15: 2, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 1, # 's'
+ 9: 2, # 't'
+ 14: 0, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 2, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 55: { # 'Ü'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 1, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 59: { # 'â'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 33: { # 'ç'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 0, # 'e'
+ 18: 2, # 'f'
+ 27: 1, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 0, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 2, # 's'
+ 9: 3, # 't'
+ 14: 0, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 61: { # 'î'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 1, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 1, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 34: { # 'ö'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 1, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 2, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 1, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 3, # 's'
+ 9: 1, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 1, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 17: { # 'ü'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 1, # 'f'
+ 27: 2, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 1, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 2, # 'r'
+ 8: 3, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 1, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 30: { # 'ğ'
+ 23: 0, # 'A'
+ 37: 2, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 2, # 'N'
+ 42: 2, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 3, # 'j'
+ 10: 1, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 2, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 2, # 'İ'
+ 6: 2, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 41: { # 'İ'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 1, # 'E'
+ 52: 0, # 'F'
+ 36: 2, # 'G'
+ 45: 2, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 1, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 2, # 'd'
+ 2: 1, # 'e'
+ 18: 0, # 'f'
+ 27: 3, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 2, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 2, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 1, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 1, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 6: { # 'ı'
+ 23: 2, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 2, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 3, # 'v'
+ 57: 1, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 40: { # 'Ş'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 1, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 2, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 2, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 1, # 'Z'
+ 1: 0, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 0, # 'e'
+ 18: 3, # 'f'
+ 27: 0, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 3, # 'r'
+ 8: 2, # 's'
+ 9: 2, # 't'
+ 14: 1, # 'u'
+ 32: 3, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 1, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 19: { # 'ş'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 2, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 1, # 'h'
+ 3: 1, # 'i'
+ 24: 0, # 'j'
+ 10: 2, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 1, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 1, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+}
+
+# 255: Undefined characters that did not exist in training text
+# 254: Carriage Return
+# 253: symbol (punctuation) that does not belong to word
+# 252: 0 - 9
+# 251: Control characters
+
+# Character Mapping Table(s):
+ISO_8859_9_TURKISH_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 255, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 255, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 255, # ' '
+ 33: 255, # '!'
+ 34: 255, # '"'
+ 35: 255, # '#'
+ 36: 255, # '$'
+ 37: 255, # '%'
+ 38: 255, # '&'
+ 39: 255, # "'"
+ 40: 255, # '('
+ 41: 255, # ')'
+ 42: 255, # '*'
+ 43: 255, # '+'
+ 44: 255, # ','
+ 45: 255, # '-'
+ 46: 255, # '.'
+ 47: 255, # '/'
+ 48: 255, # '0'
+ 49: 255, # '1'
+ 50: 255, # '2'
+ 51: 255, # '3'
+ 52: 255, # '4'
+ 53: 255, # '5'
+ 54: 255, # '6'
+ 55: 255, # '7'
+ 56: 255, # '8'
+ 57: 255, # '9'
+ 58: 255, # ':'
+ 59: 255, # ';'
+ 60: 255, # '<'
+ 61: 255, # '='
+ 62: 255, # '>'
+ 63: 255, # '?'
+ 64: 255, # '@'
+ 65: 23, # 'A'
+ 66: 37, # 'B'
+ 67: 47, # 'C'
+ 68: 39, # 'D'
+ 69: 29, # 'E'
+ 70: 52, # 'F'
+ 71: 36, # 'G'
+ 72: 45, # 'H'
+ 73: 53, # 'I'
+ 74: 60, # 'J'
+ 75: 16, # 'K'
+ 76: 49, # 'L'
+ 77: 20, # 'M'
+ 78: 46, # 'N'
+ 79: 42, # 'O'
+ 80: 48, # 'P'
+ 81: 69, # 'Q'
+ 82: 44, # 'R'
+ 83: 35, # 'S'
+ 84: 31, # 'T'
+ 85: 51, # 'U'
+ 86: 38, # 'V'
+ 87: 62, # 'W'
+ 88: 65, # 'X'
+ 89: 43, # 'Y'
+ 90: 56, # 'Z'
+ 91: 255, # '['
+ 92: 255, # '\\'
+ 93: 255, # ']'
+ 94: 255, # '^'
+ 95: 255, # '_'
+ 96: 255, # '`'
+ 97: 1, # 'a'
+ 98: 21, # 'b'
+ 99: 28, # 'c'
+ 100: 12, # 'd'
+ 101: 2, # 'e'
+ 102: 18, # 'f'
+ 103: 27, # 'g'
+ 104: 25, # 'h'
+ 105: 3, # 'i'
+ 106: 24, # 'j'
+ 107: 10, # 'k'
+ 108: 5, # 'l'
+ 109: 13, # 'm'
+ 110: 4, # 'n'
+ 111: 15, # 'o'
+ 112: 26, # 'p'
+ 113: 64, # 'q'
+ 114: 7, # 'r'
+ 115: 8, # 's'
+ 116: 9, # 't'
+ 117: 14, # 'u'
+ 118: 32, # 'v'
+ 119: 57, # 'w'
+ 120: 58, # 'x'
+ 121: 11, # 'y'
+ 122: 22, # 'z'
+ 123: 255, # '{'
+ 124: 255, # '|'
+ 125: 255, # '}'
+ 126: 255, # '~'
+ 127: 255, # '\x7f'
+ 128: 180, # '\x80'
+ 129: 179, # '\x81'
+ 130: 178, # '\x82'
+ 131: 177, # '\x83'
+ 132: 176, # '\x84'
+ 133: 175, # '\x85'
+ 134: 174, # '\x86'
+ 135: 173, # '\x87'
+ 136: 172, # '\x88'
+ 137: 171, # '\x89'
+ 138: 170, # '\x8a'
+ 139: 169, # '\x8b'
+ 140: 168, # '\x8c'
+ 141: 167, # '\x8d'
+ 142: 166, # '\x8e'
+ 143: 165, # '\x8f'
+ 144: 164, # '\x90'
+ 145: 163, # '\x91'
+ 146: 162, # '\x92'
+ 147: 161, # '\x93'
+ 148: 160, # '\x94'
+ 149: 159, # '\x95'
+ 150: 101, # '\x96'
+ 151: 158, # '\x97'
+ 152: 157, # '\x98'
+ 153: 156, # '\x99'
+ 154: 155, # '\x9a'
+ 155: 154, # '\x9b'
+ 156: 153, # '\x9c'
+ 157: 152, # '\x9d'
+ 158: 151, # '\x9e'
+ 159: 106, # '\x9f'
+ 160: 150, # '\xa0'
+ 161: 149, # '¡'
+ 162: 148, # '¢'
+ 163: 147, # '£'
+ 164: 146, # '¤'
+ 165: 145, # '¥'
+ 166: 144, # '¦'
+ 167: 100, # '§'
+ 168: 143, # '¨'
+ 169: 142, # '©'
+ 170: 141, # 'ª'
+ 171: 140, # '«'
+ 172: 139, # '¬'
+ 173: 138, # '\xad'
+ 174: 137, # '®'
+ 175: 136, # '¯'
+ 176: 94, # '°'
+ 177: 80, # '±'
+ 178: 93, # '²'
+ 179: 135, # '³'
+ 180: 105, # '´'
+ 181: 134, # 'µ'
+ 182: 133, # '¶'
+ 183: 63, # '·'
+ 184: 132, # '¸'
+ 185: 131, # '¹'
+ 186: 130, # 'º'
+ 187: 129, # '»'
+ 188: 128, # '¼'
+ 189: 127, # '½'
+ 190: 126, # '¾'
+ 191: 125, # '¿'
+ 192: 124, # 'À'
+ 193: 104, # 'Á'
+ 194: 73, # 'Â'
+ 195: 99, # 'Ã'
+ 196: 79, # 'Ä'
+ 197: 85, # 'Å'
+ 198: 123, # 'Æ'
+ 199: 54, # 'Ç'
+ 200: 122, # 'È'
+ 201: 98, # 'É'
+ 202: 92, # 'Ê'
+ 203: 121, # 'Ë'
+ 204: 120, # 'Ì'
+ 205: 91, # 'Í'
+ 206: 103, # 'Î'
+ 207: 119, # 'Ï'
+ 208: 68, # 'Ğ'
+ 209: 118, # 'Ñ'
+ 210: 117, # 'Ò'
+ 211: 97, # 'Ó'
+ 212: 116, # 'Ô'
+ 213: 115, # 'Õ'
+ 214: 50, # 'Ö'
+ 215: 90, # '×'
+ 216: 114, # 'Ø'
+ 217: 113, # 'Ù'
+ 218: 112, # 'Ú'
+ 219: 111, # 'Û'
+ 220: 55, # 'Ü'
+ 221: 41, # 'İ'
+ 222: 40, # 'Ş'
+ 223: 86, # 'ß'
+ 224: 89, # 'à'
+ 225: 70, # 'á'
+ 226: 59, # 'â'
+ 227: 78, # 'ã'
+ 228: 71, # 'ä'
+ 229: 82, # 'å'
+ 230: 88, # 'æ'
+ 231: 33, # 'ç'
+ 232: 77, # 'è'
+ 233: 66, # 'é'
+ 234: 84, # 'ê'
+ 235: 83, # 'ë'
+ 236: 110, # 'ì'
+ 237: 75, # 'í'
+ 238: 61, # 'î'
+ 239: 96, # 'ï'
+ 240: 30, # 'ğ'
+ 241: 67, # 'ñ'
+ 242: 109, # 'ò'
+ 243: 74, # 'ó'
+ 244: 87, # 'ô'
+ 245: 102, # 'õ'
+ 246: 34, # 'ö'
+ 247: 95, # '÷'
+ 248: 81, # 'ø'
+ 249: 108, # 'ù'
+ 250: 76, # 'ú'
+ 251: 72, # 'û'
+ 252: 17, # 'ü'
+ 253: 6, # 'ı'
+ 254: 19, # 'ş'
+ 255: 107, # 'ÿ'
+}
+
+ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(charset_name='ISO-8859-9',
+ language='Turkish',
+ char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER,
+ language_model=TURKISH_LANG_MODEL,
+ typical_positive_ratio=0.97029,
+ keep_ascii_letters=True,
+ alphabet='ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş')
+
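For orientation, a minimal sketch (under Python 3, assuming the vendored package is importable as `chardet`; the sample string is illustrative only) of how the two tables above fit together: the char-to-order map turns each byte into a frequency rank, and adjacent ranks index the bigram likelihood table (0 = negative ... 3 = positive), which is what ultimately drives the prober's confidence.

    from chardet.langturkishmodel import ISO_8859_9_TURKISH_MODEL

    text = u'günaydın'.encode('iso-8859-9')
    # Map each byte to its frequency order (lower = more frequent).
    orders = [ISO_8859_9_TURKISH_MODEL.char_to_order_map[b] for b in text]
    # Each adjacent pair of orders indexes the bigram likelihood table.
    for prev, cur in zip(orders, orders[1:]):
        print(prev, cur, ISO_8859_9_TURKISH_MODEL.language_model[prev][cur])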
diff --git a/third_party/python/chardet/chardet/latin1prober.py b/third_party/python/chardet/chardet/latin1prober.py
new file mode 100644
index 0000000000..7d1e8c20fb
--- /dev/null
+++ b/third_party/python/chardet/chardet/latin1prober.py
@@ -0,0 +1,145 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+FREQ_CAT_NUM = 4
+
+UDF = 0 # undefined
+OTH = 1 # other
+ASC = 2 # ascii capital letter
+ASS = 3 # ascii small letter
+ACV = 4 # accent capital vowel
+ACO = 5 # accent capital other
+ASV = 6 # accent small vowel
+ASO = 7 # accent small other
+CLASS_NUM = 8 # total classes
+
+Latin1_CharToClass = (
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
+ OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
+ ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
+ ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
+ ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
+ OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
+ ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
+ ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
+ ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
+ OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
+ OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
+ UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
+ OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
+ ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
+ ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
+ ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
+ ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
+ ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
+ ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
+ ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
+ ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
+)
+
+# 0 : illegal
+# 1 : very unlikely
+# 2 : normal
+# 3 : very likely
+Latin1ClassModel = (
+# UDF OTH ASC ASS ACV ACO ASV ASO
+ 0, 0, 0, 0, 0, 0, 0, 0, # UDF
+ 0, 3, 3, 3, 3, 3, 3, 3, # OTH
+ 0, 3, 3, 3, 3, 3, 3, 3, # ASC
+ 0, 3, 3, 3, 1, 1, 3, 3, # ASS
+ 0, 3, 3, 3, 1, 2, 1, 2, # ACV
+ 0, 3, 3, 3, 3, 3, 3, 3, # ACO
+ 0, 3, 1, 3, 1, 1, 1, 3, # ASV
+ 0, 3, 1, 3, 1, 1, 3, 3, # ASO
+)
+
+
+class Latin1Prober(CharSetProber):
+ def __init__(self):
+ super(Latin1Prober, self).__init__()
+ self._last_char_class = None
+ self._freq_counter = None
+ self.reset()
+
+ def reset(self):
+ self._last_char_class = OTH
+ self._freq_counter = [0] * FREQ_CAT_NUM
+ CharSetProber.reset(self)
+
+ @property
+ def charset_name(self):
+ return "ISO-8859-1"
+
+ @property
+ def language(self):
+ return ""
+
+ def feed(self, byte_str):
+ byte_str = self.filter_with_english_letters(byte_str)
+ for c in byte_str:
+ char_class = Latin1_CharToClass[c]
+ freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM)
+ + char_class]
+ if freq == 0:
+ self._state = ProbingState.NOT_ME
+ break
+ self._freq_counter[freq] += 1
+ self._last_char_class = char_class
+
+ return self.state
+
+ def get_confidence(self):
+ if self.state == ProbingState.NOT_ME:
+ return 0.01
+
+ total = sum(self._freq_counter)
+ if total < 0.01:
+ confidence = 0.0
+ else:
+ confidence = ((self._freq_counter[3] - self._freq_counter[1] * 20.0)
+ / total)
+ if confidence < 0.0:
+ confidence = 0.0
+ # lower the confidence of latin1 so that other more accurate
+        # detectors can take priority.
+ confidence = confidence * 0.73
+ return confidence
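A quick usage sketch, illustrative only (assumes the vendored package is importable as `chardet`): the raw score is (very-likely count minus 20 x very-unlikely count) over the total, then damped by 0.73 as above, so the result is at most 0.73.

    from chardet.latin1prober import Latin1Prober

    prober = Latin1Prober()
    prober.feed(u'déjà vu, naïveté'.encode('latin-1'))
    # Prints "ISO-8859-1" and the damped confidence.
    print(prober.charset_name, prober.get_confidence())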
diff --git a/third_party/python/chardet/chardet/mbcharsetprober.py b/third_party/python/chardet/chardet/mbcharsetprober.py
new file mode 100644
index 0000000000..6256ecfd1e
--- /dev/null
+++ b/third_party/python/chardet/chardet/mbcharsetprober.py
@@ -0,0 +1,91 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+# Proofpoint, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState, MachineState
+
+
+class MultiByteCharSetProber(CharSetProber):
+ """
+ MultiByteCharSetProber
+ """
+
+ def __init__(self, lang_filter=None):
+ super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
+ self.distribution_analyzer = None
+ self.coding_sm = None
+ self._last_char = [0, 0]
+
+ def reset(self):
+ super(MultiByteCharSetProber, self).reset()
+ if self.coding_sm:
+ self.coding_sm.reset()
+ if self.distribution_analyzer:
+ self.distribution_analyzer.reset()
+ self._last_char = [0, 0]
+
+ @property
+ def charset_name(self):
+ raise NotImplementedError
+
+ @property
+ def language(self):
+ raise NotImplementedError
+
+ def feed(self, byte_str):
+ for i in range(len(byte_str)):
+ coding_state = self.coding_sm.next_state(byte_str[i])
+ if coding_state == MachineState.ERROR:
+ self.logger.debug('%s %s prober hit error at byte %s',
+ self.charset_name, self.language, i)
+ self._state = ProbingState.NOT_ME
+ break
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ elif coding_state == MachineState.START:
+ char_len = self.coding_sm.get_current_charlen()
+ if i == 0:
+ self._last_char[1] = byte_str[0]
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+ char_len)
+
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if (self.distribution_analyzer.got_enough_data() and
+ (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self):
+ return self.distribution_analyzer.get_confidence()
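Concrete subclasses (SJISProber, Big5Prober, and so on) supply the coding state machine and distribution analyzer that this feed loop drives. A usage sketch, illustrative only, with the vendored package on the path:

    from chardet.big5prober import Big5Prober

    prober = Big5Prober()
    prober.feed(u'繁體中文網頁'.encode('big5'))
    # The state machine validates byte sequences while the distribution
    # analyzer scores character frequencies.
    print(prober.charset_name, prober.state, prober.get_confidence())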
diff --git a/third_party/python/chardet/chardet/mbcsgroupprober.py b/third_party/python/chardet/chardet/mbcsgroupprober.py
new file mode 100644
index 0000000000..530abe75e0
--- /dev/null
+++ b/third_party/python/chardet/chardet/mbcsgroupprober.py
@@ -0,0 +1,54 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+# Proofpoint, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetgroupprober import CharSetGroupProber
+from .utf8prober import UTF8Prober
+from .sjisprober import SJISProber
+from .eucjpprober import EUCJPProber
+from .gb2312prober import GB2312Prober
+from .euckrprober import EUCKRProber
+from .cp949prober import CP949Prober
+from .big5prober import Big5Prober
+from .euctwprober import EUCTWProber
+
+
+class MBCSGroupProber(CharSetGroupProber):
+ def __init__(self, lang_filter=None):
+ super(MBCSGroupProber, self).__init__(lang_filter=lang_filter)
+ self.probers = [
+ UTF8Prober(),
+ SJISProber(),
+ EUCJPProber(),
+ GB2312Prober(),
+ EUCKRProber(),
+ CP949Prober(),
+ Big5Prober(),
+ EUCTWProber()
+ ]
+ self.reset()
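All of these candidate probers run in parallel over the same input, and the group reports whichever one is most confident. The top-level API exercises this group indirectly via UniversalDetector (illustrative only; the detected result for such a short snippet may vary):

    import chardet

    print(chardet.detect(u'こんにちは世界'.encode('shift_jis')))
    # -> a dict with 'encoding', 'confidence', and 'language' keys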
diff --git a/third_party/python/chardet/chardet/mbcssm.py b/third_party/python/chardet/chardet/mbcssm.py
new file mode 100644
index 0000000000..8360d0f284
--- /dev/null
+++ b/third_party/python/chardet/chardet/mbcssm.py
@@ -0,0 +1,572 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .enums import MachineState
+
+# BIG5
+
+BIG5_CLS = (
+    1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 1,1,1,1,1,1,1,1, # 30 - 37
+ 1,1,1,1,1,1,1,1, # 38 - 3f
+ 2,2,2,2,2,2,2,2, # 40 - 47
+ 2,2,2,2,2,2,2,2, # 48 - 4f
+ 2,2,2,2,2,2,2,2, # 50 - 57
+ 2,2,2,2,2,2,2,2, # 58 - 5f
+ 2,2,2,2,2,2,2,2, # 60 - 67
+ 2,2,2,2,2,2,2,2, # 68 - 6f
+ 2,2,2,2,2,2,2,2, # 70 - 77
+ 2,2,2,2,2,2,2,1, # 78 - 7f
+ 4,4,4,4,4,4,4,4, # 80 - 87
+ 4,4,4,4,4,4,4,4, # 88 - 8f
+ 4,4,4,4,4,4,4,4, # 90 - 97
+ 4,4,4,4,4,4,4,4, # 98 - 9f
+ 4,3,3,3,3,3,3,3, # a0 - a7
+ 3,3,3,3,3,3,3,3, # a8 - af
+ 3,3,3,3,3,3,3,3, # b0 - b7
+ 3,3,3,3,3,3,3,3, # b8 - bf
+ 3,3,3,3,3,3,3,3, # c0 - c7
+ 3,3,3,3,3,3,3,3, # c8 - cf
+ 3,3,3,3,3,3,3,3, # d0 - d7
+ 3,3,3,3,3,3,3,3, # d8 - df
+ 3,3,3,3,3,3,3,3, # e0 - e7
+ 3,3,3,3,3,3,3,3, # e8 - ef
+ 3,3,3,3,3,3,3,3, # f0 - f7
+ 3,3,3,3,3,3,3,0 # f8 - ff
+)
+
+BIG5_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
+)
+
+BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
+
+BIG5_SM_MODEL = {'class_table': BIG5_CLS,
+ 'class_factor': 5,
+ 'state_table': BIG5_ST,
+ 'char_len_table': BIG5_CHAR_LEN_TABLE,
+ 'name': 'Big5'}
+
+# CP949
+
+CP949_CLS = (
+ 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
+ 1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
+ 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
+ 1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
+ 1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
+ 4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
+ 1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
+ 5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
+ 0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
+ 6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
+ 6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
+ 7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
+ 7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
+ 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
+ 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
+ 2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
+)
+
+CP949_ST = (
+#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
+ MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
+)
+
+CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
+
+CP949_SM_MODEL = {'class_table': CP949_CLS,
+ 'class_factor': 10,
+ 'state_table': CP949_ST,
+ 'char_len_table': CP949_CHAR_LEN_TABLE,
+ 'name': 'CP949'}
+
+# EUC-JP
+
+EUCJP_CLS = (
+ 4,4,4,4,4,4,4,4, # 00 - 07
+ 4,4,4,4,4,4,5,5, # 08 - 0f
+ 4,4,4,4,4,4,4,4, # 10 - 17
+ 4,4,4,5,4,4,4,4, # 18 - 1f
+ 4,4,4,4,4,4,4,4, # 20 - 27
+ 4,4,4,4,4,4,4,4, # 28 - 2f
+ 4,4,4,4,4,4,4,4, # 30 - 37
+ 4,4,4,4,4,4,4,4, # 38 - 3f
+ 4,4,4,4,4,4,4,4, # 40 - 47
+ 4,4,4,4,4,4,4,4, # 48 - 4f
+ 4,4,4,4,4,4,4,4, # 50 - 57
+ 4,4,4,4,4,4,4,4, # 58 - 5f
+ 4,4,4,4,4,4,4,4, # 60 - 67
+ 4,4,4,4,4,4,4,4, # 68 - 6f
+ 4,4,4,4,4,4,4,4, # 70 - 77
+ 4,4,4,4,4,4,4,4, # 78 - 7f
+ 5,5,5,5,5,5,5,5, # 80 - 87
+ 5,5,5,5,5,5,1,3, # 88 - 8f
+ 5,5,5,5,5,5,5,5, # 90 - 97
+ 5,5,5,5,5,5,5,5, # 98 - 9f
+ 5,2,2,2,2,2,2,2, # a0 - a7
+ 2,2,2,2,2,2,2,2, # a8 - af
+ 2,2,2,2,2,2,2,2, # b0 - b7
+ 2,2,2,2,2,2,2,2, # b8 - bf
+ 2,2,2,2,2,2,2,2, # c0 - c7
+ 2,2,2,2,2,2,2,2, # c8 - cf
+ 2,2,2,2,2,2,2,2, # d0 - d7
+ 2,2,2,2,2,2,2,2, # d8 - df
+ 0,0,0,0,0,0,0,0, # e0 - e7
+ 0,0,0,0,0,0,0,0, # e8 - ef
+ 0,0,0,0,0,0,0,0, # f0 - f7
+ 0,0,0,0,0,0,0,5 # f8 - ff
+)
+
+EUCJP_ST = (
+ 3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
+ 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
+)
+
+EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
+
+EUCJP_SM_MODEL = {'class_table': EUCJP_CLS,
+ 'class_factor': 6,
+ 'state_table': EUCJP_ST,
+ 'char_len_table': EUCJP_CHAR_LEN_TABLE,
+ 'name': 'EUC-JP'}
+
+# EUC-KR
+
+EUCKR_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 1,1,1,1,1,1,1,1, # 30 - 37
+ 1,1,1,1,1,1,1,1, # 38 - 3f
+ 1,1,1,1,1,1,1,1, # 40 - 47
+ 1,1,1,1,1,1,1,1, # 48 - 4f
+ 1,1,1,1,1,1,1,1, # 50 - 57
+ 1,1,1,1,1,1,1,1, # 58 - 5f
+ 1,1,1,1,1,1,1,1, # 60 - 67
+ 1,1,1,1,1,1,1,1, # 68 - 6f
+ 1,1,1,1,1,1,1,1, # 70 - 77
+ 1,1,1,1,1,1,1,1, # 78 - 7f
+ 0,0,0,0,0,0,0,0, # 80 - 87
+ 0,0,0,0,0,0,0,0, # 88 - 8f
+ 0,0,0,0,0,0,0,0, # 90 - 97
+ 0,0,0,0,0,0,0,0, # 98 - 9f
+ 0,2,2,2,2,2,2,2, # a0 - a7
+ 2,2,2,2,2,3,3,3, # a8 - af
+ 2,2,2,2,2,2,2,2, # b0 - b7
+ 2,2,2,2,2,2,2,2, # b8 - bf
+ 2,2,2,2,2,2,2,2, # c0 - c7
+ 2,3,2,2,2,2,2,2, # c8 - cf
+ 2,2,2,2,2,2,2,2, # d0 - d7
+ 2,2,2,2,2,2,2,2, # d8 - df
+ 2,2,2,2,2,2,2,2, # e0 - e7
+ 2,2,2,2,2,2,2,2, # e8 - ef
+ 2,2,2,2,2,2,2,2, # f0 - f7
+ 2,2,2,2,2,2,2,0 # f8 - ff
+)
+
+EUCKR_ST = (
+ MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
+)
+
+EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
+
+EUCKR_SM_MODEL = {'class_table': EUCKR_CLS,
+ 'class_factor': 4,
+ 'state_table': EUCKR_ST,
+ 'char_len_table': EUCKR_CHAR_LEN_TABLE,
+ 'name': 'EUC-KR'}
+
+# EUC-TW
+
+EUCTW_CLS = (
+ 2,2,2,2,2,2,2,2, # 00 - 07
+ 2,2,2,2,2,2,0,0, # 08 - 0f
+ 2,2,2,2,2,2,2,2, # 10 - 17
+ 2,2,2,0,2,2,2,2, # 18 - 1f
+ 2,2,2,2,2,2,2,2, # 20 - 27
+ 2,2,2,2,2,2,2,2, # 28 - 2f
+ 2,2,2,2,2,2,2,2, # 30 - 37
+ 2,2,2,2,2,2,2,2, # 38 - 3f
+ 2,2,2,2,2,2,2,2, # 40 - 47
+ 2,2,2,2,2,2,2,2, # 48 - 4f
+ 2,2,2,2,2,2,2,2, # 50 - 57
+ 2,2,2,2,2,2,2,2, # 58 - 5f
+ 2,2,2,2,2,2,2,2, # 60 - 67
+ 2,2,2,2,2,2,2,2, # 68 - 6f
+ 2,2,2,2,2,2,2,2, # 70 - 77
+ 2,2,2,2,2,2,2,2, # 78 - 7f
+ 0,0,0,0,0,0,0,0, # 80 - 87
+ 0,0,0,0,0,0,6,0, # 88 - 8f
+ 0,0,0,0,0,0,0,0, # 90 - 97
+ 0,0,0,0,0,0,0,0, # 98 - 9f
+ 0,3,4,4,4,4,4,4, # a0 - a7
+ 5,5,1,1,1,1,1,1, # a8 - af
+ 1,1,1,1,1,1,1,1, # b0 - b7
+ 1,1,1,1,1,1,1,1, # b8 - bf
+ 1,1,3,1,3,3,3,3, # c0 - c7
+ 3,3,3,3,3,3,3,3, # c8 - cf
+ 3,3,3,3,3,3,3,3, # d0 - d7
+ 3,3,3,3,3,3,3,3, # d8 - df
+ 3,3,3,3,3,3,3,3, # e0 - e7
+ 3,3,3,3,3,3,3,3, # e8 - ef
+ 3,3,3,3,3,3,3,3, # f0 - f7
+ 3,3,3,3,3,3,3,0 # f8 - ff
+)
+
+EUCTW_ST = (
+ MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
+ MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
+ MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
+)
+
+EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
+
+EUCTW_SM_MODEL = {'class_table': EUCTW_CLS,
+ 'class_factor': 7,
+ 'state_table': EUCTW_ST,
+ 'char_len_table': EUCTW_CHAR_LEN_TABLE,
+ 'name': 'x-euc-tw'}
+
+# GB2312
+
+GB2312_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 3,3,3,3,3,3,3,3, # 30 - 37
+ 3,3,1,1,1,1,1,1, # 38 - 3f
+ 2,2,2,2,2,2,2,2, # 40 - 47
+ 2,2,2,2,2,2,2,2, # 48 - 4f
+ 2,2,2,2,2,2,2,2, # 50 - 57
+ 2,2,2,2,2,2,2,2, # 58 - 5f
+ 2,2,2,2,2,2,2,2, # 60 - 67
+ 2,2,2,2,2,2,2,2, # 68 - 6f
+ 2,2,2,2,2,2,2,2, # 70 - 77
+ 2,2,2,2,2,2,2,4, # 78 - 7f
+ 5,6,6,6,6,6,6,6, # 80 - 87
+ 6,6,6,6,6,6,6,6, # 88 - 8f
+ 6,6,6,6,6,6,6,6, # 90 - 97
+ 6,6,6,6,6,6,6,6, # 98 - 9f
+ 6,6,6,6,6,6,6,6, # a0 - a7
+ 6,6,6,6,6,6,6,6, # a8 - af
+ 6,6,6,6,6,6,6,6, # b0 - b7
+ 6,6,6,6,6,6,6,6, # b8 - bf
+ 6,6,6,6,6,6,6,6, # c0 - c7
+ 6,6,6,6,6,6,6,6, # c8 - cf
+ 6,6,6,6,6,6,6,6, # d0 - d7
+ 6,6,6,6,6,6,6,6, # d8 - df
+ 6,6,6,6,6,6,6,6, # e0 - e7
+ 6,6,6,6,6,6,6,6, # e8 - ef
+ 6,6,6,6,6,6,6,6, # f0 - f7
+ 6,6,6,6,6,6,6,0 # f8 - ff
+)
+
+GB2312_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
+ 4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
+)
+
+# To be accurate, the length of class 6 can be either 2 or 4.
+# But it is not necessary to discriminate between the two since
+# it is used for frequency analysis only, and we are validating
+# each code range there as well. So it is safe to set it to be
+# 2 here.
+GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
+
+GB2312_SM_MODEL = {'class_table': GB2312_CLS,
+ 'class_factor': 7,
+ 'state_table': GB2312_ST,
+ 'char_len_table': GB2312_CHAR_LEN_TABLE,
+ 'name': 'GB2312'}
+
+# Shift_JIS
+
+SJIS_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 1,1,1,1,1,1,1,1, # 30 - 37
+ 1,1,1,1,1,1,1,1, # 38 - 3f
+ 2,2,2,2,2,2,2,2, # 40 - 47
+ 2,2,2,2,2,2,2,2, # 48 - 4f
+ 2,2,2,2,2,2,2,2, # 50 - 57
+ 2,2,2,2,2,2,2,2, # 58 - 5f
+ 2,2,2,2,2,2,2,2, # 60 - 67
+ 2,2,2,2,2,2,2,2, # 68 - 6f
+ 2,2,2,2,2,2,2,2, # 70 - 77
+ 2,2,2,2,2,2,2,1, # 78 - 7f
+ 3,3,3,3,3,2,2,3, # 80 - 87
+ 3,3,3,3,3,3,3,3, # 88 - 8f
+ 3,3,3,3,3,3,3,3, # 90 - 97
+ 3,3,3,3,3,3,3,3, # 98 - 9f
+    # 0xa0 is illegal in Shift_JIS, but some pages do
+    # contain such bytes, so we need to be more forgiving here.
+ 2,2,2,2,2,2,2,2, # a0 - a7
+ 2,2,2,2,2,2,2,2, # a8 - af
+ 2,2,2,2,2,2,2,2, # b0 - b7
+ 2,2,2,2,2,2,2,2, # b8 - bf
+ 2,2,2,2,2,2,2,2, # c0 - c7
+ 2,2,2,2,2,2,2,2, # c8 - cf
+ 2,2,2,2,2,2,2,2, # d0 - d7
+ 2,2,2,2,2,2,2,2, # d8 - df
+ 3,3,3,3,3,3,3,3, # e0 - e7
+ 3,3,3,3,3,4,4,4, # e8 - ef
+ 3,3,3,3,3,3,3,3, # f0 - f7
+ 3,3,3,3,3,0,0,0) # f8 - ff
+
+
+SJIS_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
+)
+
+SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
+
+SJIS_SM_MODEL = {'class_table': SJIS_CLS,
+ 'class_factor': 6,
+ 'state_table': SJIS_ST,
+ 'char_len_table': SJIS_CHAR_LEN_TABLE,
+ 'name': 'Shift_JIS'}
+
+# UCS2-BE
+
+UCS2BE_CLS = (
+ 0,0,0,0,0,0,0,0, # 00 - 07
+ 0,0,1,0,0,2,0,0, # 08 - 0f
+ 0,0,0,0,0,0,0,0, # 10 - 17
+ 0,0,0,3,0,0,0,0, # 18 - 1f
+ 0,0,0,0,0,0,0,0, # 20 - 27
+ 0,3,3,3,3,3,0,0, # 28 - 2f
+ 0,0,0,0,0,0,0,0, # 30 - 37
+ 0,0,0,0,0,0,0,0, # 38 - 3f
+ 0,0,0,0,0,0,0,0, # 40 - 47
+ 0,0,0,0,0,0,0,0, # 48 - 4f
+ 0,0,0,0,0,0,0,0, # 50 - 57
+ 0,0,0,0,0,0,0,0, # 58 - 5f
+ 0,0,0,0,0,0,0,0, # 60 - 67
+ 0,0,0,0,0,0,0,0, # 68 - 6f
+ 0,0,0,0,0,0,0,0, # 70 - 77
+ 0,0,0,0,0,0,0,0, # 78 - 7f
+ 0,0,0,0,0,0,0,0, # 80 - 87
+ 0,0,0,0,0,0,0,0, # 88 - 8f
+ 0,0,0,0,0,0,0,0, # 90 - 97
+ 0,0,0,0,0,0,0,0, # 98 - 9f
+ 0,0,0,0,0,0,0,0, # a0 - a7
+ 0,0,0,0,0,0,0,0, # a8 - af
+ 0,0,0,0,0,0,0,0, # b0 - b7
+ 0,0,0,0,0,0,0,0, # b8 - bf
+ 0,0,0,0,0,0,0,0, # c0 - c7
+ 0,0,0,0,0,0,0,0, # c8 - cf
+ 0,0,0,0,0,0,0,0, # d0 - d7
+ 0,0,0,0,0,0,0,0, # d8 - df
+ 0,0,0,0,0,0,0,0, # e0 - e7
+ 0,0,0,0,0,0,0,0, # e8 - ef
+ 0,0,0,0,0,0,0,0, # f0 - f7
+ 0,0,0,0,0,0,4,5 # f8 - ff
+)
+
+UCS2BE_ST = (
+ 5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
+ 6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
+ 6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
+ 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
+ 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
+)
+
+UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
+
+UCS2BE_SM_MODEL = {'class_table': UCS2BE_CLS,
+ 'class_factor': 6,
+ 'state_table': UCS2BE_ST,
+ 'char_len_table': UCS2BE_CHAR_LEN_TABLE,
+ 'name': 'UTF-16BE'}
+
+# UCS2-LE
+
+UCS2LE_CLS = (
+ 0,0,0,0,0,0,0,0, # 00 - 07
+ 0,0,1,0,0,2,0,0, # 08 - 0f
+ 0,0,0,0,0,0,0,0, # 10 - 17
+ 0,0,0,3,0,0,0,0, # 18 - 1f
+ 0,0,0,0,0,0,0,0, # 20 - 27
+ 0,3,3,3,3,3,0,0, # 28 - 2f
+ 0,0,0,0,0,0,0,0, # 30 - 37
+ 0,0,0,0,0,0,0,0, # 38 - 3f
+ 0,0,0,0,0,0,0,0, # 40 - 47
+ 0,0,0,0,0,0,0,0, # 48 - 4f
+ 0,0,0,0,0,0,0,0, # 50 - 57
+ 0,0,0,0,0,0,0,0, # 58 - 5f
+ 0,0,0,0,0,0,0,0, # 60 - 67
+ 0,0,0,0,0,0,0,0, # 68 - 6f
+ 0,0,0,0,0,0,0,0, # 70 - 77
+ 0,0,0,0,0,0,0,0, # 78 - 7f
+ 0,0,0,0,0,0,0,0, # 80 - 87
+ 0,0,0,0,0,0,0,0, # 88 - 8f
+ 0,0,0,0,0,0,0,0, # 90 - 97
+ 0,0,0,0,0,0,0,0, # 98 - 9f
+ 0,0,0,0,0,0,0,0, # a0 - a7
+ 0,0,0,0,0,0,0,0, # a8 - af
+ 0,0,0,0,0,0,0,0, # b0 - b7
+ 0,0,0,0,0,0,0,0, # b8 - bf
+ 0,0,0,0,0,0,0,0, # c0 - c7
+ 0,0,0,0,0,0,0,0, # c8 - cf
+ 0,0,0,0,0,0,0,0, # d0 - d7
+ 0,0,0,0,0,0,0,0, # d8 - df
+ 0,0,0,0,0,0,0,0, # e0 - e7
+ 0,0,0,0,0,0,0,0, # e8 - ef
+ 0,0,0,0,0,0,0,0, # f0 - f7
+ 0,0,0,0,0,0,4,5 # f8 - ff
+)
+
+UCS2LE_ST = (
+ 6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
+ 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
+ 7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
+ 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
+ 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
+)
+
+UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
+
+UCS2LE_SM_MODEL = {'class_table': UCS2LE_CLS,
+ 'class_factor': 6,
+ 'state_table': UCS2LE_ST,
+ 'char_len_table': UCS2LE_CHAR_LEN_TABLE,
+ 'name': 'UTF-16LE'}
+
+# UTF-8
+
+UTF8_CLS = (
+ 1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
+ 1,1,1,1,1,1,0,0, # 08 - 0f
+ 1,1,1,1,1,1,1,1, # 10 - 17
+ 1,1,1,0,1,1,1,1, # 18 - 1f
+ 1,1,1,1,1,1,1,1, # 20 - 27
+ 1,1,1,1,1,1,1,1, # 28 - 2f
+ 1,1,1,1,1,1,1,1, # 30 - 37
+ 1,1,1,1,1,1,1,1, # 38 - 3f
+ 1,1,1,1,1,1,1,1, # 40 - 47
+ 1,1,1,1,1,1,1,1, # 48 - 4f
+ 1,1,1,1,1,1,1,1, # 50 - 57
+ 1,1,1,1,1,1,1,1, # 58 - 5f
+ 1,1,1,1,1,1,1,1, # 60 - 67
+ 1,1,1,1,1,1,1,1, # 68 - 6f
+ 1,1,1,1,1,1,1,1, # 70 - 77
+ 1,1,1,1,1,1,1,1, # 78 - 7f
+ 2,2,2,2,3,3,3,3, # 80 - 87
+ 4,4,4,4,4,4,4,4, # 88 - 8f
+ 4,4,4,4,4,4,4,4, # 90 - 97
+ 4,4,4,4,4,4,4,4, # 98 - 9f
+ 5,5,5,5,5,5,5,5, # a0 - a7
+ 5,5,5,5,5,5,5,5, # a8 - af
+ 5,5,5,5,5,5,5,5, # b0 - b7
+ 5,5,5,5,5,5,5,5, # b8 - bf
+ 0,0,6,6,6,6,6,6, # c0 - c7
+ 6,6,6,6,6,6,6,6, # c8 - cf
+ 6,6,6,6,6,6,6,6, # d0 - d7
+ 6,6,6,6,6,6,6,6, # d8 - df
+ 7,8,8,8,8,8,8,8, # e0 - e7
+ 8,8,8,8,8,9,8,8, # e8 - ef
+ 10,11,11,11,11,11,11,11, # f0 - f7
+ 12,13,13,13,14,15,0,0 # f8 - ff
+)
+
+UTF8_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
+ 9, 11, 8, 7, 6, 5, 4, 3,#08-0f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
+ MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
+ MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
+ MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
+ MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
+ MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
+)
+
+UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
+
+UTF8_SM_MODEL = {'class_table': UTF8_CLS,
+ 'class_factor': 16,
+ 'state_table': UTF8_ST,
+ 'char_len_table': UTF8_CHAR_LEN_TABLE,
+ 'name': 'UTF-8'}
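Every *_SM_MODEL above feeds the same table-driven machine: a byte is first mapped to a class via class_table, then the next state is read from state_table at (current_state * class_factor + class). A minimal sketch of that step, mirroring chardet's CodingStateMachine (simplified; the real class also tracks the current character length via char_len_table):

    from chardet.enums import MachineState
    from chardet.mbcssm import UTF8_SM_MODEL

    def next_state(model, state, byte):
        # Flattened 2-D lookup: rows are states, columns are byte classes.
        byte_class = model['class_table'][byte]
        return model['state_table'][state * model['class_factor'] + byte_class]

    state = MachineState.START
    for b in b'\xc3\xa9':  # 'é' in UTF-8: lead byte, then continuation
        state = next_state(UTF8_SM_MODEL, state, b)
    print(state == MachineState.START)  # True: one complete 2-byte sequence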
diff --git a/third_party/python/chardet/chardet/metadata/__init__.py b/third_party/python/chardet/chardet/metadata/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/chardet/chardet/metadata/__init__.py
diff --git a/third_party/python/chardet/chardet/metadata/languages.py b/third_party/python/chardet/chardet/metadata/languages.py
new file mode 100644
index 0000000000..3237d5abf6
--- /dev/null
+++ b/third_party/python/chardet/chardet/metadata/languages.py
@@ -0,0 +1,310 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Metadata about languages used by our model training code for our
+SingleByteCharSetProbers. Could be used for other things in the future.
+
+This code is based on the language metadata from the uchardet project.
+"""
+from __future__ import absolute_import, print_function
+
+from string import ascii_letters
+
+
+# TODO: Add Ukrainian (KOI8-U)
+
+class Language(object):
+ """Metadata about a language useful for training models
+
+ :ivar name: The human name for the language, in English.
+ :type name: str
+ :ivar iso_code: 2-letter ISO 639-1 if possible, 3-letter ISO code otherwise,
+ or use another catalog as a last resort.
+ :type iso_code: str
+ :ivar use_ascii: Whether or not ASCII letters should be included in trained
+ models.
+ :type use_ascii: bool
+ :ivar charsets: The charsets we want to support and create data for.
+ :type charsets: list of str
+ :ivar alphabet: The characters in the language's alphabet. If `use_ascii` is
+ `True`, you only need to add those not in the ASCII set.
+ :type alphabet: str
+ :ivar wiki_start_pages: The Wikipedia pages to start from if we're crawling
+ Wikipedia for training data.
+ :type wiki_start_pages: list of str
+ """
+ def __init__(self, name=None, iso_code=None, use_ascii=True, charsets=None,
+ alphabet=None, wiki_start_pages=None):
+ super(Language, self).__init__()
+ self.name = name
+ self.iso_code = iso_code
+ self.use_ascii = use_ascii
+ self.charsets = charsets
+ if self.use_ascii:
+ if alphabet:
+ alphabet += ascii_letters
+ else:
+ alphabet = ascii_letters
+ elif not alphabet:
+ raise ValueError('Must supply alphabet if use_ascii is False')
+ self.alphabet = ''.join(sorted(set(alphabet))) if alphabet else None
+ self.wiki_start_pages = wiki_start_pages
+
+ def __repr__(self):
+ return '{}({})'.format(self.__class__.__name__,
+ ', '.join('{}={!r}'.format(k, v)
+ for k, v in self.__dict__.items()
+ if not k.startswith('_')))
+
+
+LANGUAGES = {'Arabic': Language(name='Arabic',
+ iso_code='ar',
+ use_ascii=False,
+ # We only support encodings that use isolated
+ # forms, because the current recommendation is
+ # that the rendering system handles presentation
+ # forms. This means we purposefully skip IBM864.
+ charsets=['ISO-8859-6', 'WINDOWS-1256',
+ 'CP720', 'CP864'],
+ alphabet=u'ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ',
+ wiki_start_pages=[u'الصفحة_الرئيسية']),
+ 'Belarusian': Language(name='Belarusian',
+ iso_code='be',
+ use_ascii=False,
+ charsets=['ISO-8859-5', 'WINDOWS-1251',
+ 'IBM866', 'MacCyrillic'],
+ alphabet=(u'АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯ'
+ u'абвгдеёжзійклмнопрстуўфхцчшыьэюяʼ'),
+ wiki_start_pages=[u'Галоўная_старонка']),
+ 'Bulgarian': Language(name='Bulgarian',
+ iso_code='bg',
+ use_ascii=False,
+ charsets=['ISO-8859-5', 'WINDOWS-1251',
+ 'IBM855'],
+ alphabet=(u'АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯ'
+ u'абвгдежзийклмнопрстуфхцчшщъьюя'),
+ wiki_start_pages=[u'Начална_страница']),
+ 'Czech': Language(name='Czech',
+ iso_code='cz',
+ use_ascii=True,
+ charsets=['ISO-8859-2', 'WINDOWS-1250'],
+ alphabet=u'áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ',
+ wiki_start_pages=[u'Hlavní_strana']),
+ 'Danish': Language(name='Danish',
+ iso_code='da',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'ISO-8859-15',
+ 'WINDOWS-1252'],
+ alphabet=u'æøåÆØÅ',
+ wiki_start_pages=[u'Forside']),
+ 'German': Language(name='German',
+ iso_code='de',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'WINDOWS-1252'],
+ alphabet=u'äöüßÄÖÜ',
+ wiki_start_pages=[u'Wikipedia:Hauptseite']),
+ 'Greek': Language(name='Greek',
+ iso_code='el',
+ use_ascii=False,
+ charsets=['ISO-8859-7', 'WINDOWS-1253'],
+ alphabet=(u'αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώ'
+ u'ΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ'),
+ wiki_start_pages=[u'Πύλη:Κύρια']),
+ 'English': Language(name='English',
+ iso_code='en',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'WINDOWS-1252'],
+ wiki_start_pages=[u'Main_Page']),
+ 'Esperanto': Language(name='Esperanto',
+ iso_code='eo',
+ # Q, W, X, and Y not used at all
+ use_ascii=False,
+ charsets=['ISO-8859-3'],
+ alphabet=(u'abcĉdefgĝhĥijĵklmnoprsŝtuŭvz'
+ u'ABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ'),
+ wiki_start_pages=[u'Vikipedio:Ĉefpaĝo']),
+ 'Spanish': Language(name='Spanish',
+ iso_code='es',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'ISO-8859-15',
+ 'WINDOWS-1252'],
+ alphabet=u'ñáéíóúüÑÁÉÍÓÚÜ',
+ wiki_start_pages=[u'Wikipedia:Portada']),
+ 'Estonian': Language(name='Estonian',
+ iso_code='et',
+ use_ascii=False,
+ charsets=['ISO-8859-4', 'ISO-8859-13',
+ 'WINDOWS-1257'],
+ # C, F, Š, Q, W, X, Y, Z, Ž are only for
+ # loanwords
+ alphabet=(u'ABDEGHIJKLMNOPRSTUVÕÄÖÜ'
+ u'abdeghijklmnoprstuvõäöü'),
+ wiki_start_pages=[u'Esileht']),
+ 'Finnish': Language(name='Finnish',
+ iso_code='fi',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'ISO-8859-15',
+ 'WINDOWS-1252'],
+ alphabet=u'ÅÄÖŠŽåäöšž',
+ wiki_start_pages=[u'Wikipedia:Etusivu']),
+ 'French': Language(name='French',
+ iso_code='fr',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'ISO-8859-15',
+ 'WINDOWS-1252'],
+ alphabet=u'œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ',
+ wiki_start_pages=[u'Wikipédia:Accueil_principal',
+ u'Bœuf (animal)']),
+ 'Hebrew': Language(name='Hebrew',
+ iso_code='he',
+ use_ascii=False,
+ charsets=['ISO-8859-8', 'WINDOWS-1255'],
+ alphabet=u'אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ',
+ wiki_start_pages=[u'עמוד_ראשי']),
+ 'Croatian': Language(name='Croatian',
+ iso_code='hr',
+ # Q, W, X, Y are only used for foreign words.
+ use_ascii=False,
+ charsets=['ISO-8859-2', 'WINDOWS-1250'],
+ alphabet=(u'abcčćdđefghijklmnoprsštuvzž'
+ u'ABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ'),
+ wiki_start_pages=[u'Glavna_stranica']),
+ 'Hungarian': Language(name='Hungarian',
+ iso_code='hu',
+ # Q, W, X, Y are only used for foreign words.
+ use_ascii=False,
+ charsets=['ISO-8859-2', 'WINDOWS-1250'],
+ alphabet=(u'abcdefghijklmnoprstuvzáéíóöőúüű'
+ u'ABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ'),
+ wiki_start_pages=[u'Kezdőlap']),
+ 'Italian': Language(name='Italian',
+ iso_code='it',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'ISO-8859-15',
+ 'WINDOWS-1252'],
+ alphabet=u'ÀÈÉÌÒÓÙàèéìòóù',
+ wiki_start_pages=[u'Pagina_principale']),
+ 'Lithuanian': Language(name='Lithuanian',
+ iso_code='lt',
+ use_ascii=False,
+ charsets=['ISO-8859-13', 'WINDOWS-1257',
+ 'ISO-8859-4'],
+ # Q, W, and X not used at all
+ alphabet=(u'AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽ'
+ u'aąbcčdeęėfghiįyjklmnoprsštuųūvzž'),
+ wiki_start_pages=[u'Pagrindinis_puslapis']),
+ 'Latvian': Language(name='Latvian',
+ iso_code='lv',
+ use_ascii=False,
+ charsets=['ISO-8859-13', 'WINDOWS-1257',
+ 'ISO-8859-4'],
+ # Q, W, X, Y are only for loanwords
+ alphabet=(u'AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽ'
+ u'aābcčdeēfgģhiījkķlļmnņoprsštuūvzž'),
+ wiki_start_pages=[u'Sākumlapa']),
+ 'Macedonian': Language(name='Macedonian',
+ iso_code='mk',
+ use_ascii=False,
+ charsets=['ISO-8859-5', 'WINDOWS-1251',
+ 'MacCyrillic', 'IBM855'],
+ alphabet=(u'АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШ'
+ u'абвгдѓежзѕијклљмнњопрстќуфхцчџш'),
+ wiki_start_pages=[u'Главна_страница']),
+ 'Dutch': Language(name='Dutch',
+ iso_code='nl',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'WINDOWS-1252'],
+ wiki_start_pages=[u'Hoofdpagina']),
+ 'Polish': Language(name='Polish',
+ iso_code='pl',
+ # Q and X are only used for foreign words.
+ use_ascii=False,
+ charsets=['ISO-8859-2', 'WINDOWS-1250'],
+ alphabet=(u'AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻ'
+ u'aąbcćdeęfghijklłmnńoóprsśtuwyzźż'),
+ wiki_start_pages=[u'Wikipedia:Strona_główna']),
+ 'Portuguese': Language(name='Portuguese',
+ iso_code='pt',
+ use_ascii=True,
+ charsets=['ISO-8859-1', 'ISO-8859-15',
+ 'WINDOWS-1252'],
+ alphabet=u'ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú',
+ wiki_start_pages=[u'Wikipédia:Página_principal']),
+ 'Romanian': Language(name='Romanian',
+ iso_code='ro',
+ use_ascii=True,
+ charsets=['ISO-8859-2', 'WINDOWS-1250'],
+ alphabet=u'ăâîșțĂÂÎȘȚ',
+ wiki_start_pages=[u'Pagina_principală']),
+ 'Russian': Language(name='Russian',
+ iso_code='ru',
+ use_ascii=False,
+ charsets=['ISO-8859-5', 'WINDOWS-1251',
+ 'KOI8-R', 'MacCyrillic', 'IBM866',
+ 'IBM855'],
+ alphabet=(u'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
+ u'АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ'),
+ wiki_start_pages=[u'Заглавная_страница']),
+ 'Slovak': Language(name='Slovak',
+ iso_code='sk',
+ use_ascii=True,
+ charsets=['ISO-8859-2', 'WINDOWS-1250'],
+ alphabet=u'áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ',
+ wiki_start_pages=[u'Hlavná_stránka']),
+ 'Slovene': Language(name='Slovene',
+ iso_code='sl',
+ # Q, W, X, Y are only used for foreign words.
+ use_ascii=False,
+ charsets=['ISO-8859-2', 'WINDOWS-1250'],
+ alphabet=(u'abcčdefghijklmnoprsštuvzž'
+ u'ABCČDEFGHIJKLMNOPRSŠTUVZŽ'),
+ wiki_start_pages=[u'Glavna_stran']),
+ # Serbian can be written in both Latin and Cyrillic, but there's no
+ # simple way to get the Latin alphabet pages from Wikipedia through
+ # the API, so for now we just support Cyrillic.
+ 'Serbian': Language(name='Serbian',
+ iso_code='sr',
+ alphabet=(u'АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШ'
+ u'абвгдђежзијклљмнњопрстћуфхцчџш'),
+ charsets=['ISO-8859-5', 'WINDOWS-1251',
+ 'MacCyrillic', 'IBM855'],
+ wiki_start_pages=[u'Главна_страна']),
+ 'Thai': Language(name='Thai',
+ iso_code='th',
+ use_ascii=False,
+ charsets=['ISO-8859-11', 'TIS-620', 'CP874'],
+ alphabet=u'กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛',
+ wiki_start_pages=[u'หน้าหลัก']),
+ 'Turkish': Language(name='Turkish',
+ iso_code='tr',
+ # Q, W, and X are not used by Turkish
+ use_ascii=False,
+ charsets=['ISO-8859-3', 'ISO-8859-9',
+ 'WINDOWS-1254'],
+ alphabet=(u'abcçdefgğhıijklmnoöprsştuüvyzâîû'
+ u'ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ'),
+ wiki_start_pages=[u'Ana_Sayfa']),
+ 'Vietnamese': Language(name='Vietnamese',
+ iso_code='vi',
+ use_ascii=False,
+ # Windows-1258 is the only common 8-bit
+ # Vietnamese encoding supported by Python.
+ # From Wikipedia:
+ # For systems that lack support for Unicode,
+ # dozens of 8-bit Vietnamese code pages are
+ # available.[1] The most common are VISCII
+ # (TCVN 5712:1993), VPS, and Windows-1258.[3]
+ # Where ASCII is required, such as when
+ # ensuring readability in plain text e-mail,
+ # Vietnamese letters are often encoded
+ # according to Vietnamese Quoted-Readable
+ # (VIQR) or VSCII Mnemonic (VSCII-MNEM),[4]
+ # though usage of either variable-width
+ # scheme has declined dramatically following
+ # the adoption of Unicode on the World Wide
+ # Web.
+ charsets=['WINDOWS-1258'],
+ alphabet=(u'aăâbcdđeêghiklmnoôơpqrstuưvxy'
+ u'AĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY'),
+ wiki_start_pages=[u'Chữ_Quốc_ngữ']),
+ }
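One behavior worth noting from the constructor above: with use_ascii=True, the ASCII letters are merged into the supplied alphabet, which is then sorted and de-duplicated. A small sketch with hypothetical values, illustrative only:

    from chardet.metadata.languages import Language

    lang = Language(name='Demo', iso_code='tr', use_ascii=True,
                    charsets=['ISO-8859-9'], alphabet=u'çğıöşü',
                    wiki_start_pages=[u'Ana_Sayfa'])
    print(len(lang.alphabet))  # 58: the 52 ASCII letters plus 6 extras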
diff --git a/third_party/python/chardet/chardet/sbcharsetprober.py b/third_party/python/chardet/chardet/sbcharsetprober.py
new file mode 100644
index 0000000000..46ba835c66
--- /dev/null
+++ b/third_party/python/chardet/chardet/sbcharsetprober.py
@@ -0,0 +1,145 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from collections import namedtuple
+
+from .charsetprober import CharSetProber
+from .enums import CharacterCategory, ProbingState, SequenceLikelihood
+
+
+SingleByteCharSetModel = namedtuple('SingleByteCharSetModel',
+ ['charset_name',
+ 'language',
+ 'char_to_order_map',
+ 'language_model',
+ 'typical_positive_ratio',
+ 'keep_ascii_letters',
+ 'alphabet'])
+
+
+class SingleByteCharSetProber(CharSetProber):
+ SAMPLE_SIZE = 64
+ SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
+ POSITIVE_SHORTCUT_THRESHOLD = 0.95
+ NEGATIVE_SHORTCUT_THRESHOLD = 0.05
+
+ def __init__(self, model, reversed=False, name_prober=None):
+ super(SingleByteCharSetProber, self).__init__()
+ self._model = model
+ # TRUE if we need to reverse every pair in the model lookup
+ self._reversed = reversed
+ # Optional auxiliary prober for name decision
+ self._name_prober = name_prober
+ self._last_order = None
+ self._seq_counters = None
+ self._total_seqs = None
+ self._total_char = None
+ self._freq_char = None
+ self.reset()
+
+ def reset(self):
+ super(SingleByteCharSetProber, self).reset()
+ # char order of last character
+ self._last_order = 255
+ self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
+ self._total_seqs = 0
+ self._total_char = 0
+ # characters that fall in our sampling range
+ self._freq_char = 0
+
+ @property
+ def charset_name(self):
+ if self._name_prober:
+ return self._name_prober.charset_name
+ else:
+ return self._model.charset_name
+
+ @property
+ def language(self):
+ if self._name_prober:
+ return self._name_prober.language
+ else:
+ return self._model.language
+
+ def feed(self, byte_str):
+ # TODO: Make filter_international_words keep things in self.alphabet
+ if not self._model.keep_ascii_letters:
+ byte_str = self.filter_international_words(byte_str)
+ if not byte_str:
+ return self.state
+ char_to_order_map = self._model.char_to_order_map
+ language_model = self._model.language_model
+ for char in byte_str:
+ order = char_to_order_map.get(char, CharacterCategory.UNDEFINED)
+ # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
+ # CharacterCategory.SYMBOL is actually 253, so we use CONTROL
+ # to make it closer to the original intent. The only difference
+ # is whether or not we count digits and control characters for
+ # _total_char purposes.
+ if order < CharacterCategory.CONTROL:
+ self._total_char += 1
+ # TODO: Follow uchardet's lead and discount confidence for frequent
+ # control characters.
+ # See https://github.com/BYVoid/uchardet/commit/55b4f23971db61
+ if order < self.SAMPLE_SIZE:
+ self._freq_char += 1
+ if self._last_order < self.SAMPLE_SIZE:
+ self._total_seqs += 1
+ if not self._reversed:
+ lm_cat = language_model[self._last_order][order]
+ else:
+ lm_cat = language_model[order][self._last_order]
+ self._seq_counters[lm_cat] += 1
+ self._last_order = order
+
+ charset_name = self._model.charset_name
+ if self.state == ProbingState.DETECTING:
+ if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
+ confidence = self.get_confidence()
+ if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
+ self.logger.debug('%s confidence = %s, we have a winner',
+ charset_name, confidence)
+ self._state = ProbingState.FOUND_IT
+ elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
+ self.logger.debug('%s confidence = %s, below negative '
+                                      'shortcut threshold %s', charset_name,
+ confidence,
+ self.NEGATIVE_SHORTCUT_THRESHOLD)
+ self._state = ProbingState.NOT_ME
+
+ return self.state
+
+ def get_confidence(self):
+ r = 0.01
+ if self._total_seqs > 0:
+ r = ((1.0 * self._seq_counters[SequenceLikelihood.POSITIVE]) /
+ self._total_seqs / self._model.typical_positive_ratio)
+ r = r * self._freq_char / self._total_char
+ if r >= 1.0:
+ r = 0.99
+ return r
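So the score is (positive bigrams / total bigrams / typical_positive_ratio), scaled by the fraction of characters in the sampling range, and capped at 0.99. A sketch driving the prober with one of the bundled models (illustrative only, assuming the vendored package is importable as `chardet`):

    from chardet.langturkishmodel import ISO_8859_9_TURKISH_MODEL
    from chardet.sbcharsetprober import SingleByteCharSetProber

    prober = SingleByteCharSetProber(ISO_8859_9_TURKISH_MODEL)
    prober.feed(u'Günaydın, bugün hava çok güzel.'.encode('iso-8859-9'))
    print(prober.charset_name, prober.language, prober.get_confidence())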
diff --git a/third_party/python/chardet/chardet/sbcsgroupprober.py b/third_party/python/chardet/chardet/sbcsgroupprober.py
new file mode 100644
index 0000000000..bdeef4e15b
--- /dev/null
+++ b/third_party/python/chardet/chardet/sbcsgroupprober.py
@@ -0,0 +1,83 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetgroupprober import CharSetGroupProber
+from .hebrewprober import HebrewProber
+from .langbulgarianmodel import (ISO_8859_5_BULGARIAN_MODEL,
+ WINDOWS_1251_BULGARIAN_MODEL)
+from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL
+from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL
+# from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL,
+# WINDOWS_1250_HUNGARIAN_MODEL)
+from .langrussianmodel import (IBM855_RUSSIAN_MODEL, IBM866_RUSSIAN_MODEL,
+ ISO_8859_5_RUSSIAN_MODEL, KOI8_R_RUSSIAN_MODEL,
+ MACCYRILLIC_RUSSIAN_MODEL,
+ WINDOWS_1251_RUSSIAN_MODEL)
+from .langthaimodel import TIS_620_THAI_MODEL
+from .langturkishmodel import ISO_8859_9_TURKISH_MODEL
+from .sbcharsetprober import SingleByteCharSetProber
+
+
+class SBCSGroupProber(CharSetGroupProber):
+ def __init__(self):
+ super(SBCSGroupProber, self).__init__()
+ hebrew_prober = HebrewProber()
+ logical_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
+ False, hebrew_prober)
+ # TODO: See if using ISO-8859-8 Hebrew model works better here, since
+ # it's actually the visual one
+ visual_hebrew_prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL,
+ True, hebrew_prober)
+ hebrew_prober.set_model_probers(logical_hebrew_prober,
+ visual_hebrew_prober)
+ # TODO: ORDER MATTERS HERE. I changed the order vs what was in master
+ # and several tests failed that did not before. Some thought
+ # should be put into the ordering, and we should consider making
+ # order not matter here, because that is very counter-intuitive.
+ self.probers = [
+ SingleByteCharSetProber(WINDOWS_1251_RUSSIAN_MODEL),
+ SingleByteCharSetProber(KOI8_R_RUSSIAN_MODEL),
+ SingleByteCharSetProber(ISO_8859_5_RUSSIAN_MODEL),
+ SingleByteCharSetProber(MACCYRILLIC_RUSSIAN_MODEL),
+ SingleByteCharSetProber(IBM866_RUSSIAN_MODEL),
+ SingleByteCharSetProber(IBM855_RUSSIAN_MODEL),
+ SingleByteCharSetProber(ISO_8859_7_GREEK_MODEL),
+ SingleByteCharSetProber(WINDOWS_1253_GREEK_MODEL),
+ SingleByteCharSetProber(ISO_8859_5_BULGARIAN_MODEL),
+ SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL),
+ # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
+ # after we retrain model.
+ # SingleByteCharSetProber(ISO_8859_2_HUNGARIAN_MODEL),
+ # SingleByteCharSetProber(WINDOWS_1250_HUNGARIAN_MODEL),
+ SingleByteCharSetProber(TIS_620_THAI_MODEL),
+ SingleByteCharSetProber(ISO_8859_9_TURKISH_MODEL),
+ hebrew_prober,
+ logical_hebrew_prober,
+ visual_hebrew_prober,
+ ]
+ self.reset()
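For orientation, a minimal sketch of how this group prober is exercised; the sample text is hypothetical, and a short sample may not cross the shortcut thresholds, so the reported confidence can stay modest:

.. code-block:: python

    from chardet.sbcsgroupprober import SBCSGroupProber

    # Hypothetical sample: Russian text encoded as KOI8-R.
    sample = u'тестовый текст на русском языке'.encode('koi8_r')

    prober = SBCSGroupProber()
    prober.feed(sample)
    # The group prober reports whichever child prober currently has the
    # highest confidence.
    print(prober.charset_name, prober.get_confidence())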
diff --git a/third_party/python/chardet/chardet/sjisprober.py b/third_party/python/chardet/chardet/sjisprober.py
new file mode 100644
index 0000000000..9e29623bdc
--- /dev/null
+++ b/third_party/python/chardet/chardet/sjisprober.py
@@ -0,0 +1,92 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .mbcharsetprober import MultiByteCharSetProber
+from .codingstatemachine import CodingStateMachine
+from .chardistribution import SJISDistributionAnalysis
+from .jpcntx import SJISContextAnalysis
+from .mbcssm import SJIS_SM_MODEL
+from .enums import ProbingState, MachineState
+
+
+class SJISProber(MultiByteCharSetProber):
+ def __init__(self):
+ super(SJISProber, self).__init__()
+ self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
+ self.distribution_analyzer = SJISDistributionAnalysis()
+ self.context_analyzer = SJISContextAnalysis()
+ self.reset()
+
+ def reset(self):
+ super(SJISProber, self).reset()
+ self.context_analyzer.reset()
+
+ @property
+ def charset_name(self):
+ return self.context_analyzer.charset_name
+
+ @property
+ def language(self):
+ return "Japanese"
+
+ def feed(self, byte_str):
+ for i in range(len(byte_str)):
+ coding_state = self.coding_sm.next_state(byte_str[i])
+ if coding_state == MachineState.ERROR:
+ self.logger.debug('%s %s prober hit error at byte %s',
+ self.charset_name, self.language, i)
+ self._state = ProbingState.NOT_ME
+ break
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ elif coding_state == MachineState.START:
+ char_len = self.coding_sm.get_current_charlen()
+ if i == 0:
+ self._last_char[1] = byte_str[0]
+ self.context_analyzer.feed(self._last_char[2 - char_len:],
+ char_len)
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.context_analyzer.feed(byte_str[i + 1 - char_len:i + 3
+ - char_len], char_len)
+ self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
+ char_len)
+
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if (self.context_analyzer.got_enough_data() and
+ (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self):
+ context_conf = self.context_analyzer.get_confidence()
+ distrib_conf = self.distribution_analyzer.get_confidence()
+ return max(context_conf, distrib_conf)
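The prober's final confidence is simply the larger of the two analyzers' scores, as a minimal sketch illustrates (the sample text is hypothetical):

.. code-block:: python

    from chardet.sjisprober import SJISProber

    # Hypothetical sample: Japanese text encoded as Shift_JIS.
    sample = u'これは日本語のテキストです。'.encode('shift_jis')

    prober = SJISProber()
    prober.feed(sample)
    # Confidence is max(hiragana-context score, character-distribution score).
    print(prober.charset_name, prober.language, prober.get_confidence())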
diff --git a/third_party/python/chardet/chardet/universaldetector.py b/third_party/python/chardet/chardet/universaldetector.py
new file mode 100644
index 0000000000..055a8ac1b1
--- /dev/null
+++ b/third_party/python/chardet/chardet/universaldetector.py
@@ -0,0 +1,286 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+"""
+Module containing the UniversalDetector detector class, which is the primary
+class a user of ``chardet`` should use.
+
+:author: Mark Pilgrim (initial port to Python)
+:author: Shy Shalom (original C code)
+:author: Dan Blanchard (major refactoring for 3.0)
+:author: Ian Cordasco
+"""
+
+
+import codecs
+import logging
+import re
+
+from .charsetgroupprober import CharSetGroupProber
+from .enums import InputState, LanguageFilter, ProbingState
+from .escprober import EscCharSetProber
+from .latin1prober import Latin1Prober
+from .mbcsgroupprober import MBCSGroupProber
+from .sbcsgroupprober import SBCSGroupProber
+
+
+class UniversalDetector(object):
+ """
+ The ``UniversalDetector`` class underlies the ``chardet.detect`` function
+ and coordinates all of the different charset probers.
+
+ To get a ``dict`` containing an encoding and its confidence, you can simply
+ run:
+
+ .. code::
+
+ u = UniversalDetector()
+ u.feed(some_bytes)
+ u.close()
+ detected = u.result
+
+ """
+
+ MINIMUM_THRESHOLD = 0.20
+ HIGH_BYTE_DETECTOR = re.compile(b'[\x80-\xFF]')
+ ESC_DETECTOR = re.compile(b'(\033|~{)')
+ WIN_BYTE_DETECTOR = re.compile(b'[\x80-\x9F]')
+ ISO_WIN_MAP = {'iso-8859-1': 'Windows-1252',
+ 'iso-8859-2': 'Windows-1250',
+ 'iso-8859-5': 'Windows-1251',
+ 'iso-8859-6': 'Windows-1256',
+ 'iso-8859-7': 'Windows-1253',
+ 'iso-8859-8': 'Windows-1255',
+ 'iso-8859-9': 'Windows-1254',
+ 'iso-8859-13': 'Windows-1257'}
+
+ def __init__(self, lang_filter=LanguageFilter.ALL):
+ self._esc_charset_prober = None
+ self._charset_probers = []
+ self.result = None
+ self.done = None
+ self._got_data = None
+ self._input_state = None
+ self._last_char = None
+ self.lang_filter = lang_filter
+ self.logger = logging.getLogger(__name__)
+ self._has_win_bytes = None
+ self.reset()
+
+ def reset(self):
+ """
+ Reset the UniversalDetector and all of its probers back to their
+ initial states. This is called by ``__init__``, so you only need to
+ call this directly in between analyses of different documents.
+ """
+ self.result = {'encoding': None, 'confidence': 0.0, 'language': None}
+ self.done = False
+ self._got_data = False
+ self._has_win_bytes = False
+ self._input_state = InputState.PURE_ASCII
+ self._last_char = b''
+ if self._esc_charset_prober:
+ self._esc_charset_prober.reset()
+ for prober in self._charset_probers:
+ prober.reset()
+
+ def feed(self, byte_str):
+ """
+ Takes a chunk of a document and feeds it through all of the relevant
+ charset probers.
+
+ After calling ``feed``, you can check the value of the ``done``
+ attribute to see if you need to continue feeding the
+ ``UniversalDetector`` more data, or if it has made a prediction
+ (in the ``result`` attribute).
+
+ .. note::
+ You should always call ``close`` when you're done feeding in your
+ document if ``done`` is not already ``True``.
+ """
+ if self.done:
+ return
+
+ if not len(byte_str):
+ return
+
+ if not isinstance(byte_str, bytearray):
+ byte_str = bytearray(byte_str)
+
+ # First check for known BOMs, since these are guaranteed to be correct
+ if not self._got_data:
+ # If the data starts with BOM, we know it is UTF
+ if byte_str.startswith(codecs.BOM_UTF8):
+ # EF BB BF UTF-8 with BOM
+ self.result = {'encoding': "UTF-8-SIG",
+ 'confidence': 1.0,
+ 'language': ''}
+ elif byte_str.startswith((codecs.BOM_UTF32_LE,
+ codecs.BOM_UTF32_BE)):
+ # FF FE 00 00 UTF-32, little-endian BOM
+ # 00 00 FE FF UTF-32, big-endian BOM
+ self.result = {'encoding': "UTF-32",
+ 'confidence': 1.0,
+ 'language': ''}
+ elif byte_str.startswith(b'\xFE\xFF\x00\x00'):
+ # FE FF 00 00 UCS-4, unusual octet order BOM (3412)
+ self.result = {'encoding': "X-ISO-10646-UCS-4-3412",
+ 'confidence': 1.0,
+ 'language': ''}
+ elif byte_str.startswith(b'\x00\x00\xFF\xFE'):
+ # 00 00 FF FE UCS-4, unusual octet order BOM (2143)
+ self.result = {'encoding': "X-ISO-10646-UCS-4-2143",
+ 'confidence': 1.0,
+ 'language': ''}
+ elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
+ # FF FE UTF-16, little endian BOM
+ # FE FF UTF-16, big endian BOM
+ self.result = {'encoding': "UTF-16",
+ 'confidence': 1.0,
+ 'language': ''}
+
+ self._got_data = True
+ if self.result['encoding'] is not None:
+ self.done = True
+ return
+
+ # If none of those matched and we've only seen ASCII so far, check
+ # for high bytes and escape sequences
+ if self._input_state == InputState.PURE_ASCII:
+ if self.HIGH_BYTE_DETECTOR.search(byte_str):
+ self._input_state = InputState.HIGH_BYTE
+ elif self._input_state == InputState.PURE_ASCII and \
+ self.ESC_DETECTOR.search(self._last_char + byte_str):
+ self._input_state = InputState.ESC_ASCII
+
+ self._last_char = byte_str[-1:]
+
+ # If we've seen escape sequences, use the EscCharSetProber, which
+ # uses a simple state machine to check for known escape sequences in
+ # HZ and ISO-2022 encodings, since those are the only encodings that
+ # use such sequences.
+ if self._input_state == InputState.ESC_ASCII:
+ if not self._esc_charset_prober:
+ self._esc_charset_prober = EscCharSetProber(self.lang_filter)
+ if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
+ self.result = {'encoding':
+ self._esc_charset_prober.charset_name,
+ 'confidence':
+ self._esc_charset_prober.get_confidence(),
+ 'language':
+ self._esc_charset_prober.language}
+ self.done = True
+ # If we've seen high bytes (i.e., those with values greater than 127),
+ # we need to do more complicated checks using all our multi-byte and
+ # single-byte probers that are left. The single-byte probers
+ # use character bigram distributions to determine the encoding, whereas
+ # the multi-byte probers use a combination of character unigram and
+ # bigram distributions.
+ elif self._input_state == InputState.HIGH_BYTE:
+ if not self._charset_probers:
+ self._charset_probers = [MBCSGroupProber(self.lang_filter)]
+ # If we're checking non-CJK encodings, use single-byte prober
+ if self.lang_filter & LanguageFilter.NON_CJK:
+ self._charset_probers.append(SBCSGroupProber())
+ self._charset_probers.append(Latin1Prober())
+ for prober in self._charset_probers:
+ if prober.feed(byte_str) == ProbingState.FOUND_IT:
+ self.result = {'encoding': prober.charset_name,
+ 'confidence': prober.get_confidence(),
+ 'language': prober.language}
+ self.done = True
+ break
+ if self.WIN_BYTE_DETECTOR.search(byte_str):
+ self._has_win_bytes = True
+
+ def close(self):
+ """
+ Stop analyzing the current document and come up with a final
+ prediction.
+
+ :returns: The ``result`` attribute, a ``dict`` with the keys
+ `encoding`, `confidence`, and `language`.
+ """
+ # Don't bother with checks if we're already done
+ if self.done:
+ return self.result
+ self.done = True
+
+ if not self._got_data:
+ self.logger.debug('no data received!')
+
+ # Default to ASCII if it is all we've seen so far
+ elif self._input_state == InputState.PURE_ASCII:
+ self.result = {'encoding': 'ascii',
+ 'confidence': 1.0,
+ 'language': ''}
+
+ # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
+ elif self._input_state == InputState.HIGH_BYTE:
+ prober_confidence = None
+ max_prober_confidence = 0.0
+ max_prober = None
+ for prober in self._charset_probers:
+ if not prober:
+ continue
+ prober_confidence = prober.get_confidence()
+ if prober_confidence > max_prober_confidence:
+ max_prober_confidence = prober_confidence
+ max_prober = prober
+ if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
+ charset_name = max_prober.charset_name
+ lower_charset_name = max_prober.charset_name.lower()
+ confidence = max_prober.get_confidence()
+ # Use Windows encoding name instead of ISO-8859 if we saw any
+ # extra Windows-specific bytes
+ if lower_charset_name.startswith('iso-8859'):
+ if self._has_win_bytes:
+ charset_name = self.ISO_WIN_MAP.get(lower_charset_name,
+ charset_name)
+ self.result = {'encoding': charset_name,
+ 'confidence': confidence,
+ 'language': max_prober.language}
+
+ # Log all prober confidences if none met MINIMUM_THRESHOLD
+ if self.logger.getEffectiveLevel() <= logging.DEBUG:
+ if self.result['encoding'] is None:
+ self.logger.debug('no probers hit minimum threshold')
+ for group_prober in self._charset_probers:
+ if not group_prober:
+ continue
+ if isinstance(group_prober, CharSetGroupProber):
+ for prober in group_prober.probers:
+ self.logger.debug('%s %s confidence = %s',
+ prober.charset_name,
+ prober.language,
+ prober.get_confidence())
+ else:
+ self.logger.debug('%s %s confidence = %s',
+ group_prober.charset_name,
+ group_prober.language,
+ group_prober.get_confidence())
+ return self.result
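The class docstring shows the one-shot pattern; the incremental pattern that ``feed``/``close`` are designed for reads a file in chunks and stops as soon as the detector is confident (the file name below is hypothetical):

.. code-block:: python

    from chardet.universaldetector import UniversalDetector

    detector = UniversalDetector()
    with open('some_document.txt', 'rb') as handle:  # hypothetical path
        for chunk in iter(lambda: handle.read(4096), b''):
            detector.feed(chunk)
            if detector.done:  # a BOM or a shortcut threshold was hit
                break
    detector.close()
    print(detector.result)
    # e.g. {'encoding': 'utf-8', 'confidence': 0.99, 'language': ''}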
diff --git a/third_party/python/chardet/chardet/utf8prober.py b/third_party/python/chardet/chardet/utf8prober.py
new file mode 100644
index 0000000000..6c3196cc2d
--- /dev/null
+++ b/third_party/python/chardet/chardet/utf8prober.py
@@ -0,0 +1,82 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState, MachineState
+from .codingstatemachine import CodingStateMachine
+from .mbcssm import UTF8_SM_MODEL
+
+
+class UTF8Prober(CharSetProber):
+ ONE_CHAR_PROB = 0.5
+
+ def __init__(self):
+ super(UTF8Prober, self).__init__()
+ self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
+ self._num_mb_chars = None
+ self.reset()
+
+ def reset(self):
+ super(UTF8Prober, self).reset()
+ self.coding_sm.reset()
+ self._num_mb_chars = 0
+
+ @property
+ def charset_name(self):
+ return "utf-8"
+
+ @property
+ def language(self):
+ return ""
+
+ def feed(self, byte_str):
+ for c in byte_str:
+ coding_state = self.coding_sm.next_state(c)
+ if coding_state == MachineState.ERROR:
+ self._state = ProbingState.NOT_ME
+ break
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ elif coding_state == MachineState.START:
+ if self.coding_sm.get_current_charlen() >= 2:
+ self._num_mb_chars += 1
+
+ if self.state == ProbingState.DETECTING:
+ if self.get_confidence() > self.SHORTCUT_THRESHOLD:
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self):
+ unlike = 0.99
+ if self._num_mb_chars < 6:
+ unlike *= self.ONE_CHAR_PROB ** self._num_mb_chars
+ return 1.0 - unlike
+ else:
+ return unlike
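The confidence curve is easy to tabulate: each observed multi-byte sequence halves the remaining doubt, and after six sequences the score saturates at 0.99. A standalone restatement of the same arithmetic:

.. code-block:: python

    ONE_CHAR_PROB = 0.5

    def utf8_confidence(num_mb_chars):
        # Each valid multi-byte sequence halves the "not UTF-8" mass.
        unlike = 0.99
        if num_mb_chars < 6:
            return 1.0 - unlike * ONE_CHAR_PROB ** num_mb_chars
        return unlike

    for n in (0, 1, 3, 6):
        print(n, utf8_confidence(n))  # ~0.01, 0.505, 0.87625, 0.99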
diff --git a/third_party/python/chardet/chardet/version.py b/third_party/python/chardet/chardet/version.py
new file mode 100644
index 0000000000..70369b9d66
--- /dev/null
+++ b/third_party/python/chardet/chardet/version.py
@@ -0,0 +1,9 @@
+"""
+This module exists only to simplify retrieving the version number of chardet
+from within setup.py and from chardet subpackages.
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+__version__ = "4.0.0"
+VERSION = __version__.split('.')
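Note that ``VERSION`` holds string components, not integers, so numeric comparisons need an explicit conversion:

.. code-block:: python

    from chardet.version import __version__, VERSION

    print(__version__)       # '4.0.0'
    print(VERSION)           # ['4', '0', '0'] -- strings, not ints
    major = int(VERSION[0])  # convert before comparing numerically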
diff --git a/third_party/python/click/click-7.1.2.dist-info/LICENSE.rst b/third_party/python/click/click-7.1.2.dist-info/LICENSE.rst
new file mode 100644
index 0000000000..d12a849186
--- /dev/null
+++ b/third_party/python/click/click-7.1.2.dist-info/LICENSE.rst
@@ -0,0 +1,28 @@
+Copyright 2014 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/click/click-7.1.2.dist-info/METADATA b/third_party/python/click/click-7.1.2.dist-info/METADATA
new file mode 100644
index 0000000000..00d697493a
--- /dev/null
+++ b/third_party/python/click/click-7.1.2.dist-info/METADATA
@@ -0,0 +1,102 @@
+Metadata-Version: 2.1
+Name: click
+Version: 7.1.2
+Summary: Composable command line interface toolkit
+Home-page: https://palletsprojects.com/p/click/
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Documentation, https://click.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/click
+Project-URL: Issue tracker, https://github.com/pallets/click/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+
+\$ click\_
+==========
+
+Click is a Python package for creating beautiful command line interfaces
+in a composable way with as little code as necessary. It's the "Command
+Line Interface Creation Kit". It's highly configurable but comes with
+sensible defaults out of the box.
+
+It aims to make the process of writing command line tools quick and fun
+while also preventing any frustration caused by the inability to
+implement an intended CLI API.
+
+Click in three points:
+
+- Arbitrary nesting of commands
+- Automatic help page generation
+- Supports lazy loading of subcommands at runtime
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+ $ pip install -U click
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+A Simple Example
+----------------
+
+.. code-block:: python
+
+ import click
+
+ @click.command()
+ @click.option("--count", default=1, help="Number of greetings.")
+ @click.option("--name", prompt="Your name", help="The person to greet.")
+ def hello(count, name):
+ """Simple program that greets NAME for a total of COUNT times."""
+ for _ in range(count):
+ click.echo(f"Hello, {name}!")
+
+ if __name__ == '__main__':
+ hello()
+
+.. code-block:: text
+
+ $ python hello.py --count=3
+ Your name: Click
+ Hello, Click!
+ Hello, Click!
+ Hello, Click!
+
+
+Donate
+------
+
+The Pallets organization develops and supports Click and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+- Website: https://palletsprojects.com/p/click/
+- Documentation: https://click.palletsprojects.com/
+- Releases: https://pypi.org/project/click/
+- Code: https://github.com/pallets/click
+- Issue tracker: https://github.com/pallets/click/issues
+- Test status: https://dev.azure.com/pallets/click/_build
+- Official chat: https://discord.gg/t6rrQZH
+
+
diff --git a/third_party/python/click/click-7.1.2.dist-info/RECORD b/third_party/python/click/click-7.1.2.dist-info/RECORD
new file mode 100644
index 0000000000..847406774c
--- /dev/null
+++ b/third_party/python/click/click-7.1.2.dist-info/RECORD
@@ -0,0 +1,22 @@
+click/__init__.py,sha256=FkyGDQ-cbiQxP_lxgUspyFYS48f2S_pTcfKPz-d_RMo,2463
+click/_bashcomplete.py,sha256=9J98IHQYmCAr2Jup6TDshUr5FJEen-AoQCZR0K5nKxQ,12309
+click/_compat.py,sha256=AoMaYnZ-3pwtNXuHtlb6_UXsayoG0QZiHKIRy2VFezc,24169
+click/_termui_impl.py,sha256=yNktUMAdjYOU1HMkq915jR3zgAzUNtGSQqSTSSMn3eQ,20702
+click/_textwrap.py,sha256=ajCzkzFly5tjm9foQ5N9_MOeaYJMBjAltuFa69n4iXY,1197
+click/_unicodefun.py,sha256=apLSNEBZgUsQNPMUv072zJ1swqnm0dYVT5TqcIWTt6w,4201
+click/_winconsole.py,sha256=6YDu6Rq1Wxx4w9uinBMK2LHvP83aerZM9GQurlk3QDo,10010
+click/core.py,sha256=V6DJzastGhrC6WTDwV9MSLwcJUdX2Uf1ypmgkjBdn_Y,77650
+click/decorators.py,sha256=3TvEO_BkaHl7k6Eh1G5eC7JK4LKPdpFqH9JP0QDyTlM,11215
+click/exceptions.py,sha256=3pQAyyMFzx5A3eV0Y27WtDTyGogZRbrC6_o5DjjKBbw,8118
+click/formatting.py,sha256=Wb4gqFEpWaKPgAbOvnkCl8p-bEZx5KpM5ZSByhlnJNk,9281
+click/globals.py,sha256=ht7u2kUGI08pAarB4e4yC8Lkkxy6gJfRZyzxEj8EbWQ,1501
+click/parser.py,sha256=mFK-k58JtPpqO0AC36WAr0t5UfzEw1mvgVSyn7WCe9M,15691
+click/termui.py,sha256=G7QBEKIepRIGLvNdGwBTYiEtSImRxvTO_AglVpyHH2s,23998
+click/testing.py,sha256=EUEsDUqNXFgCLhZ0ZFOROpaVDA5I_rijwnNPE6qICgA,12854
+click/types.py,sha256=wuubik4VqgqAw5dvbYFkDt-zSAx97y9TQXuXcVaRyQA,25045
+click/utils.py,sha256=4VEcJ7iEHwjnFuzEuRtkT99o5VG3zqSD7Q2CVzv13WU,15940
+click-7.1.2.dist-info/LICENSE.rst,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475
+click-7.1.2.dist-info/METADATA,sha256=LrRgakZKV7Yg3qJqX_plu2WhFW81MzP3EqQmZhHIO8M,2868
+click-7.1.2.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+click-7.1.2.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
+click-7.1.2.dist-info/RECORD,,
diff --git a/third_party/python/click/click-7.1.2.dist-info/WHEEL b/third_party/python/click/click-7.1.2.dist-info/WHEEL
new file mode 100644
index 0000000000..ef99c6cf32
--- /dev/null
+++ b/third_party/python/click/click-7.1.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/click/click-7.1.2.dist-info/top_level.txt b/third_party/python/click/click-7.1.2.dist-info/top_level.txt
new file mode 100644
index 0000000000..dca9a90964
--- /dev/null
+++ b/third_party/python/click/click-7.1.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+click
diff --git a/third_party/python/click/click/__init__.py b/third_party/python/click/click/__init__.py
new file mode 100644
index 0000000000..2b6008f2dd
--- /dev/null
+++ b/third_party/python/click/click/__init__.py
@@ -0,0 +1,79 @@
+"""
+Click is a simple Python module inspired by the stdlib optparse to make
+writing command line scripts fun. Unlike other modules, it's based
+around a simple API that does not come with too much magic and is
+composable.
+"""
+from .core import Argument
+from .core import BaseCommand
+from .core import Command
+from .core import CommandCollection
+from .core import Context
+from .core import Group
+from .core import MultiCommand
+from .core import Option
+from .core import Parameter
+from .decorators import argument
+from .decorators import command
+from .decorators import confirmation_option
+from .decorators import group
+from .decorators import help_option
+from .decorators import make_pass_decorator
+from .decorators import option
+from .decorators import pass_context
+from .decorators import pass_obj
+from .decorators import password_option
+from .decorators import version_option
+from .exceptions import Abort
+from .exceptions import BadArgumentUsage
+from .exceptions import BadOptionUsage
+from .exceptions import BadParameter
+from .exceptions import ClickException
+from .exceptions import FileError
+from .exceptions import MissingParameter
+from .exceptions import NoSuchOption
+from .exceptions import UsageError
+from .formatting import HelpFormatter
+from .formatting import wrap_text
+from .globals import get_current_context
+from .parser import OptionParser
+from .termui import clear
+from .termui import confirm
+from .termui import echo_via_pager
+from .termui import edit
+from .termui import get_terminal_size
+from .termui import getchar
+from .termui import launch
+from .termui import pause
+from .termui import progressbar
+from .termui import prompt
+from .termui import secho
+from .termui import style
+from .termui import unstyle
+from .types import BOOL
+from .types import Choice
+from .types import DateTime
+from .types import File
+from .types import FLOAT
+from .types import FloatRange
+from .types import INT
+from .types import IntRange
+from .types import ParamType
+from .types import Path
+from .types import STRING
+from .types import Tuple
+from .types import UNPROCESSED
+from .types import UUID
+from .utils import echo
+from .utils import format_filename
+from .utils import get_app_dir
+from .utils import get_binary_stream
+from .utils import get_os_args
+from .utils import get_text_stream
+from .utils import open_file
+
+# Controls if click should emit the warning about the use of unicode
+# literals.
+disable_unicode_literals_warning = False
+
+__version__ = "7.1.2"
diff --git a/third_party/python/click/click/_bashcomplete.py b/third_party/python/click/click/_bashcomplete.py
new file mode 100644
index 0000000000..8bca24480f
--- /dev/null
+++ b/third_party/python/click/click/_bashcomplete.py
@@ -0,0 +1,375 @@
+import copy
+import os
+import re
+
+from .core import Argument
+from .core import MultiCommand
+from .core import Option
+from .parser import split_arg_string
+from .types import Choice
+from .utils import echo
+
+try:
+ from collections import abc
+except ImportError:
+ import collections as abc
+
+WORDBREAK = "="
+
+# Note, only BASH version 4.4 and later have the nosort option.
+COMPLETION_SCRIPT_BASH = """
+%(complete_func)s() {
+ local IFS=$'\n'
+ COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
+ COMP_CWORD=$COMP_CWORD \\
+ %(autocomplete_var)s=complete $1 ) )
+ return 0
+}
+
+%(complete_func)setup() {
+ local COMPLETION_OPTIONS=""
+ local BASH_VERSION_ARR=(${BASH_VERSION//./ })
+ # Only BASH version 4.4 and later have the nosort option.
+ if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] \
+&& [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then
+ COMPLETION_OPTIONS="-o nosort"
+ fi
+
+ complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s
+}
+
+%(complete_func)setup
+"""
+
+COMPLETION_SCRIPT_ZSH = """
+#compdef %(script_names)s
+
+%(complete_func)s() {
+ local -a completions
+ local -a completions_with_descriptions
+ local -a response
+ (( ! $+commands[%(script_names)s] )) && return 1
+
+ response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\
+ COMP_CWORD=$((CURRENT-1)) \\
+ %(autocomplete_var)s=\"complete_zsh\" \\
+ %(script_names)s )}")
+
+ for key descr in ${(kv)response}; do
+ if [[ "$descr" == "_" ]]; then
+ completions+=("$key")
+ else
+ completions_with_descriptions+=("$key":"$descr")
+ fi
+ done
+
+ if [ -n "$completions_with_descriptions" ]; then
+ _describe -V unsorted completions_with_descriptions -U
+ fi
+
+ if [ -n "$completions" ]; then
+ compadd -U -V unsorted -a completions
+ fi
+ compstate[insert]="automenu"
+}
+
+compdef %(complete_func)s %(script_names)s
+"""
+
+COMPLETION_SCRIPT_FISH = (
+ "complete --no-files --command %(script_names)s --arguments"
+ ' "(env %(autocomplete_var)s=complete_fish'
+ " COMP_WORDS=(commandline -cp) COMP_CWORD=(commandline -t)"
+ ' %(script_names)s)"'
+)
+
+_completion_scripts = {
+ "bash": COMPLETION_SCRIPT_BASH,
+ "zsh": COMPLETION_SCRIPT_ZSH,
+ "fish": COMPLETION_SCRIPT_FISH,
+}
+
+_invalid_ident_char_re = re.compile(r"[^a-zA-Z0-9_]")
+
+
+def get_completion_script(prog_name, complete_var, shell):
+ cf_name = _invalid_ident_char_re.sub("", prog_name.replace("-", "_"))
+ script = _completion_scripts.get(shell, COMPLETION_SCRIPT_BASH)
+ return (
+ script
+ % {
+ "complete_func": "_{}_completion".format(cf_name),
+ "script_names": prog_name,
+ "autocomplete_var": complete_var,
+ }
+ ).strip() + ";"
+
+
+def resolve_ctx(cli, prog_name, args):
+ """Parse into a hierarchy of contexts. Contexts are connected
+ through the parent variable.
+
+ :param cli: command definition
+ :param prog_name: the program that is running
+ :param args: full list of args
+ :return: the final context/command parsed
+ """
+ ctx = cli.make_context(prog_name, args, resilient_parsing=True)
+ args = ctx.protected_args + ctx.args
+ while args:
+ if isinstance(ctx.command, MultiCommand):
+ if not ctx.command.chain:
+ cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
+ if cmd is None:
+ return ctx
+ ctx = cmd.make_context(
+ cmd_name, args, parent=ctx, resilient_parsing=True
+ )
+ args = ctx.protected_args + ctx.args
+ else:
+ # Walk chained subcommand contexts saving the last one.
+ while args:
+ cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
+ if cmd is None:
+ return ctx
+ sub_ctx = cmd.make_context(
+ cmd_name,
+ args,
+ parent=ctx,
+ allow_extra_args=True,
+ allow_interspersed_args=False,
+ resilient_parsing=True,
+ )
+ args = sub_ctx.args
+ ctx = sub_ctx
+ args = sub_ctx.protected_args + sub_ctx.args
+ else:
+ break
+ return ctx
+
+
+def start_of_option(param_str):
+ """
+ :param param_str: param_str to check
+ :return: whether or not this is the start of an option declaration
+ (i.e. starts "-" or "--")
+ """
+ return param_str and param_str[:1] == "-"
+
+
+def is_incomplete_option(all_args, cmd_param):
+ """
+ :param all_args: the full original list of args supplied
+ :param cmd_param: the current command parameter
+ :return: whether or not the last option declaration (i.e. starts
+ "-" or "--") is incomplete and corresponds to this cmd_param. In
+ other words whether this cmd_param option can still accept
+ values
+ """
+ if not isinstance(cmd_param, Option):
+ return False
+ if cmd_param.is_flag:
+ return False
+ last_option = None
+ for index, arg_str in enumerate(
+ reversed([arg for arg in all_args if arg != WORDBREAK])
+ ):
+ if index + 1 > cmd_param.nargs:
+ break
+ if start_of_option(arg_str):
+ last_option = arg_str
+
+ return True if last_option and last_option in cmd_param.opts else False
+
+
+def is_incomplete_argument(current_params, cmd_param):
+ """
+ :param current_params: the current params and values for this
+ argument as already entered
+ :param cmd_param: the current command parameter
+ :return: whether or not the last argument is incomplete and
+ corresponds to this cmd_param. In other words whether or not the
+ this cmd_param argument can still accept values
+ """
+ if not isinstance(cmd_param, Argument):
+ return False
+ current_param_values = current_params[cmd_param.name]
+ if current_param_values is None:
+ return True
+ if cmd_param.nargs == -1:
+ return True
+ if (
+ isinstance(current_param_values, abc.Iterable)
+ and cmd_param.nargs > 1
+ and len(current_param_values) < cmd_param.nargs
+ ):
+ return True
+ return False
+
+
+def get_user_autocompletions(ctx, args, incomplete, cmd_param):
+ """
+ :param ctx: context associated with the parsed command
+ :param args: full list of args
+ :param incomplete: the incomplete text to autocomplete
+ :param cmd_param: the parameter being completed
+ :return: all the possible user-specified completions for the param
+ """
+ results = []
+ if isinstance(cmd_param.type, Choice):
+ # Choices don't support descriptions.
+ results = [
+ (c, None) for c in cmd_param.type.choices if str(c).startswith(incomplete)
+ ]
+ elif cmd_param.autocompletion is not None:
+ dynamic_completions = cmd_param.autocompletion(
+ ctx=ctx, args=args, incomplete=incomplete
+ )
+ results = [
+ c if isinstance(c, tuple) else (c, None) for c in dynamic_completions
+ ]
+ return results
+
+
+def get_visible_commands_starting_with(ctx, starts_with):
+ """
+ :param ctx: context associated with the parsed command
+ :param starts_with: string that visible commands must start with.
+ :return: all visible (not hidden) commands that start with starts_with.
+ """
+ for c in ctx.command.list_commands(ctx):
+ if c.startswith(starts_with):
+ command = ctx.command.get_command(ctx, c)
+ if not command.hidden:
+ yield command
+
+
+def add_subcommand_completions(ctx, incomplete, completions_out):
+ # Add subcommand completions.
+ if isinstance(ctx.command, MultiCommand):
+ completions_out.extend(
+ [
+ (c.name, c.get_short_help_str())
+ for c in get_visible_commands_starting_with(ctx, incomplete)
+ ]
+ )
+
+ # Walk up the context list and add any other completion
+ # possibilities from chained commands
+ while ctx.parent is not None:
+ ctx = ctx.parent
+ if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
+ remaining_commands = [
+ c
+ for c in get_visible_commands_starting_with(ctx, incomplete)
+ if c.name not in ctx.protected_args
+ ]
+ completions_out.extend(
+ [(c.name, c.get_short_help_str()) for c in remaining_commands]
+ )
+
+
+def get_choices(cli, prog_name, args, incomplete):
+ """
+ :param cli: command definition
+ :param prog_name: the program that is running
+ :param args: full list of args
+ :param incomplete: the incomplete text to autocomplete
+ :return: all the possible completions for the incomplete
+ """
+ all_args = copy.deepcopy(args)
+
+ ctx = resolve_ctx(cli, prog_name, args)
+ if ctx is None:
+ return []
+
+ has_double_dash = "--" in all_args
+
+ # In newer versions of bash long opts with '='s are partitioned, but
+ # it's easier to parse without the '='
+ if start_of_option(incomplete) and WORDBREAK in incomplete:
+ partition_incomplete = incomplete.partition(WORDBREAK)
+ all_args.append(partition_incomplete[0])
+ incomplete = partition_incomplete[2]
+ elif incomplete == WORDBREAK:
+ incomplete = ""
+
+ completions = []
+ if not has_double_dash and start_of_option(incomplete):
+ # completions for partial options
+ for param in ctx.command.params:
+ if isinstance(param, Option) and not param.hidden:
+ param_opts = [
+ param_opt
+ for param_opt in param.opts + param.secondary_opts
+ if param_opt not in all_args or param.multiple
+ ]
+ completions.extend(
+ [(o, param.help) for o in param_opts if o.startswith(incomplete)]
+ )
+ return completions
+ # completion for option values from user supplied values
+ for param in ctx.command.params:
+ if is_incomplete_option(all_args, param):
+ return get_user_autocompletions(ctx, all_args, incomplete, param)
+ # completion for argument values from user supplied values
+ for param in ctx.command.params:
+ if is_incomplete_argument(ctx.params, param):
+ return get_user_autocompletions(ctx, all_args, incomplete, param)
+
+ add_subcommand_completions(ctx, incomplete, completions)
+ # Sort before returning so that proper ordering can be enforced in custom types.
+ return sorted(completions)
+
+
+def do_complete(cli, prog_name, include_descriptions):
+ cwords = split_arg_string(os.environ["COMP_WORDS"])
+ cword = int(os.environ["COMP_CWORD"])
+ args = cwords[1:cword]
+ try:
+ incomplete = cwords[cword]
+ except IndexError:
+ incomplete = ""
+
+ for item in get_choices(cli, prog_name, args, incomplete):
+ echo(item[0])
+ if include_descriptions:
+ # ZSH has trouble dealing with empty array parameters when
+ # returned from commands, use '_' to indicate no description
+ # is present.
+ echo(item[1] if item[1] else "_")
+
+ return True
+
+
+def do_complete_fish(cli, prog_name):
+ cwords = split_arg_string(os.environ["COMP_WORDS"])
+ incomplete = os.environ["COMP_CWORD"]
+ args = cwords[1:]
+
+ for item in get_choices(cli, prog_name, args, incomplete):
+ if item[1]:
+ echo("{arg}\t{desc}".format(arg=item[0], desc=item[1]))
+ else:
+ echo(item[0])
+
+ return True
+
+
+def bashcomplete(cli, prog_name, complete_var, complete_instr):
+ if "_" in complete_instr:
+ command, shell = complete_instr.split("_", 1)
+ else:
+ command = complete_instr
+ shell = "bash"
+
+ if command == "source":
+ echo(get_completion_script(prog_name, complete_var, shell))
+ return True
+ elif command == "complete":
+ if shell == "fish":
+ return do_complete_fish(cli, prog_name)
+ elif shell in {"bash", "zsh"}:
+ return do_complete(cli, prog_name, shell == "zsh")
+
+ return False
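The shell templates above export ``COMP_WORDS`` and ``COMP_CWORD`` and re-invoke the program with the completion variable set, so the round trip can be simulated in-process. A minimal sketch; the program name ``tool`` and the toy command are hypothetical:

.. code-block:: python

    import os

    import click
    from click._bashcomplete import do_complete

    @click.group()
    def cli():
        """Toy CLI used only to exercise the completion machinery."""

    @cli.command()
    @click.option('--count', default=1, help='Number of greetings.')
    def hello(count):
        click.echo('Hello! ' * count)

    # What the generated bash function exports before re-running the
    # program: the words typed so far and the index of the cursor word.
    os.environ['COMP_WORDS'] = 'tool he'
    os.environ['COMP_CWORD'] = '1'
    do_complete(cli, 'tool', include_descriptions=False)  # prints: hello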
diff --git a/third_party/python/click/click/_compat.py b/third_party/python/click/click/_compat.py
new file mode 100644
index 0000000000..60cb115bc5
--- /dev/null
+++ b/third_party/python/click/click/_compat.py
@@ -0,0 +1,786 @@
+# flake8: noqa
+import codecs
+import io
+import os
+import re
+import sys
+from weakref import WeakKeyDictionary
+
+PY2 = sys.version_info[0] == 2
+CYGWIN = sys.platform.startswith("cygwin")
+MSYS2 = sys.platform.startswith("win") and ("GCC" in sys.version)
+# Determine local App Engine environment, per Google's own suggestion
+APP_ENGINE = "APPENGINE_RUNTIME" in os.environ and "Development/" in os.environ.get(
+ "SERVER_SOFTWARE", ""
+)
+WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2
+DEFAULT_COLUMNS = 80
+
+
+_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]")
+
+
+def get_filesystem_encoding():
+ return sys.getfilesystemencoding() or sys.getdefaultencoding()
+
+
+def _make_text_stream(
+ stream, encoding, errors, force_readable=False, force_writable=False
+):
+ if encoding is None:
+ encoding = get_best_encoding(stream)
+ if errors is None:
+ errors = "replace"
+ return _NonClosingTextIOWrapper(
+ stream,
+ encoding,
+ errors,
+ line_buffering=True,
+ force_readable=force_readable,
+ force_writable=force_writable,
+ )
+
+
+def is_ascii_encoding(encoding):
+ """Checks if a given encoding is ascii."""
+ try:
+ return codecs.lookup(encoding).name == "ascii"
+ except LookupError:
+ return False
+
+
+def get_best_encoding(stream):
+ """Returns the default stream encoding if not found."""
+ rv = getattr(stream, "encoding", None) or sys.getdefaultencoding()
+ if is_ascii_encoding(rv):
+ return "utf-8"
+ return rv
+
+
+class _NonClosingTextIOWrapper(io.TextIOWrapper):
+ def __init__(
+ self,
+ stream,
+ encoding,
+ errors,
+ force_readable=False,
+ force_writable=False,
+ **extra
+ ):
+ self._stream = stream = _FixupStream(stream, force_readable, force_writable)
+ io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
+
+ # The io module is a place where the Python 3 text behavior
+ # was forced upon Python 2, so we need to unbreak
+ # it to look like Python 2.
+ if PY2:
+
+ def write(self, x):
+ if isinstance(x, str) or is_bytes(x):
+ try:
+ self.flush()
+ except Exception:
+ pass
+ return self.buffer.write(str(x))
+ return io.TextIOWrapper.write(self, x)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def __del__(self):
+ try:
+ self.detach()
+ except Exception:
+ pass
+
+ def isatty(self):
+ # https://bitbucket.org/pypy/pypy/issue/1803
+ return self._stream.isatty()
+
+
+class _FixupStream(object):
+ """The new io interface needs more from streams than streams
+ traditionally implement. As such, this fix-up code is necessary in
+ some circumstances.
+
+ The forcing of readable and writable flags is there because some tools
+ put badly patched objects on sys (one such offender is certain versions
+ of jupyter notebook).
+ """
+
+ def __init__(self, stream, force_readable=False, force_writable=False):
+ self._stream = stream
+ self._force_readable = force_readable
+ self._force_writable = force_writable
+
+ def __getattr__(self, name):
+ return getattr(self._stream, name)
+
+ def read1(self, size):
+ f = getattr(self._stream, "read1", None)
+ if f is not None:
+ return f(size)
+ # We only dispatch to readline instead of read in Python 2 as we
+ # do not want to cause problems with the different implementation
+ # of line buffering.
+ if PY2:
+ return self._stream.readline(size)
+ return self._stream.read(size)
+
+ def readable(self):
+ if self._force_readable:
+ return True
+ x = getattr(self._stream, "readable", None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.read(0)
+ except Exception:
+ return False
+ return True
+
+ def writable(self):
+ if self._force_writable:
+ return True
+ x = getattr(self._stream, "writable", None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.write("")
+ except Exception:
+ try:
+ self._stream.write(b"")
+ except Exception:
+ return False
+ return True
+
+ def seekable(self):
+ x = getattr(self._stream, "seekable", None)
+ if x is not None:
+ return x()
+ try:
+ self._stream.seek(self._stream.tell())
+ except Exception:
+ return False
+ return True
+
+
+if PY2:
+ text_type = unicode
+ raw_input = raw_input
+ string_types = (str, unicode)
+ int_types = (int, long)
+ iteritems = lambda x: x.iteritems()
+ range_type = xrange
+
+ def is_bytes(x):
+ return isinstance(x, (buffer, bytearray))
+
+ _identifier_re = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+ # For Windows, we need to force stdout/stdin/stderr to binary if it's
+ # fetched for that. This obviously is not the most correct way to do
+ # it as it changes global state. Unfortunately, there does not seem to
+ # be a clear better way to do it as just reopening the file in binary
+ # mode does not change anything.
+ #
+ # An option would be to do what Python 3 does and to open the file as
+ # binary only, patch it back to the system, and then use a wrapper
+ # stream that converts newlines. It's not quite clear which
+ # option is correct here.
+ #
+ # This code also lives in _winconsole for the fallback to the console
+ # emulation stream.
+ #
+ # There are also Windows environments where the `msvcrt` module is not
+ # available (which is why we use try/except instead of the WIN variable
+ # here), such as the Google App Engine development server on Windows. In
+ # those cases there is just nothing we can do.
+ def set_binary_mode(f):
+ return f
+
+ try:
+ import msvcrt
+ except ImportError:
+ pass
+ else:
+
+ def set_binary_mode(f):
+ try:
+ fileno = f.fileno()
+ except Exception:
+ pass
+ else:
+ msvcrt.setmode(fileno, os.O_BINARY)
+ return f
+
+ try:
+ import fcntl
+ except ImportError:
+ pass
+ else:
+
+ def set_binary_mode(f):
+ try:
+ fileno = f.fileno()
+ except Exception:
+ pass
+ else:
+ flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
+ fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
+ return f
+
+ def isidentifier(x):
+ return _identifier_re.search(x) is not None
+
+ def get_binary_stdin():
+ return set_binary_mode(sys.stdin)
+
+ def get_binary_stdout():
+ _wrap_std_stream("stdout")
+ return set_binary_mode(sys.stdout)
+
+ def get_binary_stderr():
+ _wrap_std_stream("stderr")
+ return set_binary_mode(sys.stderr)
+
+ def get_text_stdin(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stdin, encoding, errors, force_readable=True)
+
+ def get_text_stdout(encoding=None, errors=None):
+ _wrap_std_stream("stdout")
+ rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stdout, encoding, errors, force_writable=True)
+
+ def get_text_stderr(encoding=None, errors=None):
+ _wrap_std_stream("stderr")
+ rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+ if rv is not None:
+ return rv
+ return _make_text_stream(sys.stderr, encoding, errors, force_writable=True)
+
+ def filename_to_ui(value):
+ if isinstance(value, bytes):
+ value = value.decode(get_filesystem_encoding(), "replace")
+ return value
+
+
+else:
+ import io
+
+ text_type = str
+ raw_input = input
+ string_types = (str,)
+ int_types = (int,)
+ range_type = range
+ isidentifier = lambda x: x.isidentifier()
+ iteritems = lambda x: iter(x.items())
+
+ def is_bytes(x):
+ return isinstance(x, (bytes, memoryview, bytearray))
+
+ def _is_binary_reader(stream, default=False):
+ try:
+ return isinstance(stream.read(0), bytes)
+ except Exception:
+ return default
+ # This happens in some cases where the stream was already
+ # closed. In this case, we assume the default.
+
+ def _is_binary_writer(stream, default=False):
+ try:
+ stream.write(b"")
+ except Exception:
+ try:
+ stream.write("")
+ return False
+ except Exception:
+ pass
+ return default
+ return True
+
+ def _find_binary_reader(stream):
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_reader(stream, False):
+ return stream
+
+ buf = getattr(stream, "buffer", None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_reader(buf, True):
+ return buf
+
+ def _find_binary_writer(stream):
+ # We need to figure out if the given stream is already binary.
+ # This can happen because the official docs recommend detaching
+ # the streams to get binary streams. Some code might do this, so
+ # we need to deal with this case explicitly.
+ if _is_binary_writer(stream, False):
+ return stream
+
+ buf = getattr(stream, "buffer", None)
+
+ # Same situation here; this time we assume that the buffer is
+ # actually binary in case it's closed.
+ if buf is not None and _is_binary_writer(buf, True):
+ return buf
+
+ def _stream_is_misconfigured(stream):
+ """A stream is misconfigured if its encoding is ASCII."""
+ # If the stream does not have an encoding set, we assume it's set
+ # to ASCII. This appears to happen in certain unittest
+ # environments. It's not quite clear what the correct behavior is
+ # but this at least will force Click to recover somehow.
+ return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii")
+
+ def _is_compat_stream_attr(stream, attr, value):
+ """A stream attribute is compatible if it is equal to the
+ desired value or the desired value is unset and the attribute
+ has a value.
+ """
+ stream_value = getattr(stream, attr, None)
+ return stream_value == value or (value is None and stream_value is not None)
+
+ def _is_compatible_text_stream(stream, encoding, errors):
+ """Check if a stream's encoding and errors attributes are
+ compatible with the desired values.
+ """
+ return _is_compat_stream_attr(
+ stream, "encoding", encoding
+ ) and _is_compat_stream_attr(stream, "errors", errors)
+
+ def _force_correct_text_stream(
+ text_stream,
+ encoding,
+ errors,
+ is_binary,
+ find_binary,
+ force_readable=False,
+ force_writable=False,
+ ):
+ if is_binary(text_stream, False):
+ binary_reader = text_stream
+ else:
+ # If the stream looks compatible, and won't default to a
+ # misconfigured ascii encoding, return it as-is.
+ if _is_compatible_text_stream(text_stream, encoding, errors) and not (
+ encoding is None and _stream_is_misconfigured(text_stream)
+ ):
+ return text_stream
+
+ # Otherwise, get the underlying binary reader.
+ binary_reader = find_binary(text_stream)
+
+ # If that's not possible, silently use the original reader
+ # and get mojibake instead of exceptions.
+ if binary_reader is None:
+ return text_stream
+
+ # Default errors to replace instead of strict in order to get
+ # something that works.
+ if errors is None:
+ errors = "replace"
+
+ # Wrap the binary stream in a text stream with the correct
+ # encoding parameters.
+ return _make_text_stream(
+ binary_reader,
+ encoding,
+ errors,
+ force_readable=force_readable,
+ force_writable=force_writable,
+ )
+
+ def _force_correct_text_reader(text_reader, encoding, errors, force_readable=False):
+ return _force_correct_text_stream(
+ text_reader,
+ encoding,
+ errors,
+ _is_binary_reader,
+ _find_binary_reader,
+ force_readable=force_readable,
+ )
+
+ def _force_correct_text_writer(text_writer, encoding, errors, force_writable=False):
+ return _force_correct_text_stream(
+ text_writer,
+ encoding,
+ errors,
+ _is_binary_writer,
+ _find_binary_writer,
+ force_writable=force_writable,
+ )
+
+ def get_binary_stdin():
+ reader = _find_binary_reader(sys.stdin)
+ if reader is None:
+ raise RuntimeError("Was not able to determine binary stream for sys.stdin.")
+ return reader
+
+ def get_binary_stdout():
+ writer = _find_binary_writer(sys.stdout)
+ if writer is None:
+ raise RuntimeError(
+ "Was not able to determine binary stream for sys.stdout."
+ )
+ return writer
+
+ def get_binary_stderr():
+ writer = _find_binary_writer(sys.stderr)
+ if writer is None:
+ raise RuntimeError(
+ "Was not able to determine binary stream for sys.stderr."
+ )
+ return writer
+
+ def get_text_stdin(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_reader(
+ sys.stdin, encoding, errors, force_readable=True
+ )
+
+ def get_text_stdout(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(
+ sys.stdout, encoding, errors, force_writable=True
+ )
+
+ def get_text_stderr(encoding=None, errors=None):
+ rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+ if rv is not None:
+ return rv
+ return _force_correct_text_writer(
+ sys.stderr, encoding, errors, force_writable=True
+ )
+
+ def filename_to_ui(value):
+ if isinstance(value, bytes):
+ value = value.decode(get_filesystem_encoding(), "replace")
+ else:
+ value = value.encode("utf-8", "surrogateescape").decode("utf-8", "replace")
+ return value
+
+
+def get_streerror(e, default=None):
+ if hasattr(e, "strerror"):
+ msg = e.strerror
+ else:
+ if default is not None:
+ msg = default
+ else:
+ msg = str(e)
+ if isinstance(msg, bytes):
+ msg = msg.decode("utf-8", "replace")
+ return msg
+
+
+def _wrap_io_open(file, mode, encoding, errors):
+ """On Python 2, :func:`io.open` returns a text file wrapper that
+ requires passing ``unicode`` to ``write``. Need to open the file in
+ binary mode then wrap it in a subclass that can write ``str`` and
+ ``unicode``.
+
+ Also handles not passing ``encoding`` and ``errors`` in binary mode.
+ """
+ binary = "b" in mode
+
+ if binary:
+ kwargs = {}
+ else:
+ kwargs = {"encoding": encoding, "errors": errors}
+
+ if not PY2 or binary:
+ return io.open(file, mode, **kwargs)
+
+ f = io.open(file, "{}b".format(mode.replace("t", "")))
+ return _make_text_stream(f, **kwargs)
+
+
+def open_stream(filename, mode="r", encoding=None, errors="strict", atomic=False):
+ binary = "b" in mode
+
+ # Standard streams first. These are simple because they don't need
+ # special handling for the atomic flag. It's entirely ignored.
+ if filename == "-":
+ if any(m in mode for m in ["w", "a", "x"]):
+ if binary:
+ return get_binary_stdout(), False
+ return get_text_stdout(encoding=encoding, errors=errors), False
+ if binary:
+ return get_binary_stdin(), False
+ return get_text_stdin(encoding=encoding, errors=errors), False
+
+ # Non-atomic writes directly go out through the regular open functions.
+ if not atomic:
+ return _wrap_io_open(filename, mode, encoding, errors), True
+
+ # Some usability stuff for atomic writes
+ if "a" in mode:
+ raise ValueError(
+ "Appending to an existing file is not supported, because that"
+ " would involve an expensive `copy`-operation to a temporary"
+ " file. Open the file in normal `w`-mode and copy explicitly"
+ " if that's what you're after."
+ )
+ if "x" in mode:
+ raise ValueError("Use the `overwrite`-parameter instead.")
+ if "w" not in mode:
+ raise ValueError("Atomic writes only make sense with `w`-mode.")
+
+ # Atomic writes are more complicated. They work by opening a file
+ # as a proxy in the same folder and then using the fdopen
+ # functionality to wrap it in a Python file. Then we wrap it in an
+ # atomic file that moves the file over on close.
+ import errno
+ import random
+
+ try:
+ perm = os.stat(filename).st_mode
+ except OSError:
+ perm = None
+
+ flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
+
+ if binary:
+ flags |= getattr(os, "O_BINARY", 0)
+
+ while True:
+ tmp_filename = os.path.join(
+ os.path.dirname(filename),
+ ".__atomic-write{:08x}".format(random.randrange(1 << 32)),
+ )
+ try:
+ fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm)
+ break
+ except OSError as e:
+ if e.errno == errno.EEXIST or (
+ os.name == "nt"
+ and e.errno == errno.EACCES
+ and os.path.isdir(e.filename)
+ and os.access(e.filename, os.W_OK)
+ ):
+ continue
+ raise
+
+ if perm is not None:
+ os.chmod(tmp_filename, perm) # in case perm includes bits in umask
+
+ f = _wrap_io_open(fd, mode, encoding, errors)
+ return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
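+
+# Usage sketch (editor's illustration, not upstream code): open_stream
+# returns a ``(stream, should_close)`` pair; the standard streams ("-")
+# report ``should_close=False`` so callers never close them by accident.
+#
+#     f, should_close = open_stream("out.txt", "w", atomic=True)
+#     try:
+#         f.write("data")
+#     finally:
+#         if should_close:
+#             f.close()  # the atomic wrapper renames the temp file here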
+
+
+# Used in a destructor call, needs extra protection from interpreter cleanup.
+if hasattr(os, "replace"):
+ _replace = os.replace
+ _can_replace = True
+else:
+ _replace = os.rename
+ _can_replace = not WIN
+
+
+class _AtomicFile(object):
+ def __init__(self, f, tmp_filename, real_filename):
+ self._f = f
+ self._tmp_filename = tmp_filename
+ self._real_filename = real_filename
+ self.closed = False
+
+ @property
+ def name(self):
+ return self._real_filename
+
+ def close(self, delete=False):
+ if self.closed:
+ return
+ self._f.close()
+ if not _can_replace:
+ try:
+ os.remove(self._real_filename)
+ except OSError:
+ pass
+ _replace(self._tmp_filename, self._real_filename)
+ self.closed = True
+
+ def __getattr__(self, name):
+ return getattr(self._f, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.close(delete=exc_type is not None)
+
+ def __repr__(self):
+ return repr(self._f)
+
+
+auto_wrap_for_ansi = None
+colorama = None
+get_winterm_size = None
+
+
+def strip_ansi(value):
+ return _ansi_re.sub("", value)
+
+
+def _is_jupyter_kernel_output(stream):
+ if WIN:
+        # TODO: Couldn't test on Windows; shouldn't try to support this
+        # until someone tests the details w.r.t. colorama.
+ return
+
+ while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)):
+ stream = stream._stream
+
+ return stream.__class__.__module__.startswith("ipykernel.")
+
+
+def should_strip_ansi(stream=None, color=None):
+ if color is None:
+ if stream is None:
+ stream = sys.stdin
+ return not isatty(stream) and not _is_jupyter_kernel_output(stream)
+ return not color
+
+
+# If we're on Windows, we provide transparent integration through
+# colorama. This will make ANSI colors through the echo function
+# work automatically.
+if WIN:
+ # Windows has a smaller terminal
+ DEFAULT_COLUMNS = 79
+
+ from ._winconsole import _get_windows_console_stream, _wrap_std_stream
+
+ def _get_argv_encoding():
+ import locale
+
+ return locale.getpreferredencoding()
+
+ if PY2:
+
+ def raw_input(prompt=""):
+ sys.stderr.flush()
+ if prompt:
+ stdout = _default_text_stdout()
+ stdout.write(prompt)
+ stdin = _default_text_stdin()
+ return stdin.readline().rstrip("\r\n")
+
+ try:
+ import colorama
+ except ImportError:
+ pass
+ else:
+ _ansi_stream_wrappers = WeakKeyDictionary()
+
+ def auto_wrap_for_ansi(stream, color=None):
+ """This function wraps a stream so that calls through colorama
+ are issued to the win32 console API to recolor on demand. It
+            also makes sure to reset the colors if a write call is
+            interrupted, so the console is not left broken afterwards.
+ """
+ try:
+ cached = _ansi_stream_wrappers.get(stream)
+ except Exception:
+ cached = None
+ if cached is not None:
+ return cached
+ strip = should_strip_ansi(stream, color)
+ ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
+ rv = ansi_wrapper.stream
+ _write = rv.write
+
+ def _safe_write(s):
+ try:
+ return _write(s)
+ except:
+ ansi_wrapper.reset_all()
+ raise
+
+ rv.write = _safe_write
+ try:
+ _ansi_stream_wrappers[stream] = rv
+ except Exception:
+ pass
+ return rv
+
+ def get_winterm_size():
+ win = colorama.win32.GetConsoleScreenBufferInfo(
+ colorama.win32.STDOUT
+ ).srWindow
+ return win.Right - win.Left, win.Bottom - win.Top
+
+
+else:
+
+ def _get_argv_encoding():
+ return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding()
+
+ _get_windows_console_stream = lambda *x: None
+ _wrap_std_stream = lambda *x: None
+
+
+def term_len(x):
+ return len(strip_ansi(x))
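+
+# Editor's illustration: length as the terminal renders it, with ANSI
+# escapes stripped, e.g. term_len("\x1b[31mred\x1b[0m") == 3.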
+
+
+def isatty(stream):
+ try:
+ return stream.isatty()
+ except Exception:
+ return False
+
+
+def _make_cached_stream_func(src_func, wrapper_func):
+ cache = WeakKeyDictionary()
+
+ def func():
+ stream = src_func()
+ try:
+ rv = cache.get(stream)
+ except Exception:
+ rv = None
+ if rv is not None:
+ return rv
+ rv = wrapper_func()
+ try:
+ stream = src_func() # In case wrapper_func() modified the stream
+ cache[stream] = rv
+ except Exception:
+ pass
+ return rv
+
+ return func
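+
+# Editor's note: the cache above is a WeakKeyDictionary keyed on the
+# *current* stream object, so replacing e.g. sys.stdout (as test runners
+# do) naturally yields a fresh wrapper instead of a stale cached one.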
+
+
+_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin)
+_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout)
+_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr)
+
+
+binary_streams = {
+ "stdin": get_binary_stdin,
+ "stdout": get_binary_stdout,
+ "stderr": get_binary_stderr,
+}
+
+text_streams = {
+ "stdin": get_text_stdin,
+ "stdout": get_text_stdout,
+ "stderr": get_text_stderr,
+}
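+
+# Usage sketch (editor's illustration): these registries let the public
+# helpers look up a stream factory by name, roughly:
+#
+#     opener = text_streams["stdout"]
+#     stream = opener(encoding="utf-8")
+#     stream.write(u"hello\n")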
diff --git a/third_party/python/click/click/_termui_impl.py b/third_party/python/click/click/_termui_impl.py
new file mode 100644
index 0000000000..88bec37701
--- /dev/null
+++ b/third_party/python/click/click/_termui_impl.py
@@ -0,0 +1,657 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains implementations for the termui module. To keep the
+import time of Click down, some infrequently used functionality is
+placed in this module and only imported as needed.
+"""
+import contextlib
+import math
+import os
+import sys
+import time
+
+from ._compat import _default_text_stdout
+from ._compat import CYGWIN
+from ._compat import get_best_encoding
+from ._compat import int_types
+from ._compat import isatty
+from ._compat import open_stream
+from ._compat import range_type
+from ._compat import strip_ansi
+from ._compat import term_len
+from ._compat import WIN
+from .exceptions import ClickException
+from .utils import echo
+
+if os.name == "nt":
+ BEFORE_BAR = "\r"
+ AFTER_BAR = "\n"
+else:
+ BEFORE_BAR = "\r\033[?25l"
+ AFTER_BAR = "\033[?25h\n"
+
+
+def _length_hint(obj):
+ """Returns the length hint of an object."""
+ try:
+ return len(obj)
+ except (AttributeError, TypeError):
+ try:
+ get_hint = type(obj).__length_hint__
+ except AttributeError:
+ return None
+ try:
+ hint = get_hint(obj)
+ except TypeError:
+ return None
+ if hint is NotImplemented or not isinstance(hint, int_types) or hint < 0:
+ return None
+ return hint
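+
+# Editor's illustration: this mirrors operator.length_hint (PEP 424) but
+# returns None instead of a default when no hint is available:
+#
+#     _length_hint([1, 2, 3])         # -> 3 (via len())
+#     _length_hint(iter([1, 2, 3]))   # -> 3 on CPython (__length_hint__)
+#     _length_hint(c for c in "ab")   # -> None (generators give no hint)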
+
+
+class ProgressBar(object):
+ def __init__(
+ self,
+ iterable,
+ length=None,
+ fill_char="#",
+ empty_char=" ",
+ bar_template="%(bar)s",
+ info_sep=" ",
+ show_eta=True,
+ show_percent=None,
+ show_pos=False,
+ item_show_func=None,
+ label=None,
+ file=None,
+ color=None,
+ width=30,
+ ):
+ self.fill_char = fill_char
+ self.empty_char = empty_char
+ self.bar_template = bar_template
+ self.info_sep = info_sep
+ self.show_eta = show_eta
+ self.show_percent = show_percent
+ self.show_pos = show_pos
+ self.item_show_func = item_show_func
+ self.label = label or ""
+ if file is None:
+ file = _default_text_stdout()
+ self.file = file
+ self.color = color
+ self.width = width
+ self.autowidth = width == 0
+
+ if length is None:
+ length = _length_hint(iterable)
+ if iterable is None:
+ if length is None:
+ raise TypeError("iterable or length is required")
+ iterable = range_type(length)
+ self.iter = iter(iterable)
+ self.length = length
+ self.length_known = length is not None
+ self.pos = 0
+ self.avg = []
+ self.start = self.last_eta = time.time()
+ self.eta_known = False
+ self.finished = False
+ self.max_width = None
+ self.entered = False
+ self.current_item = None
+ self.is_hidden = not isatty(self.file)
+ self._last_line = None
+ self.short_limit = 0.5
+
+ def __enter__(self):
+ self.entered = True
+ self.render_progress()
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.render_finish()
+
+ def __iter__(self):
+ if not self.entered:
+ raise RuntimeError("You need to use progress bars in a with block.")
+ self.render_progress()
+ return self.generator()
+
+ def __next__(self):
+ # Iteration is defined in terms of a generator function,
+ # returned by iter(self); use that to define next(). This works
+ # because `self.iter` is an iterable consumed by that generator,
+ # so it is re-entry safe. Calling `next(self.generator())`
+ # twice works and does "what you want".
+ return next(iter(self))
+
+ # Python 2 compat
+ next = __next__
+
+ def is_fast(self):
+ return time.time() - self.start <= self.short_limit
+
+ def render_finish(self):
+ if self.is_hidden or self.is_fast():
+ return
+ self.file.write(AFTER_BAR)
+ self.file.flush()
+
+ @property
+ def pct(self):
+ if self.finished:
+ return 1.0
+ return min(self.pos / (float(self.length) or 1), 1.0)
+
+ @property
+ def time_per_iteration(self):
+ if not self.avg:
+ return 0.0
+ return sum(self.avg) / float(len(self.avg))
+
+ @property
+ def eta(self):
+ if self.length_known and not self.finished:
+ return self.time_per_iteration * (self.length - self.pos)
+ return 0.0
+
+ def format_eta(self):
+ if self.eta_known:
+ t = int(self.eta)
+ seconds = t % 60
+ t //= 60
+ minutes = t % 60
+ t //= 60
+ hours = t % 24
+ t //= 24
+ if t > 0:
+ return "{}d {:02}:{:02}:{:02}".format(t, hours, minutes, seconds)
+ else:
+ return "{:02}:{:02}:{:02}".format(hours, minutes, seconds)
+ return ""
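+
+    # Editor's worked example for format_eta above: with eta_known set, an
+    # ETA of 93784 seconds renders as "1d 02:03:04"; anything under a day
+    # renders as plain "HH:MM:SS".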
+
+ def format_pos(self):
+ pos = str(self.pos)
+ if self.length_known:
+ pos += "/{}".format(self.length)
+ return pos
+
+ def format_pct(self):
+ return "{: 4}%".format(int(self.pct * 100))[1:]
+
+ def format_bar(self):
+ if self.length_known:
+ bar_length = int(self.pct * self.width)
+ bar = self.fill_char * bar_length
+ bar += self.empty_char * (self.width - bar_length)
+ elif self.finished:
+ bar = self.fill_char * self.width
+ else:
+ bar = list(self.empty_char * (self.width or 1))
+ if self.time_per_iteration != 0:
+ bar[
+ int(
+ (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
+ * self.width
+ )
+ ] = self.fill_char
+ bar = "".join(bar)
+ return bar
+
+ def format_progress_line(self):
+ show_percent = self.show_percent
+
+ info_bits = []
+ if self.length_known and show_percent is None:
+ show_percent = not self.show_pos
+
+ if self.show_pos:
+ info_bits.append(self.format_pos())
+ if show_percent:
+ info_bits.append(self.format_pct())
+ if self.show_eta and self.eta_known and not self.finished:
+ info_bits.append(self.format_eta())
+ if self.item_show_func is not None:
+ item_info = self.item_show_func(self.current_item)
+ if item_info is not None:
+ info_bits.append(item_info)
+
+ return (
+ self.bar_template
+ % {
+ "label": self.label,
+ "bar": self.format_bar(),
+ "info": self.info_sep.join(info_bits),
+ }
+ ).rstrip()
+
+ def render_progress(self):
+ from .termui import get_terminal_size
+
+ if self.is_hidden:
+ return
+
+ buf = []
+ # Update width in case the terminal has been resized
+ if self.autowidth:
+ old_width = self.width
+ self.width = 0
+ clutter_length = term_len(self.format_progress_line())
+ new_width = max(0, get_terminal_size()[0] - clutter_length)
+ if new_width < old_width:
+ buf.append(BEFORE_BAR)
+ buf.append(" " * self.max_width)
+ self.max_width = new_width
+ self.width = new_width
+
+ clear_width = self.width
+ if self.max_width is not None:
+ clear_width = self.max_width
+
+ buf.append(BEFORE_BAR)
+ line = self.format_progress_line()
+ line_len = term_len(line)
+ if self.max_width is None or self.max_width < line_len:
+ self.max_width = line_len
+
+ buf.append(line)
+ buf.append(" " * (clear_width - line_len))
+ line = "".join(buf)
+ # Render the line only if it changed.
+
+ if line != self._last_line and not self.is_fast():
+ self._last_line = line
+ echo(line, file=self.file, color=self.color, nl=False)
+ self.file.flush()
+
+ def make_step(self, n_steps):
+ self.pos += n_steps
+ if self.length_known and self.pos >= self.length:
+ self.finished = True
+
+ if (time.time() - self.last_eta) < 1.0:
+ return
+
+ self.last_eta = time.time()
+
+        # self.avg is a rolling list of the last <= 7 estimates of seconds
+        # per step, each computed as the total elapsed time divided by the
+        # current position.
+ if self.pos:
+ step = (time.time() - self.start) / self.pos
+ else:
+ step = time.time() - self.start
+
+ self.avg = self.avg[-6:] + [step]
+
+ self.eta_known = self.length_known
+
+ def update(self, n_steps):
+ self.make_step(n_steps)
+ self.render_progress()
+
+ def finish(self):
+        self.eta_known = False
+ self.current_item = None
+ self.finished = True
+
+ def generator(self):
+ """Return a generator which yields the items added to the bar
+ during construction, and updates the progress bar *after* the
+ yielded block returns.
+ """
+ # WARNING: the iterator interface for `ProgressBar` relies on
+ # this and only works because this is a simple generator which
+ # doesn't create or manage additional state. If this function
+ # changes, the impact should be evaluated both against
+ # `iter(bar)` and `next(bar)`. `next()` in particular may call
+ # `self.generator()` repeatedly, and this must remain safe in
+ # order for that interface to work.
+ if not self.entered:
+ raise RuntimeError("You need to use progress bars in a with block.")
+
+ if self.is_hidden:
+ for rv in self.iter:
+ yield rv
+ else:
+ for rv in self.iter:
+ self.current_item = rv
+ yield rv
+ self.update(1)
+ self.finish()
+ self.render_progress()
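+
+# Usage sketch (editor's illustration; most callers go through the public
+# click.progressbar() helper, which constructs this class):
+#
+#     with ProgressBar(range(100), label="copying") as bar:
+#         for item in bar:
+#             process(item)  # hypothetical per-item work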
+
+
+def pager(generator, color=None):
+ """Decide what method to use for paging through text."""
+ stdout = _default_text_stdout()
+ if not isatty(sys.stdin) or not isatty(stdout):
+ return _nullpager(stdout, generator, color)
+ pager_cmd = (os.environ.get("PAGER", None) or "").strip()
+ if pager_cmd:
+ if WIN:
+ return _tempfilepager(generator, pager_cmd, color)
+ return _pipepager(generator, pager_cmd, color)
+ if os.environ.get("TERM") in ("dumb", "emacs"):
+ return _nullpager(stdout, generator, color)
+ if WIN or sys.platform.startswith("os2"):
+ return _tempfilepager(generator, "more <", color)
+ if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0:
+ return _pipepager(generator, "less", color)
+
+ import tempfile
+
+ fd, filename = tempfile.mkstemp()
+ os.close(fd)
+ try:
+ if hasattr(os, "system") and os.system('more "{}"'.format(filename)) == 0:
+ return _pipepager(generator, "more", color)
+ return _nullpager(stdout, generator, color)
+ finally:
+ os.unlink(filename)
+
+
+def _pipepager(generator, cmd, color):
+ """Page through text by feeding it to another program. Invoking a
+ pager through this might support colors.
+ """
+ import subprocess
+
+ env = dict(os.environ)
+
+    # If we're piping to less, we might support colors under the
+    # condition that the pager passes ANSI sequences through, i.e. an
+    # "r"/"R" flag is present in the LESS env var or on the command line.
+ cmd_detail = cmd.rsplit("/", 1)[-1].split()
+ if color is None and cmd_detail[0] == "less":
+ less_flags = "{}{}".format(os.environ.get("LESS", ""), " ".join(cmd_detail[1:]))
+ if not less_flags:
+ env["LESS"] = "-R"
+ color = True
+ elif "r" in less_flags or "R" in less_flags:
+ color = True
+
+ c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)
+ encoding = get_best_encoding(c.stdin)
+ try:
+ for text in generator:
+ if not color:
+ text = strip_ansi(text)
+
+ c.stdin.write(text.encode(encoding, "replace"))
+ except (IOError, KeyboardInterrupt):
+ pass
+ else:
+ c.stdin.close()
+
+ # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
+ # search or other commands inside less).
+ #
+ # That means when the user hits ^C, the parent process (click) terminates,
+ # but less is still alive, paging the output and messing up the terminal.
+ #
+ # If the user wants to make the pager exit on ^C, they should set
+ # `LESS='-K'`. It's not our decision to make.
+ while True:
+ try:
+ c.wait()
+ except KeyboardInterrupt:
+ pass
+ else:
+ break
+
+
+def _tempfilepager(generator, cmd, color):
+ """Page through text by invoking a program on a temporary file."""
+ import tempfile
+
+ filename = tempfile.mktemp()
+ # TODO: This never terminates if the passed generator never terminates.
+ text = "".join(generator)
+ if not color:
+ text = strip_ansi(text)
+ encoding = get_best_encoding(sys.stdout)
+ with open_stream(filename, "wb")[0] as f:
+ f.write(text.encode(encoding))
+ try:
+ os.system('{} "{}"'.format(cmd, filename))
+ finally:
+ os.unlink(filename)
+
+
+def _nullpager(stream, generator, color):
+ """Simply print unformatted text. This is the ultimate fallback."""
+ for text in generator:
+ if not color:
+ text = strip_ansi(text)
+ stream.write(text)
+
+
+class Editor(object):
+ def __init__(self, editor=None, env=None, require_save=True, extension=".txt"):
+ self.editor = editor
+ self.env = env
+ self.require_save = require_save
+ self.extension = extension
+
+ def get_editor(self):
+ if self.editor is not None:
+ return self.editor
+ for key in "VISUAL", "EDITOR":
+ rv = os.environ.get(key)
+ if rv:
+ return rv
+ if WIN:
+ return "notepad"
+ for editor in "sensible-editor", "vim", "nano":
+ if os.system("which {} >/dev/null 2>&1".format(editor)) == 0:
+ return editor
+ return "vi"
+
+ def edit_file(self, filename):
+ import subprocess
+
+ editor = self.get_editor()
+ if self.env:
+ environ = os.environ.copy()
+ environ.update(self.env)
+ else:
+ environ = None
+ try:
+ c = subprocess.Popen(
+ '{} "{}"'.format(editor, filename), env=environ, shell=True,
+ )
+ exit_code = c.wait()
+ if exit_code != 0:
+ raise ClickException("{}: Editing failed!".format(editor))
+ except OSError as e:
+ raise ClickException("{}: Editing failed: {}".format(editor, e))
+
+ def edit(self, text):
+ import tempfile
+
+ text = text or ""
+ if text and not text.endswith("\n"):
+ text += "\n"
+
+ fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
+ try:
+ if WIN:
+ encoding = "utf-8-sig"
+ text = text.replace("\n", "\r\n")
+ else:
+ encoding = "utf-8"
+ text = text.encode(encoding)
+
+ f = os.fdopen(fd, "wb")
+ f.write(text)
+ f.close()
+ timestamp = os.path.getmtime(name)
+
+ self.edit_file(name)
+
+ if self.require_save and os.path.getmtime(name) == timestamp:
+ return None
+
+ f = open(name, "rb")
+ try:
+ rv = f.read()
+ finally:
+ f.close()
+ return rv.decode("utf-8-sig").replace("\r\n", "\n")
+ finally:
+ os.unlink(name)
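+
+# Usage sketch (editor's illustration; the public entry point is
+# click.edit(), which wraps this class):
+#
+#     rv = Editor(extension=".md").edit("# draft\n")
+#     if rv is None:
+#         pass  # editor closed without saving (require_save, same mtime)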
+
+
+def open_url(url, wait=False, locate=False):
+ import subprocess
+
+ def _unquote_file(url):
+        try:
+            from urllib.parse import unquote  # Python 3
+        except ImportError:
+            from urllib import unquote  # Python 2
+        if url.startswith("file://"):
+            url = unquote(url[7:])
+        return url
+
+ if sys.platform == "darwin":
+ args = ["open"]
+ if wait:
+ args.append("-W")
+ if locate:
+ args.append("-R")
+ args.append(_unquote_file(url))
+ null = open("/dev/null", "w")
+ try:
+ return subprocess.Popen(args, stderr=null).wait()
+ finally:
+ null.close()
+ elif WIN:
+ if locate:
+ url = _unquote_file(url)
+ args = 'explorer /select,"{}"'.format(_unquote_file(url.replace('"', "")))
+ else:
+ args = 'start {} "" "{}"'.format(
+ "/WAIT" if wait else "", url.replace('"', "")
+ )
+ return os.system(args)
+ elif CYGWIN:
+ if locate:
+ url = _unquote_file(url)
+ args = 'cygstart "{}"'.format(os.path.dirname(url).replace('"', ""))
+ else:
+ args = 'cygstart {} "{}"'.format("-w" if wait else "", url.replace('"', ""))
+ return os.system(args)
+
+ try:
+ if locate:
+ url = os.path.dirname(_unquote_file(url)) or "."
+ else:
+ url = _unquote_file(url)
+ c = subprocess.Popen(["xdg-open", url])
+ if wait:
+ return c.wait()
+ return 0
+ except OSError:
+ if url.startswith(("http://", "https://")) and not locate and not wait:
+ import webbrowser
+
+ webbrowser.open(url)
+ return 0
+ return 1
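+
+# Editor's note: open_url returns a process exit code (0 on success).
+# Illustrative calls:
+#
+#     open_url("https://palletsprojects.com/")
+#     open_url("file:///tmp/report.html", locate=True)  # reveal in folder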
+
+
+def _translate_ch_to_exc(ch):
+ if ch == u"\x03":
+ raise KeyboardInterrupt()
+ if ch == u"\x04" and not WIN: # Unix-like, Ctrl+D
+ raise EOFError()
+ if ch == u"\x1a" and WIN: # Windows, Ctrl+Z
+ raise EOFError()
+
+
+if WIN:
+ import msvcrt
+
+ @contextlib.contextmanager
+ def raw_terminal():
+ yield
+
+ def getchar(echo):
+ # The function `getch` will return a bytes object corresponding to
+ # the pressed character. Since Windows 10 build 1803, it will also
+ # return \x00 when called a second time after pressing a regular key.
+ #
+ # `getwch` does not share this probably-bugged behavior. Moreover, it
+ # returns a Unicode object by default, which is what we want.
+ #
+ # Either of these functions will return \x00 or \xe0 to indicate
+ # a special key, and you need to call the same function again to get
+ # the "rest" of the code. The fun part is that \u00e0 is
+ # "latin small letter a with grave", so if you type that on a French
+ # keyboard, you _also_ get a \xe0.
+ # E.g., consider the Up arrow. This returns \xe0 and then \x48. The
+ # resulting Unicode string reads as "a with grave" + "capital H".
+ # This is indistinguishable from when the user actually types
+ # "a with grave" and then "capital H".
+ #
+ # When \xe0 is returned, we assume it's part of a special-key sequence
+ # and call `getwch` again, but that means that when the user types
+ # the \u00e0 character, `getchar` doesn't return until a second
+ # character is typed.
+ # The alternative is returning immediately, but that would mess up
+ # cross-platform handling of arrow keys and others that start with
+ # \xe0. Another option is using `getch`, but then we can't reliably
+ # read non-ASCII characters, because return values of `getch` are
+ # limited to the current 8-bit codepage.
+ #
+ # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
+ # is doing the right thing in more situations than with `getch`.
+ if echo:
+ func = msvcrt.getwche
+ else:
+ func = msvcrt.getwch
+
+ rv = func()
+ if rv in (u"\x00", u"\xe0"):
+ # \x00 and \xe0 are control characters that indicate special key,
+ # see above.
+ rv += func()
+ _translate_ch_to_exc(rv)
+ return rv
+
+
+else:
+ import tty
+ import termios
+
+ @contextlib.contextmanager
+ def raw_terminal():
+ if not isatty(sys.stdin):
+ f = open("/dev/tty")
+ fd = f.fileno()
+ else:
+ fd = sys.stdin.fileno()
+ f = None
+ try:
+ old_settings = termios.tcgetattr(fd)
+ try:
+ tty.setraw(fd)
+ yield fd
+ finally:
+ termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+ sys.stdout.flush()
+ if f is not None:
+ f.close()
+ except termios.error:
+ pass
+
+ def getchar(echo):
+ with raw_terminal() as fd:
+ ch = os.read(fd, 32)
+ ch = ch.decode(get_best_encoding(sys.stdin), "replace")
+ if echo and isatty(sys.stdout):
+ sys.stdout.write(ch)
+ _translate_ch_to_exc(ch)
+ return ch
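+
+# Usage sketch (editor's illustration): getchar reads a single keypress in
+# raw mode; Ctrl+C raises KeyboardInterrupt and Ctrl+D (Ctrl+Z on Windows)
+# raises EOFError via _translate_ch_to_exc:
+#
+#     try:
+#         ch = getchar(echo=False)
+#     except KeyboardInterrupt:
+#         pass  # user aborted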
diff --git a/third_party/python/click/click/_textwrap.py b/third_party/python/click/click/_textwrap.py
new file mode 100644
index 0000000000..6959087b7f
--- /dev/null
+++ b/third_party/python/click/click/_textwrap.py
@@ -0,0 +1,37 @@
+import textwrap
+from contextlib import contextmanager
+
+
+class TextWrapper(textwrap.TextWrapper):
+ def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+ space_left = max(width - cur_len, 1)
+
+ if self.break_long_words:
+ last = reversed_chunks[-1]
+ cut = last[:space_left]
+ res = last[space_left:]
+ cur_line.append(cut)
+ reversed_chunks[-1] = res
+ elif not cur_line:
+ cur_line.append(reversed_chunks.pop())
+
+ @contextmanager
+ def extra_indent(self, indent):
+ old_initial_indent = self.initial_indent
+ old_subsequent_indent = self.subsequent_indent
+ self.initial_indent += indent
+ self.subsequent_indent += indent
+ try:
+ yield
+ finally:
+ self.initial_indent = old_initial_indent
+ self.subsequent_indent = old_subsequent_indent
+
+ def indent_only(self, text):
+ rv = []
+ for idx, line in enumerate(text.splitlines()):
+ indent = self.initial_indent
+ if idx > 0:
+ indent = self.subsequent_indent
+ rv.append(indent + line)
+ return "\n".join(rv)
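+
+# Usage sketch (editor's illustration):
+#
+#     w = TextWrapper(width=40)
+#     with w.extra_indent("    "):
+#         w.indent_only("a\nb")  # -> "    a\n    b"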
diff --git a/third_party/python/click/click/_unicodefun.py b/third_party/python/click/click/_unicodefun.py
new file mode 100644
index 0000000000..781c365227
--- /dev/null
+++ b/third_party/python/click/click/_unicodefun.py
@@ -0,0 +1,131 @@
+import codecs
+import os
+import sys
+
+from ._compat import PY2
+
+
+def _find_unicode_literals_frame():
+ import __future__
+
+ if not hasattr(sys, "_getframe"): # not all Python implementations have it
+ return 0
+ frm = sys._getframe(1)
+ idx = 1
+ while frm is not None:
+ if frm.f_globals.get("__name__", "").startswith("click."):
+ frm = frm.f_back
+ idx += 1
+ elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
+ return idx
+ else:
+ break
+ return 0
+
+
+def _check_for_unicode_literals():
+ if not __debug__:
+ return
+
+ from . import disable_unicode_literals_warning
+
+ if not PY2 or disable_unicode_literals_warning:
+ return
+ bad_frame = _find_unicode_literals_frame()
+ if bad_frame <= 0:
+ return
+ from warnings import warn
+
+ warn(
+ Warning(
+ "Click detected the use of the unicode_literals __future__"
+ " import. This is heavily discouraged because it can"
+ " introduce subtle bugs in your code. You should instead"
+ ' use explicit u"" literals for your unicode strings. For'
+ " more information see"
+ " https://click.palletsprojects.com/python3/"
+ ),
+ stacklevel=bad_frame,
+ )
+
+
+def _verify_python3_env():
+ """Ensures that the environment is good for unicode on Python 3."""
+ if PY2:
+ return
+ try:
+ import locale
+
+ fs_enc = codecs.lookup(locale.getpreferredencoding()).name
+ except Exception:
+ fs_enc = "ascii"
+ if fs_enc != "ascii":
+ return
+
+ extra = ""
+ if os.name == "posix":
+ import subprocess
+
+ try:
+ rv = subprocess.Popen(
+ ["locale", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+ ).communicate()[0]
+ except OSError:
+ rv = b""
+ good_locales = set()
+ has_c_utf8 = False
+
+ # Make sure we're operating on text here.
+ if isinstance(rv, bytes):
+ rv = rv.decode("ascii", "replace")
+
+ for line in rv.splitlines():
+ locale = line.strip()
+ if locale.lower().endswith((".utf-8", ".utf8")):
+ good_locales.add(locale)
+ if locale.lower() in ("c.utf8", "c.utf-8"):
+ has_c_utf8 = True
+
+ extra += "\n\n"
+ if not good_locales:
+ extra += (
+ "Additional information: on this system no suitable"
+ " UTF-8 locales were discovered. This most likely"
+            " needs to be resolved by reconfiguring the locale"
+ " system."
+ )
+ elif has_c_utf8:
+ extra += (
+ "This system supports the C.UTF-8 locale which is"
+ " recommended. You might be able to resolve your issue"
+ " by exporting the following environment variables:\n\n"
+ " export LC_ALL=C.UTF-8\n"
+ " export LANG=C.UTF-8"
+ )
+ else:
+ extra += (
+ "This system lists a couple of UTF-8 supporting locales"
+ " that you can pick from. The following suitable"
+ " locales were discovered: {}".format(", ".join(sorted(good_locales)))
+ )
+
+ bad_locale = None
+ for locale in os.environ.get("LC_ALL"), os.environ.get("LANG"):
+ if locale and locale.lower().endswith((".utf-8", ".utf8")):
+ bad_locale = locale
+ if locale is not None:
+ break
+ if bad_locale is not None:
+ extra += (
+ "\n\nClick discovered that you exported a UTF-8 locale"
+            " but the locale system could not pick it up"
+ " because it does not exist. The exported locale is"
+ " '{}' but it is not supported".format(bad_locale)
+ )
+
+ raise RuntimeError(
+ "Click will abort further execution because Python 3 was"
+ " configured to use ASCII as encoding for the environment."
+ " Consult https://click.palletsprojects.com/python3/ for"
+ " mitigation steps.{}".format(extra)
+ )
diff --git a/third_party/python/click/click/_winconsole.py b/third_party/python/click/click/_winconsole.py
new file mode 100644
index 0000000000..b6c4274af0
--- /dev/null
+++ b/third_party/python/click/click/_winconsole.py
@@ -0,0 +1,370 @@
+# -*- coding: utf-8 -*-
+# This module is based on the excellent work by Adam Bartoš who
+# provided a lot of what went into the implementation here in
+# the discussion to issue1602 in the Python bug tracker.
+#
+# There are some general differences with regard to how this works
+# compared to the original patches, as we do not need to patch
+# the entire interpreter but just work in our little world of
+# echo and prompt.
+import ctypes
+import io
+import os
+import sys
+import time
+import zlib
+from ctypes import byref
+from ctypes import c_char
+from ctypes import c_char_p
+from ctypes import c_int
+from ctypes import c_ssize_t
+from ctypes import c_ulong
+from ctypes import c_void_p
+from ctypes import POINTER
+from ctypes import py_object
+from ctypes import windll
+from ctypes import WinError
+from ctypes import WINFUNCTYPE
+from ctypes.wintypes import DWORD
+from ctypes.wintypes import HANDLE
+from ctypes.wintypes import LPCWSTR
+from ctypes.wintypes import LPWSTR
+
+import msvcrt
+
+from ._compat import _NonClosingTextIOWrapper
+from ._compat import PY2
+from ._compat import text_type
+
+try:
+ from ctypes import pythonapi
+
+ PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+ PyBuffer_Release = pythonapi.PyBuffer_Release
+except ImportError:
+ pythonapi = None
+
+
+c_ssize_p = POINTER(c_ssize_t)
+
+kernel32 = windll.kernel32
+GetStdHandle = kernel32.GetStdHandle
+ReadConsoleW = kernel32.ReadConsoleW
+WriteConsoleW = kernel32.WriteConsoleW
+GetConsoleMode = kernel32.GetConsoleMode
+GetLastError = kernel32.GetLastError
+GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
+CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
+ ("CommandLineToArgvW", windll.shell32)
+)
+LocalFree = WINFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)(
+ ("LocalFree", windll.kernel32)
+)
+
+
+STDIN_HANDLE = GetStdHandle(-10)
+STDOUT_HANDLE = GetStdHandle(-11)
+STDERR_HANDLE = GetStdHandle(-12)
+
+
+PyBUF_SIMPLE = 0
+PyBUF_WRITABLE = 1
+
+ERROR_SUCCESS = 0
+ERROR_NOT_ENOUGH_MEMORY = 8
+ERROR_OPERATION_ABORTED = 995
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+EOF = b"\x1a"
+MAX_BYTES_WRITTEN = 32767
+
+
+class Py_buffer(ctypes.Structure):
+ _fields_ = [
+ ("buf", c_void_p),
+ ("obj", py_object),
+ ("len", c_ssize_t),
+ ("itemsize", c_ssize_t),
+ ("readonly", c_int),
+ ("ndim", c_int),
+ ("format", c_char_p),
+ ("shape", c_ssize_p),
+ ("strides", c_ssize_p),
+ ("suboffsets", c_ssize_p),
+ ("internal", c_void_p),
+ ]
+
+ if PY2:
+ _fields_.insert(-1, ("smalltable", c_ssize_t * 2))
+
+
+# On PyPy we cannot get buffers so our ability to operate here is
+# severely limited.
+if pythonapi is None:
+ get_buffer = None
+else:
+
+ def get_buffer(obj, writable=False):
+ buf = Py_buffer()
+ flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+ PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+ try:
+ buffer_type = c_char * buf.len
+ return buffer_type.from_address(buf.buf)
+ finally:
+ PyBuffer_Release(byref(buf))
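+
+# Editor's illustration: get_buffer exposes any object supporting the
+# buffer protocol as a ctypes c_char array that ReadConsoleW/WriteConsoleW
+# can consume directly:
+#
+#     data = bytearray(b"hi")
+#     buf = get_buffer(data, writable=True)  # len(buf) == 2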
+
+
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+ def __init__(self, handle):
+ self.handle = handle
+
+ def isatty(self):
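+        # Delegate to the base implementation first: io.IOBase raises
+        # ValueError if the stream is already closed. A console handle
+        # itself always counts as interactive, hence the hard-coded True.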
+ io.RawIOBase.isatty(self)
+ return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+ def readable(self):
+ return True
+
+ def readinto(self, b):
+ bytes_to_be_read = len(b)
+ if not bytes_to_be_read:
+ return 0
+ elif bytes_to_be_read % 2:
+ raise ValueError(
+ "cannot read odd number of bytes from UTF-16-LE encoded console"
+ )
+
+ buffer = get_buffer(b, writable=True)
+ code_units_to_be_read = bytes_to_be_read // 2
+ code_units_read = c_ulong()
+
+ rv = ReadConsoleW(
+ HANDLE(self.handle),
+ buffer,
+ code_units_to_be_read,
+ byref(code_units_read),
+ None,
+ )
+ if GetLastError() == ERROR_OPERATION_ABORTED:
+ # wait for KeyboardInterrupt
+ time.sleep(0.1)
+ if not rv:
+ raise OSError("Windows error: {}".format(GetLastError()))
+
+ if buffer[0] == EOF:
+ return 0
+ return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+ def writable(self):
+ return True
+
+ @staticmethod
+ def _get_error_message(errno):
+ if errno == ERROR_SUCCESS:
+ return "ERROR_SUCCESS"
+ elif errno == ERROR_NOT_ENOUGH_MEMORY:
+ return "ERROR_NOT_ENOUGH_MEMORY"
+ return "Windows error {}".format(errno)
+
+ def write(self, b):
+ bytes_to_be_written = len(b)
+ buf = get_buffer(b)
+ code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
+ code_units_written = c_ulong()
+
+ WriteConsoleW(
+ HANDLE(self.handle),
+ buf,
+ code_units_to_be_written,
+ byref(code_units_written),
+ None,
+ )
+ bytes_written = 2 * code_units_written.value
+
+ if bytes_written == 0 and bytes_to_be_written > 0:
+ raise OSError(self._get_error_message(GetLastError()))
+ return bytes_written
+
+
+class ConsoleStream(object):
+ def __init__(self, text_stream, byte_stream):
+ self._text_stream = text_stream
+ self.buffer = byte_stream
+
+ @property
+ def name(self):
+ return self.buffer.name
+
+ def write(self, x):
+ if isinstance(x, text_type):
+ return self._text_stream.write(x)
+ try:
+ self.flush()
+ except Exception:
+ pass
+ return self.buffer.write(x)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def __getattr__(self, name):
+ return getattr(self._text_stream, name)
+
+ def isatty(self):
+ return self.buffer.isatty()
+
+ def __repr__(self):
+ return "<ConsoleStream name={!r} encoding={!r}>".format(
+ self.name, self.encoding
+ )
+
+
+class WindowsChunkedWriter(object):
+ """
+ Wraps a stream (such as stdout), acting as a transparent proxy for all
+ attribute access apart from method 'write()' which we wrap to write in
+ limited chunks due to a Windows limitation on binary console streams.
+ """
+
+ def __init__(self, wrapped):
+ # double-underscore everything to prevent clashes with names of
+ # attributes on the wrapped stream object.
+ self.__wrapped = wrapped
+
+ def __getattr__(self, name):
+ return getattr(self.__wrapped, name)
+
+ def write(self, text):
+ total_to_write = len(text)
+ written = 0
+
+ while written < total_to_write:
+ to_write = min(total_to_write - written, MAX_BYTES_WRITTEN)
+ self.__wrapped.write(text[written : written + to_write])
+ written += to_write
+
+
+_wrapped_std_streams = set()
+
+
+def _wrap_std_stream(name):
+ # Python 2 & Windows 7 and below
+ if (
+ PY2
+ and sys.getwindowsversion()[:2] <= (6, 1)
+ and name not in _wrapped_std_streams
+ ):
+ setattr(sys, name, WindowsChunkedWriter(getattr(sys, name)))
+ _wrapped_std_streams.add(name)
+
+
+def _get_text_stdin(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stdout(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stderr(buffer_stream):
+ text_stream = _NonClosingTextIOWrapper(
+ io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
+ "utf-16-le",
+ "strict",
+ line_buffering=True,
+ )
+ return ConsoleStream(text_stream, buffer_stream)
+
+
+if PY2:
+
+ def _hash_py_argv():
+ return zlib.crc32("\x00".join(sys.argv[1:]))
+
+ _initial_argv_hash = _hash_py_argv()
+
+ def _get_windows_argv():
+ argc = c_int(0)
+ argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
+ if not argv_unicode:
+ raise WinError()
+ try:
+ argv = [argv_unicode[i] for i in range(0, argc.value)]
+ finally:
+ LocalFree(argv_unicode)
+ del argv_unicode
+
+ if not hasattr(sys, "frozen"):
+ argv = argv[1:]
+ while len(argv) > 0:
+ arg = argv[0]
+ if not arg.startswith("-") or arg == "-":
+ break
+ argv = argv[1:]
+ if arg.startswith(("-c", "-m")):
+ break
+
+ return argv[1:]
+
+
+_stream_factories = {
+ 0: _get_text_stdin,
+ 1: _get_text_stdout,
+ 2: _get_text_stderr,
+}
+
+
+def _is_console(f):
+ if not hasattr(f, "fileno"):
+ return False
+
+ try:
+ fileno = f.fileno()
+ except OSError:
+ return False
+
+ handle = msvcrt.get_osfhandle(fileno)
+ return bool(GetConsoleMode(handle, byref(DWORD())))
+
+
+def _get_windows_console_stream(f, encoding, errors):
+ if (
+ get_buffer is not None
+ and encoding in ("utf-16-le", None)
+ and errors in ("strict", None)
+ and _is_console(f)
+ ):
+ func = _stream_factories.get(f.fileno())
+ if func is not None:
+ if not PY2:
+ f = getattr(f, "buffer", None)
+ if f is None:
+ return None
+ else:
+ # If we are on Python 2 we need to set the stream that we
+                # deal with to binary mode as otherwise the exercise is a
+ # bit moot. The same problems apply as for
+ # get_binary_stdin and friends from _compat.
+ msvcrt.setmode(f.fileno(), os.O_BINARY)
+ return func(f)
diff --git a/third_party/python/click/click/core.py b/third_party/python/click/click/core.py
new file mode 100644
index 0000000000..f58bf26d2f
--- /dev/null
+++ b/third_party/python/click/click/core.py
@@ -0,0 +1,2030 @@
+import errno
+import inspect
+import os
+import sys
+from contextlib import contextmanager
+from functools import update_wrapper
+from itertools import repeat
+
+from ._compat import isidentifier
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import string_types
+from ._unicodefun import _check_for_unicode_literals
+from ._unicodefun import _verify_python3_env
+from .exceptions import Abort
+from .exceptions import BadParameter
+from .exceptions import ClickException
+from .exceptions import Exit
+from .exceptions import MissingParameter
+from .exceptions import UsageError
+from .formatting import HelpFormatter
+from .formatting import join_options
+from .globals import pop_context
+from .globals import push_context
+from .parser import OptionParser
+from .parser import split_opt
+from .termui import confirm
+from .termui import prompt
+from .termui import style
+from .types import BOOL
+from .types import convert_type
+from .types import IntRange
+from .utils import echo
+from .utils import get_os_args
+from .utils import make_default_short_help
+from .utils import make_str
+from .utils import PacifyFlushWrapper
+
+_missing = object()
+
+SUBCOMMAND_METAVAR = "COMMAND [ARGS]..."
+SUBCOMMANDS_METAVAR = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
+
+DEPRECATED_HELP_NOTICE = " (DEPRECATED)"
+DEPRECATED_INVOKE_NOTICE = "DeprecationWarning: The command %(name)s is deprecated."
+
+
+def _maybe_show_deprecated_notice(cmd):
+ if cmd.deprecated:
+ echo(style(DEPRECATED_INVOKE_NOTICE % {"name": cmd.name}, fg="red"), err=True)
+
+
+def fast_exit(code):
+    """Exit without garbage collection; this speeds up exit by about 10ms for
+ things like bash completion.
+ """
+ sys.stdout.flush()
+ sys.stderr.flush()
+ os._exit(code)
+
+
+def _bashcomplete(cmd, prog_name, complete_var=None):
+ """Internal handler for the bash completion support."""
+ if complete_var is None:
+ complete_var = "_{}_COMPLETE".format(prog_name.replace("-", "_").upper())
+ complete_instr = os.environ.get(complete_var)
+ if not complete_instr:
+ return
+
+ from ._bashcomplete import bashcomplete
+
+ if bashcomplete(cmd, prog_name, complete_var, complete_instr):
+ fast_exit(1)
+
+
+def _check_multicommand(base_command, cmd_name, cmd, register=False):
+ if not base_command.chain or not isinstance(cmd, MultiCommand):
+ return
+ if register:
+ hint = (
+ "It is not possible to add multi commands as children to"
+ " another multi command that is in chain mode."
+ )
+ else:
+ hint = (
+ "Found a multi command as subcommand to a multi command"
+ " that is in chain mode. This is not supported."
+ )
+ raise RuntimeError(
+ "{}. Command '{}' is set to chain and '{}' was added as"
+ " subcommand but it in itself is a multi command. ('{}' is a {}"
+        " subcommand but is itself a multi command. ('{}' is a {}"
+ hint,
+ base_command.name,
+ cmd_name,
+ cmd_name,
+ cmd.__class__.__name__,
+ base_command.__class__.__name__,
+ base_command.name,
+ )
+ )
+
+
+def batch(iterable, batch_size):
+ return list(zip(*repeat(iter(iterable), batch_size)))
+
+
+def invoke_param_callback(callback, ctx, param, value):
+ code = getattr(callback, "__code__", None)
+ args = getattr(code, "co_argcount", 3)
+
+ if args < 3:
+ from warnings import warn
+
+ warn(
+ "Parameter callbacks take 3 args, (ctx, param, value). The"
+            " 2-arg style is deprecated and will be removed in 8.0.",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ return callback(ctx, value)
+
+ return callback(ctx, param, value)
+
+
+@contextmanager
+def augment_usage_errors(ctx, param=None):
+ """Context manager that attaches extra information to exceptions."""
+ try:
+ yield
+ except BadParameter as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ if param is not None and e.param is None:
+ e.param = param
+ raise
+ except UsageError as e:
+ if e.ctx is None:
+ e.ctx = ctx
+ raise
+
+
+def iter_params_for_processing(invocation_order, declaration_order):
+    """Given a sequence of parameters in the order they were invoked and
+    an iterable of the parameters that exist, this returns a list in the
+    order in which they should be processed.
+ """
+
+ def sort_key(item):
+ try:
+ idx = invocation_order.index(item)
+ except ValueError:
+ idx = float("inf")
+ return (not item.is_eager, idx)
+
+ return sorted(declaration_order, key=sort_key)
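+
+# Editor's illustration: eager parameters (e.g. --help) sort first; within
+# each group, parameters actually seen on the command line keep their
+# invocation order, and the rest keep declaration order (sorted() is
+# stable, and missing items get an index of infinity).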
+
+
+class Context(object):
+ """The context is a special internal object that holds state relevant
+ for the script execution at every single level. It's normally invisible
+ to commands unless they opt-in to getting access to it.
+
+ The context is useful as it can pass internal objects around and can
+ control special execution features such as reading data from
+ environment variables.
+
+ A context can be used as context manager in which case it will call
+ :meth:`close` on teardown.
+
+ .. versionadded:: 2.0
+ Added the `resilient_parsing`, `help_option_names`,
+ `token_normalize_func` parameters.
+
+ .. versionadded:: 3.0
+ Added the `allow_extra_args` and `allow_interspersed_args`
+ parameters.
+
+ .. versionadded:: 4.0
+ Added the `color`, `ignore_unknown_options`, and
+ `max_content_width` parameters.
+
+ .. versionadded:: 7.1
+ Added the `show_default` parameter.
+
+ :param command: the command class for this context.
+ :param parent: the parent context.
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it is usually
+ the name of the script, for commands below it it's
+                      the name of the command.
+ :param obj: an arbitrary object of user data.
+ :param auto_envvar_prefix: the prefix to use for automatic environment
+ variables. If this is `None` then reading
+ from environment variables is disabled. This
+ does not affect manually set environment
+ variables which are always read.
+ :param default_map: a dictionary (like object) with default values
+ for parameters.
+ :param terminal_width: the width of the terminal. The default is
+ inherit from parent context. If no context
+ defines the terminal width then auto
+ detection will be applied.
+ :param max_content_width: the maximum width for content rendered by
+ Click (this currently only affects help
+ pages). This defaults to 80 characters if
+ not overridden. In other words: even if the
+ terminal is larger than that, Click will not
+ format things wider than 80 characters by
+ default. In addition to that, formatters might
+                              add some safety margin on the right.
+ :param resilient_parsing: if this flag is enabled then Click will
+ parse without any interactivity or callback
+ invocation. Default values will also be
+ ignored. This is useful for implementing
+ things such as completion support.
+ :param allow_extra_args: if this is set to `True` then extra arguments
+ at the end will not raise an error and will be
+ kept on the context. The default is to inherit
+ from the command.
+ :param allow_interspersed_args: if this is set to `False` then options
+ and arguments cannot be mixed. The
+ default is to inherit from the command.
+ :param ignore_unknown_options: instructs click to ignore options it does
+                                   not know and keep them for later
+ processing.
+ :param help_option_names: optionally a list of strings that define how
+ the default help parameter is named. The
+ default is ``['--help']``.
+ :param token_normalize_func: an optional function that is used to
+ normalize tokens (options, choices,
+ etc.). This for instance can be used to
+ implement case insensitive behavior.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are used in texts that Click prints which is by
+ default not the case. This for instance would affect
+ help output.
+ :param show_default: if True, shows defaults for all options.
+ Even if an option is later created with show_default=False,
+ this command-level setting overrides it.
+ """
+
+ def __init__(
+ self,
+ command,
+ parent=None,
+ info_name=None,
+ obj=None,
+ auto_envvar_prefix=None,
+ default_map=None,
+ terminal_width=None,
+ max_content_width=None,
+ resilient_parsing=False,
+ allow_extra_args=None,
+ allow_interspersed_args=None,
+ ignore_unknown_options=None,
+ help_option_names=None,
+ token_normalize_func=None,
+ color=None,
+ show_default=None,
+ ):
+ #: the parent context or `None` if none exists.
+ self.parent = parent
+ #: the :class:`Command` for this context.
+ self.command = command
+ #: the descriptive information name
+ self.info_name = info_name
+ #: the parsed parameters except if the value is hidden in which
+ #: case it's not remembered.
+ self.params = {}
+ #: the leftover arguments.
+ self.args = []
+ #: protected arguments. These are arguments that are prepended
+ #: to `args` when certain parsing scenarios are encountered but
+        #: must never be propagated to other arguments. This is used
+ #: to implement nested parsing.
+ self.protected_args = []
+ if obj is None and parent is not None:
+ obj = parent.obj
+ #: the user object stored.
+ self.obj = obj
+ self._meta = getattr(parent, "meta", {})
+
+ #: A dictionary (-like object) with defaults for parameters.
+ if (
+ default_map is None
+ and parent is not None
+ and parent.default_map is not None
+ ):
+ default_map = parent.default_map.get(info_name)
+ self.default_map = default_map
+
+ #: This flag indicates if a subcommand is going to be executed. A
+ #: group callback can use this information to figure out if it's
+ #: being executed directly or because the execution flow passes
+ #: onwards to a subcommand. By default it's None, but it can be
+ #: the name of the subcommand to execute.
+ #:
+ #: If chaining is enabled this will be set to ``'*'`` in case
+ #: any commands are executed. It is however not possible to
+ #: figure out which ones. If you require this knowledge you
+ #: should use a :func:`resultcallback`.
+ self.invoked_subcommand = None
+
+ if terminal_width is None and parent is not None:
+ terminal_width = parent.terminal_width
+ #: The width of the terminal (None is autodetection).
+ self.terminal_width = terminal_width
+
+ if max_content_width is None and parent is not None:
+ max_content_width = parent.max_content_width
+ #: The maximum width of formatted content (None implies a sensible
+ #: default which is 80 for most things).
+ self.max_content_width = max_content_width
+
+ if allow_extra_args is None:
+ allow_extra_args = command.allow_extra_args
+ #: Indicates if the context allows extra args or if it should
+ #: fail on parsing.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_extra_args = allow_extra_args
+
+ if allow_interspersed_args is None:
+ allow_interspersed_args = command.allow_interspersed_args
+ #: Indicates if the context allows mixing of arguments and
+ #: options or not.
+ #:
+ #: .. versionadded:: 3.0
+ self.allow_interspersed_args = allow_interspersed_args
+
+ if ignore_unknown_options is None:
+ ignore_unknown_options = command.ignore_unknown_options
+ #: Instructs click to ignore options that a command does not
+        #: understand and store them on the context for later
+ #: processing. This is primarily useful for situations where you
+ #: want to call into external programs. Generally this pattern is
+        #: strongly discouraged because it's not possible to losslessly
+ #: forward all arguments.
+ #:
+ #: .. versionadded:: 4.0
+ self.ignore_unknown_options = ignore_unknown_options
+
+ if help_option_names is None:
+ if parent is not None:
+ help_option_names = parent.help_option_names
+ else:
+ help_option_names = ["--help"]
+
+ #: The names for the help options.
+ self.help_option_names = help_option_names
+
+ if token_normalize_func is None and parent is not None:
+ token_normalize_func = parent.token_normalize_func
+
+ #: An optional normalization function for tokens. This is
+ #: options, choices, commands etc.
+ self.token_normalize_func = token_normalize_func
+
+ #: Indicates if resilient parsing is enabled. In that case Click
+ #: will do its best to not cause any failures and default values
+ #: will be ignored. Useful for completion.
+ self.resilient_parsing = resilient_parsing
+
+ # If there is no envvar prefix yet, but the parent has one and
+ # the command on this level has a name, we can expand the envvar
+ # prefix automatically.
+ if auto_envvar_prefix is None:
+ if (
+ parent is not None
+ and parent.auto_envvar_prefix is not None
+ and self.info_name is not None
+ ):
+ auto_envvar_prefix = "{}_{}".format(
+ parent.auto_envvar_prefix, self.info_name.upper()
+ )
+ else:
+ auto_envvar_prefix = auto_envvar_prefix.upper()
+ if auto_envvar_prefix is not None:
+ auto_envvar_prefix = auto_envvar_prefix.replace("-", "_")
+ self.auto_envvar_prefix = auto_envvar_prefix
+
+ if color is None and parent is not None:
+ color = parent.color
+
+ #: Controls if styling output is wanted or not.
+ self.color = color
+
+ self.show_default = show_default
+
+ self._close_callbacks = []
+ self._depth = 0
+
+ def __enter__(self):
+ self._depth += 1
+ push_context(self)
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self._depth -= 1
+ if self._depth == 0:
+ self.close()
+ pop_context()
+
+ @contextmanager
+ def scope(self, cleanup=True):
+ """This helper method can be used with the context object to promote
+ it to the current thread local (see :func:`get_current_context`).
+ The default behavior of this is to invoke the cleanup functions which
+ can be disabled by setting `cleanup` to `False`. The cleanup
+ functions are typically used for things such as closing file handles.
+
+ If the cleanup is intended the context object can also be directly
+ used as a context manager.
+
+ Example usage::
+
+ with ctx.scope():
+ assert get_current_context() is ctx
+
+ This is equivalent::
+
+ with ctx:
+ assert get_current_context() is ctx
+
+ .. versionadded:: 5.0
+
+ :param cleanup: controls if the cleanup functions should be run or
+ not. The default is to run these functions. In
+ some situations the context only wants to be
+ temporarily pushed in which case this can be disabled.
+ Nested pushes automatically defer the cleanup.
+ """
+ if not cleanup:
+ self._depth += 1
+ try:
+ with self as rv:
+ yield rv
+ finally:
+ if not cleanup:
+ self._depth -= 1
+
+ @property
+ def meta(self):
+ """This is a dictionary which is shared with all the contexts
+ that are nested. It exists so that click utilities can store some
+ state here if they need to. It is however the responsibility of
+ that code to manage this dictionary well.
+
+ The keys are supposed to be unique dotted strings. For instance
+ module paths are a good choice for it. What is stored in there is
+ irrelevant for the operation of click. However what is important is
+ that code that places data here adheres to the general semantics of
+ the system.
+
+ Example usage::
+
+ LANG_KEY = f'{__name__}.lang'
+
+ def set_language(value):
+ ctx = get_current_context()
+ ctx.meta[LANG_KEY] = value
+
+ def get_language():
+ return get_current_context().meta.get(LANG_KEY, 'en_US')
+
+ .. versionadded:: 5.0
+ """
+ return self._meta
+
+ def make_formatter(self):
+ """Creates the formatter for the help and usage output."""
+ return HelpFormatter(
+ width=self.terminal_width, max_width=self.max_content_width
+ )
+
+ def call_on_close(self, f):
+ """This decorator remembers a function as callback that should be
+ executed when the context tears down. This is most useful to bind
+ resource handling to the script execution. For instance, file objects
+ opened by the :class:`File` type will register their close callbacks
+ here.
+
+ :param f: the function to execute on teardown.
+ """
+ self._close_callbacks.append(f)
+ return f
+
+ def close(self):
+ """Invokes all close callbacks."""
+ for cb in self._close_callbacks:
+ cb()
+ self._close_callbacks = []
+
+ @property
+ def command_path(self):
+ """The computed command path. This is used for the ``usage``
+ information on the help page. It's automatically created by
+ combining the info names of the chain of contexts to the root.
+ """
+ rv = ""
+ if self.info_name is not None:
+ rv = self.info_name
+ if self.parent is not None:
+ rv = "{} {}".format(self.parent.command_path, rv)
+ return rv.lstrip()
+
+ def find_root(self):
+ """Finds the outermost context."""
+ node = self
+ while node.parent is not None:
+ node = node.parent
+ return node
+
+ def find_object(self, object_type):
+ """Finds the closest object of a given type."""
+ node = self
+ while node is not None:
+ if isinstance(node.obj, object_type):
+ return node.obj
+ node = node.parent
+
+ def ensure_object(self, object_type):
+ """Like :meth:`find_object` but sets the innermost object to a
+ new instance of `object_type` if it does not exist.
+ """
+ rv = self.find_object(object_type)
+ if rv is None:
+ self.obj = rv = object_type()
+ return rv
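+
+    # Editor's note: a common pattern is ``ctx.ensure_object(dict)`` in a
+    # group callback, which guarantees subcommands see a dict in ctx.obj.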
+
+ def lookup_default(self, name):
+ """Looks up the default for a parameter name. This by default
+ looks into the :attr:`default_map` if available.
+ """
+ if self.default_map is not None:
+ rv = self.default_map.get(name)
+ if callable(rv):
+ rv = rv()
+ return rv
+
+ def fail(self, message):
+ """Aborts the execution of the program with a specific error
+ message.
+
+ :param message: the error message to fail with.
+ """
+ raise UsageError(message, self)
+
+ def abort(self):
+ """Aborts the script."""
+ raise Abort()
+
+ def exit(self, code=0):
+ """Exits the application with a given exit code."""
+ raise Exit(code)
+
+ def get_usage(self):
+ """Helper method to get formatted usage string for the current
+ context and command.
+ """
+ return self.command.get_usage(self)
+
+ def get_help(self):
+ """Helper method to get formatted help page for the current
+ context and command.
+ """
+ return self.command.get_help(self)
+
+ def invoke(*args, **kwargs): # noqa: B902
+ """Invokes a command callback in exactly the way it expects. There
+ are two ways to invoke this method:
+
+ 1. the first argument can be a callback and all other arguments and
+ keyword arguments are forwarded directly to the function.
+ 2. the first argument is a click command object. In that case all
+ arguments are forwarded as well but proper click parameters
+ (options and click arguments) must be keyword arguments and Click
+ will fill in defaults.
+
+        Note that before Click 3.2, keyword arguments were not properly
+        filled in, contrary to the intention of this code, and no context
+        was created. For
+ more information about this change and why it was done in a bugfix
+ release see :ref:`upgrade-to-3.2`.
+ """
+ self, callback = args[:2]
+ ctx = self
+
+ # It's also possible to invoke another command which might or
+ # might not have a callback. In that case we also fill
+ # in defaults and make a new context for this command.
+ if isinstance(callback, Command):
+ other_cmd = callback
+ callback = other_cmd.callback
+ ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
+ if callback is None:
+ raise TypeError(
+ "The given command does not have a callback that can be invoked."
+ )
+
+ for param in other_cmd.params:
+ if param.name not in kwargs and param.expose_value:
+ kwargs[param.name] = param.get_default(ctx)
+
+ args = args[2:]
+ with augment_usage_errors(self):
+ with ctx:
+ return callback(*args, **kwargs)
+
+ def forward(*args, **kwargs): # noqa: B902
+ """Similar to :meth:`invoke` but fills in default keyword
+ arguments from the current context if the other command expects
+ it. This cannot invoke callbacks directly, only other commands.
+ """
+ self, cmd = args[:2]
+
+ # It's also possible to invoke another command which might or
+ # might not have a callback.
+ if not isinstance(cmd, Command):
+ raise TypeError("Callback is not a command.")
+
+ for param in self.params:
+ if param not in kwargs:
+ kwargs[param] = self.params[param]
+
+ return self.invoke(cmd, **kwargs)
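+
+    # Editor's illustration of the two helpers (``count`` is hypothetical):
+    #
+    #     ctx.invoke(other_cmd, count=42)  # explicit kwargs, defaults filled
+    #     ctx.forward(other_cmd)           # reuses this context's params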
+
+
+class BaseCommand(object):
+ """The base command implements the minimal API contract of commands.
+ Most code will never use this as it does not implement a lot of useful
+    functionality but it can act as the direct base class for alternative
+ parsing methods that do not depend on the Click parser.
+
+ For instance, this can be used to bridge Click and other systems like
+ argparse or docopt.
+
+ Because base commands do not implement a lot of the API that other
+ parts of Click take for granted, they are not supported for all
+ operations. For instance, they usually cannot be used with the
+ decorators and they have no built-in callback system.
+
+ .. versionchanged:: 2.0
+ Added the `context_settings` parameter.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ """
+
+ #: the default for the :attr:`Context.allow_extra_args` flag.
+ allow_extra_args = False
+ #: the default for the :attr:`Context.allow_interspersed_args` flag.
+ allow_interspersed_args = True
+ #: the default for the :attr:`Context.ignore_unknown_options` flag.
+ ignore_unknown_options = False
+
+ def __init__(self, name, context_settings=None):
+ #: the name the command thinks it has. Upon registering a command
+ #: on a :class:`Group` the group will default the command name
+ #: with this information. You should instead use the
+ #: :class:`Context`\'s :attr:`~Context.info_name` attribute.
+ self.name = name
+ if context_settings is None:
+ context_settings = {}
+ #: an optional dictionary with defaults passed to the context.
+ self.context_settings = context_settings
+
+ def __repr__(self):
+ return "<{} {}>".format(self.__class__.__name__, self.name)
+
+ def get_usage(self, ctx):
+ raise NotImplementedError("Base commands cannot get usage")
+
+ def get_help(self, ctx):
+ raise NotImplementedError("Base commands cannot get help")
+
+ def make_context(self, info_name, args, parent=None, **extra):
+ """This function when given an info name and arguments will kick
+ off the parsing and create a new :class:`Context`. It does not
+ invoke the actual command callback though.
+
+ :param info_name: the info name for this invocation. Generally this
+ is the most descriptive name for the script or
+ command. For the toplevel script it's usually
+ the name of the script, for commands below it it's
+ the name of the command.
+ :param args: the arguments to parse as list of strings.
+ :param parent: the parent context if available.
+ :param extra: extra keyword arguments forwarded to the context
+ constructor.
+ """
+ for key, value in iteritems(self.context_settings):
+ if key not in extra:
+ extra[key] = value
+ ctx = Context(self, info_name=info_name, parent=parent, **extra)
+ with ctx.scope(cleanup=False):
+ self.parse_args(ctx, args)
+ return ctx
+
+ def parse_args(self, ctx, args):
+ """Given a context and a list of arguments this creates the parser
+ and parses the arguments, then modifies the context as necessary.
+ This is automatically invoked by :meth:`make_context`.
+ """
+ raise NotImplementedError("Base commands do not know how to parse arguments.")
+
+ def invoke(self, ctx):
+ """Given a context, this invokes the command. The default
+ implementation is raising a not implemented error.
+ """
+ raise NotImplementedError("Base commands are not invokable by default")
+
+ def main(
+ self,
+ args=None,
+ prog_name=None,
+ complete_var=None,
+ standalone_mode=True,
+ **extra
+ ):
+ """This is the way to invoke a script with all the bells and
+ whistles as a command line application. This will always terminate
+ the application after a call. If this is not wanted, ``SystemExit``
+ needs to be caught.
+
+ This method is also available by directly calling the instance of
+ a :class:`Command`.
+
+ .. versionadded:: 3.0
+ Added the `standalone_mode` flag to control the standalone mode.
+
+ :param args: the arguments that should be used for parsing. If not
+ provided, ``sys.argv[1:]`` is used.
+ :param prog_name: the program name that should be used. By default
+ the program name is constructed by taking the file
+ name from ``sys.argv[0]``.
+ :param complete_var: the environment variable that controls the
+ bash completion support. The default is
+ ``"_<prog_name>_COMPLETE"`` with prog_name in
+ uppercase.
+ :param standalone_mode: the default behavior is to invoke the script
+ in standalone mode. Click will then
+ handle exceptions and convert them into
+ error messages and the function will never
+ return but shut down the interpreter. If
+ this is set to `False` they will be
+ propagated to the caller and the return
+ value of this function is the return value
+ of :meth:`invoke`.
+ :param extra: extra keyword arguments are forwarded to the context
+ constructor. See :class:`Context` for more information.
+ """
+ # If we are in Python 3, we will verify that the environment is
+ # sane at this point or reject further execution to avoid a
+ # broken script.
+ if not PY2:
+ _verify_python3_env()
+ else:
+ _check_for_unicode_literals()
+
+ if args is None:
+ args = get_os_args()
+ else:
+ args = list(args)
+
+ if prog_name is None:
+ prog_name = make_str(
+ os.path.basename(sys.argv[0] if sys.argv else __file__)
+ )
+
+ # Hook for the Bash completion. This only activates if the Bash
+ # completion is actually enabled, otherwise this is quite a fast
+ # noop.
+ _bashcomplete(self, prog_name, complete_var)
+
+ try:
+ try:
+ with self.make_context(prog_name, args, **extra) as ctx:
+ rv = self.invoke(ctx)
+ if not standalone_mode:
+ return rv
+ # it's not safe to `ctx.exit(rv)` here!
+ # note that `rv` may actually contain data like "1" which
+ # has obvious effects
+ # more subtle case: `rv=[None, None]` can come out of
+ # chained commands which all returned `None` -- so it's not
+ # even always obvious that `rv` indicates success/failure
+ # by its truthiness/falsiness
+ ctx.exit()
+ except (EOFError, KeyboardInterrupt):
+ echo(file=sys.stderr)
+ raise Abort()
+ except ClickException as e:
+ if not standalone_mode:
+ raise
+ e.show()
+ sys.exit(e.exit_code)
+ except IOError as e:
+ if e.errno == errno.EPIPE:
+ sys.stdout = PacifyFlushWrapper(sys.stdout)
+ sys.stderr = PacifyFlushWrapper(sys.stderr)
+ sys.exit(1)
+ else:
+ raise
+ except Exit as e:
+ if standalone_mode:
+ sys.exit(e.exit_code)
+ else:
+ # in non-standalone mode, return the exit code
+ # note that this is only reached if `self.invoke` above raises
+ # an Exit explicitly -- thus bypassing the check there which
+ # would return its result
+ # the results of non-standalone execution may therefore be
+ # somewhat ambiguous: if there are codepaths which lead to
+ # `ctx.exit(1)` and to `return 1`, the caller won't be able to
+ # tell the difference between the two
+ return e.exit_code
+ except Abort:
+ if not standalone_mode:
+ raise
+ echo("Aborted!", file=sys.stderr)
+ sys.exit(1)
+
+ def __call__(self, *args, **kwargs):
+ """Alias for :meth:`main`."""
+ return self.main(*args, **kwargs)
+
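+ # Sketch: with standalone_mode=False, main() returns the callback's
+ # return value and lets exceptions propagate instead of printing them
+ # and calling sys.exit() (assumes a hypothetical `cli` command):
+ #
+ #     rv = cli.main(["--name", "x"], standalone_mode=False)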
+
+class Command(BaseCommand):
+ """Commands are the basic building block of command line interfaces in
+ Click. A basic command handles command line parsing and might dispatch
+ more parsing to commands nested below it.
+
+ .. versionchanged:: 2.0
+ Added the `context_settings` parameter.
+ .. versionchanged:: 7.1
+ Added the `no_args_is_help` parameter.
+
+ :param name: the name of the command to use unless a group overrides it.
+ :param context_settings: an optional dictionary with defaults that are
+ passed to the context object.
+ :param callback: the callback to invoke. This is optional.
+ :param params: the parameters to register with this command. This can
+ be either :class:`Option` or :class:`Argument` objects.
+ :param help: the help string to use for this command.
+ :param epilog: like the help string but it's printed at the end of the
+ help page after everything else.
+ :param short_help: the short help to use for this command. This is
+ shown on the command listing of the parent command.
+ :param add_help_option: by default each command registers a ``--help``
+ option. This can be disabled by this parameter.
+ :param no_args_is_help: this controls what happens if no arguments are
+ provided. This option is disabled by default.
+ If enabled this will add ``--help`` as argument
+ if no arguments are passed.
+ :param hidden: hide this command from help outputs.
+
+ :param deprecated: issues a message indicating that
+ the command is deprecated.
+ """
+
+ def __init__(
+ self,
+ name,
+ context_settings=None,
+ callback=None,
+ params=None,
+ help=None,
+ epilog=None,
+ short_help=None,
+ options_metavar="[OPTIONS]",
+ add_help_option=True,
+ no_args_is_help=False,
+ hidden=False,
+ deprecated=False,
+ ):
+ BaseCommand.__init__(self, name, context_settings)
+ #: the callback to execute when the command fires. This might be
+ #: `None` in which case nothing happens.
+ self.callback = callback
+ #: the list of parameters for this command in the order they
+ #: should show up in the help page and execute. Eager parameters
+ #: will automatically be handled before non eager ones.
+ self.params = params or []
+ # if a form feed (page break) is found in the help text, truncate help
+ # text to the content preceding the first form feed
+ if help and "\f" in help:
+ help = help.split("\f", 1)[0]
+ self.help = help
+ self.epilog = epilog
+ self.options_metavar = options_metavar
+ self.short_help = short_help
+ self.add_help_option = add_help_option
+ self.no_args_is_help = no_args_is_help
+ self.hidden = hidden
+ self.deprecated = deprecated
+
+ def get_usage(self, ctx):
+ """Formats the usage line into a string and returns it.
+
+ Calls :meth:`format_usage` internally.
+ """
+ formatter = ctx.make_formatter()
+ self.format_usage(ctx, formatter)
+ return formatter.getvalue().rstrip("\n")
+
+ def get_params(self, ctx):
+ rv = self.params
+ help_option = self.get_help_option(ctx)
+ if help_option is not None:
+ rv = rv + [help_option]
+ return rv
+
+ def format_usage(self, ctx, formatter):
+ """Writes the usage line into the formatter.
+
+ This is a low-level method called by :meth:`get_usage`.
+ """
+ pieces = self.collect_usage_pieces(ctx)
+ formatter.write_usage(ctx.command_path, " ".join(pieces))
+
+ def collect_usage_pieces(self, ctx):
+ """Returns all the pieces that go into the usage line and returns
+ it as a list of strings.
+ """
+ rv = [self.options_metavar]
+ for param in self.get_params(ctx):
+ rv.extend(param.get_usage_pieces(ctx))
+ return rv
+
+ def get_help_option_names(self, ctx):
+ """Returns the names for the help option."""
+ all_names = set(ctx.help_option_names)
+ for param in self.params:
+ all_names.difference_update(param.opts)
+ all_names.difference_update(param.secondary_opts)
+ return all_names
+
+ def get_help_option(self, ctx):
+ """Returns the help option object."""
+ help_options = self.get_help_option_names(ctx)
+ if not help_options or not self.add_help_option:
+ return
+
+ def show_help(ctx, param, value):
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ return Option(
+ help_options,
+ is_flag=True,
+ is_eager=True,
+ expose_value=False,
+ callback=show_help,
+ help="Show this message and exit.",
+ )
+
+ def make_parser(self, ctx):
+ """Creates the underlying option parser for this command."""
+ parser = OptionParser(ctx)
+ for param in self.get_params(ctx):
+ param.add_to_parser(parser, ctx)
+ return parser
+
+ def get_help(self, ctx):
+ """Formats the help into a string and returns it.
+
+ Calls :meth:`format_help` internally.
+ """
+ formatter = ctx.make_formatter()
+ self.format_help(ctx, formatter)
+ return formatter.getvalue().rstrip("\n")
+
+ def get_short_help_str(self, limit=45):
+ """Gets short help for the command or makes it by shortening the
+ long help string.
+ """
+ return (
+ self.short_help
+ or self.help
+ and make_default_short_help(self.help, limit)
+ or ""
+ )
+
+ def format_help(self, ctx, formatter):
+ """Writes the help into the formatter if it exists.
+
+ This is a low-level method called by :meth:`get_help`.
+
+ This calls the following methods:
+
+ - :meth:`format_usage`
+ - :meth:`format_help_text`
+ - :meth:`format_options`
+ - :meth:`format_epilog`
+ """
+ self.format_usage(ctx, formatter)
+ self.format_help_text(ctx, formatter)
+ self.format_options(ctx, formatter)
+ self.format_epilog(ctx, formatter)
+
+ def format_help_text(self, ctx, formatter):
+ """Writes the help text to the formatter if it exists."""
+ if self.help:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ help_text = self.help
+ if self.deprecated:
+ help_text += DEPRECATED_HELP_NOTICE
+ formatter.write_text(help_text)
+ elif self.deprecated:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ formatter.write_text(DEPRECATED_HELP_NOTICE)
+
+ def format_options(self, ctx, formatter):
+ """Writes all the options into the formatter if they exist."""
+ opts = []
+ for param in self.get_params(ctx):
+ rv = param.get_help_record(ctx)
+ if rv is not None:
+ opts.append(rv)
+
+ if opts:
+ with formatter.section("Options"):
+ formatter.write_dl(opts)
+
+ def format_epilog(self, ctx, formatter):
+ """Writes the epilog into the formatter if it exists."""
+ if self.epilog:
+ formatter.write_paragraph()
+ with formatter.indentation():
+ formatter.write_text(self.epilog)
+
+ def parse_args(self, ctx, args):
+ if not args and self.no_args_is_help and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ parser = self.make_parser(ctx)
+ opts, args, param_order = parser.parse_args(args=args)
+
+ for param in iter_params_for_processing(param_order, self.get_params(ctx)):
+ value, args = param.handle_parse_result(ctx, opts, args)
+
+ if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
+ ctx.fail(
+ "Got unexpected extra argument{} ({})".format(
+ "s" if len(args) != 1 else "", " ".join(map(make_str, args))
+ )
+ )
+
+ ctx.args = args
+ return args
+
+ def invoke(self, ctx):
+ """Given a context, this invokes the attached callback (if it exists)
+ in the right way.
+ """
+ _maybe_show_deprecated_notice(self)
+ if self.callback is not None:
+ return ctx.invoke(self.callback, **ctx.params)
+
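+ # Sketch: the programmatic equivalent of the decorator API; a Command
+ # is just a name, a parameter list and a callback (illustrative names):
+ #
+ #     def _hello(name):
+ #         click.echo("Hello {}!".format(name))
+ #
+ #     hello = Command(
+ #         "hello",
+ #         params=[Option(["--name"], default="world")],
+ #         callback=_hello,
+ #     )
+ #     hello.main(["--name", "dev"])  # prints "Hello dev!" and exits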
+
+class MultiCommand(Command):
+ """A multi command is the basic implementation of a command that
+ dispatches to subcommands. The most common version is the
+ :class:`Group`.
+
+ :param invoke_without_command: this controls how the multi command itself
+ is invoked. By default it's only invoked
+ if a subcommand is provided.
+ :param no_args_is_help: this controls what happens if no arguments are
+ provided. This option is enabled by default if
+ `invoke_without_command` is disabled and disabled
+ if it's enabled. If enabled this will add
+ ``--help`` as argument if no arguments are
+ passed.
+ :param subcommand_metavar: the string that is used in the documentation
+ to indicate the subcommand place.
+ :param chain: if this is set to `True` chaining of multiple subcommands
+ is enabled. This restricts the form of commands in that
+ they cannot have optional arguments but it allows
+ multiple commands to be chained together.
+ :param result_callback: the result callback to attach to this multi
+ command.
+ """
+
+ allow_extra_args = True
+ allow_interspersed_args = False
+
+ def __init__(
+ self,
+ name=None,
+ invoke_without_command=False,
+ no_args_is_help=None,
+ subcommand_metavar=None,
+ chain=False,
+ result_callback=None,
+ **attrs
+ ):
+ Command.__init__(self, name, **attrs)
+ if no_args_is_help is None:
+ no_args_is_help = not invoke_without_command
+ self.no_args_is_help = no_args_is_help
+ self.invoke_without_command = invoke_without_command
+ if subcommand_metavar is None:
+ if chain:
+ subcommand_metavar = SUBCOMMANDS_METAVAR
+ else:
+ subcommand_metavar = SUBCOMMAND_METAVAR
+ self.subcommand_metavar = subcommand_metavar
+ self.chain = chain
+ #: The result callback that is stored. This can be set or
+ #: overridden with the :func:`resultcallback` decorator.
+ self.result_callback = result_callback
+
+ if self.chain:
+ for param in self.params:
+ if isinstance(param, Argument) and not param.required:
+ raise RuntimeError(
+ "Multi commands in chain mode cannot have"
+ " optional arguments."
+ )
+
+ def collect_usage_pieces(self, ctx):
+ rv = Command.collect_usage_pieces(self, ctx)
+ rv.append(self.subcommand_metavar)
+ return rv
+
+ def format_options(self, ctx, formatter):
+ Command.format_options(self, ctx, formatter)
+ self.format_commands(ctx, formatter)
+
+ def resultcallback(self, replace=False):
+ """Adds a result callback to the chain command. By default if a
+ result callback is already registered this will chain them but
+ this can be disabled with the `replace` parameter. The result
+ callback is invoked with the return value of the subcommand
+ (or the list of return values from all subcommands if chaining
+ is enabled) as well as the parameters as they would be passed
+ to the main callback.
+
+ Example::
+
+ @click.group()
+ @click.option('-i', '--input', default=23)
+ def cli(input):
+ return 42
+
+ @cli.resultcallback()
+ def process_result(result, input):
+ return result + input
+
+ .. versionadded:: 3.0
+
+ :param replace: if set to `True` an already existing result
+ callback will be removed.
+ """
+
+ def decorator(f):
+ old_callback = self.result_callback
+ if old_callback is None or replace:
+ self.result_callback = f
+ return f
+
+ def function(__value, *args, **kwargs):
+ return f(old_callback(__value, *args, **kwargs), *args, **kwargs)
+
+ self.result_callback = rv = update_wrapper(function, f)
+ return rv
+
+ return decorator
+
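+ # Sketch: with chain=True the result callback receives the list of all
+ # subcommand return values (hypothetical group; assumes each subcommand
+ # returns a callable):
+ #
+ #     @click.group(chain=True)
+ #     def pipeline():
+ #         pass
+ #
+ #     @pipeline.resultcallback()
+ #     def run_pipeline(processors):
+ #         for processor in processors:
+ #             processor()
+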
+ def format_commands(self, ctx, formatter):
+ """Extra format methods for multi methods that adds all the commands
+ after the options.
+ """
+ commands = []
+ for subcommand in self.list_commands(ctx):
+ cmd = self.get_command(ctx, subcommand)
+ # The command was listed but cannot be resolved; ignore it.
+ if cmd is None:
+ continue
+ if cmd.hidden:
+ continue
+
+ commands.append((subcommand, cmd))
+
+ # allow for 3 times the default spacing
+ if len(commands):
+ limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
+
+ rows = []
+ for subcommand, cmd in commands:
+ help = cmd.get_short_help_str(limit)
+ rows.append((subcommand, help))
+
+ if rows:
+ with formatter.section("Commands"):
+ formatter.write_dl(rows)
+
+ def parse_args(self, ctx, args):
+ if not args and self.no_args_is_help and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ rest = Command.parse_args(self, ctx, args)
+ if self.chain:
+ ctx.protected_args = rest
+ ctx.args = []
+ elif rest:
+ ctx.protected_args, ctx.args = rest[:1], rest[1:]
+
+ return ctx.args
+
+ def invoke(self, ctx):
+ def _process_result(value):
+ if self.result_callback is not None:
+ value = ctx.invoke(self.result_callback, value, **ctx.params)
+ return value
+
+ if not ctx.protected_args:
+ # If we are invoked without command the chain flag controls
+ # how this happens. If we are not in chain mode, the return
+ # value here is the return value of the command.
+ # If however we are in chain mode, the return value is the
+ # return value of the result processor invoked with an empty
+ # list (which means that no subcommand actually was executed).
+ if self.invoke_without_command:
+ if not self.chain:
+ return Command.invoke(self, ctx)
+ with ctx:
+ Command.invoke(self, ctx)
+ return _process_result([])
+ ctx.fail("Missing command.")
+
+ # Fetch args back out
+ args = ctx.protected_args + ctx.args
+ ctx.args = []
+ ctx.protected_args = []
+
+ # If we're not in chain mode, we only allow the invocation of a
+ # single command but we also inform the current context about the
+ # name of the command to invoke.
+ if not self.chain:
+ # Make sure the context is entered so we do not clean up
+ # resources until the result processor has worked.
+ with ctx:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ ctx.invoked_subcommand = cmd_name
+ Command.invoke(self, ctx)
+ sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
+ with sub_ctx:
+ return _process_result(sub_ctx.command.invoke(sub_ctx))
+
+ # In chain mode we create the contexts step by step, but after the
+ # base command has been invoked. Because at that point we do not
+ # know the subcommands yet, the invoked subcommand attribute is
+ # set to ``*`` to inform the command that subcommands are executed
+ # but nothing else.
+ with ctx:
+ ctx.invoked_subcommand = "*" if args else None
+ Command.invoke(self, ctx)
+
+ # Otherwise we make every single context and invoke them in a
+ # chain. In that case the return value to the result processor
+ # is the list of all invoked subcommand's results.
+ contexts = []
+ while args:
+ cmd_name, cmd, args = self.resolve_command(ctx, args)
+ sub_ctx = cmd.make_context(
+ cmd_name,
+ args,
+ parent=ctx,
+ allow_extra_args=True,
+ allow_interspersed_args=False,
+ )
+ contexts.append(sub_ctx)
+ args, sub_ctx.args = sub_ctx.args, []
+
+ rv = []
+ for sub_ctx in contexts:
+ with sub_ctx:
+ rv.append(sub_ctx.command.invoke(sub_ctx))
+ return _process_result(rv)
+
+ def resolve_command(self, ctx, args):
+ cmd_name = make_str(args[0])
+ original_cmd_name = cmd_name
+
+ # Get the command
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we can't find the command but there is a normalization
+ # function available, we try with that one.
+ if cmd is None and ctx.token_normalize_func is not None:
+ cmd_name = ctx.token_normalize_func(cmd_name)
+ cmd = self.get_command(ctx, cmd_name)
+
+ # If we don't find the command we want to show an error message
+ # to the user that it was not provided. However, there is
+ # something else we should do: if the first argument looks like
+ # an option we want to kick off parsing again for arguments to
+ # resolve things like --help which now should go to the main
+ # place.
+ if cmd is None and not ctx.resilient_parsing:
+ if split_opt(cmd_name)[0]:
+ self.parse_args(ctx, ctx.args)
+ ctx.fail("No such command '{}'.".format(original_cmd_name))
+
+ return cmd_name, cmd, args[1:]
+
+ def get_command(self, ctx, cmd_name):
+ """Given a context and a command name, this returns a
+ :class:`Command` object if it exists or returns `None`.
+ """
+ raise NotImplementedError()
+
+ def list_commands(self, ctx):
+ """Returns a list of subcommand names in the order they should
+ appear.
+ """
+ return []
+
+
+class Group(MultiCommand):
+ """A group allows a command to have subcommands attached. This is the
+ most common way to implement nesting in Click.
+
+ :param commands: a dictionary of commands.
+ """
+
+ def __init__(self, name=None, commands=None, **attrs):
+ MultiCommand.__init__(self, name, **attrs)
+ #: the registered subcommands by their exported names.
+ self.commands = commands or {}
+
+ def add_command(self, cmd, name=None):
+ """Registers another :class:`Command` with this group. If the name
+ is not provided, the name of the command is used.
+ """
+ name = name or cmd.name
+ if name is None:
+ raise TypeError("Command has no name.")
+ _check_multicommand(self, name, cmd, register=True)
+ self.commands[name] = cmd
+
+ def command(self, *args, **kwargs):
+ """A shortcut decorator for declaring and attaching a command to
+ the group. This takes the same arguments as :func:`command` but
+ immediately registers the created command with this instance by
+ calling into :meth:`add_command`.
+ """
+ from .decorators import command
+
+ def decorator(f):
+ cmd = command(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+
+ return decorator
+
+ def group(self, *args, **kwargs):
+ """A shortcut decorator for declaring and attaching a group to
+ the group. This takes the same arguments as :func:`group` but
+ immediately registers the created command with this instance by
+ calling into :meth:`add_command`.
+ """
+ from .decorators import group
+
+ def decorator(f):
+ cmd = group(*args, **kwargs)(f)
+ self.add_command(cmd)
+ return cmd
+
+ return decorator
+
+ def get_command(self, ctx, cmd_name):
+ return self.commands.get(cmd_name)
+
+ def list_commands(self, ctx):
+ return sorted(self.commands)
+
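+ # Sketch: typical nesting with the decorator API from click.decorators
+ # (hypothetical command names):
+ #
+ #     @click.group()
+ #     def cli():
+ #         pass
+ #
+ #     @cli.command()
+ #     def init():
+ #         click.echo("initialized")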
+
+class CommandCollection(MultiCommand):
+ """A command collection is a multi command that merges multiple multi
+ commands together into one. This is a straightforward implementation
+ that accepts a list of different multi commands as sources and
+ provides all the commands for each of them.
+ """
+
+ def __init__(self, name=None, sources=None, **attrs):
+ MultiCommand.__init__(self, name, **attrs)
+ #: The list of registered multi commands.
+ self.sources = sources or []
+
+ def add_source(self, multi_cmd):
+ """Adds a new multi command to the chain dispatcher."""
+ self.sources.append(multi_cmd)
+
+ def get_command(self, ctx, cmd_name):
+ for source in self.sources:
+ rv = source.get_command(ctx, cmd_name)
+ if rv is not None:
+ if self.chain:
+ _check_multicommand(self, cmd_name, rv)
+ return rv
+
+ def list_commands(self, ctx):
+ rv = set()
+ for source in self.sources:
+ rv.update(source.list_commands(ctx))
+ return sorted(rv)
+
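+ # Sketch: merging two independently defined groups into one CLI
+ # (hypothetical `base` and `plugins` groups):
+ #
+ #     cli = CommandCollection(sources=[base, plugins])
+ #     # cli now exposes the commands of both groups.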
+
+class Parameter(object):
+ r"""A parameter to a command comes in two versions: they are either
+ :class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
+ not supported by design as some of the internals for parsing are
+ intentionally not finalized.
+
+ Some settings are supported by both options and arguments.
+
+ :param param_decls: the parameter declarations for this option or
+ argument. This is a list of flags or argument
+ names.
+ :param type: the type that should be used. Either a :class:`ParamType`
+ or a Python type. The latter is converted into the former
+ automatically if supported.
+ :param required: controls if this is optional or not.
+ :param default: the default value if omitted. This can also be a callable,
+ in which case it's invoked when the default is needed
+ without any arguments.
+ :param callback: a callback that should be executed after the parameter
+ was matched. This is called as ``fn(ctx, param,
+ value)`` and needs to return the value.
+ :param nargs: the number of arguments to match. If not ``1`` the return
+ value is a tuple instead of single value. The default for
+ nargs is ``1`` (except if the type is a tuple, then it's
+ the arity of the tuple).
+ :param metavar: how the value is represented in the help page.
+ :param expose_value: if this is `True` then the value is passed onwards
+ to the command callback and stored on the context,
+ otherwise it's skipped.
+ :param is_eager: eager values are processed before non eager ones. This
+ should not be set for arguments or it will invert the
+ order of processing.
+ :param envvar: a string or list of strings that are environment variables
+ that should be checked.
+
+ .. versionchanged:: 7.1
+ Empty environment variables are ignored rather than taking the
+ empty string value. This makes it possible for scripts to clear
+ variables if they can't unset them.
+
+ .. versionchanged:: 2.0
+ Changed signature for parameter callback to also be passed the
+ parameter. The old callback format will still work, but it will
+ raise a warning to give you a chance to migrate the code easier.
+ """
+ param_type_name = "parameter"
+
+ def __init__(
+ self,
+ param_decls=None,
+ type=None,
+ required=False,
+ default=None,
+ callback=None,
+ nargs=None,
+ metavar=None,
+ expose_value=True,
+ is_eager=False,
+ envvar=None,
+ autocompletion=None,
+ ):
+ self.name, self.opts, self.secondary_opts = self._parse_decls(
+ param_decls or (), expose_value
+ )
+
+ self.type = convert_type(type, default)
+
+ # Default nargs to what the type tells us if we have that
+ # information available.
+ if nargs is None:
+ if self.type.is_composite:
+ nargs = self.type.arity
+ else:
+ nargs = 1
+
+ self.required = required
+ self.callback = callback
+ self.nargs = nargs
+ self.multiple = False
+ self.expose_value = expose_value
+ self.default = default
+ self.is_eager = is_eager
+ self.metavar = metavar
+ self.envvar = envvar
+ self.autocompletion = autocompletion
+
+ def __repr__(self):
+ return "<{} {}>".format(self.__class__.__name__, self.name)
+
+ @property
+ def human_readable_name(self):
+ """Returns the human readable name of this parameter. This is the
+ same as the name for options, but the metavar for arguments.
+ """
+ return self.name
+
+ def make_metavar(self):
+ if self.metavar is not None:
+ return self.metavar
+ metavar = self.type.get_metavar(self)
+ if metavar is None:
+ metavar = self.type.name.upper()
+ if self.nargs != 1:
+ metavar += "..."
+ return metavar
+
+ def get_default(self, ctx):
+ """Given a context variable this calculates the default value."""
+ # Go with the regular default.
+ if callable(self.default):
+ rv = self.default()
+ else:
+ rv = self.default
+ return self.type_cast_value(ctx, rv)
+
+ def add_to_parser(self, parser, ctx):
+ pass
+
+ def consume_value(self, ctx, opts):
+ value = opts.get(self.name)
+ if value is None:
+ value = self.value_from_envvar(ctx)
+ if value is None:
+ value = ctx.lookup_default(self.name)
+ return value
+
+ def type_cast_value(self, ctx, value):
+ """Given a value this runs it properly through the type system.
+ This automatically handles things like `nargs` and `multiple` as
+ well as composite types.
+ """
+ if self.type.is_composite:
+ if self.nargs <= 1:
+ raise TypeError(
+ "Attempted to invoke composite type but nargs has"
+ " been set to {}. This is not supported; nargs"
+ " needs to be set to a fixed value > 1.".format(self.nargs)
+ )
+ if self.multiple:
+ return tuple(self.type(x or (), self, ctx) for x in value or ())
+ return self.type(value or (), self, ctx)
+
+ def _convert(value, level):
+ if level == 0:
+ return self.type(value, self, ctx)
+ return tuple(_convert(x, level - 1) for x in value or ())
+
+ return _convert(value, (self.nargs != 1) + bool(self.multiple))
+
+ def process_value(self, ctx, value):
+ """Given a value and context this runs the logic to convert the
+ value as necessary.
+ """
+ # If the value we were given is None we do nothing. This way
+ # code that calls this can easily figure out if something was
+ # not provided. Otherwise it would be converted into an empty
+ # tuple for multiple invocations which is inconvenient.
+ if value is not None:
+ return self.type_cast_value(ctx, value)
+
+ def value_is_missing(self, value):
+ if value is None:
+ return True
+ if (self.nargs != 1 or self.multiple) and value == ():
+ return True
+ return False
+
+ def full_process_value(self, ctx, value):
+ value = self.process_value(ctx, value)
+
+ if value is None and not ctx.resilient_parsing:
+ value = self.get_default(ctx)
+
+ if self.required and self.value_is_missing(value):
+ raise MissingParameter(ctx=ctx, param=self)
+
+ return value
+
+ def resolve_envvar_value(self, ctx):
+ if self.envvar is None:
+ return
+ if isinstance(self.envvar, (tuple, list)):
+ for envvar in self.envvar:
+ rv = os.environ.get(envvar)
+ if rv is not None:
+ return rv
+ else:
+ rv = os.environ.get(self.envvar)
+
+ if rv != "":
+ return rv
+
+ def value_from_envvar(self, ctx):
+ rv = self.resolve_envvar_value(ctx)
+ if rv is not None and self.nargs != 1:
+ rv = self.type.split_envvar_value(rv)
+ return rv
+
+ def handle_parse_result(self, ctx, opts, args):
+ with augment_usage_errors(ctx, param=self):
+ value = self.consume_value(ctx, opts)
+ try:
+ value = self.full_process_value(ctx, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+ value = None
+ if self.callback is not None:
+ try:
+ value = invoke_param_callback(self.callback, ctx, self, value)
+ except Exception:
+ if not ctx.resilient_parsing:
+ raise
+
+ if self.expose_value:
+ ctx.params[self.name] = value
+ return value, args
+
+ def get_help_record(self, ctx):
+ pass
+
+ def get_usage_pieces(self, ctx):
+ return []
+
+ def get_error_hint(self, ctx):
+ """Get a stringified version of the param for use in error messages to
+ indicate which param caused the error.
+ """
+ hint_list = self.opts or [self.human_readable_name]
+ return " / ".join(repr(x) for x in hint_list)
+
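+ # Sketch of how nargs and multiple shape the parsed value (illustrative
+ # options; the tuples shown are what the callback receives):
+ #
+ #     @click.command()
+ #     @click.option("--point", nargs=2, type=int)                 # (1, 2)
+ #     @click.option("--tag", multiple=True)                       # ("a", "b")
+ #     @click.option("--edge", nargs=2, type=int, multiple=True)   # ((1, 2), (3, 4))
+ #     def cmd(point, tag, edge):
+ #         pass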
+
+class Option(Parameter):
+ """Options are usually optional values on the command line and
+ have some extra features that arguments don't have.
+
+ All other parameters are passed onwards to the parameter constructor.
+
+ :param show_default: controls if the default value should be shown on the
+ help page. Normally, defaults are not shown. If this
+ value is a string, it shows the string instead of the
+ value. This is particularly useful for dynamic options.
+ :param show_envvar: controls if an environment variable should be shown on
+ the help page. Normally, environment variables
+ are not shown.
+ :param prompt: if set to `True` or a non empty string then the user will be
+ prompted for input. If set to `True` the prompt will be the
+ option name capitalized.
+ :param confirmation_prompt: if set then the value will need to be confirmed
+ if it was prompted for.
+ :param hide_input: if this is `True` then the input on the prompt will be
+ hidden from the user. This is useful for password
+ input.
+ :param is_flag: forces this option to act as a flag. The default is
+ auto detection.
+ :param flag_value: which value should be used for this flag if it's
+ enabled. This is set to a boolean automatically if
+ the option string contains a slash to mark two options.
+ :param multiple: if this is set to `True` then the argument is accepted
+ multiple times and recorded. This is similar to ``nargs``
+ in how it works but supports arbitrary number of
+ arguments.
+ :param count: this flag makes an option increment an integer.
+ :param allow_from_autoenv: if this is enabled then the value of this
+ parameter will be pulled from an environment
+ variable in case a prefix is defined on the
+ context.
+ :param help: the help string.
+ :param hidden: hide this option from help outputs.
+ """
+
+ param_type_name = "option"
+
+ def __init__(
+ self,
+ param_decls=None,
+ show_default=False,
+ prompt=False,
+ confirmation_prompt=False,
+ hide_input=False,
+ is_flag=None,
+ flag_value=None,
+ multiple=False,
+ count=False,
+ allow_from_autoenv=True,
+ type=None,
+ help=None,
+ hidden=False,
+ show_choices=True,
+ show_envvar=False,
+ **attrs
+ ):
+ default_is_missing = attrs.get("default", _missing) is _missing
+ Parameter.__init__(self, param_decls, type=type, **attrs)
+
+ if prompt is True:
+ prompt_text = self.name.replace("_", " ").capitalize()
+ elif prompt is False:
+ prompt_text = None
+ else:
+ prompt_text = prompt
+ self.prompt = prompt_text
+ self.confirmation_prompt = confirmation_prompt
+ self.hide_input = hide_input
+ self.hidden = hidden
+
+ # Flags
+ if is_flag is None:
+ if flag_value is not None:
+ is_flag = True
+ else:
+ is_flag = bool(self.secondary_opts)
+ if is_flag and default_is_missing:
+ self.default = False
+ if flag_value is None:
+ flag_value = not self.default
+ self.is_flag = is_flag
+ self.flag_value = flag_value
+ if self.is_flag and isinstance(self.flag_value, bool) and type in [None, bool]:
+ self.type = BOOL
+ self.is_bool_flag = True
+ else:
+ self.is_bool_flag = False
+
+ # Counting
+ self.count = count
+ if count:
+ if type is None:
+ self.type = IntRange(min=0)
+ if default_is_missing:
+ self.default = 0
+
+ self.multiple = multiple
+ self.allow_from_autoenv = allow_from_autoenv
+ self.help = help
+ self.show_default = show_default
+ self.show_choices = show_choices
+ self.show_envvar = show_envvar
+
+ # Sanity check for stuff we don't support
+ if __debug__:
+ if self.nargs < 0:
+ raise TypeError("Options cannot have nargs < 0")
+ if self.prompt and self.is_flag and not self.is_bool_flag:
+ raise TypeError("Cannot prompt for flags that are not bools.")
+ if not self.is_bool_flag and self.secondary_opts:
+ raise TypeError("Got secondary option for non boolean flag.")
+ if self.is_bool_flag and self.hide_input and self.prompt is not None:
+ raise TypeError("Hidden input does not work with boolean flag prompts.")
+ if self.count:
+ if self.multiple:
+ raise TypeError(
+ "Options cannot be multiple and count at the same time."
+ )
+ elif self.is_flag:
+ raise TypeError(
+ "Options cannot be count and flags at the same time."
+ )
+
+ def _parse_decls(self, decls, expose_value):
+ opts = []
+ secondary_opts = []
+ name = None
+ possible_names = []
+
+ for decl in decls:
+ if isidentifier(decl):
+ if name is not None:
+ raise TypeError("Name defined twice")
+ name = decl
+ else:
+ split_char = ";" if decl[:1] == "/" else "/"
+ if split_char in decl:
+ first, second = decl.split(split_char, 1)
+ first = first.rstrip()
+ if first:
+ possible_names.append(split_opt(first))
+ opts.append(first)
+ second = second.lstrip()
+ if second:
+ secondary_opts.append(second.lstrip())
+ else:
+ possible_names.append(split_opt(decl))
+ opts.append(decl)
+
+ if name is None and possible_names:
+ possible_names.sort(key=lambda x: -len(x[0])) # group long options first
+ name = possible_names[0][1].replace("-", "_").lower()
+ if not isidentifier(name):
+ name = None
+
+ if name is None:
+ if not expose_value:
+ return None, opts, secondary_opts
+ raise TypeError("Could not determine name for option")
+
+ if not opts and not secondary_opts:
+ raise TypeError(
+ "No options defined but a name was passed ({}). Did you"
+ " mean to declare an argument instead of an option?".format(name)
+ )
+
+ return name, opts, secondary_opts
+
+ def add_to_parser(self, parser, ctx):
+ kwargs = {
+ "dest": self.name,
+ "nargs": self.nargs,
+ "obj": self,
+ }
+
+ if self.multiple:
+ action = "append"
+ elif self.count:
+ action = "count"
+ else:
+ action = "store"
+
+ if self.is_flag:
+ kwargs.pop("nargs", None)
+ action_const = "{}_const".format(action)
+ if self.is_bool_flag and self.secondary_opts:
+ parser.add_option(self.opts, action=action_const, const=True, **kwargs)
+ parser.add_option(
+ self.secondary_opts, action=action_const, const=False, **kwargs
+ )
+ else:
+ parser.add_option(
+ self.opts, action=action_const, const=self.flag_value, **kwargs
+ )
+ else:
+ kwargs["action"] = action
+ parser.add_option(self.opts, **kwargs)
+
+ def get_help_record(self, ctx):
+ if self.hidden:
+ return
+ any_prefix_is_slash = []
+
+ def _write_opts(opts):
+ rv, any_slashes = join_options(opts)
+ if any_slashes:
+ any_prefix_is_slash[:] = [True]
+ if not self.is_flag and not self.count:
+ rv += " {}".format(self.make_metavar())
+ return rv
+
+ rv = [_write_opts(self.opts)]
+ if self.secondary_opts:
+ rv.append(_write_opts(self.secondary_opts))
+
+ help = self.help or ""
+ extra = []
+ if self.show_envvar:
+ envvar = self.envvar
+ if envvar is None:
+ if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
+ envvar = "{}_{}".format(ctx.auto_envvar_prefix, self.name.upper())
+ if envvar is not None:
+ extra.append(
+ "env var: {}".format(
+ ", ".join(str(d) for d in envvar)
+ if isinstance(envvar, (list, tuple))
+ else envvar
+ )
+ )
+ if self.default is not None and (self.show_default or ctx.show_default):
+ if isinstance(self.show_default, string_types):
+ default_string = "({})".format(self.show_default)
+ elif isinstance(self.default, (list, tuple)):
+ default_string = ", ".join(str(d) for d in self.default)
+ elif inspect.isfunction(self.default):
+ default_string = "(dynamic)"
+ else:
+ default_string = self.default
+ extra.append("default: {}".format(default_string))
+
+ if self.required:
+ extra.append("required")
+ if extra:
+ help = "{}[{}]".format(
+ "{} ".format(help) if help else "", "; ".join(extra)
+ )
+
+ return ("; " if any_prefix_is_slash else " / ").join(rv), help
+
+ def get_default(self, ctx):
+ # If we're a non boolean flag our default is more complex because
+ # we need to look at all flags in the same group to figure out
+ # if we're the default one in which case we return the flag
+ # value as default.
+ if self.is_flag and not self.is_bool_flag:
+ for param in ctx.command.params:
+ if param.name == self.name and param.default:
+ return param.flag_value
+ return None
+ return Parameter.get_default(self, ctx)
+
+ def prompt_for_value(self, ctx):
+ """This is an alternative flow that can be activated in the full
+ value processing if a value does not exist. It will prompt the
+ user until a valid value exists and then returns the processed
+ value as result.
+ """
+ # Calculate the default before prompting anything to be stable.
+ default = self.get_default(ctx)
+
+ # If this is a prompt for a flag we need to handle this
+ # differently.
+ if self.is_bool_flag:
+ return confirm(self.prompt, default)
+
+ return prompt(
+ self.prompt,
+ default=default,
+ type=self.type,
+ hide_input=self.hide_input,
+ show_choices=self.show_choices,
+ confirmation_prompt=self.confirmation_prompt,
+ value_proc=lambda x: self.process_value(ctx, x),
+ )
+
+ def resolve_envvar_value(self, ctx):
+ rv = Parameter.resolve_envvar_value(self, ctx)
+ if rv is not None:
+ return rv
+ if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
+ envvar = "{}_{}".format(ctx.auto_envvar_prefix, self.name.upper())
+ return os.environ.get(envvar)
+
+ def value_from_envvar(self, ctx):
+ rv = self.resolve_envvar_value(ctx)
+ if rv is None:
+ return None
+ value_depth = (self.nargs != 1) + bool(self.multiple)
+ if value_depth > 0 and rv is not None:
+ rv = self.type.split_envvar_value(rv)
+ if self.multiple and self.nargs != 1:
+ rv = batch(rv, self.nargs)
+ return rv
+
+ def full_process_value(self, ctx, value):
+ if value is None and self.prompt is not None and not ctx.resilient_parsing:
+ return self.prompt_for_value(ctx)
+ return Parameter.full_process_value(self, ctx, value)
+
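+ # Sketch: an Option that prompts when no value is given and can also be
+ # filled from an environment variable (hypothetical names):
+ #
+ #     @click.command()
+ #     @click.option("--username", prompt=True, envvar="APP_USERNAME")
+ #     def login(username):
+ #         click.echo("logging in as {}".format(username))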
+
+class Argument(Parameter):
+ """Arguments are positional parameters to a command. They generally
+ provide fewer features than options but can have infinite ``nargs``
+ and are required by default.
+
+ All parameters are passed onwards to the parameter constructor.
+ """
+
+ param_type_name = "argument"
+
+ def __init__(self, param_decls, required=None, **attrs):
+ if required is None:
+ if attrs.get("default") is not None:
+ required = False
+ else:
+ required = attrs.get("nargs", 1) > 0
+ Parameter.__init__(self, param_decls, required=required, **attrs)
+ if self.default is not None and self.nargs < 0:
+ raise TypeError(
+ "nargs=-1 in combination with a default value is not supported."
+ )
+
+ @property
+ def human_readable_name(self):
+ if self.metavar is not None:
+ return self.metavar
+ return self.name.upper()
+
+ def make_metavar(self):
+ if self.metavar is not None:
+ return self.metavar
+ var = self.type.get_metavar(self)
+ if not var:
+ var = self.name.upper()
+ if not self.required:
+ var = "[{}]".format(var)
+ if self.nargs != 1:
+ var += "..."
+ return var
+
+ def _parse_decls(self, decls, expose_value):
+ if not decls:
+ if not expose_value:
+ return None, [], []
+ raise TypeError("Could not determine name for argument")
+ if len(decls) == 1:
+ name = arg = decls[0]
+ name = name.replace("-", "_").lower()
+ else:
+ raise TypeError(
+ "Arguments take exactly one parameter declaration, got"
+ " {}".format(len(decls))
+ )
+ return name, [arg], []
+
+ def get_usage_pieces(self, ctx):
+ return [self.make_metavar()]
+
+ def get_error_hint(self, ctx):
+ return repr(self.make_metavar())
+
+ def add_to_parser(self, parser, ctx):
+ parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
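+
+ # Sketch: arguments are positional and required by default; nargs=-1
+ # accepts any number of values (and cannot have a default):
+ #
+ #     @click.command()
+ #     @click.argument("files", nargs=-1)
+ #     def process(files):
+ #         for name in files:
+ #             click.echo(name)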
diff --git a/third_party/python/click/click/decorators.py b/third_party/python/click/click/decorators.py
new file mode 100644
index 0000000000..c7b5af6cc5
--- /dev/null
+++ b/third_party/python/click/click/decorators.py
@@ -0,0 +1,333 @@
+import inspect
+import sys
+from functools import update_wrapper
+
+from ._compat import iteritems
+from ._unicodefun import _check_for_unicode_literals
+from .core import Argument
+from .core import Command
+from .core import Group
+from .core import Option
+from .globals import get_current_context
+from .utils import echo
+
+
+def pass_context(f):
+ """Marks a callback as wanting to receive the current context
+ object as first argument.
+ """
+
+ def new_func(*args, **kwargs):
+ return f(get_current_context(), *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+
+def pass_obj(f):
+ """Similar to :func:`pass_context`, but only pass the object on the
+ context onwards (:attr:`Context.obj`). This is useful if that object
+ represents the state of a nested system.
+ """
+
+ def new_func(*args, **kwargs):
+ return f(get_current_context().obj, *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+
+def make_pass_decorator(object_type, ensure=False):
+ """Given an object type this creates a decorator that will work
+ similar to :func:`pass_obj` but instead of passing the object of the
+ current context, it will find the innermost context of type
+ :func:`object_type`.
+
+ This generates a decorator that works roughly like this::
+
+ from functools import update_wrapper
+
+ def decorator(f):
+ @pass_context
+ def new_func(ctx, *args, **kwargs):
+ obj = ctx.find_object(object_type)
+ return ctx.invoke(f, obj, *args, **kwargs)
+ return update_wrapper(new_func, f)
+ return decorator
+
+ :param object_type: the type of the object to pass.
+ :param ensure: if set to `True`, a new object will be created and
+ remembered on the context if it's not there yet.
+ """
+
+ def decorator(f):
+ def new_func(*args, **kwargs):
+ ctx = get_current_context()
+ if ensure:
+ obj = ctx.ensure_object(object_type)
+ else:
+ obj = ctx.find_object(object_type)
+ if obj is None:
+ raise RuntimeError(
+ "Managed to invoke callback without a context"
+ " object of type '{}' existing".format(object_type.__name__)
+ )
+ return ctx.invoke(f, obj, *args, **kwargs)
+
+ return update_wrapper(new_func, f)
+
+ return decorator
+
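+ # Sketch: a pass decorator for a hypothetical State object; with
+ # ensure=True the object is created on the context the first time it
+ # is needed:
+ #
+ #     class State(object):
+ #         pass
+ #
+ #     pass_state = make_pass_decorator(State, ensure=True)
+ #
+ #     @click.command()
+ #     @pass_state
+ #     def cli(state):
+ #         click.echo(type(state).__name__)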
+
+def _make_command(f, name, attrs, cls):
+ if isinstance(f, Command):
+ raise TypeError("Attempted to convert a callback into a command twice.")
+ try:
+ params = f.__click_params__
+ params.reverse()
+ del f.__click_params__
+ except AttributeError:
+ params = []
+ help = attrs.get("help")
+ if help is None:
+ help = inspect.getdoc(f)
+ if isinstance(help, bytes):
+ help = help.decode("utf-8")
+ else:
+ help = inspect.cleandoc(help)
+ attrs["help"] = help
+ _check_for_unicode_literals()
+ return cls(
+ name=name or f.__name__.lower().replace("_", "-"),
+ callback=f,
+ params=params,
+ **attrs
+ )
+
+
+def command(name=None, cls=None, **attrs):
+ r"""Creates a new :class:`Command` and uses the decorated function as
+ callback. This will also automatically attach all decorated
+ :func:`option`\s and :func:`argument`\s as parameters to the command.
+
+ The name of the command defaults to the name of the function with
+ underscores replaced by dashes. If you want to change that, you can
+ pass the intended name as the first argument.
+
+ All keyword arguments are forwarded to the underlying command class.
+
+ Once decorated the function turns into a :class:`Command` instance
+ that can be invoked as a command line utility or be attached to a
+ command :class:`Group`.
+
+ :param name: the name of the command. This defaults to the function
+ name with underscores replaced by dashes.
+ :param cls: the command class to instantiate. This defaults to
+ :class:`Command`.
+ """
+ if cls is None:
+ cls = Command
+
+ def decorator(f):
+ cmd = _make_command(f, name, attrs, cls)
+ cmd.__doc__ = f.__doc__
+ return cmd
+
+ return decorator
+
+
+def group(name=None, **attrs):
+ """Creates a new :class:`Group` with a function as callback. This
+ works otherwise the same as :func:`command` just that the `cls`
+ parameter is set to :class:`Group`.
+ """
+ attrs.setdefault("cls", Group)
+ return command(name, **attrs)
+
+
+def _param_memo(f, param):
+ if isinstance(f, Command):
+ f.params.append(param)
+ else:
+ if not hasattr(f, "__click_params__"):
+ f.__click_params__ = []
+ f.__click_params__.append(param)
+
+
+def argument(*param_decls, **attrs):
+ """Attaches an argument to the command. All positional arguments are
+ passed as parameter declarations to :class:`Argument`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Argument` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ :param cls: the argument class to instantiate. This defaults to
+ :class:`Argument`.
+ """
+
+ def decorator(f):
+ ArgumentClass = attrs.pop("cls", Argument)
+ _param_memo(f, ArgumentClass(param_decls, **attrs))
+ return f
+
+ return decorator
+
+
+def option(*param_decls, **attrs):
+ """Attaches an option to the command. All positional arguments are
+ passed as parameter declarations to :class:`Option`; all keyword
+ arguments are forwarded unchanged (except ``cls``).
+ This is equivalent to creating an :class:`Option` instance manually
+ and attaching it to the :attr:`Command.params` list.
+
+ :param cls: the option class to instantiate. This defaults to
+ :class:`Option`.
+ """
+
+ def decorator(f):
+ # Issue 926: copy attrs so that pre-defined options can re-use the same cls=
+ option_attrs = attrs.copy()
+
+ if "help" in option_attrs:
+ option_attrs["help"] = inspect.cleandoc(option_attrs["help"])
+ OptionClass = option_attrs.pop("cls", Option)
+ _param_memo(f, OptionClass(param_decls, **option_attrs))
+ return f
+
+ return decorator
+
+
+def confirmation_option(*param_decls, **attrs):
+ """Shortcut for confirmation prompts that can be ignored by passing
+ ``--yes`` as parameter.
+
+ This is equivalent to decorating a function with :func:`option` with
+ the following parameters::
+
+ def callback(ctx, param, value):
+ if not value:
+ ctx.abort()
+
+ @click.command()
+ @click.option('--yes', is_flag=True, callback=callback,
+ expose_value=False, prompt='Do you want to continue?')
+ def dropdb():
+ pass
+ """
+
+ def decorator(f):
+ def callback(ctx, param, value):
+ if not value:
+ ctx.abort()
+
+ attrs.setdefault("is_flag", True)
+ attrs.setdefault("callback", callback)
+ attrs.setdefault("expose_value", False)
+ attrs.setdefault("prompt", "Do you want to continue?")
+ attrs.setdefault("help", "Confirm the action without prompting.")
+ return option(*(param_decls or ("--yes",)), **attrs)(f)
+
+ return decorator
+
+
+def password_option(*param_decls, **attrs):
+ """Shortcut for password prompts.
+
+ This is equivalent to decorating a function with :func:`option` with
+ the following parameters::
+
+ @click.command()
+ @click.option('--password', prompt=True, confirmation_prompt=True,
+ hide_input=True)
+ def changeadmin(password):
+ pass
+ """
+
+ def decorator(f):
+ attrs.setdefault("prompt", True)
+ attrs.setdefault("confirmation_prompt", True)
+ attrs.setdefault("hide_input", True)
+ return option(*(param_decls or ("--password",)), **attrs)(f)
+
+ return decorator
+
+
+def version_option(version=None, *param_decls, **attrs):
+ """Adds a ``--version`` option which immediately ends the program
+ printing out the version number. This is implemented as an eager
+ option that prints the version and exits the program in the callback.
+
+ :param version: the version number to show. If not provided Click
+ attempts an auto discovery via setuptools.
+ :param prog_name: the name of the program (defaults to autodetection)
+ :param message: custom message to show instead of the default
+ (``'%(prog)s, version %(version)s'``)
+ :param others: everything else is forwarded to :func:`option`.
+ """
+ if version is None:
+ if hasattr(sys, "_getframe"):
+ module = sys._getframe(1).f_globals.get("__name__")
+ else:
+ module = ""
+
+ def decorator(f):
+ prog_name = attrs.pop("prog_name", None)
+ message = attrs.pop("message", "%(prog)s, version %(version)s")
+
+ def callback(ctx, param, value):
+ if not value or ctx.resilient_parsing:
+ return
+ prog = prog_name
+ if prog is None:
+ prog = ctx.find_root().info_name
+ ver = version
+ if ver is None:
+ try:
+ import pkg_resources
+ except ImportError:
+ pass
+ else:
+ for dist in pkg_resources.working_set:
+ scripts = dist.get_entry_map().get("console_scripts") or {}
+ for _, entry_point in iteritems(scripts):
+ if entry_point.module_name == module:
+ ver = dist.version
+ break
+ if ver is None:
+ raise RuntimeError("Could not determine version")
+ echo(message % {"prog": prog, "version": ver}, color=ctx.color)
+ ctx.exit()
+
+ attrs.setdefault("is_flag", True)
+ attrs.setdefault("expose_value", False)
+ attrs.setdefault("is_eager", True)
+ attrs.setdefault("help", "Show the version and exit.")
+ attrs["callback"] = callback
+ return option(*(param_decls or ("--version",)), **attrs)(f)
+
+ return decorator
+
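+ # Sketch: passing the version explicitly avoids the setuptools
+ # auto-discovery in the callback above (hypothetical values):
+ #
+ #     @click.command()
+ #     @click.version_option(version="1.0.0", prog_name="mytool")
+ #     def cli():
+ #         pass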
+
+def help_option(*param_decls, **attrs):
+ """Adds a ``--help`` option which immediately ends the program
+ printing out the help page. This is usually unnecessary to add as
+ this is added by default to all commands unless suppressed.
+
+ Like :func:`version_option`, this is implemented as an eager option
+ that prints in the callback and exits.
+
+ All arguments are forwarded to :func:`option`.
+ """
+
+ def decorator(f):
+ def callback(ctx, param, value):
+ if value and not ctx.resilient_parsing:
+ echo(ctx.get_help(), color=ctx.color)
+ ctx.exit()
+
+ attrs.setdefault("is_flag", True)
+ attrs.setdefault("expose_value", False)
+ attrs.setdefault("help", "Show this message and exit.")
+ attrs.setdefault("is_eager", True)
+ attrs["callback"] = callback
+ return option(*(param_decls or ("--help",)), **attrs)(f)
+
+ return decorator
diff --git a/third_party/python/click/click/exceptions.py b/third_party/python/click/click/exceptions.py
new file mode 100644
index 0000000000..592ee38f0d
--- /dev/null
+++ b/third_party/python/click/click/exceptions.py
@@ -0,0 +1,253 @@
+from ._compat import filename_to_ui
+from ._compat import get_text_stderr
+from ._compat import PY2
+from .utils import echo
+
+
+def _join_param_hints(param_hint):
+ if isinstance(param_hint, (tuple, list)):
+ return " / ".join(repr(x) for x in param_hint)
+ return param_hint
+
+
+class ClickException(Exception):
+ """An exception that Click can handle and show to the user."""
+
+ #: The exit code for this exception
+ exit_code = 1
+
+ def __init__(self, message):
+ ctor_msg = message
+ if PY2:
+ if ctor_msg is not None:
+ ctor_msg = ctor_msg.encode("utf-8")
+ Exception.__init__(self, ctor_msg)
+ self.message = message
+
+ def format_message(self):
+ return self.message
+
+ def __str__(self):
+ return self.message
+
+ if PY2:
+ __unicode__ = __str__
+
+ def __str__(self):
+ return self.message.encode("utf-8")
+
+ def show(self, file=None):
+ if file is None:
+ file = get_text_stderr()
+ echo("Error: {}".format(self.format_message()), file=file)
+
+
+class UsageError(ClickException):
+ """An internal exception that signals a usage error. This typically
+ aborts any further handling.
+
+ :param message: the error message to display.
+ :param ctx: optionally the context that caused this error. Click will
+ fill in the context automatically in some situations.
+ """
+
+ exit_code = 2
+
+ def __init__(self, message, ctx=None):
+ ClickException.__init__(self, message)
+ self.ctx = ctx
+ self.cmd = self.ctx.command if self.ctx else None
+
+ def show(self, file=None):
+ if file is None:
+ file = get_text_stderr()
+ color = None
+ hint = ""
+ if self.cmd is not None and self.cmd.get_help_option(self.ctx) is not None:
+ hint = "Try '{} {}' for help.\n".format(
+ self.ctx.command_path, self.ctx.help_option_names[0]
+ )
+ if self.ctx is not None:
+ color = self.ctx.color
+ echo("{}\n{}".format(self.ctx.get_usage(), hint), file=file, color=color)
+ echo("Error: {}".format(self.format_message()), file=file, color=color)
+
+
+class BadParameter(UsageError):
+ """An exception that formats out a standardized error message for a
+ bad parameter. This is useful when thrown from a callback or type as
+ Click will attach contextual information to it (for instance, which
+ parameter it is).
+
+ .. versionadded:: 2.0
+
+ :param param: the parameter object that caused this error. This can
+ be left out, and Click will attach this info itself
+ if possible.
+ :param param_hint: a string that shows up as parameter name. This
+ can be used as alternative to `param` in cases
+ where custom validation should happen. If it is
+ a string it's used as such, if it's a list then
+ each item is quoted and separated.
+ """
+
+ def __init__(self, message, ctx=None, param=None, param_hint=None):
+ UsageError.__init__(self, message, ctx)
+ self.param = param
+ self.param_hint = param_hint
+
+ def format_message(self):
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.get_error_hint(self.ctx)
+ else:
+ return "Invalid value: {}".format(self.message)
+ param_hint = _join_param_hints(param_hint)
+
+ return "Invalid value for {}: {}".format(param_hint, self.message)
+
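+ # Sketch: raising BadParameter from a validation callback; Click
+ # attaches the offending parameter and context automatically
+ # (hypothetical option):
+ #
+ #     def validate_port(ctx, param, value):
+ #         if not 0 < value < 65536:
+ #             raise BadParameter("must be between 1 and 65535")
+ #         return value
+ #
+ #     @click.command()
+ #     @click.option("--port", type=int, default=8080, callback=validate_port)
+ #     def serve(port):
+ #         pass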
+
+class MissingParameter(BadParameter):
+ """Raised if click required an option or argument but it was not
+ provided when invoking the script.
+
+ .. versionadded:: 4.0
+
+ :param param_type: a string that indicates the type of the parameter.
+ The default is to inherit the parameter type from
+ the given `param`. Valid values are ``'parameter'``,
+ ``'option'`` or ``'argument'``.
+ """
+
+ def __init__(
+ self, message=None, ctx=None, param=None, param_hint=None, param_type=None
+ ):
+ BadParameter.__init__(self, message, ctx, param, param_hint)
+ self.param_type = param_type
+
+ def format_message(self):
+ if self.param_hint is not None:
+ param_hint = self.param_hint
+ elif self.param is not None:
+ param_hint = self.param.get_error_hint(self.ctx)
+ else:
+ param_hint = None
+ param_hint = _join_param_hints(param_hint)
+
+ param_type = self.param_type
+ if param_type is None and self.param is not None:
+ param_type = self.param.param_type_name
+
+ msg = self.message
+ if self.param is not None:
+ msg_extra = self.param.type.get_missing_message(self.param)
+ if msg_extra:
+ if msg:
+ msg += ". {}".format(msg_extra)
+ else:
+ msg = msg_extra
+
+ return "Missing {}{}{}{}".format(
+ param_type,
+ " {}".format(param_hint) if param_hint else "",
+ ". " if msg else ".",
+ msg or "",
+ )
+
+ def __str__(self):
+ if self.message is None:
+ param_name = self.param.name if self.param else None
+ return "missing parameter: {}".format(param_name)
+ else:
+ return self.message
+
+ if PY2:
+ __unicode__ = __str__
+
+ def __str__(self):
+ return self.__unicode__().encode("utf-8")
+
+
+class NoSuchOption(UsageError):
+ """Raised if click attempted to handle an option that does not
+ exist.
+
+ .. versionadded:: 4.0
+ """
+
+ def __init__(self, option_name, message=None, possibilities=None, ctx=None):
+ if message is None:
+ message = "no such option: {}".format(option_name)
+ UsageError.__init__(self, message, ctx)
+ self.option_name = option_name
+ self.possibilities = possibilities
+
+ def format_message(self):
+ bits = [self.message]
+ if self.possibilities:
+ if len(self.possibilities) == 1:
+ bits.append("Did you mean {}?".format(self.possibilities[0]))
+ else:
+ possibilities = sorted(self.possibilities)
+ bits.append("(Possible options: {})".format(", ".join(possibilities)))
+ return " ".join(bits)
+
+
+class BadOptionUsage(UsageError):
+ """Raised if an option is generally supplied but the use of the option
+ was incorrect. This is for instance raised if the number of arguments
+ for an option is not correct.
+
+ .. versionadded:: 4.0
+
+ :param option_name: the name of the option being used incorrectly.
+ """
+
+ def __init__(self, option_name, message, ctx=None):
+ UsageError.__init__(self, message, ctx)
+ self.option_name = option_name
+
+
+class BadArgumentUsage(UsageError):
+ """Raised if an argument is generally supplied but the use of the argument
+ was incorrect. This is for instance raised if the number of values
+ for an argument is not correct.
+
+ .. versionadded:: 6.0
+ """
+
+ def __init__(self, message, ctx=None):
+ UsageError.__init__(self, message, ctx)
+
+
+class FileError(ClickException):
+ """Raised if a file cannot be opened."""
+
+ def __init__(self, filename, hint=None):
+ ui_filename = filename_to_ui(filename)
+ if hint is None:
+ hint = "unknown error"
+ ClickException.__init__(self, hint)
+ self.ui_filename = ui_filename
+ self.filename = filename
+
+ def format_message(self):
+ return "Could not open file {}: {}".format(self.ui_filename, self.message)
+
+
+class Abort(RuntimeError):
+ """An internal signalling exception that signals Click to abort."""
+
+
+class Exit(RuntimeError):
+ """An exception that indicates that the application should exit with some
+ status code.
+
+ :param code: the status code to exit with.
+ """
+
+ __slots__ = ("exit_code",)
+
+ def __init__(self, code=0):
+ self.exit_code = code
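+
+
+# Editor's sketch (not part of click): how an entry point typically
+# surfaces a ClickException -- render it via show() and exit with its
+# exit_code.
+if __name__ == "__main__":
+ import sys as _sys
+
+ try:
+ raise ClickException("something went wrong")
+ except ClickException as e:
+ e.show()  # writes "Error: something went wrong" to stderr
+ _sys.exit(e.exit_code)  # 1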
diff --git a/third_party/python/click/click/formatting.py b/third_party/python/click/click/formatting.py
new file mode 100644
index 0000000000..319c7f6163
--- /dev/null
+++ b/third_party/python/click/click/formatting.py
@@ -0,0 +1,283 @@
+from contextlib import contextmanager
+
+from ._compat import term_len
+from .parser import split_opt
+from .termui import get_terminal_size
+
+# Can force a width. This is used by the test system.
+FORCED_WIDTH = None
+
+
+def measure_table(rows):
+ widths = {}
+ for row in rows:
+ for idx, col in enumerate(row):
+ widths[idx] = max(widths.get(idx, 0), term_len(col))
+ return tuple(y for x, y in sorted(widths.items()))
+
+
+def iter_rows(rows, col_count):
+ for row in rows:
+ row = tuple(row)
+ yield row + ("",) * (col_count - len(row))
+
+
+def wrap_text(
+ text, width=78, initial_indent="", subsequent_indent="", preserve_paragraphs=False
+):
+ """A helper function that intelligently wraps text. By default, it
+ assumes that it operates on a single paragraph of text but if the
+ `preserve_paragraphs` parameter is provided it will intelligently
+ handle paragraphs (defined by two empty lines).
+
+ If paragraphs are handled, a paragraph can be prefixed with an empty
+ line containing the ``\\b`` character (``\\x08``) to indicate that
+ no rewrapping should happen in that block.
+
+ :param text: the text that should be rewrapped.
+ :param width: the maximum width for the text.
+ :param initial_indent: the initial indent that should be placed on the
+ first line as a string.
+ :param subsequent_indent: the indent string that should be placed on
+ each consecutive line.
+ :param preserve_paragraphs: if this flag is set then the wrapping will
+ intelligently handle paragraphs.
+ """
+ from ._textwrap import TextWrapper
+
+ text = text.expandtabs()
+ wrapper = TextWrapper(
+ width,
+ initial_indent=initial_indent,
+ subsequent_indent=subsequent_indent,
+ replace_whitespace=False,
+ )
+ if not preserve_paragraphs:
+ return wrapper.fill(text)
+
+ p = []
+ buf = []
+ indent = None
+
+ def _flush_par():
+ if not buf:
+ return
+ if buf[0].strip() == "\b":
+ p.append((indent or 0, True, "\n".join(buf[1:])))
+ else:
+ p.append((indent or 0, False, " ".join(buf)))
+ del buf[:]
+
+ for line in text.splitlines():
+ if not line:
+ _flush_par()
+ indent = None
+ else:
+ if indent is None:
+ orig_len = term_len(line)
+ line = line.lstrip()
+ indent = orig_len - term_len(line)
+ buf.append(line)
+ _flush_par()
+
+ rv = []
+ for indent, raw, text in p:
+ with wrapper.extra_indent(" " * indent):
+ if raw:
+ rv.append(wrapper.indent_only(text))
+ else:
+ rv.append(wrapper.fill(text))
+
+ return "\n\n".join(rv)
+
+
+class HelpFormatter(object):
+ """This class helps with formatting text-based help pages. It's
+ usually just needed for very special internal cases, but it's also
+ exposed so that developers can write their own fancy outputs.
+
+ At present, it always writes into memory.
+
+ :param indent_increment: the additional increment for each level.
+ :param width: the width for the text. This defaults to the terminal
+ width clamped to a maximum of 78.
+ """
+
+ def __init__(self, indent_increment=2, width=None, max_width=None):
+ self.indent_increment = indent_increment
+ if max_width is None:
+ max_width = 80
+ if width is None:
+ width = FORCED_WIDTH
+ if width is None:
+ width = max(min(get_terminal_size()[0], max_width) - 2, 50)
+ self.width = width
+ self.current_indent = 0
+ self.buffer = []
+
+ def write(self, string):
+ """Writes a unicode string into the internal buffer."""
+ self.buffer.append(string)
+
+ def indent(self):
+ """Increases the indentation."""
+ self.current_indent += self.indent_increment
+
+ def dedent(self):
+ """Decreases the indentation."""
+ self.current_indent -= self.indent_increment
+
+ def write_usage(self, prog, args="", prefix="Usage: "):
+ """Writes a usage line into the buffer.
+
+ :param prog: the program name.
+ :param args: whitespace separated list of arguments.
+ :param prefix: the prefix for the first line.
+ """
+ usage_prefix = "{:>{w}}{} ".format(prefix, prog, w=self.current_indent)
+ text_width = self.width - self.current_indent
+
+ if text_width >= (term_len(usage_prefix) + 20):
+ # The arguments will fit to the right of the prefix.
+ indent = " " * term_len(usage_prefix)
+ self.write(
+ wrap_text(
+ args,
+ text_width,
+ initial_indent=usage_prefix,
+ subsequent_indent=indent,
+ )
+ )
+ else:
+ # The prefix is too long, put the arguments on the next line.
+ self.write(usage_prefix)
+ self.write("\n")
+ indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
+ self.write(
+ wrap_text(
+ args, text_width, initial_indent=indent, subsequent_indent=indent
+ )
+ )
+
+ self.write("\n")
+
+ def write_heading(self, heading):
+ """Writes a heading into the buffer."""
+ self.write("{:>{w}}{}:\n".format("", heading, w=self.current_indent))
+
+ def write_paragraph(self):
+ """Writes a paragraph into the buffer."""
+ if self.buffer:
+ self.write("\n")
+
+ def write_text(self, text):
+ """Writes re-indented text into the buffer. This rewraps and
+ preserves paragraphs.
+ """
+ text_width = max(self.width - self.current_indent, 11)
+ indent = " " * self.current_indent
+ self.write(
+ wrap_text(
+ text,
+ text_width,
+ initial_indent=indent,
+ subsequent_indent=indent,
+ preserve_paragraphs=True,
+ )
+ )
+ self.write("\n")
+
+ def write_dl(self, rows, col_max=30, col_spacing=2):
+ """Writes a definition list into the buffer. This is how options
+ and commands are usually formatted.
+
+ :param rows: a list of two-item tuples for the terms and values.
+ :param col_max: the maximum width of the first column.
+ :param col_spacing: the number of spaces between the first and
+ second column.
+ """
+ rows = list(rows)
+ widths = measure_table(rows)
+ if len(widths) != 2:
+ raise TypeError("Expected two columns for definition list")
+
+ first_col = min(widths[0], col_max) + col_spacing
+
+ for first, second in iter_rows(rows, len(widths)):
+ self.write("{:>{w}}{}".format("", first, w=self.current_indent))
+ if not second:
+ self.write("\n")
+ continue
+ if term_len(first) <= first_col - col_spacing:
+ self.write(" " * (first_col - term_len(first)))
+ else:
+ self.write("\n")
+ self.write(" " * (first_col + self.current_indent))
+
+ text_width = max(self.width - first_col - 2, 10)
+ wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
+ lines = wrapped_text.splitlines()
+
+ if lines:
+ self.write("{}\n".format(lines[0]))
+
+ for line in lines[1:]:
+ self.write(
+ "{:>{w}}{}\n".format(
+ "", line, w=first_col + self.current_indent
+ )
+ )
+
+ if len(lines) > 1:
+ # separate long help from next option
+ self.write("\n")
+ else:
+ self.write("\n")
+
+ @contextmanager
+ def section(self, name):
+ """Helpful context manager that writes a paragraph, a heading,
+ and the indents.
+
+ :param name: the section name that is written as heading.
+ """
+ self.write_paragraph()
+ self.write_heading(name)
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ @contextmanager
+ def indentation(self):
+ """A context manager that increases the indentation."""
+ self.indent()
+ try:
+ yield
+ finally:
+ self.dedent()
+
+ def getvalue(self):
+ """Returns the buffer contents."""
+ return "".join(self.buffer)
+
+
+def join_options(options):
+ """Given a list of option strings this joins them in the most appropriate
+ way and returns them in the form ``(formatted_string,
+ any_prefix_is_slash)`` where the second item in the tuple is a flag that
+ indicates if any of the option prefixes was a slash.
+ """
+ rv = []
+ any_prefix_is_slash = False
+ for opt in options:
+ prefix = split_opt(opt)[0]
+ if prefix == "/":
+ any_prefix_is_slash = True
+ rv.append((len(prefix), opt))
+
+ rv.sort(key=lambda x: x[0])
+
+ rv = ", ".join(x[1] for x in rv)
+ return rv, any_prefix_is_slash
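+
+
+# Editor's sketch (not part of click): shorter prefixes sort first, and a
+# Windows-style "/" prefix is reported via the flag.
+if __name__ == "__main__":
+ assert join_options(["--foo", "-f"]) == ("-f, --foo", False)
+ assert join_options(["/debug"]) == ("/debug", True)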
diff --git a/third_party/python/click/click/globals.py b/third_party/python/click/click/globals.py
new file mode 100644
index 0000000000..1649f9a0bf
--- /dev/null
+++ b/third_party/python/click/click/globals.py
@@ -0,0 +1,47 @@
+from threading import local
+
+_local = local()
+
+
+def get_current_context(silent=False):
+ """Returns the current click context. This can be used as a way to
+ access the current context object from anywhere. This is a more implicit
+ alternative to the :func:`pass_context` decorator. This function is
+ primarily useful for helpers such as :func:`echo` that might want to
+ change their behavior based on the current context.
+
+ To push the current context, :meth:`Context.scope` can be used.
+
+ .. versionadded:: 5.0
+
+ :param silent: if set to `True` the return value is `None` if no context
+ is available. The default behavior is to raise a
+ :exc:`RuntimeError`.
+ """
+ try:
+ return _local.stack[-1]
+ except (AttributeError, IndexError):
+ if not silent:
+ raise RuntimeError("There is no active click context.")
+
+
+def push_context(ctx):
+ """Pushes a new context to the current stack."""
+ _local.__dict__.setdefault("stack", []).append(ctx)
+
+
+def pop_context():
+ """Removes the top level from the stack."""
+ _local.stack.pop()
+
+
+def resolve_color_default(color=None):
+ """"Internal helper to get the default value of the color flag. If a
+ value is passed it's returned unchanged, otherwise it's looked up from
+ the current context.
+ """
+ if color is not None:
+ return color
+ ctx = get_current_context(silent=True)
+ if ctx is not None:
+ return ctx.color
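+
+
+# Editor's sketch (not part of click): the stack is thread-local, so
+# pushes and pops must stay balanced, which Context.scope takes care of.
+if __name__ == "__main__":
+ class _FakeContext(object):
+ color = True
+
+ push_context(_FakeContext())
+ assert resolve_color_default() is True
+ pop_context()
+ assert get_current_context(silent=True) is None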
diff --git a/third_party/python/click/click/parser.py b/third_party/python/click/click/parser.py
new file mode 100644
index 0000000000..f43ebfe9fc
--- /dev/null
+++ b/third_party/python/click/click/parser.py
@@ -0,0 +1,428 @@
+# -*- coding: utf-8 -*-
+"""
+This module started out largely as a copy-paste from the stdlib's
+optparse module with the features removed that we do not need from
+optparse because we implement them in Click on a higher level (for
+instance type handling, help formatting and a lot more).
+
+The plan is to remove more and more from here over time.
+
+The reason this is a different module and not optparse from the stdlib
+is that there are differences between 2.x and 3.x in the error messages
+generated, and that optparse in the stdlib uses gettext for no good
+reason, which might cause us issues.
+
+Click uses parts of optparse written by Gregory P. Ward and maintained
+by the Python Software Foundation. This is limited to code in parser.py.
+
+Copyright 2001-2006 Gregory P. Ward. All rights reserved.
+Copyright 2002-2006 Python Software Foundation. All rights reserved.
+"""
+import re
+from collections import deque
+
+from .exceptions import BadArgumentUsage
+from .exceptions import BadOptionUsage
+from .exceptions import NoSuchOption
+from .exceptions import UsageError
+
+
+def _unpack_args(args, nargs_spec):
+ """Given an iterable of arguments and an iterable of nargs specifications,
+ it returns a tuple with all the unpacked arguments as the first item
+ and all remaining arguments as the second.
+
+ The nargs specification is the number of arguments that should be consumed
+ or `-1` to indicate that this position should eat up all the remainders.
+
+ Missing items are filled with `None`.
+ """
+ args = deque(args)
+ nargs_spec = deque(nargs_spec)
+ rv = []
+ spos = None
+
+ def _fetch(c):
+ try:
+ if spos is None:
+ return c.popleft()
+ else:
+ return c.pop()
+ except IndexError:
+ return None
+
+ while nargs_spec:
+ nargs = _fetch(nargs_spec)
+ if nargs == 1:
+ rv.append(_fetch(args))
+ elif nargs > 1:
+ x = [_fetch(args) for _ in range(nargs)]
+ # If we're reversed, we're pulling in the arguments in reverse,
+ # so we need to turn them around.
+ if spos is not None:
+ x.reverse()
+ rv.append(tuple(x))
+ elif nargs < 0:
+ if spos is not None:
+ raise TypeError("Cannot have two nargs < 0")
+ spos = len(rv)
+ rv.append(None)
+
+ # spos is the position of the wildcard (star). If it's not `None`,
+ # we fill it with the remainder.
+ if spos is not None:
+ rv[spos] = tuple(args)
+ args = []
+ rv[spos + 1 :] = reversed(rv[spos + 1 :])
+
+ return tuple(rv), list(args)
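+
+
+# Editor's note (illustrative, not part of click): a ``-1`` slot consumes
+# whatever the fixed slots leave over, e.g.
+#
+#     _unpack_args(["a", "b", "c", "d"], [1, -1, 1])
+#     # -> (('a', ('b', 'c'), 'd'), [])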
+
+
+def _error_opt_args(nargs, opt):
+ if nargs == 1:
+ raise BadOptionUsage(opt, "{} option requires an argument".format(opt))
+ raise BadOptionUsage(opt, "{} option requires {} arguments".format(opt, nargs))
+
+
+def split_opt(opt):
+ first = opt[:1]
+ if first.isalnum():
+ return "", opt
+ if opt[1:2] == first:
+ return opt[:2], opt[2:]
+ return first, opt[1:]
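+
+
+# Editor's note (illustrative, not part of click):
+#
+#     split_opt("--foo")  # -> ('--', 'foo')
+#     split_opt("-f")     # -> ('-', 'f')
+#     split_opt("foo")    # -> ('', 'foo')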
+
+
+def normalize_opt(opt, ctx):
+ if ctx is None or ctx.token_normalize_func is None:
+ return opt
+ prefix, opt = split_opt(opt)
+ return prefix + ctx.token_normalize_func(opt)
+
+
+def split_arg_string(string):
+ """Given an argument string this attempts to split it into small parts."""
+ rv = []
+ for match in re.finditer(
+ r"('([^'\\]*(?:\\.[^'\\]*)*)'|\"([^\"\\]*(?:\\.[^\"\\]*)*)\"|\S+)\s*",
+ string,
+ re.S,
+ ):
+ arg = match.group().strip()
+ if arg[:1] == arg[-1:] and arg[:1] in "\"'":
+ arg = arg[1:-1].encode("ascii", "backslashreplace").decode("unicode-escape")
+ try:
+ arg = type(string)(arg)
+ except UnicodeError:
+ pass
+ rv.append(arg)
+ return rv
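+
+
+# Editor's note (illustrative, not part of click): quoting is honored,
+# e.g.
+#
+#     split_arg_string("pip install 'package name'")
+#     # -> ['pip', 'install', 'package name']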
+
+
+class Option(object):
+ def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
+ self._short_opts = []
+ self._long_opts = []
+ self.prefixes = set()
+
+ for opt in opts:
+ prefix, value = split_opt(opt)
+ if not prefix:
+ raise ValueError("Invalid start character for option ({})".format(opt))
+ self.prefixes.add(prefix[0])
+ if len(prefix) == 1 and len(value) == 1:
+ self._short_opts.append(opt)
+ else:
+ self._long_opts.append(opt)
+ self.prefixes.add(prefix)
+
+ if action is None:
+ action = "store"
+
+ self.dest = dest
+ self.action = action
+ self.nargs = nargs
+ self.const = const
+ self.obj = obj
+
+ @property
+ def takes_value(self):
+ return self.action in ("store", "append")
+
+ def process(self, value, state):
+ if self.action == "store":
+ state.opts[self.dest] = value
+ elif self.action == "store_const":
+ state.opts[self.dest] = self.const
+ elif self.action == "append":
+ state.opts.setdefault(self.dest, []).append(value)
+ elif self.action == "append_const":
+ state.opts.setdefault(self.dest, []).append(self.const)
+ elif self.action == "count":
+ state.opts[self.dest] = state.opts.get(self.dest, 0) + 1
+ else:
+ raise ValueError("unknown action '{}'".format(self.action))
+ state.order.append(self.obj)
+
+
+class Argument(object):
+ def __init__(self, dest, nargs=1, obj=None):
+ self.dest = dest
+ self.nargs = nargs
+ self.obj = obj
+
+ def process(self, value, state):
+ if self.nargs > 1:
+ holes = sum(1 for x in value if x is None)
+ if holes == len(value):
+ value = None
+ elif holes != 0:
+ raise BadArgumentUsage(
+ "argument {} takes {} values".format(self.dest, self.nargs)
+ )
+ state.opts[self.dest] = value
+ state.order.append(self.obj)
+
+
+class ParsingState(object):
+ def __init__(self, rargs):
+ self.opts = {}
+ self.largs = []
+ self.rargs = rargs
+ self.order = []
+
+
+class OptionParser(object):
+ """The option parser is an internal class that is ultimately used to
+ parse options and arguments. It's modelled after optparse and brings
+ a similar but vastly simplified API. It should generally not be used
+ directly as the high level Click classes wrap it for you.
+
+ It's not nearly as extensible as optparse or argparse as it does not
+ implement features that are implemented on a higher level (such as
+ types or defaults).
+
+ :param ctx: optionally the :class:`~click.Context` that this parser
+ should be associated with.
+ """
+
+ def __init__(self, ctx=None):
+ #: The :class:`~click.Context` for this parser. This might be
+ #: `None` for some advanced use cases.
+ self.ctx = ctx
+ #: This controls how the parser deals with interspersed arguments.
+ #: If this is set to `False`, the parser will stop on the first
+ #: non-option. Click uses this to implement nested subcommands
+ #: safely.
+ self.allow_interspersed_args = True
+ #: This tells the parser how to deal with unknown options. By
+ #: default it will error out (which is sensible), but there is a
+ #: second mode where it will ignore it and continue processing
+ #: after shifting all the unknown options into the resulting args.
+ self.ignore_unknown_options = False
+ if ctx is not None:
+ self.allow_interspersed_args = ctx.allow_interspersed_args
+ self.ignore_unknown_options = ctx.ignore_unknown_options
+ self._short_opt = {}
+ self._long_opt = {}
+ self._opt_prefixes = {"-", "--"}
+ self._args = []
+
+ def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None):
+ """Adds a new option named `dest` to the parser. The destination
+ is not inferred (unlike with optparse) and needs to be explicitly
+ provided. Action can be any of ``store``, ``store_const``,
+ ``append``, ``append_const`` or ``count``.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ if obj is None:
+ obj = dest
+ opts = [normalize_opt(opt, self.ctx) for opt in opts]
+ option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj)
+ self._opt_prefixes.update(option.prefixes)
+ for opt in option._short_opts:
+ self._short_opt[opt] = option
+ for opt in option._long_opts:
+ self._long_opt[opt] = option
+
+ def add_argument(self, dest, nargs=1, obj=None):
+ """Adds a positional argument named `dest` to the parser.
+
+ The `obj` can be used to identify the option in the order list
+ that is returned from the parser.
+ """
+ if obj is None:
+ obj = dest
+ self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))
+
+ def parse_args(self, args):
+ """Parses positional arguments and returns ``(values, args, order)``
+ for the parsed options and arguments as well as the leftover
+ arguments if there are any. The order is a list of objects as they
+ appear on the command line. If arguments appear multiple times they
+ will be memorized multiple times as well.
+ """
+ state = ParsingState(args)
+ try:
+ self._process_args_for_options(state)
+ self._process_args_for_args(state)
+ except UsageError:
+ if self.ctx is None or not self.ctx.resilient_parsing:
+ raise
+ return state.opts, state.largs, state.order
+
+ def _process_args_for_args(self, state):
+ pargs, args = _unpack_args(
+ state.largs + state.rargs, [x.nargs for x in self._args]
+ )
+
+ for idx, arg in enumerate(self._args):
+ arg.process(pargs[idx], state)
+
+ state.largs = args
+ state.rargs = []
+
+ def _process_args_for_options(self, state):
+ while state.rargs:
+ arg = state.rargs.pop(0)
+ arglen = len(arg)
+ # Double dashes always handled explicitly regardless of what
+ # prefixes are valid.
+ if arg == "--":
+ return
+ elif arg[:1] in self._opt_prefixes and arglen > 1:
+ self._process_opts(arg, state)
+ elif self.allow_interspersed_args:
+ state.largs.append(arg)
+ else:
+ state.rargs.insert(0, arg)
+ return
+
+ # Say this is the original argument list:
+ # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
+ # ^
+ # (we are about to process arg(i)).
+ #
+ # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
+ # [arg0, ..., arg(i-1)] (any options and their arguments will have
+ # been removed from largs).
+ #
+ # The while loop will usually consume 1 or more arguments per pass.
+ # If it consumes 1 (eg. arg is an option that takes no arguments),
+ # then after _process_arg() is done the situation is:
+ #
+ # largs = subset of [arg0, ..., arg(i)]
+ # rargs = [arg(i+1), ..., arg(N-1)]
+ #
+ # If allow_interspersed_args is false, largs will always be
+ # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
+ # not a very interesting subset!
+
+ def _match_long_opt(self, opt, explicit_value, state):
+ if opt not in self._long_opt:
+ possibilities = [word for word in self._long_opt if word.startswith(opt)]
+ raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)
+
+ option = self._long_opt[opt]
+ if option.takes_value:
+ # At this point it's safe to modify rargs by injecting the
+ # explicit value, because no exception is raised in this
+ # branch. This means that the inserted value will be fully
+ # consumed.
+ if explicit_value is not None:
+ state.rargs.insert(0, explicit_value)
+
+ nargs = option.nargs
+ if len(state.rargs) < nargs:
+ _error_opt_args(nargs, opt)
+ elif nargs == 1:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ elif explicit_value is not None:
+ raise BadOptionUsage(opt, "{} option does not take a value".format(opt))
+
+ else:
+ value = None
+
+ option.process(value, state)
+
+ def _match_short_opt(self, arg, state):
+ stop = False
+ i = 1
+ prefix = arg[0]
+ unknown_options = []
+
+ for ch in arg[1:]:
+ opt = normalize_opt(prefix + ch, self.ctx)
+ option = self._short_opt.get(opt)
+ i += 1
+
+ if not option:
+ if self.ignore_unknown_options:
+ unknown_options.append(ch)
+ continue
+ raise NoSuchOption(opt, ctx=self.ctx)
+ if option.takes_value:
+ # Any characters left in arg? Pretend they're the
+ # next arg, and stop consuming characters of arg.
+ if i < len(arg):
+ state.rargs.insert(0, arg[i:])
+ stop = True
+
+ nargs = option.nargs
+ if len(state.rargs) < nargs:
+ _error_opt_args(nargs, opt)
+ elif nargs == 1:
+ value = state.rargs.pop(0)
+ else:
+ value = tuple(state.rargs[:nargs])
+ del state.rargs[:nargs]
+
+ else:
+ value = None
+
+ option.process(value, state)
+
+ if stop:
+ break
+
+ # If we got any unknown options, we recombine the string of the
+ # remaining options and re-attach the prefix, then report that
+ # to the state as new larg. This way there is basic combinatorics
+ # that can be achieved while still ignoring unknown arguments.
+ if self.ignore_unknown_options and unknown_options:
+ state.largs.append("{}{}".format(prefix, "".join(unknown_options)))
+
+ def _process_opts(self, arg, state):
+ explicit_value = None
+ # Long option handling happens in two parts. The first part is
+ # supporting explicitly attached values. In any case, we will try
+ # to long match the option first.
+ if "=" in arg:
+ long_opt, explicit_value = arg.split("=", 1)
+ else:
+ long_opt = arg
+ norm_long_opt = normalize_opt(long_opt, self.ctx)
+
+ # At this point we will match the (assumed) long option through
+ # the long option matching code. Note that this allows options
+ # like "-foo" to be matched as long options.
+ try:
+ self._match_long_opt(norm_long_opt, explicit_value, state)
+ except NoSuchOption:
+ # At this point the long option matching failed, and we need
+ # to try with short options. However there is a special rule
+ # which says, that if we have a two character options prefix
+ # (applies to "--foo" for instance), we do not dispatch to the
+ # short option code and will instead raise the no option
+ # error.
+ if arg[:2] not in self._opt_prefixes:
+ return self._match_short_opt(arg, state)
+ if not self.ignore_unknown_options:
+ raise
+ state.largs.append(arg)
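+
+
+# Editor's sketch (not part of click): the high-level Command machinery
+# normally drives this parser, but it can be exercised directly.
+if __name__ == "__main__":
+ parser = OptionParser()
+ parser.add_option(["-v", "--verbose"], dest="verbose", action="count")
+ parser.add_option(["-n", "--name"], dest="name")
+ parser.add_argument("src", nargs=1)
+ opts, leftovers, order = parser.parse_args(
+ ["-vv", "--name", "x", "in.txt"]
+ )
+ assert opts == {"verbose": 2, "name": "x", "src": "in.txt"}
+ assert leftovers == []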
diff --git a/third_party/python/click/click/termui.py b/third_party/python/click/click/termui.py
new file mode 100644
index 0000000000..02ef9e9f04
--- /dev/null
+++ b/third_party/python/click/click/termui.py
@@ -0,0 +1,681 @@
+import inspect
+import io
+import itertools
+import os
+import struct
+import sys
+
+from ._compat import DEFAULT_COLUMNS
+from ._compat import get_winterm_size
+from ._compat import isatty
+from ._compat import raw_input
+from ._compat import string_types
+from ._compat import strip_ansi
+from ._compat import text_type
+from ._compat import WIN
+from .exceptions import Abort
+from .exceptions import UsageError
+from .globals import resolve_color_default
+from .types import Choice
+from .types import convert_type
+from .types import Path
+from .utils import echo
+from .utils import LazyFile
+
+# The prompt functions to use. The doc tools currently override these
+# functions to customize how they work.
+visible_prompt_func = raw_input
+
+_ansi_colors = {
+ "black": 30,
+ "red": 31,
+ "green": 32,
+ "yellow": 33,
+ "blue": 34,
+ "magenta": 35,
+ "cyan": 36,
+ "white": 37,
+ "reset": 39,
+ "bright_black": 90,
+ "bright_red": 91,
+ "bright_green": 92,
+ "bright_yellow": 93,
+ "bright_blue": 94,
+ "bright_magenta": 95,
+ "bright_cyan": 96,
+ "bright_white": 97,
+}
+_ansi_reset_all = "\033[0m"
+
+
+def hidden_prompt_func(prompt):
+ import getpass
+
+ return getpass.getpass(prompt)
+
+
+def _build_prompt(
+ text, suffix, show_default=False, default=None, show_choices=True, type=None
+):
+ prompt = text
+ if type is not None and show_choices and isinstance(type, Choice):
+ prompt += " ({})".format(", ".join(map(str, type.choices)))
+ if default is not None and show_default:
+ prompt = "{} [{}]".format(prompt, _format_default(default))
+ return prompt + suffix
+
+
+def _format_default(default):
+ if isinstance(default, (io.IOBase, LazyFile)) and hasattr(default, "name"):
+ return default.name
+
+ return default
+
+
+def prompt(
+ text,
+ default=None,
+ hide_input=False,
+ confirmation_prompt=False,
+ type=None,
+ value_proc=None,
+ prompt_suffix=": ",
+ show_default=True,
+ err=False,
+ show_choices=True,
+):
+ """Prompts a user for input. This is a convenience function that can
+ be used to prompt a user for input later.
+
+ If the user aborts the input by sending an interrupt signal, this
+ function will catch it and raise a :exc:`Abort` exception.
+
+ .. versionadded:: 7.0
+ Added the show_choices parameter.
+
+ .. versionadded:: 6.0
+ Added unicode support for cmd.exe on Windows.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param text: the text to show for the prompt.
+ :param default: the default value to use if no input happens. If this
+ is not given it will prompt until it's aborted.
+ :param hide_input: if this is set to true then the input value will
+ be hidden.
+ :param confirmation_prompt: asks for confirmation for the value.
+ :param type: the type to use to check the value against.
+ :param value_proc: if this parameter is provided it's a function that
+ is invoked instead of the type conversion to
+ convert a value.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ :param show_choices: Show or hide choices if the passed type is a Choice.
+ For example if type is a Choice of either day or week,
+ show_choices is true and text is "Group by" then the
+ prompt will be "Group by (day, week): ".
+ """
+ result = None
+
+ def prompt_func(text):
+ f = hidden_prompt_func if hide_input else visible_prompt_func
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(text, nl=False, err=err)
+ return f("")
+ except (KeyboardInterrupt, EOFError):
+ # getpass doesn't print a newline if the user aborts input with ^C.
+ # Allegedly this behavior is inherited from getpass(3).
+ # A doc bug has been filed at https://bugs.python.org/issue24711
+ if hide_input:
+ echo(None, err=err)
+ raise Abort()
+
+ if value_proc is None:
+ value_proc = convert_type(type, default)
+
+ prompt = _build_prompt(
+ text, prompt_suffix, show_default, default, show_choices, type
+ )
+
+ while 1:
+ while 1:
+ value = prompt_func(prompt)
+ if value:
+ break
+ elif default is not None:
+ if isinstance(value_proc, Path):
+ # validate Path default value(exists, dir_okay etc.)
+ value = default
+ break
+ return default
+ try:
+ result = value_proc(value)
+ except UsageError as e:
+ echo("Error: {}".format(e.message), err=err) # noqa: B306
+ continue
+ if not confirmation_prompt:
+ return result
+ while 1:
+ value2 = prompt_func("Repeat for confirmation: ")
+ if value2:
+ break
+ if value == value2:
+ return result
+ echo("Error: the two entered values do not match", err=err)
+
+
+def confirm(
+ text, default=False, abort=False, prompt_suffix=": ", show_default=True, err=False
+):
+ """Prompts for confirmation (yes/no question).
+
+ If the user aborts the input by sending an interrupt signal, this
+ function will catch it and raise a :exc:`Abort` exception.
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param text: the question to ask.
+ :param default: the default for the prompt.
+ :param abort: if this is set to `True` a negative answer aborts the
+ exception by raising :exc:`Abort`.
+ :param prompt_suffix: a suffix that should be added to the prompt.
+ :param show_default: shows or hides the default value in the prompt.
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ """
+ prompt = _build_prompt(
+ text, prompt_suffix, show_default, "Y/n" if default else "y/N"
+ )
+ while 1:
+ try:
+ # Write the prompt separately so that we get nice
+ # coloring through colorama on Windows
+ echo(prompt, nl=False, err=err)
+ value = visible_prompt_func("").lower().strip()
+ except (KeyboardInterrupt, EOFError):
+ raise Abort()
+ if value in ("y", "yes"):
+ rv = True
+ elif value in ("n", "no"):
+ rv = False
+ elif value == "":
+ rv = default
+ else:
+ echo("Error: invalid input", err=err)
+ continue
+ break
+ if abort and not rv:
+ raise Abort()
+ return rv
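+
+
+# Editor's note (illustrative, not part of click):
+#
+#     confirm("Drop the database?", abort=True)
+#
+# renders "Drop the database? [y/N]: " and raises Abort on "n" or on a
+# plain Enter (the default is False).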
+
+
+def get_terminal_size():
+ """Returns the current size of the terminal as tuple in the form
+ ``(width, height)`` in columns and rows.
+ """
+ # If shutil has get_terminal_size() (Python 3.3 and later) use that
+ if sys.version_info >= (3, 3):
+ import shutil
+
+ shutil_get_terminal_size = getattr(shutil, "get_terminal_size", None)
+ if shutil_get_terminal_size:
+ sz = shutil_get_terminal_size()
+ return sz.columns, sz.lines
+
+ # We provide a sensible default for get_winterm_size() when being invoked
+ # inside a subprocess. Without this, it would not return a useful size.
+ if get_winterm_size is not None:
+ size = get_winterm_size()
+ if size == (0, 0):
+ return (79, 24)
+ else:
+ return size
+
+ def ioctl_gwinsz(fd):
+ try:
+ import fcntl
+ import termios
+
+ cr = struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
+ except Exception:
+ return
+ return cr
+
+ cr = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2)
+ if not cr:
+ try:
+ fd = os.open(os.ctermid(), os.O_RDONLY)
+ try:
+ cr = ioctl_gwinsz(fd)
+ finally:
+ os.close(fd)
+ except Exception:
+ pass
+ if not cr or not cr[0] or not cr[1]:
+ cr = (os.environ.get("LINES", 25), os.environ.get("COLUMNS", DEFAULT_COLUMNS))
+ return int(cr[1]), int(cr[0])
+
+
+def echo_via_pager(text_or_generator, color=None):
+ """This function takes a text and shows it via an environment specific
+ pager on stdout.
+
+ .. versionchanged:: 3.0
+ Added the `color` flag.
+
+ :param text_or_generator: the text to page, or alternatively, a
+ generator emitting the text to page.
+ :param color: controls if the pager supports ANSI colors or not. The
+ default is autodetection.
+ """
+ color = resolve_color_default(color)
+
+ if inspect.isgeneratorfunction(text_or_generator):
+ i = text_or_generator()
+ elif isinstance(text_or_generator, string_types):
+ i = [text_or_generator]
+ else:
+ i = iter(text_or_generator)
+
+ # convert every element of i to a text type if necessary
+ text_generator = (el if isinstance(el, string_types) else text_type(el) for el in i)
+
+ from ._termui_impl import pager
+
+ return pager(itertools.chain(text_generator, "\n"), color)
+
+
+def progressbar(
+ iterable=None,
+ length=None,
+ label=None,
+ show_eta=True,
+ show_percent=None,
+ show_pos=False,
+ item_show_func=None,
+ fill_char="#",
+ empty_char="-",
+ bar_template="%(label)s [%(bar)s] %(info)s",
+ info_sep=" ",
+ width=36,
+ file=None,
+ color=None,
+):
+ """This function creates an iterable context manager that can be used
+ to iterate over something while showing a progress bar. It will
+ either iterate over the `iterable` or `length` items (that are counted
+ up). While iteration happens, this function will print a rendered
+ progress bar to the given `file` (defaults to stdout) and will attempt
+ to calculate remaining time and more. By default, this progress bar
+ will not be rendered if the file is not a terminal.
+
+ The context manager creates the progress bar. When the context
+ manager is entered the progress bar is already created. With every
+ iteration over the progress bar, the iterable passed to the bar is
+ advanced and the bar is updated. When the context manager exits,
+ a newline is printed and the progress bar is finalized on screen.
+
+ Note: The progress bar is currently designed for use cases where the
+ total progress can be expected to take at least several seconds.
+ Because of this, the ProgressBar class won't display progress that
+ is considered too fast, or progress where the time between steps is
+ less than a second.
+
+ No other printing must happen while the bar is active, or it will be
+ unintentionally destroyed.
+
+ Example usage::
+
+ with progressbar(items) as bar:
+ for item in bar:
+ do_something_with(item)
+
+ Alternatively, if no iterable is specified, one can manually update the
+ progress bar through the `update()` method instead of directly
+ iterating over the progress bar. The update method accepts the number
+ of steps to increment the bar with::
+
+ with progressbar(length=chunks.total_bytes) as bar:
+ for chunk in chunks:
+ process_chunk(chunk)
+ bar.update(chunks.bytes)
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+ Added the `color` parameter. Added an `update` method to the
+ progressbar object.
+
+ :param iterable: an iterable to iterate over. If not provided the length
+ is required.
+ :param length: the number of items to iterate over. By default the
+ progressbar will attempt to ask the iterator about its
+ length, which might or might not work. If an iterable is
+ also provided this parameter can be used to override the
+ length. If an iterable is not provided the progress bar
+ will iterate over a range of that length.
+ :param label: the label to show next to the progress bar.
+ :param show_eta: enables or disables the estimated time display. This is
+ automatically disabled if the length cannot be
+ determined.
+ :param show_percent: enables or disables the percentage display. The
+ default is `True` if the iterable has a length or
+ `False` if not.
+ :param show_pos: enables or disables the absolute position display. The
+ default is `False`.
+ :param item_show_func: a function called with the current item which
+ can return a string to show the current item
+ next to the progress bar. Note that the current
+ item can be `None`!
+ :param fill_char: the character to use to show the filled part of the
+ progress bar.
+ :param empty_char: the character to use to show the non-filled part of
+ the progress bar.
+ :param bar_template: the format string to use as template for the bar.
+ The parameters in it are ``label`` for the label,
+ ``bar`` for the progress bar and ``info`` for the
+ info section.
+ :param info_sep: the separator between multiple info items (eta etc.)
+ :param width: the width of the progress bar in characters; 0 means full
+ terminal width
+ :param file: the file to write to. If this is not a terminal then
+ only the label is printed.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection. This is only needed if ANSI
+ codes are included anywhere in the progress bar output
+ which is not the case by default.
+ """
+ from ._termui_impl import ProgressBar
+
+ color = resolve_color_default(color)
+ return ProgressBar(
+ iterable=iterable,
+ length=length,
+ show_eta=show_eta,
+ show_percent=show_percent,
+ show_pos=show_pos,
+ item_show_func=item_show_func,
+ fill_char=fill_char,
+ empty_char=empty_char,
+ bar_template=bar_template,
+ info_sep=info_sep,
+ file=file,
+ label=label,
+ width=width,
+ color=color,
+ )
+
+
+def clear():
+ """Clears the terminal screen. This will have the effect of clearing
+ the whole visible space of the terminal and moving the cursor to the
+ top left. This does not do anything if not connected to a terminal.
+
+ .. versionadded:: 2.0
+ """
+ if not isatty(sys.stdout):
+ return
+ # If we're on Windows and we don't have colorama available, then we
+ # clear the screen by shelling out. Otherwise we can use an escape
+ # sequence.
+ if WIN:
+ os.system("cls")
+ else:
+ sys.stdout.write("\033[2J\033[1;1H")
+
+
+def style(
+ text,
+ fg=None,
+ bg=None,
+ bold=None,
+ dim=None,
+ underline=None,
+ blink=None,
+ reverse=None,
+ reset=True,
+):
+ """Styles a text with ANSI styles and returns the new string. By
+ default the styling is self contained which means that at the end
+ of the string a reset code is issued. This can be prevented by
+ passing ``reset=False``.
+
+ Examples::
+
+ click.echo(click.style('Hello World!', fg='green'))
+ click.echo(click.style('ATTENTION!', blink=True))
+ click.echo(click.style('Some things', reverse=True, fg='cyan'))
+
+ Supported color names:
+
+ * ``black`` (might be a gray)
+ * ``red``
+ * ``green``
+ * ``yellow`` (might be an orange)
+ * ``blue``
+ * ``magenta``
+ * ``cyan``
+ * ``white`` (might be light gray)
+ * ``bright_black``
+ * ``bright_red``
+ * ``bright_green``
+ * ``bright_yellow``
+ * ``bright_blue``
+ * ``bright_magenta``
+ * ``bright_cyan``
+ * ``bright_white``
+ * ``reset`` (reset the color code only)
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 7.0
+ Added support for bright colors.
+
+ :param text: the string to style with ansi codes.
+ :param fg: if provided this will become the foreground color.
+ :param bg: if provided this will become the background color.
+ :param bold: if provided this will enable or disable bold mode.
+ :param dim: if provided this will enable or disable dim mode. This is
+ badly supported.
+ :param underline: if provided this will enable or disable underline.
+ :param blink: if provided this will enable or disable blinking.
+ :param reverse: if provided this will enable or disable inverse
+ rendering (foreground becomes background and the
+ other way round).
+ :param reset: by default a reset-all code is added at the end of the
+ string which means that styles do not carry over. This
+ can be disabled to compose styles.
+ """
+ bits = []
+ if fg:
+ try:
+ bits.append("\033[{}m".format(_ansi_colors[fg]))
+ except KeyError:
+ raise TypeError("Unknown color '{}'".format(fg))
+ if bg:
+ try:
+ bits.append("\033[{}m".format(_ansi_colors[bg] + 10))
+ except KeyError:
+ raise TypeError("Unknown color '{}'".format(bg))
+ if bold is not None:
+ bits.append("\033[{}m".format(1 if bold else 22))
+ if dim is not None:
+ bits.append("\033[{}m".format(2 if dim else 22))
+ if underline is not None:
+ bits.append("\033[{}m".format(4 if underline else 24))
+ if blink is not None:
+ bits.append("\033[{}m".format(5 if blink else 25))
+ if reverse is not None:
+ bits.append("\033[{}m".format(7 if reverse else 27))
+ bits.append(text)
+ if reset:
+ bits.append(_ansi_reset_all)
+ return "".join(bits)
+
+
+def unstyle(text):
+ """Removes ANSI styling information from a string. Usually it's not
+ necessary to use this function as Click's echo function will
+ automatically remove styling if necessary.
+
+ .. versionadded:: 2.0
+
+ :param text: the text to remove style information from.
+ """
+ return strip_ansi(text)
+
+
+def secho(message=None, file=None, nl=True, err=False, color=None, **styles):
+ """This function combines :func:`echo` and :func:`style` into one
+ call. As such the following two calls are the same::
+
+ click.secho('Hello World!', fg='green')
+ click.echo(click.style('Hello World!', fg='green'))
+
+ All keyword arguments are forwarded to the underlying functions
+ depending on which one they go with.
+
+ .. versionadded:: 2.0
+ """
+ if message is not None:
+ message = style(message, **styles)
+ return echo(message, file=file, nl=nl, err=err, color=color)
+
+
+def edit(
+ text=None, editor=None, env=None, require_save=True, extension=".txt", filename=None
+):
+ r"""Edits the given text in the defined editor. If an editor is given
+ (should be the full path to the executable but the regular operating
+ system search path is used for finding the executable) it overrides
+ the detected editor. Optionally, some environment variables can be
+ used. If the editor is closed without changes, `None` is returned. In
+ case a file is edited directly the return value is always `None` and
+ `require_save` and `extension` are ignored.
+
+ If the editor cannot be opened a :exc:`UsageError` is raised.
+
+ Note for Windows: to simplify cross-platform usage, the newlines are
+ automatically converted from POSIX to Windows and vice versa. As such,
+ the message here will have ``\n`` as newline markers.
+
+ :param text: the text to edit.
+ :param editor: optionally the editor to use. Defaults to automatic
+ detection.
+ :param env: environment variables to forward to the editor.
+ :param require_save: if this is true, then not saving in the editor
+ will make the return value become `None`.
+ :param extension: the extension to tell the editor about. This defaults
+ to `.txt` but changing this might change syntax
+ highlighting.
+ :param filename: if provided it will edit this file instead of the
+ provided text contents. It will not use a temporary
+ file as an indirection in that case.
+ """
+ from ._termui_impl import Editor
+
+ editor = Editor(
+ editor=editor, env=env, require_save=require_save, extension=extension
+ )
+ if filename is None:
+ return editor.edit(text)
+ editor.edit_file(filename)
+
+
+def launch(url, wait=False, locate=False):
+ """This function launches the given URL (or filename) in the default
+ viewer application for this file type. If this is an executable, it
+ might launch the executable in a new session. The return value is
+ the exit code of the launched application. Usually, ``0`` indicates
+ success.
+
+ Examples::
+
+ click.launch('https://click.palletsprojects.com/')
+ click.launch('/my/downloaded/file', locate=True)
+
+ .. versionadded:: 2.0
+
+ :param url: URL or filename of the thing to launch.
+ :param wait: waits for the program to stop.
+ :param locate: if this is set to `True` then instead of launching the
+ application associated with the URL it will attempt to
+ launch a file manager with the file located. This
+ might have weird effects if the URL does not point to
+ the filesystem.
+ """
+ from ._termui_impl import open_url
+
+ return open_url(url, wait=wait, locate=locate)
+
+
+# If this is provided, getchar() calls into this instead. This is used
+# for unittesting purposes.
+_getchar = None
+
+
+def getchar(echo=False):
+ """Fetches a single character from the terminal and returns it. This
+ will always return a unicode character and under certain rare
+ circumstances this might return more than one character. This happens
+ when, for whatever reason, multiple characters end up in the terminal
+ buffer or standard input is not actually a terminal.
+
+ Note that this will always read from the terminal, even if something
+ is piped into the standard input.
+
+ Note for Windows: in rare cases when typing non-ASCII characters, this
+ function might wait for a second character and then return both at once.
+ This is because certain Unicode characters look like special-key markers.
+
+ .. versionadded:: 2.0
+
+ :param echo: if set to `True`, the character read will also show up on
+ the terminal. The default is to not show it.
+ """
+ f = _getchar
+ if f is None:
+ from ._termui_impl import getchar as f
+ return f(echo)
+
+
+def raw_terminal():
+ from ._termui_impl import raw_terminal as f
+
+ return f()
+
+
+def pause(info="Press any key to continue ...", err=False):
+ """This command stops execution and waits for the user to press any
+ key to continue. This is similar to the Windows batch "pause"
+ command. If the program is not run through a terminal, this command
+ will instead do nothing.
+
+ .. versionadded:: 2.0
+
+ .. versionadded:: 4.0
+ Added the `err` parameter.
+
+ :param info: the info string to print before pausing.
+ :param err: if set to true the message goes to ``stderr`` instead of
+ ``stdout``, the same as with echo.
+ """
+ if not isatty(sys.stdin) or not isatty(sys.stdout):
+ return
+ try:
+ if info:
+ echo(info, nl=False, err=err)
+ try:
+ getchar()
+ except (KeyboardInterrupt, EOFError):
+ pass
+ finally:
+ if info:
+ echo(err=err)
diff --git a/third_party/python/click/click/testing.py b/third_party/python/click/click/testing.py
new file mode 100644
index 0000000000..a3dba3b301
--- /dev/null
+++ b/third_party/python/click/click/testing.py
@@ -0,0 +1,382 @@
+import contextlib
+import os
+import shlex
+import shutil
+import sys
+import tempfile
+
+from . import formatting
+from . import termui
+from . import utils
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import string_types
+
+
+if PY2:
+ from cStringIO import StringIO
+else:
+ import io
+ from ._compat import _find_binary_reader
+
+
+class EchoingStdin(object):
+ def __init__(self, input, output):
+ self._input = input
+ self._output = output
+
+ def __getattr__(self, x):
+ return getattr(self._input, x)
+
+ def _echo(self, rv):
+ self._output.write(rv)
+ return rv
+
+ def read(self, n=-1):
+ return self._echo(self._input.read(n))
+
+ def readline(self, n=-1):
+ return self._echo(self._input.readline(n))
+
+ def readlines(self):
+ return [self._echo(x) for x in self._input.readlines()]
+
+ def __iter__(self):
+ return iter(self._echo(x) for x in self._input)
+
+ def __repr__(self):
+ return repr(self._input)
+
+
+def make_input_stream(input, charset):
+ # Is already an input stream.
+ if hasattr(input, "read"):
+ if PY2:
+ return input
+ rv = _find_binary_reader(input)
+ if rv is not None:
+ return rv
+ raise TypeError("Could not find binary reader for input stream.")
+
+ if input is None:
+ input = b""
+ elif not isinstance(input, bytes):
+ input = input.encode(charset)
+ if PY2:
+ return StringIO(input)
+ return io.BytesIO(input)
+
+
+class Result(object):
+ """Holds the captured result of an invoked CLI script."""
+
+ def __init__(
+ self, runner, stdout_bytes, stderr_bytes, exit_code, exception, exc_info=None
+ ):
+ #: The runner that created the result
+ self.runner = runner
+ #: The standard output as bytes.
+ self.stdout_bytes = stdout_bytes
+ #: The standard error as bytes, or None if not available
+ self.stderr_bytes = stderr_bytes
+ #: The exit code as integer.
+ self.exit_code = exit_code
+ #: The exception that happened if one did.
+ self.exception = exception
+ #: The traceback
+ self.exc_info = exc_info
+
+ @property
+ def output(self):
+ """The (standard) output as unicode string."""
+ return self.stdout
+
+ @property
+ def stdout(self):
+ """The standard output as unicode string."""
+ return self.stdout_bytes.decode(self.runner.charset, "replace").replace(
+ "\r\n", "\n"
+ )
+
+ @property
+ def stderr(self):
+ """The standard error as unicode string."""
+ if self.stderr_bytes is None:
+ raise ValueError("stderr not separately captured")
+ return self.stderr_bytes.decode(self.runner.charset, "replace").replace(
+ "\r\n", "\n"
+ )
+
+ def __repr__(self):
+ return "<{} {}>".format(
+ type(self).__name__, repr(self.exception) if self.exception else "okay"
+ )
+
+
+class CliRunner(object):
+ """The CLI runner provides functionality to invoke a Click command line
+ script for unittesting purposes in an isolated environment. This only
+ works in single-threaded systems without any concurrency as it changes the
+ global interpreter state.
+
+ :param charset: the character set for the input and output data. This is
+ UTF-8 by default and should not currently be changed, as
+ the reporting to Click only works properly in Python 2.
+ :param env: a dictionary with environment variables for overriding.
+ :param echo_stdin: if this is set to `True`, then reading from stdin writes
+ to stdout. This is useful for showing examples in
+ some circumstances. Note that regular prompts
+ will automatically echo the input.
+ :param mix_stderr: if this is set to `False`, then stdout and stderr are
+ preserved as independent streams. This is useful for
+ Unix-philosophy apps that have predictable stdout and
+ noisy stderr, such that each may be measured
+ independently
+ """
+
+ def __init__(self, charset=None, env=None, echo_stdin=False, mix_stderr=True):
+ if charset is None:
+ charset = "utf-8"
+ self.charset = charset
+ self.env = env or {}
+ self.echo_stdin = echo_stdin
+ self.mix_stderr = mix_stderr
+
+ def get_default_prog_name(self, cli):
+ """Given a command object it will return the default program name
+ for it. The default is the `name` attribute or ``"root"`` if not
+ set.
+ """
+ return cli.name or "root"
+
+ def make_env(self, overrides=None):
+ """Returns the environment overrides for invoking a script."""
+ rv = dict(self.env)
+ if overrides:
+ rv.update(overrides)
+ return rv
+
+ @contextlib.contextmanager
+ def isolation(self, input=None, env=None, color=False):
+ """A context manager that sets up the isolation for invoking of a
+ command line tool. This sets up stdin with the given input data
+ and `os.environ` with the overrides from the given dictionary.
+ This also rebinds some internals in Click to be mocked (like the
+ prompt functionality).
+
+ This is automatically done in the :meth:`invoke` method.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter was added.
+
+ :param input: the input stream to put into sys.stdin.
+ :param env: the environment overrides as dictionary.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+ """
+ input = make_input_stream(input, self.charset)
+
+ old_stdin = sys.stdin
+ old_stdout = sys.stdout
+ old_stderr = sys.stderr
+ old_forced_width = formatting.FORCED_WIDTH
+ formatting.FORCED_WIDTH = 80
+
+ env = self.make_env(env)
+
+ if PY2:
+ bytes_output = StringIO()
+ if self.echo_stdin:
+ input = EchoingStdin(input, bytes_output)
+ sys.stdout = bytes_output
+ if not self.mix_stderr:
+ bytes_error = StringIO()
+ sys.stderr = bytes_error
+ else:
+ bytes_output = io.BytesIO()
+ if self.echo_stdin:
+ input = EchoingStdin(input, bytes_output)
+ input = io.TextIOWrapper(input, encoding=self.charset)
+ sys.stdout = io.TextIOWrapper(bytes_output, encoding=self.charset)
+ if not self.mix_stderr:
+ bytes_error = io.BytesIO()
+ sys.stderr = io.TextIOWrapper(bytes_error, encoding=self.charset)
+
+ if self.mix_stderr:
+ sys.stderr = sys.stdout
+
+ sys.stdin = input
+
+ def visible_input(prompt=None):
+ sys.stdout.write(prompt or "")
+ val = input.readline().rstrip("\r\n")
+ sys.stdout.write("{}\n".format(val))
+ sys.stdout.flush()
+ return val
+
+ def hidden_input(prompt=None):
+ sys.stdout.write("{}\n".format(prompt or ""))
+ sys.stdout.flush()
+ return input.readline().rstrip("\r\n")
+
+ def _getchar(echo):
+ char = sys.stdin.read(1)
+ if echo:
+ sys.stdout.write(char)
+ sys.stdout.flush()
+ return char
+
+ default_color = color
+
+ def should_strip_ansi(stream=None, color=None):
+ if color is None:
+ return not default_color
+ return not color
+
+ old_visible_prompt_func = termui.visible_prompt_func
+ old_hidden_prompt_func = termui.hidden_prompt_func
+ old__getchar_func = termui._getchar
+ old_should_strip_ansi = utils.should_strip_ansi
+ termui.visible_prompt_func = visible_input
+ termui.hidden_prompt_func = hidden_input
+ termui._getchar = _getchar
+ utils.should_strip_ansi = should_strip_ansi
+
+ old_env = {}
+ try:
+ for key, value in iteritems(env):
+ old_env[key] = os.environ.get(key)
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ yield (bytes_output, not self.mix_stderr and bytes_error)
+ finally:
+ for key, value in iteritems(old_env):
+ if value is None:
+ try:
+ del os.environ[key]
+ except Exception:
+ pass
+ else:
+ os.environ[key] = value
+ sys.stdout = old_stdout
+ sys.stderr = old_stderr
+ sys.stdin = old_stdin
+ termui.visible_prompt_func = old_visible_prompt_func
+ termui.hidden_prompt_func = old_hidden_prompt_func
+ termui._getchar = old__getchar_func
+ utils.should_strip_ansi = old_should_strip_ansi
+ formatting.FORCED_WIDTH = old_forced_width
+
+ def invoke(
+ self,
+ cli,
+ args=None,
+ input=None,
+ env=None,
+ catch_exceptions=True,
+ color=False,
+ **extra
+ ):
+ """Invokes a command in an isolated environment. The arguments are
+ forwarded directly to the command line script; the `extra` keyword
+ arguments are passed to the :meth:`~clickpkg.Command.main` function of
+ the command.
+
+ This returns a :class:`Result` object.
+
+ .. versionadded:: 3.0
+ The ``catch_exceptions`` parameter was added.
+
+ .. versionchanged:: 3.0
+ The result object now has an `exc_info` attribute with the
+ traceback if available.
+
+ .. versionadded:: 4.0
+ The ``color`` parameter was added.
+
+ :param cli: the command to invoke
+ :param args: the arguments to invoke. It may be given as an iterable
+ or a string. When given as string it will be interpreted
+ as a Unix shell command. More details at
+ :func:`shlex.split`.
+ :param input: the input data for `sys.stdin`.
+ :param env: the environment overrides.
+ :param catch_exceptions: Whether to catch any other exceptions than
+ ``SystemExit``.
+ :param extra: the keyword arguments to pass to :meth:`main`.
+ :param color: whether the output should contain color codes. The
+ application can still override this explicitly.
+ """
+ exc_info = None
+ with self.isolation(input=input, env=env, color=color) as outstreams:
+ exception = None
+ exit_code = 0
+
+ if isinstance(args, string_types):
+ args = shlex.split(args)
+
+ try:
+ prog_name = extra.pop("prog_name")
+ except KeyError:
+ prog_name = self.get_default_prog_name(cli)
+
+ try:
+ cli.main(args=args or (), prog_name=prog_name, **extra)
+ except SystemExit as e:
+ exc_info = sys.exc_info()
+ exit_code = e.code
+ if exit_code is None:
+ exit_code = 0
+
+ if exit_code != 0:
+ exception = e
+
+ if not isinstance(exit_code, int):
+ sys.stdout.write(str(exit_code))
+ sys.stdout.write("\n")
+ exit_code = 1
+
+ except Exception as e:
+ if not catch_exceptions:
+ raise
+ exception = e
+ exit_code = 1
+ exc_info = sys.exc_info()
+ finally:
+ sys.stdout.flush()
+ stdout = outstreams[0].getvalue()
+ if self.mix_stderr:
+ stderr = None
+ else:
+ stderr = outstreams[1].getvalue()
+
+ return Result(
+ runner=self,
+ stdout_bytes=stdout,
+ stderr_bytes=stderr,
+ exit_code=exit_code,
+ exception=exception,
+ exc_info=exc_info,
+ )
+
+ @contextlib.contextmanager
+ def isolated_filesystem(self):
+ """A context manager that creates a temporary folder and changes
+ the current working directory to it for isolated filesystem tests.
+ """
+ cwd = os.getcwd()
+ t = tempfile.mkdtemp()
+ os.chdir(t)
+ try:
+ yield t
+ finally:
+ os.chdir(cwd)
+ try:
+ shutil.rmtree(t)
+ except (OSError, IOError): # noqa: B014
+ pass
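+
+
+# A minimal usage sketch (illustrative only; not part of upstream Click).
+# CliRunner.invoke() drives a command through the isolation machinery
+# above and returns a Result. The `hello` command is hypothetical.
+#
+#   import click
+#   from click.testing import CliRunner
+#
+#   @click.command()
+#   @click.argument("name")
+#   def hello(name):
+#       click.echo("Hello, {}!".format(name))
+#
+#   runner = CliRunner()
+#   with runner.isolated_filesystem():
+#       result = runner.invoke(hello, ["World"])
+#   assert result.exit_code == 0
+#   assert result.output == "Hello, World!\n"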
diff --git a/third_party/python/click/click/types.py b/third_party/python/click/click/types.py
new file mode 100644
index 0000000000..505c39f850
--- /dev/null
+++ b/third_party/python/click/click/types.py
@@ -0,0 +1,762 @@
+import os
+import stat
+from datetime import datetime
+
+from ._compat import _get_argv_encoding
+from ._compat import filename_to_ui
+from ._compat import get_filesystem_encoding
+from ._compat import get_streerror
+from ._compat import open_stream
+from ._compat import PY2
+from ._compat import text_type
+from .exceptions import BadParameter
+from .utils import LazyFile
+from .utils import safecall
+
+
+class ParamType(object):
+ """Helper for converting values through types. The following is
+ necessary for a valid type:
+
+ * it needs a name
+ * it needs to pass through None unchanged
+ * it needs to convert from a string
+    * it needs to pass its own result type through unchanged
+      (e.g. it needs to be idempotent)
+ * it needs to be able to deal with param and context being `None`.
+ This can be the case when the object is used with prompt
+ inputs.
+ """
+
+ is_composite = False
+
+ #: the descriptive name of this type
+ name = None
+
+ #: if a list of this type is expected and the value is pulled from a
+ #: string environment variable, this is what splits it up. `None`
+ #: means any whitespace. For all parameters the general rule is that
+    #: whitespace splits them up. The exceptions are paths and files, which
+ #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
+ #: Windows).
+ envvar_list_splitter = None
+
+ def __call__(self, value, param=None, ctx=None):
+ if value is not None:
+ return self.convert(value, param, ctx)
+
+ def get_metavar(self, param):
+ """Returns the metavar default for this param if it provides one."""
+
+ def get_missing_message(self, param):
+ """Optionally might return extra information about a missing
+ parameter.
+
+ .. versionadded:: 2.0
+ """
+
+ def convert(self, value, param, ctx):
+ """Converts the value. This is not invoked for values that are
+ `None` (the missing value).
+ """
+ return value
+
+ def split_envvar_value(self, rv):
+ """Given a value from an environment variable this splits it up
+ into small chunks depending on the defined envvar list splitter.
+
+ If the splitter is set to `None`, which means that whitespace splits,
+ then leading and trailing whitespace is ignored. Otherwise, leading
+ and trailing splitters usually lead to empty items being included.
+ """
+ return (rv or "").split(self.envvar_list_splitter)
+
+ def fail(self, message, param=None, ctx=None):
+ """Helper method to fail with an invalid value message."""
+ raise BadParameter(message, ctx=ctx, param=param)
+
+
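+# A minimal custom type sketch (illustrative only; not part of upstream
+# Click). It follows the contract documented on ParamType: it has a name,
+# converts from a string, and is idempotent for already-converted values.
+# The class name and behavior are hypothetical.
+#
+#   class CommaSeparated(ParamType):
+#       name = "comma-separated list"
+#
+#       def convert(self, value, param, ctx):
+#           if isinstance(value, list):  # already converted: pass through
+#               return value
+#           return [item.strip() for item in value.split(",")]
+
+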
+class CompositeParamType(ParamType):
+ is_composite = True
+
+ @property
+ def arity(self):
+ raise NotImplementedError()
+
+
+class FuncParamType(ParamType):
+ def __init__(self, func):
+ self.name = func.__name__
+ self.func = func
+
+ def convert(self, value, param, ctx):
+ try:
+ return self.func(value)
+ except ValueError:
+ try:
+ value = text_type(value)
+ except UnicodeError:
+ value = str(value).decode("utf-8", "replace")
+ self.fail(value, param, ctx)
+
+
+class UnprocessedParamType(ParamType):
+ name = "text"
+
+ def convert(self, value, param, ctx):
+ return value
+
+ def __repr__(self):
+ return "UNPROCESSED"
+
+
+class StringParamType(ParamType):
+ name = "text"
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, bytes):
+ enc = _get_argv_encoding()
+ try:
+ value = value.decode(enc)
+ except UnicodeError:
+ fs_enc = get_filesystem_encoding()
+ if fs_enc != enc:
+ try:
+ value = value.decode(fs_enc)
+ except UnicodeError:
+ value = value.decode("utf-8", "replace")
+ else:
+ value = value.decode("utf-8", "replace")
+ return value
+ return value
+
+ def __repr__(self):
+ return "STRING"
+
+
+class Choice(ParamType):
+ """The choice type allows a value to be checked against a fixed set
+ of supported values. All of these values have to be strings.
+
+ You should only pass a list or tuple of choices. Other iterables
+ (like generators) may lead to surprising results.
+
+ The resulting value will always be one of the originally passed choices
+ regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
+ being specified.
+
+ See :ref:`choice-opts` for an example.
+
+ :param case_sensitive: Set to false to make choices case
+ insensitive. Defaults to true.
+ """
+
+ name = "choice"
+
+ def __init__(self, choices, case_sensitive=True):
+ self.choices = choices
+ self.case_sensitive = case_sensitive
+
+ def get_metavar(self, param):
+ return "[{}]".format("|".join(self.choices))
+
+ def get_missing_message(self, param):
+ return "Choose from:\n\t{}.".format(",\n\t".join(self.choices))
+
+ def convert(self, value, param, ctx):
+ # Match through normalization and case sensitivity
+ # first do token_normalize_func, then lowercase
+ # preserve original `value` to produce an accurate message in
+ # `self.fail`
+ normed_value = value
+ normed_choices = {choice: choice for choice in self.choices}
+
+ if ctx is not None and ctx.token_normalize_func is not None:
+ normed_value = ctx.token_normalize_func(value)
+ normed_choices = {
+ ctx.token_normalize_func(normed_choice): original
+ for normed_choice, original in normed_choices.items()
+ }
+
+ if not self.case_sensitive:
+ if PY2:
+ lower = str.lower
+ else:
+ lower = str.casefold
+
+ normed_value = lower(normed_value)
+ normed_choices = {
+ lower(normed_choice): original
+ for normed_choice, original in normed_choices.items()
+ }
+
+ if normed_value in normed_choices:
+ return normed_choices[normed_value]
+
+ self.fail(
+ "invalid choice: {}. (choose from {})".format(
+ value, ", ".join(self.choices)
+ ),
+ param,
+ ctx,
+ )
+
+ def __repr__(self):
+ return "Choice('{}')".format(list(self.choices))
+
+
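+# Illustrative sketch (not part of upstream Click): Choice is normally
+# attached to an option. The command, option name, and values below are
+# hypothetical.
+#
+#   import click
+#
+#   @click.command()
+#   @click.option("--hash-type",
+#                 type=Choice(["md5", "sha1"], case_sensitive=False))
+#   def digest(hash_type):
+#       click.echo(hash_type)  # always one of the original choices
+
+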
+class DateTime(ParamType):
+ """The DateTime type converts date strings into `datetime` objects.
+
+ The format strings which are checked are configurable, but default to some
+ common (non-timezone aware) ISO 8601 formats.
+
+ When specifying *DateTime* formats, you should only pass a list or a tuple.
+ Other iterables, like generators, may lead to surprising results.
+
+ The format strings are processed using ``datetime.strptime``, and this
+ consequently defines the format strings which are allowed.
+
+ Parsing is tried using each format, in order, and the first format which
+ parses successfully is used.
+
+ :param formats: A list or tuple of date format strings, in the order in
+ which they should be tried. Defaults to
+ ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
+ ``'%Y-%m-%d %H:%M:%S'``.
+ """
+
+ name = "datetime"
+
+ def __init__(self, formats=None):
+ self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]
+
+ def get_metavar(self, param):
+ return "[{}]".format("|".join(self.formats))
+
+ def _try_to_convert_date(self, value, format):
+ try:
+ return datetime.strptime(value, format)
+ except ValueError:
+ return None
+
+ def convert(self, value, param, ctx):
+ # Exact match
+ for format in self.formats:
+ dtime = self._try_to_convert_date(value, format)
+ if dtime:
+ return dtime
+
+ self.fail(
+ "invalid datetime format: {}. (choose from {})".format(
+ value, ", ".join(self.formats)
+ )
+ )
+
+ def __repr__(self):
+ return "DateTime"
+
+
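+# Illustrative sketch (not part of upstream Click): DateTime tries each
+# format in order and returns the first successful parse. The value is an
+# arbitrary example; param and ctx may be None for prompt-style use.
+#
+#   dt = DateTime()
+#   dt.convert("2020-06-01 12:00:00", None, None)
+#   # -> datetime.datetime(2020, 6, 1, 12, 0)
+
+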
+class IntParamType(ParamType):
+ name = "integer"
+
+ def convert(self, value, param, ctx):
+ try:
+ return int(value)
+ except ValueError:
+ self.fail("{} is not a valid integer".format(value), param, ctx)
+
+ def __repr__(self):
+ return "INT"
+
+
+class IntRange(IntParamType):
+ """A parameter that works similar to :data:`click.INT` but restricts
+ the value to fit into a range. The default behavior is to fail if the
+ value falls outside the range, but it can also be silently clamped
+ between the two edges.
+
+ See :ref:`ranges` for an example.
+ """
+
+ name = "integer range"
+
+ def __init__(self, min=None, max=None, clamp=False):
+ self.min = min
+ self.max = max
+ self.clamp = clamp
+
+ def convert(self, value, param, ctx):
+ rv = IntParamType.convert(self, value, param, ctx)
+ if self.clamp:
+ if self.min is not None and rv < self.min:
+ return self.min
+ if self.max is not None and rv > self.max:
+ return self.max
+ if (
+ self.min is not None
+ and rv < self.min
+ or self.max is not None
+ and rv > self.max
+ ):
+ if self.min is None:
+ self.fail(
+ "{} is bigger than the maximum valid value {}.".format(
+ rv, self.max
+ ),
+ param,
+ ctx,
+ )
+ elif self.max is None:
+ self.fail(
+ "{} is smaller than the minimum valid value {}.".format(
+ rv, self.min
+ ),
+ param,
+ ctx,
+ )
+ else:
+ self.fail(
+ "{} is not in the valid range of {} to {}.".format(
+ rv, self.min, self.max
+ ),
+ param,
+ ctx,
+ )
+ return rv
+
+ def __repr__(self):
+ return "IntRange({}, {})".format(self.min, self.max)
+
+
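+# Illustrative sketch (not part of upstream Click): with clamp=True an
+# out-of-range value is silently pulled to the nearest edge instead of
+# failing. The numbers are arbitrary.
+#
+#   IntRange(0, 10)("5")               # -> 5
+#   IntRange(0, 10, clamp=True)("42")  # -> 10 (clamped to the maximum)
+
+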
+class FloatParamType(ParamType):
+ name = "float"
+
+ def convert(self, value, param, ctx):
+ try:
+ return float(value)
+ except ValueError:
+ self.fail(
+ "{} is not a valid floating point value".format(value), param, ctx
+ )
+
+ def __repr__(self):
+ return "FLOAT"
+
+
+class FloatRange(FloatParamType):
+ """A parameter that works similar to :data:`click.FLOAT` but restricts
+ the value to fit into a range. The default behavior is to fail if the
+ value falls outside the range, but it can also be silently clamped
+ between the two edges.
+
+ See :ref:`ranges` for an example.
+ """
+
+ name = "float range"
+
+ def __init__(self, min=None, max=None, clamp=False):
+ self.min = min
+ self.max = max
+ self.clamp = clamp
+
+ def convert(self, value, param, ctx):
+ rv = FloatParamType.convert(self, value, param, ctx)
+ if self.clamp:
+ if self.min is not None and rv < self.min:
+ return self.min
+ if self.max is not None and rv > self.max:
+ return self.max
+ if (
+ self.min is not None
+ and rv < self.min
+ or self.max is not None
+ and rv > self.max
+ ):
+ if self.min is None:
+ self.fail(
+ "{} is bigger than the maximum valid value {}.".format(
+ rv, self.max
+ ),
+ param,
+ ctx,
+ )
+ elif self.max is None:
+ self.fail(
+ "{} is smaller than the minimum valid value {}.".format(
+ rv, self.min
+ ),
+ param,
+ ctx,
+ )
+ else:
+ self.fail(
+ "{} is not in the valid range of {} to {}.".format(
+ rv, self.min, self.max
+ ),
+ param,
+ ctx,
+ )
+ return rv
+
+ def __repr__(self):
+ return "FloatRange({}, {})".format(self.min, self.max)
+
+
+class BoolParamType(ParamType):
+ name = "boolean"
+
+ def convert(self, value, param, ctx):
+ if isinstance(value, bool):
+ return bool(value)
+ value = value.lower()
+ if value in ("true", "t", "1", "yes", "y"):
+ return True
+ elif value in ("false", "f", "0", "no", "n"):
+ return False
+ self.fail("{} is not a valid boolean".format(value), param, ctx)
+
+ def __repr__(self):
+ return "BOOL"
+
+
+class UUIDParameterType(ParamType):
+ name = "uuid"
+
+ def convert(self, value, param, ctx):
+ import uuid
+
+ try:
+ if PY2 and isinstance(value, text_type):
+ value = value.encode("ascii")
+ return uuid.UUID(value)
+ except ValueError:
+ self.fail("{} is not a valid UUID value".format(value), param, ctx)
+
+ def __repr__(self):
+ return "UUID"
+
+
+class File(ParamType):
+ """Declares a parameter to be a file for reading or writing. The file
+ is automatically closed once the context tears down (after the command
+ finished working).
+
+ Files can be opened for reading or writing. The special value ``-``
+ indicates stdin or stdout depending on the mode.
+
+ By default, the file is opened for reading text data, but it can also be
+ opened in binary mode or for writing. The encoding parameter can be used
+ to force a specific encoding.
+
+    The `lazy` flag controls if the file should be opened immediately or upon
+    first IO. The default is to be non-lazy for standard input and output
+    streams as well as files opened for reading; `lazy` otherwise. When
+    opening a file lazily for reading, it is still opened temporarily for
+    validation, but will not be held open until first IO. `lazy` is mainly
+    useful when opening for writing, to avoid creating the file until it is
+    needed.
+
+ Starting with Click 2.0, files can also be opened atomically in which
+ case all writes go into a separate file in the same folder and upon
+ completion the file will be moved over to the original location. This
+ is useful if a file regularly read by other users is modified.
+
+ See :ref:`file-args` for more information.
+ """
+
+ name = "filename"
+ envvar_list_splitter = os.path.pathsep
+
+ def __init__(
+ self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False
+ ):
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.lazy = lazy
+ self.atomic = atomic
+
+ def resolve_lazy_flag(self, value):
+ if self.lazy is not None:
+ return self.lazy
+ if value == "-":
+ return False
+ elif "w" in self.mode:
+ return True
+ return False
+
+ def convert(self, value, param, ctx):
+ try:
+ if hasattr(value, "read") or hasattr(value, "write"):
+ return value
+
+ lazy = self.resolve_lazy_flag(value)
+
+ if lazy:
+ f = LazyFile(
+ value, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+ if ctx is not None:
+ ctx.call_on_close(f.close_intelligently)
+ return f
+
+ f, should_close = open_stream(
+ value, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+ # If a context is provided, we automatically close the file
+ # at the end of the context execution (or flush out). If a
+ # context does not exist, it's the caller's responsibility to
+ # properly close the file. This for instance happens when the
+ # type is used with prompts.
+ if ctx is not None:
+ if should_close:
+ ctx.call_on_close(safecall(f.close))
+ else:
+ ctx.call_on_close(safecall(f.flush))
+ return f
+ except (IOError, OSError) as e: # noqa: B014
+ self.fail(
+ "Could not open file: {}: {}".format(
+ filename_to_ui(value), get_streerror(e)
+ ),
+ param,
+ ctx,
+ )
+
+
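+# Illustrative sketch (not part of upstream Click): File is typically used
+# as an argument or option type; the special value "-" maps to stdin or
+# stdout. The command and argument names are hypothetical.
+#
+#   import click
+#
+#   @click.command()
+#   @click.argument("src", type=File("rb"))
+#   def cat(src):
+#       click.echo(src.read(), nl=False)
+
+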
+class Path(ParamType):
+ """The path type is similar to the :class:`File` type but it performs
+ different checks. First of all, instead of returning an open file
+ handle it returns just the filename. Secondly, it can perform various
+ basic checks about what the file or directory should be.
+
+ .. versionchanged:: 6.0
+ `allow_dash` was added.
+
+ :param exists: if set to true, the file or directory needs to exist for
+ this value to be valid. If this is not required and a
+ file does indeed not exist, then all further checks are
+ silently skipped.
+ :param file_okay: controls if a file is a possible value.
+ :param dir_okay: controls if a directory is a possible value.
+ :param writable: if true, a writable check is performed.
+ :param readable: if true, a readable check is performed.
+ :param resolve_path: if this is true, then the path is fully resolved
+ before the value is passed onwards. This means
+ that it's absolute and symlinks are resolved. It
+ will not expand a tilde-prefix, as this is
+ supposed to be done by the shell only.
+ :param allow_dash: If this is set to `True`, a single dash to indicate
+ standard streams is permitted.
+ :param path_type: optionally a string type that should be used to
+ represent the path. The default is `None` which
+ means the return value will be either bytes or
+ unicode depending on what makes most sense given the
+ input data Click deals with.
+ """
+
+ envvar_list_splitter = os.path.pathsep
+
+ def __init__(
+ self,
+ exists=False,
+ file_okay=True,
+ dir_okay=True,
+ writable=False,
+ readable=True,
+ resolve_path=False,
+ allow_dash=False,
+ path_type=None,
+ ):
+ self.exists = exists
+ self.file_okay = file_okay
+ self.dir_okay = dir_okay
+ self.writable = writable
+ self.readable = readable
+ self.resolve_path = resolve_path
+ self.allow_dash = allow_dash
+ self.type = path_type
+
+ if self.file_okay and not self.dir_okay:
+ self.name = "file"
+ self.path_type = "File"
+ elif self.dir_okay and not self.file_okay:
+ self.name = "directory"
+ self.path_type = "Directory"
+ else:
+ self.name = "path"
+ self.path_type = "Path"
+
+ def coerce_path_result(self, rv):
+ if self.type is not None and not isinstance(rv, self.type):
+ if self.type is text_type:
+ rv = rv.decode(get_filesystem_encoding())
+ else:
+ rv = rv.encode(get_filesystem_encoding())
+ return rv
+
+ def convert(self, value, param, ctx):
+ rv = value
+
+ is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")
+
+ if not is_dash:
+ if self.resolve_path:
+ rv = os.path.realpath(rv)
+
+ try:
+ st = os.stat(rv)
+ except OSError:
+ if not self.exists:
+ return self.coerce_path_result(rv)
+ self.fail(
+ "{} '{}' does not exist.".format(
+ self.path_type, filename_to_ui(value)
+ ),
+ param,
+ ctx,
+ )
+
+ if not self.file_okay and stat.S_ISREG(st.st_mode):
+ self.fail(
+ "{} '{}' is a file.".format(self.path_type, filename_to_ui(value)),
+ param,
+ ctx,
+ )
+ if not self.dir_okay and stat.S_ISDIR(st.st_mode):
+ self.fail(
+ "{} '{}' is a directory.".format(
+ self.path_type, filename_to_ui(value)
+ ),
+ param,
+ ctx,
+ )
+ if self.writable and not os.access(value, os.W_OK):
+ self.fail(
+ "{} '{}' is not writable.".format(
+ self.path_type, filename_to_ui(value)
+ ),
+ param,
+ ctx,
+ )
+ if self.readable and not os.access(value, os.R_OK):
+ self.fail(
+ "{} '{}' is not readable.".format(
+ self.path_type, filename_to_ui(value)
+ ),
+ param,
+ ctx,
+ )
+
+ return self.coerce_path_result(rv)
+
+
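+# Illustrative sketch (not part of upstream Click): Path returns the
+# (optionally resolved) filename after performing the requested checks.
+# The command and argument names are hypothetical.
+#
+#   import click
+#
+#   @click.command()
+#   @click.argument("config",
+#                   type=Path(exists=True, dir_okay=False, resolve_path=True))
+#   def load(config):
+#       click.echo("loading {}".format(config))
+
+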
+class Tuple(CompositeParamType):
+ """The default behavior of Click is to apply a type on a value directly.
+ This works well in most cases, except for when `nargs` is set to a fixed
+ count and different types should be used for different items. In this
+ case the :class:`Tuple` type can be used. This type can only be used
+ if `nargs` is set to a fixed number.
+
+ For more information see :ref:`tuple-type`.
+
+ This can be selected by using a Python tuple literal as a type.
+
+ :param types: a list of types that should be used for the tuple items.
+ """
+
+ def __init__(self, types):
+ self.types = [convert_type(ty) for ty in types]
+
+ @property
+ def name(self):
+ return "<{}>".format(" ".join(ty.name for ty in self.types))
+
+ @property
+ def arity(self):
+ return len(self.types)
+
+ def convert(self, value, param, ctx):
+ if len(value) != len(self.types):
+ raise TypeError(
+ "It would appear that nargs is set to conflict with the"
+ " composite type arity."
+ )
+ return tuple(ty(x, param, ctx) for ty, x in zip(self.types, value))
+
+
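+# Illustrative sketch (not part of upstream Click): Tuple is normally
+# selected implicitly by passing a Python tuple literal as the type, via
+# convert_type() below. The option name is hypothetical.
+#
+#   import click
+#
+#   @click.command()
+#   @click.option("--item", type=(str, int))  # nargs is inferred as 2
+#   def put(item):
+#       name, count = item  # each element converted by its own type
+
+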
+def convert_type(ty, default=None):
+ """Converts a callable or python type into the most appropriate
+ param type.
+ """
+ guessed_type = False
+ if ty is None and default is not None:
+ if isinstance(default, tuple):
+ ty = tuple(map(type, default))
+ else:
+ ty = type(default)
+ guessed_type = True
+
+ if isinstance(ty, tuple):
+ return Tuple(ty)
+ if isinstance(ty, ParamType):
+ return ty
+ if ty is text_type or ty is str or ty is None:
+ return STRING
+ if ty is int:
+ return INT
+ # Booleans are only okay if not guessed. This is done because for
+ # flags the default value is actually a bit of a lie in that it
+ # indicates which of the flags is the one we want. See get_default()
+ # for more information.
+ if ty is bool and not guessed_type:
+ return BOOL
+ if ty is float:
+ return FLOAT
+ if guessed_type:
+ return STRING
+
+ # Catch a common mistake
+ if __debug__:
+ try:
+ if issubclass(ty, ParamType):
+ raise AssertionError(
+ "Attempted to use an uninstantiated parameter type ({}).".format(ty)
+ )
+ except TypeError:
+ pass
+ return FuncParamType(ty)
+
+
+#: A dummy parameter type that just does nothing. From a user's
+#: perspective this appears to just be the same as `STRING` but internally
+#: no string conversion takes place. This is necessary to achieve the
+#: same bytes/unicode behavior on Python 2/3 in situations where you want
+#: to not convert argument types. This is usually useful when working
+#: with file paths as they can appear in bytes and unicode.
+#:
+#: For path related uses the :class:`Path` type is a better choice but
+#: there are situations where an unprocessed type is useful which is why
+#: it is provided.
+#:
+#: .. versionadded:: 4.0
+UNPROCESSED = UnprocessedParamType()
+
+#: A unicode string parameter type which is the implicit default. This
+#: can also be selected by using ``str`` as type.
+STRING = StringParamType()
+
+#: An integer parameter. This can also be selected by using ``int`` as
+#: type.
+INT = IntParamType()
+
+#: A floating point value parameter. This can also be selected by using
+#: ``float`` as type.
+FLOAT = FloatParamType()
+
+#: A boolean parameter. This is the default for boolean flags. This can
+#: also be selected by using ``bool`` as a type.
+BOOL = BoolParamType()
+
+#: A UUID parameter.
+UUID = UUIDParameterType()
diff --git a/third_party/python/click/click/utils.py b/third_party/python/click/click/utils.py
new file mode 100644
index 0000000000..79265e732d
--- /dev/null
+++ b/third_party/python/click/click/utils.py
@@ -0,0 +1,455 @@
+import os
+import sys
+
+from ._compat import _default_text_stderr
+from ._compat import _default_text_stdout
+from ._compat import auto_wrap_for_ansi
+from ._compat import binary_streams
+from ._compat import filename_to_ui
+from ._compat import get_filesystem_encoding
+from ._compat import get_streerror
+from ._compat import is_bytes
+from ._compat import open_stream
+from ._compat import PY2
+from ._compat import should_strip_ansi
+from ._compat import string_types
+from ._compat import strip_ansi
+from ._compat import text_streams
+from ._compat import text_type
+from ._compat import WIN
+from .globals import resolve_color_default
+
+if not PY2:
+ from ._compat import _find_binary_writer
+elif WIN:
+ from ._winconsole import _get_windows_argv
+ from ._winconsole import _hash_py_argv
+ from ._winconsole import _initial_argv_hash
+
+echo_native_types = string_types + (bytes, bytearray)
+
+
+def _posixify(name):
+ return "-".join(name.split()).lower()
+
+
+def safecall(func):
+ """Wraps a function so that it swallows exceptions."""
+
+ def wrapper(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except Exception:
+ pass
+
+ return wrapper
+
+
+def make_str(value):
+ """Converts a value into a valid string."""
+ if isinstance(value, bytes):
+ try:
+ return value.decode(get_filesystem_encoding())
+ except UnicodeError:
+ return value.decode("utf-8", "replace")
+ return text_type(value)
+
+
+def make_default_short_help(help, max_length=45):
+ """Return a condensed version of help string."""
+ words = help.split()
+ total_length = 0
+ result = []
+ done = False
+
+ for word in words:
+ if word[-1:] == ".":
+ done = True
+ new_length = 1 + len(word) if result else len(word)
+ if total_length + new_length > max_length:
+ result.append("...")
+ done = True
+ else:
+ if result:
+ result.append(" ")
+ result.append(word)
+ if done:
+ break
+ total_length += new_length
+
+ return "".join(result)
+
+
+class LazyFile(object):
+ """A lazy file works like a regular file but it does not fully open
+ the file but it does perform some basic checks early to see if the
+ filename parameter does make sense. This is useful for safely opening
+ files for writing.
+ """
+
+ def __init__(
+ self, filename, mode="r", encoding=None, errors="strict", atomic=False
+ ):
+ self.name = filename
+ self.mode = mode
+ self.encoding = encoding
+ self.errors = errors
+ self.atomic = atomic
+
+ if filename == "-":
+ self._f, self.should_close = open_stream(filename, mode, encoding, errors)
+ else:
+ if "r" in mode:
+ # Open and close the file in case we're opening it for
+ # reading so that we can catch at least some errors in
+ # some cases early.
+ open(filename, mode).close()
+ self._f = None
+ self.should_close = True
+
+ def __getattr__(self, name):
+ return getattr(self.open(), name)
+
+ def __repr__(self):
+ if self._f is not None:
+ return repr(self._f)
+ return "<unopened file '{}' {}>".format(self.name, self.mode)
+
+ def open(self):
+ """Opens the file if it's not yet open. This call might fail with
+ a :exc:`FileError`. Not handling this error will produce an error
+ that Click shows.
+ """
+ if self._f is not None:
+ return self._f
+ try:
+ rv, self.should_close = open_stream(
+ self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
+ )
+ except (IOError, OSError) as e: # noqa: E402
+ from .exceptions import FileError
+
+ raise FileError(self.name, hint=get_streerror(e))
+ self._f = rv
+ return rv
+
+ def close(self):
+ """Closes the underlying file, no matter what."""
+ if self._f is not None:
+ self._f.close()
+
+ def close_intelligently(self):
+ """This function only closes the file if it was opened by the lazy
+ file wrapper. For instance this will never close stdin.
+ """
+ if self.should_close:
+ self.close()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self.close_intelligently()
+
+ def __iter__(self):
+ self.open()
+ return iter(self._f)
+
+
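+# Illustrative sketch (not part of upstream Click): LazyFile defers the
+# real open until first IO, which is useful when opening for writing.
+# The filename is hypothetical.
+#
+#   lf = LazyFile("out.txt", mode="w")  # nothing is created on disk yet
+#   lf.write("data")                    # first IO triggers the real open
+#   lf.close_intelligently()            # closes it, since we opened it
+
+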
+class KeepOpenFile(object):
+ def __init__(self, file):
+ self._file = file
+
+ def __getattr__(self, name):
+ return getattr(self._file, name)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ pass
+
+ def __repr__(self):
+ return repr(self._file)
+
+ def __iter__(self):
+ return iter(self._file)
+
+
+def echo(message=None, file=None, nl=True, err=False, color=None):
+ """Prints a message plus a newline to the given file or stdout. On
+ first sight, this looks like the print function, but it has improved
+ support for handling Unicode and binary data that does not fail no
+ matter how badly configured the system is.
+
+ Primarily it means that you can print binary data as well as Unicode
+ data on both 2.x and 3.x to the given file in the most appropriate way
+ possible. This is a very carefree function in that it will try its
+ best to not fail. As of Click 6.0 this includes support for unicode
+ output on the Windows console.
+
+ In addition to that, if `colorama`_ is installed, the echo function will
+ also support clever handling of ANSI codes. Essentially it will then
+ do the following:
+
+ - add transparent handling of ANSI color codes on Windows.
+ - hide ANSI codes automatically if the destination file is not a
+ terminal.
+
+ .. _colorama: https://pypi.org/project/colorama/
+
+ .. versionchanged:: 6.0
+       As of Click 6.0 the echo function will properly support unicode
+       output on the Windows console. Note that Click does not modify
+       the interpreter in any way, which means that `sys.stdout` or the
+       print statement or function will still not provide unicode support.
+
+ .. versionchanged:: 2.0
+ Starting with version 2.0 of Click, the echo function will work
+ with colorama if it's installed.
+
+ .. versionadded:: 3.0
+ The `err` parameter was added.
+
+ .. versionchanged:: 4.0
+ Added the `color` flag.
+
+ :param message: the message to print
+ :param file: the file to write to (defaults to ``stdout``)
+ :param err: if set to true the file defaults to ``stderr`` instead of
+ ``stdout``. This is faster and easier than calling
+ :func:`get_text_stderr` yourself.
+ :param nl: if set to `True` (the default) a newline is printed afterwards.
+ :param color: controls if the terminal supports ANSI colors or not. The
+ default is autodetection.
+ """
+ if file is None:
+ if err:
+ file = _default_text_stderr()
+ else:
+ file = _default_text_stdout()
+
+ # Convert non bytes/text into the native string type.
+ if message is not None and not isinstance(message, echo_native_types):
+ message = text_type(message)
+
+ if nl:
+ message = message or u""
+ if isinstance(message, text_type):
+ message += u"\n"
+ else:
+ message += b"\n"
+
+    # If there is a message, and we're in Python 3, and the value looks
+    # like bytes, we need to manually find the binary stream and write the
+    # message there. This is done separately so that most stream types
+    # will work as you would expect. E.g. you can write to StringIO for
+    # other cases.
+ if message and not PY2 and is_bytes(message):
+ binary_file = _find_binary_writer(file)
+ if binary_file is not None:
+ file.flush()
+ binary_file.write(message)
+ binary_file.flush()
+ return
+
+    # ANSI-style support. If there is no message or we are dealing with
+    # bytes, nothing happens. If we are connected to a file we want
+    # to strip colors. If we are on Windows we either wrap the stream
+    # to strip the color or we use the colorama support to translate the
+    # ANSI codes to API calls.
+ if message and not is_bytes(message):
+ color = resolve_color_default(color)
+ if should_strip_ansi(file, color):
+ message = strip_ansi(message)
+ elif WIN:
+ if auto_wrap_for_ansi is not None:
+ file = auto_wrap_for_ansi(file)
+ elif not color:
+ message = strip_ansi(message)
+
+ if message:
+ file.write(message)
+ file.flush()
+
+
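+# Illustrative sketch (not part of upstream Click): echo() accepts text or
+# bytes and routes each to an appropriate stream. The messages are
+# arbitrary examples.
+#
+#   echo("Hello World!")                    # text to stdout
+#   echo(b"\xe2\x98\x83", nl=False)         # bytes hit the binary stream
+#   echo("something went wrong", err=True)  # text to stderr
+
+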
+def get_binary_stream(name):
+ """Returns a system stream for byte processing. This essentially
+ returns the stream from the sys module with the given name but it
+ solves some compatibility issues between different Python versions.
+ Primarily this function is necessary for getting binary streams on
+ Python 3.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ """
+ opener = binary_streams.get(name)
+ if opener is None:
+ raise TypeError("Unknown standard stream '{}'".format(name))
+ return opener()
+
+
+def get_text_stream(name, encoding=None, errors="strict"):
+ """Returns a system stream for text processing. This usually returns
+ a wrapped stream around a binary stream returned from
+ :func:`get_binary_stream` but it also can take shortcuts on Python 3
+ for already correctly configured streams.
+
+ :param name: the name of the stream to open. Valid names are ``'stdin'``,
+ ``'stdout'`` and ``'stderr'``
+ :param encoding: overrides the detected default encoding.
+ :param errors: overrides the default error mode.
+ """
+ opener = text_streams.get(name)
+ if opener is None:
+ raise TypeError("Unknown standard stream '{}'".format(name))
+ return opener(encoding, errors)
+
+
+def open_file(
+ filename, mode="r", encoding=None, errors="strict", lazy=False, atomic=False
+):
+ """This is similar to how the :class:`File` works but for manual
+ usage. Files are opened non lazy by default. This can open regular
+ files as well as stdin/stdout if ``'-'`` is passed.
+
+ If stdin/stdout is returned the stream is wrapped so that the context
+ manager will not close the stream accidentally. This makes it possible
+    to always use the function like this without having to worry about
+    accidentally closing a standard stream::
+
+ with open_file(filename) as f:
+ ...
+
+ .. versionadded:: 3.0
+
+ :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
+ :param mode: the mode in which to open the file.
+ :param encoding: the encoding to use.
+ :param errors: the error handling for this file.
+ :param lazy: can be flipped to true to open the file lazily.
+ :param atomic: in atomic mode writes go into a temporary file and it's
+ moved on close.
+ """
+ if lazy:
+ return LazyFile(filename, mode, encoding, errors, atomic=atomic)
+ f, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
+ if not should_close:
+ f = KeepOpenFile(f)
+ return f
+
+
+def get_os_args():
+ """This returns the argument part of sys.argv in the most appropriate
+ form for processing. What this means is that this return value is in
+ a format that works for Click to process but does not necessarily
+ correspond well to what's actually standard for the interpreter.
+
+    On most environments the return value is ``sys.argv[1:]`` unchanged.
+    However, if you are on Windows and running Python 2, the return value
+    will actually be a list of unicode strings instead, because the
+ default behavior on that platform otherwise will not be able to
+ carry all possible values that sys.argv can have.
+
+ .. versionadded:: 6.0
+ """
+ # We can only extract the unicode argv if sys.argv has not been
+ # changed since the startup of the application.
+ if PY2 and WIN and _initial_argv_hash == _hash_py_argv():
+ return _get_windows_argv()
+ return sys.argv[1:]
+
+
+def format_filename(filename, shorten=False):
+ """Formats a filename for user display. The main purpose of this
+ function is to ensure that the filename can be displayed at all. This
+    will decode the filename to unicode if necessary in a way that will
+ not fail. Optionally, it can shorten the filename to not include the
+ full path to the filename.
+
+ :param filename: formats a filename for UI display. This will also convert
+ the filename into unicode without failing.
+    :param shorten: this optionally shortens the filename to strip off the
+ path that leads up to it.
+ """
+ if shorten:
+ filename = os.path.basename(filename)
+ return filename_to_ui(filename)
+
+
+def get_app_dir(app_name, roaming=True, force_posix=False):
+ r"""Returns the config folder for the application. The default behavior
+ is to return whatever is most appropriate for the operating system.
+
+ To give you an idea, for an app called ``"Foo Bar"``, something like
+ the following folders could be returned:
+
+ Mac OS X:
+ ``~/Library/Application Support/Foo Bar``
+ Mac OS X (POSIX):
+ ``~/.foo-bar``
+ Unix:
+ ``~/.config/foo-bar``
+ Unix (POSIX):
+ ``~/.foo-bar``
+ Win XP (roaming):
+ ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
+ Win XP (not roaming):
+ ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
+ Win 7 (roaming):
+ ``C:\Users\<user>\AppData\Roaming\Foo Bar``
+ Win 7 (not roaming):
+ ``C:\Users\<user>\AppData\Local\Foo Bar``
+
+ .. versionadded:: 2.0
+
+ :param app_name: the application name. This should be properly capitalized
+ and can contain whitespace.
+ :param roaming: controls if the folder should be roaming or not on Windows.
+                    Has no effect otherwise.
+ :param force_posix: if this is set to `True` then on any POSIX system the
+ folder will be stored in the home folder with a leading
+ dot instead of the XDG config home or darwin's
+ application support folder.
+ """
+ if WIN:
+ key = "APPDATA" if roaming else "LOCALAPPDATA"
+ folder = os.environ.get(key)
+ if folder is None:
+ folder = os.path.expanduser("~")
+ return os.path.join(folder, app_name)
+ if force_posix:
+ return os.path.join(os.path.expanduser("~/.{}".format(_posixify(app_name))))
+ if sys.platform == "darwin":
+ return os.path.join(
+ os.path.expanduser("~/Library/Application Support"), app_name
+ )
+ return os.path.join(
+ os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config")),
+ _posixify(app_name),
+ )
+
+
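+# Illustrative sketch (not part of upstream Click): a common pattern is to
+# locate (and create) a per-user configuration directory. The app name is
+# hypothetical.
+#
+#   cfg_dir = get_app_dir("Foo Bar")  # e.g. ~/.config/foo-bar on Unix
+#   if not os.path.exists(cfg_dir):
+#       os.makedirs(cfg_dir)
+
+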
+class PacifyFlushWrapper(object):
+ """This wrapper is used to catch and suppress BrokenPipeErrors resulting
+ from ``.flush()`` being called on broken pipe during the shutdown/final-GC
+ of the Python interpreter. Notably ``.flush()`` is always called on
+ ``sys.stdout`` and ``sys.stderr``. So as to have minimal impact on any
+ other cleanup code, and the case where the underlying file is not a broken
+ pipe, all calls and attributes are proxied.
+ """
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+
+ def flush(self):
+ try:
+ self.wrapped.flush()
+ except IOError as e:
+ import errno
+
+ if e.errno != errno.EPIPE:
+ raise
+
+ def __getattr__(self, attr):
+ return getattr(self.wrapped, attr)
diff --git a/third_party/python/colorama/colorama-0.4.5.dist-info/LICENSE.txt b/third_party/python/colorama/colorama-0.4.5.dist-info/LICENSE.txt
new file mode 100644
index 0000000000..3105888ec1
--- /dev/null
+++ b/third_party/python/colorama/colorama-0.4.5.dist-info/LICENSE.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2010 Jonathan Hartley
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+* Neither the name of the copyright holders, nor those of its contributors
+ may be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/colorama/colorama-0.4.5.dist-info/METADATA b/third_party/python/colorama/colorama-0.4.5.dist-info/METADATA
new file mode 100644
index 0000000000..cd9393530e
--- /dev/null
+++ b/third_party/python/colorama/colorama-0.4.5.dist-info/METADATA
@@ -0,0 +1,411 @@
+Metadata-Version: 2.1
+Name: colorama
+Version: 0.4.5
+Summary: Cross-platform colored terminal text.
+Home-page: https://github.com/tartley/colorama
+Author: Jonathan Hartley
+Author-email: tartley@tartley.com
+Maintainer: Arnon Yaari
+License: BSD
+Keywords: color colour terminal text ansi windows crossplatform xplatform
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Terminals
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+License-File: LICENSE.txt
+
+.. image:: https://img.shields.io/pypi/v/colorama.svg
+ :target: https://pypi.org/project/colorama/
+ :alt: Latest Version
+
+.. image:: https://img.shields.io/pypi/pyversions/colorama.svg
+ :target: https://pypi.org/project/colorama/
+ :alt: Supported Python versions
+
+.. image:: https://github.com/tartley/colorama/actions/workflows/test.yml/badge.svg
+ :target: https://github.com/tartley/colorama/actions/workflows/test.yml
+ :alt: Build Status
+
+Colorama
+========
+
+Makes ANSI escape character sequences (for producing colored terminal text and
+cursor positioning) work under MS Windows.
+
+.. |donate| image:: https://www.paypalobjects.com/en_US/i/btn/btn_donate_SM.gif
+ :target: https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=2MZ9D2GMLYCUJ&item_name=Colorama&currency_code=USD
+ :alt: Donate with Paypal
+
+`PyPI for releases <https://pypi.org/project/colorama/>`_ |
+`Github for source <https://github.com/tartley/colorama>`_ |
+`Colorama for enterprise on Tidelift <https://github.com/tartley/colorama/blob/master/ENTERPRISE.md>`_
+
+If you find Colorama useful, please |donate| to the authors. Thank you!
+
+
+Installation
+------------
+
+Tested on CPython 2.7, 3.5, 3.6, 3.7, 3.8, 3.9 and 3.10 and Pypy 2.7 and 3.6.
+
+No requirements other than the standard library.
+
+.. code-block:: bash
+
+ pip install colorama
+ # or
+ conda install -c anaconda colorama
+
+
+Description
+-----------
+
+ANSI escape character sequences have long been used to produce colored terminal
+text and cursor positioning on Unix and Macs. Colorama makes this work on
+Windows, too, by wrapping ``stdout``, stripping ANSI sequences it finds (which
+would appear as gobbledygook in the output), and converting them into the
+appropriate win32 calls to modify the state of the terminal. On other platforms,
+Colorama does nothing.
+
+This has the upshot of providing a simple cross-platform API for printing
+colored terminal text from Python, and has the happy side-effect that existing
+applications or libraries which use ANSI sequences to produce colored output on
+Linux or Macs can now also work on Windows, simply by calling
+``colorama.init()``.
+
+An alternative approach is to install ``ansi.sys`` on Windows machines, which
+provides the same behaviour for all applications running in terminals. Colorama
+is intended for situations where that isn't easy (e.g., maybe your app doesn't
+have an installer.)
+
+Demo scripts in the source code repository print some colored text using
+ANSI sequences. Compare their output under Gnome-terminal's built in ANSI
+handling, versus on Windows Command-Prompt using Colorama:
+
+.. image:: https://github.com/tartley/colorama/raw/master/screenshots/ubuntu-demo.png
+ :width: 661
+ :height: 357
+ :alt: ANSI sequences on Ubuntu under gnome-terminal.
+
+.. image:: https://github.com/tartley/colorama/raw/master/screenshots/windows-demo.png
+ :width: 668
+ :height: 325
+ :alt: Same ANSI sequences on Windows, using Colorama.
+
+These screenshots show that, on Windows, Colorama does not support ANSI 'dim
+text'; it looks the same as 'normal text'.
+
+Usage
+-----
+
+Initialisation
+..............
+
+Applications should initialise Colorama using:
+
+.. code-block:: python
+
+ from colorama import init
+ init()
+
+On Windows, calling ``init()`` will filter ANSI escape sequences out of any
+text sent to ``stdout`` or ``stderr``, and replace them with equivalent Win32
+calls.
+
+On other platforms, calling ``init()`` has no effect (unless you request other
+optional functionality, see "Init Keyword Args" below; or if output
+is redirected). By design, this permits applications to call ``init()``
+unconditionally on all platforms, after which ANSI output should just work.
+
+On all platforms, if output is redirected, ANSI escape sequences are completely
+stripped out.
+
+To stop using Colorama before your program exits, simply call ``deinit()``.
+This will restore ``stdout`` and ``stderr`` to their original values, so that
+Colorama is disabled. To resume using Colorama again, call ``reinit()``; it is
+cheaper than calling ``init()`` again (but does the same thing).
+
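+A minimal sketch of that lifecycle, simply following the description above:
+
+.. code-block:: python
+
+    from colorama import init, deinit, reinit
+
+    init()    # wrap stdout/stderr
+    deinit()  # restore the original streams
+    reinit()  # re-enable wrapping; cheaper than a full init()
+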
+
+Colored Output
+..............
+
+Cross-platform printing of colored text can then be done using Colorama's
+constant shorthand for ANSI escape sequences. These are deliberately
+rudimentary, see below.
+
+.. code-block:: python
+
+ from colorama import Fore, Back, Style
+ print(Fore.RED + 'some red text')
+ print(Back.GREEN + 'and with a green background')
+ print(Style.DIM + 'and in dim text')
+ print(Style.RESET_ALL)
+ print('back to normal now')
+
+...or simply by manually printing ANSI sequences from your own code:
+
+.. code-block:: python
+
+ print('\033[31m' + 'some red text')
+ print('\033[39m') # and reset to default color
+
+...or, Colorama can be used in conjunction with existing ANSI libraries
+such as the venerable `Termcolor <https://pypi.org/project/termcolor/>`_,
+the fabulous `Blessings <https://pypi.org/project/blessings/>`_,
+or the incredible `Rich <https://pypi.org/project/rich/>`_.
+
+If you wish Colorama's Fore, Back and Style constants were more capable,
+then consider using one of the above highly capable libraries to generate
+colors, etc, and use Colorama just for its primary purpose: to convert
+those ANSI sequences to also work on Windows:
+
+.. code-block:: python
+
+ from colorama import init
+ from termcolor import colored
+
+ # use Colorama to make Termcolor work on Windows too
+ init()
+
+ # then use Termcolor for all colored text output
+ print(colored('Hello, World!', 'green', 'on_red'))
+
+Available formatting constants are::
+
+ Fore: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
+ Back: BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE, RESET.
+ Style: DIM, NORMAL, BRIGHT, RESET_ALL
+
+``Style.RESET_ALL`` resets foreground, background, and brightness. Colorama will
+perform this reset automatically on program exit.
+
+These are fairly well supported, but not part of the standard::
+
+ Fore: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
+ Back: LIGHTBLACK_EX, LIGHTRED_EX, LIGHTGREEN_EX, LIGHTYELLOW_EX, LIGHTBLUE_EX, LIGHTMAGENTA_EX, LIGHTCYAN_EX, LIGHTWHITE_EX
+
+
+Cursor Positioning
+..................
+
+ANSI codes to reposition the cursor are supported. See ``demos/demo06.py`` for
+an example of how to generate them.
+
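+A minimal sketch (not taken from the demo script) using Colorama's
+``Cursor`` helper to generate such codes:
+
+.. code-block:: python
+
+    from colorama import Cursor
+    print(Cursor.POS(10, 5) + 'printed at column 10, row 5')
+    print(Cursor.UP(2) + 'printed two lines further up')
+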
+
+Init Keyword Args
+.................
+
+``init()`` accepts some ``**kwargs`` to override default behaviour.
+
+init(autoreset=False):
+ If you find yourself repeatedly sending reset sequences to turn off color
+ changes at the end of every print, then ``init(autoreset=True)`` will
+ automate that:
+
+ .. code-block:: python
+
+ from colorama import init
+ init(autoreset=True)
+ print(Fore.RED + 'some red text')
+ print('automatically back to default color again')
+
+init(strip=None):
+ Pass ``True`` or ``False`` to override whether ANSI codes should be
+ stripped from the output. The default behaviour is to strip if on Windows
+ or if output is redirected (not a tty).
+
+init(convert=None):
+ Pass ``True`` or ``False`` to override whether to convert ANSI codes in the
+ output into win32 calls. The default behaviour is to convert if on Windows
+ and output is to a tty (terminal).
+
+init(wrap=True):
+ On Windows, Colorama works by replacing ``sys.stdout`` and ``sys.stderr``
+ with proxy objects, which override the ``.write()`` method to do their work.
+ If this wrapping causes you problems, then this can be disabled by passing
+ ``init(wrap=False)``. The default behaviour is to wrap if ``autoreset`` or
+ ``strip`` or ``convert`` are True.
+
+ When wrapping is disabled, colored printing on non-Windows platforms will
+ continue to work as normal. To do cross-platform colored output, you can
+ use Colorama's ``AnsiToWin32`` proxy directly:
+
+ .. code-block:: python
+
+ import sys
+ from colorama import init, AnsiToWin32
+ init(wrap=False)
+ stream = AnsiToWin32(sys.stderr).stream
+
+ # Python 2
+ print >>stream, Fore.BLUE + 'blue text on stderr'
+
+ # Python 3
+ print(Fore.BLUE + 'blue text on stderr', file=stream)
+
+
+Recognised ANSI Sequences
+.........................
+
+ANSI sequences generally take the form::
+
+ ESC [ <param> ; <param> ... <command>
+
+Where ``<param>`` is an integer, and ``<command>`` is a single letter. Zero or
+more params are passed to a ``<command>``. If no params are passed, it is
+generally synonymous with passing a single zero. No spaces exist in the
+sequence; they have been inserted here simply to read more easily.
+
+The only ANSI sequences that Colorama converts into win32 calls are::
+
+ ESC [ 0 m # reset all (colors and brightness)
+ ESC [ 1 m # bright
+ ESC [ 2 m # dim (looks same as normal brightness)
+ ESC [ 22 m # normal brightness
+
+ # FOREGROUND:
+ ESC [ 30 m # black
+ ESC [ 31 m # red
+ ESC [ 32 m # green
+ ESC [ 33 m # yellow
+ ESC [ 34 m # blue
+ ESC [ 35 m # magenta
+ ESC [ 36 m # cyan
+ ESC [ 37 m # white
+ ESC [ 39 m # reset
+
+ # BACKGROUND
+ ESC [ 40 m # black
+ ESC [ 41 m # red
+ ESC [ 42 m # green
+ ESC [ 43 m # yellow
+ ESC [ 44 m # blue
+ ESC [ 45 m # magenta
+ ESC [ 46 m # cyan
+ ESC [ 47 m # white
+ ESC [ 49 m # reset
+
+ # cursor positioning
+ ESC [ y;x H # position cursor at x across, y down
+ ESC [ y;x f # position cursor at x across, y down
+ ESC [ n A # move cursor n lines up
+ ESC [ n B # move cursor n lines down
+ ESC [ n C # move cursor n characters forward
+ ESC [ n D # move cursor n characters backward
+
+ # clear the screen
+ ESC [ mode J # clear the screen
+
+ # clear the line
+ ESC [ mode K # clear the line
+
+Multiple numeric params to the ``'m'`` command can be combined into a single
+sequence::
+
+ ESC [ 36 ; 45 ; 1 m # bright cyan text on magenta background
+
+All other ANSI sequences of the form ``ESC [ <param> ; <param> ... <command>``
+are silently stripped from the output on Windows.
+
+Any other form of ANSI sequence, such as single-character codes or alternative
+initial characters, is not recognised or stripped. It would be cool to add
+them though. Let me know if it would be useful for you, via the Issues on
+GitHub.
+
+
+Status & Known Problems
+-----------------------
+
+I've personally only tested it on Windows XP (CMD, Console2), Ubuntu
+(gnome-terminal, xterm), and OS X.
+
+Some presumably valid ANSI sequences aren't recognised (see details below),
+but to my knowledge nobody has yet complained about this. Puzzling.
+
+See outstanding issues and wish-list:
+https://github.com/tartley/colorama/issues
+
+If anything doesn't work for you, or doesn't do what you expected or hoped for,
+I'd love to hear about it on that issues list, would be delighted by patches,
+and would be happy to grant commit access to anyone who submits a working patch
+or two.
+
+If you're hacking on the code, see `README-hacking.md`_.
+
+.. _README-hacking.md: README-hacking.md
+
+
+License
+-------
+
+Copyright Jonathan Hartley & Arnon Yaari, 2013-2020. BSD 3-Clause license; see
+LICENSE file.
+
+
+Professional support
+--------------------
+
+.. |tideliftlogo| image:: https://cdn2.hubspot.net/hubfs/4008838/website/logos/logos_for_download/Tidelift_primary-shorthand-logo.png
+ :alt: Tidelift
+ :target: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme
+
+.. list-table::
+ :widths: 10 100
+
+ * - |tideliftlogo|
+ - Professional support for colorama is available as part of the
+ `Tidelift Subscription`_.
+ Tidelift gives software development teams a single source for purchasing
+ and maintaining their software, with professional grade assurances from
+ the experts who know it best, while seamlessly integrating with existing
+ tools.
+
+.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-colorama?utm_source=pypi-colorama&utm_medium=referral&utm_campaign=readme
+
+
+Thanks
+------
+
+* Marc Schlaich (schlamar) for a ``setup.py`` fix for Python2.5.
+* Marc Abramowitz, reported & fixed a crash on exit with closed ``stdout``,
+ providing a solution to issue #7's setuptools/distutils debate,
+ and other fixes.
+* User 'eryksun', for guidance on correctly instantiating ``ctypes.windll``.
+* Matthew McCormick for politely pointing out a longstanding crash on non-Win.
+* Ben Hoyt, for a magnificent fix under 64-bit Windows.
+* Jesse at Empty Square for submitting a fix for examples in the README.
+* User 'jamessp', an observant documentation fix for cursor positioning.
+* User 'vaal1239', Dave Mckee & Lackner Kristof for a tiny but much-needed Win7
+ fix.
+* Julien Stuyck, for wisely suggesting Python3 compatible updates to README.
+* Daniel Griffith for multiple fabulous patches.
+* Oscar Lesta for a valuable fix to stop ANSI chars being sent to non-tty
+ output.
+* Roger Binns, for many suggestions, valuable feedback, & bug reports.
+* Tim Golden for thought and much appreciated feedback on the initial idea.
+* User 'Zearin' for updates to the README file.
+* John Szakmeister for adding support for light colors
+* Charles Merriam for adding documentation to demos
+* Jurko for a fix on 64-bit Windows CPython2.5 w/o ctypes
+* Florian Bruhin for a fix when stdout or stderr are None
+* Thomas Weininger for fixing ValueError on Windows
+* Remi Rampin for better Github integration and fixes to the README file
+* Simeon Visser for closing a file handle using 'with' and updating classifiers
+ to include Python 3.3 and 3.4
+* Andy Neff for fixing RESET of LIGHT_EX colors.
+* Jonathan Hartley for the initial idea and implementation.
diff --git a/third_party/python/colorama/colorama-0.4.5.dist-info/RECORD b/third_party/python/colorama/colorama-0.4.5.dist-info/RECORD
new file mode 100644
index 0000000000..b5754771c3
--- /dev/null
+++ b/third_party/python/colorama/colorama-0.4.5.dist-info/RECORD
@@ -0,0 +1,11 @@
+colorama/__init__.py,sha256=ihDoWQOkapwF7sqQ99AoDoEF3vGYm40OtmgW211cLZw,239
+colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522
+colorama/ansitowin32.py,sha256=gGrO7MVtwc-j1Sq3jKfZpERT1JWmYSOsTVDiTnFbZU4,10830
+colorama/initialise.py,sha256=PprovDNxMTrvoNHFcL2NZjpH2XzDc8BLxLxiErfUl4k,1915
+colorama/win32.py,sha256=bJ8Il9jwaBN5BJ8bmN6FoYZ1QYuMKv2j8fGrXh7TJjw,5404
+colorama/winterm.py,sha256=2y_2b7Zsv34feAsP67mLOVc-Bgq51mdYGo571VprlrM,6438
+colorama-0.4.5.dist-info/LICENSE.txt,sha256=ysNcAmhuXQSlpxQL-zs25zrtSWZW6JEQLkKIhteTAxg,1491
+colorama-0.4.5.dist-info/METADATA,sha256=Kb6MoYzWBmkPhFCf0SW7a-5Eeyssj-szefJmxokQFSU,15128
+colorama-0.4.5.dist-info/WHEEL,sha256=z9j0xAa_JmUKMpmz72K0ZGALSM_n-wQVmGbleXx2VHg,110
+colorama-0.4.5.dist-info/top_level.txt,sha256=_Kx6-Cni2BT1PEATPhrSRxo0d7kSgfBbHf5o7IF1ABw,9
+colorama-0.4.5.dist-info/RECORD,,
diff --git a/third_party/python/colorama/colorama-0.4.5.dist-info/WHEEL b/third_party/python/colorama/colorama-0.4.5.dist-info/WHEEL
new file mode 100644
index 0000000000..0b18a28110
--- /dev/null
+++ b/third_party/python/colorama/colorama-0.4.5.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/colorama/colorama-0.4.5.dist-info/top_level.txt b/third_party/python/colorama/colorama-0.4.5.dist-info/top_level.txt
new file mode 100644
index 0000000000..3fcfb51b2a
--- /dev/null
+++ b/third_party/python/colorama/colorama-0.4.5.dist-info/top_level.txt
@@ -0,0 +1 @@
+colorama
diff --git a/third_party/python/colorama/colorama/__init__.py b/third_party/python/colorama/colorama/__init__.py
new file mode 100644
index 0000000000..9138a8cc8f
--- /dev/null
+++ b/third_party/python/colorama/colorama/__init__.py
@@ -0,0 +1,6 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+from .initialise import init, deinit, reinit, colorama_text
+from .ansi import Fore, Back, Style, Cursor
+from .ansitowin32 import AnsiToWin32
+
+__version__ = '0.4.5'
diff --git a/third_party/python/colorama/colorama/ansi.py b/third_party/python/colorama/colorama/ansi.py
new file mode 100644
index 0000000000..11ec695ff7
--- /dev/null
+++ b/third_party/python/colorama/colorama/ansi.py
@@ -0,0 +1,102 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+'''
+This module generates ANSI character codes for printing colors to terminals.
+See: http://en.wikipedia.org/wiki/ANSI_escape_code
+'''
+
+CSI = '\033['
+OSC = '\033]'
+BEL = '\a'
+
+
+def code_to_chars(code):
+ return CSI + str(code) + 'm'
+
+def set_title(title):
+ return OSC + '2;' + title + BEL
+
+def clear_screen(mode=2):
+ return CSI + str(mode) + 'J'
+
+def clear_line(mode=2):
+ return CSI + str(mode) + 'K'
+
+
+class AnsiCodes(object):
+ def __init__(self):
+        # The subclasses declare class attributes which are numbers.
+        # Upon instantiation we define instance attributes, which are the same
+        # as the class attributes but wrapped with the ANSI escape sequence.
+ for name in dir(self):
+ if not name.startswith('_'):
+ value = getattr(self, name)
+ setattr(self, name, code_to_chars(value))
+
+
+class AnsiCursor(object):
+ def UP(self, n=1):
+ return CSI + str(n) + 'A'
+ def DOWN(self, n=1):
+ return CSI + str(n) + 'B'
+ def FORWARD(self, n=1):
+ return CSI + str(n) + 'C'
+ def BACK(self, n=1):
+ return CSI + str(n) + 'D'
+ def POS(self, x=1, y=1):
+ return CSI + str(y) + ';' + str(x) + 'H'
+
+
+class AnsiFore(AnsiCodes):
+ BLACK = 30
+ RED = 31
+ GREEN = 32
+ YELLOW = 33
+ BLUE = 34
+ MAGENTA = 35
+ CYAN = 36
+ WHITE = 37
+ RESET = 39
+
+ # These are fairly well supported, but not part of the standard.
+ LIGHTBLACK_EX = 90
+ LIGHTRED_EX = 91
+ LIGHTGREEN_EX = 92
+ LIGHTYELLOW_EX = 93
+ LIGHTBLUE_EX = 94
+ LIGHTMAGENTA_EX = 95
+ LIGHTCYAN_EX = 96
+ LIGHTWHITE_EX = 97
+
+
+class AnsiBack(AnsiCodes):
+ BLACK = 40
+ RED = 41
+ GREEN = 42
+ YELLOW = 43
+ BLUE = 44
+ MAGENTA = 45
+ CYAN = 46
+ WHITE = 47
+ RESET = 49
+
+ # These are fairly well supported, but not part of the standard.
+ LIGHTBLACK_EX = 100
+ LIGHTRED_EX = 101
+ LIGHTGREEN_EX = 102
+ LIGHTYELLOW_EX = 103
+ LIGHTBLUE_EX = 104
+ LIGHTMAGENTA_EX = 105
+ LIGHTCYAN_EX = 106
+ LIGHTWHITE_EX = 107
+
+
+class AnsiStyle(AnsiCodes):
+ BRIGHT = 1
+ DIM = 2
+ NORMAL = 22
+ RESET_ALL = 0
+
+Fore = AnsiFore()
+Back = AnsiBack()
+Style = AnsiStyle()
+Cursor = AnsiCursor()
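The instantiation trick in `AnsiCodes.__init__` above means the module-level singletons expose ready-made escape strings rather than the raw numbers. A minimal sanity-check sketch, assuming the vendored `colorama` package is on `sys.path`:

```python
from colorama.ansi import Fore, Back, Style, Cursor, code_to_chars

# Instantiation wrapped every numeric class attribute via code_to_chars(),
# so Fore.RED is the CSI sequence for foreground red, not the number 31.
assert Fore.RED == code_to_chars(31) == '\033[31m'
assert Back.GREEN == '\033[42m'
assert Style.RESET_ALL == '\033[0m'

# AnsiCursor builds movement sequences on demand instead.
assert Cursor.UP(2) == '\033[2A'

# Typical usage: colorize, then reset.
print(Fore.RED + 'error:' + Style.RESET_ALL + ' something failed')
```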
diff --git a/third_party/python/colorama/colorama/ansitowin32.py b/third_party/python/colorama/colorama/ansitowin32.py
new file mode 100644
index 0000000000..3db248baac
--- /dev/null
+++ b/third_party/python/colorama/colorama/ansitowin32.py
@@ -0,0 +1,266 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import re
+import sys
+import os
+
+from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL
+from .winterm import WinTerm, WinColor, WinStyle
+from .win32 import windll, winapi_test
+
+
+winterm = None
+if windll is not None:
+ winterm = WinTerm()
+
+
+class StreamWrapper(object):
+ '''
+ Wraps a stream (such as stdout), acting as a transparent proxy for all
+ attribute access apart from method 'write()', which is delegated to our
+ Converter instance.
+ '''
+ def __init__(self, wrapped, converter):
+ # double-underscore everything to prevent clashes with names of
+ # attributes on the wrapped stream object.
+ self.__wrapped = wrapped
+ self.__convertor = converter
+
+ def __getattr__(self, name):
+ return getattr(self.__wrapped, name)
+
+ def __enter__(self, *args, **kwargs):
+ # special method lookup bypasses __getattr__/__getattribute__, see
+ # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
+ # thus, contextlib magic methods are not proxied via __getattr__
+ return self.__wrapped.__enter__(*args, **kwargs)
+
+ def __exit__(self, *args, **kwargs):
+ return self.__wrapped.__exit__(*args, **kwargs)
+
+ def __setstate__(self, state):
+ self.__dict__ = state
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def write(self, text):
+ self.__convertor.write(text)
+
+ def isatty(self):
+ stream = self.__wrapped
+ if 'PYCHARM_HOSTED' in os.environ:
+ if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
+ return True
+ try:
+ stream_isatty = stream.isatty
+ except AttributeError:
+ return False
+ else:
+ return stream_isatty()
+
+ @property
+ def closed(self):
+ stream = self.__wrapped
+ try:
+ return stream.closed
+ # AttributeError in the case that the stream doesn't support being closed
+ # ValueError for the case that the stream has already been detached when atexit runs
+ except (AttributeError, ValueError):
+ return True
+
+
+class AnsiToWin32(object):
+ '''
+    Implements a 'write()' method which, on Windows, will strip ANSI escape
+    sequences from the text and, if outputting to a tty, will convert them
+    into win32 function calls.
+ '''
+ ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
+ ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command
+
+ def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
+ # The wrapped stream (normally sys.stdout or sys.stderr)
+ self.wrapped = wrapped
+
+ # should we reset colors to defaults after every .write()
+ self.autoreset = autoreset
+
+ # create the proxy wrapping our output stream
+ self.stream = StreamWrapper(wrapped, self)
+
+ on_windows = os.name == 'nt'
+ # We test if the WinAPI works, because even if we are on Windows
+ # we may be using a terminal that doesn't support the WinAPI
+ # (e.g. Cygwin Terminal). In this case it's up to the terminal
+ # to support the ANSI codes.
+ conversion_supported = on_windows and winapi_test()
+
+ # should we strip ANSI sequences from our output?
+ if strip is None:
+ strip = conversion_supported or (not self.stream.closed and not self.stream.isatty())
+ self.strip = strip
+
+        # should we convert ANSI sequences into win32 calls?
+ if convert is None:
+ convert = conversion_supported and not self.stream.closed and self.stream.isatty()
+ self.convert = convert
+
+ # dict of ansi codes to win32 functions and parameters
+ self.win32_calls = self.get_win32_calls()
+
+ # are we wrapping stderr?
+ self.on_stderr = self.wrapped is sys.stderr
+
+ def should_wrap(self):
+ '''
+        True if this class is actually needed. If False, then the output
+        stream will not be affected, nor will win32 calls be issued, so
+        wrapping stdout is not actually required. This will generally be
+        False on non-Windows platforms, unless optional functionality like
+        autoreset has been requested using kwargs to init().
+ '''
+ return self.convert or self.strip or self.autoreset
+
+ def get_win32_calls(self):
+ if self.convert and winterm:
+ return {
+ AnsiStyle.RESET_ALL: (winterm.reset_all, ),
+ AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
+ AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
+ AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
+ AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
+ AnsiFore.RED: (winterm.fore, WinColor.RED),
+ AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
+ AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
+ AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
+ AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
+ AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
+ AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
+ AnsiFore.RESET: (winterm.fore, ),
+ AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
+ AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
+ AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
+ AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
+ AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
+ AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
+ AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
+ AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
+ AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
+ AnsiBack.RED: (winterm.back, WinColor.RED),
+ AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
+ AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
+ AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
+ AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
+ AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
+ AnsiBack.WHITE: (winterm.back, WinColor.GREY),
+ AnsiBack.RESET: (winterm.back, ),
+ AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
+ AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
+ AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
+ AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
+ AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
+ AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
+ AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
+ AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
+ }
+ return dict()
+
+ def write(self, text):
+ if self.strip or self.convert:
+ self.write_and_convert(text)
+ else:
+ self.wrapped.write(text)
+ self.wrapped.flush()
+ if self.autoreset:
+ self.reset_all()
+
+
+ def reset_all(self):
+ if self.convert:
+ self.call_win32('m', (0,))
+ elif not self.strip and not self.stream.closed:
+ self.wrapped.write(Style.RESET_ALL)
+
+
+ def write_and_convert(self, text):
+ '''
+ Write the given text to our wrapped stream, stripping any ANSI
+ sequences from the text, and optionally converting them into win32
+ calls.
+ '''
+ cursor = 0
+ text = self.convert_osc(text)
+ for match in self.ANSI_CSI_RE.finditer(text):
+ start, end = match.span()
+ self.write_plain_text(text, cursor, start)
+ self.convert_ansi(*match.groups())
+ cursor = end
+ self.write_plain_text(text, cursor, len(text))
+
+
+ def write_plain_text(self, text, start, end):
+ if start < end:
+ self.wrapped.write(text[start:end])
+ self.wrapped.flush()
+
+
+ def convert_ansi(self, paramstring, command):
+ if self.convert:
+ params = self.extract_params(command, paramstring)
+ self.call_win32(command, params)
+
+
+ def extract_params(self, command, paramstring):
+ if command in 'Hf':
+ params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
+ while len(params) < 2:
+ # defaults:
+ params = params + (1,)
+ else:
+ params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
+ if len(params) == 0:
+ # defaults:
+ if command in 'JKm':
+ params = (0,)
+ elif command in 'ABCD':
+ params = (1,)
+
+ return params
+
+
+ def call_win32(self, command, params):
+ if command == 'm':
+ for param in params:
+ if param in self.win32_calls:
+ func_args = self.win32_calls[param]
+ func = func_args[0]
+ args = func_args[1:]
+ kwargs = dict(on_stderr=self.on_stderr)
+ func(*args, **kwargs)
+ elif command in 'J':
+ winterm.erase_screen(params[0], on_stderr=self.on_stderr)
+ elif command in 'K':
+ winterm.erase_line(params[0], on_stderr=self.on_stderr)
+ elif command in 'Hf': # cursor position - absolute
+ winterm.set_cursor_position(params, on_stderr=self.on_stderr)
+ elif command in 'ABCD': # cursor position - relative
+ n = params[0]
+ # A - up, B - down, C - forward, D - back
+ x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
+ winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
+
+
+ def convert_osc(self, text):
+ for match in self.ANSI_OSC_RE.finditer(text):
+ start, end = match.span()
+ text = text[:start] + text[end:]
+ paramstring, command = match.groups()
+ if command == BEL:
+ if paramstring.count(";") == 1:
+ params = paramstring.split(";")
+ # 0 - change title and icon (we will only change title)
+ # 1 - change icon (we don't support this)
+ # 2 - change title
+ if params[0] in '02':
+ winterm.set_title(params[1])
+ return text
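The strip/convert defaults inferred in `__init__` are easiest to see with an in-memory stream; a short sketch, assuming the vendored `colorama` package is importable (a `StringIO` is never a tty, so sequences get stripped rather than converted):

```python
import io
from colorama.ansitowin32 import AnsiToWin32

buf = io.StringIO()
wrapper = AnsiToWin32(buf)
assert wrapper.should_wrap()           # strip=True was inferred
wrapper.write('\033[31mred?\033[0m plain')
assert buf.getvalue() == 'red? plain'  # CSI sequences removed

# extract_params() fills in the ANSI defaults described above:
assert wrapper.extract_params('H', '') == (1, 1)  # cursor position
assert wrapper.extract_params('m', '') == (0,)    # SGR reset
```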
diff --git a/third_party/python/colorama/colorama/initialise.py b/third_party/python/colorama/colorama/initialise.py
new file mode 100644
index 0000000000..430d066872
--- /dev/null
+++ b/third_party/python/colorama/colorama/initialise.py
@@ -0,0 +1,80 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import atexit
+import contextlib
+import sys
+
+from .ansitowin32 import AnsiToWin32
+
+
+orig_stdout = None
+orig_stderr = None
+
+wrapped_stdout = None
+wrapped_stderr = None
+
+atexit_done = False
+
+
+def reset_all():
+ if AnsiToWin32 is not None: # Issue #74: objects might become None at exit
+ AnsiToWin32(orig_stdout).reset_all()
+
+
+def init(autoreset=False, convert=None, strip=None, wrap=True):
+
+ if not wrap and any([autoreset, convert, strip]):
+ raise ValueError('wrap=False conflicts with any other arg=True')
+
+ global wrapped_stdout, wrapped_stderr
+ global orig_stdout, orig_stderr
+
+ orig_stdout = sys.stdout
+ orig_stderr = sys.stderr
+
+ if sys.stdout is None:
+ wrapped_stdout = None
+ else:
+ sys.stdout = wrapped_stdout = \
+ wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
+ if sys.stderr is None:
+ wrapped_stderr = None
+ else:
+ sys.stderr = wrapped_stderr = \
+ wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
+
+ global atexit_done
+ if not atexit_done:
+ atexit.register(reset_all)
+ atexit_done = True
+
+
+def deinit():
+ if orig_stdout is not None:
+ sys.stdout = orig_stdout
+ if orig_stderr is not None:
+ sys.stderr = orig_stderr
+
+
+@contextlib.contextmanager
+def colorama_text(*args, **kwargs):
+ init(*args, **kwargs)
+ try:
+ yield
+ finally:
+ deinit()
+
+
+def reinit():
+ if wrapped_stdout is not None:
+ sys.stdout = wrapped_stdout
+ if wrapped_stderr is not None:
+ sys.stderr = wrapped_stderr
+
+
+def wrap_stream(stream, convert, strip, autoreset, wrap):
+ if wrap:
+ wrapper = AnsiToWin32(stream,
+ convert=convert, strip=strip, autoreset=autoreset)
+ if wrapper.should_wrap():
+ stream = wrapper.stream
+ return stream
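A usage sketch for the wrapping lifecycle implemented above (assumes the vendored `colorama` package is importable):

```python
import colorama

# init() swaps sys.stdout/sys.stderr for wrapping proxies when needed;
# deinit() restores the originals, and reinit() re-installs the wrappers.
colorama.init(autoreset=True)
print(colorama.Fore.CYAN + 'colored')  # autoreset appends RESET_ALL
colorama.deinit()

# Or scoped, via the colorama_text context manager defined above:
with colorama.colorama_text():
    print(colorama.Fore.GREEN + 'inside the context')
print('back on the unwrapped streams')
```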
diff --git a/third_party/python/colorama/colorama/win32.py b/third_party/python/colorama/colorama/win32.py
new file mode 100644
index 0000000000..c2d8360336
--- /dev/null
+++ b/third_party/python/colorama/colorama/win32.py
@@ -0,0 +1,152 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+
+# from winbase.h
+STDOUT = -11
+STDERR = -12
+
+try:
+ import ctypes
+ from ctypes import LibraryLoader
+ windll = LibraryLoader(ctypes.WinDLL)
+ from ctypes import wintypes
+except (AttributeError, ImportError):
+ windll = None
+ SetConsoleTextAttribute = lambda *_: None
+ winapi_test = lambda *_: None
+else:
+ from ctypes import byref, Structure, c_char, POINTER
+
+ COORD = wintypes._COORD
+
+ class CONSOLE_SCREEN_BUFFER_INFO(Structure):
+ """struct in wincon.h."""
+ _fields_ = [
+ ("dwSize", COORD),
+ ("dwCursorPosition", COORD),
+ ("wAttributes", wintypes.WORD),
+ ("srWindow", wintypes.SMALL_RECT),
+ ("dwMaximumWindowSize", COORD),
+ ]
+ def __str__(self):
+ return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
+ self.dwSize.Y, self.dwSize.X
+ , self.dwCursorPosition.Y, self.dwCursorPosition.X
+ , self.wAttributes
+ , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
+ , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
+ )
+
+ _GetStdHandle = windll.kernel32.GetStdHandle
+ _GetStdHandle.argtypes = [
+ wintypes.DWORD,
+ ]
+ _GetStdHandle.restype = wintypes.HANDLE
+
+ _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
+ _GetConsoleScreenBufferInfo.argtypes = [
+ wintypes.HANDLE,
+ POINTER(CONSOLE_SCREEN_BUFFER_INFO),
+ ]
+ _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
+
+ _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
+ _SetConsoleTextAttribute.argtypes = [
+ wintypes.HANDLE,
+ wintypes.WORD,
+ ]
+ _SetConsoleTextAttribute.restype = wintypes.BOOL
+
+ _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
+ _SetConsoleCursorPosition.argtypes = [
+ wintypes.HANDLE,
+ COORD,
+ ]
+ _SetConsoleCursorPosition.restype = wintypes.BOOL
+
+ _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
+ _FillConsoleOutputCharacterA.argtypes = [
+ wintypes.HANDLE,
+ c_char,
+ wintypes.DWORD,
+ COORD,
+ POINTER(wintypes.DWORD),
+ ]
+ _FillConsoleOutputCharacterA.restype = wintypes.BOOL
+
+ _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
+ _FillConsoleOutputAttribute.argtypes = [
+ wintypes.HANDLE,
+ wintypes.WORD,
+ wintypes.DWORD,
+ COORD,
+ POINTER(wintypes.DWORD),
+ ]
+ _FillConsoleOutputAttribute.restype = wintypes.BOOL
+
+ _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
+ _SetConsoleTitleW.argtypes = [
+ wintypes.LPCWSTR
+ ]
+ _SetConsoleTitleW.restype = wintypes.BOOL
+
+ def _winapi_test(handle):
+ csbi = CONSOLE_SCREEN_BUFFER_INFO()
+ success = _GetConsoleScreenBufferInfo(
+ handle, byref(csbi))
+ return bool(success)
+
+ def winapi_test():
+ return any(_winapi_test(h) for h in
+ (_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
+
+ def GetConsoleScreenBufferInfo(stream_id=STDOUT):
+ handle = _GetStdHandle(stream_id)
+ csbi = CONSOLE_SCREEN_BUFFER_INFO()
+ success = _GetConsoleScreenBufferInfo(
+ handle, byref(csbi))
+ return csbi
+
+ def SetConsoleTextAttribute(stream_id, attrs):
+ handle = _GetStdHandle(stream_id)
+ return _SetConsoleTextAttribute(handle, attrs)
+
+ def SetConsoleCursorPosition(stream_id, position, adjust=True):
+ position = COORD(*position)
+ # If the position is out of range, do nothing.
+ if position.Y <= 0 or position.X <= 0:
+ return
+ # Adjust for Windows' SetConsoleCursorPosition:
+ # 1. being 0-based, while ANSI is 1-based.
+ # 2. expecting (x,y), while ANSI uses (y,x).
+ adjusted_position = COORD(position.Y - 1, position.X - 1)
+ if adjust:
+ # Adjust for viewport's scroll position
+ sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
+ adjusted_position.Y += sr.Top
+ adjusted_position.X += sr.Left
+ # Resume normal processing
+ handle = _GetStdHandle(stream_id)
+ return _SetConsoleCursorPosition(handle, adjusted_position)
+
+ def FillConsoleOutputCharacter(stream_id, char, length, start):
+ handle = _GetStdHandle(stream_id)
+ char = c_char(char.encode())
+ length = wintypes.DWORD(length)
+ num_written = wintypes.DWORD(0)
+ # Note that this is hard-coded for ANSI (vs wide) bytes.
+ success = _FillConsoleOutputCharacterA(
+ handle, char, length, start, byref(num_written))
+ return num_written.value
+
+ def FillConsoleOutputAttribute(stream_id, attr, length, start):
+ ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
+ handle = _GetStdHandle(stream_id)
+ attribute = wintypes.WORD(attr)
+ length = wintypes.DWORD(length)
+ num_written = wintypes.DWORD(0)
+ # Note that this is hard-coded for ANSI (vs wide) bytes.
+ return _FillConsoleOutputAttribute(
+ handle, attribute, length, start, byref(num_written))
+
+ def SetConsoleTitle(title):
+ return _SetConsoleTitleW(title)
diff --git a/third_party/python/colorama/colorama/winterm.py b/third_party/python/colorama/colorama/winterm.py
new file mode 100644
index 0000000000..0fdb4ec4e9
--- /dev/null
+++ b/third_party/python/colorama/colorama/winterm.py
@@ -0,0 +1,169 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+from . import win32
+
+
+# from wincon.h
+class WinColor(object):
+ BLACK = 0
+ BLUE = 1
+ GREEN = 2
+ CYAN = 3
+ RED = 4
+ MAGENTA = 5
+ YELLOW = 6
+ GREY = 7
+
+# from wincon.h
+class WinStyle(object):
+ NORMAL = 0x00 # dim text, dim background
+ BRIGHT = 0x08 # bright text, dim background
+ BRIGHT_BACKGROUND = 0x80 # dim text, bright background
+
+class WinTerm(object):
+
+ def __init__(self):
+ self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
+ self.set_attrs(self._default)
+ self._default_fore = self._fore
+ self._default_back = self._back
+ self._default_style = self._style
+ # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
+ # So that LIGHT_EX colors and BRIGHT style do not clobber each other,
+ # we track them separately, since LIGHT_EX is overwritten by Fore/Back
+ # and BRIGHT is overwritten by Style codes.
+ self._light = 0
+
+ def get_attrs(self):
+ return self._fore + self._back * 16 + (self._style | self._light)
+
+ def set_attrs(self, value):
+ self._fore = value & 7
+ self._back = (value >> 4) & 7
+ self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
+
+ def reset_all(self, on_stderr=None):
+ self.set_attrs(self._default)
+ self.set_console(attrs=self._default)
+ self._light = 0
+
+ def fore(self, fore=None, light=False, on_stderr=False):
+ if fore is None:
+ fore = self._default_fore
+ self._fore = fore
+ # Emulate LIGHT_EX with BRIGHT Style
+ if light:
+ self._light |= WinStyle.BRIGHT
+ else:
+ self._light &= ~WinStyle.BRIGHT
+ self.set_console(on_stderr=on_stderr)
+
+ def back(self, back=None, light=False, on_stderr=False):
+ if back is None:
+ back = self._default_back
+ self._back = back
+ # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
+ if light:
+ self._light |= WinStyle.BRIGHT_BACKGROUND
+ else:
+ self._light &= ~WinStyle.BRIGHT_BACKGROUND
+ self.set_console(on_stderr=on_stderr)
+
+ def style(self, style=None, on_stderr=False):
+ if style is None:
+ style = self._default_style
+ self._style = style
+ self.set_console(on_stderr=on_stderr)
+
+ def set_console(self, attrs=None, on_stderr=False):
+ if attrs is None:
+ attrs = self.get_attrs()
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ win32.SetConsoleTextAttribute(handle, attrs)
+
+ def get_position(self, handle):
+ position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
+        # Windows coordinates are 0-based, while
+        # win32.SetConsoleCursorPosition expects 1-based positions.
+ position.X += 1
+ position.Y += 1
+ return position
+
+ def set_cursor_position(self, position=None, on_stderr=False):
+ if position is None:
+ # I'm not currently tracking the position, so there is no default.
+ # position = self.get_position()
+ return
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ win32.SetConsoleCursorPosition(handle, position)
+
+ def cursor_adjust(self, x, y, on_stderr=False):
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ position = self.get_position(handle)
+ adjusted_position = (position.Y + y, position.X + x)
+ win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
+
+ def erase_screen(self, mode=0, on_stderr=False):
+ # 0 should clear from the cursor to the end of the screen.
+ # 1 should clear from the cursor to the beginning of the screen.
+ # 2 should clear the entire screen, and move cursor to (1,1)
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ csbi = win32.GetConsoleScreenBufferInfo(handle)
+ # get the number of character cells in the current buffer
+ cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
+ # get number of character cells before current cursor position
+ cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
+ if mode == 0:
+ from_coord = csbi.dwCursorPosition
+ cells_to_erase = cells_in_screen - cells_before_cursor
+ elif mode == 1:
+ from_coord = win32.COORD(0, 0)
+ cells_to_erase = cells_before_cursor
+ elif mode == 2:
+ from_coord = win32.COORD(0, 0)
+ cells_to_erase = cells_in_screen
+ else:
+ # invalid mode
+ return
+        # fill the region to erase with blanks
+ win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+ # now set the buffer's attributes accordingly
+ win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+ if mode == 2:
+ # put the cursor where needed
+ win32.SetConsoleCursorPosition(handle, (1, 1))
+
+ def erase_line(self, mode=0, on_stderr=False):
+ # 0 should clear from the cursor to the end of the line.
+ # 1 should clear from the cursor to the beginning of the line.
+ # 2 should clear the entire line.
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ csbi = win32.GetConsoleScreenBufferInfo(handle)
+ if mode == 0:
+ from_coord = csbi.dwCursorPosition
+ cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
+ elif mode == 1:
+ from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
+ cells_to_erase = csbi.dwCursorPosition.X
+ elif mode == 2:
+ from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
+ cells_to_erase = csbi.dwSize.X
+ else:
+ # invalid mode
+ return
+        # fill the region to erase with blanks
+ win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+ # now set the buffer's attributes accordingly
+ win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+
+ def set_title(self, title):
+ win32.SetConsoleTitle(title)
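`WinTerm` itself needs a live Win32 console, but the attribute word packed by `get_attrs()`/`set_attrs()` can be checked standalone. A sketch of the arithmetic (bits 0-2 foreground, bit 3 BRIGHT, bits 4-6 background, bit 7 BRIGHT_BACKGROUND), assuming the vendored `colorama` package is importable:

```python
from colorama.winterm import WinColor, WinStyle

fore, back, style = WinColor.RED, WinColor.BLUE, WinStyle.BRIGHT
attrs = fore + back * 16 + style           # what get_attrs() computes
assert attrs == 0x1C                       # 4 + 1*16 + 8

# ...and what set_attrs() recovers from such a value:
assert attrs & 7 == WinColor.RED                      # foreground
assert (attrs >> 4) & 7 == WinColor.BLUE              # background
assert attrs & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND) \
    == WinStyle.BRIGHT
```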
diff --git a/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/LICENSE.md b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/LICENSE.md
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/LICENSE.md
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/METADATA b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/METADATA
new file mode 100644
index 0000000000..65ff8760bf
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/METADATA
@@ -0,0 +1,84 @@
+Metadata-Version: 2.1
+Name: compare-locales
+Version: 9.0.1
+Summary: Lint Mozilla localizations
+Home-page: https://github.com/mozilla/compare-locales
+Author: Axel Hecht
+Author-email: axel@mozilla.com
+License: MPL 2.0
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Localization
+Classifier: Topic :: Software Development :: Testing
+Requires-Python: >=3.7, <4
+Description-Content-Type: text/markdown
+License-File: LICENSE.md
+Requires-Dist: fluent.syntax (<0.20,>=0.18.0)
+Requires-Dist: six
+Requires-Dist: toml
+
+![Build tests](https://github.com/mozilla/compare-locales/workflows/test/badge.svg)
+# compare-locales
+Lint Mozilla localizations
+
+Finds
+* missing strings
+* obsolete strings
+* strings that cause runtime errors, reported without false positives
+* strings that may cause runtime errors, reported as warnings
+
+It also includes `l10n-merge` functionality, which pads localizations with
+missing English strings and replaces entities that have errors with their
+English versions.
+
+If you want to check your original code for errors like duplicated messages,
+use `moz-l10n-lint`, which is also part of this package. You can also use
+this to check for conflicts between your strings and those already exposed
+to l10n.
+
+# Configuration
+
+You configure `compare-locales` (and `moz-l10n-lint`) through a
+[project configuration](https://moz-l10n-config.readthedocs.io/en/latest/fileformat.html)
+file, `l10n.toml`.
+
+# Examples
+
+To check all locales in a project use
+
+```bash
+compare-locales l10n.toml .
+```
+
+To check Firefox against a local check-out of l10n-central, use
+
+```bash
+compare-locales browser/locales/l10n.toml ../l10n-central
+```
+
+If you just want to check particular locales, specify them as additional
+commandline parameters.
+
+To lint your local work, use
+
+```bash
+moz-l10n-lint l10n.toml
+```
+
+To check for conflicts against already existing strings:
+
+```bash
+moz-l10n-lint --reference-project ../android-l10n/mozilla-mobile/fenix l10n.toml
+moz-l10n-lint --l10n-reference ../gecko-strings browser/locales/l10n.toml
+```
+
+to check a monolithic project like Fenix or a gecko project like Firefox,
+respectively.
diff --git a/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/RECORD b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/RECORD
new file mode 100644
index 0000000000..1d81d9fca6
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/RECORD
@@ -0,0 +1,45 @@
+compare_locales/__init__.py,sha256=Lbi3Zk69IMtSQjV8b_gDCN24gZf_Vjd35WfEDZu9VNI,18
+compare_locales/commands.py,sha256=cAy0ZseVq2oAkXQyacn671PqfNx_zSraPgeSjAV7pWs,8428
+compare_locales/keyedtuple.py,sha256=WVOkwqS2y3-gH1GwU8oPhay5OeN1YsXTEPb1WacqiS4,1507
+compare_locales/merge.py,sha256=Cuaw783A0YaEpK_cV19iFNayg28l3VwsHLOvUX06y2w,4657
+compare_locales/mozpath.py,sha256=ZzBm7Y9LgO161UgqzHgniyIIXwAlTUDbF1Q2O9FxHL4,4232
+compare_locales/plurals.py,sha256=s5M29AZElgB4z9L24xtc3_W7lUK6UZr_j1APv89fx28,4015
+compare_locales/serializer.py,sha256=uJR-fL2h_X1j0lNnv3PwJ4RRV_x-5kc66KDJg863JvU,4408
+compare_locales/util.py,sha256=ttl1tcGveJpYqoHKVlIplhb0wSjAjAaTRQT0z6xoYrQ,439
+compare_locales/checks/__init__.py,sha256=7S1or4MzMxMA_MtRu-CB5eFyPDPnv1Zq6GGCToaztwo,969
+compare_locales/checks/android.py,sha256=L0z-DJatylz7NeQnAq0sA_fXHTXj0dfZ-nNS1DJPa-8,8318
+compare_locales/checks/base.py,sha256=ld5YSptqIU8xWWs9KKY-u9XP7oN8NrmvzqN605dwRPE,4165
+compare_locales/checks/dtd.py,sha256=OHG99oQI-tT9ZkSPCJR_T9idSSycI6mFSPrb6OJmdHw,9961
+compare_locales/checks/fluent.py,sha256=QP_709JGmEaqruYCyc17WuBcbet6MCa2jexuRHJaMQk,13019
+compare_locales/checks/properties.py,sha256=gtd-5fLWDdowN_KYgQ3dZLsElQHQ6NVvp4jx57GRPjA,6558
+compare_locales/compare/__init__.py,sha256=VMGx8O_MavjZGrcn_6DSfT-J75_ry8m2GxLgUcoUQjM,3293
+compare_locales/compare/content.py,sha256=qCOLcFCoWqktVS-FbsNeI0w1JPhi3t3gqz26Or592D8,10990
+compare_locales/compare/observer.py,sha256=RopVbCeq8nWilR7kfrAfBNfDkF2wHUv98Y8ki49TKMM,7357
+compare_locales/compare/utils.py,sha256=crRWvQYRoKwQbpu1z1IuLjWqOq-PMx23EHNIIAH3eDU,4197
+compare_locales/integration_tests/__init__.py,sha256=eOFgaCLveRf8s90SCQUeZRRxG5LAXwUSxQHxi4H4hvc,154
+compare_locales/integration_tests/test_plurals.py,sha256=Hs4pkXf-DJL7yxnsXo1lbz_1gBpL-1DKaeYy1cS4UY8,1643
+compare_locales/lint/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+compare_locales/lint/cli.py,sha256=dVf9TV5QgDy_5W1jpTIKzhZyvmRDZIZg1mZPBl9RbLE,2965
+compare_locales/lint/linter.py,sha256=cyS6SivquOgXUpQbjpFHs7GgdJbYgsW-5jT0F3RDGyQ,4211
+compare_locales/lint/util.py,sha256=hgHkSvNqWqEiFN38daujWXBUmlQAdy-XBRVGVY9RBfY,1290
+compare_locales/parser/__init__.py,sha256=BVL7HrZOmRo0tGDoROn1f2Ka93314LhrTGPU4Cx0pVU,2041
+compare_locales/parser/android.py,sha256=SvTeAInvGBlal8Ahpv9uA8SaHIZ1LOS0s9Kb-36DJQk,9212
+compare_locales/parser/base.py,sha256=1cDXMnkzM7Qt1KbwGlgKuNm17hPsoWgpdpJDC_9Icqg,12923
+compare_locales/parser/defines.py,sha256=LFseFNLFGb5bHNEmcYqeBymy7VzKIm7OPc6vSoQ298w,3549
+compare_locales/parser/dtd.py,sha256=Dmb8Rk-ptooLbHE9Le9lUUvdtWWFUtSBTlS8w2uWH94,4325
+compare_locales/parser/fluent.py,sha256=GHFCKuqaozGoN5C1c0PGBDhtQ994Swutw_aHXtu0WoM,7035
+compare_locales/parser/ini.py,sha256=I-t-hmGq6VH-sinAxjnIUwtPM2EE_AfMXlJ9G9hKnAs,1545
+compare_locales/parser/po.py,sha256=d9SYQ3WBTICGO_yFvz5SIHjM8mld7oYd-ZupXRN-qZ4,3220
+compare_locales/parser/properties.py,sha256=rnmomMr1-EDvjyC3R1lGl-nYkIZA1B9E2C-U-N_7YXY,3716
+compare_locales/paths/__init__.py,sha256=pQZ4FlsedUtR8dA-uqTqhiNC3rQvPZNzEoTRdJLbyts,1419
+compare_locales/paths/configparser.py,sha256=xIWYDgasIt_qXIcHvH6DMLtXiiF5zbu3Zi8bbrnArtY,4377
+compare_locales/paths/files.py,sha256=2uEhVEjpkGZBJNiF2jwiN5oyxhNouLCI7Hivw4SgkRE,9165
+compare_locales/paths/ini.py,sha256=5IPcgacKYCxKx3dEiNpi8MztYWWFQT6ATOgtpFaT54I,8411
+compare_locales/paths/matcher.py,sha256=4k0UZr1PvFAb29R_nATR5qdWP4ThJGy36yMf6Ipie58,15099
+compare_locales/paths/project.py,sha256=Tl6CfikkOKDi0E3BcxWS4Q3PSU-rjFKVdeNcENwQVN4,8784
+compare_locales-9.0.1.dist-info/LICENSE.md,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+compare_locales-9.0.1.dist-info/METADATA,sha256=j59rhNO4K7-WQKT_uxjCMBXlBcCyYuu3trfMS5Sskuw,2595
+compare_locales-9.0.1.dist-info/WHEEL,sha256=a-zpFRIJzOq5QfuhBzbhiA1eHTzNCJn8OdRvhdNX0Rk,110
+compare_locales-9.0.1.dist-info/entry_points.txt,sha256=EYuE78Z7UKpwisLmRuYHZdosK06cETbXNN4BZICR6xM,127
+compare_locales-9.0.1.dist-info/top_level.txt,sha256=eSEPLAFZcEPFC1j0N9GtVpMaKCFKw67ehDx9CMcoel0,16
+compare_locales-9.0.1.dist-info/RECORD,,
diff --git a/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/WHEEL b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/WHEEL
new file mode 100644
index 0000000000..f771c29b87
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.40.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/entry_points.txt b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000..03d6f06f40
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+compare-locales = compare_locales.commands:CompareLocales.call
+moz-l10n-lint = compare_locales.lint.cli:main
diff --git a/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/top_level.txt b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..d9c74fc101
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales-9.0.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+compare_locales
diff --git a/third_party/python/compare_locales/compare_locales/__init__.py b/third_party/python/compare_locales/compare_locales/__init__.py
new file mode 100644
index 0000000000..23b7f329ba
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/__init__.py
@@ -0,0 +1 @@
+version = "9.0.1"
diff --git a/third_party/python/compare_locales/compare_locales/checks/__init__.py b/third_party/python/compare_locales/compare_locales/checks/__init__.py
new file mode 100644
index 0000000000..c15ede03f9
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/checks/__init__.py
@@ -0,0 +1,27 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from .base import Checker, EntityPos
+from .android import AndroidChecker
+from .dtd import DTDChecker
+from .fluent import FluentChecker
+from .properties import PropertiesChecker
+
+
+__all__ = [
+ 'Checker', 'EntityPos',
+ 'AndroidChecker', 'DTDChecker', 'FluentChecker', 'PropertiesChecker',
+]
+
+
+def getChecker(file, extra_tests=None):
+ if PropertiesChecker.use(file):
+ return PropertiesChecker(extra_tests, locale=file.locale)
+ if DTDChecker.use(file):
+ return DTDChecker(extra_tests, locale=file.locale)
+ if FluentChecker.use(file):
+ return FluentChecker(extra_tests, locale=file.locale)
+ if AndroidChecker.use(file):
+ return AndroidChecker(extra_tests, locale=file.locale)
+ return Checker(extra_tests, locale=file.locale)
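A dispatch sketch for `getChecker` (assumes `compare_locales` and its dependencies, e.g. `fluent.syntax`, are importable; the stub object below only mimics the `file` and `locale` attributes the checkers consult):

```python
from types import SimpleNamespace
from compare_locales.checks import getChecker

mock_file = SimpleNamespace(file='res/values/strings.xml', locale='de')
checker = getChecker(mock_file)
# AndroidChecker.pattern matches '...strings...xml', and none of the
# other checkers' patterns should claim this path first.
assert type(checker).__name__ == 'AndroidChecker'
```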
diff --git a/third_party/python/compare_locales/compare_locales/checks/android.py b/third_party/python/compare_locales/compare_locales/checks/android.py
new file mode 100644
index 0000000000..d5a1f2f25f
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/checks/android.py
@@ -0,0 +1,256 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+from xml.dom import minidom
+
+from .base import Checker
+from ..parser.android import textContent
+
+
+class AndroidChecker(Checker):
+ pattern = re.compile('(.*)?strings.*\\.xml$')
+
+ def check(self, refEnt, l10nEnt):
+ '''Given the reference and localized Entities, performs checks.
+
+ This is a generator yielding tuples of
+ - "warning" or "error", depending on what should be reported,
+        - tuple of line, column info for the error within the string,
+        - description string to be shown in the report, and
+        - the check category (here "android")
+ '''
+ yield from super().check(refEnt, l10nEnt)
+ refNode = refEnt.node
+ l10nNode = l10nEnt.node
+ # Apples and oranges, error out.
+ if refNode.nodeName != l10nNode.nodeName:
+ yield ("error", 0, "Incompatible resource types", "android")
+ return
+ # Once we start parsing more resource types, make sure to add checks
+ # for them.
+ if refNode.nodeName != "string":
+ yield ("warning", 0, "Unsupported resource type", "android")
+ return
+ yield from self.check_string([refNode], l10nEnt)
+
+ def check_string(self, refs, l10nEnt):
+ '''Check a single string literal against a list of references.
+
+ There should be multiple nodes given for <plurals> or <string-array>.
+ '''
+ l10n = l10nEnt.node
+ if self.not_translatable(l10n, *refs):
+ yield (
+ "error",
+ 0,
+ "strings must be translatable",
+ "android"
+ )
+ return
+ if self.no_at_string(l10n):
+ yield (
+ "error",
+ 0,
+ "strings must be translatable",
+ "android"
+ )
+ return
+ if self.no_at_string(*refs):
+ yield (
+ "warning",
+ 0,
+ "strings must be translatable",
+ "android"
+ )
+ if self.non_simple_data(l10n):
+ yield (
+ "error",
+ 0,
+ "Only plain text allowed, "
+ "or one CDATA surrounded by whitespace",
+ "android"
+ )
+ return
+ yield from check_apostrophes(l10nEnt.val)
+
+ params, errors = get_params(refs)
+ for error, pos in errors:
+ yield (
+ "warning",
+ pos,
+ error,
+ "android"
+ )
+ if params:
+ yield from check_params(params, l10nEnt.val)
+
+ def not_translatable(self, *nodes):
+ return any(
+ node.hasAttribute("translatable")
+ and node.getAttribute("translatable") == "false"
+ for node in nodes
+ )
+
+ def no_at_string(self, *ref_nodes):
+        '''Android allows referencing other strings by using
+        @string/identifier
+        instead of the actual value. Those references don't belong in
+        a localizable file, so warn on them.
+ '''
+ return any(
+ textContent(node).startswith('@string/')
+ for node in ref_nodes
+ )
+
+ def non_simple_data(self, node):
+ '''Only allow single text nodes, or, a single CDATA node
+ surrounded by whitespace.
+ '''
+ cdata = [
+ child
+ for child in node.childNodes
+ if child.nodeType == minidom.Node.CDATA_SECTION_NODE
+ ]
+ if len(cdata) == 0:
+ if node.childNodes.length == 0:
+ # empty translation is OK
+ return False
+ if node.childNodes.length != 1:
+ return True
+ return node.childNodes[0].nodeType != minidom.Node.TEXT_NODE
+ if len(cdata) > 1:
+ return True
+ for child in node.childNodes:
+ if child == cdata[0]:
+ continue
+ if child.nodeType != minidom.Node.TEXT_NODE:
+ return True
+ if child.data.strip() != "":
+ return True
+ return False
+
+
+silencer = re.compile(r'\\.|""')
+
+
+def check_apostrophes(string):
+ '''Check Android logic for quotes and apostrophes.
+
+ If you have an apostrophe (') in your string, you must either escape it
+ with a backslash (\') or enclose the string in double-quotes (").
+
+ Unescaped quotes are not visually shown on Android, but they're
+    also harmless, so we're not checking for quotes. We might do so once
+    we're better at checking for inline XML, which is full of quotes.
+    Pairing quotes as in '""' is bad, though, so report errors for that,
+    mostly because it's hard to tell whether Android considers a string
+    quoted or not in the end.
+
+ https://developer.android.com/guide/topics/resources/string-resource#escaping_quotes
+ '''
+ for m in re.finditer('""', string):
+ yield (
+ "error",
+ m.start(),
+ "Double straight quotes not allowed",
+ "android"
+ )
+ string = silencer.sub(" ", string)
+
+ is_quoted = string.startswith('"') and string.endswith('"')
+ if not is_quoted:
+ # apostrophes need to be escaped
+ for m in re.finditer("'", string):
+ yield (
+ "error",
+ m.start(),
+ "Apostrophe must be escaped",
+ "android"
+ )
+
+
+def get_params(refs):
+ '''Get printf parameters and internal errors.
+
+    Returns a sparse map of positions to formatters, and a list
+ of errors. Errors covered so far are mismatching formatters.
+ '''
+ params = {}
+ errors = []
+ next_implicit = 1
+ for ref in refs:
+ if isinstance(ref, minidom.Node):
+ ref = textContent(ref)
+ for m in re.finditer(r'%(?P<order>[1-9]\$)?(?P<format>[sSd])', ref):
+ order = m.group('order')
+ if order:
+ order = int(order[0])
+ else:
+ order = next_implicit
+ next_implicit += 1
+ fmt = m.group('format')
+ if order not in params:
+ params[order] = fmt
+ else:
+ # check for consistency errors
+ if params[order] == fmt:
+ continue
+ msg = "Conflicting formatting, %{order}${f1} vs %{order}${f2}"
+ errors.append((
+ msg.format(order=order, f1=fmt, f2=params[order]),
+ m.start()
+ ))
+ return params, errors
+
+
+def check_params(params, string):
+ '''Compare the printf parameters in the given string to the reference
+ parameters.
+
+ Also yields errors that are internal to the parameters inside string,
+ as found by `get_params`.
+ '''
+ lparams, errors = get_params([string])
+ for error, pos in errors:
+ yield (
+ "error",
+ pos,
+ error,
+ "android"
+ )
+ # Compare reference for each localized parameter.
+ # If there's no reference found, error, as an out-of-bounds
+ # parameter crashes.
+ # This assumes that all parameters are actually used in the reference,
+ # which should be OK.
+ # If there's a mismatch in the formatter, error.
+ for order in sorted(lparams):
+ if order not in params:
+ yield (
+ "error",
+ 0,
+ "Formatter %{}${} not found in reference".format(
+ order, lparams[order]
+ ),
+ "android"
+ )
+ elif params[order] != lparams[order]:
+ yield (
+ "error",
+ 0,
+ "Mismatching formatter",
+ "android"
+ )
+ # All parameters used in the reference are expected to be included.
+ # Warn if this isn't the case.
+ for order in params:
+ if order not in sorted(lparams):
+ yield (
+ "warning",
+ 0,
+ "Formatter %{}${} not found in translation".format(
+ order, params[order]
+ ),
+ "android",
+ )
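+
+
+# Illustrative doctest for check_params (not part of the original module):
+#
+#   >>> ref_params, _ = get_params(['%1$s'])
+#   >>> list(check_params(ref_params, 'no placeholder here'))
+#   [('warning', 0, 'Formatter %1$s not found in translation', 'android')]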
diff --git a/third_party/python/compare_locales/compare_locales/checks/base.py b/third_party/python/compare_locales/compare_locales/checks/base.py
new file mode 100644
index 0000000000..95f4bc7b59
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/checks/base.py
@@ -0,0 +1,122 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+
+class EntityPos(int):
+ pass
+
+
+mochibake = re.compile('\ufffd')
+
+
+class Checker:
+ '''Abstract class to implement checks per file type.
+ '''
+ pattern = None
+ # if a check uses all reference entities, set this to True
+ needs_reference = False
+
+ @classmethod
+ def use(cls, file):
+ return cls.pattern.match(file.file)
+
+ def __init__(self, extra_tests, locale=None):
+ self.extra_tests = extra_tests
+ self.locale = locale
+ self.reference = None
+
+ def check(self, refEnt, l10nEnt):
+ '''Given the reference and localized Entities, performs checks.
+
+ This is a generator yielding tuples of
+ - "warning" or "error", depending on what should be reported,
+ - tuple of line, column info for the error within the string
+ - description string to be shown in the report
+
+ By default, check for possible encoding errors.
+ '''
+ for m in mochibake.finditer(l10nEnt.all):
+ yield (
+ "warning",
+ EntityPos(m.start()),
+ f"\ufffd in: {l10nEnt.key}",
+ "encodings"
+ )
+
+ def set_reference(self, reference):
+ '''Set the reference entities.
+ Only do this if self.needs_reference is True.
+ '''
+ self.reference = reference
+
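+# A hypothetical usage sketch (the entity type is invented here;
+# Checker.check() only relies on its `.all` and `.key` attributes):
+#
+#   >>> from collections import namedtuple
+#   >>> Ent = namedtuple('Ent', 'all key')
+#   >>> list(Checker(None).check(Ent('ok', 'k'), Ent('bad \ufffd', 'k')))
+#   [('warning', 4, '� in: k', 'encodings')]
+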
+
+class CSSCheckMixin:
+ def maybe_style(self, ref_value, l10n_value):
+ ref_map, _ = self.parse_css_spec(ref_value)
+ if not ref_map:
+ return
+ l10n_map, errors = self.parse_css_spec(l10n_value)
+ yield from self.check_style(ref_map, l10n_map, errors)
+
+ def check_style(self, ref_map, l10n_map, errors):
+ if not l10n_map:
+ yield ('error', 0, 'reference is a CSS spec', 'css')
+ return
+ if errors:
+ yield ('error', 0, 'reference is a CSS spec', 'css')
+ return
+ msgs = []
+ for prop, unit in l10n_map.items():
+ if prop not in ref_map:
+ msgs.insert(0, '%s only in l10n' % prop)
+ continue
+ else:
+ ref_unit = ref_map.pop(prop)
+ if unit != ref_unit:
+ msgs.append("units for %s don't match "
+ "(%s != %s)" % (prop, unit, ref_unit))
+ for prop in ref_map.keys():
+ msgs.insert(0, '%s only in reference' % prop)
+ if msgs:
+ yield ('warning', 0, ', '.join(msgs), 'css')
+
+ def parse_css_spec(self, val):
+ if not hasattr(self, '_css_spec'):
+ self._css_spec = re.compile(
+ r'(?:'
+ r'(?P<prop>(?:min\-|max\-)?(?:width|height))'
+ r'[ \t\r\n]*:[ \t\r\n]*'
+ r'(?P<length>[0-9]+|[0-9]*\.[0-9]+)'
+ r'(?P<unit>ch|em|ex|rem|px|cm|mm|in|pc|pt)'
+ r')'
+ r'|\Z'
+ )
+ self._css_sep = re.compile(r'[ \t\r\n]*(?P<semi>;)?[ \t\r\n]*$')
+ refMap = errors = None
+ end = 0
+ for m in self._css_spec.finditer(val):
+ if end == 0 and m.start() == m.end():
+ # no CSS spec found, just the immediate end of the string
+ return None, None
+ if m.start() > end:
+ split = self._css_sep.match(val, end, m.start())
+ if split is None:
+ errors = errors or []
+ errors.append({
+ 'pos': end,
+ 'code': 'css-bad-content',
+ })
+ elif end > 0 and split.group('semi') is None:
+ errors = errors or []
+ errors.append({
+ 'pos': end,
+ 'code': 'css-missing-semicolon',
+ })
+ if m.group('prop'):
+ refMap = refMap or {}
+ refMap[m.group('prop')] = m.group('unit')
+ end = m.end()
+ return refMap, errors
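+
+
+# Illustrative doctest for CSSCheckMixin (not part of the original module):
+#
+#   >>> CSSCheckMixin().parse_css_spec('width: 100px; height: 2em')
+#   ({'width': 'px', 'height': 'em'}, None)
+#   >>> CSSCheckMixin().parse_css_spec('just some text')
+#   (None, None)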
diff --git a/third_party/python/compare_locales/compare_locales/checks/dtd.py b/third_party/python/compare_locales/compare_locales/checks/dtd.py
new file mode 100644
index 0000000000..139624f98f
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/checks/dtd.py
@@ -0,0 +1,238 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from io import BytesIO
+import re
+from xml import sax
+
+from compare_locales.parser import DTDParser
+from .base import Checker, CSSCheckMixin
+
+
+class DTDChecker(Checker, CSSCheckMixin):
+ """Tests to run on DTD files.
+
+ Uses xml.sax for the heavy lifting of xml parsing.
+
+ The code tries to parse until it doesn't find any unresolved entities
+ anymore. If it finds one, it tries to grab the key, and adds an empty
+ <!ENTITY key ""> definition to the header.
+
+ Also checks for some CSS and number heuristics in the values.
+ """
+ pattern = re.compile(r'.*\.dtd$')
+ needs_reference = True # to cast a wider net for known entity references
+
+ eref = re.compile('&(%s);' % DTDParser.Name)
+ tmpl = b'''<!DOCTYPE elem [%s]>
+<elem>%s</elem>
+'''
+ xmllist = {'amp', 'lt', 'gt', 'apos', 'quot'}
+
+ def __init__(self, extra_tests, locale=None):
+ super().__init__(extra_tests, locale=locale)
+ self.processContent = False
+ if self.extra_tests is not None and 'android-dtd' in self.extra_tests:
+ self.processContent = True
+ self.__known_entities = None
+
+ def known_entities(self, refValue):
+ if self.__known_entities is None and self.reference is not None:
+ self.__known_entities = set()
+ for ent in self.reference.values():
+ self.__known_entities.update(
+ self.entities_for_value(ent.raw_val))
+ return self.__known_entities if self.__known_entities is not None \
+ else self.entities_for_value(refValue)
+
+ def entities_for_value(self, value):
+ reflist = {m.group(1) for m in self.eref.finditer(value)}
+ reflist -= self.xmllist
+ return reflist
+
+ # Setup for XML parser, with default and text-only content handler
+ class TextContent(sax.handler.ContentHandler):
+ textcontent = ''
+
+ def characters(self, content):
+ self.textcontent += content
+
+ defaulthandler = sax.handler.ContentHandler()
+ texthandler = TextContent()
+
+ numPattern = r'([0-9]+|[0-9]*\.[0-9]+)'
+ num = re.compile('^%s$' % numPattern)
+ lengthPattern = '%s(em|px|ch|cm|in)' % numPattern
+ length = re.compile('^%s$' % lengthPattern)
+
+ def check(self, refEnt, l10nEnt):
+ """Try to parse the refvalue inside a dummy element, and keep
+ track of entities that we need to define to make that work.
+
+ Yields errors and warnings found while checking the localized
+ value against those entity definitions.
+ """
+ yield from super().check(refEnt, l10nEnt)
+ refValue, l10nValue = refEnt.raw_val, l10nEnt.raw_val
+ # find entities the refValue references,
+ # reusing markup from DTDParser.
+ reflist = self.known_entities(refValue)
+ inContext = self.entities_for_value(refValue)
+ entities = ''.join('<!ENTITY %s "">' % s for s in sorted(reflist))
+ parser = sax.make_parser()
+ parser.setFeature(sax.handler.feature_external_ges, False)
+
+ parser.setContentHandler(self.defaulthandler)
+ try:
+ parser.parse(
+ BytesIO(self.tmpl %
+ (entities.encode('utf-8'),
+ refValue.encode('utf-8'))))
+ # also catch stray %
+ parser.parse(
+ BytesIO(self.tmpl %
+ ((refEnt.all + entities).encode('utf-8'),
+ b'&%s;' % refEnt.key.encode('utf-8'))))
+ except sax.SAXParseException:
+ yield ('warning',
+ (0, 0),
+ "can't parse en-US value", 'xmlparse')
+
+ # find entities the l10nValue references,
+ # reusing markup from DTDParser.
+ l10nlist = self.entities_for_value(l10nValue)
+ missing = sorted(l10nlist - reflist)
+ _entities = entities + ''.join('<!ENTITY %s "">' % s for s in missing)
+ if self.processContent:
+ self.texthandler.textcontent = ''
+ parser.setContentHandler(self.texthandler)
+ try:
+ parser.parse(BytesIO(self.tmpl % (_entities.encode('utf-8'),
+ l10nValue.encode('utf-8'))))
+ # also catch stray %
+ # if this fails, we need to subtract the entity definition
+ parser.setContentHandler(self.defaulthandler)
+ parser.parse(
+ BytesIO(self.tmpl %
+ ((l10nEnt.all + _entities).encode('utf-8'),
+ b'&%s;' % l10nEnt.key.encode('utf-8'))))
+ except sax.SAXParseException as e:
+ # xml parse error, yield error
+ # sometimes, the error is reported on our fake closing
+ # element, make that the end of the last line
+ lnr = e.getLineNumber() - 1
+ lines = l10nValue.splitlines()
+ if lnr > len(lines):
+ lnr = len(lines)
+ col = len(lines[lnr-1])
+ else:
+ col = e.getColumnNumber()
+ if lnr == 1:
+ # first line starts with <elem>, subtract
+ col -= len("<elem>")
+ elif lnr == 0:
+ col -= len("<!DOCTYPE elem [") # first line is DOCTYPE
+ yield ('error', (lnr, col), ' '.join(e.args), 'xmlparse')
+
+ warntmpl = 'Referencing unknown entity `%s`'
+ if reflist:
+ if inContext:
+ elsewhere = reflist - inContext
+ warntmpl += ' (%s used in context' % \
+ ', '.join(sorted(inContext))
+ if elsewhere:
+ warntmpl += ', %s known)' % ', '.join(sorted(elsewhere))
+ else:
+ warntmpl += ')'
+ else:
+ warntmpl += ' (%s known)' % ', '.join(sorted(reflist))
+ for key in missing:
+ yield ('warning', (0, 0), warntmpl % key,
+ 'xmlparse')
+ if inContext and l10nlist and l10nlist - inContext - set(missing):
+ mismatch = sorted(l10nlist - inContext - set(missing))
+ for key in mismatch:
+ yield ('warning', (0, 0),
+ 'Entity {} referenced, but {} used in context'.format(
+ key,
+ ', '.join(sorted(inContext))
+ ), 'xmlparse')
+
+ # Number check
+ if self.num.match(refValue) and not self.num.match(l10nValue):
+ yield ('warning', 0, 'reference is a number', 'number')
+ # CSS checks
+ # just a length, width="100em"
+ if self.length.match(refValue) and not self.length.match(l10nValue):
+ yield ('error', 0, 'reference is a CSS length', 'css')
+ # Check for actual CSS style attribute values
+ yield from self.maybe_style(refValue, l10nValue)
+
+ if self.extra_tests is not None and 'android-dtd' in self.extra_tests:
+ yield from self.processAndroidContent(self.texthandler.textcontent)
+
+ quoted = re.compile("(?P<q>[\"']).*(?P=q)$")
+
+ def unicode_escape(self, string):
+ """Helper method to try to decode all unicode escapes in a string.
+
+ This code uses the standard python decode for unicode-escape, but
+ that's somewhat tricky, as its input needs to be ascii. To get to
+ ascii, the unicode string gets converted to ascii with
+ backslashreplace, i.e., all non-ascii unicode chars get unicode
+ escaped. And then we try to roll all of that back.
+ Now, when that hits an error, that's from the original string, and we
+ need to search for the actual error position in the original string,
+ as the backslashreplace code changes string positions quite badly.
+ See also the last check in TestAndroid.test_android_dtd, with a
+ lengthy Chinese string.
+ """
+ val = string.encode('ascii', 'backslashreplace')
+ try:
+ val.decode('unicode-escape')
+ except UnicodeDecodeError as e:
+ args = list(e.args)
+ badstring = args[1][args[2]:args[3]]
+ i = len(args[1][:args[2]].decode('unicode-escape'))
+ args[2] = i
+ args[3] = i + len(badstring)
+ raise UnicodeDecodeError(*args)
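+
+ # Illustrative note (not part of the original module): on success
+ # unicode_escape() simply returns None; on a broken escape such as
+ # 'pre \\uXYZ' it re-raises UnicodeDecodeError with start/end mapped
+ # back to offsets in the original string, so that error positions
+ # line up with what the localizer wrote.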
+
+ def processAndroidContent(self, val):
+ """Check for the string values that Android puts into an XML container.
+
+ http://developer.android.com/guide/topics/resources/string-resource.html#FormattingAndStyling # noqa
+
+ Check for unicode escapes and unescaped quotes and apostrophes,
+ if the string's not quoted.
+ """
+ # first, try to decode unicode escapes
+ try:
+ self.unicode_escape(val)
+ except UnicodeDecodeError as e:
+ yield ('error', e.args[2], e.args[4], 'android')
+ # check for unescaped single or double quotes.
+ # first, see if the complete string is single or double quoted,
+ # that changes the rules
+ m = self.quoted.match(val)
+ if m:
+ q = m.group('q')
+ offset = 0
+ val = val[1:-1] # strip quotes
+ else:
+ q = "[\"']"
+ offset = -1
+ stray_quot = re.compile(r"[\\\\]*(%s)" % q)
+
+ for m in stray_quot.finditer(val):
+ if len(m.group(0)) % 2:
+ # found an unescaped single or double quote, which message?
+ if m.group(1) == '"':
+ msg = "Quotes in Android DTDs need escaping with \\\" "\
+ "or \\u0022, or put string in apostrophes."
+ else:
+ msg = "Apostrophes in Android DTDs need escaping with "\
+ "\\' or \\u0027, or use \u2019, or put string in "\
+ "quotes."
+ yield ('error', m.end(0)+offset, msg, 'android')
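+
+
+# A hypothetical usage sketch ('android-dtd' is the extra test this module
+# itself checks for; the string is made up):
+#
+#   >>> checker = DTDChecker(['android-dtd'])
+#   >>> [(cat, pos) for cat, pos, msg, tpe in
+#   ...  checker.processAndroidContent("it's")]
+#   [('error', 2)]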
diff --git a/third_party/python/compare_locales/compare_locales/checks/fluent.py b/third_party/python/compare_locales/compare_locales/checks/fluent.py
new file mode 100644
index 0000000000..f82ecbd54f
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/checks/fluent.py
@@ -0,0 +1,351 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+from collections import defaultdict
+
+from fluent.syntax import ast as ftl
+from fluent.syntax.serializer import serialize_variant_key
+from fluent.syntax.visitor import Visitor
+
+from .base import Checker, CSSCheckMixin
+from compare_locales import plurals
+
+
+MSGS = {
+ 'missing-msg-ref': 'Missing message reference: {ref}',
+ 'missing-term-ref': 'Missing term reference: {ref}',
+ 'obsolete-msg-ref': 'Obsolete message reference: {ref}',
+ 'obsolete-term-ref': 'Obsolete term reference: {ref}',
+ 'duplicate-attribute': 'Attribute "{name}" is duplicated',
+ 'missing-value': 'Missing value',
+ 'obsolete-value': 'Obsolete value',
+ 'missing-attribute': 'Missing attribute: {name}',
+ 'obsolete-attribute': 'Obsolete attribute: {name}',
+ 'duplicate-variant': 'Variant key "{name}" is duplicated',
+ 'missing-plural': 'Plural categories missing: {categories}',
+ 'plain-message': '{message}',
+}
+
+
+def pattern_variants(pattern):
+ """Get variants of plain text of a pattern.
+
+ For now, just return simple text patterns.
+ This can be improved to allow for SelectExpressions
+ of simple text patterns, or even nested expressions, and Literals.
+ Variants with Variable-, Message-, or TermReferences should be ignored.
+ """
+ elements = pattern.elements
+ if len(elements) == 1:
+ if isinstance(elements[0], ftl.TextElement):
+ return [elements[0].value]
+ return []
+
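+# Illustrative doctest for pattern_variants (AST nodes built by hand):
+#
+#   >>> pattern_variants(ftl.Pattern([ftl.TextElement('Ok')]))
+#   ['Ok']
+#   >>> pattern_variants(
+#   ...     ftl.Pattern([ftl.TextElement('a'), ftl.TextElement('b')]))
+#   []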
+
+class ReferenceMessageVisitor(Visitor, CSSCheckMixin):
+ def __init__(self):
+ # References to Messages, their Attributes, and Terms
+ # Store reference name and type
+ self.entry_refs = defaultdict(dict)
+ # The currently active references
+ self.refs = {}
+ # Start with the Entry value (associated with None)
+ self.entry_refs[None] = self.refs
+ # If we're a message, store whether there was a value
+ self.message_has_value = False
+ # Map attribute names to positions
+ self.attribute_positions = {}
+ # Map of CSS style attribute properties and units
+ self.css_styles = None
+ self.css_errors = None
+
+ def generic_visit(self, node):
+ if isinstance(
+ node,
+ (ftl.Span, ftl.Annotation, ftl.BaseComment)
+ ):
+ return
+ super().generic_visit(node)
+
+ def visit_Message(self, node):
+ if node.value is not None:
+ self.message_has_value = True
+ super().generic_visit(node)
+
+ def visit_Attribute(self, node):
+ self.attribute_positions[node.id.name] = node.span.start
+ old_refs = self.refs
+ self.refs = self.entry_refs[node.id.name]
+ super().generic_visit(node)
+ self.refs = old_refs
+ if node.id.name != 'style':
+ return
+ text_values = pattern_variants(node.value)
+ if not text_values:
+ self.css_styles = 'skip'
+ return
+ # right now, there's just one possible text value
+ self.css_styles, self.css_errors = self.parse_css_spec(text_values[0])
+
+ def visit_SelectExpression(self, node):
+ # optimize select expressions to only go through the variants
+ self.visit(node.variants)
+
+ def visit_MessageReference(self, node):
+ ref = node.id.name
+ if node.attribute:
+ ref += '.' + node.attribute.name
+ self.refs[ref] = 'msg-ref'
+
+ def visit_TermReference(self, node):
+ # only collect term references, but not attributes of terms
+ if node.attribute:
+ return
+ self.refs['-' + node.id.name] = 'term-ref'
+
+
+class GenericL10nChecks:
+ '''Helper Mixin for checks shared between Terms and Messages.'''
+ def check_duplicate_attributes(self, node):
+ warned = set()
+ for left in range(len(node.attributes) - 1):
+ if left in warned:
+ continue
+ left_attr = node.attributes[left]
+ warned_left = False
+ for right in range(left+1, len(node.attributes)):
+ right_attr = node.attributes[right]
+ if left_attr.id.name == right_attr.id.name:
+ if not warned_left:
+ warned_left = True
+ self.messages.append(
+ (
+ 'warning', left_attr.span.start,
+ MSGS['duplicate-attribute'].format(
+ name=left_attr.id.name
+ )
+ )
+ )
+ warned.add(right)
+ self.messages.append(
+ (
+ 'warning', right_attr.span.start,
+ MSGS['duplicate-attribute'].format(
+ name=left_attr.id.name
+ )
+ )
+ )
+
+ def check_variants(self, variants):
+ # Check for duplicate variants
+ warned = set()
+ for left in range(len(variants) - 1):
+ if left in warned:
+ continue
+ left_key = variants[left].key
+ key_string = None
+ for right in range(left+1, len(variants)):
+ if left_key.equals(variants[right].key):
+ if key_string is None:
+ key_string = serialize_variant_key(left_key)
+ self.messages.append(
+ (
+ 'warning', left_key.span.start,
+ MSGS['duplicate-variant'].format(
+ name=key_string
+ )
+ )
+ )
+ warned.add(right)
+ self.messages.append(
+ (
+ 'warning', variants[right].key.span.start,
+ MSGS['duplicate-variant'].format(
+ name=key_string
+ )
+ )
+ )
+ # Check for plural categories
+ known_plurals = plurals.get_plural(self.locale)
+ if known_plurals:
+ known_plurals = set(known_plurals)
+ # Ask for known plurals, but check for plurals w/out `other`.
+ # `other` is used for all kinds of things.
+ check_plurals = known_plurals.copy()
+ check_plurals.discard('other')
+ given_plurals = {serialize_variant_key(v.key) for v in variants}
+ if given_plurals & check_plurals:
+ missing_plurals = sorted(known_plurals - given_plurals)
+ if missing_plurals:
+ self.messages.append(
+ (
+ 'warning', variants[0].key.span.start,
+ MSGS['missing-plural'].format(
+ categories=', '.join(missing_plurals)
+ )
+ )
+ )
+
+
+class L10nMessageVisitor(GenericL10nChecks, ReferenceMessageVisitor):
+ def __init__(self, locale, reference):
+ super().__init__()
+ self.locale = locale
+ # Overload refs to map to sets, just store what we found
+ # References to Messages, their Attributes, and Terms
+ # Store reference name and type
+ self.entry_refs = defaultdict(set)
+ # The currently active references
+ self.refs = set()
+ # Start with the Entry value (associated with None)
+ self.entry_refs[None] = self.refs
+ self.reference = reference
+ self.reference_refs = reference.entry_refs[None]
+ self.messages = []
+
+ def visit_Message(self, node):
+ self.check_duplicate_attributes(node)
+ super().visit_Message(node)
+ if self.message_has_value and not self.reference.message_has_value:
+ self.messages.append(
+ ('error', node.value.span.start, MSGS['obsolete-value'])
+ )
+ if not self.message_has_value and self.reference.message_has_value:
+ self.messages.append(
+ ('error', 0, MSGS['missing-value'])
+ )
+ ref_attrs = set(self.reference.attribute_positions)
+ l10n_attrs = set(self.attribute_positions)
+ for missing_attr in ref_attrs - l10n_attrs:
+ self.messages.append(
+ (
+ 'error', 0,
+ MSGS['missing-attribute'].format(name=missing_attr)
+ )
+ )
+ for obs_attr in l10n_attrs - ref_attrs:
+ self.messages.append(
+ (
+ 'error', self.attribute_positions[obs_attr],
+ MSGS['obsolete-attribute'].format(name=obs_attr)
+ )
+ )
+
+ def visit_Term(self, node):
+ raise RuntimeError("Should not use L10nMessageVisitor for Terms")
+
+ def visit_Attribute(self, node):
+ old_reference_refs = self.reference_refs
+ self.reference_refs = self.reference.entry_refs[node.id.name]
+ super().visit_Attribute(node)
+ self.reference_refs = old_reference_refs
+ if node.id.name != 'style' or self.css_styles == 'skip':
+ return
+ ref_styles = self.reference.css_styles
+ if ref_styles in ('skip', None):
+ # Reference is complex, l10n isn't.
+ # Let's still validate the css spec.
+ ref_styles = {}
+ for cat, msg, pos, _ in self.check_style(
+ ref_styles,
+ self.css_styles,
+ self.css_errors
+ ):
+ self.messages.append((cat, msg, pos))
+
+ def visit_SelectExpression(self, node):
+ super().visit_SelectExpression(node)
+ self.check_variants(node.variants)
+
+ def visit_MessageReference(self, node):
+ ref = node.id.name
+ if node.attribute:
+ ref += '.' + node.attribute.name
+ self.refs.add(ref)
+ self.check_obsolete_ref(node, ref, 'msg-ref')
+
+ def visit_TermReference(self, node):
+ if node.attribute:
+ return
+ ref = '-' + node.id.name
+ self.refs.add(ref)
+ self.check_obsolete_ref(node, ref, 'term-ref')
+
+ def check_obsolete_ref(self, node, ref, ref_type):
+ if ref not in self.reference_refs:
+ self.messages.append(
+ (
+ 'warning', node.span.start,
+ MSGS['obsolete-' + ref_type].format(ref=ref),
+ )
+ )
+
+
+class TermVisitor(GenericL10nChecks, Visitor):
+ def __init__(self, locale):
+ super().__init__()
+ self.locale = locale
+ self.messages = []
+
+ def generic_visit(self, node):
+ if isinstance(
+ node,
+ (ftl.Span, ftl.Annotation, ftl.BaseComment)
+ ):
+ return
+ super().generic_visit(node)
+
+ def visit_Message(self, node):
+ raise RuntimeError("Should not use TermVisitor for Messages")
+
+ def visit_Term(self, node):
+ self.check_duplicate_attributes(node)
+ super().generic_visit(node)
+
+ def visit_SelectExpression(self, node):
+ super().generic_visit(node)
+ self.check_variants(node.variants)
+
+
+class FluentChecker(Checker):
+ '''Tests to run on Fluent (FTL) files.
+ '''
+ pattern = re.compile(r'.*\.ftl$')
+
+ def check_message(self, ref_entry, l10n_entry):
+ '''Run checks on localized messages against reference message.'''
+ ref_data = ReferenceMessageVisitor()
+ ref_data.visit(ref_entry)
+ l10n_data = L10nMessageVisitor(self.locale, ref_data)
+ l10n_data.visit(l10n_entry)
+
+ messages = l10n_data.messages
+ for attr_or_val, refs in ref_data.entry_refs.items():
+ for ref, ref_type in refs.items():
+ if ref not in l10n_data.entry_refs[attr_or_val]:
+ msg = MSGS['missing-' + ref_type].format(ref=ref)
+ messages.append(('warning', 0, msg))
+ return messages
+
+ def check_term(self, l10n_entry):
+ '''Check localized terms.'''
+ l10n_data = TermVisitor(self.locale)
+ l10n_data.visit(l10n_entry)
+ return l10n_data.messages
+
+ def check(self, refEnt, l10nEnt):
+ yield from super().check(refEnt, l10nEnt)
+ l10n_entry = l10nEnt.entry
+ if isinstance(l10n_entry, ftl.Message):
+ ref_entry = refEnt.entry
+ messages = self.check_message(ref_entry, l10n_entry)
+ elif isinstance(l10n_entry, ftl.Term):
+ messages = self.check_term(l10n_entry)
+ else:
+ # neither a Message nor a Term, nothing more to check
+ return
+
+ messages.sort(key=lambda t: t[1])
+ for cat, pos, msg in messages:
+ if pos:
+ pos = pos - l10n_entry.span.start
+ yield (cat, pos, msg, 'fluent')
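+
+
+# Sketch of the overall flow (hypothetical FTL snippets): for a reference
+# message `ok = Click { button }` and a translation `ok = Klick`, the
+# ReferenceMessageVisitor records the `button` message reference, the
+# L10nMessageVisitor records none, and check_message() reports
+# ('warning', 0, 'Missing message reference: button').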
diff --git a/third_party/python/compare_locales/compare_locales/checks/properties.py b/third_party/python/compare_locales/compare_locales/checks/properties.py
new file mode 100644
index 0000000000..549e8533b6
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/checks/properties.py
@@ -0,0 +1,162 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+from difflib import SequenceMatcher
+
+from compare_locales.parser import PropertiesEntity
+from compare_locales import plurals
+from .base import Checker
+
+
+class PrintfException(Exception):
+ def __init__(self, msg, pos):
+ self.pos = pos
+ self.msg = msg
+
+
+class PropertiesChecker(Checker):
+ '''Tests to run on .properties files.
+ '''
+ pattern = re.compile(r'.*\.properties$')
+ printf = re.compile(r'%(?P<good>%|'
+ r'(?:(?P<number>[1-9][0-9]*)\$)?'
+ r'(?P<width>\*|[0-9]+)?'
+ r'(?P<prec>\.(?:\*|[0-9]+)?)?'
+ r'(?P<spec>[duxXosScpfg]))?')
+
+ def check(self, refEnt, l10nEnt):
+ '''Test for the different variable formats.
+ '''
+ yield from super().check(refEnt, l10nEnt)
+ refValue, l10nValue = refEnt.val, l10nEnt.val
+ refSpecs = None
+ # check for PluralForm.jsm stuff; those entities should have the
+ # docs in their comment
+ # That also includes intl.properties' pluralRule, so exclude
+ # entities with that key and values that are just numbers
+ if (refEnt.pre_comment
+ and 'Localization_and_Plurals' in refEnt.pre_comment.all
+ and refEnt.key != 'pluralRule'
+ and not re.match(r'\d+$', refValue)):
+ yield from self.check_plural(refValue, l10nValue)
+ return
+ # check for lost escapes
+ raw_val = l10nEnt.raw_val
+ for m in PropertiesEntity.escape.finditer(raw_val):
+ if m.group('single') and \
+ m.group('single') not in PropertiesEntity.known_escapes:
+ yield ('warning', m.start(),
+ 'unknown escape sequence, \\' + m.group('single'),
+ 'escape')
+ try:
+ refSpecs = self.getPrintfSpecs(refValue)
+ except PrintfException:
+ refSpecs = []
+ if refSpecs:
+ yield from self.checkPrintf(refSpecs, l10nValue)
+ return
+
+ def check_plural(self, refValue, l10nValue):
+ '''Check for the stringbundle plurals logic.
+ The common variable pattern is #1.
+ '''
+ known_plurals = plurals.get_plural(self.locale)
+ if known_plurals:
+ expected_forms = len(known_plurals)
+ found_forms = l10nValue.count(';') + 1
+ msg = 'expecting {} plurals, found {}'.format(
+ expected_forms,
+ found_forms
+ )
+ if expected_forms != found_forms:
+ yield ('warning', 0, msg, 'plural')
+ pats = {int(m.group(1)) for m in re.finditer('#([0-9]+)', refValue)}
+ if len(pats) == 0:
+ return
+ lpats = {int(m.group(1)) for m in re.finditer('#([0-9]+)', l10nValue)}
+ if pats - lpats:
+ yield ('warning', 0, 'not all variables used in l10n',
+ 'plural')
+ return
+ if lpats - pats:
+ yield ('error', 0, 'unreplaced variables in l10n',
+ 'plural')
+
+ def checkPrintf(self, refSpecs, l10nValue):
+ try:
+ l10nSpecs = self.getPrintfSpecs(l10nValue)
+ except PrintfException as e:
+ yield ('error', e.pos, e.msg, 'printf')
+ return
+ if refSpecs != l10nSpecs:
+ sm = SequenceMatcher()
+ sm.set_seqs(refSpecs, l10nSpecs)
+ msgs = []
+ warn = None
+ for action, i1, i2, j1, j2 in sm.get_opcodes():
+ if action == 'equal':
+ continue
+ if action == 'delete':
+ # missing argument in l10n
+ if i2 == len(refSpecs):
+ # trailing specs missing, that's just a warning
+ warn = ', '.join('trailing argument %d `%s` missing' %
+ (i+1, refSpecs[i])
+ for i in range(i1, i2))
+ else:
+ for i in range(i1, i2):
+ msgs.append('argument %d `%s` missing' %
+ (i+1, refSpecs[i]))
+ continue
+ if action == 'insert':
+ # obsolete argument in l10n
+ for i in range(j1, j2):
+ msgs.append('argument %d `%s` obsolete' %
+ (i+1, l10nSpecs[i]))
+ continue
+ if action == 'replace':
+ for i, j in zip(range(i1, i2), range(j1, j2)):
+ msgs.append('argument %d `%s` should be `%s`' %
+ (j+1, l10nSpecs[j], refSpecs[i]))
+ if msgs:
+ yield ('error', 0, ', '.join(msgs), 'printf')
+ if warn is not None:
+ yield ('warning', 0, warn, 'printf')
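+
+ # Sketch of the diffing above (hypothetical specs): for refSpecs
+ # ['d', 's'] and a translation that ends early, like '%d', the
+ # SequenceMatcher yields a trailing ('delete', 1, 2, 1, 1) opcode,
+ # which is downgraded to the warning
+ # "trailing argument 2 `s` missing".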
+
+ def getPrintfSpecs(self, val):
+ hasNumber = False
+ specs = []
+ for m in self.printf.finditer(val):
+ if m.group("good") is None:
+ # found just a '%', signal an error
+ raise PrintfException('Found single %', m.start())
+ if m.group("good") == '%':
+ # escaped %
+ continue
+ if ((hasNumber and m.group('number') is None) or
+ (not hasNumber and specs and
+ m.group('number') is not None)):
+ # mixed style, numbered and not
+ raise PrintfException('Mixed ordered and non-ordered args',
+ m.start())
+ hasNumber = m.group('number') is not None
+ if hasNumber:
+ pos = int(m.group('number')) - 1
+ ls = len(specs)
+ if pos >= ls:
+ # pad specs
+ nones = pos - ls
+ specs[ls:pos] = nones*[None]
+ specs.append(m.group('spec'))
+ else:
+ specs[pos] = m.group('spec')
+ else:
+ specs.append(m.group('spec'))
+ # check for missing args
+ if hasNumber and not all(specs):
+ raise PrintfException('Ordered argument missing', 0)
+ return specs
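+
+
+# Illustrative doctest (not part of the original module):
+#
+#   >>> PropertiesChecker(None).getPrintfSpecs('%d of %S')
+#   ['d', 'S']
+#   >>> PropertiesChecker(None).getPrintfSpecs('%2$S %1$s')
+#   ['s', 'S']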
diff --git a/third_party/python/compare_locales/compare_locales/commands.py b/third_party/python/compare_locales/compare_locales/commands.py
new file mode 100644
index 0000000000..58266e308a
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/commands.py
@@ -0,0 +1,203 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'Commands exposed to commandlines'
+
+import logging
+from argparse import ArgumentParser
+from json import dump as json_dump
+import os
+import sys
+
+from compare_locales import mozpath
+from compare_locales import version
+from compare_locales.paths import EnumerateApp, TOMLParser, ConfigNotFound
+from compare_locales.compare import compareProjects
+
+
+class CompareLocales:
+ """Check the localization status of gecko applications.
+The first arguments are paths to the l10n.toml or ini files for the
+applications, followed by the base directory of the localization repositories.
+Then you pass in the list of locale codes you want to compare. If there are
+no locales given, the list of locales will be taken from the l10n.toml file
+or the all-locales file referenced by the application's l10n.ini."""
+
+ def __init__(self):
+ self.parser = self.get_parser()
+
+ def get_parser(self):
+ """Get an ArgumentParser, with class docstring as description.
+ """
+ parser = ArgumentParser(description=self.__doc__)
+ parser.add_argument('--version', action='version',
+ version='%(prog)s ' + version)
+ parser.add_argument('-v', '--verbose', action='count',
+ default=0, help='Make more noise')
+ parser.add_argument('-q', '--quiet', action='count',
+ default=0, help='''Show less data.
+Specified once, don't show obsolete entities. Specified twice, also hide
+missing entities. Specified three times, also exclude warnings, and four
+times, just show stats''')
+ parser.add_argument('--validate', action='store_true',
+ help='Run compare-locales against reference')
+ parser.add_argument('-m', '--merge',
+ help='''Use this directory to stage merged files,
+use {ab_CD} to specify a different directory for each locale''')
+ parser.add_argument('config_paths', metavar='l10n.toml', nargs='+',
+ help='TOML or INI file for the project')
+ parser.add_argument('l10n_base_dir', metavar='l10n-base-dir',
+ help='Parent directory of localizations')
+ parser.add_argument('locales', nargs='*', metavar='locale-code',
+ help='Locale code and top-level directory of '
+ 'each localization')
+ parser.add_argument('--json',
+ help='''Serialize to JSON. Value is the name of
+the output file, pass "-" to serialize to stdout and hide the default output.
+''')
+ parser.add_argument('-D', action='append', metavar='var=value',
+ default=[], dest='defines',
+ help='Overwrite variables in TOML files')
+ parser.add_argument('--full', action="store_true",
+ help="Compare sub-projects that are disabled")
+ parser.add_argument('--return-zero', action="store_true",
+ help="Return 0 regardless of l10n status")
+ parser.add_argument('--clobber-merge', action="store_true",
+ default=False, dest='clobber',
+ help="""WARNING: DATALOSS.
+Use this option with care. If specified, the merge directory will
+be clobbered for each module. That means, the subdirectory will
+be completely removed, any files that were there are lost.
+Be careful to specify the right merge directory when using this option.""")
+ return parser
+
+ @classmethod
+ def call(cls):
+ """Entry_point for setuptools.
+ The actual command handling is done in the handle() method of the
+ subclasses.
+ """
+ cmd = cls()
+ args = cmd.parser.parse_args()
+ return cmd.handle(**vars(args))
+
+ def handle(
+ self,
+ quiet=0, verbose=0,
+ validate=False,
+ merge=None,
+ config_paths=[], l10n_base_dir=None, locales=[],
+ defines=[],
+ full=False,
+ return_zero=False,
+ clobber=False,
+ json=None,
+ ):
+ """The instance part of the classmethod call.
+
+ Using keyword arguments as that is what we need for mach
+ commands in mozilla-central.
+ """
+ # log as verbose or quiet as we want, warn by default
+ logging_level = logging.WARNING - (verbose - quiet) * 10
+ logging.basicConfig()
+ logging.getLogger().setLevel(logging_level)
+
+ config_paths, l10n_base_dir, locales = self.extract_positionals(
+ validate=validate,
+ config_paths=config_paths,
+ l10n_base_dir=l10n_base_dir,
+ locales=locales,
+ )
+
+ # when we compare disabled projects, we set our locales
+ # on all subconfigs, so deep is True.
+ locales_deep = full
+ configs = []
+ config_env = {
+ 'l10n_base': l10n_base_dir
+ }
+ for define in defines:
+ var, _, value = define.partition('=')
+ config_env[var] = value
+ for config_path in config_paths:
+ if config_path.endswith('.toml'):
+ try:
+ config = TOMLParser().parse(config_path, env=config_env)
+ except ConfigNotFound as e:
+ self.parser.exit('config file %s not found' % e.filename)
+ if locales_deep:
+ if not locales:
+ # no explicit locales given, force all locales
+ config.set_locales(config.all_locales, deep=True)
+ else:
+ config.set_locales(locales, deep=True)
+ configs.append(config)
+ else:
+ app = EnumerateApp(config_path, l10n_base_dir)
+ configs.append(app.asConfig())
+ try:
+ observers = compareProjects(
+ configs,
+ locales,
+ l10n_base_dir,
+ quiet=quiet,
+ merge_stage=merge, clobber_merge=clobber)
+ except OSError as exc:
+ print("FAIL: " + str(exc))
+ self.parser.exit(2)
+
+ if json is None or json != '-':
+ details = observers.serializeDetails()
+ if details:
+ print(details)
+ if len(configs) > 1:
+ if details:
+ print('')
+ print("Summaries for")
+ for config_path in config_paths:
+ print(" " + config_path)
+ print(" and the union of these, counting each string once")
+ print(observers.serializeSummaries())
+ if json is not None:
+ data = [observer.toJSON() for observer in observers]
+ stdout = json == '-'
+ indent = 1 if stdout else None
+ fh = sys.stdout if stdout else open(json, 'w')
+ json_dump(data, fh, sort_keys=True, indent=indent)
+ if stdout:
+ fh.write('\n')
+ fh.close()
+ rv = 1 if not return_zero and observers.error else 0
+ return rv
+
+ def extract_positionals(
+ self,
+ validate=False,
+ config_paths=[], l10n_base_dir=None, locales=[],
+ ):
+ # using nargs multiple times in argparse totally screws things
+ # up, repair that.
+ # First files are configs, then the base dir, everything else is
+ # locales
+ all_args = config_paths + [l10n_base_dir] + locales
+ config_paths = []
+ # The first directory is our l10n base, split there.
+ while all_args and not os.path.isdir(all_args[0]):
+ config_paths.append(all_args.pop(0))
+ if not config_paths:
+ self.parser.error('no configuration file given')
+ for cf in config_paths:
+ if not os.path.isfile(cf):
+ self.parser.error('config file %s not found' % cf)
+ if not all_args:
+ self.parser.error('l10n-base-dir not found')
+ l10n_base_dir = mozpath.abspath(all_args.pop(0))
+ if validate:
+ # signal validation mode by setting locale list to [None]
+ locales = [None]
+ else:
+ locales = all_args
+
+ return config_paths, l10n_base_dir, locales
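+
+
+# Example invocation (illustrative paths):
+#
+#   compare-locales browser/l10n.toml ~/l10n-central de fr
+#
+# extract_positionals() repairs argparse's greedy split into
+# config_paths=['browser/l10n.toml'], l10n_base_dir='~/l10n-central'
+# (the first existing directory), and locales=['de', 'fr'].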
diff --git a/third_party/python/compare_locales/compare_locales/compare/__init__.py b/third_party/python/compare_locales/compare_locales/compare/__init__.py
new file mode 100644
index 0000000000..6d4f3735bf
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/compare/__init__.py
@@ -0,0 +1,89 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'Mozilla l10n compare locales tool'
+
+import os
+import shutil
+
+from compare_locales import paths, mozpath
+
+from .content import ContentComparer
+from .observer import Observer, ObserverList
+from .utils import Tree, AddRemove
+
+
+__all__ = [
+ 'ContentComparer',
+ 'Observer', 'ObserverList',
+ 'AddRemove', 'Tree',
+ 'compareProjects',
+]
+
+
+def compareProjects(
+ project_configs,
+ locales,
+ l10n_base_dir,
+ stat_observer=None,
+ merge_stage=None,
+ clobber_merge=False,
+ quiet=0,
+ ):
+ all_locales = set(locales)
+ comparer = ContentComparer(quiet)
+ observers = comparer.observers
+ for project in project_configs:
+ # disable filter if we're in validation mode
+ if None in locales:
+ filter = None
+ else:
+ filter = project.filter
+ observers.append(
+ Observer(
+ quiet=quiet,
+ filter=filter,
+ ))
+ if not locales:
+ all_locales.update(project.all_locales)
+ for locale in sorted(all_locales):
+ files = paths.ProjectFiles(locale, project_configs,
+ mergebase=merge_stage)
+ if merge_stage is not None:
+ if clobber_merge:
+ mergematchers = {_m.get('merge') for _m in files.matchers}
+ mergematchers.discard(None)
+ for matcher in mergematchers:
+ clobberdir = matcher.prefix
+ if os.path.exists(clobberdir):
+ shutil.rmtree(clobberdir)
+ print("clobbered " + clobberdir)
+ for l10npath, refpath, mergepath, extra_tests in files:
+ # module and file path are needed for legacy filter.py support
+ module = None
+ fpath = mozpath.relpath(l10npath, l10n_base_dir)
+ for _m in files.matchers:
+ if _m['l10n'].match(l10npath):
+ if _m['module']:
+ # legacy ini support, set module, and resolve
+ # local path against the matcher prefix,
+ # which includes the module
+ module = _m['module']
+ fpath = mozpath.relpath(l10npath, _m['l10n'].prefix)
+ break
+ reffile = paths.File(refpath, fpath or refpath, module=module)
+ if locale is None:
+ # When validating the reference files, set locale
+ # to a private subtag. This only shows in the output.
+ locale = paths.REFERENCE_LOCALE
+ l10n = paths.File(l10npath, fpath or l10npath,
+ module=module, locale=locale)
+ if not os.path.exists(l10npath):
+ comparer.add(reffile, l10n, mergepath)
+ continue
+ if not os.path.exists(refpath):
+ comparer.remove(reffile, l10n, mergepath)
+ continue
+ comparer.compare(reffile, l10n, mergepath, extra_tests)
+ return observers
diff --git a/third_party/python/compare_locales/compare_locales/compare/content.py b/third_party/python/compare_locales/compare_locales/compare/content.py
new file mode 100644
index 0000000000..1e879a643c
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/compare/content.py
@@ -0,0 +1,304 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'Mozilla l10n compare locales tool'
+
+import codecs
+import os
+import shutil
+import re
+
+from compare_locales import parser
+from compare_locales import mozpath
+from compare_locales.checks import getChecker, EntityPos
+from compare_locales.keyedtuple import KeyedTuple
+
+from .observer import ObserverList
+from .utils import AddRemove
+
+
+class ContentComparer:
+ keyRE = re.compile('[kK]ey')
+ nl = re.compile('\n', re.M)
+
+ def __init__(self, quiet=0):
+ '''Create a ContentComparer.
+ observer is usually an instance of Observer. The return values
+ of the notify method are used to control the handling of missing
+ entities.
+ '''
+ self.observers = ObserverList(quiet=quiet)
+
+ def create_merge_dir(self, merge_file):
+ outdir = mozpath.dirname(merge_file)
+ os.makedirs(outdir, exist_ok=True)
+
+ def merge(self, ref_entities, ref_file, l10n_file, merge_file,
+ missing, skips, ctx, capabilities, encoding):
+ '''Create localized file in merge dir
+
+ `ref_entities` is the parser result of the reference file
+ `ref_file` and `l10n_file` are the File objects for the reference and
+ the l10n file, resp.
+ `merge_file` is the output path for the generated content. This is None
+ if we're just comparing or validating.
+ `missing` are the missing messages in l10n - potentially copied from
+ reference
+ `skips` are entries to be dropped from the localized file
+ `ctx` is the parsing context
+ `capabilities` are the capabilities for the merge algorithm
+ `encoding` is the encoding to be used when serializing, usually utf-8
+ '''
+
+ if not merge_file:
+ return
+
+ if capabilities == parser.CAN_NONE:
+ return
+
+ self.create_merge_dir(merge_file)
+
+ if capabilities & parser.CAN_COPY:
+ # copy the l10n file if it's good, or the reference file if not
+ if skips or missing:
+ src = ref_file.fullpath
+ else:
+ src = l10n_file.fullpath
+ shutil.copyfile(src, merge_file)
+ print("copied reference to " + merge_file)
+ return
+
+ if not (capabilities & parser.CAN_SKIP):
+ return
+
+ # Start with None in case the merge file doesn't need to be created.
+ f = None
+
+ if skips:
+ # skips arrive ordered by key name; we need them in file order
+ skips.sort(key=lambda s: s.span[0])
+
+ # we need to skip a few erroneous blocks in the input, copy by hand
+ f = codecs.open(merge_file, 'wb', encoding)
+ offset = 0
+ for skip in skips:
+ chunk = skip.span
+ f.write(ctx.contents[offset:chunk[0]])
+ offset = chunk[1]
+ f.write(ctx.contents[offset:])
+
+ if f is None:
+ # l10n file is a good starting point
+ shutil.copyfile(l10n_file.fullpath, merge_file)
+
+ if not (capabilities & parser.CAN_MERGE):
+ if f:
+ f.close()
+ return
+
+ if skips or missing:
+ if f is None:
+ f = codecs.open(merge_file, 'ab', encoding)
+ trailing = (['\n'] +
+ [ref_entities[key].all for key in missing] +
+ [ref_entities[skip.key].all for skip in skips
+ if not isinstance(skip, parser.Junk)])
+
+ def ensureNewline(s):
+ if not s.endswith('\n'):
+ return s + '\n'
+ return s
+
+ print("adding to " + merge_file)
+ f.write(''.join(map(ensureNewline, trailing)))
+
+ if f is not None:
+ f.close()
+
+ def remove(self, ref_file, l10n, merge_file):
+ '''Obsolete l10n file.
+
+ Copy to merge stage if we can.
+ '''
+ self.observers.notify('obsoleteFile', l10n, None)
+ self.merge(
+ KeyedTuple([]), ref_file, l10n, merge_file,
+ [], [], None, parser.CAN_COPY, None
+ )
+
+ def compare(self, ref_file, l10n, merge_file, extra_tests=None):
+ try:
+ p = parser.getParser(ref_file.file)
+ except UserWarning:
+ # no comparison, XXX report?
+ # At least, merge
+ self.merge(
+ KeyedTuple([]), ref_file, l10n, merge_file, [], [], None,
+ parser.CAN_COPY, None)
+ return
+ try:
+ p.readFile(ref_file)
+ except Exception as e:
+ self.observers.notify('error', ref_file, str(e))
+ return
+ ref_entities = p.parse()
+ try:
+ p.readFile(l10n)
+ l10n_entities = p.parse()
+ l10n_ctx = p.ctx
+ except Exception as e:
+ self.observers.notify('error', l10n, str(e))
+ return
+
+ ar = AddRemove()
+ ar.set_left(ref_entities.keys())
+ ar.set_right(l10n_entities.keys())
+ report = missing = obsolete = changed = unchanged = keys = 0
+ missing_w = changed_w = unchanged_w = 0 # word stats
+ missings = []
+ skips = []
+ checker = getChecker(l10n, extra_tests=extra_tests)
+ if checker and checker.needs_reference:
+ checker.set_reference(ref_entities)
+ for msg in p.findDuplicates(ref_entities):
+ self.observers.notify('warning', l10n, msg)
+ for msg in p.findDuplicates(l10n_entities):
+ self.observers.notify('error', l10n, msg)
+ for action, entity_id in ar:
+ if action == 'delete':
+ # missing entity
+ if isinstance(ref_entities[entity_id], parser.Junk):
+ self.observers.notify(
+ 'warning', l10n, 'Parser error in en-US'
+ )
+ continue
+ _rv = self.observers.notify('missingEntity', l10n, entity_id)
+ if _rv == "ignore":
+ continue
+ if _rv == "error":
+ # only add to missing entities for l10n-merge on error,
+ # not report
+ missings.append(entity_id)
+ missing += 1
+ refent = ref_entities[entity_id]
+ missing_w += refent.count_words()
+ else:
+ # just report
+ report += 1
+ elif action == 'add':
+ # obsolete entity or junk
+ if isinstance(l10n_entities[entity_id],
+ parser.Junk):
+ junk = l10n_entities[entity_id]
+ self.observers.notify(
+ 'error', l10n,
+ junk.error_message()
+ )
+ if merge_file is not None:
+ skips.append(junk)
+ elif (
+ self.observers.notify('obsoleteEntity', l10n, entity_id)
+ != 'ignore'
+ ):
+ obsolete += 1
+ else:
+ # entity found in both ref and l10n, check for changed
+ refent = ref_entities[entity_id]
+ l10nent = l10n_entities[entity_id]
+ if self.keyRE.search(entity_id):
+ keys += 1
+ else:
+ if refent.equals(l10nent):
+ self.doUnchanged(l10nent)
+ unchanged += 1
+ unchanged_w += refent.count_words()
+ else:
+ self.doChanged(ref_file, refent, l10nent)
+ changed += 1
+ changed_w += refent.count_words()
+ # run checks:
+ if checker:
+ for tp, pos, msg, cat in checker.check(refent, l10nent):
+ if isinstance(pos, EntityPos):
+ line, col = l10nent.position(pos)
+ else:
+ line, col = l10nent.value_position(pos)
+ # skip error entities when merging
+ if tp == 'error' and merge_file is not None:
+ skips.append(l10nent)
+ self.observers.notify(
+ tp, l10n,
+ "%s at line %d, column %d for %s" %
+ (msg, line, col, refent.key)
+ )
+
+ if merge_file is not None:
+ self.merge(
+ ref_entities, ref_file,
+ l10n, merge_file, missings, skips, l10n_ctx,
+ p.capabilities, p.encoding)
+
+ stats = {
+ 'missing': missing,
+ 'missing_w': missing_w,
+ 'report': report,
+ 'obsolete': obsolete,
+ 'changed': changed,
+ 'changed_w': changed_w,
+ 'unchanged': unchanged,
+ 'unchanged_w': unchanged_w,
+ 'keys': keys,
+ }
+ self.observers.updateStats(l10n, stats)
+
+ def add(self, orig, missing, merge_file):
+ ''' Add missing localized file.'''
+ f = orig
+ try:
+ p = parser.getParser(f.file)
+ except UserWarning:
+ p = None
+
+ # if we don't support this file, assume CAN_COPY to mimic
+ # l10n dir as closely as possible
+ caps = p.capabilities if p else parser.CAN_COPY
+ if (caps & (parser.CAN_COPY | parser.CAN_MERGE)):
+ # even if we can merge, pretend we can only copy
+ self.merge(
+ KeyedTuple([]), orig, missing, merge_file,
+ ['trigger copy'], [], None, parser.CAN_COPY, None
+ )
+
+ if self.observers.notify('missingFile', missing, None) == "ignore":
+ # filter said that we don't need this file, don't count it
+ return
+
+ if p is None:
+ # We don't have a parser, cannot count missing strings
+ return
+
+ try:
+ p.readFile(f)
+ entities = p.parse()
+ except Exception as ex:
+ self.observers.notify('error', f, str(ex))
+ return
+ # strip parse errors
+ entities = [e for e in entities if not isinstance(e, parser.Junk)]
+ self.observers.updateStats(missing, {'missing': len(entities)})
+ missing_w = 0
+ for e in entities:
+ missing_w += e.count_words()
+ self.observers.updateStats(missing, {'missing_w': missing_w})
+
+ def doUnchanged(self, entity):
+ # overload this if needed
+ pass
+
+ def doChanged(self, file, ref_entity, l10n_entity):
+ # overload this if needed
+ pass
diff --git a/third_party/python/compare_locales/compare_locales/compare/observer.py b/third_party/python/compare_locales/compare_locales/compare/observer.py
new file mode 100644
index 0000000000..d336a004b3
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/compare/observer.py
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'Mozilla l10n compare locales tool'
+
+from collections import defaultdict
+
+from .utils import Tree
+
+
+class Observer:
+
+ def __init__(self, quiet=0, filter=None):
+ '''Create Observer
+ For quiet=1, skip per-entity obsolete strings, for quiet=2,
+ also skip missing strings and missing and obsolete files.
+ For quiet=3, also skip warnings, and for quiet=4, errors.
+ '''
+ self.summary = defaultdict(lambda: {
+ "errors": 0,
+ "warnings": 0,
+ "missing": 0,
+ "missing_w": 0,
+ "report": 0,
+ "obsolete": 0,
+ "changed": 0,
+ "changed_w": 0,
+ "unchanged": 0,
+ "unchanged_w": 0,
+ "keys": 0,
+ })
+ self.details = Tree(list)
+ self.quiet = quiet
+ self.filter = filter
+ self.error = False
+
+ def _dictify(self, d):
+ plaindict = {}
+ for k, v in d.items():
+ plaindict[k] = dict(v)
+ return plaindict
+
+ def toJSON(self):
+ # Don't export file stats, even if we collected them.
+ # Those are not part of the data we use toJSON for.
+ return {
+ 'summary': self._dictify(self.summary),
+ 'details': self.details.toJSON()
+ }
+
+ def updateStats(self, file, stats):
+ # in multi-project scenarios, this file might not be ours,
+ # check that.
+ # Pass in a dummy entity key '' to avoid getting into
+ # generic file filters. If we have stats for those,
+ # we want to aggregate the counts
+ if (self.filter is not None and
+ self.filter(file, entity='') == 'ignore'):
+ return
+ for category, value in stats.items():
+ if category == 'errors':
+ # updateStats isn't called with `errors`, but make sure
+ # we handle this if that changes
+ self.error = True
+ self.summary[file.locale][category] += value
+
+ def notify(self, category, file, data):
+ rv = 'error'
+ if category in ['missingFile', 'obsoleteFile']:
+ if self.filter is not None:
+ rv = self.filter(file)
+ if rv == "ignore" or self.quiet >= 2:
+ return rv
+ if self.quiet == 0 or category == 'missingFile':
+ self.details[file].append({category: rv})
+ return rv
+ if self.filter is not None:
+ rv = self.filter(file, data)
+ if rv == "ignore":
+ return rv
+ if category in ['missingEntity', 'obsoleteEntity']:
+ if (
+ (category == 'missingEntity' and self.quiet < 2)
+ or (category == 'obsoleteEntity' and self.quiet < 1)
+ ):
+ self.details[file].append({category: data})
+ return rv
+ if category == 'error':
+ # Set error independently of quiet
+ self.error = True
+ if category in ('error', 'warning'):
+ if (
+ (category == 'error' and self.quiet < 4)
+ or (category == 'warning' and self.quiet < 3)
+ ):
+ self.details[file].append({category: data})
+ self.summary[file.locale][category + 's'] += 1
+ return rv
+
+
+class ObserverList(Observer):
+ def __init__(self, quiet=0):
+ super().__init__(quiet=quiet)
+ self.observers = []
+
+ def __iter__(self):
+ return iter(self.observers)
+
+ def append(self, observer):
+ self.observers.append(observer)
+
+ def notify(self, category, file, data):
+ """Check observer for the found data, and if it's
+ not to ignore, notify stat_observers.
+ """
+ rvs = {
+ observer.notify(category, file, data)
+ for observer in self.observers
+ }
+ if all(rv == 'ignore' for rv in rvs):
+ return 'ignore'
+ # our return value doesn't count
+ super().notify(category, file, data)
+ rvs.discard('ignore')
+ if 'error' in rvs:
+ return 'error'
+ assert len(rvs) == 1
+ return rvs.pop()
+
+ def updateStats(self, file, stats):
+ """Check observer for the found data, and if it's
+ not to ignore, notify stat_observers.
+ """
+ for observer in self.observers:
+ observer.updateStats(file, stats)
+ super().updateStats(file, stats)
+
+ def serializeDetails(self):
+
+ def tostr(t):
+ if t[1] == 'key':
+ return ' ' * t[0] + '/'.join(t[2])
+ o = []
+ indent = ' ' * (t[0] + 1)
+ for item in t[2]:
+ if 'error' in item:
+ o += [indent + 'ERROR: ' + item['error']]
+ elif 'warning' in item:
+ o += [indent + 'WARNING: ' + item['warning']]
+ elif 'missingEntity' in item:
+ o += [indent + '+' + item['missingEntity']]
+ elif 'obsoleteEntity' in item:
+ o += [indent + '-' + item['obsoleteEntity']]
+ elif 'missingFile' in item:
+ o.append(indent + '// add and localize this file')
+ elif 'obsoleteFile' in item:
+ o.append(indent + '// remove this file')
+ return '\n'.join(o)
+
+ return '\n'.join(tostr(c) for c in self.details.getContent())
+
+ def serializeSummaries(self):
+ summaries = {
+ loc: []
+ for loc in self.summary.keys()
+ }
+ for observer in self.observers:
+ for loc, lst in summaries.items():
+ # Not all locales are on all projects,
+ # default to empty summary
+ lst.append(observer.summary.get(loc, {}))
+ if len(self.observers) > 1:
+ # add ourselves if there's more than one project
+ for loc, lst in summaries.items():
+ lst.append(self.summary[loc])
+ keys = (
+ 'errors',
+ 'warnings',
+ 'missing', 'missing_w',
+ 'obsolete',
+ 'changed', 'changed_w',
+ 'unchanged', 'unchanged_w',
+ 'keys',
+ )
+ leads = [
+ f'{k:12}' for k in keys
+ ]
+ out = []
+ for locale, loc_summaries in sorted(summaries.items()):
+ if locale:
+ out.append(locale + ':')
+ segment = [''] * len(keys)
+ for summary in loc_summaries:
+ for row, key in enumerate(keys):
+ segment[row] += ' {:6}'.format(summary.get(key) or '')
+
+ out += [
+ lead + row
+ for lead, row in zip(leads, segment)
+ if row.strip()
+ ]
+
+ total = sum(loc_summaries[-1].get(k, 0)
+ for k in ['changed', 'unchanged', 'report', 'missing']
+ )
+ rate = 0
+ if total:
+ # base the rate on the aggregated (last) summary, like `total`
+ rate = loc_summaries[-1].get('changed', 0) * 100 / total
+ out.append('%d%% of entries changed' % rate)
+ return '\n'.join(out)
+
+ def __str__(self):
+ return 'observer'
diff --git a/third_party/python/compare_locales/compare_locales/compare/utils.py b/third_party/python/compare_locales/compare_locales/compare/utils.py
new file mode 100644
index 0000000000..e298f80bc5
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/compare/utils.py
@@ -0,0 +1,133 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'Mozilla l10n compare locales tool'
+
+from compare_locales import paths
+
+
+class Tree:
+ def __init__(self, valuetype):
+ self.branches = dict()
+ self.valuetype = valuetype
+ self.value = None
+
+ def __getitem__(self, leaf):
+ parts = []
+ if isinstance(leaf, paths.File):
+ parts = []
+ if leaf.module:
+ parts += [leaf.locale] + leaf.module.split('/')
+ parts += leaf.file.split('/')
+ else:
+ parts = leaf.split('/')
+ return self.__get(parts)
+
+ def __get(self, parts):
+ common = None
+ old = None
+ new = tuple(parts)
+ t = self
+ for k, v in self.branches.items():
+ for i, part in enumerate(zip(k, parts)):
+ if part[0] != part[1]:
+ i -= 1
+ break
+ if i < 0:
+ continue
+ i += 1
+ common = tuple(k[:i])
+ old = tuple(k[i:])
+ new = tuple(parts[i:])
+ break
+ if old:
+ self.branches.pop(k)
+ t = Tree(self.valuetype)
+ t.branches[old] = v
+ self.branches[common] = t
+ elif common:
+ t = self.branches[common]
+ if new:
+ if common:
+ return t.__get(new)
+ t2 = t
+ t = Tree(self.valuetype)
+ t2.branches[new] = t
+ if t.value is None:
+ t.value = t.valuetype()
+ return t.value
+
+ indent = ' '
+
+ def getContent(self, depth=0):
+ '''
+ Returns iterator of (depth, flag, key_or_value) tuples.
+ If flag is 'value', key_or_value is a value object, otherwise
+ (flag is 'key') it's a key string.
+ '''
+ keys = sorted(self.branches.keys())
+ if self.value is not None:
+ yield (depth, 'value', self.value)
+ for key in keys:
+ yield (depth, 'key', key)
+ yield from self.branches[key].getContent(depth + 1)
+
+ def toJSON(self):
+ '''
+ Returns this Tree as a JSON-able tree of hashes.
+ Only the values need to take care that they're JSON-able.
+ '''
+ if self.value is not None:
+ return self.value
+ return {'/'.join(key): self.branches[key].toJSON()
+ for key in self.branches.keys()}
+
+ def getStrRows(self):
+ def tostr(t):
+ if t[1] == 'key':
+ return self.indent * t[0] + '/'.join(t[2])
+ return self.indent * (t[0] + 1) + str(t[2])
+
+ return [tostr(c) for c in self.getContent()]
+
+ def __str__(self):
+ return '\n'.join(self.getStrRows())
+
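+# Illustrative doctest for Tree (not part of the original module):
+#
+#   >>> t = Tree(list)
+#   >>> t['toolkit/a.dtd'].append({'missingEntity': 'foo'})
+#   >>> t.toJSON()
+#   {'toolkit/a.dtd': [{'missingEntity': 'foo'}]}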
+
+class AddRemove:
+ def __init__(self):
+ self.left = self.right = None
+
+ def set_left(self, left):
+ if not isinstance(left, list):
+ left = list(left)
+ self.left = left
+
+ def set_right(self, right):
+ if not isinstance(right, list):
+ right = list(right)
+ self.right = right
+
+ def __iter__(self):
+ # order_map stores index in left and then index in right
+ order_map = {item: (i, -1) for i, item in enumerate(self.left)}
+ left_items = set(order_map)
+ # as we go through the right side, keep track of which left
+ # item we had in right last, and for items not in left,
+ # set the sortmap to (left_offset, right_index)
+ left_offset = -1
+ right_items = set()
+ for i, item in enumerate(self.right):
+ right_items.add(item)
+ if item in order_map:
+ left_offset = order_map[item][0]
+ else:
+ order_map[item] = (left_offset, i)
+ for item in sorted(order_map, key=lambda item: order_map[item]):
+ if item in left_items and item in right_items:
+ yield ('equal', item)
+ elif item in left_items:
+ yield ('delete', item)
+ else:
+ yield ('add', item)
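+
+
+# Illustrative doctest for AddRemove (not part of the original module):
+#
+#   >>> ar = AddRemove()
+#   >>> ar.set_left(['a', 'b', 'c'])
+#   >>> ar.set_right(['a', 'c', 'd'])
+#   >>> list(ar)
+#   [('equal', 'a'), ('delete', 'b'), ('equal', 'c'), ('add', 'd')]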
diff --git a/third_party/python/compare_locales/compare_locales/integration_tests/__init__.py b/third_party/python/compare_locales/compare_locales/integration_tests/__init__.py
new file mode 100644
index 0000000000..ba9db8b8ec
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/integration_tests/__init__.py
@@ -0,0 +1,5 @@
+'''Tests that are not run by default.
+
+They might take a long time, depend on external services, or both.
+They might also fail due to external changes.
+'''
diff --git a/third_party/python/compare_locales/compare_locales/integration_tests/test_plurals.py b/third_party/python/compare_locales/compare_locales/integration_tests/test_plurals.py
new file mode 100644
index 0000000000..e63ff861f7
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/integration_tests/test_plurals.py
@@ -0,0 +1,51 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import json
+import unittest
+from urllib.error import URLError
+from urllib.request import urlopen
+
+from compare_locales import plurals
+
+
+TRANSVISION_URL = (
+ 'https://transvision.mozfr.org/'
+ 'api/v1/entity/gecko_strings/'
+ '?id=toolkit/chrome/global/intl.properties:pluralRule'
+)
+
+
+class TestPlural(unittest.TestCase):
+ '''Integration test for plural forms and l10n-central.
+
+ Having more plural forms than in l10n-central is OK; missing or
+ mismatching ones aren't.
+ Depends on Transvision.
+ '''
+ maxDiff = None
+
+ def test_valid_forms(self):
+ reference_form_map = self._load_transvision()
+ # Strip matches from dicts, to make diff for test small
+ locales = list(reference_form_map)
+ cl_form_map = {}
+ for locale in locales:
+ cl_form = str(plurals.get_plural_rule(locale))
+ if cl_form == reference_form_map[locale]:
+ reference_form_map.pop(locale)
+ else:
+ cl_form_map[locale] = cl_form
+ self.assertDictEqual(reference_form_map, cl_form_map)
+
+ def _load_transvision(self):
+ '''Use the Transvision API to load all values of pluralRule
+ in intl.properties.
+ Skip test on load failure.
+ '''
+ try:
+ data = urlopen(TRANSVISION_URL).read()
+ except URLError:
+ raise unittest.SkipTest("Couldn't load Transvision API.")
+ return json.loads(data)
diff --git a/third_party/python/compare_locales/compare_locales/keyedtuple.py b/third_party/python/compare_locales/compare_locales/keyedtuple.py
new file mode 100644
index 0000000000..af703e8fa2
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/keyedtuple.py
@@ -0,0 +1,55 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''A tuple with keys.
+
+A Sequence type that allows referring to its elements by key.
+This is immutable, because keeping track of mutations is hard.
+
+compare-locales uses strings for Entity keys, and tuples in the
+case of PO. Support both.
+
+In the interfaces that check for membership, dicts check keys and
+sequences check values. Always try our dict cache `__map` first,
+and fall back to the superclass implementation.
+'''
+
+
+class KeyedTuple(tuple):
+
+ def __new__(cls, iterable):
+ return super().__new__(cls, iterable)
+
+ def __init__(self, iterable):
+ self.__map = {}
+ if iterable:
+ for index, item in enumerate(self):
+ self.__map[item.key] = index
+
+ def __contains__(self, key):
+ try:
+ contains = key in self.__map
+ if contains:
+ return True
+ except TypeError:
+ pass
+ return super().__contains__(key)
+
+ def __getitem__(self, key):
+ try:
+ key = self.__map[key]
+ except (KeyError, TypeError):
+ pass
+ return super().__getitem__(key)
+
+ def keys(self):
+ for value in self:
+ yield value.key
+
+ def items(self):
+ for value in self:
+ yield value.key, value
+
+ def values(self):
+ return self
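+
+
+if __name__ == '__main__':
+    # Minimal usage sketch, not part of the upstream module: any object
+    # with a `key` attribute works; the namedtuple is just a stand-in.
+    from collections import namedtuple
+    Item = namedtuple('Item', ['key', 'val'])
+    kt = KeyedTuple([Item('hello', 'Hallo'), Item('bye', 'Tschüss')])
+    assert kt[0].val == 'Hallo'        # positional access still works
+    assert kt['bye'].val == 'Tschüss'  # lookup via the key cache
+    assert 'hello' in kt               # membership checks keys first
+    assert list(kt.keys()) == ['hello', 'bye']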
diff --git a/third_party/python/compare_locales/compare_locales/lint/__init__.py b/third_party/python/compare_locales/compare_locales/lint/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/lint/__init__.py
diff --git a/third_party/python/compare_locales/compare_locales/lint/cli.py b/third_party/python/compare_locales/compare_locales/lint/cli.py
new file mode 100644
index 0000000000..dc476e1b77
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/lint/cli.py
@@ -0,0 +1,93 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import os
+
+from compare_locales.lint.linter import L10nLinter
+from compare_locales.lint.util import (
+ default_reference_and_tests,
+ mirror_reference_and_tests,
+ l10n_base_reference_and_tests,
+)
+from compare_locales import mozpath
+from compare_locales import paths
+from compare_locales import parser
+from compare_locales import version
+
+
+epilog = '''\
+moz-l10n-lint checks for common mistakes in localizable files. It tests for
+duplicate entries, parsing errors, and the like. Optionally, it can compare
+the strings to an external string reference and warn if a string might
+need a new ID.
+'''
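+
+# Example invocations (illustrative paths, not canonical):
+#
+#   moz-l10n-lint browser/locales/l10n.toml
+#   moz-l10n-lint --l10n-reference ../gecko-strings browser/locales/l10n.toml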
+
+
+def main():
+ p = argparse.ArgumentParser(
+ description='Validate localizable strings',
+ epilog=epilog,
+ )
+ p.add_argument('l10n_toml')
+ p.add_argument(
+ '--version', action='version', version='%(prog)s ' + version
+ )
+ p.add_argument('-W', action='store_true', help='error on warnings')
+ p.add_argument(
+ '--l10n-reference',
+ dest='l10n_reference',
+ metavar='PATH',
+ help='check for conflicts against an l10n-only reference repository '
+ 'like gecko-strings',
+ )
+ p.add_argument(
+ '--reference-project',
+ dest='ref_project',
+ metavar='PATH',
+ help='check for conflicts against a reference project like '
+ 'android-l10n',
+ )
+ args = p.parse_args()
+ if args.l10n_reference:
+ l10n_base, locale = \
+ os.path.split(os.path.abspath(args.l10n_reference))
+ if not locale or not os.path.isdir(args.l10n_reference):
+ p.error('Pass an existing l10n reference')
+ else:
+ l10n_base = '.'
+ locale = None
+ pc = paths.TOMLParser().parse(args.l10n_toml, env={'l10n_base': l10n_base})
+ if locale:
+ pc.set_locales([locale], deep=True)
+ files = paths.ProjectFiles(locale, [pc])
+ get_reference_and_tests = default_reference_and_tests
+ if args.l10n_reference:
+ get_reference_and_tests = l10n_base_reference_and_tests(files)
+ elif args.ref_project:
+ get_reference_and_tests = mirror_reference_and_tests(
+ files, args.ref_project
+ )
+ linter = L10nLinter()
+ results = linter.lint(
+ (f for f, _, _, _ in files.iter_reference() if parser.hasParser(f)),
+ get_reference_and_tests
+ )
+ rv = 0
+ if results:
+ rv = 1
+ if all(r['level'] == 'warning' for r in results) and not args.W:
+ rv = 0
+ for result in results:
+ print('{} ({}:{}): {}'.format(
+ mozpath.relpath(result['path'], '.'),
+ result.get('lineno', 0),
+ result.get('column', 0),
+ result['message']
+ ))
+ return rv
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/python/compare_locales/compare_locales/lint/linter.py b/third_party/python/compare_locales/compare_locales/lint/linter.py
new file mode 100644
index 0000000000..a946608d97
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/lint/linter.py
@@ -0,0 +1,121 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from collections import Counter
+import os
+
+from compare_locales import parser, checks
+from compare_locales.paths import File, REFERENCE_LOCALE
+
+
+class L10nLinter:
+
+ def lint(self, files, get_reference_and_tests):
+ results = []
+ for path in files:
+ if not parser.hasParser(path):
+ continue
+ ref, extra_tests = get_reference_and_tests(path)
+ results.extend(self.lint_file(path, ref, extra_tests))
+ return results
+
+ def lint_file(self, path, ref, extra_tests):
+ file_parser = parser.getParser(path)
+ if ref is not None and os.path.isfile(ref):
+ file_parser.readFile(ref)
+ reference = file_parser.parse()
+ else:
+ reference = {}
+ file_parser.readFile(path)
+ current = file_parser.parse()
+ checker = checks.getChecker(
+ File(path, path, locale=REFERENCE_LOCALE),
+ extra_tests=extra_tests
+ )
+ if checker and checker.needs_reference:
+ checker.set_reference(current)
+ linter = EntityLinter(current, checker, reference)
+ for current_entity in current:
+ for result in linter.lint_entity(current_entity):
+ result['path'] = path
+ yield result
+
+
+class EntityLinter:
+ '''Factored out helper to run linters on a single entity.'''
+ def __init__(self, current, checker, reference):
+ self.key_count = Counter(entity.key for entity in current)
+ self.checker = checker
+ self.reference = reference
+
+ def lint_entity(self, current_entity):
+ res = self.handle_junk(current_entity)
+ if res:
+ yield res
+ return
+ for res in self.lint_full_entity(current_entity):
+ yield res
+ for res in self.lint_value(current_entity):
+ yield res
+
+ def lint_full_entity(self, current_entity):
+ '''Checks that pass or fail for the entity as a whole,
+ without pointing at a particular spot inside it.
+ '''
+ lineno = col = None
+ if self.key_count[current_entity.key] > 1:
+ lineno, col = current_entity.position()
+ yield {
+ 'lineno': lineno,
+ 'column': col,
+ 'level': 'error',
+ 'message': 'Duplicate string with ID: {}'.format(
+ current_entity.key
+ )
+ }
+
+ if current_entity.key in self.reference:
+ reference_entity = self.reference[current_entity.key]
+ if not current_entity.equals(reference_entity):
+ if lineno is None:
+ lineno, col = current_entity.position()
+ msg = 'Changes to string require a new ID: {}'.format(
+ current_entity.key
+ )
+ yield {
+ 'lineno': lineno,
+ 'column': col,
+ 'level': 'warning',
+ 'message': msg,
+ }
+
+ def lint_value(self, current_entity):
+ '''Checks that report errors at particular locations in the entity value.
+ '''
+ if self.checker:
+ for tp, pos, msg, cat in self.checker.check(
+ current_entity, current_entity
+ ):
+ if isinstance(pos, checks.EntityPos):
+ lineno, col = current_entity.position(pos)
+ else:
+ lineno, col = current_entity.value_position(pos)
+ yield {
+ 'lineno': lineno,
+ 'column': col,
+ 'level': tp,
+ 'message': msg,
+ }
+
+ def handle_junk(self, current_entity):
+ if not isinstance(current_entity, parser.Junk):
+ return None
+
+ lineno, col = current_entity.position()
+ return {
+ 'lineno': lineno,
+ 'column': col,
+ 'level': 'error',
+ 'message': current_entity.error_message()
+ }
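+
+
+if __name__ == '__main__':
+    # Minimal sketch, not part of the upstream module: lint a throw-away
+    # .properties file with a duplicate ID, no reference or extra tests.
+    import tempfile
+    with tempfile.NamedTemporaryFile(
+        mode='w', suffix='.properties', delete=False
+    ) as f:
+        f.write('greeting = one\ngreeting = two\n')
+    for result in L10nLinter().lint_file(f.name, None, None):
+        print(result['level'], result['message'])
+    # error Duplicate string with ID: greeting   (once per occurrence)
+    os.unlink(f.name)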
diff --git a/third_party/python/compare_locales/compare_locales/lint/util.py b/third_party/python/compare_locales/compare_locales/lint/util.py
new file mode 100644
index 0000000000..f5e1fb5e6e
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/lint/util.py
@@ -0,0 +1,38 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from compare_locales import paths
+
+
+def default_reference_and_tests(path):
+ return None, None
+
+
+def mirror_reference_and_tests(files, basedir):
+ '''Get reference files to check for conflicts in android-l10n and friends.
+ '''
+ def get_reference_and_tests(path):
+ for matchers in files.matchers:
+ if 'reference' not in matchers:
+ continue
+ matcher = matchers['reference']
+ if matcher.match(path) is None:
+ continue
+ ref_matcher = paths.Matcher(matcher, root=basedir)
+ ref_path = matcher.sub(ref_matcher, path)
+ return ref_path, matchers.get('test')
+ return None, None
+ return get_reference_and_tests
+
+
+def l10n_base_reference_and_tests(files):
+ '''Get reference files to check for conflicts in gecko-strings and friends.
+ '''
+ def get_reference_and_tests(path):
+ match = files.match(path)
+ if match is None:
+ return None, None
+ ref, _, _, extra_tests = match
+ return ref, extra_tests
+ return get_reference_and_tests
diff --git a/third_party/python/compare_locales/compare_locales/merge.py b/third_party/python/compare_locales/compare_locales/merge.py
new file mode 100644
index 0000000000..1d73560bb9
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/merge.py
@@ -0,0 +1,143 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''Merge resources across channels.
+
+Merging resources is done over a series of parsed resources, or source
+strings.
+The nomenclature is that the resources are ordered from newest to oldest.
+The generated file structure is taken from the newest file, and then the
+next-newest, etc. The values of the returned entities are taken from the
+newest to the oldest resource, too.
+
+In merge_resources, there's an option to choose the values from oldest
+to newest instead.
+'''
+
+from collections import OrderedDict, defaultdict
+from codecs import encode
+from functools import reduce
+
+
+from compare_locales import parser as cl
+from compare_locales.parser.base import StickyEntry
+from compare_locales.compare.utils import AddRemove
+
+
+class MergeNotSupportedError(ValueError):
+ pass
+
+
+def merge_channels(name, resources):
+ try:
+ parser = cl.getParser(name)
+ except UserWarning:
+ raise MergeNotSupportedError(
+ f'Unsupported file format ({name}).')
+
+ entities = merge_resources(parser, resources)
+ return encode(serialize_legacy_resource(entities), parser.encoding)
+
+
+def merge_resources(parser, resources, keep_newest=True):
+ '''Merge parsed or unparsed resources, returning an iterable of Entities.
+
+ Resources are ordered from newest to oldest in the input. The structure
+ of the generated content is taken from the newest resource first, and
+ then filled by the next etc.
+ Values are also taken from the newest, unless keep_newest is False,
+ then values are taken from the oldest first.
+ '''
+
+ def parse_resource(resource):
+ # The counter dict keeps track of number of identical comments.
+ counter = defaultdict(int)
+ if isinstance(resource, bytes):
+ parser.readContents(resource)
+ resource = parser.walk()
+ pairs = [get_key_value(entity, counter) for entity in resource]
+ return OrderedDict(pairs)
+
+ def get_key_value(entity, counter):
+ if isinstance(entity, cl.Comment):
+ counter[entity.val] += 1
+ # Use the (value, index) tuple as the key. AddRemove will
+ # de-duplicate identical comments at the same index.
+ return ((entity.val, counter[entity.val]), entity)
+
+ if isinstance(entity, cl.Whitespace):
+ # Use the Whitespace instance as the key so that it's always
+ # unique. Adjacent whitespace will be folded into the longer one in
+ # prune.
+ return (entity, entity)
+
+ return (entity.key, entity)
+
+ entities = reduce(
+ lambda x, y: merge_two(x, y, keep_newer=keep_newest),
+ map(parse_resource, resources))
+ return entities.values()
+
+
+def merge_two(newer, older, keep_newer=True):
+ '''Merge two OrderedDicts.
+
+ The order of the result dict is determined by `newer`.
+ The values in the dict are the newer ones by default, too.
+ If `keep_newer` is False, the values will be taken from the older
+ dict.
+ '''
+ diff = AddRemove()
+ diff.set_left(newer.keys())
+ diff.set_right(older.keys())
+
+ # Create a flat sequence of all entities in order reported by AddRemove.
+ get_entity = get_newer_entity if keep_newer else get_older_entity
+ contents = [(key, get_entity(newer, older, key)) for _, key in diff]
+
+ def prune(acc, cur):
+ _, entity = cur
+ if entity is None:
+ # Prune Nones which stand for duplicated comments.
+ return acc
+
+ if len(acc) and isinstance(entity, cl.Whitespace):
+ _, prev_entity = acc[-1]
+
+ if isinstance(prev_entity, cl.Whitespace):
+ # Prefer the longer whitespace.
+ if len(entity.all) > len(prev_entity.all):
+ acc[-1] = (entity, entity)
+ return acc
+
+ acc.append(cur)
+ return acc
+
+ pruned = reduce(prune, contents, [])
+ return OrderedDict(pruned)
+
+
+def get_newer_entity(newer, older, key):
+ entity = newer.get(key, None)
+
+ # Always prefer the newer version.
+ if entity is not None:
+ return entity
+
+ return older.get(key)
+
+
+def get_older_entity(newer, older, key):
+ entity = older.get(key, None)
+
+ # If we don't have an older version, or it's a StickyEntry,
+ # get a newer version
+ if entity is None or isinstance(entity, StickyEntry):
+ return newer.get(key)
+
+ return entity
+
+
+def serialize_legacy_resource(entities):
+ return "".join(entity.all for entity in entities)
diff --git a/third_party/python/compare_locales/compare_locales/mozpath.py b/third_party/python/compare_locales/compare_locales/mozpath.py
new file mode 100644
index 0000000000..d2b1575858
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/mozpath.py
@@ -0,0 +1,154 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''
+Like :py:mod:`os.path`, with a reduced set of functions, and with normalized
+path separators (always use forward slashes).
+Also contains a few additional utilities not found in :py:mod:`os.path`.
+'''
+
+
+import posixpath
+import os
+import re
+
+
+def normsep(path):
+ '''
+ Normalize path separators, by using forward slashes instead of whatever
+ :py:const:`os.sep` is.
+ '''
+ if os.sep != '/':
+ path = path.replace(os.sep, '/')
+ if os.altsep and os.altsep != '/':
+ path = path.replace(os.altsep, '/')
+ return path
+
+
+def relpath(path, start):
+ rel = normsep(os.path.relpath(path, start))
+ return '' if rel == '.' else rel
+
+
+def realpath(path):
+ return normsep(os.path.realpath(path))
+
+
+def abspath(path):
+ return normsep(os.path.abspath(path))
+
+
+def join(*paths):
+ return normsep(os.path.join(*paths))
+
+
+def normpath(path):
+ return posixpath.normpath(normsep(path))
+
+
+def dirname(path):
+ return posixpath.dirname(normsep(path))
+
+
+def commonprefix(paths):
+ return posixpath.commonprefix([normsep(path) for path in paths])
+
+
+def basename(path):
+ return os.path.basename(path)
+
+
+def splitext(path):
+ return posixpath.splitext(normsep(path))
+
+
+def split(path):
+ '''
+ Return the normalized path as a list of its components.
+
+ ``split('foo/bar/baz')`` returns ``['foo', 'bar', 'baz']``
+ '''
+ return normsep(path).split('/')
+
+
+def basedir(path, bases):
+ '''
+ Given a list of directories (`bases`), return which one contains the given
+ path. If several matches are found, the deepest base directory is returned.
+
+ ``basedir('foo/bar/baz', ['foo', 'baz', 'foo/bar'])`` returns ``'foo/bar'``
+ (`'foo'` and `'foo/bar'` both match, but `'foo/bar'` is the deepest match)
+ '''
+ path = normsep(path)
+ bases = [normsep(b) for b in bases]
+ if path in bases:
+ return path
+ for b in sorted(bases, reverse=True):
+ if b == '' or path.startswith(b + '/'):
+ return b
+
+
+re_cache = {}
+
+
+def match(path, pattern):
+ '''
+ Return whether the given path matches the given pattern.
+ An asterisk can be used to match any string, including the null string, in
+ one part of the path:
+
+ ``foo`` matches ``*``, ``f*`` or ``fo*o``
+
+ However, an asterisk matching a subdirectory may not match the null string:
+
+ ``foo/bar`` does *not* match ``foo/*/bar``
+
+ If the pattern matches one of the ancestor directories of the path, the
+ path is considered matching:
+
+ ``foo/bar`` matches ``foo``
+
+ Two adjacent asterisks can be used to match files and zero or more
+ directories and subdirectories.
+
+ ``foo/bar`` matches ``foo/**/bar``, or ``**/bar``
+ '''
+ if not pattern:
+ return True
+ if pattern not in re_cache:
+ last_end = 0
+ p = ''
+ for m in re.finditer(r'(?:(^|/)\*\*(/|$))|(?P<star>\*)', pattern):
+ if m.start() > last_end:
+ p += re.escape(pattern[last_end:m.start()])
+ if m.group('star'):
+ p += '[^/]*'
+ elif m.group(2):
+ p += re.escape(m.group(1)) + r'(?:.+%s)?' % m.group(2)
+ else:
+ p += r'(?:%s.+)?' % re.escape(m.group(1))
+ last_end = m.end()
+ p += re.escape(pattern[last_end:]) + '(?:/.*)?$'
+ re_cache[pattern] = re.compile(p)
+ return re_cache[pattern].match(path) is not None
+
+
+def rebase(oldbase, base, relativepath):
+ '''
+ Return `relativepath` relative to `base` instead of `oldbase`.
+ '''
+ if base == oldbase:
+ return relativepath
+ if len(base) < len(oldbase):
+ assert basedir(oldbase, [base]) == base
+ relbase = relpath(oldbase, base)
+ result = join(relbase, relativepath)
+ else:
+ assert basedir(base, [oldbase]) == oldbase
+ relbase = relpath(base, oldbase)
+ result = relpath(relativepath, relbase)
+ result = normpath(result)
+ if relativepath.endswith('/') and not result.endswith('/'):
+ result += '/'
+ return result
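+
+
+if __name__ == '__main__':
+    # Quick self-checks of the documented semantics; a sketch, not a
+    # test suite.
+    assert match('foo/bar', 'foo/**/bar')     # ** spans zero directories
+    assert match('foo/bar', 'foo')            # ancestor match is enough
+    assert not match('foo/bar', 'foo/*/bar')  # * can't match a null part
+    assert basedir('foo/bar/baz', ['foo', 'foo/bar']) == 'foo/bar'
+    assert rebase('l10n', 'l10n/de', 'de/toolkit') == 'toolkit'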
diff --git a/third_party/python/compare_locales/compare_locales/parser/__init__.py b/third_party/python/compare_locales/compare_locales/parser/__init__.py
new file mode 100644
index 0000000000..b537bb0686
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/__init__.py
@@ -0,0 +1,81 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+from .base import (
+ CAN_NONE, CAN_COPY, CAN_SKIP, CAN_MERGE,
+ Entry, Entity, Comment, OffsetComment, Junk, Whitespace,
+ BadEntity, Parser,
+)
+from .android import (
+ AndroidParser
+)
+from .defines import (
+ DefinesParser, DefinesInstruction
+)
+from .dtd import (
+ DTDEntity, DTDParser
+)
+from .fluent import (
+ FluentParser, FluentComment, FluentEntity, FluentMessage, FluentTerm,
+)
+from .ini import (
+ IniParser, IniSection,
+)
+from .po import (
+ PoParser
+)
+from .properties import (
+ PropertiesParser, PropertiesEntity
+)
+
+__all__ = [
+ "CAN_NONE", "CAN_COPY", "CAN_SKIP", "CAN_MERGE",
+ "Junk", "Entry", "Entity", "Whitespace", "Comment", "OffsetComment",
+ "BadEntity", "Parser",
+ "AndroidParser",
+ "DefinesParser", "DefinesInstruction",
+ "DTDParser", "DTDEntity",
+ "FluentParser", "FluentComment", "FluentEntity",
+ "FluentMessage", "FluentTerm",
+ "IniParser", "IniSection",
+ "PoParser",
+ "PropertiesParser", "PropertiesEntity",
+]
+
+__constructors = []
+
+
+def getParser(path):
+ for item in __constructors:
+ if re.search(item[0], path):
+ return item[1]
+ try:
+ from pkg_resources import iter_entry_points
+ for entry_point in iter_entry_points('compare_locales.parsers'):
+ p = entry_point.resolve()()
+ if p.use(path):
+ return p
+ except (ImportError, OSError):
+ pass
+ raise UserWarning("Cannot find Parser")
+
+
+def hasParser(path):
+ try:
+ return bool(getParser(path))
+ except UserWarning:
+ return False
+
+
+__constructors = [
+ ('strings.*\\.xml$', AndroidParser()),
+ ('\\.dtd$', DTDParser()),
+ ('\\.properties$', PropertiesParser()),
+ ('\\.ini$', IniParser()),
+ ('\\.inc$', DefinesParser()),
+ ('\\.ftl$', FluentParser()),
+ ('\\.pot?$', PoParser()),
+]
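+
+
+if __name__ == '__main__':
+    # Minimal sketch, not part of the upstream module: parsers are
+    # selected by file name; the paths are illustrative.
+    print(type(getParser('strings.xml')).__name__)  # AndroidParser
+    print(type(getParser('browser.ftl')).__name__)  # FluentParser
+    print(hasParser('README.md'))                   # False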
diff --git a/third_party/python/compare_locales/compare_locales/parser/android.py b/third_party/python/compare_locales/compare_locales/parser/android.py
new file mode 100644
index 0000000000..ba4197da84
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/android.py
@@ -0,0 +1,303 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Android strings.xml parser
+
+Parses strings.xml files per
+https://developer.android.com/guide/topics/resources/localization.
+As we're using a built-in XML parser underneath, errors on that level
+break the whole parse and result in a single Junk entry.
+"""
+
+
+import re
+from xml.dom import minidom
+from xml.dom.minidom import Node
+
+from .base import (
+ CAN_SKIP,
+ Entity, Comment, Junk, Whitespace,
+ StickyEntry, LiteralEntity,
+ Parser
+)
+
+
+class AndroidEntity(Entity):
+ def __init__(
+ self, ctx, pre_comment, white_space, node, all, key, raw_val, val
+ ):
+ # fill out the superclass as well as we can for now;
+ # most spans get modified at endElement
+ super().__init__(
+ ctx, pre_comment, white_space,
+ (None, None),
+ (None, None),
+ (None, None)
+ )
+ self.node = node
+ self._all_literal = all
+ self._key_literal = key
+ self._raw_val_literal = raw_val
+ self._val_literal = val
+
+ @property
+ def all(self):
+ chunks = []
+ if self.pre_comment is not None:
+ chunks.append(self.pre_comment.all)
+ if self.inner_white is not None:
+ chunks.append(self.inner_white.all)
+ chunks.append(self._all_literal)
+ return ''.join(chunks)
+
+ @property
+ def key(self):
+ return self._key_literal
+
+ @property
+ def raw_val(self):
+ return self._raw_val_literal
+
+ def position(self, offset=0):
+ return (0, offset)
+
+ def value_position(self, offset=0):
+ return (0, offset)
+
+ def wrap(self, raw_val):
+ clone = self.node.cloneNode(True)
+ if clone.childNodes.length == 1:
+ child = clone.childNodes[0]
+ else:
+ for child in clone.childNodes:
+ if child.nodeType == Node.CDATA_SECTION_NODE:
+ break
+ child.data = raw_val
+ all = []
+ if self.pre_comment is not None:
+ all.append(self.pre_comment.all)
+ if self.inner_white is not None:
+ all.append(self.inner_white.all)
+ all.append(clone.toxml())
+ return LiteralEntity(self.key, raw_val, ''.join(all))
+
+
+class NodeMixin:
+ def __init__(self, all, value):
+ self._all_literal = all
+ self._val_literal = value
+
+ @property
+ def all(self):
+ return self._all_literal
+
+ @property
+ def key(self):
+ return self._all_literal
+
+ @property
+ def raw_val(self):
+ return self._val_literal
+
+ def position(self, offset=0):
+ return (0, offset)
+
+ def value_position(self, offset=0):
+ return (0, offset)
+
+
+class XMLWhitespace(NodeMixin, Whitespace):
+ pass
+
+
+class XMLComment(NodeMixin, Comment):
+ @property
+ def val(self):
+ return self._val_literal
+
+ @property
+ def key(self):
+ return None
+
+
+# DocumentWrapper is sticky in serialization.
+# Always keep the one from the reference document.
+class DocumentWrapper(NodeMixin, StickyEntry):
+ def __init__(self, key, all):
+ self._all_literal = all
+ self._val_literal = all
+ self._key_literal = key
+
+ @property
+ def key(self):
+ return self._key_literal
+
+
+class XMLJunk(Junk):
+ def __init__(self, all):
+ super().__init__(None, (0, 0))
+ self._all_literal = all
+
+ @property
+ def all(self):
+ return self._all_literal
+
+ def position(self, offset=0):
+ return (0, offset)
+
+ def value_position(self, offset=0):
+ return (0, offset)
+
+
+def textContent(node):
+ if node.childNodes.length == 0:
+ return ''
+ for child in node.childNodes:
+ if child.nodeType == minidom.Node.CDATA_SECTION_NODE:
+ return child.data
+ if (
+ node.childNodes.length != 1 or
+ node.childNodes[0].nodeType != minidom.Node.TEXT_NODE
+ ):
+ # Return something, we'll fail in checks on this
+ return node.toxml()
+ return node.childNodes[0].data
+
+
+NEWLINE = re.compile(r'[ \t]*\n[ \t]*')
+
+
+def normalize(val):
+ return NEWLINE.sub('\n', val.strip(' \t'))
+
+
+class AndroidParser(Parser):
+ # Android does l10n fallback at runtime, don't merge en-US strings
+ capabilities = CAN_SKIP
+
+ def __init__(self):
+ super().__init__()
+ self.last_comment = None
+
+ def walk(self, only_localizable=False):
+ if not self.ctx:
+ # loading file failed, or we just didn't load anything
+ return
+ ctx = self.ctx
+ contents = ctx.contents
+ try:
+ doc = minidom.parseString(contents.encode('utf-8'))
+ except Exception:
+ yield XMLJunk(contents)
+ return
+ docElement = doc.documentElement
+ if docElement.nodeName != 'resources':
+ yield XMLJunk(doc.toxml())
+ return
+ root_children = docElement.childNodes
+ if not only_localizable:
+ yield DocumentWrapper(
+ '<?xml?><resources>',
+ '<?xml version="1.0" encoding="utf-8"?>\n<resources'
+ )
+ for attr_name, attr_value in docElement.attributes.items():
+ yield DocumentWrapper(
+ attr_name,
+ f' {attr_name}="{attr_value}"'
+ )
+ yield DocumentWrapper('>', '>')
+ child_num = 0
+ while child_num < len(root_children):
+ node = root_children[child_num]
+ if node.nodeType == Node.COMMENT_NODE:
+ current_comment, child_num = self.handleComment(
+ node, root_children, child_num
+ )
+ if child_num < len(root_children):
+ node = root_children[child_num]
+ else:
+ if not only_localizable:
+ yield current_comment
+ break
+ else:
+ current_comment = None
+ if node.nodeType in (Node.TEXT_NODE, Node.CDATA_SECTION_NODE):
+ white_space = XMLWhitespace(node.toxml(), node.nodeValue)
+ child_num += 1
+ if current_comment is None:
+ if not only_localizable:
+ yield white_space
+ continue
+ if node.nodeValue.count('\n') > 1:
+ if not only_localizable:
+ if current_comment is not None:
+ yield current_comment
+ yield white_space
+ continue
+ if child_num < len(root_children):
+ node = root_children[child_num]
+ else:
+ if not only_localizable:
+ if current_comment is not None:
+ yield current_comment
+ yield white_space
+ break
+ else:
+ white_space = None
+ if node.nodeType == Node.ELEMENT_NODE:
+ yield self.handleElement(node, current_comment, white_space)
+ else:
+ if not only_localizable:
+ if current_comment:
+ yield current_comment
+ if white_space:
+ yield white_space
+ child_num += 1
+ if not only_localizable:
+ yield DocumentWrapper('</resources>', '</resources>\n')
+
+ def handleElement(self, element, current_comment, white_space):
+ if element.nodeName == 'string' and element.hasAttribute('name'):
+ return AndroidEntity(
+ self.ctx,
+ current_comment,
+ white_space,
+ element,
+ element.toxml(),
+ element.getAttribute('name'),
+ textContent(element),
+ ''.join(c.toxml() for c in element.childNodes)
+ )
+ else:
+ return XMLJunk(element.toxml())
+
+ def handleComment(self, node, root_children, child_num):
+ all = node.toxml()
+ val = normalize(node.nodeValue)
+ while True:
+ child_num += 1
+ if child_num >= len(root_children):
+ break
+ node = root_children[child_num]
+ if node.nodeType == Node.TEXT_NODE:
+ if node.nodeValue.count('\n') > 1:
+ break
+ white = node
+ child_num += 1
+ if child_num >= len(root_children):
+ break
+ node = root_children[child_num]
+ else:
+ white = None
+ if node.nodeType != Node.COMMENT_NODE:
+ if white is not None:
+ # do not consume this node
+ child_num -= 1
+ break
+ if white:
+ all += white.toxml()
+ val += normalize(white.nodeValue)
+ all += node.toxml()
+ val += normalize(node.nodeValue)
+ return XMLComment(all, val), child_num
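+
+
+if __name__ == '__main__':
+    # Minimal sketch, not part of the upstream module: parse a small
+    # strings.xml from memory and iterate the localizable entities.
+    p = AndroidParser()
+    p.readUnicode(
+        '<?xml version="1.0" encoding="utf-8"?>\n'
+        '<resources>\n'
+        '  <!-- shown on the start screen -->\n'
+        '  <string name="hello">Hello</string>\n'
+        '</resources>\n'
+    )
+    for entity in p:
+        print(entity.key, '->', entity.raw_val)  # hello -> Hello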
diff --git a/third_party/python/compare_locales/compare_locales/parser/base.py b/third_party/python/compare_locales/compare_locales/parser/base.py
new file mode 100644
index 0000000000..b8972beb33
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/base.py
@@ -0,0 +1,443 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+import bisect
+import codecs
+from collections import Counter
+from compare_locales.keyedtuple import KeyedTuple
+from compare_locales.paths import File
+
+__constructors = []
+
+
+# The allowed capabilities for the Parsers. They define the exact strategy
+# used by ContentComparer.merge.
+
+# Don't perform any merging
+CAN_NONE = 0
+# Copy the entire reference file
+CAN_COPY = 1
+# Remove broken entities from localization
+# Without CAN_MERGE, en-US is not good to use for localization.
+CAN_SKIP = 2
+# Add missing and broken entities from the reference to localization
+# This effectively means that en-US is good to use for localized files.
+CAN_MERGE = 4
+
+
+class Entry:
+ '''
+ Abstraction layer for a localizable entity.
+ Currently supported are grammars of the form:
+
+ 1: entity definition
+ 2: entity key (name)
+ 3: entity value
+
+ <!ENTITY key "value">
+
+ <--- definition ---->
+ '''
+ def __init__(
+ self, ctx, pre_comment, inner_white, span, key_span, val_span
+ ):
+ self.ctx = ctx
+ self.span = span
+ self.key_span = key_span
+ self.val_span = val_span
+ self.pre_comment = pre_comment
+ self.inner_white = inner_white
+
+ def position(self, offset=0):
+ """Get the 1-based line and column of the character
+ with given offset into the Entity.
+
+ If offset is negative, return the end of the Entity.
+ """
+ if offset < 0:
+ pos = self.span[1]
+ else:
+ pos = self.span[0] + offset
+ return self.ctx.linecol(pos)
+
+ def value_position(self, offset=0):
+ """Get the 1-based line and column of the character
+ with given offset into the value.
+
+ If offset is negative, return the end of the value.
+ """
+ assert self.val_span is not None
+ if offset < 0:
+ pos = self.val_span[1]
+ else:
+ pos = self.val_span[0] + offset
+ return self.ctx.linecol(pos)
+
+ def _span_start(self):
+ start = self.span[0]
+ if hasattr(self, 'pre_comment') and self.pre_comment is not None:
+ start = self.pre_comment.span[0]
+ return start
+
+ @property
+ def all(self):
+ start = self._span_start()
+ end = self.span[1]
+ return self.ctx.contents[start:end]
+
+ @property
+ def key(self):
+ return self.ctx.contents[self.key_span[0]:self.key_span[1]]
+
+ @property
+ def raw_val(self):
+ if self.val_span is None:
+ return None
+ return self.ctx.contents[self.val_span[0]:self.val_span[1]]
+
+ @property
+ def val(self):
+ return self.raw_val
+
+ def __repr__(self):
+ return self.key
+
+ re_br = re.compile('<br[ \t\r\n]*/?>', re.U)
+ re_sgml = re.compile(r'</?\w+.*?>', re.U | re.M)
+
+ def count_words(self):
+ """Count the words in an English string.
+ Replace a couple of xml markup to make that safer, too.
+ """
+ value = self.re_br.sub('\n', self.val)
+ value = self.re_sgml.sub('', value)
+ return len(value.split())
+
+ def equals(self, other):
+ return self.key == other.key and self.val == other.val
+
+
+class StickyEntry(Entry):
+ """Subclass of Entry to use in for syntax fragments
+ which should always be overwritten in the serializer.
+ """
+ pass
+
+
+class Entity(Entry):
+ @property
+ def localized(self):
+ '''Is this entity localized.
+
+ Always true for monolingual files.
+ In bilingual files, this is a dynamic property.
+ '''
+ return True
+
+ def unwrap(self):
+ """Return the literal value to be used by tools.
+ """
+ return self.raw_val
+
+ def wrap(self, raw_val):
+ """Create literal entity based on reference and raw value.
+
+ This is used by the serialization logic.
+ """
+ start = self._span_start()
+ all = (
+ self.ctx.contents[start:self.val_span[0]] +
+ raw_val +
+ self.ctx.contents[self.val_span[1]:self.span[1]]
+ )
+ return LiteralEntity(self.key, raw_val, all)
+
+
+class LiteralEntity(Entity):
+ """Subclass of Entity to represent entities without context slices.
+
+ It's storing string literals for key, raw_val and all instead of spans.
+ """
+ def __init__(self, key, val, all):
+ super().__init__(None, None, None, None, None, None)
+ self._key = key
+ self._raw_val = val
+ self._all = all
+
+ @property
+ def key(self):
+ return self._key
+
+ @property
+ def raw_val(self):
+ return self._raw_val
+
+ @property
+ def all(self):
+ return self._all
+
+
+class PlaceholderEntity(LiteralEntity):
+ """Subclass of Entity to be removed in merges.
+ """
+ def __init__(self, key):
+ super().__init__(key, "", "\nplaceholder\n")
+
+
+class Comment(Entry):
+ def __init__(self, ctx, span):
+ self.ctx = ctx
+ self.span = span
+ self.val_span = None
+ self._val_cache = None
+
+ @property
+ def key(self):
+ return None
+
+ @property
+ def val(self):
+ if self._val_cache is None:
+ self._val_cache = self.all
+ return self._val_cache
+
+ def __repr__(self):
+ return self.all
+
+
+class OffsetComment(Comment):
+ '''Helper for file formats that have a constant number of leading
+ chars to strip from comments.
+ Offset defaults to 1
+ '''
+ comment_offset = 1
+
+ @property
+ def val(self):
+ if self._val_cache is None:
+ self._val_cache = ''.join(
+ l[self.comment_offset:] for l in self.all.splitlines(True)
+ )
+ return self._val_cache
+
+
+class Junk:
+ '''
+ An almost-Entity, representing junk data that we didn't parse.
+ This way, we can signal bad content as stuff we don't understand.
+ And then either fix that, or report real bugs in localizations.
+ '''
+ junkid = 0
+
+ def __init__(self, ctx, span):
+ self.ctx = ctx
+ self.span = span
+ self.__class__.junkid += 1
+ self.key = '_junk_%d_%d-%d' % (self.__class__.junkid, span[0], span[1])
+
+ def position(self, offset=0):
+ """Get the 1-based line and column of the character
+ with given offset into the Entity.
+
+ If offset is negative, return the end of the Entity.
+ """
+ if offset < 0:
+ pos = self.span[1]
+ else:
+ pos = self.span[0] + offset
+ return self.ctx.linecol(pos)
+
+ @property
+ def all(self):
+ return self.ctx.contents[self.span[0]:self.span[1]]
+
+ @property
+ def raw_val(self):
+ return self.all
+
+ @property
+ def val(self):
+ return self.all
+
+ def error_message(self):
+ params = (self.val,) + self.position() + self.position(-1)
+ return (
+ 'Unparsed content "%s" from line %d column %d'
+ ' to line %d column %d' % params
+ )
+
+ def __repr__(self):
+ return self.key
+
+
+class Whitespace(Entry):
+ '''Entity-like object representing a run of whitespace,
+ if the format allows one.
+ '''
+ def __init__(self, ctx, span):
+ self.ctx = ctx
+ self.span = self.key_span = self.val_span = span
+
+ def __repr__(self):
+ return self.raw_val
+
+
+class BadEntity(ValueError):
+ '''Raised when the parser can't create an Entity for a found match.
+ '''
+ pass
+
+
+class Parser:
+ capabilities = CAN_SKIP | CAN_MERGE
+ reWhitespace = re.compile('[ \t\r\n]+', re.M)
+ Comment = Comment
+ # NotImplementedError would be great, but also tedious
+ reKey = reComment = None
+
+ class Context:
+ "Fixture for content and line numbers"
+ def __init__(self, contents):
+ self.contents = contents
+ # cache split lines
+ self._lines = None
+
+ def linecol(self, position):
+ "Returns 1-based line and column numbers."
+ if self._lines is None:
+ nl = re.compile('\n', re.M)
+ self._lines = [m.end()
+ for m in nl.finditer(self.contents)]
+
+ line_offset = bisect.bisect(self._lines, position)
+ line_start = self._lines[line_offset - 1] if line_offset else 0
+ col_offset = position - line_start
+
+ return line_offset + 1, col_offset + 1
+
+ def __init__(self):
+ if not hasattr(self, 'encoding'):
+ self.encoding = 'utf-8'
+ self.ctx = None
+
+ def readFile(self, file):
+ '''Read contents from disk, with universal_newlines'''
+ if isinstance(file, File):
+ file = file.fullpath
+ # Text mode with newline=None gives us universal newlines.
+ with open(
+ file,
+ encoding=self.encoding, errors='replace',
+ newline=None
+ ) as f:
+ self.readUnicode(f.read())
+
+ def readContents(self, contents):
+ '''Read contents and create parsing context.
+
+ contents are in native encoding, but with normalized line endings.
+ '''
+ (contents, _) = codecs.getdecoder(self.encoding)(contents, 'replace')
+ self.readUnicode(contents)
+
+ def readUnicode(self, contents):
+ self.ctx = self.Context(contents)
+
+ def parse(self):
+ return KeyedTuple(self)
+
+ def __iter__(self):
+ return self.walk(only_localizable=True)
+
+ def walk(self, only_localizable=False):
+ if not self.ctx:
+ # loading file failed, or we just didn't load anything
+ return
+ ctx = self.ctx
+ contents = ctx.contents
+
+ next_offset = 0
+ while next_offset < len(contents):
+ entity = self.getNext(ctx, next_offset)
+
+ if isinstance(entity, (Entity, Junk)):
+ yield entity
+ elif not only_localizable:
+ yield entity
+
+ next_offset = entity.span[1]
+
+ def getNext(self, ctx, offset):
+ '''Parse the next fragment.
+
+ Parse comments first, then white-space.
+ If an entity follows, create that entity with such pre_comment and
+ inner white-space. If not, emit comment or white-space as standalone.
+ It's OK that this might parse whitespace more than once.
+ Comments are associated with entities if they're not separated by
+ blank lines. Multiple consecutive comments are joined.
+ '''
+ junk_offset = offset
+ m = self.reComment.match(ctx.contents, offset)
+ if m:
+ current_comment = self.Comment(ctx, m.span())
+ if offset < 2 and 'License' in current_comment.val:
+ # Heuristic: an early comment mentioning "License" is probably
+ # a license header, and should be standalone.
+ # Not gluing ourselves to offset == 0 as we might have
+ # skipped a BOM.
+ return current_comment
+ offset = m.end()
+ else:
+ current_comment = None
+ m = self.reWhitespace.match(ctx.contents, offset)
+ if m:
+ white_space = Whitespace(ctx, m.span())
+ offset = m.end()
+ if (
+ current_comment is not None
+ and white_space.raw_val.count('\n') > 1
+ ):
+ # standalone comment
+ # return the comment, and reparse the whitespace next time
+ return current_comment
+ if current_comment is None:
+ return white_space
+ else:
+ white_space = None
+ m = self.reKey.match(ctx.contents, offset)
+ if m:
+ try:
+ return self.createEntity(ctx, m, current_comment, white_space)
+ except BadEntity:
+ # fall through to Junk, probably
+ pass
+ if current_comment is not None:
+ return current_comment
+ if white_space is not None:
+ return white_space
+ return self.getJunk(ctx, junk_offset, self.reKey, self.reComment)
+
+ def getJunk(self, ctx, offset, *expressions):
+ junkend = None
+ for exp in expressions:
+ m = exp.search(ctx.contents, offset)
+ if m:
+ junkend = min(junkend, m.start()) if junkend else m.start()
+ return Junk(ctx, (offset, junkend or len(ctx.contents)))
+
+ def createEntity(self, ctx, m, current_comment, white_space):
+ return Entity(
+ ctx, current_comment, white_space,
+ m.span(), m.span('key'), m.span('val')
+ )
+
+ @classmethod
+ def findDuplicates(cls, entities):
+ found = Counter(entity.key for entity in entities)
+ for entity_id, cnt in found.items():
+ if cnt > 1:
+ yield f'{entity_id} occurs {cnt} times'
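+
+
+if __name__ == '__main__':
+    # Minimal sketch, not part of the upstream module: LiteralEntity and
+    # findDuplicates work without a parsing context.
+    entities = [
+        LiteralEntity('msg', 'one', 'msg = one'),
+        LiteralEntity('msg', 'two', 'msg = two'),
+    ]
+    print(list(Parser.findDuplicates(entities)))  # ['msg occurs 2 times']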
diff --git a/third_party/python/compare_locales/compare_locales/parser/defines.py b/third_party/python/compare_locales/compare_locales/parser/defines.py
new file mode 100644
index 0000000000..dd4511e4a8
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/defines.py
@@ -0,0 +1,104 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+from .base import (
+ CAN_COPY,
+ Entry, OffsetComment, Junk, Whitespace,
+ Parser
+)
+
+
+class DefinesInstruction(Entry):
+ '''Entity-like object representing processing instructions in inc files
+ '''
+ def __init__(self, ctx, span, val_span):
+ self.ctx = ctx
+ self.span = span
+ self.key_span = self.val_span = val_span
+
+ def __repr__(self):
+ return self.raw_val
+
+
+class DefinesParser(Parser):
+ # can't merge, #unfilter needs to be the last item, which we don't support
+ capabilities = CAN_COPY
+ reWhitespace = re.compile('\n+', re.M)
+
+ EMPTY_LINES = 1 << 0
+
+ class Comment(OffsetComment):
+ comment_offset = 2
+
+ class Context(Parser.Context):
+ def __init__(self, contents):
+ super(DefinesParser.Context, self).__init__(contents)
+ self.filter_empty_lines = False
+
+ def __init__(self):
+ self.reComment = re.compile('(?:^# .*?\n)*(?:^# [^\n]*)', re.M)
+ # corresponds to
+ # https://hg.mozilla.org/mozilla-central/file/72ee4800d4156931c89b58bd807af4a3083702bb/python/mozbuild/mozbuild/preprocessor.py#l561 # noqa
+ self.reKey = re.compile(
+ r'#define[ \t]+(?P<key>\w+)(?:[ \t](?P<val>[^\n]*))?', re.M)
+ self.rePI = re.compile(r'#(?P<val>\w+[ \t]+[^\n]+)', re.M)
+ Parser.__init__(self)
+
+ def getNext(self, ctx, offset):
+ junk_offset = offset
+ contents = ctx.contents
+
+ m = self.reComment.match(ctx.contents, offset)
+ if m:
+ current_comment = self.Comment(ctx, m.span())
+ offset = m.end()
+ else:
+ current_comment = None
+
+ m = self.reWhitespace.match(contents, offset)
+ if m:
+ # blank lines outside of filter_empty_lines or
+ # leading whitespace are bad
+ if (
+ offset == 0 or
+ not (len(m.group()) == 1 or ctx.filter_empty_lines)
+ ):
+ if current_comment:
+ return current_comment
+ return Junk(ctx, m.span())
+ white_space = Whitespace(ctx, m.span())
+ offset = m.end()
+ if (
+ current_comment is not None
+ and white_space.raw_val.count('\n') > 1
+ ):
+ # standalone comment
+ # return the comment, and reparse the whitespace next time
+ return current_comment
+ if current_comment is None:
+ return white_space
+ else:
+ white_space = None
+
+ m = self.reKey.match(contents, offset)
+ if m:
+ return self.createEntity(ctx, m, current_comment, white_space)
+ # defines instructions don't have comments
+ # Any pending comment is standalone
+ if current_comment:
+ return current_comment
+ if white_space:
+ return white_space
+ m = self.rePI.match(contents, offset)
+ if m:
+ instr = DefinesInstruction(ctx, m.span(), m.span('val'))
+ if instr.val == 'filter emptyLines':
+ ctx.filter_empty_lines = True
+ if instr.val == 'unfilter emptyLines':
+ ctx.filter_empty_lines = False
+ return instr
+ return self.getJunk(
+ ctx, junk_offset, self.reComment, self.reKey, self.rePI)
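+
+
+if __name__ == '__main__':
+    # Minimal sketch, not part of the upstream module: parse a small
+    # defines (.inc) snippet from memory.
+    p = DefinesParser()
+    p.readUnicode('# LOCALIZATION NOTE: langpack creator\n'
+                  '#define MOZ_LANGPACK_CREATOR mozilla.org\n')
+    print([(e.key, e.val) for e in p])
+    # [('MOZ_LANGPACK_CREATOR', 'mozilla.org')]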
diff --git a/third_party/python/compare_locales/compare_locales/parser/dtd.py b/third_party/python/compare_locales/compare_locales/parser/dtd.py
new file mode 100644
index 0000000000..55086177a8
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/dtd.py
@@ -0,0 +1,115 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+try:
+ from html import unescape as html_unescape
+except ImportError:
+ from HTMLParser import HTMLParser
+ html_parser = HTMLParser()
+ html_unescape = html_parser.unescape
+
+from .base import (
+ Entity, Comment, Junk,
+ Parser
+)
+
+
+class DTDEntityMixin:
+ @property
+ def val(self):
+ '''Unescape HTML entities into corresponding Unicode characters.
+
+ Named (&amp;), decimal (&#38;), and hex (&#x26; and &#x0026;) formats
+ are supported. Unknown entities are left intact.
+
+ As of Python 3.7 the following 252 named entities are
+ recognized and unescaped:
+
+ https://github.com/python/cpython/blob/3.7/Lib/html/entities.py
+ '''
+ return html_unescape(self.raw_val)
+
+ def value_position(self, offset=0):
+ # DTDChecker already returns tuples of (line, col) positions
+ if isinstance(offset, tuple):
+ line_pos, col_pos = offset
+ line, col = super().value_position()
+ if line_pos == 1:
+ col = col + col_pos
+ else:
+ col = col_pos
+ line += line_pos - 1
+ return line, col
+ else:
+ return super().value_position(offset)
+
+
+class DTDEntity(DTDEntityMixin, Entity):
+ pass
+
+
+class DTDParser(Parser):
+ # http://www.w3.org/TR/2006/REC-xml11-20060816/#NT-NameStartChar
+ # ":" | [A-Z] | "_" | [a-z] |
+ # [#xC0-#xD6] | [#xD8-#xF6] | [#xF8-#x2FF] | [#x370-#x37D] | [#x37F-#x1FFF]
+ # | [#x200C-#x200D] | [#x2070-#x218F] | [#x2C00-#x2FEF] |
+ # [#x3001-#xD7FF] | [#xF900-#xFDCF] | [#xFDF0-#xFFFD] |
+ # [#x10000-#xEFFFF]
+ CharMinusDash = '\x09\x0A\x0D\u0020-\u002C\u002E-\uD7FF\uE000-\uFFFD'
+ XmlComment = '<!--(?:-?[%s])*?-->' % CharMinusDash
+ NameStartChar = ':A-Z_a-z\xC0-\xD6\xD8-\xF6\xF8-\u02FF' + \
+ '\u0370-\u037D\u037F-\u1FFF\u200C-\u200D\u2070-\u218F' + \
+ '\u2C00-\u2FEF\u3001-\uD7FF\uF900-\uFDCF\uFDF0-\uFFFD'
+ # + \U00010000-\U000EFFFF seems to be unsupported in python
+
+ # NameChar ::= NameStartChar | "-" | "." | [0-9] | #xB7 |
+ # [#x0300-#x036F] | [#x203F-#x2040]
+ NameChar = NameStartChar + r'\-\.0-9' + '\xB7\u0300-\u036F\u203F-\u2040'
+ Name = '[' + NameStartChar + '][' + NameChar + ']*'
+ reKey = re.compile('<!ENTITY[ \t\r\n]+(?P<key>' + Name + ')[ \t\r\n]+'
+ '(?P<val>\"[^\"]*\"|\'[^\']*\'?)[ \t\r\n]*>',
+ re.DOTALL | re.M)
+ # add BOM to DTDs, details in bug 435002
+ reHeader = re.compile('^\ufeff')
+ reComment = re.compile('<!--(?P<val>-?[%s])*?-->' % CharMinusDash,
+ re.S)
+ rePE = re.compile('<!ENTITY[ \t\r\n]+%[ \t\r\n]+(?P<key>' + Name + ')'
+ '[ \t\r\n]+SYSTEM[ \t\r\n]+'
+ '(?P<val>\"[^\"]*\"|\'[^\']*\')[ \t\r\n]*>[ \t\r\n]*'
+ '%' + Name + ';'
+ '(?:[ \t]*(?:' + XmlComment + '[ \t\r\n]*)*\n?)?')
+
+ class Comment(Comment):
+ @property
+ def val(self):
+ if self._val_cache is None:
+ # Strip "<!--" and "-->" to comment contents
+ self._val_cache = self.all[4:-3]
+ return self._val_cache
+
+ def getNext(self, ctx, offset):
+ '''
+ Overload Parser.getNext to special-case ParsedEntities.
+ Just check for a parsed entity if that method claims junk.
+
+ <!ENTITY % foo SYSTEM "url">
+ %foo;
+ '''
+ if offset == 0 and self.reHeader.match(ctx.contents):
+ offset += 1
+ entity = Parser.getNext(self, ctx, offset)
+ if (entity and isinstance(entity, Junk)) or entity is None:
+ m = self.rePE.match(ctx.contents, offset)
+ if m:
+ entity = DTDEntity(
+ ctx, None, None, m.span(), m.span('key'), m.span('val'))
+ return entity
+
+ def createEntity(self, ctx, m, current_comment, white_space):
+ valspan = m.span('val')
+ valspan = (valspan[0]+1, valspan[1]-1)
+ return DTDEntity(ctx, current_comment, white_space,
+ m.span(), m.span('key'), valspan)
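+
+
+if __name__ == '__main__':
+    # Minimal sketch, not part of the upstream module: .val unescapes
+    # entities, .raw_val keeps the source text.
+    p = DTDParser()
+    p.readUnicode('<!ENTITY hello.label "Hello &amp; welcome">\n')
+    entity = next(iter(p))
+    print(entity.key, '->', entity.val)  # hello.label -> Hello & welcome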
diff --git a/third_party/python/compare_locales/compare_locales/parser/fluent.py b/third_party/python/compare_locales/compare_locales/parser/fluent.py
new file mode 100644
index 0000000000..a74f8cb4f4
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/fluent.py
@@ -0,0 +1,218 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+from fluent.syntax import FluentParser as FTLParser
+from fluent.syntax import ast as ftl
+from fluent.syntax.serializer import serialize_comment
+from fluent.syntax.visitor import Visitor
+from .base import (
+ CAN_SKIP,
+ Entry, Entity, Comment, Junk, Whitespace,
+ LiteralEntity,
+ Parser
+)
+
+
+class WordCounter(Visitor):
+ def __init__(self):
+ self.word_count = 0
+
+ def generic_visit(self, node):
+ if isinstance(
+ node,
+ (ftl.Span, ftl.Annotation, ftl.BaseComment)
+ ):
+ return
+ super().generic_visit(node)
+
+ def visit_SelectExpression(self, node):
+ # optimize select expressions to only go through the variants
+ self.visit(node.variants)
+
+ def visit_TextElement(self, node):
+ self.word_count += len(node.value.split())
+
+
+class FluentAttribute(Entry):
+ ignored_fields = ['span']
+
+ def __init__(self, entity, attr_node):
+ self.ctx = entity.ctx
+ self.attr = attr_node
+ self.key_span = (attr_node.id.span.start, attr_node.id.span.end)
+ self.val_span = (attr_node.value.span.start, attr_node.value.span.end)
+
+ def equals(self, other):
+ if not isinstance(other, FluentAttribute):
+ return False
+ return self.attr.equals(
+ other.attr, ignored_fields=self.ignored_fields)
+
+
+class FluentEntity(Entity):
+ # Fields ignored when comparing two entities.
+ ignored_fields = ['comment', 'span']
+
+ def __init__(self, ctx, entry):
+ start = entry.span.start
+ end = entry.span.end
+
+ self.ctx = ctx
+ self.span = (start, end)
+
+ if isinstance(entry, ftl.Term):
+ # Terms don't have their '-' as part of the id, use the prior
+ # character
+ self.key_span = (entry.id.span.start - 1, entry.id.span.end)
+ else:
+ # Message
+ self.key_span = (entry.id.span.start, entry.id.span.end)
+
+ if entry.value is not None:
+ self.val_span = (entry.value.span.start, entry.value.span.end)
+ else:
+ self.val_span = None
+
+ self.entry = entry
+
+ # Entry instances are expected to have pre_comment. It's used by
+ # other formats to associate a Comment with an Entity. FluentEntities
+ # don't need it because message comments are part of the entry AST and
+ # are not separate Comment instances.
+ self.pre_comment = None
+
+ @property
+ def root_node(self):
+ '''AST node at which to start traversal for count_words.
+
+ By default we count words in the value and in all attributes.
+ '''
+ return self.entry
+
+ _word_count = None
+
+ def count_words(self):
+ if self._word_count is None:
+ counter = WordCounter()
+ counter.visit(self.root_node)
+ self._word_count = counter.word_count
+
+ return self._word_count
+
+ def equals(self, other):
+ return self.entry.equals(
+ other.entry, ignored_fields=self.ignored_fields)
+
+ # In Fluent we treat entries as a whole. FluentChecker reports errors at
+ # offsets calculated from the beginning of the entry.
+ def value_position(self, offset=None):
+ if offset is None:
+ # no offset given, use our value start or id end
+ if self.val_span:
+ offset = self.val_span[0] - self.span[0]
+ else:
+ offset = self.key_span[1] - self.span[0]
+ return self.position(offset)
+
+ @property
+ def attributes(self):
+ for attr_node in self.entry.attributes:
+ yield FluentAttribute(self, attr_node)
+
+ def unwrap(self):
+ return self.all
+
+ def wrap(self, raw_val):
+ """Create literal entity the given raw value.
+
+ For Fluent, we're exposing the message source to tools like
+ Pontoon.
+ We also carry the comment over from this entity to the created one.
+ """
+ all = raw_val
+ if self.entry.comment is not None:
+ all = serialize_comment(self.entry.comment) + all
+ return LiteralEntity(self.key, raw_val, all)
+
+
+class FluentMessage(FluentEntity):
+ pass
+
+
+class FluentTerm(FluentEntity):
+ # Fields ignored when comparing two terms.
+ ignored_fields = ['attributes', 'comment', 'span']
+
+ @property
+ def root_node(self):
+ '''AST node at which to start traversal for count_words.
+
+ In Fluent Terms we only count words in the value. Attributes are
+ private and do not count towards the word total.
+ '''
+ return self.entry.value
+
+
+class FluentComment(Comment):
+ def __init__(self, ctx, span, entry):
+ super().__init__(ctx, span)
+ self._val_cache = entry.content
+
+
+class FluentParser(Parser):
+ capabilities = CAN_SKIP
+
+ def __init__(self):
+ super().__init__()
+ self.ftl_parser = FTLParser()
+
+ def walk(self, only_localizable=False):
+ if not self.ctx:
+ # loading file failed, or we just didn't load anything
+ return
+
+ resource = self.ftl_parser.parse(self.ctx.contents)
+
+ last_span_end = 0
+
+ for entry in resource.body:
+ if not only_localizable:
+ if entry.span.start > last_span_end:
+ yield Whitespace(
+ self.ctx, (last_span_end, entry.span.start))
+
+ if isinstance(entry, ftl.Message):
+ yield FluentMessage(self.ctx, entry)
+ elif isinstance(entry, ftl.Term):
+ yield FluentTerm(self.ctx, entry)
+ elif isinstance(entry, ftl.Junk):
+ start = entry.span.start
+ end = entry.span.end
+ # strip leading whitespace
+ start += re.match('[ \t\r\n]*', entry.content).end()
+ if not only_localizable and entry.span.start < start:
+ yield Whitespace(
+ self.ctx, (entry.span.start, start)
+ )
+ # strip trailing whitespace
+ ws, we = re.search('[ \t\r\n]*$', entry.content).span()
+ end -= we - ws
+ yield Junk(self.ctx, (start, end))
+ if not only_localizable and end < entry.span.end:
+ yield Whitespace(
+ self.ctx, (end, entry.span.end)
+ )
+ elif isinstance(entry, ftl.BaseComment) and not only_localizable:
+ span = (entry.span.start, entry.span.end)
+ yield FluentComment(self.ctx, span, entry)
+
+ last_span_end = entry.span.end
+
+ # Yield Whitespace at the EOF.
+ if not only_localizable:
+ eof_offset = len(self.ctx.contents)
+ if eof_offset > last_span_end:
+ yield Whitespace(self.ctx, (last_span_end, eof_offset))
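+
+
+if __name__ == '__main__':
+    # Minimal sketch, not part of the upstream module: word counts only
+    # cover text elements, not placeables or comments.
+    p = FluentParser()
+    p.readUnicode('### resource comment\n\nhello = Hello, { $user }!\n')
+    for entity in p:
+        print(entity.key, entity.count_words())  # hello 2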
diff --git a/third_party/python/compare_locales/compare_locales/parser/ini.py b/third_party/python/compare_locales/compare_locales/parser/ini.py
new file mode 100644
index 0000000000..623f7c15a4
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/ini.py
@@ -0,0 +1,56 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+from .base import (
+ Entry, OffsetComment,
+ Parser
+)
+
+
+class IniSection(Entry):
+ '''Entity-like object representing sections in ini files
+ '''
+ def __init__(self, ctx, span, val_span):
+ self.ctx = ctx
+ self.span = span
+ self.key_span = self.val_span = val_span
+
+ def __repr__(self):
+ return self.raw_val
+
+
+class IniParser(Parser):
+ '''
+ Parse files of the form:
+ # initial comment
+ [cat]
+ whitespace*
+ #comment
+ string=value
+ ...
+ '''
+
+ Comment = OffsetComment
+
+ def __init__(self):
+ self.reComment = re.compile('(?:^[;#][^\n]*\n)*(?:^[;#][^\n]*)', re.M)
+ self.reSection = re.compile(r'\[(?P<val>.*?)\]', re.M)
+ self.reKey = re.compile('(?P<key>.+?)=(?P<val>.*)', re.M)
+ Parser.__init__(self)
+
+ def getNext(self, ctx, offset):
+ contents = ctx.contents
+ m = self.reSection.match(contents, offset)
+ if m:
+ return IniSection(ctx, m.span(), m.span('val'))
+
+ return super().getNext(ctx, offset)
+
+ def getJunk(self, ctx, offset, *expressions):
+ # base.Parser.getNext calls us with self.reKey, self.reComment.
+ # Add self.reSection to the end-of-junk expressions
+ expressions = expressions + (self.reSection,)
+ return super().getJunk(ctx, offset, *expressions)
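A small sketch of the format described in the IniParser docstring, again assuming the base Parser's readContents/walk helpers from parser/base.py; the section and key are hypothetical:

    p = IniParser()
    p.readContents(b'# initial comment\n[Strings]\ntitle=My App\n')
    for entry in p.walk():
        # yields, among Comment and Whitespace entries, an IniSection
        # for "[Strings]" and an Entity for "title"
        print(type(entry).__name__)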
diff --git a/third_party/python/compare_locales/compare_locales/parser/po.py b/third_party/python/compare_locales/compare_locales/parser/po.py
new file mode 100644
index 0000000000..48ea05ca2b
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/po.py
@@ -0,0 +1,125 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Gettext PO(T) parser
+
+Parses gettext po and pot files.
+"""
+
+
+import re
+
+from .base import (
+ CAN_SKIP,
+ Entity,
+ BadEntity,
+ Parser
+)
+
+
+class PoEntityMixin:
+
+ @property
+ def val(self):
+ return (
+ self.stringlist_val
+ if self.stringlist_val
+ else self.stringlist_key[0]
+ )
+
+ @property
+ def key(self):
+ return self.stringlist_key
+
+ @property
+ def localized(self):
+ # gettext denotes a non-localized string by an empty value
+ return bool(self.stringlist_val)
+
+ def __repr__(self):
+ return self.key[0]
+
+
+class PoEntity(PoEntityMixin, Entity):
+ pass
+
+
+# Unescape and concat a string list
+def eval_stringlist(lines):
+ return ''.join(
+ (
+ l
+ .replace(r'\\', '\\')
+ .replace(r'\t', '\t')
+ .replace(r'\r', '\r')
+ .replace(r'\n', '\n')
+ .replace(r'\"', '"')
+ )
+ for l in lines
+ )
+
+
+class PoParser(Parser):
+ # gettext l10n fallback at runtime, don't merge en-US strings
+ capabilities = CAN_SKIP
+
+ reKey = re.compile('msgctxt|msgid')
+ reValue = re.compile('(?P<white>[ \t\r\n]*)(?P<cmd>msgstr)')
+ reComment = re.compile(r'(?:#.*?\n)+')
+ # string list item:
+ # leading whitespace
+ # `"`
+ # escaped quotes etc, not quote, newline, backslash
+ # `"`
+ reListItem = re.compile(r'[ \t\r\n]*"((?:\\[\\trn"]|[^"\n\\])*)"')
+
+ def __init__(self):
+ super().__init__()
+
+ def createEntity(self, ctx, m, current_comment, white_space):
+ start = cursor = m.start()
+ id_start = cursor
+ try:
+ msgctxt, cursor = self._parse_string_list(ctx, cursor, 'msgctxt')
+ m = self.reWhitespace.match(ctx.contents, cursor)
+ if m:
+ cursor = m.end()
+ except BadEntity:
+ # no msgctxt is OK
+ msgctxt = None
+ if id_start is None:
+ id_start = cursor
+ msgid, cursor = self._parse_string_list(ctx, cursor, 'msgid')
+ id_end = cursor
+ m = self.reWhitespace.match(ctx.contents, cursor)
+ if m:
+ cursor = m.end()
+ val_start = cursor
+ msgstr, cursor = self._parse_string_list(ctx, cursor, 'msgstr')
+ e = PoEntity(
+ ctx,
+ current_comment,
+ white_space,
+ (start, cursor),
+ (id_start, id_end),
+ (val_start, cursor)
+ )
+ e.stringlist_key = (msgid, msgctxt)
+ e.stringlist_val = msgstr
+ return e
+
+ def _parse_string_list(self, ctx, cursor, key):
+ if not ctx.contents.startswith(key, cursor):
+ raise BadEntity
+ cursor += len(key)
+ frags = []
+ while True:
+ m = self.reListItem.match(ctx.contents, cursor)
+ if not m:
+ break
+ frags.append(m.group(1))
+ cursor = m.end()
+ if not frags:
+ raise BadEntity
+ return eval_stringlist(frags), cursor
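The string-list unescaping can be exercised on its own; each fragment keeps its gettext escapes until eval_stringlist concatenates and unescapes them:

    # two fragments as they would appear on consecutive quoted lines
    eval_stringlist(['Hello, ', r'world!\n'])
    # -> 'Hello, world!\n'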
diff --git a/third_party/python/compare_locales/compare_locales/parser/properties.py b/third_party/python/compare_locales/compare_locales/parser/properties.py
new file mode 100644
index 0000000000..396800c99b
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/parser/properties.py
@@ -0,0 +1,113 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+from .base import (
+ Entity, OffsetComment, Whitespace,
+ Parser
+)
+
+
+class PropertiesEntityMixin:
+ escape = re.compile(r'\\((?P<uni>u[0-9a-fA-F]{1,4})|'
+ '(?P<nl>\n[ \t]*)|(?P<single>.))', re.M)
+ known_escapes = {'n': '\n', 'r': '\r', 't': '\t', '\\': '\\'}
+
+ @property
+ def val(self):
+ def unescape(m):
+ found = m.groupdict()
+ if found['uni']:
+ return chr(int(found['uni'][1:], 16))
+ if found['nl']:
+ return ''
+ return self.known_escapes.get(found['single'], found['single'])
+
+ return self.escape.sub(unescape, self.raw_val)
+
+
+class PropertiesEntity(PropertiesEntityMixin, Entity):
+ pass
+
+
+class PropertiesParser(Parser):
+
+ Comment = OffsetComment
+
+ def __init__(self):
+ self.reKey = re.compile(
+ '(?P<key>[^#! \t\r\n][^=:\n]*?)[ \t]*[:=][ \t]*', re.M)
+ self.reComment = re.compile('(?:[#!][^\n]*\n)*(?:[#!][^\n]*)', re.M)
+ self._escapedEnd = re.compile(r'\\+$')
+ self._trailingWS = re.compile(r'[ \t\r\n]*(?:\n|\Z)', re.M)
+ Parser.__init__(self)
+
+ def getNext(self, ctx, offset):
+ junk_offset = offset
+ # overwritten to parse values line by line
+ contents = ctx.contents
+
+ m = self.reComment.match(contents, offset)
+ if m:
+ current_comment = self.Comment(ctx, m.span())
+ if offset == 0 and 'License' in current_comment.val:
+            # Heuristic. An early comment with "License" is probably
+ # a license header, and should be standalone.
+ return current_comment
+ offset = m.end()
+ else:
+ current_comment = None
+
+ m = self.reWhitespace.match(contents, offset)
+ if m:
+ white_space = Whitespace(ctx, m.span())
+ offset = m.end()
+ if (
+ current_comment is not None
+ and white_space.raw_val.count('\n') > 1
+ ):
+ # standalone comment
+ return current_comment
+ if current_comment is None:
+ return white_space
+ else:
+ white_space = None
+
+ m = self.reKey.match(contents, offset)
+ if m:
+ startline = offset = m.end()
+ while True:
+ endval = nextline = contents.find('\n', offset)
+ if nextline == -1:
+ endval = offset = len(contents)
+ break
+ # is newline escaped?
+ _e = self._escapedEnd.search(contents, offset, nextline)
+ offset = nextline + 1
+ if _e is None:
+ break
+ # backslashes at end of line, if 2*n, not escaped
+ if len(_e.group()) % 2 == 0:
+ break
+ startline = offset
+
+ # strip trailing whitespace
+ ws = self._trailingWS.search(contents, startline)
+ if ws:
+ endval = ws.start()
+
+ entity = PropertiesEntity(
+ ctx, current_comment, white_space,
+ (m.start(), endval), # full span
+ m.span('key'),
+ (m.end(), endval)) # value span
+ return entity
+
+ if current_comment is not None:
+ return current_comment
+ if white_space is not None:
+ return white_space
+
+ return self.getJunk(ctx, junk_offset, self.reKey, self.reComment)
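A sketch of the escape handling above, assuming the base Parser's readContents and walk helpers; the key and value are made up:

    p = PropertiesParser()
    p.readContents(b'greeting = Hello\\u0021\\n')
    entity = next(p.walk(only_localizable=True))
    entity.raw_val  # 'Hello\\u0021\\n' -- escapes intact
    entity.val      # 'Hello!\n'       -- \u0021 and \n unescaped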
diff --git a/third_party/python/compare_locales/compare_locales/paths/__init__.py b/third_party/python/compare_locales/compare_locales/paths/__init__.py
new file mode 100644
index 0000000000..f2d1c407c5
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/paths/__init__.py
@@ -0,0 +1,53 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from compare_locales import mozpath
+from .files import ProjectFiles, REFERENCE_LOCALE
+from .ini import (
+ L10nConfigParser, SourceTreeConfigParser,
+ EnumerateApp, EnumerateSourceTreeApp,
+)
+from .matcher import Matcher
+from .project import ProjectConfig
+from .configparser import TOMLParser, ConfigNotFound
+
+
+__all__ = [
+ 'Matcher',
+ 'ProjectConfig',
+ 'L10nConfigParser', 'SourceTreeConfigParser',
+ 'EnumerateApp', 'EnumerateSourceTreeApp',
+ 'ProjectFiles', 'REFERENCE_LOCALE',
+ 'TOMLParser', 'ConfigNotFound',
+]
+
+
+class File:
+
+ def __init__(self, fullpath, file, module=None, locale=None):
+ self.fullpath = fullpath
+ self.file = file
+ self.module = module
+ self.locale = locale
+
+ @property
+ def localpath(self):
+ if self.module:
+ return mozpath.join(self.locale, self.module, self.file)
+ return self.file
+
+ def __hash__(self):
+ return hash(self.localpath)
+
+ def __str__(self):
+ return self.fullpath
+
+ def __eq__(self, other):
+ if not isinstance(other, File):
+ return False
+ return vars(self) == vars(other)
+
+ def __ne__(self, other):
+ return not (self == other)
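With module and locale set, localpath joins them in front of the relative file path; the paths in this sketch are hypothetical:

    f = File('/mozilla/l10n/de/browser/menu.ftl', 'menu.ftl',
             module='browser', locale='de')
    f.localpath  # -> 'de/browser/menu.ftl'
    str(f)       # -> '/mozilla/l10n/de/browser/menu.ftl'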
diff --git a/third_party/python/compare_locales/compare_locales/paths/configparser.py b/third_party/python/compare_locales/compare_locales/paths/configparser.py
new file mode 100644
index 0000000000..1c1dbfbff3
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/paths/configparser.py
@@ -0,0 +1,138 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import errno
+import logging
+from compare_locales import mozpath
+from .project import ProjectConfig
+from .matcher import expand
+import toml
+
+
+class ConfigNotFound(EnvironmentError):
+ def __init__(self, path):
+ super().__init__(
+ errno.ENOENT,
+ 'Configuration file not found',
+ path)
+
+
+class ParseContext:
+ def __init__(self, path, env, ignore_missing_includes):
+ self.path = path
+ self.env = env
+ self.ignore_missing_includes = ignore_missing_includes
+ self.data = None
+ self.pc = ProjectConfig(path)
+
+
+class TOMLParser:
+ def parse(self, path, env=None, ignore_missing_includes=False):
+ ctx = self.context(
+ path, env=env, ignore_missing_includes=ignore_missing_includes
+ )
+ self.load(ctx)
+ self.processBasePath(ctx)
+ self.processEnv(ctx)
+ self.processPaths(ctx)
+ self.processFilters(ctx)
+ self.processIncludes(ctx)
+ self.processExcludes(ctx)
+ self.processLocales(ctx)
+ return self.asConfig(ctx)
+
+ def context(self, path, env=None, ignore_missing_includes=False):
+ return ParseContext(
+ path,
+ env if env is not None else {},
+ ignore_missing_includes,
+ )
+
+ def load(self, ctx):
+ try:
+ with open(ctx.path, 'rt') as fin:
+ ctx.data = toml.load(fin)
+ except (toml.TomlDecodeError, OSError):
+ raise ConfigNotFound(ctx.path)
+
+ def processBasePath(self, ctx):
+ assert ctx.data is not None
+ ctx.pc.set_root(ctx.data.get('basepath', '.'))
+
+ def processEnv(self, ctx):
+ assert ctx.data is not None
+ ctx.pc.add_environment(**ctx.data.get('env', {}))
+ # add parser environment, possibly overwriting file variables
+ ctx.pc.add_environment(**ctx.env)
+
+ def processLocales(self, ctx):
+ assert ctx.data is not None
+ if 'locales' in ctx.data:
+ ctx.pc.set_locales(ctx.data['locales'])
+
+ def processPaths(self, ctx):
+ assert ctx.data is not None
+ for data in ctx.data.get('paths', []):
+ paths = {
+ "l10n": data['l10n']
+ }
+ if 'locales' in data:
+ paths['locales'] = data['locales']
+ if 'reference' in data:
+ paths['reference'] = data['reference']
+ if 'test' in data:
+ paths['test'] = data['test']
+ ctx.pc.add_paths(paths)
+
+ def processFilters(self, ctx):
+ assert ctx.data is not None
+ for data in ctx.data.get('filters', []):
+ paths = data['path']
+ if isinstance(paths, str):
+ paths = [paths]
+ rule = {
+ "path": paths,
+ "action": data['action']
+ }
+ if 'key' in data:
+ rule['key'] = data['key']
+ ctx.pc.add_rules(rule)
+
+ def processIncludes(self, ctx):
+ for child in self._processChild(ctx, 'includes'):
+ ctx.pc.add_child(child)
+
+ def processExcludes(self, ctx):
+ for child in self._processChild(ctx, 'excludes'):
+ ctx.pc.exclude(child)
+
+ def _processChild(self, ctx, field):
+ assert ctx.data is not None
+ if field not in ctx.data:
+ return
+ for child_config in ctx.data[field]:
+ # resolve child_config['path'] against our root and env
+ p = mozpath.normpath(
+ expand(
+ ctx.pc.root,
+ child_config['path'],
+ ctx.pc.environ
+ )
+ )
+ try:
+ child = self.parse(
+ p, env=ctx.env,
+ ignore_missing_includes=ctx.ignore_missing_includes
+ )
+ except ConfigNotFound as e:
+ if not ctx.ignore_missing_includes:
+ raise
+ (logging
+ .getLogger('compare-locales.io')
+ .error('%s: %s', e.strerror, e.filename))
+ continue
+ yield child
+
+ def asConfig(self, ctx):
+ return ctx.pc
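The process* methods above imply the shape of the TOML this parser accepts. A hypothetical minimal configuration and its parse call (file name, patterns, and env values are all assumptions):

    sample = '''
    basepath = "."
    locales = ["de", "fr"]

    [[paths]]
        reference = "browser/locales/en-US/**"
        l10n = "{l10n_base}/{locale}/browser/**"

    [[filters]]
        path = "{l10n_base}/{locale}/browser/extra.ftl"
        action = "ignore"
    '''
    with open('l10n.toml', 'w') as f:
        f.write(sample)
    config = TOMLParser().parse('l10n.toml', env={'l10n_base': '/l10n'})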
diff --git a/third_party/python/compare_locales/compare_locales/paths/files.py b/third_party/python/compare_locales/compare_locales/paths/files.py
new file mode 100644
index 0000000000..bfbe7ffbd1
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/paths/files.py
@@ -0,0 +1,224 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+from compare_locales import mozpath
+
+
+REFERENCE_LOCALE = 'en-x-moz-reference'
+
+
+class ConfigList(list):
+ def maybe_extend(self, other):
+        '''Add configs from the other list if this list doesn't have their path yet.
+ '''
+ for config in other:
+ if any(mine.path == config.path for mine in self):
+ continue
+ self.append(config)
+
+
+class ProjectFiles:
+ '''Iterable object to get all files and tests for a locale and a
+ list of ProjectConfigs.
+
+ If the given locale is None, iterate over reference files as
+ both reference and locale for a reference self-test.
+ '''
+ def __init__(self, locale, projects, mergebase=None):
+ self.locale = locale
+ self.matchers = []
+ self.exclude = None
+ self.mergebase = mergebase
+ configs = ConfigList()
+ excludes = ConfigList()
+ for project in projects:
+ # Only add this project if we're not in validation mode,
+ # and the given locale is enabled for the project.
+ if locale is not None and locale not in project.all_locales:
+ continue
+ configs.maybe_extend(project.configs)
+ excludes.maybe_extend(project.excludes)
+        # If an excluded config is explicitly included, drop it from the
+ # excludes.
+ excludes = [
+ exclude
+ for exclude in excludes
+ if not any(c.path == exclude.path for c in configs)
+ ]
+ if excludes:
+ self.exclude = ProjectFiles(locale, excludes)
+ for pc in configs:
+ if locale and pc.locales is not None and locale not in pc.locales:
+ continue
+ for paths in pc.paths:
+ if (
+ locale and
+ 'locales' in paths and
+ locale not in paths['locales']
+ ):
+ continue
+ m = {
+ 'l10n': paths['l10n'].with_env({
+ "locale": locale or REFERENCE_LOCALE
+ }),
+ 'module': paths.get('module'),
+ }
+ if 'reference' in paths:
+ m['reference'] = paths['reference']
+ if self.mergebase is not None:
+ m['merge'] = paths['l10n'].with_env({
+ "locale": locale,
+ "l10n_base": self.mergebase
+ })
+ m['test'] = set(paths.get('test', []))
+ if 'locales' in paths:
+ m['locales'] = paths['locales'][:]
+ self.matchers.append(m)
+ self.matchers.reverse() # we always iterate last first
+ # Remove duplicate patterns, comparing each matcher
+ # against all other matchers.
+ # Avoid n^2 comparisons by only scanning the upper triangle
+        # of an n x n matrix of all possible combinations.
+ # Using enumerate and keeping track of indexes, as we can't
+ # modify the list while iterating over it.
+ drops = set() # duplicate matchers to remove
+ for i, m in enumerate(self.matchers[:-1]):
+ if i in drops:
+ continue # we're dropping this anyway, don't search again
+ for i_, m_ in enumerate(self.matchers[(i+1):]):
+ if (mozpath.realpath(m['l10n'].prefix) !=
+ mozpath.realpath(m_['l10n'].prefix)):
+ # ok, not the same thing, continue
+ continue
+ if m['l10n'].pattern != m_['l10n'].pattern:
+                    # We can't tell whether they're the same entry until
+                    # the pattern is resolved, continue
+ continue
+ # check that we're comparing the same thing
+ if 'reference' in m:
+ if (mozpath.realpath(m['reference'].prefix) !=
+ mozpath.realpath(m_.get('reference').prefix)):
+ raise RuntimeError('Mismatch in reference for ' +
+ mozpath.realpath(m['l10n'].prefix))
+ drops.add(i_ + i + 1)
+ m['test'] |= m_['test']
+ drops = sorted(drops, reverse=True)
+ for i in drops:
+ del self.matchers[i]
+
+ def __iter__(self):
+ # The iteration is pretty different when we iterate over
+        # a localization vs over the reference. We do the latter
+ # when running in validation mode.
+ inner = self.iter_locale() if self.locale else self.iter_reference()
+ yield from inner
+
+ def iter_locale(self):
+ '''Iterate over locale files.'''
+ known = {}
+ for matchers in self.matchers:
+ matcher = matchers['l10n']
+ for path in self._files(matcher):
+ if path not in known:
+ known[path] = {'test': matchers.get('test')}
+ if 'reference' in matchers:
+ known[path]['reference'] = matcher.sub(
+ matchers['reference'], path)
+ if 'merge' in matchers:
+ known[path]['merge'] = matcher.sub(
+ matchers['merge'], path)
+ if 'reference' not in matchers:
+ continue
+ matcher = matchers['reference']
+ for path in self._files(matcher):
+ l10npath = matcher.sub(matchers['l10n'], path)
+ if l10npath not in known:
+ known[l10npath] = {
+ 'reference': path,
+ 'test': matchers.get('test')
+ }
+ if 'merge' in matchers:
+ known[l10npath]['merge'] = \
+ matcher.sub(matchers['merge'], path)
+ for path, d in sorted(known.items()):
+ yield (path, d.get('reference'), d.get('merge'), d['test'])
+
+ def iter_reference(self):
+ '''Iterate over reference files.'''
+ # unset self.exclude, as we don't want that for our reference files
+ exclude = self.exclude
+ self.exclude = None
+ known = {}
+ for matchers in self.matchers:
+ if 'reference' not in matchers:
+ continue
+ matcher = matchers['reference']
+ for path in self._files(matcher):
+ refpath = matcher.sub(matchers['reference'], path)
+ if refpath not in known:
+ known[refpath] = {
+ 'reference': path,
+ 'test': matchers.get('test')
+ }
+ for path, d in sorted(known.items()):
+ yield (path, d.get('reference'), None, d['test'])
+ self.exclude = exclude
+
+ def _files(self, matcher):
+ '''Base implementation of getting all files in a hierarchy
+ using the file system.
+ Subclasses might replace this method to support different IO
+ patterns.
+ '''
+ base = matcher.prefix
+ if self._isfile(base):
+ if self.exclude and self.exclude.match(base) is not None:
+ return
+ if matcher.match(base) is not None:
+ yield base
+ return
+ for d, dirs, files in self._walk(base):
+ for f in files:
+ p = mozpath.join(d, f)
+ if self.exclude and self.exclude.match(p) is not None:
+ continue
+ if matcher.match(p) is not None:
+ yield p
+
+ def _isfile(self, path):
+ return os.path.isfile(path)
+
+ def _walk(self, base):
+ yield from os.walk(base)
+
+ def match(self, path):
+ '''Return the tuple of l10n_path, reference, mergepath, tests
+ if the given path matches any config, otherwise None.
+
+ This routine doesn't check that the files actually exist.
+ '''
+ if (
+ self.locale is not None and
+ self.exclude and self.exclude.match(path) is not None
+ ):
+ return
+ for matchers in self.matchers:
+ matcher = matchers['l10n']
+ if self.locale is not None and matcher.match(path) is not None:
+ ref = merge = None
+ if 'reference' in matchers:
+ ref = matcher.sub(matchers['reference'], path)
+ if 'merge' in matchers:
+ merge = matcher.sub(matchers['merge'], path)
+ return path, ref, merge, matchers.get('test')
+ if 'reference' not in matchers:
+ continue
+ matcher = matchers['reference']
+ if matcher.match(path) is not None:
+ merge = None
+ l10n = matcher.sub(matchers['l10n'], path)
+ if 'merge' in matchers:
+ merge = matcher.sub(matchers['merge'], path)
+ return l10n, path, merge, matchers.get('test')
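Iteration yields one tuple per localized file, and match() answers the same question for a single path. A hedged sketch, assuming `project` is a ProjectConfig with l10n and reference paths already set up:

    files = ProjectFiles('de', [project])
    for l10n_path, reference_path, merge_path, tests in files:
        print(l10n_path, reference_path)
    files.match('/l10n/de/browser/menu.ftl')
    # -> (l10n_path, reference, mergepath, tests), or None if no
    #    config matches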
diff --git a/third_party/python/compare_locales/compare_locales/paths/ini.py b/third_party/python/compare_locales/compare_locales/paths/ini.py
new file mode 100644
index 0000000000..bde7def0ca
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/paths/ini.py
@@ -0,0 +1,224 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from configparser import ConfigParser, NoSectionError, NoOptionError
+from collections import defaultdict
+from compare_locales import util, mozpath
+from .project import ProjectConfig
+
+
+class L10nConfigParser:
+ '''Helper class to gather application information from ini files.
+
+    This class uses synchronous reads to load files or web data.
+ Subclass this and overwrite loadConfigs and addChild if you need async.
+ '''
+ def __init__(self, inipath, **kwargs):
+ """Constructor for L10nConfigParsers
+
+ inipath -- l10n.ini path
+        Optional keyword arguments are forwarded to the inner ConfigParser as
+ defaults.
+ """
+ self.inipath = mozpath.normpath(inipath)
+ # l10n.ini files can import other l10n.ini files, store the
+ # corresponding L10nConfigParsers
+ self.children = []
+ # we really only care about the l10n directories described in l10n.ini
+ self.dirs = []
+ # optional defaults to be passed to the inner ConfigParser (unused?)
+ self.defaults = kwargs
+
+ def getDepth(self, cp):
+ '''Get the depth for the comparison from the parsed l10n.ini.
+ '''
+ try:
+ depth = cp.get('general', 'depth')
+ except (NoSectionError, NoOptionError):
+ depth = '.'
+ return depth
+
+ def getFilters(self):
+ '''Get the test functions from this ConfigParser and all children.
+
+ Only works with synchronous loads, used by compare-locales, which
+ is local anyway.
+ '''
+ filter_path = mozpath.join(mozpath.dirname(self.inipath), 'filter.py')
+ try:
+ local = {}
+ with open(filter_path) as f:
+ exec(compile(f.read(), filter_path, 'exec'), {}, local)
+ if 'test' in local and callable(local['test']):
+ filters = [local['test']]
+ else:
+ filters = []
+ except BaseException: # we really want to handle EVERYTHING here
+ filters = []
+
+ for c in self.children:
+ filters += c.getFilters()
+
+ return filters
+
+ def loadConfigs(self):
+ """Entry point to load the l10n.ini file this Parser refers to.
+
+ This implementation uses synchronous loads, subclasses might overload
+ this behaviour. If you do, make sure to pass a file-like object
+ to onLoadConfig.
+ """
+ cp = ConfigParser(self.defaults)
+ cp.read(self.inipath)
+ depth = self.getDepth(cp)
+ self.base = mozpath.join(mozpath.dirname(self.inipath), depth)
+ # create child loaders for any other l10n.ini files to be included
+ try:
+ for title, path in cp.items('includes'):
+ # skip default items
+ if title in self.defaults:
+ continue
+ # add child config parser
+ self.addChild(title, path, cp)
+ except NoSectionError:
+ pass
+ # try to load the "dirs" defined in the "compare" section
+ try:
+ self.dirs.extend(cp.get('compare', 'dirs').split())
+ except (NoOptionError, NoSectionError):
+ pass
+ # try to set "all_path" and "all_url"
+ try:
+ self.all_path = mozpath.join(self.base, cp.get('general', 'all'))
+ except (NoOptionError, NoSectionError):
+ self.all_path = None
+ return cp
+
+ def addChild(self, title, path, orig_cp):
+ """Create a child L10nConfigParser and load it.
+
+ title -- indicates the module's name
+ path -- indicates the path to the module's l10n.ini file
+ orig_cp -- the configuration parser of this l10n.ini
+ """
+ cp = L10nConfigParser(mozpath.join(self.base, path), **self.defaults)
+ cp.loadConfigs()
+ self.children.append(cp)
+
+ def dirsIter(self):
+ """Iterate over all dirs and our base path for this l10n.ini"""
+ for dir in self.dirs:
+ yield dir, (self.base, dir)
+
+ def directories(self):
+ """Iterate over all dirs and base paths for this l10n.ini as well
+ as the included ones.
+ """
+ yield from self.dirsIter()
+ for child in self.children:
+ yield from child.directories()
+
+ def allLocales(self):
+ """Return a list of all the locales of this project"""
+ with open(self.all_path) as f:
+ return util.parseLocales(f.read())
+
+
+class SourceTreeConfigParser(L10nConfigParser):
+ '''Subclassing L10nConfigParser to work with just the repos
+ checked out next to each other instead of intermingled like
+ we do for real builds.
+ '''
+
+ def __init__(self, inipath, base, redirects):
+        '''Add the additional arguments base and redirects.
+
+        base is used to resolve local paths via branch names.
+        redirects is used in a unified repository, mapping upstream
+        repos to local clones.
+ '''
+ L10nConfigParser.__init__(self, inipath)
+ self.base = base
+ self.redirects = redirects
+
+ def addChild(self, title, path, orig_cp):
+ # check if there's a section with details for this include
+ # we might have to check a different repo, or even VCS
+ # for example, projects like "mail" indicate in
+ # an "include_" section where to find the l10n.ini for "toolkit"
+ details = 'include_' + title
+ if orig_cp.has_section(details):
+ branch = orig_cp.get(details, 'mozilla')
+ branch = self.redirects.get(branch, branch)
+ inipath = orig_cp.get(details, 'l10n.ini')
+ path = mozpath.join(self.base, branch, inipath)
+ else:
+ path = mozpath.join(self.base, path)
+ cp = SourceTreeConfigParser(path, self.base, self.redirects,
+ **self.defaults)
+ cp.loadConfigs()
+ self.children.append(cp)
+
+
+class EnumerateApp:
+ reference = 'en-US'
+
+ def __init__(self, inipath, l10nbase):
+ self.setupConfigParser(inipath)
+ self.modules = defaultdict(dict)
+ self.l10nbase = mozpath.abspath(l10nbase)
+ self.filters = []
+ self.addFilters(*self.config.getFilters())
+
+ def setupConfigParser(self, inipath):
+ self.config = L10nConfigParser(inipath)
+ self.config.loadConfigs()
+
+ def addFilters(self, *args):
+ self.filters += args
+
+ def asConfig(self):
+ # We've already normalized paths in the ini parsing.
+ # Set the path and root to None to just keep our paths as is.
+ config = ProjectConfig(None)
+ config.set_root('.') # sets to None because path is None
+ config.add_environment(l10n_base=self.l10nbase)
+ self._config_for_ini(config, self.config)
+ filters = self.config.getFilters()
+ if filters:
+ config.set_filter_py(filters[0])
+ config.set_locales(self.config.allLocales(), deep=True)
+ return config
+
+ def _config_for_ini(self, projectconfig, aConfig):
+ for k, (basepath, module) in aConfig.dirsIter():
+ paths = {
+ 'module': module,
+ 'reference': mozpath.normpath('%s/%s/locales/en-US/**' %
+ (basepath, module)),
+ 'l10n': mozpath.normpath('{l10n_base}/{locale}/%s/**' %
+ module)
+ }
+ if module == 'mobile/android/base':
+ paths['test'] = ['android-dtd']
+ projectconfig.add_paths(paths)
+ for child in aConfig.children:
+ self._config_for_ini(projectconfig, child)
+
+
+class EnumerateSourceTreeApp(EnumerateApp):
+ '''Subclass EnumerateApp to work on side-by-side checked out
+    repos, and to pay no attention to how the source would actually
+ be checked out for building.
+ '''
+
+ def __init__(self, inipath, basepath, l10nbase, redirects):
+ self.basepath = basepath
+ self.redirects = redirects
+ EnumerateApp.__init__(self, inipath, l10nbase)
+
+ def setupConfigParser(self, inipath):
+ self.config = SourceTreeConfigParser(inipath, self.basepath,
+ self.redirects)
+ self.config.loadConfigs()
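For orientation, a hypothetical l10n.ini in the shape these classes consume, followed by the EnumerateApp entry point (all paths are made up):

    # [general]
    # depth = ../..
    # all = browser/locales/all-locales
    #
    # [compare]
    # dirs = browser
    #
    # [includes]
    # toolkit = toolkit/locales/l10n.ini

    app = EnumerateApp('mozilla/browser/locales/l10n.ini', '/l10n-central')
    config = app.asConfig()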
diff --git a/third_party/python/compare_locales/compare_locales/paths/matcher.py b/third_party/python/compare_locales/compare_locales/paths/matcher.py
new file mode 100644
index 0000000000..82de936107
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/paths/matcher.py
@@ -0,0 +1,470 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+import re
+import itertools
+from compare_locales import mozpath
+
+
+# Android uses non-standard locale codes, these are the mappings
+# back and forth
+ANDROID_LEGACY_MAP = {
+ 'he': 'iw',
+ 'id': 'in',
+ 'yi': 'ji'
+}
+ANDROID_STANDARD_MAP = {
+ legacy: standard
+ for standard, legacy in ANDROID_LEGACY_MAP.items()
+}
+
+
+class Matcher:
+ '''Path pattern matcher
+ Supports path matching similar to mozpath.match(), but does
+ not match trailing file paths without trailing wildcards.
+    It also exposes a prefix, the path up to the first wildcard,
+    which is useful for filesystem iteration, and it can substitute
+    its own matches in a path into a different Matcher. compare-locales
+ uses that to transform l10n and en-US paths back and forth.
+ '''
+
+ def __init__(self, pattern_or_other, env={}, root=None, encoding=None):
+ '''Create regular expression similar to mozpath.match().
+ '''
+ parser = PatternParser()
+ real_env = {k: parser.parse(v) for k, v in env.items()}
+ self._cached_re = None
+ if root is not None:
+ # make sure that our root is fully expanded and ends with /
+ root = mozpath.abspath(root) + '/'
+ # allow constructing Matchers from Matchers
+ if isinstance(pattern_or_other, Matcher):
+ other = pattern_or_other
+ self.pattern = Pattern(other.pattern)
+ self.env = other.env.copy()
+ self.env.update(real_env)
+ if root is not None:
+ self.pattern.root = root
+ self.encoding = other.encoding
+ return
+ self.env = real_env
+ pattern = pattern_or_other
+ self.pattern = parser.parse(pattern)
+ if root is not None:
+ self.pattern.root = root
+ self.encoding = encoding
+
+ def with_env(self, environ):
+ return Matcher(self, environ)
+
+ @property
+ def prefix(self):
+ subpattern = Pattern(self.pattern[:self.pattern.prefix_length])
+ subpattern.root = self.pattern.root
+ prefix = subpattern.expand(self.env)
+ if self.encoding is not None:
+ prefix = prefix.encode(self.encoding)
+ return prefix
+
+ def match(self, path):
+ '''Test the given path against this matcher and its environment.
+
+ Return None if there's no match, and the dictionary of matched
+ variables in this matcher if there's a match.
+ '''
+ self._cache_regex()
+ m = self._cached_re.match(path)
+ if m is None:
+ return None
+ d = m.groupdict()
+ if self.encoding is not None:
+ d = {key: value.decode(self.encoding) for key, value in d.items()}
+ if 'android_locale' in d and 'locale' not in d:
+ # map android_locale to locale code
+ locale = d['android_locale']
+ # map legacy locale codes, he <-> iw, id <-> in, yi <-> ji
+ locale = re.sub(
+ r'(iw|in|ji)(?=\Z|-)',
+ lambda legacy: ANDROID_STANDARD_MAP[legacy.group(1)],
+ locale
+ )
+ locale = re.sub(r'-r([A-Z]{2})', r'-\1', locale)
+ locale = locale.replace('b+', '').replace('+', '-')
+ d['locale'] = locale
+ return d
+
+ def _cache_regex(self):
+ if self._cached_re is not None:
+ return
+ pattern = self.pattern.regex_pattern(self.env) + '$'
+ if self.encoding is not None:
+ pattern = pattern.encode(self.encoding)
+ self._cached_re = re.compile(pattern)
+
+ def sub(self, other, path):
+ '''
+ Replace the wildcard matches in this pattern into the
+ pattern of the other Match object.
+ '''
+ m = self.match(path)
+ if m is None:
+ return None
+ env = {}
+ env.update(
+ (key, Literal(value if value is not None else ''))
+ for key, value in m.items()
+ )
+ env.update(other.env)
+ path = other.pattern.expand(env)
+ if self.encoding is not None:
+ path = path.encode(self.encoding)
+ return path
+
+ def concat(self, other):
+ '''Concat two Matcher objects.
+
+ The intent is to create one Matcher with variable substitutions that
+ behaves as if you joined the resulting paths.
+ This doesn't do path separator logic, though, and it won't resolve
+ parent directories.
+ '''
+ if not isinstance(other, Matcher):
+ other_matcher = Matcher(other)
+ else:
+ other_matcher = other
+ other_pattern = other_matcher.pattern
+ if other_pattern.root is not None:
+ raise ValueError('Other matcher must not be rooted')
+ result = Matcher(self)
+ result.pattern += other_pattern
+ if self.pattern.prefix_length == len(self.pattern):
+ result.pattern.prefix_length += other_pattern.prefix_length
+ result.env.update(other_matcher.env)
+ return result
+
+ def __str__(self):
+ return self.pattern.expand(self.env)
+
+ def __repr__(self):
+ return '{}({!r}, env={!r}, root={!r})'.format(
+ type(self).__name__, self.pattern, self.env, self.pattern.root
+ )
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __eq__(self, other):
+ '''Equality for Matcher.
+
+ The equality for Matchers is defined to have the same pattern,
+ and no conflicting environment. Additional environment settings
+ in self or other are OK.
+ '''
+ if other.__class__ is not self.__class__:
+ return NotImplemented
+ if self.pattern != other.pattern:
+ return False
+ if self.env and other.env:
+ for k in self.env:
+ if k not in other.env:
+ continue
+ if self.env[k] != other.env[k]:
+ return False
+ if self.encoding != other.encoding:
+ return False
+ return True
+
+
+def expand(root, path, env):
+ '''Expand a given path relative to the given root,
+ using the given env to resolve variables.
+
+ This will break if the path contains wildcards.
+ '''
+ matcher = Matcher(path, env=env, root=root)
+ return str(matcher)
+
+
+class MissingEnvironment(Exception):
+ pass
+
+
+class Node:
+ '''Abstract base class for all nodes in parsed patterns.'''
+ def regex_pattern(self, env):
+ '''Create a regular expression fragment for this Node.'''
+ raise NotImplementedError
+
+ def expand(self, env):
+ '''Convert this node to a string with the given environment.'''
+ raise NotImplementedError
+
+
+class Pattern(list, Node):
+ def __init__(self, iterable=[]):
+ list.__init__(self, iterable)
+ self.root = getattr(iterable, 'root', None)
+ self.prefix_length = getattr(iterable, 'prefix_length', None)
+
+ def regex_pattern(self, env):
+ root = ''
+ if self.root is not None:
+ # make sure we're not hiding a full path
+ first_seg = self[0].expand(env)
+ if not os.path.isabs(first_seg):
+ root = re.escape(self.root)
+ return root + ''.join(
+ child.regex_pattern(env) for child in self
+ )
+
+ def expand(self, env, raise_missing=False):
+ root = ''
+ if self.root is not None:
+ # make sure we're not hiding a full path
+ first_seg = self[0].expand(env)
+ if not os.path.isabs(first_seg):
+ root = self.root
+ return root + ''.join(self._expand_children(env, raise_missing))
+
+ def _expand_children(self, env, raise_missing):
+ # Helper iterator to convert Exception to a stopped iterator
+ for child in self:
+ try:
+ yield child.expand(env, raise_missing=True)
+ except MissingEnvironment:
+ if raise_missing:
+ raise
+ return
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __eq__(self, other):
+ if not super().__eq__(other):
+ return False
+ if other.__class__ == list:
+ # good for tests and debugging
+ return True
+ return (
+ self.root == other.root
+ and self.prefix_length == other.prefix_length
+ )
+
+
+class Literal(str, Node):
+ def regex_pattern(self, env):
+ return re.escape(self)
+
+ def expand(self, env, raise_missing=False):
+ return self
+
+
+class Variable(Node):
+ def __init__(self, name, repeat=False):
+ self.name = name
+ self.repeat = repeat
+
+ def regex_pattern(self, env):
+ if self.repeat:
+ return f'(?P={self.name})'
+ return f'(?P<{self.name}>{self._pattern_from_env(env)})'
+
+ def _pattern_from_env(self, env):
+ if self.name in env:
+ # make sure we match the value in the environment
+ return env[self.name].regex_pattern(self._no_cycle(env))
+ # match anything, including path segments
+ return '.+?'
+
+ def expand(self, env, raise_missing=False):
+ '''Create a string for this Variable.
+
+        This expansion happens recursively. We avoid recursion loops
+ by removing the current variable from the environment that's used
+ to expand child variable references.
+ '''
+ if self.name not in env:
+ raise MissingEnvironment
+ return env[self.name].expand(
+ self._no_cycle(env), raise_missing=raise_missing
+ )
+
+ def _no_cycle(self, env):
+ '''Remove our variable name from the environment.
+ That way, we can't create cyclic references.
+ '''
+ if self.name not in env:
+ return env
+ env = env.copy()
+ env.pop(self.name)
+ return env
+
+ def __repr__(self):
+ return f'Variable(name="{self.name}")'
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __eq__(self, other):
+ if other.__class__ is not self.__class__:
+ return False
+ return (
+ self.name == other.name
+ and self.repeat == other.repeat
+ )
+
+
+class AndroidLocale(Variable):
+ '''Subclass for Android locale code mangling.
+
+    Supports ab-rCD and b+ab+Script+DE.
+ Language and Language-Region tags get mapped to ab-rCD, more complex
+ Locale tags to b+.
+ '''
+ def __init__(self, repeat=False):
+ self.name = 'android_locale'
+ self.repeat = repeat
+
+ def _pattern_from_env(self, env):
+ android_locale = self._get_android_locale(env)
+ if android_locale is not None:
+ return re.escape(android_locale)
+ return '.+?'
+
+ def expand(self, env, raise_missing=False):
+ '''Create a string for this Variable.
+
+        This expansion happens recursively. We avoid recursion loops
+ by removing the current variable from the environment that's used
+ to expand child variable references.
+ '''
+ android_locale = self._get_android_locale(env)
+ if android_locale is None:
+ raise MissingEnvironment
+ return android_locale
+
+ def _get_android_locale(self, env):
+ if 'locale' not in env:
+ return None
+ android = bcp47 = env['locale'].expand(self._no_cycle(env))
+ # map legacy locale codes, he <-> iw, id <-> in, yi <-> ji
+ android = bcp47 = re.sub(
+ r'(he|id|yi)(?=\Z|-)',
+ lambda standard: ANDROID_LEGACY_MAP[standard.group(1)],
+ bcp47
+ )
+ if re.match(r'[a-z]{2,3}-[A-Z]{2}', bcp47):
+ android = '{}-r{}'.format(*bcp47.split('-'))
+ elif '-' in bcp47:
+ android = 'b+' + bcp47.replace('-', '+')
+ return android
+
+
+class Star(Node):
+ def __init__(self, number):
+ self.number = number
+
+ def regex_pattern(self, env):
+ return f'(?P<s{self.number}>[^/]*)'
+
+ def expand(self, env, raise_missing=False):
+ return env['s%d' % self.number]
+
+ def __repr__(self):
+ return type(self).__name__
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __eq__(self, other):
+ if other.__class__ is not self.__class__:
+ return False
+ return self.number == other.number
+
+
+class Starstar(Star):
+ def __init__(self, number, suffix):
+ self.number = number
+ self.suffix = suffix
+
+ def regex_pattern(self, env):
+ return f'(?P<s{self.number}>.+{self.suffix})?'
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __eq__(self, other):
+ if not super().__eq__(other):
+ return False
+ return self.suffix == other.suffix
+
+
+PATH_SPECIAL = re.compile(
+ r'(?P<starstar>(?<![^/}])\*\*(?P<suffix>/|$))'
+ r'|'
+ r'(?P<star>\*)'
+ r'|'
+ r'(?P<variable>{ *(?P<varname>[\w]+) *})'
+)
+
+
+class PatternParser:
+ def __init__(self):
+ # Not really initializing anything, just making room for our
+ # result and state members.
+ self.pattern = None
+ self._stargroup = self._cursor = None
+ self._known_vars = None
+
+ def parse(self, pattern):
+ if isinstance(pattern, Pattern):
+ return pattern
+ if isinstance(pattern, Matcher):
+ return pattern.pattern
+ # Initializing result and state
+ self.pattern = Pattern()
+ self._stargroup = itertools.count(1)
+ self._known_vars = set()
+ self._cursor = 0
+ for match in PATH_SPECIAL.finditer(pattern):
+ if match.start() > self._cursor:
+ self.pattern.append(
+ Literal(pattern[self._cursor:match.start()])
+ )
+ self.handle(match)
+ self.pattern.append(Literal(pattern[self._cursor:]))
+ if self.pattern.prefix_length is None:
+ self.pattern.prefix_length = len(self.pattern)
+ return self.pattern
+
+ def handle(self, match):
+ if match.group('variable'):
+ self.variable(match)
+ else:
+ self.wildcard(match)
+ self._cursor = match.end()
+
+ def variable(self, match):
+ varname = match.group('varname')
+ # Special case Android locale code matching.
+ # It's kinda sad, but true.
+ if varname == 'android_locale':
+ self.pattern.append(AndroidLocale(varname in self._known_vars))
+ else:
+ self.pattern.append(Variable(varname, varname in self._known_vars))
+ self._known_vars.add(varname)
+
+ def wildcard(self, match):
+ # wildcard found, stop prefix
+ if self.pattern.prefix_length is None:
+ self.pattern.prefix_length = len(self.pattern)
+ wildcard = next(self._stargroup)
+ if match.group('star'):
+ # *
+ self.pattern.append(Star(wildcard))
+ else:
+ # **
+ self.pattern.append(Starstar(wildcard, match.group('suffix')))
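A sketch of the match/sub round trip the Matcher docstring describes; the patterns and paths are hypothetical:

    ref = Matcher('browser/locales/en-US/**')
    l10n = Matcher('{l10n_base}/{locale}/browser/**',
                   env={'l10n_base': '/l10n', 'locale': 'de'})
    l10n.match('/l10n/de/browser/menu.ftl')
    # -> {'l10n_base': '/l10n', 'locale': 'de', 's1': 'menu.ftl'}
    ref.sub(l10n, 'browser/locales/en-US/menu.ftl')
    # -> '/l10n/de/browser/menu.ftl'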
diff --git a/third_party/python/compare_locales/compare_locales/paths/project.py b/third_party/python/compare_locales/compare_locales/paths/project.py
new file mode 100644
index 0000000000..1f18a9d2d5
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/paths/project.py
@@ -0,0 +1,260 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+from compare_locales import mozpath
+from .matcher import Matcher
+
+
+class ExcludeError(ValueError):
+ pass
+
+
+class ProjectConfig:
+ '''Abstraction of l10n project configuration data.
+ '''
+
+ def __init__(self, path):
+ self.filter_py = None # legacy filter code
+ # {
+ # 'l10n': pattern,
+ # 'reference': pattern, # optional
+ # 'locales': [], # optional
+ # 'test': [], # optional
+ # }
+ self.path = path
+ self.root = None
+ self.paths = []
+ self.rules = []
+ self.locales = None
+ # cache for all_locales, as that's not in `filter`
+ self._all_locales = None
+ self.environ = {}
+ self.children = []
+ self.excludes = []
+ self._cache = None
+
+ def same(self, other):
+ '''Equality test, ignoring locales.
+ '''
+ if other.__class__ is not self.__class__:
+ return False
+ if len(self.children) != len(other.children):
+ return False
+ for prop in ('path', 'root', 'paths', 'rules', 'environ'):
+ if getattr(self, prop) != getattr(other, prop):
+ return False
+ for this_child, other_child in zip(self.children, other.children):
+ if not this_child.same(other_child):
+ return False
+ return True
+
+ def set_root(self, basepath):
+ if self.path is None:
+ self.root = None
+ return
+ self.root = mozpath.abspath(
+ mozpath.join(mozpath.dirname(self.path), basepath)
+ )
+
+ def add_environment(self, **kwargs):
+ self.environ.update(kwargs)
+
+ def add_paths(self, *paths):
+ '''Add path dictionaries to this config.
+ The dictionaries must have a `l10n` key. For monolingual files,
+ `reference` is also required.
+ An optional key `test` is allowed to enable additional tests for this
+ path pattern.
+ '''
+ self._all_locales = None # clear cache
+ for d in paths:
+ rv = {
+ 'l10n': Matcher(d['l10n'], env=self.environ, root=self.root),
+ 'module': d.get('module')
+ }
+ if 'reference' in d:
+ rv['reference'] = Matcher(
+ d['reference'], env=self.environ, root=self.root
+ )
+ if 'test' in d:
+ rv['test'] = d['test']
+ if 'locales' in d:
+ rv['locales'] = d['locales'][:]
+ self.paths.append(rv)
+
+ def set_filter_py(self, filter_function):
+ '''Set legacy filter.py code.
+ Assert that no rules are set.
+ Also, normalize output already here.
+ '''
+ assert not self.rules
+
+ def filter_(module, path, entity=None):
+ try:
+ rv = filter_function(module, path, entity=entity)
+ except BaseException: # we really want to handle EVERYTHING here
+ return 'error'
+ rv = {
+ True: 'error',
+ False: 'ignore',
+ 'report': 'warning'
+ }.get(rv, rv)
+ assert rv in ('error', 'ignore', 'warning', None)
+ return rv
+ self.filter_py = filter_
+
+ def add_rules(self, *rules):
+ '''Add rules to filter on.
+ Assert that there's no legacy filter.py code hooked up.
+ '''
+ assert self.filter_py is None
+ for rule in rules:
+ self.rules.extend(self._compile_rule(rule))
+
+ def add_child(self, child):
+ self._all_locales = None # clear cache
+ if child.excludes:
+ raise ExcludeError(
+ 'Included configs cannot declare their own excludes.'
+ )
+ self.children.append(child)
+
+ def exclude(self, child):
+ for config in child.configs:
+ if config.excludes:
+ raise ExcludeError(
+ 'Excluded configs cannot declare their own excludes.'
+ )
+ self.excludes.append(child)
+
+ def set_locales(self, locales, deep=False):
+ self._all_locales = None # clear cache
+ self.locales = locales
+ if not deep:
+ return
+ for child in self.children:
+ child.set_locales(locales, deep=deep)
+
+ @property
+ def configs(self):
+ 'Recursively get all configs in this project and its children'
+ yield self
+ for child in self.children:
+ yield from child.configs
+
+ @property
+ def all_locales(self):
+ 'Recursively get all locales in this project and its paths'
+ if self._all_locales is None:
+ all_locales = set()
+ for config in self.configs:
+ if config.locales is not None:
+ all_locales.update(config.locales)
+ for paths in config.paths:
+ if 'locales' in paths:
+ all_locales.update(paths['locales'])
+ self._all_locales = sorted(all_locales)
+ return self._all_locales
+
+ def filter(self, l10n_file, entity=None):
+ '''Filter a localization file or entities within, according to
+ this configuration file.'''
+ if l10n_file.locale not in self.all_locales:
+ return 'ignore'
+ if self.filter_py is not None:
+ return self.filter_py(l10n_file.module, l10n_file.file,
+ entity=entity)
+ rv = self._filter(l10n_file, entity=entity)
+ if rv is None:
+ return 'ignore'
+ return rv
+
+ class FilterCache:
+ def __init__(self, locale):
+ self.locale = locale
+ self.rules = []
+ self.l10n_paths = []
+
+ def cache(self, locale):
+ if self._cache and self._cache.locale == locale:
+ return self._cache
+ self._cache = self.FilterCache(locale)
+ for paths in self.paths:
+ if 'locales' in paths and locale not in paths['locales']:
+ continue
+ self._cache.l10n_paths.append(paths['l10n'].with_env({
+ "locale": locale
+ }))
+ for rule in self.rules:
+ cached_rule = rule.copy()
+ cached_rule['path'] = rule['path'].with_env({
+ "locale": locale
+ })
+ self._cache.rules.append(cached_rule)
+ return self._cache
+
+ def _filter(self, l10n_file, entity=None):
+ if any(
+ exclude.filter(l10n_file) == 'error'
+ for exclude in self.excludes
+ ):
+ return
+ actions = {
+ child._filter(l10n_file, entity=entity)
+ for child in self.children}
+ if 'error' in actions:
+ # return early if we know we'll error
+ return 'error'
+
+ cached = self.cache(l10n_file.locale)
+ if any(p.match(l10n_file.fullpath) for p in cached.l10n_paths):
+ action = 'error'
+ for rule in reversed(cached.rules):
+ if not rule['path'].match(l10n_file.fullpath):
+ continue
+ if ('key' in rule) ^ (entity is not None):
+ # key/file mismatch, not a matching rule
+ continue
+ if 'key' in rule and not rule['key'].match(entity):
+ continue
+ action = rule['action']
+ break
+ actions.add(action)
+ if 'error' in actions:
+ return 'error'
+ if 'warning' in actions:
+ return 'warning'
+ if 'ignore' in actions:
+ return 'ignore'
+
+ def _compile_rule(self, rule):
+ assert 'path' in rule
+ if isinstance(rule['path'], list):
+ for path in rule['path']:
+ _rule = rule.copy()
+ _rule['path'] = Matcher(path, env=self.environ, root=self.root)
+ yield from self._compile_rule(_rule)
+ return
+ if isinstance(rule['path'], str):
+ rule['path'] = Matcher(
+ rule['path'], env=self.environ, root=self.root
+ )
+ if 'key' not in rule:
+ yield rule
+ return
+ if not isinstance(rule['key'], str):
+ for key in rule['key']:
+ _rule = rule.copy()
+ _rule['key'] = key
+ yield from self._compile_rule(_rule)
+ return
+ rule = rule.copy()
+ key = rule['key']
+ if key.startswith('re:'):
+ key = key[3:]
+ else:
+ key = re.escape(key) + '$'
+ rule['key'] = re.compile(key)
+ yield rule
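A minimal configuration assembled through the public methods above; every pattern and locale in this sketch is an assumption:

    pc = ProjectConfig(None)  # path None keeps patterns unrooted
    pc.add_environment(l10n_base='/l10n')
    pc.add_paths({'l10n': '{l10n_base}/{locale}/browser/**',
                  'reference': 'browser/locales/en-US/**'})
    pc.add_rules({'path': '{l10n_base}/{locale}/browser/extra.ftl',
                  'action': 'ignore'})
    pc.set_locales(['de', 'fr'])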
diff --git a/third_party/python/compare_locales/compare_locales/plurals.py b/third_party/python/compare_locales/compare_locales/plurals.py
new file mode 100644
index 0000000000..b04006b14f
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/plurals.py
@@ -0,0 +1,221 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'Mapping of locales to CLDR plural categories as implemented by PluralForm.jsm'
+
+CATEGORIES_BY_INDEX = (
+ # 0 (Chinese)
+ ('other',),
+ # 1 (English)
+ ('one', 'other'),
+ # 2 (French)
+ ('one', 'other'),
+ # 3 (Latvian)
+ ('zero', 'one', 'other'),
+ # 4 (Scottish Gaelic)
+ ('one', 'two', 'few', 'other'),
+ # 5 (Romanian)
+ ('one', 'few', 'other'),
+ # 6 (Lithuanian)
+ # CLDR: one, few, many (fractions), other
+ ('one', 'other', 'few'),
+ # 7 (Russian)
+ # CLDR: one, few, many, other (fractions)
+ ('one', 'few', 'many'),
+ # 8 (Slovak)
+ # CLDR: one, few, many (fractions), other
+ ('one', 'few', 'other'),
+ # 9 (Polish)
+ # CLDR: one, few, many, other (fractions)
+ ('one', 'few', 'many'),
+ # 10 (Slovenian)
+ ('one', 'two', 'few', 'other'),
+ # 11 (Irish Gaelic)
+ ('one', 'two', 'few', 'many', 'other'),
+ # 12 (Arabic)
+ # CLDR: zero, one, two, few, many, other
+ ('one', 'two', 'few', 'many', 'other', 'zero'),
+ # 13 (Maltese)
+ ('one', 'few', 'many', 'other'),
+ # 14 (Unused)
+ # CLDR: one, other
+ ('one', 'two', 'other'),
+ # 15 (Icelandic, Macedonian)
+ ('one', 'other'),
+ # 16 (Breton)
+ ('one', 'two', 'few', 'many', 'other'),
+ # 17 (Shuar)
+ # CLDR: (missing)
+ ('zero', 'other'),
+    # 18 (Welsh)
+ ('zero', 'one', 'two', 'few', 'many', 'other'),
+ # 19 (Bosnian, Croatian, Serbian)
+ ('one', 'few', 'other'),
+)
+
+CATEGORIES_EXCEPTIONS = {
+}
+
+CATEGORIES_BY_LOCALE = {
+ 'ace': 0,
+ 'ach': 1,
+ 'af': 1,
+ 'ak': 2,
+ 'an': 1,
+ 'ar': 12,
+ 'arn': 1,
+ 'as': 1,
+ 'ast': 1,
+ 'az': 1,
+ 'be': 7,
+ 'bg': 1,
+ 'bn': 2,
+ 'bo': 0,
+ 'br': 16,
+ 'brx': 1,
+ 'bs': 19,
+ 'ca': 1,
+ 'cak': 1,
+ 'ckb': 1,
+ 'crh': 1,
+ 'cs': 8,
+ 'csb': 9,
+ 'cv': 1,
+ 'cy': 18,
+ 'da': 1,
+ 'de': 1,
+ 'dsb': 10,
+ 'el': 1,
+ 'en': 1,
+ 'eo': 1,
+ 'es': 1,
+ 'et': 1,
+ 'eu': 1,
+ 'fa': 2,
+ 'ff': 1,
+ 'fi': 1,
+ 'fr': 2,
+ 'frp': 2,
+ 'fur': 1,
+ 'fy': 1,
+ 'ga': 11,
+ 'gd': 4,
+ 'gl': 1,
+ 'gn': 1,
+ 'gu': 2,
+ 'he': 1,
+ 'hi': 2,
+ 'hr': 19,
+ 'hsb': 10,
+ 'hto': 1,
+ 'hu': 1,
+ 'hy': 1,
+ 'hye': 1,
+ 'ia': 1,
+ 'id': 0,
+ 'ilo': 0,
+ 'is': 15,
+ 'it': 1,
+ 'ja': 0,
+ 'jiv': 17,
+ 'ka': 1,
+ 'kab': 1,
+ 'kk': 1,
+ 'km': 0,
+ 'kn': 1,
+ 'ko': 0,
+ 'ks': 1,
+ 'ku': 1,
+ 'lb': 1,
+ 'lg': 1,
+ 'lij': 1,
+ 'lo': 0,
+ 'lt': 6,
+ 'ltg': 3,
+ 'lv': 3,
+ 'lus': 0,
+ 'mai': 1,
+ 'meh': 0,
+ 'mix': 0,
+ 'mk': 15,
+ 'ml': 1,
+ 'mn': 1,
+ 'mr': 1,
+ 'ms': 0,
+ 'my': 0,
+ 'nb': 1,
+ 'ne': 1,
+ 'nl': 1,
+ 'nn': 1,
+ 'nr': 1,
+ 'nso': 2,
+ 'ny': 1,
+ 'oc': 2,
+ 'or': 1,
+ 'pa': 2,
+ 'pai': 0,
+ 'pl': 9,
+ 'pt': 1,
+ 'quy': 1,
+ 'qvi': 1,
+ 'rm': 1,
+ 'ro': 5,
+ 'ru': 7,
+ 'rw': 1,
+ 'sah': 0,
+ 'sat': 1,
+ 'sc': 1,
+ 'scn': 1,
+ 'sco': 1,
+ 'si': 1,
+ 'sk': 8,
+ 'skr': 1,
+ 'sl': 10,
+ 'son': 1,
+ 'sq': 1,
+ 'sr': 19,
+ 'ss': 1,
+ 'st': 1,
+ 'sv': 1,
+ 'sw': 1,
+ 'szl': 9,
+    'ta': 1,
+ 'te': 1,
+ 'tg': 1,
+ 'th': 0,
+ 'tl': 1,
+ 'tn': 1,
+ 'tr': 1,
+ 'trs': 1,
+ 'ts': 1,
+ 'tsz': 1,
+ 'uk': 7,
+ 'ur': 1,
+ 'uz': 1,
+ 've': 1,
+ 'vi': 0,
+ 'wo': 0,
+ 'xh': 1,
+ 'zam': 1,
+ 'zh-CN': 0,
+ 'zh-TW': 0,
+ 'zu': 2,
+}
+
+
+def get_plural(locale):
+ plural_form = get_plural_rule(locale)
+ if plural_form is None:
+ return None
+ return CATEGORIES_BY_INDEX[plural_form]
+
+
+def get_plural_rule(locale):
+ if locale is None:
+ return None
+ if locale in CATEGORIES_BY_LOCALE:
+ return CATEGORIES_BY_LOCALE[locale]
+ locale = locale.split('-', 1)[0]
+ return CATEGORIES_BY_LOCALE.get(locale)
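The lookup prefers an exact match on the full locale code and falls back to the language subtag:

    get_plural('ru')     # -> ('one', 'few', 'many')
    get_plural('de-AT')  # no exact entry, falls back to 'de'
                         # -> ('one', 'other')
    get_plural('zh-CN')  # exact entry -> ('other',)
    get_plural(None)     # -> None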
diff --git a/third_party/python/compare_locales/compare_locales/serializer.py b/third_party/python/compare_locales/compare_locales/serializer.py
new file mode 100644
index 0000000000..826fb29693
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/serializer.py
@@ -0,0 +1,137 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+'''Serialize string changes.
+
+The serialization logic is based on the cross-channel merge algorithm.
+It takes the file structure from the first file, and localizable entries
+from the last.
+Input data is the parsed reference as a list of parser.walk(),
+the existing localized file, also a list of parser.walk(), and a dictionary
+of newly added keys and raw values.
+To remove a string from a localization, pass `None` as value for a key.
+
+The marshalling between raw values and entities is done via Entity.unwrap
+and Entity.wrap.
+
+To avoid adding English reference strings into the generated file, the
+actual entities in the reference are replaced with Placeholders, which
+are removed in a final pass over the result of merge_resources. After that,
+we also prune whitespace once more.
+'''
+
+from codecs import encode
+from functools import reduce
+
+from compare_locales.merge import merge_resources, serialize_legacy_resource
+from compare_locales.parser import getParser
+from compare_locales.parser.base import (
+ Entity,
+ PlaceholderEntity,
+ Junk,
+ Whitespace,
+)
+
+
+class SerializationNotSupportedError(ValueError):
+ pass
+
+
+def serialize(filename, reference, old_l10n, new_data):
+ '''Returns a byte string of the serialized content to use.
+
+ Input are a filename to create the right parser, a reference and
+ an existing localization, both as the result of parser.walk().
+ Finally, new_data is a dictionary of key to raw values to serialize.
+
+ Raises a SerializationNotSupportedError if we don't support the file
+ format.
+ '''
+ try:
+ parser = getParser(filename)
+ except UserWarning:
+ raise SerializationNotSupportedError(
+ f'Unsupported file format ({filename}).')
+ # create template, whitespace and all
+ placeholders = [
+ placeholder(entry)
+ for entry in reference
+ if not isinstance(entry, Junk)
+ ]
+ ref_mapping = {
+ entry.key: entry
+ for entry in reference
+ if isinstance(entry, Entity)
+ }
+ # strip obsolete strings
+ old_l10n = sanitize_old(ref_mapping.keys(), old_l10n, new_data)
+ # create new Entities
+ # .val can just be "", merge_channels doesn't need that
+ new_l10n = []
+ for key, new_raw_val in new_data.items():
+ if new_raw_val is None or key not in ref_mapping:
+ continue
+ ref_ent = ref_mapping[key]
+ new_l10n.append(ref_ent.wrap(new_raw_val))
+
+ merged = merge_resources(
+ parser,
+ [placeholders, old_l10n, new_l10n],
+ keep_newest=False
+ )
+ pruned = prune_placeholders(merged)
+ return encode(serialize_legacy_resource(pruned), parser.encoding)
+
+
+def sanitize_old(known_keys, old_l10n, new_data):
+ """Strip Junk and replace obsolete messages with placeholders.
+ If new_data has `None` as a value, strip the existing translation.
+ Use placeholders generously, so that we can rely on `prune_placeholders`
+ to find their associated comments and remove them, too.
+ """
+
+ def should_placeholder(entry):
+ # If entry is an Entity, check if it's obsolete
+ # or marked to be removed.
+ if not isinstance(entry, Entity):
+ return False
+ if entry.key not in known_keys:
+ return True
+ return entry.key in new_data and new_data[entry.key] is None
+
+ return [
+ placeholder(entry)
+ if should_placeholder(entry)
+ else entry
+ for entry in old_l10n
+ if not isinstance(entry, Junk)
+ ]
+
+
+def placeholder(entry):
+ if isinstance(entry, Entity):
+ return PlaceholderEntity(entry.key)
+ return entry
+
+
+def prune_placeholders(entries):
+ pruned = [
+ entry for entry in entries
+ if not isinstance(entry, PlaceholderEntity)
+ ]
+
+ def prune_whitespace(acc, entity):
+ if len(acc) and isinstance(entity, Whitespace):
+ prev_entity = acc[-1]
+
+ if isinstance(prev_entity, Whitespace):
+ # Prefer the longer whitespace.
+ if len(entity.all) > len(prev_entity.all):
+ acc[-1] = entity
+ return acc
+
+ acc.append(entity)
+ return acc
+
+ return reduce(prune_whitespace, pruned, [])
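A hedged end-to-end sketch; getParser, readContents and walk live in the parser package rather than this file, and the file name and strings are assumptions:

    from compare_locales.parser import getParser

    p = getParser('menu.properties')
    p.readContents(b'greeting = Hello\n')
    reference = list(p.walk())
    p.readContents(b'greeting = Hallo\n')
    old_l10n = list(p.walk())
    # update one string; the result is the serialized file as bytes
    serialize('menu.properties', reference, old_l10n,
              {'greeting': 'Servus'})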
diff --git a/third_party/python/compare_locales/compare_locales/util.py b/third_party/python/compare_locales/compare_locales/util.py
new file mode 100644
index 0000000000..71eadd8749
--- /dev/null
+++ b/third_party/python/compare_locales/compare_locales/util.py
@@ -0,0 +1,11 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This file is shared between compare-locales and locale-inspector
+# test_util is in compare-locales only, for the sake of easy
+# development.
+
+
+def parseLocales(content):
+ return sorted(l.split()[0] for l in content.splitlines() if l)
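Each non-empty line is expected to start with a locale code, optionally followed by annotations such as platform names; the result is sorted:

    parseLocales('de\nfr osx\nen-US\n')
    # -> ['de', 'en-US', 'fr']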
diff --git a/third_party/python/cookies/cookies-2.2.1.dist-info/DESCRIPTION.rst b/third_party/python/cookies/cookies-2.2.1.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000..6c04d8b1d0
--- /dev/null
+++ b/third_party/python/cookies/cookies-2.2.1.dist-info/DESCRIPTION.rst
@@ -0,0 +1,90 @@
+What is this and what is it for?
+--------------------------------
+
+cookies.py is a Python module for working with HTTP cookies: parsing and
+rendering 'Cookie:' request headers and 'Set-Cookie:' response headers,
+and exposing a convenient API for creating and modifying cookies. It can be
+used as a replacement for Python's Cookie.py (aka http.cookies).
+
+Features
+--------
+
+* Rendering according to the excellent new RFC 6265
+ (rather than using a unique ad hoc format inconsistently relating to
+ unrealistic, very old RFCs which everyone ignored). Uses URL encoding to
+ represent non-ASCII by default, like many other languages' libraries
+* Liberal parsing, incorporating many complaints about Cookie.py barfing
+ on common cookie formats which can be reliably parsed (e.g. search 'cookie'
+ on the Python issue tracker)
+* Well-documented code, with chapter and verse from RFCs
+ (rather than arbitrary, undocumented decisions and huge tables of magic
+ values, as you see in Cookie.py).
+* Test coverage at 100%, with a much more comprehensive test suite
+ than Cookie.py
+* Single-source compatible with the following Python versions:
+ 2.6, 2.7, 3.2, 3.3 and PyPy (2.7).
+* Cleaner, less surprising API::
+
+ # old Cookie.py - this code is all directly from its docstring
+ >>> from Cookie import SmartCookie
+ >>> C = SmartCookie()
+ >>> # n.b. it's "smart" because it automatically pickles Python objects,
+ >>> # which is actually quite stupid for security reasons!
+ >>> C["rocky"] = "road"
+ >>> C["rocky"]["path"] = "/cookie"
+ >>> # So C["rocky"] is a string, except when it's a dict...
+ >>> # and why do I have to write [""] to access a fixed set of attrs?
+ >>> # Look at the atrocious way I render out a request header:
+ >>> C.output(attrs=[], header="Cookie:")
+ 'Cookie: rocky=road'
+
+ # new cookies.py
+ >>> from cookies import Cookies, Cookie
+ >>> cookies = Cookies(rocky='road')
+  >>> # Can also write explicitly: cookies['rocky'] = Cookie('rocky', 'road')
+ >>> cookies['rocky'].path = "/cookie"
+ >>> cookies.render_request()
+ 'rocky=road'
+* Friendly to customization, extension, and reuse of its parts.
+ Unlike Cookie.py, it doesn't lock all implementation inside its own classes
+ (forcing you to write ugly wrappers as Django, Trac, Werkzeug/Flask, web.py
+ and Tornado had to do). You can suppress minor parse exceptions with
+ parameters rather than subclass wrappers. You can plug in your own parsers,
+ renderers and validators for new or existing cookie attributes. You can
+ render the data out in a dict. You can easily use the underlying imperative
+ API or even lift the parser's regexps for your own parser or project. They
+ are very well documented and relate directly to RFCs, so you know exactly
+ what you are getting and why. It's MIT-licensed so do
+ what you want (but I'd love to know what use you are getting from it!)
+* One file, so you can just drop cookies.py into your project if you like
+* MIT license, so you can use it in whatever you want with no strings
+
+Things this is not meant to do
+------------------------------
+While this is intended to be a good module for handling cookies, it does not
+even try to do any of the following:
+
+* Maintain backward compatibility with Cookie.py, which would mean
+ inheriting its confusions and bugs
+* Implement RFCs 2109 or 2965, which have always been ignored by almost
+ everyone and are now obsolete as well
+* Handle every conceivable output from terrible legacy apps, which is not
+ possible to do without lots of silent data loss and corruption (the
+  parser does try to be as liberal as possible otherwise, though)
+* Provide a means to store pickled Python objects in cookie values
+ (that's a big security hole)
+
+This doesn't compete with the cookielib (http.cookiejar) module in the Python
+standard library, which is specifically for implementing cookie storage and
+similar behavior in an HTTP client such as a browser. Things cookielib does
+that this doesn't:
+
+* Write to or read from browsers' cookie stores or other proprietary
+ formats for storing cookie data in files
+* Handle the browser/client logic like deciding which cookies to send or
+ discard, etc.
+
+If you are looking for a cookie library but neither this one nor cookielib
+will help, you might also consider the implementations in WebOb or Bottle.
+
+
diff --git a/third_party/python/cookies/cookies-2.2.1.dist-info/METADATA b/third_party/python/cookies/cookies-2.2.1.dist-info/METADATA
new file mode 100644
index 0000000000..b523ed38a1
--- /dev/null
+++ b/third_party/python/cookies/cookies-2.2.1.dist-info/METADATA
@@ -0,0 +1,111 @@
+Metadata-Version: 2.0
+Name: cookies
+Version: 2.2.1
+Summary: Friendlier RFC 6265-compliant cookie parser/renderer
+Home-page: https://github.com/sashahart/cookies
+Author: Sasha Hart
+Author-email: s@sashahart.net
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Environment :: Other Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+What is this and what is it for?
+--------------------------------
+
+cookies.py is a Python module for working with HTTP cookies: parsing and
+rendering 'Cookie:' request headers and 'Set-Cookie:' response headers,
+and exposing a convenient API for creating and modifying cookies. It can be
+used as a replacement for Python's Cookie.py (aka http.cookies).
+
+Features
+--------
+
+* Rendering according to the excellent new RFC 6265
+ (rather than using a unique ad hoc format inconsistently relating to
+ unrealistic, very old RFCs which everyone ignored). Uses URL encoding to
+ represent non-ASCII by default, like many other languages' libraries
+* Liberal parsing, incorporating many complaints about Cookie.py barfing
+ on common cookie formats which can be reliably parsed (e.g. search 'cookie'
+ on the Python issue tracker)
+* Well-documented code, with chapter and verse from RFCs
+ (rather than arbitrary, undocumented decisions and huge tables of magic
+ values, as you see in Cookie.py).
+* Test coverage at 100%, with a much more comprehensive test suite
+ than Cookie.py
+* Single-source compatible with the following Python versions:
+ 2.6, 2.7, 3.2, 3.3 and PyPy (2.7).
+* Cleaner, less surprising API::
+
+ # old Cookie.py - this code is all directly from its docstring
+ >>> from Cookie import SmartCookie
+ >>> C = SmartCookie()
+ >>> # n.b. it's "smart" because it automatically pickles Python objects,
+ >>> # which is actually quite stupid for security reasons!
+ >>> C["rocky"] = "road"
+ >>> C["rocky"]["path"] = "/cookie"
+ >>> # So C["rocky"] is a string, except when it's a dict...
+ >>> # and why do I have to write [""] to access a fixed set of attrs?
+ >>> # Look at the atrocious way I render out a request header:
+ >>> C.output(attrs=[], header="Cookie:")
+ 'Cookie: rocky=road'
+
+ # new cookies.py
+ >>> from cookies import Cookies, Cookie
+ >>> cookies = Cookies(rocky='road')
+  >>> # Can also write explicitly: cookies['rocky'] = Cookie('rocky', 'road')
+ >>> cookies['rocky'].path = "/cookie"
+ >>> cookies.render_request()
+ 'rocky=road'
+* Friendly to customization, extension, and reuse of its parts.
+ Unlike Cookie.py, it doesn't lock all implementation inside its own classes
+ (forcing you to write ugly wrappers as Django, Trac, Werkzeug/Flask, web.py
+ and Tornado had to do). You can suppress minor parse exceptions with
+ parameters rather than subclass wrappers. You can plug in your own parsers,
+ renderers and validators for new or existing cookie attributes. You can
+ render the data out in a dict. You can easily use the underlying imperative
+ API or even lift the parser's regexps for your own parser or project. They
+ are very well documented and relate directly to RFCs, so you know exactly
+ what you are getting and why. It's MIT-licensed so do
+ what you want (but I'd love to know what use you are getting from it!)
+* One file, so you can just drop cookies.py into your project if you like
+* MIT license, so you can use it in whatever you want with no strings
+
+Things this is not meant to do
+------------------------------
+While this is intended to be a good module for handling cookies, it does not
+even try to do any of the following:
+
+* Maintain backward compatibility with Cookie.py, which would mean
+ inheriting its confusions and bugs
+* Implement RFCs 2109 or 2965, which have always been ignored by almost
+ everyone and are now obsolete as well
+* Handle every conceivable output from terrible legacy apps, which is not
+ possible to do without lots of silent data loss and corruption (the
+  parser does try to be as liberal as possible otherwise, though)
+* Provide a means to store pickled Python objects in cookie values
+ (that's a big security hole)
+
+This doesn't compete with the cookielib (http.cookiejar) module in the Python
+standard library, which is specifically for implementing cookie storage and
+similar behavior in an HTTP client such as a browser. Things cookielib does
+that this doesn't:
+
+* Write to or read from browsers' cookie stores or other proprietary
+ formats for storing cookie data in files
+* Handle the browser/client logic like deciding which cookies to send or
+ discard, etc.
+
+If you are looking for a cookie library but neither this one nor cookielib
+will help, you might also consider the implementations in WebOb or Bottle.
+
+
diff --git a/third_party/python/cookies/cookies-2.2.1.dist-info/RECORD b/third_party/python/cookies/cookies-2.2.1.dist-info/RECORD
new file mode 100644
index 0000000000..f29a75c9b0
--- /dev/null
+++ b/third_party/python/cookies/cookies-2.2.1.dist-info/RECORD
@@ -0,0 +1,8 @@
+cookies.py,sha256=sF8kRzufOPGQAu8iiPfynJj2yRNGkUcC-JxvTX9mKQ8,47318
+test_cookies.py,sha256=cbFPYlNzzgTkVzz7Xb_3GqmQ4SE0EEz1gRIF1We5QTY,96777
+cookies-2.2.1.dist-info/RECORD,,
+cookies-2.2.1.dist-info/metadata.json,sha256=mQLffEYibwvk8r15ayQPMqbU4RCgtGlL5u59EY-8t6k,901
+cookies-2.2.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
+cookies-2.2.1.dist-info/DESCRIPTION.rst,sha256=cMKRjszZhygoqs2V6ZKoKQGGtBY5RN_vfTYfd-UYFJ0,4351
+cookies-2.2.1.dist-info/METADATA,sha256=pTGwsy7mjUwouhm4j-E7ld4-rbbUCbiK-bHvwaChN2M,5170
+cookies-2.2.1.dist-info/top_level.txt,sha256=cmWJoCZMIIrsNW2u7GQHmLxsBkrQSFDP-t27J7-E_HQ,21
diff --git a/third_party/python/cookies/cookies-2.2.1.dist-info/WHEEL b/third_party/python/cookies/cookies-2.2.1.dist-info/WHEEL
new file mode 100644
index 0000000000..9dff69d861
--- /dev/null
+++ b/third_party/python/cookies/cookies-2.2.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/cookies/cookies-2.2.1.dist-info/metadata.json b/third_party/python/cookies/cookies-2.2.1.dist-info/metadata.json
new file mode 100644
index 0000000000..0009aea8fb
--- /dev/null
+++ b/third_party/python/cookies/cookies-2.2.1.dist-info/metadata.json
@@ -0,0 +1 @@
+{"name": "cookies", "classifiers": ["Development Status :: 4 - Beta", "Environment :: Other Environment", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Libraries :: Python Modules"], "generator": "bdist_wheel (0.24.0)", "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"name": "Sasha Hart", "role": "author", "email": "s@sashahart.net"}], "project_urls": {"Home": "https://github.com/sashahart/cookies"}}}, "version": "2.2.1", "metadata_version": "2.0", "summary": "Friendlier RFC 6265-compliant cookie parser/renderer"} \ No newline at end of file
diff --git a/third_party/python/cookies/cookies-2.2.1.dist-info/top_level.txt b/third_party/python/cookies/cookies-2.2.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..0358d8a02a
--- /dev/null
+++ b/third_party/python/cookies/cookies-2.2.1.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+cookies
+test_cookies
diff --git a/third_party/python/cookies/cookies.py b/third_party/python/cookies/cookies.py
new file mode 100644
index 0000000000..d1637d2263
--- /dev/null
+++ b/third_party/python/cookies/cookies.py
@@ -0,0 +1,1169 @@
+"""Parse, manipulate and render cookies in a convenient way.
+
+Copyright (c) 2011-2014, Sasha Hart.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+__version__ = "2.2.1"
+import re
+import datetime
+import logging
+import sys
+from unicodedata import normalize
+if sys.version_info >= (3, 0, 0): # pragma: no cover
+ from urllib.parse import (
+ quote as _default_quote, unquote as _default_unquote)
+ basestring = str
+ long = int
+else: # pragma: no cover
+ from urllib import (
+ quote as _default_quote, unquote as _default_unquote)
+
+
+def _total_seconds(td):
+ """Wrapper to work around lack of .total_seconds() method in Python 3.1.
+ """
+ if hasattr(td, "total_seconds"):
+ return td.total_seconds()
+    # 86400 seconds per day; 1e6 (not 1e5) microseconds per second.
+    return td.days * 3600 * 24 + td.seconds + td.microseconds / 1000000.0
+
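+# Example (illustrative):
+# _total_seconds(datetime.timedelta(days=1, seconds=30)) == 86430.0
+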
+# see test_encoding_assumptions for how these magical safe= parms were figured
+# out. the differences are because of what cookie-octet may contain
+# vs the more liberal spec for extension-av
+default_cookie_quote = lambda item: _default_quote(
+ item, safe='!#$%&\'()*+/:<=>?@[]^`{|}~')
+
+default_extension_quote = lambda item: _default_quote(
+ item, safe=' !"#$%&\'()*+,/:<=>?@[\\]^`{|}~')
+
+default_unquote = _default_unquote
+
+
+def _report_invalid_cookie(data):
+ "How this module logs a bad cookie when exception suppressed"
+ logging.error("invalid Cookie: %r", data)
+
+
+def _report_unknown_attribute(name):
+ "How this module logs an unknown attribute when exception suppressed"
+ logging.error("unknown Cookie attribute: %r", name)
+
+
+def _report_invalid_attribute(name, value, reason):
+ "How this module logs a bad attribute when exception suppressed"
+ logging.error("invalid Cookie attribute (%s): %r=%r", reason, name, value)
+
+
+class CookieError(Exception):
+ """Base class for this module's exceptions, so you can catch them all if
+ you want to.
+ """
+ def __init__(self):
+ Exception.__init__(self)
+
+
+class InvalidCookieError(CookieError):
+ """Raised when attempting to parse or construct a cookie which is
+ syntactically invalid (in any way that has possibly serious implications).
+ """
+ def __init__(self, data=None, message=""):
+ CookieError.__init__(self)
+ self.data = data
+ self.message = message
+
+ def __str__(self):
+ return '%r %r' % (self.message, self.data)
+
+
+class InvalidCookieAttributeError(CookieError):
+ """Raised when setting an invalid attribute on a Cookie.
+ """
+ def __init__(self, name, value, reason=None):
+ CookieError.__init__(self)
+ self.name = name
+ self.value = value
+ self.reason = reason
+
+ def __str__(self):
+ prefix = ("%s: " % self.reason) if self.reason else ""
+ if self.name is None:
+ return '%s%r' % (prefix, self.value)
+ return '%s%r = %r' % (prefix, self.name, self.value)
+
+
+class Definitions(object):
+ """Namespace to hold definitions used in cookie parsing (mostly pieces of
+ regex).
+
+ These are separated out for individual testing against examples and RFC
+ grammar, and kept here to avoid cluttering other namespaces.
+ """
+ # Most of the following are set down or cited in RFC 6265 4.1.1
+
+ # This is the grammar's 'cookie-name' defined as 'token' per RFC 2616 2.2.
+ COOKIE_NAME = r"!#$%&'*+\-.0-9A-Z^_`a-z|~"
+
+ # 'cookie-octet' - as used twice in definition of 'cookie-value'
+ COOKIE_OCTET = r"\x21\x23-\x2B\--\x3A\x3C-\x5B\]-\x7E"
+
+ # extension-av - also happens to be a superset of cookie-av and path-value
+ EXTENSION_AV = """ !"#$%&\\\\'()*+,\-./0-9:<=>?@A-Z[\\]^_`a-z{|}~"""
+
+ # This is for the first pass parse on a Set-Cookie: response header. It
+ # includes cookie-value, cookie-pair, set-cookie-string, cookie-av.
+ # extension-av is used to extract the chunk containing variable-length,
+ # unordered attributes. The second pass then uses ATTR to break out each
+ # attribute and extract it appropriately.
+    # As compared with the RFC production grammar, it is much more liberal with
+ # space characters, in order not to break on data made by barbarians.
+ SET_COOKIE_HEADER = """(?x) # Verbose mode
+ ^(?:Set-Cookie:[ ]*)?
+ (?P<name>[{name}:]+)
+ [ ]*=[ ]*
+
+ # Accept anything in quotes - this is not RFC 6265, but might ease
+ # working with older code that half-heartedly works with 2965. Accept
+ # spaces inside tokens up front, so we can deal with that error one
+ # cookie at a time, after this first pass.
+ (?P<value>(?:"{value}*")|(?:[{cookie_octet} ]*))
+ [ ]*
+
+ # Extract everything up to the end in one chunk, which will be broken
+ # down in the second pass. Don't match if there's any unexpected
+ # garbage at the end (hence the \Z; $ matches before newline).
+ (?P<attrs>(?:;[ ]*[{cookie_av}]+)*)
+ """.format(name=COOKIE_NAME, cookie_av=EXTENSION_AV + ";",
+ cookie_octet=COOKIE_OCTET, value="[^;]")
+
+ # Now we specify the individual patterns for the attribute extraction pass
+ # of Set-Cookie parsing (mapping to *-av in the RFC grammar). Things which
+ # don't match any of these but are in extension-av are simply ignored;
+ # anything else should be rejected in the first pass (SET_COOKIE_HEADER).
+
+ # Max-Age attribute. These are digits, they are expressed this way
+ # because that is how they are expressed in the RFC.
+ MAX_AGE_AV = "Max-Age=(?P<max_age>[\x30-\x39]+)"
+
+ # Domain attribute; a label is one part of the domain
+ LABEL = '{let_dig}(?:(?:{let_dig_hyp}+)?{let_dig})?'.format(
+ let_dig="[A-Za-z0-9]", let_dig_hyp="[0-9A-Za-z\-]")
+ DOMAIN = "\.?(?:{label}\.)*(?:{label})".format(label=LABEL)
+    # Accept a leading period even though it's not valid; RFC 6265 4.1.2.3
+    # says such a period is to be ignored.
+ DOMAIN_AV = "Domain=(?P<domain>{domain})".format(domain=DOMAIN)
+
+ # Path attribute. We don't take special care with quotes because
+ # they are hardly used, they don't allow invalid characters per RFC 6265,
+ # and " is a valid character to occur in a path value anyway.
+ PATH_AV = 'Path=(?P<path>[%s]+)' % EXTENSION_AV
+
+ # Expires attribute. This gets big because of date parsing, which needs to
+ # support a large range of formats, so it's broken down into pieces.
+
+ # Generate a mapping of months to use in render/parse, to avoid
+ # localizations which might be produced by strftime (e.g. %a -> Mayo)
+ month_list = ["January", "February", "March", "April", "May", "June",
+ "July", "August", "September", "October", "November",
+ "December"]
+ month_abbr_list = [item[:3] for item in month_list]
+ month_numbers = {}
+ for index, name in enumerate(month_list):
+ name = name.lower()
+ month_numbers[name[:3]] = index + 1
+ month_numbers[name] = index + 1
+ # Use the same list to create regexps for months.
+ MONTH_SHORT = "(?:" + "|".join(item[:3] for item in month_list) + ")"
+ MONTH_LONG = "(?:" + "|".join(item for item in month_list) + ")"
+
+ # Same drill with weekdays, for the same reason.
+ weekday_list = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday",
+ "Saturday", "Sunday"]
+ weekday_abbr_list = [item[:3] for item in weekday_list]
+ WEEKDAY_SHORT = "(?:" + "|".join(item[:3] for item in weekday_list) + ")"
+ WEEKDAY_LONG = "(?:" + "|".join(item for item in weekday_list) + ")"
+
+ # This regexp tries to exclude obvious nonsense in the first pass.
+ DAY_OF_MONTH = "(?:[0 ]?[1-9]|[12][0-9]|[3][01])(?!\d)"
+
+ # Here is the overall date format; ~99% of cases fold into one generalized
+ # syntax like RFC 1123, and many of the rest use asctime-like formats.
+ # (see test_date_formats for a full exegesis)
+ DATE = """(?ix) # Case-insensitive mode, verbose mode
+ (?:
+ (?P<weekday>(?:{wdy}|{weekday}),[ ])?
+ (?P<day>{day})
+ [ \-]
+ (?P<month>{mon}|{month})
+ [ \-]
+ # This does not support 3-digit years, which are rare and don't
+ # seem to have one canonical interpretation.
+ (?P<year>(?:\d{{2}}|\d{{4}}))
+ [ ]
+ # HH:MM[:SS] GMT
+ (?P<hour>(?:[ 0][0-9]|[01][0-9]|2[0-3]))
+ :(?P<minute>(?:0[0-9]|[1-5][0-9]))
+ (?::(?P<second>\d{{2}}))?
+ [ ]GMT
+ |
+ # Support asctime format, e.g. 'Sun Nov 6 08:49:37 1994'
+ (?P<weekday2>{wdy})[ ]
+ (?P<month2>{mon})[ ]
+ (?P<day2>[ ]\d|\d\d)[ ]
+ (?P<hour2>\d\d):
+ (?P<minute2>\d\d)
+ (?::(?P<second2>\d\d)?)[ ]
+ (?P<year2>\d\d\d\d)
+ (?:[ ]GMT)? # GMT (Amazon)
+ )
+ """
+ DATE = DATE.format(wdy=WEEKDAY_SHORT, weekday=WEEKDAY_LONG,
+ day=DAY_OF_MONTH, mon=MONTH_SHORT, month=MONTH_LONG)
+
+ EXPIRES_AV = "Expires=(?P<expires>%s)" % DATE
+
+ # Now we're ready to define a regexp which can match any number of attrs
+ # in the variable portion of the Set-Cookie header (like the unnamed latter
+ # part of set-cookie-string in the grammar). Each regexp of any complexity
+ # is split out for testing by itself.
+ ATTR = """(?ix) # Case-insensitive mode, verbose mode
+ # Always start with start or semicolon and any number of spaces
+ (?:^|;)[ ]*(?:
+ # Big disjunction of attribute patterns (*_AV), with named capture
+ # groups to extract everything in one pass. Anything unrecognized
+ # goes in the 'unrecognized' capture group for reporting.
+ {expires}
+ |{max_age}
+ |{domain}
+ |{path}
+ |(?P<secure>Secure=?)
+ |(?P<httponly>HttpOnly=?)
+ |Version=(?P<version>[{stuff}]+)
+ |Comment=(?P<comment>[{stuff}]+)
+ |(?P<unrecognized>[{stuff}]+)
+ )
+ # End with any number of spaces not matched by the preceding (up to the
+ # next semicolon) - but do not capture these.
+ [ ]*
+ """.format(expires=EXPIRES_AV, max_age=MAX_AGE_AV, domain=DOMAIN_AV,
+ path=PATH_AV, stuff=EXTENSION_AV)
+
+ # For request data ("Cookie: ") parsing, with finditer cf. RFC 6265 4.2.1
+ COOKIE = """(?x) # Verbose mode
+ (?: # Either something close to valid...
+
+ # Match starts at start of string, or at separator.
+ # Split on comma for the sake of legacy code (RFC 2109/2965),
+ # and since it only breaks when invalid commas are put in values.
+ # see http://bugs.python.org/issue1210326
+ (?:^Cookie:|^|;|,)
+
+ # 1 or more valid token characters making up the name (captured)
+ # with colon added to accommodate users of some old Java apps, etc.
+ [ ]*
+ (?P<name>[{name}:]+)
+ [ ]*
+ =
+ [ ]*
+
+ # While 6265 provides only for cookie-octet, this allows just about
+ # anything in quotes (like in RFC 2616); people stuck on RFC
+ # 2109/2965 will expect it to work this way. The non-quoted token
+ # allows interior spaces ('\x20'), which is not valid. In both
+ # cases, the decision of whether to allow these is downstream.
+ (?P<value>
+ ["][^\00-\31"]*["]
+ |
+ [{value}]
+ |
+ [{value}][{value} ]*[{value}]+
+ |
+ )
+
+ # ... Or something way off-spec - extract to report and move on
+ |
+ (?P<invalid>[^;]+)
+ )
+ # Trailing spaces after value
+ [ ]*
+ # Must end with ; or be at end of string (don't consume this though,
+ # so use the lookahead assertion ?=
+ (?=;|\Z)
+ """.format(name=COOKIE_NAME, value=COOKIE_OCTET)
+
+ # Precompile externally useful definitions into re objects.
+ COOKIE_NAME_RE = re.compile("^([%s:]+)\Z" % COOKIE_NAME)
+ COOKIE_RE = re.compile(COOKIE)
+ SET_COOKIE_HEADER_RE = re.compile(SET_COOKIE_HEADER)
+ ATTR_RE = re.compile(ATTR)
+ DATE_RE = re.compile(DATE)
+ DOMAIN_RE = re.compile(DOMAIN)
+ PATH_RE = re.compile('^([%s]+)\Z' % EXTENSION_AV)
+ EOL = re.compile("(?:\r\n|\n)")
+
+
+def strip_spaces_and_quotes(value):
+ """Remove invalid whitespace and/or single pair of dquotes and return None
+ for empty strings.
+
+ Used to prepare cookie values, path, and domain attributes in a way which
+ tolerates simple formatting mistakes and standards variations.
+ """
+ value = value.strip() if value else ""
+ if value and len(value) > 1 and (value[0] == value[-1] == '"'):
+ value = value[1:-1]
+ if not value:
+ value = ""
+ return value
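+
+# Examples (illustrative):
+# strip_spaces_and_quotes(' "a b" ') == 'a b'
+# strip_spaces_and_quotes('  ') == ''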
+
+
+def parse_string(data, unquote=default_unquote):
+ """Decode URL-encoded strings to UTF-8 containing the escaped chars.
+ """
+ if data is None:
+ return None
+
+ # We'll soon need to unquote to recover our UTF-8 data.
+ # In Python 2, unquote crashes on chars beyond ASCII. So encode functions
+ # had better not include anything beyond ASCII in data.
+ # In Python 3, unquote crashes on bytes objects, requiring conversion to
+ # str objects (unicode) using decode().
+ # But in Python 2, the same decode causes unquote to butcher the data.
+ # So in that case, just leave the bytes.
+ if isinstance(data, bytes):
+ if sys.version_info > (3, 0, 0): # pragma: no cover
+ data = data.decode('ascii')
+ # Recover URL encoded data
+ unquoted = unquote(data)
+ # Without this step, Python 2 may have good URL decoded *bytes*,
+ # which will therefore not normalize as unicode and not compare to
+ # the original.
+ if isinstance(unquoted, bytes):
+ unquoted = unquoted.decode('utf-8')
+ return unquoted
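+
+# Example (illustrative): parse_string('rocky%20road') == 'rocky road'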
+
+
+def parse_date(value):
+ """Parse an RFC 1123 or asctime-like format date string to produce
+ a Python datetime object (without a timezone).
+ """
+ # Do the regex magic; also enforces 2 or 4 digit years
+ match = Definitions.DATE_RE.match(value) if value else None
+ if not match:
+ return None
+ # We're going to extract and prepare captured data in 'data'.
+ data = {}
+ captured = match.groupdict()
+ fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
+ # If we matched on the RFC 1123 family format
+ if captured['year']:
+ for field in fields:
+ data[field] = captured[field]
+ # If we matched on the asctime format, use year2 etc.
+ else:
+ for field in fields:
+ data[field] = captured[field + "2"]
+ year = data['year']
+ # Interpret lame 2-digit years - base the cutoff on UNIX epoch, in case
+ # someone sets a '70' cookie meaning 'distant past'. This won't break for
+ # 58 years and people who use 2-digit years are asking for it anyway.
+ if len(year) == 2:
+ if int(year) < 70:
+ year = "20" + year
+ else:
+ year = "19" + year
+ year = int(year)
+ # Clamp to [1900, 9999]: strftime has min 1900, datetime has max 9999
+ data['year'] = max(1900, min(year, 9999))
+ # Other things which are numbers should convert to integer
+ for field in ['day', 'hour', 'minute', 'second']:
+ if data[field] is None:
+ data[field] = 0
+ data[field] = int(data[field])
+ # Look up the number datetime needs for the named month
+ data['month'] = Definitions.month_numbers[data['month'].lower()]
+ return datetime.datetime(**data)
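+
+# Example (illustrative):
+# parse_date('Sun, 06 Nov 1994 08:49:37 GMT') ==
+#     datetime.datetime(1994, 11, 6, 8, 49, 37)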
+
+
+def parse_domain(value):
+ """Parse and validate an incoming Domain attribute value.
+ """
+ value = strip_spaces_and_quotes(value)
+ if value:
+ assert valid_domain(value)
+ return value
+
+
+def parse_path(value):
+ """Parse and validate an incoming Path attribute value.
+ """
+ value = strip_spaces_and_quotes(value)
+ assert valid_path(value)
+ return value
+
+
+def parse_value(value, allow_spaces=True, unquote=default_unquote):
+ "Process a cookie value"
+ if value is None:
+ return None
+ value = strip_spaces_and_quotes(value)
+ value = parse_string(value, unquote=unquote)
+ if not allow_spaces:
+ assert ' ' not in value
+ return value
+
+
+def valid_name(name):
+ "Validate a cookie name string"
+ if isinstance(name, bytes):
+ name = name.decode('ascii')
+ if not Definitions.COOKIE_NAME_RE.match(name):
+ return False
+ # This module doesn't support $identifiers, which are part of an obsolete
+ # and highly complex standard which is never used.
+ if name[0] == "$":
+ return False
+ return True
+
+
+def valid_value(value, quote=default_cookie_quote, unquote=default_unquote):
+ """Validate a cookie value string.
+
+ This is generic across quote/unquote functions because it directly verifies
+ the encoding round-trip using the specified quote/unquote functions.
+ So if you use different quote/unquote functions, use something like this
+ as a replacement for valid_value::
+
+ my_valid_value = lambda value: valid_value(value, quote=my_quote,
+ unquote=my_unquote)
+ """
+ if value is None:
+ return False
+
+ # Put the value through a round trip with the given quote and unquote
+ # functions, so we will know whether data will get lost or not in the event
+ # that we don't complain.
+ encoded = encode_cookie_value(value, quote=quote)
+ decoded = parse_string(encoded, unquote=unquote)
+
+ # If the original string made the round trip, this is a valid value for the
+ # given quote and unquote functions. Since the round trip can generate
+ # different unicode forms, normalize before comparing, so we can ignore
+ # trivial inequalities.
+ decoded_normalized = (normalize("NFKD", decoded)
+ if not isinstance(decoded, bytes) else decoded)
+ value_normalized = (normalize("NFKD", value)
+ if not isinstance(value, bytes) else value)
+ if decoded_normalized == value_normalized:
+ return True
+ return False
+
+
+def valid_date(date):
+ "Validate an expires datetime object"
+ # We want something that acts like a datetime. In particular,
+ # strings indicate a failure to parse down to an object and ints are
+ # nonstandard and ambiguous at best.
+ if not hasattr(date, 'tzinfo'):
+ return False
+ # Relevant RFCs define UTC as 'close enough' to GMT, and the maximum
+ # difference between UTC and GMT is often stated to be less than a second.
+ if date.tzinfo is None or _total_seconds(date.utcoffset()) < 1.1:
+ return True
+ return False
+
+
+def valid_domain(domain):
+ "Validate a cookie domain ASCII string"
+ # Using encoding on domain would confuse browsers into not sending cookies.
+ # Generate UnicodeDecodeError up front if it can't store as ASCII.
+ domain.encode('ascii')
+ # Domains starting with periods are not RFC-valid, but this is very common
+ # in existing cookies, so they should still parse with DOMAIN_AV.
+ if Definitions.DOMAIN_RE.match(domain):
+ return True
+ return False
+
+
+def valid_path(value):
+ "Validate a cookie path ASCII string"
+ # Generate UnicodeDecodeError if path can't store as ASCII.
+ value.encode("ascii")
+ # Cookies without leading slash will likely be ignored, raise ASAP.
+ if not (value and value[0] == "/"):
+ return False
+ if not Definitions.PATH_RE.match(value):
+ return False
+ return True
+
+
+def valid_max_age(number):
+ "Validate a cookie Max-Age"
+ if isinstance(number, basestring):
+ try:
+ number = long(number)
+ except (ValueError, TypeError):
+ return False
+ if number >= 0 and number % 1 == 0:
+ return True
+ return False
+
+
+def encode_cookie_value(data, quote=default_cookie_quote):
+ """URL-encode strings to make them safe for a cookie value.
+
+ By default this uses urllib quoting, as used in many other cookie
+ implementations and in other Python code, instead of an ad hoc escaping
+ mechanism which includes backslashes (these also being illegal chars in RFC
+ 6265).
+ """
+ if data is None:
+ return None
+
+    # encode() to bytes so quote won't crash on non-ASCII text, but don't do
+    # that to data which is already bytes: on Python 2, encode() crashes if
+    # the bytes contain non-ASCII, and on Python 3 bytes has no encode() at
+    # all.
+ if not isinstance(data, bytes):
+ data = data.encode("utf-8")
+
+ # URL encode data so it is safe for cookie value
+ quoted = quote(data)
+
+ # Don't force to bytes, so that downstream can use proper string API rather
+ # than crippled bytes, and to encourage encoding to be done just once.
+ return quoted
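+
+# Example (illustrative): encode_cookie_value('rocky road') == 'rocky%20road'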
+
+
+def encode_extension_av(data, quote=default_extension_quote):
+ """URL-encode strings to make them safe for an extension-av
+ (extension attribute value): <any CHAR except CTLs or ";">
+ """
+ if not data:
+ return ''
+ return quote(data)
+
+
+def render_date(date):
+ """Render a date (e.g. an Expires value) per RFCs 6265/2616/1123.
+
+ Don't give this localized (timezone-aware) datetimes. If you use them,
+ convert them to GMT before passing them to this. There are too many
+ conversion corner cases to handle this universally.
+ """
+ if not date:
+ return None
+ assert valid_date(date)
+ # Avoid %a and %b, which can change with locale, breaking compliance
+ weekday = Definitions.weekday_abbr_list[date.weekday()]
+ month = Definitions.month_abbr_list[date.month - 1]
+ return date.strftime("{day}, %d {month} %Y %H:%M:%S GMT"
+ ).format(day=weekday, month=month)
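+
+# Example (illustrative):
+# render_date(datetime.datetime(1994, 11, 6, 8, 49, 37)) ==
+#     'Sun, 06 Nov 1994 08:49:37 GMT'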
+
+
+def render_domain(domain):
+ if not domain:
+ return None
+ if domain[0] == '.':
+ return domain[1:]
+ return domain
+
+
+def _parse_request(header_data, ignore_bad_cookies=False):
+ """Turn one or more lines of 'Cookie:' header data into a dict mapping
+ cookie names to cookie values (raw strings).
+ """
+ cookies_dict = {}
+ for line in Definitions.EOL.split(header_data.strip()):
+ matches = Definitions.COOKIE_RE.finditer(line)
+ matches = [item for item in matches]
+ for match in matches:
+ invalid = match.group('invalid')
+ if invalid:
+ if not ignore_bad_cookies:
+ raise InvalidCookieError(data=invalid)
+ _report_invalid_cookie(invalid)
+ continue
+ name = match.group('name')
+ values = cookies_dict.get(name)
+ value = match.group('value').strip('"')
+ if values:
+ values.append(value)
+ else:
+ cookies_dict[name] = [value]
+ if not matches:
+ if not ignore_bad_cookies:
+ raise InvalidCookieError(data=line)
+ _report_invalid_cookie(line)
+ return cookies_dict
+
+
+def parse_one_response(line, ignore_bad_cookies=False,
+ ignore_bad_attributes=True):
+ """Turn one 'Set-Cookie:' line into a dict mapping attribute names to
+ attribute values (raw strings).
+ """
+ cookie_dict = {}
+ # Basic validation, extract name/value/attrs-chunk
+ match = Definitions.SET_COOKIE_HEADER_RE.match(line)
+ if not match:
+ if not ignore_bad_cookies:
+ raise InvalidCookieError(data=line)
+ _report_invalid_cookie(line)
+ return None
+ cookie_dict.update({
+ 'name': match.group('name'),
+ 'value': match.group('value')})
+ # Extract individual attrs from the attrs chunk
+ for match in Definitions.ATTR_RE.finditer(match.group('attrs')):
+ captured = dict((k, v) for (k, v) in match.groupdict().items() if v)
+ unrecognized = captured.get('unrecognized', None)
+ if unrecognized:
+ if not ignore_bad_attributes:
+ raise InvalidCookieAttributeError(None, unrecognized,
+ "unrecognized")
+ _report_unknown_attribute(unrecognized)
+ continue
+ # for unary flags
+ for key in ('secure', 'httponly'):
+ if captured.get(key):
+ captured[key] = True
+ # ignore subcomponents of expires - they're still there to avoid doing
+ # two passes
+ timekeys = ('weekday', 'month', 'day', 'hour', 'minute', 'second',
+ 'year')
+ if 'year' in captured:
+ for key in timekeys:
+ del captured[key]
+ elif 'year2' in captured:
+ for key in timekeys:
+ del captured[key + "2"]
+ cookie_dict.update(captured)
+ return cookie_dict
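+
+# Example (illustrative):
+# parse_one_response('foo=bar; Path=/; HttpOnly') ==
+#     {'name': 'foo', 'value': 'bar', 'path': '/', 'httponly': True}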
+
+
+def _parse_response(header_data, ignore_bad_cookies=False,
+ ignore_bad_attributes=True):
+ """Turn one or more lines of 'Set-Cookie:' header data into a list of dicts
+ mapping attribute names to attribute values (as plain strings).
+ """
+ cookie_dicts = []
+ for line in Definitions.EOL.split(header_data.strip()):
+ if not line:
+ break
+ cookie_dict = parse_one_response(
+ line, ignore_bad_cookies=ignore_bad_cookies,
+ ignore_bad_attributes=ignore_bad_attributes)
+ if not cookie_dict:
+ continue
+ cookie_dicts.append(cookie_dict)
+ if not cookie_dicts:
+ if not ignore_bad_cookies:
+ raise InvalidCookieError(data=header_data)
+ _report_invalid_cookie(header_data)
+ return cookie_dicts
+
+
+class Cookie(object):
+ """Provide a simple interface for creating, modifying, and rendering
+ individual HTTP cookies.
+
+ Cookie attributes are represented as normal Python object attributes.
+ Parsing, rendering and validation are reconfigurable per-attribute. The
+ default behavior is intended to comply with RFC 6265, URL-encoding illegal
+ characters where necessary. For example: the default behavior for the
+ Expires attribute is to parse strings as datetimes using parse_date,
+ validate that any set value is a datetime, and render the attribute per the
+ preferred date format in RFC 1123.
+ """
+ def __init__(self, name, value, **kwargs):
+ # If we don't have or can't set a name value, we don't want to return
+ # junk, so we must break control flow. And we don't want to use
+ # InvalidCookieAttributeError, because users may want to catch that to
+ # suppress all complaining about funky attributes.
+ try:
+ self.name = name
+ except InvalidCookieAttributeError:
+ raise InvalidCookieError(message="invalid name for new Cookie",
+ data=name)
+ value = value or ''
+ try:
+ self.value = value
+ except InvalidCookieAttributeError:
+ raise InvalidCookieError(message="invalid value for new Cookie",
+ data=value)
+ if kwargs:
+ self._set_attributes(kwargs, ignore_bad_attributes=False)
+
+ def _set_attributes(self, attrs, ignore_bad_attributes=False):
+ for attr_name, attr_value in attrs.items():
+            if attr_name not in self.attribute_names:
+ if not ignore_bad_attributes:
+ raise InvalidCookieAttributeError(
+ attr_name, attr_value,
+ "unknown cookie attribute '%s'" % attr_name)
+ _report_unknown_attribute(attr_name)
+
+ try:
+ setattr(self, attr_name, attr_value)
+ except InvalidCookieAttributeError as error:
+ if not ignore_bad_attributes:
+ raise
+ _report_invalid_attribute(attr_name, attr_value, error.reason)
+ continue
+
+ @classmethod
+ def from_dict(cls, cookie_dict, ignore_bad_attributes=True):
+ """Construct an instance from a dict of strings to parse.
+
+ The main difference between this and Cookie(name, value, **kwargs) is
+ that the values in the argument to this method are parsed.
+
+ If ignore_bad_attributes=True (default), values which did not parse
+ are set to '' in order to avoid passing bad data.
+ """
+ name = cookie_dict.get('name', None)
+ if not name:
+ raise InvalidCookieError("Cookie must have name")
+ raw_value = cookie_dict.get('value', '')
+ # Absence or failure of parser here is fatal; errors in present name
+ # and value should be found by Cookie.__init__.
+ value = cls.attribute_parsers['value'](raw_value)
+ cookie = cls(name, value)
+
+ # Parse values from serialized formats into objects
+ parsed = {}
+ for key, value in cookie_dict.items():
+ # Don't want to pass name/value to _set_attributes
+ if key in ('name', 'value'):
+ continue
+ parser = cls.attribute_parsers.get(key)
+ if not parser:
+ # Don't let totally unknown attributes pass silently
+ if not ignore_bad_attributes:
+ raise InvalidCookieAttributeError(
+ key, value, "unknown cookie attribute '%s'" % key)
+ _report_unknown_attribute(key)
+ continue
+ try:
+ parsed_value = parser(value)
+ except Exception as e:
+ reason = "did not parse with %r: %r" % (parser, e)
+ if not ignore_bad_attributes:
+ raise InvalidCookieAttributeError(
+ key, value, reason)
+ _report_invalid_attribute(key, value, reason)
+ parsed_value = ''
+ parsed[key] = parsed_value
+
+ # Set the parsed objects (does object validation automatically)
+ cookie._set_attributes(parsed, ignore_bad_attributes)
+ return cookie
+
+ @classmethod
+ def from_string(cls, line, ignore_bad_cookies=False,
+ ignore_bad_attributes=True):
+ "Construct a Cookie object from a line of Set-Cookie header data."
+ cookie_dict = parse_one_response(
+ line, ignore_bad_cookies=ignore_bad_cookies,
+ ignore_bad_attributes=ignore_bad_attributes)
+ if not cookie_dict:
+ return None
+ return cls.from_dict(
+ cookie_dict, ignore_bad_attributes=ignore_bad_attributes)
+
+ def to_dict(self):
+ this_dict = {'name': self.name, 'value': self.value}
+ this_dict.update(self.attributes())
+ return this_dict
+
+ def validate(self, name, value):
+ """Validate a cookie attribute with an appropriate validator.
+
+ The value comes in already parsed (for example, an expires value
+ should be a datetime). Called automatically when an attribute
+ value is set.
+ """
+ validator = self.attribute_validators.get(name, None)
+ if validator:
+            return bool(validator(value))
+ return True
+
+ def __setattr__(self, name, value):
+ """Attributes mentioned in attribute_names get validated using
+ functions in attribute_validators, raising an exception on failure.
+ Others get left alone.
+ """
+ if name in self.attribute_names or name in ("name", "value"):
+ if name == 'name' and not value:
+ raise InvalidCookieError(message="Cookies must have names")
+ # Ignore None values indicating unset attr. Other invalids should
+ # raise error so users of __setattr__ can learn.
+ if value is not None:
+ if not self.validate(name, value):
+ raise InvalidCookieAttributeError(
+ name, value, "did not validate with " +
+ repr(self.attribute_validators.get(name)))
+ object.__setattr__(self, name, value)
+
+ def __getattr__(self, name):
+ """Provide for acting like everything in attribute_names is
+ automatically set to None, rather than having to do so explicitly and
+ only at import time.
+ """
+ if name in self.attribute_names:
+ return None
+ raise AttributeError(name)
+
+ def attributes(self):
+ """Export this cookie's attributes as a dict of encoded values.
+
+ This is an important part of the code for rendering attributes, e.g.
+ render_response().
+ """
+ dictionary = {}
+ # Only look for attributes registered in attribute_names.
+ for python_attr_name, cookie_attr_name in self.attribute_names.items():
+ value = getattr(self, python_attr_name)
+ renderer = self.attribute_renderers.get(python_attr_name, None)
+ if renderer:
+ value = renderer(value)
+ # If renderer returns None, or it's just natively none, then the
+ # value is suppressed entirely - does not appear in any rendering.
+ if not value:
+ continue
+ dictionary[cookie_attr_name] = value
+ return dictionary
+
+ def render_request(self):
+ """Render as a string formatted for HTTP request headers
+ (simple 'Cookie: ' style).
+ """
+ # Use whatever renderers are defined for name and value.
+ name, value = self.name, self.value
+ renderer = self.attribute_renderers.get('name', None)
+ if renderer:
+ name = renderer(name)
+ renderer = self.attribute_renderers.get('value', None)
+ if renderer:
+ value = renderer(value)
+ return ''.join((name, "=", value))
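+    # Example (illustrative):
+    # Cookie('rocky', 'road').render_request() == 'rocky=road'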
+
+ def render_response(self):
+ """Render as a string formatted for HTTP response headers
+ (detailed 'Set-Cookie: ' style).
+ """
+ # Use whatever renderers are defined for name and value.
+ # (.attributes() is responsible for all other rendering.)
+ name, value = self.name, self.value
+ renderer = self.attribute_renderers.get('name', None)
+ if renderer:
+ name = renderer(name)
+ renderer = self.attribute_renderers.get('value', None)
+ if renderer:
+ value = renderer(value)
+ return '; '.join(
+ ['{0}={1}'.format(name, value)] +
+ [key if isinstance(val, bool) else '='.join((key, val))
+ for key, val in self.attributes().items()]
+ )
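+    # Example (illustrative):
+    # Cookie('a', 'b', path='/spam').render_response() == 'a=b; Path=/spam'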
+
+ def __eq__(self, other):
+ attrs = ['name', 'value'] + list(self.attribute_names.keys())
+ for attr in attrs:
+ mine = getattr(self, attr, None)
+ his = getattr(other, attr, None)
+ if isinstance(mine, bytes):
+ mine = mine.decode('utf-8')
+ if isinstance(his, bytes):
+ his = his.decode('utf-8')
+ if attr == 'domain':
+ if mine and mine[0] == '.':
+ mine = mine[1:]
+ if his and his[0] == '.':
+ his = his[1:]
+ if mine != his:
+ return False
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ # Add a name and its proper rendering to this dict to register an attribute
+ # as exportable. The key is the name of the Cookie object attribute in
+ # Python, and it is mapped to the name you want in the output.
+ # 'name' and 'value' should not be here.
+ attribute_names = {
+ 'expires': 'Expires',
+ 'max_age': 'Max-Age',
+ 'domain': 'Domain',
+ 'path': 'Path',
+ 'comment': 'Comment',
+ 'version': 'Version',
+ 'secure': 'Secure',
+ 'httponly': 'HttpOnly',
+ }
+
+ # Register single-parameter functions in this dictionary to have them
+ # used for encoding outgoing values (e.g. as RFC compliant strings,
+ # as base64, encrypted stuff, etc.)
+ # These are called by the property generated by cookie_attribute().
+ # Usually it would be wise not to define a renderer for name, but it is
+ # supported in case there is ever a real need.
+ attribute_renderers = {
+ 'value': encode_cookie_value,
+ 'domain': render_domain,
+ 'expires': render_date,
+ 'max_age': lambda item: str(item) if item is not None else None,
+ 'secure': lambda item: True if item else False,
+ 'httponly': lambda item: True if item else False,
+ 'comment': encode_extension_av,
+ 'version': lambda item: (str(item) if isinstance(item, int)
+ else encode_extension_av(item)),
+ }
+
+ # Register single-parameter functions in this dictionary to have them used
+ # for decoding incoming values for use in the Python API (e.g. into nice
+ # objects, numbers, unicode strings, etc.)
+ # These are called by the property generated by cookie_attribute().
+ attribute_parsers = {
+ 'value': parse_value,
+ 'expires': parse_date,
+ 'domain': parse_domain,
+ 'path': parse_path,
+ 'max_age': lambda item: long(strip_spaces_and_quotes(item)),
+ 'comment': parse_string,
+ 'version': lambda item: int(strip_spaces_and_quotes(item)),
+ 'secure': lambda item: True if item else False,
+ 'httponly': lambda item: True if item else False,
+ }
+
+ # Register single-parameter functions which return a true value for
+ # acceptable values, and a false value for unacceptable ones. An
+ # attribute's validator is run after it is parsed or when it is directly
+ # set, and InvalidCookieAttribute is raised if validation fails (and the
+ # validator doesn't raise a different exception prior)
+ attribute_validators = {
+ 'name': valid_name,
+ 'value': valid_value,
+ 'expires': valid_date,
+ 'domain': valid_domain,
+ 'path': valid_path,
+ 'max_age': valid_max_age,
+ 'comment': valid_value,
+ 'version': lambda number: re.match("^\d+\Z", str(number)),
+ 'secure': lambda item: item is True or item is False,
+ 'httponly': lambda item: item is True or item is False,
+ }
+
+
+class Cookies(dict):
+ """Represent a set of cookies indexed by name.
+
+ This class bundles together a set of Cookie objects and provides
+    a convenient interface to them for parsing and producing cookie headers.
+ In basic operation it acts just like a dict of Cookie objects, but it adds
+ additional convenience methods for the usual cookie tasks: add cookie
+ objects by their names, create new cookie objects under specified names,
+ parse HTTP request or response data into new cookie objects automatically
+ stored in the dict, and render the set in formats suitable for HTTP request
+ or response headers.
+ """
+ DEFAULT_COOKIE_CLASS = Cookie
+
+ def __init__(self, *args, **kwargs):
+ dict.__init__(self)
+ self.all_cookies = []
+ self.cookie_class = kwargs.get(
+ "_cookie_class", self.DEFAULT_COOKIE_CLASS)
+ self.add(*args, **kwargs)
+
+ def add(self, *args, **kwargs):
+ """Add Cookie objects by their names, or create new ones under
+ specified names.
+
+ Any unnamed arguments are interpreted as existing cookies, and
+ are added under the value in their .name attribute. With keyword
+ arguments, the key is interpreted as the cookie name and the
+ value as the UNENCODED value stored in the cookie.
+ """
+ # Only the first one is accessible through the main interface,
+ # others accessible through get_all (all_cookies).
+ for cookie in args:
+ self.all_cookies.append(cookie)
+ if cookie.name in self:
+ continue
+ self[cookie.name] = cookie
+ for key, value in kwargs.items():
+ cookie = self.cookie_class(key, value)
+ self.all_cookies.append(cookie)
+ if key in self:
+ continue
+ self[key] = cookie
+
+ def get_all(self, key):
+ return [cookie for cookie in self.all_cookies
+ if cookie.name == key]
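+    # Example (illustrative): after parse_request('a=1; a=2'), self['a'] is
+    # the first cookie parsed and get_all('a') returns both, in parse order.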
+
+ def parse_request(self, header_data, ignore_bad_cookies=False):
+ """Parse 'Cookie' header data into Cookie objects, and add them to
+ this Cookies object.
+
+ :arg header_data: string containing only 'Cookie:' request headers or
+ header values (as in CGI/WSGI HTTP_COOKIE); if more than one, they must
+ be separated by CRLF (\\r\\n).
+
+ :arg ignore_bad_cookies: if set, will log each syntactically invalid
+ cookie (at the granularity of semicolon-delimited blocks) rather than
+ raising an exception at the first bad cookie.
+
+ :returns: a Cookies instance containing Cookie objects parsed from
+ header_data.
+
+ .. note::
+ If you want to parse 'Set-Cookie:' response headers, please use
+ parse_response instead. parse_request will happily turn 'expires=frob'
+ into a separate cookie without complaining, according to the grammar.
+ """
+ cookies_dict = _parse_request(
+ header_data, ignore_bad_cookies=ignore_bad_cookies)
+ cookie_objects = []
+ for name, values in cookies_dict.items():
+ for value in values:
+ # Use from_dict to check name and parse value
+ cookie_dict = {'name': name, 'value': value}
+ try:
+ cookie = self.cookie_class.from_dict(cookie_dict)
+ except InvalidCookieError:
+ if not ignore_bad_cookies:
+ raise
+ else:
+ cookie_objects.append(cookie)
+ try:
+ self.add(*cookie_objects)
+ except InvalidCookieError:
+ if not ignore_bad_cookies:
+ raise
+ _report_invalid_cookie(header_data)
+ return self
+
+ def parse_response(self, header_data, ignore_bad_cookies=False,
+ ignore_bad_attributes=True):
+ """Parse 'Set-Cookie' header data into Cookie objects, and add them to
+ this Cookies object.
+
+ :arg header_data: string containing only 'Set-Cookie:' request headers
+ or their corresponding header values; if more than one, they must be
+ separated by CRLF (\\r\\n).
+
+ :arg ignore_bad_cookies: if set, will log each syntactically invalid
+ cookie rather than raising an exception at the first bad cookie. (This
+ includes cookies which have noncompliant characters in the attribute
+ section).
+
+ :arg ignore_bad_attributes: defaults to True, which means to log but
+ not raise an error when a particular attribute is unrecognized. (This
+ does not necessarily mean that the attribute is invalid, although that
+ would often be the case.) if unset, then an error will be raised at the
+ first semicolon-delimited block which has an unknown attribute.
+
+ :returns: a Cookies instance containing Cookie objects parsed from
+ header_data, each with recognized attributes populated.
+
+ .. note::
+ If you want to parse 'Cookie:' headers (i.e., data like what's sent
+ with an HTTP request, which has only name=value pairs and no
+ attributes), then please use parse_request instead. Such lines often
+ contain multiple name=value pairs, and parse_response will throw away
+ the pairs after the first one, which will probably generate errors or
+ confusing behavior. (Since there's no perfect way to automatically
+ determine which kind of parsing to do, you have to tell it manually by
+        choosing correctly between parse_request and parse_response.)
+ """
+ cookie_dicts = _parse_response(
+ header_data,
+ ignore_bad_cookies=ignore_bad_cookies,
+ ignore_bad_attributes=ignore_bad_attributes)
+ cookie_objects = []
+ for cookie_dict in cookie_dicts:
+ cookie = self.cookie_class.from_dict(cookie_dict)
+ cookie_objects.append(cookie)
+ self.add(*cookie_objects)
+ return self
+
+ @classmethod
+ def from_request(cls, header_data, ignore_bad_cookies=False):
+ "Construct a Cookies object from request header data."
+ cookies = cls()
+ cookies.parse_request(
+ header_data, ignore_bad_cookies=ignore_bad_cookies)
+ return cookies
+
+ @classmethod
+ def from_response(cls, header_data, ignore_bad_cookies=False,
+ ignore_bad_attributes=True):
+ "Construct a Cookies object from response header data."
+ cookies = cls()
+ cookies.parse_response(
+ header_data,
+ ignore_bad_cookies=ignore_bad_cookies,
+ ignore_bad_attributes=ignore_bad_attributes)
+ return cookies
+
+ def render_request(self, sort=True):
+ """Render the dict's Cookie objects into a string formatted for HTTP
+ request headers (simple 'Cookie: ' style).
+ """
+ if not sort:
+ return ("; ".join(
+ cookie.render_request() for cookie in self.values()))
+ return ("; ".join(sorted(
+ cookie.render_request() for cookie in self.values())))
+
+ def render_response(self, sort=True):
+ """Render the dict's Cookie objects into list of strings formatted for
+ HTTP response headers (detailed 'Set-Cookie: ' style).
+ """
+ rendered = [cookie.render_response() for cookie in self.values()]
+ return rendered if not sort else sorted(rendered)
+
+ def __repr__(self):
+ return "Cookies(%s)" % ', '.join("%s=%r" % (name, cookie.value) for
+ (name, cookie) in self.items())
+
+ def __eq__(self, other):
+ """Test if a Cookies object is globally 'equal' to another one by
+ seeing if it looks like a dict such that d[k] == self[k]. This depends
+ on each Cookie object reporting its equality correctly.
+ """
+ if not hasattr(other, "keys"):
+ return False
+ try:
+ keys = sorted(set(self.keys()) | set(other.keys()))
+ for key in keys:
+                if key not in self:
+                    return False
+                if key not in other:
+                    return False
+ if self[key] != other[key]:
+ return False
+ except (TypeError, KeyError):
+ raise
+ return True
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
diff --git a/third_party/python/cookies/test_cookies.py b/third_party/python/cookies/test_cookies.py
new file mode 100644
index 0000000000..2197916eff
--- /dev/null
+++ b/third_party/python/cookies/test_cookies.py
@@ -0,0 +1,2447 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""Tests for code in cookies.py.
+"""
+from __future__ import unicode_literals
+import re
+import sys
+import logging
+if sys.version_info < (3, 0, 0):
+ from urllib import quote, unquote
+else:
+ from urllib.parse import quote, unquote
+ unichr = chr
+ basestring = str
+from datetime import datetime, tzinfo, timedelta
+from pytest import raises
+
+from cookies import (
+ InvalidCookieError, InvalidCookieAttributeError,
+ Definitions,
+ Cookie, Cookies,
+ render_date, parse_date,
+ parse_string, parse_value, parse_domain, parse_path,
+ parse_one_response,
+ encode_cookie_value, encode_extension_av,
+ valid_value, valid_date, valid_domain, valid_path,
+ strip_spaces_and_quotes, _total_seconds,
+ )
+
+
+class RFC1034:
+ """Definitions from RFC 1034: 'DOMAIN NAMES - CONCEPTS AND FACILITIES'
+ section 3.5, as cited in RFC 6265 4.1.1.
+ """
+ digit = "[0-9]"
+ letter = "[A-Za-z]"
+ let_dig = "[0-9A-Za-z]"
+ let_dig_hyp = "[0-9A-Za-z\-]"
+ assert "\\" in let_dig_hyp
+ ldh_str = "%s+" % let_dig_hyp
+ label = "(?:%s|%s|%s)" % (
+ letter,
+ letter + let_dig,
+ letter + ldh_str + let_dig)
+ subdomain = "(?:%s\.)*(?:%s)" % (label, label)
+ domain = "( |%s)" % (subdomain)
+
+ def test_sanity(self):
+ "Basic smoke tests that definitions transcribed OK"
+ match = re.compile("^%s\Z" % self.domain).match
+ assert match("A.ISI.EDU")
+ assert match("XX.LCS.MIT.EDU")
+ assert match("SRI-NIC.ARPA")
+ assert not match("foo+bar")
+ assert match("foo.com")
+ assert match("foo9.com")
+ assert not match("9foo.com")
+ assert not match("26.0.0.73.COM")
+ assert not match(".woo.com")
+ assert not match("blop.foo.")
+ assert match("foo-bar.com")
+ assert not match("-foo.com")
+ assert not match("foo.com-")
+
+
+class RFC1123:
+ """Definitions from RFC 1123: "Requirements for Internet Hosts --
+ Application and Support" section 2.1, cited in RFC 6265 section
+ 4.1.1 as an update to RFC 1034.
+ Here this is really just used for testing Domain attribute values.
+ """
+ # Changed per 2.1 (similar to some changes in RFC 1101)
+ # this implementation is a bit simpler...
+ # n.b.: there are length limits in the real thing
+ label = "{let_dig}(?:(?:{let_dig_hyp}+)?{let_dig})?".format(
+ let_dig=RFC1034.let_dig, let_dig_hyp=RFC1034.let_dig_hyp)
+ subdomain = "(?:%s\.)*(?:%s)" % (label, label)
+ domain = "( |%s)" % (subdomain)
+
+ def test_sanity(self):
+ "Basic smoke tests that definitions transcribed OK"
+ match = re.compile("^%s\Z" % self.domain).match
+ assert match("A.ISI.EDU")
+ assert match("XX.LCS.MIT.EDU")
+ assert match("SRI-NIC.ARPA")
+ assert not match("foo+bar")
+ assert match("foo.com")
+ assert match("9foo.com")
+ assert match("3Com.COM")
+ assert match("3M.COM")
+
+
+class RFC2616:
+ """Definitions from RFC 2616 section 2.2, as cited in RFC 6265 4.1.1
+ """
+ SEPARATORS = '()<>@,;:\\"/[]?={} \t'
+
+
+class RFC5234:
+ """Basic definitions per RFC 5234: 'Augmented BNF for Syntax
+ Specifications'
+ """
+ CHAR = "".join([chr(i) for i in range(0, 127 + 1)])
+ CTL = "".join([chr(i) for i in range(0, 31 + 1)]) + "\x7f"
+ # this isn't in the RFC but it can be handy
+ NONCTL = "".join([chr(i) for i in range(32, 127)])
+ # this is what the RFC says about a token more or less verbatim
+ TOKEN = "".join(sorted(set(NONCTL) - set(RFC2616.SEPARATORS)))
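+ # For example, TOKEN built this way contains '!' and '~' but neither
+ # '(' nor the space character, since both appear in RFC2616.SEPARATORS.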
+
+
+class FixedOffsetTz(tzinfo):
+ """A tzinfo subclass for attaching to datetime objects.
+
+ Used for various tests involving date parsing, since the Python stdlib
+ does not obviously provide fixed-offset tzinfo subclasses and testing
+ this module only requires a very simple one.
+ """
+ def __init__(self, offset):
+ # tzinfo.utcoffset() throws an error for sub-minute amounts,
+ # so round
+ minutes = round(offset / 60.0, 0)
+ self.__offset = timedelta(minutes=minutes)
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return "FixedOffsetTz" + str(self.__offset.seconds)
+
+ def dst(self, dt):
+ return timedelta(0)
+
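+# A minimal usage sketch of FixedOffsetTz (illustration only, not part of
+# the original suite; the leading underscore keeps pytest's default
+# collection from picking it up): a 600-second offset rounds to ten whole
+# minutes, matching the FixedOffsetTz(600) used in the creation cases below.
+def _demo_fixed_offset_tz():
+ "Hypothetical demo exercising only the class defined above."
+ tz = FixedOffsetTz(600)
+ assert tz.utcoffset(None) == timedelta(minutes=10)
+ assert tz.dst(None) == timedelta(0)
+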
+
+class TestInvalidCookieError(object):
+ """Exercise the trivial behavior of the InvalidCookieError exception.
+ """
+ def test_simple(self):
+ "This be the test"
+ def exception(data):
+ "Gather an InvalidCookieError exception"
+ try:
+ raise InvalidCookieError(data)
+ except InvalidCookieError as exception:
+ return exception
+ # other exceptions will pass through
+ return None
+ assert exception("no donut").data == "no donut"
+
+ # Spot check for obvious junk in loggable representations.
+ e = exception("yay\x00whee")
+ assert "\x00" not in repr(e)
+ assert "\x00" not in str(e)
+ assert "yaywhee" not in repr(e)
+ assert "yaywhee" not in str(e)
+ assert "\n" not in repr(exception("foo\nbar"))
+
+
+class TestInvalidCookieAttributeError(object):
+ """Exercise the trivial behavior of InvalidCookieAttributeError.
+ """
+ def exception(self, *args, **kwargs):
+ "Generate an InvalidCookieAttributeError exception naturally"
+ try:
+ raise InvalidCookieAttributeError(*args, **kwargs)
+ except InvalidCookieAttributeError as exception:
+ return exception
+ return None
+
+ def test_simple(self):
+ e = self.exception("foo", "bar")
+ assert e.name == "foo"
+ assert e.value == "bar"
+
+ def test_junk_in_loggables(self):
+ # Spot check for obvious junk in loggable representations.
+ # This isn't completely idle: for example, nulls are ignored in
+ # %-formatted text, and this could be very misleading
+ e = self.exception("ya\x00y", "whee")
+ assert "\x00" not in repr(e)
+ assert "\x00" not in str(e)
+ assert "yay" not in repr(e)
+ assert "yay" not in str(e)
+
+ e = self.exception("whee", "ya\x00y")
+ assert "\x00" not in repr(e)
+ assert "\x00" not in str(e)
+ assert "yay" not in repr(e)
+ assert "yay" not in str(e)
+
+ assert "\n" not in repr(self.exception("yay", "foo\nbar"))
+ assert "\n" not in repr(self.exception("foo\nbar", "yay"))
+
+ def test_no_name(self):
+ # not recommended to do this, but we want to handle it if people do
+ e = self.exception(None, "stuff")
+ assert e.name is None
+ assert e.value == "stuff"
+ assert e.reason is None
+ assert 'stuff' in str(e)
+
+
+class TestDefinitions(object):
+ """Test the patterns in cookies.Definitions against specs.
+ """
+ def test_cookie_name(self, check_unicode=False):
+ """Check COOKIE_NAME against the token definition in RFC 2616 2.2 (as
+ cited in RFC 6265):
+
+ token = 1*<any CHAR except CTLs or separators>
+ separators = "(" | ")" | "<" | ">" | "@"
+ | "," | ";" | ":" | "\" | <">
+ | "/" | "[" | "]" | "?" | "="
+ | "{" | "}" | SP | HT
+
+ (Definitions.COOKIE_NAME is regex-ready while RFC5234.TOKEN is more
+ clearly related to the RFC; they should be functionally the same)
+ """
+ regex = Definitions.COOKIE_NAME_RE
+ assert regex.match(RFC5234.TOKEN)
+ assert not regex.match(RFC5234.NONCTL)
+ for c in RFC5234.CTL:
+ assert not regex.match(c)
+ for c in RFC2616.SEPARATORS:
+ # Skip one special case: a number of Java and PHP apps have used
+ # colons in names. While this is dumb, we don't want to choke on it
+ # by default, since it may be the single biggest cause of bugs filed
+ # against Python's cookie libraries.
+ if c == ':':
+ continue
+ assert not regex.match(c)
+ # Unicode over 7 bit ASCII shouldn't match, but this takes a while
+ if check_unicode:
+ for i in range(127, 0x10FFFF + 1):
+ assert not regex.match(unichr(i))
+
+ def test_cookie_octet(self):
+ """Check COOKIE_OCTET against the definition in RFC 6265:
+
+ cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
+ ; US-ASCII characters excluding CTLs,
+ ; whitespace DQUOTE, comma, semicolon,
+ ; and backslash
+ """
+ match = re.compile(r"^[%s]+\Z" % Definitions.COOKIE_OCTET).match
+ for c in RFC5234.CTL:
+ assert not match(c)
+ assert not match("a%sb" % c)
+ # suspect the RFC typoed 'whitespace, DQUOTE' as 'whitespace DQUOTE'
+ assert not match(' ')
+ assert not match('"')
+ assert not match(',')
+ assert not match(';')
+ assert not match('\\')
+ # the spec above DOES include =.-
+ assert match("=")
+ assert match(".")
+ assert match("-")
+
+ # Check that everything else in CHAR works.
+ safe_cookie_octet = "".join(sorted(
+ set(RFC5234.NONCTL) - set(' ",;\\')))
+ assert match(safe_cookie_octet)
+
+ def test_set_cookie_header(self):
+ """Smoke test SET_COOKIE_HEADER (used to compile SET_COOKIE_HEADER_RE)
+ against HEADER_CASES.
+ """
+ # should match if expectation is not an error, shouldn't match if it is
+ # an error. set-cookie-header is for responses not requests, so use
+ # response expectation rather than request expectation
+ match = re.compile(Definitions.SET_COOKIE_HEADER).match
+ for case in HEADER_CASES:
+ arg, kwargs, request_result, expected = case
+ this_match = match(arg)
+ if expected and not isinstance(expected, type):
+ assert this_match, "should match as response: " + repr(arg)
+ else:
+ if not request_result:
+ assert not this_match, \
+ "should not match as response: " + repr(arg)
+
+ def test_cookie_cases(self):
+ """Smoke test COOKIE_HEADER (used to compile COOKIE_HEADER_RE) against
+ HEADER_CASES.
+ """
+ # should match if expectation is not an error, shouldn't match if it is
+ # an error. cookie-header is for requests not responses, so use request
+ # expectation rather than response expectation
+ match = re.compile(Definitions.COOKIE).match
+ for case in HEADER_CASES:
+ arg, kwargs, expected, response_result = case
+ this_match = match(arg)
+ if expected and not isinstance(expected, type):
+ assert this_match, "should match as request: " + repr(arg)
+ else:
+ if not response_result:
+ assert not this_match, \
+ "should not match as request: " + repr(arg)
+
+ def test_cookie_pattern(self):
+ """Smoke test Definitions.COOKIE (used to compile COOKIE_RE) against
+ the grammar for cookie-header as in RFC 6265.
+
+ cookie-header = "Cookie:" OWS cookie-string OWS
+ cookie-string = cookie-pair *( ";" SP cookie-pair )
+ cookie-pair = cookie-name "=" cookie-value
+ cookie-name = token
+ cookie-value = *cookie-octet / ( DQUOTE *cookie-octet DQUOTE )
+
+ cookie-name and cookie-value are not broken apart for separate
+ testing, as the former is essentially just token and the latter
+ essentially just cookie-octet.
+ """
+ match = re.compile(Definitions.COOKIE).match
+ # cookie-pair behavior around =
+ assert match("foo").group('invalid')
+ assert match("foo=bar")
+ # Looks dumb, but this is legal because "=" is valid for cookie-octet.
+ assert match("a=b=c")
+ # DQUOTE *cookie-octet DQUOTE - allowed
+ assert match('foo="bar"')
+
+ # for testing on the contents of cookie name and cookie value,
+ # see test_cookie_name and test_cookie_octet.
+
+ regex = re.compile(Definitions.COOKIE)
+ correct = [
+ ('foo', 'yar', ''),
+ ('bar', 'eeg', ''),
+ ('baz', 'wog', ''),
+ ('frob', 'laz', '')]
+
+ def assert_correct(s):
+ #naive = re.findall(" *([^;]+)=([^;]+) *(?:;|\Z)", s)
+ result = regex.findall(s)
+ assert result == correct
+ # normal-looking case should work normally
+ assert_correct("foo=yar; bar=eeg; baz=wog; frob=laz")
+ # forgive lack of whitespace as long as semicolons are explicit
+ assert_correct("foo=yar;bar=eeg;baz=wog;frob=laz")
+ # forgive too much whitespace AROUND values
+ assert_correct(" foo=yar; bar=eeg; baz=wog; frob=laz ")
+
+ # Actually literal spaces are NOT allowed in cookie values per RFC 6265
+ # and it is UNWISE to put them in without escaping. But we want the
+ # flexibility to let this pass with a warning, because this is the kind
+ # of bad idea which is very common and results in loud complaining on
+ # issue trackers on the grounds that PHP does it or something. So the
+ # regex is weakened, but the presence of a space should still be at
+ # least noted, and an exception must be raised if = is also used
+ # - because that would often indicate the loss of cookies due to
+ # forgotten separator, as in "foo=yar bar=eeg baz=wog frob=laz".
+ assert regex.findall("foo=yar; bar=eeg; baz=wog; frob=l az") == [
+ ('foo', 'yar', ''),
+ ('bar', 'eeg', ''),
+ ('baz', 'wog', ''),
+ # handle invalid internal whitespace.
+ ('frob', 'l az', '')
+ ]
+
+ # Without semicolons or inside semicolon-delimited blocks, the part
+ # before the first = should be interpreted as a name, and the rest as
+ # a value (since = is not forbidden for cookie values). Thus:
+ result = regex.findall("foo=yarbar=eegbaz=wogfrob=laz")
+ assert result[0][0] == 'foo'
+ assert result[0][1] == 'yarbar=eegbaz=wogfrob=laz'
+ assert result[0][2] == ''
+
+ # Make some bad values and see that it's handled reasonably.
+ # (related to http://bugs.python.org/issue2988)
+ # don't test on semicolon because the regexp stops there, reasonably.
+ for c in '\x00",\\':
+ nasty = "foo=yar" + c + "bar"
+ result = regex.findall(nasty + "; baz=bam")
+ # whole bad pair reported in the 'invalid' group (the third one)
+ assert result[0][2] == nasty
+ # kept on truckin' and got the other one just fine.
+ assert result[1] == ('baz', 'bam', '')
+ # same thing if the good one is first and the bad one second
+ result = regex.findall("baz=bam; " + nasty)
+ assert result[0] == ('baz', 'bam', '')
+ assert result[1][2] == ' ' + nasty
+
+ def test_extension_av(self, check_unicode=False):
+ """Test Definitions.EXTENSION_AV against extension-av per RFC 6265.
+
+ extension-av = <any CHAR except CTLs or ";">
+ """
+ # This is how it's defined in RFC 6265, just about verbatim.
+ extension_av_explicit = "".join(sorted(
+ set(RFC5234.CHAR) - set(RFC5234.CTL + ";")))
+ # ... that should turn out to be the same as Definitions.EXTENSION_AV
+ match = re.compile(r"^([%s]+)\Z" % Definitions.EXTENSION_AV).match
+ # Verify I didn't mess up on escaping here first
+ assert match(r']')
+ assert match(r'[')
+ assert match(r"'")
+ assert match(r'"')
+ assert match("\\")
+ assert match(extension_av_explicit)
+ # There should be some CHAR not matched
+ assert not match(RFC5234.CHAR)
+ # Every single CTL should not match
+ for c in RFC5234.CTL + ";":
+ assert not match(c)
+ # Unicode over 7 bit ASCII shouldn't match, but this takes a while
+ if check_unicode:
+ for i in range(127, 0x10FFFF + 1):
+ assert not match(unichr(i))
+
+ def test_max_age_av(self):
+ "Smoke test Definitions.MAX_AGE_AV"
+ # Not a lot to this, it's just digits
+ match = re.compile(r"^%s\Z" % Definitions.MAX_AGE_AV).match
+ assert not match("")
+ assert not match("Whiskers")
+ assert not match("Max-Headroom=992")
+ for c in "123456789":
+ assert not match(c)
+ assert match("Max-Age=%s" % c)
+ assert match("Max-Age=0")
+ for c in RFC5234.CHAR:
+ assert not match(c)
+
+ def test_label(self, check_unicode=False):
+ "Test label, as used in Domain attribute"
+ match = re.compile(r"^(%s)\Z" % Definitions.LABEL).match
+ for i in range(0, 10):
+ assert match(str(i))
+ assert not match(".")
+ assert not match(",")
+ for c in RFC5234.CTL:
+ assert not match("a%sb" % c)
+ assert not match("%sb" % c)
+ assert not match("a%s" % c)
+ # Unicode over 7 bit ASCII shouldn't match, but this takes a while
+ if check_unicode:
+ for i in range(127, 0x10FFFF + 1):
+ assert not match(unichr(i))
+
+ def test_domain_av(self):
+ "Smoke test Definitions.DOMAIN_AV"
+ # This is basically just RFC1123.subdomain, which has its own
+ # assertions in the class definition
+ bad_domains = [
+ ""
+ ]
+ good_domains = [
+ "foobar.com",
+ "foo-bar.com",
+ "3Com.COM"
+ ]
+
+ # First test DOMAIN via DOMAIN_RE
+ match = Definitions.DOMAIN_RE.match
+ for domain in bad_domains:
+ assert not match(domain)
+ for domain in good_domains:
+ assert match(domain)
+
+ # Now same tests through DOMAIN_AV
+ match = re.compile(r"^%s\Z" % Definitions.DOMAIN_AV).match
+ for domain in bad_domains:
+ assert not match("Domain=%s" % domain)
+ for domain in good_domains:
+ assert not match(domain)
+ assert match("Domain=%s" % domain)
+ # This is NOT valid and shouldn't be tolerated in cookies we create,
+ # but it should be tolerated in existing cookies since people do it;
+ # interpreted by stripping the initial .
+ assert match("Domain=.foo.net")
+
+ def test_path_av(self):
+ "Smoke test PATH and PATH_AV"
+ # This is basically just EXTENSION_AV, see test_extension_av
+ bad_paths = [
+ ""
+ ]
+ good_paths = [
+ "/",
+ "/foo",
+ "/foo/bar"
+ ]
+ match = Definitions.PATH_RE.match
+ for path in bad_paths:
+ assert not match(path)
+ for path in good_paths:
+ assert match(path)
+
+ match = re.compile(r"^%s\Z" % Definitions.PATH_AV).match
+ for path in bad_paths:
+ assert not match("Path=%s" % path)
+ for path in good_paths:
+ assert not match(path)
+ assert match("Path=%s" % path)
+
+ def test_months(self):
+ """Sanity checks on MONTH_SHORT and MONTH_LONG month name recognizers.
+
+ The RFCs set these in stone, they aren't locale-dependent.
+ """
+ match = re.compile(Definitions.MONTH_SHORT).match
+ assert match("Jan")
+ assert match("Feb")
+ assert match("Mar")
+ assert match("Apr")
+ assert match("May")
+ assert match("Jun")
+ assert match("Jul")
+ assert match("Aug")
+ assert match("Sep")
+ assert match("Oct")
+ assert match("Nov")
+ assert match("Dec")
+
+ match = re.compile(Definitions.MONTH_LONG).match
+ assert match("January")
+ assert match("February")
+ assert match("March")
+ assert match("April")
+ assert match("May")
+ assert match("June")
+ assert match("July")
+ assert match("August")
+ assert match("September")
+ assert match("October")
+ assert match("November")
+ assert match("December")
+
+ def test_weekdays(self):
+ """Sanity check on WEEKDAY_SHORT and WEEKDAY_LONG weekday
+ recognizers.
+
+ The RFCs set these in stone, they aren't locale-dependent.
+ """
+ match = re.compile(Definitions.WEEKDAY_SHORT).match
+ assert match("Mon")
+ assert match("Tue")
+ assert match("Wed")
+ assert match("Thu")
+ assert match("Fri")
+ assert match("Sat")
+ assert match("Sun")
+
+ match = re.compile(Definitions.WEEKDAY_LONG).match
+ assert match("Monday")
+ assert match("Tuesday")
+ assert match("Wednesday")
+ assert match("Thursday")
+ assert match("Friday")
+ assert match("Saturday")
+ assert match("Sunday")
+
+ def test_day_of_month(self):
+ """Check that the DAY_OF_MONTH regex allows all actual days, but
+ excludes obviously wrong ones (so they are tossed in the first pass).
+ """
+ match = re.compile(Definitions.DAY_OF_MONTH).match
+ for day in ['01', '02', '03', '04', '05', '06', '07', '08', '09', ' 1',
+ ' 2', ' 3', ' 4', ' 5', ' 6', ' 7', ' 8', ' 9', '1', '2', '3',
+ '4', '5', '6', '7', '8', '9'] \
+ + [str(i) for i in range(10, 32)]:
+ assert match(day)
+ assert not match("0")
+ assert not match("00")
+ assert not match("000")
+ assert not match("111")
+ assert not match("99")
+ assert not match("41")
+
+ def test_expires_av(self):
+ "Smoke test the EXPIRES_AV regex pattern"
+ # Definitions.EXPIRES_AV is actually pretty bad, because it's a
+ # disaster to test three different date formats with lots of definition
+ # dependencies, and odds are good that other implementations are loose;
+ # so this parser is also loose. "Liberal in what you accept,
+ # conservative in what you produce."
+ match = re.compile(r"^%s\Z" % Definitions.EXPIRES_AV).match
+ assert not match("")
+ assert not match("Expires=")
+
+ assert match("Expires=Tue, 15-Jan-2013 21:47:38 GMT")
+ assert match("Expires=Sun, 06 Nov 1994 08:49:37 GMT")
+ assert match("Expires=Sunday, 06-Nov-94 08:49:37 GMT")
+ assert match("Expires=Sun Nov 6 08:49:37 1994")
+ # attributed to Netscape in RFC 2109 10.1.2
+ assert match("Expires=Mon, 13-Jun-93 10:00:00 GMT")
+
+ assert not match("Expires=S9n, 06 Nov 1994 08:49:37 GMT")
+ assert not match("Expires=Sun3ay, 06-Nov-94 08:49:37 GMT")
+ assert not match("Expires=S9n Nov 6 08:49:37 1994")
+
+ assert not match("Expires=Sun, A6 Nov 1994 08:49:37 GMT")
+ assert not match("Expires=Sunday, 0B-Nov-94 08:49:37 GMT")
+ assert not match("Expires=Sun No8 6 08:49:37 1994")
+
+ assert not match("Expires=Sun, 06 N3v 1994 08:49:37 GMT")
+ assert not match("Expires=Sunday, 06-N8v-94 08:49:37 GMT")
+ assert not match("Expires=Sun Nov A 08:49:37 1994")
+
+ assert not match("Expires=Sun, 06 Nov 1B94 08:49:37 GMT")
+ assert not match("Expires=Sunday, 06-Nov-C4 08:49:37 GMT")
+ assert not match("Expires=Sun Nov 6 08:49:37 1Z94")
+
+ def test_no_obvious_need_for_disjunctive_attr_pattern(self):
+ """Smoke test the assumption that extension-av is a reasonable set of
+ chars for all attrs (and thus that there is no reason to use a fancy
+ disjunctive pattern in the findall that splits out the attrs, freeing
+ us to use EXTENSION_AV instead).
+
+ If this works, then ATTR should work
+ """
+ match = re.compile(r"^[%s]+\Z" % Definitions.EXTENSION_AV).match
+ assert match("Expires=Sun, 06 Nov 1994 08:49:37 GMT")
+ assert match("Expires=Sunday, 06-Nov-94 08:49:37 GMT")
+ assert match("Expires=Sun Nov 6 08:49:37 1994")
+ assert match("Max-Age=14658240962")
+ assert match("Domain=FoO.b9ar.baz")
+ assert match("Path=/flakes")
+ assert match("Secure")
+ assert match("HttpOnly")
+
+ def test_attr(self):
+ """Smoke test ATTR, used to compile ATTR_RE.
+ """
+ match = re.compile(Definitions.ATTR).match
+
+ def recognized(pattern):
+ "macro for seeing if ATTR recognized something"
+ this_match = match(pattern)
+ if not this_match:
+ return False
+ groupdict = this_match.groupdict()
+ if groupdict['unrecognized']:
+ return False
+ return True
+
+ # Quickly test that a batch of attributes matching the explicitly
+ # recognized patterns make it through without anything in the
+ # 'unrecognized' catchall capture group.
+ for pattern in [
+ "Secure",
+ "HttpOnly",
+ "Max-Age=9523052",
+ "Domain=frobble.com",
+ "Domain=3Com.COM",
+ "Path=/",
+ "Expires=Wed, 09 Jun 2021 10:18:14 GMT",
+ ]:
+ assert recognized(pattern)
+
+ # Anything else is in extension-av and that's very broad;
+ # see test_extension_av for that test.
+ # This is only about the recognized ones.
+ assert not recognized("Frob=mugmannary")
+ assert not recognized("Fqjewp@1j5j510923")
+ assert not recognized(";aqjwe")
+ assert not recognized("ETJpqw;fjw")
+ assert not recognized("fjq;")
+ assert not recognized("Expires=\x00")
+
+ # Verify interface from regexp for extracting values isn't changed;
+ # a little rigidity here is a good idea
+ expires = "Wed, 09 Jun 2021 10:18:14 GMT"
+ m = match("Expires=%s" % expires)
+ assert m.group("expires") == expires
+
+ max_age = "233951698"
+ m = match("Max-Age=%s" % max_age)
+ assert m.group("max_age") == max_age
+
+ domain = "flarp"
+ m = match("Domain=%s" % domain)
+ assert m.group("domain") == domain
+
+ path = "2903"
+ m = match("Path=%s" % path)
+ assert m.group("path") == path
+
+ m = match("Secure")
+ assert m.group("secure")
+ assert not m.group("httponly")
+
+ m = match("HttpOnly")
+ assert not m.group("secure")
+ assert m.group("httponly")
+
+ def test_date_accepts_formats(self):
+ """Check that DATE matches most formats used in Expires: headers,
+ and explain what the different formats are about.
+
+ The value extraction of this regexp is more comprehensively exercised
+ by test_date_parsing().
+ """
+ # Date formats vary widely in the wild. Even the standards vary widely.
+ # This series of tests does spot-checks with instances of formats that
+ # it makes sense to support. In the following comments, each format is
+ # discussed and the rationale for the overall regexp is developed.
+
+ match = re.compile(Definitions.DATE).match
+
+ # The most common formats, related to the old Netscape cookie spec
+ # (NCSP), are supposed to follow this template:
+ #
+ # Wdy, DD-Mon-YYYY HH:MM:SS GMT
+ #
+ # (where 'Wdy' is a short weekday, and 'Mon' is a named month).
+ assert match("Mon, 20-Jan-1994 00:00:00 GMT")
+
+ # Similarly, RFC 850 proposes this format:
+ #
+ # Weekday, DD-Mon-YY HH:MM:SS GMT
+ #
+ # (with a long-form weekday and a 2-digit year).
+ assert match("Tuesday, 12-Feb-92 23:25:42 GMT")
+
+ # RFC 1036 obsoleted the RFC 850 format:
+ #
+ # Wdy, DD Mon YY HH:MM:SS GMT
+ #
+ # (shortening the weekday format and changing dashes to spaces).
+ assert match("Wed, 30 Mar 92 13:16:12 GMT")
+
+ # RFC 6265 cites a definition from RFC 2616, which uses the RFC 1123
+ # definition but limits it to GMT (consonant with NCSP). RFC 1123
+ # expanded RFC 822 with 2-4 digit years (more permissive than NCSP);
+ # RFC 822 left weekday and seconds as optional, and allowed a day of
+ # 1-2 digits (all more permissive than NCSP), giving something like:
+ #
+ # [Wdy, ][D]D Mon [YY]YY HH:MM[:SS] GMT
+ #
+ assert match("Thu, 3 Apr 91 12:46 GMT")
+ # No weekday, two digit year.
+ assert match("13 Apr 91 12:46 GMT")
+
+ # Similarly, there is RFC 2822:
+ #
+ # [Wdy, ][D]D Mon YYYY HH:MM[:SS] GMT
+ # (which only differs in requiring a 4-digit year, where RFC 1123
+ # permits 2 or 3 digit years).
+ assert match("13 Apr 1991 12:46 GMT")
+ assert match("Wed, 13 Apr 1991 12:46 GMT")
+
+ # The generalized format given above encompasses RFC 1036 and RFC 2822
+ # and would encompass NCSP except for the dashes; allowing long-form
+ # weekdays also encompasses the format proposed in RFC 850. Taken
+ # together, this should cover something like 99% of Expires values
+ # (see, e.g., https://bugzilla.mozilla.org/show_bug.cgi?id=610218)
+
+ # Finally, we also want to support asctime format, as mentioned in RFC
+ # 850 and RFC 2616 and occasionally seen in the wild:
+ # Wdy Mon DD HH:MM:SS YYYY
+ # e.g.: Sun Nov 6 08:49:37 1994
+ assert match("Sun Nov 6 08:49:37 1994")
+ assert match("Sun Nov 26 08:49:37 1994")
+ # Reportedly someone has tacked 'GMT' on to the end of an asctime -
+ # although this is not RFC valid, it is pretty harmless
+ assert match("Sun Nov 26 08:49:37 1994 GMT")
+
+ # The matches above don't mean much until we rule out that DATE was
+ # trivially matching .* or similar. This isn't intended to be a
+ # thorough test, just to rule out the obvious reason; see
+ # test_parse_date() for a more thorough workout of the whole parse
+ # and render mechanisms.
+ assert not match("")
+ assert not match(" ")
+ assert not match("wobbly")
+ assert not match("Mon")
+ assert not match("Mon, 20")
+ assert not match("Mon, 20 Jan")
+ assert not match("Mon, 20,Jan,1994 00:00:00 GMT")
+ assert not match("Tuesday, 12-Feb-992 23:25:42 GMT")
+ assert not match("Wed, 30 Mar 92 13:16:1210 GMT")
+ assert not match("Wed, 30 Mar 92 13:16:12:10 GMT")
+ assert not match("Thu, 3 Apr 91 12:461 GMT")
+
+ def test_eol(self):
+ """Test that the simple EOL regex works basically as expected.
+ """
+ split = Definitions.EOL.split
+ assert split("foo\nbar") == ["foo", "bar"]
+ assert split("foo\r\nbar") == ["foo", "bar"]
+ letters = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+ assert split("\n".join(letters)) == letters
+ assert split("\r\n".join(letters)) == letters
+
+ def test_compiled(self):
+ """Check that certain patterns are present as compiled regexps
+ """
+ re_type = type(re.compile(''))
+
+ def present(name):
+ "Macro for testing existence of an re in Definitions"
+ item = getattr(Definitions, name)
+ return item and isinstance(item, re_type)
+
+ assert present("COOKIE_NAME_RE")
+ assert present("COOKIE_RE")
+ assert present("SET_COOKIE_HEADER_RE")
+ assert present("ATTR_RE")
+ assert present("DATE_RE")
+ assert present("EOL")
+
+
+def _test_init(cls, args, kwargs, expected):
+ "Core instance test function for test_init"
+ print("test_init", cls, args, kwargs)
+ try:
+ instance = cls(*args, **kwargs)
+ except Exception as exception:
+ if type(exception) == expected:
+ return
+ logging.error("expected %s, got %s", expected, repr(exception))
+ raise
+ if isinstance(expected, type) and issubclass(expected, Exception):
+ raise AssertionError("No exception raised; "
+ "expected %s for %s/%s" % (
+ expected.__name__,
+ repr(args),
+ repr(kwargs)))
+ for attr_name, attr_value in expected.items():
+ assert getattr(instance, attr_name) == attr_value
+
+
+class TestCookie(object):
+ """Tests for the Cookie class.
+ """
+ # Test cases exercising different constructor calls to make a new Cookie
+ # from scratch. Each case is tuple:
+ # args, kwargs, exception or dict of expected attribute values
+ # this exercises the default validators as well.
+ creation_cases = [
+ # bad call gives TypeError
+ (("foo",), {}, TypeError),
+ (("a", "b", "c"), {}, TypeError),
+ # give un-ascii-able name - raises error due to likely
+ # compatibility problems (cookie ignored, etc.)
+ # in value it's fine, it'll be encoded and not inspected anyway.
+ (("ăŊĻ", "b"), {}, InvalidCookieError),
+ (("b", "ăŊĻ"), {}, {'name': 'b', 'value': "ăŊĻ"}),
+ # normal simple construction gives name and value
+ (("foo", "bar"), {}, {'name': 'foo', 'value': 'bar'}),
+ # add a valid attribute and get it set
+ (("baz", "bam"), {'max_age': 9},
+ {'name': 'baz', 'value': 'bam', 'max_age': 9}),
+ # multiple valid attributes
+ (("x", "y"), {'max_age': 9, 'comment': 'fruity'},
+ {'name': 'x', 'value': 'y',
+ 'max_age': 9, 'comment': 'fruity'}),
+ # invalid max-age
+ (("w", "m"), {'max_age': 'loopy'}, InvalidCookieAttributeError),
+ (("w", "m"), {'max_age': -1}, InvalidCookieAttributeError),
+ (("w", "m"), {'max_age': 1.2}, InvalidCookieAttributeError),
+ # invalid expires
+ (("w", "m"), {'expires': 0}, InvalidCookieAttributeError),
+ (("w", "m"), {'expires':
+ datetime(2010, 1, 1, tzinfo=FixedOffsetTz(600))},
+ InvalidCookieAttributeError),
+ # control: valid expires
+ (("w", "m"),
+ {'expires': datetime(2010, 1, 1)},
+ {'expires': datetime(2010, 1, 1)}),
+ # invalid domain
+ (("w", "m"), {'domain': ''}, InvalidCookieAttributeError),
+ (("w", "m"), {'domain': '@'}, InvalidCookieAttributeError),
+ (("w", "m"), {'domain': '.foo.net'}, {'domain': '.foo.net'}),
+ # control: valid domain
+ (("w", "m"),
+ {'domain': 'foo.net'},
+ {'domain': 'foo.net'},),
+ # invalid path
+ (("w", "m"), {'path': ''}, InvalidCookieAttributeError),
+ (("w", "m"), {'path': '""'}, InvalidCookieAttributeError),
+ (("w", "m"), {'path': 'foo'}, InvalidCookieAttributeError),
+ (("w", "m"), {'path': '"/foo"'}, InvalidCookieAttributeError),
+ (("w", "m"), {'path': ' /foo '}, InvalidCookieAttributeError),
+ # control: valid path
+ (("w", "m"), {'path': '/'},
+ {'path': '/'}),
+ (("w", "m"), {'path': '/axes'},
+ {'path': '/axes'}),
+ # invalid version per RFC 2109/RFC 2965
+ (("w", "m"), {'version': ''}, InvalidCookieAttributeError),
+ (("w", "m"), {'version': 'baa'}, InvalidCookieAttributeError),
+ (("w", "m"), {'version': -2}, InvalidCookieAttributeError),
+ (("w", "m"), {'version': 2.3}, InvalidCookieAttributeError),
+ # control: valid version
+ (("w", "m"), {'version': 0}, {'version': 0}),
+ (("w", "m"), {'version': 1}, {'version': 1}),
+ (("w", "m"), {'version': 3042}, {'version': 3042}),
+ # invalid secure, httponly
+ (("w", "m"), {'secure': ''}, InvalidCookieAttributeError),
+ (("w", "m"), {'secure': 0}, InvalidCookieAttributeError),
+ (("w", "m"), {'secure': 1}, InvalidCookieAttributeError),
+ (("w", "m"), {'secure': 'a'}, InvalidCookieAttributeError),
+ (("w", "m"), {'httponly': ''}, InvalidCookieAttributeError),
+ (("w", "m"), {'httponly': 0}, InvalidCookieAttributeError),
+ (("w", "m"), {'httponly': 1}, InvalidCookieAttributeError),
+ (("w", "m"), {'httponly': 'a'}, InvalidCookieAttributeError),
+ # valid comment
+ (("w", "m"), {'comment': 'a'}, {'comment': 'a'}),
+ # invalid names
+ # (unicode cases are done last because they mess with pytest print)
+ ((None, "m"), {}, InvalidCookieError),
+ (("", "m"), {}, InvalidCookieError),
+ (("ü", "m"), {}, InvalidCookieError),
+ # invalid values
+ (("w", None), {}, {'name': 'w'}),
+ # a control - unicode is valid value, just gets encoded on way out
+ (("w", "üm"), {}, {'value': "üm"}),
+ # comma
+ (('a', ','), {}, {'value': ','}),
+ # semicolons
+ (('a', ';'), {}, {'value': ';'}),
+ # spaces
+ (('a', ' '), {}, {'value': ' '}),
+ ]
+
+ def test_init(self):
+ """Exercise __init__ and validators.
+
+ This is important both because it is a user-facing API, and also
+ because the parse/render tests depend heavily on it.
+ """
+ creation_cases = self.creation_cases + [
+ (("a", "b"), {'frob': 10}, InvalidCookieAttributeError)
+ ]
+ counter = 0
+ for args, kwargs, expected in creation_cases:
+ counter += 1
+ logging.error("counter %d, %s, %s, %s", counter, args, kwargs,
+ expected)
+ _test_init(Cookie, args, kwargs, expected)
+
+ def test_set_attributes(self):
+ """Exercise setting, validation and getting of attributes without
+ much involving __init__. Also sets value and name.
+ """
+ for args, kwargs, expected in self.creation_cases:
+ if not kwargs:
+ continue
+ try:
+ cookie = Cookie("yarp", "flam")
+ for attr, value in kwargs.items():
+ setattr(cookie, attr, value)
+ if args:
+ cookie.name = args[0]
+ cookie.value = args[1]
+ except Exception as e:
+ if type(e) == expected:
+ continue
+ raise
+ if isinstance(expected, type) and issubclass(expected, Exception):
+ raise AssertionError("No exception raised; "
+ "expected %s for %s" % (
+ expected.__name__,
+ repr(kwargs)))
+ for attr_name, attr_value in expected.items():
+ assert getattr(cookie, attr_name) == attr_value
+
+ def test_get_defaults(self):
+ "Test that defaults are right for cookie attrs"
+ cookie = Cookie("foo", "bar")
+ for attr in (
+ "expires",
+ "max_age",
+ "domain",
+ "path",
+ "comment",
+ "version",
+ "secure",
+ "httponly"):
+ assert hasattr(cookie, attr)
+ assert getattr(cookie, attr) is None
+ # Verify that not every name is getting something
+ for attr in ("foo", "bar", "baz"):
+ assert not hasattr(cookie, attr)
+ with raises(AttributeError):
+ getattr(cookie, attr)
+
+ names_values = [
+ ("a", "b"),
+ ("foo", "bar"),
+ ("baz", "1234567890"),
+ ("!!#po99!", "blah"),
+ ("^_~`*", "foo"),
+ ("%s+|-.&$", "snah"),
+ ("lub", "!@#$%^&*()[]{}|/:'<>~.?`"),
+ ("woah", "====+-_"),
+ ]
+
+ def test_render_response(self):
+ "Test rendering Cookie object for Set-Cookie: header"
+ for name, value in self.names_values:
+ cookie = Cookie(name, value)
+ expected = "{name}={value}".format(
+ name=name, value=value)
+ assert cookie.render_response() == expected
+ for data, result in [
+ ({'name': 'a', 'value': 'b'}, "a=b"),
+ ({'name': 'foo', 'value': 'bar'}, "foo=bar"),
+ ({'name': 'baz', 'value': 'bam'}, "baz=bam"),
+ ({'name': 'baz', 'value': 'bam', 'max_age': 2},
+ "baz=bam; Max-Age=2"),
+ ({'name': 'baz', 'value': 'bam',
+ 'max_age': 2, 'comment': 'foobarbaz'},
+ "baz=bam; Max-Age=2; Comment=foobarbaz"),
+ ({'name': 'baz', 'value': 'bam',
+ 'max_age': 2,
+ 'expires': datetime(1970, 1, 1),
+ },
+ "baz=bam; Max-Age=2; "
+ "Expires=Thu, 01 Jan 1970 00:00:00 GMT"),
+ ({'name': 'baz', 'value': 'bam', 'path': '/yams',
+ 'domain': '3Com.COM'},
+ "baz=bam; Domain=3Com.COM; Path=/yams"),
+ ({'name': 'baz', 'value': 'bam', 'path': '/', 'secure': True,
+ 'httponly': True},
+ "baz=bam; Path=/; Secure; HttpOnly"),
+ ({'name': 'baz', 'value': 'bam', 'domain': '.domain'},
+ 'baz=bam; Domain=domain'),
+ ]:
+ cookie = Cookie(**data)
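+ # Compare as sorted lists of "; "-separated pieces, since attribute
+ # order within the rendered header is not significant here.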
+ actual = sorted(cookie.render_response().split("; "))
+ ideal = sorted(result.split("; "))
+ assert actual == ideal
+
+ def test_render_encode(self):
+ """Test encoding of a few special characters.
+
+ as in http://bugs.python.org/issue9824
+ """
+ cases = {
+ ("x", "foo,bar;baz"): 'x=foo%2Cbar%3Bbaz',
+ ("y", 'yap"bip'): 'y=yap%22bip',
+ }
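+ # (The expected strings are plain percent-encoding; as a cross-check,
+ # urllib's quote - imported at the top of this file - yields the same
+ # octets with its default safe set: quote("foo,bar;baz") ==
+ # "foo%2Cbar%3Bbaz".)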
+ for args, ideal in cases.items():
+ cookie = Cookie(*args)
+ assert cookie.render_response() == ideal
+ assert cookie.render_request() == ideal
+
+ def test_legacy_quotes(self):
+ """Check that cookies which delimit values with quotes are understood
+ but that this non-6265 behavior is not repeated in the output
+ """
+ cookie = Cookie.from_string(
+ 'Set-Cookie: y="foo"; version="1"; Path="/foo"')
+ assert cookie.name == 'y'
+ assert cookie.value == 'foo'
+ assert cookie.version == 1
+ assert cookie.path == "/foo"
+ pieces = cookie.render_response().split("; ")
+ assert pieces[0] == 'y=foo'
+ assert set(pieces[1:]) == set([
+ 'Path=/foo', 'Version=1'
+ ])
+
+ def test_render_response_expires(self):
+ "Simple spot check of cookie expires rendering"
+ a = Cookie('a', 'blah')
+ a.expires = parse_date("Wed, 23-Jan-1992 00:01:02 GMT")
+ assert a.render_response() == \
+ 'a=blah; Expires=Thu, 23 Jan 1992 00:01:02 GMT'
+
+ b = Cookie('b', 'blr')
+ b.expires = parse_date("Sun Nov 6 08:49:37 1994")
+ assert b.render_response() == \
+ 'b=blr; Expires=Sun, 06 Nov 1994 08:49:37 GMT'
+
+ def test_eq(self):
+ "Smoke test equality/inequality with Cookie objects"
+ ref = Cookie('a', 'b')
+ # trivial cases
+ assert ref == ref
+ assert not (ref != ref)
+ assert None != ref
+ assert not (None == ref)
+ assert ref != None
+ assert not (ref == None)
+ # equivalence and nonequivalence
+ assert Cookie('a', 'b') is not ref
+ assert Cookie('a', 'b') == ref
+ assert Cookie('x', 'y') != ref
+ assert Cookie('a', 'y') != ref
+ assert Cookie('a', 'b', path='/') != ref
+ assert {'c': 'd'} != ref
+ assert ref != {'c': 'd'}
+ # unlike attribute values and sets of attributes
+ assert Cookie('a', 'b', path='/a') \
+ != Cookie('a', 'b', path='/')
+ assert Cookie('x', 'y', max_age=3) != \
+ Cookie('x', 'y', path='/b')
+ assert Cookie('yargo', 'z', max_age=5) != \
+ Cookie('yargo', 'z', max_age=6)
+ assert ref != Cookie('a', 'b', domain='yab')
+ # Exercise bytes conversion
+ assert Cookie(b'a', 'b') == Cookie('a', 'b')
+ assert Cookie(b'a', 'b') == Cookie(b'a', 'b')
+
+ def test_manifest(self):
+ "Test presence of important stuff on Cookie class"
+ for name in ("attribute_names", "attribute_renderers",
+ "attribute_parsers", "attribute_validators"):
+ dictionary = getattr(Cookie, name)
+ assert dictionary
+ assert isinstance(dictionary, dict)
+
+ def test_simple_extension(self):
+ "Trivial example/smoke test of extending Cookie"
+
+ count_state = [0]
+
+ def call_counter(item=None):
+ count_state[0] += 1
+ return True if item else False
+
+ class Cookie2(Cookie):
+ "Example Cookie subclass with new behavior"
+ attribute_names = {
+ 'foo': 'Foo',
+ 'bar': 'Bar',
+ 'baz': 'Baz',
+ 'ram': 'Ram',
+ }
+ attribute_parsers = {
+ 'foo': lambda s: "/".join(s),
+ 'bar': call_counter,
+ 'value': lambda s:
+ parse_value(s, allow_spaces=True),
+ }
+ attribute_validators = {
+ 'foo': lambda item: True,
+ 'bar': call_counter,
+ 'baz': lambda item: False,
+ }
+ attribute_renderers = {
+ 'foo': lambda s: "|".join(s) if s else None,
+ 'bar': call_counter,
+ 'name': lambda item: item,
+ }
+ cookie = Cookie2("a", "b")
+ for key in Cookie2.attribute_names:
+ assert hasattr(cookie, key)
+ assert getattr(cookie, key) is None
+ cookie.foo = "abc"
+ assert cookie.render_request() == "a=b"
+ assert cookie.render_response() == "a=b; Foo=a|b|c"
+ cookie.foo = None
+ # Setting it to None makes it drop from the listing
+ assert cookie.render_response() == "a=b"
+
+ cookie.bar = "what"
+ assert cookie.bar == "what"
+ assert cookie.render_request() == "a=b"
+ # bar's renderer returns a bool; if it's True we get Bar.
+ # that's a special case for flags like HttpOnly.
+ assert cookie.render_response() == "a=b; Bar"
+
+ with raises(InvalidCookieAttributeError):
+ cookie.baz = "anything"
+
+ Cookie2('a', 'b fog')
+ Cookie2('a', ' b=fo g')
+
+ def test_from_string(self):
+ with raises(InvalidCookieError):
+ Cookie.from_string("")
+ with raises(InvalidCookieError):
+ Cookie.from_string("", ignore_bad_attributes=True)
+ assert Cookie.from_string("", ignore_bad_cookies=True) is None
+
+ def test_from_dict(self):
+ assert Cookie.from_dict({'name': 'a', 'value': 'b'}) == \
+ Cookie('a', 'b')
+ assert Cookie.from_dict(
+ {'name': 'a', 'value': 'b', 'duh': 'no'},
+ ignore_bad_attributes=True) == \
+ Cookie('a', 'b')
+ with raises(InvalidCookieError):
+ Cookie.from_dict({}, ignore_bad_attributes=True)
+ with raises(InvalidCookieError):
+ Cookie.from_dict({}, ignore_bad_attributes=False)
+ with raises(InvalidCookieError):
+ Cookie.from_dict({'name': ''}, ignore_bad_attributes=False)
+ with raises(InvalidCookieError):
+ Cookie.from_dict({'name': None, 'value': 'b'},
+ ignore_bad_attributes=False)
+ assert Cookie.from_dict({'name': 'foo'}) == Cookie('foo', None)
+ assert Cookie.from_dict({'name': 'foo', 'value': ''}) == \
+ Cookie('foo', None)
+ with raises(InvalidCookieAttributeError):
+ assert Cookie.from_dict(
+ {'name': 'a', 'value': 'b', 'duh': 'no'},
+ ignore_bad_attributes=False)
+ assert Cookie.from_dict({'name': 'a', 'value': 'b', 'expires': 2},
+ ignore_bad_attributes=True) == Cookie('a', 'b')
+ with raises(InvalidCookieAttributeError):
+ assert Cookie.from_dict({'name': 'a', 'value': 'b', 'expires': 2},
+ ignore_bad_attributes=False)
+
+
+class Scone(object):
+ """Non-useful alternative to Cookie class for tests only.
+ """
+ def __init__(self, name, value):
+ self.name = name
+ self.value = value
+
+ @classmethod
+ def from_dict(cls, cookie_dict):
+ instance = cls(cookie_dict['name'], cookie_dict['value'])
+ return instance
+
+ def __eq__(self, other):
+ if type(self) != type(other):
+ return False
+ if self.name != other.name:
+ return False
+ if self.value != other.value:
+ return False
+ return True
+
+
+class Scones(Cookies):
+ """Non-useful alternative to Cookies class for tests only.
+ """
+ DEFAULT_COOKIE_CLASS = Scone
+
+
+class TestCookies(object):
+ """Tests for the Cookies class.
+ """
+ creation_cases = [
+ # Only args - simple
+ ((Cookie("a", "b"),), {}, 1),
+ # Only kwargs - simple
+ (tuple(), {'a': 'b'}, 1),
+ # Only kwargs - bigger
+ (tuple(),
+ {'axl': 'bosk',
+ 'x': 'y',
+ 'foo': 'bar',
+ 'baz': 'bam'}, 4),
+ # Sum between args/kwargs
+ ((Cookie('a', 'b'),),
+ {'axl': 'bosk',
+ 'x': 'y',
+ 'foo': 'bar',
+ 'baz': 'bam'}, 5),
+ # Redundant between args/kwargs
+ ((Cookie('a', 'b'),
+ Cookie('x', 'y')),
+ {'axl': 'bosk',
+ 'x': 'y',
+ 'foo': 'bar',
+ 'baz': 'bam'}, 5),
+ ]
+
+ def test_init(self):
+ """Create some Cookies objects with __init__, varying the constructor
+ arguments, and check on the results.
+
+ Exercises __init__, __repr__, render_request, render_response, and
+ simple cases of parse_response and parse_request.
+ """
+ def same(a, b):
+ keys = sorted(set(a.keys()) | set(b.keys()))  # py3 dict views don't support +
+ for key in keys:
+ assert a[key] == b[key]
+
+ for args, kwargs, length in self.creation_cases:
+ # Make a Cookies object using the args.
+ cookies = Cookies(*args, **kwargs)
+ assert len(cookies) == length
+
+ # Render into various text formats.
+ rep = repr(cookies)
+ res = cookies.render_response()
+ req = cookies.render_request()
+
+ # Very basic sanity check on renders, fail fast and in a simple way
+ # if output is truly terrible
+ assert rep.count('=') == length
+ assert len(res) == length
+ assert all(item.count('=') == 1 for item in res)
+ assert req.count('=') == length
+ assert len(req.split(";")) == length
+
+ # Explicitly parse out the data (this can be simple since the
+ # output should be in a highly consistent format)
+ pairs = [item.split("=") for item in req.split("; ")]
+ assert len(pairs) == length
+ for name, value in pairs:
+ cookie = cookies[name]
+ assert cookie.name == name
+ assert cookie.value == value
+
+ # Parse the rendered output, check that result is equal to the
+ # originally produced object.
+
+ parsed = Cookies()
+ parsed.parse_request(req)
+ assert parsed == cookies
+
+ parsed = Cookies()
+ for item in res:
+ parsed.parse_response(item)
+ assert parsed == cookies
+
+ # Check that all the requested cookies were created correctly:
+ # indexed with correct names in dict, also with correctly set name
+ # and value attributes.
+ for cookie in args:
+ assert cookies[cookie.name] == cookie
+ for name, value in kwargs.items():
+ cookie = cookies[name]
+ assert cookie.name == name
+ assert cookie.value == value
+ assert name in rep
+ assert value in rep
+
+ # Spot check that setting an attribute still works
+ # with these particular parameters. Not a torture test.
+ for key in cookies:
+ cookies[key].max_age = 42
+ for line in cookies.render_response():
+ assert line.endswith("Max-Age=42")
+
+ # Spot check attribute deletion
+ assert cookies[key].max_age
+ del cookies[key].max_age
+ assert cookies[key].max_age is None
+
+ # Spot check cookie deletion
+ keys = [key for key in cookies.keys()]
+ for key in keys:
+ del cookies[key]
+ assert key not in cookies
+
+ def test_eq(self):
+ "Smoke test equality/inequality of Cookies objects"
+ ref = Cookies(a='b')
+ assert Cookies(a='b') == ref
+ assert Cookies(b='c') != ref
+ assert ref != Cookies(d='e')
+ assert Cookies(a='x') != ref
+
+ class Dummy(object):
+ "Just any old object"
+ pass
+ x = Dummy()
+ x.keys = True
+ with raises(TypeError):
+ assert ref != x
+
+ def test_add(self):
+ "Test the Cookies.add method"
+ for args, kwargs, length in self.creation_cases:
+ cookies = Cookies()
+ cookies.add(*args, **kwargs)
+ assert len(cookies) == length
+ for cookie in args:
+ assert cookies[cookie.name] == cookie
+ for name, value in kwargs.items():
+ cookie = cookies[name]
+ assert cookie.value == value
+ count = len(cookies)
+ assert 'w' not in cookies
+ cookies.add(w='m')
+ assert 'w' in cookies
+ assert count == len(cookies) - 1
+ assert cookies['w'].value == 'm'
+
+ def test_empty(self):
+ "Trivial test of behavior of empty Cookies object"
+ cookies = Cookies()
+ assert len(cookies) == 0
+ assert Cookies() == cookies
+
+ def test_parse_request(self):
+ """Test Cookies.parse_request.
+ """
+ def run(arg, **kwargs):
+ "run Cookies.parse_request on an instance"
+ cookies = Cookies()
+ result = runner(cookies.parse_request)(arg, **kwargs)
+ return result
+
+ for case in HEADER_CASES:
+ arg, kwargs, expected, response_result = case
+
+ # parse_request doesn't take ignore_bad_attributes. remove it
+ # without changing original kwargs for further tests
+ kwargs = kwargs.copy()
+ if 'ignore_bad_attributes' in kwargs:
+ del kwargs['ignore_bad_attributes']
+
+ def expect(arg, kwargs):
+ "repeated complex assertion"
+ result = run(arg, **kwargs)
+ assert result == expected \
+ or isinstance(expected, type) \
+ and type(result) == expected, \
+ "unexpected result for (%s): %s. should be %s" \
+ % (repr(arg), repr(result), repr(expected))
+
+ # Check result - should be same with and without the prefix
+ expect("Cookie: " + arg, kwargs)
+ expect(arg, kwargs)
+
+ # But it should not match with the response prefix.
+ other_result = run("Set-Cookie: " + arg, **kwargs)
+ assert other_result != expected
+ assert other_result != response_result
+
+ # If case expects InvalidCookieError, verify that it is suppressed
+ # by ignore_bad_cookies.
+ if expected == InvalidCookieError:
+ kwargs2 = kwargs.copy()
+ kwargs2['ignore_bad_cookies'] = True
+ cookies = Cookies()
+ # Let natural exception raise, easier to figure out
+ cookies.parse_request(arg, **kwargs2)
+
+ # Spot check that exception is raised for clearly wrong format
+ assert not isinstance(run("Cookie: a=b"), InvalidCookieError)
+ assert isinstance(run("Set-Cookie: a=b"), InvalidCookieError)
+
+ def test_parse_response(self):
+ """Test Cookies.parse_response.
+ """
+ def run(arg, **kwargs):
+ "run parse_response method of a Cookies instance"
+ cookies = Cookies()
+ return runner(cookies.parse_response)(arg, **kwargs)
+
+ for case in HEADER_CASES:
+ arg, kwargs, request_result, expected = case
+ # If we expect InvalidCookieError or InvalidCookieAttributeError,
+ # telling the function to ignore those should result in no
+ # exception.
+ kwargs2 = kwargs.copy()
+ if expected == InvalidCookieError:
+ kwargs2['ignore_bad_cookies'] = True
+ assert not isinstance(
+ run(arg, **kwargs2),
+ Exception)
+ elif expected == InvalidCookieAttributeError:
+ kwargs2['ignore_bad_attributes'] = True
+ result = run(arg, **kwargs2)
+ if isinstance(result, InvalidCookieAttributeError):
+ raise AssertionError("InvalidCookieAttributeError "
+ "should have been silenced/logged")
+ else:
+ assert not isinstance(result, Exception)
+ # Check result - should be same with and without the prefix
+ sys.stdout.flush()
+ result = run(arg, **kwargs)
+ assert result == expected \
+ or isinstance(expected, type) \
+ and type(result) == expected, \
+ "unexpected result for (%s): %s. should be %s" \
+ % (repr(arg), repr(result), repr(expected))
+ result = run("Set-Cookie: " + arg, **kwargs)
+ assert result == expected \
+ or isinstance(expected, type) \
+ and type(result) == expected, \
+ "unexpected result for (%s): %s. should be %s" \
+ % (repr("Set-Cookie: " + arg),
+ repr(result), repr(expected))
+ # But it should not match with the request prefix.
+ other_result = run("Cookie: " + arg, **kwargs)
+ assert other_result != expected
+ assert other_result != request_result
+
+ assert not isinstance(run("Set-Cookie: a=b"), InvalidCookieError)
+ assert isinstance(run("Cookie: a=b"), InvalidCookieError)
+
+ def test_exercise_parse_one_response_asctime(self):
+ asctime = 'Sun Nov 6 08:49:37 1994'
+ line = "Set-Cookie: a=b; Expires=%s" % asctime
+ response_dict = parse_one_response(line)
+ assert response_dict == \
+ {'expires': 'Sun Nov 6 08:49:37 1994', 'name': 'a', 'value': 'b'}
+ assert Cookie.from_dict(response_dict) == \
+ Cookie('a', 'b', expires=parse_date(asctime))
+
+ def test_get_all(self):
+ cookies = Cookies.from_request('a=b; a=c; b=x')
+ assert cookies['a'].value == 'b'
+ assert cookies['b'].value == 'x'
+ values = [cookie.value for cookie in cookies.get_all('a')]
+ assert values == ['b', 'c']
+
+ def test_custom_cookie_class_on_instance(self):
+ cookies = Cookies(_cookie_class=Scone)
+ cookies.add(a="b")
+ assert cookies['a'] == Scone("a", "b")
+
+ def test_custom_cookie_class_on_subclass(self):
+ cookies = Scones()
+ cookies.add(a="b")
+ assert cookies['a'] == Scone("a", "b")
+
+ def test_custom_cookie_class_on_instance_parse_request(self):
+ cookies = Scones()
+ cookies.parse_request("Cookie: c=d")
+ assert cookies['c'] == Scone("c", "d")
+
+ def test_custom_cookie_class_on_instance_parse_response(self):
+ cookies = Scones()
+ cookies.parse_response("Set-Cookie: c=d")
+ assert cookies['c'] == Scone("c", "d")
+
+
+def test_parse_date():
+ """Throw a ton of dirty samples at the date parse/render and verify the
+ exact output of rendering the parsed version of the sample.
+ """
+ cases = [
+ # Obviously off format
+ ("", None),
+ (" ", None),
+ ("\t", None),
+ ("\n", None),
+ ("\x02\x03\x04", None),
+ ("froppity", None),
+ ("@@@@@%@#:%", None),
+ ("foo bar baz", None),
+ # We'll do a number of overall manglings.
+ # First, show that the baseline passes
+ ("Sat, 10 Oct 2009 13:47:21 GMT", "Sat, 10 Oct 2009 13:47:21 GMT"),
+ # Delete semantically important pieces
+ (" Oct 2009 13:47:21 GMT", None),
+ ("Fri, Oct 2009 13:47:21 GMT", None),
+ ("Fri, 10 2009 13:47:21 GMT", None),
+ ("Sat, 10 Oct 2009 :47:21 GMT", None),
+ ("Sat, 10 Oct 2009 13::21 GMT", None),
+ ("Sat, 10 Oct 2009 13:47: GMT", None),
+ # Replace single characters out of tokens with spaces - harder to
+ # do programmatically because some whitespace can reasonably be
+ # tolerated.
+ ("F i, 10 Oct 2009 13:47:21 GMT", None),
+ ("Fr , 10 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 10 ct 2009 13:47:21 GMT", None),
+ ("Fri, 10 O t 2009 13:47:21 GMT", None),
+ ("Fri, 10 Oc 2009 13:47:21 GMT", None),
+ ("Sat, 10 Oct 009 13:47:21 GMT", None),
+ ("Sat, 10 Oct 2 09 13:47:21 GMT", None),
+ ("Sat, 10 Oct 20 9 13:47:21 GMT", None),
+ ("Sat, 10 Oct 200 13:47:21 GMT", None),
+ ("Sat, 10 Oct 2009 1 :47:21 GMT", None),
+ ("Sat, 10 Oct 2009 13 47:21 GMT", None),
+ ("Sat, 10 Oct 2009 13: 7:21 GMT", None),
+ ("Sat, 10 Oct 2009 13:4 :21 GMT", None),
+ ("Sat, 10 Oct 2009 13:47 21 GMT", None),
+ ("Sat, 10 Oct 2009 13:47: 1 GMT", None),
+ ("Sat, 10 Oct 2009 13:47:2 GMT", None),
+ ("Sat, 10 Oct 2009 13:47:21 MT", None),
+ ("Sat, 10 Oct 2009 13:47:21 G T", None),
+ ("Sat, 10 Oct 2009 13:47:21 GM ", None),
+ # Replace numeric elements with stuff that contains A-Z
+ ("Fri, Burp Oct 2009 13:47:21 GMT", None),
+ ("Fri, 10 Tabalqplar 2009 13:47:21 GMT", None),
+ ("Sat, 10 Oct Fruit 13:47:21 GMT", None),
+ ("Sat, 10 Oct 2009 13:47:21 Fruits", None),
+ # Weekday
+ (", Dec 31 00:00:00 2003", None),
+ ("T, Dec 31 00:00:00 2003", None),
+ ("Tu, Dec 31 00:00:00 2003", None),
+ ("Hi, Dec 31 00:00:00 2003", None),
+ ("Heretounforeseen, Dec 31 00:00:00 2003", None),
+ ("Wednesday2, Dec 31 00:00:00 2003", None),
+ ("Mon\x00frobs, Dec 31 00:00:00 2003", None),
+ ("Mon\x10day, Dec 31 00:00:00 2003", None),
+ # Day of month
+ ("Fri, Oct 2009 13:47:21 GMT", None),
+ ("Fri, 110 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 0 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 00 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 0 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 0 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 00 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 33 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 40 Oct 2009 13:47:21 GMT", None),
+ ("Fri, A2 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 2\x00 Oct 2009 13:47:21 GMT", None),
+ ("Fri, \t3 Oct 2009 13:47:21 GMT", None),
+ ("Fri, 3\t Oct 2009 13:47:21 GMT", None),
+ # Month
+ ("Fri, 10 2009 13:47:21 GMT", None),
+ ("Fri, 10 O 2009 13:47:21 GMT", None),
+ ("Fri, 10 Oc 2009 13:47:21 GMT", None),
+ ("Sat, 10 Octuarial 2009 13:47:21 GMT", None),
+ ("Sat, 10 Octuary 2009 13:47:21 GMT", None),
+ ("Sat, 10 Octubre 2009 13:47:21 GMT", None),
+ # Year
+ ("Sat, 10 Oct 009 13:47:21 GMT", None),
+ ("Sat, 10 Oct 200 13:47:21 GMT", None),
+ ("Sat, 10 Oct 209 13:47:21 GMT", None),
+ ("Sat, 10 Oct 20 9 13:47:21 GMT", None),
+ # Hour
+ ("Sat, 10 Oct 2009 25:47:21 GMT", None),
+ ("Sat, 10 Oct 2009 1@:47:21 GMT", None),
+ # Minute
+ ("Sat, 10 Oct 2009 13:71:21 GMT", None),
+ ("Sat, 10 Oct 2009 13:61:21 GMT", None),
+ ("Sat, 10 Oct 2009 13:60:21 GMT", None),
+ ("Sat, 10 Oct 2009 24:01:00 GMT", None),
+ # Second
+ ("Sat, 10 Oct 2009 13:47 GMT", "Sat, 10 Oct 2009 13:47:00 GMT"),
+ ("Sat, 10 Oct 2009 13:47:00 GMT", "Sat, 10 Oct 2009 13:47:00 GMT"),
+ ("Sat, 10 Oct 2009 24:00:01 GMT", None),
+ # Some reasonable cases (ignore weekday)
+ ("Mon Dec 24 16:32:39 1977 GMT", "Sat, 24 Dec 1977 16:32:39 GMT"),
+ ("Sat, 7 Dec 1991 13:56:05 GMT", "Sat, 07 Dec 1991 13:56:05 GMT"),
+ ("Saturday, 8-Mar-2012 21:35:09 GMT", "Thu, 08 Mar 2012 21:35:09 GMT"),
+ ("Sun, 1-Feb-1998 00:00:00 GMT", "Sun, 01 Feb 1998 00:00:00 GMT"),
+ ("Thursday, 01-Jan-1983 01:01:01 GMT",
+ "Sat, 01 Jan 1983 01:01:01 GMT"),
+ ("Tue, 15-Nov-1973 22:23:24 GMT", "Thu, 15 Nov 1973 22:23:24 GMT"),
+ ("Wed, 09 Dec 1999 23:59:59 GMT", "Thu, 09 Dec 1999 23:59:59 GMT"),
+ ("Mon, 12-May-05 20:25:03 GMT", "Thu, 12 May 2005 20:25:03 GMT"),
+ ("Thursday, 01-Jan-12 09:00:00 GMT", "Sun, 01 Jan 2012 09:00:00 GMT"),
+ # starts like asctime, but flips the time and year - nonsense
+ ("Wed Mar 12 2007 08:25:07 GMT", None),
+ # starts like RFC 1123, but flips the time and year - nonsense
+ ("Thu, 31 Dec 23:55:55 2107 GMT", None),
+ ('Fri, 21-May-2004 10:40:51 GMT', "Fri, 21 May 2004 10:40:51 GMT"),
+ # extra 2-digit year exercises (see the pivot sketch after this list)
+ ("Sat, 10 Oct 11 13:47:21 GMT", "Mon, 10 Oct 2011 13:47:21 GMT"),
+ ("Sat, 10 Oct 09 13:47:22 GMT", "Sat, 10 Oct 2009 13:47:22 GMT"),
+ ("Sat, 10 Oct 93 13:47:23 GMT", "Sun, 10 Oct 1993 13:47:23 GMT"),
+ ("Sat, 10 Oct 85 13:47:24 GMT", "Thu, 10 Oct 1985 13:47:24 GMT"),
+ ("Sat, 10 Oct 70 13:47:25 GMT", "Sat, 10 Oct 1970 13:47:25 GMT"),
+ ("Sat, 10 Oct 69 13:47:26 GMT", "Thu, 10 Oct 2069 13:47:26 GMT"),
+ # dealing with 3-digit year is incredibly tedious, will do as needed
+ ("Sat, 10 Oct 969 13:47:26 GMT", None),
+ ("Sat, 10 Oct 9 13:47:26 GMT", None),
+ ("Fri, 10 Oct 19691 13:47:26 GMT", None),
+ ]
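+
+ # The two-digit-year cases in the list above imply the usual pivot
+ # (a sketch of the assumed mapping, not the library's actual code):
+ # 70-99 -> 19xx, 00-69 -> 20xx.
+ def _assumed_year_pivot(yy):
+ "Hypothetical illustration of the pivot exercised above."
+ return 1900 + yy if yy >= 70 else 2000 + yy
+ assert _assumed_year_pivot(93) == 1993
+ assert _assumed_year_pivot(69) == 2069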
+
+ def change(string, position, new_value):
+ "Macro to change a string"
+ return string[:position] + new_value + string[position + 1:]
+
+ original = "Sat, 10 Oct 2009 13:47:21 GMT"
+
+ # Stuff garbage in every position - none of these characters should
+ # ever be allowed in a date string.
+ # not included because pytest chokes: "¿�␦"
+ bad_chars = "/<>()\\*$#&=;\x00\b\f\n\r\"\'`?"
+ for pos in range(0, len(original)):
+ for bad_char in bad_chars:
+ cases.append((change(original, pos, bad_char), None))
+
+ # Invalidate each letter
+ letter_positions = [i for (i, c) in enumerate(original) \
+ if re.match("[A-Za-z]", c)]
+ for pos in letter_positions:
+ cases.append((change(original, pos, 'q'), None))
+ cases.append((change(original, pos, '0'), None))
+ cases.append((change(original, pos, '-'), None))
+ cases.append((change(original, pos, ''), None))
+ # But do tolerate case changes.
+ c = original[pos]
+ if c.isupper():
+ c = c.lower()
+ else:
+ c = c.upper()
+ cases.append((change(original, pos, c), original))
+
+ # Invalidate each digit
+ digit_positions = [i for (i, c) in enumerate(original) \
+ if c in "0123456789"]
+ for pos in digit_positions:
+ c = original[pos]
+ cases.append((change(original, pos, 'q'), None))
+ cases.append((change(original, pos, '-' + c), None))
+ cases.append((change(original, pos, '+' + c), None))
+
+ # Invalidate each space
+ space_positions = [i for (i, c) in enumerate(original) \
+ if c in " \t\n\r"]
+ for pos in space_positions:
+ cases.append((change(original, pos, 'x'), None))
+ cases.append((change(original, pos, '\t'), None))
+ cases.append((change(original, pos, ' '), None))
+ cases.append((change(original, pos, ''), None))
+
+ # Invalidate each colon
+ colon_positions = [i for (i, c) in enumerate(original) \
+ if c == ":"]
+ for pos in colon_positions:
+ cases.append((change(original, pos, 'z'), None))
+ cases.append((change(original, pos, '0'), None))
+ cases.append((change(original, pos, ' '), None))
+ cases.append((change(original, pos, ''), None))
+
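+ # Round-trip check: parse_date returns None for unparseable input, and
+ # render_date passes that None through (as the None ideals above
+ # require); valid inputs must render back to the canonical form exactly.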
+ for data, ideal in cases:
+ actual = render_date(parse_date(data))
+ assert actual == ideal
+
+
+def runner(function):
+ """Generate a function which collects the result/exception from another
+ function, for easier assertions.
+ """
+ def run(*args, **kwargs):
+ "Function which collects result/exception"
+ actual_result, actual_exception = None, None
+ try:
+ actual_result = function(*args, **kwargs)
+ except Exception as exception:
+ actual_exception = exception
+ return actual_exception or actual_result
+ return run
+
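+# Usage sketch for runner() (illustration only, not part of the original
+# suite; the leading underscore keeps pytest's default collection from
+# picking it up): the wrapper returns either the call's result or the
+# exception instance it raised, so one comparison can cover both outcomes.
+def _demo_runner():
+ "Hypothetical demo; relies only on runner() as defined above."
+ run = runner(int)
+ assert run("5") == 5
+ assert isinstance(run("x"), ValueError)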
+
+# Define cases for testing parsing and rendering.
+# Format: input, kwargs, expected parse_request result, expected parse_response
+# result.
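+# Each case is consumed by unpacking, as in the test methods above, e.g.:
+# arg, kwargs, expected, response_result = case
+# where each expected result is either a Cookies instance or the exception
+# type the parse is expected to produce.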
+
+HEADER_CASES = [
+ # cases with nothing that can be parsed out result in
+ # InvalidCookieError. unless ignore_bad_cookies=True, then they give an
+ # empty Cookies().
+ ("", {},
+ InvalidCookieError,
+ InvalidCookieError),
+ ('a', {},
+ InvalidCookieError,
+ InvalidCookieError),
+ (" ", {},
+ InvalidCookieError,
+ InvalidCookieError),
+ (";;;;;", {},
+ InvalidCookieError,
+ InvalidCookieError),
+ ("qwejrkqlwjere", {},
+ InvalidCookieError,
+ InvalidCookieError),
+ # vacuous headers should give invalid
+ ('Cookie: ', {},
+ InvalidCookieError,
+ InvalidCookieError),
+ ('Set-Cookie: ', {},
+ InvalidCookieError,
+ InvalidCookieError),
+ # Single pair should work the same as request or response
+ ("foo=bar", {},
+ Cookies(foo='bar'),
+ Cookies(foo='bar')),
+ ("SID=242d96421d4e", {},
+ Cookies(SID='242d96421d4e'),
+ Cookies(SID='242d96421d4e')),
+ # Two pairs on SAME line should work with request, fail with response.
+ # if ignore_bad_attributes, response should not raise.
+ # and ignore_bad_attributes behavior should be default
+ ("a=b; c=dx", {'ignore_bad_attributes': True},
+ Cookies(a='b', c='dx'),
+ Cookies(a='b')),
+ ("a=b; c=d", {'ignore_bad_attributes': False},
+ Cookies(a='b', c='d'),
+ InvalidCookieAttributeError),
+ ('g=h;j=k', {},
+ Cookies(g='h', j='k'),
+ Cookies(g='h')),
+ # tolerance: response shouldn't barf on unrecognized attr by default,
+ # but request should recognize as malformed
+ ('a=b; brains', {},
+ InvalidCookieError,
+ Cookies(a='b')),
+ # tolerance: should strip quotes and spaces
+ ('A="BBB"', {},
+ Cookies(A='BBB'),
+ Cookies(A='BBB'),
+ ),
+ ('A= "BBB" ', {},
+ Cookies(A='BBB'),
+ Cookies(A='BBB'),
+ ),
+ # tolerance: should ignore dumb trailing ;
+ ('foo=bar;', {},
+ Cookies(foo='bar'),
+ Cookies(foo='bar'),
+ ),
+ ('A="BBB";', {},
+ Cookies(A='BBB'),
+ Cookies(A='BBB'),
+ ),
+ ('A= "BBB" ;', {},
+ Cookies(A='BBB'),
+ Cookies(A='BBB'),
+ ),
+ # empty value
+ ("lang=; Expires=Sun, 06 Nov 1994 08:49:37 GMT", {},
+ InvalidCookieError,
+ Cookies(
+ Cookie('lang', '',
+ expires=parse_date(
+ "Sun, 06 Nov 1994 08:49:37 GMT")))),
+ # normal examples of varying complexity
+ ("frob=varvels; Expires=Wed, 09 Jun 2021 10:18:14 GMT", {},
+ InvalidCookieError,
+ Cookies(
+ Cookie('frob', 'varvels',
+ expires=parse_date(
+ "Wed, 09 Jun 2021 10:18:14 GMT"
+ )))),
+ ("lang=en-US; Expires=Wed, 03 Jun 2019 10:18:14 GMT", {},
+ InvalidCookieError,
+ Cookies(
+ Cookie('lang', 'en-US',
+ expires=parse_date(
+ "Wed, 03 Jun 2019 10:18:14 GMT"
+ )))),
+ # easily interpretable as multiple request cookies!
+ ("CID=39b4d9be4d42; Path=/; Domain=example.com", {},
+ Cookies(CID="39b4d9be4d42", Path='/', Domain='example.com'),
+ Cookies(Cookie('CID', '39b4d9be4d42', path='/',
+ domain='example.com'))),
+ ("lang=en-US; Path=/; Domain=example.com", {},
+ Cookies(lang='en-US', Path='/', Domain='example.com'),
+ Cookies(Cookie('lang', 'en-US',
+ path='/', domain='example.com'))),
+ ("foo=bar; path=/; expires=Mon, 04-Dec-2001 12:43:00 GMT", {},
+ InvalidCookieError,
+ Cookies(
+ Cookie('foo', 'bar', path='/',
+ expires=parse_date("Mon, 04-Dec-2001 12:43:00 GMT")
+ ))),
+ ("SID=0fae49; Path=/; Secure; HttpOnly", {},
+ InvalidCookieError,
+ Cookies(Cookie('SID', '0fae49',
+ path='/', secure=True, httponly=True))),
+ ('TMID=DQAAXKEaeo_aYp; Domain=mail.nauk.com; '
+ 'Path=/accounts; Expires=Wed, 13-Jan-2021 22:23:01 GMT; '
+ 'Secure; HttpOnly', {},
+ InvalidCookieError,
+ Cookies(
+ Cookie('TMID', 'DQAAXKEaeo_aYp',
+ domain='mail.nauk.com',
+ path='/accounts', secure=True, httponly=True,
+ expires=parse_date("Wed, 13-Jan-2021 22:23:01 GMT")
+ ))),
+ ("test=some_value; expires=Sat, 01-Jan-2000 00:00:00 GMT; "
+ "path=/;", {},
+ InvalidCookieError,
+ Cookies(
+ Cookie('test', 'some_value', path='/',
+ expires=parse_date('Sat, 01 Jan 2000 00:00:00 GMT')
+ ))),
+    # From RFC 2109 - accept the lots-of-dquotes style, but don't produce it.
+ ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"; '
+ 'Part_Number="Rocket_Launcher_0001"', {},
+ Cookies(Customer='WILE_E_COYOTE', Version='1', Path='/acme',
+ Part_Number='Rocket_Launcher_0001'),
+ Cookies(Cookie('Customer', 'WILE_E_COYOTE',
+ version=1, path='/acme'))),
+ # However, we don't honor RFC 2109 type meta-attributes
+ ('Cookie: $Version="1"; Customer="WILE_E_COYOTE"; $Path="/acme"', {},
+ InvalidCookieError,
+ InvalidCookieError),
+    # degenerate Domain=. is common, so handle it even though it is invalid
+ ("lu=Qg3OHJZLehYLjVgAqiZbZbzo; Expires=Tue, 15-Jan-2013 "
+ "21:47:38 GMT; Path=/; Domain=.foo.com; HttpOnly", {},
+ InvalidCookieError,
+ Cookies(Cookie('lu', "Qg3OHJZLehYLjVgAqiZbZbzo",
+ expires=parse_date('Tue, 15 Jan 2013 21:47:38 GMT'),
+ path='/', domain='.foo.com', httponly=True,
+ ))),
+ ('ZQID=AYBEVnDKrdst; Domain=.nauk.com; Path=/; '
+ 'Expires=Wed, 13-Jan-2021 22:23:01 GMT; HttpOnly', {},
+ InvalidCookieError,
+ Cookies(Cookie('ZQID', "AYBEVnDKrdst",
+ httponly=True, domain='.nauk.com', path='/',
+ expires=parse_date('Wed, 13 Jan 2021 22:23:01 GMT'),
+ ))),
+ ("OMID=Ap4PQQEq; Domain=.nauk.com; Path=/; "
+ 'Expires=Wed, 13-Jan-2021 22:23:01 GMT; Secure; HttpOnly', {},
+ InvalidCookieError,
+ Cookies(Cookie('OMID', "Ap4PQQEq",
+ path='/', domain='.nauk.com', secure=True, httponly=True,
+ expires=parse_date('Wed, 13 Jan 2021 22:23:01 GMT')
+ ))),
+ # question mark in value
+ ('foo="?foo"; Path=/', {},
+ Cookies(foo='?foo', Path='/'),
+ Cookies(Cookie('foo', '?foo', path='/'))),
+ # unusual format for secure/httponly
+ ("a=b; Secure=true; HttpOnly=true;", {},
+ Cookies(a='b', Secure='true', HttpOnly='true'),
+ Cookies(Cookie('a', 'b', secure=True, httponly=True))),
+ # invalid per RFC to have spaces in value, but here they are
+ # URL-encoded by default. Extend the mechanism if this is no good
+ ('user=RJMmei IORqmD; expires=Wed, 3 Nov 2007 23:20:39 GMT; path=/',
+ {},
+ InvalidCookieError,
+ Cookies(
+ Cookie('user', 'RJMmei IORqmD', path='/',
+ expires=parse_date("Wed, 3 Nov 2007 23:20:39 GMT")))),
+    # Most characters in range(32, 0x31 + 1) should be allowed in values -
+    # not including space/32, dquote/34, comma/44.
+ ("x=!#$%&'()*+-./01", {},
+ Cookies(x="!#$%&'()*+-./01"),
+ Cookies(x="!#$%&'()*+-./01")),
+ # don't crash when value wrapped with quotes
+ # http://bugs.python.org/issue3924
+ ('a=b; version="1"', {},
+ Cookies(a='b', version='1'),
+ Cookies(Cookie('a', 'b', version=1))),
+ # cookie with name 'expires'. inadvisable, but valid.
+ # http://bugs.python.org/issue1117339
+ ('expires=foo', {},
+ Cookies(expires='foo'),
+ Cookies(expires='foo')),
+ # http://bugs.python.org/issue8826
+ # quick date parsing spot-check, see test_parse_date for a real workout
+ ('foo=bar; expires=Fri, 31-Dec-2010 23:59:59 GMT', {},
+ InvalidCookieError,
+ Cookies(
+ Cookie('foo', 'bar',
+ expires=datetime(2010, 12, 31, 23, 59, 59)))),
+ # allow VALID equals sign in values - not even an issue in RFC 6265 or
+ # this module, but very helpful for base64 and always worth checking.
+ # http://bugs.python.org/issue403473
+ ('a=Zm9vIGJhcg==', {},
+ Cookies(a='Zm9vIGJhcg=='),
+ Cookies(a='Zm9vIGJhcg==')),
+ ('blah="Foo=2"', {},
+ Cookies(blah='Foo=2'),
+ Cookies(blah='Foo=2')),
+ # take the first cookie in request parsing.
+ # (response parse ignores the second one as a bad attribute)
+ # http://bugs.python.org/issue1375011
+ # http://bugs.python.org/issue1372650
+ # http://bugs.python.org/issue7504
+ ('foo=33;foo=34', {},
+ Cookies(foo='33'),
+ Cookies(foo='33')),
+ # Colons in names (invalid!), as used by some dumb old Java/PHP code
+ # http://bugs.python.org/issue2988
+ # http://bugs.python.org/issue472646
+ # http://bugs.python.org/issue2193
+ ('a:b=c', {},
+ Cookies(
+ Cookie('a:b', 'c')),
+ Cookies(
+ Cookie('a:b', 'c'))),
+# # http://bugs.python.org/issue991266
+# # This module doesn't do the backslash quoting so this would
+# # effectively require allowing all possible characters inside arbitrary
+# # attributes, which does not seem reasonable.
+# ('foo=bar; Comment="\342\230\243"', {},
+# Cookies(foo='bar', Comment='\342\230\243'),
+# Cookies(
+# Cookie('foo', 'bar', comment='\342\230\243')
+# )),
+ ]
+
+
+def _cheap_request_parse(arg1, arg2):
+ """Really cheap parse like what client code often does, for
+ testing request rendering (determining order-insensitively whether two
+ cookies-as-text are equivalent). 'a=b; x=y' type format
+ """
+ def crumble(arg):
+ "Break down string into pieces"
+ pieces = [piece.strip('\r\n ;') for piece in re.split("(\r\n|;)", arg)]
+ pieces = [piece for piece in pieces if piece and '=' in piece]
+ pieces = [tuple(piece.split("=", 1)) for piece in pieces]
+ pieces = [(name.strip(), value.strip('" ')) for name, value in pieces]
+ # Keep the first one in front (can use set down the line);
+ # the rest are sorted
+ if len(pieces) > 1:
+ pieces = [pieces[0]] + sorted(pieces[1:])
+ return pieces
+
+ def dedupe(pieces):
+ "Eliminate duplicate pieces"
+ deduped = {}
+ for name, value in pieces:
+ if name in deduped:
+ continue
+ deduped[name] = value
+ return sorted(deduped.items(),
+ key=pieces.index)
+
+ return dedupe(crumble(arg1)), crumble(arg2)
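+
+# For example (hypothetical input; the second argument keeps crumble order):
+#     _cheap_request_parse('a=b; x=y', 'x=y; a=b')
+# returns ([('a', 'b'), ('x', 'y')], [('x', 'y'), ('a', 'b')]); callers
+# compare the two sides as sets since request order is not significant.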
+
+
+def _cheap_response_parse(arg1, arg2):
+ """Silly parser for 'name=value; attr=attrvalue' format,
+ to test out response renders
+ """
+ def crumble(arg):
+ "Break down string into pieces"
+ lines = [line for line in arg if line]
+ done = []
+ for line in lines:
+            clauses = line.split(';')
+ name, value = re.split(" *= *", clauses[0], 1)
+ value = unquote(value.strip(' "'))
+ attrs = [re.split(" *= *", clause, 1) \
+ for clause in clauses[1:] if clause]
+ attrs = [attr for attr in attrs \
+ if attr[0] in Cookie.attribute_names]
+ attrs = [(k, v.strip(' "')) for k, v in attrs]
+ done.append((name, value, tuple(attrs)))
+ return done
+ result1 = crumble([arg1])
+ result2 = crumble(arg2)
+ return result1, result2
+
+
+def test_render_request():
+ """Test the request renderer against HEADER_CASES.
+ Perhaps a wider range of values is tested in TestCookies.test_init.
+ """
+ for case in HEADER_CASES:
+ arg, kwargs, cookies, _ = case
+ # can't reproduce examples which are supposed to throw parse errors
+ if isinstance(cookies, type) and issubclass(cookies, Exception):
+ continue
+ rendered = cookies.render_request()
+ expected, actual = _cheap_request_parse(arg, rendered)
+        # we can use set() here only because requests aren't order sensitive.
+ assert set(actual) == set(expected)
+
+
+def test_render_response():
+ """Test the response renderer against HEADER_CASES.
+ Perhaps a wider range of values is tested in TestCookies.test_init.
+ """
+ def filter_attrs(items):
+ "Filter out the items which are Cookie attributes"
+ return [(name, value) for (name, value) in items \
+ if name.lower() in Cookie.attribute_names]
+
+ for case in HEADER_CASES:
+ arg, kwargs, _, cookies = case
+ # can't reproduce examples which are supposed to throw parse errors
+ if isinstance(cookies, type) and issubclass(cookies, Exception):
+ continue
+ rendered = cookies.render_response()
+ expected, actual = _cheap_response_parse(arg, rendered)
+ expected, actual = set(expected), set(actual)
+ assert actual == expected, \
+ "failed: %s -> %s | %s != %s" % (arg, repr(cookies), actual,
+ expected)
+
+
+def test_backslash_roundtrip():
+ """Check that backslash in input or value stays backslash internally but
+ goes out as %5C, and comes back in again as a backslash.
+ """
+ reference = Cookie('xx', '\\')
+ assert len(reference.value) == 1
+ reference_request = reference.render_request()
+ reference_response = reference.render_response()
+ assert '\\' not in reference_request
+ assert '\\' not in reference_response
+ assert '%5C' in reference_request
+ assert '%5C' in reference_response
+
+ # Parse from multiple entry points
+ raw_cookie = r'xx="\"'
+ parsed_cookies = [Cookie.from_string(raw_cookie),
+ Cookies.from_request(raw_cookie)['xx'],
+ Cookies.from_response(raw_cookie)['xx']]
+ for parsed_cookie in parsed_cookies:
+ assert parsed_cookie.name == reference.name
+ assert parsed_cookie.value == reference.value
+ # Renders should match exactly
+ request = parsed_cookie.render_request()
+ response = parsed_cookie.render_response()
+ assert request == reference_request
+ assert response == reference_response
+ # Reparses should too
+ rrequest = Cookies.from_request(request)['xx']
+ rresponse = Cookies.from_response(response)['xx']
+ assert rrequest.name == reference.name
+ assert rrequest.value == reference.value
+ assert rresponse.name == reference.name
+ assert rresponse.value == reference.value
+
+
+def _simple_test(function, case_dict):
+ "Macro for making simple case-based tests for a function call"
+ def actual_test():
+ "Test generated by _simple_test"
+ for arg, expected in case_dict.items():
+ logging.info("case for %s: %s %s",
+ repr(function), repr(arg), repr(expected))
+ result = function(arg)
+ assert result == expected, \
+ "%s(%s) != %s, rather %s" % (
+ function.__name__,
+ repr(arg),
+ repr(expected),
+ repr(result))
+ actual_test.cases = case_dict
+ return actual_test
+
+test_strip_spaces_and_quotes = _simple_test(strip_spaces_and_quotes, {
+ ' ': '',
+ '""': '',
+ '"': '"',
+ "''": "''",
+ ' foo ': 'foo',
+ 'foo ': 'foo',
+ ' foo': 'foo',
+ ' "" ': '',
+ ' " " ': ' ',
+ ' " ': '"',
+ 'foo bar': 'foo bar',
+ '"foo bar': '"foo bar',
+ 'foo bar"': 'foo bar"',
+ '"foo bar"': 'foo bar',
+ '"dquoted"': 'dquoted',
+ ' "dquoted"': 'dquoted',
+ '"dquoted" ': 'dquoted',
+ ' "dquoted" ': 'dquoted',
+ })
+
+test_parse_string = _simple_test(parse_string, {
+ None: None,
+ '': '',
+ b'': '',
+ })
+
+test_parse_domain = _simple_test(parse_domain, {
+ ' foo ': 'foo',
+ '"foo"': 'foo',
+ ' "foo" ': 'foo',
+ '.foo': '.foo',
+ })
+
+test_parse_path = _simple_test(parse_path, {
+ })
+
+
+def test_render_date():
+ "Test date render routine directly with raw datetime objects"
+ # Date rendering is also exercised pretty well in test_parse_date.
+
+ cases = {
+ # Error for anything which is not known UTC/GMT
+ datetime(2001, 10, 11, tzinfo=FixedOffsetTz(60 * 60)):
+ AssertionError,
+ # A couple of baseline tests
+ datetime(1970, 1, 1, 0, 0, 0):
+ 'Thu, 01 Jan 1970 00:00:00 GMT',
+ datetime(2007, 9, 2, 13, 59, 49):
+ 'Sun, 02 Sep 2007 13:59:49 GMT',
+ # Don't produce 1-digit hour
+ datetime(2007, 9, 2, 1, 59, 49):
+ "Sun, 02 Sep 2007 01:59:49 GMT",
+ # Don't produce 1-digit minute
+ datetime(2007, 9, 2, 1, 1, 49):
+ "Sun, 02 Sep 2007 01:01:49 GMT",
+ # Don't produce 1-digit second
+ datetime(2007, 9, 2, 1, 1, 2):
+ "Sun, 02 Sep 2007 01:01:02 GMT",
+ # Allow crazy past/future years for cookie delete/persist
+ datetime(1900, 9, 2, 1, 1, 2):
+ "Sun, 02 Sep 1900 01:01:02 GMT",
+ datetime(3000, 9, 2, 1, 1, 2):
+ "Tue, 02 Sep 3000 01:01:02 GMT"
+ }
+
+ for dt, expected in cases.items():
+ if isinstance(expected, type) and issubclass(expected, Exception):
+ try:
+ render_date(dt)
+ except expected:
+ continue
+ except Exception as exception:
+ raise AssertionError("expected %s, got %s"
+ % (expected, exception))
+ raise AssertionError("expected %s, got no exception"
+ % (expected))
+ else:
+ assert render_date(dt) == expected
+
+
+def test_encoding_assumptions(check_unicode=False):
+ "Document and test assumptions underlying URL encoding scheme"
+ # Use the RFC 6265 based character class to build a regexp matcher that
+ # will tell us whether or not a character is okay to put in cookie values.
+ cookie_value_re = re.compile("[%s]" % Definitions.COOKIE_OCTET)
+    # Figure out which characters are okay, via both chr() and unichr().
+    # (unichr doesn't exist in Python 3, where it is expected to be aliased
+    # to chr; on Python 2 the two differ only in return type.)
+ cookie_value_safe1 = set(chr(i) for i in range(0, 256) \
+ if cookie_value_re.match(chr(i)))
+ cookie_value_safe2 = set(unichr(i) for i in range(0, 256) \
+ if cookie_value_re.match(unichr(i)))
+    # These two sets should come out identical on Python 2 and Python 3
+ assert cookie_value_safe1 == cookie_value_safe2
+ # Now which of these are quoted by urllib.quote?
+ # caveat: Python 2.6 crashes if chr(127) is passed to quote and safe="",
+ # so explicitly set it to b"" to avoid the issue
+ safe_but_quoted = set(c for c in cookie_value_safe1
+ if quote(c, safe=b"") != c)
+    # Produce a set of characters to pass to urllib.quote as its safe
+    # parameter.
+ dont_quote = "".join(sorted(safe_but_quoted))
+ # Make sure it works (and that it works because of what we passed)
+ for c in dont_quote:
+ assert quote(c, safe="") != c
+ assert quote(c, safe=dont_quote) == c
+
+ # Make sure that the result of using dont_quote as the safe characters for
+ # urllib.quote produces stuff which is safe as a cookie value, but not
+ # different unless it has to be.
+ for i in range(0, 255):
+ original = chr(i)
+ quoted = quote(original, safe=dont_quote)
+ # If it is a valid value for a cookie, that quoting should leave it
+ # alone.
+ if cookie_value_re.match(original):
+ assert original == quoted
+ # If it isn't a valid value, then the quoted value should be valid.
+ else:
+ assert cookie_value_re.match(quoted)
+
+ assert set(dont_quote) == set("!#$%&'()*+/:<=>?@[]^`{|}~")
+
+ # From 128 on urllib.quote will not work on a unichr() return value.
+ # We'll want to encode utf-8 values into ASCII, then do the quoting.
+ # Verify that this is reversible.
+ if check_unicode:
+ for c in (unichr(i) for i in range(0, 1114112)):
+ asc = c.encode('utf-8')
+ quoted = quote(asc, safe=dont_quote)
+ unquoted = unquote(asc)
+ unicoded = unquoted.decode('utf-8')
+ assert unicoded == c
+
+ # Now do the same for extension-av.
+ extension_av_re = re.compile("[%s]" % Definitions.EXTENSION_AV)
+ extension_av_safe = set(chr(i) for i in range(0, 256) \
+ if extension_av_re.match(chr(i)))
+ safe_but_quoted = set(c for c in extension_av_safe \
+ if quote(c, safe="") != c)
+ dont_quote = "".join(sorted(safe_but_quoted))
+ for c in dont_quote:
+ assert quote(c, safe="") != c
+ assert quote(c, safe=dont_quote) == c
+
+ for i in range(0, 255):
+ original = chr(i)
+ quoted = quote(original, safe=dont_quote)
+ if extension_av_re.match(original):
+ assert original == quoted
+ else:
+ assert extension_av_re.match(quoted)
+
+ assert set(dont_quote) == set(' !"#$%&\'()*+,/:<=>?@[\\]^`{|}~')
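+
+# The safe-parameter idea above in miniature (a standalone sketch; quote and
+# unquote are the urllib functions this module already imports):
+#     quote('=', safe='') == '%3D'     # percent-encoded by default
+#     quote('=', safe='=') == '='      # left alone when listed as safe
+#     unquote('%3D') == '='            # and the encoding is reversible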
+
+
+test_encode_cookie_value = _simple_test(encode_cookie_value,
+ {
+ None: None,
+ # let through
+ '!': '!',
+ '#': '#',
+ '$': '$',
+ '%': '%',
+ '&': '&',
+ "'": "'",
+ '(': '(',
+ ')': ')',
+ '*': '*',
+ '+': '+',
+ '/': '/',
+ ':': ':',
+ '<': '<',
+ '=': '=',
+ '>': '>',
+ '?': '?',
+ '@': '@',
+ '[': '[',
+ ']': ']',
+ '^': '^',
+ '`': '`',
+ '{': '{',
+ '|': '|',
+ '}': '}',
+ '~': '~',
+ # not let through
+ ' ': '%20',
+ '"': '%22',
+ ',': '%2C',
+ '\\': '%5C',
+ 'crud,': 'crud%2C',
+ })
+
+test_encode_extension_av = _simple_test(encode_extension_av,
+ {
+ None: '',
+ '': '',
+ 'foo': 'foo',
+ # stuff this lets through that cookie-value does not
+ ' ': ' ',
+ '"': '"',
+ ',': ',',
+ '\\': '\\',
+ 'yo\\b': 'yo\\b',
+ })
+
+test_valid_value = _simple_test(valid_value,
+ {
+ None: False,
+ '': True,
+ 'ಠ_ಠ': True,
+ 'μῆνιν ἄειδε θεὰ Πηληϊάδεω Ἀχιλῆος': True,
+ '这事情得搞好啊': True,
+ '宮崎 駿': True,
+ 'أم كلثوم': True,
+ 'ედუარდ შევარდნაძე': True,
+ 'Myötähäpeä': True,
+ 'Pedro Almodóvar': True,
+# b'': True,
+# b'ABCDEFGHIJKLMNOPQRSTUVWXYZ': True,
+ 'Pedro Almodóvar'.encode('utf-8'): False,
+ })
+
+test_valid_date = _simple_test(valid_date,
+ {
+ datetime(2011, 1, 1): True,
+ datetime(2011, 1, 1, tzinfo=FixedOffsetTz(1000)): False,
+ datetime(2011, 1, 1, tzinfo=FixedOffsetTz(0)): True,
+ })
+
+test_valid_domain = _simple_test(valid_domain,
+ {
+ '': False,
+ ' ': False,
+ '.': False,
+ '..': False,
+ '.foo': True,
+ '"foo"': False,
+ 'foo': True,
+ })
+
+test_valid_path = _simple_test(valid_path,
+ {
+ '': False,
+ ' ': False,
+ '/': True,
+ 'a': False,
+ '/a': True,
+ '\x00': False,
+ '/\x00': False,
+ })
+
+
+def test_many_pairs():
+ """Simple 'lots of pairs' test
+ """
+ from_request = Cookies.from_request
+ header = "a0=0"
+ for i in range(1, 100):
+ i_range = list(range(0, i))
+ cookies = from_request(header)
+ assert len(cookies) == i
+ for j in i_range:
+ key = 'a%d' % j
+ assert cookies[key].value == str(j * 10)
+ assert cookies[key].render_request() == \
+ "a%d=%d" % (j, j * 10)
+
+ # same test, different entry point
+ cookies = Cookies()
+ cookies.parse_request(header)
+ assert len(cookies) == i
+ for j in i_range:
+ key = 'a%d' % j
+ assert cookies[key].value == str(j * 10)
+ assert cookies[key].render_request() == \
+ "a%d=%d" % (j, j * 10)
+
+ # Add another piece to the header
+ header += "; a%d=%d" % (i, i * 10)
+
+
+def test_parse_value():
+ # this really just glues together strip_spaces_and_quotes
+ # and parse_string, so reuse their test cases
+ cases = {}
+ cases.update(test_strip_spaces_and_quotes.cases)
+ cases.update(test_parse_string.cases)
+ for inp, expected in cases.items():
+ print("case", inp, expected)
+ # Test with spaces allowed
+ obtained = parse_value(inp, allow_spaces=True)
+ assert obtained == expected
+
+ # Test with spaces disallowed, if it could do anything
+ if (isinstance(inp, bytes) and ' ' in inp.decode('utf-8').strip()) \
+ or (not isinstance(inp, bytes) and inp and ' ' in inp.strip()):
+ try:
+ obtained = parse_value(inp, allow_spaces=False)
+ except AssertionError:
+ pass
+ else:
+ raise AssertionError("parse_value(%s, allow_spaces=False) "
+ "did not raise" % repr(inp))
+
+
+def test_total_seconds():
+ """This wrapper probably doesn't need testing so much, and it's not
+ entirely trivial to fully exercise, but the coverage is nice to have
+ """
+ def basic_sanity(td_type):
+ assert _total_seconds(td_type(seconds=1)) == 1
+ assert _total_seconds(td_type(seconds=1, minutes=1)) == 1 + 60
+ assert _total_seconds(td_type(seconds=1, minutes=1, hours=1)) == \
+ 1 + 60 + 60 * 60
+
+ basic_sanity(timedelta)
+
+ class FakeTimeDelta(object):
+ def __init__(self, days=0, hours=0, minutes=0, seconds=0,
+ microseconds=0):
+ self.days = days
+ self.seconds = seconds + minutes * 60 + hours * 60 * 60
+ self.microseconds = microseconds
+
+ assert not hasattr(FakeTimeDelta, "total_seconds")
+ basic_sanity(FakeTimeDelta)
+
+ FakeTimeDelta.total_seconds = lambda: None.missing_attribute
+ try:
+ _total_seconds(None)
+ except AttributeError as e:
+ assert 'total_seconds' not in str(e)
+
+
+def test_valid_value_bad_quoter():
+ def bad_quote(s):
+ return "Frogs"
+
+ assert valid_value("eep", quote=bad_quote) == False
diff --git a/third_party/python/cram/cram-0.7.data/scripts/cram b/third_party/python/cram/cram-0.7.data/scripts/cram
new file mode 100755
index 0000000000..806c699782
--- /dev/null
+++ b/third_party/python/cram/cram-0.7.data/scripts/cram
@@ -0,0 +1,9 @@
+#!python
+import sys
+
+import cram
+
+try:
+ sys.exit(cram.main(sys.argv[1:]))
+except KeyboardInterrupt:
+ pass
diff --git a/third_party/python/cram/cram-0.7.dist-info/DESCRIPTION.rst b/third_party/python/cram/cram-0.7.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000..0a6577392c
--- /dev/null
+++ b/third_party/python/cram/cram-0.7.dist-info/DESCRIPTION.rst
@@ -0,0 +1,227 @@
+======================
+ Cram: It's test time
+======================
+
+Cram is a functional testing framework for command line applications.
+Cram tests look like snippets of interactive shell sessions. Cram runs
+each command and compares the command output in the test with the
+command's actual output.
+
+Here's a snippet from `Cram's own test suite`_::
+
+ The $PYTHON environment variable should be set when running this test
+ from Python.
+
+ $ [ -n "$PYTHON" ] || PYTHON="`which python`"
+ $ [ -n "$PYTHONPATH" ] || PYTHONPATH="$TESTDIR/.." && export PYTHONPATH
+ $ if [ -n "$COVERAGE" ]; then
+ > coverage erase
+ > alias cram="`which coverage` run --branch -a $TESTDIR/../scripts/cram"
+ > else
+ > alias cram="$PYTHON $TESTDIR/../scripts/cram"
+ > fi
+ $ command -v md5 > /dev/null || alias md5=md5sum
+
+ Usage:
+
+ $ cram -h
+ [Uu]sage: cram \[OPTIONS\] TESTS\.\.\. (re)
+
+ [Oo]ptions: (re)
+ -h, --help show this help message and exit
+ -V, --version show version information and exit
+ -q, --quiet don't print diffs
+ -v, --verbose show filenames and test status
+ -i, --interactive interactively merge changed test output
+ -d, --debug write script output directly to the terminal
+ -y, --yes answer yes to all questions
+ -n, --no answer no to all questions
+ -E, --preserve-env don't reset common environment variables
+ --keep-tmpdir keep temporary directories
+ --shell=PATH shell to use for running tests (default: /bin/sh)
+ --shell-opts=OPTS arguments to invoke shell with
+ --indent=NUM number of spaces to use for indentation (default: 2)
+ --xunit-file=PATH path to write xUnit XML output
+
+The format in a nutshell:
+
+* Cram tests use the ``.t`` file extension.
+
+* Lines beginning with two spaces, a dollar sign, and a space are run
+ in the shell.
+
+* Lines beginning with two spaces, a greater than sign, and a space
+ allow multi-line commands.
+
+* All other lines beginning with two spaces are considered command
+ output.
+
+* Output lines ending with a space and the keyword ``(re)`` are
+ matched as `Perl-compatible regular expressions`_.
+
+* Lines ending with a space and the keyword ``(glob)`` are matched
+ with a glob-like syntax. The only special characters supported are
+  ``*`` and ``?``. Both characters can be escaped using ``\``, and the
+  backslash itself can also be escaped.
+
+* Output lines ending with either of the above keywords are always
+ first matched literally with actual command output.
+
+* Lines ending with a space and the keyword ``(no-eol)`` will match
+ actual output that doesn't end in a newline.
+
+* Actual output lines containing unprintable characters are escaped
+ and suffixed with a space and the keyword ``(esc)``. Lines matching
+ unprintable output must also contain the keyword.
+
+* Anything else is a comment.
+
+.. _Cram's own test suite: https://bitbucket.org/brodie/cram/src/default/tests/cram.t
+.. _Perl-compatible regular expressions: https://en.wikipedia.org/wiki/Perl_Compatible_Regular_Expressions
+
+
+Download
+--------
+
+* `cram-0.7.tar.gz`_ (32 KB, requires Python 2.4-2.7 or Python 3.1 or newer)
+
+.. _cram-0.7.tar.gz: https://bitheap.org/cram/cram-0.7.tar.gz
+
+
+Installation
+------------
+
+Install Cram using make::
+
+ $ wget https://bitheap.org/cram/cram-0.7.tar.gz
+ $ tar zxvf cram-0.7.tar.gz
+ $ cd cram-0.7
+ $ make install
+
+
+Usage
+-----
+
+Cram will print a dot for each passing test. If a test fails, a
+`unified context diff`_ is printed showing the test's expected output
+and the actual output. Skipped tests (empty tests and tests that exit
+with return code ``80``) are marked with ``s`` instead of a dot.
+
+For example, if we run Cram on `its own example tests`_::
+
+ .s.!
+ --- examples/fail.t
+ +++ examples/fail.t.err
+ @@ -3,21 +3,22 @@
+ $ echo 1
+ 1
+ $ echo 1
+ - 2
+ + 1
+ $ echo 1
+ 1
+
+ Invalid regex:
+
+ $ echo 1
+ - +++ (re)
+ + 1
+
+ Offset regular expression:
+
+ $ printf 'foo\nbar\nbaz\n\n1\nA\n@\n'
+ foo
+ + bar
+ baz
+
+ \d (re)
+ [A-Z] (re)
+ - #
+ + @
+ s.
+ # Ran 6 tests, 2 skipped, 1 failed.
+
+Cram will also write the test with its actual output to
+``examples/fail.t.err``, allowing you to use other diff tools. This
+file is automatically removed the next time the test passes.
+
+When you're first writing a test, you might just write the commands
+and run the test to see what happens. If you run Cram with ``-i`` or
+``--interactive``, you'll be prompted to merge the actual output back
+into the test. This makes it easy to quickly prototype new tests.
+
+You can specify a default set of options by creating a ``.cramrc``
+file. For example::
+
+ [cram]
+ verbose = True
+ indent = 4
+
+This is the same as invoking Cram with ``--verbose`` and ``--indent=4``.
+
+To change what configuration file Cram loads, you can set the
+``CRAMRC`` environment variable. You can also specify command line
+options in the ``CRAM`` environment variable.
+
+Note that the following environment variables are reset before tests
+are run:
+
+* ``TMPDIR``, ``TEMP``, and ``TMP`` are set to the test runner's
+ ``tmp`` directory.
+
+* ``LANG``, ``LC_ALL``, and ``LANGUAGE`` are set to ``C``.
+
+* ``TZ`` is set to ``GMT``.
+
+* ``COLUMNS`` is set to ``80``. (Note: When using ``--shell=zsh``,
+ this cannot be reset. It will reflect the actual terminal's width.)
+
+* ``CDPATH`` and ``GREP_OPTIONS`` are set to an empty string.
+
+Cram also provides the following environment variables to tests:
+
+* ``CRAMTMP``, set to the test runner's temporary directory.
+
+* ``TESTDIR``, set to the directory containing the test file.
+
+* ``TESTFILE``, set to the basename of the current test file.
+
+* ``TESTSHELL``, set to the value specified by ``--shell``.
+
+Also note that care should be taken with commands that close the test
+shell's ``stdin``. For example, if you're trying to invoke ``ssh`` in
+a test, try adding the ``-n`` option to prevent it from closing
+``stdin``. Similarly, if you invoke a daemon process that inherits
+``stdout`` and fails to close it, it may cause Cram to hang while
+waiting for the test shell's ``stdout`` to be fully closed.
+
+.. _unified context diff: https://en.wikipedia.org/wiki/Diff#Unified_format
+.. _its own example tests: https://bitbucket.org/brodie/cram/src/default/examples/
+
+
+Development
+-----------
+
+Download the official development repository using Mercurial_::
+
+ hg clone https://bitbucket.org/brodie/cram
+
+Or Git_::
+
+ git clone https://github.com/brodie/cram.git
+
+Test Cram using Cram::
+
+ pip install -r requirements.txt
+ make test
+
+Visit Bitbucket_ or GitHub_ if you'd like to fork the project, watch
+for new changes, or report issues.
+
+.. _Mercurial: http://mercurial.selenic.com/
+.. _Git: http://git-scm.com/
+.. _coverage.py: http://nedbatchelder.com/code/coverage/
+.. _Bitbucket: https://bitbucket.org/brodie/cram
+.. _GitHub: https://github.com/brodie/cram
+
+
diff --git a/third_party/python/cram/cram-0.7.dist-info/METADATA b/third_party/python/cram/cram-0.7.dist-info/METADATA
new file mode 100644
index 0000000000..0edb62d168
--- /dev/null
+++ b/third_party/python/cram/cram-0.7.dist-info/METADATA
@@ -0,0 +1,250 @@
+Metadata-Version: 2.0
+Name: cram
+Version: 0.7
+Summary: A simple testing framework for command line applications
+Home-page: https://bitheap.org/cram/
+Author: Brodie Rao
+Author-email: brodie@bitheap.org
+License: GNU GPLv2 or any later version
+Download-URL: https://bitheap.org/cram/cram-0.7.tar.gz
+Keywords: automatic functional test framework
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Unix Shell
+Classifier: Topic :: Software Development :: Testing
+
+======================
+ Cram: It's test time
+======================
+
+Cram is a functional testing framework for command line applications.
+Cram tests look like snippets of interactive shell sessions. Cram runs
+each command and compares the command output in the test with the
+command's actual output.
+
+Here's a snippet from `Cram's own test suite`_::
+
+ The $PYTHON environment variable should be set when running this test
+ from Python.
+
+ $ [ -n "$PYTHON" ] || PYTHON="`which python`"
+ $ [ -n "$PYTHONPATH" ] || PYTHONPATH="$TESTDIR/.." && export PYTHONPATH
+ $ if [ -n "$COVERAGE" ]; then
+ > coverage erase
+ > alias cram="`which coverage` run --branch -a $TESTDIR/../scripts/cram"
+ > else
+ > alias cram="$PYTHON $TESTDIR/../scripts/cram"
+ > fi
+ $ command -v md5 > /dev/null || alias md5=md5sum
+
+ Usage:
+
+ $ cram -h
+ [Uu]sage: cram \[OPTIONS\] TESTS\.\.\. (re)
+
+ [Oo]ptions: (re)
+ -h, --help show this help message and exit
+ -V, --version show version information and exit
+ -q, --quiet don't print diffs
+ -v, --verbose show filenames and test status
+ -i, --interactive interactively merge changed test output
+ -d, --debug write script output directly to the terminal
+ -y, --yes answer yes to all questions
+ -n, --no answer no to all questions
+ -E, --preserve-env don't reset common environment variables
+ --keep-tmpdir keep temporary directories
+ --shell=PATH shell to use for running tests (default: /bin/sh)
+ --shell-opts=OPTS arguments to invoke shell with
+ --indent=NUM number of spaces to use for indentation (default: 2)
+ --xunit-file=PATH path to write xUnit XML output
+
+The format in a nutshell:
+
+* Cram tests use the ``.t`` file extension.
+
+* Lines beginning with two spaces, a dollar sign, and a space are run
+ in the shell.
+
+* Lines beginning with two spaces, a greater than sign, and a space
+ allow multi-line commands.
+
+* All other lines beginning with two spaces are considered command
+ output.
+
+* Output lines ending with a space and the keyword ``(re)`` are
+ matched as `Perl-compatible regular expressions`_.
+
+* Lines ending with a space and the keyword ``(glob)`` are matched
+ with a glob-like syntax. The only special characters supported are
+  ``*`` and ``?``. Both characters can be escaped using ``\``, and the
+  backslash itself can also be escaped.
+
+* Output lines ending with either of the above keywords are always
+ first matched literally with actual command output.
+
+* Lines ending with a space and the keyword ``(no-eol)`` will match
+ actual output that doesn't end in a newline.
+
+* Actual output lines containing unprintable characters are escaped
+ and suffixed with a space and the keyword ``(esc)``. Lines matching
+ unprintable output must also contain the keyword.
+
+* Anything else is a comment.
+
+.. _Cram's own test suite: https://bitbucket.org/brodie/cram/src/default/tests/cram.t
+.. _Perl-compatible regular expressions: https://en.wikipedia.org/wiki/Perl_Compatible_Regular_Expressions
+
+
+Download
+--------
+
+* `cram-0.7.tar.gz`_ (32 KB, requires Python 2.4-2.7 or Python 3.1 or newer)
+
+.. _cram-0.7.tar.gz: https://bitheap.org/cram/cram-0.7.tar.gz
+
+
+Installation
+------------
+
+Install Cram using make::
+
+ $ wget https://bitheap.org/cram/cram-0.7.tar.gz
+ $ tar zxvf cram-0.7.tar.gz
+ $ cd cram-0.7
+ $ make install
+
+
+Usage
+-----
+
+Cram will print a dot for each passing test. If a test fails, a
+`unified context diff`_ is printed showing the test's expected output
+and the actual output. Skipped tests (empty tests and tests that exit
+with return code ``80``) are marked with ``s`` instead of a dot.
+
+For example, if we run Cram on `its own example tests`_::
+
+ .s.!
+ --- examples/fail.t
+ +++ examples/fail.t.err
+ @@ -3,21 +3,22 @@
+ $ echo 1
+ 1
+ $ echo 1
+ - 2
+ + 1
+ $ echo 1
+ 1
+
+ Invalid regex:
+
+ $ echo 1
+ - +++ (re)
+ + 1
+
+ Offset regular expression:
+
+ $ printf 'foo\nbar\nbaz\n\n1\nA\n@\n'
+ foo
+ + bar
+ baz
+
+ \d (re)
+ [A-Z] (re)
+ - #
+ + @
+ s.
+ # Ran 6 tests, 2 skipped, 1 failed.
+
+Cram will also write the test with its actual output to
+``examples/fail.t.err``, allowing you to use other diff tools. This
+file is automatically removed the next time the test passes.
+
+When you're first writing a test, you might just write the commands
+and run the test to see what happens. If you run Cram with ``-i`` or
+``--interactive``, you'll be prompted to merge the actual output back
+into the test. This makes it easy to quickly prototype new tests.
+
+You can specify a default set of options by creating a ``.cramrc``
+file. For example::
+
+ [cram]
+ verbose = True
+ indent = 4
+
+This is the same as invoking Cram with ``--verbose`` and ``--indent=4``.
+
+To change what configuration file Cram loads, you can set the
+``CRAMRC`` environment variable. You can also specify command line
+options in the ``CRAM`` environment variable.
+
+Note that the following environment variables are reset before tests
+are run:
+
+* ``TMPDIR``, ``TEMP``, and ``TMP`` are set to the test runner's
+ ``tmp`` directory.
+
+* ``LANG``, ``LC_ALL``, and ``LANGUAGE`` are set to ``C``.
+
+* ``TZ`` is set to ``GMT``.
+
+* ``COLUMNS`` is set to ``80``. (Note: When using ``--shell=zsh``,
+ this cannot be reset. It will reflect the actual terminal's width.)
+
+* ``CDPATH`` and ``GREP_OPTIONS`` are set to an empty string.
+
+Cram also provides the following environment variables to tests:
+
+* ``CRAMTMP``, set to the test runner's temporary directory.
+
+* ``TESTDIR``, set to the directory containing the test file.
+
+* ``TESTFILE``, set to the basename of the current test file.
+
+* ``TESTSHELL``, set to the value specified by ``--shell``.
+
+Also note that care should be taken with commands that close the test
+shell's ``stdin``. For example, if you're trying to invoke ``ssh`` in
+a test, try adding the ``-n`` option to prevent it from closing
+``stdin``. Similarly, if you invoke a daemon process that inherits
+``stdout`` and fails to close it, it may cause Cram to hang while
+waiting for the test shell's ``stdout`` to be fully closed.
+
+.. _unified context diff: https://en.wikipedia.org/wiki/Diff#Unified_format
+.. _its own example tests: https://bitbucket.org/brodie/cram/src/default/examples/
+
+
+Development
+-----------
+
+Download the official development repository using Mercurial_::
+
+ hg clone https://bitbucket.org/brodie/cram
+
+Or Git_::
+
+ git clone https://github.com/brodie/cram.git
+
+Test Cram using Cram::
+
+ pip install -r requirements.txt
+ make test
+
+Visit Bitbucket_ or GitHub_ if you'd like to fork the project, watch
+for new changes, or report issues.
+
+.. _Mercurial: http://mercurial.selenic.com/
+.. _Git: http://git-scm.com/
+.. _coverage.py: http://nedbatchelder.com/code/coverage/
+.. _Bitbucket: https://bitbucket.org/brodie/cram
+.. _GitHub: https://github.com/brodie/cram
+
+
diff --git a/third_party/python/cram/cram-0.7.dist-info/RECORD b/third_party/python/cram/cram-0.7.dist-info/RECORD
new file mode 100644
index 0000000000..8c2ef4ae3f
--- /dev/null
+++ b/third_party/python/cram/cram-0.7.dist-info/RECORD
@@ -0,0 +1,16 @@
+cram/__init__.py,sha256=80M3WLqeS6MAACoIZW89KZR4bOmFm7UcpoRPF6S-8jc,172
+cram/__main__.py,sha256=AUlczSWsDtiA6srk4dsmdsz8cZXb1QXMdPkobAR-Ex0,152
+cram/_cli.py,sha256=aIJE2BY0djuOqgtCHe9IVUIl7Vvvk-awsksdmMd1RNc,4345
+cram/_diff.py,sha256=pXLlKb1UgQX17ayJpPQsGoMHW7bKLcACe9KEZlnMkx0,5630
+cram/_encoding.py,sha256=PSPdcjenMvC0wabbPhWPkCxeUcohcQ6o3Rk58AC97Uo,2990
+cram/_main.py,sha256=5gwaBNSyKCq9bwkRLKqNXcsB5Okf0sfxDpousd51CO4,7728
+cram/_process.py,sha256=2JV6sRl_9p3DYu1IYN5_D-isln9vAh5ua6bAxAy8ytA,1805
+cram/_run.py,sha256=X5fOy7TKxMdBcis0JczYZkNUoQdJ5wUqlDCM2sRJDm0,2292
+cram/_test.py,sha256=9QYuf3DRuLs9O1QVP3MfoJlISBRfnC5ONhCL4uXGYG8,7904
+cram/_xunit.py,sha256=KUAUokY3HhkgPYp0IjSl2m7KvztYdbwW7p1aqdaUJgA,6247
+cram-0.7.data/scripts/cram,sha256=S3wCw9Ks2J4dtVftWZ8DU0eNtpb1ekf8Bz73Di3PvUs,112
+cram-0.7.dist-info/DESCRIPTION.rst,sha256=ejwfPio_dRLrZ2PhWnsGbLW6lPyiDTjUAejg5MPG-kg,7080
+cram-0.7.dist-info/METADATA,sha256=ExruW_6HNwqu-mVqvcCSUtund4CHxt5hb3019a3jLeo,8018
+cram-0.7.dist-info/RECORD,,
+cram-0.7.dist-info/WHEEL,sha256=o2k-Qa-RMNIJmUdIc7KU6VWR_ErNRbWNlxDIpl7lm34,110
+cram-0.7.dist-info/metadata.json,sha256=cRTULRj1eXU8xWOtqLK8DMhu0vWJELulW_PI8O4ytPU,1063
diff --git a/third_party/python/cram/cram-0.7.dist-info/WHEEL b/third_party/python/cram/cram-0.7.dist-info/WHEEL
new file mode 100644
index 0000000000..8b6dd1b5a8
--- /dev/null
+++ b/third_party/python/cram/cram-0.7.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.29.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/cram/cram-0.7.dist-info/metadata.json b/third_party/python/cram/cram-0.7.dist-info/metadata.json
new file mode 100644
index 0000000000..f2156d9e67
--- /dev/null
+++ b/third_party/python/cram/cram-0.7.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "License :: OSI Approved :: GNU General Public License (GPL)", "License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Programming Language :: Unix Shell", "Topic :: Software Development :: Testing"], "download_url": "https://bitheap.org/cram/cram-0.7.tar.gz", "extensions": {"python.details": {"contacts": [{"email": "brodie@bitheap.org", "name": "Brodie Rao", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://bitheap.org/cram/"}}}, "generator": "bdist_wheel (0.29.0)", "keywords": ["automatic", "functional", "test", "framework"], "license": "GNU GPLv2 or any later version", "metadata_version": "2.0", "name": "cram", "summary": "A simple testing framework for command line applications", "version": "0.7"} \ No newline at end of file
diff --git a/third_party/python/cram/cram/__init__.py b/third_party/python/cram/cram/__init__.py
new file mode 100644
index 0000000000..4b626c4027
--- /dev/null
+++ b/third_party/python/cram/cram/__init__.py
@@ -0,0 +1,6 @@
+"""Functional testing framework for command line applications"""
+
+from cram._main import main
+from cram._test import test, testfile
+
+__all__ = ['main', 'test', 'testfile']
diff --git a/third_party/python/cram/cram/__main__.py b/third_party/python/cram/cram/__main__.py
new file mode 100644
index 0000000000..e6b0aef978
--- /dev/null
+++ b/third_party/python/cram/cram/__main__.py
@@ -0,0 +1,10 @@
+"""Main module (invoked by "python -m cram")"""
+
+import sys
+
+import cram
+
+try:
+ sys.exit(cram.main(sys.argv[1:]))
+except KeyboardInterrupt:
+ pass
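+
+# Typical invocation (the test path is illustrative):
+#     python -m cram examples/fail.t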
diff --git a/third_party/python/cram/cram/_cli.py b/third_party/python/cram/cram/_cli.py
new file mode 100644
index 0000000000..8333b6b951
--- /dev/null
+++ b/third_party/python/cram/cram/_cli.py
@@ -0,0 +1,134 @@
+"""The command line interface implementation"""
+
+import os
+import sys
+
+from cram._encoding import b, bytestype, stdoutb
+from cram._process import execute
+
+__all__ = ['runcli']
+
+def _prompt(question, answers, auto=None):
+ """Write a prompt to stdout and ask for answer in stdin.
+
+ answers should be a string, with each character a single
+ answer. An uppercase letter is considered the default answer.
+
+ If an invalid answer is given, this asks again until it gets a
+ valid one.
+
+ If auto is set, the question is answered automatically with the
+ specified value.
+ """
+ default = [c for c in answers if c.isupper()]
+ while True:
+ sys.stdout.write('%s [%s] ' % (question, answers))
+ sys.stdout.flush()
+ if auto is not None:
+ sys.stdout.write(auto + '\n')
+ sys.stdout.flush()
+ return auto
+
+ answer = sys.stdin.readline().strip().lower()
+ if not answer and default:
+ return default[0]
+ elif answer and answer in answers.lower():
+ return answer
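+
+# For example (hypothetical): _prompt('Accept this change?', 'yN') returns
+# 'N' (the uppercase default) when the user just presses enter, and returns
+# the lowercased reply once it is one of the listed answers.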
+
+def _log(msg=None, verbosemsg=None, verbose=False):
+ """Write msg to standard out and flush.
+
+ If verbose is True, write verbosemsg instead.
+ """
+ if verbose:
+ msg = verbosemsg
+ if msg:
+ if isinstance(msg, bytestype):
+ stdoutb.write(msg)
+ else: # pragma: nocover
+ sys.stdout.write(msg)
+ sys.stdout.flush()
+
+def _patch(cmd, diff):
+ """Run echo [lines from diff] | cmd -p0"""
+ out, retcode = execute([cmd, '-p0'], stdin=b('').join(diff))
+ return retcode == 0
+
+def runcli(tests, quiet=False, verbose=False, patchcmd=None, answer=None):
+ """Run tests with command line interface input/output.
+
+ tests should be a sequence of 2-tuples containing the following:
+
+ (test path, test function)
+
+ This function yields a new sequence where each test function is wrapped
+ with a function that handles CLI input/output.
+
+ If quiet is True, diffs aren't printed. If verbose is True,
+ filenames and status information are printed.
+
+ If patchcmd is set, a prompt is written to stdout asking if
+ changed output should be merged back into the original test. The
+ answer is read from stdin. If 'y', the test is patched using patch
+ based on the changed output.
+ """
+ total, skipped, failed = [0], [0], [0]
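+    # Single-element lists (rather than plain ints) let the nested
+    # testwrapper() closure mutate these counters; this is the classic
+    # pre-"nonlocal" idiom that still works on Python 2.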
+
+ for path, test in tests:
+ def testwrapper():
+ """Test function that adds CLI output"""
+ total[0] += 1
+ _log(None, path + b(': '), verbose)
+
+ refout, postout, diff = test()
+ if refout is None:
+ skipped[0] += 1
+ _log('s', 'empty\n', verbose)
+ return refout, postout, diff
+
+ abspath = os.path.abspath(path)
+ errpath = abspath + b('.err')
+
+ if postout is None:
+ skipped[0] += 1
+ _log('s', 'skipped\n', verbose)
+ elif not diff:
+ _log('.', 'passed\n', verbose)
+ if os.path.exists(errpath):
+ os.remove(errpath)
+ else:
+ failed[0] += 1
+ _log('!', 'failed\n', verbose)
+ if not quiet:
+ _log('\n', None, verbose)
+
+ errfile = open(errpath, 'wb')
+ try:
+ for line in postout:
+ errfile.write(line)
+ finally:
+ errfile.close()
+
+ if not quiet:
+ origdiff = diff
+ diff = []
+ for line in origdiff:
+ stdoutb.write(line)
+ diff.append(line)
+
+ if (patchcmd and
+ _prompt('Accept this change?', 'yN', answer) == 'y'):
+ if _patch(patchcmd, diff):
+ _log(None, path + b(': merged output\n'), verbose)
+ os.remove(errpath)
+ else:
+ _log(path + b(': merge failed\n'))
+
+ return refout, postout, diff
+
+ yield (path, testwrapper)
+
+ if total[0] > 0:
+ _log('\n', None, verbose)
+ _log('# Ran %s tests, %s skipped, %s failed.\n'
+ % (total[0], skipped[0], failed[0]))
diff --git a/third_party/python/cram/cram/_diff.py b/third_party/python/cram/cram/_diff.py
new file mode 100644
index 0000000000..4877305082
--- /dev/null
+++ b/third_party/python/cram/cram/_diff.py
@@ -0,0 +1,158 @@
+"""Utilities for diffing test files and their output"""
+
+import codecs
+import difflib
+import re
+
+from cram._encoding import b
+
+__all__ = ['esc', 'glob', 'regex', 'unified_diff']
+
+def _regex(pattern, s):
+ """Match a regular expression or return False if invalid.
+
+ >>> from cram._encoding import b
+ >>> [bool(_regex(r, b('foobar'))) for r in (b('foo.*'), b('***'))]
+ [True, False]
+ """
+ try:
+ return re.match(pattern + b(r'\Z'), s)
+ except re.error:
+ return False
+
+def _glob(el, l):
+ r"""Match a glob-like pattern.
+
+ The only supported special characters are * and ?. Escaping is
+ supported.
+
+ >>> from cram._encoding import b
+ >>> bool(_glob(b(r'\* \\ \? fo?b*'), b('* \\ ? foobar')))
+ True
+ """
+ i, n = 0, len(el)
+ res = b('')
+ while i < n:
+ c = el[i:i + 1]
+ i += 1
+ if c == b('\\') and el[i] in b('*?\\'):
+ res += el[i - 1:i + 1]
+ i += 1
+ elif c == b('*'):
+ res += b('.*')
+ elif c == b('?'):
+ res += b('.')
+ else:
+ res += re.escape(c)
+ return _regex(res, l)
+
+def _matchannotation(keyword, matchfunc, el, l):
+ """Apply match function based on annotation keyword"""
+ ann = b(' (%s)\n' % keyword)
+ return el.endswith(ann) and matchfunc(el[:-len(ann)], l[:-1])
+
+def regex(el, l):
+ """Apply a regular expression match to a line annotated with '(re)'"""
+ return _matchannotation('re', _regex, el, l)
+
+def glob(el, l):
+ """Apply a glob match to a line annotated with '(glob)'"""
+ return _matchannotation('glob', _glob, el, l)
+
+def esc(el, l):
+ """Apply an escape match to a line annotated with '(esc)'"""
+ ann = b(' (esc)\n')
+
+ if el.endswith(ann):
+ el = codecs.escape_decode(el[:-len(ann)])[0] + b('\n')
+ if el == l:
+ return True
+
+ if l.endswith(ann):
+ l = codecs.escape_decode(l[:-len(ann)])[0] + b('\n')
+ return el == l
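+
+# For example (hypothetical bytes): esc(b('a\\tb (esc)\n'), b('a\tb\n'))
+# is True, because the escape-annotated expected line decodes to the
+# actual output line.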
+
+class _SequenceMatcher(difflib.SequenceMatcher, object):
+ """Like difflib.SequenceMatcher, but supports custom match functions"""
+ def __init__(self, *args, **kwargs):
+ self._matchers = kwargs.pop('matchers', [])
+ super(_SequenceMatcher, self).__init__(*args, **kwargs)
+
+ def _match(self, el, l):
+ """Tests for matching lines using custom matchers"""
+ for matcher in self._matchers:
+ if matcher(el, l):
+ return True
+ return False
+
+ def find_longest_match(self, alo, ahi, blo, bhi):
+ """Find longest matching block in a[alo:ahi] and b[blo:bhi]"""
+ # SequenceMatcher uses find_longest_match() to slowly whittle down
+ # the differences between a and b until it has each matching block.
+ # Because of this, we can end up doing the same matches many times.
+ matches = []
+ for n, (el, line) in enumerate(zip(self.a[alo:ahi], self.b[blo:bhi])):
+ if el != line and self._match(el, line):
+ # This fools the superclass's method into thinking that the
+ # regex/glob in a is identical to b by replacing a's line (the
+ # expected output) with b's line (the actual output).
+ self.a[alo + n] = line
+ matches.append((n, el))
+ ret = super(_SequenceMatcher, self).find_longest_match(alo, ahi,
+ blo, bhi)
+ # Restore the lines replaced above. Otherwise, the diff output
+ # would seem to imply that the tests never had any regexes/globs.
+ for n, el in matches:
+ self.a[alo + n] = el
+ return ret
+
+def unified_diff(l1, l2, fromfile=b(''), tofile=b(''), fromfiledate=b(''),
+ tofiledate=b(''), n=3, lineterm=b('\n'), matchers=None):
+ r"""Compare two sequences of lines; generate the delta as a unified diff.
+
+ This is like difflib.unified_diff(), but allows custom matchers.
+
+ >>> from cram._encoding import b
+ >>> l1 = [b('a\n'), b('? (glob)\n')]
+ >>> l2 = [b('a\n'), b('b\n')]
+ >>> (list(unified_diff(l1, l2, b('f1'), b('f2'), b('1970-01-01'),
+ ... b('1970-01-02'))) ==
+ ... [b('--- f1\t1970-01-01\n'), b('+++ f2\t1970-01-02\n'),
+ ... b('@@ -1,2 +1,2 @@\n'), b(' a\n'), b('-? (glob)\n'), b('+b\n')])
+ True
+
+ >>> from cram._diff import glob
+ >>> list(unified_diff(l1, l2, matchers=[glob]))
+ []
+ """
+ if matchers is None:
+ matchers = []
+ started = False
+ matcher = _SequenceMatcher(None, l1, l2, matchers=matchers)
+ for group in matcher.get_grouped_opcodes(n):
+ if not started:
+ if fromfiledate:
+ fromdate = b('\t') + fromfiledate
+ else:
+ fromdate = b('')
+ if tofiledate:
+ todate = b('\t') + tofiledate
+ else:
+ todate = b('')
+ yield b('--- ') + fromfile + fromdate + lineterm
+ yield b('+++ ') + tofile + todate + lineterm
+ started = True
+ i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
+ yield (b("@@ -%d,%d +%d,%d @@" % (i1 + 1, i2 - i1, j1 + 1, j2 - j1)) +
+ lineterm)
+ for tag, i1, i2, j1, j2 in group:
+ if tag == 'equal':
+ for line in l1[i1:i2]:
+ yield b(' ') + line
+ continue
+ if tag == 'replace' or tag == 'delete':
+ for line in l1[i1:i2]:
+ yield b('-') + line
+ if tag == 'replace' or tag == 'insert':
+ for line in l2[j1:j2]:
+ yield b('+') + line
diff --git a/third_party/python/cram/cram/_encoding.py b/third_party/python/cram/cram/_encoding.py
new file mode 100644
index 0000000000..d639ccee19
--- /dev/null
+++ b/third_party/python/cram/cram/_encoding.py
@@ -0,0 +1,106 @@
+"""Encoding utilities"""
+
+import os
+import sys
+
+try:
+ import builtins
+except ImportError:
+ import __builtin__ as builtins
+
+__all__ = ['b', 'bchr', 'bytestype', 'envencode', 'fsdecode', 'fsencode',
+ 'stdoutb', 'stderrb', 'u', 'ul', 'unicodetype']
+
+bytestype = getattr(builtins, 'bytes', str)
+unicodetype = getattr(builtins, 'unicode', str)
+
+if getattr(os, 'fsdecode', None) is not None:
+ fsdecode = os.fsdecode
+ fsencode = os.fsencode
+elif bytestype is not str:
+ if sys.platform == 'win32':
+ def fsdecode(s):
+ """Decode a filename from the filesystem encoding"""
+ if isinstance(s, unicodetype):
+ return s
+ encoding = sys.getfilesystemencoding()
+ if encoding == 'mbcs':
+ return s.decode(encoding)
+ else:
+ return s.decode(encoding, 'surrogateescape')
+
+ def fsencode(s):
+ """Encode a filename to the filesystem encoding"""
+ if isinstance(s, bytestype):
+ return s
+ encoding = sys.getfilesystemencoding()
+ if encoding == 'mbcs':
+ return s.encode(encoding)
+ else:
+ return s.encode(encoding, 'surrogateescape')
+ else:
+ def fsdecode(s):
+ """Decode a filename from the filesystem encoding"""
+ if isinstance(s, unicodetype):
+ return s
+ return s.decode(sys.getfilesystemencoding(), 'surrogateescape')
+
+ def fsencode(s):
+ """Encode a filename to the filesystem encoding"""
+ if isinstance(s, bytestype):
+ return s
+ return s.encode(sys.getfilesystemencoding(), 'surrogateescape')
+else:
+ def fsdecode(s):
+ """Decode a filename from the filesystem encoding"""
+ return s
+
+ def fsencode(s):
+ """Encode a filename to the filesystem encoding"""
+ return s
+
+if bytestype is str:
+ def envencode(s):
+ """Encode a byte string to the os.environ encoding"""
+ return s
+else:
+ envencode = fsdecode
+
+if getattr(sys.stdout, 'buffer', None) is not None:
+ stdoutb = sys.stdout.buffer
+ stderrb = sys.stderr.buffer
+else:
+ stdoutb = sys.stdout
+ stderrb = sys.stderr
+
+if bytestype is str:
+ def b(s):
+ """Convert an ASCII string literal into a bytes object"""
+ return s
+
+ bchr = chr
+
+ def u(s):
+ """Convert an ASCII string literal into a unicode object"""
+ return s.decode('ascii')
+else:
+ def b(s):
+ """Convert an ASCII string literal into a bytes object"""
+ return s.encode('ascii')
+
+ def bchr(i):
+ """Return a bytes character for a given integer value"""
+ return bytestype([i])
+
+ def u(s):
+ """Convert an ASCII string literal into a unicode object"""
+ return s
+
+try:
+ eval(r'u""')
+except SyntaxError:
+ ul = eval
+else:
+ def ul(e):
+ """Evaluate e as a unicode string literal"""
+ return eval('u' + e)
diff --git a/third_party/python/cram/cram/_main.py b/third_party/python/cram/cram/_main.py
new file mode 100644
index 0000000000..11d457bb16
--- /dev/null
+++ b/third_party/python/cram/cram/_main.py
@@ -0,0 +1,211 @@
+"""Main entry point"""
+
+import optparse
+import os
+import shlex
+import shutil
+import sys
+import tempfile
+
+try:
+ import configparser
+except ImportError: # pragma: nocover
+ import ConfigParser as configparser
+
+from cram._cli import runcli
+from cram._encoding import b, fsencode, stderrb, stdoutb
+from cram._run import runtests
+from cram._xunit import runxunit
+
+def _which(cmd):
+ """Return the path to cmd or None if not found"""
+ cmd = fsencode(cmd)
+ for p in os.environ['PATH'].split(os.pathsep):
+ path = os.path.join(fsencode(p), cmd)
+ if os.path.isfile(path) and os.access(path, os.X_OK):
+ return os.path.abspath(path)
+ return None
+
+def _expandpath(path):
+ """Expands ~ and environment variables in path"""
+ return os.path.expanduser(os.path.expandvars(path))
+
+class _OptionParser(optparse.OptionParser):
+ """Like optparse.OptionParser, but supports setting values through
+ CRAM= and .cramrc."""
+
+ def __init__(self, *args, **kwargs):
+ self._config_opts = {}
+ optparse.OptionParser.__init__(self, *args, **kwargs)
+
+ def add_option(self, *args, **kwargs):
+ option = optparse.OptionParser.add_option(self, *args, **kwargs)
+ if option.dest and option.dest != 'version':
+ key = option.dest.replace('_', '-')
+ self._config_opts[key] = option.action == 'store_true'
+ return option
+
+ def parse_args(self, args=None, values=None):
+ config = configparser.RawConfigParser()
+ config.read(_expandpath(os.environ.get('CRAMRC', '.cramrc')))
+ defaults = {}
+ for key, isbool in self._config_opts.items():
+ try:
+ if isbool:
+ try:
+ value = config.getboolean('cram', key)
+ except ValueError:
+ value = config.get('cram', key)
+ self.error('--%s: invalid boolean value: %r'
+ % (key, value))
+ else:
+ value = config.get('cram', key)
+ except (configparser.NoSectionError, configparser.NoOptionError):
+ pass
+ else:
+ defaults[key] = value
+ self.set_defaults(**defaults)
+
+ eargs = os.environ.get('CRAM', '').strip()
+ if eargs:
+ args = args or []
+ args += shlex.split(eargs)
+
+ try:
+ return optparse.OptionParser.parse_args(self, args, values)
+ except optparse.OptionValueError:
+ self.error(str(sys.exc_info()[1]))
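+
+# Net effect of the parser above (as read from the code): values from
+# .cramrc become option defaults, and any arguments in the CRAM environment
+# variable are appended after the explicit command-line arguments.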
+
+def _parseopts(args):
+ """Parse command line arguments"""
+ p = _OptionParser(usage='cram [OPTIONS] TESTS...', prog='cram')
+ p.add_option('-V', '--version', action='store_true',
+ help='show version information and exit')
+ p.add_option('-q', '--quiet', action='store_true',
+ help="don't print diffs")
+ p.add_option('-v', '--verbose', action='store_true',
+ help='show filenames and test status')
+ p.add_option('-i', '--interactive', action='store_true',
+ help='interactively merge changed test output')
+ p.add_option('-d', '--debug', action='store_true',
+ help='write script output directly to the terminal')
+ p.add_option('-y', '--yes', action='store_true',
+ help='answer yes to all questions')
+ p.add_option('-n', '--no', action='store_true',
+ help='answer no to all questions')
+ p.add_option('-E', '--preserve-env', action='store_true',
+ help="don't reset common environment variables")
+ p.add_option('--keep-tmpdir', action='store_true',
+ help='keep temporary directories')
+ p.add_option('--shell', action='store', default='/bin/sh', metavar='PATH',
+ help='shell to use for running tests (default: %default)')
+ p.add_option('--shell-opts', action='store', metavar='OPTS',
+ help='arguments to invoke shell with')
+ p.add_option('--indent', action='store', default=2, metavar='NUM',
+ type='int', help=('number of spaces to use for indentation '
+ '(default: %default)'))
+ p.add_option('--xunit-file', action='store', metavar='PATH',
+ help='path to write xUnit XML output')
+ opts, paths = p.parse_args(args)
+ paths = [fsencode(path) for path in paths]
+ return opts, paths, p.get_usage
+
+def main(args):
+ """Main entry point.
+
+ If you're thinking of using Cram in other Python code (e.g., unit tests),
+ consider using the test() or testfile() functions instead.
+
+ :param args: Script arguments (excluding script name)
+    :type args: list of str
+ :return: Exit code (non-zero on failure)
+ :rtype: int
+ """
+ opts, paths, getusage = _parseopts(args)
+ if opts.version:
+ sys.stdout.write("""Cram CLI testing framework (version 0.7)
+
+Copyright (C) 2010-2016 Brodie Rao <brodie@bitheap.org> and others
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+""")
+ return
+
+ conflicts = [('--yes', opts.yes, '--no', opts.no),
+ ('--quiet', opts.quiet, '--interactive', opts.interactive),
+ ('--debug', opts.debug, '--quiet', opts.quiet),
+ ('--debug', opts.debug, '--interactive', opts.interactive),
+ ('--debug', opts.debug, '--verbose', opts.verbose),
+ ('--debug', opts.debug, '--xunit-file', opts.xunit_file)]
+ for s1, o1, s2, o2 in conflicts:
+ if o1 and o2:
+ sys.stderr.write('options %s and %s are mutually exclusive\n'
+ % (s1, s2))
+ return 2
+
+ shellcmd = _which(opts.shell)
+ if not shellcmd:
+ stderrb.write(b('shell not found: ') + fsencode(opts.shell) + b('\n'))
+ return 2
+ shell = [shellcmd]
+ if opts.shell_opts:
+ shell += shlex.split(opts.shell_opts)
+
+ patchcmd = None
+ if opts.interactive:
+ patchcmd = _which('patch')
+ if not patchcmd:
+ sys.stderr.write('patch(1) required for -i\n')
+ return 2
+
+ if not paths:
+ sys.stdout.write(getusage())
+ return 2
+
+ badpaths = [path for path in paths if not os.path.exists(path)]
+ if badpaths:
+ stderrb.write(b('no such file: ') + badpaths[0] + b('\n'))
+ return 2
+
+ if opts.yes:
+ answer = 'y'
+ elif opts.no:
+ answer = 'n'
+ else:
+ answer = None
+
+ tmpdir = os.environ['CRAMTMP'] = tempfile.mkdtemp('', 'cramtests-')
+ tmpdirb = fsencode(tmpdir)
+ proctmp = os.path.join(tmpdir, 'tmp')
+ for s in ('TMPDIR', 'TEMP', 'TMP'):
+ os.environ[s] = proctmp
+
+ os.mkdir(proctmp)
+ try:
+ tests = runtests(paths, tmpdirb, shell, indent=opts.indent,
+ cleanenv=not opts.preserve_env, debug=opts.debug)
+ if not opts.debug:
+ tests = runcli(tests, quiet=opts.quiet, verbose=opts.verbose,
+ patchcmd=patchcmd, answer=answer)
+ if opts.xunit_file is not None:
+ tests = runxunit(tests, opts.xunit_file)
+
+ hastests = False
+ failed = False
+ for path, test in tests:
+ hastests = True
+ refout, postout, diff = test()
+ if diff:
+ failed = True
+
+ if not hastests:
+ sys.stderr.write('no tests found\n')
+ return 2
+
+ return int(failed)
+ finally:
+ if opts.keep_tmpdir:
+ stdoutb.write(b('# Kept temporary directory: ') + tmpdirb +
+ b('\n'))
+ else:
+ shutil.rmtree(tmpdir)
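+
+# Entry-point sketch (illustrative, not part of the upstream module;
+# sys.argv[0] is the script name, which main() excludes):
+#
+#   if __name__ == '__main__':
+#       sys.exit(main(sys.argv[1:]))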
diff --git a/third_party/python/cram/cram/_process.py b/third_party/python/cram/cram/_process.py
new file mode 100644
index 0000000000..decdfbc3a7
--- /dev/null
+++ b/third_party/python/cram/cram/_process.py
@@ -0,0 +1,54 @@
+"""Utilities for running subprocesses"""
+
+import os
+import signal
+import subprocess
+import sys
+
+from cram._encoding import fsdecode
+
+__all__ = ['PIPE', 'STDOUT', 'execute']
+
+PIPE = subprocess.PIPE
+STDOUT = subprocess.STDOUT
+
+def _makeresetsigpipe():
+ """Make a function to reset SIGPIPE to SIG_DFL (for use in subprocesses).
+
+    Doing subprocess.Popen(..., preexec_fn=_makeresetsigpipe()) will prevent
+ Python's SIGPIPE handler (SIG_IGN) from being inherited by the
+ child process.
+ """
+ if (sys.platform == 'win32' or
+ getattr(signal, 'SIGPIPE', None) is None): # pragma: nocover
+ return None
+ return lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+
+def execute(args, stdin=None, stdout=None, stderr=None, cwd=None, env=None):
+ """Run a process and return its output and return code.
+
+ stdin may either be None or a string to send to the process.
+
+ stdout may either be None or PIPE. If set to PIPE, the process's output
+ is returned as a string.
+
+ stderr may either be None or STDOUT. If stdout is set to PIPE and stderr
+ is set to STDOUT, the process's stderr output will be interleaved with
+ stdout and returned as a string.
+
+ cwd sets the process's current working directory.
+
+ env can be set to a dictionary to override the process's environment
+ variables.
+
+ This function returns a 2-tuple of (output, returncode).
+ """
+ if sys.platform == 'win32': # pragma: nocover
+ args = [fsdecode(arg) for arg in args]
+
+ p = subprocess.Popen(args, stdin=PIPE, stdout=stdout, stderr=stderr,
+ cwd=cwd, env=env, bufsize=-1,
+ preexec_fn=_makeresetsigpipe(),
+ close_fds=os.name == 'posix')
+ out, err = p.communicate(stdin)
+ return out, p.returncode
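+
+# Example sketch (not part of the upstream module): capture a command's
+# combined stdout/stderr on a POSIX system.
+#
+#   out, ret = execute(['echo', 'hi'], stdout=PIPE, stderr=STDOUT)
+#   assert ret == 0 and out == b'hi\n'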
diff --git a/third_party/python/cram/cram/_run.py b/third_party/python/cram/cram/_run.py
new file mode 100644
index 0000000000..9111c0f686
--- /dev/null
+++ b/third_party/python/cram/cram/_run.py
@@ -0,0 +1,77 @@
+"""The test runner"""
+
+import os
+import sys
+
+from cram._encoding import b, fsdecode, fsencode
+from cram._test import testfile
+
+__all__ = ['runtests']
+
+if sys.platform == 'win32': # pragma: nocover
+ def _walk(top):
+ top = fsdecode(top)
+ for root, dirs, files in os.walk(top):
+ yield (fsencode(root),
+ [fsencode(p) for p in dirs],
+ [fsencode(p) for p in files])
+else:
+ _walk = os.walk
+
+def _findtests(paths):
+ """Yield tests in paths in sorted order"""
+ for p in paths:
+ if os.path.isdir(p):
+ for root, dirs, files in _walk(p):
+ if os.path.basename(root).startswith(b('.')):
+ continue
+ for f in sorted(files):
+ if not f.startswith(b('.')) and f.endswith(b('.t')):
+ yield os.path.normpath(os.path.join(root, f))
+ else:
+ yield os.path.normpath(p)
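+
+# For example (illustrative layout): given a directory b'tests' containing
+# b'a.t', b'.hidden.t', and b'sub/b.t', _findtests([b'tests']) yields
+# b'tests/a.t' and b'tests/sub/b.t'; hidden files and directories are skipped.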
+
+def runtests(paths, tmpdir, shell, indent=2, cleanenv=True, debug=False):
+ """Run tests and yield results.
+
+ This yields a sequence of 2-tuples containing the following:
+
+ (test path, test function)
+
+ The test function, when called, runs the test in a temporary directory
+ and returns a 3-tuple:
+
+ (list of lines in the test, same list with actual output, diff)
+ """
+ cwd = os.getcwd()
+ seen = set()
+ basenames = set()
+ for i, path in enumerate(_findtests(paths)):
+ abspath = os.path.abspath(path)
+ if abspath in seen:
+ continue
+ seen.add(abspath)
+
+ if not os.stat(path).st_size:
+ yield (path, lambda: (None, None, None))
+ continue
+
+ basename = os.path.basename(path)
+ if basename in basenames:
+ basename = basename + b('-%s' % i)
+ else:
+ basenames.add(basename)
+
+ def test():
+ """Run test file"""
+ testdir = os.path.join(tmpdir, basename)
+ os.mkdir(testdir)
+ try:
+ os.chdir(testdir)
+ return testfile(abspath, shell, indent=indent,
+ cleanenv=cleanenv, debug=debug,
+ testname=path)
+ finally:
+ os.chdir(cwd)
+
+ yield (path, test)
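+
+# Minimal driver sketch (illustrative; tmpdirb is a bytes temporary-directory
+# path as prepared in main()). Note that test() closes over loop variables,
+# so consume the generator lazily, calling each test before advancing:
+#
+#   for path, runtest in runtests([b'tests'], tmpdirb, ['/bin/sh']):
+#       refout, postout, diff = runtest()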
diff --git a/third_party/python/cram/cram/_test.py b/third_party/python/cram/cram/_test.py
new file mode 100644
index 0000000000..27ef99c597
--- /dev/null
+++ b/third_party/python/cram/cram/_test.py
@@ -0,0 +1,230 @@
+"""Utilities for running individual tests"""
+
+import itertools
+import os
+import re
+import time
+
+from cram._encoding import b, bchr, bytestype, envencode, unicodetype
+from cram._diff import esc, glob, regex, unified_diff
+from cram._process import PIPE, STDOUT, execute
+
+__all__ = ['test', 'testfile']
+
+_needescape = re.compile(b(r'[\x00-\x09\x0b-\x1f\x7f-\xff]')).search
+_escapesub = re.compile(b(r'[\x00-\x09\x0b-\x1f\\\x7f-\xff]')).sub
+_escapemap = dict((bchr(i), b(r'\x%02x' % i)) for i in range(256))
+_escapemap.update({b('\\'): b('\\\\'), b('\r'): b(r'\r'), b('\t'): b(r'\t')})
+
+def _escape(s):
+ """Like the string-escape codec, but doesn't escape quotes"""
+ return (_escapesub(lambda m: _escapemap[m.group(0)], s[:-1]) +
+ b(' (esc)\n'))
+
+def test(lines, shell='/bin/sh', indent=2, testname=None, env=None,
+ cleanenv=True, debug=False):
+ r"""Run test lines and return input, output, and diff.
+
+ This returns a 3-tuple containing the following:
+
+ (list of lines in test, same list with actual output, diff)
+
+ diff is a generator that yields the diff between the two lists.
+
+ If a test exits with return code 80, the actual output is set to
+ None and diff is set to [].
+
+ Note that the TESTSHELL environment variable is available in the
+ test (set to the specified shell). However, the TESTDIR and
+ TESTFILE environment variables are not available. To run actual
+ test files, see testfile().
+
+ Example usage:
+
+ >>> from cram._encoding import b
+ >>> refout, postout, diff = test([b(' $ echo hi\n'),
+ ... b(' [a-z]{2} (re)\n')])
+ >>> refout == [b(' $ echo hi\n'), b(' [a-z]{2} (re)\n')]
+ True
+ >>> postout == [b(' $ echo hi\n'), b(' hi\n')]
+ True
+ >>> bool(diff)
+ False
+
+ lines may also be a single bytes string:
+
+ >>> refout, postout, diff = test(b(' $ echo hi\n bye\n'))
+ >>> refout == [b(' $ echo hi\n'), b(' bye\n')]
+ True
+ >>> postout == [b(' $ echo hi\n'), b(' hi\n')]
+ True
+ >>> bool(diff)
+ True
+ >>> (b('').join(diff) ==
+ ... b('--- \n+++ \n@@ -1,2 +1,2 @@\n $ echo hi\n- bye\n+ hi\n'))
+ True
+
+ Note that the b() function is internal to Cram. If you're using Python 2,
+ use normal string literals instead. If you're using Python 3, use bytes
+ literals.
+
+ :param lines: Test input
+ :type lines: bytes or collections.Iterable[bytes]
+ :param shell: Shell to run test in
+ :type shell: bytes or str or list[bytes] or list[str]
+ :param indent: Amount of indentation to use for shell commands
+ :type indent: int
+ :param testname: Optional test file name (used in diff output)
+ :type testname: bytes or None
+ :param env: Optional environment variables for the test shell
+ :type env: dict or None
+ :param cleanenv: Whether or not to sanitize the environment
+ :type cleanenv: bool
+ :param debug: Whether or not to run in debug mode (don't capture stdout)
+ :type debug: bool
+ :return: Input, output, and diff iterables
+ :rtype: (list[bytes], list[bytes], collections.Iterable[bytes])
+ """
+ indent = b(' ') * indent
+ cmdline = indent + b('$ ')
+ conline = indent + b('> ')
+ usalt = 'CRAM%s' % time.time()
+ salt = b(usalt)
+
+ if env is None:
+ env = os.environ.copy()
+
+ if cleanenv:
+ for s in ('LANG', 'LC_ALL', 'LANGUAGE'):
+ env[s] = 'C'
+ env['TZ'] = 'GMT'
+ env['CDPATH'] = ''
+ env['COLUMNS'] = '80'
+ env['GREP_OPTIONS'] = ''
+
+ if isinstance(lines, bytestype):
+ lines = lines.splitlines(True)
+
+ if isinstance(shell, (bytestype, unicodetype)):
+ shell = [shell]
+ env['TESTSHELL'] = shell[0]
+
+ if debug:
+ stdin = []
+ for line in lines:
+ if not line.endswith(b('\n')):
+ line += b('\n')
+ if line.startswith(cmdline):
+ stdin.append(line[len(cmdline):])
+ elif line.startswith(conline):
+ stdin.append(line[len(conline):])
+
+ execute(shell + ['-'], stdin=b('').join(stdin), env=env)
+ return ([], [], [])
+
+ after = {}
+ refout, postout = [], []
+ i = pos = prepos = -1
+ stdin = []
+ for i, line in enumerate(lines):
+ if not line.endswith(b('\n')):
+ line += b('\n')
+ refout.append(line)
+ if line.startswith(cmdline):
+ after.setdefault(pos, []).append(line)
+ prepos = pos
+ pos = i
+ stdin.append(b('echo %s %s $?\n' % (usalt, i)))
+ stdin.append(line[len(cmdline):])
+ elif line.startswith(conline):
+ after.setdefault(prepos, []).append(line)
+ stdin.append(line[len(conline):])
+ elif not line.startswith(indent):
+ after.setdefault(pos, []).append(line)
+ stdin.append(b('echo %s %s $?\n' % (usalt, i + 1)))
+
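+    # Markers of the form "echo SALT <line> $?" were interleaved between the
+    # commands above, so the combined output can be split back into
+    # per-command chunks, each paired with its exit status, in the loop below.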
+ output, retcode = execute(shell + ['-'], stdin=b('').join(stdin),
+ stdout=PIPE, stderr=STDOUT, env=env)
+ if retcode == 80:
+ return (refout, None, [])
+
+ pos = -1
+ ret = 0
+ for i, line in enumerate(output[:-1].splitlines(True)):
+ out, cmd = line, None
+ if salt in line:
+ out, cmd = line.split(salt, 1)
+
+ if out:
+ if not out.endswith(b('\n')):
+ out += b(' (no-eol)\n')
+
+ if _needescape(out):
+ out = _escape(out)
+ postout.append(indent + out)
+
+ if cmd:
+ ret = int(cmd.split()[1])
+ if ret != 0:
+ postout.append(indent + b('[%s]\n' % (ret)))
+ postout += after.pop(pos, [])
+ pos = int(cmd.split()[0])
+
+ postout += after.pop(pos, [])
+
+ if testname:
+ diffpath = testname
+ errpath = diffpath + b('.err')
+ else:
+ diffpath = errpath = b('')
+ diff = unified_diff(refout, postout, diffpath, errpath,
+ matchers=[esc, glob, regex])
+ for firstline in diff:
+ return refout, postout, itertools.chain([firstline], diff)
+ return refout, postout, []
+
+def testfile(path, shell='/bin/sh', indent=2, env=None, cleanenv=True,
+ debug=False, testname=None):
+ """Run test at path and return input, output, and diff.
+
+ This returns a 3-tuple containing the following:
+
+ (list of lines in test, same list with actual output, diff)
+
+ diff is a generator that yields the diff between the two lists.
+
+ If a test exits with return code 80, the actual output is set to
+ None and diff is set to [].
+
+ Note that the TESTDIR, TESTFILE, and TESTSHELL environment
+ variables are available to use in the test.
+
+ :param path: Path to test file
+ :type path: bytes or str
+ :param shell: Shell to run test in
+ :type shell: bytes or str or list[bytes] or list[str]
+ :param indent: Amount of indentation to use for shell commands
+ :type indent: int
+ :param env: Optional environment variables for the test shell
+ :type env: dict or None
+ :param cleanenv: Whether or not to sanitize the environment
+ :type cleanenv: bool
+ :param debug: Whether or not to run in debug mode (don't capture stdout)
+ :type debug: bool
+ :param testname: Optional test file name (used in diff output)
+ :type testname: bytes or None
+ :return: Input, output, and diff iterables
+ :rtype: (list[bytes], list[bytes], collections.Iterable[bytes])
+ """
+ f = open(path, 'rb')
+ try:
+ abspath = os.path.abspath(path)
+ env = env or os.environ.copy()
+ env['TESTDIR'] = envencode(os.path.dirname(abspath))
+ env['TESTFILE'] = envencode(os.path.basename(abspath))
+ if testname is None: # pragma: nocover
+ testname = os.path.basename(abspath)
+ return test(f, shell, indent=indent, testname=testname, env=env,
+ cleanenv=cleanenv, debug=debug)
+ finally:
+ f.close()
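+
+# Example sketch (b'tests/example.t' is an illustrative path):
+#
+#   refout, postout, diff = testfile(b'tests/example.t')
+#   failed = bool(diff)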
diff --git a/third_party/python/cram/cram/_xunit.py b/third_party/python/cram/cram/_xunit.py
new file mode 100644
index 0000000000..0b3cb49cfc
--- /dev/null
+++ b/third_party/python/cram/cram/_xunit.py
@@ -0,0 +1,173 @@
+"""xUnit XML output"""
+
+import locale
+import os
+import re
+import socket
+import sys
+import time
+
+from cram._encoding import u, ul
+
+__all__ = ['runxunit']
+
+_widecdataregex = ul(r"'(?:[^\x09\x0a\x0d\x20-\ud7ff\ue000-\ufffd"
+ r"\U00010000-\U0010ffff]|]]>)'")
+_narrowcdataregex = ul(r"'(?:[^\x09\x0a\x0d\x20-\ud7ff\ue000-\ufffd]"
+ r"|]]>)'")
+_widequoteattrregex = ul(r"'[^\x20\x21\x23-\x25\x27-\x3b\x3d"
+ r"\x3f-\ud7ff\ue000-\ufffd"
+ r"\U00010000-\U0010ffff]'")
+_narrowquoteattrregex = ul(r"'[^\x20\x21\x23-\x25\x27-\x3b\x3d"
+ r"\x3f-\ud7ff\ue000-\ufffd]'")
+_replacementchar = ul(r"'\N{REPLACEMENT CHARACTER}'")
+
+if sys.maxunicode >= 0x10ffff: # pragma: nocover
+ _cdatasub = re.compile(_widecdataregex).sub
+ _quoteattrsub = re.compile(_widequoteattrregex).sub
+else: # pragma: nocover
+ _cdatasub = re.compile(_narrowcdataregex).sub
+ _quoteattrsub = re.compile(_narrowquoteattrregex).sub
+
+def _cdatareplace(m):
+ """Replace _cdatasub() regex match"""
+ if m.group(0) == u(']]>'):
+ return u(']]>]]&gt;<![CDATA[')
+ else:
+ return _replacementchar
+
+def _cdata(s):
+ r"""Escape a string as an XML CDATA block.
+
+ >>> from cram._encoding import ul
+ >>> (_cdata('1<\'2\'>&"3\x00]]>\t\r\n') ==
+ ... ul(r"'<![CDATA[1<\'2\'>&\"3\ufffd]]>]]&gt;<![CDATA[\t\r\n]]>'"))
+ True
+ """
+ return u('<![CDATA[%s]]>') % _cdatasub(_cdatareplace, s)
+
+def _quoteattrreplace(m):
+ """Replace _quoteattrsub() regex match"""
+ return {u('\t'): u('&#9;'),
+ u('\n'): u('&#10;'),
+ u('\r'): u('&#13;'),
+ u('"'): u('&quot;'),
+ u('&'): u('&amp;'),
+ u('<'): u('&lt;'),
+ u('>'): u('&gt;')}.get(m.group(0), _replacementchar)
+
+def _quoteattr(s):
+ r"""Escape a string for use as an XML attribute value.
+
+ >>> from cram._encoding import ul
+ >>> (_quoteattr('1<\'2\'>&"3\x00]]>\t\r\n') ==
+ ... ul(r"'\"1&lt;\'2\'&gt;&amp;&quot;3\ufffd]]&gt;&#9;&#13;&#10;\"'"))
+ True
+ """
+ return u('"%s"') % _quoteattrsub(_quoteattrreplace, s)
+
+def _timestamp():
+ """Return the current time in ISO 8601 format"""
+ tm = time.localtime()
+ if tm.tm_isdst == 1: # pragma: nocover
+ tz = time.altzone
+ else: # pragma: nocover
+ tz = time.timezone
+
+ timestamp = time.strftime('%Y-%m-%dT%H:%M:%S', tm)
+ tzhours = int(-tz / 60 / 60)
+ tzmins = int(abs(tz) / 60 % 60)
+ timestamp += u('%+03d:%02d') % (tzhours, tzmins)
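+    # e.g. u'2019-01-01T12:00:00+02:00'; the offset reflects the local zone.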
+ return timestamp
+
+def runxunit(tests, xmlpath):
+ """Run tests with xUnit XML output.
+
+ tests should be a sequence of 2-tuples containing the following:
+
+ (test path, test function)
+
+ This function yields a new sequence where each test function is wrapped
+ with a function that writes test results to an xUnit XML file.
+ """
+ suitestart = time.time()
+ timestamp = _timestamp()
+ hostname = socket.gethostname()
+ total, skipped, failed = [0], [0], [0]
+ testcases = []
+
+ for path, test in tests:
+ def testwrapper():
+ """Run test and collect XML output"""
+ total[0] += 1
+
+ start = time.time()
+ refout, postout, diff = test()
+ testtime = time.time() - start
+
+ classname = path.decode(locale.getpreferredencoding(), 'replace')
+ name = os.path.basename(classname)
+
+ if postout is None:
+ skipped[0] += 1
+ testcase = (u(' <testcase classname=%(classname)s\n'
+ ' name=%(name)s\n'
+ ' time="%(time).6f">\n'
+ ' <skipped/>\n'
+ ' </testcase>\n') %
+ {'classname': _quoteattr(classname),
+ 'name': _quoteattr(name),
+ 'time': testtime})
+ elif diff:
+ failed[0] += 1
+ diff = list(diff)
+ diffu = u('').join(l.decode(locale.getpreferredencoding(),
+ 'replace')
+ for l in diff)
+ testcase = (u(' <testcase classname=%(classname)s\n'
+ ' name=%(name)s\n'
+ ' time="%(time).6f">\n'
+ ' <failure>%(diff)s</failure>\n'
+ ' </testcase>\n') %
+ {'classname': _quoteattr(classname),
+ 'name': _quoteattr(name),
+ 'time': testtime,
+ 'diff': _cdata(diffu)})
+ else:
+ testcase = (u(' <testcase classname=%(classname)s\n'
+ ' name=%(name)s\n'
+ ' time="%(time).6f"/>\n') %
+ {'classname': _quoteattr(classname),
+ 'name': _quoteattr(name),
+ 'time': testtime})
+ testcases.append(testcase)
+
+ return refout, postout, diff
+
+ yield path, testwrapper
+
+ suitetime = time.time() - suitestart
+ header = (u('<?xml version="1.0" encoding="utf-8"?>\n'
+ '<testsuite name="cram"\n'
+ ' tests="%(total)d"\n'
+ ' failures="%(failed)d"\n'
+ ' skipped="%(skipped)d"\n'
+ ' timestamp=%(timestamp)s\n'
+ ' hostname=%(hostname)s\n'
+ ' time="%(time).6f">\n') %
+ {'total': total[0],
+ 'failed': failed[0],
+ 'skipped': skipped[0],
+ 'timestamp': _quoteattr(timestamp),
+ 'hostname': _quoteattr(hostname),
+ 'time': suitetime})
+ footer = u('</testsuite>\n')
+
+ xmlfile = open(xmlpath, 'wb')
+ try:
+ xmlfile.write(header.encode('utf-8'))
+ for testcase in testcases:
+ xmlfile.write(testcase.encode('utf-8'))
+ xmlfile.write(footer.encode('utf-8'))
+ finally:
+ xmlfile.close()
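+
+# Pipeline sketch (mirrors main() in _main.py; 'results.xml' is an
+# illustrative path). The XML file is only written once the generator is
+# exhausted, so consume it lazily, calling each wrapper before advancing:
+#
+#   tests = runtests(paths, tmpdir, shell)
+#   for path, runtest in runxunit(tests, 'results.xml'):
+#       runtest()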
diff --git a/third_party/python/diskcache/diskcache-4.1.0.dist-info/LICENSE b/third_party/python/diskcache/diskcache-4.1.0.dist-info/LICENSE
new file mode 100644
index 0000000000..3259b989fd
--- /dev/null
+++ b/third_party/python/diskcache/diskcache-4.1.0.dist-info/LICENSE
@@ -0,0 +1,12 @@
+Copyright 2016-2019 Grant Jenks
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
diff --git a/third_party/python/diskcache/diskcache-4.1.0.dist-info/METADATA b/third_party/python/diskcache/diskcache-4.1.0.dist-info/METADATA
new file mode 100644
index 0000000000..dff6db382f
--- /dev/null
+++ b/third_party/python/diskcache/diskcache-4.1.0.dist-info/METADATA
@@ -0,0 +1,430 @@
+Metadata-Version: 2.1
+Name: diskcache
+Version: 4.1.0
+Summary: Disk Cache -- Disk and file backed persistent cache.
+Home-page: http://www.grantjenks.com/docs/diskcache/
+Author: Grant Jenks
+Author-email: contact@grantjenks.com
+License: Apache 2.0
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+
+DiskCache: Disk Backed Cache
+============================
+
+`DiskCache`_ is an Apache2 licensed disk and file backed cache library, written
+in pure-Python, and compatible with Django.
+
+The cloud-based computing of 2019 puts a premium on memory. Gigabytes of empty
+space are left on disks as processes vie for memory. Among these processes is
+Memcached (and sometimes Redis) which is used as a cache. Wouldn't it be nice
+to leverage empty disk space for caching?
+
+Django is Python's most popular web framework and ships with several caching
+backends. Unfortunately the file-based cache in Django is essentially
+broken. The culling method is random and large caches repeatedly scan a cache
+directory which slows linearly with growth. Can you really allow it to take
+sixty milliseconds to store a key in a cache with a thousand items?
+
+In Python, we can do better. And we can do it in pure-Python!
+
+::
+
+ In [1]: import pylibmc
+ In [2]: client = pylibmc.Client(['127.0.0.1'], binary=True)
+ In [3]: client[b'key'] = b'value'
+ In [4]: %timeit client[b'key']
+
+ 10000 loops, best of 3: 25.4 µs per loop
+
+ In [5]: import diskcache as dc
+ In [6]: cache = dc.Cache('tmp')
+ In [7]: cache[b'key'] = b'value'
+ In [8]: %timeit cache[b'key']
+
+ 100000 loops, best of 3: 11.8 µs per loop
+
+**Note:** Micro-benchmarks have their place but are not a substitute for real
+measurements. DiskCache offers cache benchmarks to defend its performance
+claims. Micro-optimizations are avoided but your mileage may vary.
+
+DiskCache efficiently makes gigabytes of storage space available for
+caching. By leveraging rock-solid database libraries and memory-mapped files,
+cache performance can match and exceed industry-standard solutions. There's no
+need for a C compiler or running another process. Performance is a feature and
+testing has 100% coverage with unit tests and hours of stress testing.
+
+Testimonials
+------------
+
+`Daren Hasenkamp`_, Founder --
+
+ "It's a useful, simple API, just like I love about Redis. It has reduced
+ the amount of queries hitting my Elasticsearch cluster by over 25% for a
+ website that gets over a million users/day (100+ hits/second)."
+
+`Mathias Petermann`_, Senior Linux System Engineer --
+
+ "I implemented it into a wrapper for our Ansible lookup modules and we were
+ able to speed up some Ansible runs by almost 3 times. DiskCache is saving
+ us a ton of time."
+
+Does your company or website use `DiskCache`_? Send us a `message
+<contact@grantjenks.com>`_ and let us know.
+
+.. _`Daren Hasenkamp`: https://www.linkedin.com/in/daren-hasenkamp-93006438/
+.. _`Mathias Petermann`: https://www.linkedin.com/in/mathias-petermann-a8aa273b/
+
+Features
+--------
+
+- Pure-Python
+- Fully Documented
+- Benchmark comparisons (alternatives, Django cache backends)
+- 100% test coverage
+- Hours of stress testing
+- Performance matters
+- Django compatible API
+- Thread-safe and process-safe
+- Supports multiple eviction policies (LRU and LFU included)
+- Keys support "tag" metadata and eviction
+- Developed on Python 3.7
+- Tested on CPython 2.7, 3.4, 3.5, 3.6, 3.7 and PyPy
+- Tested on Linux, Mac OS X, and Windows
+- Tested using Travis CI and AppVeyor CI
+
+.. image:: https://api.travis-ci.org/grantjenks/python-diskcache.svg?branch=master
+ :target: http://www.grantjenks.com/docs/diskcache/
+
+.. image:: https://ci.appveyor.com/api/projects/status/github/grantjenks/python-diskcache?branch=master&svg=true
+ :target: http://www.grantjenks.com/docs/diskcache/
+
+Quickstart
+----------
+
+Installing `DiskCache`_ is simple with `pip <http://www.pip-installer.org/>`_::
+
+ $ pip install diskcache
+
+You can access documentation in the interpreter with Python's built-in help
+function::
+
+ >>> import diskcache
+ >>> help(diskcache)
+
+The core of `DiskCache`_ is three data types intended for caching. `Cache`_
+objects manage a SQLite database and filesystem directory to store key and
+value pairs. `FanoutCache`_ provides a sharding layer to utilize multiple
+caches and `DjangoCache`_ integrates that with `Django`_::
+
+ >>> from diskcache import Cache, FanoutCache, DjangoCache
+ >>> help(Cache)
+ >>> help(FanoutCache)
+ >>> help(DjangoCache)
+
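+As a quick illustration (a minimal sketch; a `Cache`_ behaves like a mutable
+mapping)::
+
+    >>> cache = Cache('tmp')
+    >>> cache['key'] = 'value'
+    >>> cache['key']
+    'value'
+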
+Built atop the caching data types are `Deque`_ and `Index`_, which work as
+cross-process, persistent replacements for Python's ``collections.deque`` and
+``dict``. These implement the sequence and mapping container base classes::
+
+ >>> from diskcache import Deque, Index
+ >>> help(Deque)
+ >>> help(Index)
+
+Finally, a number of `recipes`_ for cross-process synchronization are provided
+using an underlying cache. Features like memoization with cache stampede
+prevention, cross-process locking, and cross-process throttling are available::
+
+ >>> from diskcache import memoize_stampede, Lock, throttle
+ >>> help(memoize_stampede)
+ >>> help(Lock)
+ >>> help(throttle)
+
+Python's docstrings are a quick way to get started but not intended as a
+replacement for the `DiskCache Tutorial`_ and `DiskCache API Reference`_.
+
+.. _`Cache`: http://www.grantjenks.com/docs/diskcache/tutorial.html#cache
+.. _`FanoutCache`: http://www.grantjenks.com/docs/diskcache/tutorial.html#fanoutcache
+.. _`DjangoCache`: http://www.grantjenks.com/docs/diskcache/tutorial.html#djangocache
+.. _`Django`: https://www.djangoproject.com/
+.. _`Deque`: http://www.grantjenks.com/docs/diskcache/tutorial.html#deque
+.. _`Index`: http://www.grantjenks.com/docs/diskcache/tutorial.html#index
+.. _`recipes`: http://www.grantjenks.com/docs/diskcache/tutorial.html#recipes
+
+User Guide
+----------
+
+For those wanting more details, this part of the documentation describes the
+tutorial, benchmarks, API, and development.
+
+* `DiskCache Tutorial`_
+* `DiskCache Cache Benchmarks`_
+* `DiskCache DjangoCache Benchmarks`_
+* `Case Study: Web Crawler`_
+* `Case Study: Landing Page Caching`_
+* `Talk: All Things Cached - SF Python 2017 Meetup`_
+* `DiskCache API Reference`_
+* `DiskCache Development`_
+
+.. _`DiskCache Tutorial`: http://www.grantjenks.com/docs/diskcache/tutorial.html
+.. _`DiskCache Cache Benchmarks`: http://www.grantjenks.com/docs/diskcache/cache-benchmarks.html
+.. _`DiskCache DjangoCache Benchmarks`: http://www.grantjenks.com/docs/diskcache/djangocache-benchmarks.html
+.. _`Talk: All Things Cached - SF Python 2017 Meetup`: http://www.grantjenks.com/docs/diskcache/sf-python-2017-meetup-talk.html
+.. _`Case Study: Web Crawler`: http://www.grantjenks.com/docs/diskcache/case-study-web-crawler.html
+.. _`Case Study: Landing Page Caching`: http://www.grantjenks.com/docs/diskcache/case-study-landing-page-caching.html
+.. _`DiskCache API Reference`: http://www.grantjenks.com/docs/diskcache/api.html
+.. _`DiskCache Development`: http://www.grantjenks.com/docs/diskcache/development.html
+
+Comparisons
+-----------
+
+Comparisons to popular projects related to `DiskCache`_.
+
+Key-Value Stores
+................
+
+`DiskCache`_ is mostly a simple key-value store. Feature comparisons with four
+other projects are shown in the tables below.
+
+* `dbm`_ is part of Python's standard library and implements a generic
+ interface to variants of the DBM database — dbm.gnu or dbm.ndbm. If none of
+ these modules is installed, the slow-but-simple dbm.dumb is used.
+* `shelve`_ is part of Python's standard library and implements a “shelf” as a
+ persistent, dictionary-like object. The difference with “dbm” databases is
+ that the values can be anything that the pickle module can handle.
+* `sqlitedict`_ is a lightweight wrapper around Python's sqlite3 database with
+ a simple, Pythonic dict-like interface and support for multi-thread
+ access. Keys are arbitrary strings, values arbitrary pickle-able objects.
+* `pickleDB`_ is a lightweight and simple key-value store. It is built upon
+  Python's simplejson module and was inspired by Redis. It is licensed under
+  the three-clause BSD license.
+
+.. _`dbm`: https://docs.python.org/3/library/dbm.html
+.. _`shelve`: https://docs.python.org/3/library/shelve.html
+.. _`sqlitedict`: https://github.com/RaRe-Technologies/sqlitedict
+.. _`pickleDB`: https://pythonhosted.org/pickleDB/
+
+**Features**
+
+================ ============= ========= ========= ============ ============
+Feature diskcache dbm shelve sqlitedict pickleDB
+================ ============= ========= ========= ============ ============
+Atomic? Always Maybe Maybe Maybe No
+Persistent? Yes Yes Yes Yes Yes
+Thread-safe? Yes No No Yes No
+Process-safe? Yes No No Maybe No
+Backend? SQLite DBM DBM SQLite File
+Serialization? Customizable None Pickle Customizable JSON
+Data Types? Mapping/Deque Mapping Mapping Mapping Mapping
+Ordering? Insert/Sorted None None None None
+Eviction? LRU/LFU/more None None None None
+Vacuum? Automatic Maybe Maybe Manual Automatic
+Transactions? Yes No No Maybe No
+Multiprocessing? Yes No No No No
+Forkable? Yes No No No No
+Metadata? Yes No No No No
+================ ============= ========= ========= ============ ============
+
+**Quality**
+
+================ ============= ========= ========= ============ ============
+Project diskcache dbm shelve sqlitedict pickleDB
+================ ============= ========= ========= ============ ============
+Tests? Yes Yes Yes Yes Yes
+Coverage? Yes Yes Yes Yes No
+Stress? Yes No No No No
+CI Tests? Linux/Windows Yes Yes Linux No
+Python? 2/3/PyPy All All 2/3 2/3
+License? Apache2 Python Python Apache2 3-Clause BSD
+Docs? Extensive Summary Summary Readme Summary
+Benchmarks? Yes No No No No
+Sources? GitHub GitHub GitHub GitHub GitHub
+Pure-Python? Yes Yes Yes Yes Yes
+Server? No No No No No
+Integrations? Django None None None None
+================ ============= ========= ========= ============ ============
+
+**Timings**
+
+These are rough measurements. See `DiskCache Cache Benchmarks`_ for more
+rigorous data.
+
+================ ============= ========= ========= ============ ============
+Project diskcache dbm shelve sqlitedict pickleDB
+================ ============= ========= ========= ============ ============
+get 25 µs 36 µs 41 µs 513 µs 92 µs
+set 198 µs 900 µs 928 µs 697 µs 1,020 µs
+delete 248 µs 740 µs 702 µs 1,717 µs 1,020 µs
+================ ============= ========= ========= ============ ============
+
+Caching Libraries
+.................
+
+* `joblib.Memory`_ provides caching functions and works by explicitly saving
+ the inputs and outputs to files. It is designed to work with non-hashable and
+ potentially large input and output data types such as numpy arrays.
+* `klepto`_ extends Python’s `lru_cache` to utilize different keymaps and
+  alternate caching algorithms, such as `lfu_cache` and `mru_cache`. Klepto
+  uses a simple dictionary-style interface for all caches and archives.
+
+.. _`klepto`: https://pypi.org/project/klepto/
+.. _`joblib.Memory`: https://joblib.readthedocs.io/en/latest/memory.html
+
+Data Structures
+...............
+
+* `dict`_ is a mapping object that maps hashable keys to arbitrary
+ values. Mappings are mutable objects. There is currently only one standard
+ Python mapping type, the dictionary.
+* `pandas`_ is a Python package providing fast, flexible, and expressive data
+ structures designed to make working with “relational” or “labeled” data both
+ easy and intuitive.
+* `Sorted Containers`_ is an Apache2 licensed sorted collections library,
+ written in pure-Python, and fast as C-extensions. Sorted Containers
+ implements sorted list, sorted dictionary, and sorted set data types.
+
+.. _`dict`: https://docs.python.org/3/library/stdtypes.html#typesmapping
+.. _`pandas`: https://pandas.pydata.org/
+.. _`Sorted Containers`: http://www.grantjenks.com/docs/sortedcontainers/
+
+Pure-Python Databases
+.....................
+
+* `ZODB`_ supports an isomorphic interface for database operations, which
+  means there's little impact on your code to make objects persistent and
+  there's no database mapper that partially hides the database.
+* `CodernityDB`_ is an open source, pure-Python, multi-platform, schema-less,
+ NoSQL database and includes an HTTP server version, and a Python client
+ library that aims to be 100% compatible with the embedded version.
+* `TinyDB`_ is a tiny, document oriented database optimized for your
+ happiness. If you need a simple database with a clean API that just works
+ without lots of configuration, TinyDB might be the right choice for you.
+
+.. _`ZODB`: http://www.zodb.org/
+.. _`CodernityDB`: https://pypi.org/project/CodernityDB/
+.. _`TinyDB`: https://tinydb.readthedocs.io/
+
+Object Relational Mappings (ORM)
+................................
+
+* `Django ORM`_ provides models that are the single, definitive source of
+ information about data and contains the essential fields and behaviors of the
+ stored data. Generally, each model maps to a single SQL database table.
+* `SQLAlchemy`_ is the Python SQL toolkit and Object Relational Mapper that
+ gives application developers the full power and flexibility of SQL. It
+ provides a full suite of well known enterprise-level persistence patterns.
+* `Peewee`_ is a simple and small ORM. It has few (but expressive) concepts,
+ making it easy to learn and intuitive to use. Peewee supports Sqlite, MySQL,
+ and PostgreSQL with tons of extensions.
+* `SQLObject`_ is a popular Object Relational Manager for providing an object
+ interface to your database, with tables as classes, rows as instances, and
+ columns as attributes.
+* `Pony ORM`_ is a Python ORM with beautiful query syntax. Use Python syntax
+ for interacting with the database. Pony translates such queries into SQL and
+ executes them in the database in the most efficient way.
+
+.. _`Django ORM`: https://docs.djangoproject.com/en/dev/topics/db/
+.. _`SQLAlchemy`: https://www.sqlalchemy.org/
+.. _`Peewee`: http://docs.peewee-orm.com/
+.. _`dataset`: https://dataset.readthedocs.io/
+.. _`SQLObject`: http://sqlobject.org/
+.. _`Pony ORM`: https://ponyorm.com/
+
+SQL Databases
+.............
+
+* `SQLite`_ is part of Python's standard library and provides a lightweight
+ disk-based database that doesn’t require a separate server process and allows
+ accessing the database using a nonstandard variant of the SQL query language.
+* `MySQL`_ is one of the world’s most popular open source databases and has
+ become a leading database choice for web-based applications. MySQL includes a
+ standardized database driver for Python platforms and development.
+* `PostgreSQL`_ is a powerful, open source object-relational database system
+ with over 30 years of active development. Psycopg is the most popular
+ PostgreSQL adapter for the Python programming language.
+* `Oracle DB`_ is a relational database management system (RDBMS) from the
+ Oracle Corporation. Originally developed in 1977, Oracle DB is one of the
+ most trusted and widely used enterprise relational database engines.
+* `Microsoft SQL Server`_ is a relational database management system developed
+ by Microsoft. As a database server, it stores and retrieves data as requested
+ by other software applications.
+
+.. _`SQLite`: https://docs.python.org/3/library/sqlite3.html
+.. _`MySQL`: https://dev.mysql.com/downloads/connector/python/
+.. _`PostgreSQL`: http://initd.org/psycopg/
+.. _`Oracle DB`: https://pypi.org/project/cx_Oracle/
+.. _`Microsoft SQL Server`: https://pypi.org/project/pyodbc/
+
+Other Databases
+...............
+
+* `Memcached`_ is a free and open source, high-performance, distributed
+  memory object caching system, generic in nature, but intended for use in
+  speeding up dynamic web applications by alleviating database load.
+* `Redis`_ is an open source, in-memory data structure store, used as a
+ database, cache and message broker. It supports data structures such as
+ strings, hashes, lists, sets, sorted sets with range queries, and more.
+* `MongoDB`_ is a cross-platform document-oriented database program. Classified
+  as a NoSQL database program, MongoDB uses JSON-like documents with optional
+  schemas. PyMongo is the recommended way to work with MongoDB from Python.
+* `LMDB`_ is a lightning-fast, memory-mapped database. With memory-mapped
+ files, it has the read performance of a pure in-memory database while
+ retaining the persistence of standard disk-based databases.
+* `BerkeleyDB`_ is a software library intended to provide a high-performance
+ embedded database for key/value data. Berkeley DB is a programmatic toolkit
+ that provides built-in database support for desktop and server applications.
+* `LevelDB`_ is a fast key-value storage library written at Google that
+ provides an ordered mapping from string keys to string values. Data is stored
+ sorted by key and users can provide a custom comparison function.
+
+.. _`Memcached`: https://pypi.org/project/python-memcached/
+.. _`MongoDB`: https://api.mongodb.com/python/current/
+.. _`Redis`: https://redis.io/clients#python
+.. _`LMDB`: https://lmdb.readthedocs.io/
+.. _`BerkeleyDB`: https://pypi.org/project/bsddb3/
+.. _`LevelDB`: https://plyvel.readthedocs.io/
+
+Reference
+---------
+
+* `DiskCache Documentation`_
+* `DiskCache at PyPI`_
+* `DiskCache at GitHub`_
+* `DiskCache Issue Tracker`_
+
+.. _`DiskCache Documentation`: http://www.grantjenks.com/docs/diskcache/
+.. _`DiskCache at PyPI`: https://pypi.python.org/pypi/diskcache/
+.. _`DiskCache at GitHub`: https://github.com/grantjenks/python-diskcache/
+.. _`DiskCache Issue Tracker`: https://github.com/grantjenks/python-diskcache/issues/
+
+License
+-------
+
+Copyright 2016-2019 Grant Jenks
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+
+.. _`DiskCache`: http://www.grantjenks.com/docs/diskcache/
+
+
diff --git a/third_party/python/diskcache/diskcache-4.1.0.dist-info/RECORD b/third_party/python/diskcache/diskcache-4.1.0.dist-info/RECORD
new file mode 100644
index 0000000000..d4bca0ad45
--- /dev/null
+++ b/third_party/python/diskcache/diskcache-4.1.0.dist-info/RECORD
@@ -0,0 +1,12 @@
+diskcache/__init__.py,sha256=6RtBwXsSbdiZ-H44I7nLwF2-1VyVMHq1cJ5ynfnQt-E,1234
+diskcache/cli.py,sha256=JzkI2KtJJ0VRfBAq69lTkItoLwg4vOrTezczetSCfaY,40
+diskcache/core.py,sha256=nZRqL-VuJZZw1Ll5ADBzGivpIN_vXTDC8KSOyQ_XvjI,82426
+diskcache/djangocache.py,sha256=vAWT1FdmvHoHas244yoOblc6GhvozgLuFyjASMFPaK0,15488
+diskcache/fanout.py,sha256=Ha5C8BpClAHKEi6cJvJ5HvmAKlNwfiMpjb_az_hIJE0,21271
+diskcache/persistent.py,sha256=hgsS9-LymHsBeuNx0fBPOsiobvpJmGOIxT1T67BQUYw,37450
+diskcache/recipes.py,sha256=VQty-6AVoXcc6hfp1QOFvQZSf8W5AbQBFe1N3QlyILk,13849
+diskcache-4.1.0.dist-info/LICENSE,sha256=KBQYvOJPaViOo1FzqVpqPSGqW0jDZG6KiE8kLKMzNkw,559
+diskcache-4.1.0.dist-info/METADATA,sha256=wWGlNFCEiyWQ6R5zq3m3RFQbxELo6oJyrpryNir-yFo,19886
+diskcache-4.1.0.dist-info/WHEEL,sha256=h_aVn5OB2IERUjMbi2pucmR_zzWJtk303YXvhh60NJ8,110
+diskcache-4.1.0.dist-info/top_level.txt,sha256=A5fqg_AHgOQc_0o1NZ-Uo5Bsb7CV3fR99J-p1-F4yuA,10
+diskcache-4.1.0.dist-info/RECORD,,
diff --git a/third_party/python/diskcache/diskcache-4.1.0.dist-info/WHEEL b/third_party/python/diskcache/diskcache-4.1.0.dist-info/WHEEL
new file mode 100644
index 0000000000..78e6f69d1d
--- /dev/null
+++ b/third_party/python/diskcache/diskcache-4.1.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.4)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/diskcache/diskcache-4.1.0.dist-info/top_level.txt b/third_party/python/diskcache/diskcache-4.1.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..91667d46b1
--- /dev/null
+++ b/third_party/python/diskcache/diskcache-4.1.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+diskcache
diff --git a/third_party/python/diskcache/diskcache/__init__.py b/third_party/python/diskcache/diskcache/__init__.py
new file mode 100644
index 0000000000..192524e6ce
--- /dev/null
+++ b/third_party/python/diskcache/diskcache/__init__.py
@@ -0,0 +1,51 @@
+"""
+DiskCache API Reference
+=======================
+
+The :doc:`tutorial` provides a helpful walkthrough of most methods.
+
+"""
+
+from .core import Cache, Disk, EmptyDirWarning, JSONDisk, UnknownFileWarning, Timeout
+from .core import DEFAULT_SETTINGS, ENOVAL, EVICTION_POLICY, UNKNOWN
+from .fanout import FanoutCache
+from .persistent import Deque, Index
+from .recipes import Averager, BoundedSemaphore, Lock, RLock
+from .recipes import barrier, memoize_stampede, throttle
+
+__all__ = [
+ 'Averager',
+ 'BoundedSemaphore',
+ 'Cache',
+ 'DEFAULT_SETTINGS',
+ 'Deque',
+ 'Disk',
+ 'ENOVAL',
+ 'EVICTION_POLICY',
+ 'EmptyDirWarning',
+ 'FanoutCache',
+ 'Index',
+ 'JSONDisk',
+ 'Lock',
+ 'RLock',
+ 'Timeout',
+ 'UNKNOWN',
+ 'UnknownFileWarning',
+ 'barrier',
+ 'memoize_stampede',
+ 'throttle',
+]
+
+try:
+ from .djangocache import DjangoCache # pylint: disable=wrong-import-position
+ __all__.append('DjangoCache')
+except Exception: # pylint: disable=broad-except
+ # Django not installed or not setup so ignore.
+ pass
+
+__title__ = 'diskcache'
+__version__ = '4.1.0'
+__build__ = 0x040100
+__author__ = 'Grant Jenks'
+__license__ = 'Apache 2.0'
+__copyright__ = 'Copyright 2016-2018 Grant Jenks'
diff --git a/third_party/python/diskcache/diskcache/cli.py b/third_party/python/diskcache/diskcache/cli.py
new file mode 100644
index 0000000000..44bffebfcc
--- /dev/null
+++ b/third_party/python/diskcache/diskcache/cli.py
@@ -0,0 +1 @@
+"Command line interface to disk cache."
diff --git a/third_party/python/diskcache/diskcache/core.py b/third_party/python/diskcache/diskcache/core.py
new file mode 100644
index 0000000000..0c8fd2c745
--- /dev/null
+++ b/third_party/python/diskcache/diskcache/core.py
@@ -0,0 +1,2481 @@
+"""Core disk and file backed cache API.
+
+"""
+
+import codecs
+import contextlib as cl
+import errno
+import functools as ft
+import io
+import json
+import os
+import os.path as op
+import pickletools
+import sqlite3
+import struct
+import sys
+import tempfile
+import threading
+import time
+import warnings
+import zlib
+
+############################################################################
+# BEGIN Python 2/3 Shims
+############################################################################
+
+if sys.hexversion < 0x03000000:
+ import cPickle as pickle # pylint: disable=import-error
+ # ISSUE #25 Fix for http://bugs.python.org/issue10211
+ from cStringIO import StringIO as BytesIO # pylint: disable=import-error
+ from thread import get_ident # pylint: disable=import-error,no-name-in-module
+ TextType = unicode # pylint: disable=invalid-name,undefined-variable
+ BytesType = str
+ INT_TYPES = int, long # pylint: disable=undefined-variable
+ range = xrange # pylint: disable=redefined-builtin,invalid-name,undefined-variable
+ io_open = io.open # pylint: disable=invalid-name
+else:
+ import pickle
+ from io import BytesIO # pylint: disable=ungrouped-imports
+ from threading import get_ident
+ TextType = str
+ BytesType = bytes
+ INT_TYPES = (int,)
+ io_open = open # pylint: disable=invalid-name
+
+def full_name(func):
+ "Return full name of `func` by adding the module and function name."
+ try:
+ # The __qualname__ attribute is only available in Python 3.3 and later.
+ # GrantJ 2019-03-29 Remove after support for Python 2 is dropped.
+ name = func.__qualname__
+ except AttributeError:
+ name = func.__name__
+ return func.__module__ + '.' + name
+
+############################################################################
+# END Python 2/3 Shims
+############################################################################
+
+try:
+ WindowsError
+except NameError:
+ class WindowsError(Exception):
+ "Windows error place-holder on platforms without support."
+
+class Constant(tuple):
+ "Pretty display of immutable constant."
+ def __new__(cls, name):
+ return tuple.__new__(cls, (name,))
+
+ def __repr__(self):
+ return '%s' % self[0]
+
+DBNAME = 'cache.db'
+ENOVAL = Constant('ENOVAL')
+UNKNOWN = Constant('UNKNOWN')
+
+MODE_NONE = 0
+MODE_RAW = 1
+MODE_BINARY = 2
+MODE_TEXT = 3
+MODE_PICKLE = 4
+
+DEFAULT_SETTINGS = {
+ u'statistics': 0, # False
+ u'tag_index': 0, # False
+ u'eviction_policy': u'least-recently-stored',
+ u'size_limit': 2 ** 30, # 1gb
+ u'cull_limit': 10,
+ u'sqlite_auto_vacuum': 1, # FULL
+ u'sqlite_cache_size': 2 ** 13, # 8,192 pages
+ u'sqlite_journal_mode': u'wal',
+ u'sqlite_mmap_size': 2 ** 26, # 64mb
+ u'sqlite_synchronous': 1, # NORMAL
+ u'disk_min_file_size': 2 ** 15, # 32kb
+ u'disk_pickle_protocol': pickle.HIGHEST_PROTOCOL,
+}
+
+METADATA = {
+ u'count': 0,
+ u'size': 0,
+ u'hits': 0,
+ u'misses': 0,
+}
+
+EVICTION_POLICY = {
+ 'none': {
+ 'init': None,
+ 'get': None,
+ 'cull': None,
+ },
+ 'least-recently-stored': {
+ 'init': (
+ 'CREATE INDEX IF NOT EXISTS Cache_store_time ON'
+ ' Cache (store_time)'
+ ),
+ 'get': None,
+ 'cull': 'SELECT {fields} FROM Cache ORDER BY store_time LIMIT ?',
+ },
+ 'least-recently-used': {
+ 'init': (
+ 'CREATE INDEX IF NOT EXISTS Cache_access_time ON'
+ ' Cache (access_time)'
+ ),
+ 'get': 'access_time = {now}',
+ 'cull': 'SELECT {fields} FROM Cache ORDER BY access_time LIMIT ?',
+ },
+ 'least-frequently-used': {
+ 'init': (
+ 'CREATE INDEX IF NOT EXISTS Cache_access_count ON'
+ ' Cache (access_count)'
+ ),
+ 'get': 'access_count = access_count + 1',
+ 'cull': 'SELECT {fields} FROM Cache ORDER BY access_count LIMIT ?',
+ },
+}
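+
+# Note: the '{fields}' and '{now}' placeholders in the policy templates above
+# are filled in by the cull and access machinery elsewhere in this module.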
+
+
+class Disk(object):
+ "Cache key and value serialization for SQLite database and files."
+ def __init__(self, directory, min_file_size=0, pickle_protocol=0):
+ """Initialize disk instance.
+
+ :param str directory: directory path
+ :param int min_file_size: minimum size for file use
+ :param int pickle_protocol: pickle protocol for serialization
+
+ """
+ self._directory = directory
+ self.min_file_size = min_file_size
+ self.pickle_protocol = pickle_protocol
+
+
+ def hash(self, key):
+ """Compute portable hash for `key`.
+
+ :param key: key to hash
+ :return: hash value
+
+ """
+ mask = 0xFFFFFFFF
+ disk_key, _ = self.put(key)
+ type_disk_key = type(disk_key)
+
+ if type_disk_key is sqlite3.Binary:
+ return zlib.adler32(disk_key) & mask
+ elif type_disk_key is TextType:
+ return zlib.adler32(disk_key.encode('utf-8')) & mask # pylint: disable=no-member
+ elif type_disk_key in INT_TYPES:
+ return disk_key % mask
+ else:
+ assert type_disk_key is float
+ return zlib.adler32(struct.pack('!d', disk_key)) & mask
+
+
+ def put(self, key):
+ """Convert `key` to fields key and raw for Cache table.
+
+ :param key: key to convert
+ :return: (database key, raw boolean) pair
+
+ """
+ # pylint: disable=bad-continuation,unidiomatic-typecheck
+ type_key = type(key)
+
+ if type_key is BytesType:
+ return sqlite3.Binary(key), True
+ elif ((type_key is TextType)
+ or (type_key in INT_TYPES
+ and -9223372036854775808 <= key <= 9223372036854775807)
+ or (type_key is float)):
+ return key, True
+ else:
+ data = pickle.dumps(key, protocol=self.pickle_protocol)
+ result = pickletools.optimize(data)
+ return sqlite3.Binary(result), False
+
+
+ def get(self, key, raw):
+ """Convert fields `key` and `raw` from Cache table to key.
+
+ :param key: database key to convert
+ :param bool raw: flag indicating raw database storage
+ :return: corresponding Python key
+
+ """
+ # pylint: disable=no-self-use,unidiomatic-typecheck
+ if raw:
+ return BytesType(key) if type(key) is sqlite3.Binary else key
+ else:
+ return pickle.load(BytesIO(key))
+
+
+ def store(self, value, read, key=UNKNOWN):
+ """Convert `value` to fields size, mode, filename, and value for Cache
+ table.
+
+ :param value: value to convert
+ :param bool read: True when value is file-like object
+ :param key: key for item (default UNKNOWN)
+ :return: (size, mode, filename, value) tuple for Cache table
+
+ """
+ # pylint: disable=unidiomatic-typecheck
+ type_value = type(value)
+ min_file_size = self.min_file_size
+
+ if ((type_value is TextType and len(value) < min_file_size)
+ or (type_value in INT_TYPES
+ and -9223372036854775808 <= value <= 9223372036854775807)
+ or (type_value is float)):
+ return 0, MODE_RAW, None, value
+ elif type_value is BytesType:
+ if len(value) < min_file_size:
+ return 0, MODE_RAW, None, sqlite3.Binary(value)
+ else:
+ filename, full_path = self.filename(key, value)
+
+ with open(full_path, 'wb') as writer:
+ writer.write(value)
+
+ return len(value), MODE_BINARY, filename, None
+ elif type_value is TextType:
+ filename, full_path = self.filename(key, value)
+
+ with io_open(full_path, 'w', encoding='UTF-8') as writer:
+ writer.write(value)
+
+ size = op.getsize(full_path)
+ return size, MODE_TEXT, filename, None
+ elif read:
+ size = 0
+ reader = ft.partial(value.read, 2 ** 22)
+ filename, full_path = self.filename(key, value)
+
+ with open(full_path, 'wb') as writer:
+ for chunk in iter(reader, b''):
+ size += len(chunk)
+ writer.write(chunk)
+
+ return size, MODE_BINARY, filename, None
+ else:
+ result = pickle.dumps(value, protocol=self.pickle_protocol)
+
+ if len(result) < min_file_size:
+ return 0, MODE_PICKLE, None, sqlite3.Binary(result)
+ else:
+ filename, full_path = self.filename(key, value)
+
+ with open(full_path, 'wb') as writer:
+ writer.write(result)
+
+ return len(result), MODE_PICKLE, filename, None
+
+
+ def fetch(self, mode, filename, value, read):
+ """Convert fields `mode`, `filename`, and `value` from Cache table to
+ value.
+
+ :param int mode: value mode raw, binary, text, or pickle
+ :param str filename: filename of corresponding value
+ :param value: database value
+ :param bool read: when True, return an open file handle
+ :return: corresponding Python value
+
+ """
+ # pylint: disable=no-self-use,unidiomatic-typecheck
+ if mode == MODE_RAW:
+ return BytesType(value) if type(value) is sqlite3.Binary else value
+ elif mode == MODE_BINARY:
+ if read:
+ return open(op.join(self._directory, filename), 'rb')
+ else:
+ with open(op.join(self._directory, filename), 'rb') as reader:
+ return reader.read()
+ elif mode == MODE_TEXT:
+ full_path = op.join(self._directory, filename)
+ with io_open(full_path, 'r', encoding='UTF-8') as reader:
+ return reader.read()
+ elif mode == MODE_PICKLE:
+ if value is None:
+ with open(op.join(self._directory, filename), 'rb') as reader:
+ return pickle.load(reader)
+ else:
+ return pickle.load(BytesIO(value))
+
+
+ def filename(self, key=UNKNOWN, value=UNKNOWN):
+ """Return filename and full-path tuple for file storage.
+
+ Filename will be a randomly generated 28 character hexadecimal string
+ with ".val" suffixed. Two levels of sub-directories will be used to
+ reduce the size of directories. On older filesystems, lookups in
+ directories with many files may be slow.
+
+ The default implementation ignores the `key` and `value` parameters.
+
+ In some scenarios, for example :meth:`Cache.push
+ <diskcache.Cache.push>`, the `key` or `value` may not be known when the
+ item is stored in the cache.
+
+ :param key: key for item (default UNKNOWN)
+ :param value: value for item (default UNKNOWN)
+
+ """
+ # pylint: disable=unused-argument
+ hex_name = codecs.encode(os.urandom(16), 'hex').decode('utf-8')
+ sub_dir = op.join(hex_name[:2], hex_name[2:4])
+ name = hex_name[4:] + '.val'
+ directory = op.join(self._directory, sub_dir)
+
+ try:
+ os.makedirs(directory)
+ except OSError as error:
+ if error.errno != errno.EEXIST:
+ raise
+
+ filename = op.join(sub_dir, name)
+ full_path = op.join(self._directory, filename)
+ return filename, full_path
+
+
+ def remove(self, filename):
+ """Remove a file given by `filename`.
+
+ This method is cross-thread and cross-process safe. If an "error no
+ entry" occurs, it is suppressed.
+
+ :param str filename: relative path to file
+
+ """
+ full_path = op.join(self._directory, filename)
+
+ try:
+ os.remove(full_path)
+ except WindowsError:
+ pass
+ except OSError as error:
+ if error.errno != errno.ENOENT:
+ # ENOENT may occur if two caches attempt to delete the same
+ # file at the same time.
+ raise
+
+
+class JSONDisk(Disk):
+ "Cache key and value using JSON serialization with zlib compression."
+ def __init__(self, directory, compress_level=1, **kwargs):
+ """Initialize JSON disk instance.
+
+ Keys and values are compressed using the zlib library. The
+ `compress_level` is an integer from 0 to 9 controlling the level of
+ compression; 1 is fastest and produces the least compression, 9 is
+ slowest and produces the most compression, and 0 is no compression.
+
+ :param str directory: directory path
+ :param int compress_level: zlib compression level (default 1)
+ :param kwargs: super class arguments
+
+ """
+ self.compress_level = compress_level
+ super(JSONDisk, self).__init__(directory, **kwargs)
+
+
+ def put(self, key):
+ json_bytes = json.dumps(key).encode('utf-8')
+ data = zlib.compress(json_bytes, self.compress_level)
+ return super(JSONDisk, self).put(data)
+
+
+ def get(self, key, raw):
+ data = super(JSONDisk, self).get(key, raw)
+ return json.loads(zlib.decompress(data).decode('utf-8'))
+
+
+ def store(self, value, read, key=UNKNOWN):
+ if not read:
+ json_bytes = json.dumps(value).encode('utf-8')
+ value = zlib.compress(json_bytes, self.compress_level)
+ return super(JSONDisk, self).store(value, read, key=key)
+
+
+ def fetch(self, mode, filename, value, read):
+ data = super(JSONDisk, self).fetch(mode, filename, value, read)
+ if not read:
+ data = json.loads(zlib.decompress(data).decode('utf-8'))
+ return data
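+
+# Usage sketch: Cache.__init__ (below) strips the "disk_" prefix from matching
+# settings and forwards them to the Disk constructor, so a compressed JSON
+# cache can be created as:
+#
+#   cache = Cache('tmp', disk=JSONDisk, disk_compress_level=6)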
+
+
+class Timeout(Exception):
+ "Database timeout expired."
+
+
+class UnknownFileWarning(UserWarning):
+ "Warning used by Cache.check for unknown files."
+
+
+class EmptyDirWarning(UserWarning):
+ "Warning used by Cache.check for empty directories."
+
+
+def args_to_key(base, args, kwargs, typed):
+ """Create cache key out of function arguments.
+
+ :param tuple base: base of key
+ :param tuple args: function arguments
+ :param dict kwargs: function keyword arguments
+ :param bool typed: include types in cache key
+ :return: cache key tuple
+
+ """
+ key = base + args
+
+ if kwargs:
+ key += (ENOVAL,)
+ sorted_items = sorted(kwargs.items())
+
+ for item in sorted_items:
+ key += item
+
+ if typed:
+ key += tuple(type(arg) for arg in args)
+
+ if kwargs:
+ key += tuple(type(value) for _, value in sorted_items)
+
+ return key
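+
+# For example:
+#
+#   args_to_key(('pkg.func',), (1,), {'b': 2}, typed=False)
+#   -> ('pkg.func', 1, ENOVAL, 'b', 2)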
+
+
+class Cache(object):
+ "Disk and file backed cache."
+ # pylint: disable=bad-continuation
+ def __init__(self, directory=None, timeout=60, disk=Disk, **settings):
+ """Initialize cache instance.
+
+ :param str directory: cache directory
+ :param float timeout: SQLite connection timeout
+ :param disk: Disk type or subclass for serialization
+ :param settings: any of DEFAULT_SETTINGS
+
+ """
+ try:
+ assert issubclass(disk, Disk)
+ except (TypeError, AssertionError):
+ raise ValueError('disk must subclass diskcache.Disk')
+
+ if directory is None:
+ directory = tempfile.mkdtemp(prefix='diskcache-')
+ directory = op.expanduser(directory)
+ directory = op.expandvars(directory)
+
+ self._directory = directory
+ self._timeout = 0 # Manually handle retries during initialization.
+ self._local = threading.local()
+ self._txn_id = None
+
+ if not op.isdir(directory):
+ try:
+ os.makedirs(directory, 0o755)
+ except OSError as error:
+ if error.errno != errno.EEXIST:
+ raise EnvironmentError(
+ error.errno,
+ 'Cache directory "%s" does not exist'
+ ' and could not be created' % self._directory
+ )
+
+ sql = self._sql_retry
+
+ # Setup Settings table.
+
+ try:
+ current_settings = dict(sql(
+ 'SELECT key, value FROM Settings'
+ ).fetchall())
+ except sqlite3.OperationalError:
+ current_settings = {}
+
+ sets = DEFAULT_SETTINGS.copy()
+ sets.update(current_settings)
+ sets.update(settings)
+
+ for key in METADATA:
+ sets.pop(key, None)
+
+ # Chance to set pragmas before any tables are created.
+
+ for key, value in sorted(sets.items()):
+ if key.startswith('sqlite_'):
+ self.reset(key, value, update=False)
+
+ sql('CREATE TABLE IF NOT EXISTS Settings ('
+ ' key TEXT NOT NULL UNIQUE,'
+ ' value)'
+ )
+
+ # Setup Disk object (must happen after settings initialized).
+
+ kwargs = {
+ key[5:]: value for key, value in sets.items()
+ if key.startswith('disk_')
+ }
+ self._disk = disk(directory, **kwargs)
+
+ # Set cached attributes: updates settings and sets pragmas.
+
+ for key, value in sets.items():
+ query = 'INSERT OR REPLACE INTO Settings VALUES (?, ?)'
+ sql(query, (key, value))
+ self.reset(key, value)
+
+ for key, value in METADATA.items():
+ query = 'INSERT OR IGNORE INTO Settings VALUES (?, ?)'
+ sql(query, (key, value))
+ self.reset(key)
+
+ (self._page_size,), = sql('PRAGMA page_size').fetchall()
+
+ # Setup Cache table.
+
+ sql('CREATE TABLE IF NOT EXISTS Cache ('
+ ' rowid INTEGER PRIMARY KEY,'
+ ' key BLOB,'
+ ' raw INTEGER,'
+ ' store_time REAL,'
+ ' expire_time REAL,'
+ ' access_time REAL,'
+ ' access_count INTEGER DEFAULT 0,'
+ ' tag BLOB,'
+ ' size INTEGER DEFAULT 0,'
+ ' mode INTEGER DEFAULT 0,'
+ ' filename TEXT,'
+ ' value BLOB)'
+ )
+
+ sql('CREATE UNIQUE INDEX IF NOT EXISTS Cache_key_raw ON'
+ ' Cache(key, raw)'
+ )
+
+ sql('CREATE INDEX IF NOT EXISTS Cache_expire_time ON'
+ ' Cache (expire_time)'
+ )
+
+ query = EVICTION_POLICY[self.eviction_policy]['init']
+
+ if query is not None:
+ sql(query)
+
+ # Use triggers to keep Metadata updated.
+
+ sql('CREATE TRIGGER IF NOT EXISTS Settings_count_insert'
+ ' AFTER INSERT ON Cache FOR EACH ROW BEGIN'
+ ' UPDATE Settings SET value = value + 1'
+ ' WHERE key = "count"; END'
+ )
+
+ sql('CREATE TRIGGER IF NOT EXISTS Settings_count_delete'
+ ' AFTER DELETE ON Cache FOR EACH ROW BEGIN'
+ ' UPDATE Settings SET value = value - 1'
+ ' WHERE key = "count"; END'
+ )
+
+ sql('CREATE TRIGGER IF NOT EXISTS Settings_size_insert'
+ ' AFTER INSERT ON Cache FOR EACH ROW BEGIN'
+ ' UPDATE Settings SET value = value + NEW.size'
+ ' WHERE key = "size"; END'
+ )
+
+ sql('CREATE TRIGGER IF NOT EXISTS Settings_size_update'
+ ' AFTER UPDATE ON Cache FOR EACH ROW BEGIN'
+ ' UPDATE Settings'
+ ' SET value = value + NEW.size - OLD.size'
+ ' WHERE key = "size"; END'
+ )
+
+ sql('CREATE TRIGGER IF NOT EXISTS Settings_size_delete'
+ ' AFTER DELETE ON Cache FOR EACH ROW BEGIN'
+ ' UPDATE Settings SET value = value - OLD.size'
+ ' WHERE key = "size"; END'
+ )
+
+ # Create tag index if requested.
+
+ if self.tag_index: # pylint: disable=no-member
+ self.create_tag_index()
+ else:
+ self.drop_tag_index()
+
+ # Close and re-open database connection with given timeout.
+
+ self.close()
+ self._timeout = timeout
+ self._sql # pylint: disable=pointless-statement
+
+
+ @property
+ def directory(self):
+ """Cache directory."""
+ return self._directory
+
+
+ @property
+ def timeout(self):
+ """SQLite connection timeout value in seconds."""
+ return self._timeout
+
+
+ @property
+ def disk(self):
+ """Disk used for serialization."""
+ return self._disk
+
+
+ @property
+ def _con(self):
+ # Check process ID to support process forking. If the process
+ # ID changes, close the connection and update the process ID.
+
+ local_pid = getattr(self._local, 'pid', None)
+ pid = os.getpid()
+
+ if local_pid != pid:
+ self.close()
+ self._local.pid = pid
+
+ con = getattr(self._local, 'con', None)
+
+ if con is None:
+ con = self._local.con = sqlite3.connect(
+ op.join(self._directory, DBNAME),
+ timeout=self._timeout,
+ isolation_level=None,
+ )
+
+ # Some SQLite pragmas work on a per-connection basis so
+ # query the Settings table and reset the pragmas. The
+ # Settings table may not exist so catch and ignore the
+ # OperationalError that may occur.
+
+ try:
+ select = 'SELECT key, value FROM Settings'
+ settings = con.execute(select).fetchall()
+ except sqlite3.OperationalError:
+ pass
+ else:
+ for key, value in settings:
+ if key.startswith('sqlite_'):
+ self.reset(key, value, update=False)
+
+ return con
+
+
+ @property
+ def _sql(self):
+ return self._con.execute
+
+
+ @property
+ def _sql_retry(self):
+ sql = self._sql
+
+ # 2018-11-01 GrantJ - Some SQLite builds/versions handle
+ # the SQLITE_BUSY return value and connection parameter
+ # "timeout" differently. For a more reliable duration,
+ # manually retry the statement for 60 seconds. Only used
+ # by statements which modify the database and do not use
+ # a transaction (like those in ``__init__`` or ``reset``).
+ # See Issue #85 and tests/issue_85.py for more details.
+
+ def _execute_with_retry(statement, *args, **kwargs):
+ start = time.time()
+ while True:
+ try:
+ return sql(statement, *args, **kwargs)
+ except sqlite3.OperationalError as exc:
+ if str(exc) != 'database is locked':
+ raise
+ diff = time.time() - start
+ if diff > 60:
+ raise
+ time.sleep(0.001)
+
+ return _execute_with_retry
+
+
+ @cl.contextmanager
+ def transact(self, retry=False):
+ """Context manager to perform a transaction by locking the cache.
+
+ While the cache is locked, no other write operation is permitted.
+ Transactions should therefore be as short as possible. Read and write
+ operations performed in a transaction are atomic. Read operations may
+ occur concurrently with a transaction.
+
+ Transactions may be nested and may not be shared between threads.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ >>> cache = Cache()
+ >>> with cache.transact(): # Atomically increment two keys.
+ ... _ = cache.incr('total', 123.4)
+ ... _ = cache.incr('count', 1)
+ >>> with cache.transact(): # Atomically calculate average.
+ ... average = cache['total'] / cache['count']
+ >>> average
+ 123.4
+
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: context manager for use in `with` statement
+ :raises Timeout: if database timeout occurs
+
+ """
+ with self._transact(retry=retry):
+ yield
+
+
+ @cl.contextmanager
+ def _transact(self, retry=False, filename=None):
+ sql = self._sql
+ filenames = []
+ _disk_remove = self._disk.remove
+ tid = get_ident()
+ txn_id = self._txn_id
+
+ if tid == txn_id:
+ begin = False
+ else:
+ while True:
+ try:
+ sql('BEGIN IMMEDIATE')
+ begin = True
+ self._txn_id = tid
+ break
+ except sqlite3.OperationalError:
+ if retry:
+ continue
+ if filename is not None:
+ _disk_remove(filename)
+ raise Timeout
+
+ try:
+ yield sql, filenames.append
+ except BaseException:
+ if begin:
+ assert self._txn_id == tid
+ self._txn_id = None
+ sql('ROLLBACK')
+ raise
+ else:
+ if begin:
+ assert self._txn_id == tid
+ self._txn_id = None
+ sql('COMMIT')
+ for name in filenames:
+ if name is not None:
+ _disk_remove(name)
+
+
+ def set(self, key, value, expire=None, read=False, tag=None, retry=False):
+ """Set `key` and `value` item in cache.
+
+ When `read` is `True`, `value` should be a file-like object opened
+ for reading in binary mode.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
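+ A minimal sketch (assuming a fresh cache):
+
+ >>> cache = Cache()
+ >>> cache.set('key', 'value', expire=60, tag='demo')
+ True
+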
+ :param key: key for item
+ :param value: value for item
+ :param float expire: seconds until item expires
+ (default None, no expiry)
+ :param bool read: read value as bytes from file (default False)
+ :param str tag: text to associate with key (default None)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: True if item was set
+ :raises Timeout: if database timeout occurs
+
+ """
+ now = time.time()
+ db_key, raw = self._disk.put(key)
+ expire_time = None if expire is None else now + expire
+ size, mode, filename, db_value = self._disk.store(value, read, key=key)
+ columns = (expire_time, tag, size, mode, filename, db_value)
+
+ # The order of SELECT, UPDATE, and INSERT is important below.
+ #
+ # Typical cache usage pattern is:
+ #
+ # value = cache.get(key)
+ # if value is None:
+ # value = expensive_calculation()
+ # cache.set(key, value)
+ #
+ # Cache.get does not evict expired keys to avoid writes during lookups.
+ # Commonly used/expired keys will therefore remain in the cache making
+ # an UPDATE the preferred path.
+ #
+ # The alternative is to assume the key is not present by first trying
+ # to INSERT and then handling the IntegrityError that occurs from
+ # violating the UNIQUE constraint. This optimistic approach was
+ # rejected based on the common cache usage pattern.
+ #
+ # INSERT OR REPLACE aka UPSERT is not used because the old filename may
+ # need cleanup.
+
+ with self._transact(retry, filename) as (sql, cleanup):
+ rows = sql(
+ 'SELECT rowid, filename FROM Cache'
+ ' WHERE key = ? AND raw = ?',
+ (db_key, raw),
+ ).fetchall()
+
+ if rows:
+ (rowid, old_filename), = rows
+ cleanup(old_filename)
+ self._row_update(rowid, now, columns)
+ else:
+ self._row_insert(db_key, raw, now, columns)
+
+ self._cull(now, sql, cleanup)
+
+ return True
+
+
+ def __setitem__(self, key, value):
+ """Set corresponding `value` for `key` in cache.
+
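+ A minimal sketch (assuming a fresh cache):
+
+ >>> cache = Cache()
+ >>> cache['key'] = 'value'
+ >>> cache['key']
+ 'value'
+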
+ :param key: key for item
+ :param value: value for item
+
+ """
+ self.set(key, value, retry=True)
+
+
+ def _row_update(self, rowid, now, columns):
+ sql = self._sql
+ expire_time, tag, size, mode, filename, value = columns
+ sql('UPDATE Cache SET'
+ ' store_time = ?,'
+ ' expire_time = ?,'
+ ' access_time = ?,'
+ ' access_count = ?,'
+ ' tag = ?,'
+ ' size = ?,'
+ ' mode = ?,'
+ ' filename = ?,'
+ ' value = ?'
+ ' WHERE rowid = ?', (
+ now, # store_time
+ expire_time,
+ now, # access_time
+ 0, # access_count
+ tag,
+ size,
+ mode,
+ filename,
+ value,
+ rowid,
+ ),
+ )
+
+
+ def _row_insert(self, key, raw, now, columns):
+ sql = self._sql
+ expire_time, tag, size, mode, filename, value = columns
+ sql('INSERT INTO Cache('
+ ' key, raw, store_time, expire_time, access_time,'
+ ' access_count, tag, size, mode, filename, value'
+ ') VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)', (
+ key,
+ raw,
+ now, # store_time
+ expire_time,
+ now, # access_time
+ 0, # access_count
+ tag,
+ size,
+ mode,
+ filename,
+ value,
+ ),
+ )
+
+
+ def _cull(self, now, sql, cleanup, limit=None):
+ cull_limit = self.cull_limit if limit is None else limit
+
+ if cull_limit == 0:
+ return
+
+ # Evict expired keys.
+
+ select_expired_template = (
+ 'SELECT %s FROM Cache'
+ ' WHERE expire_time IS NOT NULL AND expire_time < ?'
+ ' ORDER BY expire_time LIMIT ?'
+ )
+
+ select_expired = select_expired_template % 'filename'
+ rows = sql(select_expired, (now, cull_limit)).fetchall()
+
+ if rows:
+ delete_expired = (
+ 'DELETE FROM Cache WHERE rowid IN (%s)'
+ % (select_expired_template % 'rowid')
+ )
+ sql(delete_expired, (now, cull_limit))
+
+ for filename, in rows:
+ cleanup(filename)
+
+ cull_limit -= len(rows)
+
+ if cull_limit == 0:
+ return
+
+ # Evict keys by policy.
+
+ select_policy = EVICTION_POLICY[self.eviction_policy]['cull']
+
+ if select_policy is None or self.volume() < self.size_limit:
+ return
+
+ select_filename = select_policy.format(fields='filename', now=now)
+ rows = sql(select_filename, (cull_limit,)).fetchall()
+
+ if rows:
+ delete = (
+ 'DELETE FROM Cache WHERE rowid IN (%s)'
+ % (select_policy.format(fields='rowid', now=now))
+ )
+ sql(delete, (cull_limit,))
+
+ for filename, in rows:
+ cleanup(filename)
+
+
+ def touch(self, key, expire=None, retry=False):
+ """Touch `key` in cache and update `expire` time.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
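+ A minimal sketch (assuming a fresh cache):
+
+ >>> cache = Cache()
+ >>> cache.set('key', 'value', expire=10)
+ True
+ >>> cache.touch('key', expire=60)
+ True
+ >>> cache.touch('missing')
+ False
+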
+ :param key: key for item
+ :param float expire: seconds until item expires
+ (default None, no expiry)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: True if key was touched
+ :raises Timeout: if database timeout occurs
+
+ """
+ now = time.time()
+ db_key, raw = self._disk.put(key)
+ expire_time = None if expire is None else now + expire
+
+ with self._transact(retry) as (sql, _):
+ rows = sql(
+ 'SELECT rowid, expire_time FROM Cache'
+ ' WHERE key = ? AND raw = ?',
+ (db_key, raw),
+ ).fetchall()
+
+ if rows:
+ (rowid, old_expire_time), = rows
+
+ if old_expire_time is None or old_expire_time > now:
+ sql('UPDATE Cache SET expire_time = ? WHERE rowid = ?',
+ (expire_time, rowid),
+ )
+ return True
+
+ return False
+
+
+ def add(self, key, value, expire=None, read=False, tag=None, retry=False):
+ """Add `key` and `value` item to cache.
+
+ Similar to `set`, but only add to cache if key not present.
+
+ Operation is atomic. Only one concurrent add operation for a given key
+ will succeed.
+
+ When `read` is `True`, `value` should be a file-like object opened
+ for reading in binary mode.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
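+ A minimal sketch (assuming a fresh cache):
+
+ >>> cache = Cache()
+ >>> cache.add('key', 'value')
+ True
+ >>> cache.add('key', 'other')
+ False
+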
+ :param key: key for item
+ :param value: value for item
+ :param float expire: seconds until the key expires
+ (default None, no expiry)
+ :param bool read: read value as bytes from file (default False)
+ :param str tag: text to associate with key (default None)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: True if item was added
+ :raises Timeout: if database timeout occurs
+
+ """
+ now = time.time()
+ db_key, raw = self._disk.put(key)
+ expire_time = None if expire is None else now + expire
+ size, mode, filename, db_value = self._disk.store(value, read, key=key)
+ columns = (expire_time, tag, size, mode, filename, db_value)
+
+ with self._transact(retry, filename) as (sql, cleanup):
+ rows = sql(
+ 'SELECT rowid, filename, expire_time FROM Cache'
+ ' WHERE key = ? AND raw = ?',
+ (db_key, raw),
+ ).fetchall()
+
+ if rows:
+ (rowid, old_filename, old_expire_time), = rows
+
+ if old_expire_time is None or old_expire_time > now:
+ cleanup(filename)
+ return False
+
+ cleanup(old_filename)
+ self._row_update(rowid, now, columns)
+ else:
+ self._row_insert(db_key, raw, now, columns)
+
+ self._cull(now, sql, cleanup)
+
+ return True
+
+
+ def incr(self, key, delta=1, default=0, retry=False):
+ """Increment value by delta for item with key.
+
+ If key is missing and default is None then raise KeyError. Else if key
+ is missing and default is not None then use default for value.
+
+ Operation is atomic. All concurrent increment operations will be
+ counted individually.
+
+ Assumes value may be stored in a SQLite column. Most builds that target
+ machines with 64-bit pointer widths will support 64-bit signed
+ integers.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
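+ A minimal sketch (assuming a fresh cache and the default of 0):
+
+ >>> cache = Cache()
+ >>> cache.incr('counter')
+ 1
+ >>> cache.incr('counter', delta=10)
+ 11
+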
+ :param key: key for item
+ :param int delta: amount to increment (default 1)
+ :param int default: value if key is missing (default 0)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: new value for item
+ :raises KeyError: if key is not found and default is None
+ :raises Timeout: if database timeout occurs
+
+ """
+ now = time.time()
+ db_key, raw = self._disk.put(key)
+ select = (
+ 'SELECT rowid, expire_time, filename, value FROM Cache'
+ ' WHERE key = ? AND raw = ?'
+ )
+
+ with self._transact(retry) as (sql, cleanup):
+ rows = sql(select, (db_key, raw)).fetchall()
+
+ if not rows:
+ if default is None:
+ raise KeyError(key)
+
+ value = default + delta
+ columns = (None, None) + self._disk.store(value, False, key=key)
+ self._row_insert(db_key, raw, now, columns)
+ self._cull(now, sql, cleanup)
+ return value
+
+ (rowid, expire_time, filename, value), = rows
+
+ if expire_time is not None and expire_time < now:
+ if default is None:
+ raise KeyError(key)
+
+ value = default + delta
+ columns = (None, None) + self._disk.store(value, False, key=key)
+ self._row_update(rowid, now, columns)
+ self._cull(now, sql, cleanup)
+ cleanup(filename)
+ return value
+
+ value += delta
+
+ columns = 'store_time = ?, value = ?'
+ update_column = EVICTION_POLICY[self.eviction_policy]['get']
+
+ if update_column is not None:
+ columns += ', ' + update_column.format(now=now)
+
+ update = 'UPDATE Cache SET %s WHERE rowid = ?' % columns
+ sql(update, (now, value, rowid))
+
+ return value
+
+
+ def decr(self, key, delta=1, default=0, retry=False):
+ """Decrement value by delta for item with key.
+
+ If key is missing and default is None then raise KeyError. Else if key
+ is missing and default is not None then use default for value.
+
+ Operation is atomic. All concurrent decrement operations will be
+ counted individually.
+
+ Unlike Memcached, negative values are supported. Value may be
+ decremented below zero.
+
+ Assumes value may be stored in a SQLite column. Most builds that target
+ machines with 64-bit pointer widths will support 64-bit signed
+ integers.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ :param key: key for item
+ :param int delta: amount to decrement (default 1)
+ :param int default: value if key is missing (default 0)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: new value for item
+ :raises KeyError: if key is not found and default is None
+ :raises Timeout: if database timeout occurs
+
+ """
+ return self.incr(key, -delta, default, retry)
+
+
+ def get(self, key, default=None, read=False, expire_time=False, tag=False,
+ retry=False):
+ """Retrieve value from cache. If `key` is missing, return `default`.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
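+ A minimal sketch (assuming a fresh cache; no expiry was set, so the
+ expire time below is None):
+
+ >>> cache = Cache()
+ >>> cache.set('key', 'value', tag='demo')
+ True
+ >>> cache.get('key', expire_time=True, tag=True)
+ ('value', None, 'demo')
+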
+ :param key: key for item
+ :param default: value to return if key is missing (default None)
+ :param bool read: if True, return file handle to value
+ (default False)
+ :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param bool tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: value for item or default if key not found
+ :raises Timeout: if database timeout occurs
+
+ """
+ db_key, raw = self._disk.put(key)
+ update_column = EVICTION_POLICY[self.eviction_policy]['get']
+ select = (
+ 'SELECT rowid, expire_time, tag, mode, filename, value'
+ ' FROM Cache WHERE key = ? AND raw = ?'
+ ' AND (expire_time IS NULL OR expire_time > ?)'
+ )
+
+ if expire_time and tag:
+ default = (default, None, None)
+ elif expire_time or tag:
+ default = (default, None)
+
+ if not self.statistics and update_column is None:
+ # Fast path, no transaction necessary.
+
+ rows = self._sql(select, (db_key, raw, time.time())).fetchall()
+
+ if not rows:
+ return default
+
+ (rowid, db_expire_time, db_tag, mode, filename, db_value), = rows
+
+ try:
+ value = self._disk.fetch(mode, filename, db_value, read)
+ except IOError:
+ # Key was deleted before we could retrieve result.
+ return default
+
+ else: # Slow path, transaction required.
+ cache_hit = (
+ 'UPDATE Settings SET value = value + 1 WHERE key = "hits"'
+ )
+ cache_miss = (
+ 'UPDATE Settings SET value = value + 1 WHERE key = "misses"'
+ )
+
+ with self._transact(retry) as (sql, _):
+ rows = sql(select, (db_key, raw, time.time())).fetchall()
+
+ if not rows:
+ if self.statistics:
+ sql(cache_miss)
+ return default
+
+ (rowid, db_expire_time, db_tag,
+ mode, filename, db_value), = rows
+
+ try:
+ value = self._disk.fetch(mode, filename, db_value, read)
+ except IOError as error:
+ if error.errno == errno.ENOENT:
+ # Key was deleted before we could retrieve result.
+ if self.statistics:
+ sql(cache_miss)
+ return default
+ else:
+ raise
+
+ if self.statistics:
+ sql(cache_hit)
+
+ now = time.time()
+ update = 'UPDATE Cache SET %s WHERE rowid = ?'
+
+ if update_column is not None:
+ sql(update % update_column.format(now=now), (rowid,))
+
+ if expire_time and tag:
+ return (value, db_expire_time, db_tag)
+ elif expire_time:
+ return (value, db_expire_time)
+ elif tag:
+ return (value, db_tag)
+ else:
+ return value
+
+
+ def __getitem__(self, key):
+ """Return corresponding value for `key` from cache.
+
+ :param key: key matching item
+ :return: corresponding value
+ :raises KeyError: if key is not found
+
+ """
+ value = self.get(key, default=ENOVAL, retry=True)
+ if value is ENOVAL:
+ raise KeyError(key)
+ return value
+
+
+ def read(self, key, retry=False):
+ """Return file handle value corresponding to `key` from cache.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ :param key: key matching item
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: file open for reading in binary mode
+ :raises KeyError: if key is not found
+ :raises Timeout: if database timeout occurs
+
+ """
+ handle = self.get(key, default=ENOVAL, read=True, retry=retry)
+ if handle is ENOVAL:
+ raise KeyError(key)
+ return handle
+
+
+ def __contains__(self, key):
+ """Return `True` if `key` matching item is found in cache.
+
+ :param key: key matching item
+ :return: True if key matching item is found
+
+ """
+ sql = self._sql
+ db_key, raw = self._disk.put(key)
+ select = (
+ 'SELECT rowid FROM Cache'
+ ' WHERE key = ? AND raw = ?'
+ ' AND (expire_time IS NULL OR expire_time > ?)'
+ )
+
+ rows = sql(select, (db_key, raw, time.time())).fetchall()
+
+ return bool(rows)
+
+
+ def pop(self, key, default=None, expire_time=False, tag=False, retry=False):
+ """Remove corresponding item for `key` from cache and return value.
+
+ If `key` is missing, return `default`.
+
+ Operation is atomic. Concurrent operations will be serialized.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
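+ A minimal sketch (assuming a fresh cache):
+
+ >>> cache = Cache()
+ >>> cache['key'] = 'value'
+ >>> cache.pop('key')
+ 'value'
+ >>> cache.pop('key', default=0)
+ 0
+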
+ :param key: key for item
+ :param default: value to return if key is missing (default None)
+ :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param bool tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: value for item or default if key not found
+ :raises Timeout: if database timeout occurs
+
+ """
+ db_key, raw = self._disk.put(key)
+ select = (
+ 'SELECT rowid, expire_time, tag, mode, filename, value'
+ ' FROM Cache WHERE key = ? AND raw = ?'
+ ' AND (expire_time IS NULL OR expire_time > ?)'
+ )
+
+ if expire_time and tag:
+ default = default, None, None
+ elif expire_time or tag:
+ default = default, None
+
+ with self._transact(retry) as (sql, _):
+ rows = sql(select, (db_key, raw, time.time())).fetchall()
+
+ if not rows:
+ return default
+
+ (rowid, db_expire_time, db_tag, mode, filename, db_value), = rows
+
+ sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+
+ try:
+ value = self._disk.fetch(mode, filename, db_value, False)
+ except IOError as error:
+ if error.errno == errno.ENOENT:
+ # Key was deleted before we could retrieve result.
+ return default
+ else:
+ raise
+ finally:
+ if filename is not None:
+ self._disk.remove(filename)
+
+ if expire_time and tag:
+ return value, db_expire_time, db_tag
+ elif expire_time:
+ return value, db_expire_time
+ elif tag:
+ return value, db_tag
+ else:
+ return value
+
+
+ def __delitem__(self, key, retry=True):
+ """Delete corresponding item for `key` from cache.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default `True`).
+
+ :param key: key matching item
+ :param bool retry: retry if database timeout occurs (default True)
+ :raises KeyError: if key is not found
+ :raises Timeout: if database timeout occurs
+
+ """
+ db_key, raw = self._disk.put(key)
+
+ with self._transact(retry) as (sql, cleanup):
+ rows = sql(
+ 'SELECT rowid, filename FROM Cache'
+ ' WHERE key = ? AND raw = ?'
+ ' AND (expire_time IS NULL OR expire_time > ?)',
+ (db_key, raw, time.time()),
+ ).fetchall()
+
+ if not rows:
+ raise KeyError(key)
+
+ (rowid, filename), = rows
+ sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+ cleanup(filename)
+
+ return True
+
+
+ def delete(self, key, retry=False):
+ """Delete corresponding item for `key` from cache.
+
+ Missing keys are ignored.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ :param key: key matching item
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: True if item was deleted
+ :raises Timeout: if database timeout occurs
+
+ """
+ try:
+ return self.__delitem__(key, retry=retry)
+ except KeyError:
+ return False
+
+
+ def push(self, value, prefix=None, side='back', expire=None, read=False,
+ tag=None, retry=False):
+ """Push `value` onto `side` of queue identified by `prefix` in cache.
+
+ When prefix is None, integer keys are used. Otherwise, string keys are
+ used in the format "prefix-integer". Integer starts at 500 trillion.
+
+ Defaults to pushing value on back of queue. Set side to 'front' to push
+ value on front of queue. Side must be one of 'back' or 'front'.
+
+ Operation is atomic. Concurrent operations will be serialized.
+
+ When `read` is `True`, `value` should be a file-like object opened
+ for reading in binary mode.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ See also `Cache.pull`.
+
+ >>> cache = Cache()
+ >>> print(cache.push('first value'))
+ 500000000000000
+ >>> cache.get(500000000000000)
+ 'first value'
+ >>> print(cache.push('second value'))
+ 500000000000001
+ >>> print(cache.push('third value', side='front'))
+ 499999999999999
+ >>> cache.push(1234, prefix='userids')
+ 'userids-500000000000000'
+
+ :param value: value for item
+ :param str prefix: key prefix (default None, key is integer)
+ :param str side: either 'back' or 'front' (default 'back')
+ :param float expire: seconds until the key expires
+ (default None, no expiry)
+ :param bool read: read value as bytes from file (default False)
+ :param str tag: text to associate with key (default None)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: key for item in cache
+ :raises Timeout: if database timeout occurs
+
+ """
+ if prefix is None:
+ min_key = 0
+ max_key = 999999999999999
+ else:
+ min_key = prefix + '-000000000000000'
+ max_key = prefix + '-999999999999999'
+
+ now = time.time()
+ raw = True
+ expire_time = None if expire is None else now + expire
+ size, mode, filename, db_value = self._disk.store(value, read)
+ columns = (expire_time, tag, size, mode, filename, db_value)
+ order = {'back': 'DESC', 'front': 'ASC'}
+ select = (
+ 'SELECT key FROM Cache'
+ ' WHERE ? < key AND key < ? AND raw = ?'
+ ' ORDER BY key %s LIMIT 1'
+ ) % order[side]
+
+ with self._transact(retry, filename) as (sql, cleanup):
+ rows = sql(select, (min_key, max_key, raw)).fetchall()
+
+ if rows:
+ (key,), = rows
+
+ if prefix is not None:
+ num = int(key[(key.rfind('-') + 1):])
+ else:
+ num = key
+
+ if side == 'back':
+ num += 1
+ else:
+ assert side == 'front'
+ num -= 1
+ else:
+ num = 500000000000000
+
+ if prefix is not None:
+ db_key = '{0}-{1:015d}'.format(prefix, num)
+ else:
+ db_key = num
+
+ self._row_insert(db_key, raw, now, columns)
+ self._cull(now, sql, cleanup)
+
+ return db_key
+
+
+ def pull(self, prefix=None, default=(None, None), side='front',
+ expire_time=False, tag=False, retry=False):
+ """Pull key and value item pair from `side` of queue in cache.
+
+ When prefix is None, integer keys are used. Otherwise, string keys are
+ used in the format "prefix-integer". Integer starts at 500 trillion.
+
+ If queue is empty, return default.
+
+ Defaults to pulling key and value item pairs from front of queue. Set
+ side to 'back' to pull from back of queue. Side must be one of 'front'
+ or 'back'.
+
+ Operation is atomic. Concurrent operations will be serialized.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ See also `Cache.push` and `Cache.get`.
+
+ >>> cache = Cache()
+ >>> cache.pull()
+ (None, None)
+ >>> for letter in 'abc':
+ ... print(cache.push(letter))
+ 500000000000000
+ 500000000000001
+ 500000000000002
+ >>> key, value = cache.pull()
+ >>> print(key)
+ 500000000000000
+ >>> value
+ 'a'
+ >>> _, value = cache.pull(side='back')
+ >>> value
+ 'c'
+ >>> cache.push(1234, 'userids')
+ 'userids-500000000000000'
+ >>> _, value = cache.pull('userids')
+ >>> value
+ 1234
+
+ :param str prefix: key prefix (default None, key is integer)
+ :param default: value to return if key is missing
+ (default (None, None))
+ :param str side: either 'front' or 'back' (default 'front')
+ :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param bool tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: key and value item pair or default if queue is empty
+ :raises Timeout: if database timeout occurs
+
+ """
+ # Caution: Nearly identical code exists in Cache.peek
+ if prefix is None:
+ min_key = 0
+ max_key = 999999999999999
+ else:
+ min_key = prefix + '-000000000000000'
+ max_key = prefix + '-999999999999999'
+
+ order = {'front': 'ASC', 'back': 'DESC'}
+ select = (
+ 'SELECT rowid, key, expire_time, tag, mode, filename, value'
+ ' FROM Cache WHERE ? < key AND key < ? AND raw = 1'
+ ' ORDER BY key %s LIMIT 1'
+ ) % order[side]
+
+ if expire_time and tag:
+ default = default, None, None
+ elif expire_time or tag:
+ default = default, None
+
+ while True:
+ while True:
+ with self._transact(retry) as (sql, cleanup):
+ rows = sql(select, (min_key, max_key)).fetchall()
+
+ if not rows:
+ return default
+
+ (rowid, key, db_expire, db_tag, mode, name,
+ db_value), = rows
+
+ sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+
+ if db_expire is not None and db_expire < time.time():
+ cleanup(name)
+ else:
+ break
+
+ try:
+ value = self._disk.fetch(mode, name, db_value, False)
+ except IOError as error:
+ if error.errno == errno.ENOENT:
+ # Key was deleted before we could retrieve result.
+ continue
+ else:
+ raise
+ finally:
+ if name is not None:
+ self._disk.remove(name)
+ break
+
+ if expire_time and tag:
+ return (key, value), db_expire, db_tag
+ elif expire_time:
+ return (key, value), db_expire
+ elif tag:
+ return (key, value), db_tag
+ else:
+ return key, value
+
+
+ def peek(self, prefix=None, default=(None, None), side='front',
+ expire_time=False, tag=False, retry=False):
+ """Peek at key and value item pair from `side` of queue in cache.
+
+ When prefix is None, integer keys are used. Otherwise, string keys are
+ used in the format "prefix-integer". Integer starts at 500 trillion.
+
+ If queue is empty, return default.
+
+ Defaults to peeking at key and value item pairs from front of queue.
+ Set side to 'back' to peek at the back of queue. Side must be one of
+ 'front' or 'back'.
+
+ Expired items are deleted from cache. Operation is atomic. Concurrent
+ operations will be serialized.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ See also `Cache.pull` and `Cache.push`.
+
+ >>> cache = Cache()
+ >>> for letter in 'abc':
+ ... print(cache.push(letter))
+ 500000000000000
+ 500000000000001
+ 500000000000002
+ >>> key, value = cache.peek()
+ >>> print(key)
+ 500000000000000
+ >>> value
+ 'a'
+ >>> key, value = cache.peek(side='back')
+ >>> print(key)
+ 500000000000002
+ >>> value
+ 'c'
+
+ :param str prefix: key prefix (default None, key is integer)
+ :param default: value to return if key is missing
+ (default (None, None))
+ :param str side: either 'front' or 'back' (default 'front')
+ :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param bool tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: key and value item pair or default if queue is empty
+ :raises Timeout: if database timeout occurs
+
+ """
+ # Caution: Nearly identical code exists in Cache.pull
+ if prefix is None:
+ min_key = 0
+ max_key = 999999999999999
+ else:
+ min_key = prefix + '-000000000000000'
+ max_key = prefix + '-999999999999999'
+
+ order = {'front': 'ASC', 'back': 'DESC'}
+ select = (
+ 'SELECT rowid, key, expire_time, tag, mode, filename, value'
+ ' FROM Cache WHERE ? < key AND key < ? AND raw = 1'
+ ' ORDER BY key %s LIMIT 1'
+ ) % order[side]
+
+ if expire_time and tag:
+ default = default, None, None
+ elif expire_time or tag:
+ default = default, None
+
+ while True:
+ while True:
+ with self._transact(retry) as (sql, cleanup):
+ rows = sql(select, (min_key, max_key)).fetchall()
+
+ if not rows:
+ return default
+
+ (rowid, key, db_expire, db_tag, mode, name,
+ db_value), = rows
+
+ if db_expire is not None and db_expire < time.time():
+ sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+ cleanup(name)
+ else:
+ break
+
+ try:
+ value = self._disk.fetch(mode, name, db_value, False)
+ except IOError as error:
+ if error.errno == errno.ENOENT:
+ # Key was deleted before we could retrieve result.
+ continue
+ else:
+ raise
+ finally:
+ if name is not None:
+ self._disk.remove(name)
+ break
+
+ if expire_time and tag:
+ return (key, value), db_expire, db_tag
+ elif expire_time:
+ return (key, value), db_expire
+ elif tag:
+ return (key, value), db_tag
+ else:
+ return key, value
+
+
+ def peekitem(self, last=True, expire_time=False, tag=False, retry=False):
+ """Peek at key and value item pair in cache based on iteration order.
+
+ Expired items are deleted from cache. Operation is atomic. Concurrent
+ operations will be serialized.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ >>> cache = Cache()
+ >>> for num, letter in enumerate('abc'):
+ ... cache[letter] = num
+ >>> cache.peekitem()
+ ('c', 2)
+ >>> cache.peekitem(last=False)
+ ('a', 0)
+
+ :param bool last: last item in iteration order (default True)
+ :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param bool tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: key and value item pair
+ :raises KeyError: if cache is empty
+ :raises Timeout: if database timeout occurs
+
+ """
+ order = ('ASC', 'DESC')
+ select = (
+ 'SELECT rowid, key, raw, expire_time, tag, mode, filename, value'
+ ' FROM Cache ORDER BY rowid %s LIMIT 1'
+ ) % order[last]
+
+ while True:
+ while True:
+ with self._transact(retry) as (sql, cleanup):
+ rows = sql(select).fetchall()
+
+ if not rows:
+ raise KeyError('dictionary is empty')
+
+ (rowid, db_key, raw, db_expire, db_tag, mode, name,
+ db_value), = rows
+
+ if db_expire is not None and db_expire < time.time():
+ sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+ cleanup(name)
+ else:
+ break
+
+ key = self._disk.get(db_key, raw)
+
+ try:
+ value = self._disk.fetch(mode, name, db_value, False)
+ except IOError as error:
+ if error.errno == errno.ENOENT:
+ # Key was deleted before we could retrieve result.
+ continue
+ else:
+ raise
+ break
+
+ if expire_time and tag:
+ return (key, value), db_expire, db_tag
+ elif expire_time:
+ return (key, value), db_expire
+ elif tag:
+ return (key, value), db_tag
+ else:
+ return key, value
+
+
+ def memoize(self, name=None, typed=False, expire=None, tag=None):
+ """Memoizing cache decorator.
+
+ Decorator to wrap callable with memoizing function using cache.
+ Repeated calls with the same arguments will lookup result in cache and
+ avoid function evaluation.
+
+ If name is set to None (default), the callable name will be determined
+ automatically.
+
+ When expire is set to zero, function results will not be set in the
+ cache. Cache lookups still occur, however. Read
+ :doc:`case-study-landing-page-caching` for example usage.
+
+ If typed is set to True, function arguments of different types will be
+ cached separately. For example, f(3) and f(3.0) will be treated as
+ distinct calls with distinct results.
+
+ The original underlying function is accessible through the __wrapped__
+ attribute. This is useful for introspection, for bypassing the cache,
+ or for rewrapping the function with a different cache.
+
+ >>> from diskcache import Cache
+ >>> cache = Cache()
+ >>> @cache.memoize(expire=1, tag='fib')
+ ... def fibonacci(number):
+ ... if number == 0:
+ ... return 0
+ ... elif number == 1:
+ ... return 1
+ ... else:
+ ... return fibonacci(number - 1) + fibonacci(number - 2)
+ >>> print(fibonacci(100))
+ 354224848179261915075
+
+ An additional `__cache_key__` attribute can be used to generate the
+ cache key used for the given arguments.
+
+ >>> key = fibonacci.__cache_key__(100)
+ >>> print(cache[key])
+ 354224848179261915075
+
+ Remember to call memoize when decorating a callable. If you forget,
+ then a TypeError will occur. Note the lack of parentheses after
+ memoize below:
+
+ >>> @cache.memoize
+ ... def test():
+ ... pass
+ Traceback (most recent call last):
+ ...
+ TypeError: name cannot be callable
+
+ :param str name: name given for callable (default None, automatic)
+ :param bool typed: cache different types separately (default False)
+ :param float expire: seconds until arguments expire
+ (default None, no expiry)
+ :param str tag: text to associate with arguments (default None)
+ :return: callable decorator
+
+ """
+ # Caution: Nearly identical code exists in DjangoCache.memoize
+ if callable(name):
+ raise TypeError('name cannot be callable')
+
+ def decorator(func):
+ "Decorator created by memoize() for callable `func`."
+ base = (full_name(func),) if name is None else (name,)
+
+ @ft.wraps(func)
+ def wrapper(*args, **kwargs):
+ "Wrapper for callable to cache arguments and return values."
+ key = wrapper.__cache_key__(*args, **kwargs)
+ result = self.get(key, default=ENOVAL, retry=True)
+
+ if result is ENOVAL:
+ result = func(*args, **kwargs)
+ if expire is None or expire > 0:
+ self.set(key, result, expire, tag=tag, retry=True)
+
+ return result
+
+ def __cache_key__(*args, **kwargs):
+ "Make key for cache given function arguments."
+ return args_to_key(base, args, kwargs, typed)
+
+ wrapper.__cache_key__ = __cache_key__
+ return wrapper
+
+ return decorator
+
+
+ def check(self, fix=False, retry=False):
+ """Check database and file system consistency.
+
+ Intended for use in testing and post-mortem error analysis.
+
+ While checking the Cache table for consistency, a writer lock is held
+ on the database. The lock blocks other cache clients from writing to
+ the database. For caches with many file references, the lock may be
+ held for a long time. For example, local benchmarking shows that a
+ cache with 1,000 file references takes ~60ms to check.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
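+ An illustrative sketch (a freshly created cache should report no
+ inconsistencies):
+
+ >>> cache = Cache()
+ >>> warns = cache.check()
+ >>> len(warns)
+ 0
+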
+ :param bool fix: correct inconsistencies
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: list of warnings
+ :raises Timeout: if database timeout occurs
+
+ """
+ # pylint: disable=access-member-before-definition,W0201
+ with warnings.catch_warnings(record=True) as warns:
+ sql = self._sql
+
+ # Check integrity of database.
+
+ rows = sql('PRAGMA integrity_check').fetchall()
+
+ if len(rows) != 1 or rows[0][0] != u'ok':
+ for message, in rows:
+ warnings.warn(message)
+
+ if fix:
+ sql('VACUUM')
+
+ with self._transact(retry) as (sql, _):
+
+ # Check Cache.filename against file system.
+
+ filenames = set()
+ select = (
+ 'SELECT rowid, size, filename FROM Cache'
+ ' WHERE filename IS NOT NULL'
+ )
+
+ rows = sql(select).fetchall()
+
+ for rowid, size, filename in rows:
+ full_path = op.join(self._directory, filename)
+ filenames.add(full_path)
+
+ if op.exists(full_path):
+ real_size = op.getsize(full_path)
+
+ if size != real_size:
+ message = 'wrong file size: %s, %d != %d'
+ args = full_path, real_size, size
+ warnings.warn(message % args)
+
+ if fix:
+ sql('UPDATE Cache SET size = ?'
+ ' WHERE rowid = ?',
+ (real_size, rowid),
+ )
+
+ continue
+
+ warnings.warn('file not found: %s' % full_path)
+
+ if fix:
+ sql('DELETE FROM Cache WHERE rowid = ?', (rowid,))
+
+ # Check file system against Cache.filename.
+
+ for dirpath, _, files in os.walk(self._directory):
+ paths = [op.join(dirpath, filename) for filename in files]
+ error = set(paths) - filenames
+
+ for full_path in error:
+ if DBNAME in full_path:
+ continue
+
+ message = 'unknown file: %s' % full_path
+ warnings.warn(message, UnknownFileWarning)
+
+ if fix:
+ os.remove(full_path)
+
+ # Check for empty directories.
+
+ for dirpath, dirs, files in os.walk(self._directory):
+ if not (dirs or files):
+ message = 'empty directory: %s' % dirpath
+ warnings.warn(message, EmptyDirWarning)
+
+ if fix:
+ os.rmdir(dirpath)
+
+ # Check Settings.count against count of Cache rows.
+
+ self.reset('count')
+ (count,), = sql('SELECT COUNT(key) FROM Cache').fetchall()
+
+ if self.count != count:
+ message = 'Settings.count != COUNT(Cache.key); %d != %d'
+ warnings.warn(message % (self.count, count))
+
+ if fix:
+ sql('UPDATE Settings SET value = ? WHERE key = ?',
+ (count, 'count'),
+ )
+
+ # Check Settings.size against sum of Cache.size column.
+
+ self.reset('size')
+ select_size = 'SELECT COALESCE(SUM(size), 0) FROM Cache'
+ (size,), = sql(select_size).fetchall()
+
+ if self.size != size:
+ message = 'Settings.size != SUM(Cache.size); %d != %d'
+ warnings.warn(message % (self.size, size))
+
+ if fix:
+ sql('UPDATE Settings SET value = ? WHERE key = ?',
+ (size, 'size'),
+ )
+
+ return warns
+
+
+ def create_tag_index(self):
+ """Create tag index on cache database.
+
+ It is better to initialize cache with `tag_index=True` than use this.
+
+ :raises Timeout: if database timeout occurs
+
+ """
+ sql = self._sql
+ sql('CREATE INDEX IF NOT EXISTS Cache_tag_rowid ON Cache(tag, rowid)')
+ self.reset('tag_index', 1)
+
+
+ def drop_tag_index(self):
+ """Drop tag index on cache database.
+
+ :raises Timeout: if database timeout occurs
+
+ """
+ sql = self._sql
+ sql('DROP INDEX IF EXISTS Cache_tag_rowid')
+ self.reset('tag_index', 0)
+
+
+ def evict(self, tag, retry=False):
+ """Remove items with matching `tag` from cache.
+
+ Removing items is an iterative process. In each iteration, a subset of
+ items is removed. Concurrent writes may occur between iterations.
+
+ If a :exc:`Timeout` occurs, the first element of the exception's
+ `args` attribute will be the number of items removed before the
+ exception occurred.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
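+ An illustrative sketch (assuming a fresh cache):
+
+ >>> cache = Cache()
+ >>> for num in range(100):
+ ... _ = cache.set(num, num, tag='even' if num % 2 == 0 else 'odd')
+ >>> cache.evict('even')
+ 50
+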
+ :param str tag: tag identifying items
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: count of rows removed
+ :raises Timeout: if database timeout occurs
+
+ """
+ select = (
+ 'SELECT rowid, filename FROM Cache'
+ ' WHERE tag = ? AND rowid > ?'
+ ' ORDER BY rowid LIMIT ?'
+ )
+ args = [tag, 0, 100]
+ return self._select_delete(select, args, arg_index=1, retry=retry)
+
+
+ def expire(self, now=None, retry=False):
+ """Remove expired items from cache.
+
+ Removing items is an iterative process. In each iteration, a subset of
+ items is removed. Concurrent writes may occur between iterations.
+
+ If a :exc:`Timeout` occurs, the first element of the exception's
+ `args` attribute will be the number of items removed before the
+ exception occurred.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
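+ An illustrative sketch (assuming a fresh cache; passing a future
+ `now` treats the items below as already expired):
+
+ >>> import time
+ >>> cache = Cache()
+ >>> for num in range(10):
+ ... _ = cache.set(num, num, expire=10)
+ >>> cache.expire(now=time.time() + 60)
+ 10
+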
+ :param float now: current time (default None, ``time.time()`` used)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: count of items removed
+ :raises Timeout: if database timeout occurs
+
+ """
+ select = (
+ 'SELECT rowid, expire_time, filename FROM Cache'
+ ' WHERE ? < expire_time AND expire_time < ?'
+ ' ORDER BY expire_time LIMIT ?'
+ )
+ args = [0, now or time.time(), 100]
+ return self._select_delete(select, args, row_index=1, retry=retry)
+
+
+ def cull(self, retry=False):
+ """Cull items from cache until volume is less than size limit.
+
+ Removing items is an iterative process. In each iteration, a subset of
+ items is removed. Concurrent writes may occur between iterations.
+
+ If a :exc:`Timeout` occurs, the first element of the exception's
+ `args` attribute will be the number of items removed before the
+ exception occurred.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: count of items removed
+ :raises Timeout: if database timeout occurs
+
+ """
+ now = time.time()
+
+ # Remove expired items.
+
+ count = self.expire(now)
+
+ # Remove items by policy.
+
+ select_policy = EVICTION_POLICY[self.eviction_policy]['cull']
+
+ if select_policy is None:
+ return count
+
+ select_filename = select_policy.format(fields='filename', now=now)
+
+ try:
+ while self.volume() > self.size_limit:
+ with self._transact(retry) as (sql, cleanup):
+ rows = sql(select_filename, (10,)).fetchall()
+
+ if not rows:
+ break
+
+ count += len(rows)
+ delete = (
+ 'DELETE FROM Cache WHERE rowid IN (%s)'
+ % select_policy.format(fields='rowid', now=now)
+ )
+ sql(delete, (10,))
+
+ for filename, in rows:
+ cleanup(filename)
+ except Timeout:
+ raise Timeout(count)
+
+ return count
+
+
+ def clear(self, retry=False):
+ """Remove all items from cache.
+
+ Removing items is an iterative process. In each iteration, a subset of
+ items is removed. Concurrent writes may occur between iterations.
+
+ If a :exc:`Timeout` occurs, the first element of the exception's
+ `args` attribute will be the number of items removed before the
+ exception occurred.
+
+ Raises :exc:`Timeout` error when database timeout occurs and `retry` is
+ `False` (default).
+
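+ A minimal sketch (assuming a fresh cache):
+
+ >>> cache = Cache()
+ >>> for num in range(10):
+ ... cache[num] = num
+ >>> cache.clear()
+ 10
+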
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: count of rows removed
+ :raises Timeout: if database timeout occurs
+
+ """
+ select = (
+ 'SELECT rowid, filename FROM Cache'
+ ' WHERE rowid > ?'
+ ' ORDER BY rowid LIMIT ?'
+ )
+ args = [0, 100]
+ return self._select_delete(select, args, retry=retry)
+
+
+ def _select_delete(self, select, args, row_index=0, arg_index=0,
+ retry=False):
+ count = 0
+ delete = 'DELETE FROM Cache WHERE rowid IN (%s)'
+
+ try:
+ while True:
+ with self._transact(retry) as (sql, cleanup):
+ rows = sql(select, args).fetchall()
+
+ if not rows:
+ break
+
+ count += len(rows)
+ sql(delete % ','.join(str(row[0]) for row in rows))
+
+ for row in rows:
+ args[arg_index] = row[row_index]
+ cleanup(row[-1])
+
+ except Timeout:
+ raise Timeout(count)
+
+ return count
+
+
+ def iterkeys(self, reverse=False):
+ """Iterate Cache keys in database sort order.
+
+ >>> cache = Cache()
+ >>> for key in [4, 1, 3, 0, 2]:
+ ... cache[key] = key
+ >>> list(cache.iterkeys())
+ [0, 1, 2, 3, 4]
+ >>> list(cache.iterkeys(reverse=True))
+ [4, 3, 2, 1, 0]
+
+ :param bool reverse: reverse sort order (default False)
+ :return: iterator of Cache keys
+
+ """
+ sql = self._sql
+ limit = 100
+ _disk_get = self._disk.get
+
+ if reverse:
+ select = (
+ 'SELECT key, raw FROM Cache'
+ ' ORDER BY key DESC, raw DESC LIMIT 1'
+ )
+ iterate = (
+ 'SELECT key, raw FROM Cache'
+ ' WHERE key = ? AND raw < ? OR key < ?'
+ ' ORDER BY key DESC, raw DESC LIMIT ?'
+ )
+ else:
+ select = (
+ 'SELECT key, raw FROM Cache'
+ ' ORDER BY key ASC, raw ASC LIMIT 1'
+ )
+ iterate = (
+ 'SELECT key, raw FROM Cache'
+ ' WHERE key = ? AND raw > ? OR key > ?'
+ ' ORDER BY key ASC, raw ASC LIMIT ?'
+ )
+
+ row = sql(select).fetchall()
+
+ if row:
+ (key, raw), = row
+ else:
+ return
+
+ yield _disk_get(key, raw)
+
+ while True:
+ rows = sql(iterate, (key, raw, key, limit)).fetchall()
+
+ if not rows:
+ break
+
+ for key, raw in rows:
+ yield _disk_get(key, raw)
+
+
+ def _iter(self, ascending=True):
+ sql = self._sql
+ rows = sql('SELECT MAX(rowid) FROM Cache').fetchall()
+ (max_rowid,), = rows
+ yield # Signal ready.
+
+ if max_rowid is None:
+ return
+
+ bound = max_rowid + 1
+ limit = 100
+ _disk_get = self._disk.get
+ rowid = 0 if ascending else bound
+ select = (
+ 'SELECT rowid, key, raw FROM Cache'
+ ' WHERE ? < rowid AND rowid < ?'
+ ' ORDER BY rowid %s LIMIT ?'
+ ) % ('ASC' if ascending else 'DESC')
+
+ while True:
+ if ascending:
+ args = (rowid, bound, limit)
+ else:
+ args = (0, rowid, limit)
+
+ rows = sql(select, args).fetchall()
+
+ if not rows:
+ break
+
+ for rowid, key, raw in rows:
+ yield _disk_get(key, raw)
+
+
+ def __iter__(self):
+ "Iterate keys in cache including expired items."
+ iterator = self._iter()
+ next(iterator)
+ return iterator
+
+
+ def __reversed__(self):
+ "Reverse iterate keys in cache including expired items."
+ iterator = self._iter(ascending=False)
+ next(iterator)
+ return iterator
+
+
+ def stats(self, enable=True, reset=False):
+ """Return cache statistics hits and misses.
+
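+ An illustrative sketch (assuming a fresh cache; the hit and miss
+ counts reflect only the ``get`` calls below):
+
+ >>> cache = Cache()
+ >>> _ = cache.stats(enable=True, reset=True)
+ >>> cache.set('key', 'value')
+ True
+ >>> for _ in range(10):
+ ... _ = cache.get('key')
+ >>> cache.get('missing')
+ >>> cache.stats()
+ (10, 1)
+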
+ :param bool enable: enable collecting statistics (default True)
+ :param bool reset: reset hits and misses to 0 (default False)
+ :return: (hits, misses)
+
+ """
+ # pylint: disable=E0203,W0201
+ result = (self.reset('hits'), self.reset('misses'))
+
+ if reset:
+ self.reset('hits', 0)
+ self.reset('misses', 0)
+
+ self.reset('statistics', enable)
+
+ return result
+
+
+ def volume(self):
+ """Return estimated total size of cache on disk.
+
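+ A minimal sketch (the exact size depends on SQLite page usage):
+
+ >>> cache = Cache()
+ >>> cache.volume() > 0
+ True
+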
+ :return: size in bytes
+
+ """
+ (page_count,), = self._sql('PRAGMA page_count').fetchall()
+ total_size = self._page_size * page_count + self.reset('size')
+ return total_size
+
+
+ def close(self):
+ """Close database connection.
+
+ """
+ con = getattr(self._local, 'con', None)
+
+ if con is None:
+ return
+
+ con.close()
+
+ try:
+ delattr(self._local, 'con')
+ except AttributeError:
+ pass
+
+
+ def __enter__(self):
+ # Create connection in thread.
+ connection = self._con # pylint: disable=unused-variable
+ return self
+
+
+ def __exit__(self, *exception):
+ self.close()
+
+
+ def __len__(self):
+ "Count of items in cache including expired items."
+ return self.reset('count')
+
+
+ def __getstate__(self):
+ return (self.directory, self.timeout, type(self.disk))
+
+
+ def __setstate__(self, state):
+ self.__init__(*state)
+
+
+ def reset(self, key, value=ENOVAL, update=True):
+ """Reset `key` and `value` item from Settings table.
+
+ Use `reset` to update the value of Cache settings correctly. Cache
+ settings are stored in the Settings table of the SQLite database. If
+ `update` is ``False`` then no attempt is made to update the database.
+
+ If `value` is not given, it is reloaded from the Settings
+ table. Otherwise, the Settings table is updated.
+
+ Settings with the ``disk_`` prefix correspond to Disk
+ attributes. Updating the value will change the unprefixed attribute on
+ the associated Disk instance.
+
+ Settings with the ``sqlite_`` prefix correspond to SQLite
+ pragmas. Updating the value will execute the corresponding PRAGMA
+ statement.
+
+ SQLite PRAGMA statements may be executed before the Settings table
+ exists in the database by setting `update` to ``False``.
+
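+ An illustrative sketch (assumes ``sqlite_mmap_size`` is among the
+ default SQLite pragma settings):
+
+ >>> cache = Cache()
+ >>> cache.reset('sqlite_mmap_size', 2 ** 26)
+ 67108864
+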
+ :param str key: Settings key for item
+ :param value: value for item (optional)
+ :param bool update: update database Settings table (default True)
+ :return: updated value for item
+ :raises Timeout: if database timeout occurs
+
+ """
+ sql = self._sql
+ sql_retry = self._sql_retry
+
+ if value is ENOVAL:
+ select = 'SELECT value FROM Settings WHERE key = ?'
+ (value,), = sql_retry(select, (key,)).fetchall()
+ setattr(self, key, value)
+ return value
+
+ if update:
+ statement = 'UPDATE Settings SET value = ? WHERE key = ?'
+ sql_retry(statement, (value, key))
+
+ if key.startswith('sqlite_'):
+ pragma = key[7:]
+
+ # 2016-02-17 GrantJ - PRAGMA and isolation_level=None
+ # don't always play nicely together. Retry setting the
+ # PRAGMA. I think some PRAGMA statements expect to
+ # immediately take an EXCLUSIVE lock on the database. I
+ # can't find any documentation for this but without the
+ # retry, stress will intermittently fail with multiple
+ # processes.
+
+ # 2018-11-05 GrantJ - Avoid setting pragma values that
+ # are already set. Pragma settings like auto_vacuum and
+ # journal_mode can take a long time or may not work after
+ # tables have been created.
+
+ start = time.time()
+ while True:
+ try:
+ try:
+ (old_value,), = sql('PRAGMA %s' % (pragma)).fetchall()
+ update = old_value != value
+ except ValueError:
+ update = True
+ if update:
+ sql('PRAGMA %s = %s' % (pragma, value)).fetchall()
+ break
+ except sqlite3.OperationalError as exc:
+ if str(exc) != 'database is locked':
+ raise
+ diff = time.time() - start
+ if diff > 60:
+ raise
+ time.sleep(0.001)
+ elif key.startswith('disk_'):
+ attr = key[5:]
+ setattr(self._disk, attr, value)
+
+ setattr(self, key, value)
+ return value
diff --git a/third_party/python/diskcache/diskcache/djangocache.py b/third_party/python/diskcache/diskcache/djangocache.py
new file mode 100644
index 0000000000..997b852406
--- /dev/null
+++ b/third_party/python/diskcache/diskcache/djangocache.py
@@ -0,0 +1,433 @@
+"Django-compatible disk and file backed cache."
+
+from functools import wraps
+from django.core.cache.backends.base import BaseCache
+
+try:
+ from django.core.cache.backends.base import DEFAULT_TIMEOUT
+except ImportError:
+ # For older versions of Django simply use 300 seconds.
+ DEFAULT_TIMEOUT = 300
+
+from .core import ENOVAL, args_to_key, full_name
+from .fanout import FanoutCache
+
+
+class DjangoCache(BaseCache):
+ "Django-compatible disk and file backed cache."
+ def __init__(self, directory, params):
+ """Initialize DjangoCache instance.
+
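+ A typical Django ``CACHES`` entry looks like the following sketch;
+ the ``size_limit`` option is an assumption here, and any FanoutCache
+ setting may appear under ``OPTIONS``::
+
+ CACHES = {
+ 'default': {
+ 'BACKEND': 'diskcache.DjangoCache',
+ 'LOCATION': '/path/to/cache/directory',
+ 'SHARDS': 8,
+ 'DATABASE_TIMEOUT': 0.010,
+ 'OPTIONS': {'size_limit': 2 ** 30},
+ },
+ }
+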
+ :param str directory: cache directory
+ :param dict params: cache parameters
+
+ """
+ super(DjangoCache, self).__init__(params)
+ shards = params.get('SHARDS', 8)
+ timeout = params.get('DATABASE_TIMEOUT', 0.010)
+ options = params.get('OPTIONS', {})
+ self._cache = FanoutCache(directory, shards, timeout, **options)
+
+
+ @property
+ def directory(self):
+ """Cache directory."""
+ return self._cache.directory
+
+
+ def cache(self, name):
+ """Return Cache with given `name` in subdirectory.
+
+ :param str name: subdirectory name for Cache
+ :return: Cache with given name
+
+ """
+ return self._cache.cache(name)
+
+
+ def deque(self, name):
+ """Return Deque with given `name` in subdirectory.
+
+ :param str name: subdirectory name for Deque
+ :return: Deque with given name
+
+ """
+ return self._cache.deque(name)
+
+
+ def index(self, name):
+ """Return Index with given `name` in subdirectory.
+
+ :param str name: subdirectory name for Index
+ :return: Index with given name
+
+ """
+ return self._cache.index(name)
+
+
+ def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
+ read=False, tag=None, retry=True):
+ """Set a value in the cache if the key does not already exist. If
+ timeout is given, that timeout will be used for the key; otherwise the
+ default cache timeout will be used.
+
+ Return True if the value was stored, False otherwise.
+
+ :param key: key for item
+ :param value: value for item
+ :param float timeout: seconds until the item expires
+ (default 300 seconds)
+ :param int version: key version number (default None, cache parameter)
+ :param bool read: read value as bytes from file (default False)
+ :param str tag: text to associate with key (default None)
+ :param bool retry: retry if database timeout occurs (default True)
+ :return: True if item was added
+
+ """
+ # pylint: disable=arguments-differ
+ key = self.make_key(key, version=version)
+ timeout = self.get_backend_timeout(timeout=timeout)
+ return self._cache.add(key, value, timeout, read, tag, retry)
+
+
+ def get(self, key, default=None, version=None, read=False,
+ expire_time=False, tag=False, retry=False):
+ """Fetch a given key from the cache. If the key does not exist, return
+ default, which itself defaults to None.
+
+ :param key: key for item
+ :param default: return value if key is missing (default None)
+ :param int version: key version number (default None, cache parameter)
+ :param bool read: if True, return file handle to value
+ (default False)
+ :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param bool tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: value for item if key is found else default
+
+ """
+ # pylint: disable=arguments-differ
+ key = self.make_key(key, version=version)
+ return self._cache.get(key, default, read, expire_time, tag, retry)
+
+
+ def read(self, key, version=None):
+ """Return file handle corresponding to `key` from Cache.
+
+ :param key: Python key to retrieve
+ :param int version: key version number (default None, cache parameter)
+ :return: file open for reading in binary mode
+ :raises KeyError: if key is not found
+
+ """
+ key = self.make_key(key, version=version)
+ return self._cache.read(key)
+
+
+ def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None,
+ read=False, tag=None, retry=True):
+ """Set a value in the cache. If timeout is given, that timeout will be
+ used for the key; otherwise the default cache timeout will be used.
+
+ :param key: key for item
+ :param value: value for item
+ :param float timeout: seconds until the item expires
+ (default 300 seconds)
+ :param int version: key version number (default None, cache parameter)
+ :param bool read: read value as bytes from file (default False)
+ :param str tag: text to associate with key (default None)
+ :param bool retry: retry if database timeout occurs (default True)
+ :return: True if item was set
+
+ """
+ # pylint: disable=arguments-differ
+ key = self.make_key(key, version=version)
+ timeout = self.get_backend_timeout(timeout=timeout)
+ return self._cache.set(key, value, timeout, read, tag, retry)
+
+
+ def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None, retry=True):
+ """Touch a key in the cache. If timeout is given, that timeout will be
+ used for the key; otherwise the default cache timeout will be used.
+
+ :param key: key for item
+ :param float timeout: seconds until the item expires
+ (default 300 seconds)
+ :param int version: key version number (default None, cache parameter)
+ :param bool retry: retry if database timeout occurs (default True)
+ :return: True if key was touched
+
+ """
+ # pylint: disable=arguments-differ
+ key = self.make_key(key, version=version)
+ timeout = self.get_backend_timeout(timeout=timeout)
+ return self._cache.touch(key, timeout, retry)
+
+
+ def pop(self, key, default=None, version=None, expire_time=False,
+ tag=False, retry=True):
+ """Remove corresponding item for `key` from cache and return value.
+
+ If `key` is missing, return `default`.
+
+ Operation is atomic. Concurrent operations will be serialized.
+
+ :param key: key for item
+ :param default: return value if key is missing (default None)
+ :param int version: key version number (default None, cache parameter)
+        :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default True)
+ :return: value for item if key is found else default
+
+ """
+ key = self.make_key(key, version=version)
+ return self._cache.pop(key, default, expire_time, tag, retry)
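+
+    # Illustrative sketch (mirroring the docstring, not upstream code):
+    # when expire_time and tag are both True, the value is returned as a
+    # tuple rather than bare:
+    #
+    #     value, expire_time, tag = cache.pop('key', expire_time=True, tag=True)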
+
+
+ def delete(self, key, version=None, retry=True):
+ """Delete a key from the cache, failing silently.
+
+ :param key: key for item
+ :param int version: key version number (default None, cache parameter)
+ :param bool retry: retry if database timeout occurs (default True)
+ :return: True if item was deleted
+
+ """
+ # pylint: disable=arguments-differ
+ key = self.make_key(key, version=version)
+        return self._cache.delete(key, retry)
+
+
+ def incr(self, key, delta=1, version=None, default=None, retry=True):
+ """Increment value by delta for item with key.
+
+        If key is missing and default is None then raise ValueError. Else if key
+ is missing and default is not None then use default for value.
+
+ Operation is atomic. All concurrent increment operations will be
+ counted individually.
+
+ Assumes value may be stored in a SQLite column. Most builds that target
+ machines with 64-bit pointer widths will support 64-bit signed
+ integers.
+
+ :param key: key for item
+ :param int delta: amount to increment (default 1)
+ :param int version: key version number (default None, cache parameter)
+ :param int default: value if key is missing (default None)
+ :param bool retry: retry if database timeout occurs (default True)
+ :return: new value for item on success else None
+ :raises ValueError: if key is not found and default is None
+
+ """
+ # pylint: disable=arguments-differ
+ key = self.make_key(key, version=version)
+ try:
+ return self._cache.incr(key, delta, default, retry)
+ except KeyError:
+ raise ValueError("Key '%s' not found" % key)
+
+
+ def decr(self, key, delta=1, version=None, default=None, retry=True):
+ """Decrement value by delta for item with key.
+
+        If key is missing and default is None then raise ValueError. Else if key
+ is missing and default is not None then use default for value.
+
+ Operation is atomic. All concurrent decrement operations will be
+ counted individually.
+
+ Unlike Memcached, negative values are supported. Value may be
+ decremented below zero.
+
+ Assumes value may be stored in a SQLite column. Most builds that target
+ machines with 64-bit pointer widths will support 64-bit signed
+ integers.
+
+ :param key: key for item
+ :param int delta: amount to decrement (default 1)
+ :param int version: key version number (default None, cache parameter)
+ :param int default: value if key is missing (default None)
+ :param bool retry: retry if database timeout occurs (default True)
+ :return: new value for item on success else None
+ :raises ValueError: if key is not found and default is None
+
+ """
+ # pylint: disable=arguments-differ
+ return self.incr(key, -delta, version, default, retry)
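+
+    # Illustrative sketch ('hits' is a hypothetical key): passing a default
+    # avoids the ValueError raised for missing keys:
+    #
+    #     cache.incr('hits', default=0)  # creates the counter when missing
+    #     cache.decr('hits')             # may go below zero, unlike Memcached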
+
+
+ def has_key(self, key, version=None):
+ """Returns True if the key is in the cache and has not expired.
+
+ :param key: key for item
+ :param int version: key version number (default None, cache parameter)
+ :return: True if key is found
+
+ """
+ key = self.make_key(key, version=version)
+ return key in self._cache
+
+
+ def expire(self):
+ """Remove expired items from cache.
+
+ :return: count of items removed
+
+ """
+ return self._cache.expire()
+
+
+ def stats(self, enable=True, reset=False):
+ """Return cache statistics hits and misses.
+
+ :param bool enable: enable collecting statistics (default True)
+ :param bool reset: reset hits and misses to 0 (default False)
+ :return: (hits, misses)
+
+ """
+ return self._cache.stats(enable=enable, reset=reset)
+
+
+ def create_tag_index(self):
+ """Create tag index on cache database.
+
+ Better to initialize cache with `tag_index=True` than use this.
+
+ :raises Timeout: if database timeout occurs
+
+ """
+ self._cache.create_tag_index()
+
+
+ def drop_tag_index(self):
+ """Drop tag index on cache database.
+
+ :raises Timeout: if database timeout occurs
+
+ """
+ self._cache.drop_tag_index()
+
+
+ def evict(self, tag):
+ """Remove items with matching `tag` from cache.
+
+ :param str tag: tag identifying items
+ :return: count of items removed
+
+ """
+ return self._cache.evict(tag)
+
+
+ def cull(self):
+ """Cull items from cache until volume is less than size limit.
+
+ :return: count of items removed
+
+ """
+ return self._cache.cull()
+
+
+ def clear(self):
+ "Remove *all* values from the cache at once."
+ return self._cache.clear()
+
+
+ def close(self, **kwargs):
+ "Close the cache connection."
+ # pylint: disable=unused-argument
+ self._cache.close()
+
+
+ def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
+ """Return seconds to expiration.
+
+ :param float timeout: seconds until the item expires
+            (default 300 seconds)
+        :return: seconds until expiration, -1 to expire immediately when
+            timeout is zero, or None to cache forever
+
+ """
+ if timeout == DEFAULT_TIMEOUT:
+ timeout = self.default_timeout
+ elif timeout == 0:
+ # ticket 21147 - avoid time.time() related precision issues
+ timeout = -1
+ return None if timeout is None else timeout
+
+
+ def memoize(self, name=None, timeout=DEFAULT_TIMEOUT, version=None,
+ typed=False, tag=None):
+ """Memoizing cache decorator.
+
+ Decorator to wrap callable with memoizing function using cache.
+ Repeated calls with the same arguments will lookup result in cache and
+ avoid function evaluation.
+
+ If name is set to None (default), the callable name will be determined
+ automatically.
+
+ When timeout is set to zero, function results will not be set in the
+ cache. Cache lookups still occur, however. Read
+ :doc:`case-study-landing-page-caching` for example usage.
+
+ If typed is set to True, function arguments of different types will be
+ cached separately. For example, f(3) and f(3.0) will be treated as
+ distinct calls with distinct results.
+
+ The original underlying function is accessible through the __wrapped__
+ attribute. This is useful for introspection, for bypassing the cache,
+ or for rewrapping the function with a different cache.
+
+ An additional `__cache_key__` attribute can be used to generate the
+ cache key used for the given arguments.
+
+ Remember to call memoize when decorating a callable. If you forget,
+ then a TypeError will occur.
+
+ :param str name: name given for callable (default None, automatic)
+ :param float timeout: seconds until the item expires
+ (default 300 seconds)
+ :param int version: key version number (default None, cache parameter)
+ :param bool typed: cache different types separately (default False)
+ :param str tag: text to associate with arguments (default None)
+ :return: callable decorator
+
+ """
+ # Caution: Nearly identical code exists in Cache.memoize
+ if callable(name):
+ raise TypeError('name cannot be callable')
+
+ def decorator(func):
+ "Decorator created by memoize() for callable `func`."
+ base = (full_name(func),) if name is None else (name,)
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ "Wrapper for callable to cache arguments and return values."
+ key = wrapper.__cache_key__(*args, **kwargs)
+ result = self.get(key, ENOVAL, version, retry=True)
+
+ if result is ENOVAL:
+ result = func(*args, **kwargs)
+ valid_timeout = (
+ timeout is None
+ or timeout == DEFAULT_TIMEOUT
+ or timeout > 0
+ )
+ if valid_timeout:
+ self.set(
+ key, result, timeout, version, tag=tag, retry=True,
+ )
+
+ return result
+
+ def __cache_key__(*args, **kwargs):
+ "Make key for cache given function arguments."
+ return args_to_key(base, args, kwargs, typed)
+
+ wrapper.__cache_key__ = __cache_key__
+ return wrapper
+
+ return decorator
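+
+
+# A minimal usage sketch (illustrative; assumes a configured DjangoCache
+# instance named `cache`):
+#
+#     @cache.memoize(timeout=60)
+#     def expensive(x):
+#         return x ** 2
+#
+#     expensive(4)                      # computed once, then served from cache
+#     key = expensive.__cache_key__(4)  # cache key used for these arguments
+#     expensive.__wrapped__(4)          # bypasses the cache entirely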
diff --git a/third_party/python/diskcache/diskcache/fanout.py b/third_party/python/diskcache/diskcache/fanout.py
new file mode 100644
index 0000000000..8a0a722ae6
--- /dev/null
+++ b/third_party/python/diskcache/diskcache/fanout.py
@@ -0,0 +1,677 @@
+"Fanout cache automatically shards keys and values."
+
+import itertools as it
+import operator
+import os.path as op
+import sqlite3
+import sys
+import tempfile
+import time
+
+from .core import ENOVAL, DEFAULT_SETTINGS, Cache, Disk, Timeout
+from .persistent import Deque, Index
+
+############################################################################
+# BEGIN Python 2/3 Shims
+############################################################################
+
+if sys.hexversion >= 0x03000000:
+ from functools import reduce
+
+############################################################################
+# END Python 2/3 Shims
+############################################################################
+
+
+class FanoutCache(object):
+ "Cache that shards keys and values."
+ def __init__(self, directory=None, shards=8, timeout=0.010, disk=Disk,
+ **settings):
+ """Initialize cache instance.
+
+ :param str directory: cache directory
+ :param int shards: number of shards to distribute writes
+ :param float timeout: SQLite connection timeout
+ :param disk: `Disk` instance for serialization
+ :param settings: any of `DEFAULT_SETTINGS`
+
+ """
+ if directory is None:
+ directory = tempfile.mkdtemp(prefix='diskcache-')
+ directory = op.expanduser(directory)
+ directory = op.expandvars(directory)
+
+ default_size_limit = DEFAULT_SETTINGS['size_limit']
+ size_limit = settings.pop('size_limit', default_size_limit) / shards
+
+ self._count = shards
+ self._directory = directory
+ self._shards = tuple(
+ Cache(
+ directory=op.join(directory, '%03d' % num),
+ timeout=timeout,
+ disk=disk,
+ size_limit=size_limit,
+ **settings
+ )
+ for num in range(shards)
+ )
+ self._hash = self._shards[0].disk.hash
+ self._caches = {}
+ self._deques = {}
+ self._indexes = {}
+
+
+ @property
+ def directory(self):
+ """Cache directory."""
+ return self._directory
+
+
+ def __getattr__(self, name):
+ return getattr(self._shards[0], name)
+
+
+ def set(self, key, value, expire=None, read=False, tag=None, retry=False):
+ """Set `key` and `value` item in cache.
+
+ When `read` is `True`, `value` should be a file-like object opened
+ for reading in binary mode.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param key: key for item
+ :param value: value for item
+ :param float expire: seconds until the key expires
+ (default None, no expiry)
+ :param bool read: read value as raw bytes from file (default False)
+ :param str tag: text to associate with key (default None)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: True if item was set
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ try:
+ return shard.set(key, value, expire, read, tag, retry)
+ except Timeout:
+ return False
+
+
+ def __setitem__(self, key, value):
+ """Set `key` and `value` item in cache.
+
+ Calls :func:`FanoutCache.set` internally with `retry` set to `True`.
+
+ :param key: key for item
+ :param value: value for item
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ shard[key] = value
+
+
+ def touch(self, key, expire=None, retry=False):
+ """Touch `key` in cache and update `expire` time.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param key: key for item
+ :param float expire: seconds until the key expires
+ (default None, no expiry)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: True if key was touched
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ try:
+ return shard.touch(key, expire, retry)
+ except Timeout:
+ return False
+
+
+ def add(self, key, value, expire=None, read=False, tag=None, retry=False):
+ """Add `key` and `value` item to cache.
+
+ Similar to `set`, but only add to cache if key not present.
+
+ This operation is atomic. Only one concurrent add operation for given
+ key from separate threads or processes will succeed.
+
+ When `read` is `True`, `value` should be a file-like object opened
+ for reading in binary mode.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param key: key for item
+ :param value: value for item
+ :param float expire: seconds until the key expires
+ (default None, no expiry)
+ :param bool read: read value as bytes from file (default False)
+ :param str tag: text to associate with key (default None)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: True if item was added
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ try:
+ return shard.add(key, value, expire, read, tag, retry)
+ except Timeout:
+ return False
+
+
+ def incr(self, key, delta=1, default=0, retry=False):
+ """Increment value by delta for item with key.
+
+ If key is missing and default is None then raise KeyError. Else if key
+ is missing and default is not None then use default for value.
+
+ Operation is atomic. All concurrent increment operations will be
+ counted individually.
+
+ Assumes value may be stored in a SQLite column. Most builds that target
+ machines with 64-bit pointer widths will support 64-bit signed
+ integers.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param key: key for item
+ :param int delta: amount to increment (default 1)
+ :param int default: value if key is missing (default 0)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: new value for item on success else None
+ :raises KeyError: if key is not found and default is None
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ try:
+ return shard.incr(key, delta, default, retry)
+ except Timeout:
+ return None
+
+
+ def decr(self, key, delta=1, default=0, retry=False):
+ """Decrement value by delta for item with key.
+
+ If key is missing and default is None then raise KeyError. Else if key
+ is missing and default is not None then use default for value.
+
+ Operation is atomic. All concurrent decrement operations will be
+ counted individually.
+
+ Unlike Memcached, negative values are supported. Value may be
+ decremented below zero.
+
+ Assumes value may be stored in a SQLite column. Most builds that target
+ machines with 64-bit pointer widths will support 64-bit signed
+ integers.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param key: key for item
+ :param int delta: amount to decrement (default 1)
+ :param int default: value if key is missing (default 0)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: new value for item on success else None
+ :raises KeyError: if key is not found and default is None
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ try:
+ return shard.decr(key, delta, default, retry)
+ except Timeout:
+ return None
+
+
+ def get(self, key, default=None, read=False, expire_time=False, tag=False,
+ retry=False):
+ """Retrieve value from cache. If `key` is missing, return `default`.
+
+ If database timeout occurs then returns `default` unless `retry` is set
+ to `True` (default `False`).
+
+ :param key: key for item
+ :param default: return value if key is missing (default None)
+ :param bool read: if True, return file handle to value
+ (default False)
+        :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: value for item if key is found else default
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
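+        # Note: unlike the other shard operations, reads also swallow
+        # sqlite3.OperationalError, so a failed lookup degrades to
+        # returning `default` instead of raising.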
+ try:
+ return shard.get(key, default, read, expire_time, tag, retry)
+ except (Timeout, sqlite3.OperationalError):
+ return default
+
+
+ def __getitem__(self, key):
+ """Return corresponding value for `key` from cache.
+
+ Calls :func:`FanoutCache.get` internally with `retry` set to `True`.
+
+ :param key: key for item
+ :return: value for item
+ :raises KeyError: if key is not found
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ return shard[key]
+
+
+ def read(self, key):
+ """Return file handle corresponding to `key` from cache.
+
+ :param key: key for item
+ :return: file open for reading in binary mode
+ :raises KeyError: if key is not found
+
+ """
+ handle = self.get(key, default=ENOVAL, read=True, retry=True)
+ if handle is ENOVAL:
+ raise KeyError(key)
+ return handle
+
+
+ def __contains__(self, key):
+ """Return `True` if `key` matching item is found in cache.
+
+ :param key: key for item
+ :return: True if key is found
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ return key in shard
+
+
+ def pop(self, key, default=None, expire_time=False, tag=False, retry=False):
+ """Remove corresponding item for `key` from cache and return value.
+
+ If `key` is missing, return `default`.
+
+ Operation is atomic. Concurrent operations will be serialized.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param key: key for item
+ :param default: return value if key is missing (default None)
+        :param bool expire_time: if True, return expire_time in tuple
+ (default False)
+ :param tag: if True, return tag in tuple (default False)
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: value for item if key is found else default
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ try:
+ return shard.pop(key, default, expire_time, tag, retry)
+ except Timeout:
+ return default
+
+
+ def delete(self, key, retry=False):
+ """Delete corresponding item for `key` from cache.
+
+ Missing keys are ignored.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param key: key for item
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: True if item was deleted
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ try:
+ return shard.delete(key, retry)
+ except Timeout:
+ return False
+
+
+ def __delitem__(self, key):
+ """Delete corresponding item for `key` from cache.
+
+ Calls :func:`FanoutCache.delete` internally with `retry` set to `True`.
+
+ :param key: key for item
+ :raises KeyError: if key is not found
+
+ """
+ index = self._hash(key) % self._count
+ shard = self._shards[index]
+ del shard[key]
+
+
+ def check(self, fix=False, retry=False):
+ """Check database and file system consistency.
+
+ Intended for use in testing and post-mortem error analysis.
+
+ While checking the cache table for consistency, a writer lock is held
+ on the database. The lock blocks other cache clients from writing to
+ the database. For caches with many file references, the lock may be
+ held for a long time. For example, local benchmarking shows that a
+ cache with 1,000 file references takes ~60ms to check.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param bool fix: correct inconsistencies
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: list of warnings
+ :raises Timeout: if database timeout occurs
+
+ """
+ warnings = (shard.check(fix, retry) for shard in self._shards)
+ return reduce(operator.iadd, warnings, [])
+
+
+ def expire(self, retry=False):
+ """Remove expired items from cache.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: count of items removed
+
+ """
+ return self._remove('expire', args=(time.time(),), retry=retry)
+
+
+ def create_tag_index(self):
+ """Create tag index on cache database.
+
+ Better to initialize cache with `tag_index=True` than use this.
+
+ :raises Timeout: if database timeout occurs
+
+ """
+ for shard in self._shards:
+ shard.create_tag_index()
+
+
+ def drop_tag_index(self):
+ """Drop tag index on cache database.
+
+ :raises Timeout: if database timeout occurs
+
+ """
+ for shard in self._shards:
+ shard.drop_tag_index()
+
+
+ def evict(self, tag, retry=False):
+ """Remove items with matching `tag` from cache.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param str tag: tag identifying items
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: count of items removed
+
+ """
+ return self._remove('evict', args=(tag,), retry=retry)
+
+
+ def cull(self, retry=False):
+ """Cull items from cache until volume is less than size limit.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: count of items removed
+
+ """
+ return self._remove('cull', retry=retry)
+
+
+ def clear(self, retry=False):
+ """Remove all items from cache.
+
+ If database timeout occurs then fails silently unless `retry` is set to
+ `True` (default `False`).
+
+ :param bool retry: retry if database timeout occurs (default False)
+ :return: count of items removed
+
+ """
+ return self._remove('clear', retry=retry)
+
+
+ def _remove(self, name, args=(), retry=False):
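+        """Apply remove-style method `name` on every shard and sum counts.
+
+        When a shard raises Timeout, the partial count carried in
+        Timeout.args[0] is accumulated and the shard is retried until the
+        operation completes.
+        """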
+ total = 0
+ for shard in self._shards:
+ method = getattr(shard, name)
+ while True:
+ try:
+ count = method(*args, retry=retry)
+ total += count
+ except Timeout as timeout:
+ total += timeout.args[0]
+ else:
+ break
+ return total
+
+
+ def stats(self, enable=True, reset=False):
+ """Return cache statistics hits and misses.
+
+ :param bool enable: enable collecting statistics (default True)
+ :param bool reset: reset hits and misses to 0 (default False)
+ :return: (hits, misses)
+
+ """
+ results = [shard.stats(enable, reset) for shard in self._shards]
+ total_hits = sum(hits for hits, _ in results)
+ total_misses = sum(misses for _, misses in results)
+ return total_hits, total_misses
+
+
+ def volume(self):
+ """Return estimated total size of cache on disk.
+
+ :return: size in bytes
+
+ """
+ return sum(shard.volume() for shard in self._shards)
+
+
+ def close(self):
+ "Close database connection."
+ for shard in self._shards:
+ shard.close()
+ self._caches.clear()
+ self._deques.clear()
+ self._indexes.clear()
+
+
+ def __enter__(self):
+ return self
+
+
+ def __exit__(self, *exception):
+ self.close()
+
+
+ def __getstate__(self):
+ return (self._directory, self._count, self.timeout, type(self.disk))
+
+
+ def __setstate__(self, state):
+ self.__init__(*state)
+
+
+ def __iter__(self):
+ "Iterate keys in cache including expired items."
+ iterators = (iter(shard) for shard in self._shards)
+ return it.chain.from_iterable(iterators)
+
+
+ def __reversed__(self):
+ "Reverse iterate keys in cache including expired items."
+ iterators = (reversed(shard) for shard in reversed(self._shards))
+ return it.chain.from_iterable(iterators)
+
+
+ def __len__(self):
+ "Count of items in cache including expired items."
+ return sum(len(shard) for shard in self._shards)
+
+
+ def reset(self, key, value=ENOVAL):
+ """Reset `key` and `value` item from Settings table.
+
+ If `value` is not given, it is reloaded from the Settings
+ table. Otherwise, the Settings table is updated.
+
+ Settings attributes on cache objects are lazy-loaded and
+ read-only. Use `reset` to update the value.
+
+ Settings with the ``sqlite_`` prefix correspond to SQLite
+ pragmas. Updating the value will execute the corresponding PRAGMA
+ statement.
+
+ :param str key: Settings key for item
+ :param value: value for item (optional)
+ :return: updated value for item
+
+ """
+ for shard in self._shards:
+ while True:
+ try:
+ result = shard.reset(key, value)
+ except Timeout:
+ pass
+ else:
+ break
+ return result
+
+
+ def cache(self, name):
+ """Return Cache with given `name` in subdirectory.
+
+ >>> fanout_cache = FanoutCache()
+ >>> cache = fanout_cache.cache('test')
+ >>> cache.set('abc', 123)
+ True
+ >>> cache.get('abc')
+ 123
+ >>> len(cache)
+ 1
+ >>> cache.delete('abc')
+ True
+
+ :param str name: subdirectory name for Cache
+ :return: Cache with given name
+
+ """
+ _caches = self._caches
+
+ try:
+ return _caches[name]
+ except KeyError:
+ parts = name.split('/')
+ directory = op.join(self._directory, 'cache', *parts)
+ temp = Cache(directory=directory)
+ _caches[name] = temp
+ return temp
+
+
+ def deque(self, name):
+ """Return Deque with given `name` in subdirectory.
+
+ >>> cache = FanoutCache()
+ >>> deque = cache.deque('test')
+ >>> deque.extend('abc')
+ >>> deque.popleft()
+ 'a'
+ >>> deque.pop()
+ 'c'
+ >>> len(deque)
+ 1
+
+ :param str name: subdirectory name for Deque
+ :return: Deque with given name
+
+ """
+ _deques = self._deques
+
+ try:
+ return _deques[name]
+ except KeyError:
+ parts = name.split('/')
+ directory = op.join(self._directory, 'deque', *parts)
+ temp = Deque(directory=directory)
+ _deques[name] = temp
+ return temp
+
+
+ def index(self, name):
+ """Return Index with given `name` in subdirectory.
+
+ >>> cache = FanoutCache()
+ >>> index = cache.index('test')
+ >>> index['abc'] = 123
+ >>> index['def'] = 456
+ >>> index['ghi'] = 789
+ >>> index.popitem()
+ ('ghi', 789)
+ >>> del index['abc']
+ >>> len(index)
+ 1
+ >>> index['def']
+ 456
+
+ :param str name: subdirectory name for Index
+ :return: Index with given name
+
+ """
+ _indexes = self._indexes
+
+ try:
+ return _indexes[name]
+ except KeyError:
+ parts = name.split('/')
+ directory = op.join(self._directory, 'index', *parts)
+ temp = Index(directory)
+ _indexes[name] = temp
+ return temp
+
+
+############################################################################
+# BEGIN Python 2/3 Shims
+############################################################################
+
+if sys.hexversion < 0x03000000:
+ import types
+ memoize_func = Cache.__dict__['memoize'] # pylint: disable=invalid-name
+ FanoutCache.memoize = types.MethodType(memoize_func, None, FanoutCache)
+else:
+ FanoutCache.memoize = Cache.memoize
+
+############################################################################
+# END Python 2/3 Shims
+############################################################################
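+
+
+# Illustrative note: after the shim above, FanoutCache exposes the same
+# memoizing decorator as Cache, e.g. (sketch, default arguments only):
+#
+#     cache = FanoutCache()
+#     @cache.memoize()
+#     def fibonacci(n):
+#         return n if n < 2 else fibonacci(n - 1) + fibonacci(n - 2)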
diff --git a/third_party/python/diskcache/diskcache/persistent.py b/third_party/python/diskcache/diskcache/persistent.py
new file mode 100644
index 0000000000..961f77361f
--- /dev/null
+++ b/third_party/python/diskcache/diskcache/persistent.py
@@ -0,0 +1,1403 @@
+"""Persistent Data Types
+
+"""
+
+import operator as op
+import sys
+
+from collections import OrderedDict
+from contextlib import contextmanager
+from shutil import rmtree
+
+from .core import BytesType, Cache, ENOVAL, TextType
+
+############################################################################
+# BEGIN Python 2/3 Shims
+############################################################################
+
+try:
+ from collections.abc import MutableMapping, Sequence
+ from collections.abc import KeysView, ValuesView, ItemsView
+except ImportError:
+ from collections import MutableMapping, Sequence
+ from collections import KeysView, ValuesView, ItemsView
+
+if sys.hexversion < 0x03000000:
+ from itertools import izip as zip # pylint: disable=redefined-builtin,no-name-in-module,ungrouped-imports
+ range = xrange # pylint: disable=redefined-builtin,invalid-name,undefined-variable
+
+############################################################################
+# END Python 2/3 Shims
+############################################################################
+
+
+def _make_compare(seq_op, doc):
+ "Make compare method with Sequence semantics."
+ def compare(self, that):
+ "Compare method for deque and sequence."
+ if not isinstance(that, Sequence):
+ return NotImplemented
+
+ len_self = len(self)
+ len_that = len(that)
+
+ if len_self != len_that:
+ if seq_op is op.eq:
+ return False
+ if seq_op is op.ne:
+ return True
+
+ for alpha, beta in zip(self, that):
+ if alpha != beta:
+ return seq_op(alpha, beta)
+
+ return seq_op(len_self, len_that)
+
+ compare.__name__ = '__{0}__'.format(seq_op.__name__)
+ doc_str = 'Return True if and only if deque is {0} `that`.'
+ compare.__doc__ = doc_str.format(doc)
+
+ return compare
+
+
+class Deque(Sequence):
+ """Persistent sequence with double-ended queue semantics.
+
+ Double-ended queue is an ordered collection with optimized access at its
+ endpoints.
+
+ Items are serialized to disk. Deque may be initialized from directory path
+ where items are stored.
+
+ >>> deque = Deque()
+ >>> deque += range(5)
+ >>> list(deque)
+ [0, 1, 2, 3, 4]
+ >>> for value in range(5):
+ ... deque.appendleft(-value)
+ >>> len(deque)
+ 10
+ >>> list(deque)
+ [-4, -3, -2, -1, 0, 0, 1, 2, 3, 4]
+ >>> deque.pop()
+ 4
+ >>> deque.popleft()
+ -4
+ >>> deque.reverse()
+ >>> list(deque)
+ [3, 2, 1, 0, 0, -1, -2, -3]
+
+ """
+ def __init__(self, iterable=(), directory=None):
+ """Initialize deque instance.
+
+        If directory is None then a temporary directory is created. The
+        directory will *not* be automatically removed.
+
+ :param iterable: iterable of items to append to deque
+ :param directory: deque directory (default None)
+
+ """
+ self._cache = Cache(directory, eviction_policy='none')
+ with self.transact():
+ self.extend(iterable)
+
+
+ @classmethod
+ def fromcache(cls, cache, iterable=()):
+ """Initialize deque using `cache`.
+
+ >>> cache = Cache()
+ >>> deque = Deque.fromcache(cache, [5, 6, 7, 8])
+ >>> deque.cache is cache
+ True
+ >>> len(deque)
+ 4
+ >>> 7 in deque
+ True
+ >>> deque.popleft()
+ 5
+
+ :param Cache cache: cache to use
+ :param iterable: iterable of items
+ :return: initialized Deque
+
+ """
+ # pylint: disable=no-member,protected-access
+ self = cls.__new__(cls)
+ self._cache = cache
+ self.extend(iterable)
+ return self
+
+
+ @property
+ def cache(self):
+ "Cache used by deque."
+ return self._cache
+
+
+ @property
+ def directory(self):
+ "Directory path where deque is stored."
+ return self._cache.directory
+
+
+ def _index(self, index, func):
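+        """Call `func` with the cache key at position `index`.
+
+        Scans keys from the nearest end: forward for non-negative indices,
+        in reverse for negative ones. Keys deleted concurrently (KeyError)
+        are skipped, so cost is linear in the distance from the chosen end.
+        """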
+ len_self = len(self)
+
+ if index >= 0:
+ if index >= len_self:
+ raise IndexError('deque index out of range')
+
+ for key in self._cache.iterkeys():
+ if index == 0:
+ try:
+ return func(key)
+ except KeyError:
+ continue
+ index -= 1
+ else:
+ if index < -len_self:
+ raise IndexError('deque index out of range')
+
+ index += 1
+
+ for key in self._cache.iterkeys(reverse=True):
+ if index == 0:
+ try:
+ return func(key)
+ except KeyError:
+ continue
+ index += 1
+
+ raise IndexError('deque index out of range')
+
+
+ def __getitem__(self, index):
+ """deque.__getitem__(index) <==> deque[index]
+
+ Return corresponding item for `index` in deque.
+
+ See also `Deque.peekleft` and `Deque.peek` for indexing deque at index
+ ``0`` or ``-1``.
+
+ >>> deque = Deque()
+ >>> deque.extend('abcde')
+ >>> deque[1]
+ 'b'
+ >>> deque[-2]
+ 'd'
+
+ :param int index: index of item
+ :return: corresponding item
+ :raises IndexError: if index out of range
+
+ """
+ return self._index(index, self._cache.__getitem__)
+
+
+ def __setitem__(self, index, value):
+ """deque.__setitem__(index, value) <==> deque[index] = value
+
+ Store `value` in deque at `index`.
+
+ >>> deque = Deque()
+ >>> deque.extend([None] * 3)
+ >>> deque[0] = 'a'
+ >>> deque[1] = 'b'
+ >>> deque[-1] = 'c'
+ >>> ''.join(deque)
+ 'abc'
+
+ :param int index: index of value
+ :param value: value to store
+ :raises IndexError: if index out of range
+
+ """
+ set_value = lambda key: self._cache.__setitem__(key, value)
+ self._index(index, set_value)
+
+
+ def __delitem__(self, index):
+ """deque.__delitem__(index) <==> del deque[index]
+
+ Delete item in deque at `index`.
+
+ >>> deque = Deque()
+ >>> deque.extend([None] * 3)
+ >>> del deque[0]
+ >>> del deque[1]
+ >>> del deque[-1]
+ >>> len(deque)
+ 0
+
+ :param int index: index of item
+ :raises IndexError: if index out of range
+
+ """
+ self._index(index, self._cache.__delitem__)
+
+
+ def __repr__(self):
+ """deque.__repr__() <==> repr(deque)
+
+ Return string with printable representation of deque.
+
+ """
+ name = type(self).__name__
+ return '{0}(directory={1!r})'.format(name, self.directory)
+
+
+ __eq__ = _make_compare(op.eq, 'equal to')
+ __ne__ = _make_compare(op.ne, 'not equal to')
+ __lt__ = _make_compare(op.lt, 'less than')
+ __gt__ = _make_compare(op.gt, 'greater than')
+ __le__ = _make_compare(op.le, 'less than or equal to')
+ __ge__ = _make_compare(op.ge, 'greater than or equal to')
+
+
+ def __iadd__(self, iterable):
+ """deque.__iadd__(iterable) <==> deque += iterable
+
+ Extend back side of deque with items from iterable.
+
+ :param iterable: iterable of items to append to deque
+ :return: deque with added items
+
+ """
+ self.extend(iterable)
+ return self
+
+
+ def __iter__(self):
+ """deque.__iter__() <==> iter(deque)
+
+ Return iterator of deque from front to back.
+
+ """
+ _cache = self._cache
+
+ for key in _cache.iterkeys():
+ try:
+ yield _cache[key]
+ except KeyError:
+ pass
+
+
+ def __len__(self):
+ """deque.__len__() <==> len(deque)
+
+ Return length of deque.
+
+ """
+ return len(self._cache)
+
+
+ def __reversed__(self):
+ """deque.__reversed__() <==> reversed(deque)
+
+ Return iterator of deque from back to front.
+
+ >>> deque = Deque()
+ >>> deque.extend('abcd')
+ >>> iterator = reversed(deque)
+ >>> next(iterator)
+ 'd'
+ >>> list(iterator)
+ ['c', 'b', 'a']
+
+ """
+ _cache = self._cache
+
+ for key in _cache.iterkeys(reverse=True):
+ try:
+ yield _cache[key]
+ except KeyError:
+ pass
+
+
+ def __getstate__(self):
+ return self.directory
+
+
+ def __setstate__(self, state):
+ self.__init__(directory=state)
+
+
+ def append(self, value):
+ """Add `value` to back of deque.
+
+ >>> deque = Deque()
+ >>> deque.append('a')
+ >>> deque.append('b')
+ >>> deque.append('c')
+ >>> list(deque)
+ ['a', 'b', 'c']
+
+ :param value: value to add to back of deque
+
+ """
+ self._cache.push(value, retry=True)
+
+
+ def appendleft(self, value):
+ """Add `value` to front of deque.
+
+ >>> deque = Deque()
+ >>> deque.appendleft('a')
+ >>> deque.appendleft('b')
+ >>> deque.appendleft('c')
+ >>> list(deque)
+ ['c', 'b', 'a']
+
+ :param value: value to add to front of deque
+
+ """
+ self._cache.push(value, side='front', retry=True)
+
+
+ def clear(self):
+ """Remove all elements from deque.
+
+ >>> deque = Deque('abc')
+ >>> len(deque)
+ 3
+ >>> deque.clear()
+ >>> list(deque)
+ []
+
+ """
+ self._cache.clear(retry=True)
+
+
+ def count(self, value):
+ """Return number of occurrences of `value` in deque.
+
+ >>> deque = Deque()
+ >>> deque += [num for num in range(1, 5) for _ in range(num)]
+ >>> deque.count(0)
+ 0
+ >>> deque.count(1)
+ 1
+ >>> deque.count(4)
+ 4
+
+ :param value: value to count in deque
+ :return: count of items equal to value in deque
+
+ """
+ return sum(1 for item in self if value == item)
+
+
+ def extend(self, iterable):
+ """Extend back side of deque with values from `iterable`.
+
+ :param iterable: iterable of values
+
+ """
+ for value in iterable:
+ self.append(value)
+
+
+ def extendleft(self, iterable):
+ """Extend front side of deque with value from `iterable`.
+
+ >>> deque = Deque()
+ >>> deque.extendleft('abc')
+ >>> list(deque)
+ ['c', 'b', 'a']
+
+ :param iterable: iterable of values
+
+ """
+ for value in iterable:
+ self.appendleft(value)
+
+
+ def peek(self):
+ """Peek at value at back of deque.
+
+ Faster than indexing deque at -1.
+
+ If deque is empty then raise IndexError.
+
+ >>> deque = Deque()
+ >>> deque.peek()
+ Traceback (most recent call last):
+ ...
+ IndexError: peek from an empty deque
+ >>> deque += 'abc'
+ >>> deque.peek()
+ 'c'
+
+ :return: value at back of deque
+ :raises IndexError: if deque is empty
+
+ """
+ default = None, ENOVAL
+ _, value = self._cache.peek(default=default, side='back', retry=True)
+ if value is ENOVAL:
+ raise IndexError('peek from an empty deque')
+ return value
+
+
+ def peekleft(self):
+ """Peek at value at back of deque.
+
+ Faster than indexing deque at 0.
+
+ If deque is empty then raise IndexError.
+
+ >>> deque = Deque()
+ >>> deque.peekleft()
+ Traceback (most recent call last):
+ ...
+ IndexError: peek from an empty deque
+ >>> deque += 'abc'
+ >>> deque.peekleft()
+ 'a'
+
+ :return: value at front of deque
+ :raises IndexError: if deque is empty
+
+ """
+ default = None, ENOVAL
+ _, value = self._cache.peek(default=default, side='front', retry=True)
+ if value is ENOVAL:
+ raise IndexError('peek from an empty deque')
+ return value
+
+
+ def pop(self):
+ """Remove and return value at back of deque.
+
+ If deque is empty then raise IndexError.
+
+ >>> deque = Deque()
+ >>> deque += 'ab'
+ >>> deque.pop()
+ 'b'
+ >>> deque.pop()
+ 'a'
+ >>> deque.pop()
+ Traceback (most recent call last):
+ ...
+ IndexError: pop from an empty deque
+
+ :return: value at back of deque
+ :raises IndexError: if deque is empty
+
+ """
+ default = None, ENOVAL
+ _, value = self._cache.pull(default=default, side='back', retry=True)
+ if value is ENOVAL:
+ raise IndexError('pop from an empty deque')
+ return value
+
+
+ def popleft(self):
+ """Remove and return value at front of deque.
+
+ >>> deque = Deque()
+ >>> deque += 'ab'
+ >>> deque.popleft()
+ 'a'
+ >>> deque.popleft()
+ 'b'
+ >>> deque.popleft()
+ Traceback (most recent call last):
+ ...
+ IndexError: pop from an empty deque
+
+ :return: value at front of deque
+ :raises IndexError: if deque is empty
+
+ """
+ default = None, ENOVAL
+ _, value = self._cache.pull(default=default, retry=True)
+ if value is ENOVAL:
+ raise IndexError('pop from an empty deque')
+ return value
+
+
+ def remove(self, value):
+ """Remove first occurrence of `value` in deque.
+
+ >>> deque = Deque()
+ >>> deque += 'aab'
+ >>> deque.remove('a')
+ >>> list(deque)
+ ['a', 'b']
+ >>> deque.remove('b')
+ >>> list(deque)
+ ['a']
+ >>> deque.remove('c')
+ Traceback (most recent call last):
+ ...
+ ValueError: deque.remove(value): value not in deque
+
+ :param value: value to remove
+ :raises ValueError: if value not in deque
+
+ """
+ _cache = self._cache
+
+ for key in _cache.iterkeys():
+ try:
+ item = _cache[key]
+ except KeyError:
+ continue
+ else:
+ if value == item:
+ try:
+ del _cache[key]
+ except KeyError:
+ continue
+ return
+
+ raise ValueError('deque.remove(value): value not in deque')
+
+
+ def reverse(self):
+ """Reverse deque in place.
+
+ >>> deque = Deque()
+ >>> deque += 'abc'
+ >>> deque.reverse()
+ >>> list(deque)
+ ['c', 'b', 'a']
+
+ """
+        # GrantJ 2019-03-22 Consider using an algorithm that swaps the values
+        # at two keys, like self._cache.swap(key1, key2, retry=True). The swap
+        # method would exchange the values at two given keys. Then, using a
+        # forward iterator and a reverse iterator, the reverse method could
+        # avoid making copies of the values.
+ temp = Deque(iterable=reversed(self))
+ self.clear()
+ self.extend(temp)
+ directory = temp.directory
+ del temp
+ rmtree(directory)
+
+
+ def rotate(self, steps=1):
+ """Rotate deque right by `steps`.
+
+ If steps is negative then rotate left.
+
+ >>> deque = Deque()
+ >>> deque += range(5)
+ >>> deque.rotate(2)
+ >>> list(deque)
+ [3, 4, 0, 1, 2]
+ >>> deque.rotate(-1)
+ >>> list(deque)
+ [4, 0, 1, 2, 3]
+
+ :param int steps: number of steps to rotate (default 1)
+
+ """
+ if not isinstance(steps, int):
+ type_name = type(steps).__name__
+ raise TypeError('integer argument expected, got %s' % type_name)
+
+ len_self = len(self)
+
+ if not len_self:
+ return
+
+ if steps >= 0:
+ steps %= len_self
+
+ for _ in range(steps):
+ try:
+ value = self.pop()
+ except IndexError:
+ return
+ else:
+ self.appendleft(value)
+ else:
+ steps *= -1
+ steps %= len_self
+
+ for _ in range(steps):
+ try:
+ value = self.popleft()
+ except IndexError:
+ return
+ else:
+ self.append(value)
+
+
+ __hash__ = None
+
+
+ @contextmanager
+ def transact(self):
+ """Context manager to perform a transaction by locking the deque.
+
+ While the deque is locked, no other write operation is permitted.
+ Transactions should therefore be as short as possible. Read and write
+ operations performed in a transaction are atomic. Read operations may
+ occur concurrent to a transaction.
+
+ Transactions may be nested and may not be shared between threads.
+
+ >>> from diskcache import Deque
+ >>> deque = Deque()
+ >>> deque += range(5)
+ >>> with deque.transact(): # Atomically rotate elements.
+ ... value = deque.pop()
+ ... deque.appendleft(value)
+ >>> list(deque)
+ [4, 0, 1, 2, 3]
+
+ :return: context manager for use in `with` statement
+
+ """
+ with self._cache.transact(retry=True):
+ yield
+
+
+class Index(MutableMapping):
+ """Persistent mutable mapping with insertion order iteration.
+
+ Items are serialized to disk. Index may be initialized from directory path
+ where items are stored.
+
+ Hashing protocol is not used. Keys are looked up by their serialized
+ format. See ``diskcache.Disk`` for details.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> index['a']
+ 1
+ >>> list(index)
+ ['a', 'b', 'c']
+ >>> len(index)
+ 3
+ >>> del index['b']
+ >>> index.popitem()
+ ('c', 3)
+
+ """
+ def __init__(self, *args, **kwargs):
+ """Initialize index in directory and update items.
+
+ Optional first argument may be string specifying directory where items
+ are stored. When None or not given, temporary directory is created.
+
+ >>> index = Index({'a': 1, 'b': 2, 'c': 3})
+ >>> len(index)
+ 3
+ >>> directory = index.directory
+ >>> inventory = Index(directory, d=4)
+ >>> inventory['b']
+ 2
+ >>> len(inventory)
+ 4
+
+ """
+ if args and isinstance(args[0], (BytesType, TextType)):
+ directory = args[0]
+ args = args[1:]
+ else:
+ if args and args[0] is None:
+ args = args[1:]
+ directory = None
+ self._cache = Cache(directory, eviction_policy='none')
+ self.update(*args, **kwargs)
+
+
+ @classmethod
+ def fromcache(cls, cache, *args, **kwargs):
+ """Initialize index using `cache` and update items.
+
+ >>> cache = Cache()
+ >>> index = Index.fromcache(cache, {'a': 1, 'b': 2, 'c': 3})
+ >>> index.cache is cache
+ True
+ >>> len(index)
+ 3
+ >>> 'b' in index
+ True
+ >>> index['c']
+ 3
+
+ :param Cache cache: cache to use
+ :param args: mapping or sequence of items
+ :param kwargs: mapping of items
+ :return: initialized Index
+
+ """
+ # pylint: disable=no-member,protected-access
+ self = cls.__new__(cls)
+ self._cache = cache
+ self.update(*args, **kwargs)
+ return self
+
+
+ @property
+ def cache(self):
+ "Cache used by index."
+ return self._cache
+
+
+ @property
+ def directory(self):
+ "Directory path where items are stored."
+ return self._cache.directory
+
+
+ def __getitem__(self, key):
+ """index.__getitem__(key) <==> index[key]
+
+ Return corresponding value for `key` in index.
+
+ >>> index = Index()
+ >>> index.update({'a': 1, 'b': 2})
+ >>> index['a']
+ 1
+ >>> index['b']
+ 2
+ >>> index['c']
+ Traceback (most recent call last):
+ ...
+ KeyError: 'c'
+
+ :param key: key for item
+ :return: value for item in index with given key
+ :raises KeyError: if key is not found
+
+ """
+ return self._cache[key]
+
+
+ def __setitem__(self, key, value):
+ """index.__setitem__(key, value) <==> index[key] = value
+
+ Set `key` and `value` item in index.
+
+ >>> index = Index()
+ >>> index['a'] = 1
+ >>> index[0] = None
+ >>> len(index)
+ 2
+
+ :param key: key for item
+ :param value: value for item
+
+ """
+ self._cache[key] = value
+
+
+ def __delitem__(self, key):
+ """index.__delitem__(key) <==> del index[key]
+
+ Delete corresponding item for `key` from index.
+
+ >>> index = Index()
+ >>> index.update({'a': 1, 'b': 2})
+ >>> del index['a']
+ >>> del index['b']
+ >>> len(index)
+ 0
+ >>> del index['c']
+ Traceback (most recent call last):
+ ...
+ KeyError: 'c'
+
+ :param key: key for item
+ :raises KeyError: if key is not found
+
+ """
+ del self._cache[key]
+
+
+ def setdefault(self, key, default=None):
+ """Set and get value for `key` in index using `default`.
+
+ If `key` is not in index then set corresponding value to `default`. If
+ `key` is in index then ignore `default` and return existing value.
+
+ >>> index = Index()
+ >>> index.setdefault('a', 0)
+ 0
+ >>> index.setdefault('a', 1)
+ 0
+
+ :param key: key for item
+ :param default: value if key is missing (default None)
+ :return: value for item in index with given key
+
+ """
+ _cache = self._cache
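+        # Lock-free pattern: add() succeeds only when the key is absent, so
+        # if another writer races in between the lookup and the add, the
+        # loop re-reads and returns the winning value.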
+ while True:
+ try:
+ return _cache[key]
+ except KeyError:
+ _cache.add(key, default, retry=True)
+
+
+ def peekitem(self, last=True):
+ """Peek at key and value item pair in index based on iteration order.
+
+ >>> index = Index()
+ >>> for num, letter in enumerate('xyz'):
+ ... index[letter] = num
+ >>> index.peekitem()
+ ('z', 2)
+ >>> index.peekitem(last=False)
+ ('x', 0)
+
+ :param bool last: last item in iteration order (default True)
+ :return: key and value item pair
+ :raises KeyError: if cache is empty
+
+ """
+ return self._cache.peekitem(last, retry=True)
+
+
+ def pop(self, key, default=ENOVAL):
+ """Remove corresponding item for `key` from index and return value.
+
+ If `key` is missing then return `default`. If `default` is `ENOVAL`
+ then raise KeyError.
+
+ >>> index = Index({'a': 1, 'b': 2})
+ >>> index.pop('a')
+ 1
+ >>> index.pop('b')
+ 2
+ >>> index.pop('c', default=3)
+ 3
+ >>> index.pop('d')
+ Traceback (most recent call last):
+ ...
+ KeyError: 'd'
+
+ :param key: key for item
+ :param default: return value if key is missing (default ENOVAL)
+ :return: value for item if key is found else default
+ :raises KeyError: if key is not found and default is ENOVAL
+
+ """
+ _cache = self._cache
+ value = _cache.pop(key, default=default, retry=True)
+ if value is ENOVAL:
+ raise KeyError(key)
+ return value
+
+
+ def popitem(self, last=True):
+ """Remove and return item pair.
+
+ Item pairs are returned in last-in-first-out (LIFO) order if last is
+ True else first-in-first-out (FIFO) order. LIFO order imitates a stack
+ and FIFO order imitates a queue.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> index.popitem()
+ ('c', 3)
+ >>> index.popitem(last=False)
+ ('a', 1)
+ >>> index.popitem()
+ ('b', 2)
+ >>> index.popitem()
+ Traceback (most recent call last):
+ ...
+ KeyError: 'dictionary is empty'
+
+ :param bool last: pop last item pair (default True)
+ :return: key and value item pair
+ :raises KeyError: if index is empty
+
+ """
+ # pylint: disable=arguments-differ
+ _cache = self._cache
+
+ with _cache.transact(retry=True):
+ key, value = _cache.peekitem(last=last)
+ del _cache[key]
+
+ return key, value
+
+
+ def push(self, value, prefix=None, side='back'):
+ """Push `value` onto `side` of queue in index identified by `prefix`.
+
+ When prefix is None, integer keys are used. Otherwise, string keys are
+ used in the format "prefix-integer". Integer starts at 500 trillion.
+
+ Defaults to pushing value on back of queue. Set side to 'front' to push
+ value on front of queue. Side must be one of 'back' or 'front'.
+
+ See also `Index.pull`.
+
+ >>> index = Index()
+ >>> print(index.push('apples'))
+ 500000000000000
+ >>> print(index.push('beans'))
+ 500000000000001
+ >>> print(index.push('cherries', side='front'))
+ 499999999999999
+ >>> index[500000000000001]
+ 'beans'
+ >>> index.push('dates', prefix='fruit')
+ 'fruit-500000000000000'
+
+ :param value: value for item
+ :param str prefix: key prefix (default None, key is integer)
+ :param str side: either 'back' or 'front' (default 'back')
+ :return: key for item in cache
+
+ """
+ return self._cache.push(value, prefix, side, retry=True)
+
+
+ def pull(self, prefix=None, default=(None, None), side='front'):
+ """Pull key and value item pair from `side` of queue in index.
+
+ When prefix is None, integer keys are used. Otherwise, string keys are
+ used in the format "prefix-integer". Integer starts at 500 trillion.
+
+ If queue is empty, return default.
+
+ Defaults to pulling key and value item pairs from front of queue. Set
+ side to 'back' to pull from back of queue. Side must be one of 'front'
+ or 'back'.
+
+ See also `Index.push`.
+
+ >>> index = Index()
+ >>> for letter in 'abc':
+ ... print(index.push(letter))
+ 500000000000000
+ 500000000000001
+ 500000000000002
+ >>> key, value = index.pull()
+ >>> print(key)
+ 500000000000000
+ >>> value
+ 'a'
+ >>> _, value = index.pull(side='back')
+ >>> value
+ 'c'
+ >>> index.pull(prefix='fruit')
+ (None, None)
+
+ :param str prefix: key prefix (default None, key is integer)
+ :param default: value to return if key is missing
+ (default (None, None))
+ :param str side: either 'front' or 'back' (default 'front')
+ :return: key and value item pair or default if queue is empty
+
+ """
+ return self._cache.pull(prefix, default, side, retry=True)
+
+
+ def clear(self):
+ """Remove all items from index.
+
+ >>> index = Index({'a': 0, 'b': 1, 'c': 2})
+ >>> len(index)
+ 3
+ >>> index.clear()
+ >>> dict(index)
+ {}
+
+ """
+ self._cache.clear(retry=True)
+
+
+ def __iter__(self):
+ """index.__iter__() <==> iter(index)
+
+ Return iterator of index keys in insertion order.
+
+ """
+ return iter(self._cache)
+
+
+ def __reversed__(self):
+ """index.__reversed__() <==> reversed(index)
+
+ Return iterator of index keys in reversed insertion order.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> iterator = reversed(index)
+ >>> next(iterator)
+ 'c'
+ >>> list(iterator)
+ ['b', 'a']
+
+ """
+ return reversed(self._cache)
+
+
+ def __len__(self):
+ """index.__len__() <==> len(index)
+
+ Return length of index.
+
+ """
+ return len(self._cache)
+
+
+ if sys.hexversion < 0x03000000:
+ def keys(self):
+ """List of index keys.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> index.keys()
+ ['a', 'b', 'c']
+
+ :return: list of keys
+
+ """
+ return list(self._cache)
+
+
+ def values(self):
+ """List of index values.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> index.values()
+ [1, 2, 3]
+
+ :return: list of values
+
+ """
+ return list(self.itervalues())
+
+
+ def items(self):
+ """List of index items.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> index.items()
+ [('a', 1), ('b', 2), ('c', 3)]
+
+ :return: list of items
+
+ """
+ return list(self.iteritems())
+
+
+ def iterkeys(self):
+ """Iterator of index keys.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> list(index.iterkeys())
+ ['a', 'b', 'c']
+
+ :return: iterator of keys
+
+ """
+ return iter(self._cache)
+
+
+ def itervalues(self):
+ """Iterator of index values.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> list(index.itervalues())
+ [1, 2, 3]
+
+ :return: iterator of values
+
+ """
+ _cache = self._cache
+
+ for key in _cache:
+ while True:
+ try:
+ yield _cache[key]
+ except KeyError:
+ pass
+ break
+
+
+ def iteritems(self):
+ """Iterator of index items.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> list(index.iteritems())
+ [('a', 1), ('b', 2), ('c', 3)]
+
+ :return: iterator of items
+
+ """
+ _cache = self._cache
+
+ for key in _cache:
+ while True:
+ try:
+ yield key, _cache[key]
+ except KeyError:
+ pass
+ break
+
+
+ def viewkeys(self):
+ """Set-like object providing a view of index keys.
+
+ >>> index = Index()
+ >>> index.update({'a': 1, 'b': 2, 'c': 3})
+ >>> keys_view = index.viewkeys()
+ >>> 'b' in keys_view
+ True
+
+ :return: keys view
+
+ """
+ return KeysView(self)
+
+
+ def viewvalues(self):
+ """Set-like object providing a view of index values.
+
+ >>> index = Index()
+ >>> index.update({'a': 1, 'b': 2, 'c': 3})
+ >>> values_view = index.viewvalues()
+ >>> 2 in values_view
+ True
+
+ :return: values view
+
+ """
+ return ValuesView(self)
+
+
+ def viewitems(self):
+ """Set-like object providing a view of index items.
+
+ >>> index = Index()
+ >>> index.update({'a': 1, 'b': 2, 'c': 3})
+ >>> items_view = index.viewitems()
+ >>> ('b', 2) in items_view
+ True
+
+ :return: items view
+
+ """
+ return ItemsView(self)
+
+
+ else:
+ def keys(self):
+ """Set-like object providing a view of index keys.
+
+ >>> index = Index()
+ >>> index.update({'a': 1, 'b': 2, 'c': 3})
+ >>> keys_view = index.keys()
+ >>> 'b' in keys_view
+ True
+
+ :return: keys view
+
+ """
+ return KeysView(self)
+
+
+ def values(self):
+ """Set-like object providing a view of index values.
+
+ >>> index = Index()
+ >>> index.update({'a': 1, 'b': 2, 'c': 3})
+ >>> values_view = index.values()
+ >>> 2 in values_view
+ True
+
+ :return: values view
+
+ """
+ return ValuesView(self)
+
+
+ def items(self):
+ """Set-like object providing a view of index items.
+
+ >>> index = Index()
+ >>> index.update({'a': 1, 'b': 2, 'c': 3})
+ >>> items_view = index.items()
+ >>> ('b', 2) in items_view
+ True
+
+ :return: items view
+
+ """
+ return ItemsView(self)
+
+
+ __hash__ = None
+
+
+ def __getstate__(self):
+ return self.directory
+
+
+ def __setstate__(self, state):
+ self.__init__(state)
+
+
+ def __eq__(self, other):
+ """index.__eq__(other) <==> index == other
+
+ Compare equality for index and `other`.
+
+ Comparison to another index or ordered dictionary is
+ order-sensitive. Comparison to all other mappings is order-insensitive.
+
+ >>> index = Index()
+ >>> pairs = [('a', 1), ('b', 2), ('c', 3)]
+ >>> index.update(pairs)
+ >>> from collections import OrderedDict
+ >>> od = OrderedDict(pairs)
+ >>> index == od
+ True
+ >>> index == {'c': 3, 'b': 2, 'a': 1}
+ True
+
+ :param other: other mapping in equality comparison
+ :return: True if index equals other
+
+ """
+ if len(self) != len(other):
+ return False
+
+ if isinstance(other, (Index, OrderedDict)):
+ alpha = ((key, self[key]) for key in self)
+ beta = ((key, other[key]) for key in other)
+ pairs = zip(alpha, beta)
+ return not any(a != x or b != y for (a, b), (x, y) in pairs)
+ else:
+ return all(self[key] == other.get(key, ENOVAL) for key in self)
+
+
+ def __ne__(self, other):
+ """index.__ne__(other) <==> index != other
+
+ Compare inequality for index and `other`.
+
+ Comparison to another index or ordered dictionary is
+ order-sensitive. Comparison to all other mappings is order-insensitive.
+
+ >>> index = Index()
+ >>> index.update([('a', 1), ('b', 2), ('c', 3)])
+ >>> from collections import OrderedDict
+ >>> od = OrderedDict([('c', 3), ('b', 2), ('a', 1)])
+ >>> index != od
+ True
+ >>> index != {'a': 1, 'b': 2}
+ True
+
+ :param other: other mapping in inequality comparison
+ :return: True if index does not equal other
+
+ """
+ return not self == other
+
+
+ def memoize(self, name=None, typed=False):
+ """Memoizing cache decorator.
+
+ Decorator to wrap callable with memoizing function using cache.
+ Repeated calls with the same arguments will lookup result in cache and
+ avoid function evaluation.
+
+ If name is set to None (default), the callable name will be determined
+ automatically.
+
+ If typed is set to True, function arguments of different types will be
+ cached separately. For example, f(3) and f(3.0) will be treated as
+ distinct calls with distinct results.
+
+ The original underlying function is accessible through the __wrapped__
+ attribute. This is useful for introspection, for bypassing the cache,
+ or for rewrapping the function with a different cache.
+
+ >>> from diskcache import Index
+ >>> mapping = Index()
+ >>> @mapping.memoize()
+ ... def fibonacci(number):
+ ... if number == 0:
+ ... return 0
+ ... elif number == 1:
+ ... return 1
+ ... else:
+ ... return fibonacci(number - 1) + fibonacci(number - 2)
+ >>> print(fibonacci(100))
+ 354224848179261915075
+
+ An additional `__cache_key__` attribute can be used to generate the
+ cache key used for the given arguments.
+
+ >>> key = fibonacci.__cache_key__(100)
+ >>> print(mapping[key])
+ 354224848179261915075
+
+ Remember to call memoize when decorating a callable. If you forget,
+        then a TypeError will occur. Note the lack of parentheses after
+ memoize below:
+
+ >>> @mapping.memoize
+ ... def test():
+ ... pass
+ Traceback (most recent call last):
+ ...
+ TypeError: name cannot be callable
+
+ :param str name: name given for callable (default None, automatic)
+ :param bool typed: cache different types separately (default False)
+ :return: callable decorator
+
+ """
+ return self._cache.memoize(name, typed)
+
+
+ @contextmanager
+ def transact(self):
+ """Context manager to perform a transaction by locking the index.
+
+ While the index is locked, no other write operation is permitted.
+ Transactions should therefore be as short as possible. Read and write
+ operations performed in a transaction are atomic. Read operations may
+ occur concurrently with a transaction.
+
+ Transactions may be nested and may not be shared between threads.
+
+ >>> from diskcache import Index
+ >>> mapping = Index()
+ >>> with mapping.transact(): # Atomically increment two keys.
+ ... mapping['total'] = mapping.get('total', 0) + 123.4
+ ... mapping['count'] = mapping.get('count', 0) + 1
+ >>> with mapping.transact(): # Atomically calculate average.
+ ... average = mapping['total'] / mapping['count']
+ >>> average
+ 123.4
+
+ :return: context manager for use in `with` statement
+
+ """
+ with self._cache.transact(retry=True):
+ yield
+
+
+ def __repr__(self):
+ """index.__repr__() <==> repr(index)
+
+ Return string with printable representation of index.
+
+ """
+ name = type(self).__name__
+ return '{0}({1!r})'.format(name, self.directory)
diff --git a/third_party/python/diskcache/diskcache/recipes.py b/third_party/python/diskcache/diskcache/recipes.py
new file mode 100644
index 0000000000..fb6425090a
--- /dev/null
+++ b/third_party/python/diskcache/diskcache/recipes.py
@@ -0,0 +1,437 @@
+"""Disk Cache Recipes
+
+"""
+
+import functools
+import math
+import os
+import random
+import sys
+import threading
+import time
+
+from .core import ENOVAL, args_to_key, full_name
+
+############################################################################
+# BEGIN Python 2/3 Shims
+############################################################################
+
+if sys.hexversion < 0x03000000:
+ from thread import get_ident # pylint: disable=import-error
+else:
+ from threading import get_ident
+
+############################################################################
+# END Python 2/3 Shims
+############################################################################
+
+
+class Averager(object):
+ """Recipe for calculating a running average.
+
+ Sometimes known as "online statistics," the running average maintains the
+ total and count. The average can then be calculated at any time.
+
+ >>> import diskcache
+ >>> cache = diskcache.FanoutCache()
+ >>> ave = Averager(cache, 'latency')
+ >>> ave.add(0.080)
+ >>> ave.add(0.120)
+ >>> ave.get()
+ 0.1
+ >>> ave.add(0.160)
+ >>> ave.pop()
+ 0.12
+ >>> print(ave.get())
+ None
+
+ """
+ def __init__(self, cache, key, expire=None, tag=None):
+ self._cache = cache
+ self._key = key
+ self._expire = expire
+ self._tag = tag
+
+ def add(self, value):
+ "Add `value` to average."
+ with self._cache.transact(retry=True):
+ total, count = self._cache.get(self._key, default=(0.0, 0))
+ total += value
+ count += 1
+ self._cache.set(
+ self._key, (total, count), expire=self._expire, tag=self._tag,
+ )
+
+ def get(self):
+ "Get current average or return `None` if count equals zero."
+ total, count = self._cache.get(self._key, default=(0.0, 0), retry=True)
+ return None if count == 0 else total / count
+
+ def pop(self):
+ "Return current average and delete key."
+ total, count = self._cache.pop(self._key, default=(0.0, 0), retry=True)
+ return None if count == 0 else total / count
+
+
+class Lock(object):
+ """Recipe for cross-process and cross-thread lock.
+
+ >>> import diskcache
+ >>> cache = diskcache.Cache()
+ >>> lock = Lock(cache, 'report-123')
+ >>> lock.acquire()
+ >>> lock.release()
+ >>> with lock:
+ ... pass
+
+ """
+ def __init__(self, cache, key, expire=None, tag=None):
+ self._cache = cache
+ self._key = key
+ self._expire = expire
+ self._tag = tag
+
+ def acquire(self):
+ "Acquire lock using spin-lock algorithm."
+ while True:
+ added = self._cache.add(
+ self._key, None, expire=self._expire, tag=self._tag, retry=True,
+ )
+ if added:
+ break
+ time.sleep(0.001)
+
+ def release(self):
+ "Release lock by deleting key."
+ self._cache.delete(self._key, retry=True)
+
+ def __enter__(self):
+ self.acquire()
+
+ def __exit__(self, *exc_info):
+ self.release()
+
+
+class RLock(object):
+ """Recipe for cross-process and cross-thread re-entrant lock.
+
+ >>> import diskcache
+ >>> cache = diskcache.Cache()
+ >>> rlock = RLock(cache, 'user-123')
+ >>> rlock.acquire()
+ >>> rlock.acquire()
+ >>> rlock.release()
+ >>> with rlock:
+ ... pass
+ >>> rlock.release()
+ >>> rlock.release()
+ Traceback (most recent call last):
+ ...
+ AssertionError: cannot release un-acquired lock
+
+ """
+ def __init__(self, cache, key, expire=None, tag=None):
+ self._cache = cache
+ self._key = key
+ self._expire = expire
+ self._tag = tag
+
+ def acquire(self):
+ "Acquire lock by incrementing count using spin-lock algorithm."
+ pid = os.getpid()
+ tid = get_ident()
+ pid_tid = '{}-{}'.format(pid, tid)
+
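+ # The stored value is an (owner, count) pair keyed on "pid-tid":
+ # the current owner may re-acquire and bump the count, and a count
+ # of zero means the lock is free for anyone to take.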
+ while True:
+ with self._cache.transact(retry=True):
+ value, count = self._cache.get(self._key, default=(None, 0))
+ if pid_tid == value or count == 0:
+ self._cache.set(
+ self._key, (pid_tid, count + 1),
+ expire=self._expire, tag=self._tag,
+ )
+ return
+ time.sleep(0.001)
+
+ def release(self):
+ "Release lock by decrementing count."
+ pid = os.getpid()
+ tid = get_ident()
+ pid_tid = '{}-{}'.format(pid, tid)
+
+ with self._cache.transact(retry=True):
+ value, count = self._cache.get(self._key, default=(None, 0))
+ is_owned = pid_tid == value and count > 0
+ assert is_owned, 'cannot release un-acquired lock'
+ self._cache.set(
+ self._key, (value, count - 1),
+ expire=self._expire, tag=self._tag,
+ )
+
+ def __enter__(self):
+ self.acquire()
+
+ def __exit__(self, *exc_info):
+ self.release()
+
+
+class BoundedSemaphore(object):
+ """Recipe for cross-process and cross-thread bounded semaphore.
+
+ >>> import diskcache
+ >>> cache = diskcache.Cache()
+ >>> semaphore = BoundedSemaphore(cache, 'max-cons', value=2)
+ >>> semaphore.acquire()
+ >>> semaphore.acquire()
+ >>> semaphore.release()
+ >>> with semaphore:
+ ... pass
+ >>> semaphore.release()
+ >>> semaphore.release()
+ Traceback (most recent call last):
+ ...
+ AssertionError: cannot release un-acquired semaphore
+
+ """
+ def __init__(self, cache, key, value=1, expire=None, tag=None):
+ self._cache = cache
+ self._key = key
+ self._value = value
+ self._expire = expire
+ self._tag = tag
+
+ def acquire(self):
+ "Acquire semaphore by decrementing value using spin-lock algorithm."
+ while True:
+ with self._cache.transact(retry=True):
+ value = self._cache.get(self._key, default=self._value)
+ if value > 0:
+ self._cache.set(
+ self._key, value - 1,
+ expire=self._expire, tag=self._tag,
+ )
+ return
+ time.sleep(0.001)
+
+ def release(self):
+ "Release semaphore by incrementing value."
+ with self._cache.transact(retry=True):
+ value = self._cache.get(self._key, default=self._value)
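+ # The stored value counts the remaining permits; if it has climbed
+ # back to the initial value, nothing is currently acquired and a
+ # release would over-count, hence the assertion below.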
+ assert self._value > value, 'cannot release un-acquired semaphore'
+ value += 1
+ self._cache.set(
+ self._key, value, expire=self._expire, tag=self._tag,
+ )
+
+ def __enter__(self):
+ self.acquire()
+
+ def __exit__(self, *exc_info):
+ self.release()
+
+
+def throttle(cache, count, seconds, name=None, expire=None, tag=None,
+ time_func=time.time, sleep_func=time.sleep):
+ """Decorator to throttle calls to function.
+
+ >>> import diskcache, time
+ >>> cache = diskcache.Cache()
+ >>> count = 0
+ >>> @throttle(cache, 2, 1) # 2 calls per 1 second
+ ... def increment():
+ ... global count
+ ... count += 1
+ >>> start = time.time()
+ >>> while (time.time() - start) <= 2:
+ ... increment()
+ >>> count in (6, 7) # 6 or 7 calls depending on CPU load
+ True
+
+ """
+ def decorator(func):
+ rate = count / float(seconds)
+ key = full_name(func) if name is None else name
+ now = time_func()
+ cache.set(key, (now, count), expire=expire, tag=tag, retry=True)
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ while True:
+ with cache.transact(retry=True):
+ last, tally = cache.get(key)
+ now = time_func()
+ tally += (now - last) * rate
+ delay = 0
+
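+ # Token-bucket accounting: the tally refills at ``rate`` tokens per
+ # second since the last call. An overfull bucket is clamped to
+ # ``count``, a call spends one token, and with less than one token
+ # the caller sleeps until a whole token has accrued.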
+ if tally > count:
+ cache.set(key, (now, count - 1), expire)
+ elif tally >= 1:
+ cache.set(key, (now, tally - 1), expire)
+ else:
+ delay = (1 - tally) / rate
+
+ if delay:
+ sleep_func(delay)
+ else:
+ break
+
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ return decorator
+
+
+def barrier(cache, lock_factory, name=None, expire=None, tag=None):
+ """Barrier to calling decorated function.
+
+ Supports different kinds of locks: Lock, RLock, BoundedSemaphore.
+
+ >>> import diskcache, time
+ >>> cache = diskcache.Cache()
+ >>> @barrier(cache, Lock)
+ ... def work(num):
+ ... print('worker started')
+ ... time.sleep(1)
+ ... print('worker finished')
+ >>> import multiprocessing.pool
+ >>> pool = multiprocessing.pool.ThreadPool(2)
+ >>> _ = pool.map(work, range(2))
+ worker started
+ worker finished
+ worker started
+ worker finished
+ >>> pool.terminate()
+
+ """
+ def decorator(func):
+ key = full_name(func) if name is None else name
+ lock = lock_factory(cache, key, expire=expire, tag=tag)
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ with lock:
+ return func(*args, **kwargs)
+
+ return wrapper
+
+ return decorator
+
+
+def memoize_stampede(cache, expire, name=None, typed=False, tag=None, beta=1):
+ """Memoizing cache decorator with cache stampede protection.
+
+ Cache stampedes are a type of system overload that can occur when parallel
+ computing systems using memoization come under heavy load. This behaviour
+ is sometimes also called dog-piling, cache miss storm, cache choking, or
+ the thundering herd problem.
+
+ The memoization decorator implements cache stampede protection through
+ early recomputation. Early recomputation of function results will occur
+ probabilistically before expiration in a background thread of
+ execution. Early probabilistic recomputation is based on research by
+ Vattani, A.; Chierichetti, F.; Lowenstein, K. (2015), Optimal Probabilistic
+ Cache Stampede Prevention, VLDB, pp. 886-897, ISSN 2150-8097
+
+ If name is set to None (default), the callable name will be determined
+ automatically.
+
+ If typed is set to True, function arguments of different types will be
+ cached separately. For example, f(3) and f(3.0) will be treated as distinct
+ calls with distinct results.
+
+ The original underlying function is accessible through the `__wrapped__`
+ attribute. This is useful for introspection, for bypassing the cache, or
+ for rewrapping the function with a different cache.
+
+ >>> from diskcache import Cache
+ >>> cache = Cache()
+ >>> @memoize_stampede(cache, expire=1)
+ ... def fib(number):
+ ... if number == 0:
+ ... return 0
+ ... elif number == 1:
+ ... return 1
+ ... else:
+ ... return fib(number - 1) + fib(number - 2)
+ >>> print(fib(100))
+ 354224848179261915075
+
+ An additional `__cache_key__` attribute can be used to generate the cache
+ key used for the given arguments.
+
+ >>> key = fib.__cache_key__(100)
+ >>> del cache[key]
+
+ Remember to call memoize when decorating a callable. If you forget, then a
+ TypeError will occur.
+
+ :param cache: cache to store callable arguments and return values
+ :param float expire: seconds until arguments expire
+ :param str name: name given for callable (default None, automatic)
+ :param bool typed: cache different types separately (default False)
+ :param str tag: text to associate with arguments (default None)
+ :return: callable decorator
+
+ """
+ # Caution: Nearly identical code exists in Cache.memoize
+ def decorator(func):
+ "Decorator created by memoize call for callable."
+ base = (full_name(func),) if name is None else (name,)
+
+ def timer(*args, **kwargs):
+ "Time execution of `func` and return result and time delta."
+ start = time.time()
+ result = func(*args, **kwargs)
+ delta = time.time() - start
+ return result, delta
+
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ "Wrapper for callable to cache arguments and return values."
+ key = wrapper.__cache_key__(*args, **kwargs)
+ pair, expire_time = cache.get(
+ key, default=ENOVAL, expire_time=True, retry=True,
+ )
+
+ if pair is not ENOVAL:
+ result, delta = pair
+ now = time.time()
+ ttl = expire_time - now
+
+ if (-delta * beta * math.log(random.random())) < ttl:
+ return result # Cache hit.
+
+ # Check whether a thread has started for early recomputation.
+
+ thread_key = key + (ENOVAL,)
+ thread_added = cache.add(
+ thread_key, None, expire=delta, retry=True,
+ )
+
+ if thread_added:
+ # Start thread for early recomputation.
+ def recompute():
+ with cache:
+ pair = timer(*args, **kwargs)
+ cache.set(
+ key, pair, expire=expire, tag=tag, retry=True,
+ )
+ thread = threading.Thread(target=recompute)
+ thread.daemon = True
+ thread.start()
+
+ return result
+
+ pair = timer(*args, **kwargs)
+ cache.set(key, pair, expire=expire, tag=tag, retry=True)
+ return pair[0]
+
+ def __cache_key__(*args, **kwargs):
+ "Make key for cache given function arguments."
+ return args_to_key(base, args, kwargs, typed)
+
+ wrapper.__cache_key__ = __cache_key__
+ return wrapper
+
+ return decorator
diff --git a/third_party/python/distro/distro-1.4.0.dist-info/LICENSE b/third_party/python/distro/distro-1.4.0.dist-info/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/third_party/python/distro/distro-1.4.0.dist-info/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/third_party/python/distro/distro-1.4.0.dist-info/METADATA b/third_party/python/distro/distro-1.4.0.dist-info/METADATA
new file mode 100644
index 0000000000..b34cf6205d
--- /dev/null
+++ b/third_party/python/distro/distro-1.4.0.dist-info/METADATA
@@ -0,0 +1,170 @@
+Metadata-Version: 2.1
+Name: distro
+Version: 1.4.0
+Summary: Distro - an OS platform information API
+Home-page: https://github.com/nir0s/distro
+Author: Nir Cohen
+Author-email: nir36g@gmail.com
+License: Apache License, Version 2.0
+Platform: All
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: POSIX :: BSD
+Classifier: Operating System :: POSIX :: BSD :: FreeBSD
+Classifier: Operating System :: POSIX :: BSD :: NetBSD
+Classifier: Operating System :: POSIX :: BSD :: OpenBSD
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Operating System
+Description-Content-Type: text/markdown
+
+Distro - an OS platform information API
+=======================================
+
+[![Build Status](https://travis-ci.org/nir0s/distro.svg?branch=master)](https://travis-ci.org/nir0s/distro)
+[![Build status](https://ci.appveyor.com/api/projects/status/e812qjk1gf0f74r5/branch/master?svg=true)](https://ci.appveyor.com/project/nir0s/distro/branch/master)
+[![PyPI version](http://img.shields.io/pypi/v/distro.svg)](https://pypi.python.org/pypi/distro)
+[![Supported Python Versions](https://img.shields.io/pypi/pyversions/distro.svg)](https://img.shields.io/pypi/pyversions/distro.svg)
+[![Requirements Status](https://requires.io/github/nir0s/distro/requirements.svg?branch=master)](https://requires.io/github/nir0s/distro/requirements/?branch=master)
+[![Code Coverage](https://codecov.io/github/nir0s/distro/coverage.svg?branch=master)](https://codecov.io/github/nir0s/distro?branch=master)
+[![Code Quality](https://landscape.io/github/nir0s/distro/master/landscape.svg?style=flat)](https://landscape.io/github/nir0s/distro)
+[![Is Wheel](https://img.shields.io/pypi/wheel/distro.svg?style=flat)](https://pypi.python.org/pypi/distro)
+[![Documentation Status](https://readthedocs.org/projects/distro/badge/?version=stable)](http://distro.readthedocs.io/en/latest/)
+[![Join the chat at https://gitter.im/nir0s/distro](https://badges.gitter.im/nir0s/distro.svg)](https://gitter.im/nir0s/distro?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+
+`distro` provides information about the
+OS distribution it runs on, such as a reliable machine-readable ID, or
+version information.
+
+It is the recommended replacement for Python's original
+[`platform.linux_distribution`](https://docs.python.org/3.7/library/platform.html#platform.linux_distribution)
+function (which will be removed in Python 3.8).
+It also provides much more functionality that isn't necessarily Python-bound,
+such as a command-line interface.
+
+Distro currently supports Linux and BSD-based systems, but [Windows and OS X support](https://github.com/nir0s/distro/issues/177) is also planned.
+
+For Python 2.6 support, see https://github.com/nir0s/distro/tree/python2.6-support
+
+## Installation
+
+Installation of the latest released version from PyPI:
+
+```shell
+pip install distro
+```
+
+Installation of the latest development version:
+
+```shell
+pip install https://github.com/nir0s/distro/archive/master.tar.gz
+```
+
+
+## Usage
+
+```bash
+$ distro
+Name: Antergos Linux
+Version: 2015.10 (ISO-Rolling)
+Codename: ISO-Rolling
+
+$ distro -j
+{
+ "codename": "ISO-Rolling",
+ "id": "antergos",
+ "like": "arch",
+ "version": "16.9",
+ "version_parts": {
+ "build_number": "",
+ "major": "16",
+ "minor": "9"
+ }
+}
+
+
+$ python
+>>> import distro
+>>> distro.linux_distribution(full_distribution_name=False)
+('centos', '7.1.1503', 'Core')
+```
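+
+The same data is available programmatically as a dictionary; a quick sketch
+using the `distro.info()` function (output shape shown; exact values vary by
+host):
+
+```python
+>>> import distro
+>>> info = distro.info(best=True)
+>>> sorted(info.keys())
+['codename', 'id', 'like', 'version', 'version_parts']
+```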
+
+
+## Documentation
+
+On top of the aforementioned API, several more functions are available. For a complete description of the
+API, see the [latest API documentation](http://distro.readthedocs.org/en/latest/).
+
+## Background
+
+An alternative implementation became necessary because Python 3.5 deprecated
+this function, and Python 3.8 will remove it altogether.
+Its predecessor function `platform.dist` was already deprecated since
+Python 2.6 and will also be removed in Python 3.8.
+Still, there are many cases in which access to that information is needed.
+See [Python issue 1322](https://bugs.python.org/issue1322) for more
+information.
+
+The `distro` package implements a robust and inclusive way of retrieving the
+information about a distribution based on new standards and old methods,
+namely from these data sources (from high to low precedence):
+
+* The os-release file `/etc/os-release`, if present.
+* The output of the `lsb_release` command, if available.
+* The distro release file (`/etc/*(-|_)(release|version)`), if present.
+* The `uname` command for BSD-based distributions.
+
+
+## Python and Distribution Support
+
+`distro` is supported and tested on Python 2.7, 3.4+ and PyPy and on
+any distribution that provides one or more of the data sources
+covered.
+
+This package is tested with test data that mimics the exact behavior of the data sources of [a number of Linux distributions](https://github.com/nir0s/distro/tree/master/tests/resources/distros).
+
+
+## Testing
+
+```shell
+git clone git@github.com:nir0s/distro.git
+cd distro
+pip install tox
+tox
+```
+
+
+## Contributions
+
+Pull requests are always welcome to deal with specific distributions or just
+for general merriment.
+
+See [CONTRIBUTIONS](https://github.com/nir0s/distro/blob/master/CONTRIBUTING.md) for contribution info.
+
+Reference implementations for supporting additional distributions and file
+formats can be found here:
+
+* https://github.com/saltstack/salt/blob/develop/salt/grains/core.py#L1172
+* https://github.com/chef/ohai/blob/master/lib/ohai/plugins/linux/platform.rb
+* https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/facts/system/distribution.py
+* https://github.com/puppetlabs/facter/blob/master/lib/src/facts/linux/os_linux.cc
+
+## Package manager distributions
+
+* https://src.fedoraproject.org/rpms/python-distro
+* https://www.archlinux.org/packages/community/any/python-distro/
+* https://launchpad.net/ubuntu/+source/python-distro
+* https://packages.debian.org/sid/python-distro
+* https://packages.gentoo.org/packages/dev-python/distro
+* https://pkgs.org/download/python2-distro
+* https://slackbuilds.org/repository/14.2/python/python-distro/
+
+
diff --git a/third_party/python/distro/distro-1.4.0.dist-info/RECORD b/third_party/python/distro/distro-1.4.0.dist-info/RECORD
new file mode 100644
index 0000000000..ac0fb59ebe
--- /dev/null
+++ b/third_party/python/distro/distro-1.4.0.dist-info/RECORD
@@ -0,0 +1,7 @@
+distro.py,sha256=X2So5kjrRKyMbQJ90Xgy93HU5eFtujCzKaYNeoy1k1c,43251
+distro-1.4.0.dist-info/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325
+distro-1.4.0.dist-info/METADATA,sha256=7u13dPkDA9zu5ahg3Ns-H2vfMpR6gBBS55EaBzlXIeg,6648
+distro-1.4.0.dist-info/WHEEL,sha256=_wJFdOYk7i3xxT8ElOkUJvOdOvfNGbR9g-bf6UQT6sU,110
+distro-1.4.0.dist-info/entry_points.txt,sha256=mDMyvS_AzB0WhRYe_6xrRkAAET1LwFiDTL5Sx57UFiY,40
+distro-1.4.0.dist-info/top_level.txt,sha256=ikde_V_XEdSBqaGd5tEriN_wzYHLgTX_zVtlsGLHvwQ,7
+distro-1.4.0.dist-info/RECORD,,
diff --git a/third_party/python/distro/distro-1.4.0.dist-info/WHEEL b/third_party/python/distro/distro-1.4.0.dist-info/WHEEL
new file mode 100644
index 0000000000..c4bde30377
--- /dev/null
+++ b/third_party/python/distro/distro-1.4.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.32.3)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/distro/distro-1.4.0.dist-info/entry_points.txt b/third_party/python/distro/distro-1.4.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000..dd4023997f
--- /dev/null
+++ b/third_party/python/distro/distro-1.4.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+distro = distro:main
+
diff --git a/third_party/python/distro/distro-1.4.0.dist-info/top_level.txt b/third_party/python/distro/distro-1.4.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..0e0933171d
--- /dev/null
+++ b/third_party/python/distro/distro-1.4.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+distro
diff --git a/third_party/python/distro/distro.py b/third_party/python/distro/distro.py
new file mode 100644
index 0000000000..33061633ef
--- /dev/null
+++ b/third_party/python/distro/distro.py
@@ -0,0 +1,1216 @@
+# Copyright 2015,2016,2017 Nir Cohen
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The ``distro`` package (``distro`` stands for Linux Distribution) provides
+information about the Linux distribution it runs on, such as a reliable
+machine-readable distro ID, or version information.
+
+It is the recommended replacement for Python's original
+:py:func:`platform.linux_distribution` function, but it provides much more
+functionality. An alternative implementation became necessary because Python
+3.5 deprecated this function, and Python 3.8 will remove it altogether.
+Its predecessor function :py:func:`platform.dist` was already
+deprecated since Python 2.6 and will also be removed in Python 3.8.
+Still, there are many cases in which access to OS distribution information
+is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
+more information.
+"""
+
+import os
+import re
+import sys
+import json
+import shlex
+import logging
+import argparse
+import subprocess
+
+
+_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc')
+_OS_RELEASE_BASENAME = 'os-release'
+
+#: Translation table for normalizing the "ID" attribute defined in os-release
+#: files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as defined in the os-release file, translated to lower case,
+#: with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_OS_ID = {
+ 'ol': 'oracle', # Oracle Enterprise Linux
+}
+
+#: Translation table for normalizing the "Distributor ID" attribute returned by
+#: the lsb_release command, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as returned by the lsb_release command, translated to lower
+#: case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_LSB_ID = {
+ 'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
+ 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation
+ 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server
+}
+
+#: Translation table for normalizing the distro ID derived from the file name
+#: of distro release files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as derived from the file name of a distro release file,
+#: translated to lower case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_DISTRO_ID = {
+ 'redhat': 'rhel', # RHEL 6.x, 7.x
+}
+
+# Pattern for content of distro release file (reversed)
+_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
+ r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
+
+# Pattern for base file name of distro release file
+_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
+ r'(\w+)[-_](release|version)$')
+
+# Base file names to be ignored when searching for distro release file
+_DISTRO_RELEASE_IGNORE_BASENAMES = (
+ 'debian_version',
+ 'lsb-release',
+ 'oem-release',
+ _OS_RELEASE_BASENAME,
+ 'system-release'
+)
+
+
+def linux_distribution(full_distribution_name=True):
+ """
+ Return information about the current OS distribution as a tuple
+ ``(id_name, version, codename)`` with items as follows:
+
+ * ``id_name``: If *full_distribution_name* is false, the result of
+ :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``codename``: The result of :func:`distro.codename`.
+
+ The interface of this function is compatible with the original
+ :py:func:`platform.linux_distribution` function, supporting a subset of
+ its parameters.
+
+ The data it returns may not be exactly the same, because it uses more data
+ sources than the original function, and that may lead to different results
+ if the OS distribution is not consistent across the multiple data sources
+ it provides (there are indeed such distributions ...).
+
+ Another reason for differences is the fact that the :func:`distro.id`
+ method normalizes the distro ID string to a reliable machine-readable value
+ for a number of popular OS distributions.
+ """
+ return _distro.linux_distribution(full_distribution_name)
+
+
+def id():
+ """
+ Return the distro ID of the current distribution, as a
+ machine-readable string.
+
+ For a number of OS distributions, the returned distro ID value is
+ *reliable*, in the sense that it is documented and that it does not change
+ across releases of the distribution.
+
+ This package maintains the following reliable distro ID values:
+
+ ============== =========================================
+ Distro ID Distribution
+ ============== =========================================
+ "ubuntu" Ubuntu
+ "debian" Debian
+ "rhel" RedHat Enterprise Linux
+ "centos" CentOS
+ "fedora" Fedora
+ "sles" SUSE Linux Enterprise Server
+ "opensuse" openSUSE
+ "amazon" Amazon Linux
+ "arch" Arch Linux
+ "cloudlinux" CloudLinux OS
+ "exherbo" Exherbo Linux
+ "gentoo" GenToo Linux
+ "ibm_powerkvm" IBM PowerKVM
+ "kvmibm" KVM for IBM z Systems
+ "linuxmint" Linux Mint
+ "mageia" Mageia
+ "mandriva" Mandriva Linux
+ "parallels" Parallels
+ "pidora" Pidora
+ "raspbian" Raspbian
+ "oracle" Oracle Linux (and Oracle Enterprise Linux)
+ "scientific" Scientific Linux
+ "slackware" Slackware
+ "xenserver" XenServer
+ "openbsd" OpenBSD
+ "netbsd" NetBSD
+ "freebsd" FreeBSD
+ ============== =========================================
+
+ If you have a need to get distros for reliable IDs added into this set,
+ or if you find that the :func:`distro.id` function returns a different
+ distro ID for one of the listed distros, please create an issue in the
+ `distro issue tracker`_.
+
+ **Lookup hierarchy and transformations:**
+
+ First, the ID is obtained from the following sources, in the specified
+ order. The first available and non-empty value is used:
+
+ * the value of the "ID" attribute of the os-release file,
+
+ * the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ * the first part of the file name of the distro release file.
+
+ The ID value determined this way then passes through the following
+ transformations before it is returned by this method:
+
+ * it is translated to lower case,
+
+ * blanks (which should not be there anyway) are translated to underscores,
+
+ * a normalization of the ID is performed, based upon
+ `normalization tables`_. The purpose of this normalization is to ensure
+ that the ID is as reliable as possible, even across incompatible changes
+ in the OS distributions. A common reason for an incompatible change is
+ the addition of an os-release file, or the addition of the lsb_release
+ command, with ID values that differ from what was previously determined
+ from the distro release file name.
+ """
+ return _distro.id()
+
+
+def name(pretty=False):
+ """
+ Return the name of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the name is returned without version or codename.
+ (e.g. "CentOS Linux")
+
+ If *pretty* is true, the version and codename are appended.
+ (e.g. "CentOS Linux 7.1.1503 (Core)")
+
+ **Lookup hierarchy:**
+
+ The name is obtained from the following sources, in the specified order.
+ The first available and non-empty value is used:
+
+ * If *pretty* is false:
+
+ - the value of the "NAME" attribute of the os-release file,
+
+ - the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file.
+
+ * If *pretty* is true:
+
+ - the value of the "PRETTY_NAME" attribute of the os-release file,
+
+ - the value of the "Description" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file, appended
+ with the value of the pretty version ("<version_id>" and "<codename>"
+ fields) of the distro release file, if available.
+ """
+ return _distro.name(pretty)
+
+
+def version(pretty=False, best=False):
+ """
+ Return the version of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the version is returned without codename (e.g.
+ "7.0").
+
+ If *pretty* is true, the codename in parenthesis is appended, if the
+ codename is non-empty (e.g. "7.0 (Maipo)").
+
+ Some distributions provide version numbers with different precisions in
+ the different sources of distribution information. Examining the different
+ sources in a fixed priority order does not always yield the most precise
+ version (e.g. for Debian 8.2, or CentOS 7.1).
+
+ The *best* parameter can be used to control the approach for the returned
+ version:
+
+ If *best* is false, the first non-empty version number in priority order of
+ the examined sources is returned.
+
+ If *best* is true, the most precise version number out of all examined
+ sources is returned.
+
+ **Lookup hierarchy:**
+
+ In all cases, the version number is obtained from the following sources.
+ If *best* is false, this order represents the priority order:
+
+ * the value of the "VERSION_ID" attribute of the os-release file,
+ * the value of the "Release" attribute returned by the lsb_release
+ command,
+ * the version number parsed from the "<version_id>" field of the first line
+ of the distro release file,
+ * the version number parsed from the "PRETTY_NAME" attribute of the
+ os-release file, if it follows the format of the distro release files.
+ * the version number parsed from the "Description" attribute returned by
+ the lsb_release command, if it follows the format of the distro release
+ files.
+ """
+ return _distro.version(pretty, best)
+
+
+def version_parts(best=False):
+ """
+ Return the version of the current OS distribution as a tuple
+ ``(major, minor, build_number)`` with items as follows:
+
+ * ``major``: The result of :func:`distro.major_version`.
+
+ * ``minor``: The result of :func:`distro.minor_version`.
+
+ * ``build_number``: The result of :func:`distro.build_number`.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.version_parts(best)
+
+
+def major_version(best=False):
+ """
+ Return the major version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The major version is the first
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.major_version(best)
+
+
+def minor_version(best=False):
+ """
+ Return the minor version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The minor version is the second
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.minor_version(best)
+
+
+def build_number(best=False):
+ """
+ Return the build number of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The build number is the third part
+ of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.build_number(best)
+
+
+def like():
+ """
+ Return a space-separated list of distro IDs of distributions that are
+ closely related to the current OS distribution in regards to packaging
+ and programming interfaces, for example distributions the current
+ distribution is a derivative from.
+
+ **Lookup hierarchy:**
+
+ This information item is only provided by the os-release file.
+ For details, see the description of the "ID_LIKE" attribute in the
+ `os-release man page
+ <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
+ """
+ return _distro.like()
+
+
+def codename():
+ """
+ Return the codename for the release of the current OS distribution,
+ as a string.
+
+ If the distribution does not have a codename, an empty string is returned.
+
+ Note that the returned codename is not always really a codename. For
+ example, openSUSE returns "x86_64". This function does not handle such
+ cases in any special way and just returns the string it finds, if any.
+
+ **Lookup hierarchy:**
+
+ * the codename within the "VERSION" attribute of the os-release file, if
+ provided,
+
+ * the value of the "Codename" attribute returned by the lsb_release
+ command,
+
+ * the value of the "<codename>" field of the distro release file.
+ """
+ return _distro.codename()
+
+
+def info(pretty=False, best=False):
+ """
+ Return certain machine-readable information items about the current OS
+ distribution in a dictionary, as shown in the following example:
+
+ .. sourcecode:: python
+
+ {
+ 'id': 'rhel',
+ 'version': '7.0',
+ 'version_parts': {
+ 'major': '7',
+ 'minor': '0',
+ 'build_number': ''
+ },
+ 'like': 'fedora',
+ 'codename': 'Maipo'
+ }
+
+ The dictionary structure and keys are always the same, regardless of which
+ information items are available in the underlying data sources. The values
+ for the various keys are as follows:
+
+ * ``id``: The result of :func:`distro.id`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``version_parts -> major``: The result of :func:`distro.major_version`.
+
+ * ``version_parts -> minor``: The result of :func:`distro.minor_version`.
+
+ * ``version_parts -> build_number``: The result of
+ :func:`distro.build_number`.
+
+ * ``like``: The result of :func:`distro.like`.
+
+ * ``codename``: The result of :func:`distro.codename`.
+
+ For a description of the *pretty* and *best* parameters, see the
+ :func:`distro.version` method.
+ """
+ return _distro.info(pretty, best)
+
+
+def os_release_info():
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the os-release file data source of the current OS distribution.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_info()
+
+
+def lsb_release_info():
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the lsb_release command data source of the current OS distribution.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_info()
+
+
+def distro_release_info():
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the distro release file data source of the current OS distribution.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_info()
+
+
+def uname_info():
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the uname command data source of the current OS distribution.
+ """
+ return _distro.uname_info()
+
+
+def os_release_attr(attribute):
+ """
+ Return a single named information item from the os-release file data source
+ of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_attr(attribute)
+
+
+def lsb_release_attr(attribute):
+ """
+ Return a single named information item from the lsb_release command output
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_attr(attribute)
+
+
+def distro_release_attr(attribute):
+ """
+ Return a single named information item from the distro release file
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_attr(attribute)
+
+
+def uname_attr(attribute):
+ """
+ Return a single named information item from the uname command data
+ source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+ """
+ return _distro.uname_attr(attribute)
+
+
+class cached_property(object):
+ """A version of @property which caches the value. On access, it calls the
+ underlying function and sets the value in `__dict__` so future accesses
+ will not re-call the property.
+ """
+ def __init__(self, f):
+ self._fname = f.__name__
+ self._f = f
+
+ def __get__(self, obj, owner):
+ assert obj is not None, 'call {} on an instance'.format(self._fname)
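+ # Assigning into obj.__dict__ shadows this descriptor: cached_property
+ # defines no __set__, so it is a non-data descriptor and the instance
+ # attribute wins on every later lookup, never re-entering __get__.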
+ ret = obj.__dict__[self._fname] = self._f(obj)
+ return ret
+
+
+class LinuxDistribution(object):
+ """
+ Provides information about an OS distribution.
+
+ This package creates a private module-global instance of this class with
+ default initialization arguments, that is used by the
+ `consolidated accessor functions`_ and `single source accessor functions`_.
+ By using default initialization arguments, that module-global instance
+ returns data about the current OS distribution (i.e. the distro this
+ package runs on).
+
+ Normally, it is not necessary to create additional instances of this class.
+ However, in situations where control is needed over the exact data sources
+ that are used, instances of this class can be created with a specific
+ distro release file, or a specific os-release file, or without invoking the
+ lsb_release command.
+ """
+
+ def __init__(self,
+ include_lsb=True,
+ os_release_file='',
+ distro_release_file='',
+ include_uname=True):
+ """
+ The initialization method of this class gathers information from the
+ available data sources, and stores that in private instance attributes.
+ Subsequent access to the information items uses these private instance
+ attributes, so that the data sources are read only once.
+
+ Parameters:
+
+ * ``include_lsb`` (bool): Controls whether the
+ `lsb_release command output`_ is included as a data source.
+
+ If the lsb_release command is not available in the program execution
+ path, the data source for the lsb_release command will be empty.
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause the default path name to
+ be used (see `os-release file`_ for details).
+
+ If the specified or defaulted os-release file does not exist, the
+ data source for the os-release file will be empty.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause a default search algorithm
+ to be used (see `distro release file`_ for details).
+
+ If the specified distro release file does not exist, or if no default
+ distro release file can be found, the data source for the distro
+ release file will be empty.
+
+ * ``include_uname`` (bool): Controls whether uname command output is
+ included as a data source. If the uname command is not available in
+ the program execution path, the data source for the uname command
+ will be empty.
+
+ Public instance attributes:
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
+ This controls whether the lsb information will be loaded.
+
+ * ``include_uname`` (bool): The result of the ``include_uname``
+ parameter. This controls whether the uname information will
+ be loaded.
+
+ Raises:
+
+ * :py:exc:`IOError`: Some I/O issue with an os-release file or distro
+ release file.
+
+ * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
+ some issue (other than not being available in the program execution
+ path).
+
+ * :py:exc:`UnicodeError`: A data source has unexpected characters or
+ uses an unexpected encoding.
+ """
+ self.os_release_file = os_release_file or \
+ os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
+ self.distro_release_file = distro_release_file or '' # updated later
+ self.include_lsb = include_lsb
+ self.include_uname = include_uname
+
+ def __repr__(self):
+ """Return repr of all info
+ """
+ return \
+ "LinuxDistribution(" \
+ "os_release_file={self.os_release_file!r}, " \
+ "distro_release_file={self.distro_release_file!r}, " \
+ "include_lsb={self.include_lsb!r}, " \
+ "include_uname={self.include_uname!r}, " \
+ "_os_release_info={self._os_release_info!r}, " \
+ "_lsb_release_info={self._lsb_release_info!r}, " \
+ "_distro_release_info={self._distro_release_info!r}, " \
+ "_uname_info={self._uname_info!r})".format(
+ self=self)
+
+ def linux_distribution(self, full_distribution_name=True):
+ """
+ Return information about the OS distribution that is compatible
+ with Python's :func:`platform.linux_distribution`, supporting a subset
+ of its parameters.
+
+ For details, see :func:`distro.linux_distribution`.
+ """
+ return (
+ self.name() if full_distribution_name else self.id(),
+ self.version(),
+ self.codename()
+ )
+
+ def id(self):
+ """Return the distro ID of the OS distribution, as a string.
+
+ For details, see :func:`distro.id`.
+ """
+ def normalize(distro_id, table):
+ distro_id = distro_id.lower().replace(' ', '_')
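+ # e.g. an lsb_release "Distributor ID" of "RedHatEnterpriseServer"
+ # lower-cases to "redhatenterpriseserver", which NORMALIZED_LSB_ID
+ # maps to "rhel".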
+ return table.get(distro_id, distro_id)
+
+ distro_id = self.os_release_attr('id')
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_OS_ID)
+
+ distro_id = self.lsb_release_attr('distributor_id')
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_LSB_ID)
+
+ distro_id = self.distro_release_attr('id')
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ distro_id = self.uname_attr('id')
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ return ''
+
+ def name(self, pretty=False):
+ """
+ Return the name of the OS distribution, as a string.
+
+ For details, see :func:`distro.name`.
+ """
+ name = self.os_release_attr('name') \
+ or self.lsb_release_attr('distributor_id') \
+ or self.distro_release_attr('name') \
+ or self.uname_attr('name')
+ if pretty:
+ name = self.os_release_attr('pretty_name') \
+ or self.lsb_release_attr('description')
+ if not name:
+ name = self.distro_release_attr('name') \
+ or self.uname_attr('name')
+ version = self.version(pretty=True)
+ if version:
+ name = name + ' ' + version
+ return name or ''
+
+ def version(self, pretty=False, best=False):
+ """
+ Return the version of the OS distribution, as a string.
+
+ For details, see :func:`distro.version`.
+ """
+ versions = [
+ self.os_release_attr('version_id'),
+ self.lsb_release_attr('release'),
+ self.distro_release_attr('version_id'),
+ self._parse_distro_release_content(
+ self.os_release_attr('pretty_name')).get('version_id', ''),
+ self._parse_distro_release_content(
+ self.lsb_release_attr('description')).get('version_id', ''),
+ self.uname_attr('release')
+ ]
+ version = ''
+ if best:
+ # This algorithm uses the last version in priority order that has
+ # the best precision. If the versions are not in conflict, that
+ # does not matter; otherwise, using the last one instead of the
+ # first one might be considered a surprise.
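+            # For example, given the versions ['7', '7.4.1708'], the
+            # result is '7.4.1708', since it has the most precision.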
+ for v in versions:
+ if v.count(".") > version.count(".") or version == '':
+ version = v
+ else:
+ for v in versions:
+ if v != '':
+ version = v
+ break
+ if pretty and version and self.codename():
+ version = u'{0} ({1})'.format(version, self.codename())
+ return version
+
+ def version_parts(self, best=False):
+ """
+ Return the version of the OS distribution, as a tuple of version
+ numbers.
+
+ For details, see :func:`distro.version_parts`.
+ """
+ version_str = self.version(best=best)
+ if version_str:
+ version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
+ matches = version_regex.match(version_str)
+ if matches:
+ major, minor, build_number = matches.groups()
+ return major, minor or '', build_number or ''
+ return '', '', ''
+
+ def major_version(self, best=False):
+ """
+ Return the major version number of the current distribution.
+
+ For details, see :func:`distro.major_version`.
+ """
+ return self.version_parts(best)[0]
+
+ def minor_version(self, best=False):
+ """
+ Return the minor version number of the current distribution.
+
+ For details, see :func:`distro.minor_version`.
+ """
+ return self.version_parts(best)[1]
+
+ def build_number(self, best=False):
+ """
+ Return the build number of the current distribution.
+
+ For details, see :func:`distro.build_number`.
+ """
+ return self.version_parts(best)[2]
+
+ def like(self):
+ """
+ Return the IDs of distributions that are like the OS distribution.
+
+ For details, see :func:`distro.like`.
+ """
+ return self.os_release_attr('id_like') or ''
+
+ def codename(self):
+ """
+ Return the codename of the OS distribution.
+
+ For details, see :func:`distro.codename`.
+ """
+ try:
+ # Handle os_release specially since distros might purposefully set
+ # this to empty string to have no codename
+ return self._os_release_info['codename']
+ except KeyError:
+ return self.lsb_release_attr('codename') \
+ or self.distro_release_attr('codename') \
+ or ''
+
+ def info(self, pretty=False, best=False):
+ """
+ Return certain machine-readable information about the OS
+ distribution.
+
+ For details, see :func:`distro.info`.
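+
+        For example (with illustrative values)::
+
+            {
+                'id': 'ubuntu',
+                'version': '16.04',
+                'version_parts': {
+                    'major': '16',
+                    'minor': '04',
+                    'build_number': ''
+                },
+                'like': 'debian',
+                'codename': 'xenial'
+            }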
+ """
+ return dict(
+ id=self.id(),
+ version=self.version(pretty, best),
+ version_parts=dict(
+ major=self.major_version(best),
+ minor=self.minor_version(best),
+ build_number=self.build_number(best)
+ ),
+ like=self.like(),
+ codename=self.codename(),
+ )
+
+ def os_release_info(self):
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the os-release file data source of the OS distribution.
+
+ For details, see :func:`distro.os_release_info`.
+ """
+ return self._os_release_info
+
+ def lsb_release_info(self):
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the lsb_release command data source of the OS
+ distribution.
+
+ For details, see :func:`distro.lsb_release_info`.
+ """
+ return self._lsb_release_info
+
+ def distro_release_info(self):
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the distro release file data source of the OS
+ distribution.
+
+ For details, see :func:`distro.distro_release_info`.
+ """
+ return self._distro_release_info
+
+ def uname_info(self):
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the uname command data source of the OS distribution.
+
+ For details, see :func:`distro.uname_info`.
+ """
+ return self._uname_info
+
+ def os_release_attr(self, attribute):
+ """
+ Return a single named information item from the os-release file data
+ source of the OS distribution.
+
+ For details, see :func:`distro.os_release_attr`.
+ """
+ return self._os_release_info.get(attribute, '')
+
+ def lsb_release_attr(self, attribute):
+ """
+ Return a single named information item from the lsb_release command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.lsb_release_attr`.
+ """
+ return self._lsb_release_info.get(attribute, '')
+
+ def distro_release_attr(self, attribute):
+ """
+ Return a single named information item from the distro release file
+ data source of the OS distribution.
+
+ For details, see :func:`distro.distro_release_attr`.
+ """
+ return self._distro_release_info.get(attribute, '')
+
+ def uname_attr(self, attribute):
+ """
+ Return a single named information item from the uname command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.uname_release_attr`.
+ """
+ return self._uname_info.get(attribute, '')
+
+ @cached_property
+ def _os_release_info(self):
+ """
+ Get the information items from the specified os-release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if os.path.isfile(self.os_release_file):
+ with open(self.os_release_file) as release_file:
+ return self._parse_os_release_content(release_file)
+ return {}
+
+ @staticmethod
+ def _parse_os_release_content(lines):
+ """
+ Parse the lines of an os-release file.
+
+ Parameters:
+
+ * lines: Iterable through the lines in the os-release file.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ props = {}
+ lexer = shlex.shlex(lines, posix=True)
+ lexer.whitespace_split = True
+
+ # The shlex module defines its `wordchars` variable using literals,
+ # making it dependent on the encoding of the Python source file.
+ # In Python 2.6 and 2.7, the shlex source file is encoded in
+ # 'iso-8859-1', and the `wordchars` variable is defined as a byte
+ # string. This causes a UnicodeDecodeError to be raised when the
+ # parsed content is a unicode object. The following fix resolves that
+ # (... but it should be fixed in shlex...):
+ if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
+ lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
+
+ tokens = list(lexer)
+ for token in tokens:
+ # At this point, all shell-like parsing has been done (i.e.
+ # comments processed, quotes and backslash escape sequences
+ # processed, multi-line values assembled, trailing newlines
+ # stripped, etc.), so the tokens are now either:
+ # * variable assignments: var=value
+ # * commands or their arguments (not allowed in os-release)
+ if '=' in token:
+ k, v = token.split('=', 1)
+ if isinstance(v, bytes):
+ v = v.decode('utf-8')
+ props[k.lower()] = v
+ else:
+ # Ignore any tokens that are not variable assignments
+ pass
+
+ if 'version_codename' in props:
+            # os-release added a version_codename field. Use that in
+            # preference to anything else. Note that some distros purposefully
+            # do not have code names; they should be setting
+            # version_codename="".
+ props['codename'] = props['version_codename']
+ elif 'ubuntu_codename' in props:
+ # Same as above but a non-standard field name used on older Ubuntus
+ props['codename'] = props['ubuntu_codename']
+ elif 'version' in props:
+ # If there is no version_codename, parse it from the version
+ codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version'])
+ if codename:
+                codename = codename.group()
+                # the codename appears within parentheses or after a comma
+                codename = codename.strip('()')
+                codename = codename.strip(',')
+                codename = codename.strip()
+                props['codename'] = codename
+
+ return props
+
+ @cached_property
+ def _lsb_release_info(self):
+ """
+ Get the information items from the lsb_release command output.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if not self.include_lsb:
+ return {}
+ with open(os.devnull, 'w') as devnull:
+ try:
+ cmd = ('lsb_release', '-a')
+ stdout = subprocess.check_output(cmd, stderr=devnull)
+ except OSError: # Command not found
+ return {}
+ content = stdout.decode(sys.getfilesystemencoding()).splitlines()
+ return self._parse_lsb_release_content(content)
+
+ @staticmethod
+ def _parse_lsb_release_content(lines):
+ """
+ Parse the output of the lsb_release command.
+
+ Parameters:
+
+ * lines: Iterable through the lines of the lsb_release output.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ props = {}
+ for line in lines:
+ kv = line.strip('\n').split(':', 1)
+ if len(kv) != 2:
+ # Ignore lines without colon.
+ continue
+ k, v = kv
+ props.update({k.replace(' ', '_').lower(): v.strip()})
+ return props
+
+ @cached_property
+ def _uname_info(self):
+ with open(os.devnull, 'w') as devnull:
+ try:
+ cmd = ('uname', '-rs')
+ stdout = subprocess.check_output(cmd, stderr=devnull)
+ except OSError:
+ return {}
+ content = stdout.decode(sys.getfilesystemencoding()).splitlines()
+ return self._parse_uname_content(content)
+
+ @staticmethod
+ def _parse_uname_content(lines):
+ props = {}
+ match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip())
+ if match:
+ name, version = match.groups()
+
+ # This is to prevent the Linux kernel version from
+ # appearing as the 'best' version on otherwise
+ # identifiable distributions.
+ if name == 'Linux':
+ return {}
+ props['id'] = name.lower()
+ props['name'] = name
+ props['release'] = version
+ return props
+
+ @cached_property
+ def _distro_release_info(self):
+ """
+ Get the information items from the specified distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if self.distro_release_file:
+ # If it was specified, we use it and parse what we can, even if
+ # its file name or content does not match the expected pattern.
+ distro_info = self._parse_distro_release_file(
+ self.distro_release_file)
+ basename = os.path.basename(self.distro_release_file)
+ # The file name pattern for user-specified distro release files
+ # is somewhat more tolerant (compared to when searching for the
+            # file), because we want to use what was specified as well as
+            # possible.
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ if 'name' in distro_info \
+ and 'cloudlinux' in distro_info['name'].lower():
+ distro_info['id'] = 'cloudlinux'
+ elif match:
+ distro_info['id'] = match.group(1)
+ return distro_info
+ else:
+ try:
+ basenames = os.listdir(_UNIXCONFDIR)
+ # We sort for repeatability in cases where there are multiple
+ # distro specific files; e.g. CentOS, Oracle, Enterprise all
+ # containing `redhat-release` on top of their own.
+ basenames.sort()
+ except OSError:
+ # This may occur when /etc is not readable but we can't be
+ # sure about the *-release files. Check common entries of
+                # /etc for information. If they turn out not to be there,
+                # the error is handled in `_parse_distro_release_file()`.
+ basenames = ['SuSE-release',
+ 'arch-release',
+ 'base-release',
+ 'centos-release',
+ 'fedora-release',
+ 'gentoo-release',
+ 'mageia-release',
+ 'mandrake-release',
+ 'mandriva-release',
+ 'mandrivalinux-release',
+ 'manjaro-release',
+ 'oracle-release',
+ 'redhat-release',
+ 'sl-release',
+ 'slackware-version']
+ for basename in basenames:
+ if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
+ continue
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ if match:
+ filepath = os.path.join(_UNIXCONFDIR, basename)
+ distro_info = self._parse_distro_release_file(filepath)
+ if 'name' in distro_info:
+ # The name is always present if the pattern matches
+ self.distro_release_file = filepath
+ distro_info['id'] = match.group(1)
+ if 'cloudlinux' in distro_info['name'].lower():
+ distro_info['id'] = 'cloudlinux'
+ return distro_info
+ return {}
+
+ def _parse_distro_release_file(self, filepath):
+ """
+ Parse a distro release file.
+
+ Parameters:
+
+ * filepath: Path name of the distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ try:
+ with open(filepath) as fp:
+ # Only parse the first line. For instance, on SLES there
+ # are multiple lines. We don't want them...
+ return self._parse_distro_release_content(fp.readline())
+ except (OSError, IOError):
+ # Ignore not being able to read a specific, seemingly version
+ # related file.
+ # See https://github.com/nir0s/distro/issues/162
+ return {}
+
+ @staticmethod
+ def _parse_distro_release_content(line):
+ """
+ Parse a line from a distro release file.
+
+ Parameters:
+ * line: Line from the distro release file. Must be a unicode string
+ or a UTF-8 encoded byte string.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if isinstance(line, bytes):
+ line = line.decode('utf-8')
+ matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
+ line.strip()[::-1])
+ distro_info = {}
+ if matches:
+ # regexp ensures non-None
+ distro_info['name'] = matches.group(3)[::-1]
+ if matches.group(2):
+ distro_info['version_id'] = matches.group(2)[::-1]
+ if matches.group(1):
+ distro_info['codename'] = matches.group(1)[::-1]
+ elif line:
+ distro_info['name'] = line.strip()
+ return distro_info
+
+
+_distro = LinuxDistribution()
+
+
+def main():
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ parser = argparse.ArgumentParser(description="OS distro info tool")
+ parser.add_argument(
+ '--json',
+ '-j',
+        help="Output in machine-readable format",
+ action="store_true")
+ args = parser.parse_args()
+
+ if args.json:
+ logger.info(json.dumps(info(), indent=4, sort_keys=True))
+ else:
+ logger.info('Name: %s', name(pretty=True))
+ distribution_version = version(pretty=True)
+ logger.info('Version: %s', distribution_version)
+ distribution_codename = codename()
+ logger.info('Codename: %s', distribution_codename)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/python/dlmanager/README.rst b/third_party/python/dlmanager/README.rst
new file mode 100644
index 0000000000..e8db528fa2
--- /dev/null
+++ b/third_party/python/dlmanager/README.rst
@@ -0,0 +1,59 @@
+.. image:: https://badge.fury.io/py/dlmanager.svg
+ :target: https://pypi.python.org/pypi/dlmanager
+
+.. image:: https://readthedocs.org/projects/dlmanager/badge/?version=latest
+ :target: http://dlmanager.readthedocs.org/en/latest/?badge=latest
+ :alt: Documentation Status
+
+.. image:: https://travis-ci.org/parkouss/dlmanager.svg?branch=master
+ :target: https://travis-ci.org/parkouss/dlmanager
+
+.. image:: https://codecov.io/github/parkouss/dlmanager/coverage.svg?branch=master
+ :target: https://codecov.io/github/parkouss/dlmanager?branch=master
+
+dlmanager
+=========
+
+**dlmanager** is a Python 2 and 3 download manager library, with the following
+features:
+
+- Download files in the background and in parallel
+- Cancel downloads
+- Store downloads in a given directory, avoiding re-downloading files
+- Limit the size of this directory, removing the oldest files
+
+
+Example
+-------
+
+.. code-block:: python
+
+ from dlmanager import DownloadManager, PersistLimit
+
+ manager = DownloadManager(
+        "dlmanager-destdir",
+        persist_limit=PersistLimit(
+            size_limit=1073741824,  # 1 GB max
+            file_limit=10,  # keep at least 10 files even if size_limit is reached
+ )
+ )
+
+ # Start downloads in background
+ # Note that if files are already present, this is a no-op.
+ manager.download(url1)
+ manager.download(url2)
+
+ # Wait for completion
+ try:
+ manager.wait()
+ except:
+ manager.cancel()
+ raise
+
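+Progress reporting and selective cancellation are also supported. A minimal
+sketch (the ``url`` variable and the printing callback are illustrative):
+
+.. code-block:: python
+
+    def on_progress(download, current, total):
+        # total may be None if the server did not send a Content-length
+        print("%s: %s / %s bytes" % (download.get_url(), current, total))
+
+    manager.download(url, progress=on_progress)
+
+    # cancel only the downloads matching a condition
+    manager.cancel(cancel_if=lambda dl: dl.get_url() == url)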
+
+Installation
+------------
+
+Use pip: ::
+
+ pip install -U dlmanager
diff --git a/third_party/python/dlmanager/check.py b/third_party/python/dlmanager/check.py
new file mode 100755
index 0000000000..bcc842305e
--- /dev/null
+++ b/third_party/python/dlmanager/check.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+"""
+Run flake8 checks and tests.
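+
+Usage::
+
+    python check.py          # run flake8 and the test suite
+    python check.py -C -H    # also collect coverage and build an HTML report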
+"""
+
+import os
+import argparse
+import pipes
+import shutil
+import tempfile
+
+from subprocess import check_call
+
+
+def parse_args():
+ parser = argparse.ArgumentParser()
+
+ parser.add_argument('-C', '--with-coverage', action='store_true',
+ help="Generate coverage data from the tests run")
+ parser.add_argument('-H', '--cover-html', action='store_true',
+ help='generate html files to see test coverage')
+ return parser.parse_args()
+
+
+def run(cmd, **kwargs):
+ msg = 'Running: |%s|' % ' '.join(pipes.quote(c) for c in cmd)
+ if kwargs.get('cwd'):
+ msg += ' in %s' % kwargs['cwd']
+ print(msg)
+ check_call(cmd, **kwargs)
+
+
+def rm(path):
+ if os.path.isfile(path):
+ os.unlink(path)
+ elif os.path.isdir(path):
+ shutil.rmtree(path)
+
+
+if __name__ == '__main__':
+ options = parse_args()
+
+ here = os.path.dirname(os.path.abspath(__file__))
+ os.chdir(here)
+
+ run(['flake8', 'dlmanager', 'tests', 'setup.py', __file__])
+
+ if options.with_coverage:
+ rm('.coverage')
+ test_run_cmd = ['coverage', 'run']
+ else:
+ test_run_cmd = ['python']
+
+ tmpdir = tempfile.gettempdir()
+ tmpfiles = set(os.listdir(tmpdir))
+ run(test_run_cmd + ['setup.py', 'test'])
+
+    # files present after the test run but not before were leaked by the tests
+    remaining_tmpfiles = set(os.listdir(tmpdir)) - tmpfiles
+ assert not remaining_tmpfiles, "tests leaked some temp files: %s" % (
+ ", ".join("`%s`" % os.path.join(tmpdir, f) for f in remaining_tmpfiles)
+ )
+
+ if options.with_coverage and options.cover_html:
+ rm('htmlcov')
+ run(['coverage', 'html'])
+ print("See coverage: |firefox %s|"
+ % os.path.join(here, 'htmlcov', 'index.html'))
diff --git a/third_party/python/dlmanager/dlmanager/__init__.py b/third_party/python/dlmanager/dlmanager/__init__.py
new file mode 100644
index 0000000000..0890af484a
--- /dev/null
+++ b/third_party/python/dlmanager/dlmanager/__init__.py
@@ -0,0 +1,18 @@
+import logging
+
+__version__ = "0.1.1"
+
+
+try: # Python 2.7+
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+# Set default logging handler to avoid "No handler found" warnings.
+logging.getLogger(__name__).addHandler(NullHandler())
+
+# exported api
+from dlmanager.manager import Download, DownloadInterrupt, DownloadManager # noqa
+from dlmanager.persist_limit import PersistLimit # noqa
diff --git a/third_party/python/dlmanager/dlmanager/fs.py b/third_party/python/dlmanager/dlmanager/fs.py
new file mode 100644
index 0000000000..8908b5efce
--- /dev/null
+++ b/third_party/python/dlmanager/dlmanager/fs.py
@@ -0,0 +1,116 @@
+import errno
+import logging
+import os
+import shutil
+import stat
+import time
+
+"""
+File system utilities, copied from mozfile.
+"""
+
+LOG = logging.getLogger(__name__)
+
+
+def _call_windows_retry(func, args=(), retry_max=5, retry_delay=0.5):
+ """
+ It's possible to see spurious errors on Windows due to various things
+ keeping a handle to the directory open (explorer, virus scanners, etc)
+ So we try a few times if it fails with a known error.
+ """
+ retry_count = 0
+ while True:
+ try:
+ func(*args)
+ except OSError as e:
+ # Error codes are defined in:
+ # http://docs.python.org/2/library/errno.html#module-errno
+ if e.errno not in (errno.EACCES, errno.ENOTEMPTY):
+ raise
+
+ if retry_count == retry_max:
+ raise
+
+ retry_count += 1
+
+ LOG.info('%s() failed for "%s". Reason: %s (%s). Retrying...',
+ func.__name__, args, e.strerror, e.errno)
+ time.sleep(retry_delay)
+ else:
+ # If no exception has been thrown it should be done
+ break
+
+
+def remove(path):
+ """Removes the specified file, link, or directory tree.
+
+    This is a replacement for shutil.rmtree that works better under
+    Windows. It does the following things:
+
+ - check path access for the current user before trying to remove
+ - retry operations on some known errors due to various things keeping
+ a handle on file paths - like explorer, virus scanners, etc. The
+ known errors are errno.EACCES and errno.ENOTEMPTY, and it will
+      retry up to five times with a delay of 0.5 seconds between each
+ attempt.
+
+    Note that no error will be raised if the given path does not exist.
+
+ :param path: path to be removed
+ """
+
+ def _call_with_windows_retry(*args, **kwargs):
+ try:
+ _call_windows_retry(*args, **kwargs)
+ except OSError as e:
+ # The file or directory to be removed doesn't exist anymore
+ if e.errno != errno.ENOENT:
+ raise
+
+ def _update_permissions(path):
+        """Set the specified permissions depending on the file type."""
+ if os.path.islink(path):
+ # Path is a symlink which we don't have to modify
+ # because it should already have all the needed permissions
+ return
+
+ stats = os.stat(path)
+
+ if os.path.isfile(path):
+ mode = stats.st_mode | stat.S_IWUSR
+ elif os.path.isdir(path):
+ mode = stats.st_mode | stat.S_IWUSR | stat.S_IXUSR
+ else:
+ # Not supported type
+ return
+
+ _call_with_windows_retry(os.chmod, (path, mode))
+
+ if not os.path.exists(path):
+ return
+
+ if os.path.isfile(path) or os.path.islink(path):
+ # Verify the file or link is read/write for the current user
+ _update_permissions(path)
+ _call_with_windows_retry(os.remove, (path,))
+
+ elif os.path.isdir(path):
+ # Verify the directory is read/write/execute for the current user
+ _update_permissions(path)
+
+ # We're ensuring that every nested item has writable permission.
+ for root, dirs, files in os.walk(path):
+ for entry in dirs + files:
+ _update_permissions(os.path.join(root, entry))
+ _call_with_windows_retry(shutil.rmtree, (path,))
+
+
+def move(src, dst):
+ """
+ Move a file or directory path.
+
+    This is a replacement for shutil.move that works better under Windows,
+ retrying operations on some known errors due to various things keeping
+ a handle on file paths.
+ """
+ _call_windows_retry(shutil.move, (src, dst))
diff --git a/third_party/python/dlmanager/dlmanager/manager.py b/third_party/python/dlmanager/dlmanager/manager.py
new file mode 100644
index 0000000000..3dce3b7838
--- /dev/null
+++ b/third_party/python/dlmanager/dlmanager/manager.py
@@ -0,0 +1,323 @@
+import os
+import requests
+import six
+import sys
+import tempfile
+import threading
+
+from contextlib import closing
+from six.moves.urllib.parse import urlparse
+
+from dlmanager import fs
+from dlmanager.persist_limit import PersistLimit
+
+
+class DownloadInterrupt(Exception):
+ "Raised when a download is interrupted."
+
+
+class Download(object):
+ """
+    Download is responsible for downloading one file in the background.
+
+ Example of use: ::
+
+ dl = Download(url, dest)
+ dl.start()
+ dl.wait() # this will block until completion / cancel / error
+
+    If a download fails or is canceled, the temporary dest is removed from
+    disk.
+
+ Usually, Downloads are created by using :meth:`DownloadManager.download`.
+
+ :param url: the url of the file to download
+ :param dest: the local file path destination
+    :param finished_callback: a callback that will be called in the thread
+                              when the thread's work is done. Takes the
+                              download instance as a parameter.
+    :param chunk_size: size of the chunks that will be read. The thread
+                       cannot be stopped while a chunk is being read.
+    :param session: a requests.Session instance that will do the real
+                    downloading work. If None, the `requests` module is used.
+    :param progress: A callable to report the progress (defaults to None).
+                     See :meth:`set_progress`.
+ """
+ def __init__(self, url, dest, finished_callback=None,
+ chunk_size=16 * 1024, session=None, progress=None):
+ self.thread = threading.Thread(
+ target=self._download,
+ args=(url, dest, finished_callback, chunk_size,
+ session or requests)
+ )
+ self._lock = threading.Lock()
+ self.__url = url
+ self.__dest = dest
+ self.__progress = progress
+ self.__canceled = False
+ self.__error = None
+
+ def start(self):
+ """
+ Start the thread that will do the download.
+ """
+ self.thread.start()
+
+ def cancel(self):
+ """
+ Cancel a previously started download.
+ """
+ self.__canceled = True
+
+ def is_canceled(self):
+ """
+ Returns True if we canceled this download.
+ """
+ return self.__canceled
+
+ def is_running(self):
+ """
+ Returns True if the downloading thread is running.
+ """
+ return self.thread.is_alive()
+
+ def wait(self, raise_if_error=True):
+ """
+ Block until the downloading thread is finished.
+
+ :param raise_if_error: if True (the default), :meth:`raise_if_error`
+ will be called and raise an error if any.
+ """
+ while self.thread.is_alive():
+ try:
+ # in case of exception here (like KeyboardInterrupt),
+ # cancel the task.
+ self.thread.join(0.02)
+ except:
+ self.cancel()
+ raise
+ # this will raise exception that may happen inside the thread.
+ if raise_if_error:
+ self.raise_if_error()
+
+ def error(self):
+ """
+ Returns None or a tuple of three values (type, value, traceback)
+ that give information about the exception.
+ """
+ return self.__error
+
+ def raise_if_error(self):
+ """
+ Raise an error if any. If the download was canceled, raise
+ :class:`DownloadInterrupt`.
+ """
+ if self.__error:
+ six.reraise(*self.__error)
+ if self.__canceled:
+ raise DownloadInterrupt()
+
+ def set_progress(self, progress):
+ """
+        Set a callable to report the progress of the download, or None to
+        disable any reporting.
+
+        The callable must take three parameters (download, current, total).
+        Note that this method is thread-safe; you can call it during a
+        download.
+ """
+ with self._lock:
+ self.__progress = progress
+
+ def get_dest(self):
+ """
+ Returns the dest.
+ """
+ return self.__dest
+
+ def get_url(self):
+ """
+ Returns the url.
+ """
+ return self.__url
+
+ def _update_progress(self, current, total):
+ with self._lock:
+ if self.__progress:
+ self.__progress(self, current, total)
+
+ def _download(self, url, dest, finished_callback, chunk_size, session):
+ # save the file under a temporary name
+        # this avoids using a broken file in case things go really bad
+        # while downloading the file (i.e. the python interpreter is
+        # killed abruptly)
+ temp = None
+ bytes_so_far = 0
+ try:
+ with closing(session.get(url, stream=True)) as response:
+ total_size = response.headers.get('Content-length', '').strip()
+ total_size = int(total_size) if total_size else None
+ self._update_progress(bytes_so_far, total_size)
+ # we use NamedTemporaryFile as raw open() call was causing
+ # issues on windows - see:
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1185756
+ with tempfile.NamedTemporaryFile(
+ delete=False,
+ suffix='.tmp',
+ dir=os.path.dirname(dest)) as temp:
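+
+        For example, on an Ubuntu 16.04 system this would typically
+        return ``('Ubuntu', '16.04', 'xenial')``.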
+ for chunk in response.iter_content(chunk_size):
+ if self.is_canceled():
+ break
+ if chunk:
+ temp.write(chunk)
+ bytes_so_far += len(chunk)
+ self._update_progress(bytes_so_far, total_size)
+ response.raise_for_status()
+ except:
+ self.__error = sys.exc_info()
+ try:
+ if temp is None:
+                    pass  # the temp file was never opened; nothing to do
+ elif self.is_canceled() or self.__error:
+ fs.remove(temp.name)
+ else:
+ # if all goes well, then rename the file to the real dest
+ fs.remove(dest) # just in case it already existed
+ fs.move(temp.name, dest)
+ finally:
+ if finished_callback:
+ finished_callback(self)
+
+
+class DownloadManager(object):
+ """
+    DownloadManager is responsible for starting and managing downloads inside
+ a given directory. It will download a file only if a given filename
+ is not already there.
+
+    Note that background downloads need to be stopped. For example, if
+    an exception is raised while a download is occurring, python will only
+    exit when the download finishes. To avoid that, use the following
+    idiom: ::
+
+ def download_things(manager):
+ # do things with the manager
+ manager.download(url1, f1)
+ manager.download(url2, f2)
+ ...
+
+ manager = DownloadManager(destdir)
+ try:
+ download_things(manager)
+ finally:
+            # ensure we cancel all background downloads so that any
+            # remaining threads can end
+ manager.cancel()
+
+ :param destdir: a directory where files are downloaded. It will be created
+                    if it does not exist.
+ :param session: a requests session. If None, one will be created for you.
+ :param persist_limit: an instance of :class:`PersistLimit`, to allow
+ limiting the size of the download dir. Defaults
+ to None, meaning no limit.
+ """
+ def __init__(self, destdir, session=None, persist_limit=None):
+ self.destdir = destdir
+ self.session = session or requests.Session()
+ self._downloads = {}
+ self._lock = threading.Lock()
+ self.persist_limit = persist_limit or PersistLimit(0)
+ self.persist_limit.register_dir_content(self.destdir)
+
+ # if persist folder does not exist, create it
+ if not os.path.isdir(destdir):
+ os.makedirs(destdir)
+
+ def get_dest(self, fname):
+ return os.path.join(self.destdir, fname)
+
+ def cancel(self, cancel_if=None):
+ """
+ Cancel downloads, if any.
+
+        If cancel_if is given, it must be a callable that takes the download
+        instance as a parameter and returns True if the download should be
+        canceled.
+
+ Note that download threads won't be stopped directly.
+ """
+ with self._lock:
+ for download in six.itervalues(self._downloads):
+ if cancel_if is None or cancel_if(download):
+ if download.is_running():
+ download.cancel()
+
+ def wait(self, raise_if_error=True):
+ """
+ Wait for all downloads to be finished.
+ """
+ for download in self._downloads.values():
+ download.wait(raise_if_error=raise_if_error)
+
+ def download(self, url, fname=None, progress=None):
+ """
+ Returns a started :class:`Download` instance, or None if fname is
+ already present in destdir.
+
+        If a download is already running for the given fname, it is just
+        returned. Otherwise the download is created, started and returned.
+
+ :param url: url of the file to download.
+ :param fname: name to give for the downloaded file. If None, it will
+                      be the name extracted from the url.
+ :param progress: a callable to report the download progress, or None.
+ See :meth:`Download.set_progress`.
+ """
+ if fname is None:
+ fname = urlparse(url).path.split('/')[-1]
+ dest = self.get_dest(fname)
+ with self._lock:
+            # if we are already downloading it, return that instance
+ if dest in self._downloads:
+ dl = self._downloads[dest]
+ if progress:
+ dl.set_progress(progress)
+ return dl
+
+ if os.path.exists(dest):
+ return None
+
+        # otherwise create the download (it will be automatically removed
+        # from the list on completion), start it, and return it.
+ with self._lock:
+ download = Download(url, dest,
+ session=self.session,
+ finished_callback=self._download_finished,
+ progress=progress)
+ self._downloads[dest] = download
+ download.start()
+ self._download_started(download)
+ return download
+
+ def _download_started(self, dl):
+ """
+ Useful when sub-classing. Report the start event of a download.
+
+ :param dl: The :class:`Download` instance.
+ """
+ pass
+
+ def _download_finished(self, dl):
+ """
+ Useful when sub-classing. Report the end of a download.
+
+ Note that this is executed in the download thread. Also, you should
+ make sure to call the base implementation.
+
+ :param dl: The :class:`Download` instance.
+ """
+ with self._lock:
+ dest = dl.get_dest()
+ del self._downloads[dest]
+ self.persist_limit.register_file(dest)
+ self.persist_limit.remove_old_files()
diff --git a/third_party/python/dlmanager/dlmanager/persist_limit.py b/third_party/python/dlmanager/dlmanager/persist_limit.py
new file mode 100644
index 0000000000..03a1829f70
--- /dev/null
+++ b/third_party/python/dlmanager/dlmanager/persist_limit.py
@@ -0,0 +1,65 @@
+import os
+import stat
+
+from collections import namedtuple
+from glob import glob
+
+from dlmanager import fs
+
+
+File = namedtuple('File', ('path', 'stat'))
+
+
+class PersistLimit(object):
+ """
+ Keep a list of files, removing the oldest ones when the size_limit
+ is reached.
+
+    The access time of a file (i.e. the last time the file was read) is
+    used to determine the oldest ones.
+
+ :param size_limit: the size limit in bytes. A value of 0 means no limit.
+    :param file_limit: even if the size limit is reached, this forces
+                       keeping at least *file_limit* files.
+ """
+ def __init__(self, size_limit, file_limit=5):
+ self.size_limit = size_limit
+ self.file_limit = file_limit
+ self.files = []
+ self._files_size = 0
+
+ def register_file(self, path):
+ """
+        Register a single file.
+ """
+ try:
+ fstat = os.stat(path)
+ except OSError:
+            # the file probably does not exist, just skip it;
+            # note this happens when background downloads are canceled
+ return
+ if stat.S_ISREG(fstat.st_mode):
+ self.files.append(File(path=path, stat=fstat))
+ self._files_size += fstat.st_size
+
+ def register_dir_content(self, directory, pattern="*"):
+ """
+        Register every file in a directory that matches *pattern*.
+ """
+ for path in glob(os.path.join(directory, pattern)):
+ self.register_file(path)
+
+ def remove_old_files(self):
+ """
+        Remove the oldest registered files.
+ """
+ if self.size_limit <= 0 or self.file_limit <= 0:
+ return
+        # sort by access time, oldest first
+ files = sorted(self.files, key=lambda f: f.stat.st_atime)
+ while len(files) > self.file_limit and \
+ self._files_size >= self.size_limit:
+ f = files.pop(0)
+ fs.remove(f.path)
+ self._files_size -= f.stat.st_size
+ self.files = files
diff --git a/third_party/python/dlmanager/doc/Makefile b/third_party/python/dlmanager/doc/Makefile
new file mode 100644
index 0000000000..6b477bf459
--- /dev/null
+++ b/third_party/python/dlmanager/doc/Makefile
@@ -0,0 +1,216 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+PAPER =
+BUILDDIR = _build
+
+# User-friendly check for sphinx-build
+ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
+$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
+endif
+
+# Internal variables.
+PAPEROPT_a4 = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
+
+.PHONY: help
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " html to make standalone HTML files"
+ @echo " dirhtml to make HTML files named index.html in directories"
+ @echo " singlehtml to make a single large HTML file"
+ @echo " pickle to make pickle files"
+ @echo " json to make JSON files"
+ @echo " htmlhelp to make HTML files and a HTML help project"
+ @echo " qthelp to make HTML files and a qthelp project"
+ @echo " applehelp to make an Apple Help Book"
+ @echo " devhelp to make HTML files and a Devhelp project"
+ @echo " epub to make an epub"
+ @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+ @echo " latexpdf to make LaTeX files and run them through pdflatex"
+ @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+ @echo " text to make text files"
+ @echo " man to make manual pages"
+ @echo " texinfo to make Texinfo files"
+ @echo " info to make Texinfo files and run them through makeinfo"
+ @echo " gettext to make PO message catalogs"
+ @echo " changes to make an overview of all changed/added/deprecated items"
+ @echo " xml to make Docutils-native XML files"
+ @echo " pseudoxml to make pseudoxml-XML files for display purposes"
+ @echo " linkcheck to check all external links for integrity"
+ @echo " doctest to run all doctests embedded in the documentation (if enabled)"
+ @echo " coverage to run coverage check of the documentation (if enabled)"
+
+.PHONY: clean
+clean:
+ rm -rf $(BUILDDIR)/*
+
+.PHONY: html
+html:
+ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+.PHONY: dirhtml
+dirhtml:
+ $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+ @echo
+ @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+.PHONY: singlehtml
+singlehtml:
+ $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+ @echo
+ @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+.PHONY: pickle
+pickle:
+ $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+ @echo
+ @echo "Build finished; now you can process the pickle files."
+
+.PHONY: json
+json:
+ $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+ @echo
+ @echo "Build finished; now you can process the JSON files."
+
+.PHONY: htmlhelp
+htmlhelp:
+ $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+ @echo
+ @echo "Build finished; now you can run HTML Help Workshop with the" \
+ ".hhp project file in $(BUILDDIR)/htmlhelp."
+
+.PHONY: qthelp
+qthelp:
+ $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
+ @echo
+ @echo "Build finished; now you can run "qcollectiongenerator" with the" \
+ ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
+ @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/dlmanager.qhcp"
+ @echo "To view the help file:"
+ @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/dlmanager.qhc"
+
+.PHONY: applehelp
+applehelp:
+ $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
+ @echo
+ @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
+ @echo "N.B. You won't be able to view it unless you put it in" \
+ "~/Library/Documentation/Help or install it in your application" \
+ "bundle."
+
+.PHONY: devhelp
+devhelp:
+ $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
+ @echo
+ @echo "Build finished."
+ @echo "To view the help file:"
+ @echo "# mkdir -p $$HOME/.local/share/devhelp/dlmanager"
+ @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/dlmanager"
+ @echo "# devhelp"
+
+.PHONY: epub
+epub:
+ $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
+ @echo
+ @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
+
+.PHONY: latex
+latex:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo
+ @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
+ @echo "Run \`make' in that directory to run these through (pdf)latex" \
+ "(use \`make latexpdf' here to do that automatically)."
+
+.PHONY: latexpdf
+latexpdf:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through pdflatex..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+.PHONY: latexpdfja
+latexpdfja:
+ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
+ @echo "Running LaTeX files through platex and dvipdfmx..."
+ $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
+ @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
+
+.PHONY: text
+text:
+ $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
+ @echo
+ @echo "Build finished. The text files are in $(BUILDDIR)/text."
+
+.PHONY: man
+man:
+ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
+ @echo
+ @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
+
+.PHONY: texinfo
+texinfo:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo
+ @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
+ @echo "Run \`make' in that directory to run these through makeinfo" \
+ "(use \`make info' here to do that automatically)."
+
+.PHONY: info
+info:
+ $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
+ @echo "Running Texinfo files through makeinfo..."
+ make -C $(BUILDDIR)/texinfo info
+ @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
+
+.PHONY: gettext
+gettext:
+ $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
+ @echo
+ @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
+
+.PHONY: changes
+changes:
+ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
+ @echo
+ @echo "The overview file is in $(BUILDDIR)/changes."
+
+.PHONY: linkcheck
+linkcheck:
+ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
+ @echo
+ @echo "Link check complete; look for any errors in the above output " \
+ "or in $(BUILDDIR)/linkcheck/output.txt."
+
+.PHONY: doctest
+doctest:
+ $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
+ @echo "Testing of doctests in the sources finished, look at the " \
+ "results in $(BUILDDIR)/doctest/output.txt."
+
+.PHONY: coverage
+coverage:
+ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
+ @echo "Testing of coverage in the sources finished, look at the " \
+ "results in $(BUILDDIR)/coverage/python.txt."
+
+.PHONY: xml
+xml:
+ $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
+ @echo
+ @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
+
+.PHONY: pseudoxml
+pseudoxml:
+ $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
+ @echo
+ @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
diff --git a/third_party/python/dlmanager/doc/api.rst b/third_party/python/dlmanager/doc/api.rst
new file mode 100644
index 0000000000..295ce7c1fa
--- /dev/null
+++ b/third_party/python/dlmanager/doc/api.rst
@@ -0,0 +1,25 @@
+API
+===
+
+DownloadManager
+---------------
+
+.. currentmodule:: dlmanager
+
+.. autoclass:: DownloadManager
+ :members:
+
+Download
+--------
+
+.. autoclass:: Download
+ :members:
+
+.. autoclass:: DownloadInterrupt
+ :members:
+
+PersistLimit
+------------
+
+.. autoclass:: PersistLimit
+ :members:
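+
+Example
+-------
+
+A minimal sketch combining the three classes (the URL and directory name
+are illustrative):
+
+.. code-block:: python
+
+    from dlmanager import DownloadInterrupt, DownloadManager, PersistLimit
+
+    manager = DownloadManager(
+        "downloads",
+        persist_limit=PersistLimit(size_limit=512 * 1024 * 1024),
+    )
+    dl = manager.download("http://example.com/big-file.tar.gz")
+    if dl is not None:  # None means the file is already in "downloads"
+        try:
+            dl.wait()
+        except DownloadInterrupt:
+            print("download canceled")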
diff --git a/third_party/python/dlmanager/doc/conf.py b/third_party/python/dlmanager/doc/conf.py
new file mode 100644
index 0000000000..80bb5172d2
--- /dev/null
+++ b/third_party/python/dlmanager/doc/conf.py
@@ -0,0 +1,289 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+#
+# dlmanager documentation build configuration file, created by
+# sphinx-quickstart on Fri Feb 19 11:22:21 2016.
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath('..'))
+
+from dlmanager import __version__
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.viewcode',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+# source_suffix = ['.rst', '.md']
+source_suffix = '.rst'
+
+# The encoding of source files.
+#source_encoding = 'utf-8-sig'
+
+# The master toctree document.
+master_doc = 'index'
+
+# General information about the project.
+project = 'dlmanager'
+copyright = u'2016, Julien Pagès'
+author = u'Julien Pagès'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The short X.Y version.
+version = __version__
+# The full version, including alpha/beta/rc tags.
+release = version
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#today = ''
+# Else, today_fmt is used as the format for a strftime call.
+#today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ['_build']
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+#modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+#keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = 'sphinx_rtd_theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#html_theme_options = {}
+
+# Add any paths that contain custom themes here, relative to this directory.
+#html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# "<project> v<release> documentation".
+#html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+#html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+#html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#html_additional_pages = {}
+
+# If false, no module index is generated.
+#html_domain_indices = True
+
+# If false, no index is generated.
+#html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+#html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+#html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+#html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'dlmanagerdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+# The paper size ('letterpaper' or 'a4paper').
+#'papersize': 'letterpaper',
+
+# The font size ('10pt', '11pt' or '12pt').
+#'pointsize': '10pt',
+
+# Additional stuff for the LaTeX preamble.
+#'preamble': '',
+
+# Latex figure (float) alignment
+#'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'dlmanager.tex', 'dlmanager Documentation',
+ 'Julien Pagès', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#latex_use_parts = False
+
+# If true, show page references after internal links.
+#latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#latex_appendices = []
+
+# If false, no module index is generated.
+#latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'dlmanager', 'dlmanager Documentation',
+ [author], 1)
+]
+
+# If true, show URL addresses after external links.
+#man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'dlmanager', 'dlmanager Documentation',
+ author, 'dlmanager', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+# Documents to append as an appendix to all manuals.
+#texinfo_appendices = []
+
+# If false, no module index is generated.
+#texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#texinfo_no_detailmenu = False
diff --git a/third_party/python/dlmanager/doc/index.rst b/third_party/python/dlmanager/doc/index.rst
new file mode 100644
index 0000000000..c585e573ad
--- /dev/null
+++ b/third_party/python/dlmanager/doc/index.rst
@@ -0,0 +1,26 @@
+.. dlmanager documentation master file, created by
+ sphinx-quickstart on Fri Feb 19 11:22:21 2016.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to dlmanager's documentation!
+=====================================
+
+**dlmanager** is a Python 2 and 3 download manager library. It is hosted
+`on github <https://github.com/parkouss/dlmanager>`_.
+
+Contents:
+
+.. toctree::
+ :maxdepth: 2
+
+ api
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
diff --git a/third_party/python/dlmanager/doc/make.bat b/third_party/python/dlmanager/doc/make.bat
new file mode 100644
index 0000000000..5bcee17fab
--- /dev/null
+++ b/third_party/python/dlmanager/doc/make.bat
@@ -0,0 +1,263 @@
+@ECHO OFF
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set BUILDDIR=_build
+set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% .
+set I18NSPHINXOPTS=%SPHINXOPTS% .
+if NOT "%PAPER%" == "" (
+ set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS%
+ set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS%
+)
+
+if "%1" == "" goto help
+
+if "%1" == "help" (
+ :help
+ echo.Please use `make ^<target^>` where ^<target^> is one of
+ echo. html to make standalone HTML files
+ echo. dirhtml to make HTML files named index.html in directories
+ echo. singlehtml to make a single large HTML file
+ echo. pickle to make pickle files
+ echo. json to make JSON files
+ echo. htmlhelp to make HTML files and a HTML help project
+ echo. qthelp to make HTML files and a qthelp project
+ echo. devhelp to make HTML files and a Devhelp project
+ echo. epub to make an epub
+ echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter
+ echo. text to make text files
+ echo. man to make manual pages
+ echo. texinfo to make Texinfo files
+ echo. gettext to make PO message catalogs
+ echo. changes to make an overview over all changed/added/deprecated items
+ echo. xml to make Docutils-native XML files
+ echo. pseudoxml to make pseudoxml-XML files for display purposes
+ echo. linkcheck to check all external links for integrity
+ echo. doctest to run all doctests embedded in the documentation if enabled
+ echo. coverage to run coverage check of the documentation if enabled
+ goto end
+)
+
+if "%1" == "clean" (
+ for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i
+ del /q /s %BUILDDIR%\*
+ goto end
+)
+
+
+REM Check if sphinx-build is available and fallback to Python version if any
+%SPHINXBUILD% 1>NUL 2>NUL
+if errorlevel 9009 goto sphinx_python
+goto sphinx_ok
+
+:sphinx_python
+
+set SPHINXBUILD=python -m sphinx.__init__
+%SPHINXBUILD% 2> nul
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+:sphinx_ok
+
+
+if "%1" == "html" (
+ %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/html.
+ goto end
+)
+
+if "%1" == "dirhtml" (
+ %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml.
+ goto end
+)
+
+if "%1" == "singlehtml" (
+ %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml.
+ goto end
+)
+
+if "%1" == "pickle" (
+ %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the pickle files.
+ goto end
+)
+
+if "%1" == "json" (
+ %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can process the JSON files.
+ goto end
+)
+
+if "%1" == "htmlhelp" (
+ %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run HTML Help Workshop with the ^
+.hhp project file in %BUILDDIR%/htmlhelp.
+ goto end
+)
+
+if "%1" == "qthelp" (
+ %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; now you can run "qcollectiongenerator" with the ^
+.qhcp project file in %BUILDDIR%/qthelp, like this:
+ echo.^> qcollectiongenerator %BUILDDIR%\qthelp\dlmanager.qhcp
+ echo.To view the help file:
+	echo.^> assistant -collectionFile %BUILDDIR%\qthelp\dlmanager.qhc
+ goto end
+)
+
+if "%1" == "devhelp" (
+ %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished.
+ goto end
+)
+
+if "%1" == "epub" (
+ %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The epub file is in %BUILDDIR%/epub.
+ goto end
+)
+
+if "%1" == "latex" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished; the LaTeX files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdf" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf
+ cd %~dp0
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "latexpdfja" (
+ %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex
+ cd %BUILDDIR%/latex
+ make all-pdf-ja
+ cd %~dp0
+ echo.
+ echo.Build finished; the PDF files are in %BUILDDIR%/latex.
+ goto end
+)
+
+if "%1" == "text" (
+ %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The text files are in %BUILDDIR%/text.
+ goto end
+)
+
+if "%1" == "man" (
+ %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The manual pages are in %BUILDDIR%/man.
+ goto end
+)
+
+if "%1" == "texinfo" (
+ %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo.
+ goto end
+)
+
+if "%1" == "gettext" (
+ %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The message catalogs are in %BUILDDIR%/locale.
+ goto end
+)
+
+if "%1" == "changes" (
+ %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.The overview file is in %BUILDDIR%/changes.
+ goto end
+)
+
+if "%1" == "linkcheck" (
+ %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Link check complete; look for any errors in the above output ^
+or in %BUILDDIR%/linkcheck/output.txt.
+ goto end
+)
+
+if "%1" == "doctest" (
+ %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of doctests in the sources finished, look at the ^
+results in %BUILDDIR%/doctest/output.txt.
+ goto end
+)
+
+if "%1" == "coverage" (
+ %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Testing of coverage in the sources finished, look at the ^
+results in %BUILDDIR%/coverage/python.txt.
+ goto end
+)
+
+if "%1" == "xml" (
+ %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The XML files are in %BUILDDIR%/xml.
+ goto end
+)
+
+if "%1" == "pseudoxml" (
+ %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml
+ if errorlevel 1 exit /b 1
+ echo.
+ echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml.
+ goto end
+)
+
+:end
diff --git a/third_party/python/dlmanager/examples/dl_progressbar.py b/third_party/python/dlmanager/examples/dl_progressbar.py
new file mode 100644
index 0000000000..98c36d55b6
--- /dev/null
+++ b/third_party/python/dlmanager/examples/dl_progressbar.py
@@ -0,0 +1,41 @@
+import argparse
+
+# for python 3, use https://github.com/coagulant/progressbar-python3
+from progressbar import ProgressBar, Percentage, RotatingMarker, ETA, \
+ FileTransferSpeed, Bar
+
+from six.moves.urllib.parse import urlparse
+
+from dlmanager import Download
+
+
+def parse_args(argv=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("url", help="url to download")
+ return parser.parse_args(argv)
+
+
+def download_file(url, dest=None):
+ if dest is None:
+ dest = urlparse(url).path.split('/')[-1]
+
+ widgets = ['Download: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
+ ' ', ETA(), ' ', FileTransferSpeed()]
+ bar = ProgressBar(widgets=widgets).start()
+
+ def download_progress(_, current, total):
+ bar.maxval = total
+ bar.update(current)
+
+ dl = Download(url, dest, progress=download_progress)
+ dl.start()
+ dl.wait()
+ bar.finish()
+
+
+if __name__ == '__main__':
+ options = parse_args()
+ try:
+ download_file(options.url)
+ except KeyboardInterrupt:
+ print("\nInterrupted.")
diff --git a/third_party/python/dlmanager/examples/dl_tqdm.py b/third_party/python/dlmanager/examples/dl_tqdm.py
new file mode 100644
index 0000000000..a4e458a415
--- /dev/null
+++ b/third_party/python/dlmanager/examples/dl_tqdm.py
@@ -0,0 +1,45 @@
+import argparse
+import tqdm
+
+from six.moves.urllib.parse import urlparse
+
+from dlmanager import Download
+
+
+def parse_args(argv=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("url", help="url to download")
+ return parser.parse_args(argv)
+
+
+def download_progress(bar):
+    last_b = [0]  # mutable cell so the closure can track the previous byte count
+
+ def inner(_, current, total):
+ if total is not None:
+ bar.total = total
+ delta = current - last_b[0]
+ last_b[0] = current
+
+ if delta > 0:
+ bar.update(delta)
+ return inner
+
+
+def download_file(url, dest=None):
+ if dest is None:
+ dest = urlparse(url).path.split('/')[-1]
+
+ with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, dynamic_ncols=True,
+ desc=dest) as bar:
+ dl = Download(url, dest, progress=download_progress(bar))
+ dl.start()
+ dl.wait()
+
+
+if __name__ == '__main__':
+ options = parse_args()
+ try:
+ download_file(options.url)
+ except KeyboardInterrupt:
+ print("\nInterrupted.")
diff --git a/third_party/python/dlmanager/requirements.txt b/third_party/python/dlmanager/requirements.txt
new file mode 100644
index 0000000000..640e3d44a6
--- /dev/null
+++ b/third_party/python/dlmanager/requirements.txt
@@ -0,0 +1,2 @@
+requests
+six
diff --git a/third_party/python/dlmanager/setup.cfg b/third_party/python/dlmanager/setup.cfg
new file mode 100644
index 0000000000..3c6e79cf31
--- /dev/null
+++ b/third_party/python/dlmanager/setup.cfg
@@ -0,0 +1,2 @@
+[bdist_wheel]
+universal=1
diff --git a/third_party/python/dlmanager/setup.py b/third_party/python/dlmanager/setup.py
new file mode 100644
index 0000000000..b2a8fd392d
--- /dev/null
+++ b/third_party/python/dlmanager/setup.py
@@ -0,0 +1,60 @@
+# -*- coding: utf-8 -*-
+
+import os
+import re
+import sys
+from setuptools import setup
+from setuptools.command.test import test as TestCommand
+
+HERE = os.path.dirname(os.path.realpath(__file__))
+
+
+class PyTest(TestCommand):
+ """
+    Run py.test with the "python setup.py test" command.
+ """
+ user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
+
+ def initialize_options(self):
+ TestCommand.initialize_options(self)
+ self.pytest_args = ''
+
+ def finalize_options(self):
+ TestCommand.finalize_options(self)
+ self.pytest_args += (' ' + self.distribution.test_suite)
+
+ def run_tests(self):
+ import pytest
+ errno = pytest.main(self.pytest_args)
+ sys.exit(errno)
+
+
+def read(*parts):
+ with open(os.path.join(HERE, *parts)) as f:
+ return f.read()
+
+
+def parse_requirements(data, exclude=()):
+ return [line for line in data.splitlines()
+ if line and not line.startswith("#") and line not in exclude]
+
+
+def version():
+ return re.findall(r"__version__ = \"([\d.]+)\"",
+ read("dlmanager", "__init__.py"))[0]
+
+setup(
+ name="dlmanager",
+ version=version(),
+ description="download manager library",
+ long_description=read("README.rst"),
+ author="Julien Pagès",
+ author_email="j.parkouss@gmail.com",
+ url="http://github.com/parkouss/dlmanager",
+ license="GPL/LGPL",
+ install_requires=parse_requirements(read("requirements.txt")),
+ cmdclass={'test': PyTest},
+ tests_require=parse_requirements(read("requirements.txt"),
+ exclude=("-e .",)),
+ test_suite='tests',
+)
diff --git a/third_party/python/dlmanager/test-requirements.txt b/third_party/python/dlmanager/test-requirements.txt
new file mode 100644
index 0000000000..a4db4b7672
--- /dev/null
+++ b/third_party/python/dlmanager/test-requirements.txt
@@ -0,0 +1,7 @@
+-e .
+mock
+pytest
+pytest-mock
+flake8
+coverage
+unittest2; python_version < '2.7'
diff --git a/third_party/python/dlmanager/tests/__init__.py b/third_party/python/dlmanager/tests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/dlmanager/tests/__init__.py
diff --git a/third_party/python/dlmanager/tests/test_manager.py b/third_party/python/dlmanager/tests/test_manager.py
new file mode 100644
index 0000000000..f0ade9021f
--- /dev/null
+++ b/third_party/python/dlmanager/tests/test_manager.py
@@ -0,0 +1,251 @@
+try:
+ import unittest2 as unittest # python < 2.7 compat
+except ImportError:
+ import unittest
+import tempfile
+import shutil
+import os
+import time
+import six
+from mock import Mock
+
+from dlmanager import manager as download_manager
+
+
+def mock_session():
+ response = Mock()
+ session = Mock(get=Mock(return_value=response))
+ return session, response
+
+
+def mock_response(response, data, wait=0):
+ data = six.b(data)
+
+ def iter_content(chunk_size=4):
+ rest = data
+ while rest:
+ time.sleep(wait)
+ chunk = rest[:chunk_size]
+ rest = rest[chunk_size:]
+ yield chunk
+
+ response.headers = {'Content-length': str(len(data))}
+ response.iter_content = iter_content
+
+
+class TestDownload(unittest.TestCase):
+ def setUp(self):
+ self.tempdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tempdir)
+ self.finished = Mock()
+ self.session, self.session_response = mock_session()
+ self.tempfile = os.path.join(self.tempdir, 'dest')
+ self.dl = download_manager.Download('http://url', self.tempfile,
+ finished_callback=self.finished,
+ chunk_size=4,
+ session=self.session)
+
+ def test_creation(self):
+ self.assertFalse(self.dl.is_canceled())
+ self.assertFalse(self.dl.is_running())
+ self.assertIsNone(self.dl.error())
+ self.assertEquals(self.dl.get_url(), 'http://url')
+ self.assertEquals(self.dl.get_dest(), self.tempfile)
+
+ def create_response(self, data, wait=0):
+ mock_response(self.session_response, data, wait)
+
+ def test_download(self):
+ self.create_response('1234' * 4, 0.01)
+
+ # no file present yet
+ self.assertFalse(os.path.exists(self.tempfile))
+
+ self.dl.start()
+ self.assertTrue(self.dl.is_running())
+ self.dl.wait()
+
+ self.assertFalse(self.dl.is_running())
+ self.finished.assert_called_with(self.dl)
+ # file has been downloaded
+ with open(self.tempfile) as f:
+ self.assertEquals(f.read(), '1234' * 4)
+
+ def test_download_cancel(self):
+ self.create_response('1234' * 1000, wait=0.01)
+
+ start = time.time()
+ self.dl.start()
+ time.sleep(0.1)
+ self.dl.cancel()
+
+ with self.assertRaises(download_manager.DownloadInterrupt):
+ self.dl.wait()
+
+ self.assertTrue(self.dl.is_canceled())
+
+ # response generation should have taken 1000 * 0.01 = 10 seconds.
+ # since we canceled, this must be lower.
+ self.assertTrue((time.time() - start) < 1.0)
+
+ # file was deleted
+ self.assertFalse(os.path.exists(self.tempfile))
+ # finished callback was called
+ self.finished.assert_called_with(self.dl)
+
+ def test_download_with_progress(self):
+ data = []
+
+ def update_progress(_dl, current, total):
+ data.append((_dl, current, total))
+
+ self.create_response('1234' * 4)
+
+ self.dl.set_progress(update_progress)
+ self.dl.start()
+ self.dl.wait()
+
+ self.assertEquals(data, [
+ (self.dl, 0, 16),
+ (self.dl, 4, 16),
+ (self.dl, 8, 16),
+ (self.dl, 12, 16),
+ (self.dl, 16, 16),
+ ])
+ # file has been downloaded
+ with open(self.tempfile) as f:
+ self.assertEquals(f.read(), '1234' * 4)
+ # finished callback was called
+ self.finished.assert_called_with(self.dl)
+
+ def test_download_error_in_thread(self):
+ self.session_response.headers = {'Content-length': '24'}
+ self.session_response.iter_content.side_effect = IOError
+
+ self.dl.start()
+ with self.assertRaises(IOError):
+ self.dl.wait()
+
+ self.assertEquals(self.dl.error()[0], IOError)
+ # finished callback was called
+ self.finished.assert_called_with(self.dl)
+
+ def test_wait_does_not_block_on_exception(self):
+        # this tests the case where a user hits CTRL-C, for example,
+        # during a dl.wait() call.
+ self.create_response('1234' * 1000, wait=0.01)
+
+ original_join = self.dl.thread.join
+ it = iter('123')
+
+ def join(timeout=None):
+ next(it) # will throw StopIteration after a few calls
+ original_join(timeout)
+
+ self.dl.thread.join = join
+
+ start = time.time()
+ self.dl.start()
+
+ with self.assertRaises(StopIteration):
+ self.dl.wait()
+
+ self.assertTrue(self.dl.is_canceled())
+ # wait for the thread to finish
+ original_join()
+
+ # response generation should have taken 1000 * 0.01 = 10 seconds.
+ # since we got an error, this must be lower.
+ self.assertTrue((time.time() - start) < 1.0)
+
+ # file was deleted
+ self.assertFalse(os.path.exists(self.tempfile))
+ # finished callback was called
+ self.finished.assert_called_with(self.dl)
+
+
+class TestDownloadManager(unittest.TestCase):
+ def setUp(self):
+ self.tempdir = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, self.tempdir)
+
+ self.dl_manager = download_manager.DownloadManager(self.tempdir)
+
+ def do_download(self, url, fname, data, wait=0):
+ session, response = mock_session()
+ mock_response(response, data, wait)
+ # patch the session, so the download will use that
+ self.dl_manager.session = session
+ return self.dl_manager.download(url, fname)
+
+ def test_download(self):
+ dl1 = self.do_download('http://foo', 'foo', 'hello' * 4, wait=0.02)
+ self.assertIsInstance(dl1, download_manager.Download)
+ self.assertTrue(dl1.is_running())
+
+ # with the same fname, no new download is started. The same instance
+ # is returned since the download is running.
+ dl2 = self.do_download('http://bar', 'foo', 'hello2' * 4, wait=0.02)
+ self.assertEquals(dl1, dl2)
+
+ # starting a download with another fname will trigger a new download
+ dl3 = self.do_download('http://bar', 'foo2', 'hello you' * 4)
+ self.assertIsInstance(dl3, download_manager.Download)
+ self.assertNotEquals(dl3, dl1)
+
+ # let's wait for the downloads to finish
+ dl3.wait()
+ dl1.wait()
+
+ # now if we try to download a fname that exists, None is returned
+ dl4 = self.do_download('http://bar', 'foo', 'hello2' * 4, wait=0.02)
+ self.assertIsNone(dl4)
+
+ # downloaded files are what is expected
+ def content(fname):
+ with open(os.path.join(self.tempdir, fname)) as f:
+ return f.read()
+ self.assertEquals(content('foo'), 'hello' * 4)
+ self.assertEquals(content('foo2'), 'hello you' * 4)
+
+ # download instances are removed from the manager (internal test)
+ self.assertEquals(self.dl_manager._downloads, {})
+
+ def test_cancel(self):
+ dl1 = self.do_download('http://foo', 'foo', 'foo' * 50000, wait=0.02)
+ dl2 = self.do_download('http://foo', 'bar', 'bar' * 50000, wait=0.02)
+ dl3 = self.do_download('http://foo', 'foobar', 'foobar' * 4)
+
+ # let's cancel only one
+ def cancel_if(dl):
+ if os.path.basename(dl.get_dest()) == 'foo':
+ return True
+ self.dl_manager.cancel(cancel_if=cancel_if)
+
+ self.assertTrue(dl1.is_canceled())
+ self.assertFalse(dl2.is_canceled())
+ self.assertFalse(dl3.is_canceled())
+
+ # wait for dl3
+ dl3.wait()
+
+ # cancel everything
+ self.dl_manager.cancel()
+
+ self.assertTrue(dl1.is_canceled())
+ self.assertTrue(dl2.is_canceled())
+ # dl3 is not canceled since it finished before
+ self.assertFalse(dl3.is_canceled())
+
+ # wait for the completion of dl1 and dl2 threads
+ dl1.wait(raise_if_error=False)
+ dl2.wait(raise_if_error=False)
+
+ # at the end, only dl3 has been downloaded
+ self.assertEquals(os.listdir(self.tempdir), ["foobar"])
+
+ with open(os.path.join(self.tempdir, 'foobar')) as f:
+ self.assertEquals(f.read(), 'foobar' * 4)
+
+ # download instances are removed from the manager (internal test)
+ self.assertEquals(self.dl_manager._downloads, {})
diff --git a/third_party/python/dlmanager/tests/test_persist_limit.py b/third_party/python/dlmanager/tests/test_persist_limit.py
new file mode 100644
index 0000000000..1d899a46f2
--- /dev/null
+++ b/third_party/python/dlmanager/tests/test_persist_limit.py
@@ -0,0 +1,56 @@
+import pytest
+import os
+import tempfile
+import time
+import six
+
+from dlmanager import fs
+from dlmanager.persist_limit import PersistLimit
+
+
+class TempCreator(object):
+ def __init__(self):
+ self.tempdir = tempfile.mkdtemp()
+
+ def list(self):
+ return os.listdir(self.tempdir)
+
+ def create_file(self, name, size, delay):
+ fname = os.path.join(self.tempdir, name)
+ with open(fname, 'wb') as f:
+ f.write(six.b('a' * size))
+        # equivalent to touch, but we shift the access time for the test
+ atime = time.time() + delay
+ os.utime(fname, (atime, atime))
+
+
+@pytest.yield_fixture
+def temp():
+ tmp = TempCreator()
+ yield tmp
+ fs.remove(tmp.tempdir)
+
+
+@pytest.mark.parametrize("size_limit,file_limit,files", [
+    # file_limit is always respected
+ (10, 5, "bcdef"),
+ (10, 3, "def"),
+ # if size_limit or file_limit is 0, nothing is removed
+ (0, 5, "abcdef"),
+ (5, 0, "abcdef"),
+    # size_limit works
+ (35, 1, "def"),
+])
+def test_persist_limit(temp, size_limit, file_limit, files):
+ temp.create_file("a", 10, -6)
+ temp.create_file("b", 10, -5)
+ temp.create_file("c", 10, -4)
+ temp.create_file("d", 10, -3)
+ temp.create_file("e", 10, -2)
+ temp.create_file("f", 10, -1)
+
+ persist_limit = PersistLimit(size_limit, file_limit)
+ persist_limit.register_dir_content(temp.tempdir)
+ persist_limit.remove_old_files()
+
+ assert ''.join(sorted(temp.list())) == ''.join(sorted(files))
diff --git a/third_party/python/ecdsa/ecdsa-0.15.dist-info/LICENSE b/third_party/python/ecdsa/ecdsa-0.15.dist-info/LICENSE
new file mode 100644
index 0000000000..474479a2ce
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa-0.15.dist-info/LICENSE
@@ -0,0 +1,24 @@
+"python-ecdsa" Copyright (c) 2010 Brian Warner
+
+Portions written in 2005 by Peter Pearson and placed in the public domain.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/python/ecdsa/ecdsa-0.15.dist-info/METADATA b/third_party/python/ecdsa/ecdsa-0.15.dist-info/METADATA
new file mode 100644
index 0000000000..6e8a2efe29
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa-0.15.dist-info/METADATA
@@ -0,0 +1,625 @@
+Metadata-Version: 2.1
+Name: ecdsa
+Version: 0.15
+Summary: ECDSA cryptographic signature library (pure python)
+Home-page: http://github.com/warner/python-ecdsa
+Author: Brian Warner
+Author-email: warner@lothar.com
+License: MIT
+Platform: UNKNOWN
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
+Description-Content-Type: text/markdown
+Requires-Dist: six (>=1.9.0)
+Provides-Extra: gmpy
+Requires-Dist: gmpy ; extra == 'gmpy'
+Provides-Extra: gmpy2
+Requires-Dist: gmpy2 ; extra == 'gmpy2'
+
+# Pure-Python ECDSA
+
+[![build status](https://travis-ci.org/warner/python-ecdsa.png)](http://travis-ci.org/warner/python-ecdsa)
+[![Coverage Status](https://coveralls.io/repos/warner/python-ecdsa/badge.svg)](https://coveralls.io/r/warner/python-ecdsa)
+[![condition coverage](https://img.shields.io/badge/condition%20coverage-81%25-yellow)](https://travis-ci.org/warner/python-ecdsa/jobs/626479178#L776)
+[![Latest Version](https://img.shields.io/pypi/v/ecdsa.svg?style=flat)](https://pypi.python.org/pypi/ecdsa/)
+
+
+This is an easy-to-use implementation of ECDSA cryptography (Elliptic Curve
+Digital Signature Algorithm), implemented purely in Python, released under
+the MIT license. With this library, you can quickly create keypairs (signing
+key and verifying key), sign messages, and verify the signatures. The keys
+and signatures are very short, making them easy to handle and incorporate
+into other protocols.
+
+## Features
+
+This library provides key generation, signing, and verifying, for five
+popular NIST "Suite B" GF(p) (_prime field_) curves, with key lengths of 192,
+224, 256, 384, and 521 bits. The "short names" for these curves, as known by
+the OpenSSL tool (`openssl ecparam -list_curves`), are: `prime192v1`,
+`secp224r1`, `prime256v1`, `secp384r1`, and `secp521r1`. It includes the
+256-bit curve `secp256k1` used by Bitcoin. There is also support for the
+regular (non-twisted) variants of Brainpool curves from 160 to 512 bits. The
+"short names" of those curves are: `brainpoolP160r1`, `brainpoolP192r1`,
+`brainpoolP224r1`, `brainpoolP256r1`, `brainpoolP320r1`, `brainpoolP384r1`,
+`brainpoolP512r1`.
+No other curves are included, but it is not too hard to add support for more
+curves over prime fields.
+
+## Dependencies
+
+This library uses only Python and the 'six' package. It is compatible with
+Python 2.6, 2.7 and 3.3+. It also supports execution on alternative
+implementations like pypy and pypy3.
+
+If `gmpy2` or `gmpy` is installed, they will be used for faster arithmetic.
+Either of them can be installed after this library is installed;
+`python-ecdsa` will detect their presence on start-up and use them
+automatically.
+
+To run the OpenSSL compatibility tests, the 'openssl' tool must be in your
+`PATH`. This release has been tested successfully against OpenSSL 0.9.8o,
+1.0.0a, 1.0.2f and 1.1.1d (among others).
+
+
+## Installation
+
+This library is available on PyPI; it's recommended to install it using `pip`:
+
+```
+pip install ecdsa
+```
+
+If higher performance is wanted and using native code is not a problem,
+it's possible to install it together with `gmpy2`:
+
+```
+pip install ecdsa[gmpy2]
+```
+
+or (slower, legacy option):
+```
+pip install ecdsa[gmpy]
+```
+
+## Speed
+
+The following table shows how long this library takes to generate keypairs
+(`keygen`), to sign data (`sign`), and to verify those signatures (`verify`).
+All those values are in seconds.
+For convenience, the inverses of those values are also provided:
+how many keys per second can be generated (`keygen/s`), how many signatures
+can be made per second (`sign/s`) and how many signatures can be verified
+per second (`verify/s`). The size of a raw signature (generally the smallest
+way a signature can be encoded) is also provided in the `siglen` column.
+Use `tox -e speed` to generate this table on your own computer.
+On an Intel Core i7 4790K @ 4.0GHz I'm getting the following performance:
+
+```
+ siglen keygen keygen/s sign sign/s verify verify/s
+ NIST192p: 48 0.00035s 2893.02 0.00038s 2620.53 0.00069s 1458.92
+ NIST224p: 56 0.00043s 2307.11 0.00048s 2092.00 0.00088s 1131.33
+ NIST256p: 64 0.00056s 1793.70 0.00061s 1639.87 0.00113s 883.79
+ NIST384p: 96 0.00116s 864.33 0.00124s 806.29 0.00233s 429.87
+ NIST521p: 132 0.00221s 452.16 0.00234s 427.31 0.00460s 217.19
+ SECP256k1: 64 0.00056s 1772.65 0.00061s 1628.73 0.00110s 912.13
+ BRAINPOOLP160r1: 40 0.00026s 3801.86 0.00029s 3401.11 0.00052s 1930.47
+ BRAINPOOLP192r1: 48 0.00034s 2925.73 0.00038s 2634.34 0.00070s 1438.06
+ BRAINPOOLP224r1: 56 0.00044s 2287.98 0.00048s 2083.87 0.00088s 1137.52
+ BRAINPOOLP256r1: 64 0.00056s 1774.11 0.00061s 1628.25 0.00112s 890.71
+ BRAINPOOLP320r1: 80 0.00081s 1238.18 0.00087s 1146.71 0.00151s 661.95
+ BRAINPOOLP384r1: 96 0.00117s 855.47 0.00124s 804.56 0.00241s 414.83
+ BRAINPOOLP512r1: 128 0.00223s 447.99 0.00234s 427.49 0.00437s 229.09
+
+ ecdh ecdh/s
+ NIST192p: 0.00110s 910.70
+ NIST224p: 0.00143s 701.17
+ NIST256p: 0.00178s 560.44
+ NIST384p: 0.00383s 261.03
+ NIST521p: 0.00745s 134.23
+ SECP256k1: 0.00168s 596.23
+ BRAINPOOLP160r1: 0.00085s 1174.02
+ BRAINPOOLP192r1: 0.00113s 883.47
+ BRAINPOOLP224r1: 0.00145s 687.82
+ BRAINPOOLP256r1: 0.00195s 514.03
+ BRAINPOOLP320r1: 0.00277s 360.80
+ BRAINPOOLP384r1: 0.00412s 242.58
+ BRAINPOOLP512r1: 0.00787s 127.12
+```
+
+To test performance with `gmpy2` loaded, use `tox -e speedgmpy2`.
+On the same machine I'm getting the following performance with `gmpy2`:
+```
+ siglen keygen keygen/s sign sign/s verify verify/s
+ NIST192p: 48 0.00017s 5945.50 0.00018s 5544.66 0.00033s 3002.54
+ NIST224p: 56 0.00021s 4742.14 0.00022s 4463.52 0.00044s 2248.59
+ NIST256p: 64 0.00024s 4155.73 0.00025s 3994.28 0.00047s 2105.34
+ NIST384p: 96 0.00041s 2415.06 0.00043s 2316.41 0.00085s 1177.18
+ NIST521p: 132 0.00072s 1391.14 0.00074s 1359.63 0.00140s 716.31
+ SECP256k1: 64 0.00024s 4216.50 0.00025s 3994.52 0.00047s 2120.57
+ BRAINPOOLP160r1: 40 0.00014s 7038.99 0.00015s 6501.55 0.00029s 3397.79
+ BRAINPOOLP192r1: 48 0.00017s 5983.18 0.00018s 5626.08 0.00035s 2843.62
+ BRAINPOOLP224r1: 56 0.00021s 4727.54 0.00022s 4464.86 0.00043s 2326.84
+ BRAINPOOLP256r1: 64 0.00024s 4221.00 0.00025s 4010.26 0.00049s 2046.40
+ BRAINPOOLP320r1: 80 0.00032s 3142.14 0.00033s 3009.15 0.00061s 1652.88
+ BRAINPOOLP384r1: 96 0.00041s 2415.98 0.00043s 2340.35 0.00083s 1198.77
+ BRAINPOOLP512r1: 128 0.00064s 1567.27 0.00066s 1526.33 0.00127s 788.51
+
+ ecdh ecdh/s
+ NIST192p: 0.00051s 1960.26
+ NIST224p: 0.00067s 1502.97
+ NIST256p: 0.00073s 1376.12
+ NIST384p: 0.00132s 758.68
+ NIST521p: 0.00231s 433.23
+ SECP256k1: 0.00072s 1387.18
+ BRAINPOOLP160r1: 0.00042s 2366.60
+ BRAINPOOLP192r1: 0.00049s 2026.80
+ BRAINPOOLP224r1: 0.00067s 1486.52
+ BRAINPOOLP256r1: 0.00076s 1310.31
+ BRAINPOOLP320r1: 0.00101s 986.16
+ BRAINPOOLP384r1: 0.00131s 761.35
+ BRAINPOOLP512r1: 0.00211s 473.30
+```
+
+(there's also a `gmpy` version; execute it using `tox -e speedgmpy`)
+
+For comparison, a highly optimised implementation (including curve-specific
+assembly for some curves), like the one in OpenSSL 1.1.1d, provides the
+following performance numbers on the same machine.
+Run `openssl speed ecdsa` and `openssl speed ecdh` to reproduce them:
+```
+ sign verify sign/s verify/s
+ 192 bits ecdsa (nistp192) 0.0002s 0.0002s 4785.6 5380.7
+ 224 bits ecdsa (nistp224) 0.0000s 0.0001s 22475.6 9822.0
+ 256 bits ecdsa (nistp256) 0.0000s 0.0001s 45069.6 14166.6
+ 384 bits ecdsa (nistp384) 0.0008s 0.0006s 1265.6 1648.1
+ 521 bits ecdsa (nistp521) 0.0003s 0.0005s 3753.1 1819.5
+ 256 bits ecdsa (brainpoolP256r1) 0.0003s 0.0003s 2983.5 3333.2
+ 384 bits ecdsa (brainpoolP384r1) 0.0008s 0.0007s 1258.8 1528.1
+ 512 bits ecdsa (brainpoolP512r1) 0.0015s 0.0012s 675.1 860.1
+
+ op op/s
+ 192 bits ecdh (nistp192) 0.0002s 4853.4
+ 224 bits ecdh (nistp224) 0.0001s 15252.1
+ 256 bits ecdh (nistp256) 0.0001s 18436.3
+ 384 bits ecdh (nistp384) 0.0008s 1292.7
+ 521 bits ecdh (nistp521) 0.0003s 2884.7
+ 256 bits ecdh (brainpoolP256r1) 0.0003s 3066.5
+ 384 bits ecdh (brainpoolP384r1) 0.0008s 1298.0
+ 512 bits ecdh (brainpoolP512r1) 0.0014s 694.8
+```
+
+Keys and signatures can be serialized in different ways (see Usage, below).
+For a NIST192p key, the three basic representations require strings of the
+following lengths (in bytes):
+
+ to_string: signkey= 24, verifykey= 48, signature=48
+ compressed: signkey=n/a, verifykey= 25, signature=n/a
+ DER: signkey=106, verifykey= 80, signature=55
+ PEM: signkey=278, verifykey=162, (no support for PEM signatures)
+
+## History
+
+In 2006, Peter Pearson announced his pure-python implementation of ECDSA in a
+[message to sci.crypt][1], available from his [download site][2]. In 2010,
+Brian Warner wrote a wrapper around this code, to make it a bit easier and
+safer to use. Hubert Kario then included an implementation of elliptic curve
+cryptography that uses Jacobian coordinates internally, improving performance
+about 20-fold. You are looking at the README for this wrapper.
+
+[1]: http://www.derkeiler.com/Newsgroups/sci.crypt/2006-01/msg00651.html
+[2]: http://webpages.charter.net/curryfans/peter/downloads.html
+
+## Testing
+
+To run the full test suite, do this:
+
+ tox -e coverage
+
+On an Intel Core i7 4790K @ 4.0GHz, the tests take about 16 seconds to execute.
+The test suite uses
+[`hypothesis`](https://github.com/HypothesisWorks/hypothesis) so there is some
+inherent variability in the test suite execution time.
+
+One part of `test_pyecdsa.py` checks compatibility with OpenSSL by
+running the "openssl" CLI tool; make sure it's in your `PATH` if you want
+to test compatibility with it.
+
+## Security
+
+This library was not designed with security in mind. If you are processing
+data that needs to be protected, we suggest you use a quality wrapper around
+OpenSSL. [pyca/cryptography](https://cryptography.io) is one example of such
+a wrapper. The primary use-case of this library is as a portable library for
+interoperability testing and as a teaching tool.
+
+**This library does not protect against side channel attacks.**
+
+Do not allow attackers to measure how long it takes you to generate a keypair
+or sign a message. Do not allow attackers to run code on the same physical
+machine when keypair generation or signing is taking place (this includes
+virtual machines). Do not allow attackers to measure how much power your
+computer uses while generating the keypair or signing a message. Do not allow
+attackers to measure RF interference coming from your computer while generating
+a keypair or signing a message. Note: just loading the private key will cause
+keypair generation. Other operations or attack vectors may also be
+vulnerable to attacks. **For a sophisticated attacker, observing just one
+operation with a private key will be sufficient to completely
+reconstruct the private key**.
+
+Please also note that any pure-Python cryptographic library will be vulnerable
+to the same side channel attacks. This is because Python does not provide
+side-channel secure primitives (with the exception of
+[`hmac.compare_digest()`][3]), making side-channel secure programming
+impossible.
+
+This library depends upon a strong source of random numbers. Do not use it on
+a system where `os.urandom()` does not provide cryptographically secure
+random numbers.
+
+[3]: https://docs.python.org/3/library/hmac.html#hmac.compare_digest
+
+## Usage
+
+You start by creating a `SigningKey`. You can use this to sign data, by passing
+in data as a byte string and getting back the signature (also a byte string).
+You can also ask a `SigningKey` to give you the corresponding `VerifyingKey`.
+The `VerifyingKey` can be used to verify a signature, by passing it both the
+data string and the signature byte string: it either returns True or raises
+`BadSignatureError`.
+
+```python
+from ecdsa import SigningKey
+sk = SigningKey.generate() # uses NIST192p
+vk = sk.verifying_key
+signature = sk.sign(b"message")
+assert vk.verify(signature, b"message")
+```
+
+Each `SigningKey`/`VerifyingKey` is associated with a specific curve, like
+NIST192p (the default one). Longer curves are more secure, but take longer to
+use, and result in longer keys and signatures.
+
+```python
+from ecdsa import SigningKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+vk = sk.verifying_key
+signature = sk.sign(b"message")
+assert vk.verify(signature, b"message")
+```
+
+The `SigningKey` can be serialized into several different formats: the shortest
+is to call `s=sk.to_string()`, and then re-create it with
+`SigningKey.from_string(s, curve)`. This short form does not record the
+curve, so you must be sure to pass to `from_string()` the same curve you used
+for the original key. The short form of a NIST192p-based signing key is just 24
+bytes long. If a point encoding is invalid or it does not lie on the specified
+curve, `from_string()` will raise `MalformedPointError`.
+
+```python
+from ecdsa import SigningKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+sk_string = sk.to_string()
+sk2 = SigningKey.from_string(sk_string, curve=NIST384p)
+print(sk_string.hex())
+print(sk2.to_string().hex())
+```
+
+Note: while the methods are called `to_string()`, the type they return is
+actually `bytes`; the "string" part is a leftover from Python 2.
+
+`sk.to_pem()` and `sk.to_der()` will serialize the signing key into the same
+formats that OpenSSL uses. The PEM file looks like the familiar ASCII-armored
+`"-----BEGIN EC PRIVATE KEY-----"` base64-encoded format, and the DER format
+is a shorter binary form of the same data.
+`SigningKey.from_pem()/.from_der()` will undo this serialization. These
+formats include the curve name, so you do not need to pass in a curve
+identifier to the deserializer. In case the file is malformed, `from_der()`
+and `from_pem()` will raise `UnexpectedDER` or `MalformedPointError`.
+
+```python
+from ecdsa import SigningKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+sk_pem = sk.to_pem()
+sk2 = SigningKey.from_pem(sk_pem)
+# sk and sk2 are the same key
+```
+
+Likewise, the `VerifyingKey` can be serialized in the same way:
+`vk.to_string()/VerifyingKey.from_string()`, `to_pem()/from_pem()`, and
+`to_der()/from_der()`. The same `curve=` argument is needed for
+`VerifyingKey.from_string()`.
+
+```python
+from ecdsa import SigningKey, VerifyingKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+vk = sk.verifying_key
+vk_string = vk.to_string()
+vk2 = VerifyingKey.from_string(vk_string, curve=NIST384p)
+# vk and vk2 are the same key
+
+from ecdsa import SigningKey, VerifyingKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+vk = sk.verifying_key
+vk_pem = vk.to_pem()
+vk2 = VerifyingKey.from_pem(vk_pem)
+# vk and vk2 are the same key
+```
+
+There are a couple of different ways to compute a signature. Fundamentally,
+ECDSA takes a number that represents the data being signed, and returns a
+pair of numbers that represent the signature. The `hashfunc=` argument to
+`sk.sign()` and `vk.verify()` is used to turn an arbitrary string into a
+fixed-length digest, which is then turned into a number that ECDSA can sign,
+and both sign and verify must use the same approach. The default value is
+`hashlib.sha1`, but if you use NIST256p or a longer curve, you can use
+`hashlib.sha256` instead.
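+
+For example, a minimal sketch that signs with SHA-256 on NIST256p (both
+sides must use the same digest):
+
+```python
+import hashlib
+from ecdsa import SigningKey, NIST256p
+sk = SigningKey.generate(curve=NIST256p)
+vk = sk.verifying_key
+signature = sk.sign(b"message", hashfunc=hashlib.sha256)
+assert vk.verify(signature, b"message", hashfunc=hashlib.sha256)
+```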
+
+There are also multiple ways to represent a signature. The default
+`sk.sign()` and `vk.verify()` methods present it as a short string, for
+simplicity and minimal overhead. To use a different scheme, use the
+`sk.sign(sigencode=)` and `vk.verify(sigdecode=)` arguments. There are helper
+functions in the `ecdsa.util` module that can be useful here.
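+
+For example, a short sketch that writes and reads a DER-encoded signature
+with the helpers from `ecdsa.util`:
+
+```python
+from ecdsa import SigningKey
+from ecdsa.util import sigencode_der, sigdecode_der
+sk = SigningKey.generate()
+vk = sk.verifying_key
+signature = sk.sign(b"message", sigencode=sigencode_der)
+assert vk.verify(signature, b"message", sigdecode=sigdecode_der)
+```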
+
+It is also possible to create a `SigningKey` from a "seed", which is
+deterministic. This can be used in protocols where you want to derive
+consistent signing keys from some other secret, for example when you want
+three separate keys and only want to store a single master secret. You should
+start with a uniformly-distributed unguessable seed with about `curve.baselen`
+bytes of entropy, and then use one of the helper functions in `ecdsa.util` to
+convert it into an integer in the correct range, and then finally pass it
+into `SigningKey.from_secret_exponent()`, like this:
+
+```python
+import os
+from ecdsa import NIST384p, SigningKey
+from ecdsa.util import randrange_from_seed__trytryagain
+
+def make_key(seed):
+ secexp = randrange_from_seed__trytryagain(seed, NIST384p.order)
+ return SigningKey.from_secret_exponent(secexp, curve=NIST384p)
+
+seed = os.urandom(NIST384p.baselen) # or other starting point
+sk1a = make_key(seed)
+sk1b = make_key(seed)
+# note: sk1a and sk1b are the same key
+assert sk1a.to_string() == sk1b.to_string()
+sk2 = make_key(b"2-"+seed) # different key
+assert sk1a.to_string() != sk2.to_string()
+```
+
+If the application will verify a lot of signatures made with a single
+key, it's possible to precompute some of the internal values to make
+signature verification significantly faster. The break-even point occurs at
+about 100 signatures verified.
+
+To perform precomputation, you can call the `precompute()` method
+on `VerifyingKey` instance:
+```python
+from ecdsa import SigningKey, NIST384p
+sk = SigningKey.generate(curve=NIST384p)
+vk = sk.verifying_key
+vk.precompute()
+signature = sk.sign(b"message")
+assert vk.verify(signature, b"message")
+```
+
+Once `precompute()` has been called, all signature verifications with this key will
+be faster to execute.
+
+## OpenSSL Compatibility
+
+To produce signatures that can be verified by OpenSSL tools, or to verify
+signatures that were produced by those tools, use:
+
+```python
+# openssl ecparam -name prime256v1 -genkey -out sk.pem
+# openssl ec -in sk.pem -pubout -out vk.pem
+# echo "data for signing" > data
+# openssl dgst -sha256 -sign sk.pem -out data.sig data
+# openssl dgst -sha256 -verify vk.pem -signature data.sig data
+# openssl dgst -sha256 -prverify sk.pem -signature data.sig data
+
+import hashlib
+from ecdsa import SigningKey, VerifyingKey
+from ecdsa.util import sigencode_der, sigdecode_der
+
+with open("vk.pem") as f:
+ vk = VerifyingKey.from_pem(f.read())
+
+with open("data", "rb") as f:
+ data = f.read()
+
+with open("data.sig", "rb") as f:
+ signature = f.read()
+
+assert vk.verify(signature, data, hashlib.sha256, sigdecode=sigdecode_der)
+
+with open("sk.pem") as f:
+ sk = SigningKey.from_pem(f.read(), hashlib.sha256)
+
+new_signature = sk.sign_deterministic(data, sigencode=sigencode_der)
+
+with open("data.sig2", "wb") as f:
+ f.write(new_signature)
+
+# openssl dgst -sha256 -verify vk.pem -signature data.sig2 data
+```
+
+Note: if compatibility with OpenSSL 1.0.0 or earlier is necessary, the
+`sigencode_string` and `sigdecode_string` helpers from `ecdsa.util` can be
+used for writing and reading the signatures, respectively.
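+
+A minimal sketch of that variant, reusing the `data`, `sk` and `vk` objects
+loaded in the example above:
+
+```python
+from ecdsa.util import sigencode_string, sigdecode_string
+string_signature = sk.sign_deterministic(data, sigencode=sigencode_string)
+assert vk.verify(string_signature, data, hashlib.sha256,
+                 sigdecode=sigdecode_string)
+```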
+
+The keys can also be written in a format that OpenSSL can handle:
+
+```python
+from ecdsa import SigningKey, VerifyingKey
+
+with open("sk.pem") as f:
+ sk = SigningKey.from_pem(f.read())
+with open("sk.pem", "wb") as f:
+ f.write(sk.to_pem())
+
+with open("vk.pem") as f:
+ vk = VerifyingKey.from_pem(f.read())
+with open("vk.pem", "wb") as f:
+ f.write(vk.to_pem())
+```
+
+## Entropy
+
+Creating a signing key with `SigningKey.generate()` requires some form of
+entropy (as opposed to
+`from_secret_exponent`/`from_string`/`from_der`/`from_pem`,
+which are deterministic and do not require an entropy source). The default
+source is `os.urandom()`, but you can pass any other function that behaves
+like `os.urandom` as the `entropy=` argument to do something different. This
+may be useful in unit tests, where you want to achieve repeatable results. The
+`ecdsa.util.PRNG` utility is handy here: it takes a seed and produces a strong
+pseudo-random stream from it:
+
+```python
+from ecdsa.util import PRNG
+from ecdsa import SigningKey
+rng1 = PRNG(b"seed")
+sk1 = SigningKey.generate(entropy=rng1)
+rng2 = PRNG(b"seed")
+sk2 = SigningKey.generate(entropy=rng2)
+# sk1 and sk2 are the same key
+```
+
+Likewise, ECDSA signature generation requires a random number, and each
+signature must use a different one (using the same number twice will
+immediately reveal the private signing key). The `sk.sign()` method takes an
+`entropy=` argument which behaves the same as `SigningKey.generate(entropy=)`.
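+
+A minimal sketch (handy in unit tests) that makes both key generation and
+signing repeatable by fixing the entropy source:
+
+```python
+from ecdsa import SigningKey
+from ecdsa.util import PRNG
+sk = SigningKey.generate(entropy=PRNG(b"key-seed"))
+sig1 = sk.sign(b"message", entropy=PRNG(b"sig-seed"))
+sig2 = sk.sign(b"message", entropy=PRNG(b"sig-seed"))
+# identical because the entropy was fixed; never reuse it for different messages
+assert sig1 == sig2
+```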
+
+## Deterministic Signatures
+
+If you call `SigningKey.sign_deterministic(data)` instead of `.sign(data)`,
+the code will generate a deterministic signature instead of a random one.
+This uses the algorithm from RFC6979 to safely generate a unique `k` value,
+derived from the private key and the message being signed. Each time you sign
+the same message with the same key, you will get the same signature (using
+the same `k`).
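+
+A minimal sketch showing that repeatability:
+
+```python
+import hashlib
+from ecdsa import SigningKey, NIST256p
+sk = SigningKey.generate(curve=NIST256p)
+sig1 = sk.sign_deterministic(b"message", hashfunc=hashlib.sha256)
+sig2 = sk.sign_deterministic(b"message", hashfunc=hashlib.sha256)
+assert sig1 == sig2
+```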
+
+This may become the default in a future version, as it is not vulnerable to
+failures of the entropy source.
+
+## Examples
+
+Create a NIST192p keypair and immediately save both to disk:
+
+```python
+from ecdsa import SigningKey
+sk = SigningKey.generate()
+vk = sk.verifying_key
+with open("private.pem", "wb") as f:
+ f.write(sk.to_pem())
+with open("public.pem", "wb") as f:
+ f.write(vk.to_pem())
+```
+
+Load a signing key from disk, use it to sign a message (using SHA-1), and write
+the signature to disk:
+
+```python
+from ecdsa import SigningKey
+with open("private.pem") as f:
+ sk = SigningKey.from_pem(f.read())
+with open("message", "rb") as f:
+ message = f.read()
+sig = sk.sign(message)
+with open("signature", "wb") as f:
+ f.write(sig)
+```
+
+Load the verifying key, message, and signature from disk, and verify the
+signature (assume SHA-1 hash):
+
+```python
+from ecdsa import VerifyingKey, BadSignatureError
+vk = VerifyingKey.from_pem(open("public.pem").read())
+with open("message", "rb") as f:
+ message = f.read()
+with open("signature", "rb") as f:
+ sig = f.read()
+try:
+ vk.verify(sig, message)
+ print "good signature"
+except BadSignatureError:
+ print "BAD SIGNATURE"
+```
+
+Create a NIST521p keypair:
+
+```python
+from ecdsa import SigningKey, NIST521p
+sk = SigningKey.generate(curve=NIST521p)
+vk = sk.verifying_key
+```
+
+Create three independent signing keys from a master seed:
+
+```python
+import os
+from ecdsa import NIST192p, SigningKey
+from ecdsa.util import randrange_from_seed__trytryagain
+
+def make_key_from_seed(seed, curve=NIST192p):
+ secexp = randrange_from_seed__trytryagain(seed, curve.order)
+ return SigningKey.from_secret_exponent(secexp, curve)
+
+seed = os.urandom(NIST192p.baselen)  # or any other unguessable master secret
+sk1 = make_key_from_seed(b"1:" + seed)
+sk2 = make_key_from_seed(b"2:" + seed)
+sk3 = make_key_from_seed(b"3:" + seed)
+```
+
+Load a verifying key from disk and print it using hex encoding in
+uncompressed and compressed format (defined in X9.62 and SEC1 standards):
+
+```python
+from ecdsa import VerifyingKey
+
+with open("public.pem") as f:
+ vk = VerifyingKey.from_pem(f.read())
+
+print("uncompressed: {0}".format(vk.to_string("uncompressed").hex()))
+print("compressed: {0}".format(vk.to_string("compressed").hex()))
+```
+
+Load a verifying key from a hex string from compressed format, output
+uncompressed:
+
+```python
+from ecdsa import VerifyingKey, NIST256p
+
+comp_str = '022799c0d0ee09772fdd337d4f28dc155581951d07082fb19a38aa396b67e77759'
+vk = VerifyingKey.from_string(bytearray.fromhex(comp_str), curve=NIST256p)
+print(vk.to_string("uncompressed").hex())
+```
+
+ECDH key exchange with a remote party:
+
+```python
+from ecdsa import ECDH, NIST256p
+
+ecdh = ECDH(curve=NIST256p)
+ecdh.generate_private_key()
+local_public_key = ecdh.get_public_key()
+# send `local_public_key` to the remote party and receive `remote_public_key` from the remote party
+with open("remote_public_key.pem") as e:
+ remote_public_key = e.read()
+ecdh.load_received_public_key_pem(remote_public_key)
+secret = ecdh.generate_sharedsecret_bytes()
+```
+
+
diff --git a/third_party/python/ecdsa/ecdsa-0.15.dist-info/RECORD b/third_party/python/ecdsa/ecdsa-0.15.dist-info/RECORD
new file mode 100644
index 0000000000..1a0163a7c0
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa-0.15.dist-info/RECORD
@@ -0,0 +1,28 @@
+ecdsa/__init__.py,sha256=3wbqSX9mkjn_sjkbx2vU-MJbKg0uz8DYLAZE5Jk4iyc,1219
+ecdsa/_compat.py,sha256=qmUf5lfl20-p8JleM4etlhplAEN37gbBqadBxXboomo,1108
+ecdsa/_rwlock.py,sha256=UVXDDwWF115oQroaHUtQo88uhhIoMLPIKfDQq3i7ETc,2848
+ecdsa/_version.py,sha256=J5ustrqphtIgbQXJKWGzATMRfq4koBTZ2UYvZuesnRw,496
+ecdsa/curves.py,sha256=Snq0JL6lydJunmSHeeycWvUQJ8Sj5N1tavcw6ZlZ4ik,4278
+ecdsa/der.py,sha256=rfV-KrVw10YAA2EWkVA4vZgbdeEhgsXaXfDd3S5qpp8,13864
+ecdsa/ecdh.py,sha256=qsUDPGMF9-tiqLaA9xUfhNBoUQ49gtMMFrc_O1YO_BQ,10459
+ecdsa/ecdsa.py,sha256=MB7v-2hUV982oOk-OzmKLtq-GXIPjNNK-Yd_dM4VcqU,17546
+ecdsa/ellipticcurve.py,sha256=wa3Om5WkW-HszXlBzyKdGaFfbQDsLABDCSXfrBzSMx0,24278
+ecdsa/keys.py,sha256=jeDeK5-G4C5jYebV0_sQGavRUQp5grNY7CV9eOH7o7I,52990
+ecdsa/numbertheory.py,sha256=FQiMnzY92Qi-Tt2z1czVd5MvaqqXzRgwlChZwPhwxEQ,15427
+ecdsa/rfc6979.py,sha256=7MR1nf19ZBD-EDgztlJ1SfSwLjlx3ePPb9BBFW7aEHo,2701
+ecdsa/test_der.py,sha256=XGZwUhZORvAZKEiWTLDDKlF_4JBplbUmTwkfdN-KGXU,12609
+ecdsa/test_ecdh.py,sha256=VlkuPt7fqwGh1nWwLVA-10Pguu5PYqWVaEOTDO7qlGM,13472
+ecdsa/test_ecdsa.py,sha256=zGC5L5vqc8nWNOKf0KOaUu3rJuLvpICioQ8tSypEjxs,18334
+ecdsa/test_ellipticcurve.py,sha256=odDCqwJm_sQgDFja9xSklpVskpXG5ebJ4xpBONU0duQ,6160
+ecdsa/test_jacobi.py,sha256=iGtWSMLpJ8HmJlrJkU7aiC5d50I8ahHKXFWfd0o_YP4,10778
+ecdsa/test_keys.py,sha256=NcnvEHsHJ0W-5T1F7M2RS9MzdR26ELlTv2LfAgMqEaU,12701
+ecdsa/test_malformed_sigs.py,sha256=6ow1rb-A-lbFD-TZjcl6a8VV9bwV2aL5Z0kwYJ4SJfk,10170
+ecdsa/test_numbertheory.py,sha256=KwC75hI2NfVPctlYki4JIUT8hUUcoK0x1AjcXDZQrow,9004
+ecdsa/test_pyecdsa.py,sha256=FqGtHsqwOpWz3Ne0Cmgib508pcEGv1b31eEBo-PQ5bE,64737
+ecdsa/test_rw_lock.py,sha256=5Gu_H73gU8Pb1_86X3AzkLMTYOtE4qdAwDOzBsEVbjk,6899
+ecdsa/util.py,sha256=CO6Jj3kUL28fIM3KnsevxYQJ1TCAAYDgCSacDAbSMu0,14007
+ecdsa-0.15.dist-info/LICENSE,sha256=PsqYRXc9LluMydjBGdNF8ApIBuS9Zg1KPWzfnA6di7I,1147
+ecdsa-0.15.dist-info/METADATA,sha256=Vipd5pI4sqqaWMjmDzRNRkZCQaq1YDHOHkAJPlI92tw,24899
+ecdsa-0.15.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+ecdsa-0.15.dist-info/top_level.txt,sha256=7ovPHfAPyTou19f8gOSbHm6B9dGjTibWolcCB7Zjovs,6
+ecdsa-0.15.dist-info/RECORD,,
diff --git a/third_party/python/ecdsa/ecdsa-0.15.dist-info/WHEEL b/third_party/python/ecdsa/ecdsa-0.15.dist-info/WHEEL
new file mode 100644
index 0000000000..8b701e93c2
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa-0.15.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/ecdsa/ecdsa-0.15.dist-info/top_level.txt b/third_party/python/ecdsa/ecdsa-0.15.dist-info/top_level.txt
new file mode 100644
index 0000000000..aa5efdb547
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa-0.15.dist-info/top_level.txt
@@ -0,0 +1 @@
+ecdsa
diff --git a/third_party/python/ecdsa/ecdsa/__init__.py b/third_party/python/ecdsa/ecdsa/__init__.py
new file mode 100644
index 0000000000..eef5fe38c4
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/__init__.py
@@ -0,0 +1,25 @@
+from .keys import SigningKey, VerifyingKey, BadSignatureError, BadDigestError,\
+ MalformedPointError
+from .curves import NIST192p, NIST224p, NIST256p, NIST384p, NIST521p,\
+ SECP256k1, BRAINPOOLP160r1, BRAINPOOLP192r1, BRAINPOOLP224r1,\
+ BRAINPOOLP256r1, BRAINPOOLP320r1, BRAINPOOLP384r1, BRAINPOOLP512r1
+from .ecdh import ECDH, NoKeyError, NoCurveError, InvalidCurveError, \
+ InvalidSharedSecretError
+from .der import UnexpectedDER
+
+# This code comes from http://github.com/warner/python-ecdsa
+from ._version import get_versions
+__version__ = get_versions()['version']
+del get_versions
+
+__all__ = ["curves", "der", "ecdsa", "ellipticcurve", "keys", "numbertheory",
+ "test_pyecdsa", "util", "six"]
+
+_hush_pyflakes = [SigningKey, VerifyingKey, BadSignatureError, BadDigestError,
+ MalformedPointError, UnexpectedDER, InvalidCurveError,
+ NoKeyError, InvalidSharedSecretError, ECDH, NoCurveError,
+ NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1,
+ BRAINPOOLP160r1, BRAINPOOLP192r1, BRAINPOOLP224r1,
+ BRAINPOOLP256r1, BRAINPOOLP320r1, BRAINPOOLP384r1,
+ BRAINPOOLP512r1]
+del _hush_pyflakes
diff --git a/third_party/python/ecdsa/ecdsa/_compat.py b/third_party/python/ecdsa/ecdsa/_compat.py
new file mode 100644
index 0000000000..965d8c47b5
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/_compat.py
@@ -0,0 +1,39 @@
+"""
+Common functions for providing cross-python version compatibility.
+"""
+import sys
+from six import integer_types
+
+
+def str_idx_as_int(string, index):
+ """Take index'th byte from string, return as integer"""
+ val = string[index]
+ if isinstance(val, integer_types):
+ return val
+ return ord(val)
+
+
+if sys.version_info < (3, 0):
+ def normalise_bytes(buffer_object):
+ """Cast the input into array of bytes."""
+ # flake8 runs on py3 where `buffer` indeed doesn't exist...
+ return buffer(buffer_object) # noqa: F821
+
+ def hmac_compat(ret):
+ return ret
+
+else:
+ if sys.version_info < (3, 4):
+        # on Python 3.3 hmac.HMAC.update() accepts only bytes; on newer
+        # versions it also accepts memoryview()
+ def hmac_compat(data):
+ if not isinstance(data, bytes):
+ return bytes(data)
+ return data
+ else:
+ def hmac_compat(data):
+ return data
+
+ def normalise_bytes(buffer_object):
+ """Cast the input into array of bytes."""
+ return memoryview(buffer_object).cast('B')
diff --git a/third_party/python/ecdsa/ecdsa/_rwlock.py b/third_party/python/ecdsa/ecdsa/_rwlock.py
new file mode 100644
index 0000000000..e4ef78dcfc
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/_rwlock.py
@@ -0,0 +1,85 @@
+# Copyright Mateusz Kobos, (c) 2011
+# https://code.activestate.com/recipes/577803-reader-writer-lock-with-priority-for-writers/
+# released under the MIT licence
+
+import threading
+
+
+__author__ = "Mateusz Kobos"
+
+
+class RWLock:
+ """
+ Read-Write locking primitive
+
+    Synchronization object used in a solution of the so-called second
+    readers-writers problem. In this problem, many readers can simultaneously
+    access a share, and a writer has exclusive access to this share.
+ Additionally, the following constraints should be met:
+ 1) no reader should be kept waiting if the share is currently opened for
+ reading unless a writer is also waiting for the share,
+ 2) no writer should be kept waiting for the share longer than absolutely
+ necessary.
+
+ The implementation is based on [1, secs. 4.2.2, 4.2.6, 4.2.7]
+ with a modification -- adding an additional lock (C{self.__readers_queue})
+ -- in accordance with [2].
+
+ Sources:
+ [1] A.B. Downey: "The little book of semaphores", Version 2.1.5, 2008
+ [2] P.J. Courtois, F. Heymans, D.L. Parnas:
+ "Concurrent Control with 'Readers' and 'Writers'",
+ Communications of the ACM, 1971 (via [3])
+ [3] http://en.wikipedia.org/wiki/Readers-writers_problem
+ """
+
+ def __init__(self):
+ """
+ A lock giving an even higher priority to the writer in certain
+ cases (see [2] for a discussion).
+ """
+ self.__read_switch = _LightSwitch()
+ self.__write_switch = _LightSwitch()
+ self.__no_readers = threading.Lock()
+ self.__no_writers = threading.Lock()
+ self.__readers_queue = threading.Lock()
+
+ def reader_acquire(self):
+ self.__readers_queue.acquire()
+ self.__no_readers.acquire()
+ self.__read_switch.acquire(self.__no_writers)
+ self.__no_readers.release()
+ self.__readers_queue.release()
+
+ def reader_release(self):
+ self.__read_switch.release(self.__no_writers)
+
+ def writer_acquire(self):
+ self.__write_switch.acquire(self.__no_readers)
+ self.__no_writers.acquire()
+
+ def writer_release(self):
+ self.__no_writers.release()
+ self.__write_switch.release(self.__no_readers)
+
+
+class _LightSwitch:
+ """An auxiliary "light switch"-like object. The first thread turns on the
+ "switch", the last one turns it off (see [1, sec. 4.2.2] for details)."""
+ def __init__(self):
+ self.__counter = 0
+ self.__mutex = threading.Lock()
+
+ def acquire(self, lock):
+ self.__mutex.acquire()
+ self.__counter += 1
+ if self.__counter == 1:
+ lock.acquire()
+ self.__mutex.release()
+
+ def release(self, lock):
+ self.__mutex.acquire()
+ self.__counter -= 1
+ if self.__counter == 0:
+ lock.release()
+ self.__mutex.release()
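+
+
+# A minimal usage sketch (illustrative only, not part of the upstream module):
+#
+#     lock = RWLock()
+#
+#     # in reader threads:
+#     lock.reader_acquire()
+#     try:
+#         ...  # read the shared state
+#     finally:
+#         lock.reader_release()
+#
+#     # in writer threads:
+#     lock.writer_acquire()
+#     try:
+#         ...  # mutate the shared state
+#     finally:
+#         lock.writer_release()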
diff --git a/third_party/python/ecdsa/ecdsa/_version.py b/third_party/python/ecdsa/ecdsa/_version.py
new file mode 100644
index 0000000000..038d62af2c
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/_version.py
@@ -0,0 +1,21 @@
+
+# This file was generated by 'versioneer.py' (0.17) from
+# revision-control system data, or from the parent directory name of an
+# unpacked source archive. Distribution tarballs contain a pre-generated copy
+# of this file.
+
+import json
+
+version_json = '''
+{
+ "date": "2020-01-02T17:05:04+0100",
+ "dirty": false,
+ "error": null,
+ "full-revisionid": "93b04ba3ddb7c2716e07761393a179c061718c34",
+ "version": "0.15"
+}
+''' # END VERSION_JSON
+
+
+def get_versions():
+ return json.loads(version_json)
diff --git a/third_party/python/ecdsa/ecdsa/curves.py b/third_party/python/ecdsa/ecdsa/curves.py
new file mode 100644
index 0000000000..173a2cda88
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/curves.py
@@ -0,0 +1,128 @@
+from __future__ import division
+
+from . import der, ecdsa
+from .util import orderlen
+
+
+# orderlen was defined in this module previously, so keep it in __all__,
+# will need to mark it as deprecated later
+__all__ = ["UnknownCurveError", "orderlen", "Curve", "NIST192p",
+ "NIST224p", "NIST256p", "NIST384p", "NIST521p", "curves",
+ "find_curve", "SECP256k1", "BRAINPOOLP160r1", "BRAINPOOLP192r1",
+ "BRAINPOOLP224r1", "BRAINPOOLP256r1", "BRAINPOOLP320r1",
+ "BRAINPOOLP384r1", "BRAINPOOLP512r1"]
+
+
+class UnknownCurveError(Exception):
+ pass
+
+
+class Curve:
+ def __init__(self, name, curve, generator, oid, openssl_name=None):
+ self.name = name
+ self.openssl_name = openssl_name # maybe None
+ self.curve = curve
+ self.generator = generator
+ self.order = generator.order()
+ self.baselen = orderlen(self.order)
+ self.verifying_key_length = 2*self.baselen
+ self.signature_length = 2*self.baselen
+ self.oid = oid
+ self.encoded_oid = der.encode_oid(*oid)
+
+ def __repr__(self):
+ return self.name
+
+
+# the NIST curves
+NIST192p = Curve("NIST192p", ecdsa.curve_192,
+ ecdsa.generator_192,
+ (1, 2, 840, 10045, 3, 1, 1), "prime192v1")
+
+
+NIST224p = Curve("NIST224p", ecdsa.curve_224,
+ ecdsa.generator_224,
+ (1, 3, 132, 0, 33), "secp224r1")
+
+
+NIST256p = Curve("NIST256p", ecdsa.curve_256,
+ ecdsa.generator_256,
+ (1, 2, 840, 10045, 3, 1, 7), "prime256v1")
+
+
+NIST384p = Curve("NIST384p", ecdsa.curve_384,
+ ecdsa.generator_384,
+ (1, 3, 132, 0, 34), "secp384r1")
+
+
+NIST521p = Curve("NIST521p", ecdsa.curve_521,
+ ecdsa.generator_521,
+ (1, 3, 132, 0, 35), "secp521r1")
+
+
+SECP256k1 = Curve("SECP256k1", ecdsa.curve_secp256k1,
+ ecdsa.generator_secp256k1,
+ (1, 3, 132, 0, 10), "secp256k1")
+
+
+BRAINPOOLP160r1 = Curve("BRAINPOOLP160r1",
+ ecdsa.curve_brainpoolp160r1,
+ ecdsa.generator_brainpoolp160r1,
+ (1, 3, 36, 3, 3, 2, 8, 1, 1, 1),
+ "brainpoolP160r1")
+
+
+BRAINPOOLP192r1 = Curve("BRAINPOOLP192r1",
+ ecdsa.curve_brainpoolp192r1,
+ ecdsa.generator_brainpoolp192r1,
+ (1, 3, 36, 3, 3, 2, 8, 1, 1, 3),
+ "brainpoolP192r1")
+
+
+BRAINPOOLP224r1 = Curve("BRAINPOOLP224r1",
+ ecdsa.curve_brainpoolp224r1,
+ ecdsa.generator_brainpoolp224r1,
+ (1, 3, 36, 3, 3, 2, 8, 1, 1, 5),
+ "brainpoolP224r1")
+
+
+BRAINPOOLP256r1 = Curve("BRAINPOOLP256r1",
+ ecdsa.curve_brainpoolp256r1,
+ ecdsa.generator_brainpoolp256r1,
+ (1, 3, 36, 3, 3, 2, 8, 1, 1, 7),
+ "brainpoolP256r1")
+
+
+BRAINPOOLP320r1 = Curve("BRAINPOOLP320r1",
+ ecdsa.curve_brainpoolp320r1,
+ ecdsa.generator_brainpoolp320r1,
+ (1, 3, 36, 3, 3, 2, 8, 1, 1, 9),
+ "brainpoolP320r1")
+
+
+BRAINPOOLP384r1 = Curve("BRAINPOOLP384r1",
+ ecdsa.curve_brainpoolp384r1,
+ ecdsa.generator_brainpoolp384r1,
+ (1, 3, 36, 3, 3, 2, 8, 1, 1, 11),
+ "brainpoolP384r1")
+
+
+BRAINPOOLP512r1 = Curve("BRAINPOOLP512r1",
+ ecdsa.curve_brainpoolp512r1,
+ ecdsa.generator_brainpoolp512r1,
+ (1, 3, 36, 3, 3, 2, 8, 1, 1, 13),
+ "brainpoolP512r1")
+
+
+curves = [NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, SECP256k1,
+ BRAINPOOLP160r1, BRAINPOOLP192r1, BRAINPOOLP224r1, BRAINPOOLP256r1,
+ BRAINPOOLP320r1, BRAINPOOLP384r1, BRAINPOOLP512r1]
+
+
+def find_curve(oid_curve):
+ for c in curves:
+ if c.oid == oid_curve:
+ return c
+ raise UnknownCurveError("I don't know about the curve with oid %s."
+ "I only know about these: %s" %
+ (oid_curve, [c.name for c in curves]))
diff --git a/third_party/python/ecdsa/ecdsa/der.py b/third_party/python/ecdsa/ecdsa/der.py
new file mode 100644
index 0000000000..ad75b37b56
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/der.py
@@ -0,0 +1,384 @@
+from __future__ import division
+
+import binascii
+import base64
+import warnings
+from itertools import chain
+from six import int2byte, b, text_type
+from ._compat import str_idx_as_int
+
+
+class UnexpectedDER(Exception):
+ pass
+
+
+def encode_constructed(tag, value):
+ return int2byte(0xa0+tag) + encode_length(len(value)) + value
+
+
+def encode_integer(r):
+ assert r >= 0 # can't support negative numbers yet
+ h = ("%x" % r).encode()
+ if len(h) % 2:
+ h = b("0") + h
+ s = binascii.unhexlify(h)
+ num = str_idx_as_int(s, 0)
+ if num <= 0x7f:
+ return b("\x02") + encode_length(len(s)) + s
+ else:
+ # DER integers are two's complement, so if the first byte is
+ # 0x80-0xff then we need an extra 0x00 byte to prevent it from
+ # looking negative.
+ return b("\x02") + encode_length(len(s)+1) + b("\x00") + s
+
+
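+# Worked examples (illustrative; values checked by hand, not taken from
+# the library's test suite) of the two's-complement rule above:
+#
+# encode_integer(127) == b'\x02\x01\x7f'
+# encode_integer(128) == b'\x02\x02\x00\x80' # extra zero keeps it positive
+
+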
+# sentry object to check if an argument was specified (used to detect
+# deprecated calling convention)
+_sentry = object()
+
+
+def encode_bitstring(s, unused=_sentry):
+ """
+ Encode a binary string as a BIT STRING using :term:`DER` encoding.
+
+ Note: because there is no native Python object that can represent an
+ actual bit string, this function only accepts byte strings as the `s`
+ argument. The byte string is the bit string that will be encoded, padded
+ on the right (the least significant bits, from a big-endian perspective)
+ to the first full byte. If the bit string has a bit length that is a
+ multiple of 8, the padding should not be included. For correct DER
+ encoding the padding bits MUST be set to 0.
+
+ The number of padding bits needs to be provided as the `unused`
+ parameter. If it is specified as None, the number of unused bits is
+ assumed to be already encoded in the string as the first byte.
+
+ The deprecated calling convention passes just the `s` parameter and
+ expects the number of unused bits to be encoded as the first byte of
+ `s` (the same convention as with None).
+
+ An empty string must be encoded with `unused` specified as 0.
+
+ A future version of python-ecdsa will make specifying the `unused`
+ argument mandatory.
+
+ :param s: bytes to encode
+ :type s: bytes like object
+ :param unused: number of bits at the end of `s` that are unused, must be
+ between 0 and 7 (inclusive)
+ :type unused: int or None
+
+ :raises ValueError: when `unused` is too large or too small
+
+ :return: `s` encoded using DER
+ :rtype: bytes
+ """
+ encoded_unused = b''
+ len_extra = 0
+ if unused is _sentry:
+ warnings.warn("Legacy call convention used, unused= needs to be "
+ "specified",
+ DeprecationWarning)
+ elif unused is not None:
+ if not 0 <= unused <= 7:
+ raise ValueError("unused must be integer between 0 and 7")
+ if unused:
+ if not s:
+ raise ValueError("unused is non-zero but s is empty")
+ last = str_idx_as_int(s, -1)
+ if last & (2 ** unused - 1):
+ raise ValueError("unused bits must be zeros in DER")
+ encoded_unused = int2byte(unused)
+ len_extra = 1
+ return b("\x03") + encode_length(len(s) + len_extra) + encoded_unused + s
+
+
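+# Worked examples (illustrative): a single set bit is padded to a full
+# byte with seven unused zero bits, giving the classic 03 02 07 80
+# encoding of the one-bit string '1':
+#
+# encode_bitstring(b'\x80', unused=7) == b'\x03\x02\x07\x80'
+# encode_bitstring(b'\x00\xff', unused=0) == b'\x03\x03\x00\x00\xff'
+
+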
+def encode_octet_string(s):
+ return b("\x04") + encode_length(len(s)) + s
+
+
+def encode_oid(first, second, *pieces):
+ assert 0 <= first < 2 and 0 <= second <= 39 or first == 2 and 0 <= second
+ body = b''.join(chain([encode_number(40*first+second)],
+ (encode_number(p) for p in pieces)))
+ return b'\x06' + encode_length(len(body)) + body
+
+
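+# Worked example (illustrative): the ecdsa-with-SHA1 OID 1.2.840.10045.4.1
+# packs the first two arcs into one byte (40*1 + 2 == 0x2a) and encodes
+# the larger arcs in base 128:
+#
+# encode_oid(1, 2, 840, 10045, 4, 1) == b'\x06\x07\x2a\x86\x48\xce\x3d\x04\x01'
+
+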
+def encode_sequence(*encoded_pieces):
+ total_len = sum([len(p) for p in encoded_pieces])
+ return b('\x30') + encode_length(total_len) + b('').join(encoded_pieces)
+
+
+def encode_number(n):
+ b128_digits = []
+ while n:
+ b128_digits.insert(0, (n & 0x7f) | 0x80)
+ n = n >> 7
+ if not b128_digits:
+ b128_digits.append(0)
+ b128_digits[-1] &= 0x7f
+ return b('').join([int2byte(d) for d in b128_digits])
+
+
+def remove_constructed(string):
+ s0 = str_idx_as_int(string, 0)
+ if (s0 & 0xe0) != 0xa0:
+ raise UnexpectedDER("wanted type 'constructed tag' (0xa0-0xbf), "
+ "got 0x%02x" % s0)
+ tag = s0 & 0x1f
+ length, llen = read_length(string[1:])
+ body = string[1+llen:1+llen+length]
+ rest = string[1+llen+length:]
+ return tag, body, rest
+
+
+def remove_sequence(string):
+ if not string:
+ raise UnexpectedDER("Empty string does not encode a sequence")
+ if string[:1] != b"\x30":
+ n = str_idx_as_int(string, 0)
+ raise UnexpectedDER("wanted type 'sequence' (0x30), got 0x%02x" % n)
+ length, lengthlength = read_length(string[1:])
+ if length > len(string) - 1 - lengthlength:
+ raise UnexpectedDER("Length longer than the provided buffer")
+ endseq = 1+lengthlength+length
+ return string[1+lengthlength:endseq], string[endseq:]
+
+
+def remove_octet_string(string):
+ if string[:1] != b"\x04":
+ n = str_idx_as_int(string, 0)
+ raise UnexpectedDER("wanted type 'octetstring' (0x04), got 0x%02x" % n)
+ length, llen = read_length(string[1:])
+ body = string[1+llen:1+llen+length]
+ rest = string[1+llen+length:]
+ return body, rest
+
+
+def remove_object(string):
+ if not string:
+ raise UnexpectedDER(
+ "Empty string does not encode an object identifier")
+ if string[:1] != b"\x06":
+ n = str_idx_as_int(string, 0)
+ raise UnexpectedDER("wanted type 'object' (0x06), got 0x%02x" % n)
+ length, lengthlength = read_length(string[1:])
+ body = string[1+lengthlength:1+lengthlength+length]
+ rest = string[1+lengthlength+length:]
+ if not body:
+ raise UnexpectedDER("Empty object identifier")
+ if len(body) != length:
+ raise UnexpectedDER(
+ "Length of object identifier longer than the provided buffer")
+ numbers = []
+ while body:
+ n, ll = read_number(body)
+ numbers.append(n)
+ body = body[ll:]
+ n0 = numbers.pop(0)
+ if n0 < 80:
+ first = n0 // 40
+ else:
+ first = 2
+ second = n0 - (40 * first)
+ numbers.insert(0, first)
+ numbers.insert(1, second)
+ return tuple(numbers), rest
+
+
+def remove_integer(string):
+ if not string:
+ raise UnexpectedDER("Empty string is an invalid encoding of an "
+ "integer")
+ if string[:1] != b"\x02":
+ n = str_idx_as_int(string, 0)
+ raise UnexpectedDER("wanted type 'integer' (0x02), got 0x%02x" % n)
+ length, llen = read_length(string[1:])
+ if length > len(string) - 1 - llen:
+ raise UnexpectedDER("Length longer than provided buffer")
+ if length == 0:
+ raise UnexpectedDER("0-byte long encoding of integer")
+ numberbytes = string[1+llen:1+llen+length]
+ rest = string[1+llen+length:]
+ msb = str_idx_as_int(numberbytes, 0)
+ if not msb < 0x80:
+ raise UnexpectedDER("Negative integers are not supported")
+ # check if the encoding is the minimal one (DER requirement)
+ if length > 1 and not msb:
+ # leading zero byte is allowed if the integer would have been
+ # considered a negative number otherwise
+ smsb = str_idx_as_int(numberbytes, 1)
+ if smsb < 0x80:
+ raise UnexpectedDER("Invalid encoding of integer, unnecessary "
+ "zero padding bytes")
+ return int(binascii.hexlify(numberbytes), 16), rest
+
+
+def read_number(string):
+ number = 0
+ llen = 0
+ if str_idx_as_int(string, 0) == 0x80:
+ raise UnexpectedDER("Non minimal encoding of OID subidentifier")
+ # base-128 big endian, with most significant bit set in all but the last
+ # byte
+ while True:
+ if llen >= len(string):
+ raise UnexpectedDER("ran out of length bytes")
+ number = number << 7
+ d = str_idx_as_int(string, llen)
+ number += (d & 0x7f)
+ llen += 1
+ if not d & 0x80:
+ break
+ return number, llen
+
+
+def encode_length(l):
+ assert l >= 0
+ if l < 0x80:
+ return int2byte(l)
+ s = ("%x" % l).encode()
+ if len(s) % 2:
+ s = b("0") + s
+ s = binascii.unhexlify(s)
+ llen = len(s)
+ return int2byte(0x80 | llen) + s
+
+
+def read_length(string):
+ if not string:
+ raise UnexpectedDER("Empty string can't encode valid length value")
+ num = str_idx_as_int(string, 0)
+ if not (num & 0x80):
+ # short form
+ return (num & 0x7f), 1
+ # else long-form: b0&0x7f is number of additional base256 length bytes,
+ # big-endian
+ llen = num & 0x7f
+ if not llen:
+ raise UnexpectedDER("Invalid length encoding, length of length is 0")
+ if llen > len(string)-1:
+ raise UnexpectedDER("Length of length longer than provided buffer")
+ # verify that the encoding is minimal possible (DER requirement)
+ msb = str_idx_as_int(string, 1)
+ if not msb or llen == 1 and msb < 0x80:
+ raise UnexpectedDER("Not minimal encoding of length")
+ return int(binascii.hexlify(string[1:1+llen]), 16), 1+llen
+
+
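+# Worked examples (illustrative): lengths below 0x80 use the short form,
+# longer ones use a length-of-length byte followed by big-endian length
+# bytes; the return value is (length, bytes consumed):
+#
+# read_length(b'\x05' + b'A' * 5) == (5, 1) # short form
+# read_length(b'\x81\x80' + b'A' * 128) == (128, 2) # long form
+
+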
+def remove_bitstring(string, expect_unused=_sentry):
+ """
+ Remove a BIT STRING object from `string` following :term:`DER`.
+
+ The `expect_unused` parameter specifies whether the number of unused
+ bits should be decoded. If it's an integer, any BIT STRING read with a
+ number of unused bits different from the specified value will cause an
+ UnexpectedDER exception to be raised (this is especially useful when
+ decoding BIT STRINGs that have a DER-encoded object in them; DER
+ encoding is byte oriented, so the number of unused bits always equals 0).
+
+ If `expect_unused` is specified as None, the first element returned
+ will be a tuple, with the first value being the extracted bit string
+ and the second value being the decoded number of unused bits.
+
+ If `expect_unused` is unspecified, decoding of the byte with the
+ number of unused bits will not be attempted and the bit string will be
+ returned as-is; the caller will then be required to decode it and
+ verify its correctness.
+
+ A future version of python-ecdsa will require the `expect_unused`
+ parameter to be specified.
+
+ :param string: string of bytes to extract the BIT STRING from
+ :type string: bytes like object
+ :param expect_unused: number of bits that should be unused in the BIT
+ STRING, or None, to return it to caller
+ :type expect_unused: int or None
+
+ :raises UnexpectedDER: when the encoding does not follow DER.
+
+ :return: a tuple with first element being the extracted bit string and
+ the second being the remaining bytes in the string (if any); if the
+ `expect_unused` is specified as None, the first element of the returned
+ tuple will be a tuple itself, with first element being the bit string
+ as bytes and the second element being the number of unused bits at the
+ end of the byte array as an integer
+ :rtype: tuple
+ """
+ if not string:
+ raise UnexpectedDER("Empty string does not encode a bitstring")
+ if expect_unused is _sentry:
+ warnings.warn("Legacy call convention used, expect_unused= needs to be"
+ " specified",
+ DeprecationWarning)
+ num = str_idx_as_int(string, 0)
+ if string[:1] != b"\x03":
+ raise UnexpectedDER("wanted bitstring (0x03), got 0x%02x" % num)
+ length, llen = read_length(string[1:])
+ if not length:
+ raise UnexpectedDER("Invalid length of bit string, can't be 0")
+ body = string[1+llen:1+llen+length]
+ rest = string[1+llen+length:]
+ if expect_unused is not _sentry:
+ unused = str_idx_as_int(body, 0)
+ if not 0 <= unused <= 7:
+ raise UnexpectedDER("Invalid encoding of unused bits")
+ if expect_unused is not None and expect_unused != unused:
+ raise UnexpectedDER("Unexpected number of unused bits")
+ body = body[1:]
+ if unused:
+ if not body:
+ raise UnexpectedDER("Invalid encoding of empty bit string")
+ last = str_idx_as_int(body, -1)
+ # verify that all the unused bits are set to zero (DER requirement)
+ if last & (2 ** unused - 1):
+ raise UnexpectedDER("Non zero padding bits in bit string")
+ if expect_unused is None:
+ body = (body, unused)
+ return body, rest
+
+# SEQUENCE(1, STRING(secexp), cont[0], OBJECT(curvename), cont[1], BITSTRING)
+
+
+# signatures: (from RFC3279)
+# ansi-X9-62 OBJECT IDENTIFIER ::= {
+# iso(1) member-body(2) us(840) 10045 }
+#
+# id-ecSigType OBJECT IDENTIFIER ::= {
+# ansi-X9-62 signatures(4) }
+# ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
+# id-ecSigType 1 }
+## so 1,2,840,10045,4,1
+## so 0x42, .. ..
+
+# Ecdsa-Sig-Value ::= SEQUENCE {
+# r INTEGER,
+# s INTEGER }
+
+# id-publicKeyType OBJECT IDENTIFIER ::= { ansi-X9-62 2 }
+#
+# id-ecPublicKey OBJECT IDENTIFIER ::= { id-publicKeyType 1 }
+
+# The secp224r1 identifier is (t=06,l=05,v=2b81040021)
+# secp224r1 OBJECT IDENTIFIER ::= {
+# iso(1) identified-organization(3) certicom(132) curve(0) 33 }
+# and the secp384r1 is (t=06,l=05,v=2b81040022)
+# secp384r1 OBJECT IDENTIFIER ::= {
+# iso(1) identified-organization(3) certicom(132) curve(0) 34 }
+
+def unpem(pem):
+ if isinstance(pem, text_type):
+ pem = pem.encode()
+
+ d = b("").join([l.strip() for l in pem.split(b("\n"))
+ if l and not l.startswith(b("-----"))])
+ return base64.b64decode(d)
+
+
+def topem(der, name):
+ b64 = base64.b64encode(der)
+ lines = [("-----BEGIN %s-----\n" % name).encode()]
+ lines.extend([b64[start:start+64]+b("\n")
+ for start in range(0, len(b64), 64)])
+ lines.append(("-----END %s-----\n" % name).encode())
+ return b("").join(lines)
diff --git a/third_party/python/ecdsa/ecdsa/ecdh.py b/third_party/python/ecdsa/ecdsa/ecdh.py
new file mode 100644
index 0000000000..88848f5503
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/ecdh.py
@@ -0,0 +1,306 @@
+"""
+Class for performing Elliptic-curve Diffie-Hellman (ECDH) operations.
+"""
+
+from .util import number_to_string
+from .ellipticcurve import INFINITY
+from .keys import SigningKey, VerifyingKey
+
+
+__all__ = ["ECDH", "NoKeyError", "NoCurveError", "InvalidCurveError",
+ "InvalidSharedSecretError"]
+
+
+class NoKeyError(Exception):
+ """ECDH. Key not found but it is needed for operation."""
+
+ pass
+
+
+class NoCurveError(Exception):
+ """ECDH. Curve not set but it is needed for operation."""
+
+ pass
+
+
+class InvalidCurveError(Exception):
+ """ECDH. Raised in case the public and private keys use different curves."""
+
+ pass
+
+
+class InvalidSharedSecretError(Exception):
+ """ECDH. Raised in case the shared secret we obtained is an INFINITY."""
+
+ pass
+
+
+class ECDH(object):
+ """
+ Elliptic-curve Diffie-Hellman (ECDH). A key agreement protocol.
+
+ Allows two parties, each having an elliptic-curve public-private key
+ pair, to establish a shared secret over an insecure channel.
+ """
+
+ def __init__(self, curve=None, private_key=None, public_key=None):
+ """
+ ECDH init.
+
+ The object can be initialised without parameters; the first operation
+ (loading either key) will then set the curve to use.
+ All parameters must ultimately be set before shared secret
+ calculation is allowed.
+
+ :param curve: curve for operations
+ :type curve: Curve
+ :param private_key: `my` private key for ECDH
+ :type private_key: SigningKey
+ :param public_key: `their` public key for ECDH
+ :type public_key: VerifyingKey
+ """
+ self.curve = curve
+ self.private_key = None
+ self.public_key = None
+ if private_key:
+ self.load_private_key(private_key)
+ if public_key:
+ self.load_received_public_key(public_key)
+
+ def _get_shared_secret(self, remote_public_key):
+ if not self.private_key:
+ raise NoKeyError(
+ "Private key needs to be set to create shared secret")
+ if not self.public_key:
+ raise NoKeyError(
+ "Public key needs to be set to create shared secret")
+ if not (self.private_key.curve == self.curve == remote_public_key.curve):
+ raise InvalidCurveError(
+ "Curves for public key and private key is not equal.")
+
+ # shared secret = PUBKEYtheirs * PRIVATEKEYours
+ result = remote_public_key.pubkey.point * self.private_key.privkey.secret_multiplier
+ if result == INFINITY:
+ raise InvalidSharedSecretError(
+ "Invalid shared secret (INFINITY).")
+
+ return result.x()
+
+ def set_curve(self, key_curve):
+ """
+ Set the working curve for ecdh operations.
+
+ :param key_curve: curve from `curves` module
+ :type key_curve: Curve
+ """
+ self.curve = key_curve
+
+ def generate_private_key(self):
+ """
+ Generate local private key for ecdh operation with curve that was set.
+
+ :raises NoCurveError: Curve must be set before key generation.
+
+ :return: public (verifying) key from this private key.
+ :rtype: VerifyingKey object
+ """
+ if not self.curve:
+ raise NoCurveError("Curve must be set prior to key generation.")
+ return self.load_private_key(SigningKey.generate(curve=self.curve))
+
+ def load_private_key(self, private_key):
+ """
+ Load private key from SigningKey (keys.py) object.
+
+ Needs to have the same curve as was set with the set_curve method.
+ If the curve is not set, it is set from this SigningKey.
+
+ :param private_key: Initialised SigningKey class
+ :type private_key: SigningKey
+
+ :raises InvalidCurveError: private_key curve not the same as self.curve
+
+ :return: public (verifying) key from this private key.
+ :rtype: VerifyingKey object
+ """
+ if not self.curve:
+ self.curve = private_key.curve
+ if self.curve != private_key.curve:
+ raise InvalidCurveError("Curve mismatch.")
+ self.private_key = private_key
+ return self.private_key.get_verifying_key()
+
+ def load_private_key_bytes(self, private_key):
+ """
+ Load private key from byte string.
+
+ Uses the current curve and checks if the provided key matches
+ the curve of the ECDH key agreement.
+ The key is loaded via the from_string method of the SigningKey class.
+
+ :param private_key: private key in bytes string format
+ :type private_key: :term:`bytes-like object`
+
+ :raises NoCurveError: Curve must be set before loading.
+
+ :return: public (verifying) key from this private key.
+ :rtype: VerifyingKey object
+ """
+ if not self.curve:
+ raise NoCurveError("Curve must be set prior to key load.")
+ return self.load_private_key(
+ SigningKey.from_string(private_key, curve=self.curve))
+
+ def load_private_key_der(self, private_key_der):
+ """
+ Load private key from DER byte string.
+
+ Compares the curve of the DER-encoded key with the curve set on the
+ ECDH object; if the latter is unset, it is taken from the key.
+
+ Note: the only DER format supported is RFC 5915.
+ See keys.py:SigningKey.from_der().
+
+ :param private_key_der: string with the DER encoding of private ECDSA key
+ :type private_key_der: string
+
+ :raises InvalidCurveError: private_key curve not the same as self.curve
+
+ :return: public (verifying) key from this private key.
+ :rtype: VerifyingKey object
+ """
+ return self.load_private_key(SigningKey.from_der(private_key_der))
+
+ def load_private_key_pem(self, private_key_pem):
+ """
+ Load private key from PEM string.
+
+ Compares the curve of the PEM-encoded key with the curve set on the
+ ECDH object; if the latter is unset, it is taken from the key.
+
+ Note: the only PEM format supported is RFC 5915;
+ it needs to have an `EC PRIVATE KEY` section.
+ See keys.py:SigningKey.from_pem().
+
+ :param private_key_pem: string with PEM-encoded private ECDSA key
+ :type private_key_pem: string
+
+ :raises InvalidCurveError: private_key curve not the same as self.curve
+
+ :return: public (verifying) key from this private key.
+ :rtype: VerifyingKey object
+ """
+ return self.load_private_key(SigningKey.from_pem(private_key_pem))
+
+ def get_public_key(self):
+ """
+ Provides a public key that matches the local private key.
+
+ Needs to be sent to the remote party.
+
+ :return: public (verifying) key from local private key.
+ :rtype: VerifyingKey object
+ """
+ return self.private_key.get_verifying_key()
+
+ def load_received_public_key(self, public_key):
+ """
+ Load public key from VerifyingKey (keys.py) object.
+
+ Needs to have the same curve as the one currently set for the ecdh
+ operation. If the curve is not set, it is set from the VerifyingKey.
+
+ :param public_key: Initialised VerifyingKey class
+ :type public_key: VerifyingKey
+
+ :raises InvalidCurveError: public_key curve not the same as self.curve
+ """
+ if not self.curve:
+ self.curve = public_key.curve
+ if self.curve != public_key.curve:
+ raise InvalidCurveError("Curve mismatch.")
+ self.public_key = public_key
+
+ def load_received_public_key_bytes(self, public_key_str):
+ """
+ Load public key from byte string.
+
+ Uses the current curve and checks if the key length corresponds
+ to the current curve.
+ The key is loaded via the from_string method of the VerifyingKey class.
+
+ :param public_key_str: public key in bytes string format
+ :type public_key_str: :term:`bytes-like object`
+ """
+ return self.load_received_public_key(
+ VerifyingKey.from_string(public_key_str, self.curve))
+
+ def load_received_public_key_der(self, public_key_der):
+ """
+ Load public key from DER byte string.
+
+ Compares the curve of the DER-encoded key with the curve set on the
+ ECDH object; if the latter is unset, it is taken from the key.
+
+ Note: the only DER format supported is RFC 5912.
+ See keys.py:VerifyingKey.from_der().
+
+ :param public_key_der: string with the DER encoding of public ECDSA key
+ :type public_key_der: string
+
+ :raises InvalidCurveError: public_key curve not the same as self.curve
+ """
+ return self.load_received_public_key(VerifyingKey.from_der(public_key_der))
+
+ def load_received_public_key_pem(self, public_key_pem):
+ """
+ Load public key from PEM string.
+
+ Compares the curve of the PEM-encoded key with the curve set on the
+ ECDH object; if the latter is unset, it is taken from the key.
+
+ Note: the only PEM format supported is RFC 5912.
+ See keys.py:VerifyingKey.from_pem().
+
+ :param public_key_pem: string with PEM-encoded public ECDSA key
+ :type public_key_pem: string
+
+ :raises InvalidCurveError: public_key curve not the same as self.curve
+ """
+ return self.load_received_public_key(VerifyingKey.from_pem(public_key_pem))
+
+ def generate_sharedsecret_bytes(self):
+ """
+ Generate shared secret from local private key and remote public key.
+
+ The object needs to have both the private key and the received public
+ key set before generation is allowed.
+
+ :raises InvalidCurveError: public_key curve not the same as self.curve
+ :raises NoKeyError: public_key or private_key is not set
+
+ :return: shared secret
+ :rtype: byte string
+ """
+ return number_to_string(
+ self.generate_sharedsecret(),
+ self.private_key.curve.order)
+
+ def generate_sharedsecret(self):
+ """
+ Generate shared secret from local private key and remote public key.
+
+ The object needs to have both the private key and the received public
+ key set before generation is allowed.
+
+ The result is the same for the local and the remote party:
+ shared_secret(local private key, remote public key) ==
+ shared_secret(local public key, remote private key)
+
+ :raises InvalidCurveError: public_key curve not the same as self.curve
+ :raises NoKeyError: public_key or private_key is not set
+
+ :return: shared secret
+ :rtype: int
+ """
+ return self._get_shared_secret(self.public_key)
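+
+
+# A minimal key-agreement sketch (illustrative; assumes the NIST256p
+# Curve object from the curves module). Both parties derive the same
+# secret after exchanging public keys:
+#
+# from ecdsa.curves import NIST256p
+#
+# alice = ECDH(curve=NIST256p)
+# alice.generate_private_key()
+# bob = ECDH(curve=NIST256p)
+# bob.generate_private_key()
+#
+# alice.load_received_public_key(bob.get_public_key())
+# bob.load_received_public_key(alice.get_public_key())
+#
+# assert (alice.generate_sharedsecret_bytes()
+# == bob.generate_sharedsecret_bytes())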
diff --git a/third_party/python/ecdsa/ecdsa/ecdsa.py b/third_party/python/ecdsa/ecdsa/ecdsa.py
new file mode 100644
index 0000000000..4e9bab0898
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/ecdsa.py
@@ -0,0 +1,446 @@
+#! /usr/bin/env python
+
+"""
+Implementation of Elliptic-Curve Digital Signatures.
+
+Classes and methods for elliptic-curve signatures:
+private keys, public keys, signatures,
+NIST prime-modulus curves with modulus lengths of
+192, 224, 256, 384, and 521 bits.
+
+Example:
+
+ # (In real-life applications, you would probably want to
+ # protect against defects in SystemRandom.)
+ from random import SystemRandom
+ randrange = SystemRandom().randrange
+
+ # Generate a public/private key pair using the NIST Curve P-192:
+
+ g = generator_192
+ n = g.order()
+ secret = randrange( 1, n )
+ pubkey = Public_key( g, g * secret )
+ privkey = Private_key( pubkey, secret )
+
+ # Signing a hash value:
+
+ hash = randrange( 1, n )
+ signature = privkey.sign( hash, randrange( 1, n ) )
+
+ # Verifying a signature for a hash value:
+
+ if pubkey.verifies( hash, signature ):
+ print("Demo verification succeeded.")
+ else:
+ print("*** Demo verification failed.")
+
+ # Verification fails if the hash value is modified:
+
+ if pubkey.verifies( hash-1, signature ):
+ print("**** Demo verification failed to reject tampered hash.")
+ else:
+ print("Demo verification correctly rejected tampered hash.")
+
+Version of 2009.05.16.
+
+Revision history:
+ 2005.12.31 - Initial version.
+ 2008.11.25 - Substantial revisions introducing new classes.
+ 2009.05.16 - Warn against using random.randrange in real applications.
+ 2009.05.17 - Use random.SystemRandom by default.
+
+Written in 2005 by Peter Pearson and placed in the public domain.
+"""
+
+from six import int2byte, b
+from . import ellipticcurve
+from . import numbertheory
+from .util import bit_length
+
+
+class RSZeroError(RuntimeError):
+ pass
+
+
+class InvalidPointError(RuntimeError):
+ pass
+
+
+class Signature(object):
+ """ECDSA signature.
+ """
+ def __init__(self, r, s):
+ self.r = r
+ self.s = s
+
+ def recover_public_keys(self, hash, generator):
+ """Returns two public keys for which the signature is valid
+ hash is signed hash
+ generator is the used generator of the signature
+ """
+ curve = generator.curve()
+ n = generator.order()
+ r = self.r
+ s = self.s
+ e = hash
+ x = r
+
+ # Compute the curve point with x as x-coordinate
+ alpha = (pow(x, 3, curve.p()) + (curve.a() * x) + curve.b()) % curve.p()
+ beta = numbertheory.square_root_mod_prime(alpha, curve.p())
+ y = beta if beta % 2 == 0 else curve.p() - beta
+
+ # Compute the public key
+ R1 = ellipticcurve.PointJacobi(curve, x, y, 1, n)
+ Q1 = numbertheory.inverse_mod(r, n) * (s * R1 + (-e % n) * generator)
+ Pk1 = Public_key(generator, Q1)
+
+ # And the second solution
+ R2 = ellipticcurve.PointJacobi(curve, x, -y, 1, n)
+ Q2 = numbertheory.inverse_mod(r, n) * (s * R2 + (-e % n) * generator)
+ Pk2 = Public_key(generator, Q2)
+
+ return [Pk1, Pk2]
+
+
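+# A usage sketch for key recovery (illustrative; r, s and hash stand in
+# for values from a real signature made over generator_256). Exactly one
+# of the two recovered keys is the signer's, and both verify:
+#
+# sig = Signature(r, s)
+# pk1, pk2 = sig.recover_public_keys(hash, generator_256)
+# assert pk1.verifies(hash, sig) and pk2.verifies(hash, sig)
+
+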
+class Public_key(object):
+ """Public key for ECDSA.
+ """
+
+ def __init__(self, generator, point, verify=True):
+ """
+ Low level ECDSA public key object.
+
+ :param generator: the Point that generates the group (the base point)
+ :param point: the Point that defines the public key
+ :param bool verify: if True, check that the point is a valid point on the curve
+
+ :raises InvalidPointError: if the point parameters are invalid or
+ point does not lie on the curve
+ """
+
+ self.curve = generator.curve()
+ self.generator = generator
+ self.point = point
+ n = generator.order()
+ p = self.curve.p()
+ if not (0 <= point.x() < p) or not (0 <= point.y() < p):
+ raise InvalidPointError("The public point has x or y out of range.")
+ if verify and not self.curve.contains_point(point.x(), point.y()):
+ raise InvalidPointError("Point does not lie on the curve")
+ if not n:
+ raise InvalidPointError("Generator point must have order.")
+ # for curve parameters with base point with cofactor 1, all points
+ # that are on the curve are scalar multiples of the base point, so
+ # verifying that is not necessary. See Section 3.2.2.1 of SEC 1 v2
+ if verify and self.curve.cofactor() != 1 and \
+ not n * point == ellipticcurve.INFINITY:
+ raise InvalidPointError("Generator point order is bad.")
+
+ def __eq__(self, other):
+ """Return True if the keys are identical, False otherwise."""
+ if isinstance(other, Public_key):
+ return self.curve == other.curve \
+ and self.point == other.point
+ return NotImplemented
+
+ def verifies(self, hash, signature):
+ """Verify that signature is a valid signature of hash.
+ Return True if the signature is valid.
+ """
+
+ # From X9.62 J.3.1.
+
+ G = self.generator
+ n = G.order()
+ r = signature.r
+ s = signature.s
+ if r < 1 or r > n - 1:
+ return False
+ if s < 1 or s > n - 1:
+ return False
+ c = numbertheory.inverse_mod(s, n)
+ u1 = (hash * c) % n
+ u2 = (r * c) % n
+ if hasattr(G, "mul_add"):
+ xy = G.mul_add(u1, self.point, u2)
+ else:
+ xy = u1 * G + u2 * self.point
+ v = xy.x() % n
+ return v == r
+
+
+class Private_key(object):
+ """Private key for ECDSA.
+ """
+
+ def __init__(self, public_key, secret_multiplier):
+ """public_key is of class Public_key;
+ secret_multiplier is a large integer.
+ """
+
+ self.public_key = public_key
+ self.secret_multiplier = secret_multiplier
+
+ def __eq__(self, other):
+ """Return True if the keys are identical, False otherwise."""
+ if isinstance(other, Private_key):
+ return self.public_key == other.public_key \
+ and self.secret_multiplier == other.secret_multiplier
+ return NotImplemented
+
+ def sign(self, hash, random_k):
+ """Return a signature for the provided hash, using the provided
+ random nonce. It is absolutely vital that random_k be an unpredictable
+ number in the range [1, self.public_key.point.order()-1]. If
+ an attacker can guess random_k, he can compute our private key from a
+ single signature. Also, if an attacker knows a few high-order
+ bits (or a few low-order bits) of random_k, he can compute our private
+ key from many signatures. The generation of nonces with adequate
+ cryptographic strength is very difficult and far beyond the scope
+ of this comment.
+
+ May raise RuntimeError, in which case retrying with a new
+ random value k is in order.
+ """
+
+ G = self.public_key.generator
+ n = G.order()
+ k = random_k % n
+ # Fix the bit-length of the random nonce,
+ # so that it doesn't leak via timing.
+ # This does not change the result, since ks == k (mod n)
+ ks = k + n
+ kt = ks + n
+ if bit_length(ks) == bit_length(n):
+ p1 = kt * G
+ else:
+ p1 = ks * G
+ r = p1.x() % n
+ if r == 0:
+ raise RSZeroError("amazingly unlucky random number r")
+ s = (numbertheory.inverse_mod(k, n)
+ * (hash + (self.secret_multiplier * r) % n)) % n
+ if s == 0:
+ raise RSZeroError("amazingly unlucky random number s")
+ return Signature(r, s)
+
+
+def int_to_string(x):
+ """Convert integer x into a string of bytes, as per X9.62."""
+ assert x >= 0
+ if x == 0:
+ return b('\0')
+ result = []
+ while x:
+ ordinal = x & 0xFF
+ result.append(int2byte(ordinal))
+ x >>= 8
+
+ result.reverse()
+ return b('').join(result)
+
+
+def string_to_int(s):
+ """Convert a string of bytes into an integer, as per X9.62."""
+ result = 0
+ for c in s:
+ if not isinstance(c, int):
+ c = ord(c)
+ result = 256 * result + c
+ return result
+
+
+def digest_integer(m):
+ """Convert an integer into a string of bytes, compute
+ its SHA-1 hash, and convert the result to an integer."""
+ #
+ # I don't expect this function to be used much. I wrote
+ # it in order to be able to duplicate the examples
+ # in ECDSAVS.
+ #
+ from hashlib import sha1
+ return string_to_int(sha1(int_to_string(m)).digest())
+
+
+def point_is_valid(generator, x, y):
+ """Is (x,y) a valid public key based on the specified generator?"""
+
+ # These are the tests specified in X9.62.
+
+ n = generator.order()
+ curve = generator.curve()
+ p = curve.p()
+ if not (0 <= x < p) or not (0 <= y < p):
+ return False
+ if not curve.contains_point(x, y):
+ return False
+ if curve.cofactor() != 1 and \
+ not n * ellipticcurve.PointJacobi(curve, x, y, 1)\
+ == ellipticcurve.INFINITY:
+ return False
+ return True
+
+
+# NIST Curve P-192:
+_p = 6277101735386680763835789423207666416083908700390324961279
+_r = 6277101735386680763835789423176059013767194773182842284081
+# s = 0x3045ae6fc8422f64ed579528d38120eae12196d5L
+# c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65L
+_b = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
+_Gx = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
+_Gy = 0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811
+
+curve_192 = ellipticcurve.CurveFp(_p, -3, _b, 1)
+generator_192 = ellipticcurve.PointJacobi(
+ curve_192, _Gx, _Gy, 1, _r, generator=True)
+
+
+# NIST Curve P-224:
+_p = 26959946667150639794667015087019630673557916260026308143510066298881
+_r = 26959946667150639794667015087019625940457807714424391721682722368061
+# s = 0xbd71344799d5c7fcdc45b59fa3b9ab8f6a948bc5L
+# c = 0x5b056c7e11dd68f40469ee7f3c7a7d74f7d121116506d031218291fbL
+_b = 0xb4050a850c04b3abf54132565044b0b7d7bfd8ba270b39432355ffb4
+_Gx = 0xb70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21
+_Gy = 0xbd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34
+
+curve_224 = ellipticcurve.CurveFp(_p, -3, _b, 1)
+generator_224 = ellipticcurve.PointJacobi(
+ curve_224, _Gx, _Gy, 1, _r, generator=True)
+
+# NIST Curve P-256:
+_p = 115792089210356248762697446949407573530086143415290314195533631308867097853951
+_r = 115792089210356248762697446949407573529996955224135760342422259061068512044369
+# s = 0xc49d360886e704936a6678e1139d26b7819f7e90L
+# c = 0x7efba1662985be9403cb055c75d4f7e0ce8d84a9c5114abcaf3177680104fa0dL
+_b = 0x5ac635d8aa3a93e7b3ebbd55769886bc651d06b0cc53b0f63bce3c3e27d2604b
+_Gx = 0x6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296
+_Gy = 0x4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5
+
+curve_256 = ellipticcurve.CurveFp(_p, -3, _b, 1)
+generator_256 = ellipticcurve.PointJacobi(
+ curve_256, _Gx, _Gy, 1, _r, generator=True)
+
+# NIST Curve P-384:
+_p = 39402006196394479212279040100143613805079739270465446667948293404245721771496870329047266088258938001861606973112319
+_r = 39402006196394479212279040100143613805079739270465446667946905279627659399113263569398956308152294913554433653942643
+# s = 0xa335926aa319a27a1d00896a6773a4827acdac73L
+# c = 0x79d1e655f868f02fff48dcdee14151ddb80643c1406d0ca10dfe6fc52009540a495e8042ea5f744f6e184667cc722483L
+_b = 0xb3312fa7e23ee7e4988e056be3f82d19181d9c6efe8141120314088f5013875ac656398d8a2ed19d2a85c8edd3ec2aef
+_Gx = 0xaa87ca22be8b05378eb1c71ef320ad746e1d3b628ba79b9859f741e082542a385502f25dbf55296c3a545e3872760ab7
+_Gy = 0x3617de4a96262c6f5d9e98bf9292dc29f8f41dbd289a147ce9da3113b5f0b8c00a60b1ce1d7e819d7a431d7c90ea0e5f
+
+curve_384 = ellipticcurve.CurveFp(_p, -3, _b, 1)
+generator_384 = ellipticcurve.PointJacobi(
+ curve_384, _Gx, _Gy, 1, _r, generator=True)
+
+# NIST Curve P-521:
+_p = 6864797660130609714981900799081393217269435300143305409394463459185543183397656052122559640661454554977296311391480858037121987999716643812574028291115057151
+_r = 6864797660130609714981900799081393217269435300143305409394463459185543183397655394245057746333217197532963996371363321113864768612440380340372808892707005449
+# s = 0xd09e8800291cb85396cc6717393284aaa0da64baL
+# c = 0x0b48bfa5f420a34949539d2bdfc264eeeeb077688e44fbf0ad8f6d0edb37bd6b533281000518e19f1b9ffbe0fe9ed8a3c2200b8f875e523868c70c1e5bf55bad637L
+_b = 0x051953eb9618e1c9a1f929a21a0b68540eea2da725b99b315f3b8b489918ef109e156193951ec7e937b1652c0bd3bb1bf073573df883d2c34f1ef451fd46b503f00
+_Gx = 0xc6858e06b70404e9cd9e3ecb662395b4429c648139053fb521f828af606b4d3dbaa14b5e77efe75928fe1dc127a2ffa8de3348b3c1856a429bf97e7e31c2e5bd66
+_Gy = 0x11839296a789a3bc0045c8a5fb42c7d1bd998f54449579b446817afbd17273e662c97ee72995ef42640c550b9013fad0761353c7086a272c24088be94769fd16650
+
+curve_521 = ellipticcurve.CurveFp(_p, -3, _b, 1)
+generator_521 = ellipticcurve.PointJacobi(
+ curve_521, _Gx, _Gy, 1, _r, generator=True)
+
+# Certicom secp256-k1
+_a = 0x0000000000000000000000000000000000000000000000000000000000000000
+_b = 0x0000000000000000000000000000000000000000000000000000000000000007
+_p = 0xfffffffffffffffffffffffffffffffffffffffffffffffffffffffefffffc2f
+_Gx = 0x79be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798
+_Gy = 0x483ada7726a3c4655da4fbfc0e1108a8fd17b448a68554199c47d08ffb10d4b8
+_r = 0xfffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141
+
+curve_secp256k1 = ellipticcurve.CurveFp(_p, _a, _b, 1)
+generator_secp256k1 = ellipticcurve.PointJacobi(
+ curve_secp256k1, _Gx, _Gy, 1, _r, generator=True)
+
+# Brainpool P-160-r1
+_a = 0x340E7BE2A280EB74E2BE61BADA745D97E8F7C300
+_b = 0x1E589A8595423412134FAA2DBDEC95C8D8675E58
+_p = 0xE95E4A5F737059DC60DFC7AD95B3D8139515620F
+_Gx = 0xBED5AF16EA3F6A4F62938C4631EB5AF7BDBCDBC3
+_Gy = 0x1667CB477A1A8EC338F94741669C976316DA6321
+_q = 0xE95E4A5F737059DC60DF5991D45029409E60FC09
+
+curve_brainpoolp160r1 = ellipticcurve.CurveFp(_p, _a, _b, 1)
+generator_brainpoolp160r1 = ellipticcurve.PointJacobi(
+ curve_brainpoolp160r1, _Gx, _Gy, 1, _q, generator=True)
+
+# Brainpool P-192-r1
+_a = 0x6A91174076B1E0E19C39C031FE8685C1CAE040E5C69A28EF
+_b = 0x469A28EF7C28CCA3DC721D044F4496BCCA7EF4146FBF25C9
+_p = 0xC302F41D932A36CDA7A3463093D18DB78FCE476DE1A86297
+_Gx = 0xC0A0647EAAB6A48753B033C56CB0F0900A2F5C4853375FD6
+_Gy = 0x14B690866ABD5BB88B5F4828C1490002E6773FA2FA299B8F
+_q = 0xC302F41D932A36CDA7A3462F9E9E916B5BE8F1029AC4ACC1
+
+curve_brainpoolp192r1 = ellipticcurve.CurveFp(_p, _a, _b, 1)
+generator_brainpoolp192r1 = ellipticcurve.PointJacobi(
+ curve_brainpoolp192r1, _Gx, _Gy, 1, _q, generator=True)
+
+# Brainpool P-224-r1
+_a = 0x68A5E62CA9CE6C1C299803A6C1530B514E182AD8B0042A59CAD29F43
+_b = 0x2580F63CCFE44138870713B1A92369E33E2135D266DBB372386C400B
+_p = 0xD7C134AA264366862A18302575D1D787B09F075797DA89F57EC8C0FF
+_Gx = 0x0D9029AD2C7E5CF4340823B2A87DC68C9E4CE3174C1E6EFDEE12C07D
+_Gy = 0x58AA56F772C0726F24C6B89E4ECDAC24354B9E99CAA3F6D3761402CD
+_q = 0xD7C134AA264366862A18302575D0FB98D116BC4B6DDEBCA3A5A7939F
+
+curve_brainpoolp224r1 = ellipticcurve.CurveFp(_p, _a, _b, 1)
+generator_brainpoolp224r1 = ellipticcurve.PointJacobi(
+ curve_brainpoolp224r1, _Gx, _Gy, 1, _q, generator=True)
+
+# Brainpool P-256-r1
+_a = 0x7D5A0975FC2C3057EEF67530417AFFE7FB8055C126DC5C6CE94A4B44F330B5D9
+_b = 0x26DC5C6CE94A4B44F330B5D9BBD77CBF958416295CF7E1CE6BCCDC18FF8C07B6
+_p = 0xA9FB57DBA1EEA9BC3E660A909D838D726E3BF623D52620282013481D1F6E5377
+_Gx = 0x8BD2AEB9CB7E57CB2C4B482FFC81B7AFB9DE27E1E3BD23C23A4453BD9ACE3262
+_Gy = 0x547EF835C3DAC4FD97F8461A14611DC9C27745132DED8E545C1D54C72F046997
+_q = 0xA9FB57DBA1EEA9BC3E660A909D838D718C397AA3B561A6F7901E0E82974856A7
+
+curve_brainpoolp256r1 = ellipticcurve.CurveFp(_p, _a, _b, 1)
+generator_brainpoolp256r1 = ellipticcurve.PointJacobi(
+ curve_brainpoolp256r1, _Gx, _Gy, 1, _q, generator=True)
+
+# Brainpool P-320-r1
+_a = 0x3EE30B568FBAB0F883CCEBD46D3F3BB8A2A73513F5EB79DA66190EB085FFA9F492F375A97D860EB4
+_b = 0x520883949DFDBC42D3AD198640688A6FE13F41349554B49ACC31DCCD884539816F5EB4AC8FB1F1A6
+_p = 0xD35E472036BC4FB7E13C785ED201E065F98FCFA6F6F40DEF4F92B9EC7893EC28FCD412B1F1B32E27
+_Gx = 0x43BD7E9AFB53D8B85289BCC48EE5BFE6F20137D10A087EB6E7871E2A10A599C710AF8D0D39E20611
+_Gy = 0x14FDD05545EC1CC8AB4093247F77275E0743FFED117182EAA9C77877AAAC6AC7D35245D1692E8EE1
+_q = 0xD35E472036BC4FB7E13C785ED201E065F98FCFA5B68F12A32D482EC7EE8658E98691555B44C59311
+
+curve_brainpoolp320r1 = ellipticcurve.CurveFp(_p, _a, _b, 1)
+generator_brainpoolp320r1 = ellipticcurve.PointJacobi(
+ curve_brainpoolp320r1, _Gx, _Gy, 1, _q, generator=True)
+
+# Brainpool P-384-r1
+_a = 0x7BC382C63D8C150C3C72080ACE05AFA0C2BEA28E4FB22787139165EFBA91F90F8AA5814A503AD4EB04A8C7DD22CE2826
+_b = 0x04A8C7DD22CE28268B39B55416F0447C2FB77DE107DCD2A62E880EA53EEB62D57CB4390295DBC9943AB78696FA504C11
+_p = 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B412B1DA197FB71123ACD3A729901D1A71874700133107EC53
+_Gx = 0x1D1C64F068CF45FFA2A63A81B7C13F6B8847A3E77EF14FE3DB7FCAFE0CBD10E8E826E03436D646AAEF87B2E247D4AF1E
+_Gy = 0x8ABE1D7520F9C2A45CB1EB8E95CFD55262B70B29FEEC5864E19C054FF99129280E4646217791811142820341263C5315
+_q = 0x8CB91E82A3386D280F5D6F7E50E641DF152F7109ED5456B31F166E6CAC0425A7CF3AB6AF6B7FC3103B883202E9046565
+
+curve_brainpoolp384r1 = ellipticcurve.CurveFp(_p, _a, _b, 1)
+generator_brainpoolp384r1 = ellipticcurve.PointJacobi(
+ curve_brainpoolp384r1, _Gx, _Gy, 1, _q, generator=True)
+
+# Brainpool P-512-r1
+_a = 0x7830A3318B603B89E2327145AC234CC594CBDD8D3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CA
+_b = 0x3DF91610A83441CAEA9863BC2DED5D5AA8253AA10A2EF1C98B9AC8B57F1117A72BF2C7B9E7C1AC4D77FC94CADC083E67984050B75EBAE5DD2809BD638016F723
+_p = 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA703308717D4D9B009BC66842AECDA12AE6A380E62881FF2F2D82C68528AA6056583A48F3
+_Gx = 0x81AEE4BDD82ED9645A21322E9C4C6A9385ED9F70B5D916C1B43B62EEF4D0098EFF3B1F78E2D0D48D50D1687B93B97D5F7C6D5047406A5E688B352209BCB9F822
+_Gy = 0x7DDE385D566332ECC0EABFA9CF7822FDF209F70024A57B1AA000C55B881F8111B2DCDE494A5F485E5BCA4BD88A2763AED1CA2B2FA8F0540678CD1E0F3AD80892
+_q = 0xAADD9DB8DBE9C48B3FD4E6AE33C9FC07CB308DB3B3C9D20ED6639CCA70330870553E5C414CA92619418661197FAC10471DB1D381085DDADDB58796829CA90069
+
+curve_brainpoolp512r1 = ellipticcurve.CurveFp(_p, _a, _b, 1)
+generator_brainpoolp512r1 = ellipticcurve.PointJacobi(
+ curve_brainpoolp512r1, _Gx, _Gy, 1, _q, generator=True)
diff --git a/third_party/python/ecdsa/ecdsa/ellipticcurve.py b/third_party/python/ecdsa/ecdsa/ellipticcurve.py
new file mode 100644
index 0000000000..3420454db4
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/ellipticcurve.py
@@ -0,0 +1,780 @@
+#! /usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Implementation of elliptic curves, for cryptographic applications.
+#
+# This module doesn't provide any way to choose a random elliptic
+# curve, nor to verify that an elliptic curve was chosen randomly,
+# because one can simply use NIST's standard curves.
+#
+# Notes from X9.62-1998 (draft):
+# Nomenclature:
+# - Q is a public key.
+# The "Elliptic Curve Domain Parameters" include:
+# - q is the "field size", which in our case equals p.
+# - p is a big prime.
+# - G is a point of prime order (5.1.1.1).
+# - n is the order of G (5.1.1.1).
+# Public-key validation (5.2.2):
+# - Verify that Q is not the point at infinity.
+# - Verify that X_Q and Y_Q are in [0,p-1].
+# - Verify that Q is on the curve.
+# - Verify that nQ is the point at infinity.
+# Signature generation (5.3):
+# - Pick random k from [1,n-1].
+# Signature checking (5.4.2):
+# - Verify that r and s are in [1,n-1].
+#
+# Version of 2008.11.25.
+#
+# Revision history:
+# 2005.12.31 - Initial version.
+# 2008.11.25 - Change CurveFp.is_on to contains_point.
+#
+# Written in 2005 by Peter Pearson and placed in the public domain.
+
+from __future__ import division
+
+try:
+ from gmpy2 import mpz
+ GMPY = True
+except ImportError:
+ try:
+ from gmpy import mpz
+ GMPY = True
+ except ImportError:
+ GMPY = False
+
+
+from six import python_2_unicode_compatible
+from . import numbertheory
+from ._rwlock import RWLock
+
+
+@python_2_unicode_compatible
+class CurveFp(object):
+ """Elliptic Curve over the field of integers modulo a prime."""
+
+ if GMPY:
+ def __init__(self, p, a, b, h=None):
+ """
+ The curve of points satisfying y^2 = x^3 + a*x + b (mod p).
+
+ h is an integer that is the cofactor of the elliptic curve domain
+ parameters; it is the number of points satisfying the elliptic curve
+ equation divided by the order of the base point. It is used for selection
+ of efficient algorithm for public point verification.
+ """
+ self.__p = mpz(p)
+ self.__a = mpz(a)
+ self.__b = mpz(b)
+ # h is not used in calculations and it can be None, so don't use
+ # gmpy with it
+ self.__h = h
+ else:
+ def __init__(self, p, a, b, h=None):
+ """
+ The curve of points satisfying y^2 = x^3 + a*x + b (mod p).
+
+ h is an integer that is the cofactor of the elliptic curve domain
+ parameters; it is the number of points satisfying the elliptic curve
+ equation divided by the order of the base point. It is used for selection
+ of efficient algorithm for public point verification.
+ """
+ self.__p = p
+ self.__a = a
+ self.__b = b
+ self.__h = h
+
+ def __eq__(self, other):
+ """Return True if the curves are identical, False otherwise."""
+ if isinstance(other, CurveFp):
+ return self.__p == other.__p \
+ and self.__a == other.__a \
+ and self.__b == other.__b
+ return NotImplemented
+
+ def __hash__(self):
+ return hash((self.__p, self.__a, self.__b))
+
+ def p(self):
+ return self.__p
+
+ def a(self):
+ return self.__a
+
+ def b(self):
+ return self.__b
+
+ def cofactor(self):
+ return self.__h
+
+ def contains_point(self, x, y):
+ """Is the point (x,y) on this curve?"""
+ return (y * y - ((x * x + self.__a) * x + self.__b)) % self.__p == 0
+
+ def __str__(self):
+ return "CurveFp(p=%d, a=%d, b=%d, h=%d)" % (
+ self.__p, self.__a, self.__b, self.__h)
+
+
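+# A toy example (illustrative): the curve y^2 = x^3 + x + 1 over GF(23)
+# contains (0, 1), since 1**2 == 0**3 + 0 + 1, but not (1, 1):
+#
+# tiny = CurveFp(23, 1, 1)
+# assert tiny.contains_point(0, 1)
+# assert not tiny.contains_point(1, 1)
+
+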
+class PointJacobi(object):
+ """
+ Point on an elliptic curve. Uses Jacobi coordinates.
+
+ In Jacobian coordinates, there are three parameters, X, Y and Z.
+ They correspond to affine parameters 'x' and 'y' like so:
+
+ x = X / Z²
+ y = Y / Z³
+ """
+ def __init__(self, curve, x, y, z, order=None, generator=False):
+ """
+ Initialise a point that uses Jacobi representation internally.
+
+ :param CurveFp curve: curve on which the point resides
+ :param int x: the X parameter of Jacobi representation (equal to x when
+ converting from affine coordinates)
+ :param int y: the Y parameter of Jacobi representation (equal to y when
+ converting from affine coordinates)
+ :param int z: the Z parameter of Jacobi representation (equal to 1 when
+ converting from affine coordinates)
+ :param int order: the point order, must be non-zero when using
+ generator=True
+ :param bool generator: the point provided is a curve generator; as
+ such, it will commonly be used with scalar multiplication. This will
+ cause a multiplication table to be precomputed for it
+ """
+ self.__curve = curve
+ # since it's generally better (faster) to use scaled points vs unscaled
+ # ones, use writer-biased RWLock for locking:
+ self._scale_lock = RWLock()
+ if GMPY:
+ self.__x = mpz(x)
+ self.__y = mpz(y)
+ self.__z = mpz(z)
+ self.__order = order and mpz(order)
+ else:
+ self.__x = x
+ self.__y = y
+ self.__z = z
+ self.__order = order
+ self.__precompute = []
+ if generator:
+ assert order
+ i = 1
+ order *= 2
+ doubler = PointJacobi(curve, x, y, z, order)
+ order *= 2
+ self.__precompute.append((doubler.x(), doubler.y()))
+
+ while i < order:
+ i *= 2
+ doubler = doubler.double().scale()
+ self.__precompute.append((doubler.x(), doubler.y()))
+
+ def __eq__(self, other):
+ """Compare two points with each-other."""
+ try:
+ self._scale_lock.reader_acquire()
+ if other is INFINITY:
+ return not self.__y or not self.__z
+ x1, y1, z1 = self.__x, self.__y, self.__z
+ finally:
+ self._scale_lock.reader_release()
+ if isinstance(other, Point):
+ x2, y2, z2 = other.x(), other.y(), 1
+ elif isinstance(other, PointJacobi):
+ try:
+ other._scale_lock.reader_acquire()
+ x2, y2, z2 = other.__x, other.__y, other.__z
+ finally:
+ other._scale_lock.reader_release()
+ else:
+ return NotImplemented
+ if self.__curve != other.curve():
+ return False
+ p = self.__curve.p()
+
+ zz1 = z1 * z1 % p
+ zz2 = z2 * z2 % p
+
+ # compare the fractions by bringing them to the same denominator;
+ # rely on short-circuit evaluation to save 4 multiplications in case
+ # of inequality
+ return (x1 * zz2 - x2 * zz1) % p == 0 and \
+ (y1 * zz2 * z2 - y2 * zz1 * z1) % p == 0
+
+ def order(self):
+ """Return the order of the point.
+
+ None if it is undefined.
+ """
+ return self.__order
+
+ def curve(self):
+ """Return curve over which the point is defined."""
+ return self.__curve
+
+ def x(self):
+ """
+ Return affine x coordinate.
+
+ This method should be used only when the 'y' coordinate is not needed.
+ If both coordinates are needed, it's computationally more efficient
+ to use `to_affine()` and then call x() and y() on the returned
+ instance, or to call `scale()` and then x() and y() on the scaled point.
+ """
+ try:
+ self._scale_lock.reader_acquire()
+ if self.__z == 1:
+ return self.__x
+ x = self.__x
+ z = self.__z
+ finally:
+ self._scale_lock.reader_release()
+ p = self.__curve.p()
+ z = numbertheory.inverse_mod(z, p)
+ return x * z**2 % p
+
+ def y(self):
+ """
+ Return affine y coordinate.
+
+ This method should be used only when the 'x' coordinate is not needed.
+ If both coordinates are needed, it's computationally more efficient
+ to use `to_affine()` and then call x() and y() on the returned
+ instance, or to call `scale()` and then x() and y() on the scaled point.
+ """
+ try:
+ self._scale_lock.reader_acquire()
+ if self.__z == 1:
+ return self.__y
+ y = self.__y
+ z = self.__z
+ finally:
+ self._scale_lock.reader_release()
+ p = self.__curve.p()
+ z = numbertheory.inverse_mod(z, p)
+ return y * z**3 % p
+
+ def scale(self):
+ """
+ Return point scaled so that z == 1.
+
+ Modifies point in place, returns self.
+ """
+ try:
+ self._scale_lock.reader_acquire()
+ if self.__z == 1:
+ return self
+ finally:
+ self._scale_lock.reader_release()
+
+ try:
+ self._scale_lock.writer_acquire()
+ # scaling already scaled point is safe (as inverse of 1 is 1) and
+ # quick so we don't need to optimise for the unlikely event when
+ # two threads hit the lock at the same time
+ p = self.__curve.p()
+ z_inv = numbertheory.inverse_mod(self.__z, p)
+ zz_inv = z_inv * z_inv % p
+ self.__x = self.__x * zz_inv % p
+ self.__y = self.__y * zz_inv * z_inv % p
+ # we are setting the z last so that the check above will return true
+ # only after all values were already updated
+ self.__z = 1
+ finally:
+ self._scale_lock.writer_release()
+ return self
+
+ def to_affine(self):
+ """Return point in affine form."""
+ if not self.__y or not self.__z:
+ return INFINITY
+ self.scale()
+ # after point is scaled, it's immutable, so no need to perform locking
+ return Point(self.__curve, self.__x,
+ self.__y, self.__order)
+
+ @staticmethod
+ def from_affine(point, generator=False):
+ """Create from an affine point.
+
+ :param bool generator: set to True to make the point precalculate a
+ multiplication table - useful for a public point when verifying many
+ signatures (around 100 or so) or for the generator points of a curve.
+ """
+ return PointJacobi(point.curve(), point.x(), point.y(), 1,
+ point.order(), generator)
+
+ # please note that all the methods that use the equations from
+ # hyperelliptic.org are formatted in a way that maximises performance.
+ # Things that make code faster: multiplying instead of taking to the power
+ # (`xx = x * x; xxxx = xx * xx % p` is faster than `xxxx = x**4 % p` and
+ # `pow(x, 4, p)`),
+ # multiple assignments at the same time (`x1, x2 = self.x1, self.x2` is
+ # faster than `x1 = self.x1; x2 = self.x2`),
+ # similarly, sometimes the `% p` is skipped if it makes the calculation
+ # faster and the result of calculation is later reduced modulo `p`
+
+ def _double_with_z_1(self, X1, Y1, p, a):
+ """Add a point to itself with z == 1."""
+ # after:
+ # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-mdbl-2007-bl
+ XX, YY = X1 * X1 % p, Y1 * Y1 % p
+ if not YY:
+ return 0, 0, 1
+ YYYY = YY * YY % p
+ S = 2 * ((X1 + YY)**2 - XX - YYYY) % p
+ M = 3 * XX + a
+ T = (M * M - 2 * S) % p
+ # X3 = T
+ Y3 = (M * (S - T) - 8 * YYYY) % p
+ Z3 = 2 * Y1 % p
+ return T, Y3, Z3
+
+ def _double(self, X1, Y1, Z1, p, a):
+ """Add a point to itself, arbitrary z."""
+ if Z1 == 1:
+ return self._double_with_z_1(X1, Y1, p, a)
+ if not Z1:
+ return 0, 0, 1
+ # after:
+ # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl
+ XX, YY = X1 * X1 % p, Y1 * Y1 % p
+ if not YY:
+ return 0, 0, 1
+ YYYY = YY * YY % p
+ ZZ = Z1 * Z1 % p
+ S = 2 * ((X1 + YY)**2 - XX - YYYY) % p
+ M = (3 * XX + a * ZZ * ZZ) % p
+ T = (M * M - 2 * S) % p
+ # X3 = T
+ Y3 = (M * (S - T) - 8 * YYYY) % p
+ Z3 = ((Y1 + Z1)**2 - YY - ZZ) % p
+
+ return T, Y3, Z3
+
+ def double(self):
+ """Add a point to itself."""
+ if not self.__y:
+ return INFINITY
+
+ p, a = self.__curve.p(), self.__curve.a()
+
+ try:
+ self._scale_lock.reader_acquire()
+ X1, Y1, Z1 = self.__x, self.__y, self.__z
+ finally:
+ self._scale_lock.reader_release()
+
+ X3, Y3, Z3 = self._double(X1, Y1, Z1, p, a)
+
+ if not Y3 or not Z3:
+ return INFINITY
+ return PointJacobi(self.__curve, X3, Y3, Z3, self.__order)
+
+ def _add_with_z_1(self, X1, Y1, X2, Y2, p):
+ """add points when both Z1 and Z2 equal 1"""
+ # after:
+ # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-mmadd-2007-bl
+ H = X2 - X1
+ HH = H * H
+ I = 4 * HH % p
+ J = H * I
+ r = 2 * (Y2 - Y1)
+ if not H and not r:
+ return self._double_with_z_1(X1, Y1, p, self.__curve.a())
+ V = X1 * I
+ X3 = (r**2 - J - 2 * V) % p
+ Y3 = (r * (V - X3) - 2 * Y1 * J) % p
+ Z3 = 2 * H % p
+ return X3, Y3, Z3
+
+ def _add_with_z_eq(self, X1, Y1, Z1, X2, Y2, p):
+ """add points when Z1 == Z2"""
+ # after:
+ # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-zadd-2007-m
+ A = (X2 - X1)**2 % p
+ B = X1 * A % p
+ C = X2 * A
+ D = (Y2 - Y1)**2 % p
+ if not A and not D:
+ return self._double(X1, Y1, Z1, p, self.__curve.a())
+ X3 = (D - B - C) % p
+ Y3 = ((Y2 - Y1) * (B - X3) - Y1 * (C - B)) % p
+ Z3 = Z1 * (X2 - X1) % p
+ return X3, Y3, Z3
+
+ def _add_with_z2_1(self, X1, Y1, Z1, X2, Y2, p):
+ """add points when Z2 == 1"""
+ # after:
+ # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-madd-2007-bl
+ Z1Z1 = Z1 * Z1 % p
+ U2, S2 = X2 * Z1Z1 % p, Y2 * Z1 * Z1Z1 % p
+ H = (U2 - X1) % p
+ HH = H * H % p
+ I = 4 * HH % p
+ J = H * I
+ r = 2 * (S2 - Y1) % p
+ if not r and not H:
+ return self._double_with_z_1(X2, Y2, p, self.__curve.a())
+ V = X1 * I
+ X3 = (r * r - J - 2 * V) % p
+ Y3 = (r * (V - X3) - 2 * Y1 * J) % p
+ Z3 = ((Z1 + H)**2 - Z1Z1 - HH) % p
+ return X3, Y3, Z3
+
+ def _add_with_z_ne(self, X1, Y1, Z1, X2, Y2, Z2, p):
+ """add points with arbitrary z"""
+ # after:
+ # http://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#addition-add-2007-bl
+ Z1Z1 = Z1 * Z1 % p
+ Z2Z2 = Z2 * Z2 % p
+ U1 = X1 * Z2Z2 % p
+ U2 = X2 * Z1Z1 % p
+ S1 = Y1 * Z2 * Z2Z2 % p
+ S2 = Y2 * Z1 * Z1Z1 % p
+ H = U2 - U1
+ I = 4 * H * H % p
+ J = H * I % p
+ r = 2 * (S2 - S1) % p
+ if not H and not r:
+ return self._double(X1, Y1, Z1, p, self.__curve.a())
+ V = U1 * I
+ X3 = (r * r - J - 2 * V) % p
+ Y3 = (r * (V - X3) - 2 * S1 * J) % p
+ Z3 = ((Z1 + Z2)**2 - Z1Z1 - Z2Z2) * H % p
+
+ return X3, Y3, Z3
+
+ def __radd__(self, other):
+ """Add other to self."""
+ return self + other
+
+ def _add(self, X1, Y1, Z1, X2, Y2, Z2, p):
+ """add two points, select fastest method."""
+ if not Y1 or not Z1:
+ return X2, Y2, Z2
+ if not Y2 or not Z2:
+ return X1, Y1, Z1
+ if Z1 == Z2:
+ if Z1 == 1:
+ return self._add_with_z_1(X1, Y1, X2, Y2, p)
+ return self._add_with_z_eq(X1, Y1, Z1, X2, Y2, p)
+ if Z1 == 1:
+ return self._add_with_z2_1(X2, Y2, Z2, X1, Y1, p)
+ if Z2 == 1:
+ return self._add_with_z2_1(X1, Y1, Z1, X2, Y2, p)
+ return self._add_with_z_ne(X1, Y1, Z1, X2, Y2, Z2, p)
+
+ def __add__(self, other):
+ """Add two points on elliptic curve."""
+ if self == INFINITY:
+ return other
+ if other == INFINITY:
+ return self
+ if isinstance(other, Point):
+ other = PointJacobi.from_affine(other)
+ if self.__curve != other.__curve:
+ raise ValueError("The other point is on different curve")
+
+ p = self.__curve.p()
+ try:
+ self._scale_lock.reader_acquire()
+ X1, Y1, Z1 = self.__x, self.__y, self.__z
+ finally:
+ self._scale_lock.reader_release()
+ try:
+ other._scale_lock.reader_acquire()
+ X2, Y2, Z2 = other.__x, other.__y, other.__z
+ finally:
+ other._scale_lock.reader_release()
+ X3, Y3, Z3 = self._add(X1, Y1, Z1, X2, Y2, Z2, p)
+
+ if not Y3 or not Z3:
+ return INFINITY
+ return PointJacobi(self.__curve, X3, Y3, Z3, self.__order)
+
+ def __rmul__(self, other):
+ """Multiply point by an integer."""
+ return self * other
+
+ def _mul_precompute(self, other):
+ """Multiply point by integer with precomputation table."""
+ X3, Y3, Z3, p = 0, 0, 1, self.__curve.p()
+ _add = self._add
+ for X2, Y2 in self.__precompute:
+ if other % 2:
+ if other % 4 >= 2:
+ other = (other + 1)//2
+ X3, Y3, Z3 = _add(X3, Y3, Z3, X2, -Y2, 1, p)
+ else:
+ other = (other - 1)//2
+ X3, Y3, Z3 = _add(X3, Y3, Z3, X2, Y2, 1, p)
+ else:
+ other //= 2
+
+ if not Y3 or not Z3:
+ return INFINITY
+ return PointJacobi(self.__curve, X3, Y3, Z3, self.__order)
+
+ @staticmethod
+ def _naf(mult):
+ """Calculate non-adjacent form of number."""
+ ret = []
+ while mult:
+ if mult % 2:
+ nd = mult % 4
+ if nd >= 2:
+ nd = nd - 4
+ ret += [nd]
+ mult -= nd
+ else:
+ ret += [0]
+ mult //= 2
+ return ret
+
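+ # Illustrative: NAF digits are least-significant first, so
+ # _naf(7) == [-1, 0, 0, 1], i.e. 7 == -2**0 + 2**3; the three point
+ # additions of binary 0b111 become one addition and one subtraction.
+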
+ def __mul__(self, other):
+ """Multiply point by an integer."""
+ if not self.__y or not other:
+ return INFINITY
+ if other == 1:
+ return self
+ if self.__order:
+ # order*2 as a protection for Minerva
+ other = other % (self.__order*2)
+ if self.__precompute:
+ return self._mul_precompute(other)
+
+ self = self.scale()
+ # once scaled, the point is immutable, no need to lock
+ X2, Y2 = self.__x, self.__y
+ X3, Y3, Z3 = 0, 0, 1
+ p, a = self.__curve.p(), self.__curve.a()
+ _double = self._double
+ _add = self._add
+ # since adding points when at least one of them is scaled
+ # is quicker, reverse the NAF order
+ for i in reversed(self._naf(other)):
+ X3, Y3, Z3 = _double(X3, Y3, Z3, p, a)
+ if i < 0:
+ X3, Y3, Z3 = _add(X3, Y3, Z3, X2, -Y2, 1, p)
+ elif i > 0:
+ X3, Y3, Z3 = _add(X3, Y3, Z3, X2, Y2, 1, p)
+
+ if not Y3 or not Z3:
+ return INFINITY
+
+ return PointJacobi(self.__curve, X3, Y3, Z3, self.__order)
+
+ @staticmethod
+ def _leftmost_bit(x):
+ """Return integer with the same magnitude as x but hamming weight of 1"""
+ assert x > 0
+ result = 1
+ while result <= x:
+ result = 2 * result
+ return result // 2
+
+ def mul_add(self, self_mul, other, other_mul):
+ """
+ Do two scalar multiplications at the same time and add the results.
+
+ Calculates self*self_mul + other*other_mul.
+ """
+ if other is INFINITY or other_mul == 0:
+ return self * self_mul
+ if self_mul == 0:
+ return other * other_mul
+ if not isinstance(other, PointJacobi):
+ other = PointJacobi.from_affine(other)
+ # when the points have precomputed answers, then multiplying them alone
+ # is faster (as it uses NAF)
+ if self.__precompute and other.__precompute:
+ return self * self_mul + other * other_mul
+
+ if self.__order:
+ self_mul = self_mul % self.__order
+ other_mul = other_mul % self.__order
+
+ i = self._leftmost_bit(max(self_mul, other_mul))*2
+ X3, Y3, Z3 = 0, 0, 1
+ p, a = self.__curve.p(), self.__curve.a()
+ self = self.scale()
+ # after scaling, point is immutable, no need for locking
+ X1, Y1 = self.__x, self.__y
+ other = other.scale()
+ X2, Y2 = other.__x, other.__y
+ both = (self + other).scale()
+ X4, Y4 = both.__x, both.__y
+ _double = self._double
+ _add = self._add
+ while i > 1:
+ X3, Y3, Z3 = _double(X3, Y3, Z3, p, a)
+ i = i // 2
+
+ if self_mul & i and other_mul & i:
+ X3, Y3, Z3 = _add(X3, Y3, Z3, X4, Y4, 1, p)
+ elif self_mul & i:
+ X3, Y3, Z3 = _add(X3, Y3, Z3, X1, Y1, 1, p)
+ elif other_mul & i:
+ X3, Y3, Z3 = _add(X3, Y3, Z3, X2, Y2, 1, p)
+
+ if not Y3 or not Z3:
+ return INFINITY
+
+ return PointJacobi(self.__curve, X3, Y3, Z3, self.__order)
+
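+ # mul_add() above implements Shamir's trick: one shared double-and-add
+ # ladder over both multipliers that uses the precomputed sum
+ # (self + other) at positions where both multipliers have a set bit.
+ # A usage sketch (the names are illustrative only):
+ #
+ # result = generator.mul_add(u1, pub_point, u2)
+ # # equivalent to, but faster than:
+ # # result = generator * u1 + pub_point * u2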
+ def __neg__(self):
+ """Return negated point."""
+ try:
+ self._scale_lock.reader_acquire()
+ return PointJacobi(self.__curve, self.__x, -self.__y, self.__z,
+ self.__order)
+ finally:
+ self._scale_lock.reader_release()
+
+
+class Point(object):
+ """A point on an elliptic curve. Altering x and y is forbidding,
+ but they can be read by the x() and y() methods."""
+ def __init__(self, curve, x, y, order=None):
+ """curve, x, y, order; order (optional) is the order of this point."""
+ self.__curve = curve
+ if GMPY:
+ self.__x = x and mpz(x)
+ self.__y = y and mpz(y)
+ self.__order = order and mpz(order)
+ else:
+ self.__x = x
+ self.__y = y
+ self.__order = order
+ # self.curve is allowed to be None only for INFINITY:
+ if self.__curve:
+ assert self.__curve.contains_point(x, y)
+ # for curves with cofactor 1, all points that are on the curve are scalar
+ # multiples of the base point, so performing multiplication is not
+ # necessary to verify that. See Section 3.2.2.1 of SEC 1 v2
+ if curve and curve.cofactor() != 1 and order:
+ assert self * order == INFINITY
+
+ def __eq__(self, other):
+ """Return True if the points are identical, False otherwise."""
+ if isinstance(other, Point):
+ return self.__curve == other.__curve \
+ and self.__x == other.__x \
+ and self.__y == other.__y
+ return NotImplemented
+
+ def __neg__(self):
+ return Point(self.__curve, self.__x, self.__curve.p() - self.__y)
+
+ def __add__(self, other):
+ """Add one point to another point."""
+
+ # X9.62 B.3:
+
+ if not isinstance(other, Point):
+ return NotImplemented
+ if other == INFINITY:
+ return self
+ if self == INFINITY:
+ return other
+ assert self.__curve == other.__curve
+ if self.__x == other.__x:
+ if (self.__y + other.__y) % self.__curve.p() == 0:
+ return INFINITY
+ else:
+ return self.double()
+
+ p = self.__curve.p()
+
+ l = ((other.__y - self.__y) * \
+ numbertheory.inverse_mod(other.__x - self.__x, p)) % p
+
+ x3 = (l * l - self.__x - other.__x) % p
+ y3 = (l * (self.__x - x3) - self.__y) % p
+
+ return Point(self.__curve, x3, y3)
+
+ def __mul__(self, other):
+ """Multiply a point by an integer."""
+
+ def leftmost_bit(x):
+ assert x > 0
+ result = 1
+ while result <= x:
+ result = 2 * result
+ return result // 2
+
+ e = other
+ if e == 0 or (self.__order and e % self.__order == 0):
+ return INFINITY
+ if self == INFINITY:
+ return INFINITY
+ if e < 0:
+ return (-self) * (-e)
+
+ # From X9.62 D.3.2:
+
+ e3 = 3 * e
+ negative_self = Point(self.__curve, self.__x, -self.__y, self.__order)
+ i = leftmost_bit(e3) // 2
+ result = self
+ # print_("Multiplying %s by %d (e3 = %d):" % (self, other, e3))
+ while i > 1:
+ result = result.double()
+ if (e3 & i) != 0 and (e & i) == 0:
+ result = result + self
+ if (e3 & i) == 0 and (e & i) != 0:
+ result = result + negative_self
+ # print_(". . . i = %d, result = %s" % ( i, result ))
+ i = i // 2
+
+ return result
+
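+ # Example trace of the X9.62 D.3.2 ladder above for e = 5 (e3 = 15,
+ # binary 0b1111): starting from result = self the loop doubles twice;
+ # at the one position where e3 has a set bit and e does not, it adds
+ # self, producing 5 * self with a single point addition.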
+ def __rmul__(self, other):
+ """Multiply a point by an integer."""
+
+ return self * other
+
+ def __str__(self):
+ if self == INFINITY:
+ return "infinity"
+ return "(%d,%d)" % (self.__x, self.__y)
+
+ def double(self):
+ """Return a new point that is twice the old."""
+
+ if self == INFINITY:
+ return INFINITY
+
+ # X9.62 B.3:
+
+ p = self.__curve.p()
+ a = self.__curve.a()
+
+ l = ((3 * self.__x * self.__x + a) * \
+ numbertheory.inverse_mod(2 * self.__y, p)) % p
+
+ x3 = (l * l - 2 * self.__x) % p
+ y3 = (l * (self.__x - x3) - self.__y) % p
+
+ return Point(self.__curve, x3, y3)
+
+ def x(self):
+ return self.__x
+
+ def y(self):
+ return self.__y
+
+ def curve(self):
+ return self.__curve
+
+ def order(self):
+ return self.__order
+
+
+# This one point is the Point At Infinity for all purposes:
+INFINITY = Point(None, None, None)
diff --git a/third_party/python/ecdsa/ecdsa/keys.py b/third_party/python/ecdsa/ecdsa/keys.py
new file mode 100644
index 0000000000..172fdf5874
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/keys.py
@@ -0,0 +1,1219 @@
+"""
+Primary classes for performing signing and verification operations.
+
+.. glossary::
+
+ raw encoding
+ Conversion of public, private keys and signatures (which in a
+ mathematical sense are integers or pairs of integers) to strings of
+ bytes that does not use any special tags or encoding rules.
+ For any given curve, all keys of the same type or signatures will be
+ encoded to byte strings of the same length. In a more formal sense,
+ the integers are encoded as big-endian, constant-length byte strings,
+ where the string length is determined by the curve order (e.g.
+ for NIST256p the order is 256 bits long, so the private key will be 32
+ bytes long while the public key will be 64 bytes long). The encoding
+ of a single integer is zero-padded on the left if the numerical value
+ is low. In the case of public keys and signatures, which consist of two
+ integers, the integers are simply concatenated.
+
+ uncompressed
+ The most common formatting specified in PKIX standards. Specified in
+ X9.62 and SEC1 standards. The only difference between it and
+ :term:`raw encoding` is the prepending of a 0x04 byte. Thus an
+ uncompressed NIST256p public key encoding will be 65 bytes long.
+
+ compressed
+ The public point representation that uses half the bytes of the
+ :term:`uncompressed` encoding (rounded up). It uses the first byte of
+ the encoding to specify the sign of the y coordinate and encodes the
+ x coordinate as-is. The first byte of the encoding is equal to
+ 0x02 or 0x03. The compressed encoding of a NIST256p public key will
+ be 33 bytes long.
+
+ hybrid
+ A combination of :term:`uncompressed` and :term:`compressed` encodings.
+ Both x and y coordinates are stored just as in the :term:`uncompressed`
+ encoding, but the first byte encodes the parity of the y coordinate, as
+ in the :term:`compressed` encoding. The first byte will be equal to
+ 0x06 or 0x07. The hybrid encoding of a NIST256p public key will be
+ 65 bytes long.
+
+ PEM
+ The acronym stands for Privacy-Enhanced Mail, but currently it is used
+ primarily as the way to encode :term:`DER` objects into text that can
+ be either easily copy-pasted or transferred over email.
+ It uses headers like ``-----BEGIN <type of contents>-----`` and footers
+ like ``-----END <type of contents>-----`` to separate multiple
+ types of objects in the same file or the object from the surrounding
+ comments. The actual object stored is base64 encoded.
+
+ DER
+ Distinguished Encoding Rules, the way to encode :term:`ASN.1` objects
+ deterministically and uniquely into byte strings.
+
+ ASN.1
+ Abstract Syntax Notation One is a standard description language for
+ specifying serialisation and deserialisation of data structures in a
+ portable and cross-platform way.
+
+ bytes-like object
+ All the types that implement the buffer protocol. That includes
+ ``str`` (only on Python 2), ``bytes``, ``bytearray``, ``array.array``
+ and ``memoryview`` of those objects.
+ Please note that ``array.array`` serialisation (converting it to a byte
+ string) is endianness dependent! A signature computed over an
+ ``array.array`` of integers on a big-endian system will not verify on a
+ little-endian system and vice-versa.
+"""
+
+import binascii
+from hashlib import sha1
+from six import PY3, b
+from . import ecdsa
+from . import der
+from . import rfc6979
+from . import ellipticcurve
+from .curves import NIST192p, find_curve
+from .numbertheory import square_root_mod_prime, SquareRootError
+from .ecdsa import RSZeroError
+from .util import string_to_number, number_to_string, randrange
+from .util import sigencode_string, sigdecode_string
+from .util import oid_ecPublicKey, encoded_oid_ecPublicKey, MalformedSignature
+from ._compat import normalise_bytes
+
+
+__all__ = ["BadSignatureError", "BadDigestError", "VerifyingKey", "SigningKey",
+ "MalformedPointError"]
+
+
+class BadSignatureError(Exception):
+ """
+ Raised when verification of a signature fails.
+
+ Will be raised irrespective of the reason for the failure:
+
+ * the calculated or provided hash does not match the signature
+ * the signature does not match the curve/public key
+ * the encoding of the signature is malformed
+ * the size of the signature does not match the curve of the VerifyingKey
+ """
+
+ pass
+
+
+class BadDigestError(Exception):
+ """Raised in case the selected hash is too large for the curve."""
+
+ pass
+
+
+class MalformedPointError(AssertionError):
+ """Raised in case the encoding of private or public key is malformed."""
+
+ pass
+
+
+class VerifyingKey(object):
+ """
+ Class for handling keys that can verify signatures (public keys).
+
+ :ivar ecdsa.curves.Curve curve: The Curve over which all the cryptographic
+ operations will take place
+ :ivar default_hashfunc: the function that will be used for hashing the
+ data. Should implement the same API as hashlib.sha1
+ :vartype default_hashfunc: callable
+ :ivar pubkey: the actual public key
+ :vartype pubkey: ecdsa.ecdsa.Public_key
+ """
+
+ def __init__(self, _error__please_use_generate=None):
+ """Unsupported, please use one of the classmethods to initialise."""
+ if not _error__please_use_generate:
+ raise TypeError("Please use VerifyingKey.generate() to "
+ "construct me")
+ self.curve = None
+ self.default_hashfunc = None
+ self.pubkey = None
+
+ def __repr__(self):
+ pub_key = self.to_string("compressed")
+ return "VerifyingKey.from_string({0!r}, {1!r}, {2})".format(
+ pub_key, self.curve, self.default_hashfunc().name)
+
+ def __eq__(self, other):
+ """Return True if the points are identical, False otherwise."""
+ if isinstance(other, VerifyingKey):
+ return self.curve == other.curve \
+ and self.pubkey == other.pubkey
+ return NotImplemented
+
+ @classmethod
+ def from_public_point(cls, point, curve=NIST192p, hashfunc=sha1,
+ validate_point=True):
+ """
+ Initialise the object from a Point object.
+
+ This is a low-level method, generally you will not want to use it.
+
+ :param point: The point to wrap around, the actual public key
+ :type point: ecdsa.ellipticcurve.Point
+ :param curve: The curve on which the point needs to reside, defaults
+ to NIST192p
+ :type curve: ecdsa.curves.Curve
+ :param hashfunc: The default hash function that will be used for
+ verification, needs to implement the same interface
+ as hashlib.sha1
+ :type hashfunc: callable
+ :param bool validate_point: whether to check if the point lies on the
+ curve; should always be used if the public point is not a result
+ of our own calculation
+
+ :raises MalformedPointError: if the public point does not lie on the
+ curve
+
+ :return: Initialised VerifyingKey object
+ :rtype: VerifyingKey
+ """
+ self = cls(_error__please_use_generate=True)
+ if not isinstance(point, ellipticcurve.PointJacobi):
+ point = ellipticcurve.PointJacobi.from_affine(point)
+ self.curve = curve
+ self.default_hashfunc = hashfunc
+ try:
+ self.pubkey = ecdsa.Public_key(curve.generator, point,
+ validate_point)
+ except ecdsa.InvalidPointError:
+ raise MalformedPointError("Point does not lie on the curve")
+ self.pubkey.order = curve.order
+ return self
+
+ def precompute(self):
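+ """Precompute multiplication tables for the public point to speed
+ up repeated verification operations."""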
+ self.pubkey.point = ellipticcurve.PointJacobi.from_affine(
+ self.pubkey.point, True)
+
+ @staticmethod
+ def _from_raw_encoding(string, curve):
+ """
+ Decode public point from :term:`raw encoding`.
+
+ :term:`raw encoding` is the same as the :term:`uncompressed` encoding,
+ but without the 0x04 byte at the beginning.
+ """
+ order = curve.order
+ # real assert, from_string() should not call us with different length
+ assert len(string) == curve.verifying_key_length
+ xs = string[:curve.baselen]
+ ys = string[curve.baselen:]
+ if len(xs) != curve.baselen:
+ raise MalformedPointError("Unexpected length of encoded x")
+ if len(ys) != curve.baselen:
+ raise MalformedPointError("Unexpected length of encoded y")
+ x = string_to_number(xs)
+ y = string_to_number(ys)
+
+ return ellipticcurve.PointJacobi(curve.curve, x, y, 1, order)
+
+ @staticmethod
+ def _from_compressed(string, curve):
+ """Decode public point from compressed encoding."""
+ if string[:1] not in (b('\x02'), b('\x03')):
+ raise MalformedPointError("Malformed compressed point encoding")
+
+ is_even = string[:1] == b('\x02')
+ x = string_to_number(string[1:])
+ order = curve.order
+ p = curve.curve.p()
+ alpha = (pow(x, 3, p) + (curve.curve.a() * x) + curve.curve.b()) % p
+ try:
+ beta = square_root_mod_prime(alpha, p)
+ except SquareRootError as e:
+ raise MalformedPointError(
+ "Encoding does not correspond to a point on curve", e)
+ if is_even == bool(beta & 1):
+ y = p - beta
+ else:
+ y = beta
+ return ellipticcurve.PointJacobi(curve.curve, x, y, 1, order)
+
+ @classmethod
+ def _from_hybrid(cls, string, curve, validate_point):
+ """Decode public point from hybrid encoding."""
+ # real assert, from_string() should not call us with different types
+ assert string[:1] in (b('\x06'), b('\x07'))
+
+ # primarily use the uncompressed as it's easiest to handle
+ point = cls._from_raw_encoding(string[1:], curve)
+
+ # but validate if it's self-consistent if we're asked to do that
+ if validate_point \
+ and (point.y() & 1 and string[:1] != b('\x07')
+ or (not point.y() & 1) and string[:1] != b('\x06')):
+ raise MalformedPointError("Inconsistent hybrid point encoding")
+
+ return point
+
+ @classmethod
+ def from_string(cls, string, curve=NIST192p, hashfunc=sha1,
+ validate_point=True):
+ """
+ Initialise the object from a byte encoding of a public key.
+
+ The method accepts and automatically detects the type of point
+ encoding used. It supports the :term:`raw encoding`,
+ :term:`uncompressed`, :term:`compressed` and :term:`hybrid` encodings.
+
+ Note: while the method is named "from_string", the name is a leftover
+ from Python 2 days, when binary and character strings shared a type.
+ In Python 3 the input needs to be a bytes-like object.
+
+ :param string: single point encoding of the public key
+ :type string: :term:`bytes-like object`
+ :param curve: the curve on which the public key is expected to lie
+ :type curve: ecdsa.curves.Curve
+ :param hashfunc: The default hash function that will be used for
+ verification, needs to implement the same interface as hashlib.sha1
+ :type hashfunc: callable
+ :param validate_point: whether to verify that the point lies on the
+ provided curve or not, defaults to True
+ :type validate_point: bool
+
+ :raises MalformedPointError: if the public point does not lie on the
+ curve or the encoding is invalid
+
+ :return: Initialised VerifyingKey object
+ :rtype: VerifyingKey
+ """
+ string = normalise_bytes(string)
+ sig_len = len(string)
+ if sig_len == curve.verifying_key_length:
+ point = cls._from_raw_encoding(string, curve)
+ elif sig_len == curve.verifying_key_length + 1:
+ if string[:1] in (b('\x06'), b('\x07')):
+ point = cls._from_hybrid(string, curve, validate_point)
+ elif string[:1] == b('\x04'):
+ point = cls._from_raw_encoding(string[1:], curve)
+ else:
+ raise MalformedPointError(
+ "Invalid X9.62 encoding of the public point")
+ elif sig_len == curve.baselen + 1:
+ point = cls._from_compressed(string, curve)
+ else:
+ raise MalformedPointError(
+ "Length of string does not match lengths of "
+ "any of the supported encodings of {0} "
+ "curve.".format(curve.name))
+ return cls.from_public_point(point, curve, hashfunc,
+ validate_point)
+
+ @classmethod
+ def from_pem(cls, string, hashfunc=sha1):
+ """
+ Initialise from public key stored in :term:`PEM` format.
+
+ The PEM header of the key should be ``BEGIN PUBLIC KEY``.
+
+ See the :func:`~VerifyingKey.from_der()` method for details of the
+ format supported.
+
+ Note: only a single PEM object encoding is supported in the provided
+ string.
+
+ :param string: text with PEM-encoded public ECDSA key
+ :type string: str
+
+ :return: Initialised VerifyingKey object
+ :rtype: VerifyingKey
+ """
+ return cls.from_der(der.unpem(string), hashfunc=hashfunc)
+
+ @classmethod
+ def from_der(cls, string, hashfunc=sha1):
+ """
+ Initialise from public key stored in :term:`DER` format.
+
+ The expected format of the key is the SubjectPublicKeyInfo structure
+ from RFC5912 (for RSA keys, it's known as the PKCS#1 format)::
+
+ SubjectPublicKeyInfo {PUBLIC-KEY: IOSet} ::= SEQUENCE {
+ algorithm AlgorithmIdentifier {PUBLIC-KEY, {IOSet}},
+ subjectPublicKey BIT STRING
+ }
+
+ Note: only public EC keys are supported by this method. The
+ SubjectPublicKeyInfo.algorithm.algorithm field must specify
+ id-ecPublicKey (see RFC3279).
+
+ Only the named curve encoding is supported, thus the
+ SubjectPublicKeyInfo.algorithm.parameters field needs to be an
+ object identifier. A sequence in that field indicates an explicit
+ parameter curve encoding; this format is not supported. A NULL object
+ in that field indicates an "implicitlyCA" encoding, where the curve
+ parameters come from the CA certificate; those, again, are not supported.
+
+ :param string: binary string with the DER encoding of public ECDSA key
+ :type string: bytes-like object
+
+ :return: Initialised VerifyingKey object
+ :rtype: VerifyingKey
+ """
+ string = normalise_bytes(string)
+ # [[oid_ecPublicKey,oid_curve], point_str_bitstring]
+ s1, empty = der.remove_sequence(string)
+ if empty != b"":
+ raise der.UnexpectedDER("trailing junk after DER pubkey: %s" %
+ binascii.hexlify(empty))
+ s2, point_str_bitstring = der.remove_sequence(s1)
+ # s2 = oid_ecPublicKey,oid_curve
+ oid_pk, rest = der.remove_object(s2)
+ oid_curve, empty = der.remove_object(rest)
+ if empty != b"":
+ raise der.UnexpectedDER("trailing junk after DER pubkey objects: %s" %
+ binascii.hexlify(empty))
+ if oid_pk != oid_ecPublicKey:
+ raise der.UnexpectedDER("Unexpected object identifier in DER "
+ "encoding: {0!r}".format(oid_pk))
+ curve = find_curve(oid_curve)
+ point_str, empty = der.remove_bitstring(point_str_bitstring, 0)
+ if empty != b"":
+ raise der.UnexpectedDER("trailing junk after pubkey pointstring: %s" %
+ binascii.hexlify(empty))
+ # raw encoding of point is invalid in DER files
+ if len(point_str) == curve.verifying_key_length:
+ raise der.UnexpectedDER("Malformed encoding of public point")
+ return cls.from_string(point_str, curve, hashfunc=hashfunc)
+
+ @classmethod
+ def from_public_key_recovery(cls, signature, data, curve, hashfunc=sha1,
+ sigdecode=sigdecode_string):
+ """
+ Return keys that can be used as verifiers of the provided signature.
+
+ Tries to recover the public key that can be used to verify the
+ signature; usually two such keys are returned.
+
+ :param signature: the byte string with the encoded signature
+ :type signature: bytes-like object
+ :param data: the data to be hashed for signature verification
+ :type data: bytes-like object
+ :param curve: the curve over which the signature was performed
+ :type curve: ecdsa.curves.Curve
+ :param hashfunc: The default hash function that will be used for
+ verification, needs to implement the same interface as hashlib.sha1
+ :type hashfunc: callable
+ :param sigdecode: Callable to define the way the signature needs to
+ be decoded to an object, needs to handle `signature` as the
+ first parameter, the curve order (an int) as the second and return
+ a tuple with two integers, "r" as the first one and "s" as the
+ second one. See :func:`ecdsa.util.sigdecode_string` and
+ :func:`ecdsa.util.sigdecode_der` for examples.
+ :type sigdecode: callable
+
+ :return: Initialised VerifyingKey objects
+ :rtype: list of VerifyingKey
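+
+ Example (a sketch; ``signature`` and ``data`` are assumed to come
+ from an earlier signing operation on the same curve, and
+ ``NIST256p`` from :mod:`ecdsa.curves`)::
+
+ for vk in VerifyingKey.from_public_key_recovery(
+ signature, data, NIST256p):
+ vk.verify(signature, data) # the recovered keys verify it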
+ """
+ data = normalise_bytes(data)
+ digest = hashfunc(data).digest()
+ return cls.from_public_key_recovery_with_digest(
+ signature, digest, curve, hashfunc=hashfunc,
+ sigdecode=sigdecode)
+
+ @classmethod
+ def from_public_key_recovery_with_digest(
+ cls, signature, digest, curve,
+ hashfunc=sha1, sigdecode=sigdecode_string):
+ """
+ Return keys that can be used as verifiers of the provided signature.
+
+ Tries to recover the public key that can be used to verify the
+ signature; usually two such keys are returned.
+
+ :param signature: the byte string with the encoded signature
+ :type signature: bytes-like object
+ :param digest: the hash value of the message signed by the signature
+ :type digest: bytes-like object
+ :param curve: the curve over which the signature was performed
+ :type curve: ecdsa.curves.Curve
+ :param hashfunc: The default hash function that will be used for
+ verification, needs to implement the same interface as hashlib.sha1
+ :type hashfunc: callable
+ :param sigdecode: Callable to define the way the signature needs to
+ be decoded to an object, needs to handle `signature` as the
+ first parameter, the curve order (an int) as the second and return
+ a tuple with two integers, "r" as the first one and "s" as the
+ second one. See :func:`ecdsa.util.sigdecode_string` and
+ :func:`ecdsa.util.sigdecode_der` for examples.
+ :type sigdecode: callable
+
+
+ :return: Initialised VerifyingKey objects
+ :rtype: list of VerifyingKey
+ """
+ generator = curve.generator
+ r, s = sigdecode(signature, generator.order())
+ sig = ecdsa.Signature(r, s)
+
+ digest = normalise_bytes(digest)
+ digest_as_number = string_to_number(digest)
+ pks = sig.recover_public_keys(digest_as_number, generator)
+
+ # Transforms the ecdsa.Public_key object into a VerifyingKey
+ verifying_keys = [cls.from_public_point(pk.point, curve, hashfunc)
+ for pk in pks]
+ return verifying_keys
+
+ def _raw_encode(self):
+ """Convert the public key to the :term:`raw encoding`."""
+ order = self.pubkey.order
+ x_str = number_to_string(self.pubkey.point.x(), order)
+ y_str = number_to_string(self.pubkey.point.y(), order)
+ return x_str + y_str
+
+ def _compressed_encode(self):
+ """Encode the public point into the compressed form."""
+ order = self.pubkey.order
+ x_str = number_to_string(self.pubkey.point.x(), order)
+ if self.pubkey.point.y() & 1:
+ return b('\x03') + x_str
+ else:
+ return b('\x02') + x_str
+
+ def _hybrid_encode(self):
+ """Encode the public point into the hybrid form."""
+ raw_enc = self._raw_encode()
+ if self.pubkey.point.y() & 1:
+ return b('\x07') + raw_enc
+ else:
+ return b('\x06') + raw_enc
+
+ def to_string(self, encoding="raw"):
+ """
+ Convert the public key to a byte string.
+
+ The method by default uses the :term:`raw encoding` (specified
+ by ``encoding="raw"``). It can also output keys in :term:`uncompressed`,
+ :term:`compressed` and :term:`hybrid` formats.
+
+ Remember that the curve identification is not part of the encoding
+ so to decode the point using :func:`~VerifyingKey.from_string`, curve
+ needs to be specified.
+
+ Note: while the method is called "to_string", the name is a leftover
+ from Python 2 days, when character strings and byte strings shared a
+ type. On Python 3 the returned type will be `bytes`.
+
+ :return: :term:`raw encoding` of the public key (public point) on the
+ curve
+ :rtype: bytes
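+
+ Example of the different encodings (a sketch; ``vk`` is assumed to
+ be a VerifyingKey on NIST256p, the lengths are curve dependent)::
+
+ vk.to_string("raw") # 64 bytes: x || y
+ vk.to_string("uncompressed") # 65 bytes: 0x04 || x || y
+ vk.to_string("compressed") # 33 bytes: 0x02/0x03 || x
+ vk.to_string("hybrid") # 65 bytes: 0x06/0x07 || x || y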
+ """
+ assert encoding in ("raw", "uncompressed", "compressed", "hybrid")
+ if encoding == "raw":
+ return self._raw_encode()
+ elif encoding == "uncompressed":
+ return b('\x04') + self._raw_encode()
+ elif encoding == "hybrid":
+ return self._hybrid_encode()
+ else:
+ return self._compressed_encode()
+
+ def to_pem(self, point_encoding="uncompressed"):
+ """
+ Convert the public key to the :term:`PEM` format.
+
+ The PEM header of the key will be ``BEGIN PUBLIC KEY``.
+
+ The format of the key is described in the
+ :func:`~VerifyingKey.from_der()` method.
+ This method supports only "named curve" encoding of keys.
+
+ :param str point_encoding: specification of the encoding format
+ of public keys. "uncompressed" is most portable, "compressed" is
+ smallest. "hybrid" is uncommon and unsupported by most
+ implementations, it is as big as "uncompressed".
+
+ :return: portable encoding of the public key
+ :rtype: str
+ """
+ return der.topem(self.to_der(point_encoding), "PUBLIC KEY")
+
+ def to_der(self, point_encoding="uncompressed"):
+ """
+ Convert the public key to the :term:`DER` format.
+
+ The format of the key is described in the
+ :func:`~VerifyingKey.from_der()` method.
+ This method supports only "named curve" encoding of keys.
+
+ :param str point_encoding: specification of the encoding format
+ of public keys. "uncompressed" is most portable, "compressed" is
+ smallest. "hybrid" is uncommon and unsupported by most
+ implementations, it is as big as "uncompressed".
+
+ :return: DER encoding of the public key
+ :rtype: bytes
+ """
+ if point_encoding == "raw":
+ raise ValueError("raw point_encoding not allowed in DER")
+ point_str = self.to_string(point_encoding)
+ return der.encode_sequence(der.encode_sequence(encoded_oid_ecPublicKey,
+ self.curve.encoded_oid),
+ # 0 is the number of unused bits in the
+ # bit string
+ der.encode_bitstring(point_str, 0))
+
+ def verify(self, signature, data, hashfunc=None,
+ sigdecode=sigdecode_string):
+ """
+ Verify a signature made over provided data.
+
+ Will hash `data` to verify the signature.
+
+ By default expects signature in :term:`raw encoding`. Can also be used
+ to verify signatures in ASN.1 DER encoding by using
+ :func:`ecdsa.util.sigdecode_der`
+ as the `sigdecode` parameter.
+
+ :param signature: encoding of the signature
+ :type signature: sigdecode method dependent
+ :param data: data signed by the `signature`, will be hashed using
+ `hashfunc`, if specified, or the default hash function
+ :type data: bytes-like object
+ :param hashfunc: The default hash function that will be used for
+ verification, needs to implement the same interface as hashlib.sha1
+ :type hashfunc: callable
+ :param sigdecode: Callable to define the way the signature needs to
+ be decoded to an object, needs to handle `signature` as the
+ first parameter, the curve order (an int) as the second and return
+ a tuple with two integers, "r" as the first one and "s" as the
+ second one. See :func:`ecdsa.util.sigdecode_string` and
+ :func:`ecdsa.util.sigdecode_der` for examples.
+ :type sigdecode: callable
+
+ :raises BadSignatureError: if the signature is invalid or malformed
+
+ :return: True if the verification was successful
+ :rtype: bool
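+
+ Example of verifying a DER-encoded signature (a sketch;
+ ``signature`` and ``data`` are assumed to come from an earlier
+ signing step)::
+
+ from ecdsa.util import sigdecode_der
+ assert vk.verify(signature, data, sigdecode=sigdecode_der)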
+ """
+ # signature doesn't have to be a bytes-like object so don't normalise
+ # it, the decoders will do that
+ data = normalise_bytes(data)
+
+ hashfunc = hashfunc or self.default_hashfunc
+ digest = hashfunc(data).digest()
+ return self.verify_digest(signature, digest, sigdecode, True)
+
+ def verify_digest(self, signature, digest, sigdecode=sigdecode_string,
+ allow_truncate=False):
+ """
+ Verify a signature made over provided hash value.
+
+ By default expects signature in :term:`raw encoding`. Can also be used
+ to verify signatures in ASN.1 DER encoding by using
+ :func:`ecdsa.util.sigdecode_der`
+ as the `sigdecode` parameter.
+
+ :param signature: encoding of the signature
+ :type signature: sigdecode method dependent
+ :param digest: raw hash value that the signature authenticates.
+ :type digest: bytes-like object
+ :param sigdecode: Callable to define the way the signature needs to
+ be decoded to an object, needs to handle `signature` as the
+ first parameter, the curve order (an int) as the second and return
+ a tuple with two integers, "r" as the first one and "s" as the
+ second one. See :func:`ecdsa.util.sigdecode_string` and
+ :func:`ecdsa.util.sigdecode_der` for examples.
+ :type sigdecode: callable
+ :param bool allow_truncate: if True, the provided digest can have
+ bigger bit-size than the order of the curve, the extra bits (at
+ the end of the digest) will be truncated. Use it when verifying
+ SHA-384 output using NIST256p or in similar situations.
+
+ :raises BadSignatureError: if the signature is invalid or malformed
+ :raises BadDigestError: if the provided digest is too big for the curve
+ associated with this VerifyingKey and allow_truncate was not set
+
+ :return: True if the verification was successful
+ :rtype: bool
+ """
+ # signature doesn't have to be a bytes-like object so don't normalise
+ # it, the decoders will do that
+ digest = normalise_bytes(digest)
+ if allow_truncate:
+ digest = digest[:self.curve.baselen]
+ if len(digest) > self.curve.baselen:
+ raise BadDigestError("this curve (%s) is too short "
+ "for your digest (%d)" % (self.curve.name,
+ 8 * len(digest)))
+ number = string_to_number(digest)
+ try:
+ r, s = sigdecode(signature, self.pubkey.order)
+ except (der.UnexpectedDER, MalformedSignature) as e:
+ raise BadSignatureError("Malformed formatting of signature", e)
+ sig = ecdsa.Signature(r, s)
+ if self.pubkey.verifies(number, sig):
+ return True
+ raise BadSignatureError("Signature verification failed")
+
+
+class SigningKey(object):
+ """
+ Class for handling keys that can create signatures (private keys).
+
+ :ivar ecdsa.curves.Curve curve: The Curve over which all the cryptographic
+ operations will take place
+ :ivar default_hashfunc: the function that will be used for hashing the
+ data. Should implement the same API as hashlib.sha1
+ :ivar int baselen: the length of a :term:`raw encoding` of private key
+ :ivar ecdsa.keys.VerifyingKey verifying_key: the public key
+ associated with this private key
+ :ivar ecdsa.ecdsa.Private_key privkey: the actual private key
+ """
+
+ def __init__(self, _error__please_use_generate=None):
+ """Unsupported, please use one of the classmethods to initialise."""
+ if not _error__please_use_generate:
+ raise TypeError("Please use SigningKey.generate() to construct me")
+ self.curve = None
+ self.default_hashfunc = None
+ self.baselen = None
+ self.verifying_key = None
+ self.privkey = None
+
+ def __eq__(self, other):
+ """Return True if the points are identical, False otherwise."""
+ if isinstance(other, SigningKey):
+ return self.curve == other.curve \
+ and self.verifying_key == other.verifying_key \
+ and self.privkey == other.privkey
+ return NotImplemented
+
+ @classmethod
+ def generate(cls, curve=NIST192p, entropy=None, hashfunc=sha1):
+ """
+ Generate a random private key.
+
+ :param curve: The curve on which the point needs to reside, defaults
+ to NIST192p
+ :type curve: ecdsa.curves.Curve
+ :param entropy: Source of randomness for generating the private keys,
+ should provide cryptographically secure random numbers if the keys
+ need to be secure. Uses os.urandom() by default.
+ :type entropy: callable
+ :param hashfunc: The default hash function that will be used for
+ signing, needs to implement the same interface
+ as hashlib.sha1
+ :type hashfunc: callable
+
+ :return: Initialised SigningKey object
+ :rtype: SigningKey
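+
+ Example (a sketch; ``NIST256p`` imported from :mod:`ecdsa.curves`)::
+
+ sk = SigningKey.generate(curve=NIST256p)
+ vk = sk.get_verifying_key()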
+ """
+ secexp = randrange(curve.order, entropy)
+ return cls.from_secret_exponent(secexp, curve, hashfunc)
+
+ @classmethod
+ def from_secret_exponent(cls, secexp, curve=NIST192p, hashfunc=sha1):
+ """
+ Create a private key from a random integer.
+
+ Note: this is a low-level method; it's recommended to use the
+ :func:`~SigningKey.generate` method to create private keys.
+
+ :param int secexp: secret multiplier (the actual private key in ECDSA).
+ Needs to be an integer between 1 and the curve order.
+ :param curve: The curve on which the point needs to reside
+ :type curve: ecdsa.curves.Curve
+ :param hashfunc: The default hash function that will be used for
+ signing, needs to implement the same interface
+ as hashlib.sha1
+ :type hashfunc: callable
+
+ :raises MalformedPointError: when the provided secexp is too large
+ or too small for the curve selected
+ :raises RuntimeError: if the generation of public key from private
+ key failed
+
+ :return: Initialised SigningKey object
+ :rtype: SigningKey
+ """
+ self = cls(_error__please_use_generate=True)
+ self.curve = curve
+ self.default_hashfunc = hashfunc
+ self.baselen = curve.baselen
+ n = curve.order
+ if not 1 <= secexp < n:
+ raise MalformedPointError(
+ "Invalid value for secexp, expected integer between 1 and {0}"
+ .format(n))
+ pubkey_point = curve.generator * secexp
+ if hasattr(pubkey_point, "scale"):
+ pubkey_point = pubkey_point.scale()
+ self.verifying_key = VerifyingKey.from_public_point(pubkey_point, curve,
+ hashfunc, False)
+ pubkey = self.verifying_key.pubkey
+ self.privkey = ecdsa.Private_key(pubkey, secexp)
+ self.privkey.order = n
+ return self
+
+ @classmethod
+ def from_string(cls, string, curve=NIST192p, hashfunc=sha1):
+ """
+ Decode the private key from :term:`raw encoding`.
+
+ Note: the name of this method is a misnomer coming from days of
+ Python 2, when binary strings and character strings shared a type.
+ In Python 3, the expected type is `bytes`.
+
+ :param string: the raw encoding of the private key
+ :type string: bytes-like object
+ :param curve: The curve on which the point needs to reside
+ :type curve: ecdsa.curves.Curve
+ :param hashfunc: The default hash function that will be used for
+ signing, needs to implement the same interface
+ as hashlib.sha1
+ :type hashfunc: callable
+
+ :raises MalformedPointError: if the length of encoding doesn't match
+ the provided curve or the encoded value is too large
+ :raises RuntimeError: if the generation of public key from private
+ key failed
+
+ :return: Initialised SigningKey object
+ :rtype: SigningKey
+ """
+ string = normalise_bytes(string)
+ if len(string) != curve.baselen:
+ raise MalformedPointError(
+ "Invalid length of private key, received {0}, expected {1}"
+ .format(len(string), curve.baselen))
+ secexp = string_to_number(string)
+ return cls.from_secret_exponent(secexp, curve, hashfunc)
+
+ @classmethod
+ def from_pem(cls, string, hashfunc=sha1):
+ """
+ Initialise from key stored in :term:`PEM` format.
+
+ Note: the only PEM format supported is the unencrypted RFC5915
+ (the ssleay format) supported by OpenSSL; the more common PKCS#8 format
+ is NOT supported (see:
+ https://github.com/warner/python-ecdsa/issues/113 )
+
+ ``openssl ec -in pkcs8.pem -out ssleay.pem`` can be used to
+ convert a PKCS#8 file to this legacy format.
+
+ The legacy format files have the header with the string
+ ``BEGIN EC PRIVATE KEY``.
+ Encrypted files (ones that include the string
+ ``Proc-Type: 4,ENCRYPTED``
+ right after the PEM header) are not supported.
+
+ See :func:`~SigningKey.from_der` for the ASN.1 syntax of the objects in
+ these files.
+
+ :param string: text with PEM-encoded private ECDSA key
+ :type string: str
+
+ :raises MalformedPointError: if the length of encoding doesn't match
+ the provided curve or the encoded value is too large
+ :raises RuntimeError: if the generation of public key from private
+ key failed
+ :raises UnexpectedDER: if the encoding of the PEM file is incorrect
+
+ :return: Initialised SigningKey object
+ :rtype: SigningKey
+ """
+ # the privkey pem may have multiple sections, commonly it also has
+ # "EC PARAMETERS", we need just "EC PRIVATE KEY".
+ if PY3 and isinstance(string, str):
+ string = string.encode()
+ privkey_pem = string[string.index(b("-----BEGIN EC PRIVATE KEY-----")):]
+ return cls.from_der(der.unpem(privkey_pem), hashfunc)
+
+ @classmethod
+ def from_der(cls, string, hashfunc=sha1):
+ """
+ Initialise from key stored in :term:`DER` format.
+
+ Note: the only DER format supported is the RFC5915
+ (the ssleay format) supported by OpenSSL; the more common PKCS#8 format
+ is NOT supported (see:
+ https://github.com/warner/python-ecdsa/issues/113 )
+
+ ``openssl ec -in pkcs8.pem -outform der -out ssleay.der`` can be
+ used to convert a PKCS#8 file to this legacy format.
+
+ The encoding of the ASN.1 object in those files follows the syntax
+ specified in RFC5915::
+
+ ECPrivateKey ::= SEQUENCE {
+ version INTEGER { ecPrivkeyVer1(1) } (ecPrivkeyVer1),
+ privateKey OCTET STRING,
+ parameters [0] ECParameters {{ NamedCurve }} OPTIONAL,
+ publicKey [1] BIT STRING OPTIONAL
+ }
+
+ The only format supported for the `parameters` field is the named
+ curve method. Explicit encoding of curve parameters is not supported.
+
+ While the `parameters` field is defined as optional, this implementation
+ requires its presence for correct parsing of the keys.
+
+ The `publicKey` field is ignored completely (errors, if any, in it will
+ be undetected).
+
+ :param string: binary string with DER-encoded private ECDSA key
+ :type string: bytes-like object
+
+ :raises MalformedPointError: if the length of encoding doesn't match
+ the provided curve or the encoded value is too large
+ :raises RuntimeError: if the generation of public key from private
+ key failed
+ :raises UnexpectedDER: if the encoding of the DER file is incorrect
+
+ :return: Initialised SigningKey object
+ :rtype: SigningKey
+ """
+ string = normalise_bytes(string)
+ s, empty = der.remove_sequence(string)
+ if empty != b(""):
+ raise der.UnexpectedDER("trailing junk after DER privkey: %s" %
+ binascii.hexlify(empty))
+ one, s = der.remove_integer(s)
+ if one != 1:
+ raise der.UnexpectedDER("expected '1' at start of DER privkey,"
+ " got %d" % one)
+ privkey_str, s = der.remove_octet_string(s)
+ tag, curve_oid_str, s = der.remove_constructed(s)
+ if tag != 0:
+ raise der.UnexpectedDER("expected tag 0 in DER privkey,"
+ " got %d" % tag)
+ curve_oid, empty = der.remove_object(curve_oid_str)
+ if empty != b(""):
+ raise der.UnexpectedDER("trailing junk after DER privkey "
+ "curve_oid: %s" % binascii.hexlify(empty))
+ curve = find_curve(curve_oid)
+
+ # we don't actually care about the following fields
+ #
+ # tag, pubkey_bitstring, s = der.remove_constructed(s)
+ # if tag != 1:
+ # raise der.UnexpectedDER("expected tag 1 in DER privkey, got %d"
+ # % tag)
+ # pubkey_str = der.remove_bitstring(pubkey_bitstring, 0)
+ # if empty != "":
+ # raise der.UnexpectedDER("trailing junk after DER privkey "
+ # "pubkeystr: %s" % binascii.hexlify(empty))
+
+ # our from_string method likes fixed-length privkey strings
+ if len(privkey_str) < curve.baselen:
+ privkey_str = b("\x00") * (curve.baselen - len(privkey_str)) + privkey_str
+ return cls.from_string(privkey_str, curve, hashfunc)
+
+ def to_string(self):
+ """
+ Convert the private key to :term:`raw encoding`.
+
+ Note: while the method is named "to_string", its name comes from
+ Python 2 days, when binary and character strings used the same type.
+ The type used in Python 3 is `bytes`.
+
+ :return: raw encoding of private key
+ :rtype: bytes
+ """
+ secexp = self.privkey.secret_multiplier
+ s = number_to_string(secexp, self.privkey.order)
+ return s
+
+ def to_pem(self, point_encoding="uncompressed"):
+ """
+ Convert the private key to the :term:`PEM` format.
+
+ See :func:`~SigningKey.from_pem` method for format description.
+
+ Only the named curve format is supported.
+ The public key will be included in the generated string.
+
+ The PEM header will specify ``BEGIN EC PRIVATE KEY``
+
+ :param str point_encoding: format to use for encoding public point
+
+ :return: PEM encoded private key
+ :rtype: str
+ """
+ # TODO: "BEGIN ECPARAMETERS"
+ return der.topem(self.to_der(point_encoding), "EC PRIVATE KEY")
+
+ def to_der(self, point_encoding="uncompressed"):
+ """
+ Convert the private key to the :term:`DER` format.
+
+ See :func:`~SigningKey.from_der` method for format specification.
+
+ Only the named curve format is supported.
+ The public key will be included in the generated string.
+
+ :param str point_encoding: format to use for encoding public point
+
+ :return: DER encoded private key
+ :rtype: bytes
+ """
+ # SEQ([int(1), octetstring(privkey),cont[0], oid(secp224r1),
+ # cont[1],bitstring])
+ if point_encoding == "raw":
+ raise ValueError("raw encoding not allowed in DER")
+ encoded_vk = self.get_verifying_key().to_string(point_encoding)
+ # the 0 in encode_bitstring specifies the number of unused bits
+ # in the `encoded_vk` string
+ return der.encode_sequence(
+ der.encode_integer(1),
+ der.encode_octet_string(self.to_string()),
+ der.encode_constructed(0, self.curve.encoded_oid),
+ der.encode_constructed(1, der.encode_bitstring(encoded_vk, 0)))
+
+ def get_verifying_key(self):
+ """
+ Return the VerifyingKey associated with this private key.
+
+ Equivalent to reading the `verifying_key` field of an instance.
+
+ :return: a public key that can be used to verify the signatures made
+ with this SigningKey
+ :rtype: VerifyingKey
+ """
+ return self.verifying_key
+
+ def sign_deterministic(self, data, hashfunc=None,
+ sigencode=sigencode_string,
+ extra_entropy=b''):
+ """
+ Create a signature over data using the deterministic RFC6979 algorithm.
+
+ The data will be hashed using the `hashfunc` function before signing.
+
+ This is the recommended method for performing signatures when hashing
+ of data is necessary.
+
+ :param data: the data to be hashed and signed
+ :type data: bytes-like object
+ :param hashfunc: hash function to use for computing the signature,
+ if unspecified, the default hash function selected during
+ object initialisation will be used (see
+ `VerifyingKey.default_hashfunc`). The object needs to implement
+ the same interface as hashlib.sha1.
+ :type hashfunc: callable
+ :param sigencode: function used to encode the signature.
+ The function needs to accept three parameters: the two integers
+ that are the signature and the order of the curve over which the
+ signature was computed. It needs to return an encoded signature.
+ See `ecdsa.util.sigencode_string` and `ecdsa.util.sigencode_der`
+ as examples of such functions.
+ :type sigencode: callable
+ :param extra_entropy: additional data that will be fed into the random
+ number generator used in the RFC6979 process. Entirely optional.
+ :type extra_entropy: bytes-like object
+
+ :return: encoded signature over `data`
+ :rtype: bytes or sigencode function dependent type
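+
+ Example (a sketch; assumes ``sk`` is a SigningKey, ``vk`` the
+ matching VerifyingKey, and that the caller imported ``hashlib``)::
+
+ sig = sk.sign_deterministic(b"message", hashfunc=hashlib.sha256)
+ vk.verify(sig, b"message", hashfunc=hashlib.sha256)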
+ """
+ hashfunc = hashfunc or self.default_hashfunc
+ data = normalise_bytes(data)
+ extra_entropy = normalise_bytes(extra_entropy)
+ digest = hashfunc(data).digest()
+
+ return self.sign_digest_deterministic(
+ digest, hashfunc=hashfunc, sigencode=sigencode,
+ extra_entropy=extra_entropy, allow_truncate=True)
+
+ def sign_digest_deterministic(self, digest, hashfunc=None,
+ sigencode=sigencode_string,
+ extra_entropy=b'', allow_truncate=False):
+ """
+ Create a signature for a digest using the deterministic RFC6979 algorithm.
+
+ `digest` should be the output of a cryptographically secure hash
+ function, like SHA-256 or SHA3-256.
+
+ This is the recommended method for performing signatures when no
+ hashing of data is necessary.
+
+ :param digest: hash of data that will be signed
+ :type digest: bytes-like object
+ :param hashfunc: hash function to use for computing the random "k"
+ value from RFC6979 process,
+ if unspecified, the default hash function selected during
+ object initialisation will be used (see
+ `VerifyingKey.default_hashfunc`). The object needs to implement
+ the same interface as hashlib.sha1.
+ :type hashfunc: callable
+ :param sigencode: function used to encode the signature.
+ The function needs to accept three parameters: the two integers
+ that are the signature and the order of the curve over which the
+ signature was computed. It needs to return an encoded signature.
+ See `ecdsa.util.sigencode_string` and `ecdsa.util.sigencode_der`
+ as examples of such functions.
+ :type sigencode: callable
+ :param extra_entropy: additional data that will be fed into the random
+ number generator used in the RFC6979 process. Entirely optional.
+ :type extra_entropy: bytes-like object
+ :param bool allow_truncate: if True, the provided digest can have
+ bigger bit-size than the order of the curve, the extra bits (at
+ the end of the digest) will be truncated. Use it when signing
+ SHA-384 output using NIST256p or in similar situations.
+
+ :return: encoded signature for the `digest` hash
+ :rtype: bytes or sigencode function dependent type
+ """
+ secexp = self.privkey.secret_multiplier
+ hashfunc = hashfunc or self.default_hashfunc
+ digest = normalise_bytes(digest)
+ extra_entropy = normalise_bytes(extra_entropy)
+
+ def simple_r_s(r, s, order):
+ return r, s, order
+
+ retry_gen = 0
+ while True:
+ k = rfc6979.generate_k(
+ self.curve.generator.order(), secexp, hashfunc, digest,
+ retry_gen=retry_gen, extra_entropy=extra_entropy)
+ try:
+ r, s, order = self.sign_digest(digest,
+ sigencode=simple_r_s,
+ k=k,
+ allow_truncate=allow_truncate)
+ break
+ except RSZeroError:
+ retry_gen += 1
+
+ return sigencode(r, s, order)
+
+ def sign(self, data, entropy=None, hashfunc=None,
+ sigencode=sigencode_string, k=None):
+ """
+ Create signature over data using the probabilistic ECDSA algorithm.
+
+ This method uses the standard ECDSA algorithm that requires a
+ cryptographically secure random number generator.
+
+ It's recommended to use the :func:`~SigningKey.sign_deterministic`
+ method instead of this one.
+
+ :param data: data that will be hashed for signing
+ :type data: bytes-like object
+ :param callable entropy: randomness source, os.urandom by default
+ :param hashfunc: hash function to use for hashing the provided `data`.
+ If unspecified the default hash function selected during
+ object initialisation will be used (see
+ `VerifyingKey.default_hashfunc`).
+ Should behave like hashlib.sha1. The output length of the
+ hash (in bytes) must not be longer than the length of the curve
+ order (rounded up to the nearest byte), so using SHA256 with
+ NIST256p is ok, but SHA256 with NIST192p is not. (In the 2**-96ish
+ unlikely event of a hash output larger than the curve order, the
+ hash will effectively be wrapped mod n).
+ Use hashfunc=hashlib.sha1 to match openssl's -ecdsa-with-SHA1 mode,
+ or hashfunc=hashlib.sha256 for openssl-1.0.0's -ecdsa-with-SHA256.
+ :type hashfunc: callable
+ :param sigencode: function used to encode the signature.
+ The function needs to accept three parameters: the two integers
+ that are the signature and the order of the curve over which the
+ signature was computed. It needs to return an encoded signature.
+ See `ecdsa.util.sigencode_string` and `ecdsa.util.sigencode_der`
+ as examples of such functions.
+ :type sigencode: callable
+ :param int k: a pre-selected nonce for calculating the signature.
+ In typical use cases, it should be set to None (the default) to
+ allow its generation from an entropy source.
+
+ :raises RSZeroError: in the unlikely event that the "r" or "s"
+ parameter equals 0, as that would leak the key. The caller should
+ try a better entropy source or a different 'k' in such a case.
+
+ :return: encoded signature of the hash of `data`
+ :rtype: bytes or sigencode function dependent type
+ """
+ hashfunc = hashfunc or self.default_hashfunc
+ data = normalise_bytes(data)
+ h = hashfunc(data).digest()
+ return self.sign_digest(h, entropy, sigencode, k, allow_truncate=True)
+
+ def sign_digest(self, digest, entropy=None, sigencode=sigencode_string,
+ k=None, allow_truncate=False):
+ """
+ Create signature over digest using the probabilistic ECDSA algorithm.
+
+ This method uses the standard ECDSA algorithm that requires a
+ cryptographically secure random number generator.
+
+ This method does not hash the input.
+
+ It's recommended to use the
+ :func:`~SigningKey.sign_digest_deterministic` method
+ instead of this one.
+
+ :param digest: hash value that will be signed
+ :type digest: bytes-like object
+ :param callable entropy: randomness source, os.urandom by default
+ :param sigencode: function used to encode the signature.
+ The function needs to accept three parameters: the two integers
+ that are the signature and the order of the curve over which the
+ signature was computed. It needs to return an encoded signature.
+ See `ecdsa.util.sigencode_string` and `ecdsa.util.sigencode_der`
+ as examples of such functions.
+ :type sigencode: callable
+ :param int k: a pre-selected nonce for calculating the signature.
+ In typical use cases, it should be set to None (the default) to
+ allow its generation from an entropy source.
+ :param bool allow_truncate: if True, the provided digest can have
+ bigger bit-size than the order of the curve, the extra bits (at
+ the end of the digest) will be truncated. Use it when signing
+ SHA-384 output using NIST256p or in similar situations.
+
+ :raises RSZeroError: in the unlikely event that the "r" or "s"
+ parameter equals 0, as that would leak the key. The caller should
+ try a better entropy source in such a case.
+
+ :return: encoded signature for the `digest` hash
+ :rtype: bytes or sigencode function dependent type
+ """
+ digest = normalise_bytes(digest)
+ if allow_truncate:
+ digest = digest[:self.curve.baselen]
+ if len(digest) > self.curve.baselen:
+ raise BadDigestError("this curve (%s) is too short "
+ "for your digest (%d)" % (self.curve.name,
+ 8 * len(digest)))
+ number = string_to_number(digest)
+ r, s = self.sign_number(number, entropy, k)
+ return sigencode(r, s, self.privkey.order)
+
+ def sign_number(self, number, entropy=None, k=None):
+ """
+ Sign an integer directly.
+
+ Note, this is a low level method, usually you will want to use
+ :func:`~SigningKey.sign_deterministic` or
+ :func:`~SigningKey.sign_digest_deterministic`.
+
+ :param int number: number to sign using the probabilistic ECDSA
+ algorithm.
+ :param callable entropy: entropy source, os.urandom by default
+ :param int k: pre-selected nonce for signature operation. If unset
+ it will be selected at random using the entropy source.
+
+ :raises RSZeroError: in the unlikely event that the "r" or "s"
+ parameter equals 0, as that would leak the key. The caller should
+ try a different 'k' in such a case.
+
+ :return: the "r" and "s" parameters of the signature
+ :rtype: tuple of ints
+ """
+ order = self.privkey.order
+
+ if k is not None:
+ _k = k
+ else:
+ _k = randrange(order, entropy)
+
+ assert 1 <= _k < order
+ sig = self.privkey.sign(number, _k)
+ return sig.r, sig.s
diff --git a/third_party/python/ecdsa/ecdsa/numbertheory.py b/third_party/python/ecdsa/ecdsa/numbertheory.py
new file mode 100644
index 0000000000..b300440c59
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/numbertheory.py
@@ -0,0 +1,600 @@
+#! /usr/bin/env python
+#
+# Provide some simple capabilities from number theory.
+#
+# Version of 2008.11.14.
+#
+# Written in 2005 and 2006 by Peter Pearson and placed in the public domain.
+# Revision history:
+# 2008.11.14: Use pow(base, exponent, modulus) for modular_exp.
+# Make gcd and lcm accept arbitrarily many arguments.
+
+from __future__ import division
+
+from six import integer_types, PY3
+from six.moves import reduce
+try:
+ xrange
+except NameError:
+ xrange = range
+try:
+ from gmpy2 import powmod
+ GMPY2 = True
+ GMPY = False
+except ImportError:
+ GMPY2 = False
+ try:
+ from gmpy import mpz
+ GMPY = True
+ except ImportError:
+ GMPY = False
+
+import math
+import warnings
+
+
+class Error(Exception):
+ """Base class for exceptions in this module."""
+ pass
+
+
+class SquareRootError(Error):
+ pass
+
+
+class NegativeExponentError(Error):
+ pass
+
+
+def modular_exp(base, exponent, modulus): # pragma: no cover
+ """Raise base to exponent, reducing by modulus"""
+ # deprecated in 0.14
+ warnings.warn("Function is unused in library code. If you use this code, "
+ "change to pow() builtin.", DeprecationWarning)
+ if exponent < 0:
+ raise NegativeExponentError("Negative exponents (%d) not allowed"
+ % exponent)
+ return pow(base, exponent, modulus)
+
+
+def polynomial_reduce_mod(poly, polymod, p):
+ """Reduce poly by polymod, integer arithmetic modulo p.
+
+ Polynomials are represented as lists of coefficients
+ of increasing powers of x."""
+
+ # This module has been tested only by extensive use
+ # in calculating modular square roots.
+
+ # Just to make this easy, require a monic polynomial:
+ assert polymod[-1] == 1
+
+ assert len(polymod) > 1
+
+ while len(poly) >= len(polymod):
+ if poly[-1] != 0:
+ for i in xrange(2, len(polymod) + 1):
+ poly[-i] = (poly[-i] - poly[-1] * polymod[-i]) % p
+ poly = poly[0:-1]
+
+ return poly
+
+
+def polynomial_multiply_mod(m1, m2, polymod, p):
+ """Polynomial multiplication modulo a polynomial over ints mod p.
+
+ Polynomials are represented as lists of coefficients
+ of increasing powers of x."""
+
+ # This is just a seat-of-the-pants implementation.
+
+ # This module has been tested only by extensive use
+ # in calculating modular square roots.
+
+ # Initialize the product to zero:
+
+ prod = (len(m1) + len(m2) - 1) * [0]
+
+ # Add together all the cross-terms:
+
+ for i in xrange(len(m1)):
+ for j in xrange(len(m2)):
+ prod[i + j] = (prod[i + j] + m1[i] * m2[j]) % p
+
+ return polynomial_reduce_mod(prod, polymod, p)
+
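+# For example, with coefficients mod p = 5 and polymod = [1, 0, 1]
+# (i.e. x**2 + 1), polynomial_multiply_mod([1, 1], [1, 1], [1, 0, 1], 5)
+# returns [0, 2], matching (1 + x)**2 == 2*x (mod x**2 + 1).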
+
+def polynomial_exp_mod(base, exponent, polymod, p):
+ """Polynomial exponentiation modulo a polynomial over ints mod p.
+
+ Polynomials are represented as lists of coefficients
+ of increasing powers of x."""
+
+ # Based on the Handbook of Applied Cryptography, algorithm 2.227.
+
+ # This module has been tested only by extensive use
+ # in calculating modular square roots.
+
+ assert exponent < p
+
+ if exponent == 0:
+ return [1]
+
+ G = base
+ k = exponent
+ if k % 2 == 1:
+ s = G
+ else:
+ s = [1]
+
+ while k > 1:
+ k = k // 2
+ G = polynomial_multiply_mod(G, G, polymod, p)
+ if k % 2 == 1:
+ s = polynomial_multiply_mod(G, s, polymod, p)
+
+ return s
+
+
+def jacobi(a, n):
+ """Jacobi symbol"""
+
+ # Based on the Handbook of Applied Cryptography (HAC), algorithm 2.149.
+
+ # This function has been tested by comparison with a small
+ # table printed in HAC, and by extensive use in calculating
+ # modular square roots.
+
+ assert n >= 3
+ assert n % 2 == 1
+ a = a % n
+ if a == 0:
+ return 0
+ if a == 1:
+ return 1
+ a1, e = a, 0
+ while a1 % 2 == 0:
+ a1, e = a1 // 2, e + 1
+ if e % 2 == 0 or n % 8 == 1 or n % 8 == 7:
+ s = 1
+ else:
+ s = -1
+ if a1 == 1:
+ return s
+ if n % 4 == 3 and a1 % 4 == 3:
+ s = -s
+ return s * jacobi(n % a1, a1)
+
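+# For example jacobi(2, 7) == 1 (2 is a quadratic residue modulo 7,
+# since 3 * 3 == 9 == 2 mod 7), while jacobi(3, 7) == -1 (3 is not).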
+
+def square_root_mod_prime(a, p):
+ """Modular square root of a, mod p, p prime."""
+
+ # Based on the Handbook of Applied Cryptography, algorithms 3.34 to 3.39.
+
+ # This function has been tested for all values in [0,p-1] for
+ # every prime p from 3 to 1229.
+
+ assert 0 <= a < p
+ assert 1 < p
+
+ if a == 0:
+ return 0
+ if p == 2:
+ return a
+
+ jac = jacobi(a, p)
+ if jac == -1:
+ raise SquareRootError("%d has no square root modulo %d" \
+ % (a, p))
+
+ if p % 4 == 3:
+ return pow(a, (p + 1) // 4, p)
+
+ if p % 8 == 5:
+ d = pow(a, (p - 1) // 4, p)
+ if d == 1:
+ return pow(a, (p + 3) // 8, p)
+ if d == p - 1:
+ return (2 * a * pow(4 * a, (p - 5) // 8, p)) % p
+ raise RuntimeError("Shouldn't get here.")
+
+ if PY3:
+ range_top = p
+ else:
+ # xrange on python2 can take integers representable as C long only
+ range_top = min(0x7fffffff, p)
+ for b in xrange(2, range_top):
+ if jacobi(b * b - 4 * a, p) == -1:
+ f = (a, -b, 1)
+ ff = polynomial_exp_mod((0, 1), (p + 1) // 2, f, p)
+ assert ff[1] == 0
+ return ff[0]
+ raise RuntimeError("No b found.")
+
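+# For example square_root_mod_prime(2, 7) == 4, with 7 - 4 == 3 being the
+# other root; callers that need a specific root (like
+# VerifyingKey._from_compressed in keys.py) choose between beta and
+# p - beta by parity.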
+
+if GMPY2:
+ def inverse_mod(a, m):
+ """Inverse of a mod m."""
+ if a == 0:
+ return 0
+ return powmod(a, -1, m)
+elif GMPY:
+ def inverse_mod(a, m):
+ """Inverse of a mod m."""
+ # while libgmp likely does support inverses modulo, it is accessible
+ # only using the native `pow()` function, and `pow()` sanity checks
+ # the parameters before passing them on to underlying implementation
+ # on Python2
+ if a == 0:
+ return 0
+ a = mpz(a)
+ m = mpz(m)
+
+ lm, hm = mpz(1), mpz(0)
+ low, high = a % m, m
+ while low > 1:
+ r = high // low
+ lm, low, hm, high = hm - lm * r, high - low * r, lm, low
+
+ return lm % m
+else:
+ def inverse_mod(a, m):
+ """Inverse of a mod m."""
+
+ if a == 0:
+ return 0
+
+ lm, hm = 1, 0
+ low, high = a % m, m
+ while low > 1:
+ r = high // low
+ lm, low, hm, high = hm - lm * r, high - low * r, lm, low
+
+ return lm % m
+
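+# Whichever implementation is selected above, inverse_mod(a, m) returns x
+# such that (a * x) % m == 1 for a coprime to m, e.g.:
+#
+# assert (3 * inverse_mod(3, 2**255 - 19)) % (2**255 - 19) == 1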
+
+try:
+ gcd2 = math.gcd
+except AttributeError:
+ def gcd2(a, b):
+ """Greatest common divisor using Euclid's algorithm."""
+ while a:
+ a, b = b % a, a
+ return b
+
+
+def gcd(*a):
+ """Greatest common divisor.
+
+ Usage: gcd([ 2, 4, 6 ])
+ or: gcd(2, 4, 6)
+ """
+
+ if len(a) > 1:
+ return reduce(gcd2, a)
+ if hasattr(a[0], "__iter__"):
+ return reduce(gcd2, a[0])
+ return a[0]
+
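+# For example gcd(12, 18, 30) == 6, and the list form gcd([12, 18, 30])
+# gives the same result.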
+
+def lcm2(a, b):
+ """Least common multiple of two integers."""
+
+ return (a * b) // gcd(a, b)
+
+
+def lcm(*a):
+ """Least common multiple.
+
+ Usage: lcm([ 3, 4, 5 ])
+ or: lcm(3, 4, 5)
+ """
+
+ if len(a) > 1:
+ return reduce(lcm2, a)
+ if hasattr(a[0], "__iter__"):
+ return reduce(lcm2, a[0])
+ return a[0]
+
+
+def factorization(n):
+ """Decompose n into a list of (prime,exponent) pairs."""
+
+ assert isinstance(n, integer_types)
+
+ if n < 2:
+ return []
+
+ result = []
+ d = 2
+
+ # Test the small primes:
+
+ for d in smallprimes:
+ if d > n:
+ break
+ q, r = divmod(n, d)
+ if r == 0:
+ count = 1
+ while d <= n:
+ n = q
+ q, r = divmod(n, d)
+ if r != 0:
+ break
+ count = count + 1
+ result.append((d, count))
+
+ # If n is still greater than the last of our small primes,
+ # it may require further work:
+
+ if n > smallprimes[-1]:
+ if is_prime(n): # If what's left is prime, it's easy:
+ result.append((n, 1))
+ else: # Ugh. Search stupidly for a divisor:
+ d = smallprimes[-1]
+ while 1:
+ d = d + 2 # Try the next divisor.
+ q, r = divmod(n, d)
+ if q < d: # n < d*d means we're done, n = 1 or prime.
+ break
+ if r == 0: # d divides n. How many times?
+ count = 1
+ n = q
+ while d <= n: # As long as d might still divide n,
+ q, r = divmod(n, d) # see if it does.
+ if r != 0:
+ break
+ n = q # It does. Reduce n, increase count.
+ count = count + 1
+ result.append((d, count))
+ if n > 1:
+ result.append((n, 1))
+
+ return result
+
+
+def phi(n): # pragma: no cover
+ """Return the Euler totient function of n."""
+ # deprecated in 0.14
+ warnings.warn("Function is unused by library code. If you use this code, "
+ "please open an issue in "
+ "https://github.com/warner/python-ecdsa",
+ DeprecationWarning)
+
+ assert isinstance(n, integer_types)
+
+ if n < 3:
+ return 1
+
+ result = 1
+ ff = factorization(n)
+ for f in ff:
+ e = f[1]
+ if e > 1:
+ result = result * f[0] ** (e - 1) * (f[0] - 1)
+ else:
+ result = result * (f[0] - 1)
+ return result
+
+
+def carmichael(n): # pragma: no cover
+ """Return Carmichael function of n.
+
+ Carmichael(n) is the smallest integer x such that
+ m**x = 1 mod n for all m relatively prime to n.
+ """
+ # deprecated in 0.14
+ warnings.warn("Function is unused by library code. If you use this code, "
+ "please open an issue in "
+ "https://github.com/warner/python-ecdsa",
+ DeprecationWarning)
+
+ return carmichael_of_factorized(factorization(n))
+
+
+def carmichael_of_factorized(f_list): # pragma: no cover
+ """Return the Carmichael function of a number that is
+ represented as a list of (prime,exponent) pairs.
+ """
+ # deprecated in 0.14
+ warnings.warn("Function is unused by library code. If you use this code, "
+ "please open an issue in "
+ "https://github.com/warner/python-ecdsa",
+ DeprecationWarning)
+
+ if len(f_list) < 1:
+ return 1
+
+ result = carmichael_of_ppower(f_list[0])
+ for i in xrange(1, len(f_list)):
+ result = lcm(result, carmichael_of_ppower(f_list[i]))
+
+ return result
+
+
+def carmichael_of_ppower(pp): # pragma: no cover
+ """Carmichael function of the given power of the given prime.
+ """
+ # deprecated in 0.14
+ warnings.warn("Function is unused by library code. If you use this code, "
+ "please open an issue in "
+ "https://github.com/warner/python-ecdsa",
+ DeprecationWarning)
+
+ p, a = pp
+ if p == 2 and a > 2:
+ return 2**(a - 2)
+ else:
+ return (p - 1) * p**(a - 1)
+
+
+def order_mod(x, m): # pragma: no cover
+ """Return the order of x in the multiplicative group mod m.
+ """
+ # deprecated in 0.14
+ warnings.warn("Function is unused by library code. If you use this code, "
+ "please open an issue in "
+ "https://github.com/warner/python-ecdsa",
+ DeprecationWarning)
+
+ # Warning: this implementation is not very clever, and will
+ # take a long time if m is very large.
+
+ if m <= 1:
+ return 0
+
+ assert gcd(x, m) == 1
+
+ z = x
+ result = 1
+ while z != 1:
+ z = (z * x) % m
+ result = result + 1
+ return result
+
+
+def largest_factor_relatively_prime(a, b): # pragma: no cover
+ """Return the largest factor of a relatively prime to b.
+ """
+ # deprecated in 0.14
+ warnings.warn("Function is unused by library code. If you use this code, "
+ "please open an issue in "
+ "https://github.com/warner/python-ecdsa",
+ DeprecationWarning)
+
+ while 1:
+ d = gcd(a, b)
+ if d <= 1:
+ break
+ b = d
+ while 1:
+ q, r = divmod(a, d)
+ if r > 0:
+ break
+ a = q
+ return a
+
+
+def kinda_order_mod(x, m): # pragma: no cover
+ """Return the order of x in the multiplicative group mod m',
+ where m' is the largest factor of m relatively prime to x.
+ """
+ # deprecated in 0.14
+ warnings.warn("Function is unused by library code. If you use this code, "
+ "please open an issue in "
+ "https://github.com/warner/python-ecdsa",
+ DeprecationWarning)
+
+ return order_mod(x, largest_factor_relatively_prime(m, x))
+
+
+def is_prime(n):
+ """Return True if x is prime, False otherwise.
+
+ We use the Miller-Rabin test, as given in Menezes et al. p. 138.
+ This test is not exact: there are composite values n for which
+ it returns True.
+
+ In testing the odd numbers from 10000001 to 19999999,
+ about 66 composites got past the first test,
+ 5 got past the second test, and none got past the third.
+ Since factors of 2, 3, 5, 7, and 11 were detected during
+ preliminary screening, the number of numbers tested by
+ Miller-Rabin was (19999999 - 10000001)*(2/3)*(4/5)*(6/7)
+ = 4.57 million.
+ """
+
+ # (This is used to study the risk of false positives:)
+ global miller_rabin_test_count
+
+ miller_rabin_test_count = 0
+
+ if n <= smallprimes[-1]:
+ if n in smallprimes:
+ return True
+ else:
+ return False
+
+ if gcd(n, 2 * 3 * 5 * 7 * 11) != 1:
+ return False
+
+ # Choose a number of iterations sufficient to reduce the
+ # probability of accepting a composite below 2**-80
+ # (from Menezes et al. Table 4.4):
+
+ t = 40
+ n_bits = 1 + int(math.log(n, 2))
+ for k, tt in ((100, 27),
+ (150, 18),
+ (200, 15),
+ (250, 12),
+ (300, 9),
+ (350, 8),
+ (400, 7),
+ (450, 6),
+ (550, 5),
+ (650, 4),
+ (850, 3),
+ (1300, 2),
+ ):
+ if n_bits < k:
+ break
+ t = tt
+
+ # Run the test t times:
+
+ s = 0
+ r = n - 1
+ while (r % 2) == 0:
+ s = s + 1
+ r = r // 2
+ for i in xrange(t):
+ a = smallprimes[i]
+ y = pow(a, r, n)
+ if y != 1 and y != n - 1:
+ j = 1
+ while j <= s - 1 and y != n - 1:
+ y = pow(y, 2, n)
+ if y == 1:
+ miller_rabin_test_count = i + 1
+ return False
+ j = j + 1
+ if y != n - 1:
+ miller_rabin_test_count = i + 1
+ return False
+ return True
+
+
+def next_prime(starting_value):
+ "Return the smallest prime larger than the starting value."
+
+ if starting_value < 2:
+ return 2
+ result = (starting_value + 1) | 1
+ while not is_prime(result):
+ result = result + 2
+ return result
+
+
+smallprimes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41,
+ 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97,
+ 101, 103, 107, 109, 113, 127, 131, 137, 139, 149,
+ 151, 157, 163, 167, 173, 179, 181, 191, 193, 197,
+ 199, 211, 223, 227, 229, 233, 239, 241, 251, 257,
+ 263, 269, 271, 277, 281, 283, 293, 307, 311, 313,
+ 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
+ 383, 389, 397, 401, 409, 419, 421, 431, 433, 439,
+ 443, 449, 457, 461, 463, 467, 479, 487, 491, 499,
+ 503, 509, 521, 523, 541, 547, 557, 563, 569, 571,
+ 577, 587, 593, 599, 601, 607, 613, 617, 619, 631,
+ 641, 643, 647, 653, 659, 661, 673, 677, 683, 691,
+ 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,
+ 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
+ 839, 853, 857, 859, 863, 877, 881, 883, 887, 907,
+ 911, 919, 929, 937, 941, 947, 953, 967, 971, 977,
+ 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033,
+ 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,
+ 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163,
+ 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229]
+
+miller_rabin_test_count = 0
diff --git a/third_party/python/ecdsa/ecdsa/rfc6979.py b/third_party/python/ecdsa/ecdsa/rfc6979.py
new file mode 100644
index 0000000000..a48938123d
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/rfc6979.py
@@ -0,0 +1,107 @@
+'''
+RFC 6979:
+ Deterministic Usage of the Digital Signature Algorithm (DSA) and
+ Elliptic Curve Digital Signature Algorithm (ECDSA)
+
+ http://tools.ietf.org/html/rfc6979
+
+Many thanks to Coda Hale for his implementation in Go language:
+ https://github.com/codahale/rfc6979
+'''
+
+import hmac
+from binascii import hexlify
+from .util import number_to_string, number_to_string_crop, bit_length
+from ._compat import hmac_compat
+
+
+# bit_length used to be defined in this module, so it is re-exported here for
+# backwards compatibility; it will need to be deprecated and removed later
+__all__ = ["bit_length", "bits2int", "bits2octets", "generate_k"]
+
+
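+# bits2int, as in RFC 6979, section 2.3.2: interpret `data` as a big-endian
+# integer and keep only the qlen leftmost bits, e.g. (added example)
+# bits2int(b'\xff\x00', 8) == 0xff.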
+def bits2int(data, qlen):
+ x = int(hexlify(data), 16)
+ l = len(data) * 8
+
+ if l > qlen:
+ return x >> (l - qlen)
+ return x
+
+
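+# bits2octets, as in RFC 6979, section 2.3.4: reduce bits2int(data) by the
+# order with at most one subtraction, then encode the result into as many
+# octets as the order itself occupies.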
+def bits2octets(data, order):
+ z1 = bits2int(data, bit_length(order))
+ z2 = z1 - order
+
+ if z2 < 0:
+ z2 = z1
+
+ return number_to_string_crop(z2, order)
+
+
+# https://tools.ietf.org/html/rfc6979#section-3.2
+def generate_k(order, secexp, hash_func, data, retry_gen=0, extra_entropy=b''):
+    '''
+    order - order of the DSA generator used in the signature
+    secexp - secure exponent (private key) in numeric form
+    hash_func - reference to the hash function used to generate `data`
+    data - hash of the data being signed, in binary form
+    retry_gen - int - how many good 'k' values to skip before returning
+    extra_entropy - extra added data in binary form, as per section 3.6 of
+                    RFC 6979
+    '''
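+    # Illustrative usage (added example; hashlib.sha256 is just one possible
+    # hash function):
+    #
+    #     import hashlib
+    #     digest = hashlib.sha256(b"message").digest()
+    #     k = generate_k(order, secexp, hashlib.sha256, digest)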
+
+ qlen = bit_length(order)
+ holen = hash_func().digest_size
+    rolen = (qlen + 7) // 8  # floor division keeps rolen an int on Python 3
+ bx = (hmac_compat(number_to_string(secexp, order)),
+ hmac_compat(bits2octets(data, order)),
+ hmac_compat(extra_entropy))
+
+ # Step B
+ v = b'\x01' * holen
+
+ # Step C
+ k = b'\x00' * holen
+
+ # Step D
+
+ k = hmac.new(k, digestmod=hash_func)
+ k.update(v + b'\x00')
+ for i in bx:
+ k.update(i)
+ k = k.digest()
+
+ # Step E
+ v = hmac.new(k, v, hash_func).digest()
+
+ # Step F
+ k = hmac.new(k, digestmod=hash_func)
+ k.update(v + b'\x01')
+ for i in bx:
+ k.update(i)
+ k = k.digest()
+
+ # Step G
+ v = hmac.new(k, v, hash_func).digest()
+
+ # Step H
+ while True:
+ # Step H1
+ t = b''
+
+ # Step H2
+ while len(t) < rolen:
+ v = hmac.new(k, v, hash_func).digest()
+ t += v
+
+ # Step H3
+ secret = bits2int(t, qlen)
+
+ if 1 <= secret < order:
+ if retry_gen <= 0:
+ return secret
+ retry_gen -= 1
+
+ k = hmac.new(k, v + b'\x00', hash_func).digest()
+ v = hmac.new(k, v, hash_func).digest()
diff --git a/third_party/python/ecdsa/ecdsa/test_der.py b/third_party/python/ecdsa/ecdsa/test_der.py
new file mode 100644
index 0000000000..e6cd593d3e
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_der.py
@@ -0,0 +1,384 @@
+
+# for compatibility with Python 2.6 we need the unittest2 package, which is
+# not available on Python 3.3 or 3.4, hence the guarded import below
+import warnings
+from binascii import hexlify
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+from six import b
+import hypothesis.strategies as st
+from hypothesis import given, example
+import pytest
+from ._compat import str_idx_as_int
+from .curves import NIST256p, NIST224p
+from .der import remove_integer, UnexpectedDER, read_length, encode_bitstring,\
+ remove_bitstring, remove_object, encode_oid
+
+
+class TestRemoveInteger(unittest.TestCase):
+ # DER requires the integers to be 0-padded only if they would be
+ # interpreted as negative, check if those errors are detected
+ def test_non_minimal_encoding(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_integer(b('\x02\x02\x00\x01'))
+
+ def test_negative_with_high_bit_set(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_integer(b('\x02\x01\x80'))
+
+ def test_minimal_with_high_bit_set(self):
+ val, rem = remove_integer(b('\x02\x02\x00\x80'))
+
+ self.assertEqual(val, 0x80)
+ self.assertFalse(rem)
+
+ def test_two_zero_bytes_with_high_bit_set(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_integer(b('\x02\x03\x00\x00\xff'))
+
+ def test_zero_length_integer(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_integer(b('\x02\x00'))
+
+ def test_empty_string(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_integer(b(''))
+
+ def test_encoding_of_zero(self):
+ val, rem = remove_integer(b('\x02\x01\x00'))
+
+ self.assertEqual(val, 0)
+ self.assertFalse(rem)
+
+ def test_encoding_of_127(self):
+ val, rem = remove_integer(b('\x02\x01\x7f'))
+
+ self.assertEqual(val, 127)
+ self.assertFalse(rem)
+
+ def test_encoding_of_128(self):
+ val, rem = remove_integer(b('\x02\x02\x00\x80'))
+
+ self.assertEqual(val, 128)
+ self.assertFalse(rem)
+
+
+class TestReadLength(unittest.TestCase):
+ # DER requires the lengths between 0 and 127 to be encoded using the short
+ # form and lengths above that encoded with minimal number of bytes
+ # necessary
+ def test_zero_length(self):
+ self.assertEqual((0, 1), read_length(b('\x00')))
+
+ def test_two_byte_zero_length(self):
+ with self.assertRaises(UnexpectedDER):
+ read_length(b('\x81\x00'))
+
+ def test_two_byte_small_length(self):
+ with self.assertRaises(UnexpectedDER):
+ read_length(b('\x81\x7f'))
+
+ def test_long_form_with_zero_length(self):
+ with self.assertRaises(UnexpectedDER):
+ read_length(b('\x80'))
+
+ def test_smallest_two_byte_length(self):
+ self.assertEqual((128, 2), read_length(b('\x81\x80')))
+
+ def test_zero_padded_length(self):
+ with self.assertRaises(UnexpectedDER):
+ read_length(b('\x82\x00\x80'))
+
+ def test_two_three_byte_length(self):
+ self.assertEqual((256, 3), read_length(b'\x82\x01\x00'))
+
+ def test_empty_string(self):
+ with self.assertRaises(UnexpectedDER):
+ read_length(b(''))
+
+ def test_length_overflow(self):
+ with self.assertRaises(UnexpectedDER):
+ read_length(b('\x83\x01\x00'))
+
+
+class TestEncodeBitstring(unittest.TestCase):
+ # DER requires BIT STRINGS to include a number of padding bits in the
+ # encoded byte string, that padding must be between 0 and 7
+
+ def test_old_call_convention(self):
+ """This is the old way to use the function."""
+ warnings.simplefilter('always')
+ with pytest.warns(DeprecationWarning) as warns:
+ der = encode_bitstring(b'\x00\xff')
+
+ self.assertEqual(len(warns), 1)
+ self.assertIn("unused= needs to be specified",
+ warns[0].message.args[0])
+
+ self.assertEqual(der, b'\x03\x02\x00\xff')
+
+ def test_new_call_convention(self):
+ """This is how it should be called now."""
+ warnings.simplefilter('always')
+ with pytest.warns(None) as warns:
+ der = encode_bitstring(b'\xff', 0)
+
+ # verify that new call convention doesn't raise Warnings
+ self.assertEqual(len(warns), 0)
+
+ self.assertEqual(der, b'\x03\x02\x00\xff')
+
+ def test_implicit_unused_bits(self):
+ """
+ Writing bit string with already included the number of unused bits.
+ """
+ warnings.simplefilter('always')
+ with pytest.warns(None) as warns:
+ der = encode_bitstring(b'\x00\xff', None)
+
+ # verify that new call convention doesn't raise Warnings
+ self.assertEqual(len(warns), 0)
+
+ self.assertEqual(der, b'\x03\x02\x00\xff')
+
+ def test_explicit_unused_bits(self):
+ der = encode_bitstring(b'\xff\xf0', 4)
+
+ self.assertEqual(der, b'\x03\x03\x04\xff\xf0')
+
+ def test_empty_string(self):
+ self.assertEqual(encode_bitstring(b'', 0), b'\x03\x01\x00')
+
+ def test_invalid_unused_count(self):
+ with self.assertRaises(ValueError):
+ encode_bitstring(b'\xff\x00', 8)
+
+ def test_invalid_unused_with_empty_string(self):
+ with self.assertRaises(ValueError):
+ encode_bitstring(b'', 1)
+
+ def test_non_zero_padding_bits(self):
+ with self.assertRaises(ValueError):
+ encode_bitstring(b'\xff', 2)
+
+
+class TestRemoveBitstring(unittest.TestCase):
+ def test_old_call_convention(self):
+ """This is the old way to call the function."""
+ warnings.simplefilter('always')
+ with pytest.warns(DeprecationWarning) as warns:
+ bits, rest = remove_bitstring(b'\x03\x02\x00\xff')
+
+ self.assertEqual(len(warns), 1)
+ self.assertIn("expect_unused= needs to be specified",
+ warns[0].message.args[0])
+
+ self.assertEqual(bits, b'\x00\xff')
+ self.assertEqual(rest, b'')
+
+ def test_new_call_convention(self):
+ warnings.simplefilter('always')
+ with pytest.warns(None) as warns:
+ bits, rest = remove_bitstring(b'\x03\x02\x00\xff', 0)
+
+ self.assertEqual(len(warns), 0)
+
+ self.assertEqual(bits, b'\xff')
+ self.assertEqual(rest, b'')
+
+ def test_implicit_unexpected_unused(self):
+ warnings.simplefilter('always')
+ with pytest.warns(None) as warns:
+ bits, rest = remove_bitstring(b'\x03\x02\x00\xff', None)
+
+ self.assertEqual(len(warns), 0)
+
+ self.assertEqual(bits, (b'\xff', 0))
+ self.assertEqual(rest, b'')
+
+ def test_with_padding(self):
+ ret, rest = remove_bitstring(b'\x03\x02\x04\xf0', None)
+
+ self.assertEqual(ret, (b'\xf0', 4))
+ self.assertEqual(rest, b'')
+
+ def test_not_a_bitstring(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_bitstring(b'\x02\x02\x00\xff', None)
+
+ def test_empty_encoding(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_bitstring(b'\x03\x00', None)
+
+ def test_empty_string(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_bitstring(b'', None)
+
+ def test_no_length(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_bitstring(b'\x03', None)
+
+ def test_unexpected_number_of_unused_bits(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_bitstring(b'\x03\x02\x00\xff', 1)
+
+ def test_invalid_encoding_of_unused_bits(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_bitstring(b'\x03\x03\x08\xff\x00', None)
+
+ def test_invalid_encoding_of_empty_string(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_bitstring(b'\x03\x01\x01', None)
+
+ def test_invalid_padding_bits(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_bitstring(b'\x03\x02\x01\xff', None)
+
+
+class TestStrIdxAsInt(unittest.TestCase):
+ def test_str(self):
+ self.assertEqual(115, str_idx_as_int('str', 0))
+
+ def test_bytes(self):
+ self.assertEqual(115, str_idx_as_int(b'str', 0))
+
+ def test_bytearray(self):
+ self.assertEqual(115, str_idx_as_int(bytearray(b'str'), 0))
+
+
+class TestEncodeOid(unittest.TestCase):
+ def test_pub_key_oid(self):
+ oid_ecPublicKey = encode_oid(1, 2, 840, 10045, 2, 1)
+ self.assertEqual(hexlify(oid_ecPublicKey), b("06072a8648ce3d0201"))
+
+ def test_nist224p_oid(self):
+ self.assertEqual(hexlify(NIST224p.encoded_oid), b("06052b81040021"))
+
+ def test_nist256p_oid(self):
+ self.assertEqual(hexlify(NIST256p.encoded_oid),
+ b"06082a8648ce3d030107")
+
+ def test_large_second_subid(self):
+ # from X.690, section 8.19.5
+ oid = encode_oid(2, 999, 3)
+ self.assertEqual(oid, b'\x06\x03\x88\x37\x03')
+
+ def test_with_two_subids(self):
+ oid = encode_oid(2, 999)
+ self.assertEqual(oid, b'\x06\x02\x88\x37')
+
+ def test_zero_zero(self):
+ oid = encode_oid(0, 0)
+ self.assertEqual(oid, b'\x06\x01\x00')
+
+ def test_with_wrong_types(self):
+ with self.assertRaises((TypeError, AssertionError)):
+ encode_oid(0, None)
+
+ def test_with_small_first_large_second(self):
+ with self.assertRaises(AssertionError):
+ encode_oid(1, 40)
+
+ def test_small_first_max_second(self):
+ oid = encode_oid(1, 39)
+ self.assertEqual(oid, b'\x06\x01\x4f')
+
+ def test_with_invalid_first(self):
+ with self.assertRaises(AssertionError):
+ encode_oid(3, 39)
+
+
+class TestRemoveObject(unittest.TestCase):
+ @classmethod
+ def setUpClass(cls):
+ cls.oid_ecPublicKey = encode_oid(1, 2, 840, 10045, 2, 1)
+
+ def test_pub_key_oid(self):
+ oid, rest = remove_object(self.oid_ecPublicKey)
+ self.assertEqual(rest, b'')
+ self.assertEqual(oid, (1, 2, 840, 10045, 2, 1))
+
+ def test_with_extra_bytes(self):
+ oid, rest = remove_object(self.oid_ecPublicKey + b'more')
+ self.assertEqual(rest, b'more')
+ self.assertEqual(oid, (1, 2, 840, 10045, 2, 1))
+
+ def test_with_large_second_subid(self):
+ # from X.690, section 8.19.5
+ oid, rest = remove_object(b'\x06\x03\x88\x37\x03')
+ self.assertEqual(rest, b'')
+ self.assertEqual(oid, (2, 999, 3))
+
+ def test_with_padded_first_subid(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'\x06\x02\x80\x00')
+
+ def test_with_padded_second_subid(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'\x06\x04\x88\x37\x80\x01')
+
+ def test_with_missing_last_byte_of_multi_byte(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'\x06\x03\x88\x37\x83')
+
+ def test_with_two_subids(self):
+ oid, rest = remove_object(b'\x06\x02\x88\x37')
+ self.assertEqual(rest, b'')
+ self.assertEqual(oid, (2, 999))
+
+ def test_zero_zero(self):
+ oid, rest = remove_object(b'\x06\x01\x00')
+ self.assertEqual(rest, b'')
+ self.assertEqual(oid, (0, 0))
+
+ def test_empty_string(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'')
+
+ def test_missing_length(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'\x06')
+
+ def test_empty_oid(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'\x06\x00')
+
+ def test_empty_oid_overflow(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'\x06\x01')
+
+ def test_with_wrong_type(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'\x04\x02\x88\x37')
+
+ def test_with_too_long_length(self):
+ with self.assertRaises(UnexpectedDER):
+ remove_object(b'\x06\x03\x88\x37')
+
+
+@st.composite
+def st_oid(draw, max_value=2**512, max_size=50):
+ """
+ Hypothesis strategy that returns valid OBJECT IDENTIFIERs as tuples
+
+ :param max_value: maximum value of any single sub-identifier
+ :param max_size: maximum length of the generated OID
+ """
+ first = draw(st.integers(min_value=0, max_value=2))
+ if first < 2:
+ second = draw(st.integers(min_value=0, max_value=39))
+ else:
+ second = draw(st.integers(min_value=0, max_value=max_value))
+ rest = draw(st.lists(st.integers(min_value=0, max_value=max_value),
+ max_size=max_size))
+ return (first, second) + tuple(rest)
+
+
+@given(st_oid())
+def test_oids(ids):
+ encoded_oid = encode_oid(*ids)
+ decoded_oid, rest = remove_object(encoded_oid)
+ assert rest == b''
+ assert decoded_oid == ids
diff --git a/third_party/python/ecdsa/ecdsa/test_ecdh.py b/third_party/python/ecdsa/ecdsa/test_ecdh.py
new file mode 100644
index 0000000000..74c8bbab64
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_ecdh.py
@@ -0,0 +1,350 @@
+
+import os
+import shutil
+import subprocess
+import pytest
+from binascii import hexlify, unhexlify
+
+from .curves import NIST192p, NIST224p, NIST256p, NIST384p, NIST521p
+from .curves import curves
+from .ecdh import ECDH, InvalidCurveError, \
+ InvalidSharedSecretError, NoKeyError
+from .keys import SigningKey, VerifyingKey
+
+
+@pytest.mark.parametrize("vcurve", curves, ids=[curve.name for curve in curves])
+def test_ecdh_each(vcurve):
+ ecdh1 = ECDH(curve=vcurve)
+ ecdh2 = ECDH(curve=vcurve)
+
+ ecdh2.generate_private_key()
+ ecdh1.load_received_public_key(ecdh2.get_public_key())
+ ecdh2.load_received_public_key(ecdh1.generate_private_key())
+
+ secret1 = ecdh1.generate_sharedsecret_bytes()
+ secret2 = ecdh2.generate_sharedsecret_bytes()
+ assert secret1 == secret2
+
+
+def test_ecdh_no_public_key():
+ ecdh1 = ECDH(curve=NIST192p)
+
+ with pytest.raises(NoKeyError):
+ ecdh1.generate_sharedsecret_bytes()
+
+ ecdh1.generate_private_key()
+
+ with pytest.raises(NoKeyError):
+ ecdh1.generate_sharedsecret_bytes()
+
+
+def test_ecdh_wrong_public_key_curve():
+ ecdh1 = ECDH(curve=NIST192p)
+ ecdh1.generate_private_key()
+ ecdh2 = ECDH(curve=NIST256p)
+ ecdh2.generate_private_key()
+
+ with pytest.raises(InvalidCurveError):
+ ecdh1.load_received_public_key(ecdh2.get_public_key())
+
+ with pytest.raises(InvalidCurveError):
+ ecdh2.load_received_public_key(ecdh1.get_public_key())
+
+ ecdh1.public_key = ecdh2.get_public_key()
+ ecdh2.public_key = ecdh1.get_public_key()
+
+ with pytest.raises(InvalidCurveError):
+ ecdh1.generate_sharedsecret_bytes()
+
+ with pytest.raises(InvalidCurveError):
+ ecdh2.generate_sharedsecret_bytes()
+
+
+def test_ecdh_invalid_shared_secret_curve():
+ ecdh1 = ECDH(curve=NIST256p)
+ ecdh1.generate_private_key()
+
+ ecdh1.load_received_public_key(SigningKey.generate(NIST256p).get_verifying_key())
+
+ ecdh1.private_key.privkey.secret_multiplier = ecdh1.private_key.curve.order
+
+ with pytest.raises(InvalidSharedSecretError):
+ ecdh1.generate_sharedsecret_bytes()
+
+
+# https://github.com/scogliani/ecc-test-vectors/blob/master/ecdh_kat/secp192r1.txt
+# https://github.com/scogliani/ecc-test-vectors/blob/master/ecdh_kat/secp256r1.txt
+# https://github.com/coruus/nist-testvectors/blob/master/csrc.nist.gov/groups/STM/cavp/documents/components/ecccdhtestvectors/KAS_ECC_CDH_PrimitiveTest.txt
+@pytest.mark.parametrize(
+ "curve,privatekey,pubkey,secret",
+ [
+ pytest.param(
+ NIST192p,
+ "f17d3fea367b74d340851ca4270dcb24c271f445bed9d527",
+ "42ea6dd9969dd2a61fea1aac7f8e98edcc896c6e55857cc0"
+ "dfbe5d7c61fac88b11811bde328e8a0d12bf01a9d204b523",
+ "803d8ab2e5b6e6fca715737c3a82f7ce3c783124f6d51cd0",
+ id="NIST192p-1"
+ ),
+ pytest.param(
+ NIST192p,
+ "56e853349d96fe4c442448dacb7cf92bb7a95dcf574a9bd5",
+ "deb5712fa027ac8d2f22c455ccb73a91e17b6512b5e030e7"
+ "7e2690a02cc9b28708431a29fb54b87b1f0c14e011ac2125",
+ "c208847568b98835d7312cef1f97f7aa298283152313c29d",
+ id="NIST192p-2"
+ ),
+ pytest.param(
+ NIST192p,
+ "c6ef61fe12e80bf56f2d3f7d0bb757394519906d55500949",
+ "4edaa8efc5a0f40f843663ec5815e7762dddc008e663c20f"
+ "0a9f8dc67a3e60ef6d64b522185d03df1fc0adfd42478279",
+ "87229107047a3b611920d6e3b2c0c89bea4f49412260b8dd",
+ id="NIST192p-3"
+ ),
+ pytest.param(
+ NIST192p,
+ "e6747b9c23ba7044f38ff7e62c35e4038920f5a0163d3cda",
+ "8887c276edeed3e9e866b46d58d895c73fbd80b63e382e88"
+ "04c5097ba6645e16206cfb70f7052655947dd44a17f1f9d5",
+ "eec0bed8fc55e1feddc82158fd6dc0d48a4d796aaf47d46c",
+ id="NIST192p-4"
+ ),
+ pytest.param(
+ NIST192p,
+ "beabedd0154a1afcfc85d52181c10f5eb47adc51f655047d",
+ "0d045f30254adc1fcefa8a5b1f31bf4e739dd327cd18d594"
+ "542c314e41427c08278a08ce8d7305f3b5b849c72d8aff73",
+ "716e743b1b37a2cd8479f0a3d5a74c10ba2599be18d7e2f4",
+ id="NIST192p-5"
+ ),
+ pytest.param(
+ NIST192p,
+ "cf70354226667321d6e2baf40999e2fd74c7a0f793fa8699",
+ "fb35ca20d2e96665c51b98e8f6eb3d79113508d8bccd4516"
+ "368eec0d5bfb847721df6aaff0e5d48c444f74bf9cd8a5a7",
+ "f67053b934459985a315cb017bf0302891798d45d0e19508",
+ id="NIST192p-6"
+ ),
+ pytest.param(
+ NIST224p,
+ "8346a60fc6f293ca5a0d2af68ba71d1dd389e5e40837942df3e43cbd",
+ "af33cd0629bc7e996320a3f40368f74de8704fa37b8fab69abaae280"
+ "882092ccbba7930f419a8a4f9bb16978bbc3838729992559a6f2e2d7",
+ "7d96f9a3bd3c05cf5cc37feb8b9d5209d5c2597464dec3e9983743e8",
+ id="NIST224p"
+ ),
+ pytest.param(
+ NIST256p,
+ "7d7dc5f71eb29ddaf80d6214632eeae03d9058af1fb6d22ed80badb62bc1a534",
+ "700c48f77f56584c5cc632ca65640db91b6bacce3a4df6b42ce7cc838833d287"
+ "db71e509e3fd9b060ddb20ba5c51dcc5948d46fbf640dfe0441782cab85fa4ac",
+ "46fc62106420ff012e54a434fbdd2d25ccc5852060561e68040dd7778997bd7b",
+ id="NIST256p-1"
+ ),
+ pytest.param(
+ NIST256p,
+ "38f65d6dce47676044d58ce5139582d568f64bb16098d179dbab07741dd5caf5",
+ "809f04289c64348c01515eb03d5ce7ac1a8cb9498f5caa50197e58d43a86a7ae"
+ "b29d84e811197f25eba8f5194092cb6ff440e26d4421011372461f579271cda3",
+ "057d636096cb80b67a8c038c890e887d1adfa4195e9b3ce241c8a778c59cda67",
+ id="NIST256p-2"
+ ),
+ pytest.param(
+ NIST256p,
+ "1accfaf1b97712b85a6f54b148985a1bdc4c9bec0bd258cad4b3d603f49f32c8",
+ "a2339c12d4a03c33546de533268b4ad667debf458b464d77443636440ee7fec3"
+ "ef48a3ab26e20220bcda2c1851076839dae88eae962869a497bf73cb66faf536",
+ "2d457b78b4614132477618a5b077965ec90730a8c81a1c75d6d4ec68005d67ec",
+ id="NIST256p-3"
+ ),
+ pytest.param(
+ NIST256p,
+ "207c43a79bfee03db6f4b944f53d2fb76cc49ef1c9c4d34d51b6c65c4db6932d",
+ "df3989b9fa55495719b3cf46dccd28b5153f7808191dd518eff0c3cff2b705ed"
+ "422294ff46003429d739a33206c8752552c8ba54a270defc06e221e0feaf6ac4",
+ "96441259534b80f6aee3d287a6bb17b5094dd4277d9e294f8fe73e48bf2a0024",
+ id="NIST256p-4"
+ ),
+ pytest.param(
+ NIST256p,
+ "59137e38152350b195c9718d39673d519838055ad908dd4757152fd8255c09bf",
+ "41192d2813e79561e6a1d6f53c8bc1a433a199c835e141b05a74a97b0faeb922"
+ "1af98cc45e98a7e041b01cf35f462b7562281351c8ebf3ffa02e33a0722a1328",
+ "19d44c8d63e8e8dd12c22a87b8cd4ece27acdde04dbf47f7f27537a6999a8e62",
+ id="NIST256p-5"
+ ),
+ pytest.param(
+ NIST256p,
+ "f5f8e0174610a661277979b58ce5c90fee6c9b3bb346a90a7196255e40b132ef",
+ "33e82092a0f1fb38f5649d5867fba28b503172b7035574bf8e5b7100a3052792"
+ "f2cf6b601e0a05945e335550bf648d782f46186c772c0f20d3cd0d6b8ca14b2f",
+ "664e45d5bba4ac931cd65d52017e4be9b19a515f669bea4703542a2c525cd3d3",
+ id="NIST256p-6"
+ ),
+ pytest.param(
+ NIST384p,
+ "3cc3122a68f0d95027ad38c067916ba0eb8c38894d22e1b1"
+ "5618b6818a661774ad463b205da88cf699ab4d43c9cf98a1",
+ "a7c76b970c3b5fe8b05d2838ae04ab47697b9eaf52e76459"
+ "2efda27fe7513272734466b400091adbf2d68c58e0c50066"
+ "ac68f19f2e1cb879aed43a9969b91a0839c4c38a49749b66"
+ "1efedf243451915ed0905a32b060992b468c64766fc8437a",
+ "5f9d29dc5e31a163060356213669c8ce132e22f57c9a04f4"
+ "0ba7fcead493b457e5621e766c40a2e3d4d6a04b25e533f1",
+ id="NIST384p"
+ ),
+ pytest.param(
+ NIST521p,
+ "017eecc07ab4b329068fba65e56a1f8890aa935e57134ae0ffcce802735151f4ea"
+ "c6564f6ee9974c5e6887a1fefee5743ae2241bfeb95d5ce31ddcb6f9edb4d6fc47",
+ "00685a48e86c79f0f0875f7bc18d25eb5fc8c0b07e5da4f4370f3a949034085433"
+ "4b1e1b87fa395464c60626124a4e70d0f785601d37c09870ebf176666877a2046d"
+ "01ba52c56fc8776d9e8f5db4f0cc27636d0b741bbe05400697942e80b739884a83"
+ "bde99e0f6716939e632bc8986fa18dccd443a348b6c3e522497955a4f3c302f676",
+ "005fc70477c3e63bc3954bd0df3ea0d1f41ee21746ed95fc5e1fdf90930d5e1366"
+ "72d72cc770742d1711c3c3a4c334a0ad9759436a4d3c5bf6e74b9578fac148c831",
+ id="NIST521p"
+ ),
+ ],
+)
+def test_ecdh_NIST(curve, privatekey, pubkey, secret):
+ ecdh = ECDH(curve=curve)
+ ecdh.load_private_key_bytes(unhexlify(privatekey))
+ ecdh.load_received_public_key_bytes(unhexlify(pubkey))
+
+ sharedsecret = ecdh.generate_sharedsecret_bytes()
+
+ assert sharedsecret == unhexlify(secret)
+
+
+pem_local_private_key = (
+ "-----BEGIN EC PRIVATE KEY-----\n"
+ "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n"
+ "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n"
+ "bA==\n"
+ "-----END EC PRIVATE KEY-----\n")
+der_local_private_key = (
+ "305f02010104185ec8420bd6ef9252a942e989043ca29f561fa525770eb1c5a00a06082a864"
+ "8ce3d030101a13403320004b88177d084ef17f5e45639408028360f9f59b4a4d7264e62da06"
+ "51dce47a35a4c5b45cf51593423a8b557b9c2099f36c")
+pem_remote_public_key = (
+ "-----BEGIN PUBLIC KEY-----\n"
+ "MEkwEwYHKoZIzj0CAQYIKoZIzj0DAQEDMgAEuIF30ITvF/XkVjlAgCg2D59ZtKTX\n"
+ "Jk5i2gZR3OR6NaTFtFz1FZNCOotVe5wgmfNs\n"
+ "-----END PUBLIC KEY-----\n")
+der_remote_public_key = (
+ "3049301306072a8648ce3d020106082a8648ce3d03010103320004b88177d084ef17f5e4563"
+ "9408028360f9f59b4a4d7264e62da0651dce47a35a4c5b45cf51593423a8b557b9c2099f36c")
+gshared_secret = "8f457e34982478d1c34b9cd2d0c15911b72dd60d869e2cea"
+
+
+def test_ecdh_pem():
+ ecdh = ECDH()
+ ecdh.load_private_key_pem(pem_local_private_key)
+ ecdh.load_received_public_key_pem(pem_remote_public_key)
+
+ sharedsecret = ecdh.generate_sharedsecret_bytes()
+
+ assert sharedsecret == unhexlify(gshared_secret)
+
+
+def test_ecdh_der():
+ ecdh = ECDH()
+ ecdh.load_private_key_der(unhexlify(der_local_private_key))
+ ecdh.load_received_public_key_der(unhexlify(der_remote_public_key))
+
+ sharedsecret = ecdh.generate_sharedsecret_bytes()
+
+ assert sharedsecret == unhexlify(gshared_secret)
+
+
+# Exception classes used by run_openssl.
+class RunOpenSslError(Exception):
+ pass
+
+
+def run_openssl(cmd):
+ OPENSSL = "openssl"
+ p = subprocess.Popen([OPENSSL] + cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ stdout, ignored = p.communicate()
+ if p.returncode != 0:
+ raise RunOpenSslError(
+ "cmd '%s %s' failed: rc=%s, stdout/err was %s" %
+ (OPENSSL, cmd, p.returncode, stdout))
+ return stdout.decode()
+
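+# e.g. run_openssl("version") returns the OpenSSL version banner; a non-zero
+# exit status raises RunOpenSslError (added note)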
+
+OPENSSL_SUPPORTED_CURVES = set(c.split(':')[0].strip() for c in
+ run_openssl("ecparam -list_curves")
+ .split('\n'))
+
+
+@pytest.mark.parametrize("vcurve", curves, ids=[curve.name for curve in curves])
+def test_ecdh_with_openssl(vcurve):
+ assert vcurve.openssl_name
+
+ if vcurve.openssl_name not in OPENSSL_SUPPORTED_CURVES:
+ pytest.skip("system openssl does not support " + vcurve.openssl_name)
+ return
+
+ try:
+ hlp = run_openssl("pkeyutl -help")
+        if hlp.find("-derive") < 0:  # str.find() returns -1 when absent
+ pytest.skip("system openssl does not support `pkeyutl -derive`")
+ return
+ except RunOpenSslError:
+ pytest.skip("system openssl does not support `pkeyutl -derive`")
+ return
+
+ if os.path.isdir("t"):
+ shutil.rmtree("t")
+ os.mkdir("t")
+ run_openssl("ecparam -name %s -genkey -out t/privkey1.pem" % vcurve.openssl_name)
+ run_openssl("ecparam -name %s -genkey -out t/privkey2.pem" % vcurve.openssl_name)
+ run_openssl("ec -in t/privkey1.pem -pubout -out t/pubkey1.pem")
+
+ ecdh1 = ECDH(curve=vcurve)
+ ecdh2 = ECDH(curve=vcurve)
+ with open("t/privkey1.pem") as e:
+ key = e.read()
+ ecdh1.load_private_key_pem(key)
+ with open("t/privkey2.pem") as e:
+ key = e.read()
+ ecdh2.load_private_key_pem(key)
+
+ with open("t/pubkey1.pem") as e:
+ key = e.read()
+ vk1 = VerifyingKey.from_pem(key)
+ assert vk1.to_string() == ecdh1.get_public_key().to_string()
+ vk2 = ecdh2.get_public_key()
+ with open("t/pubkey2.pem", "wb") as e:
+ e.write(vk2.to_pem())
+
+ ecdh1.load_received_public_key(vk2)
+ ecdh2.load_received_public_key(vk1)
+ secret1 = ecdh1.generate_sharedsecret_bytes()
+ secret2 = ecdh2.generate_sharedsecret_bytes()
+
+ assert secret1 == secret2
+
+ try:
+ run_openssl("pkeyutl -derive -inkey t/privkey1.pem -peerkey t/pubkey2.pem -out t/secret1")
+ run_openssl("pkeyutl -derive -inkey t/privkey2.pem -peerkey t/pubkey1.pem -out t/secret2")
+ except RunOpenSslError:
+ pytest.skip("system openssl does not support `pkeyutl -derive`")
+ return
+
+ with open("t/secret1", "rb") as e:
+ ssl_secret1 = e.read()
+ with open("t/secret1", "rb") as e:
+ ssl_secret2 = e.read()
+
+ if len(ssl_secret1) != vk1.curve.baselen:
+ pytest.skip("system openssl does not support `pkeyutl -derive`")
+ return
+
+ assert ssl_secret1 == ssl_secret2
+ assert secret1 == ssl_secret1
diff --git a/third_party/python/ecdsa/ecdsa/test_ecdsa.py b/third_party/python/ecdsa/ecdsa/test_ecdsa.py
new file mode 100644
index 0000000000..71c68913ac
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_ecdsa.py
@@ -0,0 +1,448 @@
+from __future__ import print_function
+import sys
+import hypothesis.strategies as st
+from hypothesis import given, settings, note, example
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+import pytest
+from .ecdsa import Private_key, Public_key, Signature, \
+ generator_192, digest_integer, ellipticcurve, point_is_valid, \
+ generator_224, generator_256, generator_384, generator_521, \
+ generator_secp256k1
+
+
+HYP_SETTINGS = {}
+# old hypothesis doesn't have the "deadline" setting
+if sys.version_info > (2, 7): # pragma: no branch
+ # SEC521p is slow, allow long execution for it
+ HYP_SETTINGS["deadline"] = 5000
+
+
+class TestP192FromX9_62(unittest.TestCase):
+ """Check test vectors from X9.62"""
+ @classmethod
+ def setUpClass(cls):
+ cls.d = 651056770906015076056810763456358567190100156695615665659
+ cls.Q = cls.d * generator_192
+ cls.k = 6140507067065001063065065565667405560006161556565665656654
+ cls.R = cls.k * generator_192
+
+ cls.msg = 968236873715988614170569073515315707566766479517
+ cls.pubk = Public_key(generator_192, generator_192 * cls.d)
+ cls.privk = Private_key(cls.pubk, cls.d)
+ cls.sig = cls.privk.sign(cls.msg, cls.k)
+
+ def test_point_multiplication(self):
+ assert self.Q.x() == 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5
+
+ def test_point_multiplication_2(self):
+ assert self.R.x() == 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD
+ assert self.R.y() == 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835
+
+ def test_mult_and_addition(self):
+ u1 = 2563697409189434185194736134579731015366492496392189760599
+ u2 = 6266643813348617967186477710235785849136406323338782220568
+ temp = u1 * generator_192 + u2 * self.Q
+ assert temp.x() == 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD
+ assert temp.y() == 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835
+
+ def test_signature(self):
+ r, s = self.sig.r, self.sig.s
+ assert r == 3342403536405981729393488334694600415596881826869351677613
+ assert s == 5735822328888155254683894997897571951568553642892029982342
+
+ def test_verification(self):
+ assert self.pubk.verifies(self.msg, self.sig)
+
+ def test_rejection(self):
+ assert not self.pubk.verifies(self.msg - 1, self.sig)
+
+
+class TestPublicKey(unittest.TestCase):
+
+ def test_equality_public_keys(self):
+ gen = generator_192
+ x = 0xc58d61f88d905293bcd4cd0080bcb1b7f811f2ffa41979f6
+ y = 0x8804dc7a7c4c7f8b5d437f5156f3312ca7d6de8a0e11867f
+ point = ellipticcurve.Point(gen.curve(), x, y)
+ pub_key1 = Public_key(gen, point)
+ pub_key2 = Public_key(gen, point)
+ self.assertEqual(pub_key1, pub_key2)
+
+ def test_inequality_public_key(self):
+ gen = generator_192
+ x1 = 0xc58d61f88d905293bcd4cd0080bcb1b7f811f2ffa41979f6
+ y1 = 0x8804dc7a7c4c7f8b5d437f5156f3312ca7d6de8a0e11867f
+ point1 = ellipticcurve.Point(gen.curve(), x1, y1)
+
+ x2 = 0x6a223d00bd22c52833409a163e057e5b5da1def2a197dd15
+ y2 = 0x7b482604199367f1f303f9ef627f922f97023e90eae08abf
+ point2 = ellipticcurve.Point(gen.curve(), x2, y2)
+
+ pub_key1 = Public_key(gen, point1)
+ pub_key2 = Public_key(gen, point2)
+ self.assertNotEqual(pub_key1, pub_key2)
+
+ def test_inequality_public_key_not_implemented(self):
+ gen = generator_192
+ x = 0xc58d61f88d905293bcd4cd0080bcb1b7f811f2ffa41979f6
+ y = 0x8804dc7a7c4c7f8b5d437f5156f3312ca7d6de8a0e11867f
+ point = ellipticcurve.Point(gen.curve(), x, y)
+ pub_key = Public_key(gen, point)
+ self.assertNotEqual(pub_key, None)
+
+
+class TestPrivateKey(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ gen = generator_192
+ x = 0xc58d61f88d905293bcd4cd0080bcb1b7f811f2ffa41979f6
+ y = 0x8804dc7a7c4c7f8b5d437f5156f3312ca7d6de8a0e11867f
+ point = ellipticcurve.Point(gen.curve(), x, y)
+ cls.pub_key = Public_key(gen, point)
+
+ def test_equality_private_keys(self):
+ pr_key1 = Private_key(self.pub_key, 100)
+ pr_key2 = Private_key(self.pub_key, 100)
+ self.assertEqual(pr_key1, pr_key2)
+
+ def test_inequality_private_keys(self):
+ pr_key1 = Private_key(self.pub_key, 100)
+ pr_key2 = Private_key(self.pub_key, 200)
+ self.assertNotEqual(pr_key1, pr_key2)
+
+ def test_inequality_private_keys_not_implemented(self):
+ pr_key = Private_key(self.pub_key, 100)
+ self.assertNotEqual(pr_key, None)
+
+
+# Testing point validity, as per ECDSAVS.pdf B.2.2:
+P192_POINTS = [
+ (generator_192,
+ 0xcd6d0f029a023e9aaca429615b8f577abee685d8257cc83a,
+ 0x00019c410987680e9fb6c0b6ecc01d9a2647c8bae27721bacdfc,
+ False),
+
+ (generator_192,
+ 0x00017f2fce203639e9eaf9fb50b81fc32776b30e3b02af16c73b,
+ 0x95da95c5e72dd48e229d4748d4eee658a9a54111b23b2adb,
+ False),
+
+ (generator_192,
+ 0x4f77f8bc7fccbadd5760f4938746d5f253ee2168c1cf2792,
+ 0x000147156ff824d131629739817edb197717c41aab5c2a70f0f6,
+ False),
+
+ (generator_192,
+ 0xc58d61f88d905293bcd4cd0080bcb1b7f811f2ffa41979f6,
+ 0x8804dc7a7c4c7f8b5d437f5156f3312ca7d6de8a0e11867f,
+ True),
+
+ (generator_192,
+ 0xcdf56c1aa3d8afc53c521adf3ffb96734a6a630a4a5b5a70,
+ 0x97c1c44a5fb229007b5ec5d25f7413d170068ffd023caa4e,
+ True),
+
+ (generator_192,
+ 0x89009c0dc361c81e99280c8e91df578df88cdf4b0cdedced,
+ 0x27be44a529b7513e727251f128b34262a0fd4d8ec82377b9,
+ True),
+
+ (generator_192,
+ 0x6a223d00bd22c52833409a163e057e5b5da1def2a197dd15,
+ 0x7b482604199367f1f303f9ef627f922f97023e90eae08abf,
+ True),
+
+ (generator_192,
+ 0x6dccbde75c0948c98dab32ea0bc59fe125cf0fb1a3798eda,
+ 0x0001171a3e0fa60cf3096f4e116b556198de430e1fbd330c8835,
+ False),
+
+ (generator_192,
+ 0xd266b39e1f491fc4acbbbc7d098430931cfa66d55015af12,
+ 0x193782eb909e391a3148b7764e6b234aa94e48d30a16dbb2,
+ False),
+
+ (generator_192,
+ 0x9d6ddbcd439baa0c6b80a654091680e462a7d1d3f1ffeb43,
+ 0x6ad8efc4d133ccf167c44eb4691c80abffb9f82b932b8caa,
+ False),
+
+ (generator_192,
+ 0x146479d944e6bda87e5b35818aa666a4c998a71f4e95edbc,
+ 0xa86d6fe62bc8fbd88139693f842635f687f132255858e7f6,
+ False),
+
+ (generator_192,
+ 0xe594d4a598046f3598243f50fd2c7bd7d380edb055802253,
+ 0x509014c0c4d6b536e3ca750ec09066af39b4c8616a53a923,
+ False)]
+
+
+@pytest.mark.parametrize("generator,x,y,expected", P192_POINTS)
+def test_point_validity(generator, x, y, expected):
+ """
+ `generator` defines the curve; is `(x, y)` a point on
+ this curve? `expected` is True if the right answer is Yes.
+ """
+ assert point_is_valid(generator, x, y) == expected
+
+
+# Trying signature-verification tests from ECDSAVS.pdf B.2.4:
+CURVE_192_KATS = [
+ (generator_192,
+ int("0x84ce72aa8699df436059f052ac51b6398d2511e49631bcb7e71f89c499b9ee"
+ "425dfbc13a5f6d408471b054f2655617cbbaf7937b7c80cd8865cf02c8487d30"
+ "d2b0fbd8b2c4e102e16d828374bbc47b93852f212d5043c3ea720f086178ff79"
+ "8cc4f63f787b9c2e419efa033e7644ea7936f54462dc21a6c4580725f7f0e7d1"
+ "58", 16),
+ 0xd9dbfb332aa8e5ff091e8ce535857c37c73f6250ffb2e7ac,
+ 0x282102e364feded3ad15ddf968f88d8321aa268dd483ebc4,
+ 0x64dca58a20787c488d11d6dd96313f1b766f2d8efe122916,
+ 0x1ecba28141e84ab4ecad92f56720e2cc83eb3d22dec72479,
+ True),
+
+ (generator_192,
+ int("0x94bb5bacd5f8ea765810024db87f4224ad71362a3c28284b2b9f39fab86db1"
+ "2e8beb94aae899768229be8fdb6c4f12f28912bb604703a79ccff769c1607f5a"
+ "91450f30ba0460d359d9126cbd6296be6d9c4bb96c0ee74cbb44197c207f6db3"
+ "26ab6f5a659113a9034e54be7b041ced9dcf6458d7fb9cbfb2744d999f7dfd63"
+ "f4", 16),
+ 0x3e53ef8d3112af3285c0e74842090712cd324832d4277ae7,
+ 0xcc75f8952d30aec2cbb719fc6aa9934590b5d0ff5a83adb7,
+ 0x8285261607283ba18f335026130bab31840dcfd9c3e555af,
+ 0x356d89e1b04541afc9704a45e9c535ce4a50929e33d7e06c,
+ True),
+
+ (generator_192,
+ int("0xf6227a8eeb34afed1621dcc89a91d72ea212cb2f476839d9b4243c66877911"
+ "b37b4ad6f4448792a7bbba76c63bdd63414b6facab7dc71c3396a73bd7ee14cd"
+ "d41a659c61c99b779cecf07bc51ab391aa3252386242b9853ea7da67fd768d30"
+ "3f1b9b513d401565b6f1eb722dfdb96b519fe4f9bd5de67ae131e64b40e78c42"
+ "dd", 16),
+ 0x16335dbe95f8e8254a4e04575d736befb258b8657f773cb7,
+ 0x421b13379c59bc9dce38a1099ca79bbd06d647c7f6242336,
+ 0x4141bd5d64ea36c5b0bd21ef28c02da216ed9d04522b1e91,
+ 0x159a6aa852bcc579e821b7bb0994c0861fb08280c38daa09,
+ False),
+
+ (generator_192,
+ int("0x16b5f93afd0d02246f662761ed8e0dd9504681ed02a253006eb36736b56309"
+ "7ba39f81c8e1bce7a16c1339e345efabbc6baa3efb0612948ae51103382a8ee8"
+ "bc448e3ef71e9f6f7a9676694831d7f5dd0db5446f179bcb737d4a526367a447"
+ "bfe2c857521c7f40b6d7d7e01a180d92431fb0bbd29c04a0c420a57b3ed26ccd"
+ "8a", 16),
+ 0xfd14cdf1607f5efb7b1793037b15bdf4baa6f7c16341ab0b,
+ 0x83fa0795cc6c4795b9016dac928fd6bac32f3229a96312c4,
+ 0x8dfdb832951e0167c5d762a473c0416c5c15bc1195667dc1,
+ 0x1720288a2dc13fa1ec78f763f8fe2ff7354a7e6fdde44520,
+ False),
+
+ (generator_192,
+ int("0x08a2024b61b79d260e3bb43ef15659aec89e5b560199bc82cf7c65c77d3919"
+ "2e03b9a895d766655105edd9188242b91fbde4167f7862d4ddd61e5d4ab55196"
+ "683d4f13ceb90d87aea6e07eb50a874e33086c4a7cb0273a8e1c4408f4b846bc"
+ "eae1ebaac1b2b2ea851a9b09de322efe34cebe601653efd6ddc876ce8c2f2072"
+ "fb", 16),
+ 0x674f941dc1a1f8b763c9334d726172d527b90ca324db8828,
+ 0x65adfa32e8b236cb33a3e84cf59bfb9417ae7e8ede57a7ff,
+ 0x9508b9fdd7daf0d8126f9e2bc5a35e4c6d800b5b804d7796,
+ 0x36f2bf6b21b987c77b53bb801b3435a577e3d493744bfab0,
+ False),
+
+ (generator_192,
+ int("0x1843aba74b0789d4ac6b0b8923848023a644a7b70afa23b1191829bbe4397c"
+ "e15b629bf21a8838298653ed0c19222b95fa4f7390d1b4c844d96e645537e0aa"
+ "e98afb5c0ac3bd0e4c37f8daaff25556c64e98c319c52687c904c4de7240a1cc"
+ "55cd9756b7edaef184e6e23b385726e9ffcba8001b8f574987c1a3fedaaa83ca"
+ "6d", 16),
+ 0x10ecca1aad7220b56a62008b35170bfd5e35885c4014a19f,
+ 0x04eb61984c6c12ade3bc47f3c629ece7aa0a033b9948d686,
+ 0x82bfa4e82c0dfe9274169b86694e76ce993fd83b5c60f325,
+ 0xa97685676c59a65dbde002fe9d613431fb183e8006d05633,
+ False),
+
+ (generator_192,
+ int("0x5a478f4084ddd1a7fea038aa9732a822106385797d02311aeef4d0264f824f"
+ "698df7a48cfb6b578cf3da416bc0799425bb491be5b5ecc37995b85b03420a98"
+ "f2c4dc5c31a69a379e9e322fbe706bbcaf0f77175e05cbb4fa162e0da82010a2"
+ "78461e3e974d137bc746d1880d6eb02aa95216014b37480d84b87f717bb13f76"
+ "e1", 16),
+ 0x6636653cb5b894ca65c448277b29da3ad101c4c2300f7c04,
+ 0xfdf1cbb3fc3fd6a4f890b59e554544175fa77dbdbeb656c1,
+ 0xeac2ddecddfb79931a9c3d49c08de0645c783a24cb365e1c,
+ 0x3549fee3cfa7e5f93bc47d92d8ba100e881a2a93c22f8d50,
+ False),
+
+ (generator_192,
+ int("0xc598774259a058fa65212ac57eaa4f52240e629ef4c310722088292d1d4af6"
+ "c39b49ce06ba77e4247b20637174d0bd67c9723feb57b5ead232b47ea452d5d7"
+ "a089f17c00b8b6767e434a5e16c231ba0efa718a340bf41d67ea2d295812ff1b"
+ "9277daacb8bc27b50ea5e6443bcf95ef4e9f5468fe78485236313d53d1c68f6b"
+ "a2", 16),
+ 0xa82bd718d01d354001148cd5f69b9ebf38ff6f21898f8aaa,
+ 0xe67ceede07fc2ebfafd62462a51e4b6c6b3d5b537b7caf3e,
+ 0x4d292486c620c3de20856e57d3bb72fcde4a73ad26376955,
+ 0xa85289591a6081d5728825520e62ff1c64f94235c04c7f95,
+ False),
+
+ (generator_192,
+ int("0xca98ed9db081a07b7557f24ced6c7b9891269a95d2026747add9e9eb80638a"
+ "961cf9c71a1b9f2c29744180bd4c3d3db60f2243c5c0b7cc8a8d40a3f9a7fc91"
+ "0250f2187136ee6413ffc67f1a25e1c4c204fa9635312252ac0e0481d89b6d53"
+ "808f0c496ba87631803f6c572c1f61fa049737fdacce4adff757afed4f05beb6"
+ "58", 16),
+ 0x7d3b016b57758b160c4fca73d48df07ae3b6b30225126c2f,
+ 0x4af3790d9775742bde46f8da876711be1b65244b2b39e7ec,
+ 0x95f778f5f656511a5ab49a5d69ddd0929563c29cbc3a9e62,
+ 0x75c87fc358c251b4c83d2dd979faad496b539f9f2ee7a289,
+ False),
+
+ (generator_192,
+ int("0x31dd9a54c8338bea06b87eca813d555ad1850fac9742ef0bbe40dad400e102"
+ "88acc9c11ea7dac79eb16378ebea9490e09536099f1b993e2653cd50240014c9"
+ "0a9c987f64545abc6a536b9bd2435eb5e911fdfde2f13be96ea36ad38df4ae9e"
+ "a387b29cced599af777338af2794820c9cce43b51d2112380a35802ab7e396c9"
+ "7a", 16),
+ 0x9362f28c4ef96453d8a2f849f21e881cd7566887da8beb4a,
+ 0xe64d26d8d74c48a024ae85d982ee74cd16046f4ee5333905,
+ 0xf3923476a296c88287e8de914b0b324ad5a963319a4fe73b,
+ 0xf0baeed7624ed00d15244d8ba2aede085517dbdec8ac65f5,
+ True),
+
+ (generator_192,
+ int("0xb2b94e4432267c92f9fdb9dc6040c95ffa477652761290d3c7de312283f645"
+ "0d89cc4aabe748554dfb6056b2d8e99c7aeaad9cdddebdee9dbc099839562d90"
+ "64e68e7bb5f3a6bba0749ca9a538181fc785553a4000785d73cc207922f63e8c"
+ "e1112768cb1de7b673aed83a1e4a74592f1268d8e2a4e9e63d414b5d442bd045"
+ "6d", 16),
+ 0xcc6fc032a846aaac25533eb033522824f94e670fa997ecef,
+ 0xe25463ef77a029eccda8b294fd63dd694e38d223d30862f1,
+ 0x066b1d07f3a40e679b620eda7f550842a35c18b80c5ebe06,
+ 0xa0b0fb201e8f2df65e2c4508ef303bdc90d934016f16b2dc,
+ False),
+
+ (generator_192,
+ int("0x4366fcadf10d30d086911de30143da6f579527036937007b337f7282460eae"
+ "5678b15cccda853193ea5fc4bc0a6b9d7a31128f27e1214988592827520b214e"
+ "ed5052f7775b750b0c6b15f145453ba3fee24a085d65287e10509eb5d5f602c4"
+ "40341376b95c24e5c4727d4b859bfe1483d20538acdd92c7997fa9c614f0f839"
+ "d7", 16),
+ 0x955c908fe900a996f7e2089bee2f6376830f76a19135e753,
+ 0xba0c42a91d3847de4a592a46dc3fdaf45a7cc709b90de520,
+ 0x1f58ad77fc04c782815a1405b0925e72095d906cbf52a668,
+ 0xf2e93758b3af75edf784f05a6761c9b9a6043c66b845b599,
+ False),
+
+ (generator_192,
+ int("0x543f8af57d750e33aa8565e0cae92bfa7a1ff78833093421c2942cadf99866"
+ "70a5ff3244c02a8225e790fbf30ea84c74720abf99cfd10d02d34377c3d3b412"
+ "69bea763384f372bb786b5846f58932defa68023136cd571863b304886e95e52"
+ "e7877f445b9364b3f06f3c28da12707673fecb4b8071de06b6e0a3c87da160ce"
+ "f3", 16),
+ 0x31f7fa05576d78a949b24812d4383107a9a45bb5fccdd835,
+ 0x8dc0eb65994a90f02b5e19bd18b32d61150746c09107e76b,
+ 0xbe26d59e4e883dde7c286614a767b31e49ad88789d3a78ff,
+ 0x8762ca831c1ce42df77893c9b03119428e7a9b819b619068,
+ False),
+
+ (generator_192,
+ int("0xd2e8454143ce281e609a9d748014dcebb9d0bc53adb02443a6aac2ffe6cb009f"
+ "387c346ecb051791404f79e902ee333ad65e5c8cb38dc0d1d39a8dc90add502357"
+ "2720e5b94b190d43dd0d7873397504c0c7aef2727e628eb6a74411f2e400c65670"
+ "716cb4a815dc91cbbfeb7cfe8c929e93184c938af2c078584da045e8f8d1", 16),
+ 0x66aa8edbbdb5cf8e28ceb51b5bda891cae2df84819fe25c0,
+ 0x0c6bc2f69030a7ce58d4a00e3b3349844784a13b8936f8da,
+ 0xa4661e69b1734f4a71b788410a464b71e7ffe42334484f23,
+ 0x738421cf5e049159d69c57a915143e226cac8355e149afe9,
+ False),
+
+ (generator_192,
+ int("0x6660717144040f3e2f95a4e25b08a7079c702a8b29babad5a19a87654bc5c5af"
+ "a261512a11b998a4fb36b5d8fe8bd942792ff0324b108120de86d63f65855e5461"
+ "184fc96a0a8ffd2ce6d5dfb0230cbbdd98f8543e361b3205f5da3d500fdc8bac6d"
+ "b377d75ebef3cb8f4d1ff738071ad0938917889250b41dd1d98896ca06fb", 16),
+ 0xbcfacf45139b6f5f690a4c35a5fffa498794136a2353fc77,
+ 0x6f4a6c906316a6afc6d98fe1f0399d056f128fe0270b0f22,
+ 0x9db679a3dafe48f7ccad122933acfe9da0970b71c94c21c1,
+ 0x984c2db99827576c0a41a5da41e07d8cc768bc82f18c9da9,
+ False)
+ ]
+
+
+@pytest.mark.parametrize("gen,msg,qx,qy,r,s,expected", CURVE_192_KATS)
+def test_signature_validity(gen, msg, qx, qy, r, s, expected):
+ """
+ `msg` = message, `qx` and `qy` represent the base point on
+ elliptic curve of `gen`, `r` and `s` are the signature, and
+ `expected` is True iff the signature is expected to be valid."""
+ pubk = Public_key(gen,
+ ellipticcurve.Point(gen.curve(), qx, qy))
+ assert expected == pubk.verifies(digest_integer(msg), Signature(r, s))
+
+
+@pytest.mark.parametrize("gen,msg,qx,qy,r,s,expected",
+ [x for x in CURVE_192_KATS if x[6]])
+def test_pk_recovery(gen, msg, r, s, qx, qy, expected):
+ del expected
+ sign = Signature(r, s)
+ pks = sign.recover_public_keys(digest_integer(msg), gen)
+
+ assert pks
+
+ # Test if the signature is valid for all found public keys
+ for pk in pks:
+ q = pk.point
+ test_signature_validity(gen, msg, q.x(), q.y(), r, s, True)
+
+ # Test if the original public key is in the set of found keys
+ original_q = ellipticcurve.Point(gen.curve(), qx, qy)
+ points = [pk.point for pk in pks]
+ assert original_q in points
+
+
+@st.composite
+def st_random_gen_key_msg_nonce(draw):
+ """Hypothesis strategy for test_sig_verify()."""
+ name_gen = {
+ "generator_192": generator_192,
+ "generator_224": generator_224,
+ "generator_256": generator_256,
+ "generator_secp256k1": generator_secp256k1,
+ "generator_384": generator_384,
+ "generator_521": generator_521}
+ name = draw(st.sampled_from(sorted(name_gen.keys())))
+ note("Generator used: {0}".format(name))
+ generator = name_gen[name]
+ order = int(generator.order())
+
+ key = draw(st.integers(min_value=1, max_value=order))
+ msg = draw(st.integers(min_value=1, max_value=order))
+ nonce = draw(st.integers(min_value=1, max_value=order+1) |
+ st.integers(min_value=order>>1, max_value=order))
+ return generator, key, msg, nonce
+
+
+SIG_VER_SETTINGS = dict(HYP_SETTINGS)
+SIG_VER_SETTINGS["max_examples"] = 10
+@settings(**SIG_VER_SETTINGS)
+@example((generator_224, 4, 1, 1))
+@given(st_random_gen_key_msg_nonce())
+def test_sig_verify(args):
+ """
+    Check if signing and verification work for arbitrary messages and
+ that signatures for other messages are rejected.
+ """
+ generator, sec_mult, msg, nonce = args
+
+ pubkey = Public_key(generator, generator * sec_mult)
+ privkey = Private_key(pubkey, sec_mult)
+
+ signature = privkey.sign(msg, nonce)
+
+ assert pubkey.verifies(msg, signature)
+
+ assert not pubkey.verifies(msg - 1, signature)
diff --git a/third_party/python/ecdsa/ecdsa/test_ellipticcurve.py b/third_party/python/ecdsa/ecdsa/test_ellipticcurve.py
new file mode 100644
index 0000000000..924134cecd
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_ellipticcurve.py
@@ -0,0 +1,188 @@
+import pytest
+from six import print_
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+from hypothesis import given, settings
+import hypothesis.strategies as st
+try:
+    from hypothesis import HealthCheck
+    HC_PRESENT = True
+except ImportError: # pragma: no cover
+    HC_PRESENT = False
+from .numbertheory import inverse_mod
+from .ellipticcurve import CurveFp, INFINITY, Point
+
+
+HYP_SETTINGS = {}
+if HC_PRESENT: # pragma: no branch
+    HYP_SETTINGS['suppress_health_check'] = [HealthCheck.too_slow]
+ HYP_SETTINGS['deadline'] = 5000
+
+
+# NIST Curve P-192:
+p = 6277101735386680763835789423207666416083908700390324961279
+r = 6277101735386680763835789423176059013767194773182842284081
+# s = 0x3045ae6fc8422f64ed579528d38120eae12196d5
+# c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65
+b = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
+Gx = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
+Gy = 0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811
+
+c192 = CurveFp(p, -3, b)
+p192 = Point(c192, Gx, Gy, r)
+
+c_23 = CurveFp(23, 1, 1)
+g_23 = Point(c_23, 13, 7, 7)
+
+
+HYP_SLOW_SETTINGS = dict(HYP_SETTINGS)
+HYP_SLOW_SETTINGS["max_examples"] = 10
+
+
+@settings(**HYP_SLOW_SETTINGS)
+@given(st.integers(min_value=1, max_value=r+1))
+def test_p192_mult_tests(multiple):
+ inv_m = inverse_mod(multiple, r)
+
+ p1 = p192 * multiple
+ assert p1 * inv_m == p192
+
+
+def add_n_times(point, n):
+ ret = INFINITY
+ i = 0
+ while i <= n:
+ yield ret
+ ret = ret + point
+ i += 1
+
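+# e.g. list(add_n_times(g_23, 2)) == [INFINITY, g_23, g_23 + g_23]
+# (added example)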
+
+# From X9.62 I.1 (p. 96):
+@pytest.mark.parametrize(
+ "p, m, check",
+ [(g_23, n, exp) for n, exp in enumerate(add_n_times(g_23, 8))],
+ ids=["g_23 test with mult {0}".format(i) for i in range(9)])
+def test_add_and_mult_equivalence(p, m, check):
+ assert p * m == check
+
+
+class TestCurve(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.c_23 = CurveFp(23, 1, 1)
+
+ def test_equality_curves(self):
+ self.assertEqual(self.c_23, CurveFp(23, 1, 1))
+
+ def test_inequality_curves(self):
+ c192 = CurveFp(p, -3, b)
+ self.assertNotEqual(self.c_23, c192)
+
+ def test_usability_in_a_hashed_collection_curves(self):
+ {self.c_23: None}
+
+ def test_hashability_curves(self):
+ hash(self.c_23)
+
+ def test_conflation_curves(self):
+ ne1, ne2, ne3 = CurveFp(24, 1, 1), CurveFp(23, 2, 1), CurveFp(23, 1, 2)
+ eq1, eq2, eq3 = CurveFp(23, 1, 1), CurveFp(23, 1, 1), self.c_23
+ self.assertEqual(len(set((c_23, eq1, eq2, eq3))), 1)
+ self.assertEqual(len(set((c_23, ne1, ne2, ne3))), 4)
+ self.assertDictEqual({c_23: None}, {eq1: None})
+ self.assertTrue(eq2 in {eq3: None})
+
+
+class TestPoint(unittest.TestCase):
+
+ @classmethod
+ def setUpClass(cls):
+ cls.c_23 = CurveFp(23, 1, 1)
+ cls.g_23 = Point(cls.c_23, 13, 7, 7)
+
+ p = 6277101735386680763835789423207666416083908700390324961279
+ r = 6277101735386680763835789423176059013767194773182842284081
+ # s = 0x3045ae6fc8422f64ed579528d38120eae12196d5
+ # c = 0x3099d2bbbfcb2538542dcd5fb078b6ef5f3d6fe2c745de65
+ b = 0x64210519e59c80e70fa7e9ab72243049feb8deecc146b9b1
+ Gx = 0x188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012
+ Gy = 0x07192b95ffc8da78631011ed6b24cdd573f977a11e794811
+
+ cls.c192 = CurveFp(p, -3, b)
+ cls.p192 = Point(cls.c192, Gx, Gy, r)
+
+ def test_p192(self):
+ # Checking against some sample computations presented
+ # in X9.62:
+ d = 651056770906015076056810763456358567190100156695615665659
+ Q = d * self.p192
+ self.assertEqual(Q.x(), 0x62B12D60690CDCF330BABAB6E69763B471F994DD702D16A5)
+
+ k = 6140507067065001063065065565667405560006161556565665656654
+ R = k * self.p192
+ self.assertEqual(R.x(), 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD)
+ self.assertEqual(R.y(), 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835)
+
+ u1 = 2563697409189434185194736134579731015366492496392189760599
+ u2 = 6266643813348617967186477710235785849136406323338782220568
+ temp = u1 * self.p192 + u2 * Q
+ self.assertEqual(temp.x(), 0x885052380FF147B734C330C43D39B2C4A89F29B0F749FEAD)
+ self.assertEqual(temp.y(), 0x9CF9FA1CBEFEFB917747A3BB29C072B9289C2547884FD835)
+
+ def test_double_infinity(self):
+ p1 = INFINITY
+ p3 = p1.double()
+ self.assertEqual(p1, p3)
+ self.assertEqual(p3.x(), p1.x())
+        self.assertEqual(p3.y(), p1.y())
+
+ def test_double(self):
+ x1, y1, x3, y3 = (3, 10, 7, 12)
+
+ p1 = Point(self.c_23, x1, y1)
+ p3 = p1.double()
+ self.assertEqual(p3.x(), x3)
+ self.assertEqual(p3.y(), y3)
+
+ def test_multiply(self):
+ x1, y1, m, x3, y3 = (3, 10, 2, 7, 12)
+ p1 = Point(self.c_23, x1, y1)
+ p3 = p1 * m
+ self.assertEqual(p3.x(), x3)
+ self.assertEqual(p3.y(), y3)
+
+ # Trivial tests from X9.62 B.3:
+ def test_add(self):
+ """We expect that on curve c, (x1,y1) + (x2, y2 ) = (x3, y3)."""
+
+ x1, y1, x2, y2, x3, y3 = (3, 10, 9, 7, 17, 20)
+ p1 = Point(self.c_23, x1, y1)
+ p2 = Point(self.c_23, x2, y2)
+ p3 = p1 + p2
+ self.assertEqual(p3.x(), x3)
+ self.assertEqual(p3.y(), y3)
+
+ def test_add_as_double(self):
+ """We expect that on curve c, (x1,y1) + (x2, y2 ) = (x3, y3)."""
+
+ x1, y1, x2, y2, x3, y3 = (3, 10, 3, 10, 7, 12)
+ p1 = Point(self.c_23, x1, y1)
+ p2 = Point(self.c_23, x2, y2)
+ p3 = p1 + p2
+ self.assertEqual(p3.x(), x3)
+ self.assertEqual(p3.y(), y3)
+
+ def test_equality_points(self):
+ self.assertEqual(self.g_23, Point(self.c_23, 13, 7, 7))
+
+ def test_inequality_points(self):
+ c = CurveFp(100, -3, 100)
+ p = Point(c, 100, 100, 100)
+ self.assertNotEqual(self.g_23, p)
+
+    def test_inequality_points_diff_types(self):
+ c = CurveFp(100, -3, 100)
+ self.assertNotEqual(self.g_23, c)
diff --git a/third_party/python/ecdsa/ecdsa/test_jacobi.py b/third_party/python/ecdsa/ecdsa/test_jacobi.py
new file mode 100644
index 0000000000..35e524212a
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_jacobi.py
@@ -0,0 +1,365 @@
+
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+
+import hypothesis.strategies as st
+from hypothesis import given, assume, settings, example
+
+from .ellipticcurve import Point, PointJacobi, INFINITY
+from .ecdsa import generator_256, curve_256, generator_224
+from .numbertheory import inverse_mod
+
+class TestJacobi(unittest.TestCase):
+ def test___init__(self):
+ curve = object()
+ x = 2
+ y = 3
+ z = 1
+ order = 4
+ pj = PointJacobi(curve, x, y, z, order)
+
+ self.assertEqual(pj.order(), order)
+ self.assertIs(pj.curve(), curve)
+ self.assertEqual(pj.x(), x)
+ self.assertEqual(pj.y(), y)
+
+ def test_add_with_different_curves(self):
+ p_a = PointJacobi.from_affine(generator_256)
+ p_b = PointJacobi.from_affine(generator_224)
+
+ with self.assertRaises(ValueError):
+ p_a + p_b
+
+ def test_compare_different_curves(self):
+ self.assertNotEqual(generator_256, generator_224)
+
+ def test_equality_with_non_point(self):
+ pj = PointJacobi.from_affine(generator_256)
+
+ self.assertNotEqual(pj, "value")
+
+ def test_conversion(self):
+ pj = PointJacobi.from_affine(generator_256)
+ pw = pj.to_affine()
+
+ self.assertEqual(generator_256, pw)
+
+ def test_single_double(self):
+ pj = PointJacobi.from_affine(generator_256)
+ pw = generator_256.double()
+
+ pj = pj.double()
+
+ self.assertEqual(pj.x(), pw.x())
+ self.assertEqual(pj.y(), pw.y())
+
+ def test_double_with_zero_point(self):
+ pj = PointJacobi(curve_256, 0, 0, 1)
+
+ pj = pj.double()
+
+ self.assertIs(pj, INFINITY)
+
+ def test_double_with_zero_equivalent_point(self):
+ pj = PointJacobi(curve_256, 0, curve_256.p(), 1)
+
+ pj = pj.double()
+
+ self.assertIs(pj, INFINITY)
+
+ def test_double_with_zero_equivalent_point_non_1_z(self):
+ pj = PointJacobi(curve_256, 0, curve_256.p(), 2)
+
+ pj = pj.double()
+
+ self.assertIs(pj, INFINITY)
+
+ def test_compare_with_affine_point(self):
+ pj = PointJacobi.from_affine(generator_256)
+ pa = pj.to_affine()
+
+ self.assertEqual(pj, pa)
+ self.assertEqual(pa, pj)
+
+ def test_to_affine_with_zero_point(self):
+ pj = PointJacobi(curve_256, 0, 0, 1)
+
+ pa = pj.to_affine()
+
+ self.assertIs(pa, INFINITY)
+
+ def test_add_with_affine_point(self):
+ pj = PointJacobi.from_affine(generator_256)
+ pa = pj.to_affine()
+
+ s = pj + pa
+
+ self.assertEqual(s, pj.double())
+
+ def test_radd_with_affine_point(self):
+ pj = PointJacobi.from_affine(generator_256)
+ pa = pj.to_affine()
+
+ s = pa + pj
+
+ self.assertEqual(s, pj.double())
+
+ def test_add_with_infinity(self):
+ pj = PointJacobi.from_affine(generator_256)
+
+ s = pj + INFINITY
+
+ self.assertEqual(s, pj)
+
+ def test_add_zero_point_to_affine(self):
+ pa = PointJacobi.from_affine(generator_256).to_affine()
+ pj = PointJacobi(curve_256, 0, 0, 1)
+
+ s = pj + pa
+
+ self.assertIs(s, pa)
+
+ def test_multiply_by_zero(self):
+ pj = PointJacobi.from_affine(generator_256)
+
+ pj = pj * 0
+
+ self.assertIs(pj, INFINITY)
+
+ def test_zero_point_multiply_by_one(self):
+ pj = PointJacobi(curve_256, 0, 0, 1)
+
+ pj = pj * 1
+
+ self.assertIs(pj, INFINITY)
+
+ def test_multiply_by_one(self):
+ pj = PointJacobi.from_affine(generator_256)
+ pw = generator_256 * 1
+
+ pj = pj * 1
+
+ self.assertEqual(pj.x(), pw.x())
+ self.assertEqual(pj.y(), pw.y())
+
+ def test_multiply_by_two(self):
+ pj = PointJacobi.from_affine(generator_256)
+ pw = generator_256 * 2
+
+ pj = pj * 2
+
+ self.assertEqual(pj.x(), pw.x())
+ self.assertEqual(pj.y(), pw.y())
+
+ def test_rmul_by_two(self):
+ pj = PointJacobi.from_affine(generator_256)
+ pw = generator_256 * 2
+
+ pj = 2 * pj
+
+ self.assertEqual(pj, pw)
+
+ def test_compare_non_zero_with_infinity(self):
+ pj = PointJacobi.from_affine(generator_256)
+
+ self.assertNotEqual(pj, INFINITY)
+
+ def test_compare_zero_point_with_infinity(self):
+ pj = PointJacobi(curve_256, 0, 0, 1)
+
+ self.assertEqual(pj, INFINITY)
+
+ def test_compare_double_with_multiply(self):
+ pj = PointJacobi.from_affine(generator_256)
+ dbl = pj.double()
+ mlpl = pj * 2
+
+ self.assertEqual(dbl, mlpl)
+
+ @settings(max_examples=10)
+ @given(st.integers(min_value=0, max_value=int(generator_256.order())))
+ def test_multiplications(self, mul):
+ pj = PointJacobi.from_affine(generator_256)
+ pw = pj.to_affine() * mul
+
+ pj = pj * mul
+
+ self.assertEqual((pj.x(), pj.y()), (pw.x(), pw.y()))
+ self.assertEqual(pj, pw)
+
+ @settings(max_examples=10)
+ @given(st.integers(min_value=0, max_value=int(generator_256.order())))
+ @example(0)
+ @example(int(generator_256.order()))
+ def test_precompute(self, mul):
+ precomp = PointJacobi.from_affine(generator_256, True)
+ pj = PointJacobi.from_affine(generator_256)
+
+ a = precomp * mul
+ b = pj * mul
+
+ self.assertEqual(a, b)
+
+ @settings(max_examples=10)
+ @given(st.integers(min_value=1, max_value=int(generator_256.order())),
+ st.integers(min_value=1, max_value=int(generator_256.order())))
+ @example(3, 3)
+ def test_add_scaled_points(self, a_mul, b_mul):
+ j_g = PointJacobi.from_affine(generator_256)
+ a = PointJacobi.from_affine(j_g * a_mul)
+ b = PointJacobi.from_affine(j_g * b_mul)
+
+ c = a + b
+
+ self.assertEqual(c, j_g * (a_mul + b_mul))
+
+ @settings(max_examples=10)
+ @given(st.integers(min_value=1, max_value=int(generator_256.order())),
+ st.integers(min_value=1, max_value=int(generator_256.order())),
+ st.integers(min_value=1, max_value=int(curve_256.p()-1)))
+ def test_add_one_scaled_point(self, a_mul, b_mul, new_z):
+ j_g = PointJacobi.from_affine(generator_256)
+ a = PointJacobi.from_affine(j_g * a_mul)
+ b = PointJacobi.from_affine(j_g * b_mul)
+
+ p = curve_256.p()
+
+ assume(inverse_mod(new_z, p))
+
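+ # in Jacobian coordinates (X, Y, Z) ~ (X/Z**2, Y/Z**3), so scaling
+ # X by Z**2 and Y by Z**3 produces a different internal representation
+ # of the same affine point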
+ new_zz = new_z * new_z % p
+
+ b = PointJacobi(
+ curve_256, b.x() * new_zz % p, b.y() * new_zz * new_z % p, new_z)
+
+ c = a + b
+
+ self.assertEqual(c, j_g * (a_mul + b_mul))
+
+ @settings(max_examples=10)
+ @given(st.integers(min_value=1, max_value=int(generator_256.order())),
+ st.integers(min_value=1, max_value=int(generator_256.order())),
+ st.integers(min_value=1, max_value=int(curve_256.p()-1)))
+ @example(1, 1, 1)
+ @example(3, 3, 3)
+ @example(2, int(generator_256.order()-2), 1)
+ @example(2, int(generator_256.order()-2), 3)
+ def test_add_same_scale_points(self, a_mul, b_mul, new_z):
+ j_g = PointJacobi.from_affine(generator_256)
+ a = PointJacobi.from_affine(j_g * a_mul)
+ b = PointJacobi.from_affine(j_g * b_mul)
+
+ p = curve_256.p()
+
+ assume(inverse_mod(new_z, p))
+
+ new_zz = new_z * new_z % p
+
+ a = PointJacobi(
+ curve_256, a.x() * new_zz % p, a.y() * new_zz * new_z % p, new_z)
+ b = PointJacobi(
+ curve_256, b.x() * new_zz % p, b.y() * new_zz * new_z % p, new_z)
+
+ c = a + b
+
+ self.assertEqual(c, j_g * (a_mul + b_mul))
+
+ @settings(max_examples=14)
+ @given(st.integers(min_value=1, max_value=int(generator_256.order())),
+ st.integers(min_value=1, max_value=int(generator_256.order())),
+ st.lists(st.integers(min_value=1, max_value=int(curve_256.p()-1)),
+ min_size=2, max_size=2, unique=True))
+ @example(2, 2, [2, 1])
+ @example(2, 2, [2, 3])
+ @example(2, int(generator_256.order()-2), [2, 3])
+ @example(2, int(generator_256.order()-2), [2, 1])
+ def test_add_different_scale_points(self, a_mul, b_mul, new_z):
+ j_g = PointJacobi.from_affine(generator_256)
+ a = PointJacobi.from_affine(j_g * a_mul)
+ b = PointJacobi.from_affine(j_g * b_mul)
+
+ p = curve_256.p()
+
+ assume(inverse_mod(new_z[0], p))
+ assume(inverse_mod(new_z[1], p))
+
+ new_zz0 = new_z[0] * new_z[0] % p
+ new_zz1 = new_z[1] * new_z[1] % p
+
+ a = PointJacobi(
+ curve_256,
+ a.x() * new_zz0 % p,
+ a.y() * new_zz0 * new_z[0] % p,
+ new_z[0])
+ b = PointJacobi(
+ curve_256,
+ b.x() * new_zz1 % p,
+ b.y() * new_zz1 * new_z[1] % p,
+ new_z[1])
+
+ c = a + b
+
+ self.assertEqual(c, j_g * (a_mul + b_mul))
+
+ def test_add_point_3_times(self):
+ j_g = PointJacobi.from_affine(generator_256)
+
+ self.assertEqual(j_g * 3, j_g + j_g + j_g)
+
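+ # mul_add(a, other, b) computes self * a + other * b, as exercised
+ # by the assertions below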
+ def test_mul_add_inf(self):
+ j_g = PointJacobi.from_affine(generator_256)
+
+ self.assertEqual(j_g, j_g.mul_add(1, INFINITY, 1))
+
+ def test_mul_add_same(self):
+ j_g = PointJacobi.from_affine(generator_256)
+
+ self.assertEqual(j_g * 2, j_g.mul_add(1, j_g, 1))
+
+ def test_mul_add_precompute(self):
+ j_g = PointJacobi.from_affine(generator_256, True)
+ b = PointJacobi.from_affine(j_g * 255, True)
+
+ self.assertEqual(j_g * 256, j_g + b)
+ self.assertEqual(j_g * (5 + 255 * 7), j_g * 5 + b * 7)
+ self.assertEqual(j_g * (5 + 255 * 7), j_g.mul_add(5, b, 7))
+
+ def test_mul_add_precompute_large(self):
+ j_g = PointJacobi.from_affine(generator_256, True)
+ b = PointJacobi.from_affine(j_g * 255, True)
+
+ self.assertEqual(j_g * 256, j_g + b)
+ self.assertEqual(j_g * (0xff00 + 255 * 0xf0f0),
+ j_g * 0xff00 + b * 0xf0f0)
+ self.assertEqual(j_g * (0xff00 + 255 * 0xf0f0),
+ j_g.mul_add(0xff00, b, 0xf0f0))
+
+ def test_mul_add_to_mul(self):
+ j_g = PointJacobi.from_affine(generator_256)
+
+ a = j_g * 3
+ b = j_g.mul_add(2, j_g, 1)
+
+ self.assertEqual(a, b)
+
+ def test_mul_add(self):
+ j_g = PointJacobi.from_affine(generator_256)
+
+ w_a = generator_256 * 255
+ w_b = generator_256 * (0xa8*0xf0)
+ j_b = j_g * 0xa8
+
+ ret = j_g.mul_add(255, j_b, 0xf0)
+
+ self.assertEqual(ret.to_affine(), w_a + w_b)
+
+ def test_mul_add_large(self):
+ j_g = PointJacobi.from_affine(generator_256)
+ b = PointJacobi.from_affine(j_g * 255)
+
+ self.assertEqual(j_g * 256, j_g + b)
+ self.assertEqual(j_g * (0xff00 + 255 * 0xf0f0),
+ j_g * 0xff00 + b * 0xf0f0)
+ self.assertEqual(j_g * (0xff00 + 255 * 0xf0f0),
+ j_g.mul_add(0xff00, b, 0xf0f0))
diff --git a/third_party/python/ecdsa/ecdsa/test_keys.py b/third_party/python/ecdsa/ecdsa/test_keys.py
new file mode 100644
index 0000000000..56e128421e
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_keys.py
@@ -0,0 +1,373 @@
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+
+try:
+ buffer
+except NameError:
+ buffer = memoryview
+
+import array
+import six
+import sys
+import pytest
+import hashlib
+
+from .keys import VerifyingKey, SigningKey
+from .der import unpem
+from .util import sigencode_string, sigencode_der, sigencode_strings, \
+ sigdecode_string, sigdecode_der, sigdecode_strings
+
+
+class TestVerifyingKeyFromString(unittest.TestCase):
+ """
+ Verify that ecdsa.keys.VerifyingKey.from_string() can be used with
+ bytes-like objects.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ cls.key_bytes = (b'\x04L\xa2\x95\xdb\xc7Z\xd7\x1f\x93\nz\xcf\x97\xcf'
+ b'\xd7\xc2\xd9o\xfe8}X!\xae\xd4\xfah\xfa^\rpI\xba\xd1'
+ b'Y\xfb\x92xa\xebo+\x9cG\xfav\xca')
+ cls.vk = VerifyingKey.from_string(cls.key_bytes)
+
+ def test_bytes(self):
+ self.assertIsNotNone(self.vk)
+ self.assertIsInstance(self.vk, VerifyingKey)
+ self.assertEqual(
+ self.vk.pubkey.point.x(),
+ 105419898848891948935835657980914000059957975659675736097)
+ self.assertEqual(
+ self.vk.pubkey.point.y(),
+ 4286866841217412202667522375431381222214611213481632495306)
+
+ def test_bytes_memoryview(self):
+ vk = VerifyingKey.from_string(buffer(self.key_bytes))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytearray(self):
+ vk = VerifyingKey.from_string(bytearray(self.key_bytes))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytearray_memoryview(self):
+ vk = VerifyingKey.from_string(buffer(bytearray(self.key_bytes)))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_array_array_of_bytes(self):
+ arr = array.array('B', self.key_bytes)
+ vk = VerifyingKey.from_string(arr)
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_array_array_of_bytes_memoryview(self):
+ arr = array.array('B', self.key_bytes)
+ vk = VerifyingKey.from_string(buffer(arr))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_array_array_of_ints(self):
+ arr = array.array('I', self.key_bytes)
+ vk = VerifyingKey.from_string(arr)
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_array_array_of_ints_memoryview(self):
+ arr = array.array('I', self.key_bytes)
+ vk = VerifyingKey.from_string(buffer(arr))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytes_uncompressed(self):
+ vk = VerifyingKey.from_string(b'\x04' + self.key_bytes)
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytearray_uncompressed(self):
+ vk = VerifyingKey.from_string(bytearray(b'\x04' + self.key_bytes))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytes_compressed(self):
+ vk = VerifyingKey.from_string(b'\x02' + self.key_bytes[:24])
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytearray_compressed(self):
+ vk = VerifyingKey.from_string(bytearray(b'\x02' + self.key_bytes[:24]))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+
+class TestVerifyingKeyFromDer(unittest.TestCase):
+ """
+ Verify that ecdsa.keys.VerifyingKey.from_der() can be used with
+ bytes-like objects.
+ """
+ @classmethod
+ def setUpClass(cls):
+ prv_key_str = (
+ "-----BEGIN EC PRIVATE KEY-----\n"
+ "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n"
+ "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n"
+ "bA==\n"
+ "-----END EC PRIVATE KEY-----\n")
+ key_str = (
+ "-----BEGIN PUBLIC KEY-----\n"
+ "MEkwEwYHKoZIzj0CAQYIKoZIzj0DAQEDMgAEuIF30ITvF/XkVjlAgCg2D59ZtKTX\n"
+ "Jk5i2gZR3OR6NaTFtFz1FZNCOotVe5wgmfNs\n"
+ "-----END PUBLIC KEY-----\n")
+ cls.key_pem = key_str
+
+ cls.key_bytes = unpem(key_str)
+ assert isinstance(cls.key_bytes, bytes)
+ cls.vk = VerifyingKey.from_pem(key_str)
+ cls.sk = SigningKey.from_pem(prv_key_str)
+
+ key_str = (
+ "-----BEGIN PUBLIC KEY-----\n"
+ "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE4H3iRbG4TSrsSRb/gusPQB/4YcN8\n"
+ "Poqzgjau4kfxBPyZimeRfuY/9g/wMmPuhGl4BUve51DsnKJFRr8psk0ieA==\n"
+ "-----END PUBLIC KEY-----\n"
+ )
+ cls.vk2 = VerifyingKey.from_pem(key_str)
+
+ def test_custom_hashfunc(self):
+ vk = VerifyingKey.from_der(self.key_bytes, hashlib.sha256)
+
+ self.assertIs(vk.default_hashfunc, hashlib.sha256)
+
+ def test_from_pem_with_custom_hashfunc(self):
+ vk = VerifyingKey.from_pem(self.key_pem, hashlib.sha256)
+
+ self.assertIs(vk.default_hashfunc, hashlib.sha256)
+
+ def test_bytes(self):
+ vk = VerifyingKey.from_der(self.key_bytes)
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytes_memoryview(self):
+ vk = VerifyingKey.from_der(buffer(self.key_bytes))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytearray(self):
+ vk = VerifyingKey.from_der(bytearray(self.key_bytes))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_bytearray_memoryview(self):
+ vk = VerifyingKey.from_der(buffer(bytearray(self.key_bytes)))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_array_array_of_bytes(self):
+ arr = array.array('B', self.key_bytes)
+ vk = VerifyingKey.from_der(arr)
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_array_array_of_bytes_memoryview(self):
+ arr = array.array('B', self.key_bytes)
+ vk = VerifyingKey.from_der(buffer(arr))
+
+ self.assertEqual(self.vk.to_string(), vk.to_string())
+
+ def test_equality_on_verifying_keys(self):
+ self.assertEqual(self.vk, self.sk.get_verifying_key())
+
+ def test_inequality_on_verifying_keys(self):
+ self.assertNotEqual(self.vk, self.vk2)
+
+ def test_inequality_on_verifying_keys_not_implemented(self):
+ self.assertNotEqual(self.vk, None)
+
+
+class TestSigningKey(unittest.TestCase):
+ """
+ Verify that ecdsa.keys.SigningKey objects loaded from PEM compare
+ equal and unequal as expected.
+ """
+ @classmethod
+ def setUpClass(cls):
+ prv_key_str = (
+ "-----BEGIN EC PRIVATE KEY-----\n"
+ "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n"
+ "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n"
+ "bA==\n"
+ "-----END EC PRIVATE KEY-----\n")
+ cls.sk1 = SigningKey.from_pem(prv_key_str)
+
+ prv_key_str = (
+ "-----BEGIN EC PRIVATE KEY-----\n"
+ "MHcCAQEEIKlL2EAm5NPPZuXwxRf4nXMk0A80y6UUbiQ17be/qFhRoAoGCCqGSM49\n"
+ "AwEHoUQDQgAE4H3iRbG4TSrsSRb/gusPQB/4YcN8Poqzgjau4kfxBPyZimeRfuY/\n"
+ "9g/wMmPuhGl4BUve51DsnKJFRr8psk0ieA==\n"
+ "-----END EC PRIVATE KEY-----\n")
+ cls.sk2 = SigningKey.from_pem(prv_key_str)
+
+ def test_equality_on_signing_keys(self):
+ sk = SigningKey.from_secret_exponent(self.sk1.privkey.secret_multiplier, self.sk1.curve)
+ self.assertEqual(self.sk1, sk)
+
+ def test_inequality_on_signing_keys(self):
+ self.assertNotEqual(self.sk1, self.sk2)
+
+ def test_inequality_on_signing_keys_not_implemented(self):
+ self.assertNotEqual(self.sk1, None)
+
+# test VerifyingKey.verify()
+prv_key_str = (
+ "-----BEGIN EC PRIVATE KEY-----\n"
+ "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n"
+ "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n"
+ "bA==\n"
+ "-----END EC PRIVATE KEY-----\n")
+key_bytes = unpem(prv_key_str)
+assert isinstance(key_bytes, bytes)
+sk = SigningKey.from_der(key_bytes)
+vk = sk.verifying_key
+
+data = (b"some string for signing"
+ b"contents don't really matter"
+ b"but do include also some crazy values: "
+ b"\x00\x01\t\r\n\x00\x00\x00\xff\xf0")
+assert len(data) % 4 == 0
+sha1 = hashlib.sha1()
+sha1.update(data)
+data_hash = sha1.digest()
+assert isinstance(data_hash, bytes)
+sig_raw = sk.sign(data, sigencode=sigencode_string)
+assert isinstance(sig_raw, bytes)
+sig_der = sk.sign(data, sigencode=sigencode_der)
+assert isinstance(sig_der, bytes)
+sig_strings = sk.sign(data, sigencode=sigencode_strings)
+assert isinstance(sig_strings[0], bytes)
+
+verifiers = []
+for modifier, fun in [
+ ("bytes", lambda x: x),
+ ("bytes memoryview", lambda x: buffer(x)),
+ ("bytearray", lambda x: bytearray(x)),
+ ("bytearray memoryview", lambda x: buffer(bytearray(x))),
+ ("array.array of bytes", lambda x: array.array('B', x)),
+ ("array.array of bytes memoryview", lambda x: buffer(array.array('B', x))),
+ ("array.array of ints", lambda x: array.array('I', x)),
+ ("array.array of ints memoryview", lambda x: buffer(array.array('I', x)))
+ ]:
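+ # array.array('I') needs input whose length is a multiple of 4, which
+ # signature lengths (DER in particular) don't guarantee, so for the
+ # "ints" modifiers the conversion is applied only to the data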
+ if "ints" in modifier:
+ conv = lambda x: x
+ else:
+ conv = fun
+ for sig_format, signature, decoder, mod_apply in [
+ ("raw", sig_raw, sigdecode_string, lambda x: conv(x)),
+ ("der", sig_der, sigdecode_der, lambda x: conv(x)),
+ ("strings", sig_strings, sigdecode_strings, lambda x:
+ tuple(conv(i) for i in x))
+ ]:
+ for method_name, vrf_mthd, vrf_data in [
+ ("verify", vk.verify, data),
+ ("verify_digest", vk.verify_digest, data_hash)
+ ]:
+ verifiers.append(pytest.param(
+ signature, decoder, mod_apply, fun, vrf_mthd, vrf_data,
+ id="{2}-{0}-{1}".format(modifier, sig_format, method_name)))
+
+@pytest.mark.parametrize(
+ "signature,decoder,mod_apply,fun,vrf_mthd,vrf_data",
+ verifiers)
+def test_VerifyingKey_verify(
+ signature, decoder, mod_apply, fun, vrf_mthd, vrf_data):
+ sig = mod_apply(signature)
+
+ assert vrf_mthd(sig, fun(vrf_data), sigdecode=decoder)
+
+
+# test SigningKey.from_string()
+prv_key_bytes = (b'^\xc8B\x0b\xd6\xef\x92R\xa9B\xe9\x89\x04<\xa2'
+ b'\x9fV\x1f\xa5%w\x0e\xb1\xc5')
+assert len(prv_key_bytes) == 24
+converters = []
+for modifier, convert in [
+ ("bytes", lambda x: x),
+ ("bytes memoryview", buffer),
+ ("bytearray", bytearray),
+ ("bytearray memoryview", lambda x: buffer(bytearray(x))),
+ ("array.array of bytes", lambda x: array.array('B', x)),
+ ("array.array of bytes memoryview",
+ lambda x: buffer(array.array('B', x))),
+ ("array.array of ints", lambda x: array.array('I', x)),
+ ("array.array of ints memoryview",
+ lambda x: buffer(array.array('I', x)))
+ ]:
+ converters.append(pytest.param(
+ convert,
+ id=modifier))
+
+@pytest.mark.parametrize("convert", converters)
+def test_SigningKey_from_string(convert):
+ key = convert(prv_key_bytes)
+ sk = SigningKey.from_string(key)
+
+ assert sk.to_string() == prv_key_bytes
+
+
+# test SigningKey.from_der()
+prv_key_str = (
+ "-----BEGIN EC PRIVATE KEY-----\n"
+ "MF8CAQEEGF7IQgvW75JSqULpiQQ8op9WH6Uldw6xxaAKBggqhkjOPQMBAaE0AzIA\n"
+ "BLiBd9CE7xf15FY5QIAoNg+fWbSk1yZOYtoGUdzkejWkxbRc9RWTQjqLVXucIJnz\n"
+ "bA==\n"
+ "-----END EC PRIVATE KEY-----\n")
+key_bytes = unpem(prv_key_str)
+assert isinstance(key_bytes, bytes)
+
+# the last two converters are for array.array of ints; those require input
+# whose length is a multiple of 4, which no curve we support produces
+@pytest.mark.parametrize("convert", converters[:-2])
+def test_SigningKey_from_der(convert):
+ key = convert(key_bytes)
+ sk = SigningKey.from_der(key)
+
+ assert sk.to_string() == prv_key_bytes
+
+
+# test SigningKey.sign_deterministic()
+extra_entropy = b'\x0a\x0b\x0c\x0d\x0e\x0f\x10\x11'
+
+@pytest.mark.parametrize("convert", converters)
+def test_SigningKey_sign_deterministic(convert):
+ sig = sk.sign_deterministic(
+ convert(data),
+ extra_entropy=convert(extra_entropy))
+
+ vk.verify(sig, data)
+
+
+# test SigningKey.sign_digest_deterministic()
+@pytest.mark.parametrize("convert", converters)
+def test_SigningKey_sign_digest_deterministic(convert):
+ sig = sk.sign_digest_deterministic(
+ convert(data_hash),
+ extra_entropy=convert(extra_entropy))
+
+ vk.verify(sig, data)
+
+
+@pytest.mark.parametrize("convert", converters)
+def test_SigningKey_sign(convert):
+ sig = sk.sign(convert(data))
+
+ vk.verify(sig, data)
+
+
+@pytest.mark.parametrize("convert", converters)
+def test_SigningKey_sign_digest(convert):
+ sig = sk.sign_digest(convert(data_hash))
+
+ vk.verify(sig, data)
diff --git a/third_party/python/ecdsa/ecdsa/test_malformed_sigs.py b/third_party/python/ecdsa/ecdsa/test_malformed_sigs.py
new file mode 100644
index 0000000000..c1dca44a0e
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_malformed_sigs.py
@@ -0,0 +1,306 @@
+from __future__ import with_statement, division
+
+import hashlib
+try:
+ from hashlib import algorithms_available
+except ImportError: # pragma: no cover
+ algorithms_available = [
+ "md5", "sha1", "sha224", "sha256", "sha384", "sha512"]
+from functools import partial
+import pytest
+import sys
+from six import binary_type
+import hypothesis.strategies as st
+from hypothesis import note, assume, given, settings, example
+
+from .keys import SigningKey
+from .keys import BadSignatureError
+from .util import sigencode_der, sigencode_string
+from .util import sigdecode_der, sigdecode_string
+from .curves import curves, NIST256p
+from .der import encode_integer, encode_bitstring, encode_octet_string, \
+ encode_oid, encode_sequence, encode_constructed
+
+
+example_data = b"some data to sign"
+"""Since the data is hashed for processing, really any string will do."""
+
+
+hash_and_size = [(name, hashlib.new(name).digest_size)
+ for name in algorithms_available]
+"""Pairs of hash names and their output sizes.
+Needed for pairing with curves, as we don't support hashes
+bigger than the order sizes of the curves."""
+
+
+keys_and_sigs = []
+"""Name of the curve+hash combination, VerifyingKey and DER signature."""
+
+
+# for hypothesis strategy shrinking we want smallest curves and hashes first
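+# (hashes that report digest_size == 0, such as the shake variants, are
+# skipped by the `0 < size` check below)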
+for curve in sorted(curves, key=lambda x: x.baselen):
+ for hash_alg in [name for name, size in
+ sorted(hash_and_size, key=lambda x: x[1])
+ if 0 < size <= curve.baselen]:
+ sk = SigningKey.generate(
+ curve,
+ hashfunc=partial(hashlib.new, hash_alg))
+
+ keys_and_sigs.append(
+ ("{0} {1}".format(curve, hash_alg),
+ sk.verifying_key,
+ sk.sign(example_data, sigencode=sigencode_der)))
+
+
+# first make sure that the signatures can be verified
+@pytest.mark.parametrize(
+ "verifying_key,signature",
+ [pytest.param(vk, sig, id=name) for name, vk, sig in keys_and_sigs])
+def test_signatures(verifying_key, signature):
+ assert verifying_key.verify(signature, example_data,
+ sigdecode=sigdecode_der)
+
+
+@st.composite
+def st_fuzzed_sig(draw, keys_and_sigs):
+ """
+ Hypothesis strategy that generates pairs of VerifyingKey and malformed
+ signatures created by fuzzing a valid signature.
+ """
+ name, verifying_key, old_sig = draw(st.sampled_from(keys_and_sigs))
+ note("Configuration: {0}".format(name))
+
+ sig = bytearray(old_sig)
+
+ # decide which bytes should be removed
+ to_remove = draw(st.lists(
+ st.integers(min_value=0, max_value=len(sig)-1),
+ unique=True))
+ to_remove.sort()
+ for i in reversed(to_remove):
+ del sig[i]
+ note("Remove bytes: {0}".format(to_remove))
+
+ # decide which bytes of the original signature should be changed
+ if sig: # pragma: no branch
+ xors = draw(st.dictionaries(
+ st.integers(min_value=0, max_value=len(sig)-1),
+ st.integers(min_value=1, max_value=255)))
+ for i, val in xors.items():
+ sig[i] ^= val
+ note("xors: {0}".format(xors))
+
+ # decide where new data should be inserted
+ insert_pos = draw(st.integers(min_value=0, max_value=len(sig)))
+ # a NIST521p signature is about 140 bytes long; test insertions slightly
+ # longer than that
+ insert_data = draw(st.binary(max_size=256))
+
+ sig = sig[:insert_pos] + insert_data + sig[insert_pos:]
+ note("Inserted at position {0} bytes: {1!r}"
+ .format(insert_pos, insert_data))
+
+ sig = bytes(sig)
+ # make sure that at least one mutation was performed on the data
+ assume(to_remove or xors or insert_data)
+ # and that the mutations didn't cancel each other out
+ assume(sig != old_sig)
+
+ return verifying_key, sig
+
+
+params = {}
+# these settings are not supported in hypothesis 2.0.0
+if sys.version_info >= (2, 7): # pragma: no branch
+ from hypothesis import HealthCheck
+ # deadline=5s because NIST521p signatures are slow to verify
+ params["deadline"] = 5000
+ params["suppress_health_check"] = [HealthCheck.data_too_large,
+ HealthCheck.filter_too_much,
+ HealthCheck.too_slow]
+
+slow_params = dict(params)
+slow_params["max_examples"] = 10
+
+
+@settings(**params)
+@given(st_fuzzed_sig(keys_and_sigs))
+def test_fuzzed_der_signatures(args):
+ verifying_key, sig = args
+
+ with pytest.raises(BadSignatureError):
+ verifying_key.verify(sig, example_data, sigdecode=sigdecode_der)
+
+
+@st.composite
+def st_random_der_ecdsa_sig_value(draw):
+ """
+ Hypothesis strategy for selecting random values and encoding them
+ to ECDSA-Sig-Value object::
+
+ ECDSA-Sig-Value ::= SEQUENCE {
+ r INTEGER,
+ s INTEGER
+ }
+ """
+ name, verifying_key, _ = draw(st.sampled_from(keys_and_sigs))
+ note("Configuration: {0}".format(name))
+ order = int(verifying_key.curve.order)
+
+ # encode_integer() doesn't support negative numbers; it would be nice
+ # to generate them too, but we have coverage for remove_integer()
+ # verifying that it doesn't accept them, so meh.
+ # Test all numbers around the ones that can show up (around the order):
+ # way smaller and slightly bigger
+ r = draw(st.integers(min_value=0, max_value=order << 4) |
+ st.integers(min_value=order >> 2, max_value=order+1))
+ s = draw(st.integers(min_value=0, max_value=order << 4) |
+ st.integers(min_value=order >> 2, max_value=order+1))
+
+ sig = encode_sequence(encode_integer(r), encode_integer(s))
+
+ return verifying_key, sig
+
+
+@settings(**slow_params)
+@given(st_random_der_ecdsa_sig_value())
+def test_random_der_ecdsa_sig_value(params):
+ """
+ Check if random values encoded in ECDSA-Sig-Value structure are rejected
+ as signature.
+ """
+ verifying_key, sig = params
+
+ with pytest.raises(BadSignatureError):
+ verifying_key.verify(sig, example_data, sigdecode=sigdecode_der)
+
+
+def st_der_integer(*args, **kwargs):
+ """
+ Hypothesis strategy that returns a random positive integer as DER
+ INTEGER.
+ Parameters are passed to hypothesis.strategies.integers().
+ """
+ if "min_value" not in kwargs: # pragma: no branch
+ kwargs["min_value"] = 0
+ return st.builds(encode_integer, st.integers(*args, **kwargs))
+
+
+@st.composite
+def st_der_bit_string(draw, *args, **kwargs):
+ """
+ Hypothesis strategy that returns a random DER BIT STRING.
+ Parameters are passed to hypothesis.strategies.binary().
+ """
+ data = draw(st.binary(*args, **kwargs))
+ if data:
+ unused = draw(st.integers(min_value=0, max_value=7))
+ data = bytearray(data)
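+ # DER requires the unused trailing bits of a BIT STRING to be zero;
+ # -(2**unused) is a two's-complement mask that clears the low
+ # `unused` bits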
+ data[-1] &= - (2**unused)
+ data = bytes(data)
+ else:
+ unused = 0
+ return encode_bitstring(data, unused)
+
+
+def st_der_octet_string(*args, **kwargs):
+ """
+ Hypothesis strategy that returns a random DER OCTET STRING object.
+ Parameters are passed to hypothesis.strategies.binary().
+ """
+ return st.builds(encode_octet_string, st.binary(*args, **kwargs))
+
+
+def st_der_null():
+ """
+ Hypothesis strategy that returns DER NULL object.
+ """
+ return st.just(b'\x05\x00')
+
+
+@st.composite
+def st_der_oid(draw):
+ """
+ Hypothesis strategy that returns DER OBJECT IDENTIFIER objects.
+ """
+ first = draw(st.integers(min_value=0, max_value=2))
+ if first < 2:
+ second = draw(st.integers(min_value=0, max_value=39))
+ else:
+ second = draw(st.integers(min_value=0, max_value=2**512))
+ rest = draw(st.lists(st.integers(min_value=0, max_value=2**512),
+ max_size=50))
+ return encode_oid(first, second, *rest)
+
+
+def st_der():
+ """
+ Hypothesis strategy that returns random DER structures.
+
+ A valid DER structure is any primitive object, an octet encoding
+ of a valid DER structure, a sequence of valid DER objects, or a
+ constructed encoding of any of the above.
+ """
+ return st.recursive(
+ st.just(b'') | st_der_integer(max_value=2**4096) |
+ st_der_bit_string(max_size=1024**2) |
+ st_der_octet_string(max_size=1024**2) | st_der_null() | st_der_oid(),
+ lambda children:
+ st.builds(lambda x: encode_octet_string(x), st.one_of(children)) |
+ st.builds(lambda x: encode_bitstring(x, 0), st.one_of(children)) |
+ st.builds(lambda x: encode_sequence(*x),
+ st.lists(children, max_size=200)) |
+ st.builds(lambda tag, x:
+ encode_constructed(tag, x),
+ st.integers(min_value=0, max_value=0x3f),
+ st.one_of(children)),
+ max_leaves=40
+ )
+
+
+@settings(**params)
+@given(st.sampled_from(keys_and_sigs), st_der())
+def test_random_der_as_signature(params, der):
+ """Check if random DER structures are rejected as signature"""
+ name, verifying_key, _ = params
+
+ with pytest.raises(BadSignatureError):
+ verifying_key.verify(der, example_data, sigdecode=sigdecode_der)
+
+
+@settings(**params)
+@given(st.sampled_from(keys_and_sigs), st.binary(max_size=1024**2))
+@example(
+ keys_and_sigs[0],
+ encode_sequence(encode_integer(0), encode_integer(0)))
+@example(
+ keys_and_sigs[0],
+ encode_sequence(encode_integer(1), encode_integer(1)) + b'\x00')
+@example(
+ keys_and_sigs[0],
+ encode_sequence(*[encode_integer(1)] * 3))
+def test_random_bytes_as_signature(params, der):
+ """Check if random bytes are rejected as signature"""
+ name, verifying_key, _ = params
+
+ with pytest.raises(BadSignatureError):
+ verifying_key.verify(der, example_data, sigdecode=sigdecode_der)
+
+
+keys_and_string_sigs = [
+ (name, verifying_key,
+ sigencode_string(*sigdecode_der(sig, verifying_key.curve.order),
+ order=verifying_key.curve.order))
+ for name, verifying_key, sig in keys_and_sigs]
+"""
+Name of the curve+hash combination, VerifyingKey and signature as a
+byte string.
+"""
+
+
+@settings(**params)
+@given(st_fuzzed_sig(keys_and_string_sigs))
+def test_fuzzed_string_signatures(params):
+ verifying_key, sig = params
+
+ with pytest.raises(BadSignatureError):
+ verifying_key.verify(sig, example_data, sigdecode=sigdecode_string)
diff --git a/third_party/python/ecdsa/ecdsa/test_numbertheory.py b/third_party/python/ecdsa/ecdsa/test_numbertheory.py
new file mode 100644
index 0000000000..4cec4fd6a7
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_numbertheory.py
@@ -0,0 +1,275 @@
+import operator
+from six import print_
+from functools import reduce
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+import hypothesis.strategies as st
+import pytest
+from hypothesis import given, settings, example
+try:
+ from hypothesis import HealthCheck
+ HC_PRESENT = True
+except ImportError: # pragma: no cover
+ HC_PRESENT = False
+from .numbertheory import (SquareRootError, factorization, gcd, lcm,
+ jacobi, inverse_mod,
+ is_prime, next_prime, smallprimes,
+ square_root_mod_prime)
+
+
+BIGPRIMES = (999671,
+ 999683,
+ 999721,
+ 999727,
+ 999749,
+ 999763,
+ 999769,
+ 999773,
+ 999809,
+ 999853,
+ 999863,
+ 999883,
+ 999907,
+ 999917,
+ 999931,
+ 999953,
+ 999959,
+ 999961,
+ 999979,
+ 999983)
+
+
+@pytest.mark.parametrize(
+ "prime, next_p",
+ [(p, q) for p, q in zip(BIGPRIMES[:-1], BIGPRIMES[1:])])
+def test_next_prime(prime, next_p):
+ assert next_prime(prime) == next_p
+
+
+@pytest.mark.parametrize(
+ "val",
+ [-1, 0, 1])
+def test_next_prime_with_nums_less_2(val):
+ assert next_prime(val) == 2
+
+
+@pytest.mark.parametrize("prime", smallprimes)
+def test_square_root_mod_prime_for_small_primes(prime):
+ squares = set()
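+ # squaring 0..prime//2 covers every quadratic residue, as
+ # (prime - n)**2 == n**2 (mod prime)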
+ for num in range(0, 1 + prime // 2):
+ sq = num * num % prime
+ squares.add(sq)
+ root = square_root_mod_prime(sq, prime)
+ # tested for real with TestNumbertheory.test_square_root_mod_prime
+ assert root * root % prime == sq
+
+ for nonsquare in range(0, prime):
+ if nonsquare in squares:
+ continue
+ with pytest.raises(SquareRootError):
+ square_root_mod_prime(nonsquare, prime)
+
+
+@st.composite
+def st_two_nums_rel_prime(draw):
+ # 521 bits is the biggest curve we operate on; use 1024 for a bit
+ # of breathing space
+ mod = draw(st.integers(min_value=2, max_value=2**1024))
+ num = draw(st.integers(min_value=1, max_value=mod-1)
+ .filter(lambda x: gcd(x, mod) == 1))
+ return num, mod
+
+
+@st.composite
+def st_primes(draw, *args, **kwargs):
+ if "min_value" not in kwargs: # pragma: no branch
+ kwargs["min_value"] = 1
+ prime = draw(st.sampled_from(smallprimes) |
+ st.integers(*args, **kwargs)
+ .filter(is_prime))
+ return prime
+
+
+@st.composite
+def st_num_square_prime(draw):
+ prime = draw(st_primes(max_value=2**1024))
+ num = draw(st.integers(min_value=0, max_value=1 + prime // 2))
+ sq = num * num % prime
+ return sq, prime
+
+
+@st.composite
+def st_comp_with_com_fac(draw):
+ """
+ Strategy that returns lists of numbers, all having a common factor.
+ """
+ primes = draw(st.lists(st_primes(max_value=2**512), min_size=1,
+ max_size=10))
+ # select random prime(s) that will form the common factor of the composites
+ com_fac_primes = draw(st.lists(st.sampled_from(primes),
+ min_size=1, max_size=20))
+ com_fac = reduce(operator.mul, com_fac_primes, 1)
+
+ # select at most 20 lists (the returned numbers), each built from at
+ # most 30 primes (factors); an empty selection makes the number 1
+ comp_primes = draw(
+ st.integers(min_value=1, max_value=20).
+ flatmap(lambda n: st.lists(st.lists(st.sampled_from(primes),
+ max_size=30),
+ min_size=1, max_size=n)))
+
+ return [reduce(operator.mul, nums, 1) * com_fac for nums in comp_primes]
+
+
+@st.composite
+def st_comp_no_com_fac(draw):
+ """
+ Strategy that returns lists of numbers that don't have a common factor.
+ """
+ primes = draw(st.lists(st_primes(max_value=2**512),
+ min_size=2, max_size=10, unique=True))
+ # first select the primes that will create the uncommon factor
+ # between returned numbers
+ uncom_fac_primes = draw(st.lists(
+ st.sampled_from(primes),
+ min_size=1, max_size=len(primes)-1, unique=True))
+ uncom_fac = reduce(operator.mul, uncom_fac_primes, 1)
+
+ # then build composites from leftover primes
+ leftover_primes = [i for i in primes if i not in uncom_fac_primes]
+
+ assert leftover_primes
+ assert uncom_fac_primes
+
+ # select at most 20 lists, each having at most 30 primes
+ # selected from the leftover_primes list
+ number_primes = draw(
+ st.integers(min_value=1, max_value=20).
+ flatmap(lambda n: st.lists(st.lists(st.sampled_from(leftover_primes),
+ max_size=30),
+ min_size=1, max_size=n)))
+
+ numbers = [reduce(operator.mul, nums, 1) for nums in number_primes]
+
+ insert_at = draw(st.integers(min_value=0, max_value=len(numbers)))
+ numbers.insert(insert_at, uncom_fac)
+ return numbers
+
+
+HYP_SETTINGS = {}
+if HC_PRESENT: # pragma: no branch
+ HYP_SETTINGS['suppress_health_check'] = [HealthCheck.filter_too_much,
+ HealthCheck.too_slow]
+ # the factorization() sometimes takes a long time to finish
+ HYP_SETTINGS['deadline'] = 5000
+
+
+HYP_SLOW_SETTINGS = dict(HYP_SETTINGS)
+HYP_SLOW_SETTINGS["max_examples"] = 10
+
+
+class TestNumbertheory(unittest.TestCase):
+ def test_gcd(self):
+ assert gcd(3 * 5 * 7, 3 * 5 * 11, 3 * 5 * 13) == 3 * 5
+ assert gcd([3 * 5 * 7, 3 * 5 * 11, 3 * 5 * 13]) == 3 * 5
+ assert gcd(3) == 3
+
+ @unittest.skipUnless(HC_PRESENT,
+ "Hypothesis 2.0.0 can't be made tolerant of hard to "
+ "meet requirements (like `is_prime()`), the test "
+ "case times-out on it")
+ @settings(**HYP_SLOW_SETTINGS)
+ @given(st_comp_with_com_fac())
+ def test_gcd_with_com_factor(self, numbers):
+ n = gcd(numbers)
+ assert 1 in numbers or n != 1
+ for i in numbers:
+ assert i % n == 0
+
+ @unittest.skipUnless(HC_PRESENT,
+ "Hypothesis 2.0.0 can't be made tolerant of hard to "
+ "meet requirements (like `is_prime()`), the test "
+ "case times-out on it")
+ @settings(**HYP_SLOW_SETTINGS)
+ @given(st_comp_no_com_fac())
+ def test_gcd_with_uncom_factor(self, numbers):
+ n = gcd(numbers)
+ assert n == 1
+
+ @given(st.lists(st.integers(min_value=1, max_value=2**8192),
+ min_size=1, max_size=20))
+ def test_gcd_with_random_numbers(self, numbers):
+ n = gcd(numbers)
+ for i in numbers:
+ # check that at the very least it's a divisor
+ assert i % n == 0
+
+ def test_lcm(self):
+ assert lcm(3, 5 * 3, 7 * 3) == 3 * 5 * 7
+ assert lcm([3, 5 * 3, 7 * 3]) == 3 * 5 * 7
+ assert lcm(3) == 3
+
+ @given(st.lists(st.integers(min_value=1, max_value=2**8192),
+ min_size=1, max_size=20))
+ def test_lcm_with_random_numbers(self, numbers):
+ n = lcm(numbers)
+ for i in numbers:
+ assert n % i == 0
+
+ @unittest.skipUnless(HC_PRESENT,
+ "Hypothesis 2.0.0 can't be made tolerant of hard to "
+ "meet requirements (like `is_prime()`), the test "
+ "case times-out on it")
+ @settings(**HYP_SETTINGS)
+ @given(st_num_square_prime())
+ def test_square_root_mod_prime(self, vals):
+ square, prime = vals
+
+ calc = square_root_mod_prime(square, prime)
+ assert calc * calc % prime == square
+
+ @settings(**HYP_SETTINGS)
+ @given(st.integers(min_value=1, max_value=10**12))
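+ # examples that exercise numbers with multiple large factors, including
+ # a repeated one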
+ @example(265399 * 1526929)
+ @example(373297 ** 2 * 553991)
+ def test_factorization(self, num):
+ factors = factorization(num)
+ mult = 1
+ for i in factors:
+ mult *= i[0] ** i[1]
+ assert mult == num
+
+ @settings(**HYP_SETTINGS)
+ @given(st.integers(min_value=3, max_value=1000).filter(lambda x: x % 2))
+ def test_jacobi(self, mod):
+ if is_prime(mod):
+ squares = set()
+ for root in range(1, mod):
+ assert jacobi(root * root, mod) == 1
+ squares.add(root * root % mod)
+ for i in range(1, mod):
+ if i not in squares:
+ assert jacobi(i, mod) == -1
+ else:
+ factors = factorization(mod)
+ for a in range(1, mod):
+ c = 1
+ for i in factors:
+ c *= jacobi(a, i[0]) ** i[1]
+ assert c == jacobi(a, mod)
+
+ @given(st_two_nums_rel_prime())
+ def test_inverse_mod(self, nums):
+ num, mod = nums
+
+ inv = inverse_mod(num, mod)
+
+ assert 0 < inv < mod
+ assert num * inv % mod == 1
+
+ def test_inverse_mod_with_zero(self):
+ assert 0 == inverse_mod(0, 11)
diff --git a/third_party/python/ecdsa/ecdsa/test_pyecdsa.py b/third_party/python/ecdsa/ecdsa/test_pyecdsa.py
new file mode 100644
index 0000000000..d83eb01d10
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_pyecdsa.py
@@ -0,0 +1,1445 @@
+from __future__ import with_statement, division
+
+try:
+ import unittest2 as unittest
+except ImportError:
+ import unittest
+import os
+import time
+import shutil
+import subprocess
+import pytest
+from binascii import hexlify, unhexlify
+from hashlib import sha1, sha256, sha384, sha512
+import hashlib
+from functools import partial
+
+from hypothesis import given
+import hypothesis.strategies as st
+
+from six import b, print_, binary_type
+from .keys import SigningKey, VerifyingKey
+from .keys import BadSignatureError, MalformedPointError, BadDigestError
+from . import util
+from .util import sigencode_der, sigencode_strings
+from .util import sigdecode_der, sigdecode_strings
+from .util import number_to_string, encoded_oid_ecPublicKey, \
+ MalformedSignature
+from .curves import Curve, UnknownCurveError
+from .curves import NIST192p, NIST224p, NIST256p, NIST384p, NIST521p, \
+ SECP256k1, BRAINPOOLP160r1, BRAINPOOLP192r1, BRAINPOOLP224r1, \
+ BRAINPOOLP256r1, BRAINPOOLP320r1, BRAINPOOLP384r1, BRAINPOOLP512r1, \
+ curves
+from .ecdsa import curve_brainpoolp224r1, curve_brainpoolp256r1, \
+ curve_brainpoolp384r1, curve_brainpoolp512r1
+from .ellipticcurve import Point
+from . import der
+from . import rfc6979
+from . import ecdsa
+
+
+class SubprocessError(Exception):
+ pass
+
+
+def run_openssl(cmd):
+ OPENSSL = "openssl"
+ p = subprocess.Popen([OPENSSL] + cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ stdout, ignored = p.communicate()
+ if p.returncode != 0:
+ raise SubprocessError("cmd '%s %s' failed: rc=%s, stdout/err was %s" %
+ (OPENSSL, cmd, p.returncode, stdout))
+ return stdout.decode()
+
+
+class ECDSA(unittest.TestCase):
+ def test_basic(self):
+ priv = SigningKey.generate()
+ pub = priv.get_verifying_key()
+
+ data = b("blahblah")
+ sig = priv.sign(data)
+
+ self.assertTrue(pub.verify(sig, data))
+ self.assertRaises(BadSignatureError, pub.verify, sig, data + b("bad"))
+
+ pub2 = VerifyingKey.from_string(pub.to_string())
+ self.assertTrue(pub2.verify(sig, data))
+
+ def test_deterministic(self):
+ data = b("blahblah")
+ secexp = int("9d0219792467d7d37b4d43298a7d0c05", 16)
+
+ priv = SigningKey.from_secret_exponent(secexp, SECP256k1, sha256)
+ pub = priv.get_verifying_key()
+
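+ # RFC 6979: derive k deterministically from the secret exponent and the
+ # hash of the message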
+ k = rfc6979.generate_k(
+ SECP256k1.generator.order(), secexp, sha256, sha256(data).digest())
+
+ sig1 = priv.sign(data, k=k)
+ self.assertTrue(pub.verify(sig1, data))
+
+ sig2 = priv.sign(data, k=k)
+ self.assertTrue(pub.verify(sig2, data))
+
+ sig3 = priv.sign_deterministic(data, sha256)
+ self.assertTrue(pub.verify(sig3, data))
+
+ self.assertEqual(sig1, sig2)
+ self.assertEqual(sig1, sig3)
+
+ def test_bad_usage(self):
+ # sk=SigningKey() is wrong
+ self.assertRaises(TypeError, SigningKey)
+ self.assertRaises(TypeError, VerifyingKey)
+
+ def test_lengths(self):
+ default = NIST192p
+ priv = SigningKey.generate()
+ pub = priv.get_verifying_key()
+ self.assertEqual(len(pub.to_string()), default.verifying_key_length)
+ sig = priv.sign(b("data"))
+ self.assertEqual(len(sig), default.signature_length)
+ for curve in (NIST192p, NIST224p, NIST256p, NIST384p, NIST521p,
+ BRAINPOOLP160r1, BRAINPOOLP192r1, BRAINPOOLP224r1,
+ BRAINPOOLP256r1, BRAINPOOLP320r1, BRAINPOOLP384r1,
+ BRAINPOOLP512r1):
+ start = time.time()
+ priv = SigningKey.generate(curve=curve)
+ pub1 = priv.get_verifying_key()
+ keygen_time = time.time() - start
+ pub2 = VerifyingKey.from_string(pub1.to_string(), curve)
+ self.assertEqual(pub1.to_string(), pub2.to_string())
+ self.assertEqual(len(pub1.to_string()),
+ curve.verifying_key_length)
+ start = time.time()
+ sig = priv.sign(b("data"))
+ sign_time = time.time() - start
+ self.assertEqual(len(sig), curve.signature_length)
+
+ def test_serialize(self):
+ seed = b("secret")
+ curve = NIST192p
+ secexp1 = util.randrange_from_seed__trytryagain(seed, curve.order)
+ secexp2 = util.randrange_from_seed__trytryagain(seed, curve.order)
+ self.assertEqual(secexp1, secexp2)
+ priv1 = SigningKey.from_secret_exponent(secexp1, curve)
+ priv2 = SigningKey.from_secret_exponent(secexp2, curve)
+ self.assertEqual(hexlify(priv1.to_string()),
+ hexlify(priv2.to_string()))
+ self.assertEqual(priv1.to_pem(), priv2.to_pem())
+ pub1 = priv1.get_verifying_key()
+ pub2 = priv2.get_verifying_key()
+ data = b("data")
+ sig1 = priv1.sign(data)
+ sig2 = priv2.sign(data)
+ self.assertTrue(pub1.verify(sig1, data))
+ self.assertTrue(pub2.verify(sig1, data))
+ self.assertTrue(pub1.verify(sig2, data))
+ self.assertTrue(pub2.verify(sig2, data))
+ self.assertEqual(hexlify(pub1.to_string()),
+ hexlify(pub2.to_string()))
+
+ def test_nonrandom(self):
+ s = b("all the entropy in the entire world, compressed into one line")
+
+ def not_much_entropy(numbytes):
+ return s[:numbytes]
+
+ # we control the entropy source, these two keys should be identical:
+ priv1 = SigningKey.generate(entropy=not_much_entropy)
+ priv2 = SigningKey.generate(entropy=not_much_entropy)
+ self.assertEqual(hexlify(priv1.get_verifying_key().to_string()),
+ hexlify(priv2.get_verifying_key().to_string()))
+ # likewise, signatures should be identical. Obviously you'd never
+ # want to do this with keys you care about, because the secrecy of
+ # the private key depends upon using different random numbers for
+ # each signature
+ sig1 = priv1.sign(b("data"), entropy=not_much_entropy)
+ sig2 = priv2.sign(b("data"), entropy=not_much_entropy)
+ self.assertEqual(hexlify(sig1), hexlify(sig2))
+
+ def assertTruePrivkeysEqual(self, priv1, priv2):
+ self.assertEqual(priv1.privkey.secret_multiplier,
+ priv2.privkey.secret_multiplier)
+ self.assertEqual(priv1.privkey.public_key.generator,
+ priv2.privkey.public_key.generator)
+
+ def test_privkey_creation(self):
+ s = b("all the entropy in the entire world, compressed into one line")
+
+ def not_much_entropy(numbytes):
+ return s[:numbytes]
+
+ priv1 = SigningKey.generate()
+ self.assertEqual(priv1.baselen, NIST192p.baselen)
+
+ priv1 = SigningKey.generate(curve=NIST224p)
+ self.assertEqual(priv1.baselen, NIST224p.baselen)
+
+ priv1 = SigningKey.generate(entropy=not_much_entropy)
+ self.assertEqual(priv1.baselen, NIST192p.baselen)
+ priv2 = SigningKey.generate(entropy=not_much_entropy)
+ self.assertEqual(priv2.baselen, NIST192p.baselen)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ priv1 = SigningKey.from_secret_exponent(secexp=3)
+ self.assertEqual(priv1.baselen, NIST192p.baselen)
+ priv2 = SigningKey.from_secret_exponent(secexp=3)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ priv1 = SigningKey.from_secret_exponent(secexp=4, curve=NIST224p)
+ self.assertEqual(priv1.baselen, NIST224p.baselen)
+
+ def test_privkey_strings(self):
+ priv1 = SigningKey.generate()
+ s1 = priv1.to_string()
+ self.assertEqual(type(s1), binary_type)
+ self.assertEqual(len(s1), NIST192p.baselen)
+ priv2 = SigningKey.from_string(s1)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ s1 = priv1.to_pem()
+ self.assertEqual(type(s1), binary_type)
+ self.assertTrue(s1.startswith(b("-----BEGIN EC PRIVATE KEY-----")))
+ self.assertTrue(s1.strip().endswith(b("-----END EC PRIVATE KEY-----")))
+ priv2 = SigningKey.from_pem(s1)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ s1 = priv1.to_der()
+ self.assertEqual(type(s1), binary_type)
+ priv2 = SigningKey.from_der(s1)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ priv1 = SigningKey.generate(curve=NIST256p)
+ s1 = priv1.to_pem()
+ self.assertEqual(type(s1), binary_type)
+ self.assertTrue(s1.startswith(b("-----BEGIN EC PRIVATE KEY-----")))
+ self.assertTrue(s1.strip().endswith(b("-----END EC PRIVATE KEY-----")))
+ priv2 = SigningKey.from_pem(s1)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ s1 = priv1.to_der()
+ self.assertEqual(type(s1), binary_type)
+ priv2 = SigningKey.from_der(s1)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ def test_privkey_strings_brainpool(self):
+ priv1 = SigningKey.generate(curve=BRAINPOOLP512r1)
+ s1 = priv1.to_pem()
+ self.assertEqual(type(s1), binary_type)
+ self.assertTrue(s1.startswith(b("-----BEGIN EC PRIVATE KEY-----")))
+ self.assertTrue(s1.strip().endswith(b("-----END EC PRIVATE KEY-----")))
+ priv2 = SigningKey.from_pem(s1)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ s1 = priv1.to_der()
+ self.assertEqual(type(s1), binary_type)
+ priv2 = SigningKey.from_der(s1)
+ self.assertTruePrivkeysEqual(priv1, priv2)
+
+ def assertTruePubkeysEqual(self, pub1, pub2):
+ self.assertEqual(pub1.pubkey.point, pub2.pubkey.point)
+ self.assertEqual(pub1.pubkey.generator, pub2.pubkey.generator)
+ self.assertEqual(pub1.curve, pub2.curve)
+
+ def test_pubkey_strings(self):
+ priv1 = SigningKey.generate()
+ pub1 = priv1.get_verifying_key()
+ s1 = pub1.to_string()
+ self.assertEqual(type(s1), binary_type)
+ self.assertEqual(len(s1), NIST192p.verifying_key_length)
+ pub2 = VerifyingKey.from_string(s1)
+ self.assertTruePubkeysEqual(pub1, pub2)
+
+ priv1 = SigningKey.generate(curve=NIST256p)
+ pub1 = priv1.get_verifying_key()
+ s1 = pub1.to_string()
+ self.assertEqual(type(s1), binary_type)
+ self.assertEqual(len(s1), NIST256p.verifying_key_length)
+ pub2 = VerifyingKey.from_string(s1, curve=NIST256p)
+ self.assertTruePubkeysEqual(pub1, pub2)
+
+ pub1_der = pub1.to_der()
+ self.assertEqual(type(pub1_der), binary_type)
+ pub2 = VerifyingKey.from_der(pub1_der)
+ self.assertTruePubkeysEqual(pub1, pub2)
+
+ self.assertRaises(der.UnexpectedDER,
+ VerifyingKey.from_der, pub1_der + b("junk"))
+ badpub = VerifyingKey.from_der(pub1_der)
+
+ class FakeGenerator:
+ def order(self):
+ return 123456789
+
+ badcurve = Curve("unknown", None, FakeGenerator(), (1, 2, 3, 4, 5, 6), None)
+ badpub.curve = badcurve
+ badder = badpub.to_der()
+ self.assertRaises(UnknownCurveError, VerifyingKey.from_der, badder)
+
+ pem = pub1.to_pem()
+ self.assertEqual(type(pem), binary_type)
+ self.assertTrue(pem.startswith(b("-----BEGIN PUBLIC KEY-----")), pem)
+ self.assertTrue(pem.strip().endswith(b("-----END PUBLIC KEY-----")), pem)
+ pub2 = VerifyingKey.from_pem(pem)
+ self.assertTruePubkeysEqual(pub1, pub2)
+
+ def test_pubkey_strings_brainpool(self):
+ priv1 = SigningKey.generate(curve=BRAINPOOLP512r1)
+ pub1 = priv1.get_verifying_key()
+ s1 = pub1.to_string()
+ self.assertEqual(type(s1), binary_type)
+ self.assertEqual(len(s1), BRAINPOOLP512r1.verifying_key_length)
+ pub2 = VerifyingKey.from_string(s1, curve=BRAINPOOLP512r1)
+ self.assertTruePubkeysEqual(pub1, pub2)
+
+ pub1_der = pub1.to_der()
+ self.assertEqual(type(pub1_der), binary_type)
+ pub2 = VerifyingKey.from_der(pub1_der)
+ self.assertTruePubkeysEqual(pub1, pub2)
+
+ def test_vk_to_der_with_invalid_point_encoding(self):
+ sk = SigningKey.generate()
+ vk = sk.verifying_key
+
+ with self.assertRaises(ValueError):
+ vk.to_der("raw")
+
+ def test_sk_to_der_with_invalid_point_encoding(self):
+ sk = SigningKey.generate()
+
+ with self.assertRaises(ValueError):
+ sk.to_der("raw")
+
+ def test_vk_from_der_garbage_after_curve_oid(self):
+ type_oid_der = encoded_oid_ecPublicKey
+ curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1)) + \
+ b('garbage')
+ enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der)
+ point_der = der.encode_bitstring(b'\x00\xff', None)
+ to_decode = der.encode_sequence(enc_type_der, point_der)
+
+ with self.assertRaises(der.UnexpectedDER):
+ VerifyingKey.from_der(to_decode)
+
+ def test_vk_from_der_invalid_key_type(self):
+ type_oid_der = der.encode_oid(*(1, 2, 3))
+ curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1))
+ enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der)
+ point_der = der.encode_bitstring(b'\x00\xff', None)
+ to_decode = der.encode_sequence(enc_type_der, point_der)
+
+ with self.assertRaises(der.UnexpectedDER):
+ VerifyingKey.from_der(to_decode)
+
+ def test_vk_from_der_garbage_after_point_string(self):
+ type_oid_der = encoded_oid_ecPublicKey
+ curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1))
+ enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der)
+ point_der = der.encode_bitstring(b'\x00\xff', None) + b('garbage')
+ to_decode = der.encode_sequence(enc_type_der, point_der)
+
+ with self.assertRaises(der.UnexpectedDER):
+ VerifyingKey.from_der(to_decode)
+
+ def test_vk_from_der_invalid_bitstring(self):
+ type_oid_der = encoded_oid_ecPublicKey
+ curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1))
+ enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der)
+ point_der = der.encode_bitstring(b'\x08\xff', None)
+ to_decode = der.encode_sequence(enc_type_der, point_der)
+
+ with self.assertRaises(der.UnexpectedDER):
+ VerifyingKey.from_der(to_decode)
+
+ def test_vk_from_der_with_invalid_length_of_encoding(self):
+ type_oid_der = encoded_oid_ecPublicKey
+ curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1))
+ enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der)
+ point_der = der.encode_bitstring(b'\xff'*64, 0)
+ to_decode = der.encode_sequence(enc_type_der, point_der)
+
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_der(to_decode)
+
+ def test_vk_from_der_with_raw_encoding(self):
+ type_oid_der = encoded_oid_ecPublicKey
+ curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1))
+ enc_type_der = der.encode_sequence(type_oid_der, curve_oid_der)
+ point_der = der.encode_bitstring(b'\xff'*48, 0)
+ to_decode = der.encode_sequence(enc_type_der, point_der)
+
+ with self.assertRaises(der.UnexpectedDER):
+ VerifyingKey.from_der(to_decode)
+
+ def test_signature_strings(self):
+ priv1 = SigningKey.generate()
+ pub1 = priv1.get_verifying_key()
+ data = b("data")
+
+ sig = priv1.sign(data)
+ self.assertEqual(type(sig), binary_type)
+ self.assertEqual(len(sig), NIST192p.signature_length)
+ self.assertTrue(pub1.verify(sig, data))
+
+ sig = priv1.sign(data, sigencode=sigencode_strings)
+ self.assertEqual(type(sig), tuple)
+ self.assertEqual(len(sig), 2)
+ self.assertEqual(type(sig[0]), binary_type)
+ self.assertEqual(type(sig[1]), binary_type)
+ self.assertEqual(len(sig[0]), NIST192p.baselen)
+ self.assertEqual(len(sig[1]), NIST192p.baselen)
+ self.assertTrue(pub1.verify(sig, data, sigdecode=sigdecode_strings))
+
+ sig_der = priv1.sign(data, sigencode=sigencode_der)
+ self.assertEqual(type(sig_der), binary_type)
+ self.assertTrue(pub1.verify(sig_der, data, sigdecode=sigdecode_der))
+
+ def test_sig_decode_strings_with_invalid_count(self):
+ with self.assertRaises(MalformedSignature):
+ sigdecode_strings([b('one'), b('two'), b('three')], 0xff)
+
+ def test_sig_decode_strings_with_wrong_r_len(self):
+ with self.assertRaises(MalformedSignature):
+ sigdecode_strings([b('one'), b('two')], 0xff)
+
+ def test_sig_decode_strings_with_wrong_s_len(self):
+ with self.assertRaises(MalformedSignature):
+ sigdecode_strings([b('\xa0'), b('\xb0\xff')], 0xff)
+
+ def test_verify_with_too_long_input(self):
+ sk = SigningKey.generate()
+ vk = sk.verifying_key
+
+ with self.assertRaises(BadDigestError):
+ vk.verify_digest(None, b('\x00') * 128)
+
+ def test_sk_from_secret_exponent_with_wrong_sec_exponent(self):
+ with self.assertRaises(MalformedPointError):
+ SigningKey.from_secret_exponent(0)
+
+ def test_sk_from_string_with_wrong_len_string(self):
+ with self.assertRaises(MalformedPointError):
+ SigningKey.from_string(b('\x01'))
+
+ def test_sk_from_der_with_junk_after_sequence(self):
+ ver_der = der.encode_integer(1)
+ to_decode = der.encode_sequence(ver_der) + b('garbage')
+
+ with self.assertRaises(der.UnexpectedDER):
+ SigningKey.from_der(to_decode)
+
+ def test_sk_from_der_with_wrong_version(self):
+ ver_der = der.encode_integer(0)
+ to_decode = der.encode_sequence(ver_der)
+
+ with self.assertRaises(der.UnexpectedDER):
+ SigningKey.from_der(to_decode)
+
+ def test_sk_from_der_invalid_const_tag(self):
+ ver_der = der.encode_integer(1)
+ privkey_der = der.encode_octet_string(b('\x00\xff'))
+ curve_oid_der = der.encode_oid(*(1, 2, 3))
+ const_der = der.encode_constructed(1, curve_oid_der)
+ to_decode = der.encode_sequence(ver_der, privkey_der, const_der,
+ curve_oid_der)
+
+ with self.assertRaises(der.UnexpectedDER):
+ SigningKey.from_der(to_decode)
+
+ def test_sk_from_der_garbage_after_privkey_oid(self):
+ ver_der = der.encode_integer(1)
+ privkey_der = der.encode_octet_string(b('\x00\xff'))
+ curve_oid_der = der.encode_oid(*(1, 2, 3)) + b('garbage')
+ const_der = der.encode_constructed(0, curve_oid_der)
+ to_decode = der.encode_sequence(ver_der, privkey_der, const_der,
+ curve_oid_der)
+
+ with self.assertRaises(der.UnexpectedDER):
+ SigningKey.from_der(to_decode)
+
+ def test_sk_from_der_with_short_privkey(self):
+ ver_der = der.encode_integer(1)
+ privkey_der = der.encode_octet_string(b('\x00\xff'))
+ curve_oid_der = der.encode_oid(*(1, 2, 840, 10045, 3, 1, 1))
+ const_der = der.encode_constructed(0, curve_oid_der)
+ to_decode = der.encode_sequence(ver_der, privkey_der, const_der,
+ curve_oid_der)
+
+ sk = SigningKey.from_der(to_decode)
+ self.assertEqual(sk.privkey.secret_multiplier, 255)
+
+ def test_sign_with_too_long_hash(self):
+ sk = SigningKey.from_secret_exponent(12)
+
+ with self.assertRaises(BadDigestError):
+ sk.sign_digest(b('\xff') * 64)
+
+ def test_hashfunc(self):
+ sk = SigningKey.generate(curve=NIST256p, hashfunc=sha256)
+ data = b("security level is 128 bits")
+ sig = sk.sign(data)
+ vk = VerifyingKey.from_string(sk.get_verifying_key().to_string(),
+ curve=NIST256p, hashfunc=sha256)
+ self.assertTrue(vk.verify(sig, data))
+
+ sk2 = SigningKey.generate(curve=NIST256p)
+ sig2 = sk2.sign(data, hashfunc=sha256)
+ vk2 = VerifyingKey.from_string(sk2.get_verifying_key().to_string(),
+ curve=NIST256p, hashfunc=sha256)
+ self.assertTrue(vk2.verify(sig2, data))
+
+ vk3 = VerifyingKey.from_string(sk.get_verifying_key().to_string(),
+ curve=NIST256p)
+ self.assertTrue(vk3.verify(sig, data, hashfunc=sha256))
+
+ def test_public_key_recovery(self):
+ # Create keys
+ curve = NIST256p
+
+ sk = SigningKey.generate(curve=curve)
+ vk = sk.get_verifying_key()
+
+ # Sign a message
+ data = b("blahblah")
+ signature = sk.sign(data)
+
+ # Recover verifying keys
+ recovered_vks = VerifyingKey.from_public_key_recovery(signature, data, curve)
+
+ # Test if each pk is valid
+ for recovered_vk in recovered_vks:
+ # Test if recovered vk is valid for the data
+ self.assertTrue(recovered_vk.verify(signature, data))
+
+ # Test if properties are equal
+ self.assertEqual(vk.curve, recovered_vk.curve)
+ self.assertEqual(vk.default_hashfunc, recovered_vk.default_hashfunc)
+
+        # Check that the original vk is in the list of recovered keys
+ self.assertTrue(
+ vk.pubkey.point in [recovered_vk.pubkey.point for recovered_vk in recovered_vks])
+
+ def test_public_key_recovery_with_custom_hash(self):
+ # Create keys
+ curve = NIST256p
+
+ sk = SigningKey.generate(curve=curve, hashfunc=sha256)
+ vk = sk.get_verifying_key()
+
+ # Sign a message
+ data = b("blahblah")
+ signature = sk.sign(data)
+
+ # Recover verifying keys
+        recovered_vks = VerifyingKey.from_public_key_recovery(
+            signature, data, curve, hashfunc=sha256)
+
+        # Check that each recovered public key is valid
+ for recovered_vk in recovered_vks:
+ # Test if recovered vk is valid for the data
+ self.assertTrue(recovered_vk.verify(signature, data))
+
+ # Test if properties are equal
+ self.assertEqual(vk.curve, recovered_vk.curve)
+ self.assertEqual(sha256, recovered_vk.default_hashfunc)
+
+        # Check that the original vk is in the list of recovered keys
+ self.assertTrue(vk.pubkey.point in
+ [recovered_vk.pubkey.point for recovered_vk in recovered_vks])
+
+ def test_encoding(self):
+ sk = SigningKey.from_secret_exponent(123456789)
+ vk = sk.verifying_key
+
+ exp = b('\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3'
+ '\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4'
+ 'z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*')
+ self.assertEqual(vk.to_string(), exp)
+ self.assertEqual(vk.to_string('raw'), exp)
+ self.assertEqual(vk.to_string('uncompressed'), b('\x04') + exp)
+ self.assertEqual(vk.to_string('compressed'), b('\x02') + exp[:24])
+ self.assertEqual(vk.to_string('hybrid'), b('\x06') + exp)
+
+ def test_decoding(self):
+ sk = SigningKey.from_secret_exponent(123456789)
+ vk = sk.verifying_key
+
+ enc = b('\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3'
+ '\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4'
+ 'z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*')
+
+ from_raw = VerifyingKey.from_string(enc)
+ self.assertEqual(from_raw.pubkey.point, vk.pubkey.point)
+
+ from_uncompressed = VerifyingKey.from_string(b('\x04') + enc)
+ self.assertEqual(from_uncompressed.pubkey.point, vk.pubkey.point)
+
+ from_compressed = VerifyingKey.from_string(b('\x02') + enc[:24])
+ self.assertEqual(from_compressed.pubkey.point, vk.pubkey.point)
+
+        from_hybrid = VerifyingKey.from_string(b('\x06') + enc)
+        self.assertEqual(from_hybrid.pubkey.point, vk.pubkey.point)
+
+ def test_decoding_with_malformed_uncompressed(self):
+ enc = b('\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3'
+ '\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4'
+ 'z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*')
+
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_string(b('\x02') + enc)
+
+ def test_decoding_with_malformed_compressed(self):
+ enc = b('\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3'
+ '\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4'
+ 'z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*')
+
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_string(b('\x01') + enc[:24])
+
+ def test_decoding_with_inconsistent_hybrid(self):
+ enc = b('\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3'
+ '\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4'
+ 'z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*')
+
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_string(b('\x07') + enc)
+
+ def test_decoding_with_point_not_on_curve(self):
+ enc = b('\x0c\xe0\x1d\xe0d\x1c\x8eS\x8a\xc0\x9eK\xa8x !\xd5\xc2\xc3'
+ '\xfd\xc8\xa0c\xff\xfb\x02\xb9\xc4\x84)\x1a\x0f\x8b\x87\xa4'
+ 'z\x8a#\xb5\x97\xecO\xb6\xa0HQ\x89*')
+
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_string(enc[:47] + b('\x00'))
+
+ def test_decoding_with_point_at_infinity(self):
+        # decoding the point at infinity is unsupported, as encoding it is never necessary
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_string(b('\x00'))
+
+ def test_not_lying_on_curve(self):
+ enc = number_to_string(NIST192p.curve.p(), NIST192p.curve.p()+1)
+
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_string(b('\x02') + enc)
+
+ def test_from_string_with_invalid_curve_too_short_ver_key_len(self):
+        # both verifying_key_length and baselen are calculated internally
+        # by the Curve constructor; since this code depends on them, verify
+        # that inconsistent values are detected
+ curve = Curve("test", ecdsa.curve_192, ecdsa.generator_192, (1, 2))
+ curve.verifying_key_length = 16
+ curve.baselen = 32
+
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_string(b('\x00')*16, curve)
+
+ def test_from_string_with_invalid_curve_too_long_ver_key_len(self):
+        # both verifying_key_length and baselen are calculated internally
+        # by the Curve constructor; since this code depends on them, verify
+        # that inconsistent values are detected
+ curve = Curve("test", ecdsa.curve_192, ecdsa.generator_192, (1, 2))
+ curve.verifying_key_length = 16
+ curve.baselen = 16
+
+ with self.assertRaises(MalformedPointError):
+ VerifyingKey.from_string(b('\x00')*16, curve)
+
+
+@pytest.mark.parametrize("val,even",
+ [(i, j) for i in range(256) for j in [True, False]])
+def test_VerifyingKey_decode_with_small_values(val, even):
+ enc = number_to_string(val, NIST192p.order)
+
+ if even:
+ enc = b('\x02') + enc
+ else:
+ enc = b('\x03') + enc
+
+    # small values may or may not be valid public keys; verify that when
+    # they are not, only the expected exception is raised
+ try:
+ vk = VerifyingKey.from_string(enc)
+ assert isinstance(vk, VerifyingKey)
+ except MalformedPointError:
+        pass  # rejection with MalformedPointError is the other valid outcome
+
+
+params = []
+for curve in curves:
+ for enc in ["raw", "uncompressed", "compressed", "hybrid"]:
+ params.append(pytest.param(curve, enc, id="{0}-{1}".format(
+ curve.name, enc)))
+
+
+@pytest.mark.parametrize("curve,encoding", params)
+def test_VerifyingKey_encode_decode(curve, encoding):
+ sk = SigningKey.generate(curve=curve)
+ vk = sk.verifying_key
+
+ encoded = vk.to_string(encoding)
+
+ from_enc = VerifyingKey.from_string(encoded, curve=curve)
+
+ assert vk.pubkey.point == from_enc.pubkey.point
+
+
+class OpenSSL(unittest.TestCase):
+ # test interoperability with OpenSSL tools. Note that openssl's ECDSA
+ # sign/verify arguments changed between 0.9.8 and 1.0.0: the early
+ # versions require "-ecdsa-with-SHA1", the later versions want just
+ # "-SHA1" (or to leave out that argument entirely, which means the
+ # signature will use some default digest algorithm, probably determined
+ # by the key, probably always SHA1).
+ #
+ # openssl ecparam -name secp224r1 -genkey -out privkey.pem
+ # openssl ec -in privkey.pem -text -noout # get the priv/pub keys
+ # openssl dgst -ecdsa-with-SHA1 -sign privkey.pem -out data.sig data.txt
+ # openssl asn1parse -in data.sig -inform DER
+ # data.sig is 64 bytes, probably 56b plus ASN1 overhead
+ # openssl dgst -ecdsa-with-SHA1 -prverify privkey.pem -signature data.sig data.txt ; echo $?
+ # openssl ec -in privkey.pem -pubout -out pubkey.pem
+ # openssl ec -in privkey.pem -pubout -outform DER -out pubkey.der
+
+ OPENSSL_SUPPORTED_CURVES = set(c.split(':')[0].strip() for c in
+ run_openssl("ecparam -list_curves")
+ .split('\n'))
+
+ def get_openssl_messagedigest_arg(self, hash_name):
+ v = run_openssl("version")
+ # e.g. "OpenSSL 1.0.0 29 Mar 2010", or "OpenSSL 1.0.0a 1 Jun 2010",
+ # or "OpenSSL 0.9.8o 01 Jun 2010"
+ vs = v.split()[1].split(".")
+ if vs >= ["1", "0", "0"]: # pragma: no cover
+ return "-{0}".format(hash_name)
+ else: # pragma: no cover
+ return "-ecdsa-with-{0}".format(hash_name)
+
+ # sk: 1:OpenSSL->python 2:python->OpenSSL
+ # vk: 3:OpenSSL->python 4:python->OpenSSL
+ # sig: 5:OpenSSL->python 6:python->OpenSSL
+
+ @pytest.mark.skipif("prime192v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime192v1")
+ def test_from_openssl_nist192p(self):
+ return self.do_test_from_openssl(NIST192p)
+
+ @pytest.mark.skipif("prime192v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime192v1")
+ def test_from_openssl_nist192p_sha256(self):
+ return self.do_test_from_openssl(NIST192p, "SHA256")
+
+ @pytest.mark.skipif("secp224r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support secp224r1")
+ def test_from_openssl_nist224p(self):
+ return self.do_test_from_openssl(NIST224p)
+
+ @pytest.mark.skipif("prime256v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime256v1")
+ def test_from_openssl_nist256p(self):
+ return self.do_test_from_openssl(NIST256p)
+
+ @pytest.mark.skipif("prime256v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime256v1")
+ def test_from_openssl_nist256p_sha384(self):
+ return self.do_test_from_openssl(NIST256p, "SHA384")
+
+ @pytest.mark.skipif("prime256v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime256v1")
+ def test_from_openssl_nist256p_sha512(self):
+ return self.do_test_from_openssl(NIST256p, "SHA512")
+
+ @pytest.mark.skipif("secp384r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support secp384r1")
+ def test_from_openssl_nist384p(self):
+ return self.do_test_from_openssl(NIST384p)
+
+ @pytest.mark.skipif("secp521r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support secp521r1")
+ def test_from_openssl_nist521p(self):
+ return self.do_test_from_openssl(NIST521p)
+
+ @pytest.mark.skipif("secp256k1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support secp256k1")
+ def test_from_openssl_secp256k1(self):
+ return self.do_test_from_openssl(SECP256k1)
+
+ @pytest.mark.skipif("brainpoolP160r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP160r1")
+ def test_from_openssl_brainpoolp160r1(self):
+ return self.do_test_from_openssl(BRAINPOOLP160r1)
+
+ @pytest.mark.skipif("brainpoolP192r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP192r1")
+ def test_from_openssl_brainpoolp192r1(self):
+ return self.do_test_from_openssl(BRAINPOOLP192r1)
+
+ @pytest.mark.skipif("brainpoolP224r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP224r1")
+ def test_from_openssl_brainpoolp224r1(self):
+ return self.do_test_from_openssl(BRAINPOOLP224r1)
+
+ @pytest.mark.skipif("brainpoolP256r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP256r1")
+ def test_from_openssl_brainpoolp256r1(self):
+ return self.do_test_from_openssl(BRAINPOOLP256r1)
+
+ @pytest.mark.skipif("brainpoolP320r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP320r1")
+ def test_from_openssl_brainpoolp320r1(self):
+ return self.do_test_from_openssl(BRAINPOOLP320r1)
+
+ @pytest.mark.skipif("brainpoolP384r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP384r1")
+ def test_from_openssl_brainpoolp384r1(self):
+ return self.do_test_from_openssl(BRAINPOOLP384r1)
+
+ @pytest.mark.skipif("brainpoolP512r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP512r1")
+ def test_from_openssl_brainpoolp512r1(self):
+ return self.do_test_from_openssl(BRAINPOOLP512r1)
+
+ def do_test_from_openssl(self, curve, hash_name="SHA1"):
+ curvename = curve.openssl_name
+ assert curvename
+ # OpenSSL: create sk, vk, sign.
+ # Python: read vk(3), checksig(5), read sk(1), sign, check
+ mdarg = self.get_openssl_messagedigest_arg(hash_name)
+ if os.path.isdir("t"): # pragma: no cover
+ shutil.rmtree("t")
+ os.mkdir("t")
+ run_openssl("ecparam -name %s -genkey -out t/privkey.pem" % curvename)
+ run_openssl("ec -in t/privkey.pem -pubout -out t/pubkey.pem")
+ data = b("data")
+ with open("t/data.txt", "wb") as e:
+ e.write(data)
+ run_openssl("dgst %s -sign t/privkey.pem -out t/data.sig t/data.txt" % mdarg)
+ run_openssl("dgst %s -verify t/pubkey.pem -signature t/data.sig t/data.txt" % mdarg)
+ with open("t/pubkey.pem", "rb") as e:
+ pubkey_pem = e.read()
+ vk = VerifyingKey.from_pem(pubkey_pem) # 3
+ with open("t/data.sig", "rb") as e:
+ sig_der = e.read()
+ self.assertTrue(vk.verify(sig_der, data, # 5
+ hashfunc=partial(hashlib.new, hash_name),
+ sigdecode=sigdecode_der))
+
+ with open("t/privkey.pem") as e:
+ fp = e.read()
+ sk = SigningKey.from_pem(fp) # 1
+ sig = sk.sign(
+ data,
+ hashfunc=partial(hashlib.new, hash_name),
+ )
+ self.assertTrue(vk.verify(sig,
+ data,
+ hashfunc=partial(hashlib.new, hash_name)))
+
+ @pytest.mark.skipif("prime192v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime192v1")
+ def test_to_openssl_nist192p(self):
+ self.do_test_to_openssl(NIST192p)
+
+ @pytest.mark.skipif("prime192v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime192v1")
+ def test_to_openssl_nist192p_sha256(self):
+ self.do_test_to_openssl(NIST192p, "SHA256")
+
+ @pytest.mark.skipif("secp224r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support secp224r1")
+ def test_to_openssl_nist224p(self):
+ self.do_test_to_openssl(NIST224p)
+
+ @pytest.mark.skipif("prime256v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime256v1")
+ def test_to_openssl_nist256p(self):
+ self.do_test_to_openssl(NIST256p)
+
+ @pytest.mark.skipif("prime256v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime256v1")
+ def test_to_openssl_nist256p_sha384(self):
+ self.do_test_to_openssl(NIST256p, "SHA384")
+
+ @pytest.mark.skipif("prime256v1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support prime256v1")
+ def test_to_openssl_nist256p_sha512(self):
+ self.do_test_to_openssl(NIST256p, "SHA512")
+
+ @pytest.mark.skipif("secp384r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support secp384r1")
+ def test_to_openssl_nist384p(self):
+ self.do_test_to_openssl(NIST384p)
+
+ @pytest.mark.skipif("secp521r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support secp521r1")
+ def test_to_openssl_nist521p(self):
+ self.do_test_to_openssl(NIST521p)
+
+ @pytest.mark.skipif("secp256k1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support secp256k1")
+ def test_to_openssl_secp256k1(self):
+ self.do_test_to_openssl(SECP256k1)
+
+ @pytest.mark.skipif("brainpoolP160r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP160r1")
+ def test_to_openssl_brainpoolp160r1(self):
+ self.do_test_to_openssl(BRAINPOOLP160r1)
+
+ @pytest.mark.skipif("brainpoolP192r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP192r1")
+ def test_to_openssl_brainpoolp192r1(self):
+ self.do_test_to_openssl(BRAINPOOLP192r1)
+
+ @pytest.mark.skipif("brainpoolP224r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP224r1")
+ def test_to_openssl_brainpoolp224r1(self):
+ self.do_test_to_openssl(BRAINPOOLP224r1)
+
+ @pytest.mark.skipif("brainpoolP256r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP256r1")
+ def test_to_openssl_brainpoolp256r1(self):
+ self.do_test_to_openssl(BRAINPOOLP256r1)
+
+ @pytest.mark.skipif("brainpoolP320r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP320r1")
+ def test_to_openssl_brainpoolp320r1(self):
+ self.do_test_to_openssl(BRAINPOOLP320r1)
+
+ @pytest.mark.skipif("brainpoolP384r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP384r1")
+ def test_to_openssl_brainpoolp384r1(self):
+ self.do_test_to_openssl(BRAINPOOLP384r1)
+
+ @pytest.mark.skipif("brainpoolP512r1" not in OPENSSL_SUPPORTED_CURVES,
+ reason="system openssl does not support brainpoolP512r1")
+ def test_to_openssl_brainpoolp512r1(self):
+ self.do_test_to_openssl(BRAINPOOLP512r1)
+
+ def do_test_to_openssl(self, curve, hash_name="SHA1"):
+ curvename = curve.openssl_name
+ assert curvename
+ # Python: create sk, vk, sign.
+ # OpenSSL: read vk(4), checksig(6), read sk(2), sign, check
+ mdarg = self.get_openssl_messagedigest_arg(hash_name)
+ if os.path.isdir("t"): # pragma: no cover
+ shutil.rmtree("t")
+ os.mkdir("t")
+ sk = SigningKey.generate(curve=curve)
+ vk = sk.get_verifying_key()
+ data = b("data")
+ with open("t/pubkey.der", "wb") as e:
+ e.write(vk.to_der()) # 4
+ with open("t/pubkey.pem", "wb") as e:
+ e.write(vk.to_pem()) # 4
+ sig_der = sk.sign(data, hashfunc=partial(hashlib.new, hash_name),
+ sigencode=sigencode_der)
+
+ with open("t/data.sig", "wb") as e:
+ e.write(sig_der) # 6
+ with open("t/data.txt", "wb") as e:
+ e.write(data)
+ with open("t/baddata.txt", "wb") as e:
+ e.write(data + b("corrupt"))
+
+ self.assertRaises(SubprocessError, run_openssl,
+ "dgst %s -verify t/pubkey.der -keyform DER -signature t/data.sig t/baddata.txt" % mdarg)
+ run_openssl("dgst %s -verify t/pubkey.der -keyform DER -signature t/data.sig t/data.txt" % mdarg)
+
+ with open("t/privkey.pem", "wb") as e:
+ e.write(sk.to_pem()) # 2
+ run_openssl("dgst %s -sign t/privkey.pem -out t/data.sig2 t/data.txt" % mdarg)
+ run_openssl("dgst %s -verify t/pubkey.pem -signature t/data.sig2 t/data.txt" % mdarg)
+
+
+class DER(unittest.TestCase):
+ def test_integer(self):
+ self.assertEqual(der.encode_integer(0), b("\x02\x01\x00"))
+ self.assertEqual(der.encode_integer(1), b("\x02\x01\x01"))
+ self.assertEqual(der.encode_integer(127), b("\x02\x01\x7f"))
+ self.assertEqual(der.encode_integer(128), b("\x02\x02\x00\x80"))
+ self.assertEqual(der.encode_integer(256), b("\x02\x02\x01\x00"))
+ # self.assertEqual(der.encode_integer(-1), b("\x02\x01\xff"))
+
+ def s(n):
+ return der.remove_integer(der.encode_integer(n) + b("junk"))
+ self.assertEqual(s(0), (0, b("junk")))
+ self.assertEqual(s(1), (1, b("junk")))
+ self.assertEqual(s(127), (127, b("junk")))
+ self.assertEqual(s(128), (128, b("junk")))
+ self.assertEqual(s(256), (256, b("junk")))
+ self.assertEqual(s(1234567890123456789012345678901234567890),
+ (1234567890123456789012345678901234567890, b("junk")))
+
+ def test_number(self):
+ self.assertEqual(der.encode_number(0), b("\x00"))
+ self.assertEqual(der.encode_number(127), b("\x7f"))
+ self.assertEqual(der.encode_number(128), b("\x81\x00"))
+ self.assertEqual(der.encode_number(3 * 128 + 7), b("\x83\x07"))
+ # self.assertEqual(der.read_number("\x81\x9b" + "more"), (155, 2))
+ # self.assertEqual(der.encode_number(155), b("\x81\x9b"))
+ for n in (0, 1, 2, 127, 128, 3 * 128 + 7, 840, 10045): # , 155):
+ x = der.encode_number(n) + b("more")
+ n1, llen = der.read_number(x)
+ self.assertEqual(n1, n)
+ self.assertEqual(x[llen:], b("more"))
+
+ def test_length(self):
+ self.assertEqual(der.encode_length(0), b("\x00"))
+ self.assertEqual(der.encode_length(127), b("\x7f"))
+ self.assertEqual(der.encode_length(128), b("\x81\x80"))
+ self.assertEqual(der.encode_length(255), b("\x81\xff"))
+ self.assertEqual(der.encode_length(256), b("\x82\x01\x00"))
+ self.assertEqual(der.encode_length(3 * 256 + 7), b("\x82\x03\x07"))
+ self.assertEqual(der.read_length(b("\x81\x9b") + b("more")), (155, 2))
+ self.assertEqual(der.encode_length(155), b("\x81\x9b"))
+ for n in (0, 1, 2, 127, 128, 255, 256, 3 * 256 + 7, 155):
+ x = der.encode_length(n) + b("more")
+ n1, llen = der.read_length(x)
+ self.assertEqual(n1, n)
+ self.assertEqual(x[llen:], b("more"))
+
+ def test_sequence(self):
+ x = der.encode_sequence(b("ABC"), b("DEF")) + b("GHI")
+ self.assertEqual(x, b("\x30\x06ABCDEFGHI"))
+ x1, rest = der.remove_sequence(x)
+ self.assertEqual(x1, b("ABCDEF"))
+ self.assertEqual(rest, b("GHI"))
+
+ def test_constructed(self):
+ x = der.encode_constructed(0, NIST224p.encoded_oid)
+ self.assertEqual(hexlify(x), b("a007") + b("06052b81040021"))
+ x = der.encode_constructed(1, unhexlify(b("0102030a0b0c")))
+ self.assertEqual(hexlify(x), b("a106") + b("0102030a0b0c"))
+
+
+class Util(unittest.TestCase):
+ def test_trytryagain(self):
+ tta = util.randrange_from_seed__trytryagain
+ for i in range(1000):
+ seed = "seed-%d" % i
+ for order in (2**8 - 2, 2**8 - 1, 2**8, 2**8 + 1, 2**8 + 2,
+ 2**16 - 1, 2**16 + 1):
+ n = tta(seed, order)
+ self.assertTrue(1 <= n < order, (1, n, order))
+ # this trytryagain *does* provide long-term stability
+ self.assertEqual(("%x" % (tta("seed", NIST224p.order))).encode(),
+ b("6fa59d73bf0446ae8743cf748fc5ac11d5585a90356417e97155c3bc"))
+
+ @given(st.integers(min_value=0, max_value=10**200))
+ def test_randrange(self, i):
+ # util.randrange does not provide long-term stability: we might
+ # change the algorithm in the future.
+ entropy = util.PRNG("seed-%d" % i)
+ for order in (2**8 - 2, 2**8 - 1, 2**8,
+ 2**16 - 1, 2**16 + 1,
+ ):
+ # that oddball 2**16+1 takes half our runtime
+ n = util.randrange(order, entropy=entropy)
+ self.assertTrue(1 <= n < order, (1, n, order))
+
+ def OFF_test_prove_uniformity(self): # pragma: no cover
+ order = 2**8 - 2
+ counts = dict([(i, 0) for i in range(1, order)])
+ assert 0 not in counts
+ assert order not in counts
+ for i in range(1000000):
+ seed = "seed-%d" % i
+ n = util.randrange_from_seed__trytryagain(seed, order)
+ counts[n] += 1
+ # this technique should use the full range
+ self.assertTrue(counts[order - 1])
+ for i in range(1, order):
+ print_("%3d: %s" % (i, "*" * (counts[i] // 100)))
+
+
+class RFC6979(unittest.TestCase):
+ # https://tools.ietf.org/html/rfc6979#appendix-A.1
+ def _do(self, generator, secexp, hsh, hash_func, expected):
+ actual = rfc6979.generate_k(generator.order(), secexp, hash_func, hsh)
+ self.assertEqual(expected, actual)
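+
+    # generate_k is fully deterministic: a fixed (order, secexp, hash_func,
+    # digest) tuple always yields the same k. A sketch mirroring the _do()
+    # call above (values illustrative):
+    #
+    #     k = rfc6979.generate_k(SECP256k1.generator.order(), 1, sha256,
+    #                            sha256(b("sample")).digest())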
+
+ def test_SECP256k1(self):
+        '''The RFC doesn't contain test vectors for SECP256k1 (as used in Bitcoin).
+        This vector has been computed with the Go reference implementation instead.'''
+ self._do(
+ generator=SECP256k1.generator,
+ secexp=int("9d0219792467d7d37b4d43298a7d0c05", 16),
+ hsh=sha256(b("sample")).digest(),
+ hash_func=sha256,
+ expected=int("8fa1f95d514760e498f28957b824ee6ec39ed64826ff4fecc2b5739ec45b91cd", 16))
+
+ def test_SECP256k1_2(self):
+ self._do(
+ generator=SECP256k1.generator,
+ secexp=int("cca9fbcc1b41e5a95d369eaa6ddcff73b61a4efaa279cfc6567e8daa39cbaf50", 16),
+ hsh=sha256(b("sample")).digest(),
+ hash_func=sha256,
+ expected=int("2df40ca70e639d89528a6b670d9d48d9165fdc0febc0974056bdce192b8e16a3", 16))
+
+ def test_SECP256k1_3(self):
+ self._do(
+ generator=SECP256k1.generator,
+ secexp=0x1,
+ hsh=sha256(b("Satoshi Nakamoto")).digest(),
+ hash_func=sha256,
+ expected=0x8F8A276C19F4149656B280621E358CCE24F5F52542772691EE69063B74F15D15)
+
+ def test_SECP256k1_4(self):
+ self._do(
+ generator=SECP256k1.generator,
+ secexp=0x1,
+ hsh=sha256(b("All those moments will be lost in time, like tears in rain. Time to die...")).digest(),
+ hash_func=sha256,
+ expected=0x38AA22D72376B4DBC472E06C3BA403EE0A394DA63FC58D88686C611ABA98D6B3)
+
+ def test_SECP256k1_5(self):
+ self._do(
+ generator=SECP256k1.generator,
+ secexp=0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364140,
+ hsh=sha256(b("Satoshi Nakamoto")).digest(),
+ hash_func=sha256,
+ expected=0x33A19B60E25FB6F4435AF53A3D42D493644827367E6453928554F43E49AA6F90)
+
+ def test_SECP256k1_6(self):
+ self._do(
+ generator=SECP256k1.generator,
+ secexp=0xf8b8af8ce3c7cca5e300d33939540c10d45ce001b8f252bfbc57ba0342904181,
+ hsh=sha256(b("Alan Turing")).digest(),
+ hash_func=sha256,
+ expected=0x525A82B70E67874398067543FD84C83D30C175FDC45FDEEE082FE13B1D7CFDF1)
+
+ def test_1(self):
+        # Basic example from the RFC; it also exercises 'try-try-again' from Step H of RFC 6979
+ self._do(
+ generator=Point(None, 0, 0, int("4000000000000000000020108A2E0CC0D99F8A5EF", 16)),
+ secexp=int("09A4D6792295A7F730FC3F2B49CBC0F62E862272F", 16),
+ hsh=unhexlify(b("AF2BDBE1AA9B6EC1E2ADE1D694F41FC71A831D0268E9891562113D8A62ADD1BF")),
+ hash_func=sha256,
+ expected=int("23AF4074C90A02B3FE61D286D5C87F425E6BDD81B", 16))
+
+ def test_2(self):
+ self._do(
+ generator=NIST192p.generator,
+ secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
+ hsh=sha1(b("sample")).digest(),
+ hash_func=sha1,
+ expected=int("37D7CA00D2C7B0E5E412AC03BD44BA837FDD5B28CD3B0021", 16))
+
+ def test_3(self):
+ self._do(
+ generator=NIST192p.generator,
+ secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
+ hsh=sha256(b("sample")).digest(),
+ hash_func=sha256,
+ expected=int("32B1B6D7D42A05CB449065727A84804FB1A3E34D8F261496", 16))
+
+ def test_4(self):
+ self._do(
+ generator=NIST192p.generator,
+ secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
+ hsh=sha512(b("sample")).digest(),
+ hash_func=sha512,
+ expected=int("A2AC7AB055E4F20692D49209544C203A7D1F2C0BFBC75DB1", 16))
+
+ def test_5(self):
+ self._do(
+ generator=NIST192p.generator,
+ secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
+ hsh=sha1(b("test")).digest(),
+ hash_func=sha1,
+ expected=int("D9CF9C3D3297D3260773A1DA7418DB5537AB8DD93DE7FA25", 16))
+
+ def test_6(self):
+ self._do(
+ generator=NIST192p.generator,
+ secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
+ hsh=sha256(b("test")).digest(),
+ hash_func=sha256,
+ expected=int("5C4CE89CF56D9E7C77C8585339B006B97B5F0680B4306C6C", 16))
+
+ def test_7(self):
+ self._do(
+ generator=NIST192p.generator,
+ secexp=int("6FAB034934E4C0FC9AE67F5B5659A9D7D1FEFD187EE09FD4", 16),
+ hsh=sha512(b("test")).digest(),
+ hash_func=sha512,
+ expected=int("0758753A5254759C7CFBAD2E2D9B0792EEE44136C9480527", 16))
+
+ def test_8(self):
+ self._do(
+ generator=NIST521p.generator,
+ secexp=int("0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", 16),
+ hsh=sha1(b("sample")).digest(),
+ hash_func=sha1,
+ expected=int("089C071B419E1C2820962321787258469511958E80582E95D8378E0C2CCDB3CB42BEDE42F50E3FA3C71F5A76724281D31D9C89F0F91FC1BE4918DB1C03A5838D0F9", 16))
+
+ def test_9(self):
+ self._do(
+ generator=NIST521p.generator,
+ secexp=int("0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", 16),
+ hsh=sha256(b("sample")).digest(),
+ hash_func=sha256,
+ expected=int("0EDF38AFCAAECAB4383358B34D67C9F2216C8382AAEA44A3DAD5FDC9C32575761793FEF24EB0FC276DFC4F6E3EC476752F043CF01415387470BCBD8678ED2C7E1A0", 16))
+
+ def test_10(self):
+ self._do(
+ generator=NIST521p.generator,
+ secexp=int("0FAD06DAA62BA3B25D2FB40133DA757205DE67F5BB0018FEE8C86E1B68C7E75CAA896EB32F1F47C70855836A6D16FCC1466F6D8FBEC67DB89EC0C08B0E996B83538", 16),
+ hsh=sha512(b("test")).digest(),
+ hash_func=sha512,
+ expected=int("16200813020EC986863BEDFC1B121F605C1215645018AEA1A7B215A564DE9EB1B38A67AA1128B80CE391C4FB71187654AAA3431027BFC7F395766CA988C964DC56D", 16))
+
+
+class ECDH(unittest.TestCase):
+ def _do(self, curve, generator, dA, x_qA, y_qA, dB, x_qB, y_qB, x_Z, y_Z):
+ qA = dA * generator
+ qB = dB * generator
+ Z = dA * qB
+ self.assertEqual(Point(curve, x_qA, y_qA), qA)
+ self.assertEqual(Point(curve, x_qB, y_qB), qB)
+ self.assertTrue((dA * qB) ==
+ (dA * dB * generator) ==
+ (dB * dA * generator) ==
+ (dB * qA))
+ self.assertEqual(Point(curve, x_Z, y_Z), Z)
+
+
+class RFC6932(ECDH):
+ # https://tools.ietf.org/html/rfc6932#appendix-A.1
+
+ def test_brainpoolP224r1(self):
+ self._do(
+ curve=curve_brainpoolp224r1,
+ generator=BRAINPOOLP224r1.generator,
+ dA=int("7C4B7A2C8A4BAD1FBB7D79CC0955DB7C6A4660CA64CC4778159B495E",
+ 16),
+ x_qA=int("B104A67A6F6E85E14EC1825E1539E8ECDBBF584922367DD88C6BDCF2",
+ 16),
+ y_qA=int("46D782E7FDB5F60CD8404301AC5949C58EDB26BC68BA07695B750A94",
+ 16),
+ dB=int("63976D4AAE6CD0F6DD18DEFEF55D96569D0507C03E74D6486FFA28FB",
+ 16),
+ x_qB=int("2A97089A9296147B71B21A4B574E1278245B536F14D8C2B9D07A874E",
+ 16),
+ y_qB=int("9B900D7C77A709A797276B8CA1BA61BB95B546FC29F862E44D59D25B",
+ 16),
+ x_Z=int("312DFD98783F9FB77B9704945A73BEB6DCCBE3B65D0F967DCAB574EB",
+ 16),
+ y_Z=int("6F800811D64114B1C48C621AB3357CF93F496E4238696A2A012B3C98",
+ 16))
+
+ def test_brainpoolP256r1(self):
+ self._do(
+ curve=curve_brainpoolp256r1,
+ generator=BRAINPOOLP256r1.generator,
+ dA=int("041EB8B1E2BC681BCE8E39963B2E9FC415B05283313DD1A8BCC055F11AE"
+ "49699", 16),
+ x_qA=int("78028496B5ECAAB3C8B6C12E45DB1E02C9E4D26B4113BC4F015F60C5C"
+ "CC0D206", 16),
+ y_qA=int("A2AE1762A3831C1D20F03F8D1E3C0C39AFE6F09B4D44BBE80CD100987"
+ "B05F92B", 16),
+ dB=int("06F5240EACDB9837BC96D48274C8AA834B6C87BA9CC3EEDD81F99A16B8D"
+ "804D3", 16),
+ x_qB=int("8E07E219BA588916C5B06AA30A2F464C2F2ACFC1610A3BE2FB240B635"
+ "341F0DB", 16),
+ y_qB=int("148EA1D7D1E7E54B9555B6C9AC90629C18B63BEE5D7AA6949EBBF47B2"
+ "4FDE40D", 16),
+ x_Z=int("05E940915549E9F6A4A75693716E37466ABA79B4BF2919877A16DD2CC2"
+ "E23708", 16),
+ y_Z=int("6BC23B6702BC5A019438CEEA107DAAD8B94232FFBBC350F3B137628FE6"
+ "FD134C", 16))
+
+ def test_brainpoolP384r1(self):
+ self._do(
+ curve=curve_brainpoolp384r1,
+ generator=BRAINPOOLP384r1.generator,
+ dA=int("014EC0755B78594BA47FB0A56F6173045B4331E74BA1A6F47322E70D79D"
+ "828D97E095884CA72B73FDABD5910DF0FA76A", 16),
+ x_qA=int("45CB26E4384DAF6FB776885307B9A38B7AD1B5C692E0C32F012533277"
+ "8F3B8D3F50CA358099B30DEB5EE69A95C058B4E", 16),
+ y_qA=int("8173A1C54AFFA7E781D0E1E1D12C0DC2B74F4DF58E4A4E3AF7026C5D3"
+ "2DC530A2CD89C859BB4B4B768497F49AB8CC859", 16),
+ dB=int("6B461CB79BD0EA519A87D6828815D8CE7CD9B3CAA0B5A8262CBCD550A01"
+ "5C90095B976F3529957506E1224A861711D54", 16),
+ x_qB=int("01BF92A92EE4BE8DED1A911125C209B03F99E3161CFCC986DC7711383"
+ "FC30AF9CE28CA3386D59E2C8D72CE1E7B4666E8", 16),
+ y_qB=int("3289C4A3A4FEE035E39BDB885D509D224A142FF9FBCC5CFE5CCBB3026"
+ "8EE47487ED8044858D31D848F7A95C635A347AC", 16),
+ x_Z=int("04CC4FF3DCCCB07AF24E0ACC529955B36D7C807772B92FCBE48F3AFE9A"
+ "2F370A1F98D3FA73FD0C0747C632E12F1423EC", 16),
+ y_Z=int("7F465F90BD69AFB8F828A214EB9716D66ABC59F17AF7C75EE7F1DE22AB"
+ "5D05085F5A01A9382D05BF72D96698FE3FF64E", 16))
+
+ def test_brainpoolP512r1(self):
+ self._do(
+ curve=curve_brainpoolp512r1,
+ generator=BRAINPOOLP512r1.generator,
+ dA=int("636B6BE0482A6C1C41AA7AE7B245E983392DB94CECEA2660A379CFE1595"
+ "59E357581825391175FC195D28BAC0CF03A7841A383B95C262B98378287"
+ "4CCE6FE333", 16),
+ x_qA=int("0562E68B9AF7CBFD5565C6B16883B777FF11C199161ECC427A39D17EC"
+ "2166499389571D6A994977C56AD8252658BA8A1B72AE42F4FB7532151"
+ "AFC3EF0971CCDA", 16),
+ y_qA=int("A7CA2D8191E21776A89860AFBC1F582FAA308D551C1DC6133AF9F9C3C"
+ "AD59998D70079548140B90B1F311AFB378AA81F51B275B2BE6B7DEE97"
+ "8EFC7343EA642E", 16),
+ dB=int("0AF4E7F6D52EDD52907BB8DBAB3992A0BB696EC10DF11892FF205B66D38"
+ "1ECE72314E6A6EA079CEA06961DBA5AE6422EF2E9EE803A1F236FB96A17"
+ "99B86E5C8B", 16),
+ x_qB=int("5A7954E32663DFF11AE24712D87419F26B708AC2B92877D6BFEE2BFC4"
+ "3714D89BBDB6D24D807BBD3AEB7F0C325F862E8BADE4F74636B97EAAC"
+ "E739E11720D323", 16),
+ y_qB=int("96D14621A9283A1BED84DE8DD64836B2C0758B11441179DC0C54C0D49"
+ "A47C03807D171DD544B72CAAEF7B7CE01C7753E2CAD1A861ECA55A719"
+ "54EE1BA35E04BE", 16),
+ x_Z=int("1EE8321A4BBF93B9CF8921AB209850EC9B7066D1984EF08C2BB7232362"
+ "08AC8F1A483E79461A00E0D5F6921CE9D360502F85C812BEDEE23AC5B2"
+ "10E5811B191E", 16),
+ y_Z=int("2632095B7B936174B41FD2FAF369B1D18DCADEED7E410A7E251F083109"
+ "7C50D02CFED02607B6A2D5ADB4C0006008562208631875B58B54ECDA5A"
+ "4F9FE9EAABA6", 16))
+
+
+class RFC7027(ECDH):
+ # https://tools.ietf.org/html/rfc7027#appendix-A
+
+ def test_brainpoolP256r1(self):
+ self._do(
+ curve=curve_brainpoolp256r1,
+ generator=BRAINPOOLP256r1.generator,
+ dA=int("81DB1EE100150FF2EA338D708271BE38300CB54241D79950F77B0630398"
+ "04F1D", 16),
+ x_qA=int("44106E913F92BC02A1705D9953A8414DB95E1AAA49E81D9E85F929A8E"
+ "3100BE5", 16),
+ y_qA=int("8AB4846F11CACCB73CE49CBDD120F5A900A69FD32C272223F789EF10E"
+ "B089BDC", 16),
+ dB=int("55E40BC41E37E3E2AD25C3C6654511FFA8474A91A0032087593852D3E7D"
+ "76BD3", 16),
+ x_qB=int("8D2D688C6CF93E1160AD04CC4429117DC2C41825E1E9FCA0ADDD34E6F"
+ "1B39F7B", 16),
+ y_qB=int("990C57520812BE512641E47034832106BC7D3E8DD0E4C7F1136D70065"
+ "47CEC6A", 16),
+ x_Z=int("89AFC39D41D3B327814B80940B042590F96556EC91E6AE7939BCE31F3A"
+ "18BF2B", 16),
+ y_Z=int("49C27868F4ECA2179BFD7D59B1E3BF34C1DBDE61AE12931648F43E5963"
+ "2504DE", 16))
+
+ def test_brainpoolP384r1(self):
+ self._do(
+ curve=curve_brainpoolp384r1,
+ generator=BRAINPOOLP384r1.generator,
+ dA=int("1E20F5E048A5886F1F157C74E91BDE2B98C8B52D58E5003D57053FC4B0B"
+ "D65D6F15EB5D1EE1610DF870795143627D042", 16),
+ x_qA=int("68B665DD91C195800650CDD363C625F4E742E8134667B767B1B476793"
+ "588F885AB698C852D4A6E77A252D6380FCAF068", 16),
+ y_qA=int("55BC91A39C9EC01DEE36017B7D673A931236D2F1F5C83942D049E3FA2"
+ "0607493E0D038FF2FD30C2AB67D15C85F7FAA59", 16),
+ dB=int("032640BC6003C59260F7250C3DB58CE647F98E1260ACCE4ACDA3DD869F7"
+ "4E01F8BA5E0324309DB6A9831497ABAC96670", 16),
+ x_qB=int("4D44326F269A597A5B58BBA565DA5556ED7FD9A8A9EB76C25F46DB69D"
+ "19DC8CE6AD18E404B15738B2086DF37E71D1EB4", 16),
+ y_qB=int("62D692136DE56CBE93BF5FA3188EF58BC8A3A0EC6C1E151A21038A42E"
+ "9185329B5B275903D192F8D4E1F32FE9CC78C48", 16),
+ x_Z=int("0BD9D3A7EA0B3D519D09D8E48D0785FB744A6B355E6304BC51C229FBBC"
+ "E239BBADF6403715C35D4FB2A5444F575D4F42", 16),
+ y_Z=int("0DF213417EBE4D8E40A5F76F66C56470C489A3478D146DECF6DF0D94BA"
+ "E9E598157290F8756066975F1DB34B2324B7BD", 16))
+
+ def test_brainpoolP512r1(self):
+ self._do(
+ curve=curve_brainpoolp512r1,
+ generator=BRAINPOOLP512r1.generator,
+ dA=int("16302FF0DBBB5A8D733DAB7141C1B45ACBC8715939677F6A56850A38BD8"
+ "7BD59B09E80279609FF333EB9D4C061231FB26F92EEB04982A5F1D1764C"
+ "AD57665422", 16),
+ x_qA=int("0A420517E406AAC0ACDCE90FCD71487718D3B953EFD7FBEC5F7F27E28"
+ "C6149999397E91E029E06457DB2D3E640668B392C2A7E737A7F0BF044"
+ "36D11640FD09FD", 16),
+ y_qA=int("72E6882E8DB28AAD36237CD25D580DB23783961C8DC52DFA2EC138AD4"
+ "72A0FCEF3887CF62B623B2A87DE5C588301EA3E5FC269B373B60724F5"
+ "E82A6AD147FDE7", 16),
+ dB=int("230E18E1BCC88A362FA54E4EA3902009292F7F8033624FD471B5D8ACE49"
+ "D12CFABBC19963DAB8E2F1EBA00BFFB29E4D72D13F2224562F405CB8050"
+ "3666B25429", 16),
+ x_qB=int("9D45F66DE5D67E2E6DB6E93A59CE0BB48106097FF78A081DE781CDB31"
+ "FCE8CCBAAEA8DD4320C4119F1E9CD437A2EAB3731FA9668AB268D871D"
+ "EDA55A5473199F", 16),
+ y_qB=int("2FDC313095BCDD5FB3A91636F07A959C8E86B5636A1E930E8396049CB"
+ "481961D365CC11453A06C719835475B12CB52FC3C383BCE35E27EF194"
+ "512B71876285FA", 16),
+ x_Z=int("A7927098655F1F9976FA50A9D566865DC530331846381C87256BAF3226"
+ "244B76D36403C024D7BBF0AA0803EAFF405D3D24F11A9B5C0BEF679FE1"
+ "454B21C4CD1F", 16),
+ y_Z=int("7DB71C3DEF63212841C463E881BDCF055523BD368240E6C3143BD8DEF8"
+ "B3B3223B95E0F53082FF5E412F4222537A43DF1C6D25729DDB51620A83"
+ "2BE6A26680A2", 16))
+
+
+# https://tools.ietf.org/html/rfc4754#page-5
+@pytest.mark.parametrize("w, gwx, gwy, k, msg, md, r, s, curve",
+ [pytest.param(
+ "DC51D3866A15BACDE33D96F992FCA99DA7E6EF0934E7097559C27F1614C88A7F",
+ "2442A5CC0ECD015FA3CA31DC8E2BBC70BF42D60CBCA20085E0822CB04235E970",
+ "6FC98BD7E50211A4A27102FA3549DF79EBCB4BF246B80945CDDFE7D509BBFD7D",
+ "9E56F509196784D963D1C0A401510EE7ADA3DCC5DEE04B154BF61AF1D5A6DECE",
+ b"abc",
+ sha256,
+ "CB28E0999B9C7715FD0A80D8E47A77079716CBBF917DD72E97566EA1C066957C",
+ "86FA3BB4E26CAD5BF90B7F81899256CE7594BB1EA0C89212748BFF3B3D5B0315",
+ NIST256p,
+ id="ECDSA-256"),
+ pytest.param(
+ "0BEB646634BA87735D77AE4809A0EBEA865535DE4C1E1DCB692E84708E81A5AF"
+ "62E528C38B2A81B35309668D73524D9F",
+ "96281BF8DD5E0525CA049C048D345D3082968D10FEDF5C5ACA0C64E6465A97EA"
+ "5CE10C9DFEC21797415710721F437922",
+ "447688BA94708EB6E2E4D59F6AB6D7EDFF9301D249FE49C33096655F5D502FAD"
+ "3D383B91C5E7EDAA2B714CC99D5743CA",
+ "B4B74E44D71A13D568003D7489908D564C7761E229C58CBFA18950096EB7463B"
+ "854D7FA992F934D927376285E63414FA",
+ b'abc',
+ sha384,
+ "FB017B914E29149432D8BAC29A514640B46F53DDAB2C69948084E2930F1C8F7E"
+ "08E07C9C63F2D21A07DCB56A6AF56EB3",
+ "B263A1305E057F984D38726A1B46874109F417BCA112674C528262A40A629AF1"
+ "CBB9F516CE0FA7D2FF630863A00E8B9F",
+ NIST384p,
+ id="ECDSA-384"),
+ pytest.param(
+ "0065FDA3409451DCAB0A0EAD45495112A3D813C17BFD34BDF8C1209D7DF58491"
+ "20597779060A7FF9D704ADF78B570FFAD6F062E95C7E0C5D5481C5B153B48B37"
+ "5FA1",
+ "0151518F1AF0F563517EDD5485190DF95A4BF57B5CBA4CF2A9A3F6474725A35F"
+ "7AFE0A6DDEB8BEDBCD6A197E592D40188901CECD650699C9B5E456AEA5ADD190"
+ "52A8",
+ "006F3B142EA1BFFF7E2837AD44C9E4FF6D2D34C73184BBAD90026DD5E6E85317"
+ "D9DF45CAD7803C6C20035B2F3FF63AFF4E1BA64D1C077577DA3F4286C58F0AEA"
+ "E643",
+ "00C1C2B305419F5A41344D7E4359933D734096F556197A9B244342B8B62F46F9"
+ "373778F9DE6B6497B1EF825FF24F42F9B4A4BD7382CFC3378A540B1B7F0C1B95"
+ "6C2F",
+ b'abc',
+ sha512,
+ "0154FD3836AF92D0DCA57DD5341D3053988534FDE8318FC6AAAAB68E2E6F4339"
+ "B19F2F281A7E0B22C269D93CF8794A9278880ED7DBB8D9362CAEACEE54432055"
+ "2251",
+ "017705A7030290D1CEB605A9A1BB03FF9CDD521E87A696EC926C8C10C8362DF4"
+ "975367101F67D1CF9BCCBF2F3D239534FA509E70AAC851AE01AAC68D62F86647"
+ "2660",
+ NIST521p,
+ id="ECDSA-521")
+ ])
+def test_RFC4754_vectors(w, gwx, gwy, k, msg, md, r, s, curve):
+ sk = SigningKey.from_string(unhexlify(w), curve)
+ vk = VerifyingKey.from_string(unhexlify(gwx + gwy), curve)
+ assert sk.verifying_key == vk
+ sig = sk.sign(msg, hashfunc=md, sigencode=sigencode_strings, k=int(k, 16))
+
+ assert sig == (unhexlify(r), unhexlify(s))
+
+ assert vk.verify(sig, msg, md, sigdecode_strings)
diff --git a/third_party/python/ecdsa/ecdsa/test_rw_lock.py b/third_party/python/ecdsa/ecdsa/test_rw_lock.py
new file mode 100644
index 0000000000..de11d15622
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/test_rw_lock.py
@@ -0,0 +1,175 @@
+# Copyright Mateusz Kobos, (c) 2011
+# https://code.activestate.com/recipes/577803-reader-writer-lock-with-priority-for-writers/
+# released under the MIT licence
+
+import unittest
+import threading
+import time
+import copy
+from ._rwlock import RWLock
+
+
+class Writer(threading.Thread):
+ def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time, to_write):
+ """
+        @param buffer_: common buffer shared by the readers and writers
+ @type buffer_: list
+ @type rw_lock: L{RWLock}
+ @param init_sleep_time: sleep time before doing any action
+ @type init_sleep_time: C{float}
+ @param sleep_time: sleep time while in critical section
+ @type sleep_time: C{float}
+ @param to_write: data that will be appended to the buffer
+ """
+ threading.Thread.__init__(self)
+ self.__buffer = buffer_
+ self.__rw_lock = rw_lock
+ self.__init_sleep_time = init_sleep_time
+ self.__sleep_time = sleep_time
+ self.__to_write = to_write
+ self.entry_time = None
+ """Time of entry to the critical section"""
+ self.exit_time = None
+ """Time of exit from the critical section"""
+
+ def run(self):
+ time.sleep(self.__init_sleep_time)
+ self.__rw_lock.writer_acquire()
+ self.entry_time = time.time()
+ time.sleep(self.__sleep_time)
+ self.__buffer.append(self.__to_write)
+ self.exit_time = time.time()
+ self.__rw_lock.writer_release()
+
+
+class Reader(threading.Thread):
+ def __init__(self, buffer_, rw_lock, init_sleep_time, sleep_time):
+ """
+ @param buffer_: common buffer shared by the readers and writers
+ @type buffer_: list
+ @type rw_lock: L{RWLock}
+ @param init_sleep_time: sleep time before doing any action
+ @type init_sleep_time: C{float}
+ @param sleep_time: sleep time while in critical section
+ @type sleep_time: C{float}
+ """
+ threading.Thread.__init__(self)
+ self.__buffer = buffer_
+ self.__rw_lock = rw_lock
+ self.__init_sleep_time = init_sleep_time
+ self.__sleep_time = sleep_time
+ self.buffer_read = None
+ """a copy of a the buffer read while in critical section"""
+ self.entry_time = None
+ """Time of entry to the critical section"""
+ self.exit_time = None
+ """Time of exit from the critical section"""
+
+ def run(self):
+ time.sleep(self.__init_sleep_time)
+ self.__rw_lock.reader_acquire()
+ self.entry_time = time.time()
+ time.sleep(self.__sleep_time)
+ self.buffer_read = copy.deepcopy(self.__buffer)
+ self.exit_time = time.time()
+ self.__rw_lock.reader_release()
+
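+# A sketch of the RWLock protocol the helpers above exercise; the method names
+# are taken from the acquire/release calls in run(), while shared_buffer and
+# item are placeholders:
+#
+#     lock = RWLock()
+#     lock.reader_acquire()   # many readers may hold the lock concurrently
+#     try:
+#         snapshot = copy.deepcopy(shared_buffer)
+#     finally:
+#         lock.reader_release()
+#
+#     lock.writer_acquire()   # writers get exclusive access, with priority
+#     try:
+#         shared_buffer.append(item)
+#     finally:
+#         lock.writer_release()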
+
+class RWLockTestCase(unittest.TestCase):
+ def test_readers_nonexclusive_access(self):
+ (buffer_, rw_lock, threads) = self.__init_variables()
+
+ threads.append(Reader(buffer_, rw_lock, 0, 0))
+ threads.append(Writer(buffer_, rw_lock, 0.2, 0.4, 1))
+ threads.append(Reader(buffer_, rw_lock, 0.3, 0.3))
+ threads.append(Reader(buffer_, rw_lock, 0.5, 0))
+
+ self.__start_and_join_threads(threads)
+
+ ## The third reader should enter after the second one but it should
+ ## exit before the second one exits
+ ## (i.e. the readers should be in the critical section
+ ## at the same time)
+
+ self.assertEqual([], threads[0].buffer_read)
+ self.assertEqual([1], threads[2].buffer_read)
+ self.assertEqual([1], threads[3].buffer_read)
+        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
+        self.assertTrue(threads[2].entry_time <= threads[3].entry_time)
+        self.assertTrue(threads[3].exit_time < threads[2].exit_time)
+
+ def test_writers_exclusive_access(self):
+ (buffer_, rw_lock, threads) = self.__init_variables()
+
+ threads.append(Writer(buffer_, rw_lock, 0, 0.4, 1))
+ threads.append(Writer(buffer_, rw_lock, 0.1, 0, 2))
+ threads.append(Reader(buffer_, rw_lock, 0.2, 0))
+
+ self.__start_and_join_threads(threads)
+
+ ## The second writer should wait for the first one to exit
+
+ self.assertEqual([1, 2], threads[2].buffer_read)
+        self.assertTrue(threads[0].exit_time <= threads[1].entry_time)
+        self.assertTrue(threads[1].exit_time <= threads[2].exit_time)
+
+ def test_writer_priority(self):
+ (buffer_, rw_lock, threads) = self.__init_variables()
+
+ threads.append(Writer(buffer_, rw_lock, 0, 0, 1))
+ threads.append(Reader(buffer_, rw_lock, 0.1, 0.4))
+ threads.append(Writer(buffer_, rw_lock, 0.2, 0, 2))
+ threads.append(Reader(buffer_, rw_lock, 0.3, 0))
+ threads.append(Reader(buffer_, rw_lock, 0.3, 0))
+
+ self.__start_and_join_threads(threads)
+
+ ## The second writer should go before the second and the third reader
+
+ self.assertEqual([1], threads[1].buffer_read)
+ self.assertEqual([1, 2], threads[3].buffer_read)
+ self.assertEqual([1, 2], threads[4].buffer_read)
+        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
+        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
+        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
+        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)
+
+ def test_many_writers_priority(self):
+ (buffer_, rw_lock, threads) = self.__init_variables()
+
+ threads.append(Writer(buffer_, rw_lock, 0, 0, 1))
+ threads.append(Reader(buffer_, rw_lock, 0.1, 0.6))
+ threads.append(Writer(buffer_, rw_lock, 0.2, 0.1, 2))
+ threads.append(Reader(buffer_, rw_lock, 0.3, 0))
+ threads.append(Reader(buffer_, rw_lock, 0.4, 0))
+ threads.append(Writer(buffer_, rw_lock, 0.5, 0.1, 3))
+
+ self.__start_and_join_threads(threads)
+
+        ## The last two writers should go first -- after the first reader and
+ ## before the second and the third reader
+
+ self.assertEqual([1], threads[1].buffer_read)
+ self.assertEqual([1, 2, 3], threads[3].buffer_read)
+ self.assertEqual([1, 2, 3], threads[4].buffer_read)
+        self.assertTrue(threads[0].exit_time < threads[1].entry_time)
+        self.assertTrue(threads[1].exit_time <= threads[2].entry_time)
+        self.assertTrue(threads[1].exit_time <= threads[5].entry_time)
+        self.assertTrue(threads[2].exit_time <= threads[3].entry_time)
+        self.assertTrue(threads[2].exit_time <= threads[4].entry_time)
+        self.assertTrue(threads[5].exit_time <= threads[3].entry_time)
+        self.assertTrue(threads[5].exit_time <= threads[4].entry_time)
+
+ @staticmethod
+ def __init_variables():
+ buffer_ = []
+ rw_lock = RWLock()
+ threads = []
+ return (buffer_, rw_lock, threads)
+
+ @staticmethod
+ def __start_and_join_threads(threads):
+ for t in threads:
+ t.start()
+ for t in threads:
+ t.join()
diff --git a/third_party/python/ecdsa/ecdsa/util.py b/third_party/python/ecdsa/ecdsa/util.py
new file mode 100644
index 0000000000..5f1c7500b6
--- /dev/null
+++ b/third_party/python/ecdsa/ecdsa/util.py
@@ -0,0 +1,401 @@
+from __future__ import division
+
+import os
+import math
+import binascii
+import sys
+from hashlib import sha256
+from six import PY3, int2byte, b, next
+from . import der
+from ._compat import normalise_bytes
+
+# RFC5480:
+# The "unrestricted" algorithm identifier is:
+# id-ecPublicKey OBJECT IDENTIFIER ::= {
+# iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
+
+oid_ecPublicKey = (1, 2, 840, 10045, 2, 1)
+encoded_oid_ecPublicKey = der.encode_oid(*oid_ecPublicKey)
+
+if sys.version_info >= (3, 0):
+ def entropy_to_bits(ent_256):
+ """Convert a bytestring to string of 0's and 1's"""
+ return bin(int.from_bytes(ent_256, 'big'))[2:].zfill(len(ent_256)*8)
+else:
+ def entropy_to_bits(ent_256):
+ """Convert a bytestring to string of 0's and 1's"""
+ return ''.join(bin(ord(x))[2:].zfill(8) for x in ent_256)
+
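+# A quick worked example of the helper above:
+#
+#     entropy_to_bits(b('\x05')) == '00000101'
+#
+# each input byte contributes exactly eight '0'/'1' characters.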
+
+if sys.version_info < (2, 7):
+ # Can't add a method to a built-in type so we are stuck with this
+ def bit_length(x):
+ return len(bin(x)) - 2
+else:
+ def bit_length(x):
+ return x.bit_length() or 1
+
+
+def orderlen(order):
+ return (1+len("%x" % order))//2 # bytes
+
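+# Worked examples for the two helpers above:
+#
+#     bit_length(1) == 1; bit_length(255) == 8; bit_length(256) == 9
+#     orderlen(255) == 1; orderlen(256) == 2   # bytes needed to hold the order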
+
+def randrange(order, entropy=None):
+ """Return a random integer k such that 1 <= k < order, uniformly
+ distributed across that range. Worst case should be a mean of 2 loops at
+ (2**k)+2.
+
+ Note that this function is not declared to be forwards-compatible: we may
+ change the behavior in future releases. The entropy= argument (which
+ should get a callable that behaves like os.urandom) can be used to
+ achieve stability within a given release (for repeatable unit tests), but
+ should not be used as a long-term-compatible key generation algorithm.
+ """
+ assert order > 1
+ if entropy is None:
+ entropy = os.urandom
+ upper_2 = bit_length(order-2)
+ upper_256 = upper_2//8 + 1
+ while True: # I don't think this needs a counter with bit-wise randrange
+ ent_256 = entropy(upper_256)
+ ent_2 = entropy_to_bits(ent_256)
+ rand_num = int(ent_2[:upper_2], base=2) + 1
+ if 0 < rand_num < order:
+ return rand_num
+
+
+class PRNG:
+ # this returns a callable which, when invoked with an integer N, will
+ # return N pseudorandom bytes. Note: this is a short-term PRNG, meant
+ # primarily for the needs of randrange_from_seed__trytryagain(), which
+ # only needs to run it a few times per seed. It does not provide
+ # protection against state compromise (forward security).
+ def __init__(self, seed):
+ self.generator = self.block_generator(seed)
+
+ def __call__(self, numbytes):
+ a = [next(self.generator) for i in range(numbytes)]
+
+ if PY3:
+ return bytes(a)
+ else:
+ return "".join(a)
+
+ def block_generator(self, seed):
+ counter = 0
+ while True:
+ for byte in sha256(("prng-%d-%s" % (counter, seed)).encode()).digest():
+ yield byte
+ counter += 1
+
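+# A minimal sketch of pairing PRNG with randrange() above for repeatable
+# draws (the seed string is illustrative):
+#
+#     entropy = PRNG("unit-test-seed")
+#     k1 = randrange(2**16 + 1, entropy=entropy)
+#     k2 = randrange(2**16 + 1, entropy=PRNG("unit-test-seed"))
+#     assert k1 == k2  # same seed, same first draw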
+
+def randrange_from_seed__overshoot_modulo(seed, order):
+ # hash the data, then turn the digest into a number in [1,order).
+ #
+ # We use David-Sarah Hopwood's suggestion: turn it into a number that's
+ # sufficiently larger than the group order, then modulo it down to fit.
+ # This should give adequate (but not perfect) uniformity, and simple
+ # code. There are other choices: try-try-again is the main one.
+ base = PRNG(seed)(2 * orderlen(order))
+ number = (int(binascii.hexlify(base), 16) % (order - 1)) + 1
+ assert 1 <= number < order, (1, number, order)
+ return number
+
+
+def lsb_of_ones(numbits):
+ return (1 << numbits) - 1
+
+
+def bits_and_bytes(order):
+ bits = int(math.log(order - 1, 2) + 1)
+ bytes = bits // 8
+ extrabits = bits % 8
+ return bits, bytes, extrabits
+
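+# e.g. bits_and_bytes(2**16 + 1) == (17, 2, 1):
+# 17 significant bits split into 2 full bytes plus 1 extra bit.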
+
+# the following randrange_from_seed__METHOD() functions take an
+# arbitrarily-sized secret seed and turn it into a number that obeys the same
+# range limits as randrange() above. They are meant for deriving consistent
+# signing keys from a secret rather than generating them randomly, for
+# example a protocol in which three signing keys are derived from a master
+# secret. You should use a uniformly-distributed unguessable seed with about
+# curve.baselen bytes of entropy. To use one, do this:
+# seed = os.urandom(curve.baselen) # or other starting point
+#  secexp = ecdsa.util.randrange_from_seed__trytryagain(seed, curve.order)
+# sk = SigningKey.from_secret_exponent(secexp, curve)
+
+def randrange_from_seed__truncate_bytes(seed, order, hashmod=sha256):
+ # hash the seed, then turn the digest into a number in [1,order), but
+ # don't worry about trying to uniformly fill the range. This will lose,
+ # on average, four bits of entropy.
+ bits, _bytes, extrabits = bits_and_bytes(order)
+ if extrabits:
+ _bytes += 1
+ base = hashmod(seed).digest()[:_bytes]
+ base = "\x00" * (_bytes - len(base)) + base
+ number = 1 + int(binascii.hexlify(base), 16)
+ assert 1 <= number < order
+ return number
+
+
+def randrange_from_seed__truncate_bits(seed, order, hashmod=sha256):
+    # like randrange_from_seed__truncate_bytes, but only loses an average of
+    # half a bit
+ bits = int(math.log(order - 1, 2) + 1)
+ maxbytes = (bits + 7) // 8
+ base = hashmod(seed).digest()[:maxbytes]
+ base = "\x00" * (maxbytes - len(base)) + base
+ topbits = 8 * maxbytes - bits
+ if topbits:
+ base = int2byte(ord(base[0]) & lsb_of_ones(topbits)) + base[1:]
+ number = 1 + int(binascii.hexlify(base), 16)
+ assert 1 <= number < order
+ return number
+
+
+def randrange_from_seed__trytryagain(seed, order):
+ # figure out exactly how many bits we need (rounded up to the nearest
+ # bit), so we can reduce the chance of looping to less than 0.5 . This is
+ # specified to feed from a byte-oriented PRNG, and discards the
+ # high-order bits of the first byte as necessary to get the right number
+ # of bits. The average number of loops will range from 1.0 (when
+ # order=2**k-1) to 2.0 (when order=2**k+1).
+ assert order > 1
+ bits, bytes, extrabits = bits_and_bytes(order)
+ generate = PRNG(seed)
+ while True:
+ extrabyte = b("")
+ if extrabits:
+ extrabyte = int2byte(ord(generate(1)) & lsb_of_ones(extrabits))
+ guess = string_to_number(extrabyte + generate(bytes)) + 1
+ if 1 <= guess < order:
+ return guess
+
+
+def number_to_string(num, order):
+ l = orderlen(order)
+ fmt_str = "%0" + str(2 * l) + "x"
+ string = binascii.unhexlify((fmt_str % num).encode())
+ assert len(string) == l, (len(string), l)
+ return string
+
+
+def number_to_string_crop(num, order):
+ l = orderlen(order)
+ fmt_str = "%0" + str(2 * l) + "x"
+ string = binascii.unhexlify((fmt_str % num).encode())
+ return string[:l]
+
+
+def string_to_number(string):
+ return int(binascii.hexlify(string), 16)
+
+
+def string_to_number_fixedlen(string, order):
+ l = orderlen(order)
+ assert len(string) == l, (len(string), l)
+ return int(binascii.hexlify(string), 16)
+
+
+# these methods are useful for the sigencode= argument to SK.sign() and the
+# sigdecode= argument to VK.verify(), and control how the signature is packed
+# or unpacked.
+
+def sigencode_strings(r, s, order):
+ r_str = number_to_string(r, order)
+ s_str = number_to_string(s, order)
+ return (r_str, s_str)
+
+
+def sigencode_string(r, s, order):
+ """
+ Encode the signature to raw format (:term:`raw encoding`)
+
+    It's expected that this function will be used as the `sigencode=`
+    parameter to the :func:`ecdsa.keys.SigningKey.sign` method.
+
+ :param int r: first parameter of the signature
+ :param int s: second parameter of the signature
+ :param int order: the order of the curve over which the signature was
+ computed
+
+ :return: raw encoding of ECDSA signature
+ :rtype: bytes
+ """
+ # for any given curve, the size of the signature numbers is
+ # fixed, so just use simple concatenation
+ r_str, s_str = sigencode_strings(r, s, order)
+ return r_str + s_str
+
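+# A worked example with a toy order (far too small for real use):
+#
+#     sigencode_strings(1, 2, 2**16 - 1) == (b'\x00\x01', b'\x00\x02')
+#     sigencode_string(1, 2, 2**16 - 1) == b'\x00\x01\x00\x02'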
+
+def sigencode_der(r, s, order):
+ """
+ Encode the signature into the ECDSA-Sig-Value structure using :term:`DER`.
+
+ Encodes the signature to the following :term:`ASN.1` structure::
+
+ Ecdsa-Sig-Value ::= SEQUENCE {
+ r INTEGER,
+ s INTEGER
+ }
+
+    It's expected that this function will be used as the `sigencode=`
+    parameter to the :func:`ecdsa.keys.SigningKey.sign` method.
+
+ :param int r: first parameter of the signature
+ :param int s: second parameter of the signature
+ :param int order: the order of the curve over which the signature was
+ computed
+
+ :return: DER encoding of ECDSA signature
+ :rtype: bytes
+ """
+ return der.encode_sequence(der.encode_integer(r), der.encode_integer(s))
+
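+# For the same toy inputs the DER encoding is a SEQUENCE of two INTEGERs:
+#
+#     sigencode_der(1, 2, 2**16 - 1) == b'\x30\x06\x02\x01\x01\x02\x01\x02'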
+
+# canonical versions of the sigencode methods
+# these enforce low S values by negating the value (modulo the order) if it
+# is above order//2 (integer division keeps the comparison exact for large
+# orders); see CECKey::Sign()
+# https://github.com/bitcoin/bitcoin/blob/master/src/key.cpp#L214
+def sigencode_strings_canonize(r, s, order):
+    if s > order // 2:
+        s = order - s
+    return sigencode_strings(r, s, order)
+
+
+def sigencode_string_canonize(r, s, order):
+    if s > order // 2:
+        s = order - s
+    return sigencode_string(r, s, order)
+
+
+def sigencode_der_canonize(r, s, order):
+    if s > order // 2:
+        s = order - s
+    return sigencode_der(r, s, order)
+
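+# e.g. with a toy order of 7, s=5 lies above 7//2 and is replaced by 7-5=2:
+#
+#     sigencode_strings_canonize(1, 5, 7) == sigencode_strings(1, 2, 7)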
+
+class MalformedSignature(Exception):
+ """
+ Raised by decoding functions when the signature is malformed.
+
+ Malformed in this context means that the relevant strings or integers
+ do not match what a signature over provided curve would create. Either
+ because the byte strings have incorrect lengths or because the encoded
+ values are too large.
+ """
+
+ pass
+
+
+def sigdecode_string(signature, order):
+ """
+ Decoder for :term:`raw encoding` of ECDSA signatures.
+
+    Raw encoding is a simple concatenation of the two integers that comprise
+    the signature, each encoded using the same number of bytes, as determined
+    by the curve size/order.
+
+ It's expected that this function will be used as the `sigdecode=`
+ parameter to the :func:`ecdsa.keys.VerifyingKey.verify` method.
+
+ :param signature: encoded signature
+ :type signature: bytes like object
+ :param order: order of the curve over which the signature was computed
+ :type order: int
+
+ :raises MalformedSignature: when the encoding of the signature is invalid
+
+ :return: tuple with decoded 'r' and 's' values of signature
+ :rtype: tuple of ints
+ """
+ signature = normalise_bytes(signature)
+ l = orderlen(order)
+    if len(signature) != 2 * l:
+ raise MalformedSignature(
+ "Invalid length of signature, expected {0} bytes long, "
+ "provided string is {1} bytes long"
+ .format(2 * l, len(signature)))
+ r = string_to_number_fixedlen(signature[:l], order)
+ s = string_to_number_fixedlen(signature[l:], order)
+ return r, s
+
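+# Round-trip with the matching encoder (toy order again):
+#
+#     sigdecode_string(sigencode_string(1, 2, 2**16 - 1), 2**16 - 1) == (1, 2)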
+
+def sigdecode_strings(rs_strings, order):
+ """
+ Decode the signature from two strings.
+
+    The first string needs to be a big-endian encoding of 'r', the second a
+    big-endian encoding of the 's' parameter of an ECDSA signature.
+
+ It's expected that this function will be used as the `sigdecode=`
+ parameter to the :func:`ecdsa.keys.VerifyingKey.verify` method.
+
+ :param list rs_strings: list of two bytes-like objects, each encoding one
+ parameter of signature
+ :param int order: order of the curve over which the signature was computed
+
+ :raises MalformedSignature: when the encoding of the signature is invalid
+
+ :return: tuple with decoded 'r' and 's' values of signature
+ :rtype: tuple of ints
+ """
+    if len(rs_strings) != 2:
+ raise MalformedSignature(
+ "Invalid number of strings provided: {0}, expected 2"
+ .format(len(rs_strings)))
+ (r_str, s_str) = rs_strings
+ r_str = normalise_bytes(r_str)
+ s_str = normalise_bytes(s_str)
+ l = orderlen(order)
+    if len(r_str) != l:
+ raise MalformedSignature(
+ "Invalid length of first string ('r' parameter), "
+ "expected {0} bytes long, provided string is {1} bytes long"
+ .format(l, len(r_str)))
+    if len(s_str) != l:
+ raise MalformedSignature(
+ "Invalid length of second string ('s' parameter), "
+ "expected {0} bytes long, provided string is {1} bytes long"
+ .format(l, len(s_str)))
+ r = string_to_number_fixedlen(r_str, order)
+ s = string_to_number_fixedlen(s_str, order)
+ return r, s
+
+
+def sigdecode_der(sig_der, order):
+ """
+ Decoder for DER format of ECDSA signatures.
+
+ DER format of signature is one that uses the :term:`ASN.1` :term:`DER`
+ rules to encode it as a sequence of two integers::
+
+ Ecdsa-Sig-Value ::= SEQUENCE {
+ r INTEGER,
+ s INTEGER
+ }
+
+    It's expected that this function will be used as the `sigdecode=`
+ parameter to the :func:`ecdsa.keys.VerifyingKey.verify` method.
+
+ :param sig_der: encoded signature
+ :type sig_der: bytes like object
+ :param order: order of the curve over which the signature was computed
+ :type order: int
+
+ :raises UnexpectedDER: when the encoding of signature is invalid
+
+ :return: tuple with decoded 'r' and 's' values of signature
+ :rtype: tuple of ints
+ """
+ sig_der = normalise_bytes(sig_der)
+    # the matching encoder is: der.encode_sequence(der.encode_integer(r), der.encode_integer(s))
+ rs_strings, empty = der.remove_sequence(sig_der)
+ if empty != b"":
+ raise der.UnexpectedDER("trailing junk after DER sig: %s" %
+ binascii.hexlify(empty))
+ r, rest = der.remove_integer(rs_strings)
+ s, empty = der.remove_integer(rest)
+ if empty != b"":
+ raise der.UnexpectedDER("trailing junk after DER numbers: %s" %
+ binascii.hexlify(empty))
+ return r, s
diff --git a/third_party/python/esprima/PKG-INFO b/third_party/python/esprima/PKG-INFO
new file mode 100644
index 0000000000..c2fee6ace6
--- /dev/null
+++ b/third_party/python/esprima/PKG-INFO
@@ -0,0 +1,143 @@
+Metadata-Version: 1.1
+Name: esprima
+Version: 4.0.1
+Summary: ECMAScript parsing infrastructure for multipurpose analysis in Python
+Home-page: https://github.com/Kronuz/esprima-python
+Author: German M. Bravo (Kronuz)
+Author-email: german.mb@gmail.com
+License: BSD License
+Description: |Donate| |PyPI Version| |PyPI License| |PyPI Format| |PyPI Status|
+
+ **Esprima** (`esprima.org <http://esprima.org>`__, BSD license) is a
+ high performance, standard-compliant
+ `ECMAScript <http://www.ecma-international.org/publications/standards/Ecma-262.htm>`__
+ parser officially written in ECMAScript (also popularly known as
+ `JavaScript <https://en.wikipedia.org/wiki/JavaScript>`__) and ported to
+ Python. Esprima is created and maintained by `Ariya
+ Hidayat <https://twitter.com/ariyahidayat>`__, with the help of `many
+ contributors <https://github.com/jquery/esprima/contributors>`__.
+
+ The Python port is a line-by-line manual translation and was created
+ and is maintained by `German Mendez Bravo
+ (Kronuz) <https://twitter.com/germbravo>`__.
+
+ Features
+ ~~~~~~~~
+
+ - Full support for ECMAScript 2017 (`ECMA-262 8th
+ Edition <http://www.ecma-international.org/publications/standards/Ecma-262.htm>`__)
+ - Sensible `syntax tree
+ format <https://github.com/estree/estree/blob/master/es5.md>`__ as
+ standardized by `ESTree project <https://github.com/estree/estree>`__
+ - Experimental support for `JSX <https://facebook.github.io/jsx/>`__, a
+ syntax extension for `React <https://facebook.github.io/react/>`__
+ - Optional tracking of syntax node location (index-based and
+ line-column)
+ - `Heavily tested <http://esprima.org/test/ci.html>`__ (~1500 `unit
+ tests <https://github.com/jquery/esprima/tree/master/test/fixtures>`__
+ with `full code
+ coverage <https://codecov.io/github/jquery/esprima>`__)
+
+ Installation
+ ~~~~~~~~~~~~
+
+ .. code:: shell
+
+ pip install esprima
+
+ API
+ ~~~
+
+ Esprima can be used to perform `lexical
+ analysis <https://en.wikipedia.org/wiki/Lexical_analysis>`__
+ (tokenization) or `syntactic
+ analysis <https://en.wikipedia.org/wiki/Parsing>`__ (parsing) of a
+ JavaScript program.
+
+ A simple example:
+
+ .. code:: python
+
+ >>> import esprima
+ >>> program = 'const answer = 42'
+
+ >>> esprima.tokenize(program)
+ [{
+ type: "Keyword",
+ value: "const"
+ }, {
+ type: "Identifier",
+ value: "answer"
+ }, {
+ type: "Punctuator",
+ value: "="
+ }, {
+ type: "Numeric",
+ value: "42"
+ }]
+
+ >>> esprima.parseScript(program)
+ {
+ body: [
+ {
+ kind: "const",
+ declarations: [
+ {
+ init: {
+ raw: "42",
+ type: "Literal",
+ value: 42
+ },
+ type: "VariableDeclarator",
+ id: {
+ type: "Identifier",
+ name: "answer"
+ }
+ }
+ ],
+ type: "VariableDeclaration"
+ }
+ ],
+ type: "Program",
+ sourceType: "script"
+ }
+
+ For more information, please read the `complete
+ documentation <http://esprima.org/doc>`__.
+
+ .. |Donate| image:: https://img.shields.io/badge/Donate-PayPal-green.svg
+ :target: https://www.paypal.me/Kronuz/25
+ .. |PyPI Version| image:: https://img.shields.io/pypi/v/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI License| image:: https://img.shields.io/pypi/l/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Wheel| image:: https://img.shields.io/pypi/wheel/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Format| image:: https://img.shields.io/pypi/format/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Python Version| image:: https://img.shields.io/pypi/pyversions/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Implementation| image:: https://img.shields.io/pypi/implementation/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Status| image:: https://img.shields.io/pypi/status/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Downloads| image:: https://img.shields.io/pypi/dm/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+Keywords: esprima ecmascript javascript parser ast
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Software Development :: Code Generators
+Classifier: Topic :: Software Development :: Compilers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: General
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
diff --git a/third_party/python/esprima/README b/third_party/python/esprima/README
new file mode 100644
index 0000000000..442fbc7b11
--- /dev/null
+++ b/third_party/python/esprima/README
@@ -0,0 +1,117 @@
+|Donate| |PyPI Version| |PyPI License| |PyPI Format| |PyPI Status|
+
+**Esprima** (`esprima.org <http://esprima.org>`__, BSD license) is a
+high performance, standard-compliant
+`ECMAScript <http://www.ecma-international.org/publications/standards/Ecma-262.htm>`__
+parser officially written in ECMAScript (also popularly known as
+`JavaScript <https://en.wikipedia.org/wiki/JavaScript>`__) and ported to
+Python. Esprima is created and maintained by `Ariya
+Hidayat <https://twitter.com/ariyahidayat>`__, with the help of `many
+contributors <https://github.com/jquery/esprima/contributors>`__.
+
+The Python port is a line-by-line manual translation and was created
+and is maintained by `German Mendez Bravo
+(Kronuz) <https://twitter.com/germbravo>`__.
+
+Features
+~~~~~~~~
+
+- Full support for ECMAScript 2017 (`ECMA-262 8th
+ Edition <http://www.ecma-international.org/publications/standards/Ecma-262.htm>`__)
+- Sensible `syntax tree
+ format <https://github.com/estree/estree/blob/master/es5.md>`__ as
+ standardized by `ESTree project <https://github.com/estree/estree>`__
+- Experimental support for `JSX <https://facebook.github.io/jsx/>`__, a
+ syntax extension for `React <https://facebook.github.io/react/>`__
+- Optional tracking of syntax node location (index-based and
+ line-column)
+- `Heavily tested <http://esprima.org/test/ci.html>`__ (~1500 `unit
+ tests <https://github.com/jquery/esprima/tree/master/test/fixtures>`__
+ with `full code
+ coverage <https://codecov.io/github/jquery/esprima>`__)
+
+Installation
+~~~~~~~~~~~~
+
+.. code:: shell
+
+ pip install esprima
+
+API
+~~~
+
+Esprima can be used to perform `lexical
+analysis <https://en.wikipedia.org/wiki/Lexical_analysis>`__
+(tokenization) or `syntactic
+analysis <https://en.wikipedia.org/wiki/Parsing>`__ (parsing) of a
+JavaScript program.
+
+A simple example:
+
+.. code:: python
+
+ >>> import esprima
+ >>> program = 'const answer = 42'
+
+ >>> esprima.tokenize(program)
+ [{
+ type: "Keyword",
+ value: "const"
+ }, {
+ type: "Identifier",
+ value: "answer"
+ }, {
+ type: "Punctuator",
+ value: "="
+ }, {
+ type: "Numeric",
+ value: "42"
+ }]
+
+ >>> esprima.parseScript(program)
+ {
+ body: [
+ {
+ kind: "const",
+ declarations: [
+ {
+ init: {
+ raw: "42",
+ type: "Literal",
+ value: 42
+ },
+ type: "VariableDeclarator",
+ id: {
+ type: "Identifier",
+ name: "answer"
+ }
+ }
+ ],
+ type: "VariableDeclaration"
+ }
+ ],
+ type: "Program",
+ sourceType: "script"
+ }
+
+For more information, please read the `complete
+documentation <http://esprima.org/doc>`__.
+
+.. |Donate| image:: https://img.shields.io/badge/Donate-PayPal-green.svg
+ :target: https://www.paypal.me/Kronuz/25
+.. |PyPI Version| image:: https://img.shields.io/pypi/v/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+.. |PyPI License| image:: https://img.shields.io/pypi/l/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+.. |PyPI Wheel| image:: https://img.shields.io/pypi/wheel/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+.. |PyPI Format| image:: https://img.shields.io/pypi/format/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+.. |PyPI Python Version| image:: https://img.shields.io/pypi/pyversions/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+.. |PyPI Implementation| image:: https://img.shields.io/pypi/implementation/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+.. |PyPI Status| image:: https://img.shields.io/pypi/status/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+.. |PyPI Downloads| image:: https://img.shields.io/pypi/dm/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
diff --git a/third_party/python/esprima/esprima.egg-info/PKG-INFO b/third_party/python/esprima/esprima.egg-info/PKG-INFO
new file mode 100644
index 0000000000..c2fee6ace6
--- /dev/null
+++ b/third_party/python/esprima/esprima.egg-info/PKG-INFO
@@ -0,0 +1,143 @@
+Metadata-Version: 1.1
+Name: esprima
+Version: 4.0.1
+Summary: ECMAScript parsing infrastructure for multipurpose analysis in Python
+Home-page: https://github.com/Kronuz/esprima-python
+Author: German M. Bravo (Kronuz)
+Author-email: german.mb@gmail.com
+License: BSD License
+Description: |Donate| |PyPI Version| |PyPI License| |PyPI Format| |PyPI Status|
+
+ **Esprima** (`esprima.org <http://esprima.org>`__, BSD license) is a
+ high performance, standard-compliant
+ `ECMAScript <http://www.ecma-international.org/publications/standards/Ecma-262.htm>`__
+ parser officially written in ECMAScript (also popularly known as
+ `JavaScript <https://en.wikipedia.org/wiki/JavaScript>`__) and ported to
+ Python. Esprima is created and maintained by `Ariya
+ Hidayat <https://twitter.com/ariyahidayat>`__, with the help of `many
+ contributors <https://github.com/jquery/esprima/contributors>`__.
+
+ The Python port is a line-by-line manual translation and was created
+ and is maintained by `German Mendez Bravo
+ (Kronuz) <https://twitter.com/germbravo>`__.
+
+ Features
+ ~~~~~~~~
+
+ - Full support for ECMAScript 2017 (`ECMA-262 8th
+ Edition <http://www.ecma-international.org/publications/standards/Ecma-262.htm>`__)
+ - Sensible `syntax tree
+ format <https://github.com/estree/estree/blob/master/es5.md>`__ as
+ standardized by `ESTree project <https://github.com/estree/estree>`__
+ - Experimental support for `JSX <https://facebook.github.io/jsx/>`__, a
+ syntax extension for `React <https://facebook.github.io/react/>`__
+ - Optional tracking of syntax node location (index-based and
+ line-column)
+ - `Heavily tested <http://esprima.org/test/ci.html>`__ (~1500 `unit
+ tests <https://github.com/jquery/esprima/tree/master/test/fixtures>`__
+ with `full code
+ coverage <https://codecov.io/github/jquery/esprima>`__)
+
+ Installation
+ ~~~~~~~~~~~~
+
+ .. code:: shell
+
+ pip install esprima
+
+ API
+ ~~~
+
+ Esprima can be used to perform `lexical
+ analysis <https://en.wikipedia.org/wiki/Lexical_analysis>`__
+ (tokenization) or `syntactic
+ analysis <https://en.wikipedia.org/wiki/Parsing>`__ (parsing) of a
+ JavaScript program.
+
+ A simple example:
+
+ .. code:: python
+
+ >>> import esprima
+ >>> program = 'const answer = 42'
+
+ >>> esprima.tokenize(program)
+ [{
+ type: "Keyword",
+ value: "const"
+ }, {
+ type: "Identifier",
+ value: "answer"
+ }, {
+ type: "Punctuator",
+ value: "="
+ }, {
+ type: "Numeric",
+ value: "42"
+ }]
+
+ >>> esprima.parseScript(program)
+ {
+ body: [
+ {
+ kind: "const",
+ declarations: [
+ {
+ init: {
+ raw: "42",
+ type: "Literal",
+ value: 42
+ },
+ type: "VariableDeclarator",
+ id: {
+ type: "Identifier",
+ name: "answer"
+ }
+ }
+ ],
+ type: "VariableDeclaration"
+ }
+ ],
+ type: "Program",
+ sourceType: "script"
+ }
+
+ For more information, please read the `complete
+ documentation <http://esprima.org/doc>`__.
+
+ .. |Donate| image:: https://img.shields.io/badge/Donate-PayPal-green.svg
+ :target: https://www.paypal.me/Kronuz/25
+ .. |PyPI Version| image:: https://img.shields.io/pypi/v/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI License| image:: https://img.shields.io/pypi/l/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Wheel| image:: https://img.shields.io/pypi/wheel/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Format| image:: https://img.shields.io/pypi/format/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Python Version| image:: https://img.shields.io/pypi/pyversions/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Implementation| image:: https://img.shields.io/pypi/implementation/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Status| image:: https://img.shields.io/pypi/status/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+ .. |PyPI Downloads| image:: https://img.shields.io/pypi/dm/esprima.svg
+ :target: https://pypi.python.org/pypi/esprima
+Keywords: esprima ecmascript javascript parser ast
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Software Development :: Code Generators
+Classifier: Topic :: Software Development :: Compilers
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: General
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
diff --git a/third_party/python/esprima/esprima.egg-info/SOURCES.txt b/third_party/python/esprima/esprima.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..16bea37b8d
--- /dev/null
+++ b/third_party/python/esprima/esprima.egg-info/SOURCES.txt
@@ -0,0 +1,29 @@
+README
+setup.py
+esprima/__init__.py
+esprima/__main__.py
+esprima/character.py
+esprima/comment_handler.py
+esprima/compat.py
+esprima/error_handler.py
+esprima/esprima.py
+esprima/jsx_nodes.py
+esprima/jsx_parser.py
+esprima/jsx_syntax.py
+esprima/messages.py
+esprima/nodes.py
+esprima/objects.py
+esprima/parser.py
+esprima/scanner.py
+esprima/syntax.py
+esprima/token.py
+esprima/tokenizer.py
+esprima/utils.py
+esprima/visitor.py
+esprima/xhtml_entities.py
+esprima.egg-info/PKG-INFO
+esprima.egg-info/SOURCES.txt
+esprima.egg-info/dependency_links.txt
+esprima.egg-info/entry_points.txt
+esprima.egg-info/pbr.json
+esprima.egg-info/top_level.txt \ No newline at end of file
diff --git a/third_party/python/esprima/esprima.egg-info/dependency_links.txt b/third_party/python/esprima/esprima.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/esprima/esprima.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/esprima/esprima.egg-info/entry_points.txt b/third_party/python/esprima/esprima.egg-info/entry_points.txt
new file mode 100644
index 0000000000..0170557792
--- /dev/null
+++ b/third_party/python/esprima/esprima.egg-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+esprima = esprima.__main__:main
+
diff --git a/third_party/python/esprima/esprima.egg-info/pbr.json b/third_party/python/esprima/esprima.egg-info/pbr.json
new file mode 100644
index 0000000000..d8e931d7dd
--- /dev/null
+++ b/third_party/python/esprima/esprima.egg-info/pbr.json
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "ac65290"} \ No newline at end of file
diff --git a/third_party/python/esprima/esprima.egg-info/top_level.txt b/third_party/python/esprima/esprima.egg-info/top_level.txt
new file mode 100644
index 0000000000..c0ba54881e
--- /dev/null
+++ b/third_party/python/esprima/esprima.egg-info/top_level.txt
@@ -0,0 +1 @@
+esprima
diff --git a/third_party/python/esprima/esprima/__init__.py b/third_party/python/esprima/esprima/__init__.py
new file mode 100644
index 0000000000..0dcdf99e5e
--- /dev/null
+++ b/third_party/python/esprima/esprima/__init__.py
@@ -0,0 +1,29 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+
+version = '4.0.1'
+__version__ = (4, 0, 1)
+
+from .esprima import * # NOQA
diff --git a/third_party/python/esprima/esprima/__main__.py b/third_party/python/esprima/esprima/__main__.py
new file mode 100644
index 0000000000..92f2aa2ec5
--- /dev/null
+++ b/third_party/python/esprima/esprima/__main__.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals, print_function, division
+
+import sys
+
+from .esprima import parse, tokenize, Error, toDict
+from . import version
+
+
+def main():
+ import json
+ import time
+ import optparse
+
+ usage = "usage: %prog [options] [file.js]"
+ parser = optparse.OptionParser(usage=usage, version=version)
+ parser.add_option("--comment", dest="comment",
+ action="store_true", default=False,
+ help="Gather all line and block comments in an array")
+ parser.add_option("--attachComment", dest="attachComment",
+ action="store_true", default=False,
+ help="Attach comments to nodes")
+ parser.add_option("--loc", dest="loc", default=False,
+ action="store_true",
+ help="Include line-column location info for each syntax node")
+ parser.add_option("--range", dest="range", default=False,
+ action="store_true",
+ help="Include index-based range for each syntax node")
+ parser.add_option("--raw", dest="raw", default=False,
+ action="store_true",
+ help="Display the raw value of literals")
+ parser.add_option("--tokens", dest="tokens", default=False,
+ action="store_true",
+ help="List all tokens in an array")
+ parser.add_option("--tolerant", dest="tolerant", default=False,
+ action="store_true",
+ help="Tolerate errors on a best-effort basis (experimental)")
+ parser.add_option("--tokenize", dest="tokenize", default=False,
+ action="store_true",
+ help="Only tokenize, do not parse.")
+ parser.add_option("--module", dest="sourceType", default='string',
+ action="store_const", const='module',
+ help="Tolerate errors on a best-effort basis (experimental)")
+ parser.set_defaults(jsx=True, classProperties=True)
+ opts, args = parser.parse_args()
+
+ if len(args) == 1:
+ with open(args[0], 'rb') as f:
+ code = f.read().decode('utf-8')
+ elif sys.stdin.isatty():
+ parser.print_help()
+ return 64
+ else:
+ # Read raw bytes where available (Python 3) so decode('utf-8') works.
+ stdin = getattr(sys.stdin, 'buffer', sys.stdin)
+ code = stdin.read().decode('utf-8')
+
+ options = opts.__dict__
+ do_tokenize = options.pop('tokenize')
+
+ t = time.time()
+ try:
+ if do_tokenize:
+ del options['sourceType']
+ del options['tokens']
+ del options['raw']
+ del options['jsx']
+ res = toDict(tokenize(code, options=options))
+ else:
+ res = toDict(parse(code, options=options))
+ except Error as e:
+ res = e.toDict()
+ dt = time.time() - t + 0.000000001
+
+ print(json.dumps(res, indent=4))
+ print()
+ print('Parsed everything in', round(dt, 5), 'seconds.')
+ print("That's %d characters per second" % (len(code) // dt))
+
+ return 0
+
+
+if __name__ == '__main__':
+ retval = main()
+ sys.exit(retval)
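+
+# Invocation sketch (editor's note): with the package installed this runs as
+# `esprima file.js` (console script, see entry_points.txt) or as
+# `python -m esprima --loc --tokens file.js`, printing the result as JSON.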
diff --git a/third_party/python/esprima/esprima/character.py b/third_party/python/esprima/esprima/character.py
new file mode 100644
index 0000000000..a650a714a9
--- /dev/null
+++ b/third_party/python/esprima/esprima/character.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+import sys
+
+import unicodedata
+from collections import defaultdict
+
+from .compat import uchr, xrange
+
+# http://stackoverflow.com/questions/14245893/efficiently-list-all-characters-in-a-given-unicode-category
+U_CATEGORIES = defaultdict(list)
+for c in map(uchr, xrange(sys.maxunicode + 1)):
+ U_CATEGORIES[unicodedata.category(c)].append(c)
+UNICODE_LETTER = set(
+ U_CATEGORIES['Lu'] + U_CATEGORIES['Ll'] +
+ U_CATEGORIES['Lt'] + U_CATEGORIES['Lm'] +
+ U_CATEGORIES['Lo'] + U_CATEGORIES['Nl']
+)
+UNICODE_OTHER_ID_START = set((
+ # Other_ID_Start
+ '\u1885', '\u1886', '\u2118', '\u212E', '\u309B', '\u309C',
+ # New in Unicode 8.0
+ '\u08B3', '\u0AF9', '\u13F8', '\u9FCD', '\uAB60', '\U00010CC0', '\U000108E0', '\U0002B820',
+ # New in Unicode 9.0
+ '\u1C80', '\U000104DB', '\U0001E922',
+ '\U0001EE00', '\U0001EE06', '\U0001EE0A',
+))
+UNICODE_OTHER_ID_CONTINUE = set((
+ # Other_ID_Continue
+ '\xB7', '\u0387', '\u1369', '\u136A', '\u136B', '\u136C',
+ '\u136D', '\u136E', '\u136F', '\u1370', '\u1371', '\u19DA',
+ # New in Unicode 8.0
+ '\u08E3', '\uA69E', '\U00011730',
+ # New in Unicode 9.0
+ '\u08D4', '\u1DFB', '\uA8C5', '\U00011450',
+ '\U0001EE03', '\U0001EE0B',
+))
+UNICODE_COMBINING_MARK = set(U_CATEGORIES['Mn'] + U_CATEGORIES['Mc'])
+UNICODE_DIGIT = set(U_CATEGORIES['Nd'])
+UNICODE_CONNECTOR_PUNCTUATION = set(U_CATEGORIES['Pc'])
+IDENTIFIER_START = UNICODE_LETTER.union(UNICODE_OTHER_ID_START).union(set(('$', '_', '\\')))
+IDENTIFIER_PART = IDENTIFIER_START.union(UNICODE_COMBINING_MARK).union(UNICODE_DIGIT).union(UNICODE_CONNECTOR_PUNCTUATION).union(set(('\u200D', '\u200C'))).union(UNICODE_OTHER_ID_CONTINUE)
+
+WHITE_SPACE = set((
+ '\x09', '\x0B', '\x0C', '\x20', '\xA0',
+ '\u1680', '\u180E', '\u2000', '\u2001', '\u2002',
+ '\u2003', '\u2004', '\u2005', '\u2006', '\u2007',
+ '\u2008', '\u2009', '\u200A', '\u202F', '\u205F',
+ '\u3000', '\uFEFF',
+))
+LINE_TERMINATOR = set(('\x0A', '\x0D', '\u2028', '\u2029'))
+
+DECIMAL_CONV = dict((c, n) for n, c in enumerate('0123456789'))
+OCTAL_CONV = dict((c, n) for n, c in enumerate('01234567'))
+HEX_CONV = dict((c, n) for n, c in enumerate('0123456789abcdef'))
+for n, c in enumerate('ABCDEF', 10):
+ HEX_CONV[c] = n
+DECIMAL_DIGIT = set(DECIMAL_CONV.keys())
+OCTAL_DIGIT = set(OCTAL_CONV.keys())
+HEX_DIGIT = set(HEX_CONV.keys())
+
+
+class Character:
+ @staticmethod
+ def fromCodePoint(code):
+ return uchr(code)
+
+ # https://tc39.github.io/ecma262/#sec-white-space
+
+ @staticmethod
+ def isWhiteSpace(ch):
+ return ch in WHITE_SPACE
+
+ # https://tc39.github.io/ecma262/#sec-line-terminators
+
+ @staticmethod
+ def isLineTerminator(ch):
+ return ch in LINE_TERMINATOR
+
+ # https://tc39.github.io/ecma262/#sec-names-and-keywords
+
+ @staticmethod
+ def isIdentifierStart(ch):
+ return ch in IDENTIFIER_START
+
+ @staticmethod
+ def isIdentifierPart(ch):
+ return ch in IDENTIFIER_PART
+
+ # https://tc39.github.io/ecma262/#sec-literals-numeric-literals
+
+ @staticmethod
+ def isDecimalDigit(ch):
+ return ch in DECIMAL_DIGIT
+
+ @staticmethod
+ def isHexDigit(ch):
+ return ch in HEX_DIGIT
+
+ @staticmethod
+ def isOctalDigit(ch):
+ return ch in OCTAL_DIGIT
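+
+
+# Quick sanity checks (editor's sketch) against the tables built above:
+# assert Character.isIdentifierStart('$')
+# assert Character.isIdentifierPart('9')
+# assert Character.isWhiteSpace('\u3000') # ideographic space
+# assert not Character.isHexDigit('g')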
diff --git a/third_party/python/esprima/esprima/comment_handler.py b/third_party/python/esprima/esprima/comment_handler.py
new file mode 100644
index 0000000000..09a37a5fd2
--- /dev/null
+++ b/third_party/python/esprima/esprima/comment_handler.py
@@ -0,0 +1,176 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+from .objects import Object
+from .nodes import Node
+from .syntax import Syntax
+
+
+class Comment(Node):
+ def __init__(self, type, value, range=None, loc=None):
+ self.type = type
+ self.value = value
+ self.range = range
+ self.loc = loc
+
+
+class Entry(Object):
+ def __init__(self, comment, start):
+ self.comment = comment
+ self.start = start
+
+
+class NodeInfo(Object):
+ def __init__(self, node, start):
+ self.node = node
+ self.start = start
+
+
+class CommentHandler(object):
+ def __init__(self):
+ self.attach = False
+ self.comments = []
+ self.stack = []
+ self.leading = []
+ self.trailing = []
+
+ def insertInnerComments(self, node, metadata):
+ # innerComments for a property's empty block, e.g.
+ # `function a() {/** comments **/}`
+ if node.type is Syntax.BlockStatement and not node.body:
+ innerComments = []
+ for i, entry in enumerate(self.leading):
+ if metadata.end.offset >= entry.start:
+ innerComments.append(entry.comment)
+ self.leading[i] = None
+ self.trailing[i] = None
+ if innerComments:
+ node.innerComments = innerComments
+ self.leading = [v for v in self.leading if v is not None]
+ self.trailing = [v for v in self.trailing if v is not None]
+
+ def findTrailingComments(self, metadata):
+ trailingComments = []
+
+ if self.trailing:
+ for i, entry in enumerate(self.trailing):
+ if entry.start >= metadata.end.offset:
+ trailingComments.append(entry.comment)
+ if trailingComments:
+ self.trailing = []
+ return trailingComments
+
+ last = self.stack and self.stack[-1]
+ if last and last.node.trailingComments:
+ firstComment = last.node.trailingComments[0]
+ if firstComment and firstComment.range[0] >= metadata.end.offset:
+ trailingComments = last.node.trailingComments
+ del last.node.trailingComments
+ return trailingComments
+
+ def findLeadingComments(self, metadata):
+ leadingComments = []
+
+ target = None
+ while self.stack:
+ entry = self.stack and self.stack[-1]
+ if entry and entry.start >= metadata.start.offset:
+ target = entry.node
+ self.stack.pop()
+ else:
+ break
+
+ if target:
+ if target.leadingComments:
+ for i, comment in enumerate(target.leadingComments):
+ if comment.range[1] <= metadata.start.offset:
+ leadingComments.append(comment)
+ target.leadingComments[i] = None
+ if leadingComments:
+ target.leadingComments = [v for v in target.leadingComments if v is not None]
+ if not target.leadingComments:
+ del target.leadingComments
+ return leadingComments
+
+ for i, entry in enumerate(self.leading):
+ if entry.start <= metadata.start.offset:
+ leadingComments.append(entry.comment)
+ self.leading[i] = None
+ if leadingComments:
+ self.leading = [v for v in self.leading if v is not None]
+
+ return leadingComments
+
+ def visitNode(self, node, metadata):
+ if node.type is Syntax.Program and node.body:
+ return
+
+ self.insertInnerComments(node, metadata)
+ trailingComments = self.findTrailingComments(metadata)
+ leadingComments = self.findLeadingComments(metadata)
+ if leadingComments:
+ node.leadingComments = leadingComments
+ if trailingComments:
+ node.trailingComments = trailingComments
+
+ self.stack.append(NodeInfo(
+ node=node,
+ start=metadata.start.offset
+ ))
+
+ def visitComment(self, node, metadata):
+ type = 'Line' if node.type[0] == 'L' else 'Block'
+ comment = Comment(
+ type=type,
+ value=node.value
+ )
+ if node.range:
+ comment.range = node.range
+ if node.loc:
+ comment.loc = node.loc
+ self.comments.append(comment)
+
+ if self.attach:
+ entry = Entry(
+ comment=Comment(
+ type=type,
+ value=node.value,
+ range=[metadata.start.offset, metadata.end.offset]
+ ),
+ start=metadata.start.offset
+ )
+ if node.loc:
+ entry.comment.loc = node.loc
+ node.type = type
+ self.leading.append(entry)
+ self.trailing.append(entry)
+
+ def visit(self, node, metadata):
+ if node.type == 'LineComment':
+ self.visitComment(node, metadata)
+ elif node.type == 'BlockComment':
+ self.visitComment(node, metadata)
+ elif self.attach:
+ self.visitNode(node, metadata)
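+
+
+# Usage note (editor's sketch): esprima.parse() wires this handler in when the
+# `comment` or `attachComment` option is set; with attach=True, visit() hangs
+# leadingComments/trailingComments (and innerComments for empty blocks) off
+# the surrounding nodes.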
diff --git a/third_party/python/esprima/esprima/compat.py b/third_party/python/esprima/esprima/compat.py
new file mode 100644
index 0000000000..79543255e3
--- /dev/null
+++ b/third_party/python/esprima/esprima/compat.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+import sys
+
+PY3 = sys.version_info >= (3, 0)
+
+if PY3:
+ # Python 3:
+ basestring = str
+ long = int
+ xrange = range
+ unicode = str
+ uchr = chr
+
+ def uord(ch):
+ return ord(ch[0])
+
+else:
+ basestring = basestring
+ long = long
+ xrange = xrange
+ unicode = unicode
+
+ try:
+ # Python 2 UCS4:
+ unichr(0x10000)
+ uchr = unichr
+
+ def uord(ch):
+ return ord(ch[0])
+
+ except ValueError:
+ # Python 2 UCS2:
+ def uchr(code):
+ # UTF-16 Encoding
+ if code <= 0xFFFF:
+ return unichr(code)
+ cu1 = ((code - 0x10000) >> 10) + 0xD800
+ cu2 = ((code - 0x10000) & 1023) + 0xDC00
+ return unichr(cu1) + unichr(cu2)
+
+ def uord(ch):
+ cp = ord(ch[0])
+ if cp >= 0xD800 and cp <= 0xDBFF:
+ second = ord(ch[1])
+ if second >= 0xDC00 and second <= 0xDFFF:
+ first = cp
+ cp = (first - 0xD800) * 0x400 + second - 0xDC00 + 0x10000
+ return cp
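+
+
+# Worked example (editor's note): on a narrow (UCS-2) Python 2 build,
+# uchr(0x1F600) returns the surrogate pair u'\ud83d\ude00' and
+# uord(u'\ud83d\ude00') recombines it to 0x1F600; on UCS-4 and Python 3
+# builds both reduce to plain chr()/ord() on a single code point.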
diff --git a/third_party/python/esprima/esprima/error_handler.py b/third_party/python/esprima/esprima/error_handler.py
new file mode 100644
index 0000000000..9b0f5cb843
--- /dev/null
+++ b/third_party/python/esprima/esprima/error_handler.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import unicode_literals
+
+from .compat import unicode
+
+
+class Error(Exception):
+ def __init__(self, message, name=None, index=None, lineNumber=None, column=None, description=None):
+ super(Error, self).__init__(message)
+ self.message = message
+ self.name = name
+ self.index = index
+ self.lineNumber = lineNumber
+ self.column = column
+ # self.description = description
+
+ def toString(self):
+ return '%s: %s' % (self.__class__.__name__, self)
+
+ def toDict(self):
+ d = dict((unicode(k), v) for k, v in self.__dict__.items() if v is not None)
+ d['message'] = self.toString()
+ return d
+
+
+class ErrorHandler:
+ def __init__(self):
+ self.errors = []
+ self.tolerant = False
+
+ def recordError(self, error):
+ self.errors.append(error.toDict())
+
+ def tolerate(self, error):
+ if self.tolerant:
+ self.recordError(error)
+ else:
+ raise error
+
+ def createError(self, index, line, col, description):
+ msg = 'Line %s: %s' % (line, description)
+ return Error(msg, index=index, lineNumber=line, column=col, description=description)
+
+ def throwError(self, index, line, col, description):
+ raise self.createError(index, line, col, description)
+
+ def tolerateError(self, index, line, col, description):
+ error = self.createError(index, line, col, description)
+ if self.tolerant:
+ self.recordError(error)
+ else:
+ raise error
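+
+
+# Usage sketch (editor's note): the parser sets `tolerant` from its options;
+# tolerateError() then records the error in self.errors (surfaced by
+# esprima.parse() as ast.errors) instead of raising it.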
diff --git a/third_party/python/esprima/esprima/esprima.py b/third_party/python/esprima/esprima/esprima.py
new file mode 100644
index 0000000000..faea0c2dda
--- /dev/null
+++ b/third_party/python/esprima/esprima/esprima.py
@@ -0,0 +1,125 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+from .comment_handler import CommentHandler
+from .error_handler import Error
+from .jsx_parser import JSXParser
+from .jsx_syntax import JSXSyntax
+from .objects import Array, toDict
+from .parser import Parser
+from .syntax import Syntax
+from .tokenizer import Tokenizer
+from .visitor import NodeVisitor
+from . import nodes
+from . import jsx_nodes
+
+
+__all__ = ['Syntax', 'JSXSyntax', 'Error', 'NodeVisitor', 'nodes', 'jsx_nodes',
+ 'parse', 'parseModule', 'parseScript', 'tokenize', 'toDict']
+
+
+def parse(code, options=None, delegate=None, **kwargs):
+ options = {} if options is None else options.copy()
+ options.update(kwargs)
+
+ # ESNext preset:
+ if options.get('esnext', False):
+ options['jsx'] = True
+ options['classProperties'] = True
+
+ commentHandler = None
+
+ def proxyDelegate(node, metadata):
+ if delegate:
+ new_node = delegate(node, metadata)
+ if new_node is not None:
+ node = new_node
+ if commentHandler:
+ commentHandler.visit(node, metadata)
+ return node
+
+ parserDelegate = None if delegate is None else proxyDelegate
+ collectComment = options.get('comment', False)
+ attachComment = options.get('attachComment', False)
+ if collectComment or attachComment:
+ commentHandler = CommentHandler()
+ commentHandler.attach = attachComment
+ options['comment'] = True
+ parserDelegate = proxyDelegate
+
+ isModule = options.get('sourceType', 'script') == 'module'
+
+ if options.get('jsx', False):
+ parser = JSXParser(code, options=options, delegate=parserDelegate)
+ else:
+ parser = Parser(code, options=options, delegate=parserDelegate)
+
+ ast = parser.parseModule() if isModule else parser.parseScript()
+
+ if collectComment and commentHandler:
+ ast.comments = commentHandler.comments
+
+ if parser.config.tokens:
+ ast.tokens = parser.tokens
+
+ if parser.config.tolerant:
+ ast.errors = parser.errorHandler.errors
+
+ return ast
+
+
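+# Delegate example (editor's sketch): parse() feeds every finished node and
+# its location metadata through the delegate, whose return value can replace
+# the node. E.g. collecting identifier names:
+#
+# names = []
+# def collect(node, metadata):
+# if node.type == 'Identifier':
+# names.append(node.name)
+#
+# ast = parse(code, delegate=collect)
+
+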
+def parseModule(code, options=None, delegate=None, **kwargs):
+ kwargs['sourceType'] = 'module'
+ return parse(code, options, delegate, **kwargs)
+
+
+def parseScript(code, options=None, delegate=None, **kwargs):
+ kwargs['sourceType'] = 'script'
+ return parse(code, options, delegate, **kwargs)
+
+
+def tokenize(code, options=None, delegate=None, **kwargs):
+ options = {} if options is None else options.copy()
+ options.update(kwargs)
+
+ tokenizer = Tokenizer(code, options)
+
+ tokens = Array()
+
+ try:
+ while True:
+ token = tokenizer.getNextToken()
+ if not token:
+ break
+ if delegate:
+ token = delegate(token)
+ tokens.append(token)
+ except Error as e:
+ tokenizer.errorHandler.tolerate(e)
+
+ if tokenizer.errorHandler.tolerant:
+ tokens.errors = tokenizer.errors()
+
+ return tokens
diff --git a/third_party/python/esprima/esprima/jsx_nodes.py b/third_party/python/esprima/esprima/jsx_nodes.py
new file mode 100644
index 0000000000..f195653ab5
--- /dev/null
+++ b/third_party/python/esprima/esprima/jsx_nodes.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+from .nodes import Node
+from .jsx_syntax import JSXSyntax
+
+
+class JSXClosingElement(Node):
+ def __init__(self, name):
+ self.type = JSXSyntax.JSXClosingElement
+ self.name = name
+
+
+class JSXElement(Node):
+ def __init__(self, openingElement, children, closingElement):
+ self.type = JSXSyntax.JSXElement
+ self.openingElement = openingElement
+ self.children = children
+ self.closingElement = closingElement
+
+
+class JSXEmptyExpression(Node):
+ def __init__(self):
+ self.type = JSXSyntax.JSXEmptyExpression
+
+
+class JSXExpressionContainer(Node):
+ def __init__(self, expression):
+ self.type = JSXSyntax.JSXExpressionContainer
+ self.expression = expression
+
+
+class JSXIdentifier(Node):
+ def __init__(self, name):
+ self.type = JSXSyntax.JSXIdentifier
+ self.name = name
+
+
+class JSXMemberExpression(Node):
+ def __init__(self, object, property):
+ self.type = JSXSyntax.JSXMemberExpression
+ self.object = object
+ self.property = property
+
+
+class JSXAttribute(Node):
+ def __init__(self, name, value):
+ self.type = JSXSyntax.JSXAttribute
+ self.name = name
+ self.value = value
+
+
+class JSXNamespacedName(Node):
+ def __init__(self, namespace, name):
+ self.type = JSXSyntax.JSXNamespacedName
+ self.namespace = namespace
+ self.name = name
+
+
+class JSXOpeningElement(Node):
+ def __init__(self, name, selfClosing, attributes):
+ self.type = JSXSyntax.JSXOpeningElement
+ self.name = name
+ self.selfClosing = selfClosing
+ self.attributes = attributes
+
+
+class JSXSpreadAttribute(Node):
+ def __init__(self, argument):
+ self.type = JSXSyntax.JSXSpreadAttribute
+ self.argument = argument
+
+
+class JSXText(Node):
+ def __init__(self, value, raw):
+ self.type = JSXSyntax.JSXText
+ self.value = value
+ self.raw = raw
diff --git a/third_party/python/esprima/esprima/jsx_parser.py b/third_party/python/esprima/esprima/jsx_parser.py
new file mode 100644
index 0000000000..ec71b9251d
--- /dev/null
+++ b/third_party/python/esprima/esprima/jsx_parser.py
@@ -0,0 +1,584 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+from .compat import uchr
+from .character import Character
+from . import jsx_nodes as JSXNode
+from .jsx_syntax import JSXSyntax
+from . import nodes as Node
+from .parser import Marker, Parser
+from .token import Token, TokenName
+from .xhtml_entities import XHTMLEntities
+
+
+class MetaJSXElement(object):
+ def __init__(self, node=None, opening=None, closing=None, children=None):
+ self.node = node
+ self.opening = opening
+ self.closing = closing
+ self.children = children
+
+
+class JSXToken(object):
+ Identifier = 100
+ Text = 101
+
+
+class RawJSXToken(object):
+ def __init__(self, type=None, value=None, lineNumber=None, lineStart=None, start=None, end=None):
+ self.type = type
+ self.value = value
+ self.lineNumber = lineNumber
+ self.lineStart = lineStart
+ self.start = start
+ self.end = end
+
+
+TokenName[JSXToken.Identifier] = "JSXIdentifier"
+TokenName[JSXToken.Text] = "JSXText"
+
+
+# Fully qualified element name, e.g. <svg:path> returns "svg:path"
+def getQualifiedElementName(elementName):
+ typ = elementName.type
+ if typ is JSXSyntax.JSXIdentifier:
+ id = elementName
+ qualifiedName = id.name
+ elif typ is JSXSyntax.JSXNamespacedName:
+ ns = elementName
+ qualifiedName = getQualifiedElementName(ns.namespace) + ':' + getQualifiedElementName(ns.name)
+ elif typ is JSXSyntax.JSXMemberExpression:
+ expr = elementName
+ qualifiedName = getQualifiedElementName(expr.object) + '.' + getQualifiedElementName(expr.property)
+
+ return qualifiedName
+
+
+class JSXParser(Parser):
+ def __init__(self, code, options, delegate):
+ super(JSXParser, self).__init__(code, options, delegate)
+
+ def parsePrimaryExpression(self):
+ return self.parseJSXRoot() if self.match('<') else super(JSXParser, self).parsePrimaryExpression()
+
+ def startJSX(self):
+ # Unwind the scanner before the lookahead token.
+ self.scanner.index = self.startMarker.index
+ self.scanner.lineNumber = self.startMarker.line
+ self.scanner.lineStart = self.startMarker.index - self.startMarker.column
+
+ def finishJSX(self):
+ # Prime the next lookahead.
+ self.nextToken()
+
+ def reenterJSX(self):
+ self.startJSX()
+ self.expectJSX('}')
+
+ # Pop the closing '}' added from the lookahead.
+ if self.config.tokens:
+ self.tokens.pop()
+
+ def createJSXNode(self):
+ self.collectComments()
+ return Marker(
+ index=self.scanner.index,
+ line=self.scanner.lineNumber,
+ column=self.scanner.index - self.scanner.lineStart
+ )
+
+ def createJSXChildNode(self):
+ return Marker(
+ index=self.scanner.index,
+ line=self.scanner.lineNumber,
+ column=self.scanner.index - self.scanner.lineStart
+ )
+
+ def scanXHTMLEntity(self, quote):
+ result = '&'
+
+ valid = True
+ terminated = False
+ numeric = False
+ hex = False
+
+ while not self.scanner.eof() and valid and not terminated:
+ ch = self.scanner.source[self.scanner.index]
+ if ch == quote:
+ break
+
+ terminated = (ch == ';')
+ result += ch
+ self.scanner.index += 1
+ if not terminated:
+ length = len(result)
+ if length == 2:
+ # e.g. '&#123;'
+ numeric = (ch == '#')
+ elif length == 3:
+ if numeric:
+ # e.g. '&#x41;'
+ hex = ch == 'x'
+ valid = hex or Character.isDecimalDigit(ch)
+ numeric = numeric and not hex
+ else:
+ valid = valid and not (numeric and not Character.isDecimalDigit(ch))
+ valid = valid and not (hex and not Character.isHexDigit(ch))
+
+ if valid and terminated and len(result) > 2:
+ # e.g. '&#x41;' becomes just '#x41'
+ st = result[1:-1]
+ if numeric and len(st) > 1:
+ result = uchr(int(st[1:], 10))
+ elif hex and len(st) > 2:
+ result = uchr(int(st[2:], 16))
+ elif not numeric and not hex and st in XHTMLEntities:
+ result = XHTMLEntities[st]
+
+ return result
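+
+ # Worked examples (editor's note): '&#65;' and '&#x41;' both decode to
+ # 'A'; named entities such as '&amp;' resolve via XHTMLEntities; an
+ # unterminated entity is returned verbatim, '&' prefix and all.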
+
+ # Scan the next JSX token. This replaces Scanner#lex when in JSX mode.
+
+ def lexJSX(self):
+ ch = self.scanner.source[self.scanner.index]
+
+ # < > / : = { }
+ if ch in ('<', '>', '/', ':', '=', '{', '}'):
+ value = self.scanner.source[self.scanner.index]
+ self.scanner.index += 1
+ return RawJSXToken(
+ type=Token.Punctuator,
+ value=value,
+ lineNumber=self.scanner.lineNumber,
+ lineStart=self.scanner.lineStart,
+ start=self.scanner.index - 1,
+ end=self.scanner.index
+ )
+
+ # " '
+ if ch in ('\'', '"'):
+ start = self.scanner.index
+ quote = self.scanner.source[self.scanner.index]
+ self.scanner.index += 1
+ str = ''
+ while not self.scanner.eof():
+ ch = self.scanner.source[self.scanner.index]
+ self.scanner.index += 1
+ if ch == quote:
+ break
+ elif ch == '&':
+ str += self.scanXHTMLEntity(quote)
+ else:
+ str += ch
+
+ return RawJSXToken(
+ type=Token.StringLiteral,
+ value=str,
+ lineNumber=self.scanner.lineNumber,
+ lineStart=self.scanner.lineStart,
+ start=start,
+ end=self.scanner.index
+ )
+
+ # ... or .
+ if ch == '.':
+ start = self.scanner.index
+ if self.scanner.source[start + 1:start + 3] == '..':
+ value = '...'
+ self.scanner.index += 3
+ else:
+ value = '.'
+ self.scanner.index += 1
+ return RawJSXToken(
+ type=Token.Punctuator,
+ value=value,
+ lineNumber=self.scanner.lineNumber,
+ lineStart=self.scanner.lineStart,
+ start=start,
+ end=self.scanner.index
+ )
+
+ # `
+ if ch == '`':
+ # Only placeholder, since it will be rescanned as a real assignment expression.
+ return RawJSXToken(
+ type=Token.Template,
+ value='',
+ lineNumber=self.scanner.lineNumber,
+ lineStart=self.scanner.lineStart,
+ start=self.scanner.index,
+ end=self.scanner.index
+ )
+
+ # Identifier cannot contain a backslash (char code 92).
+ if Character.isIdentifierStart(ch) and ch != '\\':
+ start = self.scanner.index
+ self.scanner.index += 1
+ while not self.scanner.eof():
+ ch = self.scanner.source[self.scanner.index]
+ if Character.isIdentifierPart(ch) and ch != '\\':
+ self.scanner.index += 1
+ elif ch == '-':
+ # Hyphen (char code 45) can be part of an identifier.
+ self.scanner.index += 1
+ else:
+ break
+
+ id = self.scanner.source[start:self.scanner.index]
+ return RawJSXToken(
+ type=JSXToken.Identifier,
+ value=id,
+ lineNumber=self.scanner.lineNumber,
+ lineStart=self.scanner.lineStart,
+ start=start,
+ end=self.scanner.index
+ )
+
+ return self.scanner.lex()
+
+ def nextJSXToken(self):
+ self.collectComments()
+
+ self.startMarker.index = self.scanner.index
+ self.startMarker.line = self.scanner.lineNumber
+ self.startMarker.column = self.scanner.index - self.scanner.lineStart
+ token = self.lexJSX()
+ self.lastMarker.index = self.scanner.index
+ self.lastMarker.line = self.scanner.lineNumber
+ self.lastMarker.column = self.scanner.index - self.scanner.lineStart
+
+ if self.config.tokens:
+ self.tokens.append(self.convertToken(token))
+
+ return token
+
+ def nextJSXText(self):
+ self.startMarker.index = self.scanner.index
+ self.startMarker.line = self.scanner.lineNumber
+ self.startMarker.column = self.scanner.index - self.scanner.lineStart
+
+ start = self.scanner.index
+
+ text = ''
+ while not self.scanner.eof():
+ ch = self.scanner.source[self.scanner.index]
+ if ch in ('{', '<'):
+ break
+
+ self.scanner.index += 1
+ text += ch
+ if Character.isLineTerminator(ch):
+ self.scanner.lineNumber += 1
+ if ch == '\r' and self.scanner.source[self.scanner.index] == '\n':
+ self.scanner.index += 1
+
+ self.scanner.lineStart = self.scanner.index
+
+ self.lastMarker.index = self.scanner.index
+ self.lastMarker.line = self.scanner.lineNumber
+ self.lastMarker.column = self.scanner.index - self.scanner.lineStart
+
+ token = RawJSXToken(
+ type=JSXToken.Text,
+ value=text,
+ lineNumber=self.scanner.lineNumber,
+ lineStart=self.scanner.lineStart,
+ start=start,
+ end=self.scanner.index
+ )
+
+ if text and self.config.tokens:
+ self.tokens.append(self.convertToken(token))
+
+ return token
+
+ def peekJSXToken(self):
+ state = self.scanner.saveState()
+ self.scanner.scanComments()
+ next = self.lexJSX()
+ self.scanner.restoreState(state)
+
+ return next
+
+ # Expect the next JSX token to match the specified punctuator.
+ # If not, an exception will be thrown.
+
+ def expectJSX(self, value):
+ token = self.nextJSXToken()
+ if token.type is not Token.Punctuator or token.value != value:
+ self.throwUnexpectedToken(token)
+
+ # Return True if the next JSX token matches the specified punctuator.
+
+ def matchJSX(self, *value):
+ next = self.peekJSXToken()
+ return next.type is Token.Punctuator and next.value in value
+
+ def parseJSXIdentifier(self):
+ node = self.createJSXNode()
+ token = self.nextJSXToken()
+ if token.type is not JSXToken.Identifier:
+ self.throwUnexpectedToken(token)
+
+ return self.finalize(node, JSXNode.JSXIdentifier(token.value))
+
+ def parseJSXElementName(self):
+ node = self.createJSXNode()
+ elementName = self.parseJSXIdentifier()
+
+ if self.matchJSX(':'):
+ namespace = elementName
+ self.expectJSX(':')
+ name = self.parseJSXIdentifier()
+ elementName = self.finalize(node, JSXNode.JSXNamespacedName(namespace, name))
+ elif self.matchJSX('.'):
+ while self.matchJSX('.'):
+ object = elementName
+ self.expectJSX('.')
+ property = self.parseJSXIdentifier()
+ elementName = self.finalize(node, JSXNode.JSXMemberExpression(object, property))
+
+ return elementName
+
+ def parseJSXAttributeName(self):
+ node = self.createJSXNode()
+
+ identifier = self.parseJSXIdentifier()
+ if self.matchJSX(':'):
+ namespace = identifier
+ self.expectJSX(':')
+ name = self.parseJSXIdentifier()
+ attributeName = self.finalize(node, JSXNode.JSXNamespacedName(namespace, name))
+ else:
+ attributeName = identifier
+
+ return attributeName
+
+ def parseJSXStringLiteralAttribute(self):
+ node = self.createJSXNode()
+ token = self.nextJSXToken()
+ if token.type is not Token.StringLiteral:
+ self.throwUnexpectedToken(token)
+
+ raw = self.getTokenRaw(token)
+ return self.finalize(node, Node.Literal(token.value, raw))
+
+ def parseJSXExpressionAttribute(self):
+ node = self.createJSXNode()
+
+ self.expectJSX('{')
+ self.finishJSX()
+
+ if self.match('}'):
+ self.tolerateError('JSX attributes must only be assigned a non-empty expression')
+
+ expression = self.parseAssignmentExpression()
+ self.reenterJSX()
+
+ return self.finalize(node, JSXNode.JSXExpressionContainer(expression))
+
+ def parseJSXAttributeValue(self):
+ if self.matchJSX('{'):
+ return self.parseJSXExpressionAttribute()
+ if self.matchJSX('<'):
+ return self.parseJSXElement()
+
+ return self.parseJSXStringLiteralAttribute()
+
+ def parseJSXNameValueAttribute(self):
+ node = self.createJSXNode()
+ name = self.parseJSXAttributeName()
+ value = None
+ if self.matchJSX('='):
+ self.expectJSX('=')
+ value = self.parseJSXAttributeValue()
+
+ return self.finalize(node, JSXNode.JSXAttribute(name, value))
+
+ def parseJSXSpreadAttribute(self):
+ node = self.createJSXNode()
+ self.expectJSX('{')
+ self.expectJSX('...')
+
+ self.finishJSX()
+ argument = self.parseAssignmentExpression()
+ self.reenterJSX()
+
+ return self.finalize(node, JSXNode.JSXSpreadAttribute(argument))
+
+ def parseJSXAttributes(self):
+ attributes = []
+
+ while not self.matchJSX('/', '>'):
+ attribute = self.parseJSXSpreadAttribute() if self.matchJSX('{') else self.parseJSXNameValueAttribute()
+ attributes.append(attribute)
+
+ return attributes
+
+ def parseJSXOpeningElement(self):
+ node = self.createJSXNode()
+
+ self.expectJSX('<')
+ name = self.parseJSXElementName()
+ attributes = self.parseJSXAttributes()
+ selfClosing = self.matchJSX('/')
+ if selfClosing:
+ self.expectJSX('/')
+
+ self.expectJSX('>')
+
+ return self.finalize(node, JSXNode.JSXOpeningElement(name, selfClosing, attributes))
+
+ def parseJSXBoundaryElement(self):
+ node = self.createJSXNode()
+
+ self.expectJSX('<')
+ if self.matchJSX('/'):
+ self.expectJSX('/')
+ elementName = self.parseJSXElementName()
+ self.expectJSX('>')
+ return self.finalize(node, JSXNode.JSXClosingElement(elementName))
+
+ name = self.parseJSXElementName()
+ attributes = self.parseJSXAttributes()
+ selfClosing = self.matchJSX('/')
+ if selfClosing:
+ self.expectJSX('/')
+
+ self.expectJSX('>')
+
+ return self.finalize(node, JSXNode.JSXOpeningElement(name, selfClosing, attributes))
+
+ def parseJSXEmptyExpression(self):
+ node = self.createJSXChildNode()
+ self.collectComments()
+ self.lastMarker.index = self.scanner.index
+ self.lastMarker.line = self.scanner.lineNumber
+ self.lastMarker.column = self.scanner.index - self.scanner.lineStart
+ return self.finalize(node, JSXNode.JSXEmptyExpression())
+
+ def parseJSXExpressionContainer(self):
+ node = self.createJSXNode()
+ self.expectJSX('{')
+
+ if self.matchJSX('}'):
+ expression = self.parseJSXEmptyExpression()
+ self.expectJSX('}')
+ else:
+ self.finishJSX()
+ expression = self.parseAssignmentExpression()
+ self.reenterJSX()
+
+ return self.finalize(node, JSXNode.JSXExpressionContainer(expression))
+
+ def parseJSXChildren(self):
+ children = []
+
+ while not self.scanner.eof():
+ node = self.createJSXChildNode()
+ token = self.nextJSXText()
+ if token.start < token.end:
+ raw = self.getTokenRaw(token)
+ child = self.finalize(node, JSXNode.JSXText(token.value, raw))
+ children.append(child)
+
+ if not self.scanner.eof() and self.scanner.source[self.scanner.index] == '{':
+ container = self.parseJSXExpressionContainer()
+ children.append(container)
+ else:
+ break
+
+ return children
+
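+ # Editor note (illustration, not part of upstream esprima): nested elements
+ # are handled below with an explicit stack rather than recursion. For
+ # '<a><b>text</b></a>', '<b>' pushes the current element onto the stack,
+ # '</b>' pops it and appends the finished child, and '</a>' closes the
+ # outermost element.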
+ def parseComplexJSXElement(self, el):
+ stack = []
+
+ while not self.scanner.eof():
+ el.children.extend(self.parseJSXChildren())
+ node = self.createJSXChildNode()
+ element = self.parseJSXBoundaryElement()
+ if element.type is JSXSyntax.JSXOpeningElement:
+ opening = element
+ if opening.selfClosing:
+ child = self.finalize(node, JSXNode.JSXElement(opening, [], None))
+ el.children.append(child)
+ else:
+ stack.append(el)
+ el = MetaJSXElement(
+ node=node,
+ opening=opening,
+ closing=None,
+ children=[],
+ )
+
+ if element.type is JSXSyntax.JSXClosingElement:
+ el.closing = element
+ open = getQualifiedElementName(el.opening.name)
+ close = getQualifiedElementName(el.closing.name)
+ if open != close:
+ self.tolerateError('Expected corresponding JSX closing tag for %0', open)
+
+ if stack:
+ child = self.finalize(el.node, JSXNode.JSXElement(el.opening, el.children, el.closing))
+ el = stack[-1]
+ el.children.append(child)
+ stack.pop()
+ else:
+ break
+
+ return el
+
+ def parseJSXElement(self):
+ node = self.createJSXNode()
+
+ opening = self.parseJSXOpeningElement()
+ children = []
+ closing = None
+
+ if not opening.selfClosing:
+ el = self.parseComplexJSXElement(MetaJSXElement(
+ node=node,
+ opening=opening,
+ closing=closing,
+ children=children
+ ))
+ children = el.children
+ closing = el.closing
+
+ return self.finalize(node, JSXNode.JSXElement(opening, children, closing))
+
+ def parseJSXRoot(self):
+ # Pop the opening '<' added from the lookahead.
+ if self.config.tokens:
+ self.tokens.pop()
+
+ self.startJSX()
+ element = self.parseJSXElement()
+ self.finishJSX()
+
+ return element
+
+ def isStartOfExpression(self):
+ return super(JSXParser, self).isStartOfExpression() or self.match('<')
diff --git a/third_party/python/esprima/esprima/jsx_syntax.py b/third_party/python/esprima/esprima/jsx_syntax.py
new file mode 100644
index 0000000000..808cc8b027
--- /dev/null
+++ b/third_party/python/esprima/esprima/jsx_syntax.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import unicode_literals
+
+
+class JSXSyntax:
+ JSXAttribute = "JSXAttribute"
+ JSXClosingElement = "JSXClosingElement"
+ JSXElement = "JSXElement"
+ JSXEmptyExpression = "JSXEmptyExpression"
+ JSXExpressionContainer = "JSXExpressionContainer"
+ JSXIdentifier = "JSXIdentifier"
+ JSXMemberExpression = "JSXMemberExpression"
+ JSXNamespacedName = "JSXNamespacedName"
+ JSXOpeningElement = "JSXOpeningElement"
+ JSXSpreadAttribute = "JSXSpreadAttribute"
+ JSXText = "JSXText"
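+
+
+# Editor illustration (not part of upstream esprima): for a fragment like
+#     <ns:tag attr="v">{expr}text</ns:tag>
+# the element name is a JSXNamespacedName, attr="v" is a JSXAttribute, and the
+# children are a JSXExpressionContainer followed by a JSXText node.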
diff --git a/third_party/python/esprima/esprima/messages.py b/third_party/python/esprima/esprima/messages.py
new file mode 100644
index 0000000000..bb6314e1ea
--- /dev/null
+++ b/third_party/python/esprima/esprima/messages.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import unicode_literals
+
+
+# Error messages should be identical to V8.
+class Messages:
+ ObjectPatternAsRestParameter = "Unexpected token {"
+ BadImportCallArity = "Unexpected token"
+ BadGetterArity = "Getter must not have any formal parameters"
+ BadSetterArity = "Setter must have exactly one formal parameter"
+ BadSetterRestParameter = "Setter function argument must not be a rest parameter"
+ ConstructorIsAsync = "Class constructor may not be an async method"
+ ConstructorSpecialMethod = "Class constructor may not be an accessor"
+ DeclarationMissingInitializer = "Missing initializer in %0 declaration"
+ DefaultRestParameter = "Unexpected token ="
+ DefaultRestProperty = "Unexpected token ="
+ DuplicateBinding = "Duplicate binding %0"
+ DuplicateConstructor = "A class may only have one constructor"
+ DuplicateProtoProperty = "Duplicate __proto__ fields are not allowed in object literals"
+ ForInOfLoopInitializer = "%0 loop variable declaration may not have an initializer"
+ GeneratorInLegacyContext = "Generator declarations are not allowed in legacy contexts"
+ IllegalBreak = "Illegal break statement"
+ IllegalContinue = "Illegal continue statement"
+ IllegalExportDeclaration = "Unexpected token"
+ IllegalImportDeclaration = "Unexpected token"
+ IllegalLanguageModeDirective = "Illegal 'use strict' directive in function with non-simple parameter list"
+ IllegalReturn = "Illegal return statement"
+ InvalidEscapedReservedWord = "Keyword must not contain escaped characters"
+ InvalidHexEscapeSequence = "Invalid hexadecimal escape sequence"
+ InvalidLHSInAssignment = "Invalid left-hand side in assignment"
+ InvalidLHSInForIn = "Invalid left-hand side in for-in"
+ InvalidLHSInForLoop = "Invalid left-hand side in for-loop"
+ InvalidModuleSpecifier = "Unexpected token"
+ InvalidRegExp = "Invalid regular expression"
+ LetInLexicalBinding = "let is disallowed as a lexically bound name"
+ MissingFromClause = "Unexpected token"
+ MultipleDefaultsInSwitch = "More than one default clause in switch statement"
+ NewlineAfterThrow = "Illegal newline after throw"
+ NoAsAfterImportNamespace = "Unexpected token"
+ NoCatchOrFinally = "Missing catch or finally after try"
+ ParameterAfterRestParameter = "Rest parameter must be last formal parameter"
+ PropertyAfterRestProperty = "Unexpected token"
+ Redeclaration = "%0 '%1' has already been declared"
+ StaticPrototype = "Classes may not have static property named prototype"
+ StrictCatchVariable = "Catch variable may not be eval or arguments in strict mode"
+ StrictDelete = "Delete of an unqualified identifier in strict mode."
+ StrictFunction = "In strict mode code, functions can only be declared at top level or inside a block"
+ StrictFunctionName = "Function name may not be eval or arguments in strict mode"
+ StrictLHSAssignment = "Assignment to eval or arguments is not allowed in strict mode"
+ StrictLHSPostfix = "Postfix increment/decrement may not have eval or arguments operand in strict mode"
+ StrictLHSPrefix = "Prefix increment/decrement may not have eval or arguments operand in strict mode"
+ StrictModeWith = "Strict mode code may not include a with statement"
+ StrictOctalLiteral = "Octal literals are not allowed in strict mode."
+ StrictParamDupe = "Strict mode function may not have duplicate parameter names"
+ StrictParamName = "Parameter name eval or arguments is not allowed in strict mode"
+ StrictReservedWord = "Use of future reserved word in strict mode"
+ StrictVarName = "Variable name may not be eval or arguments in strict mode"
+ TemplateOctalLiteral = "Octal literals are not allowed in template strings."
+ UnexpectedEOS = "Unexpected end of input"
+ UnexpectedIdentifier = "Unexpected identifier"
+ UnexpectedNumber = "Unexpected number"
+ UnexpectedReserved = "Unexpected reserved word"
+ UnexpectedString = "Unexpected string"
+ UnexpectedTemplate = "Unexpected quasi %0"
+ UnexpectedToken = "Unexpected token %0"
+ UnexpectedTokenIllegal = "Unexpected token ILLEGAL"
+ UnknownLabel = "Undefined label '%0'"
+ UnterminatedRegExp = "Invalid regular expression: missing /"
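+
+ # Editor illustration (not part of upstream esprima): the %0, %1, ...
+ # placeholders are filled positionally by the parser's error helpers, e.g.
+ # Messages.Redeclaration with arguments ('Label', 'x') becomes
+ # "Label 'x' has already been declared".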
diff --git a/third_party/python/esprima/esprima/nodes.py b/third_party/python/esprima/esprima/nodes.py
new file mode 100644
index 0000000000..bbbbdb893b
--- /dev/null
+++ b/third_party/python/esprima/esprima/nodes.py
@@ -0,0 +1,620 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+from .objects import Object
+from .syntax import Syntax
+from .scanner import RegExp
+
+
+class Node(Object):
+ def __dir__(self):
+ return list(self.__dict__.keys())
+
+ def __iter__(self):
+ # Iterate over attribute names; returning the bound method itself, as the
+ # original port did, is not a valid iterator.
+ return iter(self.__dict__)
+
+ def keys(self):
+ return self.__dict__.keys()
+
+ def items(self):
+ return self.__dict__.items()
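+
+ # Editor illustration (not part of upstream esprima): keys()/items() expose
+ # a node's attributes as a mapping, e.g.
+ # dict(Identifier('x').items()) == {'type': 'Identifier', 'name': 'x'}.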
+
+
+class ArrayExpression(Node):
+ def __init__(self, elements):
+ self.type = Syntax.ArrayExpression
+ self.elements = elements
+
+
+class ArrayPattern(Node):
+ def __init__(self, elements):
+ self.type = Syntax.ArrayPattern
+ self.elements = elements
+
+
+class ArrowFunctionExpression(Node):
+ def __init__(self, params, body, expression):
+ self.type = Syntax.ArrowFunctionExpression
+ self.generator = False
+ self.isAsync = False
+ self.params = params
+ self.body = body
+ self.expression = expression
+
+
+class AssignmentExpression(Node):
+ def __init__(self, operator, left, right):
+ self.type = Syntax.AssignmentExpression
+ self.operator = operator
+ self.left = left
+ self.right = right
+
+
+class AssignmentPattern(Node):
+ def __init__(self, left, right):
+ self.type = Syntax.AssignmentPattern
+ self.left = left
+ self.right = right
+
+
+class AsyncArrowFunctionExpression(Node):
+ def __init__(self, params, body, expression):
+ self.type = Syntax.ArrowFunctionExpression
+ self.generator = False
+ self.isAsync = True
+ self.params = params
+ self.body = body
+ self.expression = expression
+
+
+class AsyncFunctionDeclaration(Node):
+ def __init__(self, id, params, body):
+ self.type = Syntax.FunctionDeclaration
+ self.generator = False
+ self.expression = False
+ self.isAsync = True
+ self.id = id
+ self.params = params
+ self.body = body
+
+
+class AsyncFunctionExpression(Node):
+ def __init__(self, id, params, body):
+ self.type = Syntax.FunctionExpression
+ self.generator = False
+ self.expression = False
+ self.isAsync = True
+ self.id = id
+ self.params = params
+ self.body = body
+
+
+class AwaitExpression(Node):
+ def __init__(self, argument):
+ self.type = Syntax.AwaitExpression
+ self.argument = argument
+
+
+class BinaryExpression(Node):
+ def __init__(self, operator, left, right):
+ self.type = Syntax.LogicalExpression if operator in ('||', '&&') else Syntax.BinaryExpression
+ self.operator = operator
+ self.left = left
+ self.right = right
+
+
+class BlockStatement(Node):
+ def __init__(self, body):
+ self.type = Syntax.BlockStatement
+ self.body = body
+
+
+class BreakStatement(Node):
+ def __init__(self, label):
+ self.type = Syntax.BreakStatement
+ self.label = label
+
+
+class CallExpression(Node):
+ def __init__(self, callee, args):
+ self.type = Syntax.CallExpression
+ self.callee = callee
+ self.arguments = args
+
+
+class CatchClause(Node):
+ def __init__(self, param, body):
+ self.type = Syntax.CatchClause
+ self.param = param
+ self.body = body
+
+
+class ClassBody(Node):
+ def __init__(self, body):
+ self.type = Syntax.ClassBody
+ self.body = body
+
+
+class ClassDeclaration(Node):
+ def __init__(self, id, superClass, body):
+ self.type = Syntax.ClassDeclaration
+ self.id = id
+ self.superClass = superClass
+ self.body = body
+
+
+class ClassExpression(Node):
+ def __init__(self, id, superClass, body):
+ self.type = Syntax.ClassExpression
+ self.id = id
+ self.superClass = superClass
+ self.body = body
+
+
+class ComputedMemberExpression(Node):
+ def __init__(self, object, property):
+ self.type = Syntax.MemberExpression
+ self.computed = True
+ self.object = object
+ self.property = property
+
+
+class ConditionalExpression(Node):
+ def __init__(self, test, consequent, alternate):
+ self.type = Syntax.ConditionalExpression
+ self.test = test
+ self.consequent = consequent
+ self.alternate = alternate
+
+
+class ContinueStatement(Node):
+ def __init__(self, label):
+ self.type = Syntax.ContinueStatement
+ self.label = label
+
+
+class DebuggerStatement(Node):
+ def __init__(self):
+ self.type = Syntax.DebuggerStatement
+
+
+class Directive(Node):
+ def __init__(self, expression, directive):
+ self.type = Syntax.ExpressionStatement
+ self.expression = expression
+ self.directive = directive
+
+
+class DoWhileStatement(Node):
+ def __init__(self, body, test):
+ self.type = Syntax.DoWhileStatement
+ self.body = body
+ self.test = test
+
+
+class EmptyStatement(Node):
+ def __init__(self):
+ self.type = Syntax.EmptyStatement
+
+
+class ExportAllDeclaration(Node):
+ def __init__(self, source):
+ self.type = Syntax.ExportAllDeclaration
+ self.source = source
+
+
+class ExportDefaultDeclaration(Node):
+ def __init__(self, declaration):
+ self.type = Syntax.ExportDefaultDeclaration
+ self.declaration = declaration
+
+
+class ExportNamedDeclaration(Node):
+ def __init__(self, declaration, specifiers, source):
+ self.type = Syntax.ExportNamedDeclaration
+ self.declaration = declaration
+ self.specifiers = specifiers
+ self.source = source
+
+
+class ExportSpecifier(Node):
+ def __init__(self, local, exported):
+ self.type = Syntax.ExportSpecifier
+ self.exported = exported
+ self.local = local
+
+
+class ExportDefaultSpecifier(Node):
+ def __init__(self, local):
+ self.type = Syntax.ExportDefaultSpecifier
+ self.local = local
+
+
+class ExpressionStatement(Node):
+ def __init__(self, expression):
+ self.type = Syntax.ExpressionStatement
+ self.expression = expression
+
+
+class ForInStatement(Node):
+ def __init__(self, left, right, body):
+ self.type = Syntax.ForInStatement
+ self.each = False
+ self.left = left
+ self.right = right
+ self.body = body
+
+
+class ForOfStatement(Node):
+ def __init__(self, left, right, body):
+ self.type = Syntax.ForOfStatement
+ self.left = left
+ self.right = right
+ self.body = body
+
+
+class ForStatement(Node):
+ def __init__(self, init, test, update, body):
+ self.type = Syntax.ForStatement
+ self.init = init
+ self.test = test
+ self.update = update
+ self.body = body
+
+
+class FunctionDeclaration(Node):
+ def __init__(self, id, params, body, generator):
+ self.type = Syntax.FunctionDeclaration
+ self.expression = False
+ self.isAsync = False
+ self.id = id
+ self.params = params
+ self.body = body
+ self.generator = generator
+
+
+class FunctionExpression(Node):
+ def __init__(self, id, params, body, generator):
+ self.type = Syntax.FunctionExpression
+ self.expression = False
+ self.isAsync = False
+ self.id = id
+ self.params = params
+ self.body = body
+ self.generator = generator
+
+
+class Identifier(Node):
+ def __init__(self, name):
+ self.type = Syntax.Identifier
+ self.name = name
+
+
+class IfStatement(Node):
+ def __init__(self, test, consequent, alternate):
+ self.type = Syntax.IfStatement
+ self.test = test
+ self.consequent = consequent
+ self.alternate = alternate
+
+
+class Import(Node):
+ def __init__(self):
+ self.type = Syntax.Import
+
+
+class ImportDeclaration(Node):
+ def __init__(self, specifiers, source):
+ self.type = Syntax.ImportDeclaration
+ self.specifiers = specifiers
+ self.source = source
+
+
+class ImportDefaultSpecifier(Node):
+ def __init__(self, local):
+ self.type = Syntax.ImportDefaultSpecifier
+ self.local = local
+
+
+class ImportNamespaceSpecifier(Node):
+ def __init__(self, local):
+ self.type = Syntax.ImportNamespaceSpecifier
+ self.local = local
+
+
+class ImportSpecifier(Node):
+ def __init__(self, local, imported):
+ self.type = Syntax.ImportSpecifier
+ self.local = local
+ self.imported = imported
+
+
+class LabeledStatement(Node):
+ def __init__(self, label, body):
+ self.type = Syntax.LabeledStatement
+ self.label = label
+ self.body = body
+
+
+class Literal(Node):
+ def __init__(self, value, raw):
+ self.type = Syntax.Literal
+ self.value = value
+ self.raw = raw
+
+
+class MetaProperty(Node):
+ def __init__(self, meta, property):
+ self.type = Syntax.MetaProperty
+ self.meta = meta
+ self.property = property
+
+
+class MethodDefinition(Node):
+ def __init__(self, key, computed, value, kind, isStatic):
+ self.type = Syntax.MethodDefinition
+ self.key = key
+ self.computed = computed
+ self.value = value
+ self.kind = kind
+ self.static = isStatic
+
+
+class FieldDefinition(Node):
+ def __init__(self, key, computed, value, kind, isStatic):
+ self.type = Syntax.FieldDefinition
+ self.key = key
+ self.computed = computed
+ self.value = value
+ self.kind = kind
+ self.static = isStatic
+
+
+class Module(Node):
+ def __init__(self, body):
+ self.type = Syntax.Program
+ self.sourceType = 'module'
+ self.body = body
+
+
+class NewExpression(Node):
+ def __init__(self, callee, args):
+ self.type = Syntax.NewExpression
+ self.callee = callee
+ self.arguments = args
+
+
+class ObjectExpression(Node):
+ def __init__(self, properties):
+ self.type = Syntax.ObjectExpression
+ self.properties = properties
+
+
+class ObjectPattern(Node):
+ def __init__(self, properties):
+ self.type = Syntax.ObjectPattern
+ self.properties = properties
+
+
+class Property(Node):
+ def __init__(self, kind, key, computed, value, method, shorthand):
+ self.type = Syntax.Property
+ self.key = key
+ self.computed = computed
+ self.value = value
+ self.kind = kind
+ self.method = method
+ self.shorthand = shorthand
+
+
+class RegexLiteral(Node):
+ def __init__(self, value, raw, pattern, flags):
+ self.type = Syntax.Literal
+ self.value = value
+ self.raw = raw
+ self.regex = RegExp(
+ pattern=pattern,
+ flags=flags,
+ )
+
+
+class RestElement(Node):
+ def __init__(self, argument):
+ self.type = Syntax.RestElement
+ self.argument = argument
+
+
+class ReturnStatement(Node):
+ def __init__(self, argument):
+ self.type = Syntax.ReturnStatement
+ self.argument = argument
+
+
+class Script(Node):
+ def __init__(self, body):
+ self.type = Syntax.Program
+ self.sourceType = 'script'
+ self.body = body
+
+
+class SequenceExpression(Node):
+ def __init__(self, expressions):
+ self.type = Syntax.SequenceExpression
+ self.expressions = expressions
+
+
+class SpreadElement(Node):
+ def __init__(self, argument):
+ self.type = Syntax.SpreadElement
+ self.argument = argument
+
+
+class StaticMemberExpression(Node):
+ def __init__(self, object, property):
+ self.type = Syntax.MemberExpression
+ self.computed = False
+ self.object = object
+ self.property = property
+
+
+class Super(Node):
+ def __init__(self):
+ self.type = Syntax.Super
+
+
+class SwitchCase(Node):
+ def __init__(self, test, consequent):
+ self.type = Syntax.SwitchCase
+ self.test = test
+ self.consequent = consequent
+
+
+class SwitchStatement(Node):
+ def __init__(self, discriminant, cases):
+ self.type = Syntax.SwitchStatement
+ self.discriminant = discriminant
+ self.cases = cases
+
+
+class TaggedTemplateExpression(Node):
+ def __init__(self, tag, quasi):
+ self.type = Syntax.TaggedTemplateExpression
+ self.tag = tag
+ self.quasi = quasi
+
+
+class TemplateElement(Node):
+ class Value(Object):
+ def __init__(self, raw, cooked):
+ self.raw = raw
+ self.cooked = cooked
+
+ def __init__(self, raw, cooked, tail):
+ self.type = Syntax.TemplateElement
+ self.value = TemplateElement.Value(raw, cooked)
+ self.tail = tail
+
+
+class TemplateLiteral(Node):
+ def __init__(self, quasis, expressions):
+ self.type = Syntax.TemplateLiteral
+ self.quasis = quasis
+ self.expressions = expressions
+
+
+class ThisExpression(Node):
+ def __init__(self):
+ self.type = Syntax.ThisExpression
+
+
+class ThrowStatement(Node):
+ def __init__(self, argument):
+ self.type = Syntax.ThrowStatement
+ self.argument = argument
+
+
+class TryStatement(Node):
+ def __init__(self, block, handler, finalizer):
+ self.type = Syntax.TryStatement
+ self.block = block
+ self.handler = handler
+ self.finalizer = finalizer
+
+
+class UnaryExpression(Node):
+ def __init__(self, operator, argument):
+ self.type = Syntax.UnaryExpression
+ self.prefix = True
+ self.operator = operator
+ self.argument = argument
+
+
+class UpdateExpression(Node):
+ def __init__(self, operator, argument, prefix):
+ self.type = Syntax.UpdateExpression
+ self.operator = operator
+ self.argument = argument
+ self.prefix = prefix
+
+
+class VariableDeclaration(Node):
+ def __init__(self, declarations, kind):
+ self.type = Syntax.VariableDeclaration
+ self.declarations = declarations
+ self.kind = kind
+
+
+class VariableDeclarator(Node):
+ def __init__(self, id, init):
+ self.type = Syntax.VariableDeclarator
+ self.id = id
+ self.init = init
+
+
+class WhileStatement(Node):
+ def __init__(self, test, body):
+ self.type = Syntax.WhileStatement
+ self.test = test
+ self.body = body
+
+
+class WithStatement(Node):
+ def __init__(self, object, body):
+ self.type = Syntax.WithStatement
+ self.object = object
+ self.body = body
+
+
+class YieldExpression(Node):
+ def __init__(self, argument, delegate):
+ self.type = Syntax.YieldExpression
+ self.argument = argument
+ self.delegate = delegate
+
+
+class ArrowParameterPlaceHolder(Node):
+ def __init__(self, params):
+ self.type = Syntax.ArrowParameterPlaceHolder
+ self.params = params
+ self.isAsync = False
+
+
+class AsyncArrowParameterPlaceHolder(Node):
+ def __init__(self, params):
+ self.type = Syntax.ArrowParameterPlaceHolder
+ self.params = params
+ self.isAsync = True
+
+
+class BlockComment(Node):
+ def __init__(self, value):
+ self.type = Syntax.BlockComment
+ self.value = value
+
+
+class LineComment(Node):
+ def __init__(self, value):
+ self.type = Syntax.LineComment
+ self.value = value
diff --git a/third_party/python/esprima/esprima/objects.py b/third_party/python/esprima/esprima/objects.py
new file mode 100644
index 0000000000..a8acca1b63
--- /dev/null
+++ b/third_party/python/esprima/esprima/objects.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+
+def toDict(value):
+ from .visitor import ToDictVisitor
+ return ToDictVisitor().visit(value)
+
+
+class Array(list):
+ pass
+
+
+class Object(object):
+ def toDict(self):
+ from .visitor import ToDictVisitor
+ return ToDictVisitor().visit(self)
+
+ def __repr__(self):
+ from .visitor import ReprVisitor
+ return ReprVisitor().visit(self)
+
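+ # Returning None for missing attributes mirrors JavaScript's `undefined`,
+ # so callers can probe optional fields (e.g. Object().anything is None)
+ # without guarding against AttributeError.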
+ def __getattr__(self, name):
+ return None
diff --git a/third_party/python/esprima/esprima/parser.py b/third_party/python/esprima/esprima/parser.py
new file mode 100644
index 0000000000..2309e7b6fb
--- /dev/null
+++ b/third_party/python/esprima/esprima/parser.py
@@ -0,0 +1,3104 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+from .objects import Object
+from .compat import basestring, unicode
+from .utils import format
+from .error_handler import ErrorHandler
+from .messages import Messages
+from .scanner import RawToken, Scanner, SourceLocation, Position, RegExp
+from .token import Token, TokenName
+from .syntax import Syntax
+from . import nodes as Node
+
+
+class Value(object):
+ def __init__(self, value):
+ self.value = value
+
+
+class Params(object):
+ def __init__(self, simple=None, message=None, stricted=None, firstRestricted=None, inFor=None, paramSet=None, params=None, get=None):
+ self.simple = simple
+ self.message = message
+ self.stricted = stricted
+ self.firstRestricted = firstRestricted
+ self.inFor = inFor
+ self.paramSet = paramSet
+ self.params = params
+ self.get = get
+
+
+class Config(Object):
+ def __init__(self, range=False, loc=False, source=None, tokens=False, comment=False, tolerant=False, **options):
+ self.range = range
+ self.loc = loc
+ self.source = source
+ self.tokens = tokens
+ self.comment = comment
+ self.tolerant = tolerant
+ for k, v in options.items():
+ setattr(self, k, v)
+
+
+class Context(object):
+ def __init__(self, isModule=False, allowAwait=False, allowIn=True, allowStrictDirective=True, allowYield=True, firstCoverInitializedNameError=None, isAssignmentTarget=False, isBindingElement=False, inFunctionBody=False, inIteration=False, inSwitch=False, labelSet=None, strict=False):
+ self.isModule = isModule
+ self.allowAwait = allowAwait
+ self.allowIn = allowIn
+ self.allowStrictDirective = allowStrictDirective
+ self.allowYield = allowYield
+ self.firstCoverInitializedNameError = firstCoverInitializedNameError
+ self.isAssignmentTarget = isAssignmentTarget
+ self.isBindingElement = isBindingElement
+ self.inFunctionBody = inFunctionBody
+ self.inIteration = inIteration
+ self.inSwitch = inSwitch
+ self.labelSet = {} if labelSet is None else labelSet
+ self.strict = strict
+
+
+class Marker(object):
+ def __init__(self, index=None, line=None, column=None):
+ self.index = index
+ self.line = line
+ self.column = column
+
+
+class TokenEntry(Object):
+ def __init__(self, type=None, value=None, regex=None, range=None, loc=None):
+ self.type = type
+ self.value = value
+ self.regex = regex
+ self.range = range
+ self.loc = loc
+
+
+class Parser(object):
+ def __init__(self, code, options=None, delegate=None):
+ # Use None as the default to avoid sharing a mutable default argument.
+ self.config = Config(**(options or {}))
+
+ self.delegate = delegate
+
+ self.errorHandler = ErrorHandler()
+ self.errorHandler.tolerant = self.config.tolerant
+ self.scanner = Scanner(code, self.errorHandler)
+ self.scanner.trackComment = self.config.comment
+
+ self.operatorPrecedence = {
+ '||': 1,
+ '&&': 2,
+ '|': 3,
+ '^': 4,
+ '&': 5,
+ '==': 6,
+ '!=': 6,
+ '===': 6,
+ '!==': 6,
+ '<': 7,
+ '>': 7,
+ '<=': 7,
+ '>=': 7,
+ 'instanceof': 7,
+ 'in': 7,
+ '<<': 8,
+ '>>': 8,
+ '>>>': 8,
+ '+': 9,
+ '-': 9,
+ '*': 11,
+ '/': 11,
+ '%': 11,
+ }
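+ # Editor note (illustration): higher numbers bind tighter; for '1 + 2 * 3'
+ # the parser groups the product first, since '*' (11) outranks '+' (9).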
+
+ self.lookahead = RawToken(
+ type=Token.EOF,
+ value='',
+ lineNumber=self.scanner.lineNumber,
+ lineStart=0,
+ start=0,
+ end=0
+ )
+ self.hasLineTerminator = False
+
+ self.context = Context(
+ isModule=False,
+ allowAwait=False,
+ allowIn=True,
+ allowStrictDirective=True,
+ allowYield=True,
+ firstCoverInitializedNameError=None,
+ isAssignmentTarget=False,
+ isBindingElement=False,
+ inFunctionBody=False,
+ inIteration=False,
+ inSwitch=False,
+ labelSet={},
+ strict=False
+ )
+ self.tokens = []
+
+ self.startMarker = Marker(
+ index=0,
+ line=self.scanner.lineNumber,
+ column=0
+ )
+ self.lastMarker = Marker(
+ index=0,
+ line=self.scanner.lineNumber,
+ column=0
+ )
+ self.nextToken()
+ self.lastMarker = Marker(
+ index=self.scanner.index,
+ line=self.scanner.lineNumber,
+ column=self.scanner.index - self.scanner.lineStart
+ )
+
+ def throwError(self, messageFormat, *args):
+ msg = format(messageFormat, *args)
+ index = self.lastMarker.index
+ line = self.lastMarker.line
+ column = self.lastMarker.column + 1
+ raise self.errorHandler.createError(index, line, column, msg)
+
+ def tolerateError(self, messageFormat, *args):
+ msg = format(messageFormat, *args)
+ index = self.lastMarker.index
+ line = self.scanner.lineNumber
+ column = self.lastMarker.column + 1
+ self.errorHandler.tolerateError(index, line, column, msg)
+
+ # Throw an exception because of the token.
+
+ def unexpectedTokenError(self, token=None, message=None):
+ msg = message or Messages.UnexpectedToken
+ if token:
+ if not message:
+ typ = token.type
+ if typ is Token.EOF:
+ msg = Messages.UnexpectedEOS
+ elif typ is Token.Identifier:
+ msg = Messages.UnexpectedIdentifier
+ elif typ is Token.NumericLiteral:
+ msg = Messages.UnexpectedNumber
+ elif typ is Token.StringLiteral:
+ msg = Messages.UnexpectedString
+ elif typ is Token.Template:
+ msg = Messages.UnexpectedTemplate
+ elif typ is Token.Keyword:
+ if self.scanner.isFutureReservedWord(token.value):
+ msg = Messages.UnexpectedReserved
+ elif self.context.strict and self.scanner.isStrictModeReservedWord(token.value):
+ msg = Messages.StrictReservedWord
+ else:
+ msg = Messages.UnexpectedToken
+ value = token.value
+ else:
+ value = 'ILLEGAL'
+
+ msg = msg.replace('%0', unicode(value), 1)
+
+ if token and isinstance(token.lineNumber, int):
+ index = token.start
+ line = token.lineNumber
+ lastMarkerLineStart = self.lastMarker.index - self.lastMarker.column
+ column = token.start - lastMarkerLineStart + 1
+ return self.errorHandler.createError(index, line, column, msg)
+ else:
+ index = self.lastMarker.index
+ line = self.lastMarker.line
+ column = self.lastMarker.column + 1
+ return self.errorHandler.createError(index, line, column, msg)
+
+ def throwUnexpectedToken(self, token=None, message=None):
+ raise self.unexpectedTokenError(token, message)
+
+ def tolerateUnexpectedToken(self, token=None, message=None):
+ self.errorHandler.tolerate(self.unexpectedTokenError(token, message))
+
+ def collectComments(self):
+ if not self.config.comment:
+ self.scanner.scanComments()
+ else:
+ comments = self.scanner.scanComments()
+ if comments:
+ for e in comments:
+ if e.multiLine:
+ node = Node.BlockComment(self.scanner.source[e.slice[0]:e.slice[1]])
+ else:
+ node = Node.LineComment(self.scanner.source[e.slice[0]:e.slice[1]])
+ if self.config.range:
+ node.range = e.range
+ if self.config.loc:
+ node.loc = e.loc
+ if self.delegate:
+ metadata = SourceLocation(
+ start=Position(
+ line=e.loc.start.line,
+ column=e.loc.start.column,
+ offset=e.range[0],
+ ),
+ end=Position(
+ line=e.loc.end.line,
+ column=e.loc.end.column,
+ offset=e.range[1],
+ )
+ )
+ new_node = self.delegate(node, metadata)
+ if new_node is not None:
+ node = new_node
+
+ # From internal representation to an external structure
+
+ def getTokenRaw(self, token):
+ return self.scanner.source[token.start:token.end]
+
+ def convertToken(self, token):
+ t = TokenEntry(
+ type=TokenName[token.type],
+ value=self.getTokenRaw(token),
+ )
+ if self.config.range:
+ t.range = [token.start, token.end]
+ if self.config.loc:
+ t.loc = SourceLocation(
+ start=Position(
+ line=self.startMarker.line,
+ column=self.startMarker.column,
+ ),
+ end=Position(
+ line=self.scanner.lineNumber,
+ column=self.scanner.index - self.scanner.lineStart,
+ ),
+ )
+ if token.type is Token.RegularExpression:
+ t.regex = RegExp(
+ pattern=token.pattern,
+ flags=token.flags,
+ )
+
+ return t
+
+ def nextToken(self):
+ token = self.lookahead
+
+ self.lastMarker.index = self.scanner.index
+ self.lastMarker.line = self.scanner.lineNumber
+ self.lastMarker.column = self.scanner.index - self.scanner.lineStart
+
+ self.collectComments()
+
+ if self.scanner.index != self.startMarker.index:
+ self.startMarker.index = self.scanner.index
+ self.startMarker.line = self.scanner.lineNumber
+ self.startMarker.column = self.scanner.index - self.scanner.lineStart
+
+ next = self.scanner.lex()
+ self.hasLineTerminator = token.lineNumber != next.lineNumber
+
+ if next and self.context.strict and next.type is Token.Identifier:
+ if self.scanner.isStrictModeReservedWord(next.value):
+ next.type = Token.Keyword
+ self.lookahead = next
+
+ if self.config.tokens and next.type is not Token.EOF:
+ self.tokens.append(self.convertToken(next))
+
+ return token
+
+ def nextRegexToken(self):
+ self.collectComments()
+
+ token = self.scanner.scanRegExp()
+ if self.config.tokens:
+ # Pop the previous token ('/' or '/='); it was added from the
+ # lookahead token.
+ self.tokens.pop()
+
+ self.tokens.append(self.convertToken(token))
+
+ # Prime the next lookahead.
+ self.lookahead = token
+ self.nextToken()
+
+ return token
+
+ def createNode(self):
+ return Marker(
+ index=self.startMarker.index,
+ line=self.startMarker.line,
+ column=self.startMarker.column,
+ )
+
+ def startNode(self, token, lastLineStart=0):
+ column = token.start - token.lineStart
+ line = token.lineNumber
+ if column < 0:
+ column += lastLineStart
+ line -= 1
+
+ return Marker(
+ index=token.start,
+ line=line,
+ column=column,
+ )
+
+ def finalize(self, marker, node):
+ if self.config.range:
+ node.range = [marker.index, self.lastMarker.index]
+
+ if self.config.loc:
+ node.loc = SourceLocation(
+ start=Position(
+ line=marker.line,
+ column=marker.column,
+ ),
+ end=Position(
+ line=self.lastMarker.line,
+ column=self.lastMarker.column,
+ ),
+ )
+ if self.config.source:
+ node.loc.source = self.config.source
+
+ if self.delegate:
+ metadata = SourceLocation(
+ start=Position(
+ line=marker.line,
+ column=marker.column,
+ offset=marker.index,
+ ),
+ end=Position(
+ line=self.lastMarker.line,
+ column=self.lastMarker.column,
+ offset=self.lastMarker.index,
+ )
+ )
+ new_node = self.delegate(node, metadata)
+ if new_node is not None:
+ node = new_node
+
+ return node
+
+ # Expect the next token to match the specified punctuator.
+ # If not, an exception will be thrown.
+
+ def expect(self, value):
+ token = self.nextToken()
+ if token.type is not Token.Punctuator or token.value != value:
+ self.throwUnexpectedToken(token)
+
+ # Quietly expect a comma when in tolerant mode, otherwise delegates to expect().
+
+ def expectCommaSeparator(self):
+ if self.config.tolerant:
+ token = self.lookahead
+ if token.type is Token.Punctuator and token.value == ',':
+ self.nextToken()
+ elif token.type is Token.Punctuator and token.value == ';':
+ self.nextToken()
+ self.tolerateUnexpectedToken(token)
+ else:
+ self.tolerateUnexpectedToken(token, Messages.UnexpectedToken)
+ else:
+ self.expect(',')
+
+ # Expect the next token to match the specified keyword.
+ # If not, an exception will be thrown.
+
+ def expectKeyword(self, keyword):
+ token = self.nextToken()
+ if token.type is not Token.Keyword or token.value != keyword:
+ self.throwUnexpectedToken(token)
+
+ # Return true if the next token matches the specified punctuator.
+
+ def match(self, *value):
+ return self.lookahead.type is Token.Punctuator and self.lookahead.value in value
+
+ # Return true if the next token matches the specified keyword
+
+ def matchKeyword(self, *keyword):
+ return self.lookahead.type is Token.Keyword and self.lookahead.value in keyword
+
+ # Return true if the next token matches the specified contextual keyword
+ # (where an identifier is sometimes a keyword depending on the context)
+
+ def matchContextualKeyword(self, *keyword):
+ return self.lookahead.type is Token.Identifier and self.lookahead.value in keyword
+
+ # Return true if the next token is an assignment operator
+
+ def matchAssign(self):
+ if self.lookahead.type is not Token.Punctuator:
+ return False
+
+ op = self.lookahead.value
+ return op in ('=', '*=', '**=', '/=', '%=', '+=', '-=', '<<=', '>>=', '>>>=', '&=', '^=', '|=')
+
+ # Cover grammar support.
+ #
+ # When an assignment expression position starts with a left parenthesis, determining the type
+ # of the syntax must be deferred arbitrarily long, until the end of the parentheses pair (plus a lookahead)
+ # or the first comma. This also defers the determination of all the expressions nested in the pair.
+ #
+ # There are three productions that can be parsed in a parentheses pair and that need to be determined
+ # after the outermost pair is closed. They are:
+ #
+ # 1. AssignmentExpression
+ # 2. BindingElements
+ # 3. AssignmentTargets
+ #
+ # In order to avoid exponential backtracking, we use two flags to denote whether the production can be
+ # a binding element or an assignment target.
+ #
+ # The three productions have the relationship:
+ #
+ # BindingElements ⊆ AssignmentTargets ⊆ AssignmentExpression
+ #
+ # with the single exception that CoverInitializedName, when used directly in an Expression, generates
+ # an early error. Therefore, we need a third state, firstCoverInitializedNameError, to track the
+ # first use of CoverInitializedName and report it when we reach the end of the parentheses pair.
+ #
+ # The isolateCoverGrammar function runs the given parse function with a new cover grammar context and does
+ # not affect the current flags. This means the production being parsed is only used as an expression, so
+ # the CoverInitializedName check is conducted.
+ #
+ # The inheritCoverGrammar function runs the given parse function with a new cover grammar context and propagates
+ # the flags outside of the parser. This means the production being parsed is used as part of a potential
+ # pattern, so the CoverInitializedName check is deferred.
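+ #
+ # Editor illustration (not in the upstream comment): `({a = 0});` is an
+ # early error, because the CoverInitializedName `a = 0` is ultimately used
+ # as a plain expression, while `({a = 0} = {});` is valid, because the same
+ # production is reinterpreted as an ObjectPattern with a default value.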
+
+ def isolateCoverGrammar(self, parseFunction):
+ previousIsBindingElement = self.context.isBindingElement
+ previousIsAssignmentTarget = self.context.isAssignmentTarget
+ previousFirstCoverInitializedNameError = self.context.firstCoverInitializedNameError
+
+ self.context.isBindingElement = True
+ self.context.isAssignmentTarget = True
+ self.context.firstCoverInitializedNameError = None
+
+ result = parseFunction()
+ if self.context.firstCoverInitializedNameError is not None:
+ self.throwUnexpectedToken(self.context.firstCoverInitializedNameError)
+
+ self.context.isBindingElement = previousIsBindingElement
+ self.context.isAssignmentTarget = previousIsAssignmentTarget
+ self.context.firstCoverInitializedNameError = previousFirstCoverInitializedNameError
+
+ return result
+
+ def inheritCoverGrammar(self, parseFunction):
+ previousIsBindingElement = self.context.isBindingElement
+ previousIsAssignmentTarget = self.context.isAssignmentTarget
+ previousFirstCoverInitializedNameError = self.context.firstCoverInitializedNameError
+
+ self.context.isBindingElement = True
+ self.context.isAssignmentTarget = True
+ self.context.firstCoverInitializedNameError = None
+
+ result = parseFunction()
+
+ self.context.isBindingElement = self.context.isBindingElement and previousIsBindingElement
+ self.context.isAssignmentTarget = self.context.isAssignmentTarget and previousIsAssignmentTarget
+ self.context.firstCoverInitializedNameError = previousFirstCoverInitializedNameError or self.context.firstCoverInitializedNameError
+
+ return result
+
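+ # Editor note (illustration): consumeSemicolon implements automatic
+ # semicolon insertion (ASI): an explicit ';' is consumed; otherwise the
+ # statement may end at a line terminator, a closing '}', or EOF, so
+ # 'a = 1\nb = 2' parses as two statements.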
+ def consumeSemicolon(self):
+ if self.match(';'):
+ self.nextToken()
+ elif not self.hasLineTerminator:
+ if self.lookahead.type is not Token.EOF and not self.match('}'):
+ self.throwUnexpectedToken(self.lookahead)
+ self.lastMarker.index = self.startMarker.index
+ self.lastMarker.line = self.startMarker.line
+ self.lastMarker.column = self.startMarker.column
+
+ # https://tc39.github.io/ecma262/#sec-primary-expression
+
+ def parsePrimaryExpression(self):
+ node = self.createNode()
+
+ typ = self.lookahead.type
+ if typ is Token.Identifier:
+ if (self.context.isModule or self.context.allowAwait) and self.lookahead.value == 'await':
+ self.tolerateUnexpectedToken(self.lookahead)
+ expr = self.parseFunctionExpression() if self.matchAsyncFunction() else self.finalize(node, Node.Identifier(self.nextToken().value))
+
+ elif typ in (
+ Token.NumericLiteral,
+ Token.StringLiteral,
+ ):
+ if self.context.strict and self.lookahead.octal:
+ self.tolerateUnexpectedToken(self.lookahead, Messages.StrictOctalLiteral)
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ token = self.nextToken()
+ raw = self.getTokenRaw(token)
+ expr = self.finalize(node, Node.Literal(token.value, raw))
+
+ elif typ is Token.BooleanLiteral:
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ token = self.nextToken()
+ raw = self.getTokenRaw(token)
+ expr = self.finalize(node, Node.Literal(token.value == 'true', raw))
+
+ elif typ is Token.NullLiteral:
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ token = self.nextToken()
+ raw = self.getTokenRaw(token)
+ expr = self.finalize(node, Node.Literal(None, raw))
+
+ elif typ is Token.Template:
+ expr = self.parseTemplateLiteral()
+
+ elif typ is Token.Punctuator:
+ value = self.lookahead.value
+ if value == '(':
+ self.context.isBindingElement = False
+ expr = self.inheritCoverGrammar(self.parseGroupExpression)
+ elif value == '[':
+ expr = self.inheritCoverGrammar(self.parseArrayInitializer)
+ elif value == '{':
+ expr = self.inheritCoverGrammar(self.parseObjectInitializer)
+ elif value in ('/', '/='):
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ self.scanner.index = self.startMarker.index
+ token = self.nextRegexToken()
+ raw = self.getTokenRaw(token)
+ expr = self.finalize(node, Node.RegexLiteral(token.regex, raw, token.pattern, token.flags))
+ else:
+ expr = self.throwUnexpectedToken(self.nextToken())
+
+ elif typ is Token.Keyword:
+ if not self.context.strict and self.context.allowYield and self.matchKeyword('yield'):
+ expr = self.parseIdentifierName()
+ elif not self.context.strict and self.matchKeyword('let'):
+ expr = self.finalize(node, Node.Identifier(self.nextToken().value))
+ else:
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ if self.matchKeyword('function'):
+ expr = self.parseFunctionExpression()
+ elif self.matchKeyword('this'):
+ self.nextToken()
+ expr = self.finalize(node, Node.ThisExpression())
+ elif self.matchKeyword('class'):
+ expr = self.parseClassExpression()
+ elif self.matchImportCall():
+ expr = self.parseImportCall()
+ else:
+ expr = self.throwUnexpectedToken(self.nextToken())
+
+ else:
+ expr = self.throwUnexpectedToken(self.nextToken())
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-array-initializer
+
+ def parseSpreadElement(self):
+ node = self.createNode()
+ self.expect('...')
+ arg = self.inheritCoverGrammar(self.parseAssignmentExpression)
+ return self.finalize(node, Node.SpreadElement(arg))
+
+ def parseArrayInitializer(self):
+ node = self.createNode()
+ elements = []
+
+ self.expect('[')
+ while not self.match(']'):
+ if self.match(','):
+ self.nextToken()
+ elements.append(None)
+ elif self.match('...'):
+ element = self.parseSpreadElement()
+ if not self.match(']'):
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ self.expect(',')
+ elements.append(element)
+ else:
+ elements.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
+ if not self.match(']'):
+ self.expect(',')
+ self.expect(']')
+
+ return self.finalize(node, Node.ArrayExpression(elements))
+
+ # https://tc39.github.io/ecma262/#sec-object-initializer
+
+ def parsePropertyMethod(self, params):
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+
+ previousStrict = self.context.strict
+ previousAllowStrictDirective = self.context.allowStrictDirective
+ self.context.allowStrictDirective = params.simple
+ body = self.isolateCoverGrammar(self.parseFunctionSourceElements)
+ if self.context.strict and params.firstRestricted:
+ self.tolerateUnexpectedToken(params.firstRestricted, params.message)
+ if self.context.strict and params.stricted:
+ self.tolerateUnexpectedToken(params.stricted, params.message)
+ self.context.strict = previousStrict
+ self.context.allowStrictDirective = previousAllowStrictDirective
+
+ return body
+
+ def parsePropertyMethodFunction(self):
+ isGenerator = False
+ node = self.createNode()
+
+ previousAllowYield = self.context.allowYield
+ self.context.allowYield = True
+ params = self.parseFormalParameters()
+ method = self.parsePropertyMethod(params)
+ self.context.allowYield = previousAllowYield
+
+ return self.finalize(node, Node.FunctionExpression(None, params.params, method, isGenerator))
+
+ def parsePropertyMethodAsyncFunction(self):
+ node = self.createNode()
+
+ previousAllowYield = self.context.allowYield
+ previousAwait = self.context.allowAwait
+ self.context.allowYield = False
+ self.context.allowAwait = True
+ params = self.parseFormalParameters()
+ method = self.parsePropertyMethod(params)
+ self.context.allowYield = previousAllowYield
+ self.context.allowAwait = previousAwait
+
+ return self.finalize(node, Node.AsyncFunctionExpression(None, params.params, method))
+
+ def parseObjectPropertyKey(self):
+ node = self.createNode()
+ token = self.nextToken()
+
+ typ = token.type
+ if typ in (
+ Token.StringLiteral,
+ Token.NumericLiteral,
+ ):
+ if self.context.strict and token.octal:
+ self.tolerateUnexpectedToken(token, Messages.StrictOctalLiteral)
+ raw = self.getTokenRaw(token)
+ key = self.finalize(node, Node.Literal(token.value, raw))
+
+ elif typ in (
+ Token.Identifier,
+ Token.BooleanLiteral,
+ Token.NullLiteral,
+ Token.Keyword,
+ ):
+ key = self.finalize(node, Node.Identifier(token.value))
+
+ elif typ is Token.Punctuator:
+ if token.value == '[':
+ key = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ self.expect(']')
+ else:
+ key = self.throwUnexpectedToken(token)
+
+ else:
+ key = self.throwUnexpectedToken(token)
+
+ return key
+
+ def isPropertyKey(self, key, value):
+ return (
+ (key.type is Syntax.Identifier and key.name == value) or
+ (key.type is Syntax.Literal and key.value == value)
+ )
+
+ def parseObjectProperty(self, hasProto):
+ node = self.createNode()
+ token = self.lookahead
+
+ key = None
+ value = None
+
+ computed = False
+ method = False
+ shorthand = False
+ isAsync = False
+
+ if token.type is Token.Identifier:
+ id = token.value
+ self.nextToken()
+ computed = self.match('[')
+ isAsync = not self.hasLineTerminator and (id == 'async') and not (self.match(':', '(', '*', ','))
+ key = self.parseObjectPropertyKey() if isAsync else self.finalize(node, Node.Identifier(id))
+ elif self.match('*'):
+ self.nextToken()
+ else:
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+
+ lookaheadPropertyKey = self.qualifiedPropertyName(self.lookahead)
+ if token.type is Token.Identifier and not isAsync and token.value == 'get' and lookaheadPropertyKey:
+ kind = 'get'
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+ self.context.allowYield = False
+ value = self.parseGetterMethod()
+
+ elif token.type is Token.Identifier and not isAsync and token.value == 'set' and lookaheadPropertyKey:
+ kind = 'set'
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+ value = self.parseSetterMethod()
+
+ elif token.type is Token.Punctuator and token.value == '*' and lookaheadPropertyKey:
+ kind = 'init'
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+ value = self.parseGeneratorMethod()
+ method = True
+
+ else:
+ if not key:
+ self.throwUnexpectedToken(self.lookahead)
+
+ kind = 'init'
+ if self.match(':') and not isAsync:
+ if not computed and self.isPropertyKey(key, '__proto__'):
+ if hasProto.value:
+ self.tolerateError(Messages.DuplicateProtoProperty)
+ hasProto.value = True
+ self.nextToken()
+ value = self.inheritCoverGrammar(self.parseAssignmentExpression)
+
+ elif self.match('('):
+ value = self.parsePropertyMethodAsyncFunction() if isAsync else self.parsePropertyMethodFunction()
+ method = True
+
+ elif token.type is Token.Identifier:
+ id = self.finalize(node, Node.Identifier(token.value))
+ if self.match('='):
+ self.context.firstCoverInitializedNameError = self.lookahead
+ self.nextToken()
+ shorthand = True
+ init = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ value = self.finalize(node, Node.AssignmentPattern(id, init))
+ else:
+ shorthand = True
+ value = id
+ else:
+ self.throwUnexpectedToken(self.nextToken())
+
+ return self.finalize(node, Node.Property(kind, key, computed, value, method, shorthand))
+
+ def parseObjectInitializer(self):
+ node = self.createNode()
+
+ self.expect('{')
+ properties = []
+ hasProto = Value(False)
+ while not self.match('}'):
+ properties.append(self.parseSpreadElement() if self.match('...') else self.parseObjectProperty(hasProto))
+ if not self.match('}'):
+ self.expectCommaSeparator()
+ self.expect('}')
+
+ return self.finalize(node, Node.ObjectExpression(properties))
+
+ # https://tc39.github.io/ecma262/#sec-template-literals
+
+ def parseTemplateHead(self):
+ assert self.lookahead.head, 'Template literal must start with a template head'
+
+ node = self.createNode()
+ token = self.nextToken()
+ raw = token.value
+ cooked = token.cooked
+
+ return self.finalize(node, Node.TemplateElement(raw, cooked, token.tail))
+
+ def parseTemplateElement(self):
+ if self.lookahead.type is not Token.Template:
+ self.throwUnexpectedToken()
+
+ node = self.createNode()
+ token = self.nextToken()
+ raw = token.value
+ cooked = token.cooked
+
+ return self.finalize(node, Node.TemplateElement(raw, cooked, token.tail))
+
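+ # A template literal alternates quasis (template elements) with embedded
+ # expressions: the head is consumed first, then elements are read until
+ # one is flagged as the tail.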
+ def parseTemplateLiteral(self):
+ node = self.createNode()
+
+ expressions = []
+ quasis = []
+
+ quasi = self.parseTemplateHead()
+ quasis.append(quasi)
+ while not quasi.tail:
+ expressions.append(self.parseExpression())
+ quasi = self.parseTemplateElement()
+ quasis.append(quasi)
+
+ return self.finalize(node, Node.TemplateLiteral(quasis, expressions))
+
+ # https://tc39.github.io/ecma262/#sec-grouping-operator
+
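+ # Destructuring targets are first parsed as plain expressions (the cover
+ # grammar); once an assignment or arrow is confirmed, the tree is
+ # rewritten in place: ArrayExpression -> ArrayPattern, SpreadElement ->
+ # RestElement, AssignmentExpression -> AssignmentPattern, and so on.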
+ def reinterpretExpressionAsPattern(self, expr):
+ typ = expr.type
+ if typ in (
+ Syntax.Identifier,
+ Syntax.MemberExpression,
+ Syntax.RestElement,
+ Syntax.AssignmentPattern,
+ ):
+ pass
+ elif typ is Syntax.SpreadElement:
+ expr.type = Syntax.RestElement
+ self.reinterpretExpressionAsPattern(expr.argument)
+ elif typ is Syntax.ArrayExpression:
+ expr.type = Syntax.ArrayPattern
+ for elem in expr.elements:
+ if elem is not None:
+ self.reinterpretExpressionAsPattern(elem)
+ elif typ is Syntax.ObjectExpression:
+ expr.type = Syntax.ObjectPattern
+ for prop in expr.properties:
+ self.reinterpretExpressionAsPattern(prop if prop.type is Syntax.SpreadElement else prop.value)
+ elif typ is Syntax.AssignmentExpression:
+ expr.type = Syntax.AssignmentPattern
+ del expr.operator
+ self.reinterpretExpressionAsPattern(expr.left)
+ else:
+ # Allow other node types for tolerant parsing.
+ pass
+
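+ # `(...)` stays ambiguous until the token after `)`: it may be a
+ # parenthesized expression, a sequence, or arrow-function parameters.
+ # ArrowParameterPlaceHolder defers that decision to
+ # parseAssignmentExpression, which resolves it on seeing `=>`.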
+ def parseGroupExpression(self):
+ self.expect('(')
+ if self.match(')'):
+ self.nextToken()
+ if not self.match('=>'):
+ self.expect('=>')
+ expr = Node.ArrowParameterPlaceHolder([])
+ else:
+ startToken = self.lookahead
+ params = []
+ if self.match('...'):
+ expr = self.parseRestElement(params)
+ self.expect(')')
+ if not self.match('=>'):
+ self.expect('=>')
+ expr = Node.ArrowParameterPlaceHolder([expr])
+ else:
+ arrow = False
+ self.context.isBindingElement = True
+ expr = self.inheritCoverGrammar(self.parseAssignmentExpression)
+
+ if self.match(','):
+ expressions = []
+
+ self.context.isAssignmentTarget = False
+ expressions.append(expr)
+ while self.lookahead.type is not Token.EOF:
+ if not self.match(','):
+ break
+ self.nextToken()
+ if self.match(')'):
+ self.nextToken()
+ for expression in expressions:
+ self.reinterpretExpressionAsPattern(expression)
+ arrow = True
+ expr = Node.ArrowParameterPlaceHolder(expressions)
+ elif self.match('...'):
+ if not self.context.isBindingElement:
+ self.throwUnexpectedToken(self.lookahead)
+ expressions.append(self.parseRestElement(params))
+ self.expect(')')
+ if not self.match('=>'):
+ self.expect('=>')
+ self.context.isBindingElement = False
+ for expression in expressions:
+ self.reinterpretExpressionAsPattern(expression)
+ arrow = True
+ expr = Node.ArrowParameterPlaceHolder(expressions)
+ else:
+ expressions.append(self.inheritCoverGrammar(self.parseAssignmentExpression))
+ if arrow:
+ break
+ if not arrow:
+ expr = self.finalize(self.startNode(startToken), Node.SequenceExpression(expressions))
+
+ if not arrow:
+ self.expect(')')
+ if self.match('=>'):
+ if expr.type is Syntax.Identifier and expr.name == 'yield':
+ arrow = True
+ expr = Node.ArrowParameterPlaceHolder([expr])
+ if not arrow:
+ if not self.context.isBindingElement:
+ self.throwUnexpectedToken(self.lookahead)
+
+ if expr.type is Syntax.SequenceExpression:
+ for expression in expr.expressions:
+ self.reinterpretExpressionAsPattern(expression)
+ else:
+ self.reinterpretExpressionAsPattern(expr)
+
+ if expr.type is Syntax.SequenceExpression:
+ parameters = expr.expressions
+ else:
+ parameters = [expr]
+ expr = Node.ArrowParameterPlaceHolder(parameters)
+ self.context.isBindingElement = False
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-left-hand-side-expressions
+
+ def parseArguments(self):
+ self.expect('(')
+ args = []
+ if not self.match(')'):
+ while True:
+ if self.match('...'):
+ expr = self.parseSpreadElement()
+ else:
+ expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ args.append(expr)
+ if self.match(')'):
+ break
+ self.expectCommaSeparator()
+ if self.match(')'):
+ break
+ self.expect(')')
+
+ return args
+
+ def isIdentifierName(self, token):
+ return (
+ token.type is Token.Identifier or
+ token.type is Token.Keyword or
+ token.type is Token.BooleanLiteral or
+ token.type is Token.NullLiteral
+ )
+
+ def parseIdentifierName(self):
+ node = self.createNode()
+ token = self.nextToken()
+ if not self.isIdentifierName(token):
+ self.throwUnexpectedToken(token)
+ return self.finalize(node, Node.Identifier(token.value))
+
+ def parseNewExpression(self):
+ node = self.createNode()
+
+ id = self.parseIdentifierName()
+ assert id.name == 'new', 'New expression must start with `new`'
+
+ if self.match('.'):
+ self.nextToken()
+ if self.lookahead.type is Token.Identifier and self.context.inFunctionBody and self.lookahead.value == 'target':
+ property = self.parseIdentifierName()
+ expr = Node.MetaProperty(id, property)
+ else:
+ self.throwUnexpectedToken(self.lookahead)
+ elif self.matchKeyword('import'):
+ self.throwUnexpectedToken(self.lookahead)
+ else:
+ callee = self.isolateCoverGrammar(self.parseLeftHandSideExpression)
+ args = self.parseArguments() if self.match('(') else []
+ expr = Node.NewExpression(callee, args)
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+
+ return self.finalize(node, expr)
+
+ def parseAsyncArgument(self):
+ arg = self.parseAssignmentExpression()
+ self.context.firstCoverInitializedNameError = None
+ return arg
+
+ def parseAsyncArguments(self):
+ self.expect('(')
+ args = []
+ if not self.match(')'):
+ while True:
+ if self.match('...'):
+ expr = self.parseSpreadElement()
+ else:
+ expr = self.isolateCoverGrammar(self.parseAsyncArgument)
+ args.append(expr)
+ if self.match(')'):
+ break
+ self.expectCommaSeparator()
+ if self.match(')'):
+ break
+ self.expect(')')
+
+ return args
+
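+ # Peek one token past `import` by saving and restoring the scanner state,
+ # so a dynamic `import(...)` call can be told apart from an import
+ # declaration without consuming any input.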
+ def matchImportCall(self):
+ match = self.matchKeyword('import')
+ if match:
+ state = self.scanner.saveState()
+ self.scanner.scanComments()
+ next = self.scanner.lex()
+ self.scanner.restoreState(state)
+ match = (next.type is Token.Punctuator) and (next.value == '(')
+
+ return match
+
+ def parseImportCall(self):
+ node = self.createNode()
+ self.expectKeyword('import')
+ return self.finalize(node, Node.Import())
+
+ def parseLeftHandSideExpressionAllowCall(self):
+ startToken = self.lookahead
+ maybeAsync = self.matchContextualKeyword('async')
+
+ previousAllowIn = self.context.allowIn
+ self.context.allowIn = True
+
+ if self.matchKeyword('super') and self.context.inFunctionBody:
+ expr = self.createNode()
+ self.nextToken()
+ expr = self.finalize(expr, Node.Super())
+ if not self.match('(') and not self.match('.') and not self.match('['):
+ self.throwUnexpectedToken(self.lookahead)
+ else:
+ expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression)
+
+ while True:
+ if self.match('.'):
+ self.context.isBindingElement = False
+ self.context.isAssignmentTarget = True
+ self.expect('.')
+ property = self.parseIdentifierName()
+ expr = self.finalize(self.startNode(startToken), Node.StaticMemberExpression(expr, property))
+
+ elif self.match('('):
+ asyncArrow = maybeAsync and (startToken.lineNumber == self.lookahead.lineNumber)
+ self.context.isBindingElement = False
+ self.context.isAssignmentTarget = False
+ if asyncArrow:
+ args = self.parseAsyncArguments()
+ else:
+ args = self.parseArguments()
+ if expr.type is Syntax.Import and len(args) != 1:
+ self.tolerateError(Messages.BadImportCallArity)
+ expr = self.finalize(self.startNode(startToken), Node.CallExpression(expr, args))
+ if asyncArrow and self.match('=>'):
+ for arg in args:
+ self.reinterpretExpressionAsPattern(arg)
+ expr = Node.AsyncArrowParameterPlaceHolder(args)
+ elif self.match('['):
+ self.context.isBindingElement = False
+ self.context.isAssignmentTarget = True
+ self.expect('[')
+ property = self.isolateCoverGrammar(self.parseExpression)
+ self.expect(']')
+ expr = self.finalize(self.startNode(startToken), Node.ComputedMemberExpression(expr, property))
+
+ elif self.lookahead.type is Token.Template and self.lookahead.head:
+ quasi = self.parseTemplateLiteral()
+ expr = self.finalize(self.startNode(startToken), Node.TaggedTemplateExpression(expr, quasi))
+
+ else:
+ break
+
+ self.context.allowIn = previousAllowIn
+
+ return expr
+
+ def parseSuper(self):
+ node = self.createNode()
+
+ self.expectKeyword('super')
+ if not self.match('[') and not self.match('.'):
+ self.throwUnexpectedToken(self.lookahead)
+
+ return self.finalize(node, Node.Super())
+
+ def parseLeftHandSideExpression(self):
+ assert self.context.allowIn, 'callee of a new expression always allows the in keyword.'
+
+ node = self.startNode(self.lookahead)
+ if self.matchKeyword('super') and self.context.inFunctionBody:
+ expr = self.parseSuper()
+ else:
+ expr = self.inheritCoverGrammar(self.parseNewExpression if self.matchKeyword('new') else self.parsePrimaryExpression)
+
+ while True:
+ if self.match('['):
+ self.context.isBindingElement = False
+ self.context.isAssignmentTarget = True
+ self.expect('[')
+ property = self.isolateCoverGrammar(self.parseExpression)
+ self.expect(']')
+ expr = self.finalize(node, Node.ComputedMemberExpression(expr, property))
+
+ elif self.match('.'):
+ self.context.isBindingElement = False
+ self.context.isAssignmentTarget = True
+ self.expect('.')
+ property = self.parseIdentifierName()
+ expr = self.finalize(node, Node.StaticMemberExpression(expr, property))
+
+ elif self.lookahead.type is Token.Template and self.lookahead.head:
+ quasi = self.parseTemplateLiteral()
+ expr = self.finalize(node, Node.TaggedTemplateExpression(expr, quasi))
+
+ else:
+ break
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-update-expressions
+
+ def parseUpdateExpression(self):
+ startToken = self.lookahead
+
+ if self.match('++', '--'):
+ node = self.startNode(startToken)
+ token = self.nextToken()
+ expr = self.inheritCoverGrammar(self.parseUnaryExpression)
+ if self.context.strict and expr.type is Syntax.Identifier and self.scanner.isRestrictedWord(expr.name):
+ self.tolerateError(Messages.StrictLHSPrefix)
+ if not self.context.isAssignmentTarget:
+ self.tolerateError(Messages.InvalidLHSInAssignment)
+ prefix = True
+ expr = self.finalize(node, Node.UpdateExpression(token.value, expr, prefix))
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ else:
+ expr = self.inheritCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
+ if not self.hasLineTerminator and self.lookahead.type is Token.Punctuator:
+ if self.match('++', '--'):
+ if self.context.strict and expr.type is Syntax.Identifier and self.scanner.isRestrictedWord(expr.name):
+ self.tolerateError(Messages.StrictLHSPostfix)
+ if not self.context.isAssignmentTarget:
+ self.tolerateError(Messages.InvalidLHSInAssignment)
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ operator = self.nextToken().value
+ prefix = False
+ expr = self.finalize(self.startNode(startToken), Node.UpdateExpression(operator, expr, prefix))
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-unary-operators
+
+ def parseAwaitExpression(self):
+ node = self.createNode()
+ self.nextToken()
+ argument = self.parseUnaryExpression()
+ return self.finalize(node, Node.AwaitExpression(argument))
+
+ def parseUnaryExpression(self):
+ if (
+ self.match('+', '-', '~', '!') or
+ self.matchKeyword('delete', 'void', 'typeof')
+ ):
+ node = self.startNode(self.lookahead)
+ token = self.nextToken()
+ expr = self.inheritCoverGrammar(self.parseUnaryExpression)
+ expr = self.finalize(node, Node.UnaryExpression(token.value, expr))
+ if self.context.strict and expr.operator == 'delete' and expr.argument.type is Syntax.Identifier:
+ self.tolerateError(Messages.StrictDelete)
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ elif self.context.allowAwait and self.matchContextualKeyword('await'):
+ expr = self.parseAwaitExpression()
+ else:
+ expr = self.parseUpdateExpression()
+
+ return expr
+
+ def parseExponentiationExpression(self):
+ startToken = self.lookahead
+
+ expr = self.inheritCoverGrammar(self.parseUnaryExpression)
+ if expr.type is not Syntax.UnaryExpression and self.match('**'):
+ self.nextToken()
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ left = expr
+ right = self.isolateCoverGrammar(self.parseExponentiationExpression)
+ expr = self.finalize(self.startNode(startToken), Node.BinaryExpression('**', left, right))
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-exp-operator
+ # https://tc39.github.io/ecma262/#sec-multiplicative-operators
+ # https://tc39.github.io/ecma262/#sec-additive-operators
+ # https://tc39.github.io/ecma262/#sec-bitwise-shift-operators
+ # https://tc39.github.io/ecma262/#sec-relational-operators
+ # https://tc39.github.io/ecma262/#sec-equality-operators
+ # https://tc39.github.io/ecma262/#sec-binary-bitwise-operators
+ # https://tc39.github.io/ecma262/#sec-binary-logical-operators
+
+ def binaryPrecedence(self, token):
+ op = token.value
+ if token.type is Token.Punctuator:
+ precedence = self.operatorPrecedence.get(op, 0)
+ elif token.type is Token.Keyword:
+ precedence = 7 if (op == 'instanceof' or (self.context.allowIn and op == 'in')) else 0
+ else:
+ precedence = 0
+ return precedence
+
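+ # Operator-precedence (shift-reduce) parsing: operands and operators are
+ # pushed onto `stack`, and a reduction fires whenever the incoming
+ # operator does not bind tighter than the one on top. In `a + b * c`,
+ # `*` outranks `+`, so `b * c` is reduced before the final `+`.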
+ def parseBinaryExpression(self):
+ startToken = self.lookahead
+
+ expr = self.inheritCoverGrammar(self.parseExponentiationExpression)
+
+ token = self.lookahead
+ prec = self.binaryPrecedence(token)
+ if prec > 0:
+ self.nextToken()
+
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+
+ markers = [startToken, self.lookahead]
+ left = expr
+ right = self.isolateCoverGrammar(self.parseExponentiationExpression)
+
+ stack = [left, token.value, right]
+ precedences = [prec]
+ while True:
+ prec = self.binaryPrecedence(self.lookahead)
+ if prec <= 0:
+ break
+
+ # Reduce: make a binary expression from the three topmost entries.
+ while len(stack) > 2 and prec <= precedences[-1]:
+ right = stack.pop()
+ operator = stack.pop()
+ precedences.pop()
+ left = stack.pop()
+ markers.pop()
+ node = self.startNode(markers[-1])
+ stack.append(self.finalize(node, Node.BinaryExpression(operator, left, right)))
+
+ # Shift.
+ stack.append(self.nextToken().value)
+ precedences.append(prec)
+ markers.append(self.lookahead)
+ stack.append(self.isolateCoverGrammar(self.parseExponentiationExpression))
+
+ # Final reduce to clean-up the stack.
+ i = len(stack) - 1
+ expr = stack[i]
+
+ lastMarker = markers.pop()
+ while i > 1:
+ marker = markers.pop()
+ lastLineStart = lastMarker.lineStart if lastMarker else 0
+ node = self.startNode(marker, lastLineStart)
+ operator = stack[i - 1]
+ expr = self.finalize(node, Node.BinaryExpression(operator, stack[i - 2], expr))
+ i -= 2
+ lastMarker = marker
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-conditional-operator
+
+ def parseConditionalExpression(self):
+ startToken = self.lookahead
+
+ expr = self.inheritCoverGrammar(self.parseBinaryExpression)
+ if self.match('?'):
+ self.nextToken()
+
+ previousAllowIn = self.context.allowIn
+ self.context.allowIn = True
+ consequent = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ self.context.allowIn = previousAllowIn
+
+ self.expect(':')
+ alternate = self.isolateCoverGrammar(self.parseAssignmentExpression)
+
+ expr = self.finalize(self.startNode(startToken), Node.ConditionalExpression(expr, consequent, alternate))
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-assignment-operators
+
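+ # Recursively validates every identifier bound by a (possibly nested)
+ # pattern, and clears options.simple once any parameter is not a plain
+ # identifier, which later forbids a 'use strict' directive in the body.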
+ def checkPatternParam(self, options, param):
+ typ = param.type
+ if typ is Syntax.Identifier:
+ self.validateParam(options, param, param.name)
+ elif typ is Syntax.RestElement:
+ self.checkPatternParam(options, param.argument)
+ elif typ is Syntax.AssignmentPattern:
+ self.checkPatternParam(options, param.left)
+ elif typ is Syntax.ArrayPattern:
+ for element in param.elements:
+ if element is not None:
+ self.checkPatternParam(options, element)
+ elif typ is Syntax.ObjectPattern:
+ for prop in param.properties:
+ self.checkPatternParam(options, prop if prop.type is Syntax.RestElement else prop.value)
+
+ options.simple = options.simple and isinstance(param, Node.Identifier)
+
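+ # Converts an already-parsed cover grammar (a lone identifier or an
+ # ArrowParameterPlaceHolder) into a validated formal-parameter list,
+ # returning None when the expression cannot serve as arrow parameters.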
+ def reinterpretAsCoverFormalsList(self, expr):
+ params = [expr]
+
+ asyncArrow = False
+ typ = expr.type
+ if typ is Syntax.Identifier:
+ pass
+ elif typ is Syntax.ArrowParameterPlaceHolder:
+ params = expr.params
+ asyncArrow = expr.isAsync
+ else:
+ return None
+
+ options = Params(
+ simple=True,
+ paramSet={},
+ )
+
+ for param in params:
+ if param.type is Syntax.AssignmentPattern:
+ if param.right.type is Syntax.YieldExpression:
+ if param.right.argument:
+ self.throwUnexpectedToken(self.lookahead)
+ param.right.type = Syntax.Identifier
+ param.right.name = 'yield'
+ del param.right.argument
+ del param.right.delegate
+ elif asyncArrow and param.type is Syntax.Identifier and param.name == 'await':
+ self.throwUnexpectedToken(self.lookahead)
+ self.checkPatternParam(options, param)
+
+ if self.context.strict or not self.context.allowYield:
+ for param in params:
+ if param.type is Syntax.YieldExpression:
+ self.throwUnexpectedToken(self.lookahead)
+
+ if options.message is Messages.StrictParamDupe:
+ token = options.stricted if self.context.strict else options.firstRestricted
+ self.throwUnexpectedToken(token, options.message)
+
+ return Params(
+ simple=options.simple,
+ params=params,
+ stricted=options.stricted,
+ firstRestricted=options.firstRestricted,
+ message=options.message
+ )
+
+ def parseAssignmentExpression(self):
+ if not self.context.allowYield and self.matchKeyword('yield'):
+ expr = self.parseYieldExpression()
+ else:
+ startToken = self.lookahead
+ token = startToken
+ expr = self.parseConditionalExpression()
+
+ if token.type is Token.Identifier and (token.lineNumber == self.lookahead.lineNumber) and token.value == 'async':
+ if self.lookahead.type is Token.Identifier or self.matchKeyword('yield'):
+ arg = self.parsePrimaryExpression()
+ self.reinterpretExpressionAsPattern(arg)
+ expr = Node.AsyncArrowParameterPlaceHolder([arg])
+
+ if expr.type is Syntax.ArrowParameterPlaceHolder or self.match('=>'):
+
+ # https://tc39.github.io/ecma262/#sec-arrow-function-definitions
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ isAsync = expr.isAsync
+ list = self.reinterpretAsCoverFormalsList(expr)
+
+ if list:
+ if self.hasLineTerminator:
+ self.tolerateUnexpectedToken(self.lookahead)
+ self.context.firstCoverInitializedNameError = None
+
+ previousStrict = self.context.strict
+ previousAllowStrictDirective = self.context.allowStrictDirective
+ self.context.allowStrictDirective = list.simple
+
+ previousAllowYield = self.context.allowYield
+ previousAwait = self.context.allowAwait
+ self.context.allowYield = True
+ self.context.allowAwait = isAsync
+
+ node = self.startNode(startToken)
+ self.expect('=>')
+ if self.match('{'):
+ previousAllowIn = self.context.allowIn
+ self.context.allowIn = True
+ body = self.parseFunctionSourceElements()
+ self.context.allowIn = previousAllowIn
+ else:
+ body = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ expression = body.type is not Syntax.BlockStatement
+
+ if self.context.strict and list.firstRestricted:
+ self.throwUnexpectedToken(list.firstRestricted, list.message)
+ if self.context.strict and list.stricted:
+ self.tolerateUnexpectedToken(list.stricted, list.message)
+ if isAsync:
+ expr = self.finalize(node, Node.AsyncArrowFunctionExpression(list.params, body, expression))
+ else:
+ expr = self.finalize(node, Node.ArrowFunctionExpression(list.params, body, expression))
+
+ self.context.strict = previousStrict
+ self.context.allowStrictDirective = previousAllowStrictDirective
+ self.context.allowYield = previousAllowYield
+ self.context.allowAwait = previousAwait
+ else:
+ if self.matchAssign():
+ if not self.context.isAssignmentTarget:
+ self.tolerateError(Messages.InvalidLHSInAssignment)
+
+ if self.context.strict and expr.type is Syntax.Identifier:
+ id = expr
+ if self.scanner.isRestrictedWord(id.name):
+ self.tolerateUnexpectedToken(token, Messages.StrictLHSAssignment)
+ if self.scanner.isStrictModeReservedWord(id.name):
+ self.tolerateUnexpectedToken(token, Messages.StrictReservedWord)
+
+ if not self.match('='):
+ self.context.isAssignmentTarget = False
+ self.context.isBindingElement = False
+ else:
+ self.reinterpretExpressionAsPattern(expr)
+
+ token = self.nextToken()
+ operator = token.value
+ right = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ expr = self.finalize(self.startNode(startToken), Node.AssignmentExpression(operator, expr, right))
+ self.context.firstCoverInitializedNameError = None
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-comma-operator
+
+ def parseExpression(self):
+ startToken = self.lookahead
+ expr = self.isolateCoverGrammar(self.parseAssignmentExpression)
+
+ if self.match(','):
+ expressions = []
+ expressions.append(expr)
+ while self.lookahead.type is not Token.EOF:
+ if not self.match(','):
+ break
+ self.nextToken()
+ expressions.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
+
+ expr = self.finalize(self.startNode(startToken), Node.SequenceExpression(expressions))
+
+ return expr
+
+ # https://tc39.github.io/ecma262/#sec-block
+
+ def parseStatementListItem(self):
+ self.context.isAssignmentTarget = True
+ self.context.isBindingElement = True
+ if self.lookahead.type is Token.Keyword:
+ value = self.lookahead.value
+ if value == 'export':
+ if not self.context.isModule:
+ self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalExportDeclaration)
+ statement = self.parseExportDeclaration()
+ elif value == 'import':
+ if self.matchImportCall():
+ statement = self.parseExpressionStatement()
+ else:
+ if not self.context.isModule:
+ self.tolerateUnexpectedToken(self.lookahead, Messages.IllegalImportDeclaration)
+ statement = self.parseImportDeclaration()
+ elif value == 'const':
+ statement = self.parseLexicalDeclaration(Params(inFor=False))
+ elif value == 'function':
+ statement = self.parseFunctionDeclaration()
+ elif value == 'class':
+ statement = self.parseClassDeclaration()
+ elif value == 'let':
+ statement = self.parseLexicalDeclaration(Params(inFor=False)) if self.isLexicalDeclaration() else self.parseStatement()
+ else:
+ statement = self.parseStatement()
+ else:
+ statement = self.parseStatement()
+
+ return statement
+
+ def parseBlock(self):
+ node = self.createNode()
+
+ self.expect('{')
+ block = []
+ while True:
+ if self.match('}'):
+ break
+ block.append(self.parseStatementListItem())
+ self.expect('}')
+
+ return self.finalize(node, Node.BlockStatement(block))
+
+ # https://tc39.github.io/ecma262/#sec-let-and-const-declarations
+
+ def parseLexicalBinding(self, kind, options):
+ node = self.createNode()
+ params = []
+ id = self.parsePattern(params, kind)
+
+ if self.context.strict and id.type is Syntax.Identifier:
+ if self.scanner.isRestrictedWord(id.name):
+ self.tolerateError(Messages.StrictVarName)
+
+ init = None
+ if kind == 'const':
+ if not self.matchKeyword('in') and not self.matchContextualKeyword('of'):
+ if self.match('='):
+ self.nextToken()
+ init = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ else:
+ self.throwError(Messages.DeclarationMissingInitializer, 'const')
+ elif (not options.inFor and id.type is not Syntax.Identifier) or self.match('='):
+ self.expect('=')
+ init = self.isolateCoverGrammar(self.parseAssignmentExpression)
+
+ return self.finalize(node, Node.VariableDeclarator(id, init))
+
+ def parseBindingList(self, kind, options):
+ lst = [self.parseLexicalBinding(kind, options)]
+
+ while self.match(','):
+ self.nextToken()
+ lst.append(self.parseLexicalBinding(kind, options))
+
+ return lst
+
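+ # `let` is only a keyword in certain positions; peek at the next token
+ # (again via scanner save/restore) to decide whether `let` starts a
+ # lexical declaration or is merely an identifier expression.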
+ def isLexicalDeclaration(self):
+ state = self.scanner.saveState()
+ self.scanner.scanComments()
+ next = self.scanner.lex()
+ self.scanner.restoreState(state)
+
+ return (
+ (next.type is Token.Identifier) or
+ (next.type is Token.Punctuator and next.value == '[') or
+ (next.type is Token.Punctuator and next.value == '{') or
+ (next.type is Token.Keyword and next.value == 'let') or
+ (next.type is Token.Keyword and next.value == 'yield')
+ )
+
+ def parseLexicalDeclaration(self, options):
+ node = self.createNode()
+ kind = self.nextToken().value
+ assert kind == 'let' or kind == 'const', 'Lexical declaration must be either let or const'
+
+ declarations = self.parseBindingList(kind, options)
+ self.consumeSemicolon()
+
+ return self.finalize(node, Node.VariableDeclaration(declarations, kind))
+
+ # https://tc39.github.io/ecma262/#sec-destructuring-binding-patterns
+
+ def parseBindingRestElement(self, params, kind=None):
+ node = self.createNode()
+
+ self.expect('...')
+ arg = self.parsePattern(params, kind)
+
+ return self.finalize(node, Node.RestElement(arg))
+
+ def parseArrayPattern(self, params, kind=None):
+ node = self.createNode()
+
+ self.expect('[')
+ elements = []
+ while not self.match(']'):
+ if self.match(','):
+ self.nextToken()
+ elements.append(None)
+ else:
+ if self.match('...'):
+ elements.append(self.parseBindingRestElement(params, kind))
+ break
+ else:
+ elements.append(self.parsePatternWithDefault(params, kind))
+ if not self.match(']'):
+ self.expect(',')
+ self.expect(']')
+
+ return self.finalize(node, Node.ArrayPattern(elements))
+
+ def parsePropertyPattern(self, params, kind=None):
+ node = self.createNode()
+
+ computed = False
+ shorthand = False
+ method = False
+
+ key = None
+
+ if self.lookahead.type is Token.Identifier:
+ keyToken = self.lookahead
+ key = self.parseVariableIdentifier()
+ init = self.finalize(node, Node.Identifier(keyToken.value))
+ if self.match('='):
+ params.append(keyToken)
+ shorthand = True
+ self.nextToken()
+ expr = self.parseAssignmentExpression()
+ value = self.finalize(self.startNode(keyToken), Node.AssignmentPattern(init, expr))
+ elif not self.match(':'):
+ params.append(keyToken)
+ shorthand = True
+ value = init
+ else:
+ self.expect(':')
+ value = self.parsePatternWithDefault(params, kind)
+ else:
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+ self.expect(':')
+ value = self.parsePatternWithDefault(params, kind)
+
+ return self.finalize(node, Node.Property('init', key, computed, value, method, shorthand))
+
+ def parseRestProperty(self, params, kind):
+ node = self.createNode()
+ self.expect('...')
+ arg = self.parsePattern(params)
+ if self.match('='):
+ self.throwError(Messages.DefaultRestProperty)
+ if not self.match('}'):
+ self.throwError(Messages.PropertyAfterRestProperty)
+ return self.finalize(node, Node.RestElement(arg))
+
+ def parseObjectPattern(self, params, kind=None):
+ node = self.createNode()
+ properties = []
+
+ self.expect('{')
+ while not self.match('}'):
+ properties.append(self.parseRestProperty(params, kind) if self.match('...') else self.parsePropertyPattern(params, kind))
+ if not self.match('}'):
+ self.expect(',')
+ self.expect('}')
+
+ return self.finalize(node, Node.ObjectPattern(properties))
+
+ def parsePattern(self, params, kind=None):
+ if self.match('['):
+ pattern = self.parseArrayPattern(params, kind)
+ elif self.match('{'):
+ pattern = self.parseObjectPattern(params, kind)
+ else:
+ if self.matchKeyword('let') and (kind in ('const', 'let')):
+ self.tolerateUnexpectedToken(self.lookahead, Messages.LetInLexicalBinding)
+ params.append(self.lookahead)
+ pattern = self.parseVariableIdentifier(kind)
+
+ return pattern
+
+ def parsePatternWithDefault(self, params, kind=None):
+ startToken = self.lookahead
+
+ pattern = self.parsePattern(params, kind)
+ if self.match('='):
+ self.nextToken()
+ previousAllowYield = self.context.allowYield
+ self.context.allowYield = True
+ right = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ self.context.allowYield = previousAllowYield
+ pattern = self.finalize(self.startNode(startToken), Node.AssignmentPattern(pattern, right))
+
+ return pattern
+
+ # https://tc39.github.io/ecma262/#sec-variable-statement
+
+ def parseVariableIdentifier(self, kind=None):
+ node = self.createNode()
+
+ token = self.nextToken()
+ if token.type is Token.Keyword and token.value == 'yield':
+ if self.context.strict:
+ self.tolerateUnexpectedToken(token, Messages.StrictReservedWord)
+ elif not self.context.allowYield:
+ self.throwUnexpectedToken(token)
+ elif token.type is not Token.Identifier:
+ if self.context.strict and token.type is Token.Keyword and self.scanner.isStrictModeReservedWord(token.value):
+ self.tolerateUnexpectedToken(token, Messages.StrictReservedWord)
+ else:
+ if self.context.strict or token.value != 'let' or kind != 'var':
+ self.throwUnexpectedToken(token)
+ elif (self.context.isModule or self.context.allowAwait) and token.type is Token.Identifier and token.value == 'await':
+ self.tolerateUnexpectedToken(token)
+
+ return self.finalize(node, Node.Identifier(token.value))
+
+ def parseVariableDeclaration(self, options):
+ node = self.createNode()
+
+ params = []
+ id = self.parsePattern(params, 'var')
+
+ if self.context.strict and id.type is Syntax.Identifier:
+ if self.scanner.isRestrictedWord(id.name):
+ self.tolerateError(Messages.StrictVarName)
+
+ init = None
+ if self.match('='):
+ self.nextToken()
+ init = self.isolateCoverGrammar(self.parseAssignmentExpression)
+ elif id.type is not Syntax.Identifier and not options.inFor:
+ self.expect('=')
+
+ return self.finalize(node, Node.VariableDeclarator(id, init))
+
+ def parseVariableDeclarationList(self, options):
+ opt = Params(inFor=options.inFor)
+
+ lst = []
+ lst.append(self.parseVariableDeclaration(opt))
+ while self.match(','):
+ self.nextToken()
+ lst.append(self.parseVariableDeclaration(opt))
+
+ return lst
+
+ def parseVariableStatement(self):
+ node = self.createNode()
+ self.expectKeyword('var')
+ declarations = self.parseVariableDeclarationList(Params(inFor=False))
+ self.consumeSemicolon()
+
+ return self.finalize(node, Node.VariableDeclaration(declarations, 'var'))
+
+ # https://tc39.github.io/ecma262/#sec-empty-statement
+
+ def parseEmptyStatement(self):
+ node = self.createNode()
+ self.expect(';')
+ return self.finalize(node, Node.EmptyStatement())
+
+ # https://tc39.github.io/ecma262/#sec-expression-statement
+
+ def parseExpressionStatement(self):
+ node = self.createNode()
+ expr = self.parseExpression()
+ self.consumeSemicolon()
+ return self.finalize(node, Node.ExpressionStatement(expr))
+
+ # https://tc39.github.io/ecma262/#sec-if-statement
+
+ def parseIfClause(self):
+ if self.context.strict and self.matchKeyword('function'):
+ self.tolerateError(Messages.StrictFunction)
+ return self.parseStatement()
+
+ def parseIfStatement(self):
+ node = self.createNode()
+ alternate = None
+
+ self.expectKeyword('if')
+ self.expect('(')
+ test = self.parseExpression()
+
+ if not self.match(')') and self.config.tolerant:
+ self.tolerateUnexpectedToken(self.nextToken())
+ consequent = self.finalize(self.createNode(), Node.EmptyStatement())
+ else:
+ self.expect(')')
+ consequent = self.parseIfClause()
+ if self.matchKeyword('else'):
+ self.nextToken()
+ alternate = self.parseIfClause()
+
+ return self.finalize(node, Node.IfStatement(test, consequent, alternate))
+
+ # https://tc39.github.io/ecma262/#sec-do-while-statement
+
+ def parseDoWhileStatement(self):
+ node = self.createNode()
+ self.expectKeyword('do')
+
+ previousInIteration = self.context.inIteration
+ self.context.inIteration = True
+ body = self.parseStatement()
+ self.context.inIteration = previousInIteration
+
+ self.expectKeyword('while')
+ self.expect('(')
+ test = self.parseExpression()
+
+ if not self.match(')') and self.config.tolerant:
+ self.tolerateUnexpectedToken(self.nextToken())
+ else:
+ self.expect(')')
+ if self.match(';'):
+ self.nextToken()
+
+ return self.finalize(node, Node.DoWhileStatement(body, test))
+
+ # https://tc39.github.io/ecma262/#sec-while-statement
+
+ def parseWhileStatement(self):
+ node = self.createNode()
+
+ self.expectKeyword('while')
+ self.expect('(')
+ test = self.parseExpression()
+
+ if not self.match(')') and self.config.tolerant:
+ self.tolerateUnexpectedToken(self.nextToken())
+ body = self.finalize(self.createNode(), Node.EmptyStatement())
+ else:
+ self.expect(')')
+
+ previousInIteration = self.context.inIteration
+ self.context.inIteration = True
+ body = self.parseStatement()
+ self.context.inIteration = previousInIteration
+
+ return self.finalize(node, Node.WhileStatement(test, body))
+
+ # https://tc39.github.io/ecma262/#sec-for-statement
+ # https://tc39.github.io/ecma262/#sec-for-in-and-for-of-statements
+
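+ # All three `for` forms share one header parse: the init clause is read
+ # first with `in` disallowed, and a following `in` or `of` then turns it
+ # into the left-hand side of a ForInStatement or ForOfStatement.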
+ def parseForStatement(self):
+ init = None
+ test = None
+ update = None
+ forIn = True
+ left = None
+ right = None
+
+ node = self.createNode()
+ self.expectKeyword('for')
+ self.expect('(')
+
+ if self.match(';'):
+ self.nextToken()
+ else:
+ if self.matchKeyword('var'):
+ init = self.createNode()
+ self.nextToken()
+
+ previousAllowIn = self.context.allowIn
+ self.context.allowIn = False
+ declarations = self.parseVariableDeclarationList(Params(inFor=True))
+ self.context.allowIn = previousAllowIn
+
+ if len(declarations) == 1 and self.matchKeyword('in'):
+ decl = declarations[0]
+ if decl.init and (decl.id.type is Syntax.ArrayPattern or decl.id.type is Syntax.ObjectPattern or self.context.strict):
+ self.tolerateError(Messages.ForInOfLoopInitializer, 'for-in')
+ init = self.finalize(init, Node.VariableDeclaration(declarations, 'var'))
+ self.nextToken()
+ left = init
+ right = self.parseExpression()
+ init = None
+ elif len(declarations) == 1 and declarations[0].init is None and self.matchContextualKeyword('of'):
+ init = self.finalize(init, Node.VariableDeclaration(declarations, 'var'))
+ self.nextToken()
+ left = init
+ right = self.parseAssignmentExpression()
+ init = None
+ forIn = False
+ else:
+ init = self.finalize(init, Node.VariableDeclaration(declarations, 'var'))
+ self.expect(';')
+ elif self.matchKeyword('const', 'let'):
+ init = self.createNode()
+ kind = self.nextToken().value
+
+ if not self.context.strict and self.lookahead.value == 'in':
+ init = self.finalize(init, Node.Identifier(kind))
+ self.nextToken()
+ left = init
+ right = self.parseExpression()
+ init = None
+ else:
+ previousAllowIn = self.context.allowIn
+ self.context.allowIn = False
+ declarations = self.parseBindingList(kind, Params(inFor=True))
+ self.context.allowIn = previousAllowIn
+
+ if len(declarations) == 1 and declarations[0].init is None and self.matchKeyword('in'):
+ init = self.finalize(init, Node.VariableDeclaration(declarations, kind))
+ self.nextToken()
+ left = init
+ right = self.parseExpression()
+ init = None
+ elif len(declarations) == 1 and declarations[0].init is None and self.matchContextualKeyword('of'):
+ init = self.finalize(init, Node.VariableDeclaration(declarations, kind))
+ self.nextToken()
+ left = init
+ right = self.parseAssignmentExpression()
+ init = None
+ forIn = False
+ else:
+ self.consumeSemicolon()
+ init = self.finalize(init, Node.VariableDeclaration(declarations, kind))
+ else:
+ initStartToken = self.lookahead
+ previousAllowIn = self.context.allowIn
+ self.context.allowIn = False
+ init = self.inheritCoverGrammar(self.parseAssignmentExpression)
+ self.context.allowIn = previousAllowIn
+
+ if self.matchKeyword('in'):
+ if not self.context.isAssignmentTarget or init.type is Syntax.AssignmentExpression:
+ self.tolerateError(Messages.InvalidLHSInForIn)
+
+ self.nextToken()
+ self.reinterpretExpressionAsPattern(init)
+ left = init
+ right = self.parseExpression()
+ init = None
+ elif self.matchContextualKeyword('of'):
+ if not self.context.isAssignmentTarget or init.type is Syntax.AssignmentExpression:
+ self.tolerateError(Messages.InvalidLHSInForLoop)
+
+ self.nextToken()
+ self.reinterpretExpressionAsPattern(init)
+ left = init
+ right = self.parseAssignmentExpression()
+ init = None
+ forIn = False
+ else:
+ if self.match(','):
+ initSeq = [init]
+ while self.match(','):
+ self.nextToken()
+ initSeq.append(self.isolateCoverGrammar(self.parseAssignmentExpression))
+ init = self.finalize(self.startNode(initStartToken), Node.SequenceExpression(initSeq))
+ self.expect(';')
+
+ if left is None:
+ if not self.match(';'):
+ test = self.parseExpression()
+ self.expect(';')
+ if not self.match(')'):
+ update = self.parseExpression()
+
+ if not self.match(')') and self.config.tolerant:
+ self.tolerateUnexpectedToken(self.nextToken())
+ body = self.finalize(self.createNode(), Node.EmptyStatement())
+ else:
+ self.expect(')')
+
+ previousInIteration = self.context.inIteration
+ self.context.inIteration = True
+ body = self.isolateCoverGrammar(self.parseStatement)
+ self.context.inIteration = previousInIteration
+
+ if left is None:
+ return self.finalize(node, Node.ForStatement(init, test, update, body))
+
+ if forIn:
+ return self.finalize(node, Node.ForInStatement(left, right, body))
+
+ return self.finalize(node, Node.ForOfStatement(left, right, body))
+
+ # https://tc39.github.io/ecma262/#sec-continue-statement
+
+ def parseContinueStatement(self):
+ node = self.createNode()
+ self.expectKeyword('continue')
+
+ label = None
+ if self.lookahead.type is Token.Identifier and not self.hasLineTerminator:
+ id = self.parseVariableIdentifier()
+ label = id
+
+ key = '$' + id.name
+ if key not in self.context.labelSet:
+ self.throwError(Messages.UnknownLabel, id.name)
+
+ self.consumeSemicolon()
+ if label is None and not self.context.inIteration:
+ self.throwError(Messages.IllegalContinue)
+
+ return self.finalize(node, Node.ContinueStatement(label))
+
+ # https://tc39.github.io/ecma262/#sec-break-statement
+
+ def parseBreakStatement(self):
+ node = self.createNode()
+ self.expectKeyword('break')
+
+ label = None
+ if self.lookahead.type is Token.Identifier and not self.hasLineTerminator:
+ id = self.parseVariableIdentifier()
+
+ key = '$' + id.name
+ if key not in self.context.labelSet:
+ self.throwError(Messages.UnknownLabel, id.name)
+ label = id
+
+ self.consumeSemicolon()
+ if label is None and not self.context.inIteration and not self.context.inSwitch:
+ self.throwError(Messages.IllegalBreak)
+
+ return self.finalize(node, Node.BreakStatement(label))
+
+ # https://tc39.github.io/ecma262/#sec-return-statement
+
+ def parseReturnStatement(self):
+ if not self.context.inFunctionBody:
+ self.tolerateError(Messages.IllegalReturn)
+
+ node = self.createNode()
+ self.expectKeyword('return')
+
+ hasArgument = (
+ (
+ not self.match(';') and not self.match('}') and
+ not self.hasLineTerminator and self.lookahead.type is not Token.EOF
+ ) or
+ self.lookahead.type is Token.StringLiteral or
+ self.lookahead.type is Token.Template
+ )
+ argument = self.parseExpression() if hasArgument else None
+ self.consumeSemicolon()
+
+ return self.finalize(node, Node.ReturnStatement(argument))
+
+ # https://tc39.github.io/ecma262/#sec-with-statement
+
+ def parseWithStatement(self):
+ if self.context.strict:
+ self.tolerateError(Messages.StrictModeWith)
+
+ node = self.createNode()
+
+ self.expectKeyword('with')
+ self.expect('(')
+ object = self.parseExpression()
+
+ if not self.match(')') and self.config.tolerant:
+ self.tolerateUnexpectedToken(self.nextToken())
+ body = self.finalize(self.createNode(), Node.EmptyStatement())
+ else:
+ self.expect(')')
+ body = self.parseStatement()
+
+ return self.finalize(node, Node.WithStatement(object, body))
+
+ # https://tc39.github.io/ecma262/#sec-switch-statement
+
+ def parseSwitchCase(self):
+ node = self.createNode()
+
+ if self.matchKeyword('default'):
+ self.nextToken()
+ test = None
+ else:
+ self.expectKeyword('case')
+ test = self.parseExpression()
+ self.expect(':')
+
+ consequent = []
+ while True:
+ if self.match('}') or self.matchKeyword('default', 'case'):
+ break
+ consequent.append(self.parseStatementListItem())
+
+ return self.finalize(node, Node.SwitchCase(test, consequent))
+
+ def parseSwitchStatement(self):
+ node = self.createNode()
+ self.expectKeyword('switch')
+
+ self.expect('(')
+ discriminant = self.parseExpression()
+ self.expect(')')
+
+ previousInSwitch = self.context.inSwitch
+ self.context.inSwitch = True
+
+ cases = []
+ defaultFound = False
+ self.expect('{')
+ while True:
+ if self.match('}'):
+ break
+ clause = self.parseSwitchCase()
+ if clause.test is None:
+ if defaultFound:
+ self.throwError(Messages.MultipleDefaultsInSwitch)
+ defaultFound = True
+ cases.append(clause)
+ self.expect('}')
+
+ self.context.inSwitch = previousInSwitch
+
+ return self.finalize(node, Node.SwitchStatement(discriminant, cases))
+
+ # https://tc39.github.io/ecma262/#sec-labelled-statements
+
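+ # A statement beginning with an identifier is parsed as an expression
+ # first; a `:` after a plain identifier means it was really a label,
+ # which is tracked in labelSet so break/continue can validate targets.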
+ def parseLabelledStatement(self):
+ node = self.createNode()
+ expr = self.parseExpression()
+
+ if expr.type is Syntax.Identifier and self.match(':'):
+ self.nextToken()
+
+ id = expr
+ key = '$' + id.name
+ if key in self.context.labelSet:
+ self.throwError(Messages.Redeclaration, 'Label', id.name)
+
+ self.context.labelSet[key] = True
+ if self.matchKeyword('class'):
+ self.tolerateUnexpectedToken(self.lookahead)
+ body = self.parseClassDeclaration()
+ elif self.matchKeyword('function'):
+ token = self.lookahead
+ declaration = self.parseFunctionDeclaration()
+ if self.context.strict:
+ self.tolerateUnexpectedToken(token, Messages.StrictFunction)
+ elif declaration.generator:
+ self.tolerateUnexpectedToken(token, Messages.GeneratorInLegacyContext)
+ body = declaration
+ else:
+ body = self.parseStatement()
+ del self.context.labelSet[key]
+
+ statement = Node.LabeledStatement(id, body)
+ else:
+ self.consumeSemicolon()
+ statement = Node.ExpressionStatement(expr)
+
+ return self.finalize(node, statement)
+
+ # https://tc39.github.io/ecma262/#sec-throw-statement
+
+ def parseThrowStatement(self):
+ node = self.createNode()
+ self.expectKeyword('throw')
+
+ if self.hasLineTerminator:
+ self.throwError(Messages.NewlineAfterThrow)
+
+ argument = self.parseExpression()
+ self.consumeSemicolon()
+
+ return self.finalize(node, Node.ThrowStatement(argument))
+
+ # https://tc39.github.io/ecma262/#sec-try-statement
+
+ def parseCatchClause(self):
+ node = self.createNode()
+
+ self.expectKeyword('catch')
+
+ self.expect('(')
+ if self.match(')'):
+ self.throwUnexpectedToken(self.lookahead)
+
+ params = []
+ param = self.parsePattern(params)
+ paramMap = {}
+ for p in params:
+ key = '$' + p.value
+ if key in paramMap:
+ self.tolerateError(Messages.DuplicateBinding, p.value)
+ paramMap[key] = True
+
+ if self.context.strict and param.type is Syntax.Identifier:
+ if self.scanner.isRestrictedWord(param.name):
+ self.tolerateError(Messages.StrictCatchVariable)
+
+ self.expect(')')
+ body = self.parseBlock()
+
+ return self.finalize(node, Node.CatchClause(param, body))
+
+ def parseFinallyClause(self):
+ self.expectKeyword('finally')
+ return self.parseBlock()
+
+ def parseTryStatement(self):
+ node = self.createNode()
+ self.expectKeyword('try')
+
+ block = self.parseBlock()
+ handler = self.parseCatchClause() if self.matchKeyword('catch') else None
+ finalizer = self.parseFinallyClause() if self.matchKeyword('finally') else None
+
+ if not handler and not finalizer:
+ self.throwError(Messages.NoCatchOrFinally)
+
+ return self.finalize(node, Node.TryStatement(block, handler, finalizer))
+
+ # https://tc39.github.io/ecma262/#sec-debugger-statement
+
+ def parseDebuggerStatement(self):
+ node = self.createNode()
+ self.expectKeyword('debugger')
+ self.consumeSemicolon()
+ return self.finalize(node, Node.DebuggerStatement())
+
+ # https://tc39.github.io/ecma262/#sec-ecmascript-language-statements-and-declarations
+
+ def parseStatement(self):
+ typ = self.lookahead.type
+ if typ in (
+ Token.BooleanLiteral,
+ Token.NullLiteral,
+ Token.NumericLiteral,
+ Token.StringLiteral,
+ Token.Template,
+ Token.RegularExpression,
+ ):
+ statement = self.parseExpressionStatement()
+
+ elif typ is Token.Punctuator:
+ value = self.lookahead.value
+ if value == '{':
+ statement = self.parseBlock()
+ elif value == '(':
+ statement = self.parseExpressionStatement()
+ elif value == ';':
+ statement = self.parseEmptyStatement()
+ else:
+ statement = self.parseExpressionStatement()
+
+ elif typ is Token.Identifier:
+ statement = self.parseFunctionDeclaration() if self.matchAsyncFunction() else self.parseLabelledStatement()
+
+ elif typ is Token.Keyword:
+ value = self.lookahead.value
+ if value == 'break':
+ statement = self.parseBreakStatement()
+ elif value == 'continue':
+ statement = self.parseContinueStatement()
+ elif value == 'debugger':
+ statement = self.parseDebuggerStatement()
+ elif value == 'do':
+ statement = self.parseDoWhileStatement()
+ elif value == 'for':
+ statement = self.parseForStatement()
+ elif value == 'function':
+ statement = self.parseFunctionDeclaration()
+ elif value == 'if':
+ statement = self.parseIfStatement()
+ elif value == 'return':
+ statement = self.parseReturnStatement()
+ elif value == 'switch':
+ statement = self.parseSwitchStatement()
+ elif value == 'throw':
+ statement = self.parseThrowStatement()
+ elif value == 'try':
+ statement = self.parseTryStatement()
+ elif value == 'var':
+ statement = self.parseVariableStatement()
+ elif value == 'while':
+ statement = self.parseWhileStatement()
+ elif value == 'with':
+ statement = self.parseWithStatement()
+ else:
+ statement = self.parseExpressionStatement()
+
+ else:
+ statement = self.throwUnexpectedToken(self.lookahead)
+
+ return statement
+
+ # https://tc39.github.io/ecma262/#sec-function-definitions
+
+ def parseFunctionSourceElements(self):
+ node = self.createNode()
+
+ self.expect('{')
+ body = self.parseDirectivePrologues()
+
+ previousLabelSet = self.context.labelSet
+ previousInIteration = self.context.inIteration
+ previousInSwitch = self.context.inSwitch
+ previousInFunctionBody = self.context.inFunctionBody
+
+ self.context.labelSet = {}
+ self.context.inIteration = False
+ self.context.inSwitch = False
+ self.context.inFunctionBody = True
+
+ while self.lookahead.type is not Token.EOF:
+ if self.match('}'):
+ break
+ body.append(self.parseStatementListItem())
+
+ self.expect('}')
+
+ self.context.labelSet = previousLabelSet
+ self.context.inIteration = previousInIteration
+ self.context.inSwitch = previousInSwitch
+ self.context.inFunctionBody = previousInFunctionBody
+
+ return self.finalize(node, Node.BlockStatement(body))
+
+ def validateParam(self, options, param, name):
+ key = '$' + name
+ if self.context.strict:
+ if self.scanner.isRestrictedWord(name):
+ options.stricted = param
+ options.message = Messages.StrictParamName
+ if key in options.paramSet:
+ options.stricted = param
+ options.message = Messages.StrictParamDupe
+ elif not options.firstRestricted:
+ if self.scanner.isRestrictedWord(name):
+ options.firstRestricted = param
+ options.message = Messages.StrictParamName
+ elif self.scanner.isStrictModeReservedWord(name):
+ options.firstRestricted = param
+ options.message = Messages.StrictReservedWord
+ elif key in options.paramSet:
+ options.stricted = param
+ options.message = Messages.StrictParamDupe
+
+ options.paramSet[key] = True
+
+ def parseRestElement(self, params):
+ node = self.createNode()
+
+ self.expect('...')
+ arg = self.parsePattern(params)
+ if self.match('='):
+ self.throwError(Messages.DefaultRestParameter)
+ if not self.match(')'):
+ self.throwError(Messages.ParameterAfterRestParameter)
+
+ return self.finalize(node, Node.RestElement(arg))
+
+ def parseFormalParameter(self, options):
+ params = []
+ param = self.parseRestElement(params) if self.match('...') else self.parsePatternWithDefault(params)
+ for p in params:
+ self.validateParam(options, p, p.value)
+ options.simple = options.simple and isinstance(param, Node.Identifier)
+ options.params.append(param)
+
+ def parseFormalParameters(self, firstRestricted=None):
+ options = Params(
+ simple=True,
+ params=[],
+ firstRestricted=firstRestricted
+ )
+
+ self.expect('(')
+ if not self.match(')'):
+ options.paramSet = {}
+ while self.lookahead.type is not Token.EOF:
+ self.parseFormalParameter(options)
+ if self.match(')'):
+ break
+ self.expect(',')
+ if self.match(')'):
+ break
+ self.expect(')')
+
+ return Params(
+ simple=options.simple,
+ params=options.params,
+ stricted=options.stricted,
+ firstRestricted=options.firstRestricted,
+ message=options.message
+ )
+
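+ # Like matchImportCall, peek one token ahead: `async` begins an async
+ # function declaration only when `function` follows on the same line.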
+ def matchAsyncFunction(self):
+ match = self.matchContextualKeyword('async')
+ if match:
+ state = self.scanner.saveState()
+ self.scanner.scanComments()
+ next = self.scanner.lex()
+ self.scanner.restoreState(state)
+
+ match = (state.lineNumber == next.lineNumber) and (next.type is Token.Keyword) and (next.value == 'function')
+
+ return match
+
+ def parseFunctionDeclaration(self, identifierIsOptional=False):
+ node = self.createNode()
+
+ isAsync = self.matchContextualKeyword('async')
+ if isAsync:
+ self.nextToken()
+
+ self.expectKeyword('function')
+
+ isGenerator = False if isAsync else self.match('*')
+ if isGenerator:
+ self.nextToken()
+
+ id = None
+ firstRestricted = None
+
+ if not identifierIsOptional or not self.match('('):
+ token = self.lookahead
+ id = self.parseVariableIdentifier()
+ if self.context.strict:
+ if self.scanner.isRestrictedWord(token.value):
+ self.tolerateUnexpectedToken(token, Messages.StrictFunctionName)
+ else:
+ if self.scanner.isRestrictedWord(token.value):
+ firstRestricted = token
+ message = Messages.StrictFunctionName
+ elif self.scanner.isStrictModeReservedWord(token.value):
+ firstRestricted = token
+ message = Messages.StrictReservedWord
+
+ previousAllowAwait = self.context.allowAwait
+ previousAllowYield = self.context.allowYield
+ self.context.allowAwait = isAsync
+ self.context.allowYield = not isGenerator
+
+ formalParameters = self.parseFormalParameters(firstRestricted)
+ params = formalParameters.params
+ stricted = formalParameters.stricted
+ firstRestricted = formalParameters.firstRestricted
+ if formalParameters.message:
+ message = formalParameters.message
+
+ previousStrict = self.context.strict
+ previousAllowStrictDirective = self.context.allowStrictDirective
+ self.context.allowStrictDirective = formalParameters.simple
+ body = self.parseFunctionSourceElements()
+ if self.context.strict and firstRestricted:
+ self.throwUnexpectedToken(firstRestricted, message)
+ if self.context.strict and stricted:
+ self.tolerateUnexpectedToken(stricted, message)
+
+ self.context.strict = previousStrict
+ self.context.allowStrictDirective = previousAllowStrictDirective
+ self.context.allowAwait = previousAllowAwait
+ self.context.allowYield = previousAllowYield
+
+ if isAsync:
+ return self.finalize(node, Node.AsyncFunctionDeclaration(id, params, body))
+
+ return self.finalize(node, Node.FunctionDeclaration(id, params, body, isGenerator))
+
+ def parseFunctionExpression(self):
+ node = self.createNode()
+
+ isAsync = self.matchContextualKeyword('async')
+ if isAsync:
+ self.nextToken()
+
+ self.expectKeyword('function')
+
+ isGenerator = False if isAsync else self.match('*')
+ if isGenerator:
+ self.nextToken()
+
+ id = None
+ firstRestricted = None
+
+ previousAllowAwait = self.context.allowAwait
+ previousAllowYield = self.context.allowYield
+ self.context.allowAwait = isAsync
+ self.context.allowYield = not isGenerator
+
+ if not self.match('('):
+ token = self.lookahead
+ id = self.parseIdentifierName() if not self.context.strict and not isGenerator and self.matchKeyword('yield') else self.parseVariableIdentifier()
+ if self.context.strict:
+ if self.scanner.isRestrictedWord(token.value):
+ self.tolerateUnexpectedToken(token, Messages.StrictFunctionName)
+ else:
+ if self.scanner.isRestrictedWord(token.value):
+ firstRestricted = token
+ message = Messages.StrictFunctionName
+ elif self.scanner.isStrictModeReservedWord(token.value):
+ firstRestricted = token
+ message = Messages.StrictReservedWord
+
+ formalParameters = self.parseFormalParameters(firstRestricted)
+ params = formalParameters.params
+ stricted = formalParameters.stricted
+ firstRestricted = formalParameters.firstRestricted
+ if formalParameters.message:
+ message = formalParameters.message
+
+ previousStrict = self.context.strict
+ previousAllowStrictDirective = self.context.allowStrictDirective
+ self.context.allowStrictDirective = formalParameters.simple
+ body = self.parseFunctionSourceElements()
+ if self.context.strict and firstRestricted:
+ self.throwUnexpectedToken(firstRestricted, message)
+ if self.context.strict and stricted:
+ self.tolerateUnexpectedToken(stricted, message)
+ self.context.strict = previousStrict
+ self.context.allowStrictDirective = previousAllowStrictDirective
+ self.context.allowAwait = previousAllowAwait
+ self.context.allowYield = previousAllowYield
+
+ if isAsync:
+ return self.finalize(node, Node.AsyncFunctionExpression(id, params, body))
+
+ return self.finalize(node, Node.FunctionExpression(id, params, body, isGenerator))
+
+ # https://tc39.github.io/ecma262/#sec-directive-prologues-and-the-use-strict-directive
+
+ def parseDirective(self):
+ token = self.lookahead
+
+ node = self.createNode()
+ expr = self.parseExpression()
+ directive = self.getTokenRaw(token)[1:-1] if expr.type is Syntax.Literal else None
+ self.consumeSemicolon()
+
+ return self.finalize(node, Node.Directive(expr, directive) if directive else Node.ExpressionStatement(expr))
+
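+ # Leading string-literal statements form the directive prologue; a
+ # 'use strict' directive switches the parser into strict mode and
+ # retroactively reports any earlier octal-escaped directive token.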
+ def parseDirectivePrologues(self):
+ firstRestricted = None
+
+ body = []
+ while True:
+ token = self.lookahead
+ if token.type is not Token.StringLiteral:
+ break
+
+ statement = self.parseDirective()
+ body.append(statement)
+ directive = statement.directive
+ if not isinstance(directive, basestring):
+ break
+
+ if directive == 'use strict':
+ self.context.strict = True
+ if firstRestricted:
+ self.tolerateUnexpectedToken(firstRestricted, Messages.StrictOctalLiteral)
+ if not self.context.allowStrictDirective:
+ self.tolerateUnexpectedToken(token, Messages.IllegalLanguageModeDirective)
+ else:
+ if not firstRestricted and token.octal:
+ firstRestricted = token
+
+ return body
+
+ # https://tc39.github.io/ecma262/#sec-method-definitions
+
+ def qualifiedPropertyName(self, token):
+ typ = token.type
+ if typ in (
+ Token.Identifier,
+ Token.StringLiteral,
+ Token.BooleanLiteral,
+ Token.NullLiteral,
+ Token.NumericLiteral,
+ Token.Keyword,
+ ):
+ return True
+ elif typ is Token.Punctuator:
+ return token.value == '['
+ return False
+
+ def parseGetterMethod(self):
+ node = self.createNode()
+
+ isGenerator = False
+ previousAllowYield = self.context.allowYield
+ self.context.allowYield = not isGenerator
+ formalParameters = self.parseFormalParameters()
+ if len(formalParameters.params) > 0:
+ self.tolerateError(Messages.BadGetterArity)
+ method = self.parsePropertyMethod(formalParameters)
+ self.context.allowYield = previousAllowYield
+
+ return self.finalize(node, Node.FunctionExpression(None, formalParameters.params, method, isGenerator))
+
+ def parseSetterMethod(self):
+ node = self.createNode()
+
+ isGenerator = False
+ previousAllowYield = self.context.allowYield
+ self.context.allowYield = not isGenerator
+ formalParameters = self.parseFormalParameters()
+ if len(formalParameters.params) != 1:
+ self.tolerateError(Messages.BadSetterArity)
+ elif isinstance(formalParameters.params[0], Node.RestElement):
+ self.tolerateError(Messages.BadSetterRestParameter)
+ method = self.parsePropertyMethod(formalParameters)
+ self.context.allowYield = previousAllowYield
+
+ return self.finalize(node, Node.FunctionExpression(None, formalParameters.params, method, isGenerator))
+
+ def parseGeneratorMethod(self):
+ node = self.createNode()
+
+ isGenerator = True
+ previousAllowYield = self.context.allowYield
+
+ self.context.allowYield = True
+ params = self.parseFormalParameters()
+ self.context.allowYield = False
+ method = self.parsePropertyMethod(params)
+ self.context.allowYield = previousAllowYield
+
+ return self.finalize(node, Node.FunctionExpression(None, params.params, method, isGenerator))
+
+ # https://tc39.github.io/ecma262/#sec-generator-function-definitions
+
+ def isStartOfExpression(self):
+ start = True
+
+ value = self.lookahead.value
+ typ = self.lookahead.type
+ if typ is Token.Punctuator:
+            start = value in ('[', '(', '{', '+', '-', '!', '~', '++', '--', '/', '/=')  # '/' and '/=' can begin a regular expression literal
+
+ elif typ is Token.Keyword:
+ start = value in ('class', 'delete', 'function', 'let', 'new', 'super', 'this', 'typeof', 'void', 'yield')
+
+ return start
+
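+    # Illustrative forms (not executed): "yield" with no argument, "yield x",
+    # and "yield* gen()" (delegate=True). A line terminator directly after
+    # "yield" terminates the expression, so any argument must start on the
+    # same line.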
+ def parseYieldExpression(self):
+ node = self.createNode()
+ self.expectKeyword('yield')
+
+ argument = None
+ delegate = False
+ if not self.hasLineTerminator:
+ previousAllowYield = self.context.allowYield
+ self.context.allowYield = False
+ delegate = self.match('*')
+ if delegate:
+ self.nextToken()
+ argument = self.parseAssignmentExpression()
+ elif self.isStartOfExpression():
+ argument = self.parseAssignmentExpression()
+ self.context.allowYield = previousAllowYield
+
+ return self.finalize(node, Node.YieldExpression(argument, delegate))
+
+ # https://tc39.github.io/ecma262/#sec-class-definitions
+
+ def parseClassElement(self, hasConstructor):
+ token = self.lookahead
+ node = self.createNode()
+
+ kind = ''
+ key = None
+ value = None
+ computed = False
+ isStatic = False
+ isAsync = False
+
+ if self.match('*'):
+ self.nextToken()
+
+ else:
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+ id = key
+ if id.name == 'static' and (self.qualifiedPropertyName(self.lookahead) or self.match('*')):
+ token = self.lookahead
+ isStatic = True
+ computed = self.match('[')
+ if self.match('*'):
+ self.nextToken()
+ else:
+ key = self.parseObjectPropertyKey()
+ if token.type is Token.Identifier and not self.hasLineTerminator and token.value == 'async':
+ punctuator = self.lookahead.value
+ if punctuator != ':' and punctuator != '(' and punctuator != '*':
+ isAsync = True
+ token = self.lookahead
+ key = self.parseObjectPropertyKey()
+ if token.type is Token.Identifier and token.value == 'constructor':
+ self.tolerateUnexpectedToken(token, Messages.ConstructorIsAsync)
+
+ lookaheadPropertyKey = self.qualifiedPropertyName(self.lookahead)
+ if token.type is Token.Identifier:
+ if token.value == 'get' and lookaheadPropertyKey:
+ kind = 'get'
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+ self.context.allowYield = False
+ value = self.parseGetterMethod()
+ elif token.value == 'set' and lookaheadPropertyKey:
+ kind = 'set'
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+ value = self.parseSetterMethod()
+ elif self.config.classProperties and not self.match('('):
+ kind = 'init'
+ id = self.finalize(node, Node.Identifier(token.value))
+ if self.match('='):
+ self.nextToken()
+ value = self.parseAssignmentExpression()
+
+ elif token.type is Token.Punctuator and token.value == '*' and lookaheadPropertyKey:
+ kind = 'method'
+ computed = self.match('[')
+ key = self.parseObjectPropertyKey()
+ value = self.parseGeneratorMethod()
+
+ if not kind and key and self.match('('):
+ kind = 'method'
+ value = self.parsePropertyMethodAsyncFunction() if isAsync else self.parsePropertyMethodFunction()
+
+ if not kind:
+ self.throwUnexpectedToken(self.lookahead)
+
+ if not computed:
+ if isStatic and self.isPropertyKey(key, 'prototype'):
+ self.throwUnexpectedToken(token, Messages.StaticPrototype)
+ if not isStatic and self.isPropertyKey(key, 'constructor'):
+ if kind != 'method' or (value and value.generator):
+ self.throwUnexpectedToken(token, Messages.ConstructorSpecialMethod)
+ if hasConstructor.value:
+ self.throwUnexpectedToken(token, Messages.DuplicateConstructor)
+ else:
+ hasConstructor.value = True
+ kind = 'constructor'
+
+ if kind in ('constructor', 'method', 'get', 'set'):
+ return self.finalize(node, Node.MethodDefinition(key, computed, value, kind, isStatic))
+
+ else:
+ return self.finalize(node, Node.FieldDefinition(key, computed, value, kind, isStatic))
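+
+    # Illustrative example (not executed): for "class C { static get x() {} }"
+    # this method reports kind='get' with isStatic=True, while a plain
+    # "constructor() {}" is re-labelled kind='constructor' and a duplicate
+    # constructor raises an error.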
+
+ def parseClassElementList(self):
+ body = []
+ hasConstructor = Value(False)
+
+ self.expect('{')
+ while not self.match('}'):
+ if self.match(';'):
+ self.nextToken()
+ else:
+ body.append(self.parseClassElement(hasConstructor))
+ self.expect('}')
+
+ return body
+
+ def parseClassBody(self):
+ node = self.createNode()
+ elementList = self.parseClassElementList()
+
+ return self.finalize(node, Node.ClassBody(elementList))
+
+ def parseClassDeclaration(self, identifierIsOptional=False):
+ node = self.createNode()
+
+ previousStrict = self.context.strict
+ self.context.strict = True
+ self.expectKeyword('class')
+
+ id = None if identifierIsOptional and self.lookahead.type is not Token.Identifier else self.parseVariableIdentifier()
+ superClass = None
+ if self.matchKeyword('extends'):
+ self.nextToken()
+ superClass = self.isolateCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
+ classBody = self.parseClassBody()
+ self.context.strict = previousStrict
+
+ return self.finalize(node, Node.ClassDeclaration(id, superClass, classBody))
+
+ def parseClassExpression(self):
+ node = self.createNode()
+
+ previousStrict = self.context.strict
+ self.context.strict = True
+ self.expectKeyword('class')
+ id = self.parseVariableIdentifier() if self.lookahead.type is Token.Identifier else None
+ superClass = None
+ if self.matchKeyword('extends'):
+ self.nextToken()
+ superClass = self.isolateCoverGrammar(self.parseLeftHandSideExpressionAllowCall)
+ classBody = self.parseClassBody()
+ self.context.strict = previousStrict
+
+ return self.finalize(node, Node.ClassExpression(id, superClass, classBody))
+
+ # https://tc39.github.io/ecma262/#sec-scripts
+ # https://tc39.github.io/ecma262/#sec-modules
+
+ def parseModule(self):
+ self.context.strict = True
+ self.context.isModule = True
+ self.scanner.isModule = True
+ node = self.createNode()
+ body = self.parseDirectivePrologues()
+ while self.lookahead.type is not Token.EOF:
+ body.append(self.parseStatementListItem())
+ return self.finalize(node, Node.Module(body))
+
+ def parseScript(self):
+ node = self.createNode()
+ body = self.parseDirectivePrologues()
+ while self.lookahead.type is not Token.EOF:
+ body.append(self.parseStatementListItem())
+ return self.finalize(node, Node.Script(body))
+
+ # https://tc39.github.io/ecma262/#sec-imports
+
+ def parseModuleSpecifier(self):
+ node = self.createNode()
+
+ if self.lookahead.type is not Token.StringLiteral:
+ self.throwError(Messages.InvalidModuleSpecifier)
+
+ token = self.nextToken()
+ raw = self.getTokenRaw(token)
+ return self.finalize(node, Node.Literal(token.value, raw))
+
+ # import {<foo as bar>} ...
+ def parseImportSpecifier(self):
+ node = self.createNode()
+
+ if self.lookahead.type is Token.Identifier:
+ imported = self.parseVariableIdentifier()
+ local = imported
+ if self.matchContextualKeyword('as'):
+ self.nextToken()
+ local = self.parseVariableIdentifier()
+ else:
+ imported = self.parseIdentifierName()
+ local = imported
+ if self.matchContextualKeyword('as'):
+ self.nextToken()
+ local = self.parseVariableIdentifier()
+ else:
+ self.throwUnexpectedToken(self.nextToken())
+
+ return self.finalize(node, Node.ImportSpecifier(local, imported))
+
+    # {foo, bar as bas}
+ def parseNamedImports(self):
+ self.expect('{')
+ specifiers = []
+ while not self.match('}'):
+ specifiers.append(self.parseImportSpecifier())
+ if not self.match('}'):
+ self.expect(',')
+ self.expect('}')
+
+ return specifiers
+
+ # import <foo> ...
+ def parseImportDefaultSpecifier(self):
+ node = self.createNode()
+ local = self.parseIdentifierName()
+ return self.finalize(node, Node.ImportDefaultSpecifier(local))
+
+ # import <* as foo> ...
+ def parseImportNamespaceSpecifier(self):
+ node = self.createNode()
+
+ self.expect('*')
+ if not self.matchContextualKeyword('as'):
+ self.throwError(Messages.NoAsAfterImportNamespace)
+ self.nextToken()
+ local = self.parseIdentifierName()
+
+ return self.finalize(node, Node.ImportNamespaceSpecifier(local))
+
+ def parseImportDeclaration(self):
+ if self.context.inFunctionBody:
+ self.throwError(Messages.IllegalImportDeclaration)
+
+ node = self.createNode()
+ self.expectKeyword('import')
+
+ specifiers = []
+ if self.lookahead.type is Token.StringLiteral:
+ # import 'foo'
+ src = self.parseModuleSpecifier()
+ else:
+ if self.match('{'):
+ # import {bar
+ specifiers.extend(self.parseNamedImports())
+ elif self.match('*'):
+ # import * as foo
+ specifiers.append(self.parseImportNamespaceSpecifier())
+ elif self.isIdentifierName(self.lookahead) and not self.matchKeyword('default'):
+ # import foo
+ specifiers.append(self.parseImportDefaultSpecifier())
+ if self.match(','):
+ self.nextToken()
+ if self.match('*'):
+ # import foo, * as foo
+ specifiers.append(self.parseImportNamespaceSpecifier())
+ elif self.match('{'):
+ # import foo, {bar
+ specifiers.extend(self.parseNamedImports())
+ else:
+ self.throwUnexpectedToken(self.lookahead)
+ else:
+ self.throwUnexpectedToken(self.nextToken())
+
+ if not self.matchContextualKeyword('from'):
+ message = Messages.UnexpectedToken if self.lookahead.value else Messages.MissingFromClause
+ self.throwError(message, self.lookahead.value)
+ self.nextToken()
+ src = self.parseModuleSpecifier()
+ self.consumeSemicolon()
+
+ return self.finalize(node, Node.ImportDeclaration(specifiers, src))
+
+ # https://tc39.github.io/ecma262/#sec-exports
+
+ def parseExportSpecifier(self):
+ node = self.createNode()
+
+ local = self.parseIdentifierName()
+ exported = local
+ if self.matchContextualKeyword('as'):
+ self.nextToken()
+ exported = self.parseIdentifierName()
+
+ return self.finalize(node, Node.ExportSpecifier(local, exported))
+
+ def parseExportDefaultSpecifier(self):
+ node = self.createNode()
+ local = self.parseIdentifierName()
+ return self.finalize(node, Node.ExportDefaultSpecifier(local))
+
+ def parseExportDeclaration(self):
+ if self.context.inFunctionBody:
+ self.throwError(Messages.IllegalExportDeclaration)
+
+ node = self.createNode()
+ self.expectKeyword('export')
+
+ if self.matchKeyword('default'):
+ # export default ...
+ self.nextToken()
+ if self.matchKeyword('function'):
+                # export default function foo () {}
+                # export default function () {}
+ declaration = self.parseFunctionDeclaration(True)
+ exportDeclaration = self.finalize(node, Node.ExportDefaultDeclaration(declaration))
+ elif self.matchKeyword('class'):
+ # export default class foo {
+ declaration = self.parseClassDeclaration(True)
+ exportDeclaration = self.finalize(node, Node.ExportDefaultDeclaration(declaration))
+ elif self.matchContextualKeyword('async'):
+                # export default async function f () {}
+                # export default async function () {}
+ # export default async x => x
+ declaration = self.parseFunctionDeclaration(True) if self.matchAsyncFunction() else self.parseAssignmentExpression()
+ exportDeclaration = self.finalize(node, Node.ExportDefaultDeclaration(declaration))
+ else:
+ if self.matchContextualKeyword('from'):
+ self.throwError(Messages.UnexpectedToken, self.lookahead.value)
+ # export default {}
+ # export default []
+ # export default (1 + 2)
+ if self.match('{'):
+ declaration = self.parseObjectInitializer()
+ elif self.match('['):
+ declaration = self.parseArrayInitializer()
+ else:
+ declaration = self.parseAssignmentExpression()
+ self.consumeSemicolon()
+ exportDeclaration = self.finalize(node, Node.ExportDefaultDeclaration(declaration))
+
+ elif self.match('*'):
+ # export * from 'foo'
+ self.nextToken()
+ if not self.matchContextualKeyword('from'):
+ message = Messages.UnexpectedToken if self.lookahead.value else Messages.MissingFromClause
+ self.throwError(message, self.lookahead.value)
+ self.nextToken()
+ src = self.parseModuleSpecifier()
+ self.consumeSemicolon()
+ exportDeclaration = self.finalize(node, Node.ExportAllDeclaration(src))
+
+ elif self.lookahead.type is Token.Keyword:
+ # export var f = 1
+ value = self.lookahead.value
+ if value in (
+ 'let',
+ 'const',
+ ):
+ declaration = self.parseLexicalDeclaration(Params(inFor=False))
+ elif value in (
+ 'var',
+ 'class',
+ 'function',
+ ):
+ declaration = self.parseStatementListItem()
+ else:
+ self.throwUnexpectedToken(self.lookahead)
+ exportDeclaration = self.finalize(node, Node.ExportNamedDeclaration(declaration, [], None))
+
+ elif self.matchAsyncFunction():
+ declaration = self.parseFunctionDeclaration()
+ exportDeclaration = self.finalize(node, Node.ExportNamedDeclaration(declaration, [], None))
+
+ else:
+ specifiers = []
+ source = None
+ isExportFromIdentifier = False
+
+ expectSpecifiers = True
+ if self.lookahead.type is Token.Identifier:
+ specifiers.append(self.parseExportDefaultSpecifier())
+ if self.match(','):
+ self.nextToken()
+ else:
+ expectSpecifiers = False
+
+ if expectSpecifiers:
+ self.expect('{')
+ while not self.match('}'):
+ isExportFromIdentifier = isExportFromIdentifier or self.matchKeyword('default')
+ specifiers.append(self.parseExportSpecifier())
+ if not self.match('}'):
+ self.expect(',')
+ self.expect('}')
+
+ if self.matchContextualKeyword('from'):
+ # export {default} from 'foo'
+ # export {foo} from 'foo'
+ self.nextToken()
+ source = self.parseModuleSpecifier()
+ self.consumeSemicolon()
+ elif isExportFromIdentifier:
+ # export {default}; # missing fromClause
+ message = Messages.UnexpectedToken if self.lookahead.value else Messages.MissingFromClause
+ self.throwError(message, self.lookahead.value)
+ else:
+ # export {foo}
+ self.consumeSemicolon()
+ exportDeclaration = self.finalize(node, Node.ExportNamedDeclaration(None, specifiers, source))
+
+ return exportDeclaration
diff --git a/third_party/python/esprima/esprima/scanner.py b/third_party/python/esprima/esprima/scanner.py
new file mode 100644
index 0000000000..53502a51d3
--- /dev/null
+++ b/third_party/python/esprima/esprima/scanner.py
@@ -0,0 +1,1189 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+import re
+
+from .objects import Object
+from .compat import xrange, unicode, uchr, uord
+from .character import Character, HEX_CONV, OCTAL_CONV
+from .messages import Messages
+from .token import Token
+
+
+def hexValue(ch):
+ return HEX_CONV[ch]
+
+
+def octalValue(ch):
+ return OCTAL_CONV[ch]
+
+
+class RegExp(Object):
+ def __init__(self, pattern=None, flags=None):
+ self.pattern = pattern
+ self.flags = flags
+
+
+class Position(Object):
+ def __init__(self, line=None, column=None, offset=None):
+ self.line = line
+ self.column = column
+ self.offset = offset
+
+
+class SourceLocation(Object):
+ def __init__(self, start=None, end=None, source=None):
+ self.start = start
+ self.end = end
+ self.source = source
+
+
+class Comment(Object):
+ def __init__(self, multiLine=None, slice=None, range=None, loc=None):
+ self.multiLine = multiLine
+ self.slice = slice
+ self.range = range
+ self.loc = loc
+
+
+class RawToken(Object):
+ def __init__(self, type=None, value=None, pattern=None, flags=None, regex=None, octal=None, cooked=None, head=None, tail=None, lineNumber=None, lineStart=None, start=None, end=None):
+ self.type = type
+ self.value = value
+ self.pattern = pattern
+ self.flags = flags
+ self.regex = regex
+ self.octal = octal
+ self.cooked = cooked
+ self.head = head
+ self.tail = tail
+ self.lineNumber = lineNumber
+ self.lineStart = lineStart
+ self.start = start
+ self.end = end
+
+
+class ScannerState(Object):
+ def __init__(self, index=None, lineNumber=None, lineStart=None):
+ self.index = index
+ self.lineNumber = lineNumber
+ self.lineStart = lineStart
+
+
+class Octal(object):
+ def __init__(self, octal, code):
+ self.octal = octal
+ self.code = code
+
+
+class Scanner(object):
+ def __init__(self, code, handler):
+ self.source = unicode(code) + '\x00'
+ self.errorHandler = handler
+ self.trackComment = False
+ self.isModule = False
+
+ self.length = len(code)
+ self.index = 0
+ self.lineNumber = 1 if self.length > 0 else 0
+ self.lineStart = 0
+ self.curlyStack = []
+
+ def saveState(self):
+ return ScannerState(
+ index=self.index,
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart
+ )
+
+ def restoreState(self, state):
+ self.index = state.index
+ self.lineNumber = state.lineNumber
+ self.lineStart = state.lineStart
+
+ def eof(self):
+ return self.index >= self.length
+
+ def throwUnexpectedToken(self, message=Messages.UnexpectedTokenIllegal):
+ return self.errorHandler.throwError(self.index, self.lineNumber,
+ self.index - self.lineStart + 1, message)
+
+ def tolerateUnexpectedToken(self, message=Messages.UnexpectedTokenIllegal):
+ self.errorHandler.tolerateError(self.index, self.lineNumber,
+ self.index - self.lineStart + 1, message)
+
+ # https://tc39.github.io/ecma262/#sec-comments
+
+ def skipSingleLineComment(self, offset):
+ comments = []
+
+ if self.trackComment:
+ start = self.index - offset
+ loc = SourceLocation(
+ start=Position(
+ line=self.lineNumber,
+ column=self.index - self.lineStart - offset
+ ),
+ end=Position()
+ )
+
+ while not self.eof():
+ ch = self.source[self.index]
+ self.index += 1
+ if Character.isLineTerminator(ch):
+ if self.trackComment:
+ loc.end = Position(
+ line=self.lineNumber,
+ column=self.index - self.lineStart - 1
+ )
+ entry = Comment(
+ multiLine=False,
+ slice=[start + offset, self.index - 1],
+ range=[start, self.index - 1],
+ loc=loc
+ )
+ comments.append(entry)
+
+ if ch == '\r' and self.source[self.index] == '\n':
+ self.index += 1
+
+ self.lineNumber += 1
+ self.lineStart = self.index
+ return comments
+
+ if self.trackComment:
+ loc.end = Position(
+ line=self.lineNumber,
+ column=self.index - self.lineStart
+ )
+ entry = Comment(
+ multiLine=False,
+ slice=[start + offset, self.index],
+ range=[start, self.index],
+ loc=loc
+ )
+ comments.append(entry)
+
+ return comments
+
+ def skipMultiLineComment(self):
+ comments = []
+
+ if self.trackComment:
+ comments = []
+ start = self.index - 2
+ loc = SourceLocation(
+ start=Position(
+ line=self.lineNumber,
+ column=self.index - self.lineStart - 2
+ ),
+ end=Position()
+ )
+
+ while not self.eof():
+ ch = self.source[self.index]
+ if Character.isLineTerminator(ch):
+ if ch == '\r' and self.source[self.index + 1] == '\n':
+ self.index += 1
+
+ self.lineNumber += 1
+ self.index += 1
+ self.lineStart = self.index
+ elif ch == '*':
+ # Block comment ends with '*/'.
+ if self.source[self.index + 1] == '/':
+ self.index += 2
+ if self.trackComment:
+ loc.end = Position(
+ line=self.lineNumber,
+ column=self.index - self.lineStart
+ )
+ entry = Comment(
+ multiLine=True,
+ slice=[start + 2, self.index - 2],
+ range=[start, self.index],
+ loc=loc
+ )
+ comments.append(entry)
+
+ return comments
+
+ self.index += 1
+ else:
+ self.index += 1
+
+ # Ran off the end of the file - the whole thing is a comment
+ if self.trackComment:
+ loc.end = Position(
+ line=self.lineNumber,
+ column=self.index - self.lineStart
+ )
+ entry = Comment(
+ multiLine=True,
+ slice=[start + 2, self.index],
+ range=[start, self.index],
+ loc=loc
+ )
+ comments.append(entry)
+
+ self.tolerateUnexpectedToken()
+ return comments
+
+ def scanComments(self):
+ comments = []
+
+ start = self.index == 0
+ while not self.eof():
+ ch = self.source[self.index]
+
+ if Character.isWhiteSpace(ch):
+ self.index += 1
+ elif Character.isLineTerminator(ch):
+ self.index += 1
+ if ch == '\r' and self.source[self.index] == '\n':
+ self.index += 1
+
+ self.lineNumber += 1
+ self.lineStart = self.index
+ start = True
+ elif ch == '/': # U+002F is '/'
+ ch = self.source[self.index + 1]
+ if ch == '/':
+ self.index += 2
+ comment = self.skipSingleLineComment(2)
+ if self.trackComment:
+ comments.extend(comment)
+
+ start = True
+ elif ch == '*': # U+002A is '*'
+ self.index += 2
+ comment = self.skipMultiLineComment()
+ if self.trackComment:
+ comments.extend(comment)
+
+ else:
+ break
+
+ elif start and ch == '-': # U+002D is '-'
+ # U+003E is '>'
+ if self.source[self.index + 1:self.index + 3] == '->':
+ # '-->' is a single-line comment
+ self.index += 3
+ comment = self.skipSingleLineComment(3)
+ if self.trackComment:
+ comments.extend(comment)
+
+ else:
+ break
+
+ elif ch == '<' and not self.isModule: # U+003C is '<'
+ if self.source[self.index + 1:self.index + 4] == '!--':
+ self.index += 4 # `<!--`
+ comment = self.skipSingleLineComment(4)
+ if self.trackComment:
+ comments.extend(comment)
+
+ else:
+ break
+
+ else:
+ break
+
+ return comments
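+
+    # Illustrative note: for legacy HTML compatibility (Annex B.1.3),
+    # "<!--" starts a single-line comment in non-module code, and "-->"
+    # does so as well when it appears at the start of a line.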
+
+ # https://tc39.github.io/ecma262/#sec-future-reserved-words
+
+ def isFutureReservedWord(self, id):
+ return id in self.isFutureReservedWord.set
+ isFutureReservedWord.set = set((
+ 'enum',
+ 'export',
+ 'import',
+ 'super',
+ ))
+
+ def isStrictModeReservedWord(self, id):
+ return id in self.isStrictModeReservedWord.set
+ isStrictModeReservedWord.set = set((
+ 'implements',
+ 'interface',
+ 'package',
+ 'private',
+ 'protected',
+ 'public',
+ 'static',
+ 'yield',
+ 'let',
+ ))
+
+ def isRestrictedWord(self, id):
+ return id in self.isRestrictedWord.set
+ isRestrictedWord.set = set((
+ 'eval', 'arguments',
+ ))
+
+ # https://tc39.github.io/ecma262/#sec-keywords
+
+ def isKeyword(self, id):
+ return id in self.isKeyword.set
+ isKeyword.set = set((
+ 'if', 'in', 'do',
+
+ 'var', 'for', 'new',
+ 'try', 'let',
+
+ 'this', 'else', 'case',
+ 'void', 'with', 'enum',
+
+ 'while', 'break', 'catch',
+ 'throw', 'const', 'yield',
+ 'class', 'super',
+
+ 'return', 'typeof', 'delete',
+ 'switch', 'export', 'import',
+
+ 'default', 'finally', 'extends',
+
+ 'function', 'continue', 'debugger',
+
+ 'instanceof',
+ ))
+
+ def codePointAt(self, i):
+ return uord(self.source[i:i + 2])
+
+ def scanHexEscape(self, prefix):
+ length = 4 if prefix == 'u' else 2
+ code = 0
+
+ for i in xrange(length):
+ if not self.eof() and Character.isHexDigit(self.source[self.index]):
+ ch = self.source[self.index]
+ self.index += 1
+ code = code * 16 + hexValue(ch)
+ else:
+ return None
+
+ return uchr(code)
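+
+    # Illustrative example (not executed): positioned just after "\u", a call
+    # to scanHexEscape('u') consumes "0041" and returns 'A'; it returns None
+    # when fewer than four hex digits follow ('x' escapes need two).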
+
+ def scanUnicodeCodePointEscape(self):
+ ch = self.source[self.index]
+ code = 0
+
+        # At least one hex digit is required.
+ if ch == '}':
+ self.throwUnexpectedToken()
+
+ while not self.eof():
+ ch = self.source[self.index]
+ self.index += 1
+ if not Character.isHexDigit(ch):
+ break
+
+ code = code * 16 + hexValue(ch)
+
+ if code > 0x10FFFF or ch != '}':
+ self.throwUnexpectedToken()
+
+ return Character.fromCodePoint(code)
+
+ def getIdentifier(self):
+ start = self.index
+ self.index += 1
+ while not self.eof():
+ ch = self.source[self.index]
+ if ch == '\\':
+                # Backslash (U+005C) marks a Unicode escape sequence.
+ self.index = start
+ return self.getComplexIdentifier()
+ else:
+ cp = ord(ch)
+ if cp >= 0xD800 and cp < 0xDFFF:
+ # Need to handle surrogate pairs.
+ self.index = start
+ return self.getComplexIdentifier()
+
+ if Character.isIdentifierPart(ch):
+ self.index += 1
+ else:
+ break
+
+ return self.source[start:self.index]
+
+ def getComplexIdentifier(self):
+ cp = self.codePointAt(self.index)
+ id = Character.fromCodePoint(cp)
+ self.index += len(id)
+
+ # '\u' (U+005C, U+0075) denotes an escaped character.
+ if cp == 0x5C:
+ if self.source[self.index] != 'u':
+ self.throwUnexpectedToken()
+
+ self.index += 1
+ if self.source[self.index] == '{':
+ self.index += 1
+ ch = self.scanUnicodeCodePointEscape()
+ else:
+ ch = self.scanHexEscape('u')
+ if not ch or ch == '\\' or not Character.isIdentifierStart(ch[0]):
+ self.throwUnexpectedToken()
+
+ id = ch
+
+ while not self.eof():
+ cp = self.codePointAt(self.index)
+ ch = Character.fromCodePoint(cp)
+ if not Character.isIdentifierPart(ch):
+ break
+
+ id += ch
+ self.index += len(ch)
+
+ # '\u' (U+005C, U+0075) denotes an escaped character.
+ if cp == 0x5C:
+ id = id[:-1]
+ if self.source[self.index] != 'u':
+ self.throwUnexpectedToken()
+
+ self.index += 1
+ if self.source[self.index] == '{':
+ self.index += 1
+ ch = self.scanUnicodeCodePointEscape()
+ else:
+ ch = self.scanHexEscape('u')
+ if not ch or ch == '\\' or not Character.isIdentifierPart(ch[0]):
+ self.throwUnexpectedToken()
+
+ id += ch
+
+ return id
+
+ def octalToDecimal(self, ch):
+        # \0 on its own is not an octal escape sequence.
+ octal = ch != '0'
+ code = octalValue(ch)
+
+ if not self.eof() and Character.isOctalDigit(self.source[self.index]):
+ octal = True
+ code = code * 8 + octalValue(self.source[self.index])
+ self.index += 1
+
+        # Three octal digits are only allowed when the sequence
+        # starts with 0, 1, 2 or 3.
+ if ch in '0123' and not self.eof() and Character.isOctalDigit(self.source[self.index]):
+ code = code * 8 + octalValue(self.source[self.index])
+ self.index += 1
+
+ return Octal(octal, code)
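+
+    # Illustrative example (not executed): for the escape "\012", ch is '0'
+    # and the two following digits are consumed, giving Octal(True, 10),
+    # which cooks to the newline character.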
+
+ # https://tc39.github.io/ecma262/#sec-names-and-keywords
+
+ def scanIdentifier(self):
+ start = self.index
+
+ # Backslash (U+005C) starts an escaped character.
+ id = self.getComplexIdentifier() if self.source[start] == '\\' else self.getIdentifier()
+
+ # There is no keyword or literal with only one character.
+ # Thus, it must be an identifier.
+ if len(id) == 1:
+ type = Token.Identifier
+ elif self.isKeyword(id):
+ type = Token.Keyword
+ elif id == 'null':
+ type = Token.NullLiteral
+ elif id == 'true' or id == 'false':
+ type = Token.BooleanLiteral
+ else:
+ type = Token.Identifier
+
+ if type is not Token.Identifier and start + len(id) != self.index:
+ restore = self.index
+ self.index = start
+ self.tolerateUnexpectedToken(Messages.InvalidEscapedReservedWord)
+ self.index = restore
+
+ return RawToken(
+ type=type,
+ value=id,
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
+
+ # https://tc39.github.io/ecma262/#sec-punctuators
+
+ def scanPunctuator(self):
+ start = self.index
+
+ # Check for most common single-character punctuators.
+ str = self.source[self.index]
+ if str in (
+ '(',
+ '{',
+ ):
+ if str == '{':
+ self.curlyStack.append('{')
+
+ self.index += 1
+
+ elif str == '.':
+ self.index += 1
+ if self.source[self.index] == '.' and self.source[self.index + 1] == '.':
+ # Spread operator: ...
+ self.index += 2
+ str = '...'
+
+ elif str == '}':
+ self.index += 1
+ if self.curlyStack:
+ self.curlyStack.pop()
+
+ elif str in (
+ ')',
+ ';',
+ ',',
+ '[',
+ ']',
+ ':',
+ '?',
+ '~',
+ ):
+ self.index += 1
+
+ else:
+ # 4-character punctuator.
+ str = self.source[self.index:self.index + 4]
+ if str == '>>>=':
+ self.index += 4
+ else:
+
+ # 3-character punctuators.
+ str = str[:3]
+ if str in (
+ '===', '!==', '>>>',
+ '<<=', '>>=', '**='
+ ):
+ self.index += 3
+ else:
+
+ # 2-character punctuators.
+ str = str[:2]
+ if str in (
+ '&&', '||', '==', '!=',
+ '+=', '-=', '*=', '/=',
+ '++', '--', '<<', '>>',
+ '&=', '|=', '^=', '%=',
+ '<=', '>=', '=>', '**',
+ ):
+ self.index += 2
+ else:
+
+ # 1-character punctuators.
+ str = self.source[self.index]
+ if str in '<>=!+-*%&|^/':
+ self.index += 1
+
+ if self.index == start:
+ self.throwUnexpectedToken()
+
+ return RawToken(
+ type=Token.Punctuator,
+ value=str,
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
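+
+    # Illustrative note: punctuators are matched longest-first, so ">>>=" is
+    # a single token, while ">>> =" lexes as ">>>" followed by "=".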
+
+ # https://tc39.github.io/ecma262/#sec-literals-numeric-literals
+
+ def scanHexLiteral(self, start):
+ num = ''
+
+ while not self.eof():
+ if not Character.isHexDigit(self.source[self.index]):
+ break
+
+ num += self.source[self.index]
+ self.index += 1
+
+ if len(num) == 0:
+ self.throwUnexpectedToken()
+
+ if Character.isIdentifierStart(self.source[self.index]):
+ self.throwUnexpectedToken()
+
+ return RawToken(
+ type=Token.NumericLiteral,
+ value=int(num, 16),
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
+
+ def scanBinaryLiteral(self, start):
+ num = ''
+
+ while not self.eof():
+ ch = self.source[self.index]
+ if ch != '0' and ch != '1':
+ break
+
+ num += self.source[self.index]
+ self.index += 1
+
+ if len(num) == 0:
+ # only 0b or 0B
+ self.throwUnexpectedToken()
+
+ if not self.eof():
+ ch = self.source[self.index]
+ if Character.isIdentifierStart(ch) or Character.isDecimalDigit(ch):
+ self.throwUnexpectedToken()
+
+ return RawToken(
+ type=Token.NumericLiteral,
+ value=int(num, 2),
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
+
+ def scanOctalLiteral(self, prefix, start):
+ num = ''
+ octal = False
+
+ if Character.isOctalDigit(prefix[0]):
+ octal = True
+ num = '0' + self.source[self.index]
+ self.index += 1
+
+ while not self.eof():
+ if not Character.isOctalDigit(self.source[self.index]):
+ break
+
+ num += self.source[self.index]
+ self.index += 1
+
+ if not octal and len(num) == 0:
+ # only 0o or 0O
+ self.throwUnexpectedToken()
+
+ if Character.isIdentifierStart(self.source[self.index]) or Character.isDecimalDigit(self.source[self.index]):
+ self.throwUnexpectedToken()
+
+ return RawToken(
+ type=Token.NumericLiteral,
+ value=int(num, 8),
+ octal=octal,
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
+
+ def isImplicitOctalLiteral(self):
+ # Implicit octal, unless there is a non-octal digit.
+ # (Annex B.1.1 on Numeric Literals)
+ for i in xrange(self.index + 1, self.length):
+ ch = self.source[i]
+ if ch in '89':
+ return False
+ if not Character.isOctalDigit(ch):
+ return True
+ return True
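+
+    # Illustrative example (not executed): "0777" is an implicit octal
+    # literal with value 511, while "0778" contains a non-octal digit and is
+    # rescanned as the decimal literal 778 (Annex B.1.1).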
+
+ def scanNumericLiteral(self):
+ start = self.index
+ ch = self.source[start]
+ assert Character.isDecimalDigit(ch) or ch == '.', 'Numeric literal must start with a decimal digit or a decimal point'
+
+ num = ''
+ if ch != '.':
+ num = self.source[self.index]
+ self.index += 1
+ ch = self.source[self.index]
+
+ # Hex number starts with '0x'.
+ # Octal number starts with '0'.
+ # Octal number in ES6 starts with '0o'.
+ # Binary number in ES6 starts with '0b'.
+ if num == '0':
+ if ch in ('x', 'X'):
+ self.index += 1
+ return self.scanHexLiteral(start)
+
+ if ch in ('b', 'B'):
+ self.index += 1
+ return self.scanBinaryLiteral(start)
+
+ if ch in ('o', 'O'):
+ return self.scanOctalLiteral(ch, start)
+
+ if ch and Character.isOctalDigit(ch):
+ if self.isImplicitOctalLiteral():
+ return self.scanOctalLiteral(ch, start)
+
+ while Character.isDecimalDigit(self.source[self.index]):
+ num += self.source[self.index]
+ self.index += 1
+
+ ch = self.source[self.index]
+
+ if ch == '.':
+ num += self.source[self.index]
+ self.index += 1
+ while Character.isDecimalDigit(self.source[self.index]):
+ num += self.source[self.index]
+ self.index += 1
+
+ ch = self.source[self.index]
+
+ if ch in ('e', 'E'):
+ num += self.source[self.index]
+ self.index += 1
+
+ ch = self.source[self.index]
+ if ch in ('+', '-'):
+ num += self.source[self.index]
+ self.index += 1
+
+ if Character.isDecimalDigit(self.source[self.index]):
+ while Character.isDecimalDigit(self.source[self.index]):
+ num += self.source[self.index]
+ self.index += 1
+
+ else:
+ self.throwUnexpectedToken()
+
+ if Character.isIdentifierStart(self.source[self.index]):
+ self.throwUnexpectedToken()
+
+ value = float(num)
+ return RawToken(
+ type=Token.NumericLiteral,
+ value=int(value) if value.is_integer() else value,
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
+
+ # https://tc39.github.io/ecma262/#sec-literals-string-literals
+
+ def scanStringLiteral(self):
+ start = self.index
+ quote = self.source[start]
+        assert quote in ('\'', '"'), 'String literal must start with a quote'
+
+ self.index += 1
+ octal = False
+ str = ''
+
+ while not self.eof():
+ ch = self.source[self.index]
+ self.index += 1
+
+ if ch == quote:
+ quote = ''
+ break
+ elif ch == '\\':
+ ch = self.source[self.index]
+ self.index += 1
+ if not ch or not Character.isLineTerminator(ch):
+ if ch == 'u':
+ if self.source[self.index] == '{':
+ self.index += 1
+ str += self.scanUnicodeCodePointEscape()
+ else:
+ unescapedChar = self.scanHexEscape(ch)
+ if not unescapedChar:
+ self.throwUnexpectedToken()
+
+ str += unescapedChar
+
+ elif ch == 'x':
+ unescaped = self.scanHexEscape(ch)
+ if not unescaped:
+ self.throwUnexpectedToken(Messages.InvalidHexEscapeSequence)
+
+ str += unescaped
+ elif ch == 'n':
+ str += '\n'
+ elif ch == 'r':
+ str += '\r'
+ elif ch == 't':
+ str += '\t'
+ elif ch == 'b':
+ str += '\b'
+ elif ch == 'f':
+ str += '\f'
+ elif ch == 'v':
+ str += '\x0B'
+ elif ch in (
+ '8',
+ '9',
+ ):
+ str += ch
+ self.tolerateUnexpectedToken()
+
+ else:
+ if ch and Character.isOctalDigit(ch):
+ octToDec = self.octalToDecimal(ch)
+
+ octal = octToDec.octal or octal
+ str += uchr(octToDec.code)
+ else:
+ str += ch
+
+ else:
+ self.lineNumber += 1
+ if ch == '\r' and self.source[self.index] == '\n':
+ self.index += 1
+
+ self.lineStart = self.index
+
+ elif Character.isLineTerminator(ch):
+ break
+ else:
+ str += ch
+
+ if quote != '':
+ self.index = start
+ self.throwUnexpectedToken()
+
+ return RawToken(
+ type=Token.StringLiteral,
+ value=str,
+ octal=octal,
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
+
+ # https://tc39.github.io/ecma262/#sec-template-literal-lexical-components
+
+ def scanTemplate(self):
+ cooked = ''
+ terminated = False
+ start = self.index
+
+ head = self.source[start] == '`'
+ tail = False
+ rawOffset = 2
+
+ self.index += 1
+
+ while not self.eof():
+ ch = self.source[self.index]
+ self.index += 1
+ if ch == '`':
+ rawOffset = 1
+ tail = True
+ terminated = True
+ break
+ elif ch == '$':
+ if self.source[self.index] == '{':
+ self.curlyStack.append('${')
+ self.index += 1
+ terminated = True
+ break
+
+ cooked += ch
+ elif ch == '\\':
+ ch = self.source[self.index]
+ self.index += 1
+ if not Character.isLineTerminator(ch):
+ if ch == 'n':
+ cooked += '\n'
+ elif ch == 'r':
+ cooked += '\r'
+ elif ch == 't':
+ cooked += '\t'
+ elif ch == 'u':
+ if self.source[self.index] == '{':
+ self.index += 1
+ cooked += self.scanUnicodeCodePointEscape()
+ else:
+ restore = self.index
+ unescapedChar = self.scanHexEscape(ch)
+ if unescapedChar:
+ cooked += unescapedChar
+ else:
+ self.index = restore
+ cooked += ch
+
+ elif ch == 'x':
+ unescaped = self.scanHexEscape(ch)
+ if not unescaped:
+ self.throwUnexpectedToken(Messages.InvalidHexEscapeSequence)
+
+ cooked += unescaped
+ elif ch == 'b':
+ cooked += '\b'
+ elif ch == 'f':
+ cooked += '\f'
+ elif ch == 'v':
+ cooked += '\v'
+
+ else:
+ if ch == '0':
+ if Character.isDecimalDigit(self.source[self.index]):
+ # Illegal: \01 \02 and so on
+ self.throwUnexpectedToken(Messages.TemplateOctalLiteral)
+
+ cooked += '\0'
+ elif Character.isOctalDigit(ch):
+ # Illegal: \1 \2
+ self.throwUnexpectedToken(Messages.TemplateOctalLiteral)
+ else:
+ cooked += ch
+
+ else:
+ self.lineNumber += 1
+ if ch == '\r' and self.source[self.index] == '\n':
+ self.index += 1
+
+ self.lineStart = self.index
+
+ elif Character.isLineTerminator(ch):
+ self.lineNumber += 1
+ if ch == '\r' and self.source[self.index] == '\n':
+ self.index += 1
+
+ self.lineStart = self.index
+ cooked += '\n'
+ else:
+ cooked += ch
+
+ if not terminated:
+ self.throwUnexpectedToken()
+
+ if not head:
+ if self.curlyStack:
+ self.curlyStack.pop()
+
+ return RawToken(
+ type=Token.Template,
+ value=self.source[start + 1:self.index - rawOffset],
+ cooked=cooked,
+ head=head,
+ tail=tail,
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
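+
+    # Illustrative example (not executed): scanning `a${x}b` first yields a
+    # head token (head=True) for the "a${" part; after the embedded
+    # expression and its closing "}", a second call yields the tail token
+    # (tail=True) for the "}b`" part.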
+
+ # https://tc39.github.io/ecma262/#sec-literals-regular-expression-literals
+
+ def testRegExp(self, pattern, flags):
+ # The BMP character to use as a replacement for astral symbols when
+ # translating an ES6 "u"-flagged pattern to an ES5-compatible
+ # approximation.
+        # Note: replacing with '\uFFFF' can produce false positives in
+        # unlikely scenarios. For example, `[\u{1044f}-\u{10440}]` is an
+        # invalid pattern that would not be detected by this substitution.
+ astralSubstitute = '\uFFFF'
+
+ # Replace every Unicode escape sequence with the equivalent
+ # BMP character or a constant ASCII code point in the case of
+ # astral symbols. (See the above note on `astralSubstitute`
+ # for more information.)
+ def astralSub(m):
+ codePoint = int(m.group(1) or m.group(2), 16)
+ if codePoint > 0x10FFFF:
+ self.tolerateUnexpectedToken(Messages.InvalidRegExp)
+ elif codePoint <= 0xFFFF:
+ return uchr(codePoint)
+ return astralSubstitute
+ pattern = re.sub(r'\\u\{([0-9a-fA-F]+)\}|\\u([a-fA-F0-9]{4})', astralSub, pattern)
+
+ # Replace each paired surrogate with a single ASCII symbol to
+ # avoid throwing on regular expressions that are only valid in
+ # combination with the "u" flag.
+ pattern = re.sub(r'[\uD800-\uDBFF][\uDC00-\uDFFF]', astralSubstitute, pattern)
+
+        # Return a compiled regular expression object for this pattern-flag
+        # pair, or None if the current environment doesn't support the flags
+        # it uses. Note: the 'm' and 'i' flags must be combined with '|',
+        # not chained in one conditional, or 'i' would be dropped whenever
+        # 'm' is set.
+        pyflags = (re.M if 'm' in flags else 0) | (re.I if 'i' in flags else 0)
+ try:
+ return re.compile(pattern, pyflags)
+ except Exception:
+ self.tolerateUnexpectedToken(Messages.InvalidRegExp)
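+
+    # Illustrative example (not executed): testRegExp(r'\u{1D306}', 'u')
+    # first rewrites the astral escape to '\uFFFF' so that Python's re
+    # module, which has no equivalent of the ECMAScript "u" flag, can still
+    # validate the pattern.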
+
+ def scanRegExpBody(self):
+ ch = self.source[self.index]
+ assert ch == '/', 'Regular expression literal must start with a slash'
+
+ str = self.source[self.index]
+ self.index += 1
+ classMarker = False
+ terminated = False
+
+ while not self.eof():
+ ch = self.source[self.index]
+ self.index += 1
+ str += ch
+ if ch == '\\':
+ ch = self.source[self.index]
+ self.index += 1
+ # https://tc39.github.io/ecma262/#sec-literals-regular-expression-literals
+ if Character.isLineTerminator(ch):
+ self.throwUnexpectedToken(Messages.UnterminatedRegExp)
+
+ str += ch
+ elif Character.isLineTerminator(ch):
+ self.throwUnexpectedToken(Messages.UnterminatedRegExp)
+ elif classMarker:
+ if ch == ']':
+ classMarker = False
+
+ else:
+ if ch == '/':
+ terminated = True
+ break
+ elif ch == '[':
+ classMarker = True
+
+ if not terminated:
+ self.throwUnexpectedToken(Messages.UnterminatedRegExp)
+
+ # Exclude leading and trailing slash.
+ return str[1:-1]
+
+ def scanRegExpFlags(self):
+ str = ''
+ flags = ''
+ while not self.eof():
+ ch = self.source[self.index]
+ if not Character.isIdentifierPart(ch):
+ break
+
+ self.index += 1
+ if ch == '\\' and not self.eof():
+ ch = self.source[self.index]
+ if ch == 'u':
+ self.index += 1
+ restore = self.index
+ char = self.scanHexEscape('u')
+ if char:
+ flags += char
+ str += '\\u'
+ while restore < self.index:
+ str += self.source[restore]
+ restore += 1
+
+ else:
+ self.index = restore
+ flags += 'u'
+ str += '\\u'
+
+ self.tolerateUnexpectedToken()
+ else:
+ str += '\\'
+ self.tolerateUnexpectedToken()
+
+ else:
+ flags += ch
+ str += ch
+
+ return flags
+
+ def scanRegExp(self):
+ start = self.index
+
+ pattern = self.scanRegExpBody()
+ flags = self.scanRegExpFlags()
+ value = self.testRegExp(pattern, flags)
+
+ return RawToken(
+ type=Token.RegularExpression,
+ value='',
+ pattern=pattern,
+ flags=flags,
+ regex=value,
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=start,
+ end=self.index
+ )
+
+ def lex(self):
+ if self.eof():
+ return RawToken(
+ type=Token.EOF,
+ value='',
+ lineNumber=self.lineNumber,
+ lineStart=self.lineStart,
+ start=self.index,
+ end=self.index
+ )
+
+ ch = self.source[self.index]
+
+ if Character.isIdentifierStart(ch):
+ return self.scanIdentifier()
+
+ # Very common: ( and ) and ;
+ if ch in ('(', ')', ';'):
+ return self.scanPunctuator()
+
+ # String literal starts with single quote (U+0027) or double quote (U+0022).
+ if ch in ('\'', '"'):
+ return self.scanStringLiteral()
+
+ # Dot (.) U+002E can also start a floating-point number, hence the need
+ # to check the next character.
+ if ch == '.':
+ if Character.isDecimalDigit(self.source[self.index + 1]):
+ return self.scanNumericLiteral()
+
+ return self.scanPunctuator()
+
+ if Character.isDecimalDigit(ch):
+ return self.scanNumericLiteral()
+
+ # Template literals start with ` (U+0060) for template head
+ # or } (U+007D) for template middle or template tail.
+ if ch == '`' or (ch == '}' and self.curlyStack and self.curlyStack[-1] == '${'):
+ return self.scanTemplate()
+
+ # Possible identifier start in a surrogate pair.
+ cp = ord(ch)
+ if cp >= 0xD800 and cp < 0xDFFF:
+ cp = self.codePointAt(self.index)
+ ch = Character.fromCodePoint(cp)
+ if Character.isIdentifierStart(ch):
+ return self.scanIdentifier()
+
+ return self.scanPunctuator()
diff --git a/third_party/python/esprima/esprima/syntax.py b/third_party/python/esprima/esprima/syntax.py
new file mode 100644
index 0000000000..001b641e25
--- /dev/null
+++ b/third_party/python/esprima/esprima/syntax.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import unicode_literals
+
+
+class Syntax:
+ AssignmentExpression = "AssignmentExpression"
+ AssignmentPattern = "AssignmentPattern"
+ ArrayExpression = "ArrayExpression"
+ ArrayPattern = "ArrayPattern"
+ ArrowFunctionExpression = "ArrowFunctionExpression"
+ AwaitExpression = "AwaitExpression"
+ BlockStatement = "BlockStatement"
+ BinaryExpression = "BinaryExpression"
+ BreakStatement = "BreakStatement"
+ CallExpression = "CallExpression"
+ CatchClause = "CatchClause"
+ ClassBody = "ClassBody"
+ ClassDeclaration = "ClassDeclaration"
+ ClassExpression = "ClassExpression"
+ ConditionalExpression = "ConditionalExpression"
+ ContinueStatement = "ContinueStatement"
+ DoWhileStatement = "DoWhileStatement"
+ DebuggerStatement = "DebuggerStatement"
+ EmptyStatement = "EmptyStatement"
+ ExportAllDeclaration = "ExportAllDeclaration"
+ ExportDefaultDeclaration = "ExportDefaultDeclaration"
+ ExportNamedDeclaration = "ExportNamedDeclaration"
+ ExportSpecifier = "ExportSpecifier"
+ ExportDefaultSpecifier = "ExportDefaultSpecifier"
+ ExpressionStatement = "ExpressionStatement"
+ ForStatement = "ForStatement"
+ ForOfStatement = "ForOfStatement"
+ ForInStatement = "ForInStatement"
+ FunctionDeclaration = "FunctionDeclaration"
+ FunctionExpression = "FunctionExpression"
+ Identifier = "Identifier"
+ IfStatement = "IfStatement"
+ Import = "Import"
+ ImportDeclaration = "ImportDeclaration"
+ ImportDefaultSpecifier = "ImportDefaultSpecifier"
+ ImportNamespaceSpecifier = "ImportNamespaceSpecifier"
+ ImportSpecifier = "ImportSpecifier"
+ Literal = "Literal"
+ LabeledStatement = "LabeledStatement"
+ LogicalExpression = "LogicalExpression"
+ MemberExpression = "MemberExpression"
+ MetaProperty = "MetaProperty"
+ MethodDefinition = "MethodDefinition"
+ FieldDefinition = "FieldDefinition"
+ NewExpression = "NewExpression"
+ ObjectExpression = "ObjectExpression"
+ ObjectPattern = "ObjectPattern"
+ Program = "Program"
+ Property = "Property"
+ RestElement = "RestElement"
+ ReturnStatement = "ReturnStatement"
+ SequenceExpression = "SequenceExpression"
+ SpreadElement = "SpreadElement"
+ Super = "Super"
+ SwitchCase = "SwitchCase"
+ SwitchStatement = "SwitchStatement"
+ TaggedTemplateExpression = "TaggedTemplateExpression"
+ TemplateElement = "TemplateElement"
+ TemplateLiteral = "TemplateLiteral"
+ ThisExpression = "ThisExpression"
+ ThrowStatement = "ThrowStatement"
+ TryStatement = "TryStatement"
+ UnaryExpression = "UnaryExpression"
+ UpdateExpression = "UpdateExpression"
+ VariableDeclaration = "VariableDeclaration"
+ VariableDeclarator = "VariableDeclarator"
+ WhileStatement = "WhileStatement"
+ WithStatement = "WithStatement"
+ YieldExpression = "YieldExpression"
+
+ ArrowParameterPlaceHolder = "ArrowParameterPlaceHolder"
+ BlockComment = "BlockComment"
+ LineComment = "LineComment"
diff --git a/third_party/python/esprima/esprima/token.py b/third_party/python/esprima/esprima/token.py
new file mode 100644
index 0000000000..846ced6002
--- /dev/null
+++ b/third_party/python/esprima/esprima/token.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import unicode_literals
+
+
+class Token:
+ BooleanLiteral = 1
+ EOF = 2
+ Identifier = 3
+ Keyword = 4
+ NullLiteral = 5
+ NumericLiteral = 6
+ Punctuator = 7
+ StringLiteral = 8
+ RegularExpression = 9
+ Template = 10
+
+
+TokenName = {}
+TokenName[Token.BooleanLiteral] = "Boolean"
+TokenName[Token.EOF] = "<end>"
+TokenName[Token.Identifier] = "Identifier"
+TokenName[Token.Keyword] = "Keyword"
+TokenName[Token.NullLiteral] = "Null"
+TokenName[Token.NumericLiteral] = "Numeric"
+TokenName[Token.Punctuator] = "Punctuator"
+TokenName[Token.StringLiteral] = "String"
+TokenName[Token.RegularExpression] = "RegularExpression"
+TokenName[Token.Template] = "Template"
diff --git a/third_party/python/esprima/esprima/tokenizer.py b/third_party/python/esprima/esprima/tokenizer.py
new file mode 100644
index 0000000000..288193965d
--- /dev/null
+++ b/third_party/python/esprima/esprima/tokenizer.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+from collections import deque
+
+from .objects import Object
+from .error_handler import ErrorHandler
+from .scanner import Scanner, SourceLocation, Position, RegExp
+from .token import Token, TokenName
+
+
+class BufferEntry(Object):
+ def __init__(self, type, value, regex=None, range=None, loc=None):
+ self.type = type
+ self.value = value
+ self.regex = regex
+ self.range = range
+ self.loc = loc
+
+
+class Reader(object):
+ def __init__(self):
+ self.values = []
+ self.curly = self.paren = -1
+
+ # A function following one of those tokens is an expression.
+ def beforeFunctionExpression(self, t):
+ return t in (
+ '(', '{', '[', 'in', 'typeof', 'instanceof', 'new',
+ 'return', 'case', 'delete', 'throw', 'void',
+ # assignment operators
+ '=', '+=', '-=', '*=', '**=', '/=', '%=', '<<=', '>>=', '>>>=',
+ '&=', '|=', '^=', ',',
+ # binary/unary operators
+ '+', '-', '*', '**', '/', '%', '++', '--', '<<', '>>', '>>>', '&',
+ '|', '^', '!', '~', '&&', '||', '?', ':', '===', '==', '>=',
+ '<=', '<', '>', '!=', '!=='
+ )
+
+ # Determine if forward slash (/) is an operator or part of a regular expression
+ # https://github.com/mozilla/sweet.js/wiki/design
+ def isRegexStart(self):
+ if not self.values:
+ return True
+
+ previous = self.values[-1]
+ regex = previous is not None
+
+ if previous in (
+ 'this',
+ ']',
+ ):
+ regex = False
+ elif previous == ')':
+ keyword = self.values[self.paren - 1]
+ regex = keyword in ('if', 'while', 'for', 'with')
+
+ elif previous == '}':
+ # Dividing a function by anything makes little sense,
+ # but we have to check for that.
+ regex = True
+ if len(self.values) >= 3 and self.values[self.curly - 3] == 'function':
+ # Anonymous function, e.g. function(){} /42
+ check = self.values[self.curly - 4]
+ regex = not self.beforeFunctionExpression(check) if check else False
+ elif len(self.values) >= 4 and self.values[self.curly - 4] == 'function':
+ # Named function, e.g. function f(){} /42/
+ check = self.values[self.curly - 5]
+ regex = not self.beforeFunctionExpression(check) if check else True
+
+ return regex
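+
+    # Illustrative example (not executed): after "a = b" the last significant
+    # value is an identifier (recorded as None), so a following "/" is a
+    # division operator; after the keyword "return", a "/" begins a regular
+    # expression literal.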
+
+ def append(self, token):
+ if token.type in (Token.Punctuator, Token.Keyword):
+ if token.value == '{':
+ self.curly = len(self.values)
+ elif token.value == '(':
+ self.paren = len(self.values)
+ self.values.append(token.value)
+ else:
+ self.values.append(None)
+
+
+class Config(Object):
+ def __init__(self, tolerant=None, comment=None, range=None, loc=None, **options):
+ self.tolerant = tolerant
+ self.comment = comment
+ self.range = range
+ self.loc = loc
+ for k, v in options.items():
+ setattr(self, k, v)
+
+
+class Tokenizer(object):
+ def __init__(self, code, options):
+ self.config = Config(**options)
+
+ self.errorHandler = ErrorHandler()
+ self.errorHandler.tolerant = self.config.tolerant
+ self.scanner = Scanner(code, self.errorHandler)
+ self.scanner.trackComment = self.config.comment
+
+ self.trackRange = self.config.range
+ self.trackLoc = self.config.loc
+ self.buffer = deque()
+ self.reader = Reader()
+
+ def errors(self):
+ return self.errorHandler.errors
+
+ def getNextToken(self):
+ if not self.buffer:
+
+ comments = self.scanner.scanComments()
+ if self.scanner.trackComment:
+ for e in comments:
+ value = self.scanner.source[e.slice[0]:e.slice[1]]
+ comment = BufferEntry(
+ type='BlockComment' if e.multiLine else 'LineComment',
+ value=value
+ )
+ if self.trackRange:
+ comment.range = e.range
+ if self.trackLoc:
+ comment.loc = e.loc
+ self.buffer.append(comment)
+
+ if not self.scanner.eof():
+ if self.trackLoc:
+ loc = SourceLocation(
+ start=Position(
+ line=self.scanner.lineNumber,
+ column=self.scanner.index - self.scanner.lineStart
+ ),
+ end=Position(),
+ )
+
+ maybeRegex = self.scanner.source[self.scanner.index] == '/' and self.reader.isRegexStart()
+ if maybeRegex:
+ state = self.scanner.saveState()
+ try:
+ token = self.scanner.scanRegExp()
+ except Exception:
+ self.scanner.restoreState(state)
+ token = self.scanner.lex()
+ else:
+ token = self.scanner.lex()
+
+ self.reader.append(token)
+
+ entry = BufferEntry(
+ type=TokenName[token.type],
+ value=self.scanner.source[token.start:token.end]
+ )
+ if self.trackRange:
+ entry.range = [token.start, token.end]
+ if self.trackLoc:
+ loc.end = Position(
+ line=self.scanner.lineNumber,
+ column=self.scanner.index - self.scanner.lineStart
+ )
+ entry.loc = loc
+ if token.type is Token.RegularExpression:
+ entry.regex = RegExp(
+ pattern=token.pattern,
+ flags=token.flags,
+ )
+
+ self.buffer.append(entry)
+
+ return self.buffer.popleft() if self.buffer else None
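+
+
+# Illustrative usage sketch (assumed, not part of the vendored module):
+#
+#     tokenizer = Tokenizer('var answer = 42;', {})
+#     entry = tokenizer.getNextToken()
+#     while entry is not None:
+#         print(entry.type, entry.value)  # e.g. "Keyword var"
+#         entry = tokenizer.getNextToken()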
diff --git a/third_party/python/esprima/esprima/utils.py b/third_party/python/esprima/esprima/utils.py
new file mode 100644
index 0000000000..62cbe9e91b
--- /dev/null
+++ b/third_party/python/esprima/esprima/utils.py
@@ -0,0 +1,40 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import, unicode_literals
+
+import re
+
+from .compat import unicode
+
+
+def format(messageFormat, *args):
+ def formatter(m):
+ formatter.idx += 1
+ assert formatter.idx < len(args), 'Message reference must be in range'
+ return unicode(args[formatter.idx])
+ formatter.idx = -1
+ return format.re.sub(formatter, messageFormat)
+
+
+format.re = re.compile(r'%(\d)')
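+
+
+# Illustrative example (not executed):
+#
+#     format('Unexpected token %0 at line %1', 'foo', 3)
+#     # -> 'Unexpected token foo at line 3'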
diff --git a/third_party/python/esprima/esprima/visitor.py b/third_party/python/esprima/esprima/visitor.py
new file mode 100644
index 0000000000..c508eb6b37
--- /dev/null
+++ b/third_party/python/esprima/esprima/visitor.py
@@ -0,0 +1,288 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import unicode_literals
+
+import json
+import types
+from collections import deque
+
+from .objects import Object
+from .compat import PY3, unicode
+
+
+class VisitRecursionError(Exception):
+ pass
+
+
+class Visited(object):
+ def __init__(self, result):
+ if isinstance(result, Visited):
+ result = result.result
+ self.result = result
+
+
+class Visitor(object):
+ """
+ An Object visitor base class that walks the abstract syntax tree and calls a
+ visitor function for every Object found. This function may return a value
+ which is forwarded by the `visit` method.
+
+ This class is meant to be subclassed, with the subclass adding visitor
+ methods.
+
+    By default the visitor function for a node is ``'visit_'`` + the
+    class name of the Object; so the visit function for a `Module`
+    Object would be `visit_Module`. This behavior can be changed by
+    overriding the `visit` method. If no visitor function exists for an
+    Object, the `generic_visit` visitor is used instead.
+ """
+
+ def __call__(self, obj, metadata):
+ return self.transform(obj, metadata)
+
+ def transform(self, obj, metadata):
+ """Transform an Object."""
+ if isinstance(obj, Object):
+ method = 'transform_' + obj.__class__.__name__
+ transformer = getattr(self, method, self.transform_Object)
+ new_obj = transformer(obj, metadata)
+ if new_obj is not None and obj is not new_obj:
+ obj = new_obj
+ return obj
+
+ def transform_Object(self, obj, metadata):
+ """Called if no explicit transform function exists for an Object."""
+ return obj
+
+ def generic_visit(self, obj):
+ return self.visit(self.visit_Object(obj))
+
+ def visit(self, obj):
+ """Visit a Object."""
+        if not hasattr(self, '_visit_context'):
+ self._visit_context = {}
+ self._visit_count = 0
+ try:
+ self._visit_count += 1
+ stack = deque()
+ stack.append((obj, None))
+ last_result = None
+ while stack:
+ try:
+ last, visited = stack[-1]
+ if isinstance(last, types.GeneratorType):
+ stack.append((last.send(last_result), None))
+ last_result = None
+ elif isinstance(last, Visited):
+ stack.pop()
+ last_result = last.result
+ elif isinstance(last, Object):
+ if last in self._visit_context:
+ if self._visit_context[last] == self.visit_Object:
+ visitor = self.visit_RecursionError
+ else:
+ visitor = self.visit_Object
+ else:
+ method = 'visit_' + last.__class__.__name__
+ visitor = getattr(self, method, self.visit_Object)
+ self._visit_context[last] = visitor
+ stack.pop()
+ stack.append((visitor(last), last))
+ else:
+ method = 'visit_' + last.__class__.__name__
+ visitor = getattr(self, method, self.visit_Generic)
+ stack.pop()
+ stack.append((visitor(last), None))
+ except StopIteration:
+ stack.pop()
+ if visited and visited in self._visit_context:
+ del self._visit_context[visited]
+ return last_result
+ finally:
+ self._visit_count -= 1
+ if self._visit_count <= 0:
+ self._visit_context = {}
+
+ def visit_RecursionError(self, obj):
+ raise VisitRecursionError
+
+ def visit_Object(self, obj):
+ """Called if no explicit visitor function exists for an Object."""
+ yield obj.__dict__
+ yield Visited(obj)
+
+ def visit_Generic(self, obj):
+ """Called if no explicit visitor function exists for an object."""
+ yield Visited(obj)
+
+ def visit_list(self, obj):
+ for item in obj:
+ yield item
+ yield Visited(obj)
+
+ visit_Array = visit_list
+
+ def visit_dict(self, obj):
+ for field, value in list(obj.items()):
+ if not field.startswith('_'):
+ yield value
+ yield Visited(obj)
+
+
+class NodeVisitor(Visitor):
+ pass
+
+
+class ReprVisitor(Visitor):
+ def visit(self, obj, indent=4, nl="\n", sp="", skip=()):
+ self.level = 0
+ if isinstance(indent, int):
+ indent = " " * indent
+ self.indent = indent
+ self.nl = nl
+ self.sp = sp
+ self.skip = skip
+ return super(ReprVisitor, self).visit(obj)
+
+ def visit_RecursionError(self, obj):
+ yield Visited("...")
+
+ def visit_Object(self, obj):
+ value_repr = yield obj.__dict__
+ yield Visited(value_repr)
+
+ def visit_Generic(self, obj):
+ yield Visited(repr(obj))
+
+ def visit_list(self, obj):
+ indent1 = self.indent * self.level
+ indent2 = indent1 + self.indent
+ self.level += 1
+ try:
+ items = []
+ for item in obj:
+ v = yield item
+ items.append(v)
+ if items:
+ value_repr = "[%s%s%s%s%s%s%s]" % (
+ self.sp,
+ self.nl,
+ indent2,
+ (",%s%s%s" % (self.nl, self.sp, indent2)).join(items),
+ self.nl,
+ indent1,
+ self.sp,
+ )
+ else:
+ value_repr = "[]"
+ finally:
+ self.level -= 1
+
+ yield Visited(value_repr)
+
+ visit_Array = visit_list
+
+ def visit_dict(self, obj):
+ indent1 = self.indent * self.level
+ indent2 = indent1 + self.indent
+ self.level += 1
+ try:
+ items = []
+ for k, item in obj.items():
+ if item is not None and not k.startswith('_') and k not in self.skip:
+ v = yield item
+ items.append("%s: %s" % (k, v))
+ if items:
+ value_repr = "{%s%s%s%s%s%s%s}" % (
+ self.sp,
+ self.nl,
+ indent2,
+ (",%s%s%s" % (self.nl, self.sp, indent2)).join(items),
+ self.nl,
+ indent1,
+ self.sp,
+ )
+ else:
+ value_repr = "{}"
+ finally:
+ self.level -= 1
+
+ yield Visited(value_repr)
+
+ if PY3:
+ def visit_str(self, obj):
+ value_repr = json.dumps(obj)
+ yield Visited(value_repr)
+ else:
+ def visit_unicode(self, obj):
+ value_repr = json.dumps(obj)
+ yield Visited(value_repr)
+
+ def visit_SourceLocation(self, obj):
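+        # Render SourceLocation nodes on a single line by temporarily
+        # suppressing indentation, newlines and padding.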
+ old_indent, self.indent = self.indent, ""
+ old_nl, self.nl = self.nl, ""
+ old_sp, self.sp = self.sp, ""
+ try:
+ yield obj
+ finally:
+ self.indent = old_indent
+ self.nl = old_nl
+ self.sp = old_sp
+
+
+class ToDictVisitor(Visitor):
+ map = {
+ 'isAsync': 'async',
+ 'allowAwait': 'await',
+ }
+
+ def visit_RecursionError(self, obj):
+ yield Visited({
+ 'error': "Infinite recursion detected...",
+ })
+
+ def visit_Object(self, obj):
+ obj = yield obj.__dict__
+ yield Visited(obj)
+
+ def visit_list(self, obj):
+ items = []
+ for item in obj:
+ v = yield item
+ items.append(v)
+ yield Visited(items)
+
+ visit_Array = visit_list
+
+ def visit_dict(self, obj):
+ items = []
+ for k, item in obj.items():
+ if item is not None and not k.startswith('_'):
+ v = yield item
+ k = unicode(k)
+ items.append((self.map.get(k, k), v))
+ yield Visited(dict(items))
+
+ def visit_SRE_Pattern(self, obj):
+ yield Visited({})
diff --git a/third_party/python/esprima/esprima/xhtml_entities.py b/third_party/python/esprima/esprima/xhtml_entities.py
new file mode 100644
index 0000000000..7d487bbda8
--- /dev/null
+++ b/third_party/python/esprima/esprima/xhtml_entities.py
@@ -0,0 +1,281 @@
+# -*- coding: utf-8 -*-
+# Copyright JS Foundation and other contributors, https://js.foundation/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import unicode_literals
+
+# Generated by generate-xhtml-entities.js. DO NOT MODIFY!
+
+XHTMLEntities = {
+ 'quot': "\u0022",
+ 'amp': "\u0026",
+ 'apos': "\u0027",
+ 'gt': "\u003E",
+ 'nbsp': "\u00A0",
+ 'iexcl': "\u00A1",
+ 'cent': "\u00A2",
+ 'pound': "\u00A3",
+ 'curren': "\u00A4",
+ 'yen': "\u00A5",
+ 'brvbar': "\u00A6",
+ 'sect': "\u00A7",
+ 'uml': "\u00A8",
+ 'copy': "\u00A9",
+ 'ordf': "\u00AA",
+ 'laquo': "\u00AB",
+ 'not': "\u00AC",
+ 'shy': "\u00AD",
+ 'reg': "\u00AE",
+ 'macr': "\u00AF",
+ 'deg': "\u00B0",
+ 'plusmn': "\u00B1",
+ 'sup2': "\u00B2",
+ 'sup3': "\u00B3",
+ 'acute': "\u00B4",
+ 'micro': "\u00B5",
+ 'para': "\u00B6",
+ 'middot': "\u00B7",
+ 'cedil': "\u00B8",
+ 'sup1': "\u00B9",
+ 'ordm': "\u00BA",
+ 'raquo': "\u00BB",
+ 'frac14': "\u00BC",
+ 'frac12': "\u00BD",
+ 'frac34': "\u00BE",
+ 'iquest': "\u00BF",
+ 'Agrave': "\u00C0",
+ 'Aacute': "\u00C1",
+ 'Acirc': "\u00C2",
+ 'Atilde': "\u00C3",
+ 'Auml': "\u00C4",
+ 'Aring': "\u00C5",
+ 'AElig': "\u00C6",
+ 'Ccedil': "\u00C7",
+ 'Egrave': "\u00C8",
+ 'Eacute': "\u00C9",
+ 'Ecirc': "\u00CA",
+ 'Euml': "\u00CB",
+ 'Igrave': "\u00CC",
+ 'Iacute': "\u00CD",
+ 'Icirc': "\u00CE",
+ 'Iuml': "\u00CF",
+ 'ETH': "\u00D0",
+ 'Ntilde': "\u00D1",
+ 'Ograve': "\u00D2",
+ 'Oacute': "\u00D3",
+ 'Ocirc': "\u00D4",
+ 'Otilde': "\u00D5",
+ 'Ouml': "\u00D6",
+ 'times': "\u00D7",
+ 'Oslash': "\u00D8",
+ 'Ugrave': "\u00D9",
+ 'Uacute': "\u00DA",
+ 'Ucirc': "\u00DB",
+ 'Uuml': "\u00DC",
+ 'Yacute': "\u00DD",
+ 'THORN': "\u00DE",
+ 'szlig': "\u00DF",
+ 'agrave': "\u00E0",
+ 'aacute': "\u00E1",
+ 'acirc': "\u00E2",
+ 'atilde': "\u00E3",
+ 'auml': "\u00E4",
+ 'aring': "\u00E5",
+ 'aelig': "\u00E6",
+ 'ccedil': "\u00E7",
+ 'egrave': "\u00E8",
+ 'eacute': "\u00E9",
+ 'ecirc': "\u00EA",
+ 'euml': "\u00EB",
+ 'igrave': "\u00EC",
+ 'iacute': "\u00ED",
+ 'icirc': "\u00EE",
+ 'iuml': "\u00EF",
+ 'eth': "\u00F0",
+ 'ntilde': "\u00F1",
+ 'ograve': "\u00F2",
+ 'oacute': "\u00F3",
+ 'ocirc': "\u00F4",
+ 'otilde': "\u00F5",
+ 'ouml': "\u00F6",
+ 'divide': "\u00F7",
+ 'oslash': "\u00F8",
+ 'ugrave': "\u00F9",
+ 'uacute': "\u00FA",
+ 'ucirc': "\u00FB",
+ 'uuml': "\u00FC",
+ 'yacute': "\u00FD",
+ 'thorn': "\u00FE",
+ 'yuml': "\u00FF",
+ 'OElig': "\u0152",
+ 'oelig': "\u0153",
+ 'Scaron': "\u0160",
+ 'scaron': "\u0161",
+ 'Yuml': "\u0178",
+ 'fnof': "\u0192",
+ 'circ': "\u02C6",
+ 'tilde': "\u02DC",
+ 'Alpha': "\u0391",
+ 'Beta': "\u0392",
+ 'Gamma': "\u0393",
+ 'Delta': "\u0394",
+ 'Epsilon': "\u0395",
+ 'Zeta': "\u0396",
+ 'Eta': "\u0397",
+ 'Theta': "\u0398",
+ 'Iota': "\u0399",
+ 'Kappa': "\u039A",
+ 'Lambda': "\u039B",
+ 'Mu': "\u039C",
+ 'Nu': "\u039D",
+ 'Xi': "\u039E",
+ 'Omicron': "\u039F",
+ 'Pi': "\u03A0",
+ 'Rho': "\u03A1",
+ 'Sigma': "\u03A3",
+ 'Tau': "\u03A4",
+ 'Upsilon': "\u03A5",
+ 'Phi': "\u03A6",
+ 'Chi': "\u03A7",
+ 'Psi': "\u03A8",
+ 'Omega': "\u03A9",
+ 'alpha': "\u03B1",
+ 'beta': "\u03B2",
+ 'gamma': "\u03B3",
+ 'delta': "\u03B4",
+ 'epsilon': "\u03B5",
+ 'zeta': "\u03B6",
+ 'eta': "\u03B7",
+ 'theta': "\u03B8",
+ 'iota': "\u03B9",
+ 'kappa': "\u03BA",
+ 'lambda': "\u03BB",
+ 'mu': "\u03BC",
+ 'nu': "\u03BD",
+ 'xi': "\u03BE",
+ 'omicron': "\u03BF",
+ 'pi': "\u03C0",
+ 'rho': "\u03C1",
+ 'sigmaf': "\u03C2",
+ 'sigma': "\u03C3",
+ 'tau': "\u03C4",
+ 'upsilon': "\u03C5",
+ 'phi': "\u03C6",
+ 'chi': "\u03C7",
+ 'psi': "\u03C8",
+ 'omega': "\u03C9",
+ 'thetasym': "\u03D1",
+ 'upsih': "\u03D2",
+ 'piv': "\u03D6",
+ 'ensp': "\u2002",
+ 'emsp': "\u2003",
+ 'thinsp': "\u2009",
+ 'zwnj': "\u200C",
+ 'zwj': "\u200D",
+ 'lrm': "\u200E",
+ 'rlm': "\u200F",
+ 'ndash': "\u2013",
+ 'mdash': "\u2014",
+ 'lsquo': "\u2018",
+ 'rsquo': "\u2019",
+ 'sbquo': "\u201A",
+ 'ldquo': "\u201C",
+ 'rdquo': "\u201D",
+ 'bdquo': "\u201E",
+ 'dagger': "\u2020",
+ 'Dagger': "\u2021",
+ 'bull': "\u2022",
+ 'hellip': "\u2026",
+ 'permil': "\u2030",
+ 'prime': "\u2032",
+ 'Prime': "\u2033",
+ 'lsaquo': "\u2039",
+ 'rsaquo': "\u203A",
+ 'oline': "\u203E",
+ 'frasl': "\u2044",
+ 'euro': "\u20AC",
+ 'image': "\u2111",
+ 'weierp': "\u2118",
+ 'real': "\u211C",
+ 'trade': "\u2122",
+ 'alefsym': "\u2135",
+ 'larr': "\u2190",
+ 'uarr': "\u2191",
+ 'rarr': "\u2192",
+ 'darr': "\u2193",
+ 'harr': "\u2194",
+ 'crarr': "\u21B5",
+ 'lArr': "\u21D0",
+ 'uArr': "\u21D1",
+ 'rArr': "\u21D2",
+ 'dArr': "\u21D3",
+ 'hArr': "\u21D4",
+ 'forall': "\u2200",
+ 'part': "\u2202",
+ 'exist': "\u2203",
+ 'empty': "\u2205",
+ 'nabla': "\u2207",
+ 'isin': "\u2208",
+ 'notin': "\u2209",
+ 'ni': "\u220B",
+ 'prod': "\u220F",
+ 'sum': "\u2211",
+ 'minus': "\u2212",
+ 'lowast': "\u2217",
+ 'radic': "\u221A",
+ 'prop': "\u221D",
+ 'infin': "\u221E",
+ 'ang': "\u2220",
+ 'and': "\u2227",
+ 'or': "\u2228",
+ 'cap': "\u2229",
+ 'cup': "\u222A",
+ 'int': "\u222B",
+ 'there4': "\u2234",
+ 'sim': "\u223C",
+ 'cong': "\u2245",
+ 'asymp': "\u2248",
+ 'ne': "\u2260",
+ 'equiv': "\u2261",
+ 'le': "\u2264",
+ 'ge': "\u2265",
+ 'sub': "\u2282",
+ 'sup': "\u2283",
+ 'nsub': "\u2284",
+ 'sube': "\u2286",
+ 'supe': "\u2287",
+ 'oplus': "\u2295",
+ 'otimes': "\u2297",
+ 'perp': "\u22A5",
+ 'sdot': "\u22C5",
+ 'lceil': "\u2308",
+ 'rceil': "\u2309",
+ 'lfloor': "\u230A",
+ 'rfloor': "\u230B",
+ 'loz': "\u25CA",
+ 'spades': "\u2660",
+ 'clubs': "\u2663",
+ 'hearts': "\u2665",
+ 'diams': "\u2666",
+ 'lang': "\u27E8",
+ 'rang': "\u27E9",
+}
diff --git a/third_party/python/esprima/setup.cfg b/third_party/python/esprima/setup.cfg
new file mode 100644
index 0000000000..8bfd5a12f8
--- /dev/null
+++ b/third_party/python/esprima/setup.cfg
@@ -0,0 +1,4 @@
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/third_party/python/esprima/setup.py b/third_party/python/esprima/setup.py
new file mode 100644
index 0000000000..a28ff7e127
--- /dev/null
+++ b/third_party/python/esprima/setup.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+import os
+
+from esprima import version
+
+
+def read(fname):
+ try:
+ with open(os.path.join(os.path.dirname(__file__), fname), "r") as fp:
+ return fp.read().strip()
+ except IOError:
+ return ''
+
+
+setup(
+ name="esprima",
+ version=version,
+ author="German M. Bravo (Kronuz)",
+ author_email="german.mb@gmail.com",
+ url="https://github.com/Kronuz/esprima-python",
+ license="BSD License",
+ keywords="esprima ecmascript javascript parser ast",
+ description="ECMAScript parsing infrastructure for multipurpose analysis in Python",
+ long_description=read("README.rst"),
+ packages=["esprima"],
+ classifiers=[
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: BSD License",
+ "Operating System :: OS Independent",
+ "Topic :: Software Development :: Code Generators",
+ "Topic :: Software Development :: Compilers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Text Processing :: General",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 2",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.3",
+ "Programming Language :: Python :: 3.4",
+ "Programming Language :: Python :: 3.5",
+ "Programming Language :: Python :: 3.6",
+ ],
+ entry_points={
+ 'console_scripts': [
+ 'esprima = esprima.__main__:main',
+ ]
+ },
+)
diff --git a/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/LICENSE b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/LICENSE
new file mode 100644
index 0000000000..f6a01a51d0
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2016 Mozilla Foundation
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/METADATA b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/METADATA
new file mode 100644
index 0000000000..79dda9230b
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/METADATA
@@ -0,0 +1,62 @@
+Metadata-Version: 2.1
+Name: fluent.migrate
+Version: 0.12.0
+Summary: Toolchain to migrate legacy translation to Fluent.
+Home-page: https://github.com/mozilla/fluent-migrate
+Author: Mozilla
+Author-email: l10n-drivers@mozilla.org
+License: APL 2
+Keywords: fluent,localization,l10n
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: compare-locales (<10.0,>=9.0.1)
+Requires-Dist: fluent.syntax (<0.20,>=0.19.0)
+Provides-Extra: hg
+Requires-Dist: python-hglib ; extra == 'hg'
+
+Fluent Migration Tools
+======================
+
+Programmatically create Fluent files from existing content in both legacy
+and Fluent formats. Use recipes written in Python to migrate content for each
+of your localizations.
+
+`migrate-l10n` is a CLI script which uses the `fluent.migrate` module under
+the hood to run migrations on existing translations.
+
+`validate-l10n-recipe` is a CLI script to test a migration recipe for common
+errors, without trying to apply it.
+
+Installation
+------------
+
+Install from PyPI:
+
+ pip install fluent.migrate[hg]
+
+If you only want to use the `MigrationContext` API, you can drop the
+requirement on `python-hglib`:
+
+ pip install fluent.migrate
+
+Usage
+-----
+
+Migrations consist of _recipes_, which are applied to a _localization repository_, based on _template files_.
+You can find recipes for Firefox in `mozilla-central/python/l10n/fluent_migrations/`;
+the reference repository is [gecko-strings](https://hg.mozilla.org/l10n/gecko-strings/) or its _quarantine_.
+You apply those migrations to l10n repositories in [l10n-central](https://hg.mozilla.org/l10n-central/), or to `gecko-strings` for testing.
+
+The migrations are run as Python modules, so the directory containing them
+needs to be on `PYTHONPATH` (see the note after the example below).
+
+An example invocation looks like:
+
+ $ migrate-l10n --lang it --reference-dir gecko-strings --localization-dir l10n-central/it bug_1451992_preferences_sitedata bug_1451992_preferences_translation
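+
+One way to satisfy the `PYTHONPATH` requirement (path illustrative):
+
+    $ export PYTHONPATH=mozilla-central/python/l10n/fluent_migrations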
diff --git a/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/RECORD b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/RECORD
new file mode 100644
index 0000000000..b952d705c4
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/RECORD
@@ -0,0 +1,20 @@
+fluent/__init__.py,sha256=jv2YF__bseklT3OWEzlqJ5qE24c4aWd5F4r0TTjOrWQ,65
+fluent/migrate/__init__.py,sha256=TLqGTEnsuW9uy9WaUKTkeA3AvhyhnCslKMx4f_zV45o,136
+fluent/migrate/_context.py,sha256=kLTbci2fgVBtAXy6sTujse6l9hhgkk62F7sddhD_jhk,12360
+fluent/migrate/blame.py,sha256=Fh645Z1kOZHQN-5fBDdDUOJUf7B3LPf5Qzw-V6tdI8k,2624
+fluent/migrate/changesets.py,sha256=aSoQ5cmoJkP7EbFwNCZ8CL6HVD2cheuOxhJMp8yyzjk,1523
+fluent/migrate/context.py,sha256=Z8AokS8xhFJEUtlq_bHAIJCTPQZfXqiBuwbMy5l8iXg,6090
+fluent/migrate/errors.py,sha256=s7JjvA2yCWogO-Ta4OV3z_Ab31-V_ha_3LGyxF46SRk,313
+fluent/migrate/evaluator.py,sha256=NhLfdlSo1zKBNDS54sa-Xz67CjNYCnAYHRsBx2Gwj2Q,859
+fluent/migrate/helpers.py,sha256=YH6TGE6vjyR7B-d6zJGS2wuz0j-P3SVA22LuplqyCSM,5072
+fluent/migrate/merge.py,sha256=h7W0N3O9VcgZpWqL8JUpNM65p3sbH7Sm4chGZXpMZV0,1854
+fluent/migrate/tool.py,sha256=g0ecdS2vLC71opcHB1k0AX1pD1Dj9xRRV9aLh8gEhmI,5599
+fluent/migrate/transforms.py,sha256=CD5dFwAA9yG1g6nezna8HVVzP8Lx516bQ4cPB2jqkVU,20968
+fluent/migrate/util.py,sha256=V_m009XtdTmPj8YxQP4BQ2949Nar7kLQZQcXXeDLPV0,2875
+fluent/migrate/validator.py,sha256=1qA1Y_lYIpVmSEG_Nt95ZmMt3FZcoTDwSvDFNRZiwyc,11148
+fluent.migrate-0.12.0.dist-info/LICENSE,sha256=yC8xgAJuBJQ0ThoBNcQnXzmBUYVh5xfk3rMDaXQ8gO4,559
+fluent.migrate-0.12.0.dist-info/METADATA,sha256=E8HaaCMrwRrqSquzRcjGmUCOnYDtFMAhRK88F-qakso,2315
+fluent.migrate-0.12.0.dist-info/WHEEL,sha256=a-zpFRIJzOq5QfuhBzbhiA1eHTzNCJn8OdRvhdNX0Rk,110
+fluent.migrate-0.12.0.dist-info/entry_points.txt,sha256=q0mh-Wn0Z8L4j7xyyQhxLDw5yxAMDvSzMgm2uWjIBK8,109
+fluent.migrate-0.12.0.dist-info/top_level.txt,sha256=E6y0EXb_8ntRq2470rEss448Ec6wP_-DI3zVECukrn0,7
+fluent.migrate-0.12.0.dist-info/RECORD,,
diff --git a/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/WHEEL b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/WHEEL
new file mode 100644
index 0000000000..f771c29b87
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.40.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/entry_points.txt b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000..e437e9ecf9
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+migrate-l10n = fluent.migrate.tool:cli
+validate-l10n-recipe = fluent.migrate.validator:cli
diff --git a/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/top_level.txt b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..a3582d405a
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent.migrate-0.12.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+fluent
diff --git a/third_party/python/fluent.migrate/fluent/__init__.py b/third_party/python/fluent.migrate/fluent/__init__.py
new file mode 100644
index 0000000000..69e3be50da
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/__init__.py
@@ -0,0 +1 @@
+__path__ = __import__('pkgutil').extend_path(__path__, __name__)
diff --git a/third_party/python/fluent.migrate/fluent/migrate/__init__.py b/third_party/python/fluent.migrate/fluent/migrate/__init__.py
new file mode 100644
index 0000000000..96bb6008b2
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/__init__.py
@@ -0,0 +1,3 @@
+from .transforms import ( # noqa: F401
+ CONCAT, COPY, COPY_PATTERN, PLURALS, REPLACE, REPLACE_IN_TEXT
+)
diff --git a/third_party/python/fluent.migrate/fluent/migrate/_context.py b/third_party/python/fluent.migrate/fluent/migrate/_context.py
new file mode 100644
index 0000000000..14c4de15e4
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/_context.py
@@ -0,0 +1,329 @@
+import os
+import codecs
+from functools import partial
+import logging
+from itertools import zip_longest
+
+import fluent.syntax.ast as FTL
+from fluent.syntax.parser import FluentParser
+from fluent.syntax.serializer import FluentSerializer
+from compare_locales.parser import getParser
+from compare_locales.plurals import get_plural
+
+from .evaluator import Evaluator
+from .merge import merge_resource
+from .errors import (
+ UnreadableReferenceError,
+)
+
+
+class InternalContext:
+ """Internal context for merging translation resources.
+
+ For the public interface, see `context.MigrationContext`.
+ """
+
+ def __init__(
+ self, lang, reference_dir, localization_dir, enforce_translated=False
+ ):
+ self.fluent_parser = FluentParser(with_spans=False)
+ self.fluent_serializer = FluentSerializer()
+
+ # An iterable of plural category names relevant to the context's
+ # language. E.g. ('one', 'other') for English.
+ self.plural_categories = get_plural(lang)
+ if self.plural_categories is None:
+ logger = logging.getLogger('migrate')
+ logger.warning(
+ 'Plural rule for "{}" is not defined in '
+ 'compare-locales'.format(lang))
+ self.plural_categories = ('one', 'other')
+
+ self.enforce_translated = enforce_translated
+ # Parsed input resources stored by resource path.
+ self.reference_resources = {}
+ self.localization_resources = {}
+ self.target_resources = {}
+
+ # An iterable of `FTL.Message` objects some of whose nodes can be the
+ # transform operations.
+ self.transforms = {}
+
+ # The evaluator instance is an AST transformer capable of walking an
+ # AST hierarchy and evaluating nodes which are migration Transforms.
+ self.evaluator = Evaluator(self)
+
+ def read_ftl_resource(self, path):
+ """Read an FTL resource and parse it into an AST."""
+ f = codecs.open(path, 'r', 'utf8')
+ try:
+ contents = f.read()
+ except UnicodeDecodeError as err:
+ logger = logging.getLogger('migrate')
+ logger.warning(f'Unable to read file {path}: {err}')
+ raise err
+ finally:
+ f.close()
+
+ ast = self.fluent_parser.parse(contents)
+
+ annots = [
+ annot
+ for entry in ast.body
+ if isinstance(entry, FTL.Junk)
+ for annot in entry.annotations
+ ]
+
+ if len(annots):
+ logger = logging.getLogger('migrate')
+ for annot in annots:
+ msg = annot.message
+ logger.warning(f'Syntax error in {path}: {msg}')
+
+ return ast
+
+ def read_legacy_resource(self, path):
+ """Read a legacy resource and parse it into a dict."""
+ parser = getParser(path)
+ parser.readFile(path)
+ # Transform the parsed result which is an iterator into a dict.
+ return {
+ entity.key: entity.val for entity in parser
+ if entity.localized or self.enforce_translated
+ }
+
+ def read_reference_ftl(self, path):
+ """Read and parse a reference FTL file.
+
+ A missing resource file is a fatal error and will raise an
+ UnreadableReferenceError.
+ """
+ fullpath = os.path.join(self.reference_dir, path)
+ try:
+ return self.read_ftl_resource(fullpath)
+ except OSError:
+ error_message = f'Missing reference file: {fullpath}'
+ logging.getLogger('migrate').error(error_message)
+ raise UnreadableReferenceError(error_message)
+ except UnicodeDecodeError as err:
+ error_message = f'Error reading file {fullpath}: {err}'
+ logging.getLogger('migrate').error(error_message)
+ raise UnreadableReferenceError(error_message)
+
+ def read_localization_ftl(self, path):
+ """Read and parse an existing localization FTL file.
+
+ Create a new FTL.Resource if the file doesn't exist or can't be
+ decoded.
+ """
+ fullpath = os.path.join(self.localization_dir, path)
+ try:
+ return self.read_ftl_resource(fullpath)
+ except OSError:
+ logger = logging.getLogger('migrate')
+ logger.info(
+ 'Localization file {} does not exist and '
+ 'it will be created'.format(path))
+ return FTL.Resource()
+ except UnicodeDecodeError:
+ logger = logging.getLogger('migrate')
+ logger.warning(
+ 'Localization file {} has broken encoding. '
+ 'It will be re-created and some translations '
+ 'may be lost'.format(path))
+ return FTL.Resource()
+
+ def maybe_add_localization(self, path):
+ """Add a localization resource to migrate translations from.
+
+        Uses a compare-locales parser to create a dict mapping keys to
+        string values.
+ For Fluent sources, we store the AST.
+ """
+ try:
+ fullpath = os.path.join(self.localization_dir, path)
+ if not fullpath.endswith('.ftl'):
+ collection = self.read_legacy_resource(fullpath)
+ else:
+ collection = self.read_ftl_resource(fullpath)
+ except OSError:
+ logger = logging.getLogger('migrate')
+ logger.warning(f'Missing localization file: {path}')
+ else:
+ self.localization_resources[path] = collection
+
+ def get_legacy_source(self, path, key):
+ """Get an entity value from a localized legacy source.
+
+ Used by the `Source` transform.
+ """
+ resource = self.localization_resources[path]
+ return resource.get(key, None)
+
+ def get_fluent_source_pattern(self, path, key):
+ """Get a pattern from a localized Fluent source.
+
+ If the key contains a `.`, does an attribute lookup.
+ Used by the `COPY_PATTERN` transform.
+ """
+ resource = self.localization_resources[path]
+ msg_key, _, attr_key = key.partition('.')
+ found = None
+ for entry in resource.body:
+ if isinstance(entry, (FTL.Message, FTL.Term)):
+ if entry.id.name == msg_key:
+ found = entry
+ break
+ if found is None:
+ return None
+ if not attr_key:
+ return found.value
+ for attribute in found.attributes:
+ if attribute.id.name == attr_key:
+ return attribute.value
+ return None
+
+ def messages_equal(self, res1, res2):
+ """Compare messages and terms of two FTL resources.
+
+ Uses FTL.BaseNode.equals to compare all messages/terms
+ in two FTL resources.
+ If the order or number of messages differ, the result is also False.
+ """
+ def message_id(message):
+ "Return the message's identifer name for sorting purposes."
+ return message.id.name
+
+ messages1 = sorted(
+ (entry for entry in res1.body
+ if isinstance(entry, FTL.Message)
+ or isinstance(entry, FTL.Term)),
+ key=message_id)
+ messages2 = sorted(
+ (entry for entry in res2.body
+ if isinstance(entry, FTL.Message)
+ or isinstance(entry, FTL.Term)),
+ key=message_id)
+ for msg1, msg2 in zip_longest(messages1, messages2):
+ if msg1 is None or msg2 is None:
+ return False
+ if not msg1.equals(msg2):
+ return False
+ return True
+
+ def merge_changeset(self, changeset=None, known_translations=None):
+ """Return a generator of FTL ASTs for the changeset.
+
+ The input data must be configured earlier using the `add_*` methods.
+        If given, `changeset` must be a set of (path, key) tuples describing
+ which legacy translations are to be merged. If `changeset` is None,
+ all legacy translations will be allowed to be migrated in a single
+ changeset.
+
+ We use the `in_changeset` method to determine if a message should be
+ migrated for the given changeset.
+
+ Given `changeset`, return a dict whose keys are resource paths and
+ values are `FTL.Resource` instances. The values will also be used to
+ update this context's existing localization resources.
+ """
+
+ if changeset is None:
+ # Merge all known legacy translations. Used in tests.
+ changeset = {
+ (path, key)
+ for path, strings in self.localization_resources.items()
+ if not path.endswith('.ftl')
+ for key in strings.keys()
+ }
+
+ if known_translations is None:
+ known_translations = changeset
+
+ for path, reference in self.reference_resources.items():
+ current = self.target_resources[path]
+ transforms = self.transforms.get(path, [])
+ in_changeset = partial(
+ self.in_changeset, changeset, known_translations, path)
+
+ # Merge legacy translations with the existing ones using the
+ # reference as a template.
+ snapshot = merge_resource(
+ self, reference, current, transforms, in_changeset
+ )
+
+ # Skip this path if the messages in the merged snapshot are
+ # identical to those in the current state of the localization file.
+ # This may happen when:
+ #
+            # - none of the transforms is in the changeset, or
+ # - all messages which would be migrated by the context's
+ # transforms already exist in the current state.
+ if self.messages_equal(current, snapshot):
+ continue
+
+ # Store the merged snapshot on the context so that the next merge
+ # already takes it into account as the existing localization.
+ self.target_resources[path] = snapshot
+
+ # The result for this path is a complete `FTL.Resource`.
+ yield path, snapshot
+
+ def in_changeset(self, changeset, known_translations, path, ident):
+ """Check if a message should be migrated in this changeset.
+
+ The message is identified by path and ident.
+
+ A message will be migrated only if all of its dependencies
+ are present in the currently processed changeset.
+
+ If a transform defined for this message points to a missing
+ legacy translation, this message will not be merged. The
+ missing legacy dependency won't be present in the changeset.
+
+ This also means that partially translated messages (e.g.
+ constructed from two legacy strings out of which only one is
+ avaiable) will never be migrated.
+ """
+ message_deps = self.dependencies.get((path, ident), None)
+
+ # Don't merge if we don't have a transform for this message.
+ if message_deps is None:
+ return False
+
+ # As a special case, if a transform exists but has no
+        # dependencies, it's a hardcoded `FTL.Node` which doesn't
+ # migrate any existing translation but rather creates a new
+ # one. Merge it.
+ if len(message_deps) == 0:
+ return True
+
+ # Make sure all the dependencies are present in the current
+ # changeset. Partial migrations are not currently supported.
+ # See https://bugzilla.mozilla.org/show_bug.cgi?id=1321271
+ # We only return True if our current changeset touches
+ # the transform, and we have all of the dependencies.
+ active_deps = message_deps & changeset
+ available_deps = message_deps & known_translations
+ return active_deps and message_deps == available_deps
+
+ def serialize_changeset(self, changeset, known_translations=None):
+ """Return a dict of serialized FTLs for the changeset.
+
+ Given `changeset`, return a dict whose keys are resource paths and
+ values are serialized FTL snapshots.
+ """
+
+ return {
+ path: self.fluent_serializer.serialize(snapshot)
+ for path, snapshot in self.merge_changeset(
+ changeset, known_translations
+ )
+ }
+
+ def evaluate(self, node):
+ return self.evaluator.visit(node)
+
+
+logging.basicConfig()
diff --git a/third_party/python/fluent.migrate/fluent/migrate/blame.py b/third_party/python/fluent.migrate/fluent/migrate/blame.py
new file mode 100644
index 0000000000..2ae23d0ebe
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/blame.py
@@ -0,0 +1,80 @@
+import argparse
+import json
+import os
+
+from compare_locales.parser import getParser, Junk
+from compare_locales.parser.fluent import FluentEntity
+from compare_locales import mozpath
+import hglib
+from hglib.util import b, cmdbuilder
+
+
+class Blame:
+ def __init__(self, client):
+ self.client = client
+ self.users = []
+ self.blame = {}
+
+ def attribution(self, file_paths):
+ args = cmdbuilder(
+ b('annotate'), *[b(p) for p in file_paths], template='json',
+ date=True, user=True, cwd=self.client.root())
+ blame_json = self.client.rawcommand(args)
+ file_blames = json.loads(blame_json)
+
+ for file_blame in file_blames:
+ self.handleFile(file_blame)
+
+ return {'authors': self.users,
+ 'blame': self.blame}
+
+ def handleFile(self, file_blame):
+ path = mozpath.normsep(file_blame['path'])
+
+ try:
+ parser = getParser(path)
+ except UserWarning:
+ return
+
+ self.blame[path] = {}
+
+ self.readFile(parser, path)
+ entities = parser.parse()
+ for e in entities:
+ if isinstance(e, Junk):
+ continue
+ if e.val_span:
+ key_vals = [(e.key, e.val_span)]
+ else:
+ key_vals = []
+ if isinstance(e, FluentEntity):
+ key_vals += [
+ (f'{e.key}.{attr.key}', attr.val_span)
+ for attr in e.attributes
+ ]
+ for key, (val_start, val_end) in key_vals:
+ entity_lines = file_blame['lines'][
+ (e.ctx.linecol(val_start)[0] - 1):e.ctx.linecol(val_end)[0]
+ ]
+ # ignore timezone
+ entity_lines.sort(key=lambda blame: -blame['date'][0])
+ line_blame = entity_lines[0]
+ user = line_blame['user']
+ timestamp = line_blame['date'][0] # ignore timezone
+ if user not in self.users:
+ self.users.append(user)
+ userid = self.users.index(user)
+ self.blame[path][key] = [userid, timestamp]
+
+ def readFile(self, parser, path):
+ parser.readFile(os.path.join(self.client.root().decode('utf-8'), path))
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument('repo_path')
+ parser.add_argument('file_path', nargs='+')
+ args = parser.parse_args()
+ blame = Blame(hglib.open(args.repo_path))
+ attrib = blame.attribution(args.file_path)
+ print(json.dumps(attrib, indent=4, separators=(',', ': ')))
diff --git a/third_party/python/fluent.migrate/fluent/migrate/changesets.py b/third_party/python/fluent.migrate/fluent/migrate/changesets.py
new file mode 100644
index 0000000000..c766216aa2
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/changesets.py
@@ -0,0 +1,56 @@
+import time
+
+
+def by_first_commit(item):
+ """Order two changesets by their first commit date."""
+ return item['first_commit']
+
+
+def convert_blame_to_changesets(blame_json):
+ """Convert a blame dict into a list of changesets.
+
+ The blame information in `blame_json` should be a dict of the following
+ structure:
+
+ {
+ 'authors': [
+ 'A.N. Author <author@example.com>',
+ ],
+ 'blame': {
+ 'path/one': {
+ 'key1': [0, 1346095921.0],
+ },
+ }
+ }
+
+ It will be transformed into a list of changesets which can be fed into
+ `InternalContext.serialize_changeset`:
+
+ [
+ {
+ 'author': 'A.N. Author <author@example.com>',
+ 'first_commit': 1346095921.0,
+ 'changes': {
+ ('path/one', 'key1'),
+ }
+ },
+ ]
+
+ """
+ now = time.time()
+ changesets = [
+ {
+ 'author': author,
+ 'first_commit': now,
+ 'changes': set()
+ } for author in blame_json['authors']
+ ]
+
+ for path, keys_info in blame_json['blame'].items():
+ for key, (author_index, timestamp) in keys_info.items():
+ changeset = changesets[author_index]
+ changeset['changes'].add((path, key))
+ if timestamp < changeset['first_commit']:
+ changeset['first_commit'] = timestamp
+
+ return sorted(changesets, key=by_first_commit)
diff --git a/third_party/python/fluent.migrate/fluent/migrate/context.py b/third_party/python/fluent.migrate/fluent/migrate/context.py
new file mode 100644
index 0000000000..befeea7ab0
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/context.py
@@ -0,0 +1,148 @@
+import logging
+
+import fluent.syntax.ast as FTL
+from fluent.migrate.util import fold
+
+from .transforms import Source
+from .util import get_message, skeleton
+from .errors import (
+ EmptyLocalizationError,
+ UnreadableReferenceError,
+)
+from ._context import InternalContext
+
+
+__all__ = [
+ 'EmptyLocalizationError',
+ 'UnreadableReferenceError',
+ 'MigrationContext',
+]
+
+
+class MigrationContext(InternalContext):
+ """Stateful context for merging translation resources.
+
+ `MigrationContext` must be configured with the target locale and the
+ directory locations of the input data.
+
+ The transformation takes four types of input data:
+
+ - The en-US FTL reference files which will be used as templates for
+ message order, comments and sections. If the reference_dir is None,
+ the migration will create Messages and Terms in the order given by
+ the transforms.
+
+ - The current FTL files for the given locale.
+
+ - A list of `FTL.Message` or `FTL.Term` objects some of whose nodes
+ are special helper or transform nodes:
+
+ helpers: VARIABLE_REFERENCE, MESSAGE_REFERENCE, TERM_REFERENCE
+ transforms: COPY, REPLACE_IN_TEXT, REPLACE, PLURALS, CONCAT
+ fluent value helper: COPY_PATTERN
+
+ The legacy (DTD, properties) translation files are deduced by the
+ dependencies in the transforms. The translations from these files will be
+ read from the localization_dir and transformed into FTL and merged
+ into the existing FTL files for the given language.
+ """
+
+ def __init__(
+ self, locale, reference_dir, localization_dir, enforce_translated=False
+ ):
+ super().__init__(
+ locale, reference_dir, localization_dir,
+ enforce_translated=enforce_translated
+ )
+ self.locale = locale
+ # Paths to directories with input data, relative to CWD.
+ self.reference_dir = reference_dir
+ self.localization_dir = localization_dir
+
+ # A dict whose keys are `(path, key)` tuples corresponding to target
+ # FTL translations, and values are sets of `(path, key)` tuples
+ # corresponding to localized entities which will be migrated.
+ self.dependencies = {}
+
+ def add_transforms(self, target, reference, transforms):
+ """Define transforms for target using reference as template.
+
+ `target` is a path of the destination FTL file relative to the
+ localization directory. `reference` is a path to the template FTL
+ file relative to the reference directory.
+
+ Each transform is an extended FTL node with `Transform` nodes as some
+ values. Transforms are stored in their lazy AST form until
+ `merge_changeset` is called, at which point they are evaluated to real
+ FTL nodes with migrated translations.
+
+ Each transform is scanned for `Source` nodes which will be used to
+ build the list of dependencies for the transformed message.
+
+ For transforms that merely copy legacy messages or Fluent patterns,
+ using `fluent.migrate.helpers.transforms_from` is recommended.
+ """
+ def get_sources(acc, cur):
+ if isinstance(cur, Source):
+ acc.add((cur.path, cur.key))
+ return acc
+
+ if self.reference_dir is None:
+ # Add skeletons to resource body for each transform
+ # if there's no reference.
+ reference_ast = self.reference_resources.get(target)
+ if reference_ast is None:
+ reference_ast = FTL.Resource()
+ reference_ast.body.extend(
+ skeleton(transform) for transform in transforms
+ )
+ else:
+ reference_ast = self.read_reference_ftl(reference)
+ self.reference_resources[target] = reference_ast
+
+ for node in transforms:
+ ident = node.id.name
+ # Scan `node` for `Source` nodes and collect the information they
+ # store into a set of dependencies.
+ dependencies = fold(get_sources, node, set())
+ # Set these sources as dependencies for the current transform.
+ self.dependencies[(target, ident)] = dependencies
+
+ # The target Fluent message should exist in the reference file. If
+ # it doesn't, it's probably a typo.
+            # Naturally, this only applies when a reference is given.
+ if self.reference_dir is None:
+ continue
+ if get_message(reference_ast.body, ident) is None:
+ logger = logging.getLogger('migrate')
+ logger.warning(
+ '{} "{}" was not found in {}'.format(
+ type(node).__name__, ident, reference))
+
+ # Keep track of localization resource paths which were defined as
+ # sources in the transforms.
+ expected_paths = set()
+
+ # Read all legacy translation files defined in Source transforms. This
+ # may fail but a single missing legacy resource doesn't mean that the
+ # migration can't succeed.
+ for dependencies in self.dependencies.values():
+ for path in {path for path, _ in dependencies}:
+ expected_paths.add(path)
+ self.maybe_add_localization(path)
+
+ # However, if all legacy resources are missing, bail out early. There
+ # are no translations to migrate. We'd also get errors in hg annotate.
+ if len(expected_paths) > 0 and len(self.localization_resources) == 0:
+ error_message = 'No localization files were found'
+ logging.getLogger('migrate').error(error_message)
+ raise EmptyLocalizationError(error_message)
+
+ # Add the current transforms to any other transforms added earlier for
+ # this path.
+ path_transforms = self.transforms.setdefault(target, [])
+ path_transforms += transforms
+
+ if target not in self.target_resources:
+ target_ast = self.read_localization_ftl(target)
+ self.target_resources[target] = target_ast
diff --git a/third_party/python/fluent.migrate/fluent/migrate/errors.py b/third_party/python/fluent.migrate/fluent/migrate/errors.py
new file mode 100644
index 0000000000..dcc3025377
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/errors.py
@@ -0,0 +1,22 @@
+class SkipTransform(RuntimeError):
+ pass
+
+
+class MigrationError(ValueError):
+ pass
+
+
+class EmptyLocalizationError(MigrationError):
+ pass
+
+
+class NotSupportedError(MigrationError):
+ pass
+
+
+class UnreadableReferenceError(MigrationError):
+ pass
+
+
+class InvalidTransformError(MigrationError):
+ pass
diff --git a/third_party/python/fluent.migrate/fluent/migrate/evaluator.py b/third_party/python/fluent.migrate/fluent/migrate/evaluator.py
new file mode 100644
index 0000000000..90c626f933
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/evaluator.py
@@ -0,0 +1,28 @@
+from fluent.syntax import ast as FTL
+from fluent.syntax.visitor import Transformer
+
+from .transforms import Transform
+
+
+class Evaluator(Transformer):
+ """An AST transformer for evaluating migration Transforms.
+
+ An AST transformer (i.e. a visitor capable of modifying the AST) which
+ walks an AST hierarchy and evaluates nodes which are migration Transforms.
+ """
+
+ def __init__(self, ctx):
+ self.ctx = ctx
+
+ def visit(self, node):
+ if not isinstance(node, FTL.BaseNode):
+ return node
+
+ if isinstance(node, Transform):
+ # Some transforms don't expect other transforms as children.
+ # Evaluate the children first.
+ transform = self.generic_visit(node)
+ # Then, evaluate this transform.
+ return transform(self.ctx)
+
+ return self.generic_visit(node)
diff --git a/third_party/python/fluent.migrate/fluent/migrate/helpers.py b/third_party/python/fluent.migrate/fluent/migrate/helpers.py
new file mode 100644
index 0000000000..1c12f644fc
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/helpers.py
@@ -0,0 +1,147 @@
+"""Fluent AST helpers.
+
+The functions defined in this module offer a shorthand for defining common AST
+nodes.
+
+They take a string argument and immediately return a corresponding AST node.
+(As opposed to Transforms which are AST nodes on their own and only return the
+migrated AST nodes when they are evaluated by a MigrationContext.) """
+
+
+from fluent.syntax import FluentParser, ast as FTL
+from fluent.syntax.visitor import Transformer
+from .transforms import Transform, CONCAT, COPY, COPY_PATTERN
+from .errors import NotSupportedError, InvalidTransformError
+
+
+def VARIABLE_REFERENCE(name):
+ """Create an ExternalArgument expression."""
+
+ return FTL.VariableReference(
+ id=FTL.Identifier(name)
+ )
+
+
+def MESSAGE_REFERENCE(name):
+ """Create a MessageReference expression.
+
+ If the passed name contains a `.`, we're generating
+ a message reference with an attribute.
+ """
+ if '.' in name:
+ name, attribute = name.split('.')
+ attribute = FTL.Identifier(attribute)
+ else:
+ attribute = None
+
+ return FTL.MessageReference(
+ id=FTL.Identifier(name),
+ attribute=attribute,
+ )
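+
+# For instance (illustrative), MESSAGE_REFERENCE('menu-file.label') builds a
+# reference to the `label` attribute of the message `menu-file`, while
+# MESSAGE_REFERENCE('menu-file') references the message value itself.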
+
+
+def TERM_REFERENCE(name):
+ """Create a TermReference expression."""
+
+ return FTL.TermReference(
+ id=FTL.Identifier(name)
+ )
+
+
+class IntoTransforms(Transformer):
+ IMPLICIT_TRANSFORMS = ("CONCAT",)
+ FORBIDDEN_TRANSFORMS = ("PLURALS", "REPLACE", "REPLACE_IN_TEXT")
+
+ def __init__(self, substitutions):
+ self.substitutions = substitutions
+
+ def visit_Junk(self, node):
+ anno = node.annotations[0]
+ raise InvalidTransformError(
+ "Transform contains parse error: {}, at {}".format(
+ anno.message, anno.span.start))
+
+ def visit_FunctionReference(self, node):
+ name = node.id.name
+ if name in self.IMPLICIT_TRANSFORMS:
+ raise NotSupportedError(
+ "{} may not be used with transforms_from(). It runs "
+ "implicitly on all Patterns anyways.".format(name))
+ if name in self.FORBIDDEN_TRANSFORMS:
+ raise NotSupportedError(
+ "{} may not be used with transforms_from(). It requires "
+ "additional logic in Python code.".format(name))
+ if name in ('COPY', 'COPY_PATTERN'):
+ args = (
+ self.into_argument(arg) for arg in node.arguments.positional
+ )
+ kwargs = {
+ arg.name.name: self.into_argument(arg.value)
+ for arg in node.arguments.named}
+ if name == 'COPY':
+ return COPY(*args, **kwargs)
+ return COPY_PATTERN(*args, **kwargs)
+ return self.generic_visit(node)
+
+ def visit_Placeable(self, node):
+ """If the expression is a Transform, replace this Placeable
+ with the Transform it's holding.
+ Transforms evaluate to Patterns, which are flattened as
+ elements of Patterns in Transform.pattern_of, but only
+ one level deep.
+ """
+ node = self.generic_visit(node)
+ if isinstance(node.expression, Transform):
+ return node.expression
+ return node
+
+ def visit_Pattern(self, node):
+ """Replace the Pattern with CONCAT which is more accepting of its
+ elements. CONCAT takes PatternElements, Expressions and other
+ Patterns (e.g. returned from evaluating transforms).
+ """
+ node = self.generic_visit(node)
+ return CONCAT(*node.elements)
+
+ def into_argument(self, node):
+ """Convert AST node into an argument to migration transforms."""
+ if isinstance(node, FTL.StringLiteral):
+ # Special cases for booleans which don't exist in Fluent.
+ if node.value == "True":
+ return True
+ if node.value == "False":
+ return False
+ return node.value
+ if isinstance(node, FTL.MessageReference):
+ try:
+ return self.substitutions[node.id.name]
+ except KeyError:
+ raise InvalidTransformError(
+ "Unknown substitution in COPY: {}".format(
+ node.id.name))
+ else:
+ raise InvalidTransformError(
+ "Invalid argument passed to COPY: {}".format(
+ type(node).__name__))
+
+
+def transforms_from(ftl, **substitutions):
+ """Parse FTL code into a list of Message nodes with Transforms.
+
+ The FTL may use a fabricated COPY function inside of placeables which
+    will be converted into an actual COPY migration transform.
+
+ new-key = Hardcoded text { COPY("filepath.dtd", "string.key") }
+
+ For convenience, COPY may also refer to transforms_from's keyword
+ arguments via the MessageReference syntax:
+
+ transforms_from(\"""
+ new-key = Hardcoded text { COPY(file_dtd, "string.key") }
+ \""", file_dtd="very/long/path/to/a/file.dtd")
+
+ """
+
+ parser = FluentParser(with_spans=False)
+ resource = parser.parse(ftl)
+    return IntoTransforms(substitutions).visit(resource).body
diff --git a/third_party/python/fluent.migrate/fluent/migrate/merge.py b/third_party/python/fluent.migrate/fluent/migrate/merge.py
new file mode 100644
index 0000000000..c8bedc4583
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/merge.py
@@ -0,0 +1,55 @@
+import fluent.syntax.ast as FTL
+
+from .errors import SkipTransform
+from .util import get_message, get_transform
+
+
+def merge_resource(ctx, reference, current, transforms, in_changeset):
+ """Transform legacy translations into FTL.
+
+ Use the `reference` FTL AST as a template. For each en-US string in the
+ reference, first check for an existing translation in the current FTL
+ `localization` and use it if it's present; then if the string has
+ a transform defined in the migration specification and if it's in the
+ currently processed changeset, evaluate the transform.
+ """
+
+ def merge_body(body):
+ return [
+ entry
+ for entry in map(merge_entry, body)
+ if entry is not None
+ ]
+
+ def merge_entry(entry):
+ # All standalone comments will be merged.
+ if isinstance(entry, FTL.BaseComment):
+ return entry
+
+ # Ignore Junk
+ if isinstance(entry, FTL.Junk):
+ return None
+
+ ident = entry.id.name
+
+ # If the message is present in the existing localization, we add it to
+ # the resulting resource. This ensures consecutive merges don't remove
+ # translations but rather create supersets of them.
+ existing = get_message(current.body, ident)
+ if existing is not None:
+ return existing
+
+ transform = get_transform(transforms, ident)
+
+ # Make sure this message is supposed to be migrated as part of the
+ # current changeset.
+ if transform is not None and in_changeset(ident):
+ if transform.comment is None:
+ transform.comment = entry.comment
+ try:
+ return ctx.evaluate(transform)
+ except SkipTransform:
+ return None
+
+ body = merge_body(reference.body)
+ return FTL.Resource(body)
diff --git a/third_party/python/fluent.migrate/fluent/migrate/tool.py b/third_party/python/fluent.migrate/fluent/migrate/tool.py
new file mode 100644
index 0000000000..cb5ddb23b4
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/tool.py
@@ -0,0 +1,181 @@
+import os
+import logging
+import argparse
+from contextlib import contextmanager
+import importlib
+import sys
+
+import hglib
+
+from fluent.migrate.context import MigrationContext
+from fluent.migrate.errors import MigrationError
+from fluent.migrate.changesets import convert_blame_to_changesets
+from fluent.migrate.blame import Blame
+
+
+@contextmanager
+def dont_write_bytecode():
+ _dont_write_bytecode = sys.dont_write_bytecode
+ sys.dont_write_bytecode = True
+ yield
+ sys.dont_write_bytecode = _dont_write_bytecode
+
+
+class Migrator:
+ def __init__(self, locale, reference_dir, localization_dir, dry_run):
+ self.locale = locale
+ self.reference_dir = reference_dir
+ self.localization_dir = localization_dir
+ self.dry_run = dry_run
+ self._client = None
+
+ @property
+ def client(self):
+ if self._client is None:
+ self._client = hglib.open(self.localization_dir, 'utf-8')
+ return self._client
+
+ def close(self):
+ # close hglib.client, if we cached one.
+ if self._client is not None:
+ self._client.close()
+
+ def run(self, migration):
+ print('\nRunning migration {} for {}'.format(
+ migration.__name__, self.locale))
+
+ # For each migration create a new context.
+ ctx = MigrationContext(
+ self.locale, self.reference_dir, self.localization_dir
+ )
+
+ try:
+ # Add the migration spec.
+ migration.migrate(ctx)
+ except MigrationError as e:
+ print(' Skipping migration {} for {}:\n {}'.format(
+ migration.__name__, self.locale, e))
+ return
+
+ # Keep track of how many changesets we're committing.
+ index = 0
+ description_template = migration.migrate.__doc__
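+        # The migration's docstring doubles as the commit message template;
+        # it may reference {index} and {author}, filled in by
+        # commit_changeset below.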
+
+ # Annotate localization files used as sources by this migration
+ # to preserve attribution of translations.
+ files = ctx.localization_resources.keys()
+ blame = Blame(self.client).attribution(files)
+ changesets = convert_blame_to_changesets(blame)
+ known_legacy_translations = set()
+
+ for changeset in changesets:
+ snapshot = self.snapshot(
+ ctx, changeset['changes'], known_legacy_translations
+ )
+ if not snapshot:
+ continue
+ self.serialize_changeset(snapshot)
+ index += 1
+ self.commit_changeset(
+ description_template, changeset['author'], index
+ )
+
+ def snapshot(self, ctx, changes_in_changeset, known_legacy_translations):
+ '''Run the migration for the changeset, with the set of
+ this and all prior legacy translations.
+ '''
+ known_legacy_translations.update(changes_in_changeset)
+ return ctx.serialize_changeset(
+ changes_in_changeset,
+ known_legacy_translations
+ )
+
+ def serialize_changeset(self, snapshot):
+ '''Write serialized FTL files to disk.'''
+ for path, content in snapshot.items():
+ fullpath = os.path.join(self.localization_dir, path)
+ print(f' Writing to {fullpath}')
+ if not self.dry_run:
+ fulldir = os.path.dirname(fullpath)
+ if not os.path.isdir(fulldir):
+ os.makedirs(fulldir)
+ with open(fullpath, 'wb') as f:
+ f.write(content.encode('utf8'))
+
+ def commit_changeset(
+ self, description_template, author, index
+ ):
+ message = description_template.format(
+ index=index,
+ author=author
+ )
+
+ print(f' Committing changeset: {message}')
+ if self.dry_run:
+ return
+ try:
+ self.client.commit(
+ message, user=author.encode('utf-8'), addremove=True
+ )
+ except hglib.error.CommandError as err:
+ print(f' WARNING: hg commit failed ({err})')
+
+
+def main(locale, reference_dir, localization_dir, migrations, dry_run):
+ """Run migrations and commit files with the result."""
+ migrator = Migrator(locale, reference_dir, localization_dir, dry_run)
+
+ for migration in migrations:
+ migrator.run(migration)
+
+ migrator.close()
+
+
+def cli():
+ parser = argparse.ArgumentParser(
+ description='Migrate translations to FTL.'
+ )
+ parser.add_argument(
+ 'migrations', metavar='MIGRATION', type=str, nargs='+',
+ help='migrations to run (Python modules)'
+ )
+ parser.add_argument(
+ '--locale', '--lang', type=str,
+ help='target locale code (--lang is deprecated)'
+ )
+ parser.add_argument(
+ '--reference-dir', type=str,
+ help='directory with reference FTL files'
+ )
+ parser.add_argument(
+ '--localization-dir', type=str,
+ help='directory for localization files'
+ )
+ parser.add_argument(
+ '--dry-run', action='store_true',
+ help='do not write to disk nor commit any changes'
+ )
+ parser.set_defaults(dry_run=False)
+
+ logger = logging.getLogger('migrate')
+ logger.setLevel(logging.INFO)
+
+ args = parser.parse_args()
+
+    # Don't byte-compile migrations: they're not our code, and they run
+    # infrequently. Import them eagerly here so that the imports happen
+    # while bytecode writing is disabled (a lazy map() would defer them
+    # until after the context manager has exited).
+    with dont_write_bytecode():
+        migrations = [
+            importlib.import_module(name) for name in args.migrations
+        ]
+
+ main(
+ locale=args.locale,
+ reference_dir=args.reference_dir,
+ localization_dir=args.localization_dir,
+ migrations=migrations,
+ dry_run=args.dry_run
+ )
+
+
+if __name__ == '__main__':
+ cli()
diff --git a/third_party/python/fluent.migrate/fluent/migrate/transforms.py b/third_party/python/fluent.migrate/fluent/migrate/transforms.py
new file mode 100644
index 0000000000..11b722125f
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/transforms.py
@@ -0,0 +1,576 @@
+"""Migration Transforms.
+
+Transforms are AST nodes which describe how legacy translations should be
+migrated. They are created inert and only return the migrated AST nodes when
+they are evaluated by a MigrationContext.
+
+All Transforms evaluate to Fluent Patterns. This makes them suitable for
+defining migrations of values of messages, attributes and variants. The special
+CONCAT Transform is capable of joining multiple Patterns returned by evaluating
+other Transforms into a single Pattern. It can also concatenate Pattern
+elements: TextElements and Placeables.
+
+The COPY, REPLACE and PLURALS Transforms inherit from Source which is a special
+AST Node defining the location (the file path and the id) of the legacy
+translation. During the migration, the current MigrationContext scans the
+migration spec for Source nodes and extracts the information about all legacy
+translations being migrated. For instance,
+
+ COPY('file.dtd', 'hello')
+
+is equivalent to:
+
+ FTL.Pattern([
+ Source('file.dtd', 'hello')
+ ])
+
+Sometimes it's useful to work with text rather than (path, key) source
+definitions. This is the case when the migrated translation requires some
+hardcoded text, e.g. <a> and </a> when multiple translations become a single
+one with a DOM overlay. In such cases it's best to use FTL.TextElements:
+
+ FTL.Message(
+ id=FTL.Identifier('update-failed'),
+ value=CONCAT(
+ COPY('aboutDialog.dtd', 'update.failed.start'),
+ FTL.TextElement('<a>'),
+ COPY('aboutDialog.dtd', 'update.failed.linkText'),
+ FTL.TextElement('</a>'),
+ COPY('aboutDialog.dtd', 'update.failed.end'),
+ )
+ )
+
+The REPLACE_IN_TEXT Transform also takes TextElements as input, making it
+possible to pass it as the foreach function of the PLURALS Transform. In the
+example below, each slice of the plural string is converted into a
+TextElement by PLURALS and then run through the REPLACE_IN_TEXT transform.
+
+ FTL.Message(
+ FTL.Identifier('delete-all'),
+ value=PLURALS(
+ 'aboutDownloads.dtd',
+ 'deleteAll',
+ VARIABLE_REFERENCE('num'),
+ lambda text: REPLACE_IN_TEXT(
+ text,
+ {
+ '#1': VARIABLE_REFERENCE('num')
+ }
+ )
+ )
+ )
+"""
+
+import re
+
+from fluent.syntax import ast as FTL
+from fluent.syntax.visitor import Transformer
+from .errors import NotSupportedError
+
+
+def chain_elements(elements):
+ '''Flatten a list of FTL nodes into an iterator over PatternElements.'''
+ for element in elements:
+ if isinstance(element, FTL.Pattern):
+ yield from element.elements
+ elif isinstance(element, FTL.PatternElement):
+ yield element
+ elif isinstance(element, FTL.Expression):
+ yield FTL.Placeable(element)
+ else:
+ raise RuntimeError(
+ 'Expected Pattern, PatternElement or Expression')
+
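+# An editor's illustration (not part of the upstream module): chaining a
+# Pattern and a bare Expression yields PatternElements only, e.g.
+#
+#     list(chain_elements([
+#         FTL.Pattern([FTL.TextElement('Hi')]),
+#         FTL.StringLiteral('!'),
+#     ]))
+#     # -> [TextElement('Hi'), Placeable(StringLiteral('!'))]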
+
+re_leading_ws = re.compile(
+ r'\A(?:(?P<whitespace> +)(?P<text>.*?)|(?P<block_text>\n.*?))\Z',
+ re.S,
+)
+re_trailing_ws = re.compile(
+ r'\A(?:(?P<text>.*?)(?P<whitespace> +)|(?P<block_text>.*\n))\Z',
+ re.S
+)
+
+
+def extract_whitespace(regex, element):
+ '''Extract leading or trailing whitespace from a TextElement.
+
+ Return a tuple of (Placeable, TextElement) in which the Placeable
+ encodes the extracted whitespace as a StringLiteral and the
+ TextElement has the same amount of whitespace removed. The
+ Placeable with the extracted whitespace is always returned first.
+ If the element starts or ends with a newline, add an empty
+ StringLiteral.
+ '''
+ match = re.search(regex, element.value)
+ if match:
+        # If the 'whitespace' group is None, the element starts or ends
+        # with a newline. Add an empty { "" }.
+ whitespace = match.group('whitespace') or ''
+ placeable = FTL.Placeable(FTL.StringLiteral(whitespace))
+ if whitespace == element.value:
+ return placeable, None
+ else:
+ # Either text or block_text matched the rest.
+ text = match.group('text') or match.group('block_text')
+ return placeable, FTL.TextElement(text)
+ else:
+ return None, element
+
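+# An editor's illustration (not part of the upstream module):
+#
+#     extract_whitespace(re_leading_ws, FTL.TextElement('  Hello'))
+#     # -> (Placeable(StringLiteral('  ')), TextElement('Hello'))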
+
+class Transform(FTL.BaseNode):
+ def __call__(self, ctx):
+ raise NotImplementedError
+
+ @staticmethod
+ def pattern_of(*elements):
+ normalized = []
+
+ # Normalize text content: convert text content to TextElements, join
+ # adjacent text and prune empty. Text content is either existing
+ # TextElements or whitespace-only StringLiterals. This may result in
+ # leading and trailing whitespace being put back into TextElements if
+ # the new Pattern is built from existing Patterns (CONCAT(COPY...)).
+ # The leading and trailing whitespace of the new Pattern will be
+ # extracted later into new StringLiterals.
+ for element in chain_elements(elements):
+ if isinstance(element, FTL.TextElement):
+ text_content = element.value
+ elif isinstance(element, FTL.Placeable) \
+ and isinstance(element.expression, FTL.StringLiteral) \
+ and re.match(r'^ *$', element.expression.value):
+ text_content = element.expression.value
+ else:
+ # The element does not contain text content which should be
+ # normalized. It may be a number, a reference, or
+ # a StringLiteral which should be preserved in the Pattern.
+ normalized.append(element)
+ continue
+
+ previous = normalized[-1] if len(normalized) else None
+ if isinstance(previous, FTL.TextElement):
+ # Join adjacent TextElements.
+ previous.value += text_content
+ elif len(text_content) > 0:
+ # Normalize non-empty text to a TextElement.
+ normalized.append(FTL.TextElement(text_content))
+ else:
+ # Prune empty text.
+ pass
+
+ # Store empty values explicitly as {""}.
+ if len(normalized) == 0:
+ empty = FTL.Placeable(FTL.StringLiteral(''))
+ return FTL.Pattern([empty])
+
+ # Extract explicit leading whitespace into a StringLiteral.
+ if isinstance(normalized[0], FTL.TextElement):
+ ws, text = extract_whitespace(re_leading_ws, normalized[0])
+ normalized[:1] = [ws, text]
+
+ # Extract explicit trailing whitespace into a StringLiteral.
+ if isinstance(normalized[-1], FTL.TextElement):
+ ws, text = extract_whitespace(re_trailing_ws, normalized[-1])
+ normalized[-1:] = [text, ws]
+
+ return FTL.Pattern([
+ element
+ for element in normalized
+ if element is not None
+ ])
+
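+# An editor's illustration (not part of the upstream module): pattern_of
+# joins adjacent text into a single TextElement, e.g.
+#
+#     Transform.pattern_of(FTL.TextElement('Hello '), FTL.TextElement('world'))
+#     # -> Pattern([TextElement('Hello world')])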
+
+class Source(Transform):
+ """Base class for Transforms that get translations from source files.
+
+ The contract is that the first argument is the source path, and the
+ second is a key representing legacy string IDs, or Fluent id.attr.
+ """
+ def __init__(self, path, key):
+ self.path = path
+ self.key = key
+
+
+class FluentSource(Source):
+ """Declare a Fluent source translation to be copied over.
+
+ When evaluated, it clones the Pattern of the parsed source.
+ """
+ def __init__(self, path, key):
+ if not path.endswith('.ftl'):
+ raise NotSupportedError(
+ 'Please use COPY to migrate from legacy files '
+ '({})'.format(path)
+ )
+ if key[0] == '-' and '.' in key:
+ raise NotSupportedError(
+                'Cannot migrate from Term Attributes, as they are '
+                'locale-dependent ({})'.format(path)
+ )
+ super().__init__(path, key)
+
+ def __call__(self, ctx):
+ pattern = ctx.get_fluent_source_pattern(self.path, self.key)
+ return pattern.clone()
+
+
+class COPY_PATTERN(FluentSource):
+ """Create a Pattern with the translation value from the given source.
+
+ The given key can be a Message ID, Message ID.attribute_name, or
+ Term ID. Accessing Term attributes is not supported, as they're internal
+ to the localization.
+ """
+ pass
+
+
+class TransformPattern(FluentSource, Transformer):
+ """Base class for modifying a Fluent pattern as part of a migration.
+
+ Implement visit_* methods of the Transformer pattern to do the
+ actual modifications.
+ """
+ def __call__(self, ctx):
+ pattern = super().__call__(ctx)
+ return self.visit(pattern)
+
+ def visit_Pattern(self, node):
+ # Make sure we're creating valid Patterns after restructuring
+ # transforms.
+ node = self.generic_visit(node)
+ pattern = Transform.pattern_of(*node.elements)
+ return pattern
+
+ def visit_Placeable(self, node):
+ # Ensure we have a Placeable with an expression still.
+ # Transforms could have replaced the expression with
+ # a Pattern or PatternElement, in which case we
+ # just pass that through.
+ # Patterns then get flattened by visit_Pattern.
+ node = self.generic_visit(node)
+ if isinstance(node.expression, (FTL.Pattern, FTL.PatternElement)):
+ return node.expression
+ return node
+
+
+class LegacySource(Source):
+ """Declare the source translation to be migrated with other transforms.
+
+    When evaluated, `LegacySource` returns a TextElement with the content
+    from the source translation. Escaped characters are unescaped by the
+ compare-locales parser according to the file format:
+
+ - in properties files: \\uXXXX,
+ - in DTD files: known named, decimal, and hexadecimal HTML entities.
+
+ Consult the following files for the list of known named HTML entities:
+
+ https://github.com/python/cpython/blob/2.7/Lib/htmlentitydefs.py
+ https://github.com/python/cpython/blob/3.6/Lib/html/entities.py
+
+ By default, leading and trailing whitespace on each line as well as
+ leading and trailing empty lines will be stripped from the source
+ translation's content. Set `trim=False` to disable this behavior.
+ """
+
+ def __init__(self, path, key, trim=None):
+ if path.endswith('.ftl'):
+ raise NotSupportedError(
+ 'Please use COPY_PATTERN to migrate from Fluent files '
+ '({})'.format(path))
+
+ super().__init__(path, key)
+ self.trim = trim
+
+ def get_text(self, ctx):
+ return ctx.get_legacy_source(self.path, self.key)
+
+ @staticmethod
+ def trim_text(text):
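+        # An editor's illustration (not part of the upstream module):
+        #     trim_text('  Hello\n    world  \n')  # -> 'Hello\nworld'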
+ # strip leading white-space from each line
+ text = re.sub('^[ \t]+', '', text, flags=re.M)
+ # strip trailing white-space from each line
+ text = re.sub('[ \t]+$', '', text, flags=re.M)
+ # strip leading and trailing empty lines
+ text = text.strip('\r\n')
+ return text
+
+ def __call__(self, ctx):
+ text = self.get_text(ctx)
+ if self.trim is not False:
+ text = self.trim_text(text)
+ return FTL.TextElement(text)
+
+
+class COPY(LegacySource):
+ """Create a Pattern with the translation value from the given source."""
+
+ def __call__(self, ctx):
+ element = super().__call__(ctx)
+ return Transform.pattern_of(element)
+
+
+PRINTF = re.compile(
+ r'%(?P<good>%|'
+ r'(?:(?P<number>[1-9][0-9]*)\$)?'
+ r'(?P<width>\*|[0-9]+)?'
+ r'(?P<prec>\.(?:\*|[0-9]+)?)?'
+ r'(?P<spec>[duxXosScpfg]))'
+)
+
+
+def number():
+ i = 1
+ while True:
+ yield i
+ i += 1
+
+
+def normalize_printf(text):
+ """Normalize printf arguments so that they're all numbered.
+ Gecko forbids mixing unnumbered and numbered ones, so
+ we just need to convert unnumbered to numbered ones.
+ Also remove ones that have zero width, as they're intended
+ to be removed from the output by the localizer.
+ """
+ next_number = number()
+
+ def normalized(match):
+ if match.group('good') == '%':
+ return '%'
+ hidden = match.group('width') == '0'
+ if match.group('number'):
+ return '' if hidden else match.group()
+ num = next(next_number)
+ return '' if hidden else '%{}${}'.format(num, match.group('spec'))
+
+ return PRINTF.sub(normalized, text)
+
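+# An editor's illustration (not part of the upstream module):
+#
+#     normalize_printf('%d of %d')     # -> '%1$d of %2$d'
+#     normalize_printf('%1$S (%0.S)')  # -> '%1$S ()'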
+
+class REPLACE_IN_TEXT(Transform):
+ """Create a Pattern from a TextElement and replace legacy placeables.
+
+ The original placeables are defined as keys on the `replacements` dict.
+ For each key the value must be defined as a FTL Pattern, Placeable,
+ TextElement or Expression to be interpolated.
+ """
+
+ def __init__(self, element, replacements, normalize_printf=False):
+ self.element = element
+ self.replacements = replacements
+ self.normalize_printf = normalize_printf
+
+ def __call__(self, ctx):
+ # For each specified replacement, find all indices of the original
+ # placeable in the source translation. If missing, the list of indices
+ # will be empty.
+ value = self.element.value
+ if self.normalize_printf:
+ value = normalize_printf(value)
+ key_indices = {
+ key: [m.start() for m in re.finditer(re.escape(key), value)]
+ for key in self.replacements.keys()
+ }
+
+ # Build a dict of indices to replacement keys.
+ keys_indexed = {}
+ for key, indices in key_indices.items():
+ for index in indices:
+ keys_indexed[index] = key
+
+ # Order the replacements by the position of the original placeable in
+ # the translation.
+ replacements = (
+ (key, ctx.evaluate(self.replacements[key]))
+ for index, key
+ in sorted(keys_indexed.items(), key=lambda x: x[0])
+ )
+
+ # A list of PatternElements built from the legacy translation and the
+ # FTL replacements. It may contain empty or adjacent TextElements.
+ elements = []
+ tail = value
+
+ # Convert original placeables and text into FTL Nodes. For each
+ # original placeable the translation will be partitioned around it and
+ # the text before it will be converted into an `FTL.TextElement` and
+ # the placeable will be replaced with its replacement.
+ for key, node in replacements:
+ before, key, tail = tail.partition(key)
+ elements.append(FTL.TextElement(before))
+ elements.append(node)
+
+ # Don't forget about the tail after the loop ends.
+ elements.append(FTL.TextElement(tail))
+ return Transform.pattern_of(*elements)
+
+
+class REPLACE(LegacySource):
+ """Create a Pattern with interpolations from given source.
+
+ Interpolations in the translation value from the given source will be
+ replaced with FTL placeables using the `REPLACE_IN_TEXT` transform.
+ """
+
+ def __init__(
+ self, path, key, replacements, **kwargs
+ ):
+ # We default normalize_printf to False except for .properties files.
+ # We still allow the caller to override the default value.
+ normalize_printf = False
+ if 'normalize_printf' in kwargs:
+ normalize_printf = kwargs['normalize_printf']
+ del kwargs['normalize_printf']
+ elif path.endswith('.properties'):
+ normalize_printf = True
+
+ super().__init__(path, key, **kwargs)
+ self.replacements = replacements
+ self.normalize_printf = normalize_printf
+
+ def __call__(self, ctx):
+ element = super().__call__(ctx)
+ return REPLACE_IN_TEXT(
+ element, self.replacements,
+ normalize_printf=self.normalize_printf
+ )(ctx)
+
+
+class PLURALS(LegacySource):
+ """Create a Pattern with plurals from given source.
+
+ Build an `FTL.SelectExpression` with the supplied `selector` and variants
+ extracted from the source. The original translation should be a
+ semicolon-separated list of plural forms. Each form will be converted
+ into a TextElement and run through the `foreach` function, which should
+ return an `FTL.Node` or a `Transform`. By default, the `foreach` function
+ creates a valid Pattern from the TextElement passed into it.
+ """
+ DEFAULT_ORDER = ('zero', 'one', 'two', 'few', 'many', 'other')
+
+ def __init__(self, path, key, selector, foreach=Transform.pattern_of,
+ **kwargs):
+ super().__init__(path, key, **kwargs)
+ self.selector = selector
+ self.foreach = foreach
+
+ def __call__(self, ctx):
+ element = super().__call__(ctx)
+ selector = ctx.evaluate(self.selector)
+ keys = ctx.plural_categories
+ forms = [
+ FTL.TextElement(part.strip())
+ for part in element.value.split(';')
+ ]
+
+        # The default CLDR form should be the last we have in DEFAULT_ORDER,
+        # usually `other`, but in some cases `many`. If we don't have a
+        # variant for that, we'll append one, reusing the variant that comes
+        # last in CLDR order among those present in the legacy translation.
+        # That may or may not be the last variant listed in the translation.
+ default_key = [
+ key for key in reversed(self.DEFAULT_ORDER) if key in keys
+ ][0]
+
+ # Match keys to legacy forms in the order they are defined in Gecko's
+ # PluralForm.jsm. Filter out empty forms.
+ pairs = [
+ (key, var)
+ for key, var in zip(keys, forms)
+ if var.value
+ ]
+
+ # A special case for legacy translations which don't define any
+ # plural forms.
+ if len(pairs) == 0:
+ return Transform.pattern_of()
+
+ # A special case for languages with one plural category or one legacy
+ # variant. We don't need to insert a SelectExpression for them.
+ if len(pairs) == 1:
+ _, only_form = pairs[0]
+ only_variant = ctx.evaluate(self.foreach(only_form))
+ return Transform.pattern_of(only_variant)
+
+ # Make sure the default key is defined. If it's missing, use the last
+ # form (in CLDR order) found in the legacy translation.
+ pairs.sort(key=lambda pair: self.DEFAULT_ORDER.index(pair[0]))
+ last_key, last_form = pairs[-1]
+ if last_key != default_key:
+ pairs.append((default_key, last_form))
+
+ def createVariant(key, form):
+ # Run the legacy plural form through `foreach` which returns an
+ # `FTL.Node` describing the transformation required for each
+ # variant. Then evaluate it to a migrated FTL node.
+ value = ctx.evaluate(self.foreach(form))
+ return FTL.Variant(
+ key=FTL.Identifier(key),
+ value=value,
+ default=key == default_key
+ )
+
+ select = FTL.SelectExpression(
+ selector=selector,
+ variants=[
+ createVariant(key, form)
+ for key, form in pairs
+ ]
+ )
+
+ return Transform.pattern_of(select)
+
+
+class CONCAT(Transform):
+ """Create a new Pattern from Patterns, PatternElements and Expressions.
+
+ When called with at least two elements, `CONCAT` disables the trimming
+ behavior of the elements which are subclasses of `LegacySource` by
+ setting `trim=False`, unless `trim` has already been set explicitly. The
+ following two `CONCAT` calls are equivalent:
+
+ CONCAT(
+ FTL.TextElement("Hello"),
+ COPY("file.properties", "hello")
+ )
+
+ CONCAT(
+ FTL.TextElement("Hello"),
+ COPY("file.properties", "hello", trim=False)
+ )
+
+ Set `trim=True` explicitly to force trimming:
+
+ CONCAT(
+ FTL.TextElement("Hello "),
+ COPY("file.properties", "hello", trim=True)
+ )
+
+ When called with a single element and when the element is a subclass of
+ `LegacySource`, the trimming behavior is not changed. The following two
+ transforms are equivalent:
+
+ CONCAT(COPY("file.properties", "hello"))
+
+ COPY("file.properties", "hello")
+ """
+
+ def __init__(self, *elements, **kwargs):
+ # We want to support both passing elements as *elements in the
+ # migration specs and as elements=[]. The latter is used by
+ # FTL.BaseNode.traverse when it recreates the traversed node using its
+ # attributes as kwargs.
+ self.elements = list(kwargs.get('elements', elements))
+
+ # We want to make CONCAT(COPY()) equivalent to COPY() so that it's
+ # always safe (no-op) to wrap transforms in a CONCAT. This is used by
+ # the implementation of transforms_from.
+ if len(self.elements) > 1:
+ for elem in self.elements:
+ # Only change trim if it hasn't been set explicitly.
+ if isinstance(elem, LegacySource) and elem.trim is None:
+ elem.trim = False
+
+ def __call__(self, ctx):
+ return Transform.pattern_of(*self.elements)
diff --git a/third_party/python/fluent.migrate/fluent/migrate/util.py b/third_party/python/fluent.migrate/fluent/migrate/util.py
new file mode 100644
index 0000000000..3e4d725d3e
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/util.py
@@ -0,0 +1,110 @@
+import textwrap
+
+import fluent.syntax.ast as FTL
+from fluent.syntax.parser import FluentParser, FluentParserStream
+
+
+fluent_parser = FluentParser(with_spans=False)
+
+
+def parse(Parser, string):
+ if Parser is FluentParser:
+ return fluent_parser.parse(string)
+
+ # Parsing a legacy resource.
+
+ # Parse the string into the internal Context.
+ parser = Parser()
+    # compare-locales expects byte strings.
+ parser.readContents(string.encode('utf8'))
+ # Transform the parsed result which is an iterator into a dict.
+ return {ent.key: ent for ent in parser}
+
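+# An editor's illustration (not part of the upstream module): with a
+# compare-locales parser class such as PropertiesParser, parse() returns a
+# dict keyed by entity key, e.g. parse(PropertiesParser, 'hello = World')
+# yields a dict with a single 'hello' entry.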
+
+def ftl_resource_to_ast(code):
+ return fluent_parser.parse(ftl(code))
+
+
+def ftl_resource_to_json(code):
+ return fluent_parser.parse(ftl(code)).to_json()
+
+
+def ftl_pattern_to_json(code):
+ ps = FluentParserStream(ftl(code))
+ return fluent_parser.maybe_get_pattern(ps).to_json()
+
+
+def to_json(merged_iter):
+ return {
+ path: resource.to_json()
+ for path, resource in merged_iter
+ }
+
+
+LOCALIZABLE_ENTRIES = (FTL.Message, FTL.Term)
+
+
+def get_message(body, ident):
+ """Get message called `ident` from the `body` iterable."""
+ for entity in body:
+ if isinstance(entity, LOCALIZABLE_ENTRIES) and entity.id.name == ident:
+ return entity
+
+
+def get_transform(body, ident):
+ """Get entity called `ident` from the `body` iterable."""
+ for transform in body:
+ if transform.id.name == ident:
+ return transform
+
+
+def skeleton(node):
+ """Create a skeleton copy of the given node.
+
+ For localizable entries, the value is None and the attributes are {}.
+ That's not a valid Fluent entry, so it requires further manipulation to
+ set values and/or attributes.
+ """
+ if isinstance(node, LOCALIZABLE_ENTRIES):
+ return type(node)(id=node.id.clone(), value=None)
+ return node.clone()
+
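+# An editor's illustration (not part of the upstream module): for a Message,
+# skeleton() returns a Message with the same id but value=None and no
+# attributes, ready to receive migrated values.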
+
+def ftl(code):
+ """Nicer indentation for FTL code.
+
+ The code returned by this function is meant to be compared against the
+ output of the FTL Serializer. The input code will end with a newline to
+ match the output of the serializer.
+ """
+
+ # The code might be triple-quoted.
+ code = code.lstrip('\n')
+
+ return textwrap.dedent(code)
+
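+# An editor's illustration (not part of the upstream module):
+#
+#     ftl('''
+#         key = Value
+#     ''')
+#     # -> 'key = Value\n'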
+
+def fold(fun, node, init):
+ """Reduce `node` to a single value using `fun`.
+
+ Apply `fun` against an accumulator and each subnode of `node` (in postorder
+ traversal) to reduce it to a single value.
+ """
+
+ def fold_(vals, acc):
+ if not vals:
+ return acc
+
+ head = list(vals)[0]
+ tail = list(vals)[1:]
+
+ if isinstance(head, FTL.BaseNode):
+ acc = fold(fun, head, acc)
+ if isinstance(head, list):
+ acc = fold_(head, acc)
+ if isinstance(head, dict):
+ acc = fold_(head.values(), acc)
+
+ return fold_(tail, fun(acc, head))
+
+ return fold_(vars(node).values(), init)
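+
+
+# An editor's illustration (not part of the upstream module): count all AST
+# nodes reachable from `node` (the root itself is not passed to `fun`):
+#
+#     n_nodes = fold(
+#         lambda acc, value: acc + isinstance(value, FTL.BaseNode), node, 0)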
diff --git a/third_party/python/fluent.migrate/fluent/migrate/validator.py b/third_party/python/fluent.migrate/fluent/migrate/validator.py
new file mode 100644
index 0000000000..191c4e15e8
--- /dev/null
+++ b/third_party/python/fluent.migrate/fluent/migrate/validator.py
@@ -0,0 +1,335 @@
+import argparse
+import ast
+from itertools import zip_longest
+
+from fluent.migrate import transforms
+from fluent.migrate.errors import MigrationError
+from fluent.migrate.helpers import transforms_from
+from fluent.syntax import ast as FTL
+from fluent.syntax.visitor import Visitor
+from compare_locales import mozpath
+
+
+class MigrateNotFoundException(Exception):
+ pass
+
+
+class BadContextAPIException(Exception):
+ pass
+
+
+def process_assign(node, context):
+    # Initialize val so that unsupported assignment values are skipped
+    # below instead of raising NameError.
+    val = None
+    if isinstance(node.value, ast.Str):
+ val = node.value.s
+ elif isinstance(node.value, ast.Name):
+ val = context.get(node.value.id)
+ elif isinstance(node.value, ast.Call):
+ val = node.value
+ if val is None:
+ return
+ for target in node.targets:
+ if isinstance(target, ast.Name):
+ context[target.id] = val
+
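+# An editor's illustration (not part of the upstream module): after
+# processing the statement `path = "browser.ftl"`, context maps
+# 'path' to 'browser.ftl'.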
+
+class Validator:
+ """Validate a migration recipe
+
+ Extract information from the migration recipe about which files to
+ migrate from, and which files to migrate to.
+ Also check for errors in the recipe, or bad API usage.
+ """
+
+ @classmethod
+ def validate(cls, path, code=None):
+ if code is None:
+ with open(path) as fh:
+ code = fh.read()
+ validator = cls(code, path)
+ return validator.inspect()
+
+ def __init__(self, code, path):
+ self.ast = ast.parse(code, path)
+
+ def inspect(self):
+ migrate_func = None
+ global_assigns = {}
+ for top_level in ast.iter_child_nodes(self.ast):
+ if (
+ isinstance(top_level, ast.FunctionDef)
+ and top_level.name == 'migrate'
+ ):
+ if migrate_func:
+ raise MigrateNotFoundException(
+ 'Duplicate definition of migrate'
+ )
+ migrate_func = top_level
+ details = self.inspect_migrate(migrate_func, global_assigns)
+ if isinstance(top_level, ast.Assign):
+ process_assign(top_level, global_assigns)
+ if isinstance(top_level, (ast.Import, ast.ImportFrom)):
+ if 'module' in top_level._fields:
+ module = top_level.module
+ else:
+ module = None
+ for alias in top_level.names:
+ asname = alias.asname or alias.name
+ dotted = alias.name
+ if module:
+ dotted = f'{module}.{dotted}'
+ global_assigns[asname] = dotted
+ if not migrate_func:
+ raise MigrateNotFoundException(
+ 'migrate function not found'
+ )
+ return details
+
+ def inspect_migrate(self, migrate_func, global_assigns):
+ if (
+ len(migrate_func.args.args) != 1 or
+ any(
+ getattr(migrate_func.args, arg_field)
+ for arg_field in migrate_func.args._fields
+ if arg_field != 'args'
+ )
+ ):
+ raise MigrateNotFoundException(
+ 'migrate takes only one positional argument'
+ )
+ arg = migrate_func.args.args[0]
+ if isinstance(arg, ast.Name):
+ ctx_var = arg.id # python 2
+ else:
+ ctx_var = arg.arg # python 3
+ visitor = MigrateAnalyzer(ctx_var, global_assigns)
+ visitor.visit(migrate_func)
+ return {
+ 'references': visitor.references,
+ 'issues': visitor.issues,
+ }
+
+
+def full_name(node, global_assigns):
+ leafs = []
+ while isinstance(node, ast.Attribute):
+ leafs.append(node.attr)
+ node = node.value
+ if isinstance(node, ast.Name):
+ leafs.append(global_assigns.get(node.id, node.id))
+ return '.'.join(reversed(leafs))
+
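+# An editor's illustration (not part of the upstream module): for a call
+# `helpers.transforms_from(...)` where `helpers` was imported via
+# `from fluent.migrate import helpers`, full_name(node.func, global_assigns)
+# returns 'fluent.migrate.helpers.transforms_from'.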
+
+PATH_TYPES = (str,) + (ast.Call,)
+
+
+class MigrateAnalyzer(ast.NodeVisitor):
+ def __init__(self, ctx_var, global_assigns):
+ super().__init__()
+ self.ctx_var = ctx_var
+ self.global_assigns = global_assigns
+ self.depth = 0
+ self.issues = []
+ self.references = set()
+
+ def generic_visit(self, node):
+ self.depth += 1
+ super().generic_visit(node)
+ self.depth -= 1
+
+ def visit_Assign(self, node):
+ if self.depth == 1:
+ process_assign(node, self.global_assigns)
+ self.generic_visit(node)
+
+ def visit_Attribute(self, node):
+ if isinstance(node.value, ast.Name) and node.value.id == self.ctx_var:
+ if node.attr not in (
+ 'add_transforms',
+ 'locale',
+ ):
+ raise BadContextAPIException(
+ 'Unexpected attribute access on {}.{}'.format(
+ self.ctx_var, node.attr
+ )
+ )
+ self.generic_visit(node)
+
+ def visit_Call(self, node):
+ if (
+ isinstance(node.func, ast.Attribute) and
+ isinstance(node.func.value, ast.Name) and
+ node.func.value.id == self.ctx_var
+ ):
+ return self.call_ctx(node)
+ dotted = full_name(node.func, self.global_assigns)
+ if dotted == 'fluent.migrate.helpers.transforms_from':
+ return self.call_helpers_transforms_from(node)
+ if dotted.startswith('fluent.migrate.'):
+ return self.call_transform(node, dotted)
+ self.generic_visit(node)
+
+ def call_ctx(self, node):
+ if node.func.attr == 'add_transforms':
+ return self.call_add_transforms(node)
+ raise BadContextAPIException(
+ 'Unexpected call on {}.{}'.format(
+ self.ctx_var, node.func.attr
+ )
+ )
+
+ def call_add_transforms(self, node):
+ args_msg = (
+ 'Expected arguments to {}.add_transforms: '
+ 'target_ftl_path, reference_ftl_path, list_of_transforms'
+ ).format(self.ctx_var)
+ ref_msg = (
+ 'Expected second argument to {}.add_transforms: '
+ 'reference should be string or variable with string value'
+ ).format(self.ctx_var)
+ # Just check call signature here, check actual types below
+ if not self.check_arguments(node, (ast.AST, ast.AST, ast.AST)):
+ self.issues.append({
+ 'msg': args_msg,
+ 'line': node.lineno,
+ })
+ return
+ in_reference = node.args[1]
+ if isinstance(in_reference, ast.Name):
+ in_reference = self.global_assigns.get(in_reference.id)
+ if isinstance(in_reference, ast.Str):
+ in_reference = in_reference.s
+ if not isinstance(in_reference, str):
+ self.issues.append({
+ 'msg': ref_msg,
+ 'line': node.args[1].lineno,
+ })
+ return
+ self.references.add(in_reference)
+        # node.args[1] has been checked above.
+        # There's not much we can verify about the target path
+        # (node.args[0]), so we skip it.
+        # For the transforms, we want more checks.
+ self.generic_visit(node.args[2])
+
+ def call_transform(self, node, dotted):
+ module, called = dotted.rsplit('.', 1)
+ if module not in ('fluent.migrate', 'fluent.migrate.transforms'):
+ return
+ transform = getattr(transforms, called)
+ if not issubclass(transform, transforms.Source):
+ return
+ bad_args = f'{called} takes path and key as first two params'
+ if not self.check_arguments(
+ node, ((ast.Str, ast.Name), (ast.Str, ast.Name),),
+ allow_more=True, check_kwargs=False
+ ):
+ self.issues.append({
+ 'msg': bad_args,
+ 'line': node.lineno
+ })
+ return
+ path = node.args[0]
+ if isinstance(path, ast.Str):
+ path = path.s
+ if isinstance(path, ast.Name):
+ path = self.global_assigns.get(path.id)
+ if not isinstance(path, PATH_TYPES):
+ self.issues.append({
+ 'msg': bad_args,
+ 'line': node.lineno
+ })
+
+ def call_helpers_transforms_from(self, node):
+ args_msg = (
+ 'Expected arguments to transforms_from: '
+        'str, **substitutions'
+ )
+ if not self.check_arguments(
+ node, (ast.Str,), check_kwargs=False
+ ):
+ self.issues.append({
+ 'msg': args_msg,
+ 'line': node.lineno,
+ })
+ return
+ kwargs = {}
+ found_bad_keywords = False
+ for keyword in node.keywords:
+ v = keyword.value
+ if isinstance(v, ast.Str):
+ v = v.s
+ if isinstance(v, ast.Name):
+ v = self.global_assigns.get(v.id)
+ if isinstance(v, ast.Call):
+ v = 'determined at runtime'
+ if not isinstance(v, PATH_TYPES):
+ msg = 'Bad keyword arg {} to transforms_from'.format(
+ keyword.arg
+ )
+ self.issues.append({
+ 'msg': msg,
+ 'line': node.lineno,
+ })
+ found_bad_keywords = True
+ else:
+ kwargs[keyword.arg] = v
+ if found_bad_keywords:
+ return
+ try:
+ transforms = transforms_from(node.args[0].s, **kwargs)
+ except MigrationError as e:
+ self.issues.append({
+ 'msg': str(e),
+ 'line': node.lineno,
+ })
+ return
+ ti = TransformsInspector()
+ ti.visit(transforms)
+ self.issues.extend({
+ 'msg': issue,
+ 'line': node.lineno,
+ } for issue in set(ti.issues))
+
+ def check_arguments(
+ self, node, argspec, check_kwargs=True, allow_more=False
+ ):
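+        # An editor's note (not part of the upstream module): for example,
+        # check_arguments(node, (ast.Str,)) accepts a call with exactly one
+        # string-literal argument and no keyword or star arguments.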
+ if check_kwargs and (
+ node.keywords or
+ (hasattr(node, 'kwargs') and node.kwargs)
+ ):
+ return False
+ if hasattr(node, 'starargs') and node.starargs:
+ return False
+        for arg, NODE_TYPE in zip_longest(node.args, argspec):
+            if NODE_TYPE is None:
+                return allow_more
+            if not isinstance(arg, NODE_TYPE):
+                return False
+ return True
+
+
+class TransformsInspector(Visitor):
+ def __init__(self):
+ super().__init__()
+ self.issues = []
+
+ def generic_visit(self, node):
+ if isinstance(node, transforms.Source):
+ src = node.path
+ # Source needs paths to be normalized
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1568199
+ if src != mozpath.normpath(src):
+ self.issues.append(
+ f'Source "{src}" needs to be a normalized path'
+ )
+ super().generic_visit(node)
+
+
+def cli():
+ parser = argparse.ArgumentParser()
+ parser.add_argument('migration')
+ args = parser.parse_args()
+ issues = Validator.validate(args.migration)['issues']
+ for issue in issues:
+ print(issue['msg'], 'at line', issue['line'])
+ return 1 if issues else 0
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/METADATA b/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/METADATA
new file mode 100644
index 0000000000..04346d93a0
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/METADATA
@@ -0,0 +1,42 @@
+Metadata-Version: 2.1
+Name: fluent.syntax
+Version: 0.19.0
+Summary: Localization library for expressive translations.
+Home-page: https://github.com/projectfluent/python-fluent
+Author: Mozilla
+Author-email: l10n-drivers@mozilla.org
+License: APL 2
+Keywords: fluent,localization,l10n
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3 :: Only
+Description-Content-Type: text/x-rst
+Requires-Dist: typing-extensions (<5,>=3.7)
+
+``fluent.syntax`` |fluent.syntax|
+---------------------------------
+
+Read, write, and transform `Fluent`_ files.
+
+This package includes the parser, serializer, and traversal
+utilities like Visitor and Transformer. You’re looking for this package
+if you work on tooling for Fluent in Python.
+
+.. code-block:: python
+
+ >>> from fluent.syntax import parse, ast, serialize
+ >>> resource = parse("a-key = String to localize")
+ >>> resource.body[0].value.elements[0].value = "Localized string"
+ >>> serialize(resource)
+ 'a-key = Localized string\n'
+
+
+Find the full documentation on https://projectfluent.org/python-fluent/fluent.syntax/.
+
+.. _fluent: https://projectfluent.org/
+.. |fluent.syntax| image:: https://github.com/projectfluent/python-fluent/workflows/fluent.syntax/badge.svg
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/RECORD b/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/RECORD
new file mode 100644
index 0000000000..375fe44f84
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/RECORD
@@ -0,0 +1,12 @@
+fluent/syntax/__init__.py,sha256=hSmmHKIWanCOPgJIMAA5qEQqfZLywJRWB6KZsrWjSjA,808
+fluent/syntax/ast.py,sha256=x18U6wwdDEyB2uBFPfHV5Xy01g_TPAPx5I-16eipOXo,11689
+fluent/syntax/errors.py,sha256=CDNTnoys3yzo9z4asXtdTj2tBUODmsLDCXfkLT7Iguo,2636
+fluent/syntax/parser.py,sha256=qSehrK_dthGGn4WKenWSjHSf7O5eg8l5gGaPQ2d1amo,22521
+fluent/syntax/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+fluent/syntax/serializer.py,sha256=7cMjxLpEaXPTjoVeJM8622EJj-afPIhjyw4vLKSO9O8,8207
+fluent/syntax/stream.py,sha256=DQHMDWS6vBbCzMXWle7GOlGb41rQqQ5-EwTFlSli8Ao,8638
+fluent/syntax/visitor.py,sha256=1b7ZmfXrbf_1Pwxw__dq3bRbzi1ErBnVdlAlmryvG3M,2164
+fluent.syntax-0.19.0.dist-info/METADATA,sha256=zzu67EEeV4_6dR-AKBmzgFSSKiGFMa3_WH_EXzReCHc,1561
+fluent.syntax-0.19.0.dist-info/WHEEL,sha256=a-zpFRIJzOq5QfuhBzbhiA1eHTzNCJn8OdRvhdNX0Rk,110
+fluent.syntax-0.19.0.dist-info/top_level.txt,sha256=E6y0EXb_8ntRq2470rEss448Ec6wP_-DI3zVECukrn0,7
+fluent.syntax-0.19.0.dist-info/RECORD,,
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/WHEEL b/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/WHEEL
new file mode 100644
index 0000000000..f771c29b87
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.40.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/top_level.txt b/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..a3582d405a
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent.syntax-0.19.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+fluent
diff --git a/third_party/python/fluent.syntax/fluent/syntax/__init__.py b/third_party/python/fluent.syntax/fluent/syntax/__init__.py
new file mode 100644
index 0000000000..1ff31745e6
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/__init__.py
@@ -0,0 +1,34 @@
+from typing import Any
+
+from . import ast
+from .errors import ParseError
+from .parser import FluentParser
+from .serializer import FluentSerializer
+from .stream import FluentParserStream
+from .visitor import Transformer, Visitor
+
+__all__ = [
+ 'FluentParser',
+ 'FluentParserStream',
+ 'FluentSerializer',
+ 'ParseError',
+ 'Transformer',
+ 'Visitor',
+ 'ast',
+ 'parse',
+ 'serialize'
+]
+
+
+def parse(source: str, **kwargs: Any) -> ast.Resource:
+ """Create an ast.Resource from a Fluent Syntax source.
+ """
+ parser = FluentParser(**kwargs)
+ return parser.parse(source)
+
+
+def serialize(resource: ast.Resource, **kwargs: Any) -> str:
+ """Serialize an ast.Resource to a unicode string.
+ """
+ serializer = FluentSerializer(**kwargs)
+ return serializer.serialize(resource)
diff --git a/third_party/python/fluent.syntax/fluent/syntax/ast.py b/third_party/python/fluent.syntax/fluent/syntax/ast.py
new file mode 100644
index 0000000000..d2e4849079
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/ast.py
@@ -0,0 +1,376 @@
+import re
+import sys
+import json
+from typing import Any, Callable, Dict, List, TypeVar, Union, cast
+
+Node = TypeVar('Node', bound='BaseNode')
+ToJsonFn = Callable[[Dict[str, Any]], Any]
+
+
+def to_json(value: Any, fn: Union[ToJsonFn, None] = None) -> Any:
+ if isinstance(value, BaseNode):
+ return value.to_json(fn)
+    if isinstance(value, (list, tuple)):
+        return [to_json(item, fn) for item in value]
+    else:
+        return value
+
+
+def from_json(value: Any) -> Any:
+ if isinstance(value, dict):
+ cls = getattr(sys.modules[__name__], value['type'])
+ args = {
+ k: from_json(v)
+ for k, v in value.items()
+ if k != 'type'
+ }
+ return cls(**args)
+ if isinstance(value, list):
+ return list(map(from_json, value))
+ else:
+ return value
+
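+# An editor's illustration (not part of the upstream module): to_json and
+# from_json round-trip AST nodes, e.g.
+#
+#     node = Identifier('key')
+#     from_json(node.to_json()).equals(node)  # -> True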
+
+def scalars_equal(node1: Any, node2: Any, ignored_fields: List[str]) -> bool:
+ """Compare two nodes which are not lists."""
+
+ if type(node1) != type(node2):
+ return False
+
+ if isinstance(node1, BaseNode):
+ return node1.equals(node2, ignored_fields)
+
+ return cast(bool, node1 == node2)
+
+
+class BaseNode:
+ """Base class for all Fluent AST nodes.
+
+ All productions described in the ASDL subclass BaseNode, including Span and
+ Annotation. Implements __str__, to_json and traverse.
+ """
+
+ def clone(self: Node) -> Node:
+ """Create a deep clone of the current node."""
+ def visit(value: Any) -> Any:
+ """Clone node and its descendants."""
+ if isinstance(value, BaseNode):
+ return value.clone()
+ if isinstance(value, list):
+ return [visit(child) for child in value]
+ if isinstance(value, tuple):
+ return tuple(visit(child) for child in value)
+ return value
+
+ # Use all attributes found on the node as kwargs to the constructor.
+ return self.__class__(
+ **{name: visit(value) for name, value in vars(self).items()}
+ )
+
+ def equals(self, other: 'BaseNode', ignored_fields: List[str] = ['span']) -> bool:
+ """Compare two nodes.
+
+ Nodes are deeply compared on a field by field basis. If possible, False
+ is returned early. When comparing attributes and variants in
+ SelectExpressions, the order doesn't matter. By default, spans are not
+ taken into account.
+ """
+
+ self_keys = set(vars(self).keys())
+ other_keys = set(vars(other).keys())
+
+ if ignored_fields:
+ for key in ignored_fields:
+ self_keys.discard(key)
+ other_keys.discard(key)
+
+ if self_keys != other_keys:
+ return False
+
+ for key in self_keys:
+ field1 = getattr(self, key)
+ field2 = getattr(other, key)
+
+ # List-typed nodes are compared item-by-item. When comparing
+ # attributes and variants, the order of items doesn't matter.
+ if isinstance(field1, list) and isinstance(field2, list):
+ if len(field1) != len(field2):
+ return False
+
+ for elem1, elem2 in zip(field1, field2):
+ if not scalars_equal(elem1, elem2, ignored_fields):
+ return False
+
+ elif not scalars_equal(field1, field2, ignored_fields):
+ return False
+
+ return True
+
+ def to_json(self, fn: Union[ToJsonFn, None] = None) -> Any:
+ obj = {
+ name: to_json(value, fn)
+ for name, value in vars(self).items()
+ }
+ obj.update(
+ {'type': self.__class__.__name__}
+ )
+ return fn(obj) if fn else obj
+
+ def __str__(self) -> str:
+ return json.dumps(self.to_json())
+
+
+class SyntaxNode(BaseNode):
+ """Base class for AST nodes which can have Spans."""
+
+ def __init__(self, span: Union['Span', None] = None, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.span = span
+
+ def add_span(self, start: int, end: int) -> None:
+ self.span = Span(start, end)
+
+
+class Resource(SyntaxNode):
+ def __init__(self, body: Union[List['EntryType'], None] = None, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.body = body or []
+
+
+class Entry(SyntaxNode):
+ """An abstract base class for useful elements of Resource.body."""
+
+
+class Message(Entry):
+ def __init__(self,
+ id: 'Identifier',
+ value: Union['Pattern', None] = None,
+ attributes: Union[List['Attribute'], None] = None,
+ comment: Union['Comment', None] = None,
+ **kwargs: Any):
+ super().__init__(**kwargs)
+ self.id = id
+ self.value = value
+ self.attributes = attributes or []
+ self.comment = comment
+
+
+class Term(Entry):
+ def __init__(self, id: 'Identifier', value: 'Pattern', attributes: Union[List['Attribute'], None] = None,
+ comment: Union['Comment', None] = None, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.id = id
+ self.value = value
+ self.attributes = attributes or []
+ self.comment = comment
+
+
+class Pattern(SyntaxNode):
+ def __init__(self, elements: List[Union['TextElement', 'Placeable']], **kwargs: Any):
+ super().__init__(**kwargs)
+ self.elements = elements
+
+
+class PatternElement(SyntaxNode):
+ """An abstract base class for elements of Patterns."""
+
+
+class TextElement(PatternElement):
+ def __init__(self, value: str, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.value = value
+
+
+class Placeable(PatternElement):
+ def __init__(self,
+ expression: Union['InlineExpression', 'Placeable', 'SelectExpression'],
+ **kwargs: Any):
+ super().__init__(**kwargs)
+ self.expression = expression
+
+
+class Expression(SyntaxNode):
+ """An abstract base class for expressions."""
+
+
+class Literal(Expression):
+ """An abstract base class for literals."""
+
+ def __init__(self, value: str, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.value = value
+
+ def parse(self) -> Dict[str, Any]:
+ return {'value': self.value}
+
+
+class StringLiteral(Literal):
+ def parse(self) -> Dict[str, str]:
+ def from_escape_sequence(matchobj: Any) -> str:
+ c, codepoint4, codepoint6 = matchobj.groups()
+ if c:
+ return cast(str, c)
+ codepoint = int(codepoint4 or codepoint6, 16)
+ if codepoint <= 0xD7FF or 0xE000 <= codepoint:
+ return chr(codepoint)
+            # Escape sequences representing surrogate code points are
+ # well-formed but invalid in Fluent. Replace them with U+FFFD
+ # REPLACEMENT CHARACTER.
+ return '�'
+
+ value = re.sub(
+ r'\\(?:(\\|")|u([0-9a-fA-F]{4})|U([0-9a-fA-F]{6}))',
+ from_escape_sequence,
+ self.value
+ )
+ return {'value': value}
+
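+# An editor's illustration (not part of the upstream module):
+#
+#     StringLiteral(r'\u0041').parse()  # -> {'value': 'A'}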
+
+class NumberLiteral(Literal):
+ def parse(self) -> Dict[str, Union[float, int]]:
+ value = float(self.value)
+ decimal_position = self.value.find('.')
+ precision = 0
+ if decimal_position >= 0:
+ precision = len(self.value) - decimal_position - 1
+ return {
+ 'value': value,
+ 'precision': precision
+ }
+
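+# An editor's illustration (not part of the upstream module):
+#
+#     NumberLiteral('3.14').parse()  # -> {'value': 3.14, 'precision': 2}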
+
+class MessageReference(Expression):
+ def __init__(self, id: 'Identifier', attribute: Union['Identifier', None] = None, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.id = id
+ self.attribute = attribute
+
+
+class TermReference(Expression):
+ def __init__(self,
+ id: 'Identifier',
+ attribute: Union['Identifier', None] = None,
+ arguments: Union['CallArguments', None] = None,
+ **kwargs: Any):
+ super().__init__(**kwargs)
+ self.id = id
+ self.attribute = attribute
+ self.arguments = arguments
+
+
+class VariableReference(Expression):
+ def __init__(self, id: 'Identifier', **kwargs: Any):
+ super().__init__(**kwargs)
+ self.id = id
+
+
+class FunctionReference(Expression):
+ def __init__(self, id: 'Identifier', arguments: 'CallArguments', **kwargs: Any):
+ super().__init__(**kwargs)
+ self.id = id
+ self.arguments = arguments
+
+
+class SelectExpression(Expression):
+ def __init__(self, selector: 'InlineExpression', variants: List['Variant'], **kwargs: Any):
+ super().__init__(**kwargs)
+ self.selector = selector
+ self.variants = variants
+
+
+class CallArguments(SyntaxNode):
+ def __init__(self,
+ positional: Union[List[Union['InlineExpression', Placeable]], None] = None,
+ named: Union[List['NamedArgument'], None] = None,
+ **kwargs: Any):
+ super().__init__(**kwargs)
+ self.positional = [] if positional is None else positional
+ self.named = [] if named is None else named
+
+
+class Attribute(SyntaxNode):
+ def __init__(self, id: 'Identifier', value: Pattern, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.id = id
+ self.value = value
+
+
+class Variant(SyntaxNode):
+ def __init__(self, key: Union['Identifier', NumberLiteral], value: Pattern, default: bool = False, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.key = key
+ self.value = value
+ self.default = default
+
+
+class NamedArgument(SyntaxNode):
+ def __init__(self, name: 'Identifier', value: Union[NumberLiteral, StringLiteral], **kwargs: Any):
+ super().__init__(**kwargs)
+ self.name = name
+ self.value = value
+
+
+class Identifier(SyntaxNode):
+ def __init__(self, name: str, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.name = name
+
+
+class BaseComment(Entry):
+ def __init__(self, content: Union[str, None] = None, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.content = content
+
+
+class Comment(BaseComment):
+ def __init__(self, content: Union[str, None] = None, **kwargs: Any):
+ super().__init__(content, **kwargs)
+
+
+class GroupComment(BaseComment):
+ def __init__(self, content: Union[str, None] = None, **kwargs: Any):
+ super().__init__(content, **kwargs)
+
+
+class ResourceComment(BaseComment):
+ def __init__(self, content: Union[str, None] = None, **kwargs: Any):
+ super().__init__(content, **kwargs)
+
+
+class Junk(SyntaxNode):
+ def __init__(self,
+ content: Union[str, None] = None,
+ annotations: Union[List['Annotation'], None] = None,
+ **kwargs: Any):
+ super().__init__(**kwargs)
+ self.content = content
+ self.annotations = annotations or []
+
+ def add_annotation(self, annot: 'Annotation') -> None:
+ self.annotations.append(annot)
+
+
+class Span(BaseNode):
+ def __init__(self, start: int, end: int, **kwargs: Any):
+ super().__init__(**kwargs)
+ self.start = start
+ self.end = end
+
+
+class Annotation(SyntaxNode):
+ def __init__(self,
+ code: str,
+ arguments: Union[List[Any], None] = None,
+ message: Union[str, None] = None,
+ **kwargs: Any):
+ super().__init__(**kwargs)
+ self.code = code
+ self.arguments = arguments or []
+ self.message = message
+
+
+EntryType = Union[Message, Term, Comment, GroupComment, ResourceComment, Junk]
+InlineExpression = Union[NumberLiteral, StringLiteral, MessageReference,
+ TermReference, VariableReference, FunctionReference]
diff --git a/third_party/python/fluent.syntax/fluent/syntax/errors.py b/third_party/python/fluent.syntax/fluent/syntax/errors.py
new file mode 100644
index 0000000000..010374828f
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/errors.py
@@ -0,0 +1,70 @@
+from typing import Tuple, Union
+
+
+class ParseError(Exception):
+ def __init__(self, code: str, *args: Union[str, None]):
+ self.code = code
+ self.args = args
+ self.message = get_error_message(code, args)
+
+
+def get_error_message(code: str, args: Tuple[Union[str, None], ...]) -> str:
+ if code == 'E00001':
+ return 'Generic error'
+ if code == 'E0002':
+ return 'Expected an entry start'
+ if code == 'E0003':
+ return 'Expected token: "{}"'.format(args[0])
+ if code == 'E0004':
+ return 'Expected a character from range: "{}"'.format(args[0])
+ if code == 'E0005':
+ msg = 'Expected message "{}" to have a value or attributes'
+ return msg.format(args[0])
+ if code == 'E0006':
+ msg = 'Expected term "-{}" to have a value'
+ return msg.format(args[0])
+ if code == 'E0007':
+ return 'Keyword cannot end with a whitespace'
+ if code == 'E0008':
+ return 'The callee has to be an upper-case identifier or a term'
+ if code == 'E0009':
+ return 'The argument name has to be a simple identifier'
+ if code == 'E0010':
+ return 'Expected one of the variants to be marked as default (*)'
+ if code == 'E0011':
+ return 'Expected at least one variant after "->"'
+ if code == 'E0012':
+ return 'Expected value'
+ if code == 'E0013':
+ return 'Expected variant key'
+ if code == 'E0014':
+ return 'Expected literal'
+ if code == 'E0015':
+ return 'Only one variant can be marked as default (*)'
+ if code == 'E0016':
+ return 'Message references cannot be used as selectors'
+ if code == 'E0017':
+ return 'Terms cannot be used as selectors'
+ if code == 'E0018':
+ return 'Attributes of messages cannot be used as selectors'
+ if code == 'E0019':
+ return 'Attributes of terms cannot be used as placeables'
+ if code == 'E0020':
+ return 'Unterminated string expression'
+ if code == 'E0021':
+ return 'Positional arguments must not follow named arguments'
+ if code == 'E0022':
+ return 'Named arguments must be unique'
+ if code == 'E0024':
+ return 'Cannot access variants of a message.'
+ if code == 'E0025':
+ return 'Unknown escape sequence: \\{}.'.format(args[0])
+ if code == 'E0026':
+ return 'Invalid Unicode escape sequence: {}.'.format(args[0])
+ if code == 'E0027':
+ return 'Unbalanced closing brace in TextElement.'
+ if code == 'E0028':
+ return 'Expected an inline expression'
+ if code == 'E0029':
+ return 'Expected simple expression as selector'
+ return code
diff --git a/third_party/python/fluent.syntax/fluent/syntax/parser.py b/third_party/python/fluent.syntax/fluent/syntax/parser.py
new file mode 100644
index 0000000000..87075409f1
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/parser.py
@@ -0,0 +1,701 @@
+import re
+from typing import Any, Callable, List, Set, TypeVar, Union, cast
+from . import ast
+from .stream import EOL, FluentParserStream
+from .errors import ParseError
+
+R = TypeVar("R", bound=ast.SyntaxNode)
+
+
+def with_span(fn: Callable[..., R]) -> Callable[..., R]:
+ def decorated(self: 'FluentParser', ps: FluentParserStream, *args: Any, **kwargs: Any) -> Any:
+ if not self.with_spans:
+ return fn(self, ps, *args, **kwargs)
+
+ start = ps.index
+ node = fn(self, ps, *args, **kwargs)
+
+ # Don't re-add the span if the node already has it. This may happen
+ # when one decorated function calls another decorated function.
+ if node.span is not None:
+ return node
+
+ end = ps.index
+ node.add_span(start, end)
+ return node
+
+ return decorated
+
+
+class FluentParser:
+ """This class is used to parse Fluent source content.
+
+ ``with_spans`` enables source information in the form of
+ :class:`.ast.Span` objects for each :class:`.ast.SyntaxNode`.
+ """
+
+ def __init__(self, with_spans: bool = True):
+ self.with_spans = with_spans
+
+ def parse(self, source: str) -> ast.Resource:
+ """Create a :class:`.ast.Resource` from a Fluent source.
+ """
+ ps = FluentParserStream(source)
+ ps.skip_blank_block()
+
+ entries: List[ast.EntryType] = []
+ last_comment = None
+
+ while ps.current_char:
+ entry = self.get_entry_or_junk(ps)
+ blank_lines = ps.skip_blank_block()
+
+ # Regular Comments require special logic. Comments may be attached
+ # to Messages or Terms if they are followed immediately by them.
+ # However they should parse as standalone when they're followed by
+ # Junk. Consequently, we only attach Comments once we know that the
+ # Message or the Term parsed successfully.
+ if isinstance(entry, ast.Comment) and len(blank_lines) == 0 \
+ and ps.current_char:
+ # Stash the comment and decide what to do with it
+ # in the next pass.
+ last_comment = entry
+ continue
+
+ if last_comment is not None:
+ if isinstance(entry, (ast.Message, ast.Term)):
+ entry.comment = last_comment
+ if self.with_spans:
+ cast(ast.Span, entry.span).start = cast(ast.Span, entry.comment.span).start
+ else:
+ entries.append(last_comment)
+ # In either case, the stashed comment has been dealt with;
+ # clear it.
+ last_comment = None
+
+ entries.append(entry)
+
+ res = ast.Resource(entries)
+
+ if self.with_spans:
+ res.add_span(0, ps.index)
+
+ return res
+
+ def parse_entry(self, source: str) -> ast.EntryType:
+ """Parse the first :class:`.ast.Entry` in source.
+
+ Skip all encountered comments and start parsing at the first :class:`.ast.Message`
+ or :class:`.ast.Term` start. Return :class:`.ast.Junk` if the parsing is not successful.
+
+ Preceding comments are ignored unless they contain syntax errors
+ themselves, in which case :class:`.ast.Junk` for the invalid comment is returned.
+ """
+ ps = FluentParserStream(source)
+ ps.skip_blank_block()
+
+ while ps.current_char == '#':
+ skipped = self.get_entry_or_junk(ps)
+ if isinstance(skipped, ast.Junk):
+ # Don't skip Junk comments.
+ return skipped
+ ps.skip_blank_block()
+
+ return self.get_entry_or_junk(ps)
+
+ def get_entry_or_junk(self, ps: FluentParserStream) -> ast.EntryType:
+ entry_start_pos = ps.index
+
+ try:
+ entry = self.get_entry(ps)
+ ps.expect_line_end()
+ return entry
+ except ParseError as err:
+ error_index = ps.index
+ ps.skip_to_next_entry_start(entry_start_pos)
+ next_entry_start = ps.index
+ if next_entry_start < error_index:
+ # The position of the error must be inside of the Junk's span.
+ error_index = next_entry_start
+
+ # Create a Junk instance
+ slice = ps.string[entry_start_pos:next_entry_start]
+ junk = ast.Junk(slice)
+ if self.with_spans:
+ junk.add_span(entry_start_pos, next_entry_start)
+ annot = ast.Annotation(err.code, list(err.args) if err.args else None, err.message)
+ annot.add_span(error_index, error_index)
+ junk.add_annotation(annot)
+ return junk
+
+ def get_entry(self, ps: FluentParserStream) -> ast.EntryType:
+ if ps.current_char == '#':
+ return self.get_comment(ps)
+
+ if ps.current_char == '-':
+ return self.get_term(ps)
+
+ if ps.is_identifier_start():
+ return self.get_message(ps)
+
+ raise ParseError('E0002')
+
+ @with_span
+ def get_comment(self, ps: FluentParserStream) -> Union[ast.Comment, ast.GroupComment, ast.ResourceComment]:
+ # 0 - comment
+ # 1 - group comment
+ # 2 - resource comment
+ level = -1
+ content = ''
+
+ while True:
+ i = -1
+ while ps.current_char == '#' \
+ and (i < (2 if level == -1 else level)):
+ ps.next()
+ i += 1
+
+ if level == -1:
+ level = i
+
+ if ps.current_char != EOL:
+ ps.expect_char(' ')
+ ch = ps.take_char(lambda x: x != EOL)
+ while ch:
+ content += ch
+ ch = ps.take_char(lambda x: x != EOL)
+
+ if ps.is_next_line_comment(level=level):
+ content += cast(str, ps.current_char)
+ ps.next()
+ else:
+ break
+
+ if level == 0:
+ return ast.Comment(content)
+ elif level == 1:
+ return ast.GroupComment(content)
+ elif level == 2:
+ return ast.ResourceComment(content)
+
+ # never happens if ps.current_char == '#' when called
+ return cast(ast.Comment, None)
+
+ @with_span
+ def get_message(self, ps: FluentParserStream) -> ast.Message:
+ id = self.get_identifier(ps)
+ ps.skip_blank_inline()
+ ps.expect_char('=')
+
+ value = self.maybe_get_pattern(ps)
+ attrs = self.get_attributes(ps)
+
+ if value is None and len(attrs) == 0:
+ raise ParseError('E0005', id.name)
+
+ return ast.Message(id, value, attrs)
+
+ @with_span
+ def get_term(self, ps: FluentParserStream) -> ast.Term:
+ ps.expect_char('-')
+ id = self.get_identifier(ps)
+
+ ps.skip_blank_inline()
+ ps.expect_char('=')
+
+ value = self.maybe_get_pattern(ps)
+ if value is None:
+ raise ParseError('E0006', id.name)
+
+ attrs = self.get_attributes(ps)
+ return ast.Term(id, value, attrs)
+
+ @with_span
+ def get_attribute(self, ps: FluentParserStream) -> ast.Attribute:
+ ps.expect_char('.')
+
+ key = self.get_identifier(ps)
+
+ ps.skip_blank_inline()
+ ps.expect_char('=')
+
+ value = self.maybe_get_pattern(ps)
+ if value is None:
+ raise ParseError('E0012')
+
+ return ast.Attribute(key, value)
+
+ def get_attributes(self, ps: FluentParserStream) -> List[ast.Attribute]:
+ attrs: List[ast.Attribute] = []
+ ps.peek_blank()
+
+ while ps.is_attribute_start():
+ ps.skip_to_peek()
+ attr = self.get_attribute(ps)
+ attrs.append(attr)
+ ps.peek_blank()
+
+ return attrs
+
+ @with_span
+ def get_identifier(self, ps: FluentParserStream) -> ast.Identifier:
+ name = ps.take_id_start()
+ if name is None:
+ raise ParseError('E0004', 'a-zA-Z')
+
+ ch = ps.take_id_char()
+ while ch:
+ name += ch
+ ch = ps.take_id_char()
+
+ return ast.Identifier(name)
+
+ def get_variant_key(self, ps: FluentParserStream) -> Union[ast.Identifier, ast.NumberLiteral]:
+ ch = ps.current_char
+
+ if ch is None:
+ raise ParseError('E0013')
+
+ cc = ord(ch)
+ if ((cc >= 48 and cc <= 57) or cc == 45): # 0-9, -
+ return self.get_number(ps)
+
+ return self.get_identifier(ps)
+
+ @with_span
+ def get_variant(self, ps: FluentParserStream, has_default: bool) -> ast.Variant:
+ default_index = False
+
+ if ps.current_char == '*':
+ if has_default:
+ raise ParseError('E0015')
+ ps.next()
+ default_index = True
+
+ ps.expect_char('[')
+ ps.skip_blank()
+
+ key = self.get_variant_key(ps)
+
+ ps.skip_blank()
+ ps.expect_char(']')
+
+ value = self.maybe_get_pattern(ps)
+ if value is None:
+ raise ParseError('E0012')
+
+ return ast.Variant(key, value, default_index)
+
+ def get_variants(self, ps: FluentParserStream) -> List[ast.Variant]:
+ variants: List[ast.Variant] = []
+ has_default = False
+
+ ps.skip_blank()
+ while ps.is_variant_start():
+ variant = self.get_variant(ps, has_default)
+
+ if variant.default:
+ has_default = True
+
+ variants.append(variant)
+ ps.expect_line_end()
+ ps.skip_blank()
+
+ if len(variants) == 0:
+ raise ParseError('E0011')
+
+ if not has_default:
+ raise ParseError('E0010')
+
+ return variants
+
+ def get_digits(self, ps: FluentParserStream) -> str:
+ num = ''
+
+ ch = ps.take_digit()
+ while ch:
+ num += ch
+ ch = ps.take_digit()
+
+ if len(num) == 0:
+ raise ParseError('E0004', '0-9')
+
+ return num
+
+ @with_span
+ def get_number(self, ps: FluentParserStream) -> ast.NumberLiteral:
+ num = ''
+
+ if ps.current_char == '-':
+ num += '-'
+ ps.next()
+
+ num += self.get_digits(ps)
+
+ if ps.current_char == '.':
+ num += '.'
+ ps.next()
+ num += self.get_digits(ps)
+
+ return ast.NumberLiteral(num)
+
+ def maybe_get_pattern(self, ps: FluentParserStream) -> Union[ast.Pattern, None]:
+ '''Parse an inline or a block Pattern, or None
+
+ maybe_get_pattern distinguishes between patterns which start on the
+ same line as the identifier (aka inline singleline patterns and inline
+ multiline patterns), and patterns which start on a new line (aka block
+ patterns). The distinction is important for the dedentation logic: the
+ indent of the first line of a block pattern must be taken into account
+ when calculating the maximum common indent.
+ '''
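+ # For illustration (Fluent syntax, not part of this module):
+ #
+ #   inline = starts on the same line
+ #   block =
+ #       starts on a new line
+ #
+ # Only a block pattern's first-line indent takes part in the
+ # common-indent calculation in get_pattern.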
+ ps.peek_blank_inline()
+ if ps.is_value_start():
+ ps.skip_to_peek()
+ return self.get_pattern(ps, is_block=False)
+
+ ps.peek_blank_block()
+ if ps.is_value_continuation():
+ ps.skip_to_peek()
+ return self.get_pattern(ps, is_block=True)
+
+ return None
+
+ @with_span
+ def get_pattern(self, ps: FluentParserStream, is_block: bool) -> ast.Pattern:
+ elements: List[Any] = []
+ if is_block:
+ # A block pattern is a pattern which starts on a new line. Measure
+ # the indent of this first line for the dedentation logic.
+ blank_start = ps.index
+ first_indent = ps.skip_blank_inline()
+ elements.append(self.Indent(first_indent, blank_start, ps.index))
+ common_indent_length = len(first_indent)
+ else:
+ # Should get fixed by the subsequent min() operation
+ common_indent_length = cast(int, float('infinity'))
+
+ while ps.current_char:
+ if ps.current_char == EOL:
+ blank_start = ps.index
+ blank_lines = ps.peek_blank_block()
+ if ps.is_value_continuation():
+ ps.skip_to_peek()
+ indent = ps.skip_blank_inline()
+ common_indent_length = min(common_indent_length, len(indent))
+ elements.append(self.Indent(blank_lines + indent, blank_start, ps.index))
+ continue
+
+ # The end condition for get_pattern's while loop is a newline
+ # which is not followed by a valid pattern continuation.
+ ps.reset_peek()
+ break
+
+ if ps.current_char == '}':
+ raise ParseError('E0027')
+
+ element: Union[ast.TextElement, ast.Placeable]
+ if ps.current_char == '{':
+ element = self.get_placeable(ps)
+ else:
+ element = self.get_text_element(ps)
+
+ elements.append(element)
+
+ dedented = self.dedent(elements, common_indent_length)
+ return ast.Pattern(dedented)
+
+ class Indent(ast.SyntaxNode):
+ def __init__(self, value: str, start: int, end: int):
+ super(FluentParser.Indent, self).__init__()
+ self.value = value
+ self.add_span(start, end)
+
+ def dedent(self,
+ elements: List[Union[ast.TextElement, ast.Placeable, Indent]],
+ common_indent: int
+ ) -> List[Union[ast.TextElement, ast.Placeable]]:
+ '''Dedent a list of elements by removing the maximum common indent from
+ the beginning of text lines. The common indent is calculated in
+ get_pattern.
+ '''
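+ # E.g. with common_indent=4, an Indent value of "\n        " (a newline
+ # followed by eight spaces) becomes "\n    ", while a bare four-space
+ # indent is stripped entirely and dropped below.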
+ trimmed: List[Union[ast.TextElement, ast.Placeable]] = []
+
+ for element in elements:
+ if isinstance(element, ast.Placeable):
+ trimmed.append(element)
+ continue
+
+ if isinstance(element, self.Indent):
+ # Strip the common indent.
+ element.value = element.value[:len(element.value) - common_indent]
+ if len(element.value) == 0:
+ continue
+
+ prev = trimmed[-1] if len(trimmed) > 0 else None
+ if isinstance(prev, ast.TextElement):
+ # Join adjacent TextElements by replacing them with their sum.
+ sum = ast.TextElement(prev.value + element.value)
+ if self.with_spans:
+ sum.add_span(cast(ast.Span, prev.span).start, cast(ast.Span, element.span).end)
+ trimmed[-1] = sum
+ continue
+
+ if isinstance(element, self.Indent):
+ # If the indent hasn't been merged into a preceding
+ # TextElement, convert it into a new TextElement.
+ text_element = ast.TextElement(element.value)
+ if self.with_spans:
+ text_element.add_span(cast(ast.Span, element.span).start, cast(ast.Span, element.span).end)
+ element = text_element
+
+ trimmed.append(element)
+
+ # Trim trailing whitespace from the Pattern.
+ last_element = trimmed[-1] if len(trimmed) > 0 else None
+ if isinstance(last_element, ast.TextElement):
+ last_element.value = last_element.value.rstrip(' \n\r')
+ if last_element.value == "":
+ trimmed.pop()
+
+ return trimmed
+
+ @with_span
+ def get_text_element(self, ps: FluentParserStream) -> ast.TextElement:
+ buf = ''
+
+ while ps.current_char:
+ ch = ps.current_char
+
+ if ch == '{' or ch == '}':
+ return ast.TextElement(buf)
+
+ if ch == EOL:
+ return ast.TextElement(buf)
+
+ buf += ch
+ ps.next()
+
+ return ast.TextElement(buf)
+
+ def get_escape_sequence(self, ps: FluentParserStream) -> str:
+ next = ps.current_char
+
+ if next == '\\' or next == '"':
+ ps.next()
+ return f'\\{next}'
+
+ if next == 'u':
+ return self.get_unicode_escape_sequence(ps, next, 4)
+
+ if next == 'U':
+ return self.get_unicode_escape_sequence(ps, next, 6)
+
+ raise ParseError('E0025', next)
+
+ def get_unicode_escape_sequence(self, ps: FluentParserStream, u: str, digits: int) -> str:
+ ps.expect_char(u)
+ sequence = ''
+ for _ in range(digits):
+ ch = ps.take_hex_digit()
+ if not ch:
+ raise ParseError('E0026', f'\\{u}{sequence}{ps.current_char}')
+ sequence += ch
+
+ return f'\\{u}{sequence}'
+
+ @with_span
+ def get_placeable(self, ps: FluentParserStream) -> ast.Placeable:
+ ps.expect_char('{')
+ ps.skip_blank()
+ expression = self.get_expression(ps)
+ ps.expect_char('}')
+ return ast.Placeable(expression)
+
+ @with_span
+ def get_expression(self, ps: FluentParserStream) -> Union[ast.InlineExpression,
+ ast.Placeable,
+ ast.SelectExpression]:
+ selector = self.get_inline_expression(ps)
+
+ ps.skip_blank()
+
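+ # A "->" arrow after the selector introduces a select expression.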
+ if ps.current_char == '-':
+ if ps.peek() != '>':
+ ps.reset_peek()
+ return selector
+
+ if isinstance(selector, ast.MessageReference):
+ if selector.attribute is None:
+ raise ParseError('E0016')
+ else:
+ raise ParseError('E0018')
+
+ elif isinstance(selector, ast.TermReference):
+ if selector.attribute is None:
+ raise ParseError('E0017')
+ elif not (
+ isinstance(selector, (
+ ast.StringLiteral,
+ ast.NumberLiteral,
+ ast.VariableReference,
+ ast.FunctionReference,
+ ))
+ ):
+ raise ParseError('E0029')
+
+ ps.next()
+ ps.next()
+
+ ps.skip_blank_inline()
+ ps.expect_line_end()
+
+ variants = self.get_variants(ps)
+ return ast.SelectExpression(selector, variants)
+
+ if (
+ isinstance(selector, ast.TermReference)
+ and selector.attribute is not None
+ ):
+ raise ParseError('E0019')
+
+ return selector
+
+ @with_span
+ def get_inline_expression(self, ps: FluentParserStream) -> Union[ast.InlineExpression, ast.Placeable]:
+ if ps.current_char == '{':
+ return self.get_placeable(ps)
+
+ if ps.is_number_start():
+ return self.get_number(ps)
+
+ if ps.current_char == '"':
+ return self.get_string(ps)
+
+ if ps.current_char == '$':
+ ps.next()
+ id = self.get_identifier(ps)
+ return ast.VariableReference(id)
+
+ if ps.current_char == '-':
+ ps.next()
+ id = self.get_identifier(ps)
+ attribute = None
+ if ps.current_char == '.':
+ ps.next()
+ attribute = self.get_identifier(ps)
+ arguments = None
+ ps.peek_blank()
+ if ps.current_peek == '(':
+ ps.skip_to_peek()
+ arguments = self.get_call_arguments(ps)
+ return ast.TermReference(id, attribute, arguments)
+
+ if ps.is_identifier_start():
+ id = self.get_identifier(ps)
+ ps.peek_blank()
+
+ if ps.current_peek == '(':
+ # It's a Function. Ensure it's all upper-case.
+ if not re.match('^[A-Z][A-Z0-9_-]*$', id.name):
+ raise ParseError('E0008')
+ ps.skip_to_peek()
+ args = self.get_call_arguments(ps)
+ return ast.FunctionReference(id, args)
+
+ attribute = None
+ if ps.current_char == '.':
+ ps.next()
+ attribute = self.get_identifier(ps)
+
+ return ast.MessageReference(id, attribute)
+
+ raise ParseError('E0028')
+
+ @with_span
+ def get_call_argument(self,
+ ps: FluentParserStream
+ ) -> Union[ast.InlineExpression, ast.NamedArgument, ast.Placeable]:
+ exp = self.get_inline_expression(ps)
+
+ ps.skip_blank()
+
+ if ps.current_char != ':':
+ return exp
+
+ if isinstance(exp, ast.MessageReference) and exp.attribute is None:
+ ps.next()
+ ps.skip_blank()
+
+ value = self.get_literal(ps)
+ return ast.NamedArgument(exp.id, value)
+
+ raise ParseError('E0009')
+
+ @with_span
+ def get_call_arguments(self, ps: FluentParserStream) -> ast.CallArguments:
+ positional: List[Union[ast.InlineExpression, ast.Placeable]] = []
+ named: List[ast.NamedArgument] = []
+ argument_names: Set[str] = set()
+
+ ps.expect_char('(')
+ ps.skip_blank()
+
+ while True:
+ if ps.current_char == ')':
+ break
+
+ arg = self.get_call_argument(ps)
+ if isinstance(arg, ast.NamedArgument):
+ if arg.name.name in argument_names:
+ raise ParseError('E0022')
+ named.append(arg)
+ argument_names.add(arg.name.name)
+ elif len(argument_names) > 0:
+ raise ParseError('E0021')
+ else:
+ positional.append(arg)
+
+ ps.skip_blank()
+
+ if ps.current_char == ',':
+ ps.next()
+ ps.skip_blank()
+ continue
+
+ break
+
+ ps.expect_char(')')
+ return ast.CallArguments(positional, named)
+
+ @with_span
+ def get_string(self, ps: FluentParserStream) -> ast.StringLiteral:
+ value = ''
+
+ ps.expect_char('"')
+
+ while True:
+ ch = ps.take_char(lambda x: x != '"' and x != EOL)
+ if not ch:
+ break
+ if ch == '\\':
+ value += self.get_escape_sequence(ps)
+ else:
+ value += ch
+
+ if ps.current_char == EOL:
+ raise ParseError('E0020')
+
+ ps.expect_char('"')
+
+ return ast.StringLiteral(value)
+
+ @with_span
+ def get_literal(self, ps: FluentParserStream) -> Union[ast.NumberLiteral, ast.StringLiteral]:
+ if ps.is_number_start():
+ return self.get_number(ps)
+ if ps.current_char == '"':
+ return self.get_string(ps)
+ raise ParseError('E0014')
diff --git a/third_party/python/fluent.syntax/fluent/syntax/py.typed b/third_party/python/fluent.syntax/fluent/syntax/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/py.typed
diff --git a/third_party/python/fluent.syntax/fluent/syntax/serializer.py b/third_party/python/fluent.syntax/fluent/syntax/serializer.py
new file mode 100644
index 0000000000..68ea89b3d3
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/serializer.py
@@ -0,0 +1,237 @@
+from typing import List, Union
+from . import ast
+
+
+def indent_except_first_line(content: str) -> str:
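+ # splitlines(True) keeps the line endings, so the join prepends the
+ # indent string to every line except the first.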
+ return " ".join(
+ content.splitlines(True)
+ )
+
+
+def includes_new_line(elem: Union[ast.TextElement, ast.Placeable]) -> bool:
+ return isinstance(elem, ast.TextElement) and "\n" in elem.value
+
+
+def is_select_expr(elem: Union[ast.TextElement, ast.Placeable]) -> bool:
+ return (
+ isinstance(elem, ast.Placeable) and
+ isinstance(elem.expression, ast.SelectExpression))
+
+
+def should_start_on_new_line(pattern: ast.Pattern) -> bool:
+ is_multiline = any(is_select_expr(elem) for elem in pattern.elements) \
+ or any(includes_new_line(elem) for elem in pattern.elements)
+
+ if is_multiline:
+ first_element = pattern.elements[0]
+ if isinstance(first_element, ast.TextElement):
+ first_char = first_element.value[0]
+ if first_char in ("[", ".", "*"):
+ return False
+ return True
+ return False
+
+
+class FluentSerializer:
+ """FluentSerializer converts :class:`.ast.SyntaxNode` objects to unicode strings.
+
+ `with_junk` controls whether Junk entries (unparsed content) are serialized as well.
+ """
+ HAS_ENTRIES = 1
+
+ def __init__(self, with_junk: bool = False):
+ self.with_junk = with_junk
+
+ def serialize(self, resource: ast.Resource) -> str:
+ "Serialize a :class:`.ast.Resource` to a string."
+ if not isinstance(resource, ast.Resource):
+ raise Exception('Unknown resource type: {}'.format(type(resource)))
+
+ state = 0
+
+ parts: List[str] = []
+ for entry in resource.body:
+ if not isinstance(entry, ast.Junk) or self.with_junk:
+ parts.append(self.serialize_entry(entry, state))
+ if not state & self.HAS_ENTRIES:
+ state |= self.HAS_ENTRIES
+
+ return "".join(parts)
+
+ def serialize_entry(self, entry: ast.EntryType, state: int = 0) -> str:
+ "Serialize an :class:`.ast.Entry` to a string."
+ if isinstance(entry, ast.Message):
+ return serialize_message(entry)
+ if isinstance(entry, ast.Term):
+ return serialize_term(entry)
+ if isinstance(entry, ast.Comment):
+ if state & self.HAS_ENTRIES:
+ return "\n{}\n".format(serialize_comment(entry, "#"))
+ return "{}\n".format(serialize_comment(entry, "#"))
+ if isinstance(entry, ast.GroupComment):
+ if state & self.HAS_ENTRIES:
+ return "\n{}\n".format(serialize_comment(entry, "##"))
+ return "{}\n".format(serialize_comment(entry, "##"))
+ if isinstance(entry, ast.ResourceComment):
+ if state & self.HAS_ENTRIES:
+ return "\n{}\n".format(serialize_comment(entry, "###"))
+ return "{}\n".format(serialize_comment(entry, "###"))
+ if isinstance(entry, ast.Junk):
+ return serialize_junk(entry)
+ raise Exception('Unknown entry type: {}'.format(type(entry)))
+
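+# A minimal usage sketch (illustrative, not part of the upstream module):
+#
+#     from fluent.syntax import parse
+#     from fluent.syntax.serializer import FluentSerializer
+#
+#     resource = parse("hello = Hello, world!\n")
+#     FluentSerializer().serialize(resource)  # => 'hello = Hello, world!\n'
+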
+
+def serialize_comment(comment: Union[ast.Comment, ast.GroupComment, ast.ResourceComment], prefix: str = "#") -> str:
+ if not comment.content:
+ return f'{prefix}\n'
+
+ prefixed = "\n".join([
+ prefix if len(line) == 0 else f"{prefix} {line}"
+ for line in comment.content.split("\n")
+ ])
+ # Add the trailing line break.
+ return f'{prefixed}\n'
+
+
+def serialize_junk(junk: ast.Junk) -> str:
+ return junk.content or ''
+
+
+def serialize_message(message: ast.Message) -> str:
+ parts: List[str] = []
+
+ if message.comment:
+ parts.append(serialize_comment(message.comment))
+
+ parts.append(f"{message.id.name} =")
+
+ if message.value:
+ parts.append(serialize_pattern(message.value))
+
+ if message.attributes:
+ for attribute in message.attributes:
+ parts.append(serialize_attribute(attribute))
+
+ parts.append("\n")
+ return ''.join(parts)
+
+
+def serialize_term(term: ast.Term) -> str:
+ parts: List[str] = []
+
+ if term.comment:
+ parts.append(serialize_comment(term.comment))
+
+ parts.append(f"-{term.id.name} =")
+ parts.append(serialize_pattern(term.value))
+
+ if term.attributes:
+ for attribute in term.attributes:
+ parts.append(serialize_attribute(attribute))
+
+ parts.append("\n")
+ return ''.join(parts)
+
+
+def serialize_attribute(attribute: ast.Attribute) -> str:
+ return "\n .{} ={}".format(
+ attribute.id.name,
+ indent_except_first_line(serialize_pattern(attribute.value))
+ )
+
+
+def serialize_pattern(pattern: ast.Pattern) -> str:
+ content = "".join(serialize_element(elem) for elem in pattern.elements)
+ content = indent_except_first_line(content)
+
+ if should_start_on_new_line(pattern):
+ return f'\n {content}'
+
+ return f' {content}'
+
+
+def serialize_element(element: ast.PatternElement) -> str:
+ if isinstance(element, ast.TextElement):
+ return element.value
+ if isinstance(element, ast.Placeable):
+ return serialize_placeable(element)
+ raise Exception('Unknown element type: {}'.format(type(element)))
+
+
+def serialize_placeable(placeable: ast.Placeable) -> str:
+ expr = placeable.expression
+ if isinstance(expr, ast.Placeable):
+ return "{{{}}}".format(serialize_placeable(expr))
+ if isinstance(expr, ast.SelectExpression):
+ # Special-case select expressions to control the whitespace around the
+ # opening and the closing brace.
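+ # E.g. "{ $num }" for a plain expression, but "{ $sel ->\n ...\n}" for a
+ # select expression, whose serialized form already ends with a newline.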
+ return "{{ {}}}".format(serialize_expression(expr))
+ if isinstance(expr, ast.Expression):
+ return "{{ {} }}".format(serialize_expression(expr))
+ raise Exception('Unknown expression type: {}'.format(type(expr)))
+
+
+def serialize_expression(expression: Union[ast.Expression, ast.Placeable]) -> str:
+ if isinstance(expression, ast.StringLiteral):
+ return f'"{expression.value}"'
+ if isinstance(expression, ast.NumberLiteral):
+ return expression.value
+ if isinstance(expression, ast.VariableReference):
+ return f"${expression.id.name}"
+ if isinstance(expression, ast.TermReference):
+ out = f"-{expression.id.name}"
+ if expression.attribute is not None:
+ out += f".{expression.attribute.name}"
+ if expression.arguments is not None:
+ out += serialize_call_arguments(expression.arguments)
+ return out
+ if isinstance(expression, ast.MessageReference):
+ out = expression.id.name
+ if expression.attribute is not None:
+ out += f".{expression.attribute.name}"
+ return out
+ if isinstance(expression, ast.FunctionReference):
+ args = serialize_call_arguments(expression.arguments)
+ return f"{expression.id.name}{args}"
+ if isinstance(expression, ast.SelectExpression):
+ out = "{} ->".format(
+ serialize_expression(expression.selector))
+ for variant in expression.variants:
+ out += serialize_variant(variant)
+ return f"{out}\n"
+ if isinstance(expression, ast.Placeable):
+ return serialize_placeable(expression)
+ raise Exception('Unknown expression type: {}'.format(type(expression)))
+
+
+def serialize_variant(variant: ast.Variant) -> str:
+ return "\n{}[{}]{}".format(
+ " *" if variant.default else " ",
+ serialize_variant_key(variant.key),
+ indent_except_first_line(serialize_pattern(variant.value))
+ )
+
+
+def serialize_call_arguments(expr: ast.CallArguments) -> str:
+ positional = ", ".join(
+ serialize_expression(arg) for arg in expr.positional)
+ named = ", ".join(
+ serialize_named_argument(arg) for arg in expr.named)
+ if len(expr.positional) > 0 and len(expr.named) > 0:
+ return f'({positional}, {named})'
+ return '({})'.format(positional or named)
+
+
+def serialize_named_argument(arg: ast.NamedArgument) -> str:
+ return "{}: {}".format(
+ arg.name.name,
+ serialize_expression(arg.value)
+ )
+
+
+def serialize_variant_key(key: Union[ast.Identifier, ast.NumberLiteral]) -> str:
+ if isinstance(key, ast.Identifier):
+ return key.name
+ if isinstance(key, ast.NumberLiteral):
+ return key.value
+ raise Exception('Unknown variant key type: {}'.format(type(key)))
diff --git a/third_party/python/fluent.syntax/fluent/syntax/stream.py b/third_party/python/fluent.syntax/fluent/syntax/stream.py
new file mode 100644
index 0000000000..150ac933ca
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/stream.py
@@ -0,0 +1,283 @@
+from typing import Callable, Union
+from typing_extensions import Literal
+from .errors import ParseError
+
+
+class ParserStream:
+ def __init__(self, string: str):
+ self.string = string
+ self.index = 0
+ self.peek_offset = 0
+
+ def get(self, offset: int) -> Union[str, None]:
+ try:
+ return self.string[offset]
+ except IndexError:
+ return None
+
+ def char_at(self, offset: int) -> Union[str, None]:
+ # When the cursor is at CRLF, return LF but don't move the cursor. The
+ # cursor still points to the EOL position, which in this case is the
+ # beginning of the compound CRLF sequence. This ensures slices of
+ # [inclusive, exclusive) continue to work properly.
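+ # E.g. for the string "a\r\nb", char_at(1) and char_at(2) both return
+ # '\n', while index 1 still marks the start of the CRLF pair.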
+ if self.get(offset) == '\r' \
+ and self.get(offset + 1) == '\n':
+ return '\n'
+
+ return self.get(offset)
+
+ @property
+ def current_char(self) -> Union[str, None]:
+ return self.char_at(self.index)
+
+ @property
+ def current_peek(self) -> Union[str, None]:
+ return self.char_at(self.index + self.peek_offset)
+
+ def next(self) -> Union[str, None]:
+ self.peek_offset = 0
+ # Skip over CRLF as if it was a single character.
+ if self.get(self.index) == '\r' \
+ and self.get(self.index + 1) == '\n':
+ self.index += 1
+ self.index += 1
+ return self.get(self.index)
+
+ def peek(self) -> Union[str, None]:
+ # Skip over CRLF as if it was a single character.
+ if self.get(self.index + self.peek_offset) == '\r' \
+ and self.get(self.index + self.peek_offset + 1) == '\n':
+ self.peek_offset += 1
+ self.peek_offset += 1
+ return self.get(self.index + self.peek_offset)
+
+ def reset_peek(self, offset: int = 0) -> None:
+ self.peek_offset = offset
+
+ def skip_to_peek(self) -> None:
+ self.index += self.peek_offset
+ self.peek_offset = 0
+
+
+EOL = '\n'
+EOF = None
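+# Characters which, at the start of a line, introduce new syntax (attributes,
+# variants, closing braces) and therefore cannot continue a pattern.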
+SPECIAL_LINE_START_CHARS = ('}', '.', '[', '*')
+
+
+class FluentParserStream(ParserStream):
+
+ def peek_blank_inline(self) -> str:
+ start = self.index + self.peek_offset
+ while self.current_peek == ' ':
+ self.peek()
+ return self.string[start:self.index + self.peek_offset]
+
+ def skip_blank_inline(self) -> str:
+ blank = self.peek_blank_inline()
+ self.skip_to_peek()
+ return blank
+
+ def peek_blank_block(self) -> str:
+ blank = ""
+ while True:
+ line_start = self.peek_offset
+ self.peek_blank_inline()
+
+ if self.current_peek == EOL:
+ blank += EOL
+ self.peek()
+ continue
+
+ if self.current_peek is EOF:
+ # Treat the blank line at EOF as a blank block.
+ return blank
+
+ # Any other char; reset to column 1 on this line.
+ self.reset_peek(line_start)
+ return blank
+
+ def skip_blank_block(self) -> str:
+ blank = self.peek_blank_block()
+ self.skip_to_peek()
+ return blank
+
+ def peek_blank(self) -> None:
+ while self.current_peek in (" ", EOL):
+ self.peek()
+
+ def skip_blank(self) -> None:
+ self.peek_blank()
+ self.skip_to_peek()
+
+ def expect_char(self, ch: str) -> Literal[True]:
+ if self.current_char == ch:
+ self.next()
+ return True
+
+ raise ParseError('E0003', ch)
+
+ def expect_line_end(self) -> Literal[True]:
+ if self.current_char is EOF:
+ # EOF is a valid line end in Fluent.
+ return True
+
+ if self.current_char == EOL:
+ self.next()
+ return True
+
+ # Unicode Character 'SYMBOL FOR NEWLINE' (U+2424)
+ raise ParseError('E0003', '\u2424')
+
+ def take_char(self, f: Callable[[str], bool]) -> Union[str, Literal[False], None]:
+ ch = self.current_char
+ if ch is None:
+ return EOF
+ if f(ch):
+ self.next()
+ return ch
+ return False
+
+ def is_char_id_start(self, ch: Union[str, None]) -> bool:
+ if ch is None:
+ return False
+
+ cc = ord(ch)
+ return (cc >= 97 and cc <= 122) or \
+ (cc >= 65 and cc <= 90)
+
+ def is_identifier_start(self) -> bool:
+ return self.is_char_id_start(self.current_peek)
+
+ def is_number_start(self) -> bool:
+ ch = self.peek() if self.current_char == '-' else self.current_char
+ if ch is None:
+ self.reset_peek()
+ return False
+
+ cc = ord(ch)
+ is_digit = cc >= 48 and cc <= 57
+ self.reset_peek()
+ return is_digit
+
+ def is_char_pattern_continuation(self, ch: Union[str, None]) -> bool:
+ if ch is EOF:
+ return False
+
+ return ch not in SPECIAL_LINE_START_CHARS
+
+ def is_value_start(self) -> bool:
+ # Inline Patterns may start with any char.
+ return self.current_peek is not EOF and self.current_peek != EOL
+
+ def is_value_continuation(self) -> bool:
+ column1 = self.peek_offset
+ self.peek_blank_inline()
+
+ if self.current_peek == '{':
+ self.reset_peek(column1)
+ return True
+
+ if self.peek_offset - column1 == 0:
+ return False
+
+ if self.is_char_pattern_continuation(self.current_peek):
+ self.reset_peek(column1)
+ return True
+
+ return False
+
+ # -1 - any
+ # 0 - comment
+ # 1 - group comment
+ # 2 - resource comment
+ def is_next_line_comment(self, level: int = -1) -> bool:
+ if self.current_peek != EOL:
+ return False
+
+ i = 0
+
+ while (i <= level or (level == -1 and i < 3)):
+ if self.peek() != '#':
+ if i <= level and level != -1:
+ self.reset_peek()
+ return False
+ break
+ i += 1
+
+ # The first char after #, ## or ###.
+ if self.peek() in (' ', EOL):
+ self.reset_peek()
+ return True
+
+ self.reset_peek()
+ return False
+
+ def is_variant_start(self) -> bool:
+ current_peek_offset = self.peek_offset
+ if self.current_peek == '*':
+ self.peek()
+ if self.current_peek == '[' and self.peek() != '[':
+ self.reset_peek(current_peek_offset)
+ return True
+
+ self.reset_peek(current_peek_offset)
+ return False
+
+ def is_attribute_start(self) -> bool:
+ return self.current_peek == '.'
+
+ def skip_to_next_entry_start(self, junk_start: int) -> None:
+ last_newline = self.string.rfind(EOL, 0, self.index)
+ if junk_start < last_newline:
+ # Last seen newline is _after_ the junk start. It's safe to rewind
+ # without the risk of resuming at the same broken entry.
+ self.index = last_newline
+
+ while self.current_char:
+ # We're only interested in beginnings of line.
+ if self.current_char != EOL:
+ self.next()
+ continue
+
+ # Break if the first char in this line looks like an entry start.
+ first = self.next()
+ if self.is_char_id_start(first) or first == '-' or first == '#':
+ break
+
+ # Syntax 0.4 compatibility
+ peek = self.peek()
+ self.reset_peek()
+ if (first, peek) == ('/', '/') or (first, peek) == ('[', '['):
+ break
+
+ def take_id_start(self) -> Union[str, None]:
+ if self.is_char_id_start(self.current_char):
+ ret = self.current_char
+ self.next()
+ return ret
+
+ raise ParseError('E0004', 'a-zA-Z')
+
+ def take_id_char(self) -> Union[str, Literal[False], None]:
+ def closure(ch: str) -> bool:
+ cc = ord(ch)
+ return ((cc >= 97 and cc <= 122) or
+ (cc >= 65 and cc <= 90) or
+ (cc >= 48 and cc <= 57) or
+ cc == 95 or cc == 45)
+ return self.take_char(closure)
+
+ def take_digit(self) -> Union[str, Literal[False], None]:
+ def closure(ch: str) -> bool:
+ cc = ord(ch)
+ return (cc >= 48 and cc <= 57)
+ return self.take_char(closure)
+
+ def take_hex_digit(self) -> Union[str, Literal[False], None]:
+ def closure(ch: str) -> bool:
+ cc = ord(ch)
+ return (
+ (cc >= 48 and cc <= 57) # 0-9
+ or (cc >= 65 and cc <= 70) # A-F
+ or (cc >= 97 and cc <= 102)) # a-f
+ return self.take_char(closure)
diff --git a/third_party/python/fluent.syntax/fluent/syntax/visitor.py b/third_party/python/fluent.syntax/fluent/syntax/visitor.py
new file mode 100644
index 0000000000..0df9f5963e
--- /dev/null
+++ b/third_party/python/fluent.syntax/fluent/syntax/visitor.py
@@ -0,0 +1,65 @@
+from typing import Any, List
+from .ast import BaseNode, Node
+
+
+class Visitor:
+ '''Read-only visitor pattern.
+
+ Subclass this to gather information from an AST.
+ To generally define which nodes not to descend into, override
+ `generic_visit`.
+ To handle specific node types, add methods like `visit_Pattern`.
+ If you want to still descend into the children of the node, call
+ `generic_visit` of the superclass.
+ '''
+
+ def visit(self, node: Any) -> None:
+ if isinstance(node, list):
+ for child in node:
+ self.visit(child)
+ return
+ if not isinstance(node, BaseNode):
+ return
+ nodename = type(node).__name__
+ visit = getattr(self, f'visit_{nodename}', self.generic_visit)
+ visit(node)
+
+ def generic_visit(self, node: BaseNode) -> None:
+ for propvalue in vars(node).values():
+ self.visit(propvalue)
+
+
+class Transformer(Visitor):
+ '''In-place AST Transformer pattern.
+
+ Subclass this to create an in-place modified variant
+ of the given AST.
+ If you need to keep the original AST around, pass
+ a `node.clone()` to the transformer.
+ '''
+
+ def visit(self, node: Any) -> Any:
+ if not isinstance(node, BaseNode):
+ return node
+
+ nodename = type(node).__name__
+ visit = getattr(self, f'visit_{nodename}', self.generic_visit)
+ return visit(node)
+
+ def generic_visit(self, node: Node) -> Node: # type: ignore
+ for propname, propvalue in vars(node).items():
+ if isinstance(propvalue, list):
+ new_vals: List[Any] = []
+ for child in propvalue:
+ new_val = self.visit(child)
+ if new_val is not None:
+ new_vals.append(new_val)
+ # in-place manipulation
+ propvalue[:] = new_vals
+ elif isinstance(propvalue, BaseNode):
+ new_val = self.visit(propvalue)
+ if new_val is None:
+ delattr(node, propname)
+ else:
+ setattr(node, propname, new_val)
+ return node
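+
+
+# A minimal usage sketch (illustrative, not part of the upstream module):
+#
+#     from fluent.syntax import parse
+#     from fluent.syntax.visitor import Visitor
+#
+#     class MessageCounter(Visitor):
+#         def __init__(self):
+#             self.count = 0
+#
+#         def visit_Message(self, node):
+#             self.count += 1
+#             self.generic_visit(node)
+#
+#     counter = MessageCounter()
+#     counter.visit(parse("hello = Hello, world!"))
+#     assert counter.count == 1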
diff --git a/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/LICENSE b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/LICENSE
new file mode 100644
index 0000000000..37ec93a14f
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/METADATA b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/METADATA
new file mode 100644
index 0000000000..198277d691
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/METADATA
@@ -0,0 +1,165 @@
+Metadata-Version: 2.1
+Name: giturlparse
+Version: 0.10.0
+Summary: A Git URL parsing module (supports parsing and rewriting)
+Home-page: https://github.com/nephila/giturlparse
+Author: Aaron O Mullan
+Author-email: aaron@friendco.de
+Maintainer: Iacopo Spalletti
+Maintainer-email: i.spalletti@nephila.it
+License: Apache v2
+Keywords: giturlparse
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: Django
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+
+===========
+giturlparse
+===========
+
+Parse & rewrite git urls (supports GitHub, Bitbucket, FriendCode, Assembla, Gitlab ...)
+
+This is a fork of giturlparse.py with updated parsers.
+
+Original project can be found at https://github.com/FriendCode/giturlparse.py
+
+************
+Installing
+************
+
+::
+
+ pip install giturlparse
+
+******************
+Examples
+******************
+
+Exposed attributes
+==================
+
+* ``platform``: platform codename
+* ``host``: server hostname
+* ``resource``: same as ``host``
+* ``port``: URL port (only if explicitly defined in URL)
+* ``protocol``: URL protocol (git, ssh, http/https)
+* ``protocols``: list of protocols explicitly defined in URL
+* ``user``: repository user
+* ``owner``: repository owner (user or organization)
+* ``repo``: repository name
+* ``name``: same as ``repo``
+* ``groups``: list of groups - gitlab only
+* ``path``: path to file or directory (includes the branch name) - gitlab / github only
+* ``path_raw``: raw path starting from the repo name (might include platform keyword) - gitlab / github only
+* ``branch``: branch name (when parseable) - gitlab / github only
+
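+For example (an illustrative sketch; the attribute values follow the rules
+above)::
+
+ from giturlparse import parse
+
+ p = parse('https://github.com/nephila/giturlparse/tree/master')
+ p.platform, p.host, p.owner, p.repo, p.branch
+ # => ('github', 'github.com', 'nephila', 'giturlparse', 'master')
+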
+Parse
+==================
+
+::
+
+ from giturlparse import parse
+
+ p = parse('git@bitbucket.org:AaronO/some-repo.git')
+
+ p.host, p.owner, p.repo
+
+ # => ('bitbucket.org', 'AaronO', 'some-repo')
+
+
+Rewrite
+==================
+
+::
+
+ from giturlparse import parse
+
+ url = 'git@github.com:Org/Private-repo.git'
+
+ p = parse(url)
+
+ p.url2ssh, p.url2https, p.url2git, p.url2http
+ # => ('git@github.com:Org/Private-repo.git', 'https://github.com/Org/Private-repo.git', 'git://github.com/Org/Private-repo.git', None)
+
+URLS
+==================
+
+Alternative URLs for same repo::
+
+ from giturlparse import parse
+
+ url = 'git@github.com:Org/Private-repo.git'
+
+ parse(url).urls
+ # => {
+ # 'ssh': 'git@github.com:Org/Private-repo.git',
+ # 'https': 'https://github.com/Org/Private-repo.git',
+ # 'git': 'git://github.com/Org/Private-repo.git'
+ # }
+
+Validate
+==================
+
+::
+
+ from giturlparse import parse, validate
+
+ url = 'git@github.com:Org/Private-repo.git'
+
+ parse(url).valid
+ # => True
+
+ # Or
+
+ validate(url)
+ # => True
+
+Tests
+==================
+
+::
+
+ python setup.py test
+
+License
+==================
+
+Apache v2 (Check out LICENSE file)
+
+.. :changelog:
+
+*******
+History
+*******
+
+.. towncrier release notes start
+
+0.10.0 (2020-12-05)
+===================
+
+Features
+--------
+
+- General matching improvements (#18)
+- Update tooling, drop python2 (#10213)
+
+0.9.2 (2018-10-27)
+==================
+
+* Removed "s" from the base platform regex
+* Fix license classifier in setup.py
+* Update meta files
+
+0.9.1 (2018-01-20)
+==================
+
+* First fork release
+
+
diff --git a/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/RECORD b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/RECORD
new file mode 100644
index 0000000000..f5d97476fb
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/RECORD
@@ -0,0 +1,18 @@
+giturlparse/__init__.py,sha256=c5WMm7u1auWiuJrsY0bo1IsT6iRi8b6pGebNQC03_PI,332
+giturlparse/parser.py,sha256=BTaOH--z1-odYdOwEb5iNadYpCvUM4-bKHYXGKxGIZM,1924
+giturlparse/result.py,sha256=wKg1h9vYXkPseRgEAIk8TDPS1UMIU_z3t4IKbT7uD18,2765
+giturlparse/platforms/__init__.py,sha256=y8xzQWxqGHwlvx0pY99Hqott-xK2Q0iBzpQ9dTehTrY,527
+giturlparse/platforms/assembla.py,sha256=iPYpPOu8cNapbniD7sj63aTwPGT4DUH1U8RkvbUkiqE,498
+giturlparse/platforms/base.py,sha256=cZPxEa1u1WNq6IvhUVp3XWJtks9Dy2sifDaJAdeHclI,1566
+giturlparse/platforms/bitbucket.py,sha256=R6dsFBhuMlLe9-gIAP7X8hzJn-FHAjI-bBgnfNom4tc,680
+giturlparse/platforms/friendcode.py,sha256=w__PNSQAkNO2Y45doOw7YMDqwuSyu_FocQTRa305VM0,389
+giturlparse/platforms/github.py,sha256=G_7VRQpm5ZtvOcc1xbVF3CnC4AcCRnyK7EgkoaoqOEo,1446
+giturlparse/platforms/gitlab.py,sha256=2K65zlI8CA5OdXV9eXW3SBFH7oW78lFlkhLviW3Mwyo,1794
+giturlparse/tests/__init__.py,sha256=yBGT6Ycwx1AsTFYemzHoqrJ82seE0gfGti99VyrV3x0,37
+giturlparse/tests/parse.py,sha256=dpFzvo40qdH7Zg6CmgMqBMeZz473GhbZotmVK_nq_pk,14594
+giturlparse/tests/rewrite.py,sha256=scB7YGBUeFo3bEyI0Mvc0hK_ajlBY2RkrEGRtnrtukc,3386
+giturlparse-0.10.0.dist-info/LICENSE,sha256=c7p036pSC0mkAbXSFFmoUjoUbzt1GKgz7qXvqFEwv2g,10273
+giturlparse-0.10.0.dist-info/METADATA,sha256=NDWxArULRXhAAu2KttDMuZu1k35HvJ1eJHEcWfeB8lI,3511
+giturlparse-0.10.0.dist-info/WHEEL,sha256=oh0NKYrTcu1i1-wgrI1cnhkjYIi8WJ-8qd9Jrr5_y4E,110
+giturlparse-0.10.0.dist-info/top_level.txt,sha256=NHfX7iaRAYz-bnROU6Q0tgNInQU-YgIeeii0uznxCLA,12
+giturlparse-0.10.0.dist-info/RECORD,,
diff --git a/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/WHEEL b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/WHEEL
new file mode 100644
index 0000000000..1f227afa9f
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/top_level.txt b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..d756422c23
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse-0.10.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+giturlparse
diff --git a/third_party/python/giturlparse/giturlparse/__init__.py b/third_party/python/giturlparse/giturlparse/__init__.py
new file mode 100644
index 0000000000..aee86e3750
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/__init__.py
@@ -0,0 +1,14 @@
+from .parser import parse as _parse
+from .result import GitUrlParsed
+
+__author__ = "Iacopo Spalletti"
+__email__ = "i.spalletti@nephila.it"
+__version__ = "0.10.0"
+
+
+def parse(url, check_domain=True):
+ return GitUrlParsed(_parse(url, check_domain))
+
+
+def validate(url, check_domain=True):
+ return parse(url, check_domain).valid
diff --git a/third_party/python/giturlparse/giturlparse/parser.py b/third_party/python/giturlparse/giturlparse/parser.py
new file mode 100644
index 0000000000..c67f03500d
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/parser.py
@@ -0,0 +1,69 @@
+from collections import defaultdict
+
+from .platforms import PLATFORMS
+
+SUPPORTED_ATTRIBUTES = (
+ "domain",
+ "repo",
+ "owner",
+ "path_raw",
+ "groups_path",
+ "_user",
+ "port",
+ "url",
+ "platform",
+ "protocol",
+)
+
+
+def parse(url, check_domain=True):
+ # Values are None by default
+ parsed_info = defaultdict(lambda: None)
+ parsed_info["port"] = ""
+ parsed_info["path_raw"] = ""
+ parsed_info["groups_path"] = ""
+
+ # Default every supported attribute; map() is lazy in Python 3, so use an
+ # explicit loop to make the setdefault calls actually run
+ for attribute in SUPPORTED_ATTRIBUTES:
+ parsed_info.setdefault(attribute)
+
+ for name, platform in PLATFORMS:
+ for protocol, regex in platform.COMPILED_PATTERNS.items():
+ # print(name, protocol, regex)
+ # Match current regex against URL
+ match = regex.match(url)
+
+ # Skip if not matched
+ if not match:
+ # print("[%s] URL: %s dit not match %s" % (name, url, regex.pattern))
+ continue
+
+ # Skip if domain is bad
+ domain = match.group("domain")
+ # print('[%s] DOMAIN = %s' % (url, domain,))
+ if check_domain:
+ if platform.DOMAINS and not (domain in platform.DOMAINS):
+ continue
+ if platform.SKIP_DOMAINS and domain in platform.SKIP_DOMAINS:
+ continue
+
+ # add in platform defaults
+ parsed_info.update(platform.DEFAULTS)
+
+ # Get matches as dictionary
+ matches = platform.clean_data(match.groupdict(default=""))
+
+ # Update info with matches
+ parsed_info.update(matches)
+
+ # Update info with platform info
+ parsed_info.update(
+ {
+ "url": url,
+ "platform": name,
+ "protocol": protocol,
+ }
+ )
+ return parsed_info
+
+ # Empty if none matched
+ return parsed_info
diff --git a/third_party/python/giturlparse/giturlparse/platforms/__init__.py b/third_party/python/giturlparse/giturlparse/platforms/__init__.py
new file mode 100644
index 0000000000..8add1b7a78
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/platforms/__init__.py
@@ -0,0 +1,18 @@
+from .assembla import AssemblaPlatform
+from .base import BasePlatform
+from .bitbucket import BitbucketPlatform
+from .friendcode import FriendCodePlatform
+from .github import GitHubPlatform
+from .gitlab import GitLabPlatform
+
+# Supported platforms
+PLATFORMS = [
+ # name -> Platform object
+ ("github", GitHubPlatform()),
+ ("bitbucket", BitbucketPlatform()),
+ ("friendcode", FriendCodePlatform()),
+ ("assembla", AssemblaPlatform()),
+ ("gitlab", GitLabPlatform()),
+ # Fallback that matches any git URL; must stay last
+ ("base", BasePlatform()),
+]
diff --git a/third_party/python/giturlparse/giturlparse/platforms/assembla.py b/third_party/python/giturlparse/giturlparse/platforms/assembla.py
new file mode 100644
index 0000000000..2624e85954
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/platforms/assembla.py
@@ -0,0 +1,14 @@
+from .base import BasePlatform
+
+
+class AssemblaPlatform(BasePlatform):
+ DOMAINS = ("git.assembla.com",)
+ PATTERNS = {
+ "ssh": r"(?P<protocols>(git\+)?(?P<protocol>ssh))?(://)?git@(?P<domain>.+?):(?P<pathname>(?P<repo>.+)).git",
+ "git": r"(?P<protocols>(?P<protocol>git))://(?P<domain>.+?)/(?P<pathname>(?P<repo>.+)).git",
+ }
+ FORMATS = {
+ "ssh": r"git@%(domain)s:%(repo)s.git",
+ "git": r"git://%(domain)s/%(repo)s.git",
+ }
+ DEFAULTS = {"_user": "git"}
diff --git a/third_party/python/giturlparse/giturlparse/platforms/base.py b/third_party/python/giturlparse/giturlparse/platforms/base.py
new file mode 100644
index 0000000000..000726381d
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/platforms/base.py
@@ -0,0 +1,43 @@
+import itertools
+import re
+
+
+class BasePlatform:
+ FORMATS = {
+ "ssh": r"(?P<protocols>(git\+)?(?P<protocol>ssh))?(://)?%(_user)s@%(host)s:%(repo)s.git",
+ "http": r"(?P<protocols>(git\+)?(?P<protocol>http))://%(host)s/%(repo)s.git",
+ "https": r"(?P<protocols>(git\+)?(?P<protocol>https))://%(host)s/%(repo)s.git",
+ "git": r"(?P<protocols>(?P<protocol>git))://%(host)s/%(repo)s.git",
+ }
+
+ PATTERNS = {
+ "ssh": r"(?P<_user>.+)@(?P<domain>[^/]+?):(?P<repo>.+).git",
+ "http": r"http://(?P<domain>[^/]+?)/(?P<repo>.+).git",
+ "https": r"https://(?P<domain>[^/]+?)/(?P<repo>.+).git",
+ "git": r"git://(?P<domain>[^/]+?)/(?P<repo>.+).git",
+ }
+
+ # None means it matches all domains
+ DOMAINS = None
+ SKIP_DOMAINS = None
+ DEFAULTS = {}
+
+ def __init__(self):
+ # Precompile PATTERNS
+ self.COMPILED_PATTERNS = {proto: re.compile(regex, re.IGNORECASE) for proto, regex in self.PATTERNS.items()}
+
+ # Supported protocols
+ self.PROTOCOLS = self.PATTERNS.keys()
+
+ if self.__class__ == BasePlatform:
+ sub = [subclass.SKIP_DOMAINS for subclass in self.__class__.__subclasses__() if subclass.SKIP_DOMAINS]
+ if sub:
+ self.SKIP_DOMAINS = list(itertools.chain.from_iterable(sub))
+
+ @staticmethod
+ def clean_data(data):
+ data["path"] = ""
+ data["branch"] = ""
+ data["protocols"] = list(filter(lambda x: x, data["protocols"].split("+")))
+ data["pathname"] = data["pathname"].strip(":")
+ return data
diff --git a/third_party/python/giturlparse/giturlparse/platforms/bitbucket.py b/third_party/python/giturlparse/giturlparse/platforms/bitbucket.py
new file mode 100644
index 0000000000..baab24466b
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/platforms/bitbucket.py
@@ -0,0 +1,20 @@
+from .base import BasePlatform
+
+
+class BitbucketPlatform(BasePlatform):
+ PATTERNS = {
+ "https": (
+ r"(?P<protocols>(git\+)?(?P<protocol>https))://(?P<_user>.+)@(?P<domain>.+?)"
+ r"(?P<pathname>/(?P<owner>.+)/(?P<repo>.+?)(?:\.git)?)$"
+ ),
+ "ssh": (
+ r"(?P<protocols>(git\+)?(?P<protocol>ssh))?(://)?git@(?P<domain>.+?):"
+ r"(?P<pathname>(?P<owner>.+)/(?P<repo>.+?)(?:\.git)?)$"
+ ),
+ }
+ FORMATS = {
+ "https": r"https://%(owner)s@%(domain)s/%(owner)s/%(repo)s.git",
+ "ssh": r"git@%(domain)s:%(owner)s/%(repo)s.git",
+ }
+ DOMAINS = ("bitbucket.org",)
+ DEFAULTS = {"_user": "git"}
diff --git a/third_party/python/giturlparse/giturlparse/platforms/friendcode.py b/third_party/python/giturlparse/giturlparse/platforms/friendcode.py
new file mode 100644
index 0000000000..6de9f17eab
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/platforms/friendcode.py
@@ -0,0 +1,14 @@
+from .base import BasePlatform
+
+
+class FriendCodePlatform(BasePlatform):
+ DOMAINS = ("friendco.de",)
+ PATTERNS = {
+ "https": (
+ r"(?P<protocols>(git\+)?(?P<protocol>https))://(?P<domain>.+?)/"
+ r"(?P<pathname>(?P<owner>.+)@user/(?P<repo>.+)).git"
+ ),
+ }
+ FORMATS = {
+ "https": r"https://%(domain)s/%(owner)s@user/%(repo)s.git",
+ }
diff --git a/third_party/python/giturlparse/giturlparse/platforms/github.py b/third_party/python/giturlparse/giturlparse/platforms/github.py
new file mode 100644
index 0000000000..8eb44ef513
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/platforms/github.py
@@ -0,0 +1,39 @@
+from .base import BasePlatform
+
+
+class GitHubPlatform(BasePlatform):
+ PATTERNS = {
+ "https": (
+ r"(?P<protocols>(git\+)?(?P<protocol>https))://(?P<domain>[^/]+?)"
+ r"(?P<pathname>/(?P<owner>[^/]+?)/(?P<repo>[^/]+?)(?:\.git)?(?P<path_raw>(/blob/|/tree/).+)?)$"
+ ),
+ "ssh": (
+ r"(?P<protocols>(git\+)?(?P<protocol>ssh))?(://)?git@(?P<domain>.+?)(?P<pathname>(:|/)"
+ r"(?P<owner>[^/]+)/(?P<repo>[^/]+?)(?:\.git)"
+ r"(?P<path_raw>(/blob/|/tree/).+)?)$"
+ ),
+ "git": (
+ r"(?P<protocols>(?P<protocol>git))://(?P<domain>.+?)"
+ r"(?P<pathname>/(?P<owner>[^/]+)/(?P<repo>[^/]+?)(?:\.git)?"
+ r"(?P<path_raw>(/blob/|/tree/).+)?)$"
+ ),
+ }
+ FORMATS = {
+ "https": r"https://%(domain)s/%(owner)s/%(repo)s.git%(path_raw)s",
+ "ssh": r"git@%(domain)s:%(owner)s/%(repo)s.git%(path_raw)s",
+ "git": r"git://%(domain)s/%(owner)s/%(repo)s.git%(path_raw)s",
+ }
+ DOMAINS = (
+ "github.com",
+ "gist.github.com",
+ )
+ DEFAULTS = {"_user": "git"}
+
+ @staticmethod
+ def clean_data(data):
+ data = BasePlatform.clean_data(data)
+ if data["path_raw"].startswith("/blob/"):
+ data["path"] = data["path_raw"].replace("/blob/", "")
+ if data["path_raw"].startswith("/tree/"):
+ data["branch"] = data["path_raw"].replace("/tree/", "")
+ return data
diff --git a/third_party/python/giturlparse/giturlparse/platforms/gitlab.py b/third_party/python/giturlparse/giturlparse/platforms/gitlab.py
new file mode 100644
index 0000000000..38b37efb23
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/platforms/gitlab.py
@@ -0,0 +1,43 @@
+from .base import BasePlatform
+
+
+class GitLabPlatform(BasePlatform):
+ PATTERNS = {
+ "https": (
+ r"(?P<protocols>(git\+)?(?P<protocol>https))://(?P<domain>.+?)(?P<port>:[0-9]+)?"
+ r"(?P<pathname>/(?P<owner>[^/]+?)/"
+ r"(?P<groups_path>.*?)?(?(groups_path)/)?(?P<repo>[^/]+?)(?:\.git)?"
+ r"(?P<path_raw>(/blob/|/-/tree/).+)?)$"
+ ),
+ "ssh": (
+ r"(?P<protocols>(git\+)?(?P<protocol>ssh))?(://)?git@(?P<domain>.+?):(?P<port>[0-9]+)?(?(port))?"
+ r"(?P<pathname>/?(?P<owner>[^/]+)/"
+ r"(?P<groups_path>.*?)?(?(groups_path)/)?(?P<repo>[^/]+?)(?:\.git)?"
+ r"(?P<path_raw>(/blob/|/-/tree/).+)?)$"
+ ),
+ "git": (
+ r"(?P<protocols>(?P<protocol>git))://(?P<domain>.+?):(?P<port>[0-9]+)?(?(port))?"
+ r"(?P<pathname>/?(?P<owner>[^/]+)/"
+ r"(?P<groups_path>.*?)?(?(groups_path)/)?(?P<repo>[^/]+?)(?:\.git)?"
+ r"(?P<path_raw>(/blob/|/-/tree/).+)?)$"
+ ),
+ }
+ FORMATS = {
+ "https": r"https://%(domain)s/%(owner)s/%(groups_slash)s%(repo)s.git%(path_raw)s",
+ "ssh": r"git@%(domain)s:%(port_slash)s%(owner)s/%(groups_slash)s%(repo)s.git%(path_raw)s",
+ "git": r"git://%(domain)s%(port)s/%(owner)s/%(groups_slash)s%(repo)s.git%(path_raw)s",
+ }
+ SKIP_DOMAINS = (
+ "github.com",
+ "gist.github.com",
+ )
+ DEFAULTS = {"_user": "git", "port": ""}
+
+ @staticmethod
+ def clean_data(data):
+ data = BasePlatform.clean_data(data)
+ if data["path_raw"].startswith("/blob/"):
+ data["path"] = data["path_raw"].replace("/blob/", "")
+ if data["path_raw"].startswith("/-/tree/"):
+ data["branch"] = data["path_raw"].replace("/-/tree/", "")
+ return data
diff --git a/third_party/python/giturlparse/giturlparse/result.py b/third_party/python/giturlparse/giturlparse/result.py
new file mode 100644
index 0000000000..4a33136c51
--- /dev/null
+++ b/third_party/python/giturlparse/giturlparse/result.py
@@ -0,0 +1,131 @@
+from copy import copy
+
+from .platforms import PLATFORMS
+
+# Possible values to extract from a Git Url
+REQUIRED_ATTRIBUTES = (
+ "domain",
+ "repo",
+)
+
+
+class GitUrlParsed:
+ platform = None
+
+ def __init__(self, parsed_info):
+ self._parsed = parsed_info
+
+ # Set parsed objects as attributes
+ for k, v in parsed_info.items():
+ setattr(self, k, v)
+
+ for name, platform in PLATFORMS:
+ if name == self.platform:
+ self._platform_obj = platform
+ break
+
+ def _valid_attrs(self):
+ return all([getattr(self, attr, None) for attr in REQUIRED_ATTRIBUTES]) # NOQA
+
+ @property
+ def valid(self):
+ return all(
+ [
+ self._valid_attrs(),
+ ]
+ )
+
+ ##
+ # Alias properties
+ ##
+ @property
+ def host(self):
+ return self.domain
+
+ @property
+ def resource(self):
+ return self.domain
+
+ @property
+ def name(self):
+ return self.repo
+
+ @property
+ def user(self):
+ if hasattr(self, "_user"):
+ return self._user
+
+ return self.owner
+
+ @property
+ def groups(self):
+ if self.groups_path:
+ return self.groups_path.split("/")
+ else:
+ return []
+
+ def format(self, protocol): # noqa : A0003
+ """Reformat URL to protocol."""
+ items = copy(self._parsed)
+ items["port_slash"] = "%s/" % self.port if self.port else ""
+ items["groups_slash"] = "%s/" % self.groups_path if self.groups_path else ""
+ return self._platform_obj.FORMATS[protocol] % items
+
+ @property
+ def normalized(self):
+ """Normalize URL."""
+ return self.format(self.protocol)
+
+ ##
+ # Rewriting
+ ##
+ @property
+ def url2ssh(self):
+ return self.format("ssh")
+
+ @property
+ def url2http(self):
+ return self.format("http")
+
+ @property
+ def url2https(self):
+ return self.format("https")
+
+ @property
+ def url2git(self):
+ return self.format("git")
+
+    # All supported URLs for a repo
+ @property
+ def urls(self):
+ return {protocol: self.format(protocol) for protocol in self._platform_obj.PROTOCOLS}
+
+ ##
+ # Platforms
+ ##
+ @property
+ def github(self):
+ return self.platform == "github"
+
+ @property
+ def bitbucket(self):
+ return self.platform == "bitbucket"
+
+ @property
+ def friendcode(self):
+ return self.platform == "friendcode"
+
+ @property
+ def assembla(self):
+ return self.platform == "assembla"
+
+ @property
+ def gitlab(self):
+ return self.platform == "gitlab"
+
+ ##
+ # Get data as dict
+ ##
+ @property
+ def data(self):
+ return dict(self._parsed)
diff --git a/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/AUTHORS.md b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/AUTHORS.md
new file mode 100644
index 0000000000..525116ee7e
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/AUTHORS.md
@@ -0,0 +1,17 @@
+# Credits
+
+## Development Lead
+
+- Jan-Erik Rediger <jrediger@mozilla.com>
+- Alessio Placitelli <aplacitelli@mozilla.com>
+
+## Contributors
+
+See [the full list of contributors](https://github.com/mozilla/glean_parser/graphs/contributors).
+
+## Acknowledgements
+
+This package was created with
+[Cookiecutter](https://github.com/audreyr/cookiecutter) and the
+[audreyr/cookiecutter-pypackage](https://github.com/audreyr/cookiecutter-pypackage)
+project template.
diff --git a/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/LICENSE b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/METADATA b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/METADATA
new file mode 100644
index 0000000000..4f3b85647f
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/METADATA
@@ -0,0 +1,726 @@
+Metadata-Version: 2.1
+Name: glean-parser
+Version: 7.2.1
+Summary: Parser tools for Mozilla's Glean telemetry
+Home-page: https://github.com/mozilla/glean_parser
+Author: The Glean Team
+Author-email: glean-team@mozilla.com
+License: UNKNOWN
+Keywords: glean_parser
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Description-Content-Type: text/markdown
+Requires-Dist: appdirs (>=1.4)
+Requires-Dist: Click (>=7)
+Requires-Dist: diskcache (>=4)
+Requires-Dist: Jinja2 (>=2.10.1)
+Requires-Dist: MarkupSafe (<=2.0.1,>=1.1.1)
+Requires-Dist: jsonschema (>=3.0.2)
+Requires-Dist: PyYAML (>=5.3.1)
+Requires-Dist: iso8601 (>=0.1.10) ; python_version <= "3.6"
+
+# Glean Parser
+
+Parser tools for Mozilla's Glean telemetry.
+
+## Features
+
+Contains various utilities for handling `metrics.yaml` and `pings.yaml` for [the
+Glean SDKs](https://mozilla.github.io/glean). This includes producing generated
+code for various integrations, linting and coverage testing.
+
+## Documentation
+
+- [How to Contribute](https://github.com/mozilla/glean_parser/blob/main/CONTRIBUTING.md). Please file bugs in [bugzilla](https://bugzilla.mozilla.org/enter_bug.cgi?assigned_to=nobody%40mozilla.org&bug_ignored=0&bug_severity=normal&bug_status=NEW&cf_fission_milestone=---&cf_fx_iteration=---&cf_fx_points=---&cf_status_firefox65=---&cf_status_firefox66=---&cf_status_firefox67=---&cf_status_firefox_esr60=---&cf_status_thunderbird_esr60=---&cf_tracking_firefox65=---&cf_tracking_firefox66=---&cf_tracking_firefox67=---&cf_tracking_firefox_esr60=---&cf_tracking_firefox_relnote=---&cf_tracking_thunderbird_esr60=---&product=Data%20Platform%20and%20Tools&component=Glean%3A%20SDK&contenttypemethod=list&contenttypeselection=text%2Fplain&defined_groups=1&flag_type-203=X&flag_type-37=X&flag_type-41=X&flag_type-607=X&flag_type-721=X&flag_type-737=X&flag_type-787=X&flag_type-799=X&flag_type-800=X&flag_type-803=X&flag_type-835=X&flag_type-846=X&flag_type-855=X&flag_type-864=X&flag_type-916=X&flag_type-929=X&flag_type-930=X&flag_type-935=X&flag_type-936=X&flag_type-937=X&form_name=enter_bug&maketemplate=Remember%20values%20as%20bookmarkable%20template&op_sys=Unspecified&priority=P3&&rep_platform=Unspecified&status_whiteboard=%5Btelemetry%3Aglean-rs%3Am%3F%5D&target_milestone=---&version=unspecified).
+- [User documentation for Glean](https://mozilla.github.io/glean/).
+- [`glean_parser` developer documentation](https://mozilla.github.io/glean_parser/).
+
+## Requirements
+
+- Python 3.6 (or later)
+
+The following library requirements are installed automatically when
+`glean_parser` is installed by `pip`.
+
+- appdirs
+- Click
+- diskcache
+- Jinja2
+- jsonschema
+- PyYAML
+
+Additionally on Python 3.6:
+
+- iso8601
+
+## Usage
+
+```sh
+$ glean_parser --help
+```
+
+Read in `metrics.yaml`, translate to Kotlin format, and
+output to `output_dir`:
+
+```sh
+$ glean_parser translate -o output_dir -f kotlin metrics.yaml
+```
+
+Check a Glean ping against the ping schema:
+
+```sh
+$ glean_parser check < ping.json
+```
+
+
+# Changelog
+
+## Unreleased
+
+## 7.2.1
+
+- Unbreak last minor release ([#579](https://github.com/mozilla/glean_parser/pull/579))
+
+## 7.2.0
+
+- Remove yamllint integration ([#578](https://github.com/mozilla/glean_parser/pull/578))
+
+## 7.1.0
+
+- ENHANCEMENT: Labels in `labels:` fields may now contain any printable ASCII characters ([bug 1672273](https://bugzilla.mozilla.org/show_bug.cgi?id=1672273))
+- BUGFIX: Enforce ordering of generation of Pings, Metrics and Tags such that order is deterministic ([bug 1820334](https://bugzilla.mozilla.org/show_bug.cgi?id=1820334))
+
+## 7.0.0
+
+- BUGFIX: Remove internal-only fields from serialized metrics data ([#550](https://github.com/mozilla/glean_parser/pull/550))
+- FEATURE: New subcommand: `dump` to dump the metrics data as JSON ([#550](https://github.com/mozilla/glean_parser/pull/550))
+- BUGFIX: Kotlin: Generate enums with the right generic bound for ping reason codes ([#551](https://github.com/mozilla/glean_parser/pull/551)).
+- **BREAKING CHANGE:** Fully remove support for the old events API ([#549](https://github.com/mozilla/glean_parser/pull/549))
+ Adds a new lint `OLD_EVENT_API` to warn about missing `type` attributes on event extra keys.
+ Note that the Glean SDK already dropped support for the old events API.
+
+## 6.4.0
+
+- BUGFIX: Correct code generation for labeled metrics in Rust ([#533](https://github.com/mozilla/glean_parser/pull/533))
+- BUGFIX: Correctly serialize `Rates` for Rust code ([#530](https://github.com/mozilla/glean_parser/pull/530))
+- Feature: Wrap labeled metric's static labels list as CoW strings (requires updated Glean support) ([#534](https://github.com/mozilla/glean_parser/pull/534))
+
+## 6.3.0
+
+- events: Increase extras limit to 15 ([bug 1798713](https://bugzilla.mozilla.org/show_bug.cgi?id=1798713))
+
+## 6.2.1
+
+- Add support for Rate, Denominator and Numerator metrics for JavaScript. ([bug 1793777](https://bugzilla.mozilla.org/show_bug.cgi?id=1793777))
+
+## 6.2.0
+
+- [data-review] Use a template to generate the Data Review Request template ([bug 1772605](https://bugzilla.mozilla.org/show_bug.cgi?id=1772605))
+- Make tag and no\_lint order deterministic ([#518](https://github.com/mozilla/glean_parser/pull/518))
+
+## 6.1.2
+
+- Swift: Add a conditional `import Foundation` to support generating metrics when Glean is delivered via the AppServices iOS megazord
+
+## 6.1.1
+
+- Rust: Use correct name for a ping in generated code.
+
+## 6.1.0
+
+- [data-review] Include extra keys' names and descriptions in data review template ([bug 1767027](https://bugzilla.mozilla.org/show_bug.cgi?id=1767027))
+- Raise limit on number of statically-defined labels to 4096. ([bug 1772163](https://bugzilla.mozilla.org/show_bug.cgi?id=1772163))
+- Fix Rust code generation for new UniFFI interface ([#491](https://github.com/mozilla/glean_parser/pull/491), [#494](https://github.com/mozilla/glean_parser/pull/494), [#495](https://github.com/mozilla/glean_parser/pull/495))
+
+## 6.0.1
+
+- Relax version requirement for MarkupSafe.
+ Now works with MarkupSafe v1.1.1 to v2.0.1 inclusive again.
+
+## 6.0.0
+
+- BUGFIX: Add missing `extra_args` to Rust constructor generation ([bug 1765855](https://bugzilla.mozilla.org/show_bug.cgi?id=1765855))
+- **Breaking change:** `glean_parser` now generates metrics compatible with the UniFFI-powered Glean SDK.
+ This is not backwards-compatible with previous versions.
+- Generate Rate, Denominator and Numerator metrics for Kotlin and Swift
+- Explicitly skip Rate, Denominator and Numerator metrics for JavaScript.
+ These will cause a build failure by default, but can be turned into warnings on request.
+ Use `-s fail_rates=false` to enable warning-only mode.
+
+## 5.1.2
+
+- BUGFIX: Revert changes made on v5.1.1.
+    - The issues addressed by those changes were non-issues and the result of misuse of the APIs.
+
+## 5.1.1
+
+- BUGFIX: Fix issues with Swift templates ([bug 1749494](https://bugzilla.mozilla.org/show_bug.cgi?id=1749494))
+ - Make metrics and pings all `public`
+ - Make pings `static`
+
+## 5.1.0
+
+- Add support for build info generation for JavaScript and Typescript targets ([bug 1749494](https://bugzilla.mozilla.org/show_bug.cgi?id=1749494))
+
+## 5.0.1
+
+- Fix the logic for the metric expiration by version ([bug 1753194](https://bugzilla.mozilla.org/show_bug.cgi?id=1753194))
+
+## 5.0.0
+
+- Remove C# support ([#436](https://github.com/mozilla/glean_parser/pull/436)).
+- Add support for Rust code generation ([bug 1677434](https://bugzilla.mozilla.org/show_bug.cgi?id=1677434))
+- Report an error if no files are passed ([bug 1751730](https://bugzilla.mozilla.org/show_bug.cgi?id=1751730))
+- [data-review] Report an error if no metrics match provided bug number ([bug 1752576](https://bugzilla.mozilla.org/show_bug.cgi?id=1752576))
+- [data-review] Include notification_emails in list of those responsible ([bug 1752576](https://bugzilla.mozilla.org/show_bug.cgi?id=1752576))
+- Add support for expiring metrics by the provided major version ([bug 1753194](https://bugzilla.mozilla.org/show_bug.cgi?id=1753194))
+
+## 4.4.0
+
+- Support global file-level tags in metrics.yaml ([bug 1745283](https://bugzilla.mozilla.org/show_bug.cgi?id=1745283))
+- Glinter: Reject metric files if they use `unit` by mistake. It should be `time_unit` ([#432](https://github.com/mozilla/glean_parser/pull/432)).
+- Automatically generate a build date when generating build info ([#431](https://github.com/mozilla/glean_parser/pull/431)).
+ Enabled for Kotlin and Swift.
+ This can be changed with the `build_date` command line option.
+ `build_date=0` will use a static unix epoch time.
+ `build_date=2022-01-03T17:30:00` will parse the ISO8601 string to use (as a UTC timestamp).
+ Other values will throw an error.
+
+ Example:
+
+ glean_parser translate --format kotlin --option build_date=2021-11-01T01:00:00 path/to/metrics.yaml
+
+## 4.3.1
+
+- BUGFIX: Skip tags for code generation ([#409](https://github.com/mozilla/glean_parser/pull/409))
+
+## 4.3.0
+
+- Support tags in glean parser ([bug 1734011](https://bugzilla.mozilla.org/show_bug.cgi?id=1734011))
+
+## 4.2.0
+
+- Improve the schema validation error messages. They will no longer include `OrderedDict(...)` on Python 3.7 and later ([bug 1733395](https://bugzilla.mozilla.org/show_bug.cgi?id=1733395))
+- Officially support Python 3.10
+
+## 4.1.1 (2021-09-28)
+
+- Update private import paths on Javascript / Typescript templates. ([bug 1702468](https://bugzilla.mozilla.org/show_bug.cgi?id=1702468))
+
+## 4.1.0 (2021-09-16)
+
+- Add support for Node.js platform on Javascript / Typescript templates. ([bug 1728982](https://bugzilla.mozilla.org/show_bug.cgi?id=1728982))
+
+## 4.0.0 (2021-08-20)
+
+- Add support for Text metric type ([#374](https://github.com/mozilla/glean_parser/pull/374))
+- Reserve the `default` ping name. It can't be used as a ping name, but it can be used in `send_in_pings` ([#376](https://github.com/mozilla/glean_parser/pull/376))
+
+## 3.8.0 (2021-08-18)
+
+- Expose ping reasons enum on JavaScript / TypeScript templates. ([bug 1719136](https://bugzilla.mozilla.org/show_bug.cgi?id=1719136))
+- Define an interface with the allowed extras for each event on the TypeScript template. ([bug 1693487](https://bugzilla.mozilla.org/show_bug.cgi?id=1693487))
+
+## 3.7.0 (2021-07-13)
+
+- New lint: Check for redundant words in ping names ([#355](https://github.com/mozilla/glean_parser/pull/355))
+- Add support for URL metric type ([#361](https://github.com/mozilla/glean_parser/pull/361))
+
+## 3.6.0 (2021-06-11)
+
+- Add a command `data-review` to generate a skeleton Data Review Request for all metrics matching a supplied bug number. ([bug 1704541](https://bugzilla.mozilla.org/show_bug.cgi?id=1704541))
+- Enable custom distribution outside of GeckoView (`gecko_datapoint` becomes optional)
+
+## 3.5.0 (2021-06-03)
+
+- Transform generated folder into QML Module when building Javascript templates for the Qt platform. ([bug 1707896](https://bugzilla.mozilla.org/show_bug.cgi?id=1707896))
+  - Import the Glean QML module from inside each generated file, removing the requirement to import Glean before importing any of the generated files;
+  - Provide a `qmldir` file exposing all generated files;
+  - Drop the `namespace` option for Javascript templates;
+  - Add a new `version` option for Javascript templates, required when building for Qt, which expects the Glean QML module version.
+
+## 3.4.0 (2021-05-28)
+
+- Add missing import for Kotlin code ([#339](https://github.com/mozilla/glean_parser/pull/339))
+- Use a plain Kotlin type in the generated interface implementation ([#339](https://github.com/mozilla/glean_parser/pull/339))
+- Generate additional generics for event metrics ([#339](https://github.com/mozilla/glean_parser/pull/339))
+- For Kotlin skip generating `GleanBuildInfo.kt` when requested (with `with_buildinfo=false`) ([#341](https://github.com/mozilla/glean_parser/pull/341))
+
+## 3.3.2 (2021-05-18)
+
+- Fix another bug in the Swift code generation when generating extra keys ([#334](https://github.com/mozilla/glean_parser/pull/334))
+
+## 3.3.1 (2021-05-18)
+
+- Fix Swift code generation bug for pings ([#333](https://github.com/mozilla/glean_parser/pull/333))
+
+## 3.3.0 (2021-05-18)
+
+- Generate new event API construct ([#321](https://github.com/mozilla/glean_parser/pull/321))
+
+## 3.2.0 (2021-04-28)
+
+- Add option to add extra introductory text to generated markdown ([#298](https://github.com/mozilla/glean_parser/pull/298))
+- Add support for Qt in Javascript templates ([bug 1706252](https://bugzilla.mozilla.org/show_bug.cgi?id=1706252))
+ - Javascript templates will now accept the `platform` option. If this option is set to `qt`
+ the generated templates will be Qt compatible. Default value is `webext`.
+
+## 3.1.2 (2021-04-21)
+
+- BUGFIX: Remove the "DO NOT COMMIT" notice from the documentation.
+
+## 3.1.1 (2021-04-19)
+
+- Recommend to not commit as well as to not edit the generated files. ([bug 1706042](https://bugzilla.mozilla.org/show_bug.cgi?id=1706042))
+- BUGFIX: Include import statement for labeled metric subtypes in Javascript and Typescript templates.
+
+## 3.1.0 (2021-04-16)
+
+- Add support for labeled metric types in Javascript and Typescript templates.
+
+## 3.0.0 (2021-04-13)
+
+- Raise limit on number of statically-defined labels to 100. ([bug 1702263](https://bugzilla.mozilla.org/show_bug.cgi?id=1702263))
+- BUGFIX: Version 2.0.0 of the schema now allows the "special" `glean_.*` ping names for Glean-internal use again.
+- Remove support for JWE metric types.
+
+## 2.5.0 (2021-02-23)
+
+- Add parser and object model support for `rate` metric type. ([bug 1645166](https://bugzilla.mozilla.org/show_bug.cgi?id=1645166))
+- Add parser and object model support for telemetry_mirror property. ([bug 1685406](https://bugzilla.mozilla.org/show_bug.cgi?id=1685406))
+- Update the Javascript template to match Glean.js expectations. ([bug 1693516](https://bugzilla.mozilla.org/show_bug.cgi?id=1693516))
+  - Glean.js has updated its export strategy. It will now export each metric type as an independent module;
+  - Glean.js has dropped support for non-ES6 modules.
+- Add support for generating Typescript code. ([bug 1692157](https://bugzilla.mozilla.org/show_bug.cgi?id=1692157))
+ - The templates added generate metrics and pings code for Glean.js.
+
+## 2.4.0 (2021-02-18)
+
+- **Experimental:** `glean_parser` has a new subcommand `coverage` to convert raw coverage reports
+ into something consumable by coverage tools, such as codecov.io
+- The path to the file that each metric is defined in is now stored on the
+ `Metric` object in `defined_in["filepath"]`.
+
+## 2.3.0 (2021-02-17)
+
+- Leverage the `glean_namespace` to provide correct import when building for Javascript.
+
+## 2.2.0 (2021-02-11)
+
+- The Kotlin generator now generates static build information that can be passed
+ into `Glean.initialize` to avoid calling the package manager at runtime.
+
+## 2.1.0 (2021-02-10)
+
+- Add support for generating Javascript code.
+ - The templates added generate metrics and pings code for Glean.js.
+
+## 2.0.0 (2021-02-05)
+
+- New versions 2.0.0 of the `metrics.yaml` and `pings.yaml` schemas now ship
+ with `glean_parser`. These schemas are different from version 1.0.0 in the
+ following ways:
+
+ - Bugs must be specified as URLs. Bug numbers are disallowed.
+ - The legacy ping names containing underscores are no longer allowed. These
+ included `deletion_request`, `bookmarks_sync`, `history_sync`,
+    `session_end`, `all_pings`, and `glean_*`. In these cases, the `_` should be
+ replaced with `-`.
+
+ To upgrade your app or library to use the new schema, replace the version in
+ the `$schema` value with `2-0-0`.
+
+- **Breaking change:** It is now an error to use bug numbers (rather than URLs)
+ in ping definitions.
+
+- Add the line number that metrics and pings were originally defined in the yaml
+ files.
+
+## 1.29.1 (2020-12-17)
+
+- BUGFIX: Linter output can now be redirected correctly ([bug 1675771](https://bugzilla.mozilla.org/show_bug.cgi?id=1675771)).
+
+## 1.29.0 (2020-10-07)
+
+- **Breaking change:** `glean_parser` will now return an error code when any of
+ the input files do not exist (unless the `--allow-missing-files` flag is
+ passed).
+- Generated code now includes a comment next to each metric containing the name
+ of the metric in its original `snake_case` form.
+- When metrics don't provide a `unit` parameter, it is not included in the
+ output (as provided by probe-scraper).
+
+## 1.28.6 (2020-09-24)
+
+- BUGFIX: Ensure Kotlin arguments are deterministically ordered
+
+## 1.28.5 (2020-09-14)
+
+- Fix deploy step to update pip before deploying to pypi.
+
+## 1.28.4 (2020-09-14)
+
+- The `SUPERFLUOUS_NO_LINT` warning has been removed from the glinter.
+ It likely did more harm than good, and makes it hard to make
+ `metrics.yaml` files that pass across different versions of
+ `glean_parser`.
+- Expired metrics will now produce a linter warning, `EXPIRED_METRIC`.
+- Expiry dates that are more than 730 days (\~2 years) in the future
+ will produce a linter warning, `EXPIRATION_DATE_TOO_FAR`.
+- Allow using the Quantity metric type outside of Gecko.
+- New parser configs `custom_is_expired` and `custom_validate_expires`
+ added. These are both functions that take the `expires` value of the
+ metric and return a bool. (See `Metric.is_expired` and
+ `Metric.validate_expires`). These will allow FOG to provide custom
+ validation for its version-based `expires` values.
+
+## 1.28.3 (2020-07-28)
+
+- BUGFIX: Support HashSet and Dictionary in the C\# generated code.
+
+## 1.28.2 (2020-07-28)
+
+- BUGFIX: Generate valid C\# code when using Labeled metric types.
+
+## 1.28.1 (2020-07-24)
+
+- BUGFIX: Add missing column to correctly render markdown tables in generated
+ documentation.
+
+## 1.28.0 (2020-07-23)
+
+- **Breaking change:** The internal ping `deletion-request` was misnamed in
+  pings.py, causing the linter to reject use of the correctly named ping for
+  adding legacy ids. After updating, consuming apps will need to change any
+  `deletion_request` entries in `send_in_pings` in their metrics.yaml to
+  `deletion-request`.
+
+## 1.27.0 (2020-07-21)
+
+- Rename the `data_category` field to `data_sensitivity` to be clearer.
+
+## 1.26.0 (2020-07-21)
+
+- Add support for JWE metric types.
+- Add a `data_sensitivity` field to all metrics for specifying the type of data
+ collected in the field.
+
+## 1.25.0 (2020-07-17)
+
+- Add support for generating C\# code.
+- BUGFIX: The memory unit is now correctly passed to the MemoryDistribution
+ metric type in Swift.
+
+## 1.24.0 (2020-06-30)
+
+- BUGFIX: look for metrics in send\_if\_empty pings. Metrics for these kinds of
+ pings were being ignored.
+
+## 1.23.0 (2020-06-27)
+
+- Support for Python 3.5 has been dropped.
+- BUGFIX: The ordering of event extra keys will now match with their enum,
+ fixing a serious bug where keys of extras may not match the correct values in
+ the data payload. See <https://bugzilla.mozilla.org/show_bug.cgi?id=1648768>.
+
+## 1.22.0 (2020-05-28)
+
+- **Breaking change:** (Swift only) Combine all metrics and pings into a single
+ generated file `Metrics.swift`.
+
+## 1.21.0 (2020-05-25)
+
+- `glinter` messages have been improved with more details and to be more
+ actionable.
+- A maximum of 10 `extra_keys` is now enforced for `event` metric types.
+- BUGFIX: the `Lifetime` enum values now match the values of the implementation
+ in mozilla/glean.
+
+## 1.20.4 (2020-05-07)
+
+- BUGFIX: yamllint errors are now reported using the correct file name.
+
+## 1.20.3 (2020-05-06)
+
+- Support for using `timing_distribution`'s `time_unit` parameter to control
+ the range of acceptable values is documented. The default unit for this use
+ case is `nanosecond` to avoid creating a breaking change. See [bug
+ 1630997](https://bugzilla.mozilla.org/show_bug.cgi?id=1630997) for more
+ information.
+
+## 1.20.2 (2020-04-24)
+
+- Dependencies that depend on the version of Python being used are now specified
+ using the [Declaring platform specific dependencies syntax in
+  setuptools](https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies).
+ This means that more recent versions of dependencies are likely to be
+ installed on Python 3.6 and later, and unnecessary backport libraries won't
+ be installed on more recent Python versions.
+
+## 1.20.1 (2020-04-21)
+
+- The minimum version of the runtime dependencies has been lowered to increase
+ compatibility with other tools. These minimum versions are now tested in CI,
+ in addition to testing the latest versions of the dependencies that was
+ already happening in CI.
+
+## 1.20.0 (2020-04-15)
+
+- **Breaking change:** glinter errors found during the `translate` command will
+ now return an error code. glinter warnings will be displayed, but not return
+ an error code.
+- `glean_parser` now produces a linter warning when `user` lifetime metrics are
+ set to expire. See [bug
+ 1604854](https://bugzilla.mozilla.org/show_bug.cgi?id=1604854) for additional
+ context.
+
+## 1.19.0 (2020-03-18)
+
+- **Breaking change:** The regular expression used to validate labels is
+ stricter and more correct.
+- Add more information about pings to markdown documentation:
+ - State whether the ping includes client id;
+ - Add list of data review links;
+ - Add list of related bugs links.
+- `glean_parser` now makes it easier to write external translation
+ functions for different language targets.
+- BUGFIX: `glean_parser` now works on 32-bit Windows.
+
+## 1.18.3 (2020-02-24)
+
+- Dropped the `inflection` dependency.
+- Constrained the `zipp` and `MarkupSafe` transitive dependencies to versions
+ that support Python 3.5.
+
+## 1.18.2 (2020-02-14)
+
+- BUGFIX: Fix rendering of first element of reason list.
+
+## 1.18.1 (2020-02-14)
+
+- BUGFIX: Reason codes are displayed in markdown output for built-in
+ pings as well.
+- BUGFIX: Reason descriptions are indented correctly in markdown
+ output.
+- BUGFIX: To avoid a compiler error, the `@JvmName` annotation isn't
+ added to private members.
+
+## 1.18.0 (2020-02-13)
+
+- **Breaking Change (Java API)** Have the metrics names in Java match the names
+ in Kotlin. See [Bug
+ 1588060](https://bugzilla.mozilla.org/show_bug.cgi?id=1588060).
+- The reasons a ping is sent are now included in the generated markdown
+ documentation.
+
+## 1.17.3 (2020-02-05)
+
+- BUGFIX: The version of Jinja2 now specifies < 3.0, since that version no
+ longer supports Python 3.5.
+
+## 1.17.2 (2020-02-05)
+
+- BUGFIX: Fixes an import error in generated Kotlin code.
+
+## 1.17.1 (2020-02-05)
+
+- BUGFIX: Generated Swift code now includes `import Glean`, unless generating
+ for a Glean-internal build.
+
+## 1.17.0 (2020-02-03)
+
+- Remove default schema URL from `validate_ping`
+- Make `schema` argument required for CLI
+- BUGFIX: Avoid default import in Swift code for Glean itself
+- BUGFIX: Restore order of fields in generated Swift code
+
+## 1.16.0 (2020-01-15)
+
+- Support for `reason` codes on pings was added.
+
+## 1.15.6 (2020-02-06)
+
+- BUGFIX: The version of Jinja2 now specifies < 3.0, since that version no
+ longer supports Python 3.5 (backported from 1.17.3).
+
+## 1.15.5 (2019-12-19)
+
+- BUGFIX: Also allow the legacy name `all_pings` for `send_in_pings` parameter
+ on metrics
+
+## 1.15.4 (2019-12-19)
+
+- BUGFIX: Also allow the legacy name `all_pings`
+
+## 1.15.3 (2019-12-13)
+
+- Add project title to markdown template.
+- Remove "Sorry about that" from markdown template.
+- BUGFIX: Replace dashes in variable names to force proper naming
+
+## 1.15.2 (2019-12-12)
+
+- BUGFIX: Use a pure Python library for iso8601 so there is no compilation
+ required.
+
+## 1.15.1 (2019-12-12)
+
+- BUGFIX: Add some additional ping names to the non-kebab-case allow list.
+
+## 1.15.0 (2019-12-12)
+
+- Restrict new pings names to be kebab-case and change `all_pings` to
+ `all-pings`
+
+## 1.14.0 (2019-12-06)
+
+- `glean_parser` now supports Python versions 3.5, 3.6, 3.7 and 3.8.
+
+## 1.13.0 (2019-12-04)
+
+- The `translate` command will no longer clear extra files in the output
+ directory.
+- BUGFIX: Ensure all newlines in comments are prefixed with comment markers
+- BUGFIX: Escape Swift keywords in variable names in generated code
+- Generate documentation for pings that are sent if empty
+
+## 1.12.0 (2019-11-27)
+
+- Reserve the `deletion_request` ping name
+- Added a new flag `send_if_empty` for pings
+
+## 1.11.0 (2019-11-13)
+
+- The `glinter` command now performs `yamllint` validation on registry files.
+
+## 1.10.0 (2019-11-11)
+
+- The Kotlin linter `detekt` is now run during CI, and for local
+ testing if installed.
+- Python 3.8 is now tested in CI (in addition to Python 3.7). Using
+ `tox` for this doesn't work in modern versions of CircleCI, so the
+ `tox` configuration has been removed.
+- `yamllint` has been added to test the YAML files on CI.
+- ⚠ Metric types that don't yet have implementations in glean-core
+ have been removed. This includes `enumeration`, `rate`, `usage`, and
+ `use_counter`, as well as many labeled metrics that don't exist.
+
+## 1.9.5 (2019-10-22)
+
+- Allow a Swift lint for generated code
+- New lint: Restrict what metrics can go into the `baseline` ping
+- New lint: Warn for slight misspellings in ping names
+- BUGFIX: change Labeled types labels from lists to sets.
+
+## 1.9.4 (2019-10-16)
+
+- Use lists instead of sets in Labeled types labels to ensure that the order of
+ the labels passed to the `metrics.yaml` is kept.
+- `glinter` will now check for duplicate labels and error if there are any.
+
+## 1.9.3 (2019-10-09)
+
+- Add labels from Labeled types to the Extra column in the Markdown template.
+
+## 1.9.2 (2019-10-08)
+
+- BUGFIX: Don't call `is_internal_metric` on `Ping` objects.
+
+## 1.9.1 (2019-10-07)
+
+- Don't include Glean internal metrics in the generated markdown.
+
+## 1.9.0 (2019-10-04)
+
+- Glinter now warns when bug numbers (rather than URLs) are used.
+- BUGFIX: add `HistogramType` and `MemoryUnit` imports in Kotlin generated code.
+
+## 1.8.4 (2019-10-02)
+
+- Removed unsupported labeled metric types.
+
+## 1.8.3 (2019-10-02)
+
+- Fix indentation for generated Swift code
+
+## 1.8.2 (2019-10-01)
+
+- Created labeled metrics and events in Swift code and wrapped them in a
+  configured namespace
+
+## 1.8.1 (2019-09-27)
+
+- BUGFIX: `memory_unit` is now passed to the Kotlin generator.
+
+## 1.8.0 (2019-09-26)
+
+- A new parser config, `do_not_disable_expired`, was added to turn off the
+ feature that expired metrics are automatically disabled. This is useful if you
+ want to retain the disabled value that is explicitly in the `metrics.yaml`
+ file.
+- `glinter` will now report about superfluous `no_lint` entries.
+
+## 1.7.0 (2019-09-24)
+
+- A `glinter` tool is now included to find common mistakes in metric naming
+ and setup. This check is run during `translate` and warnings will be
+ displayed. ⚠ These warnings will be treated as errors in a future revision.
+
+## 1.6.1 (2019-09-17)
+
+- BUGFIX: `GleanGeckoMetricsMapping` must include `LabeledMetricType`
+ and `CounterMetricType`.
+
+## 1.6.0 (2019-09-17)
+
+- NEW: Support for outputting metrics in Swift.
+- BUGFIX: Provides a helpful error message when `geckoview_datapoint` is used on
+  a metric type that doesn't support GeckoView exfiltration.
+- Generate a lookup table for Gecko categorical histograms in
+ `GleanGeckoMetricsMapping`.
+- Introduce a 'Swift' output generator.
+
+## 1.4.1 (2019-08-28)
+
+- Documentation only.
+
+## 1.4.0 (2019-08-27)
+
+- Added support for generating markdown documentation from `metrics.yaml` files.
+
+## 1.3.0 (2019-08-22)
+
+- `quantity` metric type has been added.
+
+## 1.2.1 (2019-08-13)
+
+- BUGFIX: `includeClientId` was not being output for PingType.
+
+## 1.2.0 (2019-08-13)
+
+- `memory_distribution` metric type has been added.
+- `custom_distribution` metric type has been added.
+- `labeled_timespan` is no longer an allowed metric type.
+
+## 1.1.0 (2019-08-05)
+
+- Add a special `all_pings` value to `send_in_pings`.
+
+## 1.0.0 (2019-07-29)
+
+- First release to start following strict semver.
+
+## 0.1.0 (2018-10-15)
+
+- First release on PyPI.
+
+
diff --git a/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/RECORD b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/RECORD
new file mode 100644
index 0000000000..f86c17c1d8
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/RECORD
@@ -0,0 +1,40 @@
+glean_parser/__init__.py,sha256=bJljD052_0y-efcBhYpllICVCXOMHLcXRLNyrvfgt5A,533
+glean_parser/__main__.py,sha256=7kIBMO-kL7boJxYrKp3CkRr4xX4_ct4BqCiCvtg2jjU,8631
+glean_parser/coverage.py,sha256=2IwC4XMDtDamMkBFoYilmqJzW4gyypq65YVCur8SNas,4405
+glean_parser/data_review.py,sha256=BweeeTkNNS6HrIDkztawhbDByrk_-Avxpg7YeST3VAs,2152
+glean_parser/javascript.py,sha256=w4ZhNBHBKWYk0h3t7G0Ud2tR__hRqzn9dlEXNKLdQrA,11230
+glean_parser/kotlin.py,sha256=5z8_74xlqvHDsedwZhGf1_qb7swPEgIZumkJIuj3ef8,12598
+glean_parser/lint.py,sha256=A21ZKb9WSrgug6t8q1YHvXUxlB198xrkmZ26HCUDSlE,16303
+glean_parser/markdown.py,sha256=GkCr1CrV6mnRQseT6FO1-JJ7Eup8X3lxUfRMBTxXpe4,9066
+glean_parser/metrics.py,sha256=CSad9CbUWKF771Z82LhBAFAL9uXum0ycRwIHtl_i91E,12384
+glean_parser/parser.py,sha256=cUOnvSXKfEBg8YTpRcWiPcMwpFpK1TTqsVO_zjUtpR4,15309
+glean_parser/pings.py,sha256=yh_DzRAI9k2_NiCIlpQiNg-ggVrttB4hk7gwtKlr72s,2815
+glean_parser/rust.py,sha256=PJzTfYWzAumJYCP5IYPc6fhS_Qa30Q8NTK9plg3sDnk,6744
+glean_parser/swift.py,sha256=T1BSGahd9wUd6VDeNC89SdN6M34jKXDlydMpSI0QLOs,8379
+glean_parser/tags.py,sha256=bemKYvcbMO4JrghiNSe-A4BNNDtx_FlUPkgrPPJy84Y,1391
+glean_parser/translate.py,sha256=S_a4PMXt3PyD7Wg35OM4xHEwPraqkcJzm_w95IEegPU,7962
+glean_parser/translation_options.py,sha256=Lxzr6G7MP0tC_ZYlZXftS4j0SLiqO-5mGVTEc7ggXis,2037
+glean_parser/util.py,sha256=Hei33QDq4a_lIHp5j98KovN6C7tmLrvVamEX2a1DcTo,16825
+glean_parser/validate_ping.py,sha256=0TNvILH6dtzJDys3W8Kqorw6kk03me73OCUDtpoHcXU,2118
+glean_parser/schemas/metrics.1-0-0.schema.yaml,sha256=cND3cvi6iBfPUVmtfIBQfGJV9AALpbvN7nu8E33_J-o,19566
+glean_parser/schemas/metrics.2-0-0.schema.yaml,sha256=SOgqMzRs9QxyCBhjZwUhzlryeNLeaVAKMTwggG7XtQk,23843
+glean_parser/schemas/pings.1-0-0.schema.yaml,sha256=hwCnsKpEysmrmVp-QHGBArEkVY3vaU1rVsxlTwhAzws,4315
+glean_parser/schemas/pings.2-0-0.schema.yaml,sha256=rD1s-rfz1xC9biHyLfBCnsoQxVYHwpe_S05awfe2xDA,4363
+glean_parser/schemas/tags.1-0-0.schema.yaml,sha256=OGXIJlvvVW1vaqB_NVZnwKeZ-sLlfH57vjBSHbj6DNI,1231
+glean_parser/templates/data_review.jinja2,sha256=jeYU29T1zLSyu9fKBBFu5BFPfIw8_hmOUXw8RXhRXK8,3287
+glean_parser/templates/javascript.buildinfo.jinja2,sha256=4mXiZCQIk9if4lxlA05kpSIL4a95IdwGwqle2OqqNAs,474
+glean_parser/templates/javascript.jinja2,sha256=cT_bG-jC6m4afECXmcsqHwiiHjRuVtJnfv90OD2Mwxw,2669
+glean_parser/templates/kotlin.buildinfo.jinja2,sha256=X0lk2SNu5OIIj2i6mUyF9CWFQIonLgfqkgT5fA-5G6c,920
+glean_parser/templates/kotlin.geckoview.jinja2,sha256=MJOgtoDXmBjE9pwk-G6T89y36RZuMbDWM_-DBN_gFJo,5099
+glean_parser/templates/kotlin.jinja2,sha256=3DqUMXJRkmTvSp_5IRyvGmw5iXYWdox7coMFe3YDxcc,5247
+glean_parser/templates/markdown.jinja2,sha256=vAHHGGm28HRDPd3zO_wQMAUZIuxE9uQ7hl3NpXxcKV4,3425
+glean_parser/templates/qmldir.jinja2,sha256=m6IGsp-tgTiOfQ7VN8XW6GqX0gJqJkt3B6Pkaul6FVo,156
+glean_parser/templates/rust.jinja2,sha256=tznLKaZxi_Z9puGqDKD0uuWefZcVHiNdQHB4BP9zJfs,10797
+glean_parser/templates/swift.jinja2,sha256=OsaEIlEdcOrUMvI_UzbxWv75lluTAWZGncH_pU-pbZQ,4809
+glean_parser-7.2.1.dist-info/AUTHORS.md,sha256=yxgj8MioO4wUnrh0gmfb8l3DJJrf-l4HmmEDbQsbbNI,455
+glean_parser-7.2.1.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+glean_parser-7.2.1.dist-info/METADATA,sha256=6ZY8M4qK01Cz54nm4d9tOii3CBbW9lSaUSpHwUnm9JA,28275
+glean_parser-7.2.1.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
+glean_parser-7.2.1.dist-info/entry_points.txt,sha256=s-clJTIqp-PpJD-n3AnIQZFkTafIrzsTbAPX9vNY018,69
+glean_parser-7.2.1.dist-info/top_level.txt,sha256=q7T3duD-9tYZFyDry6Wv2LcdMsK2jGnzdDFhxWcT2Z8,13
+glean_parser-7.2.1.dist-info/RECORD,,
diff --git a/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/WHEEL b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/WHEEL
new file mode 100644
index 0000000000..1f37c02f2e
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.40.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/entry_points.txt b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000..2a22ca7321
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+glean_parser = glean_parser.__main__:main_wrapper
+
diff --git a/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/top_level.txt b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..a7f3a37918
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser-7.2.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+glean_parser
diff --git a/third_party/python/glean_parser/glean_parser/__init__.py b/third_party/python/glean_parser/glean_parser/__init__.py
new file mode 100644
index 0000000000..ddca930c79
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Top-level package for Glean parser."""
+
+from pkg_resources import get_distribution, DistributionNotFound
+
+try:
+ __version__ = get_distribution(__name__).version
+except DistributionNotFound:
+ # package is not installed
+ pass
+
+__author__ = """The Glean Team"""
+__email__ = "glean-team@mozilla.com"
diff --git a/third_party/python/glean_parser/glean_parser/__main__.py b/third_party/python/glean_parser/glean_parser/__main__.py
new file mode 100644
index 0000000000..24876a1439
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/__main__.py
@@ -0,0 +1,349 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Console script for glean_parser."""
+
+import datetime
+import io
+from pathlib import Path
+import sys
+
+import click
+import json
+
+
+import glean_parser
+
+
+from . import coverage as mod_coverage
+from . import data_review as mod_data_review
+from . import lint
+from . import translate as mod_translate
+from . import validate_ping
+from . import translation_options
+
+
+CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
+
+
+@click.command(context_settings=CONTEXT_SETTINGS)
+@click.argument(
+ "input",
+ type=click.Path(exists=False, dir_okay=False, file_okay=True, readable=True),
+ nargs=-1,
+)
+@click.option(
+ "--output",
+ "-o",
+ type=click.Path(dir_okay=True, file_okay=False, writable=True),
+ nargs=1,
+ required=True,
+)
+@click.option(
+ "--format",
+ "-f",
+ type=click.Choice(list(mod_translate.OUTPUTTERS.keys())),
+ required=True,
+)
+@click.option(
+ "--option",
+ "-s",
+ help="Backend-specific option. Must be of the form key=value.\
+ Pass 'help' for valid options",
+ type=str,
+ multiple=True,
+ required=False,
+ is_eager=True,
+ callback=translation_options.translate_options,
+)
+@click.option(
+ "--allow-reserved",
+ is_flag=True,
+ help=(
+ "If provided, allow the use of reserved fields. "
+ "Should only be set when building the Glean library itself."
+ ),
+)
+@click.option(
+ "--allow-missing-files",
+ is_flag=True,
+ help=("Do not treat missing input files as an error."),
+)
+@click.option(
+ "--require-tags",
+ is_flag=True,
+ help=("Require tags to be specified for metrics and pings."),
+)
+@click.option(
+ "--expire-by-version",
+ help="Expire metrics by version, with the provided major version.",
+ type=click.INT,
+ required=False,
+)
+def translate(
+ input,
+ format,
+ output,
+ option,
+ allow_reserved,
+ allow_missing_files,
+ require_tags,
+ expire_by_version,
+):
+ """
+ Translate metrics.yaml and pings.yaml files to other formats.
+ """
+ option_dict = {}
+ for opt in option:
+ key, val = opt.split("=", 1)
+ option_dict[key] = val
+
+ sys.exit(
+ mod_translate.translate(
+ [Path(x) for x in input],
+ format,
+ Path(output),
+ option_dict,
+ {
+ "allow_reserved": allow_reserved,
+ "allow_missing_files": allow_missing_files,
+ "require_tags": require_tags,
+ "expire_by_version": expire_by_version,
+ },
+ )
+ )
+
+
+@click.command()
+@click.option(
+ "--schema",
+ "-s",
+ type=str,
+ nargs=1,
+ required=True,
+ help=("HTTP url or file path to Glean ping schema. If remote, will cache to disk."),
+)
+def check(schema):
+ """
+ Validate the contents of a Glean ping.
+
+ The ping contents are read from stdin, and the validation errors are
+ written to stdout.
+ """
+ sys.exit(
+ validate_ping.validate_ping(
+ io.TextIOWrapper(sys.stdin.buffer, encoding="utf-8"),
+ io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8"),
+ schema_url=schema,
+ )
+ )
+
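+# Illustrative usage (hypothetical schema URL): the ping body is read from
+# stdin and validation errors are written to stdout:
+#
+#     cat ping.json | glean_parser check -s https://example.com/glean.schema.json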
+
+@click.command()
+@click.argument(
+ "input",
+ type=click.Path(exists=True, dir_okay=False, file_okay=True, readable=True),
+ nargs=-1,
+)
+@click.option(
+ "--allow-reserved",
+ is_flag=True,
+ help=(
+ "If provided, allow the use of reserved fields. "
+ "Should only be set when building the Glean library itself."
+ ),
+)
+@click.option(
+ "--allow-missing-files",
+ is_flag=True,
+ help=("Do not treat missing input files as an error."),
+)
+@click.option(
+ "--require-tags",
+ is_flag=True,
+ help=("Require tags to be specified for metrics and pings."),
+)
+def glinter(input, allow_reserved, allow_missing_files, require_tags):
+ """
+ Runs a linter over the metrics.
+ """
+ sys.exit(
+ lint.glinter(
+ [Path(x) for x in input],
+ {
+ "allow_reserved": allow_reserved,
+ "allow_missing_files": allow_missing_files,
+ "require_tags": require_tags,
+ },
+ )
+ )
+
+
+@click.command()
+@click.argument(
+ "input",
+ type=click.Path(exists=True, dir_okay=False, file_okay=True, readable=True),
+ nargs=-1,
+)
+@click.option(
+ "--allow-reserved",
+ is_flag=True,
+ help=(
+ "If provided, allow the use of reserved fields. "
+ "Should only be set when building the Glean library itself."
+ ),
+)
+@click.option(
+ "--allow-missing-files",
+ is_flag=True,
+ help=("Do not treat missing input files as an error."),
+)
+@click.option(
+ "--require-tags",
+ is_flag=True,
+ help=("Require tags to be specified for metrics and pings."),
+)
+def dump(input, allow_reserved, allow_missing_files, require_tags):
+ """
+ Dump the list of metrics/pings as JSON to stdout.
+ """
+
+ results = glean_parser.parser.parse_objects(
+ [Path(x) for x in input],
+ {
+ "allow_reserved": allow_reserved,
+ "allow_missing_files": allow_missing_files,
+ "require_tags": require_tags,
+ },
+ )
+    # Iterating the parser result yields any validation errors;
+    # `dump` assumes its inputs are valid, so any error is fatal here.
+    errs = list(results)
+    assert len(errs) == 0
+
+ metrics = {
+ metric.identifier(): metric.serialize()
+ for category, probes in results.value.items()
+ for probe_name, metric in probes.items()
+ }
+
+    def date_serializer(o):
+        if isinstance(o, datetime.datetime):
+            return o.isoformat()
+        # Fail loudly instead of silently serializing unknown types as null.
+        raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
+
+ print(
+ json.dumps(
+ metrics,
+ sort_keys=True,
+ indent=2,
+ separators=(",", ": "),
+ default=date_serializer,
+ )
+ )
+
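+# The dumped JSON maps metric identifiers to their serialized definitions,
+# roughly (sketch with a hypothetical metric):
+#
+#     {"browser.is_default": {"type": "boolean", "expires": "never", ...}}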
+
+@click.command()
+@click.option(
+ "-c",
+ "--coverage_file",
+ type=click.Path(exists=True, dir_okay=False, file_okay=True, readable=True),
+ required=True,
+ multiple=True,
+)
+@click.argument(
+ "metrics_files",
+ type=click.Path(exists=True, dir_okay=False, file_okay=True, readable=True),
+ nargs=-1,
+)
+@click.option(
+ "-o",
+ "--output",
+ type=click.Path(exists=False, dir_okay=False, file_okay=True, writable=True),
+ required=True,
+)
+@click.option(
+ "--format",
+ "-f",
+ type=click.Choice(list(mod_coverage.OUTPUTTERS.keys())),
+ required=True,
+)
+@click.option(
+ "--allow-reserved",
+ is_flag=True,
+ help=(
+ "If provided, allow the use of reserved fields. "
+ "Should only be set when building the Glean library itself."
+ ),
+)
+def coverage(coverage_file, metrics_files, format, output, allow_reserved):
+ """
+ Produce a coverage analysis file given raw coverage output and a set of
+ metrics.yaml files.
+ """
+ sys.exit(
+ mod_coverage.coverage(
+ [Path(x) for x in coverage_file],
+ [Path(x) for x in metrics_files],
+ format,
+ Path(output),
+ {
+ "allow_reserved": allow_reserved,
+ },
+ )
+ )
+
+
+@click.command()
+@click.argument("bug", type=str)
+@click.argument(
+ "metrics_files",
+ type=click.Path(exists=True, dir_okay=False, file_okay=True, readable=True),
+ nargs=-1,
+)
+def data_review_request(bug, metrics_files):
+ """
+ Generate a skeleton Data Review Request for all metrics in metrics_files
+ whose bug_numbers fields contain the provided bug string.
+ For example, providing "1694739" matches
+ "https://bugzilla.mozilla.org/show_bug.cgi?id=1694739".
+ To ensure substrings don't match, the provided bug string will match only
+ if it is bounded by non-word characters.
+ Prints to stdout.
+ """
+ sys.exit(mod_data_review.generate(bug, [Path(x) for x in metrics_files]))
+
+
+@click.group()
+@click.version_option(glean_parser.__version__, prog_name="glean_parser")
+def main(args=None):
+ """Command line utility for glean_parser."""
+ pass
+
+
+main.add_command(translate)
+main.add_command(check)
+main.add_command(glinter)
+main.add_command(dump)
+main.add_command(coverage)
+main.add_command(data_review_request, "data-review")
+
+
+def main_wrapper(args=None):
+ """
+ A simple wrapper around click's `main` to display the glean_parser version
+ when there is an error.
+ """
+ try:
+ main(args=args)
+ except SystemExit as e:
+ if e.code != 0:
+ print(
+ f"ERROR running glean_parser v{glean_parser.__version__}",
+ file=sys.stderr,
+ )
+ raise
+
+
+if __name__ == "__main__":
+ main_wrapper() # pragma: no cover
diff --git a/third_party/python/glean_parser/glean_parser/coverage.py b/third_party/python/glean_parser/glean_parser/coverage.py
new file mode 100644
index 0000000000..776ea3183d
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/coverage.py
@@ -0,0 +1,140 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Produce coverage reports from the raw information produced by the
+`GLEAN_TEST_COVERAGE` feature.
+"""
+
+import json
+import sys
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Sequence, Set
+
+from . import parser
+from . import util
+from .metrics import ObjectTree
+
+
+def _outputter_codecovio(metrics: ObjectTree, output_path: Path):
+ """
+ Output coverage in codecov.io format as defined here:
+
+ https://docs.codecov.io/docs/codecov-custom-coverage-format
+
+ :param metrics: The tree of metrics, already annotated with coverage by
+ `_annotate_coverage`.
+ :param output_path: The file to output to.
+ """
+ coverage: Dict[str, List] = {}
+ for category in metrics.values():
+ for metric in category.values():
+ defined_in = metric.defined_in
+ if defined_in is not None:
+ path = defined_in["filepath"]
+ if path not in coverage:
+ with open(path) as fd:
+                    nlines = len(fd.readlines())
+ lines = [None] * nlines
+ coverage[path] = lines
+ file_section = coverage[path]
+ file_section[int(defined_in["line"])] = getattr(metric, "covered", 0)
+
+ with open(output_path, "w") as fd:
+ json.dump({"coverage": coverage}, fd)
+
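+# The emitted JSON maps file paths to per-line arrays, e.g. (sketch with a
+# hypothetical path; 1 = covered, 0 = not covered, null = no metric there):
+#
+#     {"coverage": {"app/metrics.yaml": [null, null, 1, null, 0]}}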
+
+OUTPUTTERS = {"codecovio": _outputter_codecovio}
+
+
+def _annotate_coverage(metrics, coverage_entries):
+ """
+ Annotate each metric with whether it is covered. Sets the attribute
+ `covered` to 1 on each metric that is covered.
+ """
+ mapping = {}
+ for category in metrics.values():
+ for metric in category.values():
+ mapping[metric.identifier()] = metric
+
+ for entry in coverage_entries:
+ metric_id = _coverage_entry_to_metric_id(entry)
+ if metric_id in mapping:
+ mapping[metric_id].covered = 1
+
+
+def _coverage_entry_to_metric_id(entry: str) -> str:
+ """
+ Convert a coverage entry to a metric id.
+
+    Technically, the coverage entries are rkv database keys, so they are not
+    just the metric identifier. This extracts the metric identifier part.
+ """
+ # If getting a glean error count, report it as covering the metric the
+ # error occurred in, not the `glean.error.*` metric itself.
+ if entry.startswith("glean.error."):
+ entry = entry.split("/")[-1]
+ # If a labeled metric, strip off the label part
+ return entry.split("/")[0]
+
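+# For example (derived from the logic above):
+#
+#     _coverage_entry_to_metric_id("category.metric/some_label")
+#     # -> "category.metric"
+#     _coverage_entry_to_metric_id("glean.error.invalid_value/category.metric")
+#     # -> "category.metric"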
+
+def _read_coverage_entries(coverage_reports: List[Path]) -> Set[str]:
+ """
+    Read coverage entries from one or more files and deduplicate them.
+ """
+ entries = set()
+
+ for coverage_report in coverage_reports:
+ with open(coverage_report) as fd:
+ for line in fd.readlines():
+ entries.add(line.strip())
+
+ return entries
+
+
+def coverage(
+ coverage_reports: List[Path],
+ metrics_files: Sequence[Path],
+ output_format: str,
+ output_file: Path,
+ parser_config: Optional[Dict[str, Any]] = None,
+ file=sys.stderr,
+) -> int:
+ """
+ Commandline helper for coverage.
+
+ :param coverage_reports: List of coverage report files, output from the
+ Glean SDK when the `GLEAN_TEST_COVERAGE` environment variable is set.
+ :param metrics_files: List of Path objects to load metrics from.
+ :param output_format: The coverage output format to produce. Must be one of
+ `OUTPUTTERS.keys()`.
+ :param output_file: Path to output coverage report to.
+ :param parser_config: Parser configuration object, passed to
+ `parser.parse_objects`.
+ :return: Non-zero if there were any errors.
+ """
+
+ if parser_config is None:
+ parser_config = {}
+
+ if output_format not in OUTPUTTERS:
+ raise ValueError(f"Unknown outputter {output_format}")
+
+ metrics_files = util.ensure_list(metrics_files)
+
+ all_objects = parser.parse_objects(metrics_files, parser_config)
+
+ if util.report_validation_errors(all_objects):
+ return 1
+
+ entries = _read_coverage_entries(coverage_reports)
+
+ _annotate_coverage(all_objects.value, entries)
+
+ OUTPUTTERS[output_format](all_objects.value, output_file)
+
+ return 0
diff --git a/third_party/python/glean_parser/glean_parser/data_review.py b/third_party/python/glean_parser/glean_parser/data_review.py
new file mode 100644
index 0000000000..2267d49315
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/data_review.py
@@ -0,0 +1,79 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Produce skeleton Data Review Requests.
+"""
+
+from pathlib import Path
+from typing import Sequence
+import re
+
+
+from . import parser
+from . import util
+
+
+def generate(
+ bug: str,
+ metrics_files: Sequence[Path],
+) -> int:
+ """
+ Commandline helper for Data Review Request template generation.
+
+ :param bug: pattern to match in metrics' bug_numbers lists.
+ :param metrics_files: List of Path objects to load metrics from.
+ :return: Non-zero if there were any errors.
+ """
+
+ metrics_files = util.ensure_list(metrics_files)
+
+ # Accept any value of expires.
+ parser_options = {
+ "allow_reserved": True,
+ "custom_is_expired": lambda expires: False,
+ "custom_validate_expires": lambda expires: True,
+ }
+ all_objects = parser.parse_objects(metrics_files, parser_options)
+
+ if util.report_validation_errors(all_objects):
+ return 1
+
+    # \Z isn't allowed inside a character class, so use alternation instead.
+ reobj = re.compile(f"\\W{bug}\\W|\\W{bug}$")
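+    # e.g. for bug "1694739", "...show_bug.cgi?id=1694739" matches ("=" is a
+    # non-word character and the digits end the string), while a superstring
+    # like "16947390" does not.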
+ durations = set()
+ responsible_emails = set()
+ filtered_metrics = list()
+ for metrics in all_objects.value.values():
+ for metric in metrics.values():
+            if not any(len(reobj.findall(bug_url)) == 1 for bug_url in metric.bugs):
+ continue
+
+ filtered_metrics.append(metric)
+
+ durations.add(metric.expires)
+
+ if metric.expires == "never":
+ responsible_emails.update(metric.notification_emails)
+
+ if len(filtered_metrics) == 0:
+ print(f"I'm sorry, I couldn't find metrics matching the bug number {bug}.")
+ return 1
+
+ template = util.get_jinja2_template(
+ "data_review.jinja2",
+ filters=(("snake_case", util.snake_case),),
+ )
+
+ print(
+ template.render(
+ metrics=filtered_metrics,
+ durations=durations,
+ responsible_emails=responsible_emails,
+ )
+ )
+
+ return 0
diff --git a/third_party/python/glean_parser/glean_parser/javascript.py b/third_party/python/glean_parser/glean_parser/javascript.py
new file mode 100644
index 0000000000..1473065beb
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/javascript.py
@@ -0,0 +1,322 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Outputter to generate Javascript code for metrics.
+"""
+
+import enum
+import json
+from pathlib import Path
+from typing import Any, Dict, Optional, Callable
+
+from . import __version__
+from . import metrics
+from . import util
+
+
+def javascript_datatypes_filter(value: util.JSONType) -> str:
+ """
+    A Jinja2 filter that renders JavaScript literals.
+
+    Based on Python's JSONEncoder, but overrides:
+    - enums to render as their camelCased variant name
+    - lists and sets to render as array literals
+    - Rate objects to render as a CommonMetricData initializer
+      (for external Denominators' Numerators lists)
+ """
+
+ class JavascriptEncoder(json.JSONEncoder):
+ def iterencode(self, value):
+ if isinstance(value, enum.Enum):
+ yield from super().iterencode(util.camelize(value.name))
+ elif isinstance(value, list):
+ yield "["
+ first = True
+ for subvalue in value:
+ if not first:
+ yield ", "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield "]"
+ elif isinstance(value, set):
+ yield "["
+ first = True
+ for subvalue in sorted(list(value)):
+ if not first:
+ yield ", "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield "]"
+ elif isinstance(value, metrics.Rate):
+ yield "CommonMetricData("
+ first = True
+ for arg_name in util.common_metric_args:
+ if hasattr(value, arg_name):
+ if not first:
+ yield ", "
+ yield f"{util.camelize(arg_name)} = "
+ yield from self.iterencode(getattr(value, arg_name))
+ first = False
+ yield ")"
+ else:
+ yield from super().iterencode(value)
+
+ return "".join(JavascriptEncoder().iterencode(value))
+
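+# A sketch of the filter's output, derived from the encoder above:
+#
+#     javascript_datatypes_filter(["a", "b"])   # -> '["a", "b"]'
+#     javascript_datatypes_filter({"b", "a"})   # -> '["a", "b"]' (sets sorted)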
+
+def class_name_factory(platform: str) -> Callable[[str], str]:
+ """
+ Returns a function that receives an obj_type and
+ returns the correct class name for that type in the current platform.
+ """
+
+ def class_name(obj_type: str) -> str:
+ if obj_type == "ping":
+ class_name = "PingType"
+ else:
+ if obj_type.startswith("labeled_"):
+ obj_type = obj_type[8:]
+ class_name = util.Camelize(obj_type) + "MetricType"
+
+ if platform == "qt":
+ return "Glean.Glean._private." + class_name
+
+ return class_name
+
+ return class_name
+
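+# For example (sketch): class_name_factory("qt")("labeled_counter") returns
+# "Glean.Glean._private.CounterMetricType", while the "webext" factory
+# returns plain "CounterMetricType".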
+
+def extra_type_name(extra_type: str) -> str:
+ """
+ Returns the equivalent TypeScript type to an extra type.
+ """
+ if extra_type == "quantity":
+ return "number"
+
+ return extra_type
+
+
+def import_path(obj_type: str) -> str:
+ """
+ Returns the import path of the given object inside the @mozilla/glean package.
+ """
+ if obj_type == "ping":
+ import_path = "ping"
+ else:
+ if obj_type.startswith("labeled_"):
+ obj_type = obj_type[8:]
+ import_path = "metrics/" + obj_type
+
+ return import_path
+
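+# e.g. import_path("labeled_string") -> "metrics/string", and
+# import_path("ping") -> "ping".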
+
+def args(obj_type: str) -> Dict[str, object]:
+ """
+ Returns the list of arguments for each object type.
+ """
+ if obj_type == "ping":
+ return {"common": util.ping_args, "extra": []}
+
+ return {"common": util.common_metric_args, "extra": util.extra_metric_args}
+
+
+def generate_build_date(date: Optional[str]) -> str:
+ """
+ Generate the build Date object.
+ """
+
+ ts = util.build_date(date)
+
+ data = [
+ str(ts.year),
+        # JavaScript Date months are 0-based (January == 0),
+        # while Python's are 1-based, hence the adjustment.
+ str(ts.month - 1),
+ str(ts.day),
+ str(ts.hour),
+ str(ts.minute),
+ str(ts.second),
+ ]
+ components = ", ".join(data)
+
+ # DatetimeMetricType takes a `Date` instance.
+ return f"new Date({components})" # noqa
+
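+# For example (sketch, assuming `util.build_date` parses the ISO string):
+#
+#     generate_build_date("2022-01-03T17:30:00")
+#     # -> 'new Date(2022, 0, 3, 17, 30, 0)'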
+
+def output(
+ lang: str,
+ objs: metrics.ObjectTree,
+ output_dir: Path,
+ options: Optional[Dict[str, Any]] = None,
+) -> None:
+ """
+    Given a tree of objects, output JavaScript or TypeScript code to `output_dir`.
+
+    :param lang: Either "javascript" or "typescript".
+    :param objs: A tree of objects (metrics and pings) as returned from
+    `parser.parse_objects`.
+    :param output_dir: Path to an output directory to write to.
+    :param options: options dictionary, with the following optional keys:
+      - `platform`: The platform to build for. Options are `webext`, `qt`,
+        and `node`. Default is `webext`.
+      - `version`: The version of the Glean.js Qt library being used.
+        This option is mandatory when targeting Qt. Note that the version
+        string must only contain the major and minor version, e.g. 0.14.
+      - `with_buildinfo`: If "true" a `gleanBuildInfo.(js|ts)` file is generated.
+        Otherwise generation of that file is skipped. Defaults to "false".
+      - `build_date`: If set to `0` a static Unix epoch time will be used.
+        If set to an ISO8601 datetime string (e.g. `2022-01-03T17:30:00`)
+        that date will be used.
+        Other values will throw an error.
+        If not set, the current date & time are used.
+ """
+
+ if options is None:
+ options = {}
+
+ platform = options.get("platform", "webext")
+ accepted_platforms = ["qt", "webext", "node"]
+ if platform not in accepted_platforms:
+ raise ValueError(
+ f"Unknown platform: {platform}. Accepted platforms are: {accepted_platforms}." # noqa
+ )
+ version = options.get("version")
+ if platform == "qt" and version is None:
+ raise ValueError(
+ "'version' option is required when building for the 'qt' platform."
+ )
+
+ template = util.get_jinja2_template(
+ "javascript.jinja2",
+ filters=(
+ ("class_name", class_name_factory(platform)),
+ ("extra_type_name", extra_type_name),
+ ("import_path", import_path),
+ ("js", javascript_datatypes_filter),
+ ("args", args),
+ ),
+ )
+
+    # Compute the extension up front: it is needed again below for the
+    # build info file, even when `objs` is empty.
+    extension = ".js" if lang == "javascript" else ".ts"
+
+    for category_key, category_val in objs.items():
+        filename = util.camelize(category_key) + extension
+ filepath = output_dir / filename
+
+        types = {
+            # This takes care of the regular metric type imports
+            # as well as the labeled metric subtype imports,
+            # thus the removal of the `labeled_` substring.
+            #
+            # The actual LabeledMetricType import is conditioned on
+            # the `has_labeled_metrics` boolean.
+            obj.type if not obj.type.startswith("labeled_") else obj.type[8:]
+            for obj in category_val.values()
+        }
+ has_labeled_metrics = any(
+ getattr(metric, "labeled", False) for metric in category_val.values()
+ )
+ with filepath.open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__,
+ category_name=category_key,
+ objs=category_val,
+ extra_args=util.extra_args,
+ platform=platform,
+ version=version,
+ has_labeled_metrics=has_labeled_metrics,
+ types=types,
+ lang=lang,
+ )
+ )
+ # Jinja2 squashes the final newline, so we explicitly add it
+ fd.write("\n")
+
+ with_buildinfo = options.get("with_buildinfo", "").lower() == "true"
+ build_date = options.get("build_date", None)
+ if with_buildinfo:
+ # Write out the special "build info" file
+ template = util.get_jinja2_template(
+ "javascript.buildinfo.jinja2",
+ )
+ # This filename needs to start with "glean" so it can never
+ # clash with a metric category
+ filename = "gleanBuildInfo" + extension
+ filepath = output_dir / filename
+
+ with filepath.open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__,
+ platform=platform,
+ build_date=generate_build_date(build_date),
+ )
+ )
+ fd.write("\n")
+
+ if platform == "qt":
+ # Explicitly create a qmldir file when building for Qt
+ template = util.get_jinja2_template("qmldir.jinja2")
+ filepath = output_dir / "qmldir"
+
+ with filepath.open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__, categories=objs.keys(), version=version
+ )
+ )
+ # Jinja2 squashes the final newline, so we explicitly add it
+ fd.write("\n")
+
+
+def output_javascript(
+ objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
+) -> None:
+ """
+    Given a tree of objects, output JavaScript code to `output_dir`.
+
+    :param objs: A tree of objects (metrics and pings) as returned from
+    `parser.parse_objects`.
+    :param output_dir: Path to an output directory to write to.
+    :param options: options dictionary, with the following optional keys:
+
+    - `namespace`: The identifier of the global variable to assign to.
+       This only has an effect for Qt and static web sites.
+       Default is `Glean`.
+    - `platform`: The platform to build for. Options are `webext`, `qt`,
+      and `node`. Default is `webext`.
+ """
+
+ output("javascript", objs, output_dir, options)
+
+
+def output_typescript(
+ objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
+) -> None:
+ """
+    Given a tree of objects, output TypeScript code to `output_dir`.
+
+    # Note
+
+    Currently the only difference between the TypeScript and JavaScript
+    templates is the file extension.
+
+    :param objs: A tree of objects (metrics and pings) as returned from
+    `parser.parse_objects`.
+    :param output_dir: Path to an output directory to write to.
+    :param options: options dictionary, with the following optional keys:
+
+    - `namespace`: The identifier of the global variable to assign to.
+       This only has an effect for Qt and static web sites.
+       Default is `Glean`.
+    - `platform`: The platform to build for. Options are `webext`, `qt`,
+      and `node`. Default is `webext`.
+ """
+
+ output("typescript", objs, output_dir, options)
diff --git a/third_party/python/glean_parser/glean_parser/kotlin.py b/third_party/python/glean_parser/glean_parser/kotlin.py
new file mode 100644
index 0000000000..82cc63d237
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/kotlin.py
@@ -0,0 +1,356 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Outputter to generate Kotlin code for metrics.
+"""
+
+from collections import OrderedDict
+import enum
+import json
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Union # noqa
+
+from . import __version__
+from . import metrics
+from . import pings
+from . import tags
+from . import util
+from .util import DictWrapper
+
+
+def kotlin_datatypes_filter(value: util.JSONType) -> str:
+ """
+ A Jinja2 filter that renders Kotlin literals.
+
+ Based on Python's JSONEncoder, but overrides:
+ - lists to use listOf
+ - dicts to use mapOf
+ - sets to use setOf
+ - enums to use the like-named Kotlin enum
+ - Rate objects to a CommonMetricData initializer
+ (for external Denominators' Numerators lists)
+ """
+
+ class KotlinEncoder(json.JSONEncoder):
+ def iterencode(self, value):
+ if isinstance(value, list):
+ yield "listOf("
+ first = True
+ for subvalue in value:
+ if not first:
+ yield ", "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield ")"
+ elif isinstance(value, dict):
+ yield "mapOf("
+ first = True
+ for key, subvalue in value.items():
+ if not first:
+ yield ", "
+ yield from self.iterencode(key)
+ yield " to "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield ")"
+ elif isinstance(value, enum.Enum):
+ # UniFFI generates SCREAMING_CASE enum variants.
+ yield (value.__class__.__name__ + "." + util.screaming_case(value.name))
+ elif isinstance(value, set):
+ yield "setOf("
+ first = True
+ for subvalue in sorted(list(value)):
+ if not first:
+ yield ", "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield ")"
+ elif isinstance(value, metrics.Rate):
+ yield "CommonMetricData("
+ first = True
+ for arg_name in util.common_metric_args:
+ if hasattr(value, arg_name):
+ if not first:
+ yield ", "
+ yield f"{util.camelize(arg_name)} = "
+ yield from self.iterencode(getattr(value, arg_name))
+ first = False
+ yield ")"
+ else:
+ yield from super().iterencode(value)
+
+ return "".join(KotlinEncoder().iterencode(value))
+
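+# A sketch of the filter's output, derived from the encoder above:
+#
+#     kotlin_datatypes_filter(["a", "b"])   # -> 'listOf("a", "b")'
+#     kotlin_datatypes_filter({"k": 1})     # -> 'mapOf("k" to 1)'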
+
+def type_name(obj: Union[metrics.Metric, pings.Ping]) -> str:
+ """
+ Returns the Kotlin type to use for a given metric or ping object.
+ """
+ generate_enums = getattr(obj, "_generate_enums", [])
+ if len(generate_enums):
+ generic = None
+ for member, suffix in generate_enums:
+ if len(getattr(obj, member)):
+ if isinstance(obj, metrics.Event):
+ generic = util.Camelize(obj.name) + suffix
+ else:
+ generic = util.camelize(obj.name) + suffix
+ else:
+ if isinstance(obj, metrics.Event):
+ generic = "NoExtras"
+ else:
+ generic = "No" + suffix
+
+ return "{}<{}>".format(class_name(obj.type), generic)
+
+ return class_name(obj.type)
+
+
+def extra_type_name(typ: str) -> str:
+ """
+ Returns the corresponding Kotlin type for event's extra key types.
+ """
+
+ if typ == "boolean":
+ return "Boolean"
+ elif typ == "string":
+ return "String"
+ elif typ == "quantity":
+ return "Int"
+ else:
+ return "UNSUPPORTED"
+
+
+def class_name(obj_type: str) -> str:
+ """
+ Returns the Kotlin class name for a given metric or ping type.
+ """
+ if obj_type == "ping":
+ return "PingType"
+ if obj_type.startswith("labeled_"):
+ obj_type = obj_type[8:]
+ return util.Camelize(obj_type) + "MetricType"
+
+
+def generate_build_date(date: Optional[str]) -> str:
+ """
+ Generate the build timestamp.
+ """
+
+ ts = util.build_date(date)
+
+ data = [
+ str(ts.year),
+        # Java Calendar months are 0-based (JANUARY == 0),
+        # while Python's are 1-based, hence the adjustment.
+ str(ts.month - 1),
+ str(ts.day),
+ str(ts.hour),
+ str(ts.minute),
+ str(ts.second),
+ ]
+ components = ", ".join(data)
+
+ # DatetimeMetricType takes a `Calendar` instance.
+ return f'Calendar.getInstance(TimeZone.getTimeZone("GMT+0")).also {{ cal -> cal.set({components}) }}' # noqa
+
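+# For example (sketch, assuming `util.build_date` parses the ISO string),
+# generate_build_date("2022-01-03T17:30:00") yields:
+#
+#     Calendar.getInstance(TimeZone.getTimeZone("GMT+0"))
+#         .also { cal -> cal.set(2022, 0, 3, 17, 30, 0) }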
+
+def output_gecko_lookup(
+ objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
+) -> None:
+ """
+ Given a tree of objects, generate a Kotlin map between Gecko histograms and
+ Glean SDK metric types.
+
+    :param objs: A tree of objects (metrics and pings) as returned from
+ `parser.parse_objects`.
+ :param output_dir: Path to an output directory to write to.
+ :param options: options dictionary, with the following optional keys:
+
+ - `namespace`: The package namespace to declare at the top of the
+ generated files. Defaults to `GleanMetrics`.
+ - `glean_namespace`: The package namespace of the glean library itself.
+ This is where glean objects will be imported from in the generated
+ code.
+ """
+ if options is None:
+ options = {}
+
+ template = util.get_jinja2_template(
+ "kotlin.geckoview.jinja2",
+ filters=(
+ ("kotlin", kotlin_datatypes_filter),
+ ("type_name", type_name),
+ ("class_name", class_name),
+ ),
+ )
+
+ namespace = options.get("namespace", "GleanMetrics")
+ glean_namespace = options.get("glean_namespace", "mozilla.components.service.glean")
+
+ # Build a dictionary that contains data for metrics that are
+ # histogram-like/scalar-like and contain a gecko_datapoint, with this format:
+ #
+ # {
+ # "histograms": {
+ # "category": [
+ # {"gecko_datapoint": "the-datapoint", "name": "the-metric-name"},
+ # ...
+ # ],
+ # ...
+ # },
+ # "other-type": {}
+ # }
+ gecko_metrics: Dict[str, Dict[str, List[Dict[str, str]]]] = DictWrapper()
+
+ # Define scalar-like types.
+ SCALAR_LIKE_TYPES = ["boolean", "string", "quantity"]
+
+ for category_key, category_val in objs.items():
+ # Support exfiltration of Gecko metrics from products using both the
+ # Glean SDK and GeckoView. See bug 1566356 for more context.
+ for metric in category_val.values():
+ # This is not a Gecko metric, skip it.
+ if (
+ isinstance(metric, pings.Ping)
+ or isinstance(metric, tags.Tag)
+ or not getattr(metric, "gecko_datapoint", False)
+ ):
+ continue
+
+ # Put scalars in their own categories, histogram-like in "histograms" and
+ # categorical histograms in "categoricals".
+ type_category = "histograms"
+ if metric.type in SCALAR_LIKE_TYPES:
+ type_category = metric.type
+ elif metric.type == "labeled_counter":
+ # Labeled counters with a 'gecko_datapoint' property
+ # are categorical histograms.
+ type_category = "categoricals"
+
+ gecko_metrics.setdefault(type_category, OrderedDict())
+ gecko_metrics[type_category].setdefault(category_key, [])
+
+ gecko_metrics[type_category][category_key].append(
+ {"gecko_datapoint": metric.gecko_datapoint, "name": metric.name}
+ )
+
+ if not gecko_metrics:
+ # Bail out and don't create a file if no gecko metrics
+ # are found.
+ return
+
+ filepath = output_dir / "GleanGeckoMetricsMapping.kt"
+ with filepath.open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__,
+ gecko_metrics=gecko_metrics,
+ namespace=namespace,
+ glean_namespace=glean_namespace,
+ )
+ )
+ # Jinja2 squashes the final newline, so we explicitly add it
+ fd.write("\n")
+
+
+def output_kotlin(
+ objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
+) -> None:
+ """
+ Given a tree of objects, output Kotlin code to `output_dir`.
+
+    :param objs: A tree of objects (metrics and pings) as returned from
+ `parser.parse_objects`.
+ :param output_dir: Path to an output directory to write to.
+ :param options: options dictionary, with the following optional keys:
+
+ - `namespace`: The package namespace to declare at the top of the
+ generated files. Defaults to `GleanMetrics`.
+ - `glean_namespace`: The package namespace of the glean library itself.
+ This is where glean objects will be imported from in the generated
+ code.
+ - `with_buildinfo`: If "true" a `GleanBuildInfo.kt` file is generated.
+ Otherwise generation of that file is skipped.
+ Defaults to "true".
+      - `build_date`: If set to `0` a static Unix epoch time will be used.
+        If set to an ISO8601 datetime string (e.g. `2022-01-03T17:30:00`)
+        that date will be used.
+        Other values will throw an error.
+        If not set, the current date & time are used.
+ """
+ if options is None:
+ options = {}
+
+ namespace = options.get("namespace", "GleanMetrics")
+ glean_namespace = options.get("glean_namespace", "mozilla.components.service.glean")
+ namespace_package = namespace[: namespace.rfind(".")]
+ with_buildinfo = options.get("with_buildinfo", "true").lower() == "true"
+ build_date = options.get("build_date", None)
+
+ # Write out the special "build info" object
+ template = util.get_jinja2_template(
+ "kotlin.buildinfo.jinja2",
+ )
+
+ if with_buildinfo:
+ build_date = generate_build_date(build_date)
+ # This filename needs to start with "Glean" so it can never clash with a
+ # metric category
+ with (output_dir / "GleanBuildInfo.kt").open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__,
+ namespace=namespace,
+ namespace_package=namespace_package,
+ glean_namespace=glean_namespace,
+ build_date=build_date,
+ )
+ )
+ fd.write("\n")
+
+ template = util.get_jinja2_template(
+ "kotlin.jinja2",
+ filters=(
+ ("kotlin", kotlin_datatypes_filter),
+ ("type_name", type_name),
+ ("extra_type_name", extra_type_name),
+ ("class_name", class_name),
+ ),
+ )
+
+ for category_key, category_val in objs.items():
+ filename = util.Camelize(category_key) + ".kt"
+ filepath = output_dir / filename
+
+ obj_types = sorted(
+ list(set(class_name(obj.type) for obj in category_val.values()))
+ )
+ has_labeled_metrics = any(
+ getattr(metric, "labeled", False) for metric in category_val.values()
+ )
+
+ with filepath.open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__,
+ category_name=category_key,
+ objs=category_val,
+ obj_types=obj_types,
+ common_metric_args=util.common_metric_args,
+ extra_metric_args=util.extra_metric_args,
+ ping_args=util.ping_args,
+ namespace=namespace,
+ has_labeled_metrics=has_labeled_metrics,
+ glean_namespace=glean_namespace,
+ )
+ )
+ # Jinja2 squashes the final newline, so we explicitly add it
+ fd.write("\n")
+
+ # TODO: Maybe this should just be a separate outputter?
+ output_gecko_lookup(objs, output_dir, options)
diff --git a/third_party/python/glean_parser/glean_parser/lint.py b/third_party/python/glean_parser/glean_parser/lint.py
new file mode 100644
index 0000000000..0dc2bddd5d
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/lint.py
@@ -0,0 +1,538 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import enum
+from pathlib import Path
+import re
+import sys
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Generator,
+ List,
+ Iterable,
+ Optional,
+ Tuple,
+ Union,
+) # noqa
+
+
+from . import metrics
+from . import parser
+from . import pings
+from . import tags
+from . import util
+
+
+LintGenerator = Generator[str, None, None]
+
+
+class CheckType(enum.Enum):
+ warning = 0
+ error = 1
+
+
+def _split_words(name: str) -> List[str]:
+ """
+    Helper function to split a name into words on `.`, `_`, or `-`.
+ """
+ return re.split("[._-]", name)
+
+
+def _english_list(items: List[str]) -> str:
+ """
+ Helper function to format a list [A, B, C] as "'A', 'B', or 'C'".
+ """
+ if len(items) == 0:
+ return ""
+ elif len(items) == 1:
+ return f"'{items[0]}'"
+ else:
+ return "{}, or '{}'".format(
+ ", ".join([f"'{x}'" for x in items[:-1]]), items[-1]
+ )
+
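+# e.g. _english_list(["metrics", "events"]) -> "'metrics', or 'events'".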
+
+def _hamming_distance(str1: str, str2: str) -> int:
+ """
+ Count the # of differences between strings str1 and str2,
+ padding the shorter one with whitespace
+ """
+
+ diffs = 0
+ if len(str1) < len(str2):
+ str1, str2 = str2, str1
+ len_dist = len(str1) - len(str2)
+ str2 += " " * len_dist
+
+ for ch1, ch2 in zip(str1, str2):
+ if ch1 != ch2:
+ diffs += 1
+ return diffs
+
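+# e.g. _hamming_distance("metrics", "metric") == 1: the shorter string is
+# padded, so a single missing or differing character counts as one edit.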
+
+def check_common_prefix(
+ category_name: str, metrics: Iterable[metrics.Metric]
+) -> LintGenerator:
+ """
+ Check if all metrics begin with a common prefix.
+ """
+ metric_words = sorted([_split_words(metric.name) for metric in metrics])
+
+ if len(metric_words) < 2:
+ return
+
+ first = metric_words[0]
+ last = metric_words[-1]
+
+ for i in range(min(len(first), len(last))):
+ if first[i] != last[i]:
+ break
+
+ if i > 0:
+ common_prefix = "_".join(first[:i])
+ yield (
+ f"Within category '{category_name}', all metrics begin with "
+ f"prefix '{common_prefix}'."
+ "Remove the prefixes on the metric names and (possibly) "
+ "rename the category."
+ )
+
+
+def check_unit_in_name(
+ metric: metrics.Metric, parser_config: Dict[str, Any]
+) -> LintGenerator:
+ """
+    Check if the metric name redundantly ends in a unit.
+ """
+ TIME_UNIT_ABBREV = {
+ "nanosecond": "ns",
+ "microsecond": "us",
+ "millisecond": "ms",
+ "second": "s",
+ "minute": "m",
+ "hour": "h",
+ "day": "d",
+ }
+
+ MEMORY_UNIT_ABBREV = {
+ "byte": "b",
+ "kilobyte": "kb",
+ "megabyte": "mb",
+ "gigabyte": "gb",
+ }
+
+ name_words = _split_words(metric.name)
+ unit_in_name = name_words[-1]
+
+ time_unit = getattr(metric, "time_unit", None)
+ memory_unit = getattr(metric, "memory_unit", None)
+ unit = getattr(metric, "unit", None)
+
+ if time_unit is not None:
+ if (
+ unit_in_name == TIME_UNIT_ABBREV.get(time_unit.name)
+ or unit_in_name == time_unit.name
+ ):
+ yield (
+ f"Suffix '{unit_in_name}' is redundant with time_unit "
+ f"'{time_unit.name}'. Only include time_unit."
+ )
+ elif (
+ unit_in_name in TIME_UNIT_ABBREV.keys()
+ or unit_in_name in TIME_UNIT_ABBREV.values()
+ ):
+ yield (
+ f"Suffix '{unit_in_name}' doesn't match time_unit "
+ f"'{time_unit.name}'. "
+ "Confirm the unit is correct and only include time_unit."
+ )
+
+ elif memory_unit is not None:
+ if (
+ unit_in_name == MEMORY_UNIT_ABBREV.get(memory_unit.name)
+ or unit_in_name == memory_unit.name
+ ):
+ yield (
+ f"Suffix '{unit_in_name}' is redundant with memory_unit "
+ f"'{memory_unit.name}'. "
+ "Only include memory_unit."
+ )
+ elif (
+ unit_in_name in MEMORY_UNIT_ABBREV.keys()
+ or unit_in_name in MEMORY_UNIT_ABBREV.values()
+ ):
+ yield (
+ f"Suffix '{unit_in_name}' doesn't match memory_unit "
+ f"{memory_unit.name}'. "
+ "Confirm the unit is correct and only include memory_unit."
+ )
+
+ elif unit is not None:
+ if unit_in_name == unit:
+ yield (
+ f"Suffix '{unit_in_name}' is redundant with unit param "
+ f"'{unit}'. "
+ "Only include unit."
+ )
+
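+# For example, a timespan metric named "page_load_time_ms" with time_unit
+# "millisecond" triggers the "redundant with time_unit" message above, since
+# its last word "ms" abbreviates the declared unit.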
+
+def check_category_generic(
+ category_name: str, metrics: Iterable[metrics.Metric]
+) -> LintGenerator:
+ """
+    Check if the category name is too generic.
+ """
+ GENERIC_CATEGORIES = ["metrics", "events"]
+
+ if category_name in GENERIC_CATEGORIES:
+ yield (
+ f"Category '{category_name}' is too generic. "
+ f"Don't use {_english_list(GENERIC_CATEGORIES)} for category names"
+ )
+
+
+def check_bug_number(
+ metric: Union[metrics.Metric, pings.Ping], parser_config: Dict[str, Any]
+) -> LintGenerator:
+ number_bugs = [str(bug) for bug in metric.bugs if isinstance(bug, int)]
+
+ if len(number_bugs):
+ yield (
+ f"For bugs {', '.join(number_bugs)}: "
+ "Bug numbers are deprecated and should be changed to full URLs. "
+ f"For example, use 'http://bugzilla.mozilla.org/{number_bugs[0]}' "
+ f"instead of '{number_bugs[0]}'."
+ )
+
+
+def check_valid_in_baseline(
+ metric: metrics.Metric, parser_config: Dict[str, Any]
+) -> LintGenerator:
+ allow_reserved = parser_config.get("allow_reserved", False)
+
+ if not allow_reserved and "baseline" in metric.send_in_pings:
+ yield (
+ "The baseline ping is Glean-internal. "
+ "Remove 'baseline' from the send_in_pings array."
+ )
+
+
+def check_misspelled_pings(
+ metric: metrics.Metric, parser_config: Dict[str, Any]
+) -> LintGenerator:
+ for ping in metric.send_in_pings:
+ for builtin in pings.RESERVED_PING_NAMES:
+ distance = _hamming_distance(ping, builtin)
+ if distance == 1:
+ yield f"Ping '{ping}' seems misspelled. Did you mean '{builtin}'?"
+
+
+def check_tags_required(
+ metric_or_ping: Union[metrics.Metric, pings.Ping], parser_config: Dict[str, Any]
+) -> LintGenerator:
+ if parser_config.get("require_tags", False) and not len(
+ metric_or_ping.metadata.get("tags", [])
+ ):
+ yield "Tags are required but no tags specified"
+
+
+def check_user_lifetime_expiration(
+ metric: metrics.Metric, parser_config: Dict[str, Any]
+) -> LintGenerator:
+ if metric.lifetime == metrics.Lifetime.user and metric.expires != "never":
+ yield (
+ "Metrics with 'user' lifetime cannot have an expiration date. "
+ "They live as long as the user profile does. "
+ "Set expires to 'never'."
+ )
+
+
+def check_expired_date(
+ metric: metrics.Metric, parser_config: Dict[str, Any]
+) -> LintGenerator:
+ try:
+ metric.validate_expires()
+ except ValueError as e:
+ yield (str(e))
+
+
+def check_expired_metric(
+ metric: metrics.Metric, parser_config: Dict[str, Any]
+) -> LintGenerator:
+ if metric.is_expired():
+ yield ("Metric has expired. Please consider removing it.")
+
+
+def check_old_event_api(
+ metric: metrics.Metric, parser_config: Dict[str, Any]
+) -> LintGenerator:
+ # Glean v52.0.0 removed the old events API.
+ # The metrics-2-0-0 schema still supports it.
+ # We want to warn about it.
+ # This can go when we introduce 3-0-0
+
+ if not isinstance(metric, metrics.Event):
+ return
+
+ if not all("type" in x for x in metric.extra_keys.values()):
+ yield ("The old event API is gone. Extra keys require a type.")
+
+
+def check_redundant_ping(
+    ping: pings.Ping, parser_config: Dict[str, Any]
+) -> LintGenerator:
+    """
+    Check if the ping name redundantly contains 'ping' as a prefix, suffix,
+    or word, or contains the word 'custom'.
+    """
+    ping_words = _split_words(ping.name)
+
+ if len(ping_words) != 0:
+ ping_first_word = ping_words[0]
+ ping_last_word = ping_words[-1]
+
+ if ping_first_word == "ping":
+ yield ("The prefix 'ping' is redundant.")
+ elif ping_last_word == "ping":
+ yield ("The suffix 'ping' is redundant.")
+ elif "ping" in ping_words:
+ yield ("The word 'ping' is redundant.")
+ elif "custom" in ping_words:
+ yield ("The word 'custom' is redundant.")
+
+
+# The checks that operate on an entire category of metrics:
+# {NAME: (function, check_type)}
+CATEGORY_CHECKS: Dict[
+ str, Tuple[Callable[[str, Iterable[metrics.Metric]], LintGenerator], CheckType]
+] = {
+ "COMMON_PREFIX": (check_common_prefix, CheckType.error),
+ "CATEGORY_GENERIC": (check_category_generic, CheckType.error),
+}
+
+
+# The checks that operate on individual metrics:
+# {NAME: (function, check_type)}
+METRIC_CHECKS: Dict[
+ str, Tuple[Callable[[metrics.Metric, dict], LintGenerator], CheckType]
+] = {
+ "UNIT_IN_NAME": (check_unit_in_name, CheckType.error),
+ "BUG_NUMBER": (check_bug_number, CheckType.error),
+ "BASELINE_PING": (check_valid_in_baseline, CheckType.error),
+ "MISSPELLED_PING": (check_misspelled_pings, CheckType.error),
+ "TAGS_REQUIRED": (check_tags_required, CheckType.error),
+ "EXPIRATION_DATE_TOO_FAR": (check_expired_date, CheckType.warning),
+ "USER_LIFETIME_EXPIRATION": (check_user_lifetime_expiration, CheckType.warning),
+ "EXPIRED": (check_expired_metric, CheckType.warning),
+ "OLD_EVENT_API": (check_old_event_api, CheckType.warning),
+}
+
+
+# The checks that operate on individual pings:
+# {NAME: (function, check_type)}
+PING_CHECKS: Dict[
+ str, Tuple[Callable[[pings.Ping, dict], LintGenerator], CheckType]
+] = {
+ "BUG_NUMBER": (check_bug_number, CheckType.error),
+ "TAGS_REQUIRED": (check_tags_required, CheckType.error),
+ "REDUNDANT_PING": (check_redundant_ping, CheckType.error),
+}
+
+
+class GlinterNit:
+ def __init__(self, check_name: str, name: str, msg: str, check_type: CheckType):
+ self.check_name = check_name
+ self.name = name
+ self.msg = msg
+ self.check_type = check_type
+
+ def format(self):
+ return (
+ f"{self.check_type.name.upper()}: {self.check_name}: "
+ f"{self.name}: {self.msg}"
+ )
+
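+# e.g. (sketch): GlinterNit("MISSPELLED_PING", "category.metric",
+# "Ping 'basline' seems misspelled. Did you mean 'baseline'?",
+# CheckType.error).format() renders as
+# "ERROR: MISSPELLED_PING: category.metric: Ping 'basline' seems ...".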
+
+def _lint_item_tags(
+ item_name: str,
+ item_type: str,
+ item_tag_names: List[str],
+ valid_tag_names: List[str],
+) -> List[GlinterNit]:
+ invalid_tags = [tag for tag in item_tag_names if tag not in valid_tag_names]
+ return (
+ [
+ GlinterNit(
+ "INVALID_TAGS",
+ item_name,
+ f"Invalid tags specified in {item_type}: {', '.join(invalid_tags)}",
+ CheckType.error,
+ )
+ ]
+ if len(invalid_tags)
+ else []
+ )
+
+
+def _lint_pings(
+ category: Dict[str, Union[metrics.Metric, pings.Ping, tags.Tag]],
+ parser_config: Dict[str, Any],
+ valid_tag_names: List[str],
+) -> List[GlinterNit]:
+ nits: List[GlinterNit] = []
+
+ for ping_name, ping in sorted(list(category.items())):
+ assert isinstance(ping, pings.Ping)
+ for check_name, (check_func, check_type) in PING_CHECKS.items():
+ new_nits = list(check_func(ping, parser_config))
+ if len(new_nits):
+ if check_name not in ping.no_lint:
+ nits.extend(
+ GlinterNit(
+ check_name,
+ ping_name,
+ msg,
+ check_type,
+ )
+ for msg in new_nits
+ )
+ nits.extend(
+ _lint_item_tags(
+ ping_name,
+ "ping",
+ ping.metadata.get("tags", []),
+ valid_tag_names,
+ )
+ )
+ return nits
+
+
+def lint_metrics(
+ objs: metrics.ObjectTree,
+ parser_config: Optional[Dict[str, Any]] = None,
+ file=sys.stderr,
+) -> List[GlinterNit]:
+ """
+ Performs glinter checks on a set of metrics objects.
+
+    :param objs: Tree of metric objects, as returned by `parser.parse_objects`.
+    :param parser_config: Parser configuration object, passed to the
+        individual checks.
+    :param file: The stream to write errors to.
+ :returns: List of nits.
+ """
+ if parser_config is None:
+ parser_config = {}
+
+ nits: List[GlinterNit] = []
+    valid_tag_names = list(objs.get("tags", []))
+ for category_name, category in sorted(list(objs.items())):
+ if category_name == "pings":
+ nits.extend(_lint_pings(category, parser_config, valid_tag_names))
+ continue
+
+ if category_name == "tags":
+ # currently we have no linting for tags
+ continue
+
+ # Make sure the category has only Metrics, not Pings or Tags
+ category_metrics = dict(
+ (name, metric)
+ for (name, metric) in category.items()
+ if isinstance(metric, metrics.Metric)
+ )
+
+ for cat_check_name, (cat_check_func, check_type) in CATEGORY_CHECKS.items():
+ if any(
+ cat_check_name in metric.no_lint for metric in category_metrics.values()
+ ):
+ continue
+ nits.extend(
+ GlinterNit(cat_check_name, category_name, msg, check_type)
+ for msg in cat_check_func(category_name, category_metrics.values())
+ )
+
+ for _metric_name, metric in sorted(list(category_metrics.items())):
+ for check_name, (check_func, check_type) in METRIC_CHECKS.items():
+ new_nits = list(check_func(metric, parser_config))
+ if len(new_nits):
+ if check_name not in metric.no_lint:
+ nits.extend(
+ GlinterNit(
+ check_name,
+ ".".join([metric.category, metric.name]),
+ msg,
+ check_type,
+ )
+ for msg in new_nits
+ )
+
+ # also check that tags for metric are valid
+ nits.extend(
+ _lint_item_tags(
+ ".".join([metric.category, metric.name]),
+ "metric",
+ metric.metadata.get("tags", []),
+ valid_tag_names,
+ )
+ )
+
+ if len(nits):
+ print("Sorry, Glean found some glinter nits:", file=file)
+ for nit in nits:
+ print(nit.format(), file=file)
+ print("", file=file)
+ print("Please fix the above nits to continue.", file=file)
+ print(
+ "To disable a check, add a `no_lint` parameter "
+ "with a list of check names to disable.\n"
+ "This parameter can appear with each individual metric, or at the "
+ "top-level to affect the entire file.",
+ file=file,
+ )
+
+ return nits
+
+
+def lint_yaml_files(
+ input_filepaths: Iterable[Path],
+ file=sys.stderr,
+ parser_config: Optional[Dict[str, Any]] = None,
+) -> List:
+ """Always empty."""
+ return []
+
+
+def glinter(
+ input_filepaths: Iterable[Path],
+ parser_config: Optional[Dict[str, Any]] = None,
+ file=sys.stderr,
+) -> int:
+ """
+ Commandline helper for glinter.
+
+ :param input_filepaths: List of Path objects to load metrics from.
+ :param parser_config: Parser configuration object, passed to
+ `parser.parse_objects`.
+ :param file: The stream to write the errors to.
+ :return: Non-zero if there were any glinter errors.
+ """
+ if parser_config is None:
+ parser_config = {}
+
+ errors = 0
+
+ objs = parser.parse_objects(input_filepaths, parser_config)
+ errors += util.report_validation_errors(objs)
+
+ nits = lint_metrics(objs.value, parser_config=parser_config, file=file)
+ errors += len([nit for nit in nits if nit.check_type == CheckType.error])
+
+ if errors == 0:
+ print("✨ Your metrics are Glean! ✨", file=file)
+ return 0
+
+ print(f"❌ Found {errors} errors.")
+
+ return 1
diff --git a/third_party/python/glean_parser/glean_parser/markdown.py b/third_party/python/glean_parser/glean_parser/markdown.py
new file mode 100644
index 0000000000..68b288945f
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/markdown.py
@@ -0,0 +1,273 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Outputter to generate Markdown documentation for metrics.
+"""
+
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple, Union
+from urllib.parse import urlsplit, parse_qs
+
+
+from . import __version__
+from . import metrics
+from . import pings
+from . import util
+from collections import defaultdict
+
+
+def extra_info(
+    obj: Union[metrics.Metric, pings.Ping]
+) -> List[Tuple[str, Optional[str]]]:
+    """
+    Returns a list of (name, description) tuples with extra information for
+    the type (e.g. extra keys for events; labels carry no description) or an
+    empty list if nothing is available.
+ """
+ extra_info = []
+
+ if isinstance(obj, metrics.Event):
+ for key in obj.allowed_extra_keys:
+ extra_info.append((key, obj.extra_keys[key]["description"]))
+
+ if isinstance(obj, metrics.Labeled) and obj.ordered_labels is not None:
+ for label in obj.ordered_labels:
+ extra_info.append((label, None))
+
+ if isinstance(obj, metrics.Quantity):
+ extra_info.append(("unit", obj.unit))
+
+ return extra_info
+
+
+def ping_desc(
+ ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
+) -> str:
+ """
+    Return a text description of the ping. If a custom_pings_cache
+    is available, look there for the descriptions of non-reserved pings.
+ """
+ desc = ""
+
+ if ping_name in pings.RESERVED_PING_NAMES:
+ desc = (
+ "This is a built-in ping that is assembled out of the "
+ "box by the Glean SDK."
+ )
+ elif ping_name == "all-pings":
+ desc = "These metrics are sent in every ping."
+ elif custom_pings_cache is not None and ping_name in custom_pings_cache:
+ desc = custom_pings_cache[ping_name].description
+
+ return desc
+
+
+def metrics_docs(obj_name: str) -> str:
+ """
+ Return a link to the documentation entry for the Glean SDK metric of the
+ requested type.
+ """
+    # Fix up labeled types: type names are singular, but the docs
+    # refer to them in the plural.
+ fixedup_name = obj_name
+ if obj_name.startswith("labeled_"):
+ fixedup_name += "s"
+
+ return f"https://mozilla.github.io/glean/book/user/metrics/{fixedup_name}.html"
+
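+# e.g. metrics_docs("labeled_counter") links to
+# "https://mozilla.github.io/glean/book/user/metrics/labeled_counters.html".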
+
+def ping_docs(ping_name: str) -> str:
+ """
+ Return a link to the documentation entry for the requested Glean SDK
+ built-in ping.
+ """
+ if ping_name not in pings.RESERVED_PING_NAMES:
+ return ""
+
+ return f"https://mozilla.github.io/glean/book/user/pings/{ping_name}.html"
+
+
+def if_empty(
+ ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
+) -> bool:
+ if custom_pings_cache is not None and ping_name in custom_pings_cache:
+ return custom_pings_cache[ping_name].send_if_empty
+ else:
+ return False
+
+
+def ping_reasons(
+ ping_name: str, custom_pings_cache: Dict[str, pings.Ping]
+) -> Dict[str, str]:
+ """
+ Returns the reasons dictionary for the ping.
+ """
+ if ping_name == "all-pings":
+ return {}
+ elif ping_name in custom_pings_cache:
+ return custom_pings_cache[ping_name].reasons
+
+ return {}
+
+
+def ping_data_reviews(
+ ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
+) -> Optional[List[str]]:
+ if custom_pings_cache is not None and ping_name in custom_pings_cache:
+ return custom_pings_cache[ping_name].data_reviews
+ else:
+ return None
+
+
+def ping_review_title(data_url: str, index: int) -> str:
+ """
+ Return a title for a data review in human readable form.
+
+ :param data_url: A url for data review.
+    :param index: Position of the data review in the list (e.g. 1, 2, 3...).
+ """
+ url_object = urlsplit(data_url)
+
+ # Bugzilla urls like `https://bugzilla.mozilla.org/show_bug.cgi?id=1581647`
+ query = url_object.query
+ params = parse_qs(query)
+
+ # GitHub urls like `https://github.com/mozilla-mobile/fenix/pull/1707`
+ path = url_object.path
+ short_url = path[1:].replace("/pull/", "#")
+
+ if params and params["id"]:
+ return f"Bug {params['id'][0]}"
+ elif url_object.netloc == "github.com":
+ return short_url
+
+ return f"Review {index}"
+
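+# For example (derived from the parsing above):
+#
+#     ping_review_title("https://bugzilla.mozilla.org/show_bug.cgi?id=1581647", 1)
+#     # -> "Bug 1581647"
+#     ping_review_title("https://github.com/mozilla-mobile/fenix/pull/1707", 2)
+#     # -> "mozilla-mobile/fenix#1707"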
+
+def ping_bugs(
+ ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
+) -> Optional[List[str]]:
+ if custom_pings_cache is not None and ping_name in custom_pings_cache:
+ return custom_pings_cache[ping_name].bugs
+ else:
+ return None
+
+
+def ping_include_client_id(
+ ping_name: str, custom_pings_cache: Optional[Dict[str, pings.Ping]] = None
+) -> bool:
+ if custom_pings_cache is not None and ping_name in custom_pings_cache:
+ return custom_pings_cache[ping_name].include_client_id
+ else:
+ return False
+
+
+def data_sensitivity_numbers(
+ data_sensitivity: Optional[List[metrics.DataSensitivity]],
+) -> str:
+ if data_sensitivity is None:
+ return "unknown"
+ else:
+ return ", ".join(str(x.value) for x in data_sensitivity)
+
+
+def output_markdown(
+ objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
+) -> None:
+ """
+ Given a tree of objects, output Markdown docs to `output_dir`.
+
+ This produces a single `metrics.md`. The file contains a table of
+ contents and a section for each ping metrics are collected for.
+
+    :param objs: A tree of objects (metrics and pings) as returned from
+    `parser.parse_objects`.
+    :param output_dir: Path to an output directory to write to.
+    :param options: options dictionary, with the following optional keys:
+      - `project_title`: The project's title.
+      - `introduction_extra`: Additional introductory text, if any.
+ """
+ if options is None:
+ options = {}
+
+ # Build a dictionary that associates pings with their metrics.
+ #
+ # {
+ # "baseline": [
+ # { ... metric data ... },
+ # ...
+ # ],
+ # "metrics": [
+ # { ... metric data ... },
+ # ...
+ # ],
+ # ...
+ # }
+ #
+ # This also builds a dictionary of custom pings, if available.
+    custom_pings_cache: Dict[str, pings.Ping] = {}
+ metrics_by_pings: Dict[str, List[metrics.Metric]] = defaultdict(list)
+ for _category_key, category_val in objs.items():
+ for obj in category_val.values():
+ # Filter out custom pings. We will need them for extracting
+ # the description
+ if isinstance(obj, pings.Ping):
+ custom_pings_cache[obj.name] = obj
+ # Pings that have `send_if_empty` set to true,
+ # might not have any metrics. They need to at least have an
+ # empty array of metrics to show up on the template.
+ if obj.send_if_empty and not metrics_by_pings[obj.name]:
+ metrics_by_pings[obj.name] = []
+
+            # Skip internal Glean metrics; we don't want docs for them.
+            if isinstance(obj, metrics.Metric) and not obj.is_internal_metric():
+                # obj is a user-facing metric we want docs for.
+ for ping_name in obj.send_in_pings:
+ metrics_by_pings[ping_name].append(obj)
+
+ # Sort the metrics by their identifier, to make them show up nicely
+ # in the docs and to make generated docs reproducible.
+ for ping_name in metrics_by_pings:
+ metrics_by_pings[ping_name] = sorted(
+ metrics_by_pings[ping_name], key=lambda x: x.identifier()
+ )
+
+ project_title = options.get("project_title", "this project")
+ introduction_extra = options.get("introduction_extra")
+
+ template = util.get_jinja2_template(
+ "markdown.jinja2",
+ filters=(
+ ("extra_info", extra_info),
+ ("metrics_docs", metrics_docs),
+ ("ping_desc", lambda x: ping_desc(x, custom_pings_cache)),
+ ("ping_send_if_empty", lambda x: if_empty(x, custom_pings_cache)),
+ ("ping_docs", ping_docs),
+ ("ping_reasons", lambda x: ping_reasons(x, custom_pings_cache)),
+ ("ping_data_reviews", lambda x: ping_data_reviews(x, custom_pings_cache)),
+ ("ping_review_title", ping_review_title),
+ ("ping_bugs", lambda x: ping_bugs(x, custom_pings_cache)),
+ (
+ "ping_include_client_id",
+ lambda x: ping_include_client_id(x, custom_pings_cache),
+ ),
+ ("data_sensitivity_numbers", data_sensitivity_numbers),
+ ),
+ )
+
+ filename = "metrics.md"
+ filepath = output_dir / filename
+
+ with filepath.open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__,
+ metrics_by_pings=metrics_by_pings,
+ project_title=project_title,
+ introduction_extra=introduction_extra,
+ )
+ )
+ # Jinja2 squashes the final newline, so we explicitly add it
+ fd.write("\n")
diff --git a/third_party/python/glean_parser/glean_parser/metrics.py b/third_party/python/glean_parser/glean_parser/metrics.py
new file mode 100644
index 0000000000..6398938997
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/metrics.py
@@ -0,0 +1,435 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Classes for each of the high-level metric types.
+"""
+
+import enum
+from typing import Any, Dict, List, Optional, Type, Union # noqa
+
+
+from . import pings
+from . import tags
+from . import util
+
+
+# Important: if the values here ever change, make sure to also
+# fix mozilla/glean. Otherwise language bindings may break there.
+class Lifetime(enum.Enum):
+ ping = 0
+ application = 1
+ user = 2
+
+
+class DataSensitivity(enum.Enum):
+ technical = 1
+ interaction = 2
+ web_activity = 3
+ highly_sensitive = 4
+
+
+class Metric:
+ typename: str = "ERROR"
+ glean_internal_metric_cat: str = "glean.internal.metrics"
+ metric_types: Dict[str, Any] = {}
+ default_store_names: List[str] = ["metrics"]
+
+ def __init__(
+ self,
+ type: str,
+ category: str,
+ name: str,
+ bugs: List[str],
+ description: str,
+ notification_emails: List[str],
+ expires: Any,
+ metadata: Optional[Dict] = None,
+ data_reviews: Optional[List[str]] = None,
+ version: int = 0,
+ disabled: bool = False,
+ lifetime: str = "ping",
+ send_in_pings: Optional[List[str]] = None,
+ unit: Optional[str] = None,
+ gecko_datapoint: str = "",
+ no_lint: Optional[List[str]] = None,
+ data_sensitivity: Optional[List[str]] = None,
+ defined_in: Optional[Dict] = None,
+ telemetry_mirror: Optional[str] = None,
+ _config: Optional[Dict[str, Any]] = None,
+ _validated: bool = False,
+ ):
+ # Avoid cyclical import
+ from . import parser
+
+ self.type = type
+ self.category = category
+ self.name = name
+ self.bugs = bugs
+ self.description = description
+ self.notification_emails = notification_emails
+ self.expires = expires
+ if metadata is None:
+ metadata = {}
+ self.metadata = metadata
+ if data_reviews is None:
+ data_reviews = []
+ self.data_reviews = data_reviews
+ self.version = version
+ self.disabled = disabled
+ self.lifetime = getattr(Lifetime, lifetime)
+ if send_in_pings is None:
+ send_in_pings = ["default"]
+ self.send_in_pings = send_in_pings
+ if unit is not None:
+ self.unit = unit
+ self.gecko_datapoint = gecko_datapoint
+ if no_lint is None:
+ no_lint = []
+ self.no_lint = no_lint
+ if data_sensitivity is not None:
+ self.data_sensitivity = [
+ getattr(DataSensitivity, x) for x in data_sensitivity
+ ]
+ self.defined_in = defined_in
+ if telemetry_mirror is not None:
+ self.telemetry_mirror = telemetry_mirror
+
+ # _validated indicates whether this metric has already been jsonschema
+ # validated (but not any of the Python-level validation).
+ if not _validated:
+ data = {
+ "$schema": parser.METRICS_ID,
+ self.category: {self.name: self._serialize_input()},
+ } # type: Dict[str, util.JSONType]
+ for error in parser.validate(data):
+ raise ValueError(error)
+
+ # Store the config, but only after validation.
+ if _config is None:
+ _config = {}
+ self._config = _config
+
+ # Metrics in the special category "glean.internal.metrics" need to have
+ # an empty category string when identifying the metrics in the ping.
+ if self.category == Metric.glean_internal_metric_cat:
+ self.category = ""
+
+ def __init_subclass__(cls, **kwargs):
+ # Create a mapping of all of the subclasses of this class
+ if cls not in Metric.metric_types and hasattr(cls, "typename"):
+ Metric.metric_types[cls.typename] = cls
+ super().__init_subclass__(**kwargs)
+
+ @classmethod
+ def make_metric(
+ cls,
+ category: str,
+ name: str,
+ metric_info: Dict[str, util.JSONType],
+ config: Optional[Dict[str, Any]] = None,
+ validated: bool = False,
+ ):
+ """
+ Given a metric_info dictionary from metrics.yaml, return a metric
+ instance.
+
+ :param category: The category the metric lives in
+ :param name: The name of the metric
+ :param metric_info: A dictionary of the remaining metric parameters
+ :param config: A dictionary containing commandline configuration
+ parameters
+ :param validated: True if the metric has already gone through
+ jsonschema validation
+ :return: A new Metric instance.
+ """
+ if config is None:
+ config = {}
+
+ metric_type = metric_info["type"]
+ if not isinstance(metric_type, str):
+ raise TypeError(f"Unknown metric type {metric_type}")
+ return cls.metric_types[metric_type](
+ category=category,
+ name=name,
+ defined_in=getattr(metric_info, "defined_in", None),
+ _validated=validated,
+ _config=config,
+ **metric_info,
+ )
+
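+ # Editorial sketch, not part of the vendored module: how `make_metric`
+ # dispatches on the "type" field. The metrics.yaml values below are
+ # hypothetical; `validated=True` skips the jsonschema pass so no schema
+ # lookup is required.
+ #
+ # metric = Metric.make_metric(
+ #     category="browser.engagement",
+ #     name="tab_count",
+ #     metric_info={
+ #         "type": "counter",
+ #         "bugs": ["https://bugzilla.mozilla.org/show_bug.cgi?id=1234567"],
+ #         "description": "Number of open tabs.",
+ #         "notification_emails": ["nobody@example.com"],
+ #         "expires": "never",
+ #     },
+ #     validated=True,
+ # )
+ # assert type(metric).typename == "counter"
+ # assert metric.identifier() == "browser.engagement.tab_count"
+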
+ def serialize(self) -> Dict[str, util.JSONType]:
+ """
+ Serialize the metric back to JSON object model.
+ """
+ d = self.__dict__.copy()
+ # Convert enum fields back to strings
+ for key, val in d.items():
+ if isinstance(val, enum.Enum):
+ d[key] = d[key].name
+ if isinstance(val, set):
+ d[key] = sorted(list(val))
+ if isinstance(val, list) and len(val) and isinstance(val[0], enum.Enum):
+ d[key] = [x.name for x in val]
+ del d["name"]
+ del d["category"]
+ d.pop("_config", None)
+ d.pop("_generate_enums", None)
+ return d
+
+ def _serialize_input(self) -> Dict[str, util.JSONType]:
+ d = self.serialize()
+ modified_dict = util.remove_output_params(d, "defined_in")
+ return modified_dict
+
+ def identifier(self) -> str:
+ """
+ Create an identifier unique for this metric.
+ Generally, category.name; however, Glean internal
+ metrics only use name.
+ """
+ if not self.category:
+ return self.name
+ return ".".join((self.category, self.name))
+
+ def is_disabled(self) -> bool:
+ return self.disabled or self.is_expired()
+
+ def is_expired(self) -> bool:
+ def default_handler(expires) -> bool:
+ return util.is_expired(expires, self._config.get("expire_by_version"))
+
+ return self._config.get("custom_is_expired", default_handler)(self.expires)
+
+ def validate_expires(self):
+ def default_handler(expires):
+ return util.validate_expires(expires, self._config.get("expire_by_version"))
+
+ return self._config.get("custom_validate_expires", default_handler)(
+ self.expires
+ )
+
+ def is_internal_metric(self) -> bool:
+ return self.category in (Metric.glean_internal_metric_cat, "")
+
+
+class Boolean(Metric):
+ typename = "boolean"
+
+
+class String(Metric):
+ typename = "string"
+
+
+class StringList(Metric):
+ typename = "string_list"
+
+
+class Counter(Metric):
+ typename = "counter"
+
+
+class Quantity(Metric):
+ typename = "quantity"
+
+
+class TimeUnit(enum.Enum):
+ nanosecond = 0
+ microsecond = 1
+ millisecond = 2
+ second = 3
+ minute = 4
+ hour = 5
+ day = 6
+
+
+class TimeBase(Metric):
+ def __init__(self, *args, **kwargs):
+ self.time_unit = getattr(TimeUnit, kwargs.pop("time_unit", "millisecond"))
+ super().__init__(*args, **kwargs)
+
+
+class Timespan(TimeBase):
+ typename = "timespan"
+
+
+class TimingDistribution(TimeBase):
+ typename = "timing_distribution"
+
+ def __init__(self, *args, **kwargs):
+ self.time_unit = getattr(TimeUnit, kwargs.pop("time_unit", "nanosecond"))
+ Metric.__init__(self, *args, **kwargs)
+
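+# Editorial note, not part of the vendored module: TimeBase subclasses
+# default to millisecond resolution, while TimingDistribution overrides the
+# default to nanosecond (constructor kwargs elided as hypothetical):
+#
+#   Timespan(...).time_unit            # -> TimeUnit.millisecond
+#   TimingDistribution(...).time_unit  # -> TimeUnit.nanosecond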
+
+class MemoryUnit(enum.Enum):
+ byte = 0
+ kilobyte = 1
+ megabyte = 2
+ gigabyte = 3
+
+
+class MemoryDistribution(Metric):
+ typename = "memory_distribution"
+
+ def __init__(self, *args, **kwargs):
+ self.memory_unit = getattr(MemoryUnit, kwargs.pop("memory_unit", "byte"))
+ super().__init__(*args, **kwargs)
+
+
+class HistogramType(enum.Enum):
+ linear = 0
+ exponential = 1
+
+
+class CustomDistribution(Metric):
+ typename = "custom_distribution"
+
+ def __init__(self, *args, **kwargs):
+ self.range_min = kwargs.pop("range_min", 1)
+ self.range_max = kwargs.pop("range_max")
+ self.bucket_count = kwargs.pop("bucket_count")
+ self.histogram_type = getattr(
+ HistogramType, kwargs.pop("histogram_type", "exponential")
+ )
+ super().__init__(*args, **kwargs)
+
+
+class Datetime(TimeBase):
+ typename = "datetime"
+
+
+class Event(Metric):
+ typename = "event"
+
+ default_store_names = ["events"]
+
+ def __init__(self, *args, **kwargs):
+ self.extra_keys = kwargs.pop("extra_keys", {})
+ self.validate_extra_keys(self.extra_keys, kwargs.get("_config", {}))
+ super().__init__(*args, **kwargs)
+ self._generate_enums = [("allowed_extra_keys_with_types", "Extra")]
+
+ @property
+ def allowed_extra_keys(self):
+ # Sort keys so that output is deterministic
+ return sorted(list(self.extra_keys.keys()))
+
+ @property
+ def allowed_extra_keys_with_types(self):
+ # Sort keys so that output is deterministic
+ return sorted(
+ [(k, v.get("type", "string")) for (k, v) in self.extra_keys.items()],
+ key=lambda x: x[0],
+ )
+
+ @staticmethod
+ def validate_extra_keys(extra_keys: Dict[str, str], config: Dict[str, Any]) -> None:
+ if not config.get("allow_reserved") and any(
+ k.startswith("glean.") for k in extra_keys.keys()
+ ):
+ raise ValueError(
+ "Extra keys beginning with 'glean.' are reserved for "
+ "Glean internal use."
+ )
+
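+# Editorial sketch, not part of the vendored module: extra keys are surfaced
+# in sorted order, and "glean."-prefixed keys are rejected unless the
+# `allow_reserved` config is set. All values below are hypothetical.
+#
+#   extras = {"source": {"type": "string"}, "count": {"type": "quantity"}}
+#   Event.validate_extra_keys(extras, config={})  # passes
+#   # allowed_extra_keys_with_types on the instance would yield
+#   # [("count", "quantity"), ("source", "string")]
+#
+#   Event.validate_extra_keys({"glean.x": {}}, {})  # raises ValueError
+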
+
+class Uuid(Metric):
+ typename = "uuid"
+
+
+class Url(Metric):
+ typename = "url"
+
+
+class Jwe(Metric):
+ typename = "jwe"
+
+ def __init__(self, *args, **kwargs):
+ raise ValueError(
+ "JWE support was removed. "
+ "If you require this send an email to glean-team@mozilla.com."
+ )
+
+
+class CowString(str):
+ """
+ Wrapper class for strings that should be represented
+ as a `Cow<'static, str>` in Rust,
+ or `String` in other target languages.
+
+ This wraps `str`, so unless `CowString` is specifically
+ handled it acts (and serializes)
+ as a string.
+ """
+
+ def __init__(self, val: str):
+ self.inner: str = val
+
+ def __eq__(self, other):
+ return self.inner == other.inner
+
+ def __hash__(self):
+ return self.inner.__hash__()
+
+ def __lt__(self, other):
+ return self.inner.__lt__(other.inner)
+
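+# Editorial sketch, not part of the vendored module: a CowString behaves as
+# a plain str unless specifically handled, while `.inner` stays available to
+# code generators (e.g. the Rust outputter) that need the raw value.
+#
+#   labels = {CowString("active"), CowString("inactive")}
+#   sorted(labels)[0].inner  # -> "active"
+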
+
+class Labeled(Metric):
+ labeled = True
+
+ def __init__(self, *args, **kwargs):
+ labels = kwargs.pop("labels", None)
+ if labels is not None:
+ self.ordered_labels = labels
+ self.labels = set([CowString(label) for label in labels])
+ else:
+ self.ordered_labels = None
+ self.labels = None
+ super().__init__(*args, **kwargs)
+
+ def serialize(self) -> Dict[str, util.JSONType]:
+ """
+ Serialize the metric back to JSON object model.
+ """
+ d = super().serialize()
+ d["labels"] = self.ordered_labels
+ del d["ordered_labels"]
+ return d
+
+
+class LabeledBoolean(Labeled, Boolean):
+ typename = "labeled_boolean"
+
+
+class LabeledString(Labeled, String):
+ typename = "labeled_string"
+
+
+class LabeledCounter(Labeled, Counter):
+ typename = "labeled_counter"
+
+
+class Rate(Metric):
+ typename = "rate"
+
+ def __init__(self, *args, **kwargs):
+ self.denominator_metric = kwargs.pop("denominator_metric", None)
+ super().__init__(*args, **kwargs)
+
+
+class Denominator(Counter):
+ typename = "denominator"
+ # A denominator is a counter with an additional list of numerators.
+ numerators: List[Rate] = []
+
+
+class Text(Metric):
+ typename = "text"
+
+
+ObjectTree = Dict[str, Dict[str, Union[Metric, pings.Ping, tags.Tag]]]
diff --git a/third_party/python/glean_parser/glean_parser/parser.py b/third_party/python/glean_parser/glean_parser/parser.py
new file mode 100644
index 0000000000..5ca584ac1e
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/parser.py
@@ -0,0 +1,446 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Code for parsing metrics.yaml files.
+"""
+
+import functools
+from pathlib import Path
+import textwrap
+from typing import Any, Dict, Generator, Iterable, Optional, Tuple, Union
+
+import jsonschema # type: ignore
+from jsonschema.exceptions import ValidationError # type: ignore
+
+from .metrics import Metric, ObjectTree
+from .pings import Ping, RESERVED_PING_NAMES
+from .tags import Tag
+from . import util
+from .util import DictWrapper
+
+
+ROOT_DIR = Path(__file__).parent
+SCHEMAS_DIR = ROOT_DIR / "schemas"
+
+METRICS_ID = "moz://mozilla.org/schemas/glean/metrics/2-0-0"
+PINGS_ID = "moz://mozilla.org/schemas/glean/pings/2-0-0"
+TAGS_ID = "moz://mozilla.org/schemas/glean/tags/1-0-0"
+
+
+def _update_validator(validator):
+ """
+ Adds some custom validators to the jsonschema validator that produce
+ nicer error messages.
+ """
+
+ def required(validator, required, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+ missing_properties = set(
+ property for property in required if property not in instance
+ )
+ if len(missing_properties):
+ missing_properties = sorted(list(missing_properties))
+ yield ValidationError(
+ f"Missing required properties: {', '.join(missing_properties)}"
+ )
+
+ validator.VALIDATORS["required"] = required
+
+
+def _load_file(
+ filepath: Path, parser_config: Dict[str, Any]
+) -> Generator[str, None, Tuple[Dict[str, util.JSONType], Optional[str]]]:
+ """
+ Load a metrics.yaml or pings.yaml format file.
+
+ If the `filepath` does not exist, raises `FileNotFoundError`, unless
+ `parser_config["allow_missing_files"]` is `True`.
+ """
+ try:
+ content = util.load_yaml_or_json(filepath)
+ except FileNotFoundError:
+ if not parser_config.get("allow_missing_files", False):
+ raise
+ else:
+ return {}, None
+ except Exception as e:
+ yield util.format_error(filepath, "", textwrap.fill(str(e)))
+ return {}, None
+
+ if content is None:
+ yield util.format_error(filepath, "", f"'{filepath}' file cannot be empty.")
+ return {}, None
+
+ if not isinstance(content, dict):
+ return {}, None
+
+ if content == {}:
+ return {}, None
+
+ schema_key = content.get("$schema")
+ if not isinstance(schema_key, str):
+ raise TypeError(f"Invalid schema key {schema_key}")
+
+ filetype: Optional[str] = None
+ try:
+ filetype = schema_key.split("/")[-2]
+ except IndexError:
+ filetype = None
+
+ if filetype not in ("metrics", "pings", "tags"):
+ filetype = None
+
+ for error in validate(content, filepath):
+ content = {}
+ yield error
+
+ return content, filetype
+
+
+@functools.lru_cache(maxsize=1)
+def _load_schemas() -> Dict[str, Tuple[Any, Any]]:
+ """
+ Load all of the known schemas from disk, and put them in a map based on the
+ schema's $id.
+ """
+ schemas = {}
+ for schema_path in SCHEMAS_DIR.glob("*.yaml"):
+ schema = util.load_yaml_or_json(schema_path)
+ resolver = util.get_null_resolver(schema)
+ validator_class = jsonschema.validators.validator_for(schema)
+ _update_validator(validator_class)
+ validator_class.check_schema(schema)
+ validator = validator_class(schema, resolver=resolver)
+ schemas[schema["$id"]] = (schema, validator)
+ return schemas
+
+
+def _get_schema(
+ schema_id: str, filepath: Union[str, Path] = "<input>"
+) -> Tuple[Any, Any]:
+ """
+ Get the schema for the given schema $id.
+ """
+ schemas = _load_schemas()
+ if schema_id not in schemas:
+ raise ValueError(
+ util.format_error(
+ filepath,
+ "",
+ f"$schema key must be one of {', '.join(schemas.keys())}",
+ )
+ )
+ return schemas[schema_id]
+
+
+def _get_schema_for_content(
+ content: Dict[str, util.JSONType], filepath: Union[str, Path]
+) -> Tuple[Any, Any]:
+ """
+ Get the appropriate schema for the given JSON content.
+ """
+ schema_url = content.get("$schema")
+ if not isinstance(schema_url, str):
+ raise TypeError("Invalid $schema type {schema_url}")
+ return _get_schema(schema_url, filepath)
+
+
+def validate(
+ content: Dict[str, util.JSONType], filepath: Union[str, Path] = "<input>"
+) -> Generator[str, None, None]:
+ """
+ Validate the given content against the appropriate schema.
+ """
+ try:
+ schema, validator = _get_schema_for_content(content, filepath)
+ except ValueError as e:
+ yield str(e)
+ else:
+ yield from (
+ util.format_error(filepath, "", util.pprint_validation_error(e))
+ for e in validator.iter_errors(content)
+ )
+
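+# Editorial sketch, not part of the vendored module: `validate` yields
+# formatted error strings instead of raising, so callers can collect every
+# problem in a single pass. The content below is hypothetical.
+#
+#   content = {"$schema": METRICS_ID, "category": {"metric": {}}}
+#   for error in validate(content, "metrics.yaml"):
+#       print(error)  # e.g. "Missing required properties: type, bugs, ..."
+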
+
+def _instantiate_metrics(
+ all_objects: ObjectTree,
+ sources: Dict[Any, Path],
+ content: Dict[str, util.JSONType],
+ filepath: Path,
+ config: Dict[str, Any],
+) -> Generator[str, None, None]:
+ """
+ Load a list of metrics.yaml files, convert the JSON information into Metric
+ objects, and merge them into a single tree.
+ """
+ global_no_lint = content.get("no_lint", [])
+ global_tags = content.get("$tags", [])
+ assert isinstance(global_tags, list)
+
+ for category_key, category_val in sorted(content.items()):
+ if category_key.startswith("$"):
+ continue
+ if category_key == "no_lint":
+ continue
+ if not config.get("allow_reserved") and category_key.split(".")[0] == "glean":
+ yield util.format_error(
+ filepath,
+ f"For category '{category_key}'",
+ "Categories beginning with 'glean' are reserved for "
+ "Glean internal use.",
+ )
+ continue
+ all_objects.setdefault(category_key, DictWrapper())
+
+ if not isinstance(category_val, dict):
+ raise TypeError(f"Invalid content for {category_key}")
+
+ for metric_key, metric_val in sorted(category_val.items()):
+ try:
+ metric_obj = Metric.make_metric(
+ category_key, metric_key, metric_val, validated=True, config=config
+ )
+ except Exception as e:
+ yield util.format_error(
+ filepath,
+ f"On instance {category_key}.{metric_key}",
+ str(e),
+ metric_val.defined_in["line"],
+ )
+ metric_obj = None
+ else:
+ if (
+ not config.get("allow_reserved")
+ and "all-pings" in metric_obj.send_in_pings
+ ):
+ yield util.format_error(
+ filepath,
+ f"On instance {category_key}.{metric_key}",
+ 'Only internal metrics may specify "all-pings" '
+ 'in "send_in_pings"',
+ metric_val.defined_in["line"],
+ )
+ metric_obj = None
+
+ if metric_obj is not None:
+ metric_obj.no_lint = sorted(set(metric_obj.no_lint + global_no_lint))
+ if len(global_tags):
+ metric_obj.metadata["tags"] = sorted(
+ set(metric_obj.metadata.get("tags", []) + global_tags)
+ )
+
+ if isinstance(filepath, Path):
+ metric_obj.defined_in["filepath"] = str(filepath)
+
+ already_seen = sources.get((category_key, metric_key))
+ if already_seen is not None:
+ # We've seen this metric name already
+ yield util.format_error(
+ filepath,
+ "",
+ (
+ f"Duplicate metric name '{category_key}.{metric_key}' "
+ f"already defined in '{already_seen}'"
+ ),
+ metric_obj.defined_in["line"],
+ )
+ else:
+ all_objects[category_key][metric_key] = metric_obj
+ sources[(category_key, metric_key)] = filepath
+
+
+def _instantiate_pings(
+ all_objects: ObjectTree,
+ sources: Dict[Any, Path],
+ content: Dict[str, util.JSONType],
+ filepath: Path,
+ config: Dict[str, Any],
+) -> Generator[str, None, None]:
+ """
+ Load a list of pings.yaml files, convert the JSON information into Ping
+ objects.
+ """
+ global_no_lint = content.get("no_lint", [])
+ assert isinstance(global_no_lint, list)
+
+ for ping_key, ping_val in sorted(content.items()):
+ if ping_key.startswith("$"):
+ continue
+ if ping_key == "no_lint":
+ continue
+ if not config.get("allow_reserved"):
+ if ping_key in RESERVED_PING_NAMES:
+ yield util.format_error(
+ filepath,
+ f"For ping '{ping_key}'",
+ f"Ping uses a reserved name ({RESERVED_PING_NAMES})",
+ )
+ continue
+ if not isinstance(ping_val, dict):
+ raise TypeError(f"Invalid content for ping {ping_key}")
+ ping_val["name"] = ping_key
+ try:
+ ping_obj = Ping(
+ defined_in=getattr(ping_val, "defined_in", None),
+ _validated=True,
+ **ping_val,
+ )
+ except Exception as e:
+ yield util.format_error(filepath, f"On instance '{ping_key}'", str(e))
+ continue
+
+ if ping_obj is not None:
+ ping_obj.no_lint = sorted(set(ping_obj.no_lint + global_no_lint))
+
+ if isinstance(filepath, Path) and ping_obj.defined_in is not None:
+ ping_obj.defined_in["filepath"] = str(filepath)
+
+ already_seen = sources.get(ping_key)
+ if already_seen is not None:
+ # We've seen this ping name already
+ yield util.format_error(
+ filepath,
+ "",
+ f"Duplicate ping name '{ping_key}' "
+ f"already defined in '{already_seen}'",
+ )
+ else:
+ all_objects.setdefault("pings", {})[ping_key] = ping_obj
+ sources[ping_key] = filepath
+
+
+def _instantiate_tags(
+ all_objects: ObjectTree,
+ sources: Dict[Any, Path],
+ content: Dict[str, util.JSONType],
+ filepath: Path,
+ config: Dict[str, Any],
+) -> Generator[str, None, None]:
+ """
+ Load a list of tags.yaml files, convert the JSON information into Tag
+ objects.
+ """
+ global_no_lint = content.get("no_lint", [])
+ assert isinstance(global_no_lint, list)
+
+ for tag_key, tag_val in sorted(content.items()):
+ if tag_key.startswith("$"):
+ continue
+ if tag_key == "no_lint":
+ continue
+ if not isinstance(tag_val, dict):
+ raise TypeError(f"Invalid content for tag {tag_key}")
+ tag_val["name"] = tag_key
+ try:
+ tag_obj = Tag(
+ defined_in=getattr(tag_val, "defined_in", None),
+ _validated=True,
+ **tag_val,
+ )
+ except Exception as e:
+ yield util.format_error(filepath, f"On instance '{tag_key}'", str(e))
+ continue
+
+ if tag_obj is not None:
+ tag_obj.no_lint = sorted(set(tag_obj.no_lint + global_no_lint))
+
+ if isinstance(filepath, Path) and tag_obj.defined_in is not None:
+ tag_obj.defined_in["filepath"] = str(filepath)
+
+ already_seen = sources.get(tag_key)
+ if already_seen is not None:
+ # We've seen this tag name already
+ yield util.format_error(
+ filepath,
+ "",
+ f"Duplicate tag name '{tag_key}' "
+ f"already defined in '{already_seen}'",
+ )
+ else:
+ all_objects.setdefault("tags", {})[tag_key] = tag_obj
+ sources[tag_key] = filepath
+
+
+def _preprocess_objects(objs: ObjectTree, config: Dict[str, Any]) -> ObjectTree:
+ """
+ Preprocess the object tree to apply defaults: disable expired metrics
+ and expand the "default" ping names.
+ """
+ for category in objs.values():
+ for obj in category.values():
+ if not isinstance(obj, Metric):
+ continue
+
+ if not config.get("do_not_disable_expired", False) and hasattr(
+ obj, "is_disabled"
+ ):
+ obj.disabled = obj.is_disabled()
+
+ if hasattr(obj, "send_in_pings"):
+ if "default" in obj.send_in_pings:
+ obj.send_in_pings = obj.default_store_names + [
+ x for x in obj.send_in_pings if x != "default"
+ ]
+ obj.send_in_pings = sorted(list(set(obj.send_in_pings)))
+ return objs
+
+
+@util.keep_value
+def parse_objects(
+ filepaths: Iterable[Path], config: Optional[Dict[str, Any]] = None
+) -> Generator[str, None, ObjectTree]:
+ """
+ Parse one or more metrics.yaml, pings.yaml, and/or tags.yaml files,
+ returning a tree of `metrics.Metric`, `pings.Ping`, and `tags.Tag`
+ instances.
+
+ The result is a generator over any errors. If there are no errors, the
+ actual metrics can be obtained from `result.value`. For example::
+
+ result = parser.parse_objects(filepaths)
+ for err in result:
+ print(err)
+ all_metrics = result.value
+
+ The result value is a dictionary of category names to categories, where
+ each category is a dictionary from metric name to `metrics.Metric`
+ instances. There are also the special categories `pings` and `tags`
+ containing all of the `pings.Ping` and `tags.Tag` instances, respectively.
+
+ :param filepaths: list of Path objects to metrics.yaml, pings.yaml, and/or
+ tags.yaml files
+ :param config: A dictionary of options that change parsing behavior.
+ Supported keys are:
+
+ - `allow_reserved`: Allow values reserved for internal Glean use.
+ - `do_not_disable_expired`: Don't mark expired metrics as disabled.
+ This is useful when you want to retain the original "disabled"
+ value from the `metrics.yaml`, rather than having it overridden when
+ the metric expires.
+ - `allow_missing_files`: Do not raise a `FileNotFoundError` if any of
+ the input `filepaths` do not exist.
+ """
+ if config is None:
+ config = {}
+
+ all_objects: ObjectTree = DictWrapper()
+ sources: Dict[Any, Path] = {}
+ filepaths = util.ensure_list(filepaths)
+ for filepath in filepaths:
+ content, filetype = yield from _load_file(filepath, config)
+ if filetype == "metrics":
+ yield from _instantiate_metrics(
+ all_objects, sources, content, filepath, config
+ )
+ elif filetype == "pings":
+ yield from _instantiate_pings(
+ all_objects, sources, content, filepath, config
+ )
+ elif filetype == "tags":
+ yield from _instantiate_tags(
+ all_objects, sources, content, filepath, config
+ )
+ return _preprocess_objects(all_objects, config)
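+
+# Editorial sketch, not part of the vendored module: end-to-end use of
+# `parse_objects` via the `@util.keep_value` protocol. The path below is
+# hypothetical.
+#
+#   result = parse_objects([Path("metrics.yaml")])
+#   errors = list(result)  # exhaust the error generator first
+#   objs = result.value    # then read the returned ObjectTree
+#   if not errors:
+#       for category_name, category in objs.items():
+#           print(category_name, sorted(category.keys()))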
diff --git a/third_party/python/glean_parser/glean_parser/pings.py b/third_party/python/glean_parser/glean_parser/pings.py
new file mode 100644
index 0000000000..cb5f2487b9
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/pings.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Classes for managing the description of pings.
+"""
+
+from typing import Dict, List, Optional
+
+
+from . import util
+
+
+RESERVED_PING_NAMES = ["baseline", "metrics", "events", "deletion-request", "default"]
+
+
+class Ping:
+ def __init__(
+ self,
+ name: str,
+ description: str,
+ bugs: List[str],
+ notification_emails: List[str],
+ metadata: Optional[Dict] = None,
+ data_reviews: Optional[List[str]] = None,
+ include_client_id: bool = False,
+ send_if_empty: bool = False,
+ reasons: Optional[Dict[str, str]] = None,
+ defined_in: Optional[Dict] = None,
+ no_lint: Optional[List[str]] = None,
+ _validated: bool = False,
+ ):
+ # Avoid cyclical import
+ from . import parser
+
+ self.name = name
+ self.description = description
+
+ self.bugs = bugs
+ self.notification_emails = notification_emails
+ if metadata is None:
+ metadata = {}
+ self.metadata = metadata
+ if data_reviews is None:
+ data_reviews = []
+ self.data_reviews = data_reviews
+ self.include_client_id = include_client_id
+ self.send_if_empty = send_if_empty
+ if reasons is None:
+ reasons = {}
+ self.reasons = reasons
+ self.defined_in = defined_in
+ if no_lint is None:
+ no_lint = []
+ self.no_lint = no_lint
+
+ # _validated indicates whether this ping has already been jsonschema
+ # validated (but not any of the Python-level validation).
+ if not _validated:
+ data: Dict[str, util.JSONType] = {
+ "$schema": parser.PINGS_ID,
+ self.name: self._serialize_input(),
+ }
+ for error in parser.validate(data):
+ raise ValueError(error)
+
+ _generate_enums = [("reason_codes", "ReasonCodes")]
+
+ @property
+ def type(self) -> str:
+ return "ping"
+
+ @property
+ def reason_codes(self) -> List[str]:
+ return sorted(list(self.reasons.keys()))
+
+ def serialize(self) -> Dict[str, util.JSONType]:
+ """
+ Serialize the ping back to JSON object model.
+ """
+ d = self.__dict__.copy()
+ del d["name"]
+ return d
+
+ def _serialize_input(self) -> Dict[str, util.JSONType]:
+ d = self.serialize()
+ modified_dict = util.remove_output_params(d, "defined_in")
+ return modified_dict
+
+ def identifier(self) -> str:
+ """
+ Used for the "generated from ..." comment in the output.
+ """
+ return self.name
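+
+# Editorial sketch, not part of the vendored module: constructing a Ping
+# directly. Field values are hypothetical; `_validated=True` skips the
+# jsonschema pass.
+#
+#   ping = Ping(
+#       name="my-ping",
+#       description="An example ping.",
+#       bugs=["https://bugzilla.mozilla.org/show_bug.cgi?id=1234567"],
+#       notification_emails=["nobody@example.com"],
+#       reasons={"startup": "The application started."},
+#       _validated=True,
+#   )
+#   assert ping.reason_codes == ["startup"]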
diff --git a/third_party/python/glean_parser/glean_parser/rust.py b/third_party/python/glean_parser/glean_parser/rust.py
new file mode 100644
index 0000000000..eb3355e382
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/rust.py
@@ -0,0 +1,218 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Outputter to generate Rust code for metrics.
+"""
+
+import enum
+import json
+from pathlib import Path
+from typing import Any, Dict, Optional, Union
+
+from . import __version__
+from . import metrics
+from . import pings
+from . import tags
+from . import util
+
+
+def rust_datatypes_filter(value):
+ """
+ A Jinja2 filter that renders Rust literals.
+
+ Based on Python's JSONEncoder, but overrides:
+ - dicts to raise an error
+ - sets to vec![] (used in labels)
+ - enums to become Class::Value
+ - lists to vec![] (used in send_in_pings)
+ - null to None
+ - strings to "value".into()
+ - Rate objects to a CommonMetricData initializer
+ (for external Denominators' Numerators lists)
+ """
+
+ class RustEncoder(json.JSONEncoder):
+ def iterencode(self, value):
+ if isinstance(value, dict):
+ raise ValueError("RustEncoder doesn't know dicts {}".format(str(value)))
+ elif isinstance(value, enum.Enum):
+ yield (value.__class__.__name__ + "::" + util.Camelize(value.name))
+ elif isinstance(value, set):
+ yield "vec!["
+ first = True
+ for subvalue in sorted(list(value)):
+ if not first:
+ yield ", "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield "]"
+ elif isinstance(value, list):
+ yield "vec!["
+ first = True
+ for subvalue in list(value):
+ if not first:
+ yield ", "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield "]"
+ elif value is None:
+ yield "None"
+ # `CowStr` is a `str`, so needs to be before next case
+ elif isinstance(value, metrics.CowString):
+ yield f'::std::borrow::Cow::from("{value.inner}")'
+ elif isinstance(value, str):
+ yield f'"{value}".into()'
+ elif isinstance(value, metrics.Rate):
+ yield "CommonMetricData("
+ first = True
+ for arg_name in util.common_metric_args:
+ if hasattr(value, arg_name):
+ if not first:
+ yield ", "
+ yield f"{util.camelize(arg_name)} = "
+ yield from self.iterencode(getattr(value, arg_name))
+ first = False
+ yield ")"
+ else:
+ yield from super().iterencode(value)
+
+ return "".join(RustEncoder().iterencode(value))
+
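+# Editorial sketch, not part of the vendored module: example renderings of
+# the filter (hypothetical inputs, outputs shown as comments).
+#
+#   rust_datatypes_filter(["metrics", "baseline"])
+#   # -> vec!["metrics".into(), "baseline".into()]
+#   rust_datatypes_filter(metrics.Lifetime.ping)
+#   # -> Lifetime::Ping
+#   rust_datatypes_filter(metrics.CowString("label"))
+#   # -> ::std::borrow::Cow::from("label")
+#   rust_datatypes_filter(None)
+#   # -> None
+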
+
+def ctor(obj):
+ """
+ Returns the scope and name of the constructor to use for a metric object.
+ Necessary because LabeledMetric<T> is constructed using LabeledMetric::new
+ not LabeledMetric<T>::new
+ """
+ if getattr(obj, "labeled", False):
+ return "LabeledMetric::new"
+ return class_name(obj.type) + "::new"
+
+
+def type_name(obj):
+ """
+ Returns the Rust type to use for a given metric or ping object.
+ """
+
+ if getattr(obj, "labeled", False):
+ return "LabeledMetric<{}>".format(class_name(obj.type))
+ generate_enums = getattr(obj, "_generate_enums", []) # Extra Keys? Reasons?
+ if len(generate_enums):
+ generic = None
+ for name, suffix in generate_enums:
+ if len(getattr(obj, name)):
+ generic = util.Camelize(obj.name) + suffix
+ else:
+ if isinstance(obj, metrics.Event):
+ generic = "NoExtra"
+ else:
+ generic = "No" + suffix
+
+ return "{}<{}>".format(class_name(obj.type), generic)
+
+ return class_name(obj.type)
+
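+# Editorial sketch, not part of the vendored module: resulting Rust types
+# and constructors for a few hypothetical objects.
+#
+#   counter                  -> type "CounterMetric", ctor "CounterMetric::new"
+#   labeled_counter          -> type "LabeledMetric<CounterMetric>",
+#                               ctor "LabeledMetric::new"
+#   event with no extra keys -> type "EventMetric<NoExtra>"
+#   ping with no reasons     -> type "Ping<NoReasonCodes>"
+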
+
+def extra_type_name(typ: str) -> str:
+ """
+ Returns the corresponding Rust type for event's extra key types.
+ """
+
+ if typ == "boolean":
+ return "bool"
+ elif typ == "string":
+ return "String"
+ elif typ == "quantity":
+ return "u32"
+ else:
+ return "UNSUPPORTED"
+
+
+def class_name(obj_type):
+ """
+ Returns the Rust class name for a given metric or ping type.
+ """
+ if obj_type == "ping":
+ return "Ping"
+ if obj_type.startswith("labeled_"):
+ obj_type = obj_type[8:]
+ return util.Camelize(obj_type) + "Metric"
+
+
+def extra_keys(allowed_extra_keys):
+ """
+ Returns the &'static [&'static str] ALLOWED_EXTRA_KEYS for impl ExtraKeys
+ """
+ return "&[" + ", ".join([f'"{key}"' for key in allowed_extra_keys]) + "]"
+
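+# Editorial sketch, not part of the vendored module:
+#
+#   extra_type_name("quantity")      # -> "u32"
+#   extra_keys(["count", "source"])  # -> '&["count", "source"]'
+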
+
+class Category:
+ """
+ Data struct holding information about a category of metric or ping
+ objects to be used in the template.
+ """
+
+ def __init__(
+ self,
+ name: str,
+ objs: Dict[str, Union[metrics.Metric, pings.Ping, tags.Tag]],
+ contains_pings: bool,
+ ):
+ self.name = name
+ self.objs = objs
+ self.contains_pings = contains_pings
+
+
+def output_rust(
+ objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
+) -> None:
+ """
+ Given a tree of objects, output Rust code to `output_dir`.
+
+ :param objs: A tree of objects (metrics and pings) as returned from
+ `parser.parse_objects`.
+ :param output_dir: Path to an output directory to write to.
+ :param options: options dictionary, not currently used for Rust
+ """
+
+ if options is None:
+ options = {}
+
+ template = util.get_jinja2_template(
+ "rust.jinja2",
+ filters=(
+ ("rust", rust_datatypes_filter),
+ ("snake_case", util.snake_case),
+ ("camelize", util.camelize),
+ ("type_name", type_name),
+ ("extra_type_name", extra_type_name),
+ ("ctor", ctor),
+ ("extra_keys", extra_keys),
+ ),
+ )
+
+ filename = "glean_metrics.rs"
+ filepath = output_dir / filename
+ categories = []
+
+ for category_key, category_val in objs.items():
+ contains_pings = any(
+ isinstance(obj, pings.Ping) for obj in category_val.values()
+ )
+
+ cat = Category(category_key, category_val, contains_pings)
+ categories.append(cat)
+
+ with filepath.open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__,
+ categories=categories,
+ extra_metric_args=util.extra_metric_args,
+ common_metric_args=util.common_metric_args,
+ )
+ )
diff --git a/third_party/python/glean_parser/glean_parser/schemas/metrics.1-0-0.schema.yaml b/third_party/python/glean_parser/glean_parser/schemas/metrics.1-0-0.schema.yaml
new file mode 100644
index 0000000000..047124b771
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/schemas/metrics.1-0-0.schema.yaml
@@ -0,0 +1,605 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+---
+$schema: http://json-schema.org/draft-07/schema#
+title: Metrics
+description: |
+ Schema for the metrics.yaml files for Mozilla's Glean telemetry SDK.
+
+ The top-level of the `metrics.yaml` file has a key defining each category of
+ metrics. Categories must be snake_case, and they may also have dots `.` to
+ define subcategories.
+
+$id: moz://mozilla.org/schemas/glean/metrics/1-0-0
+
+definitions:
+ token:
+ type: string
+ pattern: "^[A-Za-z_][A-Za-z0-9_\\.]*$"
+
+ snake_case:
+ type: string
+ pattern: "^[a-z_][a-z0-9_]*$"
+
+ dotted_snake_case:
+ type: string
+ pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
+ maxLength: 40
+
+ kebab_case:
+ type: string
+ # Bug 1601270; we allow 3 specific existing snake_cased ping names for now,
+ # but these special cases can be removed once the number of legacy clients
+ # sufficiently dwindles, likely in 2020H2.
+ pattern: "^[a-z][a-z0-9-]{0,29}$\
+ |^deletion_request$|^bookmarks_sync$|^history_sync$|^session_end$|^all_pings$|^glean_.*$"
+
+ long_id:
+ allOf:
+ - $ref: "#/definitions/snake_case"
+ - maxLength: 40
+
+ short_id:
+ allOf:
+ - $ref: "#/definitions/snake_case"
+ - maxLength: 30
+
+ labeled_metric_id:
+ type: string
+ pattern: "^[a-z_][a-z0-9_-]{0,29}(\\.[a-z_][a-z0-9_-]{0,29})*$"
+ maxLength: 71 # Note: this should be category + metric + 1
+
+ metric:
+ description: |
+ Describes a single metric.
+
+ See https://mozilla.github.io/glean_parser/metrics-yaml.html
+
+ type: object
+
+ additionalProperties: false
+
+ properties:
+ type:
+ title: Metric type
+ description: |
+ **Required.**
+
+ Specifies the type of a metric, like "counter" or "event". This
+ defines which operations are valid for the metric, how it is stored
+ and how data analysis tooling displays it.
+
+ The supported types are:
+ - `event`: Record a specific event (with optional metadata).
+ Additional properties: `extra_keys`.
+
+ - `boolean`: A metric storing values of true or false.
+
+ - `string`: A metric storing Unicode string values.
+
+ - `string_list`: a list of Unicode strings.
+
+ - `counter`: A numeric value that can only be incremented.
+
+ - `quantity`: A numeric value that is set directly.
+
+ - `timespan`: Represents a time interval. Additional properties:
+ `time_unit`.
+
+ - `timing_distribution`: Record the distribution of multiple
+ timings. Additional properties: `time_unit`.
+
+ - `datetime`: A date/time value. Represented as an ISO datetime in
+ UTC. Additional properties: `time_unit`.
+
+ - `uuid`: Record a UUID v4.
+
+ - `jwe`: Record a [JWE](https://tools.ietf.org/html/rfc7516) value.
+
+ - `memory_distribution`: A histogram for recording memory usage
+ values. Additional properties: `memory_unit`.
+
+ - `custom_distribution`: A histogram with a custom range and number
+ of buckets. This metric type is for legacy support only and is
+ only allowed for metrics coming from GeckoView. Additional
+ properties: `range_min`, `range_max`, `bucket_count`,
+ `histogram_type`.
+
+ - `rate`: Used to record the rate something happens relative to some
+ other thing. For example, the number of HTTP connections that
+ experience an error relative to the number of total HTTP
+ connections made.
+
+ - Additionally, labeled versions of many metric types are supported.
+ These support the `labels`_ parameter, allowing multiple instances
+ of the metric to be stored at a given set of labels. The labeled
+ metric types include:
+
+ `labeled_boolean`, `labeled_string`, `labeled_counter`.
+
+ type: string
+ enum:
+ - event
+ - boolean
+ - string
+ - string_list
+ - counter
+ - quantity
+ - timespan
+ - timing_distribution
+ - custom_distribution
+ - memory_distribution
+ - datetime
+ - uuid
+ - jwe
+ - labeled_boolean
+ - labeled_string
+ - labeled_counter
+
+ description:
+ title: Description
+ description: |
+ **Required.**
+
+ A textual description of what this metric does, what it means, and its
+ edge cases or any other helpful information.
+
+ Descriptions may contain [markdown
+ syntax](https://www.markdownguide.org/basic-syntax/).
+ type: string
+
+ lifetime:
+ title: Lifetime
+ description: |
+ Defines the lifetime of the metric. It must be one of the following
+ values:
+
+ - `ping` (default): The metric is reset each time it is sent in a
+ ping.
+
+ - `user`: The metric contains a property that is part of the user's
+ profile and is never reset.
+
+ - `application`: The metric contains a property that is related to the
+ application, and is reset only at application restarts.
+ enum:
+ - ping
+ - user
+ - application
+ default: ping
+
+ send_in_pings:
+ title: Send in pings
+ description: |
+ Which pings the metric should be sent on. If not specified, the metric
+ is sent on the "default ping", which is the `events` ping for events,
+ and the `metrics` ping for everything else. Most metrics don't need to
+ specify this.
+
+ (There is an additional special value of `all-pings` for internal
+ Glean metrics only that is used to indicate that a metric may appear
+ in any ping.)
+ type: array
+ items:
+ $ref: "#/definitions/kebab_case"
+ default:
+ - default
+
+ notification_emails:
+ title: Notification emails
+ description: |
+ **Required.**
+
+ A list of email addresses to notify for important events with the
+ metric or when people with context or ownership for the metric need to
+ be contacted.
+ type: array
+ minItems: 1
+ items:
+ type: string
+ format: email
+
+ bugs:
+ title: Related bugs
+ description: |
+ **Required.**
+
+ A list of bug URLs (e.g. Bugzilla and Github) that are relevant to
+ this metric, e.g., tracking its original implementation or later
+ changes to it.
+
+ Using bug numbers alone is deprecated and will be an error in the
+ future. Each entry should be a full URL to the bug in its tracker.
+ type: array
+ minItems: 1
+ items:
+ anyOf:
+ - type: integer # Keep supporting integer for backward-compat
+ - type: string
+ format: uri
+
+ data_reviews:
+ title: Review references
+ description: |
+ **Required.**
+
+ A list of URIs to any data collection reviews relevant to the metric.
+ type: array
+ items:
+ type: string
+ format: uri
+
+ disabled:
+ title: Disabled
+ description: |
+ If `true`, the metric is disabled, and any metric collection on it
+ will be silently ignored at runtime.
+ type: boolean
+ default: false
+
+ expires:
+ title: Expires
+ description: |
+ **Required.**
+
+ By default it may be one of the following values:
+ - `<build date>`: An ISO date `yyyy-mm-dd` in UTC on which the
+ metric expires. For example, `2019-03-13`. This date is checked at
+ build time. Except in special cases, this form should be used so
+ that the metric automatically "sunsets" after a period of time.
+ - `never`: This metric never expires.
+ - `expired`: This metric is manually expired.
+
+ The default may be overridden in certain applications by the
+ `custom_validate_expires` and `custom_is_expired` configs.
+ type: string
+
+ version:
+ title: Metric version
+ description: |
+ The version of the metric. A monotonically increasing value. If not
+ provided, defaults to 0.
+
+ time_unit:
+ title: Time unit
+ description: |
+ For timespans and datetimes, specifies the unit that the metric will
+ be stored and displayed in. If not provided, it defaults to
+ "millisecond". Time values are sent to the backend as integers, so
+ `time_unit`_ determines the maximum resolution at which timespans are
+ recorded. Times are always truncated, not rounded, to the nearest time
+ unit. For example, a measurement of 25 ns will be returned as 0 ms if
+ `time_unit` is `"millisecond"`.
+
+ For timing distributions, times are always recorded and sent in
+ nanoseconds, but `time_unit` controls the minimum and maximum values.
+ If not provided, it defaults to "nanosecond".
+
+ - nanosecond: 1ns <= x <= 10 minutes
+ - microsecond: 1μs <= x <= ~6.94 days
+ - millisecond: 1ms <= x <= ~19 years
+
+ Valid when `type`_ is `timespan`, `timing_distribution` or `datetime`.
+ enum:
+ - nanosecond
+ - microsecond
+ - millisecond
+ - second
+ - minute
+ - hour
+ - day
+
+ memory_unit:
+ title: Memory unit
+ description: |
+ The unit that the incoming memory size values are recorded in.
+
+ The units are the power-of-2 units, so "kilobyte" is correctly a
+ "kibibyte".
+
+ - kilobyte == 2^10 == 1,024 bytes
+ - megabyte == 2^20 == 1,048,576 bytes
+ - gigabyte == 2^30 == 1,073,741,824 bytes
+
+ Values are automatically converted to and transmitted as bytes.
+
+ Valid when `type`_ is `memory_distribution`.
+ enum:
+ - byte
+ - kilobyte
+ - megabyte
+ - gigabyte
+
+ labels:
+ title: Labels
+ description: |
+ A list of labels for a labeled metric. If provided, the labels are
+ enforced at run time, and recording to an unknown label is recorded
+ to the special label `__other__`. If not provided, the labels
+ may be anything, but using too many unique labels will put some
+ labels in the special label `__other__`.
+
+ Valid with any of the labeled metric types.
+ anyOf:
+ - type: array
+ uniqueItems: true
+ items:
+ $ref: "#/definitions/labeled_metric_id"
+ maxItems: 16
+ - type: "null"
+
+ extra_keys:
+ title: Extra keys
+ description: |
+ The acceptable keys on the "extra" object sent with events. This is an
+ object mapping the key to an object containing metadata about the key.
+ A maximum of 10 extra keys is allowed.
+ This metadata object has the following keys:
+
+ - `description`: **Required.** A description of the key.
+
+ Valid when `type`_ is `event`.
+ type: object
+ propertyNames:
+ $ref: "#/definitions/dotted_snake_case"
+ additionalProperties:
+ type: object
+ properties:
+ description:
+ type: string
+ required:
+ - description
+ maxProperties: 10
+ default: {}
+
+ gecko_datapoint:
+ title: Gecko Datapoint
+ description: |
+ This is a Gecko-specific property. It is the name of the Gecko metric
+ to accumulate the data from, when using the Glean SDK in a product
+ using GeckoView. See bug 1566356 for more context.
+
+ type: string
+
+ range_min:
+ title: Range minimum
+ description: |
+ The minimum value of a custom distribution.
+
+ Valid when `type`_ is `custom_distribution`.
+ type: number
+ default: 1
+
+ range_max:
+ title: Range maximum
+ description: |
+ The maximum value of a custom distribution.
+
+ Required when `type`_ is `custom_distribution`.
+ type: number
+
+ bucket_count:
+ title: Bucket count
+ description: |
+ The number of buckets to include in a custom distribution.
+
+ Required when `type`_ is `custom_distribution`.
+ type: number
+ minimum: 1
+ maximum: 100
+
+ histogram_type:
+ title: Histogram type
+ description: |
+ The type of histogram bucketing to use:
+ - `linear`: The buckets are linearly spaced within the range.
+ - `exponential`: The buckets use the natural logarithm, so the
+ smaller-valued buckets are smaller in size than the higher-valued
+ buckets.
+
+ Required when `type`_ is `custom_distribution`.
+ enum:
+ - linear
+ - exponential
+
+ unit:
+ title: Unit
+ description: |
+ The unit of the metric, for metric types that don't already take a
+ meaningful unit parameter such as `time_unit`.
+ This is provided for informational purposes only and doesn't have any
+ effect on data collection.
+ type: string
+
+ no_lint:
+ title: Lint checks to skip
+ description: |
+ This parameter lists any lint checks to skip for this metric only.
+ type: array
+ items:
+ type: string
+
+ decrypted_name:
+ title: Decrypted name
+ description: |
+ Name of the column where to persist the decrypted value
+ stored in the JWE after processing.
+
+ Required when `type`_ is `jwe`.
+ type: string
+ pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
+
+ data_sensitivity:
+ title: The level of data sensitivity
+ description: |
+ There are four data collection categories related to data sensitivity
+ [defined here](https://wiki.mozilla.org/Firefox/Data_Collection):
+
+ - **Category 1: Technical Data:** (`technical`) Information about the
+ machine or Firefox itself. Examples include OS, available memory,
+ crashes and errors, outcome of automated processes like updates,
+ safebrowsing, activation, version \#s, and buildid. This also
+ includes compatibility information about features and APIs used by
+ websites, addons, and other 3rd-party software that interact with
+ Firefox during usage.
+
+ - **Category 2: Interaction Data:** (`interaction`) Information about
+ the user’s direct engagement with Firefox. Examples include how many
+ tabs, addons, or windows a user has open; uses of specific Firefox
+ features; session length, scrolls and clicks; and the status of
+ discrete user preferences.
+
+ - **Category 3: Web activity data:** (`web_activity`) Information
+ about user web browsing that could be considered sensitive. Examples
+ include users’ specific web browsing history; general information
+ about their web browsing history (such as TLDs or categories of
+ webpages visited over time); and potentially certain types of
+ interaction data about specific webpages visited.
+
+ - **Category 4: Highly sensitive data:** (`highly_sensitive`)
+ Information that directly identifies a person, or if combined with
+ other data could identify a person. Examples include e-mail,
+ usernames, identifiers such as google ad id, apple id, fxaccount,
+ city or country (unless small ones are explicitly filtered out), or
+ certain cookies. It may be embedded within specific website content,
+ such as memory contents, dumps, captures of screen data, or DOM
+ data.
+ type: array
+ items:
+ enum:
+ - technical
+ - interaction
+ - web_activity
+ - highly_sensitive
+ type: string
+ minLength: 1
+ uniqueItems: true
+
+ required:
+ - type
+ - bugs
+ - description
+ - notification_emails
+ - data_reviews
+ - expires
+
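+# Editorial sketch, not part of the vendored schema: a minimal metrics.yaml
+# entry satisfying the required properties above (all values hypothetical):
+#
+#   browser.engagement:
+#     tab_count:
+#       type: counter
+#       description: Number of open tabs.
+#       bugs:
+#         - https://bugzilla.mozilla.org/show_bug.cgi?id=1234567
+#       data_reviews:
+#         - https://example.com/data-review
+#       notification_emails:
+#         - nobody@example.com
+#       expires: never
+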
+type: object
+
+propertyNames:
+ anyOf:
+ - allOf:
+ - $ref: "#/definitions/dotted_snake_case"
+ - not:
+ description: "'pings' is reserved as a category name."
+ const: pings
+ - enum: ['$schema']
+
+properties:
+ $schema:
+ type: string
+ format: url
+
+ no_lint:
+ title: Lint checks to skip globally
+ description: |
+ This parameter lists any lint checks to skip for this whole file.
+ type: array
+ items:
+ type: string
+
+additionalProperties:
+ type: object
+ propertyNames:
+ anyOf:
+ - $ref: "#/definitions/short_id"
+ additionalProperties:
+ allOf:
+ - $ref: "#/definitions/metric"
+ -
+ if:
+ properties:
+ type:
+ const: event
+ then:
+ properties:
+ lifetime:
+ description: |
+ Event metrics must have ping lifetime.
+ const: ping
+ - if:
+ not:
+ properties:
+ type:
+ enum:
+ - timing_distribution
+ - custom_distribution
+ - memory_distribution
+ - quantity
+ - boolean
+ - string
+ - labeled_counter
+ then:
+ properties:
+ gecko_datapoint:
+ description: |
+ `gecko_datapoint` is only allowed for `timing_distribution`,
+ `custom_distribution`, `memory_distribution`, `quantity`,
+ `boolean`, `string` and `labeled_counter`.
+ maxLength: 0
+ -
+ if:
+ properties:
+ type:
+ enum:
+ - custom_distribution
+ then:
+ required:
+ - gecko_datapoint
+ description: |
+ `custom_distribution` is only allowed for Gecko
+ metrics.
+ -
+ if:
+ properties:
+ type:
+ const: custom_distribution
+ then:
+ required:
+ - range_max
+ - bucket_count
+ - histogram_type
+ description: |
+ `custom_distribution` is missing required parameters `range_max`,
+ `bucket_count` and `histogram_type`.
+ -
+ if:
+ properties:
+ type:
+ const: memory_distribution
+ then:
+ required:
+ - memory_unit
+ description: |
+ `memory_distribution` is missing required parameter `memory_unit`.
+ -
+ if:
+ properties:
+ type:
+ const: quantity
+ then:
+ required:
+ - unit
+ description: |
+ `quantity` is missing required parameter `unit`.
+ -
+ if:
+ properties:
+ type:
+ const: jwe
+ then:
+ required:
+ - decrypted_name
+ description: |
+ `jwe` is missing required parameter `decrypted_name`.
diff --git a/third_party/python/glean_parser/glean_parser/schemas/metrics.2-0-0.schema.yaml b/third_party/python/glean_parser/glean_parser/schemas/metrics.2-0-0.schema.yaml
new file mode 100644
index 0000000000..ff99f328c9
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/schemas/metrics.2-0-0.schema.yaml
@@ -0,0 +1,735 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+---
+$schema: http://json-schema.org/draft-07/schema#
+title: Metrics
+description: |
+ Schema for the metrics.yaml files for Mozilla's Glean telemetry SDK.
+
+ The top-level of the `metrics.yaml` file has a key defining each category of
+ metrics. Categories must be snake_case, and they may also have dots `.` to
+ define subcategories.
+
+$id: moz://mozilla.org/schemas/glean/metrics/2-0-0
+
+definitions:
+ token:
+ type: string
+ pattern: "^[A-Za-z_][A-Za-z0-9_\\.]*$"
+
+ snake_case:
+ type: string
+ pattern: "^[a-z_][a-z0-9_]*$"
+
+ dotted_snake_case:
+ type: string
+ pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
+ maxLength: 40
+
+ # Prior to version 2.0.0 of the schema, special ping names with underscores
+ # were also supported.
+ kebab_case:
+ type: string
+ pattern: "^[a-z][a-z0-9-]{0,29}$"
+
+ long_id:
+ allOf:
+ - $ref: "#/definitions/snake_case"
+ - maxLength: 40
+
+ short_id:
+ allOf:
+ - $ref: "#/definitions/snake_case"
+ - maxLength: 30
+
+ labeled_metric_id:
+ type: string
+ pattern: "^[ -~]+$"
+ maxLength: 71 # Note: this should be category + metric + 1
+
+ metric:
+ description: |
+ Describes a single metric.
+
+ See https://mozilla.github.io/glean_parser/metrics-yaml.html
+
+ type: object
+
+ additionalProperties: false
+
+ properties:
+ type:
+ title: Metric type
+ description: |
+ **Required.**
+
+ Specifies the type of a metric, like "counter" or "event". This
+ defines which operations are valid for the metric, how it is stored
+ and how data analysis tooling displays it.
+
+ The supported types are:
+ - `event`: Record a specific event (with optional metadata).
+ Additional properties: `extra_keys`.
+
+ - `boolean`: A metric storing values of true or false.
+
+ - `string`: A metric storing Unicode string values.
+
+ - `string_list`: a list of Unicode strings.
+
+ - `counter`: A numeric value that can only be incremented.
+
+ - `quantity`: A numeric value that is set directly.
+
+ - `timespan`: Represents a time interval. Additional properties:
+ `time_unit`.
+
+ - `timing_distribution`: Record the distribution of multiple
+ timings. Additional properties: `time_unit`.
+
+ - `datetime`: A date/time value. Represented as an ISO datetime in
+ UTC. Additional properties: `time_unit`.
+
+ - `uuid`: Record a UUID v4.
+
+ - `url`: Record a valid URL string.
+
+ - `memory_distribution`: A histogram for recording memory usage
+ values. Additional properties: `memory_unit`.
+
+ - `custom_distribution`: A histogram with a custom range and number
+ of buckets. This metric type is for legacy support only and is
+ only allowed for metrics coming from GeckoView. Additional
+ properties: `range_min`, `range_max`, `bucket_count`,
+ `histogram_type`.
+
+ - `rate`: Used to record the rate something happens relative to some
+ other thing. For example, the number of HTTP connections that
+ experience an error relative to the number of total HTTP
+ connections made.
+
+ - Additionally, labeled versions of many metric types are supported.
+ These support the `labels`_ parameter, allowing multiple instances
+ of the metric to be stored at a given set of labels. The labeled
+ metric types include:
+
+ `labeled_boolean`, `labeled_string`, `labeled_counter`.
+
+ - `text`: Record long text data.
+
+ type: string
+ enum:
+ - event
+ - boolean
+ - string
+ - string_list
+ - counter
+ - quantity
+ - timespan
+ - timing_distribution
+ - custom_distribution
+ - memory_distribution
+ - datetime
+ - uuid
+ - url
+ - jwe
+ - labeled_boolean
+ - labeled_string
+ - labeled_counter
+ - rate
+ - text
+
+ description:
+ title: Description
+ description: |
+ **Required.**
+
+ A textual description of what this metric does, what it means, and its
+ edge cases or any other helpful information.
+
+ Descriptions may contain [markdown
+ syntax](https://www.markdownguide.org/basic-syntax/).
+ type: string
+
+ metadata:
+ title: Metadata
+ description: |
+ Additional metadata about this metric. Currently limited to a list of
+ tags.
+ type: object
+ properties:
+ tags:
+ title: Tags
+ description: Which tags are specified for this metric.
+ type: array
+ items:
+ type: string
+ maxLength: 80
+ default: {}
+
+ lifetime:
+ title: Lifetime
+ description: |
+ Defines the lifetime of the metric. It must be one of the following
+ values:
+
+ - `ping` (default): The metric is reset each time it is sent in a
+ ping.
+
+ - `user`: The metric contains a property that is part of the user's
+ profile and is never reset.
+
+ - `application`: The metric contains a property that is related to the
+ application, and is reset only at application restarts.
+ enum:
+ - ping
+ - user
+ - application
+ default: ping
+
+ send_in_pings:
+ title: Send in pings
+ description: |
+ Which pings the metric should be sent on. If not specified, the metric
+ is sent on the "default ping", which is the `events` ping for events,
+ and the `metrics` ping for everything else. Most metrics don't need to
+ specify this.
+
+ (There is an additional special value of `all-pings` for internal
+ Glean metrics only that is used to indicate that a metric may appear
+ in any ping.)
+ type: array
+ items:
+ anyOf:
+ - $ref: "#/definitions/kebab_case"
+ # Allow "special" ping names that start with "glean_" used
+ # internally by the Glean SDK
+ - type: string
+ pattern: "^glean_.*$"
+ default:
+ - default
+
+ notification_emails:
+ title: Notification emails
+ description: |
+ **Required.**
+
+ A list of email addresses to notify for important events with the
+ metric or when people with context or ownership for the metric need to
+ be contacted.
+ type: array
+ minItems: 1
+ items:
+ type: string
+ format: email
+
+ bugs:
+ title: Related bugs
+ description: |
+ **Required.**
+
+ A list of bug URLs (e.g. Bugzilla and Github) that are relevant to
+ this metric, e.g., tracking its original implementation or later
+ changes to it.
+
+ Prior to version 2.0.0 of the schema, bugs could also be integers.
+ type: array
+ minItems: 1
+ items:
+ type: string
+ format: uri
+
+ data_reviews:
+ title: Review references
+ description: |
+ **Required.**
+
+ A list of URIs to any data collection reviews relevant to the metric.
+ type: array
+ items:
+ type: string
+ format: uri
+
+ disabled:
+ title: Disabled
+ description: |
+ If `true`, the metric is disabled, and any metric collection on it
+ will be silently ignored at runtime.
+ type: boolean
+ default: false
+
+ expires:
+ title: Expires
+ description: |
+ **Required.**
+
+ By default it may be one of the following values:
+ - `<build date>`: An ISO date `yyyy-mm-dd` in UTC on which the
+ metric expires. For example, `2019-03-13`. This date is checked at
+ build time. Except in special cases, this form should be used so
+ that the metric automatically "sunsets" after a period of time.
+ - `<major version>`: An integer greater than 0 representing the
+ major version the metric expires in. For example, `11`. The
+ version is checked at build time against the major provided to the
+ glean_parser and is only valid if a major version is provided at
+ built time. If no major version is provided at build time and
+ expiration by major version is used for a metric, an error is
+ raised.
+ Note that mixing expiration by date and version is not allowed
+ within a product.
+ - `never`: This metric never expires.
+ - `expired`: This metric is manually expired.
+
+ The default may be overridden in certain applications by the
+ `custom_validate_expires` and `custom_is_expired` configs.
+ oneOf:
+ - type: string
+ - type: integer
+ minimum: 1
+
+ version:
+ title: Metric version
+ description: |
+ The version of the metric. A monotonically increasing value. If not
+ provided, defaults to 0.
+
+ time_unit:
+ title: Time unit
+ description: |
+ For timespans and datetimes, specifies the unit that the metric will
+ be stored and displayed in. If not provided, it defaults to
+ "millisecond". Time values are sent to the backend as integers, so
+ `time_unit`_ determines the maximum resolution at which timespans are
+ recorded. Times are always truncated, not rounded, to the nearest time
+ unit. For example, a measurement of 25 ns will be returned as 0 ms if
+ `time_unit` is `"millisecond"`.
+
+ For timing distributions, times are always recorded and sent in
+ nanoseconds, but `time_unit` controls the minimum and maximum values.
+ If not provided, it defaults to "nanosecond".
+
+ - nanosecond: 1ns <= x <= 10 minutes
+ - microsecond: 1μs <= x <= ~6.94 days
+ - millisecond: 1ms <= x <= ~19 years
+
+ Valid when `type`_ is `timespan`, `timing_distribution` or `datetime`.
+ enum:
+ - nanosecond
+ - microsecond
+ - millisecond
+ - second
+ - minute
+ - hour
+ - day
+
+ memory_unit:
+ title: Memory unit
+ description: |
+ The unit that the incoming memory size values are recorded in.
+
+ The units are the power-of-2 units, so "kilobyte" is correctly a
+ "kibibyte".
+
+ - kilobyte == 2^10 == 1,024 bytes
+ - megabyte == 2^20 == 1,048,576 bytes
+ - gigabyte == 2^30 == 1,073,741,824 bytes
+
+ Values are automatically converted to and transmitted as bytes.
+
+ Valid when `type`_ is `memory_distribution`.
+ enum:
+ - byte
+ - kilobyte
+ - megabyte
+ - gigabyte
+
+ labels:
+ title: Labels
+ description: |
+ A list of labels for a labeled metric. If provided, the labels are
+ enforced at run time, and recording to an unknown label is recorded
+ to the special label `__other__`. If not provided, the labels
+ may be anything, but using too many unique labels will put some
+ labels in the special label `__other__`.
+
+ Valid with any of the labeled metric types.
+ anyOf:
+ - type: array
+ uniqueItems: true
+ items:
+ $ref: "#/definitions/labeled_metric_id"
+ maxItems: 4096
+ - type: "null"
+
+ extra_keys:
+ title: Extra keys
+ description: |
+ The acceptable keys on the "extra" object sent with events. This is an
+ object mapping the key to an object containing metadata about the key.
+ A maximum of 15 extra keys is allowed.
+ This metadata object has the following keys:
+
+ - `description`: **Required.** A description of the key.
+
+ Valid when `type`_ is `event`.
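+
+ A minimal sketch (hypothetical event, for illustration only):
+
+     extra_keys:
+       engine:
+         description: The engine that was used.
+         type: string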
+ type: object
+ propertyNames:
+ $ref: "#/definitions/dotted_snake_case"
+ additionalProperties:
+ type: object
+ properties:
+ description:
+ type: string
+ type:
+ type: string
+ enum:
+ - string
+ - boolean
+ - quantity
+ required:
+ - description
+ maxProperties: 15
+ default: {}
+
+ gecko_datapoint:
+ title: Gecko Datapoint
+ description: |
+ This is a Gecko-specific property. It is the name of the Gecko metric
+ to accumulate the data from, when using the Glean SDK in a product
+ using GeckoView. See bug 1566356 for more context.
+ type: string
+
+ range_min:
+ title: Range minimum
+ description: |
+ The minimum value of a custom distribution.
+
+ Valid when `type`_ is `custom_distribution`.
+ type: number
+ default: 1
+
+ range_max:
+ title: Range maximum
+ description: |
+ The maximum value of a custom distribution.
+
+ Required when `type`_ is `custom_distribution`.
+ type: number
+
+ bucket_count:
+ title: Bucket count
+ description: |
+ The number of buckets to include in a custom distribution.
+
+ Required when `type`_ is `custom_distribution`.
+ type: number
+ minimum: 1
+ maximum: 100
+
+ histogram_type:
+ title: Histogram type
+ description: |
+ The type of histogram bucketing to use:
+ - `linear`: The buckets are linearly spaced within the range.
+ - `exponential`: The buckets are spaced on a natural logarithmic
+ scale, so the smaller-valued buckets are narrower than the
+ higher-valued buckets.
+
+ Required when `type`_ is `custom_distribution`.
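+
+ A minimal sketch combining the custom distribution parameters
+ (hypothetical metric, for illustration only):
+
+     example.frame_time:
+       type: custom_distribution
+       range_min: 1
+       range_max: 100000
+       bucket_count: 100
+       histogram_type: exponential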
+ enum:
+ - linear
+ - exponential
+
+ unit:
+ title: Unit
+ description: |
+ The unit of the metric.
+ This is only required for metrics
+ that don't already require a meaningful unit, e.g. `quantity`.
+ This is provided for informational purposes only and doesn't have any
+ effect on data collection.
+
+ Metric types like `timespan`, `datetime`
+ and `timing_distribution` take a `time_unit` instead.
+ type: string
+
+ no_lint:
+ title: Lint checks to skip
+ description: |
+ This parameter lists any lint checks to skip for this metric only.
+ type: array
+ items:
+ type: string
+
+ data_sensitivity:
+ title: The level of data sensitivity
+ description: |
+ There are four data collection categories related to data sensitivity
+ [defined here](https://wiki.mozilla.org/Firefox/Data_Collection):
+
+ - **Category 1: Technical Data:** (`technical`) Information about the
+ machine or Firefox itself. Examples include OS, available memory,
+ crashes and errors, outcome of automated processes like updates,
+ safebrowsing, activation, version \#s, and buildid. This also
+ includes compatibility information about features and APIs used by
+ websites, addons, and other 3rd-party software that interact with
+ Firefox during usage.
+
+ - **Category 2: Interaction Data:** (`interaction`) Information about
+ the user’s direct engagement with Firefox. Examples include how many
+ tabs, addons, or windows a user has open; uses of specific Firefox
+ features; session length, scrolls and clicks; and the status of
+ discrete user preferences.
+
+ - **Category 3: Web activity data:** (`web_activity`) Information
+ about user web browsing that could be considered sensitive. Examples
+ include users’ specific web browsing history; general information
+ about their web browsing history (such as TLDs or categories of
+ webpages visited over time); and potentially certain types of
+ interaction data about specific webpages visited.
+
+ - **Category 4: Highly sensitive data:** (`highly_sensitive`)
+ Information that directly identifies a person, or if combined with
+ other data could identify a person. Examples include e-mail,
+ usernames, identifiers such as google ad id, apple id, fxaccount,
+ city or country (unless small ones are explicitly filtered out), or
+ certain cookies. It may be embedded within specific website content,
+ such as memory contents, dumps, captures of screen data, or DOM
+ data.
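+
+ For example (illustrative only):
+
+     data_sensitivity:
+       - technical
+       - interaction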
+ type: array
+ items:
+ enum:
+ - technical
+ - interaction
+ - web_activity
+ - highly_sensitive
+ type: string
+ minLength: 1
+ uniqueItems: true
+
+ telemetry_mirror:
+ title: Which probe in Telemetry to mirror this metric's value to.
+ description: |
+ The C++ enum form of the Scalar, Event, or Histogram to which we
+ should mirror values.
+ Use is limited to Firefox Desktop only.
+ Has no effect when used with non-FOG outputters.
+ See FOG's documentation on mirroring for details -
+ https://firefox-source-docs.mozilla.org/toolkit/components/glean/mirroring.html
+ type: string
+ minLength: 6
+
+ denominator_metric:
+ title: The name of the denominator for this `rate` metric.
+ description: |
+ Denominators for `rate` metrics may be private and internal
+ or shared and external.
+ External denominators are `counter` metrics.
+ This field names the `counter` metric that serves as this
+ `rate` metric's external denominator.
+ The named denominator must be defined in this component
+ so glean_parser can find it.
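+
+ A minimal sketch (hypothetical metrics, for illustration only):
+
+     example.ad_clicks:
+       type: rate
+       denominator_metric: example.search_count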
+ type: string
+
+ required:
+ - type
+ - bugs
+ - description
+ - notification_emails
+ - data_reviews
+ - expires
+
+type: object
+
+propertyNames:
+ anyOf:
+ - allOf:
+ - $ref: "#/definitions/dotted_snake_case"
+ - not:
+ description: "'pings' is reserved as a category name."
+ const: pings
+ - not:
+ description: "'tags' is reserved as a category name."
+ const: tags
+ - enum: ['$schema', '$tags']
+
+properties:
+ $schema:
+ type: string
+ format: url
+
+ no_lint:
+ title: Lint checks to skip globally
+ description: |
+ This parameter lists any lint checks to skip for this whole file.
+ type: array
+ items:
+ type: string
+
+ $tags:
+ title: Tags that apply to the whole file
+ description: |
+ This denotes the list of tags that apply to all metrics in this file.
+ type: array
+ items:
+ type: string
+
+additionalProperties:
+ type: object
+ propertyNames:
+ anyOf:
+ - $ref: "#/definitions/short_id"
+ additionalProperties:
+ allOf:
+ - $ref: "#/definitions/metric"
+ -
+ if:
+ properties:
+ type:
+ const: event
+ then:
+ properties:
+ lifetime:
+ description: |
+ Event metrics must have ping lifetime.
+ const: ping
+ - if:
+ not:
+ properties:
+ type:
+ enum:
+ - timing_distribution
+ - custom_distribution
+ - memory_distribution
+ - quantity
+ - boolean
+ - string
+ - labeled_counter
+ then:
+ properties:
+ gecko_datapoint:
+ description: |
+ `gecko_datapoint` is only allowed for `timing_distribution`,
+ `custom_distribution`, `memory_distribution`, `quantity`,
+ `boolean`, `string` and `labeled_counter`.
+ maxLength: 0
+ -
+ if:
+ properties:
+ type:
+ const: custom_distribution
+ then:
+ required:
+ - range_max
+ - bucket_count
+ - histogram_type
+ description: |
+ `custom_distribution` is missing required parameters `range_max`,
+ `bucket_count` and `histogram_type`.
+ -
+ if:
+ properties:
+ type:
+ const: memory_distribution
+ then:
+ required:
+ - memory_unit
+ description: |
+ `memory_distribution` is missing required parameter `memory_unit`.
+ -
+ if:
+ properties:
+ type:
+ const: quantity
+ then:
+ required:
+ - unit
+ description: |
+ `quantity` is missing required parameter `unit`.
+ -
+ if:
+ properties:
+ type:
+ const: jwe
+ then:
+ required:
+ - jwe_support_was_removed
+ description: |
+ JWE support was removed.
+ If you require this, send an email to glean-team@mozilla.com.
+ - if:
+ not:
+ properties:
+ type:
+ const: rate
+ then:
+ properties:
+ denominator_metric:
+ description: |
+ `denominator_metric` is only allowed for `rate`.
+ maxLength: 0
+ -
+ if:
+ properties:
+ type:
+ const: text
+ then:
+ properties:
+ lifetime:
+ description: >
+ Text metrics must have ping or application lifetime.
+ enum:
+ - ping
+ - application
+
+ data_sensitivity:
+ description: >
+ Text metrics require Category 3 (`web_activity`)
+ or Category 4 (`highly_sensitive`).
+ type: array
+ items:
+ enum:
+ - web_activity
+ - highly_sensitive
+
+ send_in_pings:
+ description: |
+ Text metrics can only be sent in custom pings.
+ Built-in pings are not allowed.
+ type: array
+ items:
+ allOf:
+ - $ref: "#/definitions/kebab_case"
+ - not:
+ description: >
+ Text metrics can only be sent in custom pings.
+ Built-in pings are not allowed.
+ pattern:
+ "^(metrics|baseline|events|deletion-request|default|glean_.*)$"
+
+ -
+ if:
+ # This is a schema check:
+ # This is true when the checked YAML passes the schema validation.
+ #
+ # If it has a datetime/timing_distribution/timespan type
+ # AND has a `unit` property, then...
+ properties:
+ type:
+ enum:
+ - datetime
+ - timing_distribution
+ - timespan
+ required:
+ - unit
+ # ... then `time_unit` is required,
+ # because that's the only way we can force this to fail.
+ then:
+ required:
+ - time_unit
+ description: |
+ This metric type uses the (optional) `time_unit` parameter,
+ not `unit`.
diff --git a/third_party/python/glean_parser/glean_parser/schemas/pings.1-0-0.schema.yaml b/third_party/python/glean_parser/glean_parser/schemas/pings.1-0-0.schema.yaml
new file mode 100644
index 0000000000..c15a4c85ac
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/schemas/pings.1-0-0.schema.yaml
@@ -0,0 +1,157 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+---
+$schema: http://json-schema.org/draft-07/schema#
+title: Pings
+description: |
+ Schema for the pings.yaml files for Mozilla's Glean telemetry SDK.
+
+ The top-level of the `pings.yaml` file has a key defining the name of each
+ ping. The values contain metadata about that ping. Ping names must be
+ kebab-case per https://docs.telemetry.mozilla.org/cookbooks/new_ping.html
+
+$id: moz://mozilla.org/schemas/glean/pings/1-0-0
+
+definitions:
+ dotted_snake_case:
+ type: string
+ pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
+ maxLength: 40
+ kebab_case:
+ type: string
+ # Bug 1601270; we allow 3 specific existing snake_cased ping names for now,
+ # but these special cases can be removed once the number of legacy clients
+ # sufficiently dwindles, likely in 2020H2.
+ pattern: "^[a-z][a-z0-9-]{0,29}$\
+ |^deletion_request$|^bookmarks_sync$|^history_sync$|^session_end$|^all_pings$|^glean_.*$"
+
+type: object
+
+propertyNames:
+ allOf:
+ - anyOf:
+ - $ref: "#/definitions/kebab_case"
+ - enum: ['$schema', 'no_lint']
+ - not:
+ enum: ['all-pings']
+
+properties:
+ $schema:
+ type: string
+ format: url
+
+ no_lint:
+ title: Lint checks to skip globally
+ description: |
+ This parameter lists any lint checks to skip for this whole file.
+ type: array
+ items:
+ type: string
+
+additionalProperties:
+ type: object
+ properties:
+ description:
+ title: Description
+ description: |
+ **Required.**
+
+ A textual description of the purpose of this ping and what it contains.
+
+ Descriptions may contain [markdown
+ syntax](https://www.markdownguide.org/basic-syntax/).
+ type: string
+
+ include_client_id:
+ title: Include client id
+ description: |
+ **Required.**
+
+ When `true`, include the `client_id` value in the ping.
+ type: boolean
+
+ send_if_empty:
+ title: Send if empty
+ description: |
+ When `false` (the default), a ping is sent only if it contains data.
+ When `true`, a ping is sent even if it contains no data.
+ type: boolean
+
+ notification_emails:
+ title: Notification emails
+ description: |
+ **Required.**
+
+ A list of email addresses to notify for important events with the
+ ping or when people with context or ownership for the ping need to
+ be contacted.
+ type: array
+ minItems: 1
+ items:
+ type: string
+ format: email
+
+ bugs:
+ title: Related bugs
+ description: |
+ **Required.**
+
+ A list of bugs (e.g. Bugzilla and GitHub) that are relevant to this
+ ping, e.g., tracking its original implementation or later changes to
+ it.
+
+ If a number, it is an ID to an issue in the default tracker (e.g.
+ Mozilla's Bugzilla instance). If a string, it must be a URI to a bug
+ page in a tracker.
+ type: array
+ minItems: 1
+ items:
+ anyOf:
+ - type: integer # Keep supporting integer for backward-compat
+ - type: string
+ format: uri
+
+ data_reviews:
+ title: Review references
+ description: |
+ **Required.**
+
+ A list of URIs to any data collection reviews relevant to the ping.
+ type: array
+ items:
+ type: string
+ format: uri
+
+ reasons:
+ title: The reasons this ping can be sent.
+ description: |
+ A list of reasons that the ping might be triggered. Sent in the ping's
+ `ping_info.reason` field.
+
+ Specified as a mapping from reason codes (which are short strings), to
+ a textual description of the reason.
+ type: object
+ propertyNames:
+ type: string
+ maxLength: 30
+ additionalProperties:
+ type: string
+
+ no_lint:
+ title: Lint checks to skip
+ description: |
+ This parameter lists any lint checks to skip for this metric only.
+ type: array
+ items:
+ type: string
+
+ required:
+ - description
+ - include_client_id
+ - bugs
+ - notification_emails
+ - data_reviews
+
+ additionalProperties: false
diff --git a/third_party/python/glean_parser/glean_parser/schemas/pings.2-0-0.schema.yaml b/third_party/python/glean_parser/glean_parser/schemas/pings.2-0-0.schema.yaml
new file mode 100644
index 0000000000..fb0f9c1914
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/schemas/pings.2-0-0.schema.yaml
@@ -0,0 +1,169 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+---
+$schema: http://json-schema.org/draft-07/schema#
+title: Pings
+description: |
+ Schema for the pings.yaml files for Mozilla's Glean telemetry SDK.
+
+ The top-level of the `pings.yaml` file has a key defining the name of each
+ ping. The values contain metadata about that ping. Ping names must be
+ kebab-case per https://docs.telemetry.mozilla.org/cookbooks/new_ping.html
+
+$id: moz://mozilla.org/schemas/glean/pings/2-0-0
+
+definitions:
+ dotted_snake_case:
+ type: string
+ pattern: "^[a-z_][a-z0-9_]{0,29}(\\.[a-z_][a-z0-9_]{0,29})*$"
+ maxLength: 40
+ # Prior to version 2.0.0 of the schema, special ping names with underscores
+ # were also supported.
+ kebab_case:
+ type: string
+ pattern: "^[a-z][a-z0-9-]{0,29}$"
+
+type: object
+
+propertyNames:
+ allOf:
+ - anyOf:
+ - $ref: "#/definitions/kebab_case"
+ - enum: ['$schema', 'no_lint']
+ - not:
+ enum: ['all-pings']
+
+properties:
+ $schema:
+ type: string
+ format: url
+
+ no_lint:
+ title: Lint checks to skip globally
+ description: |
+ This parameter lists any lint checks to skip for this whole file.
+ type: array
+ items:
+ type: string
+
+additionalProperties:
+ type: object
+ properties:
+ description:
+ title: Description
+ description: |
+ **Required.**
+
+ A textual description of the purpose of this ping and what it contains.
+
+ Descriptions may contain [markdown
+ syntax](https://www.markdownguide.org/basic-syntax/).
+ type: string
+
+ metadata:
+ title: Metadata
+ description: |
+ Additional metadata about this ping. Currently limited to a list of
+ tags.
+ type: object
+ properties:
+ tags:
+ title: Tags
+ description: Which tags are specified for this ping.
+ type: array
+ items:
+ type: string
+ maxLength: 80
+ default: {}
+
+ include_client_id:
+ title: Include client id
+ description: |
+ **Required.**
+
+ When `true`, include the `client_id` value in the ping.
+ type: boolean
+
+ send_if_empty:
+ title: Send if empty
+ description: |
+ When `false` (the default), a ping is sent only if it contains data.
+ When `true`, a ping is sent even if it contains no data.
+ type: boolean
+
+ notification_emails:
+ title: Notification emails
+ description: |
+ **Required.**
+
+ A list of email addresses to notify for important events with the
+ ping or when people with context or ownership for the ping need to
+ be contacted.
+ type: array
+ minItems: 1
+ items:
+ type: string
+ format: email
+
+ bugs:
+ title: Related bugs
+ description: |
+ **Required.**
+
+ A list of bugs (e.g. Bugzilla and GitHub) that are relevant to this
+ ping, e.g., tracking its original implementation or later changes to
+ it.
+
+ It must be a URI to a bug page in a tracker.
+
+ Prior to version 2.0.0 of the schema, bugs could also be integers.
+ type: array
+ minItems: 1
+ items:
+ type: string
+ format: uri
+
+ data_reviews:
+ title: Review references
+ description: |
+ **Required.**
+
+ A list of URIs to any data collection reviews relevant to the ping.
+ type: array
+ items:
+ type: string
+ format: uri
+
+ reasons:
+ title: The reasons this ping can be sent.
+ description: |
+ A list of reasons that the ping might be triggered. Sent in the ping's
+ `ping_info.reason` field.
+
+ Specified as a mapping from reason codes (which are short strings), to
+ a textual description of the reason.
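+
+ A minimal sketch (hypothetical reasons, for illustration only):
+
+     reasons:
+       dirty_startup: The ping was submitted after a dirty startup.
+       background: The application went to the background.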
+ type: object
+ propertyNames:
+ type: string
+ maxLength: 30
+ additionalProperties:
+ type: string
+
+ no_lint:
+ title: Lint checks to skip
+ description: |
+ This parameter lists any lint checks to skip for this metric only.
+ type: array
+ items:
+ type: string
+
+ required:
+ - description
+ - include_client_id
+ - bugs
+ - notification_emails
+ - data_reviews
+
+ additionalProperties: false
diff --git a/third_party/python/glean_parser/glean_parser/schemas/tags.1-0-0.schema.yaml b/third_party/python/glean_parser/glean_parser/schemas/tags.1-0-0.schema.yaml
new file mode 100644
index 0000000000..aa0f083bcf
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/schemas/tags.1-0-0.schema.yaml
@@ -0,0 +1,51 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+---
+$schema: http://json-schema.org/draft-07/schema#
+title: Tags
+description: |
+ Schema for the tags.yaml files for Mozilla's Glean telemetry SDK.
+
+ The top-level of the `tags.yaml` file has a key defining the name of each
+ tag. The values contain metadata about that tag (currently just a
+ description).
+
+$id: moz://mozilla.org/schemas/glean/tags/1-0-0
+
+type: object
+
+propertyNames:
+ type: string
+ maxLength: 80
+
+properties:
+ $schema:
+ type: string
+ format: url
+
+ no_lint:
+ title: Lint checks to skip globally
+ description: |
+ This parameter lists any lint checks to skip for this whole file.
+ type: array
+ items:
+ type: string
+
+additionalProperties:
+ type: object
+ properties:
+ description:
+ title: Description
+ description: |
+ **Required.**
+
+ A textual description of this tag.
+
+ Descriptions may contain [markdown
+ syntax](https://www.markdownguide.org/basic-syntax/).
+ type: string
+ required:
+ - description
+ additionalProperties: false
diff --git a/third_party/python/glean_parser/glean_parser/swift.py b/third_party/python/glean_parser/glean_parser/swift.py
new file mode 100644
index 0000000000..c745c4d9ac
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/swift.py
@@ -0,0 +1,260 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Outputter to generate Swift code for metrics.
+"""
+
+import enum
+import json
+from pathlib import Path
+from typing import Any, Dict, Optional, Union
+
+from . import __version__
+from . import metrics
+from . import pings
+from . import tags
+from . import util
+
+# An (incomplete) list of reserved keywords in Swift.
+# These will be replaced in generated code by their escaped form.
+SWIFT_RESERVED_NAMES = ["internal", "typealias"]
+
+
+def swift_datatypes_filter(value: util.JSONType) -> str:
+ """
+ A Jinja2 filter that renders Swift literals.
+
+ Based on Python's JSONEncoder, but overrides:
+ - dicts to use `[key: value]`
+ - sets to use `[...]`
+ - enums to use the like-named Swift enum
+ - Rate objects to a CommonMetricData initializer
+ (for external Denominators' Numerators lists)
+ """
+
+ class SwiftEncoder(json.JSONEncoder):
+ def iterencode(self, value):
+ if isinstance(value, dict):
+ yield "["
+ first = True
+ for key, subvalue in value.items():
+ if not first:
+ yield ", "
+ yield from self.iterencode(key)
+ yield ": "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield "]"
+ elif isinstance(value, enum.Enum):
+ yield ("." + util.camelize(value.name))
+ elif isinstance(value, list):
+ yield "["
+ first = True
+ for subvalue in value:
+ if not first:
+ yield ", "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield "]"
+ elif isinstance(value, set):
+ yield "["
+ first = True
+ for subvalue in sorted(list(value)):
+ if not first:
+ yield ", "
+ yield from self.iterencode(subvalue)
+ first = False
+ yield "]"
+ elif value is None:
+ yield "nil"
+ elif isinstance(value, metrics.Rate):
+ yield "CommonMetricData("
+ first = True
+ for arg_name in util.common_metric_args:
+ if hasattr(value, arg_name):
+ if not first:
+ yield ", "
+ yield f"{util.camelize(arg_name)}: "
+ yield from self.iterencode(getattr(value, arg_name))
+ first = False
+ yield ")"
+ else:
+ yield from super().iterencode(value)
+
+ return "".join(SwiftEncoder().iterencode(value))
+
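+# A rough sketch of what the filter renders, on illustrative inputs
+# (these example calls are not part of the upstream module):
+#
+#   swift_datatypes_filter({"category": "test"})  # -> '["category": "test"]'
+#   swift_datatypes_filter({2, 1})                # -> '[1, 2]' (sets are sorted)
+#   swift_datatypes_filter(None)                  # -> 'nil'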
+
+def type_name(obj: Union[metrics.Metric, pings.Ping]) -> str:
+ """
+ Returns the Swift type to use for a given metric or ping object.
+ """
+ generate_enums = getattr(obj, "_generate_enums", [])
+ if len(generate_enums):
+ generic = None
+ for member, suffix in generate_enums:
+ if len(getattr(obj, member)):
+ generic = util.Camelize(obj.name) + suffix
+ else:
+ if isinstance(obj, metrics.Event):
+ generic = "NoExtras"
+ else:
+ generic = "No" + suffix
+
+ return "{}<{}>".format(class_name(obj.type), generic)
+
+ return class_name(obj.type)
+
+
+def extra_type_name(typ: str) -> str:
+ """
+ Returns the corresponding Swift type for an event's extra key types.
+ """
+
+ if typ == "boolean":
+ return "Bool"
+ elif typ == "string":
+ return "String"
+ elif typ == "quantity":
+ return "Int32"
+ else:
+ return "UNSUPPORTED"
+
+
+def class_name(obj_type: str) -> str:
+ """
+ Returns the Swift class name for a given metric or ping type.
+ """
+ if obj_type == "ping":
+ return "Ping"
+ if obj_type.startswith("labeled_"):
+ obj_type = obj_type[8:]
+ return util.Camelize(obj_type) + "MetricType"
+
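+# For example (illustrative only):
+#
+#   class_name("counter")          # -> "CounterMetricType"
+#   class_name("labeled_counter")  # -> "CounterMetricType"
+#   class_name("ping")             # -> "Ping"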
+
+def variable_name(var: str) -> str:
+ """
+ Returns a valid Swift variable name, escaping keywords if necessary.
+ """
+ if var in SWIFT_RESERVED_NAMES:
+ return "`" + var + "`"
+ else:
+ return var
+
+
+class BuildInfo:
+ def __init__(self, build_date):
+ self.build_date = build_date
+
+
+def generate_build_date(date: Optional[str]) -> str:
+ """
+ Generate the build timestamp.
+ """
+
+ ts = util.build_date(date)
+
+ data = [
+ ("year", ts.year),
+ ("month", ts.month),
+ ("day", ts.day),
+ ("hour", ts.hour),
+ ("minute", ts.minute),
+ ("second", ts.second),
+ ]
+
+ # The internal DatetimeMetricType API can take a `DateComponents` object,
+ # which lets us easily specify the timezone.
+ components = ", ".join([f"{name}: {val}" for (name, val) in data])
+ return f'DateComponents(calendar: Calendar.current, timeZone: TimeZone(abbreviation: "UTC"), {components})' # noqa
+
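+# For an input of "2022-01-03T17:30:00" the generated literal looks
+# roughly like (illustrative only):
+#
+#   DateComponents(calendar: Calendar.current,
+#                  timeZone: TimeZone(abbreviation: "UTC"),
+#                  year: 2022, month: 1, day: 3, hour: 17, minute: 30, second: 0)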
+
+class Category:
+ """
+ Data struct holding information about a metric to be used in the template.
+ """
+
+ name: str
+ objs: Dict[str, Union[metrics.Metric, pings.Ping, tags.Tag]]
+ contains_pings: bool
+
+
+def output_swift(
+ objs: metrics.ObjectTree, output_dir: Path, options: Optional[Dict[str, Any]] = None
+) -> None:
+ """
+ Given a tree of objects, output Swift code to `output_dir`.
+
+ :param objs: A tree of objects (metrics and pings) as returned from
+ `parser.parse_objects`.
+ :param output_dir: Path to an output directory to write to.
+ :param options: options dictionary, with the following optional keys:
+ - namespace: The namespace to generate metrics in
+ - glean_namespace: The namespace to import Glean from
+ - allow_reserved: When True, this is a Glean-internal build
+ - with_buildinfo: If "true" the `GleanBuildInfo` is generated.
+ Otherwise generation of that file is skipped.
+ Defaults to "true".
+ - build_date: If set to `0` a static unix epoch time will be used.
+ If set to an ISO8601 datetime string (e.g. `2022-01-03T17:30:00`)
+ it will use that date.
+ Other values will throw an error.
+ If not set it will use the current date & time.
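+
+ A usage sketch (the output path and options are assumptions, for
+ illustration only), where `all_objs` is the object tree returned by
+ the parser:
+
+     output_swift(all_objs, Path("Generated"), {"namespace": "GleanMetrics"})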
+ """
+ if options is None:
+ options = {}
+
+ template = util.get_jinja2_template(
+ "swift.jinja2",
+ filters=(
+ ("swift", swift_datatypes_filter),
+ ("type_name", type_name),
+ ("class_name", class_name),
+ ("variable_name", variable_name),
+ ("extra_type_name", extra_type_name),
+ ),
+ )
+
+ namespace = options.get("namespace", "GleanMetrics")
+ glean_namespace = options.get("glean_namespace", "Glean")
+ with_buildinfo = options.get("with_buildinfo", "true").lower() == "true"
+ build_date = options.get("build_date", None)
+ build_info = None
+ if with_buildinfo:
+ build_date = generate_build_date(build_date)
+ build_info = BuildInfo(build_date=build_date)
+
+ filename = "Metrics.swift"
+ filepath = output_dir / filename
+ categories = []
+
+ for category_key, category_val in objs.items():
+ contains_pings = any(
+ isinstance(obj, pings.Ping) for obj in category_val.values()
+ )
+
+ cat = Category()
+ cat.name = category_key
+ cat.objs = category_val
+ cat.contains_pings = contains_pings
+
+ categories.append(cat)
+
+ with filepath.open("w", encoding="utf-8") as fd:
+ fd.write(
+ template.render(
+ parser_version=__version__,
+ categories=categories,
+ common_metric_args=util.common_metric_args,
+ extra_metric_args=util.extra_metric_args,
+ namespace=namespace,
+ glean_namespace=glean_namespace,
+ allow_reserved=options.get("allow_reserved", False),
+ build_info=build_info,
+ )
+ )
+ # Jinja2 squashes the final newline, so we explicitly add it
+ fd.write("\n")
diff --git a/third_party/python/glean_parser/glean_parser/tags.py b/third_party/python/glean_parser/glean_parser/tags.py
new file mode 100644
index 0000000000..680b99731b
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/tags.py
@@ -0,0 +1,49 @@
+from typing import Dict, List, Optional
+from . import util
+
+
+class Tag:
+ def __init__(
+ self,
+ name: str,
+ description: str,
+ defined_in: Optional[Dict] = None,
+ no_lint: Optional[List[str]] = None,
+ _validated: bool = False,
+ ):
+ # Avoid cyclical import
+ from . import parser
+
+ self.name = name
+ self.description = description
+ self.defined_in = defined_in
+ if no_lint is None:
+ no_lint = []
+ self.no_lint = no_lint
+
+ # _validated indicates whether this tag has already been jsonschema
+ # validated (but not any of the Python-level validation).
+ if not _validated:
+ data: Dict[str, util.JSONType] = {
+ "$schema": parser.TAGS_ID,
+ self.name: self._serialize_input(),
+ }
+ for error in parser.validate(data):
+ raise ValueError(error)
+
+ @property
+ def type(self) -> str:
+ return "tag"
+
+ def _serialize_input(self) -> Dict[str, util.JSONType]:
+ d = self.serialize()
+ modified_dict = util.remove_output_params(d, "defined_in")
+ return modified_dict
+
+ def serialize(self) -> Dict[str, util.JSONType]:
+ """
+ Serialize the tag back to JSON object model.
+ """
+ d = self.__dict__.copy()
+ del d["name"]
+ return d
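+
+
+# A minimal usage sketch (hypothetical tag, for illustration only).
+# Constructing a Tag directly re-runs the jsonschema validation above:
+#
+#   tag = Tag(name="search", description="Metrics about search.")
+#   tag.type         # -> "tag"
+#   tag.serialize()  # -> the tag's fields as a dict, without "name"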
diff --git a/third_party/python/glean_parser/glean_parser/templates/data_review.jinja2 b/third_party/python/glean_parser/glean_parser/templates/data_review.jinja2
new file mode 100644
index 0000000000..b3541805ed
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/data_review.jinja2
@@ -0,0 +1,82 @@
+!! Reminder: it is your responsibility to complete and check the correctness of
+!! this automatically-generated request skeleton before requesting Data
+!! Collection Review. See https://wiki.mozilla.org/Data_Collection for details.
+{# Data Review Request Template pulled from
+ https://github.com/mozilla/data-review/blob/main/request.md #}
+
+DATA REVIEW REQUEST
+1. What questions will you answer with this data?
+
+{{ "TODO: Fill this in." if not questions }}
+
+2. Why does Mozilla need to answer these questions? Are there benefits for users?
+ Do we need this information to address product or business requirements?
+
+{{ "TODO: Fill this in." if not why }}
+
+3. What alternative methods did you consider to answer these questions?
+ Why were they not sufficient?
+
+{{ "TODO: Fill this in." if not methods }}
+
+4. Can current instrumentation answer these questions?
+
+{{ "TODO: Fill this in." if not current_instrumentation_answers }}
+
+5. List all proposed measurements and indicate the category of data collection for each
+ measurement, using the Firefox data collection categories found on the Mozilla wiki.
+
+Measurement Name | Measurement Description | Data Collection Category | Tracking Bug
+---------------- | ----------------------- | ------------------------ | ------------
+{% for metric in metrics %}
+{% if metric.type == "event" and metric.allowed_extra_keys %}
+{% for extra_name, extra_detail in metric.extra_keys.items() %}
+`{{ metric.category|snake_case }}.{{ metric.name|snake_case }}#{{ extra_name }}` | {{ extra_detail["description"]|replace("\n", " ") }} | {{ metric.data_sensitivity|join(", ", attribute="name") }} | {{ metric.bugs|last }}
+{% endfor %}
+{% else %}
+`{{ metric.category|snake_case }}.{{ metric.name|snake_case }}` | {{ metric.description|replace("\n", " ") }} | {{ metric.data_sensitivity|join(", ", attribute="name") }} | {{ metric.bugs|last }}
+{% endif %}
+{% endfor %}
+
+6. Please provide a link to the documentation for this data collection which
+ describes the ultimate data set in a public, complete, and accurate way.
+
+This collection is Glean, so it is documented [in the Glean Dictionary](https://dictionary.telemetry.mozilla.org).
+
+7. How long will this data be collected?
+
+{% if durations|length == 1 %}
+{% for duration in durations %}
+{% if duration == "never" %}
+This data will be collected permanently.
+{% else %}
+This collection has expiry '{{duration}}'.
+{% endif %}
+{% endfor %}
+{% else %}
+Parts of this collection expire at different times: {{ durations|join(", ") }}.
+{% endif %}
+{% if "never" in durations %}
+{{ responsible_emails|join(", ") }} will be responsible for the permanent collections.
+{% endif %}
+
+8. What populations will you measure?
+
+All channels, countries, and locales. No filters.
+
+9. If this data collection is default on, what is the opt-out mechanism for users?
+
+These collections are Glean. The opt-out can be found in the product's preferences.
+
+10. Please provide a general description of how you will analyze this data.
+
+{{ "TODO: Fill this in." if not analysis_how }}
+
+11. Where do you intend to share the results of your analysis?
+
+{{ "TODO: Fill this in." if not analysis_where }}
+
+12. Is there a third-party tool (i.e. not Glean or Telemetry) that you
+ are proposing to use for this data collection?
+
+No.
diff --git a/third_party/python/glean_parser/glean_parser/templates/javascript.buildinfo.jinja2 b/third_party/python/glean_parser/glean_parser/templates/javascript.buildinfo.jinja2
new file mode 100644
index 0000000000..79968d3d0a
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/javascript.buildinfo.jinja2
@@ -0,0 +1,11 @@
+/*
+ * AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. DO NOT COMMIT.
+ */
+{# The rendered markdown is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+{% if platform != "qt" %}export {% endif %}const buildDate = {{ build_date }};
diff --git a/third_party/python/glean_parser/glean_parser/templates/javascript.jinja2 b/third_party/python/glean_parser/glean_parser/templates/javascript.jinja2
new file mode 100644
index 0000000000..4036e8922f
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/javascript.jinja2
@@ -0,0 +1,73 @@
+{# The final Javascript/Typescript code is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
+{% macro obj_declaration(obj) %}
+new {{ obj.type|class_name }}{% if obj.extra_keys and lang == "typescript" %}<{
+ {% for name, type in obj.allowed_extra_keys_with_types %}
+ {{ name }}?: {{ type|extra_type_name }},
+ {% endfor %}
+}>{% endif %}({
+ {% for arg_name in (obj.type|args).common if obj[arg_name] is defined %}
+ {{ arg_name|camelize }}: {{ obj[arg_name]|js }},
+ {% endfor %}
+}{% for arg_name in (obj.type|args).extra if obj[arg_name] is defined %}, {{ obj[arg_name]|js }}{% endfor %}){% endmacro %}
+{% macro labeled_obj_declaration(obj) %}
+new {{ "labeled"|class_name }}({
+ {% for arg_name in (obj.type|args).common if obj[arg_name] is defined %}
+ {{ arg_name|camelize }}: {{ obj[arg_name]|js }},
+ {% endfor %}
+}, {{ obj.type|class_name }}{% if obj.labels is not none %}, {{ obj.labels|js }}{% endif %}){% endmacro %}
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. DO NOT COMMIT.
+
+{% if platform != "qt" %}
+{% if has_labeled_metrics %}
+import LabeledMetricType from "@mozilla/glean/private/metrics/labeled";
+{% endif %}
+{% for type in types %}
+import {{ type|class_name }} from "@mozilla/glean/private/{{ type|import_path }}";
+{% endfor %}
+{% else %}
+.import org.mozilla.Glean {{ version }} as Glean
+{% endif %}
+
+{% for obj in objs.values() %}
+/**
+ * {{ obj.description|wordwrap() | replace("\n", "\n * ") }}
+ *
+ * Generated from `{{ obj.identifier() }}`.
+ */
+{% if obj.labeled %}
+{% if platform != "qt" %}export {% endif %}const {{ obj.name|camelize }} = {{ labeled_obj_declaration(obj) }};
+{% else %}
+{% if platform != "qt" %}export {% endif %}const {{ obj.name|camelize }} = {{ obj_declaration(obj) }};
+{% endif %}
+
+{% if obj|attr("_generate_enums") %}
+{% for name, suffix in obj["_generate_enums"] %}
+{% if obj|attr(name)|length and name == "reason_codes" %}
+/**
+ * Reason codes for `{{ obj.identifier() }}`.
+ *
+ * @readonly
+ * @enum {string}
+ */
+{% if lang == "typescript" %}
+export enum {{ obj.name|Camelize }}{{ name|Camelize }} {
+ {% for key in obj|attr(name) %}
+ {{ key|Camelize }} = "{{ key }}",
+ {% endfor %}
+}
+{% else %}
+{% if platform != "qt" %}export {% endif %}const {{ obj.name|Camelize }}{{ name|Camelize }} = {
+ {% for key in obj|attr(name) %}
+ "{{ key|Camelize }}": "{{ key }}",
+ {% endfor %}
+}
+{% endif %}
+{% endif %}
+{% endfor %}
+{% endif %}
+{% endfor %}
diff --git a/third_party/python/glean_parser/glean_parser/templates/kotlin.buildinfo.jinja2 b/third_party/python/glean_parser/glean_parser/templates/kotlin.buildinfo.jinja2
new file mode 100644
index 0000000000..2e0db5b302
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/kotlin.buildinfo.jinja2
@@ -0,0 +1,31 @@
+// -*- mode: kotlin -*-
+
+/*
+ * AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. DO NOT COMMIT.
+ */
+{# The rendered markdown is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+@file:Suppress("PackageNaming", "MaxLineLength")
+
+package {{ namespace }}
+
+import java.util.Calendar
+import java.util.TimeZone
+import {{ glean_namespace }}.BuildInfo
+import {{ namespace_package }}.BuildConfig
+
+@Suppress("MagicNumber")
+internal object GleanBuildInfo {
+ val buildInfo: BuildInfo by lazy {
+ BuildInfo(
+ versionCode = BuildConfig.VERSION_CODE.toString(),
+ versionName = BuildConfig.VERSION_NAME,
+ buildDate = {{ build_date }}
+ )
+ }
+}
diff --git a/third_party/python/glean_parser/glean_parser/templates/kotlin.geckoview.jinja2 b/third_party/python/glean_parser/glean_parser/templates/kotlin.geckoview.jinja2
new file mode 100644
index 0000000000..f58c788e93
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/kotlin.geckoview.jinja2
@@ -0,0 +1,124 @@
+// -*- mode: kotlin -*-
+
+/*
+ * AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. DO NOT COMMIT.
+ */
+{# The rendered markdown is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+@file:Suppress("PackageNaming", "MaxLineLength")
+package {{ namespace }}
+
+import {{ glean_namespace }}.private.BooleanMetricType // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.CounterMetricType // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.HistogramMetricBase // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.LabeledMetricType // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.QuantityMetricType // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.StringMetricType // ktlint-disable import-ordering no-unused-imports
+
+/*
+ * This class performs the mapping between Gecko metrics and Glean SDK
+ * metric types.
+ */
+internal object GleanGeckoMetricsMapping {
+ // Support exfiltration of Gecko histograms from products using both the
+ // Glean SDK and GeckoView. See bug 1566356 for more context.
+ @Suppress("UNUSED_PARAMETER")
+ fun getHistogram(geckoMetricName: String): HistogramMetricBase? {
+ {% if 'histograms' in gecko_metrics %}
+ return when (geckoMetricName) {
+ {% for category in gecko_metrics['histograms'].keys()|sort %}
+ // From {{ category|Camelize }}.kt
+ {% for metric in gecko_metrics['histograms'][category] %}
+ "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
+ {% endfor %}
+ {%- endfor %}
+ else -> null
+ }
+ {% else %}
+ return null
+ {% endif %}
+ }
+
+ // Support exfiltration of Gecko categorical histograms from products using
+ // both the Glean SDK and GeckoView. See bug 1571740 for more context.
+ @Suppress("UNUSED_PARAMETER")
+ fun getCategoricalMetric(
+ geckoMetricName: String
+ ): LabeledMetricType<CounterMetricType>? {
+ {% if 'categoricals' in gecko_metrics %}
+ return when (geckoMetricName) {
+ {% for category in gecko_metrics['categoricals'].keys()|sort %}
+ // From {{ category|Camelize }}.kt
+ {% for metric in gecko_metrics['categoricals'][category] %}
+ "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
+ {% endfor %}
+ {%- endfor %}
+ else -> null
+ }
+ {% else %}
+ return null
+ {% endif %}
+ }
+
+ // Support exfiltration of Gecko boolean scalars from products using both the
+ // Glean SDK and GeckoView. See bug 1579365 for more context.
+ @Suppress("UNUSED_PARAMETER")
+ fun getBooleanScalar(geckoMetricName: String): BooleanMetricType? {
+ {% if 'boolean' in gecko_metrics %}
+ return when (geckoMetricName) {
+ {% for category in gecko_metrics['boolean'].keys()|sort %}
+ // From {{ category|Camelize }}.kt
+ {% for metric in gecko_metrics['boolean'][category] %}
+ "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
+ {% endfor %}
+ {%- endfor %}
+ else -> null
+ }
+ {% else %}
+ return null
+ {% endif %}
+ }
+
+ // Support exfiltration of Gecko string scalars from products using both the
+ // Glean SDK and GeckoView. See bug 1579365 for more context.
+ @Suppress("UNUSED_PARAMETER")
+ fun getStringScalar(geckoMetricName: String): StringMetricType? {
+ {% if 'string' in gecko_metrics %}
+ return when (geckoMetricName) {
+ {% for category in gecko_metrics['string'].keys()|sort %}
+ // From {{ category|Camelize }}.kt
+ {% for metric in gecko_metrics['string'][category] %}
+ "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
+ {% endfor %}
+ {%- endfor %}
+ else -> null
+ }
+ {% else %}
+ return null
+ {% endif %}
+ }
+
+ // Support exfiltration of Gecko quantity scalars from products using both the
+ // Glean SDK and GeckoView. See bug 1579365 for more context.
+ @Suppress("UNUSED_PARAMETER")
+ fun getQuantityScalar(geckoMetricName: String): QuantityMetricType? {
+ {% if 'quantity' in gecko_metrics %}
+ return when (geckoMetricName) {
+ {% for category in gecko_metrics['quantity'].keys()|sort %}
+ // From {{ category|Camelize }}.kt
+ {% for metric in gecko_metrics['quantity'][category] %}
+ "{{ metric.gecko_datapoint }}" -> {{ category|Camelize }}.{{ metric.name|camelize }}
+ {% endfor %}
+ {%- endfor %}
+ else -> null
+ }
+ {% else %}
+ return null
+ {% endif %}
+ }
+}
diff --git a/third_party/python/glean_parser/glean_parser/templates/kotlin.jinja2 b/third_party/python/glean_parser/glean_parser/templates/kotlin.jinja2
new file mode 100644
index 0000000000..bd800af01d
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/kotlin.jinja2
@@ -0,0 +1,133 @@
+// -*- mode: kotlin -*-
+
+/*
+ * AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. DO NOT COMMIT.
+ */
+{# The rendered markdown is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+{%- macro obj_declaration(obj, suffix='', access='', lazy=False) -%}
+{% if (access != "private ") -%}
+@get:JvmName("{{ obj.name|camelize }}{{ suffix }}")
+{% endif -%}
+{{ access }}val {{ obj.name|camelize }}{{ suffix }}: {{ obj|type_name }}{% if lazy %} by lazy { {%- else %} ={% endif %} // generated from {{ obj.identifier() }}
+{% if obj.type == 'ping' %}
+ {{ obj|type_name }}(
+ {% for arg_name in ping_args if obj[arg_name] is defined %}
+ {{ arg_name|camelize }} = {{ obj[arg_name]|kotlin }}{{ "," if not loop.last }}
+ {% endfor %}
+ )
+{% else %}
+ {{ obj|type_name }}(
+ CommonMetricData(
+ {% for arg_name in common_metric_args if obj[arg_name] is defined %}
+ {{ arg_name|camelize }} = {{ obj[arg_name]|kotlin }}{{ "," if not loop.last }}
+ {% endfor %}
+ ){%- for arg_name in extra_metric_args if obj[arg_name] is defined -%}
+ , {{ arg_name|camelize }} = {{ obj[arg_name]|kotlin }}
+ {%- endfor -%}
+ )
+{% endif %}
+{% if lazy %}}{% endif %}
+{%- endmacro -%}
+
+{%- macro reason_enum_decl(obj, name, suffix) -%}
+@Suppress("ClassNaming", "EnumNaming")
+enum class {{ obj.name|camelize }}{{ suffix }} : ReasonCode {
+{% for key in obj|attr(name) %}
+ {{ key|camelize }} {
+ override fun code(): Int = {{ loop.index-1 }}
+ }{{ "," if not loop.last }}{{ ";" if loop.last }}
+
+{% endfor %}
+}
+{%- endmacro %}
+
+{%- macro struct_decl(obj, name, suffix) -%}
+@Suppress("ClassNaming", "EnumNaming")
+data class {{ obj.name|Camelize }}{{ suffix }}(
+{% for item, typ in obj|attr(name) %}
+ val {{ item|camelize }}: {{typ|extra_type_name}}? = null{{ "," if not loop.last }}
+{% endfor %}
+) : EventExtras {
+ override fun toExtraRecord(): Map<String, String> {
+ val map = mutableMapOf<String, String>()
+
+ {% for item in obj|attr(name) %}
+ this.{{ item[0]|camelize }}?.let {
+ map.put("{{item[0]}}", it.toString())
+ }
+ {% endfor %}
+ return map
+ }
+}
+{%- endmacro -%}
+
+/* ktlint-disable no-blank-line-before-rbrace */
+@file:Suppress("PackageNaming", "MaxLineLength")
+package {{ namespace }}
+
+import {{ glean_namespace }}.private.CommonMetricData // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.EventExtras // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.HistogramType // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.Lifetime // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.MemoryUnit // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.NoExtras // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.ReasonCode // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.NoReasonCodes // ktlint-disable import-ordering no-unused-imports
+import {{ glean_namespace }}.private.TimeUnit // ktlint-disable import-ordering no-unused-imports
+{% for obj_type in obj_types %}
+import {{ glean_namespace }}.private.{{ obj_type }} // ktlint-disable import-ordering
+{% endfor %}
+{% if has_labeled_metrics %}
+import {{ glean_namespace }}.private.LabeledMetricType // ktlint-disable import-ordering
+{% endif %}
+
+internal object {{ category_name|Camelize }} {
+{% for obj in objs.values() %}
+ {% if obj.type == "ping" %}
+ {% if obj|attr("_generate_enums") %}
+ {% for name, suffix in obj["_generate_enums"] %}
+ {% if obj|attr(name)|length %}
+ {{ reason_enum_decl(obj, name, suffix)|indent }}
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+ {% else %}
+ {% if obj|attr("_generate_enums") %}
+ {% for name, suffix in obj["_generate_enums"] %}
+ {% if obj|attr(name)|length %}
+ {{ struct_decl(obj, name, suffix)|indent }}
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+ {% endif %}
+{% endfor %}
+{% for obj in objs.values() %}
+ {% if obj.labeled %}
+ {{ obj_declaration(obj, 'Label', 'private ') | indent }}
+ /**
+ * {{ obj.description|wordwrap() | replace('\n', '\n * ') }}
+ */
+ val {{ obj.name|camelize }}: LabeledMetricType<{{ obj|type_name }}> by lazy { // generated from {{ obj.identifier() }}
+ LabeledMetricType(
+ category = {{ obj.category|kotlin }},
+ name = {{ obj.name|kotlin }},
+ subMetric = {{ obj.name|camelize }}Label,
+ disabled = {{ obj.is_disabled()|kotlin }},
+ lifetime = {{ obj.lifetime|kotlin }},
+ sendInPings = {{ obj.send_in_pings|kotlin }},
+ labels = {{ obj.labels|kotlin }}
+ )
+ }
+ {% else %}
+ /**
+ * {{ obj.description|wordwrap() | replace('\n', '\n * ') }}
+ */
+ {{ obj_declaration(obj, lazy=obj.type != 'ping') | indent }}
+ {% endif %}
+{%- endfor %}
+}
diff --git a/third_party/python/glean_parser/glean_parser/templates/markdown.jinja2 b/third_party/python/glean_parser/glean_parser/templates/markdown.jinja2
new file mode 100644
index 0000000000..9370311247
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/markdown.jinja2
@@ -0,0 +1,98 @@
+<!-- AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. -->
+{# The rendered markdown is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
+
+# Metrics
+
+This document enumerates the metrics collected by {{ project_title }} using the [Glean SDK](https://mozilla.github.io/glean/book/index.html).
+This project may depend on other projects which also collect metrics.
+This means you might have to go searching through the dependency tree to get a full picture of everything collected by this project.
+{% if introduction_extra %}
+
+{{ introduction_extra }}
+{% endif %}
+
+# Pings
+
+{% for ping_name in metrics_by_pings.keys()|sort %}
+- [{{ ping_name }}]({{ '#' }}{{ ping_name|replace(" ","-") }})
+{% endfor %}
+
+{% for ping_name in metrics_by_pings.keys()|sort %}
+{% raw %}##{% endraw %} {{ ping_name }}
+
+{% if ping_name|ping_desc and ping_name|ping_desc|length > 0 %}
+{{ ping_name|ping_desc }}
+
+{% if ping_name|ping_docs|length > 0 %}
+See the Glean SDK documentation for the [`{{ ping_name }}` ping]({{ ping_name|ping_docs }}).
+
+{% endif %}
+{% endif %}
+{% if ping_name|ping_send_if_empty %}
+This ping is sent if empty.
+
+{% endif %}
+{% if ping_name|ping_include_client_id %}
+This ping includes the [client id](https://mozilla.github.io/glean/book/user/pings/index.html#the-client_info-section).
+
+{% endif %}
+{% if ping_name|ping_data_reviews %}
+**Data reviews for this ping:**
+
+{% for review in ping_name|ping_data_reviews %}
+- <{{review}}>
+{% endfor %}
+
+{% endif %}
+{% if ping_name|ping_bugs %}
+**Bugs related to this ping:**
+
+{% for bug in ping_name|ping_bugs %}
+- {% if bug|int != 0 %}{{bug}}{% else %}<{{bug}}>{% endif %}
+
+{% endfor %}
+
+{% endif %}
+{% if ping_name|ping_reasons %}
+**Reasons this ping may be sent:**
+
+{% for (reason, desc) in ping_name|ping_reasons|dictsort %}
+- `{{ reason }}`: {{ desc|indent(6, first=False) }}
+{% endfor %}
+
+{% endif %}
+All Glean pings contain built-in metrics in the [`ping_info`](https://mozilla.github.io/glean/book/user/pings/index.html#the-ping_info-section) and [`client_info`](https://mozilla.github.io/glean/book/user/pings/index.html#the-client_info-section) sections.
+
+{% if metrics_by_pings[ping_name] %}
+In addition to those built-in metrics, the following metrics are added to the ping:
+
+| Name | Type | Description | Data reviews | Extras | Expiration | [Data Sensitivity](https://wiki.mozilla.org/Firefox/Data_Collection) |
+| --- | --- | --- | --- | --- | --- | --- |
+{% for metric in metrics_by_pings[ping_name] %}
+| {{ metric.identifier() }} |
+{{- '['}}{{ metric.type }}]({{ metric.type|metrics_docs }}) |
+{{- metric.description|replace("\n", " ") }} |
+{%- for data_review in metric.data_reviews %}
+[{{ data_review|ping_review_title(loop.index) }}]({{ data_review }}){{ ", " if not loop.last }}
+{%- endfor -%} |
+{%- if metric|extra_info -%}
+<ul>
+{%- for property, desc in metric|extra_info %}
+<li>{{ property }}{%- if desc is not none -%}: {{ desc|replace("\n", " ") }}{%- endif -%}</li>
+{%- endfor -%}
+</ul>
+{%- endif -%} |
+{{- metric.expires }} |
+{{- metric.data_sensitivity|data_sensitivity_numbers }} |
+{% endfor %}
+{% else %}
+This ping contains no metrics.
+{% endif %}
+
+{% endfor %}
+Data categories are [defined here](https://wiki.mozilla.org/Firefox/Data_Collection).
+
+<!-- AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. -->
+{# The rendered markdown is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
diff --git a/third_party/python/glean_parser/glean_parser/templates/qmldir.jinja2 b/third_party/python/glean_parser/glean_parser/templates/qmldir.jinja2
new file mode 100644
index 0000000000..f511912808
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/qmldir.jinja2
@@ -0,0 +1,4 @@
+{% for category in categories|sort %}
+{{ category|Camelize }} {{ version }} {{ category|camelize }}.js
+{% endfor %}
+depends org.mozilla.Glean {{ version }}
diff --git a/third_party/python/glean_parser/glean_parser/templates/rust.jinja2 b/third_party/python/glean_parser/glean_parser/templates/rust.jinja2
new file mode 100644
index 0000000000..aff78d47ed
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/rust.jinja2
@@ -0,0 +1,276 @@
+// -*- mode: Rust -*-
+
+// AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. DO NOT COMMIT.
+{# The rendered source is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+{% macro generate_extra_keys(obj) %}
+{% for name, _ in obj["_generate_enums"] %}
+{# we always use the `extra` suffix, because we only expose the new event API #}
+{% set suffix = "Extra" %}
+{% if obj|attr(name)|length %}
+ {{ extra_keys_with_types(obj, name, suffix)|indent }}
+{% endif %}
+{% endfor %}
+{% endmacro %}
+{% macro extra_keys_with_types(obj, name, suffix) %}
+#[derive(Default, Debug, Clone, Hash, Eq, PartialEq)]
+pub struct {{ obj.name|Camelize }}{{ suffix }} {
+ {% for item, type in obj|attr(name) %}
+ pub {{ item|snake_case }}: Option<{{type|extra_type_name}}>,
+ {% endfor %}
+}
+
+impl ExtraKeys for {{ obj.name|Camelize }}{{ suffix }} {
+ const ALLOWED_KEYS: &'static [&'static str] = {{ obj.allowed_extra_keys|extra_keys }};
+
+ fn into_ffi_extra(self) -> ::std::collections::HashMap<::std::string::String, ::std::string::String> {
+ let mut map = ::std::collections::HashMap::new();
+ {% for key, _ in obj|attr(name) %}
+ self.{{key|snake_case}}.and_then(|val| map.insert("{{key}}".to_string(), val));
+ {% endfor %}
+ map
+ }
+}
+{% endmacro %}
+{% for category in categories %}
+{% if category.contains_pings %}
+{% for obj in category.objs.values() %}
+#[allow(non_upper_case_globals, dead_code)]
+/// {{ obj.description|wordwrap() | replace('\n', '\n/// ') }}
+#[rustfmt::skip]
+pub static {{ obj.name|snake_case }}: ::glean::private::__export::Lazy<::glean::private::PingType> =
+ ::glean::private::__export::Lazy::new(|| ::glean::private::PingType::new("{{ obj.name }}", {{ obj.include_client_id|rust }}, {{ obj.send_if_empty|rust }}, {{ obj.reason_codes|rust }}));
+{% endfor %}
+{% else %}
+pub mod {{ category.name|snake_case }} {
+ #[allow(unused_imports)] // HistogramType might be unused, let's avoid warnings
+ use glean::{private::*, traits::ExtraKeys, traits::NoExtraKeys, CommonMetricData, HistogramType, Lifetime, TimeUnit, MemoryUnit};
+ {% for obj in category.objs.values() %}
+
+ {% if obj|attr("_generate_enums") %}
+{{ generate_extra_keys(obj) }}
+ {%- endif %}
+ #[allow(non_upper_case_globals, dead_code)]
+ /// generated from {{ category.name }}.{{ obj.name }}
+ ///
+ /// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
+ pub static {{ obj.name|snake_case }}: ::glean::private::__export::Lazy<{{ obj|type_name }}> = ::glean::private::__export::Lazy::new(|| {
+ {{ obj|ctor }}(CommonMetricData {
+ category: {{ obj.category|rust }},
+ name: {{ obj.name|rust }},
+ send_in_pings: {{ obj.send_in_pings|rust }},
+ lifetime: {{ obj.lifetime|rust }},
+ disabled: {{ obj.is_disabled()|rust }},
+ ..Default::default()
+ }
+ {%- for arg_name in extra_metric_args if obj[arg_name] is defined and arg_name != 'allowed_extra_keys' -%}
+ , {{ obj[arg_name]|rust }}
+ {%- endfor -%}
+ {{ ", " if obj.labeled else ")\n" }}
+ {%- if obj.labeled -%}
+ {%- if obj.labels -%}
+ Some({{ obj.labels|rust }})
+ {%- else -%}
+ None
+ {%- endif -%})
+ {% endif %}
+ });
+ {% endfor %}
+}
+{% endif %}
+{% endfor %}
+{% if metric_by_type|length > 0 %}
+
+#[allow(dead_code)]
+pub(crate) mod __glean_metric_maps {
+ use std::collections::HashMap;
+
+ use super::{id_for_extra_key, extra_keys_len};
+ use crate::private::*;
+
+{% for typ, metrics in metric_by_type.items() %}
+ pub static {{typ.0}}: ::glean::private::__export::Lazy<HashMap<MetricId, &Lazy<{{typ.1}}>>> = ::glean::private::__export::Lazy::new(|| {
+ let mut map = HashMap::with_capacity({{metrics|length}});
+ {% for metric in metrics %}
+ map.insert({{metric.0}}.into(), &super::{{metric.1}});
+ {% endfor %}
+ map
+ });
+
+{% endfor %}
+
+ /// Wrapper to record an event based on its metric ID.
+ ///
+ /// # Arguments
+ ///
+ /// * `metric_id` - The metric's ID to look up
+ /// * `extra` - A map of (extra key id, string) pairs.
+ /// The map will be decoded into the appropriate `ExtraKeys` type.
+ /// # Returns
+ ///
+ /// Returns `Ok(())` if the event was found and `record` was called with the given `extra`,
+ /// or an `EventRecordingError::InvalidId` if no event by that ID exists
+ /// or an `EventRecordingError::InvalidExtraKey` if the `extra` map could not be deserialized.
+ pub(crate) fn record_event_by_id(metric_id: u32, extra: HashMap<i32, String>) -> Result<(), EventRecordingError> {
+ match metric_id {
+{% for metric_id, event in events_by_id.items() %}
+ {{metric_id}} => {
+ assert!(
+ extra_keys_len(&super::{{event}}) != 0 || extra.is_empty(),
+ "No extra keys allowed, but some were passed"
+ );
+
+ super::{{event}}.record_raw(extra);
+ Ok(())
+ }
+{% endfor %}
+ _ => Err(EventRecordingError::InvalidId),
+ }
+ }
+
+ /// Wrapper to record an event based on its metric ID, with a provided timestamp.
+ ///
+ /// # Arguments
+ ///
+ /// * `metric_id` - The metric's ID to look up
+ /// * `timestamp` - The time at which this event was recorded.
+ /// * `extra` - A map of (extra key id, string) pairs.
+ /// The map will be decoded into the appropriate `ExtraKeys` type.
+ /// # Returns
+ ///
+ /// Returns `Ok(())` if the event was found and `record` was called with the given `extra`,
+ /// or an `EventRecordingError::InvalidId` if no event by that ID exists
+ /// or an `EventRecordingError::InvalidExtraKey` if the event doesn't take extra pairs,
+ /// but some are passed in.
+ pub(crate) fn record_event_by_id_with_time(metric_id: MetricId, timestamp: u64, extra: HashMap<i32, String>) -> Result<(), EventRecordingError> {
+ match metric_id {
+{% for metric_id, event in events_by_id.items() %}
+ MetricId({{metric_id}}) => {
+ if extra_keys_len(&super::{{event}}) == 0 && !extra.is_empty() {
+ return Err(EventRecordingError::InvalidExtraKey);
+ }
+
+ super::{{event}}.record_with_time(timestamp, extra);
+ Ok(())
+ }
+{% endfor %}
+ _ => Err(EventRecordingError::InvalidId),
+ }
+ }
+
+ /// Wrapper to record an event based on its metric ID.
+ ///
+ /// # Arguments
+ ///
+ /// * `metric_id` - The metric's ID to look up
+    /// * `extra` - A map of (string, string) pairs.
+ /// The map will be decoded into the appropriate `ExtraKeys` types.
+ /// # Returns
+ ///
+ /// Returns `Ok(())` if the event was found and `record` was called with the given `extra`,
+ /// or an `EventRecordingError::InvalidId` if no event by that ID exists
+ /// or an `EventRecordingError::InvalidExtraKey` if the `extra` map could not be deserialized.
+ pub(crate) fn record_event_by_id_with_strings(metric_id: u32, extra: HashMap<String, String>) -> Result<(), EventRecordingError> {
+ match metric_id {
+{% for metric_id, event in events_by_id.items() %}
+ {{metric_id}} => {
+ assert!(
+ extra_keys_len(&super::{{event}}) != 0 || extra.is_empty(),
+ "No extra keys allowed, but some were passed"
+ );
+
+ let extra = extra
+ .into_iter()
+ .map(|(k, v)| id_for_extra_key(&*k, &super::{{event}}).map(|k| (k, v)))
+ .collect::<Result<HashMap<_, _>, _>>()?;
+ super::{{event}}.record_raw(extra);
+ Ok(())
+ }
+{% endfor %}
+ _ => Err(EventRecordingError::InvalidId),
+ }
+ }
+
+    /// Wrapper to get the currently stored events for an event metric.
+ ///
+ /// # Arguments
+ ///
+ /// * `metric_id` - The metric's ID to look up
+ /// * `ping_name` - (Optional) The ping name to look into.
+ /// Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// Returns the recorded events or `None` if nothing stored.
+ ///
+ /// # Panics
+ ///
+ /// Panics if no event by the given metric ID could be found.
+ pub(crate) fn event_test_get_value_wrapper(metric_id: u32, ping_name: Option<String>) -> Option<Vec<RecordedEvent>> {
+ match metric_id {
+{% for metric_id, event in events_by_id.items() %}
+ {{metric_id}} => super::{{event}}.test_get_value(ping_name.as_deref()),
+{% endfor %}
+ _ => panic!("No event for metric id {}", metric_id),
+ }
+ }
+
+ /// Check the provided event for errors.
+ ///
+ /// # Arguments
+ ///
+ /// * `metric_id` - The metric's ID to look up
+ /// * `ping_name` - (Optional) The ping name to look into.
+ /// Defaults to the first value in `send_in_pings`.
+ ///
+ /// # Returns
+ ///
+ /// Returns a string for the recorded error or `None`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if no event by the given metric ID could be found.
+ #[allow(unused_variables)]
+ pub(crate) fn event_test_get_error(metric_id: u32, ping_name: Option<String>) -> Option<String> {
+ #[cfg(feature = "with_gecko")]
+ match metric_id {
+{% for metric_id, event in events_by_id.items() %}
+ {{metric_id}} => test_get_errors_string!(super::{{event}}, ping_name),
+{% endfor %}
+ _ => panic!("No event for metric id {}", metric_id),
+ }
+
+ #[cfg(not(feature = "with_gecko"))]
+ {
+ return None;
+ }
+ }
+
+ pub(crate) mod submetric_maps {
+ use std::sync::{
+ atomic::AtomicU32,
+ RwLock,
+ };
+ use super::*;
+
+ pub(crate) const MIN_LABELED_SUBMETRIC_ID: u32 = {{min_submetric_id}};
+ pub(crate) static NEXT_LABELED_SUBMETRIC_ID: AtomicU32 = AtomicU32::new(MIN_LABELED_SUBMETRIC_ID);
+ pub(crate) static LABELED_METRICS_TO_IDS: ::glean::private::__export::Lazy<RwLock<HashMap<(u32, String), u32>>> = ::glean::private::__export::Lazy::new(||
+ RwLock::new(HashMap::new())
+ );
+
+{% for typ, metrics in metric_by_type.items() %}
+{% if typ.0 in ('BOOLEAN_MAP', 'COUNTER_MAP', 'STRING_MAP') %}
+ pub static {{typ.0}}: ::glean::private::__export::Lazy<RwLock<HashMap<MetricId, Labeled{{typ.1}}>>> = ::glean::private::__export::Lazy::new(||
+ RwLock::new(HashMap::new())
+ );
+{% endif %}
+{% endfor%}
+ }
+}
+{% endif %}
diff --git a/third_party/python/glean_parser/glean_parser/templates/swift.jinja2 b/third_party/python/glean_parser/glean_parser/templates/swift.jinja2
new file mode 100644
index 0000000000..10e2f61001
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/templates/swift.jinja2
@@ -0,0 +1,138 @@
+// -*- mode: Swift -*-
+
+// AUTOGENERATED BY glean_parser v{{ parser_version }}. DO NOT EDIT. DO NOT COMMIT.
+{# The rendered Swift code is autogenerated, but this
+Jinja2 template is not. Please file bugs! #}
+
+#if canImport(Foundation)
+ import Foundation
+#endif
+
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+{% macro obj_declaration(obj, suffix='', access='') %}
+{{ access }}static let {{ obj.name|camelize|variable_name }}{{ suffix }} = {{ obj|type_name }}( // generated from {{ obj.identifier() }}
+ CommonMetricData(
+ {% for arg_name in common_metric_args if obj[arg_name] is defined %}
+ {{ arg_name|camelize }}: {{ obj[arg_name]|swift }}{{ "," if not loop.last }}
+ {% endfor %}
+ )
+ {% for arg_name in extra_metric_args if obj[arg_name] is defined %}
+ , {{ obj[arg_name]|swift }}
+ {% endfor %}
+ )
+{% endmacro %}
+
+{% macro struct_decl(obj, name, suffix) %}
+struct {{ obj.name|Camelize }}{{ suffix }}: EventExtras {
+ {% for item, typ in obj|attr(name) %}
+ var {{ item|camelize|variable_name }}: {{typ|extra_type_name}}?
+ {% endfor %}
+
+ func toExtraRecord() -> [String: String] {
+ var record = [String: String]()
+
+ {% for item in obj|attr(name) %}
+ if let {{ item[0]|camelize }} = self.{{item[0]|camelize}} {
+ record["{{item[0]}}"] = String({{ item[0]|camelize }})
+ }
+ {% endfor %}
+
+ return record
+ }
+ }
+{% endmacro %}
+
+{% if not allow_reserved %}
+import {{ glean_namespace }}
+
+{% endif %}
+// swiftlint:disable superfluous_disable_command
+// swiftlint:disable nesting
+// swiftlint:disable line_length
+// swiftlint:disable identifier_name
+// swiftlint:disable force_try
+
+extension {{ namespace }} {
+ {% if build_info %}
+ class GleanBuild {
+ private init() {
+ // Intentionally left private, no external user can instantiate a new global object.
+ }
+
+ public static let info = BuildInfo(buildDate: {{ build_info.build_date }})
+ }
+ {% endif %}
+
+ {% for category in categories %}
+ {% if category.contains_pings %}
+ class {{ category.name|Camelize }} {
+ public static let shared = {{ category.name|Camelize }}()
+ private init() {
+ // Intentionally left private, no external user can instantiate a new global object.
+ }
+
+ {% for obj in category.objs.values() %}
+ {% if obj|attr("_generate_enums") %}
+ {% for name, suffix in obj["_generate_enums"] %}
+ {% if obj|attr(name)|length %}
+ enum {{ obj.name|Camelize }}{{ suffix }}: Int, ReasonCodes {
+ {% for key in obj|attr(name) %}
+ case {{ key|camelize|variable_name }} = {{ loop.index-1 }}
+ {% endfor %}
+
+ public func index() -> Int {
+ return self.rawValue
+ }
+ }
+
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+ /// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
+ let {{ obj.name|camelize|variable_name }} = {{obj|type_name}}(
+ name: {{ obj.name|swift }},
+ includeClientId: {{obj.include_client_id|swift}},
+ sendIfEmpty: {{obj.send_if_empty|swift}},
+ reasonCodes: {{obj.reason_codes|swift}}
+ )
+
+ {% endfor %}
+ }
+
+ {% else %}
+ enum {{ category.name|Camelize }} {
+ {% for obj in category.objs.values() %}
+ {% if obj|attr("_generate_enums") %}
+ {% for name, suffix in obj["_generate_enums"] %}
+ {% if obj|attr(name)|length %}
+ {{ struct_decl(obj, name, suffix)|indent }}
+ {% endif %}
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+ {% for obj in category.objs.values() %}
+ {% if obj.labeled %}
+ {{ obj_declaration(obj, 'Label', 'private ') | indent }}
+ /// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
+ static let {{ obj.name|camelize|variable_name }} = try! LabeledMetricType<{{ obj|type_name }}>( // generated from {{ obj.identifier() }}
+ category: {{ obj.category|swift }},
+ name: {{ obj.name|swift }},
+ sendInPings: {{ obj.send_in_pings|swift }},
+ lifetime: {{ obj.lifetime|swift }},
+ disabled: {{ obj.is_disabled()|swift }},
+ subMetric: {{ obj.name|camelize }}Label,
+ labels: {{ obj.labels|swift }}
+ )
+
+ {% else %}
+ /// {{ obj.description|wordwrap() | replace('\n', '\n /// ') }}
+ {{ obj_declaration(obj) | indent }}
+ {% endif %}
+ {% endfor %}
+ }
+
+ {% endif %}
+ {% endfor %}
+}
diff --git a/third_party/python/glean_parser/glean_parser/translate.py b/third_party/python/glean_parser/glean_parser/translate.py
new file mode 100644
index 0000000000..ecb7515c05
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/translate.py
@@ -0,0 +1,227 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+High-level interface for translating `metrics.yaml` into other formats.
+"""
+
+from pathlib import Path
+import os
+import shutil
+import tempfile
+from typing import Any, Callable, Dict, Iterable, List, Optional
+
+from . import lint
+from . import parser
+from . import javascript
+from . import kotlin
+from . import markdown
+from . import metrics
+from . import rust
+from . import swift
+from . import util
+
+
+class Outputter:
+ """
+ Class to define an output format.
+
+ Each outputter in the table has the following member values:
+
+ - output_func: the main function of the outputter, the one which
+ does the actual translation.
+
+ - clear_patterns: A list of glob patterns to clear in the directory before
+ writing new results to it.
+ """
+
+ def __init__(
+ self,
+ output_func: Callable[[metrics.ObjectTree, Path, Dict[str, Any]], None],
+ clear_patterns: Optional[List[str]] = None,
+ ):
+ if clear_patterns is None:
+ clear_patterns = []
+
+ self.output_func = output_func
+ self.clear_patterns = clear_patterns
+
+
+OUTPUTTERS = {
+ "javascript": Outputter(javascript.output_javascript, []),
+ "typescript": Outputter(javascript.output_typescript, []),
+ "kotlin": Outputter(kotlin.output_kotlin, ["*.kt"]),
+ "markdown": Outputter(markdown.output_markdown, []),
+ "swift": Outputter(swift.output_swift, ["*.swift"]),
+ "rust": Outputter(rust.output_rust, []),
+}
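+
+# e.g. (sketch): OUTPUTTERS["kotlin"].output_func is kotlin.output_kotlin and
+# its clear_patterns is ["*.kt"], so stale generated .kt files are removed
+# before new ones are written.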
+
+
+def transform_metrics(objects):
+ """
+ Transform the object model from one that represents the YAML definitions
+ to one that reflects the type specifics needed by code generators.
+
+ e.g. This will transform a `rate` to be a `numerator` if its denominator is
+ external.
+ """
+ counters = {}
+ numerators_by_denominator: Dict[str, Any] = {}
+ for category_name, category_val in objects.items():
+ if category_name == "tags":
+ continue
+ for metric in category_val.values():
+ fqmn = metric.identifier()
+ if getattr(metric, "type", None) == "counter":
+ counters[fqmn] = metric
+ denominator_name = getattr(metric, "denominator_metric", None)
+ if denominator_name:
+ metric.type = "numerator"
+ numerators_by_denominator.setdefault(denominator_name, [])
+ numerators_by_denominator[denominator_name].append(metric)
+
+ for denominator_name, numerators in numerators_by_denominator.items():
+ if denominator_name not in counters:
+            raise ValueError(
+                f"No `counter` named {denominator_name} found to be used as "
+                f"denominator for {numerators}"
+            )
+ counters[denominator_name].__class__ = metrics.Denominator
+ counters[denominator_name].type = "denominator"
+ counters[denominator_name].numerators = numerators
+
+
+def translate_metrics(
+ input_filepaths: Iterable[Path],
+ output_dir: Path,
+ translation_func: Callable[[metrics.ObjectTree, Path, Dict[str, Any]], None],
+ clear_patterns: Optional[List[str]] = None,
+ options: Optional[Dict[str, Any]] = None,
+ parser_config: Optional[Dict[str, Any]] = None,
+):
+ """
+ Translate the files in `input_filepaths` by running the metrics through a
+ translation function and writing the results in `output_dir`.
+
+ :param input_filepaths: list of paths to input metrics.yaml files
+ :param output_dir: the path to the output directory
+ :param translation_func: the function that actually performs the translation.
+ It is passed the following arguments:
+
+ - metrics_objects: The tree of metrics as pings as returned by
+ `parser.parse_objects`.
+ - output_dir: The path to the output directory.
+ - options: A dictionary of output format-specific options.
+
+ Examples of translation functions are in `kotlin.py` and `swift.py`.
+ :param clear_patterns: a list of glob patterns of files to clear before
+ generating the output files. By default, no files will be cleared (i.e.
+ the directory should be left alone).
+ :param options: dictionary of options. The available options are backend
+ format specific. These are passed unchanged to `translation_func`.
+ :param parser_config: A dictionary of options that change parsing behavior.
+ See `parser.parse_metrics` for more info.
+ """
+ if clear_patterns is None:
+ clear_patterns = []
+
+ if options is None:
+ options = {}
+
+ if parser_config is None:
+ parser_config = {}
+
+ input_filepaths = util.ensure_list(input_filepaths)
+
+ allow_missing_files = parser_config.get("allow_missing_files", False)
+ if not input_filepaths and not allow_missing_files:
+ print("❌ No metric files specified. ", end="")
+ print("Use `--allow-missing-files` to not treat this as an error.")
+ return 1
+
+ if lint.glinter(input_filepaths, parser_config):
+ return 1
+
+ all_objects = parser.parse_objects(input_filepaths, parser_config)
+
+ if util.report_validation_errors(all_objects):
+ return 1
+
+ # allow_reserved is also relevant to the translators, so copy it there
+ if parser_config.get("allow_reserved"):
+ options["allow_reserved"] = True
+
+ # We don't render tags anywhere yet.
+ all_objects.value.pop("tags", None)
+
+ # Apply additional general transformations to all metrics
+ transform_metrics(all_objects.value)
+
+ # Write everything out to a temporary directory, and then move it to the
+ # real directory, for transactional integrity.
+ with tempfile.TemporaryDirectory() as tempdir:
+ tempdir_path = Path(tempdir)
+ translation_func(all_objects.value, tempdir_path, options)
+
+ if output_dir.is_file():
+ output_dir.unlink()
+ elif output_dir.is_dir() and len(clear_patterns):
+ for clear_pattern in clear_patterns:
+ for filepath in output_dir.glob(clear_pattern):
+ filepath.unlink()
+ if len(list(output_dir.iterdir())):
+ print(f"Extra contents found in '{output_dir}'.")
+
+ # We can't use shutil.copytree alone if the directory already exists.
+ # However, if it doesn't exist, make sure to create one otherwise
+ # shutil.copy will fail.
+ os.makedirs(str(output_dir), exist_ok=True)
+ for filename in tempdir_path.glob("*"):
+ shutil.copy(str(filename), str(output_dir))
+
+ return 0
+
+
+def translate(
+ input_filepaths: Iterable[Path],
+ output_format: str,
+ output_dir: Path,
+ options: Optional[Dict[str, Any]] = None,
+ parser_config: Optional[Dict[str, Any]] = None,
+):
+ """
+ Translate the files in `input_filepaths` to the given `output_format` and
+ put the results in `output_dir`.
+
+ :param input_filepaths: list of paths to input metrics.yaml files
+ :param output_format: the name of the output format
+ :param output_dir: the path to the output directory
+ :param options: dictionary of options. The available options are backend
+ format specific.
+ :param parser_config: A dictionary of options that change parsing behavior.
+ See `parser.parse_metrics` for more info.
+ """
+ if options is None:
+ options = {}
+
+ if parser_config is None:
+ parser_config = {}
+
+ format_desc = OUTPUTTERS.get(output_format, None)
+
+ if format_desc is None:
+ raise ValueError(f"Unknown output format '{output_format}'")
+
+ return translate_metrics(
+ input_filepaths,
+ output_dir,
+ format_desc.output_func,
+ format_desc.clear_patterns,
+ options,
+ parser_config,
+ )
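+
+
+# A minimal usage sketch (hypothetical paths, not part of this module):
+#
+#     from pathlib import Path
+#     from glean_parser.translate import translate
+#
+#     rc = translate([Path("metrics.yaml")], "rust", Path("generated/"))
+#     assert rc == 0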
diff --git a/third_party/python/glean_parser/glean_parser/translation_options.py b/third_party/python/glean_parser/glean_parser/translation_options.py
new file mode 100644
index 0000000000..48774fee3c
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/translation_options.py
@@ -0,0 +1,54 @@
+import pydoc
+
+
+def translate_options(ctx, param, value):
+ text = """Target language options for Translate function
+
+These are backend specific and optional, provide as key:value
+
+Rust: no options.
+
+Swift:
+- `namespace`: The namespace to generate metrics in
+- `glean_namespace`: The namespace to import Glean from
+- `allow_reserved`: When True, this is a Glean-internal build
+- `with_buildinfo`: If "true" the `GleanBuildInfo` is generated.
+ Otherwise generation of that file is skipped.
+ Defaults to "true".
+- `build_date`: If set to `0` a static unix epoch time will be used.
+  If set to an ISO8601 datetime string (e.g. `2022-01-03T17:30:00`)
+ it will use that date.
+ Other values will throw an error.
+ If not set it will use the current date & time.
+
+Kotlin:
+- `namespace`: The package namespace to declare at the top of the
+ generated files. Defaults to `GleanMetrics`.
+- `glean_namespace`: The package namespace of the glean library itself.
+ This is where glean objects will be imported from in the generated
+ code.
+
+JavaScript:
+- `platform`: The platform to build for. Options are `webext` and `qt`.
+  Default is `webext`.
+- `version`: The version of the Glean.js Qt library being used.
+  This option is mandatory when targeting Qt. Note that the version
+  string must only contain the major and minor version, e.g. 0.14.
+- `with_buildinfo`: If "true" a `gleanBuildInfo.(js|ts)` file is generated.
+ Otherwise generation of that file is skipped. Defaults to "false".
+- `build_date`: If set to `0` a static unix epoch time will be used.
+  If set to an ISO8601 datetime string (e.g. `2022-01-03T17:30:00`)
+ it will use that date.
+ Other values will throw an error.
+ If not set it will use the current date & time.
+
+Markdown:
+- `project_title`: The project's title.
+
+(press q to exit)"""
+
+ if value:
+ if value[0].lower() == "help":
+ pydoc.pager(text)
+ ctx.exit()
+ return value
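+
+
+# This function is intended as a click option callback, e.g. (sketch; assumes
+# click is imported and the option name is illustrative):
+#
+#     @click.option("--option", "-o", multiple=True, callback=translate_options)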
diff --git a/third_party/python/glean_parser/glean_parser/util.py b/third_party/python/glean_parser/glean_parser/util.py
new file mode 100644
index 0000000000..3b8b24cd78
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/util.py
@@ -0,0 +1,560 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from collections import OrderedDict
+import datetime
+import functools
+import json
+from pathlib import Path
+import sys
+import textwrap
+from typing import Any, Callable, Iterable, Sequence, Tuple, Union, Optional
+import urllib.request
+
+import appdirs # type: ignore
+import diskcache # type: ignore
+import jinja2
+import jsonschema # type: ignore
+from jsonschema import _utils # type: ignore
+import yaml
+
+if sys.version_info < (3, 7):
+ import iso8601 # type: ignore
+
+ def date_fromisoformat(datestr: str) -> datetime.date:
+ try:
+ return iso8601.parse_date(datestr).date()
+ except iso8601.ParseError:
+ raise ValueError()
+
+ def datetime_fromisoformat(datestr: str) -> datetime.datetime:
+ try:
+ return iso8601.parse_date(datestr)
+ except iso8601.ParseError:
+ raise ValueError()
+
+else:
+
+ def date_fromisoformat(datestr: str) -> datetime.date:
+ return datetime.date.fromisoformat(datestr)
+
+ def datetime_fromisoformat(datestr: str) -> datetime.datetime:
+ return datetime.datetime.fromisoformat(datestr)
+
+
+TESTING_MODE = "pytest" in sys.modules
+
+
+JSONType = Union[list, dict, str, int, float, None]
+"""
+The types supported by JSON.
+
+This is only an approximation -- this should really be a recursive type.
+"""
+
+# Adapted from
+# https://stackoverflow.com/questions/34667108/ignore-dates-and-times-while-parsing-yaml
+
+
+# A wrapper around OrderedDict for Python < 3.7 (where dict ordering is not
+# maintained by default), and regular dict everywhere else.
+if sys.version_info < (3, 7):
+
+ class DictWrapper(OrderedDict):
+ pass
+
+else:
+
+ class DictWrapper(dict):
+ pass
+
+
+class _NoDatesSafeLoader(yaml.SafeLoader):
+ @classmethod
+ def remove_implicit_resolver(cls, tag_to_remove):
+ """
+ Remove implicit resolvers for a particular tag
+
+ Takes care not to modify resolvers in super classes.
+
+ We want to load datetimes as strings, not dates, because we
+ go on to serialise as json which doesn't have the advanced types
+ of yaml, and leads to incompatibilities down the track.
+ """
+ if "yaml_implicit_resolvers" not in cls.__dict__:
+ cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
+
+ for first_letter, mappings in cls.yaml_implicit_resolvers.items():
+ cls.yaml_implicit_resolvers[first_letter] = [
+ (tag, regexp) for tag, regexp in mappings if tag != tag_to_remove
+ ]
+
+
+# Since we use JSON schema to validate, and JSON schema doesn't support
+# datetimes, we don't want the YAML loader to give us datetimes -- just
+# strings.
+_NoDatesSafeLoader.remove_implicit_resolver("tag:yaml.org,2002:timestamp")
+
+
+def yaml_load(stream):
+ """
+ Map line number to yaml nodes, and preserve the order
+ of metrics as they appear in the metrics.yaml file.
+ """
+
+ class SafeLineLoader(_NoDatesSafeLoader):
+ pass
+
+ def _construct_mapping_adding_line(loader, node):
+ loader.flatten_mapping(node)
+ mapping = DictWrapper(loader.construct_pairs(node))
+ mapping.defined_in = {"line": node.start_mark.line}
+ return mapping
+
+ SafeLineLoader.add_constructor(
+ yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_mapping_adding_line
+ )
+ return yaml.load(stream, SafeLineLoader)
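+
+# e.g. (sketch): yaml_load("metric:\n  type: counter") returns a DictWrapper
+# where each mapping carries a `defined_in["line"]` with its 0-based source line.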
+
+
+def ordered_yaml_dump(data, **kwargs):
+ class OrderedDumper(yaml.Dumper):
+ pass
+
+ def _dict_representer(dumper, data):
+ return dumper.represent_mapping(
+ yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, data.items()
+ )
+
+ OrderedDumper.add_representer(DictWrapper, _dict_representer)
+ return yaml.dump(data, Dumper=OrderedDumper, **kwargs)
+
+
+def load_yaml_or_json(path: Path):
+ """
+ Load the content from either a .json or .yaml file, based on the filename
+ extension.
+
+ :param path: `pathlib.Path` object
+    :return: The tree of objects as a result of parsing the file.
+    :raises ValueError: The file is neither a .json, .yml, .yaml nor .yamlx file.
+ :raises FileNotFoundError: The file does not exist.
+ """
+ # If in py.test, support bits of literal JSON/YAML content
+ if TESTING_MODE and isinstance(path, dict):
+ return yaml_load(yaml.dump(path))
+
+ if path.suffix == ".json":
+ with path.open("r", encoding="utf-8") as fd:
+ return json.load(fd)
+ elif path.suffix in (".yml", ".yaml", ".yamlx"):
+ with path.open("r", encoding="utf-8") as fd:
+ return yaml_load(fd)
+ else:
+ raise ValueError(f"Unknown file extension {path.suffix}")
+
+
+def ensure_list(value: Any) -> Sequence[Any]:
+ """
+ Ensures that the value is a list. If it is anything but a list or tuple, a
+ list with a single element containing only value is returned.
+ """
+ if not isinstance(value, (list, tuple)):
+ return [value]
+ return value
+
+
+def to_camel_case(input: str, capitalize_first_letter: bool) -> str:
+ """
+ Convert the value to camelCase.
+
+    This additionally replaces any '.' or '-' with '_'. The first letter is
+    capitalized depending on `capitalize_first_letter`.
+ """
+ sanitized_input = input.replace(".", "_").replace("-", "_")
+ # Filter out any empty token. This could happen due to leading '_' or
+ # consecutive '__'.
+ tokens = [s.capitalize() for s in sanitized_input.split("_") if len(s) != 0]
+ # If we're not meant to capitalize the first letter, then lowercase it.
+ if not capitalize_first_letter:
+ tokens[0] = tokens[0].lower()
+ # Finally join the tokens and capitalize.
+ return "".join(tokens)
+
+
+def camelize(value: str) -> str:
+ """
+ Convert the value to camelCase (with a lower case first letter).
+
+    This is a thin wrapper around `to_camel_case` that handles dots in
+    addition to underscores.
+ """
+ return to_camel_case(value, False)
+
+
+def Camelize(value: str) -> str:
+ """
+ Convert the value to CamelCase (with an upper case first letter).
+
+    This is a thin wrapper around `to_camel_case` that handles dots in
+    addition to underscores.
+ """
+ return to_camel_case(value, True)
+
+
+def snake_case(value: str) -> str:
+ """
+ Convert the value to snake_case.
+ """
+ return value.lower().replace(".", "_").replace("-", "_")
+
+
+def screaming_case(value: str) -> str:
+ """
+ Convert the value to SCREAMING_SNAKE_CASE.
+ """
+ return value.upper().replace(".", "_").replace("-", "_")
+
+
+@functools.lru_cache()
+def get_jinja2_template(
+ template_name: str, filters: Iterable[Tuple[str, Callable]] = ()
+):
+ """
+ Get a Jinja2 template that ships with glean_parser.
+
+ The template has extra filters for camel-casing identifiers.
+
+ :param template_name: Name of a file in ``glean_parser/templates``
+ :param filters: tuple of 2-tuple. A tuple of (name, func) pairs defining
+ additional filters.
+ """
+ env = jinja2.Environment(
+ loader=jinja2.PackageLoader("glean_parser", "templates"),
+ trim_blocks=True,
+ lstrip_blocks=True,
+ )
+
+ env.filters["camelize"] = camelize
+ env.filters["Camelize"] = Camelize
+ env.filters["scream"] = screaming_case
+ for filter_name, filter_func in filters:
+ env.filters[filter_name] = filter_func
+
+ return env.get_template(template_name)
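+
+# e.g. (sketch): get_jinja2_template("rust.jinja2",
+#     filters=(("snake_case", snake_case),)) loads
+# glean_parser/templates/rust.jinja2 with snake_case available as a filter;
+# `filters` must be a (hashable) tuple of pairs because of the lru_cache.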
+
+
+def keep_value(f):
+ """
+    Wrap a generator so the value it returns (rather than yields) will be
+    accessible on the .value attribute when the generator is exhausted.
+ """
+
+ class ValueKeepingGenerator(object):
+ def __init__(self, g):
+ self.g = g
+ self.value = None
+
+ def __iter__(self):
+ self.value = yield from self.g
+
+ @functools.wraps(f)
+ def g(*args, **kwargs):
+ return ValueKeepingGenerator(f(*args, **kwargs))
+
+ return g
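+
+# e.g. (sketch):
+#
+#     @keep_value
+#     def gen():
+#         yield 1
+#         return "done"
+#
+#     g = gen()
+#     list(g)   # [1]
+#     g.value   # "done"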
+
+
+def get_null_resolver(schema):
+ """
+ Returns a JSON Pointer resolver that does nothing.
+
+ This lets us handle the moz: URLs in our schemas.
+ """
+
+ class NullResolver(jsonschema.RefResolver):
+ def resolve_remote(self, uri):
+ if uri in self.store:
+ return self.store[uri]
+ if uri == "":
+ return self.referrer
+
+ return NullResolver.from_schema(schema)
+
+
+def fetch_remote_url(url: str, cache: bool = True):
+ """
+ Fetches the contents from an HTTP url or local file path, and optionally
+ caches it to disk.
+ """
+ # Include the Python version in the cache key, since caches aren't
+ # sharable across Python versions.
+ key = (url, str(sys.version_info))
+
+ is_http = url.startswith("http")
+
+ if not is_http:
+ with open(url, "r", encoding="utf-8") as fd:
+ return fd.read()
+
+ if cache:
+ cache_dir = appdirs.user_cache_dir("glean_parser", "mozilla")
+ with diskcache.Cache(cache_dir) as dc:
+ if key in dc:
+ return dc[key]
+
+    contents = urllib.request.urlopen(url).read()
+
+ if cache:
+ with diskcache.Cache(cache_dir) as dc:
+ dc[key] = contents
+
+ return contents
+
+
+_unset = _utils.Unset()
+
+
+def pprint_validation_error(error) -> str:
+ """
+ A version of jsonschema's ValidationError __str__ method that doesn't
+ include the schema fragment that failed. This makes the error messages
+ much more succinct.
+
+ It also shows any subschemas of anyOf/allOf that failed, if any (what
+ jsonschema calls "context").
+ """
+ essential_for_verbose = (
+ error.validator,
+ error.validator_value,
+ error.instance,
+ error.schema,
+ )
+ if any(m is _unset for m in essential_for_verbose):
+ return textwrap.fill(error.message)
+
+ instance = error.instance
+ for path in list(error.relative_path)[::-1]:
+ if isinstance(path, str):
+ instance = {path: instance}
+ else:
+ instance = [instance]
+
+ yaml_instance = ordered_yaml_dump(instance, width=72, default_flow_style=False)
+
+ parts = ["```", yaml_instance.rstrip(), "```", "", textwrap.fill(error.message)]
+ if error.context:
+ parts.extend(
+ textwrap.fill(x.message, initial_indent=" ", subsequent_indent=" ")
+ for x in error.context
+ )
+
+ description = error.schema.get("description")
+ if description:
+ parts.extend(
+ ["", "Documentation for this node:", textwrap.indent(description, " ")]
+ )
+
+ return "\n".join(parts)
+
+
+def format_error(
+ filepath: Union[str, Path],
+ header: str,
+ content: str,
+ lineno: Optional[int] = None,
+) -> str:
+ """
+    Format a jsonschema validation error.
+ """
+ if isinstance(filepath, Path):
+ filepath = filepath.resolve()
+ else:
+ filepath = "<string>"
+ if lineno:
+ filepath = f"{filepath}:{lineno}"
+ if header:
+ return f"{filepath}: {header}\n{textwrap.indent(content, ' ')}"
+ else:
+ return f"{filepath}:\n{textwrap.indent(content, ' ')}"
+
+
+def parse_expiration_date(expires: str) -> datetime.date:
+ """
+    Parses the `expires` field date (yyyy-mm-dd) as a date.
+ Raises a ValueError in case the string is not properly formatted.
+ """
+ try:
+ return date_fromisoformat(expires)
+ except (TypeError, ValueError):
+ raise ValueError(
+ f"Invalid expiration date '{expires}'. "
+ "Must be of the form yyyy-mm-dd in UTC."
+ )
+
+
+def parse_expiration_version(expires: str) -> int:
+ """
+    Parses the `expires` field version as an integer.
+    Raises a ValueError in case the value is not a valid
+    positive integer.
+ """
+ try:
+ if isinstance(expires, int):
+ version_number = int(expires)
+ if version_number > 0:
+ return version_number
+ # Fall-through: if it's not an integer or is not greater than zero,
+ # raise an error.
+ raise ValueError()
+ except ValueError:
+ raise ValueError(
+ f"Invalid expiration version '{expires}'. Must be a positive integer."
+ )
+
+
+def is_expired(expires: str, major_version: Optional[int] = None) -> bool:
+ """
+ Parses the `expires` field in a metric or ping and returns whether
+ the object should be considered expired.
+ """
+ if expires == "never":
+ return False
+ elif expires == "expired":
+ return True
+ elif major_version is not None:
+ return parse_expiration_version(expires) <= major_version
+ else:
+ date = parse_expiration_date(expires)
+ return date <= datetime.datetime.utcnow().date()
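+
+# e.g. (sketch): is_expired("2000-01-01") is True, is_expired("never") is
+# False, and is_expired(100, major_version=100) is True, because expiration
+# triggers once the product's major version reaches the `expires` value.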
+
+
+def validate_expires(expires: str, major_version: Optional[int] = None) -> None:
+ """
+ If expiration by major version is enabled, raises a ValueError in
+ case `expires` is not a positive integer.
+ Otherwise raises a ValueError in case the `expires` is not ISO8601
+ parseable, or in case the date is more than 730 days (~2 years) in
+ the future.
+ """
+ if expires in ("never", "expired"):
+ return
+
+ if major_version is not None:
+ parse_expiration_version(expires)
+ # Don't need to keep parsing dates if expiration by version
+ # is enabled. We don't allow mixing dates and versions for a
+ # single product.
+ return
+
+ date = parse_expiration_date(expires)
+ max_date = datetime.datetime.now() + datetime.timedelta(days=730)
+ if date > max_date.date():
+        raise ValueError(
+            f"'{expires}' is more than 730 days (~2 years) in the future. "
+            "Please make sure this is intentional. "
+            "You can suppress this warning by adding EXPIRATION_DATE_TOO_FAR "
+            "to no_lint. "
+            "See: https://mozilla.github.io/glean_parser/metrics-yaml.html#no_lint"
+        )
+
+
+def build_date(date: Optional[str]) -> datetime.datetime:
+ """
+ Generate the build timestamp.
+
+ If `date` is set to `0` a static unix epoch time will be used.
+    If `date` is set to an ISO8601 datetime string (e.g. `2022-01-03T17:30:00`)
+ it will use that date.
+ Note that any timezone offset will be ignored and UTC will be used.
+ Otherwise it will throw an error.
+
+ If `date` is `None` it will use the current date & time.
+ """
+
+ if date is not None:
+ date = str(date)
+ if date == "0":
+ ts = datetime.datetime(1970, 1, 1, 0, 0, 0)
+ else:
+ ts = datetime_fromisoformat(date).replace(tzinfo=datetime.timezone.utc)
+ else:
+ ts = datetime.datetime.utcnow()
+
+ return ts
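+
+# e.g. (sketch): build_date("0") yields the unix epoch as a naive datetime
+# (datetime(1970, 1, 1, 0, 0, 0)), while build_date("2022-01-03T17:30:00")
+# yields that instant tagged as UTC.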
+
+
+def report_validation_errors(all_objects):
+ """
+ Report any validation errors found to the console.
+
+ Returns the number of errors reported.
+ """
+ found_errors = 0
+ for error in all_objects:
+ found_errors += 1
+ print("=" * 78, file=sys.stderr)
+ print(error, file=sys.stderr)
+ return found_errors
+
+
+def remove_output_params(d, output_params):
+ """
+ Remove output-only params, such as "defined_in",
+ in order to validate the output against the input schema.
+ """
+ modified_dict = {}
+ for key, value in d.items():
+        if key != output_params:
+ modified_dict[key] = value
+ return modified_dict
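+
+# e.g. (sketch): remove_output_params({"a": 1, "defined_in": {"line": 3}},
+# "defined_in") returns {"a": 1}.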
+
+
+# Names of parameters to pass to all metric constructors.
+common_metric_args = [
+ "category",
+ "name",
+ "send_in_pings",
+ "lifetime",
+ "disabled",
+]
+
+
+# Names of parameters that only apply to some of the metric types.
+# **CAUTION**: This list needs to be in the order the Swift & Rust type
+# constructors expect them. (The other language bindings don't care about the order.)
+extra_metric_args = [
+ "time_unit",
+ "memory_unit",
+ "allowed_extra_keys",
+ "reason_codes",
+ "range_min",
+ "range_max",
+ "bucket_count",
+ "histogram_type",
+ "numerators",
+]
+
+
+# This includes only things that the language bindings care about, not things
+# that are metadata-only or are resolved into other parameters at parse time.
+# **CAUTION**: This list needs to be in the order the Swift & Rust type
+# constructors expect them. (The other language bindings don't care about the
+# order.) The `test_order_of_fields` test checks that the generated code is valid.
+# **DO NOT CHANGE THE ORDER OR ADD NEW FIELDS IN THE MIDDLE**
+metric_args = common_metric_args + extra_metric_args
+
+
+# Names of ping parameters to pass to constructors.
+ping_args = [
+ "name",
+ "include_client_id",
+ "send_if_empty",
+ "reason_codes",
+]
+
+
+# Names of parameters to pass to both metric and ping constructors (no duplicates).
+extra_args = metric_args + [v for v in ping_args if v not in metric_args]
diff --git a/third_party/python/glean_parser/glean_parser/validate_ping.py b/third_party/python/glean_parser/glean_parser/validate_ping.py
new file mode 100644
index 0000000000..33598149eb
--- /dev/null
+++ b/third_party/python/glean_parser/glean_parser/validate_ping.py
@@ -0,0 +1,74 @@
+# -*- coding: utf-8 -*-
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Validates the contents of a Glean ping against the schema.
+"""
+
+import functools
+import io
+import json
+from pathlib import Path
+import sys
+
+import jsonschema # type: ignore
+
+from . import util
+
+
+ROOT_DIR = Path(__file__).parent
+SCHEMAS_DIR = ROOT_DIR / "schemas"
+
+
+@functools.lru_cache(maxsize=1)
+def _get_ping_schema(schema_url):
+ contents = util.fetch_remote_url(schema_url)
+ return json.loads(contents)
+
+
+def _validate_ping(ins, outs, schema_url):
+ schema = _get_ping_schema(schema_url)
+
+ resolver = util.get_null_resolver(schema)
+
+ document = json.load(ins)
+
+ validator_class = jsonschema.validators.validator_for(schema)
+ validator = validator_class(schema, resolver=resolver)
+
+ has_error = 0
+ for error in validator.iter_errors(document):
+ outs.write("=" * 76)
+ outs.write("\n")
+ outs.write(util.format_error("", "", util.pprint_validation_error(error)))
+ outs.write("\n")
+ has_error = 1
+
+ return has_error
+
+
+def validate_ping(ins, outs=None, schema_url=None):
+ """
+ Validates the contents of a Glean ping.
+
+ :param ins: Input stream or file path to the ping contents to validate
+ :param outs: Output stream to write errors to. (Defaults to stdout)
+ :param schema_url: HTTP URL or local filesystem path to Glean ping schema.
+ Defaults to the current version of the schema in
+ mozilla-pipeline-schemas.
+    :return: 1 if any errors occurred, otherwise 0.
+ """
+ if schema_url is None:
+ raise TypeError("Missing required argument 'schema_url'")
+
+ if outs is None:
+ outs = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
+
+ if isinstance(ins, (str, bytes, Path)):
+ with open(ins, "r", encoding="utf-8") as fd:
+ return _validate_ping(fd, outs, schema_url=schema_url)
+ else:
+ return _validate_ping(ins, outs, schema_url=schema_url)
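+
+
+# A minimal usage sketch (hypothetical path and schema URL, not part of this
+# module):
+#
+#     rc = validate_ping(
+#         "ping.json",
+#         schema_url="https://example.org/schemas/glean.1.schema.json",
+#     )
+#     assert rc in (0, 1)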
diff --git a/third_party/python/gyp/.gitignore b/third_party/python/gyp/.gitignore
new file mode 100644
index 0000000000..0d20b6487c
--- /dev/null
+++ b/third_party/python/gyp/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/third_party/python/gyp/AUTHORS b/third_party/python/gyp/AUTHORS
new file mode 100644
index 0000000000..9e742f2966
--- /dev/null
+++ b/third_party/python/gyp/AUTHORS
@@ -0,0 +1,17 @@
+# Names should be added to this file like so:
+# Name or Organization <email address>
+
+Google Inc. <*@google.com>
+Bloomberg Finance L.P. <*@bloomberg.net>
+IBM Inc. <*@*.ibm.com>
+Yandex LLC <*@yandex-team.ru>
+
+Steven Knight <knight@baldmt.com>
+Ryan Norton <rnorton10@gmail.com>
+David J. Sankel <david@sankelsoftware.com>
+Eric N. Vander Weele <ericvw@gmail.com>
+Tom Freudenberg <th.freudenberg@gmail.com>
+Julien Brianceau <jbriance@cisco.com>
+Refael Ackermann <refack@gmail.com>
+Jiajie Hu <jiajie.hu@intel.com>
+Philip Nery <pbfnery@gmail.com>
diff --git a/third_party/python/gyp/DEPS b/third_party/python/gyp/DEPS
new file mode 100644
index 0000000000..167fb779b0
--- /dev/null
+++ b/third_party/python/gyp/DEPS
@@ -0,0 +1,23 @@
+# DEPS file for gclient use in buildbot execution of gyp tests.
+#
+# (You don't need to use gclient for normal GYP development work.)
+
+vars = {
+ "chromium_git": "https://chromium.googlesource.com/",
+}
+
+deps = {
+}
+
+deps_os = {
+ "win": {
+ "third_party/cygwin":
+ Var("chromium_git") + "chromium/deps/cygwin@4fbd5b9",
+
+ "third_party/python_26":
+ Var("chromium_git") + "chromium/deps/python_26@5bb4080",
+
+ "src/third_party/pefile":
+ Var("chromium_git") + "external/pefile@72c6ae4",
+ },
+}
diff --git a/third_party/python/gyp/LICENSE b/third_party/python/gyp/LICENSE
new file mode 100644
index 0000000000..ab6b011a10
--- /dev/null
+++ b/third_party/python/gyp/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/gyp/OWNERS b/third_party/python/gyp/OWNERS
new file mode 100644
index 0000000000..72e8ffc0db
--- /dev/null
+++ b/third_party/python/gyp/OWNERS
@@ -0,0 +1 @@
+*
diff --git a/third_party/python/gyp/PRESUBMIT.py b/third_party/python/gyp/PRESUBMIT.py
new file mode 100644
index 0000000000..5ee669b595
--- /dev/null
+++ b/third_party/python/gyp/PRESUBMIT.py
@@ -0,0 +1,125 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+"""Top-level presubmit script for GYP.
+
+See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
+for more details about the presubmit API built into gcl.
+"""
+
+
+PYLINT_BLACKLIST = [
+ # TODO: fix me.
+ # From SCons, not done in google style.
+ 'test/lib/TestCmd.py',
+ 'test/lib/TestCommon.py',
+ 'test/lib/TestGyp.py',
+]
+
+
+PYLINT_DISABLED_WARNINGS = [
+ # TODO: fix me.
+ # Many tests include modules they don't use.
+ 'W0611',
+ # Possible unbalanced tuple unpacking with sequence.
+ 'W0632',
+ # Attempting to unpack a non-sequence.
+ 'W0633',
+ # Include order doesn't properly include local files?
+ 'F0401',
+ # Some use of built-in names.
+ 'W0622',
+ # Some unused variables.
+ 'W0612',
+ # Operator not preceded/followed by space.
+ 'C0323',
+ 'C0322',
+ # Unnecessary semicolon.
+ 'W0301',
+ # Unused argument.
+ 'W0613',
+ # String has no effect (docstring in wrong place).
+ 'W0105',
+ # map/filter on lambda could be replaced by comprehension.
+ 'W0110',
+ # Use of eval.
+ 'W0123',
+ # Comma not followed by space.
+ 'C0324',
+ # Access to a protected member.
+ 'W0212',
+ # Bad indent.
+ 'W0311',
+ # Line too long.
+ 'C0301',
+ # Undefined variable.
+ 'E0602',
+  # No exception type specified.
+ 'W0702',
+ # No member of that name.
+ 'E1101',
+ # Dangerous default {}.
+ 'W0102',
+ # Cyclic import.
+ 'R0401',
+ # Others, too many to sort.
+ 'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
+ 'R0201', 'E0101', 'C0321',
+ # ************* Module copy
+ # W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
+ 'W0104',
+]
+
+
+def _LicenseHeader(input_api):
+ # Accept any year number from 2009 to the current year.
+ current_year = int(input_api.time.strftime('%Y'))
+ allowed_years = (str(s) for s in reversed(range(2009, current_year + 1)))
+ years_re = '(' + '|'.join(allowed_years) + ')'
+
+ # The (c) is deprecated, but tolerate it until it's removed from all files.
+ return (
+ r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
+ r'.*? Use of this source code is governed by a BSD-style license that '
+ r'can be\n'
+ r'.*? found in the LICENSE file\.\n'
+ ) % {
+ 'year': years_re,
+ }
+
+def CheckChangeOnUpload(input_api, output_api):
+ report = []
+ report.extend(input_api.canned_checks.PanProjectChecks(
+ input_api, output_api, license_header=_LicenseHeader(input_api)))
+ return report
+
+
+def CheckChangeOnCommit(input_api, output_api):
+ report = []
+
+ report.extend(input_api.canned_checks.PanProjectChecks(
+ input_api, output_api, license_header=_LicenseHeader(input_api)))
+ report.extend(input_api.canned_checks.CheckTreeIsOpen(
+ input_api, output_api,
+ 'http://gyp-status.appspot.com/status',
+ 'http://gyp-status.appspot.com/current'))
+
+ import os
+ import sys
+ old_sys_path = sys.path
+ try:
+ sys.path = ['pylib', 'test/lib'] + sys.path
+ blacklist = PYLINT_BLACKLIST
+ if sys.platform == 'win32':
+ blacklist = [os.path.normpath(x).replace('\\', '\\\\')
+ for x in PYLINT_BLACKLIST]
+ report.extend(input_api.canned_checks.RunPylint(
+ input_api,
+ output_api,
+ black_list=blacklist,
+ disabled_warnings=PYLINT_DISABLED_WARNINGS))
+ finally:
+ sys.path = old_sys_path
+ return report
diff --git a/third_party/python/gyp/README.md b/third_party/python/gyp/README.md
new file mode 100644
index 0000000000..b4766c9d63
--- /dev/null
+++ b/third_party/python/gyp/README.md
@@ -0,0 +1,5 @@
+GYP can Generate Your Projects.
+===================================
+
+Documents are available at [gyp.gsrc.io](https://gyp.gsrc.io), or you can
+check out the ```md-pages``` branch to read those documents offline.
diff --git a/third_party/python/gyp/buildbot/buildbot_run.py b/third_party/python/gyp/buildbot/buildbot_run.py
new file mode 100755
index 0000000000..89416520d3
--- /dev/null
+++ b/third_party/python/gyp/buildbot/buildbot_run.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Argument-less script to select what to run on the buildbots."""
+
+from __future__ import print_function
+
+import os
+import shutil
+import subprocess
+import sys
+
+
+BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
+TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
+ROOT_DIR = os.path.dirname(TRUNK_DIR)
+CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
+CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
+OUT_DIR = os.path.join(TRUNK_DIR, 'out')
+
+
+def CallSubProcess(*args, **kwargs):
+ """Wrapper around subprocess.call which treats errors as build exceptions."""
+ with open(os.devnull) as devnull_fd:
+ retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
+ if retcode != 0:
+ print('@@@STEP_EXCEPTION@@@')
+ sys.exit(1)
+
+
+def PrepareCmake():
+ """Build CMake 2.8.8 since the version in Precise is 2.8.7."""
+ if os.environ['BUILDBOT_CLOBBER'] == '1':
+ print('@@@BUILD_STEP Clobber CMake checkout@@@')
+ shutil.rmtree(CMAKE_DIR)
+
+ # We always build CMake 2.8.8, so no need to do anything
+ # if the directory already exists.
+ if os.path.isdir(CMAKE_DIR):
+ return
+
+ print('@@@BUILD_STEP Initialize CMake checkout@@@')
+ os.mkdir(CMAKE_DIR)
+
+ print('@@@BUILD_STEP Sync CMake@@@')
+ CallSubProcess(
+ ['git', 'clone',
+ '--depth', '1',
+ '--single-branch',
+ '--branch', 'v2.8.8',
+ '--',
+ 'git://cmake.org/cmake.git',
+ CMAKE_DIR],
+ cwd=CMAKE_DIR)
+
+ print('@@@BUILD_STEP Build CMake@@@')
+ CallSubProcess(
+ ['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
+ cwd=CMAKE_DIR)
+
+  CallSubProcess(['make', 'cmake'], cwd=CMAKE_DIR)
+
+
+def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
+ """Run the gyp tests for a given format, emitting annotator tags.
+
+ See annotator docs at:
+ https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
+  Args:
+    title: build step title; also used as the gyp format if `format` is
+      not given.
+    format: gyp format to test.
+    msvs_version: if set, exported as GYP_MSVS_VERSION for the test run.
+    tests: specific test scripts to run (defaults to all).
+  Returns:
+    0 for success, 1 for failure.
+ """
+ if not format:
+ format = title
+
+ print('@@@BUILD_STEP ' + title + '@@@')
+ sys.stdout.flush()
+ env = os.environ.copy()
+ if msvs_version:
+ env['GYP_MSVS_VERSION'] = msvs_version
+ command = ' '.join(
+ [sys.executable, 'gyp/gyptest.py',
+ '--all',
+ '--passed',
+ '--format', format,
+ '--path', CMAKE_BIN_DIR,
+ '--chdir', 'gyp'] + tests)
+ retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
+ if retcode:
+ # Emit failure tag, and keep going.
+ print('@@@STEP_FAILURE@@@')
+ return 1
+ return 0
+
+
+def GypBuild():
+ # Dump out/ directory.
+ print('@@@BUILD_STEP cleanup@@@')
+ print('Removing %s...' % OUT_DIR)
+ shutil.rmtree(OUT_DIR, ignore_errors=True)
+ print('Done.')
+
+ retcode = 0
+ if sys.platform.startswith('linux'):
+ retcode += GypTestFormat('ninja')
+ retcode += GypTestFormat('make')
+ PrepareCmake()
+ retcode += GypTestFormat('cmake')
+ elif sys.platform == 'darwin':
+ retcode += GypTestFormat('ninja')
+ retcode += GypTestFormat('xcode')
+ retcode += GypTestFormat('make')
+ elif sys.platform == 'win32':
+ retcode += GypTestFormat('ninja')
+ if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
+ retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja',
+ msvs_version='2013',
+ tests=[
+ r'test\generator-output\gyptest-actions.py',
+ r'test\generator-output\gyptest-relocate.py',
+ r'test\generator-output\gyptest-rules.py'])
+ retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013')
+ else:
+ raise Exception('Unknown platform')
+ if retcode:
+ # TODO(bradnelson): once the annotator supports a postscript (section for
+ # after the build proper that could be used for cumulative failures),
+ # use that instead of this. This isolates the final return value so
+ # that it isn't misattributed to the last stage.
+ print('@@@BUILD_STEP failures@@@')
+ sys.exit(retcode)
+
+
+if __name__ == '__main__':
+ GypBuild()
diff --git a/third_party/python/gyp/buildbot/commit_queue/OWNERS b/third_party/python/gyp/buildbot/commit_queue/OWNERS
new file mode 100644
index 0000000000..b269c198b4
--- /dev/null
+++ b/third_party/python/gyp/buildbot/commit_queue/OWNERS
@@ -0,0 +1,6 @@
+set noparent
+bradnelson@chromium.org
+bradnelson@google.com
+iannucci@chromium.org
+scottmg@chromium.org
+thakis@chromium.org
diff --git a/third_party/python/gyp/buildbot/commit_queue/README b/third_party/python/gyp/buildbot/commit_queue/README
new file mode 100644
index 0000000000..9428497883
--- /dev/null
+++ b/third_party/python/gyp/buildbot/commit_queue/README
@@ -0,0 +1,3 @@
+cq_config.json describes the trybots that must pass in order
+to land a change through the commit queue.
+Comments are here as the file is strictly JSON.
diff --git a/third_party/python/gyp/buildbot/commit_queue/cq_config.json b/third_party/python/gyp/buildbot/commit_queue/cq_config.json
new file mode 100644
index 0000000000..656c21e54f
--- /dev/null
+++ b/third_party/python/gyp/buildbot/commit_queue/cq_config.json
@@ -0,0 +1,15 @@
+{
+ "trybots": {
+ "launched": {
+ "tryserver.nacl": {
+ "gyp-presubmit": ["defaulttests"],
+ "gyp-linux": ["defaulttests"],
+ "gyp-mac": ["defaulttests"],
+ "gyp-win32": ["defaulttests"],
+ "gyp-win64": ["defaulttests"]
+ }
+ },
+ "triggered": {
+ }
+ }
+}
diff --git a/third_party/python/gyp/buildbot/travis-checkout.sh b/third_party/python/gyp/buildbot/travis-checkout.sh
new file mode 100755
index 0000000000..bc42f43754
--- /dev/null
+++ b/third_party/python/gyp/buildbot/travis-checkout.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+# Copyright 2018 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -ex
+
+get_depot_tools() {
+ cd
+ git clone https://chromium.googlesource.com/chromium/tools/depot_tools.git
+ export PATH="$HOME/depot_tools:$PATH"
+}
+
+gclient_sync() {
+ cd "${TRAVIS_BUILD_DIR}"/..
+ gclient config --unmanaged https://github.com/chromium/gyp.git
+ gclient sync
+ cd gyp
+}
+
+main() {
+ get_depot_tools
+ gclient_sync
+}
+
+main "$@"
diff --git a/third_party/python/gyp/buildbot/travis-test.sh b/third_party/python/gyp/buildbot/travis-test.sh
new file mode 100755
index 0000000000..4bd69df244
--- /dev/null
+++ b/third_party/python/gyp/buildbot/travis-test.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# Copyright 2018 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+main() {
+ export PATH="$HOME/depot_tools:$PATH"
+ ./gyptest.py -a -f ninja
+}
+
+main "$@"
diff --git a/third_party/python/gyp/codereview.settings b/third_party/python/gyp/codereview.settings
new file mode 100644
index 0000000000..27fb9f99e2
--- /dev/null
+++ b/third_party/python/gyp/codereview.settings
@@ -0,0 +1,6 @@
+# This file is used by git cl to get repository specific information.
+CC_LIST: gyp-developer@googlegroups.com
+CODE_REVIEW_SERVER: codereview.chromium.org
+GERRIT_HOST: True
+PROJECT: gyp
+VIEW_VC: https://chromium.googlesource.com/external/gyp/+/
diff --git a/third_party/python/gyp/data/win/large-pdb-shim.cc b/third_party/python/gyp/data/win/large-pdb-shim.cc
new file mode 100644
index 0000000000..8bca510815
--- /dev/null
+++ b/third_party/python/gyp/data/win/large-pdb-shim.cc
@@ -0,0 +1,12 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file is used to generate an empty .pdb -- with a 4KB pagesize -- that is
+// then used during the final link for modules that have large PDBs. Otherwise,
+// the linker will generate a pdb with a page size of 1KB, which imposes a limit
+// of 1GB on the .pdb. By generating an initial empty .pdb with the compiler
+// (rather than the linker), this limit is avoided. With this in place PDBs may
+// grow to 2GB.
+//
+// This file is referenced by the msvs_large_pdb mechanism in MSVSUtil.py.
diff --git a/third_party/python/gyp/gyp b/third_party/python/gyp/gyp
new file mode 100755
index 0000000000..1c6e2d2a61
--- /dev/null
+++ b/third_party/python/gyp/gyp
@@ -0,0 +1,13 @@
+#!/bin/sh
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -eu
+base=$(dirname "$0")
+if type python3 >/dev/null 2>&1; then
+ python=python3
+else
+ python=python
+fi
+exec "${python}" "${base}/gyp_main.py" "$@"
diff --git a/third_party/python/gyp/gyp.bat b/third_party/python/gyp/gyp.bat
new file mode 100755
index 0000000000..c0b4ca24e5
--- /dev/null
+++ b/third_party/python/gyp/gyp.bat
@@ -0,0 +1,5 @@
+@rem Copyright (c) 2009 Google Inc. All rights reserved.
+@rem Use of this source code is governed by a BSD-style license that can be
+@rem found in the LICENSE file.
+
+@python "%~dp0gyp_main.py" %*
diff --git a/third_party/python/gyp/gyp_main.py b/third_party/python/gyp/gyp_main.py
new file mode 100755
index 0000000000..a7a0066bc7
--- /dev/null
+++ b/third_party/python/gyp/gyp_main.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python3
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+# Make sure we're using the version of pylib in this repo, not one installed
+# elsewhere on the system.
+sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
+import gyp
+
+if __name__ == '__main__':
+ sys.exit(gyp.script_main())
diff --git a/third_party/python/gyp/gyptest.py b/third_party/python/gyp/gyptest.py
new file mode 100755
index 0000000000..1a9ffca7a1
--- /dev/null
+++ b/third_party/python/gyp/gyptest.py
@@ -0,0 +1,243 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""gyptest.py -- test runner for GYP tests."""
+
+from __future__ import print_function
+
+import argparse
+import math
+import os
+import platform
+import subprocess
+import sys
+import time
+
+
+def is_test_name(f):
+ return f.startswith('gyptest') and f.endswith('.py')
+
+
+def find_all_gyptest_files(directory):
+ result = []
+ for root, dirs, files in os.walk(directory):
+ result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
+ result.sort()
+ return result
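+
+# e.g. (sketch): find_all_gyptest_files('test') walks 'test/' and returns
+# every gyptest*.py path found, sorted.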
+
+
+def main(argv=None):
+ if argv is None:
+ argv = sys.argv
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-a", "--all", action="store_true",
+ help="run all tests")
+ parser.add_argument("-C", "--chdir", action="store",
+ help="change to directory")
+ parser.add_argument("-f", "--format", action="store", default='',
+ help="run tests with the specified formats")
+ parser.add_argument("-G", '--gyp_option', action="append", default=[],
+ help="Add -G options to the gyp command line")
+ parser.add_argument("-l", "--list", action="store_true",
+ help="list available tests and exit")
+ parser.add_argument("-n", "--no-exec", action="store_true",
+ help="no execute, just print the command line")
+ parser.add_argument("--path", action="append", default=[],
+ help="additional $PATH directory")
+ parser.add_argument("-q", "--quiet", action="store_true",
+ help="quiet, don't print anything unless there are failures")
+ parser.add_argument("-v", "--verbose", action="store_true",
+ help="print configuration info and test results.")
+ parser.add_argument('tests', nargs='*')
+ args = parser.parse_args(argv[1:])
+
+ if args.chdir:
+ os.chdir(args.chdir)
+
+ if args.path:
+ extra_path = [os.path.abspath(p) for p in args.path]
+ extra_path = os.pathsep.join(extra_path)
+ os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
+
+ if not args.tests:
+ if not args.all:
+ sys.stderr.write('Specify -a to get all tests.\n')
+ return 1
+ args.tests = ['test']
+
+ tests = []
+ for arg in args.tests:
+ if os.path.isdir(arg):
+ tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
+ else:
+ if not is_test_name(os.path.basename(arg)):
+ print(arg, 'is not a valid gyp test name.', file=sys.stderr)
+ sys.exit(1)
+ tests.append(arg)
+
+ if args.list:
+ for test in tests:
+ print(test)
+ sys.exit(0)
+
+ os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
+
+ if args.verbose:
+ print_configuration_info()
+
+ if args.gyp_option and not args.quiet:
+ print('Extra Gyp options: %s\n' % args.gyp_option)
+
+ if args.format:
+ format_list = args.format.split(',')
+ else:
+ format_list = {
+ 'aix5': ['make'],
+ 'freebsd7': ['make'],
+ 'freebsd8': ['make'],
+ 'openbsd5': ['make'],
+ 'cygwin': ['msvs'],
+ 'win32': ['msvs', 'ninja'],
+ 'linux': ['make', 'ninja'],
+ 'linux2': ['make', 'ninja'],
+ 'linux3': ['make', 'ninja'],
+
+ # TODO: Re-enable xcode-ninja.
+ # https://bugs.chromium.org/p/gyp/issues/detail?id=530
+ # 'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
+ 'darwin': ['make', 'ninja', 'xcode'],
+ }[sys.platform]
+
+ gyp_options = []
+ for option in args.gyp_option:
+ gyp_options += ['-G', option]
+
+ runner = Runner(format_list, tests, gyp_options, args.verbose)
+ runner.run()
+
+ if not args.quiet:
+ runner.print_results()
+
+ if runner.failures:
+ return 1
+ else:
+ return 0
+
+
+def print_configuration_info():
+ print('Test configuration:')
+ if sys.platform == 'darwin':
+ sys.path.append(os.path.abspath('test/lib'))
+ import TestMac
+ print(' Mac %s %s' % (platform.mac_ver()[0], platform.mac_ver()[2]))
+ print(' Xcode %s' % TestMac.Xcode.Version())
+ elif sys.platform == 'win32':
+ sys.path.append(os.path.abspath('pylib'))
+ import gyp.MSVSVersion
+ print(' Win %s %s\n' % platform.win32_ver()[0:2])
+ print(' MSVS %s' %
+ gyp.MSVSVersion.SelectVisualStudioVersion().Description())
+ elif sys.platform in ('linux', 'linux2'):
+ print(' Linux %s' % ' '.join(platform.linux_distribution()))
+ print(' Python %s' % platform.python_version())
+ print(' PYTHONPATH=%s' % os.environ['PYTHONPATH'])
+ print()
+
+
+class Runner(object):
+ def __init__(self, formats, tests, gyp_options, verbose):
+ self.formats = formats
+ self.tests = tests
+ self.verbose = verbose
+ self.gyp_options = gyp_options
+ self.failures = []
+ self.num_tests = len(formats) * len(tests)
+ num_digits = len(str(self.num_tests))
+ self.fmt_str = '[%%%dd/%%%dd] (%%s) %%s' % (num_digits, num_digits)
+ self.isatty = sys.stdout.isatty() and not self.verbose
+ self.env = os.environ.copy()
+ self.hpos = 0
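+  # Progress-line illustration: with e.g. 243 total tests, num_digits is 3,
+  # so fmt_str renders lines like '[ 12/243] (ninja) gyptest-foo.py'
+  # (the test name here is hypothetical).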
+
+ def run(self):
+ run_start = time.time()
+
+ i = 1
+ for fmt in self.formats:
+ for test in self.tests:
+ self.run_test(test, fmt, i)
+ i += 1
+
+ if self.isatty:
+ self.erase_current_line()
+
+ self.took = time.time() - run_start
+
+ def run_test(self, test, fmt, i):
+ if self.isatty:
+ self.erase_current_line()
+
+ msg = self.fmt_str % (i, self.num_tests, fmt, test)
+ self.print_(msg)
+
+ start = time.time()
+ cmd = [sys.executable, test] + self.gyp_options
+ self.env['TESTGYP_FORMAT'] = fmt
+ proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT, env=self.env)
+    # Drain the pipe while waiting; wait() alone can deadlock if the test
+    # writes more output than the pipe buffer holds.
+    stdout_bytes, _ = proc.communicate()
+    took = time.time() - start
+
+    stdout = stdout_bytes.decode('utf8')
+ if proc.returncode == 2:
+ res = 'skipped'
+ elif proc.returncode:
+ res = 'failed'
+ self.failures.append('(%s) %s' % (test, fmt))
+ else:
+ res = 'passed'
+ res_msg = ' %s %.3fs' % (res, took)
+ self.print_(res_msg)
+
+ if (stdout and
+ not stdout.endswith('PASSED\n') and
+        not stdout.endswith('NO RESULT\n')):
+ print()
+ for l in stdout.splitlines():
+ print(' %s' % l)
+ elif not self.isatty:
+ print()
+
+ def print_(self, msg):
+ print(msg, end='')
+ index = msg.rfind('\n')
+ if index == -1:
+ self.hpos += len(msg)
+ else:
+ self.hpos = len(msg) - index
+ sys.stdout.flush()
+
+ def erase_current_line(self):
+ print('\b' * self.hpos + ' ' * self.hpos + '\b' * self.hpos, end='')
+ sys.stdout.flush()
+ self.hpos = 0
+
+ def print_results(self):
+ num_failures = len(self.failures)
+ if num_failures:
+ print()
+ if num_failures == 1:
+ print("Failed the following test:")
+ else:
+ print("Failed the following %d tests:" % num_failures)
+ print("\t" + "\n\t".join(sorted(self.failures)))
+ print()
+ print('Ran %d tests in %.3fs, %d failed.' % (self.num_tests, self.took,
+ num_failures))
+ print()
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/third_party/python/gyp/pylib/gyp/MSVSNew.py b/third_party/python/gyp/pylib/gyp/MSVSNew.py
new file mode 100644
index 0000000000..73182ec880
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/MSVSNew.py
@@ -0,0 +1,353 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""New implementation of Visual Studio project generation."""
+
+import os
+import random
+import sys
+
+import gyp.common
+
+# hashlib is supplied as of Python 2.5 as the replacement interface for md5
+# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
+# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
+# preserving 2.4 compatibility.
+try:
+ import hashlib
+ _new_md5 = hashlib.md5
+except ImportError:
+ import md5
+ _new_md5 = md5.new
+
+
+try:
+ # cmp was removed in python3.
+ cmp
+except NameError:
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+# Initialize random number generator
+random.seed()
+
+# GUIDs for project types
+ENTRY_TYPE_GUIDS = {
+ 'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
+ 'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
+}
+
+#------------------------------------------------------------------------------
+# Helper functions
+
+
+def MakeGuid(name, seed='msvs_new'):
+ """Returns a GUID for the specified target name.
+
+ Args:
+ name: Target name.
+ seed: Seed for MD5 hash.
+ Returns:
+    A GUID-like string calculated from the name and seed.
+
+ This generates something which looks like a GUID, but depends only on the
+ name and seed. This means the same name/seed will always generate the same
+  GUID, so that projects and solutions which refer to each other can
+  determine each other's GUIDs explicitly. It also means that the GUID will
+ not change when the project for a target is rebuilt.
+ """
+
+ to_hash = str(seed) + str(name)
+ to_hash = to_hash.encode('utf-8')
+ # Calculate a MD5 signature for the seed and name.
+ d = _new_md5(to_hash).hexdigest().upper()
+ # Convert most of the signature to GUID form (discard the rest)
+ guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ + '-' + d[20:32] + '}')
+ return guid
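+# For example (target name illustrative), MakeGuid('base_unittests') always
+# returns the same '{8-4-4-4-12}'-grouped string of 32 uppercase hex digits,
+# and changing either the name or the seed changes the result.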
+
+#------------------------------------------------------------------------------
+
+
+class MSVSSolutionEntry(object):
+ def __cmp__(self, other):
+ # Sort by name then guid (so things are in order on vs2008).
+ return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
+
+ def __lt__(self, other):
+ return (self.name, self.get_guid()) < (other.name, other.get_guid())
+
+class MSVSFolder(MSVSSolutionEntry):
+ """Folder in a Visual Studio project or solution."""
+
+  def __init__(self, path, name=None, entries=None,
+               guid=None, items=None):
+ """Initializes the folder.
+
+ Args:
+ path: Full path to the folder.
+ name: Name of the folder.
+ entries: List of folder entries to nest inside this folder. May contain
+ Folder or Project objects. May be None, if the folder is empty.
+ guid: GUID to use for folder, if not None.
+ items: List of solution items to include in the folder project. May be
+ None, if the folder does not directly contain items.
+ """
+ if name:
+ self.name = name
+ else:
+      # Use the last path component as the name.
+ self.name = os.path.basename(path)
+
+ self.path = path
+ self.guid = guid
+
+ # Copy passed lists (or set to empty lists)
+ self.entries = sorted(list(entries or []))
+ self.items = list(items or [])
+
+ self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
+
+ def get_guid(self):
+ if self.guid is None:
+ # Use consistent guids for folders (so things don't regenerate).
+ self.guid = MakeGuid(self.path, seed='msvs_folder')
+ return self.guid
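+  # Because the GUID is an MD5 of the folder path with a fixed seed, two
+  # MSVSFolder objects created from the same (illustrative) path such as
+  # 'src\\utils' always report the same GUID across regenerations.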
+
+
+#------------------------------------------------------------------------------
+
+
+class MSVSProject(MSVSSolutionEntry):
+ """Visual Studio project."""
+
+  def __init__(self, path, name=None, dependencies=None, guid=None,
+               spec=None, build_file=None, config_platform_overrides=None,
+               fixpath_prefix=None):
+ """Initializes the project.
+
+ Args:
+ path: Absolute path to the project file.
+ name: Name of project. If None, the name will be the same as the base
+ name of the project file.
+ dependencies: List of other Project objects this project is dependent
+ upon, if not None.
+ guid: GUID to use for project, if not None.
+ spec: Dictionary specifying how to build this project.
+ build_file: Filename of the .gyp file that the vcproj file comes from.
+ config_platform_overrides: optional dict of configuration platforms to
+          use in place of the default for this target.
+ fixpath_prefix: the path used to adjust the behavior of _fixpath
+ """
+ self.path = path
+ self.guid = guid
+ self.spec = spec
+ self.build_file = build_file
+ # Use project filename if name not specified
+ self.name = name or os.path.splitext(os.path.basename(path))[0]
+
+ # Copy passed lists (or set to empty lists)
+ self.dependencies = list(dependencies or [])
+
+ self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
+
+ if config_platform_overrides:
+ self.config_platform_overrides = config_platform_overrides
+ else:
+ self.config_platform_overrides = {}
+ self.fixpath_prefix = fixpath_prefix
+ self.msbuild_toolset = None
+
+ def set_dependencies(self, dependencies):
+ self.dependencies = list(dependencies or [])
+
+ def get_guid(self):
+ if self.guid is None:
+ # Set GUID from path
+ # TODO(rspangler): This is fragile.
+ # 1. We can't just use the project filename sans path, since there could
+ # be multiple projects with the same base name (for example,
+ # foo/unittest.vcproj and bar/unittest.vcproj).
+ # 2. The path needs to be relative to $SOURCE_ROOT, so that the project
+ # GUID is the same whether it's included from base/base.sln or
+ # foo/bar/baz/baz.sln.
+ # 3. The GUID needs to be the same each time this builder is invoked, so
+ # that we don't need to rebuild the solution when the project changes.
+ # 4. We should be able to handle pre-built project files by reading the
+ # GUID from the files.
+ self.guid = MakeGuid(self.name)
+ return self.guid
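+  # Unlike folders, a project's GUID is derived from its name via MakeGuid,
+  # so, per the TODO above, foo/unittest.vcproj and bar/unittest.vcproj
+  # would currently receive the same GUID.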
+
+ def set_msbuild_toolset(self, msbuild_toolset):
+ self.msbuild_toolset = msbuild_toolset
+
+#------------------------------------------------------------------------------
+
+
+class MSVSSolution(object):
+ """Visual Studio solution."""
+
+ def __init__(self, path, version, entries=None, variants=None,
+ websiteProperties=True):
+ """Initializes the solution.
+
+ Args:
+ path: Path to solution file.
+ version: Format version to emit.
+ entries: List of entries in solution. May contain Folder or Project
+ objects. May be None, if the folder is empty.
+ variants: List of build variant strings. If none, a default list will
+ be used.
+ websiteProperties: Flag to decide if the website properties section
+ is generated.
+ """
+ self.path = path
+ self.websiteProperties = websiteProperties
+ self.version = version
+
+ # Copy passed lists (or set to empty lists)
+ self.entries = list(entries or [])
+
+ if variants:
+ # Copy passed list
+ self.variants = variants[:]
+ else:
+ # Use default
+ self.variants = ['Debug|Win32', 'Release|Win32']
+ # TODO(rspangler): Need to be able to handle a mapping of solution config
+ # to project config. Should we be able to handle variants being a dict,
+ # or add a separate variant_map variable? If it's a dict, we can't
+ # guarantee the order of variants since dict keys aren't ordered.
+
+
+ # TODO(rspangler): Automatically write to disk for now; should delay until
+ # node-evaluation time.
+ self.Write()
+
+
+ def Write(self, writer=gyp.common.WriteOnDiff):
+ """Writes the solution file to disk.
+
+    Entries that appear multiple times in the tree are only written once.
+ """
+ # Walk the entry tree and collect all the folders and projects.
+ all_entries = set()
+ entries_to_check = self.entries[:]
+ while entries_to_check:
+ e = entries_to_check.pop(0)
+
+ # If this entry has been visited, nothing to do.
+ if e in all_entries:
+ continue
+
+ all_entries.add(e)
+
+ # If this is a folder, check its entries too.
+ if isinstance(e, MSVSFolder):
+ entries_to_check += e.entries
+
+ all_entries = sorted(all_entries)
+
+ # Open file and print header
+ f = writer(self.path)
+ f.write('Microsoft Visual Studio Solution File, '
+ 'Format Version %s\r\n' % self.version.SolutionVersion())
+ f.write('# %s\r\n' % self.version.Description())
+
+ # Project entries
+ sln_root = os.path.split(self.path)[0]
+ for e in all_entries:
+ relative_path = gyp.common.RelativePath(e.path, sln_root)
+ # msbuild does not accept an empty folder_name.
+ # use '.' in case relative_path is empty.
+ folder_name = relative_path.replace('/', '\\') or '.'
+ f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
+ e.entry_type_guid, # Entry type GUID
+ e.name, # Folder name
+ folder_name, # Folder name (again)
+ e.get_guid(), # Entry GUID
+ ))
+
+ # TODO(rspangler): Need a way to configure this stuff
+ if self.websiteProperties:
+ f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
+ '\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
+ '\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
+ '\tEndProjectSection\r\n')
+
+ if isinstance(e, MSVSFolder):
+ if e.items:
+ f.write('\tProjectSection(SolutionItems) = preProject\r\n')
+ for i in e.items:
+ f.write('\t\t%s = %s\r\n' % (i, i))
+ f.write('\tEndProjectSection\r\n')
+
+ if isinstance(e, MSVSProject):
+ if e.dependencies:
+ f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
+ for d in e.dependencies:
+ f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
+ f.write('\tEndProjectSection\r\n')
+
+ f.write('EndProject\r\n')
+
+ # Global section
+ f.write('Global\r\n')
+
+ # Configurations (variants)
+ f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
+ for v in self.variants:
+ f.write('\t\t%s = %s\r\n' % (v, v))
+ f.write('\tEndGlobalSection\r\n')
+
+ # Sort config guids for easier diffing of solution changes.
+ config_guids = []
+ config_guids_overrides = {}
+ for e in all_entries:
+ if isinstance(e, MSVSProject):
+ config_guids.append(e.get_guid())
+ config_guids_overrides[e.get_guid()] = e.config_platform_overrides
+ config_guids.sort()
+
+ f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
+ for g in config_guids:
+ for v in self.variants:
+ nv = config_guids_overrides[g].get(v, v)
+ # Pick which project configuration to build for this solution
+ # configuration.
+ f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
+ g, # Project GUID
+ v, # Solution build configuration
+ nv, # Project build config for that solution config
+ ))
+
+ # Enable project in this solution configuration.
+ f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
+ g, # Project GUID
+ v, # Solution build configuration
+ nv, # Project build config for that solution config
+ ))
+ f.write('\tEndGlobalSection\r\n')
+
+ # TODO(rspangler): Should be able to configure this stuff too (though I've
+ # never seen this be any different)
+ f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
+ f.write('\t\tHideSolutionNode = FALSE\r\n')
+ f.write('\tEndGlobalSection\r\n')
+
+ # Folder mappings
+ # Omit this section if there are no folders
+    if any(e.entries for e in all_entries if isinstance(e, MSVSFolder)):
+ f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
+ for e in all_entries:
+ if not isinstance(e, MSVSFolder):
+ continue # Does not apply to projects, only folders
+ for subentry in e.entries:
+ f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
+ f.write('\tEndGlobalSection\r\n')
+
+ f.write('EndGlobal\r\n')
+
+ f.close()
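+# Minimal usage sketch (illustrative only; 'version' stands for an MSVS
+# version object providing SolutionVersion() and Description(), such as the
+# ones built by gyp.MSVSVersion):
+#   project = MSVSProject('foo\\foo.vcproj', spec={}, build_file='foo.gyp')
+#   MSVSSolution('foo.sln', version, entries=[project])  # writes on init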
diff --git a/third_party/python/gyp/pylib/gyp/MSVSProject.py b/third_party/python/gyp/pylib/gyp/MSVSProject.py
new file mode 100644
index 0000000000..db1ceede34
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/MSVSProject.py
@@ -0,0 +1,208 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Visual Studio project reader/writer."""
+
+import gyp.common
+import gyp.easy_xml as easy_xml
+
+#------------------------------------------------------------------------------
+
+
+class Tool(object):
+ """Visual Studio tool."""
+
+ def __init__(self, name, attrs=None):
+ """Initializes the tool.
+
+ Args:
+ name: Tool name.
+ attrs: Dict of tool attributes; may be None.
+ """
+ self._attrs = attrs or {}
+ self._attrs['Name'] = name
+
+ def _GetSpecification(self):
+ """Creates an element for the tool.
+
+ Returns:
+ A new xml.dom.Element for the tool.
+ """
+ return ['Tool', self._attrs]
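+  # The returned specification is easy_xml's nested-list element form; for
+  # example, Tool('VCCLCompilerTool')._GetSpecification() evaluates to
+  # ['Tool', {'Name': 'VCCLCompilerTool'}].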
+
+class Filter(object):
+ """Visual Studio filter - that is, a virtual folder."""
+
+ def __init__(self, name, contents=None):
+ """Initializes the folder.
+
+ Args:
+ name: Filter (folder) name.
+ contents: List of filenames and/or Filter objects contained.
+ """
+ self.name = name
+ self.contents = list(contents or [])
+
+
+#------------------------------------------------------------------------------
+
+
+class Writer(object):
+ """Visual Studio XML project writer."""
+
+ def __init__(self, project_path, version, name, guid=None, platforms=None):
+ """Initializes the project.
+
+ Args:
+ project_path: Path to the project file.
+ version: Format version to emit.
+ name: Name of the project.
+ guid: GUID to use for project, if not None.
+      platforms: Array of strings, the supported platforms. If None,
+          defaults to ['Win32'].
+ """
+ self.project_path = project_path
+ self.version = version
+ self.name = name
+ self.guid = guid
+
+ # Default to Win32 for platforms.
+ if not platforms:
+ platforms = ['Win32']
+
+ # Initialize the specifications of the various sections.
+ self.platform_section = ['Platforms']
+ for platform in platforms:
+ self.platform_section.append(['Platform', {'Name': platform}])
+ self.tool_files_section = ['ToolFiles']
+ self.configurations_section = ['Configurations']
+ self.files_section = ['Files']
+
+ # Keep a dict keyed on filename to speed up access.
+ self.files_dict = dict()
+
+ def AddToolFile(self, path):
+ """Adds a tool file to the project.
+
+ Args:
+ path: Relative path from project to tool file.
+ """
+ self.tool_files_section.append(['ToolFile', {'RelativePath': path}])
+
+ def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools):
+ """Returns the specification for a configuration.
+
+ Args:
+ config_type: Type of configuration node.
+ config_name: Configuration name.
+ attrs: Dict of configuration attributes; may be None.
+ tools: List of tools (strings or Tool objects); may be None.
+    Returns:
+      The configuration specification as a nested list for easy_xml.
+ """
+ # Handle defaults
+ if not attrs:
+ attrs = {}
+ if not tools:
+ tools = []
+
+ # Add configuration node and its attributes
+ node_attrs = attrs.copy()
+ node_attrs['Name'] = config_name
+ specification = [config_type, node_attrs]
+
+ # Add tool nodes and their attributes
+ if tools:
+ for t in tools:
+ if isinstance(t, Tool):
+ specification.append(t._GetSpecification())
+ else:
+ specification.append(Tool(t)._GetSpecification())
+ return specification
+
+
+ def AddConfig(self, name, attrs=None, tools=None):
+ """Adds a configuration to the project.
+
+ Args:
+ name: Configuration name.
+ attrs: Dict of configuration attributes; may be None.
+ tools: List of tools (strings or Tool objects); may be None.
+ """
+ spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools)
+ self.configurations_section.append(spec)
+
+ def _AddFilesToNode(self, parent, files):
+ """Adds files and/or filters to the parent node.
+
+ Args:
+ parent: Destination node
+ files: A list of Filter objects and/or relative paths to files.
+
+ Will call itself recursively, if the files list contains Filter objects.
+ """
+ for f in files:
+ if isinstance(f, Filter):
+ node = ['Filter', {'Name': f.name}]
+ self._AddFilesToNode(node, f.contents)
+ else:
+ node = ['File', {'RelativePath': f}]
+ self.files_dict[f] = node
+ parent.append(node)
+
+ def AddFiles(self, files):
+ """Adds files to the project.
+
+ Args:
+ files: A list of Filter objects and/or relative paths to files.
+
+ This makes a copy of the file/filter tree at the time of this call. If you
+ later add files to a Filter object which was passed into a previous call
+ to AddFiles(), it will not be reflected in this project.
+ """
+ self._AddFilesToNode(self.files_section, files)
+ # TODO(rspangler) This also doesn't handle adding files to an existing
+ # filter. That is, it doesn't merge the trees.
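+  # For instance (hypothetical file names), AddFiles(['main.cc',
+  # Filter('util', ['util.cc'])]) appends ['File', {'RelativePath': 'main.cc'}]
+  # and ['Filter', {'Name': 'util'}, ['File', {'RelativePath': 'util.cc'}]]
+  # to the Files section.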
+
+ def AddFileConfig(self, path, config, attrs=None, tools=None):
+ """Adds a configuration to a file.
+
+ Args:
+ path: Relative path to the file.
+ config: Name of configuration to add.
+ attrs: Dict of configuration attributes; may be None.
+ tools: List of tools (strings or Tool objects); may be None.
+
+ Raises:
+ ValueError: Relative path does not match any file added via AddFiles().
+ """
+ # Find the file node with the right relative path
+ parent = self.files_dict.get(path)
+ if not parent:
+ raise ValueError('AddFileConfig: file "%s" not in project.' % path)
+
+ # Add the config to the file node
+ spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs,
+ tools)
+ parent.append(spec)
+
+ def WriteIfChanged(self):
+ """Writes the project file."""
+ # First create XML content definition
+ content = [
+ 'VisualStudioProject',
+ {'ProjectType': 'Visual C++',
+ 'Version': self.version.ProjectVersion(),
+ 'Name': self.name,
+ 'ProjectGUID': self.guid,
+ 'RootNamespace': self.name,
+ 'Keyword': 'Win32Proj'
+ },
+ self.platform_section,
+ self.tool_files_section,
+ self.configurations_section,
+ ['References'], # empty section
+ self.files_section,
+ ['Globals'] # empty section
+ ]
+ easy_xml.WriteXmlIfChanged(content, self.project_path,
+ encoding="Windows-1252")
diff --git a/third_party/python/gyp/pylib/gyp/MSVSSettings.py b/third_party/python/gyp/pylib/gyp/MSVSSettings.py
new file mode 100644
index 0000000000..1d2e25ab90
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/MSVSSettings.py
@@ -0,0 +1,1106 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+r"""Code to validate and convert settings of the Microsoft build tools.
+
+This file contains code to validate and convert settings of the Microsoft
+build tools. The functions ConvertToMSBuildSettings(), ValidateMSVSSettings(),
+and ValidateMSBuildSettings() are the entry points.
+
+This file was created by comparing the projects created by Visual Studio 2008
+and Visual Studio 2010 for all available settings through the user interface.
+The MSBuild schemas were also considered. They are typically found in the
+MSBuild install directory, e.g. c:\Program Files (x86)\MSBuild
+"""
+
+from __future__ import print_function
+
+import sys
+import re
+
+try:
+ # basestring was removed in python3.
+ basestring
+except NameError:
+ basestring = str
+
+# Dictionaries of settings validators. The key is the tool name, the value is
+# a dictionary mapping setting names to validation functions.
+_msvs_validators = {}
+_msbuild_validators = {}
+
+
+# A dictionary of settings converters. The key is the tool name, the value is
+# a dictionary mapping setting names to conversion functions.
+_msvs_to_msbuild_converters = {}
+
+
+# Tool name mapping from MSVS to MSBuild.
+_msbuild_name_of_tool = {}
+
+
+class _Tool(object):
+ """Represents a tool used by MSVS or MSBuild.
+
+ Attributes:
+ msvs_name: The name of the tool in MSVS.
+ msbuild_name: The name of the tool in MSBuild.
+ """
+
+ def __init__(self, msvs_name, msbuild_name):
+ self.msvs_name = msvs_name
+ self.msbuild_name = msbuild_name
+
+
+def _AddTool(tool):
+ """Adds a tool to the four dictionaries used to process settings.
+
+ This only defines the tool. Each setting also needs to be added.
+
+ Args:
+ tool: The _Tool object to be added.
+ """
+ _msvs_validators[tool.msvs_name] = {}
+ _msbuild_validators[tool.msbuild_name] = {}
+ _msvs_to_msbuild_converters[tool.msvs_name] = {}
+ _msbuild_name_of_tool[tool.msvs_name] = tool.msbuild_name
+
+
+def _GetMSBuildToolSettings(msbuild_settings, tool):
+ """Returns an MSBuild tool dictionary. Creates it if needed."""
+ return msbuild_settings.setdefault(tool.msbuild_name, {})
+
+
+class _Type(object):
+ """Type of settings (Base class)."""
+
+ def ValidateMSVS(self, value):
+ """Verifies that the value is legal for MSVS.
+
+ Args:
+ value: the value to check for this type.
+
+ Raises:
+ ValueError if value is not valid for MSVS.
+ """
+
+ def ValidateMSBuild(self, value):
+ """Verifies that the value is legal for MSBuild.
+
+ Args:
+ value: the value to check for this type.
+
+ Raises:
+ ValueError if value is not valid for MSBuild.
+ """
+
+ def ConvertToMSBuild(self, value):
+ """Returns the MSBuild equivalent of the MSVS value given.
+
+ Args:
+ value: the MSVS value to convert.
+
+ Returns:
+ the MSBuild equivalent.
+
+ Raises:
+ ValueError if value is not valid.
+ """
+ return value
+
+
+class _String(_Type):
+ """A setting that's just a string."""
+
+ def ValidateMSVS(self, value):
+ if not isinstance(value, basestring):
+ raise ValueError('expected string; got %r' % value)
+
+ def ValidateMSBuild(self, value):
+ if not isinstance(value, basestring):
+ raise ValueError('expected string; got %r' % value)
+
+ def ConvertToMSBuild(self, value):
+ # Convert the macros
+ return ConvertVCMacrosToMSBuild(value)
+
+
+class _StringList(_Type):
+ """A settings that's a list of strings."""
+
+ def ValidateMSVS(self, value):
+ if not isinstance(value, basestring) and not isinstance(value, list):
+ raise ValueError('expected string list; got %r' % value)
+
+ def ValidateMSBuild(self, value):
+ if not isinstance(value, basestring) and not isinstance(value, list):
+ raise ValueError('expected string list; got %r' % value)
+
+ def ConvertToMSBuild(self, value):
+ # Convert the macros
+ if isinstance(value, list):
+ return [ConvertVCMacrosToMSBuild(i) for i in value]
+ else:
+ return ConvertVCMacrosToMSBuild(value)
+
+
+class _Boolean(_Type):
+ """Boolean settings, can have the values 'false' or 'true'."""
+
+ def _Validate(self, value):
+ if value != 'true' and value != 'false':
+ raise ValueError('expected bool; got %r' % value)
+
+ def ValidateMSVS(self, value):
+ self._Validate(value)
+
+ def ValidateMSBuild(self, value):
+ self._Validate(value)
+
+ def ConvertToMSBuild(self, value):
+ self._Validate(value)
+ return value
+
+
+class _Integer(_Type):
+ """Integer settings."""
+
+ def __init__(self, msbuild_base=10):
+ _Type.__init__(self)
+ self._msbuild_base = msbuild_base
+
+ def ValidateMSVS(self, value):
+ # Try to convert, this will raise ValueError if invalid.
+ self.ConvertToMSBuild(value)
+
+ def ValidateMSBuild(self, value):
+ # Try to convert, this will raise ValueError if invalid.
+ int(value, self._msbuild_base)
+
+ def ConvertToMSBuild(self, value):
+    msbuild_format = '%d' if self._msbuild_base == 10 else '0x%04x'
+ return msbuild_format % int(value)
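+# For example, 'Culture' below is declared with msbuild_base=16, so the MSVS
+# decimal value '1033' converts to the MSBuild hex form '0x0409'; with the
+# default base of 10, '1033' stays '1033'.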
+
+
+class _Enumeration(_Type):
+ """Type of settings that is an enumeration.
+
+ In MSVS, the values are indexes like '0', '1', and '2'.
+ MSBuild uses text labels that are more representative, like 'Win32'.
+
+ Constructor args:
+ label_list: an array of MSBuild labels that correspond to the MSVS index.
+ In the rare cases where MSVS has skipped an index value, None is
+ used in the array to indicate the unused spot.
+ new: an array of labels that are new to MSBuild.
+ """
+
+ def __init__(self, label_list, new=None):
+ _Type.__init__(self)
+ self._label_list = label_list
+ self._msbuild_values = set(value for value in label_list
+ if value is not None)
+ if new is not None:
+ self._msbuild_values.update(new)
+
+ def ValidateMSVS(self, value):
+ # Try to convert. It will raise an exception if not valid.
+ self.ConvertToMSBuild(value)
+
+ def ValidateMSBuild(self, value):
+ if value not in self._msbuild_values:
+ raise ValueError('unrecognized enumerated value %s' % value)
+
+ def ConvertToMSBuild(self, value):
+ index = int(value)
+ if index < 0 or index >= len(self._label_list):
+ raise ValueError('index value (%d) not in expected range [0, %d)' %
+ (index, len(self._label_list)))
+ label = self._label_list[index]
+ if label is None:
+ raise ValueError('converted value for %s not specified.' % value)
+ return label
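+# For example, the WarningLevel enumeration declared below converts the MSVS
+# index '3' to the MSBuild label 'Level3'; indexes outside the label list, or
+# ones that land on a None placeholder, raise ValueError.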
+
+
+# Instantiate the various generic types.
+_boolean = _Boolean()
+_integer = _Integer()
+# For now, we don't do any special validation on these types:
+_string = _String()
+_file_name = _String()
+_folder_name = _String()
+_file_list = _StringList()
+_folder_list = _StringList()
+_string_list = _StringList()
+# Some boolean settings went from numerical values to boolean. The
+# mapping is 0: default, 1: false, 2: true.
+_newly_boolean = _Enumeration(['', 'false', 'true'])
+
+
+def _Same(tool, name, setting_type):
+ """Defines a setting that has the same name in MSVS and MSBuild.
+
+ Args:
+ tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
+ name: the name of the setting.
+ setting_type: the type of this setting.
+ """
+ _Renamed(tool, name, name, setting_type)
+
+
+def _Renamed(tool, msvs_name, msbuild_name, setting_type):
+ """Defines a setting for which the name has changed.
+
+ Args:
+ tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
+ msvs_name: the name of the MSVS setting.
+ msbuild_name: the name of the MSBuild setting.
+ setting_type: the type of this setting.
+ """
+
+ def _Translate(value, msbuild_settings):
+ msbuild_tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
+ msbuild_tool_settings[msbuild_name] = setting_type.ConvertToMSBuild(value)
+
+ _msvs_validators[tool.msvs_name][msvs_name] = setting_type.ValidateMSVS
+ _msbuild_validators[tool.msbuild_name][msbuild_name] = (
+ setting_type.ValidateMSBuild)
+ _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
+
+
+def _Moved(tool, settings_name, msbuild_tool_name, setting_type):
+ _MovedAndRenamed(tool, settings_name, msbuild_tool_name, settings_name,
+ setting_type)
+
+
+def _MovedAndRenamed(tool, msvs_settings_name, msbuild_tool_name,
+ msbuild_settings_name, setting_type):
+ """Defines a setting that may have moved to a new section.
+
+ Args:
+ tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
+ msvs_settings_name: the MSVS name of the setting.
+ msbuild_tool_name: the name of the MSBuild tool to place the setting under.
+ msbuild_settings_name: the MSBuild name of the setting.
+ setting_type: the type of this setting.
+ """
+
+ def _Translate(value, msbuild_settings):
+ tool_settings = msbuild_settings.setdefault(msbuild_tool_name, {})
+ tool_settings[msbuild_settings_name] = setting_type.ConvertToMSBuild(value)
+
+ _msvs_validators[tool.msvs_name][msvs_settings_name] = (
+ setting_type.ValidateMSVS)
+ validator = setting_type.ValidateMSBuild
+ _msbuild_validators[msbuild_tool_name][msbuild_settings_name] = validator
+ _msvs_to_msbuild_converters[tool.msvs_name][msvs_settings_name] = _Translate
+
+
+def _MSVSOnly(tool, name, setting_type):
+ """Defines a setting that is only found in MSVS.
+
+ Args:
+ tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
+ name: the name of the setting.
+ setting_type: the type of this setting.
+ """
+
+ def _Translate(unused_value, unused_msbuild_settings):
+ # Since this is for MSVS only settings, no translation will happen.
+ pass
+
+ _msvs_validators[tool.msvs_name][name] = setting_type.ValidateMSVS
+ _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
+
+
+def _MSBuildOnly(tool, name, setting_type):
+ """Defines a setting that is only found in MSBuild.
+
+ Args:
+ tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
+ name: the name of the setting.
+ setting_type: the type of this setting.
+ """
+
+ def _Translate(value, msbuild_settings):
+ # Let msbuild-only properties get translated as-is from msvs_settings.
+ tool_settings = msbuild_settings.setdefault(tool.msbuild_name, {})
+ tool_settings[name] = value
+
+ _msbuild_validators[tool.msbuild_name][name] = setting_type.ValidateMSBuild
+ _msvs_to_msbuild_converters[tool.msvs_name][name] = _Translate
+
+
+def _ConvertedToAdditionalOption(tool, msvs_name, flag):
+ """Defines a setting that's handled via a command line option in MSBuild.
+
+ Args:
+ tool: a dictionary that gives the names of the tool for MSVS and MSBuild.
+ msvs_name: the name of the MSVS setting that if 'true' becomes a flag
+ flag: the flag to insert at the end of the AdditionalOptions
+ """
+
+ def _Translate(value, msbuild_settings):
+ if value == 'true':
+ tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
+ if 'AdditionalOptions' in tool_settings:
+ new_flags = '%s %s' % (tool_settings['AdditionalOptions'], flag)
+ else:
+ new_flags = flag
+ tool_settings['AdditionalOptions'] = new_flags
+ _msvs_validators[tool.msvs_name][msvs_name] = _boolean.ValidateMSVS
+ _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
+
+
+def _CustomGeneratePreprocessedFile(tool, msvs_name):
+ def _Translate(value, msbuild_settings):
+ tool_settings = _GetMSBuildToolSettings(msbuild_settings, tool)
+ if value == '0':
+ tool_settings['PreprocessToFile'] = 'false'
+ tool_settings['PreprocessSuppressLineNumbers'] = 'false'
+ elif value == '1': # /P
+ tool_settings['PreprocessToFile'] = 'true'
+ tool_settings['PreprocessSuppressLineNumbers'] = 'false'
+ elif value == '2': # /EP /P
+ tool_settings['PreprocessToFile'] = 'true'
+ tool_settings['PreprocessSuppressLineNumbers'] = 'true'
+ else:
+ raise ValueError('value must be one of [0, 1, 2]; got %s' % value)
+ # Create a bogus validator that looks for '0', '1', or '2'
+ msvs_validator = _Enumeration(['a', 'b', 'c']).ValidateMSVS
+ _msvs_validators[tool.msvs_name][msvs_name] = msvs_validator
+ msbuild_validator = _boolean.ValidateMSBuild
+ msbuild_tool_validators = _msbuild_validators[tool.msbuild_name]
+ msbuild_tool_validators['PreprocessToFile'] = msbuild_validator
+ msbuild_tool_validators['PreprocessSuppressLineNumbers'] = msbuild_validator
+ _msvs_to_msbuild_converters[tool.msvs_name][msvs_name] = _Translate
+
+
+fix_vc_macro_slashes_regex_list = ('IntDir', 'OutDir')
+fix_vc_macro_slashes_regex = re.compile(
+ r'(\$\((?:%s)\))(?:[\\/]+)' % "|".join(fix_vc_macro_slashes_regex_list)
+)
+
+# Regular expression to detect keys that were generated by exclusion lists
+_EXCLUDED_SUFFIX_RE = re.compile('^(.*)_excluded$')
+
+
+def _ValidateExclusionSetting(setting, settings, error_msg, stderr=sys.stderr):
+ """Verify that 'setting' is valid if it is generated from an exclusion list.
+
+ If the setting appears to be generated from an exclusion list, the root name
+ is checked.
+
+ Args:
+ setting: A string that is the setting name to validate
+ settings: A dictionary where the keys are valid settings
+ error_msg: The message to emit in the event of error
+ stderr: The stream receiving the error messages.
+ """
+ # This may be unrecognized because it's an exclusion list. If the
+ # setting name has the _excluded suffix, then check the root name.
+ unrecognized = True
+  m = _EXCLUDED_SUFFIX_RE.match(setting)
+ if m:
+ root_setting = m.group(1)
+ unrecognized = root_setting not in settings
+
+ if unrecognized:
+ # We don't know this setting. Give a warning.
+ print(error_msg, file=stderr)
+
+
+def FixVCMacroSlashes(s):
+ """Replace macros which have excessive following slashes.
+
+ These macros are known to have a built-in trailing slash. Furthermore, many
+ scripts hiccup on processing paths with extra slashes in the middle.
+
+ This list is probably not exhaustive. Add as needed.
+ """
+ if '$' in s:
+ s = fix_vc_macro_slashes_regex.sub(r'\1', s)
+ return s
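+# For example, FixVCMacroSlashes('$(IntDir)\\obj') returns '$(IntDir)obj',
+# since the expanded macro already carries a trailing slash.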
+
+
+def ConvertVCMacrosToMSBuild(s):
+ """Convert the the MSVS macros found in the string to the MSBuild equivalent.
+
+ This list is probably not exhaustive. Add as needed.
+ """
+ if '$' in s:
+ replace_map = {
+ '$(ConfigurationName)': '$(Configuration)',
+ '$(InputDir)': '%(RelativeDir)',
+ '$(InputExt)': '%(Extension)',
+ '$(InputFileName)': '%(Filename)%(Extension)',
+ '$(InputName)': '%(Filename)',
+ '$(InputPath)': '%(Identity)',
+ '$(ParentName)': '$(ProjectFileName)',
+ '$(PlatformName)': '$(Platform)',
+ '$(SafeInputName)': '%(Filename)',
+ }
+ for old, new in replace_map.items():
+ s = s.replace(old, new)
+ s = FixVCMacroSlashes(s)
+ return s
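+# For example, ConvertVCMacrosToMSBuild('$(OutDir)\\$(InputName).obj')
+# returns '$(OutDir)%(Filename).obj': $(InputName) is renamed through
+# replace_map and the extra slash is then stripped by FixVCMacroSlashes.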
+
+
+def ConvertToMSBuildSettings(msvs_settings, stderr=sys.stderr):
+ """Converts MSVS settings (VS2008 and earlier) to MSBuild settings (VS2010+).
+
+ Args:
+ msvs_settings: A dictionary. The key is the tool name. The values are
+ themselves dictionaries of settings and their values.
+ stderr: The stream receiving the error messages.
+
+ Returns:
+ A dictionary of MSBuild settings. The key is either the MSBuild tool name
+ or the empty string (for the global settings). The values are themselves
+ dictionaries of settings and their values.
+ """
+ msbuild_settings = {}
+ for msvs_tool_name, msvs_tool_settings in msvs_settings.items():
+ if msvs_tool_name in _msvs_to_msbuild_converters:
+ msvs_tool = _msvs_to_msbuild_converters[msvs_tool_name]
+ for msvs_setting, msvs_value in msvs_tool_settings.items():
+ if msvs_setting in msvs_tool:
+ # Invoke the translation function.
+ try:
+ msvs_tool[msvs_setting](msvs_value, msbuild_settings)
+ except ValueError as e:
+ print(('Warning: while converting %s/%s to MSBuild, '
+ '%s' % (msvs_tool_name, msvs_setting, e)),
+ file=stderr)
+ else:
+ _ValidateExclusionSetting(msvs_setting,
+ msvs_tool,
+ ('Warning: unrecognized setting %s/%s '
+ 'while converting to MSBuild.' %
+ (msvs_tool_name, msvs_setting)),
+ stderr)
+ else:
+ print(('Warning: unrecognized tool %s while converting to '
+ 'MSBuild.' % msvs_tool_name), file=stderr)
+ return msbuild_settings
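+# For example, given the compiler settings declared later in this file,
+#   ConvertToMSBuildSettings({'VCCLCompilerTool': {'WarningLevel': '3'}})
+# returns {'ClCompile': {'WarningLevel': 'Level3'}}.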
+
+
+def ValidateMSVSSettings(settings, stderr=sys.stderr):
+ """Validates that the names of the settings are valid for MSVS.
+
+ Args:
+ settings: A dictionary. The key is the tool name. The values are
+ themselves dictionaries of settings and their values.
+ stderr: The stream receiving the error messages.
+ """
+ _ValidateSettings(_msvs_validators, settings, stderr)
+
+
+def ValidateMSBuildSettings(settings, stderr=sys.stderr):
+ """Validates that the names of the settings are valid for MSBuild.
+
+ Args:
+ settings: A dictionary. The key is the tool name. The values are
+ themselves dictionaries of settings and their values.
+ stderr: The stream receiving the error messages.
+ """
+ _ValidateSettings(_msbuild_validators, settings, stderr)
+
+
+def _ValidateSettings(validators, settings, stderr):
+ """Validates that the settings are valid for MSBuild or MSVS.
+
+ We currently only validate the names of the settings, not their values.
+
+ Args:
+ validators: A dictionary of tools and their validators.
+ settings: A dictionary. The key is the tool name. The values are
+ themselves dictionaries of settings and their values.
+ stderr: The stream receiving the error messages.
+ """
+ for tool_name in settings:
+ if tool_name in validators:
+ tool_validators = validators[tool_name]
+ for setting, value in settings[tool_name].items():
+ if setting in tool_validators:
+ try:
+ tool_validators[setting](value)
+ except ValueError as e:
+ print(('Warning: for %s/%s, %s' %
+ (tool_name, setting, e)), file=stderr)
+ else:
+ _ValidateExclusionSetting(setting,
+ tool_validators,
+ ('Warning: unrecognized setting %s/%s' %
+ (tool_name, setting)),
+ stderr)
+
+ else:
+ print(('Warning: unrecognized tool %s' % tool_name), file=stderr)
+
+
+# MSVS and MSBuild names of the tools.
+_compile = _Tool('VCCLCompilerTool', 'ClCompile')
+_link = _Tool('VCLinkerTool', 'Link')
+_midl = _Tool('VCMIDLTool', 'Midl')
+_rc = _Tool('VCResourceCompilerTool', 'ResourceCompile')
+_lib = _Tool('VCLibrarianTool', 'Lib')
+_manifest = _Tool('VCManifestTool', 'Manifest')
+_masm = _Tool('MASM', 'MASM')
+
+
+_AddTool(_compile)
+_AddTool(_link)
+_AddTool(_midl)
+_AddTool(_rc)
+_AddTool(_lib)
+_AddTool(_manifest)
+_AddTool(_masm)
+# Add sections only found in the MSBuild settings.
+_msbuild_validators[''] = {}
+_msbuild_validators['ProjectReference'] = {}
+_msbuild_validators['ManifestResourceCompile'] = {}
+
+# Descriptions of the compiler options, i.e. VCCLCompilerTool in MSVS and
+# ClCompile in MSBuild.
+# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\cl.xml" for
+# the schema of the MSBuild ClCompile settings.
+
+# Options that have the same name in MSVS and MSBuild
+_Same(_compile, 'AdditionalIncludeDirectories', _folder_list) # /I
+_Same(_compile, 'AdditionalOptions', _string_list)
+_Same(_compile, 'AdditionalUsingDirectories', _folder_list) # /AI
+_Same(_compile, 'AssemblerListingLocation', _file_name) # /Fa
+_Same(_compile, 'BrowseInformationFile', _file_name)
+_Same(_compile, 'BufferSecurityCheck', _boolean) # /GS
+_Same(_compile, 'DisableLanguageExtensions', _boolean) # /Za
+_Same(_compile, 'DisableSpecificWarnings', _string_list) # /wd
+_Same(_compile, 'EnableFiberSafeOptimizations', _boolean) # /GT
+_Same(_compile, 'EnablePREfast', _boolean) # /analyze Visible='false'
+_Same(_compile, 'ExpandAttributedSource', _boolean) # /Fx
+_Same(_compile, 'FloatingPointExceptions', _boolean) # /fp:except
+_Same(_compile, 'ForceConformanceInForLoopScope', _boolean) # /Zc:forScope
+_Same(_compile, 'ForcedIncludeFiles', _file_list) # /FI
+_Same(_compile, 'ForcedUsingFiles', _file_list) # /FU
+_Same(_compile, 'GenerateXMLDocumentationFiles', _boolean) # /doc
+_Same(_compile, 'IgnoreStandardIncludePath', _boolean) # /X
+_Same(_compile, 'MinimalRebuild', _boolean) # /Gm
+_Same(_compile, 'OmitDefaultLibName', _boolean) # /Zl
+_Same(_compile, 'OmitFramePointers', _boolean) # /Oy
+_Same(_compile, 'PreprocessorDefinitions', _string_list) # /D
+_Same(_compile, 'ProgramDataBaseFileName', _file_name) # /Fd
+_Same(_compile, 'RuntimeTypeInfo', _boolean) # /GR
+_Same(_compile, 'ShowIncludes', _boolean) # /showIncludes
+_Same(_compile, 'SmallerTypeCheck', _boolean) # /RTCc
+_Same(_compile, 'StringPooling', _boolean) # /GF
+_Same(_compile, 'SuppressStartupBanner', _boolean) # /nologo
+_Same(_compile, 'TreatWChar_tAsBuiltInType', _boolean) # /Zc:wchar_t
+_Same(_compile, 'UndefineAllPreprocessorDefinitions', _boolean) # /u
+_Same(_compile, 'UndefinePreprocessorDefinitions', _string_list) # /U
+_Same(_compile, 'UseFullPaths', _boolean) # /FC
+_Same(_compile, 'WholeProgramOptimization', _boolean) # /GL
+_Same(_compile, 'XMLDocumentationFileName', _file_name)
+_Same(_compile, 'CompileAsWinRT', _boolean) # /ZW
+
+_Same(_compile, 'AssemblerOutput',
+ _Enumeration(['NoListing',
+ 'AssemblyCode', # /FA
+ 'All', # /FAcs
+ 'AssemblyAndMachineCode', # /FAc
+ 'AssemblyAndSourceCode'])) # /FAs
+_Same(_compile, 'BasicRuntimeChecks',
+ _Enumeration(['Default',
+ 'StackFrameRuntimeCheck', # /RTCs
+ 'UninitializedLocalUsageCheck', # /RTCu
+ 'EnableFastChecks'])) # /RTC1
+_Same(_compile, 'BrowseInformation',
+ _Enumeration(['false',
+ 'true', # /FR
+ 'true'])) # /Fr
+_Same(_compile, 'CallingConvention',
+ _Enumeration(['Cdecl', # /Gd
+ 'FastCall', # /Gr
+ 'StdCall', # /Gz
+ 'VectorCall'])) # /Gv
+_Same(_compile, 'CompileAs',
+ _Enumeration(['Default',
+ 'CompileAsC', # /TC
+ 'CompileAsCpp'])) # /TP
+_Same(_compile, 'DebugInformationFormat',
+ _Enumeration(['', # Disabled
+ 'OldStyle', # /Z7
+ None,
+ 'ProgramDatabase', # /Zi
+ 'EditAndContinue'])) # /ZI
+_Same(_compile, 'EnableEnhancedInstructionSet',
+ _Enumeration(['NotSet',
+ 'StreamingSIMDExtensions', # /arch:SSE
+ 'StreamingSIMDExtensions2', # /arch:SSE2
+ 'AdvancedVectorExtensions', # /arch:AVX (vs2012+)
+ 'NoExtensions', # /arch:IA32 (vs2012+)
+ # This one only exists in the new msbuild format.
+ 'AdvancedVectorExtensions2', # /arch:AVX2 (vs2013r2+)
+ ]))
+_Same(_compile, 'ErrorReporting',
+ _Enumeration(['None', # /errorReport:none
+ 'Prompt', # /errorReport:prompt
+ 'Queue'], # /errorReport:queue
+                        new=['Send'])) # /errorReport:send
+_Same(_compile, 'ExceptionHandling',
+ _Enumeration(['false',
+ 'Sync', # /EHsc
+ 'Async'], # /EHa
+ new=['SyncCThrow'])) # /EHs
+_Same(_compile, 'FavorSizeOrSpeed',
+ _Enumeration(['Neither',
+ 'Speed', # /Ot
+ 'Size'])) # /Os
+_Same(_compile, 'FloatingPointModel',
+ _Enumeration(['Precise', # /fp:precise
+ 'Strict', # /fp:strict
+ 'Fast'])) # /fp:fast
+_Same(_compile, 'InlineFunctionExpansion',
+ _Enumeration(['Default',
+ 'OnlyExplicitInline', # /Ob1
+ 'AnySuitable'], # /Ob2
+ new=['Disabled'])) # /Ob0
+_Same(_compile, 'Optimization',
+ _Enumeration(['Disabled', # /Od
+ 'MinSpace', # /O1
+ 'MaxSpeed', # /O2
+ 'Full'])) # /Ox
+_Same(_compile, 'RuntimeLibrary',
+ _Enumeration(['MultiThreaded', # /MT
+ 'MultiThreadedDebug', # /MTd
+ 'MultiThreadedDLL', # /MD
+ 'MultiThreadedDebugDLL'])) # /MDd
+_Same(_compile, 'StructMemberAlignment',
+ _Enumeration(['Default',
+ '1Byte', # /Zp1
+ '2Bytes', # /Zp2
+ '4Bytes', # /Zp4
+ '8Bytes', # /Zp8
+ '16Bytes'])) # /Zp16
+_Same(_compile, 'WarningLevel',
+ _Enumeration(['TurnOffAllWarnings', # /W0
+ 'Level1', # /W1
+ 'Level2', # /W2
+ 'Level3', # /W3
+ 'Level4'], # /W4
+ new=['EnableAllWarnings'])) # /Wall
+
+# Options found in MSVS that have been renamed in MSBuild.
+_Renamed(_compile, 'EnableFunctionLevelLinking', 'FunctionLevelLinking',
+ _boolean) # /Gy
+_Renamed(_compile, 'EnableIntrinsicFunctions', 'IntrinsicFunctions',
+ _boolean) # /Oi
+_Renamed(_compile, 'KeepComments', 'PreprocessKeepComments', _boolean) # /C
+_Renamed(_compile, 'ObjectFile', 'ObjectFileName', _file_name) # /Fo
+_Renamed(_compile, 'OpenMP', 'OpenMPSupport', _boolean) # /openmp
+_Renamed(_compile, 'PrecompiledHeaderThrough', 'PrecompiledHeaderFile',
+ _file_name) # Used with /Yc and /Yu
+_Renamed(_compile, 'PrecompiledHeaderFile', 'PrecompiledHeaderOutputFile',
+ _file_name) # /Fp
+_Renamed(_compile, 'UsePrecompiledHeader', 'PrecompiledHeader',
+ _Enumeration(['NotUsing', # VS recognized '' for this value too.
+ 'Create', # /Yc
+ 'Use'])) # /Yu
+_Renamed(_compile, 'WarnAsError', 'TreatWarningAsError', _boolean) # /WX
+
+_ConvertedToAdditionalOption(_compile, 'DefaultCharIsUnsigned', '/J')
+
+# MSVS options not found in MSBuild.
+_MSVSOnly(_compile, 'Detect64BitPortabilityProblems', _boolean)
+_MSVSOnly(_compile, 'UseUnicodeResponseFiles', _boolean)
+
+# MSBuild options not found in MSVS.
+_MSBuildOnly(_compile, 'BuildingInIDE', _boolean)
+_MSBuildOnly(_compile, 'CompileAsManaged',
+ _Enumeration([], new=['false',
+ 'true'])) # /clr
+_MSBuildOnly(_compile, 'CreateHotpatchableImage', _boolean) # /hotpatch
+_MSBuildOnly(_compile, 'MultiProcessorCompilation', _boolean) # /MP
+_MSBuildOnly(_compile, 'PreprocessOutputPath', _string) # /Fi
+_MSBuildOnly(_compile, 'ProcessorNumber', _integer) # the number of processors
+_MSBuildOnly(_compile, 'TrackerLogDirectory', _folder_name)
+_MSBuildOnly(_compile, 'TreatSpecificWarningsAsErrors', _string_list) # /we
+_MSBuildOnly(_compile, 'UseUnicodeForAssemblerListing', _boolean) # /FAu
+
+# Defines a setting that needs very customized processing
+_CustomGeneratePreprocessedFile(_compile, 'GeneratePreprocessedFile')
+
+
+# Directives for converting MSVS VCLinkerTool to MSBuild Link.
+# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\link.xml" for
+# the schema of the MSBuild Link settings.
+
+# Options that have the same name in MSVS and MSBuild
+_Same(_link, 'AdditionalDependencies', _file_list)
+_Same(_link, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
+# /MANIFESTDEPENDENCY:
+_Same(_link, 'AdditionalManifestDependencies', _file_list)
+_Same(_link, 'AdditionalOptions', _string_list)
+_Same(_link, 'AddModuleNamesToAssembly', _file_list) # /ASSEMBLYMODULE
+_Same(_link, 'AllowIsolation', _boolean) # /ALLOWISOLATION
+_Same(_link, 'AssemblyLinkResource', _file_list) # /ASSEMBLYLINKRESOURCE
+_Same(_link, 'BaseAddress', _string) # /BASE
+_Same(_link, 'CLRUnmanagedCodeCheck', _boolean) # /CLRUNMANAGEDCODECHECK
+_Same(_link, 'DelayLoadDLLs', _file_list) # /DELAYLOAD
+_Same(_link, 'DelaySign', _boolean) # /DELAYSIGN
+_Same(_link, 'EmbedManagedResourceFile', _file_list) # /ASSEMBLYRESOURCE
+_Same(_link, 'EnableUAC', _boolean) # /MANIFESTUAC
+_Same(_link, 'EntryPointSymbol', _string) # /ENTRY
+_Same(_link, 'ForceSymbolReferences', _file_list) # /INCLUDE
+_Same(_link, 'FunctionOrder', _file_name) # /ORDER
+_Same(_link, 'GenerateDebugInformation', _boolean) # /DEBUG
+_Same(_link, 'GenerateMapFile', _boolean) # /MAP
+_Same(_link, 'HeapCommitSize', _string)
+_Same(_link, 'HeapReserveSize', _string) # /HEAP
+_Same(_link, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
+_Same(_link, 'IgnoreEmbeddedIDL', _boolean) # /IGNOREIDL
+_Same(_link, 'ImportLibrary', _file_name) # /IMPLIB
+_Same(_link, 'KeyContainer', _file_name) # /KEYCONTAINER
+_Same(_link, 'KeyFile', _file_name) # /KEYFILE
+_Same(_link, 'ManifestFile', _file_name) # /ManifestFile
+_Same(_link, 'MapExports', _boolean) # /MAPINFO:EXPORTS
+_Same(_link, 'MapFileName', _file_name)
+_Same(_link, 'MergedIDLBaseFileName', _file_name) # /IDLOUT
+_Same(_link, 'MergeSections', _string) # /MERGE
+_Same(_link, 'MidlCommandFile', _file_name) # /MIDL
+_Same(_link, 'ModuleDefinitionFile', _file_name) # /DEF
+_Same(_link, 'OutputFile', _file_name) # /OUT
+_Same(_link, 'PerUserRedirection', _boolean)
+_Same(_link, 'Profile', _boolean) # /PROFILE
+_Same(_link, 'ProfileGuidedDatabase', _file_name) # /PGD
+_Same(_link, 'ProgramDatabaseFile', _file_name) # /PDB
+_Same(_link, 'RegisterOutput', _boolean)
+_Same(_link, 'SetChecksum', _boolean) # /RELEASE
+_Same(_link, 'StackCommitSize', _string)
+_Same(_link, 'StackReserveSize', _string) # /STACK
+_Same(_link, 'StripPrivateSymbols', _file_name) # /PDBSTRIPPED
+_Same(_link, 'SupportUnloadOfDelayLoadedDLL', _boolean) # /DELAY:UNLOAD
+_Same(_link, 'SuppressStartupBanner', _boolean) # /NOLOGO
+_Same(_link, 'SwapRunFromCD', _boolean) # /SWAPRUN:CD
+_Same(_link, 'TurnOffAssemblyGeneration', _boolean) # /NOASSEMBLY
+_Same(_link, 'TypeLibraryFile', _file_name) # /TLBOUT
+_Same(_link, 'TypeLibraryResourceID', _integer) # /TLBID
+_Same(_link, 'UACUIAccess', _boolean) # /uiAccess='true'
+_Same(_link, 'Version', _string) # /VERSION
+
+_Same(_link, 'EnableCOMDATFolding', _newly_boolean) # /OPT:ICF
+_Same(_link, 'FixedBaseAddress', _newly_boolean) # /FIXED
+_Same(_link, 'LargeAddressAware', _newly_boolean) # /LARGEADDRESSAWARE
+_Same(_link, 'OptimizeReferences', _newly_boolean) # /OPT:REF
+_Same(_link, 'RandomizedBaseAddress', _newly_boolean) # /DYNAMICBASE
+_Same(_link, 'TerminalServerAware', _newly_boolean) # /TSAWARE
+
+_subsystem_enumeration = _Enumeration(
+ ['NotSet',
+ 'Console', # /SUBSYSTEM:CONSOLE
+ 'Windows', # /SUBSYSTEM:WINDOWS
+ 'Native', # /SUBSYSTEM:NATIVE
+ 'EFI Application', # /SUBSYSTEM:EFI_APPLICATION
+ 'EFI Boot Service Driver', # /SUBSYSTEM:EFI_BOOT_SERVICE_DRIVER
+ 'EFI ROM', # /SUBSYSTEM:EFI_ROM
+ 'EFI Runtime', # /SUBSYSTEM:EFI_RUNTIME_DRIVER
+ 'WindowsCE'], # /SUBSYSTEM:WINDOWSCE
+ new=['POSIX']) # /SUBSYSTEM:POSIX
+
+_target_machine_enumeration = _Enumeration(
+ ['NotSet',
+ 'MachineX86', # /MACHINE:X86
+ None,
+ 'MachineARM', # /MACHINE:ARM
+ 'MachineEBC', # /MACHINE:EBC
+ 'MachineIA64', # /MACHINE:IA64
+ None,
+ 'MachineMIPS', # /MACHINE:MIPS
+ 'MachineMIPS16', # /MACHINE:MIPS16
+ 'MachineMIPSFPU', # /MACHINE:MIPSFPU
+ 'MachineMIPSFPU16', # /MACHINE:MIPSFPU16
+ None,
+ None,
+ None,
+ 'MachineSH4', # /MACHINE:SH4
+ None,
+ 'MachineTHUMB', # /MACHINE:THUMB
+ 'MachineX64']) # /MACHINE:X64
+
+_Same(_link, 'AssemblyDebug',
+ _Enumeration(['',
+ 'true', # /ASSEMBLYDEBUG
+ 'false'])) # /ASSEMBLYDEBUG:DISABLE
+_Same(_link, 'CLRImageType',
+ _Enumeration(['Default',
+ 'ForceIJWImage', # /CLRIMAGETYPE:IJW
+ 'ForcePureILImage', # /Switch="CLRIMAGETYPE:PURE
+ 'ForceSafeILImage'])) # /Switch="CLRIMAGETYPE:SAFE
+_Same(_link, 'CLRThreadAttribute',
+ _Enumeration(['DefaultThreadingAttribute', # /CLRTHREADATTRIBUTE:NONE
+ 'MTAThreadingAttribute', # /CLRTHREADATTRIBUTE:MTA
+ 'STAThreadingAttribute'])) # /CLRTHREADATTRIBUTE:STA
+_Same(_link, 'DataExecutionPrevention',
+ _Enumeration(['',
+ 'false', # /NXCOMPAT:NO
+ 'true'])) # /NXCOMPAT
+_Same(_link, 'Driver',
+ _Enumeration(['NotSet',
+ 'Driver', # /Driver
+ 'UpOnly', # /DRIVER:UPONLY
+ 'WDM'])) # /DRIVER:WDM
+_Same(_link, 'LinkTimeCodeGeneration',
+ _Enumeration(['Default',
+ 'UseLinkTimeCodeGeneration', # /LTCG
+ 'PGInstrument', # /LTCG:PGInstrument
+ 'PGOptimization', # /LTCG:PGOptimize
+ 'PGUpdate'])) # /LTCG:PGUpdate
+_Same(_link, 'ShowProgress',
+ _Enumeration(['NotSet',
+ 'LinkVerbose', # /VERBOSE
+ 'LinkVerboseLib'], # /VERBOSE:Lib
+ new=['LinkVerboseICF', # /VERBOSE:ICF
+ 'LinkVerboseREF', # /VERBOSE:REF
+ 'LinkVerboseSAFESEH', # /VERBOSE:SAFESEH
+ 'LinkVerboseCLR'])) # /VERBOSE:CLR
+_Same(_link, 'SubSystem', _subsystem_enumeration)
+_Same(_link, 'TargetMachine', _target_machine_enumeration)
+_Same(_link, 'UACExecutionLevel',
+ _Enumeration(['AsInvoker', # /level='asInvoker'
+ 'HighestAvailable', # /level='highestAvailable'
+ 'RequireAdministrator'])) # /level='requireAdministrator'
+_Same(_link, 'MinimumRequiredVersion', _string)
+_Same(_link, 'TreatLinkerWarningAsErrors', _boolean) # /WX
+
+
+# Options found in MSVS that have been renamed in MSBuild.
+_Renamed(_link, 'ErrorReporting', 'LinkErrorReporting',
+ _Enumeration(['NoErrorReport', # /ERRORREPORT:NONE
+ 'PromptImmediately', # /ERRORREPORT:PROMPT
+ 'QueueForNextLogin'], # /ERRORREPORT:QUEUE
+ new=['SendErrorReport'])) # /ERRORREPORT:SEND
+_Renamed(_link, 'IgnoreDefaultLibraryNames', 'IgnoreSpecificDefaultLibraries',
+ _file_list) # /NODEFAULTLIB
+_Renamed(_link, 'ResourceOnlyDLL', 'NoEntryPoint', _boolean) # /NOENTRY
+_Renamed(_link, 'SwapRunFromNet', 'SwapRunFromNET', _boolean) # /SWAPRUN:NET
+
+_Moved(_link, 'GenerateManifest', '', _boolean)
+_Moved(_link, 'IgnoreImportLibrary', '', _boolean)
+_Moved(_link, 'LinkIncremental', '', _newly_boolean)
+_Moved(_link, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
+_Moved(_link, 'UseLibraryDependencyInputs', 'ProjectReference', _boolean)
+
+# MSVS options not found in MSBuild.
+_MSVSOnly(_link, 'OptimizeForWindows98', _newly_boolean)
+_MSVSOnly(_link, 'UseUnicodeResponseFiles', _boolean)
+
+# MSBuild options not found in MSVS.
+_MSBuildOnly(_link, 'BuildingInIDE', _boolean)
+_MSBuildOnly(_link, 'ImageHasSafeExceptionHandlers', _boolean) # /SAFESEH
+_MSBuildOnly(_link, 'LinkDLL', _boolean) # /DLL Visible='false'
+_MSBuildOnly(_link, 'LinkStatus', _boolean) # /LTCG:STATUS
+_MSBuildOnly(_link, 'PreventDllBinding', _boolean) # /ALLOWBIND
+_MSBuildOnly(_link, 'SupportNobindOfDelayLoadedDLL', _boolean) # /DELAY:NOBIND
+_MSBuildOnly(_link, 'TrackerLogDirectory', _folder_name)
+_MSBuildOnly(_link, 'MSDOSStubFileName', _file_name) # /STUB Visible='false'
+_MSBuildOnly(_link, 'SectionAlignment', _integer) # /ALIGN
+_MSBuildOnly(_link, 'SpecifySectionAttributes', _string) # /SECTION
+_MSBuildOnly(_link, 'ForceFileOutput',
+ _Enumeration([], new=['Enabled', # /FORCE
+ # /FORCE:MULTIPLE
+ 'MultiplyDefinedSymbolOnly',
+ 'UndefinedSymbolOnly'])) # /FORCE:UNRESOLVED
+_MSBuildOnly(_link, 'CreateHotPatchableImage',
+ _Enumeration([], new=['Enabled', # /FUNCTIONPADMIN
+ 'X86Image', # /FUNCTIONPADMIN:5
+ 'X64Image', # /FUNCTIONPADMIN:6
+ 'ItaniumImage'])) # /FUNCTIONPADMIN:16
+_MSBuildOnly(_link, 'CLRSupportLastError',
+ _Enumeration([], new=['Enabled', # /CLRSupportLastError
+ 'Disabled', # /CLRSupportLastError:NO
+ # /CLRSupportLastError:SYSTEMDLL
+ 'SystemDlls']))
+
+
+# Directives for converting VCResourceCompilerTool to ResourceCompile.
+# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\rc.xml" for
+# the schema of the MSBuild ResourceCompile settings.
+
+_Same(_rc, 'AdditionalOptions', _string_list)
+_Same(_rc, 'AdditionalIncludeDirectories', _folder_list) # /I
+_Same(_rc, 'Culture', _Integer(msbuild_base=16))
+_Same(_rc, 'IgnoreStandardIncludePath', _boolean) # /X
+_Same(_rc, 'PreprocessorDefinitions', _string_list) # /D
+_Same(_rc, 'ResourceOutputFileName', _string) # /fo
+_Same(_rc, 'ShowProgress', _boolean) # /v
+# There is no UI in Visual Studio 2008 to set the following properties.
+# However, they are found in CL and other tools. Include them here for
+# completeness, as they are very likely to have the same usage pattern.
+_Same(_rc, 'SuppressStartupBanner', _boolean) # /nologo
+_Same(_rc, 'UndefinePreprocessorDefinitions', _string_list) # /u
+
+# MSBuild options not found in MSVS.
+_MSBuildOnly(_rc, 'NullTerminateStrings', _boolean) # /n
+_MSBuildOnly(_rc, 'TrackerLogDirectory', _folder_name)
+
+
+# Directives for converting VCMIDLTool to Midl.
+# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\midl.xml" for
+# the schema of the MSBuild Midl settings.
+
+_Same(_midl, 'AdditionalIncludeDirectories', _folder_list) # /I
+_Same(_midl, 'AdditionalOptions', _string_list)
+_Same(_midl, 'CPreprocessOptions', _string) # /cpp_opt
+_Same(_midl, 'ErrorCheckAllocations', _boolean) # /error allocation
+_Same(_midl, 'ErrorCheckBounds', _boolean) # /error bounds_check
+_Same(_midl, 'ErrorCheckEnumRange', _boolean) # /error enum
+_Same(_midl, 'ErrorCheckRefPointers', _boolean) # /error ref
+_Same(_midl, 'ErrorCheckStubData', _boolean) # /error stub_data
+_Same(_midl, 'GenerateStublessProxies', _boolean) # /Oicf
+_Same(_midl, 'GenerateTypeLibrary', _boolean)
+_Same(_midl, 'HeaderFileName', _file_name) # /h
+_Same(_midl, 'IgnoreStandardIncludePath', _boolean) # /no_def_idir
+_Same(_midl, 'InterfaceIdentifierFileName', _file_name) # /iid
+_Same(_midl, 'MkTypLibCompatible', _boolean) # /mktyplib203
+_Same(_midl, 'OutputDirectory', _string) # /out
+_Same(_midl, 'PreprocessorDefinitions', _string_list) # /D
+_Same(_midl, 'ProxyFileName', _file_name) # /proxy
+_Same(_midl, 'RedirectOutputAndErrors', _file_name) # /o
+_Same(_midl, 'SuppressStartupBanner', _boolean) # /nologo
+_Same(_midl, 'TypeLibraryName', _file_name) # /tlb
+_Same(_midl, 'UndefinePreprocessorDefinitions', _string_list) # /U
+_Same(_midl, 'WarnAsError', _boolean) # /WX
+
+_Same(_midl, 'DefaultCharType',
+ _Enumeration(['Unsigned', # /char unsigned
+ 'Signed', # /char signed
+ 'Ascii'])) # /char ascii7
+_Same(_midl, 'TargetEnvironment',
+ _Enumeration(['NotSet',
+ 'Win32', # /env win32
+ 'Itanium', # /env ia64
+ 'X64'])) # /env x64
+_Same(_midl, 'EnableErrorChecks',
+ _Enumeration(['EnableCustom',
+ 'None', # /error none
+ 'All'])) # /error all
+_Same(_midl, 'StructMemberAlignment',
+ _Enumeration(['NotSet',
+ '1', # Zp1
+ '2', # Zp2
+ '4', # Zp4
+ '8'])) # Zp8
+_Same(_midl, 'WarningLevel',
+ _Enumeration(['0', # /W0
+ '1', # /W1
+ '2', # /W2
+ '3', # /W3
+ '4'])) # /W4
+
+_Renamed(_midl, 'DLLDataFileName', 'DllDataFileName', _file_name) # /dlldata
+_Renamed(_midl, 'ValidateParameters', 'ValidateAllParameters',
+ _boolean) # /robust
+
+# MSBuild options not found in MSVS.
+_MSBuildOnly(_midl, 'ApplicationConfigurationMode', _boolean) # /app_config
+_MSBuildOnly(_midl, 'ClientStubFile', _file_name) # /cstub
+_MSBuildOnly(_midl, 'GenerateClientFiles',
+ _Enumeration([], new=['Stub', # /client stub
+ 'None'])) # /client none
+_MSBuildOnly(_midl, 'GenerateServerFiles',
+             _Enumeration([], new=['Stub',  # /server stub
+                                   'None']))  # /server none
+_MSBuildOnly(_midl, 'LocaleID', _integer) # /lcid DECIMAL
+_MSBuildOnly(_midl, 'ServerStubFile', _file_name) # /sstub
+_MSBuildOnly(_midl, 'SuppressCompilerWarnings', _boolean) # /no_warn
+_MSBuildOnly(_midl, 'TrackerLogDirectory', _folder_name)
+_MSBuildOnly(_midl, 'TypeLibFormat',
+ _Enumeration([], new=['NewFormat', # /newtlb
+ 'OldFormat'])) # /oldtlb
+
+
+# Directives for converting VCLibrarianTool to Lib.
+# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\lib.xml" for
+# the schema of the MSBuild Lib settings.
+
+_Same(_lib, 'AdditionalDependencies', _file_list)
+_Same(_lib, 'AdditionalLibraryDirectories', _folder_list) # /LIBPATH
+_Same(_lib, 'AdditionalOptions', _string_list)
+_Same(_lib, 'ExportNamedFunctions', _string_list) # /EXPORT
+_Same(_lib, 'ForceSymbolReferences', _string) # /INCLUDE
+_Same(_lib, 'IgnoreAllDefaultLibraries', _boolean) # /NODEFAULTLIB
+_Same(_lib, 'IgnoreSpecificDefaultLibraries', _file_list) # /NODEFAULTLIB
+_Same(_lib, 'ModuleDefinitionFile', _file_name) # /DEF
+_Same(_lib, 'OutputFile', _file_name) # /OUT
+_Same(_lib, 'SuppressStartupBanner', _boolean) # /NOLOGO
+_Same(_lib, 'UseUnicodeResponseFiles', _boolean)
+_Same(_lib, 'LinkTimeCodeGeneration', _boolean) # /LTCG
+_Same(_lib, 'TargetMachine', _target_machine_enumeration)
+
+# TODO(jeanluc) _link defines the same value that gets moved to
+# ProjectReference. We may want to validate that they are consistent.
+_Moved(_lib, 'LinkLibraryDependencies', 'ProjectReference', _boolean)
+
+_MSBuildOnly(_lib, 'DisplayLibrary', _string) # /LIST Visible='false'
+_MSBuildOnly(_lib, 'ErrorReporting',
+ _Enumeration([], new=['PromptImmediately', # /ERRORREPORT:PROMPT
+ 'QueueForNextLogin', # /ERRORREPORT:QUEUE
+ 'SendErrorReport', # /ERRORREPORT:SEND
+ 'NoErrorReport'])) # /ERRORREPORT:NONE
+_MSBuildOnly(_lib, 'MinimumRequiredVersion', _string)
+_MSBuildOnly(_lib, 'Name', _file_name) # /NAME
+_MSBuildOnly(_lib, 'RemoveObjects', _file_list) # /REMOVE
+_MSBuildOnly(_lib, 'SubSystem', _subsystem_enumeration)
+_MSBuildOnly(_lib, 'TrackerLogDirectory', _folder_name)
+_MSBuildOnly(_lib, 'TreatLibWarningAsErrors', _boolean) # /WX
+_MSBuildOnly(_lib, 'Verbose', _boolean)
+
+
+# Directives for converting VCManifestTool to Mt.
+# See "c:\Program Files (x86)\MSBuild\Microsoft.Cpp\v4.0\1033\mt.xml" for
+# the schema of the MSBuild Mt settings.
+
+# Options that have the same name in MSVS and MSBuild
+_Same(_manifest, 'AdditionalManifestFiles', _file_list) # /manifest
+_Same(_manifest, 'AdditionalOptions', _string_list)
+_Same(_manifest, 'AssemblyIdentity', _string) # /identity:
+_Same(_manifest, 'ComponentFileName', _file_name) # /dll
+_Same(_manifest, 'GenerateCatalogFiles', _boolean) # /makecdfs
+_Same(_manifest, 'InputResourceManifests', _string) # /inputresource
+_Same(_manifest, 'OutputManifestFile', _file_name) # /out
+_Same(_manifest, 'RegistrarScriptFile', _file_name) # /rgs
+_Same(_manifest, 'ReplacementsFile', _file_name) # /replacements
+_Same(_manifest, 'SuppressStartupBanner', _boolean) # /nologo
+_Same(_manifest, 'TypeLibraryFile', _file_name) # /tlb:
+_Same(_manifest, 'UpdateFileHashes', _boolean) # /hashupdate
+_Same(_manifest, 'UpdateFileHashesSearchPath', _file_name)
+_Same(_manifest, 'VerboseOutput', _boolean) # /verbose
+
+# Options that have moved location.
+_MovedAndRenamed(_manifest, 'ManifestResourceFile',
+ 'ManifestResourceCompile',
+ 'ResourceOutputFileName',
+ _file_name)
+_Moved(_manifest, 'EmbedManifest', '', _boolean)
+
+# MSVS options not found in MSBuild.
+_MSVSOnly(_manifest, 'DependencyInformationFile', _file_name)
+_MSVSOnly(_manifest, 'UseFAT32Workaround', _boolean)
+_MSVSOnly(_manifest, 'UseUnicodeResponseFiles', _boolean)
+
+# MSBuild options not found in MSVS.
+_MSBuildOnly(_manifest, 'EnableDPIAwareness', _boolean)
+_MSBuildOnly(_manifest, 'GenerateCategoryTags', _boolean) # /category
+_MSBuildOnly(_manifest, 'ManifestFromManagedAssembly',
+ _file_name) # /managedassemblyname
+_MSBuildOnly(_manifest, 'OutputResourceManifests', _string) # /outputresource
+_MSBuildOnly(_manifest, 'SuppressDependencyElement', _boolean) # /nodependency
+_MSBuildOnly(_manifest, 'TrackerLogDirectory', _folder_name)
+
+
+# Directives for MASM.
+# See "$(VCTargetsPath)\BuildCustomizations\masm.xml" for the schema of the
+# MSBuild MASM settings.
+
+# Options that have the same name in MSVS and MSBuild.
+_Same(_masm, 'UseSafeExceptionHandlers', _boolean) # /safeseh
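+
+# Illustrative use of the conversion table defined above (a sketch added for
+# illustration, not part of upstream gyp; the input/output pair is taken from
+# the tests in MSVSSettings_test.py):
+#
+#   import sys
+#   import gyp.MSVSSettings as MSVSSettings
+#   msbuild = MSVSSettings.ConvertToMSBuildSettings(
+#       {'VCCLCompilerTool': {'WarningLevel': '3'}}, sys.stderr)
+#   # msbuild == {'ClCompile': {'WarningLevel': 'Level3'}}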
diff --git a/third_party/python/gyp/pylib/gyp/MSVSSettings_test.py b/third_party/python/gyp/pylib/gyp/MSVSSettings_test.py
new file mode 100755
index 0000000000..73ed25e27d
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/MSVSSettings_test.py
@@ -0,0 +1,1486 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for the MSVSSettings.py file."""
+
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+import unittest
+import gyp.MSVSSettings as MSVSSettings
+
+
+class TestSequenceFunctions(unittest.TestCase):
+
+ def setUp(self):
+ self.stderr = StringIO()
+
+ def _ExpectedWarnings(self, expected):
+ """Compares recorded lines to expected warnings."""
+ self.stderr.seek(0)
+ actual = self.stderr.read().split('\n')
+ actual = [line for line in actual if line]
+ self.assertEqual(sorted(expected), sorted(actual))
+
+ def testValidateMSVSSettings_tool_names(self):
+ """Tests that only MSVS tool names are allowed."""
+ MSVSSettings.ValidateMSVSSettings(
+ {'VCCLCompilerTool': {},
+ 'VCLinkerTool': {},
+ 'VCMIDLTool': {},
+ 'foo': {},
+ 'VCResourceCompilerTool': {},
+ 'VCLibrarianTool': {},
+ 'VCManifestTool': {},
+ 'ClCompile': {}},
+ self.stderr)
+ self._ExpectedWarnings([
+ 'Warning: unrecognized tool foo',
+ 'Warning: unrecognized tool ClCompile'])
+
+ def testValidateMSVSSettings_settings(self):
+ """Tests that for invalid MSVS settings."""
+ MSVSSettings.ValidateMSVSSettings(
+ {'VCCLCompilerTool': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2',
+ 'AdditionalOptions': ['string1', 'string2'],
+ 'AdditionalUsingDirectories': 'folder1;folder2',
+ 'AssemblerListingLocation': 'a_file_name',
+ 'AssemblerOutput': '0',
+ 'BasicRuntimeChecks': '5',
+ 'BrowseInformation': 'fdkslj',
+ 'BrowseInformationFile': 'a_file_name',
+ 'BufferSecurityCheck': 'true',
+ 'CallingConvention': '-1',
+ 'CompileAs': '1',
+ 'DebugInformationFormat': '2',
+ 'DefaultCharIsUnsigned': 'true',
+ 'Detect64BitPortabilityProblems': 'true',
+ 'DisableLanguageExtensions': 'true',
+ 'DisableSpecificWarnings': 'string1;string2',
+ 'EnableEnhancedInstructionSet': '1',
+ 'EnableFiberSafeOptimizations': 'true',
+ 'EnableFunctionLevelLinking': 'true',
+ 'EnableIntrinsicFunctions': 'true',
+ 'EnablePREfast': 'true',
+ 'Enableprefast': 'bogus',
+ 'ErrorReporting': '1',
+ 'ExceptionHandling': '1',
+ 'ExpandAttributedSource': 'true',
+ 'FavorSizeOrSpeed': '1',
+ 'FloatingPointExceptions': 'true',
+ 'FloatingPointModel': '1',
+ 'ForceConformanceInForLoopScope': 'true',
+ 'ForcedIncludeFiles': 'file1;file2',
+ 'ForcedUsingFiles': 'file1;file2',
+ 'GeneratePreprocessedFile': '1',
+ 'GenerateXMLDocumentationFiles': 'true',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InlineFunctionExpansion': '1',
+ 'KeepComments': 'true',
+ 'MinimalRebuild': 'true',
+ 'ObjectFile': 'a_file_name',
+ 'OmitDefaultLibName': 'true',
+ 'OmitFramePointers': 'true',
+ 'OpenMP': 'true',
+ 'Optimization': '1',
+ 'PrecompiledHeaderFile': 'a_file_name',
+ 'PrecompiledHeaderThrough': 'a_file_name',
+ 'PreprocessorDefinitions': 'string1;string2',
+ 'ProgramDataBaseFileName': 'a_file_name',
+ 'RuntimeLibrary': '1',
+ 'RuntimeTypeInfo': 'true',
+ 'ShowIncludes': 'true',
+ 'SmallerTypeCheck': 'true',
+ 'StringPooling': 'true',
+ 'StructMemberAlignment': '1',
+ 'SuppressStartupBanner': 'true',
+ 'TreatWChar_tAsBuiltInType': 'true',
+ 'UndefineAllPreprocessorDefinitions': 'true',
+ 'UndefinePreprocessorDefinitions': 'string1;string2',
+ 'UseFullPaths': 'true',
+ 'UsePrecompiledHeader': '1',
+ 'UseUnicodeResponseFiles': 'true',
+ 'WarnAsError': 'true',
+ 'WarningLevel': '1',
+ 'WholeProgramOptimization': 'true',
+ 'XMLDocumentationFileName': 'a_file_name',
+ 'ZZXYZ': 'bogus'},
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': 'file1;file2',
+ 'AdditionalDependencies_excluded': 'file3',
+ 'AdditionalLibraryDirectories': 'folder1;folder2',
+ 'AdditionalManifestDependencies': 'file1;file2',
+ 'AdditionalOptions': 'a string1',
+ 'AddModuleNamesToAssembly': 'file1;file2',
+ 'AllowIsolation': 'true',
+ 'AssemblyDebug': '2',
+ 'AssemblyLinkResource': 'file1;file2',
+ 'BaseAddress': 'a string1',
+ 'CLRImageType': '2',
+ 'CLRThreadAttribute': '2',
+ 'CLRUnmanagedCodeCheck': 'true',
+ 'DataExecutionPrevention': '2',
+ 'DelayLoadDLLs': 'file1;file2',
+ 'DelaySign': 'true',
+ 'Driver': '2',
+ 'EmbedManagedResourceFile': 'file1;file2',
+ 'EnableCOMDATFolding': '2',
+ 'EnableUAC': 'true',
+ 'EntryPointSymbol': 'a string1',
+ 'ErrorReporting': '2',
+ 'FixedBaseAddress': '2',
+ 'ForceSymbolReferences': 'file1;file2',
+ 'FunctionOrder': 'a_file_name',
+ 'GenerateDebugInformation': 'true',
+ 'GenerateManifest': 'true',
+ 'GenerateMapFile': 'true',
+ 'HeapCommitSize': 'a string1',
+ 'HeapReserveSize': 'a string1',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreDefaultLibraryNames': 'file1;file2',
+ 'IgnoreEmbeddedIDL': 'true',
+ 'IgnoreImportLibrary': 'true',
+ 'ImportLibrary': 'a_file_name',
+ 'KeyContainer': 'a_file_name',
+ 'KeyFile': 'a_file_name',
+ 'LargeAddressAware': '2',
+ 'LinkIncremental': '2',
+ 'LinkLibraryDependencies': 'true',
+ 'LinkTimeCodeGeneration': '2',
+ 'ManifestFile': 'a_file_name',
+ 'MapExports': 'true',
+ 'MapFileName': 'a_file_name',
+ 'MergedIDLBaseFileName': 'a_file_name',
+ 'MergeSections': 'a string1',
+ 'MidlCommandFile': 'a_file_name',
+ 'ModuleDefinitionFile': 'a_file_name',
+ 'OptimizeForWindows98': '1',
+ 'OptimizeReferences': '2',
+ 'OutputFile': 'a_file_name',
+ 'PerUserRedirection': 'true',
+ 'Profile': 'true',
+ 'ProfileGuidedDatabase': 'a_file_name',
+ 'ProgramDatabaseFile': 'a_file_name',
+ 'RandomizedBaseAddress': '2',
+ 'RegisterOutput': 'true',
+ 'ResourceOnlyDLL': 'true',
+ 'SetChecksum': 'true',
+ 'ShowProgress': '2',
+ 'StackCommitSize': 'a string1',
+ 'StackReserveSize': 'a string1',
+ 'StripPrivateSymbols': 'a_file_name',
+ 'SubSystem': '2',
+ 'SupportUnloadOfDelayLoadedDLL': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'SwapRunFromCD': 'true',
+ 'SwapRunFromNet': 'true',
+ 'TargetMachine': '2',
+ 'TerminalServerAware': '2',
+ 'TurnOffAssemblyGeneration': 'true',
+ 'TypeLibraryFile': 'a_file_name',
+ 'TypeLibraryResourceID': '33',
+ 'UACExecutionLevel': '2',
+ 'UACUIAccess': 'true',
+ 'UseLibraryDependencyInputs': 'true',
+ 'UseUnicodeResponseFiles': 'true',
+ 'Version': 'a string1'},
+ 'VCMIDLTool': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2',
+ 'AdditionalOptions': 'a string1',
+ 'CPreprocessOptions': 'a string1',
+ 'DefaultCharType': '1',
+ 'DLLDataFileName': 'a_file_name',
+ 'EnableErrorChecks': '1',
+ 'ErrorCheckAllocations': 'true',
+ 'ErrorCheckBounds': 'true',
+ 'ErrorCheckEnumRange': 'true',
+ 'ErrorCheckRefPointers': 'true',
+ 'ErrorCheckStubData': 'true',
+ 'GenerateStublessProxies': 'true',
+ 'GenerateTypeLibrary': 'true',
+ 'HeaderFileName': 'a_file_name',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InterfaceIdentifierFileName': 'a_file_name',
+ 'MkTypLibCompatible': 'true',
+ 'notgood': 'bogus',
+ 'OutputDirectory': 'a string1',
+ 'PreprocessorDefinitions': 'string1;string2',
+ 'ProxyFileName': 'a_file_name',
+ 'RedirectOutputAndErrors': 'a_file_name',
+ 'StructMemberAlignment': '1',
+ 'SuppressStartupBanner': 'true',
+ 'TargetEnvironment': '1',
+ 'TypeLibraryName': 'a_file_name',
+ 'UndefinePreprocessorDefinitions': 'string1;string2',
+ 'ValidateParameters': 'true',
+ 'WarnAsError': 'true',
+ 'WarningLevel': '1'},
+ 'VCResourceCompilerTool': {
+ 'AdditionalOptions': 'a string1',
+ 'AdditionalIncludeDirectories': 'folder1;folder2',
+ 'Culture': '1003',
+ 'IgnoreStandardIncludePath': 'true',
+ 'notgood2': 'bogus',
+ 'PreprocessorDefinitions': 'string1;string2',
+ 'ResourceOutputFileName': 'a string1',
+ 'ShowProgress': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'UndefinePreprocessorDefinitions': 'string1;string2'},
+ 'VCLibrarianTool': {
+ 'AdditionalDependencies': 'file1;file2',
+ 'AdditionalLibraryDirectories': 'folder1;folder2',
+ 'AdditionalOptions': 'a string1',
+ 'ExportNamedFunctions': 'string1;string2',
+ 'ForceSymbolReferences': 'a string1',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreSpecificDefaultLibraries': 'file1;file2',
+ 'LinkLibraryDependencies': 'true',
+ 'ModuleDefinitionFile': 'a_file_name',
+ 'OutputFile': 'a_file_name',
+ 'SuppressStartupBanner': 'true',
+ 'UseUnicodeResponseFiles': 'true'},
+ 'VCManifestTool': {
+ 'AdditionalManifestFiles': 'file1;file2',
+ 'AdditionalOptions': 'a string1',
+ 'AssemblyIdentity': 'a string1',
+ 'ComponentFileName': 'a_file_name',
+ 'DependencyInformationFile': 'a_file_name',
+ 'GenerateCatalogFiles': 'true',
+ 'InputResourceManifests': 'a string1',
+ 'ManifestResourceFile': 'a_file_name',
+ 'OutputManifestFile': 'a_file_name',
+ 'RegistrarScriptFile': 'a_file_name',
+ 'ReplacementsFile': 'a_file_name',
+ 'SuppressStartupBanner': 'true',
+ 'TypeLibraryFile': 'a_file_name',
+ 'UpdateFileHashes': 'truel',
+ 'UpdateFileHashesSearchPath': 'a_file_name',
+ 'UseFAT32Workaround': 'true',
+ 'UseUnicodeResponseFiles': 'true',
+ 'VerboseOutput': 'true'}},
+ self.stderr)
+ self._ExpectedWarnings([
+ 'Warning: for VCCLCompilerTool/BasicRuntimeChecks, '
+ 'index value (5) not in expected range [0, 4)',
+ 'Warning: for VCCLCompilerTool/BrowseInformation, '
+ "invalid literal for int() with base 10: 'fdkslj'",
+ 'Warning: for VCCLCompilerTool/CallingConvention, '
+ 'index value (-1) not in expected range [0, 4)',
+ 'Warning: for VCCLCompilerTool/DebugInformationFormat, '
+ 'converted value for 2 not specified.',
+ 'Warning: unrecognized setting VCCLCompilerTool/Enableprefast',
+ 'Warning: unrecognized setting VCCLCompilerTool/ZZXYZ',
+ 'Warning: for VCLinkerTool/TargetMachine, '
+ 'converted value for 2 not specified.',
+ 'Warning: unrecognized setting VCMIDLTool/notgood',
+ 'Warning: unrecognized setting VCResourceCompilerTool/notgood2',
+        'Warning: for VCManifestTool/UpdateFileHashes, '
+        "expected bool; got 'truel'"])
+
+ def testValidateMSBuildSettings_settings(self):
+ """Tests that for invalid MSBuild settings."""
+ MSVSSettings.ValidateMSBuildSettings(
+ {'ClCompile': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2',
+ 'AdditionalOptions': ['string1', 'string2'],
+ 'AdditionalUsingDirectories': 'folder1;folder2',
+ 'AssemblerListingLocation': 'a_file_name',
+ 'AssemblerOutput': 'NoListing',
+ 'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
+ 'BrowseInformation': 'false',
+ 'BrowseInformationFile': 'a_file_name',
+ 'BufferSecurityCheck': 'true',
+ 'BuildingInIDE': 'true',
+ 'CallingConvention': 'Cdecl',
+ 'CompileAs': 'CompileAsC',
+ 'CompileAsManaged': 'true',
+ 'CreateHotpatchableImage': 'true',
+ 'DebugInformationFormat': 'ProgramDatabase',
+ 'DisableLanguageExtensions': 'true',
+ 'DisableSpecificWarnings': 'string1;string2',
+ 'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
+ 'EnableFiberSafeOptimizations': 'true',
+ 'EnablePREfast': 'true',
+ 'Enableprefast': 'bogus',
+ 'ErrorReporting': 'Prompt',
+ 'ExceptionHandling': 'SyncCThrow',
+ 'ExpandAttributedSource': 'true',
+ 'FavorSizeOrSpeed': 'Neither',
+ 'FloatingPointExceptions': 'true',
+ 'FloatingPointModel': 'Precise',
+ 'ForceConformanceInForLoopScope': 'true',
+ 'ForcedIncludeFiles': 'file1;file2',
+ 'ForcedUsingFiles': 'file1;file2',
+ 'FunctionLevelLinking': 'false',
+ 'GenerateXMLDocumentationFiles': 'true',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InlineFunctionExpansion': 'OnlyExplicitInline',
+ 'IntrinsicFunctions': 'false',
+ 'MinimalRebuild': 'true',
+ 'MultiProcessorCompilation': 'true',
+ 'ObjectFileName': 'a_file_name',
+ 'OmitDefaultLibName': 'true',
+ 'OmitFramePointers': 'true',
+ 'OpenMPSupport': 'true',
+ 'Optimization': 'Disabled',
+ 'PrecompiledHeader': 'NotUsing',
+ 'PrecompiledHeaderFile': 'a_file_name',
+ 'PrecompiledHeaderOutputFile': 'a_file_name',
+ 'PreprocessKeepComments': 'true',
+ 'PreprocessorDefinitions': 'string1;string2',
+ 'PreprocessOutputPath': 'a string1',
+ 'PreprocessSuppressLineNumbers': 'false',
+ 'PreprocessToFile': 'false',
+ 'ProcessorNumber': '33',
+ 'ProgramDataBaseFileName': 'a_file_name',
+ 'RuntimeLibrary': 'MultiThreaded',
+ 'RuntimeTypeInfo': 'true',
+ 'ShowIncludes': 'true',
+ 'SmallerTypeCheck': 'true',
+ 'StringPooling': 'true',
+ 'StructMemberAlignment': '1Byte',
+ 'SuppressStartupBanner': 'true',
+ 'TrackerLogDirectory': 'a_folder',
+ 'TreatSpecificWarningsAsErrors': 'string1;string2',
+ 'TreatWarningAsError': 'true',
+ 'TreatWChar_tAsBuiltInType': 'true',
+ 'UndefineAllPreprocessorDefinitions': 'true',
+ 'UndefinePreprocessorDefinitions': 'string1;string2',
+ 'UseFullPaths': 'true',
+ 'UseUnicodeForAssemblerListing': 'true',
+ 'WarningLevel': 'TurnOffAllWarnings',
+ 'WholeProgramOptimization': 'true',
+ 'XMLDocumentationFileName': 'a_file_name',
+ 'ZZXYZ': 'bogus'},
+ 'Link': {
+ 'AdditionalDependencies': 'file1;file2',
+ 'AdditionalLibraryDirectories': 'folder1;folder2',
+ 'AdditionalManifestDependencies': 'file1;file2',
+ 'AdditionalOptions': 'a string1',
+ 'AddModuleNamesToAssembly': 'file1;file2',
+ 'AllowIsolation': 'true',
+ 'AssemblyDebug': '',
+ 'AssemblyLinkResource': 'file1;file2',
+ 'BaseAddress': 'a string1',
+ 'BuildingInIDE': 'true',
+ 'CLRImageType': 'ForceIJWImage',
+ 'CLRSupportLastError': 'Enabled',
+ 'CLRThreadAttribute': 'MTAThreadingAttribute',
+ 'CLRUnmanagedCodeCheck': 'true',
+ 'CreateHotPatchableImage': 'X86Image',
+ 'DataExecutionPrevention': 'false',
+ 'DelayLoadDLLs': 'file1;file2',
+ 'DelaySign': 'true',
+ 'Driver': 'NotSet',
+ 'EmbedManagedResourceFile': 'file1;file2',
+ 'EnableCOMDATFolding': 'false',
+ 'EnableUAC': 'true',
+ 'EntryPointSymbol': 'a string1',
+ 'FixedBaseAddress': 'false',
+ 'ForceFileOutput': 'Enabled',
+ 'ForceSymbolReferences': 'file1;file2',
+ 'FunctionOrder': 'a_file_name',
+ 'GenerateDebugInformation': 'true',
+ 'GenerateMapFile': 'true',
+ 'HeapCommitSize': 'a string1',
+ 'HeapReserveSize': 'a string1',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreEmbeddedIDL': 'true',
+ 'IgnoreSpecificDefaultLibraries': 'a_file_list',
+ 'ImageHasSafeExceptionHandlers': 'true',
+ 'ImportLibrary': 'a_file_name',
+ 'KeyContainer': 'a_file_name',
+ 'KeyFile': 'a_file_name',
+ 'LargeAddressAware': 'false',
+ 'LinkDLL': 'true',
+ 'LinkErrorReporting': 'SendErrorReport',
+ 'LinkStatus': 'true',
+ 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
+ 'ManifestFile': 'a_file_name',
+ 'MapExports': 'true',
+ 'MapFileName': 'a_file_name',
+ 'MergedIDLBaseFileName': 'a_file_name',
+ 'MergeSections': 'a string1',
+ 'MidlCommandFile': 'a_file_name',
+ 'MinimumRequiredVersion': 'a string1',
+ 'ModuleDefinitionFile': 'a_file_name',
+ 'MSDOSStubFileName': 'a_file_name',
+ 'NoEntryPoint': 'true',
+ 'OptimizeReferences': 'false',
+ 'OutputFile': 'a_file_name',
+ 'PerUserRedirection': 'true',
+ 'PreventDllBinding': 'true',
+ 'Profile': 'true',
+ 'ProfileGuidedDatabase': 'a_file_name',
+ 'ProgramDatabaseFile': 'a_file_name',
+ 'RandomizedBaseAddress': 'false',
+ 'RegisterOutput': 'true',
+ 'SectionAlignment': '33',
+ 'SetChecksum': 'true',
+ 'ShowProgress': 'LinkVerboseREF',
+ 'SpecifySectionAttributes': 'a string1',
+ 'StackCommitSize': 'a string1',
+ 'StackReserveSize': 'a string1',
+ 'StripPrivateSymbols': 'a_file_name',
+ 'SubSystem': 'Console',
+ 'SupportNobindOfDelayLoadedDLL': 'true',
+ 'SupportUnloadOfDelayLoadedDLL': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'SwapRunFromCD': 'true',
+ 'SwapRunFromNET': 'true',
+ 'TargetMachine': 'MachineX86',
+ 'TerminalServerAware': 'false',
+ 'TrackerLogDirectory': 'a_folder',
+ 'TreatLinkerWarningAsErrors': 'true',
+ 'TurnOffAssemblyGeneration': 'true',
+ 'TypeLibraryFile': 'a_file_name',
+ 'TypeLibraryResourceID': '33',
+ 'UACExecutionLevel': 'AsInvoker',
+ 'UACUIAccess': 'true',
+ 'Version': 'a string1'},
+ 'ResourceCompile': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2',
+ 'AdditionalOptions': 'a string1',
+ 'Culture': '0x236',
+ 'IgnoreStandardIncludePath': 'true',
+ 'NullTerminateStrings': 'true',
+ 'PreprocessorDefinitions': 'string1;string2',
+ 'ResourceOutputFileName': 'a string1',
+ 'ShowProgress': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'TrackerLogDirectory': 'a_folder',
+ 'UndefinePreprocessorDefinitions': 'string1;string2'},
+ 'Midl': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2',
+ 'AdditionalOptions': 'a string1',
+ 'ApplicationConfigurationMode': 'true',
+ 'ClientStubFile': 'a_file_name',
+ 'CPreprocessOptions': 'a string1',
+ 'DefaultCharType': 'Signed',
+ 'DllDataFileName': 'a_file_name',
+ 'EnableErrorChecks': 'EnableCustom',
+ 'ErrorCheckAllocations': 'true',
+ 'ErrorCheckBounds': 'true',
+ 'ErrorCheckEnumRange': 'true',
+ 'ErrorCheckRefPointers': 'true',
+ 'ErrorCheckStubData': 'true',
+ 'GenerateClientFiles': 'Stub',
+ 'GenerateServerFiles': 'None',
+ 'GenerateStublessProxies': 'true',
+ 'GenerateTypeLibrary': 'true',
+ 'HeaderFileName': 'a_file_name',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InterfaceIdentifierFileName': 'a_file_name',
+ 'LocaleID': '33',
+ 'MkTypLibCompatible': 'true',
+ 'OutputDirectory': 'a string1',
+ 'PreprocessorDefinitions': 'string1;string2',
+ 'ProxyFileName': 'a_file_name',
+ 'RedirectOutputAndErrors': 'a_file_name',
+ 'ServerStubFile': 'a_file_name',
+ 'StructMemberAlignment': 'NotSet',
+ 'SuppressCompilerWarnings': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'TargetEnvironment': 'Itanium',
+ 'TrackerLogDirectory': 'a_folder',
+ 'TypeLibFormat': 'NewFormat',
+ 'TypeLibraryName': 'a_file_name',
+ 'UndefinePreprocessorDefinitions': 'string1;string2',
+ 'ValidateAllParameters': 'true',
+ 'WarnAsError': 'true',
+ 'WarningLevel': '1'},
+ 'Lib': {
+ 'AdditionalDependencies': 'file1;file2',
+ 'AdditionalLibraryDirectories': 'folder1;folder2',
+ 'AdditionalOptions': 'a string1',
+ 'DisplayLibrary': 'a string1',
+ 'ErrorReporting': 'PromptImmediately',
+ 'ExportNamedFunctions': 'string1;string2',
+ 'ForceSymbolReferences': 'a string1',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreSpecificDefaultLibraries': 'file1;file2',
+ 'LinkTimeCodeGeneration': 'true',
+ 'MinimumRequiredVersion': 'a string1',
+ 'ModuleDefinitionFile': 'a_file_name',
+ 'Name': 'a_file_name',
+ 'OutputFile': 'a_file_name',
+ 'RemoveObjects': 'file1;file2',
+ 'SubSystem': 'Console',
+ 'SuppressStartupBanner': 'true',
+ 'TargetMachine': 'MachineX86i',
+ 'TrackerLogDirectory': 'a_folder',
+ 'TreatLibWarningAsErrors': 'true',
+ 'UseUnicodeResponseFiles': 'true',
+ 'Verbose': 'true'},
+ 'Manifest': {
+ 'AdditionalManifestFiles': 'file1;file2',
+ 'AdditionalOptions': 'a string1',
+ 'AssemblyIdentity': 'a string1',
+ 'ComponentFileName': 'a_file_name',
+ 'EnableDPIAwareness': 'fal',
+ 'GenerateCatalogFiles': 'truel',
+ 'GenerateCategoryTags': 'true',
+ 'InputResourceManifests': 'a string1',
+ 'ManifestFromManagedAssembly': 'a_file_name',
+ 'notgood3': 'bogus',
+ 'OutputManifestFile': 'a_file_name',
+ 'OutputResourceManifests': 'a string1',
+ 'RegistrarScriptFile': 'a_file_name',
+ 'ReplacementsFile': 'a_file_name',
+ 'SuppressDependencyElement': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'TrackerLogDirectory': 'a_folder',
+ 'TypeLibraryFile': 'a_file_name',
+ 'UpdateFileHashes': 'true',
+ 'UpdateFileHashesSearchPath': 'a_file_name',
+ 'VerboseOutput': 'true'},
+ 'ProjectReference': {
+ 'LinkLibraryDependencies': 'true',
+ 'UseLibraryDependencyInputs': 'true'},
+ 'ManifestResourceCompile': {
+ 'ResourceOutputFileName': 'a_file_name'},
+ '': {
+ 'EmbedManifest': 'true',
+ 'GenerateManifest': 'true',
+ 'IgnoreImportLibrary': 'true',
+ 'LinkIncremental': 'false'}},
+ self.stderr)
+ self._ExpectedWarnings([
+ 'Warning: unrecognized setting ClCompile/Enableprefast',
+ 'Warning: unrecognized setting ClCompile/ZZXYZ',
+ 'Warning: unrecognized setting Manifest/notgood3',
+ 'Warning: for Manifest/GenerateCatalogFiles, '
+ "expected bool; got 'truel'",
+ 'Warning: for Lib/TargetMachine, unrecognized enumerated value '
+ 'MachineX86i',
+ "Warning: for Manifest/EnableDPIAwareness, expected bool; got 'fal'"])
+
+ def testConvertToMSBuildSettings_empty(self):
+ """Tests an empty conversion."""
+ msvs_settings = {}
+ expected_msbuild_settings = {}
+ actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
+ msvs_settings,
+ self.stderr)
+ self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
+ self._ExpectedWarnings([])
+
+ def testConvertToMSBuildSettings_minimal(self):
+ """Tests a minimal conversion."""
+ msvs_settings = {
+ 'VCCLCompilerTool': {
+ 'AdditionalIncludeDirectories': 'dir1',
+ 'AdditionalOptions': '/foo',
+ 'BasicRuntimeChecks': '0',
+ },
+ 'VCLinkerTool': {
+ 'LinkTimeCodeGeneration': '1',
+ 'ErrorReporting': '1',
+ 'DataExecutionPrevention': '2',
+ },
+ }
+ expected_msbuild_settings = {
+ 'ClCompile': {
+ 'AdditionalIncludeDirectories': 'dir1',
+ 'AdditionalOptions': '/foo',
+ 'BasicRuntimeChecks': 'Default',
+ },
+ 'Link': {
+ 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
+ 'LinkErrorReporting': 'PromptImmediately',
+ 'DataExecutionPrevention': 'true',
+ },
+ }
+ actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
+ msvs_settings,
+ self.stderr)
+ self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
+ self._ExpectedWarnings([])
+
+ def testConvertToMSBuildSettings_warnings(self):
+ """Tests conversion that generates warnings."""
+ msvs_settings = {
+ 'VCCLCompilerTool': {
+ 'AdditionalIncludeDirectories': '1',
+ 'AdditionalOptions': '2',
+ # These are incorrect values:
+ 'BasicRuntimeChecks': '12',
+ 'BrowseInformation': '21',
+ 'UsePrecompiledHeader': '13',
+ 'GeneratePreprocessedFile': '14'},
+ 'VCLinkerTool': {
+ # These are incorrect values:
+ 'Driver': '10',
+ 'LinkTimeCodeGeneration': '31',
+ 'ErrorReporting': '21',
+ 'FixedBaseAddress': '6'},
+ 'VCResourceCompilerTool': {
+ # Custom
+ 'Culture': '1003'}}
+ expected_msbuild_settings = {
+ 'ClCompile': {
+ 'AdditionalIncludeDirectories': '1',
+ 'AdditionalOptions': '2'},
+ 'Link': {},
+ 'ResourceCompile': {
+ # Custom
+ 'Culture': '0x03eb'}}
+ actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
+ msvs_settings,
+ self.stderr)
+ self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
+ self._ExpectedWarnings([
+ 'Warning: while converting VCCLCompilerTool/BasicRuntimeChecks to '
+ 'MSBuild, index value (12) not in expected range [0, 4)',
+ 'Warning: while converting VCCLCompilerTool/BrowseInformation to '
+ 'MSBuild, index value (21) not in expected range [0, 3)',
+ 'Warning: while converting VCCLCompilerTool/UsePrecompiledHeader to '
+ 'MSBuild, index value (13) not in expected range [0, 3)',
+ 'Warning: while converting VCCLCompilerTool/GeneratePreprocessedFile to '
+ 'MSBuild, value must be one of [0, 1, 2]; got 14',
+
+ 'Warning: while converting VCLinkerTool/Driver to '
+ 'MSBuild, index value (10) not in expected range [0, 4)',
+ 'Warning: while converting VCLinkerTool/LinkTimeCodeGeneration to '
+ 'MSBuild, index value (31) not in expected range [0, 5)',
+ 'Warning: while converting VCLinkerTool/ErrorReporting to '
+ 'MSBuild, index value (21) not in expected range [0, 3)',
+ 'Warning: while converting VCLinkerTool/FixedBaseAddress to '
+ 'MSBuild, index value (6) not in expected range [0, 3)',
+ ])
+
+ def testConvertToMSBuildSettings_full_synthetic(self):
+ """Tests conversion of all the MSBuild settings."""
+ msvs_settings = {
+ 'VCCLCompilerTool': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
+ 'AdditionalOptions': 'a_string',
+ 'AdditionalUsingDirectories': 'folder1;folder2;folder3',
+ 'AssemblerListingLocation': 'a_file_name',
+ 'AssemblerOutput': '0',
+ 'BasicRuntimeChecks': '1',
+ 'BrowseInformation': '2',
+ 'BrowseInformationFile': 'a_file_name',
+ 'BufferSecurityCheck': 'true',
+ 'CallingConvention': '0',
+ 'CompileAs': '1',
+ 'DebugInformationFormat': '4',
+ 'DefaultCharIsUnsigned': 'true',
+ 'Detect64BitPortabilityProblems': 'true',
+ 'DisableLanguageExtensions': 'true',
+ 'DisableSpecificWarnings': 'd1;d2;d3',
+ 'EnableEnhancedInstructionSet': '0',
+ 'EnableFiberSafeOptimizations': 'true',
+ 'EnableFunctionLevelLinking': 'true',
+ 'EnableIntrinsicFunctions': 'true',
+ 'EnablePREfast': 'true',
+ 'ErrorReporting': '1',
+ 'ExceptionHandling': '2',
+ 'ExpandAttributedSource': 'true',
+ 'FavorSizeOrSpeed': '0',
+ 'FloatingPointExceptions': 'true',
+ 'FloatingPointModel': '1',
+ 'ForceConformanceInForLoopScope': 'true',
+ 'ForcedIncludeFiles': 'file1;file2;file3',
+ 'ForcedUsingFiles': 'file1;file2;file3',
+ 'GeneratePreprocessedFile': '1',
+ 'GenerateXMLDocumentationFiles': 'true',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InlineFunctionExpansion': '2',
+ 'KeepComments': 'true',
+ 'MinimalRebuild': 'true',
+ 'ObjectFile': 'a_file_name',
+ 'OmitDefaultLibName': 'true',
+ 'OmitFramePointers': 'true',
+ 'OpenMP': 'true',
+ 'Optimization': '3',
+ 'PrecompiledHeaderFile': 'a_file_name',
+ 'PrecompiledHeaderThrough': 'a_file_name',
+ 'PreprocessorDefinitions': 'd1;d2;d3',
+ 'ProgramDataBaseFileName': 'a_file_name',
+ 'RuntimeLibrary': '0',
+ 'RuntimeTypeInfo': 'true',
+ 'ShowIncludes': 'true',
+ 'SmallerTypeCheck': 'true',
+ 'StringPooling': 'true',
+ 'StructMemberAlignment': '1',
+ 'SuppressStartupBanner': 'true',
+ 'TreatWChar_tAsBuiltInType': 'true',
+ 'UndefineAllPreprocessorDefinitions': 'true',
+ 'UndefinePreprocessorDefinitions': 'd1;d2;d3',
+ 'UseFullPaths': 'true',
+ 'UsePrecompiledHeader': '1',
+ 'UseUnicodeResponseFiles': 'true',
+ 'WarnAsError': 'true',
+ 'WarningLevel': '2',
+ 'WholeProgramOptimization': 'true',
+ 'XMLDocumentationFileName': 'a_file_name'},
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': 'file1;file2;file3',
+ 'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
+ 'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
+ 'AdditionalManifestDependencies': 'file1;file2;file3',
+ 'AdditionalOptions': 'a_string',
+ 'AddModuleNamesToAssembly': 'file1;file2;file3',
+ 'AllowIsolation': 'true',
+ 'AssemblyDebug': '0',
+ 'AssemblyLinkResource': 'file1;file2;file3',
+ 'BaseAddress': 'a_string',
+ 'CLRImageType': '1',
+ 'CLRThreadAttribute': '2',
+ 'CLRUnmanagedCodeCheck': 'true',
+ 'DataExecutionPrevention': '0',
+ 'DelayLoadDLLs': 'file1;file2;file3',
+ 'DelaySign': 'true',
+ 'Driver': '1',
+ 'EmbedManagedResourceFile': 'file1;file2;file3',
+ 'EnableCOMDATFolding': '0',
+ 'EnableUAC': 'true',
+ 'EntryPointSymbol': 'a_string',
+ 'ErrorReporting': '0',
+ 'FixedBaseAddress': '1',
+ 'ForceSymbolReferences': 'file1;file2;file3',
+ 'FunctionOrder': 'a_file_name',
+ 'GenerateDebugInformation': 'true',
+ 'GenerateManifest': 'true',
+ 'GenerateMapFile': 'true',
+ 'HeapCommitSize': 'a_string',
+ 'HeapReserveSize': 'a_string',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreDefaultLibraryNames': 'file1;file2;file3',
+ 'IgnoreEmbeddedIDL': 'true',
+ 'IgnoreImportLibrary': 'true',
+ 'ImportLibrary': 'a_file_name',
+ 'KeyContainer': 'a_file_name',
+ 'KeyFile': 'a_file_name',
+ 'LargeAddressAware': '2',
+ 'LinkIncremental': '1',
+ 'LinkLibraryDependencies': 'true',
+ 'LinkTimeCodeGeneration': '2',
+ 'ManifestFile': 'a_file_name',
+ 'MapExports': 'true',
+ 'MapFileName': 'a_file_name',
+ 'MergedIDLBaseFileName': 'a_file_name',
+ 'MergeSections': 'a_string',
+ 'MidlCommandFile': 'a_file_name',
+ 'ModuleDefinitionFile': 'a_file_name',
+ 'OptimizeForWindows98': '1',
+ 'OptimizeReferences': '0',
+ 'OutputFile': 'a_file_name',
+ 'PerUserRedirection': 'true',
+ 'Profile': 'true',
+ 'ProfileGuidedDatabase': 'a_file_name',
+ 'ProgramDatabaseFile': 'a_file_name',
+ 'RandomizedBaseAddress': '1',
+ 'RegisterOutput': 'true',
+ 'ResourceOnlyDLL': 'true',
+ 'SetChecksum': 'true',
+ 'ShowProgress': '0',
+ 'StackCommitSize': 'a_string',
+ 'StackReserveSize': 'a_string',
+ 'StripPrivateSymbols': 'a_file_name',
+ 'SubSystem': '2',
+ 'SupportUnloadOfDelayLoadedDLL': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'SwapRunFromCD': 'true',
+ 'SwapRunFromNet': 'true',
+ 'TargetMachine': '3',
+ 'TerminalServerAware': '2',
+ 'TurnOffAssemblyGeneration': 'true',
+ 'TypeLibraryFile': 'a_file_name',
+ 'TypeLibraryResourceID': '33',
+ 'UACExecutionLevel': '1',
+ 'UACUIAccess': 'true',
+ 'UseLibraryDependencyInputs': 'false',
+ 'UseUnicodeResponseFiles': 'true',
+ 'Version': 'a_string'},
+ 'VCResourceCompilerTool': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
+ 'AdditionalOptions': 'a_string',
+ 'Culture': '1003',
+ 'IgnoreStandardIncludePath': 'true',
+ 'PreprocessorDefinitions': 'd1;d2;d3',
+ 'ResourceOutputFileName': 'a_string',
+ 'ShowProgress': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
+ 'VCMIDLTool': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
+ 'AdditionalOptions': 'a_string',
+ 'CPreprocessOptions': 'a_string',
+ 'DefaultCharType': '0',
+ 'DLLDataFileName': 'a_file_name',
+ 'EnableErrorChecks': '2',
+ 'ErrorCheckAllocations': 'true',
+ 'ErrorCheckBounds': 'true',
+ 'ErrorCheckEnumRange': 'true',
+ 'ErrorCheckRefPointers': 'true',
+ 'ErrorCheckStubData': 'true',
+ 'GenerateStublessProxies': 'true',
+ 'GenerateTypeLibrary': 'true',
+ 'HeaderFileName': 'a_file_name',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InterfaceIdentifierFileName': 'a_file_name',
+ 'MkTypLibCompatible': 'true',
+ 'OutputDirectory': 'a_string',
+ 'PreprocessorDefinitions': 'd1;d2;d3',
+ 'ProxyFileName': 'a_file_name',
+ 'RedirectOutputAndErrors': 'a_file_name',
+ 'StructMemberAlignment': '3',
+ 'SuppressStartupBanner': 'true',
+ 'TargetEnvironment': '1',
+ 'TypeLibraryName': 'a_file_name',
+ 'UndefinePreprocessorDefinitions': 'd1;d2;d3',
+ 'ValidateParameters': 'true',
+ 'WarnAsError': 'true',
+ 'WarningLevel': '4'},
+ 'VCLibrarianTool': {
+ 'AdditionalDependencies': 'file1;file2;file3',
+ 'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
+ 'AdditionalLibraryDirectories_excluded': 'folder1;folder2;folder3',
+ 'AdditionalOptions': 'a_string',
+ 'ExportNamedFunctions': 'd1;d2;d3',
+ 'ForceSymbolReferences': 'a_string',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
+ 'LinkLibraryDependencies': 'true',
+ 'ModuleDefinitionFile': 'a_file_name',
+ 'OutputFile': 'a_file_name',
+ 'SuppressStartupBanner': 'true',
+ 'UseUnicodeResponseFiles': 'true'},
+ 'VCManifestTool': {
+ 'AdditionalManifestFiles': 'file1;file2;file3',
+ 'AdditionalOptions': 'a_string',
+ 'AssemblyIdentity': 'a_string',
+ 'ComponentFileName': 'a_file_name',
+ 'DependencyInformationFile': 'a_file_name',
+ 'EmbedManifest': 'true',
+ 'GenerateCatalogFiles': 'true',
+ 'InputResourceManifests': 'a_string',
+ 'ManifestResourceFile': 'my_name',
+ 'OutputManifestFile': 'a_file_name',
+ 'RegistrarScriptFile': 'a_file_name',
+ 'ReplacementsFile': 'a_file_name',
+ 'SuppressStartupBanner': 'true',
+ 'TypeLibraryFile': 'a_file_name',
+ 'UpdateFileHashes': 'true',
+ 'UpdateFileHashesSearchPath': 'a_file_name',
+ 'UseFAT32Workaround': 'true',
+ 'UseUnicodeResponseFiles': 'true',
+ 'VerboseOutput': 'true'}}
+ expected_msbuild_settings = {
+ 'ClCompile': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
+ 'AdditionalOptions': 'a_string /J',
+ 'AdditionalUsingDirectories': 'folder1;folder2;folder3',
+ 'AssemblerListingLocation': 'a_file_name',
+ 'AssemblerOutput': 'NoListing',
+ 'BasicRuntimeChecks': 'StackFrameRuntimeCheck',
+ 'BrowseInformation': 'true',
+ 'BrowseInformationFile': 'a_file_name',
+ 'BufferSecurityCheck': 'true',
+ 'CallingConvention': 'Cdecl',
+ 'CompileAs': 'CompileAsC',
+ 'DebugInformationFormat': 'EditAndContinue',
+ 'DisableLanguageExtensions': 'true',
+ 'DisableSpecificWarnings': 'd1;d2;d3',
+ 'EnableEnhancedInstructionSet': 'NotSet',
+ 'EnableFiberSafeOptimizations': 'true',
+ 'EnablePREfast': 'true',
+ 'ErrorReporting': 'Prompt',
+ 'ExceptionHandling': 'Async',
+ 'ExpandAttributedSource': 'true',
+ 'FavorSizeOrSpeed': 'Neither',
+ 'FloatingPointExceptions': 'true',
+ 'FloatingPointModel': 'Strict',
+ 'ForceConformanceInForLoopScope': 'true',
+ 'ForcedIncludeFiles': 'file1;file2;file3',
+ 'ForcedUsingFiles': 'file1;file2;file3',
+ 'FunctionLevelLinking': 'true',
+ 'GenerateXMLDocumentationFiles': 'true',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InlineFunctionExpansion': 'AnySuitable',
+ 'IntrinsicFunctions': 'true',
+ 'MinimalRebuild': 'true',
+ 'ObjectFileName': 'a_file_name',
+ 'OmitDefaultLibName': 'true',
+ 'OmitFramePointers': 'true',
+ 'OpenMPSupport': 'true',
+ 'Optimization': 'Full',
+ 'PrecompiledHeader': 'Create',
+ 'PrecompiledHeaderFile': 'a_file_name',
+ 'PrecompiledHeaderOutputFile': 'a_file_name',
+ 'PreprocessKeepComments': 'true',
+ 'PreprocessorDefinitions': 'd1;d2;d3',
+ 'PreprocessSuppressLineNumbers': 'false',
+ 'PreprocessToFile': 'true',
+ 'ProgramDataBaseFileName': 'a_file_name',
+ 'RuntimeLibrary': 'MultiThreaded',
+ 'RuntimeTypeInfo': 'true',
+ 'ShowIncludes': 'true',
+ 'SmallerTypeCheck': 'true',
+ 'StringPooling': 'true',
+ 'StructMemberAlignment': '1Byte',
+ 'SuppressStartupBanner': 'true',
+ 'TreatWarningAsError': 'true',
+ 'TreatWChar_tAsBuiltInType': 'true',
+ 'UndefineAllPreprocessorDefinitions': 'true',
+ 'UndefinePreprocessorDefinitions': 'd1;d2;d3',
+ 'UseFullPaths': 'true',
+ 'WarningLevel': 'Level2',
+ 'WholeProgramOptimization': 'true',
+ 'XMLDocumentationFileName': 'a_file_name'},
+ 'Link': {
+ 'AdditionalDependencies': 'file1;file2;file3',
+ 'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
+ 'AdditionalManifestDependencies': 'file1;file2;file3',
+ 'AdditionalOptions': 'a_string',
+ 'AddModuleNamesToAssembly': 'file1;file2;file3',
+ 'AllowIsolation': 'true',
+ 'AssemblyDebug': '',
+ 'AssemblyLinkResource': 'file1;file2;file3',
+ 'BaseAddress': 'a_string',
+ 'CLRImageType': 'ForceIJWImage',
+ 'CLRThreadAttribute': 'STAThreadingAttribute',
+ 'CLRUnmanagedCodeCheck': 'true',
+ 'DataExecutionPrevention': '',
+ 'DelayLoadDLLs': 'file1;file2;file3',
+ 'DelaySign': 'true',
+ 'Driver': 'Driver',
+ 'EmbedManagedResourceFile': 'file1;file2;file3',
+ 'EnableCOMDATFolding': '',
+ 'EnableUAC': 'true',
+ 'EntryPointSymbol': 'a_string',
+ 'FixedBaseAddress': 'false',
+ 'ForceSymbolReferences': 'file1;file2;file3',
+ 'FunctionOrder': 'a_file_name',
+ 'GenerateDebugInformation': 'true',
+ 'GenerateMapFile': 'true',
+ 'HeapCommitSize': 'a_string',
+ 'HeapReserveSize': 'a_string',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreEmbeddedIDL': 'true',
+ 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
+ 'ImportLibrary': 'a_file_name',
+ 'KeyContainer': 'a_file_name',
+ 'KeyFile': 'a_file_name',
+ 'LargeAddressAware': 'true',
+ 'LinkErrorReporting': 'NoErrorReport',
+ 'LinkTimeCodeGeneration': 'PGInstrument',
+ 'ManifestFile': 'a_file_name',
+ 'MapExports': 'true',
+ 'MapFileName': 'a_file_name',
+ 'MergedIDLBaseFileName': 'a_file_name',
+ 'MergeSections': 'a_string',
+ 'MidlCommandFile': 'a_file_name',
+ 'ModuleDefinitionFile': 'a_file_name',
+ 'NoEntryPoint': 'true',
+ 'OptimizeReferences': '',
+ 'OutputFile': 'a_file_name',
+ 'PerUserRedirection': 'true',
+ 'Profile': 'true',
+ 'ProfileGuidedDatabase': 'a_file_name',
+ 'ProgramDatabaseFile': 'a_file_name',
+ 'RandomizedBaseAddress': 'false',
+ 'RegisterOutput': 'true',
+ 'SetChecksum': 'true',
+ 'ShowProgress': 'NotSet',
+ 'StackCommitSize': 'a_string',
+ 'StackReserveSize': 'a_string',
+ 'StripPrivateSymbols': 'a_file_name',
+ 'SubSystem': 'Windows',
+ 'SupportUnloadOfDelayLoadedDLL': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'SwapRunFromCD': 'true',
+ 'SwapRunFromNET': 'true',
+ 'TargetMachine': 'MachineARM',
+ 'TerminalServerAware': 'true',
+ 'TurnOffAssemblyGeneration': 'true',
+ 'TypeLibraryFile': 'a_file_name',
+ 'TypeLibraryResourceID': '33',
+ 'UACExecutionLevel': 'HighestAvailable',
+ 'UACUIAccess': 'true',
+ 'Version': 'a_string'},
+ 'ResourceCompile': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
+ 'AdditionalOptions': 'a_string',
+ 'Culture': '0x03eb',
+ 'IgnoreStandardIncludePath': 'true',
+ 'PreprocessorDefinitions': 'd1;d2;d3',
+ 'ResourceOutputFileName': 'a_string',
+ 'ShowProgress': 'true',
+ 'SuppressStartupBanner': 'true',
+ 'UndefinePreprocessorDefinitions': 'd1;d2;d3'},
+ 'Midl': {
+ 'AdditionalIncludeDirectories': 'folder1;folder2;folder3',
+ 'AdditionalOptions': 'a_string',
+ 'CPreprocessOptions': 'a_string',
+ 'DefaultCharType': 'Unsigned',
+ 'DllDataFileName': 'a_file_name',
+ 'EnableErrorChecks': 'All',
+ 'ErrorCheckAllocations': 'true',
+ 'ErrorCheckBounds': 'true',
+ 'ErrorCheckEnumRange': 'true',
+ 'ErrorCheckRefPointers': 'true',
+ 'ErrorCheckStubData': 'true',
+ 'GenerateStublessProxies': 'true',
+ 'GenerateTypeLibrary': 'true',
+ 'HeaderFileName': 'a_file_name',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InterfaceIdentifierFileName': 'a_file_name',
+ 'MkTypLibCompatible': 'true',
+ 'OutputDirectory': 'a_string',
+ 'PreprocessorDefinitions': 'd1;d2;d3',
+ 'ProxyFileName': 'a_file_name',
+ 'RedirectOutputAndErrors': 'a_file_name',
+ 'StructMemberAlignment': '4',
+ 'SuppressStartupBanner': 'true',
+ 'TargetEnvironment': 'Win32',
+ 'TypeLibraryName': 'a_file_name',
+ 'UndefinePreprocessorDefinitions': 'd1;d2;d3',
+ 'ValidateAllParameters': 'true',
+ 'WarnAsError': 'true',
+ 'WarningLevel': '4'},
+ 'Lib': {
+ 'AdditionalDependencies': 'file1;file2;file3',
+ 'AdditionalLibraryDirectories': 'folder1;folder2;folder3',
+ 'AdditionalOptions': 'a_string',
+ 'ExportNamedFunctions': 'd1;d2;d3',
+ 'ForceSymbolReferences': 'a_string',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreSpecificDefaultLibraries': 'file1;file2;file3',
+ 'ModuleDefinitionFile': 'a_file_name',
+ 'OutputFile': 'a_file_name',
+ 'SuppressStartupBanner': 'true',
+ 'UseUnicodeResponseFiles': 'true'},
+ 'Manifest': {
+ 'AdditionalManifestFiles': 'file1;file2;file3',
+ 'AdditionalOptions': 'a_string',
+ 'AssemblyIdentity': 'a_string',
+ 'ComponentFileName': 'a_file_name',
+ 'GenerateCatalogFiles': 'true',
+ 'InputResourceManifests': 'a_string',
+ 'OutputManifestFile': 'a_file_name',
+ 'RegistrarScriptFile': 'a_file_name',
+ 'ReplacementsFile': 'a_file_name',
+ 'SuppressStartupBanner': 'true',
+ 'TypeLibraryFile': 'a_file_name',
+ 'UpdateFileHashes': 'true',
+ 'UpdateFileHashesSearchPath': 'a_file_name',
+ 'VerboseOutput': 'true'},
+ 'ManifestResourceCompile': {
+ 'ResourceOutputFileName': 'my_name'},
+ 'ProjectReference': {
+ 'LinkLibraryDependencies': 'true',
+ 'UseLibraryDependencyInputs': 'false'},
+ '': {
+ 'EmbedManifest': 'true',
+ 'GenerateManifest': 'true',
+ 'IgnoreImportLibrary': 'true',
+ 'LinkIncremental': 'false'}}
+ actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
+ msvs_settings,
+ self.stderr)
+ self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
+ self._ExpectedWarnings([])
+
+ def testConvertToMSBuildSettings_actual(self):
+ """Tests the conversion of an actual project.
+
+ A VS2008 project with most of the options defined was created through the
+ VS2008 IDE. It was then converted to VS2010. The tool settings found in
+ the .vcproj and .vcxproj files were converted to the two dictionaries
+ msvs_settings and expected_msbuild_settings.
+
+ Note that for many settings, the VS2010 converter adds macros like
+    %(AdditionalIncludeDirectories) to make sure that inherited values are
+ included. Since the Gyp projects we generate do not use inheritance,
+ we removed these macros. They were:
+ ClCompile:
+ AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)'
+ AdditionalOptions: ' %(AdditionalOptions)'
+ AdditionalUsingDirectories: ';%(AdditionalUsingDirectories)'
+ DisableSpecificWarnings: ';%(DisableSpecificWarnings)',
+ ForcedIncludeFiles: ';%(ForcedIncludeFiles)',
+ ForcedUsingFiles: ';%(ForcedUsingFiles)',
+ PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
+ UndefinePreprocessorDefinitions:
+ ';%(UndefinePreprocessorDefinitions)',
+ Link:
+ AdditionalDependencies: ';%(AdditionalDependencies)',
+ AdditionalLibraryDirectories: ';%(AdditionalLibraryDirectories)',
+ AdditionalManifestDependencies:
+ ';%(AdditionalManifestDependencies)',
+ AdditionalOptions: ' %(AdditionalOptions)',
+ AddModuleNamesToAssembly: ';%(AddModuleNamesToAssembly)',
+ AssemblyLinkResource: ';%(AssemblyLinkResource)',
+ DelayLoadDLLs: ';%(DelayLoadDLLs)',
+ EmbedManagedResourceFile: ';%(EmbedManagedResourceFile)',
+ ForceSymbolReferences: ';%(ForceSymbolReferences)',
+ IgnoreSpecificDefaultLibraries:
+ ';%(IgnoreSpecificDefaultLibraries)',
+ ResourceCompile:
+ AdditionalIncludeDirectories: ';%(AdditionalIncludeDirectories)',
+ AdditionalOptions: ' %(AdditionalOptions)',
+ PreprocessorDefinitions: ';%(PreprocessorDefinitions)',
+ Manifest:
+ AdditionalManifestFiles: ';%(AdditionalManifestFiles)',
+ AdditionalOptions: ' %(AdditionalOptions)',
+ InputResourceManifests: ';%(InputResourceManifests)',
+ """
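+    # For example, a raw converted value would read
+    #   'AdditionalIncludeDirectories': 'dir1;%(AdditionalIncludeDirectories)'
+    # whereas the dictionaries below carry the stripped value 'dir1'.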
+ msvs_settings = {
+ 'VCCLCompilerTool': {
+ 'AdditionalIncludeDirectories': 'dir1',
+ 'AdditionalOptions': '/more',
+ 'AdditionalUsingDirectories': 'test',
+ 'AssemblerListingLocation': '$(IntDir)\\a',
+ 'AssemblerOutput': '1',
+ 'BasicRuntimeChecks': '3',
+ 'BrowseInformation': '1',
+ 'BrowseInformationFile': '$(IntDir)\\e',
+ 'BufferSecurityCheck': 'false',
+ 'CallingConvention': '1',
+ 'CompileAs': '1',
+ 'DebugInformationFormat': '4',
+ 'DefaultCharIsUnsigned': 'true',
+ 'Detect64BitPortabilityProblems': 'true',
+ 'DisableLanguageExtensions': 'true',
+ 'DisableSpecificWarnings': 'abc',
+ 'EnableEnhancedInstructionSet': '1',
+ 'EnableFiberSafeOptimizations': 'true',
+ 'EnableFunctionLevelLinking': 'true',
+ 'EnableIntrinsicFunctions': 'true',
+ 'EnablePREfast': 'true',
+ 'ErrorReporting': '2',
+ 'ExceptionHandling': '2',
+ 'ExpandAttributedSource': 'true',
+ 'FavorSizeOrSpeed': '2',
+ 'FloatingPointExceptions': 'true',
+ 'FloatingPointModel': '1',
+ 'ForceConformanceInForLoopScope': 'false',
+ 'ForcedIncludeFiles': 'def',
+ 'ForcedUsingFiles': 'ge',
+ 'GeneratePreprocessedFile': '2',
+ 'GenerateXMLDocumentationFiles': 'true',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InlineFunctionExpansion': '1',
+ 'KeepComments': 'true',
+ 'MinimalRebuild': 'true',
+ 'ObjectFile': '$(IntDir)\\b',
+ 'OmitDefaultLibName': 'true',
+ 'OmitFramePointers': 'true',
+ 'OpenMP': 'true',
+ 'Optimization': '3',
+ 'PrecompiledHeaderFile': '$(IntDir)\\$(TargetName).pche',
+ 'PrecompiledHeaderThrough': 'StdAfx.hd',
+ 'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
+ 'ProgramDataBaseFileName': '$(IntDir)\\vc90b.pdb',
+ 'RuntimeLibrary': '3',
+ 'RuntimeTypeInfo': 'false',
+ 'ShowIncludes': 'true',
+ 'SmallerTypeCheck': 'true',
+ 'StringPooling': 'true',
+ 'StructMemberAlignment': '3',
+ 'SuppressStartupBanner': 'false',
+ 'TreatWChar_tAsBuiltInType': 'false',
+ 'UndefineAllPreprocessorDefinitions': 'true',
+ 'UndefinePreprocessorDefinitions': 'wer',
+ 'UseFullPaths': 'true',
+ 'UsePrecompiledHeader': '0',
+ 'UseUnicodeResponseFiles': 'false',
+ 'WarnAsError': 'true',
+ 'WarningLevel': '3',
+ 'WholeProgramOptimization': 'true',
+ 'XMLDocumentationFileName': '$(IntDir)\\c'},
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': 'zx',
+ 'AdditionalLibraryDirectories': 'asd',
+ 'AdditionalManifestDependencies': 's2',
+ 'AdditionalOptions': '/mor2',
+ 'AddModuleNamesToAssembly': 'd1',
+ 'AllowIsolation': 'false',
+ 'AssemblyDebug': '1',
+ 'AssemblyLinkResource': 'd5',
+ 'BaseAddress': '23423',
+ 'CLRImageType': '3',
+ 'CLRThreadAttribute': '1',
+ 'CLRUnmanagedCodeCheck': 'true',
+ 'DataExecutionPrevention': '0',
+ 'DelayLoadDLLs': 'd4',
+ 'DelaySign': 'true',
+ 'Driver': '2',
+ 'EmbedManagedResourceFile': 'd2',
+ 'EnableCOMDATFolding': '1',
+ 'EnableUAC': 'false',
+ 'EntryPointSymbol': 'f5',
+ 'ErrorReporting': '2',
+ 'FixedBaseAddress': '1',
+ 'ForceSymbolReferences': 'd3',
+ 'FunctionOrder': 'fssdfsd',
+ 'GenerateDebugInformation': 'true',
+ 'GenerateManifest': 'false',
+ 'GenerateMapFile': 'true',
+ 'HeapCommitSize': '13',
+ 'HeapReserveSize': '12',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreDefaultLibraryNames': 'flob;flok',
+ 'IgnoreEmbeddedIDL': 'true',
+ 'IgnoreImportLibrary': 'true',
+ 'ImportLibrary': 'f4',
+ 'KeyContainer': 'f7',
+ 'KeyFile': 'f6',
+ 'LargeAddressAware': '2',
+ 'LinkIncremental': '0',
+ 'LinkLibraryDependencies': 'false',
+ 'LinkTimeCodeGeneration': '1',
+ 'ManifestFile':
+ '$(IntDir)\\$(TargetFileName).2intermediate.manifest',
+ 'MapExports': 'true',
+ 'MapFileName': 'd5',
+ 'MergedIDLBaseFileName': 'f2',
+ 'MergeSections': 'f5',
+ 'MidlCommandFile': 'f1',
+ 'ModuleDefinitionFile': 'sdsd',
+ 'OptimizeForWindows98': '2',
+ 'OptimizeReferences': '2',
+ 'OutputFile': '$(OutDir)\\$(ProjectName)2.exe',
+ 'PerUserRedirection': 'true',
+ 'Profile': 'true',
+ 'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
+ 'ProgramDatabaseFile': 'Flob.pdb',
+ 'RandomizedBaseAddress': '1',
+ 'RegisterOutput': 'true',
+ 'ResourceOnlyDLL': 'true',
+ 'SetChecksum': 'false',
+ 'ShowProgress': '1',
+ 'StackCommitSize': '15',
+ 'StackReserveSize': '14',
+ 'StripPrivateSymbols': 'd3',
+ 'SubSystem': '1',
+ 'SupportUnloadOfDelayLoadedDLL': 'true',
+ 'SuppressStartupBanner': 'false',
+ 'SwapRunFromCD': 'true',
+ 'SwapRunFromNet': 'true',
+ 'TargetMachine': '1',
+ 'TerminalServerAware': '1',
+ 'TurnOffAssemblyGeneration': 'true',
+ 'TypeLibraryFile': 'f3',
+ 'TypeLibraryResourceID': '12',
+ 'UACExecutionLevel': '2',
+ 'UACUIAccess': 'true',
+ 'UseLibraryDependencyInputs': 'true',
+ 'UseUnicodeResponseFiles': 'false',
+ 'Version': '333'},
+ 'VCResourceCompilerTool': {
+ 'AdditionalIncludeDirectories': 'f3',
+ 'AdditionalOptions': '/more3',
+ 'Culture': '3084',
+ 'IgnoreStandardIncludePath': 'true',
+ 'PreprocessorDefinitions': '_UNICODE;UNICODE2',
+ 'ResourceOutputFileName': '$(IntDir)/$(InputName)3.res',
+ 'ShowProgress': 'true'},
+ 'VCManifestTool': {
+ 'AdditionalManifestFiles': 'sfsdfsd',
+ 'AdditionalOptions': 'afdsdafsd',
+ 'AssemblyIdentity': 'sddfdsadfsa',
+ 'ComponentFileName': 'fsdfds',
+ 'DependencyInformationFile': '$(IntDir)\\mt.depdfd',
+ 'EmbedManifest': 'false',
+ 'GenerateCatalogFiles': 'true',
+ 'InputResourceManifests': 'asfsfdafs',
+ 'ManifestResourceFile':
+ '$(IntDir)\\$(TargetFileName).embed.manifest.resfdsf',
+ 'OutputManifestFile': '$(TargetPath).manifestdfs',
+ 'RegistrarScriptFile': 'sdfsfd',
+ 'ReplacementsFile': 'sdffsd',
+ 'SuppressStartupBanner': 'false',
+ 'TypeLibraryFile': 'sfsd',
+ 'UpdateFileHashes': 'true',
+ 'UpdateFileHashesSearchPath': 'sfsd',
+ 'UseFAT32Workaround': 'true',
+ 'UseUnicodeResponseFiles': 'false',
+ 'VerboseOutput': 'true'}}
+ expected_msbuild_settings = {
+ 'ClCompile': {
+ 'AdditionalIncludeDirectories': 'dir1',
+ 'AdditionalOptions': '/more /J',
+ 'AdditionalUsingDirectories': 'test',
+ 'AssemblerListingLocation': '$(IntDir)a',
+ 'AssemblerOutput': 'AssemblyCode',
+ 'BasicRuntimeChecks': 'EnableFastChecks',
+ 'BrowseInformation': 'true',
+ 'BrowseInformationFile': '$(IntDir)e',
+ 'BufferSecurityCheck': 'false',
+ 'CallingConvention': 'FastCall',
+ 'CompileAs': 'CompileAsC',
+ 'DebugInformationFormat': 'EditAndContinue',
+ 'DisableLanguageExtensions': 'true',
+ 'DisableSpecificWarnings': 'abc',
+ 'EnableEnhancedInstructionSet': 'StreamingSIMDExtensions',
+ 'EnableFiberSafeOptimizations': 'true',
+ 'EnablePREfast': 'true',
+ 'ErrorReporting': 'Queue',
+ 'ExceptionHandling': 'Async',
+ 'ExpandAttributedSource': 'true',
+ 'FavorSizeOrSpeed': 'Size',
+ 'FloatingPointExceptions': 'true',
+ 'FloatingPointModel': 'Strict',
+ 'ForceConformanceInForLoopScope': 'false',
+ 'ForcedIncludeFiles': 'def',
+ 'ForcedUsingFiles': 'ge',
+ 'FunctionLevelLinking': 'true',
+ 'GenerateXMLDocumentationFiles': 'true',
+ 'IgnoreStandardIncludePath': 'true',
+ 'InlineFunctionExpansion': 'OnlyExplicitInline',
+ 'IntrinsicFunctions': 'true',
+ 'MinimalRebuild': 'true',
+ 'ObjectFileName': '$(IntDir)b',
+ 'OmitDefaultLibName': 'true',
+ 'OmitFramePointers': 'true',
+ 'OpenMPSupport': 'true',
+ 'Optimization': 'Full',
+ 'PrecompiledHeader': 'NotUsing', # Actual conversion gives ''
+ 'PrecompiledHeaderFile': 'StdAfx.hd',
+ 'PrecompiledHeaderOutputFile': '$(IntDir)$(TargetName).pche',
+ 'PreprocessKeepComments': 'true',
+ 'PreprocessorDefinitions': 'WIN32;_DEBUG;_CONSOLE',
+ 'PreprocessSuppressLineNumbers': 'true',
+ 'PreprocessToFile': 'true',
+ 'ProgramDataBaseFileName': '$(IntDir)vc90b.pdb',
+ 'RuntimeLibrary': 'MultiThreadedDebugDLL',
+ 'RuntimeTypeInfo': 'false',
+ 'ShowIncludes': 'true',
+ 'SmallerTypeCheck': 'true',
+ 'StringPooling': 'true',
+ 'StructMemberAlignment': '4Bytes',
+ 'SuppressStartupBanner': 'false',
+ 'TreatWarningAsError': 'true',
+ 'TreatWChar_tAsBuiltInType': 'false',
+ 'UndefineAllPreprocessorDefinitions': 'true',
+ 'UndefinePreprocessorDefinitions': 'wer',
+ 'UseFullPaths': 'true',
+ 'WarningLevel': 'Level3',
+ 'WholeProgramOptimization': 'true',
+ 'XMLDocumentationFileName': '$(IntDir)c'},
+ 'Link': {
+ 'AdditionalDependencies': 'zx',
+ 'AdditionalLibraryDirectories': 'asd',
+ 'AdditionalManifestDependencies': 's2',
+ 'AdditionalOptions': '/mor2',
+ 'AddModuleNamesToAssembly': 'd1',
+ 'AllowIsolation': 'false',
+ 'AssemblyDebug': 'true',
+ 'AssemblyLinkResource': 'd5',
+ 'BaseAddress': '23423',
+ 'CLRImageType': 'ForceSafeILImage',
+ 'CLRThreadAttribute': 'MTAThreadingAttribute',
+ 'CLRUnmanagedCodeCheck': 'true',
+ 'DataExecutionPrevention': '',
+ 'DelayLoadDLLs': 'd4',
+ 'DelaySign': 'true',
+ 'Driver': 'UpOnly',
+ 'EmbedManagedResourceFile': 'd2',
+ 'EnableCOMDATFolding': 'false',
+ 'EnableUAC': 'false',
+ 'EntryPointSymbol': 'f5',
+ 'FixedBaseAddress': 'false',
+ 'ForceSymbolReferences': 'd3',
+ 'FunctionOrder': 'fssdfsd',
+ 'GenerateDebugInformation': 'true',
+ 'GenerateMapFile': 'true',
+ 'HeapCommitSize': '13',
+ 'HeapReserveSize': '12',
+ 'IgnoreAllDefaultLibraries': 'true',
+ 'IgnoreEmbeddedIDL': 'true',
+ 'IgnoreSpecificDefaultLibraries': 'flob;flok',
+ 'ImportLibrary': 'f4',
+ 'KeyContainer': 'f7',
+ 'KeyFile': 'f6',
+ 'LargeAddressAware': 'true',
+ 'LinkErrorReporting': 'QueueForNextLogin',
+ 'LinkTimeCodeGeneration': 'UseLinkTimeCodeGeneration',
+ 'ManifestFile': '$(IntDir)$(TargetFileName).2intermediate.manifest',
+ 'MapExports': 'true',
+ 'MapFileName': 'd5',
+ 'MergedIDLBaseFileName': 'f2',
+ 'MergeSections': 'f5',
+ 'MidlCommandFile': 'f1',
+ 'ModuleDefinitionFile': 'sdsd',
+ 'NoEntryPoint': 'true',
+ 'OptimizeReferences': 'true',
+ 'OutputFile': '$(OutDir)$(ProjectName)2.exe',
+ 'PerUserRedirection': 'true',
+ 'Profile': 'true',
+ 'ProfileGuidedDatabase': '$(TargetDir)$(TargetName).pgdd',
+ 'ProgramDatabaseFile': 'Flob.pdb',
+ 'RandomizedBaseAddress': 'false',
+ 'RegisterOutput': 'true',
+ 'SetChecksum': 'false',
+ 'ShowProgress': 'LinkVerbose',
+ 'StackCommitSize': '15',
+ 'StackReserveSize': '14',
+ 'StripPrivateSymbols': 'd3',
+ 'SubSystem': 'Console',
+ 'SupportUnloadOfDelayLoadedDLL': 'true',
+ 'SuppressStartupBanner': 'false',
+ 'SwapRunFromCD': 'true',
+ 'SwapRunFromNET': 'true',
+ 'TargetMachine': 'MachineX86',
+ 'TerminalServerAware': 'false',
+ 'TurnOffAssemblyGeneration': 'true',
+ 'TypeLibraryFile': 'f3',
+ 'TypeLibraryResourceID': '12',
+ 'UACExecutionLevel': 'RequireAdministrator',
+ 'UACUIAccess': 'true',
+ 'Version': '333'},
+ 'ResourceCompile': {
+ 'AdditionalIncludeDirectories': 'f3',
+ 'AdditionalOptions': '/more3',
+ 'Culture': '0x0c0c',
+ 'IgnoreStandardIncludePath': 'true',
+ 'PreprocessorDefinitions': '_UNICODE;UNICODE2',
+ 'ResourceOutputFileName': '$(IntDir)%(Filename)3.res',
+ 'ShowProgress': 'true'},
+ 'Manifest': {
+ 'AdditionalManifestFiles': 'sfsdfsd',
+ 'AdditionalOptions': 'afdsdafsd',
+ 'AssemblyIdentity': 'sddfdsadfsa',
+ 'ComponentFileName': 'fsdfds',
+ 'GenerateCatalogFiles': 'true',
+ 'InputResourceManifests': 'asfsfdafs',
+ 'OutputManifestFile': '$(TargetPath).manifestdfs',
+ 'RegistrarScriptFile': 'sdfsfd',
+ 'ReplacementsFile': 'sdffsd',
+ 'SuppressStartupBanner': 'false',
+ 'TypeLibraryFile': 'sfsd',
+ 'UpdateFileHashes': 'true',
+ 'UpdateFileHashesSearchPath': 'sfsd',
+ 'VerboseOutput': 'true'},
+ 'ProjectReference': {
+ 'LinkLibraryDependencies': 'false',
+ 'UseLibraryDependencyInputs': 'true'},
+ '': {
+ 'EmbedManifest': 'false',
+ 'GenerateManifest': 'false',
+ 'IgnoreImportLibrary': 'true',
+ 'LinkIncremental': ''
+ },
+ 'ManifestResourceCompile': {
+ 'ResourceOutputFileName':
+ '$(IntDir)$(TargetFileName).embed.manifest.resfdsf'}
+ }
+ actual_msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(
+ msvs_settings,
+ self.stderr)
+ self.assertEqual(expected_msbuild_settings, actual_msbuild_settings)
+ self._ExpectedWarnings([])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/python/gyp/pylib/gyp/MSVSToolFile.py b/third_party/python/gyp/pylib/gyp/MSVSToolFile.py
new file mode 100644
index 0000000000..74e529a17f
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/MSVSToolFile.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Visual Studio project reader/writer."""
+
+import gyp.common
+import gyp.easy_xml as easy_xml
+
+
+class Writer(object):
+ """Visual Studio XML tool file writer."""
+
+ def __init__(self, tool_file_path, name):
+ """Initializes the tool file.
+
+ Args:
+ tool_file_path: Path to the tool file.
+ name: Name of the tool file.
+ """
+ self.tool_file_path = tool_file_path
+ self.name = name
+ self.rules_section = ['Rules']
+
+ def AddCustomBuildRule(self, name, cmd, description,
+ additional_dependencies,
+ outputs, extensions):
+ """Adds a rule to the tool file.
+
+ Args:
+      name: Name of the rule.
+      cmd: Command line of the rule.
+      description: Description of the rule.
+ additional_dependencies: other files which may trigger the rule.
+ outputs: outputs of the rule.
+ extensions: extensions handled by the rule.
+ """
+ rule = ['CustomBuildRule',
+ {'Name': name,
+ 'ExecutionDescription': description,
+ 'CommandLine': cmd,
+ 'Outputs': ';'.join(outputs),
+ 'FileExtensions': ';'.join(extensions),
+ 'AdditionalDependencies':
+ ';'.join(additional_dependencies)
+ }]
+ self.rules_section.append(rule)
+
+ def WriteIfChanged(self):
+ """Writes the tool file."""
+ content = ['VisualStudioToolFile',
+ {'Version': '8.00',
+ 'Name': self.name
+ },
+ self.rules_section
+ ]
+ easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
+ encoding="Windows-1252")
diff --git a/third_party/python/gyp/pylib/gyp/MSVSUserFile.py b/third_party/python/gyp/pylib/gyp/MSVSUserFile.py
new file mode 100644
index 0000000000..2264d64015
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/MSVSUserFile.py
@@ -0,0 +1,147 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Visual Studio user preferences file writer."""
+
+import os
+import re
+import socket # for gethostname
+
+import gyp.common
+import gyp.easy_xml as easy_xml
+
+
+#------------------------------------------------------------------------------
+
+def _FindCommandInPath(command):
+ """If there are no slashes in the command given, this function
+ searches the PATH env to find the given command, and converts it
+ to an absolute path. We have to do this because MSVS is looking
+ for an actual file to launch a debugger on, not just a command
+ line. Note that this happens at GYP time, so anything needing to
+ be built needs to have a full path."""
+ if '/' in command or '\\' in command:
+ # If the command already has path elements (either relative or
+ # absolute), then assume it is constructed properly.
+ return command
+ else:
+ # Search through the path list and find an existing file that
+ # we can access.
+ paths = os.environ.get('PATH','').split(os.pathsep)
+ for path in paths:
+ item = os.path.join(path, command)
+ if os.path.isfile(item) and os.access(item, os.X_OK):
+ return item
+ return command
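+
+# For example (hypothetical inputs): _FindCommandInPath('python.exe')
+# returns an absolute path if an executable named 'python.exe' is found on
+# PATH, and the bare name otherwise; _FindCommandInPath(r'bin\tool.exe') is
+# returned as-is because it already contains a path separator.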
+
+def _QuoteWin32CommandLineArgs(args):
+ new_args = []
+ for arg in args:
+ # Replace all double-quotes with double-double-quotes to escape
+ # them for cmd shell, and then quote the whole thing if there
+ # are any.
+ if arg.find('"') != -1:
+ arg = '""'.join(arg.split('"'))
+ arg = '"%s"' % arg
+
+ # Otherwise, if there are any spaces, quote the whole arg.
+ elif re.search(r'[ \t\n]', arg):
+ arg = '"%s"' % arg
+ new_args.append(arg)
+ return new_args
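+
+# Illustrative behaviour of the quoting rules above, for hypothetical
+# arguments:
+#   _QuoteWin32CommandLineArgs(['a b', 'say "hi"', 'plain'])
+#   -> ['"a b"', '"say ""hi"""', 'plain']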
+
+class Writer(object):
+ """Visual Studio XML user user file writer."""
+
+ def __init__(self, user_file_path, version, name):
+ """Initializes the user file.
+
+ Args:
+ user_file_path: Path to the user file.
+ version: Version info.
+ name: Name of the user file.
+ """
+ self.user_file_path = user_file_path
+ self.version = version
+ self.name = name
+ self.configurations = {}
+
+ def AddConfig(self, name):
+ """Adds a configuration to the project.
+
+ Args:
+ name: Configuration name.
+ """
+ self.configurations[name] = ['Configuration', {'Name': name}]
+
+  def AddDebugSettings(self, config_name, command, environment={},
+ working_directory=""):
+ """Adds a DebugSettings node to the user file for a particular config.
+
+ Args:
+ command: command line to run. First element in the list is the
+ executable. All elements of the command will be quoted if
+ necessary.
+      environment: dictionary of environment variables to set for the
+        debug session. (optional)
+      working_directory: working directory in which to run the command.
+        (optional)
+ """
+ command = _QuoteWin32CommandLineArgs(command)
+
+ abs_command = _FindCommandInPath(command[0])
+
+ if environment and isinstance(environment, dict):
+ env_list = ['%s="%s"' % (key, val)
+ for (key,val) in environment.items()]
+ environment = ' '.join(env_list)
+ else:
+ environment = ''
+
+ n_cmd = ['DebugSettings',
+ {'Command': abs_command,
+ 'WorkingDirectory': working_directory,
+ 'CommandArguments': " ".join(command[1:]),
+ 'RemoteMachine': socket.gethostname(),
+ 'Environment': environment,
+ 'EnvironmentMerge': 'true',
+ # Currently these are all "dummy" values that we're just setting
+ # in the default manner that MSVS does it. We could use some of
+ # these to add additional capabilities, I suppose, but they might
+ # not have parity with other platforms then.
+ 'Attach': 'false',
+ 'DebuggerType': '3', # 'auto' debugger
+ 'Remote': '1',
+ 'RemoteCommand': '',
+ 'HttpUrl': '',
+ 'PDBPath': '',
+ 'SQLDebugging': '',
+ 'DebuggerFlavor': '0',
+ 'MPIRunCommand': '',
+ 'MPIRunArguments': '',
+ 'MPIRunWorkingDirectory': '',
+ 'ApplicationCommand': '',
+ 'ApplicationArguments': '',
+ 'ShimCommand': '',
+ 'MPIAcceptMode': '',
+ 'MPIAcceptFilter': ''
+ }]
+
+ # Find the config, and add it if it doesn't exist.
+ if config_name not in self.configurations:
+ self.AddConfig(config_name)
+
+ # Add the DebugSettings onto the appropriate config.
+ self.configurations[config_name].append(n_cmd)
+
+ def WriteIfChanged(self):
+ """Writes the user file."""
+ configs = ['Configurations']
+ for config, spec in sorted(self.configurations.items()):
+ configs.append(spec)
+
+ content = ['VisualStudioUserFile',
+ {'Version': self.version.ProjectVersion(),
+ 'Name': self.name
+ },
+ configs]
+ easy_xml.WriteXmlIfChanged(content, self.user_file_path,
+ encoding="Windows-1252")
diff --git a/third_party/python/gyp/pylib/gyp/MSVSUtil.py b/third_party/python/gyp/pylib/gyp/MSVSUtil.py
new file mode 100644
index 0000000000..f24530b275
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/MSVSUtil.py
@@ -0,0 +1,271 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility functions shared amongst the Windows generators."""
+
+import copy
+import os
+
+
+# A dictionary mapping supported target types to extensions.
+TARGET_TYPE_EXT = {
+ 'executable': 'exe',
+ 'loadable_module': 'dll',
+ 'shared_library': 'dll',
+ 'static_library': 'lib',
+ 'windows_driver': 'sys',
+}
+
+
+def _GetLargePdbShimCcPath():
+ """Returns the path of the large_pdb_shim.cc file."""
+ this_dir = os.path.abspath(os.path.dirname(__file__))
+ src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
+ win_data_dir = os.path.join(src_dir, 'data', 'win')
+ large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
+ return large_pdb_shim_cc
+
+
+def _DeepCopySomeKeys(in_dict, keys):
+ """Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
+
+ Arguments:
+ in_dict: The dictionary to copy.
+ keys: The keys to be copied. If a key is in this list and doesn't exist in
+ |in_dict| this is not an error.
+ Returns:
+ The partially deep-copied dictionary.
+ """
+ d = {}
+ for key in keys:
+ if key not in in_dict:
+ continue
+ d[key] = copy.deepcopy(in_dict[key])
+ return d
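+
+# For example, with a hypothetical dict:
+#   _DeepCopySomeKeys({'a': [1], 'b': 2}, ['a', 'c']) -> {'a': [1]}
+# where the copied list is a deep copy and the missing key 'c' is skipped.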
+
+
+def _SuffixName(name, suffix):
+ """Add a suffix to the end of a target.
+
+ Arguments:
+ name: name of the target (foo#target)
+ suffix: the suffix to be added
+ Returns:
+ Target name with suffix added (foo_suffix#target)
+ """
+ parts = name.rsplit('#', 1)
+ parts[0] = '%s_%s' % (parts[0], suffix)
+ return '#'.join(parts)
+
+
+def _ShardName(name, number):
+ """Add a shard number to the end of a target.
+
+ Arguments:
+ name: name of the target (foo#target)
+ number: shard number
+ Returns:
+ Target name with shard added (foo_1#target)
+ """
+ return _SuffixName(name, str(number))
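+
+# For example, with hypothetical target names:
+#   _SuffixName('base/base.gyp:base#target', 'shim')
+#   -> 'base/base.gyp:base_shim#target'
+#   _ShardName('base/base.gyp:base#target', 1)
+#   -> 'base/base.gyp:base_1#target'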
+
+
+def ShardTargets(target_list, target_dicts):
+ """Shard some targets apart to work around the linkers limits.
+
+ Arguments:
+ target_list: List of target pairs: 'base/base.gyp:base'.
+ target_dicts: Dict of target properties keyed on target pair.
+ Returns:
+ Tuple of the new sharded versions of the inputs.
+ """
+ # Gather the targets to shard, and how many pieces.
+ targets_to_shard = {}
+ for t in target_dicts:
+ shards = int(target_dicts[t].get('msvs_shard', 0))
+ if shards:
+ targets_to_shard[t] = shards
+ # Shard target_list.
+ new_target_list = []
+ for t in target_list:
+ if t in targets_to_shard:
+ for i in range(targets_to_shard[t]):
+ new_target_list.append(_ShardName(t, i))
+ else:
+ new_target_list.append(t)
+ # Shard target_dict.
+ new_target_dicts = {}
+ for t in target_dicts:
+ if t in targets_to_shard:
+ for i in range(targets_to_shard[t]):
+ name = _ShardName(t, i)
+ new_target_dicts[name] = copy.copy(target_dicts[t])
+ new_target_dicts[name]['target_name'] = _ShardName(
+ new_target_dicts[name]['target_name'], i)
+ sources = new_target_dicts[name].get('sources', [])
+ new_sources = []
+ for pos in range(i, len(sources), targets_to_shard[t]):
+ new_sources.append(sources[pos])
+ new_target_dicts[name]['sources'] = new_sources
+ else:
+ new_target_dicts[t] = target_dicts[t]
+ # Shard dependencies.
+ for t in sorted(new_target_dicts):
+ for deptype in ('dependencies', 'dependencies_original'):
+ dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
+ new_dependencies = []
+ for d in dependencies:
+ if d in targets_to_shard:
+ for i in range(targets_to_shard[d]):
+ new_dependencies.append(_ShardName(d, i))
+ else:
+ new_dependencies.append(d)
+ new_target_dicts[t][deptype] = new_dependencies
+
+ return (new_target_list, new_target_dicts)
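+
+# Sketch of the round-robin source split above, for a hypothetical target
+# with 'msvs_shard': 2 and 'sources': ['a.cc', 'b.cc', 'c.cc']:
+#   shard 0 receives ['a.cc', 'c.cc']
+#   shard 1 receives ['b.cc']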
+
+
+def _GetPdbPath(target_dict, config_name, vars):
+ """Returns the path to the PDB file that will be generated by a given
+ configuration.
+
+ The lookup proceeds as follows:
+ - Look for an explicit path in the VCLinkerTool configuration block.
+ - Look for an 'msvs_large_pdb_path' variable.
+ - Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
+ specified.
+ - Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
+
+ Arguments:
+ target_dict: The target dictionary to be searched.
+ config_name: The name of the configuration of interest.
+ vars: A dictionary of common GYP variables with generator-specific values.
+ Returns:
+ The path of the corresponding PDB file.
+ """
+ config = target_dict['configurations'][config_name]
+ msvs = config.setdefault('msvs_settings', {})
+
+ linker = msvs.get('VCLinkerTool', {})
+
+ pdb_path = linker.get('ProgramDatabaseFile')
+ if pdb_path:
+ return pdb_path
+
+ variables = target_dict.get('variables', {})
+ pdb_path = variables.get('msvs_large_pdb_path', None)
+ if pdb_path:
+ return pdb_path
+
+ pdb_base = target_dict.get('product_name', target_dict['target_name'])
+ pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
+ pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
+
+ return pdb_path
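+
+# With neither ProgramDatabaseFile nor msvs_large_pdb_path set, the
+# fallback for a hypothetical executable target 'foo' with
+# vars = {'PRODUCT_DIR': 'out'} is 'out/foo.exe.pdb'.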
+
+
+def InsertLargePdbShims(target_list, target_dicts, vars):
+ """Insert a shim target that forces the linker to use 4KB pagesize PDBs.
+
+ This is a workaround for targets with PDBs greater than 1GB in size, the
+ limit for the 1KB pagesize PDBs created by the linker by default.
+
+ Arguments:
+ target_list: List of target pairs: 'base/base.gyp:base'.
+ target_dicts: Dict of target properties keyed on target pair.
+ vars: A dictionary of common GYP variables with generator-specific values.
+ Returns:
+ Tuple of the shimmed version of the inputs.
+ """
+ # Determine which targets need shimming.
+ targets_to_shim = []
+ for t in target_dicts:
+ target_dict = target_dicts[t]
+
+ # We only want to shim targets that have msvs_large_pdb enabled.
+ if not int(target_dict.get('msvs_large_pdb', 0)):
+ continue
+ # This is intended for executable, shared_library and loadable_module
+ # targets where every configuration is set up to produce a PDB output.
+ # If any of these conditions is not true then the shim logic will fail
+ # below.
+ targets_to_shim.append(t)
+
+ large_pdb_shim_cc = _GetLargePdbShimCcPath()
+
+ for t in targets_to_shim:
+ target_dict = target_dicts[t]
+ target_name = target_dict.get('target_name')
+
+ base_dict = _DeepCopySomeKeys(target_dict,
+ ['configurations', 'default_configuration', 'toolset'])
+
+ # This is the dict for copying the source file (part of the GYP tree)
+ # to the intermediate directory of the project. This is necessary because
+ # we can't always build a relative path to the shim source file (on Windows
+ # GYP and the project may be on different drives), and Ninja hates absolute
+ # paths (it ends up generating the .obj and .obj.d alongside the source
+    # file, polluting GYP's tree).
+ copy_suffix = 'large_pdb_copy'
+ copy_target_name = target_name + '_' + copy_suffix
+ full_copy_target_name = _SuffixName(t, copy_suffix)
+ shim_cc_basename = os.path.basename(large_pdb_shim_cc)
+ shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
+ shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
+ copy_dict = copy.deepcopy(base_dict)
+ copy_dict['target_name'] = copy_target_name
+ copy_dict['type'] = 'none'
+ copy_dict['sources'] = [ large_pdb_shim_cc ]
+ copy_dict['copies'] = [{
+ 'destination': shim_cc_dir,
+ 'files': [ large_pdb_shim_cc ]
+ }]
+
+ # This is the dict for the PDB generating shim target. It depends on the
+ # copy target.
+ shim_suffix = 'large_pdb_shim'
+ shim_target_name = target_name + '_' + shim_suffix
+ full_shim_target_name = _SuffixName(t, shim_suffix)
+ shim_dict = copy.deepcopy(base_dict)
+ shim_dict['target_name'] = shim_target_name
+ shim_dict['type'] = 'static_library'
+ shim_dict['sources'] = [ shim_cc_path ]
+ shim_dict['dependencies'] = [ full_copy_target_name ]
+
+ # Set up the shim to output its PDB to the same location as the final linker
+ # target.
+ for config_name, config in shim_dict.get('configurations').items():
+ pdb_path = _GetPdbPath(target_dict, config_name, vars)
+
+ # A few keys that we don't want to propagate.
+ for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
+ config.pop(key, None)
+
+ msvs = config.setdefault('msvs_settings', {})
+
+ # Update the compiler directives in the shim target.
+ compiler = msvs.setdefault('VCCLCompilerTool', {})
+ compiler['DebugInformationFormat'] = '3'
+ compiler['ProgramDataBaseFileName'] = pdb_path
+
+ # Set the explicit PDB path in the appropriate configuration of the
+ # original target.
+ config = target_dict['configurations'][config_name]
+ msvs = config.setdefault('msvs_settings', {})
+ linker = msvs.setdefault('VCLinkerTool', {})
+ linker['GenerateDebugInformation'] = 'true'
+ linker['ProgramDatabaseFile'] = pdb_path
+
+ # Add the new targets. They must go to the beginning of the list so that
+ # the dependency generation works as expected in ninja.
+ target_list.insert(0, full_copy_target_name)
+ target_list.insert(0, full_shim_target_name)
+ target_dicts[full_copy_target_name] = copy_dict
+ target_dicts[full_shim_target_name] = shim_dict
+
+ # Update the original target to depend on the shim target.
+ target_dict.setdefault('dependencies', []).append(full_shim_target_name)
+
+ return (target_list, target_dicts)
diff --git a/third_party/python/gyp/pylib/gyp/MSVSVersion.py b/third_party/python/gyp/pylib/gyp/MSVSVersion.py
new file mode 100644
index 0000000000..69444e49db
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/MSVSVersion.py
@@ -0,0 +1,537 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Handle version information related to Visual Stuio."""
+
+import errno
+import os
+import re
+import subprocess
+import sys
+import gyp
+import glob
+
+
+def JoinPath(*args):
+ return os.path.normpath(os.path.join(*args))
+
+
+def version_to_tuple(version_str):
+ return tuple(int(x) for x in version_str.split('.'))
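+
+# e.g. version_to_tuple('15.0') -> (15, 0). Comparing tuples keeps the
+# ordering numeric, so '9.0' correctly sorts below '15.0' (a plain string
+# comparison would not).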
+
+
+class VisualStudioVersion(object):
+ """Information regarding a version of Visual Studio."""
+
+ def __init__(self, short_name, description,
+ solution_version, project_version, flat_sln, uses_vcxproj,
+ path, sdk_based, default_toolset=None, compatible_sdks=None):
+ self.short_name = short_name
+ self.description = description
+ self.solution_version = solution_version
+ self.project_version = project_version
+ self.flat_sln = flat_sln
+ self.uses_vcxproj = uses_vcxproj
+ self.path = path
+ self.sdk_based = sdk_based
+ self.default_toolset = default_toolset
+ compatible_sdks = compatible_sdks or []
+ compatible_sdks.sort(key=lambda v: float(v.replace('v', '')), reverse=True)
+ self.compatible_sdks = compatible_sdks
+
+ def ShortName(self):
+ return self.short_name
+
+ def Description(self):
+ """Get the full description of the version."""
+ return self.description
+
+ def SolutionVersion(self):
+ """Get the version number of the sln files."""
+ return self.solution_version
+
+ def ProjectVersion(self):
+ """Get the version number of the vcproj or vcxproj files."""
+ return self.project_version
+
+ def FlatSolution(self):
+ return self.flat_sln
+
+ def UsesVcxproj(self):
+ """Returns true if this version uses a vcxproj file."""
+ return self.uses_vcxproj
+
+ def ProjectExtension(self):
+ """Returns the file extension for the project."""
+    return '.vcxproj' if self.uses_vcxproj else '.vcproj'
+
+ def Path(self):
+ """Returns the path to Visual Studio installation."""
+ return self.path
+
+ def ToolPath(self, tool):
+ """Returns the path to a given compiler tool. """
+ return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
+
+ def DefaultToolset(self):
+ """Returns the msbuild toolset version that will be used in the absence
+ of a user override."""
+ return self.default_toolset
+
+ def _SetupScriptInternal(self, target_arch):
+ """Returns a command (with arguments) to be used to set up the
+ environment."""
+ assert target_arch in ('x86', 'x64'), "target_arch not supported"
+ # If WindowsSDKDir is set and SetEnv.Cmd exists then we are using the
+ # depot_tools build tools and should run SetEnv.Cmd to set up the
+ # environment. The check for WindowsSDKDir alone is not sufficient because
+ # this is set by running vcvarsall.bat.
+ sdk_dir = os.environ.get('WindowsSDKDir', '')
+ setup_path = JoinPath(sdk_dir, 'Bin', 'SetEnv.Cmd')
+ if self.sdk_based and sdk_dir and os.path.exists(setup_path):
+ return [setup_path, '/' + target_arch]
+
+ is_host_arch_x64 = (
+ os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
+ os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'
+ )
+
+ # For VS2017 (and newer) it's fairly easy
+ if self.short_name >= '2017':
+ script_path = JoinPath(self.path,
+ 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')
+
+ # Always use a native executable, cross-compiling if necessary.
+ host_arch = 'amd64' if is_host_arch_x64 else 'x86'
+ msvc_target_arch = 'amd64' if target_arch == 'x64' else 'x86'
+ arg = host_arch
+ if host_arch != msvc_target_arch:
+ arg += '_' + msvc_target_arch
+
+ return [script_path, arg]
+
+ # We try to find the best version of the env setup batch.
+ vcvarsall = JoinPath(self.path, 'VC', 'vcvarsall.bat')
+ if target_arch == 'x86':
+ if self.short_name >= '2013' and self.short_name[-1] != 'e' and \
+ is_host_arch_x64:
+ # VS2013 and later, non-Express have a x64-x86 cross that we want
+ # to prefer.
+ return [vcvarsall, 'amd64_x86']
+ else:
+ # Otherwise, the standard x86 compiler. We don't use VC/vcvarsall.bat
+ # for x86 because vcvarsall calls vcvars32, which it can only find if
+ # VS??COMNTOOLS is set, which isn't guaranteed.
+ return [JoinPath(self.path, 'Common7', 'Tools', 'vsvars32.bat')]
+ elif target_arch == 'x64':
+ arg = 'x86_amd64'
+ # Use the 64-on-64 compiler if we're not using an express edition and
+ # we're running on a 64bit OS.
+ if self.short_name[-1] != 'e' and is_host_arch_x64:
+ arg = 'amd64'
+ return [vcvarsall, arg]
+
+ def SetupScript(self, target_arch):
+ script_data = self._SetupScriptInternal(target_arch)
+ script_path = script_data[0]
+ if not os.path.exists(script_path):
+ raise Exception('%s is missing - make sure VC++ tools are installed.' %
+ script_path)
+ return script_data
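+
+# On a hypothetical x64 host with VS2017 or newer installed,
+# SetupScript('x64') would resolve to something like
+#   [r'<VS path>\VC\Auxiliary\Build\vcvarsall.bat', 'amd64']
+# where <VS path> stands in for the actual installation directory.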
+
+
+def _RegistryQueryBase(sysdir, key, value):
+ """Use reg.exe to read a particular key.
+
+  While ideally we might use the win32 module, we would like gyp to work
+  with any Python build; cygwin python, for instance, lacks this module.
+
+ Arguments:
+ sysdir: The system subdirectory to attempt to launch reg.exe from.
+ key: The registry key to read from.
+ value: The particular value to read.
+ Return:
+ stdout from reg.exe, or None for failure.
+ """
+ # Skip if not on Windows or Python Win32 setup issue
+ if sys.platform not in ('win32', 'cygwin'):
+ return None
+ # Setup params to pass to and attempt to launch reg.exe
+ cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
+ 'query', key]
+ if value:
+ cmd.extend(['/v', value])
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ # Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
+ # Note that the error text may be in [1] in some cases
+ text = p.communicate()[0]
+ # Check return code from reg.exe; officially 0==success and 1==error
+ if p.returncode:
+ return None
+ return text
+
+
+def _RegistryQuery(key, value=None):
+ r"""Use reg.exe to read a particular key through _RegistryQueryBase.
+
+ First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
+ that fails, it falls back to System32. Sysnative is available on Vista and
+  up, and on Windows Server 2003 and XP through KB patch 942589. Note that
+  Sysnative always fails under 64-bit Python because it is a virtual
+  directory visible only to 32-bit processes; in that case System32 already
+  works correctly.
+
+ KB 942589 - http://support.microsoft.com/kb/942589/en-us.
+
+ Arguments:
+ key: The registry key.
+ value: The particular registry value to read (optional).
+ Return:
+ stdout from reg.exe, or None for failure.
+ """
+ text = None
+ try:
+ text = _RegistryQueryBase('Sysnative', key, value)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ text = _RegistryQueryBase('System32', key, value)
+ else:
+ raise
+ return text
+
+
+def _RegistryGetValueUsingWinReg(key, value):
+ """Use the _winreg module to obtain the value of a registry key.
+
+ Args:
+ key: The registry key.
+ value: The particular registry value to read.
+ Return:
+ contents of the registry key's value, or None on failure. Throws
+ ImportError if _winreg is unavailable.
+ """
+ try:
+ import _winreg as winreg
+ except ImportError:
+ import winreg
+ try:
+ root, subkey = key.split('\\', 1)
+ assert root == 'HKLM' # Only need HKLM for now.
+ with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
+ return winreg.QueryValueEx(hkey, value)[0]
+ except WindowsError:
+ return None
+
+
+def _RegistryGetValue(key, value):
+ """Use _winreg or reg.exe to obtain the value of a registry key.
+
+ Using _winreg is preferable because it solves an issue on some corporate
+ environments where access to reg.exe is locked down. However, we still need
+ to fallback to reg.exe for the case where the _winreg module is not available
+ (for example in cygwin python).
+
+ Args:
+ key: The registry key.
+ value: The particular registry value to read.
+ Return:
+ contents of the registry key's value, or None on failure.
+ """
+ try:
+ return _RegistryGetValueUsingWinReg(key, value)
+ except ImportError:
+ pass
+
+ # Fallback to reg.exe if we fail to import _winreg.
+ text = _RegistryQuery(key, value)
+ if not text:
+ return None
+ # Extract value.
+ match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
+ if not match:
+ return None
+ return match.group(1)
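+
+# Example lookup (hypothetical; the result depends on the local machine):
+#   _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\SxS\VS7',
+#                     '15.0')
+# returns the install path string, or None when the key or value is absent.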
+
+
+def _CreateVersion(name, path, sdk_based=False):
+ """Sets up MSVS project generation.
+
+  Setup is based on the GYP_MSVS_VERSION environment variable, or whatever is
+  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version
+  is passed in that doesn't match a value in versions, Python will raise a
+  KeyError.
+ """
+ if path:
+ path = os.path.normpath(path)
+ versions = {
+ '2022': VisualStudioVersion('2022',
+ 'Visual Studio 2022',
+ solution_version='12.00',
+ project_version='17.0',
+ flat_sln=False,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based,
+ default_toolset='v143',
+ compatible_sdks=['v8.1', 'v10.0']),
+ '2019': VisualStudioVersion('2019',
+ 'Visual Studio 2019',
+ solution_version='12.00',
+ project_version='16.0',
+ flat_sln=False,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based,
+ default_toolset='v141',
+ compatible_sdks=['v8.1', 'v10.0']),
+ '2017': VisualStudioVersion('2017',
+ 'Visual Studio 2017',
+ solution_version='12.00',
+ project_version='15.0',
+ flat_sln=False,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based,
+ default_toolset='v141',
+ compatible_sdks=['v8.1', 'v10.0']),
+ '2015': VisualStudioVersion('2015',
+ 'Visual Studio 2015',
+ solution_version='12.00',
+ project_version='14.0',
+ flat_sln=False,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based,
+ default_toolset='v140'),
+ '2013': VisualStudioVersion('2013',
+ 'Visual Studio 2013',
+ solution_version='13.00',
+ project_version='12.0',
+ flat_sln=False,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based,
+ default_toolset='v120'),
+ '2013e': VisualStudioVersion('2013e',
+ 'Visual Studio 2013',
+ solution_version='13.00',
+ project_version='12.0',
+ flat_sln=True,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based,
+ default_toolset='v120'),
+ '2012': VisualStudioVersion('2012',
+ 'Visual Studio 2012',
+ solution_version='12.00',
+ project_version='4.0',
+ flat_sln=False,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based,
+ default_toolset='v110'),
+ '2012e': VisualStudioVersion('2012e',
+ 'Visual Studio 2012',
+ solution_version='12.00',
+ project_version='4.0',
+ flat_sln=True,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based,
+ default_toolset='v110'),
+ '2010': VisualStudioVersion('2010',
+ 'Visual Studio 2010',
+ solution_version='11.00',
+ project_version='4.0',
+ flat_sln=False,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based),
+ '2010e': VisualStudioVersion('2010e',
+ 'Visual C++ Express 2010',
+ solution_version='11.00',
+ project_version='4.0',
+ flat_sln=True,
+ uses_vcxproj=True,
+ path=path,
+ sdk_based=sdk_based),
+ '2008': VisualStudioVersion('2008',
+ 'Visual Studio 2008',
+ solution_version='10.00',
+ project_version='9.00',
+ flat_sln=False,
+ uses_vcxproj=False,
+ path=path,
+ sdk_based=sdk_based),
+ '2008e': VisualStudioVersion('2008e',
+ 'Visual Studio 2008',
+ solution_version='10.00',
+ project_version='9.00',
+ flat_sln=True,
+ uses_vcxproj=False,
+ path=path,
+ sdk_based=sdk_based),
+ '2005': VisualStudioVersion('2005',
+ 'Visual Studio 2005',
+ solution_version='9.00',
+ project_version='8.00',
+ flat_sln=False,
+ uses_vcxproj=False,
+ path=path,
+ sdk_based=sdk_based),
+ '2005e': VisualStudioVersion('2005e',
+ 'Visual Studio 2005',
+ solution_version='9.00',
+ project_version='8.00',
+ flat_sln=True,
+ uses_vcxproj=False,
+ path=path,
+ sdk_based=sdk_based),
+ }
+ return versions[str(name)]
+
+
+def _ConvertToCygpath(path):
+ """Convert to cygwin path if we are using cygwin."""
+ if sys.platform == 'cygwin':
+ p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
+ path = p.communicate()[0].strip()
+ return path
+
+
+def _DetectVisualStudioVersions(versions_to_check, force_express):
+ """Collect the list of installed visual studio versions.
+
+ Returns:
+ A list of visual studio versions installed in descending order of
+ usage preference.
+ Base this on the registry and a quick check if devenv.exe exists.
+ Possibilities are:
+ 2005(e) - Visual Studio 2005 (8)
+ 2008(e) - Visual Studio 2008 (9)
+ 2010(e) - Visual Studio 2010 (10)
+ 2012(e) - Visual Studio 2012 (11)
+ 2013(e) - Visual Studio 2013 (12)
+ 2015 - Visual Studio 2015 (14)
+ 2017 - Visual Studio 2017 (15)
+ 2019 - Visual Studio 2019 (16)
+ 2022 - Visual Studio 2022 (17)
+    Where the trailing 'e' denotes an Express edition of MSVS and is blank
+    otherwise.
+ """
+ version_to_year = {
+ '8.0': '2005',
+ '9.0': '2008',
+ '10.0': '2010',
+ '11.0': '2012',
+ '12.0': '2013',
+ '14.0': '2015',
+ '15.0': '2017',
+ '16.0': '2019',
+ '17.0': '2022',
+ }
+ versions = []
+
+ # MSVC's vcvars*.bat scripts set up extra environment variables we can use:
+ # * path to the VS installation root, for example:
+ # C:\Program Files\Microsoft Visual Studio\2022\Professional
+ env_vs_path = os.getenv('VSINSTALLDIR')
+ # * VS version, e.g. 17.0
+ env_vs_version = os.getenv('VisualStudioVersion')
+
+ for version in versions_to_check:
+    # Old method of searching for which VS version is installed.
+    # We don't use the 2010-encouraged way because we also want to get the
+    # path to the binaries, which it doesn't offer.
+ keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
+ r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
+ r'HKLM\Software\Microsoft\VCExpress\%s' % version,
+ r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
+ for index in range(len(keys)):
+ path = _RegistryGetValue(keys[index], 'InstallDir')
+ if not path:
+ continue
+ path = _ConvertToCygpath(path)
+ # Check for full.
+ full_path = os.path.join(path, 'devenv.exe')
+ express_path = os.path.join(path, '*express.exe')
+ if not force_express and os.path.exists(full_path):
+ # Add this one.
+ versions.append(_CreateVersion(version_to_year[version],
+ os.path.join(path, '..', '..')))
+ # Check for express.
+ elif glob.glob(express_path):
+ # Add this one.
+ versions.append(_CreateVersion(version_to_year[version] + 'e',
+ os.path.join(path, '..', '..')))
+
+ # The old method above does not work when only SDK is installed.
+ keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
+ r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7',
+ r'HKLM\Software\Microsoft\VisualStudio\SxS\VS7',
+ r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VS7']
+ for index in range(len(keys)):
+ path = _RegistryGetValue(keys[index], version)
+ if not path:
+ continue
+ path = _ConvertToCygpath(path)
+ if version_to_tuple(version) >= (15, 0):
+ if os.path.exists(path):
+ versions.append(_CreateVersion(version_to_year[version], path))
+ elif version != '14.0': # There is no Express edition for 2015.
+ versions.append(_CreateVersion(version_to_year[version] + 'e',
+ os.path.join(path, '..'), sdk_based=True))
+
+ if env_vs_version and env_vs_path and env_vs_version == version:
+ versions.append(_CreateVersion(version_to_year[env_vs_version],
+ env_vs_path))
+
+ return versions
+
+
+def SelectVisualStudioVersion(version='auto', allow_fallback=True):
+ """Select which version of Visual Studio projects to generate.
+
+ Arguments:
+    version: Hook to allow caller to force a particular version (vs auto).
+    allow_fallback: Whether to fall back to a default version when no
+      installation can be found.
+ Returns:
+ An object representing a visual studio project format version.
+ """
+ # In auto mode, check environment variable for override.
+ if version == 'auto':
+ version = os.environ.get('GYP_MSVS_VERSION', 'auto')
+ version_map = {
+ 'auto': ('17.0', '16.0', '15.0', '14.0', '12.0', '10.0', '9.0', '8.0',
+ '11.0'),
+ '2005': ('8.0',),
+ '2005e': ('8.0',),
+ '2008': ('9.0',),
+ '2008e': ('9.0',),
+ '2010': ('10.0',),
+ '2010e': ('10.0',),
+ '2012': ('11.0',),
+ '2012e': ('11.0',),
+ '2013': ('12.0',),
+ '2013e': ('12.0',),
+ '2015': ('14.0',),
+ '2017': ('15.0',),
+ '2019': ('16.0',),
+ '2022': ('17.0',),
+ }
+ override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
+ if override_path:
+ msvs_version = os.environ.get('GYP_MSVS_VERSION')
+ if not msvs_version:
+ raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
+ 'set to a particular version (e.g. 2010e).')
+ return _CreateVersion(msvs_version, override_path, sdk_based=True)
+ version = str(version)
+ versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
+ if not versions:
+ if not allow_fallback:
+ raise ValueError('Could not locate Visual Studio installation.')
+ if version == 'auto':
+ # Default to 2005 if we couldn't find anything
+ return _CreateVersion('2005', None)
+ else:
+ return _CreateVersion(version, None)
+ return versions[0]
diff --git a/third_party/python/gyp/pylib/gyp/__init__.py b/third_party/python/gyp/pylib/gyp/__init__.py
new file mode 100755
index 0000000000..e038151ba7
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/__init__.py
@@ -0,0 +1,555 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import copy
+import gyp.input
+import optparse
+import os.path
+import re
+import shlex
+import sys
+import traceback
+from gyp.common import GypError
+
+try:
+ # basestring was removed in python3.
+ basestring
+except NameError:
+ basestring = str
+
+# Default debug modes for GYP
+debug = {}
+
+# List of "official" debug modes, but you can use anything you like.
+DEBUG_GENERAL = 'general'
+DEBUG_VARIABLES = 'variables'
+DEBUG_INCLUDES = 'includes'
+
+def DebugOutput(mode, message, *args):
+ if 'all' in gyp.debug or mode in gyp.debug:
+ ctx = ('unknown', 0, 'unknown')
+ try:
+ f = traceback.extract_stack(limit=2)
+ if f:
+ ctx = f[0][:3]
+    except Exception:
+ pass
+ if args:
+ message %= args
+ print('%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
+ ctx[1], ctx[2], message))
+
+def FindBuildFiles():
+ extension = '.gyp'
+ files = os.listdir(os.getcwd())
+ build_files = []
+ for file in files:
+ if file.endswith(extension):
+ build_files.append(file)
+ return build_files
+
+
+def Load(build_files, format, default_variables={},
+ includes=[], depth='.', params=None, check=False,
+ circular_check=True, duplicate_basename_check=True):
+ """
+ Loads one or more specified build files.
+ default_variables and includes will be copied before use.
+ Returns the generator for the specified format and the
+ data returned by loading the specified build files.
+ """
+ if params is None:
+ params = {}
+
+ if '-' in format:
+ format, params['flavor'] = format.split('-', 1)
+
+ default_variables = copy.copy(default_variables)
+
+ # Default variables provided by this program and its modules should be
+ # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
+ # avoiding collisions with user and automatic variables.
+ default_variables['GENERATOR'] = format
+ default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')
+
+ # Format can be a custom python file, or by default the name of a module
+ # within gyp.generator.
+ if format.endswith('.py'):
+ generator_name = os.path.splitext(format)[0]
+ path, generator_name = os.path.split(generator_name)
+
+ # Make sure the path to the custom generator is in sys.path
+ # Don't worry about removing it once we are done. Keeping the path
+ # to each generator that is used in sys.path is likely harmless and
+ # arguably a good idea.
+ path = os.path.abspath(path)
+ if path not in sys.path:
+ sys.path.insert(0, path)
+ else:
+ generator_name = 'gyp.generator.' + format
+
+ # These parameters are passed in order (as opposed to by key)
+ # because ActivePython cannot handle key parameters to __import__.
+ generator = __import__(generator_name, globals(), locals(), generator_name)
+ for (key, val) in generator.generator_default_variables.items():
+ default_variables.setdefault(key, val)
+
+ # Give the generator the opportunity to set additional variables based on
+ # the params it will receive in the output phase.
+ if getattr(generator, 'CalculateVariables', None):
+ generator.CalculateVariables(default_variables, params)
+
+ # Give the generator the opportunity to set generator_input_info based on
+ # the params it will receive in the output phase.
+ if getattr(generator, 'CalculateGeneratorInputInfo', None):
+ generator.CalculateGeneratorInputInfo(params)
+
+ # Fetch the generator specific info that gets fed to input, we use getattr
+ # so we can default things and the generators only have to provide what
+ # they need.
+ generator_input_info = {
+ 'non_configuration_keys':
+ getattr(generator, 'generator_additional_non_configuration_keys', []),
+ 'path_sections':
+ getattr(generator, 'generator_additional_path_sections', []),
+ 'extra_sources_for_rules':
+ getattr(generator, 'generator_extra_sources_for_rules', []),
+ 'generator_supports_multiple_toolsets':
+ getattr(generator, 'generator_supports_multiple_toolsets', False),
+ 'generator_wants_static_library_dependencies_adjusted':
+ getattr(generator,
+ 'generator_wants_static_library_dependencies_adjusted', True),
+ 'generator_wants_sorted_dependencies':
+ getattr(generator, 'generator_wants_sorted_dependencies', False),
+ 'generator_filelist_paths':
+ getattr(generator, 'generator_filelist_paths', None),
+ }
+
+ # Process the input specific to this generator.
+ result = gyp.input.Load(build_files, default_variables, includes[:],
+ depth, generator_input_info, check, circular_check,
+ duplicate_basename_check,
+ params['parallel'], params['root_targets'])
+ return [generator] + result
+
+def NameValueListToDict(name_value_list):
+ """
+ Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
+ of the pairs. If a string is simply NAME, then the value in the dictionary
+ is set to True. If VALUE can be converted to an integer, it is.
+ """
+ result = { }
+ for item in name_value_list:
+ tokens = item.split('=', 1)
+ if len(tokens) == 2:
+ # If we can make it an int, use that, otherwise, use the string.
+ try:
+ token_value = int(tokens[1])
+ except ValueError:
+ token_value = tokens[1]
+ # Set the variable to the supplied value.
+ result[tokens[0]] = token_value
+ else:
+ # No value supplied, treat it as a boolean and set it.
+ result[tokens[0]] = True
+ return result
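+
+# For example, with hypothetical defines:
+#   NameValueListToDict(['OS=win', 'werror=1', 'fastbuild'])
+#   -> {'OS': 'win', 'werror': 1, 'fastbuild': True}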
+
+def ShlexEnv(env_name):
+ flags = os.environ.get(env_name, [])
+ if flags:
+ flags = shlex.split(flags)
+ return flags
+
+def FormatOpt(opt, value):
+ if opt.startswith('--'):
+ return '%s=%s' % (opt, value)
+ return opt + value
+
+def RegenerateAppendFlag(flag, values, predicate, env_name, options):
+ """Regenerate a list of command line flags, for an option of action='append'.
+
+ The |env_name|, if given, is checked in the environment and used to generate
+ an initial list of options, then the options that were specified on the
+ command line (given in |values|) are appended. This matches the handling of
+ environment variables and command line flags where command line flags override
+ the environment, while not requiring the environment to be set when the flags
+ are used again.
+ """
+ flags = []
+ if options.use_environment and env_name:
+ for flag_value in ShlexEnv(env_name):
+ value = FormatOpt(flag, predicate(flag_value))
+ if value in flags:
+ flags.remove(value)
+ flags.append(value)
+ if values:
+ for flag_value in values:
+ flags.append(FormatOpt(flag, predicate(flag_value)))
+ return flags
+
+def RegenerateFlags(options):
+ """Given a parsed options object, and taking the environment variables into
+ account, returns a list of flags that should regenerate an equivalent options
+ object (even in the absence of the environment variables.)
+
+ Any path options will be normalized relative to depth.
+
+ The format flag is not included, as it is assumed the calling generator will
+ set that as appropriate.
+ """
+ def FixPath(path):
+ path = gyp.common.FixIfRelativePath(path, options.depth)
+ if not path:
+ return os.path.curdir
+ return path
+
+ def Noop(value):
+ return value
+
+ # We always want to ignore the environment when regenerating, to avoid
+ # duplicate or changed flags in the environment at the time of regeneration.
+ flags = ['--ignore-environment']
+ for name, metadata in options._regeneration_metadata.items():
+ opt = metadata['opt']
+ value = getattr(options, name)
+    value_predicate = FixPath if metadata['type'] == 'path' else Noop
+ action = metadata['action']
+ env_name = metadata['env_name']
+ if action == 'append':
+ flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
+ env_name, options))
+ elif action in ('store', None): # None is a synonym for 'store'.
+ if value:
+ flags.append(FormatOpt(opt, value_predicate(value)))
+ elif options.use_environment and env_name and os.environ.get(env_name):
+ flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
+ elif action in ('store_true', 'store_false'):
+ if ((action == 'store_true' and value) or
+ (action == 'store_false' and not value)):
+ flags.append(opt)
+ elif options.use_environment and env_name:
+ print(('Warning: environment regeneration unimplemented '
+ 'for %s flag %r env_name %r' % (action, opt,
+ env_name)),
+ file=sys.stderr)
+ else:
+ print(('Warning: regeneration unimplemented for action %r '
+ 'flag %r' % (action, opt)), file=sys.stderr)
+
+ return flags
+
+class RegeneratableOptionParser(optparse.OptionParser):
+ def __init__(self):
+ self.__regeneratable_options = {}
+ optparse.OptionParser.__init__(self)
+
+ def add_option(self, *args, **kw):
+ """Add an option to the parser.
+
+ This accepts the same arguments as OptionParser.add_option, plus the
+ following:
+ regenerate: can be set to False to prevent this option from being included
+ in regeneration.
+ env_name: name of environment variable that additional values for this
+ option come from.
+      type: a type of 'path' tells the regenerator that the values of
+        this option need to be made relative to options.depth.
+ """
+ env_name = kw.pop('env_name', None)
+ if 'dest' in kw and kw.pop('regenerate', True):
+ dest = kw['dest']
+
+ # The path type is needed for regenerating, for optparse we can just treat
+ # it as a string.
+ type = kw.get('type')
+ if type == 'path':
+ kw['type'] = 'string'
+
+ self.__regeneratable_options[dest] = {
+ 'action': kw.get('action'),
+ 'type': type,
+ 'env_name': env_name,
+ 'opt': args[0],
+ }
+
+ optparse.OptionParser.add_option(self, *args, **kw)
+
+ def parse_args(self, *args):
+ values, args = optparse.OptionParser.parse_args(self, *args)
+ values._regeneration_metadata = self.__regeneratable_options
+ return values, args
+
+def gyp_main(args):
+ my_name = os.path.basename(sys.argv[0])
+
+ parser = RegeneratableOptionParser()
+ usage = 'usage: %s [options ...] [build_file ...]'
+ parser.set_usage(usage.replace('%s', '%prog'))
+ parser.add_option('--build', dest='configs', action='append',
+ help='configuration for build after project generation')
+ parser.add_option('--check', dest='check', action='store_true',
+ help='check format of gyp files')
+ parser.add_option('--config-dir', dest='config_dir', action='store',
+ env_name='GYP_CONFIG_DIR', default=None,
+ help='The location for configuration files like '
+ 'include.gypi.')
+ parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
+ action='append', default=[], help='turn on a debugging '
+ 'mode for debugging GYP. Supported modes are "variables", '
+ '"includes" and "general" or "all" for all of them.')
+ parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
+ env_name='GYP_DEFINES',
+ help='sets variable VAR to value VAL')
+ parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
+ help='set DEPTH gyp variable to a relative path to PATH')
+ parser.add_option('-f', '--format', dest='formats', action='append',
+ env_name='GYP_GENERATORS', regenerate=False,
+ help='output formats to generate')
+ parser.add_option('-G', dest='generator_flags', action='append', default=[],
+ metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
+ help='sets generator flag FLAG to VAL')
+ parser.add_option('--generator-output', dest='generator_output',
+ action='store', default=None, metavar='DIR', type='path',
+ env_name='GYP_GENERATOR_OUTPUT',
+ help='puts generated build files under DIR')
+ parser.add_option('--ignore-environment', dest='use_environment',
+ action='store_false', default=True, regenerate=False,
+ help='do not read options from environment variables')
+ parser.add_option('-I', '--include', dest='includes', action='append',
+ metavar='INCLUDE', type='path',
+ help='files to include in all loaded .gyp files')
+ # --no-circular-check disables the check for circular relationships between
+ # .gyp files. These relationships should not exist, but they've only been
+ # observed to be harmful with the Xcode generator. Chromium's .gyp files
+ # currently have some circular relationships on non-Mac platforms, so this
+ # option allows the strict behavior to be used on Macs and the lenient
+ # behavior to be used elsewhere.
+ # TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
+ parser.add_option('--no-circular-check', dest='circular_check',
+ action='store_false', default=True, regenerate=False,
+ help="don't check for circular relationships between files")
+ # --no-duplicate-basename-check disables the check for duplicate basenames
+ # in a static_library/shared_library project. Visual C++ 2008 generator
+ # doesn't support this configuration. Libtool on Mac also generates warnings
+ # when duplicate basenames are passed into Make generator on Mac.
+ # TODO(yukawa): Remove this option when these legacy generators are
+ # deprecated.
+ parser.add_option('--no-duplicate-basename-check',
+ dest='duplicate_basename_check', action='store_false',
+ default=True, regenerate=False,
+ help="don't check for duplicate basenames")
+ parser.add_option('--no-parallel', action='store_true', default=False,
+ help='Disable multiprocessing')
+ parser.add_option('-S', '--suffix', dest='suffix', default='',
+ help='suffix to add to generated files')
+ parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
+ default=None, metavar='DIR', type='path',
+ help='directory to use as the root of the source tree')
+ parser.add_option('-R', '--root-target', dest='root_targets',
+ action='append', metavar='TARGET',
+ help='include only TARGET and its deep dependencies')
+
+ options, build_files_arg = parser.parse_args(args)
+ build_files = build_files_arg
+
+ # Set up the configuration directory (defaults to ~/.gyp)
+ if not options.config_dir:
+ home = None
+ home_dot_gyp = None
+ if options.use_environment:
+ home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
+ if home_dot_gyp:
+ home_dot_gyp = os.path.expanduser(home_dot_gyp)
+
+ if not home_dot_gyp:
+ home_vars = ['HOME']
+ if sys.platform in ('cygwin', 'win32'):
+ home_vars.append('USERPROFILE')
+ for home_var in home_vars:
+ home = os.getenv(home_var)
+      if home is not None:
+ home_dot_gyp = os.path.join(home, '.gyp')
+ if not os.path.exists(home_dot_gyp):
+ home_dot_gyp = None
+ else:
+ break
+ else:
+ home_dot_gyp = os.path.expanduser(options.config_dir)
+
+ if home_dot_gyp and not os.path.exists(home_dot_gyp):
+ home_dot_gyp = None
+
+ if not options.formats:
+ # If no format was given on the command line, then check the env variable.
+ generate_formats = []
+ if options.use_environment:
+ generate_formats = os.environ.get('GYP_GENERATORS', [])
+ if generate_formats:
+ generate_formats = re.split(r'[\s,]', generate_formats)
+ if generate_formats:
+ options.formats = generate_formats
+ else:
+ # Nothing in the variable, default based on platform.
+ if sys.platform == 'darwin':
+ options.formats = ['xcode']
+ elif sys.platform in ('win32', 'cygwin'):
+ options.formats = ['msvs']
+ else:
+ options.formats = ['make']
+
+ if not options.generator_output and options.use_environment:
+ g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
+ if g_o:
+ options.generator_output = g_o
+
+ options.parallel = not options.no_parallel
+
+ for mode in options.debug:
+ gyp.debug[mode] = 1
+
+ # Do an extra check to avoid work when we're not debugging.
+ if DEBUG_GENERAL in gyp.debug:
+ DebugOutput(DEBUG_GENERAL, 'running with these options:')
+ for option, value in sorted(options.__dict__.items()):
+ if option[0] == '_':
+ continue
+ if isinstance(value, basestring):
+ DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
+ else:
+ DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
+
+ if not build_files:
+ build_files = FindBuildFiles()
+ if not build_files:
+ raise GypError((usage + '\n\n%s: error: no build_file') %
+ (my_name, my_name))
+
+ # TODO(mark): Chromium-specific hack!
+ # For Chromium, the gyp "depth" variable should always be a relative path
+ # to Chromium's top-level "src" directory. If no depth variable was set
+ # on the command line, try to find a "src" directory by looking at the
+ # absolute path to each build file's directory. The first "src" component
+ # found will be treated as though it were the path used for --depth.
+ if not options.depth:
+ for build_file in build_files:
+ build_file_dir = os.path.abspath(os.path.dirname(build_file))
+ build_file_dir_components = build_file_dir.split(os.path.sep)
+ for component in reversed(build_file_dir_components):
+ if component == 'src':
+ options.depth = os.path.sep.join(build_file_dir_components)
+ break
+ del build_file_dir_components[-1]
+
+ # If the inner loop found something, break without advancing to another
+ # build file.
+ if options.depth:
+ break
+
+ if not options.depth:
+    raise GypError('Could not automatically locate src directory. This is '
+                   'a temporary Chromium feature that will be removed. Use '
+                   '--depth as a workaround.')
+
+ # If toplevel-dir is not set, we assume that depth is the root of our source
+ # tree.
+ if not options.toplevel_dir:
+ options.toplevel_dir = options.depth
+
+ # -D on the command line sets variable defaults - D isn't just for define,
+ # it's for default. Perhaps there should be a way to force (-F?) a
+ # variable's value so that it can't be overridden by anything else.
+ cmdline_default_variables = {}
+ defines = []
+ if options.use_environment:
+ defines += ShlexEnv('GYP_DEFINES')
+ if options.defines:
+ defines += options.defines
+ cmdline_default_variables = NameValueListToDict(defines)
+ if DEBUG_GENERAL in gyp.debug:
+ DebugOutput(DEBUG_GENERAL,
+ "cmdline_default_variables: %s", cmdline_default_variables)
+
+ # Set up includes.
+ includes = []
+
+ # If ~/.gyp/include.gypi exists, it'll be forcibly included into every
+ # .gyp file that's loaded, before anything else is included.
+  if home_dot_gyp is not None:
+ default_include = os.path.join(home_dot_gyp, 'include.gypi')
+ if os.path.exists(default_include):
+ print('Using overrides found in ' + default_include)
+ includes.append(default_include)
+
+ # Command-line --include files come after the default include.
+ if options.includes:
+ includes.extend(options.includes)
+
+ # Generator flags should be prefixed with the target generator since they
+ # are global across all generator runs.
+ gen_flags = []
+ if options.use_environment:
+ gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
+ if options.generator_flags:
+ gen_flags += options.generator_flags
+ generator_flags = NameValueListToDict(gen_flags)
+ if DEBUG_GENERAL in gyp.debug:
+ DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
+
+ # Generate all requested formats (use a set in case we got one format request
+ # twice)
+ for format in set(options.formats):
+ params = {'options': options,
+ 'build_files': build_files,
+ 'generator_flags': generator_flags,
+ 'cwd': os.getcwd(),
+ 'build_files_arg': build_files_arg,
+ 'gyp_binary': sys.argv[0],
+ 'home_dot_gyp': home_dot_gyp,
+ 'parallel': options.parallel,
+ 'root_targets': options.root_targets,
+ 'target_arch': cmdline_default_variables.get('target_arch', '')}
+
+ # Start with the default variables from the command line.
+ [generator, flat_list, targets, data] = Load(
+ build_files, format, cmdline_default_variables, includes, options.depth,
+ params, options.check, options.circular_check,
+ options.duplicate_basename_check)
+
+ # TODO(mark): Pass |data| for now because the generator needs a list of
+ # build files that came in. In the future, maybe it should just accept
+ # a list, and not the whole data dict.
+ # NOTE: flat_list is the flattened dependency graph specifying the order
+ # that targets may be built. Build systems that operate serially or that
+ # need to have dependencies defined before dependents reference them should
+ # generate targets in the order specified in flat_list.
+ generator.GenerateOutput(flat_list, targets, data, params)
+
+ if options.configs:
+ valid_configs = targets[flat_list[0]]['configurations']
+ for conf in options.configs:
+ if conf not in valid_configs:
+ raise GypError('Invalid config specified via --build: %s' % conf)
+ generator.PerformBuild(data, options.configs, params)
+
+ # Done
+ return 0
+
+
+def main(args):
+ try:
+ return gyp_main(args)
+ except GypError as e:
+ sys.stderr.write("gyp: %s\n" % e)
+ return 1
+
+# NOTE: setuptools-generated console_scripts entry points call this
+# function with no arguments.
+def script_main():
+ return main(sys.argv[1:])
+
+if __name__ == '__main__':
+ sys.exit(script_main())
diff --git a/third_party/python/gyp/pylib/gyp/common.py b/third_party/python/gyp/pylib/gyp/common.py
new file mode 100644
index 0000000000..937f894879
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/common.py
@@ -0,0 +1,608 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import with_statement
+
+import errno
+import filecmp
+import os.path
+import re
+import tempfile
+import sys
+
+from six.moves import collections_abc
+
+
+# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
+# among other "problems".
+class memoize(object):
+ def __init__(self, func):
+ self.func = func
+ self.cache = {}
+ def __call__(self, *args):
+ try:
+ return self.cache[args]
+ except KeyError:
+ result = self.func(*args)
+ self.cache[args] = result
+ return result
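+
+# Hypothetical usage sketch: results are cached per argument tuple, so the
+# wrapped function runs at most once for each distinct (hashable) input.
+#   @memoize
+#   def _ExpensiveLookup(path):
+#     ...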
+
+
+class GypError(Exception):
+ """Error class representing an error, which is to be presented
+ to the user. The main entry point will catch and display this.
+ """
+ pass
+
+
+def ExceptionAppend(e, msg):
+ """Append a message to the given exception's message."""
+ if not e.args:
+ e.args = (msg,)
+ elif len(e.args) == 1:
+ e.args = (str(e.args[0]) + ' ' + msg,)
+ else:
+ e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
+
+
+def FindQualifiedTargets(target, qualified_list):
+ """
+ Given a list of qualified targets, return the qualified targets for the
+ specified |target|.
+ """
+ return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
+
+
+def ParseQualifiedTarget(target):
+ # Splits a qualified target into a build file, target name and toolset.
+
+ # NOTE: rsplit is used to disambiguate the Windows drive letter separator.
+ target_split = target.rsplit(':', 1)
+ if len(target_split) == 2:
+ [build_file, target] = target_split
+ else:
+ build_file = None
+
+ target_split = target.rsplit('#', 1)
+ if len(target_split) == 2:
+ [target, toolset] = target_split
+ else:
+ toolset = None
+
+ return [build_file, target, toolset]
+
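+# Example (illustrative; the path and names are hypothetical):
+#   ParseQualifiedTarget('chrome/chrome.gyp:browser#host')
+#     => ['chrome/chrome.gyp', 'browser', 'host']
+#   ParseQualifiedTarget('browser')
+#     => [None, 'browser', None]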
+
+def ResolveTarget(build_file, target, toolset):
+ # This function resolves a target into a canonical form:
+ # - a fully defined build file, either absolute or relative to the current
+ # directory
+ # - a target name
+ # - a toolset
+ #
+ # build_file is the file relative to which 'target' is defined.
+ # target is the qualified target.
+ # toolset is the default toolset for that target.
+ [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
+
+ if parsed_build_file:
+ if build_file:
+ # If a relative path, parsed_build_file is relative to the directory
+ # containing build_file. If build_file is not in the current directory,
+ # parsed_build_file is not a usable path as-is. Resolve it by
+ # interpreting it as relative to build_file. If parsed_build_file is
+ # absolute, it is usable as a path regardless of the current directory,
+ # and os.path.join will return it as-is.
+ build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
+ parsed_build_file))
+      # Further (to handle cases like ../cwd), make it relative to cwd.
+ if not os.path.isabs(build_file):
+ build_file = RelativePath(build_file, '.')
+ else:
+ build_file = parsed_build_file
+
+ if parsed_toolset:
+ toolset = parsed_toolset
+
+ return [build_file, target, toolset]
+
+
+def BuildFile(fully_qualified_target):
+ # Extracts the build file from the fully qualified target.
+ return ParseQualifiedTarget(fully_qualified_target)[0]
+
+
+def GetEnvironFallback(var_list, default):
+ """Look up a key in the environment, with fallback to secondary keys
+ and finally falling back to a default value."""
+ for var in var_list:
+ if var in os.environ:
+ return os.environ[var]
+ return default
+
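+# Example (illustrative): prefer a target-specific variable, then the generic
+# one, then a hard-coded default:
+#   GetEnvironFallback(('CC_target', 'CC'), 'cc')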
+
+def QualifiedTarget(build_file, target, toolset):
+ # "Qualified" means the file that a target was defined in and the target
+ # name, separated by a colon, suffixed by a # and the toolset name:
+ # /path/to/file.gyp:target_name#toolset
+ fully_qualified = build_file + ':' + target
+ if toolset:
+ fully_qualified = fully_qualified + '#' + toolset
+ return fully_qualified
+
+
+@memoize
+def RelativePath(path, relative_to, follow_path_symlink=True):
+ # Assuming both |path| and |relative_to| are relative to the current
+ # directory, returns a relative path that identifies path relative to
+ # relative_to.
+  # If |follow_path_symlink| is true (default) and |path| is a symlink, then
+ # this method returns a path to the real file represented by |path|. If it is
+ # false, this method returns a path to the symlink. If |path| is not a
+ # symlink, this option has no effect.
+
+ # Convert to normalized (and therefore absolute paths).
+ if follow_path_symlink:
+ path = os.path.realpath(path)
+ else:
+ path = os.path.abspath(path)
+ relative_to = os.path.realpath(relative_to)
+
+ # On Windows, we can't create a relative path to a different drive, so just
+ # use the absolute path.
+ if sys.platform == 'win32':
+ if (os.path.splitdrive(path)[0].lower() !=
+ os.path.splitdrive(relative_to)[0].lower()):
+ return path
+
+ relative = os.path.relpath(path, relative_to)
+ if relative == os.path.curdir:
+ # The paths were the same.
+ return ''
+
+ return relative
+
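+# Example (illustrative, assuming no symlinks are involved):
+#   RelativePath('out/Debug/gen', 'out')  =>  'Debug/gen'
+#   RelativePath('out', 'out')            =>  ''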
+
+@memoize
+def InvertRelativePath(path, toplevel_dir=None):
+ """Given a path like foo/bar that is relative to toplevel_dir, return
+ the inverse relative path back to the toplevel_dir.
+
+ E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
+ should always produce the empty string, unless the path contains symlinks.
+ """
+ if not path:
+ return path
+ toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
+ return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
+
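+# Example (illustrative): InvertRelativePath('foo/bar') => '../..', so joining
+# 'foo/bar' with its inverse normalizes back to toplevel_dir.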
+
+def FixIfRelativePath(path, relative_to):
+ # Like RelativePath but returns |path| unchanged if it is absolute.
+ if os.path.isabs(path):
+ return path
+ return RelativePath(path, relative_to)
+
+
+def UnrelativePath(path, relative_to):
+ # Assuming that |relative_to| is relative to the current directory, and |path|
+ # is a path relative to the dirname of |relative_to|, returns a path that
+ # identifies |path| relative to the current directory.
+ rel_dir = os.path.dirname(relative_to)
+ return os.path.normpath(os.path.join(rel_dir, path))
+
+
+# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
+# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
+# and the documentation for various shells.
+
+# _quote is a pattern that should match any argument that needs to be quoted
+# with double-quotes by EncodePOSIXShellArgument. It matches the following
+# characters appearing anywhere in an argument:
+# \t, \n, space parameter separators
+# # comments
+# $ expansions (quoted to always expand within one argument)
+# % called out by IEEE 1003.1 XCU.2.2
+# & job control
+# ' quoting
+# (, ) subshell execution
+# *, ?, [ pathname expansion
+# ; command delimiter
+# <, >, | redirection
+# = assignment
+# {, } brace expansion (bash)
+# ~ tilde expansion
+# It also matches the empty string, because "" (or '') is the only way to
+# represent an empty string literal argument to a POSIX shell.
+#
+# This does not match the characters in _escape, because those need to be
+# backslash-escaped regardless of whether they appear in a double-quoted
+# string.
+_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
+
+# _escape is a pattern that should match any character that needs to be
+# escaped with a backslash, whether or not the argument matched the _quote
+# pattern. _escape is used with re.sub to backslash anything in _escape's
+# first match group, hence the (parentheses) in the regular expression.
+#
+# _escape matches the following characters appearing anywhere in an argument:
+# " to prevent POSIX shells from interpreting this character for quoting
+# \ to prevent POSIX shells from interpreting this character for escaping
+# ` to prevent POSIX shells from interpreting this character for command
+# substitution
+# Missing from this list is $, because the desired behavior of
+# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
+#
+# Also missing from this list is !, which bash will interpret as the history
+# expansion character when history is enabled. bash does not enable history
+# by default in non-interactive shells, so this is not thought to be a problem.
+# ! was omitted from this list because bash interprets "\!" as a literal string
+# including the backslash character (avoiding history expansion but retaining
+# the backslash), which would not be correct for argument encoding. Handling
+# this case properly would also be problematic because bash allows the history
+# character to be changed with the histchars shell variable. Fortunately,
+# as history is not enabled in non-interactive shells and
+# EncodePOSIXShellArgument is only expected to encode for non-interactive
+# shells, there is no room for error here by ignoring !.
+_escape = re.compile(r'(["\\`])')
+
+def EncodePOSIXShellArgument(argument):
+ """Encodes |argument| suitably for consumption by POSIX shells.
+
+ argument may be quoted and escaped as necessary to ensure that POSIX shells
+ treat the returned value as a literal representing the argument passed to
+ this function. Parameter (variable) expansions beginning with $ are allowed
+ to remain intact without escaping the $, to allow the argument to contain
+ references to variables to be expanded by the shell.
+ """
+
+ if not isinstance(argument, str):
+ argument = str(argument)
+
+ if _quote.search(argument):
+ quote = '"'
+ else:
+ quote = ''
+
+ encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
+
+ return encoded
+
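+# Examples (illustrative):
+#   EncodePOSIXShellArgument('file name')  =>  '"file name"'  (quoted: space)
+#   EncodePOSIXShellArgument('$HOME')      =>  '"$HOME"'      ($ stays expandable)
+#   EncodePOSIXShellArgument('a"b')        =>  'a\\"b'        (escaped, not quoted)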
+
+def EncodePOSIXShellList(list):
+ """Encodes |list| suitably for consumption by POSIX shells.
+
+ Returns EncodePOSIXShellArgument for each item in list, and joins them
+ together using the space character as an argument separator.
+ """
+
+ encoded_arguments = []
+ for argument in list:
+ encoded_arguments.append(EncodePOSIXShellArgument(argument))
+ return ' '.join(encoded_arguments)
+
+
+def DeepDependencyTargets(target_dicts, roots):
+ """Returns the recursive list of target dependencies."""
+ dependencies = set()
+ pending = set(roots)
+ while pending:
+ # Pluck out one.
+ r = pending.pop()
+ # Skip if visited already.
+ if r in dependencies:
+ continue
+ # Add it.
+ dependencies.add(r)
+ # Add its children.
+ spec = target_dicts[r]
+ pending.update(set(spec.get('dependencies', [])))
+ pending.update(set(spec.get('dependencies_original', [])))
+ return list(dependencies - set(roots))
+
+
+def BuildFileTargets(target_list, build_file):
+ """From a target_list, returns the subset from the specified build_file.
+ """
+ return [p for p in target_list if BuildFile(p) == build_file]
+
+
+def AllTargets(target_list, target_dicts, build_file):
+ """Returns all targets (direct and dependencies) for the specified build_file.
+ """
+ bftargets = BuildFileTargets(target_list, build_file)
+ deptargets = DeepDependencyTargets(target_dicts, bftargets)
+ return bftargets + deptargets
+
+
+def WriteOnDiff(filename):
+ """Write to a file only if the new contents differ.
+
+ Arguments:
+ filename: name of the file to potentially write to.
+ Returns:
+ A file like object which will write to temporary file and only overwrite
+ the target if it differs (on close).
+ """
+
+ class Writer(object):
+    """Wrapper around file which only overwrites the target if it differs."""
+ def __init__(self):
+ # Pick temporary file.
+ tmp_fd, self.tmp_path = tempfile.mkstemp(
+ suffix='.tmp',
+ prefix=os.path.split(filename)[1] + '.gyp.',
+ dir=os.path.split(filename)[0])
+ try:
+ self.tmp_file = os.fdopen(tmp_fd, 'w')
+ except Exception:
+ # Don't leave turds behind.
+ os.unlink(self.tmp_path)
+ raise
+
+ def __getattr__(self, attrname):
+ # Delegate everything else to self.tmp_file
+ return getattr(self.tmp_file, attrname)
+
+ def close(self):
+ try:
+ # Close tmp file.
+ self.tmp_file.close()
+ # Determine if different.
+ same = False
+ try:
+ same = filecmp.cmp(self.tmp_path, filename, False)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ if same:
+ # The new file is identical to the old one, just get rid of the new
+ # one.
+ os.unlink(self.tmp_path)
+ else:
+ # The new file is different from the old one, or there is no old one.
+ # Rename the new file to the permanent name.
+ #
+ # tempfile.mkstemp uses an overly restrictive mode, resulting in a
+ # file that can only be read by the owner, regardless of the umask.
+ # There's no reason to not respect the umask here, which means that
+ # an extra hoop is required to fetch it and reset the new file's mode.
+ #
+ # No way to get the umask without setting a new one? Set a safe one
+ # and then set it back to the old value.
+ umask = os.umask(0o77)
+ os.umask(umask)
+ os.chmod(self.tmp_path, 0o666 & ~umask)
+ if sys.platform == 'win32' and os.path.exists(filename):
+ # NOTE: on windows (but not cygwin) rename will not replace an
+ # existing file, so it must be preceded with a remove. Sadly there
+ # is no way to make the switch atomic.
+ os.remove(filename)
+ os.rename(self.tmp_path, filename)
+ except Exception:
+ # Don't leave turds behind.
+ os.unlink(self.tmp_path)
+ raise
+
+ return Writer()
+
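+# Example (illustrative; 'project.mk' is a hypothetical output file):
+#   out = WriteOnDiff('project.mk')
+#   out.write(contents)
+#   out.close()  # leaves project.mk untouched if contents are identical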
+
+def EnsureDirExists(path):
+ """Make sure the directory for |path| exists."""
+ try:
+ os.makedirs(os.path.dirname(path))
+ except OSError:
+ pass
+
+
+def GetFlavor(params):
+  """Returns |params.flavor| if set, otherwise the system's default flavor."""
+ flavors = {
+ 'cygwin': 'win',
+ 'win32': 'win',
+ 'darwin': 'mac',
+ }
+
+ if 'flavor' in params:
+ return params['flavor']
+ if sys.platform in flavors:
+ return flavors[sys.platform]
+ if sys.platform.startswith('sunos'):
+ return 'solaris'
+ if sys.platform.startswith('freebsd'):
+ return 'freebsd'
+ if sys.platform.startswith('openbsd'):
+ return 'openbsd'
+ if sys.platform.startswith('netbsd'):
+ return 'netbsd'
+ if sys.platform.startswith('aix'):
+ return 'aix'
+ if sys.platform.startswith('zos'):
+ return 'zos'
+ if sys.platform.startswith('os390'):
+ return 'zos'
+
+ return 'linux'
+
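+# Examples (illustrative):
+#   GetFlavor({})                 =>  'mac' when sys.platform == 'darwin'
+#   GetFlavor({'flavor': 'win'})  =>  'win' regardless of the host platform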
+
+def CopyTool(flavor, out_path, generator_flags={}):
+  """Finds (flock|mac|win)_tool.py in the gyp directory and copies it
+ to |out_path|."""
+ # aix and solaris just need flock emulation. mac and win use more complicated
+ # support scripts.
+ prefix = {
+ 'aix': 'flock',
+ 'solaris': 'flock',
+ 'mac': 'mac',
+ 'win': 'win'
+ }.get(flavor, None)
+ if not prefix:
+ return
+
+ # Slurp input file.
+ source_path = os.path.join(
+ os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
+ with open(source_path) as source_file:
+ source = source_file.readlines()
+
+ # Set custom header flags.
+ header = '# Generated by gyp. Do not edit.\n'
+ mac_toolchain_dir = generator_flags.get('mac_toolchain_dir', None)
+ if flavor == 'mac' and mac_toolchain_dir:
+ header += "import os;\nos.environ['DEVELOPER_DIR']='%s'\n" \
+ % mac_toolchain_dir
+
+ # Add header and write it out.
+ tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
+ with open(tool_path, 'w') as tool_file:
+ tool_file.write(
+ ''.join([source[0], header] + source[1:]))
+
+ # Make file executable.
+ os.chmod(tool_path, 0o755)
+
+
+# From Alex Martelli,
+# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
+# ASPN: Python Cookbook: Remove duplicates from a sequence
+# First comment, dated 2001/10/13.
+# (Also in the printed Python Cookbook.)
+
+def uniquer(seq, idfun=None):
+ if idfun is None:
+ idfun = lambda x: x
+ seen = {}
+ result = []
+ for item in seq:
+ marker = idfun(item)
+ if marker in seen: continue
+ seen[marker] = 1
+ result.append(item)
+ return result
+
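+# Examples (illustrative):
+#   uniquer([1, 2, 1, 3])                 =>  [1, 2, 3]
+#   uniquer(['a', 'A'], idfun=str.lower)  =>  ['a']  (first occurrence wins)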
+
+# Based on http://code.activestate.com/recipes/576694/.
+class OrderedSet(collections_abc.MutableSet):
+ def __init__(self, iterable=None):
+ self.end = end = []
+ end += [None, end, end] # sentinel node for doubly linked list
+ self.map = {} # key --> [key, prev, next]
+ if iterable is not None:
+ self |= iterable
+
+ def __len__(self):
+ return len(self.map)
+
+ def __contains__(self, key):
+ return key in self.map
+
+ def add(self, key):
+ if key not in self.map:
+ end = self.end
+ curr = end[1]
+ curr[2] = end[1] = self.map[key] = [key, curr, end]
+
+ def discard(self, key):
+ if key in self.map:
+ key, prev_item, next_item = self.map.pop(key)
+ prev_item[2] = next_item
+ next_item[1] = prev_item
+
+ def __iter__(self):
+ end = self.end
+ curr = end[2]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[2]
+
+ def __reversed__(self):
+ end = self.end
+ curr = end[1]
+ while curr is not end:
+ yield curr[0]
+ curr = curr[1]
+
+ # The second argument is an addition that causes a pylint warning.
+ def pop(self, last=True): # pylint: disable=W0221
+ if not self:
+ raise KeyError('set is empty')
+ key = self.end[1][0] if last else self.end[2][0]
+ self.discard(key)
+ return key
+
+ def __repr__(self):
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, list(self))
+
+ def __eq__(self, other):
+ if isinstance(other, OrderedSet):
+ return len(self) == len(other) and list(self) == list(other)
+ return set(self) == set(other)
+
+ # Extensions to the recipe.
+ def update(self, iterable):
+ for i in iterable:
+ if i not in self:
+ self.add(i)
+
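+# Example (illustrative): OrderedSet iterates in insertion order,
+#   list(OrderedSet('abracadabra'))  =>  ['a', 'b', 'r', 'c', 'd']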
+
+class CycleError(Exception):
+ """An exception raised when an unexpected cycle is detected."""
+ def __init__(self, nodes):
+ self.nodes = nodes
+ def __str__(self):
+ return 'CycleError: cycle involving: ' + str(self.nodes)
+
+
+def TopologicallySorted(graph, get_edges):
+  r"""Topologically sort based on a user-provided edge definition.
+
+ Args:
+ graph: A list of node names.
+ get_edges: A function mapping from node name to a hashable collection
+ of node names which this node has outgoing edges to.
+ Returns:
+    A list containing all of the nodes in graph in topological order.
+ It is assumed that calling get_edges once for each node and caching is
+ cheaper than repeatedly calling get_edges.
+ Raises:
+ CycleError in the event of a cycle.
+ Example:
+ graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
+ def GetEdges(node):
+      return re.findall(r'\$\(([^)]*)\)', graph[node])
+ print(TopologicallySorted(graph.keys(), GetEdges))
+ ==>
+    ['a', 'c', 'b']
+ """
+ get_edges = memoize(get_edges)
+ visited = set()
+ visiting = set()
+ ordered_nodes = []
+ def Visit(node):
+ if node in visiting:
+ raise CycleError(visiting)
+ if node in visited:
+ return
+ visited.add(node)
+ visiting.add(node)
+ for neighbor in get_edges(node):
+ Visit(neighbor)
+ visiting.remove(node)
+ ordered_nodes.insert(0, node)
+ for node in sorted(graph):
+ Visit(node)
+ return ordered_nodes
+
+def CrossCompileRequested():
+ # TODO: figure out how to not build extra host objects in the
+ # non-cross-compile case when this is enabled, and enable unconditionally.
+ return (os.environ.get('GYP_CROSSCOMPILE') or
+ os.environ.get('AR_host') or
+ os.environ.get('CC_host') or
+ os.environ.get('CXX_host') or
+ os.environ.get('AR_target') or
+ os.environ.get('CC_target') or
+ os.environ.get('CXX_target'))
diff --git a/third_party/python/gyp/pylib/gyp/common_test.py b/third_party/python/gyp/pylib/gyp/common_test.py
new file mode 100755
index 0000000000..0b8ada3dc3
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/common_test.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for the common.py file."""
+
+import gyp.common
+import unittest
+import sys
+
+
+class TestTopologicallySorted(unittest.TestCase):
+ def test_Valid(self):
+ """Test that sorting works on a valid graph with one possible order."""
+ graph = {
+ 'a': ['b', 'c'],
+ 'b': [],
+ 'c': ['d'],
+ 'd': ['b'],
+ }
+ def GetEdge(node):
+ return tuple(graph[node])
+ self.assertEqual(
+ gyp.common.TopologicallySorted(graph.keys(), GetEdge),
+ ['a', 'c', 'd', 'b'])
+
+ def test_Cycle(self):
+ """Test that an exception is thrown on a cyclic graph."""
+ graph = {
+ 'a': ['b'],
+ 'b': ['c'],
+ 'c': ['d'],
+ 'd': ['a'],
+ }
+ def GetEdge(node):
+ return tuple(graph[node])
+ self.assertRaises(
+ gyp.common.CycleError, gyp.common.TopologicallySorted,
+ graph.keys(), GetEdge)
+
+
+class TestGetFlavor(unittest.TestCase):
+ """Test that gyp.common.GetFlavor works as intended"""
+ original_platform = ''
+
+ def setUp(self):
+ self.original_platform = sys.platform
+
+ def tearDown(self):
+ sys.platform = self.original_platform
+
+ def assertFlavor(self, expected, argument, param):
+ sys.platform = argument
+ self.assertEqual(expected, gyp.common.GetFlavor(param))
+
+ def test_platform_default(self):
+ self.assertFlavor('freebsd', 'freebsd9' , {})
+ self.assertFlavor('freebsd', 'freebsd10', {})
+ self.assertFlavor('openbsd', 'openbsd5' , {})
+    self.assertFlavor('solaris', 'sunos5'   , {})
+    self.assertFlavor('solaris', 'sunos'    , {})
+    self.assertFlavor('linux'  , 'linux2'   , {})
+    self.assertFlavor('linux'  , 'linux3'   , {})
+    self.assertFlavor('linux'  , 'linux'    , {})
+
+ def test_param(self):
+ self.assertFlavor('foobar', 'linux2' , {'flavor': 'foobar'})
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/python/gyp/pylib/gyp/easy_xml.py b/third_party/python/gyp/pylib/gyp/easy_xml.py
new file mode 100644
index 0000000000..2de51e25fb
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/easy_xml.py
@@ -0,0 +1,170 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+import os
+import locale
+import sys
+
+try:
+ # reduce moved to functools in python3.
+ reduce
+except NameError:
+ from functools import reduce
+
+def XmlToString(content, encoding='utf-8', pretty=False):
+  """ Converts the structured content into a string of XML.
+
+ Visual Studio files have a lot of pre-defined structures. This function makes
+ it easy to represent these structures as Python data structures, instead of
+ having to create a lot of function calls.
+
+ Each XML element of the content is represented as a list composed of:
+ 1. The name of the element, a string,
+ 2. The attributes of the element, a dictionary (optional), and
+ 3+. The content of the element, if any. Strings are simple text nodes and
+ lists are child elements.
+
+ Example 1:
+ <test/>
+ becomes
+ ['test']
+
+ Example 2:
+ <myelement a='value1' b='value2'>
+ <childtype>This is</childtype>
+ <childtype>it!</childtype>
+ </myelement>
+
+ becomes
+ ['myelement', {'a':'value1', 'b':'value2'},
+ ['childtype', 'This is'],
+ ['childtype', 'it!'],
+ ]
+
+ Args:
+ content: The structured content to be converted.
+ encoding: The encoding to report on the first XML line.
+ pretty: True if we want pretty printing with indents and new lines.
+
+ Returns:
+ The XML content as a string.
+ """
+ # We create a huge list of all the elements of the file.
+ xml_parts = ['<?xml version="1.0" encoding="%s"?>' % encoding]
+ if pretty:
+ xml_parts.append('\n')
+ _ConstructContentList(xml_parts, content, pretty)
+
+ # Convert it to a string
+ return ''.join(xml_parts)
+
+
+def _ConstructContentList(xml_parts, specification, pretty, level=0):
+ """ Appends the XML parts corresponding to the specification.
+
+ Args:
+ xml_parts: A list of XML parts to be appended to.
+ specification: The specification of the element. See EasyXml docs.
+ pretty: True if we want pretty printing with indents and new lines.
+ level: Indentation level.
+ """
+ # The first item in a specification is the name of the element.
+ if pretty:
+ indentation = ' ' * level
+ new_line = '\n'
+ else:
+ indentation = ''
+ new_line = ''
+ name = specification[0]
+ if not isinstance(name, str):
+ raise Exception('The first item of an EasyXml specification should be '
+ 'a string. Specification was ' + str(specification))
+ xml_parts.append(indentation + '<' + name)
+
+ # Optionally in second position is a dictionary of the attributes.
+ rest = specification[1:]
+ if rest and isinstance(rest[0], dict):
+ for at, val in sorted(rest[0].items()):
+ xml_parts.append(' %s="%s"' % (at, _XmlEscape(val, attr=True)))
+ rest = rest[1:]
+ if rest:
+ xml_parts.append('>')
+ all_strings = reduce(lambda x, y: x and isinstance(y, str), rest, True)
+ multi_line = not all_strings
+ if multi_line and new_line:
+ xml_parts.append(new_line)
+ for child_spec in rest:
+ # If it's a string, append a text node.
+ # Otherwise recurse over that child definition
+ if isinstance(child_spec, str):
+ xml_parts.append(_XmlEscape(child_spec))
+ else:
+ _ConstructContentList(xml_parts, child_spec, pretty, level + 1)
+ if multi_line and indentation:
+ xml_parts.append(indentation)
+ xml_parts.append('</%s>%s' % (name, new_line))
+ else:
+ xml_parts.append('/>%s' % new_line)
+
+
+def WriteXmlIfChanged(content, path, encoding='utf-8', pretty=False,
+ win32=False):
+ """ Writes the XML content to disk, touching the file only if it has changed.
+
+ Args:
+ content: The structured content to be written.
+ path: Location of the file.
+ encoding: The encoding to report on the first line of the XML file.
+    pretty: True if we want pretty printing with indents and new lines.
+    win32: True to convert newlines to Windows (CRLF) line endings.
+ """
+ xml_string = XmlToString(content, encoding, pretty)
+ if win32 and os.linesep != '\r\n':
+ xml_string = xml_string.replace('\n', '\r\n')
+ default_encoding = locale.getdefaultlocale()[1]
+ if default_encoding and default_encoding.upper() != encoding.upper():
+ try:
+ xml_string = xml_string.decode(default_encoding).encode(encoding)
+ except AttributeError:
+ pass
+
+ # Get the old content
+ try:
+ f = open(path, 'r')
+ existing = f.read()
+ f.close()
+  except IOError:
+ existing = None
+
+ # It has changed, write it
+ if existing != xml_string:
+ f = open(path, 'w')
+ f.write(xml_string)
+ f.close()
+
+
+_xml_escape_map = {
+ '"': '&quot;',
+ "'": '&apos;',
+ '<': '&lt;',
+ '>': '&gt;',
+ '&': '&amp;',
+ '\n': '&#xA;',
+ '\r': '&#xD;',
+}
+
+
+_xml_escape_re = re.compile(
+ "(%s)" % "|".join(map(re.escape, _xml_escape_map.keys())))
+
+
+def _XmlEscape(value, attr=False):
+ """ Escape a string for inclusion in XML."""
+ def replace(match):
+ m = match.string[match.start() : match.end()]
+ # don't replace single quotes in attrs
+ if attr and m == "'":
+ return m
+ return _xml_escape_map[m]
+ return _xml_escape_re.sub(replace, value)
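+
+# Examples (illustrative):
+#   _XmlEscape('<a & b>')          =>  '&lt;a &amp; b&gt;'
+#   _XmlEscape("it's")             =>  'it&apos;s'
+#   _XmlEscape("it's", attr=True)  =>  "it's"  (single quotes kept in attributes)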
diff --git a/third_party/python/gyp/pylib/gyp/easy_xml_test.py b/third_party/python/gyp/pylib/gyp/easy_xml_test.py
new file mode 100755
index 0000000000..c3be446417
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/easy_xml_test.py
@@ -0,0 +1,108 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Unit tests for the easy_xml.py file. """
+
+import gyp.easy_xml as easy_xml
+import unittest
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+
+class TestSequenceFunctions(unittest.TestCase):
+
+ def setUp(self):
+ self.stderr = StringIO()
+
+ def test_EasyXml_simple(self):
+ self.assertEqual(
+ easy_xml.XmlToString(['test']),
+ '<?xml version="1.0" encoding="utf-8"?><test/>')
+
+ self.assertEqual(
+ easy_xml.XmlToString(['test'], encoding='Windows-1252'),
+ '<?xml version="1.0" encoding="Windows-1252"?><test/>')
+
+ def test_EasyXml_simple_with_attributes(self):
+ self.assertEqual(
+ easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
+ '<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
+
+ def test_EasyXml_escaping(self):
+ original = '<test>\'"\r&\nfoo'
+ converted = '&lt;test&gt;\'&quot;&#xD;&amp;&#xA;foo'
+ converted_apos = converted.replace("'", '&apos;')
+ self.assertEqual(
+ easy_xml.XmlToString(['test3', {'a': original}, original]),
+ '<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
+ (converted, converted_apos))
+
+ def test_EasyXml_pretty(self):
+ self.assertEqual(
+ easy_xml.XmlToString(
+ ['test3',
+ ['GrandParent',
+ ['Parent1',
+ ['Child']
+ ],
+ ['Parent2']
+ ]
+ ],
+ pretty=True),
+ '<?xml version="1.0" encoding="utf-8"?>\n'
+ '<test3>\n'
+ ' <GrandParent>\n'
+ ' <Parent1>\n'
+ ' <Child/>\n'
+ ' </Parent1>\n'
+ ' <Parent2/>\n'
+ ' </GrandParent>\n'
+ '</test3>\n')
+
+
+ def test_EasyXml_complex(self):
+ # We want to create:
+ target = (
+ '<?xml version="1.0" encoding="utf-8"?>'
+ '<Project>'
+ '<PropertyGroup Label="Globals">'
+ '<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
+ '<Keyword>Win32Proj</Keyword>'
+ '<RootNamespace>automated_ui_tests</RootNamespace>'
+ '</PropertyGroup>'
+ '<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
+ '<PropertyGroup '
+ 'Condition="\'$(Configuration)|$(Platform)\'=='
+ '\'Debug|Win32\'" Label="Configuration">'
+ '<ConfigurationType>Application</ConfigurationType>'
+ '<CharacterSet>Unicode</CharacterSet>'
+ '<SpectreMitigation>SpectreLoadCF</SpectreMitigation>'
+ '</PropertyGroup>'
+ '</Project>')
+
+ xml = easy_xml.XmlToString(
+ ['Project',
+ ['PropertyGroup', {'Label': 'Globals'},
+ ['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
+ ['Keyword', 'Win32Proj'],
+ ['RootNamespace', 'automated_ui_tests']
+ ],
+ ['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
+ ['PropertyGroup',
+ {'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
+ 'Label': 'Configuration'},
+ ['ConfigurationType', 'Application'],
+ ['CharacterSet', 'Unicode'],
+ ['SpectreMitigation', 'SpectreLoadCF']
+ ]
+ ])
+ self.assertEqual(xml, target)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/python/gyp/pylib/gyp/flock_tool.py b/third_party/python/gyp/pylib/gyp/flock_tool.py
new file mode 100755
index 0000000000..81fb79d136
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/flock_tool.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""These functions are executed via gyp-flock-tool when using the Makefile
+generator. Used on systems that don't have a built-in flock."""
+
+import fcntl
+import os
+import struct
+import subprocess
+import sys
+
+
+def main(args):
+ executor = FlockTool()
+ executor.Dispatch(args)
+
+
+class FlockTool(object):
+ """This class emulates the 'flock' command."""
+ def Dispatch(self, args):
+ """Dispatches a string command to a method."""
+ if len(args) < 1:
+ raise Exception("Not enough arguments")
+
+ method = "Exec%s" % self._CommandifyName(args[0])
+ getattr(self, method)(*args[1:])
+
+ def _CommandifyName(self, name_string):
+ """Transforms a tool name like copy-info-plist to CopyInfoPlist"""
+ return name_string.title().replace('-', '')
+
+ def ExecFlock(self, lockfile, *cmd_list):
+ """Emulates the most basic behavior of Linux's flock(1)."""
+ # Rely on exception handling to report errors.
+ # Note that the stock python on SunOS has a bug
+ # where fcntl.flock(fd, LOCK_EX) always fails
+ # with EBADF, that's why we use this F_SETLK
+ # hack instead.
+ fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
+ if sys.platform.startswith('aix'):
+ # Python on AIX is compiled with LARGEFILE support, which changes the
+ # struct size.
+ op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
+ else:
+ op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
+ fcntl.fcntl(fd, fcntl.F_SETLK, op)
+ return subprocess.call(cmd_list)
+
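+# Illustrative invocation, assuming the script has been copied to
+# gyp-flock-tool by gyp.common.CopyTool:
+#   gyp-flock-tool flock /path/to/lockfile <command> [args...]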
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/third_party/python/gyp/pylib/gyp/generator/__init__.py b/third_party/python/gyp/pylib/gyp/generator/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/__init__.py
diff --git a/third_party/python/gyp/pylib/gyp/generator/analyzer.py b/third_party/python/gyp/pylib/gyp/generator/analyzer.py
new file mode 100644
index 0000000000..b3484dcb1b
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/analyzer.py
@@ -0,0 +1,744 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This script is intended for use as a GYP_GENERATOR. It takes as input (by way of
+the generator flag config_path) the path of a json file that dictates the files
+and targets to search for. The following keys are supported:
+files: list of paths (relative) of the files to search for.
+test_targets: unqualified target names to search for. Any target in this list
+that depends upon a file in |files| is output regardless of the type of target
+or chain of dependencies.
+additional_compile_targets: Unqualified targets to search for in addition to
+test_targets. Targets in the combined list that depend upon a file in |files|
+are not necessarily output. For example, if the target is of type none then the
+target is not output (but one of the descendants of the target will be).
+
+The following is output:
+error: only supplied if there is an error.
+compile_targets: minimal set of targets that directly or indirectly (for
+ targets of type none) depend on the files in |files| and is one of the
+ supplied targets or a target that one of the supplied targets depends on.
+ The expectation is this set of targets is passed into a build step. This list
+ always contains the output of test_targets as well.
+test_targets: set of targets from the supplied |test_targets| that either
+  directly or indirectly depend upon a file in |files|. This list is useful
+ if additional processing needs to be done for certain targets after the
+ build, such as running tests.
+status: outputs one of three values: none of the supplied files were found,
+ one of the include files changed so that it should be assumed everything
+ changed (in this case test_targets and compile_targets are not output) or at
+ least one file was found.
+invalid_targets: list of supplied targets that were not found.
+
+Example:
+Consider a graph like the following:
+  A     D
+ / \
+B   C
+A depends upon both B and C, A is of type none and B and C are executables.
+D is an executable, has no dependencies and nothing depends on it.
+If |additional_compile_targets| = ["A"], |test_targets| = ["B", "C"] and
+files = ["b.cc", "d.cc"] (B depends upon b.cc and D depends upon d.cc), then
+the following is output:
+|compile_targets| = ["B"] B must built as it depends upon the changed file b.cc
+and the supplied target A depends upon it. A is not output as a build_target
+as it is of type none with no rules and actions.
+|test_targets| = ["B"] B directly depends upon the change file b.cc.
+
+Even though the file d.cc, which D depends upon, has changed, D is not output
+as it was not supplied by way of |additional_compile_targets| or |test_targets|.
+
+If the generator flag analyzer_output_path is specified, output is written
+there. Otherwise output is written to stdout.
+
+In Gyp the "all" target is shorthand for the root targets in the files passed
+to gyp. For example, if file "a.gyp" contains targets "a1" and
+"a2", and file "b.gyp" contains targets "b1" and "b2" and "a2" has a dependency
+on "b2" and gyp is supplied "a.gyp" then "all" consists of "a1" and "a2".
+Notice that "b1" and "b2" are not in the "all" target as "b.gyp" was not
+directly supplied to gyp. OTOH if both "a.gyp" and "b.gyp" are supplied to gyp
+then the "all" target includes "b1" and "b2".
+"""
+
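+# An illustrative config file (the JSON named by the config_path generator
+# flag); the file and target names are hypothetical:
+#
+#   {
+#     "files": ["foo/bar.cc"],
+#     "test_targets": ["unit_tests"],
+#     "additional_compile_targets": ["all"]
+#   }
+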
+from __future__ import print_function
+
+import gyp.common
+import gyp.ninja_syntax as ninja_syntax
+import json
+import os
+import posixpath
+import sys
+
+debug = False
+
+found_dependency_string = 'Found dependency'
+no_dependency_string = 'No dependencies'
+# Status when it should be assumed that everything has changed.
+all_changed_string = 'Found dependency (all)'
+
+# MatchStatus is used to indicate if and how a target depends upon the supplied
+# sources.
+# The target's sources contain one of the supplied paths.
+MATCH_STATUS_MATCHES = 1
+# The target has a dependency on another target that contains one of the
+# supplied paths.
+MATCH_STATUS_MATCHES_BY_DEPENDENCY = 2
+# The target's sources weren't in the supplied paths and none of the target's
+# dependencies depend upon a target that matched.
+MATCH_STATUS_DOESNT_MATCH = 3
+# The target doesn't contain the source, but the dependent targets have not yet
+# been visited to determine a more specific status yet.
+MATCH_STATUS_TBD = 4
+
+generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
+
+generator_wants_static_library_dependencies_adjusted = False
+
+generator_default_variables = {
+}
+for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
+ 'LIB_DIR', 'SHARED_LIB_DIR']:
+ generator_default_variables[dirname] = '!!!'
+
+for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
+ 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
+ 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
+ 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
+ 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
+ 'CONFIGURATION_NAME']:
+ generator_default_variables[unused] = ''
+
+
+def _ToGypPath(path):
+ """Converts a path to the format used by gyp."""
+ if os.sep == '\\' and os.altsep == '/':
+ return path.replace('\\', '/')
+ return path
+
+
+def _ResolveParent(path, base_path_components):
+ """Resolves |path|, which starts with at least one '../'. Returns an empty
+ string if the path shouldn't be considered. See _AddSources() for a
+ description of |base_path_components|."""
+ depth = 0
+ while path.startswith('../'):
+ depth += 1
+ path = path[3:]
+ # Relative includes may go outside the source tree. For example, an action may
+ # have inputs in /usr/include, which are not in the source tree.
+ if depth > len(base_path_components):
+ return ''
+ if depth == len(base_path_components):
+ return path
+ return '/'.join(base_path_components[0:len(base_path_components) - depth]) + \
+ '/' + path
+
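+# Example (illustrative), with base_path_components == ['chrome', 'browser']:
+#   _ResolveParent('../ui/x.cc', ['chrome', 'browser'])     =>  'chrome/ui/x.cc'
+#   _ResolveParent('../../../x.cc', ['chrome', 'browser'])  =>  ''  (outside tree)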
+
+def _AddSources(sources, base_path, base_path_components, result):
+ """Extracts valid sources from |sources| and adds them to |result|. Each
+ source file is relative to |base_path|, but may contain '..'. To make
+ resolving '..' easier |base_path_components| contains each of the
+ directories in |base_path|. Additionally each source may contain variables.
+ Such sources are ignored as it is assumed dependencies on them are expressed
+ and tracked in some other means."""
+ # NOTE: gyp paths are always posix style.
+ for source in sources:
+ if not len(source) or source.startswith('!!!') or source.startswith('$'):
+ continue
+ # variable expansion may lead to //.
+ org_source = source
+ source = source[0] + source[1:].replace('//', '/')
+ if source.startswith('../'):
+ source = _ResolveParent(source, base_path_components)
+ if len(source):
+ result.append(source)
+ continue
+ result.append(base_path + source)
+ if debug:
+ print('AddSource', org_source, result[len(result) - 1])
+
+
+def _ExtractSourcesFromAction(action, base_path, base_path_components,
+ results):
+ if 'inputs' in action:
+ _AddSources(action['inputs'], base_path, base_path_components, results)
+
+
+def _ToLocalPath(toplevel_dir, path):
+ """Converts |path| to a path relative to |toplevel_dir|."""
+ if path == toplevel_dir:
+ return ''
+ if path.startswith(toplevel_dir + '/'):
+ return path[len(toplevel_dir) + len('/'):]
+ return path
+
+
+def _ExtractSources(target, target_dict, toplevel_dir):
+ # |target| is either absolute or relative and in the format of the OS. Gyp
+ # source paths are always posix. Convert |target| to a posix path relative to
+ # |toplevel_dir_|. This is done to make it easy to build source paths.
+ base_path = posixpath.dirname(_ToLocalPath(toplevel_dir, _ToGypPath(target)))
+ base_path_components = base_path.split('/')
+
+ # Add a trailing '/' so that _AddSources() can easily build paths.
+ if len(base_path):
+ base_path += '/'
+
+ if debug:
+ print('ExtractSources', target, base_path)
+
+ results = []
+ if 'sources' in target_dict:
+ _AddSources(target_dict['sources'], base_path, base_path_components,
+ results)
+ # Include the inputs from any actions. Any changes to these affect the
+ # resulting output.
+ if 'actions' in target_dict:
+ for action in target_dict['actions']:
+ _ExtractSourcesFromAction(action, base_path, base_path_components,
+ results)
+ if 'rules' in target_dict:
+ for rule in target_dict['rules']:
+ _ExtractSourcesFromAction(rule, base_path, base_path_components, results)
+
+ return results
+
+
+class Target(object):
+ """Holds information about a particular target:
+ deps: set of Targets this Target depends upon. This is not recursive, only the
+ direct dependent Targets.
+ match_status: one of the MatchStatus values.
+ back_deps: set of Targets that have a dependency on this Target.
+ visited: used during iteration to indicate whether we've visited this target.
+ This is used for two iterations, once in building the set of Targets and
+ again in _GetBuildTargets().
+ name: fully qualified name of the target.
+ requires_build: True if the target type is such that it needs to be built.
+ See _DoesTargetTypeRequireBuild for details.
+ added_to_compile_targets: used when determining if the target was added to the
+ set of targets that needs to be built.
+ in_roots: true if this target is a descendant of one of the root nodes.
+ is_executable: true if the type of target is executable.
+ is_static_library: true if the type of target is static_library.
+ is_or_has_linked_ancestor: true if the target does a link (eg executable), or
+ if there is a target in back_deps that does a link."""
+ def __init__(self, name):
+ self.deps = set()
+ self.match_status = MATCH_STATUS_TBD
+ self.back_deps = set()
+ self.name = name
+ # TODO(sky): I don't like hanging this off Target. This state is specific
+ # to certain functions and should be isolated there.
+ self.visited = False
+ self.requires_build = False
+ self.added_to_compile_targets = False
+ self.in_roots = False
+ self.is_executable = False
+ self.is_static_library = False
+ self.is_or_has_linked_ancestor = False
+
+
+class Config(object):
+  """Details what we're looking for:
+ files: set of files to search for
+ targets: see file description for details."""
+ def __init__(self):
+ self.files = []
+ self.targets = set()
+ self.additional_compile_target_names = set()
+ self.test_target_names = set()
+
+ def Init(self, params):
+ """Initializes Config. This is a separate method as it raises an exception
+ if there is a parse error."""
+ generator_flags = params.get('generator_flags', {})
+ config_path = generator_flags.get('config_path', None)
+ if not config_path:
+ return
+ try:
+ f = open(config_path, 'r')
+ config = json.load(f)
+ f.close()
+ except IOError:
+ raise Exception('Unable to open file ' + config_path)
+ except ValueError as e:
+ raise Exception('Unable to parse config file ' + config_path + str(e))
+ if not isinstance(config, dict):
+ raise Exception('config_path must be a JSON file containing a dictionary')
+ self.files = config.get('files', [])
+ self.additional_compile_target_names = set(
+ config.get('additional_compile_targets', []))
+ self.test_target_names = set(config.get('test_targets', []))
+
+
+def _WasBuildFileModified(build_file, data, files, toplevel_dir):
+ """Returns true if the build file |build_file| is either in |files| or
+ one of the files included by |build_file| is in |files|. |toplevel_dir| is
+ the root of the source tree."""
+ if _ToLocalPath(toplevel_dir, _ToGypPath(build_file)) in files:
+ if debug:
+ print('gyp file modified', build_file)
+ return True
+
+ # First element of included_files is the file itself.
+ if len(data[build_file]['included_files']) <= 1:
+ return False
+
+ for include_file in data[build_file]['included_files'][1:]:
+ # |included_files| are relative to the directory of the |build_file|.
+ rel_include_file = \
+ _ToGypPath(gyp.common.UnrelativePath(include_file, build_file))
+ if _ToLocalPath(toplevel_dir, rel_include_file) in files:
+ if debug:
+ print('included gyp file modified, gyp_file=', build_file, \
+ 'included file=', rel_include_file)
+ return True
+ return False
+
+
+def _GetOrCreateTargetByName(targets, target_name):
+ """Creates or returns the Target at targets[target_name]. If there is no
+ Target for |target_name| one is created. Returns a tuple of whether a new
+ Target was created and the Target."""
+ if target_name in targets:
+ return False, targets[target_name]
+ target = Target(target_name)
+ targets[target_name] = target
+ return True, target
+
+
+def _DoesTargetTypeRequireBuild(target_dict):
+ """Returns true if the target type is such that it needs to be built."""
+ # If a 'none' target has rules or actions we assume it requires a build.
+ return bool(target_dict['type'] != 'none' or
+ target_dict.get('actions') or target_dict.get('rules'))
+
+
+def _GenerateTargets(data, target_list, target_dicts, toplevel_dir, files,
+ build_files):
+ """Returns a tuple of the following:
+ . A dictionary mapping from fully qualified name to Target.
+ . A list of the targets that have a source file in |files|.
+ . Targets that constitute the 'all' target. See description at top of file
+ for details on the 'all' target.
+ This sets the |match_status| of the targets that contain any of the source
+ files in |files| to MATCH_STATUS_MATCHES.
+ |toplevel_dir| is the root of the source tree."""
+ # Maps from target name to Target.
+ name_to_target = {}
+
+ # Targets that matched.
+ matching_targets = []
+
+ # Queue of targets to visit.
+ targets_to_visit = target_list[:]
+
+ # Maps from build file to a boolean indicating whether the build file is in
+ # |files|.
+ build_file_in_files = {}
+
+ # Root targets across all files.
+ roots = set()
+
+ # Set of Targets in |build_files|.
+ build_file_targets = set()
+
+ while len(targets_to_visit) > 0:
+ target_name = targets_to_visit.pop()
+ created_target, target = _GetOrCreateTargetByName(name_to_target,
+ target_name)
+ if created_target:
+ roots.add(target)
+ elif target.visited:
+ continue
+
+ target.visited = True
+ target.requires_build = _DoesTargetTypeRequireBuild(
+ target_dicts[target_name])
+ target_type = target_dicts[target_name]['type']
+ target.is_executable = target_type == 'executable'
+ target.is_static_library = target_type == 'static_library'
+ target.is_or_has_linked_ancestor = (target_type == 'executable' or
+ target_type == 'shared_library')
+
+ build_file = gyp.common.ParseQualifiedTarget(target_name)[0]
+ if not build_file in build_file_in_files:
+ build_file_in_files[build_file] = \
+ _WasBuildFileModified(build_file, data, files, toplevel_dir)
+
+ if build_file in build_files:
+ build_file_targets.add(target)
+
+ # If a build file (or any of its included files) is modified we assume all
+ # targets in the file are modified.
+ if build_file_in_files[build_file]:
+ print('matching target from modified build file', target_name)
+ target.match_status = MATCH_STATUS_MATCHES
+ matching_targets.append(target)
+ else:
+ sources = _ExtractSources(target_name, target_dicts[target_name],
+ toplevel_dir)
+ for source in sources:
+ if _ToGypPath(os.path.normpath(source)) in files:
+ print('target', target_name, 'matches', source)
+ target.match_status = MATCH_STATUS_MATCHES
+ matching_targets.append(target)
+ break
+
+ # Add dependencies to visit as well as updating back pointers for deps.
+ for dep in target_dicts[target_name].get('dependencies', []):
+ targets_to_visit.append(dep)
+
+ created_dep_target, dep_target = _GetOrCreateTargetByName(name_to_target,
+ dep)
+ if not created_dep_target:
+ roots.discard(dep_target)
+
+ target.deps.add(dep_target)
+ dep_target.back_deps.add(target)
+
+ return name_to_target, matching_targets, roots & build_file_targets
+
+
+def _GetUnqualifiedToTargetMapping(all_targets, to_find):
+ """Returns a tuple of the following:
+ . mapping (dictionary) from unqualified name to Target for all the
+ Targets in |to_find|.
+ . any target names not found. If this is empty all targets were found."""
+ result = {}
+ if not to_find:
+ return {}, []
+ to_find = set(to_find)
+ for target_name in all_targets.keys():
+ extracted = gyp.common.ParseQualifiedTarget(target_name)
+ if len(extracted) > 1 and extracted[1] in to_find:
+ to_find.remove(extracted[1])
+ result[extracted[1]] = all_targets[target_name]
+ if not to_find:
+ return result, []
+ return result, [x for x in to_find]
+
+
+def _DoesTargetDependOnMatchingTargets(target):
+ """Returns true if |target| or any of its dependencies is one of the
+ targets containing the files supplied as input to analyzer. This updates
+ |matches| of the Targets as it recurses.
+ target: the Target to look for."""
+ if target.match_status == MATCH_STATUS_DOESNT_MATCH:
+ return False
+ if target.match_status == MATCH_STATUS_MATCHES or \
+ target.match_status == MATCH_STATUS_MATCHES_BY_DEPENDENCY:
+ return True
+ for dep in target.deps:
+ if _DoesTargetDependOnMatchingTargets(dep):
+ target.match_status = MATCH_STATUS_MATCHES_BY_DEPENDENCY
+ print('\t', target.name, 'matches by dep', dep.name)
+ return True
+ target.match_status = MATCH_STATUS_DOESNT_MATCH
+ return False
+
+
+def _GetTargetsDependingOnMatchingTargets(possible_targets):
+ """Returns the list of Targets in |possible_targets| that depend (either
+  directly or indirectly) on at least one of the targets containing the files
+ supplied as input to analyzer.
+ possible_targets: targets to search from."""
+ found = []
+ print('Targets that matched by dependency:')
+ for target in possible_targets:
+ if _DoesTargetDependOnMatchingTargets(target):
+ found.append(target)
+ return found
+
+
+def _AddCompileTargets(target, roots, add_if_no_ancestor, result):
+ """Recurses through all targets that depend on |target|, adding all targets
+ that need to be built (and are in |roots|) to |result|.
+ roots: set of root targets.
+ add_if_no_ancestor: If true and there are no ancestors of |target| then add
+ |target| to |result|. |target| must still be in |roots|.
+ result: targets that need to be built are added here."""
+ if target.visited:
+ return
+
+ target.visited = True
+ target.in_roots = target in roots
+
+ for back_dep_target in target.back_deps:
+ _AddCompileTargets(back_dep_target, roots, False, result)
+ target.added_to_compile_targets |= back_dep_target.added_to_compile_targets
+ target.in_roots |= back_dep_target.in_roots
+ target.is_or_has_linked_ancestor |= (
+ back_dep_target.is_or_has_linked_ancestor)
+
+ # Always add 'executable' targets. Even though they may be built by other
+  # targets that depend upon them, it makes detection of what is going to be
+ # built easier.
+ # And always add static_libraries that have no dependencies on them from
+ # linkables. This is necessary as the other dependencies on them may be
+ # static libraries themselves, which are not compile time dependencies.
+ if target.in_roots and \
+ (target.is_executable or
+ (not target.added_to_compile_targets and
+ (add_if_no_ancestor or target.requires_build)) or
+ (target.is_static_library and add_if_no_ancestor and
+ not target.is_or_has_linked_ancestor)):
+ print('\t\tadding to compile targets', target.name, 'executable',
+ target.is_executable, 'added_to_compile_targets',
+ target.added_to_compile_targets, 'add_if_no_ancestor',
+ add_if_no_ancestor, 'requires_build', target.requires_build,
+ 'is_static_library', target.is_static_library,
+ 'is_or_has_linked_ancestor', target.is_or_has_linked_ancestor
+ )
+ result.add(target)
+ target.added_to_compile_targets = True
+
+
+def _GetCompileTargets(matching_targets, supplied_targets):
+ """Returns the set of Targets that require a build.
+ matching_targets: targets that changed and need to be built.
+ supplied_targets: set of targets supplied to analyzer to search from."""
+ result = set()
+ for target in matching_targets:
+ print('finding compile targets for match', target.name)
+ _AddCompileTargets(target, supplied_targets, True, result)
+ return result
+
+
+def _WriteOutput(params, **values):
+  """Writes the output, either to stdout or to a file if specified."""
+ if 'error' in values:
+ print('Error:', values['error'])
+ if 'status' in values:
+ print(values['status'])
+ if 'targets' in values:
+ values['targets'].sort()
+ print('Supplied targets that depend on changed files:')
+ for target in values['targets']:
+ print('\t', target)
+ if 'invalid_targets' in values:
+ values['invalid_targets'].sort()
+ print('The following targets were not found:')
+ for target in values['invalid_targets']:
+ print('\t', target)
+ if 'build_targets' in values:
+ values['build_targets'].sort()
+ print('Targets that require a build:')
+ for target in values['build_targets']:
+ print('\t', target)
+ if 'compile_targets' in values:
+ values['compile_targets'].sort()
+ print('Targets that need to be built:')
+ for target in values['compile_targets']:
+ print('\t', target)
+ if 'test_targets' in values:
+ values['test_targets'].sort()
+ print('Test targets:')
+ for target in values['test_targets']:
+ print('\t', target)
+
+ output_path = params.get('generator_flags', {}).get(
+ 'analyzer_output_path', None)
+ if not output_path:
+ print(json.dumps(values))
+ return
+ try:
+ f = open(output_path, 'w')
+ f.write(json.dumps(values) + '\n')
+ f.close()
+ except IOError as e:
+ print('Error writing to output file', output_path, str(e))
+
+
+def _WasGypIncludeFileModified(params, files):
+ """Returns true if one of the files in |files| is in the set of included
+ files."""
+ if params['options'].includes:
+ for include in params['options'].includes:
+ if _ToGypPath(os.path.normpath(include)) in files:
+ print('Include file modified, assuming all changed', include)
+ return True
+ return False
+
+
+def _NamesNotIn(names, mapping):
+ """Returns a list of the values in |names| that are not in |mapping|."""
+ return [name for name in names if name not in mapping]
+
+
+def _LookupTargets(names, mapping):
+ """Returns a list of the mapping[name] for each value in |names| that is in
+ |mapping|."""
+ return [mapping[name] for name in names if name in mapping]
+
+
+def CalculateVariables(default_variables, params):
+ """Calculate additional variables for use in the build (called by gyp)."""
+ flavor = gyp.common.GetFlavor(params)
+ if flavor == 'mac':
+ default_variables.setdefault('OS', 'mac')
+ elif flavor == 'win':
+ default_variables.setdefault('OS', 'win')
+ # Copy additional generator configuration data from VS, which is shared
+ # by the Windows Ninja generator.
+    import gyp.generator.msvs as msvs_generator
+    # gyp.msvs_emulation is used below but never imported in this module;
+    # import it explicitly instead of relying on a side effect of the
+    # import above.
+    import gyp.msvs_emulation
+ generator_additional_non_configuration_keys = getattr(msvs_generator,
+ 'generator_additional_non_configuration_keys', [])
+ generator_additional_path_sections = getattr(msvs_generator,
+ 'generator_additional_path_sections', [])
+
+ gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
+ else:
+ operating_system = flavor
+ if flavor == 'android':
+ operating_system = 'linux' # Keep this legacy behavior for now.
+ default_variables.setdefault('OS', operating_system)
+
+
+class TargetCalculator(object):
+ """Calculates the matching test_targets and matching compile_targets."""
+ def __init__(self, files, additional_compile_target_names, test_target_names,
+ data, target_list, target_dicts, toplevel_dir, build_files):
+ self._additional_compile_target_names = set(additional_compile_target_names)
+ self._test_target_names = set(test_target_names)
+ self._name_to_target, self._changed_targets, self._root_targets = (
+ _GenerateTargets(data, target_list, target_dicts, toplevel_dir,
+ frozenset(files), build_files))
+ self._unqualified_mapping, self.invalid_targets = (
+ _GetUnqualifiedToTargetMapping(self._name_to_target,
+ self._supplied_target_names_no_all()))
+
+ def _supplied_target_names(self):
+ return self._additional_compile_target_names | self._test_target_names
+
+ def _supplied_target_names_no_all(self):
+    """Returns the supplied target names, minus 'all'."""
+    result = self._supplied_target_names()
+ result.discard('all')
+ return result
+
+ def is_build_impacted(self):
+ """Returns true if the supplied files impact the build at all."""
+ return self._changed_targets
+
+ def find_matching_test_target_names(self):
+ """Returns the set of output test targets."""
+ assert self.is_build_impacted()
+ # Find the test targets first. 'all' is special cased to mean all the
+    # root targets. To deal with 'all', the supplied |test_targets| are
+    # expanded to include the root targets during lookup. If any of the root
+    # targets match, they are removed and replaced with 'all'.
+ test_target_names_no_all = set(self._test_target_names)
+ test_target_names_no_all.discard('all')
+ test_targets_no_all = _LookupTargets(test_target_names_no_all,
+ self._unqualified_mapping)
+ test_target_names_contains_all = 'all' in self._test_target_names
+ if test_target_names_contains_all:
+ test_targets = [x for x in (set(test_targets_no_all) |
+ set(self._root_targets))]
+ else:
+ test_targets = [x for x in test_targets_no_all]
+ print('supplied test_targets')
+ for target_name in self._test_target_names:
+ print('\t', target_name)
+ print('found test_targets')
+ for target in test_targets:
+ print('\t', target.name)
+ print('searching for matching test targets')
+ matching_test_targets = _GetTargetsDependingOnMatchingTargets(test_targets)
+ matching_test_targets_contains_all = (test_target_names_contains_all and
+ set(matching_test_targets) &
+ set(self._root_targets))
+ if matching_test_targets_contains_all:
+      # Remove any of the targets for 'all' that were not explicitly supplied;
+      # 'all' is subsequently added to the matching names below.
+ matching_test_targets = [x for x in (set(matching_test_targets) &
+ set(test_targets_no_all))]
+ print('matched test_targets')
+ for target in matching_test_targets:
+ print('\t', target.name)
+ matching_target_names = [gyp.common.ParseQualifiedTarget(target.name)[1]
+ for target in matching_test_targets]
+ if matching_test_targets_contains_all:
+ matching_target_names.append('all')
+ print('\tall')
+ return matching_target_names
+
+ def find_matching_compile_target_names(self):
+ """Returns the set of output compile targets."""
+    assert self.is_build_impacted()
+ # Compile targets are found by searching up from changed targets.
+ # Reset the visited status for _GetBuildTargets.
+ for target in self._name_to_target.values():
+ target.visited = False
+
+ supplied_targets = _LookupTargets(self._supplied_target_names_no_all(),
+ self._unqualified_mapping)
+ if 'all' in self._supplied_target_names():
+ supplied_targets = [x for x in (set(supplied_targets) |
+ set(self._root_targets))]
+ print('Supplied test_targets & compile_targets')
+ for target in supplied_targets:
+ print('\t', target.name)
+ print('Finding compile targets')
+ compile_targets = _GetCompileTargets(self._changed_targets,
+ supplied_targets)
+ return [gyp.common.ParseQualifiedTarget(target.name)[1]
+ for target in compile_targets]
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ """Called by gyp as the final stage. Outputs results."""
+ config = Config()
+ try:
+ config.Init(params)
+
+ if not config.files:
+ raise Exception('Must specify files to analyze via config_path generator '
+ 'flag')
+
+ toplevel_dir = _ToGypPath(os.path.abspath(params['options'].toplevel_dir))
+ if debug:
+ print('toplevel_dir', toplevel_dir)
+
+ if _WasGypIncludeFileModified(params, config.files):
+ result_dict = { 'status': all_changed_string,
+ 'test_targets': list(config.test_target_names),
+ 'compile_targets': list(
+ config.additional_compile_target_names |
+ config.test_target_names) }
+ _WriteOutput(params, **result_dict)
+ return
+
+ calculator = TargetCalculator(config.files,
+ config.additional_compile_target_names,
+ config.test_target_names, data,
+ target_list, target_dicts, toplevel_dir,
+ params['build_files'])
+ if not calculator.is_build_impacted():
+ result_dict = { 'status': no_dependency_string,
+ 'test_targets': [],
+ 'compile_targets': [] }
+ if calculator.invalid_targets:
+ result_dict['invalid_targets'] = calculator.invalid_targets
+ _WriteOutput(params, **result_dict)
+ return
+
+ test_target_names = calculator.find_matching_test_target_names()
+ compile_target_names = calculator.find_matching_compile_target_names()
+ found_at_least_one_target = compile_target_names or test_target_names
+ result_dict = { 'test_targets': test_target_names,
+ 'status': found_dependency_string if
+ found_at_least_one_target else no_dependency_string,
+ 'compile_targets': list(
+ set(compile_target_names) |
+ set(test_target_names)) }
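+    # Illustrative output shape (hypothetical values, not from a real run):
+    #   {'status': 'Found dependency', 'test_targets': ['foo_tests'],
+    #    'compile_targets': ['foo', 'foo_tests']}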
+ if calculator.invalid_targets:
+ result_dict['invalid_targets'] = calculator.invalid_targets
+ _WriteOutput(params, **result_dict)
+
+ except Exception as e:
+ _WriteOutput(params, error=str(e))
diff --git a/third_party/python/gyp/pylib/gyp/generator/cmake.py b/third_party/python/gyp/pylib/gyp/generator/cmake.py
new file mode 100644
index 0000000000..4a2041cf26
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/cmake.py
@@ -0,0 +1,1256 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""cmake output module
+
+This module is under development and should be considered experimental.
+
+This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
+created for each configuration.
+
+This module's original purpose was to support editing in IDEs like KDevelop
+which use CMake for project management. It is also possible to use CMake to
+generate projects for other IDEs such as Eclipse CDT and Code::Blocks.
+QtCreator will convert the CMakeLists.txt to a Code::Blocks cbp file for the
+editor to read, but builds using CMake. As a result, the QtCreator editor is
+unaware of compiler defines. The generated CMakeLists.txt can also be used to
+build on Linux. There is currently no support for building on platforms other
+than Linux.
+
+The generated CMakeLists.txt should properly compile all projects. However,
+there is a mismatch between gyp and cmake with regard to linking. All attempts
+are made to work around this, but CMake sometimes sees -Wl,--start-group as a
+library and incorrectly repeats it. As a result the output of this generator
+should not be relied on for building.
+
+When using this generator with KDevelop, use version 4.4+. Previous versions
+of KDevelop will not be able to find the header file directories described in
+the generated CMakeLists.txt file.
+"""
+
+from __future__ import print_function
+
+import multiprocessing
+import os
+import signal
+import string
+import subprocess
+import gyp.common
+import gyp.xcode_emulation
+
+try:
+ # maketrans moved to str in python3.
+ _maketrans = string.maketrans
+except NameError:
+ _maketrans = str.maketrans
+
+generator_default_variables = {
+ 'EXECUTABLE_PREFIX': '',
+ 'EXECUTABLE_SUFFIX': '',
+ 'STATIC_LIB_PREFIX': 'lib',
+ 'STATIC_LIB_SUFFIX': '.a',
+ 'SHARED_LIB_PREFIX': 'lib',
+ 'SHARED_LIB_SUFFIX': '.so',
+ 'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
+ 'LIB_DIR': '${obj}.${TOOLSET}',
+ 'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
+ 'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
+ 'PRODUCT_DIR': '${builddir}',
+ 'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
+ 'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
+ 'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
+ 'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
+ 'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
+ 'CONFIGURATION_NAME': '${configuration}',
+}
+
+FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
+
+generator_supports_multiple_toolsets = True
+generator_wants_static_library_dependencies_adjusted = True
+
+COMPILABLE_EXTENSIONS = {
+ '.c': 'cc',
+ '.cc': 'cxx',
+ '.cpp': 'cxx',
+ '.cxx': 'cxx',
+ '.s': 's', # cc
+ '.S': 's', # cc
+}
+
+
+def RemovePrefix(a, prefix):
+ """Returns 'a' without 'prefix' if it starts with 'prefix'."""
+ return a[len(prefix):] if a.startswith(prefix) else a
+
+
+def CalculateVariables(default_variables, params):
+ """Calculate additional variables for use in the build (called by gyp)."""
+ default_variables.setdefault('OS', gyp.common.GetFlavor(params))
+
+
+def Compilable(filename):
+ """Return true if the file is compilable (should be in OBJS)."""
+ return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
+
+
+def Linkable(filename):
+ """Return true if the file is linkable (should be on the link line)."""
+ return filename.endswith('.o')
+
+
+def NormjoinPathForceCMakeSource(base_path, rel_path):
+ """Resolves rel_path against base_path and returns the result.
+
+ If rel_path is an absolute path it is returned unchanged.
+ Otherwise it is resolved against base_path and normalized.
+ If the result is a relative path, it is forced to be relative to the
+ CMakeLists.txt.
+ """
+ if os.path.isabs(rel_path):
+ return rel_path
+  if any(rel_path.startswith(var) for var in FULL_PATH_VARS):
+ return rel_path
+ # TODO: do we need to check base_path for absolute variables as well?
+ return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
+ os.path.normpath(os.path.join(base_path, rel_path)))
+
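+# Illustrative example (hypothetical paths):
+#   NormjoinPathForceCMakeSource('../../chrome', 'app/foo.cc')
+#     -> '${CMAKE_CURRENT_LIST_DIR}/../../chrome/app/foo.cc'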
+
+def NormjoinPath(base_path, rel_path):
+ """Resolves rel_path against base_path and returns the result.
+ TODO: what is this really used for?
+ If rel_path begins with '$' it is returned unchanged.
+ Otherwise it is resolved against base_path if relative, then normalized.
+ """
+ if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
+ return rel_path
+ return os.path.normpath(os.path.join(base_path, rel_path))
+
+
+def CMakeStringEscape(a):
+ """Escapes the string 'a' for use inside a CMake string.
+
+ This means escaping
+ '\' otherwise it may be seen as modifying the next character
+ '"' otherwise it will end the string
+ ';' otherwise the string becomes a list
+
+ The following do not need to be escaped
+ '#' when the lexer is in string state, this does not start a comment
+
+  The following is still an open question:
+    '$' generator variables (like ${obj}) must not be escaped,
+        but a literal $ in text should be; ideally we would know which
+        $ characters come from generator variables
+ """
+ return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
+
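+# Illustrative escape (hypothetical input): the raw characters
+#   back\slash "quote" a;b
+# escape to
+#   back\\slash \"quote\" a\;b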
+
+def SetFileProperty(output, source_name, property_name, values, sep):
+ """Given a set of source file, sets the given property on them."""
+ output.write('set_source_files_properties(')
+ output.write(source_name)
+ output.write(' PROPERTIES ')
+ output.write(property_name)
+ output.write(' "')
+ for value in values:
+ output.write(CMakeStringEscape(value))
+ output.write(sep)
+ output.write('")\n')
+
+
+def SetFilesProperty(output, variable, property_name, values, sep):
+ """Given a set of source files, sets the given property on them."""
+ output.write('set_source_files_properties(')
+ WriteVariable(output, variable)
+ output.write(' PROPERTIES ')
+ output.write(property_name)
+ output.write(' "')
+ for value in values:
+ output.write(CMakeStringEscape(value))
+ output.write(sep)
+ output.write('")\n')
+
+
+def SetTargetProperty(output, target_name, property_name, values, sep=''):
+ """Given a target, sets the given property."""
+ output.write('set_target_properties(')
+ output.write(target_name)
+ output.write(' PROPERTIES ')
+ output.write(property_name)
+ output.write(' "')
+ for value in values:
+ output.write(CMakeStringEscape(value))
+ output.write(sep)
+ output.write('")\n')
+
+
+def SetVariable(output, variable_name, value):
+ """Sets a CMake variable."""
+ output.write('set(')
+ output.write(variable_name)
+ output.write(' "')
+ output.write(CMakeStringEscape(value))
+ output.write('")\n')
+
+
+def SetVariableList(output, variable_name, values):
+ """Sets a CMake variable to a list."""
+ if not values:
+ return SetVariable(output, variable_name, "")
+ if len(values) == 1:
+ return SetVariable(output, variable_name, values[0])
+ output.write('list(APPEND ')
+ output.write(variable_name)
+ output.write('\n "')
+ output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
+ output.write('")\n')
+
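+# Illustrative output (hypothetical values): SetVariableList(out, 'srcs',
+# ['a.c', 'b.c']) emits
+#   list(APPEND srcs
+#     "a.c"
+#     "b.c")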
+
+def UnsetVariable(output, variable_name):
+ """Unsets a CMake variable."""
+ output.write('unset(')
+ output.write(variable_name)
+ output.write(')\n')
+
+
+def WriteVariable(output, variable_name, prepend=None):
+ if prepend:
+ output.write(prepend)
+ output.write('${')
+ output.write(variable_name)
+ output.write('}')
+
+
+class CMakeTargetType(object):
+ def __init__(self, command, modifier, property_modifier):
+ self.command = command
+ self.modifier = modifier
+ self.property_modifier = property_modifier
+
+
+cmake_target_type_from_gyp_target_type = {
+ 'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
+ 'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
+ 'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
+ 'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
+ 'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
+}
+
+
+def StringToCMakeTargetName(a):
+ """Converts the given string 'a' to a valid CMake target name.
+
+ All invalid characters are replaced by '_'.
+ Invalid for cmake: ' ', '/', '(', ')', '"'
+ Invalid for make: ':'
+  Invalid for unknown reasons but causes failures: '.'
+ """
+ return a.translate(_maketrans(' /():."', '_______'))
+
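+# Illustrative example (hypothetical target):
+#   StringToCMakeTargetName('chrome/chrome.gyp:chrome')
+#     -> 'chrome_chrome_gyp_chrome'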
+
+def WriteActions(target_name, actions, extra_sources, extra_deps,
+ path_to_gyp, output):
+ """Write CMake for the 'actions' in the target.
+
+ Args:
+ target_name: the name of the CMake target being generated.
+ actions: the Gyp 'actions' dict for this target.
+ extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
+    extra_deps: [<cmake_target>] to append with generated targets.
+ path_to_gyp: relative path from CMakeLists.txt being generated to
+ the Gyp file in which the target being generated is defined.
+ """
+ for action in actions:
+ action_name = StringToCMakeTargetName(action['action_name'])
+ action_target_name = '%s__%s' % (target_name, action_name)
+
+ inputs = action['inputs']
+ inputs_name = action_target_name + '__input'
+ SetVariableList(output, inputs_name,
+ [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
+
+ outputs = action['outputs']
+ cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
+ for out in outputs]
+ outputs_name = action_target_name + '__output'
+ SetVariableList(output, outputs_name, cmake_outputs)
+
+ # Build up a list of outputs.
+ # Collect the output dirs we'll need.
+ dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
+
+ if int(action.get('process_outputs_as_sources', False)):
+ extra_sources.extend(zip(cmake_outputs, outputs))
+
+ # add_custom_command
+ output.write('add_custom_command(OUTPUT ')
+ WriteVariable(output, outputs_name)
+ output.write('\n')
+
+ if len(dirs) > 0:
+ for directory in dirs:
+ output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
+ output.write(directory)
+ output.write('\n')
+
+ output.write(' COMMAND ')
+ output.write(gyp.common.EncodePOSIXShellList(action['action']))
+ output.write('\n')
+
+ output.write(' DEPENDS ')
+ WriteVariable(output, inputs_name)
+ output.write('\n')
+
+ output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
+ output.write(path_to_gyp)
+ output.write('\n')
+
+ output.write(' COMMENT ')
+ if 'message' in action:
+ output.write(action['message'])
+ else:
+ output.write(action_target_name)
+ output.write('\n')
+
+ output.write(' VERBATIM\n')
+ output.write(')\n')
+
+ # add_custom_target
+ output.write('add_custom_target(')
+ output.write(action_target_name)
+ output.write('\n DEPENDS ')
+ WriteVariable(output, outputs_name)
+ output.write('\n SOURCES ')
+ WriteVariable(output, inputs_name)
+ output.write('\n)\n')
+
+ extra_deps.append(action_target_name)
+
+
+def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
+  if rel_path.startswith(("${RULE_INPUT_PATH}", "${RULE_INPUT_DIRNAME}")):
+    if any(rule_source.startswith(var) for var in FULL_PATH_VARS):
+ return rel_path
+ return NormjoinPathForceCMakeSource(base_path, rel_path)
+
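+# Note (illustrative): an output starting with ${RULE_INPUT_PATH} or
+# ${RULE_INPUT_DIRNAME} is returned unchanged only when rule_source is already
+# anchored at a full-path variable; otherwise it is resolved against base_path
+# as usual.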
+
+def WriteRules(target_name, rules, extra_sources, extra_deps,
+ path_to_gyp, output):
+ """Write CMake for the 'rules' in the target.
+
+ Args:
+ target_name: the name of the CMake target being generated.
+    rules: the Gyp 'rules' dict for this target.
+    extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
+    extra_deps: [<cmake_target>] to append with generated targets.
+ path_to_gyp: relative path from CMakeLists.txt being generated to
+ the Gyp file in which the target being generated is defined.
+ """
+ for rule in rules:
+ rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
+
+ inputs = rule.get('inputs', [])
+ inputs_name = rule_name + '__input'
+ SetVariableList(output, inputs_name,
+ [NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
+ outputs = rule['outputs']
+ var_outputs = []
+
+ for count, rule_source in enumerate(rule.get('rule_sources', [])):
+ action_name = rule_name + '_' + str(count)
+
+ rule_source_dirname, rule_source_basename = os.path.split(rule_source)
+ rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
+
+ SetVariable(output, 'RULE_INPUT_PATH', rule_source)
+ SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
+ SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
+ SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
+ SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
+
+ # Build up a list of outputs.
+ # Collect the output dirs we'll need.
+ dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
+
+ # Create variables for the output, as 'local' variable will be unset.
+ these_outputs = []
+ for output_index, out in enumerate(outputs):
+ output_name = action_name + '_' + str(output_index)
+ SetVariable(output, output_name,
+ NormjoinRulePathForceCMakeSource(path_to_gyp, out,
+ rule_source))
+ if int(rule.get('process_outputs_as_sources', False)):
+ extra_sources.append(('${' + output_name + '}', out))
+ these_outputs.append('${' + output_name + '}')
+ var_outputs.append('${' + output_name + '}')
+
+ # add_custom_command
+ output.write('add_custom_command(OUTPUT\n')
+ for out in these_outputs:
+ output.write(' ')
+ output.write(out)
+ output.write('\n')
+
+ for directory in dirs:
+ output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
+ output.write(directory)
+ output.write('\n')
+
+ output.write(' COMMAND ')
+ output.write(gyp.common.EncodePOSIXShellList(rule['action']))
+ output.write('\n')
+
+ output.write(' DEPENDS ')
+ WriteVariable(output, inputs_name)
+ output.write(' ')
+ output.write(NormjoinPath(path_to_gyp, rule_source))
+ output.write('\n')
+
+ # CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
+ # The cwd is the current build directory.
+ output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
+ output.write(path_to_gyp)
+ output.write('\n')
+
+ output.write(' COMMENT ')
+ if 'message' in rule:
+ output.write(rule['message'])
+ else:
+ output.write(action_name)
+ output.write('\n')
+
+ output.write(' VERBATIM\n')
+ output.write(')\n')
+
+ UnsetVariable(output, 'RULE_INPUT_PATH')
+ UnsetVariable(output, 'RULE_INPUT_DIRNAME')
+ UnsetVariable(output, 'RULE_INPUT_NAME')
+ UnsetVariable(output, 'RULE_INPUT_ROOT')
+ UnsetVariable(output, 'RULE_INPUT_EXT')
+
+ # add_custom_target
+ output.write('add_custom_target(')
+ output.write(rule_name)
+ output.write(' DEPENDS\n')
+ for out in var_outputs:
+ output.write(' ')
+ output.write(out)
+ output.write('\n')
+ output.write('SOURCES ')
+ WriteVariable(output, inputs_name)
+ output.write('\n')
+ for rule_source in rule.get('rule_sources', []):
+ output.write(' ')
+ output.write(NormjoinPath(path_to_gyp, rule_source))
+ output.write('\n')
+ output.write(')\n')
+
+ extra_deps.append(rule_name)
+
+
+def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
+ """Write CMake for the 'copies' in the target.
+
+ Args:
+ target_name: the name of the CMake target being generated.
+    copies: the Gyp 'copies' dict for this target.
+    extra_deps: [<cmake_target>] to append with generated targets.
+ path_to_gyp: relative path from CMakeLists.txt being generated to
+ the Gyp file in which the target being generated is defined.
+ """
+ copy_name = target_name + '__copies'
+
+ # CMake gets upset with custom targets with OUTPUT which specify no output.
+ have_copies = any(copy['files'] for copy in copies)
+ if not have_copies:
+ output.write('add_custom_target(')
+ output.write(copy_name)
+ output.write(')\n')
+ extra_deps.append(copy_name)
+ return
+
+ class Copy(object):
+ def __init__(self, ext, command):
+ self.cmake_inputs = []
+ self.cmake_outputs = []
+ self.gyp_inputs = []
+ self.gyp_outputs = []
+ self.ext = ext
+ self.inputs_name = None
+ self.outputs_name = None
+ self.command = command
+
+ file_copy = Copy('', 'copy')
+ dir_copy = Copy('_dirs', 'copy_directory')
+
+ for copy in copies:
+ files = copy['files']
+ destination = copy['destination']
+ for src in files:
+ path = os.path.normpath(src)
+ basename = os.path.split(path)[1]
+ dst = os.path.join(destination, basename)
+
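+      # A src ending in '/' has an empty basename and is copied as a directory.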
+ copy = file_copy if os.path.basename(src) else dir_copy
+
+ copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
+ copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
+ copy.gyp_inputs.append(src)
+ copy.gyp_outputs.append(dst)
+
+ for copy in (file_copy, dir_copy):
+ if copy.cmake_inputs:
+ copy.inputs_name = copy_name + '__input' + copy.ext
+ SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
+
+ copy.outputs_name = copy_name + '__output' + copy.ext
+ SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
+
+ # add_custom_command
+ output.write('add_custom_command(\n')
+
+ output.write('OUTPUT')
+ for copy in (file_copy, dir_copy):
+ if copy.outputs_name:
+ WriteVariable(output, copy.outputs_name, ' ')
+ output.write('\n')
+
+ for copy in (file_copy, dir_copy):
+ for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
+ # 'cmake -E copy src dst' will create the 'dst' directory if needed.
+ output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
+ output.write(src)
+ output.write(' ')
+ output.write(dst)
+ output.write("\n")
+
+ output.write('DEPENDS')
+ for copy in (file_copy, dir_copy):
+ if copy.inputs_name:
+ WriteVariable(output, copy.inputs_name, ' ')
+ output.write('\n')
+
+ output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
+ output.write(path_to_gyp)
+ output.write('\n')
+
+ output.write('COMMENT Copying for ')
+ output.write(target_name)
+ output.write('\n')
+
+ output.write('VERBATIM\n')
+ output.write(')\n')
+
+ # add_custom_target
+ output.write('add_custom_target(')
+ output.write(copy_name)
+ output.write('\n DEPENDS')
+ for copy in (file_copy, dir_copy):
+ if copy.outputs_name:
+ WriteVariable(output, copy.outputs_name, ' ')
+ output.write('\n SOURCES')
+ if file_copy.inputs_name:
+ WriteVariable(output, file_copy.inputs_name, ' ')
+ output.write('\n)\n')
+
+ extra_deps.append(copy_name)
+
+
+def CreateCMakeTargetBaseName(qualified_target):
+ """This is the name we would like the target to have."""
+ _, gyp_target_name, gyp_target_toolset = (
+ gyp.common.ParseQualifiedTarget(qualified_target))
+ cmake_target_base_name = gyp_target_name
+ if gyp_target_toolset and gyp_target_toolset != 'target':
+ cmake_target_base_name += '_' + gyp_target_toolset
+ return StringToCMakeTargetName(cmake_target_base_name)
+
+
+def CreateCMakeTargetFullName(qualified_target):
+ """An unambiguous name for the target."""
+ gyp_file, gyp_target_name, gyp_target_toolset = (
+ gyp.common.ParseQualifiedTarget(qualified_target))
+ cmake_target_full_name = gyp_file + ':' + gyp_target_name
+ if gyp_target_toolset and gyp_target_toolset != 'target':
+ cmake_target_full_name += '_' + gyp_target_toolset
+ return StringToCMakeTargetName(cmake_target_full_name)
+
+
+class CMakeNamer(object):
+ """Converts Gyp target names into CMake target names.
+
+ CMake requires that target names be globally unique. One way to ensure
+  this is to fully qualify the names of the targets. Unfortunately, this
+ ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
+ of just "chrome". If this generator were only interested in building, it
+ would be possible to fully qualify all target names, then create
+ unqualified target names which depend on all qualified targets which
+ should have had that name. This is more or less what the 'make' generator
+ does with aliases. However, one goal of this generator is to create CMake
+ files for use with IDEs, and fully qualified names are not as user
+ friendly.
+
+ Since target name collision is rare, we do the above only when required.
+
+ Toolset variants are always qualified from the base, as this is required for
+ building. However, it also makes sense for an IDE, as it is possible for
+ defines to be different.
+ """
+ def __init__(self, target_list):
+    self.cmake_target_base_names_conflicting = set()
+
+ cmake_target_base_names_seen = set()
+ for qualified_target in target_list:
+ cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
+
+ if cmake_target_base_name not in cmake_target_base_names_seen:
+ cmake_target_base_names_seen.add(cmake_target_base_name)
+ else:
+        self.cmake_target_base_names_conflicting.add(cmake_target_base_name)
+
+ def CreateCMakeTargetName(self, qualified_target):
+ base_name = CreateCMakeTargetBaseName(qualified_target)
+    if base_name in self.cmake_target_base_names_conflicting:
+ return CreateCMakeTargetFullName(qualified_target)
+ return base_name
+
+
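+# Illustrative naming (hypothetical targets): with no collision,
+# 'chrome/chrome.gyp:chrome' becomes the CMake target 'chrome'; if another
+# target is also named 'chrome', the full name 'chrome_chrome_gyp_chrome' is
+# used instead.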
+def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
+ options, generator_flags, all_qualified_targets, flavor,
+ output):
+ # The make generator does this always.
+ # TODO: It would be nice to be able to tell CMake all dependencies.
+ circular_libs = generator_flags.get('circular', True)
+
+ if not generator_flags.get('standalone', False):
+ output.write('\n#')
+ output.write(qualified_target)
+ output.write('\n')
+
+ gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
+ rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
+ rel_gyp_dir = os.path.dirname(rel_gyp_file)
+
+ # Relative path from build dir to top dir.
+ build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
+ # Relative path from build dir to gyp dir.
+ build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
+
+ path_from_cmakelists_to_gyp = build_to_gyp
+
+ spec = target_dicts.get(qualified_target, {})
+ config = spec.get('configurations', {}).get(config_to_use, {})
+
+ xcode_settings = None
+ if flavor == 'mac':
+ xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
+
+ target_name = spec.get('target_name', '<missing target name>')
+ target_type = spec.get('type', '<missing target type>')
+ target_toolset = spec.get('toolset')
+
+ cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
+ if cmake_target_type is None:
+ print('Target %s has unknown target type %s, skipping.' %
+          (target_name, target_type))
+ return
+
+ SetVariable(output, 'TARGET', target_name)
+ SetVariable(output, 'TOOLSET', target_toolset)
+
+ cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
+
+ extra_sources = []
+ extra_deps = []
+
+ # Actions must come first, since they can generate more OBJs for use below.
+ if 'actions' in spec:
+ WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
+ path_from_cmakelists_to_gyp, output)
+
+ # Rules must be early like actions.
+ if 'rules' in spec:
+ WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
+ path_from_cmakelists_to_gyp, output)
+
+ # Copies
+ if 'copies' in spec:
+ WriteCopies(cmake_target_name, spec['copies'], extra_deps,
+ path_from_cmakelists_to_gyp, output)
+
+ # Target and sources
+ srcs = spec.get('sources', [])
+
+ # Gyp separates the sheep from the goats based on file extensions.
+ # A full separation is done here because of flag handing (see below).
+ s_sources = []
+ c_sources = []
+ cxx_sources = []
+ linkable_sources = []
+ other_sources = []
+ for src in srcs:
+ _, ext = os.path.splitext(src)
+ src_type = COMPILABLE_EXTENSIONS.get(ext, None)
+    src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src)
+
+ if src_type == 's':
+ s_sources.append(src_norm_path)
+ elif src_type == 'cc':
+ c_sources.append(src_norm_path)
+ elif src_type == 'cxx':
+ cxx_sources.append(src_norm_path)
+ elif Linkable(ext):
+ linkable_sources.append(src_norm_path)
+ else:
+ other_sources.append(src_norm_path)
+
+ for extra_source in extra_sources:
+ src, real_source = extra_source
+ _, ext = os.path.splitext(real_source)
+ src_type = COMPILABLE_EXTENSIONS.get(ext, None)
+
+ if src_type == 's':
+ s_sources.append(src)
+ elif src_type == 'cc':
+ c_sources.append(src)
+ elif src_type == 'cxx':
+ cxx_sources.append(src)
+ elif Linkable(ext):
+ linkable_sources.append(src)
+ else:
+ other_sources.append(src)
+
+ s_sources_name = None
+ if s_sources:
+ s_sources_name = cmake_target_name + '__asm_srcs'
+ SetVariableList(output, s_sources_name, s_sources)
+
+ c_sources_name = None
+ if c_sources:
+ c_sources_name = cmake_target_name + '__c_srcs'
+ SetVariableList(output, c_sources_name, c_sources)
+
+ cxx_sources_name = None
+ if cxx_sources:
+ cxx_sources_name = cmake_target_name + '__cxx_srcs'
+ SetVariableList(output, cxx_sources_name, cxx_sources)
+
+ linkable_sources_name = None
+ if linkable_sources:
+ linkable_sources_name = cmake_target_name + '__linkable_srcs'
+ SetVariableList(output, linkable_sources_name, linkable_sources)
+
+ other_sources_name = None
+ if other_sources:
+ other_sources_name = cmake_target_name + '__other_srcs'
+ SetVariableList(output, other_sources_name, other_sources)
+
+ # CMake gets upset when executable targets provide no sources.
+ # http://www.cmake.org/pipermail/cmake/2010-July/038461.html
+ dummy_sources_name = None
+ has_sources = (s_sources_name or
+ c_sources_name or
+ cxx_sources_name or
+ linkable_sources_name or
+ other_sources_name)
+ if target_type == 'executable' and not has_sources:
+ dummy_sources_name = cmake_target_name + '__dummy_srcs'
+ SetVariable(output, dummy_sources_name,
+ "${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
+ output.write('if(NOT EXISTS "')
+ WriteVariable(output, dummy_sources_name)
+ output.write('")\n')
+ output.write(' file(WRITE "')
+ WriteVariable(output, dummy_sources_name)
+ output.write('" "")\n')
+ output.write("endif()\n")
+
+
+ # CMake is opposed to setting linker directories and considers the practice
+ # of setting linker directories dangerous. Instead, it favors the use of
+ # find_library and passing absolute paths to target_link_libraries.
+ # However, CMake does provide the command link_directories, which adds
+ # link directories to targets defined after it is called.
+ # As a result, link_directories must come before the target definition.
+ # CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
+ library_dirs = config.get('library_dirs')
+ if library_dirs is not None:
+ output.write('link_directories(')
+ for library_dir in library_dirs:
+ output.write(' ')
+ output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
+ output.write('\n')
+ output.write(')\n')
+
+ output.write(cmake_target_type.command)
+ output.write('(')
+ output.write(cmake_target_name)
+
+ if cmake_target_type.modifier is not None:
+ output.write(' ')
+ output.write(cmake_target_type.modifier)
+
+ if s_sources_name:
+ WriteVariable(output, s_sources_name, ' ')
+ if c_sources_name:
+ WriteVariable(output, c_sources_name, ' ')
+ if cxx_sources_name:
+ WriteVariable(output, cxx_sources_name, ' ')
+ if linkable_sources_name:
+ WriteVariable(output, linkable_sources_name, ' ')
+ if other_sources_name:
+ WriteVariable(output, other_sources_name, ' ')
+ if dummy_sources_name:
+ WriteVariable(output, dummy_sources_name, ' ')
+
+ output.write(')\n')
+
+ # Let CMake know if the 'all' target should depend on this target.
+ exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
+ else 'FALSE')
+ SetTargetProperty(output, cmake_target_name,
+ 'EXCLUDE_FROM_ALL', exclude_from_all)
+ for extra_target_name in extra_deps:
+ SetTargetProperty(output, extra_target_name,
+ 'EXCLUDE_FROM_ALL', exclude_from_all)
+
+ # Output name and location.
+ if target_type != 'none':
+ # Link as 'C' if there are no other files
+ if not c_sources and not cxx_sources:
+ SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
+
+ # Mark uncompiled sources as uncompiled.
+ if other_sources_name:
+ output.write('set_source_files_properties(')
+ WriteVariable(output, other_sources_name, '')
+ output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
+
+ # Mark object sources as linkable.
+ if linkable_sources_name:
+ output.write('set_source_files_properties(')
+      WriteVariable(output, linkable_sources_name, '')
+ output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
+
+ # Output directory
+ target_output_directory = spec.get('product_dir')
+ if target_output_directory is None:
+ if target_type in ('executable', 'loadable_module'):
+ target_output_directory = generator_default_variables['PRODUCT_DIR']
+ elif target_type == 'shared_library':
+ target_output_directory = '${builddir}/lib.${TOOLSET}'
+ elif spec.get('standalone_static_library', False):
+ target_output_directory = generator_default_variables['PRODUCT_DIR']
+ else:
+ base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
+ options.toplevel_dir)
+ target_output_directory = '${obj}.${TOOLSET}'
+ target_output_directory = (
+ os.path.join(target_output_directory, base_path))
+
+ cmake_target_output_directory = NormjoinPathForceCMakeSource(
+ path_from_cmakelists_to_gyp,
+ target_output_directory)
+ SetTargetProperty(output,
+ cmake_target_name,
+ cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
+ cmake_target_output_directory)
+
+ # Output name
+ default_product_prefix = ''
+ default_product_name = target_name
+ default_product_ext = ''
+ if target_type == 'static_library':
+ static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
+ default_product_name = RemovePrefix(default_product_name,
+ static_library_prefix)
+ default_product_prefix = static_library_prefix
+ default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
+
+ elif target_type in ('loadable_module', 'shared_library'):
+ shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
+ default_product_name = RemovePrefix(default_product_name,
+ shared_library_prefix)
+ default_product_prefix = shared_library_prefix
+ default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
+
+ elif target_type != 'executable':
+    print('ERROR: What output file should be generated?',
+          'type', target_type, 'target', target_name)
+
+ product_prefix = spec.get('product_prefix', default_product_prefix)
+ product_name = spec.get('product_name', default_product_name)
+ product_ext = spec.get('product_extension')
+ if product_ext:
+ product_ext = '.' + product_ext
+ else:
+ product_ext = default_product_ext
+
+ SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
+ SetTargetProperty(output, cmake_target_name,
+ cmake_target_type.property_modifier + '_OUTPUT_NAME',
+ product_name)
+ SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
+
+ # Make the output of this target referenceable as a source.
+ cmake_target_output_basename = product_prefix + product_name + product_ext
+ cmake_target_output = os.path.join(cmake_target_output_directory,
+ cmake_target_output_basename)
+ SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
+
+ # Includes
+ includes = config.get('include_dirs')
+ if includes:
+ # This (target include directories) is what requires CMake 2.8.8
+ includes_name = cmake_target_name + '__include_dirs'
+ SetVariableList(output, includes_name,
+ [NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
+ for include in includes])
+ output.write('set_property(TARGET ')
+ output.write(cmake_target_name)
+ output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
+ WriteVariable(output, includes_name, '')
+ output.write(')\n')
+
+ # Defines
+ defines = config.get('defines')
+ if defines is not None:
+ SetTargetProperty(output,
+ cmake_target_name,
+ 'COMPILE_DEFINITIONS',
+ defines,
+ ';')
+
+ # Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
+ # CMake currently does not have target C and CXX flags.
+ # So, instead of doing...
+
+ # cflags_c = config.get('cflags_c')
+ # if cflags_c is not None:
+ # SetTargetProperty(output, cmake_target_name,
+ # 'C_COMPILE_FLAGS', cflags_c, ' ')
+
+ # cflags_cc = config.get('cflags_cc')
+ # if cflags_cc is not None:
+ # SetTargetProperty(output, cmake_target_name,
+ # 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
+
+ # Instead we must...
+ cflags = config.get('cflags', [])
+ cflags_c = config.get('cflags_c', [])
+ cflags_cxx = config.get('cflags_cc', [])
+ if xcode_settings:
+ cflags = xcode_settings.GetCflags(config_to_use)
+ cflags_c = xcode_settings.GetCflagsC(config_to_use)
+ cflags_cxx = xcode_settings.GetCflagsCC(config_to_use)
+ #cflags_objc = xcode_settings.GetCflagsObjC(config_to_use)
+ #cflags_objcc = xcode_settings.GetCflagsObjCC(config_to_use)
+
+ if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
+ SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
+
+ elif c_sources and not (s_sources or cxx_sources):
+ flags = []
+ flags.extend(cflags)
+ flags.extend(cflags_c)
+ SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
+
+ elif cxx_sources and not (s_sources or c_sources):
+ flags = []
+ flags.extend(cflags)
+ flags.extend(cflags_cxx)
+ SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
+
+ else:
+ # TODO: This is broken, one cannot generally set properties on files,
+ # as other targets may require different properties on the same files.
+ if s_sources and cflags:
+ SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
+
+ if c_sources and (cflags or cflags_c):
+ flags = []
+ flags.extend(cflags)
+ flags.extend(cflags_c)
+ SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
+
+ if cxx_sources and (cflags or cflags_cxx):
+ flags = []
+ flags.extend(cflags)
+ flags.extend(cflags_cxx)
+ SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
+
+ # Linker flags
+ ldflags = config.get('ldflags')
+ if ldflags is not None:
+ SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
+
+ # XCode settings
+ xcode_settings = config.get('xcode_settings', {})
+  for xcode_setting, xcode_value in xcode_settings.items():
+ SetTargetProperty(output, cmake_target_name,
+ "XCODE_ATTRIBUTE_%s" % xcode_setting, xcode_value,
+ '' if isinstance(xcode_value, str) else ' ')
+
+ # Note on Dependencies and Libraries:
+ # CMake wants to handle link order, resolving the link line up front.
+ # Gyp does not retain or enforce specifying enough information to do so.
+ # So do as other gyp generators and use --start-group and --end-group.
+ # Give CMake as little information as possible so that it doesn't mess it up.
+
+ # Dependencies
+ rawDeps = spec.get('dependencies', [])
+
+ static_deps = []
+ shared_deps = []
+ other_deps = []
+ for rawDep in rawDeps:
+ dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
+ dep_spec = target_dicts.get(rawDep, {})
+ dep_target_type = dep_spec.get('type', None)
+
+ if dep_target_type == 'static_library':
+ static_deps.append(dep_cmake_name)
+ elif dep_target_type == 'shared_library':
+ shared_deps.append(dep_cmake_name)
+ else:
+ other_deps.append(dep_cmake_name)
+
+  # Ensure all external dependencies are complete before internal dependencies;
+  # extra_deps currently only depend on their own deps, so they would otherwise
+  # run too early.
+ if static_deps or shared_deps or other_deps:
+ for extra_dep in extra_deps:
+ output.write('add_dependencies(')
+ output.write(extra_dep)
+ output.write('\n')
+ for deps in (static_deps, shared_deps, other_deps):
+ for dep in gyp.common.uniquer(deps):
+ output.write(' ')
+ output.write(dep)
+ output.write('\n')
+ output.write(')\n')
+
+ linkable = target_type in ('executable', 'loadable_module', 'shared_library')
+ other_deps.extend(extra_deps)
+ if other_deps or (not linkable and (static_deps or shared_deps)):
+ output.write('add_dependencies(')
+ output.write(cmake_target_name)
+ output.write('\n')
+ for dep in gyp.common.uniquer(other_deps):
+ output.write(' ')
+ output.write(dep)
+ output.write('\n')
+ if not linkable:
+ for deps in (static_deps, shared_deps):
+ for lib_dep in gyp.common.uniquer(deps):
+ output.write(' ')
+ output.write(lib_dep)
+ output.write('\n')
+ output.write(')\n')
+
+ # Libraries
+ if linkable:
+ external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
+ if external_libs or static_deps or shared_deps:
+ output.write('target_link_libraries(')
+ output.write(cmake_target_name)
+ output.write('\n')
+ if static_deps:
+ write_group = circular_libs and len(static_deps) > 1 and flavor != 'mac'
+ if write_group:
+ output.write('-Wl,--start-group\n')
+ for dep in gyp.common.uniquer(static_deps):
+ output.write(' ')
+ output.write(dep)
+ output.write('\n')
+ if write_group:
+ output.write('-Wl,--end-group\n')
+ if shared_deps:
+ for dep in gyp.common.uniquer(shared_deps):
+ output.write(' ')
+ output.write(dep)
+ output.write('\n')
+ if external_libs:
+ for lib in gyp.common.uniquer(external_libs):
+ output.write(' "')
+ output.write(RemovePrefix(lib, "$(SDKROOT)"))
+ output.write('"\n')
+
+ output.write(')\n')
+
+ UnsetVariable(output, 'TOOLSET')
+ UnsetVariable(output, 'TARGET')
+
+
+def GenerateOutputForConfig(target_list, target_dicts, data,
+ params, config_to_use):
+ options = params['options']
+ generator_flags = params['generator_flags']
+ flavor = gyp.common.GetFlavor(params)
+
+ # generator_dir: relative path from pwd to where make puts build files.
+  # Makes migrating from make to cmake easier; cmake doesn't put anything here.
+ # Each Gyp configuration creates a different CMakeLists.txt file
+ # to avoid incompatibilities between Gyp and CMake configurations.
+ generator_dir = os.path.relpath(options.generator_output or '.')
+
+ # output_dir: relative path from generator_dir to the build directory.
+ output_dir = generator_flags.get('output_dir', 'out')
+
+ # build_dir: relative path from source root to our output files.
+ # e.g. "out/Debug"
+ build_dir = os.path.normpath(os.path.join(generator_dir,
+ output_dir,
+ config_to_use))
+
+ toplevel_build = os.path.join(options.toplevel_dir, build_dir)
+
+ output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
+ gyp.common.EnsureDirExists(output_file)
+
+ output = open(output_file, 'w')
+ output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
+ output.write('cmake_policy(VERSION 2.8.8)\n')
+
+ gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
+ output.write('project(')
+ output.write(project_target)
+ output.write(')\n')
+
+ SetVariable(output, 'configuration', config_to_use)
+
+ ar = None
+ cc = None
+ cxx = None
+
+ make_global_settings = data[gyp_file].get('make_global_settings', [])
+ build_to_top = gyp.common.InvertRelativePath(build_dir,
+ options.toplevel_dir)
+ for key, value in make_global_settings:
+ if key == 'AR':
+ ar = os.path.join(build_to_top, value)
+ if key == 'CC':
+ cc = os.path.join(build_to_top, value)
+ if key == 'CXX':
+ cxx = os.path.join(build_to_top, value)
+
+ ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
+ cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
+ cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
+
+ if ar:
+ SetVariable(output, 'CMAKE_AR', ar)
+ if cc:
+ SetVariable(output, 'CMAKE_C_COMPILER', cc)
+ if cxx:
+ SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
+
+ # The following appears to be as-yet undocumented.
+ # http://public.kitware.com/Bug/view.php?id=8392
+ output.write('enable_language(ASM)\n')
+ # ASM-ATT does not support .S files.
+ # output.write('enable_language(ASM-ATT)\n')
+
+ if cc:
+ SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
+
+ SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
+ SetVariable(output, 'obj', '${builddir}/obj')
+ output.write('\n')
+
+ # TODO: Undocumented/unsupported (the CMake Java generator depends on it).
+ # CMake by default names the object resulting from foo.c to be foo.c.o.
+ # Gyp traditionally names the object resulting from foo.c foo.o.
+ # This should be irrelevant, but some targets extract .o files from .a
+ # and depend on the name of the extracted .o files.
+ output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
+ output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
+ output.write('\n')
+
+ # Force ninja to use rsp files. Otherwise link and ar lines can get too long,
+ # resulting in 'Argument list too long' errors.
+ # However, rsp files don't work correctly on Mac.
+ if flavor != 'mac':
+ output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
+ output.write('\n')
+
+ namer = CMakeNamer(target_list)
+
+ # The list of targets upon which the 'all' target should depend.
+  # CMake has its own implicit 'all' target; one is not created explicitly.
+ all_qualified_targets = set()
+ for build_file in params['build_files']:
+ for qualified_target in gyp.common.AllTargets(target_list,
+ target_dicts,
+ os.path.normpath(build_file)):
+ all_qualified_targets.add(qualified_target)
+
+ for qualified_target in target_list:
+ if flavor == 'mac':
+ gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
+ spec = target_dicts[qualified_target]
+ gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[gyp_file], spec)
+
+ WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
+ options, generator_flags, all_qualified_targets, flavor, output)
+
+ output.close()
+
+
+def PerformBuild(data, configurations, params):
+ options = params['options']
+ generator_flags = params['generator_flags']
+
+ # generator_dir: relative path from pwd to where make puts build files.
+  # Makes migrating from make to cmake easier; cmake doesn't put anything here.
+ generator_dir = os.path.relpath(options.generator_output or '.')
+
+ # output_dir: relative path from generator_dir to the build directory.
+ output_dir = generator_flags.get('output_dir', 'out')
+
+ for config_name in configurations:
+ # build_dir: relative path from source root to our output files.
+ # e.g. "out/Debug"
+ build_dir = os.path.normpath(os.path.join(generator_dir,
+ output_dir,
+ config_name))
+ arguments = ['cmake', '-G', 'Ninja']
+ print('Generating [%s]: %s' % (config_name, arguments))
+ subprocess.check_call(arguments, cwd=build_dir)
+
+ arguments = ['ninja', '-C', build_dir]
+ print('Building [%s]: %s' % (config_name, arguments))
+ subprocess.check_call(arguments)
+
+
+def CallGenerateOutputForConfig(arglist):
+ # Ignore the interrupt signal so that the parent process catches it and
+ # kills all multiprocessing children.
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ target_list, target_dicts, data, params, config_name = arglist
+ GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ user_config = params.get('generator_flags', {}).get('config', None)
+ if user_config:
+ GenerateOutputForConfig(target_list, target_dicts, data,
+ params, user_config)
+ else:
+ config_names = target_dicts[target_list[0]]['configurations']
+ if params['parallel']:
+ try:
+ pool = multiprocessing.Pool(len(config_names))
+ arglists = []
+ for config_name in config_names:
+ arglists.append((target_list, target_dicts, data,
+ params, config_name))
+ pool.map(CallGenerateOutputForConfig, arglists)
+ except KeyboardInterrupt as e:
+ pool.terminate()
+ raise e
+ else:
+ for config_name in config_names:
+ GenerateOutputForConfig(target_list, target_dicts, data,
+ params, config_name)
diff --git a/third_party/python/gyp/pylib/gyp/generator/dump_dependency_json.py b/third_party/python/gyp/pylib/gyp/generator/dump_dependency_json.py
new file mode 100644
index 0000000000..2bf3f397d6
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/dump_dependency_json.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import collections
+import os
+import gyp
+import gyp.common
+import gyp.msvs_emulation
+import json
+import sys
+
+generator_supports_multiple_toolsets = True
+
+generator_wants_static_library_dependencies_adjusted = False
+
+generator_filelist_paths = {
+}
+
+generator_default_variables = {
+}
+for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
+ 'LIB_DIR', 'SHARED_LIB_DIR']:
+ # Some gyp steps fail if these are empty(!).
+ generator_default_variables[dirname] = 'dir'
+for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
+ 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
+ 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
+ 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
+ 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
+ 'CONFIGURATION_NAME']:
+ generator_default_variables[unused] = ''
+
+
+def CalculateVariables(default_variables, params):
+ generator_flags = params.get('generator_flags', {})
+ for key, val in generator_flags.items():
+ default_variables.setdefault(key, val)
+ default_variables.setdefault('OS', gyp.common.GetFlavor(params))
+
+ flavor = gyp.common.GetFlavor(params)
+  if flavor == 'win':
+ # Copy additional generator configuration data from VS, which is shared
+ # by the Windows Ninja generator.
+ import gyp.generator.msvs as msvs_generator
+ generator_additional_non_configuration_keys = getattr(msvs_generator,
+ 'generator_additional_non_configuration_keys', [])
+ generator_additional_path_sections = getattr(msvs_generator,
+ 'generator_additional_path_sections', [])
+
+ gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
+
+
+def CalculateGeneratorInputInfo(params):
+ """Calculate the generator specific info that gets fed to input (called by
+ gyp)."""
+ generator_flags = params.get('generator_flags', {})
+ if generator_flags.get('adjust_static_libraries', False):
+ global generator_wants_static_library_dependencies_adjusted
+ generator_wants_static_library_dependencies_adjusted = True
+
+ toplevel = params['options'].toplevel_dir
+ generator_dir = os.path.relpath(params['options'].generator_output or '.')
+ # output_dir: relative path from generator_dir to the build directory.
+ output_dir = generator_flags.get('output_dir', 'out')
+ qualified_out_dir = os.path.normpath(os.path.join(
+ toplevel, generator_dir, output_dir, 'gypfiles'))
+ global generator_filelist_paths
+ generator_filelist_paths = {
+ 'toplevel': toplevel,
+ 'qualified_out_dir': qualified_out_dir,
+ }
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ # Map of target -> list of targets it depends on.
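+  # Illustrative shape (hypothetical names):
+  #   {'src/foo.gyp:foo#target': ['src/bar.gyp:bar#target'], ...}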
+ edges = {}
+
+ # Queue of targets to visit.
+ targets_to_visit = target_list[:]
+
+ while len(targets_to_visit) > 0:
+ target = targets_to_visit.pop()
+ if target in edges:
+ continue
+ edges[target] = []
+
+ for dep in target_dicts[target].get('dependencies', []):
+ edges[target].append(dep)
+ targets_to_visit.append(dep)
+
+ try:
+ filepath = params['generator_flags']['output_dir']
+ except KeyError:
+ filepath = '.'
+ filename = os.path.join(filepath, 'dump.json')
+ f = open(filename, 'w')
+ json.dump(edges, f)
+ f.close()
+ print('Wrote json to %s.' % filename)
diff --git a/third_party/python/gyp/pylib/gyp/generator/eclipse.py b/third_party/python/gyp/pylib/gyp/generator/eclipse.py
new file mode 100644
index 0000000000..d039f03a2c
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/eclipse.py
@@ -0,0 +1,425 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""GYP backend that generates Eclipse CDT settings files.
+
+This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
+files that can be imported into an Eclipse CDT project. The XML file contains a
+list of include paths and symbols (i.e. defines).
+
+Because a full .cproject definition is not created by this generator, it's not
+possible to properly define the include dirs and symbols for each file
+individually. Instead, one set of includes/symbols is generated for the entire
+project. This works fairly well (and is a vast improvement in general), but may
+still result in a few indexer issues here and there.
+
+This generator has no automated tests, so expect it to be broken.
+"""
+
+from xml.sax.saxutils import escape
+import os.path
+import subprocess
+import gyp
+import gyp.common
+import gyp.msvs_emulation
+import shlex
+import xml.etree.cElementTree as ET
+
+generator_wants_static_library_dependencies_adjusted = False
+
+generator_default_variables = {
+}
+
+for dirname in ['INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']:
+ # Some gyp steps fail if these are empty(!), so we convert them to variables
+ generator_default_variables[dirname] = '$' + dirname
+
+for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
+ 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
+ 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
+ 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
+ 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
+ 'CONFIGURATION_NAME']:
+ generator_default_variables[unused] = ''
+
+# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
+# part of the path when dealing with generated headers. This value will be
+# replaced dynamically for each configuration.
+generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
+ '$SHARED_INTERMEDIATE_DIR'
+
+
+def CalculateVariables(default_variables, params):
+ generator_flags = params.get('generator_flags', {})
+ for key, val in generator_flags.items():
+ default_variables.setdefault(key, val)
+ flavor = gyp.common.GetFlavor(params)
+ default_variables.setdefault('OS', flavor)
+ if flavor == 'win':
+ # Copy additional generator configuration data from VS, which is shared
+ # by the Eclipse generator.
+ import gyp.generator.msvs as msvs_generator
+ generator_additional_non_configuration_keys = getattr(msvs_generator,
+ 'generator_additional_non_configuration_keys', [])
+ generator_additional_path_sections = getattr(msvs_generator,
+ 'generator_additional_path_sections', [])
+
+ gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
+
+
+def CalculateGeneratorInputInfo(params):
+ """Calculate the generator specific info that gets fed to input (called by
+ gyp)."""
+ generator_flags = params.get('generator_flags', {})
+ if generator_flags.get('adjust_static_libraries', False):
+ global generator_wants_static_library_dependencies_adjusted
+ generator_wants_static_library_dependencies_adjusted = True
+
+
+def GetAllIncludeDirectories(target_list, target_dicts,
+ shared_intermediate_dirs, config_name, params,
+ compiler_path):
+ """Calculate the set of include directories to be used.
+
+ Returns:
+ A list including all the include_dir's specified for every target followed
+ by any include directories that were added as cflag compiler options.
+ """
+
+ gyp_includes_set = set()
+ compiler_includes_list = []
+
+ # Find compiler's default include dirs.
+ if compiler_path:
+ command = shlex.split(compiler_path)
+ command.extend(['-E', '-xc++', '-v', '-'])
+ proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output = proc.communicate()[1]
+ # Extract the list of include dirs from the output, which has this format:
+ # ...
+ # #include "..." search starts here:
+ # #include <...> search starts here:
+ # /usr/include/c++/4.6
+ # /usr/local/include
+ # End of search list.
+ # ...
+ in_include_list = False
+ for line in output.splitlines():
+ if line.startswith('#include'):
+ in_include_list = True
+ continue
+ if line.startswith('End of search list.'):
+ break
+ if in_include_list:
+ include_dir = line.strip()
+ if include_dir not in compiler_includes_list:
+ compiler_includes_list.append(include_dir)
+
+ flavor = gyp.common.GetFlavor(params)
+ if flavor == 'win':
+ generator_flags = params.get('generator_flags', {})
+ for target_name in target_list:
+ target = target_dicts[target_name]
+ if config_name in target['configurations']:
+ config = target['configurations'][config_name]
+
+ # Look for any include dirs that were explicitly added via cflags. This
+ # may be done in gyp files to force certain includes to come at the end.
+ # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
+ # remove this.
+ if flavor == 'win':
+ msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
+ cflags = msvs_settings.GetCflags(config_name)
+ else:
+ cflags = config['cflags']
+ for cflag in cflags:
+ if cflag.startswith('-I'):
+ include_dir = cflag[2:]
+ if include_dir not in compiler_includes_list:
+ compiler_includes_list.append(include_dir)
+
+ # Find standard gyp include dirs.
+ if 'include_dirs' in config:
+ include_dirs = config['include_dirs']
+ for shared_intermediate_dir in shared_intermediate_dirs:
+ for include_dir in include_dirs:
+ include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
+ shared_intermediate_dir)
+ if not os.path.isabs(include_dir):
+ base_dir = os.path.dirname(target_name)
+
+ include_dir = base_dir + '/' + include_dir
+ include_dir = os.path.abspath(include_dir)
+
+ gyp_includes_set.add(include_dir)
+
+ # Generate a list that has all the include dirs.
+ all_includes_list = list(gyp_includes_set)
+ all_includes_list.sort()
+ for compiler_include in compiler_includes_list:
+    if compiler_include not in gyp_includes_set:
+ all_includes_list.append(compiler_include)
+
+ # All done.
+ return all_includes_list
+
+
+def GetCompilerPath(target_list, data, options):
+ """Determine a command that can be used to invoke the compiler.
+
+ Returns:
+ If this is a gyp project that has explicit make settings, try to determine
+ the compiler from that. Otherwise, see if a compiler was specified via the
+ CC_target environment variable.
+ """
+ # First, see if the compiler is configured in make's settings.
+ build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
+ make_global_settings_dict = data[build_file].get('make_global_settings', {})
+ for key, value in make_global_settings_dict:
+ if key in ['CC', 'CXX']:
+ return os.path.join(options.toplevel_dir, value)
+
+ # Check to see if the compiler was specified as an environment variable.
+ for key in ['CC_target', 'CC', 'CXX']:
+ compiler = os.environ.get(key)
+ if compiler:
+ return compiler
+
+ return 'gcc'
+
+
+def GetAllDefines(target_list, target_dicts, data, config_name, params,
+ compiler_path):
+ """Calculate the defines for a project.
+
+ Returns:
+ A dict that includes explicit defines declared in gyp files along with all of
+ the default defines that the compiler uses.
+ """
+
+ # Get defines declared in the gyp files.
+ all_defines = {}
+ flavor = gyp.common.GetFlavor(params)
+ if flavor == 'win':
+ generator_flags = params.get('generator_flags', {})
+ for target_name in target_list:
+ target = target_dicts[target_name]
+
+ if flavor == 'win':
+ msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
+ extra_defines = msvs_settings.GetComputedDefines(config_name)
+ else:
+ extra_defines = []
+ if config_name in target['configurations']:
+ config = target['configurations'][config_name]
+ target_defines = config['defines']
+ else:
+ target_defines = []
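+ # Each define is either 'NAME' or 'NAME=value'; illustrative examples:
+ # 'FOO' yields all_defines['FOO'] = '1', and 'BAR=baz' yields
+ # all_defines['BAR'] = 'baz'.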
+ for define in target_defines + extra_defines:
+ split_define = define.split('=', 1)
+ if len(split_define) == 1:
+ split_define.append('1')
+ if split_define[0].strip() in all_defines:
+ # Already defined
+ continue
+ all_defines[split_define[0].strip()] = split_define[1].strip()
+ # Get default compiler defines (if possible).
+ if flavor == 'win':
+ return all_defines # Default defines already processed in the loop above.
+ if compiler_path:
+ command = shlex.split(compiler_path)
+ command.extend(['-E', '-dM', '-'])
+ cpp_proc = subprocess.Popen(args=command, cwd='.',
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE)
+ cpp_output = cpp_proc.communicate()[0]
+ cpp_lines = cpp_output.split('\n')
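+ # Each non-empty line has the form '#define NAME VALUE' (or just
+ # '#define NAME'); e.g. '#define __GNUC__ 4' yields
+ # all_defines['__GNUC__'] = '4' (illustrative).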
+ for cpp_line in cpp_lines:
+ if not cpp_line.strip():
+ continue
+ cpp_line_parts = cpp_line.split(' ', 2)
+ key = cpp_line_parts[1]
+ if len(cpp_line_parts) >= 3:
+ val = cpp_line_parts[2]
+ else:
+ val = '1'
+ all_defines[key] = val
+
+ return all_defines
+
+
+def WriteIncludePaths(out, eclipse_langs, include_dirs):
+ """Write the includes section of a CDT settings export file."""
+
+ out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
+ 'settingswizards.IncludePaths">\n')
+ out.write(' <language name="holder for library settings"></language>\n')
+ for lang in eclipse_langs:
+ out.write(' <language name="%s">\n' % lang)
+ for include_dir in include_dirs:
+ out.write(' <includepath workspace_path="false">%s</includepath>\n' %
+ include_dir)
+ out.write(' </language>\n')
+ out.write(' </section>\n')
+
+
+def WriteMacros(out, eclipse_langs, defines):
+ """Write the macros section of a CDT settings export file."""
+
+ out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
+ 'settingswizards.Macros">\n')
+ out.write(' <language name="holder for library settings"></language>\n')
+ for lang in eclipse_langs:
+ out.write(' <language name="%s">\n' % lang)
+ for key in sorted(defines.keys()):
+ out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
+ (escape(key), escape(defines[key])))
+ out.write(' </language>\n')
+ out.write(' </section>\n')
+
+
+def GenerateOutputForConfig(target_list, target_dicts, data, params,
+ config_name):
+ options = params['options']
+ generator_flags = params.get('generator_flags', {})
+
+ # build_dir: relative path from source root to our output files.
+ # e.g. "out/Debug"
+ build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
+ config_name)
+
+ toplevel_build = os.path.join(options.toplevel_dir, build_dir)
+ # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
+ # SHARED_INTERMEDIATE_DIR. Include both possible locations.
+ shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
+ os.path.join(toplevel_build, 'gen')]
+
+ GenerateCdtSettingsFile(target_list,
+ target_dicts,
+ data,
+ params,
+ config_name,
+ os.path.join(toplevel_build,
+ 'eclipse-cdt-settings.xml'),
+ options,
+ shared_intermediate_dirs)
+ GenerateClasspathFile(target_list,
+ target_dicts,
+ options.toplevel_dir,
+ toplevel_build,
+ os.path.join(toplevel_build,
+ 'eclipse-classpath.xml'))
+
+
+def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
+ config_name, out_name, options,
+ shared_intermediate_dirs):
+ gyp.common.EnsureDirExists(out_name)
+ with open(out_name, 'w') as out:
+ out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
+ out.write('<cdtprojectproperties>\n')
+
+ eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
+ 'GNU C++', 'GNU C', 'Assembly']
+ compiler_path = GetCompilerPath(target_list, data, options)
+ include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
+ shared_intermediate_dirs,
+ config_name, params, compiler_path)
+ WriteIncludePaths(out, eclipse_langs, include_dirs)
+ defines = GetAllDefines(target_list, target_dicts, data, config_name,
+ params, compiler_path)
+ WriteMacros(out, eclipse_langs, defines)
+
+ out.write('</cdtprojectproperties>\n')
+
+
+def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
+ toplevel_build, out_name):
+ '''Generates a classpath file suitable for symbol navigation and code
+ completion of Java code (such as in Android projects) by finding all
+ .java and .jar files used as action inputs.'''
+ gyp.common.EnsureDirExists(out_name)
+ result = ET.Element('classpath')
+
+ def AddElements(kind, paths):
+ # First, we need to normalize the paths so they are all relative to the
+ # toplevel dir.
+ rel_paths = set()
+ for path in paths:
+ if os.path.isabs(path):
+ rel_paths.add(os.path.relpath(path, toplevel_dir))
+ else:
+ rel_paths.add(path)
+
+ for path in sorted(rel_paths):
+ entry_element = ET.SubElement(result, 'classpathentry')
+ entry_element.set('kind', kind)
+ entry_element.set('path', path)
+
+ AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
+ AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
+ # Include the standard JRE container.
+ AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
+ # Include a dummy out folder so that Eclipse doesn't use the default /bin
+ # folder in the root of the project.
+ AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])
+
+ ET.ElementTree(result).write(out_name)
+
+
+def GetJavaJars(target_list, target_dicts, toplevel_dir):
+ '''Generates a sequence of all .jars used as inputs.'''
+ for target_name in target_list:
+ target = target_dicts[target_name]
+ for action in target.get('actions', []):
+ for input_ in action['inputs']:
+ if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'):
+ if os.path.isabs(input_):
+ yield input_
+ else:
+ yield os.path.join(os.path.dirname(target_name), input_)
+
+
+def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
+ '''Generates a sequence of all likely java package root directories.'''
+ for target_name in target_list:
+ target = target_dicts[target_name]
+ for action in target.get('actions', []):
+ for input_ in action['inputs']:
+ if (os.path.splitext(input_)[1] == '.java' and
+ not input_.startswith('$')):
+ dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name),
+ input_))
+ # If there is a parent 'src' or 'java' folder, navigate up to it -
+ # these are canonical package root names in Chromium. This will
+ # break if 'src' or 'java' appears inside the package structure
+ # itself. This could be further improved by inspecting the java file
+ # for the package name if this proves too fragile in practice.
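+ # Illustrative: an input like java/com/foo/Bar.java makes the walk
+ # below climb from java/com/foo and yield the enclosing 'java' dir.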
+ parent_search = dir_
+ while os.path.basename(parent_search) not in ['src', 'java']:
+ parent_search, _ = os.path.split(parent_search)
+ if not parent_search or parent_search == toplevel_dir:
+ # Didn't find a known root, just return the original path
+ yield dir_
+ break
+ else:
+ yield parent_search
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ """Generate an XML settings file that can be imported into a CDT project."""
+
+ if params['options'].generator_output:
+ raise NotImplementedError("--generator_output not implemented for eclipse")
+
+ user_config = params.get('generator_flags', {}).get('config', None)
+ if user_config:
+ GenerateOutputForConfig(target_list, target_dicts, data, params,
+ user_config)
+ else:
+ config_names = target_dicts[target_list[0]]['configurations']
+ for config_name in config_names:
+ GenerateOutputForConfig(target_list, target_dicts, data, params,
+ config_name)
+
diff --git a/third_party/python/gyp/pylib/gyp/generator/gypd.py b/third_party/python/gyp/pylib/gyp/generator/gypd.py
new file mode 100644
index 0000000000..78eeaa61b2
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/gypd.py
@@ -0,0 +1,94 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""gypd output module
+
+This module produces gyp input as its output. Output files are given the
+.gypd extension to avoid overwriting the .gyp files that they are generated
+from. Internal references to .gyp files (such as those found in
+"dependencies" sections) are not adjusted to point to .gypd files instead;
+unlike other paths, which are relative to the .gyp or .gypd file, such paths
+are relative to the directory from which gyp was run to create the .gypd file.
+
+This generator module is intended to be a sample and a debugging aid, hence
+the "d" for "debug" in .gypd. It is useful to inspect the results of the
+various merges, expansions, and conditional evaluations performed by gyp
+and to see a representation of what would be fed to a generator module.
+
+It's not advisable to rename .gypd files produced by this module to .gyp,
+because they will have all merges, expansions, and evaluations already
+performed and the relevant constructs not present in the output; paths to
+dependencies may be wrong; and various sections that do not belong in .gyp
+files, such as "included_files" and "*_excluded", will be present.
+Output will also be stripped of comments. This is not intended to be a
+general-purpose gyp pretty-printer; for that, you probably just want to
+run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
+comments but won't do all of the other things done to this module's output.
+
+The specific formatting of the output generated by this module is subject
+to change.
+"""
+
+
+import gyp.common
+import errno
+import os
+import pprint
+
+
+# These variables should just be spit back out as variable references.
+_generator_identity_variables = [
+ 'CONFIGURATION_NAME',
+ 'EXECUTABLE_PREFIX',
+ 'EXECUTABLE_SUFFIX',
+ 'INTERMEDIATE_DIR',
+ 'LIB_DIR',
+ 'PRODUCT_DIR',
+ 'RULE_INPUT_ROOT',
+ 'RULE_INPUT_DIRNAME',
+ 'RULE_INPUT_EXT',
+ 'RULE_INPUT_NAME',
+ 'RULE_INPUT_PATH',
+ 'SHARED_INTERMEDIATE_DIR',
+ 'SHARED_LIB_DIR',
+ 'SHARED_LIB_PREFIX',
+ 'SHARED_LIB_SUFFIX',
+ 'STATIC_LIB_PREFIX',
+ 'STATIC_LIB_SUFFIX',
+]
+
+# gypd doesn't define a default value for OS like many other generator
+# modules. Specify "-D OS=whatever" on the command line to provide a value.
+generator_default_variables = {
+}
+
+# gypd supports multiple toolsets
+generator_supports_multiple_toolsets = True
+
+# TODO(mark): This always uses <, which isn't right. The input module should
+# notify the generator to tell it which phase it is operating in, and this
+# module should use < for the early phase and then switch to > for the late
+# phase. Bonus points for carrying @ back into the output too.
+for v in _generator_identity_variables:
+ generator_default_variables[v] = '<(%s)' % v
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ output_files = {}
+ for qualified_target in target_list:
+ [input_file, target] = \
+ gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
+
+ if input_file[-4:] != '.gyp':
+ continue
+ input_file_stem = input_file[:-4]
+ output_file = input_file_stem + params['options'].suffix + '.gypd'
+
+ if output_file not in output_files:
+ output_files[output_file] = input_file
+
+ for output_file, input_file in output_files.items():
+ with open(output_file, 'w') as output:
+ pprint.pprint(data[input_file], output)
diff --git a/third_party/python/gyp/pylib/gyp/generator/gypsh.py b/third_party/python/gyp/pylib/gyp/generator/gypsh.py
new file mode 100644
index 0000000000..bd405f43a9
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/gypsh.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""gypsh output module
+
+gypsh is a GYP shell. It's not really a generator per se. All it does is
+fire up an interactive Python session with a few local variables set to the
+variables passed to the generator. Like gypd, it's intended as a debugging
+aid, to facilitate the exploration of .gyp structures after being processed
+by the input module.
+
+The expected usage is "gyp -f gypsh -D OS=desired_os".
+"""
+
+
+import code
+import sys
+
+
+# All of this stuff about generator variables was lovingly ripped from gypd.py.
+# That module has a much better description of what's going on and why.
+_generator_identity_variables = [
+ 'EXECUTABLE_PREFIX',
+ 'EXECUTABLE_SUFFIX',
+ 'INTERMEDIATE_DIR',
+ 'PRODUCT_DIR',
+ 'RULE_INPUT_ROOT',
+ 'RULE_INPUT_DIRNAME',
+ 'RULE_INPUT_EXT',
+ 'RULE_INPUT_NAME',
+ 'RULE_INPUT_PATH',
+ 'SHARED_INTERMEDIATE_DIR',
+]
+
+generator_default_variables = {
+}
+
+for v in _generator_identity_variables:
+ generator_default_variables[v] = '<(%s)' % v
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ locals = {
+ 'target_list': target_list,
+ 'target_dicts': target_dicts,
+ 'data': data,
+ }
+
+ # Use a banner that looks like the stock Python one and like what
+ # code.interact uses by default, but tack on something to indicate what
+ # locals are available, and identify gypsh.
+ banner = 'Python %s on %s\nlocals.keys() = %s\ngypsh' % \
+ (sys.version, sys.platform, repr(sorted(locals.keys())))
+
+ code.interact(banner, local=locals)
diff --git a/third_party/python/gyp/pylib/gyp/generator/make.py b/third_party/python/gyp/pylib/gyp/generator/make.py
new file mode 100644
index 0000000000..997eec0866
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/make.py
@@ -0,0 +1,2260 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Notes:
+#
+# This is all roughly based on the Makefile system used by the Linux
+# kernel, but is a non-recursive make -- we put the entire dependency
+# graph in front of make and let it figure it out.
+#
+# The code below generates a separate .mk file for each target, but
+# all are sourced by the top-level Makefile. This means that all
+# variables in .mk-files clobber one another. Be careful to use :=
+# where appropriate for immediate evaluation, and similarly to watch
+# that you're not relying on a variable value to last between different
+# .mk files.
+#
+# TODOs:
+#
+# Global settings and utility functions are currently stuffed in the
+# toplevel Makefile. It may make sense to generate some .mk files on
+# the side to keep the files readable.
+
+from __future__ import print_function
+
+import os
+import re
+import sys
+import subprocess
+import gyp
+import gyp.common
+import gyp.xcode_emulation
+from gyp.common import GetEnvironFallback
+from gyp.common import GypError
+
+import hashlib
+
+generator_default_variables = {
+ 'EXECUTABLE_PREFIX': '',
+ 'EXECUTABLE_SUFFIX': '',
+ 'STATIC_LIB_PREFIX': 'lib',
+ 'SHARED_LIB_PREFIX': 'lib',
+ 'STATIC_LIB_SUFFIX': '.a',
+ 'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
+ 'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
+ 'PRODUCT_DIR': '$(builddir)',
+ 'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
+ 'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
+ 'RULE_INPUT_PATH': '$(abspath $<)',
+ 'RULE_INPUT_EXT': '$(suffix $<)',
+ 'RULE_INPUT_NAME': '$(notdir $<)',
+ 'CONFIGURATION_NAME': '$(BUILDTYPE)',
+}
+
+# Make supports multiple toolsets
+generator_supports_multiple_toolsets = True
+
+# Request sorted dependencies in the order from dependents to dependencies.
+generator_wants_sorted_dependencies = False
+
+# Placates pylint.
+generator_additional_non_configuration_keys = []
+generator_additional_path_sections = []
+generator_extra_sources_for_rules = []
+generator_filelist_paths = None
+
+
+def CalculateVariables(default_variables, params):
+ """Calculate additional variables for use in the build (called by gyp)."""
+ flavor = gyp.common.GetFlavor(params)
+ if flavor == 'mac':
+ default_variables.setdefault('OS', 'mac')
+ default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
+ default_variables.setdefault('SHARED_LIB_DIR',
+ generator_default_variables['PRODUCT_DIR'])
+ default_variables.setdefault('LIB_DIR',
+ generator_default_variables['PRODUCT_DIR'])
+
+ # Copy additional generator configuration data from Xcode, which is shared
+ # by the Mac Make generator.
+ import gyp.generator.xcode as xcode_generator
+ global generator_additional_non_configuration_keys
+ generator_additional_non_configuration_keys = getattr(xcode_generator,
+ 'generator_additional_non_configuration_keys', [])
+ global generator_additional_path_sections
+ generator_additional_path_sections = getattr(xcode_generator,
+ 'generator_additional_path_sections', [])
+ global generator_extra_sources_for_rules
+ generator_extra_sources_for_rules = getattr(xcode_generator,
+ 'generator_extra_sources_for_rules', [])
+ COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm': 'objcxx'})
+ else:
+ operating_system = flavor
+ if flavor == 'android':
+ operating_system = 'linux' # Keep this legacy behavior for now.
+ default_variables.setdefault('OS', operating_system)
+ if flavor == 'aix':
+ default_variables.setdefault('SHARED_LIB_SUFFIX', '.a')
+ else:
+ default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
+ default_variables.setdefault('SHARED_LIB_DIR', '$(builddir)/lib.$(TOOLSET)')
+ default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
+
+
+def CalculateGeneratorInputInfo(params):
+ """Calculate the generator specific info that gets fed to input (called by
+ gyp)."""
+ generator_flags = params.get('generator_flags', {})
+ android_ndk_version = generator_flags.get('android_ndk_version', None)
+ # Android NDK requires a strict link order.
+ if android_ndk_version:
+ global generator_wants_sorted_dependencies
+ generator_wants_sorted_dependencies = True
+
+ output_dir = params['options'].generator_output or \
+ params['options'].toplevel_dir
+ builddir_name = generator_flags.get('output_dir', 'out')
+ qualified_out_dir = os.path.normpath(os.path.join(
+ output_dir, builddir_name, 'gypfiles'))
+
+ global generator_filelist_paths
+ generator_filelist_paths = {
+ 'toplevel': params['options'].toplevel_dir,
+ 'qualified_out_dir': qualified_out_dir,
+ }
+
+
+# The .d checking code below uses these functions:
+# wildcard, sort, foreach, shell, wordlist
+# wildcard can handle spaces, the rest can't.
+# Since I could find no way to make foreach work with spaces in filenames
+# correctly, the .d files have spaces replaced with another character. The .d
+# file for
+# Chromium\ Framework.framework/foo
+# is for example
+# out/Release/.deps/out/Release/Chromium?Framework.framework/foo
+# This is the replacement character.
+SPACE_REPLACEMENT = '?'
+
+
+LINK_COMMANDS_LINUX = """\
+quiet_cmd_alink = AR($(TOOLSET)) $@
+cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+quiet_cmd_alink_thin = AR($(TOOLSET)) $@
+cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
+
+# Due to circular dependencies between libraries :(, we wrap the
+# special "figure out circular dependencies" flags around the entire
+# input list during linking.
+quiet_cmd_link = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
+
+# We support two kinds of shared objects (.so):
+# 1) shared_library, which is just bundling together many dependent libraries
+# into a link line.
+# 2) loadable_module, which is generating a module intended for dlopen().
+#
+# They differ only slightly:
+# In the former case, we want to package all dependent code into the .so.
+# In the latter case, we want to package just the API exposed by the
+# outermost module.
+# This means shared_library uses --whole-archive, while loadable_module doesn't.
+# (Note that --whole-archive is incompatible with the --start-group used in
+# normal linking.)
+
+# Other shared-object link notes:
+# - Set SONAME to the library filename so our binaries don't reference
+# the local, absolute paths used on the link command-line.
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
+"""
+
+LINK_COMMANDS_MAC = """\
+quiet_cmd_alink = LIBTOOL-STATIC $@
+cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
+
+quiet_cmd_link = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
+
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
+"""
+
+LINK_COMMANDS_ANDROID = """\
+quiet_cmd_alink = AR($(TOOLSET)) $@
+cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+quiet_cmd_alink_thin = AR($(TOOLSET)) $@
+cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
+
+# Due to circular dependencies between libraries :(, we wrap the
+# special "figure out circular dependencies" flags around the entire
+# input list during linking.
+quiet_cmd_link = LINK($(TOOLSET)) $@
+quiet_cmd_link_host = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
+cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
+
+# Other shared-object link notes:
+# - Set SONAME to the library filename so our binaries don't reference
+# the local, absolute paths used on the link command-line.
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
+quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
+"""
+
+
+LINK_COMMANDS_AIX = """\
+quiet_cmd_alink = AR($(TOOLSET)) $@
+cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
+
+quiet_cmd_alink_thin = AR($(TOOLSET)) $@
+cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
+
+quiet_cmd_link = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
+
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
+"""
+
+
+LINK_COMMANDS_OS390 = """\
+quiet_cmd_alink = AR($(TOOLSET)) $@
+cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
+
+quiet_cmd_alink_thin = AR($(TOOLSET)) $@
+cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
+
+quiet_cmd_link = LINK($(TOOLSET)) $@
+cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
+
+quiet_cmd_solink = SOLINK($(TOOLSET)) $@
+cmd_solink = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS) -Wl,DLL
+
+quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
+cmd_solink_module = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS) -Wl,DLL
+"""
+
+
+# Header of toplevel Makefile.
+# This should go into the build tree, but it's easier to keep it here for now.
+SHARED_HEADER = ("""\
+# We borrow heavily from the kernel build setup, though we are simpler since
+# we don't have Kconfig tweaking settings on us.
+
+# The implicit make rules have it looking for RCS files, among other things.
+# We instead explicitly write all the rules we care about.
+# It's even quicker (saves ~200ms) to pass -r on the command line.
+MAKEFLAGS=-r
+
+# The source directory tree.
+srcdir := %(srcdir)s
+abs_srcdir := $(abspath $(srcdir))
+
+# The name of the builddir.
+builddir_name ?= %(builddir)s
+
+# The V=1 flag on the command line makes us verbosely print command lines.
+ifdef V
+ quiet=
+else
+ quiet=quiet_
+endif
+
+# Specify BUILDTYPE=Release on the command line for a release build.
+BUILDTYPE ?= %(default_configuration)s
+
+# Directory all our build output goes into.
+# Note that this must be two directories beneath src/ for unit tests to pass,
+# as they reach into the src/ directory for data with relative paths.
+builddir ?= $(builddir_name)/$(BUILDTYPE)
+abs_builddir := $(abspath $(builddir))
+depsdir := $(builddir)/.deps
+
+# Object output directory.
+obj := $(builddir)/obj
+abs_obj := $(abspath $(obj))
+
+# We build up a list of every single one of the targets so we can slurp in the
+# generated dependency rule Makefiles in one pass.
+all_deps :=
+
+%(make_global_settings)s
+
+CC.target ?= %(CC.target)s
+CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
+CXX.target ?= %(CXX.target)s
+CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
+LINK.target ?= %(LINK.target)s
+LDFLAGS.target ?= $(LDFLAGS)
+AR.target ?= $(AR)
+
+# C++ apps need to be linked with g++.
+LINK ?= $(CXX.target)
+
+# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
+# to replicate this environment fallback in make as well.
+CC.host ?= %(CC.host)s
+CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
+CXX.host ?= %(CXX.host)s
+CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
+LINK.host ?= %(LINK.host)s
+LDFLAGS.host ?= $(LDFLAGS_host)
+AR.host ?= %(AR.host)s
+
+# Define a dir function that can handle spaces.
+# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
+# "leading spaces cannot appear in the text of the first argument as written.
+# These characters can be put into the argument value by variable substitution."
+empty :=
+space := $(empty) $(empty)
+
+# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
+replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
+unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
+dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
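+# Illustrative: $(call dirx,a b/c.o) expands to 'a b/'; the space survives
+# because it is replaced before $(dir) runs and restored afterwards.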
+
+# Flags to make gcc output dependency info. Note that you need to be
+# careful here to use the flags that ccache and distcc can understand.
+# We write to a dep file on the side first and then rename at the end
+# so we can't end up with a broken dep file.
+depfile = $(depsdir)/$(call replace_spaces,$@).d
+DEPFLAGS = %(makedep_args)s -MF $(depfile).raw
+
+# We have to fixup the deps output in a few ways.
+# (1) the file output should mention the proper .o file.
+# ccache or distcc lose the path to the target, so we convert a rule of
+# the form:
+# foobar.o: DEP1 DEP2
+# into
+# path/to/foobar.o: DEP1 DEP2
+# (2) we want missing files not to cause us to fail to build.
+# We want to rewrite
+# foobar.o: DEP1 DEP2 \\
+# DEP3
+# to
+# DEP1:
+# DEP2:
+# DEP3:
+# so if the files are missing, they're just considered phony rules.
+# We have to do some pretty insane escaping to get those backslashes
+# and dollar signs past make, the shell, and sed at the same time.
+# Doesn't work with spaces, but that's fine: .d files have spaces in
+# their names replaced with other characters."""
+r"""
+define fixup_dep
+# The depfile may not exist if the input file didn't have any #includes.
+touch $(depfile).raw
+# Fixup path as in (1).
+sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
+# Add extra rules as in (2).
+# We remove slashes and replace spaces with new lines;
+# remove blank lines;
+# delete the first line and append a colon to the remaining lines.
+sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
+ grep -v '^$$' |\
+ sed -e 1d -e 's|$$|:|' \
+ >> $(depfile)
+rm $(depfile).raw
+endef
+"""
+"""
+# Command definitions:
+# - cmd_foo is the actual command to run;
+# - quiet_cmd_foo is the brief-output summary of the command.
+
+quiet_cmd_cc = CC($(TOOLSET)) $@
+cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
+
+quiet_cmd_cxx = CXX($(TOOLSET)) $@
+cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
+%(extra_commands)s
+quiet_cmd_touch = TOUCH $@
+cmd_touch = touch $@
+
+quiet_cmd_copy = COPY $@
+# send stderr to /dev/null to ignore messages when linking directories.
+cmd_copy = ln -f "$<" "$@" 2>/dev/null || (rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@")
+
+%(link_commands)s
+"""
+
+r"""
+# Define an escape_quotes function to escape single quotes.
+# This allows us to handle quotes properly as long as we always use
+# single quotes and escape_quotes.
+escape_quotes = $(subst ','\'',$(1))
+# This comment is here just to include a ' to unconfuse syntax highlighting.
+# Define an escape_vars function to escape '$' variable syntax.
+# This allows us to read/write command lines with shell variables (e.g.
+# $LD_LIBRARY_PATH), without triggering make substitution.
+escape_vars = $(subst $$,$$$$,$(1))
+# Helper that expands to a shell command to echo a string exactly as it is in
+# make. This uses printf instead of echo because printf's behaviour with respect
+# to escape sequences is more portable than echo's across different shells
+# (e.g., dash, bash).
+exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
+"""
+"""
+# Helper to compare the command we're about to run against the command
+# we logged the last time we ran the command. Produces an empty
+# string (false) when the commands match.
+# Tricky point: Make has no string-equality test function.
+# The kernel uses the following, but it seems like it would have false
+# positives when one string is merely a reordering of the other's arguments.
+# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
+# $(filter-out $(cmd_$@), $(cmd_$(1))))
+# We instead substitute each for the empty string into the other, and
+# say they're equal if both substitutions produce the empty string.
+# .d files contain """ + SPACE_REPLACEMENT + \
+ """ instead of spaces, take that into account.
+command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
+ $(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
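+# Illustrative: if the logged command was 'gcc -O2 foo.c' and the new one is
+# 'gcc -O0 foo.c', both substitutions leave non-empty text, so
+# command_changed is true and the rule reruns.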
+
+# Helper that is non-empty when a prerequisite changes.
+# Normally make does this implicitly, but we force rules to always run
+# so we can check their command lines.
+# $? -- new prerequisites
+# $| -- order-only dependencies
+prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
+
+# Helper that executes all postbuilds until one fails.
+define do_postbuilds
+ @E=0;\\
+ for p in $(POSTBUILDS); do\\
+ eval $$p;\\
+ E=$$?;\\
+ if [ $$E -ne 0 ]; then\\
+ break;\\
+ fi;\\
+ done;\\
+ if [ $$E -ne 0 ]; then\\
+ rm -rf "$@";\\
+ exit $$E;\\
+ fi
+endef
+
+# do_cmd: run a command via the above cmd_foo names, if necessary.
+# Should always run for a given target to handle command-line changes.
+# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
+# Third argument, if non-zero, makes it do POSTBUILDS processing.
+# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
+ SPACE_REPLACEMENT + """ for
+# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
+ """ characters.
+define do_cmd
+$(if $(or $(command_changed),$(prereq_changed)),
+ @$(call exact_echo, $($(quiet)cmd_$(1)))
+ @mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
+ $(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
+ @$(cmd_$(1))
+ @echo " $(quiet_cmd_$(1)): Finished",
+ @$(cmd_$(1))
+ )
+ @$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
+ @$(if $(2),$(fixup_dep))
+ $(if $(and $(3), $(POSTBUILDS)),
+ $(call do_postbuilds)
+ )
+)
+endef
+
+# Declare the "%(default_target)s" target first so it is the default,
+# even though we don't have the deps yet.
+.PHONY: %(default_target)s
+%(default_target)s:
+
+# make looks for ways to re-generate included makefiles, but in our case, we
+# don't have a direct way. Explicitly telling make that it has nothing to do
+# for them makes it go faster.
+%%.d: ;
+
+# Use FORCE_DO_CMD to force a target to run. Should be coupled with
+# do_cmd.
+.PHONY: FORCE_DO_CMD
+FORCE_DO_CMD:
+
+""")
+
+SHARED_HEADER_MAC_COMMANDS = """
+quiet_cmd_objc = CXX($(TOOLSET)) $@
+cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
+
+quiet_cmd_objcxx = CXX($(TOOLSET)) $@
+cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
+
+# Commands for precompiled header files.
+quiet_cmd_pch_c = CXX($(TOOLSET)) $@
+cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
+quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
+cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
+quiet_cmd_pch_m = CXX($(TOOLSET)) $@
+cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
+quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
+cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
+
+# gyp-mac-tool is written next to the root Makefile by gyp.
+# Use $(4) for the command, since $(2) and $(3) are already used as flags by
+# do_cmd.
+quiet_cmd_mac_tool = MACTOOL $(4) $<
+cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
+
+quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
+cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
+
+quiet_cmd_infoplist = INFOPLIST $@
+cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
+"""
+
+
+def WriteRootHeaderSuffixRules(writer):
+ extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
+
+ writer.write('# Suffix rules, putting all outputs into $(obj).\n')
+ for ext in extensions:
+ writer.write('$(obj).$(TOOLSET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD\n' % ext)
+ writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
+
+ writer.write('\n# Try building from generated source, too.\n')
+ for ext in extensions:
+ writer.write(
+ '$(obj).$(TOOLSET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD\n' % ext)
+ writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
+ writer.write('\n')
+ for ext in extensions:
+ writer.write('$(obj).$(TOOLSET)/%%.o: $(obj)/%%%s FORCE_DO_CMD\n' % ext)
+ writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
+ writer.write('\n')
+
+
+SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
+# Suffix rules, putting all outputs into $(obj).
+""")
+
+
+SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
+# Try building from generated source, too.
+""")
+
+
+SHARED_FOOTER = """\
+# "all" is a concatenation of the "all" targets from all the included
+# sub-makefiles. This is just here to clarify.
+all:
+
+# Add in dependency-tracking rules. $(all_deps) is the list of every single
+# target in our tree. Only consider the ones with .d (dependency) info:
+d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
+ifneq ($(d_files),)
+ include $(d_files)
+endif
+"""
+
+header = """\
+# This file is generated by gyp; do not edit.
+
+"""
+
+# Maps every compilable file extension to the do_cmd that compiles it.
+COMPILABLE_EXTENSIONS = {
+ '.c': 'cc',
+ '.cc': 'cxx',
+ '.cpp': 'cxx',
+ '.cxx': 'cxx',
+ '.s': 'cc',
+ '.S': 'cc',
+}
+
+def Compilable(filename):
+ """Return true if the file is compilable (should be in OBJS)."""
+ return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
+
+
+def Linkable(filename):
+ """Return true if the file is linkable (should be on the link line)."""
+ return filename.endswith('.o')
+
+
+def Target(filename):
+ """Translate a compilable filename to its .o target."""
+ return os.path.splitext(filename)[0] + '.o'
+
+
+def EscapeShellArgument(s):
+ """Quotes an argument so that it will be interpreted literally by a POSIX
+ shell. Taken from
+ http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
+ """
+ return "'" + s.replace("'", "'\\''") + "'"
+
+
+def EscapeMakeVariableExpansion(s):
+ """Make has its own variable expansion syntax using $. We must escape it for
+ string to be interpreted literally."""
+ return s.replace('$', '$$')
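+# Illustrative: EscapeMakeVariableExpansion('$HOME/lib') returns '$$HOME/lib'.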
+
+
+def EscapeCppDefine(s):
+ """Escapes a CPP define so that it will reach the compiler unaltered."""
+ s = EscapeShellArgument(s)
+ s = EscapeMakeVariableExpansion(s)
+ # '#' characters must be escaped even when embedded in a string, or Make
+ # will treat them as the start of a comment.
+ return s.replace('#', r'\#')
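+# Illustrative: EscapeCppDefine('COLOR=#fff') shell-quotes the value and
+# yields 'COLOR=\#fff' (outer single quotes included), safe to pass through
+# Make and the shell to the compiler.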
+
+
+def QuoteIfNecessary(string):
+ """TODO: Should this ideally be replaced with one or more of the above
+ functions?"""
+ if '"' in string:
+ string = '"' + string.replace('"', '\\"') + '"'
+ return string
+
+
+def StringToMakefileVariable(string):
+ """Convert a string to a value that is acceptable as a make variable name."""
+ return re.sub('[^a-zA-Z0-9_]', '_', string)
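+# Illustrative: StringToMakefileVariable('my-target (1)') returns
+# 'my_target__1_'.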
+
+
+srcdir_prefix = ''
+def Sourceify(path):
+ """Convert a path to its source directory form."""
+ if '$(' in path:
+ return path
+ if os.path.isabs(path):
+ return path
+ return srcdir_prefix + path
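+# Illustrative, assuming srcdir_prefix has been set to '$(srcdir)/':
+# Sourceify('foo/bar.c') returns '$(srcdir)/foo/bar.c', while absolute paths
+# and paths containing '$(' pass through unchanged.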
+
+
+def QuoteSpaces(s, quote=r'\ '):
+ return s.replace(' ', quote)
+
+
+# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
+def _ValidateSourcesForOSX(spec, all_sources):
+ """Makes sure if duplicate basenames are not specified in the source list.
+
+ Arguments:
+ spec: The target dictionary containing the properties of the target.
+ """
+ if spec.get('type', None) != 'static_library':
+ return
+
+ basenames = {}
+ for source in all_sources:
+ name, ext = os.path.splitext(source)
+ is_compiled_file = ext in [
+ '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
+ if not is_compiled_file:
+ continue
+ basename = os.path.basename(name) # Don't include extension.
+ basenames.setdefault(basename, []).append(source)
+
+ error = ''
+ for basename, files in basenames.items():
+ if len(files) > 1:
+ error += ' %s: %s\n' % (basename, ' '.join(files))
+
+ if error:
+ print('static library %s has several files with the same basename:\n' %
+ spec['target_name'] + error + 'libtool on OS X will generate' +
+ ' warnings for them.')
+ raise GypError('Duplicate basenames in sources section, see list above')
+
+
+# Map from qualified target to path to output.
+target_outputs = {}
+# Map from qualified target to any linkable output. A subset
+# of target_outputs. E.g. when mybinary depends on liba, we want to
+# include liba in the linker line; when otherbinary depends on
+# mybinary, we just want to build mybinary first.
+target_link_deps = {}
+
+
+class MakefileWriter(object):
+ """MakefileWriter packages up the writing of one target-specific foobar.mk.
+
+ Its only real entry point is Write(); the class mostly serves as a namespace.
+ """
+
+ def __init__(self, generator_flags, flavor):
+ self.generator_flags = generator_flags
+ self.flavor = flavor
+
+ self.suffix_rules_srcdir = {}
+ self.suffix_rules_objdir1 = {}
+ self.suffix_rules_objdir2 = {}
+
+ # Generate suffix rules for all compilable extensions.
+ for ext in COMPILABLE_EXTENSIONS.keys():
+ # Suffix rules for source folder.
+ self.suffix_rules_srcdir.update({ext: ("""\
+$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
+ @$(call do_cmd,%s,1)
+""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
+
+ # Suffix rules for generated source files.
+ self.suffix_rules_objdir1.update({ext: ("""\
+$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
+ @$(call do_cmd,%s,1)
+""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
+ self.suffix_rules_objdir2.update({ext: ("""\
+$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
+ @$(call do_cmd,%s,1)
+""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
+
+
+ def Write(self, qualified_target, base_path, output_filename, spec, configs,
+ part_of_all):
+ """The main entry point: writes a .mk file for a single target.
+
+ Arguments:
+ qualified_target: target we're generating
+ base_path: path relative to source root we're building in, used to resolve
+ target-relative paths
+ output_filename: output .mk file name to write
+ spec, configs: gyp info
+ part_of_all: flag indicating this target is part of 'all'
+ """
+ gyp.common.EnsureDirExists(output_filename)
+
+ self.fp = open(output_filename, 'w')
+
+ self.fp.write(header)
+
+ self.qualified_target = qualified_target
+ self.path = base_path
+ self.target = spec['target_name']
+ self.type = spec['type']
+ self.toolset = spec['toolset']
+
+ self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
+ if self.flavor == 'mac':
+ self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
+ else:
+ self.xcode_settings = None
+
+ deps, link_deps = self.ComputeDeps(spec)
+
+ # Some of the generation below can add extra output, sources, or
+ # link dependencies. All of the out params of the functions that
+ # follow use names like extra_foo.
+ extra_outputs = []
+ extra_sources = []
+ extra_link_deps = []
+ extra_mac_bundle_resources = []
+ mac_bundle_deps = []
+
+ if self.is_mac_bundle:
+ self.output = self.ComputeMacBundleOutput(spec)
+ self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
+ else:
+ self.output = self.output_binary = self.ComputeOutput(spec)
+
+ self.is_standalone_static_library = bool(
+ spec.get('standalone_static_library', 0))
+ self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
+ 'shared_library')
+ if (self.is_standalone_static_library or
+ self.type in self._INSTALLABLE_TARGETS):
+ self.alias = os.path.basename(self.output)
+ install_path = self._InstallableTargetInstallPath()
+ else:
+ self.alias = self.output
+ install_path = self.output
+
+ self.WriteLn("TOOLSET := " + self.toolset)
+ self.WriteLn("TARGET := " + self.target)
+
+ # Actions must come first, since they can generate more OBJs for use below.
+ if 'actions' in spec:
+ self.WriteActions(spec['actions'], extra_sources, extra_outputs,
+ extra_mac_bundle_resources, part_of_all)
+
+ # Rules, like actions, must come early.
+ if 'rules' in spec:
+ self.WriteRules(spec['rules'], extra_sources, extra_outputs,
+ extra_mac_bundle_resources, part_of_all)
+
+ if 'copies' in spec:
+ self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
+
+ # Bundle resources.
+ if self.is_mac_bundle:
+ all_mac_bundle_resources = (
+ spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
+ self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
+ self.WriteMacInfoPlist(mac_bundle_deps)
+
+ # Sources.
+ all_sources = spec.get('sources', []) + extra_sources
+ if all_sources:
+ if self.flavor == 'mac':
+ # libtool on OS X generates warnings for duplicate basenames in the same
+ # target.
+ _ValidateSourcesForOSX(spec, all_sources)
+ self.WriteSources(
+ configs, deps, all_sources, extra_outputs,
+ extra_link_deps, part_of_all,
+ gyp.xcode_emulation.MacPrefixHeader(
+ self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
+ self.Pchify))
+ sources = [x for x in all_sources if Compilable(x)]
+ if sources:
+ self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
+ extensions = set([os.path.splitext(s)[1] for s in sources])
+ for ext in extensions:
+ if ext in self.suffix_rules_srcdir:
+ self.WriteLn(self.suffix_rules_srcdir[ext])
+ self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
+ for ext in extensions:
+ if ext in self.suffix_rules_objdir1:
+ self.WriteLn(self.suffix_rules_objdir1[ext])
+ for ext in extensions:
+ if ext in self.suffix_rules_objdir2:
+ self.WriteLn(self.suffix_rules_objdir2[ext])
+ self.WriteLn('# End of this set of suffix rules')
+
+ # Add dependency from bundle to bundle binary.
+ if self.is_mac_bundle:
+ mac_bundle_deps.append(self.output_binary)
+
+ self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
+ mac_bundle_deps, extra_outputs, part_of_all)
+
+ # Update global list of target outputs, used in dependency tracking.
+ target_outputs[qualified_target] = install_path
+
+ # Update global list of link dependencies.
+ if self.type in ('static_library', 'shared_library'):
+ target_link_deps[qualified_target] = self.output_binary
+
+ # Currently all versions have the same effect, but in the future the
+ # behavior could differ.
+ if self.generator_flags.get('android_ndk_version', None):
+ self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
+
+ self.fp.close()
+
+
+ def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
+ """Write a "sub-project" Makefile.
+
+ This is a small wrapper Makefile that calls the top-level Makefile to build
+ the targets from a single gyp file (i.e. a sub-project).
+
+ Arguments:
+ output_filename: sub-project Makefile name to write
+ makefile_path: path to the top-level Makefile
+ targets: list of "all" targets for this sub-project
+ build_dir: build output directory, relative to the sub-project
+ """
+ gyp.common.EnsureDirExists(output_filename)
+ self.fp = open(output_filename, 'w')
+ self.fp.write(header)
+ # For consistency with other builders, put sub-project build output in the
+ # sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
+ self.WriteLn('export builddir_name ?= %s' %
+ os.path.join(os.path.dirname(output_filename), build_dir))
+ self.WriteLn('.PHONY: all')
+ self.WriteLn('all:')
+ if makefile_path:
+ makefile_path = ' -C ' + makefile_path
+ self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
+ self.fp.close()
+
+
+ def WriteActions(self, actions, extra_sources, extra_outputs,
+ extra_mac_bundle_resources, part_of_all):
+ """Write Makefile code for any 'actions' from the gyp input.
+
+ extra_sources: a list that will be filled in with newly generated source
+ files, if any
+ extra_outputs: a list that will be filled in with any outputs of these
+ actions (used to make other pieces dependent on these
+ actions)
+ part_of_all: flag indicating this target is part of 'all'
+ """
+ env = self.GetSortedXcodeEnv()
+ for action in actions:
+ name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
+ action['action_name']))
+ self.WriteLn('### Rules for action "%s":' % action['action_name'])
+ inputs = action['inputs']
+ outputs = action['outputs']
+
+ # Build up a list of outputs.
+ # Collect the output dirs we'll need.
+ dirs = set()
+ for out in outputs:
+ dir = os.path.split(out)[0]
+ if dir:
+ dirs.add(dir)
+ if int(action.get('process_outputs_as_sources', False)):
+ extra_sources += outputs
+ if int(action.get('process_outputs_as_mac_bundle_resources', False)):
+ extra_mac_bundle_resources += outputs
+
+ # Write the actual command.
+ action_commands = action['action']
+ if self.flavor == 'mac':
+ action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
+ for command in action_commands]
+ command = gyp.common.EncodePOSIXShellList(action_commands)
+ if 'message' in action:
+ self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
+ else:
+ self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
+ if len(dirs) > 0:
+ command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
+
+ cd_action = 'cd %s; ' % Sourceify(self.path or '.')
+
+ # command and cd_action get written to a toplevel variable called
+ # cmd_foo. Toplevel variables can't handle things that change per
+ # makefile like $(TARGET), so hardcode the target.
+ command = command.replace('$(TARGET)', self.target)
+ cd_action = cd_action.replace('$(TARGET)', self.target)
+
+ # Set LD_LIBRARY_PATH in case the action runs an executable from this
+ # build which links to shared libs from this build.
+ # actions run on the host, so they should in theory only use host
+ # libraries, but until everything is made cross-compile safe, also use
+ # target libraries.
+ # TODO(piman): when everything is cross-compile safe, remove lib.target
+ self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
+ '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
+ 'export LD_LIBRARY_PATH; '
+ '%s%s'
+ % (name, cd_action, command))
+ self.WriteLn()
+ outputs = [self.Absolutify(o) for o in outputs]
+ # The makefile rules are all relative to the top dir, but the gyp actions
+ # are defined relative to their containing dir. This replaces the obj
+ # variable for the action rule with an absolute version so that the output
+ # goes in the right place.
+ # Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
+ # it's superfluous for the "extra outputs", and this avoids accidentally
+ # writing duplicate dummy rules for those outputs.
+ # Same for environment.
+ self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
+ self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
+ self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
+
+ for input in inputs:
+ assert ' ' not in input, (
+ "Spaces in action input filenames not supported (%s)" % input)
+ for output in outputs:
+ assert ' ' not in output, (
+ "Spaces in action output filenames not supported (%s)" % output)
+
+ # See the comment in WriteCopies about expanding env vars.
+ outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
+ inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
+
+ self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
+ part_of_all=part_of_all, command=name)
+
+ # Stuff the outputs in a variable so we can refer to them later.
+ outputs_variable = 'action_%s_outputs' % name
+ self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
+ extra_outputs.append('$(%s)' % outputs_variable)
+ self.WriteLn()
+
+ self.WriteLn()
+
+
+ def WriteRules(self, rules, extra_sources, extra_outputs,
+ extra_mac_bundle_resources, part_of_all):
+ """Write Makefile code for any 'rules' from the gyp input.
+
+ extra_sources: a list that will be filled in with newly generated source
+ files, if any
+ extra_outputs: a list that will be filled in with any outputs of these
+ rules (used to make other pieces dependent on these rules)
+ part_of_all: flag indicating this target is part of 'all'
+ """
+ env = self.GetSortedXcodeEnv()
+ for rule in rules:
+ name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
+ rule['rule_name']))
+ count = 0
+ self.WriteLn('### Generated for rule %s:' % name)
+
+ all_outputs = []
+
+ for rule_source in rule.get('rule_sources', []):
+ dirs = set()
+ (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
+ (rule_source_root, rule_source_ext) = \
+ os.path.splitext(rule_source_basename)
+
+ outputs = [self.ExpandInputRoot(out, rule_source_root,
+ rule_source_dirname)
+ for out in rule['outputs']]
+
+ for out in outputs:
+ dir = os.path.dirname(out)
+ if dir:
+ dirs.add(dir)
+ if int(rule.get('process_outputs_as_sources', False)):
+ extra_sources += outputs
+ if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
+ extra_mac_bundle_resources += outputs
+ inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
+ rule.get('inputs', [])))
+ actions = ['$(call do_cmd,%s_%d)' % (name, count)]
+
+ if name == 'resources_grit':
+ # HACK: This is ugly. Grit intentionally doesn't touch the
+ # timestamp of its output file when the file doesn't change,
+ # which is fine in hash-based dependency systems like scons
+ # and forge, but not kosher in the make world. After some
+ # discussion, hacking around it here seems like the least
+ # amount of pain.
+ actions += ['@touch --no-create $@']
+
+ # See the comment in WriteCopies about expanding env vars.
+ outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
+ inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
+
+ outputs = [self.Absolutify(o) for o in outputs]
+ all_outputs += outputs
+ # Only write the 'obj' and 'builddir' rules for the "primary" output
+ # (:1); it's superfluous for the "extra outputs", and this avoids
+ # accidentally writing duplicate dummy rules for those outputs.
+ self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
+ self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
+ self.WriteMakeRule(outputs, inputs, actions,
+ command="%s_%d" % (name, count))
+ # Spaces in rule filenames are not supported, but rule variables have
+ # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
+ # The spaces within the variables are valid, so remove the variables
+ # before checking.
+ variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
+ for output in outputs:
+ output = re.sub(variables_with_spaces, '', output)
+ assert ' ' not in output, (
+ "Spaces in rule filenames not yet supported (%s)" % output)
+ self.WriteLn('all_deps += %s' % ' '.join(outputs))
+
+ action = [self.ExpandInputRoot(ac, rule_source_root,
+ rule_source_dirname)
+ for ac in rule['action']]
+ mkdirs = ''
+ if len(dirs) > 0:
+ mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
+ cd_action = 'cd %s; ' % Sourceify(self.path or '.')
+
+ # action, cd_action, and mkdirs get written to a toplevel variable
+ # called cmd_foo. Toplevel variables can't handle things that change
+ # per makefile like $(TARGET), so hardcode the target.
+ if self.flavor == 'mac':
+ action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
+ for command in action]
+ action = gyp.common.EncodePOSIXShellList(action)
+ action = action.replace('$(TARGET)', self.target)
+ cd_action = cd_action.replace('$(TARGET)', self.target)
+ mkdirs = mkdirs.replace('$(TARGET)', self.target)
+
+ # Set LD_LIBRARY_PATH in case the rule runs an executable from this
+ # build which links to shared libs from this build.
+ # rules run on the host, so they should in theory only use host
+ # libraries, but until everything is made cross-compile safe, also use
+ # target libraries.
+ # TODO(piman): when everything is cross-compile safe, remove lib.target
+ self.WriteLn(
+ "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
+ "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
+ "export LD_LIBRARY_PATH; "
+ "%(cd_action)s%(mkdirs)s%(action)s" % {
+ 'action': action,
+ 'cd_action': cd_action,
+ 'count': count,
+ 'mkdirs': mkdirs,
+ 'name': name,
+ })
+ self.WriteLn(
+ 'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
+ 'count': count,
+ 'name': name,
+ })
+ self.WriteLn()
+ count += 1
+
+ outputs_variable = 'rule_%s_outputs' % name
+ self.WriteList(all_outputs, outputs_variable)
+ extra_outputs.append('$(%s)' % outputs_variable)
+
+ self.WriteLn('### Finished generating for rule: %s' % name)
+ self.WriteLn()
+ self.WriteLn('### Finished generating for all rules')
+ self.WriteLn('')
+
+
+ def WriteCopies(self, copies, extra_outputs, part_of_all):
+ """Write Makefile code for any 'copies' from the gyp input.
+
+ extra_outputs: a list that will be filled in with any outputs of this action
+ (used to make other pieces dependent on this action)
+ part_of_all: flag indicating this target is part of 'all'
+ """
+ self.WriteLn('### Generated for copy rule.')
+
+ variable = StringToMakefileVariable(self.qualified_target + '_copies')
+ outputs = []
+ for copy in copies:
+ for path in copy['files']:
+ # Absolutify() may call normpath, and will strip trailing slashes.
+ path = Sourceify(self.Absolutify(path))
+ filename = os.path.split(path)[1]
+ output = Sourceify(self.Absolutify(os.path.join(copy['destination'],
+ filename)))
+
+ # If the output path has variables in it, which happens in practice for
+ # 'copies', writing the environment as target-local doesn't work,
+ # because the variables are already needed for the target name.
+ # Copying the environment variables into global make variables doesn't
+ # work either, because then the .d files will potentially contain spaces
+ # after variable expansion, and .d file handling cannot handle spaces.
+ # As a workaround, manually expand variables at gyp time. Since 'copies'
+ # can't run scripts, there's no need to write the env then.
+ # WriteDoCmd() will escape spaces for .d files.
+ env = self.GetSortedXcodeEnv()
+ output = gyp.xcode_emulation.ExpandEnvVars(output, env)
+ path = gyp.xcode_emulation.ExpandEnvVars(path, env)
+ self.WriteDoCmd([output], [path], 'copy', part_of_all)
+ outputs.append(output)
+ self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, outputs))))
+ extra_outputs.append('$(%s)' % variable)
+ self.WriteLn()
+
+
+ def WriteMacBundleResources(self, resources, bundle_deps):
+ """Writes Makefile code for 'mac_bundle_resources'."""
+ self.WriteLn('### Generated for mac_bundle_resources')
+
+ for output, res in gyp.xcode_emulation.GetMacBundleResources(
+ generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
+ map(Sourceify, map(self.Absolutify, resources))):
+ _, ext = os.path.splitext(output)
+ if ext != '.xcassets':
+        # Make does not support '.xcassets' emulation.
+ self.WriteDoCmd([output], [res], 'mac_tool,,,copy-bundle-resource',
+ part_of_all=True)
+ bundle_deps.append(output)
+
+
+ def WriteMacInfoPlist(self, bundle_deps):
+ """Write Makefile code for bundle Info.plist files."""
+ info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
+ generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
+ lambda p: Sourceify(self.Absolutify(p)))
+ if not info_plist:
+ return
+ if defines:
+ # Create an intermediate file to store preprocessed results.
+ intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
+ os.path.basename(info_plist))
+ self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
+ quoter=EscapeCppDefine)
+ self.WriteMakeRule([intermediate_plist], [info_plist],
+ ['$(call do_cmd,infoplist)',
+ # "Convert" the plist so that any weird whitespace changes from the
+ # preprocessor do not affect the XML parser in mac_tool.
+ '@plutil -convert xml1 $@ $@'])
+ info_plist = intermediate_plist
+    # Plists can contain envvars, which get substituted into the file.
+ self.WriteSortedXcodeEnv(
+ out, self.GetSortedXcodeEnv(additional_settings=extra_env))
+ self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
+ part_of_all=True)
+ bundle_deps.append(out)
+
+
+ def WriteSources(self, configs, deps, sources,
+ extra_outputs, extra_link_deps,
+ part_of_all, precompiled_header):
+ """Write Makefile code for any 'sources' from the gyp input.
+ These are source files necessary to build the current target.
+
+ configs, deps, sources: input from gyp.
+ extra_outputs: a list of extra outputs this action should be dependent on;
+ used to serialize action/rules before compilation
+ extra_link_deps: a list that will be filled in with any outputs of
+ compilation (to be used in link lines)
+ part_of_all: flag indicating this target is part of 'all'
+ """
+
+ # Write configuration-specific variables for CFLAGS, etc.
+ for configname in sorted(configs.keys()):
+ config = configs[configname]
+ self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
+ quoter=EscapeCppDefine)
+
+ if self.flavor == 'mac':
+ cflags = self.xcode_settings.GetCflags(configname)
+ cflags_c = self.xcode_settings.GetCflagsC(configname)
+ cflags_cc = self.xcode_settings.GetCflagsCC(configname)
+ cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
+ cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
+ else:
+ cflags = config.get('cflags')
+ cflags_c = config.get('cflags_c')
+ cflags_cc = config.get('cflags_cc')
+
+ self.WriteLn("# Flags passed to all source files.");
+ self.WriteList(cflags, 'CFLAGS_%s' % configname)
+ self.WriteLn("# Flags passed to only C files.");
+ self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
+ self.WriteLn("# Flags passed to only C++ files.");
+ self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
+ if self.flavor == 'mac':
+ self.WriteLn("# Flags passed to only ObjC files.");
+ self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
+ self.WriteLn("# Flags passed to only ObjC++ files.");
+ self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
+ includes = config.get('include_dirs')
+ if includes:
+ includes = [Sourceify(self.Absolutify(include)) for include in includes]
+ self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')
+
+    # Materialize the filter so it survives being iterated more than once
+    # (it is consumed both here and by GetObjDependencies below).
+    compilable = list(filter(Compilable, sources))
+ objs = [self.Objectify(self.Absolutify(Target(x))) for x in compilable]
+ self.WriteList(objs, 'OBJS')
+
+ for obj in objs:
+ assert ' ' not in obj, (
+ "Spaces in object filenames not supported (%s)" % obj)
+ self.WriteLn('# Add to the list of files we specially track '
+ 'dependencies for.')
+ self.WriteLn('all_deps += $(OBJS)')
+ self.WriteLn()
+
+ # Make sure our dependencies are built first.
+ if deps:
+ self.WriteMakeRule(['$(OBJS)'], deps,
+ comment = 'Make sure our dependencies are built '
+ 'before any of us.',
+ order_only = True)
+
+ # Make sure the actions and rules run first.
+ # If they generate any extra headers etc., the per-.o file dep tracking
+ # will catch the proper rebuilds, so order only is still ok here.
+ if extra_outputs:
+ self.WriteMakeRule(['$(OBJS)'], extra_outputs,
+ comment = 'Make sure our actions/rules run '
+ 'before any of us.',
+ order_only = True)
+
+    pchdeps = precompiled_header.GetObjDependencies(compilable, objs)
+ if pchdeps:
+ self.WriteLn('# Dependencies from obj files to their precompiled headers')
+ for source, obj, gch in pchdeps:
+ self.WriteLn('%s: %s' % (obj, gch))
+ self.WriteLn('# End precompiled header dependencies')
+
+ if objs:
+ extra_link_deps.append('$(OBJS)')
+ self.WriteLn("""\
+# CFLAGS et al overrides must be target-local.
+# See "Target-specific Variable Values" in the GNU Make manual.""")
+ self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
+ self.WriteLn("$(OBJS): GYP_CFLAGS := "
+ "$(DEFS_$(BUILDTYPE)) "
+ "$(INCS_$(BUILDTYPE)) "
+ "%s " % precompiled_header.GetInclude('c') +
+ "$(CFLAGS_$(BUILDTYPE)) "
+ "$(CFLAGS_C_$(BUILDTYPE))")
+ self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
+ "$(DEFS_$(BUILDTYPE)) "
+ "$(INCS_$(BUILDTYPE)) "
+ "%s " % precompiled_header.GetInclude('cc') +
+ "$(CFLAGS_$(BUILDTYPE)) "
+ "$(CFLAGS_CC_$(BUILDTYPE))")
+ if self.flavor == 'mac':
+ self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
+ "$(DEFS_$(BUILDTYPE)) "
+ "$(INCS_$(BUILDTYPE)) "
+ "%s " % precompiled_header.GetInclude('m') +
+ "$(CFLAGS_$(BUILDTYPE)) "
+ "$(CFLAGS_C_$(BUILDTYPE)) "
+ "$(CFLAGS_OBJC_$(BUILDTYPE))")
+ self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
+ "$(DEFS_$(BUILDTYPE)) "
+ "$(INCS_$(BUILDTYPE)) "
+ "%s " % precompiled_header.GetInclude('mm') +
+ "$(CFLAGS_$(BUILDTYPE)) "
+ "$(CFLAGS_CC_$(BUILDTYPE)) "
+ "$(CFLAGS_OBJCC_$(BUILDTYPE))")
+
+ self.WritePchTargets(precompiled_header.GetPchBuildCommands())
+
+ # If there are any object files in our input file list, link them into our
+ # output.
+ extra_link_deps += [source for source in sources if Linkable(source)]
+
+ self.WriteLn()
+
+ def WritePchTargets(self, pch_commands):
+ """Writes make rules to compile prefix headers."""
+ if not pch_commands:
+ return
+
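+    # A sketch of the emitted make code for a hypothetical C prefix header
+    # pch.h (lang_flag comes from the pch_commands tuples, e.g. '-x c-header'):
+    #   <gch>: GYP_PCH_CFLAGS := -x c-header $(DEFS_$(BUILDTYPE)) ...
+    #   <gch>: pch.h FORCE_DO_CMD
+    #   	@$(call do_cmd,pch_c,1)
+    #   all_deps += <gch>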
+ for gch, lang_flag, lang, input in pch_commands:
+ extra_flags = {
+ 'c': '$(CFLAGS_C_$(BUILDTYPE))',
+ 'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
+ 'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
+ 'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
+ }[lang]
+ var_name = {
+ 'c': 'GYP_PCH_CFLAGS',
+ 'cc': 'GYP_PCH_CXXFLAGS',
+ 'm': 'GYP_PCH_OBJCFLAGS',
+ 'mm': 'GYP_PCH_OBJCXXFLAGS',
+ }[lang]
+ self.WriteLn("%s: %s := %s " % (gch, var_name, lang_flag) +
+ "$(DEFS_$(BUILDTYPE)) "
+ "$(INCS_$(BUILDTYPE)) "
+ "$(CFLAGS_$(BUILDTYPE)) " +
+ extra_flags)
+
+ self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, input))
+ self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
+ self.WriteLn('')
+ assert ' ' not in gch, (
+ "Spaces in gch filenames not supported (%s)" % gch)
+ self.WriteLn('all_deps += %s' % gch)
+ self.WriteLn('')
+
+
+ def ComputeOutputBasename(self, spec):
+ """Return the 'output basename' of a gyp spec.
+
+ E.g., the loadable module 'foobar' in directory 'baz' will produce
+ 'libfoobar.so'
+ """
+ assert not self.is_mac_bundle
+
+ if self.flavor == 'mac' and self.type in (
+ 'static_library', 'executable', 'shared_library', 'loadable_module'):
+ return self.xcode_settings.GetExecutablePath()
+
+ target = spec['target_name']
+ target_prefix = ''
+ target_ext = ''
+ if self.type == 'static_library':
+ if target[:3] == 'lib':
+ target = target[3:]
+ target_prefix = 'lib'
+ target_ext = '.a'
+ elif self.type in ('loadable_module', 'shared_library'):
+ if target[:3] == 'lib':
+ target = target[3:]
+ target_prefix = 'lib'
+ if self.flavor == 'aix':
+ target_ext = '.a'
+ else:
+ target_ext = '.so'
+ elif self.type == 'none':
+ target = '%s.stamp' % target
+ elif self.type != 'executable':
+ print(("ERROR: What output file should be generated?",
+ "type", self.type, "target", target))
+
+ target_prefix = spec.get('product_prefix', target_prefix)
+ target = spec.get('product_name', target)
+ product_ext = spec.get('product_extension')
+ if product_ext:
+ target_ext = '.' + product_ext
+
+ return target_prefix + target + target_ext
+
+
+ def _InstallImmediately(self):
+ return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
+ 'static_library', 'executable', 'shared_library', 'loadable_module')
+
+
+ def ComputeOutput(self, spec):
+ """Return the 'output' (full output path) of a gyp spec.
+
+ E.g., the loadable module 'foobar' in directory 'baz' will produce
+ '$(obj)/baz/libfoobar.so'
+ """
+ assert not self.is_mac_bundle
+
+ path = os.path.join('$(obj).' + self.toolset, self.path)
+ if self.type == 'executable' or self._InstallImmediately():
+ path = '$(builddir)'
+ path = spec.get('product_dir', path)
+ return os.path.join(path, self.ComputeOutputBasename(spec))
+
+
+ def ComputeMacBundleOutput(self, spec):
+ """Return the 'output' (full output path) to a bundle output directory."""
+ assert self.is_mac_bundle
+ path = generator_default_variables['PRODUCT_DIR']
+ return os.path.join(path, self.xcode_settings.GetWrapperName())
+
+
+ def ComputeMacBundleBinaryOutput(self, spec):
+ """Return the 'output' (full output path) to the binary in a bundle."""
+ path = generator_default_variables['PRODUCT_DIR']
+ return os.path.join(path, self.xcode_settings.GetExecutablePath())
+
+
+ def ComputeDeps(self, spec):
+ """Compute the dependencies of a gyp spec.
+
+ Returns a tuple (deps, link_deps), where each is a list of
+ filenames that will need to be put in front of make for either
+ building (deps) or linking (link_deps).
+ """
+ deps = []
+ link_deps = []
+ if 'dependencies' in spec:
+ deps.extend([target_outputs[dep] for dep in spec['dependencies']
+ if target_outputs[dep]])
+ for dep in spec['dependencies']:
+ if dep in target_link_deps:
+ link_deps.append(target_link_deps[dep])
+ deps.extend(link_deps)
+ # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
+ # This hack makes it work:
+ # link_deps.extend(spec.get('libraries', []))
+ return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
+
+
+ def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
+ self.WriteMakeRule([self.output_binary], extra_outputs,
+ comment = 'Build our special outputs first.',
+ order_only = True)
+
+
+ def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
+ extra_outputs, part_of_all):
+ """Write Makefile code to produce the final target of the gyp spec.
+
+ spec, configs: input from gyp.
+ deps, link_deps: dependency lists; see ComputeDeps()
+ extra_outputs: any extra outputs that our target should depend on
+ part_of_all: flag indicating this target is part of 'all'
+ """
+
+ self.WriteLn('### Rules for final target.')
+
+ if extra_outputs:
+ self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
+ self.WriteMakeRule(extra_outputs, deps,
+ comment=('Preserve order dependency of '
+ 'special output on deps.'),
+ order_only = True)
+
+ target_postbuilds = {}
+ if self.type != 'none':
+ for configname in sorted(configs.keys()):
+ config = configs[configname]
+ if self.flavor == 'mac':
+ ldflags = self.xcode_settings.GetLdflags(configname,
+ generator_default_variables['PRODUCT_DIR'],
+ lambda p: Sourceify(self.Absolutify(p)))
+
+ # TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
+ gyp_to_build = gyp.common.InvertRelativePath(self.path)
+ target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
+ configname,
+ QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
+ self.output))),
+ QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
+ self.output_binary))))
+ if target_postbuild:
+ target_postbuilds[configname] = target_postbuild
+ else:
+ ldflags = config.get('ldflags', [])
+ # Compute an rpath for this output if needed.
+ if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
+ # We want to get the literal string "$ORIGIN" into the link command,
+ # so we need lots of escaping.
+ ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
+ ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
+ self.toolset)
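+          # Escaping trace: the makefile receives '\$$ORIGIN'; make
+          # collapses '$$' to '$', the shell strips the backslash, and the
+          # linker sees the literal '$ORIGIN' needed for a relocatable rpath.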
+ library_dirs = config.get('library_dirs', [])
+ ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
+ self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
+ if self.flavor == 'mac':
+ self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
+ 'LIBTOOLFLAGS_%s' % configname)
+ libraries = spec.get('libraries')
+ if libraries:
+ # Remove duplicate entries
+ libraries = gyp.common.uniquer(libraries)
+ if self.flavor == 'mac':
+ libraries = self.xcode_settings.AdjustLibraries(libraries)
+ self.WriteList(libraries, 'LIBS')
+ self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
+ QuoteSpaces(self.output_binary))
+ self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))
+
+ if self.flavor == 'mac':
+ self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
+ QuoteSpaces(self.output_binary))
+
+ # Postbuild actions. Like actions, but implicitly depend on the target's
+ # output.
+ postbuilds = []
+ if self.flavor == 'mac':
+ if target_postbuilds:
+ postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
+ postbuilds.extend(
+ gyp.xcode_emulation.GetSpecPostbuildCommands(spec))
+
+ if postbuilds:
+ # Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
+ # so we must output its definition first, since we declare variables
+ # using ":=".
+ self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())
+
+ for configname in target_postbuilds:
+ self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
+ (QuoteSpaces(self.output),
+ configname,
+ gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))
+
+ # Postbuilds expect to be run in the gyp file's directory, so insert an
+ # implicit postbuild to cd to there.
+ postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
+ for i, postbuild in enumerate(postbuilds):
+ if not postbuild.startswith('$'):
+ postbuilds[i] = EscapeShellArgument(postbuild)
+ self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
+ self.WriteLn('%s: POSTBUILDS := %s' % (
+ QuoteSpaces(self.output), ' '.join(postbuilds)))
+
+ # A bundle directory depends on its dependencies such as bundle resources
+ # and bundle binary. When all dependencies have been built, the bundle
+ # needs to be packaged.
+ if self.is_mac_bundle:
+ # If the framework doesn't contain a binary, then nothing depends
+ # on the actions -- make the framework depend on them directly too.
+ self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)
+
+ # Bundle dependencies. Note that the code below adds actions to this
+ # target, so if you move these two lines, move the lines below as well.
+ self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
+ self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))
+
+ # After the framework is built, package it. Needs to happen before
+ # postbuilds, since postbuilds depend on this.
+ if self.type in ('shared_library', 'loadable_module'):
+ self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
+ self.xcode_settings.GetFrameworkVersion())
+
+ # Bundle postbuilds can depend on the whole bundle, so run them after
+ # the bundle is packaged, not already after the bundle binary is done.
+ if postbuilds:
+ self.WriteLn('\t@$(call do_postbuilds)')
+ postbuilds = [] # Don't write postbuilds for target's output.
+
+ # Needed by test/mac/gyptest-rebuild.py.
+ self.WriteLn('\t@true # No-op, used by tests')
+
+ # Since this target depends on binary and resources which are in
+      # nested subfolders, the framework directory will usually be older than
+      # its dependencies. To prevent this rule from executing
+      # on every build (expensive, especially with postbuilds), explicitly
+      # update the time on the framework directory.
+ self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))
+
+ if postbuilds:
+ assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
+ 'on the bundle, not the binary (target \'%s\')' % self.target)
+ assert 'product_dir' not in spec, ('Postbuilds do not work with '
+ 'custom product_dir')
+
+ if self.type == 'executable':
+ self.WriteLn('%s: LD_INPUTS := %s' % (
+ QuoteSpaces(self.output_binary),
+ ' '.join(map(QuoteSpaces, link_deps))))
+ if self.toolset == 'host' and self.flavor == 'android':
+ self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
+ part_of_all, postbuilds=postbuilds)
+ else:
+ self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
+ postbuilds=postbuilds)
+
+ elif self.type == 'static_library':
+ for link_dep in link_deps:
+ assert ' ' not in link_dep, (
+ "Spaces in alink input filenames not supported (%s)" % link_dep)
+ if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
+ self.is_standalone_static_library):
+ self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
+ part_of_all, postbuilds=postbuilds)
+ else:
+ self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
+ postbuilds=postbuilds)
+ elif self.type == 'shared_library':
+ self.WriteLn('%s: LD_INPUTS := %s' % (
+ QuoteSpaces(self.output_binary),
+ ' '.join(map(QuoteSpaces, link_deps))))
+ self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
+ postbuilds=postbuilds)
+ elif self.type == 'loadable_module':
+ for link_dep in link_deps:
+ assert ' ' not in link_dep, (
+ "Spaces in module input filenames not supported (%s)" % link_dep)
+ if self.toolset == 'host' and self.flavor == 'android':
+ self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
+ part_of_all, postbuilds=postbuilds)
+ else:
+ self.WriteDoCmd(
+ [self.output_binary], link_deps, 'solink_module', part_of_all,
+ postbuilds=postbuilds)
+ elif self.type == 'none':
+ # Write a stamp line.
+ self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
+ postbuilds=postbuilds)
+ else:
+ print("WARNING: no output for", self.type, self.target)
+
+ # Add an alias for each target (if there are any outputs).
+ # Installable target aliases are created below.
+ if ((self.output and self.output != self.target) and
+ (self.type not in self._INSTALLABLE_TARGETS)):
+ self.WriteMakeRule([self.target], [self.output],
+ comment='Add target alias', phony = True)
+ if part_of_all:
+ self.WriteMakeRule(['all'], [self.target],
+ comment = 'Add target alias to "all" target.',
+ phony = True)
+
+ # Add special-case rules for our installable targets.
+ # 1) They need to install to the build dir or "product" dir.
+ # 2) They get shortcuts for building (e.g. "make chrome").
+ # 3) They are part of "make all".
+ if (self.type in self._INSTALLABLE_TARGETS or
+ self.is_standalone_static_library):
+ if self.type == 'shared_library':
+ file_desc = 'shared library'
+ elif self.type == 'static_library':
+ file_desc = 'static library'
+ else:
+ file_desc = 'executable'
+ install_path = self._InstallableTargetInstallPath()
+ installable_deps = [self.output]
+    if (self.flavor == 'mac' and 'product_dir' not in spec and
+ self.toolset == 'target'):
+ # On mac, products are created in install_path immediately.
+ assert install_path == self.output, '%s != %s' % (
+ install_path, self.output)
+
+ # Point the target alias to the final binary output.
+ self.WriteMakeRule([self.target], [install_path],
+ comment='Add target alias', phony = True)
+ if install_path != self.output:
+ assert not self.is_mac_bundle # See comment a few lines above.
+ self.WriteDoCmd([install_path], [self.output], 'copy',
+ comment = 'Copy this to the %s output path.' %
+ file_desc, part_of_all=part_of_all)
+ installable_deps.append(install_path)
+ if self.output != self.alias and self.alias != self.target:
+ self.WriteMakeRule([self.alias], installable_deps,
+ comment = 'Short alias for building this %s.' %
+ file_desc, phony = True)
+ if part_of_all:
+ self.WriteMakeRule(['all'], [install_path],
+ comment = 'Add %s to "all" target.' % file_desc,
+ phony = True)
+
+
+ def WriteList(self, value_list, variable=None, prefix='',
+ quoter=QuoteIfNecessary):
+ """Write a variable definition that is a list of values.
+
+ E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
+ foo = blaha blahb
+ but in a pretty-printed style.
+ """
+ values = ''
+ if value_list:
+ value_list = [quoter(prefix + l) for l in value_list]
+ values = ' \\\n\t' + ' \\\n\t'.join(value_list)
+ self.fp.write('%s :=%s\n\n' % (variable, values))
+
+
+ def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
+ postbuilds=False):
+ """Write a Makefile rule that uses do_cmd.
+
+ This makes the outputs dependent on the command line that was run,
+    as well as supporting the V= make command-line flag.
+ """
+ suffix = ''
+ if postbuilds:
+ assert ',' not in command
+ suffix = ',,1' # Tell do_cmd to honor $POSTBUILDS
+ self.WriteMakeRule(outputs, inputs,
+ actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
+ comment = comment,
+ command = command,
+ force = True)
+ # Add our outputs to the list of targets we read depfiles from.
+ # all_deps is only used for deps file reading, and for deps files we replace
+ # spaces with ? because escaping doesn't work with make's $(sort) and
+ # other functions.
+ outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
+ self.WriteLn('all_deps += %s' % ' '.join(outputs))
+
+
+ def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
+ order_only=False, force=False, phony=False, command=None):
+ """Write a Makefile rule, with some extra tricks.
+
+ outputs: a list of outputs for the rule (note: this is not directly
+ supported by make; see comments below)
+ inputs: a list of inputs for the rule
+ actions: a list of shell commands to run for the rule
+ comment: a comment to put in the Makefile above the rule (also useful
+ for making this Python script's code self-documenting)
+ order_only: if true, makes the dependency order-only
+ force: if true, include FORCE_DO_CMD as an order-only dep
+    phony: if true, the rule does not actually generate the named output; the
+           output is just a name under which to run the rule
+ command: (optional) command name to generate unambiguous labels
+ """
+ outputs = [QuoteSpaces(o) for o in outputs]
+ inputs = map(QuoteSpaces, inputs)
+
+ if comment:
+ self.WriteLn('# ' + comment)
+ if phony:
+ self.WriteLn('.PHONY: ' + ' '.join(outputs))
+ if actions:
+ self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
+ force_append = ' FORCE_DO_CMD' if force else ''
+
+ if order_only:
+ # Order only rule: Just write a simple rule.
+ # TODO(evanm): just make order_only a list of deps instead of this hack.
+ self.WriteLn('%s: | %s%s' %
+ (' '.join(outputs), ' '.join(inputs), force_append))
+ elif len(outputs) == 1:
+ # Regular rule, one output: Just write a simple rule.
+ self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
+ else:
+ # Regular rule, more than one output: Multiple outputs are tricky in
+ # make. We will write three rules:
+ # - All outputs depend on an intermediate file.
+ # - Make .INTERMEDIATE depend on the intermediate.
+ # - The intermediate file depends on the inputs and executes the
+ # actual command.
+ # - The intermediate recipe will 'touch' the intermediate file.
+      # - The multi-output rule will have a do-nothing recipe.
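+      # A sketch of the emitted rules, with a shortened digest and
+      # force=True (which appends FORCE_DO_CMD to the inputs line):
+      #   out1 out2: d41d8c.intermediate
+      #   	@:
+      #   .INTERMEDIATE: d41d8c.intermediate
+      #   d41d8c.intermediate: input.txt FORCE_DO_CMD
+      #   	$(call do_cmd,touch)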
+
+ # Hash the target name to avoid generating overlong filenames.
+ cmdstring = (command if command else self.target).encode('utf-8')
+ cmddigest = hashlib.sha1(cmdstring).hexdigest()
+ intermediate = "%s.intermediate" % (cmddigest)
+ self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
+      self.WriteLn('\t@:')
+ self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
+ self.WriteLn('%s: %s%s' %
+ (intermediate, ' '.join(inputs), force_append))
+ actions.insert(0, '$(call do_cmd,touch)')
+
+ if actions:
+ for action in actions:
+ self.WriteLn('\t%s' % action)
+ self.WriteLn()
+
+
+ def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
+ """Write a set of LOCAL_XXX definitions for Android NDK.
+
+ These variable definitions will be used by Android NDK but do nothing for
+ non-Android applications.
+
+ Arguments:
+ module_name: Android NDK module name, which must be unique among all
+ module names.
+ all_sources: A list of source files (will be filtered by Compilable).
+ link_deps: A list of link dependencies, which must be sorted in
+ the order from dependencies to dependents.
+ """
+ if self.type not in ('executable', 'shared_library', 'static_library'):
+ return
+
+ self.WriteLn('# Variable definitions for Android applications')
+ self.WriteLn('include $(CLEAR_VARS)')
+ self.WriteLn('LOCAL_MODULE := ' + module_name)
+ self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
+ '$(DEFS_$(BUILDTYPE)) '
+                 # LOCAL_CFLAGS is applied to both C and C++. There is
+ # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
+ # sources.
+ '$(CFLAGS_C_$(BUILDTYPE)) '
+ # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
+ # LOCAL_C_INCLUDES does not expect it. So put it in
+ # LOCAL_CFLAGS.
+ '$(INCS_$(BUILDTYPE))')
+ # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
+ self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
+ self.WriteLn('LOCAL_C_INCLUDES :=')
+ self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')
+
+ # Detect the C++ extension.
+ cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
+ default_cpp_ext = '.cpp'
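+    # Majority vote; e.g. three '.cc' files and one '.cpp' file give
+    # LOCAL_CPP_EXTENSION := .cc below.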
+ for filename in all_sources:
+ ext = os.path.splitext(filename)[1]
+ if ext in cpp_ext:
+ cpp_ext[ext] += 1
+ if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
+ default_cpp_ext = ext
+ self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)
+
+ self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
+ 'LOCAL_SRC_FILES')
+
+ # Filter out those which do not match prefix and suffix and produce
+ # the resulting list without prefix and suffix.
+ def DepsToModules(deps, prefix, suffix):
+ modules = []
+ for filepath in deps:
+ filename = os.path.basename(filepath)
+ if filename.startswith(prefix) and filename.endswith(suffix):
+ modules.append(filename[len(prefix):-len(suffix)])
+ return modules
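+    # E.g. DepsToModules(['out/libfoo.so'], 'lib', '.so') -> ['foo'].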
+
+ # Retrieve the default value of 'SHARED_LIB_SUFFIX'
+ params = {'flavor': 'linux'}
+ default_variables = {}
+ CalculateVariables(default_variables, params)
+
+ self.WriteList(
+ DepsToModules(link_deps,
+ generator_default_variables['SHARED_LIB_PREFIX'],
+ default_variables['SHARED_LIB_SUFFIX']),
+ 'LOCAL_SHARED_LIBRARIES')
+ self.WriteList(
+ DepsToModules(link_deps,
+ generator_default_variables['STATIC_LIB_PREFIX'],
+ generator_default_variables['STATIC_LIB_SUFFIX']),
+ 'LOCAL_STATIC_LIBRARIES')
+
+ if self.type == 'executable':
+ self.WriteLn('include $(BUILD_EXECUTABLE)')
+ elif self.type == 'shared_library':
+ self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
+ elif self.type == 'static_library':
+ self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
+ self.WriteLn()
+
+
+ def WriteLn(self, text=''):
+ self.fp.write(text + '\n')
+
+
+ def GetSortedXcodeEnv(self, additional_settings=None):
+ return gyp.xcode_emulation.GetSortedXcodeEnv(
+ self.xcode_settings, "$(abs_builddir)",
+ os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
+ additional_settings)
+
+
+ def GetSortedXcodePostbuildEnv(self):
+ # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
+ # TODO(thakis): It would be nice to have some general mechanism instead.
+ strip_save_file = self.xcode_settings.GetPerTargetSetting(
+ 'CHROMIUM_STRIP_SAVE_FILE', '')
+ # Even if strip_save_file is empty, explicitly write it. Else a postbuild
+ # might pick up an export from an earlier target.
+ return self.GetSortedXcodeEnv(
+ additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
+
+
+ def WriteSortedXcodeEnv(self, target, env):
+ for k, v in env:
+ # For
+ # foo := a\ b
+ # the escaped space does the right thing. For
+ # export foo := a\ b
+    # it does not -- the backslash is written to the env as a literal character.
+ # So don't escape spaces in |env[k]|.
+ self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
+
+
+ def Objectify(self, path):
+ """Convert a path to its output directory form."""
+ if '$(' in path:
+ path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/' % self.toolset)
+ if not '$(obj)' in path:
+ path = '$(obj).%s/$(TARGET)/%s' % (self.toolset, path)
+ return path
+
+
+ def Pchify(self, path, lang):
+ """Convert a prefix header path to its output directory form."""
+ path = self.Absolutify(path)
+ if '$(' in path:
+ path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
+ (self.toolset, lang))
+ return path
+ return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
+
+
+ def Absolutify(self, path):
+ """Convert a subdirectory-relative path into a base-relative path.
+ Skips over paths that contain variables."""
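+    # E.g. with self.path 'chrome': 'renderer/view.cc' ->
+    # 'chrome/renderer/view.cc', while '$(obj)/gen/' is only stripped
+    # to '$(obj)/gen'.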
+ if '$(' in path:
+ # Don't call normpath in this case, as it might collapse the
+ # path too aggressively if it features '..'. However it's still
+ # important to strip trailing slashes.
+ return path.rstrip('/')
+ return os.path.normpath(os.path.join(self.path, path))
+
+
+ def ExpandInputRoot(self, template, expansion, dirname):
+ if '%(INPUT_ROOT)s' not in template and '%(INPUT_DIRNAME)s' not in template:
+ return template
+ path = template % {
+ 'INPUT_ROOT': expansion,
+ 'INPUT_DIRNAME': dirname,
+ }
+ return path
+
+
+ def _InstallableTargetInstallPath(self):
+ """Returns the location of the final output for an installable target."""
+ # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
+ # rely on this. Emulate this behavior for mac.
+ if (self.type == 'shared_library' and
+ (self.flavor != 'mac' or self.toolset != 'target')):
+ # Install all shared libs into a common directory (per toolset) for
+ # convenient access with LD_LIBRARY_PATH.
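+      # E.g. a hypothetical target-toolset 'libfoo.so' lands in
+      # '$(builddir)/lib.target/libfoo.so'.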
+ return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
+ return '$(builddir)/' + self.alias
+
+
+def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
+ build_files):
+ """Write the target to regenerate the Makefile."""
+ options = params['options']
+ build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
+ for filename in params['build_files_arg']]
+
+ gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
+ options.toplevel_dir)
+ if not gyp_binary.startswith(os.sep):
+ gyp_binary = os.path.join('.', gyp_binary)
+
+ root_makefile.write(
+ "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
+ "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
+ "%(makefile_name)s: %(deps)s\n"
+ "\t$(call do_cmd,regen_makefile)\n\n" % {
+ 'makefile_name': makefile_name,
+ 'deps': ' '.join(map(Sourceify, build_files)),
+ 'cmd': gyp.common.EncodePOSIXShellList(
+ [gyp_binary, '-fmake'] +
+ gyp.RegenerateFlags(options) +
+ build_files_args)})
+
+
+def PerformBuild(data, configurations, params):
+ options = params['options']
+ for config in configurations:
+ arguments = ['make']
+ if options.toplevel_dir and options.toplevel_dir != '.':
+ arguments += '-C', options.toplevel_dir
+ arguments.append('BUILDTYPE=' + config)
+ print('Building [%s]: %s' % (config, arguments))
+ subprocess.check_call(arguments)
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ options = params['options']
+ flavor = gyp.common.GetFlavor(params)
+ generator_flags = params.get('generator_flags', {})
+ builddir_name = generator_flags.get('output_dir', 'out')
+ android_ndk_version = generator_flags.get('android_ndk_version', None)
+ default_target = generator_flags.get('default_target', 'all')
+
+ def CalculateMakefilePath(build_file, base_name):
+ """Determine where to write a Makefile for a given gyp file."""
+ # Paths in gyp files are relative to the .gyp file, but we want
+ # paths relative to the source root for the master makefile. Grab
+ # the path of the .gyp file as the base to relativize against.
+ # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
+ base_path = gyp.common.RelativePath(os.path.dirname(build_file),
+ options.depth)
+ # We write the file in the base_path directory.
+ output_file = os.path.join(options.depth, base_path, base_name)
+ if options.generator_output:
+ output_file = os.path.join(
+ options.depth, options.generator_output, base_path, base_name)
+ base_path = gyp.common.RelativePath(os.path.dirname(build_file),
+ options.toplevel_dir)
+ return base_path, output_file
+
+ # TODO: search for the first non-'Default' target. This can go
+ # away when we add verification that all targets have the
+ # necessary configurations.
+ default_configuration = None
+ toolsets = set([target_dicts[target]['toolset'] for target in target_list])
+ for target in target_list:
+ spec = target_dicts[target]
+ if spec['default_configuration'] != 'Default':
+ default_configuration = spec['default_configuration']
+ break
+ if not default_configuration:
+ default_configuration = 'Default'
+
+ srcdir = '.'
+ makefile_name = 'Makefile' + options.suffix
+ makefile_path = os.path.join(options.toplevel_dir, makefile_name)
+ if options.generator_output:
+ global srcdir_prefix
+ makefile_path = os.path.join(
+ options.toplevel_dir, options.generator_output, makefile_name)
+ srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
+ srcdir_prefix = '$(srcdir)/'
+
+  flock_command = 'flock'
+ copy_archive_arguments = '-af'
+ makedep_arguments = '-MMD'
+ header_params = {
+ 'default_target': default_target,
+ 'builddir': builddir_name,
+ 'default_configuration': default_configuration,
+ 'flock': flock_command,
+ 'flock_index': 1,
+ 'link_commands': LINK_COMMANDS_LINUX,
+ 'extra_commands': '',
+ 'srcdir': srcdir,
+ 'copy_archive_args': copy_archive_arguments,
+ 'makedep_args': makedep_arguments,
+ }
+ if flavor == 'mac':
+ flock_command = './gyp-mac-tool flock'
+ header_params.update({
+ 'flock': flock_command,
+ 'flock_index': 2,
+ 'link_commands': LINK_COMMANDS_MAC,
+ 'extra_commands': SHARED_HEADER_MAC_COMMANDS,
+ })
+ elif flavor == 'android':
+ header_params.update({
+ 'link_commands': LINK_COMMANDS_ANDROID,
+ })
+ elif flavor == 'zos':
+ copy_archive_arguments = '-fPR'
+ makedep_arguments = '-qmakedep=gcc'
+ header_params.update({
+ 'copy_archive_args': copy_archive_arguments,
+ 'makedep_args': makedep_arguments,
+ 'link_commands': LINK_COMMANDS_OS390,
+ })
+ elif flavor == 'solaris':
+ header_params.update({
+ 'flock': './gyp-flock-tool flock',
+ 'flock_index': 2,
+ })
+ elif flavor == 'freebsd':
+ # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
+ header_params.update({
+ 'flock': 'lockf',
+ })
+ elif flavor == 'openbsd':
+ copy_archive_arguments = '-pPRf'
+ header_params.update({
+ 'copy_archive_args': copy_archive_arguments,
+ })
+ elif flavor == 'aix':
+ copy_archive_arguments = '-pPRf'
+ header_params.update({
+ 'copy_archive_args': copy_archive_arguments,
+ 'link_commands': LINK_COMMANDS_AIX,
+ 'flock': './gyp-flock-tool flock',
+ 'flock_index': 2,
+ })
+
+ header_params.update({
+ 'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
+ 'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
+ 'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
+ 'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
+ 'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
+ 'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
+ 'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
+ 'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
+ })
+
+ build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
+ make_global_settings_array = data[build_file].get('make_global_settings', [])
+ wrappers = {}
+ for key, value in make_global_settings_array:
+ if key.endswith('_wrapper'):
+ wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
+ make_global_settings = ''
+ for key, value in make_global_settings_array:
+ if re.match('.*_wrapper', key):
+ continue
+ if value[0] != '$':
+ value = '$(abspath %s)' % value
+ wrapper = wrappers.get(key)
+ if wrapper:
+ value = '%s %s' % (wrapper, value)
+ del wrappers[key]
+ if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
+ make_global_settings += (
+ 'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
+ # Let gyp-time envvars win over global settings.
+ env_key = key.replace('.', '_') # CC.host -> CC_host
+ if env_key in os.environ:
+ value = os.environ[env_key]
+ make_global_settings += ' %s = %s\n' % (key, value)
+ make_global_settings += 'endif\n'
+ else:
+ make_global_settings += '%s ?= %s\n' % (key, value)
+ # TODO(ukai): define cmd when only wrapper is specified in
+ # make_global_settings.
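+  # Sketch for a hypothetical make_global_settings entry
+  # ('CC', '/usr/bin/clang'), assuming $CC is not set in the gyp-time
+  # environment:
+  #   ifneq (,$(filter $(origin CC), undefined default))
+  #     CC = $(abspath /usr/bin/clang)
+  #   endif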
+
+ header_params['make_global_settings'] = make_global_settings
+
+ gyp.common.EnsureDirExists(makefile_path)
+ root_makefile = open(makefile_path, 'w')
+ root_makefile.write(SHARED_HEADER % header_params)
+  # Currently all versions have the same effect, but in the future the
+  # behavior could differ.
+ if android_ndk_version:
+ root_makefile.write(
+ '# Define LOCAL_PATH for build of Android applications.\n'
+ 'LOCAL_PATH := $(call my-dir)\n'
+ '\n')
+ for toolset in toolsets:
+ root_makefile.write('TOOLSET := %s\n' % toolset)
+ WriteRootHeaderSuffixRules(root_makefile)
+
+ # Put build-time support tools next to the root Makefile.
+ dest_path = os.path.dirname(makefile_path)
+ gyp.common.CopyTool(flavor, dest_path)
+
+ # Find the list of targets that derive from the gyp file(s) being built.
+ needed_targets = set()
+ for build_file in params['build_files']:
+ for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
+ needed_targets.add(target)
+
+ build_files = set()
+ include_list = set()
+ for qualified_target in target_list:
+ build_file, target, toolset = gyp.common.ParseQualifiedTarget(
+ qualified_target)
+
+ this_make_global_settings = data[build_file].get('make_global_settings', [])
+ assert make_global_settings_array == this_make_global_settings, (
+ "make_global_settings needs to be the same for all targets. %s vs. %s" %
+ (this_make_global_settings, make_global_settings))
+
+ build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
+ included_files = data[build_file]['included_files']
+ for included_file in included_files:
+ # The included_files entries are relative to the dir of the build file
+ # that included them, so we have to undo that and then make them relative
+ # to the root dir.
+ relative_include_file = gyp.common.RelativePath(
+ gyp.common.UnrelativePath(included_file, build_file),
+ options.toplevel_dir)
+ abs_include_file = os.path.abspath(relative_include_file)
+ # If the include file is from the ~/.gyp dir, we should use absolute path
+ # so that relocating the src dir doesn't break the path.
+ if (params['home_dot_gyp'] and
+ abs_include_file.startswith(params['home_dot_gyp'])):
+ build_files.add(abs_include_file)
+ else:
+ build_files.add(relative_include_file)
+
+ base_path, output_file = CalculateMakefilePath(build_file,
+ target + '.' + toolset + options.suffix + '.mk')
+
+ spec = target_dicts[qualified_target]
+ configs = spec['configurations']
+
+ if flavor == 'mac':
+ gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
+
+ writer = MakefileWriter(generator_flags, flavor)
+ writer.Write(qualified_target, base_path, output_file, spec, configs,
+ part_of_all=qualified_target in needed_targets)
+
+ # Our root_makefile lives at the source root. Compute the relative path
+ # from there to the output_file for including.
+ mkfile_rel_path = gyp.common.RelativePath(output_file,
+ os.path.dirname(makefile_path))
+ include_list.add(mkfile_rel_path)
+
+ # Write out per-gyp (sub-project) Makefiles.
+ depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
+ for build_file in build_files:
+ # The paths in build_files were relativized above, so undo that before
+ # testing against the non-relativized items in target_list and before
+ # calculating the Makefile path.
+ build_file = os.path.join(depth_rel_path, build_file)
+ gyp_targets = [target_dicts[target]['target_name'] for target in target_list
+ if target.startswith(build_file) and
+ target in needed_targets]
+ # Only generate Makefiles for gyp files with targets.
+ if not gyp_targets:
+ continue
+ base_path, output_file = CalculateMakefilePath(build_file,
+ os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
+ makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
+ os.path.dirname(output_file))
+ writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
+ builddir_name)
+
+
+ # Write out the sorted list of includes.
+ root_makefile.write('\n')
+ for include_file in sorted(include_list):
+ # We wrap each .mk include in an if statement so users can tell make to
+    # not load a file by setting NO_LOAD. The make code below says: only
+    # load the .mk file if its filename doesn't start with a token in
+    # NO_LOAD.
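+    # E.g. 'make NO_LOAD=v8' skips every included .mk file whose
+    # relative path starts with 'v8'.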
+ root_makefile.write(
+ "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
+ " $(findstring $(join ^,$(prefix)),\\\n"
+ " $(join ^," + include_file + ")))),)\n")
+ root_makefile.write(" include " + include_file + "\n")
+ root_makefile.write("endif\n")
+ root_makefile.write('\n')
+
+ if (not generator_flags.get('standalone')
+ and generator_flags.get('auto_regeneration', True)):
+ WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)
+
+ root_makefile.write(SHARED_FOOTER)
+
+ root_makefile.close()
diff --git a/third_party/python/gyp/pylib/gyp/generator/msvs.py b/third_party/python/gyp/pylib/gyp/generator/msvs.py
new file mode 100644
index 0000000000..2278c16acf
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/msvs.py
@@ -0,0 +1,3543 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import copy
+import ntpath
+import os
+import posixpath
+import re
+import subprocess
+import sys
+import collections
+
+import gyp.common
+import gyp.easy_xml as easy_xml
+import gyp.generator.ninja as ninja_generator
+import gyp.MSVSNew as MSVSNew
+import gyp.MSVSProject as MSVSProject
+import gyp.MSVSSettings as MSVSSettings
+import gyp.MSVSToolFile as MSVSToolFile
+import gyp.MSVSUserFile as MSVSUserFile
+import gyp.MSVSUtil as MSVSUtil
+import gyp.MSVSVersion as MSVSVersion
+from gyp.common import GypError
+from gyp.common import OrderedSet
+
+
+# Regular expression for validating Visual Studio GUIDs. If the GUID
+# contains lowercase hex letters, MSVS will be fine. However,
+# IncrediBuild BuildConsole will parse the solution file, but then
+# silently skip building the target, causing hard-to-track-down errors.
+# Note that this only happens with the BuildConsole, and does not occur
+# if IncrediBuild is executed from inside Visual Studio. This regex
+# validates that the string looks like a GUID with all uppercase hex
+# letters.
+VALID_MSVS_GUID_CHARS = re.compile(r'^[A-F0-9\-]+$')
+
+
+generator_default_variables = {
+ 'DRIVER_PREFIX': '',
+ 'DRIVER_SUFFIX': '.sys',
+ 'EXECUTABLE_PREFIX': '',
+ 'EXECUTABLE_SUFFIX': '.exe',
+ 'STATIC_LIB_PREFIX': '',
+ 'SHARED_LIB_PREFIX': '',
+ 'STATIC_LIB_SUFFIX': '.lib',
+ 'SHARED_LIB_SUFFIX': '.dll',
+ 'INTERMEDIATE_DIR': '$(IntDir)',
+ 'SHARED_INTERMEDIATE_DIR': '$(OutDir)obj/global_intermediate',
+ 'OS': 'win',
+ 'PRODUCT_DIR': '$(OutDir)',
+ 'LIB_DIR': '$(OutDir)lib',
+ 'RULE_INPUT_ROOT': '$(InputName)',
+ 'RULE_INPUT_DIRNAME': '$(InputDir)',
+ 'RULE_INPUT_EXT': '$(InputExt)',
+ 'RULE_INPUT_NAME': '$(InputFileName)',
+ 'RULE_INPUT_PATH': '$(InputPath)',
+ 'CONFIGURATION_NAME': '$(ConfigurationName)',
+}
+
+
+# The msvs specific sections that hold paths
+generator_additional_path_sections = [
+ 'msvs_cygwin_dirs',
+ 'msvs_props',
+]
+
+
+generator_additional_non_configuration_keys = [
+ 'msvs_cygwin_dirs',
+ 'msvs_cygwin_shell',
+ 'msvs_large_pdb',
+ 'msvs_shard',
+ 'msvs_external_builder',
+ 'msvs_external_builder_out_dir',
+ 'msvs_external_builder_build_cmd',
+ 'msvs_external_builder_clean_cmd',
+ 'msvs_external_builder_clcompile_cmd',
+ 'msvs_enable_winrt',
+ 'msvs_requires_importlibrary',
+ 'msvs_enable_winphone',
+ 'msvs_application_type_revision',
+ 'msvs_target_platform_version',
+ 'msvs_target_platform_minversion',
+]
+
+generator_filelist_paths = None
+
+# List of precompiled header related keys.
+precomp_keys = [
+ 'msvs_precompiled_header',
+ 'msvs_precompiled_source',
+]
+
+
+cached_username = None
+
+
+cached_domain = None
+
+
+# TODO(gspencer): Switch the os.environ calls to be
+# win32api.GetDomainName() and win32api.GetUserName() once the
+# python version in depot_tools has been updated to work on Vista
+# 64-bit.
+def _GetDomainAndUserName():
+ if sys.platform not in ('win32', 'cygwin'):
+ return ('DOMAIN', 'USERNAME')
+ global cached_username
+ global cached_domain
+ if not cached_domain or not cached_username:
+ domain = os.environ.get('USERDOMAIN')
+ username = os.environ.get('USERNAME')
+ if not domain or not username:
+ call = subprocess.Popen(['net', 'config', 'Workstation'],
+ stdout=subprocess.PIPE)
+      # Decode so the regex searches below work on str under Python 3.
+      config = call.communicate()[0].decode('utf-8')
+ username_re = re.compile(r'^User name\s+(\S+)', re.MULTILINE)
+ username_match = username_re.search(config)
+ if username_match:
+ username = username_match.group(1)
+ domain_re = re.compile(r'^Logon domain\s+(\S+)', re.MULTILINE)
+ domain_match = domain_re.search(config)
+ if domain_match:
+ domain = domain_match.group(1)
+ cached_domain = domain
+ cached_username = username
+ return (cached_domain, cached_username)
+
+fixpath_prefix = None
+
+
+def _NormalizedSource(source):
+ """Normalize the path.
+
+ But not if that gets rid of a variable, as this may expand to something
+ larger than one directory.
+
+ Arguments:
+    source: The path to be normalized.
+
+ Returns:
+ The normalized path.
+ """
+ normalized = os.path.normpath(source)
+ if source.count('$') == normalized.count('$'):
+ source = normalized
+ return source
+
+
+def _FixPath(path):
+ """Convert paths to a form that will make sense in a vcproj file.
+
+ Arguments:
+ path: The path to convert, may contain / etc.
+ Returns:
+ The path with all slashes made into backslashes.
+ """
+ if fixpath_prefix and path and not os.path.isabs(path) and not path[0] == '$':
+ path = os.path.join(fixpath_prefix, path)
+ path = path.replace('/', '\\')
+ path = _NormalizedSource(path)
+ if path and path[-1] == '\\':
+ path = path[:-1]
+ return path
+
+
+def _FixPaths(paths):
+ """Fix each of the paths of the list."""
+ return [_FixPath(i) for i in paths]
+
+
+def _ConvertSourcesToFilterHierarchy(sources, prefix=None, excluded=None,
+ list_excluded=True, msvs_version=None):
+ """Converts a list split source file paths into a vcproj folder hierarchy.
+
+ Arguments:
+ sources: A list of source file paths split.
+ prefix: A list of source file path layers meant to apply to each of sources.
+ excluded: A set of excluded files.
+ msvs_version: A MSVSVersion object.
+
+ Returns:
+ A hierarchy of filenames and MSVSProject.Filter objects that matches the
+ layout of the source tree.
+ For example:
+ _ConvertSourcesToFilterHierarchy([['a', 'bob1.c'], ['b', 'bob2.c']],
+ prefix=['joe'])
+ -->
+ [MSVSProject.Filter('a', contents=['joe\\a\\bob1.c']),
+ MSVSProject.Filter('b', contents=['joe\\b\\bob2.c'])]
+ """
+ if not prefix: prefix = []
+ result = []
+ excluded_result = []
+ folders = collections.OrderedDict()
+ # Gather files into the final result, excluded, or folders.
+ for s in sources:
+ if len(s) == 1:
+ filename = _NormalizedSource('\\'.join(prefix + s))
+ if filename in excluded:
+ excluded_result.append(filename)
+ else:
+ result.append(filename)
+ elif msvs_version and not msvs_version.UsesVcxproj():
+ # For MSVS 2008 and earlier, we need to process all files before walking
+ # the sub folders.
+ if not folders.get(s[0]):
+ folders[s[0]] = []
+ folders[s[0]].append(s[1:])
+ else:
+ contents = _ConvertSourcesToFilterHierarchy([s[1:]], prefix + [s[0]],
+ excluded=excluded,
+ list_excluded=list_excluded,
+ msvs_version=msvs_version)
+ contents = MSVSProject.Filter(s[0], contents=contents)
+ result.append(contents)
+ # Add a folder for excluded files.
+ if excluded_result and list_excluded:
+ excluded_folder = MSVSProject.Filter('_excluded_files',
+ contents=excluded_result)
+ result.append(excluded_folder)
+
+ if msvs_version and msvs_version.UsesVcxproj():
+ return result
+
+ # Populate all the folders.
+ for f in folders:
+ contents = _ConvertSourcesToFilterHierarchy(folders[f], prefix=prefix + [f],
+ excluded=excluded,
+ list_excluded=list_excluded,
+ msvs_version=msvs_version)
+ contents = MSVSProject.Filter(f, contents=contents)
+ result.append(contents)
+ return result
+
+
+def _ToolAppend(tools, tool_name, setting, value, only_if_unset=False):
+ if not value: return
+ _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset)
+
+
+def _ToolSetOrAppend(tools, tool_name, setting, value, only_if_unset=False):
+ # TODO(bradnelson): ugly hack, fix this more generally!!!
+ if 'Directories' in setting or 'Dependencies' in setting:
+ if type(value) == str:
+ value = value.replace('/', '\\')
+ else:
+ value = [i.replace('/', '\\') for i in value]
+ if not tools.get(tool_name):
+ tools[tool_name] = dict()
+ tool = tools[tool_name]
+ if 'CompileAsWinRT' == setting:
+ return
+ if tool.get(setting):
+ if only_if_unset: return
+ if type(tool[setting]) == list and type(value) == list:
+ tool[setting] += value
+ else:
+ raise TypeError(
+ 'Appending "%s" to a non-list setting "%s" for tool "%s" is '
+ 'not allowed, previous value: %s' % (
+ value, setting, tool_name, str(tool[setting])))
+ else:
+ tool[setting] = value
+
+
+def _ConfigTargetVersion(config_data):
+ return config_data.get('msvs_target_version', 'Windows7')
+
+
+def _ConfigPlatform(config_data):
+ return config_data.get('msvs_configuration_platform', 'Win32')
+
+
+def _ConfigBaseName(config_name, platform_name):
+ if config_name.endswith('_' + platform_name):
+ return config_name[0:-len(platform_name) - 1]
+ else:
+ return config_name
+
+
+def _ConfigFullName(config_name, config_data):
+ platform_name = _ConfigPlatform(config_data)
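+  # E.g. ('Debug_x64', {'msvs_configuration_platform': 'x64'}) -> 'Debug|x64'.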
+ return '%s|%s' % (_ConfigBaseName(config_name, platform_name), platform_name)
+
+
+def _ConfigWindowsTargetPlatformVersion(config_data, version):
+ config_ver = config_data.get('msvs_windows_sdk_version')
+ vers = [config_ver] if config_ver else version.compatible_sdks
+ for ver in vers:
+ for key in [
+ r'HKLM\Software\Microsoft\Microsoft SDKs\Windows\%s',
+ r'HKLM\Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows\%s']:
+ sdk_dir = MSVSVersion._RegistryGetValue(key % ver, 'InstallationFolder')
+ if not sdk_dir:
+ continue
+ version = MSVSVersion._RegistryGetValue(key % ver, 'ProductVersion') or ''
+ # Find a matching entry in sdk_dir\include.
+      expected_sdk_dir = r'%s\include' % sdk_dir
+      entries = (os.listdir(expected_sdk_dir)
+                 if os.path.isdir(expected_sdk_dir) else [])
+      names = sorted([x for x in entries if x.startswith(version)],
+                     reverse=True)
+ if names:
+ return names[0]
+ else:
+ print('Warning: No include files found for '
+ 'detected Windows SDK version %s' % (version))
+
+
+def _BuildCommandLineForRuleRaw(spec, cmd, cygwin_shell, has_input_path,
+ quote_cmd, do_setup_env):
+
+  if any('$(InputDir)' in x for x in cmd):
+ input_dir_preamble = (
+ 'set INPUTDIR=$(InputDir)\n'
+ 'if NOT DEFINED INPUTDIR set INPUTDIR=.\\\n'
+ 'set INPUTDIR=%INPUTDIR:~0,-1%\n'
+ )
+ else:
+ input_dir_preamble = ''
+
+ if cygwin_shell:
+ # Find path to cygwin.
+ cygwin_dir = _FixPath(spec.get('msvs_cygwin_dirs', ['.'])[0])
+ # Prepare command.
+ direct_cmd = cmd
+ direct_cmd = [i.replace('$(IntDir)',
+ '`cygpath -m "${INTDIR}"`') for i in direct_cmd]
+ direct_cmd = [i.replace('$(OutDir)',
+ '`cygpath -m "${OUTDIR}"`') for i in direct_cmd]
+ direct_cmd = [i.replace('$(InputDir)',
+ '`cygpath -m "${INPUTDIR}"`') for i in direct_cmd]
+ if has_input_path:
+ direct_cmd = [i.replace('$(InputPath)',
+ '`cygpath -m "${INPUTPATH}"`')
+ for i in direct_cmd]
+ direct_cmd = ['\\"%s\\"' % i.replace('"', '\\\\\\"') for i in direct_cmd]
+ # direct_cmd = gyp.common.EncodePOSIXShellList(direct_cmd)
+ direct_cmd = ' '.join(direct_cmd)
+ # TODO(quote): regularize quoting path names throughout the module
+ cmd = ''
+ if do_setup_env:
+ cmd += 'call "$(ProjectDir)%(cygwin_dir)s\\setup_env.bat" && '
+ cmd += 'set CYGWIN=nontsec&& '
+ if direct_cmd.find('NUMBER_OF_PROCESSORS') >= 0:
+ cmd += 'set /a NUMBER_OF_PROCESSORS_PLUS_1=%%NUMBER_OF_PROCESSORS%%+1&& '
+ if direct_cmd.find('INTDIR') >= 0:
+ cmd += 'set INTDIR=$(IntDir)&& '
+ if direct_cmd.find('OUTDIR') >= 0:
+ cmd += 'set OUTDIR=$(OutDir)&& '
+ if has_input_path and direct_cmd.find('INPUTPATH') >= 0:
+ cmd += 'set INPUTPATH=$(InputPath) && '
+ cmd += 'bash -c "%(cmd)s"'
+ cmd = cmd % {'cygwin_dir': cygwin_dir,
+ 'cmd': direct_cmd}
+ return input_dir_preamble + cmd
+ else:
+ # Convert cat --> type to mimic unix.
+ if cmd[0] == 'cat':
+ command = ['type']
+ else:
+ command = [cmd[0].replace('/', '\\')]
+ # Add call before command to ensure that commands can be tied together one
+ # after the other without aborting in Incredibuild, since IB makes a bat
+ # file out of the raw command string, and some commands (like python) are
+ # actually batch files themselves.
+ command.insert(0, 'call')
+ # Fix the paths
+ # TODO(quote): This is a really ugly heuristic, and will miss path fixing
+ # for arguments like "--arg=path" or "/opt:path".
+ # If the argument starts with a slash or dash, it's probably a command line
+ # switch
+ arguments = [i if (i[:1] in "/-") else _FixPath(i) for i in cmd[1:]]
+ arguments = [i.replace('$(InputDir)', '%INPUTDIR%') for i in arguments]
+ arguments = [MSVSSettings.FixVCMacroSlashes(i) for i in arguments]
+ if quote_cmd:
+ # Support a mode for using cmd directly.
+ # Convert any paths to native form (first element is used directly).
+ # TODO(quote): regularize quoting path names throughout the module
+ arguments = ['"%s"' % i for i in arguments]
+ # Collapse into a single command.
+ return input_dir_preamble + ' '.join(command + arguments)
+
+
+def _BuildCommandLineForRule(spec, rule, has_input_path, do_setup_env):
+ # Currently this weird argument munging is used to duplicate the way a
+  # Python script would need to be run as part of the Chrome tree.
+  # Eventually we should add some sort of rule_default option to set this
+  # per project. For now the behavior Chrome needs is the default.
+ mcs = rule.get('msvs_cygwin_shell')
+ if mcs is None:
+ mcs = int(spec.get('msvs_cygwin_shell', 1))
+ elif isinstance(mcs, str):
+ mcs = int(mcs)
+ quote_cmd = int(rule.get('msvs_quote_cmd', 1))
+ return _BuildCommandLineForRuleRaw(spec, rule['action'], mcs, has_input_path,
+ quote_cmd, do_setup_env=do_setup_env)
+
+
+def _AddActionStep(actions_dict, inputs, outputs, description, command):
+ """Merge action into an existing list of actions.
+
+ Care must be taken so that actions which have overlapping inputs either don't
+ get assigned to the same input, or get collapsed into one.
+
+ Arguments:
+ actions_dict: dictionary keyed on input name, which maps to a list of
+ dicts describing the actions attached to that input file.
+ inputs: list of inputs
+ outputs: list of outputs
+ description: description of the action
+ command: command line to execute
+ """
+ # Require there to be at least one input (call sites will ensure this).
+ assert inputs
+
+ action = {
+ 'inputs': inputs,
+ 'outputs': outputs,
+ 'description': description,
+ 'command': command,
+ }
+
+ # Pick where to stick this action.
+ # While less than optimal in terms of build time, attach them to the first
+ # input for now.
+ chosen_input = inputs[0]
+
+ # Add it there.
+ if chosen_input not in actions_dict:
+ actions_dict[chosen_input] = []
+ actions_dict[chosen_input].append(action)
+
+
+def _AddCustomBuildToolForMSVS(p, spec, primary_input,
+ inputs, outputs, description, cmd):
+ """Add a custom build tool to execute something.
+
+ Arguments:
+ p: the target project
+ spec: the target project dict
+ primary_input: input file to attach the build tool to
+ inputs: list of inputs
+ outputs: list of outputs
+ description: description of the action
+ cmd: command line to execute
+ """
+ inputs = _FixPaths(inputs)
+ outputs = _FixPaths(outputs)
+ tool = MSVSProject.Tool(
+ 'VCCustomBuildTool',
+ {'Description': description,
+ 'AdditionalDependencies': ';'.join(inputs),
+ 'Outputs': ';'.join(outputs),
+ 'CommandLine': cmd,
+ })
+ # Add to the properties of primary input for each config.
+ for config_name, c_data in spec['configurations'].items():
+ p.AddFileConfig(_FixPath(primary_input),
+ _ConfigFullName(config_name, c_data), tools=[tool])
+
+
+def _AddAccumulatedActionsToMSVS(p, spec, actions_dict):
+ """Add actions accumulated into an actions_dict, merging as needed.
+
+ Arguments:
+ p: the target project
+ spec: the target project dict
+ actions_dict: dictionary keyed on input name, which maps to a list of
+ dicts describing the actions attached to that input file.
+ """
+ for primary_input in actions_dict:
+ inputs = OrderedSet()
+ outputs = OrderedSet()
+ descriptions = []
+ commands = []
+ for action in actions_dict[primary_input]:
+ inputs.update(OrderedSet(action['inputs']))
+ outputs.update(OrderedSet(action['outputs']))
+ descriptions.append(action['description'])
+ commands.append(action['command'])
+ # Add the custom build step for one input file.
+ description = ', and also '.join(descriptions)
+ command = '\r\n'.join(commands)
+ _AddCustomBuildToolForMSVS(p, spec,
+ primary_input=primary_input,
+ inputs=inputs,
+ outputs=outputs,
+ description=description,
+ cmd=command)
+
+
+def _RuleExpandPath(path, input_file):
+ """Given the input file to which a rule applied, string substitute a path.
+
+ Arguments:
+ path: a path to string expand
+ input_file: the file to which the rule applied.
+ Returns:
+ The string substituted path.
+ """
+ path = path.replace('$(InputName)',
+ os.path.splitext(os.path.split(input_file)[1])[0])
+ path = path.replace('$(InputDir)', os.path.dirname(input_file))
+ path = path.replace('$(InputExt)',
+ os.path.splitext(os.path.split(input_file)[1])[1])
+ path = path.replace('$(InputFileName)', os.path.split(input_file)[1])
+ path = path.replace('$(InputPath)', input_file)
+ return path
+
+
+def _FindRuleTriggerFiles(rule, sources):
+ """Find the list of files which a particular rule applies to.
+
+ Arguments:
+ rule: the rule in question
+ sources: the set of all known source files for this project
+ Returns:
+ The list of sources that trigger a particular rule.
+ """
+ return rule.get('rule_sources', [])
+
+
+def _RuleInputsAndOutputs(rule, trigger_file):
+ """Find the inputs and outputs generated by a rule.
+
+ Arguments:
+ rule: the rule in question.
+ trigger_file: the main trigger for this rule.
+ Returns:
+ The pair of (inputs, outputs) involved in this rule.
+ """
+ raw_inputs = _FixPaths(rule.get('inputs', []))
+ raw_outputs = _FixPaths(rule.get('outputs', []))
+ inputs = OrderedSet()
+ outputs = OrderedSet()
+ inputs.add(trigger_file)
+ for i in raw_inputs:
+ inputs.add(_RuleExpandPath(i, trigger_file))
+ for o in raw_outputs:
+ outputs.add(_RuleExpandPath(o, trigger_file))
+ return (inputs, outputs)
+
+
+def _GenerateNativeRulesForMSVS(p, rules, output_dir, spec, options):
+ """Generate a native rules file.
+
+ Arguments:
+ p: the target project
+ rules: the set of rules to include
+ output_dir: the directory in which the project/gyp resides
+ spec: the project dict
+ options: global generator options
+ """
+ rules_filename = '%s%s.rules' % (spec['target_name'],
+ options.suffix)
+ rules_file = MSVSToolFile.Writer(os.path.join(output_dir, rules_filename),
+ spec['target_name'])
+ # Add each rule.
+ for r in rules:
+ rule_name = r['rule_name']
+ rule_ext = r['extension']
+ inputs = _FixPaths(r.get('inputs', []))
+ outputs = _FixPaths(r.get('outputs', []))
+ # Skip a rule with no action and no inputs.
+ if 'action' not in r and not r.get('rule_sources', []):
+ continue
+ cmd = _BuildCommandLineForRule(spec, r, has_input_path=True,
+ do_setup_env=True)
+ rules_file.AddCustomBuildRule(name=rule_name,
+ description=r.get('message', rule_name),
+ extensions=[rule_ext],
+ additional_dependencies=inputs,
+ outputs=outputs,
+ cmd=cmd)
+ # Write out rules file.
+ rules_file.WriteIfChanged()
+
+ # Add rules file to project.
+ p.AddToolFile(rules_filename)
+
+
+def _Cygwinify(path):
+ path = path.replace('$(OutDir)', '$(OutDirCygwin)')
+ path = path.replace('$(IntDir)', '$(IntDirCygwin)')
+ return path
+
+
+def _GenerateExternalRules(rules, output_dir, spec,
+ sources, options, actions_to_add):
+ """Generate an external makefile to do a set of rules.
+
+ Arguments:
+ rules: the list of rules to include
+ output_dir: path containing project and gyp files
+ spec: project specification data
+ sources: set of sources known
+ options: global generator options
+ actions_to_add: The list of actions we will add to.
+ """
+ filename = '%s_rules%s.mk' % (spec['target_name'], options.suffix)
+ mk_file = gyp.common.WriteOnDiff(os.path.join(output_dir, filename))
+ # Find cygwin style versions of some paths.
+ mk_file.write('OutDirCygwin:=$(shell cygpath -u "$(OutDir)")\n')
+ mk_file.write('IntDirCygwin:=$(shell cygpath -u "$(IntDir)")\n')
+ # Gather stuff needed to emit all: target.
+ all_inputs = OrderedSet()
+ all_outputs = OrderedSet()
+ all_output_dirs = OrderedSet()
+ first_outputs = []
+ for rule in rules:
+ trigger_files = _FindRuleTriggerFiles(rule, sources)
+ for tf in trigger_files:
+ inputs, outputs = _RuleInputsAndOutputs(rule, tf)
+ all_inputs.update(OrderedSet(inputs))
+ all_outputs.update(OrderedSet(outputs))
+ # Only use one target from each rule as the dependency for
+ # 'all' so we don't try to build each rule multiple times.
+ first_outputs.append(list(outputs)[0])
+ # Get the unique output directories for this rule.
+ output_dirs = [os.path.split(i)[0] for i in outputs]
+ for od in output_dirs:
+ all_output_dirs.add(od)
+ first_outputs_cyg = [_Cygwinify(i) for i in first_outputs]
+ # Write out all: target, including mkdir for each output directory.
+ mk_file.write('all: %s\n' % ' '.join(first_outputs_cyg))
+ for od in all_output_dirs:
+ if od:
+ mk_file.write('\tmkdir -p `cygpath -u "%s"`\n' % od)
+ mk_file.write('\n')
+ # Define how each output is generated.
+ for rule in rules:
+ trigger_files = _FindRuleTriggerFiles(rule, sources)
+ for tf in trigger_files:
+ # Get all the inputs and outputs for this rule for this trigger file.
+ inputs, outputs = _RuleInputsAndOutputs(rule, tf)
+ inputs = [_Cygwinify(i) for i in inputs]
+ outputs = [_Cygwinify(i) for i in outputs]
+ # Prepare the command line for this rule.
+ cmd = [_RuleExpandPath(c, tf) for c in rule['action']]
+ cmd = ['"%s"' % i for i in cmd]
+ cmd = ' '.join(cmd)
+ # Add it to the makefile.
+ mk_file.write('%s: %s\n' % (' '.join(outputs), ' '.join(inputs)))
+ mk_file.write('\t%s\n\n' % cmd)
+ # Close up the file.
+ mk_file.close()
+
+ # Add makefile to list of sources.
+ sources.add(filename)
+ # Add a build action to call makefile.
+ cmd = ['make',
+ 'OutDir=$(OutDir)',
+ 'IntDir=$(IntDir)',
+ '-j', '${NUMBER_OF_PROCESSORS_PLUS_1}',
+ '-f', filename]
+ cmd = _BuildCommandLineForRuleRaw(spec, cmd, True, False, True, True)
+  # Insert makefile as 0th input, so it gets the action attached there,
+  # as this is easier to understand from within the IDE.

+ all_inputs = list(all_inputs)
+ all_inputs.insert(0, filename)
+ _AddActionStep(actions_to_add,
+ inputs=_FixPaths(all_inputs),
+ outputs=_FixPaths(all_outputs),
+ description='Running external rules for %s' %
+ spec['target_name'],
+ command=cmd)
+
+
+def _EscapeEnvironmentVariableExpansion(s):
+ """Escapes % characters.
+
+ Escapes any % characters so that Windows-style environment variable
+ expansions will leave them alone.
+ See http://connect.microsoft.com/VisualStudio/feedback/details/106127/cl-d-name-text-containing-percentage-characters-doesnt-compile
+ to understand why we have to do this.
+
+ Args:
+ s: The string to be escaped.
+
+ Returns:
+ The escaped string.
+ """
+ s = s.replace('%', '%%')
+ return s
+
+
+quote_replacer_regex = re.compile(r'(\\*)"')
+
+
+def _EscapeCommandLineArgumentForMSVS(s):
+ """Escapes a Windows command-line argument.
+
+  Escapes the argument so that the Win32 CommandLineToArgv function will turn
+  the escaped result back into the original string.
+ See http://msdn.microsoft.com/en-us/library/17w5ykft.aspx
+ ("Parsing C++ Command-Line Arguments") to understand why we have to do
+ this.
+
+ Args:
+ s: the string to be escaped.
+ Returns:
+ the escaped string.
+ """
+
+ def _Replace(match):
+ # For a literal quote, CommandLineToArgv requires an odd number of
+ # backslashes preceding it, and it produces half as many literal backslashes
+ # (rounded down). So we need to produce 2n+1 backslashes.
+ return 2 * match.group(1) + '\\"'
+
+ # Escape all quotes so that they are interpreted literally.
+ s = quote_replacer_regex.sub(_Replace, s)
+ # Now add unescaped quotes so that any whitespace is interpreted literally.
+ s = '"' + s + '"'
+ return s
+
+
+delimiters_replacer_regex = re.compile(r'(\\*)([,;]+)')
+
+
+def _EscapeVCProjCommandLineArgListItem(s):
+ """Escapes command line arguments for MSVS.
+
+ The VCProj format stores string lists in a single string using commas and
+ semi-colons as separators, which must be quoted if they are to be
+ interpreted literally. However, command-line arguments may already have
+ quotes, and the VCProj parser is ignorant of the backslash escaping
+ convention used by CommandLineToArgv, so the command-line quotes and the
+ VCProj quotes may not be the same quotes. So to store a general
+ command-line argument in a VCProj list, we need to parse the existing
+ quoting according to VCProj's convention and quote any delimiters that are
+ not already quoted by that convention. The quotes that we add will also be
+ seen by CommandLineToArgv, so if backslashes precede them then we also have
+ to escape those backslashes according to the CommandLineToArgv
+ convention.
+
+ Args:
+ s: the string to be escaped.
+ Returns:
+ the escaped string.
+ """
+
+ def _Replace(match):
+ # For a non-literal quote, CommandLineToArgv requires an even number of
+ # backslashes preceding it, and it produces half as many literal
+ # backslashes. So we need to produce 2n backslashes.
+ return 2 * match.group(1) + '"' + match.group(2) + '"'
+
+ segments = s.split('"')
+ # The unquoted segments are at the even-numbered indices.
+ for i in range(0, len(segments), 2):
+ segments[i] = delimiters_replacer_regex.sub(_Replace, segments[i])
+ # Concatenate back into a single string
+ s = '"'.join(segments)
+ if len(segments) % 2 == 0:
+ # String ends while still quoted according to VCProj's convention. This
+ # means the delimiter and the next list item that follow this one in the
+ # .vcproj file will be misinterpreted as part of this item. There is nothing
+ # we can do about this. Adding an extra quote would correct the problem in
+ # the VCProj but cause the same problem on the final command-line. Moving
+  # the item to the end of the list does work, but that's only possible if
+ # there's only one such item. Let's just warn the user.
+ print(('Warning: MSVS may misinterpret the odd number of ' +
+ 'quotes in ' + s), file=sys.stderr)
+ return s
+
+
+def _EscapeCppDefineForMSVS(s):
+ """Escapes a CPP define so that it will reach the compiler unaltered."""
+ s = _EscapeEnvironmentVariableExpansion(s)
+ s = _EscapeCommandLineArgumentForMSVS(s)
+ s = _EscapeVCProjCommandLineArgListItem(s)
+  # cl.exe replaces literal # characters with = in preprocessor definitions for
+ # some reason. Octal-encode to work around that.
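+  # For example, a hypothetical define 'FOO#BAR' ends up containing
+  # 'FOO\043BAR'.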
+ s = s.replace('#', '\\%03o' % ord('#'))
+ return s
+
+
+quote_replacer_regex2 = re.compile(r'(\\+)"')
+
+
+def _EscapeCommandLineArgumentForMSBuild(s):
+ """Escapes a Windows command-line argument for use by MSBuild."""
+
+  def _Replace(match):
+    # For n backslashes preceding the quote, emit 4 * (n // 2) backslashes
+    # plus an escaped quote. Integer division keeps the repeat count an int
+    # under Python 3.
+    return (len(match.group(1)) // 2 * 4) * '\\' + '\\"'
+
+ # Escape all quotes so that they are interpreted literally.
+ s = quote_replacer_regex2.sub(_Replace, s)
+ return s
+
+
+def _EscapeMSBuildSpecialCharacters(s):
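+  """Escapes characters that MSBuild treats specially via percent-encoding.
+
+  For example, a hypothetical value 'a$b;c' becomes 'a%24b%3Bc'.
+  """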
+ escape_dictionary = {
+ '%': '%25',
+ '$': '%24',
+ '@': '%40',
+ "'": '%27',
+ ';': '%3B',
+ '?': '%3F',
+ '*': '%2A'
+ }
+ result = ''.join([escape_dictionary.get(c, c) for c in s])
+ return result
+
+
+def _EscapeCppDefineForMSBuild(s):
+ """Escapes a CPP define so that it will reach the compiler unaltered."""
+ s = _EscapeEnvironmentVariableExpansion(s)
+ s = _EscapeCommandLineArgumentForMSBuild(s)
+ s = _EscapeMSBuildSpecialCharacters(s)
+  # cl.exe replaces literal # characters with = in preprocessor definitions for
+ # some reason. Octal-encode to work around that.
+ s = s.replace('#', '\\%03o' % ord('#'))
+ return s
+
+
+def _GenerateRulesForMSVS(p, output_dir, options, spec,
+ sources, excluded_sources,
+ actions_to_add):
+ """Generate all the rules for a particular project.
+
+ Arguments:
+ p: the project
+ output_dir: directory to emit rules to
+ options: global options passed to the generator
+ spec: the specification for this project
+ sources: the set of all known source files in this project
+ excluded_sources: the set of sources excluded from normal processing
+ actions_to_add: deferred list of actions to add in
+ """
+ rules = spec.get('rules', [])
+ rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
+ rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
+
+ # Handle rules that use a native rules file.
+ if rules_native:
+ _GenerateNativeRulesForMSVS(p, rules_native, output_dir, spec, options)
+
+ # Handle external rules (non-native rules).
+ if rules_external:
+ _GenerateExternalRules(rules_external, output_dir, spec,
+ sources, options, actions_to_add)
+ _AdjustSourcesForRules(rules, sources, excluded_sources, False)
+
+
+def _AdjustSourcesForRules(rules, sources, excluded_sources, is_msbuild):
+ # Add outputs generated by each rule (if applicable).
+ for rule in rules:
+ # Add in the outputs from this rule.
+ trigger_files = _FindRuleTriggerFiles(rule, sources)
+ for trigger_file in trigger_files:
+ # Remove trigger_file from excluded_sources to let the rule be triggered
+ # (e.g. rule trigger ax_enums.idl is added to excluded_sources
+ # because it's also in an action's inputs in the same project)
+ excluded_sources.discard(_FixPath(trigger_file))
+ # Done if not processing outputs as sources.
+ if int(rule.get('process_outputs_as_sources', False)):
+ inputs, outputs = _RuleInputsAndOutputs(rule, trigger_file)
+ inputs = OrderedSet(_FixPaths(inputs))
+ outputs = OrderedSet(_FixPaths(outputs))
+ inputs.remove(_FixPath(trigger_file))
+ sources.update(inputs)
+ if not is_msbuild:
+ excluded_sources.update(inputs)
+ sources.update(outputs)
+
+
+def _FilterActionsFromExcluded(excluded_sources, actions_to_add):
+ """Take inputs with actions attached out of the list of exclusions.
+
+ Arguments:
+ excluded_sources: list of source files not to be built.
+ actions_to_add: dict of actions keyed on source file they're attached to.
+ Returns:
+ excluded_sources with files that have actions attached removed.
+ """
+ must_keep = OrderedSet(_FixPaths(actions_to_add.keys()))
+ return [s for s in excluded_sources if s not in must_keep]
+
+
+def _GetDefaultConfiguration(spec):
+ return spec['configurations'][spec['default_configuration']]
+
+
+def _GetGuidOfProject(proj_path, spec):
+ """Get the guid for the project.
+
+ Arguments:
+ proj_path: Path of the vcproj or vcxproj file to generate.
+ spec: The target dictionary containing the properties of the target.
+ Returns:
+ the guid.
+ Raises:
+ ValueError: if the specified GUID is invalid.
+ """
+ # Pluck out the default configuration.
+ default_config = _GetDefaultConfiguration(spec)
+ # Decide the guid of the project.
+ guid = default_config.get('msvs_guid')
+ if guid:
+ if VALID_MSVS_GUID_CHARS.match(guid) is None:
+ raise ValueError('Invalid MSVS guid: "%s". Must match regex: "%s".' %
+ (guid, VALID_MSVS_GUID_CHARS.pattern))
+ guid = '{%s}' % guid
+ guid = guid or MSVSNew.MakeGuid(proj_path)
+ return guid
+
+
+def _GetMsbuildToolsetOfProject(proj_path, spec, version):
+ """Get the platform toolset for the project.
+
+ Arguments:
+ proj_path: Path of the vcproj or vcxproj file to generate.
+ spec: The target dictionary containing the properties of the target.
+ version: The MSVSVersion object.
+ Returns:
+ the platform toolset string or None.
+ """
+ # Pluck out the default configuration.
+ default_config = _GetDefaultConfiguration(spec)
+ toolset = default_config.get('msbuild_toolset')
+ if not toolset and version.DefaultToolset():
+ toolset = version.DefaultToolset()
+ if spec['type'] == 'windows_driver':
+ toolset = 'WindowsKernelModeDriver10.0'
+ return toolset
+
+
+def _GenerateProject(project, options, version, generator_flags):
+ """Generates a vcproj file.
+
+ Arguments:
+ project: the MSVSProject object.
+ options: global generator options.
+ version: the MSVSVersion object.
+ generator_flags: dict of generator-specific flags.
+ Returns:
+ A list of source files that cannot be found on disk.
+ """
+ default_config = _GetDefaultConfiguration(project.spec)
+
+ # Skip emitting anything if told to with msvs_existing_vcproj option.
+ if default_config.get('msvs_existing_vcproj'):
+ return []
+
+ if version.UsesVcxproj():
+ return _GenerateMSBuildProject(project, options, version, generator_flags)
+ else:
+ return _GenerateMSVSProject(project, options, version, generator_flags)
+
+
+# TODO: Avoid code duplication with _ValidateSourcesForOSX in make.py.
+def _ValidateSourcesForMSVSProject(spec, version):
+ """Makes sure if duplicate basenames are not specified in the source list.
+
+ Arguments:
+ spec: The target dictionary containing the properties of the target.
+ version: The VisualStudioVersion object.
+ """
+ # This validation should not be applied to MSVC2010 and later.
+ assert not version.UsesVcxproj()
+
+ # TODO: Check if MSVC allows this for loadable_module targets.
+ if spec.get('type', None) not in ('static_library', 'shared_library'):
+ return
+ sources = spec.get('sources', [])
+ basenames = {}
+ for source in sources:
+ name, ext = os.path.splitext(source)
+ is_compiled_file = ext in [
+ '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
+ if not is_compiled_file:
+ continue
+ basename = os.path.basename(name) # Don't include extension.
+ basenames.setdefault(basename, []).append(source)
+
+ error = ''
+ for basename, files in basenames.items():
+ if len(files) > 1:
+ error += ' %s: %s\n' % (basename, ' '.join(files))
+
+ if error:
+ print('static library %s has several files with the same basename:\n' %
+ spec['target_name'] + error + 'MSVC08 cannot handle that.')
+ raise GypError('Duplicate basenames in sources section, see list above')
+
+
+def _GenerateMSVSProject(project, options, version, generator_flags):
+ """Generates a .vcproj file. It may create .rules and .user files too.
+
+ Arguments:
+ project: The project object we will generate the file for.
+ options: Global options passed to the generator.
+ version: The VisualStudioVersion object.
+ generator_flags: dict of generator-specific flags.
+ """
+ spec = project.spec
+ gyp.common.EnsureDirExists(project.path)
+
+ platforms = _GetUniquePlatforms(spec)
+ p = MSVSProject.Writer(project.path, version, spec['target_name'],
+ project.guid, platforms)
+
+ # Get directory project file is in.
+ project_dir = os.path.split(project.path)[0]
+ gyp_path = _NormalizedSource(project.build_file)
+ relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
+
+ config_type = _GetMSVSConfigurationType(spec, project.build_file)
+ for config_name, config in spec['configurations'].items():
+ _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config)
+
+ # MSVC08 and prior version cannot handle duplicate basenames in the same
+ # target.
+ # TODO: Take excluded sources into consideration if possible.
+ _ValidateSourcesForMSVSProject(spec, version)
+
+ # Prepare list of sources and excluded sources.
+ gyp_file = os.path.split(project.build_file)[1]
+ sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
+ gyp_file)
+
+ # Add rules.
+ actions_to_add = {}
+ _GenerateRulesForMSVS(p, project_dir, options, spec,
+ sources, excluded_sources,
+ actions_to_add)
+ list_excluded = generator_flags.get('msvs_list_excluded_files', True)
+ sources, excluded_sources, excluded_idl = (
+ _AdjustSourcesAndConvertToFilterHierarchy(spec, options, project_dir,
+ sources, excluded_sources,
+ list_excluded, version))
+
+ # Add in files.
+ missing_sources = _VerifySourcesExist(sources, project_dir)
+ p.AddFiles(sources)
+
+ _AddToolFilesToMSVS(p, spec)
+ _HandlePreCompiledHeaders(p, sources, spec)
+ _AddActions(actions_to_add, spec, relative_path_of_gyp_file)
+ _AddCopies(actions_to_add, spec)
+ _WriteMSVSUserFile(project.path, version, spec)
+
+ # NOTE: this stanza must appear after all actions have been decided.
+  # Don't exclude sources with actions attached, or they won't run.
+ excluded_sources = _FilterActionsFromExcluded(
+ excluded_sources, actions_to_add)
+ _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
+ list_excluded)
+ _AddAccumulatedActionsToMSVS(p, spec, actions_to_add)
+
+ # Write it out.
+ p.WriteIfChanged()
+
+ return missing_sources
+
+
+def _GetUniquePlatforms(spec):
+ """Returns the list of unique platforms for this spec, e.g ['win32', ...].
+
+ Arguments:
+ spec: The target dictionary containing the properties of the target.
+  Returns:
+    The list of unique platform names.
+ """
+ # Gather list of unique platforms.
+ platforms = OrderedSet()
+ for configuration in spec['configurations']:
+ platforms.add(_ConfigPlatform(spec['configurations'][configuration]))
+ platforms = list(platforms)
+ return platforms
+
+
+def _CreateMSVSUserFile(proj_path, version, spec):
+ """Generates a .user file for the user running this Gyp program.
+
+ Arguments:
+ proj_path: The path of the project file being created. The .user file
+ shares the same path (with an appropriate suffix).
+ version: The VisualStudioVersion object.
+ spec: The target dictionary containing the properties of the target.
+ Returns:
+ The MSVSUserFile object created.
+ """
+ (domain, username) = _GetDomainAndUserName()
+ vcuser_filename = '.'.join([proj_path, domain, username, 'user'])
+ user_file = MSVSUserFile.Writer(vcuser_filename, version,
+ spec['target_name'])
+ return user_file
+
+
+def _GetMSVSConfigurationType(spec, build_file):
+ """Returns the configuration type for this project.
+
+ It's a number defined by Microsoft. May raise an exception.
+
+ Args:
+ spec: The target dictionary containing the properties of the target.
+ build_file: The path of the gyp file.
+ Returns:
+ An integer, the configuration type.
+ """
+ try:
+ config_type = {
+ 'executable': '1', # .exe
+ 'shared_library': '2', # .dll
+ 'loadable_module': '2', # .dll
+ 'static_library': '4', # .lib
+ 'windows_driver': '5', # .sys
+ 'none': '10', # Utility type
+ }[spec['type']]
+ except KeyError:
+ if spec.get('type'):
+ raise GypError('Target type %s is not a valid target type for '
+ 'target %s in %s.' %
+ (spec['type'], spec['target_name'], build_file))
+ else:
+ raise GypError('Missing type field for target %s in %s.' %
+ (spec['target_name'], build_file))
+ return config_type
+
+
+def _AddConfigurationToMSVSProject(p, spec, config_type, config_name, config):
+ """Adds a configuration to the MSVS project.
+
+ Many settings in a vcproj file are specific to a configuration. This
+  function generates the main part of the vcproj file that's configuration
+  specific.
+
+ Arguments:
+ p: The target project being generated.
+ spec: The target dictionary containing the properties of the target.
+ config_type: The configuration type, a number as defined by Microsoft.
+ config_name: The name of the configuration.
+ config: The dictionary that defines the special processing to be done
+ for this configuration.
+ """
+ # Get the information for this configuration
+ include_dirs, midl_include_dirs, resource_include_dirs = \
+ _GetIncludeDirs(config)
+ libraries = _GetLibraries(spec)
+ library_dirs = _GetLibraryDirs(config)
+ out_file, vc_tool, _ = _GetOutputFilePathAndTool(spec, msbuild=False)
+ defines = _GetDefines(config)
+ defines = [_EscapeCppDefineForMSVS(d) for d in defines]
+ disabled_warnings = _GetDisabledWarnings(config)
+ prebuild = config.get('msvs_prebuild')
+ postbuild = config.get('msvs_postbuild')
+ def_file = _GetModuleDefinition(spec)
+ precompiled_header = config.get('msvs_precompiled_header')
+
+ # Prepare the list of tools as a dictionary.
+ tools = dict()
+ # Add in user specified msvs_settings.
+ msvs_settings = config.get('msvs_settings', {})
+ MSVSSettings.ValidateMSVSSettings(msvs_settings)
+
+ # Prevent default library inheritance from the environment.
+ _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', ['$(NOINHERIT)'])
+
+ for tool in msvs_settings:
+ settings = config['msvs_settings'][tool]
+ for setting in settings:
+ _ToolAppend(tools, tool, setting, settings[setting])
+ # Add the information to the appropriate tool
+ _ToolAppend(tools, 'VCCLCompilerTool',
+ 'AdditionalIncludeDirectories', include_dirs)
+ _ToolAppend(tools, 'VCMIDLTool',
+ 'AdditionalIncludeDirectories', midl_include_dirs)
+ _ToolAppend(tools, 'VCResourceCompilerTool',
+ 'AdditionalIncludeDirectories', resource_include_dirs)
+ # Add in libraries.
+ _ToolAppend(tools, 'VCLinkerTool', 'AdditionalDependencies', libraries)
+ _ToolAppend(tools, 'VCLinkerTool', 'AdditionalLibraryDirectories',
+ library_dirs)
+ if out_file:
+ _ToolAppend(tools, vc_tool, 'OutputFile', out_file, only_if_unset=True)
+ # Add defines.
+ _ToolAppend(tools, 'VCCLCompilerTool', 'PreprocessorDefinitions', defines)
+ _ToolAppend(tools, 'VCResourceCompilerTool', 'PreprocessorDefinitions',
+ defines)
+ # Change program database directory to prevent collisions.
+ _ToolAppend(tools, 'VCCLCompilerTool', 'ProgramDataBaseFileName',
+ '$(IntDir)$(ProjectName)\\vc80.pdb', only_if_unset=True)
+ # Add disabled warnings.
+ _ToolAppend(tools, 'VCCLCompilerTool',
+ 'DisableSpecificWarnings', disabled_warnings)
+ # Add Pre-build.
+ _ToolAppend(tools, 'VCPreBuildEventTool', 'CommandLine', prebuild)
+ # Add Post-build.
+ _ToolAppend(tools, 'VCPostBuildEventTool', 'CommandLine', postbuild)
+ # Turn on precompiled headers if appropriate.
+ if precompiled_header:
+ precompiled_header = os.path.split(precompiled_header)[1]
+ _ToolAppend(tools, 'VCCLCompilerTool', 'UsePrecompiledHeader', '2')
+ _ToolAppend(tools, 'VCCLCompilerTool',
+ 'PrecompiledHeaderThrough', precompiled_header)
+ _ToolAppend(tools, 'VCCLCompilerTool',
+ 'ForcedIncludeFiles', precompiled_header)
+ # Loadable modules don't generate import libraries;
+ # tell dependent projects to not expect one.
+ if spec['type'] == 'loadable_module':
+ _ToolAppend(tools, 'VCLinkerTool', 'IgnoreImportLibrary', 'true')
+ # Set the module definition file if any.
+ if def_file:
+ _ToolAppend(tools, 'VCLinkerTool', 'ModuleDefinitionFile', def_file)
+
+ _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name)
+
+
+def _GetIncludeDirs(config):
+ """Returns the list of directories to be used for #include directives.
+
+ Arguments:
+ config: The dictionary that defines the special processing to be done
+ for this configuration.
+ Returns:
+ The list of directory paths.
+ """
+ # TODO(bradnelson): include_dirs should really be flexible enough not to
+ # require this sort of thing.
+ include_dirs = (
+ config.get('include_dirs', []) +
+ config.get('msvs_system_include_dirs', []))
+ midl_include_dirs = (
+ config.get('midl_include_dirs', []) +
+ config.get('msvs_system_include_dirs', []))
+ resource_include_dirs = config.get('resource_include_dirs', include_dirs)
+ include_dirs = _FixPaths(include_dirs)
+ midl_include_dirs = _FixPaths(midl_include_dirs)
+ resource_include_dirs = _FixPaths(resource_include_dirs)
+ return include_dirs, midl_include_dirs, resource_include_dirs
+
+
+def _GetLibraryDirs(config):
+ """Returns the list of directories to be used for library search paths.
+
+ Arguments:
+ config: The dictionary that defines the special processing to be done
+ for this configuration.
+ Returns:
+ The list of directory paths.
+ """
+
+ library_dirs = config.get('library_dirs', [])
+ library_dirs = _FixPaths(library_dirs)
+ return library_dirs
+
+
+def _GetLibraries(spec):
+ """Returns the list of libraries for this configuration.
+
+ Arguments:
+ spec: The target dictionary containing the properties of the target.
+ Returns:
+ The list of directory paths.
+ """
+ libraries = spec.get('libraries', [])
+ # Strip out -l, as it is not used on windows (but is needed so we can pass
+ # in libraries that are assumed to be in the default library path).
+ # Also remove duplicate entries, leaving only the last duplicate, while
+ # preserving order.
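+  # For example, a hypothetical ['-lfoo', 'bar.lib', 'foo'] collapses to
+  # ['bar.lib', 'foo.lib'].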
+ found = OrderedSet()
+ unique_libraries_list = []
+ for entry in reversed(libraries):
+ library = re.sub(r'^\-l', '', entry)
+ if not os.path.splitext(library)[1]:
+ library += '.lib'
+ if library not in found:
+ found.add(library)
+ unique_libraries_list.append(library)
+ unique_libraries_list.reverse()
+ return unique_libraries_list
+
+
+def _GetOutputFilePathAndTool(spec, msbuild):
+ """Returns the path and tool to use for this target.
+
+ Figures out the path of the file this spec will create and the name of
+ the VC tool that will create it.
+
+ Arguments:
+    spec: The target dictionary containing the properties of the target.
+    msbuild: True when generating for an MSBuild (vcxproj) project.
+ Returns:
+ A triple of (file path, name of the vc tool, name of the msbuild tool)
+ """
+ # Select a name for the output file.
+ out_file = ''
+ vc_tool = ''
+ msbuild_tool = ''
+ output_file_map = {
+ 'executable': ('VCLinkerTool', 'Link', '$(OutDir)', '.exe'),
+ 'shared_library': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
+ 'loadable_module': ('VCLinkerTool', 'Link', '$(OutDir)', '.dll'),
+ 'windows_driver': ('VCLinkerTool', 'Link', '$(OutDir)', '.sys'),
+ 'static_library': ('VCLibrarianTool', 'Lib', '$(OutDir)lib\\', '.lib'),
+ }
+ output_file_props = output_file_map.get(spec['type'])
+ if output_file_props and int(spec.get('msvs_auto_output_file', 1)):
+ vc_tool, msbuild_tool, out_dir, suffix = output_file_props
+ if spec.get('standalone_static_library', 0):
+ out_dir = '$(OutDir)'
+ out_dir = spec.get('product_dir', out_dir)
+ product_extension = spec.get('product_extension')
+ if product_extension:
+ suffix = '.' + product_extension
+ elif msbuild:
+ suffix = '$(TargetExt)'
+ prefix = spec.get('product_prefix', '')
+ product_name = spec.get('product_name', '$(ProjectName)')
+ out_file = ntpath.join(out_dir, prefix + product_name + suffix)
+ return out_file, vc_tool, msbuild_tool
+
+
+def _GetOutputTargetExt(spec):
+ """Returns the extension for this target, including the dot
+
+ If product_extension is specified, set target_extension to this to avoid
+ MSB8012, returns None otherwise. Ignores any target_extension settings in
+ the input files.
+
+ Arguments:
+ spec: The target dictionary containing the properties of the target.
+ Returns:
+ A string with the extension, or None
+ """
+ target_extension = spec.get('product_extension')
+ if target_extension:
+ return '.' + target_extension
+ return None
+
+
+def _GetDefines(config):
+ """Returns the list of preprocessor definitions for this configuation.
+
+ Arguments:
+ config: The dictionary that defines the special processing to be done
+ for this configuration.
+ Returns:
+ The list of preprocessor definitions.
+ """
+ defines = []
+ for d in config.get('defines', []):
+ if type(d) == list:
+ fd = '='.join([str(dpart) for dpart in d])
+ else:
+ fd = str(d)
+ defines.append(fd)
+ return defines
+
+
+def _GetDisabledWarnings(config):
+ return [str(i) for i in config.get('msvs_disabled_warnings', [])]
+
+
+def _GetModuleDefinition(spec):
+ def_file = ''
+ if spec['type'] in ['shared_library', 'loadable_module', 'executable',
+ 'windows_driver']:
+ def_files = [s for s in spec.get('sources', []) if s.endswith('.def')]
+ if len(def_files) == 1:
+ def_file = _FixPath(def_files[0])
+ elif def_files:
+ raise ValueError(
+ 'Multiple module definition files in one target, target %s lists '
+ 'multiple .def files: %s' % (
+ spec['target_name'], ' '.join(def_files)))
+ return def_file
+
+
+def _ConvertToolsToExpectedForm(tools):
+ """Convert tools to a form expected by Visual Studio.
+
+ Arguments:
+ tools: A dictionary of settings; the tool name is the key.
+ Returns:
+ A list of Tool objects.
+ """
+ tool_list = []
+ for tool, settings in tools.items():
+ # Collapse settings with lists.
+ settings_fixed = {}
+ for setting, value in settings.items():
+ if type(value) == list:
+ if ((tool == 'VCLinkerTool' and
+ setting == 'AdditionalDependencies') or
+ setting == 'AdditionalOptions'):
+ settings_fixed[setting] = ' '.join(value)
+ else:
+ settings_fixed[setting] = ';'.join(value)
+ else:
+ settings_fixed[setting] = value
+ # Add in this tool.
+ tool_list.append(MSVSProject.Tool(tool, settings_fixed))
+ return tool_list
+
+
+def _AddConfigurationToMSVS(p, spec, tools, config, config_type, config_name):
+ """Add to the project file the configuration specified by config.
+
+ Arguments:
+ p: The target project being generated.
+ spec: the target project dict.
+ tools: A dictionary of settings; the tool name is the key.
+ config: The dictionary that defines the special processing to be done
+ for this configuration.
+ config_type: The configuration type, a number as defined by Microsoft.
+ config_name: The name of the configuration.
+ """
+ attributes = _GetMSVSAttributes(spec, config, config_type)
+ # Add in this configuration.
+ tool_list = _ConvertToolsToExpectedForm(tools)
+ p.AddConfig(_ConfigFullName(config_name, config),
+ attrs=attributes, tools=tool_list)
+
+
+def _GetMSVSAttributes(spec, config, config_type):
+ # Prepare configuration attributes.
+ prepared_attrs = {}
+ source_attrs = config.get('msvs_configuration_attributes', {})
+ for a in source_attrs:
+ prepared_attrs[a] = source_attrs[a]
+ # Add props files.
+ vsprops_dirs = config.get('msvs_props', [])
+ vsprops_dirs = _FixPaths(vsprops_dirs)
+ if vsprops_dirs:
+ prepared_attrs['InheritedPropertySheets'] = ';'.join(vsprops_dirs)
+ # Set configuration type.
+ prepared_attrs['ConfigurationType'] = config_type
+ output_dir = prepared_attrs.get('OutputDirectory',
+ '$(SolutionDir)$(ConfigurationName)')
+ prepared_attrs['OutputDirectory'] = _FixPath(output_dir) + '\\'
+ if 'IntermediateDirectory' not in prepared_attrs:
+ intermediate = '$(ConfigurationName)\\obj\\$(ProjectName)'
+ prepared_attrs['IntermediateDirectory'] = _FixPath(intermediate) + '\\'
+ else:
+ intermediate = _FixPath(prepared_attrs['IntermediateDirectory']) + '\\'
+ intermediate = MSVSSettings.FixVCMacroSlashes(intermediate)
+ prepared_attrs['IntermediateDirectory'] = intermediate
+ return prepared_attrs
+
+
+def _AddNormalizedSources(sources_set, sources_array):
+ sources_set.update(_NormalizedSource(s) for s in sources_array)
+
+
+def _PrepareListOfSources(spec, generator_flags, gyp_file):
+ """Prepare list of sources and excluded sources.
+
+ Besides the sources specified directly in the spec, adds the gyp file so
+ that a change to it will cause a re-compile. Also adds appropriate sources
+ for actions and copies. Assumes later stage will un-exclude files which
+ have custom build steps attached.
+
+ Arguments:
+ spec: The target dictionary containing the properties of the target.
+ gyp_file: The name of the gyp file.
+ Returns:
+ A pair of (list of sources, list of excluded sources).
+ The sources will be relative to the gyp file.
+ """
+ sources = OrderedSet()
+ _AddNormalizedSources(sources, spec.get('sources', []))
+ excluded_sources = OrderedSet()
+ # Add in the gyp file.
+ if not generator_flags.get('standalone'):
+ sources.add(gyp_file)
+
+ # Add in 'action' inputs and outputs.
+ for a in spec.get('actions', []):
+ inputs = a['inputs']
+ inputs = [_NormalizedSource(i) for i in inputs]
+ # Add all inputs to sources and excluded sources.
+ inputs = OrderedSet(inputs)
+ sources.update(inputs)
+ if not spec.get('msvs_external_builder'):
+ excluded_sources.update(inputs)
+ if int(a.get('process_outputs_as_sources', False)):
+ _AddNormalizedSources(sources, a.get('outputs', []))
+ # Add in 'copies' inputs and outputs.
+ for cpy in spec.get('copies', []):
+ _AddNormalizedSources(sources, cpy.get('files', []))
+ return (sources, excluded_sources)
+
+
+def _AdjustSourcesAndConvertToFilterHierarchy(
+ spec, options, gyp_dir, sources, excluded_sources, list_excluded, version):
+ """Adjusts the list of sources and excluded sources.
+
+ Also converts the sets to lists.
+
+ Arguments:
+ spec: The target dictionary containing the properties of the target.
+ options: Global generator options.
+ gyp_dir: The path to the gyp file being processed.
+ sources: A set of sources to be included for this project.
+ excluded_sources: A set of sources to be excluded for this project.
+ version: A MSVSVersion object.
+ Returns:
+ A trio of (list of sources, list of excluded sources,
+ path of excluded IDL file)
+ """
+ # Exclude excluded sources coming into the generator.
+ excluded_sources.update(OrderedSet(spec.get('sources_excluded', [])))
+ # Add excluded sources into sources for good measure.
+ sources.update(excluded_sources)
+ # Convert to proper windows form.
+ # NOTE: sources goes from being a set to a list here.
+ # NOTE: excluded_sources goes from being a set to a list here.
+ sources = _FixPaths(sources)
+ # Convert to proper windows form.
+ excluded_sources = _FixPaths(excluded_sources)
+
+ excluded_idl = _IdlFilesHandledNonNatively(spec, sources)
+
+ precompiled_related = _GetPrecompileRelatedFiles(spec)
+ # Find the excluded ones, minus the precompiled header related ones.
+ fully_excluded = [i for i in excluded_sources if i not in precompiled_related]
+
+ # Convert to folders and the right slashes.
+ sources = [i.split('\\') for i in sources]
+ sources = _ConvertSourcesToFilterHierarchy(sources, excluded=fully_excluded,
+ list_excluded=list_excluded,
+ msvs_version=version)
+
+ # Prune filters with a single child to flatten ugly directory structures
+ # such as ../../src/modules/module1 etc.
+ if version.UsesVcxproj():
+ while all([isinstance(s, MSVSProject.Filter) for s in sources]) \
+ and len(set([s.name for s in sources])) == 1:
+ assert all([len(s.contents) == 1 for s in sources])
+ sources = [s.contents[0] for s in sources]
+ else:
+ while len(sources) == 1 and isinstance(sources[0], MSVSProject.Filter):
+ sources = sources[0].contents
+
+ return sources, excluded_sources, excluded_idl
+
+
+def _IdlFilesHandledNonNatively(spec, sources):
+ # If any non-native rules use 'idl' as an extension exclude idl files.
+ # Gather a list here to use later.
+ using_idl = False
+ for rule in spec.get('rules', []):
+ if rule['extension'] == 'idl' and int(rule.get('msvs_external_rule', 0)):
+ using_idl = True
+ break
+ if using_idl:
+ excluded_idl = [i for i in sources if i.endswith('.idl')]
+ else:
+ excluded_idl = []
+ return excluded_idl
+
+
+def _GetPrecompileRelatedFiles(spec):
+ # Gather a list of precompiled header related sources.
+ precompiled_related = []
+ for _, config in spec['configurations'].items():
+ for k in precomp_keys:
+ f = config.get(k)
+ if f:
+ precompiled_related.append(_FixPath(f))
+ return precompiled_related
+
+
+def _ExcludeFilesFromBeingBuilt(p, spec, excluded_sources, excluded_idl,
+ list_excluded):
+ exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
+ for file_name, excluded_configs in exclusions.items():
+ if (not list_excluded and
+ len(excluded_configs) == len(spec['configurations'])):
+ # If we're not listing excluded files, then they won't appear in the
+ # project, so don't try to configure them to be excluded.
+ pass
+ else:
+ for config_name, config in excluded_configs:
+ p.AddFileConfig(file_name, _ConfigFullName(config_name, config),
+ {'ExcludedFromBuild': 'true'})
+
+
+def _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl):
+ exclusions = {}
+ # Exclude excluded sources from being built.
+ for f in excluded_sources:
+ excluded_configs = []
+ for config_name, config in spec['configurations'].items():
+ precomped = [_FixPath(config.get(i, '')) for i in precomp_keys]
+ # Don't do this for ones that are precompiled header related.
+ if f not in precomped:
+ excluded_configs.append((config_name, config))
+ exclusions[f] = excluded_configs
+ # If any non-native rules use 'idl' as an extension exclude idl files.
+ # Exclude them now.
+ for f in excluded_idl:
+ excluded_configs = []
+ for config_name, config in spec['configurations'].items():
+ excluded_configs.append((config_name, config))
+ exclusions[f] = excluded_configs
+ return exclusions
+
+
+def _AddToolFilesToMSVS(p, spec):
+ # Add in tool files (rules).
+ tool_files = OrderedSet()
+ for _, config in spec['configurations'].items():
+ for f in config.get('msvs_tool_files', []):
+ tool_files.add(f)
+ for f in tool_files:
+ p.AddToolFile(f)
+
+
+def _HandlePreCompiledHeaders(p, sources, spec):
+ # Pre-compiled header source stubs need a different compiler flag
+ # (generate precompiled header) and any source file not of the same
+ # kind (i.e. C vs. C++) as the precompiled header source stub needs
+ # to have use of precompiled headers disabled.
+ extensions_excluded_from_precompile = []
+ for config_name, config in spec['configurations'].items():
+ source = config.get('msvs_precompiled_source')
+ if source:
+ source = _FixPath(source)
+      # UsePrecompiledHeader=1 means 'create precompiled header'.
+ tool = MSVSProject.Tool('VCCLCompilerTool',
+ {'UsePrecompiledHeader': '1'})
+ p.AddFileConfig(source, _ConfigFullName(config_name, config),
+ {}, tools=[tool])
+ basename, extension = os.path.splitext(source)
+ if extension == '.c':
+ extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
+ else:
+ extensions_excluded_from_precompile = ['.c']
+ def DisableForSourceTree(source_tree):
+ for source in source_tree:
+ if isinstance(source, MSVSProject.Filter):
+ DisableForSourceTree(source.contents)
+ else:
+ basename, extension = os.path.splitext(source)
+ if extension in extensions_excluded_from_precompile:
+ for config_name, config in spec['configurations'].items():
+ tool = MSVSProject.Tool('VCCLCompilerTool',
+ {'UsePrecompiledHeader': '0',
+ 'ForcedIncludeFiles': '$(NOINHERIT)'})
+ p.AddFileConfig(_FixPath(source),
+ _ConfigFullName(config_name, config),
+ {}, tools=[tool])
+ # Do nothing if there was no precompiled source.
+ if extensions_excluded_from_precompile:
+ DisableForSourceTree(sources)
+
+
+def _AddActions(actions_to_add, spec, relative_path_of_gyp_file):
+ # Add actions.
+ actions = spec.get('actions', [])
+ # Don't setup_env every time. When all the actions are run together in one
+ # batch file in VS, the PATH will grow too long.
+ # Membership in this set means that the cygwin environment has been set up,
+ # and does not need to be set up again.
+ have_setup_env = set()
+ for a in actions:
+ # Attach actions to the gyp file if nothing else is there.
+ inputs = a.get('inputs') or [relative_path_of_gyp_file]
+ attached_to = inputs[0]
+ need_setup_env = attached_to not in have_setup_env
+ cmd = _BuildCommandLineForRule(spec, a, has_input_path=False,
+ do_setup_env=need_setup_env)
+ have_setup_env.add(attached_to)
+ # Add the action.
+ _AddActionStep(actions_to_add,
+ inputs=inputs,
+ outputs=a.get('outputs', []),
+ description=a.get('message', a['action_name']),
+ command=cmd)
+
+
+def _WriteMSVSUserFile(project_path, version, spec):
+ # Add run_as and test targets.
+ if 'run_as' in spec:
+ run_as = spec['run_as']
+ action = run_as.get('action', [])
+ environment = run_as.get('environment', [])
+ working_directory = run_as.get('working_directory', '.')
+ elif int(spec.get('test', 0)):
+ action = ['$(TargetPath)', '--gtest_print_time']
+ environment = []
+ working_directory = '.'
+ else:
+ return # Nothing to add
+ # Write out the user file.
+ user_file = _CreateMSVSUserFile(project_path, version, spec)
+ for config_name, c_data in spec['configurations'].items():
+ user_file.AddDebugSettings(_ConfigFullName(config_name, c_data),
+ action, environment, working_directory)
+ user_file.WriteIfChanged()
+
+
+def _AddCopies(actions_to_add, spec):
+ copies = _GetCopies(spec)
+ for inputs, outputs, cmd, description in copies:
+ _AddActionStep(actions_to_add, inputs=inputs, outputs=outputs,
+ description=description, command=cmd)
+
+
+def _GetCopies(spec):
+ copies = []
+ # Add copies.
+ for cpy in spec.get('copies', []):
+ for src in cpy.get('files', []):
+ dst = os.path.join(cpy['destination'], os.path.basename(src))
+ # _AddCustomBuildToolForMSVS() will call _FixPath() on the inputs and
+ # outputs, so do the same for our generated command line.
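+      # For example, a hypothetical trailing-slash source like 'res/img/'
+      # yields an xcopy command that recreates the 'img' directory under the
+      # destination.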
+ if src.endswith('/'):
+ src_bare = src[:-1]
+ base_dir = posixpath.split(src_bare)[0]
+ outer_dir = posixpath.split(src_bare)[1]
+ fixed_dst = _FixPath(dst)
+ full_dst = '"%s\\%s\\"' % (fixed_dst, outer_dir)
+ cmd = 'mkdir %s 2>nul & cd "%s" && xcopy /e /f /y "%s" %s' % (
+ full_dst, _FixPath(base_dir), outer_dir, full_dst)
+ copies.append(([src], ['dummy_copies', dst], cmd,
+ 'Copying %s to %s' % (src, fixed_dst)))
+ else:
+ fix_dst = _FixPath(cpy['destination'])
+ cmd = 'mkdir "%s" 2>nul & set ERRORLEVEL=0 & copy /Y "%s" "%s"' % (
+ fix_dst, _FixPath(src), _FixPath(dst))
+ copies.append(([src], [dst], cmd, 'Copying %s to %s' % (src, fix_dst)))
+ return copies
+
+
+def _GetPathDict(root, path):
+ # |path| will eventually be empty (in the recursive calls) if it was initially
+ # relative; otherwise it will eventually end up as '\', 'D:\', etc.
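+  # For example, on Windows _GetPathDict(root, 'a\\b') returns root['a']['b'],
+  # creating the intermediate dicts on first use.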
+ if not path or path.endswith(os.sep):
+ return root
+ parent, folder = os.path.split(path)
+ parent_dict = _GetPathDict(root, parent)
+ if folder not in parent_dict:
+ parent_dict[folder] = dict()
+ return parent_dict[folder]
+
+
+def _DictsToFolders(base_path, bucket, flat):
+ # Convert to folders recursively.
+ children = []
+ for folder, contents in bucket.items():
+ if type(contents) == dict:
+ folder_children = _DictsToFolders(os.path.join(base_path, folder),
+ contents, flat)
+ if flat:
+ children += folder_children
+ else:
+ folder_children = MSVSNew.MSVSFolder(os.path.join(base_path, folder),
+ name='(' + folder + ')',
+ entries=folder_children)
+ children.append(folder_children)
+ else:
+ children.append(contents)
+ return children
+
+
+def _CollapseSingles(parent, node):
+  # Recursively explore the tree of dicts looking for projects which are
+ # the sole item in a folder which has the same name as the project. Bring
+ # such projects up one level.
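+  # For example, a hypothetical {'foo': {'foo.vcproj': proj}} collapses to
+  # {'foo': proj}.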
+ if (type(node) == dict and
+ len(node) == 1 and
+ next(iter(node)) == parent + '.vcproj'):
+ return node[next(iter(node))]
+ if type(node) != dict:
+ return node
+ for child in node:
+ node[child] = _CollapseSingles(child, node[child])
+ return node
+
+
+def _GatherSolutionFolders(sln_projects, project_objects, flat):
+ root = {}
+ # Convert into a tree of dicts on path.
+ for p in sln_projects:
+ gyp_file, target = gyp.common.ParseQualifiedTarget(p)[0:2]
+ gyp_dir = os.path.dirname(gyp_file)
+ path_dict = _GetPathDict(root, gyp_dir)
+ path_dict[target + '.vcproj'] = project_objects[p]
+ # Walk down from the top until we hit a folder that has more than one entry.
+ # In practice, this strips the top-level "src/" dir from the hierarchy in
+ # the solution.
+ while len(root) == 1 and type(root[next(iter(root))]) == dict:
+ root = root[next(iter(root))]
+ # Collapse singles.
+ root = _CollapseSingles('', root)
+ # Merge buckets until everything is a root entry.
+ return _DictsToFolders('', root, flat)
+
+
+def _GetPathOfProject(qualified_target, spec, options, msvs_version):
+ default_config = _GetDefaultConfiguration(spec)
+ proj_filename = default_config.get('msvs_existing_vcproj')
+ if not proj_filename:
+ proj_filename = (spec['target_name'] + options.suffix +
+ msvs_version.ProjectExtension())
+
+ build_file = gyp.common.BuildFile(qualified_target)
+ proj_path = os.path.join(os.path.dirname(build_file), proj_filename)
+ fix_prefix = None
+ if options.generator_output:
+ project_dir_path = os.path.dirname(os.path.abspath(proj_path))
+ proj_path = os.path.join(options.generator_output, proj_path)
+ fix_prefix = gyp.common.RelativePath(project_dir_path,
+ os.path.dirname(proj_path))
+ return proj_path, fix_prefix
+
+
+def _GetPlatformOverridesOfProject(spec):
+ # Prepare a dict indicating which project configurations are used for which
+ # solution configurations for this target.
+ config_platform_overrides = {}
+ for config_name, c in spec['configurations'].items():
+ config_fullname = _ConfigFullName(config_name, c)
+ platform = c.get('msvs_target_platform', _ConfigPlatform(c))
+ fixed_config_fullname = '%s|%s' % (
+ _ConfigBaseName(config_name, _ConfigPlatform(c)), platform)
+ config_platform_overrides[config_fullname] = fixed_config_fullname
+ return config_platform_overrides
+
+
+def _CreateProjectObjects(target_list, target_dicts, options, msvs_version):
+ """Create a MSVSProject object for the targets found in target list.
+
+ Arguments:
+ target_list: the list of targets to generate project objects for.
+ target_dicts: the dictionary of specifications.
+ options: global generator options.
+ msvs_version: the MSVSVersion object.
+ Returns:
+ A set of created projects, keyed by target.
+ """
+ global fixpath_prefix
+ # Generate each project.
+ projects = {}
+ for qualified_target in target_list:
+ spec = target_dicts[qualified_target]
+ if spec['toolset'] != 'target':
+ raise GypError(
+ 'Multiple toolsets not supported in msvs build (target %s)' %
+ qualified_target)
+ proj_path, fixpath_prefix = _GetPathOfProject(qualified_target, spec,
+ options, msvs_version)
+ guid = _GetGuidOfProject(proj_path, spec)
+ overrides = _GetPlatformOverridesOfProject(spec)
+ build_file = gyp.common.BuildFile(qualified_target)
+ # Create object for this project.
+ obj = MSVSNew.MSVSProject(
+ proj_path,
+ name=spec['target_name'],
+ guid=guid,
+ spec=spec,
+ build_file=build_file,
+ config_platform_overrides=overrides,
+ fixpath_prefix=fixpath_prefix)
+ # Set project toolset if any (MS build only)
+ if msvs_version.UsesVcxproj():
+ obj.set_msbuild_toolset(
+ _GetMsbuildToolsetOfProject(proj_path, spec, msvs_version))
+ projects[qualified_target] = obj
+  # Set all the dependencies, but not if we are using an external builder like
+  # ninja.
+ for project in projects.values():
+ if not project.spec.get('msvs_external_builder'):
+ deps = project.spec.get('dependencies', [])
+ deps = [projects[d] for d in deps]
+ project.set_dependencies(deps)
+ return projects
+
+
+def _InitNinjaFlavor(params, target_list, target_dicts):
+ """Initialize targets for the ninja flavor.
+
+ This sets up the necessary variables in the targets to generate msvs projects
+ that use ninja as an external builder. The variables in the spec are only set
+ if they have not been set. This allows individual specs to override the
+ default values initialized here.
+ Arguments:
+ params: Params provided to the generator.
+ target_list: List of target pairs: 'base/base.gyp:base'.
+ target_dicts: Dict of target properties keyed on target pair.
+ """
+ for qualified_target in target_list:
+ spec = target_dicts[qualified_target]
+ if spec.get('msvs_external_builder'):
+ # The spec explicitly defined an external builder, so don't change it.
+ continue
+
+ path_to_ninja = spec.get('msvs_path_to_ninja', 'ninja.exe')
+
+ spec['msvs_external_builder'] = 'ninja'
+ if not spec.get('msvs_external_builder_out_dir'):
+ gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
+ gyp_dir = os.path.dirname(gyp_file)
+ configuration = '$(Configuration)'
+ if params.get('target_arch') == 'x64':
+ configuration += '_x64'
+ spec['msvs_external_builder_out_dir'] = os.path.join(
+ gyp.common.RelativePath(params['options'].toplevel_dir, gyp_dir),
+ ninja_generator.ComputeOutputDir(params),
+ configuration)
+ if not spec.get('msvs_external_builder_build_cmd'):
+ spec['msvs_external_builder_build_cmd'] = [
+ path_to_ninja,
+ '-C',
+ '$(OutDir)',
+ '$(ProjectName)',
+ ]
+ if not spec.get('msvs_external_builder_clean_cmd'):
+ spec['msvs_external_builder_clean_cmd'] = [
+ path_to_ninja,
+ '-C',
+ '$(OutDir)',
+ '-tclean',
+ '$(ProjectName)',
+ ]
+
+
+def CalculateVariables(default_variables, params):
+ """Generated variables that require params to be known."""
+
+ generator_flags = params.get('generator_flags', {})
+
+ # Select project file format version (if unset, default to auto detecting).
+ msvs_version = MSVSVersion.SelectVisualStudioVersion(
+ generator_flags.get('msvs_version', 'auto'))
+ # Stash msvs_version for later (so we don't have to probe the system twice).
+ params['msvs_version'] = msvs_version
+
+ # Set a variable so conditions can be based on msvs_version.
+ default_variables['MSVS_VERSION'] = msvs_version.ShortName()
+
+ # To determine processor word size on Windows, in addition to checking
+ # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
+  # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
+  # contains the actual word size of the system when running through WOW64).
+ if (os.environ.get('PROCESSOR_ARCHITECTURE', '').find('64') >= 0 or
+ os.environ.get('PROCESSOR_ARCHITEW6432', '').find('64') >= 0):
+ default_variables['MSVS_OS_BITS'] = 64
+ else:
+ default_variables['MSVS_OS_BITS'] = 32
+
+ if gyp.common.GetFlavor(params) == 'ninja':
+ default_variables['SHARED_INTERMEDIATE_DIR'] = '$(OutDir)gen'
+
+
+def PerformBuild(data, configurations, params):
+ options = params['options']
+ msvs_version = params['msvs_version']
+ devenv = os.path.join(msvs_version.path, 'Common7', 'IDE', 'devenv.com')
+
+ for build_file, build_file_dict in data.items():
+ (build_file_root, build_file_ext) = os.path.splitext(build_file)
+ if build_file_ext != '.gyp':
+ continue
+ sln_path = build_file_root + options.suffix + '.sln'
+ if options.generator_output:
+ sln_path = os.path.join(options.generator_output, sln_path)
+
+ for config in configurations:
+ arguments = [devenv, sln_path, '/Build', config]
+ print('Building [%s]: %s' % (config, arguments))
+      subprocess.check_call(arguments)
+
+
+def CalculateGeneratorInputInfo(params):
+ if params.get('flavor') == 'ninja':
+ toplevel = params['options'].toplevel_dir
+ qualified_out_dir = os.path.normpath(os.path.join(
+ toplevel, ninja_generator.ComputeOutputDir(params),
+ 'gypfiles-msvs-ninja'))
+
+ global generator_filelist_paths
+ generator_filelist_paths = {
+ 'toplevel': toplevel,
+ 'qualified_out_dir': qualified_out_dir,
+ }
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ """Generate .sln and .vcproj files.
+
+ This is the entry point for this generator.
+ Arguments:
+ target_list: List of target pairs: 'base/base.gyp:base'.
+ target_dicts: Dict of target properties keyed on target pair.
+    data: Dictionary containing per .gyp data.
+    params: Dictionary of global generator parameters.
+ """
+ global fixpath_prefix
+
+ options = params['options']
+
+  # Get the project file format version back out of where we stashed it in
+  # CalculateVariables.
+ msvs_version = params['msvs_version']
+
+ generator_flags = params.get('generator_flags', {})
+
+ # Optionally shard targets marked with 'msvs_shard': SHARD_COUNT.
+ (target_list, target_dicts) = MSVSUtil.ShardTargets(target_list, target_dicts)
+
+ # Optionally use the large PDB workaround for targets marked with
+ # 'msvs_large_pdb': 1.
+ (target_list, target_dicts) = MSVSUtil.InsertLargePdbShims(
+ target_list, target_dicts, generator_default_variables)
+
+ # Optionally configure each spec to use ninja as the external builder.
+ if params.get('flavor') == 'ninja':
+ _InitNinjaFlavor(params, target_list, target_dicts)
+
+ # Prepare the set of configurations.
+ configs = set()
+ for qualified_target in target_list:
+ spec = target_dicts[qualified_target]
+ for config_name, config in spec['configurations'].items():
+ configs.add(_ConfigFullName(config_name, config))
+ configs = list(configs)
+
+ # Figure out all the projects that will be generated and their guids
+ project_objects = _CreateProjectObjects(target_list, target_dicts, options,
+ msvs_version)
+
+ # Generate each project.
+ missing_sources = []
+ for project in project_objects.values():
+ fixpath_prefix = project.fixpath_prefix
+ missing_sources.extend(_GenerateProject(project, options, msvs_version,
+ generator_flags))
+ fixpath_prefix = None
+
+ for build_file in data:
+ # Validate build_file extension
+ if not build_file.endswith('.gyp'):
+ continue
+ sln_path = os.path.splitext(build_file)[0] + options.suffix + '.sln'
+ if options.generator_output:
+ sln_path = os.path.join(options.generator_output, sln_path)
+ # Get projects in the solution, and their dependents.
+ sln_projects = gyp.common.BuildFileTargets(target_list, build_file)
+ sln_projects += gyp.common.DeepDependencyTargets(target_dicts, sln_projects)
+ # Create folder hierarchy.
+ root_entries = _GatherSolutionFolders(
+ sln_projects, project_objects, flat=msvs_version.FlatSolution())
+ # Create solution.
+ sln = MSVSNew.MSVSSolution(sln_path,
+ entries=root_entries,
+ variants=configs,
+ websiteProperties=False,
+ version=msvs_version)
+ sln.Write()
+
+ if missing_sources:
+ error_message = "Missing input files:\n" + \
+ '\n'.join(set(missing_sources))
+ if generator_flags.get('msvs_error_on_missing_sources', False):
+ raise GypError(error_message)
+ else:
+ print("Warning: " + error_message)
+
+
+def _GenerateMSBuildFiltersFile(filters_path, source_files,
+ rule_dependencies, extension_to_rule_name):
+ """Generate the filters file.
+
+ This file is used by Visual Studio to organize the presentation of source
+ files into folders.
+
+ Arguments:
+ filters_path: The path of the file to be created.
+ source_files: The hierarchical structure of all the sources.
+ rule_dependencies: The set of additional dependencies used by build rules.
+ extension_to_rule_name: A dictionary mapping file extensions to rules.
+ """
+ filter_group = []
+ source_group = []
+ _AppendFiltersForMSBuild('', source_files, rule_dependencies,
+ extension_to_rule_name, filter_group, source_group)
+ if filter_group:
+ content = ['Project',
+ {'ToolsVersion': '4.0',
+ 'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
+ },
+ ['ItemGroup'] + filter_group,
+ ['ItemGroup'] + source_group
+ ]
+ easy_xml.WriteXmlIfChanged(content, filters_path, pretty=True, win32=True)
+ elif os.path.exists(filters_path):
+ # We don't need this filter anymore. Delete the old filter file.
+ os.unlink(filters_path)
+
+
+def _AppendFiltersForMSBuild(parent_filter_name, sources, rule_dependencies,
+ extension_to_rule_name,
+ filter_group, source_group):
+ """Creates the list of filters and sources to be added in the filter file.
+
+ Args:
+ parent_filter_name: The name of the filter under which the sources are
+ found.
+ sources: The hierarchy of filters and sources to process.
+ rule_dependencies: The set of additional dependencies used by build rules.
+ extension_to_rule_name: A dictionary mapping file extensions to rules.
+ filter_group: The list to which filter entries will be appended.
+ source_group: The list to which source entries will be appended.
+ """
+ for source in sources:
+ if isinstance(source, MSVSProject.Filter):
+ # We have a sub-filter. Create the name of that sub-filter.
+ if not parent_filter_name:
+ filter_name = source.name
+ else:
+ filter_name = '%s\\%s' % (parent_filter_name, source.name)
+ # Add the filter to the group.
+ filter_group.append(
+ ['Filter', {'Include': filter_name},
+ ['UniqueIdentifier', MSVSNew.MakeGuid(source.name)]])
+ # Recurse and add its dependents.
+ _AppendFiltersForMSBuild(filter_name, source.contents,
+ rule_dependencies, extension_to_rule_name,
+ filter_group, source_group)
+ else:
+ # It's a source. Create a source entry.
+ _, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
+ extension_to_rule_name)
+ source_entry = [element, {'Include': source}]
+ # Specify the filter it is part of, if any.
+ if parent_filter_name:
+ source_entry.append(['Filter', parent_filter_name])
+ source_group.append(source_entry)
+
+
+def _MapFileToMsBuildSourceType(source, rule_dependencies,
+ extension_to_rule_name):
+ """Returns the group and element type of the source file.
+
+ Arguments:
+ source: The source file name.
+ rule_dependencies: The set of additional dependencies used by build rules.
+ extension_to_rule_name: A dictionary mapping file extensions to rules.
+
+ Returns:
+ A pair of (group this file should be part of, the label of element)
+ """
+ _, ext = os.path.splitext(source)
+ if ext in extension_to_rule_name:
+ group = 'rule'
+ element = extension_to_rule_name[ext]
+ elif ext in ['.cc', '.cpp', '.c', '.cxx']:
+ group = 'compile'
+ element = 'ClCompile'
+ elif ext in ['.h', '.hxx']:
+ group = 'include'
+ element = 'ClInclude'
+ elif ext == '.rc':
+ group = 'resource'
+ element = 'ResourceCompile'
+ elif ext == '.asm':
+ group = 'masm'
+ element = 'MASM'
+ elif ext == '.idl':
+ group = 'midl'
+ element = 'Midl'
+ elif source in rule_dependencies:
+ group = 'rule_dependency'
+ element = 'CustomBuild'
+ else:
+ group = 'none'
+ element = 'None'
+ return (group, element)
+
+
+def _GenerateRulesForMSBuild(output_dir, options, spec,
+ sources, excluded_sources,
+ props_files_of_rules, targets_files_of_rules,
+ actions_to_add, rule_dependencies,
+ extension_to_rule_name):
+ # MSBuild rules are implemented using three files: an XML file, a .targets
+ # file and a .props file.
+ # See http://blogs.msdn.com/b/vcblog/archive/2010/04/21/quick-help-on-vs2010-custom-build-rule.aspx
+ # for more details.
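+ # For example (illustrative), native rules in a target named 'idl_gen' with
+ # an empty options.suffix produce idl_gen.props, idl_gen.targets and
+ # idl_gen.xml in the output directory.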
+ rules = spec.get('rules', [])
+ rules_native = [r for r in rules if not int(r.get('msvs_external_rule', 0))]
+ rules_external = [r for r in rules if int(r.get('msvs_external_rule', 0))]
+
+ msbuild_rules = []
+ for rule in rules_native:
+ # Skip a rule with no action and no inputs.
+ if 'action' not in rule and not rule.get('rule_sources', []):
+ continue
+ msbuild_rule = MSBuildRule(rule, spec)
+ msbuild_rules.append(msbuild_rule)
+ rule_dependencies.update(msbuild_rule.additional_dependencies.split(';'))
+ extension_to_rule_name[msbuild_rule.extension] = msbuild_rule.rule_name
+ if msbuild_rules:
+ base = spec['target_name'] + options.suffix
+ props_name = base + '.props'
+ targets_name = base + '.targets'
+ xml_name = base + '.xml'
+
+ props_files_of_rules.add(props_name)
+ targets_files_of_rules.add(targets_name)
+
+ props_path = os.path.join(output_dir, props_name)
+ targets_path = os.path.join(output_dir, targets_name)
+ xml_path = os.path.join(output_dir, xml_name)
+
+ _GenerateMSBuildRulePropsFile(props_path, msbuild_rules)
+ _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules)
+ _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules)
+
+ if rules_external:
+ _GenerateExternalRules(rules_external, output_dir, spec,
+ sources, options, actions_to_add)
+ _AdjustSourcesForRules(rules, sources, excluded_sources, True)
+
+
+class MSBuildRule(object):
+ """Used to store information used to generate an MSBuild rule.
+
+ Attributes:
+ rule_name: The rule name, sanitized to use in XML.
+ target_name: The name of the target.
+ after_targets: The name of the AfterTargets element.
+ before_targets: The name of the BeforeTargets element.
+ depends_on: The name of the DependsOn element.
+ compute_output: The name of the ComputeOutput element.
+ dirs_to_make: The name of the DirsToMake element.
+ inputs: The name of the _inputs element.
+ tlog: The name of the _tlog element.
+ extension: The extension this rule applies to.
+ description: The message displayed when this rule is invoked.
+ additional_dependencies: A string listing additional dependencies.
+ outputs: The outputs of this rule.
+ command: The command used to run the rule.
+ """
+
+ def __init__(self, rule, spec):
+ self.display_name = rule['rule_name']
+ # Ensure the rule name contains only word characters (letters, digits,
+ # underscore).
+ self.rule_name = re.sub(r'\W', '_', self.display_name)
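+ # e.g. (illustrative) a display_name of 'Foo Bar!' sanitizes to 'Foo_Bar_',
+ # from which the element names below are derived.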
+ # Create the various element names, following the example set by the
+ # Visual Studio 2008 to 2010 conversion. I don't know if VS2010
+ # is sensitive to the exact names.
+ self.target_name = '_' + self.rule_name
+ self.after_targets = self.rule_name + 'AfterTargets'
+ self.before_targets = self.rule_name + 'BeforeTargets'
+ self.depends_on = self.rule_name + 'DependsOn'
+ self.compute_output = 'Compute%sOutput' % self.rule_name
+ self.dirs_to_make = self.rule_name + 'DirsToMake'
+ self.inputs = self.rule_name + '_inputs'
+ self.tlog = self.rule_name + '_tlog'
+ self.extension = rule['extension']
+ if not self.extension.startswith('.'):
+ self.extension = '.' + self.extension
+
+ self.description = MSVSSettings.ConvertVCMacrosToMSBuild(
+ rule.get('message', self.rule_name))
+ old_additional_dependencies = _FixPaths(rule.get('inputs', []))
+ self.additional_dependencies = (
+ ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
+ for i in old_additional_dependencies]))
+ old_outputs = _FixPaths(rule.get('outputs', []))
+ self.outputs = ';'.join([MSVSSettings.ConvertVCMacrosToMSBuild(i)
+ for i in old_outputs])
+ old_command = _BuildCommandLineForRule(spec, rule, has_input_path=True,
+ do_setup_env=True)
+ self.command = MSVSSettings.ConvertVCMacrosToMSBuild(old_command)
+
+
+def _GenerateMSBuildRulePropsFile(props_path, msbuild_rules):
+ """Generate the .props file."""
+ content = ['Project',
+ {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'}]
+ for rule in msbuild_rules:
+ content.extend([
+ ['PropertyGroup',
+ {'Condition': "'$(%s)' == '' and '$(%s)' == '' and "
+ "'$(ConfigurationType)' != 'Makefile'" % (rule.before_targets,
+ rule.after_targets)
+ },
+ [rule.before_targets, 'Midl'],
+ [rule.after_targets, 'CustomBuild'],
+ ],
+ ['PropertyGroup',
+ [rule.depends_on,
+ {'Condition': "'$(ConfigurationType)' != 'Makefile'"},
+ '_SelectedFiles;$(%s)' % rule.depends_on
+ ],
+ ],
+ ['ItemDefinitionGroup',
+ [rule.rule_name,
+ ['CommandLineTemplate', rule.command],
+ ['Outputs', rule.outputs],
+ ['ExecutionDescription', rule.description],
+ ['AdditionalDependencies', rule.additional_dependencies],
+ ],
+ ]
+ ])
+ easy_xml.WriteXmlIfChanged(content, props_path, pretty=True, win32=True)
+
+
+def _GenerateMSBuildRuleTargetsFile(targets_path, msbuild_rules):
+ """Generate the .targets file."""
+ content = ['Project',
+ {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003'
+ }
+ ]
+ item_group = [
+ 'ItemGroup',
+ ['PropertyPageSchema',
+ {'Include': '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'}
+ ]
+ ]
+ for rule in msbuild_rules:
+ item_group.append(
+ ['AvailableItemName',
+ {'Include': rule.rule_name},
+ ['Targets', rule.target_name],
+ ])
+ content.append(item_group)
+
+ for rule in msbuild_rules:
+ content.append(
+ ['UsingTask',
+ {'TaskName': rule.rule_name,
+ 'TaskFactory': 'XamlTaskFactory',
+ 'AssemblyName': 'Microsoft.Build.Tasks.v4.0'
+ },
+ ['Task', '$(MSBuildThisFileDirectory)$(MSBuildThisFileName).xml'],
+ ])
+ for rule in msbuild_rules:
+ rule_name = rule.rule_name
+ target_outputs = '%%(%s.Outputs)' % rule_name
+ target_inputs = ('%%(%s.Identity);%%(%s.AdditionalDependencies);'
+ '$(MSBuildProjectFile)') % (rule_name, rule_name)
+ rule_inputs = '%%(%s.Identity)' % rule_name
+ extension_condition = ("'%(Extension)'=='.obj' or "
+ "'%(Extension)'=='.res' or "
+ "'%(Extension)'=='.rsc' or "
+ "'%(Extension)'=='.lib'")
+ remove_section = [
+ 'ItemGroup',
+ {'Condition': "'@(SelectedFiles)' != ''"},
+ [rule_name,
+ {'Remove': '@(%s)' % rule_name,
+ 'Condition': "'%(Identity)' != '@(SelectedFiles)'"
+ }
+ ]
+ ]
+ inputs_section = [
+ 'ItemGroup',
+ [rule.inputs, {'Include': '%%(%s.AdditionalDependencies)' % rule_name}]
+ ]
+ logging_section = [
+ 'ItemGroup',
+ [rule.tlog,
+ {'Include': '%%(%s.Outputs)' % rule_name,
+ 'Condition': ("'%%(%s.Outputs)' != '' and "
+ "'%%(%s.ExcludedFromBuild)' != 'true'" %
+ (rule_name, rule_name))
+ },
+ ['Source', "@(%s, '|')" % rule_name],
+ ['Inputs', "@(%s -> '%%(Fullpath)', ';')" % rule.inputs],
+ ],
+ ]
+ message_section = [
+ 'Message',
+ {'Importance': 'High',
+ 'Text': '%%(%s.ExecutionDescription)' % rule_name
+ }
+ ]
+ write_tlog_section = [
+ 'WriteLinesToFile',
+ {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
+ "'true'" % (rule.tlog, rule.tlog),
+ 'File': '$(IntDir)$(ProjectName).write.1.tlog',
+ 'Lines': "^%%(%s.Source);@(%s->'%%(Fullpath)')" % (rule.tlog,
+ rule.tlog)
+ }
+ ]
+ read_tlog_section = [
+ 'WriteLinesToFile',
+ {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
+ "'true'" % (rule.tlog, rule.tlog),
+ 'File': '$(IntDir)$(ProjectName).read.1.tlog',
+ 'Lines': "^%%(%s.Source);%%(%s.Inputs)" % (rule.tlog, rule.tlog)
+ }
+ ]
+ command_and_input_section = [
+ rule_name,
+ {'Condition': "'@(%s)' != '' and '%%(%s.ExcludedFromBuild)' != "
+ "'true'" % (rule_name, rule_name),
+ 'EchoOff': 'true',
+ 'StandardOutputImportance': 'High',
+ 'StandardErrorImportance': 'High',
+ 'CommandLineTemplate': '%%(%s.CommandLineTemplate)' % rule_name,
+ 'AdditionalOptions': '%%(%s.AdditionalOptions)' % rule_name,
+ 'Inputs': rule_inputs
+ }
+ ]
+ content.extend([
+ ['Target',
+ {'Name': rule.target_name,
+ 'BeforeTargets': '$(%s)' % rule.before_targets,
+ 'AfterTargets': '$(%s)' % rule.after_targets,
+ 'Condition': "'@(%s)' != ''" % rule_name,
+ 'DependsOnTargets': '$(%s);%s' % (rule.depends_on,
+ rule.compute_output),
+ 'Outputs': target_outputs,
+ 'Inputs': target_inputs
+ },
+ remove_section,
+ inputs_section,
+ logging_section,
+ message_section,
+ write_tlog_section,
+ read_tlog_section,
+ command_and_input_section,
+ ],
+ ['PropertyGroup',
+ ['ComputeLinkInputsTargets',
+ '$(ComputeLinkInputsTargets);',
+ '%s;' % rule.compute_output
+ ],
+ ['ComputeLibInputsTargets',
+ '$(ComputeLibInputsTargets);',
+ '%s;' % rule.compute_output
+ ],
+ ],
+ ['Target',
+ {'Name': rule.compute_output,
+ 'Condition': "'@(%s)' != ''" % rule_name
+ },
+ ['ItemGroup',
+ [rule.dirs_to_make,
+ {'Condition': "'@(%s)' != '' and "
+ "'%%(%s.ExcludedFromBuild)' != 'true'" % (rule_name, rule_name),
+ 'Include': '%%(%s.Outputs)' % rule_name
+ }
+ ],
+ ['Link',
+ {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
+ 'Condition': extension_condition
+ }
+ ],
+ ['Lib',
+ {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
+ 'Condition': extension_condition
+ }
+ ],
+ ['ImpLib',
+ {'Include': '%%(%s.Identity)' % rule.dirs_to_make,
+ 'Condition': extension_condition
+ }
+ ],
+ ],
+ ['MakeDir',
+ {'Directories': ("@(%s->'%%(RootDir)%%(Directory)')" %
+ rule.dirs_to_make)
+ }
+ ]
+ ],
+ ])
+ easy_xml.WriteXmlIfChanged(content, targets_path, pretty=True, win32=True)
+
+
+def _GenerateMSBuildRuleXmlFile(xml_path, msbuild_rules):
+ # Generate the .xml file
+ content = [
+ 'ProjectSchemaDefinitions',
+ {'xmlns': ('clr-namespace:Microsoft.Build.Framework.XamlTypes;'
+ 'assembly=Microsoft.Build.Framework'),
+ 'xmlns:x': 'http://schemas.microsoft.com/winfx/2006/xaml',
+ 'xmlns:sys': 'clr-namespace:System;assembly=mscorlib',
+ 'xmlns:transformCallback':
+ 'Microsoft.Cpp.Dev10.ConvertPropertyCallback'
+ }
+ ]
+ for rule in msbuild_rules:
+ content.extend([
+ ['Rule',
+ {'Name': rule.rule_name,
+ 'PageTemplate': 'tool',
+ 'DisplayName': rule.display_name,
+ 'Order': '200'
+ },
+ ['Rule.DataSource',
+ ['DataSource',
+ {'Persistence': 'ProjectFile',
+ 'ItemType': rule.rule_name
+ }
+ ]
+ ],
+ ['Rule.Categories',
+ ['Category',
+ {'Name': 'General'},
+ ['Category.DisplayName',
+ ['sys:String', 'General'],
+ ],
+ ],
+ ['Category',
+ {'Name': 'Command Line',
+ 'Subtype': 'CommandLine'
+ },
+ ['Category.DisplayName',
+ ['sys:String', 'Command Line'],
+ ],
+ ],
+ ],
+ ['StringListProperty',
+ {'Name': 'Inputs',
+ 'Category': 'Command Line',
+ 'IsRequired': 'true',
+ 'Switch': ' '
+ },
+ ['StringListProperty.DataSource',
+ ['DataSource',
+ {'Persistence': 'ProjectFile',
+ 'ItemType': rule.rule_name,
+ 'SourceType': 'Item'
+ }
+ ]
+ ],
+ ],
+ ['StringProperty',
+ {'Name': 'CommandLineTemplate',
+ 'DisplayName': 'Command Line',
+ 'Visible': 'False',
+ 'IncludeInCommandLine': 'False'
+ }
+ ],
+ ['DynamicEnumProperty',
+ {'Name': rule.before_targets,
+ 'Category': 'General',
+ 'EnumProvider': 'Targets',
+ 'IncludeInCommandLine': 'False'
+ },
+ ['DynamicEnumProperty.DisplayName',
+ ['sys:String', 'Execute Before'],
+ ],
+ ['DynamicEnumProperty.Description',
+ ['sys:String', 'Specifies the targets for the build customization'
+ ' to run before.'
+ ],
+ ],
+ ['DynamicEnumProperty.ProviderSettings',
+ ['NameValuePair',
+ {'Name': 'Exclude',
+ 'Value': '^%s|^Compute' % rule.before_targets
+ }
+ ]
+ ],
+ ['DynamicEnumProperty.DataSource',
+ ['DataSource',
+ {'Persistence': 'ProjectFile',
+ 'HasConfigurationCondition': 'true'
+ }
+ ]
+ ],
+ ],
+ ['DynamicEnumProperty',
+ {'Name': rule.after_targets,
+ 'Category': 'General',
+ 'EnumProvider': 'Targets',
+ 'IncludeInCommandLine': 'False'
+ },
+ ['DynamicEnumProperty.DisplayName',
+ ['sys:String', 'Execute After'],
+ ],
+ ['DynamicEnumProperty.Description',
+ ['sys:String', ('Specifies the targets for the build customization'
+ ' to run after.')
+ ],
+ ],
+ ['DynamicEnumProperty.ProviderSettings',
+ ['NameValuePair',
+ {'Name': 'Exclude',
+ 'Value': '^%s|^Compute' % rule.after_targets
+ }
+ ]
+ ],
+ ['DynamicEnumProperty.DataSource',
+ ['DataSource',
+ {'Persistence': 'ProjectFile',
+ 'ItemType': '',
+ 'HasConfigurationCondition': 'true'
+ }
+ ]
+ ],
+ ],
+ ['StringListProperty',
+ {'Name': 'Outputs',
+ 'DisplayName': 'Outputs',
+ 'Visible': 'False',
+ 'IncludeInCommandLine': 'False'
+ }
+ ],
+ ['StringProperty',
+ {'Name': 'ExecutionDescription',
+ 'DisplayName': 'Execution Description',
+ 'Visible': 'False',
+ 'IncludeInCommandLine': 'False'
+ }
+ ],
+ ['StringListProperty',
+ {'Name': 'AdditionalDependencies',
+ 'DisplayName': 'Additional Dependencies',
+ 'IncludeInCommandLine': 'False',
+ 'Visible': 'false'
+ }
+ ],
+ ['StringProperty',
+ {'Subtype': 'AdditionalOptions',
+ 'Name': 'AdditionalOptions',
+ 'Category': 'Command Line'
+ },
+ ['StringProperty.DisplayName',
+ ['sys:String', 'Additional Options'],
+ ],
+ ['StringProperty.Description',
+ ['sys:String', 'Additional Options'],
+ ],
+ ],
+ ],
+ ['ItemType',
+ {'Name': rule.rule_name,
+ 'DisplayName': rule.display_name
+ }
+ ],
+ ['FileExtension',
+ {'Name': '*' + rule.extension,
+ 'ContentType': rule.rule_name
+ }
+ ],
+ ['ContentType',
+ {'Name': rule.rule_name,
+ 'DisplayName': '',
+ 'ItemType': rule.rule_name
+ }
+ ]
+ ])
+ easy_xml.WriteXmlIfChanged(content, xml_path, pretty=True, win32=True)
+
+
+def _GetConfigurationAndPlatform(name, settings):
+ configuration = name.rsplit('_', 1)[0]
+ platform = settings.get('msvs_configuration_platform', 'Win32')
+ return (configuration, platform)
+
+
+def _GetConfigurationCondition(name, settings):
+ return (r"'$(Configuration)|$(Platform)'=='%s|%s'" %
+ _GetConfigurationAndPlatform(name, settings))
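+# Illustrative example: a configuration named 'Debug_x64' whose settings map
+# 'msvs_configuration_platform' to 'x64' yields the condition
+# "'$(Configuration)|$(Platform)'=='Debug|x64'".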
+
+
+def _GetMSBuildProjectConfigurations(configurations):
+ group = ['ItemGroup', {'Label': 'ProjectConfigurations'}]
+ for (name, settings) in sorted(configurations.items()):
+ configuration, platform = _GetConfigurationAndPlatform(name, settings)
+ designation = '%s|%s' % (configuration, platform)
+ group.append(
+ ['ProjectConfiguration', {'Include': designation},
+ ['Configuration', configuration],
+ ['Platform', platform]])
+ return [group]
+
+
+def _GetMSBuildGlobalProperties(spec, version, guid, gyp_file_name):
+ namespace = os.path.splitext(gyp_file_name)[0]
+ properties = [
+ ['PropertyGroup', {'Label': 'Globals'},
+ ['ProjectGuid', guid],
+ ['Keyword', 'Win32Proj'],
+ ['RootNamespace', namespace],
+ ['IgnoreWarnCompileDuplicatedFilename', 'true'],
+ ]
+ ]
+
+ if os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or \
+ os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64':
+ properties[0].append(['PreferredToolArchitecture', 'x64'])
+
+ if spec.get('msvs_target_platform_version'):
+ target_platform_version = spec.get('msvs_target_platform_version')
+ properties[0].append(['WindowsTargetPlatformVersion',
+ target_platform_version])
+ if spec.get('msvs_target_platform_minversion'):
+ target_platform_minversion = spec.get('msvs_target_platform_minversion')
+ properties[0].append(['WindowsTargetPlatformMinVersion',
+ target_platform_minversion])
+ else:
+ properties[0].append(['WindowsTargetPlatformMinVersion',
+ target_platform_version])
+
+ if spec.get('msvs_enable_winrt'):
+ properties[0].append(['DefaultLanguage', 'en-US'])
+ properties[0].append(['AppContainerApplication', 'true'])
+ if spec.get('msvs_application_type_revision'):
+ app_type_revision = spec.get('msvs_application_type_revision')
+ properties[0].append(['ApplicationTypeRevision', app_type_revision])
+ else:
+ properties[0].append(['ApplicationTypeRevision', '8.1'])
+ if spec.get('msvs_enable_winphone'):
+ properties[0].append(['ApplicationType', 'Windows Phone'])
+ else:
+ properties[0].append(['ApplicationType', 'Windows Store'])
+
+ platform_name = None
+ msvs_windows_sdk_version = None
+ for configuration in spec['configurations'].values():
+ platform_name = platform_name or _ConfigPlatform(configuration)
+ msvs_windows_sdk_version = (msvs_windows_sdk_version or
+ _ConfigWindowsTargetPlatformVersion(configuration, version))
+ if platform_name and msvs_windows_sdk_version:
+ break
+ if msvs_windows_sdk_version:
+ properties[0].append(['WindowsTargetPlatformVersion',
+ str(msvs_windows_sdk_version)])
+ elif version.compatible_sdks:
+ raise GypError('%s requires any SDK of %s version, but none were found' %
+ (version.description, version.compatible_sdks))
+
+ if platform_name == 'ARM':
+ properties[0].append(['WindowsSDKDesktopARMSupport', 'true'])
+
+ return properties
+
+
+def _GetMSBuildConfigurationDetails(spec, build_file):
+ properties = {}
+ for name, settings in spec['configurations'].items():
+ msbuild_attributes = _GetMSBuildAttributes(spec, settings, build_file)
+ condition = _GetConfigurationCondition(name, settings)
+ character_set = msbuild_attributes.get('CharacterSet')
+ config_type = msbuild_attributes.get('ConfigurationType')
+ _AddConditionalProperty(properties, condition, 'ConfigurationType',
+ config_type)
+ spectre_mitigation = msbuild_attributes.get('SpectreMitigation')
+ if spectre_mitigation:
+ _AddConditionalProperty(properties, condition, 'SpectreMitigation',
+ spectre_mitigation)
+ if config_type == 'Driver':
+ _AddConditionalProperty(properties, condition, 'DriverType', 'WDM')
+ _AddConditionalProperty(properties, condition, 'TargetVersion',
+ _ConfigTargetVersion(settings))
+ if character_set:
+ if 'msvs_enable_winrt' not in spec:
+ _AddConditionalProperty(properties, condition, 'CharacterSet',
+ character_set)
+ return _GetMSBuildPropertyGroup(spec, 'Configuration', properties)
+
+
+def _GetMSBuildLocalProperties(msbuild_toolset):
+ # Currently the only local property we support is PlatformToolset
+ properties = {}
+ if msbuild_toolset:
+ properties = [
+ ['PropertyGroup', {'Label': 'Locals'},
+ ['PlatformToolset', msbuild_toolset],
+ ]
+ ]
+ return properties
+
+
+def _GetMSBuildPropertySheets(configurations):
+ user_props = r'$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props'
+ additional_props = {}
+ props_specified = False
+ for name, settings in sorted(configurations.items()):
+ configuration = _GetConfigurationCondition(name, settings)
+ if 'msbuild_props' in settings:
+ additional_props[configuration] = _FixPaths(settings['msbuild_props'])
+ props_specified = True
+ else:
+ additional_props[configuration] = ''
+
+ if not props_specified:
+ return [
+ ['ImportGroup',
+ {'Label': 'PropertySheets'},
+ ['Import',
+ {'Project': user_props,
+ 'Condition': "exists('%s')" % user_props,
+ 'Label': 'LocalAppDataPlatform'
+ }
+ ]
+ ]
+ ]
+ else:
+ sheets = []
+ for condition, props in additional_props.items():
+ import_group = [
+ 'ImportGroup',
+ {'Label': 'PropertySheets',
+ 'Condition': condition
+ },
+ ['Import',
+ {'Project': user_props,
+ 'Condition': "exists('%s')" % user_props,
+ 'Label': 'LocalAppDataPlatform'
+ }
+ ]
+ ]
+ for props_file in props:
+ import_group.append(['Import', {'Project': props_file}])
+ sheets.append(import_group)
+ return sheets
+
+
+def _ConvertMSVSBuildAttributes(spec, config, build_file):
+ config_type = _GetMSVSConfigurationType(spec, build_file)
+ msvs_attributes = _GetMSVSAttributes(spec, config, config_type)
+ msbuild_attributes = {}
+ for a in msvs_attributes:
+ if a in ['IntermediateDirectory', 'OutputDirectory']:
+ directory = MSVSSettings.ConvertVCMacrosToMSBuild(msvs_attributes[a])
+ if not directory.endswith('\\'):
+ directory += '\\'
+ msbuild_attributes[a] = directory
+ elif a == 'CharacterSet':
+ msbuild_attributes[a] = _ConvertMSVSCharacterSet(msvs_attributes[a])
+ elif a == 'ConfigurationType':
+ msbuild_attributes[a] = _ConvertMSVSConfigurationType(msvs_attributes[a])
+ elif a == 'SpectreMitigation':
+ msbuild_attributes[a] = msvs_attributes[a]
+ else:
+ print('Warning: Do not know how to convert MSVS attribute ' + a)
+ return msbuild_attributes
+
+
+def _ConvertMSVSCharacterSet(char_set):
+ if char_set.isdigit():
+ char_set = {
+ '0': 'MultiByte',
+ '1': 'Unicode',
+ '2': 'MultiByte',
+ }[char_set]
+ return char_set
+
+
+def _ConvertMSVSConfigurationType(config_type):
+ if config_type.isdigit():
+ config_type = {
+ '1': 'Application',
+ '2': 'DynamicLibrary',
+ '4': 'StaticLibrary',
+ '5': 'Driver',
+ '10': 'Utility'
+ }[config_type]
+ return config_type
+
+
+def _GetMSBuildAttributes(spec, config, build_file):
+ if 'msbuild_configuration_attributes' not in config:
+ msbuild_attributes = _ConvertMSVSBuildAttributes(spec, config, build_file)
+
+ else:
+ config_type = _GetMSVSConfigurationType(spec, build_file)
+ config_type = _ConvertMSVSConfigurationType(config_type)
+ msbuild_attributes = config.get('msbuild_configuration_attributes', {})
+ msbuild_attributes.setdefault('ConfigurationType', config_type)
+ output_dir = msbuild_attributes.get('OutputDirectory',
+ '$(SolutionDir)$(Configuration)')
+ msbuild_attributes['OutputDirectory'] = _FixPath(output_dir) + '\\'
+ if 'IntermediateDirectory' not in msbuild_attributes:
+ intermediate = _FixPath('$(Configuration)') + '\\'
+ msbuild_attributes['IntermediateDirectory'] = intermediate
+ if 'CharacterSet' in msbuild_attributes:
+ msbuild_attributes['CharacterSet'] = _ConvertMSVSCharacterSet(
+ msbuild_attributes['CharacterSet'])
+ if 'TargetName' not in msbuild_attributes:
+ prefix = spec.get('product_prefix', '')
+ product_name = spec.get('product_name', '$(ProjectName)')
+ target_name = prefix + product_name
+ msbuild_attributes['TargetName'] = target_name
+ if 'TargetExt' not in msbuild_attributes and 'product_extension' in spec:
+ ext = spec.get('product_extension')
+ msbuild_attributes['TargetExt'] = '.' + ext
+
+ if spec.get('msvs_external_builder'):
+ external_out_dir = spec.get('msvs_external_builder_out_dir', '.')
+ msbuild_attributes['OutputDirectory'] = _FixPath(external_out_dir) + '\\'
+
+ # Make sure that 'TargetPath' matches 'Lib.OutputFile' or 'Link.OutputFile'
+ # (depending on the tool used) to avoid MSB8012 warning.
+ msbuild_tool_map = {
+ 'executable': 'Link',
+ 'shared_library': 'Link',
+ 'loadable_module': 'Link',
+ 'windows_driver': 'Link',
+ 'static_library': 'Lib',
+ }
+ msbuild_tool = msbuild_tool_map.get(spec['type'])
+ if msbuild_tool:
+ msbuild_settings = config['finalized_msbuild_settings']
+ out_file = msbuild_settings[msbuild_tool].get('OutputFile')
+ if out_file:
+ msbuild_attributes['TargetPath'] = _FixPath(out_file)
+ target_ext = msbuild_settings[msbuild_tool].get('TargetExt')
+ if target_ext:
+ msbuild_attributes['TargetExt'] = target_ext
+
+ return msbuild_attributes
+
+
+def _GetMSBuildConfigurationGlobalProperties(spec, configurations, build_file):
+ # TODO(jeanluc) We could optimize out the following and do it only if
+ # there are actions.
+ # TODO(jeanluc) Handle the equivalent of setting 'CYGWIN=nontsec'.
+ new_paths = []
+ cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])[0]
+ if cygwin_dirs:
+ cyg_path = '$(MSBuildProjectDirectory)\\%s\\bin\\' % _FixPath(cygwin_dirs)
+ new_paths.append(cyg_path)
+ # TODO(jeanluc) Change the convention to have both a cygwin_dir and a
+ # python_dir.
+ python_path = cyg_path.replace('cygwin\\bin', 'python_26')
+ new_paths.append(python_path)
+ if new_paths:
+ new_paths = '$(ExecutablePath);' + ';'.join(new_paths)
+
+ properties = {}
+ for (name, configuration) in sorted(configurations.items()):
+ condition = _GetConfigurationCondition(name, configuration)
+ attributes = _GetMSBuildAttributes(spec, configuration, build_file)
+ msbuild_settings = configuration['finalized_msbuild_settings']
+ _AddConditionalProperty(properties, condition, 'IntDir',
+ attributes['IntermediateDirectory'])
+ _AddConditionalProperty(properties, condition, 'OutDir',
+ attributes['OutputDirectory'])
+ _AddConditionalProperty(properties, condition, 'TargetName',
+ attributes['TargetName'])
+ if 'TargetExt' in attributes:
+ _AddConditionalProperty(properties, condition, 'TargetExt',
+ attributes['TargetExt'])
+
+ if attributes.get('TargetPath'):
+ _AddConditionalProperty(properties, condition, 'TargetPath',
+ attributes['TargetPath'])
+
+ if new_paths:
+ _AddConditionalProperty(properties, condition, 'ExecutablePath',
+ new_paths)
+ tool_settings = msbuild_settings.get('', {})
+ for name, value in sorted(tool_settings.items()):
+ formatted_value = _GetValueFormattedForMSBuild('', name, value)
+ _AddConditionalProperty(properties, condition, name, formatted_value)
+ return _GetMSBuildPropertyGroup(spec, None, properties)
+
+
+def _AddConditionalProperty(properties, condition, name, value):
+ """Adds a property / conditional value pair to a dictionary.
+
+ Arguments:
+ properties: The dictionary to be modified. The key is the name of the
+ property. The value is itself a dictionary; its key is the value and
+ the value a list of conditions for which this value is true.
+ condition: The condition under which the named property has the value.
+ name: The name of the property.
+ value: The value of the property.
+ """
+ if name not in properties:
+ properties[name] = {}
+ values = properties[name]
+ if value not in values:
+ values[value] = []
+ conditions = values[value]
+ conditions.append(condition)
+
+
+# Regex for MSVS variable references, i.e. $(FOO).
+MSVS_VARIABLE_REFERENCE = re.compile(r'\$\(([a-zA-Z_][a-zA-Z0-9_]*)\)')
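+# e.g. MSVS_VARIABLE_REFERENCE.findall('$(OutDir)\\$(ProjectName)') returns
+# ['OutDir', 'ProjectName'].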
+
+
+def _GetMSBuildPropertyGroup(spec, label, properties):
+ """Returns a PropertyGroup definition for the specified properties.
+
+ Arguments:
+ spec: The target project dict.
+ label: An optional label for the PropertyGroup.
+ properties: The dictionary to be converted. The key is the name of the
+ property. The value is itself a dictionary; its key is the value and
+ the value a list of conditions for which this value is true.
+ """
+ group = ['PropertyGroup']
+ if label:
+ group.append({'Label': label})
+ num_configurations = len(spec['configurations'])
+ def GetEdges(node):
+ # Use a definition of edges such that user_of_variable -> used_variable.
+ # This happens to be easier in this case, since a variable's
+ # definition contains all variables it references in a single string.
+ edges = set()
+ for value in sorted(properties[node].keys()):
+ # Add to edges all $(...) references to variables.
+ #
+ # Variable references that refer to names not in properties are excluded;
+ # these can exist, for instance, to refer to built-in definitions like
+ # $(SolutionDir).
+ #
+ # Self-references are ignored, since self-reference is used in a few
+ # places to append to the default value, i.e. PATH=$(PATH);other_path.
+ edges.update(set([v for v in MSVS_VARIABLE_REFERENCE.findall(value)
+ if v in properties and v != node]))
+ return edges
+ properties_ordered = gyp.common.TopologicallySorted(
+ properties.keys(), GetEdges)
+ # Walk properties in the reverse of a topological sort on
+ # user_of_variable -> used_variable as this ensures variables are
+ # defined before they are used.
+ # NOTE: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
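+ # Illustrative example: with properties {'A': {'$(B);x': [c]}, 'B': {'y': [c]}},
+ # GetEdges('A') == {'B'}, so the reversed topological order emits B before A
+ # and $(B) is defined by the time A references it.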
+ for name in reversed(properties_ordered):
+ values = properties[name]
+ for value, conditions in sorted(values.items()):
+ if len(conditions) == num_configurations:
+ # If the value is the same for all configurations,
+ # just add one unconditional entry.
+ group.append([name, value])
+ else:
+ for condition in conditions:
+ group.append([name, {'Condition': condition}, value])
+ return [group]
+
+
+def _GetMSBuildToolSettingsSections(spec, configurations):
+ groups = []
+ for (name, configuration) in sorted(configurations.items()):
+ msbuild_settings = configuration['finalized_msbuild_settings']
+ group = ['ItemDefinitionGroup',
+ {'Condition': _GetConfigurationCondition(name, configuration)}
+ ]
+ for tool_name, tool_settings in sorted(msbuild_settings.items()):
+ # Skip the tool named '' which is a holder of global settings handled
+ # by _GetMSBuildConfigurationGlobalProperties.
+ if tool_name:
+ if tool_settings:
+ tool = [tool_name]
+ for name, value in sorted(tool_settings.items()):
+ formatted_value = _GetValueFormattedForMSBuild(tool_name, name,
+ value)
+ tool.append([name, formatted_value])
+ group.append(tool)
+ groups.append(group)
+ return groups
+
+
+def _FinalizeMSBuildSettings(spec, configuration):
+ if 'msbuild_settings' in configuration:
+ converted = False
+ msbuild_settings = configuration['msbuild_settings']
+ MSVSSettings.ValidateMSBuildSettings(msbuild_settings)
+ else:
+ converted = True
+ msvs_settings = configuration.get('msvs_settings', {})
+ msbuild_settings = MSVSSettings.ConvertToMSBuildSettings(msvs_settings)
+ include_dirs, midl_include_dirs, resource_include_dirs = \
+ _GetIncludeDirs(configuration)
+ libraries = _GetLibraries(spec)
+ library_dirs = _GetLibraryDirs(configuration)
+ out_file, _, msbuild_tool = _GetOutputFilePathAndTool(spec, msbuild=True)
+ target_ext = _GetOutputTargetExt(spec)
+ defines = _GetDefines(configuration)
+ if converted:
+ # Visual Studio 2010 has TR1
+ defines = [d for d in defines if d != '_HAS_TR1=0']
+ # Warn of ignored settings
+ ignored_settings = ['msvs_tool_files']
+ for ignored_setting in ignored_settings:
+ value = configuration.get(ignored_setting)
+ if value:
+ print('Warning: The automatic conversion to MSBuild does not handle '
+ '%s. Ignoring setting of %s' % (ignored_setting, str(value)))
+
+ defines = [_EscapeCppDefineForMSBuild(d) for d in defines]
+ disabled_warnings = _GetDisabledWarnings(configuration)
+ prebuild = configuration.get('msvs_prebuild')
+ postbuild = configuration.get('msvs_postbuild')
+ def_file = _GetModuleDefinition(spec)
+ precompiled_header = configuration.get('msvs_precompiled_header')
+
+ # Add the information to the appropriate tool
+ # TODO(jeanluc) We could optimize and generate these settings only if
+ # the corresponding files are found, e.g. don't generate ResourceCompile
+ # if you don't have any resources.
+ _ToolAppend(msbuild_settings, 'ClCompile',
+ 'AdditionalIncludeDirectories', include_dirs)
+ _ToolAppend(msbuild_settings, 'Midl',
+ 'AdditionalIncludeDirectories', midl_include_dirs)
+ _ToolAppend(msbuild_settings, 'ResourceCompile',
+ 'AdditionalIncludeDirectories', resource_include_dirs)
+ # Add in libraries, note that even for empty libraries, we want this
+ # set, to prevent inheriting default libraries from the environment.
+ _ToolSetOrAppend(msbuild_settings, 'Link', 'AdditionalDependencies',
+ libraries)
+ _ToolAppend(msbuild_settings, 'Link', 'AdditionalLibraryDirectories',
+ library_dirs)
+ if out_file:
+ _ToolAppend(msbuild_settings, msbuild_tool, 'OutputFile', out_file,
+ only_if_unset=True)
+ if target_ext:
+ _ToolAppend(msbuild_settings, msbuild_tool, 'TargetExt', target_ext,
+ only_if_unset=True)
+ # Add defines.
+ _ToolAppend(msbuild_settings, 'ClCompile',
+ 'PreprocessorDefinitions', defines)
+ _ToolAppend(msbuild_settings, 'ResourceCompile',
+ 'PreprocessorDefinitions', defines)
+ # Add disabled warnings.
+ _ToolAppend(msbuild_settings, 'ClCompile',
+ 'DisableSpecificWarnings', disabled_warnings)
+ # Turn on precompiled headers if appropriate.
+ if precompiled_header:
+ precompiled_header = os.path.split(precompiled_header)[1]
+ _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'Use')
+ _ToolAppend(msbuild_settings, 'ClCompile',
+ 'PrecompiledHeaderFile', precompiled_header)
+ _ToolAppend(msbuild_settings, 'ClCompile',
+ 'ForcedIncludeFiles', [precompiled_header])
+ else:
+ _ToolAppend(msbuild_settings, 'ClCompile', 'PrecompiledHeader', 'NotUsing')
+ # Turn off WinRT compilation
+ _ToolAppend(msbuild_settings, 'ClCompile', 'CompileAsWinRT', 'false')
+ # Turn on import libraries if appropriate
+ if spec.get('msvs_requires_importlibrary'):
+ _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'false')
+ # Loadable modules don't generate import libraries;
+ # tell dependent projects to not expect one.
+ if spec['type'] == 'loadable_module':
+ _ToolAppend(msbuild_settings, '', 'IgnoreImportLibrary', 'true')
+ # Set the module definition file if any.
+ if def_file:
+ _ToolAppend(msbuild_settings, 'Link', 'ModuleDefinitionFile', def_file)
+ configuration['finalized_msbuild_settings'] = msbuild_settings
+ if prebuild:
+ _ToolAppend(msbuild_settings, 'PreBuildEvent', 'Command', prebuild)
+ if postbuild:
+ _ToolAppend(msbuild_settings, 'PostBuildEvent', 'Command', postbuild)
+
+
+def _GetValueFormattedForMSBuild(tool_name, name, value):
+ if type(value) == list:
+ # For some settings, VS2010 does not automatically extend the settings
+ # TODO(jeanluc) Is this what we want?
+ if name in ['AdditionalIncludeDirectories',
+ 'AdditionalLibraryDirectories',
+ 'AdditionalOptions',
+ 'DelayLoadDLLs',
+ 'DisableSpecificWarnings',
+ 'PreprocessorDefinitions']:
+ value.append('%%(%s)' % name)
+ # For most tools, entries in a list should be separated with ';' but some
+ # settings use a space. Check for those first.
+ exceptions = {
+ 'ClCompile': ['AdditionalOptions'],
+ 'Link': ['AdditionalOptions'],
+ 'Lib': ['AdditionalOptions']}
+ if tool_name in exceptions and name in exceptions[tool_name]:
+ char = ' '
+ else:
+ char = ';'
+ formatted_value = char.join(
+ [MSVSSettings.ConvertVCMacrosToMSBuild(i) for i in value])
+ else:
+ formatted_value = MSVSSettings.ConvertVCMacrosToMSBuild(value)
+ return formatted_value
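+# Illustrative examples, assuming the values contain no VC macros:
+#   _GetValueFormattedForMSBuild('ClCompile', 'AdditionalOptions', ['/MP', '/Zi'])
+#     -> '/MP /Zi %(AdditionalOptions)'
+#   _GetValueFormattedForMSBuild('ClCompile', 'PreprocessorDefinitions', ['A', 'B'])
+#     -> 'A;B;%(PreprocessorDefinitions)'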
+
+
+def _VerifySourcesExist(sources, root_dir):
+ """Verifies that all source files exist on disk.
+
+ Checks that all regular source files, i.e. not created at run time,
+ exist on disk. Missing files cause needless recompilation but no otherwise
+ visible errors.
+
+ Arguments:
+ sources: A recursive list of Filter/file names.
+ root_dir: The root directory for the relative path names.
+ Returns:
+ A list of source files that cannot be found on disk.
+ """
+ missing_sources = []
+ for source in sources:
+ if isinstance(source, MSVSProject.Filter):
+ missing_sources.extend(_VerifySourcesExist(source.contents, root_dir))
+ else:
+ if '$' not in source:
+ full_path = os.path.join(root_dir, source)
+ if not os.path.exists(full_path):
+ missing_sources.append(full_path)
+ return missing_sources
+
+
+def _GetMSBuildSources(spec, sources, exclusions, rule_dependencies,
+ extension_to_rule_name, actions_spec,
+ sources_handled_by_action, list_excluded):
+ groups = ['none', 'masm', 'midl', 'include', 'compile', 'resource', 'rule',
+ 'rule_dependency']
+ grouped_sources = {}
+ for g in groups:
+ grouped_sources[g] = []
+
+ _AddSources2(spec, sources, exclusions, grouped_sources,
+ rule_dependencies, extension_to_rule_name,
+ sources_handled_by_action, list_excluded)
+ sources = []
+ for g in groups:
+ if grouped_sources[g]:
+ sources.append(['ItemGroup'] + grouped_sources[g])
+ if actions_spec:
+ sources.append(['ItemGroup'] + actions_spec)
+ return sources
+
+
+def _AddSources2(spec, sources, exclusions, grouped_sources,
+ rule_dependencies, extension_to_rule_name,
+ sources_handled_by_action,
+ list_excluded):
+ extensions_excluded_from_precompile = []
+ for source in sources:
+ if isinstance(source, MSVSProject.Filter):
+ _AddSources2(spec, source.contents, exclusions, grouped_sources,
+ rule_dependencies, extension_to_rule_name,
+ sources_handled_by_action,
+ list_excluded)
+ else:
+ if not source in sources_handled_by_action:
+ detail = []
+ excluded_configurations = exclusions.get(source, [])
+ if len(excluded_configurations) == len(spec['configurations']):
+ detail.append(['ExcludedFromBuild', 'true'])
+ else:
+ for config_name, configuration in sorted(excluded_configurations):
+ condition = _GetConfigurationCondition(config_name, configuration)
+ detail.append(['ExcludedFromBuild',
+ {'Condition': condition},
+ 'true'])
+ # Add precompile if needed
+ for config_name, configuration in spec['configurations'].items():
+ precompiled_source = configuration.get('msvs_precompiled_source', '')
+ if precompiled_source != '':
+ precompiled_source = _FixPath(precompiled_source)
+ if not extensions_excluded_from_precompile:
+ # If the precompiled header is generated by a C source, we must
+ # not try to use it for C++ sources, and vice versa.
+ basename, extension = os.path.splitext(precompiled_source)
+ if extension == '.c':
+ extensions_excluded_from_precompile = ['.cc', '.cpp', '.cxx']
+ else:
+ extensions_excluded_from_precompile = ['.c']
+
+ if precompiled_source == source:
+ condition = _GetConfigurationCondition(config_name, configuration)
+ detail.append(['PrecompiledHeader',
+ {'Condition': condition},
+ 'Create'
+ ])
+ else:
+ # Turn off precompiled header usage for source files of a
+ # different type than the file that generated the
+ # precompiled header.
+ for extension in extensions_excluded_from_precompile:
+ if source.endswith(extension):
+ detail.append(['PrecompiledHeader', ''])
+ detail.append(['ForcedIncludeFiles', ''])
+
+ group, element = _MapFileToMsBuildSourceType(source, rule_dependencies,
+ extension_to_rule_name)
+ grouped_sources[group].append([element, {'Include': source}] + detail)
+
+
+def _GetMSBuildProjectReferences(project):
+ references = []
+ if project.dependencies:
+ group = ['ItemGroup']
+ for dependency in project.dependencies:
+ guid = dependency.guid
+ project_dir = os.path.split(project.path)[0]
+ relative_path = gyp.common.RelativePath(dependency.path, project_dir)
+ project_ref = ['ProjectReference',
+ {'Include': relative_path},
+ ['Project', guid],
+ ['ReferenceOutputAssembly', 'false']
+ ]
+ for config in dependency.spec.get('configurations', {}).values():
+ if config.get('msvs_use_library_dependency_inputs', 0):
+ project_ref.append(['UseLibraryDependencyInputs', 'true'])
+ break
+ # If it's disabled in any config, turn it off in the reference.
+ if config.get('msvs_2010_disable_uldi_when_referenced', 0):
+ project_ref.append(['UseLibraryDependencyInputs', 'false'])
+ break
+ group.append(project_ref)
+ references.append(group)
+ return references
+
+
+def _GenerateMSBuildProject(project, options, version, generator_flags):
+ spec = project.spec
+ configurations = spec['configurations']
+ project_dir, project_file_name = os.path.split(project.path)
+ gyp.common.EnsureDirExists(project.path)
+ # Prepare list of sources and excluded sources.
+ gyp_path = _NormalizedSource(project.build_file)
+ relative_path_of_gyp_file = gyp.common.RelativePath(gyp_path, project_dir)
+
+ gyp_file = os.path.split(project.build_file)[1]
+ sources, excluded_sources = _PrepareListOfSources(spec, generator_flags,
+ gyp_file)
+ # Add rules.
+ actions_to_add = {}
+ props_files_of_rules = set()
+ targets_files_of_rules = set()
+ rule_dependencies = set()
+ extension_to_rule_name = {}
+ list_excluded = generator_flags.get('msvs_list_excluded_files', True)
+
+ # Don't generate rules if we are using an external builder like ninja.
+ if not spec.get('msvs_external_builder'):
+ _GenerateRulesForMSBuild(project_dir, options, spec,
+ sources, excluded_sources,
+ props_files_of_rules, targets_files_of_rules,
+ actions_to_add, rule_dependencies,
+ extension_to_rule_name)
+ else:
+ rules = spec.get('rules', [])
+ _AdjustSourcesForRules(rules, sources, excluded_sources, True)
+
+ sources, excluded_sources, excluded_idl = (
+ _AdjustSourcesAndConvertToFilterHierarchy(spec, options,
+ project_dir, sources,
+ excluded_sources,
+ list_excluded, version))
+
+ # Don't add actions if we are using an external builder like ninja.
+ if not spec.get('msvs_external_builder'):
+ _AddActions(actions_to_add, spec, project.build_file)
+ _AddCopies(actions_to_add, spec)
+
+ # NOTE: this stanza must appear after all actions have been decided.
+ # Don't exclude sources with actions attached, or they won't run.
+ excluded_sources = _FilterActionsFromExcluded(
+ excluded_sources, actions_to_add)
+
+ exclusions = _GetExcludedFilesFromBuild(spec, excluded_sources, excluded_idl)
+ actions_spec, sources_handled_by_action = _GenerateActionsForMSBuild(
+ spec, actions_to_add)
+
+ _GenerateMSBuildFiltersFile(project.path + '.filters', sources,
+ rule_dependencies,
+ extension_to_rule_name)
+ missing_sources = _VerifySourcesExist(sources, project_dir)
+
+ for configuration in configurations.values():
+ _FinalizeMSBuildSettings(spec, configuration)
+
+ # Add attributes to root element
+
+ import_default_section = [
+ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.Default.props'}]]
+ import_cpp_props_section = [
+ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.props'}]]
+ import_cpp_targets_section = [
+ ['Import', {'Project': r'$(VCTargetsPath)\Microsoft.Cpp.targets'}]]
+ import_masm_props_section = [
+ ['Import',
+ {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.props'}]]
+ import_masm_targets_section = [
+ ['Import',
+ {'Project': r'$(VCTargetsPath)\BuildCustomizations\masm.targets'}]]
+ macro_section = [['PropertyGroup', {'Label': 'UserMacros'}]]
+
+ content = [
+ 'Project',
+ {'xmlns': 'http://schemas.microsoft.com/developer/msbuild/2003',
+ 'ToolsVersion': version.ProjectVersion(),
+ 'DefaultTargets': 'Build'
+ }]
+
+ content += _GetMSBuildProjectConfigurations(configurations)
+ content += _GetMSBuildGlobalProperties(spec, version, project.guid,
+ project_file_name)
+ content += import_default_section
+ content += _GetMSBuildConfigurationDetails(spec, project.build_file)
+ if spec.get('msvs_enable_winphone'):
+ content += _GetMSBuildLocalProperties('v120_wp81')
+ else:
+ content += _GetMSBuildLocalProperties(project.msbuild_toolset)
+ content += import_cpp_props_section
+ content += import_masm_props_section
+ content += _GetMSBuildExtensions(props_files_of_rules)
+ content += _GetMSBuildPropertySheets(configurations)
+ content += macro_section
+ content += _GetMSBuildConfigurationGlobalProperties(spec, configurations,
+ project.build_file)
+ content += _GetMSBuildToolSettingsSections(spec, configurations)
+ content += _GetMSBuildSources(
+ spec, sources, exclusions, rule_dependencies, extension_to_rule_name,
+ actions_spec, sources_handled_by_action, list_excluded)
+ content += _GetMSBuildProjectReferences(project)
+ content += import_cpp_targets_section
+ content += import_masm_targets_section
+ content += _GetMSBuildExtensionTargets(targets_files_of_rules)
+
+ if spec.get('msvs_external_builder'):
+ content += _GetMSBuildExternalBuilderTargets(spec)
+
+ # TODO(jeanluc) File a bug to get rid of runas. We had in MSVS:
+ # has_run_as = _WriteMSVSUserFile(project.path, version, spec)
+
+ easy_xml.WriteXmlIfChanged(content, project.path, pretty=True, win32=True)
+
+ return missing_sources
+
+
+def _GetMSBuildExternalBuilderTargets(spec):
+ """Return a list of MSBuild targets for external builders.
+
+ The "Build" and "Clean" targets are always generated. If the spec contains
+ 'msvs_external_builder_clcompile_cmd', then the "ClCompile" target will also
+ be generated, to support building selected C/C++ files.
+
+ Arguments:
+ spec: The gyp target spec.
+ Returns:
+ List of MSBuild 'Target' specs.
+ """
+ build_cmd = _BuildCommandLineForRuleRaw(
+ spec, spec['msvs_external_builder_build_cmd'],
+ False, False, False, False)
+ build_target = ['Target', {'Name': 'Build'}]
+ build_target.append(['Exec', {'Command': build_cmd}])
+
+ clean_cmd = _BuildCommandLineForRuleRaw(
+ spec, spec['msvs_external_builder_clean_cmd'],
+ False, False, False, False)
+ clean_target = ['Target', {'Name': 'Clean'}]
+ clean_target.append(['Exec', {'Command': clean_cmd}])
+
+ targets = [build_target, clean_target]
+
+ if spec.get('msvs_external_builder_clcompile_cmd'):
+ clcompile_cmd = _BuildCommandLineForRuleRaw(
+ spec, spec['msvs_external_builder_clcompile_cmd'],
+ False, False, False, False)
+ clcompile_target = ['Target', {'Name': 'ClCompile'}]
+ clcompile_target.append(['Exec', {'Command': clcompile_cmd}])
+ targets.append(clcompile_target)
+
+ return targets
+
+
+def _GetMSBuildExtensions(props_files_of_rules):
+ extensions = ['ImportGroup', {'Label': 'ExtensionSettings'}]
+ for props_file in props_files_of_rules:
+ extensions.append(['Import', {'Project': props_file}])
+ return [extensions]
+
+
+def _GetMSBuildExtensionTargets(targets_files_of_rules):
+ targets_node = ['ImportGroup', {'Label': 'ExtensionTargets'}]
+ for targets_file in sorted(targets_files_of_rules):
+ targets_node.append(['Import', {'Project': targets_file}])
+ return [targets_node]
+
+
+def _GenerateActionsForMSBuild(spec, actions_to_add):
+ """Add actions accumulated into an actions_to_add, merging as needed.
+
+ Arguments:
+ spec: the target project dict
+ actions_to_add: dictionary keyed on input name, which maps to a list of
+ dicts describing the actions attached to that input file.
+
+ Returns:
+ A pair of (action specification, the sources handled by this action).
+ """
+ sources_handled_by_action = OrderedSet()
+ actions_spec = []
+ for primary_input, actions in actions_to_add.items():
+ inputs = OrderedSet()
+ outputs = OrderedSet()
+ descriptions = []
+ commands = []
+ for action in actions:
+ inputs.update(OrderedSet(action['inputs']))
+ outputs.update(OrderedSet(action['outputs']))
+ descriptions.append(action['description'])
+ cmd = action['command']
+ # For most actions, add 'call' so that actions that invoke batch files
+ # return and continue executing. msbuild_use_call provides a way to
+ # disable this but I have not seen any adverse effect from doing that
+ # for everything.
+ if action.get('msbuild_use_call', True):
+ cmd = 'call ' + cmd
+ commands.append(cmd)
+ # Add the custom build action for one input file.
+ description = ', and also '.join(descriptions)
+
+ # We can't join the commands simply with && because the command line will
+ # get too long. See also _AddActions: cygwin's setup_env mustn't be called
+ # for every invocation or the command that sets the PATH will grow too
+ # long.
+ command = '\r\n'.join([c + '\r\nif %errorlevel% neq 0 exit /b %errorlevel%'
+ for c in commands])
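+ # Illustrative result for commands ['call a.bat', 'call b.bat']:
+ #   call a.bat
+ #   if %errorlevel% neq 0 exit /b %errorlevel%
+ #   call b.bat
+ #   if %errorlevel% neq 0 exit /b %errorlevel%
+ # (joined with CRLF line endings)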
+ _AddMSBuildAction(spec,
+ primary_input,
+ inputs,
+ outputs,
+ command,
+ description,
+ sources_handled_by_action,
+ actions_spec)
+ return actions_spec, sources_handled_by_action
+
+
+def _AddMSBuildAction(spec, primary_input, inputs, outputs, cmd, description,
+ sources_handled_by_action, actions_spec):
+ command = MSVSSettings.ConvertVCMacrosToMSBuild(cmd)
+ primary_input = _FixPath(primary_input)
+ inputs_array = _FixPaths(inputs)
+ outputs_array = _FixPaths(outputs)
+ additional_inputs = ';'.join([i for i in inputs_array
+ if i != primary_input])
+ outputs = ';'.join(outputs_array)
+ sources_handled_by_action.add(primary_input)
+ action_spec = ['CustomBuild', {'Include': primary_input}]
+ action_spec.extend(
+ # TODO(jeanluc) 'Document' for all or just if as_sources?
+ [['FileType', 'Document'],
+ ['Command', command],
+ ['Message', description],
+ ['Outputs', outputs]
+ ])
+ if additional_inputs:
+ action_spec.append(['AdditionalInputs', additional_inputs])
+ actions_spec.append(action_spec)
diff --git a/third_party/python/gyp/pylib/gyp/generator/msvs_test.py b/third_party/python/gyp/pylib/gyp/generator/msvs_test.py
new file mode 100755
index 0000000000..838d236a2d
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/msvs_test.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Unit tests for the msvs.py file. """
+
+import gyp.generator.msvs as msvs
+import unittest
+try:
+ from StringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+
+class TestSequenceFunctions(unittest.TestCase):
+
+ def setUp(self):
+ self.stderr = StringIO()
+
+ def test_GetLibraries(self):
+ self.assertEqual(
+ msvs._GetLibraries({}),
+ [])
+ self.assertEqual(
+ msvs._GetLibraries({'libraries': []}),
+ [])
+ self.assertEqual(
+ msvs._GetLibraries({'other': 'foo', 'libraries': ['a.lib']}),
+ ['a.lib'])
+ self.assertEqual(
+ msvs._GetLibraries({'libraries': ['-la']}),
+ ['a.lib'])
+ self.assertEqual(
+ msvs._GetLibraries({'libraries': ['a.lib', 'b.lib', 'c.lib', '-lb.lib',
+ '-lb.lib', 'd.lib', 'a.lib']}),
+ ['c.lib', 'b.lib', 'd.lib', 'a.lib'])
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/python/gyp/pylib/gyp/generator/ninja.py b/third_party/python/gyp/pylib/gyp/generator/ninja.py
new file mode 100644
index 0000000000..3bcfe35292
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/ninja.py
@@ -0,0 +1,2501 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import collections
+import copy
+import hashlib
+import json
+import multiprocessing
+import os.path
+import re
+import signal
+import subprocess
+import sys
+import six
+import gyp
+import gyp.common
+from gyp.common import OrderedSet
+import gyp.msvs_emulation
+import gyp.MSVSUtil as MSVSUtil
+import gyp.xcode_emulation
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
+from gyp.common import GetEnvironFallback
+import gyp.ninja_syntax as ninja_syntax
+
+generator_default_variables = {
+ 'EXECUTABLE_PREFIX': '',
+ 'EXECUTABLE_SUFFIX': '',
+ 'STATIC_LIB_PREFIX': 'lib',
+ 'STATIC_LIB_SUFFIX': '.a',
+ 'SHARED_LIB_PREFIX': 'lib',
+
+ # Gyp expects the following variables to be expandable by the build
+ # system to the appropriate locations. Ninja prefers paths to be
+ # known at gyp time. To resolve this, introduce special
+ # variables starting with $! and $| (which begin with a $ so gyp knows they
+ # should be treated specially, but are otherwise invalid
+ # ninja/shell variables) that are passed to gyp here but expanded
+ # before writing out into the target .ninja files; see
+ # ExpandSpecial.
+ # $! is used for variables that represent a path and that can only appear at
+ # the start of a string, while $| is used for variables that can appear
+ # anywhere in a string.
+ 'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
+ 'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
+ 'PRODUCT_DIR': '$!PRODUCT_DIR',
+ 'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',
+
+ # Special variables that may be used by gyp 'rule' targets.
+ # We generate definitions for these variables on the fly when processing a
+ # rule.
+ 'RULE_INPUT_ROOT': '${root}',
+ 'RULE_INPUT_DIRNAME': '${dirname}',
+ 'RULE_INPUT_PATH': '${source}',
+ 'RULE_INPUT_EXT': '${ext}',
+ 'RULE_INPUT_NAME': '${name}',
+}
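+# Illustrative expansion (see ExpandSpecial): with a build directory of 'out'
+# and configuration 'Debug', '$!PRODUCT_DIR/gen' might become 'out/Debug/gen',
+# while '$|CONFIGURATION_NAME' can expand mid-string, e.g.
+# 'foo/$|CONFIGURATION_NAME/bar' -> 'foo/Debug/bar'.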
+
+# Placates pylint.
+generator_additional_non_configuration_keys = []
+generator_additional_path_sections = []
+generator_extra_sources_for_rules = []
+generator_filelist_paths = None
+
+generator_supports_multiple_toolsets = gyp.common.CrossCompileRequested()
+
+def StripPrefix(arg, prefix):
+ if arg.startswith(prefix):
+ return arg[len(prefix):]
+ return arg
+
+
+def QuoteShellArgument(arg, flavor):
+ """Quote a string such that it will be interpreted as a single argument
+ by the shell."""
+ # Rather than attempting to enumerate the bad shell characters, just
+ # whitelist common OK ones and quote anything else.
+ if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
+ return arg # No quoting necessary.
+ if flavor == 'win':
+ return gyp.msvs_emulation.QuoteForRspFile(arg)
+ return "'" + arg.replace("'", "'" + '"\'"' + "'") + "'"
+
+
+def Define(d, flavor):
+ """Takes a preprocessor define and returns a -D parameter that's ninja- and
+ shell-escaped."""
+ if flavor == 'win':
+ # cl.exe replaces literal # characters with = in preprocessor definitions for
+ # some reason. Octal-encode to work around that.
+ d = d.replace('#', '\\%03o' % ord('#'))
+ return QuoteShellArgument(ninja_syntax.escape('-D' + d), flavor)
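+# e.g. (illustrative) Define('FOO#BAR', 'win') emits a flag containing
+# FOO\043BAR, with the '#' octal-encoded as described above.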
+
+
+def AddArch(output, arch):
+ """Adds an arch string to an output path."""
+ output, extension = os.path.splitext(output)
+ return '%s.%s%s' % (output, arch, extension)
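+# e.g. AddArch('out/foo.o', 'x64') -> 'out/foo.x64.o'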
+
+
+class Target(object):
+ """Target represents the paths used within a single gyp target.
+
+ Conceptually, building a single target A is a series of steps:
+
+ 1) actions/rules/copies generates source/resources/etc.
+ 2) compiles generates .o files
+ 3) link generates a binary (library/executable)
+ 4) bundle merges the above in a mac bundle
+
+ (Any of these steps can be optional.)
+
+ From a build ordering perspective, a dependent target B could just
+ depend on the last output of this series of steps.
+
+ But some dependent commands need to reach inside the box.
+ For example, when linking B it needs to get the path to the static
+ library generated by A.
+
+ This object stores those paths. To keep things simple, member
+ variables only store concrete paths to single files, while methods
+ compute derived values like "the last output of the target".
+ """
+ def __init__(self, type):
+ # Gyp type ("static_library", etc.) of this target.
+ self.type = type
+ # File representing whether any input dependencies necessary for
+ # dependent actions have completed.
+ self.preaction_stamp = None
+ # File representing whether any input dependencies necessary for
+ # dependent compiles have completed.
+ self.precompile_stamp = None
+ # File representing the completion of actions/rules/copies, if any.
+ self.actions_stamp = None
+ # Path to the output of the link step, if any.
+ self.binary = None
+ # Path to the file representing the completion of building the bundle,
+ # if any.
+ self.bundle = None
+ # On Windows, incremental linking requires linking against all the .objs
+ # that compose a .lib (rather than the .lib itself). That list is stored
+ # here. In this case, we also need to save the compile_deps for the target,
+ # so that the target that directly depends on the .objs can also depend
+ # on them.
+ self.component_objs = None
+ self.compile_deps = None
+ # Windows only. The import .lib is the output of a build step, but
+ # because dependents only link against the lib (not both the lib and the
+ # dll) we keep track of the import library here.
+ self.import_lib = None
+ # Track if this target contains any C++ files, to decide if gcc or g++
+ # should be used for linking.
+ self.uses_cpp = False
+
+ def Linkable(self):
+ """Return true if this is a target that can be linked against."""
+ return self.type in ('static_library', 'shared_library')
+
+ def UsesToc(self, flavor):
+ """Return true if the target should produce a restat rule based on a TOC
+ file."""
+ # For bundles, the .TOC should be produced for the binary, not for
+ # FinalOutput(). But the naive approach would put the TOC file into the
+ # bundle, so don't do this for bundles for now.
+ if flavor == 'win' or self.bundle:
+ return False
+ return self.type in ('shared_library', 'loadable_module')
+
+ def PreActionInput(self, flavor):
+ """Return the path, if any, that should be used as a dependency of
+ any dependent action step."""
+ if self.UsesToc(flavor):
+ return self.FinalOutput() + '.TOC'
+ return self.FinalOutput() or self.preaction_stamp
+
+ def PreCompileInput(self):
+ """Return the path, if any, that should be used as a dependency of
+ any dependent compile step."""
+ return self.actions_stamp or self.precompile_stamp
+
+ def FinalOutput(self):
+ """Return the last output of the target, which depends on all prior
+ steps."""
+ return self.bundle or self.binary or self.actions_stamp
+
+
+# A small discourse on paths as used within the Ninja build:
+# All files we produce (both at gyp and at build time) appear in the
+# build directory (e.g. out/Debug).
+#
+# Paths within a given .gyp file are always relative to the directory
+# containing the .gyp file. Call these "gyp paths". This includes
+# sources as well as the starting directory a given gyp rule/action
+# expects to be run from. We call the path from the source root to
+# the gyp file the "base directory" within the per-.gyp-file
+# NinjaWriter code.
+#
+# All paths as written into the .ninja files are relative to the build
+# directory. Call these paths "ninja paths".
+#
+# We translate between these two notions of paths with two helper
+# functions:
+#
+# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
+# into the equivalent ninja path.
+#
+# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
+# an output file; the result can be namespaced such that it is unique
+# to the input file name as well as the output target name.
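+#
+# A concrete sketch (assuming a gyp file foo/foo.gyp, a target 'targ', and
+# a build dir out/Debug): GypPathToNinja('bar/baz.cc') returns
+# '../../foo/bar/baz.cc', while GypPathToUniqueOutput('bar/baz.o') returns
+# 'obj/foo/bar/targ.baz.o'.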
+
+class NinjaWriter(object):
+ def __init__(self, hash_for_rules, target_outputs, base_dir, build_dir,
+ output_file, toplevel_build, output_file_name, flavor,
+ toplevel_dir=None):
+ """
+ base_dir: path from source root to directory containing this gyp file;
+ by gyp semantics, all input paths are relative to this
+ build_dir: path from source root to build output
+ toplevel_dir: path to the toplevel directory
+ """
+
+ self.hash_for_rules = hash_for_rules
+ self.target_outputs = target_outputs
+ self.base_dir = base_dir
+ self.build_dir = build_dir
+ self.ninja = ninja_syntax.Writer(output_file)
+ self.toplevel_build = toplevel_build
+ self.output_file_name = output_file_name
+
+ self.flavor = flavor
+ self.abs_build_dir = None
+ if toplevel_dir is not None:
+ self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
+ build_dir))
+ self.obj_ext = '.obj' if flavor == 'win' else '.o'
+ if flavor == 'win':
+ # See docstring of msvs_emulation.GenerateEnvironmentFiles().
+ self.win_env = {}
+ for arch in ('x86', 'x64'):
+ self.win_env[arch] = 'environment.' + arch
+
+ # Relative path from build output dir to base dir.
+ build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
+ self.build_to_base = os.path.join(build_to_top, base_dir)
+ # Relative path from base dir to build dir.
+ base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
+ self.base_to_build = os.path.join(base_to_top, build_dir)
+
+ def ExpandSpecial(self, path, product_dir=None):
+ """Expand specials like $!PRODUCT_DIR in |path|.
+
+ If |product_dir| is None, assumes the cwd is already the product
+ dir. Otherwise, |product_dir| is the relative path to the product
+ dir.
+ """
+
+ PRODUCT_DIR = '$!PRODUCT_DIR'
+ if PRODUCT_DIR in path:
+ if product_dir:
+ path = path.replace(PRODUCT_DIR, product_dir)
+ else:
+ path = path.replace(PRODUCT_DIR + '/', '')
+ path = path.replace(PRODUCT_DIR + '\\', '')
+ path = path.replace(PRODUCT_DIR, '.')
+
+ INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
+ if INTERMEDIATE_DIR in path:
+ int_dir = self.GypPathToUniqueOutput('gen')
+ # GypPathToUniqueOutput generates a path relative to the product dir,
+ # so insert product_dir in front if it is provided.
+ path = path.replace(INTERMEDIATE_DIR,
+ os.path.join(product_dir or '', int_dir))
+
+ CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
+ path = path.replace(CONFIGURATION_NAME, self.config_name)
+
+ return path
+
+ def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
+ if self.flavor == 'win':
+ path = self.msvs_settings.ConvertVSMacros(
+ path, config=self.config_name)
+ path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
+ path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
+ dirname)
+ path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
+ path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
+ path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
+ return path
+
+ def GypPathToNinja(self, path, env=None):
+ """Translate a gyp path to a ninja path, optionally expanding environment
+ variable references in |path| with |env|.
+
+ See the above discourse on path conversions."""
+ if env:
+ if self.flavor == 'mac':
+ path = gyp.xcode_emulation.ExpandEnvVars(path, env)
+ elif self.flavor == 'win':
+ path = gyp.msvs_emulation.ExpandMacros(path, env)
+ if path.startswith('$!'):
+ expanded = self.ExpandSpecial(path)
+ if self.flavor == 'win':
+ expanded = os.path.normpath(expanded)
+ return expanded
+ if '$|' in path:
+ path = self.ExpandSpecial(path)
+ assert '$' not in path, path
+ return os.path.normpath(os.path.join(self.build_to_base, path))
+
+ def GypPathToUniqueOutput(self, path, qualified=True):
+ """Translate a gyp path to a ninja path for writing output.
+
+ If qualified is True, qualify the resulting filename with the name
+ of the target. This is necessary when e.g. compiling the same
+ path twice for two separate output targets.
+
+ See the above discourse on path conversions."""
+
+ path = self.ExpandSpecial(path)
+ assert not path.startswith('$'), path
+
+ # Translate the path following this scheme:
+ # Input: foo/bar.gyp, target targ, references baz/out.o
+ # Output: obj/foo/baz/targ.out.o (if qualified)
+ # obj/foo/baz/out.o (otherwise)
+ # (and obj.host instead of obj for cross-compiles)
+ #
+ # Why this scheme and not some other one?
+ # 1) for a given input, you can compute all derived outputs by matching
+ # its path, even if the input is brought via a gyp file with '..'.
+ # 2) simple files like libraries and stamps have a simple filename.
+
+ obj = 'obj'
+ if self.toolset != 'target':
+ obj += '.' + self.toolset
+
+ path_dir, path_basename = os.path.split(path)
+ assert not os.path.isabs(path_dir), (
+ "'%s' can not be absolute path (see crbug.com/462153)." % path_dir)
+
+ if qualified:
+ path_basename = self.name + '.' + path_basename
+ return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
+ path_basename))
+
+ def WriteCollapsedDependencies(self, name, targets, order_only=None):
+ """Given a list of targets, return a path for a single file
+ representing the result of building all the targets or None.
+
+ Uses a stamp file if necessary."""
+
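+ # For example (a sketch): a single target with no order_only deps is
+ # returned as-is; two or more targets are collapsed behind a generated
+ # '<name>.stamp' edge, and that single stamp path is returned instead.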
+ assert targets == [t for t in targets if t], targets
+ if len(targets) == 0:
+ assert not order_only
+ return None
+ if len(targets) > 1 or order_only:
+ stamp = self.GypPathToUniqueOutput(name + '.stamp')
+ targets = self.ninja.build(stamp, 'stamp', targets, order_only=order_only)
+ self.ninja.newline()
+ return targets[0]
+
+ def _SubninjaNameForArch(self, arch):
+ output_file_base = os.path.splitext(self.output_file_name)[0]
+ return '%s.%s.ninja' % (output_file_base, arch)
+
+ def WriteSpec(self, spec, config_name, generator_flags):
+ """The main entry point for NinjaWriter: write the build rules for a spec.
+
+ Returns a Target object, which represents the output paths for this spec.
+ Returns None if there are no outputs (e.g. a settings-only 'none' type
+ target)."""
+
+ self.config_name = config_name
+ self.name = spec['target_name']
+ self.toolset = spec['toolset']
+ config = spec['configurations'][config_name]
+ self.target = Target(spec['type'])
+ self.is_standalone_static_library = bool(
+ spec.get('standalone_static_library', 0))
+
+ self.target_rpath = generator_flags.get('target_rpath', r'\$$ORIGIN/lib/')
+
+ self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
+ self.xcode_settings = self.msvs_settings = None
+ if self.flavor == 'mac':
+ self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
+ mac_toolchain_dir = generator_flags.get('mac_toolchain_dir', None)
+ if mac_toolchain_dir:
+ self.xcode_settings.mac_toolchain_dir = mac_toolchain_dir
+
+ if self.flavor == 'win':
+ self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
+ generator_flags)
+ arch = self.msvs_settings.GetArch(config_name)
+ self.ninja.variable('arch', self.win_env[arch])
+ self.ninja.variable('cc', '$cl_' + arch)
+ self.ninja.variable('cxx', '$cl_' + arch)
+ self.ninja.variable('cc_host', '$cl_' + arch)
+ self.ninja.variable('cxx_host', '$cl_' + arch)
+ self.ninja.variable('asm', '$ml_' + arch)
+
+ if self.flavor == 'mac':
+ self.archs = self.xcode_settings.GetActiveArchs(config_name)
+ if len(self.archs) > 1:
+ self.arch_subninjas = dict(
+ (arch, ninja_syntax.Writer(
+ OpenOutput(os.path.join(self.toplevel_build,
+ self._SubninjaNameForArch(arch)),
+ 'w')))
+ for arch in self.archs)
+
+ # Compute predepends for all rules.
+ # actions_depends is the dependencies this target depends on before running
+ # any of its action/rule/copy steps.
+ # compile_depends is the dependencies this target depends on before running
+ # any of its compile steps.
+ actions_depends = []
+ compile_depends = []
+ # TODO(evan): it is rather confusing which things are lists and which
+ # are strings. Fix these.
+ if 'dependencies' in spec:
+ for dep in spec['dependencies']:
+ if dep in self.target_outputs:
+ target = self.target_outputs[dep]
+ actions_depends.append(target.PreActionInput(self.flavor))
+ compile_depends.append(target.PreCompileInput())
+ if target.uses_cpp:
+ self.target.uses_cpp = True
+ actions_depends = [d for d in actions_depends if d]
+ compile_depends = [d for d in compile_depends if d]
+ actions_depends = self.WriteCollapsedDependencies('actions_depends',
+ actions_depends)
+ compile_depends = self.WriteCollapsedDependencies('compile_depends',
+ compile_depends)
+ self.target.preaction_stamp = actions_depends
+ self.target.precompile_stamp = compile_depends
+
+ # Write out actions, rules, and copies. These must happen before we
+ # compile any sources, so compute a list of predependencies for sources
+ # while we do it.
+ extra_sources = []
+ mac_bundle_depends = []
+ self.target.actions_stamp = self.WriteActionsRulesCopies(
+ spec, extra_sources, actions_depends, mac_bundle_depends)
+
+ # If we have actions/rules/copies, we depend directly on those, but
+ # otherwise we depend on dependent targets' actions/rules/copies etc.
+ # We never need to explicitly depend on previous targets' link steps,
+ # because no compile ever depends on them.
+ compile_depends_stamp = (self.target.actions_stamp or compile_depends)
+
+ # Write out the compilation steps, if any.
+ link_deps = []
+ try:
+ sources = extra_sources + spec.get('sources', [])
+ except TypeError:
+ print('extra_sources: ', str(extra_sources))
+ print('spec.get("sources"): ', str(spec.get('sources')))
+ raise
+ if sources:
+ if self.flavor == 'mac' and len(self.archs) > 1:
+ # Write subninja file containing compile and link commands scoped to
+ # a single arch if a fat binary is being built.
+ for arch in self.archs:
+ self.ninja.subninja(self._SubninjaNameForArch(arch))
+
+ pch = None
+ if self.flavor == 'win':
+ gyp.msvs_emulation.VerifyMissingSources(
+ sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
+ pch = gyp.msvs_emulation.PrecompiledHeader(
+ self.msvs_settings, config_name, self.GypPathToNinja,
+ self.GypPathToUniqueOutput, self.obj_ext)
+ else:
+ pch = gyp.xcode_emulation.MacPrefixHeader(
+ self.xcode_settings, self.GypPathToNinja,
+ lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
+ link_deps = self.WriteSources(
+ self.ninja, config_name, config, sources, compile_depends_stamp, pch,
+ spec)
+ # Some actions/rules output 'sources' that are already object files.
+ obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
+ if obj_outputs:
+ if self.flavor != 'mac' or len(self.archs) == 1:
+ link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
+ else:
+ print("Warning: Actions/rules writing object files don't work with " \
+ "multiarch targets, dropping. (target %s)" %
+ spec['target_name'])
+ elif self.flavor == 'mac' and len(self.archs) > 1:
+ link_deps = collections.defaultdict(list)
+
+ compile_deps = self.target.actions_stamp or actions_depends
+ if self.flavor == 'win' and self.target.type == 'static_library':
+ self.target.component_objs = link_deps
+ self.target.compile_deps = compile_deps
+
+ # Write out a link step, if needed.
+ output = None
+ is_empty_bundle = not link_deps and not mac_bundle_depends
+ if link_deps or self.target.actions_stamp or actions_depends:
+ output = self.WriteTarget(spec, config_name, config, link_deps,
+ compile_deps)
+ if self.is_mac_bundle:
+ mac_bundle_depends.append(output)
+
+ # Bundle all of the above together, if needed.
+ if self.is_mac_bundle:
+ output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)
+
+ if not output:
+ return None
+
+ assert self.target.FinalOutput(), output
+ return self.target
+
+ def _WinIdlRule(self, source, prebuild, outputs):
+ """Handle the implicit VS .idl rule for one source file. Fills |outputs|
+ with files that are generated."""
+ outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
+ source, self.config_name)
+ outdir = self.GypPathToNinja(outdir)
+ def fix_path(path, rel=None):
+ path = os.path.join(outdir, path)
+ dirname, basename = os.path.split(source)
+ root, ext = os.path.splitext(basename)
+ path = self.ExpandRuleVariables(
+ path, root, dirname, source, ext, basename)
+ if rel:
+ path = os.path.relpath(path, rel)
+ return path
+ vars = [(name, fix_path(value, outdir)) for name, value in vars]
+ output = [fix_path(p) for p in output]
+ vars.append(('outdir', outdir))
+ vars.append(('idlflags', flags))
+ input = self.GypPathToNinja(source)
+ self.ninja.build(output, 'idl', input,
+ variables=vars, order_only=prebuild)
+ outputs.extend(output)
+
+ def WriteWinIdlFiles(self, spec, prebuild):
+ """Writes rules to match MSVS's implicit idl handling."""
+ assert self.flavor == 'win'
+ if self.msvs_settings.HasExplicitIdlRulesOrActions(spec):
+ return []
+ outputs = []
+ for source in filter(lambda x: x.endswith('.idl'), spec['sources']):
+ self._WinIdlRule(source, prebuild, outputs)
+ return outputs
+
+ def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
+ mac_bundle_depends):
+ """Write out the Actions, Rules, and Copies steps. Return a path
+ representing the outputs of these steps."""
+ outputs = []
+ if self.is_mac_bundle:
+ mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
+ else:
+ mac_bundle_resources = []
+ extra_mac_bundle_resources = []
+
+ if 'actions' in spec:
+ outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
+ extra_mac_bundle_resources)
+ if 'rules' in spec:
+ outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
+ mac_bundle_resources,
+ extra_mac_bundle_resources)
+ if 'copies' in spec:
+ outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
+
+ if 'sources' in spec and self.flavor == 'win':
+ outputs += self.WriteWinIdlFiles(spec, prebuild)
+
+ if self.xcode_settings and self.xcode_settings.IsIosFramework():
+ self.WriteiOSFrameworkHeaders(spec, outputs, prebuild)
+
+ stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
+
+ if self.is_mac_bundle:
+ xcassets = self.WriteMacBundleResources(
+ extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
+ partial_info_plist = self.WriteMacXCassets(xcassets, mac_bundle_depends)
+ self.WriteMacInfoPlist(partial_info_plist, mac_bundle_depends)
+
+ return stamp
+
+ def GenerateDescription(self, verb, message, fallback):
+ """Generate and return a description of a build step.
+
+ |verb| is the short summary, e.g. ACTION or RULE.
+ |message| is a hand-written description, or None if not available.
+ |fallback| is the gyp-level name of the step, usable as a fallback.
+ """
+ if self.toolset != 'target':
+ verb += '(%s)' % self.toolset
+ if message:
+ return '%s %s' % (verb, self.ExpandSpecial(message))
+ else:
+ return '%s %s: %s' % (verb, self.name, fallback)
+
+ def WriteActions(self, actions, extra_sources, prebuild,
+ extra_mac_bundle_resources):
+ # Actions cd into the base directory.
+ env = self.GetToolchainEnv()
+ all_outputs = []
+ for action in actions:
+ # First write out a rule for the action.
+ name = '%s_%s' % (action['action_name'], self.hash_for_rules)
+ description = self.GenerateDescription('ACTION',
+ action.get('message', None),
+ name)
+ is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
+ if self.flavor == 'win' else False)
+ args = action['action']
+ depfile = action.get('depfile', None)
+ if depfile:
+ depfile = self.ExpandSpecial(depfile)
+ pool = 'console' if int(action.get('ninja_use_console', 0)) else None
+ rule_name, _ = self.WriteNewNinjaRule(name, args, description,
+ is_cygwin, env, pool,
+ depfile=depfile)
+
+ inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
+ if int(action.get('process_outputs_as_sources', False)):
+ extra_sources += action['outputs']
+ if int(action.get('process_outputs_as_mac_bundle_resources', False)):
+ extra_mac_bundle_resources += action['outputs']
+ outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]
+
+ # Then write out an edge using the rule.
+ self.ninja.build(outputs, rule_name, inputs,
+ order_only=prebuild)
+ all_outputs += outputs
+
+ self.ninja.newline()
+
+ return all_outputs
+
+ def WriteRules(self, rules, extra_sources, prebuild,
+ mac_bundle_resources, extra_mac_bundle_resources):
+ env = self.GetToolchainEnv()
+ all_outputs = []
+ for rule in rules:
+ # Skip a rule with no action and no inputs.
+ if 'action' not in rule and not rule.get('rule_sources', []):
+ continue
+
+ # First write out a rule for the rule action.
+ name = '%s_%s' % (rule['rule_name'], self.hash_for_rules)
+
+ args = rule['action']
+ description = self.GenerateDescription(
+ 'RULE',
+ rule.get('message', None),
+ ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
+ is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
+ if self.flavor == 'win' else False)
+ pool = 'console' if int(rule.get('ninja_use_console', 0)) else None
+ rule_name, args = self.WriteNewNinjaRule(
+ name, args, description, is_cygwin, env, pool)
+
+ # TODO: if the command references the outputs directly, we should
+ # simplify it to just use $out.
+
+ # Rules can potentially make use of some special variables which
+ # must vary per source file.
+ # Compute the list of variables we'll need to provide.
+ special_locals = ('source', 'root', 'dirname', 'ext', 'name')
+ needed_variables = set(['source'])
+ for argument in args:
+ for var in special_locals:
+ if '${%s}' % var in argument:
+ needed_variables.add(var)
+ needed_variables = sorted(needed_variables)
+
+ def cygwin_munge(path):
+ # pylint: disable=cell-var-from-loop
+ if is_cygwin:
+ return path.replace('\\', '/')
+ return path
+
+ inputs = [self.GypPathToNinja(i, env) for i in rule.get('inputs', [])]
+
+ # If there are n source files matching the rule, and m additional rule
+ # inputs, then adding 'inputs' to each build edge written below will
+ # write m * n inputs. Collapsing reduces this to m + n.
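+ # For example, 100 sources with 5 extra inputs would carry 500 input
+ # references on the edges; collapsed behind a single stamp, roughly
+ # 105 references suffice.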
+ sources = rule.get('rule_sources', [])
+ num_inputs = len(inputs)
+ if prebuild:
+ num_inputs += 1
+ if num_inputs > 2 and len(sources) > 2:
+ inputs = [self.WriteCollapsedDependencies(
+ rule['rule_name'], inputs, order_only=prebuild)]
+ prebuild = []
+
+ # For each source file, write an edge that generates all the outputs.
+ for source in sources:
+ source = os.path.normpath(source)
+ dirname, basename = os.path.split(source)
+ root, ext = os.path.splitext(basename)
+
+ # Gather the list of inputs and outputs, expanding $vars if possible.
+ outputs = [self.ExpandRuleVariables(o, root, dirname,
+ source, ext, basename)
+ for o in rule['outputs']]
+
+ if int(rule.get('process_outputs_as_sources', False)):
+ extra_sources += outputs
+
+ was_mac_bundle_resource = source in mac_bundle_resources
+ if was_mac_bundle_resource or \
+ int(rule.get('process_outputs_as_mac_bundle_resources', False)):
+ extra_mac_bundle_resources += outputs
+ # Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
+ # items in a set and remove them all in a single pass if this becomes
+ # a performance issue.
+ if was_mac_bundle_resource:
+ mac_bundle_resources.remove(source)
+
+ extra_bindings = []
+ for var in needed_variables:
+ if var == 'root':
+ extra_bindings.append(('root', cygwin_munge(root)))
+ elif var == 'dirname':
+ # '$dirname' is a parameter to the rule action, which means
+ # it shouldn't be converted to a Ninja path. But we don't
+ # want $!PRODUCT_DIR in there either.
+ dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
+ extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
+ elif var == 'source':
+ # '$source' is a parameter to the rule action, which means
+ # it shouldn't be converted to a Ninja path. But we don't
+ # want $!PRODUCT_DIR in there either.
+ source_expanded = self.ExpandSpecial(source, self.base_to_build)
+ extra_bindings.append(('source', cygwin_munge(source_expanded)))
+ elif var == 'ext':
+ extra_bindings.append(('ext', ext))
+ elif var == 'name':
+ extra_bindings.append(('name', cygwin_munge(basename)))
+ else:
+ assert var is None, repr(var)
+
+ outputs = [self.GypPathToNinja(o, env) for o in outputs]
+ if self.flavor == 'win':
+ # WriteNewNinjaRule uses unique_name for creating an rsp file on win.
+ extra_bindings.append(('unique_name',
+ hashlib.md5(six.ensure_binary(outputs[0])).hexdigest()))
+
+ self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
+ implicit=inputs,
+ order_only=prebuild,
+ variables=extra_bindings)
+
+ all_outputs.extend(outputs)
+
+ return all_outputs
+
+ def WriteCopies(self, copies, prebuild, mac_bundle_depends):
+ outputs = []
+ if self.xcode_settings:
+ extra_env = self.xcode_settings.GetPerTargetSettings()
+ env = self.GetToolchainEnv(additional_settings=extra_env)
+ else:
+ env = self.GetToolchainEnv()
+ for copy in copies:
+ for path in copy['files']:
+ # Normalize the path so trailing slashes don't confuse us.
+ path = os.path.normpath(path)
+ basename = os.path.split(path)[1]
+ src = self.GypPathToNinja(path, env)
+ dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
+ env)
+ outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
+ if self.is_mac_bundle:
+ # gyp has mac_bundle_resources to copy things into a bundle's
+ # Resources folder, but there's no built-in way to copy files to other
+ # places in the bundle. Hence, some targets use copies for this. Check
+ # if this file is copied into the current bundle, and if so add it to
+ # the bundle depends so that dependent targets get rebuilt if the copy
+ # input changes.
+ if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
+ mac_bundle_depends.append(dst)
+
+ return outputs
+
+ def WriteiOSFrameworkHeaders(self, spec, outputs, prebuild):
+ """Prebuild steps to generate hmap files and copy headers to destination."""
+ framework = self.ComputeMacBundleOutput()
+ all_sources = spec['sources']
+ copy_headers = spec['mac_framework_headers']
+ output = self.GypPathToUniqueOutput('headers.hmap')
+ self.xcode_settings.header_map_path = output
+ # Use concrete lists (not lazy map/filter objects) so ninja_syntax
+ # handles them correctly on Python 3.
+ all_headers = [self.GypPathToNinja(x)
+ for x in all_sources if x.endswith('.h')]
+ variables = [('framework', framework),
+ ('copy_headers', [self.GypPathToNinja(x) for x in copy_headers])]
+ outputs.extend(self.ninja.build(
+ output, 'compile_ios_framework_headers', all_headers,
+ variables=variables, order_only=prebuild))
+
+ def WriteMacBundleResources(self, resources, bundle_depends):
+ """Writes ninja edges for 'mac_bundle_resources'."""
+ xcassets = []
+
+ extra_env = self.xcode_settings.GetPerTargetSettings()
+ env = self.GetSortedXcodeEnv(additional_settings=extra_env)
+ env = self.ComputeExportEnvString(env)
+ isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
+
+ for output, res in gyp.xcode_emulation.GetMacBundleResources(
+ generator_default_variables['PRODUCT_DIR'],
+ self.xcode_settings, map(self.GypPathToNinja, resources)):
+ output = self.ExpandSpecial(output)
+ if os.path.splitext(output)[-1] != '.xcassets':
+ self.ninja.build(output, 'mac_tool', res,
+ variables=[('mactool_cmd', 'copy-bundle-resource'), \
+ ('env', env), ('binary', isBinary)])
+ bundle_depends.append(output)
+ else:
+ xcassets.append(res)
+ return xcassets
+
+ def WriteMacXCassets(self, xcassets, bundle_depends):
+ """Writes ninja edges for 'mac_bundle_resources' .xcassets files.
+
+ This adds an invocation of 'actool' via the 'mac_tool.py' helper script.
+ It assumes that the asset catalogs define at least one imageset and
+ thus an Assets.car file will be generated in the application resources
+ directory. If this is not the case, then the build will probably be
+ redone at each invocation of ninja."""
+ if not xcassets:
+ return
+
+ extra_arguments = {}
+ settings_to_arg = {
+ 'XCASSETS_APP_ICON': 'app-icon',
+ 'XCASSETS_LAUNCH_IMAGE': 'launch-image',
+ }
+ settings = self.xcode_settings.xcode_settings[self.config_name]
+ for settings_key, arg_name in settings_to_arg.items():
+ value = settings.get(settings_key)
+ if value:
+ extra_arguments[arg_name] = value
+
+ partial_info_plist = None
+ if extra_arguments:
+ partial_info_plist = self.GypPathToUniqueOutput(
+ 'assetcatalog_generated_info.plist')
+ extra_arguments['output-partial-info-plist'] = partial_info_plist
+
+ outputs = []
+ outputs.append(
+ os.path.join(
+ self.xcode_settings.GetBundleResourceFolder(),
+ 'Assets.car'))
+ if partial_info_plist:
+ outputs.append(partial_info_plist)
+
+ keys = QuoteShellArgument(json.dumps(extra_arguments), self.flavor)
+ extra_env = self.xcode_settings.GetPerTargetSettings()
+ env = self.GetSortedXcodeEnv(additional_settings=extra_env)
+ env = self.ComputeExportEnvString(env)
+
+ bundle_depends.extend(self.ninja.build(
+ outputs, 'compile_xcassets', xcassets,
+ variables=[('env', env), ('keys', keys)]))
+ return partial_info_plist
+
+ def WriteMacInfoPlist(self, partial_info_plist, bundle_depends):
+ """Write build rules for bundle Info.plist files."""
+ info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
+ generator_default_variables['PRODUCT_DIR'],
+ self.xcode_settings, self.GypPathToNinja)
+ if not info_plist:
+ return
+ out = self.ExpandSpecial(out)
+ if defines:
+ # Create an intermediate file to store preprocessed results.
+ intermediate_plist = self.GypPathToUniqueOutput(
+ os.path.basename(info_plist))
+ defines = ' '.join([Define(d, self.flavor) for d in defines])
+ info_plist = self.ninja.build(
+ intermediate_plist, 'preprocess_infoplist', info_plist,
+ variables=[('defines',defines)])
+
+ env = self.GetSortedXcodeEnv(additional_settings=extra_env)
+ env = self.ComputeExportEnvString(env)
+
+ if partial_info_plist:
+ intermediate_plist = self.GypPathToUniqueOutput('merged_info.plist')
+ info_plist = self.ninja.build(
+ intermediate_plist, 'merge_infoplist',
+ [partial_info_plist, info_plist])
+
+ keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
+ keys = QuoteShellArgument(json.dumps(keys), self.flavor)
+ isBinary = self.xcode_settings.IsBinaryOutputFormat(self.config_name)
+ self.ninja.build(out, 'copy_infoplist', info_plist,
+ variables=[('env', env), ('keys', keys),
+ ('binary', isBinary)])
+ bundle_depends.append(out)
+
+ def WriteSources(self, ninja_file, config_name, config, sources, predepends,
+ precompiled_header, spec):
+ """Write build rules to compile all of |sources|."""
+ if self.toolset == 'host':
+ self.ninja.variable('ar', '$ar_host')
+ self.ninja.variable('cc', '$cc_host')
+ self.ninja.variable('cxx', '$cxx_host')
+ self.ninja.variable('ld', '$ld_host')
+ self.ninja.variable('ldxx', '$ldxx_host')
+ self.ninja.variable('nm', '$nm_host')
+ self.ninja.variable('readelf', '$readelf_host')
+
+ if self.flavor != 'mac' or len(self.archs) == 1:
+ return self.WriteSourcesForArch(
+ self.ninja, config_name, config, sources, predepends,
+ precompiled_header, spec)
+ else:
+ return dict((arch, self.WriteSourcesForArch(
+ self.arch_subninjas[arch], config_name, config, sources, predepends,
+ precompiled_header, spec, arch=arch))
+ for arch in self.archs)
+
+ def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
+ predepends, precompiled_header, spec, arch=None):
+ """Write build rules to compile all of |sources|."""
+
+ extra_defines = []
+ if self.flavor == 'mac':
+ cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
+ cflags_c = self.xcode_settings.GetCflagsC(config_name)
+ cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
+ cflags_objc = ['$cflags_c'] + \
+ self.xcode_settings.GetCflagsObjC(config_name)
+ cflags_objcc = ['$cflags_cc'] + \
+ self.xcode_settings.GetCflagsObjCC(config_name)
+ elif self.flavor == 'win':
+ asmflags = self.msvs_settings.GetAsmflags(config_name)
+ cflags = self.msvs_settings.GetCflags(config_name)
+ cflags_c = self.msvs_settings.GetCflagsC(config_name)
+ cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
+ extra_defines = self.msvs_settings.GetComputedDefines(config_name)
+ # See comment at cc_command for why there are two .pdb files.
+ pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
+ config_name, self.ExpandSpecial)
+ if not pdbpath_c:
+ obj = 'obj'
+ if self.toolset != 'target':
+ obj += '.' + self.toolset
+ pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
+ pdbpath_c = pdbpath + '.c.pdb'
+ pdbpath_cc = pdbpath + '.cc.pdb'
+ self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
+ self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
+ self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
+ else:
+ cflags = config.get('cflags', [])
+ cflags_c = config.get('cflags_c', [])
+ cflags_cc = config.get('cflags_cc', [])
+
+ # Respect environment variables related to build, but target-specific
+ # flags can still override them.
+ if self.toolset == 'target':
+ cflags_c = (os.environ.get('CPPFLAGS', '').split() +
+ os.environ.get('CFLAGS', '').split() + cflags_c)
+ cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
+ os.environ.get('CXXFLAGS', '').split() + cflags_cc)
+ elif self.toolset == 'host':
+ cflags_c = (os.environ.get('CPPFLAGS_host', '').split() +
+ os.environ.get('CFLAGS_host', '').split() + cflags_c)
+ cflags_cc = (os.environ.get('CPPFLAGS_host', '').split() +
+ os.environ.get('CXXFLAGS_host', '').split() + cflags_cc)
+
+ defines = config.get('defines', []) + extra_defines
+ self.WriteVariableList(ninja_file, 'defines',
+ [Define(d, self.flavor) for d in defines])
+ if self.flavor == 'win':
+ self.WriteVariableList(ninja_file, 'asmflags',
+ map(self.ExpandSpecial, asmflags))
+ self.WriteVariableList(ninja_file, 'rcflags',
+ [QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
+ for f in self.msvs_settings.GetRcflags(config_name,
+ self.GypPathToNinja)])
+
+ include_dirs = config.get('include_dirs', [])
+
+ env = self.GetToolchainEnv()
+ if self.flavor == 'win':
+ include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
+ config_name)
+ self.WriteVariableList(ninja_file, 'includes',
+ [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
+ for i in include_dirs])
+
+ if self.flavor == 'win':
+ midl_include_dirs = config.get('midl_include_dirs', [])
+ midl_include_dirs = self.msvs_settings.AdjustMidlIncludeDirs(
+ midl_include_dirs, config_name)
+ self.WriteVariableList(ninja_file, 'midl_includes',
+ [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
+ for i in midl_include_dirs])
+
+ pch_commands = precompiled_header.GetPchBuildCommands(arch)
+ if self.flavor == 'mac':
+ # Most targets use no precompiled headers, so only write these if needed.
+ for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
+ ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
+ include = precompiled_header.GetInclude(ext, arch)
+ if include: ninja_file.variable(var, include)
+
+ arflags = config.get('arflags', [])
+
+ self.WriteVariableList(ninja_file, 'cflags',
+ map(self.ExpandSpecial, cflags))
+ self.WriteVariableList(ninja_file, 'cflags_c',
+ map(self.ExpandSpecial, cflags_c))
+ self.WriteVariableList(ninja_file, 'cflags_cc',
+ map(self.ExpandSpecial, cflags_cc))
+ if self.flavor == 'mac':
+ self.WriteVariableList(ninja_file, 'cflags_objc',
+ map(self.ExpandSpecial, cflags_objc))
+ self.WriteVariableList(ninja_file, 'cflags_objcc',
+ map(self.ExpandSpecial, cflags_objcc))
+ self.WriteVariableList(ninja_file, 'arflags',
+ map(self.ExpandSpecial, arflags))
+ ninja_file.newline()
+ outputs = []
+ has_rc_source = False
+ for source in sources:
+ filename, ext = os.path.splitext(source)
+ ext = ext[1:]
+ obj_ext = self.obj_ext
+ if ext in ('cc', 'cpp', 'cxx'):
+ command = 'cxx'
+ self.target.uses_cpp = True
+ elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
+ command = 'cc'
+ elif ext == 's' and self.flavor != 'win': # Doesn't generate .o.d files.
+ command = 'cc_s'
+ elif (self.flavor == 'win' and ext == 'asm' and
+ not self.msvs_settings.HasExplicitAsmRules(spec)):
+ command = 'asm'
+ # Add the _asm suffix as msvs is capable of handling .cc and
+ # .asm files of the same name without collision.
+ obj_ext = '_asm.obj'
+ elif self.flavor == 'mac' and ext == 'm':
+ command = 'objc'
+ elif self.flavor == 'mac' and ext == 'mm':
+ command = 'objcxx'
+ self.target.uses_cpp = True
+ elif self.flavor == 'win' and ext == 'rc':
+ command = 'rc'
+ obj_ext = '.res'
+ has_rc_source = True
+ else:
+ # Ignore unhandled extensions.
+ continue
+ input = self.GypPathToNinja(source)
+ output = self.GypPathToUniqueOutput(filename + obj_ext)
+ if arch is not None:
+ output = AddArch(output, arch)
+ implicit = precompiled_header.GetObjDependencies([input], [output], arch)
+ variables = []
+ if self.flavor == 'win':
+ variables, output, implicit = precompiled_header.GetFlagsModifications(
+ input, output, implicit, command, cflags_c, cflags_cc,
+ self.ExpandSpecial)
+ ninja_file.build(output, command, input,
+ implicit=[gch for _, _, gch in implicit],
+ order_only=predepends, variables=variables)
+ outputs.append(output)
+
+ if has_rc_source:
+ resource_include_dirs = config.get('resource_include_dirs', include_dirs)
+ self.WriteVariableList(ninja_file, 'resource_includes',
+ [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
+ for i in resource_include_dirs])
+
+ self.WritePchTargets(ninja_file, pch_commands)
+
+ ninja_file.newline()
+ return outputs
+
+ def WritePchTargets(self, ninja_file, pch_commands):
+ """Writes ninja rules to compile prefix headers."""
+ if not pch_commands:
+ return
+
+ for gch, lang_flag, lang, input in pch_commands:
+ var_name = {
+ 'c': 'cflags_pch_c',
+ 'cc': 'cflags_pch_cc',
+ 'm': 'cflags_pch_objc',
+ 'mm': 'cflags_pch_objcc',
+ }[lang]
+
+ lang_to_rule = {'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx'}
+ cmd = lang_to_rule.get(lang)
+ ninja_file.build(gch, cmd, input, variables=[(var_name, lang_flag)])
+
+ def WriteLink(self, spec, config_name, config, link_deps, compile_deps):
+ """Write out a link step. Fills out target.binary. """
+ if self.flavor != 'mac' or len(self.archs) == 1:
+ return self.WriteLinkForArch(
+ self.ninja, spec, config_name, config, link_deps, compile_deps)
+ else:
+ output = self.ComputeOutput(spec)
+ inputs = [self.WriteLinkForArch(self.arch_subninjas[arch], spec,
+ config_name, config, link_deps[arch],
+ compile_deps, arch=arch)
+ for arch in self.archs]
+ extra_bindings = []
+ build_output = output
+ if not self.is_mac_bundle:
+ self.AppendPostbuildVariable(extra_bindings, spec, output, output)
+
+ # TODO(yyanagisawa): more work needed to fix:
+ # https://code.google.com/p/gyp/issues/detail?id=411
+ if (spec['type'] in ('shared_library', 'loadable_module') and
+ not self.is_mac_bundle):
+ extra_bindings.append(('lib', output))
+ self.ninja.build([output, output + '.TOC'], 'solipo', inputs,
+ variables=extra_bindings)
+ else:
+ self.ninja.build(build_output, 'lipo', inputs, variables=extra_bindings)
+ return output
+
+ def WriteLinkForArch(self, ninja_file, spec, config_name, config,
+ link_deps, compile_deps, arch=None):
+ """Write out a link step. Fills out target.binary. """
+ command = {
+ 'executable': 'link',
+ 'loadable_module': 'solink_module',
+ 'shared_library': 'solink',
+ }[spec['type']]
+ command_suffix = ''
+
+ implicit_deps = set()
+ solibs = set()
+ order_deps = set()
+
+ if compile_deps:
+ # Normally, the compiles of the target already depend on compile_deps,
+ # but a shared_library target might have no sources and only link together
+ # a few static_library deps, so the link step also needs to depend
+ # on compile_deps to make sure actions in the shared_library target
+ # get run before the link.
+ order_deps.add(compile_deps)
+
+ if 'dependencies' in spec:
+ # Two kinds of dependencies:
+ # - Linkable dependencies (like a .a or a .so): add them to the link line.
+ # - Non-linkable dependencies (like a rule that generates a file
+ # and writes a stamp file): add them to implicit_deps
+ extra_link_deps = set()
+ for dep in spec['dependencies']:
+ target = self.target_outputs.get(dep)
+ if not target:
+ continue
+ linkable = target.Linkable()
+ if linkable:
+ new_deps = []
+ if (self.flavor == 'win' and
+ target.component_objs and
+ self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
+ new_deps = target.component_objs
+ if target.compile_deps:
+ order_deps.add(target.compile_deps)
+ elif self.flavor == 'win' and target.import_lib:
+ new_deps = [target.import_lib]
+ elif target.UsesToc(self.flavor):
+ solibs.add(target.binary)
+ implicit_deps.add(target.binary + '.TOC')
+ else:
+ new_deps = [target.binary]
+ for new_dep in new_deps:
+ if new_dep not in extra_link_deps:
+ extra_link_deps.add(new_dep)
+ link_deps.append(new_dep)
+
+ final_output = target.FinalOutput()
+ if not linkable or final_output != target.binary:
+ implicit_deps.add(final_output)
+
+ extra_bindings = []
+ if self.target.uses_cpp and self.flavor != 'win':
+ extra_bindings.append(('ld', '$ldxx'))
+
+ output = self.ComputeOutput(spec, arch)
+ if arch is None and not self.is_mac_bundle:
+ self.AppendPostbuildVariable(extra_bindings, spec, output, output)
+
+ is_executable = spec['type'] == 'executable'
+ # The ldflags config key is not used on mac or win. On those platforms
+ # linker flags are set via xcode_settings and msvs_settings, respectively.
+ if self.toolset == 'target':
+ env_ldflags = os.environ.get('LDFLAGS', '').split()
+ elif self.toolset == 'host':
+ env_ldflags = os.environ.get('LDFLAGS_host', '').split()
+ if self.flavor == 'mac':
+ ldflags = self.xcode_settings.GetLdflags(config_name,
+ self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
+ self.GypPathToNinja, arch)
+ ldflags = env_ldflags + ldflags
+ elif self.flavor == 'win':
+ manifest_base_name = self.GypPathToUniqueOutput(
+ self.ComputeOutputFileName(spec))
+ ldflags, intermediate_manifest, manifest_files = \
+ self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
+ self.ExpandSpecial, manifest_base_name,
+ output, is_executable,
+ self.toplevel_build)
+ ldflags = env_ldflags + ldflags
+ self.WriteVariableList(ninja_file, 'manifests', manifest_files)
+ implicit_deps = implicit_deps.union(manifest_files)
+ if intermediate_manifest:
+ self.WriteVariableList(
+ ninja_file, 'intermediatemanifest', [intermediate_manifest])
+ command_suffix = _GetWinLinkRuleNameSuffix(
+ self.msvs_settings.IsEmbedManifest(config_name))
+ def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
+ if def_file:
+ implicit_deps.add(def_file)
+ else:
+ # Respect environment variables related to build, but target-specific
+ # flags can still override them.
+ ldflags = env_ldflags + config.get('ldflags', [])
+ if is_executable and len(solibs):
+ rpath = 'lib/'
+ if self.toolset != 'target':
+ rpath += self.toolset
+ ldflags.append(r'-Wl,-rpath=\$$ORIGIN/%s' % rpath)
+ else:
+ ldflags.append('-Wl,-rpath=%s' % self.target_rpath)
+ ldflags.append('-Wl,-rpath-link=%s' % rpath)
+ self.WriteVariableList(ninja_file, 'ldflags',
+ map(self.ExpandSpecial, ldflags))
+
+ library_dirs = config.get('library_dirs', [])
+ if self.flavor == 'win':
+ library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
+ for l in library_dirs]
+ library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
+ self.flavor)
+ for l in library_dirs]
+ else:
+ library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
+ self.flavor)
+ for l in library_dirs]
+
+ libraries = gyp.common.uniquer(map(self.ExpandSpecial,
+ spec.get('libraries', [])))
+ if self.flavor == 'mac':
+ libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
+ elif self.flavor == 'win':
+ libraries = self.msvs_settings.AdjustLibraries(libraries)
+
+ self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)
+
+ linked_binary = output
+
+ if command in ('solink', 'solink_module'):
+ extra_bindings.append(('soname', os.path.split(output)[1]))
+ extra_bindings.append(('lib',
+ gyp.common.EncodePOSIXShellArgument(output)))
+ if self.flavor != 'win':
+ link_file_list = output
+ if self.is_mac_bundle:
+ # 'Dependency Framework.framework/Versions/A/Dependency Framework' ->
+ # 'Dependency Framework.framework.rsp'
+ link_file_list = self.xcode_settings.GetWrapperName()
+ if arch:
+ link_file_list += '.' + arch
+ link_file_list += '.rsp'
+ # If an rspfile contains spaces, ninja surrounds the filename with
+ # quotes and then passes it to open(), creating a file with quotes in
+ # its name (and when looking for the rsp file, the name goes through
+ # bash, which strips the quotes) :-/
+ link_file_list = link_file_list.replace(' ', '_')
+ extra_bindings.append(
+ ('link_file_list',
+ gyp.common.EncodePOSIXShellArgument(link_file_list)))
+ if self.flavor == 'win':
+ extra_bindings.append(('binary', output))
+ if ('/NOENTRY' not in ldflags and
+ not self.msvs_settings.GetNoImportLibrary(config_name)):
+ self.target.import_lib = output + '.lib'
+ extra_bindings.append(('implibflag',
+ '/IMPLIB:%s' % self.target.import_lib))
+ pdbname = self.msvs_settings.GetPDBName(
+ config_name, self.ExpandSpecial, output + '.pdb')
+ output = [output, self.target.import_lib]
+ if pdbname:
+ output.append(pdbname)
+ elif not self.is_mac_bundle:
+ output = [output, output + '.TOC']
+ else:
+ command = command + '_notoc'
+ elif self.flavor == 'win':
+ extra_bindings.append(('binary', output))
+ pdbname = self.msvs_settings.GetPDBName(
+ config_name, self.ExpandSpecial, output + '.pdb')
+ if pdbname:
+ output = [output, pdbname]
+
+ if len(solibs):
+ extra_bindings.append(('solibs',
+ gyp.common.EncodePOSIXShellList(sorted(solibs))))
+
+ ninja_file.build(output, command + command_suffix, link_deps,
+ implicit=sorted(implicit_deps),
+ order_only=list(order_deps),
+ variables=extra_bindings)
+ return linked_binary
+
+ def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
+ extra_link_deps = any(self.target_outputs.get(dep).Linkable()
+ for dep in spec.get('dependencies', [])
+ if dep in self.target_outputs)
+ if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
+ # TODO(evan): don't call this function for 'none' target types, as
+ # it doesn't do anything, and we fake out a 'binary' with a stamp file.
+ self.target.binary = compile_deps
+ self.target.type = 'none'
+ elif spec['type'] == 'static_library':
+ self.target.binary = self.ComputeOutput(spec)
+ if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
+ self.is_standalone_static_library):
+ self.ninja.build(self.target.binary, 'alink_thin', link_deps,
+ order_only=compile_deps)
+ else:
+ variables = []
+ if self.xcode_settings:
+ libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
+ if libtool_flags:
+ variables.append(('libtool_flags', libtool_flags))
+ if self.msvs_settings:
+ libflags = self.msvs_settings.GetLibFlags(config_name,
+ self.GypPathToNinja)
+ variables.append(('libflags', libflags))
+
+ if self.flavor != 'mac' or len(self.archs) == 1:
+ self.AppendPostbuildVariable(variables, spec,
+ self.target.binary, self.target.binary)
+ self.ninja.build(self.target.binary, 'alink', link_deps,
+ order_only=compile_deps, variables=variables)
+ else:
+ inputs = []
+ for arch in self.archs:
+ output = self.ComputeOutput(spec, arch)
+ self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
+ order_only=compile_deps,
+ variables=variables)
+ inputs.append(output)
+ # TODO: It's not clear if libtool_flags should be passed to the alink
+ # call that combines single-arch .a files into a fat .a file.
+ self.AppendPostbuildVariable(variables, spec,
+ self.target.binary, self.target.binary)
+ self.ninja.build(self.target.binary, 'alink', inputs,
+ # FIXME: test proving order_only=compile_deps isn't
+ # needed.
+ variables=variables)
+ else:
+ self.target.binary = self.WriteLink(spec, config_name, config, link_deps,
+ compile_deps)
+ return self.target.binary
+
+ def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
+ assert self.is_mac_bundle
+ package_framework = spec['type'] in ('shared_library', 'loadable_module')
+ output = self.ComputeMacBundleOutput()
+ if is_empty:
+ output += '.stamp'
+ variables = []
+ self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
+ is_command_start=not package_framework)
+ if package_framework and not is_empty:
+ if spec['type'] == 'shared_library' and self.xcode_settings.isIOS:
+ self.ninja.build(output, 'package_ios_framework', mac_bundle_depends,
+ variables=variables)
+ else:
+ variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
+ self.ninja.build(output, 'package_framework', mac_bundle_depends,
+ variables=variables)
+ else:
+ self.ninja.build(output, 'stamp', mac_bundle_depends,
+ variables=variables)
+ self.target.bundle = output
+ return output
+
+ def GetToolchainEnv(self, additional_settings=None):
+ """Returns the variables toolchain would set for build steps."""
+ env = self.GetSortedXcodeEnv(additional_settings=additional_settings)
+ if self.flavor == 'win':
+ env = self.GetMsvsToolchainEnv(
+ additional_settings=additional_settings)
+ return env
+
+ def GetMsvsToolchainEnv(self, additional_settings=None):
+ """Returns the variables Visual Studio would set for build steps."""
+ return self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
+ config=self.config_name)
+
+ def GetSortedXcodeEnv(self, additional_settings=None):
+ """Returns the variables Xcode would set for build steps."""
+ assert self.abs_build_dir
+ abs_build_dir = self.abs_build_dir
+ return gyp.xcode_emulation.GetSortedXcodeEnv(
+ self.xcode_settings, abs_build_dir,
+ os.path.join(abs_build_dir, self.build_to_base), self.config_name,
+ additional_settings)
+
+ def GetSortedXcodePostbuildEnv(self):
+ """Returns the variables Xcode would set for postbuild steps."""
+ postbuild_settings = {}
+ # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
+ # TODO(thakis): It would be nice to have some general mechanism instead.
+ strip_save_file = self.xcode_settings.GetPerTargetSetting(
+ 'CHROMIUM_STRIP_SAVE_FILE')
+ if strip_save_file:
+ postbuild_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
+ return self.GetSortedXcodeEnv(additional_settings=postbuild_settings)
+
+ def AppendPostbuildVariable(self, variables, spec, output, binary,
+ is_command_start=False):
+ """Adds a 'postbuild' variable if there is a postbuild for |output|."""
+ postbuild = self.GetPostbuildCommand(spec, output, binary, is_command_start)
+ if postbuild:
+ variables.append(('postbuilds', postbuild))
+
+ def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
+ """Returns a shell command that runs all the postbuilds, and removes
+ |output| if any of them fails. If |is_command_start| is False, then the
+ returned string will start with ' && '."""
+ if not self.xcode_settings or spec['type'] == 'none' or not output:
+ return ''
+ output = QuoteShellArgument(output, self.flavor)
+ postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
+ if output_binary is not None:
+ postbuilds = self.xcode_settings.AddImplicitPostbuilds(
+ self.config_name,
+ os.path.normpath(os.path.join(self.base_to_build, output)),
+ QuoteShellArgument(
+ os.path.normpath(os.path.join(self.base_to_build, output_binary)),
+ self.flavor),
+ postbuilds, quiet=True)
+
+ if not postbuilds:
+ return ''
+ # Postbuilds expect to be run in the gyp file's directory, so insert an
+ # implicit postbuild to cd to there.
+ postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
+ ['cd', self.build_to_base]))
+ env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
+ # G will be non-zero if any postbuild fails. Run all postbuilds in a
+ # subshell.
+ commands = env + ' (' + \
+ ' && '.join([ninja_syntax.escape(command) for command in postbuilds])
+ command_string = (commands + '); G=$$?; '
+ # Remove the final output if any postbuild failed.
+ '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
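+ # The assembled fragment looks roughly like (a sketch):
+ # export FOO=foo; (cd base && pb1 && pb2); G=$$?;
+ # ((exit $$G) || rm -rf out) && exit $$G)
+ # with the matching opening parenthesis added by the wrapping below.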
+ if is_command_start:
+ return '(' + command_string + ' && '
+ else:
+ return '$ && (' + command_string
+
+ def ComputeExportEnvString(self, env):
+ """Given an environment, returns a string looking like
+ 'export FOO=foo; export BAR="${FOO} bar";'
+ that exports |env| to the shell."""
+ export_str = []
+ for k, v in env:
+ export_str.append('export %s=%s;' %
+ (k, ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(v))))
+ return ' '.join(export_str)
+
+ def ComputeMacBundleOutput(self):
+ """Return the 'output' (full output path) to a bundle output directory."""
+ assert self.is_mac_bundle
+ path = generator_default_variables['PRODUCT_DIR']
+ return self.ExpandSpecial(
+ os.path.join(path, self.xcode_settings.GetWrapperName()))
+
+ def ComputeOutputFileName(self, spec, type=None):
+ """Compute the filename of the final output for the current target."""
+ if not type:
+ type = spec['type']
+
+ default_variables = copy.copy(generator_default_variables)
+ CalculateVariables(default_variables, {'flavor': self.flavor})
+
+ # Compute filename prefix: the product prefix, or a default for
+ # the product type.
+ DEFAULT_PREFIX = {
+ 'loadable_module': default_variables['SHARED_LIB_PREFIX'],
+ 'shared_library': default_variables['SHARED_LIB_PREFIX'],
+ 'static_library': default_variables['STATIC_LIB_PREFIX'],
+ 'executable': default_variables['EXECUTABLE_PREFIX'],
+ }
+ prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))
+
+ # Compute filename extension: the product extension, or a default
+ # for the product type.
+ DEFAULT_EXTENSION = {
+ 'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
+ 'shared_library': default_variables['SHARED_LIB_SUFFIX'],
+ 'static_library': default_variables['STATIC_LIB_SUFFIX'],
+ 'executable': default_variables['EXECUTABLE_SUFFIX'],
+ }
+ extension = spec.get('product_extension')
+ if extension:
+ extension = '.' + extension
+ else:
+ extension = DEFAULT_EXTENSION.get(type, '')
+
+ if 'product_name' in spec:
+ # If we were given an explicit name, use that.
+ target = spec['product_name']
+ else:
+ # Otherwise, derive a name from the target name.
+ target = spec['target_name']
+ if prefix == 'lib':
+ # Snip out an extra 'lib' from libs if appropriate.
+ target = StripPrefix(target, 'lib')
+
+ if type in ('static_library', 'loadable_module', 'shared_library',
+ 'executable'):
+ return '%s%s%s' % (prefix, target, extension)
+ elif type == 'none':
+ return '%s.stamp' % target
+ else:
+ raise Exception('Unhandled output type %s' % type)
+
+ def ComputeOutput(self, spec, arch=None):
+ """Compute the path for the final output of the spec."""
+ type = spec['type']
+
+ if self.flavor == 'win':
+ override = self.msvs_settings.GetOutputName(self.config_name,
+ self.ExpandSpecial)
+ if override:
+ return override
+
+ if arch is None and self.flavor == 'mac' and type in (
+ 'static_library', 'executable', 'shared_library', 'loadable_module'):
+ filename = self.xcode_settings.GetExecutablePath()
+ else:
+ filename = self.ComputeOutputFileName(spec, type)
+
+ if arch is None and 'product_dir' in spec:
+ path = os.path.join(spec['product_dir'], filename)
+ return self.ExpandSpecial(path)
+
+ # Some products go into the output root, libraries go into shared library
+ # dir, and everything else goes into the normal place.
+ type_in_output_root = ['executable', 'loadable_module']
+ if self.flavor == 'mac' and self.toolset == 'target':
+ type_in_output_root += ['shared_library', 'static_library']
+ elif self.flavor == 'win' and self.toolset == 'target':
+ type_in_output_root += ['shared_library']
+
+ if arch is not None:
+ # Make sure partial executables don't end up in a bundle or the regular
+ # output directory.
+ archdir = 'arch'
+ if self.toolset != 'target':
+ archdir = os.path.join('arch', '%s' % self.toolset)
+ return os.path.join(archdir, AddArch(filename, arch))
+ elif type in type_in_output_root or self.is_standalone_static_library:
+ return filename
+ elif type == 'shared_library':
+ libdir = 'lib'
+ if self.toolset != 'target':
+ libdir = os.path.join('lib', '%s' % self.toolset)
+ return os.path.join(libdir, filename)
+ else:
+ return self.GypPathToUniqueOutput(filename, qualified=False)
+
+ def WriteVariableList(self, ninja_file, var, values):
+ assert not isinstance(values, str)
+ if values is None:
+ values = []
+ ninja_file.variable(var, ' '.join(values))
+
+ def WriteNewNinjaRule(self, name, args, description, is_cygwin, env, pool,
+ depfile=None):
+ """Write out a new ninja "rule" statement for a given command.
+
+ Returns the name of the new rule, and a copy of |args| with variables
+ expanded."""
+
+ if self.flavor == 'win':
+ args = [self.msvs_settings.ConvertVSMacros(
+ arg, self.base_to_build, config=self.config_name)
+ for arg in args]
+ description = self.msvs_settings.ConvertVSMacros(
+ description, config=self.config_name)
+ elif self.flavor == 'mac':
+ # |env| is an empty list on non-mac.
+ args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
+ description = gyp.xcode_emulation.ExpandEnvVars(description, env)
+
+ # TODO: we shouldn't need to qualify names; we do it because
+ # currently the ninja rule namespace is global, but it really
+ # should be scoped to the subninja.
+ rule_name = self.name
+ if self.toolset == 'target':
+ rule_name += '.' + self.toolset
+ rule_name += '.' + name
+ rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
+
+ # Remove variable references, but not if they refer to the magic rule
+ # variables. This is not quite right, as it also protects these for
+ # actions, not just for rules where they are valid. Good enough.
+ protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
+ protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
+ description = re.sub(protect + r'\$', '_', description)
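+ # Illustrative example (editor's note): a description 'RULE ${source} $foo'
+ # becomes 'RULE ${source} _foo': the protected rule variable survives,
+ # while the unprotected '$foo' reference is defanged.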
+
+ # gyp dictates that commands are run from the base directory.
+ # cd into the directory before running, and adjust paths in
+ # the arguments to point to the proper locations.
+ rspfile = None
+ rspfile_content = None
+ args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
+ if self.flavor == 'win':
+ rspfile = rule_name + '.$unique_name.rsp'
+ # The cygwin case handles this inside the bash sub-shell.
+ run_in = '' if is_cygwin else ' ' + self.build_to_base
+ if is_cygwin:
+ rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
+ args, self.build_to_base)
+ else:
+ rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
+ command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
+ rspfile + run_in)
+ else:
+ env = self.ComputeExportEnvString(env)
+ command = gyp.common.EncodePOSIXShellList(args)
+ command = 'cd %s; ' % self.build_to_base + env + command
+
+ # GYP rules/actions express being no-ops by not touching their outputs.
+ # Avoid executing downstream dependencies in this case by specifying
+ # restat=1 to ninja.
+ self.ninja.rule(rule_name, command, description, depfile=depfile,
+ restat=True, pool=pool,
+ rspfile=rspfile, rspfile_content=rspfile_content)
+ self.ninja.newline()
+
+ return rule_name, args
+
+
+def CalculateVariables(default_variables, params):
+ """Calculate additional variables for use in the build (called by gyp)."""
+ global generator_additional_non_configuration_keys
+ global generator_additional_path_sections
+ flavor = gyp.common.GetFlavor(params)
+ if flavor == 'mac':
+ default_variables.setdefault('OS', 'mac')
+ default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
+ default_variables.setdefault('SHARED_LIB_DIR',
+ generator_default_variables['PRODUCT_DIR'])
+ default_variables.setdefault('LIB_DIR',
+ generator_default_variables['PRODUCT_DIR'])
+
+ # Copy additional generator configuration data from Xcode, which is shared
+ # by the Mac Ninja generator.
+ import gyp.generator.xcode as xcode_generator
+ generator_additional_non_configuration_keys = getattr(xcode_generator,
+ 'generator_additional_non_configuration_keys', [])
+ generator_additional_path_sections = getattr(xcode_generator,
+ 'generator_additional_path_sections', [])
+ global generator_extra_sources_for_rules
+ generator_extra_sources_for_rules = getattr(xcode_generator,
+ 'generator_extra_sources_for_rules', [])
+ elif flavor == 'win':
+ exts = gyp.MSVSUtil.TARGET_TYPE_EXT
+ default_variables.setdefault('OS', 'win')
+ default_variables['EXECUTABLE_SUFFIX'] = '.' + exts['executable']
+ default_variables['STATIC_LIB_PREFIX'] = ''
+ default_variables['STATIC_LIB_SUFFIX'] = '.' + exts['static_library']
+ default_variables['SHARED_LIB_PREFIX'] = ''
+ default_variables['SHARED_LIB_SUFFIX'] = '.' + exts['shared_library']
+
+ # Copy additional generator configuration data from VS, which is shared
+ # by the Windows Ninja generator.
+ import gyp.generator.msvs as msvs_generator
+ generator_additional_non_configuration_keys = getattr(msvs_generator,
+ 'generator_additional_non_configuration_keys', [])
+ generator_additional_path_sections = getattr(msvs_generator,
+ 'generator_additional_path_sections', [])
+
+ gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
+ else:
+ operating_system = flavor
+ if flavor == 'android':
+ operating_system = 'linux' # Keep this legacy behavior for now.
+ default_variables.setdefault('OS', operating_system)
+ default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
+ default_variables.setdefault('SHARED_LIB_DIR',
+ os.path.join('$!PRODUCT_DIR', 'lib'))
+ default_variables.setdefault('LIB_DIR',
+ os.path.join('$!PRODUCT_DIR', 'obj'))
+
+def ComputeOutputDir(params):
+ """Returns the path from the toplevel_dir to the build output directory."""
+ # generator_dir: relative path from pwd to where make puts build files.
+ # Makes migrating from make to ninja easier; ninja doesn't put anything here.
+ generator_dir = os.path.relpath(params['options'].generator_output or '.')
+
+ # output_dir: relative path from generator_dir to the build directory.
+ output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
+
+ # Relative path from source root to our output files. e.g. "out"
+ return os.path.normpath(os.path.join(generator_dir, output_dir))
+
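+ # Illustrative examples (editor's note): with the defaults (generator_output
+ # unset, output_dir 'out') this returns 'out'; with generator_output set to
+ # 'gypfiles' it returns 'gypfiles/out'.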
+
+def CalculateGeneratorInputInfo(params):
+ """Called by __init__ to initialize generator values based on params."""
+ # E.g. "out/gypfiles"
+ toplevel = params['options'].toplevel_dir
+ qualified_out_dir = os.path.normpath(os.path.join(
+ toplevel, ComputeOutputDir(params), 'gypfiles'))
+
+ global generator_filelist_paths
+ generator_filelist_paths = {
+ 'toplevel': toplevel,
+ 'qualified_out_dir': qualified_out_dir,
+ }
+
+
+def OpenOutput(path, mode='w'):
+ """Open |path| for writing, creating directories if necessary."""
+ gyp.common.EnsureDirExists(path)
+ return open(path, mode)
+
+
+def CommandWithWrapper(cmd, wrappers, prog):
+ wrapper = wrappers.get(cmd, '')
+ if wrapper:
+ return wrapper + ' ' + prog
+ return prog
+
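+ # Illustrative usage (editor's note): CommandWithWrapper('CC',
+ # {'CC': 'ccache'}, 'gcc') returns 'ccache gcc'; with no wrapper registered
+ # it returns 'gcc' unchanged.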
+
+def GetDefaultConcurrentLinks():
+ """Returns a best-guess for a number of concurrent links."""
+ pool_size = int(os.environ.get('GYP_LINK_CONCURRENCY', 0))
+ if pool_size:
+ return pool_size
+
+ if sys.platform in ('win32', 'cygwin'):
+ import ctypes
+
+ class MEMORYSTATUSEX(ctypes.Structure):
+ _fields_ = [
+ ("dwLength", ctypes.c_ulong),
+ ("dwMemoryLoad", ctypes.c_ulong),
+ ("ullTotalPhys", ctypes.c_ulonglong),
+ ("ullAvailPhys", ctypes.c_ulonglong),
+ ("ullTotalPageFile", ctypes.c_ulonglong),
+ ("ullAvailPageFile", ctypes.c_ulonglong),
+ ("ullTotalVirtual", ctypes.c_ulonglong),
+ ("ullAvailVirtual", ctypes.c_ulonglong),
+ ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
+ ]
+
+ stat = MEMORYSTATUSEX()
+ stat.dwLength = ctypes.sizeof(stat)
+ ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))
+
+ # VS 2015 uses 20% more working set than VS 2013 and can consume all RAM
+ # on a 64 GB machine.
+ mem_limit = max(1, stat.ullTotalPhys // (5 * (2 ** 30))) # total / 5GB
+ hard_cap = max(1, int(os.environ.get('GYP_LINK_CONCURRENCY_MAX', 2**32)))
+ return min(mem_limit, hard_cap)
+ elif sys.platform.startswith('linux'):
+ if os.path.exists("/proc/meminfo"):
+ with open("/proc/meminfo") as meminfo:
+ memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
+ for line in meminfo:
+ match = memtotal_re.match(line)
+ if not match:
+ continue
+ # Allow 8 GB per link on Linux because the gold linker is quite memory
+ # hungry.
+ return max(1, int(match.group(1)) // (8 * (2 ** 20)))
+ return 1
+ elif sys.platform == 'darwin':
+ try:
+ avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
+ # A static library debug build of Chromium's unit_tests takes ~2.7GB, so
+ # 4GB per ld process allows for some more bloat.
+ return max(1, avail_bytes // (4 * (2 ** 30))) # total / 4GB
+ except Exception:  # sysctl may be missing or fail; fall back to one link.
+ return 1
+ else:
+ # TODO(scottmg): Implement this for other platforms.
+ return 1
+
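+ # Worked example (editor's note, not upstream code): a 64 GB Windows machine
+ # gets 64 // 5 = 12 concurrent links (subject to GYP_LINK_CONCURRENCY_MAX);
+ # a Linux box reporting MemTotal: 33554432 kB (32 GiB) gets 32 // 8 = 4; a
+ # 16 GiB Mac gets 16 // 4 = 4. GYP_LINK_CONCURRENCY overrides all of these.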
+
+def _GetWinLinkRuleNameSuffix(embed_manifest):
+ """Returns the suffix used to select an appropriate linking rule depending on
+ whether the manifest embedding is enabled."""
+ return '_embed' if embed_manifest else ''
+
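+ # For example (illustrative): embed_manifest=True yields rule names such as
+ # 'link_embed' and 'solink_embed'; embed_manifest=False yields the plain
+ # 'link' and 'solink' rules.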
+
+def _AddWinLinkRules(master_ninja, embed_manifest):
+ """Adds link rules for Windows platform to |master_ninja|."""
+ def FullLinkCommand(ldcmd, out, binary_type):
+ resource_name = {
+ 'exe': '1',
+ 'dll': '2',
+ }[binary_type]
+ return '%(python)s gyp-win-tool link-with-manifests $arch %(embed)s ' \
+ '%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" ' \
+ '$manifests' % {
+ 'python': sys.executable,
+ 'out': out,
+ 'ldcmd': ldcmd,
+ 'resname': resource_name,
+ 'embed': embed_manifest }
+ rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
+ use_separate_mspdbsrv = (
+ int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
+ dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
+ dllcmd = ('%s gyp-win-tool link-wrapper $arch %s '
+ '$ld /nologo $implibflag /DLL /OUT:$binary '
+ '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv))
+ dllcmd = FullLinkCommand(dllcmd, '$binary', 'dll')
+ master_ninja.rule('solink' + rule_name_suffix,
+ description=dlldesc, command=dllcmd,
+ rspfile='$binary.rsp',
+ rspfile_content='$libs $in_newline $ldflags',
+ restat=True,
+ pool='link_pool')
+ master_ninja.rule('solink_module' + rule_name_suffix,
+ description=dlldesc, command=dllcmd,
+ rspfile='$binary.rsp',
+ rspfile_content='$libs $in_newline $ldflags',
+ restat=True,
+ pool='link_pool')
+ # Note that ldflags goes at the end so that it has the option of
+ # overriding default settings earlier in the command line.
+ exe_cmd = ('%s gyp-win-tool link-wrapper $arch %s '
+ '$ld /nologo /OUT:$binary @$binary.rsp' %
+ (sys.executable, use_separate_mspdbsrv))
+ exe_cmd = FullLinkCommand(exe_cmd, '$binary', 'exe')
+ master_ninja.rule('link' + rule_name_suffix,
+ description='LINK%s $binary' % rule_name_suffix.upper(),
+ command=exe_cmd,
+ rspfile='$binary.rsp',
+ rspfile_content='$in_newline $libs $ldflags',
+ pool='link_pool')
+
+
+def GenerateOutputForConfig(target_list, target_dicts, data, params,
+ config_name):
+ options = params['options']
+ flavor = gyp.common.GetFlavor(params)
+ generator_flags = params.get('generator_flags', {})
+
+ # build_dir: relative path from source root to our output files.
+ # e.g. "out/Debug"
+ build_dir = os.path.normpath(
+ os.path.join(ComputeOutputDir(params), config_name))
+
+ toplevel_build = os.path.join(options.toplevel_dir, build_dir)
+
+ master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
+ master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
+
+ # Put build-time support tools in out/{config_name}.
+ gyp.common.CopyTool(flavor, toplevel_build, generator_flags)
+
+ # Grab make settings for CC/CXX.
+ # The rules are:
+ # - The priority, from lowest to highest, is: gcc/g++ defaults, the
+ #   'make_global_settings' in gyp, then the environment variables.
+ # - If there is no 'make_global_settings' for CC.host/CXX.host and no
+ #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should
+ #   fall back to cc/cxx.
+ if flavor == 'win':
+ ar = 'lib.exe'
+ # cc and cxx must be set to the correct architecture by overriding with one
+ # of cl_x86 or cl_x64 below.
+ cc = 'UNSET'
+ cxx = 'UNSET'
+ ld = 'link.exe'
+ ld_host = '$ld'
+ else:
+ ar = 'ar'
+ cc = 'cc'
+ cxx = 'c++'
+ ld = '$cc'
+ ldxx = '$cxx'
+ ld_host = '$cc_host'
+ ldxx_host = '$cxx_host'
+
+ ar_host = ar
+ cc_host = None
+ cxx_host = None
+ cc_host_global_setting = None
+ cxx_host_global_setting = None
+ clang_cl = None
+ nm = 'nm'
+ nm_host = 'nm'
+ readelf = 'readelf'
+ readelf_host = 'readelf'
+
+ build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
+ make_global_settings = data[build_file].get('make_global_settings', [])
+ build_to_root = gyp.common.InvertRelativePath(build_dir,
+ options.toplevel_dir)
+ wrappers = {}
+ for key, value in make_global_settings:
+ if key == 'AR':
+ ar = os.path.join(build_to_root, value)
+ if key == 'AR.host':
+ ar_host = os.path.join(build_to_root, value)
+ if key == 'CC':
+ cc = os.path.join(build_to_root, value)
+ if cc.endswith('clang-cl'):
+ clang_cl = cc
+ if key == 'CXX':
+ cxx = os.path.join(build_to_root, value)
+ if key == 'CC.host':
+ cc_host = os.path.join(build_to_root, value)
+ cc_host_global_setting = value
+ if key == 'CXX.host':
+ cxx_host = os.path.join(build_to_root, value)
+ cxx_host_global_setting = value
+ if key == 'LD':
+ ld = os.path.join(build_to_root, value)
+ if key == 'LD.host':
+ ld_host = os.path.join(build_to_root, value)
+ if key == 'NM':
+ nm = os.path.join(build_to_root, value)
+ if key == 'NM.host':
+ nm_host = os.path.join(build_to_root, value)
+ if key == 'READELF':
+ readelf = os.path.join(build_to_root, value)
+ if key == 'READELF.host':
+ readelf_host = os.path.join(build_to_root, value)
+ if key.endswith('_wrapper'):
+ wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
+
+ # Support wrappers from environment variables too.
+ for key, value in os.environ.items():
+ if key.lower().endswith('_wrapper'):
+ key_prefix = key[:-len('_wrapper')]
+ key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
+ wrappers[key_prefix] = os.path.join(build_to_root, value)
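+ # Illustrative example (editor's note): exporting CC_wrapper=/usr/bin/ccache
+ # routes every $cc invocation through ccache (an absolute path survives the
+ # build_to_root join unchanged), and the '.HOST' -> '.host' rewrite lets a
+ # variable like LINK.HOST_wrapper match the 'LINK.host' lookup key.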
+
+ mac_toolchain_dir = generator_flags.get('mac_toolchain_dir', None)
+ if mac_toolchain_dir:
+ wrappers['LINK'] = "export DEVELOPER_DIR='%s' &&" % mac_toolchain_dir
+
+ if flavor == 'win':
+ configs = [target_dicts[qualified_target]['configurations'][config_name]
+ for qualified_target in target_list]
+ shared_system_includes = None
+ if not generator_flags.get('ninja_use_custom_environment_files', 0):
+ shared_system_includes = \
+ gyp.msvs_emulation.ExtractSharedMSVSSystemIncludes(
+ configs, generator_flags)
+ cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
+ toplevel_build, generator_flags, shared_system_includes, OpenOutput)
+ for arch, path in sorted(cl_paths.items()):
+ if clang_cl:
+ # If we have selected clang-cl, use that instead.
+ path = clang_cl
+ command = CommandWithWrapper('CC', wrappers,
+ QuoteShellArgument(path, 'win'))
+ if clang_cl:
+ # Use clang-cl to cross-compile for x86 or x86_64.
+ command += (' -m32' if arch == 'x86' else ' -m64')
+ master_ninja.variable('cl_' + arch, command)
+
+ cc = GetEnvironFallback(['CC_target', 'CC'], cc)
+ master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
+ cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
+ master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
+
+ if flavor == 'win':
+ master_ninja.variable('ld', ld)
+ master_ninja.variable('idl', 'midl.exe')
+ master_ninja.variable('ar', ar)
+ master_ninja.variable('rc', 'rc.exe')
+ master_ninja.variable('ml_x86', 'ml.exe')
+ master_ninja.variable('ml_x64', 'ml64.exe')
+ master_ninja.variable('mt', 'mt.exe')
+ else:
+ master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
+ master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
+ master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], ar))
+ if flavor != 'mac':
+ # Mac does not use readelf/nm for .TOC generation, so avoid polluting
+ # the master ninja with extra unused variables.
+ master_ninja.variable(
+ 'nm', GetEnvironFallback(['NM_target', 'NM'], nm))
+ master_ninja.variable(
+ 'readelf', GetEnvironFallback(['READELF_target', 'READELF'], readelf))
+
+ if generator_supports_multiple_toolsets:
+ if not cc_host:
+ cc_host = cc
+ if not cxx_host:
+ cxx_host = cxx
+
+ master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], ar_host))
+ master_ninja.variable('nm_host', GetEnvironFallback(['NM_host'], nm_host))
+ master_ninja.variable('readelf_host',
+ GetEnvironFallback(['READELF_host'], readelf_host))
+ cc_host = GetEnvironFallback(['CC_host'], cc_host)
+ cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
+
+ # Environment variables can be referenced in 'make_global_settings', like
+ # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)']; expand them here.
+ if '$(CC)' in cc_host and cc_host_global_setting:
+ cc_host = cc_host_global_setting.replace('$(CC)', cc)
+ if '$(CXX)' in cxx_host and cxx_host_global_setting:
+ cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
+ master_ninja.variable('cc_host',
+ CommandWithWrapper('CC.host', wrappers, cc_host))
+ master_ninja.variable('cxx_host',
+ CommandWithWrapper('CXX.host', wrappers, cxx_host))
+ if flavor == 'win':
+ master_ninja.variable('ld_host', ld_host)
+ else:
+ master_ninja.variable('ld_host', CommandWithWrapper(
+ 'LINK', wrappers, ld_host))
+ master_ninja.variable('ldxx_host', CommandWithWrapper(
+ 'LINK', wrappers, ldxx_host))
+
+ master_ninja.newline()
+
+ master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
+ master_ninja.newline()
+
+ deps = 'msvc' if flavor == 'win' else 'gcc'
+
+ if flavor != 'win':
+ master_ninja.rule(
+ 'cc',
+ description='CC $out',
+ command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
+ '$cflags_pch_c -c $in -o $out'),
+ depfile='$out.d',
+ deps=deps)
+ master_ninja.rule(
+ 'cc_s',
+ description='CC $out',
+ command=('$cc $defines $includes $cflags $cflags_c '
+ '$cflags_pch_c -c $in -o $out'))
+ master_ninja.rule(
+ 'cxx',
+ description='CXX $out',
+ command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
+ '$cflags_pch_cc -c $in -o $out'),
+ depfile='$out.d',
+ deps=deps)
+ else:
+ # TODO(scottmg) Using separate pdb names is a test to see if it works
+ # around http://crbug.com/142362. It seems there's a race between the
+ # creation of the .pdb by the precompiled header step for .cc and the
+ # compilation of .c files. This should be handled by mspdbsrv, but it
+ # rarely errors out with
+ #   c1xx : fatal error C1033: cannot open program database
+ # By making the rules target separate pdb files, this might be avoided.
+ cc_command = ('ninja -t msvc -e $arch ' +
+ '-- '
+ '$cc /nologo /showIncludes /FC '
+ '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
+ cxx_command = ('ninja -t msvc -e $arch ' +
+ '-- '
+ '$cxx /nologo /showIncludes /FC '
+ '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
+ master_ninja.rule(
+ 'cc',
+ description='CC $out',
+ command=cc_command,
+ rspfile='$out.rsp',
+ rspfile_content='$defines $includes $cflags $cflags_c',
+ deps=deps)
+ master_ninja.rule(
+ 'cxx',
+ description='CXX $out',
+ command=cxx_command,
+ rspfile='$out.rsp',
+ rspfile_content='$defines $includes $cflags $cflags_cc',
+ deps=deps)
+ master_ninja.rule(
+ 'idl',
+ description='IDL $in',
+ command=('%s gyp-win-tool midl-wrapper $arch $outdir '
+ '$tlb $h $dlldata $iid $proxy $in '
+ '$midl_includes $idlflags' % sys.executable))
+ master_ninja.rule(
+ 'rc',
+ description='RC $in',
+ # Note: $in must be last, otherwise rc.exe complains.
+ command=('%s gyp-win-tool rc-wrapper '
+ '$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
+ sys.executable))
+ master_ninja.rule(
+ 'asm',
+ description='ASM $out',
+ command=('%s gyp-win-tool asm-wrapper '
+ '$arch $asm $defines $includes $asmflags /c /Fo $out $in' %
+ sys.executable))
+
+ if flavor != 'mac' and flavor != 'win':
+ master_ninja.rule(
+ 'alink',
+ description='AR $out',
+ command='rm -f $out && $ar rcs $arflags $out $in')
+ master_ninja.rule(
+ 'alink_thin',
+ description='AR $out',
+ command='rm -f $out && $ar rcsT $arflags $out $in')
+
+ # This allows targets that only need to depend on $lib's API to declare an
+ # order-only dependency on $lib.TOC and avoid relinking such downstream
+ # dependencies when $lib changes only in non-public ways.
+ # The resulting string leaves an uninterpolated %(suffix)s placeholder,
+ # which is filled in by the final substitution below.
+ mtime_preserving_solink_base = (
+ 'if [ ! -e $lib -o ! -e $lib.TOC ]; then '
+ '%(solink)s && %(extract_toc)s > $lib.TOC; else '
+ '%(solink)s && %(extract_toc)s > $lib.tmp && '
+ 'if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC ; '
+ 'fi; fi'
+ % { 'solink':
+ '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
+ 'extract_toc':
+ ('{ $readelf -d $lib | grep SONAME ; '
+ '$nm -gD -f p $lib | cut -f1-2 -d\' \'; }')})
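+ # Sketch of the generated shell (editor's note, illustrative): the command
+ # expands roughly to
+ #   if [ ! -e $lib -o ! -e $lib.TOC ]; then
+ #     <link> && <dump SONAME and dynamic symbols> > $lib.TOC
+ #   else
+ #     <link> && <dump ...> > $lib.tmp &&
+ #     if ! cmp -s $lib.tmp $lib.TOC; then mv $lib.tmp $lib.TOC; fi
+ #   fi
+ # Combined with restat=1 on the solink rules below, ninja can then prune
+ # dependents that only have an order-only dependency on $lib.TOC whenever
+ # the public interface is unchanged.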
+
+ master_ninja.rule(
+ 'solink',
+ description='SOLINK $lib',
+ restat=True,
+ command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
+ rspfile='$link_file_list',
+ rspfile_content=
+ '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive $libs',
+ pool='link_pool')
+ master_ninja.rule(
+ 'solink_module',
+ description='SOLINK(module) $lib',
+ restat=True,
+ command=mtime_preserving_solink_base % {'suffix': '@$link_file_list'},
+ rspfile='$link_file_list',
+ rspfile_content='-Wl,--start-group $in -Wl,--end-group $solibs $libs',
+ pool='link_pool')
+ master_ninja.rule(
+ 'link',
+ description='LINK $out',
+ command=('$ld $ldflags -o $out '
+ '-Wl,--start-group $in -Wl,--end-group $solibs $libs'),
+ pool='link_pool')
+ elif flavor == 'win':
+ master_ninja.rule(
+ 'alink',
+ description='LIB $out',
+ command=('%s gyp-win-tool link-wrapper $arch False '
+ '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
+ sys.executable),
+ rspfile='$out.rsp',
+ rspfile_content='$in_newline $libflags')
+ _AddWinLinkRules(master_ninja, embed_manifest=True)
+ _AddWinLinkRules(master_ninja, embed_manifest=False)
+ else:
+ master_ninja.rule(
+ 'objc',
+ description='OBJC $out',
+ command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
+ '$cflags_pch_objc -c $in -o $out'),
+ depfile='$out.d',
+ deps=deps)
+ master_ninja.rule(
+ 'objcxx',
+ description='OBJCXX $out',
+ command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
+ '$cflags_pch_objcc -c $in -o $out'),
+ depfile='$out.d',
+ deps=deps)
+ master_ninja.rule(
+ 'alink',
+ description='LIBTOOL-STATIC $out, POSTBUILDS',
+ command='rm -f $out && '
+ './gyp-mac-tool filter-libtool libtool $libtool_flags '
+ '-static -o $out $in'
+ '$postbuilds')
+ master_ninja.rule(
+ 'lipo',
+ description='LIPO $out, POSTBUILDS',
+ command='rm -f $out && lipo -create $in -output $out$postbuilds')
+ master_ninja.rule(
+ 'solipo',
+ description='SOLIPO $out, POSTBUILDS',
+ command=(
+ 'rm -f $lib $lib.TOC && lipo -create $in -output $lib$postbuilds &&'
+ '%(extract_toc)s > $lib.TOC'
+ % { 'extract_toc':
+ '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
+ 'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'}))
+
+
+ # Record the public interface of $lib in $lib.TOC. See the corresponding
+ # comment in the posix section above for details.
+ solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
+ mtime_preserving_solink_base = (
+ 'if [ ! -e $lib -o ! -e $lib.TOC ] || '
+ # Always force dependent targets to relink if this library
+ # reexports something. Handling this correctly would require
+ # recursive TOC dumping but this is rare in practice, so punt.
+ 'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
+ '%(solink)s && %(extract_toc)s > $lib.TOC; '
+ 'else '
+ '%(solink)s && %(extract_toc)s > $lib.tmp && '
+ 'if ! cmp -s $lib.tmp $lib.TOC; then '
+ 'mv $lib.tmp $lib.TOC ; '
+ 'fi; '
+ 'fi'
+ % { 'solink': solink_base,
+ 'extract_toc':
+ '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
+ 'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
+
+
+ solink_suffix = '@$link_file_list$postbuilds'
+ master_ninja.rule(
+ 'solink',
+ description='SOLINK $lib, POSTBUILDS',
+ restat=True,
+ command=mtime_preserving_solink_base % {'suffix': solink_suffix,
+ 'type': '-shared'},
+ rspfile='$link_file_list',
+ rspfile_content='$in $solibs $libs',
+ pool='link_pool')
+ master_ninja.rule(
+ 'solink_notoc',
+ description='SOLINK $lib, POSTBUILDS',
+ restat=True,
+ command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
+ rspfile='$link_file_list',
+ rspfile_content='$in $solibs $libs',
+ pool='link_pool')
+
+ master_ninja.rule(
+ 'solink_module',
+ description='SOLINK(module) $lib, POSTBUILDS',
+ restat=True,
+ command=mtime_preserving_solink_base % {'suffix': solink_suffix,
+ 'type': '-bundle'},
+ rspfile='$link_file_list',
+ rspfile_content='$in $solibs $libs',
+ pool='link_pool')
+ master_ninja.rule(
+ 'solink_module_notoc',
+ description='SOLINK(module) $lib, POSTBUILDS',
+ restat=True,
+ command=solink_base % {'suffix': solink_suffix, 'type': '-bundle'},
+ rspfile='$link_file_list',
+ rspfile_content='$in $solibs $libs',
+ pool='link_pool')
+
+ master_ninja.rule(
+ 'link',
+ description='LINK $out, POSTBUILDS',
+ command=('$ld $ldflags -o $out '
+ '$in $solibs $libs$postbuilds'),
+ pool='link_pool')
+ master_ninja.rule(
+ 'preprocess_infoplist',
+ description='PREPROCESS INFOPLIST $out',
+ command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
+ 'plutil -convert xml1 $out $out'))
+ master_ninja.rule(
+ 'copy_infoplist',
+ description='COPY INFOPLIST $in',
+ command='$env ./gyp-mac-tool copy-info-plist $in $out $binary $keys')
+ master_ninja.rule(
+ 'merge_infoplist',
+ description='MERGE INFOPLISTS $in',
+ command='$env ./gyp-mac-tool merge-info-plist $out $in')
+ master_ninja.rule(
+ 'compile_xcassets',
+ description='COMPILE XCASSETS $in',
+ command='$env ./gyp-mac-tool compile-xcassets $keys $in')
+ master_ninja.rule(
+ 'compile_ios_framework_headers',
+ description='COMPILE HEADER MAPS AND COPY FRAMEWORK HEADERS $in',
+ command='$env ./gyp-mac-tool compile-ios-framework-header-map $out '
+ '$framework $in && $env ./gyp-mac-tool '
+ 'copy-ios-framework-headers $framework $copy_headers')
+ master_ninja.rule(
+ 'mac_tool',
+ description='MACTOOL $mactool_cmd $in',
+ command='$env ./gyp-mac-tool $mactool_cmd $in $out $binary')
+ master_ninja.rule(
+ 'package_framework',
+ description='PACKAGE FRAMEWORK $out, POSTBUILDS',
+ command='./gyp-mac-tool package-framework $out $version$postbuilds '
+ '&& touch $out')
+ master_ninja.rule(
+ 'package_ios_framework',
+ description='PACKAGE IOS FRAMEWORK $out, POSTBUILDS',
+ command='./gyp-mac-tool package-ios-framework $out $postbuilds '
+ '&& touch $out')
+ if flavor == 'win':
+ master_ninja.rule(
+ 'stamp',
+ description='STAMP $out',
+ command='%s gyp-win-tool stamp $out' % sys.executable)
+ else:
+ master_ninja.rule(
+ 'stamp',
+ description='STAMP $out',
+ command='${postbuilds}touch $out')
+ if flavor == 'win':
+ master_ninja.rule(
+ 'copy',
+ description='COPY $in $out',
+ command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
+ elif flavor == 'zos':
+ master_ninja.rule(
+ 'copy',
+ description='COPY $in $out',
+ command='rm -rf $out && cp -fRP $in $out')
+ else:
+ master_ninja.rule(
+ 'copy',
+ description='COPY $in $out',
+ command='ln -f $in $out 2>/dev/null || (rm -rf $out && cp -af $in $out)')
+ master_ninja.newline()
+
+ all_targets = set()
+ for build_file in params['build_files']:
+ for target in gyp.common.AllTargets(target_list,
+ target_dicts,
+ os.path.normpath(build_file)):
+ all_targets.add(target)
+ all_outputs = set()
+
+ # target_outputs is a map from qualified target name to a Target object.
+ target_outputs = {}
+ # target_short_names is a map from target short name to a list of Target
+ # objects.
+ target_short_names = {}
+
+ # Short names of targets that were skipped because they didn't contain
+ # anything interesting.
+ # NOTE: there may be overlap between this and non_empty_target_names.
+ empty_target_names = set()
+
+ # Set of non-empty short target names.
+ # NOTE: there may be overlap between this and empty_target_names.
+ non_empty_target_names = set()
+
+ for qualified_target in target_list:
+ # qualified_target is like: third_party/icu/icu.gyp:icui18n#target
+ build_file, name, toolset = \
+ gyp.common.ParseQualifiedTarget(qualified_target)
+
+ this_make_global_settings = data[build_file].get('make_global_settings', [])
+ assert make_global_settings == this_make_global_settings, (
+ "make_global_settings needs to be the same for all targets. %s vs. %s" %
+ (this_make_global_settings, make_global_settings))
+
+ spec = target_dicts[qualified_target]
+ if flavor == 'mac':
+ gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
+
+ # If build_file is a symlink, we must not follow it because there's a chance
+ # it could point to a path above toplevel_dir, and we cannot correctly deal
+ # with that case at the moment.
+ build_file = gyp.common.RelativePath(build_file, options.toplevel_dir,
+ False)
+
+ qualified_target_for_hash = gyp.common.QualifiedTarget(build_file, name,
+ toolset)
+ qualified_target_for_hash = qualified_target_for_hash.encode('utf-8')
+ hash_for_rules = hashlib.md5(qualified_target_for_hash).hexdigest()
+
+ base_path = os.path.dirname(build_file)
+ obj = 'obj'
+ if toolset != 'target':
+ obj += '.' + toolset
+ output_file = os.path.join(obj, base_path, name + '.ninja')
+
+ ninja_output = StringIO()
+ writer = NinjaWriter(hash_for_rules, target_outputs, base_path, build_dir,
+ ninja_output,
+ toplevel_build, output_file,
+ flavor, toplevel_dir=options.toplevel_dir)
+
+ target = writer.WriteSpec(spec, config_name, generator_flags)
+
+ if ninja_output.tell() > 0:
+ # Only create ninja files that actually have contents.
+ with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
+ ninja_file.write(ninja_output.getvalue())
+ ninja_output.close()
+ master_ninja.subninja(output_file)
+
+ if target:
+ if name != target.FinalOutput() and spec['toolset'] == 'target':
+ target_short_names.setdefault(name, []).append(target)
+ target_outputs[qualified_target] = target
+ if qualified_target in all_targets:
+ all_outputs.add(target.FinalOutput())
+ non_empty_target_names.add(name)
+ else:
+ empty_target_names.add(name)
+
+ if target_short_names:
+ # Write a short name to build this target. This benefits both the
+ # "build chrome" case and the gyp tests, which expect to be able to run
+ # actions and build libraries by their short name.
+ master_ninja.newline()
+ master_ninja.comment('Short names for targets.')
+ for short_name in sorted(target_short_names):
+ master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
+ target_short_names[short_name]])
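+ # For example (illustrative): a target 'base' whose final output is
+ # 'lib/libbase.so' gets the line 'build base: phony lib/libbase.so'.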
+
+ # Write phony targets for any empty targets that weren't written yet. As
+ # short names are not necessarily unique, only do this for short names
+ # that haven't already been output for another target.
+ empty_target_names = empty_target_names - non_empty_target_names
+ if empty_target_names:
+ master_ninja.newline()
+ master_ninja.comment('Empty targets (output for completeness).')
+ for name in sorted(empty_target_names):
+ master_ninja.build(name, 'phony')
+
+ if all_outputs:
+ master_ninja.newline()
+ master_ninja.build('all', 'phony', sorted(all_outputs))
+ master_ninja.default(generator_flags.get('default_target', 'all'))
+
+ master_ninja_file.close()
+
+
+def PerformBuild(data, configurations, params):
+ options = params['options']
+ for config in configurations:
+ builddir = os.path.join(options.toplevel_dir, 'out', config)
+ arguments = ['ninja', '-C', builddir]
+ print('Building [%s]: %s' % (config, arguments))
+ subprocess.check_call(arguments)
+
+
+def CallGenerateOutputForConfig(arglist):
+ # Ignore the interrupt signal so that the parent process catches it and
+ # kills all multiprocessing children.
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ (target_list, target_dicts, data, params, config_name) = arglist
+ GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ # Update target_dicts for iOS device builds.
+ target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
+ target_dicts)
+
+ user_config = params.get('generator_flags', {}).get('config', None)
+ if gyp.common.GetFlavor(params) == 'win':
+ target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
+ target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
+ target_list, target_dicts, generator_default_variables)
+
+ if user_config:
+ GenerateOutputForConfig(target_list, target_dicts, data, params,
+ user_config)
+ else:
+ config_names = target_dicts[target_list[0]]['configurations']
+ if params['parallel']:
+ try:
+ pool = multiprocessing.Pool(len(config_names))
+ arglists = []
+ for config_name in config_names:
+ arglists.append(
+ (target_list, target_dicts, data, params, config_name))
+ pool.map(CallGenerateOutputForConfig, arglists)
+ except KeyboardInterrupt as e:
+ pool.terminate()
+ raise e
+ else:
+ for config_name in config_names:
+ GenerateOutputForConfig(target_list, target_dicts, data, params,
+ config_name)
diff --git a/third_party/python/gyp/pylib/gyp/generator/ninja_test.py b/third_party/python/gyp/pylib/gyp/generator/ninja_test.py
new file mode 100644
index 0000000000..1ad68e4fc9
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/ninja_test.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Unit tests for the ninja.py file. """
+
+import gyp.generator.ninja as ninja
+import unittest
+import sys
+import TestCommon
+
+
+class TestPrefixesAndSuffixes(unittest.TestCase):
+ def test_BinaryNamesWindows(self):
+ # These cannot run on non-Windows as they require a VS installation to
+ # correctly handle variable expansion.
+ if sys.platform.startswith('win'):
+ writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
+ 'build.ninja', 'win')
+ spec = { 'target_name': 'wee' }
+ self.assertTrue(writer.ComputeOutputFileName(spec, 'executable').
+ endswith('.exe'))
+ self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
+ endswith('.dll'))
+ self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
+ endswith('.lib'))
+
+ def test_BinaryNamesLinux(self):
+ writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
+ 'build.ninja', 'linux')
+ spec = { 'target_name': 'wee' }
+ self.assertTrue('.' not in writer.ComputeOutputFileName(spec,
+ 'executable'))
+ self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
+ startswith('lib'))
+ self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
+ startswith('lib'))
+ self.assertTrue(writer.ComputeOutputFileName(spec, 'shared_library').
+ endswith('.so'))
+ self.assertTrue(writer.ComputeOutputFileName(spec, 'static_library').
+ endswith('.a'))
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/python/gyp/pylib/gyp/generator/xcode.py b/third_party/python/gyp/pylib/gyp/generator/xcode.py
new file mode 100644
index 0000000000..8bc22bed10
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/xcode.py
@@ -0,0 +1,1302 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import filecmp
+import gyp.common
+import gyp.xcodeproj_file
+import gyp.xcode_ninja
+import errno
+import os
+import sys
+import posixpath
+import re
+import shutil
+import subprocess
+import tempfile
+
+
+# Project files generated by this module will use _intermediate_var as a
+# custom Xcode setting whose value is a DerivedSources-like directory that's
+# project-specific and configuration-specific. The normal choice,
+# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
+# as it is likely that multiple targets within a single project file will want
+# to access the same set of generated files. The other option,
+# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
+# it is not configuration-specific. INTERMEDIATE_DIR is defined as
+# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
+_intermediate_var = 'INTERMEDIATE_DIR'
+
+# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
+# targets that share the same BUILT_PRODUCTS_DIR.
+_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
+
+_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
+
+generator_default_variables = {
+ 'EXECUTABLE_PREFIX': '',
+ 'EXECUTABLE_SUFFIX': '',
+ 'STATIC_LIB_PREFIX': 'lib',
+ 'SHARED_LIB_PREFIX': 'lib',
+ 'STATIC_LIB_SUFFIX': '.a',
+ 'SHARED_LIB_SUFFIX': '.dylib',
+ # INTERMEDIATE_DIR is a place for targets to build up intermediate products.
+ # It is specific to each build environment. It is only guaranteed to exist
+ # and be constant within the context of a project, corresponding to a single
+ # input file. Some build environments may allow their intermediate directory
+ # to be shared on a wider scale, but this is not guaranteed.
+ 'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
+ 'OS': 'mac',
+ 'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
+ 'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
+ 'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
+ 'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
+ 'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
+ 'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
+ 'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
+ 'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
+ 'CONFIGURATION_NAME': '$(CONFIGURATION)',
+}
+
+# The Xcode-specific sections that hold paths.
+generator_additional_path_sections = [
+ 'mac_bundle_resources',
+ 'mac_framework_headers',
+ 'mac_framework_private_headers',
+ # 'mac_framework_dirs', input already handles _dirs endings.
+]
+
+# The Xcode-specific keys that exist on targets and aren't moved down to
+# configurations.
+generator_additional_non_configuration_keys = [
+ 'ios_app_extension',
+ 'ios_watch_app',
+ 'ios_watchkit_extension',
+ 'mac_bundle',
+ 'mac_bundle_resources',
+ 'mac_framework_headers',
+ 'mac_framework_private_headers',
+ 'mac_xctest_bundle',
+ 'mac_xcuitest_bundle',
+ 'xcode_create_dependents_test_runner',
+]
+
+# We want to let any rules apply to files that are resources also.
+generator_extra_sources_for_rules = [
+ 'mac_bundle_resources',
+ 'mac_framework_headers',
+ 'mac_framework_private_headers',
+]
+
+generator_filelist_paths = None
+
+# Xcode's standard set of library directories, which don't need to be duplicated
+# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
+xcode_standard_library_dirs = frozenset([
+ '$(SDKROOT)/usr/lib',
+ '$(SDKROOT)/usr/local/lib',
+])
+
+def CreateXCConfigurationList(configuration_names):
+ xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
+ if len(configuration_names) == 0:
+ configuration_names = ['Default']
+ for configuration_name in configuration_names:
+ xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
+ 'name': configuration_name})
+ xccl.AppendProperty('buildConfigurations', xcbc)
+ xccl.SetProperty('defaultConfigurationName', configuration_names[0])
+ return xccl
+
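+ # Illustrative usage (editor's note): CreateXCConfigurationList(['Debug',
+ # 'Release']) returns an XCConfigurationList holding one XCBuildConfiguration
+ # per name, with defaultConfigurationName set to 'Debug'; an empty input
+ # list falls back to a single 'Default' configuration.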
+
+class XcodeProject(object):
+ def __init__(self, gyp_path, path, build_file_dict):
+ self.gyp_path = gyp_path
+ self.path = path
+ self.project = gyp.xcodeproj_file.PBXProject(path=path)
+ projectDirPath = gyp.common.RelativePath(
+ os.path.dirname(os.path.abspath(self.gyp_path)),
+ os.path.dirname(path) or '.')
+ self.project.SetProperty('projectDirPath', projectDirPath)
+ self.project_file = \
+ gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
+ self.build_file_dict = build_file_dict
+
+ # TODO(mark): add destructor that cleans up self.path if created_dir is
+ # True and things didn't complete successfully. Or do something even
+ # better with "try"?
+ self.created_dir = False
+ try:
+ os.makedirs(self.path)
+ self.created_dir = True
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ def Finalize1(self, xcode_targets, serialize_all_tests):
+ # Collect a list of all of the build configuration names used by the
+ # various targets in the file. It is strongly advised that every target
+ # in a project (even across multiple project files) use the same set of
+ # configuration names.
+ configurations = []
+ for xct in self.project.GetProperty('targets'):
+ xccl = xct.GetProperty('buildConfigurationList')
+ xcbcs = xccl.GetProperty('buildConfigurations')
+ for xcbc in xcbcs:
+ name = xcbc.GetProperty('name')
+ if name not in configurations:
+ configurations.append(name)
+
+ # Replace the XCConfigurationList attached to the PBXProject object with
+ # a new one specifying all of the configuration names used by the various
+ # targets.
+ try:
+ xccl = CreateXCConfigurationList(configurations)
+ self.project.SetProperty('buildConfigurationList', xccl)
+ except:
+ sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
+ raise
+
+ # The need for this setting is explained above where _intermediate_var is
+ # defined. The comments below about wanting to avoid project-wide build
+ # settings apply here too, but this needs to be set on a project-wide basis
+ # so that files relative to the _intermediate_var setting can be displayed
+ # properly in the Xcode UI.
+ #
+ # Note that for configuration-relative files such as anything relative to
+ # _intermediate_var, for the purposes of UI tree view display, Xcode will
+ # only resolve the configuration name once, when the project file is
+ # opened. If the active build configuration is changed, the project file
+ # must be closed and reopened if it is desired for the tree view to update.
+ # This is filed as Apple radar 6588391.
+ xccl.SetBuildSetting(_intermediate_var,
+ '$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
+ xccl.SetBuildSetting(_shared_intermediate_var,
+ '$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
+
+ # Set user-specified project-wide build settings and config files. This
+ # is intended to be used very sparingly. Really, almost everything should
+ # go into target-specific build settings sections. The project-wide
+ # settings are only intended to be used in cases where Xcode attempts to
+ # resolve variable references in a project context as opposed to a target
+ # context, such as when resolving sourceTree references while building up
+ # the tree view for UI display.
+ # Any values set globally are applied to all configurations, then any
+ # per-configuration values are applied.
+ for xck, xcv in self.build_file_dict.get('xcode_settings', {}).items():
+ xccl.SetBuildSetting(xck, xcv)
+ if 'xcode_config_file' in self.build_file_dict:
+ config_ref = self.project.AddOrGetFileInRootGroup(
+ self.build_file_dict['xcode_config_file'])
+ xccl.SetBaseConfiguration(config_ref)
+ build_file_configurations = self.build_file_dict.get('configurations', {})
+ if build_file_configurations:
+ for config_name in configurations:
+ build_file_configuration_named = \
+ build_file_configurations.get(config_name, {})
+ if build_file_configuration_named:
+ xcc = xccl.ConfigurationNamed(config_name)
+ for xck, xcv in build_file_configuration_named.get('xcode_settings',
+ {}).items():
+ xcc.SetBuildSetting(xck, xcv)
+ if 'xcode_config_file' in build_file_configuration_named:
+ config_ref = self.project.AddOrGetFileInRootGroup(
+ build_file_configurations[config_name]['xcode_config_file'])
+ xcc.SetBaseConfiguration(config_ref)
+
+ # Sort the targets based on how they appeared in the input.
+ # TODO(mark): Like a lot of other things here, this assumes internal
+ # knowledge of PBXProject - in this case, of its "targets" property.
+
+ # ordinary_targets are ordinary targets that are already in the project
+ # file. run_test_targets are the targets that run unittests and should be
+ # used for the Run All Tests target. support_targets are the action/rule
+ # targets used by GYP file targets, just kept for the assert check.
+ ordinary_targets = []
+ run_test_targets = []
+ support_targets = []
+
+ # targets is the full list of targets in the project.
+ targets = []
+
+ # Does the project define its own "all" target?
+ has_custom_all = False
+
+ # targets_for_all is the list of ordinary_targets that should be listed
+ # in this project's "All" target. It includes each non_runtest_target
+ # that does not have suppress_wildcard set.
+ targets_for_all = []
+
+ for target in self.build_file_dict['targets']:
+ target_name = target['target_name']
+ toolset = target['toolset']
+ qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
+ toolset)
+ xcode_target = xcode_targets[qualified_target]
+ # Make sure that the target being added to the sorted list is already in
+ # the unsorted list.
+ assert xcode_target in self.project._properties['targets']
+ targets.append(xcode_target)
+ ordinary_targets.append(xcode_target)
+ if xcode_target.support_target:
+ support_targets.append(xcode_target.support_target)
+ targets.append(xcode_target.support_target)
+
+ if not int(target.get('suppress_wildcard', False)):
+ targets_for_all.append(xcode_target)
+
+ if target_name.lower() == 'all':
+ has_custom_all = True
+
+ # If this target has a 'run_as' attribute, create a target to run it,
+ # add that to the project's targets, and add it to the test targets.
+ if target.get('run_as'):
+ # Make a target to run something. It should have one
+ # dependency, the parent xcode target.
+ xccl = CreateXCConfigurationList(configurations)
+ run_target = gyp.xcodeproj_file.PBXAggregateTarget({
+ 'name': 'Run ' + target_name,
+ 'productName': xcode_target.GetProperty('productName'),
+ 'buildConfigurationList': xccl,
+ },
+ parent=self.project)
+ run_target.AddDependency(xcode_target)
+
+ command = target['run_as']
+ script = ''
+ if command.get('working_directory'):
+ script = script + 'cd "%s"\n' % \
+ gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
+ command.get('working_directory'))
+
+ if command.get('environment'):
+ script = script + "\n".join(
+ ['export %s="%s"' %
+ (key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
+ for (key, val) in command.get('environment').items()]) + "\n"
+
+ # Some tests end up using sockets, files on disk, etc., and can get
+ # confused if more than one test runs at a time. The generator flag
+ # 'xcode_serialize_all_test_runs' controls whether all tests are forced
+ # to run serially; it defaults to True. To get serial runs, this little
+ # bit of Python does the same as the Linux flock utility to make sure
+ # only one runs at a time.
+ command_prefix = ''
+ if serialize_all_tests:
+ command_prefix = \
+"""python -c "import fcntl, subprocess, sys
+file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
+fcntl.flock(file.fileno(), fcntl.LOCK_EX)
+sys.exit(subprocess.call(sys.argv[1:]))" """
+
+ # If we were unable to exec for some reason, we want to exit with an
+ # error, and fix up variable references to use shell syntax instead of
+ # Xcode syntax.
+ script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
+ gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
+ gyp.common.EncodePOSIXShellList(command.get('action')))
+
+ ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
+ 'shellScript': script,
+ 'showEnvVarsInLog': 0,
+ })
+ run_target.AppendProperty('buildPhases', ssbp)
+
+ # Add the run target to the project file.
+ targets.append(run_target)
+ run_test_targets.append(run_target)
+ xcode_target.test_runner = run_target
+
+
+ # Make sure that the list of targets being replaced is the same length as
+ # the one replacing it, but allow for the added test runner targets.
+ assert len(self.project._properties['targets']) == \
+ len(ordinary_targets) + len(support_targets)
+
+ self.project._properties['targets'] = targets
+
+ # Get rid of unnecessary levels of depth in groups like the Source group.
+ self.project.RootGroupsTakeOverOnlyChildren(True)
+
+ # Sort the groups nicely. Do this after sorting the targets, because the
+ # Products group is sorted based on the order of the targets.
+ self.project.SortGroups()
+
+ # Create an "All" target if there's more than one target in this project
+ # file and the project didn't define its own "All" target. Put a generated
+ # "All" target first so that people opening up the project for the first
+ # time will build everything by default.
+ if len(targets_for_all) > 1 and not has_custom_all:
+ xccl = CreateXCConfigurationList(configurations)
+ all_target = gyp.xcodeproj_file.PBXAggregateTarget(
+ {
+ 'buildConfigurationList': xccl,
+ 'name': 'All',
+ },
+ parent=self.project)
+
+ for target in targets_for_all:
+ all_target.AddDependency(target)
+
+ # TODO(mark): This is evil because it relies on internal knowledge of
+ # PBXProject._properties. It's important to get the "All" target first,
+ # though.
+ self.project._properties['targets'].insert(0, all_target)
+
+ # The same, but for run_test_targets.
+ if len(run_test_targets) > 1:
+ xccl = CreateXCConfigurationList(configurations)
+ run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
+ {
+ 'buildConfigurationList': xccl,
+ 'name': 'Run All Tests',
+ },
+ parent=self.project)
+ for run_test_target in run_test_targets:
+ run_all_tests_target.AddDependency(run_test_target)
+
+ # Insert after the "All" target, which must exist if there is more than
+ # one run_test_target.
+ self.project._properties['targets'].insert(1, run_all_tests_target)
+
+ def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
+ # Finalize2 needs to happen in a separate step because the process of
+ # updating references to other projects depends on the ordering of targets
+ # within remote project files. Finalize1 is responsible for sorting duty,
+ # and once all project files are sorted, Finalize2 can come in and update
+ # these references.
+
+ # To support making a "test runner" target that will run all the tests
+ # that are direct dependents of any given target, we look for
+ # xcode_create_dependents_test_runner being set on an Aggregate target,
+ # and generate a second target that will run the test runners found under
+ # the marked target.
+ for bf_tgt in self.build_file_dict['targets']:
+ if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
+ tgt_name = bf_tgt['target_name']
+ toolset = bf_tgt['toolset']
+ qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
+ tgt_name, toolset)
+ xcode_target = xcode_targets[qualified_target]
+ if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
+ # Collect all the run test targets.
+ all_run_tests = []
+ pbxtds = xcode_target.GetProperty('dependencies')
+ for pbxtd in pbxtds:
+ pbxcip = pbxtd.GetProperty('targetProxy')
+ dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
+ if hasattr(dependency_xct, 'test_runner'):
+ all_run_tests.append(dependency_xct.test_runner)
+
+ # Directly depend on all the runners as they depend on the target
+ # that builds them.
+ if len(all_run_tests) > 0:
+ run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
+ 'name': 'Run %s Tests' % tgt_name,
+ 'productName': tgt_name,
+ },
+ parent=self.project)
+ for run_test_target in all_run_tests:
+ run_all_target.AddDependency(run_test_target)
+
+ # Insert the test runner after the related target.
+ idx = self.project._properties['targets'].index(xcode_target)
+ self.project._properties['targets'].insert(idx + 1, run_all_target)
+
+ # Update all references to other projects, to make sure that the lists of
+ # remote products are complete. Otherwise, Xcode will fill them in when
+ # it opens the project file, which will result in unnecessary diffs.
+ # TODO(mark): This is evil because it relies on internal knowledge of
+ # PBXProject._other_pbxprojects.
+ for other_pbxproject in self.project._other_pbxprojects.keys():
+ self.project.AddOrGetProjectReference(other_pbxproject)
+
+ self.project.SortRemoteProductReferences()
+
+ # Give everything an ID.
+ self.project_file.ComputeIDs()
+
+ # Make sure that no two objects in the project file have the same ID. If
+ # multiple objects wind up with the same ID, upon loading the file, Xcode
+ # will only recognize one object (the last one in the file?) and the
+ # results are unpredictable.
+ self.project_file.EnsureNoIDCollisions()
+
+ def Write(self):
+ # Write the project file to a temporary location first. Xcode watches for
+ # changes to the project file and presents a UI sheet offering to reload
+ # the project when it does change. However, in some cases, especially when
+ # multiple projects are open or when Xcode is busy, things don't work so
+ # seamlessly. Sometimes, Xcode is able to detect that a project file has
+ # changed but can't unload it because something else is referencing it.
+ # To mitigate this problem, and to avoid even having Xcode present the UI
+ # sheet when an open project is rewritten for inconsequential changes, the
+ # project file is written to a temporary file in the xcodeproj directory
+ # first. The new temporary file is then compared to the existing project
+ # file, if any. If they differ, the new file replaces the old; otherwise,
+ # the new project file is simply deleted. Xcode properly detects a file
+ # being renamed over an open project file as a change and so it remains
+ # able to present the "project file changed" sheet under this system.
+ # Writing to a temporary file first also avoids the possible problem of
+ # Xcode rereading an incomplete project file.
+ (output_fd, new_pbxproj_path) = \
+ tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
+ dir=self.path)
+
+ try:
+ output_file = os.fdopen(output_fd, 'w')
+
+ self.project_file.Print(output_file)
+ output_file.close()
+
+ pbxproj_path = os.path.join(self.path, 'project.pbxproj')
+
+ same = False
+ try:
+ same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise
+
+ if same:
+ # The new file is identical to the old one, just get rid of the new
+ # one.
+ os.unlink(new_pbxproj_path)
+ else:
+ # The new file is different from the old one, or there is no old one.
+ # Rename the new file to the permanent name.
+ #
+ # tempfile.mkstemp uses an overly restrictive mode, resulting in a
+ # file that can only be read by the owner, regardless of the umask.
+ # There's no reason to not respect the umask here, which means that
+ # an extra hoop is required to fetch it and reset the new file's mode.
+ #
+ # No way to get the umask without setting a new one? Set a safe one
+ # and then set it back to the old value.
+ umask = os.umask(0o77)
+ os.umask(umask)
+
+ os.chmod(new_pbxproj_path, 0o666 & ~umask)
+ os.rename(new_pbxproj_path, pbxproj_path)
+
+ except Exception:
+ # Don't leave turds behind. In fact, if this code was responsible for
+ # creating the xcodeproj directory, get rid of that too.
+ os.unlink(new_pbxproj_path)
+ if self.created_dir:
+ shutil.rmtree(self.path, True)
+ raise
+
+
+def AddSourceToTarget(source, type, pbxp, xct):
+ # TODO(mark): Perhaps source_extensions and library_extensions can be made a
+ # little bit fancier.
+ source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift']
+
+ # .o is conceptually more of a "source" than a "library," but Xcode thinks
+ # of "sources" as things to compile and "libraries" (or "frameworks") as
+ # things to link with. Adding an object file to an Xcode target's frameworks
+ # phase works properly.
+ library_extensions = ['a', 'dylib', 'framework', 'o']
+
+ basename = posixpath.basename(source)
+ (root, ext) = posixpath.splitext(basename)
+ if ext:
+ ext = ext[1:].lower()
+
+ if ext in source_extensions and type != 'none':
+ xct.SourcesPhase().AddFile(source)
+ elif ext in library_extensions and type != 'none':
+ xct.FrameworksPhase().AddFile(source)
+ else:
+ # Files that aren't added to a sources or frameworks build phase can still
+ # go into the project file, just not as part of a build phase.
+ pbxp.AddOrGetFileInRootGroup(source)
+
+
+def AddResourceToTarget(resource, pbxp, xct):
+ # TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
+ # where it's used.
+ xct.ResourcesPhase().AddFile(resource)
+
+
+def AddHeaderToTarget(header, pbxp, xct, is_public):
+ # TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
+ # where it's used.
+ settings = '{ATTRIBUTES = (%s, ); }' % ('Private', 'Public')[is_public]
+ xct.HeadersPhase().AddFile(header, settings)
+
+
+_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
+def ExpandXcodeVariables(string, expansions):
+ """Expands Xcode-style $(VARIABLES) in string per the expansions dict.
+
+ In some rare cases, it is appropriate to expand Xcode variables when a
+ project file is generated. For any substring $(VAR) in string, if VAR is a
+ key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
+ Any $(VAR) substring in string for which VAR is not a key in the expansions
+ dict will remain in the returned string.
+ """
+
+ matches = _xcode_variable_re.findall(string)
+ if matches is None:
+ return string
+
+ matches.reverse()
+ for match in matches:
+ (to_replace, variable) = match
+ if variable not in expansions:
+ continue
+
+ replacement = expansions[variable]
+ string = re.sub(re.escape(to_replace), replacement, string)
+
+ return string
+
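+# A hypothetical illustration of the behavior documented above:
+#   ExpandXcodeVariables('$(INPUT_FILE_BASE).cc', {'INPUT_FILE_BASE': 'two'})
+#     -> 'two.cc'
+#   ExpandXcodeVariables('$(UNKNOWN).cc', {'INPUT_FILE_BASE': 'two'})
+#     -> '$(UNKNOWN).cc'  (unknown variables are left in place)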
+
+_xcode_define_re = re.compile(r'([\\\"\' ])')
+def EscapeXcodeDefine(s):
+  """We must escape the defines that we give to Xcode so that it knows not to
+  split on spaces and to respect backslash and quote literals. However, we
+  must not quote the define, or Xcode will incorrectly interpret variables,
+  especially $(inherited)."""
+ return re.sub(_xcode_define_re, r'\\\1', s)
+
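+# Behavior sketch, mirroring the unit tests in xcode_test.py further below:
+#   EscapeXcodeDefine('a b"c\\')      -> 'a\\ b\\"c\\\\'
+#   EscapeXcodeDefine('$(inherited)') -> '$(inherited)'  (nothing to escape)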
+
+def PerformBuild(data, configurations, params):
+ options = params['options']
+
+ for build_file, build_file_dict in data.items():
+ (build_file_root, build_file_ext) = os.path.splitext(build_file)
+ if build_file_ext != '.gyp':
+ continue
+ xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
+ if options.generator_output:
+ xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
+
+ for config in configurations:
+ arguments = ['xcodebuild', '-project', xcodeproj_path]
+ arguments += ['-configuration', config]
+ print("Building [%s]: %s" % (config, arguments))
+ subprocess.check_call(arguments)
+
+
+def CalculateGeneratorInputInfo(params):
+ toplevel = params['options'].toplevel_dir
+ if params.get('flavor') == 'ninja':
+ generator_dir = os.path.relpath(params['options'].generator_output or '.')
+ output_dir = params.get('generator_flags', {}).get('output_dir', 'out')
+ output_dir = os.path.normpath(os.path.join(generator_dir, output_dir))
+ qualified_out_dir = os.path.normpath(os.path.join(
+ toplevel, output_dir, 'gypfiles-xcode-ninja'))
+ else:
+ output_dir = os.path.normpath(os.path.join(toplevel, 'xcodebuild'))
+ qualified_out_dir = os.path.normpath(os.path.join(
+ toplevel, output_dir, 'gypfiles'))
+
+ global generator_filelist_paths
+ generator_filelist_paths = {
+ 'toplevel': toplevel,
+ 'qualified_out_dir': qualified_out_dir,
+ }
+
+
+def GenerateOutput(target_list, target_dicts, data, params):
+ # Optionally configure each spec to use ninja as the external builder.
+ ninja_wrapper = params.get('flavor') == 'ninja'
+ if ninja_wrapper:
+ (target_list, target_dicts, data) = \
+ gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
+
+ options = params['options']
+ generator_flags = params.get('generator_flags', {})
+ parallel_builds = generator_flags.get('xcode_parallel_builds', True)
+ serialize_all_tests = \
+ generator_flags.get('xcode_serialize_all_test_runs', True)
+ upgrade_check_project_version = \
+ generator_flags.get('xcode_upgrade_check_project_version', None)
+
+  # Format upgrade_check_project_version with leading zeros as needed; e.g. a
+  # flag value of 730 becomes '0730'.
+ if upgrade_check_project_version:
+ upgrade_check_project_version = str(upgrade_check_project_version)
+ while len(upgrade_check_project_version) < 4:
+ upgrade_check_project_version = '0' + upgrade_check_project_version
+
+ skip_excluded_files = \
+ not generator_flags.get('xcode_list_excluded_files', True)
+ xcode_projects = {}
+ for build_file, build_file_dict in data.items():
+ (build_file_root, build_file_ext) = os.path.splitext(build_file)
+ if build_file_ext != '.gyp':
+ continue
+ xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
+ if options.generator_output:
+ xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
+ xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
+ xcode_projects[build_file] = xcp
+ pbxp = xcp.project
+
+ # Set project-level attributes from multiple options
+    project_attributes = {}
+ if parallel_builds:
+ project_attributes['BuildIndependentTargetsInParallel'] = 'YES'
+ if upgrade_check_project_version:
+ project_attributes['LastUpgradeCheck'] = upgrade_check_project_version
+ project_attributes['LastTestingUpgradeCheck'] = \
+ upgrade_check_project_version
+ project_attributes['LastSwiftUpdateCheck'] = \
+ upgrade_check_project_version
+ pbxp.SetProperty('attributes', project_attributes)
+
+ # Add gyp/gypi files to project
+ if not generator_flags.get('standalone'):
+ main_group = pbxp.GetProperty('mainGroup')
+ build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
+ main_group.AppendChild(build_group)
+ for included_file in build_file_dict['included_files']:
+ build_group.AddOrGetFileByPath(included_file, False)
+
+ xcode_targets = {}
+ xcode_target_to_target_dict = {}
+ for qualified_target in target_list:
+ [build_file, target_name, toolset] = \
+ gyp.common.ParseQualifiedTarget(qualified_target)
+
+ spec = target_dicts[qualified_target]
+ if spec['toolset'] != 'target':
+ raise Exception(
+ 'Multiple toolsets not supported in xcode build (target %s)' %
+ qualified_target)
+ configuration_names = [spec['default_configuration']]
+ for configuration_name in sorted(spec['configurations'].keys()):
+ if configuration_name not in configuration_names:
+ configuration_names.append(configuration_name)
+ xcp = xcode_projects[build_file]
+ pbxp = xcp.project
+
+ # Set up the configurations for the target according to the list of names
+ # supplied.
+ xccl = CreateXCConfigurationList(configuration_names)
+
+ # Create an XCTarget subclass object for the target. The type with
+ # "+bundle" appended will be used if the target has "mac_bundle" set.
+ # loadable_modules not in a mac_bundle are mapped to
+ # com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
+ # to create a single-file mh_bundle.
+ _types = {
+ 'executable': 'com.apple.product-type.tool',
+ 'loadable_module': 'com.googlecode.gyp.xcode.bundle',
+ 'shared_library': 'com.apple.product-type.library.dynamic',
+ 'static_library': 'com.apple.product-type.library.static',
+ 'mac_kernel_extension': 'com.apple.product-type.kernel-extension',
+ 'executable+bundle': 'com.apple.product-type.application',
+ 'loadable_module+bundle': 'com.apple.product-type.bundle',
+ 'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
+ 'loadable_module+xcuitest': 'com.apple.product-type.bundle.ui-testing',
+ 'shared_library+bundle': 'com.apple.product-type.framework',
+ 'executable+extension+bundle': 'com.apple.product-type.app-extension',
+ 'executable+watch+extension+bundle':
+ 'com.apple.product-type.watchkit-extension',
+ 'executable+watch+bundle':
+ 'com.apple.product-type.application.watchapp',
+ 'mac_kernel_extension+bundle': 'com.apple.product-type.kernel-extension',
+ }
+
+ target_properties = {
+ 'buildConfigurationList': xccl,
+ 'name': target_name,
+ }
+
+ type = spec['type']
+ is_xctest = int(spec.get('mac_xctest_bundle', 0))
+ is_xcuitest = int(spec.get('mac_xcuitest_bundle', 0))
+ is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest or is_xcuitest
+ is_app_extension = int(spec.get('ios_app_extension', 0))
+ is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
+ is_watch_app = int(spec.get('ios_watch_app', 0))
+ if type != 'none':
+ type_bundle_key = type
+ if is_xcuitest:
+ type_bundle_key += '+xcuitest'
+ assert type == 'loadable_module', (
+ 'mac_xcuitest_bundle targets must have type loadable_module '
+ '(target %s)' % target_name)
+ elif is_xctest:
+ type_bundle_key += '+xctest'
+ assert type == 'loadable_module', (
+ 'mac_xctest_bundle targets must have type loadable_module '
+ '(target %s)' % target_name)
+ elif is_app_extension:
+ assert is_bundle, ('ios_app_extension flag requires mac_bundle '
+ '(target %s)' % target_name)
+ type_bundle_key += '+extension+bundle'
+ elif is_watchkit_extension:
+ assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
+ '(target %s)' % target_name)
+ type_bundle_key += '+watch+extension+bundle'
+ elif is_watch_app:
+ assert is_bundle, ('ios_watch_app flag requires mac_bundle '
+ '(target %s)' % target_name)
+ type_bundle_key += '+watch+bundle'
+ elif is_bundle:
+ type_bundle_key += '+bundle'
+
+ xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
+ try:
+ target_properties['productType'] = _types[type_bundle_key]
+ except KeyError as e:
+ gyp.common.ExceptionAppend(e, "-- unknown product type while "
+ "writing target %s" % target_name)
+ raise
+ else:
+ xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
+ assert not is_bundle, (
+ 'mac_bundle targets cannot have type none (target "%s")' %
+ target_name)
+ assert not is_xcuitest, (
+ 'mac_xcuitest_bundle targets cannot have type none (target "%s")' %
+ target_name)
+ assert not is_xctest, (
+ 'mac_xctest_bundle targets cannot have type none (target "%s")' %
+ target_name)
+
+ target_product_name = spec.get('product_name')
+ if target_product_name is not None:
+ target_properties['productName'] = target_product_name
+
+ xct = xctarget_type(target_properties, parent=pbxp,
+ force_outdir=spec.get('product_dir'),
+ force_prefix=spec.get('product_prefix'),
+ force_extension=spec.get('product_extension'))
+ pbxp.AppendProperty('targets', xct)
+ xcode_targets[qualified_target] = xct
+ xcode_target_to_target_dict[xct] = spec
+
+ spec_actions = spec.get('actions', [])
+ spec_rules = spec.get('rules', [])
+
+ # Xcode has some "issues" with checking dependencies for the "Compile
+ # sources" step with any source files/headers generated by actions/rules.
+ # To work around this, if a target is building anything directly (not
+ # type "none"), then a second target is used to run the GYP actions/rules
+ # and is made a dependency of this target. This way the work is done
+ # before the dependency checks for what should be recompiled.
+ support_xct = None
+ # The Xcode "issues" don't affect xcode-ninja builds, since the dependency
+ # logic all happens in ninja. Don't bother creating the extra targets in
+ # that case.
+ if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
+      support_xccl = CreateXCConfigurationList(configuration_names)
+ support_target_suffix = generator_flags.get(
+ 'support_target_suffix', ' Support')
+ support_target_properties = {
+ 'buildConfigurationList': support_xccl,
+ 'name': target_name + support_target_suffix,
+ }
+ if target_product_name:
+ support_target_properties['productName'] = \
+ target_product_name + ' Support'
+ support_xct = \
+ gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
+ parent=pbxp)
+ pbxp.AppendProperty('targets', support_xct)
+ xct.AddDependency(support_xct)
+ # Hang the support target off the main target so it can be tested/found
+ # by the generator during Finalize.
+ xct.support_target = support_xct
+
+ prebuild_index = 0
+
+ # Add custom shell script phases for "actions" sections.
+ for action in spec_actions:
+ # There's no need to write anything into the script to ensure that the
+ # output directories already exist, because Xcode will look at the
+ # declared outputs and automatically ensure that they exist for us.
+
+ # Do we have a message to print when this action runs?
+ message = action.get('message')
+ if message:
+ message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
+ else:
+ message = ''
+
+ # Turn the list into a string that can be passed to a shell.
+ action_string = gyp.common.EncodePOSIXShellList(action['action'])
+
+ # Convert Xcode-type variable references to sh-compatible environment
+ # variable references.
+ message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
+ action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
+ action_string)
+
+ script = ''
+ # Include the optional message
+ if message_sh:
+ script += message_sh + '\n'
+ # Be sure the script runs in exec, and that if exec fails, the script
+ # exits signalling an error.
+ script += 'exec ' + action_string_sh + '\nexit 1\n'
+ ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
+ 'inputPaths': action['inputs'],
+ 'name': 'Action "' + action['action_name'] + '"',
+ 'outputPaths': action['outputs'],
+ 'shellScript': script,
+ 'showEnvVarsInLog': 0,
+ })
+
+ if support_xct:
+ support_xct.AppendProperty('buildPhases', ssbp)
+ else:
+ # TODO(mark): this assumes too much knowledge of the internals of
+ # xcodeproj_file; some of these smarts should move into xcodeproj_file
+ # itself.
+ xct._properties['buildPhases'].insert(prebuild_index, ssbp)
+ prebuild_index = prebuild_index + 1
+
+ # TODO(mark): Should verify that at most one of these is specified.
+ if int(action.get('process_outputs_as_sources', False)):
+ for output in action['outputs']:
+ AddSourceToTarget(output, type, pbxp, xct)
+
+ if int(action.get('process_outputs_as_mac_bundle_resources', False)):
+ for output in action['outputs']:
+ AddResourceToTarget(output, pbxp, xct)
+
+ # tgt_mac_bundle_resources holds the list of bundle resources so
+ # the rule processing can check against it.
+ if is_bundle:
+ tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
+ else:
+ tgt_mac_bundle_resources = []
+
+ # Add custom shell script phases driving "make" for "rules" sections.
+ #
+ # Xcode's built-in rule support is almost powerful enough to use directly,
+ # but there are a few significant deficiencies that render them unusable.
+ # There are workarounds for some of its inadequacies, but in aggregate,
+ # the workarounds added complexity to the generator, and some workarounds
+ # actually require input files to be crafted more carefully than I'd like.
+ # Consequently, until Xcode rules are made more capable, "rules" input
+ # sections will be handled in Xcode output by shell script build phases
+ # performed prior to the compilation phase.
+ #
+ # The following problems with Xcode rules were found. The numbers are
+ # Apple radar IDs. I hope that these shortcomings are addressed, I really
+ # liked having the rules handled directly in Xcode during the period that
+ # I was prototyping this.
+ #
+ # 6588600 Xcode compiles custom script rule outputs too soon, compilation
+ # fails. This occurs when rule outputs from distinct inputs are
+ # interdependent. The only workaround is to put rules and their
+ # inputs in a separate target from the one that compiles the rule
+ # outputs. This requires input file cooperation and it means that
+ # process_outputs_as_sources is unusable.
+ # 6584932 Need to declare that custom rule outputs should be excluded from
+ # compilation. A possible workaround is to lie to Xcode about a
+ # rule's output, giving it a dummy file it doesn't know how to
+ # compile. The rule action script would need to touch the dummy.
+ # 6584839 I need a way to declare additional inputs to a custom rule.
+ # A possible workaround is a shell script phase prior to
+ # compilation that touches a rule's primary input files if any
+ # would-be additional inputs are newer than the output. Modifying
+ # the source tree - even just modification times - feels dirty.
+ # 6564240 Xcode "custom script" build rules always dump all environment
+    #          variables. This is a low-priority problem and is not a
+ # show-stopper.
+ rules_by_ext = {}
+ for rule in spec_rules:
+ rules_by_ext[rule['extension']] = rule
+
+ # First, some definitions:
+ #
+ # A "rule source" is a file that was listed in a target's "sources"
+ # list and will have a rule applied to it on the basis of matching the
+ # rule's "extensions" attribute. Rule sources are direct inputs to
+ # rules.
+ #
+ # Rule definitions may specify additional inputs in their "inputs"
+ # attribute. These additional inputs are used for dependency tracking
+ # purposes.
+ #
+ # A "concrete output" is a rule output with input-dependent variables
+ # resolved. For example, given a rule with:
+ # 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
+ # if the target's "sources" list contained "one.ext" and "two.ext",
+ # the "concrete output" for rule input "two.ext" would be "two.cc". If
+ # a rule specifies multiple outputs, each input file that the rule is
+ # applied to will have the same number of concrete outputs.
+ #
+ # If any concrete outputs are outdated or missing relative to their
+ # corresponding rule_source or to any specified additional input, the
+ # rule action must be performed to generate the concrete outputs.
+
+ # concrete_outputs_by_rule_source will have an item at the same index
+ # as the rule['rule_sources'] that it corresponds to. Each item is a
+ # list of all of the concrete outputs for the rule_source.
+ concrete_outputs_by_rule_source = []
+
+ # concrete_outputs_all is a flat list of all concrete outputs that this
+ # rule is able to produce, given the known set of input files
+ # (rule_sources) that apply to it.
+ concrete_outputs_all = []
+
+ # messages & actions are keyed by the same indices as rule['rule_sources']
+ # and concrete_outputs_by_rule_source. They contain the message and
+ # action to perform after resolving input-dependent variables. The
+ # message is optional, in which case None is stored for each rule source.
+ messages = []
+ actions = []
+
+ for rule_source in rule.get('rule_sources', []):
+ rule_source_dirname, rule_source_basename = \
+ posixpath.split(rule_source)
+ (rule_source_root, rule_source_ext) = \
+ posixpath.splitext(rule_source_basename)
+
+ # These are the same variable names that Xcode uses for its own native
+ # rule support. Because Xcode's rule engine is not being used, they
+ # need to be expanded as they are written to the makefile.
+ rule_input_dict = {
+ 'INPUT_FILE_BASE': rule_source_root,
+ 'INPUT_FILE_SUFFIX': rule_source_ext,
+ 'INPUT_FILE_NAME': rule_source_basename,
+ 'INPUT_FILE_PATH': rule_source,
+ 'INPUT_FILE_DIRNAME': rule_source_dirname,
+ }
+
+ concrete_outputs_for_this_rule_source = []
+ for output in rule.get('outputs', []):
+ # Fortunately, Xcode and make both use $(VAR) format for their
+ # variables, so the expansion is the only transformation necessary.
+          # Any remaining $(VAR)-type variables in the string can be given
+ # directly to make, which will pick up the correct settings from
+ # what Xcode puts into the environment.
+ concrete_output = ExpandXcodeVariables(output, rule_input_dict)
+ concrete_outputs_for_this_rule_source.append(concrete_output)
+
+ # Add all concrete outputs to the project.
+ pbxp.AddOrGetFileInRootGroup(concrete_output)
+
+ concrete_outputs_by_rule_source.append( \
+ concrete_outputs_for_this_rule_source)
+ concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
+
+ # TODO(mark): Should verify that at most one of these is specified.
+ if int(rule.get('process_outputs_as_sources', False)):
+ for output in concrete_outputs_for_this_rule_source:
+ AddSourceToTarget(output, type, pbxp, xct)
+
+ # If the file came from the mac_bundle_resources list or if the rule
+ # is marked to process outputs as bundle resource, do so.
+ was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
+ if was_mac_bundle_resource or \
+ int(rule.get('process_outputs_as_mac_bundle_resources', False)):
+ for output in concrete_outputs_for_this_rule_source:
+ AddResourceToTarget(output, pbxp, xct)
+
+ # Do we have a message to print when this rule runs?
+ message = rule.get('message')
+ if message:
+ message = gyp.common.EncodePOSIXShellArgument(message)
+ message = ExpandXcodeVariables(message, rule_input_dict)
+ messages.append(message)
+
+ # Turn the list into a string that can be passed to a shell.
+ action_string = gyp.common.EncodePOSIXShellList(rule['action'])
+
+ action = ExpandXcodeVariables(action_string, rule_input_dict)
+ actions.append(action)
+
+ if len(concrete_outputs_all) > 0:
+        # TODO(mark): There's a possibility for collision here. Consider
+ # target "t" rule "A_r" and target "t_A" rule "r".
+ makefile_name = '%s.make' % re.sub(
+            '[^a-zA-Z0-9_]', '_', '%s_%s' % (target_name, rule['rule_name']))
+ makefile_path = os.path.join(xcode_projects[build_file].path,
+ makefile_name)
+ # TODO(mark): try/close? Write to a temporary file and swap it only
+ # if it's got changes?
+ makefile = open(makefile_path, 'w')
+
+ # make will build the first target in the makefile by default. By
+ # convention, it's called "all". List all (or at least one)
+ # concrete output for each rule source as a prerequisite of the "all"
+ # target.
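+        # For instance, with rule sources one.ext and two.ext and outputs
+        # ['$(INPUT_FILE_BASE).cc'], the writes below would produce:
+        #   all: \
+        #       one.cc \
+        #       two.cc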
+ makefile.write('all: \\\n')
+ for concrete_output_index, concrete_output_by_rule_source in \
+ enumerate(concrete_outputs_by_rule_source):
+ # Only list the first (index [0]) concrete output of each input
+ # in the "all" target. Otherwise, a parallel make (-j > 1) would
+ # attempt to process each input multiple times simultaneously.
+          # Were it not for that, "all" could just contain the entire list of
+          # concrete_outputs_all.
+ concrete_output = concrete_output_by_rule_source[0]
+ if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
+ eol = ''
+ else:
+ eol = ' \\'
+ makefile.write(' %s%s\n' % (concrete_output, eol))
+
+ for (rule_source, concrete_outputs, message, action) in \
+ zip(rule['rule_sources'], concrete_outputs_by_rule_source,
+ messages, actions):
+ makefile.write('\n')
+
+ # Add a rule that declares it can build each concrete output of a
+ # rule source. Collect the names of the directories that are
+ # required.
+ concrete_output_dirs = []
+ for concrete_output_index, concrete_output in \
+ enumerate(concrete_outputs):
+ if concrete_output_index == 0:
+ bol = ''
+ else:
+ bol = ' '
+ makefile.write('%s%s \\\n' % (bol, concrete_output))
+
+ concrete_output_dir = posixpath.dirname(concrete_output)
+ if (concrete_output_dir and
+ concrete_output_dir not in concrete_output_dirs):
+ concrete_output_dirs.append(concrete_output_dir)
+
+ makefile.write(' : \\\n')
+
+ # The prerequisites for this rule are the rule source itself and
+ # the set of additional rule inputs, if any.
+ prerequisites = [rule_source]
+ prerequisites.extend(rule.get('inputs', []))
+ for prerequisite_index, prerequisite in enumerate(prerequisites):
+ if prerequisite_index == len(prerequisites) - 1:
+ eol = ''
+ else:
+ eol = ' \\'
+ makefile.write(' %s%s\n' % (prerequisite, eol))
+
+ # Make sure that output directories exist before executing the rule
+ # action.
+ if len(concrete_output_dirs) > 0:
+ makefile.write('\t@mkdir -p "%s"\n' %
+ '" "'.join(concrete_output_dirs))
+
+ # The rule message and action have already had the necessary variable
+ # substitutions performed.
+ if message:
+ # Mark it with note: so Xcode picks it up in build output.
+ makefile.write('\t@echo note: %s\n' % message)
+ makefile.write('\t%s\n' % action)
+
+ makefile.close()
+
+ # It might be nice to ensure that needed output directories exist
+ # here rather than in each target in the Makefile, but that wouldn't
+ # work if there ever was a concrete output that had an input-dependent
+ # variable anywhere other than in the leaf position.
+
+ # To help speed things up, pass -j COUNT to make so it does some work
+ # in parallel. Don't use ncpus because Xcode will build ncpus targets
+ # in parallel and if each target happens to have a rules step, there
+ # would be ncpus^2 things going. With a machine that has 2 quad-core
+ # Xeons, a build can quickly run out of processes based on
+ # scheduling/other tasks, and randomly failing builds are no good.
+ script = \
+"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
+if [ "${JOB_COUNT}" -gt 4 ]; then
+ JOB_COUNT=4
+fi
+exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
+exit 1
+""" % makefile_name
+ ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
+ 'inputPaths': rule['rule_sources'],
+ 'name': 'Rule "' + rule['rule_name'] + '"',
+ 'outputPaths': concrete_outputs_all,
+ 'shellScript': script,
+ 'showEnvVarsInLog': 0,
+ })
+
+ if support_xct:
+ support_xct.AppendProperty('buildPhases', ssbp)
+ else:
+ # TODO(mark): this assumes too much knowledge of the internals of
+ # xcodeproj_file; some of these smarts should move into xcodeproj_file
+ # itself.
+ xct._properties['buildPhases'].insert(prebuild_index, ssbp)
+ prebuild_index = prebuild_index + 1
+
+ # Extra rule inputs also go into the project file. Concrete outputs were
+ # already added when they were computed.
+ groups = ['inputs', 'inputs_excluded']
+ if skip_excluded_files:
+ groups = [x for x in groups if not x.endswith('_excluded')]
+ for group in groups:
+ for item in rule.get(group, []):
+ pbxp.AddOrGetFileInRootGroup(item)
+
+ # Add "sources".
+ for source in spec.get('sources', []):
+ (source_root, source_extension) = posixpath.splitext(source)
+ if source_extension[1:] not in rules_by_ext:
+ # AddSourceToTarget will add the file to a root group if it's not
+ # already there.
+ AddSourceToTarget(source, type, pbxp, xct)
+ else:
+ pbxp.AddOrGetFileInRootGroup(source)
+
+ # Add "mac_bundle_resources" and "mac_framework_private_headers" if
+ # it's a bundle of any type.
+ if is_bundle:
+ for resource in tgt_mac_bundle_resources:
+ (resource_root, resource_extension) = posixpath.splitext(resource)
+ if resource_extension[1:] not in rules_by_ext:
+ AddResourceToTarget(resource, pbxp, xct)
+ else:
+ pbxp.AddOrGetFileInRootGroup(resource)
+
+ for header in spec.get('mac_framework_private_headers', []):
+ AddHeaderToTarget(header, pbxp, xct, False)
+
+ # Add "mac_framework_headers". These can be valid for both frameworks
+ # and static libraries.
+ if is_bundle or type == 'static_library':
+ for header in spec.get('mac_framework_headers', []):
+ AddHeaderToTarget(header, pbxp, xct, True)
+
+ # Add "copies".
+ pbxcp_dict = {}
+ for copy_group in spec.get('copies', []):
+ dest = copy_group['destination']
+ if dest[0] not in ('/', '$'):
+ # Relative paths are relative to $(SRCROOT).
+ dest = '$(SRCROOT)/' + dest
+
+ code_sign = int(copy_group.get('xcode_code_sign', 0))
+      settings = (None, '{ATTRIBUTES = (CodeSignOnCopy, ); }')[code_sign]
+
+ # Coalesce multiple "copies" sections in the same target with the same
+ # "destination" property into the same PBXCopyFilesBuildPhase, otherwise
+ # they'll wind up with ID collisions.
+ pbxcp = pbxcp_dict.get(dest, None)
+ if pbxcp is None:
+ pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
+ 'name': 'Copy to ' + copy_group['destination']
+ },
+ parent=xct)
+ pbxcp.SetDestination(dest)
+
+ # TODO(mark): The usual comment about this knowing too much about
+ # gyp.xcodeproj_file internals applies.
+ xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
+
+ pbxcp_dict[dest] = pbxcp
+
+ for file in copy_group['files']:
+ pbxcp.AddFile(file, settings)
+
+ # Excluded files can also go into the project file.
+ if not skip_excluded_files:
+ for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
+ 'mac_framework_private_headers']:
+ excluded_key = key + '_excluded'
+ for item in spec.get(excluded_key, []):
+ pbxp.AddOrGetFileInRootGroup(item)
+
+ # So can "inputs" and "outputs" sections of "actions" groups.
+ groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
+ if skip_excluded_files:
+ groups = [x for x in groups if not x.endswith('_excluded')]
+ for action in spec.get('actions', []):
+ for group in groups:
+ for item in action.get(group, []):
+ # Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
+ # sources.
+ if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
+ pbxp.AddOrGetFileInRootGroup(item)
+
+ for postbuild in spec.get('postbuilds', []):
+ action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
+ script = 'exec ' + action_string_sh + '\nexit 1\n'
+
+ # Make the postbuild step depend on the output of ld or ar from this
+ # target. Apparently putting the script step after the link step isn't
+ # sufficient to ensure proper ordering in all cases. With an input
+ # declared but no outputs, the script step should run every time, as
+ # desired.
+ ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
+ 'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
+ 'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
+ 'shellScript': script,
+ 'showEnvVarsInLog': 0,
+ })
+ xct.AppendProperty('buildPhases', ssbp)
+
+ # Add dependencies before libraries, because adding a dependency may imply
+ # adding a library. It's preferable to keep dependencies listed first
+ # during a link phase so that they can override symbols that would
+ # otherwise be provided by libraries, which will usually include system
+ # libraries. On some systems, ld is finicky and even requires the
+ # libraries to be ordered in such a way that unresolved symbols in
+ # earlier-listed libraries may only be resolved by later-listed libraries.
+ # The Mac linker doesn't work that way, but other platforms do, and so
+ # their linker invocations need to be constructed in this way. There's
+ # no compelling reason for Xcode's linker invocations to differ.
+
+ if 'dependencies' in spec:
+ for dependency in spec['dependencies']:
+ xct.AddDependency(xcode_targets[dependency])
+ # The support project also gets the dependencies (in case they are
+ # needed for the actions/rules to work).
+ if support_xct:
+ support_xct.AddDependency(xcode_targets[dependency])
+
+ if 'libraries' in spec:
+ for library in spec['libraries']:
+ xct.FrameworksPhase().AddFile(library)
+ # Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
+ # I wish Xcode handled this automatically.
+ library_dir = posixpath.dirname(library)
+ if library_dir not in xcode_standard_library_dirs and (
+ not xct.HasBuildSetting(_library_search_paths_var) or
+ library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
+ xct.AppendBuildSetting(_library_search_paths_var, library_dir)
+
+ for configuration_name in configuration_names:
+ configuration = spec['configurations'][configuration_name]
+ xcbc = xct.ConfigurationNamed(configuration_name)
+ for include_dir in configuration.get('mac_framework_dirs', []):
+ xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
+ for include_dir in configuration.get('include_dirs', []):
+ xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
+ for library_dir in configuration.get('library_dirs', []):
+ if library_dir not in xcode_standard_library_dirs and (
+ not xcbc.HasBuildSetting(_library_search_paths_var) or
+ library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
+ xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
+
+ if 'defines' in configuration:
+ for define in configuration['defines']:
+ set_define = EscapeXcodeDefine(define)
+ xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
+ if 'xcode_settings' in configuration:
+ for xck, xcv in configuration['xcode_settings'].items():
+ xcbc.SetBuildSetting(xck, xcv)
+ if 'xcode_config_file' in configuration:
+ config_ref = pbxp.AddOrGetFileInRootGroup(
+ configuration['xcode_config_file'])
+ xcbc.SetBaseConfiguration(config_ref)
+
+ build_files = []
+ for build_file, build_file_dict in data.items():
+ if build_file.endswith('.gyp'):
+ build_files.append(build_file)
+
+ for build_file in build_files:
+ xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
+
+ for build_file in build_files:
+ xcode_projects[build_file].Finalize2(xcode_targets,
+ xcode_target_to_target_dict)
+
+ for build_file in build_files:
+ xcode_projects[build_file].Write()
diff --git a/third_party/python/gyp/pylib/gyp/generator/xcode_test.py b/third_party/python/gyp/pylib/gyp/generator/xcode_test.py
new file mode 100644
index 0000000000..260324a43f
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/generator/xcode_test.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Unit tests for the xcode.py file. """
+
+import gyp.generator.xcode as xcode
+import unittest
+import sys
+
+
+class TestEscapeXcodeDefine(unittest.TestCase):
+ if sys.platform == 'darwin':
+ def test_InheritedRemainsUnescaped(self):
+ self.assertEqual(xcode.EscapeXcodeDefine('$(inherited)'), '$(inherited)')
+
+ def test_Escaping(self):
+ self.assertEqual(xcode.EscapeXcodeDefine('a b"c\\'), 'a\\ b\\"c\\\\')
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/python/gyp/pylib/gyp/input.py b/third_party/python/gyp/pylib/gyp/input.py
new file mode 100644
index 0000000000..2bea3341ad
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/input.py
@@ -0,0 +1,2908 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import ast
+import gyp.common
+import gyp.simple_copy
+import multiprocessing
+import optparse
+import os.path
+import re
+import shlex
+import signal
+import subprocess
+import sys
+import threading
+import time
+import traceback
+from gyp.common import GypError
+from gyp.common import OrderedSet
+
+
+# A list of types that are treated as linkable.
+linkable_types = [
+ 'executable',
+ 'shared_library',
+ 'loadable_module',
+ 'mac_kernel_extension',
+ 'windows_driver',
+]
+
+# A list of sections that contain links to other targets.
+dependency_sections = ['dependencies', 'export_dependent_settings']
+
+# base_path_sections is a list of sections defined by GYP that contain
+# pathnames. The generators can provide more keys, the two lists are merged
+# into path_sections, but you should call IsPathSection instead of using either
+# list directly.
+base_path_sections = [
+ 'destination',
+ 'files',
+ 'include_dirs',
+ 'inputs',
+ 'libraries',
+ 'outputs',
+ 'sources',
+]
+path_sections = set()
+
+# These per-process dictionaries are used to cache build file data when loading
+# in parallel mode.
+per_process_data = {}
+per_process_aux_data = {}
+
+try:
+ _str_types = (basestring,)
+# There's no basestring in python3.
+except NameError:
+ _str_types = (str,)
+
+try:
+ _int_types = (int, long)
+# There's no long in python3.
+except NameError:
+ _int_types = (int,)
+
+# Shortcuts as we use these combos a lot.
+_str_int_types = _str_types + _int_types
+_str_int_list_types = _str_int_types + (list,)
+
+
+def IsPathSection(section):
+ # If section ends in one of the '=+?!' characters, it's applied to a section
+ # without the trailing characters. '/' is notably absent from this list,
+ # because there's no way for a regular expression to be treated as a path.
+ while section and section[-1:] in '=+?!':
+ section = section[:-1]
+
+ if section in path_sections:
+ return True
+
+  # Sections matching the regexp '_(dir|file|path)s?$' are also
+ # considered PathSections. Using manual string matching since that
+ # is much faster than the regexp and this can be called hundreds of
+ # thousands of times so micro performance matters.
+ if "_" in section:
+ tail = section[-6:]
+ if tail[-1] == 's':
+ tail = tail[:-1]
+ if tail[-5:] in ('_file', '_path'):
+ return True
+ return tail[-4:] == '_dir'
+
+ return False
+
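+# Behavior sketch for the manual suffix matching above (assuming the section
+# was not otherwise merged into path_sections):
+#   IsPathSection('include_dirs') -> True   ('_dir' plus plural 's')
+#   IsPathSection('map_file')     -> True   ('_file')
+#   IsPathSection('defines')      -> False  (no path-like suffix)
+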
+# base_non_configuration_keys is a list of key names that belong in the target
+# itself and should not be propagated into its configurations. It is merged
+# with a list that can come from the generator to
+# create non_configuration_keys.
+base_non_configuration_keys = [
+ # Sections that must exist inside targets and not configurations.
+ 'actions',
+ 'all_dependent_settings',
+ 'configurations',
+ 'copies',
+ 'default_configuration',
+ 'dependencies',
+ 'dependencies_original',
+ 'direct_dependent_settings',
+ 'libraries',
+ 'postbuilds',
+ 'product_dir',
+ 'product_extension',
+ 'product_name',
+ 'product_prefix',
+ 'rules',
+ 'run_as',
+ 'sources',
+ 'standalone_static_library',
+ 'suppress_wildcard',
+ 'target_name',
+ 'toolset',
+ 'toolsets',
+ 'type',
+
+ # Sections that can be found inside targets or configurations, but that
+ # should not be propagated from targets into their configurations.
+ 'variables',
+]
+non_configuration_keys = []
+
+# Keys that do not belong inside a configuration dictionary.
+invalid_configuration_keys = [
+ 'actions',
+ 'all_dependent_settings',
+ 'configurations',
+ 'dependencies',
+ 'direct_dependent_settings',
+ 'libraries',
+ 'link_settings',
+ 'sources',
+ 'standalone_static_library',
+ 'target_name',
+ 'type',
+]
+
+# Controls whether or not the generator supports multiple toolsets.
+multiple_toolsets = False
+
+# Paths for converting filelist paths to output paths: {
+# toplevel,
+#   qualified_out_dir,
+# }
+generator_filelist_paths = None
+
+def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
+ """Return a list of all build files included into build_file_path.
+
+ The returned list will contain build_file_path as well as all other files
+ that it included, either directly or indirectly. Note that the list may
+ contain files that were included into a conditional section that evaluated
+ to false and was not merged into build_file_path's dict.
+
+ aux_data is a dict containing a key for each build file or included build
+ file. Those keys provide access to dicts whose "included" keys contain
+ lists of all other files included by the build file.
+
+ included should be left at its default None value by external callers. It
+ is used for recursion.
+
+ The returned list will not contain any duplicate entries. Each build file
+ in the list will be relative to the current directory.
+ """
+
+  if included is None:
+ included = []
+
+ if build_file_path in included:
+ return included
+
+ included.append(build_file_path)
+
+ for included_build_file in aux_data[build_file_path].get('included', []):
+ GetIncludedBuildFiles(included_build_file, aux_data, included)
+
+ return included
+
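+# Hypothetical example: if a.gyp includes common.gypi, which in turn includes
+# paths.gypi, then GetIncludedBuildFiles('a.gyp', aux_data) returns
+# ['a.gyp', 'common.gypi', 'paths.gypi'].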
+
+def CheckedEval(file_contents):
+ """Return the eval of a gyp file.
+
+ The gyp file is restricted to dictionaries and lists only, and
+ repeated keys are not allowed.
+
+ Note that this is slower than eval() is.
+ """
+
+ syntax_tree = ast.parse(file_contents)
+ assert isinstance(syntax_tree, ast.Module)
+ c1 = syntax_tree.body
+ assert len(c1) == 1
+ c2 = c1[0]
+ assert isinstance(c2, ast.Expr)
+ return CheckNode(c2.value, [])
+
+
+def CheckNode(node, keypath):
+ if isinstance(node, ast.Dict):
+    result = {}
+    for key, value in zip(node.keys, node.values):
+      assert isinstance(key, ast.Str)
+      key = key.s
+      if key in result:
+        raise GypError("Key '" + key + "' repeated at level " +
+                       repr(len(keypath) + 1) + " with key path '" +
+                       '.'.join(keypath) + "'")
+      kp = list(keypath)  # Make a copy of the list for descending this node.
+      kp.append(key)
+      result[key] = CheckNode(value, kp)
+    return result
+ elif isinstance(node, ast.List):
+ children = []
+ for index, child in enumerate(node.elts):
+ kp = list(keypath) # Copy list.
+ kp.append(repr(index))
+ children.append(CheckNode(child, kp))
+ return children
+ elif isinstance(node, ast.Str):
+ return node.s
+ else:
+ raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
+ "': " + repr(node))
+
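+# Example of the restriction enforced above: gyp files may contain only
+# (nested) dicts, lists and strings, so
+#   CheckedEval("{'targets': [{'target_name': 'foo'}]}")
+# returns the corresponding dict, a repeated key such as in
+#   CheckedEval("{'a': '1', 'a': '2'}")
+# raises GypError, and any other node type (e.g. a bare integer) raises
+# TypeError.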
+
+def LoadOneBuildFile(build_file_path, data, aux_data, includes,
+ is_target, check):
+ if build_file_path in data:
+ return data[build_file_path]
+
+ if os.path.exists(build_file_path):
+    # Read via a context manager so the file handle is closed promptly.
+    with open(build_file_path, 'rb') as build_file:
+      build_file_contents = build_file.read().decode('utf-8')
+ else:
+ raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
+
+ build_file_data = None
+ try:
+ if check:
+ build_file_data = CheckedEval(build_file_contents)
+ else:
+ build_file_data = eval(build_file_contents, {'__builtins__': None},
+ None)
+ except SyntaxError as e:
+ e.filename = build_file_path
+ raise
+ except Exception as e:
+ gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
+ raise
+
+ if type(build_file_data) is not dict:
+ raise GypError("%s does not evaluate to a dictionary." % build_file_path)
+
+ data[build_file_path] = build_file_data
+ aux_data[build_file_path] = {}
+
+ # Scan for includes and merge them in.
+ if ('skip_includes' not in build_file_data or
+ not build_file_data['skip_includes']):
+ try:
+ if is_target:
+ LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
+ aux_data, includes, check)
+ else:
+ LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
+ aux_data, None, check)
+ except Exception as e:
+ gyp.common.ExceptionAppend(e,
+ 'while reading includes of ' + build_file_path)
+ raise
+
+ return build_file_data
+
+
+def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
+ includes, check):
+ includes_list = []
+  if includes is not None:
+ includes_list.extend(includes)
+ if 'includes' in subdict:
+ for include in subdict['includes']:
+ # "include" is specified relative to subdict_path, so compute the real
+ # path to include by appending the provided "include" to the directory
+ # in which subdict_path resides.
+ relative_include = \
+ os.path.normpath(os.path.join(os.path.dirname(subdict_path), include))
+ includes_list.append(relative_include)
+ # Unhook the includes list, it's no longer needed.
+ del subdict['includes']
+
+ # Merge in the included files.
+ for include in includes_list:
+    if 'included' not in aux_data[subdict_path]:
+ aux_data[subdict_path]['included'] = []
+ aux_data[subdict_path]['included'].append(include)
+
+ gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
+
+ MergeDicts(subdict,
+ LoadOneBuildFile(include, data, aux_data, None, False, check),
+ subdict_path, include)
+
+ # Recurse into subdictionaries.
+ for k, v in subdict.items():
+ if type(v) is dict:
+ LoadBuildFileIncludesIntoDict(v, subdict_path, data, aux_data,
+ None, check)
+ elif type(v) is list:
+ LoadBuildFileIncludesIntoList(v, subdict_path, data, aux_data,
+ check)
+
+
+# This recurses into lists so that it can look for dicts.
+def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
+ for item in sublist:
+ if type(item) is dict:
+ LoadBuildFileIncludesIntoDict(item, sublist_path, data, aux_data,
+ None, check)
+ elif type(item) is list:
+ LoadBuildFileIncludesIntoList(item, sublist_path, data, aux_data, check)
+
+# Processes toolsets in all the targets. This recurses into condition entries
+# since they can contain toolsets as well.
+def ProcessToolsetsInDict(data):
+ if 'targets' in data:
+ target_list = data['targets']
+ new_target_list = []
+ for target in target_list:
+ # If this target already has an explicit 'toolset', and no 'toolsets'
+ # list, don't modify it further.
+ if 'toolset' in target and 'toolsets' not in target:
+ new_target_list.append(target)
+ continue
+ if multiple_toolsets:
+ toolsets = target.get('toolsets', ['target'])
+ else:
+ toolsets = ['target']
+ # Make sure this 'toolsets' definition is only processed once.
+ if 'toolsets' in target:
+ del target['toolsets']
+ if len(toolsets) > 0:
+ # Optimization: only do copies if more than one toolset is specified.
+ for build in toolsets[1:]:
+ new_target = gyp.simple_copy.deepcopy(target)
+ new_target['toolset'] = build
+ new_target_list.append(new_target)
+ target['toolset'] = toolsets[0]
+ new_target_list.append(target)
+ data['targets'] = new_target_list
+ if 'conditions' in data:
+ for condition in data['conditions']:
+ if type(condition) is list:
+ for condition_dict in condition[1:]:
+ if type(condition_dict) is dict:
+ ProcessToolsetsInDict(condition_dict)
+
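+# Sketch of the expansion above when multiple_toolsets is True: a target dict
+# {'target_name': 'foo', 'toolsets': ['host', 'target']} is replaced by two
+# targets, one with 'toolset': 'host' and a deep copy with
+# 'toolset': 'target'; the 'toolsets' key itself is removed.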
+
+# TODO(mark): I don't love this name. It just means that it's going to load
+# a build file that contains targets and is expected to provide a targets dict
+# that contains the targets...
+def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
+ depth, check, load_dependencies):
+ # If depth is set, predefine the DEPTH variable to be a relative path from
+ # this build file's directory to the directory identified by depth.
+ if depth:
+ # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
+ # temporary measure. This should really be addressed by keeping all paths
+ # in POSIX until actual project generation.
+ d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
+ if d == '':
+ variables['DEPTH'] = '.'
+ else:
+ variables['DEPTH'] = d.replace('\\', '/')
+
+ # The 'target_build_files' key is only set when loading target build files in
+ # the non-parallel code path, where LoadTargetBuildFile is called
+ # recursively. In the parallel code path, we don't need to check whether the
+ # |build_file_path| has already been loaded, because the 'scheduled' set in
+ # ParallelState guarantees that we never load the same |build_file_path|
+ # twice.
+ if 'target_build_files' in data:
+ if build_file_path in data['target_build_files']:
+ # Already loaded.
+ return False
+ data['target_build_files'].add(build_file_path)
+
+ gyp.DebugOutput(gyp.DEBUG_INCLUDES,
+ "Loading Target Build File '%s'", build_file_path)
+
+ build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
+ includes, True, check)
+
+ # Store DEPTH for later use in generators.
+ build_file_data['_DEPTH'] = depth
+
+ # Set up the included_files key indicating which .gyp files contributed to
+ # this target dict.
+ if 'included_files' in build_file_data:
+ raise GypError(build_file_path + ' must not contain included_files key')
+
+ included = GetIncludedBuildFiles(build_file_path, aux_data)
+ build_file_data['included_files'] = []
+ for included_file in included:
+ # included_file is relative to the current directory, but it needs to
+ # be made relative to build_file_path's directory.
+ included_relative = \
+ gyp.common.RelativePath(included_file,
+ os.path.dirname(build_file_path))
+ build_file_data['included_files'].append(included_relative)
+
+ # Do a first round of toolsets expansion so that conditions can be defined
+ # per toolset.
+ ProcessToolsetsInDict(build_file_data)
+
+ # Apply "pre"/"early" variable expansions and condition evaluations.
+ ProcessVariablesAndConditionsInDict(
+ build_file_data, PHASE_EARLY, variables, build_file_path)
+
+ # Since some toolsets might have been defined conditionally, perform
+ # a second round of toolsets expansion now.
+ ProcessToolsetsInDict(build_file_data)
+
+ # Look at each project's target_defaults dict, and merge settings into
+ # targets.
+ if 'target_defaults' in build_file_data:
+ if 'targets' not in build_file_data:
+ raise GypError("Unable to find targets in build file %s" %
+ build_file_path)
+
+ index = 0
+ while index < len(build_file_data['targets']):
+ # This procedure needs to give the impression that target_defaults is
+ # used as defaults, and the individual targets inherit from that.
+ # The individual targets need to be merged into the defaults. Make
+ # a deep copy of the defaults for each target, merge the target dict
+ # as found in the input file into that copy, and then hook up the
+ # copy with the target-specific data merged into it as the replacement
+ # target dict.
+ old_target_dict = build_file_data['targets'][index]
+ new_target_dict = gyp.simple_copy.deepcopy(
+ build_file_data['target_defaults'])
+ MergeDicts(new_target_dict, old_target_dict,
+ build_file_path, build_file_path)
+ build_file_data['targets'][index] = new_target_dict
+ index += 1
+
+ # No longer needed.
+ del build_file_data['target_defaults']
+
+ # Look for dependencies. This means that dependency resolution occurs
+ # after "pre" conditionals and variable expansion, but before "post" -
+ # in other words, you can't put a "dependencies" section inside a "post"
+ # conditional within a target.
+
+ dependencies = []
+ if 'targets' in build_file_data:
+ for target_dict in build_file_data['targets']:
+ if 'dependencies' not in target_dict:
+ continue
+ for dependency in target_dict['dependencies']:
+ dependencies.append(
+ gyp.common.ResolveTarget(build_file_path, dependency, None)[0])
+
+ if load_dependencies:
+ for dependency in dependencies:
+ try:
+ LoadTargetBuildFile(dependency, data, aux_data, variables,
+ includes, depth, check, load_dependencies)
+ except Exception as e:
+ gyp.common.ExceptionAppend(
+ e, 'while loading dependencies of %s' % build_file_path)
+ raise
+ else:
+ return (build_file_path, dependencies)
+
+def CallLoadTargetBuildFile(global_flags,
+ build_file_path, variables,
+ includes, depth, check,
+ generator_input_info):
+ """Wrapper around LoadTargetBuildFile for parallel processing.
+
+ This wrapper is used when LoadTargetBuildFile is executed in
+ a worker process.
+ """
+
+ try:
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+
+ # Apply globals so that the worker process behaves the same.
+ for key, value in global_flags.items():
+ globals()[key] = value
+
+ SetGeneratorGlobals(generator_input_info)
+ result = LoadTargetBuildFile(build_file_path, per_process_data,
+ per_process_aux_data, variables,
+ includes, depth, check, False)
+ if not result:
+ return result
+
+ (build_file_path, dependencies) = result
+
+ # We can safely pop the build_file_data from per_process_data because it
+ # will never be referenced by this process again, so we don't need to keep
+ # it in the cache.
+ build_file_data = per_process_data.pop(build_file_path)
+
+ # This gets serialized and sent back to the main process via a pipe.
+ # It's handled in LoadTargetBuildFileCallback.
+ return (build_file_path,
+ build_file_data,
+ dependencies)
+ except GypError as e:
+ sys.stderr.write("gyp: %s\n" % e)
+ return None
+ except Exception as e:
+ print('Exception:', e, file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ return None
+
+
+class ParallelProcessingError(Exception):
+ pass
+
+
+class ParallelState(object):
+ """Class to keep track of state when processing input files in parallel.
+
+ If build files are loaded in parallel, use this to keep track of
+ state during farming out and processing parallel jobs. It's stored
+ in a global so that the callback function can have access to it.
+ """
+
+ def __init__(self):
+ # The multiprocessing pool.
+ self.pool = None
+ # The condition variable used to protect this object and notify
+ # the main loop when there might be more data to process.
+ self.condition = None
+ # The "data" dict that was passed to LoadTargetBuildFileParallel
+ self.data = None
+ # The number of parallel calls outstanding; decremented when a response
+ # was received.
+ self.pending = 0
+ # The set of all build files that have been scheduled, so we don't
+ # schedule the same one twice.
+ self.scheduled = set()
+ # A list of dependency build file paths that haven't been scheduled yet.
+ self.dependencies = []
+ # Flag to indicate if there was an error in a child process.
+ self.error = False
+
+ def LoadTargetBuildFileCallback(self, result):
+ """Handle the results of running LoadTargetBuildFile in another process.
+ """
+ self.condition.acquire()
+ if not result:
+ self.error = True
+ self.condition.notify()
+ self.condition.release()
+ return
+ (build_file_path0, build_file_data0, dependencies0) = result
+ self.data[build_file_path0] = build_file_data0
+ self.data['target_build_files'].add(build_file_path0)
+ for new_dependency in dependencies0:
+ if new_dependency not in self.scheduled:
+ self.scheduled.add(new_dependency)
+ self.dependencies.append(new_dependency)
+ self.pending -= 1
+ self.condition.notify()
+ self.condition.release()
+
+
+def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
+ check, generator_input_info):
+ parallel_state = ParallelState()
+ parallel_state.condition = threading.Condition()
+ # Make copies of the build_files argument that we can modify while working.
+ parallel_state.dependencies = list(build_files)
+ parallel_state.scheduled = set(build_files)
+ parallel_state.pending = 0
+ parallel_state.data = data
+
+ try:
+ parallel_state.condition.acquire()
+ while parallel_state.dependencies or parallel_state.pending:
+ if parallel_state.error:
+ break
+ if not parallel_state.dependencies:
+ parallel_state.condition.wait()
+ continue
+
+ dependency = parallel_state.dependencies.pop()
+
+ parallel_state.pending += 1
+ global_flags = {
+ 'path_sections': globals()['path_sections'],
+ 'non_configuration_keys': globals()['non_configuration_keys'],
+ 'multiple_toolsets': globals()['multiple_toolsets']}
+
+ if not parallel_state.pool:
+ parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
+ parallel_state.pool.apply_async(
+ CallLoadTargetBuildFile,
+ args = (global_flags, dependency,
+ variables, includes, depth, check, generator_input_info),
+ callback = parallel_state.LoadTargetBuildFileCallback)
+ except KeyboardInterrupt as e:
+ parallel_state.pool.terminate()
+ raise e
+
+ parallel_state.condition.release()
+
+ parallel_state.pool.close()
+ parallel_state.pool.join()
+ parallel_state.pool = None
+
+ if parallel_state.error:
+ sys.exit(1)
+
+# Look for the bracket that matches the first bracket seen in a
+# string, and return the start and end as a tuple. For example, if
+# the input is something like "<(foo <(bar)) blah", then it would
+# return (1, 13), indicating the entire string except for the leading
+# "<" and trailing " blah".
+LBRACKETS = set('{[(')
+BRACKETS = {'}': '{', ']': '[', ')': '('}
+def FindEnclosingBracketGroup(input_str):
+ stack = []
+ start = -1
+ for index, char in enumerate(input_str):
+ if char in LBRACKETS:
+ stack.append(char)
+ if start == -1:
+ start = index
+ elif char in BRACKETS:
+ if not stack:
+ return (-1, -1)
+ if stack.pop() != BRACKETS[char]:
+ return (-1, -1)
+ if not stack:
+ return (start, index + 1)
+ return (-1, -1)
+
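+# Two more sketches of the matching above:
+#   FindEnclosingBracketGroup('<([list])')    -> (1, 9), spanning '([list])'
+#   FindEnclosingBracketGroup('<(unbalanced') -> (-1, -1)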
+
+def IsStrCanonicalInt(string):
+ """Returns True if |string| is in its canonical integer form.
+
+ The canonical form is such that str(int(string)) == string.
+ """
+ if isinstance(string, _str_types):
+ # This function is called a lot so for maximum performance, avoid
+ # involving regexps which would otherwise make the code much
+ # shorter. Regexps would need twice the time of this function.
+ if string:
+ if string == "0":
+ return True
+ if string[0] == "-":
+ string = string[1:]
+ if not string:
+ return False
+ if '1' <= string[0] <= '9':
+ return string.isdigit()
+
+ return False
+
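+# Examples of the canonical-form check above:
+#   IsStrCanonicalInt('123') -> True
+#   IsStrCanonicalInt('-9')  -> True
+#   IsStrCanonicalInt('012') -> False  (str(int('012')) == '12')
+#   IsStrCanonicalInt('-0')  -> False  (the canonical form is '0')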
+
+# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
+# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
+# In the last case, the inner "<()" is captured in match['content'].
+early_variable_re = re.compile(
+ r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
+ r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
+ r'\((?P<is_array>\s*\[?)'
+ r'(?P<content>.*?)(\]?)\))')
+
+# This matches the same as early_variable_re, but with '>' instead of '<'.
+late_variable_re = re.compile(
+ r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
+ r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
+ r'\((?P<is_array>\s*\[?)'
+ r'(?P<content>.*?)(\]?)\))')
+
+# This matches the same as early_variable_re, but with '^' instead of '<'.
+latelate_variable_re = re.compile(
+ r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
+ r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
+ r'\((?P<is_array>\s*\[?)'
+ r'(?P<content>.*?)(\]?)\))')
+
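+# For instance, matching early_variable_re against '<!@(echo hi)' yields
+# groups roughly like type='<!@', command_string=None, is_array='' and
+# content='echo hi'; the '!' requests command execution and the '@' requests
+# list expansion (see ExpandVariables below).
+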
+# Global cache of results from running commands so they don't have to be run
+# more than once.
+cached_command_results = {}
+
+
+def FixupPlatformCommand(cmd):
+ if sys.platform == 'win32':
+ if type(cmd) is list:
+ cmd = [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
+ else:
+ cmd = re.sub('^cat ', 'type ', cmd)
+ return cmd
+
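+# Sketch: on win32, FixupPlatformCommand('cat header.in') returns
+# 'type header.in'; on every other platform the command is returned unchanged.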
+
+PHASE_EARLY = 0
+PHASE_LATE = 1
+PHASE_LATELATE = 2
+
+
+def ExpandVariables(input, phase, variables, build_file):
+ # Look for the pattern that gets expanded into variables
+ if phase == PHASE_EARLY:
+ variable_re = early_variable_re
+ expansion_symbol = '<'
+ elif phase == PHASE_LATE:
+ variable_re = late_variable_re
+ expansion_symbol = '>'
+ elif phase == PHASE_LATELATE:
+ variable_re = latelate_variable_re
+ expansion_symbol = '^'
+ else:
+ assert False
+
+ input_str = str(input)
+ if IsStrCanonicalInt(input_str):
+ return int(input_str)
+
+ # Do a quick scan to determine if an expensive regex search is warranted.
+ if expansion_symbol not in input_str:
+ return input_str
+
+ # Get the entire list of matches as a list of MatchObject instances.
+ # (using findall here would return strings instead of MatchObjects).
+ matches = list(variable_re.finditer(input_str))
+ if not matches:
+ return input_str
+
+ output = input_str
+ # Reverse the list of matches so that replacements are done right-to-left.
+ # That ensures that earlier replacements won't mess up the string in a
+ # way that causes later calls to find the earlier substituted text instead
+ # of what's intended for replacement.
+ matches.reverse()
+ for match_group in matches:
+ match = match_group.groupdict()
+ gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
+ # match['replace'] is the substring to look for, match['type']
+ # is the character code for the replacement type (< > <! >! <| >| <@
+ # >@ <!@ >!@), match['is_array'] contains a '[' for command
+ # arrays, and match['content'] is the name of the variable (< >)
+ # or command to run (<! >!). match['command_string'] is an optional
+ # command string. Currently, only 'pymod_do_main' is supported.
+
+ # run_command is true if a ! variant is used.
+ run_command = '!' in match['type']
+ command_string = match['command_string']
+
+ # file_list is true if a | variant is used.
+ file_list = '|' in match['type']
+
+ # Capture these now so we can adjust them later.
+ replace_start = match_group.start('replace')
+ replace_end = match_group.end('replace')
+
+ # Find the ending paren, and re-evaluate the contained string.
+ (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])
+
+ # Adjust the replacement range to match the entire command
+ # found by FindEnclosingBracketGroup (since the variable_re
+ # probably doesn't match the entire command if it contained
+ # nested variables).
+ replace_end = replace_start + c_end
+
+ # Find the "real" replacement, matching the appropriate closing
+ # paren, and adjust the replacement start and end.
+ replacement = input_str[replace_start:replace_end]
+
+ # Figure out what the contents of the variable parens are.
+ contents_start = replace_start + c_start + 1
+ contents_end = replace_end - 1
+ contents = input_str[contents_start:contents_end]
+
+ # Do filter substitution now for <|().
+ # Admittedly, this is different than the evaluation order in other
+ # contexts. However, since filtration has no chance to run on <|(),
+ # this seems like the only obvious way to give them access to filters.
+ if file_list:
+ processed_variables = gyp.simple_copy.deepcopy(variables)
+ ProcessListFiltersInDict(contents, processed_variables)
+ # Recurse to expand variables in the contents
+ contents = ExpandVariables(contents, phase,
+ processed_variables, build_file)
+ else:
+ # Recurse to expand variables in the contents
+ contents = ExpandVariables(contents, phase, variables, build_file)
+
+ # Strip off leading/trailing whitespace so that variable matches are
+ # simpler below (and because they are rarely needed).
+ contents = contents.strip()
+
+ # expand_to_list is true if an @ variant is used. In that case,
+    # the expansion should result in a list.  Note that the caller must
+    # be expecting a list in return; not all callers are, because not
+    # all of them work in list context.  Also, for list
+ # expansions, there can be no other text besides the variable
+ # expansion in the input string.
+ expand_to_list = '@' in match['type'] and input_str == replacement
+
+ if run_command or file_list:
+ # Find the build file's directory, so commands can be run or file lists
+ # generated relative to it.
+ build_file_dir = os.path.dirname(build_file)
+ if build_file_dir == '' and not file_list:
+ # If build_file is just a leaf filename indicating a file in the
+ # current directory, build_file_dir might be an empty string. Set
+ # it to None to signal to subprocess.Popen that it should run the
+ # command in the current directory.
+ build_file_dir = None
+
+ # Support <|(listfile.txt ...) which generates a file
+ # containing items from a gyp list, generated at gyp time.
+ # This works around actions/rules which have more inputs than will
+ # fit on the command line.
+ if file_list:
+ if type(contents) is list:
+ contents_list = contents
+ else:
+ contents_list = contents.split(' ')
+ replacement = contents_list[0]
+ if os.path.isabs(replacement):
+ raise GypError('| cannot handle absolute paths, got "%s"' % replacement)
+
+ if not generator_filelist_paths:
+ path = os.path.join(build_file_dir, replacement)
+ else:
+ if os.path.isabs(build_file_dir):
+ toplevel = generator_filelist_paths['toplevel']
+ rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
+ else:
+ rel_build_file_dir = build_file_dir
+ qualified_out_dir = generator_filelist_paths['qualified_out_dir']
+ path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
+ gyp.common.EnsureDirExists(path)
+
+ replacement = gyp.common.RelativePath(path, build_file_dir)
+ f = gyp.common.WriteOnDiff(path)
+ for i in contents_list[1:]:
+ f.write('%s\n' % i)
+ f.close()
+
+ elif run_command:
+ use_shell = True
+ if match['is_array']:
+ contents = eval(contents)
+ use_shell = False
+
+ # Check for a cached value to avoid executing commands, or generating
+ # file lists more than once. The cache key contains the command to be
+ # run as well as the directory to run it from, to account for commands
+ # that depend on their current directory.
+ # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
+ # someone could author a set of GYP files where each time the command
+ # is invoked it produces different output by design. When the need
+      # arises, the syntax should be extended to support turning off caching
+      # of a command's output so that the command is run every time.
+ cache_key = (str(contents), build_file_dir)
+ cached_value = cached_command_results.get(cache_key, None)
+ if cached_value is None:
+ gyp.DebugOutput(gyp.DEBUG_VARIABLES,
+ "Executing command '%s' in directory '%s'",
+ contents, build_file_dir)
+
+ replacement = ''
+
+ if command_string == 'pymod_do_main':
+ # <!pymod_do_main(modulename param eters) loads |modulename| as a
+ # python module and then calls that module's DoMain() function,
+ # passing ["param", "eters"] as a single list argument. For modules
+ # that don't load quickly, this can be faster than
+ # <!(python modulename param eters). Do this in |build_file_dir|.
+ oldwd = os.getcwd() # Python doesn't like os.open('.'): no fchdir.
+ if build_file_dir: # build_file_dir may be None (see above).
+ os.chdir(build_file_dir)
+          try:
+            parsed_contents = shlex.split(contents)
+ try:
+ py_module = __import__(parsed_contents[0])
+ except ImportError as e:
+              raise GypError("Error importing pymod_do_main "
+                             "module (%s): %s" % (parsed_contents[0], e))
+ replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
+ finally:
+ os.chdir(oldwd)
+          assert replacement is not None
+ elif command_string:
+ raise GypError("Unknown command string '%s' in '%s'." %
+ (command_string, contents))
+ else:
+ # Fix up command with platform specific workarounds.
+ contents = FixupPlatformCommand(contents)
+ try:
+ p = subprocess.Popen(contents, shell=use_shell,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ cwd=build_file_dir)
+ except Exception as e:
+ raise GypError("%s while executing command '%s' in %s" %
+ (e, contents, build_file))
+
+        p_stdout, p_stderr = p.communicate(b'')
+
+ if p.wait() != 0 or p_stderr:
+ p_stderr_decoded = p_stderr.decode('utf-8')
+ sys.stderr.write(p_stderr_decoded)
+ # Simulate check_call behavior, since check_call only exists
+ # in python 2.5 and later.
+ raise GypError("Call to '%s' returned exit status %d while in %s." %
+ (contents, p.returncode, build_file))
+ replacement = p_stdout.decode('utf-8').rstrip()
+
+ cached_command_results[cache_key] = replacement
+ else:
+ gyp.DebugOutput(gyp.DEBUG_VARIABLES,
+ "Had cache value for command '%s' in directory '%s'",
+                      contents, build_file_dir)
+ replacement = cached_value
+
+ else:
+      if contents not in variables:
+ if contents[-1] in ['!', '/']:
+ # In order to allow cross-compiles (nacl) to happen more naturally,
+ # we will allow references to >(sources/) etc. to resolve to
+        # an empty list if undefined. This allows actions such as:
+ # 'action!': [
+ # '>@(_sources!)',
+ # ],
+ # 'action/': [
+ # '>@(_sources/)',
+ # ],
+ replacement = []
+ else:
+ raise GypError('Undefined variable ' + contents +
+ ' in ' + build_file)
+ else:
+ replacement = variables[contents]
+
+ if type(replacement) is list:
+ for item in replacement:
+        if contents[-1] != '/' and not isinstance(item, _str_int_types):
+ raise GypError('Variable ' + contents +
+ ' must expand to a string or list of strings; ' +
+ 'list contains a ' +
+ item.__class__.__name__)
+ # Run through the list and handle variable expansions in it. Since
+ # the list is guaranteed not to contain dicts, this won't do anything
+ # with conditions sections.
+ ProcessVariablesAndConditionsInList(replacement, phase, variables,
+ build_file)
+ elif not isinstance(replacement, _str_int_types):
+ raise GypError('Variable ' + str(contents) +
+ ' must expand to a string or list of strings; ' +
+ 'found a ' + replacement.__class__.__name__)
+
+ if expand_to_list:
+ # Expanding in list context. It's guaranteed that there's only one
+ # replacement to do in |input_str| and that it's this replacement. See
+ # above.
+ if type(replacement) is list:
+ # If it's already a list, make a copy.
+ output = replacement[:]
+ else:
+ # Split it the same way sh would split arguments.
+ output = shlex.split(str(replacement))
+ else:
+ # Expanding in string context.
+ encoded_replacement = ''
+ if type(replacement) is list:
+ # When expanding a list into string context, turn the list items
+ # into a string in a way that will work with a subprocess call.
+ #
+ # TODO(mark): This isn't completely correct. This should
+ # call a generator-provided function that observes the
+ # proper list-to-argument quoting rules on a specific
+ # platform instead of just calling the POSIX encoding
+ # routine.
+ encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
+ else:
+ encoded_replacement = replacement
+
+ output = output[:replace_start] + str(encoded_replacement) + \
+ output[replace_end:]
+ # Prepare for the next match iteration.
+ input_str = output
+
+ if output == input:
+ gyp.DebugOutput(gyp.DEBUG_VARIABLES,
+ "Found only identity matches on %r, avoiding infinite "
+ "recursion.",
+ output)
+ else:
+ # Look for more matches now that we've replaced some, to deal with
+ # expanding local variables (variables defined in the same
+ # variables block as this one).
+ gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
+ if type(output) is list:
+ if output and type(output[0]) is list:
+ # Leave output alone if it's a list of lists.
+ # We don't want such lists to be stringified.
+ pass
+ else:
+ new_output = []
+ for item in output:
+ new_output.append(
+ ExpandVariables(item, phase, variables, build_file))
+ output = new_output
+ else:
+ output = ExpandVariables(output, phase, variables, build_file)
+
+ # Convert all strings that are canonically-represented integers into integers.
+ if type(output) is list:
+ for index, outstr in enumerate(output):
+ if IsStrCanonicalInt(outstr):
+ output[index] = int(outstr)
+ elif IsStrCanonicalInt(output):
+ output = int(output)
+
+ return output
+
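+# Hedged usage sketch (variable names invented):
+#   variables = {'depth': '..', 'name': 'base'}
+#   ExpandVariables('lib<(name).a', PHASE_EARLY, variables, 'a.gyp')
+#     -> 'libbase.a'
+# A '<!' expansion would instead run its contents as a command and
+# substitute the command's output.
+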
+# The same condition is often evaluated over and over again so it
+# makes sense to cache as much as possible between evaluations.
+cached_conditions_asts = {}
+
+def EvalCondition(condition, conditions_key, phase, variables, build_file):
+ """Returns the dict that should be used or None if the result was
+ that nothing should be used."""
+ if type(condition) is not list:
+ raise GypError(conditions_key + ' must be a list')
+ if len(condition) < 2:
+    # If the condition list is empty, condition[0] below will raise its own
+    # IndexError.  That's probably fine.
+ raise GypError(conditions_key + ' ' + condition[0] +
+ ' must be at least length 2, not ' + str(len(condition)))
+
+ i = 0
+ result = None
+ while i < len(condition):
+ cond_expr = condition[i]
+ true_dict = condition[i + 1]
+ if type(true_dict) is not dict:
+ raise GypError('{} {} must be followed by a dictionary, not {}'.format(
+ conditions_key, cond_expr, type(true_dict)))
+ if len(condition) > i + 2 and type(condition[i + 2]) is dict:
+ false_dict = condition[i + 2]
+ i = i + 3
+ if i != len(condition):
+ raise GypError('{} {} has {} unexpected trailing items'.format(
+ conditions_key, cond_expr, len(condition) - i))
+ else:
+ false_dict = None
+ i = i + 2
+    if result is None:
+ result = EvalSingleCondition(
+ cond_expr, true_dict, false_dict, phase, variables, build_file)
+
+ return result
+
+
+def EvalSingleCondition(
+ cond_expr, true_dict, false_dict, phase, variables, build_file):
+ """Returns true_dict if cond_expr evaluates to true, and false_dict
+ otherwise."""
+  # Do expansions on the condition itself.  Since the condition can naturally
+ # contain variable references without needing to resort to GYP expansion
+ # syntax, this is of dubious value for variables, but someone might want to
+ # use a command expansion directly inside a condition.
+ cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
+ build_file)
+ if not isinstance(cond_expr_expanded, _str_int_types):
+ raise ValueError(
+ 'Variable expansion in this context permits str and int ' + \
+ 'only, found ' + cond_expr_expanded.__class__.__name__)
+
+ try:
+ if cond_expr_expanded in cached_conditions_asts:
+ ast_code = cached_conditions_asts[cond_expr_expanded]
+ else:
+ ast_code = compile(cond_expr_expanded, '<string>', 'eval')
+ cached_conditions_asts[cond_expr_expanded] = ast_code
+ if eval(ast_code, {'__builtins__': None}, variables):
+ return true_dict
+ return false_dict
+ except SyntaxError as e:
+ syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
+ 'at character %d.' %
+ (str(e.args[0]), e.text, build_file, e.offset),
+ e.filename, e.lineno, e.offset, e.text)
+ raise syntax_error
+ except NameError as e:
+ gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
+ (cond_expr_expanded, build_file))
+ raise GypError(e)
+
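+# Hedged sketch (not part of the original source): the condition string is
+# compiled and evaluated against |variables|, so with
+# variables = {'OS': 'mac'},
+#   EvalSingleCondition('OS=="mac"', {'x': 1}, None, PHASE_EARLY,
+#                       variables, 'a.gyp')
+# would return the true_dict, {'x': 1}.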
+
+def ProcessConditionsInDict(the_dict, phase, variables, build_file):
+ # Process a 'conditions' or 'target_conditions' section in the_dict,
+ # depending on phase.
+ # early -> conditions
+ # late -> target_conditions
+ # latelate -> no conditions
+ #
+ # Each item in a conditions list consists of cond_expr, a string expression
+ # evaluated as the condition, and true_dict, a dict that will be merged into
+ # the_dict if cond_expr evaluates to true. Optionally, a third item,
+ # false_dict, may be present. false_dict is merged into the_dict if
+ # cond_expr evaluates to false.
+ #
+ # Any dict merged into the_dict will be recursively processed for nested
+ # conditionals and other expansions, also according to phase, immediately
+ # prior to being merged.
+
+ if phase == PHASE_EARLY:
+ conditions_key = 'conditions'
+ elif phase == PHASE_LATE:
+ conditions_key = 'target_conditions'
+ elif phase == PHASE_LATELATE:
+ return
+ else:
+ assert False
+
+  if conditions_key not in the_dict:
+ return
+
+ conditions_list = the_dict[conditions_key]
+ # Unhook the conditions list, it's no longer needed.
+ del the_dict[conditions_key]
+
+ for condition in conditions_list:
+ merge_dict = EvalCondition(condition, conditions_key, phase, variables,
+ build_file)
+
+    if merge_dict is not None:
+      # Expand variables and nested conditionals in the merge_dict before
+      # merging it.
+ ProcessVariablesAndConditionsInDict(merge_dict, phase,
+ variables, build_file)
+
+ MergeDicts(the_dict, merge_dict, build_file, build_file)
+
+
+def LoadAutomaticVariablesFromDict(variables, the_dict):
+  # Any keys in the_dict with string, integer, or list values become
+  # automatic variables.  The variable name is the key name with a "_"
+  # character prepended.
+ for key, value in the_dict.items():
+ if isinstance(value, _str_int_list_types):
+ variables['_' + key] = value
+
+
+def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
+  # Any keys in the_dict's "variables" dict, if it has one, become
+  # variables.  The variable name is the key name in the "variables" dict.
+ # Variables that end with the % character are set only if they are unset in
+ # the variables dict. the_dict_key is the name of the key that accesses
+ # the_dict in the_dict's parent dict. If the_dict's parent is not a dict
+ # (it could be a list or it could be parentless because it is a root dict),
+ # the_dict_key will be None.
+ for key, value in the_dict.get('variables', {}).items():
+ if not isinstance(value, _str_int_list_types):
+ continue
+
+ if key.endswith('%'):
+ variable_name = key[:-1]
+ if variable_name in variables:
+ # If the variable is already set, don't set it.
+ continue
+ if the_dict_key == 'variables' and variable_name in the_dict:
+ # If the variable is set without a % in the_dict, and the_dict is a
+        # variables dict (making |variables| a variables sub-dict of a
+ # variables dict), use the_dict's definition.
+ value = the_dict[variable_name]
+ else:
+ variable_name = key
+
+ variables[variable_name] = value
+
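+# Hedged example (keys invented): given
+#   the_dict = {'variables': {'os%': 'linux', 'mode': 'debug'}}
+# and a pre-existing variables = {'os': 'mac'}, the '%'-suffixed default is
+# skipped (variables['os'] stays 'mac') while variables['mode'] is set to
+# 'debug'.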
+
+def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
+ build_file, the_dict_key=None):
+ """Handle all variable and command expansion and conditional evaluation.
+
+ This function is the public entry point for all variable expansions and
+ conditional evaluations. The variables_in dictionary will not be modified
+ by this function.
+ """
+
+ # Make a copy of the variables_in dict that can be modified during the
+ # loading of automatics and the loading of the variables dict.
+ variables = variables_in.copy()
+ LoadAutomaticVariablesFromDict(variables, the_dict)
+
+ if 'variables' in the_dict:
+ # Make sure all the local variables are added to the variables
+ # list before we process them so that you can reference one
+ # variable from another. They will be fully expanded by recursion
+ # in ExpandVariables.
+ for key, value in the_dict['variables'].items():
+ variables[key] = value
+
+ # Handle the associated variables dict first, so that any variable
+ # references within can be resolved prior to using them as variables.
+ # Pass a copy of the variables dict to avoid having it be tainted.
+ # Otherwise, it would have extra automatics added for everything that
+ # should just be an ordinary variable in this scope.
+ ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
+ variables, build_file, 'variables')
+
+ LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
+
+ for key, value in the_dict.items():
+ # Skip "variables", which was already processed if present.
+ if key != 'variables' and isinstance(value, _str_types):
+ expanded = ExpandVariables(value, phase, variables, build_file)
+ if not isinstance(expanded, _str_int_types):
+ raise ValueError(
+ 'Variable expansion in this context permits str and int ' + \
+ 'only, found ' + expanded.__class__.__name__ + ' for ' + key)
+ the_dict[key] = expanded
+
+ # Variable expansion may have resulted in changes to automatics. Reload.
+ # TODO(mark): Optimization: only reload if no changes were made.
+ variables = variables_in.copy()
+ LoadAutomaticVariablesFromDict(variables, the_dict)
+ LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
+
+ # Process conditions in this dict. This is done after variable expansion
+ # so that conditions may take advantage of expanded variables. For example,
+ # if the_dict contains:
+ # {'type': '<(library_type)',
+ # 'conditions': [['_type=="static_library"', { ... }]]},
+ # _type, as used in the condition, will only be set to the value of
+ # library_type if variable expansion is performed before condition
+ # processing. However, condition processing should occur prior to recursion
+ # so that variables (both automatic and "variables" dict type) may be
+ # adjusted by conditions sections, merged into the_dict, and have the
+ # intended impact on contained dicts.
+ #
+ # This arrangement means that a "conditions" section containing a "variables"
+ # section will only have those variables effective in subdicts, not in
+ # the_dict. The workaround is to put a "conditions" section within a
+ # "variables" section. For example:
+ # {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
+ # 'defines': ['<(define)'],
+ # 'my_subdict': {'defines': ['<(define)']}},
+ # will not result in "IS_MAC" being appended to the "defines" list in the
+ # current scope but would result in it being appended to the "defines" list
+ # within "my_subdict". By comparison:
+ # {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
+ # 'defines': ['<(define)'],
+ # 'my_subdict': {'defines': ['<(define)']}},
+ # will append "IS_MAC" to both "defines" lists.
+
+ # Evaluate conditions sections, allowing variable expansions within them
+ # as well as nested conditionals. This will process a 'conditions' or
+ # 'target_conditions' section, perform appropriate merging and recursive
+ # conditional and variable processing, and then remove the conditions section
+ # from the_dict if it is present.
+ ProcessConditionsInDict(the_dict, phase, variables, build_file)
+
+ # Conditional processing may have resulted in changes to automatics or the
+ # variables dict. Reload.
+ variables = variables_in.copy()
+ LoadAutomaticVariablesFromDict(variables, the_dict)
+ LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)
+
+ # Recurse into child dicts, or process child lists which may result in
+ # further recursion into descendant dicts.
+ for key, value in the_dict.items():
+ # Skip "variables" and string values, which were already processed if
+ # present.
+ if key == 'variables' or isinstance(value, _str_types):
+ continue
+ if type(value) is dict:
+ # Pass a copy of the variables dict so that subdicts can't influence
+ # parents.
+ ProcessVariablesAndConditionsInDict(value, phase, variables,
+ build_file, key)
+ elif type(value) is list:
+ # The list itself can't influence the variables dict, and
+ # ProcessVariablesAndConditionsInList will make copies of the variables
+ # dict if it needs to pass it to something that can influence it. No
+ # copy is necessary here.
+ ProcessVariablesAndConditionsInList(value, phase, variables,
+ build_file)
+ elif not isinstance(value, _int_types):
+ raise TypeError('Unknown type ' + value.__class__.__name__ + \
+ ' for ' + key)
+
+
+def ProcessVariablesAndConditionsInList(the_list, phase, variables,
+ build_file):
+ # Iterate using an index so that new values can be assigned into the_list.
+ index = 0
+ while index < len(the_list):
+ item = the_list[index]
+ if type(item) is dict:
+ # Make a copy of the variables dict so that it won't influence anything
+ # outside of its own scope.
+ ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
+ elif type(item) is list:
+ ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
+ elif isinstance(item, _str_types):
+ expanded = ExpandVariables(item, phase, variables, build_file)
+ if isinstance(expanded, _str_int_types):
+ the_list[index] = expanded
+ elif type(expanded) is list:
+ the_list[index:index+1] = expanded
+ index += len(expanded)
+
+ # index now identifies the next item to examine. Continue right now
+ # without falling into the index increment below.
+ continue
+ else:
+ raise ValueError(
+ 'Variable expansion in this context permits strings and ' + \
+ 'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
+              str(index))
+ elif not isinstance(item, _int_types):
+ raise TypeError('Unknown type ' + item.__class__.__name__ + \
+                      ' at index ' + str(index))
+ index = index + 1
+
+
+def BuildTargetsDict(data):
+ """Builds a dict mapping fully-qualified target names to their target dicts.
+
+ |data| is a dict mapping loaded build files by pathname relative to the
+ current directory. Values in |data| are build file contents. For each
+ |data| value with a "targets" key, the value of the "targets" key is taken
+ as a list containing target dicts. Each target's fully-qualified name is
+ constructed from the pathname of the build file (|data| key) and its
+ "target_name" property. These fully-qualified names are used as the keys
+ in the returned dict. These keys provide access to the target dicts,
+ the dicts in the "targets" lists.
+ """
+
+ targets = {}
+ for build_file in data['target_build_files']:
+ for target in data[build_file].get('targets', []):
+ target_name = gyp.common.QualifiedTarget(build_file,
+ target['target_name'],
+ target['toolset'])
+ if target_name in targets:
+ raise GypError('Duplicate target definitions for ' + target_name)
+ targets[target_name] = target
+
+ return targets
+
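+# Hedged note: a fully-qualified name as produced by
+# gyp.common.QualifiedTarget conventionally looks like
+#   'path/to/file.gyp:target_name#toolset'
+# (the exact form is owned by gyp.common) and is usable as a key into the
+# dict returned above.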
+
+def QualifyDependencies(targets):
+ """Make dependency links fully-qualified relative to the current directory.
+
+ |targets| is a dict mapping fully-qualified target names to their target
+ dicts. For each target in this dict, keys known to contain dependency
+ links are examined, and any dependencies referenced will be rewritten
+ so that they are fully-qualified and relative to the current directory.
+ All rewritten dependencies are suitable for use as keys to |targets| or a
+ similar dict.
+ """
+
+ all_dependency_sections = [dep + op
+ for dep in dependency_sections
+ for op in ('', '!', '/')]
+
+ for target, target_dict in targets.items():
+ target_build_file = gyp.common.BuildFile(target)
+ toolset = target_dict['toolset']
+ for dependency_key in all_dependency_sections:
+ dependencies = target_dict.get(dependency_key, [])
+ for index, dep in enumerate(dependencies):
+ dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
+ target_build_file, dep, toolset)
+ if not multiple_toolsets:
+ # Ignore toolset specification in the dependency if it is specified.
+ dep_toolset = toolset
+ dependency = gyp.common.QualifiedTarget(dep_file,
+ dep_target,
+ dep_toolset)
+ dependencies[index] = dependency
+
+ # Make sure anything appearing in a list other than "dependencies" also
+ # appears in the "dependencies" list.
+ if dependency_key != 'dependencies' and \
+ dependency not in target_dict['dependencies']:
+ raise GypError('Found ' + dependency + ' in ' + dependency_key +
+ ' of ' + target + ', but not in dependencies')
+
+
+def ExpandWildcardDependencies(targets, data):
+ """Expands dependencies specified as build_file:*.
+
+ For each target in |targets|, examines sections containing links to other
+ targets. If any such section contains a link of the form build_file:*, it
+ is taken as a wildcard link, and is expanded to list each target in
+ build_file. The |data| dict provides access to build file dicts.
+
+ Any target that does not wish to be included by wildcard can provide an
+ optional "suppress_wildcard" key in its target dict. When present and
+ true, a wildcard dependency link will not include such targets.
+
+ All dependency names, including the keys to |targets| and the values in each
+ dependency list, must be qualified when this function is called.
+ """
+
+ for target, target_dict in targets.items():
+ toolset = target_dict['toolset']
+ target_build_file = gyp.common.BuildFile(target)
+ for dependency_key in dependency_sections:
+ dependencies = target_dict.get(dependency_key, [])
+
+ # Loop this way instead of "for dependency in" or "for index in xrange"
+ # because the dependencies list will be modified within the loop body.
+ index = 0
+ while index < len(dependencies):
+ (dependency_build_file, dependency_target, dependency_toolset) = \
+ gyp.common.ParseQualifiedTarget(dependencies[index])
+ if dependency_target != '*' and dependency_toolset != '*':
+ # Not a wildcard. Keep it moving.
+ index = index + 1
+ continue
+
+ if dependency_build_file == target_build_file:
+ # It's an error for a target to depend on all other targets in
+ # the same file, because a target cannot depend on itself.
+ raise GypError('Found wildcard in ' + dependency_key + ' of ' +
+ target + ' referring to same build file')
+
+ # Take the wildcard out and adjust the index so that the next
+ # dependency in the list will be processed the next time through the
+ # loop.
+ del dependencies[index]
+ index = index - 1
+
+ # Loop through the targets in the other build file, adding them to
+ # this target's list of dependencies in place of the removed
+ # wildcard.
+ dependency_target_dicts = data[dependency_build_file]['targets']
+ for dependency_target_dict in dependency_target_dicts:
+ if int(dependency_target_dict.get('suppress_wildcard', False)):
+ continue
+ dependency_target_name = dependency_target_dict['target_name']
+ if (dependency_target != '*' and
+ dependency_target != dependency_target_name):
+ continue
+ dependency_target_toolset = dependency_target_dict['toolset']
+ if (dependency_toolset != '*' and
+ dependency_toolset != dependency_target_toolset):
+ continue
+ dependency = gyp.common.QualifiedTarget(dependency_build_file,
+ dependency_target_name,
+ dependency_target_toolset)
+ index = index + 1
+ dependencies.insert(index, dependency)
+
+ index = index + 1
+
+
+def Unify(l):
+ """Removes duplicate elements from l, keeping the first element."""
+ seen = {}
+ return [seen.setdefault(e, e) for e in l if e not in seen]
+
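+# Illustrative example (not part of the original source):
+#   Unify(['a', 'b', 'a', 'c']) -> ['a', 'b', 'c']
+# preserving the order of first occurrence.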
+
+def RemoveDuplicateDependencies(targets):
+ """Makes sure every dependency appears only once in all targets's dependency
+ lists."""
+ for target_name, target_dict in targets.items():
+ for dependency_key in dependency_sections:
+ dependencies = target_dict.get(dependency_key, [])
+ if dependencies:
+ target_dict[dependency_key] = Unify(dependencies)
+
+
+def Filter(l, item):
+ """Removes item from l."""
+ res = {}
+ return [res.setdefault(e, e) for e in l if e != item]
+
+
+def RemoveSelfDependencies(targets):
+ """Remove self dependencies from targets that have the prune_self_dependency
+ variable set."""
+ for target_name, target_dict in targets.items():
+ for dependency_key in dependency_sections:
+ dependencies = target_dict.get(dependency_key, [])
+ if dependencies:
+ for t in dependencies:
+ if t == target_name:
+ if targets[t].get('variables', {}).get('prune_self_dependency', 0):
+ target_dict[dependency_key] = Filter(dependencies, target_name)
+
+
+def RemoveLinkDependenciesFromNoneTargets(targets):
+ """Remove dependencies having the 'link_dependency' attribute from the 'none'
+ targets."""
+ for target_name, target_dict in targets.items():
+ for dependency_key in dependency_sections:
+ dependencies = target_dict.get(dependency_key, [])
+ if dependencies:
+ for t in dependencies:
+ if target_dict.get('type', None) == 'none':
+ if targets[t].get('variables', {}).get('link_dependency', 0):
+ target_dict[dependency_key] = \
+ Filter(target_dict[dependency_key], t)
+
+
+class DependencyGraphNode(object):
+ """
+
+ Attributes:
+ ref: A reference to an object that this DependencyGraphNode represents.
+ dependencies: List of DependencyGraphNodes on which this one depends.
+ dependents: List of DependencyGraphNodes that depend on this one.
+ """
+
+ class CircularException(GypError):
+ pass
+
+ def __init__(self, ref):
+ self.ref = ref
+ self.dependencies = []
+ self.dependents = []
+
+ def __repr__(self):
+ return '<DependencyGraphNode: %r>' % self.ref
+
+ def FlattenToList(self):
+    # flat_list is the topologically sorted list of dependencies - actually,
+    # the list items are the "ref" attributes of DependencyGraphNodes.
+    # Every target will
+ # appear in flat_list after all of its dependencies, and before all of its
+ # dependents.
+ flat_list = OrderedSet()
+
+ def ExtractNodeRef(node):
+ """Extracts the object that the node represents from the given node."""
+ return node.ref
+
+ # in_degree_zeros is the list of DependencyGraphNodes that have no
+ # dependencies not in flat_list. Initially, it is a copy of the children
+ # of this node, because when the graph was built, nodes with no
+ # dependencies were made implicit dependents of the root node.
+ in_degree_zeros = sorted(self.dependents[:], key=ExtractNodeRef)
+
+ while in_degree_zeros:
+ # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
+ # can be appended to flat_list. Take these nodes out of in_degree_zeros
+ # as work progresses, so that the next node to process from the list can
+ # always be accessed at a consistent position.
+ node = in_degree_zeros.pop()
+ flat_list.add(node.ref)
+
+ # Look at dependents of the node just added to flat_list. Some of them
+ # may now belong in in_degree_zeros.
+ for node_dependent in sorted(node.dependents, key=ExtractNodeRef):
+ is_in_degree_zero = True
+ # TODO: We want to check through the
+ # node_dependent.dependencies list but if it's long and we
+ # always start at the beginning, then we get O(n^2) behaviour.
+ for node_dependent_dependency in (sorted(node_dependent.dependencies,
+ key=ExtractNodeRef)):
+          if node_dependent_dependency.ref not in flat_list:
+            # The dependent has one or more dependencies not in flat_list.
+            # There
+ # will be more chances to add it to flat_list when examining
+ # it again as a dependent of those other dependencies, provided
+ # that there are no cycles.
+ is_in_degree_zero = False
+ break
+
+ if is_in_degree_zero:
+ # All of the dependent's dependencies are already in flat_list. Add
+ # it to in_degree_zeros where it will be processed in a future
+ # iteration of the outer loop.
+ in_degree_zeros += [node_dependent]
+
+ return list(flat_list)
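+
+  # Hedged illustration (not part of the original source): if node A has no
+  # dependencies and node B depends on A, the root's dependents include A,
+  # and FlattenToList() returns ['A', 'B'] -- each ref appears after all of
+  # its dependencies.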
+
+ def FindCycles(self):
+ """
+ Returns a list of cycles in the graph, where each cycle is its own list.
+ """
+ results = []
+ visited = set()
+
+ def Visit(node, path):
+ for child in node.dependents:
+ if child in path:
+ results.append([child] + path[:path.index(child) + 1])
+        elif child not in visited:
+ visited.add(child)
+ Visit(child, [child] + path)
+
+ visited.add(self)
+ Visit(self, [self])
+
+ return results
+
+ def DirectDependencies(self, dependencies=None):
+ """Returns a list of just direct dependencies."""
+    if dependencies is None:
+ dependencies = []
+
+ for dependency in self.dependencies:
+ # Check for None, corresponding to the root node.
+      if dependency.ref is not None and dependency.ref not in dependencies:
+ dependencies.append(dependency.ref)
+
+ return dependencies
+
+ def _AddImportedDependencies(self, targets, dependencies=None):
+ """Given a list of direct dependencies, adds indirect dependencies that
+ other dependencies have declared to export their settings.
+
+ This method does not operate on self. Rather, it operates on the list
+ of dependencies in the |dependencies| argument. For each dependency in
+ that list, if any declares that it exports the settings of one of its
+ own dependencies, those dependencies whose settings are "passed through"
+ are added to the list. As new items are added to the list, they too will
+ be processed, so it is possible to import settings through multiple levels
+ of dependencies.
+
+    This method is not terribly useful on its own; it depends on being
+ "primed" with a list of direct dependencies such as one provided by
+ DirectDependencies. DirectAndImportedDependencies is intended to be the
+ public entry point.
+ """
+
+    if dependencies is None:
+ dependencies = []
+
+ index = 0
+ while index < len(dependencies):
+ dependency = dependencies[index]
+ dependency_dict = targets[dependency]
+ # Add any dependencies whose settings should be imported to the list
+ # if not already present. Newly-added items will be checked for
+ # their own imports when the list iteration reaches them.
+ # Rather than simply appending new items, insert them after the
+ # dependency that exported them. This is done to more closely match
+ # the depth-first method used by DeepDependencies.
+ add_index = 1
+ for imported_dependency in \
+ dependency_dict.get('export_dependent_settings', []):
+ if imported_dependency not in dependencies:
+ dependencies.insert(index + add_index, imported_dependency)
+ add_index = add_index + 1
+ index = index + 1
+
+ return dependencies
+
+ def DirectAndImportedDependencies(self, targets, dependencies=None):
+ """Returns a list of a target's direct dependencies and all indirect
+ dependencies that a dependency has advertised settings should be exported
+ through the dependency for.
+ """
+
+ dependencies = self.DirectDependencies(dependencies)
+ return self._AddImportedDependencies(targets, dependencies)
+
+ def DeepDependencies(self, dependencies=None):
+ """Returns an OrderedSet of all of a target's dependencies, recursively."""
+ if dependencies is None:
+ # Using a list to get ordered output and a set to do fast "is it
+ # already added" checks.
+ dependencies = OrderedSet()
+
+ for dependency in self.dependencies:
+ # Check for None, corresponding to the root node.
+ if dependency.ref is None:
+ continue
+ if dependency.ref not in dependencies:
+ dependency.DeepDependencies(dependencies)
+ dependencies.add(dependency.ref)
+
+ return dependencies
+
+ def _LinkDependenciesInternal(self, targets, include_shared_libraries,
+ dependencies=None, initial=True):
+ """Returns an OrderedSet of dependency targets that are linked
+ into this target.
+
+ This function has a split personality, depending on the setting of
+ |initial|. Outside callers should always leave |initial| at its default
+ setting.
+
+ When adding a target to the list of dependencies, this function will
+ recurse into itself with |initial| set to False, to collect dependencies
+ that are linked into the linkable target for which the list is being built.
+
+ If |include_shared_libraries| is False, the resulting dependencies will not
+ include shared_library targets that are linked into this target.
+ """
+ if dependencies is None:
+ # Using a list to get ordered output and a set to do fast "is it
+ # already added" checks.
+ dependencies = OrderedSet()
+
+ # Check for None, corresponding to the root node.
+ if self.ref is None:
+ return dependencies
+
+ # It's kind of sucky that |targets| has to be passed into this function,
+ # but that's presently the easiest way to access the target dicts so that
+ # this function can find target types.
+
+ if 'target_name' not in targets[self.ref]:
+ raise GypError("Missing 'target_name' field in target.")
+
+ if 'type' not in targets[self.ref]:
+ raise GypError("Missing 'type' field in target %s" %
+ targets[self.ref]['target_name'])
+
+ target_type = targets[self.ref]['type']
+
+ is_linkable = target_type in linkable_types
+
+ if initial and not is_linkable:
+ # If this is the first target being examined and it's not linkable,
+ # return an empty list of link dependencies, because the link
+ # dependencies are intended to apply to the target itself (initial is
+ # True) and this target won't be linked.
+ return dependencies
+
+ # Don't traverse 'none' targets if explicitly excluded.
+ if (target_type == 'none' and
+ not targets[self.ref].get('dependencies_traverse', True)):
+ dependencies.add(self.ref)
+ return dependencies
+
+ # Executables, mac kernel extensions, windows drivers and loadable modules
+ # are already fully and finally linked. Nothing else can be a link
+ # dependency of them, there can only be dependencies in the sense that a
+ # dependent target might run an executable or load the loadable_module.
+ if not initial and target_type in ('executable', 'loadable_module',
+ 'mac_kernel_extension',
+ 'windows_driver'):
+ return dependencies
+
+ # Shared libraries are already fully linked. They should only be included
+ # in |dependencies| when adjusting static library dependencies (in order to
+ # link against the shared_library's import lib), but should not be included
+ # in |dependencies| when propagating link_settings.
+ # The |include_shared_libraries| flag controls which of these two cases we
+ # are handling.
+ if (not initial and target_type == 'shared_library' and
+ not include_shared_libraries):
+ return dependencies
+
+ # The target is linkable, add it to the list of link dependencies.
+ if self.ref not in dependencies:
+ dependencies.add(self.ref)
+ if initial or not is_linkable:
+ # If this is a subsequent target and it's linkable, don't look any
+ # further for linkable dependencies, as they'll already be linked into
+        # this linkable target.  Always look at dependencies of the initial
+ # target, and always look at dependencies of non-linkables.
+ for dependency in self.dependencies:
+ dependency._LinkDependenciesInternal(targets,
+ include_shared_libraries,
+ dependencies, False)
+
+ return dependencies
+
+ def DependenciesForLinkSettings(self, targets):
+ """
+ Returns a list of dependency targets whose link_settings should be merged
+ into this target.
+ """
+
+ # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
+ # link_settings are propagated. So for now, we will allow it, unless the
+ # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
+ # False. Once chrome is fixed, we can remove this flag.
+ include_shared_libraries = \
+ targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
+ return self._LinkDependenciesInternal(targets, include_shared_libraries)
+
+ def DependenciesToLinkAgainst(self, targets):
+ """
+ Returns a list of dependency targets that are linked into this target.
+ """
+ return self._LinkDependenciesInternal(targets, True)
+
+
+def BuildDependencyList(targets):
+ # Create a DependencyGraphNode for each target. Put it into a dict for easy
+ # access.
+ dependency_nodes = {}
+ for target, spec in targets.items():
+ if target not in dependency_nodes:
+ dependency_nodes[target] = DependencyGraphNode(target)
+
+ # Set up the dependency links. Targets that have no dependencies are treated
+ # as dependent on root_node.
+ root_node = DependencyGraphNode(None)
+ for target, spec in targets.items():
+ target_node = dependency_nodes[target]
+ target_build_file = gyp.common.BuildFile(target)
+ dependencies = spec.get('dependencies')
+ if not dependencies:
+ target_node.dependencies = [root_node]
+ root_node.dependents.append(target_node)
+ else:
+ for dependency in dependencies:
+ dependency_node = dependency_nodes.get(dependency)
+ if not dependency_node:
+ raise GypError("Dependency '%s' not found while "
+ "trying to load target %s" % (dependency, target))
+ target_node.dependencies.append(dependency_node)
+ dependency_node.dependents.append(target_node)
+
+ flat_list = root_node.FlattenToList()
+
+ # If there's anything left unvisited, there must be a circular dependency
+ # (cycle).
+ if len(flat_list) != len(targets):
+ if not root_node.dependents:
+ # If all targets have dependencies, add the first target as a dependent
+ # of root_node so that the cycle can be discovered from root_node.
+ target = next(iter(targets))
+ target_node = dependency_nodes[target]
+ target_node.dependencies.append(root_node)
+ root_node.dependents.append(target_node)
+
+ cycles = []
+ for cycle in root_node.FindCycles():
+ paths = [node.ref for node in cycle]
+ cycles.append('Cycle: %s' % ' -> '.join(paths))
+ raise DependencyGraphNode.CircularException(
+ 'Cycles in dependency graph detected:\n' + '\n'.join(cycles))
+
+ return [dependency_nodes, flat_list]
+
+
+def VerifyNoGYPFileCircularDependencies(targets):
+ # Create a DependencyGraphNode for each gyp file containing a target. Put
+ # it into a dict for easy access.
+ dependency_nodes = {}
+ for target in targets.keys():
+ build_file = gyp.common.BuildFile(target)
+    if build_file not in dependency_nodes:
+ dependency_nodes[build_file] = DependencyGraphNode(build_file)
+
+ # Set up the dependency links.
+ for target, spec in targets.items():
+ build_file = gyp.common.BuildFile(target)
+ build_file_node = dependency_nodes[build_file]
+ target_dependencies = spec.get('dependencies', [])
+ for dependency in target_dependencies:
+ try:
+ dependency_build_file = gyp.common.BuildFile(dependency)
+ except GypError as e:
+ gyp.common.ExceptionAppend(
+ e, 'while computing dependencies of .gyp file %s' % build_file)
+ raise
+
+ if dependency_build_file == build_file:
+ # A .gyp file is allowed to refer back to itself.
+ continue
+ dependency_node = dependency_nodes.get(dependency_build_file)
+ if not dependency_node:
+ raise GypError("Dependancy '%s' not found" % dependency_build_file)
+ if dependency_node not in build_file_node.dependencies:
+ build_file_node.dependencies.append(dependency_node)
+ dependency_node.dependents.append(build_file_node)
+
+ # Files that have no dependencies are treated as dependent on root_node.
+ root_node = DependencyGraphNode(None)
+ for build_file_node in dependency_nodes.values():
+ if len(build_file_node.dependencies) == 0:
+ build_file_node.dependencies.append(root_node)
+ root_node.dependents.append(build_file_node)
+
+ flat_list = root_node.FlattenToList()
+
+ # If there's anything left unvisited, there must be a circular dependency
+ # (cycle).
+ if len(flat_list) != len(dependency_nodes):
+ if not root_node.dependents:
+ # If all files have dependencies, add the first file as a dependent
+ # of root_node so that the cycle can be discovered from root_node.
+ file_node = next(iter(dependency_nodes.values()))
+ file_node.dependencies.append(root_node)
+ root_node.dependents.append(file_node)
+ cycles = []
+ for cycle in root_node.FindCycles():
+ paths = [node.ref for node in cycle]
+ cycles.append('Cycle: %s' % ' -> '.join(paths))
+ raise DependencyGraphNode.CircularException(
+ 'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
+
+
+def DoDependentSettings(key, flat_list, targets, dependency_nodes):
+ # key should be one of all_dependent_settings, direct_dependent_settings,
+ # or link_settings.
+
+ for target in flat_list:
+ target_dict = targets[target]
+ build_file = gyp.common.BuildFile(target)
+
+ if key == 'all_dependent_settings':
+ dependencies = dependency_nodes[target].DeepDependencies()
+ elif key == 'direct_dependent_settings':
+ dependencies = \
+ dependency_nodes[target].DirectAndImportedDependencies(targets)
+ elif key == 'link_settings':
+ dependencies = \
+ dependency_nodes[target].DependenciesForLinkSettings(targets)
+ else:
+ raise GypError("DoDependentSettings doesn't know how to determine "
+ 'dependencies for ' + key)
+
+ for dependency in dependencies:
+ dependency_dict = targets[dependency]
+      if key not in dependency_dict:
+ continue
+ dependency_build_file = gyp.common.BuildFile(dependency)
+ MergeDicts(target_dict, dependency_dict[key],
+ build_file, dependency_build_file)
+
+
+def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
+ sort_dependencies):
+ # Recompute target "dependencies" properties. For each static library
+ # target, remove "dependencies" entries referring to other static libraries,
+ # unless the dependency has the "hard_dependency" attribute set. For each
+ # linkable target, add a "dependencies" entry referring to all of the
+  # target's computed list of link dependencies (including static libraries)
+  # if no such entry is already present.
+ for target in flat_list:
+ target_dict = targets[target]
+ target_type = target_dict['type']
+
+ if target_type == 'static_library':
+      if 'dependencies' not in target_dict:
+ continue
+
+ target_dict['dependencies_original'] = target_dict.get(
+ 'dependencies', [])[:]
+
+ # A static library should not depend on another static library unless
+ # the dependency relationship is "hard," which should only be done when
+ # a dependent relies on some side effect other than just the build
+ # product, like a rule or action output. Further, if a target has a
+ # non-hard dependency, but that dependency exports a hard dependency,
+ # the non-hard dependency can safely be removed, but the exported hard
+ # dependency must be added to the target to keep the same dependency
+ # ordering.
+ dependencies = \
+ dependency_nodes[target].DirectAndImportedDependencies(targets)
+ index = 0
+ while index < len(dependencies):
+ dependency = dependencies[index]
+ dependency_dict = targets[dependency]
+
+ # Remove every non-hard static library dependency and remove every
+ # non-static library dependency that isn't a direct dependency.
+ if (dependency_dict['type'] == 'static_library' and \
+ not dependency_dict.get('hard_dependency', False)) or \
+ (dependency_dict['type'] != 'static_library' and \
+ not dependency in target_dict['dependencies']):
+ # Take the dependency out of the list, and don't increment index
+ # because the next dependency to analyze will shift into the index
+ # formerly occupied by the one being removed.
+ del dependencies[index]
+ else:
+ index = index + 1
+
+ # Update the dependencies. If the dependencies list is empty, it's not
+ # needed, so unhook it.
+ if len(dependencies) > 0:
+ target_dict['dependencies'] = dependencies
+ else:
+ del target_dict['dependencies']
+
+ elif target_type in linkable_types:
+ # Get a list of dependency targets that should be linked into this
+ # target. Add them to the dependencies list if they're not already
+ # present.
+
+ link_dependencies = \
+ dependency_nodes[target].DependenciesToLinkAgainst(targets)
+ for dependency in link_dependencies:
+ if dependency == target:
+ continue
+        if 'dependencies' not in target_dict:
+          target_dict['dependencies'] = []
+        if dependency not in target_dict['dependencies']:
+ target_dict['dependencies'].append(dependency)
+ # Sort the dependencies list in the order from dependents to dependencies.
+ # e.g. If A and B depend on C and C depends on D, sort them in A, B, C, D.
+ # Note: flat_list is already sorted in the order from dependencies to
+ # dependents.
+ if sort_dependencies and 'dependencies' in target_dict:
+ target_dict['dependencies'] = [dep for dep in reversed(flat_list)
+ if dep in target_dict['dependencies']]
+
+
+# Initialize this here to speed up MakePathRelative.
+exception_re = re.compile(r'''["']?[-/$<>^]''')
+
+
+def MakePathRelative(to_file, fro_file, item):
+ # If item is a relative path, it's relative to the build file dict that it's
+ # coming from. Fix it up to make it relative to the build file dict that
+ # it's going into.
+ # Exception: any |item| that begins with these special characters is
+ # returned without modification.
+ # / Used when a path is already absolute (shortcut optimization;
+ # such paths would be returned as absolute anyway)
+ # $ Used for build environment variables
+ # - Used for some build environment flags (such as -lapr-1 in a
+ # "libraries" section)
+ # < Used for our own variable and command expansions (see ExpandVariables)
+ # > Used for our own variable and command expansions (see ExpandVariables)
+ # ^ Used for our own variable and command expansions (see ExpandVariables)
+ #
+ # "/' Used when a value is quoted. If these are present, then we
+ # check the second character instead.
+ #
+ if to_file == fro_file or exception_re.match(item):
+ return item
+ else:
+ # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
+ # temporary measure. This should really be addressed by keeping all paths
+ # in POSIX until actual project generation.
+ ret = os.path.normpath(os.path.join(
+ gyp.common.RelativePath(os.path.dirname(fro_file),
+ os.path.dirname(to_file)),
+ item)).replace('\\', '/')
+ if item[-1] == '/':
+ ret += '/'
+ return ret
+
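+# Hedged example (paths invented): an item coming from sub/lib.gyp that is
+# merged into the top-level main.gyp is rebased onto main.gyp's directory:
+#   MakePathRelative('main.gyp', 'sub/lib.gyp', 'src/a.cc')
+#     -> 'sub/src/a.cc'
+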
+def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
+  # The Python documentation recommends that objects which do not support
+  # hashing set __hash__ to None.  Python library objects follow this rule.
+  is_hashable = lambda val: val.__hash__
+
+ # If x is hashable, returns whether x is in s. Else returns whether x is in l.
+ def is_in_set_or_list(x, s, l):
+ if is_hashable(x):
+ return x in s
+ return x in l
+
+ prepend_index = 0
+
+ # Make membership testing of hashables in |to| (in particular, strings)
+ # faster.
+ hashable_to_set = set(x for x in to if is_hashable(x))
+ for item in fro:
+ singleton = False
+ if isinstance(item, _str_int_types):
+ # The cheap and easy case.
+ if is_paths:
+ to_item = MakePathRelative(to_file, fro_file, item)
+ else:
+ to_item = item
+
+ if not (isinstance(item, _str_types) and item.startswith('-')):
+ # Any string that doesn't begin with a "-" is a singleton - it can
+ # only appear once in a list, to be enforced by the list merge append
+ # or prepend.
+ singleton = True
+ elif type(item) is dict:
+ # Make a copy of the dictionary, continuing to look for paths to fix.
+ # The other intelligent aspects of merge processing won't apply because
+ # item is being merged into an empty dict.
+ to_item = {}
+ MergeDicts(to_item, item, to_file, fro_file)
+ elif type(item) is list:
+ # Recurse, making a copy of the list. If the list contains any
+ # descendant dicts, path fixing will occur. Note that here, custom
+ # values for is_paths and append are dropped; those are only to be
+ # applied to |to| and |fro|, not sublists of |fro|. append shouldn't
+ # matter anyway because the new |to_item| list is empty.
+ to_item = []
+ MergeLists(to_item, item, to_file, fro_file)
+ else:
+ raise TypeError(
+ 'Attempt to merge list item of unsupported type ' + \
+ item.__class__.__name__)
+
+ if append:
+ # If appending a singleton that's already in the list, don't append.
+ # This ensures that the earliest occurrence of the item will stay put.
+ if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
+ to.append(to_item)
+ if is_hashable(to_item):
+ hashable_to_set.add(to_item)
+ else:
+ # If prepending a singleton that's already in the list, remove the
+ # existing instance and proceed with the prepend. This ensures that the
+ # item appears at the earliest possible position in the list.
+ while singleton and to_item in to:
+ to.remove(to_item)
+
+ # Don't just insert everything at index 0. That would prepend the new
+ # items to the list in reverse order, which would be an unwelcome
+ # surprise.
+ to.insert(prepend_index, to_item)
+ if is_hashable(to_item):
+ hashable_to_set.add(to_item)
+ prepend_index = prepend_index + 1
+
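+# Hedged sketch of the singleton rule (values invented): merging
+# fro = ['a', '-f'] into to = ['a', 'b'] with append=True leaves
+# to == ['a', 'b', '-f']; 'a' is a singleton that is already present,
+# while '-f' begins with '-' and so may appear more than once.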
+
+def MergeDicts(to, fro, to_file, fro_file):
+ # I wanted to name the parameter "from" but it's a Python keyword...
+ for k, v in fro.items():
+ # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
+ # copy semantics. Something else may want to merge from the |fro| dict
+ # later, and having the same dict ref pointed to twice in the tree isn't
+ # what anyone wants considering that the dicts may subsequently be
+ # modified.
+ if k in to:
+ bad_merge = False
+ if isinstance(v, _str_int_types):
+ if not isinstance(to[k], _str_int_types):
+ bad_merge = True
+ elif type(v) is not type(to[k]):
+ bad_merge = True
+
+ if bad_merge:
+ raise TypeError(
+ 'Attempt to merge dict value of type ' + v.__class__.__name__ + \
+ ' into incompatible type ' + to[k].__class__.__name__ + \
+ ' for key ' + k)
+ if isinstance(v, _str_int_types):
+ # Overwrite the existing value, if any. Cheap and easy.
+ is_path = IsPathSection(k)
+ if is_path:
+ to[k] = MakePathRelative(to_file, fro_file, v)
+ else:
+ to[k] = v
+ elif type(v) is dict:
+ # Recurse, guaranteeing copies will be made of objects that require it.
+      if k not in to:
+ to[k] = {}
+ MergeDicts(to[k], v, to_file, fro_file)
+ elif type(v) is list:
+ # Lists in dicts can be merged with different policies, depending on
+ # how the key in the "from" dict (k, the from-key) is written.
+ #
+ # If the from-key has ...the to-list will have this action
+ # this character appended:... applied when receiving the from-list:
+ # = replace
+ # + prepend
+ # ? set, only if to-list does not yet exist
+ # (none) append
+ #
+ # This logic is list-specific, but since it relies on the associated
+ # dict key, it's checked in this dict-oriented function.
+ ext = k[-1]
+ append = True
+ if ext == '=':
+ list_base = k[:-1]
+ lists_incompatible = [list_base, list_base + '?']
+ to[list_base] = []
+ elif ext == '+':
+ list_base = k[:-1]
+ lists_incompatible = [list_base + '=', list_base + '?']
+ append = False
+ elif ext == '?':
+ list_base = k[:-1]
+ lists_incompatible = [list_base, list_base + '=', list_base + '+']
+ else:
+ list_base = k
+ lists_incompatible = [list_base + '=', list_base + '?']
+
+ # Some combinations of merge policies appearing together are meaningless.
+ # It's stupid to replace and append simultaneously, for example. Append
+ # and prepend are the only policies that can coexist.
+ for list_incompatible in lists_incompatible:
+ if list_incompatible in fro:
+ raise GypError('Incompatible list policies ' + k + ' and ' +
+ list_incompatible)
+
+ if list_base in to:
+ if ext == '?':
+ # If the key ends in "?", the list will only be merged if it doesn't
+ # already exist.
+ continue
+ elif type(to[list_base]) is not list:
+ # This may not have been checked above if merging in a list with an
+ # extension character.
+ raise TypeError(
+ 'Attempt to merge dict value of type ' + v.__class__.__name__ + \
+ ' into incompatible type ' + to[list_base].__class__.__name__ + \
+              ' for key ' + list_base + ' (' + k + ')')
+ else:
+ to[list_base] = []
+
+      # Call MergeLists, which will make copies of objects that require it.
+      # MergeLists can recurse back into MergeDicts, but only to make copies
+      # of dicts (with paths fixed); there will be no subsequent dict
+      # "merging" once entering a list, because lists are always replaced,
+      # appended to, or prepended to.
+ is_paths = IsPathSection(list_base)
+ MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
+ else:
+ raise TypeError(
+ 'Attempt to merge dict value of unsupported type ' + \
+ v.__class__.__name__ + ' for key ' + k)
+
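+# Hedged example of the list-policy suffixes (keys invented): merging
+#   fro = {'defines+': ['EARLY'], 'cflags=': ['-O2']}
+# into
+#   to = {'defines': ['A'], 'cflags': ['-g']}
+# yields to['defines'] == ['EARLY', 'A'] (prepend) and
+# to['cflags'] == ['-O2'] (replace).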
+
+def MergeConfigWithInheritance(new_configuration_dict, build_file,
+ target_dict, configuration, visited):
+  # Skip if previously visited.
+ if configuration in visited:
+ return
+
+ # Look at this configuration.
+ configuration_dict = target_dict['configurations'][configuration]
+
+ # Merge in parents.
+ for parent in configuration_dict.get('inherit_from', []):
+ MergeConfigWithInheritance(new_configuration_dict, build_file,
+ target_dict, parent, visited + [configuration])
+
+ # Merge it into the new config.
+ MergeDicts(new_configuration_dict, configuration_dict,
+ build_file, build_file)
+
+ # Drop abstract.
+ if 'abstract' in new_configuration_dict:
+ del new_configuration_dict['abstract']
+
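+# Hedged example (configuration names invented): with
+#   'configurations': {'Common': {'abstract': 1, 'defines': ['COMMON']},
+#                      'Debug': {'inherit_from': ['Common'],
+#                                'defines': ['DBG']}}
+# merging 'Debug' produces a dict whose 'defines' list merges to
+# ['COMMON', 'DBG'] and whose 'abstract' key has been dropped.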
+
+def SetUpConfigurations(target, target_dict):
+ # key_suffixes is a list of key suffixes that might appear on key names.
+ # These suffixes are handled in conditional evaluations (for =, +, and ?)
+ # and rules/exclude processing (for ! and /). Keys with these suffixes
+ # should be treated the same as keys without.
+ key_suffixes = ['=', '+', '?', '!', '/']
+
+ build_file = gyp.common.BuildFile(target)
+
+ # Provide a single configuration by default if none exists.
+ # TODO(mark): Signal an error if default_configurations exists but
+ # configurations does not.
+ if 'configurations' not in target_dict:
+ target_dict['configurations'] = {'Default': {}}
+ if 'default_configuration' not in target_dict:
+ concrete = [i for (i, config) in target_dict['configurations'].items()
+ if not config.get('abstract')]
+ target_dict['default_configuration'] = sorted(concrete)[0]
+
+ merged_configurations = {}
+ configs = target_dict['configurations']
+ for (configuration, old_configuration_dict) in configs.items():
+ # Skip abstract configurations (saves work only).
+ if old_configuration_dict.get('abstract'):
+ continue
+ # Configurations inherit (most) settings from the enclosing target scope.
+ # Get the inheritance relationship right by making a copy of the target
+ # dict.
+ new_configuration_dict = {}
+ for (key, target_val) in target_dict.items():
+ key_ext = key[-1:]
+ if key_ext in key_suffixes:
+ key_base = key[:-1]
+ else:
+ key_base = key
+ if key_base not in non_configuration_keys:
+ new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)
+
+ # Merge in configuration (with all its parents first).
+ MergeConfigWithInheritance(new_configuration_dict, build_file,
+ target_dict, configuration, [])
+
+ merged_configurations[configuration] = new_configuration_dict
+
+ # Put the new configurations back into the target dict as a configuration.
+ for configuration in merged_configurations.keys():
+ target_dict['configurations'][configuration] = (
+ merged_configurations[configuration])
+
+ # Now drop all the abstract ones.
+ configs = target_dict['configurations']
+ target_dict['configurations'] = \
+ {k: v for k, v in configs.items() if not v.get('abstract')}
+
+ # Now that all of the target's configurations have been built, go through
+ # the target dict's keys and remove everything that's been moved into a
+ # "configurations" section.
+ delete_keys = []
+ for key in target_dict:
+ key_ext = key[-1:]
+ if key_ext in key_suffixes:
+ key_base = key[:-1]
+ else:
+ key_base = key
+ if key_base not in non_configuration_keys:
+ delete_keys.append(key)
+ for key in delete_keys:
+ del target_dict[key]
+
+ # Check the configurations to see if they contain invalid keys.
+ for configuration in target_dict['configurations'].keys():
+ configuration_dict = target_dict['configurations'][configuration]
+ for key in configuration_dict.keys():
+ if key in invalid_configuration_keys:
+ raise GypError('%s not allowed in the %s configuration, found in '
+ 'target %s' % (key, configuration, target))
+
+
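+# Editor's sketch (illustrative, not part of upstream gyp): with
+#   target_dict = {'type': 'executable', 'defines': ['GLOBAL'],
+#                  'configurations': {'Debug': {'defines': ['DBG']}}}
+# the merged 'Debug' configuration receives defines ['GLOBAL', 'DBG'], and
+# 'defines' is then removed from the top level of target_dict; 'type'
+# survives because it is a non-configuration key.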
+
+def ProcessListFiltersInDict(name, the_dict):
+ """Process regular expression and exclusion-based filters on lists.
+
+ An exclusion list is in a dict key named with a trailing "!", like
+ "sources!". Every item in such a list is removed from the associated
+ main list, which, in this example, would be "sources". Removed items are
+ placed into a "sources_excluded" list in the dict.
+
+ Regular expression (regex) filters are contained in dict keys named with a
+ trailing "/", such as "sources/" to operate on the "sources" list. Regex
+ filters in a dict take the form:
+ 'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
+ ['include', '_mac\\.cc$'] ],
+ The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
+ _win.cc. The second filter then includes all files ending in _mac.cc that
+ are now or were once in the "sources" list. Items matching an "exclude"
+ filter are subject to the same processing as would occur if they were listed
+ by name in an exclusion list (ending in "!"). Items matching an "include"
+ filter are brought back into the main list if previously excluded by an
+ exclusion list or exclusion regex filter. Subsequent matching "exclude"
+ patterns can still cause items to be excluded after matching an "include".
+ """
+
+ # Look through the dictionary for any lists whose keys end in "!" or "/".
+ # These are lists that will be treated as exclude lists and regular
+ # expression-based exclude/include lists. Collect the lists that are
+ # needed first, looking for the lists that they operate on, and assemble
+ # them into |lists|. This is done in a separate loop up front, because
+ # the _excluded keys need to be added to the_dict, and that
+ # can't be done while iterating through it.
+
+ lists = []
+ del_lists = []
+ for key, value in the_dict.items():
+ operation = key[-1]
+ if operation != '!' and operation != '/':
+ continue
+
+ if type(value) is not list:
+ raise ValueError(name + ' key ' + key + ' must be list, not ' + \
+ value.__class__.__name__)
+
+ list_key = key[:-1]
+ if list_key not in the_dict:
+ # This happens when there's a list like "sources!" but no corresponding
+ # "sources" list. Since there's nothing for it to operate on, queue up
+ # the "sources!" list for deletion now.
+ del_lists.append(key)
+ continue
+
+ if type(the_dict[list_key]) is not list:
+ value = the_dict[list_key]
+ raise ValueError(name + ' key ' + list_key + \
+ ' must be list, not ' + \
+ value.__class__.__name__ + ' when applying ' + \
+ {'!': 'exclusion', '/': 'regex'}[operation])
+
+ if list_key not in lists:
+ lists.append(list_key)
+
+ # Delete the lists that are known to be unneeded at this point.
+ for del_list in del_lists:
+ del the_dict[del_list]
+
+ for list_key in lists:
+ the_list = the_dict[list_key]
+
+ # Initialize the list_actions list, which is parallel to the_list. Each
+ # item in list_actions identifies whether the corresponding item in
+ # the_list should be excluded, unconditionally preserved (included), or
+ # whether no exclusion or inclusion has been applied. Items for which
+ # no exclusion or inclusion has been applied (yet) have value -1, items
+ # excluded have value 0, and items included have value 1. Includes and
+ # excludes override previous actions. All items in list_actions are
+ # initialized to -1 because no excludes or includes have been processed
+ # yet.
+ list_actions = [-1] * len(the_list)
+
+ exclude_key = list_key + '!'
+ if exclude_key in the_dict:
+ for exclude_item in the_dict[exclude_key]:
+ for index, list_item in enumerate(the_list):
+ if exclude_item == list_item:
+ # This item matches the exclude_item, so set its action to 0
+ # (exclude).
+ list_actions[index] = 0
+
+ # The "whatever!" list is no longer needed, dump it.
+ del the_dict[exclude_key]
+
+ regex_key = list_key + '/'
+ if regex_key in the_dict:
+ for regex_item in the_dict[regex_key]:
+ [action, pattern] = regex_item
+ pattern_re = re.compile(pattern)
+
+ if action == 'exclude':
+ # This item matches an exclude regex, so set its value to 0 (exclude).
+ action_value = 0
+ elif action == 'include':
+ # This item matches an include regex, so set its value to 1 (include).
+ action_value = 1
+ else:
+ # This is an action that doesn't make any sense.
+ raise ValueError('Unrecognized action ' + action + ' in ' + name + \
+ ' key ' + regex_key)
+
+ for index, list_item in enumerate(the_list):
+ if list_actions[index] == action_value:
+ # Even if the regex matches, nothing will change so continue (regex
+ # searches are expensive).
+ continue
+ if pattern_re.search(list_item):
+ # Regular expression match.
+ list_actions[index] = action_value
+
+ # The "whatever/" list is no longer needed, dump it.
+ del the_dict[regex_key]
+
+ # Add excluded items to the excluded list.
+ #
+ # Note that exclude_key ("sources!") is different from excluded_key
+ # ("sources_excluded"). The exclude_key list is input and it was already
+ # processed and deleted; the excluded_key list is output and it's about
+ # to be created.
+ excluded_key = list_key + '_excluded'
+ if excluded_key in the_dict:
+ raise GypError(name + ' key ' + excluded_key +
+ ' must not be present prior to applying '
+ 'exclusion/regex filters for ' + list_key)
+
+ excluded_list = []
+
+ # Go backwards through the list_actions list so that as items are deleted,
+ # the indices of items that haven't been seen yet don't shift. That means
+ # that things need to be prepended to excluded_list to maintain them in the
+ # same order that they existed in the_list.
+ for index in range(len(list_actions) - 1, -1, -1):
+ if list_actions[index] == 0:
+ # Dump anything with action 0 (exclude). Keep anything with action 1
+ # (include) or -1 (no include or exclude seen for the item).
+ excluded_list.insert(0, the_list[index])
+ del the_list[index]
+
+ # If anything was excluded, put the excluded list into the_dict at
+ # excluded_key.
+ if len(excluded_list) > 0:
+ the_dict[excluded_key] = excluded_list
+
+ # Now recurse into subdicts and lists that may contain dicts.
+ for key, value in the_dict.items():
+ if type(value) is dict:
+ ProcessListFiltersInDict(key, value)
+ elif type(value) is list:
+ ProcessListFiltersInList(key, value)
+
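+# Editor's sketch (illustrative, not part of upstream gyp):
+#   >>> d = {'sources': ['a_linux.cc', 'b_mac.cc', 'c.cc'],
+#   ...      'sources!': ['c.cc'],
+#   ...      'sources/': [['exclude', '_(linux|mac)\\.cc$'],
+#   ...                   ['include', '_mac\\.cc$']]}
+#   >>> ProcessListFiltersInDict('example', d)
+#   >>> d['sources']
+#   ['b_mac.cc']
+#   >>> d['sources_excluded']
+#   ['a_linux.cc', 'c.cc']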
+
+def ProcessListFiltersInList(name, the_list):
+ for item in the_list:
+ if type(item) is dict:
+ ProcessListFiltersInDict(name, item)
+ elif type(item) is list:
+ ProcessListFiltersInList(name, item)
+
+
+def ValidateTargetType(target, target_dict):
+ """Ensures the 'type' field on the target is one of the known types.
+
+ Arguments:
+ target: string, name of target.
+ target_dict: dict, target spec.
+
+ Raises an exception on error.
+ """
+ VALID_TARGET_TYPES = ('executable', 'loadable_module',
+ 'static_library', 'shared_library',
+ 'mac_kernel_extension', 'none', 'windows_driver')
+ target_type = target_dict.get('type', None)
+ if target_type not in VALID_TARGET_TYPES:
+ raise GypError("Target %s has an invalid target type '%s'. "
+ "Must be one of %s." %
+ (target, target_type, '/'.join(VALID_TARGET_TYPES)))
+ if (target_dict.get('standalone_static_library', 0) and
+ not target_type == 'static_library'):
+ raise GypError('Target %s has type %s but standalone_static_library flag is'
+ ' only valid for static_library type.' % (target,
+ target_type))
+
+
+def ValidateSourcesInTarget(target, target_dict, build_file,
+ duplicate_basename_check):
+ if not duplicate_basename_check:
+ return
+ if target_dict.get('type', None) != 'static_library':
+ return
+ sources = target_dict.get('sources', [])
+ basenames = {}
+ for source in sources:
+ name, ext = os.path.splitext(source)
+ is_compiled_file = ext in [
+ '.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
+ if not is_compiled_file:
+ continue
+ basename = os.path.basename(name) # Don't include extension.
+ basenames.setdefault(basename, []).append(source)
+
+ error = ''
+ for basename, files in basenames.items():
+ if len(files) > 1:
+ error += ' %s: %s\n' % (basename, ' '.join(files))
+
+ if error:
+ print('static library %s has several files with the same basename:\n' %
+ target + error + 'libtool on Mac cannot handle that. Use '
+ '--no-duplicate-basename-check to disable this validation.')
+ raise GypError('Duplicate basenames in sources section, see list above')
+
+
+def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
+ """Ensures that the rules sections in target_dict are valid and consistent,
+ and determines which sources they apply to.
+
+ Arguments:
+ target: string, name of target.
+ target_dict: dict, target spec containing "rules" and "sources" lists.
+ extra_sources_for_rules: a list of keys to scan for rule matches in
+ addition to 'sources'.
+ """
+
+ # Dicts to map between values found in rules' 'rule_name' and 'extension'
+ # keys and the rule dicts themselves.
+ rule_names = {}
+ rule_extensions = {}
+
+ rules = target_dict.get('rules', [])
+ for rule in rules:
+ # Make sure that there's no conflict among rule names and extensions.
+ rule_name = rule['rule_name']
+ if rule_name in rule_names:
+ raise GypError('rule %s exists in duplicate, target %s' %
+ (rule_name, target))
+ rule_names[rule_name] = rule
+
+ rule_extension = rule['extension']
+ if rule_extension.startswith('.'):
+ rule_extension = rule_extension[1:]
+ if rule_extension in rule_extensions:
+ raise GypError(('extension %s associated with multiple rules, ' +
+ 'target %s rules %s and %s') %
+ (rule_extension, target,
+ rule_extensions[rule_extension]['rule_name'],
+ rule_name))
+ rule_extensions[rule_extension] = rule
+
+ # Make sure rule_sources isn't already there. It's going to be
+ # created below if needed.
+ if 'rule_sources' in rule:
+ raise GypError(
+ 'rule_sources must not exist in input, target %s rule %s' %
+ (target, rule_name))
+
+ rule_sources = []
+ source_keys = ['sources']
+ source_keys.extend(extra_sources_for_rules)
+ for source_key in source_keys:
+ for source in target_dict.get(source_key, []):
+ (source_root, source_extension) = os.path.splitext(source)
+ if source_extension.startswith('.'):
+ source_extension = source_extension[1:]
+ if source_extension == rule_extension:
+ rule_sources.append(source)
+
+ if len(rule_sources) > 0:
+ rule['rule_sources'] = rule_sources
+
+
+def ValidateRunAsInTarget(target, target_dict, build_file):
+ target_name = target_dict.get('target_name')
+ run_as = target_dict.get('run_as')
+ if not run_as:
+ return
+ if type(run_as) is not dict:
+ raise GypError("The 'run_as' in target %s from file %s should be a "
+ "dictionary." %
+ (target_name, build_file))
+ action = run_as.get('action')
+ if not action:
+ raise GypError("The 'run_as' in target %s from file %s must have an "
+ "'action' section." %
+ (target_name, build_file))
+ if type(action) is not list:
+ raise GypError("The 'action' for 'run_as' in target %s from file %s "
+ "must be a list." %
+ (target_name, build_file))
+ working_directory = run_as.get('working_directory')
+ if working_directory and not isinstance(working_directory, _str_types):
+ raise GypError("The 'working_directory' for 'run_as' in target %s "
+ "in file %s should be a string." %
+ (target_name, build_file))
+ environment = run_as.get('environment')
+ if environment and type(environment) is not dict:
+ raise GypError("The 'environment' for 'run_as' in target %s "
+ "in file %s should be a dictionary." %
+ (target_name, build_file))
+
+
+def ValidateActionsInTarget(target, target_dict, build_file):
+ """Validates the inputs to the actions in a target."""
+ target_name = target_dict.get('target_name')
+ actions = target_dict.get('actions', [])
+ for action in actions:
+ action_name = action.get('action_name')
+ if not action_name:
+ raise GypError("Anonymous action in target %s. "
+ "An action must have an 'action_name' field." %
+ target_name)
+ inputs = action.get('inputs', None)
+ if inputs is None:
+ raise GypError('Action in target %s has no inputs.' % target_name)
+ action_command = action.get('action')
+ if action_command and not action_command[0]:
+ raise GypError("Empty action as command in target %s." % target_name)
+
+
+def TurnIntIntoStrInDict(the_dict):
+ """Given dict the_dict, recursively converts all integers into strings.
+ """
+ # Iterate over a snapshot of the items: keys may be deleted and reinserted
+ # below, and mutating a dict while iterating over a live view raises
+ # RuntimeError on Python 3.
+ for k, v in list(the_dict.items()):
+ if isinstance(v, _int_types):
+ v = str(v)
+ the_dict[k] = v
+ elif type(v) is dict:
+ TurnIntIntoStrInDict(v)
+ elif type(v) is list:
+ TurnIntIntoStrInList(v)
+
+ if isinstance(k, _int_types):
+ del the_dict[k]
+ the_dict[str(k)] = v
+
+
+def TurnIntIntoStrInList(the_list):
+ """Given list the_list, recursively converts all integers into strings.
+ """
+ for index, item in enumerate(the_list):
+ if isinstance(item, _int_types):
+ the_list[index] = str(item)
+ elif type(item) is dict:
+ TurnIntIntoStrInDict(item)
+ elif type(item) is list:
+ TurnIntIntoStrInList(item)
+
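+# Editor's sketch (illustrative, not part of upstream gyp):
+#   >>> d = {'defines': [1, 'TWO'], 3: {'port': 8000}}
+#   >>> TurnIntIntoStrInDict(d)
+#   >>> d == {'defines': ['1', 'TWO'], '3': {'port': '8000'}}
+#   True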
+
+def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
+ data):
+ """Return only the targets that are deep dependencies of |root_targets|."""
+ qualified_root_targets = []
+ for target in root_targets:
+ target = target.strip()
+ qualified_targets = gyp.common.FindQualifiedTargets(target, flat_list)
+ if not qualified_targets:
+ raise GypError("Could not find target %s" % target)
+ qualified_root_targets.extend(qualified_targets)
+
+ wanted_targets = {}
+ for target in qualified_root_targets:
+ wanted_targets[target] = targets[target]
+ for dependency in dependency_nodes[target].DeepDependencies():
+ wanted_targets[dependency] = targets[dependency]
+
+ wanted_flat_list = [t for t in flat_list if t in wanted_targets]
+
+ # Prune unwanted targets from each build_file's data dict.
+ for build_file in data['target_build_files']:
+ if 'targets' not in data[build_file]:
+ continue
+ new_targets = []
+ for target in data[build_file]['targets']:
+ qualified_name = gyp.common.QualifiedTarget(build_file,
+ target['target_name'],
+ target['toolset'])
+ if qualified_name in wanted_targets:
+ new_targets.append(target)
+ data[build_file]['targets'] = new_targets
+
+ return wanted_targets, wanted_flat_list
+
+
+def VerifyNoCollidingTargets(targets):
+ """Verify that no two targets in the same directory share the same name.
+
+ Arguments:
+ targets: A list of targets in the form 'path/to/file.gyp:target_name'.
+ """
+ # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
+ used = {}
+ for target in targets:
+ # Separate out 'path/to/file.gyp', 'target_name' from
+ # 'path/to/file.gyp:target_name'.
+ path, name = target.rsplit(':', 1)
+ # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
+ subdir, gyp = os.path.split(path)
+ # Use '.' for the current directory '', so that the error messages make
+ # more sense.
+ if not subdir:
+ subdir = '.'
+ # Prepare a key like 'path/to:target_name'.
+ key = subdir + ':' + name
+ if key in used:
+ # Complain if this target is already used.
+ raise GypError('Duplicate target name "%s" in directory "%s" used both '
+ 'in "%s" and "%s".' % (name, subdir, gyp, used[key]))
+ used[key] = gyp
+
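+# Editor's sketch (illustrative, not part of upstream gyp): two targets named
+# 'foo' in the same directory collide even though they live in different
+# .gyp files, so
+#   VerifyNoCollidingTargets(['sub/a.gyp:foo', 'sub/b.gyp:foo'])
+# raises GypError('Duplicate target name "foo" in directory "sub" ...').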
+
+def SetGeneratorGlobals(generator_input_info):
+ # Set up path_sections and non_configuration_keys with the default data plus
+ # the generator-specific data.
+ global path_sections
+ path_sections = set(base_path_sections)
+ path_sections.update(generator_input_info['path_sections'])
+
+ global non_configuration_keys
+ non_configuration_keys = base_non_configuration_keys[:]
+ non_configuration_keys.extend(generator_input_info['non_configuration_keys'])
+
+ global multiple_toolsets
+ multiple_toolsets = generator_input_info[
+ 'generator_supports_multiple_toolsets']
+
+ global generator_filelist_paths
+ generator_filelist_paths = generator_input_info['generator_filelist_paths']
+
+
+def Load(build_files, variables, includes, depth, generator_input_info, check,
+ circular_check, duplicate_basename_check, parallel, root_targets):
+ SetGeneratorGlobals(generator_input_info)
+ # A generator can have lists other than "sources" processed for rules.
+ extra_sources_for_rules = generator_input_info['extra_sources_for_rules']
+
+ # Load build files. This loads every target-containing build file into
+ # the |data| dictionary such that the keys to |data| are build file names,
+ # and the values are the entire build file contents after "early" or "pre"
+ # processing has been done and includes have been resolved.
+ # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
+ # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
+ # track of the keys corresponding to "target" files.
+ data = {'target_build_files': set()}
+ # Normalize paths everywhere. This is important because paths will be
+ # used as keys to the data dict and for references between input files.
+ build_files = set(map(os.path.normpath, build_files))
+ if parallel:
+ LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
+ check, generator_input_info)
+ else:
+ aux_data = {}
+ for build_file in build_files:
+ try:
+ LoadTargetBuildFile(build_file, data, aux_data,
+ variables, includes, depth, check, True)
+ except Exception as e:
+ gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
+ raise
+
+ # Build a dict to access each target's subdict by qualified name.
+ targets = BuildTargetsDict(data)
+
+ # Fully qualify all dependency links.
+ QualifyDependencies(targets)
+
+ # Remove self-dependencies from targets that have 'prune_self_dependencies'
+ # set to 1.
+ RemoveSelfDependencies(targets)
+
+ # Expand dependencies specified as build_file:*.
+ ExpandWildcardDependencies(targets, data)
+
+ # Remove all dependencies marked as 'link_dependency' from the targets of
+ # type 'none'.
+ RemoveLinkDependenciesFromNoneTargets(targets)
+
+ # Apply exclude (!) and regex (/) list filters only for dependency_sections.
+ for target_name, target_dict in targets.items():
+ tmp_dict = {}
+ for key_base in dependency_sections:
+ for op in ('', '!', '/'):
+ key = key_base + op
+ if key in target_dict:
+ tmp_dict[key] = target_dict[key]
+ del target_dict[key]
+ ProcessListFiltersInDict(target_name, tmp_dict)
+ # Write the results back to |target_dict|.
+ for key in tmp_dict:
+ target_dict[key] = tmp_dict[key]
+
+ # Make sure every dependency appears at most once.
+ RemoveDuplicateDependencies(targets)
+
+ if circular_check:
+ # Make sure that any targets in a.gyp don't contain dependencies in other
+ # .gyp files that further depend on a.gyp.
+ VerifyNoGYPFileCircularDependencies(targets)
+
+ [dependency_nodes, flat_list] = BuildDependencyList(targets)
+
+ if root_targets:
+ # Remove, from |targets| and |flat_list|, the targets that are not deep
+ # dependencies of the targets specified in |root_targets|.
+ targets, flat_list = PruneUnwantedTargets(
+ targets, flat_list, dependency_nodes, root_targets, data)
+
+ # Check that no two targets in the same directory have the same name.
+ VerifyNoCollidingTargets(flat_list)
+
+ # Handle dependent settings of various types.
+ for settings_type in ['all_dependent_settings',
+ 'direct_dependent_settings',
+ 'link_settings']:
+ DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)
+
+ # Take out the dependent settings now that they've been published to all
+ # of the targets that require them.
+ for target in flat_list:
+ if settings_type in targets[target]:
+ del targets[target][settings_type]
+
+ # Make sure static libraries don't declare dependencies on other static
+ # libraries, but that linkables depend on all unlinked static libraries
+ # that they need so that their link steps will be correct.
+ gii = generator_input_info
+ if gii['generator_wants_static_library_dependencies_adjusted']:
+ AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
+ gii['generator_wants_sorted_dependencies'])
+
+ # Apply "post"/"late"/"target" variable expansions and condition evaluations.
+ for target in flat_list:
+ target_dict = targets[target]
+ build_file = gyp.common.BuildFile(target)
+ ProcessVariablesAndConditionsInDict(
+ target_dict, PHASE_LATE, variables, build_file)
+
+ # Move everything that can go into a "configurations" section into one.
+ for target in flat_list:
+ target_dict = targets[target]
+ SetUpConfigurations(target, target_dict)
+
+ # Apply exclude (!) and regex (/) list filters.
+ for target in flat_list:
+ target_dict = targets[target]
+ ProcessListFiltersInDict(target, target_dict)
+
+ # Apply "latelate" variable expansions and condition evaluations.
+ for target in flat_list:
+ target_dict = targets[target]
+ build_file = gyp.common.BuildFile(target)
+ ProcessVariablesAndConditionsInDict(
+ target_dict, PHASE_LATELATE, variables, build_file)
+
+ # Make sure that the rules make sense, and build up rule_sources lists as
+ # needed. Not all generators will need to use the rule_sources lists, but
+ # some may, and it seems best to build the list in a common spot.
+ # Also validate actions and run_as elements in targets.
+ for target in flat_list:
+ target_dict = targets[target]
+ build_file = gyp.common.BuildFile(target)
+ ValidateTargetType(target, target_dict)
+ ValidateSourcesInTarget(target, target_dict, build_file,
+ duplicate_basename_check)
+ ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
+ ValidateRunAsInTarget(target, target_dict, build_file)
+ ValidateActionsInTarget(target, target_dict, build_file)
+
+ # Generators might not expect ints. Turn them into strs.
+ TurnIntIntoStrInDict(data)
+
+ # TODO(mark): Return |data| for now because the generator needs a list of
+ # build files that came in. In the future, maybe it should just accept
+ # a list, and not the whole data dict.
+ return [flat_list, targets, data]
diff --git a/third_party/python/gyp/pylib/gyp/input_test.py b/third_party/python/gyp/pylib/gyp/input_test.py
new file mode 100755
index 0000000000..6c4b1cc526
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/input_test.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python
+
+# Copyright 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Unit tests for the input.py file."""
+
+import gyp.input
+import unittest
+
+
+class TestFindCycles(unittest.TestCase):
+ def setUp(self):
+ self.nodes = {}
+ for x in ('a', 'b', 'c', 'd', 'e'):
+ self.nodes[x] = gyp.input.DependencyGraphNode(x)
+
+ def _create_dependency(self, dependent, dependency):
+ dependent.dependencies.append(dependency)
+ dependency.dependents.append(dependent)
+
+ def test_no_cycle_empty_graph(self):
+ for label, node in self.nodes.items():
+ self.assertEqual([], node.FindCycles())
+
+ def test_no_cycle_line(self):
+ self._create_dependency(self.nodes['a'], self.nodes['b'])
+ self._create_dependency(self.nodes['b'], self.nodes['c'])
+ self._create_dependency(self.nodes['c'], self.nodes['d'])
+
+ for label, node in self.nodes.items():
+ self.assertEqual([], node.FindCycles())
+
+ def test_no_cycle_dag(self):
+ self._create_dependency(self.nodes['a'], self.nodes['b'])
+ self._create_dependency(self.nodes['a'], self.nodes['c'])
+ self._create_dependency(self.nodes['b'], self.nodes['c'])
+
+ for label, node in self.nodes.items():
+ self.assertEqual([], node.FindCycles())
+
+ def test_cycle_self_reference(self):
+ self._create_dependency(self.nodes['a'], self.nodes['a'])
+
+ self.assertEqual([[self.nodes['a'], self.nodes['a']]],
+ self.nodes['a'].FindCycles())
+
+ def test_cycle_two_nodes(self):
+ self._create_dependency(self.nodes['a'], self.nodes['b'])
+ self._create_dependency(self.nodes['b'], self.nodes['a'])
+
+ self.assertEqual([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
+ self.nodes['a'].FindCycles())
+ self.assertEqual([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
+ self.nodes['b'].FindCycles())
+
+ def test_two_cycles(self):
+ self._create_dependency(self.nodes['a'], self.nodes['b'])
+ self._create_dependency(self.nodes['b'], self.nodes['a'])
+
+ self._create_dependency(self.nodes['b'], self.nodes['c'])
+ self._create_dependency(self.nodes['c'], self.nodes['b'])
+
+ cycles = self.nodes['a'].FindCycles()
+ self.assertTrue(
+ [self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
+ self.assertTrue(
+ [self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
+ self.assertEqual(2, len(cycles))
+
+ def test_big_cycle(self):
+ self._create_dependency(self.nodes['a'], self.nodes['b'])
+ self._create_dependency(self.nodes['b'], self.nodes['c'])
+ self._create_dependency(self.nodes['c'], self.nodes['d'])
+ self._create_dependency(self.nodes['d'], self.nodes['e'])
+ self._create_dependency(self.nodes['e'], self.nodes['a'])
+
+ self.assertEqual([[self.nodes['a'],
+ self.nodes['b'],
+ self.nodes['c'],
+ self.nodes['d'],
+ self.nodes['e'],
+ self.nodes['a']]],
+ self.nodes['a'].FindCycles())
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/python/gyp/pylib/gyp/mac_tool.py b/third_party/python/gyp/pylib/gyp/mac_tool.py
new file mode 100755
index 0000000000..64d21063ff
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/mac_tool.py
@@ -0,0 +1,721 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility functions to perform Xcode-style build steps.
+
+These functions are executed via gyp-mac-tool when using the Makefile generator.
+"""
+
+from __future__ import print_function
+
+import fcntl
+import fnmatch
+import glob
+import json
+import os
+import plistlib
+import re
+import shutil
+import struct
+import subprocess
+import sys
+import tempfile
+
+
+def main(args):
+ executor = MacTool()
+ exit_code = executor.Dispatch(args)
+ if exit_code is not None:
+ sys.exit(exit_code)
+
+
+class MacTool(object):
+ """This class performs all the Mac tooling steps. The methods can either be
+ executed directly, or dispatched from an argument list."""
+
+ def Dispatch(self, args):
+ """Dispatches a string command to a method."""
+ if len(args) < 1:
+ raise Exception("Not enough arguments")
+
+ method = "Exec%s" % self._CommandifyName(args[0])
+ return getattr(self, method)(*args[1:])
+
+ def _CommandifyName(self, name_string):
+ """Transforms a tool name like copy-info-plist to CopyInfoPlist"""
+ return name_string.title().replace('-', '')
+
+ def ExecCopyBundleResource(self, source, dest, convert_to_binary):
+ """Copies a resource file to the bundle/Resources directory, performing any
+ necessary compilation on each resource."""
+ convert_to_binary = convert_to_binary == 'True'
+ extension = os.path.splitext(source)[1].lower()
+ if os.path.isdir(source):
+ # Copy tree.
+ # TODO(thakis): This copies file attributes like mtime, while the
+ # single-file branch below doesn't. This should probably be changed to
+ # be consistent with the single-file branch.
+ if os.path.exists(dest):
+ shutil.rmtree(dest)
+ shutil.copytree(source, dest)
+ elif extension == '.xib':
+ return self._CopyXIBFile(source, dest)
+ elif extension == '.storyboard':
+ return self._CopyXIBFile(source, dest)
+ elif extension == '.strings' and not convert_to_binary:
+ self._CopyStringsFile(source, dest)
+ else:
+ if os.path.exists(dest):
+ os.unlink(dest)
+ shutil.copy(source, dest)
+
+ if convert_to_binary and extension in ('.plist', '.strings'):
+ self._ConvertToBinary(dest)
+
+ def _CopyXIBFile(self, source, dest):
+ """Compiles a XIB file with ibtool into a binary plist in the bundle."""
+
+ # ibtool sometimes crashes with relative paths. See crbug.com/314728.
+ base = os.path.dirname(os.path.realpath(__file__))
+ if os.path.relpath(source):
+ source = os.path.join(base, source)
+ if os.path.relpath(dest):
+ dest = os.path.join(base, dest)
+
+ args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices']
+
+ if os.environ['XCODE_VERSION_ACTUAL'] > '0700':
+ args.extend(['--auto-activate-custom-fonts'])
+ if 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ:
+ args.extend([
+ '--target-device', 'iphone', '--target-device', 'ipad',
+ '--minimum-deployment-target',
+ os.environ['IPHONEOS_DEPLOYMENT_TARGET'],
+ ])
+ else:
+ args.extend([
+ '--target-device', 'mac',
+ '--minimum-deployment-target',
+ os.environ['MACOSX_DEPLOYMENT_TARGET'],
+ ])
+
+ args.extend(['--output-format', 'human-readable-text', '--compile', dest,
+ source])
+
+ ibtool_section_re = re.compile(r'/\*.*\*/')
+ ibtool_re = re.compile(r'.*note:.*is clipping its content')
+ try:
+ stdout = subprocess.check_output(args)
+ except subprocess.CalledProcessError as e:
+ print(e.output)
+ raise
+ current_section_header = None
+ for line in stdout.splitlines():
+ line_decoded = line.decode('utf-8')
+ if ibtool_section_re.match(line_decoded):
+ current_section_header = line_decoded
+ elif not ibtool_re.match(line_decoded):
+ if current_section_header:
+ print(current_section_header)
+ current_section_header = None
+ print(line_decoded)
+ return 0
+
+ def _ConvertToBinary(self, dest):
+ subprocess.check_call([
+ 'xcrun', 'plutil', '-convert', 'binary1', '-o', dest, dest])
+
+ def _CopyStringsFile(self, source, dest):
+ """Copies a .strings file using iconv to reconvert the input into UTF-16."""
+ input_code = self._DetectInputEncoding(source) or "UTF-8"
+
+ # Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
+ # CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
+ # CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
+ # semicolon in dictionary.
+ # on invalid files. Do the same kind of validation.
+ import CoreFoundation
+ s = open(source, 'rb').read()
+ d = CoreFoundation.CFDataCreate(None, s, len(s))
+ _, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
+ if error:
+ return
+
+ fp = open(dest, 'wb')
+ fp.write(s.decode(input_code).encode('UTF-16'))
+ fp.close()
+
+ def _DetectInputEncoding(self, file_name):
+ """Reads the first few bytes from file_name and tries to guess the text
+ encoding. Returns None as a guess if it can't detect it."""
+ fp = open(file_name, 'rb')
+ try:
+ header = fp.read(3)
+ except Exception:
+ fp.close()
+ return None
+ fp.close()
+ if header.startswith(b"\xFE\xFF"):
+ return "UTF-16"
+ elif header.startswith(b"\xFF\xFE"):
+ return "UTF-16"
+ elif header.startswith(b"\xEF\xBB\xBF"):
+ return "UTF-8"
+ else:
+ return None
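+
+ # Editor's note (illustrative, not part of upstream gyp): the sniffing
+ # above keys off byte-order marks: b"\xFE\xFF" (UTF-16 BE) and b"\xFF\xFE"
+ # (UTF-16 LE) both map to "UTF-16", whose Python codec consumes the BOM to
+ # pick the endianness, and b"\xEF\xBB\xBF" maps to "UTF-8".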
+
+ def ExecCopyInfoPlist(self, source, dest, convert_to_binary, *keys):
+ """Copies the |source| Info.plist to the destination directory |dest|."""
+ # Read the source Info.plist into memory.
+ fd = open(source, 'r')
+ lines = fd.read()
+ fd.close()
+
+ # Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
+ plist = plistlib.readPlistFromString(lines)
+ if keys:
+ plist.update(json.loads(keys[0]))
+ lines = plistlib.writePlistToString(plist)
+
+ # Go through all the environment variables and replace them as variables in
+ # the file.
+ IDENT_RE = re.compile(r'[_/\s]')
+ for key in os.environ:
+ if key.startswith('_'):
+ continue
+ evar = '${%s}' % key
+ evalue = os.environ[key]
+ lines = lines.replace(evar, evalue)
+
+ # Xcode supports various suffixes on environment variables, which are
+ # all undocumented. :rfc1034identifier is used in the standard project
+ # template these days, and :identifier was used earlier. They are used to
+ # convert non-url characters into things that look like valid urls --
+ # except that the replacement character for :identifier, '_' isn't valid
+ # in a URL either -- oops, hence :rfc1034identifier was born.
+ evar = '${%s:identifier}' % key
+ evalue = IDENT_RE.sub('_', os.environ[key])
+ lines = lines.replace(evar, evalue)
+
+ evar = '${%s:rfc1034identifier}' % key
+ evalue = IDENT_RE.sub('-', os.environ[key])
+ lines = lines.replace(evar, evalue)
+
+ # Remove any keys with values that haven't been replaced.
+ lines = lines.split('\n')
+ for i in range(len(lines)):
+ if lines[i].strip().startswith("<string>${"):
+ lines[i] = None
+ lines[i - 1] = None
+ lines = '\n'.join(filter(lambda x: x is not None, lines))
+
+ # Write out the file with variables replaced.
+ fd = open(dest, 'w')
+ fd.write(lines)
+ fd.close()
+
+ # Write out the PkgInfo file now that the Info.plist file has been
+ # "compiled".
+ self._WritePkgInfo(dest)
+
+ if convert_to_binary == 'True':
+ self._ConvertToBinary(dest)
+
+ def _WritePkgInfo(self, info_plist):
+ """This writes the PkgInfo file from the data stored in Info.plist."""
+ plist = plistlib.readPlist(info_plist)
+ if not plist:
+ return
+
+ # Only create PkgInfo for executable types.
+ package_type = plist['CFBundlePackageType']
+ if package_type != 'APPL':
+ return
+
+ # The format of PkgInfo is eight characters, representing the bundle type
+ # and bundle signature, each four characters. If that is missing, four
+ # '?' characters are used instead.
+ signature_code = plist.get('CFBundleSignature', '????')
+ if len(signature_code) != 4: # Wrong length resets everything, too.
+ signature_code = '?' * 4
+
+ dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
+ fp = open(dest, 'w')
+ fp.write('%s%s' % (package_type, signature_code))
+ fp.close()
+
+ def ExecFlock(self, lockfile, *cmd_list):
+ """Emulates the most basic behavior of Linux's flock(1)."""
+ # Rely on exception handling to report errors.
+ fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
+ fcntl.flock(fd, fcntl.LOCK_EX)
+ return subprocess.call(cmd_list)
+
+ def ExecFilterLibtool(self, *cmd_list):
+ """Calls libtool and filters out '/path/to/libtool: file: foo.o has no
+ symbols'."""
+ libtool_re = re.compile(r'^.*libtool: (?:for architecture: \S* )?'
+ r'file: .* has no symbols$')
+ libtool_re5 = re.compile(
+ r'^.*libtool: warning for library: ' +
+ r'.* the table of contents is empty ' +
+ r'\(no object file members in the library define global symbols\)$')
+ env = os.environ.copy()
+ # Ref:
+ # http://www.opensource.apple.com/source/cctools/cctools-809/misc/libtool.c
+ # The problem with this flag is that it resets the file mtime on the file to
+ # epoch=0, e.g. 1970-1-1 or 1969-12-31 depending on timezone.
+ env['ZERO_AR_DATE'] = '1'
+ libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE, env=env)
+ _, err = libtoolout.communicate()
+ for line in err.splitlines():
+ line_decoded = line.decode('utf-8')
+ if not libtool_re.match(line_decoded) and not libtool_re5.match(line_decoded):
+ print(line_decoded, file=sys.stderr)
+ # Unconditionally touch the output .a file on the command line if present
+ # and the command succeeded. A bit hacky.
+ if not libtoolout.returncode:
+ for i in range(len(cmd_list) - 1):
+ if cmd_list[i] == "-o" and cmd_list[i+1].endswith('.a'):
+ os.utime(cmd_list[i+1], None)
+ break
+ return libtoolout.returncode
+
+ def ExecPackageIosFramework(self, framework):
+ # Find the name of the binary based on the part before the ".framework".
+ binary = os.path.basename(framework).split('.')[0]
+ module_path = os.path.join(framework, 'Modules')
+ if not os.path.exists(module_path):
+ os.mkdir(module_path)
+ module_template = 'framework module %s {\n' \
+ ' umbrella header "%s.h"\n' \
+ '\n' \
+ ' export *\n' \
+ ' module * { export * }\n' \
+ '}\n' % (binary, binary)
+
+ module_file = open(os.path.join(module_path, 'module.modulemap'), "w")
+ module_file.write(module_template)
+ module_file.close()
+
+ def ExecPackageFramework(self, framework, version):
+ """Takes a path to Something.framework and the Current version of that and
+ sets up all the symlinks."""
+ # Find the name of the binary based on the part before the ".framework".
+ binary = os.path.basename(framework).split('.')[0]
+
+ CURRENT = 'Current'
+ RESOURCES = 'Resources'
+ VERSIONS = 'Versions'
+
+ if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
+ # Binary-less frameworks don't seem to contain symlinks (see e.g.
+ # chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
+ return
+
+ # Move into the framework directory to set the symlinks correctly.
+ pwd = os.getcwd()
+ os.chdir(framework)
+
+ # Set up the Current version.
+ self._Relink(version, os.path.join(VERSIONS, CURRENT))
+
+ # Set up the root symlinks.
+ self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
+ self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
+
+ # Back to where we were before!
+ os.chdir(pwd)
+
+ def _Relink(self, dest, link):
+ """Creates a symlink to |dest| named |link|. If |link| already exists,
+ it is overwritten."""
+ if os.path.lexists(link):
+ os.remove(link)
+ os.symlink(dest, link)
+
+ def ExecCompileIosFrameworkHeaderMap(self, out, framework, *all_headers):
+ framework_name = os.path.basename(framework).split('.')[0]
+ all_headers = map(os.path.abspath, all_headers)
+ filelist = {}
+ for header in all_headers:
+ filename = os.path.basename(header)
+ filelist[filename] = header
+ filelist[os.path.join(framework_name, filename)] = header
+ WriteHmap(out, filelist)
+
+ def ExecCopyIosFrameworkHeaders(self, framework, *copy_headers):
+ header_path = os.path.join(framework, 'Headers')
+ if not os.path.exists(header_path):
+ os.makedirs(header_path)
+ for header in copy_headers:
+ shutil.copy(header, os.path.join(header_path, os.path.basename(header)))
+
+ def ExecCompileXcassets(self, keys, *inputs):
+ """Compiles multiple .xcassets files into a single .car file.
+
+ This invokes 'actool' to compile all the input .xcassets files. The
+ |keys| argument is a JSON-encoded dictionary of extra arguments to
+ pass to 'actool' when the asset catalogs contain an application icon
+ or a launch image.
+
+ Note that 'actool' does not create the Assets.car file if the asset
+ catalogs do not contain an imageset.
+ """
+ command_line = [
+ 'xcrun', 'actool', '--output-format', 'human-readable-text',
+ '--compress-pngs', '--notices', '--warnings', '--errors',
+ ]
+ is_iphone_target = 'IPHONEOS_DEPLOYMENT_TARGET' in os.environ
+ if is_iphone_target:
+ platform = os.environ['CONFIGURATION'].split('-')[-1]
+ if platform not in ('iphoneos', 'iphonesimulator'):
+ platform = 'iphonesimulator'
+ command_line.extend([
+ '--platform', platform, '--target-device', 'iphone',
+ '--target-device', 'ipad', '--minimum-deployment-target',
+ os.environ['IPHONEOS_DEPLOYMENT_TARGET'], '--compile',
+ os.path.abspath(os.environ['CONTENTS_FOLDER_PATH']),
+ ])
+ else:
+ command_line.extend([
+ '--platform', 'macosx', '--target-device', 'mac',
+ '--minimum-deployment-target', os.environ['MACOSX_DEPLOYMENT_TARGET'],
+ '--compile',
+ os.path.abspath(os.environ['UNLOCALIZED_RESOURCES_FOLDER_PATH']),
+ ])
+ if keys:
+ keys = json.loads(keys)
+ for key, value in keys.items():
+ arg_name = '--' + key
+ if isinstance(value, bool):
+ if value:
+ command_line.append(arg_name)
+ elif isinstance(value, list):
+ for v in value:
+ command_line.append(arg_name)
+ command_line.append(str(v))
+ else:
+ command_line.append(arg_name)
+ command_line.append(str(value))
+ # Note: actool crashes if inputs path are relative, so use os.path.abspath
+ # to get absolute path name for inputs.
+ command_line.extend(map(os.path.abspath, inputs))
+ subprocess.check_call(command_line)
+
+ def ExecMergeInfoPlist(self, output, *inputs):
+ """Merge multiple .plist files into a single .plist file."""
+ merged_plist = {}
+ for path in inputs:
+ plist = self._LoadPlistMaybeBinary(path)
+ self._MergePlist(merged_plist, plist)
+ plistlib.writePlist(merged_plist, output)
+
+ def ExecCodeSignBundle(self, key, entitlements, provisioning, path, preserve):
+ """Code sign a bundle.
+
+ This function tries to code sign an iOS bundle, following the same
+ algorithm as Xcode:
+ 1. pick the provisioning profile that best matches the bundle identifier,
+ and copy it into the bundle as embedded.mobileprovision,
+ 2. copy Entitlements.plist from user or SDK next to the bundle,
+ 3. code sign the bundle.
+ """
+ substitutions, overrides = self._InstallProvisioningProfile(
+ provisioning, self._GetCFBundleIdentifier())
+ entitlements_path = self._InstallEntitlements(
+ entitlements, substitutions, overrides)
+
+ args = ['codesign', '--force', '--sign', key]
+ if preserve == 'True':
+ args.extend(['--deep', '--preserve-metadata=identifier,entitlements'])
+ else:
+ args.extend(['--entitlements', entitlements_path])
+ args.extend(['--timestamp=none', path])
+ subprocess.check_call(args)
+
+ def _InstallProvisioningProfile(self, profile, bundle_identifier):
+ """Installs embedded.mobileprovision into the bundle.
+
+ Args:
+ profile: string, optional, short name of the .mobileprovision file
+ to use; if empty or the file is missing, the best installed file
+ will be used
+ bundle_identifier: string, value of CFBundleIdentifier from Info.plist
+
+ Returns:
+ A tuple containing two dictionaries: variable substitutions and values
+ to override when generating the entitlements file.
+ """
+ source_path, provisioning_data, team_id = self._FindProvisioningProfile(
+ profile, bundle_identifier)
+ target_path = os.path.join(
+ os.environ['BUILT_PRODUCTS_DIR'],
+ os.environ['CONTENTS_FOLDER_PATH'],
+ 'embedded.mobileprovision')
+ shutil.copy2(source_path, target_path)
+ substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
+ return substitutions, provisioning_data['Entitlements']
+
+ def _FindProvisioningProfile(self, profile, bundle_identifier):
+ """Finds the .mobileprovision file to use for signing the bundle.
+
+ Checks all the installed provisioning profiles (or, if the user specified
+ the PROVISIONING_PROFILE variable, only that one) and selects the most
+ specific one that corresponds to the bundle identifier.
+
+ Args:
+ profile: string, optional, short name of the .mobileprovision file
+ to use; if empty or the file is missing, the best installed file
+ will be used
+ bundle_identifier: string, value of CFBundleIdentifier from Info.plist
+
+ Returns:
+ A tuple of the path to the selected provisioning profile, the data of
+ the embedded plist in the provisioning profile and the team identifier
+ to use for code signing.
+
+ Raises:
+ SystemExit: if no .mobileprovision can be used to sign the bundle.
+ """
+ profiles_dir = os.path.join(
+ os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
+ if not os.path.isdir(profiles_dir):
+ print((
+ 'cannot find mobile provisioning for %s' % bundle_identifier),
+ file=sys.stderr)
+ sys.exit(1)
+ provisioning_profiles = None
+ if profile:
+ profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
+ if os.path.exists(profile_path):
+ provisioning_profiles = [profile_path]
+ if not provisioning_profiles:
+ provisioning_profiles = glob.glob(
+ os.path.join(profiles_dir, '*.mobileprovision'))
+ valid_provisioning_profiles = {}
+ for profile_path in provisioning_profiles:
+ profile_data = self._LoadProvisioningProfile(profile_path)
+ app_id_pattern = profile_data.get(
+ 'Entitlements', {}).get('application-identifier', '')
+ for team_identifier in profile_data.get('TeamIdentifier', []):
+ app_id = '%s.%s' % (team_identifier, bundle_identifier)
+ if fnmatch.fnmatch(app_id, app_id_pattern):
+ valid_provisioning_profiles[app_id_pattern] = (
+ profile_path, profile_data, team_identifier)
+ if not valid_provisioning_profiles:
+ print((
+ 'cannot find mobile provisioning for %s' % bundle_identifier),
+ file=sys.stderr)
+ sys.exit(1)
+ # If the user has multiple provisioning profiles installed that can be
+ # used for ${bundle_identifier}, pick the most specific one (i.e. the
+ # provisioning profile whose pattern is the longest).
+ selected_key = max(valid_provisioning_profiles, key=len)
+ return valid_provisioning_profiles[selected_key]
+
+ def _LoadProvisioningProfile(self, profile_path):
+ """Extracts the plist embedded in a provisioning profile.
+
+ Args:
+ profile_path: string, path to the .mobileprovision file
+
+ Returns:
+ Content of the plist embedded in the provisioning profile as a dictionary.
+ """
+ with tempfile.NamedTemporaryFile() as temp:
+ subprocess.check_call([
+ 'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
+ return self._LoadPlistMaybeBinary(temp.name)
+
+ def _MergePlist(self, merged_plist, plist):
+ """Merge |plist| into |merged_plist|."""
+ for key, value in plist.items():
+ if isinstance(value, dict):
+ merged_value = merged_plist.get(key, {})
+ if isinstance(merged_value, dict):
+ self._MergePlist(merged_value, value)
+ merged_plist[key] = merged_value
+ else:
+ merged_plist[key] = value
+ else:
+ merged_plist[key] = value
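+
+ # Editor's sketch (illustrative, not part of upstream gyp): dict values
+ # merge recursively; any other value type is overwritten by the later
+ # plist:
+ #   >>> merged = {'A': {'x': 1}, 'B': 'old'}
+ #   >>> MacTool()._MergePlist(merged, {'A': {'y': 2}, 'B': 'new'})
+ #   >>> merged == {'A': {'x': 1, 'y': 2}, 'B': 'new'}
+ #   True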
+
+ def _LoadPlistMaybeBinary(self, plist_path):
+ """Loads into a memory a plist possibly encoded in binary format.
+
+ This is a wrapper around plistlib.readPlist that tries to convert the
+ plist to the XML format if it can't be parsed (assuming that it is in
+ the binary format).
+
+ Args:
+ plist_path: string, path to a plist file, in XML or binary format
+
+ Returns:
+ Content of the plist as a dictionary.
+ """
+ try:
+ # First, try to read the file using plistlib that only supports XML,
+ # and if an exception is raised, convert a temporary copy to XML and
+ # load that copy.
+ return plistlib.readPlist(plist_path)
+ except Exception:
+ pass
+ with tempfile.NamedTemporaryFile() as temp:
+ shutil.copy2(plist_path, temp.name)
+ subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
+ return plistlib.readPlist(temp.name)
+
+ def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
+ """Constructs a dictionary of variable substitutions for Entitlements.plist.
+
+ Args:
+ bundle_identifier: string, value of CFBundleIdentifier from Info.plist
+ app_identifier_prefix: string, value for AppIdentifierPrefix
+
+ Returns:
+ Dictionary of substitutions to apply when generating Entitlements.plist.
+ """
+ return {
+ 'CFBundleIdentifier': bundle_identifier,
+ 'AppIdentifierPrefix': app_identifier_prefix,
+ }
+
+ def _GetCFBundleIdentifier(self):
+ """Extracts CFBundleIdentifier value from Info.plist in the bundle.
+
+ Returns:
+ Value of CFBundleIdentifier in the Info.plist located in the bundle.
+ """
+ info_plist_path = os.path.join(
+ os.environ['TARGET_BUILD_DIR'],
+ os.environ['INFOPLIST_PATH'])
+ info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
+ return info_plist_data['CFBundleIdentifier']
+
+ def _InstallEntitlements(self, entitlements, substitutions, overrides):
+ """Generates and install the ${BundleName}.xcent entitlements file.
+
+ Expands variables "$(variable)" pattern in the source entitlements file,
+ add extra entitlements defined in the .mobileprovision file and the copy
+ the generated plist to "${BundlePath}.xcent".
+
+ Args:
+ entitlements: string, optional, path to the Entitlements.plist template
+ to use, defaults to "${SDKROOT}/Entitlements.plist"
+ substitutions: dictionary, variable substitutions
+ overrides: dictionary, values to add to the entitlements
+
+ Returns:
+ Path to the generated entitlements file.
+ """
+ source_path = entitlements
+ target_path = os.path.join(
+ os.environ['BUILT_PRODUCTS_DIR'],
+ os.environ['PRODUCT_NAME'] + '.xcent')
+ if not source_path:
+ source_path = os.path.join(
+ os.environ['SDKROOT'],
+ 'Entitlements.plist')
+ shutil.copy2(source_path, target_path)
+ data = self._LoadPlistMaybeBinary(target_path)
+ data = self._ExpandVariables(data, substitutions)
+ if overrides:
+ for key in overrides:
+ if key not in data:
+ data[key] = overrides[key]
+ plistlib.writePlist(data, target_path)
+ return target_path
+
+ def _ExpandVariables(self, data, substitutions):
+ """Expands variables "$(variable)" in data.
+
+ Args:
+ data: object, can be either string, list or dictionary
+ substitutions: dictionary, variable substitutions to perform
+
+ Returns:
+ Copy of data where each reference to "$(variable)" has been replaced
+ by the corresponding value found in substitutions, or left intact if
+ the key was not found.
+ """
+ if isinstance(data, str):
+ for key, value in substitutions.items():
+ data = data.replace('$(%s)' % key, value)
+ return data
+ if isinstance(data, list):
+ return [self._ExpandVariables(v, substitutions) for v in data]
+ if isinstance(data, dict):
+ return {k: self._ExpandVariables(data[k], substitutions) for k in data}
+ return data
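+
+ # Editor's sketch (illustrative, not part of upstream gyp):
+ #   >>> MacTool()._ExpandVariables(
+ #   ...     ['$(PRODUCT_NAME).app', {'id': '$(ID)'}],
+ #   ...     {'PRODUCT_NAME': 'Demo', 'ID': 'org.example.demo'})
+ #   ['Demo.app', {'id': 'org.example.demo'}]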
+
+def NextGreaterPowerOf2(x):
+ return 2**(x).bit_length()
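+
+# Editor's sketch (illustrative, not part of upstream gyp):
+#   >>> [NextGreaterPowerOf2(n) for n in (1, 3, 4)]
+#   [2, 4, 8]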
+
+def WriteHmap(output_name, filelist):
+ """Generates a header map based on |filelist|.
+
+ Per Mark Mentovai:
+ A header map is structured essentially as a hash table, keyed by names used
+ in #includes, and providing pathnames to the actual files.
+
+ The implementation below and the comment above comes from inspecting:
+ http://www.opensource.apple.com/source/distcc/distcc-2503/distcc_dist/include_server/headermap.py?txt
+ while also looking at the implementation in clang in:
+ https://llvm.org/svn/llvm-project/cfe/trunk/lib/Lex/HeaderMap.cpp
+ """
+ magic = 1751998832
+ version = 1
+ _reserved = 0
+ count = len(filelist)
+ capacity = NextGreaterPowerOf2(count)
+ strings_offset = 24 + (12 * capacity)
+ max_value_length = len(max(filelist.items(), key=lambda t: len(t[1]))[1])
+
+ out = open(output_name, "wb")
+ out.write(struct.pack('<LHHLLLL', magic, version, _reserved, strings_offset,
+ count, capacity, max_value_length))
+
+ # Create empty hashmap buckets.
+ buckets = [None] * capacity
+ for file, path in filelist.items():
+ key = 0
+ for c in file:
+ key += ord(c.lower()) * 13
+
+ # Fill next empty bucket.
+ while buckets[key & capacity - 1] is not None:
+ key = key + 1
+ buckets[key & capacity - 1] = (file, path)
+
+ next_offset = 1
+ for bucket in buckets:
+ if bucket is None:
+ out.write(struct.pack('<LLL', 0, 0, 0))
+ else:
+ (file, path) = bucket
+ key_offset = next_offset
+ prefix_offset = key_offset + len(file) + 1
+ suffix_offset = prefix_offset + len(os.path.dirname(path) + os.sep) + 1
+ next_offset = suffix_offset + len(os.path.basename(path)) + 1
+ out.write(struct.pack('<LLL', key_offset, prefix_offset, suffix_offset))
+
+ # Pad byte since next offset starts at 1.
+ out.write(struct.pack('<x'))
+
+ for bucket in buckets:
+ if bucket is not None:
+ (file, path) = bucket
+ # struct's 's' format needs bytes on Python 3; the offsets computed
+ # above assume the names are ASCII, so this encode is length-preserving.
+ out.write(struct.pack('<%ds' % len(file), file.encode('utf-8')))
+ out.write(struct.pack('<s', b'\0'))
+ base = os.path.dirname(path) + os.sep
+ out.write(struct.pack('<%ds' % len(base), base.encode('utf-8')))
+ out.write(struct.pack('<s', b'\0'))
+ path = os.path.basename(path)
+ out.write(struct.pack('<%ds' % len(path), path.encode('utf-8')))
+ out.write(struct.pack('<s', b'\0'))
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/third_party/python/gyp/pylib/gyp/msvs_emulation.py b/third_party/python/gyp/pylib/gyp/msvs_emulation.py
new file mode 100644
index 0000000000..034ef49591
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/msvs_emulation.py
@@ -0,0 +1,1118 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This module helps emulate Visual Studio 2008 behavior on top of other
+build systems, primarily ninja.
+"""
+
+import os
+import re
+import subprocess
+import sys
+
+from six.moves import collections_abc
+
+from gyp.common import OrderedSet
+import gyp.MSVSUtil
+import gyp.MSVSVersion
+from gyp.MSVSVersion import version_to_tuple
+
+try:
+ # basestring was removed in python3.
+ basestring
+except NameError:
+ basestring = str
+
+
+windows_quoter_regex = re.compile(r'(\\*)"')
+
+
+def QuoteForRspFile(arg):
+ """Quote a command line argument so that it appears as one argument when
+ processed via cmd.exe and parsed by CommandLineToArgvW (as is typical for
+ Windows programs)."""
+ # See http://goo.gl/cuFbX and http://goo.gl/dhPnp including the comment
+ # threads. This is actually the quoting rules for CommandLineToArgvW, not
+ # for the shell, because the shell doesn't do anything in Windows. This
+ # works more or less because most programs (including the compiler, etc.)
+ # use that function to handle command line arguments.
+
+ if not os.getenv('GYP_MSVS_DISABLE_PATH_NORMALIZATION'):
+ # Use a heuristic to try to find args that are paths, and normalize them
+ if arg.find('/') > 0 or arg.count('/') > 1:
+ arg = os.path.normpath(arg)
+
+ # For a literal quote, CommandLineToArgvW requires 2n+1 backslashes
+ # preceding it, and results in n backslashes + the quote. So we substitute
+ # in 2* what we match, +1 more, plus the quote.
+ arg = windows_quoter_regex.sub(lambda mo: 2 * mo.group(1) + '\\"', arg)
+
+ # %'s also need to be doubled, otherwise they're interpreted as batch
+ # positional arguments. Doubling also lets a literal % survive escaping and
+ # be collapsed back to a single %, so that something that merely looks like
+ # an environment variable reference (e.g. %PATH%) is passed through
+ # literally instead of being expanded.
+ arg = arg.replace('%', '%%')
+
+ # These commands are used in rsp files, so no escaping for the shell (via ^)
+ # is necessary.
+
+ # Finally, wrap the whole thing in quotes so that the above quote rule
+ # applies and whitespace isn't a word break.
+ return '"' + arg + '"'
+
+
+def EncodeRspFileList(args):
+ """Process a list of arguments using QuoteCmdExeArgument."""
+ # Note that the first argument is assumed to be the command. Don't add
+ # quotes around it because then built-ins like 'echo', etc. won't work.
+ # Take care to normpath only the path in the case of 'call ../x.bat' because
+ # otherwise the whole thing is incorrectly interpreted as a path and not
+ # normalized correctly.
+ if not args: return ''
+ if args[0].startswith('call '):
+ call, program = args[0].split(' ', 1)
+ program = call + ' ' + os.path.normpath(program)
+ else:
+ program = os.path.normpath(args[0])
+ return program + ' ' + ' '.join(QuoteForRspFile(arg) for arg in args[1:])
+
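+# Editor's sketch (illustrative, not part of upstream gyp), assuming
+# GYP_MSVS_DISABLE_PATH_NORMALIZATION is unset:
+#   >>> QuoteForRspFile('a b')
+#   '"a b"'
+#   >>> QuoteForRspFile('%PATH%')
+#   '"%%PATH%%"'
+#   >>> QuoteForRspFile('say "hi"')
+#   '"say \\"hi\\""'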
+
+def _GenericRetrieve(root, default, path):
+ """Given a list of dictionary keys |path| and a tree of dicts |root|, find
+ value at path, or return |default| if any of the path doesn't exist."""
+ if not root:
+ return default
+ if not path:
+ return root
+ return _GenericRetrieve(root.get(path[0]), default, path[1:])
+
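+# Illustrative example (not upstream): walking a nested dict by key path:
+#   _GenericRetrieve({'a': {'b': 1}}, 0, ['a', 'b']) -> 1
+#   _GenericRetrieve({'a': {}}, 0, ['a', 'b'])       -> 0  (default)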
+
+def _AddPrefix(element, prefix):
+ """Add |prefix| to |element| or each subelement if element is iterable."""
+ if element is None:
+ return element
+ if (isinstance(element, collections_abc.Iterable) and
+ not isinstance(element, basestring)):
+ return [prefix + e for e in element]
+ else:
+ return prefix + element
+
+
+def _DoRemapping(element, map):
+ """If |element| then remap it through |map|. If |element| is iterable then
+ each item will be remapped. Any elements not found will be removed."""
+ if map is not None and element is not None:
+ if not callable(map):
+ map = map.get # Not callable, so assume it's a dict and remap via lookup.
+ if (isinstance(element, collections_abc.Iterable) and
+ not isinstance(element, basestring)):
+ # Materialize with list() so the result is reusable under Python 3,
+ # where filter() is lazy.
+ element = list(filter(None, [map(elem) for elem in element]))
+ else:
+ element = map(element)
+ return element
+
+
+def _AppendOrReturn(append, element):
+ """If |append| is None, simply return |element|. If |append| is not None,
+ then add |element| to it, adding each item in |element| if it's a list or
+ tuple."""
+ if append is not None and element is not None:
+ if (isinstance(element, collections_abc.Iterable) and
+ not isinstance(element, basestring)):
+ append.extend(element)
+ else:
+ append.append(element)
+ else:
+ return element
+
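+# Illustrative example (not upstream): these helpers compose inside
+# _GetAndMunge; note that strings are treated as scalars, not iterables:
+#   _AddPrefix(['a', 'b'], '/I') -> ['/Ia', '/Ib']
+#   _AddPrefix('a', '/I')        -> '/Ia'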
+
+def _FindDirectXInstallation():
+ """Try to find an installation location for the DirectX SDK. Check for the
+ standard environment variable, and if that doesn't exist, try to find
+ via the registry. May return None if not found in either location."""
+ # Return previously calculated value, if there is one
+ if hasattr(_FindDirectXInstallation, 'dxsdk_dir'):
+ return _FindDirectXInstallation.dxsdk_dir
+
+ dxsdk_dir = os.environ.get('DXSDK_DIR')
+ if not dxsdk_dir:
+ # Setup params to pass to and attempt to launch reg.exe.
+ cmd = ['reg.exe', 'query', r'HKLM\Software\Microsoft\DirectX', '/s']
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
+ universal_newlines=True)
+ for line in p.communicate()[0].splitlines():
+ if 'InstallPath' in line:
+ dxsdk_dir = line.split(' ')[3] + "\\"
+
+ # Cache return value
+ _FindDirectXInstallation.dxsdk_dir = dxsdk_dir
+ return dxsdk_dir
+
+
+def GetGlobalVSMacroEnv(vs_version):
+ """Get a dict of variables mapping internal VS macro names to their gyp
+ equivalents. Returns all variables that are independent of the target."""
+ env = {}
+ # '$(VSInstallDir)' and '$(VCInstallDir)' are available when and only when
+ # Visual Studio is actually installed.
+ if vs_version.Path():
+ env['$(VSInstallDir)'] = vs_version.Path()
+ env['$(VCInstallDir)'] = os.path.join(vs_version.Path(), 'VC') + '\\'
+ # Chromium uses DXSDK_DIR in include/lib paths, but it may or may not be
+ # set. This happens when the SDK is sync'd via src-internal, rather than
+ # by typical end-user installation of the SDK. If it's not set, we don't
+ # want to leave the unexpanded variable in the path, so simply strip it.
+ dxsdk_dir = _FindDirectXInstallation()
+ env['$(DXSDK_DIR)'] = dxsdk_dir if dxsdk_dir else ''
+ # Try to find an installation location for the Windows DDK by checking
+ # the WDK_DIR environment variable, may be None.
+ env['$(WDK_DIR)'] = os.environ.get('WDK_DIR', '')
+ return env
+
+def ExtractSharedMSVSSystemIncludes(configs, generator_flags):
+ """Finds msvs_system_include_dirs that are common to all targets, removes
+ them from all targets, and returns an OrderedSet containing them."""
+ all_system_includes = OrderedSet(
+ configs[0].get('msvs_system_include_dirs', []))
+ for config in configs[1:]:
+ system_includes = config.get('msvs_system_include_dirs', [])
+ all_system_includes = all_system_includes & OrderedSet(system_includes)
+ if not all_system_includes:
+ return None
+ # Expand macros in all_system_includes.
+ env = GetGlobalVSMacroEnv(GetVSVersion(generator_flags))
+ expanded_system_includes = OrderedSet([ExpandMacros(include, env)
+ for include in all_system_includes])
+ if any(['$' in include for include in expanded_system_includes]):
+ # Some path relies on target-specific variables, bail.
+ return None
+
+ # Remove system includes shared by all targets from the targets.
+ for config in configs:
+ includes = config.get('msvs_system_include_dirs', [])
+ if includes: # Don't insert a msvs_system_include_dirs key if not needed.
+ # This must check the unexpanded includes list:
+ new_includes = [i for i in includes if i not in all_system_includes]
+ config['msvs_system_include_dirs'] = new_includes
+ return expanded_system_includes
+
+
+class MsvsSettings(object):
+ """A class that understands the gyp 'msvs_...' values (especially the
+ msvs_settings field). They largely correspond to the VS2008 IDE DOM. This
+ class helps map those settings to command line options."""
+
+ def __init__(self, spec, generator_flags):
+ self.spec = spec
+ self.vs_version = GetVSVersion(generator_flags)
+
+ supported_fields = [
+ ('msvs_configuration_attributes', dict),
+ ('msvs_settings', dict),
+ ('msvs_system_include_dirs', list),
+ ('msvs_disabled_warnings', list),
+ ('msvs_precompiled_header', str),
+ ('msvs_precompiled_source', str),
+ ('msvs_configuration_platform', str),
+ ('msvs_target_platform', str),
+ ]
+ configs = spec['configurations']
+ for field, default in supported_fields:
+ setattr(self, field, {})
+ for configname, config in configs.items():
+ getattr(self, field)[configname] = config.get(field, default())
+
+ self.msvs_cygwin_dirs = spec.get('msvs_cygwin_dirs', ['.'])
+
+ unsupported_fields = [
+ 'msvs_prebuild',
+ 'msvs_postbuild',
+ ]
+ unsupported = []
+ for field in unsupported_fields:
+ for config in configs.values():
+ if field in config:
+ unsupported += ["%s not supported (target %s)." %
+ (field, spec['target_name'])]
+ if unsupported:
+ raise Exception('\n'.join(unsupported))
+
+ def GetExtension(self):
+ """Returns the extension for the target, with no leading dot.
+
+ Uses 'product_extension' if specified, otherwise uses MSVS defaults based on
+ the target type.
+ """
+ ext = self.spec.get('product_extension', None)
+ if ext:
+ return ext
+ return gyp.MSVSUtil.TARGET_TYPE_EXT.get(self.spec['type'], '')
+
+ def GetVSMacroEnv(self, base_to_build=None, config=None):
+ """Get a dict of variables mapping internal VS macro names to their gyp
+ equivalents."""
+ target_platform = 'Win32' if self.GetArch(config) == 'x86' else 'x64'
+ target_name = self.spec.get('product_prefix', '') + \
+ self.spec.get('product_name', self.spec['target_name'])
+ target_dir = base_to_build + '\\' if base_to_build else ''
+ target_ext = '.' + self.GetExtension()
+ target_file_name = target_name + target_ext
+
+ replacements = {
+ '$(InputName)': '${root}',
+ '$(InputPath)': '${source}',
+ '$(IntDir)': '$!INTERMEDIATE_DIR',
+ '$(OutDir)\\': target_dir,
+ '$(PlatformName)': target_platform,
+ '$(ProjectDir)\\': '',
+ '$(ProjectName)': self.spec['target_name'],
+ '$(TargetDir)\\': target_dir,
+ '$(TargetExt)': target_ext,
+ '$(TargetFileName)': target_file_name,
+ '$(TargetName)': target_name,
+ '$(TargetPath)': os.path.join(target_dir, target_file_name),
+ }
+ replacements.update(GetGlobalVSMacroEnv(self.vs_version))
+ return replacements
+
+ def ConvertVSMacros(self, s, base_to_build=None, config=None):
+ """Convert from VS macro names to something equivalent."""
+ env = self.GetVSMacroEnv(base_to_build, config=config)
+ return ExpandMacros(s, env)
+
+ def AdjustLibraries(self, libraries):
+ """Strip -l from library if it's specified with that."""
+ libs = [lib[2:] if lib.startswith('-l') else lib for lib in libraries]
+ return [lib + '.lib' if not lib.lower().endswith('.lib') else lib
+ for lib in libs]
+
+ def _GetAndMunge(self, field, path, default, prefix, append, map):
+ """Retrieve a value from |field| at |path| or return |default|. If
+ |append| is specified, and the item is found, it will be appended to that
+ object instead of returned. If |map| is specified, results will be
+ remapped through |map| before being returned or appended."""
+ result = _GenericRetrieve(field, default, path)
+ result = _DoRemapping(result, map)
+ result = _AddPrefix(result, prefix)
+ return _AppendOrReturn(append, result)
+
+ class _GetWrapper(object):
+ def __init__(self, parent, field, base_path, append=None):
+ self.parent = parent
+ self.field = field
+ self.base_path = [base_path]
+ self.append = append
+ def __call__(self, name, map=None, prefix='', default=None):
+ return self.parent._GetAndMunge(self.field, self.base_path + [name],
+ default=default, prefix=prefix, append=self.append, map=map)
+
+ def GetArch(self, config):
+ """Get architecture based on msvs_configuration_platform and
+ msvs_target_platform. Returns either 'x86' or 'x64'."""
+ configuration_platform = self.msvs_configuration_platform.get(config, '')
+ platform = self.msvs_target_platform.get(config, '')
+ if not platform: # If no specific override, use the configuration's.
+ platform = configuration_platform
+ # Map from platform to architecture.
+ return {'Win32': 'x86', 'x64': 'x64'}.get(platform, 'x86')
+
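+ # Illustrative example (not upstream): with
+ # msvs_target_platform = {'Debug': 'x64'}, GetArch('Debug') returns 'x64';
+ # with both platform fields empty it falls back to 'x86'.
+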
+ def _TargetConfig(self, config):
+ """Returns the target-specific configuration."""
+ # There are two levels of architecture/platform specification in VS. The
+ # first level is global for the configuration (this is what we consider
+ # "the" config at the gyp level, which will be something like 'Debug' or
+ # 'Release'); VS2015 and later use only this level.
+ if int(self.vs_version.short_name) >= 2015:
+ return config
+ # Earlier versions also have a second, target-specific configuration that
+ # overrides the global one. |config| is remapped here to take into
+ # account the local target-specific overrides to the global configuration.
+ arch = self.GetArch(config)
+ if arch == 'x64' and not config.endswith('_x64'):
+ config += '_x64'
+ if arch == 'x86' and config.endswith('_x64'):
+ config = config.rsplit('_', 1)[0]
+ return config
+
+ def _Setting(self, path, config,
+ default=None, prefix='', append=None, map=None):
+ """_GetAndMunge for msvs_settings."""
+ return self._GetAndMunge(
+ self.msvs_settings[config], path, default, prefix, append, map)
+
+ def _ConfigAttrib(self, path, config,
+ default=None, prefix='', append=None, map=None):
+ """_GetAndMunge for msvs_configuration_attributes."""
+ return self._GetAndMunge(
+ self.msvs_configuration_attributes[config],
+ path, default, prefix, append, map)
+
+ def AdjustIncludeDirs(self, include_dirs, config):
+ """Updates include_dirs to expand VS specific paths, and adds the system
+ include dirs used for platform SDK and similar."""
+ config = self._TargetConfig(config)
+ includes = include_dirs + self.msvs_system_include_dirs[config]
+ includes.extend(self._Setting(
+ ('VCCLCompilerTool', 'AdditionalIncludeDirectories'), config, default=[]))
+ return [self.ConvertVSMacros(p, config=config) for p in includes]
+
+ def AdjustMidlIncludeDirs(self, midl_include_dirs, config):
+ """Updates midl_include_dirs to expand VS specific paths, and adds the
+ system include dirs used for platform SDK and similar."""
+ config = self._TargetConfig(config)
+ includes = midl_include_dirs + self.msvs_system_include_dirs[config]
+ includes.extend(self._Setting(
+ ('VCMIDLTool', 'AdditionalIncludeDirectories'), config, default=[]))
+ return [self.ConvertVSMacros(p, config=config) for p in includes]
+
+ def GetComputedDefines(self, config):
+ """Returns the set of defines that are injected to the defines list based
+ on other VS settings."""
+ config = self._TargetConfig(config)
+ defines = []
+ if self._ConfigAttrib(['CharacterSet'], config) == '1':
+ defines.extend(('_UNICODE', 'UNICODE'))
+ if self._ConfigAttrib(['CharacterSet'], config) == '2':
+ defines.append('_MBCS')
+ defines.extend(self._Setting(
+ ('VCCLCompilerTool', 'PreprocessorDefinitions'), config, default=[]))
+ return defines
+
+ def GetCompilerPdbName(self, config, expand_special):
+ """Get the pdb file name that should be used for compiler invocations, or
+ None if there's no explicit name specified."""
+ config = self._TargetConfig(config)
+ pdbname = self._Setting(
+ ('VCCLCompilerTool', 'ProgramDataBaseFileName'), config)
+ if pdbname:
+ pdbname = expand_special(self.ConvertVSMacros(pdbname))
+ return pdbname
+
+ def GetMapFileName(self, config, expand_special):
+ """Gets the explicitly overriden map file name for a target or returns None
+ if it's not set."""
+ config = self._TargetConfig(config)
+ map_file = self._Setting(('VCLinkerTool', 'MapFileName'), config)
+ if map_file:
+ map_file = expand_special(self.ConvertVSMacros(map_file, config=config))
+ return map_file
+
+ def GetOutputName(self, config, expand_special):
+ """Gets the explicitly overridden output name for a target or returns None
+ if it's not overridden."""
+ config = self._TargetConfig(config)
+ type = self.spec['type']
+ root = 'VCLibrarianTool' if type == 'static_library' else 'VCLinkerTool'
+ # TODO(scottmg): Handle OutputDirectory without OutputFile.
+ output_file = self._Setting((root, 'OutputFile'), config)
+ if output_file:
+ output_file = expand_special(self.ConvertVSMacros(
+ output_file, config=config))
+ return output_file
+
+ def GetPDBName(self, config, expand_special, default):
+ """Gets the explicitly overridden pdb name for a target or returns
+ default if it's not overridden, or if no pdb will be generated."""
+ config = self._TargetConfig(config)
+ output_file = self._Setting(('VCLinkerTool', 'ProgramDatabaseFile'), config)
+ generate_debug_info = self._Setting(
+ ('VCLinkerTool', 'GenerateDebugInformation'), config)
+ if generate_debug_info == 'true':
+ if output_file:
+ return expand_special(self.ConvertVSMacros(output_file, config=config))
+ else:
+ return default
+ else:
+ return None
+
+ def GetNoImportLibrary(self, config):
+ """If NoImportLibrary: true, ninja will not expect the output to include
+ an import library."""
+ config = self._TargetConfig(config)
+ noimplib = self._Setting(('NoImportLibrary',), config)
+ return noimplib == 'true'
+
+ def GetAsmflags(self, config):
+ """Returns the flags that need to be added to ml invocations."""
+ config = self._TargetConfig(config)
+ asmflags = []
+ safeseh = self._Setting(('MASM', 'UseSafeExceptionHandlers'), config)
+ if safeseh == 'true':
+ asmflags.append('/safeseh')
+ return asmflags
+
+ def GetCflags(self, config):
+ """Returns the flags that need to be added to .c and .cc compilations."""
+ config = self._TargetConfig(config)
+ cflags = []
+ cflags.extend(['/wd' + w for w in self.msvs_disabled_warnings[config]])
+ cl = self._GetWrapper(self, self.msvs_settings[config],
+ 'VCCLCompilerTool', append=cflags)
+ cl('Optimization',
+ map={'0': 'd', '1': '1', '2': '2', '3': 'x'}, prefix='/O', default='2')
+ cl('InlineFunctionExpansion', prefix='/Ob')
+ cl('DisableSpecificWarnings', prefix='/wd')
+ cl('StringPooling', map={'true': '/GF'})
+ cl('EnableFiberSafeOptimizations', map={'true': '/GT'})
+ cl('OmitFramePointers', map={'false': '-', 'true': ''}, prefix='/Oy')
+ cl('EnableIntrinsicFunctions', map={'false': '-', 'true': ''}, prefix='/Oi')
+ cl('FavorSizeOrSpeed', map={'1': 't', '2': 's'}, prefix='/O')
+ cl('FloatingPointModel',
+ map={'0': 'precise', '1': 'strict', '2': 'fast'}, prefix='/fp:',
+ default='0')
+ cl('CompileAsManaged', map={'false': '', 'true': '/clr'})
+ cl('WholeProgramOptimization', map={'true': '/GL'})
+ cl('WarningLevel', prefix='/W')
+ cl('WarnAsError', map={'true': '/WX'})
+ cl('CallingConvention',
+ map={'0': 'd', '1': 'r', '2': 'z', '3': 'v'}, prefix='/G')
+ cl('DebugInformationFormat',
+ map={'1': '7', '3': 'i', '4': 'I'}, prefix='/Z')
+ cl('RuntimeTypeInfo', map={'true': '/GR', 'false': '/GR-'})
+ cl('EnableFunctionLevelLinking', map={'true': '/Gy', 'false': '/Gy-'})
+ cl('MinimalRebuild', map={'true': '/Gm'})
+ cl('BufferSecurityCheck', map={'true': '/GS', 'false': '/GS-'})
+ cl('BasicRuntimeChecks', map={'1': 's', '2': 'u', '3': '1'}, prefix='/RTC')
+ cl('RuntimeLibrary',
+ map={'0': 'T', '1': 'Td', '2': 'D', '3': 'Dd'}, prefix='/M')
+ cl('ExceptionHandling', map={'1': 'sc','2': 'a'}, prefix='/EH')
+ cl('DefaultCharIsUnsigned', map={'true': '/J'})
+ cl('TreatWChar_tAsBuiltInType',
+ map={'false': '-', 'true': ''}, prefix='/Zc:wchar_t')
+ cl('EnablePREfast', map={'true': '/analyze'})
+ cl('AdditionalOptions', prefix='')
+ cl('EnableEnhancedInstructionSet',
+ map={'1': 'SSE', '2': 'SSE2', '3': 'AVX', '4': 'IA32', '5': 'AVX2'},
+ prefix='/arch:')
+ cflags.extend(['/FI' + f for f in self._Setting(
+ ('VCCLCompilerTool', 'ForcedIncludeFiles'), config, default=[])])
+ if version_to_tuple(self.vs_version.project_version) >= (12, 0):
+ # New flag introduced in VS2013 (project version 12.0). Forces writes to
+ # the program database (PDB) to be serialized through MSPDBSRV.EXE.
+ # https://msdn.microsoft.com/en-us/library/dn502518.aspx
+ cflags.append('/FS')
+ # ninja handles parallelism by itself, don't have the compiler do it too.
+ cflags = [x for x in cflags if not x.startswith('/MP')]
+ return cflags
+
+ def _GetPchFlags(self, config, extension):
+ """Get the flags to be added to the cflags for precompiled header support.
+ """
+ config = self._TargetConfig(config)
+ # The PCH is only built once by a particular source file. Usage of PCH must
+ # only be for the same language (i.e. C vs. C++), so only include the pch
+ # flags when the language matches.
+ if self.msvs_precompiled_header[config]:
+ source_ext = os.path.splitext(self.msvs_precompiled_source[config])[1]
+ if _LanguageMatchesForPch(source_ext, extension):
+ pch = self.msvs_precompiled_header[config]
+ pchbase = os.path.split(pch)[1]
+ return ['/Yu' + pch, '/FI' + pch, '/Fp${pchprefix}.' + pchbase + '.pch']
+ return []
+
+ def GetCflagsC(self, config):
+ """Returns the flags that need to be added to .c compilations."""
+ config = self._TargetConfig(config)
+ return self._GetPchFlags(config, '.c')
+
+ def GetCflagsCC(self, config):
+ """Returns the flags that need to be added to .cc compilations."""
+ config = self._TargetConfig(config)
+ return ['/TP'] + self._GetPchFlags(config, '.cc')
+
+ def _GetAdditionalLibraryDirectories(self, root, config, gyp_to_build_path):
+ """Get and normalize the list of paths in AdditionalLibraryDirectories
+ setting."""
+ config = self._TargetConfig(config)
+ libpaths = self._Setting((root, 'AdditionalLibraryDirectories'),
+ config, default=[])
+ libpaths = [os.path.normpath(
+ gyp_to_build_path(self.ConvertVSMacros(p, config=config)))
+ for p in libpaths]
+ return ['/LIBPATH:"' + p + '"' for p in libpaths]
+
+ def GetLibFlags(self, config, gyp_to_build_path):
+ """Returns the flags that need to be added to lib commands."""
+ config = self._TargetConfig(config)
+ libflags = []
+ lib = self._GetWrapper(self, self.msvs_settings[config],
+ 'VCLibrarianTool', append=libflags)
+ libflags.extend(self._GetAdditionalLibraryDirectories(
+ 'VCLibrarianTool', config, gyp_to_build_path))
+ lib('LinkTimeCodeGeneration', map={'true': '/LTCG'})
+ lib('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
+ prefix='/MACHINE:')
+ lib('AdditionalOptions')
+ return libflags
+
+ def GetDefFile(self, gyp_to_build_path):
+ """Returns the .def file from sources, if any. Otherwise returns None."""
+ spec = self.spec
+ if spec['type'] in ('shared_library', 'loadable_module', 'executable'):
+ def_files = [s for s in spec.get('sources', [])
+ if s.lower().endswith('.def')]
+ if len(def_files) == 1:
+ return gyp_to_build_path(def_files[0])
+ elif len(def_files) > 1:
+ raise Exception("Multiple .def files")
+ return None
+
+ def _GetDefFileAsLdflags(self, ldflags, gyp_to_build_path):
+ """.def files get implicitly converted to a ModuleDefinitionFile for the
+ linker in the VS generator. Emulate that behaviour here."""
+ def_file = self.GetDefFile(gyp_to_build_path)
+ if def_file:
+ ldflags.append('/DEF:"%s"' % def_file)
+
+ def GetPGDName(self, config, expand_special):
+ """Gets the explicitly overridden pgd name for a target or returns None
+ if it's not overridden."""
+ config = self._TargetConfig(config)
+ output_file = self._Setting(
+ ('VCLinkerTool', 'ProfileGuidedDatabase'), config)
+ if output_file:
+ output_file = expand_special(self.ConvertVSMacros(
+ output_file, config=config))
+ return output_file
+
+ def GetLdflags(self, config, gyp_to_build_path, expand_special,
+ manifest_base_name, output_name, is_executable, build_dir):
+ """Returns the flags that need to be added to link commands, and the
+ manifest files."""
+ config = self._TargetConfig(config)
+ ldflags = []
+ ld = self._GetWrapper(self, self.msvs_settings[config],
+ 'VCLinkerTool', append=ldflags)
+ self._GetDefFileAsLdflags(ldflags, gyp_to_build_path)
+ ld('GenerateDebugInformation', map={'true': '/DEBUG'})
+ ld('TargetMachine', map={'1': 'X86', '17': 'X64', '3': 'ARM'},
+ prefix='/MACHINE:')
+ ldflags.extend(self._GetAdditionalLibraryDirectories(
+ 'VCLinkerTool', config, gyp_to_build_path))
+ ld('DelayLoadDLLs', prefix='/DELAYLOAD:')
+ ld('TreatLinkerWarningAsErrors', prefix='/WX',
+ map={'true': '', 'false': ':NO'})
+ out = self.GetOutputName(config, expand_special)
+ if out:
+ ldflags.append('/OUT:' + out)
+ pdb = self.GetPDBName(config, expand_special, output_name + '.pdb')
+ if pdb:
+ ldflags.append('/PDB:' + pdb)
+ pgd = self.GetPGDName(config, expand_special)
+ if pgd:
+ ldflags.append('/PGD:' + pgd)
+ map_file = self.GetMapFileName(config, expand_special)
+ ld('GenerateMapFile', map={'true': '/MAP:' + map_file if map_file
+ else '/MAP'})
+ ld('MapExports', map={'true': '/MAPINFO:EXPORTS'})
+ ld('AdditionalOptions', prefix='')
+
+ minimum_required_version = self._Setting(
+ ('VCLinkerTool', 'MinimumRequiredVersion'), config, default='')
+ if minimum_required_version:
+ minimum_required_version = ',' + minimum_required_version
+ ld('SubSystem',
+ map={'1': 'CONSOLE%s' % minimum_required_version,
+ '2': 'WINDOWS%s' % minimum_required_version},
+ prefix='/SUBSYSTEM:')
+
+ stack_reserve_size = self._Setting(
+ ('VCLinkerTool', 'StackReserveSize'), config, default='')
+ if stack_reserve_size:
+ stack_commit_size = self._Setting(
+ ('VCLinkerTool', 'StackCommitSize'), config, default='')
+ if stack_commit_size:
+ stack_commit_size = ',' + stack_commit_size
+ ldflags.append('/STACK:%s%s' % (stack_reserve_size, stack_commit_size))
+
+ ld('TerminalServerAware', map={'1': ':NO', '2': ''}, prefix='/TSAWARE')
+ ld('LinkIncremental', map={'1': ':NO', '2': ''}, prefix='/INCREMENTAL')
+ ld('BaseAddress', prefix='/BASE:')
+ ld('FixedBaseAddress', map={'1': ':NO', '2': ''}, prefix='/FIXED')
+ ld('RandomizedBaseAddress',
+ map={'1': ':NO', '2': ''}, prefix='/DYNAMICBASE')
+ ld('DataExecutionPrevention',
+ map={'1': ':NO', '2': ''}, prefix='/NXCOMPAT')
+ ld('OptimizeReferences', map={'1': 'NOREF', '2': 'REF'}, prefix='/OPT:')
+ ld('ForceSymbolReferences', prefix='/INCLUDE:')
+ ld('EnableCOMDATFolding', map={'1': 'NOICF', '2': 'ICF'}, prefix='/OPT:')
+ ld('LinkTimeCodeGeneration',
+ map={'1': '', '2': ':PGINSTRUMENT', '3': ':PGOPTIMIZE',
+ '4': ':PGUPDATE'},
+ prefix='/LTCG')
+ ld('IgnoreDefaultLibraryNames', prefix='/NODEFAULTLIB:')
+ ld('ResourceOnlyDLL', map={'true': '/NOENTRY'})
+ ld('EntryPointSymbol', prefix='/ENTRY:')
+ ld('Profile', map={'true': '/PROFILE'})
+ ld('LargeAddressAware',
+ map={'1': ':NO', '2': ''}, prefix='/LARGEADDRESSAWARE')
+ # TODO(scottmg): This should sort of be somewhere else (not really a flag).
+ ld('AdditionalDependencies', prefix='')
+
+ if self.GetArch(config) == 'x86':
+ safeseh_default = 'true'
+ else:
+ safeseh_default = None
+ ld('ImageHasSafeExceptionHandlers',
+ map={'false': ':NO', 'true': ''}, prefix='/SAFESEH',
+ default=safeseh_default)
+
+ # If the base address is not specifically controlled, DYNAMICBASE should
+ # be on by default.
+ if not any('DYNAMICBASE' in flag or flag == '/FIXED' for flag in ldflags):
+ ldflags.append('/DYNAMICBASE')
+
+ # If the NXCOMPAT flag has not been specified, default to on. Despite the
+ # documentation that says this only defaults to on when the subsystem is
+ # Vista or greater (which applies to the linker), the IDE defaults it on
+ # unless it's explicitly off.
+ if not any('NXCOMPAT' in flag for flag in ldflags):
+ ldflags.append('/NXCOMPAT')
+
+ have_def_file = any(flag.startswith('/DEF:') for flag in ldflags)
+ manifest_flags, intermediate_manifest, manifest_files = \
+ self._GetLdManifestFlags(config, manifest_base_name, gyp_to_build_path,
+ is_executable and not have_def_file, build_dir)
+ ldflags.extend(manifest_flags)
+ return ldflags, intermediate_manifest, manifest_files
+
+ def _GetLdManifestFlags(self, config, name, gyp_to_build_path,
+ allow_isolation, build_dir):
+ """Returns a 3-tuple:
+ - the set of flags that need to be added to the link to generate
+ a default manifest
+ - the intermediate manifest that the linker will generate that should be
+ used to assert it doesn't add anything to the merged one.
+ - the list of all the manifest files to be merged by the manifest tool and
+ included into the link."""
+ generate_manifest = self._Setting(('VCLinkerTool', 'GenerateManifest'),
+ config,
+ default='true')
+ if generate_manifest != 'true':
+ # This means not only that the linker should not generate the intermediate
+ # manifest but also that the manifest tool should do nothing even when
+ # additional manifests are specified.
+ return ['/MANIFEST:NO'], [], []
+
+ output_name = name + '.intermediate.manifest'
+ flags = [
+ '/MANIFEST',
+ '/ManifestFile:' + output_name,
+ ]
+
+ # Instead of using the MANIFESTUAC flags, we generate a .manifest to
+ # include into the list of manifests. This allows us to avoid the need to
+ # do two passes during linking. The /MANIFEST flag and /ManifestFile are
+ # still used, and the intermediate manifest is used to assert that the
+ # final manifest we get from merging all the additional manifest files
+ # (plus the one we generate here) isn't modified by merging the
+ # intermediate into it.
+
+ # Always NO, because we generate a manifest file that has what we want.
+ flags.append('/MANIFESTUAC:NO')
+
+ config = self._TargetConfig(config)
+ enable_uac = self._Setting(('VCLinkerTool', 'EnableUAC'), config,
+ default='true')
+ manifest_files = []
+ generated_manifest_outer = \
+"<?xml version='1.0' encoding='UTF-8' standalone='yes'?>" \
+"<assembly xmlns='urn:schemas-microsoft-com:asm.v1' manifestVersion='1.0'>%s" \
+"</assembly>"
+ if enable_uac == 'true':
+ execution_level = self._Setting(('VCLinkerTool', 'UACExecutionLevel'),
+ config, default='0')
+ execution_level_map = {
+ '0': 'asInvoker',
+ '1': 'highestAvailable',
+ '2': 'requireAdministrator'
+ }
+
+ ui_access = self._Setting(('VCLinkerTool', 'UACUIAccess'), config,
+ default='false')
+
+ inner = '''
+<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level='%s' uiAccess='%s' />
+ </requestedPrivileges>
+ </security>
+</trustInfo>''' % (execution_level_map[execution_level], ui_access)
+ else:
+ inner = ''
+
+ generated_manifest_contents = generated_manifest_outer % inner
+ generated_name = name + '.generated.manifest'
+ # Need to join with the build_dir here as we're writing it during
+ # generation time, but we return the un-joined version because the build
+ # will occur in that directory. We only write the file if the contents
+ # have changed so that simply regenerating the project files doesn't
+ # cause a relink.
+ build_dir_generated_name = os.path.join(build_dir, generated_name)
+ gyp.common.EnsureDirExists(build_dir_generated_name)
+ f = gyp.common.WriteOnDiff(build_dir_generated_name)
+ f.write(generated_manifest_contents)
+ f.close()
+ manifest_files = [generated_name]
+
+ if allow_isolation:
+ flags.append('/ALLOWISOLATION')
+
+ manifest_files += self._GetAdditionalManifestFiles(config,
+ gyp_to_build_path)
+ return flags, output_name, manifest_files
+
+ def _GetAdditionalManifestFiles(self, config, gyp_to_build_path):
+ """Gets additional manifest files that are added to the default one
+ generated by the linker."""
+ files = self._Setting(('VCManifestTool', 'AdditionalManifestFiles'), config,
+ default=[])
+ if isinstance(files, str):
+ files = files.split(';')
+ return [os.path.normpath(
+ gyp_to_build_path(self.ConvertVSMacros(f, config=config)))
+ for f in files]
+
+ def IsUseLibraryDependencyInputs(self, config):
+ """Returns whether the target should be linked via Use Library Dependency
+ Inputs (using component .objs of a given .lib)."""
+ config = self._TargetConfig(config)
+ uldi = self._Setting(('VCLinkerTool', 'UseLibraryDependencyInputs'), config)
+ return uldi == 'true'
+
+ def IsEmbedManifest(self, config):
+ """Returns whether manifest should be linked into binary."""
+ config = self._TargetConfig(config)
+ embed = self._Setting(('VCManifestTool', 'EmbedManifest'), config,
+ default='true')
+ return embed == 'true'
+
+ def IsLinkIncremental(self, config):
+ """Returns whether the target should be linked incrementally."""
+ config = self._TargetConfig(config)
+ link_inc = self._Setting(('VCLinkerTool', 'LinkIncremental'), config)
+ return link_inc != '1'
+
+ def GetRcflags(self, config, gyp_to_ninja_path):
+ """Returns the flags that need to be added to invocations of the resource
+ compiler."""
+ config = self._TargetConfig(config)
+ rcflags = []
+ rc = self._GetWrapper(self, self.msvs_settings[config],
+ 'VCResourceCompilerTool', append=rcflags)
+ rc('AdditionalIncludeDirectories', map=gyp_to_ninja_path, prefix='/I')
+ rcflags.append('/I' + gyp_to_ninja_path('.'))
+ rc('PreprocessorDefinitions', prefix='/d')
+ # /l arg must be in hex without leading '0x'
+ rc('Culture', prefix='/l', map=lambda x: hex(int(x))[2:])
+ return rcflags
+
+ def BuildCygwinBashCommandLine(self, args, path_to_base):
+ """Build a command line that runs args via cygwin bash. We assume that all
+ incoming paths are in Windows normpath'd form, so they need to be
+ converted to posix style for the part of the command line that's passed to
+ bash. We also have to do some Visual Studio macro emulation here because
+ various rules use magic VS names for things. Also note that rules that
+ contain ninja variables cannot be fixed here (for example ${source}), so
+ the outer generator needs to make sure that the paths that are written out
+ are in posix style, if the command line will be used here."""
+ cygwin_dir = os.path.normpath(
+ os.path.join(path_to_base, self.msvs_cygwin_dirs[0]))
+ cd = ('cd %s' % path_to_base).replace('\\', '/')
+ args = [a.replace('\\', '/').replace('"', '\\"') for a in args]
+ args = ["'%s'" % a.replace("'", "'\\''") for a in args]
+ bash_cmd = ' '.join(args)
+ cmd = (
+ 'call "%s\\setup_env.bat" && set CYGWIN=nontsec && ' % cygwin_dir +
+ 'bash -c "%s ; %s"' % (cd, bash_cmd))
+ return cmd
+
+ def IsRuleRunUnderCygwin(self, rule):
+ """Determine if an action should be run under cygwin. If the variable is
+ unset, or set to 1 we use cygwin."""
+ return int(rule.get('msvs_cygwin_shell',
+ self.spec.get('msvs_cygwin_shell', 1))) != 0
+
+ def _HasExplicitRuleForExtension(self, spec, extension):
+ """Determine if there's an explicit rule for a particular extension."""
+ for rule in spec.get('rules', []):
+ if rule['extension'] == extension:
+ return True
+ return False
+
+ def _HasExplicitIdlActions(self, spec):
+ """Determine if an action should not run midl for .idl files."""
+ return any([action.get('explicit_idl_action', 0)
+ for action in spec.get('actions', [])])
+
+ def HasExplicitIdlRulesOrActions(self, spec):
+ """Determine if there's an explicit rule or action for idl files. When
+ there isn't we need to generate implicit rules to build MIDL .idl files."""
+ return (self._HasExplicitRuleForExtension(spec, 'idl') or
+ self._HasExplicitIdlActions(spec))
+
+ def HasExplicitAsmRules(self, spec):
+ """Determine if there's an explicit rule for asm files. When there isn't we
+ need to generate implicit rules to assemble .asm files."""
+ return self._HasExplicitRuleForExtension(spec, 'asm')
+
+ def GetIdlBuildData(self, source, config):
+ """Determine the implicit outputs for an idl file. Returns output
+ directory, outputs, and variables and flags that are required."""
+ config = self._TargetConfig(config)
+ midl_get = self._GetWrapper(self, self.msvs_settings[config], 'VCMIDLTool')
+ def midl(name, default=None):
+ return self.ConvertVSMacros(midl_get(name, default=default),
+ config=config)
+ tlb = midl('TypeLibraryName', default='${root}.tlb')
+ header = midl('HeaderFileName', default='${root}.h')
+ dlldata = midl('DLLDataFileName', default='dlldata.c')
+ iid = midl('InterfaceIdentifierFileName', default='${root}_i.c')
+ proxy = midl('ProxyFileName', default='${root}_p.c')
+ # Note that .tlb is not included in the outputs as it is not always
+ # generated depending on the content of the input idl file.
+ outdir = midl('OutputDirectory', default='')
+ output = [header, dlldata, iid, proxy]
+ variables = [('tlb', tlb),
+ ('h', header),
+ ('dlldata', dlldata),
+ ('iid', iid),
+ ('proxy', proxy)]
+ # TODO(scottmg): Are there configuration settings to set these flags?
+ target_platform = 'win32' if self.GetArch(config) == 'x86' else 'x64'
+ flags = ['/char', 'signed', '/env', target_platform, '/Oicf']
+ return outdir, output, variables, flags
+
+
+def _LanguageMatchesForPch(source_ext, pch_source_ext):
+ c_exts = ('.c',)
+ cc_exts = ('.cc', '.cxx', '.cpp')
+ return ((source_ext in c_exts and pch_source_ext in c_exts) or
+ (source_ext in cc_exts and pch_source_ext in cc_exts))
+
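+# Illustrative example (not upstream): only the C vs. C++ family matters,
+# not the exact extension:
+#   _LanguageMatchesForPch('.cc', '.cpp') -> True
+#   _LanguageMatchesForPch('.c', '.cc')   -> False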
+
+class PrecompiledHeader(object):
+ """Helper to generate dependencies and build rules to handle generation of
+ precompiled headers. Interface matches the GCH handler in xcode_emulation.py.
+ """
+ def __init__(
+ self, settings, config, gyp_to_build_path, gyp_to_unique_output, obj_ext):
+ self.settings = settings
+ self.config = config
+ pch_source = self.settings.msvs_precompiled_source[self.config]
+ self.pch_source = gyp_to_build_path(pch_source)
+ filename, _ = os.path.splitext(pch_source)
+ self.output_obj = gyp_to_unique_output(filename + obj_ext).lower()
+
+ def _PchHeader(self):
+ """Get the header that will appear in an #include line for all source
+ files."""
+ return self.settings.msvs_precompiled_header[self.config]
+
+ def GetObjDependencies(self, sources, objs, arch):
+ """Given a list of sources files and the corresponding object files,
+ returns a list of the pch files that should be depended upon. The
+ additional wrapping in the return value is for interface compatibility
+ with make.py on Mac, and xcode_emulation.py."""
+ assert arch is None
+ if not self._PchHeader():
+ return []
+ pch_ext = os.path.splitext(self.pch_source)[1]
+ for source in sources:
+ if _LanguageMatchesForPch(os.path.splitext(source)[1], pch_ext):
+ return [(None, None, self.output_obj)]
+ return []
+
+ def GetPchBuildCommands(self, arch):
+ """Not used on Windows as there are no additional build steps required
+ (instead, existing steps are modified in GetFlagsModifications below)."""
+ return []
+
+ def GetFlagsModifications(self, input, output, implicit, command,
+ cflags_c, cflags_cc, expand_special):
+ """Get the modified cflags and implicit dependencies that should be used
+ for the pch compilation step."""
+ if input == self.pch_source:
+ pch_output = ['/Yc' + self._PchHeader()]
+ if command == 'cxx':
+ # Materialize with list() so the flags survive iteration under Python 3.
+ return ([('cflags_cc', list(map(expand_special, cflags_cc + pch_output)))],
+ self.output_obj, [])
+ elif command == 'cc':
+ return ([('cflags_c', list(map(expand_special, cflags_c + pch_output)))],
+ self.output_obj, [])
+ return [], output, implicit
+
+
+vs_version = None
+def GetVSVersion(generator_flags):
+ global vs_version
+ if not vs_version:
+ vs_version = gyp.MSVSVersion.SelectVisualStudioVersion(
+ generator_flags.get('msvs_version', 'auto'),
+ allow_fallback=False)
+ return vs_version
+
+def _GetVsvarsSetupArgs(generator_flags, arch):
+ vs = GetVSVersion(generator_flags)
+ return vs.SetupScript()
+
+def ExpandMacros(string, expansions):
+ """Expand $(Variable) per expansions dict. See MsvsSettings.GetVSMacroEnv
+ for the canonical way to retrieve a suitable dict."""
+ if '$' in string:
+ for old, new in expansions.items():
+ assert '$(' not in new, new
+ string = string.replace(old, new)
+ return string
+
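+# Illustrative example (not upstream): macros are plain string replacements
+# driven by the dict from MsvsSettings.GetVSMacroEnv:
+#   ExpandMacros('$(TargetName).pdb', {'$(TargetName)': 'app'}) -> 'app.pdb'
+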
+def _ExtractImportantEnvironment(output_of_set):
+ """Extracts environment variables required for the toolchain to run from
+ a textual dump output by the cmd.exe 'set' command."""
+ envvars_to_save = (
+ 'goma_.*', # TODO(scottmg): This is ugly, but needed for goma.
+ 'include',
+ 'lib',
+ 'libpath',
+ 'path',
+ 'pathext',
+ 'systemroot',
+ 'temp',
+ 'tmp',
+ )
+ env = {}
+ # This occasionally happens and leads to misleading SYSTEMROOT error messages
+ # if not caught here.
+ if output_of_set.count('=') == 0:
+ raise Exception('Invalid output_of_set. Value is:\n%s' % output_of_set)
+ for line in output_of_set.splitlines():
+ for envvar in envvars_to_save:
+ if re.match(envvar + '=', line.lower()):
+ var, setting = line.split('=', 1)
+ if envvar == 'path':
+ # Our own rules (for running gyp-win-tool) and other actions in
+ # Chromium rely on python being in the path. Add the path to this
+ # python here so that if it's not in the path when ninja is run
+ # later, python will still be found.
+ setting = os.path.dirname(sys.executable) + os.pathsep + setting
+ env[var.upper()] = setting
+ break
+ for required in ('SYSTEMROOT', 'TEMP', 'TMP'):
+ if required not in env:
+ raise Exception('Environment variable "%s" '
+ 'required to be set to valid path' % required)
+ return env
+
+def _FormatAsEnvironmentBlock(envvar_dict):
+ """Format as an 'environment block' directly suitable for CreateProcess.
+ Briefly this is a list of key=value\0, terminated by an additional \0. See
+ CreateProcess documentation for more details."""
+ block = ''
+ nul = '\0'
+ for key, value in envvar_dict.items():
+ block += key + '=' + value + nul
+ block += nul
+ return block
+
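+# Illustrative example (not upstream): each variable becomes 'KEY=value'
+# followed by a NUL, with one extra NUL terminating the whole block:
+#   _FormatAsEnvironmentBlock({'PATH': 'C:\\x'}) -> 'PATH=C:\\x\x00\x00'
+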
+def _ExtractCLPath(output_of_where):
+ """Gets the path to cl.exe based on the output of calling the environment
+ setup batch file, followed by the equivalent of `where`."""
+ # Take the first line, as that's the first found in the PATH.
+ for line in output_of_where.strip().splitlines():
+ if line.startswith('LOC:'):
+ return line[len('LOC:'):].strip()
+
+def GenerateEnvironmentFiles(toplevel_build_dir, generator_flags,
+ system_includes, open_out):
+ """It's not sufficient to have the absolute path to the compiler, linker,
+ etc. on Windows, as those tools rely on .dlls being in the PATH. We also
+ need to support both x86 and x64 compilers within the same build (to support
+ msvs_target_platform hackery). Different architectures require a different
+ compiler binary, and different supporting environment variables (INCLUDE,
+ LIB, LIBPATH). So, we extract the environment here, wrap all invocations
+ of compiler tools (cl, link, lib, rc, midl, etc.) via win_tool.py which
+ sets up the environment, and then we do not prefix the compiler with
+ an absolute path, instead preferring something like "cl.exe" in the rule
+ which will then run whichever the environment setup has put in the path.
+ If the following procedure for generating environment files does not
+ meet your requirements (e.g. for custom toolchains), you can pass
+ "-G ninja_use_custom_environment_files" to gyp to suppress file
+ generation and use custom environment files you have prepared yourself."""
+ archs = ('x86', 'x64')
+ if generator_flags.get('ninja_use_custom_environment_files', 0):
+ cl_paths = {}
+ for arch in archs:
+ cl_paths[arch] = 'cl.exe'
+ return cl_paths
+ vs = GetVSVersion(generator_flags)
+ cl_paths = {}
+ for arch in archs:
+ # Extract environment variables for subprocesses.
+ args = vs.SetupScript(arch)
+ args.extend(('&&', 'set'))
+ popen = subprocess.Popen(
+ args, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ variables, _ = popen.communicate()
+ if popen.returncode != 0:
+ raise Exception('"%s" failed with error %d' % (args, popen.returncode))
+ env = _ExtractImportantEnvironment(variables)
+
+ # Inject system includes from gyp files into INCLUDE.
+ if system_includes:
+ system_includes = system_includes | OrderedSet(
+ env.get('INCLUDE', '').split(';'))
+ env['INCLUDE'] = ';'.join(system_includes)
+
+ env_block = _FormatAsEnvironmentBlock(env)
+ f = open_out(os.path.join(toplevel_build_dir, 'environment.' + arch), 'w')
+ f.write(env_block)
+ f.close()
+
+ # Find cl.exe location for this architecture.
+ args = vs.SetupScript(arch)
+ args.extend(('&&',
+ 'for', '%i', 'in', '(cl.exe)', 'do', '@echo', 'LOC:%~$PATH:i'))
+ popen = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE,
+ universal_newlines=True)
+ output, _ = popen.communicate()
+ cl_paths[arch] = _ExtractCLPath(output)
+ return cl_paths
+
+def VerifyMissingSources(sources, build_dir, generator_flags, gyp_to_ninja):
+ """Emulate behavior of msvs_error_on_missing_sources present in the msvs
+ generator: Check that all regular source files, i.e. not created at run time,
+ exist on disk. Missing files cause needless recompilation when building via
+ VS, and we want this check to match for people/bots that build using ninja,
+ so they're not surprised when the VS build fails."""
+ if int(generator_flags.get('msvs_error_on_missing_sources', 0)):
+ no_specials = filter(lambda x: '$' not in x, sources)
+ relative = [os.path.join(build_dir, gyp_to_ninja(s)) for s in no_specials]
+ missing = [x for x in relative if not os.path.exists(x)]
+ if missing:
+ # They'll look like out\Release\..\..\stuff\things.cc, so normalize the
+ # path for a slightly less crazy looking output.
+ cleaned_up = [os.path.normpath(x) for x in missing]
+ raise Exception('Missing input files:\n%s' % '\n'.join(cleaned_up))
+
+# Sets some values in default_variables, which are required for many
+# generators, run on Windows.
+def CalculateCommonVariables(default_variables, params):
+ generator_flags = params.get('generator_flags', {})
+
+ # Set a variable so conditions can be based on msvs_version.
+ msvs_version = gyp.msvs_emulation.GetVSVersion(generator_flags)
+ default_variables['MSVS_VERSION'] = msvs_version.ShortName()
+
+ # To determine processor word size on Windows, in addition to checking
+ # PROCESSOR_ARCHITECTURE (which reflects the word size of the current
+ # process), it is also necessary to check PROCESSOR_ARCHITEW6432 (which
+ # contains the actual word size of the system when running thru WOW64).
+ if ('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
+ '64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')):
+ default_variables['MSVS_OS_BITS'] = 64
+ else:
+ default_variables['MSVS_OS_BITS'] = 32
diff --git a/third_party/python/gyp/pylib/gyp/ninja_syntax.py b/third_party/python/gyp/pylib/gyp/ninja_syntax.py
new file mode 100644
index 0000000000..95e894276e
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/ninja_syntax.py
@@ -0,0 +1,168 @@
+# This file comes from
+# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
+# Do not edit! Edit the upstream one instead.
+
+"""Python module for generating .ninja files.
+
+Note that this is emphatically not a required piece of Ninja; it's
+just a helpful utility for build-file-generation systems that already
+use Python.
+"""
+
+import textwrap
+import re
+
+def escape_path(word):
+ return word.replace('$ ','$$ ').replace(' ','$ ').replace(':', '$:')
+
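+# Illustrative example (not upstream): '$ ' escapes a literal space and '$:'
+# a literal colon in ninja syntax:
+#   escape_path('a b:c') -> 'a$ b$:c'
+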
+class Writer(object):
+ def __init__(self, output, width=78):
+ self.output = output
+ self.width = width
+
+ def newline(self):
+ self.output.write('\n')
+
+ def comment(self, text):
+ for line in textwrap.wrap(text, self.width - 2):
+ self.output.write('# ' + line + '\n')
+
+ def variable(self, key, value, indent=0):
+ if value is None:
+ return
+ if isinstance(value, list):
+ value = ' '.join(filter(None, value)) # Filter out empty strings.
+ self._line('%s = %s' % (key, value), indent)
+
+ def pool(self, name, depth):
+ self._line('pool %s' % name)
+ self.variable('depth', depth, indent=1)
+
+ def rule(self, name, command, description=None, depfile=None,
+ generator=False, pool=None, restat=False, rspfile=None,
+ rspfile_content=None, deps=None):
+ self._line('rule %s' % name)
+ self.variable('command', command, indent=1)
+ if description:
+ self.variable('description', description, indent=1)
+ if depfile:
+ self.variable('depfile', depfile, indent=1)
+ if generator:
+ self.variable('generator', '1', indent=1)
+ if pool:
+ self.variable('pool', pool, indent=1)
+ if restat:
+ self.variable('restat', '1', indent=1)
+ if rspfile:
+ self.variable('rspfile', rspfile, indent=1)
+ if rspfile_content:
+ self.variable('rspfile_content', rspfile_content, indent=1)
+ if deps:
+ self.variable('deps', deps, indent=1)
+
+ def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
+ variables=None):
+ outputs = self._as_list(outputs)
+ all_inputs = self._as_list(inputs)[:]
+ out_outputs = list(map(escape_path, outputs))
+ all_inputs = list(map(escape_path, all_inputs))
+
+ if implicit:
+ implicit = map(escape_path, self._as_list(implicit))
+ all_inputs.append('|')
+ all_inputs.extend(implicit)
+ if order_only:
+ order_only = map(escape_path, self._as_list(order_only))
+ all_inputs.append('||')
+ all_inputs.extend(order_only)
+
+ self._line('build %s: %s' % (' '.join(out_outputs),
+ ' '.join([rule] + all_inputs)))
+
+ if variables:
+ if isinstance(variables, dict):
+ iterator = iter(variables.items())
+ else:
+ iterator = iter(variables)
+
+ for key, val in iterator:
+ self.variable(key, val, indent=1)
+
+ return outputs
+
+ def include(self, path):
+ self._line('include %s' % path)
+
+ def subninja(self, path):
+ self._line('subninja %s' % path)
+
+ def default(self, paths):
+ self._line('default %s' % ' '.join(self._as_list(paths)))
+
+ def _count_dollars_before_index(self, s, i):
+ """Returns the number of '$' characters right in front of s[i]."""
+ dollar_count = 0
+ dollar_index = i - 1
+ while dollar_index > 0 and s[dollar_index] == '$':
+ dollar_count += 1
+ dollar_index -= 1
+ return dollar_count
+
+ def _line(self, text, indent=0):
+ """Write 'text' word-wrapped at self.width characters."""
+ leading_space = ' ' * indent
+ while len(leading_space) + len(text) > self.width:
+ # The text is too wide; wrap if possible.
+
+ # Find the rightmost space that would obey our width constraint and
+ # that's not an escaped space.
+ available_space = self.width - len(leading_space) - len(' $')
+ space = available_space
+ while True:
+ space = text.rfind(' ', 0, space)
+ if space < 0 or \
+ self._count_dollars_before_index(text, space) % 2 == 0:
+ break
+
+ if space < 0:
+ # No such space; just use the first unescaped space we can find.
+ space = available_space - 1
+ while True:
+ space = text.find(' ', space + 1)
+ if space < 0 or \
+ self._count_dollars_before_index(text, space) % 2 == 0:
+ break
+ if space < 0:
+ # Give up on breaking.
+ break
+
+ self.output.write(leading_space + text[0:space] + ' $\n')
+ text = text[space+1:]
+
+ # Subsequent lines are continuations, so indent them.
+ leading_space = ' ' * (indent+2)
+
+ self.output.write(leading_space + text + '\n')
+
+ def _as_list(self, input):
+ if input is None:
+ return []
+ if isinstance(input, list):
+ return input
+
+ # map is not a class in Python 2
+ try:
+ if isinstance(input, map):
+ return list(input)
+ except TypeError:
+ pass
+
+ return [input]
+
+
+def escape(string):
+ """Escape a string such that it can be embedded into a Ninja file without
+ further interpretation."""
+ assert '\n' not in string, 'Ninja syntax does not allow newlines'
+ # We only have one special metacharacter: '$'.
+ return string.replace('$', '$$')
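+
+# Illustrative example (not upstream): a minimal Writer session, assuming an
+# open file-like object 'buf' (e.g. io.StringIO()):
+#   w = Writer(buf)
+#   w.rule('cc', command='gcc -c $in -o $out')
+#   w.build('foo.o', 'cc', inputs='foo.c')
+# emits:
+#   rule cc
+#     command = gcc -c $in -o $out
+#   build foo.o: cc foo.c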
diff --git a/third_party/python/gyp/pylib/gyp/simple_copy.py b/third_party/python/gyp/pylib/gyp/simple_copy.py
new file mode 100644
index 0000000000..58a61c3423
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/simple_copy.py
@@ -0,0 +1,57 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A clone of the default copy.deepcopy that doesn't handle cyclic
+structures or complex types except for dicts and lists. This is
+because gyp copies so large structure that small copy overhead ends up
+taking seconds in a project the size of Chromium."""
+
+class Error(Exception):
+ pass
+
+__all__ = ["Error", "deepcopy"]
+
+def deepcopy(x):
+ """Deep copy operation on gyp objects such as strings, ints, dicts
+ and lists. More than twice as fast as copy.deepcopy but much less
+ generic."""
+
+ try:
+ return _deepcopy_dispatch[type(x)](x)
+ except KeyError:
+ # Parenthesize the concatenated message so that %-formatting applies to
+ # the whole string, not just the second literal.
+ raise Error(('Unsupported type %s for deepcopy. Use copy.deepcopy '
+ 'or expand simple_copy support.') % type(x))
+
+_deepcopy_dispatch = d = {}
+
+def _deepcopy_atomic(x):
+ return x
+
+try:
+ _string_types = (str, unicode)
+# There's no unicode in python3
+except NameError:
+ _string_types = (str, )
+
+try:
+ _integer_types = (int, long)
+# There's no long in python3
+except NameError:
+ _integer_types = (int, )
+
+for x in (type(None), float, bool, type) + _integer_types + _string_types:
+ d[x] = _deepcopy_atomic
+
+def _deepcopy_list(x):
+ return [deepcopy(a) for a in x]
+d[list] = _deepcopy_list
+
+def _deepcopy_dict(x):
+ y = {}
+ for key, value in x.items():
+ y[deepcopy(key)] = deepcopy(value)
+ return y
+d[dict] = _deepcopy_dict
+
+del d
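+
+# Illustrative example (not upstream): supported containers copy recursively,
+# anything else raises simple_copy.Error:
+#   deepcopy({'a': [1, 2]}) -> {'a': [1, 2]}  (new dict, new inner list)
+#   deepcopy(set())         -> raises Error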
diff --git a/third_party/python/gyp/pylib/gyp/win_tool.py b/third_party/python/gyp/pylib/gyp/win_tool.py
new file mode 100755
index 0000000000..e7c0dd81f7
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/win_tool.py
@@ -0,0 +1,331 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Utility functions for Windows builds.
+
+These functions are executed via gyp-win-tool when using the ninja generator.
+"""
+
+from __future__ import print_function
+
+import os
+import re
+import shutil
+import subprocess
+import stat
+import string
+import sys
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+
+# A regex matching an argument corresponding to the output filename passed to
+# link.exe.
+_LINK_EXE_OUT_ARG = re.compile('/OUT:(?P<out>.+)$', re.IGNORECASE)
+
+def main(args):
+ executor = WinTool()
+ exit_code = executor.Dispatch(args)
+ if exit_code is not None:
+ sys.exit(exit_code)
+
+
+class WinTool(object):
+ """This class performs all the Windows tooling steps. The methods can either
+ be executed directly, or dispatched from an argument list."""
+
+ def _UseSeparateMspdbsrv(self, env, args):
+ """Allows to use a unique instance of mspdbsrv.exe per linker instead of a
+ shared one."""
+ if len(args) < 1:
+ raise Exception("Not enough arguments")
+
+ if args[0] != 'link.exe':
+ return
+
+ # Use the output filename passed to the linker to generate an endpoint name
+ # for mspdbsrv.exe.
+ endpoint_name = None
+ for arg in args:
+ m = _LINK_EXE_OUT_ARG.match(arg)
+ if m:
+ endpoint_name = re.sub(r'\W+', '',
+ '%s_%d' % (m.group('out'), os.getpid()))
+ break
+
+ if endpoint_name is None:
+ return
+
+ # Adds the appropriate environment variable. This will be read by link.exe
+ # to know which instance of mspdbsrv.exe it should connect to (if it's
+ # not set then the default endpoint is used).
+ env['_MSPDBSRV_ENDPOINT_'] = endpoint_name
+
+ def Dispatch(self, args):
+ """Dispatches a string command to a method."""
+ if len(args) < 1:
+ raise Exception("Not enough arguments")
+
+ method = "Exec%s" % self._CommandifyName(args[0])
+ return getattr(self, method)(*args[1:])
+
+ def _CommandifyName(self, name_string):
+ """Transforms a tool name like recursive-mirror to RecursiveMirror."""
+ return name_string.title().replace('-', '')
+
+ def _GetEnv(self, arch):
+ """Gets the saved environment from a file for a given architecture."""
+ # The environment is saved as an "environment block" (see CreateProcess
+ # and msvs_emulation for details). We convert to a dict here.
+ # Drop last 2 NULs, one for list terminator, one for trailing vs. separator.
+ pairs = open(arch).read()[:-2].split('\0')
+ kvs = [item.split('=', 1) for item in pairs]
+ return dict(kvs)
+
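+ # Illustrative example (not upstream): the block 'A=1\0B=2\0\0' produced by
+ # msvs_emulation._FormatAsEnvironmentBlock round-trips through _GetEnv to
+ # {'A': '1', 'B': '2'}.
+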
+ def ExecStamp(self, path):
+ """Simple stamp command."""
+ open(path, 'w').close()
+
+ def ExecRecursiveMirror(self, source, dest):
+ """Emulation of rm -rf out && cp -af in out."""
+ if os.path.exists(dest):
+ if os.path.isdir(dest):
+ def _on_error(fn, path, excinfo):
+ # The operation failed, possibly because the file is set to
+ # read-only. If that's why, make it writable and try the op again.
+ if not os.access(path, os.W_OK):
+ os.chmod(path, stat.S_IWRITE)
+ fn(path)
+ shutil.rmtree(dest, onerror=_on_error)
+ else:
+ if not os.access(dest, os.W_OK):
+ # Attempt to make the file writable before deleting it.
+ os.chmod(dest, stat.S_IWRITE)
+ os.unlink(dest)
+
+ if os.path.isdir(source):
+ shutil.copytree(source, dest)
+ else:
+ shutil.copy2(source, dest)
+
+ def ExecLinkWrapper(self, arch, use_separate_mspdbsrv, *args):
+ """Filter diagnostic output from link that looks like:
+ ' Creating library ui.dll.lib and object ui.dll.exp'
+ This happens when there are exports from the dll or exe.
+ """
+ env = self._GetEnv(arch)
+ if use_separate_mspdbsrv == 'True':
+ self._UseSeparateMspdbsrv(env, args)
+ if sys.platform == 'win32':
+ args = list(args) # *args is a tuple by default, which is read-only.
+ args[0] = args[0].replace('/', '\\')
+ # https://docs.python.org/2/library/subprocess.html:
+ # "On Unix with shell=True [...] if args is a sequence, the first item
+ # specifies the command string, and any additional items will be treated as
+ # additional arguments to the shell itself. That is to say, Popen does the
+ # equivalent of:
+ # Popen(['/bin/sh', '-c', args[0], args[1], ...])"
+ # For that reason, since going through the shell doesn't seem necessary on
+ # non-Windows don't do that there.
+ link = subprocess.Popen(args, shell=sys.platform == 'win32', env=env,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ out, _ = link.communicate()
+ for line in out.splitlines():
+ if (not line.startswith(' Creating library ') and
+ not line.startswith('Generating code') and
+ not line.startswith('Finished generating code')):
+ print(line)
+ return link.returncode
+
+ def ExecLinkWithManifests(self, arch, embed_manifest, out, ldcmd, resname,
+ mt, rc, intermediate_manifest, *manifests):
+ """A wrapper for handling creating a manifest resource and then executing
+ a link command."""
+ # The 'normal' way to do manifests is to have link generate a manifest
+ # based on gathering dependencies from the object files, then merge that
+ # manifest with other manifests supplied as sources, convert the merged
+ # manifest to a resource, and then *relink*, including the compiled
+ # version of the manifest resource. This breaks incremental linking, and
+ # is generally overly complicated. Instead, we merge all the manifests
+ # provided (along with one that includes what would normally be in the
+ # linker-generated one, see msvs_emulation.py), and include that into the
+ # first and only link. We still tell link to generate a manifest, but we
+ # only use that to assert that our simpler process did not miss anything.
+ variables = {
+ 'python': sys.executable,
+ 'arch': arch,
+ 'out': out,
+ 'ldcmd': ldcmd,
+ 'resname': resname,
+ 'mt': mt,
+ 'rc': rc,
+ 'intermediate_manifest': intermediate_manifest,
+ 'manifests': ' '.join(manifests),
+ }
+ add_to_ld = ''
+ if manifests:
+ subprocess.check_call(
+ '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
+ '-manifest %(manifests)s -out:%(out)s.manifest' % variables)
+ if embed_manifest == 'True':
+ subprocess.check_call(
+ '%(python)s gyp-win-tool manifest-to-rc %(arch)s %(out)s.manifest'
+ ' %(out)s.manifest.rc %(resname)s' % variables)
+ subprocess.check_call(
+ '%(python)s gyp-win-tool rc-wrapper %(arch)s %(rc)s '
+ '%(out)s.manifest.rc' % variables)
+ add_to_ld = ' %(out)s.manifest.res' % variables
+ subprocess.check_call(ldcmd + add_to_ld)
+
+ # Run mt.exe on the theoretically complete manifest we generated, merging
+ # it with the one the linker generated to confirm that the linker
+ # generated one does not add anything. This is strictly unnecessary for
+ # correctness, it's only to verify that e.g. /MANIFESTDEPENDENCY was not
+ # used in a #pragma comment.
+ if manifests:
+ # Merge the intermediate one with ours to .assert.manifest, then check
+ # that .assert.manifest is identical to ours.
+ subprocess.check_call(
+ '%(python)s gyp-win-tool manifest-wrapper %(arch)s %(mt)s -nologo '
+ '-manifest %(out)s.manifest %(intermediate_manifest)s '
+ '-out:%(out)s.assert.manifest' % variables)
+ assert_manifest = '%(out)s.assert.manifest' % variables
+ our_manifest = '%(out)s.manifest' % variables
+ # Load and normalize the manifests. mt.exe sometimes removes whitespace,
+ # and sometimes doesn't unfortunately.
+ with open(our_manifest, 'r') as our_f:
+ with open(assert_manifest, 'r') as assert_f:
+ our_data = re.sub(r'\s+', '', our_f.read())
+ assert_data = re.sub(r'\s+', '', assert_f.read())
+ if our_data != assert_data:
+ os.unlink(out)
+ def dump(filename):
+ print(filename, file=sys.stderr)
+ print('-----', file=sys.stderr)
+ with open(filename, 'r') as f:
+ print(f.read(), file=sys.stderr)
+ print('-----', file=sys.stderr)
+ dump(intermediate_manifest)
+ dump(our_manifest)
+ dump(assert_manifest)
+ sys.stderr.write(
+ 'Linker generated manifest "%s" added to final manifest "%s" '
+ '(result in "%s"). '
+ 'Were /MANIFEST switches used in #pragma statements? ' % (
+ intermediate_manifest, our_manifest, assert_manifest))
+ return 1
+
+ def ExecManifestWrapper(self, arch, *args):
+    """Runs the manifest tool with the environment set. Strips out an
+    undesirable warning (some XML blocks are recognized by the OS loader,
+    but not by the manifest tool)."""
+ env = self._GetEnv(arch)
+ popen = subprocess.Popen(args, shell=True, env=env,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ out, _ = popen.communicate()
+ for line in out.splitlines():
+ if line and 'manifest authoring warning 81010002' not in line:
+ print(line)
+ return popen.returncode
+
+ def ExecManifestToRc(self, arch, *args):
+    """Creates a resource file pointing to a SxS assembly manifest.
+    |args| is a tuple containing the path to the manifest file, the path to
+    the resource file, and the resource name, which can be "1" (for
+    executables) or "2" (for DLLs)."""
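+    # The generated .rc file then looks like, for example (path
+    # illustrative):
+    #   #include <windows.h>
+    #   1 RT_MANIFEST "C:/src/out/Default/ui.dll.manifest"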
+ manifest_path, resource_path, resource_name = args
+ with open(resource_path, 'w') as output:
+ output.write('#include <windows.h>\n%s RT_MANIFEST "%s"' % (
+ resource_name,
+ os.path.abspath(manifest_path).replace('\\', '/')))
+
+ def ExecMidlWrapper(self, arch, outdir, tlb, h, dlldata, iid, proxy, idl,
+ *flags):
+    """Filters noisy filename output from the MIDL compile step that isn't
+    quietable via command-line flags.
+    """
+ args = ['midl', '/nologo'] + list(flags) + [
+ '/out', outdir,
+ '/tlb', tlb,
+ '/h', h,
+ '/dlldata', dlldata,
+ '/iid', iid,
+ '/proxy', proxy,
+ idl]
+ env = self._GetEnv(arch)
+ popen = subprocess.Popen(args, shell=True, env=env,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ out, _ = popen.communicate()
+    # Filter junk out of stdout, and write the filtered version. Output we
+    # want to filter is pairs of lines that look like this:
+ # Processing C:\Program Files (x86)\Microsoft SDKs\...\include\objidl.idl
+ # objidl.idl
+ lines = out.splitlines()
+ prefixes = ('Processing ', '64 bit Processing ')
+ processing = set(os.path.basename(x)
+ for x in lines if x.startswith(prefixes))
+ for line in lines:
+ if not line.startswith(prefixes) and line not in processing:
+ print(line)
+ return popen.returncode
+
+ def ExecAsmWrapper(self, arch, *args):
+ """Filter logo banner from invocations of asm.exe."""
+ env = self._GetEnv(arch)
+ popen = subprocess.Popen(args, shell=True, env=env,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ out, _ = popen.communicate()
+ for line in out.splitlines():
+ if (not line.startswith('Copyright (C) Microsoft Corporation') and
+ not line.startswith('Microsoft (R) Macro Assembler') and
+ not line.startswith(' Assembling: ') and
+ line):
+ print(line)
+ return popen.returncode
+
+ def ExecRcWrapper(self, arch, *args):
+ """Filter logo banner from invocations of rc.exe. Older versions of RC
+ don't support the /nologo flag."""
+ env = self._GetEnv(arch)
+ popen = subprocess.Popen(args, shell=True, env=env,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
+ universal_newlines=True)
+ out, _ = popen.communicate()
+ for line in out.splitlines():
+ if (not line.startswith('Microsoft (R) Windows (R) Resource Compiler') and
+ not line.startswith('Copyright (C) Microsoft Corporation') and
+ line):
+ print(line)
+ return popen.returncode
+
+ def ExecActionWrapper(self, arch, rspfile, *dir):
+ """Runs an action command line from a response file using the environment
+ for |arch|. If |dir| is supplied, use that as the working directory."""
+ env = self._GetEnv(arch)
+ # TODO(scottmg): This is a temporary hack to get some specific variables
+ # through to actions that are set after gyp-time. http://crbug.com/333738.
+ for k, v in os.environ.items():
+ if k not in env:
+ env[k] = v
+ args = open(rspfile).read()
+ dir = dir[0] if dir else None
+ return subprocess.call(args, shell=True, env=env, cwd=dir)
+
+ def ExecClCompile(self, project_dir, selected_files):
+ """Executed by msvs-ninja projects when the 'ClCompile' target is used to
+ build selected C/C++ files."""
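+    # Builds ninja targets of the form '<project_dir>/<filename>^^' (one per
+    # selected source file), matching the per-source-file targets that the
+    # msvs-ninja projects expose.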
+ project_dir = os.path.relpath(project_dir, BASE_DIR)
+ selected_files = selected_files.split(';')
+ ninja_targets = [os.path.join(project_dir, filename) + '^^'
+ for filename in selected_files]
+ cmd = ['ninja.exe']
+ cmd.extend(ninja_targets)
+ return subprocess.call(cmd, shell=True, cwd=BASE_DIR)
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv[1:]))
diff --git a/third_party/python/gyp/pylib/gyp/xcode_emulation.py b/third_party/python/gyp/pylib/gyp/xcode_emulation.py
new file mode 100644
index 0000000000..ca76187b2f
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/xcode_emulation.py
@@ -0,0 +1,1800 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This module contains classes that help to emulate xcodebuild behavior on top of
+other build systems, such as make and ninja.
+"""
+
+from __future__ import print_function
+
+import copy
+import gyp.common
+import os
+import os.path
+import re
+import shlex
+import subprocess
+import sys
+import tempfile
+from gyp.common import GypError
+
+# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
+# "xcodebuild" is called too quickly (it has been found to return an
+# incorrect version number).
+XCODE_VERSION_CACHE = None
+
+# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
+# corresponding to the installed version of Xcode.
+XCODE_ARCHS_DEFAULT_CACHE = None
+
+
+def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
+  """Constructs a dictionary with the expansion for the $(ARCHS_STANDARD)
+  variable, and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
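+  # For example, XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64'])
+  # returns {'$(ARCHS_STANDARD)': ['i386'],
+  #          '$(ARCHS_STANDARD_INCLUDING_64_BIT)': ['i386', 'x86_64']}.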
+ mapping = {'$(ARCHS_STANDARD)': archs}
+ if archs_including_64_bit:
+ mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
+ return mapping
+
+class XcodeArchsDefault(object):
+  """A class to resolve the ARCHS variable from xcode_settings, resolving
+  Xcode macros and implementing filtering by VALID_ARCHS. The expansion of
+  macros depends on the SDKROOT used ("macosx", "iphoneos",
+  "iphonesimulator") and on the version of Xcode.
+  """
+
+ # Match variable like $(ARCHS_STANDARD).
+ variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
+
+ def __init__(self, default, mac, iphonesimulator, iphoneos):
+ self._default = (default,)
+ self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
+
+ def _VariableMapping(self, sdkroot):
+    """Returns the dictionary of variable mappings depending on the SDKROOT."""
+ sdkroot = sdkroot.lower()
+ if 'iphoneos' in sdkroot:
+ return self._archs['ios']
+ elif 'iphonesimulator' in sdkroot:
+ return self._archs['iossim']
+ else:
+ return self._archs['mac']
+
+ def _ExpandArchs(self, archs, sdkroot):
+    """Expands variable references in ARCHS, and removes duplicates."""
+ variable_mapping = self._VariableMapping(sdkroot)
+ expanded_archs = []
+ for arch in archs:
+ if self.variable_pattern.match(arch):
+ variable = arch
+ try:
+ variable_expansion = variable_mapping[variable]
+ for arch in variable_expansion:
+ if arch not in expanded_archs:
+ expanded_archs.append(arch)
+        except KeyError:
+ print('Warning: Ignoring unsupported variable "%s".' % variable)
+ elif arch not in expanded_archs:
+ expanded_archs.append(arch)
+ return expanded_archs
+
+ def ActiveArchs(self, archs, valid_archs, sdkroot):
+    """Expands variable references in ARCHS, and filters by VALID_ARCHS if
+    it is defined (if VALID_ARCHS is not set, Xcode accepts any value in
+    ARCHS; otherwise, only values present in VALID_ARCHS are kept)."""
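+    # For illustration, with the Xcode >= 5.1 defaults constructed in
+    # GetXcodeArchsDefault() below:
+    #   ActiveArchs(['$(ARCHS_STANDARD)'], ['x86_64'], 'macosx10.9')
+    # expands $(ARCHS_STANDARD) through the "mac" mapping to ['x86_64'],
+    # which survives the VALID_ARCHS filter, so ['x86_64'] is returned.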
+ expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
+ if valid_archs:
+ filtered_archs = []
+ for arch in expanded_archs:
+ if arch in valid_archs:
+ filtered_archs.append(arch)
+ expanded_archs = filtered_archs
+ return expanded_archs
+
+
+def GetXcodeArchsDefault():
+  """Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
+  installed version of Xcode. The default values used by Xcode for ARCHS
+  and the expansion of the variables depend on the version of Xcode used.
+
+  Versions prior to Xcode 5.0, and Xcode 5.1 and later, use
+  $(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 use
+  $(ARCHS_STANDARD_INCLUDING_64_BIT). That variable was added in Xcode 5.0
+  and deprecated with Xcode 5.1.
+
+  For the "macosx" SDKROOT, all versions starting with Xcode 5.0 include the
+  64-bit architecture as part of $(ARCHS_STANDARD) and default to building
+  only it.
+
+  For the "iphoneos" and "iphonesimulator" SDKROOTs, 64-bit architectures
+  are part of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode
+  5.1, they are also part of $(ARCHS_STANDARD).
+
+  All those rules are coded in the construction of the |XcodeArchsDefault|
+  object to use depending on the version of Xcode detected. The object is
+  cached for performance reasons."""
+ global XCODE_ARCHS_DEFAULT_CACHE
+ if XCODE_ARCHS_DEFAULT_CACHE:
+ return XCODE_ARCHS_DEFAULT_CACHE
+ xcode_version, _ = XcodeVersion()
+ if xcode_version < '0500':
+ XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
+ '$(ARCHS_STANDARD)',
+ XcodeArchsVariableMapping(['i386']),
+ XcodeArchsVariableMapping(['i386']),
+ XcodeArchsVariableMapping(['armv7']))
+ elif xcode_version < '0510':
+ XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
+ '$(ARCHS_STANDARD_INCLUDING_64_BIT)',
+ XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
+ XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
+ XcodeArchsVariableMapping(
+ ['armv7', 'armv7s'],
+ ['armv7', 'armv7s', 'arm64']))
+ else:
+ XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
+ '$(ARCHS_STANDARD)',
+ XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
+ XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
+ XcodeArchsVariableMapping(
+ ['armv7', 'armv7s', 'arm64'],
+ ['armv7', 'armv7s', 'arm64']))
+ return XCODE_ARCHS_DEFAULT_CACHE
+
+
+class XcodeSettings(object):
+ """A class that understands the gyp 'xcode_settings' object."""
+
+ # Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
+ # at class-level for efficiency.
+ _sdk_path_cache = {}
+ _platform_path_cache = {}
+ _sdk_root_cache = {}
+
+ # Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
+ # cached at class-level for efficiency.
+ _plist_cache = {}
+
+ # Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
+ # cached at class-level for efficiency.
+ _codesigning_key_cache = {}
+
+ def __init__(self, spec):
+ self.spec = spec
+
+ self.isIOS = False
+ self.mac_toolchain_dir = None
+ self.header_map_path = None
+
+ # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
+ # This means self.xcode_settings[config] always contains all settings
+ # for that config -- the per-target settings as well. Settings that are
+ # the same for all configs are implicitly per-target settings.
+ self.xcode_settings = {}
+ configs = spec['configurations']
+ for configname, config in configs.items():
+ self.xcode_settings[configname] = config.get('xcode_settings', {})
+ self._ConvertConditionalKeys(configname)
+ if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
+ None):
+ self.isIOS = True
+
+ # This is only non-None temporarily during the execution of some methods.
+ self.configname = None
+
+ # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
+ self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
+
+ def _ConvertConditionalKeys(self, configname):
+    """Converts or warns on conditional keys. Xcode supports conditional
+    keys, such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial
+    implementation: some keys are converted, while the rest trigger a
+    warning."""
+ settings = self.xcode_settings[configname]
+ conditional_keys = [key for key in settings if key.endswith(']')]
+ for key in conditional_keys:
+ # If you need more, speak up at http://crbug.com/122592
+ if key.endswith("[sdk=iphoneos*]"):
+ if configname.endswith("iphoneos"):
+ new_key = key.split("[")[0]
+ settings[new_key] = settings[key]
+ else:
+        print('Warning: Conditional keys not implemented, ignoring:',
+              ' '.join(conditional_keys))
+ del settings[key]
+
+ def _Settings(self):
+ assert self.configname
+ return self.xcode_settings[self.configname]
+
+ def _Test(self, test_key, cond_key, default):
+ return self._Settings().get(test_key, default) == cond_key
+
+ def _Appendf(self, lst, test_key, format_str, default=None):
+ if test_key in self._Settings():
+ lst.append(format_str % str(self._Settings()[test_key]))
+ elif default:
+ lst.append(format_str % str(default))
+
+ def _WarnUnimplemented(self, test_key):
+ if test_key in self._Settings():
+ print('Warning: Ignoring not yet implemented key "%s".' % test_key)
+
+ def IsBinaryOutputFormat(self, configname):
+ default = "binary" if self.isIOS else "xml"
+    fmt = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
+                                              default)
+    return fmt == "binary"
+
+ def IsIosFramework(self):
+ return self.spec['type'] == 'shared_library' and self._IsBundle() and \
+ self.isIOS
+
+ def _IsBundle(self):
+ return int(self.spec.get('mac_bundle', 0)) != 0 or self._IsXCTest() or \
+ self._IsXCUiTest()
+
+ def _IsXCTest(self):
+ return int(self.spec.get('mac_xctest_bundle', 0)) != 0
+
+ def _IsXCUiTest(self):
+ return int(self.spec.get('mac_xcuitest_bundle', 0)) != 0
+
+ def _IsIosAppExtension(self):
+ return int(self.spec.get('ios_app_extension', 0)) != 0
+
+ def _IsIosWatchKitExtension(self):
+ return int(self.spec.get('ios_watchkit_extension', 0)) != 0
+
+ def _IsIosWatchApp(self):
+ return int(self.spec.get('ios_watch_app', 0)) != 0
+
+ def GetFrameworkVersion(self):
+ """Returns the framework version of the current target. Only valid for
+ bundles."""
+ assert self._IsBundle()
+ return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
+
+ def GetWrapperExtension(self):
+ """Returns the bundle extension (.app, .framework, .plugin, etc). Only
+ valid for bundles."""
+ assert self._IsBundle()
+ if self.spec['type'] in ('loadable_module', 'shared_library'):
+ default_wrapper_extension = {
+ 'loadable_module': 'bundle',
+ 'shared_library': 'framework',
+ }[self.spec['type']]
+ wrapper_extension = self.GetPerTargetSetting(
+ 'WRAPPER_EXTENSION', default=default_wrapper_extension)
+ return '.' + self.spec.get('product_extension', wrapper_extension)
+ elif self.spec['type'] == 'executable':
+ if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
+ return '.' + self.spec.get('product_extension', 'appex')
+ else:
+ return '.' + self.spec.get('product_extension', 'app')
+ else:
+ assert False, "Don't know extension for '%s', target '%s'" % (
+ self.spec['type'], self.spec['target_name'])
+
+ def GetProductName(self):
+ """Returns PRODUCT_NAME."""
+ return self.spec.get('product_name', self.spec['target_name'])
+
+ def GetFullProductName(self):
+ """Returns FULL_PRODUCT_NAME."""
+ if self._IsBundle():
+ return self.GetWrapperName()
+ else:
+ return self._GetStandaloneBinaryPath()
+
+ def GetWrapperName(self):
+ """Returns the directory name of the bundle represented by this target.
+ Only valid for bundles."""
+ assert self._IsBundle()
+ return self.GetProductName() + self.GetWrapperExtension()
+
+ def GetBundleContentsFolderPath(self):
+ """Returns the qualified path to the bundle's contents folder. E.g.
+ Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
+ if self.isIOS:
+ return self.GetWrapperName()
+ assert self._IsBundle()
+ if self.spec['type'] == 'shared_library':
+ return os.path.join(
+ self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
+ else:
+ # loadable_modules have a 'Contents' folder like executables.
+ return os.path.join(self.GetWrapperName(), 'Contents')
+
+ def GetBundleResourceFolder(self):
+ """Returns the qualified path to the bundle's resource folder. E.g.
+ Chromium.app/Contents/Resources. Only valid for bundles."""
+ assert self._IsBundle()
+ if self.isIOS:
+ return self.GetBundleContentsFolderPath()
+ return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
+
+ def GetBundleExecutableFolderPath(self):
+ """Returns the qualified path to the bundle's executables folder. E.g.
+ Chromium.app/Contents/MacOS. Only valid for bundles."""
+ assert self._IsBundle()
+    if self.spec['type'] == 'shared_library' or self.isIOS:
+ return self.GetBundleContentsFolderPath()
+ elif self.spec['type'] in ('executable', 'loadable_module'):
+ return os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
+
+ def GetBundleJavaFolderPath(self):
+ """Returns the qualified path to the bundle's Java resource folder.
+ E.g. Chromium.app/Contents/Resources/Java. Only valid for bundles."""
+ assert self._IsBundle()
+ return os.path.join(self.GetBundleResourceFolder(), 'Java')
+
+ def GetBundleFrameworksFolderPath(self):
+ """Returns the qualified path to the bundle's frameworks folder. E.g,
+ Chromium.app/Contents/Frameworks. Only valid for bundles."""
+ assert self._IsBundle()
+ return os.path.join(self.GetBundleContentsFolderPath(), 'Frameworks')
+
+ def GetBundleSharedFrameworksFolderPath(self):
+ """Returns the qualified path to the bundle's frameworks folder. E.g,
+ Chromium.app/Contents/SharedFrameworks. Only valid for bundles."""
+ assert self._IsBundle()
+ return os.path.join(self.GetBundleContentsFolderPath(),
+ 'SharedFrameworks')
+
+ def GetBundleSharedSupportFolderPath(self):
+ """Returns the qualified path to the bundle's shared support folder. E.g,
+ Chromium.app/Contents/SharedSupport. Only valid for bundles."""
+ assert self._IsBundle()
+ if self.spec['type'] == 'shared_library':
+ return self.GetBundleResourceFolder()
+ else:
+ return os.path.join(self.GetBundleContentsFolderPath(),
+ 'SharedSupport')
+
+ def GetBundlePlugInsFolderPath(self):
+ """Returns the qualified path to the bundle's plugins folder. E.g,
+ Chromium.app/Contents/PlugIns. Only valid for bundles."""
+ assert self._IsBundle()
+ return os.path.join(self.GetBundleContentsFolderPath(), 'PlugIns')
+
+ def GetBundleXPCServicesFolderPath(self):
+ """Returns the qualified path to the bundle's XPC services folder. E.g,
+ Chromium.app/Contents/XPCServices. Only valid for bundles."""
+ assert self._IsBundle()
+ return os.path.join(self.GetBundleContentsFolderPath(), 'XPCServices')
+
+ def GetBundlePlistPath(self):
+ """Returns the qualified path to the bundle's plist file. E.g.
+ Chromium.app/Contents/Info.plist. Only valid for bundles."""
+ assert self._IsBundle()
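+    # For a shared_library framework target named Foo with the default
+    # FRAMEWORK_VERSION 'A', for instance, this composes
+    # Foo.framework/Versions/A/Resources/Info.plist via
+    # GetBundleContentsFolderPath() above.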
+ if self.spec['type'] in ('executable', 'loadable_module') or \
+ self.IsIosFramework():
+ return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
+ else:
+ return os.path.join(self.GetBundleContentsFolderPath(),
+ 'Resources', 'Info.plist')
+
+ def GetProductType(self):
+ """Returns the PRODUCT_TYPE of this target."""
+ if self._IsIosAppExtension():
+ assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
+ '(target %s)' % self.spec['target_name'])
+ return 'com.apple.product-type.app-extension'
+ if self._IsIosWatchKitExtension():
+ assert self._IsBundle(), ('ios_watchkit_extension flag requires '
+ 'mac_bundle (target %s)' % self.spec['target_name'])
+ return 'com.apple.product-type.watchkit-extension'
+ if self._IsIosWatchApp():
+ assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
+ '(target %s)' % self.spec['target_name'])
+ return 'com.apple.product-type.application.watchapp'
+ if self._IsXCUiTest():
+ assert self._IsBundle(), ('mac_xcuitest_bundle flag requires mac_bundle '
+ '(target %s)' % self.spec['target_name'])
+ return 'com.apple.product-type.bundle.ui-testing'
+ if self._IsBundle():
+ return {
+ 'executable': 'com.apple.product-type.application',
+ 'loadable_module': 'com.apple.product-type.bundle',
+ 'shared_library': 'com.apple.product-type.framework',
+ }[self.spec['type']]
+ else:
+ return {
+ 'executable': 'com.apple.product-type.tool',
+ 'loadable_module': 'com.apple.product-type.library.dynamic',
+ 'shared_library': 'com.apple.product-type.library.dynamic',
+ 'static_library': 'com.apple.product-type.library.static',
+ }[self.spec['type']]
+
+ def GetMachOType(self):
+ """Returns the MACH_O_TYPE of this target."""
+ # Weird, but matches Xcode.
+ if not self._IsBundle() and self.spec['type'] == 'executable':
+ return ''
+ return {
+ 'executable': 'mh_execute',
+ 'static_library': 'staticlib',
+ 'shared_library': 'mh_dylib',
+ 'loadable_module': 'mh_bundle',
+ }[self.spec['type']]
+
+ def _GetBundleBinaryPath(self):
+    """Returns the name of the bundle binary produced by this target.
+    E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
+ assert self._IsBundle()
+ return os.path.join(self.GetBundleExecutableFolderPath(), \
+ self.GetExecutableName())
+
+ def _GetStandaloneExecutableSuffix(self):
+ if 'product_extension' in self.spec:
+ return '.' + self.spec['product_extension']
+ return {
+ 'executable': '',
+ 'static_library': '.a',
+ 'shared_library': '.dylib',
+ 'loadable_module': '.so',
+ }[self.spec['type']]
+
+ def _GetStandaloneExecutablePrefix(self):
+ return self.spec.get('product_prefix', {
+ 'executable': '',
+ 'static_library': 'lib',
+ 'shared_library': 'lib',
+ # Non-bundled loadable_modules are called foo.so for some reason
+ # (that is, .so and no prefix) with the xcode build -- match that.
+ 'loadable_module': '',
+ }[self.spec['type']])
+
+ def _GetStandaloneBinaryPath(self):
+ """Returns the name of the non-bundle binary represented by this target.
+ E.g. hello_world. Only valid for non-bundles."""
+ assert not self._IsBundle()
+ assert self.spec['type'] in (
+ 'executable', 'shared_library', 'static_library', 'loadable_module'), (
+ 'Unexpected type %s' % self.spec['type'])
+ target = self.spec['target_name']
+ if self.spec['type'] == 'static_library':
+ if target[:3] == 'lib':
+ target = target[3:]
+ elif self.spec['type'] in ('loadable_module', 'shared_library'):
+ if target[:3] == 'lib':
+ target = target[3:]
+
+ target_prefix = self._GetStandaloneExecutablePrefix()
+ target = self.spec.get('product_name', target)
+ target_ext = self._GetStandaloneExecutableSuffix()
+ return target_prefix + target + target_ext
+
+ def GetExecutableName(self):
+ """Returns the executable name of the bundle represented by this target.
+ E.g. Chromium."""
+ if self._IsBundle():
+ return self.spec.get('product_name', self.spec['target_name'])
+ else:
+ return self._GetStandaloneBinaryPath()
+
+ def GetExecutablePath(self):
+ """Returns the qualified path to the primary executable of the bundle
+ represented by this target. E.g. Chromium.app/Contents/MacOS/Chromium."""
+ if self._IsBundle():
+ return self._GetBundleBinaryPath()
+ else:
+ return self._GetStandaloneBinaryPath()
+
+ def GetActiveArchs(self, configname):
+ """Returns the architectures this target should be built for."""
+ config_settings = self.xcode_settings[configname]
+ xcode_archs_default = GetXcodeArchsDefault()
+ return xcode_archs_default.ActiveArchs(
+ config_settings.get('ARCHS'),
+ config_settings.get('VALID_ARCHS'),
+ config_settings.get('SDKROOT'))
+
+ def _GetSdkVersionInfoItem(self, sdk, infoitem):
+ # xcodebuild requires Xcode and can't run on Command Line Tools-only
+ # systems from 10.7 onward.
+ # Since the CLT has no SDK paths anyway, returning None is the
+ # most sensible route and should still do the right thing.
+ try:
+ return GetStdout(['xcrun', '--sdk', sdk, infoitem])
+ except:
+ pass
+
+ def _SdkRoot(self, configname):
+ if configname is None:
+ configname = self.configname
+ return self.GetPerConfigSetting('SDKROOT', configname, default='')
+
+ def _XcodePlatformPath(self, configname=None):
+ sdk_root = self._SdkRoot(configname)
+ if sdk_root not in XcodeSettings._platform_path_cache:
+ platform_path = self._GetSdkVersionInfoItem(sdk_root,
+ '--show-sdk-platform-path')
+ XcodeSettings._platform_path_cache[sdk_root] = platform_path
+ return XcodeSettings._platform_path_cache[sdk_root]
+
+ def _SdkPath(self, configname=None):
+ sdk_root = self._SdkRoot(configname)
+ if sdk_root.startswith('/'):
+ return sdk_root
+ return self._XcodeSdkPath(sdk_root)
+
+ def _XcodeSdkPath(self, sdk_root):
+ if sdk_root not in XcodeSettings._sdk_path_cache:
+ sdk_path = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-path')
+ XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
+ if sdk_root:
+ XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
+ return XcodeSettings._sdk_path_cache[sdk_root]
+
+ def _AppendPlatformVersionMinFlags(self, lst):
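+    # Turns the deployment-target settings into version-min flags, e.g.
+    # MACOSX_DEPLOYMENT_TARGET '10.9' becomes '-mmacosx-version-min=10.9';
+    # iOS targets get -mios-simulator-version-min or
+    # -miphoneos-version-min depending on the SDK.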
+ self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
+ if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
+ # TODO: Implement this better?
+ sdk_path_basename = os.path.basename(self._SdkPath())
+ if sdk_path_basename.lower().startswith('iphonesimulator'):
+ self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
+ '-mios-simulator-version-min=%s')
+ else:
+ self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
+ '-miphoneos-version-min=%s')
+
+ def GetCflags(self, configname, arch=None):
+ """Returns flags that need to be added to .c, .cc, .m, and .mm
+ compilations."""
+    # These functions (and the similar ones below) do not offer complete
+    # emulation of all xcode_settings keys. They're implemented on demand.
+
+ self.configname = configname
+ cflags = []
+
+ sdk_root = self._SdkPath()
+ if 'SDKROOT' in self._Settings() and sdk_root:
+ cflags.append('-isysroot %s' % sdk_root)
+
+ if self.header_map_path:
+ cflags.append('-I%s' % self.header_map_path)
+
+ if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
+ cflags.append('-Wconstant-conversion')
+
+ if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
+ cflags.append('-funsigned-char')
+
+ if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
+ cflags.append('-fasm-blocks')
+
+ if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
+ if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
+ cflags.append('-mdynamic-no-pic')
+ else:
+ pass
+        # TODO: In this case, it depends on the target. Xcode passes
+        # -mdynamic-no-pic by default for executables and possibly static
+        # libs according to mento.
+
+ if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
+ cflags.append('-mpascal-strings')
+
+ self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
+
+ if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
+ dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
+ if dbg_format == 'dwarf':
+ cflags.append('-gdwarf-2')
+ elif dbg_format == 'stabs':
+ raise NotImplementedError('stabs debug format is not supported yet.')
+ elif dbg_format == 'dwarf-with-dsym':
+ cflags.append('-gdwarf-2')
+ else:
+ raise NotImplementedError('Unknown debug format %s' % dbg_format)
+
+ if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
+ cflags.append('-fstrict-aliasing')
+ elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
+ cflags.append('-fno-strict-aliasing')
+
+ if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
+ cflags.append('-fvisibility=hidden')
+
+ if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
+ cflags.append('-Werror')
+
+ if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
+ cflags.append('-Wnewline-eof')
+
+ # In Xcode, this is only activated when GCC_COMPILER_VERSION is clang or
+ # llvm-gcc. It also requires a fairly recent libtool, and
+ # if the system clang isn't used, DYLD_LIBRARY_PATH needs to contain the
+ # path to the libLTO.dylib that matches the used clang.
+ if self._Test('LLVM_LTO', 'YES', default='NO'):
+ cflags.append('-flto')
+
+ self._AppendPlatformVersionMinFlags(cflags)
+
+ # TODO:
+ if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
+ self._WarnUnimplemented('COPY_PHASE_STRIP')
+ self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
+ self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
+
+ # TODO: This is exported correctly, but assigning to it is not supported.
+ self._WarnUnimplemented('MACH_O_TYPE')
+ self._WarnUnimplemented('PRODUCT_TYPE')
+
+ if arch is not None:
+ archs = [arch]
+ else:
+ assert self.configname
+ archs = self.GetActiveArchs(self.configname)
+ if len(archs) != 1:
+ # TODO: Supporting fat binaries will be annoying.
+ self._WarnUnimplemented('ARCHS')
+ archs = ['i386']
+ cflags.append('-arch ' + archs[0])
+
+ if archs[0] in ('i386', 'x86_64'):
+ if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
+ cflags.append('-msse3')
+ if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
+ default='NO'):
+ cflags.append('-mssse3') # Note 3rd 's'.
+ if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
+ cflags.append('-msse4.1')
+ if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
+ cflags.append('-msse4.2')
+
+ cflags += self._Settings().get('WARNING_CFLAGS', [])
+
+ platform_root = self._XcodePlatformPath(configname)
+ if platform_root and self._IsXCTest():
+ cflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
+
+ if sdk_root:
+ framework_root = sdk_root
+ else:
+ framework_root = ''
+ config = self.spec['configurations'][self.configname]
+ framework_dirs = config.get('mac_framework_dirs', [])
+ for directory in framework_dirs:
+ cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
+
+ self.configname = None
+ return cflags
+
+ def GetCflagsC(self, configname):
+ """Returns flags that need to be added to .c, and .m compilations."""
+ self.configname = configname
+ cflags_c = []
+ if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
+ cflags_c.append('-ansi')
+ else:
+ self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
+ cflags_c += self._Settings().get('OTHER_CFLAGS', [])
+ self.configname = None
+ return cflags_c
+
+ def GetCflagsCC(self, configname):
+ """Returns flags that need to be added to .cc, and .mm compilations."""
+ self.configname = configname
+ cflags_cc = []
+
+ clang_cxx_language_standard = self._Settings().get(
+ 'CLANG_CXX_LANGUAGE_STANDARD')
+    # Note: Don't map c++0x to c++11 so that c++0x can be used with older
+    # clangs that don't understand c++11 yet (like Xcode 4.2's).
+ if clang_cxx_language_standard:
+ cflags_cc.append('-std=%s' % clang_cxx_language_standard)
+
+ self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
+
+ if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
+ cflags_cc.append('-fno-rtti')
+ if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
+ cflags_cc.append('-fno-exceptions')
+ if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
+ cflags_cc.append('-fvisibility-inlines-hidden')
+ if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
+ cflags_cc.append('-fno-threadsafe-statics')
+ # Note: This flag is a no-op for clang, it only has an effect for gcc.
+ if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
+ cflags_cc.append('-Wno-invalid-offsetof')
+
+ other_ccflags = []
+
+ for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
+ # TODO: More general variable expansion. Missing in many other places too.
+ if flag in ('$inherited', '$(inherited)', '${inherited}'):
+ flag = '$OTHER_CFLAGS'
+ if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
+ other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
+ else:
+ other_ccflags.append(flag)
+ cflags_cc += other_ccflags
+
+ self.configname = None
+ return cflags_cc
+
+ def _AddObjectiveCGarbageCollectionFlags(self, flags):
+ gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
+ if gc_policy == 'supported':
+ flags.append('-fobjc-gc')
+ elif gc_policy == 'required':
+ flags.append('-fobjc-gc-only')
+
+ def _AddObjectiveCARCFlags(self, flags):
+ if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
+ flags.append('-fobjc-arc')
+ if self._Test('CLANG_ENABLE_OBJC_WEAK', 'YES', default='NO'):
+ flags.append('-fobjc-weak')
+
+ def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
+ if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
+ 'YES', default='NO'):
+ flags.append('-Wobjc-missing-property-synthesis')
+
+ def GetCflagsObjC(self, configname):
+ """Returns flags that need to be added to .m compilations."""
+ self.configname = configname
+ cflags_objc = []
+ self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
+ self._AddObjectiveCARCFlags(cflags_objc)
+ self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
+ self.configname = None
+ return cflags_objc
+
+ def GetCflagsObjCC(self, configname):
+ """Returns flags that need to be added to .mm compilations."""
+ self.configname = configname
+ cflags_objcc = []
+ self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
+ self._AddObjectiveCARCFlags(cflags_objcc)
+ self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
+ if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
+ cflags_objcc.append('-fobjc-call-cxx-cdtors')
+ self.configname = None
+ return cflags_objcc
+
+ def GetInstallNameBase(self):
+ """Return DYLIB_INSTALL_NAME_BASE for this target."""
+ # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
+ if (self.spec['type'] != 'shared_library' and
+ (self.spec['type'] != 'loadable_module' or self._IsBundle())):
+ return None
+ install_base = self.GetPerTargetSetting(
+ 'DYLIB_INSTALL_NAME_BASE',
+ default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
+ return install_base
+
+ def _StandardizePath(self, path):
+ """Do :standardizepath processing for path."""
+ # I'm not quite sure what :standardizepath does. Just call normpath(),
+ # but don't let @executable_path/../foo collapse to foo.
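+    # For example, os.path.normpath('@executable_path/../foo') would yield
+    # 'foo'; splitting off the '@executable_path' prefix first preserves
+    # '@executable_path/../foo'.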
+ if '/' in path:
+ prefix, rest = '', path
+ if path.startswith('@'):
+ prefix, rest = path.split('/', 1)
+ rest = os.path.normpath(rest) # :standardizepath
+ path = os.path.join(prefix, rest)
+ return path
+
+ def GetInstallName(self):
+ """Return LD_DYLIB_INSTALL_NAME for this target."""
+ # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
+ if (self.spec['type'] != 'shared_library' and
+ (self.spec['type'] != 'loadable_module' or self._IsBundle())):
+ return None
+
+ default_install_name = \
+ '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
+ install_name = self.GetPerTargetSetting(
+ 'LD_DYLIB_INSTALL_NAME', default=default_install_name)
+
+ # Hardcode support for the variables used in chromium for now, to
+ # unblock people using the make build.
+ if '$' in install_name:
+ assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
+ '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
+ 'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
+ 'yet in target \'%s\' (got \'%s\')' %
+ (self.spec['target_name'], install_name))
+
+ install_name = install_name.replace(
+ '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
+ self._StandardizePath(self.GetInstallNameBase()))
+ if self._IsBundle():
+ # These are only valid for bundles, hence the |if|.
+ install_name = install_name.replace(
+ '$(WRAPPER_NAME)', self.GetWrapperName())
+ install_name = install_name.replace(
+ '$(PRODUCT_NAME)', self.GetProductName())
+ else:
+ assert '$(WRAPPER_NAME)' not in install_name
+ assert '$(PRODUCT_NAME)' not in install_name
+
+ install_name = install_name.replace(
+ '$(EXECUTABLE_PATH)', self.GetExecutablePath())
+ return install_name
+
+ def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
+ """Checks if ldflag contains a filename and if so remaps it from
+ gyp-directory-relative to build-directory-relative."""
+ # This list is expanded on demand.
+ # They get matched as:
+ # -exported_symbols_list file
+ # -Wl,exported_symbols_list file
+ # -Wl,exported_symbols_list,file
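+    # For example (filename illustrative), the flag
+    # '-Wl,-exported_symbols_list,sym.list' has 'sym.list' remapped through
+    # gyp_to_build_path() while the rest of the flag is left untouched.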
+ LINKER_FILE = r'(\S+)'
+ WORD = r'\S+'
+ linker_flags = [
+ ['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
+ ['-unexported_symbols_list', LINKER_FILE],
+ ['-reexported_symbols_list', LINKER_FILE],
+ ['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
+ ]
+ for flag_pattern in linker_flags:
+ regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
+ m = regex.match(ldflag)
+ if m:
+ ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
+ ldflag[m.end(1):]
+ # Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
+ # TODO(thakis): Update ffmpeg.gyp):
+ if ldflag.startswith('-L'):
+ ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
+ return ldflag
+
+ def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
+ """Returns flags that need to be passed to the linker.
+
+ Args:
+ configname: The name of the configuration to get ld flags for.
+      product_dir: The directory where products such as static and dynamic
+          libraries are placed. This is added to the library search path.
+      gyp_to_build_path: A function that converts paths relative to the
+          current gyp file to paths relative to the build directory.
+ """
+ self.configname = configname
+ ldflags = []
+
+ # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
+ # can contain entries that depend on this. Explicitly absolutify these.
+ for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
+ ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
+
+ if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
+ ldflags.append('-Wl,-dead_strip')
+
+ if self._Test('PREBINDING', 'YES', default='NO'):
+ ldflags.append('-Wl,-prebind')
+
+ self._Appendf(
+ ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
+ self._Appendf(
+ ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
+
+ self._AppendPlatformVersionMinFlags(ldflags)
+
+ if 'SDKROOT' in self._Settings() and self._SdkPath():
+ ldflags.append('-isysroot ' + self._SdkPath())
+
+ for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
+ ldflags.append('-L' + gyp_to_build_path(library_path))
+
+ if 'ORDER_FILE' in self._Settings():
+ ldflags.append('-Wl,-order_file ' +
+ '-Wl,' + gyp_to_build_path(
+ self._Settings()['ORDER_FILE']))
+
+ if arch is not None:
+ archs = [arch]
+ else:
+ assert self.configname
+ archs = self.GetActiveArchs(self.configname)
+ if len(archs) != 1:
+ # TODO: Supporting fat binaries will be annoying.
+ self._WarnUnimplemented('ARCHS')
+ archs = ['i386']
+ ldflags.append('-arch ' + archs[0])
+
+ # Xcode adds the product directory by default.
+ # Rewrite -L. to -L./ to work around http://www.openradar.me/25313838
+ ldflags.append('-L' + (product_dir if product_dir != '.' else './'))
+
+ install_name = self.GetInstallName()
+ if install_name and self.spec['type'] != 'loadable_module':
+ ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
+
+ for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
+ ldflags.append('-Wl,-rpath,' + rpath)
+
+ sdk_root = self._SdkPath()
+ if not sdk_root:
+ sdk_root = ''
+ config = self.spec['configurations'][self.configname]
+ framework_dirs = config.get('mac_framework_dirs', [])
+ for directory in framework_dirs:
+ ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
+
+ platform_root = self._XcodePlatformPath(configname)
+ if sdk_root and platform_root and self._IsXCTest():
+ ldflags.append('-F' + platform_root + '/Developer/Library/Frameworks/')
+ ldflags.append('-framework XCTest')
+
+ is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
+ if sdk_root and is_extension:
+      # Adds the link flags for extensions. These flags are common for all
+      # extensions and provide the loader and the main function.
+      # These flags reflect the compilation options used by Xcode to compile
+      # extensions.
+ if XcodeVersion()[0] < '0900':
+ ldflags.append('-lpkstart')
+ ldflags.append(sdk_root +
+ '/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
+ else:
+ ldflags.append('-e _NSExtensionMain')
+ ldflags.append('-fapplication-extension')
+
+ self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
+
+ self.configname = None
+ return ldflags
+
+ def GetLibtoolflags(self, configname):
+ """Returns flags that need to be passed to the static linker.
+
+ Args:
+ configname: The name of the configuration to get ld flags for.
+ """
+ self.configname = configname
+ libtoolflags = []
+
+ for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
+ libtoolflags.append(libtoolflag)
+ # TODO(thakis): ARCHS?
+
+ self.configname = None
+ return libtoolflags
+
+ def GetPerTargetSettings(self):
+    """Gets a dictionary of all the per-target settings. This will only
+    fetch keys whose values are the same across all configurations."""
+ first_pass = True
+ result = {}
+ for configname in sorted(self.xcode_settings.keys()):
+ if first_pass:
+ result = dict(self.xcode_settings[configname])
+ first_pass = False
+ else:
+ for key, value in self.xcode_settings[configname].items():
+ if key not in result:
+ continue
+ elif result[key] != value:
+ del result[key]
+ return result
+
+ def GetPerConfigSetting(self, setting, configname, default=None):
+ if configname in self.xcode_settings:
+ return self.xcode_settings[configname].get(setting, default)
+ else:
+ return self.GetPerTargetSetting(setting, default)
+
+ def GetPerTargetSetting(self, setting, default=None):
+ """Tries to get xcode_settings.setting from spec. Assumes that the setting
+ has the same value in all configurations and throws otherwise."""
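+    # For example, if every configuration sets 'SDKROOT' to 'macosx', this
+    # returns 'macosx'; if one configuration overrides it, the assert below
+    # fires.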
+ is_first_pass = True
+ result = None
+ for configname in sorted(self.xcode_settings.keys()):
+ if is_first_pass:
+ result = self.xcode_settings[configname].get(setting, None)
+ is_first_pass = False
+ else:
+ assert result == self.xcode_settings[configname].get(setting, None), (
+ "Expected per-target setting for '%s', got per-config setting "
+ "(target %s)" % (setting, self.spec['target_name']))
+ if result is None:
+ return default
+ return result
+
+ def _GetStripPostbuilds(self, configname, output_binary, quiet):
+    """Returns a list of shell commands necessary to strip this target's
+    binary. These should be run as postbuilds before the actual postbuilds
+    run."""
+ self.configname = configname
+
+ result = []
+ if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
+ self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
+
+ default_strip_style = 'debugging'
+ if ((self.spec['type'] == 'loadable_module' or self._IsIosAppExtension())
+ and self._IsBundle()):
+ default_strip_style = 'non-global'
+ elif self.spec['type'] == 'executable':
+ default_strip_style = 'all'
+
+ strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
+ strip_flags = {
+ 'all': '',
+ 'non-global': '-x',
+ 'debugging': '-S',
+ }[strip_style]
+
+ explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
+ if explicit_strip_flags:
+ strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
+
+ if not quiet:
+ result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
+ result.append('strip %s %s' % (strip_flags, output_binary))
+
+ self.configname = None
+ return result
+
+ def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
+    """Returns a list of shell commands necessary to massage this target's
+    debug information. These should be run as postbuilds before the actual
+    postbuilds run."""
+ self.configname = configname
+
+ # For static libraries, no dSYMs are created.
+ result = []
+ if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
+ self._Test(
+ 'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
+ self.spec['type'] != 'static_library'):
+ if not quiet:
+ result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
+ result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
+
+ self.configname = None
+ return result
+
+ def _GetTargetPostbuilds(self, configname, output, output_binary,
+ quiet=False):
+ """Returns a list of shell commands that contain the shell commands
+ to run as postbuilds for this target, before the actual postbuilds."""
+ # dSYMs need to build before stripping happens.
+ return (
+ self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
+ self._GetStripPostbuilds(configname, output_binary, quiet))
+
+ def _GetIOSPostbuilds(self, configname, output_binary):
+    """Returns a list of shell commands to codesign the iOS output binary
+    so it can be deployed to a device. This should be run as the very last
+    step of the build."""
+ if not (self.isIOS and
+ (self.spec['type'] == 'executable' or self._IsXCTest()) or
+ self.IsIosFramework()):
+ return []
+
+ postbuilds = []
+ product_name = self.GetFullProductName()
+ settings = self.xcode_settings[configname]
+
+ # Xcode expects XCTests to be copied into the TEST_HOST dir.
+ if self._IsXCTest():
+ source = os.path.join("${BUILT_PRODUCTS_DIR}", product_name)
+      test_host = os.path.dirname(settings.get('TEST_HOST'))
+ xctest_destination = os.path.join(test_host, 'PlugIns', product_name)
+ postbuilds.extend(['ditto %s %s' % (source, xctest_destination)])
+
+ key = self._GetIOSCodeSignIdentityKey(settings)
+ if not key:
+ return postbuilds
+
+ # Warn for any unimplemented signing xcode keys.
+ unimpl = ['OTHER_CODE_SIGN_FLAGS']
+ unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
+ if unimpl:
+ print('Warning: Some codesign keys not implemented, ignoring: %s' % (
+ ', '.join(sorted(unimpl))))
+
+ if self._IsXCTest():
+ # For device xctests, Xcode copies two extra frameworks into $TEST_HOST.
+      test_host = os.path.dirname(settings.get('TEST_HOST'))
+ frameworks_dir = os.path.join(test_host, 'Frameworks')
+ platform_root = self._XcodePlatformPath(configname)
+ frameworks = \
+ ['Developer/Library/PrivateFrameworks/IDEBundleInjection.framework',
+ 'Developer/Library/Frameworks/XCTest.framework']
+ for framework in frameworks:
+ source = os.path.join(platform_root, framework)
+ destination = os.path.join(frameworks_dir, os.path.basename(framework))
+ postbuilds.extend(['ditto %s %s' % (source, destination)])
+
+ # Then re-sign everything with 'preserve=True'
+ postbuilds.extend(['%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % (
+ os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
+ settings.get('CODE_SIGN_ENTITLEMENTS', ''),
+ settings.get('PROVISIONING_PROFILE', ''), destination, True)
+ ])
+ plugin_dir = os.path.join(test_host, 'PlugIns')
+ targets = [os.path.join(plugin_dir, product_name), test_host]
+ for target in targets:
+ postbuilds.extend(['%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % (
+ os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
+ settings.get('CODE_SIGN_ENTITLEMENTS', ''),
+ settings.get('PROVISIONING_PROFILE', ''), target, True)
+ ])
+
+ postbuilds.extend(['%s code-sign-bundle "%s" "%s" "%s" "%s" %s' % (
+ os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
+ settings.get('CODE_SIGN_ENTITLEMENTS', ''),
+ settings.get('PROVISIONING_PROFILE', ''),
+ os.path.join("${BUILT_PRODUCTS_DIR}", product_name), False)
+ ])
+ return postbuilds
+
+ def _GetIOSCodeSignIdentityKey(self, settings):
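+    # Resolves the identity to a certificate fingerprint by scanning the
+    # output of `security find-identity -p codesigning -v`, whose matching
+    # lines look roughly like (illustrative):
+    #   1) 0123456789ABCDEF... "iPhone Developer: Jane Doe (TEAM123456)"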
+ identity = settings.get('CODE_SIGN_IDENTITY')
+ if not identity:
+ return None
+ if identity not in XcodeSettings._codesigning_key_cache:
+ output = subprocess.check_output(
+ ['security', 'find-identity', '-p', 'codesigning', '-v'])
+ for line in output.splitlines():
+ line_decoded = line.decode('utf-8')
+ if identity in line_decoded:
+ fingerprint = line_decoded.split()[1]
+ cache = XcodeSettings._codesigning_key_cache
+ assert identity not in cache or fingerprint == cache[identity], (
+ "Multiple codesigning fingerprints for identity: %s" % identity)
+ XcodeSettings._codesigning_key_cache[identity] = fingerprint
+ return XcodeSettings._codesigning_key_cache.get(identity, '')
+
+ def AddImplicitPostbuilds(self, configname, output, output_binary,
+ postbuilds=[], quiet=False):
+ """Returns a list of shell commands that should run before and after
+ |postbuilds|."""
+ assert output_binary is not None
+ pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
+ post = self._GetIOSPostbuilds(configname, output_binary)
+ return pre + postbuilds + post
+
+ def _AdjustLibrary(self, library, config_name=None):
+ if library.endswith('.framework'):
+ l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
+ else:
+ m = self.library_re.match(library)
+ if m:
+ l = '-l' + m.group(1)
+ else:
+ l = library
+
+ sdk_root = self._SdkPath(config_name)
+ if not sdk_root:
+ sdk_root = ''
+    # Xcode 7 started shipping with ".tbd" (text based stub) files instead
+    # of ".dylib" files, without providing real support for them. What it
+    # does for "/usr/lib" libraries is emit "-L/usr/lib -lname", which
+    # depends on the library order and causes collisions when building
+    # Chrome.
+    #
+    # Instead, substitute ".tbd" for ".dylib" in the generated project when
+    # the following conditions are both true:
+    # - the library is referenced in the gyp file as "$(SDKROOT)/**/*.dylib",
+    # - the ".dylib" file does not exist but a ".tbd" file does.
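+    # For example, "$(SDKROOT)/usr/lib/libz.dylib" is rewritten to the SDK's
+    # "usr/lib/libz.tbd" when only the ".tbd" stub exists on disk.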
+ library = l.replace('$(SDKROOT)', sdk_root)
+ if l.startswith('$(SDKROOT)'):
+ basename, ext = os.path.splitext(library)
+ if ext == '.dylib' and not os.path.exists(library):
+ tbd_library = basename + '.tbd'
+ if os.path.exists(tbd_library):
+ library = tbd_library
+ return library
+
+ def AdjustLibraries(self, libraries, config_name=None):
+ """Transforms entries like 'Cocoa.framework' in libraries into entries like
+ '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
+ """
+ libraries = [self._AdjustLibrary(library, config_name)
+ for library in libraries]
+ return libraries
+
+ def _BuildMachineOSBuild(self):
+ return GetStdout(['sw_vers', '-buildVersion'])
+
+ def _XcodeIOSDeviceFamily(self, configname):
+ family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
+ return [int(x) for x in family.split(',')]
+
+ def GetExtraPlistItems(self, configname=None):
+ """Returns a dictionary with extra items to insert into Info.plist."""
+ if configname not in XcodeSettings._plist_cache:
+ cache = {}
+ cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
+
+ xcode, xcode_build = XcodeVersion()
+ cache['DTXcode'] = xcode
+ cache['DTXcodeBuild'] = xcode_build
+ compiler = self.xcode_settings[configname].get('GCC_VERSION')
+ if compiler is not None:
+ cache['DTCompiler'] = compiler
+
+ sdk_root = self._SdkRoot(configname)
+ if not sdk_root:
+ sdk_root = self._DefaultSdkRoot()
+ sdk_version = self._GetSdkVersionInfoItem(sdk_root, '--show-sdk-version')
+ cache['DTSDKName'] = sdk_root + (sdk_version or '')
+ if xcode >= '0720':
+ cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
+ sdk_root, '--show-sdk-build-version')
+ elif xcode >= '0430':
+ cache['DTSDKBuild'] = sdk_version
+ else:
+ cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
+
+ if self.isIOS:
+ cache['MinimumOSVersion'] = self.xcode_settings[configname].get(
+ 'IPHONEOS_DEPLOYMENT_TARGET')
+ cache['DTPlatformName'] = sdk_root
+ cache['DTPlatformVersion'] = sdk_version
+
+ if configname.endswith("iphoneos"):
+ cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
+ cache['DTPlatformBuild'] = cache['DTSDKBuild']
+ else:
+ cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
+ # This is weird, but Xcode sets DTPlatformBuild to an empty field
+ # for simulator builds.
+ cache['DTPlatformBuild'] = ""
+ XcodeSettings._plist_cache[configname] = cache
+
+ # Include extra plist items that are per-target, not per global
+ # XcodeSettings.
+ items = dict(XcodeSettings._plist_cache[configname])
+ if self.isIOS:
+ items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
+ return items
+
+ def _DefaultSdkRoot(self):
+ """Returns the default SDKROOT to use.
+
+ Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
+ project, then the environment variable was empty. Starting with this
+ version, Xcode uses the name of the newest SDK installed.
+ """
+ xcode_version, xcode_build = XcodeVersion()
+ if xcode_version < '0500':
+ return ''
+ default_sdk_path = self._XcodeSdkPath('')
+ default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
+ if default_sdk_root:
+ return default_sdk_root
+ try:
+ all_sdks = GetStdout(['xcodebuild', '-showsdks'])
+ except:
+ # If xcodebuild fails, there will be no valid SDKs
+ return ''
+ for line in all_sdks.splitlines():
+ items = line.split()
+ if len(items) >= 3 and items[-2] == '-sdk':
+ sdk_root = items[-1]
+ sdk_path = self._XcodeSdkPath(sdk_root)
+ if sdk_path == default_sdk_path:
+ return sdk_root
+ return ''
+
+
+class MacPrefixHeader(object):
+ """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
+
+ This feature consists of several pieces:
+ * If GCC_PREFIX_HEADER is present, all compilations in that project get an
+ additional |-include path_to_prefix_header| cflag.
+ * If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
+ instead compiled, and all other compilations in the project get an
+ additional |-include path_to_compiled_header| instead.
+ + Compiled prefix headers have the extension gch. There is one gch file for
+ every language used in the project (c, cc, m, mm), since gch files for
+ different languages aren't compatible.
+ + gch files themselves are built with the target's normal cflags, but they
+ obviously don't get the |-include| flag. Instead, they need a -x flag that
+ describes their language.
+ + All o files in the target need to depend on the gch file, to make sure
+ it's built before any o file is built.
+
+ This class helps with some of these tasks, but it needs help from the build
+ system for writing dependencies to the gch files, for writing build commands
+ for the gch files, and for figuring out the location of the gch files.
+ """
+ def __init__(self, xcode_settings,
+ gyp_path_to_build_path, gyp_path_to_build_output):
+ """If xcode_settings is None, all methods on this class are no-ops.
+
+ Args:
+ gyp_path_to_build_path: A function that takes a gyp-relative path,
+ and returns a path relative to the build directory.
+ gyp_path_to_build_output: A function that takes a gyp-relative path and
+ a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
+ to where the output of precompiling that path for that language
+ should be placed (without the trailing '.gch').
+ """
+ # This doesn't support per-configuration prefix headers. Good enough
+ # for now.
+ self.header = None
+ self.compile_headers = False
+ if xcode_settings:
+ self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
+ self.compile_headers = xcode_settings.GetPerTargetSetting(
+ 'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
+ self.compiled_headers = {}
+ if self.header:
+ if self.compile_headers:
+ for lang in ['c', 'cc', 'm', 'mm']:
+ self.compiled_headers[lang] = gyp_path_to_build_output(
+ self.header, lang)
+ self.header = gyp_path_to_build_path(self.header)
+
+ def _CompiledHeader(self, lang, arch):
+ assert self.compile_headers
+ h = self.compiled_headers[lang]
+ if arch:
+ h += '.' + arch
+ return h
+
+ def GetInclude(self, lang, arch=None):
+ """Gets the cflags to include the prefix header for language |lang|."""
+ if self.compile_headers and lang in self.compiled_headers:
+ return '-include %s' % self._CompiledHeader(lang, arch)
+ elif self.header:
+ return '-include %s' % self.header
+ else:
+ return ''
+
+ def _Gch(self, lang, arch):
+ """Returns the actual file name of the prefix header for language |lang|."""
+ assert self.compile_headers
+ return self._CompiledHeader(lang, arch) + '.gch'
+
+ def GetObjDependencies(self, sources, objs, arch=None):
+    """Given a list of source files and the corresponding object files,
+    returns a list of (source, object, gch) tuples, where |gch| is the
+    build-directory relative path to the gch file each object file depends
+    on. |sources[i]| has to be the source file belonging to |objs[i]|."""
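+    # For example (paths illustrative), ('foo.mm', 'obj/foo.o') yields
+    # ('foo.mm', 'obj/foo.o', <gch path for 'mm'>), where the gch path comes
+    # from _Gch('mm', arch).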
+ if not self.header or not self.compile_headers:
+ return []
+
+ result = []
+ for source, obj in zip(sources, objs):
+ ext = os.path.splitext(source)[1]
+ lang = {
+ '.c': 'c',
+ '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
+ '.m': 'm',
+ '.mm': 'mm',
+ }.get(ext, None)
+ if lang:
+ result.append((source, obj, self._Gch(lang, arch)))
+ return result
+
+ def GetPchBuildCommands(self, arch=None):
+ """Returns [(path_to_gch, language_flag, language, header)].
+ |path_to_gch| and |header| are relative to the build directory.
+ """
+ if not self.header or not self.compile_headers:
+ return []
+ return [
+ (self._Gch('c', arch), '-x c-header', 'c', self.header),
+ (self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
+ (self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
+ (self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
+ ]
+
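+# A minimal usage sketch for MacPrefixHeader (the two lambdas below stand in
+# for the generator-supplied path callbacks and are purely hypothetical):
+#
+#   pch = MacPrefixHeader(
+#       xcode_settings,
+#       lambda path: os.path.join('obj', path),         # gyp -> build path
+#       lambda path, lang: 'pch/%s.%s' % (path, lang))  # gch output stem
+#   cflags_cc.append(pch.GetInclude('cc'))  # e.g. '-include pch/prefix.h.cc'
+#   for gch, lang_flag, lang, header in pch.GetPchBuildCommands():
+#     pass  # emit a build rule: cc <lang_flag> <cflags> -o <gch> <header>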
+
+def XcodeVersion():
+ """Returns a tuple of version and build version of installed Xcode."""
+ # `xcodebuild -version` output looks like
+ # Xcode 4.6.3
+ # Build version 4H1503
+ # or like
+ # Xcode 3.2.6
+ # Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
+ # BuildVersion: 10M2518
+ # Convert that to '0463', '4H1503'.
+ global XCODE_VERSION_CACHE
+ if XCODE_VERSION_CACHE:
+ return XCODE_VERSION_CACHE
+ try:
+ version_list = GetStdout(['xcodebuild', '-version']).splitlines()
+ # In some circumstances xcodebuild exits 0 but doesn't return
+ # the right results; for example, a user on 10.7 or 10.8 with
+ # a bogus path set via xcode-select.
+ # In that case this may be a CLT-only install so fall back to
+ # checking that version.
+ if len(version_list) < 2:
+ raise GypError("xcodebuild returned unexpected results")
+ except Exception:
+ version = CLTVersion()
+ if version:
+ version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
+ else:
+ raise GypError("No Xcode or CLT version detected!")
+ # The CLT has no build information, so we return an empty string.
+ version_list = [version, '']
+ version = version_list[0]
+ build = version_list[-1]
+ # Be careful to convert "4.2" to "0420":
+ version = version.split()[-1].replace('.', '')
+ version = (version + '0' * (3 - len(version))).zfill(4)
+ if build:
+ build = build.split()[-1]
+ XCODE_VERSION_CACHE = (version, build)
+ return XCODE_VERSION_CACHE
+
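+# Illustration of the normalization above: 'Xcode 4.6.3' yields version
+# '0463' ('463' zero-filled to four digits), 'Xcode 4.2' yields '0420'
+# ('42' padded to '420', then zero-filled), and 'Build version 4H1503'
+# yields build '4H1503'.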
+
+# This function ported from the logic in Homebrew's CLT version check
+def CLTVersion():
+ """Returns the version of command-line tools from pkgutil."""
+ # pkgutil output looks like
+ # package-id: com.apple.pkg.CLTools_Executables
+ # version: 5.0.1.0.1.1382131676
+ # volume: /
+ # location: /
+ # install-time: 1382544035
+ # groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
+ STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
+ FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
+ MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
+
+ regex = re.compile('version: (?P<version>.+)')
+ for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
+ try:
+ output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
+ return re.search(regex, output).groupdict()['version']
+ except Exception:
+ continue
+
+
+def GetStdout(cmdlist):
+ """Returns the content of standard output returned by invoking |cmdlist|.
+ Raises |GypError| if the command returns with a non-zero return code."""
+ job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
+ out = job.communicate()[0]
+ if job.returncode != 0:
+ sys.stderr.write(out.decode('utf-8') + '\n')
+ raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
+ return out.rstrip(b'\n').decode('utf-8')
+
+
+def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
+ """Merges the global xcode_settings dictionary into each configuration of the
+ target represented by spec. For keys that are both in the global and the local
+ xcode_settings dict, the local key gets precedence.
+ """
+ # The xcode generator special-cases global xcode_settings and does something
+ # that amounts to merging in the global xcode_settings into each local
+ # xcode_settings dict.
+ global_xcode_settings = global_dict.get('xcode_settings', {})
+ for config in spec['configurations'].values():
+ if 'xcode_settings' in config:
+ new_settings = global_xcode_settings.copy()
+ new_settings.update(config['xcode_settings'])
+ config['xcode_settings'] = new_settings
+
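+# For example (illustrative values): with global xcode_settings
+# {'SDKROOT': 'macosx', 'ARCHS': 'x86_64'} and a configuration carrying
+# {'ARCHS': 'arm64'}, the merged dict is
+# {'SDKROOT': 'macosx', 'ARCHS': 'arm64'} -- the local value wins.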
+
+def IsMacBundle(flavor, spec):
+ """Returns if |spec| should be treated as a bundle.
+
+ Bundles are directories with a certain subdirectory structure, instead of
+ just a single file. Bundle rules do not only produce a binary but also package
+ resources into that directory."""
+ is_mac_bundle = int(spec.get('mac_xctest_bundle', 0)) != 0 or \
+ int(spec.get('mac_xcuitest_bundle', 0)) != 0 or \
+ (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
+
+ if is_mac_bundle:
+ assert spec['type'] != 'none', (
+ 'mac_bundle targets cannot have type none (target "%s")' %
+ spec['target_name'])
+ return is_mac_bundle
+
+
+def GetMacBundleResources(product_dir, xcode_settings, resources):
+ """Yields (output, resource) pairs for every resource in |resources|.
+ Only call this for mac bundle targets.
+
+ Args:
+ product_dir: Path to the directory containing the output bundle,
+ relative to the build directory.
+ xcode_settings: The XcodeSettings of the current target.
+ resources: A list of bundle resources, relative to the build directory.
+ """
+ dest = os.path.join(product_dir,
+ xcode_settings.GetBundleResourceFolder())
+ for res in resources:
+ output = dest
+
+ # The make generator doesn't support it, so forbid it everywhere
+ # to keep the generators more interchangeable.
+ assert ' ' not in res, (
+ "Spaces in resource filenames not supported (%s)" % res)
+
+ # Split into (path,file).
+ res_parts = os.path.split(res)
+
+ # Now split the path into (prefix,maybe.lproj).
+ lproj_parts = os.path.split(res_parts[0])
+ # If the resource lives in a .lproj bundle, add that to the destination.
+ if lproj_parts[1].endswith('.lproj'):
+ output = os.path.join(output, lproj_parts[1])
+
+ output = os.path.join(output, res_parts[1])
+ # Compiled XIB files are referred to by .nib.
+ if output.endswith('.xib'):
+ output = os.path.splitext(output)[0] + '.nib'
+ # Compiled storyboard files are referred to by .storyboardc.
+ if output.endswith('.storyboard'):
+ output = os.path.splitext(output)[0] + '.storyboardc'
+
+ yield output, res
+
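+# Illustrative mappings, assuming the bundle resource folder is
+# 'App.app/Contents/Resources':
+#   'images/icon.png'   -> 'App.app/Contents/Resources/icon.png'
+#   'en.lproj/Main.xib' -> 'App.app/Contents/Resources/en.lproj/Main.nib'
+#   'Launch.storyboard' -> 'App.app/Contents/Resources/Launch.storyboardc'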
+
+def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
+ """Returns (info_plist, dest_plist, defines, extra_env), where:
+ * |info_plist| is the source plist path, relative to the
+ build directory,
+ * |dest_plist| is the destination plist path, relative to the
+ build directory,
+ * |defines| is a list of preprocessor defines (empty if the plist
+ shouldn't be preprocessed),
+ * |extra_env| is a dict of env variables that should be exported when
+ invoking |mac_tool copy-info-plist|.
+
+ Only call this for mac bundle targets.
+
+ Args:
+ product_dir: Path to the directory containing the output bundle,
+ relative to the build directory.
+ xcode_settings: The XcodeSettings of the current target.
+ gyp_path_to_build_path: A function that converts paths relative to the
+ current gyp file to paths relative to the build directory.
+ """
+ info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
+ if not info_plist:
+ return None, None, [], {}
+
+ # The make generator doesn't support it, so forbid it everywhere
+ # to keep the generators more interchangeable.
+ assert ' ' not in info_plist, (
+ "Spaces in Info.plist filenames not supported (%s)" % info_plist)
+
+ info_plist = gyp_path_to_build_path(info_plist)
+
+ # If explicitly set to preprocess the plist, invoke the C preprocessor and
+ # specify any defines as -D flags.
+ if xcode_settings.GetPerTargetSetting(
+ 'INFOPLIST_PREPROCESS', default='NO') == 'YES':
+ # Create an intermediate file based on the path.
+ defines = shlex.split(xcode_settings.GetPerTargetSetting(
+ 'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
+ else:
+ defines = []
+
+ dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
+ extra_env = xcode_settings.GetPerTargetSettings()
+
+ return info_plist, dest_plist, defines, extra_env
+
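+# Illustrative return value (all paths hypothetical) for a target with
+# INFOPLIST_FILE set and INFOPLIST_PREPROCESS=YES:
+#   ('obj/App/Info.plist',           # source plist, build-relative
+#    'App.app/Contents/Info.plist',  # destination plist, build-relative
+#    ['FOO=1'],                      # INFOPLIST_PREPROCESSOR_DEFINITIONS
+#    {...})                          # per-target settings for mac_tool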
+
+def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
+ additional_settings=None):
+ """Return the environment variables that Xcode would set. See
+ http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
+ for a full list.
+
+ Args:
+ xcode_settings: An XcodeSettings object. If this is None, this function
+ returns an empty dict.
+ built_products_dir: Absolute path to the built products dir.
+ srcroot: Absolute path to the source root.
+ configuration: The build configuration name.
+ additional_settings: An optional dict with more values to add to the
+ result.
+ """
+
+ if not xcode_settings: return {}
+
+ # This function is considered a friend of XcodeSettings, so let it reach into
+ # its implementation details.
+ spec = xcode_settings.spec
+
+ # These are filled in on an as-needed basis.
+ env = {
+ 'BUILT_FRAMEWORKS_DIR' : built_products_dir,
+ 'BUILT_PRODUCTS_DIR' : built_products_dir,
+ 'CONFIGURATION' : configuration,
+ 'PRODUCT_NAME' : xcode_settings.GetProductName(),
+ # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
+ 'SRCROOT' : srcroot,
+ 'SOURCE_ROOT': '${SRCROOT}',
+ # This is not true for static libraries, but currently the env is only
+ # written for bundles:
+ 'TARGET_BUILD_DIR' : built_products_dir,
+ 'TEMP_DIR' : '${TMPDIR}',
+ 'XCODE_VERSION_ACTUAL' : XcodeVersion()[0],
+ }
+ if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
+ env['SDKROOT'] = xcode_settings._SdkPath(configuration)
+ else:
+ env['SDKROOT'] = ''
+
+ if xcode_settings.mac_toolchain_dir:
+ env['DEVELOPER_DIR'] = xcode_settings.mac_toolchain_dir
+
+ if spec['type'] in (
+ 'executable', 'static_library', 'shared_library', 'loadable_module'):
+ env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
+ env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
+ env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
+ mach_o_type = xcode_settings.GetMachOType()
+ if mach_o_type:
+ env['MACH_O_TYPE'] = mach_o_type
+ env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
+ if xcode_settings._IsBundle():
+ # xcodeproj_file.py sets the same Xcode subfolder value for this as for
+ # FRAMEWORKS_FOLDER_PATH so Xcode builds will actually use FFP's value.
+ env['BUILT_FRAMEWORKS_DIR'] = \
+ os.path.join(built_products_dir,
+ xcode_settings.GetBundleFrameworksFolderPath())
+ env['CONTENTS_FOLDER_PATH'] = \
+ xcode_settings.GetBundleContentsFolderPath()
+ env['EXECUTABLE_FOLDER_PATH'] = \
+ xcode_settings.GetBundleExecutableFolderPath()
+ env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
+ xcode_settings.GetBundleResourceFolder()
+ env['JAVA_FOLDER_PATH'] = xcode_settings.GetBundleJavaFolderPath()
+ env['FRAMEWORKS_FOLDER_PATH'] = \
+ xcode_settings.GetBundleFrameworksFolderPath()
+ env['SHARED_FRAMEWORKS_FOLDER_PATH'] = \
+ xcode_settings.GetBundleSharedFrameworksFolderPath()
+ env['SHARED_SUPPORT_FOLDER_PATH'] = \
+ xcode_settings.GetBundleSharedSupportFolderPath()
+ env['PLUGINS_FOLDER_PATH'] = xcode_settings.GetBundlePlugInsFolderPath()
+ env['XPCSERVICES_FOLDER_PATH'] = \
+ xcode_settings.GetBundleXPCServicesFolderPath()
+ env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
+ env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
+
+ install_name = xcode_settings.GetInstallName()
+ if install_name:
+ env['LD_DYLIB_INSTALL_NAME'] = install_name
+ install_name_base = xcode_settings.GetInstallNameBase()
+ if install_name_base:
+ env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
+ if XcodeVersion()[0] >= '0500' and not env.get('SDKROOT'):
+ sdk_root = xcode_settings._SdkRoot(configuration)
+ if not sdk_root:
+ sdk_root = xcode_settings._XcodeSdkPath('')
+ env['SDKROOT'] = sdk_root
+
+ if not additional_settings:
+ additional_settings = {}
+ else:
+ # Flatten lists to strings.
+ for k in additional_settings:
+ if not isinstance(additional_settings[k], str):
+ additional_settings[k] = ' '.join(additional_settings[k])
+ additional_settings.update(env)
+
+ for k in additional_settings:
+ additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
+
+ return additional_settings
+
+
+def _NormalizeEnvVarReferences(string):
+ """Takes a string containing variable references in the form ${FOO}, $(FOO),
+ or $FOO, and returns a string with all variable references in the form ${FOO}.
+ """
+ # $FOO -> ${FOO}
+ string = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', string)
+
+ # $(FOO) -> ${FOO}
+ matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', string)
+ for match in matches:
+ to_replace, variable = match
+ assert '$(' not in variable, \
+ '$($(FOO)) variables not supported: ' + to_replace
+ string = string.replace(to_replace, '${' + variable + '}')
+
+ return string
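+
+# For example:
+#   _NormalizeEnvVarReferences('$FOO/$(BAR)/${BAZ}') == '${FOO}/${BAR}/${BAZ}'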
+
+
+def ExpandEnvVars(string, expansions):
+ """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
+ expansions list. If a variable expands to something that references
+ another variable, that variable is expanded as well, provided it appears
+ in |expansions| -- until no known variables are left."""
+ for k, v in reversed(expansions):
+ string = string.replace('${' + k + '}', v)
+ string = string.replace('$(' + k + ')', v)
+ string = string.replace('$' + k, v)
+ return string
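+
+# Worked example: with expansions [('B', 'b'), ('A', '${B}/a')] (already
+# topologically sorted, see GetSortedXcodeEnv), ExpandEnvVars('${A}/x', ...)
+# walks the list in reverse: '${A}/x' -> '${B}/a/x' -> 'b/a/x'.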
+
+
+def _TopologicallySortedEnvVarKeys(env):
+ """Takes a dict |env| whose values are strings that can refer to other keys,
+ for example env['foo'] = '${bar} and ${baz}'. Returns a list L of all keys of
+ env such that key2 is after key1 in L if env[key2] refers to env[key1].
+
+ Throws an Exception in case of dependency cycles.
+ """
+ # Since environment variables can refer to other variables, the evaluation
+ # order is important. Below is the logic to compute the dependency graph
+ # and sort it.
+ regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
+ def GetEdges(node):
+ # Use a definition of edges such that user_of_variable -> used_variable.
+ # This happens to be easier in this case, since a variable's
+ # definition contains all variables it references in a single string.
+ # We can then reverse the result of the topological sort at the end.
+ # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
+ matches = set([v for v in regex.findall(env[node]) if v in env])
+ for dependee in matches:
+ assert '${' not in dependee, 'Nested variables not supported: ' + dependee
+ return matches
+
+ try:
+ # Topologically sort, and then reverse, because we used an edge definition
+ # that's inverted from the expected result of this function (see comment
+ # above).
+ order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
+ order.reverse()
+ return order
+ except gyp.common.CycleError as e:
+ raise GypError(
+ 'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
+
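+# For example, {'A': '${B}/x', 'B': 'b'} sorts to ['B', 'A']: 'A' refers to
+# 'B', so 'B' must come first in the returned evaluation order.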
+
+def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
+ configuration, additional_settings=None):
+ env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
+ additional_settings)
+ return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
+
+
+def GetSpecPostbuildCommands(spec, quiet=False):
+ """Returns the list of postbuilds explicitly defined on |spec|, in a form
+ executable by a shell."""
+ postbuilds = []
+ for postbuild in spec.get('postbuilds', []):
+ if not quiet:
+ postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
+ spec['target_name'], postbuild['postbuild_name']))
+ postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
+ return postbuilds
+
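+# Illustratively, a spec with one postbuild
+# {'postbuild_name': 'Strip', 'action': ['strip', 'app']} in target 'foo'
+# yields (when not quiet):
+#   ['echo POSTBUILD\(foo\) Strip', 'strip app']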
+
+def _HasIOSTarget(targets):
+ """Returns true if any target contains the iOS specific key
+ IPHONEOS_DEPLOYMENT_TARGET."""
+ for target_dict in targets.values():
+ for config in target_dict['configurations'].values():
+ if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
+ return True
+ return False
+
+
+def _AddIOSDeviceConfigurations(targets):
+ """Clone all targets and append -iphoneos to the name. Configure these targets
+ to build for iOS devices and use correct architectures for those builds."""
+ for target_dict in targets.values():
+ toolset = target_dict['toolset']
+ configs = target_dict['configurations']
+
+ for config_name, simulator_config_dict in dict(configs).items():
+ iphoneos_config_dict = copy.deepcopy(simulator_config_dict)
+ configs[config_name + '-iphoneos'] = iphoneos_config_dict
+ configs[config_name + '-iphonesimulator'] = simulator_config_dict
+ if toolset == 'target':
+ simulator_config_dict['xcode_settings']['SDKROOT'] = 'iphonesimulator'
+ iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
+ return targets
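+
+# For example, a target whose configurations are {'Debug': {...}} ends up
+# with 'Debug', 'Debug-iphoneos', and 'Debug-iphonesimulator', where the
+# -iphonesimulator entry aliases the original dict and the -iphoneos entry
+# is a deep copy with SDKROOT set to 'iphoneos'.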
+
+def CloneConfigurationForDeviceAndEmulator(target_dicts):
+ """If |target_dicts| contains any iOS targets, automatically create -iphoneos
+ targets for iOS device builds."""
+ if _HasIOSTarget(target_dicts):
+ return _AddIOSDeviceConfigurations(target_dicts)
+ return target_dicts
diff --git a/third_party/python/gyp/pylib/gyp/xcode_ninja.py b/third_party/python/gyp/pylib/gyp/xcode_ninja.py
new file mode 100644
index 0000000000..1d71b8c5f8
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/xcode_ninja.py
@@ -0,0 +1,289 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Xcode-ninja wrapper project file generator.
+
+This updates the data structures passed to the Xcode gyp generator to build
+with ninja instead. The Xcode project itself is transformed into a list of
+executable targets, each with a build step to build with ninja, and a target
+with every source and resource file. This appears to sidestep some of the
+major performance headaches experienced using complex projects and large numbers
+of targets within Xcode.
+"""
+
+import errno
+import gyp.generator.ninja
+import os
+import re
+import xml.sax.saxutils
+
+
+def _WriteWorkspace(main_gyp, sources_gyp, params):
+ """ Create a workspace to wrap main and sources gyp paths. """
+ (build_file_root, build_file_ext) = os.path.splitext(main_gyp)
+ workspace_path = build_file_root + '.xcworkspace'
+ options = params['options']
+ if options.generator_output:
+ workspace_path = os.path.join(options.generator_output, workspace_path)
+ try:
+ os.makedirs(workspace_path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+ output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
+ '<Workspace version = "1.0">\n'
+ for gyp_name in [main_gyp, sources_gyp]:
+ name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
+ name = xml.sax.saxutils.quoteattr("group:" + name)
+ output_string += ' <FileRef location = %s></FileRef>\n' % name
+ output_string += '</Workspace>\n'
+
+ workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
+
+ try:
+ with open(workspace_file, 'r') as input_file:
+ input_string = input_file.read()
+ if input_string == output_string:
+ return
+ except IOError:
+ # Ignore errors if the file doesn't exist.
+ pass
+
+ with open(workspace_file, 'w') as output_file:
+ output_file.write(output_string)
+
+def _TargetFromSpec(old_spec, params):
+ """ Create fake target for xcode-ninja wrapper. """
+ # Determine ninja top level build dir (e.g. /path/to/out).
+ ninja_toplevel = None
+ jobs = 0
+ if params:
+ options = params['options']
+ ninja_toplevel = \
+ os.path.join(options.toplevel_dir,
+ gyp.generator.ninja.ComputeOutputDir(params))
+ jobs = int(params.get('generator_flags', {}).get('xcode_ninja_jobs', 0))
+
+ target_name = old_spec.get('target_name')
+ product_name = old_spec.get('product_name', target_name)
+ product_extension = old_spec.get('product_extension')
+
+ ninja_target = {}
+ ninja_target['target_name'] = target_name
+ ninja_target['product_name'] = product_name
+ if product_extension:
+ ninja_target['product_extension'] = product_extension
+ ninja_target['toolset'] = old_spec.get('toolset')
+ ninja_target['default_configuration'] = old_spec.get('default_configuration')
+ ninja_target['configurations'] = {}
+
+ # Tell Xcode to look in |ninja_toplevel| for build products.
+ new_xcode_settings = {}
+ if ninja_toplevel:
+ new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
+ "%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
+
+ if 'configurations' in old_spec:
+ for config in old_spec['configurations'].keys():
+ old_xcode_settings = \
+ old_spec['configurations'][config].get('xcode_settings', {})
+ if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
+ new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
+ new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
+ old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
+ for key in ['BUNDLE_LOADER', 'TEST_HOST']:
+ if key in old_xcode_settings:
+ new_xcode_settings[key] = old_xcode_settings[key]
+
+ ninja_target['configurations'][config] = {}
+ ninja_target['configurations'][config]['xcode_settings'] = \
+ new_xcode_settings
+
+ ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
+ ninja_target['mac_xctest_bundle'] = old_spec.get('mac_xctest_bundle', 0)
+ ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
+ ninja_target['ios_watchkit_extension'] = \
+ old_spec.get('ios_watchkit_extension', 0)
+ ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
+ ninja_target['type'] = old_spec['type']
+ if ninja_toplevel:
+ ninja_target['actions'] = [
+ {
+ 'action_name': 'Compile and copy %s via ninja' % target_name,
+ 'inputs': [],
+ 'outputs': [],
+ 'action': [
+ 'env',
+ 'PATH=%s' % os.environ['PATH'],
+ 'ninja',
+ '-C',
+ new_xcode_settings['CONFIGURATION_BUILD_DIR'],
+ target_name,
+ ],
+ 'message': 'Compile and copy %s via ninja' % target_name,
+ },
+ ]
+ if jobs > 0:
+ ninja_target['actions'][0]['action'].extend(('-j', str(jobs)))
+ return ninja_target
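+
+# Illustratively, for a target 'base' with ninja_toplevel '/path/to/out', the
+# generated action boils down to running:
+#   env PATH=$PATH ninja -C /path/to/out/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME) base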
+
+def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
+ """Limit targets for Xcode wrapper.
+
+ Xcode sometimes performs poorly with too many targets, so only include
+ proper executable targets, with filters to customize.
+ Arguments:
+ target_extras: Regular expression; any target matching it is always included.
+ executable_target_pattern: Regular expression limiting executable targets.
+ spec: Specifications for target.
+ """
+ target_name = spec.get('target_name')
+ # Always include targets matching target_extras.
+ if target_extras is not None and re.search(target_extras, target_name):
+ return True
+
+ # Otherwise just show executable targets and xctest bundles.
+ if (int(spec.get('mac_xctest_bundle', 0)) != 0 or
+ (spec.get('type', '') == 'executable' and
+ spec.get('product_extension', '') != 'bundle')):
+
+ # If there is a filter and the target does not match, exclude the target.
+ if executable_target_pattern is not None:
+ if not re.search(executable_target_pattern, target_name):
+ return False
+ return True
+ return False
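+
+# Illustratively: an xctest bundle target is always kept; an 'executable'
+# target is kept only if it matches executable_target_pattern (when set); any
+# other target type is kept only if it matches target_extras.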
+
+def CreateWrapper(target_list, target_dicts, data, params):
+ """Initialize targets for the ninja wrapper.
+
+ This sets up the necessary variables in the targets to generate Xcode projects
+ that use ninja as an external builder.
+ Arguments:
+ target_list: List of target pairs: 'base/base.gyp:base'.
+ target_dicts: Dict of target properties keyed on target pair.
+ data: Dict of flattened build files keyed on gyp path.
+ params: Dict of global options for gyp.
+ """
+ orig_gyp = params['build_files'][0]
+ for gyp_name, gyp_dict in data.items():
+ if gyp_name == orig_gyp:
+ depth = gyp_dict['_DEPTH']
+
+ # Check for a custom main gyp name; otherwise use the original build file
+ # name and insert .ninja before the .gyp extension.
+ generator_flags = params.get('generator_flags', {})
+ main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
+ if main_gyp is None:
+ (build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
+ main_gyp = build_file_root + ".ninja" + build_file_ext
+
+ # Create new |target_list|, |target_dicts| and |data| data structures.
+ new_target_list = []
+ new_target_dicts = {}
+ new_data = {}
+
+ # Set base keys needed for |data|.
+ new_data[main_gyp] = {}
+ new_data[main_gyp]['included_files'] = []
+ new_data[main_gyp]['targets'] = []
+ new_data[main_gyp]['xcode_settings'] = \
+ data[orig_gyp].get('xcode_settings', {})
+
+ # Normally the xcode-ninja generator includes only valid executable targets.
+ # If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
+ # executable targets that match the pattern. (Default all)
+ executable_target_pattern = \
+ generator_flags.get('xcode_ninja_executable_target_pattern', None)
+
+ # For including other non-executable targets, add the matching target name
+ # to the |xcode_ninja_target_pattern| regular expression. (Default none)
+ target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
+
+ for old_qualified_target in target_list:
+ spec = target_dicts[old_qualified_target]
+ if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
+ # Add to new_target_list.
+ target_name = spec.get('target_name')
+ new_target_name = '%s:%s#target' % (main_gyp, target_name)
+ new_target_list.append(new_target_name)
+
+ # Add to new_target_dicts.
+ new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
+
+ # Add to new_data.
+ for old_target in data[old_qualified_target.split(':')[0]]['targets']:
+ if old_target['target_name'] == target_name:
+ new_data_target = {}
+ new_data_target['target_name'] = old_target['target_name']
+ new_data_target['toolset'] = old_target['toolset']
+ new_data[main_gyp]['targets'].append(new_data_target)
+
+ # Create sources target.
+ sources_target_name = 'sources_for_indexing'
+ sources_target = _TargetFromSpec(
+ { 'target_name' : sources_target_name,
+ 'toolset': 'target',
+ 'default_configuration': 'Default',
+ 'mac_bundle': '0',
+ 'type': 'executable'
+ }, None)
+
+ # Tell Xcode to look everywhere for headers.
+ sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
+
+ # Put excluded files into the sources target so they can be opened in Xcode.
+ skip_excluded_files = \
+ not generator_flags.get('xcode_ninja_list_excluded_files', True)
+
+ sources = []
+ for target, target_dict in target_dicts.items():
+ base = os.path.dirname(target)
+ files = target_dict.get('sources', []) + \
+ target_dict.get('mac_bundle_resources', [])
+
+ if not skip_excluded_files:
+ files.extend(target_dict.get('sources_excluded', []) +
+ target_dict.get('mac_bundle_resources_excluded', []))
+
+ for action in target_dict.get('actions', []):
+ files.extend(action.get('inputs', []))
+
+ if not skip_excluded_files:
+ files.extend(action.get('inputs_excluded', []))
+
+ # Remove files starting with $. These are mostly intermediate files for the
+ # build system.
+ files = [ file for file in files if not file.startswith('$')]
+
+ # Make sources relative to root build file.
+ relative_path = os.path.dirname(main_gyp)
+ sources += [ os.path.relpath(os.path.join(base, file), relative_path)
+ for file in files ]
+
+ sources_target['sources'] = sorted(set(sources))
+
+ # Put the sources target in its own gyp.
+ sources_gyp = \
+ os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
+ fully_qualified_target_name = \
+ '%s:%s#target' % (sources_gyp, sources_target_name)
+
+ # Add to new_target_list, new_target_dicts and new_data.
+ new_target_list.append(fully_qualified_target_name)
+ new_target_dicts[fully_qualified_target_name] = sources_target
+ new_data_target = {}
+ new_data_target['target_name'] = sources_target['target_name']
+ new_data_target['_DEPTH'] = depth
+ new_data_target['toolset'] = "target"
+ new_data[sources_gyp] = {}
+ new_data[sources_gyp]['targets'] = []
+ new_data[sources_gyp]['included_files'] = []
+ new_data[sources_gyp]['xcode_settings'] = \
+ data[orig_gyp].get('xcode_settings', {})
+ new_data[sources_gyp]['targets'].append(new_data_target)
+
+ # Write workspace to file.
+ _WriteWorkspace(main_gyp, sources_gyp, params)
+ return (new_target_list, new_target_dicts, new_data)
diff --git a/third_party/python/gyp/pylib/gyp/xcodeproj_file.py b/third_party/python/gyp/pylib/gyp/xcodeproj_file.py
new file mode 100644
index 0000000000..19edcb07fb
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/xcodeproj_file.py
@@ -0,0 +1,2995 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Xcode project file generator.
+
+This module is both an Xcode project file generator and a documentation of the
+Xcode project file format. Knowledge of the project file format was gained
+based on extensive experience with Xcode, and by making changes to projects in
+Xcode.app and observing the resultant changes in the associated project files.
+
+XCODE PROJECT FILES
+
+The generator targets the file format as written by Xcode 3.2 (specifically,
+3.2.6), but past experience has taught that the format has not changed
+significantly in the past several years, and future versions of Xcode are able
+to read older project files.
+
+Xcode project files are "bundled": the project "file" from an end-user's
+perspective is actually a directory with an ".xcodeproj" extension. The
+project file from this module's perspective is actually a file inside this
+directory, always named "project.pbxproj". This file contains a complete
+description of the project and is all that is needed to use the xcodeproj.
+Other files contained in the xcodeproj directory are simply used to store
+per-user settings, such as the state of various UI elements in the Xcode
+application.
+
+The project.pbxproj file is a property list, stored in a format almost
+identical to the NeXTstep property list format. The file is able to carry
+Unicode data, and is encoded in UTF-8. The root element in the property list
+is a dictionary that contains several properties of minimal interest, and two
+properties of immense interest. The most important property is a dictionary
+named "objects". The entire structure of the project is represented by the
+children of this property. The objects dictionary is keyed by unique 96-bit
+values represented by 24 uppercase hexadecimal characters. Each value in the
+objects dictionary is itself a dictionary, describing an individual object.
+
+Each object in the dictionary is a member of a class, which is identified by
+the "isa" property of each object. A variety of classes are represented in a
+project file. Objects can refer to other objects by ID, using the 24-character
+hexadecimal object key. A project's objects form a tree, with a root object
+of class PBXProject at the root. As an example, the PBXProject object serves
+as parent to an XCConfigurationList object defining the build configurations
+used in the project, a PBXGroup object serving as a container for all files
+referenced in the project, and a list of target objects, each of which defines
+a target in the project. There are several different types of target object,
+such as PBXNativeTarget and PBXAggregateTarget. In this module, this
+relationship is expressed by having each target type derive from an abstract
+base named XCTarget.
+
+The project.pbxproj file's root dictionary also contains a property, sibling to
+the "objects" dictionary, named "rootObject". The value of rootObject is a
+24-character object key referring to the root PBXProject object in the
+objects dictionary.
+
+In Xcode, every file used as input to a target or produced as a final product
+of a target must appear somewhere in the hierarchy rooted at the PBXGroup
+object referenced by the PBXProject's mainGroup property. A PBXGroup is
+generally represented as a folder in the Xcode application. PBXGroups can
+contain other PBXGroups as well as PBXFileReferences, which are pointers to
+actual files.
+
+Each XCTarget contains a list of build phases, represented in this module by
+the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
+are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
+"Compile Sources" and "Link Binary With Libraries" phases displayed in the
+Xcode application. Files used as input to these phases (for example, source
+files in the former case and libraries and frameworks in the latter) are
+represented by PBXBuildFile objects, referenced by elements of "files" lists
+in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
+object as a "weak" reference: it does not "own" the PBXBuildFile, which is
+owned by the root object's mainGroup or a descendant group. In most cases, the
+layer of indirection between an XCBuildPhase and a PBXFileReference via a
+PBXBuildFile appears extraneous, but there's actually one reason for this:
+file-specific compiler flags are added to the PBXBuildFile object so as to
+allow a single file to be a member of multiple targets while having distinct
+ compiler flags for each. These flags can be modified in the Xcode application
+in the "Build" tab of a File Info window.
+
+When a project is open in the Xcode application, Xcode will rewrite it. As
+such, this module is careful to adhere to the formatting used by Xcode, to
+avoid insignificant changes appearing in the file when it is used in the
+Xcode application. This will keep version control repositories happy, and
+makes it possible to compare a project file used in Xcode to one generated by
+this module to determine if any significant changes were made in the
+application.
+
+Xcode has its own way of assigning 24-character identifiers to each object,
+which is not duplicated here. Because the identifier is only generated
+once, when an object is created, and is then left unchanged, there is no need
+to attempt to duplicate Xcode's behavior in this area. The generator is free
+to select any identifier, even at random, to refer to the objects it creates,
+and Xcode will retain those identifiers and use them when subsequently
+rewriting the project file. However, the generator would choose new random
+identifiers each time the project files are generated, leading to difficulties
+comparing "used" project files to "pristine" ones produced by this module,
+and causing the appearance of changes as every object identifier is changed
+when updated projects are checked in to a version control repository. To
+mitigate this problem, this module chooses identifiers in a more deterministic
+way, by hashing a description of each object as well as its parent and ancestor
+objects. This strategy should result in minimal "shift" in IDs as successive
+generations of project files are produced.
+
+THIS MODULE
+
+This module introduces several classes, all derived from the XCObject class.
+Nearly all of the "brains" are built into the XCObject class, which understands
+how to create and modify objects, maintain the proper tree structure, compute
+identifiers, and print objects. For the most part, classes derived from
+XCObject need only provide a _schema class object, a dictionary that
+expresses what properties objects of the class may contain.
+
+Given this structure, it's possible to build a minimal project file by creating
+objects of the appropriate types and making the proper connections:
+
+ config_list = XCConfigurationList()
+ group = PBXGroup()
+ project = PBXProject({'buildConfigurationList': config_list,
+ 'mainGroup': group})
+
+With the project object set up, it can be added to an XCProjectFile object.
+XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
+subclass that does not actually correspond to a class type found in a project
+file. Rather, it is used to represent the project file's root dictionary.
+Printing an XCProjectFile will print the entire project file, including the
+full "objects" dictionary.
+
+ project_file = XCProjectFile({'rootObject': project})
+ project_file.ComputeIDs()
+ project_file.Print()
+
+Xcode project files are always encoded in UTF-8. This module will accept
+strings of either the str class or the unicode class. Strings of class str
+are assumed to already be encoded in UTF-8. Obviously, if you're just using
+ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
+Strings of class unicode are handled properly and encoded in UTF-8 when
+a project file is output.
+"""
+
+import functools
+import gyp.common
+import posixpath
+import re
+import struct
+import sys
+
+# hashlib is supplied as of Python 2.5 as the replacement interface for sha
+# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
+# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
+# preserving 2.4 compatibility.
+try:
+ import hashlib
+ _new_sha1 = hashlib.sha1
+except ImportError:
+ import sha
+ _new_sha1 = sha.new
+
+try:
+ # basestring was removed in python3.
+ basestring
+except NameError:
+ basestring = str
+
+try:
+ # cmp was removed in python3.
+ cmp
+except NameError:
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+# See XCObject._EncodeString. This pattern is used to determine when a string
+# can be printed unquoted. Strings that match this pattern may be printed
+# unquoted. Strings that do not match must be quoted and may be further
+# transformed to be properly encoded. Note that this expression matches the
+# characters listed with "+", for 1 or more occurrences: if a string is empty,
+# it must not match this pattern, because it needs to be encoded as "".
+_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
+
+# Strings that match this pattern are quoted regardless of what _unquoted says.
+# Oddly, Xcode will quote any string with a run of three or more underscores.
+_quoted = re.compile('___')
+
+# This pattern should match any character that needs to be escaped by
+# XCObject._EncodeString. See that function.
+_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
+
+
+# Used by SourceTreeAndPathFromPath
+_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
+
+def SourceTreeAndPathFromPath(input_path):
+ """Given input_path, returns a tuple with sourceTree and path values.
+
+ Examples:
+ input_path (source_tree, output_path)
+ '$(VAR)/path' ('VAR', 'path')
+ '$(VAR)' ('VAR', None)
+ 'path' (None, 'path')
+ """
+
+ source_group_match = _path_leading_variable.match(input_path)
+ if source_group_match:
+ source_tree = source_group_match.group(1)
+ output_path = source_group_match.group(3) # This may be None.
+ else:
+ source_tree = None
+ output_path = input_path
+
+ return (source_tree, output_path)
+
+def ConvertVariablesToShellSyntax(input_string):
+ return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)
+
+class XCObject(object):
+ """The abstract base of all class types used in Xcode project files.
+
+ Class variables:
+ _schema: A dictionary defining the properties of this class. The keys to
+ _schema are string property keys as used in project files. Values
+ are a list of four or five elements:
+ [ is_list, property_type, is_strong, is_required, default ]
+ is_list: True if the property described is a list, as opposed
+ to a single element.
+ property_type: The type to use as the value of the property,
+ or if is_list is True, the type to use for each
+ element of the value's list. property_type must
+ be an XCObject subclass, or one of the built-in
+ types str, int, or dict.
+ is_strong: If property_type is an XCObject subclass, is_strong
+ is True to assert that this class "owns," or serves
+ as parent, to the property value (or, if is_list is
+ True, values). is_strong must be False if
+ property_type is not an XCObject subclass.
+ is_required: True if the property is required for the class.
+ Note that is_required being True does not preclude
+ an empty string ("", in the case of property_type
+ str) or list ([], in the case of is_list True) from
+ being set for the property.
+ default: Optional. If is_required is True, default may be set
+ to provide a default value for objects that do not supply
+ their own value. If is_required is True and default
+ is not provided, users of the class must supply their own
+ value for the property.
+ Note that although the values of the array are expressed in
+ boolean terms, subclasses provide values as integers to conserve
+ horizontal space.
+ _should_print_single_line: False in XCObject. Subclasses whose objects
+ should be written to the project file in the
+ alternate single-line format, such as
+ PBXFileReference and PBXBuildFile, should
+ set this to True.
+ _encode_transforms: Used by _EncodeString to encode unprintable characters.
+ The index into this list is the ordinal of the
+ character to transform; each value is a string
+ used to represent the character in the output. XCObject
+ provides an _encode_transforms list suitable for most
+ XCObject subclasses.
+ _alternate_encode_transforms: Provided for subclasses that wish to use
+ the alternate encoding rules. Xcode seems
+ to use these rules when printing objects in
+ single-line format. Subclasses that desire
+ this behavior should set _encode_transforms
+ to _alternate_encode_transforms.
+ _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
+ to construct this object's ID. Most classes that need custom
+ hashing behavior should do it by overriding Hashables,
+ but in some cases an object's parent may wish to push a
+ hashable value into its child, and it can do so by appending
+ to _hashables.
+ Attributes:
+ id: The object's identifier, a 24-character uppercase hexadecimal string.
+ Usually, objects being created should not set id until the entire
+ project file structure is built. At that point, ComputeIDs() should
+ be called on the root object to assign deterministic values for id to
+ each object in the tree.
+ parent: The object's parent. This is set by a parent XCObject when a child
+ object is added to it.
+ _properties: The object's property dictionary. An object's properties are
+ described by its class' _schema variable.
+ """
+
+ _schema = {}
+ _should_print_single_line = False
+
+ # See _EncodeString.
+ _encode_transforms = []
+ i = 0
+ while i < ord(' '):
+ _encode_transforms.append('\\U%04x' % i)
+ i = i + 1
+ _encode_transforms[7] = '\\a'
+ _encode_transforms[8] = '\\b'
+ _encode_transforms[9] = '\\t'
+ _encode_transforms[10] = '\\n'
+ _encode_transforms[11] = '\\v'
+ _encode_transforms[12] = '\\f'
+ _encode_transforms[13] = '\\n'
+
+ _alternate_encode_transforms = list(_encode_transforms)
+ _alternate_encode_transforms[9] = chr(9)
+ _alternate_encode_transforms[10] = chr(10)
+ _alternate_encode_transforms[11] = chr(11)
+
+ def __init__(self, properties=None, id=None, parent=None):
+ self.id = id
+ self.parent = parent
+ self._properties = {}
+ self._hashables = []
+ self._SetDefaultsFromSchema()
+ self.UpdateProperties(properties)
+
+ def __repr__(self):
+ try:
+ name = self.Name()
+ except NotImplementedError:
+ return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
+ return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
+
+ def Copy(self):
+ """Make a copy of this object.
+
+ The new object will have its own copy of lists and dicts. Any XCObject
+ objects owned by this object (marked "strong") will be copied in the
+ new object, even those found in lists. If this object has any weak
+ references to other XCObjects, the same references are added to the new
+ object without making a copy.
+ """
+
+ that = self.__class__(id=self.id, parent=self.parent)
+ for key, value in self._properties.items():
+ is_strong = self._schema[key][2]
+
+ if isinstance(value, XCObject):
+ if is_strong:
+ new_value = value.Copy()
+ new_value.parent = that
+ that._properties[key] = new_value
+ else:
+ that._properties[key] = value
+ elif isinstance(value, basestring) or isinstance(value, int):
+ that._properties[key] = value
+ elif isinstance(value, list):
+ if is_strong:
+ # If is_strong is True, each element is an XCObject, so it's safe to
+ # call Copy.
+ that._properties[key] = []
+ for item in value:
+ new_item = item.Copy()
+ new_item.parent = that
+ that._properties[key].append(new_item)
+ else:
+ that._properties[key] = value[:]
+ elif isinstance(value, dict):
+ # dicts are never strong.
+ if is_strong:
+ raise TypeError('Strong dict for key ' + key + ' in ' + \
+ self.__class__.__name__)
+ else:
+ that._properties[key] = value.copy()
+ else:
+ raise TypeError('Unexpected type ' + value.__class__.__name__ + \
+ ' for key ' + key + ' in ' + self.__class__.__name__)
+
+ return that
+
+ def Name(self):
+ """Return the name corresponding to an object.
+
+ Not all objects necessarily need to be nameable, and not all that do have
+ a "name" property. Override as needed.
+ """
+
+ # If the schema indicates that "name" is required, try to access the
+ # property even if it doesn't exist. This will result in a KeyError
+ # being raised for the property that should be present, which seems more
+ # appropriate than NotImplementedError in this case.
+ if 'name' in self._properties or \
+ ('name' in self._schema and self._schema['name'][3]):
+ return self._properties['name']
+
+ raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
+
+ def Comment(self):
+ """Return a comment string for the object.
+
+ Most objects just use their name as the comment, but PBXProject uses
+ different values.
+
+ The returned comment is not escaped and does not have any comment marker
+ strings applied to it.
+ """
+
+ return self.Name()
+
+ def Hashables(self):
+ hashables = [self.__class__.__name__]
+
+ name = self.Name()
+ if name is not None:
+ hashables.append(name)
+
+ hashables.extend(self._hashables)
+
+ return hashables
+
+ def HashablesForChild(self):
+ return None
+
+ def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
+ """Set "id" properties deterministically.
+
+ An object's "id" property is set based on a hash of its class type and
+ name, as well as the class type and name of all ancestor objects. As
+ such, it is only advisable to call ComputeIDs once an entire project file
+ tree is built.
+
+ If recursive is True, recurse into all descendant objects and update their
+ hashes.
+
+ If overwrite is True, any existing value set in the "id" property will be
+ replaced.
+ """
+
+ def _HashUpdate(hash, data):
+ """Update hash with data's length and contents.
+
+ If the hash were updated only with the value of data, it would be
+ possible for clowns to induce collisions by manipulating the names of
+ their objects. By adding the length, it's exceedingly less likely that
+ ID collisions will be encountered, intentionally or not.
+ """
+
+ hash.update(struct.pack('>i', len(data)))
+ hash.update(data.encode('utf-8'))
+
+ if seed_hash is None:
+ seed_hash = _new_sha1()
+
+ hash = seed_hash.copy()
+
+ hashables = self.Hashables()
+ assert len(hashables) > 0
+ for hashable in hashables:
+ _HashUpdate(hash, hashable)
+
+ if recursive:
+ hashables_for_child = self.HashablesForChild()
+ if hashables_for_child is None:
+ child_hash = hash
+ else:
+ assert len(hashables_for_child) > 0
+ child_hash = seed_hash.copy()
+ for hashable in hashables_for_child:
+ _HashUpdate(child_hash, hashable)
+
+ for child in self.Children():
+ child.ComputeIDs(recursive, overwrite, child_hash)
+
+ if overwrite or self.id is None:
+ # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
+ # 160 bits. Instead of throwing out 64 bits of the digest, xor them
+ # into the portion that gets used.
+ assert hash.digest_size % 4 == 0
+ digest_int_count = hash.digest_size // 4
+ digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
+ id_ints = [0, 0, 0]
+ for index in range(0, digest_int_count):
+ id_ints[index % 3] ^= digest_ints[index]
+ self.id = '%08X%08X%08X' % tuple(id_ints)
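+
+ # Sketch of the folding above: SHA-1's five 32-bit words d0..d4 collapse
+ # into three ID words (d0^d3, d1^d4, d2), printed as 24 uppercase hex
+ # digits.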
+
+ def EnsureNoIDCollisions(self):
+ """Verifies that no two objects have the same ID. Checks all descendants.
+ """
+
+ ids = {}
+ descendants = self.Descendants()
+ for descendant in descendants:
+ if descendant.id in ids:
+ other = ids[descendant.id]
+ raise KeyError(
+ 'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
+ (descendant.id, str(descendant._properties),
+ str(other._properties), self._properties['rootObject'].Name()))
+ ids[descendant.id] = descendant
+
+ def Children(self):
+ """Returns a list of all of this object's owned (strong) children."""
+
+ children = []
+ for property, attributes in self._schema.items():
+ (is_list, property_type, is_strong) = attributes[0:3]
+ if is_strong and property in self._properties:
+ if not is_list:
+ children.append(self._properties[property])
+ else:
+ children.extend(self._properties[property])
+ return children
+
+ def Descendants(self):
+ """Returns a list of all of this object's descendants, including this
+ object.
+ """
+
+ children = self.Children()
+ descendants = [self]
+ for child in children:
+ descendants.extend(child.Descendants())
+ return descendants
+
+ def PBXProjectAncestor(self):
+ # The base case for recursion is defined at PBXProject.PBXProjectAncestor.
+ if self.parent:
+ return self.parent.PBXProjectAncestor()
+ return None
+
+ def _EncodeComment(self, comment):
+ """Encodes a comment to be placed in the project file output, mimicing
+ Xcode behavior.
+ """
+
+ # This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
+ # the string already contains a "*/", it is turned into "(*)/". This keeps
+ # the file writer from outputting something that would be treated as the
+ # end of a comment in the middle of something intended to be entirely a
+ # comment.
+
+ return '/* ' + comment.replace('*/', '(*)/') + ' */'
+
+ def _EncodeTransform(self, match):
+ # This function works closely with _EncodeString. It will only be called
+ # by re.sub with match.group(0) containing a character matched by the
+ # _escaped expression.
+ char = match.group(0)
+
+ # Backslashes (\) and quotation marks (") are always replaced with a
+ # backslash-escaped version of the same. Everything else gets its
+ # replacement from the class' _encode_transforms array.
+ if char == '\\':
+ return '\\\\'
+ if char == '"':
+ return '\\"'
+ return self._encode_transforms[ord(char)]
+
+ def _EncodeString(self, value):
+ """Encodes a string to be placed in the project file output, mimicing
+ Xcode behavior.
+ """
+
+ # Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
+ # $ (dollar sign), . (period), and _ (underscore) is present. Also use
+ # quotation marks to represent empty strings.
+ #
+ # Escape " (double-quote) and \ (backslash) by preceding them with a
+ # backslash.
+ #
+ # Some characters below the printable ASCII range are encoded specially:
+ # 7 ^G BEL is encoded as "\a"
+ # 8 ^H BS is encoded as "\b"
+ # 11 ^K VT is encoded as "\v"
+ # 12 ^L NP is encoded as "\f"
+ # 127 ^? DEL is passed through as-is without escaping
+ # - In PBXFileReference and PBXBuildFile objects:
+ # 9 ^I HT is passed through as-is without escaping
+ # 10 ^J NL is passed through as-is without escaping
+ # 13 ^M CR is passed through as-is without escaping
+ # - In other objects:
+ # 9 ^I HT is encoded as "\t"
+ # 10 ^J NL is encoded as "\n"
+ # 13 ^M CR is encoded as "\n" rendering it indistinguishable from
+ # 10 ^J NL
+ # All other characters within the ASCII control character range (0 through
+ # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
+ # in hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
+ # Characters above the ASCII range are passed through to the output encoded
+ # as UTF-8 without any escaping. These mappings are contained in the
+ # class' _encode_transforms list.
+
+ if _unquoted.search(value) and not _quoted.search(value):
+ return value
+
+ return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
+
+ def _XCPrint(self, file, tabs, line):
+ file.write('\t' * tabs + line)
+
+ def _XCPrintableValue(self, tabs, value, flatten_list=False):
+ """Returns a representation of value that may be printed in a project file,
+ mimicking Xcode's behavior.
+
+ _XCPrintableValue can handle str and int values, XCObjects (which are
+ made printable by returning their id property), and list and dict objects
+ composed of any of the above types. When printing a list or dict, and
+ _should_print_single_line is False, the tabs parameter is used to determine
+ how much to indent the lines corresponding to the items in the list or
+ dict.
+
+ If flatten_list is True, single-element lists will be transformed into
+ strings.
+ """
+
+ printable = ''
+ comment = None
+
+ if self._should_print_single_line:
+ sep = ' '
+ element_tabs = ''
+ end_tabs = ''
+ else:
+ sep = '\n'
+ element_tabs = '\t' * (tabs + 1)
+ end_tabs = '\t' * tabs
+
+ if isinstance(value, XCObject):
+ printable += value.id
+ comment = value.Comment()
+ elif isinstance(value, str):
+ printable += self._EncodeString(value)
+ # A python3 compatible way of saying isinstance(value, unicode).
+ # basestring is str in python3 so this is equivalent to the above
+ # isinstance. Thus if it failed above it will fail here.
+ # In python2 we test against str and unicode at this point. str has already
+ # failed in the above isinstance so we test against unicode.
+ elif isinstance(value, basestring):
+ printable += self._EncodeString(value.encode('utf-8'))
+ elif isinstance(value, int):
+ printable += str(value)
+ elif isinstance(value, list):
+ if flatten_list and len(value) <= 1:
+ if len(value) == 0:
+ printable += self._EncodeString('')
+ else:
+ printable += self._EncodeString(value[0])
+ else:
+ printable = '(' + sep
+ for item in value:
+ printable += element_tabs + \
+ self._XCPrintableValue(tabs + 1, item, flatten_list) + \
+ ',' + sep
+ printable += end_tabs + ')'
+ elif isinstance(value, dict):
+ printable = '{' + sep
+ for item_key, item_value in sorted(value.items()):
+ printable += element_tabs + \
+ self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
+ self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
+ sep
+ printable += end_tabs + '}'
+ else:
+ raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
+
+ if comment != None:
+ printable += ' ' + self._EncodeComment(comment)
+
+ return printable
+
+ def _XCKVPrint(self, file, tabs, key, value):
+ """Prints a key and value, members of an XCObject's _properties dictionary,
+ to file.
+
+ tabs is an int identifying the indentation level. If the class'
+ _should_print_single_line variable is True, tabs is ignored and the
+ key-value pair will be followed by a space instead of a newline.
+ """
+
+ if self._should_print_single_line:
+ printable = ''
+ after_kv = ' '
+ else:
+ printable = '\t' * tabs
+ after_kv = '\n'
+
+ # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
+ # objects without comments. Sometimes it prints them with comments, but
+ # the majority of the time, it doesn't. To avoid unnecessary changes to
+ # the project file after Xcode opens it, don't write comments for
+ # remoteGlobalIDString. This is a sucky hack and it would certainly be
+ # cleaner to extend the schema to indicate whether or not a comment should
+ # be printed, but since this is the only case where the problem occurs and
+ # Xcode itself can't seem to make up its mind, the hack will suffice.
+ #
+ # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
+ if key == 'remoteGlobalIDString' and isinstance(self,
+ PBXContainerItemProxy):
+ value_to_print = value.id
+ else:
+ value_to_print = value
+
+ # PBXBuildFile's settings property is represented in the output as a dict,
+ # but a hack here has it represented as a string. Arrange to strip off the
+ # quotes so that it shows up in the output as expected.
+ if key == 'settings' and isinstance(self, PBXBuildFile):
+ strip_value_quotes = True
+ else:
+ strip_value_quotes = False
+
+ # In another one-off, let's set flatten_list on buildSettings properties
+ # of XCBuildConfiguration objects, because that's how Xcode treats them.
+ if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
+ flatten_list = True
+ else:
+ flatten_list = False
+
+ try:
+ printable_key = self._XCPrintableValue(tabs, key, flatten_list)
+ printable_value = self._XCPrintableValue(tabs, value_to_print,
+ flatten_list)
+ if strip_value_quotes and len(printable_value) > 1 and \
+ printable_value[0] == '"' and printable_value[-1] == '"':
+ printable_value = printable_value[1:-1]
+ printable += printable_key + ' = ' + printable_value + ';' + after_kv
+ except TypeError as e:
+ gyp.common.ExceptionAppend(e,
+ 'while printing key "%s"' % key)
+ raise
+
+ self._XCPrint(file, 0, printable)
+
+ def Print(self, file=sys.stdout):
+    """Prints a representation of this object to file, adhering to Xcode output
+ formatting.
+ """
+
+ self.VerifyHasRequiredProperties()
+
+ if self._should_print_single_line:
+ # When printing an object in a single line, Xcode doesn't put any space
+ # between the beginning of a dictionary (or presumably a list) and the
+ # first contained item, so you wind up with snippets like
+ # ...CDEF = {isa = PBXFileReference; fileRef = 0123...
+ # If it were me, I would have put a space in there after the opening
+ # curly, but I guess this is just another one of those inconsistencies
+ # between how Xcode prints PBXFileReference and PBXBuildFile objects as
+ # compared to other objects. Mimic Xcode's behavior here by using an
+ # empty string for sep.
+ sep = ''
+ end_tabs = 0
+ else:
+ sep = '\n'
+ end_tabs = 2
+
+ # Start the object. For example, '\t\tPBXProject = {\n'.
+ self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
+
+    # "isa" isn't in the _properties dictionary; it's an intrinsic property
+    # of the class to which the object belongs. Xcode always outputs "isa"
+ # as the first element of an object dictionary.
+ self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
+
+ # The remaining elements of an object dictionary are sorted alphabetically.
+ for property, value in sorted(self._properties.items()):
+ self._XCKVPrint(file, 3, property, value)
+
+ # End the object.
+ self._XCPrint(file, end_tabs, '};\n')
+
+ def UpdateProperties(self, properties, do_copy=False):
+ """Merge the supplied properties into the _properties dictionary.
+
+ The input properties must adhere to the class schema or a KeyError or
+ TypeError exception will be raised. If adding an object of an XCObject
+ subclass and the schema indicates a strong relationship, the object's
+ parent will be set to this object.
+
+ If do_copy is True, then lists, dicts, strong-owned XCObjects, and
+ strong-owned XCObjects in lists will be copied instead of having their
+ references added.
+ """
+
+ if properties is None:
+ return
+
+ for property, value in properties.items():
+ # Make sure the property is in the schema.
+ if not property in self._schema:
+ raise KeyError(property + ' not in ' + self.__class__.__name__)
+
+ # Make sure the property conforms to the schema.
+ (is_list, property_type, is_strong) = self._schema[property][0:3]
+ if is_list:
+ if value.__class__ != list:
+ raise TypeError(
+ property + ' of ' + self.__class__.__name__ + \
+ ' must be list, not ' + value.__class__.__name__)
+ for item in value:
+ if not isinstance(item, property_type) and \
+ not (isinstance(item, basestring) and property_type == str):
+ # Accept unicode where str is specified. str is treated as
+ # UTF-8-encoded.
+ raise TypeError(
+ 'item of ' + property + ' of ' + self.__class__.__name__ + \
+ ' must be ' + property_type.__name__ + ', not ' + \
+ item.__class__.__name__)
+ elif not isinstance(value, property_type) and \
+ not (isinstance(value, basestring) and property_type == str):
+ # Accept unicode where str is specified. str is treated as
+ # UTF-8-encoded.
+ raise TypeError(
+ property + ' of ' + self.__class__.__name__ + ' must be ' + \
+ property_type.__name__ + ', not ' + value.__class__.__name__)
+
+ # Checks passed, perform the assignment.
+ if do_copy:
+ if isinstance(value, XCObject):
+ if is_strong:
+ self._properties[property] = value.Copy()
+ else:
+ self._properties[property] = value
+ elif isinstance(value, basestring) or isinstance(value, int):
+ self._properties[property] = value
+ elif isinstance(value, list):
+ if is_strong:
+ # If is_strong is True, each element is an XCObject, so it's safe
+ # to call Copy.
+ self._properties[property] = []
+ for item in value:
+ self._properties[property].append(item.Copy())
+ else:
+ self._properties[property] = value[:]
+ elif isinstance(value, dict):
+ self._properties[property] = value.copy()
+ else:
+ raise TypeError("Don't know how to copy a " + \
+ value.__class__.__name__ + ' object for ' + \
+ property + ' in ' + self.__class__.__name__)
+ else:
+ self._properties[property] = value
+
+ # Set up the child's back-reference to this object. Don't use |value|
+ # any more because it may not be right if do_copy is true.
+ if is_strong:
+ if not is_list:
+ self._properties[property].parent = self
+ else:
+ for item in self._properties[property]:
+ item.parent = self
+
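+  # A hedged usage sketch of UpdateProperties (hypothetical property names).
+  # Schema entries are lists of the form
+  #   [is_list, property_type, is_strong, is_required (, default)]
+  # so, for an object whose schema declares 'name': [0, str, 0, 0]:
+  #
+  #   xcobj.UpdateProperties({'name': 'Frameworks'})  # accepted
+  #   xcobj.UpdateProperties({'bogus': 1})            # raises KeyError
+  #   xcobj.UpdateProperties({'name': 1})             # raises TypeError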
+ def HasProperty(self, key):
+ return key in self._properties
+
+ def GetProperty(self, key):
+ return self._properties[key]
+
+ def SetProperty(self, key, value):
+ self.UpdateProperties({key: value})
+
+ def DelProperty(self, key):
+ if key in self._properties:
+ del self._properties[key]
+
+ def AppendProperty(self, key, value):
+ # TODO(mark): Support ExtendProperty too (and make this call that)?
+
+ # Schema validation.
+ if not key in self._schema:
+ raise KeyError(key + ' not in ' + self.__class__.__name__)
+
+ (is_list, property_type, is_strong) = self._schema[key][0:3]
+ if not is_list:
+ raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
+ if not isinstance(value, property_type):
+ raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
+ ' must be ' + property_type.__name__ + ', not ' + \
+ value.__class__.__name__)
+
+ # If the property doesn't exist yet, create a new empty list to receive the
+ # item.
+ if not key in self._properties:
+ self._properties[key] = []
+
+ # Set up the ownership link.
+ if is_strong:
+ value.parent = self
+
+ # Store the item.
+ self._properties[key].append(value)
+
+ def VerifyHasRequiredProperties(self):
+ """Ensure that all properties identified as required by the schema are
+ set.
+ """
+
+ # TODO(mark): A stronger verification mechanism is needed. Some
+ # subclasses need to perform validation beyond what the schema can enforce.
+ for property, attributes in self._schema.items():
+ (is_list, property_type, is_strong, is_required) = attributes[0:4]
+ if is_required and not property in self._properties:
+ raise KeyError(self.__class__.__name__ + ' requires ' + property)
+
+ def _SetDefaultsFromSchema(self):
+ """Assign object default values according to the schema. This will not
+ overwrite properties that have already been set."""
+
+ defaults = {}
+ for property, attributes in self._schema.items():
+ (is_list, property_type, is_strong, is_required) = attributes[0:4]
+ if is_required and len(attributes) >= 5 and \
+ not property in self._properties:
+        defaults[property] = attributes[4]
+
+ if len(defaults) > 0:
+ # Use do_copy=True so that each new object gets its own copy of strong
+ # objects, lists, and dicts.
+ self.UpdateProperties(defaults, do_copy=True)
+
+
+class XCHierarchicalElement(XCObject):
+ """Abstract base for PBXGroup and PBXFileReference. Not represented in a
+ project file."""
+
+ # TODO(mark): Do name and path belong here? Probably so.
+ # If path is set and name is not, name may have a default value. Name will
+ # be set to the basename of path, if the basename of path is different from
+ # the full value of path. If path is already just a leaf name, name will
+ # not be set.
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'comments': [0, str, 0, 0],
+ 'fileEncoding': [0, str, 0, 0],
+ 'includeInIndex': [0, int, 0, 0],
+ 'indentWidth': [0, int, 0, 0],
+ 'lineEnding': [0, int, 0, 0],
+ 'sourceTree': [0, str, 0, 1, '<group>'],
+ 'tabWidth': [0, int, 0, 0],
+ 'usesTabs': [0, int, 0, 0],
+ 'wrapsLines': [0, int, 0, 0],
+ })
+
+ def __init__(self, properties=None, id=None, parent=None):
+ # super
+ XCObject.__init__(self, properties, id, parent)
+ if 'path' in self._properties and not 'name' in self._properties:
+ path = self._properties['path']
+ name = posixpath.basename(path)
+ if name != '' and path != name:
+ self.SetProperty('name', name)
+
+ if 'path' in self._properties and \
+ (not 'sourceTree' in self._properties or \
+ self._properties['sourceTree'] == '<group>'):
+ # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
+ # the variable out and make the path be relative to that variable by
+ # assigning the variable name as the sourceTree.
+ (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
+ if source_tree != None:
+ self._properties['sourceTree'] = source_tree
+ if path != None:
+ self._properties['path'] = path
+ if source_tree != None and path is None and \
+ not 'name' in self._properties:
+ # The path was of the form "$(SDKROOT)" with no path following it.
+ # This object is now relative to that variable, so it has no path
+ # attribute of its own. It does, however, keep a name.
+ del self._properties['path']
+ self._properties['name'] = source_tree
+
+ def Name(self):
+ if 'name' in self._properties:
+ return self._properties['name']
+ elif 'path' in self._properties:
+ return self._properties['path']
+ else:
+ # This happens in the case of the root PBXGroup.
+ return None
+
+ def Hashables(self):
+ """Custom hashables for XCHierarchicalElements.
+
+ XCHierarchicalElements are special. Generally, their hashes shouldn't
+ change if the paths don't change. The normal XCObject implementation of
+ Hashables adds a hashable for each object, which means that if
+ the hierarchical structure changes (possibly due to changes caused when
+ TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
+ the hashes will change. For example, if a project file initially contains
+    a/b/f1 and groups a and b become collapsed into a/b, f1 will have a
+    single parent a/b. If someone later adds a/f2 to the project file, a/b
+    can no longer be collapsed, and f1 winds up with parent b and
+    grandparent a. That would
+ be sufficient to change f1's hash.
+
+ To counteract this problem, hashables for all XCHierarchicalElements except
+ for the main group (which has neither a name nor a path) are taken to be
+ just the set of path components. Because hashables are inherited from
+ parents, this provides assurance that a/b/f1 has the same set of hashables
+ whether its parent is b or a/b.
+
+ The main group is a special case. As it is permitted to have no name or
+ path, it is permitted to use the standard XCObject hash mechanism. This
+ is not considered a problem because there can be only one main group.
+ """
+
+ if self == self.PBXProjectAncestor()._properties['mainGroup']:
+ # super
+ return XCObject.Hashables(self)
+
+ hashables = []
+
+ # Put the name in first, ensuring that if TakeOverOnlyChild collapses
+ # children into a top-level group like "Source", the name always goes
+ # into the list of hashables without interfering with path components.
+ if 'name' in self._properties:
+ # Make it less likely for people to manipulate hashes by following the
+ # pattern of always pushing an object type value onto the list first.
+ hashables.append(self.__class__.__name__ + '.name')
+ hashables.append(self._properties['name'])
+
+ # NOTE: This still has the problem that if an absolute path is encountered,
+ # including paths with a sourceTree, they'll still inherit their parents'
+ # hashables, even though the paths aren't relative to their parents. This
+ # is not expected to be much of a problem in practice.
+ path = self.PathFromSourceTreeAndPath()
+ if path != None:
+ components = path.split(posixpath.sep)
+ for component in components:
+ hashables.append(self.__class__.__name__ + '.path')
+ hashables.append(component)
+
+ hashables.extend(self._hashables)
+
+ return hashables
+
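+  # A hedged illustration of the hashables above: a group named 'Source'
+  # whose PathFromSourceTreeAndPath() is 'a/b' would contribute something
+  # like (class name varies by subclass)
+  #   ['PBXGroup.name', 'Source', 'PBXGroup.path', 'a', 'PBXGroup.path', 'b']
+  # plus anything already accumulated in self._hashables.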
+ def Compare(self, other):
+ # Allow comparison of these types. PBXGroup has the highest sort rank;
+ # PBXVariantGroup is treated as equal to PBXFileReference.
+ valid_class_types = {
+ PBXFileReference: 'file',
+ PBXGroup: 'group',
+ PBXVariantGroup: 'file',
+ }
+ self_type = valid_class_types[self.__class__]
+ other_type = valid_class_types[other.__class__]
+
+ if self_type == other_type:
+ # If the two objects are of the same sort rank, compare their names.
+ return cmp(self.Name(), other.Name())
+
+ # Otherwise, sort groups before everything else.
+ if self_type == 'group':
+ return -1
+ return 1
+
+ def CompareRootGroup(self, other):
+ # This function should be used only to compare direct children of the
+ # containing PBXProject's mainGroup. These groups should appear in the
+ # listed order.
+ # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
+ # generator should have a way of influencing this list rather than having
+ # to hardcode for the generator here.
+ order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
+ 'Build']
+
+ # If the groups aren't in the listed order, do a name comparison.
+ # Otherwise, groups in the listed order should come before those that
+ # aren't.
+ self_name = self.Name()
+ other_name = other.Name()
+ self_in = isinstance(self, PBXGroup) and self_name in order
+    other_in = isinstance(other, PBXGroup) and other_name in order
+ if not self_in and not other_in:
+ return self.Compare(other)
+ if self_name in order and not other_name in order:
+ return -1
+ if other_name in order and not self_name in order:
+ return 1
+
+ # If both groups are in the listed order, go by the defined order.
+ self_index = order.index(self_name)
+ other_index = order.index(other_name)
+ if self_index < other_index:
+ return -1
+ if self_index > other_index:
+ return 1
+ return 0
+
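+  # A hedged sketch of the two path accessors below, for illustration:
+  #   sourceTree '<group>', path 'b'        -> PathFromSourceTreeAndPath() 'b'
+  #   sourceTree 'SDKROOT', path 'usr/lib'  -> '$(SDKROOT)/usr/lib'
+  # If the first element's parent is a group with path 'a', its FullPath()
+  # is 'a/b'.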
+ def PathFromSourceTreeAndPath(self):
+ # Turn the object's sourceTree and path properties into a single flat
+ # string of a form comparable to the path parameter. If there's a
+ # sourceTree property other than "<group>", wrap it in $(...) for the
+ # comparison.
+ components = []
+ if self._properties['sourceTree'] != '<group>':
+ components.append('$(' + self._properties['sourceTree'] + ')')
+ if 'path' in self._properties:
+ components.append(self._properties['path'])
+
+ if len(components) > 0:
+ return posixpath.join(*components)
+
+ return None
+
+ def FullPath(self):
+ # Returns a full path to self relative to the project file, or relative
+ # to some other source tree. Start with self, and walk up the chain of
+ # parents prepending their paths, if any, until no more parents are
+ # available (project-relative path) or until a path relative to some
+ # source tree is found.
+ xche = self
+ path = None
+ while isinstance(xche, XCHierarchicalElement) and \
+ (path is None or \
+ (not path.startswith('/') and not path.startswith('$'))):
+ this_path = xche.PathFromSourceTreeAndPath()
+ if this_path != None and path != None:
+ path = posixpath.join(this_path, path)
+ elif this_path != None:
+ path = this_path
+ xche = xche.parent
+
+ return path
+
+
+class PBXGroup(XCHierarchicalElement):
+ """
+ Attributes:
+ _children_by_path: Maps pathnames of children of this PBXGroup to the
+ actual child XCHierarchicalElement objects.
+ _variant_children_by_name_and_path: Maps (name, path) tuples of
+ PBXVariantGroup children to the actual child PBXVariantGroup objects.
+ """
+
+ _schema = XCHierarchicalElement._schema.copy()
+ _schema.update({
+ 'children': [1, XCHierarchicalElement, 1, 1, []],
+ 'name': [0, str, 0, 0],
+ 'path': [0, str, 0, 0],
+ })
+
+ def __init__(self, properties=None, id=None, parent=None):
+ # super
+ XCHierarchicalElement.__init__(self, properties, id, parent)
+ self._children_by_path = {}
+ self._variant_children_by_name_and_path = {}
+ for child in self._properties.get('children', []):
+ self._AddChildToDicts(child)
+
+ def Hashables(self):
+ # super
+ hashables = XCHierarchicalElement.Hashables(self)
+
+    # It is not sufficient to rely on just name and parent to build a unique
+    # hashable: a node could have two child PBXGroups sharing a common name.
+    # To add entropy, the hashables are extended with the names of all of
+    # the node's children.
+ for child in self._properties.get('children', []):
+ child_name = child.Name()
+ if child_name != None:
+ hashables.append(child_name)
+
+ return hashables
+
+ def HashablesForChild(self):
+ # To avoid a circular reference the hashables used to compute a child id do
+ # not include the child names.
+ return XCHierarchicalElement.Hashables(self)
+
+ def _AddChildToDicts(self, child):
+ # Sets up this PBXGroup object's dicts to reference the child properly.
+ child_path = child.PathFromSourceTreeAndPath()
+ if child_path:
+ if child_path in self._children_by_path:
+ raise ValueError('Found multiple children with path ' + child_path)
+ self._children_by_path[child_path] = child
+
+ if isinstance(child, PBXVariantGroup):
+ child_name = child._properties.get('name', None)
+ key = (child_name, child_path)
+ if key in self._variant_children_by_name_and_path:
+ raise ValueError('Found multiple PBXVariantGroup children with ' + \
+ 'name ' + str(child_name) + ' and path ' + \
+ str(child_path))
+ self._variant_children_by_name_and_path[key] = child
+
+ def AppendChild(self, child):
+ # Callers should use this instead of calling
+ # AppendProperty('children', child) directly because this function
+ # maintains the group's dicts.
+ self.AppendProperty('children', child)
+ self._AddChildToDicts(child)
+
+ def GetChildByName(self, name):
+ # This is not currently optimized with a dict as GetChildByPath is because
+ # it has few callers. Most callers probably want GetChildByPath. This
+ # function is only useful to get children that have names but no paths,
+ # which is rare. The children of the main group ("Source", "Products",
+    # etc.) are pretty much the only case where this is likely to come up.
+ #
+ # TODO(mark): Maybe this should raise an error if more than one child is
+ # present with the same name.
+ if not 'children' in self._properties:
+ return None
+
+ for child in self._properties['children']:
+ if child.Name() == name:
+ return child
+
+ return None
+
+ def GetChildByPath(self, path):
+ if not path:
+ return None
+
+ if path in self._children_by_path:
+ return self._children_by_path[path]
+
+ return None
+
+ def GetChildByRemoteObject(self, remote_object):
+ # This method is a little bit esoteric. Given a remote_object, which
+ # should be a PBXFileReference in another project file, this method will
+ # return this group's PBXReferenceProxy object serving as a local proxy
+ # for the remote PBXFileReference.
+ #
+    # This function might benefit from the same dict optimization that
+    # GetChildByPath uses for some workloads, but profiling shows that it's
+    # not currently a problem.
+ if not 'children' in self._properties:
+ return None
+
+ for child in self._properties['children']:
+ if not isinstance(child, PBXReferenceProxy):
+ continue
+
+ container_proxy = child._properties['remoteRef']
+ if container_proxy._properties['remoteGlobalIDString'] == remote_object:
+ return child
+
+ return None
+
+ def AddOrGetFileByPath(self, path, hierarchical):
+ """Returns an existing or new file reference corresponding to path.
+
+ If hierarchical is True, this method will create or use the necessary
+ hierarchical group structure corresponding to path. Otherwise, it will
+ look in and create an item in the current group only.
+
+ If an existing matching reference is found, it is returned, otherwise, a
+ new one will be created, added to the correct group, and returned.
+
+ If path identifies a directory by virtue of carrying a trailing slash,
+ this method returns a PBXFileReference of "folder" type. If path
+ identifies a variant, by virtue of it identifying a file inside a directory
+ with an ".lproj" extension, this method returns a PBXVariantGroup
+ containing the variant named by path, and possibly other variants. For
+ all other paths, a "normal" PBXFileReference will be returned.
+ """
+
+ # Adding or getting a directory? Directories end with a trailing slash.
+ is_dir = False
+ if path.endswith('/'):
+ is_dir = True
+ path = posixpath.normpath(path)
+ if is_dir:
+ path = path + '/'
+
+ # Adding or getting a variant? Variants are files inside directories
+ # with an ".lproj" extension. Xcode uses variants for localization. For
+ # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
+ # MainMenu.nib inside path/to, and give it a variant named Language. In
+ # this example, grandparent would be set to path/to and parent_root would
+ # be set to Language.
+ variant_name = None
+ parent = posixpath.dirname(path)
+ grandparent = posixpath.dirname(parent)
+ parent_basename = posixpath.basename(parent)
+ (parent_root, parent_ext) = posixpath.splitext(parent_basename)
+ if parent_ext == '.lproj':
+ variant_name = parent_root
+ if grandparent == '':
+ grandparent = None
+
+ # Putting a directory inside a variant group is not currently supported.
+ assert not is_dir or variant_name is None
+
+ path_split = path.split(posixpath.sep)
+ if len(path_split) == 1 or \
+ ((is_dir or variant_name != None) and len(path_split) == 2) or \
+ not hierarchical:
+ # The PBXFileReference or PBXVariantGroup will be added to or gotten from
+ # this PBXGroup, no recursion necessary.
+ if variant_name is None:
+ # Add or get a PBXFileReference.
+ file_ref = self.GetChildByPath(path)
+ if file_ref != None:
+ assert file_ref.__class__ == PBXFileReference
+ else:
+ file_ref = PBXFileReference({'path': path})
+ self.AppendChild(file_ref)
+ else:
+ # Add or get a PBXVariantGroup. The variant group name is the same
+ # as the basename (MainMenu.nib in the example above). grandparent
+ # specifies the path to the variant group itself, and path_split[-2:]
+ # is the path of the specific variant relative to its group.
+ variant_group_name = posixpath.basename(path)
+ variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
+ variant_group_name, grandparent)
+ variant_path = posixpath.sep.join(path_split[-2:])
+ variant_ref = variant_group_ref.GetChildByPath(variant_path)
+ if variant_ref != None:
+ assert variant_ref.__class__ == PBXFileReference
+ else:
+ variant_ref = PBXFileReference({'name': variant_name,
+ 'path': variant_path})
+ variant_group_ref.AppendChild(variant_ref)
+ # The caller is interested in the variant group, not the specific
+ # variant file.
+ file_ref = variant_group_ref
+ return file_ref
+ else:
+ # Hierarchical recursion. Add or get a PBXGroup corresponding to the
+ # outermost path component, and then recurse into it, chopping off that
+ # path component.
+ next_dir = path_split[0]
+ group_ref = self.GetChildByPath(next_dir)
+ if group_ref != None:
+ assert group_ref.__class__ == PBXGroup
+ else:
+ group_ref = PBXGroup({'path': next_dir})
+ self.AppendChild(group_ref)
+ return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
+ hierarchical)
+
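+  # A hedged example of AddOrGetFileByPath (hypothetical paths). With
+  # hierarchical=True,
+  #
+  #   group.AddOrGetFileByPath('a/b/English.lproj/MainMenu.nib', True)
+  #
+  # creates or reuses PBXGroups 'a' and 'b', then a PBXVariantGroup named
+  # 'MainMenu.nib' under 'b' containing a PBXFileReference named 'English'
+  # with path 'English.lproj/MainMenu.nib', and returns the variant group.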
+ def AddOrGetVariantGroupByNameAndPath(self, name, path):
+ """Returns an existing or new PBXVariantGroup for name and path.
+
+ If a PBXVariantGroup identified by the name and path arguments is already
+ present as a child of this object, it is returned. Otherwise, a new
+ PBXVariantGroup with the correct properties is created, added as a child,
+ and returned.
+
+ This method will generally be called by AddOrGetFileByPath, which knows
+ when to create a variant group based on the structure of the pathnames
+ passed to it.
+ """
+
+ key = (name, path)
+ if key in self._variant_children_by_name_and_path:
+ variant_group_ref = self._variant_children_by_name_and_path[key]
+ assert variant_group_ref.__class__ == PBXVariantGroup
+ return variant_group_ref
+
+ variant_group_properties = {'name': name}
+ if path != None:
+ variant_group_properties['path'] = path
+ variant_group_ref = PBXVariantGroup(variant_group_properties)
+ self.AppendChild(variant_group_ref)
+
+ return variant_group_ref
+
+ def TakeOverOnlyChild(self, recurse=False):
+ """If this PBXGroup has only one child and it's also a PBXGroup, take
+ it over by making all of its children this object's children.
+
+ This function will continue to take over only children when those children
+ are groups. If there are three PBXGroups representing a, b, and c, with
+ c inside b and b inside a, and a and b have no other children, this will
+ result in a taking over both b and c, forming a PBXGroup for a/b/c.
+
+ If recurse is True, this function will recurse into children and ask them
+ to collapse themselves by taking over only children as well. Assuming
+ an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
+ (d1, d2, and f are files, the rest are groups), recursion will result in
+ a group for a/b/c containing a group for d3/e.
+ """
+
+ # At this stage, check that child class types are PBXGroup exactly,
+ # instead of using isinstance. The only subclass of PBXGroup,
+ # PBXVariantGroup, should not participate in reparenting in the same way:
+ # reparenting by merging different object types would be wrong.
+ while len(self._properties['children']) == 1 and \
+ self._properties['children'][0].__class__ == PBXGroup:
+ # Loop to take over the innermost only-child group possible.
+
+ child = self._properties['children'][0]
+
+ # Assume the child's properties, including its children. Save a copy
+ # of this object's old properties, because they'll still be needed.
+ # This object retains its existing id and parent attributes.
+ old_properties = self._properties
+ self._properties = child._properties
+ self._children_by_path = child._children_by_path
+
+ if not 'sourceTree' in self._properties or \
+ self._properties['sourceTree'] == '<group>':
+ # The child was relative to its parent. Fix up the path. Note that
+ # children with a sourceTree other than "<group>" are not relative to
+ # their parents, so no path fix-up is needed in that case.
+ if 'path' in old_properties:
+ if 'path' in self._properties:
+ # Both the original parent and child have paths set.
+ self._properties['path'] = posixpath.join(old_properties['path'],
+ self._properties['path'])
+ else:
+ # Only the original parent has a path, use it.
+ self._properties['path'] = old_properties['path']
+ if 'sourceTree' in old_properties:
+ # The original parent had a sourceTree set, use it.
+ self._properties['sourceTree'] = old_properties['sourceTree']
+
+ # If the original parent had a name set, keep using it. If the original
+ # parent didn't have a name but the child did, let the child's name
+ # live on. If the name attribute seems unnecessary now, get rid of it.
+ if 'name' in old_properties and old_properties['name'] != None and \
+ old_properties['name'] != self.Name():
+ self._properties['name'] = old_properties['name']
+ if 'name' in self._properties and 'path' in self._properties and \
+ self._properties['name'] == self._properties['path']:
+ del self._properties['name']
+
+ # Notify all children of their new parent.
+ for child in self._properties['children']:
+ child.parent = self
+
+ # If asked to recurse, recurse.
+ if recurse:
+ for child in self._properties['children']:
+ if child.__class__ == PBXGroup:
+ child.TakeOverOnlyChild(recurse)
+
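+  # A hedged sketch of TakeOverOnlyChild: given groups a > b > c where b is
+  # a's only child and c is b's only child, a.TakeOverOnlyChild() leaves one
+  # surviving group (still with a's id and parent) whose path is 'a/b/c' and
+  # whose children are c's former children, per the name/path fix-ups above.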
+ def SortGroup(self):
+ self._properties['children'] = \
+ sorted(self._properties['children'],
+ key=functools.cmp_to_key(XCHierarchicalElement.Compare))
+
+ # Recurse.
+ for child in self._properties['children']:
+ if isinstance(child, PBXGroup):
+ child.SortGroup()
+
+
+class XCFileLikeElement(XCHierarchicalElement):
+ # Abstract base for objects that can be used as the fileRef property of
+ # PBXBuildFile.
+
+ def PathHashables(self):
+ # A PBXBuildFile that refers to this object will call this method to
+ # obtain additional hashables specific to this XCFileLikeElement. Don't
+ # just use this object's hashables, they're not specific and unique enough
+ # on their own (without access to the parent hashables.) Instead, provide
+ # hashables that identify this object by path by getting its hashables as
+ # well as the hashables of ancestor XCHierarchicalElement objects.
+
+ hashables = []
+ xche = self
+ while xche != None and isinstance(xche, XCHierarchicalElement):
+ xche_hashables = xche.Hashables()
+ for index, xche_hashable in enumerate(xche_hashables):
+ hashables.insert(index, xche_hashable)
+ xche = xche.parent
+ return hashables
+
+
+class XCContainerPortal(XCObject):
+ # Abstract base for objects that can be used as the containerPortal property
+ # of PBXContainerItemProxy.
+ pass
+
+
+class XCRemoteObject(XCObject):
+ # Abstract base for objects that can be used as the remoteGlobalIDString
+ # property of PBXContainerItemProxy.
+ pass
+
+
+class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
+ _schema = XCFileLikeElement._schema.copy()
+ _schema.update({
+ 'explicitFileType': [0, str, 0, 0],
+ 'lastKnownFileType': [0, str, 0, 0],
+ 'name': [0, str, 0, 0],
+ 'path': [0, str, 0, 1],
+ })
+
+ # Weird output rules for PBXFileReference.
+ _should_print_single_line = True
+ # super
+ _encode_transforms = XCFileLikeElement._alternate_encode_transforms
+
+ def __init__(self, properties=None, id=None, parent=None):
+ # super
+ XCFileLikeElement.__init__(self, properties, id, parent)
+ if 'path' in self._properties and self._properties['path'].endswith('/'):
+ self._properties['path'] = self._properties['path'][:-1]
+ is_dir = True
+ else:
+ is_dir = False
+
+ if 'path' in self._properties and \
+ not 'lastKnownFileType' in self._properties and \
+ not 'explicitFileType' in self._properties:
+ # TODO(mark): This is the replacement for a replacement for a quick hack.
+ # It is no longer incredibly sucky, but this list needs to be extended.
+ extension_map = {
+ 'a': 'archive.ar',
+ 'app': 'wrapper.application',
+ 'bdic': 'file',
+ 'bundle': 'wrapper.cfbundle',
+ 'c': 'sourcecode.c.c',
+ 'cc': 'sourcecode.cpp.cpp',
+ 'cpp': 'sourcecode.cpp.cpp',
+ 'css': 'text.css',
+ 'cxx': 'sourcecode.cpp.cpp',
+ 'dart': 'sourcecode',
+ 'dylib': 'compiled.mach-o.dylib',
+ 'framework': 'wrapper.framework',
+ 'gyp': 'sourcecode',
+ 'gypi': 'sourcecode',
+ 'h': 'sourcecode.c.h',
+ 'hxx': 'sourcecode.cpp.h',
+ 'icns': 'image.icns',
+ 'java': 'sourcecode.java',
+ 'js': 'sourcecode.javascript',
+ 'kext': 'wrapper.kext',
+ 'm': 'sourcecode.c.objc',
+ 'mm': 'sourcecode.cpp.objcpp',
+ 'nib': 'wrapper.nib',
+ 'o': 'compiled.mach-o.objfile',
+ 'pdf': 'image.pdf',
+ 'pl': 'text.script.perl',
+ 'plist': 'text.plist.xml',
+ 'pm': 'text.script.perl',
+ 'png': 'image.png',
+ 'py': 'text.script.python',
+ 'r': 'sourcecode.rez',
+ 'rez': 'sourcecode.rez',
+ 's': 'sourcecode.asm',
+ 'storyboard': 'file.storyboard',
+ 'strings': 'text.plist.strings',
+ 'swift': 'sourcecode.swift',
+ 'ttf': 'file',
+ 'xcassets': 'folder.assetcatalog',
+ 'xcconfig': 'text.xcconfig',
+ 'xcdatamodel': 'wrapper.xcdatamodel',
+        'xcdatamodeld': 'wrapper.xcdatamodeld',
+ 'xib': 'file.xib',
+ 'y': 'sourcecode.yacc',
+ 'tbd': 'sourcecode.text-based-dylib-definition',
+ }
+
+ prop_map = {
+ 'dart': 'explicitFileType',
+ 'gyp': 'explicitFileType',
+ 'gypi': 'explicitFileType',
+ }
+
+ if is_dir:
+ file_type = 'folder'
+ prop_name = 'lastKnownFileType'
+ else:
+ basename = posixpath.basename(self._properties['path'])
+ (root, ext) = posixpath.splitext(basename)
+ # Check the map using a lowercase extension.
+ # TODO(mark): Maybe it should try with the original case first and fall
+ # back to lowercase, in case there are any instances where case
+ # matters. There currently aren't.
+ if ext != '':
+ ext = ext[1:].lower()
+
+ # TODO(mark): "text" is the default value, but "file" is appropriate
+ # for unrecognized files not containing text. Xcode seems to choose
+ # based on content.
+ file_type = extension_map.get(ext, 'text')
+ prop_name = prop_map.get(ext, 'lastKnownFileType')
+
+ self._properties[prop_name] = file_type
+
+
+class PBXVariantGroup(PBXGroup, XCFileLikeElement):
+ """PBXVariantGroup is used by Xcode to represent localizations."""
+ # No additions to the schema relative to PBXGroup.
+ pass
+
+
+# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
+# because it uses PBXContainerItemProxy, defined below.
+
+
+class XCBuildConfiguration(XCObject):
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'baseConfigurationReference': [0, PBXFileReference, 0, 0],
+ 'buildSettings': [0, dict, 0, 1, {}],
+ 'name': [0, str, 0, 1],
+ })
+
+ def HasBuildSetting(self, key):
+ return key in self._properties['buildSettings']
+
+ def GetBuildSetting(self, key):
+ return self._properties['buildSettings'][key]
+
+ def SetBuildSetting(self, key, value):
+ # TODO(mark): If a list, copy?
+ self._properties['buildSettings'][key] = value
+
+ def AppendBuildSetting(self, key, value):
+ if not key in self._properties['buildSettings']:
+ self._properties['buildSettings'][key] = []
+ self._properties['buildSettings'][key].append(value)
+
+ def DelBuildSetting(self, key):
+ if key in self._properties['buildSettings']:
+ del self._properties['buildSettings'][key]
+
+ def SetBaseConfiguration(self, value):
+ self._properties['baseConfigurationReference'] = value
+
+
+class XCConfigurationList(XCObject):
+ # _configs is the default list of configurations.
+ _configs = [ XCBuildConfiguration({'name': 'Debug'}),
+ XCBuildConfiguration({'name': 'Release'}) ]
+
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
+ 'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
+ 'defaultConfigurationName': [0, str, 0, 1, 'Release'],
+ })
+
+ def Name(self):
+ return 'Build configuration list for ' + \
+ self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
+
+ def ConfigurationNamed(self, name):
+ """Convenience accessor to obtain an XCBuildConfiguration by name."""
+ for configuration in self._properties['buildConfigurations']:
+ if configuration._properties['name'] == name:
+ return configuration
+
+ raise KeyError(name)
+
+ def DefaultConfiguration(self):
+ """Convenience accessor to obtain the default XCBuildConfiguration."""
+ return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
+
+ def HasBuildSetting(self, key):
+ """Determines the state of a build setting in all XCBuildConfiguration
+ child objects.
+
+ If all child objects have key in their build settings, and the value is the
+ same in all child objects, returns 1.
+
+ If no child objects have the key in their build settings, returns 0.
+
+ If some, but not all, child objects have the key in their build settings,
+ or if any children have different values for the key, returns -1.
+ """
+
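+    # A hedged illustration with two configurations, Debug and Release:
+    #   both set 'GCC_OPTIMIZATION_LEVEL' to '0'   -> returns 1
+    #   neither sets it                            -> returns 0
+    #   only one sets it, or the values differ     -> returns -1
+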
+ has = None
+ value = None
+ for configuration in self._properties['buildConfigurations']:
+ configuration_has = configuration.HasBuildSetting(key)
+ if has is None:
+ has = configuration_has
+ elif has != configuration_has:
+ return -1
+
+ if configuration_has:
+ configuration_value = configuration.GetBuildSetting(key)
+ if value is None:
+ value = configuration_value
+ elif value != configuration_value:
+ return -1
+
+ if not has:
+ return 0
+
+ return 1
+
+ def GetBuildSetting(self, key):
+ """Gets the build setting for key.
+
+    All child XCBuildConfiguration objects must have the same value set for the
+ setting, or a ValueError will be raised.
+ """
+
+ # TODO(mark): This is wrong for build settings that are lists. The list
+ # contents should be compared (and a list copy returned?)
+
+ value = None
+ for configuration in self._properties['buildConfigurations']:
+ configuration_value = configuration.GetBuildSetting(key)
+ if value is None:
+ value = configuration_value
+ else:
+ if value != configuration_value:
+ raise ValueError('Variant values for ' + key)
+
+ return value
+
+ def SetBuildSetting(self, key, value):
+ """Sets the build setting for key to value in all child
+ XCBuildConfiguration objects.
+ """
+
+ for configuration in self._properties['buildConfigurations']:
+ configuration.SetBuildSetting(key, value)
+
+ def AppendBuildSetting(self, key, value):
+ """Appends value to the build setting for key, which is treated as a list,
+ in all child XCBuildConfiguration objects.
+ """
+
+ for configuration in self._properties['buildConfigurations']:
+ configuration.AppendBuildSetting(key, value)
+
+ def DelBuildSetting(self, key):
+ """Deletes the build setting key from all child XCBuildConfiguration
+ objects.
+ """
+
+ for configuration in self._properties['buildConfigurations']:
+ configuration.DelBuildSetting(key)
+
+ def SetBaseConfiguration(self, value):
+ """Sets the build configuration in all child XCBuildConfiguration objects.
+ """
+
+ for configuration in self._properties['buildConfigurations']:
+ configuration.SetBaseConfiguration(value)
+
+
+class PBXBuildFile(XCObject):
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'fileRef': [0, XCFileLikeElement, 0, 1],
+ 'settings': [0, str, 0, 0], # hack, it's a dict
+ })
+
+ # Weird output rules for PBXBuildFile.
+ _should_print_single_line = True
+ _encode_transforms = XCObject._alternate_encode_transforms
+
+ def Name(self):
+ # Example: "main.cc in Sources"
+ return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
+
+ def Hashables(self):
+ # super
+ hashables = XCObject.Hashables(self)
+
+ # It is not sufficient to just rely on Name() to get the
+ # XCFileLikeElement's name, because that is not a complete pathname.
+ # PathHashables returns hashables unique enough that no two
+ # PBXBuildFiles should wind up with the same set of hashables, unless
+ # someone adds the same file multiple times to the same target. That
+ # would be considered invalid anyway.
+ hashables.extend(self._properties['fileRef'].PathHashables())
+
+ return hashables
+
+
+class XCBuildPhase(XCObject):
+ """Abstract base for build phase classes. Not represented in a project
+ file.
+
+ Attributes:
+    _files_by_path: A dict mapping each path of a child in the files list
+      (keys) to the corresponding PBXBuildFile children (values).
+ _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
+ to the corresponding PBXBuildFile children (values).
+ """
+
+ # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
+ # actually have a "files" list. XCBuildPhase should not have "files" but
+ # another abstract subclass of it should provide this, and concrete build
+ # phase types that do have "files" lists should be derived from that new
+ # abstract subclass. XCBuildPhase should only provide buildActionMask and
+ # runOnlyForDeploymentPostprocessing, and not files or the various
+ # file-related methods and attributes.
+
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'buildActionMask': [0, int, 0, 1, 0x7fffffff],
+ 'files': [1, PBXBuildFile, 1, 1, []],
+ 'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
+ })
+
+ def __init__(self, properties=None, id=None, parent=None):
+ # super
+ XCObject.__init__(self, properties, id, parent)
+
+ self._files_by_path = {}
+ self._files_by_xcfilelikeelement = {}
+ for pbxbuildfile in self._properties.get('files', []):
+ self._AddBuildFileToDicts(pbxbuildfile)
+
+ def FileGroup(self, path):
+ # Subclasses must override this by returning a two-element tuple. The
+ # first item in the tuple should be the PBXGroup to which "path" should be
+ # added, either as a child or deeper descendant. The second item should
+ # be a boolean indicating whether files should be added into hierarchical
+ # groups or one single flat group.
+ raise NotImplementedError(
+ self.__class__.__name__ + ' must implement FileGroup')
+
+ def _AddPathToDict(self, pbxbuildfile, path):
+ """Adds path to the dict tracking paths belonging to this build phase.
+
+ If the path is already a member of this build phase, raises an exception.
+ """
+
+ if path in self._files_by_path:
+ raise ValueError('Found multiple build files with path ' + path)
+ self._files_by_path[path] = pbxbuildfile
+
+ def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
+ """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
+
+ If path is specified, then it is the path that is being added to the
+ phase, and pbxbuildfile must contain either a PBXFileReference directly
+ referencing that path, or it must contain a PBXVariantGroup that itself
+ contains a PBXFileReference referencing the path.
+
+ If path is not specified, either the PBXFileReference's path or the paths
+ of all children of the PBXVariantGroup are taken as being added to the
+ phase.
+
+ If the path is already present in the phase, raises an exception.
+
+    If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
+    is already present in the phase, referenced by a different PBXBuildFile
+ object, raises an exception. This does not raise an exception when
+ a PBXFileReference or PBXVariantGroup reappear and are referenced by the
+ same PBXBuildFile that has already introduced them, because in the case
+ of PBXVariantGroup objects, they may correspond to multiple paths that are
+ not all added simultaneously. When this situation occurs, the path needs
+ to be added to _files_by_path, but nothing needs to change in
+ _files_by_xcfilelikeelement, and the caller should have avoided adding
+ the PBXBuildFile if it is already present in the list of children.
+ """
+
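+    # A hedged illustration: a PBXVariantGroup 'MainMenu.nib' whose variants
+    # are 'en.lproj/MainMenu.nib' and 'fr.lproj/MainMenu.nib' ends up with
+    # two entries in _files_by_path but only one entry (for the group) in
+    # _files_by_xcfilelikeelement.
+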
+ xcfilelikeelement = pbxbuildfile._properties['fileRef']
+
+ paths = []
+ if path != None:
+ # It's best when the caller provides the path.
+ if isinstance(xcfilelikeelement, PBXVariantGroup):
+ paths.append(path)
+ else:
+ # If the caller didn't provide a path, there can be either multiple
+ # paths (PBXVariantGroup) or one.
+ if isinstance(xcfilelikeelement, PBXVariantGroup):
+ for variant in xcfilelikeelement._properties['children']:
+ paths.append(variant.FullPath())
+ else:
+ paths.append(xcfilelikeelement.FullPath())
+
+ # Add the paths first, because if something's going to raise, the
+ # messages provided by _AddPathToDict are more useful owing to its
+ # having access to a real pathname and not just an object's Name().
+ for a_path in paths:
+ self._AddPathToDict(pbxbuildfile, a_path)
+
+ # If another PBXBuildFile references this XCFileLikeElement, there's a
+ # problem.
+ if xcfilelikeelement in self._files_by_xcfilelikeelement and \
+ self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
+ raise ValueError('Found multiple build files for ' + \
+ xcfilelikeelement.Name())
+ self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
+
+ def AppendBuildFile(self, pbxbuildfile, path=None):
+ # Callers should use this instead of calling
+ # AppendProperty('files', pbxbuildfile) directly because this function
+ # maintains the object's dicts. Better yet, callers can just call AddFile
+ # with a pathname and not worry about building their own PBXBuildFile
+ # objects.
+ self.AppendProperty('files', pbxbuildfile)
+ self._AddBuildFileToDicts(pbxbuildfile, path)
+
+ def AddFile(self, path, settings=None):
+ (file_group, hierarchical) = self.FileGroup(path)
+ file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
+
+ if file_ref in self._files_by_xcfilelikeelement and \
+ isinstance(file_ref, PBXVariantGroup):
+ # There's already a PBXBuildFile in this phase corresponding to the
+ # PBXVariantGroup. path just provides a new variant that belongs to
+ # the group. Add the path to the dict.
+ pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
+ self._AddBuildFileToDicts(pbxbuildfile, path)
+ else:
+ # Add a new PBXBuildFile to get file_ref into the phase.
+ if settings is None:
+ pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
+ else:
+ pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
+ self.AppendBuildFile(pbxbuildfile, path)
+
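+# A hedged usage sketch of XCBuildPhase.AddFile (hypothetical path):
+#
+#   phase.AddFile('a/b/c.m')
+#
+# asks FileGroup for the destination PBXGroup, adds or reuses a
+# PBXFileReference (or a PBXVariantGroup for ".lproj" variants), and wraps
+# it in a PBXBuildFile appended to this phase's "files" list.
+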
+
+class PBXHeadersBuildPhase(XCBuildPhase):
+ # No additions to the schema relative to XCBuildPhase.
+
+ def Name(self):
+ return 'Headers'
+
+ def FileGroup(self, path):
+ return self.PBXProjectAncestor().RootGroupForPath(path)
+
+
+class PBXResourcesBuildPhase(XCBuildPhase):
+ # No additions to the schema relative to XCBuildPhase.
+
+ def Name(self):
+ return 'Resources'
+
+ def FileGroup(self, path):
+ return self.PBXProjectAncestor().RootGroupForPath(path)
+
+
+class PBXSourcesBuildPhase(XCBuildPhase):
+ # No additions to the schema relative to XCBuildPhase.
+
+ def Name(self):
+ return 'Sources'
+
+ def FileGroup(self, path):
+ return self.PBXProjectAncestor().RootGroupForPath(path)
+
+
+class PBXFrameworksBuildPhase(XCBuildPhase):
+ # No additions to the schema relative to XCBuildPhase.
+
+ def Name(self):
+ return 'Frameworks'
+
+ def FileGroup(self, path):
+ (root, ext) = posixpath.splitext(path)
+ if ext != '':
+ ext = ext[1:].lower()
+ if ext == 'o':
+ # .o files are added to Xcode Frameworks phases, but conceptually aren't
+ # frameworks, they're more like sources or intermediates. Redirect them
+ # to show up in one of those other groups.
+ return self.PBXProjectAncestor().RootGroupForPath(path)
+ else:
+ return (self.PBXProjectAncestor().FrameworksGroup(), False)
+
+
+class PBXShellScriptBuildPhase(XCBuildPhase):
+ _schema = XCBuildPhase._schema.copy()
+ _schema.update({
+ 'inputPaths': [1, str, 0, 1, []],
+ 'name': [0, str, 0, 0],
+ 'outputPaths': [1, str, 0, 1, []],
+ 'shellPath': [0, str, 0, 1, '/bin/sh'],
+ 'shellScript': [0, str, 0, 1],
+ 'showEnvVarsInLog': [0, int, 0, 0],
+ })
+
+ def Name(self):
+ if 'name' in self._properties:
+ return self._properties['name']
+
+ return 'ShellScript'
+
+
+class PBXCopyFilesBuildPhase(XCBuildPhase):
+ _schema = XCBuildPhase._schema.copy()
+ _schema.update({
+ 'dstPath': [0, str, 0, 1],
+ 'dstSubfolderSpec': [0, int, 0, 1],
+ 'name': [0, str, 0, 0],
+ })
+
+ # path_tree_re matches "$(DIR)/path", "$(DIR)/$(DIR2)/path" or just "$(DIR)".
+  # Match group 1 is "DIR", group 3 is "path" or "$(DIR2)" or "$(DIR2)/path"
+  # or None. If group 3 is "path", group 4 will be None; otherwise group 4 is
+ # "DIR2" and group 6 is "path".
+ path_tree_re = re.compile(r'^\$\((.*?)\)(/(\$\((.*?)\)(/(.*)|)|(.*)|)|)$')
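+  # Hedged examples of the grouping (illustrative inputs):
+  #   '$(BUILT_PRODUCTS_DIR)'                     -> group 1 'BUILT_PRODUCTS_DIR'
+  #   '$(BUILT_PRODUCTS_DIR)/foo'                 -> group 3 'foo', group 4 None
+  #   '$(BUILT_PRODUCTS_DIR)/$(WRAPPER_NAME)/foo' -> group 4 'WRAPPER_NAME',
+  #                                                  group 6 'foo'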
+
+ # path_tree_{first,second}_to_subfolder map names of Xcode variables to the
+ # associated dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase
+ # object.
+ path_tree_first_to_subfolder = {
+ # Types that can be chosen via the Xcode UI.
+ 'BUILT_PRODUCTS_DIR': 16, # Products Directory
+ 'BUILT_FRAMEWORKS_DIR': 10, # Not an official Xcode macro.
+ # Existed before support for the
+ # names below was added. Maps to
+ # "Frameworks".
+ }
+
+ path_tree_second_to_subfolder = {
+ 'WRAPPER_NAME': 1, # Wrapper
+ # Although Xcode's friendly name is "Executables", the destination
+ # is demonstrably the value of the build setting
+ # EXECUTABLE_FOLDER_PATH not EXECUTABLES_FOLDER_PATH.
+ 'EXECUTABLE_FOLDER_PATH': 6, # Executables.
+ 'UNLOCALIZED_RESOURCES_FOLDER_PATH': 7, # Resources
+ 'JAVA_FOLDER_PATH': 15, # Java Resources
+ 'FRAMEWORKS_FOLDER_PATH': 10, # Frameworks
+ 'SHARED_FRAMEWORKS_FOLDER_PATH': 11, # Shared Frameworks
+ 'SHARED_SUPPORT_FOLDER_PATH': 12, # Shared Support
+ 'PLUGINS_FOLDER_PATH': 13, # PlugIns
+ # For XPC Services, Xcode sets both dstPath and dstSubfolderSpec.
+ # Note that it re-uses the BUILT_PRODUCTS_DIR value for
+ # dstSubfolderSpec. dstPath is set below.
+ 'XPCSERVICES_FOLDER_PATH': 16, # XPC Services.
+ }
+
+ def Name(self):
+ if 'name' in self._properties:
+ return self._properties['name']
+
+ return 'CopyFiles'
+
+ def FileGroup(self, path):
+ return self.PBXProjectAncestor().RootGroupForPath(path)
+
+ def SetDestination(self, path):
+ """Set the dstSubfolderSpec and dstPath properties from path.
+
+ path may be specified in the same notation used for XCHierarchicalElements,
+ specifically, "$(DIR)/path".
+ """
+
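+    # Hedged examples, per the subfolder tables above (hypothetical paths):
+    #   SetDestination('$(BUILT_PRODUCTS_DIR)/foo')
+    #     -> dstSubfolderSpec 16, dstPath 'foo'
+    #   SetDestination('$(BUILT_PRODUCTS_DIR)/$(PLUGINS_FOLDER_PATH)/foo')
+    #     -> dstSubfolderSpec 13, dstPath 'foo'
+    #   SetDestination('/abs/path')
+    #     -> dstSubfolderSpec 0, dstPath 'abs/path'
+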
+ path_tree_match = self.path_tree_re.search(path)
+ if path_tree_match:
+      path_tree = path_tree_match.group(1)
+ if path_tree in self.path_tree_first_to_subfolder:
+ subfolder = self.path_tree_first_to_subfolder[path_tree]
+ relative_path = path_tree_match.group(3)
+ if relative_path is None:
+ relative_path = ''
+
+ if subfolder == 16 and path_tree_match.group(4) is not None:
+ # BUILT_PRODUCTS_DIR (16) is the first element in a path whose
+ # second element is possibly one of the variable names in
+ # path_tree_second_to_subfolder. Xcode sets the values of all these
+ # variables to relative paths so .gyp files must prefix them with
+ # BUILT_PRODUCTS_DIR, e.g.
+ # $(BUILT_PRODUCTS_DIR)/$(PLUGINS_FOLDER_PATH). Then
+ # xcode_emulation.py can export these variables with the same values
+ # as Xcode yet make & ninja files can determine the absolute path
+ # to the target. Xcode uses the dstSubfolderSpec value set here
+ # to determine the full path.
+ #
+ # An alternative of xcode_emulation.py setting the values to absolute
+ # paths when exporting these variables has been ruled out because
+ # then the values would be different depending on the build tool.
+ #
+ # Another alternative is to invent new names for the variables used
+ # to match to the subfolder indices in the second table. .gyp files
+ # then will not need to prepend $(BUILT_PRODUCTS_DIR) because
+ # xcode_emulation.py can set the values of those variables to
+ # the absolute paths when exporting. This is possibly the thinking
+ # behind BUILT_FRAMEWORKS_DIR which is used in exactly this manner.
+ #
+ # Requiring prepending BUILT_PRODUCTS_DIR has been chosen because
+ # this same way could be used to specify destinations in .gyp files
+ # that pre-date this addition to GYP. However they would only work
+ # with the Xcode generator. The previous version of xcode_emulation.py
+ # does not export these variables. Such files will get the benefit
+ # of the Xcode UI showing the proper destination name simply by
+ # regenerating the projects with this version of GYP.
+ path_tree = path_tree_match.group(4)
+ relative_path = path_tree_match.group(6)
+ separator = '/'
+
+ if path_tree in self.path_tree_second_to_subfolder:
+ subfolder = self.path_tree_second_to_subfolder[path_tree]
+ if relative_path is None:
+ relative_path = ''
+ separator = ''
+ if path_tree == 'XPCSERVICES_FOLDER_PATH':
+ relative_path = '$(CONTENTS_FOLDER_PATH)/XPCServices' \
+ + separator + relative_path
+ else:
+ # subfolder = 16 from above
+ # The second element of the path is an unrecognized variable.
+ # Include it and any remaining elements in relative_path.
+          relative_path = path_tree_match.group(3)
+
+ else:
+ # The path starts with an unrecognized Xcode variable
+ # name like $(SRCROOT). Xcode will still handle this
+ # as an "absolute path" that starts with the variable.
+ subfolder = 0
+ relative_path = path
+ elif path.startswith('/'):
+ # Special case. Absolute paths are in dstSubfolderSpec 0.
+ subfolder = 0
+ relative_path = path[1:]
+ else:
+ raise ValueError('Can\'t use path %s in a %s' % \
+ (path, self.__class__.__name__))
+
+ self._properties['dstPath'] = relative_path
+ self._properties['dstSubfolderSpec'] = subfolder
+
+
+class PBXBuildRule(XCObject):
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'compilerSpec': [0, str, 0, 1],
+ 'filePatterns': [0, str, 0, 0],
+ 'fileType': [0, str, 0, 1],
+ 'isEditable': [0, int, 0, 1, 1],
+ 'outputFiles': [1, str, 0, 1, []],
+ 'script': [0, str, 0, 0],
+ })
+
+ def Name(self):
+ # Not very inspired, but it's what Xcode uses.
+ return self.__class__.__name__
+
+ def Hashables(self):
+ # super
+ hashables = XCObject.Hashables(self)
+
+ # Use the hashables of the weak objects that this object refers to.
+ hashables.append(self._properties['fileType'])
+ if 'filePatterns' in self._properties:
+ hashables.append(self._properties['filePatterns'])
+ return hashables
+
+
+class PBXContainerItemProxy(XCObject):
+ # When referencing an item in this project file, containerPortal is the
+ # PBXProject root object of this project file. When referencing an item in
+ # another project file, containerPortal is a PBXFileReference identifying
+ # the other project file.
+ #
+ # When serving as a proxy to an XCTarget (in this project file or another),
+ # proxyType is 1. When serving as a proxy to a PBXFileReference (in another
+ # project file), proxyType is 2. Type 2 is used for references to the
+  # products of the other project file's targets.
+ #
+ # Xcode is weird about remoteGlobalIDString. Usually, it's printed without
+ # a comment, indicating that it's tracked internally simply as a string, but
+ # sometimes it's printed with a comment (usually when the object is initially
+ # created), indicating that it's tracked as a project file object at least
+ # sometimes. This module always tracks it as an object, but contains a hack
+ # to prevent it from printing the comment in the project file output. See
+ # _XCKVPrint.
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'containerPortal': [0, XCContainerPortal, 0, 1],
+ 'proxyType': [0, int, 0, 1],
+ 'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
+ 'remoteInfo': [0, str, 0, 1],
+ })
+
+ def __repr__(self):
+ props = self._properties
+ name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
+ return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
+
+ def Name(self):
+ # Admittedly not the best name, but it's what Xcode uses.
+ return self.__class__.__name__
+
+ def Hashables(self):
+ # super
+ hashables = XCObject.Hashables(self)
+
+ # Use the hashables of the weak objects that this object refers to.
+ hashables.extend(self._properties['containerPortal'].Hashables())
+ hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
+ return hashables
+
+
+class PBXTargetDependency(XCObject):
+ # The "target" property accepts an XCTarget object, and obviously not
+ # NoneType. But XCTarget is defined below, so it can't be put into the
+ # schema yet. The definition of PBXTargetDependency can't be moved below
+ # XCTarget because XCTarget's own schema references PBXTargetDependency.
+ # Python doesn't deal well with this circular relationship, and doesn't have
+ # a real way to do forward declarations. To work around, the type of
+ # the "target" property is reset below, after XCTarget is defined.
+ #
+ # At least one of "name" and "target" is required.
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'name': [0, str, 0, 0],
+ 'target': [0, None.__class__, 0, 0],
+ 'targetProxy': [0, PBXContainerItemProxy, 1, 1],
+ })
+
+ def __repr__(self):
+ name = self._properties.get('name') or self._properties['target'].Name()
+ return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
+
+ def Name(self):
+ # Admittedly not the best name, but it's what Xcode uses.
+ return self.__class__.__name__
+
+ def Hashables(self):
+ # super
+ hashables = XCObject.Hashables(self)
+
+ # Use the hashables of the weak objects that this object refers to.
+ hashables.extend(self._properties['targetProxy'].Hashables())
+ return hashables
+
+
+class PBXReferenceProxy(XCFileLikeElement):
+ _schema = XCFileLikeElement._schema.copy()
+ _schema.update({
+ 'fileType': [0, str, 0, 1],
+ 'path': [0, str, 0, 1],
+ 'remoteRef': [0, PBXContainerItemProxy, 1, 1],
+ })
+
+
+class XCTarget(XCRemoteObject):
+  # An XCTarget is really just an XCObject; the XCRemoteObject base is just
+ # to allow PBXProject to be used in the remoteGlobalIDString property of
+ # PBXContainerItemProxy.
+ #
+ # Setting a "name" property at instantiation may also affect "productName",
+ # which may in turn affect the "PRODUCT_NAME" build setting in children of
+ # "buildConfigurationList". See __init__ below.
+ _schema = XCRemoteObject._schema.copy()
+ _schema.update({
+ 'buildConfigurationList': [0, XCConfigurationList, 1, 1,
+ XCConfigurationList()],
+ 'buildPhases': [1, XCBuildPhase, 1, 1, []],
+ 'dependencies': [1, PBXTargetDependency, 1, 1, []],
+ 'name': [0, str, 0, 1],
+ 'productName': [0, str, 0, 1],
+ })
+
+ def __init__(self, properties=None, id=None, parent=None,
+ force_outdir=None, force_prefix=None, force_extension=None):
+ # super
+ XCRemoteObject.__init__(self, properties, id, parent)
+
+ # Set up additional defaults not expressed in the schema. If a "name"
+ # property was supplied, set "productName" if it is not present. Also set
+ # the "PRODUCT_NAME" build setting in each configuration, but only if
+ # the setting is not present in any build configuration.
+ if 'name' in self._properties:
+ if not 'productName' in self._properties:
+ self.SetProperty('productName', self._properties['name'])
+
+ if 'productName' in self._properties:
+ if 'buildConfigurationList' in self._properties:
+ configs = self._properties['buildConfigurationList']
+ if configs.HasBuildSetting('PRODUCT_NAME') == 0:
+ configs.SetBuildSetting('PRODUCT_NAME',
+ self._properties['productName'])
+
+ def AddDependency(self, other):
+ pbxproject = self.PBXProjectAncestor()
+ other_pbxproject = other.PBXProjectAncestor()
+ if pbxproject == other_pbxproject:
+ # Add a dependency to another target in the same project file.
+ container = PBXContainerItemProxy({'containerPortal': pbxproject,
+ 'proxyType': 1,
+ 'remoteGlobalIDString': other,
+ 'remoteInfo': other.Name()})
+ dependency = PBXTargetDependency({'target': other,
+ 'targetProxy': container})
+ self.AppendProperty('dependencies', dependency)
+ else:
+ # Add a dependency to a target in a different project file.
+ other_project_ref = \
+ pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
+ container = PBXContainerItemProxy({
+ 'containerPortal': other_project_ref,
+ 'proxyType': 1,
+ 'remoteGlobalIDString': other,
+ 'remoteInfo': other.Name(),
+ })
+ dependency = PBXTargetDependency({'name': other.Name(),
+ 'targetProxy': container})
+ self.AppendProperty('dependencies', dependency)
+
+ # Proxy all of these through to the build configuration list.
+
+ def ConfigurationNamed(self, name):
+ return self._properties['buildConfigurationList'].ConfigurationNamed(name)
+
+ def DefaultConfiguration(self):
+ return self._properties['buildConfigurationList'].DefaultConfiguration()
+
+ def HasBuildSetting(self, key):
+ return self._properties['buildConfigurationList'].HasBuildSetting(key)
+
+ def GetBuildSetting(self, key):
+ return self._properties['buildConfigurationList'].GetBuildSetting(key)
+
+ def SetBuildSetting(self, key, value):
+ return self._properties['buildConfigurationList'].SetBuildSetting(key, \
+ value)
+
+ def AppendBuildSetting(self, key, value):
+ return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
+ value)
+
+ def DelBuildSetting(self, key):
+ return self._properties['buildConfigurationList'].DelBuildSetting(key)
+
+
+# Redefine the type of the "target" property. See PBXTargetDependency._schema
+# above.
+PBXTargetDependency._schema['target'][1] = XCTarget
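+
+# The same late-binding workaround in miniature (hypothetical classes, only
+# for illustration):
+#
+#   class Dep(object):
+#     _schema = {'target': [0, None.__class__, 0, 0]}
+#   class Tgt(object):
+#     _schema = {'deps': [1, Dep, 1, 1, []]}
+#   Dep._schema['target'][1] = Tgt   # patch the placeholder once Tgt exists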
+
+
+class PBXNativeTarget(XCTarget):
+ # buildPhases is overridden in the schema to be able to set defaults.
+ #
+ # NOTE: Contrary to most objects, it is advisable to set parent when
+ # constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
+ # object. A parent reference is required for a PBXNativeTarget during
+ # construction to be able to set up the target defaults for productReference,
+ # because a PBXBuildFile object must be created for the target and it must
+ # be added to the PBXProject's mainGroup hierarchy.
+ _schema = XCTarget._schema.copy()
+ _schema.update({
+ 'buildPhases': [1, XCBuildPhase, 1, 1,
+ [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
+ 'buildRules': [1, PBXBuildRule, 1, 1, []],
+ 'productReference': [0, PBXFileReference, 0, 1],
+ 'productType': [0, str, 0, 1],
+ })
+
+ # Mapping from Xcode product-types to settings. The settings are:
+ # filetype : used for explicitFileType in the project file
+ # prefix : the prefix for the file name
+ # suffix : the suffix for the file name
+ _product_filetypes = {
+ 'com.apple.product-type.application': ['wrapper.application',
+ '', '.app'],
+ 'com.apple.product-type.application.watchapp': ['wrapper.application',
+ '', '.app'],
+ 'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
+ '', '.appex'],
+ 'com.apple.product-type.app-extension': ['wrapper.app-extension',
+ '', '.appex'],
+ 'com.apple.product-type.bundle': ['wrapper.cfbundle',
+ '', '.bundle'],
+ 'com.apple.product-type.framework': ['wrapper.framework',
+ '', '.framework'],
+ 'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
+ 'lib', '.dylib'],
+ 'com.apple.product-type.library.static': ['archive.ar',
+ 'lib', '.a'],
+ 'com.apple.product-type.tool': ['compiled.mach-o.executable',
+ '', ''],
+ 'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
+ '', '.xctest'],
+ 'com.apple.product-type.bundle.ui-testing': ['wrapper.cfbundle',
+ '', '.xctest'],
+ 'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
+ '', '.so'],
+ 'com.apple.product-type.kernel-extension': ['wrapper.kext',
+ '', '.kext'],
+ }
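+
+ # Reading the table (an illustrative sketch): a target with productType
+ # 'com.apple.product-type.library.static' and productName 'event' gets
+ # explicitFileType 'archive.ar' and a product file named 'libevent.a'
+ # ('lib' prefix + name + '.a' suffix), subject to the force_* overrides
+ # handled in __init__ below.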
+
+ def __init__(self, properties=None, id=None, parent=None,
+ force_outdir=None, force_prefix=None, force_extension=None):
+ # super
+ XCTarget.__init__(self, properties, id, parent)
+
+ if 'productName' in self._properties and \
+ 'productType' in self._properties and \
+ 'productReference' not in self._properties and \
+ self._properties['productType'] in self._product_filetypes:
+ products_group = None
+ pbxproject = self.PBXProjectAncestor()
+ if pbxproject is not None:
+ products_group = pbxproject.ProductsGroup()
+
+ if products_group is not None:
+ (filetype, prefix, suffix) = \
+ self._product_filetypes[self._properties['productType']]
+ # Xcode does not have a distinct type for loadable modules that are
+ # pure BSD targets (not in a bundle wrapper). GYP allows such modules
+ # to be specified by setting a target type to loadable_module without
+ # having mac_bundle set. These are mapped to the pseudo-product type
+ # com.googlecode.gyp.xcode.bundle.
+ #
+ # By picking up this special type and converting it to a dynamic
+ # library (com.apple.product-type.library.dynamic) with fix-ups,
+ # single-file loadable modules can be produced.
+ #
+ # MACH_O_TYPE is changed to mh_bundle to produce the proper file type
+ # (as opposed to mh_dylib). In order for linking to succeed,
+ # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
+ # cleared. They are meaningless for type mh_bundle.
+ #
+ # Finally, the .so extension is forcibly applied over the default
+ # (.dylib), unless another forced extension is already selected.
+ # .dylib is plainly wrong, and .bundle is used by loadable_modules in
+ # bundle wrappers (com.apple.product-type.bundle). .so seems an odd
+ # choice because it's used as the extension on many other systems that
+ # don't distinguish between linkable shared libraries and non-linkable
+ # loadable modules, but there's precedent: Python loadable modules on
+ # Mac OS X use an .so extension.
+ if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
+ self._properties['productType'] = \
+ 'com.apple.product-type.library.dynamic'
+ self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
+ self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
+ self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
+ if force_extension is None:
+ force_extension = suffix[1:]
+
+ if self._properties['productType'] == \
+ 'com.apple.product-type.bundle.unit-test' or \
+ self._properties['productType'] == \
+ 'com.apple.product-type.bundle.ui-testing':
+ if force_extension is None:
+ force_extension = suffix[1:]
+
+ if force_extension is not None:
+ # If it's a wrapper (bundle), set WRAPPER_EXTENSION.
+ # Extension override.
+ suffix = '.' + force_extension
+ if filetype.startswith('wrapper.'):
+ self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
+ else:
+ self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
+
+ if filetype.startswith('compiled.mach-o.executable'):
+ product_name = self._properties['productName']
+ product_name += suffix
+ suffix = ''
+ self.SetProperty('productName', product_name)
+ self.SetBuildSetting('PRODUCT_NAME', product_name)
+
+ # Xcode handles most prefixes based on the target type, however there
+ # are exceptions. If a "BSD Dynamic Library" target is added in the
+ # Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
+ # behavior.
+ if force_prefix is not None:
+ prefix = force_prefix
+ if filetype.startswith('wrapper.'):
+ self.SetBuildSetting('WRAPPER_PREFIX', prefix)
+ else:
+ self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
+
+ if force_outdir is not None:
+ self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
+
+ # TODO(tvl): Remove the below hack.
+ # http://code.google.com/p/gyp/issues/detail?id=122
+
+ # Some targets include the prefix in the target_name. These targets
+ # really should just add a product_name setting that doesn't include
+ # the prefix. For example:
+ # target_name = 'libevent', product_name = 'event'
+ # This check cleans up for them.
+ product_name = self._properties['productName']
+ prefix_len = len(prefix)
+ if prefix_len and product_name.startswith(prefix):
+ product_name = product_name[prefix_len:]
+ self.SetProperty('productName', product_name)
+ self.SetBuildSetting('PRODUCT_NAME', product_name)
+
+ ref_props = {
+ 'explicitFileType': filetype,
+ 'includeInIndex': 0,
+ 'path': prefix + product_name + suffix,
+ 'sourceTree': 'BUILT_PRODUCTS_DIR',
+ }
+ file_ref = PBXFileReference(ref_props)
+ products_group.AppendChild(file_ref)
+ self.SetProperty('productReference', file_ref)
+
+ def GetBuildPhaseByType(self, type):
+ if 'buildPhases' not in self._properties:
+ return None
+
+ the_phase = None
+ for phase in self._properties['buildPhases']:
+ if isinstance(phase, type):
+ # Some phases may be present in multiples in a well-formed project file,
+ # but phases like PBXSourcesBuildPhase may only be present singly. Callers
+ # rely on that uniqueness, so loop over the entire list of phases and
+ # assert if more than one of the desired type is found.
+ assert the_phase is None
+ the_phase = phase
+
+ return the_phase
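+
+ # The phase accessors below (HeadersPhase, ResourcesPhase, SourcesPhase,
+ # FrameworksPhase) lean on this fetch-or-None behavior to create a missing
+ # phase on demand for the singly-present phase types.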
+
+ def HeadersPhase(self):
+ headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
+ if headers_phase is None:
+ headers_phase = PBXHeadersBuildPhase()
+
+ # The headers phase should come before the resources, sources, and
+ # frameworks phases, if any.
+ insert_at = len(self._properties['buildPhases'])
+ for index, phase in enumerate(self._properties['buildPhases']):
+ if isinstance(phase, PBXResourcesBuildPhase) or \
+ isinstance(phase, PBXSourcesBuildPhase) or \
+ isinstance(phase, PBXFrameworksBuildPhase):
+ insert_at = index
+ break
+
+ self._properties['buildPhases'].insert(insert_at, headers_phase)
+ headers_phase.parent = self
+
+ return headers_phase
+
+ def ResourcesPhase(self):
+ resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
+ if resources_phase is None:
+ resources_phase = PBXResourcesBuildPhase()
+
+ # The resources phase should come before the sources and frameworks
+ # phases, if any.
+ insert_at = len(self._properties['buildPhases'])
+ for index, phase in enumerate(self._properties['buildPhases']):
+ if isinstance(phase, PBXSourcesBuildPhase) or \
+ isinstance(phase, PBXFrameworksBuildPhase):
+ insert_at = index
+ break
+
+ self._properties['buildPhases'].insert(insert_at, resources_phase)
+ resources_phase.parent = self
+
+ return resources_phase
+
+ def SourcesPhase(self):
+ sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
+ if sources_phase is None:
+ sources_phase = PBXSourcesBuildPhase()
+ self.AppendProperty('buildPhases', sources_phase)
+
+ return sources_phase
+
+ def FrameworksPhase(self):
+ frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
+ if frameworks_phase is None:
+ frameworks_phase = PBXFrameworksBuildPhase()
+ self.AppendProperty('buildPhases', frameworks_phase)
+
+ return frameworks_phase
+
+ def AddDependency(self, other):
+ # super
+ XCTarget.AddDependency(self, other)
+
+ static_library_type = 'com.apple.product-type.library.static'
+ shared_library_type = 'com.apple.product-type.library.dynamic'
+ framework_type = 'com.apple.product-type.framework'
+ if isinstance(other, PBXNativeTarget) and \
+ 'productType' in self._properties and \
+ self._properties['productType'] != static_library_type and \
+ 'productType' in other._properties and \
+ (other._properties['productType'] == static_library_type or \
+ ((other._properties['productType'] == shared_library_type or \
+ other._properties['productType'] == framework_type) and \
+ ((not other.HasBuildSetting('MACH_O_TYPE')) or
+ other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
+
+ file_ref = other.GetProperty('productReference')
+
+ pbxproject = self.PBXProjectAncestor()
+ other_pbxproject = other.PBXProjectAncestor()
+ if pbxproject != other_pbxproject:
+ other_project_product_group = \
+ pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
+ file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
+
+ self.FrameworksPhase().AppendProperty('files',
+ PBXBuildFile({'fileRef': file_ref}))
+
+
+class PBXAggregateTarget(XCTarget):
+ pass
+
+
+class PBXProject(XCContainerPortal):
+ # A PBXProject is really just an XCObject; the XCContainerPortal thing is
+ # just to allow PBXProject to be used in the containerPortal property of
+ # PBXContainerItemProxy.
+ """
+
+ Attributes:
+ path: "sample.xcodeproj". TODO(mark) Document me!
+ _other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
+ value is a reference to the dict in the
+ projectReferences list associated with the keyed
+ PBXProject.
+ """
+
+ _schema = XCContainerPortal._schema.copy()
+ _schema.update({
+ 'attributes': [0, dict, 0, 0],
+ 'buildConfigurationList': [0, XCConfigurationList, 1, 1,
+ XCConfigurationList()],
+ 'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
+ 'hasScannedForEncodings': [0, int, 0, 1, 1],
+ 'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
+ 'projectDirPath': [0, str, 0, 1, ''],
+ 'projectReferences': [1, dict, 0, 0],
+ 'projectRoot': [0, str, 0, 1, ''],
+ 'targets': [1, XCTarget, 1, 1, []],
+ })
+
+ def __init__(self, properties=None, id=None, parent=None, path=None):
+ self.path = path
+ self._other_pbxprojects = {}
+ # super
+ return XCContainerPortal.__init__(self, properties, id, parent)
+
+ def Name(self):
+ name = self.path
+ if name.endswith('.xcodeproj'):
+ name = name[:-10]
+ return posixpath.basename(name)
+
+ def Path(self):
+ return self.path
+
+ def Comment(self):
+ return 'Project object'
+
+ def Children(self):
+ # super
+ children = XCContainerPortal.Children(self)
+
+ # Add children that the schema doesn't know about. Maybe there's a more
+ # elegant way around this, but this is the only case where we need to own
+ # objects in a dictionary (that is itself in a list), and three lines for
+ # a one-off isn't that big a deal.
+ if 'projectReferences' in self._properties:
+ for reference in self._properties['projectReferences']:
+ children.append(reference['ProductGroup'])
+
+ return children
+
+ def PBXProjectAncestor(self):
+ return self
+
+ def _GroupByName(self, name):
+ if 'mainGroup' not in self._properties:
+ self.SetProperty('mainGroup', PBXGroup())
+
+ main_group = self._properties['mainGroup']
+ group = main_group.GetChildByName(name)
+ if group is None:
+ group = PBXGroup({'name': name})
+ main_group.AppendChild(group)
+
+ return group
+
+ # SourceGroup and ProductsGroup are created by default in Xcode's own
+ # templates.
+ def SourceGroup(self):
+ return self._GroupByName('Source')
+
+ def ProductsGroup(self):
+ return self._GroupByName('Products')
+
+ # IntermediatesGroup is used to collect source-like files that are generated
+ # by rules or script phases and are placed in intermediate directories such
+ # as DerivedSources.
+ def IntermediatesGroup(self):
+ return self._GroupByName('Intermediates')
+
+ # FrameworksGroup and ProjectsGroup are top-level groups used to collect
+ # frameworks and projects.
+ def FrameworksGroup(self):
+ return self._GroupByName('Frameworks')
+
+ def ProjectsGroup(self):
+ return self._GroupByName('Projects')
+
+ def RootGroupForPath(self, path):
+ """Returns a PBXGroup child of this object to which path should be added.
+
+ This method is intended to choose between SourceGroup and
+ IntermediatesGroup on the basis of whether path is present in a source
+ directory or an intermediates directory. For the purposes of this
+ determination, any path located within a derived file directory such as
+ PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
+ directory.
+
+ The returned value is a two-element tuple. The first element is the
+ PBXGroup, and the second element specifies whether that group should be
+ organized hierarchically (True) or as a single flat list (False).
+ """
+
+ # TODO(mark): make this a class variable and bind to self on call?
+ # Also, this list is nowhere near exhaustive.
+ # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
+ # gyp.generator.xcode. There should probably be some way for that module
+ # to push the names in, rather than having to hard-code them here.
+ source_tree_groups = {
+ 'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
+ 'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
+ 'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
+ 'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
+ }
+
+ (source_tree, path) = SourceTreeAndPathFromPath(path)
+ if source_tree is not None and source_tree in source_tree_groups:
+ (group_func, hierarchical) = source_tree_groups[source_tree]
+ group = group_func()
+ return (group, hierarchical)
+
+ # TODO(mark): make additional choices based on file extension.
+
+ return (self.SourceGroup(), True)
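+
+ # Example (hypothetical inputs, a sketch): a path whose source tree parses
+ # to 'INTERMEDIATE_DIR' lands in (IntermediatesGroup, True), while an
+ # ordinary relative source path falls through to (SourceGroup, True).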
+
+ def AddOrGetFileInRootGroup(self, path):
+ """Returns a PBXFileReference corresponding to path in the correct group
+ according to RootGroupForPath's heuristics.
+
+ If an existing PBXFileReference for path exists, it will be returned.
+ Otherwise, one will be created and returned.
+ """
+
+ (group, hierarchical) = self.RootGroupForPath(path)
+ return group.AddOrGetFileByPath(path, hierarchical)
+
+ def RootGroupsTakeOverOnlyChildren(self, recurse=False):
+ """Calls TakeOverOnlyChild for all groups in the main group."""
+
+ for group in self._properties['mainGroup']._properties['children']:
+ if isinstance(group, PBXGroup):
+ group.TakeOverOnlyChild(recurse)
+
+ def SortGroups(self):
+ # Sort the children of the mainGroup (like "Source" and "Products")
+ # according to their defined order.
+ self._properties['mainGroup']._properties['children'] = \
+ sorted(self._properties['mainGroup']._properties['children'],
+ key=functools.cmp_to_key(XCHierarchicalElement.CompareRootGroup))
+
+ # Sort everything else by putting group before files, and going
+ # alphabetically by name within sections of groups and files. SortGroup
+ # is recursive.
+ for group in self._properties['mainGroup']._properties['children']:
+ if not isinstance(group, PBXGroup):
+ continue
+
+ if group.Name() == 'Products':
+ # The Products group is a special case. Instead of sorting
+ # alphabetically, sort things in the order of the targets that
+ # produce the products. To do this, just build up a new list of
+ # products based on the targets.
+ products = []
+ for target in self._properties['targets']:
+ if not isinstance(target, PBXNativeTarget):
+ continue
+ product = target._properties['productReference']
+ # Make sure that the product is already in the products group.
+ assert product in group._properties['children']
+ products.append(product)
+
+ # Make sure that this process doesn't miss anything that was already
+ # in the products group.
+ assert len(products) == len(group._properties['children'])
+ group._properties['children'] = products
+ else:
+ group.SortGroup()
+
+ def AddOrGetProjectReference(self, other_pbxproject):
+ """Add a reference to another project file (via PBXProject object) to this
+ one.
+
+ Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
+ this project file that contains a PBXReferenceProxy object for each
+ product of each PBXNativeTarget in the other project file. ProjectRef is
+ a PBXFileReference to the other project file.
+
+ If this project file already references the other project file, the
+ existing ProductGroup and ProjectRef are returned. The ProductGroup will
+ still be updated if necessary.
+ """
+
+ if 'projectReferences' not in self._properties:
+ self._properties['projectReferences'] = []
+
+ product_group = None
+ project_ref = None
+
+ if other_pbxproject not in self._other_pbxprojects:
+ # This project file isn't yet linked to the other one. Establish the
+ # link.
+ product_group = PBXGroup({'name': 'Products'})
+
+ # ProductGroup is strong.
+ product_group.parent = self
+
+ # There's nothing unique about this PBXGroup, and if left alone, it will
+ # wind up with the same set of hashables as all other PBXGroup objects
+ # owned by the projectReferences list. Add the hashables of the
+ # remote PBXProject that it's related to.
+ product_group._hashables.extend(other_pbxproject.Hashables())
+
+ # The other project reports its path as relative to the same directory
+ # that this project's path is relative to. The other project's path
+ # is not necessarily already relative to this project. Figure out the
+ # pathname that this project needs to use to refer to the other one.
+ this_path = posixpath.dirname(self.Path())
+ projectDirPath = self.GetProperty('projectDirPath')
+ if projectDirPath:
+ if posixpath.isabs(projectDirPath[0]):
+ this_path = projectDirPath
+ else:
+ this_path = posixpath.join(this_path, projectDirPath)
+ other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
+
+ # ProjectRef is weak (it's owned by the mainGroup hierarchy).
+ project_ref = PBXFileReference({
+ 'lastKnownFileType': 'wrapper.pb-project',
+ 'path': other_path,
+ 'sourceTree': 'SOURCE_ROOT',
+ })
+ self.ProjectsGroup().AppendChild(project_ref)
+
+ ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
+ self._other_pbxprojects[other_pbxproject] = ref_dict
+ self.AppendProperty('projectReferences', ref_dict)
+
+ # Xcode seems to sort this list case-insensitively
+ self._properties['projectReferences'] = \
+ sorted(self._properties['projectReferences'],
+ key=lambda x: x['ProjectRef'].Name().lower())
+ else:
+ # The link already exists. Pull out the relevant data.
+ project_ref_dict = self._other_pbxprojects[other_pbxproject]
+ product_group = project_ref_dict['ProductGroup']
+ project_ref = project_ref_dict['ProjectRef']
+
+ self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
+
+ inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
+ targets = other_pbxproject.GetProperty('targets')
+ if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
+ dir_path = project_ref._properties['path']
+ product_group._hashables.extend(dir_path)
+
+ return [product_group, project_ref]
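+
+ # Typical use (a sketch): XCTarget.AddDependency takes element [1] (the
+ # ProjectRef) for cross-project dependencies, and PBXNativeTarget's
+ # override takes element [0] (the ProductGroup) and resolves the remote
+ # product with GetChildByRemoteObject.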
+
+ def _AllSymrootsUnique(self, target, inherit_unique_symroot):
+ # Returns True if all configurations have a unique 'SYMROOT' attribute.
+ # The value of inherit_unique_symroot decides whether a configuration is
+ # assumed to inherit a unique 'SYMROOT' attribute from its parent when it
+ # doesn't define an explicit value for 'SYMROOT'.
+ symroots = self._DefinedSymroots(target)
+ for s in symroots:
+ if ((s is not None and not self._IsUniqueSymrootForTarget(s)) or
+ (s is None and not inherit_unique_symroot)):
+ return False
+ return True if symroots else inherit_unique_symroot
+
+ def _DefinedSymroots(self, target):
+ # Returns all values for the 'SYMROOT' attribute defined in all
+ # configurations for this target. If any configuration doesn't define the
+ # 'SYMROOT' attribute, None is added to the returned set. If no
+ # configuration defines the 'SYMROOT' attribute, an empty set is
+ # returned.
+ config_list = target.GetProperty('buildConfigurationList')
+ symroots = set()
+ for config in config_list.GetProperty('buildConfigurations'):
+ setting = config.GetProperty('buildSettings')
+ if 'SYMROOT' in setting:
+ symroots.add(setting['SYMROOT'])
+ else:
+ symroots.add(None)
+ if len(symroots) == 1 and None in symroots:
+ return set()
+ return symroots
+
+ def _IsUniqueSymrootForTarget(self, symroot):
+ # This method returns True if all configurations in target contain a
+ # 'SYMROOT' attribute that is unique for the given target. A value is
+ # unique if the Xcode macro '$SRCROOT' appears in it in any form.
+ uniquifier = ['$SRCROOT', '$(SRCROOT)']
+ if any(x in symroot for x in uniquifier):
+ return True
+ return False
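+
+ # For instance (hypothetical settings): a SYMROOT of '$(SRCROOT)/build' is
+ # unique to the target, while a bare 'build' shared across projects is not.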
+
+ def _SetUpProductReferences(self, other_pbxproject, product_group,
+ project_ref):
+ # TODO(mark): This only adds references to products in other_pbxproject
+ # when they don't exist in this pbxproject. Perhaps it should also
+ # remove references from this pbxproject that are no longer present in
+ # other_pbxproject. Perhaps it should update various properties if they
+ # change.
+ for target in other_pbxproject._properties['targets']:
+ if not isinstance(target, PBXNativeTarget):
+ continue
+
+ other_fileref = target._properties['productReference']
+ if product_group.GetChildByRemoteObject(other_fileref) is None:
+ # Xcode sets remoteInfo to the name of the target and not the name
+ # of its product, despite this proxy being a reference to the product.
+ container_item = PBXContainerItemProxy({
+ 'containerPortal': project_ref,
+ 'proxyType': 2,
+ 'remoteGlobalIDString': other_fileref,
+ 'remoteInfo': target.Name()
+ })
+ # TODO(mark): Does sourceTree get copied straight over from the other
+ # project? Can the other project ever have lastKnownFileType here
+ # instead of explicitFileType? (Use it if so?) Can path ever be
+ # unset? (I don't think so.) Can other_fileref have name set, and
+ # does it impact the PBXReferenceProxy if so? These are the questions
+ # that perhaps will be answered one day.
+ reference_proxy = PBXReferenceProxy({
+ 'fileType': other_fileref._properties['explicitFileType'],
+ 'path': other_fileref._properties['path'],
+ 'sourceTree': other_fileref._properties['sourceTree'],
+ 'remoteRef': container_item,
+ })
+
+ product_group.AppendChild(reference_proxy)
+
+ def SortRemoteProductReferences(self):
+ # For each remote project file, sort the associated ProductGroup in the
+ # same order that the targets are sorted in the remote project file. This
+ # is the sort order used by Xcode.
+
+ for other_pbxproject, ref_dict in self._other_pbxprojects.items():
+ # Build up a list of products in the remote project file, ordered the
+ # same as the targets that produce them.
+ remote_products = []
+ for target in other_pbxproject._properties['targets']:
+ if not isinstance(target, PBXNativeTarget):
+ continue
+ remote_products.append(target._properties['productReference'])
+
+ # Sort the PBXReferenceProxy children according to the list of remote
+ # products.
+ product_group = ref_dict['ProductGroup']
+ product_group._properties['children'] = sorted(
+ product_group._properties['children'],
+ key=lambda x: remote_products.index(x._properties['remoteRef']._properties['remoteGlobalIDString']))
+
+
+class XCProjectFile(XCObject):
+ _schema = XCObject._schema.copy()
+ _schema.update({
+ 'archiveVersion': [0, int, 0, 1, 1],
+ 'classes': [0, dict, 0, 1, {}],
+ 'objectVersion': [0, int, 0, 1, 46],
+ 'rootObject': [0, PBXProject, 1, 1],
+ })
+
+ def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
+ # Although XCProjectFile is implemented here as an XCObject, it's not a
+ # proper object in the Xcode sense, and it certainly doesn't have its own
+ # ID. Pass through an attempt to update IDs to the real root object.
+ if recursive:
+ self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
+
+ def Print(self, file=sys.stdout):
+ self.VerifyHasRequiredProperties()
+
+ # Add the special "objects" property, which will be caught and handled
+ # separately during printing. This structure allows a fairly standard
+ # loop do the normal printing.
+ self._properties['objects'] = {}
+ self._XCPrint(file, 0, '// !$*UTF8*$!\n')
+ if self._should_print_single_line:
+ self._XCPrint(file, 0, '{ ')
+ else:
+ self._XCPrint(file, 0, '{\n')
+ for property, value in sorted(self._properties.items()):
+ if property == 'objects':
+ self._PrintObjects(file)
+ else:
+ self._XCKVPrint(file, 1, property, value)
+ self._XCPrint(file, 0, '}\n')
+ del self._properties['objects']
+
+ def _PrintObjects(self, file):
+ if self._should_print_single_line:
+ self._XCPrint(file, 0, 'objects = {')
+ else:
+ self._XCPrint(file, 1, 'objects = {\n')
+
+ objects_by_class = {}
+ for object in self.Descendants():
+ if object == self:
+ continue
+ class_name = object.__class__.__name__
+ if class_name not in objects_by_class:
+ objects_by_class[class_name] = []
+ objects_by_class[class_name].append(object)
+
+ for class_name in sorted(objects_by_class):
+ self._XCPrint(file, 0, '\n')
+ self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
+ for object in sorted(objects_by_class[class_name],
+ key=lambda x: x.id):
+ object.Print(file)
+ self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
+
+ if self._should_print_single_line:
+ self._XCPrint(file, 0, '}; ')
+ else:
+ self._XCPrint(file, 1, '};\n')
diff --git a/third_party/python/gyp/pylib/gyp/xml_fix.py b/third_party/python/gyp/pylib/gyp/xml_fix.py
new file mode 100644
index 0000000000..4308d99b47
--- /dev/null
+++ b/third_party/python/gyp/pylib/gyp/xml_fix.py
@@ -0,0 +1,68 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Applies a fix to CR LF TAB handling in xml.dom.
+
+Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
+Working around this: http://bugs.python.org/issue5752
+TODO(bradnelson): Consider dropping this when we drop XP support.
+"""
+
+
+import xml.dom.minidom
+
+
+def _Replacement_write_data(writer, data, is_attrib=False):
+ """Writes datachars to writer."""
+ data = data.replace("&", "&amp;").replace("<", "&lt;")
+ data = data.replace("\"", "&quot;").replace(">", "&gt;")
+ if is_attrib:
+ data = data.replace(
+ "\r", "&#xD;").replace(
+ "\n", "&#xA;").replace(
+ "\t", "&#x9;")
+ writer.write(data)
+
+
+def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
+ # indent = current indentation
+ # addindent = indentation to add to higher levels
+ # newl = newline string
+ writer.write(indent+"<" + self.tagName)
+
+ attrs = self._get_attributes()
+ a_names = sorted(attrs.keys())
+
+ for a_name in a_names:
+ writer.write(" %s=\"" % a_name)
+ _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
+ writer.write("\"")
+ if self.childNodes:
+ writer.write(">%s" % newl)
+ for node in self.childNodes:
+ node.writexml(writer, indent + addindent, addindent, newl)
+ writer.write("%s</%s>%s" % (indent, self.tagName, newl))
+ else:
+ writer.write("/>%s" % newl)
+
+
+class XmlFix(object):
+ """Object to manage temporary patching of xml.dom.minidom."""
+
+ def __init__(self):
+ # Preserve current xml.dom.minidom functions.
+ self.write_data = xml.dom.minidom._write_data
+ self.writexml = xml.dom.minidom.Element.writexml
+ # Inject replacement versions of a function and a method.
+ xml.dom.minidom._write_data = _Replacement_write_data
+ xml.dom.minidom.Element.writexml = _Replacement_writexml
+
+ def Cleanup(self):
+ if self.write_data:
+ xml.dom.minidom._write_data = self.write_data
+ xml.dom.minidom.Element.writexml = self.writexml
+ self.write_data = None
+
+ def __del__(self):
+ self.Cleanup()
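+
+
+# Typical use (a sketch, not part of the original module): instantiate to
+# patch xml.dom.minidom, serialize, then restore.
+#
+#   fix = XmlFix()
+#   try:
+#     doc = xml.dom.minidom.parseString('<a b="1\t2"/>')
+#     doc.documentElement.writexml(sys.stdout, '', '  ', '\n')
+#   finally:
+#     fix.Cleanup()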
diff --git a/third_party/python/gyp/samples/samples b/third_party/python/gyp/samples/samples
new file mode 100755
index 0000000000..ff26de3825
--- /dev/null
+++ b/third_party/python/gyp/samples/samples
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+import os.path
+import shutil
+import sys
+
+
+gyps = [
+ 'app/app.gyp',
+ 'base/base.gyp',
+ 'build/temp_gyp/googleurl.gyp',
+ 'build/all.gyp',
+ 'build/common.gypi',
+ 'build/external_code.gypi',
+ 'chrome/test/security_tests/security_tests.gyp',
+ 'chrome/third_party/hunspell/hunspell.gyp',
+ 'chrome/chrome.gyp',
+ 'media/media.gyp',
+ 'net/net.gyp',
+ 'printing/printing.gyp',
+ 'sdch/sdch.gyp',
+ 'skia/skia.gyp',
+ 'testing/gmock.gyp',
+ 'testing/gtest.gyp',
+ 'third_party/bzip2/bzip2.gyp',
+ 'third_party/icu38/icu38.gyp',
+ 'third_party/libevent/libevent.gyp',
+ 'third_party/libjpeg/libjpeg.gyp',
+ 'third_party/libpng/libpng.gyp',
+ 'third_party/libxml/libxml.gyp',
+ 'third_party/libxslt/libxslt.gyp',
+ 'third_party/lzma_sdk/lzma_sdk.gyp',
+ 'third_party/modp_b64/modp_b64.gyp',
+ 'third_party/npapi/npapi.gyp',
+ 'third_party/sqlite/sqlite.gyp',
+ 'third_party/zlib/zlib.gyp',
+ 'v8/tools/gyp/v8.gyp',
+ 'webkit/activex_shim/activex_shim.gyp',
+ 'webkit/activex_shim_dll/activex_shim_dll.gyp',
+ 'webkit/build/action_csspropertynames.py',
+ 'webkit/build/action_cssvaluekeywords.py',
+ 'webkit/build/action_jsconfig.py',
+ 'webkit/build/action_makenames.py',
+ 'webkit/build/action_maketokenizer.py',
+ 'webkit/build/action_useragentstylesheets.py',
+ 'webkit/build/rule_binding.py',
+ 'webkit/build/rule_bison.py',
+ 'webkit/build/rule_gperf.py',
+ 'webkit/tools/test_shell/test_shell.gyp',
+ 'webkit/webkit.gyp',
+]
+
+
+def Main(argv):
+ if len(argv) != 3 or argv[1] not in ['push', 'pull']:
+ print('Usage: %s push/pull PATH_TO_CHROME' % argv[0])
+ return 1
+
+ path_to_chrome = argv[2]
+
+ for g in gyps:
+ chrome_file = os.path.join(path_to_chrome, g)
+ local_file = os.path.join(os.path.dirname(argv[0]), os.path.split(g)[1])
+ if argv[1] == 'push':
+ print('Copying %s to %s' % (local_file, chrome_file))
+ shutil.copyfile(local_file, chrome_file)
+ elif argv[1] == 'pull':
+ print('Copying %s to %s' % (chrome_file, local_file))
+ shutil.copyfile(chrome_file, local_file)
+ else:
+ assert False
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(Main(sys.argv))
diff --git a/third_party/python/gyp/samples/samples.bat b/third_party/python/gyp/samples/samples.bat
new file mode 100644
index 0000000000..778d9c90f0
--- /dev/null
+++ b/third_party/python/gyp/samples/samples.bat
@@ -0,0 +1,5 @@
+@rem Copyright (c) 2009 Google Inc. All rights reserved.
+@rem Use of this source code is governed by a BSD-style license that can be
+@rem found in the LICENSE file.
+
+@python %~dp0/samples %*
diff --git a/third_party/python/gyp/setup.py b/third_party/python/gyp/setup.py
new file mode 100755
index 0000000000..75a42558d8
--- /dev/null
+++ b/third_party/python/gyp/setup.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from setuptools import setup
+
+setup(
+ name='gyp',
+ version='0.1',
+ description='Generate Your Projects',
+ author='Chromium Authors',
+ author_email='chromium-dev@googlegroups.com',
+ url='http://code.google.com/p/gyp',
+ package_dir={'': 'pylib'},
+ packages=['gyp', 'gyp.generator'],
+ entry_points={'console_scripts': ['gyp=gyp:script_main']},
+)
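+
+# Installing this package (a sketch) exposes a 'gyp' console script via the
+# entry_points declaration above, dispatching to gyp.script_main() in pylib.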
diff --git a/third_party/python/gyp/test/actions-bare/gyptest-bare.py b/third_party/python/gyp/test/actions-bare/gyptest-bare.py
new file mode 100755
index 0000000000..e3d6db1029
--- /dev/null
+++ b/third_party/python/gyp/test/actions-bare/gyptest-bare.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies actions which are not depended on by other targets get executed.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('bare.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+test.build('bare.gyp', chdir='relocate/src')
+
+file_content = 'Hello from bare.py\n'
+
+test.built_file_must_match('out.txt', file_content, chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions-bare/src/bare.gyp b/third_party/python/gyp/test/actions-bare/src/bare.gyp
new file mode 100644
index 0000000000..3d28f099d4
--- /dev/null
+++ b/third_party/python/gyp/test/actions-bare/src/bare.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'bare',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action1',
+ 'inputs': [
+ 'bare.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/out.txt',
+ ],
+ 'action': ['python', 'bare.py', '<(PRODUCT_DIR)/out.txt'],
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions-bare/src/bare.py b/third_party/python/gyp/test/actions-bare/src/bare.py
new file mode 100755
index 0000000000..e153b774f9
--- /dev/null
+++ b/third_party/python/gyp/test/actions-bare/src/bare.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+f = open(sys.argv[1], 'w')
+f.write('Hello from bare.py\n')
+f.close()
diff --git a/third_party/python/gyp/test/actions-depfile/depfile.gyp b/third_party/python/gyp/test/actions-depfile/depfile.gyp
new file mode 100644
index 0000000000..617fe705b5
--- /dev/null
+++ b/third_party/python/gyp/test/actions-depfile/depfile.gyp
@@ -0,0 +1,42 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'depfile_target',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'depfile_action',
+ 'inputs': [
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output.txt',
+ ],
+ 'depfile': 'depfile_action.d',
+ 'action': [
+ 'python', 'touch.py', '<(PRODUCT_DIR)/<(_depfile)',
+ ],
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'depfile_action_intermediate_dir',
+ 'inputs': [
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output-intermediate.txt',
+ ],
+ 'depfile': '<(INTERMEDIATE_DIR)/depfile_action_intermediate_dir.d',
+ 'action': [
+ 'python', 'touch.py', '<(_depfile)',
+ ],
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions-depfile/gyptest-all.py b/third_party/python/gyp/test/actions-depfile/gyptest-all.py
new file mode 100644
index 0000000000..68b32d611c
--- /dev/null
+++ b/third_party/python/gyp/test/actions-depfile/gyptest-all.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Verifies that depfile fields are output in ninja rules."""
+
+import TestGyp
+import os
+
+test = TestGyp.TestGyp()
+
+if test.format == 'ninja':
+ test.run_gyp('depfile.gyp')
+ contents = open(test.built_file_path('obj/depfile_target.ninja')).read()
+
+ expected = [
+ 'depfile = depfile_action.d',
+ 'depfile = ' + os.path.join(
+ 'obj', 'depfile_target.gen/depfile_action_intermediate_dir.d'),
+ ]
+ test.must_contain_all_lines(contents, expected)
+
+ test.build('depfile.gyp')
+ test.built_file_must_exist('depfile_action.d')
+ test.built_file_must_exist(
+ 'obj/depfile_target.gen/depfile_action_intermediate_dir.d')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/actions-depfile/input.txt b/third_party/python/gyp/test/actions-depfile/input.txt
new file mode 100644
index 0000000000..3f9177e45e
--- /dev/null
+++ b/third_party/python/gyp/test/actions-depfile/input.txt
@@ -0,0 +1 @@
+input
diff --git a/third_party/python/gyp/test/actions-depfile/touch.py b/third_party/python/gyp/test/actions-depfile/touch.py
new file mode 100644
index 0000000000..57f8316093
--- /dev/null
+++ b/third_party/python/gyp/test/actions-depfile/touch.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Cross-platform touch."""
+
+import os
+import sys
+
+for fname in sys.argv[1:]:
+ if os.path.exists(fname):
+ os.utime(fname, None)
+ else:
+ if not os.path.exists(os.path.join('.', os.path.dirname(fname))):
+ os.makedirs(os.path.dirname(fname))
+ open(fname, 'w').close()
diff --git a/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/gyptest-action.py b/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/gyptest-action.py
new file mode 100755
index 0000000000..a9d218282c
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/gyptest-action.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that actions with multiple outputs & dependencies will correctly rebuild.
+
+This is a regression test for crrev.com/1177163002.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import os
+import sys
+import time
+
+if sys.platform in ('darwin', 'win32'):
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+test = TestGyp.TestGyp()
+
+TESTDIR = 'relocate/src'
+test.run_gyp('action.gyp', chdir='src')
+test.relocate('src', TESTDIR)
+
+def build_and_check(content):
+ test.write(TESTDIR + '/input.txt', content)
+ test.build('action.gyp', 'upper', chdir=TESTDIR)
+ test.built_file_must_match('result.txt', content, chdir=TESTDIR)
+
+build_and_check('Content for first build.')
+
+# Ninja works with timestamps and the test above is fast enough that the
+# 'updated' file may end up with the same timestamp as the original, meaning
+# that ninja may not always recognize the input file has changed.
+if test.format == 'ninja':
+ time.sleep(1)
+
+build_and_check('An updated input file.')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/src/action.gyp b/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/src/action.gyp
new file mode 100644
index 0000000000..a305d65ea9
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/src/action.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'upper',
+ 'type': 'none',
+ 'actions': [{
+ 'action_name': 'upper_action',
+ 'inputs': ['<(PRODUCT_DIR)/out2.txt'],
+ 'outputs': ['<(PRODUCT_DIR)/result.txt'],
+ 'action': ['python', 'rcopy.py', '<@(_inputs)', '<@(_outputs)'],
+ }],
+ },
+ {
+ 'target_name': 'lower',
+ 'type': 'none',
+ 'actions': [{
+ 'action_name': 'lower_action',
+ 'inputs': ['input.txt'],
+ 'outputs': ['<(PRODUCT_DIR)/out1.txt', '<(PRODUCT_DIR)/out2.txt'],
+ 'action': ['python', 'rcopy.py', '<@(_inputs)', '<@(_outputs)'],
+ }],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/src/rcopy.py b/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/src/rcopy.py
new file mode 100644
index 0000000000..fb029598c4
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple-outputs-with-dependencies/src/rcopy.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""A slightly odd 'cp' implementation for this test.
+
+This 'cp' can have many targets, but only one source. 'cp src dest1 dest2'
+will copy the file 'src' to both 'dest1' and 'dest2'."""
+
+import sys
+
+with open(sys.argv[1], 'r') as f:
+ src = f.read()
+for dest in sys.argv[2:]:
+ with open(dest, 'w') as f:
+ f.write(src)
+
diff --git a/third_party/python/gyp/test/actions-multiple-outputs/gyptest-multiple-outputs.py b/third_party/python/gyp/test/actions-multiple-outputs/gyptest-multiple-outputs.py
new file mode 100755
index 0000000000..5e2682d00f
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple-outputs/gyptest-multiple-outputs.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies actions with multiple outputs will correctly rebuild.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import os
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('multiple-outputs.gyp', chdir='src')
+
+chdir = 'relocate/src'
+test.relocate('src', chdir)
+
+def build_and_check():
+ # Build + check that both outputs exist.
+ test.build('multiple-outputs.gyp', chdir=chdir)
+ test.built_file_must_exist('out1.txt', chdir=chdir)
+ test.built_file_must_exist('out2.txt', chdir=chdir)
+
+# Plain build.
+build_and_check()
+
+# Remove either + rebuild. Both should exist (again).
+os.remove(test.built_file_path('out1.txt', chdir=chdir))
+build_and_check()
+
+# Remove the other + rebuild. Both should exist (again).
+os.remove(test.built_file_path('out2.txt', chdir=chdir))
+build_and_check()
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions-multiple-outputs/src/multiple-outputs.gyp b/third_party/python/gyp/test/actions-multiple-outputs/src/multiple-outputs.gyp
new file mode 100644
index 0000000000..7a3d74b11a
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple-outputs/src/multiple-outputs.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'multiple-outputs',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action1',
+ 'inputs': [],
+ 'outputs': [
+ '<(PRODUCT_DIR)/out1.txt',
+ '<(PRODUCT_DIR)/out2.txt',
+ ],
+ 'action': ['python', 'touch.py', '<@(_outputs)'],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions-multiple-outputs/src/touch.py b/third_party/python/gyp/test/actions-multiple-outputs/src/touch.py
new file mode 100644
index 0000000000..bc61267f39
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple-outputs/src/touch.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Cross-platform touch."""
+
+import os
+import sys
+
+for fname in sys.argv[1:]:
+ if os.path.exists(fname):
+ os.utime(fname, None)
+ else:
+ open(fname, 'w').close()
diff --git a/third_party/python/gyp/test/actions-multiple/gyptest-all.py b/third_party/python/gyp/test/actions-multiple/gyptest-all.py
new file mode 100755
index 0000000000..2a083de9b0
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple/gyptest-all.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies two actions can be attached to the same input files.
+"""
+
+import sys
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('actions.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+# Test of fine-grained dependencies for generators that can build individual
+# files on demand.
+# In particular:
+# - TargetA depends on TargetB.
+# - TargetA and TargetB are 'none' type with actions attached.
+# - TargetA has multiple actions.
+# - An output from one of the actions in TargetA (not the first listed),
+# is requested as the build target.
+# Ensure that TargetB gets built.
+#
+# This sub-test can only be done with generators/build tools that can
+# be asked to build individual files rather than whole targets (make, ninja).
+if test.format in ['make', 'ninja']:
+ # Select location of target based on generator.
+ if test.format == 'make':
+ target = 'multi2.txt'
+ elif test.format == 'ninja':
+ if sys.platform in ['win32', 'cygwin']:
+ target = '..\\..\\multi2.txt'
+ else:
+ target = '../../multi2.txt'
+ else:
+ assert False
+ test.build('actions.gyp', chdir='relocate/src', target=target)
+ test.must_contain('relocate/src/multi2.txt', 'hello there')
+ test.must_contain('relocate/src/multi_dep.txt', 'hello there')
+
+
+# Test that two actions can be attached to the same inputs.
+test.build('actions.gyp', test.ALL, chdir='relocate/src')
+test.must_contain('relocate/src/output1.txt', 'hello there')
+test.must_contain('relocate/src/output2.txt', 'hello there')
+test.must_contain('relocate/src/output3.txt', 'hello there')
+test.must_contain('relocate/src/output4.txt', 'hello there')
+
+# Test that process_outputs_as_sources works in conjunction with merged
+# actions.
+test.run_built_executable(
+ 'multiple_action_source_filter',
+ chdir='relocate/src',
+ stdout=(
+ '{\n'
+ 'bar\n'
+ 'car\n'
+ 'dar\n'
+ 'ear\n'
+ '}\n'
+ ),
+)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions-multiple/src/actions.gyp b/third_party/python/gyp/test/actions-multiple/src/actions.gyp
new file mode 100644
index 0000000000..d7423b589f
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple/src/actions.gyp
@@ -0,0 +1,226 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ # Have a long string so that action commands exceed the 512-character
+ # command-length limit on Windows XP.
+ 'long_string':
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ 'abcdefghijklmnopqrstuvwxyz0123456789'
+ },
+ 'targets': [
+ {
+ 'target_name': 'multiple_action_target',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action1',
+ 'inputs': [
+ 'copyfile.py',
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output1.txt',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'action2',
+ 'inputs': [
+ 'copyfile.py',
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output2.txt',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'action3',
+ 'inputs': [
+ 'copyfile.py',
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output3.txt',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'action4',
+ 'inputs': [
+ 'copyfile.py',
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output4.txt',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'multiple_action_source_filter',
+ 'type': 'executable',
+ 'sources': [
+ 'main.c',
+ # TODO(bradnelson): add foo.c here once this issue is fixed:
+ # http://code.google.com/p/gyp/issues/detail?id=175
+ ],
+ 'actions': [
+ {
+ 'action_name': 'action1',
+ 'inputs': [
+ 'foo.c',
+ 'filter.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/output1.c',
+ ],
+ 'process_outputs_as_sources': 1,
+ 'action': [
+ 'python', 'filter.py', 'foo', 'bar', 'foo.c', '<@(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'action2',
+ 'inputs': [
+ 'foo.c',
+ 'filter.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/output2.c',
+ ],
+ 'process_outputs_as_sources': 1,
+ 'action': [
+ 'python', 'filter.py', 'foo', 'car', 'foo.c', '<@(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'action3',
+ 'inputs': [
+ 'foo.c',
+ 'filter.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/output3.c',
+ ],
+ 'process_outputs_as_sources': 1,
+ 'action': [
+ 'python', 'filter.py', 'foo', 'dar', 'foo.c', '<@(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'action4',
+ 'inputs': [
+ 'foo.c',
+ 'filter.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/output4.c',
+ ],
+ 'process_outputs_as_sources': 1,
+ 'action': [
+ 'python', 'filter.py', 'foo', 'ear', 'foo.c', '<@(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'multiple_dependent_target',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action1',
+ 'inputs': [
+ 'copyfile.py',
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'multi1.txt',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'action2',
+ 'inputs': [
+ 'copyfile.py',
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'multi2.txt',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ 'dependencies': [
+ 'multiple_required_target',
+ ],
+ },
+ {
+ 'target_name': 'multiple_required_target',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'multi_dep',
+ 'inputs': [
+ 'copyfile.py',
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'multi_dep.txt',
+ ],
+ 'process_outputs_as_sources': 1,
+ 'action': [
+ 'python', '<@(_inputs)', '<(_outputs)', '<(long_string)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions-multiple/src/copyfile.py b/third_party/python/gyp/test/actions-multiple/src/copyfile.py
new file mode 100755
index 0000000000..0774679380
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple/src/copyfile.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import shutil
+import sys
+
+shutil.copyfile(sys.argv[1], sys.argv[2])
diff --git a/third_party/python/gyp/test/actions-multiple/src/filter.py b/third_party/python/gyp/test/actions-multiple/src/filter.py
new file mode 100755
index 0000000000..f61a5fa59a
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple/src/filter.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import sys
+
+data = open(sys.argv[3], 'r').read()
+fh = open(sys.argv[4], 'w')
+fh.write(data.replace(sys.argv[1], sys.argv[2]))
+fh.close()
diff --git a/third_party/python/gyp/test/actions-multiple/src/foo.c b/third_party/python/gyp/test/actions-multiple/src/foo.c
new file mode 100644
index 0000000000..23c4ef7f26
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple/src/foo.c
@@ -0,0 +1,11 @@
+/*
+ * Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+void foo(void) {
+ printf("foo\n");
+}
diff --git a/third_party/python/gyp/test/actions-multiple/src/input.txt b/third_party/python/gyp/test/actions-multiple/src/input.txt
new file mode 100644
index 0000000000..c7c7da3c64
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple/src/input.txt
@@ -0,0 +1 @@
+hello there
diff --git a/third_party/python/gyp/test/actions-multiple/src/main.c b/third_party/python/gyp/test/actions-multiple/src/main.c
new file mode 100644
index 0000000000..0a420b9034
--- /dev/null
+++ b/third_party/python/gyp/test/actions-multiple/src/main.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+void bar(void);
+void car(void);
+void dar(void);
+void ear(void);
+
+int main() {
+ printf("{\n");
+ bar();
+ car();
+ dar();
+ ear();
+ printf("}\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/actions-none/gyptest-none.py b/third_party/python/gyp/test/actions-none/gyptest-none.py
new file mode 100755
index 0000000000..933cfad30c
--- /dev/null
+++ b/third_party/python/gyp/test/actions-none/gyptest-none.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies actions can be in 'none' type targets with source files.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('none_with_source_files.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+test.build('none_with_source_files.gyp', chdir='relocate/src')
+
+file_content = 'foo.cc\n'
+
+test.built_file_must_match('fake.out', file_content, chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions-none/src/fake_cross.py b/third_party/python/gyp/test/actions-none/src/fake_cross.py
new file mode 100644
index 0000000000..a03ea87fc9
--- /dev/null
+++ b/third_party/python/gyp/test/actions-none/src/fake_cross.py
@@ -0,0 +1,12 @@
+#!/usr/bin/python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import sys
+
+fh = open(sys.argv[-1], 'w')
+for filename in sys.argv[1:-1]:
+ fh.write(open(filename).read())
+fh.close()
diff --git a/third_party/python/gyp/test/actions-none/src/foo.cc b/third_party/python/gyp/test/actions-none/src/foo.cc
new file mode 100644
index 0000000000..c6c61745ba
--- /dev/null
+++ b/third_party/python/gyp/test/actions-none/src/foo.cc
@@ -0,0 +1 @@
+foo.cc
diff --git a/third_party/python/gyp/test/actions-none/src/none_with_source_files.gyp b/third_party/python/gyp/test/actions-none/src/none_with_source_files.gyp
new file mode 100644
index 0000000000..e2aaebc10a
--- /dev/null
+++ b/third_party/python/gyp/test/actions-none/src/none_with_source_files.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Test that 'none' type targets can have .cc files in them.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'none_with_sources',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'foo.cc',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'fake_cross',
+ 'inputs': [
+ 'fake_cross.py',
+ '<@(_sources)',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/fake.out',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<@(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }
+ ],
+ },
+ ],
+}
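[Editor's note: as a worked example of the `<@(...)` list expansions above: `<@(_sources)` splices the target's sources into the action's inputs, and `<@(_inputs)`/`<@(_outputs)` splice those lists into the command line. For this target the scheduled command is therefore, sketched as the argv list gyp builds with PRODUCT_DIR left symbolic:

    # ['python', '<@(_inputs)', '<@(_outputs)'] expands to:
    ['python', 'fake_cross.py', 'foo.cc', '<(PRODUCT_DIR)/fake.out']
    # fake_cross.py then concatenates every input (here just foo.cc, whose
    # contents are the line "foo.cc") into fake.out, which is exactly what
    # gyptest-none.py asserts with built_file_must_match.
]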
diff --git a/third_party/python/gyp/test/actions-subdir/gyptest-action.py b/third_party/python/gyp/test/actions-subdir/gyptest-action.py
new file mode 100755
index 0000000000..09cfef1893
--- /dev/null
+++ b/third_party/python/gyp/test/actions-subdir/gyptest-action.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test actions that output to PRODUCT_DIR.
+"""
+
+import TestGyp
+
+# TODO fix this for xcode: http://code.google.com/p/gyp/issues/detail?id=88
+test = TestGyp.TestGyp(formats=['!xcode'])
+
+test.run_gyp('none.gyp', chdir='src')
+
+test.build('none.gyp', test.ALL, chdir='src')
+
+file_content = 'Hello from make-file.py\n'
+subdir_file_content = 'Hello from make-subdir-file.py\n'
+
+test.built_file_must_match('file.out', file_content, chdir='src')
+test.built_file_must_match('subdir_file.out', subdir_file_content, chdir='src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions-subdir/src/make-file.py b/third_party/python/gyp/test/actions-subdir/src/make-file.py
new file mode 100755
index 0000000000..6055ab9bb3
--- /dev/null
+++ b/third_party/python/gyp/test/actions-subdir/src/make-file.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = 'Hello from make-file.py\n'
+
+open(sys.argv[1], 'w').write(contents)
diff --git a/third_party/python/gyp/test/actions-subdir/src/none.gyp b/third_party/python/gyp/test/actions-subdir/src/none.gyp
new file mode 100644
index 0000000000..23f8d25a53
--- /dev/null
+++ b/third_party/python/gyp/test/actions-subdir/src/none.gyp
@@ -0,0 +1,31 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'file',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'actions': [
+ {
+ 'action_name': 'make-file',
+ 'inputs': [
+ 'make-file.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/file.out',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ }
+ ],
+ 'dependencies': [
+ 'subdir/subdir.gyp:subdir_file',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions-subdir/src/subdir/make-subdir-file.py b/third_party/python/gyp/test/actions-subdir/src/subdir/make-subdir-file.py
new file mode 100755
index 0000000000..02c090a021
--- /dev/null
+++ b/third_party/python/gyp/test/actions-subdir/src/subdir/make-subdir-file.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = 'Hello from make-subdir-file.py\n'
+
+open(sys.argv[1], 'w').write(contents)
diff --git a/third_party/python/gyp/test/actions-subdir/src/subdir/subdir.gyp b/third_party/python/gyp/test/actions-subdir/src/subdir/subdir.gyp
new file mode 100644
index 0000000000..0315d4eb83
--- /dev/null
+++ b/third_party/python/gyp/test/actions-subdir/src/subdir/subdir.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'subdir_file',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'actions': [
+ {
+ 'action_name': 'make-subdir-file',
+ 'inputs': [
+ 'make-subdir-file.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/subdir_file.out',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ }
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions/generated-header/action.py b/third_party/python/gyp/test/actions/generated-header/action.py
new file mode 100644
index 0000000000..9be98798d6
--- /dev/null
+++ b/third_party/python/gyp/test/actions/generated-header/action.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+outfile = sys.argv[1]
+open(outfile, 'w').write('const char kFoo[] = "%s";' % sys.argv[2])
diff --git a/third_party/python/gyp/test/actions/generated-header/main.cc b/third_party/python/gyp/test/actions/generated-header/main.cc
new file mode 100644
index 0000000000..7973781bc6
--- /dev/null
+++ b/third_party/python/gyp/test/actions/generated-header/main.cc
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+#include "MyHeader.h"
+
+int main() {
+ printf("%s\n", kFoo);
+}
diff --git a/third_party/python/gyp/test/actions/generated-header/test.gyp b/third_party/python/gyp/test/actions/generated-header/test.gyp
new file mode 100644
index 0000000000..209b951ef6
--- /dev/null
+++ b/third_party/python/gyp/test/actions/generated-header/test.gyp
@@ -0,0 +1,34 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'generate_header',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'inputs': [ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/MyHeader.h',
+ ],
+ 'action_name': 'generate header',
+ 'action': ['python', './action.py',
+ '<(SHARED_INTERMEDIATE_DIR)/MyHeader.h', 'foobar output' ],
+ },
+ ],
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'dependencies': [
+ 'generate_header',
+ ],
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'sources': [ 'main.cc' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions/gyptest-all.py b/third_party/python/gyp/test/actions/gyptest-all.py
new file mode 100755
index 0000000000..c8833a5d1e
--- /dev/null
+++ b/third_party/python/gyp/test/actions/gyptest-all.py
@@ -0,0 +1,101 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simple actions when using an explicit build target of 'all'.
+"""
+
+import glob
+import os
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_all')
+
+test.run_gyp('actions.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+# Some gyp files use an action that mentions an output but never
+# writes it, as a means of making the action run on every build. That
+# doesn't mesh well with ninja's semantics. TODO(evan): figure out
+# how to work always-run actions into ninja.
+if test.format in ['ninja', 'xcode-ninja']:
+ test.build('actions.gyp', test.ALL, chdir='relocate/src')
+else:
+ # Test that an "always run" action increases a counter on multiple
+ # invocations, and that a dependent action updates in step.
+ test.build('actions.gyp', test.ALL, chdir='relocate/src')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
+ test.build('actions.gyp', test.ALL, chdir='relocate/src')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
+
+ # The "always run" action only counts to 2, but the dependent target
+ # will count forever if it's allowed to run. This verifies that the
+ # dependent target only runs when the "always run" action generates
+ # new output, not just because the "always run" ran.
+ test.build('actions.gyp', test.ALL, chdir='relocate/src')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
+
+expect = """\
+Hello from program.c
+Hello from make-prog1.py
+Hello from make-prog2.py
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir1'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('program', chdir=chdir, stdout=expect)
+
+
+test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
+
+
+expect = "Hello from generate_main.py\n"
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir3'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('null_input', chdir=chdir, stdout=expect)
+
+
+# Clean out files which may have been created if test.ALL was run.
+def clean_dep_files():
+ for file in (glob.glob('relocate/src/dep_*.txt') +
+ glob.glob('relocate/src/deps_all_done_*.txt')):
+ if os.path.exists(file):
+ os.remove(file)
+
+# Confirm the cleanup worked.
+clean_dep_files()
+test.must_not_exist('relocate/src/dep_1.txt')
+test.must_not_exist('relocate/src/deps_all_done_first_123.txt')
+
+# Make sure all deps finish before an action is run on a 'none' type target.
+# If using the Make builder, add -j to make things more difficult.
+arguments = []
+if test.format == 'make':
+ arguments = ['-j']
+test.build('actions.gyp', 'action_with_dependencies_123', chdir='relocate/src',
+ arguments=arguments)
+test.must_exist('relocate/src/deps_all_done_first_123.txt')
+
+# Try again with a target that lists its deps in reverse order. Output files
+# from the previous tests are deleted first. Confirm this run did NOT trigger
+# the ALL target, which would invalidate our dep tests.
+clean_dep_files()
+test.build('actions.gyp', 'action_with_dependencies_321', chdir='relocate/src',
+ arguments=arguments)
+test.must_exist('relocate/src/deps_all_done_first_321.txt')
+test.must_not_exist('relocate/src/deps_all_done_first_123.txt')
+
+
+test.pass_test()
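[Editor's note: the gyptest-*.py scripts in this tree all follow the same TestGyp skeleton; a minimal sketch of the shared pattern, using only methods exercised above — the output path and expected contents are illustrative:

    import TestGyp

    test = TestGyp.TestGyp()                  # harness for the current generator
    test.run_gyp('actions.gyp', chdir='src')  # generate the build files
    test.relocate('src', 'relocate/src')      # verify they are location-independent
    test.build('actions.gyp', test.ALL, chdir='relocate/src')
    test.must_match('relocate/src/some_output.txt', 'expected contents\n')
    test.pass_test()                          # exits reporting success
]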
diff --git a/third_party/python/gyp/test/actions/gyptest-default.py b/third_party/python/gyp/test/actions/gyptest-default.py
new file mode 100755
index 0000000000..70c99ec9ce
--- /dev/null
+++ b/third_party/python/gyp/test/actions/gyptest-default.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simple actions when using the default build target.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_default')
+
+test.run_gyp('actions.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+# Some gyp files use an action that mentions an output but never
+# writes it, as a means of making the action run on every build. That
+# doesn't mesh well with ninja's semantics. TODO(evan): figure out
+# how to work always-run actions into ninja.
+if test.format in ['ninja', 'xcode-ninja']:
+ test.build('actions.gyp', test.ALL, chdir='relocate/src')
+else:
+ # Test that an "always run" action increases a counter on multiple
+ # invocations, and that a dependent action updates in step.
+ test.build('actions.gyp', chdir='relocate/src')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
+ test.build('actions.gyp', chdir='relocate/src')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
+
+ # The "always run" action only counts to 2, but the dependent target
+ # will count forever if it's allowed to run. This verifies that the
+ # dependent target only runs when the "always run" action generates
+ # new output, not just because the "always run" ran.
+ test.build('actions.gyp', test.ALL, chdir='relocate/src')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
+ test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
+
+expect = """\
+Hello from program.c
+Hello from make-prog1.py
+Hello from make-prog2.py
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir1'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('program', chdir=chdir, stdout=expect)
+
+
+test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
+
+
+expect = "Hello from generate_main.py\n"
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir3'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('null_input', chdir=chdir, stdout=expect)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions/gyptest-errors.py b/third_party/python/gyp/test/actions/gyptest-errors.py
new file mode 100755
index 0000000000..e1ef883e1e
--- /dev/null
+++ b/third_party/python/gyp/test/actions/gyptest-errors.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies behavior for different action configuration errors:
+the gyp run must exit with status 1, and the expected error message must appear in stderr.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_errors')
+
+
+test.run_gyp('action_missing_name.gyp', chdir='src', status=1, stderr=None)
+expect = [
+ "Anonymous action in target broken_actions2. An action must have an 'action_name' field.",
+]
+test.must_contain_all_lines(test.stderr(), expect)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions/gyptest-generated-header.py b/third_party/python/gyp/test/actions/gyptest-generated-header.py
new file mode 100644
index 0000000000..cd5bd691a6
--- /dev/null
+++ b/third_party/python/gyp/test/actions/gyptest-generated-header.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that dependencies on generated headers work, even if the header has
+a mixed-case file name.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+CHDIR = 'generated-header'
+
+test.run_gyp('test.gyp', chdir=CHDIR)
+test.build('test.gyp', 'program', chdir=CHDIR)
+test.up_to_date('test.gyp', 'program', chdir=CHDIR)
+
+expect = 'foobar output\n'
+test.run_built_executable('program', chdir=CHDIR, stdout=expect)
+
+# Change what's written to the generated header, regyp and rebuild, and check
+# that the change makes it to the executable and that the build is clean.
+test.sleep()
+test.write('generated-header/test.gyp',
+ test.read('generated-header/test.gyp').replace('foobar', 'barbaz'))
+
+test.run_gyp('test.gyp', chdir=CHDIR)
+test.build('test.gyp', 'program', chdir=CHDIR)
+test.up_to_date('test.gyp', 'program', chdir=CHDIR)
+
+expect = 'barbaz output\n'
+test.run_built_executable('program', chdir=CHDIR, stdout=expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/actions/src/action_missing_name.gyp b/third_party/python/gyp/test/actions/src/action_missing_name.gyp
new file mode 100644
index 0000000000..6647aac3b5
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/action_missing_name.gyp
@@ -0,0 +1,24 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'broken_actions2',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'inputs': [
+ 'no_name.input',
+ ],
+ 'action': [
+ 'python',
+ '-c',
+ 'from __future__ import print_function; print(\'missing name\')',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions/src/actions.gyp b/third_party/python/gyp/test/actions/src/actions.gyp
new file mode 100644
index 0000000000..5d2db1955e
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/actions.gyp
@@ -0,0 +1,114 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'pull_in_all_actions',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir1/executable.gyp:*',
+ 'subdir2/none.gyp:*',
+ 'subdir3/null_input.gyp:*',
+ ],
+ },
+ {
+ 'target_name': 'depend_on_always_run_action',
+ 'type': 'none',
+ 'dependencies': [ 'subdir1/executable.gyp:counter' ],
+ 'actions': [
+ {
+ 'action_name': 'use_always_run_output',
+ 'inputs': [
+ 'subdir1/actions-out/action-counter.txt',
+ 'subdir1/counter.py',
+ ],
+ 'outputs': [
+ 'subdir1/actions-out/action-counter_2.txt',
+ ],
+ 'action': [
+ 'python', 'subdir1/counter.py', '<(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+
+ # Three deps which don't finish immediately.
+ # Each one has a small delay then creates a file.
+ # Delays are 1.0, 1.1, and 2.0 seconds.
+ {
+ 'target_name': 'dep_1',
+ 'type': 'none',
+ 'actions': [{
+ 'inputs': [ 'actions.gyp' ],
+ 'outputs': [ 'dep_1.txt' ],
+ 'action_name': 'dep_1',
+ 'action': [ 'python', '-c',
+ 'import time; time.sleep(1); open(\'dep_1.txt\', \'w\')' ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }],
+ },
+ {
+ 'target_name': 'dep_2',
+ 'type': 'none',
+ 'actions': [{
+ 'inputs': [ 'actions.gyp' ],
+ 'outputs': [ 'dep_2.txt' ],
+ 'action_name': 'dep_2',
+ 'action': [ 'python', '-c',
+ 'import time; time.sleep(1.1); open(\'dep_2.txt\', \'w\')' ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }],
+ },
+ {
+ 'target_name': 'dep_3',
+ 'type': 'none',
+ 'actions': [{
+ 'inputs': [ 'actions.gyp' ],
+ 'outputs': [ 'dep_3.txt' ],
+ 'action_name': 'dep_3',
+ 'action': [ 'python', '-c',
+ 'import time; time.sleep(2.0); open(\'dep_3.txt\', \'w\')' ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }],
+ },
+
+  # An action which assumes its deps have completed.
+  # It does NOT list the output files of its deps as inputs.
+  # On success it creates the file deps_all_done_first.txt.
+ {
+ 'target_name': 'action_with_dependencies_123',
+ 'type': 'none',
+ 'dependencies': [ 'dep_1', 'dep_2', 'dep_3' ],
+ 'actions': [{
+ 'inputs': [ 'actions.gyp' ],
+ 'outputs': [ 'deps_all_done_first_123.txt' ],
+ 'action_name': 'action_with_dependencies_123',
+ 'action': [ 'python', 'confirm-dep-files.py', '<(_outputs)' ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }],
+ },
+ # Same as above but with deps in reverse.
+ {
+ 'target_name': 'action_with_dependencies_321',
+ 'type': 'none',
+ 'dependencies': [ 'dep_3', 'dep_2', 'dep_1' ],
+ 'actions': [{
+ 'inputs': [ 'actions.gyp' ],
+ 'outputs': [ 'deps_all_done_first_321.txt' ],
+ 'action_name': 'action_with_dependencies_321',
+ 'action': [ 'python', 'confirm-dep-files.py', '<(_outputs)' ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }],
+ },
+
+ ],
+}
diff --git a/third_party/python/gyp/test/actions/src/confirm-dep-files.py b/third_party/python/gyp/test/actions/src/confirm-dep-files.py
new file mode 100755
index 0000000000..3b8463057d
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/confirm-dep-files.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Confirms presence of files generated by our targets we depend on.
+If they exist, create a new file.
+
+Note target's input files are explicitly NOT defined in the gyp file
+so they can't easily be passed to this script as args.
+"""
+
+import os
+import sys
+
+outfile = sys.argv[1] # Example value we expect: deps_all_done_first_123.txt
+if (os.path.exists("dep_1.txt") and
+ os.path.exists("dep_2.txt") and
+ os.path.exists("dep_3.txt")):
+ open(outfile, "w")
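[Editor's note: a slightly more idiomatic sketch of the same check, intended to behave identically; the dep file names are the ones hard-coded above:

    import os
    import sys

    outfile = sys.argv[1]
    deps = ('dep_1.txt', 'dep_2.txt', 'dep_3.txt')
    if all(os.path.exists(dep) for dep in deps):
        # create the empty marker file and close the handle promptly
        open(outfile, 'w').close()
]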
diff --git a/third_party/python/gyp/test/actions/src/subdir1/counter.py b/third_party/python/gyp/test/actions/src/subdir1/counter.py
new file mode 100755
index 0000000000..d888f2e803
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir1/counter.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+import time
+
+output = sys.argv[1]
+persistoutput = "%s.persist" % sys.argv[1]
+
+count = 0
+try:
+ count = open(persistoutput, 'r').read()
+except:
+ pass
+count = int(count) + 1
+
+if len(sys.argv) > 2:
+ max_count = int(sys.argv[2])
+ if count > max_count:
+ count = max_count
+
+oldcount = 0
+try:
+ oldcount = open(output, 'r').read()
+except:
+ pass
+
+# Save the count in a file that is undeclared to gyp, and thus hidden from it.
+# We need to do this because, prior to running commands, some build systems
+# delete any declared outputs, so we would lose our count if we just wrote to
+# the given output file.
+open(persistoutput, 'w').write('%d' % (count))
+
+# Only write the given output file if the count has changed.
+if int(oldcount) != count:
+ open(output, 'w').write('%d' % (count))
+ # Sleep so the next run changes the file time sufficiently to make the build
+ # detect the file as changed.
+ time.sleep(1)
+
+sys.exit(0)
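[Editor's note: the technique in counter.py generalizes: keep the real state in a side file that is never declared to gyp (so generators won't delete it before running the command), and rewrite the declared output only when the state changes, so dependent actions rerun only on real changes. A minimal sketch of the pattern, with illustrative file names:

    import os

    PERSIST, OUT = 'state.persist', 'out.txt'  # illustrative paths

    # The persist file is undeclared, so no build system deletes it.
    count = (int(open(PERSIST).read()) if os.path.exists(PERSIST) else 0) + 1
    open(PERSIST, 'w').write(str(count))

    # Touch the declared output only on a real change, so dependents
    # rebuild only when the value actually moved.
    old = open(OUT).read() if os.path.exists(OUT) else ''
    if old != str(count):
        open(OUT, 'w').write(str(count))
]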
diff --git a/third_party/python/gyp/test/actions/src/subdir1/executable.gyp b/third_party/python/gyp/test/actions/src/subdir1/executable.gyp
new file mode 100644
index 0000000000..6a1ce4f91e
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir1/executable.gyp
@@ -0,0 +1,74 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'program.c',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'make-prog1',
+ 'inputs': [
+ 'make-prog1.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/prog1.c',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ {
+ 'action_name': 'make-prog2',
+ 'inputs': [
+ 'make-prog2.py',
+ ],
+ 'outputs': [
+ 'actions-out/prog2.c',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'counter',
+ 'type': 'none',
+ 'actions': [
+ {
+        # This action should always run, regardless of whether or not its
+        # inputs or the command line change. We do this by creating a dummy
+ # first output, which is always missing, thus causing the build to
+ # always try to recreate it. Actual output files should be listed
+ # after the dummy one, and dependent targets should list the real
+ # output(s) in their inputs
+ # (see '../actions.gyp:depend_on_always_run_action').
+ 'action_name': 'action_counter',
+ 'inputs': [
+ 'counter.py',
+ ],
+ 'outputs': [
+ 'actions-out/action-counter.txt.always',
+ 'actions-out/action-counter.txt',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', 'actions-out/action-counter.txt', '2',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
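[Editor's note: the 'counter' target above illustrates a recurring gyp idiom: declare a dummy first output that the action never writes, so every generator considers the action out of date and reruns it. A minimal sketch of just that action dict; names are illustrative, not from this test:

    {
      'action_name': 'always_run',
      'inputs': ['tool.py'],
      'outputs': [
        'out/marker.always',    # never written, so the action is always stale
        'out/real_output.txt',  # the real output dependents should list as input
      ],
      'action': ['python', 'tool.py', 'out/real_output.txt'],
    }
]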
diff --git a/third_party/python/gyp/test/actions/src/subdir1/make-prog1.py b/third_party/python/gyp/test/actions/src/subdir1/make-prog1.py
new file mode 100755
index 0000000000..7ea1d8a2d4
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir1/make-prog1.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = r"""
+#include <stdio.h>
+
+void prog1(void)
+{
+ printf("Hello from make-prog1.py\n");
+}
+"""
+
+open(sys.argv[1], 'w').write(contents)
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/actions/src/subdir1/make-prog2.py b/third_party/python/gyp/test/actions/src/subdir1/make-prog2.py
new file mode 100755
index 0000000000..0bfe4973c2
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir1/make-prog2.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = r"""
+#include <stdio.h>
+
+void prog2(void)
+{
+ printf("Hello from make-prog2.py\n");
+}
+"""
+
+open(sys.argv[1], 'w').write(contents)
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/actions/src/subdir1/program.c b/third_party/python/gyp/test/actions/src/subdir1/program.c
new file mode 100644
index 0000000000..c0931534eb
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir1/program.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+
+extern void prog1(void);
+extern void prog2(void);
+
+int main(void)
+{
+ printf("Hello from program.c\n");
+ prog1();
+ prog2();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/actions/src/subdir2/make-file.py b/third_party/python/gyp/test/actions/src/subdir2/make-file.py
new file mode 100755
index 0000000000..088a05e0b0
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir2/make-file.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = "Hello from make-file.py\n"
+
+open(sys.argv[1], 'w').write(contents)
diff --git a/third_party/python/gyp/test/actions/src/subdir2/none.gyp b/third_party/python/gyp/test/actions/src/subdir2/none.gyp
new file mode 100644
index 0000000000..2caa97d55c
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir2/none.gyp
@@ -0,0 +1,33 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'file',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'actions': [
+ {
+ 'action_name': 'make-file',
+ 'inputs': [
+ 'make-file.py',
+ ],
+ 'outputs': [
+ 'file.out',
+ # TODO: enhance testing infrastructure to test this
+ # without having to hard-code the intermediate dir paths.
+ #'<(INTERMEDIATE_DIR)/file.out',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/actions/src/subdir3/generate_main.py b/third_party/python/gyp/test/actions/src/subdir3/generate_main.py
new file mode 100755
index 0000000000..804d38df31
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir3/generate_main.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = """
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from generate_main.py\\n");
+ return 0;
+}
+"""
+
+open(sys.argv[1], 'w').write(contents)
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/actions/src/subdir3/null_input.gyp b/third_party/python/gyp/test/actions/src/subdir3/null_input.gyp
new file mode 100644
index 0000000000..9b0bea5fdb
--- /dev/null
+++ b/third_party/python/gyp/test/actions/src/subdir3/null_input.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'null_input',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'actions': [
+ {
+ 'action_name': 'generate_main',
+ 'process_outputs_as_sources': 1,
+ 'inputs': [],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/main.c',
+ ],
+ 'action': [
+ # TODO: we can't just use <(_outputs) here?!
+ 'python', 'generate_main.py', '<(INTERMEDIATE_DIR)/main.c',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/additional-targets/gyptest-additional.py b/third_party/python/gyp/test/additional-targets/gyptest-additional.py
new file mode 100755
index 0000000000..466283e55c
--- /dev/null
+++ b/third_party/python/gyp/test/additional-targets/gyptest-additional.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that targets suppressed from wildcards can still be built explicitly,
+alongside a default build of 'all'.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('all.gyp',
+ '-G', 'xcode_ninja_target_pattern=^all_targets$',
+ chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+# Build all.
+test.build('all.gyp', chdir='relocate/src')
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/dir1'
+else:
+ chdir = 'relocate/src'
+
+# Output is as expected.
+file_content = 'Hello from emit.py\n'
+test.built_file_must_match('out2.txt', file_content, chdir=chdir)
+
+test.built_file_must_not_exist('out.txt', chdir='relocate/src')
+test.built_file_must_not_exist('foolib1',
+ type=test.SHARED_LIB,
+ chdir=chdir)
+
+# xcode-ninja doesn't generate separate workspaces for sub-gyps by design
+if test.format == 'xcode-ninja':
+ test.pass_test()
+
+# TODO(mmoss) Make consistent with msvs, with 'dir1' before 'out/Default'?
+if test.format in ('make', 'ninja', 'cmake'):
+  chdir = 'relocate/src'
+else:
+  chdir = 'relocate/src/dir1'
+
+# Build the action explicitly.
+test.build('actions.gyp', 'action1_target', chdir=chdir)
+
+# Check that things got run.
+file_content = 'Hello from emit.py\n'
+test.built_file_must_exist('out.txt', chdir=chdir)
+
+# Build the shared library explicitly.
+test.build('actions.gyp', 'foolib1', chdir=chdir)
+
+test.built_file_must_exist('foolib1',
+ type=test.SHARED_LIB,
+ chdir=chdir,
+ subdir='dir1')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/additional-targets/src/all.gyp b/third_party/python/gyp/test/additional-targets/src/all.gyp
new file mode 100644
index 0000000000..21c83080aa
--- /dev/null
+++ b/third_party/python/gyp/test/additional-targets/src/all.gyp
@@ -0,0 +1,13 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'all_targets',
+ 'type': 'none',
+ 'dependencies': ['dir1/actions.gyp:*'],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/additional-targets/src/dir1/actions.gyp b/third_party/python/gyp/test/additional-targets/src/dir1/actions.gyp
new file mode 100644
index 0000000000..5089c80913
--- /dev/null
+++ b/third_party/python/gyp/test/additional-targets/src/dir1/actions.gyp
@@ -0,0 +1,56 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'action1_target',
+ 'type': 'none',
+ 'suppress_wildcard': 1,
+ 'actions': [
+ {
+ 'action_name': 'action1',
+ 'inputs': [
+ 'emit.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/out.txt',
+ ],
+ 'action': ['python', 'emit.py', '<(PRODUCT_DIR)/out.txt'],
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'action2_target',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action2',
+ 'inputs': [
+ 'emit.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/out2.txt',
+ ],
+ 'action': ['python', 'emit.py', '<(PRODUCT_DIR)/out2.txt'],
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'foolib1',
+ 'type': 'shared_library',
+ 'suppress_wildcard': 1,
+ 'sources': ['lib1.c'],
+ },
+ ],
+ 'conditions': [
+ ['OS=="linux"', {
+ 'target_defaults': {
+ 'cflags': ['-fPIC'],
+ },
+ }],
+ ],
+}
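[Editor's note: given all.gyp's 'dir1/actions.gyp:*' dependency, the 'suppress_wildcard': 1 flags above are what keep action1_target and foolib1 out of the default build; gyptest-additional.py then builds them explicitly. The wildcard resolution can be sketched like this — a reading aid, not gyp's actual implementation:

    # Which targets in dir1/actions.gyp does ':*' pick up?
    targets = {
        'action1_target': {'suppress_wildcard': 1},  # skipped by ':*'
        'action2_target': {},                        # matched: builds out2.txt
        'foolib1':        {'suppress_wildcard': 1},  # skipped by ':*'
    }
    matched = [name for name, spec in targets.items()
               if not spec.get('suppress_wildcard')]
    assert matched == ['action2_target']
]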
diff --git a/third_party/python/gyp/test/additional-targets/src/dir1/emit.py b/third_party/python/gyp/test/additional-targets/src/dir1/emit.py
new file mode 100755
index 0000000000..96db7a57df
--- /dev/null
+++ b/third_party/python/gyp/test/additional-targets/src/dir1/emit.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+f = open(sys.argv[1], 'w')
+f.write('Hello from emit.py\n')
+f.close()
diff --git a/third_party/python/gyp/test/additional-targets/src/dir1/lib1.c b/third_party/python/gyp/test/additional-targets/src/dir1/lib1.c
new file mode 100644
index 0000000000..df4cb10f79
--- /dev/null
+++ b/third_party/python/gyp/test/additional-targets/src/dir1/lib1.c
@@ -0,0 +1,6 @@
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+int func1(void) {
+ return 42;
+}
diff --git a/third_party/python/gyp/test/analyzer/common.gypi b/third_party/python/gyp/test/analyzer/common.gypi
new file mode 100644
index 0000000000..7c664e40da
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/common.gypi
@@ -0,0 +1,6 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+}
diff --git a/third_party/python/gyp/test/analyzer/gyptest-analyzer.py b/third_party/python/gyp/test/analyzer/gyptest-analyzer.py
new file mode 100644
index 0000000000..58a1ce6f07
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/gyptest-analyzer.py
@@ -0,0 +1,427 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Tests for analyzer
+"""
+
+from __future__ import print_function
+
+import json
+import TestGyp
+
+found = 'Found dependency'
+found_all = 'Found dependency (all)'
+not_found = 'No dependencies'
+
+
+def _CreateConfigFile(files, additional_compile_targets, test_targets=[]):
+ """Creates the analyzer config file, which is used as the input to analyzer.
+  See the description in analyzer.py for the meaning of the arguments."""
+ f = open('test_file', 'w')
+ to_write = {'files': files,
+ 'test_targets': test_targets,
+ 'additional_compile_targets': additional_compile_targets }
+ json.dump(to_write, f)
+ f.close()
+
+
+def _CreateBogusConfigFile():
+ f = open('test_file','w')
+ f.write('bogus')
+ f.close()
+
+
+def _ReadOutputFileContents():
+ f = open('analyzer_output', 'r')
+ result = json.load(f)
+ f.close()
+ return result
+
+
+# NOTE: this would be clearer if it subclassed TestGypCustom, but that trips
+# over a bug in pylint (E1002).
+test = TestGyp.TestGypCustom(format='analyzer')
+
+def CommonArgs():
+ return ('-Gconfig_path=test_file',
+ '-Ganalyzer_output_path=analyzer_output')
+
+
+def run_analyzer(*args, **kw):
+ """Runs the test specifying a particular config and output path."""
+ args += CommonArgs()
+ test.run_gyp('test.gyp', *args, **kw)
+
+
+def run_analyzer2(*args, **kw):
+ """Same as run_analyzer(), but passes in test2.gyp instead of test.gyp."""
+ args += CommonArgs()
+ test.run_gyp('test2.gyp', *args, **kw)
+
+
+def run_analyzer3(*args, **kw):
+ """Same as run_analyzer(), but passes in test3.gyp instead of test.gyp."""
+ args += CommonArgs()
+ test.run_gyp('test3.gyp', *args, **kw)
+
+
+def run_analyzer4(*args, **kw):
+ """Same as run_analyzer(), but passes in test3.gyp instead of test.gyp."""
+ args += CommonArgs()
+ test.run_gyp('test4.gyp', *args, **kw)
+
+
+def EnsureContains(matched=False, compile_targets=set(), test_targets=set()):
+ """Verifies output contains |compile_targets|."""
+ result = _ReadOutputFileContents()
+ if 'error' in result:
+ print('unexpected error', result.get('error'))
+ test.fail_test()
+
+ if 'invalid_targets' in result:
+ print('unexpected invalid_targets', result.get('invalid_targets'))
+ test.fail_test()
+
+ actual_compile_targets = set(result['compile_targets'])
+ if actual_compile_targets != compile_targets:
+ print('actual compile_targets:', actual_compile_targets,
+ '\nexpected compile_targets:', compile_targets)
+ test.fail_test()
+
+ actual_test_targets = set(result['test_targets'])
+ if actual_test_targets != test_targets:
+ print('actual test_targets:', actual_test_targets,
+ '\nexpected test_targets:', test_targets)
+ test.fail_test()
+
+ if matched and result['status'] != found:
+ print('expected', found, 'got', result['status'])
+ test.fail_test()
+ elif not matched and result['status'] != not_found:
+ print('expected', not_found, 'got', result['status'])
+ test.fail_test()
+
+
+def EnsureMatchedAll(compile_targets, test_targets=set()):
+ result = _ReadOutputFileContents()
+ if 'error' in result:
+ print('unexpected error', result.get('error'))
+ test.fail_test()
+
+ if 'invalid_targets' in result:
+ print('unexpected invalid_targets', result.get('invalid_targets'))
+ test.fail_test()
+
+ if result['status'] != found_all:
+ print('expected', found_all, 'got', result['status'])
+ test.fail_test()
+
+ actual_compile_targets = set(result['compile_targets'])
+ if actual_compile_targets != compile_targets:
+ print('actual compile_targets:', actual_compile_targets,
+ '\nexpected compile_targets:', compile_targets)
+ test.fail_test()
+
+ actual_test_targets = set(result['test_targets'])
+ if actual_test_targets != test_targets:
+ print('actual test_targets:', actual_test_targets,
+ '\nexpected test_targets:', test_targets)
+ test.fail_test()
+
+
+def EnsureError(expected_error_string):
+ """Verifies output contains the error string."""
+ result = _ReadOutputFileContents()
+ if result.get('error', '').find(expected_error_string) == -1:
+ print('actual error:', result.get('error', ''), '\nexpected error:',
+ expected_error_string)
+ test.fail_test()
+
+
+def EnsureStdoutContains(expected_error_string):
+ if test.stdout().find(expected_error_string) == -1:
+ print('actual stdout:', test.stdout(), '\nexpected stdout:',
+ expected_error_string)
+ test.fail_test()
+
+
+def EnsureInvalidTargets(expected_invalid_targets):
+ """Verifies output contains invalid_targets."""
+ result = _ReadOutputFileContents()
+ actual_invalid_targets = set(result['invalid_targets'])
+ if actual_invalid_targets != expected_invalid_targets:
+ print('actual invalid_targets:', actual_invalid_targets,
+          '\nexpected invalid_targets:', expected_invalid_targets)
+ test.fail_test()
+
+
+# Two targets, A and B (both static libraries), where A depends upon B. If a
+# file in B changes, then both A and B are output. It is not strictly necessary
+# that A be compiled in this case, only B.
+_CreateConfigFile(['b.c'], ['all'])
+test.run_gyp('static_library_test.gyp', *CommonArgs())
+EnsureContains(matched=True, compile_targets={'a', 'b'})
+
+# Verifies config_path must be specified.
+test.run_gyp('test.gyp')
+EnsureStdoutContains('Must specify files to analyze via config_path')
+
+# Verifies config_path must point to a valid file.
+test.run_gyp('test.gyp', '-Gconfig_path=bogus_file',
+ '-Ganalyzer_output_path=analyzer_output')
+EnsureError('Unable to open file bogus_file')
+
+# Verify 'invalid_targets' is present when bad target is specified.
+_CreateConfigFile(['exe2.c'], ['bad_target'])
+run_analyzer()
+EnsureInvalidTargets({'bad_target'})
+
+# Verifies config_path must point to a valid json file.
+_CreateBogusConfigFile()
+run_analyzer()
+EnsureError('Unable to parse config file test_file')
+
+# Trivial test of a source.
+_CreateConfigFile(['foo.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe'})
+
+# Conditional source that is excluded.
+_CreateConfigFile(['conditional_source.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=False)
+
+# Conditional source that is included by way of argument.
+_CreateConfigFile(['conditional_source.c'], ['all'])
+run_analyzer('-Dtest_variable=1')
+EnsureContains(matched=True, compile_targets={'exe'})
+
+# Two unknown files.
+_CreateConfigFile(['unknown1.c', 'unknown2.cc'], ['all'])
+run_analyzer()
+EnsureContains()
+
+# Two unknown files.
+_CreateConfigFile(['unknown1.c', 'subdir/subdir_sourcex.c'], ['all'])
+run_analyzer()
+EnsureContains()
+
+# Included dependency
+_CreateConfigFile(['unknown1.c', 'subdir/subdir_source.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe', 'exe3'})
+
+# Included inputs to actions.
+_CreateConfigFile(['action_input.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe'})
+
+# Don't consider outputs.
+_CreateConfigFile(['action_output.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=False)
+
+# Rule inputs.
+_CreateConfigFile(['rule_input.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe'})
+
+# Ignore path specified with PRODUCT_DIR.
+_CreateConfigFile(['product_dir_input.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=False)
+
+# Path specified via a variable.
+_CreateConfigFile(['subdir/subdir_source2.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe'})
+
+# Verifies paths with // are fixed up correctly.
+_CreateConfigFile(['parent_source.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe', 'exe3'})
+
+# Verifies relative paths are resolved correctly.
+_CreateConfigFile(['subdir/subdir_source.h'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe'})
+
+# Verifies relative paths in inputs are resolved correctly.
+_CreateConfigFile(['rel_path1.h'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe'})
+
+# Various permutations when passing in targets.
+_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'],
+ ['all'], ['exe', 'exe3'])
+run_analyzer()
+EnsureContains(matched=True, test_targets={'exe3'},
+ compile_targets={'exe2', 'exe3'})
+
+_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'], ['all'], ['exe'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
+
+# Verifies duplicates are ignored.
+_CreateConfigFile(['exe2.c', 'subdir/subdir2b_source.c'], ['all'],
+ ['exe', 'exe'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
+
+_CreateConfigFile(['exe2.c'], ['all'], ['exe'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe2'})
+
+_CreateConfigFile(['exe2.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe2'})
+
+_CreateConfigFile(['subdir/subdir2b_source.c', 'exe2.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe2', 'exe3'})
+
+_CreateConfigFile(['subdir/subdir2b_source.c'], ['all'], ['exe3'])
+run_analyzer()
+EnsureContains(matched=True, test_targets={'exe3'}, compile_targets={'exe3'})
+
+_CreateConfigFile(['exe2.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe2'})
+
+_CreateConfigFile(['foo.c'], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe'})
+
+# Assertions when modifying build (gyp/gypi) files, especially when said files
+# are included.
+_CreateConfigFile(['subdir2/d.cc'], ['all'], ['exe', 'exe2', 'foo', 'exe3'])
+run_analyzer2()
+EnsureContains(matched=True, test_targets={'exe', 'foo'},
+ compile_targets={'exe', 'foo'})
+
+_CreateConfigFile(['subdir2/subdir.includes.gypi'], ['all'],
+ ['exe', 'exe2', 'foo', 'exe3'])
+run_analyzer2()
+EnsureContains(matched=True, test_targets={'exe', 'foo'},
+ compile_targets={'exe', 'foo'})
+
+_CreateConfigFile(['subdir2/subdir.gyp'], ['all'],
+ ['exe', 'exe2', 'foo', 'exe3'])
+run_analyzer2()
+EnsureContains(matched=True, test_targets={'exe', 'foo'},
+ compile_targets={'exe', 'foo'})
+
+_CreateConfigFile(['test2.includes.gypi'], ['all'],
+ ['exe', 'exe2', 'foo', 'exe3'])
+run_analyzer2()
+EnsureContains(matched=True, test_targets={'exe', 'exe2', 'exe3'},
+ compile_targets={'exe', 'exe2', 'exe3'})
+
+# Verify that modifying an included file makes all targets dirty.
+_CreateConfigFile(['common.gypi'], ['all'], ['exe', 'exe2', 'foo', 'exe3'])
+run_analyzer2('-Icommon.gypi')
+EnsureMatchedAll({'all', 'exe', 'exe2', 'foo', 'exe3'},
+ {'exe', 'exe2', 'foo', 'exe3'})
+
+# Assertions from test3.gyp.
+_CreateConfigFile(['d.c', 'f.c'], ['all'], ['a'])
+run_analyzer3()
+EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
+
+_CreateConfigFile(['f.c'], ['all'], ['a'])
+run_analyzer3()
+EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
+
+_CreateConfigFile(['f.c'], ['all'])
+run_analyzer3()
+EnsureContains(matched=True, compile_targets={'a', 'b'})
+
+_CreateConfigFile(['c.c', 'e.c'], ['all'])
+run_analyzer3()
+EnsureContains(matched=True, compile_targets={'a', 'b', 'c', 'e'})
+
+_CreateConfigFile(['d.c'], ['all'], ['a'])
+run_analyzer3()
+EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
+
+_CreateConfigFile(['a.c'], ['all'], ['a', 'b'])
+run_analyzer3()
+EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
+
+_CreateConfigFile(['a.c'], ['all'], ['a', 'b'])
+run_analyzer3()
+EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
+
+_CreateConfigFile(['d.c'], ['all'], ['a', 'b'])
+run_analyzer3()
+EnsureContains(matched=True, test_targets={'a', 'b'},
+ compile_targets={'a', 'b'})
+
+_CreateConfigFile(['f.c'], ['all'], ['a'])
+run_analyzer3()
+EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a', 'b'})
+
+_CreateConfigFile(['a.c'], ['all'], ['a'])
+run_analyzer3()
+EnsureContains(matched=True, test_targets={'a'}, compile_targets={'a'})
+
+_CreateConfigFile(['a.c'], ['all'])
+run_analyzer3()
+EnsureContains(matched=True, compile_targets={'a'})
+
+_CreateConfigFile(['d.c'], ['all'])
+run_analyzer3()
+EnsureContains(matched=True, compile_targets={'a', 'b'})
+
+# Assertions around test4.gyp.
+_CreateConfigFile(['f.c'], ['all'])
+run_analyzer4()
+EnsureContains(matched=True, compile_targets={'e', 'f'})
+
+_CreateConfigFile(['d.c'], ['all'])
+run_analyzer4()
+EnsureContains(matched=True, compile_targets={'a', 'b', 'c', 'd'})
+
+_CreateConfigFile(['i.c'], ['all'])
+run_analyzer4()
+EnsureContains(matched=True, compile_targets={'h', 'i'})
+
+# Assertions where 'all' is not supplied in compile_targets.
+
+_CreateConfigFile(['exe2.c'], [], ['exe2'])
+run_analyzer()
+EnsureContains(matched=True, test_targets={'exe2'}, compile_targets={'exe2'})
+
+_CreateConfigFile(['exe20.c'], [], ['exe2'])
+run_analyzer()
+EnsureContains(matched=False)
+
+
+_CreateConfigFile(['exe2.c', 'exe3.c'], [], ['exe2', 'exe3'])
+run_analyzer()
+EnsureContains(matched=True, test_targets={'exe2', 'exe3'},
+ compile_targets={'exe2', 'exe3'})
+
+_CreateConfigFile(['exe2.c', 'exe3.c'], ['exe3'], ['exe2'])
+run_analyzer()
+EnsureContains(matched=True, test_targets={'exe2'},
+ compile_targets={'exe2', 'exe3'})
+
+_CreateConfigFile(['exe3.c'], ['exe2'], ['exe2'])
+run_analyzer()
+EnsureContains(matched=False)
+
+# Assertions with 'all' listed as a test_target.
+_CreateConfigFile(['exe3.c'], [], ['all'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe3', 'all'},
+ test_targets={'all'})
+
+_CreateConfigFile(['exe2.c'], [], ['all', 'exe2'])
+run_analyzer()
+EnsureContains(matched=True, compile_targets={'exe2', 'all'},
+ test_targets={'all', 'exe2'})
+
+test.pass_test()
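[Editor's note: both the config file written by _CreateConfigFile and the result read back by _ReadOutputFileContents are plain JSON. A representative pair for the first assertion above; the keys and status strings are the ones checked by the helpers, the target lists are illustrative of that case:

    # written to 'test_file' by _CreateConfigFile(['b.c'], ['all'])
    {'files': ['b.c'],
     'test_targets': [],
     'additional_compile_targets': ['all']}

    # read back from 'analyzer_output'; status is one of 'Found dependency',
    # 'Found dependency (all)', or 'No dependencies'
    {'status': 'Found dependency',
     'compile_targets': ['a', 'b'],
     'test_targets': []}
]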
diff --git a/third_party/python/gyp/test/analyzer/static_library_test.gyp b/third_party/python/gyp/test/analyzer/static_library_test.gyp
new file mode 100644
index 0000000000..2c8e4bd826
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/static_library_test.gyp
@@ -0,0 +1,34 @@
+# Copyright 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# These gyp files create the following dependencies:
+#
+# test.gyp:
+# #a -> b
+# a.c
+# #b
+# b.c
+# a and b are static libraries.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'static_library',
+ 'sources': [
+ 'a.c',
+ ],
+ 'dependencies': [
+ 'b',
+ ],
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'static_library',
+ 'sources': [
+ 'b.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/subdir/subdir.gyp b/third_party/python/gyp/test/analyzer/subdir/subdir.gyp
new file mode 100644
index 0000000000..bfa2df48e1
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/subdir/subdir.gyp
@@ -0,0 +1,36 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'trailing_dir_path': '../',
+ },
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'static_library',
+ 'sources': [
+ 'subdir_source.c',
+ '<(trailing_dir_path)/parent_source.c',
+ ],
+ },
+ {
+ 'target_name': 'subdir2a',
+ 'type': 'static_library',
+ 'sources': [
+ 'subdir2_source.c',
+ ],
+ 'dependencies': [
+ 'subdir2b',
+ ],
+ },
+ {
+ 'target_name': 'subdir2b',
+ 'type': 'static_library',
+ 'sources': [
+ 'subdir2b_source.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/subdir/subdir2/subdir2.gyp b/third_party/python/gyp/test/analyzer/subdir/subdir2/subdir2.gyp
new file mode 100644
index 0000000000..e5aaa92b18
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/subdir/subdir2/subdir2.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'subdir2',
+ 'type': 'static_library',
+ 'sources': [
+ '../subdir_source.h',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/subdir2/subdir.gyp b/third_party/python/gyp/test/analyzer/subdir2/subdir.gyp
new file mode 100644
index 0000000000..d6c709c9ef
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/subdir2/subdir.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'static_library',
+ 'sources': [
+ 'subdir_source.c',
+ ],
+ 'includes': [
+ 'subdir.includes.gypi',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/subdir2/subdir.includes.gypi b/third_party/python/gyp/test/analyzer/subdir2/subdir.includes.gypi
new file mode 100644
index 0000000000..324e92bcd4
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/subdir2/subdir.includes.gypi
@@ -0,0 +1,9 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'sources': [
+ 'd.cc'
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/test.gyp b/third_party/python/gyp/test/analyzer/test.gyp
new file mode 100644
index 0000000000..c25ca73bff
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/test.gyp
@@ -0,0 +1,114 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# These gyp files create the following dependencies:
+#
+# test.gyp:
+# #exe -> subdir/subdir.gyp#foo, subdir/subdir2/subdir2.gyp#subdir2
+# foo.c
+# subdir/subdir_source2.c
+# conditional_source.c (if test_variable==1)
+# action_input.c
+# action_output.c
+# rule_input.c
+# rule_output.pdf
+# #exe2
+# exe2.c
+# #exe3 -> subdir/subdir.gyp#foo, subdir/subdir.gyp#subdir2a
+# exe3.c
+# #allx (type none) -> exe, exe3
+#
+# subdir/subdir.gyp
+# #foo
+# subdir/subdir_source.c
+# parent_source.c
+# #subdir2a -> subdir2b
+# subdir/subdir2_source.c
+# #subdir2b
+# subdir/subdir2b_source.c
+#
+# subdir/subdir2/subdir2.gyp
+# #subdir2
+# subdir/subdir_source.h
+
+{
+ 'variables': {
+ 'test_variable%': 0,
+ 'variable_path': 'subdir',
+ },
+ 'targets': [
+ {
+ 'target_name': 'exe',
+ 'type': 'executable',
+ 'dependencies': [
+ 'subdir/subdir.gyp:foo',
+ 'subdir/subdir2/subdir2.gyp:subdir2',
+ ],
+ 'sources': [
+ 'foo.c',
+ '<(variable_path)/subdir_source2.c',
+ ],
+ 'conditions': [
+ ['test_variable==1', {
+ 'sources': [
+ 'conditional_source.c',
+ ],
+ }],
+ ],
+ 'actions': [
+ {
+ 'action_name': 'action',
+ 'inputs': [
+ '<(PRODUCT_DIR)/product_dir_input.c',
+ 'action_input.c',
+ '../bad_path1.h',
+ '../../bad_path2.h',
+ './rel_path1.h',
+ ],
+ 'outputs': [
+ 'action_output.c',
+ ],
+ },
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'rule',
+ 'extension': 'pdf',
+ 'inputs': [
+ 'rule_input.c',
+ ],
+ 'outputs': [
+ 'rule_output.pdf',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'exe2',
+ 'type': 'executable',
+ 'sources': [
+ 'exe2.c',
+ ],
+ },
+ {
+ 'target_name': 'exe3',
+ 'type': 'executable',
+ 'dependencies': [
+ 'subdir/subdir.gyp:foo',
+ 'subdir/subdir.gyp:subdir2a',
+ ],
+ 'sources': [
+ 'exe3.c',
+ ],
+ },
+ {
+ 'target_name': 'allx',
+ 'type': 'none',
+ 'dependencies': [
+ 'exe',
+ 'exe3',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/test2.gyp b/third_party/python/gyp/test/analyzer/test2.gyp
new file mode 100644
index 0000000000..782b6e6428
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/test2.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'exe',
+ 'type': 'executable',
+ 'dependencies': [
+ 'subdir2/subdir.gyp:foo',
+ ],
+ },
+ {
+ 'target_name': 'exe2',
+ 'type': 'executable',
+ 'includes': [
+ 'test2.includes.gypi',
+ ],
+ },
+ ],
+ 'includes': [
+ 'test2.toplevel_includes.gypi',
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/test2.includes.gypi b/third_party/python/gyp/test/analyzer/test2.includes.gypi
new file mode 100644
index 0000000000..3e21de23cb
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/test2.includes.gypi
@@ -0,0 +1,13 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'sources': [
+ 'a.cc',
+ 'b.cc'
+ ],
+ 'includes': [
+ 'test2.includes.includes.gypi',
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/test2.includes.includes.gypi b/third_party/python/gyp/test/analyzer/test2.includes.includes.gypi
new file mode 100644
index 0000000000..de3a025dbb
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/test2.includes.includes.gypi
@@ -0,0 +1,9 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'sources': [
+ 'c.cc'
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/test2.toplevel_includes.gypi b/third_party/python/gyp/test/analyzer/test2.toplevel_includes.gypi
new file mode 100644
index 0000000000..54fa453b08
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/test2.toplevel_includes.gypi
@@ -0,0 +1,15 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'exe3',
+ 'type': 'executable',
+ 'sources': [
+ 'e.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/test3.gyp b/third_party/python/gyp/test/analyzer/test3.gyp
new file mode 100644
index 0000000000..e52f6bc7d3
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/test3.gyp
@@ -0,0 +1,77 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'allx',
+ 'type': 'none',
+ 'dependencies': [
+ 'a',
+ 'b',
+ ],
+ },
+ {
+ 'target_name': 'a',
+ 'type': 'executable',
+ 'sources': [
+ 'a.c',
+ ],
+ 'dependencies': [
+ 'c',
+ 'd',
+ ],
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'executable',
+ 'sources': [
+ 'b.c',
+ ],
+ 'dependencies': [
+ 'd',
+ 'e',
+ ],
+ },
+ {
+ 'target_name': 'c',
+ 'type': 'executable',
+ 'sources': [
+ 'c.c',
+ ],
+ },
+ {
+ 'target_name': 'd',
+ 'type': 'none',
+ 'sources': [
+ 'd.c',
+ ],
+ 'dependencies': [
+ 'f',
+ 'g',
+ ],
+ },
+ {
+ 'target_name': 'e',
+ 'type': 'executable',
+ 'sources': [
+ 'e.c',
+ ],
+ },
+ {
+ 'target_name': 'f',
+ 'type': 'static_library',
+ 'sources': [
+ 'f.c',
+ ],
+ },
+ {
+ 'target_name': 'g',
+ 'type': 'executable',
+ 'sources': [
+ 'g.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/test4.gyp b/third_party/python/gyp/test/analyzer/test4.gyp
new file mode 100644
index 0000000000..91cea56c1f
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/test4.gyp
@@ -0,0 +1,80 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'executable',
+ 'sources': [
+ 'a.c',
+ ],
+ 'dependencies': [
+ 'b',
+ 'c',
+ ],
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'executable',
+ 'sources': [
+ 'b.c',
+ ],
+ 'dependencies': [
+ 'd',
+ ],
+ },
+ {
+ 'target_name': 'c',
+ 'type': 'executable',
+ 'sources': [
+ 'c.c',
+ ],
+ 'dependencies': [
+ 'b',
+ 'd',
+ ],
+ },
+ {
+ 'target_name': 'd',
+ 'type': 'executable',
+ 'sources': [
+ 'd.c',
+ ],
+ },
+ {
+ 'target_name': 'e',
+ 'type': 'executable',
+ 'dependencies': [
+ 'test5.gyp:f',
+ ],
+ },
+ {
+ 'target_name': 'h',
+ 'type': 'none',
+ 'dependencies': [
+ 'i',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'rule',
+ 'extension': 'pdf',
+ 'inputs': [
+ 'rule_input.c',
+ ],
+ 'outputs': [
+ 'rule_output.pdf',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'i',
+ 'type': 'static_library',
+ 'sources': [
+ 'i.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/analyzer/test5.gyp b/third_party/python/gyp/test/analyzer/test5.gyp
new file mode 100644
index 0000000000..f3ea5b0061
--- /dev/null
+++ b/third_party/python/gyp/test/analyzer/test5.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'f',
+ 'type': 'executable',
+ 'sources': [
+ 'f.c',
+ ],
+ },
+ {
+ 'target_name': 'g',
+ 'type': 'executable',
+ 'sources': [
+ 'g.c',
+ ],
+ 'dependencies': [
+ 'f',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/arflags/gyptest-arflags.py b/third_party/python/gyp/test/arflags/gyptest-arflags.py
new file mode 100644
index 0000000000..870a2d8946
--- /dev/null
+++ b/third_party/python/gyp/test/arflags/gyptest-arflags.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that building a target with invalid arflags fails.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+import TestGyp
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+test = TestGyp.TestGyp(formats=['ninja'])
+test.run_gyp('test.gyp')
+expected_status = 0 if sys.platform in ['darwin', 'win32'] else 1
+test.build('test.gyp', target='lib', status=expected_status)
+test.pass_test()
diff --git a/third_party/python/gyp/test/arflags/lib.cc b/third_party/python/gyp/test/arflags/lib.cc
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/arflags/lib.cc
diff --git a/third_party/python/gyp/test/arflags/test.gyp b/third_party/python/gyp/test/arflags/test.gyp
new file mode 100644
index 0000000000..f7430fae2d
--- /dev/null
+++ b/third_party/python/gyp/test/arflags/test.gyp
@@ -0,0 +1,10 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'lib',
+ 'type': 'static_library',
+ 'sources': ['lib.cc'],
+ 'arflags': ['--nonexistent'],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/assembly/gyptest-assembly.py b/third_party/python/gyp/test/assembly/gyptest-assembly.py
new file mode 100755
index 0000000000..8a84310544
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/gyptest-assembly.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A basic test of compiling assembly files.
+"""
+
+import sys
+import TestGyp
+
+if sys.platform != 'win32':
+ # TODO(bradnelson): get this working for Windows.
+ test = TestGyp.TestGyp(formats=['!msvs'])
+
+ test.run_gyp('assembly.gyp', chdir='src')
+
+ test.relocate('src', 'relocate/src')
+
+ test.build('assembly.gyp', test.ALL, chdir='relocate/src')
+
+ expect = """\
+Hello from program.c
+Got 42.
+"""
+ test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/assembly/gyptest-override.py b/third_party/python/gyp/test/assembly/gyptest-override.py
new file mode 100644
index 0000000000..e84a23e855
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/gyptest-override.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure that manual rules on Windows override the built-in ones.
+"""
+
+import sys
+import TestGyp
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+ CHDIR = 'src'
+ test.run_gyp('override.gyp', chdir=CHDIR)
+ test.build('override.gyp', test.ALL, chdir=CHDIR)
+ expect = """\
+Hello from program.c
+Got 42.
+"""
+ test.run_built_executable('program', chdir=CHDIR, stdout=expect)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/assembly/src/as.bat b/third_party/python/gyp/test/assembly/src/as.bat
new file mode 100644
index 0000000000..b796db97ca
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/src/as.bat
@@ -0,0 +1,4 @@
+@echo off
+:: Mock Windows assembler.
+cl /MD /c %1 /Fo"%2"
+
diff --git a/third_party/python/gyp/test/assembly/src/assembly.gyp b/third_party/python/gyp/test/assembly/src/assembly.gyp
new file mode 100644
index 0000000000..565cb0fa0e
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/src/assembly.gyp
@@ -0,0 +1,62 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['PLATFORM_WIN'],
+ }],
+ ['OS=="mac" or OS=="ios"', {
+ 'defines': ['PLATFORM_MAC'],
+ }],
+ ['OS=="linux"', {
+ 'defines': ['PLATFORM_LINUX'],
+ }],
+ ['OS=="android"', {
+ 'defines': ['PLATFORM_ANDROID'],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'dependencies': ['lib1'],
+ 'sources': [
+ 'program.c',
+ ],
+ },
+ {
+ 'target_name': 'lib1',
+ 'type': 'static_library',
+ 'sources': [
+ 'lib1.S',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'target_defaults': {
+ 'rules': [
+ {
+ 'rule_name': 'assembler',
+ 'msvs_cygwin_shell': 0,
+ 'extension': 'S',
+ 'inputs': [
+ 'as.bat',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).obj',
+ ],
+ 'action':
+ ['as.bat', 'lib1.c', '<(_outputs)'],
+ 'message': 'Building assembly file <(RULE_INPUT_PATH)',
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ },],
+ ],
+}
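
The Windows branch above works because gyp 'rules' fire once per source file whose extension matches, expanding the <(RULE_INPUT_*) placeholders into a concrete action; note the mock as.bat is simply handed lib1.c, the C twin of lib1.S, so no real assembler is needed. A minimal sketch of that per-source expansion follows (illustrative placeholder handling, not gyp's actual implementation):

    import os

    rule = {
        'extension': 'S',
        'outputs': ['<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).obj'],
        'action': ['as.bat', '<(RULE_INPUT_PATH)', '<(_outputs)'],
    }

    def expand(template, src, intermediate_dir='obj/gen'):
        root = os.path.splitext(os.path.basename(src))[0]
        return (template
                .replace('<(INTERMEDIATE_DIR)', intermediate_dir)
                .replace('<(RULE_INPUT_ROOT)', root)
                .replace('<(RULE_INPUT_PATH)', src))

    for src in ['lib1.S', 'program.c']:
        # Rules only fire for sources with the matching extension.
        if os.path.splitext(src)[1].lstrip('.') != rule['extension']:
            continue
        outputs = [expand(o, src) for o in rule['outputs']]
        action = [expand(a, src).replace('<(_outputs)', ' '.join(outputs))
                  for a in rule['action']]
        print(action)  # ['as.bat', 'lib1.S', 'obj/gen/lib1.obj']
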
diff --git a/third_party/python/gyp/test/assembly/src/lib1.S b/third_party/python/gyp/test/assembly/src/lib1.S
new file mode 100644
index 0000000000..7de9f19cf9
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/src/lib1.S
@@ -0,0 +1,15 @@
+#if PLATFORM_WINDOWS || PLATFORM_MAC
+# define IDENTIFIER(n) _##n
+#else /* Linux */
+# define IDENTIFIER(n) n
+#endif
+
+.globl IDENTIFIER(lib1_function)
+IDENTIFIER(lib1_function):
+#if !defined(PLATFORM_ANDROID)
+ movl $42, %eax
+ ret
+#else /* Android (assuming ARM) */
+ mov r0, #42
+ bx lr
+#endif
diff --git a/third_party/python/gyp/test/assembly/src/lib1.c b/third_party/python/gyp/test/assembly/src/lib1.c
new file mode 100644
index 0000000000..be21ecd5f6
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/src/lib1.c
@@ -0,0 +1,3 @@
+int lib1_function(void) {
+ return 42;
+}
diff --git a/third_party/python/gyp/test/assembly/src/override.gyp b/third_party/python/gyp/test/assembly/src/override.gyp
new file mode 100644
index 0000000000..39a4072eff
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/src/override.gyp
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [
+ 'program.c',
+ 'override_asm.asm',
+ ],
+ 'rules': [
+ {
+ # Test that if there's a specific .asm rule, it overrides the
+ # built-in one on Windows.
+ 'rule_name': 'assembler',
+ 'msvs_cygwin_shell': 0,
+ 'extension': 'asm',
+ 'inputs': [
+ 'as.bat',
+ ],
+ 'outputs': [
+ 'output.obj',
+ ],
+ 'action': ['as.bat', 'lib1.c', '<(_outputs)'],
+ 'message': 'Building assembly file <(RULE_INPUT_PATH)',
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/assembly/src/override_asm.asm b/third_party/python/gyp/test/assembly/src/override_asm.asm
new file mode 100644
index 0000000000..be93b23baa
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/src/override_asm.asm
@@ -0,0 +1,8 @@
+; Copyright (c) 2012 Google Inc. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+; This is a placeholder. It should not be referenced if overrides work
+; correctly.
+
+Bad stuff that shouldn't assemble.
diff --git a/third_party/python/gyp/test/assembly/src/program.c b/third_party/python/gyp/test/assembly/src/program.c
new file mode 100644
index 0000000000..eee862712e
--- /dev/null
+++ b/third_party/python/gyp/test/assembly/src/program.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+
+extern int lib1_function(void);
+
+int main(void)
+{
+ fprintf(stdout, "Hello from program.c\n");
+ fflush(stdout);
+ fprintf(stdout, "Got %d.\n", lib1_function());
+ fflush(stdout);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/build-option/gyptest-build.py b/third_party/python/gyp/test/build-option/gyptest-build.py
new file mode 100755
index 0000000000..34a9e11d35
--- /dev/null
+++ b/third_party/python/gyp/test/build-option/gyptest-build.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simplest-possible build of a "Hello, world!" program
+using the default build target.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_default')
+
+if test.format == 'xcode-ninja':
+ # The xcode-ninja generator doesn't support --build
+ # cf. https://code.google.com/p/gyp/issues/detail?id=453
+ test.skip_test()
+
+test.run_gyp('hello.gyp', '--build=Default')
+
+test.run_built_executable('hello', stdout="Hello, world!\n")
+
+test.up_to_date('hello.gyp', test.DEFAULT)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/build-option/hello.c b/third_party/python/gyp/test/build-option/hello.c
new file mode 100644
index 0000000000..f6ad129fd7
--- /dev/null
+++ b/third_party/python/gyp/test/build-option/hello.c
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/build-option/hello.gyp b/third_party/python/gyp/test/build-option/hello.gyp
new file mode 100644
index 0000000000..1974d51ccd
--- /dev/null
+++ b/third_party/python/gyp/test/build-option/hello.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/builddir/gyptest-all.py b/third_party/python/gyp/test/builddir/gyptest-all.py
new file mode 100755
index 0000000000..a26543f49e
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/gyptest-all.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify the settings that cause a set of programs to be created in
+a specific build directory, and that no intermediate built files
+get created outside of that build directory hierarchy even when
+referred to with deeply-nested ../../.. paths.
+"""
+
+import TestGyp
+
+# TODO(mmoss): Make only supports (theoretically) a single, global build
+# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
+# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
+# generators support, so this doesn't work yet for make.
+# TODO(mmoss) Make also has the issue that the top-level Makefile is written to
+# the "--depth" location, which is one level above 'src', but then this test
+# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
+# its sources. I'm not sure if make is wrong for writing outside the current
+# directory, or if the test is wrong for assuming everything generated is under
+# the current directory.
+# Ninja and CMake do not support setting the build directory.
+test = TestGyp.TestGyp(formats=['!make', '!ninja', '!cmake'])
+
+test.run_gyp('prog1.gyp', '--depth=..', chdir='src')
+if test.format == 'msvs':
+ if test.uses_msbuild:
+ test.must_contain('src/prog1.vcxproj',
+ '<OutDir>..\\builddir\\Default\\</OutDir>')
+ else:
+ test.must_contain('src/prog1.vcproj',
+ 'OutputDirectory="..\\builddir\\Default\\"')
+
+test.relocate('src', 'relocate/src')
+
+test.subdir('relocate/builddir')
+
+# Make sure that all the built ../../etc. files only get put under builddir,
+# by making all of relocate read-only and then making only builddir writable.
+test.writable('relocate', False)
+test.writable('relocate/builddir', True)
+
+# Suppress the test infrastructure's setting SYMROOT on the command line.
+test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')
+
+expect1 = """\
+Hello from prog1.c
+Hello from func1.c
+"""
+
+expect2 = """\
+Hello from subdir2/prog2.c
+Hello from func2.c
+"""
+
+expect3 = """\
+Hello from subdir2/subdir3/prog3.c
+Hello from func3.c
+"""
+
+expect4 = """\
+Hello from subdir2/subdir3/subdir4/prog4.c
+Hello from func4.c
+"""
+
+expect5 = """\
+Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
+Hello from func5.c
+"""
+
+def run_builddir(prog, expect):
+ build_dir = 'relocate/builddir/Default/'
+ test.run(program=test.workpath(build_dir + prog), stdout=expect)
+
+run_builddir('prog1', expect1)
+run_builddir('prog2', expect2)
+run_builddir('prog3', expect3)
+run_builddir('prog4', expect4)
+run_builddir('prog5', expect5)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/builddir/gyptest-default.py b/third_party/python/gyp/test/builddir/gyptest-default.py
new file mode 100755
index 0000000000..4904cdab42
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/gyptest-default.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify the settings that cause a set of programs to be created in
+a specific build directory, and that no intermediate built files
+get created outside of that build directory hierarchy even when
+referred to with deeply-nested ../../.. paths.
+"""
+
+import TestGyp
+
+# TODO(mmoss): Make only supports (theoretically) a single, global build
+# directory (through GYP_GENERATOR_FLAGS 'output_dir'), rather than
+# gyp-file-specific settings (e.g. the stuff in builddir.gypi) that the other
+# generators support, so this doesn't work yet for make.
+# TODO(mmoss) Make also has the issue that the top-level Makefile is written to
+# the "--depth" location, which is one level above 'src', but then this test
+# moves 'src' somewhere else, leaving the Makefile behind, so make can't find
+# its sources. I'm not sure if make is wrong for writing outside the current
+# directory, or if the test is wrong for assuming everything generated is under
+# the current directory.
+# Ninja and CMake do not support setting the build directory.
+test = TestGyp.TestGyp(formats=['!make', '!ninja', '!cmake'])
+
+test.run_gyp('prog1.gyp', '--depth=..', chdir='src')
+if test.format == 'msvs':
+ if test.uses_msbuild:
+ test.must_contain('src/prog1.vcxproj',
+ '<OutDir>..\\builddir\\Default\\</OutDir>')
+ else:
+ test.must_contain('src/prog1.vcproj',
+ 'OutputDirectory="..\\builddir\\Default\\"')
+
+test.relocate('src', 'relocate/src')
+
+test.subdir('relocate/builddir')
+
+# Make sure that all the built ../../etc. files only get put under builddir,
+# by making all of relocate read-only and then making only builddir writable.
+test.writable('relocate', False)
+test.writable('relocate/builddir', True)
+
+# Suppress the test infrastructure's setting SYMROOT on the command line.
+test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
+
+expect1 = """\
+Hello from prog1.c
+Hello from func1.c
+"""
+
+expect2 = """\
+Hello from subdir2/prog2.c
+Hello from func2.c
+"""
+
+expect3 = """\
+Hello from subdir2/subdir3/prog3.c
+Hello from func3.c
+"""
+
+expect4 = """\
+Hello from subdir2/subdir3/subdir4/prog4.c
+Hello from func4.c
+"""
+
+expect5 = """\
+Hello from subdir2/subdir3/subdir4/subdir5/prog5.c
+Hello from func5.c
+"""
+
+def run_builddir(prog, expect):
+ build_dir = 'relocate/builddir/Default/'
+ test.run(program=test.workpath(build_dir + prog), stdout=expect)
+
+run_builddir('prog1', expect1)
+run_builddir('prog2', expect2)
+run_builddir('prog3', expect3)
+run_builddir('prog4', expect4)
+run_builddir('prog5', expect5)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/builddir/src/builddir.gypi b/third_party/python/gyp/test/builddir/src/builddir.gypi
new file mode 100644
index 0000000000..ce175db8f8
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/builddir.gypi
@@ -0,0 +1,18 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'configurations': {
+ 'Default': {
+ 'msvs_configuration_attributes': {
+ 'OutputDirectory': '<(DEPTH)\\builddir/Default',
+ },
+ },
+ },
+ },
+ 'xcode_settings': {
+ 'SYMROOT': '<(DEPTH)/builddir',
+ },
+}
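
Each prog*.gyp below pulls this file in through 'includes', so every project inherits the same output-directory settings. gyp merges an included .gypi into the including file, recursing into dicts and appending lists; a simplified sketch of that merge (gyp's real merge handles more cases, e.g. the '=', '?' and '+' key suffixes):

    def merge_into(base, extra):
        for key, value in extra.items():
            if isinstance(value, dict) and isinstance(base.get(key), dict):
                merge_into(base[key], value)
            elif isinstance(value, list) and isinstance(base.get(key), list):
                base[key].extend(value)
            else:
                base[key] = value

    gyp_file = {'targets': [{'target_name': 'prog1'}]}
    gypi = {'target_defaults': {'configurations': {'Default': {}}}}
    merge_into(gyp_file, gypi)
    print(sorted(gyp_file))  # ['target_defaults', 'targets']
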
diff --git a/third_party/python/gyp/test/builddir/src/func1.c b/third_party/python/gyp/test/builddir/src/func1.c
new file mode 100644
index 0000000000..b8e6a06951
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/func1.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void func1(void)
+{
+ printf("Hello from func1.c\n");
+}
diff --git a/third_party/python/gyp/test/builddir/src/func2.c b/third_party/python/gyp/test/builddir/src/func2.c
new file mode 100644
index 0000000000..14aabac475
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/func2.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void func2(void)
+{
+ printf("Hello from func2.c\n");
+}
diff --git a/third_party/python/gyp/test/builddir/src/func3.c b/third_party/python/gyp/test/builddir/src/func3.c
new file mode 100644
index 0000000000..3b4edeae6d
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/func3.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void func3(void)
+{
+ printf("Hello from func3.c\n");
+}
diff --git a/third_party/python/gyp/test/builddir/src/func4.c b/third_party/python/gyp/test/builddir/src/func4.c
new file mode 100644
index 0000000000..732891b79a
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/func4.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void func4(void)
+{
+ printf("Hello from func4.c\n");
+}
diff --git a/third_party/python/gyp/test/builddir/src/func5.c b/third_party/python/gyp/test/builddir/src/func5.c
new file mode 100644
index 0000000000..18fdfabbbe
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/func5.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void func5(void)
+{
+ printf("Hello from func5.c\n");
+}
diff --git a/third_party/python/gyp/test/builddir/src/prog1.c b/third_party/python/gyp/test/builddir/src/prog1.c
new file mode 100644
index 0000000000..a32aaf04f9
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/prog1.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+extern void func1(void);
+
+int main(void)
+{
+ printf("Hello from prog1.c\n");
+ func1();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/builddir/src/prog1.gyp b/third_party/python/gyp/test/builddir/src/prog1.gyp
new file mode 100644
index 0000000000..5b96f035ec
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/prog1.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'builddir.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'pull_in_all',
+ 'type': 'none',
+ 'dependencies': [
+ 'prog1',
+ 'subdir2/prog2.gyp:prog2',
+ 'subdir2/subdir3/prog3.gyp:prog3',
+ 'subdir2/subdir3/subdir4/prog4.gyp:prog4',
+ 'subdir2/subdir3/subdir4/subdir5/prog5.gyp:prog5',
+ ],
+ },
+ {
+ 'target_name': 'prog1',
+ 'type': 'executable',
+ 'sources': [
+ 'prog1.c',
+ 'func1.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/builddir/src/subdir2/prog2.c b/third_party/python/gyp/test/builddir/src/subdir2/prog2.c
new file mode 100644
index 0000000000..9d682cd783
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/subdir2/prog2.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+extern void func2(void);
+
+int main(void)
+{
+ printf("Hello from subdir2/prog2.c\n");
+ func2();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/builddir/src/subdir2/prog2.gyp b/third_party/python/gyp/test/builddir/src/subdir2/prog2.gyp
new file mode 100644
index 0000000000..96299b646d
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/subdir2/prog2.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../builddir.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog2',
+ 'type': 'executable',
+ 'sources': [
+ 'prog2.c',
+ '../func2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/builddir/src/subdir2/subdir3/prog3.c b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/prog3.c
new file mode 100644
index 0000000000..da74965985
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/prog3.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+extern void func3(void);
+
+int main(void)
+{
+ printf("Hello from subdir2/subdir3/prog3.c\n");
+ func3();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/builddir/src/subdir2/subdir3/prog3.gyp b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/prog3.gyp
new file mode 100644
index 0000000000..d7df43c7bd
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/prog3.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../../builddir.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog3',
+ 'type': 'executable',
+ 'sources': [
+ 'prog3.c',
+ '../../func3.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/prog4.c b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/prog4.c
new file mode 100644
index 0000000000..5787d5fa43
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/prog4.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+extern void func4(void);
+
+int main(void)
+{
+ printf("Hello from subdir2/subdir3/subdir4/prog4.c\n");
+ func4();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/prog4.gyp b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/prog4.gyp
new file mode 100644
index 0000000000..862a8a18cd
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/prog4.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../../../builddir.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog4',
+ 'type': 'executable',
+ 'sources': [
+ 'prog4.c',
+ '../../../func4.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/subdir5/prog5.c b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/subdir5/prog5.c
new file mode 100644
index 0000000000..c6e2ab521f
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/subdir5/prog5.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+extern void func5(void);
+
+int main(void)
+{
+ printf("Hello from subdir2/subdir3/subdir4/subdir5/prog5.c\n");
+ func5();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/subdir5/prog5.gyp b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/subdir5/prog5.gyp
new file mode 100644
index 0000000000..fe1c9cbf50
--- /dev/null
+++ b/third_party/python/gyp/test/builddir/src/subdir2/subdir3/subdir4/subdir5/prog5.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../../../../builddir.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog5',
+ 'type': 'executable',
+ 'sources': [
+ 'prog5.c',
+ '../../../../func5.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/cflags/cflags.c b/third_party/python/gyp/test/cflags/cflags.c
new file mode 100644
index 0000000000..0a02ba9074
--- /dev/null
+++ b/third_party/python/gyp/test/cflags/cflags.c
@@ -0,0 +1,15 @@
+/* Copyright (c) 2010 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void)
+{
+#ifdef FOO
+ printf("FOO defined\n");
+#else
+ printf("FOO not defined\n");
+#endif
+ return 0;
+}
diff --git a/third_party/python/gyp/test/cflags/cflags.gyp b/third_party/python/gyp/test/cflags/cflags.gyp
new file mode 100644
index 0000000000..2840dc6318
--- /dev/null
+++ b/third_party/python/gyp/test/cflags/cflags.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cflags',
+ 'type': 'executable',
+ 'sources': [
+ 'cflags.c',
+ ],
+ },
+ {
+ 'target_name': 'cflags_host',
+ 'toolsets': ['host'],
+ 'type': 'executable',
+ 'sources': [
+ 'cflags.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/cflags/gyptest-cflags.py b/third_party/python/gyp/test/cflags/gyptest-cflags.py
new file mode 100755
index 0000000000..f4efccba9b
--- /dev/null
+++ b/third_party/python/gyp/test/cflags/gyptest-cflags.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies the use of the environment during regeneration when the gyp file
+changes, specifically by building an executable whose C preprocessor
+definitions are specified via CFLAGS.
+
+In this test, gyp and the build both run in the same local environment.
+"""
+
+import TestGyp
+
+# CPPFLAGS works in ninja but not make; CFLAGS works in both
+FORMATS = ('make', 'ninja')
+
+test = TestGyp.TestGyp(formats=FORMATS)
+
+# First set CFLAGS to blank in case the platform doesn't support unsetenv.
+with TestGyp.LocalEnv({'CFLAGS': '',
+ 'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('cflags.gyp')
+ test.build('cflags.gyp')
+
+expect = """FOO not defined\n"""
+test.run_built_executable('cflags', stdout=expect)
+test.run_built_executable('cflags_host', stdout=expect)
+
+test.sleep()
+
+with TestGyp.LocalEnv({'CFLAGS': '-DFOO=1',
+ 'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('cflags.gyp')
+ test.build('cflags.gyp')
+
+expect = """FOO defined\n"""
+test.run_built_executable('cflags', stdout=expect)
+
+# Environment variable CFLAGS shouldn't influence the flags for the host.
+expect = """FOO not defined\n"""
+test.run_built_executable('cflags_host', stdout=expect)
+
+test.sleep()
+
+with TestGyp.LocalEnv({'CFLAGS_host': '-DFOO=1',
+ 'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('cflags.gyp')
+ test.build('cflags.gyp')
+
+# Environment variable CFLAGS_host should influence the flags for the host.
+expect = """FOO defined\n"""
+test.run_built_executable('cflags_host', stdout=expect)
+
+test.sleep()
+
+with TestGyp.LocalEnv({'CFLAGS': ''}):
+ test.run_gyp('cflags.gyp')
+ test.build('cflags.gyp')
+
+expect = """FOO not defined\n"""
+test.run_built_executable('cflags', stdout=expect)
+
+test.sleep()
+
+with TestGyp.LocalEnv({'CFLAGS': '-DFOO=1'}):
+ test.run_gyp('cflags.gyp')
+ test.build('cflags.gyp')
+
+expect = """FOO defined\n"""
+test.run_built_executable('cflags', stdout=expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/compilable/gyptest-headers.py b/third_party/python/gyp/test/compilable/gyptest-headers.py
new file mode 100755
index 0000000000..91760216fb
--- /dev/null
+++ b/third_party/python/gyp/test/compilable/gyptest-headers.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that .hpp files are ignored when included in the source list on all
+platforms.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('headers.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('headers.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from program.c
+Hello from lib1.c
+"""
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/compilable/src/headers.gyp b/third_party/python/gyp/test/compilable/src/headers.gyp
new file mode 100644
index 0000000000..b6c2a8857b
--- /dev/null
+++ b/third_party/python/gyp/test/compilable/src/headers.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'dependencies': [
+ 'lib1'
+ ],
+ 'sources': [
+ 'program.cpp',
+ ],
+ },
+ {
+ 'target_name': 'lib1',
+ 'type': 'static_library',
+ 'sources': [
+ 'lib1.hpp',
+ 'lib1.cpp',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/compilable/src/lib1.cpp b/third_party/python/gyp/test/compilable/src/lib1.cpp
new file mode 100644
index 0000000000..51bc31a40b
--- /dev/null
+++ b/third_party/python/gyp/test/compilable/src/lib1.cpp
@@ -0,0 +1,7 @@
+#include <stdio.h>
+#include "lib1.hpp"
+
+void lib1_function(void) {
+ fprintf(stdout, "Hello from lib1.c\n");
+ fflush(stdout);
+}
diff --git a/third_party/python/gyp/test/compilable/src/lib1.hpp b/third_party/python/gyp/test/compilable/src/lib1.hpp
new file mode 100644
index 0000000000..72e63e8acd
--- /dev/null
+++ b/third_party/python/gyp/test/compilable/src/lib1.hpp
@@ -0,0 +1,6 @@
+#ifndef _lib1_hpp
+#define _lib1_hpp
+
+extern void lib1_function(void);
+
+#endif
diff --git a/third_party/python/gyp/test/compilable/src/program.cpp b/third_party/python/gyp/test/compilable/src/program.cpp
new file mode 100644
index 0000000000..8af2c9b6ff
--- /dev/null
+++ b/third_party/python/gyp/test/compilable/src/program.cpp
@@ -0,0 +1,9 @@
+#include <stdio.h>
+#include "lib1.hpp"
+
+int main(void) {
+ fprintf(stdout, "Hello from program.c\n");
+ fflush(stdout);
+ lib1_function();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/compiler-override/compiler-exe.gyp b/third_party/python/gyp/test/compiler-override/compiler-exe.gyp
new file mode 100644
index 0000000000..c2f3002f20
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/compiler-exe.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'test.c',
+ 'cxxtest.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/compiler-override/compiler-global-settings.gyp.in b/third_party/python/gyp/test/compiler-override/compiler-global-settings.gyp.in
new file mode 100644
index 0000000000..ca13a53e8d
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/compiler-global-settings.gyp.in
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ # PYTHON and PWD are replaced by the test code before this
+ # gyp file runs
+ 'make_global_settings': [
+ ['CC', r'$PYTHON $PWD/my_cc.py FOO'],
+ ['CXX', r'$PYTHON $PWD/my_cxx.py FOO'],
+ ['CC.host', r'$PYTHON $PWD/my_cc.py BAR'],
+ ['CXX.host', r'$PYTHON $PWD/my_cxx.py BAR'],
+
+ ['LD', r'$PYTHON $PWD/my_ld.py FOO_LINK'],
+ ['LD.host', r'$PYTHON $PWD/my_ld.py BAR_LINK'],
+ ['LINK', r'$PYTHON $PWD/my_ld.py FOO_LINK'],
+ ['LINK.host', r'$PYTHON $PWD/my_ld.py BAR_LINK'],
+ ],
+
+ # The above global settings should mean that these targets
+ # are built using the fake toolchain above.
+ 'targets': [
+ {
+ 'toolset': '$TOOLSET',
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'test.c',
+ 'cxxtest.cc',
+ ],
+ },
+ ],
+}
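
Because this is a template rather than a valid gyp file, the test driver (gyptest-compiler-global-settings.py below) substitutes $PYTHON, $PWD and $TOOLSET before handing the result to gyp; the '.host'-suffixed make_global_settings entries apply to the host toolset, the plain ones to the target toolset. The substitution mechanics are just string.Template (paths here are illustrative):

    import os
    from string import Template

    replacements = {'PYTHON': '/usr/bin/python',
                    'PWD': os.getcwd(),
                    'TOOLSET': 'target'}
    with open('compiler-global-settings.gyp.in') as f:
        template = Template(f.read())
    with open('compiler-global-settings.gyp', 'w') as f:
        f.write(template.substitute(replacements))
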
diff --git a/third_party/python/gyp/test/compiler-override/compiler-host.gyp b/third_party/python/gyp/test/compiler-override/compiler-host.gyp
new file mode 100644
index 0000000000..ab3d247e0b
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/compiler-host.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'toolset': 'host',
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'test.c',
+ 'cxxtest.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/compiler-override/compiler-shared-lib.gyp b/third_party/python/gyp/test/compiler-override/compiler-shared-lib.gyp
new file mode 100644
index 0000000000..d3e4316135
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/compiler-shared-lib.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello-lib',
+ 'type': 'shared_library',
+ 'sources': [
+ 'test.c',
+ 'cxxtest.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/compiler-override/cxxtest.cc b/third_party/python/gyp/test/compiler-override/cxxtest.cc
new file mode 100644
index 0000000000..517a353619
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/cxxtest.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Deliberately fails compilation, as this file should never be passed to
+// the actual compiler.
+#error Should not be passed to a real compiler
diff --git a/third_party/python/gyp/test/compiler-override/gyptest-compiler-env-toolchain.py b/third_party/python/gyp/test/compiler-override/gyptest-compiler-env-toolchain.py
new file mode 100644
index 0000000000..2361d0c7c2
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/gyptest-compiler-env-toolchain.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+Verifies that the user can override the compiler and linker using
+CC/CXX/NM/READELF environment variables.
+"""
+
+import TestGyp
+import os
+import copy
+import sys
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+if sys.platform == 'win32':
+ # Cross-compiling is not supported by ninja on Windows,
+ # and make is not supported on Windows at all.
+ sys.exit(0)
+
+# Clear any existing compiler-related env vars.
+for key in ['CC', 'CXX', 'LINK', 'CC_host', 'CXX_host', 'LINK_host',
+ 'NM_target', 'READELF_target']:
+ if key in os.environ:
+ del os.environ[key]
+
+
+def CheckCompiler(test, gypfile, check_for, run_gyp):
+ if run_gyp:
+ test.run_gyp(gypfile)
+ test.build(gypfile)
+
+ test.must_contain_all_lines(test.stdout(), check_for)
+
+
+test = TestGyp.TestGyp(formats=['ninja'])
+# Must set the test format to something with a flavor (the part after the '-')
+# in order to test the desired behavior. Since we want to run a non-host
+# toolchain, we have to set the flavor to something that the ninja generator
+# doesn't know about, so it doesn't default to the host-specific tools (e.g.,
+# 'otool' on mac to generate the .TOC).
+#
+# Note that we can't just pass format=['ninja-some_toolchain'] to the
+# constructor above, because then this test wouldn't be recognized as a ninja
+# format test.
+test.formats = ['ninja-my_flavor' if f == 'ninja' else f for f in test.formats]
+
+
+def TestTargetOverrideSharedLib():
+ # The stdout from nm and readelf is redirected to files, so we can't
+ # expect their output to appear. Instead, check for the files they create to
+ # see if they actually ran.
+ expected = ['my_cc.py', 'my_cxx.py', 'FOO']
+
+ # Check that CC, CXX, NM and READELF set the target tools
+ env = {'CC': 'python %s/my_cc.py FOO' % here,
+ 'CXX': 'python %s/my_cxx.py FOO' % here,
+ 'NM': 'python %s/my_nm.py' % here,
+ 'READELF': 'python %s/my_readelf.py' % here}
+
+ with TestGyp.LocalEnv(env):
+ CheckCompiler(test, 'compiler-shared-lib.gyp', expected, True)
+ test.must_contain(test.built_file_path('RAN_MY_NM'), 'RAN_MY_NM')
+ test.must_contain(test.built_file_path('RAN_MY_READELF'), 'RAN_MY_READELF')
+ test.unlink(test.built_file_path('RAN_MY_NM'))
+ test.unlink(test.built_file_path('RAN_MY_READELF'))
+
+ # Run the same tests once the environment has been restored. The generated
+ # projects should have embedded all the settings in the project files so the
+ # results should be the same.
+ CheckCompiler(test, 'compiler-shared-lib.gyp', expected, False)
+ test.must_contain(test.built_file_path('RAN_MY_NM'), 'RAN_MY_NM')
+ test.must_contain(test.built_file_path('RAN_MY_READELF'), 'RAN_MY_READELF')
+
+
+TestTargetOverrideSharedLib()
+test.pass_test()
diff --git a/third_party/python/gyp/test/compiler-override/gyptest-compiler-env.py b/third_party/python/gyp/test/compiler-override/gyptest-compiler-env.py
new file mode 100755
index 0000000000..bb38b6e55b
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/gyptest-compiler-env.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+Verifies that the user can override the compiler and linker using CC/CXX/LD
+environment variables.
+"""
+
+import TestGyp
+import os
+import copy
+import sys
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+if sys.platform == 'win32':
+ # Cross-compiling is not supported by ninja on Windows,
+ # and make is not supported on Windows at all.
+ sys.exit(0)
+
+# Clear any existing compiler-related env vars.
+for key in ['CC', 'CXX', 'LINK', 'CC_host', 'CXX_host', 'LINK_host']:
+ if key in os.environ:
+ del os.environ[key]
+
+
+def CheckCompiler(test, gypfile, check_for, run_gyp):
+ if run_gyp:
+ test.run_gyp(gypfile)
+ test.build(gypfile)
+
+ test.must_contain_all_lines(test.stdout(), check_for)
+
+
+test = TestGyp.TestGyp(formats=['ninja', 'make'])
+
+def TestTargetOverride():
+ expected = ['my_cc.py', 'my_cxx.py', 'FOO']
+
+ # ninja just uses $CC / $CXX as linker.
+ if test.format not in ['ninja', 'xcode-ninja']:
+ expected.append('FOO_LINK')
+
+ # Check that CC, CXX and LINK set the target compiler and linker
+ oldenv = os.environ.copy()
+ try:
+ os.environ['CC'] = 'python %s/my_cc.py FOO' % here
+ os.environ['CXX'] = 'python %s/my_cxx.py FOO' % here
+ os.environ['LINK'] = 'python %s/my_ld.py FOO_LINK' % here
+
+ CheckCompiler(test, 'compiler-exe.gyp', expected, True)
+ finally:
+ os.environ.clear()
+ os.environ.update(oldenv)
+
+ # Run the same tests once the environment has been restored. The
+ # generated projects should have embedded all the settings in the
+ # project files, so the results should be the same.
+ CheckCompiler(test, 'compiler-exe.gyp', expected, False)
+
+
+def TestTargetOverrideCompilerOnly():
+ # Same test again, but with only CC and CXX set, not LINK
+ oldenv = os.environ.copy()
+ try:
+ os.environ['CC'] = 'python %s/my_cc.py FOO' % here
+ os.environ['CXX'] = 'python %s/my_cxx.py FOO' % here
+
+ CheckCompiler(test, 'compiler-exe.gyp',
+ ['my_cc.py', 'my_cxx.py', 'FOO'],
+ True)
+ finally:
+ os.environ.clear()
+ os.environ.update(oldenv)
+
+ # Run the same tests once the environment has been restored. The
+ # generated projects should have embedded all the settings in the
+ # project files, so the results should be the same.
+ CheckCompiler(test, 'compiler-exe.gyp',
+ ['my_cc.py', 'my_cxx.py', 'FOO'],
+ False)
+
+
+def TestHostOverride():
+ expected = ['my_cc.py', 'my_cxx.py', 'HOST']
+ if test.format != 'ninja': # ninja just uses $CC / $CXX as linker.
+ expected.append('HOST_LINK')
+
+ # Check that CC_host and friends set the host compiler
+ oldenv = os.environ.copy()
+ try:
+ os.environ['CC_host'] = 'python %s/my_cc.py HOST' % here
+ os.environ['CXX_host'] = 'python %s/my_cxx.py HOST' % here
+ os.environ['LINK_host'] = 'python %s/my_ld.py HOST_LINK' % here
+ CheckCompiler(test, 'compiler-host.gyp', expected, True)
+ finally:
+ os.environ.clear()
+ os.environ.update(oldenv)
+
+ # Run the same tests once the environment has been restored. The
+ # generated projects should have embedded all the settings in the
+ # project files, so the results should be the same.
+ CheckCompiler(test, 'compiler-host.gyp', expected, False)
+
+
+TestTargetOverride()
+TestTargetOverrideCompilerOnly()
+
+test.pass_test()
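
The save/restore dance around os.environ in each check above is what TestGyp.LocalEnv (used by gyptest-cflags.py earlier in this patch) packages up as a context manager. A minimal equivalent, assuming nothing about TestGyp's internals:

    import os
    from contextlib import contextmanager

    @contextmanager
    def local_env(updates):
        saved = dict(os.environ)
        os.environ.update(updates)
        try:
            yield
        finally:
            os.environ.clear()
            os.environ.update(saved)

    # Usage mirroring TestTargetOverride above:
    with local_env({'CC': 'python my_cc.py FOO'}):
        pass  # run_gyp / build here
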
diff --git a/third_party/python/gyp/test/compiler-override/gyptest-compiler-global-settings.py b/third_party/python/gyp/test/compiler-override/gyptest-compiler-global-settings.py
new file mode 100755
index 0000000000..9f062a4fef
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/gyptest-compiler-global-settings.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""
+Verifies that make_global_settings can be used to override the
+compiler settings.
+"""
+from __future__ import print_function
+
+import TestGyp
+import os
+import copy
+import sys
+from string import Template
+
+
+if sys.platform == 'win32':
+ # Cross-compiling is not supported by ninja on Windows,
+ # and make is not supported on Windows at all.
+ sys.exit(0)
+
+print("This test is currently disabled: https://crbug.com/483696.")
+sys.exit(0)
+
+test = TestGyp.TestGyp(formats=['ninja', 'make'])
+
+gypfile = 'compiler-global-settings.gyp'
+
+replacements = {'PYTHON': '/usr/bin/python', 'PWD': os.getcwd()}
+
+# Process the .in gyp file to produce the final gyp file
+# since we need to include absolute paths in the make_global_settings
+# section.
+replacements['TOOLSET'] = 'target'
+with open(gypfile + '.in') as f:
+ s = Template(f.read())
+with open(gypfile, 'w') as output:
+ output.write(s.substitute(replacements))
+
+old_env = dict(os.environ)
+os.environ['GYP_CROSSCOMPILE'] = '1'
+test.run_gyp(gypfile)
+os.environ.clear()
+os.environ.update(old_env)
+
+test.build(gypfile)
+test.must_contain_all_lines(test.stdout(), ['my_cc.py', 'my_cxx.py', 'FOO'])
+
+# The xcode generator chokes on the 'host' toolset. Skip the rest of
+# this test (cf. https://code.google.com/p/gyp/issues/detail?id=454).
+if test.format == 'xcode-ninja':
+ test.pass_test()
+
+# Same again but with the host toolset.
+replacements['TOOLSET'] = 'host'
+with open(gypfile + '.in') as f:
+ s = Template(f.read())
+with open(gypfile, 'w') as output:
+ output.write(s.substitute(replacements))
+
+old_env = dict(os.environ)
+os.environ['GYP_CROSSCOMPILE'] = '1'
+test.run_gyp(gypfile)
+os.environ.clear()
+os.environ.update(old_env)
+
+test.build(gypfile)
+test.must_contain_all_lines(test.stdout(), ['my_cc.py', 'my_cxx.py', 'BAR'])
+
+# Check that CC_host overrides make_global_settings
+old_env = dict(os.environ)
+os.environ['CC_host'] = '%s %s/my_cc.py SECRET' % (replacements['PYTHON'],
+ replacements['PWD'])
+test.run_gyp(gypfile)
+os.environ.clear()
+os.environ.update(old_env)
+
+test.build(gypfile)
+test.must_contain_all_lines(test.stdout(), ['SECRET', 'my_cxx.py', 'BAR'])
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/compiler-override/my_cc.py b/third_party/python/gyp/test/compiler-override/my_cc.py
new file mode 100755
index 0000000000..09e1d3c58d
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/my_cc.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import print_function
+import sys
+print(sys.argv)
diff --git a/third_party/python/gyp/test/compiler-override/my_cxx.py b/third_party/python/gyp/test/compiler-override/my_cxx.py
new file mode 100755
index 0000000000..09e1d3c58d
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/my_cxx.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import print_function
+import sys
+print(sys.argv)
diff --git a/third_party/python/gyp/test/compiler-override/my_ld.py b/third_party/python/gyp/test/compiler-override/my_ld.py
new file mode 100755
index 0000000000..09e1d3c58d
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/my_ld.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import print_function
+import sys
+print(sys.argv)
diff --git a/third_party/python/gyp/test/compiler-override/my_nm.py b/third_party/python/gyp/test/compiler-override/my_nm.py
new file mode 100755
index 0000000000..2c4e678110
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/my_nm.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import print_function
+import sys
+print(sys.argv)
+with open('RAN_MY_NM', 'w') as f:
+ f.write('RAN_MY_NM')
diff --git a/third_party/python/gyp/test/compiler-override/my_readelf.py b/third_party/python/gyp/test/compiler-override/my_readelf.py
new file mode 100755
index 0000000000..626665435e
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/my_readelf.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import print_function
+import sys
+print(sys.argv)
+with open('RAN_MY_READELF', 'w') as f:
+ f.write('RAN_MY_READELF')
diff --git a/third_party/python/gyp/test/compiler-override/test.c b/third_party/python/gyp/test/compiler-override/test.c
new file mode 100644
index 0000000000..517a353619
--- /dev/null
+++ b/third_party/python/gyp/test/compiler-override/test.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Deliberately fails compilation, as this file should never be passed to
+// the actual compiler.
+#error Should not be passed to a real compiler
diff --git a/third_party/python/gyp/test/conditions/elseif/elseif.gyp b/third_party/python/gyp/test/conditions/elseif/elseif.gyp
new file mode 100644
index 0000000000..6367ff7d7a
--- /dev/null
+++ b/third_party/python/gyp/test/conditions/elseif/elseif.gyp
@@ -0,0 +1,43 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'variables': { 'test_var': 0 },
+ 'target_name': 'program0',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'includes': [ 'elseif_conditions.gypi' ],
+ },
+ {
+ 'variables': { 'test_var': 1 },
+ 'target_name': 'program1',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'includes': [ 'elseif_conditions.gypi' ],
+ },
+ {
+ 'variables': { 'test_var': 2 },
+ 'target_name': 'program2',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'includes': [ 'elseif_conditions.gypi' ],
+ },
+ {
+ 'variables': { 'test_var': 3 },
+ 'target_name': 'program3',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'includes': [ 'elseif_conditions.gypi' ],
+ },
+ {
+ 'variables': { 'test_var': 4 },
+ 'target_name': 'program4',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'includes': [ 'elseif_conditions.gypi' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/conditions/elseif/elseif_bad1.gyp b/third_party/python/gyp/test/conditions/elseif/elseif_bad1.gyp
new file mode 100644
index 0000000000..35c8455cca
--- /dev/null
+++ b/third_party/python/gyp/test/conditions/elseif/elseif_bad1.gyp
@@ -0,0 +1,20 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Trigger an error because of two consecutive string conditions.
+
+{
+ 'targets': [
+ {
+ 'variables': { 'test_var': 0 },
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'conditions': [
+ ['test_var==0', 'test_var==1', {
+ }],
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/conditions/elseif/elseif_bad2.gyp b/third_party/python/gyp/test/conditions/elseif/elseif_bad2.gyp
new file mode 100644
index 0000000000..b529f292c0
--- /dev/null
+++ b/third_party/python/gyp/test/conditions/elseif/elseif_bad2.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Trigger an error because of two consecutive string conditions, even if the
+# conditions are not actually evaluated.
+
+{
+ 'targets': [
+ {
+ 'variables': { 'test_var': 0 },
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'conditions': [
+ ['test_var==0', {
+ }, 'test_var==1', 'test_var==2', {
+ }],
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/conditions/elseif/elseif_bad3.gyp b/third_party/python/gyp/test/conditions/elseif/elseif_bad3.gyp
new file mode 100644
index 0000000000..126e186053
--- /dev/null
+++ b/third_party/python/gyp/test/conditions/elseif/elseif_bad3.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Trigger an error because there are unexpected trailing items in a condition.
+
+{
+ 'targets': [
+ {
+ 'variables': { 'test_var': 0 },
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'conditions': [
+ ['test_var==0', {
+ }, 'test_var==1', {
+ }, {
+ }, 'test_var==2', {
+ }],
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/conditions/elseif/elseif_conditions.gypi b/third_party/python/gyp/test/conditions/elseif/elseif_conditions.gypi
new file mode 100644
index 0000000000..4310ccc031
--- /dev/null
+++ b/third_party/python/gyp/test/conditions/elseif/elseif_conditions.gypi
@@ -0,0 +1,15 @@
+{
+ 'conditions': [
+ ['test_var==0', {
+ 'defines': ['FOO="first_if"'],
+ }, 'test_var==1', {
+ 'defines': ['FOO="first_else_if"'],
+ }, 'test_var==2', {
+ 'defines': ['FOO="second_else_if"'],
+ }, 'test_var==3', {
+ 'defines': ['FOO="third_else_if"'],
+ }, {
+ 'defines': ['FOO="last_else"'],
+ }],
+ ],
+}
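
A 'conditions' entry may interleave condition strings and settings dicts: gyp takes the dict following the first condition that evaluates true, and a bare trailing dict acts as the final else. A sketch of that selection order (gyp expands variables and parses the expression itself; plain eval here is only for illustration):

    def pick_settings(items, variables):
        i = 0
        while i < len(items):
            if isinstance(items[i], dict):  # bare trailing dict: final else
                return items[i]
            if eval(items[i], {}, dict(variables)):  # illustration only
                return items[i + 1]
            i += 2
        return {}

    row = ['test_var==0', {'defines': ['FOO="first_if"']},
           'test_var==1', {'defines': ['FOO="first_else_if"']},
           {'defines': ['FOO="last_else"']}]
    print(pick_settings(row, {'test_var': 1}))
    # {'defines': ['FOO="first_else_if"']}
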
diff --git a/third_party/python/gyp/test/conditions/elseif/gyptest_elseif.py b/third_party/python/gyp/test/conditions/elseif/gyptest_elseif.py
new file mode 100644
index 0000000000..9d030cf3fe
--- /dev/null
+++ b/third_party/python/gyp/test/conditions/elseif/gyptest_elseif.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that "else-if" conditions work.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('elseif.gyp')
+test.build('elseif.gyp', test.ALL)
+test.run_built_executable(
+ 'program0', stdout='first_if\n')
+test.run_built_executable(
+ 'program1', stdout='first_else_if\n')
+test.run_built_executable(
+ 'program2', stdout='second_else_if\n')
+test.run_built_executable(
+ 'program3', stdout='third_else_if\n')
+test.run_built_executable(
+ 'program4', stdout='last_else\n')
+
+# Verify that bad condition blocks fail at gyp time.
+test.run_gyp('elseif_bad1.gyp', status=1, stderr=None)
+test.run_gyp('elseif_bad2.gyp', status=1, stderr=None)
+test.run_gyp('elseif_bad3.gyp', status=1, stderr=None)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/conditions/elseif/program.cc b/third_party/python/gyp/test/conditions/elseif/program.cc
new file mode 100644
index 0000000000..147fe2f75e
--- /dev/null
+++ b/third_party/python/gyp/test/conditions/elseif/program.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+int main() {
+ printf("%s\n", FOO);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/configurations/basics/configurations.c b/third_party/python/gyp/test/configurations/basics/configurations.c
new file mode 100644
index 0000000000..39e13c9c83
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/basics/configurations.c
@@ -0,0 +1,15 @@
+#include <stdio.h>
+
+int main(void)
+{
+#ifdef FOO
+ printf("Foo configuration\n");
+#endif
+#ifdef DEBUG
+ printf("Debug configuration\n");
+#endif
+#ifdef RELEASE
+ printf("Release configuration\n");
+#endif
+ return 0;
+}
diff --git a/third_party/python/gyp/test/configurations/basics/configurations.gyp b/third_party/python/gyp/test/configurations/basics/configurations.gyp
new file mode 100644
index 0000000000..93f1d8d5c7
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/basics/configurations.gyp
@@ -0,0 +1,32 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'executable',
+ 'sources': [
+ 'configurations.c',
+ ],
+ 'configurations': {
+ 'Debug': {
+ 'defines': [
+ 'DEBUG',
+ ],
+ },
+ 'Release': {
+ 'defines': [
+ 'RELEASE',
+ ],
+ },
+ 'Foo': {
+ 'defines': [
+ 'FOO',
+ ],
+ },
+ }
+ },
+ ],
+}
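+# Each configuration contributes exactly one define, so the same source
+# prints a different line depending on the configuration it was built in
+# (exercised by gyptest-configurations.py).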
diff --git a/third_party/python/gyp/test/configurations/basics/gyptest-configurations.py b/third_party/python/gyp/test/configurations/basics/gyptest-configurations.py
new file mode 100755
index 0000000000..27cd2e87d2
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/basics/gyptest-configurations.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies build of an executable in three different configurations.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('configurations.gyp')
+
+test.set_configuration('Release')
+test.build('configurations.gyp')
+test.run_built_executable('configurations', stdout="Release configuration\n")
+
+test.set_configuration('Debug')
+test.build('configurations.gyp')
+test.run_built_executable('configurations', stdout="Debug configuration\n")
+
+test.set_configuration('Foo')
+test.build('configurations.gyp')
+test.run_built_executable('configurations', stdout="Foo configuration\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/configurations/inheritance/configurations.c b/third_party/python/gyp/test/configurations/inheritance/configurations.c
new file mode 100644
index 0000000000..ebb9f8450e
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/inheritance/configurations.c
@@ -0,0 +1,21 @@
+#include <stdio.h>
+
+int main(void)
+{
+#ifdef BASE
+ printf("Base configuration\n");
+#endif
+#ifdef COMMON
+ printf("Common configuration\n");
+#endif
+#ifdef COMMON2
+ printf("Common2 configuration\n");
+#endif
+#ifdef DEBUG
+ printf("Debug configuration\n");
+#endif
+#ifdef RELEASE
+ printf("Release configuration\n");
+#endif
+ return 0;
+}
diff --git a/third_party/python/gyp/test/configurations/inheritance/configurations.gyp b/third_party/python/gyp/test/configurations/inheritance/configurations.gyp
new file mode 100644
index 0000000000..9441376b4d
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/inheritance/configurations.gyp
@@ -0,0 +1,40 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'configurations': {
+ 'Base': {
+ 'abstract': 1,
+ 'defines': ['BASE'],
+ },
+ 'Common': {
+ 'abstract': 1,
+ 'inherit_from': ['Base'],
+ 'defines': ['COMMON'],
+ },
+ 'Common2': {
+ 'abstract': 1,
+ 'defines': ['COMMON2'],
+ },
+ 'Debug': {
+ 'inherit_from': ['Common', 'Common2'],
+ 'defines': ['DEBUG'],
+ },
+ 'Release': {
+ 'inherit_from': ['Common', 'Common2'],
+ 'defines': ['RELEASE'],
+ },
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'executable',
+ 'sources': [
+ 'configurations.c',
+ ],
+ },
+ ],
+}
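+# With this graph, Debug should accumulate its defines in inheritance
+# order (BASE and COMMON via 'Common', then COMMON2, then its own
+# DEBUG), which matches the output gyptest-inheritance.py expects.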
diff --git a/third_party/python/gyp/test/configurations/inheritance/duplicates.gyp b/third_party/python/gyp/test/configurations/inheritance/duplicates.gyp
new file mode 100644
index 0000000000..6930ce3b39
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/inheritance/duplicates.gyp
@@ -0,0 +1,27 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'default_configuration': 'A',
+ 'configurations': {
+ 'A': {
+ 'defines': ['SOMETHING'],
+ },
+ 'B': {
+ 'inherit_from': ['A'],
+ },
+ },
+ 'cflags': ['-g'],
+ },
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'executable',
+ 'sources': [
+ 'configurations.c',
+ ],
+ },
+ ],
+}
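+# Flattening merges target_defaults' cflags into every concrete
+# configuration, so both A and B should end up with ['-g'] in the
+# generated .gypd (see duplicates.gypd.golden).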
diff --git a/third_party/python/gyp/test/configurations/inheritance/duplicates.gypd.golden b/third_party/python/gyp/test/configurations/inheritance/duplicates.gypd.golden
new file mode 100644
index 0000000000..719b70861e
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/inheritance/duplicates.gypd.golden
@@ -0,0 +1,12 @@
+{'_DEPTH': '.',
+ 'included_files': ['duplicates.gyp'],
+ 'targets': [{'configurations': {'A': {'cflags': ['-g'],
+ 'defines': ['SOMETHING']},
+ 'B': {'cflags': ['-g'],
+ 'defines': ['SOMETHING'],
+ 'inherit_from': ['A']}},
+ 'default_configuration': 'A',
+ 'sources': ['configurations.c'],
+ 'target_name': 'configurations',
+ 'toolset': 'target',
+ 'type': 'executable'}]}
diff --git a/third_party/python/gyp/test/configurations/inheritance/gyptest-duplicates.py b/third_party/python/gyp/test/configurations/inheritance/gyptest-duplicates.py
new file mode 100755
index 0000000000..f015638b6d
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/inheritance/gyptest-duplicates.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that configurations do not duplicate other settings.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+test = TestGyp.TestGyp(format='gypd')
+
+test.run_gyp('duplicates.gyp')
+
+# Verify the duplicates.gypd against the checked-in expected contents.
+#
+# Normally, we would canonicalize line endings in the expected
+# contents file by setting the Subversion svn:eol-style property to
+# native, but that would still fail if multiple systems were sharing a
+# single workspace on a network-mounted file system. Consequently, we
+# massage the Windows line endings ('\r\n') in the output to the
+# checked-in UNIX endings ('\n').
+
+contents = test.read('duplicates.gypd').replace(
+ '\r', '').replace('\\\\', '/')
+expect = test.read('duplicates.gypd.golden').replace('\r', '')
+if not test.match(contents, expect):
+ print("Unexpected contents of `duplicates.gypd'")
+ test.diff(expect, contents, 'duplicates.gypd ')
+ test.fail_test()
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/configurations/inheritance/gyptest-inheritance.py b/third_party/python/gyp/test/configurations/inheritance/gyptest-inheritance.py
new file mode 100755
index 0000000000..22c73a3754
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/inheritance/gyptest-inheritance.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies build of an executable in two configurations, each inheriting
+settings from abstract base configurations.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('configurations.gyp')
+
+test.set_configuration('Release')
+test.build('configurations.gyp')
+test.run_built_executable('configurations',
+ stdout=('Base configuration\n'
+ 'Common configuration\n'
+ 'Common2 configuration\n'
+ 'Release configuration\n'))
+
+test.set_configuration('Debug')
+test.build('configurations.gyp')
+test.run_built_executable('configurations',
+ stdout=('Base configuration\n'
+ 'Common configuration\n'
+ 'Common2 configuration\n'
+ 'Debug configuration\n'))
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/configurations/invalid/actions.gyp b/third_party/python/gyp/test/configurations/invalid/actions.gyp
new file mode 100644
index 0000000000..a6e42089eb
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/actions.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'actions': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/all_dependent_settings.gyp b/third_party/python/gyp/test/configurations/invalid/all_dependent_settings.gyp
new file mode 100644
index 0000000000..b16a245df5
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/all_dependent_settings.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'all_dependent_settings': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/configurations.gyp b/third_party/python/gyp/test/configurations/invalid/configurations.gyp
new file mode 100644
index 0000000000..2cfc960049
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/configurations.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'configurations': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/dependencies.gyp b/third_party/python/gyp/test/configurations/invalid/dependencies.gyp
new file mode 100644
index 0000000000..74633f3f11
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/dependencies.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'dependencies': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/direct_dependent_settings.gyp b/third_party/python/gyp/test/configurations/invalid/direct_dependent_settings.gyp
new file mode 100644
index 0000000000..8a0f2e95ea
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/direct_dependent_settings.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'direct_dependent_settings': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/gyptest-configurations.py b/third_party/python/gyp/test/configurations/invalid/gyptest-configurations.py
new file mode 100755
index 0000000000..bd844b95dd
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/gyptest-configurations.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that gyp rejects target-level keys that are not allowed inside
+a configuration.
+"""
+
+import TestGyp
+
+# Keys that do not belong inside a configuration dictionary.
+invalid_configuration_keys = [
+ 'actions',
+ 'all_dependent_settings',
+ 'configurations',
+ 'dependencies',
+ 'direct_dependent_settings',
+ 'libraries',
+ 'link_settings',
+ 'sources',
+ 'standalone_static_library',
+ 'target_name',
+ 'type',
+]
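+# Each key above has a matching <key>.gyp fixture in this directory that
+# places it inside a Debug configuration; gyp should reject every one
+# with the error format checked below.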
+
+test = TestGyp.TestGyp()
+
+for test_key in invalid_configuration_keys:
+ test.run_gyp('%s.gyp' % test_key, status=1, stderr=None)
+ expect = ['%s not allowed in the Debug configuration, found in target '
+ '%s.gyp:configurations#target' % (test_key, test_key)]
+ test.must_contain_all_lines(test.stderr(), expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/configurations/invalid/libraries.gyp b/third_party/python/gyp/test/configurations/invalid/libraries.gyp
new file mode 100644
index 0000000000..c4014ed406
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/libraries.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'libraries': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/link_settings.gyp b/third_party/python/gyp/test/configurations/invalid/link_settings.gyp
new file mode 100644
index 0000000000..2f0e1c46f5
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/link_settings.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'link_settings': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/sources.gyp b/third_party/python/gyp/test/configurations/invalid/sources.gyp
new file mode 100644
index 0000000000..b38cca0381
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/sources.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'sources': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/standalone_static_library.gyp b/third_party/python/gyp/test/configurations/invalid/standalone_static_library.gyp
new file mode 100644
index 0000000000..2edb9febd6
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/standalone_static_library.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'standalone_static_library': 1,
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/target_name.gyp b/third_party/python/gyp/test/configurations/invalid/target_name.gyp
new file mode 100644
index 0000000000..83baad95d6
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/target_name.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'target_name': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/invalid/type.gyp b/third_party/python/gyp/test/configurations/invalid/type.gyp
new file mode 100644
index 0000000000..bc55898b89
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/invalid/type.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'none',
+ 'configurations': {
+ 'Debug': {
+ 'type': [
+ ],
+ },
+ }
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/configurations/target_platform/configurations.gyp b/third_party/python/gyp/test/configurations/target_platform/configurations.gyp
new file mode 100644
index 0000000000..d15429f4e5
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/target_platform/configurations.gyp
@@ -0,0 +1,58 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'configurations': {
+ 'Debug_Win32': {
+ 'msvs_configuration_platform': 'Win32',
+ },
+ 'Debug_x64': {
+ 'msvs_configuration_platform': 'x64',
+ },
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'left',
+ 'type': 'static_library',
+ 'sources': [
+ 'left.c',
+ ],
+ 'configurations': {
+ 'Debug_Win32': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ },
+ {
+ 'target_name': 'right',
+ 'type': 'static_library',
+ 'sources': [
+ 'right.c',
+ ],
+ },
+ {
+ 'target_name': 'front_left',
+ 'type': 'executable',
+ 'dependencies': ['left'],
+ 'sources': [
+ 'front.c',
+ ],
+ 'configurations': {
+ 'Debug_Win32': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ },
+ {
+ 'target_name': 'front_right',
+ 'type': 'executable',
+ 'dependencies': ['right'],
+ 'sources': [
+ 'front.c',
+ ],
+ },
+ ],
+}
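+# msvs_target_platform overrides the configuration's platform for a
+# single target, so 'left' and 'front_left' should build as x64 even
+# under Debug_Win32, while 'right' and 'front_right' follow the
+# configuration (see gyptest-target_platform.py).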
diff --git a/third_party/python/gyp/test/configurations/target_platform/front.c b/third_party/python/gyp/test/configurations/target_platform/front.c
new file mode 100644
index 0000000000..7a91689ff5
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/target_platform/front.c
@@ -0,0 +1,8 @@
+#include <stdio.h>
+
+const char *message(void);
+
+int main(void) {
+ printf("%s\n", message());
+ return 0;
+}
diff --git a/third_party/python/gyp/test/configurations/target_platform/gyptest-target_platform.py b/third_party/python/gyp/test/configurations/target_platform/gyptest-target_platform.py
new file mode 100755
index 0000000000..1645d6ec08
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/target_platform/gyptest-target_platform.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests the msvs specific msvs_target_platform option.
+"""
+
+import TestGyp
+import TestCommon
+
+
+def RunX64(exe, stdout):
+ try:
+ test.run_built_executable(exe, stdout=stdout)
+ except WindowsError as e:
+ # Assume the exe is 64-bit if it can't load on 32-bit systems.
+ # Both forms of the error are checked because different versions of
+ # Python return different errors for an invalid exe type.
+ if e.errno != 193 and '[Error 193]' not in str(e):
+ raise
+
+
+test = TestGyp.TestGyp(formats=['msvs'])
+
+test.run_gyp('configurations.gyp')
+
+test.set_configuration('Debug|x64')
+test.build('configurations.gyp', rebuild=True)
+RunX64('front_left', stdout=('left\n'))
+RunX64('front_right', stdout=('right\n'))
+
+test.set_configuration('Debug|Win32')
+test.build('configurations.gyp', rebuild=True)
+RunX64('front_left', stdout=('left\n'))
+test.run_built_executable('front_right', stdout=('right\n'))
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/configurations/target_platform/left.c b/third_party/python/gyp/test/configurations/target_platform/left.c
new file mode 100644
index 0000000000..1ce2ea1227
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/target_platform/left.c
@@ -0,0 +1,3 @@
+const char *message(void) {
+ return "left";
+}
diff --git a/third_party/python/gyp/test/configurations/target_platform/right.c b/third_party/python/gyp/test/configurations/target_platform/right.c
new file mode 100644
index 0000000000..b1578492fe
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/target_platform/right.c
@@ -0,0 +1,3 @@
+const char *message(void) {
+ return "right";
+}
diff --git a/third_party/python/gyp/test/configurations/x64/configurations.c b/third_party/python/gyp/test/configurations/x64/configurations.c
new file mode 100644
index 0000000000..37018438fc
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/x64/configurations.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+
+int main(void) {
+ if (sizeof(void*) == 4) {
+ printf("Running Win32\n");
+ } else if (sizeof(void*) == 8) {
+ printf("Running x64\n");
+ } else {
+ printf("Unexpected platform\n");
+ }
+ return 0;
+}
diff --git a/third_party/python/gyp/test/configurations/x64/configurations.gyp b/third_party/python/gyp/test/configurations/x64/configurations.gyp
new file mode 100644
index 0000000000..8b0139f141
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/x64/configurations.gyp
@@ -0,0 +1,38 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'configurations': {
+ 'Debug': {
+ 'msvs_configuration_platform': 'Win32',
+ },
+ 'Debug_x64': {
+ 'inherit_from': ['Debug'],
+ 'msvs_configuration_platform': 'x64',
+ },
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'configurations',
+ 'type': 'executable',
+ 'sources': [
+ 'configurations.c',
+ ],
+ },
+ {
+ 'target_name': 'configurations64',
+ 'type': 'executable',
+ 'sources': [
+ 'configurations.c',
+ ],
+ 'configurations': {
+ 'Debug': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ },
+ ],
+}
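+# 'configurations64' overrides Debug to target x64, so a Debug|Win32
+# build should produce one x86 and one x64 binary; gyptest-x86.py checks
+# the machine types via dumpbin /headers.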
diff --git a/third_party/python/gyp/test/configurations/x64/gyptest-x86.py b/third_party/python/gyp/test/configurations/x64/gyptest-x86.py
new file mode 100755
index 0000000000..8675d8f7e7
--- /dev/null
+++ b/third_party/python/gyp/test/configurations/x64/gyptest-x86.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that Win32 and x64 executable targets are built as the
+requested architecture.
+"""
+
+import TestGyp
+
+import sys
+
+formats = ['msvs']
+if sys.platform == 'win32':
+ formats += ['ninja']
+test = TestGyp.TestGyp(formats=formats)
+
+test.run_gyp('configurations.gyp')
+test.set_configuration('Debug|Win32')
+test.build('configurations.gyp', test.ALL)
+
+for machine, suffix in [('14C machine (x86)', ''),
+ ('8664 machine (x64)', '64')]:
+ output = test.run_dumpbin(
+ '/headers', test.built_file_path('configurations%s.exe' % suffix))
+ if machine not in output:
+ test.fail_test()
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/copies/gyptest-all.py b/third_party/python/gyp/test/copies/gyptest-all.py
new file mode 100755
index 0000000000..aeccf3324c
--- /dev/null
+++ b/third_party/python/gyp/test/copies/gyptest-all.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies file copies using an explicit build target of 'all'.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('copies.gyp',
+ '-G', 'xcode_ninja_target_pattern=^(?!copies_null)',
+ chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('copies.gyp', test.ALL, chdir='relocate/src')
+
+test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')
+
+test.built_file_must_match('copies-out/file2',
+ 'file2 contents\n',
+ chdir='relocate/src')
+
+test.built_file_must_match('copies-out/directory/file3',
+ 'file3 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out/directory/file4',
+ 'file4 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out/directory/subdir/file5',
+ 'file5 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out/subdir/file6',
+ 'file6 contents\n',
+ chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/copies/gyptest-attribs.py b/third_party/python/gyp/test/copies/gyptest-attribs.py
new file mode 100644
index 0000000000..70d717a45e
--- /dev/null
+++ b/third_party/python/gyp/test/copies/gyptest-attribs.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that copying files preserves file attributes.
+"""
+
+import TestGyp
+
+import os
+import stat
+import sys
+
+
+def check_attribs(path, expected_exec_bit):
+ out_path = test.built_file_path(path, chdir='src')
+
+ out_stat = os.stat(out_path)
+ if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
+ test.fail_test()
+
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('copies-attribs.gyp', chdir='src')
+
+test.build('copies-attribs.gyp', chdir='src')
+
+if sys.platform != 'win32':
+ out_path = test.built_file_path('executable-file.sh', chdir='src')
+ test.must_contain(out_path,
+ '#!/bin/bash\n'
+ '\n'
+ 'echo echo echo echo cho ho o o\n')
+ check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/copies/gyptest-default.py b/third_party/python/gyp/test/copies/gyptest-default.py
new file mode 100755
index 0000000000..a916869f0d
--- /dev/null
+++ b/third_party/python/gyp/test/copies/gyptest-default.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies file copies using the build tool default.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('copies.gyp',
+ '-G', 'xcode_ninja_target_pattern=^(?!copies_null)',
+ chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('copies.gyp', chdir='relocate/src')
+
+test.must_match(['relocate', 'src', 'copies-out', 'file1'], 'file1 contents\n')
+
+test.built_file_must_match('copies-out/file2',
+ 'file2 contents\n',
+ chdir='relocate/src')
+
+test.built_file_must_match('copies-out/directory/file3',
+ 'file3 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out/directory/file4',
+ 'file4 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out/directory/subdir/file5',
+ 'file5 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out/subdir/file6',
+ 'file6 contents\n',
+ chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/copies/gyptest-samedir.py b/third_party/python/gyp/test/copies/gyptest-samedir.py
new file mode 100755
index 0000000000..923ca61557
--- /dev/null
+++ b/third_party/python/gyp/test/copies/gyptest-samedir.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies file copies where two copies sections in the same target have the
+same destination directory.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+test.run_gyp('copies-samedir.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+test.build('copies-samedir.gyp', 'copies_samedir', chdir='relocate/src')
+
+test.built_file_must_match('copies-out-samedir/file1',
+ 'file1 contents\n',
+ chdir='relocate/src')
+
+test.built_file_must_match('copies-out-samedir/file2',
+ 'file2 contents\n',
+ chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/copies/gyptest-slash.py b/third_party/python/gyp/test/copies/gyptest-slash.py
new file mode 100755
index 0000000000..f7a2e549eb
--- /dev/null
+++ b/third_party/python/gyp/test/copies/gyptest-slash.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies file copies with a trailing slash in the destination directory.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+test.run_gyp('copies-slash.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+test.build('copies-slash.gyp', chdir='relocate/src')
+
+test.built_file_must_match('copies-out-slash/directory/file3',
+ 'file3 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out-slash/directory/file4',
+ 'file4 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out-slash/directory/subdir/file5',
+ 'file5 contents\n',
+ chdir='relocate/src')
+
+test.built_file_must_match('copies-out-slash-2/directory/file3',
+ 'file3 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out-slash-2/directory/file4',
+ 'file4 contents\n',
+ chdir='relocate/src')
+test.built_file_must_match('copies-out-slash-2/directory/subdir/file5',
+ 'file5 contents\n',
+ chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/copies/gyptest-sourceless-shared-lib.py b/third_party/python/gyp/test/copies/gyptest-sourceless-shared-lib.py
new file mode 100644
index 0000000000..6ec2e512ad
--- /dev/null
+++ b/third_party/python/gyp/test/copies/gyptest-sourceless-shared-lib.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that copies in sourceless shared_library targets are executed.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+test.run_gyp('copies-sourceless-shared-lib.gyp', chdir='src')
+test.relocate('src', 'relocate/src')
+test.build('copies-sourceless-shared-lib.gyp', chdir='relocate/src')
+test.built_file_must_match('copies-out/file1',
+ 'file1 contents\n',
+ chdir='relocate/src')
+test.pass_test()
diff --git a/third_party/python/gyp/test/copies/gyptest-updir.py b/third_party/python/gyp/test/copies/gyptest-updir.py
new file mode 100755
index 0000000000..47a2ca2e1d
--- /dev/null
+++ b/third_party/python/gyp/test/copies/gyptest-updir.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies file copies where the destination is one level above an expansion that
+yields a make variable.
+"""
+
+from __future__ import print_function
+
+import sys
+
+import TestGyp
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+test = TestGyp.TestGyp()
+test.run_gyp('copies-updir.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+test.build('copies-updir.gyp', 'copies_up', chdir='relocate/src')
+
+test.built_file_must_match('../copies-out-updir/file1',
+ 'file1 contents\n',
+ chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/copies/src/copies-attribs.gyp b/third_party/python/gyp/test/copies/src/copies-attribs.gyp
new file mode 100644
index 0000000000..073e0d0cf6
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/copies-attribs.gyp
@@ -0,0 +1,20 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'copies1',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)',
+ 'files': [
+ 'executable-file.sh',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/copies/src/copies-samedir.gyp b/third_party/python/gyp/test/copies/src/copies-samedir.gyp
new file mode 100644
index 0000000000..2919ce503e
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/copies-samedir.gyp
@@ -0,0 +1,37 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'copies_samedir',
+ 'type': 'none',
+ 'dependencies': [
+ 'copies_samedir_dependency',
+ ],
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out-samedir',
+ 'files': [
+ 'file1',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'copies_samedir_dependency',
+ 'type': 'none',
+ 'direct_dependent_settings': {
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out-samedir',
+ 'files': [
+ 'file2',
+ ],
+ },
+ ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/copies/src/copies-slash.gyp b/third_party/python/gyp/test/copies/src/copies-slash.gyp
new file mode 100644
index 0000000000..9bf54bd181
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/copies-slash.gyp
@@ -0,0 +1,36 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # A trailing slash on the destination directory should be ignored.
+ {
+ 'target_name': 'copies_recursive_trailing_slash',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out-slash/',
+ 'files': [
+ 'directory/',
+ ],
+ },
+ ],
+ },
+ # Even if the source directory is below <(PRODUCT_DIR).
+ {
+ 'target_name': 'copies_recursive_trailing_slash_in_product_dir',
+ 'type': 'none',
+ 'dependencies': [ ':copies_recursive_trailing_slash' ],
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out-slash-2/',
+ 'files': [
+ '<(PRODUCT_DIR)/copies-out-slash/directory/',
+ ],
+ },
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/copies/src/copies-sourceless-shared-lib.gyp b/third_party/python/gyp/test/copies/src/copies-sourceless-shared-lib.gyp
new file mode 100644
index 0000000000..7908f716a9
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/copies-sourceless-shared-lib.gyp
@@ -0,0 +1,27 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'mylib',
+ 'type': 'static_library',
+ 'sources': [ 'foo.c' ],
+ },
+ {
+ 'target_name': 'mysolib',
+ 'type': 'shared_library',
+ 'dependencies': [ 'mylib' ],
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out',
+ 'files': [ 'file1' ],
+ },
+ ],
+ # link.exe gets confused by sourceless shared libraries and needs this
+ # to become unconfused.
+ 'msvs_settings': { 'VCLinkerTool': { 'TargetMachine': '1', }, },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/copies/src/copies-updir.gyp b/third_party/python/gyp/test/copies/src/copies-updir.gyp
new file mode 100644
index 0000000000..bd3bfdd1d2
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/copies-updir.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'copies_up',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/../copies-out-updir',
+ 'files': [
+ 'file1',
+ ],
+ },
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/copies/src/copies.gyp b/third_party/python/gyp/test/copies/src/copies.gyp
new file mode 100644
index 0000000000..ce2e0cabca
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/copies.gyp
@@ -0,0 +1,70 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'copies1',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': 'copies-out',
+ 'files': [
+ 'file1',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'copies2',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out',
+ 'files': [
+ 'file2',
+ ],
+ },
+ ],
+ },
+ # Copy a directory tree.
+ {
+ 'target_name': 'copies_recursive',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out',
+ 'files': [
+ 'directory/',
+ ],
+ },
+ ],
+ },
+ # Copy a directory from deeper in the tree (this should not reproduce the
+ # entire directory path in the destination, only the final directory).
+ {
+ 'target_name': 'copies_recursive_depth',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out',
+ 'files': [
+ 'parentdir/subdir/',
+ ],
+ },
+ ],
+ },
+ # Verify that a null 'files' list doesn't gag the generators.
+ {
+ 'target_name': 'copies_null',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-null',
+ 'files': [],
+ },
+ ],
+ },
+ ],
+}
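+# Note: 'copies-out' above has no <(PRODUCT_DIR) prefix, so it lands
+# next to the .gyp file; the gyptest-*.py scripts therefore check it
+# with must_match rather than built_file_must_match.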
diff --git a/third_party/python/gyp/test/copies/src/directory/file3 b/third_party/python/gyp/test/copies/src/directory/file3
new file mode 100644
index 0000000000..43f16f3522
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/directory/file3
@@ -0,0 +1 @@
+file3 contents
diff --git a/third_party/python/gyp/test/copies/src/directory/file4 b/third_party/python/gyp/test/copies/src/directory/file4
new file mode 100644
index 0000000000..5f7270a084
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/directory/file4
@@ -0,0 +1 @@
+file4 contents
diff --git a/third_party/python/gyp/test/copies/src/directory/subdir/file5 b/third_party/python/gyp/test/copies/src/directory/subdir/file5
new file mode 100644
index 0000000000..41f47186bd
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/directory/subdir/file5
@@ -0,0 +1 @@
+file5 contents
diff --git a/third_party/python/gyp/test/copies/src/executable-file.sh b/third_party/python/gyp/test/copies/src/executable-file.sh
new file mode 100755
index 0000000000..796953a1a2
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/executable-file.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo echo echo echo cho ho o o
diff --git a/third_party/python/gyp/test/copies/src/file1 b/third_party/python/gyp/test/copies/src/file1
new file mode 100644
index 0000000000..84d55c5759
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/file1
@@ -0,0 +1 @@
+file1 contents
diff --git a/third_party/python/gyp/test/copies/src/file2 b/third_party/python/gyp/test/copies/src/file2
new file mode 100644
index 0000000000..af1b8ae35d
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/file2
@@ -0,0 +1 @@
+file2 contents
diff --git a/third_party/python/gyp/test/copies/src/foo.c b/third_party/python/gyp/test/copies/src/foo.c
new file mode 100644
index 0000000000..99a4c103ba
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/foo.c
@@ -0,0 +1,13 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+int f() { return 42; }
+
+#ifdef _MSC_VER
+// link.exe gets confused by sourceless shared libraries and needs this
+// to become unconfused.
+int __stdcall _DllMainCRTStartup(
+ unsigned hInst, unsigned reason, void* reserved) {
+ return 1;
+}
+#endif
diff --git a/third_party/python/gyp/test/copies/src/parentdir/subdir/file6 b/third_party/python/gyp/test/copies/src/parentdir/subdir/file6
new file mode 100644
index 0000000000..f5d5757348
--- /dev/null
+++ b/third_party/python/gyp/test/copies/src/parentdir/subdir/file6
@@ -0,0 +1 @@
+file6 contents
diff --git a/third_party/python/gyp/test/custom-generator/gyptest-custom-generator.py b/third_party/python/gyp/test/custom-generator/gyptest-custom-generator.py
new file mode 100755
index 0000000000..85fd0724a1
--- /dev/null
+++ b/third_party/python/gyp/test/custom-generator/gyptest-custom-generator.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Test that custom generators can be passed to --format
+"""
+
+import TestGyp
+
+test = TestGyp.TestGypCustom(format='mygenerator.py')
+test.run_gyp('test.gyp')
+
+# mygenerator.py should generate a file called MyBuildFile containing
+# "Testing..." alongside the gyp file.
+test.must_match('MyBuildFile', 'Testing...\n')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/custom-generator/mygenerator.py b/third_party/python/gyp/test/custom-generator/mygenerator.py
new file mode 100644
index 0000000000..5fcac3d779
--- /dev/null
+++ b/third_party/python/gyp/test/custom-generator/mygenerator.py
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Custom gyp generator that doesn't do much."""
+
+import gyp.common
+
+generator_default_variables = {}
+
+def GenerateOutput(target_list, target_dicts, data, params):
+  with open("MyBuildFile", "w") as f:
+    f.write("Testing...\n")
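+# gyp appears to load a custom generator named via --format and then
+# calls its GenerateOutput(target_list, target_dicts, data, params)
+# hook; generator_default_variables must exist, even if empty, for the
+# variable-expansion phase.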
diff --git a/third_party/python/gyp/test/custom-generator/test.gyp b/third_party/python/gyp/test/custom-generator/test.gyp
new file mode 100644
index 0000000000..aa5f864a3b
--- /dev/null
+++ b/third_party/python/gyp/test/custom-generator/test.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'exe',
+ 'type': 'executable',
+ 'sources': [
+ 'main.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/cxxflags/cxxflags.cc b/third_party/python/gyp/test/cxxflags/cxxflags.cc
new file mode 100644
index 0000000000..e70e39dfd3
--- /dev/null
+++ b/third_party/python/gyp/test/cxxflags/cxxflags.cc
@@ -0,0 +1,15 @@
+/* Copyright (c) 2010 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void)
+{
+#ifdef ABC
+ printf("With define\n");
+#else
+ printf("No define\n");
+#endif
+ return 0;
+}
diff --git a/third_party/python/gyp/test/cxxflags/cxxflags.gyp b/third_party/python/gyp/test/cxxflags/cxxflags.gyp
new file mode 100644
index 0000000000..a082d49492
--- /dev/null
+++ b/third_party/python/gyp/test/cxxflags/cxxflags.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'cxxflags',
+ 'type': 'executable',
+ 'sources': [
+ 'cxxflags.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/cxxflags/gyptest-cxxflags.py b/third_party/python/gyp/test/cxxflags/gyptest-cxxflags.py
new file mode 100755
index 0000000000..117a1800de
--- /dev/null
+++ b/third_party/python/gyp/test/cxxflags/gyptest-cxxflags.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies the use of the environment during regeneration when the gyp file
+changes, specifically via build of an executable with C++ flags specified by
+CXXFLAGS.
+
+In this test, gyp runs within a modified local environment, but the
+build happens outside of it.
+"""
+
+import TestGyp
+
+FORMATS = ('ninja',)
+
+test = TestGyp.TestGyp(formats=FORMATS)
+
+# We reset the environ after calling gyp. When the auto-regeneration happens,
+# the same define should be reused anyway.
+with TestGyp.LocalEnv({'CXXFLAGS': ''}):
+ test.run_gyp('cxxflags.gyp')
+
+test.build('cxxflags.gyp')
+
+expect = """\
+No define
+"""
+test.run_built_executable('cxxflags', stdout=expect)
+
+test.sleep()
+
+with TestGyp.LocalEnv({'CXXFLAGS': '-DABC'}):
+ test.run_gyp('cxxflags.gyp')
+
+test.build('cxxflags.gyp')
+
+expect = """\
+With define
+"""
+test.run_built_executable('cxxflags', stdout=expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/defines-escaping/defines-escaping.c b/third_party/python/gyp/test/defines-escaping/defines-escaping.c
new file mode 100644
index 0000000000..a0aa4c286d
--- /dev/null
+++ b/third_party/python/gyp/test/defines-escaping/defines-escaping.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2010 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void)
+{
+ printf(TEST_FORMAT, TEST_ARGS);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/defines-escaping/defines-escaping.gyp b/third_party/python/gyp/test/defines-escaping/defines-escaping.gyp
new file mode 100644
index 0000000000..6f0f3fde41
--- /dev/null
+++ b/third_party/python/gyp/test/defines-escaping/defines-escaping.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'defines_escaping',
+ 'type': 'executable',
+ 'sources': [
+ 'defines-escaping.c',
+ ],
+ 'defines': [
+ 'TEST_FORMAT="<(test_format)"',
+ 'TEST_ARGS=<(test_args)',
+ ],
+ },
+ ],
+}
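+# Abbreviated worked example (cf. gyptest-defines-escaping.py): with
+#   GYP_DEFINES: test_format='\n%s\n' test_args='"hi"'
+# the target gets TEST_FORMAT="\n%s\n" and TEST_ARGS="hi", so
+# printf(TEST_FORMAT, TEST_ARGS) prints hi on a line by itself.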
diff --git a/third_party/python/gyp/test/defines-escaping/gyptest-defines-escaping.py b/third_party/python/gyp/test/defines-escaping/gyptest-defines-escaping.py
new file mode 100755
index 0000000000..eb18a3d369
--- /dev/null
+++ b/third_party/python/gyp/test/defines-escaping/gyptest-defines-escaping.py
@@ -0,0 +1,184 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies build of an executable with C++ define specified by a gyp define using
+various special characters such as quotes, commas, etc.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# Tests string literals, percents, and backslash escapes.
+try:
+ os.environ['GYP_DEFINES'] = (
+ r"""test_format='\n%s\n' """
+ r"""test_args='"Simple test of %s with a literal"'""")
+ test.run_gyp('defines-escaping.gyp')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.build('defines-escaping.gyp')
+
+expect = """
+Simple test of %s with a literal
+"""
+test.run_built_executable('defines_escaping', stdout=expect)
+
+
+# Test multiple comma-and-space-separated string literals.
+try:
+ os.environ['GYP_DEFINES'] = \
+ r"""test_format='\n%s and %s\n' test_args='"foo", "bar"'"""
+ test.run_gyp('defines-escaping.gyp')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines-escaping.c')
+test.build('defines-escaping.gyp')
+
+expect = """
+foo and bar
+"""
+test.run_built_executable('defines_escaping', stdout=expect)
+
+
+# Test string literals containing quotes.
+try:
+ os.environ['GYP_DEFINES'] = (
+ r"""test_format='\n%s %s %s %s %s\n' """
+ r"""test_args='"\"These,\"","""
+ r""" "\"words,\"","""
+ r""" "\"are,\"","""
+ r""" "\"in,\"","""
+ r""" "\"quotes.\""'""")
+ test.run_gyp('defines-escaping.gyp')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines-escaping.c')
+test.build('defines-escaping.gyp')
+
+expect = """
+"These," "words," "are," "in," "quotes."
+"""
+test.run_built_executable('defines_escaping', stdout=expect)
+
+
+# Test string literals containing single quotes.
+try:
+ os.environ['GYP_DEFINES'] = (
+ r"""test_format='\n%s %s %s %s %s\n' """
+ r"""test_args="\"'These,'\","""
+ r""" \"'words,'\","""
+ r""" \"'are,'\","""
+ r""" \"'in,'\","""
+ r""" \"'quotes.'\"" """)
+ test.run_gyp('defines-escaping.gyp')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines-escaping.c')
+test.build('defines-escaping.gyp')
+
+expect = """
+'These,' 'words,' 'are,' 'in,' 'quotes.'
+"""
+test.run_built_executable('defines_escaping', stdout=expect)
+
+
+# Test string literals containing different numbers of backslashes before quotes
+# (to exercise Windows' quoting behaviour).
+try:
+ os.environ['GYP_DEFINES'] = (
+ r"""test_format='\n%s\n%s\n%s\n' """
+ r"""test_args='"\\\"1 visible slash\\\"","""
+ r""" "\\\\\"2 visible slashes\\\\\"","""
+ r""" "\\\\\\\"3 visible slashes\\\\\\\""'""")
+ test.run_gyp('defines-escaping.gyp')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines-escaping.c')
+test.build('defines-escaping.gyp')
+
+expect = r"""
+\"1 visible slash\"
+\\"2 visible slashes\\"
+\\\"3 visible slashes\\\"
+"""
+test.run_built_executable('defines_escaping', stdout=expect)
+
+
+# Test that various scary sequences are passed unfettered.
+try:
+ os.environ['GYP_DEFINES'] = (
+ r"""test_format='\n%s\n' """
+ r"""test_args='"$foo, &quot; `foo`;"'""")
+ test.run_gyp('defines-escaping.gyp')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines-escaping.c')
+test.build('defines-escaping.gyp')
+
+expect = """
+$foo, &quot; `foo`;
+"""
+test.run_built_executable('defines_escaping', stdout=expect)
+
+
+# Visual Studio 2010 can't handle passing %PATH%.
+if not (test.format == 'msvs' and test.uses_msbuild):
+ try:
+ os.environ['GYP_DEFINES'] = (
+ """test_format='%s' """
+ """test_args='"%PATH%"'""")
+ test.run_gyp('defines-escaping.gyp')
+ finally:
+ del os.environ['GYP_DEFINES']
+
+ test.sleep()
+ test.touch('defines-escaping.c')
+ test.build('defines-escaping.gyp')
+
+ expect = "%PATH%"
+ test.run_built_executable('defines_escaping', stdout=expect)
+
+
+# Test commas and semi-colons preceded by backslashes (to exercise Windows'
+# quoting behaviour).
+try:
+ os.environ['GYP_DEFINES'] = (
+ r"""test_format='\n%s\n%s\n' """
+ r"""test_args='"\\, \\\\;","""
+ # Same thing again, but enclosed in visible quotes.
+ r""" "\"\\, \\\\;\""'""")
+ test.run_gyp('defines-escaping.gyp')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines-escaping.c')
+test.build('defines-escaping.gyp')
+
+expect = r"""
+\, \\;
+"\, \\;"
+"""
+test.run_built_executable('defines_escaping', stdout=expect)
+
+# We deliberately do not test having an odd number of quotes in a string
+# literal because that isn't feasible in MSVS.
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/defines/defines-env.gyp b/third_party/python/gyp/test/defines/defines-env.gyp
new file mode 100644
index 0000000000..1781546ae0
--- /dev/null
+++ b/third_party/python/gyp/test/defines/defines-env.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'value%': '5',
+ },
+ 'targets': [
+ {
+ 'target_name': 'defines',
+ 'type': 'executable',
+ 'sources': [
+ 'defines.c',
+ ],
+ 'defines': [
+ 'VALUE=<(value)',
+ ],
+ },
+ ],
+}
+
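+# The '%' suffix in 'value%' marks a default: 5 is used only when
+# 'value' was not already set via -D or GYP_DEFINES, which is exactly
+# what gyptest-defines-env.py exercises (expecting 10, 25, 5, then 45).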
diff --git a/third_party/python/gyp/test/defines/defines.c b/third_party/python/gyp/test/defines/defines.c
new file mode 100644
index 0000000000..dda139275d
--- /dev/null
+++ b/third_party/python/gyp/test/defines/defines.c
@@ -0,0 +1,23 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void)
+{
+#ifdef FOO
+ printf("FOO is defined\n");
+#endif
+ printf("VALUE is %d\n", VALUE);
+
+#ifdef PAREN_VALUE
+ printf("2*PAREN_VALUE is %d\n", 2*PAREN_VALUE);
+#endif
+
+#ifdef HASH_VALUE
+ printf("HASH_VALUE is %s\n", HASH_VALUE);
+#endif
+
+ return 0;
+}
diff --git a/third_party/python/gyp/test/defines/defines.gyp b/third_party/python/gyp/test/defines/defines.gyp
new file mode 100644
index 0000000000..90a755eb84
--- /dev/null
+++ b/third_party/python/gyp/test/defines/defines.gyp
@@ -0,0 +1,38 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'defines',
+ 'type': 'executable',
+ 'sources': [
+ 'defines.c',
+ ],
+ 'defines': [
+ 'FOO',
+ 'VALUE=1',
+ 'PAREN_VALUE=(1+2+3)',
+ 'HASH_VALUE="a#1"',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="fakeos"', {
+ 'targets': [
+ {
+ 'target_name': 'fakeosprogram',
+ 'type': 'executable',
+ 'sources': [
+ 'defines.c',
+ ],
+ 'defines': [
+ 'FOO',
+ 'VALUE=1',
+ ],
+ },
+ ],
+ }],
+ ],
+}
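+# The 'fakeosprogram' target exists only when OS is overridden to
+# 'fakeos' (via -D or GYP_DEFINES); gyptest-define-override.py relies on
+# this to verify command-line and environment define precedence.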
diff --git a/third_party/python/gyp/test/defines/gyptest-define-override.py b/third_party/python/gyp/test/defines/gyptest-define-override.py
new file mode 100755
index 0000000000..9730455b67
--- /dev/null
+++ b/third_party/python/gyp/test/defines/gyptest-define-override.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a default gyp define can be overridden.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# CMake loudly warns about passing '#' to the compiler and drops the define.
+expect_stderr = ''
+if test.format == 'cmake':
+ expect_stderr = (
+"""WARNING: Preprocessor definitions containing '#' may not be passed on the"""
+""" compiler command line because many compilers do not support it.\n"""
+"""CMake is dropping a preprocessor definition: HASH_VALUE="a#1"\n"""
+"""Consider defining the macro in a (configured) header file.\n\n""")
+
+# Command-line define
+test.run_gyp('defines.gyp', '-D', 'OS=fakeos')
+test.build('defines.gyp', stderr=expect_stderr)
+test.built_file_must_exist('fakeosprogram', type=test.EXECUTABLE)
+# Clean up the exe so subsequent tests don't find an old exe.
+os.remove(test.built_file_path('fakeosprogram', type=test.EXECUTABLE))
+
+# Without "OS" override, fokeosprogram shouldn't be built.
+test.run_gyp('defines.gyp')
+test.build('defines.gyp', stderr=expect_stderr)
+test.built_file_must_not_exist('fakeosprogram', type=test.EXECUTABLE)
+
+# Environment define
+os.environ['GYP_DEFINES'] = 'OS=fakeos'
+test.run_gyp('defines.gyp')
+test.build('defines.gyp', stderr=expect_stderr)
+test.built_file_must_exist('fakeosprogram', type=test.EXECUTABLE)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/defines/gyptest-defines-env-regyp.py b/third_party/python/gyp/test/defines/gyptest-defines-env-regyp.py
new file mode 100755
index 0000000000..f2d931c2f7
--- /dev/null
+++ b/third_party/python/gyp/test/defines/gyptest-defines-env-regyp.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies build of an executable with C++ define specified by a gyp define, and
+the use of the environment during regeneration when the gyp file changes.
+"""
+
+import os
+import TestGyp
+
+# Regenerating build files when a gyp file changes is currently only supported
+# by the make generator.
+test = TestGyp.TestGyp(formats=['make'])
+
+try:
+ os.environ['GYP_DEFINES'] = 'value=50'
+ test.run_gyp('defines.gyp')
+finally:
+ # We clear the environ after calling gyp. When the auto-regeneration happens,
+ # the same define should be reused anyway. Reset to empty string first in
+ # case the platform doesn't support unsetenv.
+ os.environ['GYP_DEFINES'] = ''
+ del os.environ['GYP_DEFINES']
+
+test.build('defines.gyp')
+
+expect = """\
+FOO is defined
+VALUE is 1
+2*PAREN_VALUE is 12
+HASH_VALUE is a#1
+"""
+test.run_built_executable('defines', stdout=expect)
+
+# Sleep so that the changed gyp file will have a newer timestamp than the
+# previously generated build files.
+test.sleep()
+test.write('defines.gyp', test.read('defines-env.gyp'))
+
+test.build('defines.gyp', test.ALL)
+
+expect = """\
+VALUE is 50
+"""
+test.run_built_executable('defines', stdout=expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/defines/gyptest-defines-env.py b/third_party/python/gyp/test/defines/gyptest-defines-env.py
new file mode 100755
index 0000000000..6b4e7175a6
--- /dev/null
+++ b/third_party/python/gyp/test/defines/gyptest-defines-env.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies build of an executable with C++ define specified by a gyp define.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# With the value only given in environment, it should be used.
+try:
+ os.environ['GYP_DEFINES'] = 'value=10'
+ test.run_gyp('defines-env.gyp')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.build('defines-env.gyp')
+
+expect = """\
+VALUE is 10
+"""
+test.run_built_executable('defines', stdout=expect)
+
+
+# With the value given in both command line and environment,
+# command line should take precedence.
+try:
+ os.environ['GYP_DEFINES'] = 'value=20'
+ test.run_gyp('defines-env.gyp', '-Dvalue=25')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines.c')
+test.build('defines-env.gyp')
+
+expect = """\
+VALUE is 25
+"""
+test.run_built_executable('defines', stdout=expect)
+
+
+# With the value only given in environment, it should be ignored if
+# --ignore-environment is specified.
+try:
+ os.environ['GYP_DEFINES'] = 'value=30'
+ test.run_gyp('defines-env.gyp', '--ignore-environment')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines.c')
+test.build('defines-env.gyp')
+
+expect = """\
+VALUE is 5
+"""
+test.run_built_executable('defines', stdout=expect)
+
+
+# With the value given in both command line and environment, and
+# --ignore-environment also specified, command line should still be used.
+try:
+ os.environ['GYP_DEFINES'] = 'value=40'
+ test.run_gyp('defines-env.gyp', '--ignore-environment', '-Dvalue=45')
+finally:
+ del os.environ['GYP_DEFINES']
+
+test.sleep()
+test.touch('defines.c')
+test.build('defines-env.gyp')
+
+expect = """\
+VALUE is 45
+"""
+test.run_built_executable('defines', stdout=expect)
+
+
+test.pass_test()
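
Taken together, the four scenarios above pin down a precedence rule: a -D flag
on the command line beats GYP_DEFINES in the environment, and
--ignore-environment drops the environment value entirely. A minimal model of
that rule (illustrative, not gyp's implementation):

    def effective_defines(cmdline, env, ignore_environment):
        merged = {}
        if not ignore_environment:
            merged.update(env)       # environment applies first...
        merged.update(cmdline)       # ...command line overrides it
        return merged

    assert effective_defines({}, {'value': '10'}, False) == {'value': '10'}
    assert effective_defines({'value': '25'}, {'value': '20'}, False) == {'value': '25'}
    assert effective_defines({}, {'value': '30'}, True) == {}  # gyp-file default (5) wins
    assert effective_defines({'value': '45'}, {'value': '40'}, True) == {'value': '45'}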
diff --git a/third_party/python/gyp/test/defines/gyptest-defines.py b/third_party/python/gyp/test/defines/gyptest-defines.py
new file mode 100755
index 0000000000..77a3af53b9
--- /dev/null
+++ b/third_party/python/gyp/test/defines/gyptest-defines.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies the build of an executable with C++ defines.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('defines.gyp')
+
+expect = """\
+FOO is defined
+VALUE is 1
+2*PAREN_VALUE is 12
+"""
+
+# CMake loudly warns about passing '#' to the compiler and drops the define.
+expect_stderr = ''
+if test.format == 'cmake':
+ expect_stderr = (
+"""WARNING: Preprocessor definitions containing '#' may not be passed on the"""
+""" compiler command line because many compilers do not support it.\n"""
+"""CMake is dropping a preprocessor definition: HASH_VALUE="a#1"\n"""
+"""Consider defining the macro in a (configured) header file.\n\n""")
+else:
+ expect += """HASH_VALUE is a#1
+"""
+
+test.build('defines.gyp', stderr=expect_stderr)
+
+test.run_built_executable('defines', stdout=expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/dependencies/a.c b/third_party/python/gyp/test/dependencies/a.c
new file mode 100755
index 0000000000..3bba111d24
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/a.c
@@ -0,0 +1,9 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+extern int funcB();
+
+int funcA() {
+ return funcB();
+}
diff --git a/third_party/python/gyp/test/dependencies/adso/all_dependent_settings_order.gyp b/third_party/python/gyp/test/dependencies/adso/all_dependent_settings_order.gyp
new file mode 100644
index 0000000000..89817d6bbb
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/adso/all_dependent_settings_order.gyp
@@ -0,0 +1,45 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'none',
+ 'sources': ['a.cc'],
+ 'all_dependent_settings': {'sources': ['a.cc']},
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'none',
+ 'sources': ['b.cc'],
+ 'all_dependent_settings': {'sources': ['b.cc']},
+ 'dependencies': ['a'],
+ },
+
+ {
+ 'target_name': 'c',
+ 'type': 'none',
+ 'sources': ['c.cc'],
+ 'all_dependent_settings': {'sources': ['c.cc']},
+ 'dependencies': ['b', 'a'],
+ },
+ {
+ 'target_name': 'd',
+ 'type': 'none',
+ 'sources': ['d.cc'],
+ 'dependencies': ['c', 'a', 'b'],
+ 'actions': [
+ {
+ 'action_name': 'write_sources',
+ 'inputs': ['write_args.py'],
+ 'outputs': ['<(PRODUCT_DIR)/out.txt'],
+ 'action': [
+ 'python',
+ 'write_args.py',
+ '<(PRODUCT_DIR)/out.txt',
+ '>@(_sources)'
+ ],
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependencies/adso/write_args.py b/third_party/python/gyp/test/dependencies/adso/write_args.py
new file mode 100755
index 0000000000..5e388b8f70
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/adso/write_args.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+f = open(sys.argv[1], 'w')
+f.write(' '.join(sys.argv[2:]))
+f.close()
diff --git a/third_party/python/gyp/test/dependencies/b/b.c b/third_party/python/gyp/test/dependencies/b/b.c
new file mode 100755
index 0000000000..b5e771bcc7
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/b/b.c
@@ -0,0 +1,3 @@
+int funcB() {
+ return 2;
+}
diff --git a/third_party/python/gyp/test/dependencies/b/b.gyp b/third_party/python/gyp/test/dependencies/b/b.gyp
new file mode 100755
index 0000000000..893dc64d65
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/b/b.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'b',
+ 'type': 'static_library',
+ 'sources': [
+ 'b.c',
+ ],
+ },
+ {
+ 'target_name': 'b3',
+ 'type': 'static_library',
+ 'sources': [
+ 'b3.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependencies/b/b3.c b/third_party/python/gyp/test/dependencies/b/b3.c
new file mode 100755
index 0000000000..287f67ff31
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/b/b3.c
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+int funcB() {
+ return 3;
+}
diff --git a/third_party/python/gyp/test/dependencies/c/c.c b/third_party/python/gyp/test/dependencies/c/c.c
new file mode 100644
index 0000000000..4949daf3ee
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/c/c.c
@@ -0,0 +1,4 @@
+int funcC() {
+ return 3
+ // Intentional syntax error. This file should never be compiled, so this
+ // shouldn't be a problem.
diff --git a/third_party/python/gyp/test/dependencies/c/c.gyp b/third_party/python/gyp/test/dependencies/c/c.gyp
new file mode 100644
index 0000000000..eabebea9ef
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/c/c.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'c_unused',
+ 'type': 'static_library',
+ 'sources': [
+ 'c.c',
+ ],
+ },
+ {
+ 'target_name': 'd',
+ 'type': 'static_library',
+ 'sources': [
+ 'd.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependencies/c/d.c b/third_party/python/gyp/test/dependencies/c/d.c
new file mode 100644
index 0000000000..05465fc1af
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/c/d.c
@@ -0,0 +1,3 @@
+int funcD() {
+ return 4;
+}
diff --git a/third_party/python/gyp/test/dependencies/double_dependency.gyp b/third_party/python/gyp/test/dependencies/double_dependency.gyp
new file mode 100644
index 0000000000..c4a2d00139
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/double_dependency.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'double_dependency',
+ 'type': 'shared_library',
+ 'dependencies': [
+ 'double_dependent.gyp:double_dependent',
+ ],
+ 'conditions': [
+ ['1==1', {
+ 'dependencies': [
+ 'double_dependent.gyp:*',
+ ],
+ }],
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/dependencies/double_dependent.gyp b/third_party/python/gyp/test/dependencies/double_dependent.gyp
new file mode 100644
index 0000000000..334caff723
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/double_dependent.gyp
@@ -0,0 +1,12 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'double_dependent',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependencies/extra_targets.gyp b/third_party/python/gyp/test/dependencies/extra_targets.gyp
new file mode 100644
index 0000000000..c1a26de422
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/extra_targets.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'static_library',
+ 'sources': [
+ 'a.c',
+ ],
+ # This only depends on the "d" target; other targets in c.gyp
+ # should not become part of the build (unlike with 'c/c.gyp:*').
+ 'dependencies': ['c/c.gyp:d'],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependencies/gyptest-all-dependent-settings-order.py b/third_party/python/gyp/test/dependencies/gyptest-all-dependent-settings-order.py
new file mode 100644
index 0000000000..715f322f41
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/gyptest-all-dependent-settings-order.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests that all_dependent_settings are processed in topological order.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('all_dependent_settings_order.gyp', chdir='adso')
+test.build('all_dependent_settings_order.gyp', chdir='adso')
+test.built_file_must_match('out.txt', 'd.cc a.cc b.cc c.cc',
+ chdir='adso')
+test.pass_test()
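
The expected 'd.cc a.cc b.cc c.cc' is d's own source followed by its
dependencies' all_dependent_settings applied in topological (dependency-first)
order. A post-order DFS reproduces that ordering; this is a sketch of the
idea, not gyp's actual algorithm:

    def topo_order(target, deps, visited=None):
        # Post-order DFS: every dependency is emitted before its dependents.
        if visited is None:
            visited = set()
        order = []
        for dep in deps.get(target, []):
            if dep not in visited:
                visited.add(dep)
                order += topo_order(dep, deps, visited)
        order.append(target)
        return order

    deps = {'d': ['c', 'a', 'b'], 'c': ['b', 'a'], 'b': ['a'], 'a': []}
    settings_order = topo_order('d', deps)[:-1]   # ['a', 'b', 'c']
    print(' '.join(['d.cc'] + ['%s.cc' % t for t in settings_order]))
    # d.cc a.cc b.cc c.cc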
diff --git a/third_party/python/gyp/test/dependencies/gyptest-double-dependency.py b/third_party/python/gyp/test/dependencies/gyptest-double-dependency.py
new file mode 100644
index 0000000000..7692740c54
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/gyptest-double-dependency.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that pulling in a dependency a second time in a conditional works for
+shared_library targets. Regression test for http://crbug.com/122588
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('double_dependency.gyp')
+
+# If running gyp worked, all is well.
+test.pass_test()
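
In double_dependency.gyp the same target is reached twice, once directly and
once through the 'double_dependent.gyp:*' wildcard in the condition, so the
regression is about collapsing duplicate edges. Order-preserving
de-duplication is one simple way to picture the fix (a sketch, not gyp's
code):

    def dedupe(dependencies):
        seen = set()
        unique = []
        for dep in dependencies:
            if dep not in seen:
                seen.add(dep)
                unique.append(dep)
        return unique

    print(dedupe(['double_dependent', 'double_dependent']))  # ['double_dependent']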
diff --git a/third_party/python/gyp/test/dependencies/gyptest-extra-targets.py b/third_party/python/gyp/test/dependencies/gyptest-extra-targets.py
new file mode 100755
index 0000000000..09b00d958b
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/gyptest-extra-targets.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that dependencies don't pull unused targets into the build.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('extra_targets.gyp',
+ '-G', 'xcode_ninja_target_pattern=^a$')
+
+# This should fail if it tries to build 'c_unused' since 'c/c.c' has a syntax
+# error and won't compile.
+test.build('extra_targets.gyp', test.ALL)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/dependencies/gyptest-indirect-module-dependency.py b/third_party/python/gyp/test/dependencies/gyptest-indirect-module-dependency.py
new file mode 100644
index 0000000000..d001b57e7d
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/gyptest-indirect-module-dependency.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure that depending on a parent target causes its downstream loadable
+modules to get built.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+CHDIR = 'module-dep'
+test.run_gyp('indirect-module-dependency.gyp', chdir=CHDIR)
+test.build('indirect-module-dependency.gyp', 'an_exe', chdir=CHDIR)
+test.built_file_must_exist(
+ test.built_file_basename('a_module', test.LOADABLE_MODULE), chdir=CHDIR)
+
+test.pass_test()
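
The point of the final check: a loadable_module is never linked into its
dependents, but it must still be scheduled for building when it is reachable
from the requested target. A minimal reachability model (simplified and
hypothetical, assuming an acyclic graph):

    def build_closure(target, graph):
        # graph maps a target name to its list of dependencies.
        scheduled = {target}
        for dep in graph[target]:
            scheduled |= build_closure(dep, graph)
        return scheduled

    graph = {
        'an_exe':   ['a_dll'],
        'a_dll':    ['a_lib'],
        'a_lib':    ['a_module'],
        'a_module': [],
    }
    assert 'a_module' in build_closure('an_exe', graph)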
diff --git a/third_party/python/gyp/test/dependencies/gyptest-lib-only.py b/third_party/python/gyp/test/dependencies/gyptest-lib-only.py
new file mode 100755
index 0000000000..3a99a7f4aa
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/gyptest-lib-only.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that a link-time-only dependency will get pulled into the set of built
+targets, even if no executable uses it.
+"""
+
+import TestGyp
+
+import sys
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('lib_only.gyp')
+
+test.build('lib_only.gyp', test.ALL)
+
+test.built_file_must_exist('a', type=test.STATIC_LIB)
+
+# TODO(bradnelson/mark):
+# On Linux and Windows a library target will at least pull its link dependencies
+# into the generated project, since not doing so confuses users.
+# This is not currently implemented on Mac, which has the opposite behavior.
+if sys.platform == 'darwin':
+ if test.format == 'xcode':
+ test.built_file_must_not_exist('b', type=test.STATIC_LIB)
+ else:
+ assert test.format in ('make', 'ninja', 'xcode-ninja')
+ test.built_file_must_exist('b', type=test.STATIC_LIB)
+else:
+ # Make puts the resulting library in a directory matching the input gyp file;
+ # for the 'b' library, that is in the 'b' subdirectory.
+ test.built_file_must_exist('b', type=test.STATIC_LIB, subdir='b')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/dependencies/gyptest-none-traversal.py b/third_party/python/gyp/test/dependencies/gyptest-none-traversal.py
new file mode 100755
index 0000000000..c09063dad3
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/gyptest-none-traversal.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that static library dependencies don't traverse none targets, unless
+explicitly specified.
+"""
+
+import TestGyp
+
+import sys
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('none_traversal.gyp')
+
+test.build('none_traversal.gyp', test.ALL)
+
+test.run_built_executable('needs_chain', stdout="2\n")
+test.run_built_executable('doesnt_need_chain', stdout="3\n")
+
+test.pass_test()
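
A hedged model of the traversal rule this test exercises: link dependencies
flow through 'none' targets unless the 'none' target sets
dependencies_traverse to 0. The names mirror none_traversal.gyp; the logic is
illustrative, not gyp's implementation:

    def link_deps(target, graph):
        # graph maps name -> (type, traverse_flag, [deps])
        _, _, deps = graph[target]
        result = []
        for dep in deps:
            dep_type, dep_traverse, _ = graph[dep]
            if dep_type != 'none':
                result.append(dep)
            elif dep_traverse:
                result += link_deps(dep, graph)  # walk through the 'none' target
        return result

    graph = {
        'needs_chain':       ('executable',     True,  ['chain']),
        'chain':             ('none',           True,  ['b']),
        'doesnt_need_chain': ('executable',     True,  ['no_chain', 'other_chain']),
        'no_chain':          ('none',           False, ['b']),
        'other_chain':       ('static_library', True,  ['b3']),
        'b':                 ('static_library', True,  []),
        'b3':                ('static_library', True,  []),
    }
    print(link_deps('needs_chain', graph))        # ['b'], so funcB() == 2
    print(link_deps('doesnt_need_chain', graph))  # ['other_chain'], so funcB() == 3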
diff --git a/third_party/python/gyp/test/dependencies/gyptest-sharedlib-linksettings.py b/third_party/python/gyp/test/dependencies/gyptest-sharedlib-linksettings.py
new file mode 100644
index 0000000000..87428af459
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/gyptest-sharedlib-linksettings.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that link_settings in a shared_library are not propagated to targets
+that depend on the shared_library, but are used in the shared_library itself.
+"""
+
+import TestGyp
+import sys
+
+CHDIR='sharedlib-linksettings'
+
+test = TestGyp.TestGyp()
+test.run_gyp('test.gyp', chdir=CHDIR)
+test.build('test.gyp', test.ALL, chdir=CHDIR)
+test.run_built_executable('program', stdout="1\n2\n", chdir=CHDIR)
+test.pass_test()
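
Sketch of the propagation rule under test, with
allow_sharedlib_linksettings_propagation disabled: a target always gets its
own link_settings, but propagation from dependencies stops at a
shared_library boundary. Illustrative only:

    def effective_defines(target, graph):
        # graph maps name -> (type, link_settings defines, [deps])
        _, own_defines, deps = graph[target]
        result = list(own_defines)
        for dep in deps:
            dep_type, _, _ = graph[dep]
            if dep_type != 'shared_library':  # propagation stops at shared libs
                result += effective_defines(dep, graph)
        return result

    graph = {
        'sharedlib': ('shared_library', ['TEST_DEFINE=1'], []),
        'staticlib': ('static_library', [],                ['sharedlib']),
        'program':   ('executable',     [],                ['staticlib']),
    }
    print(effective_defines('sharedlib', graph))  # ['TEST_DEFINE=1']
    print(effective_defines('program', graph))    # []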
diff --git a/third_party/python/gyp/test/dependencies/lib_only.gyp b/third_party/python/gyp/test/dependencies/lib_only.gyp
new file mode 100755
index 0000000000..f6c84dea64
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/lib_only.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'static_library',
+ 'sources': [
+ 'a.c',
+ ],
+ 'dependencies': ['b/b.gyp:b'],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependencies/main.c b/third_party/python/gyp/test/dependencies/main.c
new file mode 100644
index 0000000000..185bd482f2
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/main.c
@@ -0,0 +1,14 @@
+/*
+ * Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+extern int funcA();
+
+int main() {
+ printf("%d\n", funcA());
+ return 0;
+}
diff --git a/third_party/python/gyp/test/dependencies/module-dep/a.cc b/third_party/python/gyp/test/dependencies/module-dep/a.cc
new file mode 100644
index 0000000000..231fc7a9df
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/module-dep/a.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int some_function() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/dependencies/module-dep/dll.cc b/third_party/python/gyp/test/dependencies/module-dep/dll.cc
new file mode 100644
index 0000000000..e1eea0205b
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/module-dep/dll.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(_MSC_VER)
+__declspec(dllexport)
+#endif
+ void SomeFunction() {
+}
diff --git a/third_party/python/gyp/test/dependencies/module-dep/exe.cc b/third_party/python/gyp/test/dependencies/module-dep/exe.cc
new file mode 100644
index 0000000000..b3039ace96
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/module-dep/exe.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/dependencies/module-dep/indirect-module-dependency.gyp b/third_party/python/gyp/test/dependencies/module-dep/indirect-module-dependency.gyp
new file mode 100644
index 0000000000..f3fb5320fe
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/module-dep/indirect-module-dependency.gyp
@@ -0,0 +1,37 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'an_exe',
+ 'type': 'executable',
+ 'sources': ['exe.cc'],
+ 'dependencies': [
+ 'a_dll',
+ ],
+ },
+ {
+ 'target_name': 'a_dll',
+ 'type': 'shared_library',
+ 'sources': ['dll.cc'],
+ 'dependencies': [
+ 'a_lib',
+ ],
+ },
+ {
+ 'target_name': 'a_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'a_module',
+ ],
+ 'sources': ['a.cc'],
+ },
+ {
+ 'target_name': 'a_module',
+ 'type': 'loadable_module',
+ 'sources': ['a.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/dependencies/none_traversal.gyp b/third_party/python/gyp/test/dependencies/none_traversal.gyp
new file mode 100755
index 0000000000..3d8ab30aff
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/none_traversal.gyp
@@ -0,0 +1,46 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'needs_chain',
+ 'type': 'executable',
+ 'sources': [
+ 'a.c',
+ 'main.c',
+ ],
+ 'dependencies': ['chain'],
+ },
+ {
+ 'target_name': 'chain',
+ 'type': 'none',
+ 'dependencies': ['b/b.gyp:b'],
+ },
+ {
+ 'target_name': 'doesnt_need_chain',
+ 'type': 'executable',
+ 'sources': [
+ 'main.c',
+ ],
+ 'dependencies': ['no_chain', 'other_chain'],
+ },
+ {
+ 'target_name': 'no_chain',
+ 'type': 'none',
+ 'sources': [
+ ],
+ 'dependencies': ['b/b.gyp:b'],
+ 'dependencies_traverse': 0,
+ },
+ {
+ 'target_name': 'other_chain',
+ 'type': 'static_library',
+ 'sources': [
+ 'a.c',
+ ],
+ 'dependencies': ['b/b.gyp:b3'],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependencies/sharedlib-linksettings/program.c b/third_party/python/gyp/test/dependencies/sharedlib-linksettings/program.c
new file mode 100644
index 0000000000..b7c15edcd6
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/sharedlib-linksettings/program.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+/*
+ * This will fail to compile if TEST_DEFINE was propagated from sharedlib to
+ * program.
+ */
+#ifdef TEST_DEFINE
+#error TEST_DEFINE is already defined!
+#endif
+
+#define TEST_DEFINE 2
+
+extern int staticLibFunc();
+
+int main() {
+ printf("%d\n", staticLibFunc());
+ printf("%d\n", TEST_DEFINE);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/dependencies/sharedlib-linksettings/sharedlib.c b/third_party/python/gyp/test/dependencies/sharedlib-linksettings/sharedlib.c
new file mode 100644
index 0000000000..3199bccd66
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/sharedlib-linksettings/sharedlib.c
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2013 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+int sharedLibFunc() {
+ /*
+ * This will fail to compile if TEST_DEFINE was not obtained from sharedlib's
+ * link_settings.
+ */
+ return TEST_DEFINE;
+}
diff --git a/third_party/python/gyp/test/dependencies/sharedlib-linksettings/staticlib.c b/third_party/python/gyp/test/dependencies/sharedlib-linksettings/staticlib.c
new file mode 100644
index 0000000000..e889b419fd
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/sharedlib-linksettings/staticlib.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+ * This will fail to compile if TEST_DEFINE was propagated from sharedlib to
+ * staticlib.
+ */
+#ifdef TEST_DEFINE
+#error TEST_DEFINE is defined!
+#endif
+
+#ifdef _WIN32
+__declspec(dllimport)
+#else
+extern
+#endif
+int sharedLibFunc();
+
+int staticLibFunc() {
+ return sharedLibFunc();
+}
diff --git a/third_party/python/gyp/test/dependencies/sharedlib-linksettings/test.gyp b/third_party/python/gyp/test/dependencies/sharedlib-linksettings/test.gyp
new file mode 100644
index 0000000000..830ce3236d
--- /dev/null
+++ b/third_party/python/gyp/test/dependencies/sharedlib-linksettings/test.gyp
@@ -0,0 +1,37 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'allow_sharedlib_linksettings_propagation': 0,
+ },
+ 'targets': [
+ {
+ 'target_name': 'sharedlib',
+ 'type': 'shared_library',
+ 'sources': [ 'sharedlib.c' ],
+ 'link_settings': {
+ 'defines': [ 'TEST_DEFINE=1' ],
+ },
+ 'conditions': [
+ ['OS=="linux"', {
+ # Support 64-bit shared libs (also works fine for 32-bit).
+ 'cflags': ['-fPIC'],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'staticlib',
+ 'type': 'static_library',
+ 'sources': [ 'staticlib.c' ],
+ 'dependencies': [ 'sharedlib' ],
+ },
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [ 'program.c' ],
+ 'dependencies': [ 'staticlib' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependency-copy/gyptest-copy.py b/third_party/python/gyp/test/dependency-copy/gyptest-copy.py
new file mode 100755
index 0000000000..5ba7c73d41
--- /dev/null
+++ b/third_party/python/gyp/test/dependency-copy/gyptest-copy.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that dependencies trigger the copy step.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('copies.gyp', chdir='src')
+
+test.build('copies.gyp', 'proj2', chdir='src')
+
+test.run_built_executable('proj1',
+ chdir='src',
+ stdout="Hello from file1.c\n")
+test.run_built_executable('proj2',
+ chdir='src',
+ stdout="Hello from file2.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/dependency-copy/src/copies.gyp b/third_party/python/gyp/test/dependency-copy/src/copies.gyp
new file mode 100644
index 0000000000..4176b18787
--- /dev/null
+++ b/third_party/python/gyp/test/dependency-copy/src/copies.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'proj1',
+ 'type': 'executable',
+ 'sources': [
+ 'file1.c',
+ ],
+ },
+ {
+ 'target_name': 'proj2',
+ 'type': 'executable',
+ 'sources': [
+ 'file2.c',
+ ],
+ 'dependencies': [
+ 'proj1',
+ ]
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/dependency-copy/src/file1.c b/third_party/python/gyp/test/dependency-copy/src/file1.c
new file mode 100644
index 0000000000..d7c3159186
--- /dev/null
+++ b/third_party/python/gyp/test/dependency-copy/src/file1.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from file1.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/dependency-copy/src/file2.c b/third_party/python/gyp/test/dependency-copy/src/file2.c
new file mode 100644
index 0000000000..cf40f57f94
--- /dev/null
+++ b/third_party/python/gyp/test/dependency-copy/src/file2.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from file2.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/all-dependent-settings.gyp b/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/all-dependent-settings.gyp
new file mode 100644
index 0000000000..b67ccaeb69
--- /dev/null
+++ b/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/all-dependent-settings.gyp
@@ -0,0 +1,19 @@
+{
+ "targets": [
+ {
+ "target_name": "settings",
+ "type": "none",
+ "all_dependent_settings": {
+ "target_conditions": [
+ ["'library' in _type", {"all_dependent_settings": {}}]
+ ]
+ },
+ },
+ {
+ "target_name": "library",
+ "type": "static_library",
+ "dependencies": ["settings"],
+ },
+ ]
+}
+
diff --git a/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/direct-dependent-settings.gyp b/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/direct-dependent-settings.gyp
new file mode 100644
index 0000000000..6e8a6165e4
--- /dev/null
+++ b/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/direct-dependent-settings.gyp
@@ -0,0 +1,19 @@
+{
+ "targets": [
+ {
+ "target_name": "settings",
+ "type": "none",
+ "all_dependent_settings": {
+ "target_conditions": [
+ ["'library' in _type", {"direct_dependent_settings": {}}]
+ ]
+ },
+ },
+ {
+ "target_name": "library",
+ "type": "static_library",
+ "dependencies": ["settings"],
+ },
+ ]
+}
+
diff --git a/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/gyptest-nested-dependent-settings.py b/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/gyptest-nested-dependent-settings.py
new file mode 100644
index 0000000000..a45de898a4
--- /dev/null
+++ b/third_party/python/gyp/test/dependent-settings/nested-dependent-settings/gyptest-nested-dependent-settings.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies project generation with nested dependent_settings directives.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp("all-dependent-settings.gyp")
+test.run_gyp("direct-dependent-settings.gyp")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/determinism/determinism.gyp b/third_party/python/gyp/test/determinism/determinism.gyp
new file mode 100644
index 0000000000..81346748a1
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/determinism.gyp
@@ -0,0 +1,59 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'determinism',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'depfile_action',
+ 'inputs': [
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output.txt',
+ ],
+ 'depfile': 'depfile.d',
+ 'action': [ ]
+ },
+ ],
+ },
+ {
+ 'target_name': 'determinism2',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'depfile_action',
+ 'inputs': [
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output.txt',
+ ],
+ 'depfile': 'depfile.d',
+ 'action': [ ]
+ },
+ ],
+ },
+ {
+ 'target_name': 'determinism3',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'depfile_action',
+ 'inputs': [
+ 'input.txt',
+ ],
+ 'outputs': [
+ 'output.txt',
+ ],
+ 'depfile': 'depfile.d',
+ 'action': [ ]
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/determinism/empty-targets.gyp b/third_party/python/gyp/test/determinism/empty-targets.gyp
new file mode 100644
index 0000000000..a4ccdd703c
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/empty-targets.gyp
@@ -0,0 +1,32 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'empty_target1',
+ 'type': 'none',
+ },
+ {
+ 'target_name': 'empty_target2',
+ 'type': 'none',
+ },
+ {
+ 'target_name': 'empty_target3',
+ 'type': 'none',
+ },
+ {
+ 'target_name': 'empty_target4',
+ 'type': 'none',
+ },
+ {
+ 'target_name': 'empty_target5',
+ 'type': 'none',
+ },
+ {
+ 'target_name': 'empty_target6',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/determinism/gyptest-determinism.py b/third_party/python/gyp/test/determinism/gyptest-determinism.py
new file mode 100644
index 0000000000..670cb4bc9f
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/gyptest-determinism.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies builds are the same even with different PYTHONHASHSEEDs.
+Tests target_short_names and FlattenToList.
+"""
+
+import os
+import sys
+import TestGyp
+
+test = TestGyp.TestGyp()
+if test.format == 'ninja':
+ os.environ["PYTHONHASHSEED"] = "1"
+ test.run_gyp('determinism.gyp')
+ base = open(test.built_file_path('build.ninja')).read()
+
+ for i in range(1,5):
+ os.environ["PYTHONHASHSEED"] = str(i)
+ test.run_gyp('determinism.gyp')
+ contents = open(test.built_file_path('build.ninja')).read()
+ if base != contents:
+ test.fail_test()
+
+ del os.environ["PYTHONHASHSEED"]
+ test.pass_test()
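
For context, the reason PYTHONHASHSEED matters (a self-contained
illustration, not gyp code): iteration order over hash-based containers can
change between interpreter runs under hash randomization, so a deterministic
generator has to sort before writing its output:

    targets = {'determinism', 'determinism2', 'determinism3'}
    # Raw set iteration order may vary from run to run; sorted() output is
    # stable regardless of the hash seed.
    print(' '.join(sorted(targets)))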
diff --git a/third_party/python/gyp/test/determinism/gyptest-empty-target-names.py b/third_party/python/gyp/test/determinism/gyptest-empty-target-names.py
new file mode 100644
index 0000000000..cf49f50084
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/gyptest-empty-target-names.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies builds are the same even with different PYTHONHASHSEEDs.
+Tests both solibs and implicit_deps.
+"""
+
+import os
+import sys
+import TestGyp
+
+test = TestGyp.TestGyp()
+if test.format == 'ninja':
+ os.environ["PYTHONHASHSEED"] = "1"
+ test.run_gyp('empty-targets.gyp')
+ base = open(test.built_file_path('build.ninja')).read()
+
+ for i in range(1,5):
+ os.environ["PYTHONHASHSEED"] = str(i)
+ test.run_gyp('empty-targets.gyp')
+ contents = open(test.built_file_path('build.ninja')).read()
+ if base != contents:
+ test.fail_test()
+
+ del os.environ["PYTHONHASHSEED"]
+ test.pass_test()
diff --git a/third_party/python/gyp/test/determinism/gyptest-needed-variables.py b/third_party/python/gyp/test/determinism/gyptest-needed-variables.py
new file mode 100644
index 0000000000..7b97cca0d2
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/gyptest-needed-variables.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies builds are the same even with different PYTHONHASHSEEDs.
+Tests needed_variables.
+"""
+
+import os
+import sys
+import TestGyp
+
+test = TestGyp.TestGyp()
+if test.format == 'ninja':
+ os.environ["PYTHONHASHSEED"] = "1"
+ test.run_gyp('needed-variables.gyp')
+ base = open(test.built_file_path('test.ninja', subdir='obj')).read()
+
+ for i in range(1,5):
+ os.environ["PYTHONHASHSEED"] = str(i)
+ test.run_gyp('needed-variables.gyp')
+ contents = open(test.built_file_path('test.ninja', subdir='obj')).read()
+ if base != contents:
+ test.fail_test()
+
+ del os.environ["PYTHONHASHSEED"]
+ test.pass_test()
diff --git a/third_party/python/gyp/test/determinism/gyptest-solibs.py b/third_party/python/gyp/test/determinism/gyptest-solibs.py
new file mode 100644
index 0000000000..a9c312573b
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/gyptest-solibs.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies builds are the same even with different PYTHONHASHSEEDs.
+Tests all_targets, implicit_deps and solibs.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+import TestGyp
+
+test = TestGyp.TestGyp()
+if test.format == 'ninja':
+ os.environ["PYTHONHASHSEED"] = "1"
+ test.run_gyp('solibs.gyp')
+ base1 = open(test.built_file_path('c.ninja', subdir='obj')).read()
+ base2 = open(test.built_file_path('build.ninja')).read()
+
+ for i in range(1,5):
+ os.environ["PYTHONHASHSEED"] = str(i)
+ test.run_gyp('solibs.gyp')
+ contents1 = open(test.built_file_path('c.ninja', subdir='obj')).read()
+ contents2 = open(test.built_file_path('build.ninja')).read()
+ if base1 != contents1:
+ test.fail_test()
+ if base2 != contents2:
+ print(base2)
+ test.fail_test()
+
+ del os.environ["PYTHONHASHSEED"]
+ test.pass_test()
diff --git a/third_party/python/gyp/test/determinism/main.cc b/third_party/python/gyp/test/determinism/main.cc
new file mode 100644
index 0000000000..2cd74d3c77
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/main.cc
@@ -0,0 +1,5 @@
+extern int foo();
+
+int main() {
+ return foo();
+}
diff --git a/third_party/python/gyp/test/determinism/needed-variables.gyp b/third_party/python/gyp/test/determinism/needed-variables.gyp
new file mode 100644
index 0000000000..022165bebd
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/needed-variables.gyp
@@ -0,0 +1,33 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test',
+ 'type': 'executable',
+ 'sources': ['rule.ext'],
+ 'rules': [{
+ 'rule_name': 'rule',
+ 'extension': 'ext',
+ 'inputs': [ 'rule.py', ],
+ 'action': [
+ 'python',
+ 'rule.py',
+ '<(RULE_INPUT_ROOT)',
+ '<(RULE_INPUT_EXT)',
+ '<(RULE_INPUT_DIRNAME)',
+ '<(RULE_INPUT_NAME)',
+ '<(RULE_INPUT_PATH)',
+ ],
+ 'outputs': [ 'hello_world.txt' ],
+ 'sources': ['rule.ext'],
+ 'message': 'Processing <(RULE_INPUT_PATH)',
+ 'process_outputs_as_sources': 1,
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }],
+ },
+ ],
+}
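
The RULE_INPUT_* variables passed to rule.py above decompose the matched
source path. Roughly, in os.path terms (a sketch of the semantics, not gyp's
implementation):

    import os

    path = 'sub/dir/rule.ext'             # RULE_INPUT_PATH
    dirname = os.path.dirname(path)       # RULE_INPUT_DIRNAME -> 'sub/dir'
    name = os.path.basename(path)         # RULE_INPUT_NAME    -> 'rule.ext'
    root, ext = os.path.splitext(name)    # RULE_INPUT_ROOT    -> 'rule'
                                          # RULE_INPUT_EXT     -> '.ext'
    print(path, dirname, name, root, ext)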
diff --git a/third_party/python/gyp/test/determinism/rule.py b/third_party/python/gyp/test/determinism/rule.py
new file mode 100644
index 0000000000..e18c314557
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/rule.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+# Copyright (c) 2017 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+print('Hello World')
diff --git a/third_party/python/gyp/test/determinism/solib.cc b/third_party/python/gyp/test/determinism/solib.cc
new file mode 100644
index 0000000000..0856cd4e00
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/solib.cc
@@ -0,0 +1,8 @@
+#ifdef _MSC_VER
+__declspec(dllexport)
+#else
+__attribute__((visibility("default")))
+#endif
+int foo() {
+ return 42;
+}
diff --git a/third_party/python/gyp/test/determinism/solibs.gyp b/third_party/python/gyp/test/determinism/solibs.gyp
new file mode 100644
index 0000000000..9ae3246d63
--- /dev/null
+++ b/third_party/python/gyp/test/determinism/solibs.gyp
@@ -0,0 +1,32 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This test both tests solibs and implicit_deps.
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'shared_library',
+ 'sources': [ 'solib.cc' ],
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'shared_library',
+ 'sources': [ 'solib.cc' ],
+ },
+ {
+ 'target_name': 'c',
+ 'type': 'executable',
+ 'sources': [ 'main.cc' ],
+ 'dependencies': [ 'a', 'b' ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="linux"', {
+ 'target_defaults': {
+ 'cflags': ['-fPIC'],
+ },
+ }],
+ ],
+}
diff --git a/third_party/python/gyp/test/empty-target/empty-target.gyp b/third_party/python/gyp/test/empty-target/empty-target.gyp
new file mode 100644
index 0000000000..feefa28058
--- /dev/null
+++ b/third_party/python/gyp/test/empty-target/empty-target.gyp
@@ -0,0 +1,12 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'empty_target',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/empty-target/gyptest-empty-target.py b/third_party/python/gyp/test/empty-target/gyptest-empty-target.py
new file mode 100644
index 0000000000..ecadd4a87f
--- /dev/null
+++ b/third_party/python/gyp/test/empty-target/gyptest-empty-target.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that building a target with no sources succeeds.
+"""
+
+import os
+import sys
+import TestGyp
+
+test = TestGyp.TestGyp()
+test.run_gyp('empty-target.gyp')
+test.build('empty-target.gyp', target='empty_target')
+test.pass_test()
diff --git a/third_party/python/gyp/test/errors/dependency_cycle.gyp b/third_party/python/gyp/test/errors/dependency_cycle.gyp
new file mode 100644
index 0000000000..eef44bc9eb
--- /dev/null
+++ b/third_party/python/gyp/test/errors/dependency_cycle.gyp
@@ -0,0 +1,23 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'target0',
+ 'type': 'none',
+ 'dependencies': [ 'target1' ],
+ },
+ {
+ 'target_name': 'target1',
+ 'type': 'none',
+ 'dependencies': [ 'target2' ],
+ },
+ {
+ 'target_name': 'target2',
+ 'type': 'none',
+ 'dependencies': [ 'target0' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/errors/duplicate_basenames.gyp b/third_party/python/gyp/test/errors/duplicate_basenames.gyp
new file mode 100644
index 0000000000..b3dceb3949
--- /dev/null
+++ b/third_party/python/gyp/test/errors/duplicate_basenames.gyp
@@ -0,0 +1,13 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'static_library',
+ 'sources': ['foo.c', 'foo.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/errors/duplicate_node.gyp b/third_party/python/gyp/test/errors/duplicate_node.gyp
new file mode 100644
index 0000000000..d6096096bd
--- /dev/null
+++ b/third_party/python/gyp/test/errors/duplicate_node.gyp
@@ -0,0 +1,12 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ { 'target_name' : 'foo', 'type': 'executable' },
+ ],
+ 'targets': [
+ { 'target_name' : 'bar', 'type': 'executable' },
+ ]
+}
diff --git a/third_party/python/gyp/test/errors/duplicate_rule.gyp b/third_party/python/gyp/test/errors/duplicate_rule.gyp
new file mode 100644
index 0000000000..dab98e96c2
--- /dev/null
+++ b/third_party/python/gyp/test/errors/duplicate_rule.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'executable',
+ 'rules': [
+ {
+ 'rule_name': 'bar',
+ 'extension': '',
+ },
+ {
+ 'rule_name': 'bar',
+ 'extension': '',
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/errors/duplicate_targets.gyp b/third_party/python/gyp/test/errors/duplicate_targets.gyp
new file mode 100644
index 0000000000..aec470eefa
--- /dev/null
+++ b/third_party/python/gyp/test/errors/duplicate_targets.gyp
@@ -0,0 +1,14 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'foo'
+ },
+ {
+ 'target_name': 'foo'
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/errors/error_command.gyp b/third_party/python/gyp/test/errors/error_command.gyp
new file mode 100644
index 0000000000..1736fc9882
--- /dev/null
+++ b/third_party/python/gyp/test/errors/error_command.gyp
@@ -0,0 +1,12 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': '<!(["python", "-c", "import sys; sys.exit(3)"])',
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/errors/file_cycle0.gyp b/third_party/python/gyp/test/errors/file_cycle0.gyp
new file mode 100644
index 0000000000..3bfafb6cb3
--- /dev/null
+++ b/third_party/python/gyp/test/errors/file_cycle0.gyp
@@ -0,0 +1,17 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'top',
+ 'type': 'none',
+ 'dependencies': [ 'file_cycle1.gyp:middle' ],
+ },
+ {
+ 'target_name': 'bottom',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/errors/file_cycle1.gyp b/third_party/python/gyp/test/errors/file_cycle1.gyp
new file mode 100644
index 0000000000..fbd7a0d167
--- /dev/null
+++ b/third_party/python/gyp/test/errors/file_cycle1.gyp
@@ -0,0 +1,13 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'middle',
+ 'type': 'none',
+ 'dependencies': [ 'file_cycle0.gyp:bottom' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/errors/gyptest-errors.py b/third_party/python/gyp/test/errors/gyptest-errors.py
new file mode 100755
index 0000000000..0296f800f5
--- /dev/null
+++ b/third_party/python/gyp/test/errors/gyptest-errors.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test that two targets with the same name generate an error.
+"""
+
+import os
+import sys
+
+import TestGyp
+import TestCmd
+
+# TODO(sbc): Remove the use of match_re below, done because scons
+# error messages were not consistent with other generators.
+# Also remove input.py:generator_wants_absolute_build_file_paths.
+
+test = TestGyp.TestGyp()
+
+stderr = ('gyp: Duplicate target definitions for '
+ '.*duplicate_targets.gyp:foo#target\n')
+test.run_gyp('duplicate_targets.gyp', status=1, stderr=stderr,
+ match=TestCmd.match_re)
+
+stderr = ('.*: Unable to find targets in build file .*missing_targets.gyp.*')
+test.run_gyp('missing_targets.gyp', status=1, stderr=stderr,
+ match=TestCmd.match_re_dotall)
+
+stderr = ('gyp: rule bar exists in duplicate, target '
+ '.*duplicate_rule.gyp:foo#target\n')
+test.run_gyp('duplicate_rule.gyp', status=1, stderr=stderr,
+ match=TestCmd.match_re)
+
+stderr = ("gyp: Key 'targets' repeated at level 1 with key path '' while "
+ "reading .*duplicate_node.gyp.*")
+test.run_gyp('duplicate_node.gyp', '--check', status=1, stderr=stderr,
+ match=TestCmd.match_re_dotall)
+
+stderr = (".*target0.*target1.*target2.*target0.*")
+test.run_gyp('dependency_cycle.gyp', status=1, stderr=stderr,
+ match=TestCmd.match_re_dotall)
+
+stderr = (".*file_cycle0.*file_cycle1.*file_cycle0.*")
+test.run_gyp('file_cycle0.gyp', status=1, stderr=stderr,
+ match=TestCmd.match_re_dotall)
+
+stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
+test.run_gyp('duplicate_basenames.gyp', status=1, stderr=stderr)
+
+# Check if '--no-duplicate-basename-check' works.
+if ((test.format == 'make' and sys.platform == 'darwin') or
+ (test.format == 'msvs' and
+ int(os.environ.get('GYP_MSVS_VERSION', 2010)) < 2010)):
+ stderr = 'gyp: Duplicate basenames in sources section, see list above\n'
+ test.run_gyp('duplicate_basenames.gyp', '--no-duplicate-basename-check',
+ status=1, stderr=stderr)
+else:
+ test.run_gyp('duplicate_basenames.gyp', '--no-duplicate-basename-check')
+
+stderr = ("gyp: Dependency '.*missing_dep.gyp:missing.gyp#target' not found "
+ "while trying to load target .*missing_dep.gyp:foo#target\n")
+test.run_gyp('missing_dep.gyp', status=1, stderr=stderr,
+ match=TestCmd.match_re)
+
+# Make sure invalid <!() command invocations name the failing command and
+# mention the gyp file name. Use a "random" command name to trigger an ENOENT.
+stderr = (".*invalid-command-name-egtyevNif3.*netDurj9.*missing_command.gyp.*")
+test.run_gyp('missing_command.gyp', status=1, stderr=stderr,
+ match=TestCmd.match_re_dotall)
+
+# Make sure <!() commands that error out result in a message that mentions
+# the command and the gyp file name.
+stderr = (".*python.*-c.*import sys.*sys.exit.*3.*error_command.gyp.*")
+test.run_gyp('error_command.gyp', status=1, stderr=stderr,
+ match=TestCmd.match_re_dotall)
+
+test.pass_test()
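
The stderr strings above are regular expressions, compared via TestCmd's
match_re and match_re_dotall helpers. The check amounts to roughly this
(simplified; the sample path is made up):

    import re

    expected = r'gyp: Duplicate target definitions for .*duplicate_targets\.gyp:foo#target\n'
    actual = 'gyp: Duplicate target definitions for /tmp/t/duplicate_targets.gyp:foo#target\n'
    assert re.match(expected, actual)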
diff --git a/third_party/python/gyp/test/errors/missing_command.gyp b/third_party/python/gyp/test/errors/missing_command.gyp
new file mode 100644
index 0000000000..c93d9542c6
--- /dev/null
+++ b/third_party/python/gyp/test/errors/missing_command.gyp
@@ -0,0 +1,12 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': '<!(["invalid-command-name-egtyevNif3", "netDurj9"])',
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/errors/missing_dep.gyp b/third_party/python/gyp/test/errors/missing_dep.gyp
new file mode 100644
index 0000000000..08746be3d7
--- /dev/null
+++ b/third_party/python/gyp/test/errors/missing_dep.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'missing.gyp'
+ ]
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/errors/missing_targets.gyp b/third_party/python/gyp/test/errors/missing_targets.gyp
new file mode 100644
index 0000000000..13d4f924c1
--- /dev/null
+++ b/third_party/python/gyp/test/errors/missing_targets.gyp
@@ -0,0 +1,8 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ },
+}
diff --git a/third_party/python/gyp/test/escaping/colon/test.gyp b/third_party/python/gyp/test/escaping/colon/test.gyp
new file mode 100644
index 0000000000..715f95490e
--- /dev/null
+++ b/third_party/python/gyp/test/escaping/colon/test.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'colon',
+ 'type': 'executable',
+ 'sources': [
+ 'a:b.c',
+ ],
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/',
+ # MSVS2008 gets confused if the same file is in 'sources' and 'copies'
+ 'files': [ 'a:b.c-d', ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/escaping/gyptest-colon.py b/third_party/python/gyp/test/escaping/gyptest-colon.py
new file mode 100644
index 0000000000..f62f8dc65e
--- /dev/null
+++ b/third_party/python/gyp/test/escaping/gyptest-colon.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests that filenames that contain colons are handled correctly.
+(This is important for absolute paths on Windows.)
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+import TestGyp
+
+# TODO: Make colons in filenames work with make, if required.
+test = TestGyp.TestGyp(formats=['!make'])
+CHDIR = 'colon'
+
+source_name = 'colon/a:b.c'
+copies_name = 'colon/a:b.c-d'
+if sys.platform == 'win32':
+ # Windows uses : as drive separator and doesn't allow it in regular filenames.
+ # Use abspath() to create a path that contains a colon instead.
+ abs_source = os.path.abspath('colon/file.c')
+ test.write('colon/test.gyp',
+ test.read('colon/test.gyp').replace("'a:b.c'", repr(abs_source)))
+ source_name = abs_source
+
+ abs_copies = os.path.abspath('colon/file.txt')
+ test.write('colon/test.gyp',
+ test.read('colon/test.gyp').replace("'a:b.c-d'", repr(abs_copies)))
+ copies_name = abs_copies
+
+# Create the file dynamically, Windows is unhappy if a file with a colon in
+# its name is checked in.
+test.write(source_name, 'int main() {}')
+test.write(copies_name, 'foo')
+
+test.run_gyp('test.gyp', chdir=CHDIR)
+test.build('test.gyp', test.ALL, chdir=CHDIR)
+test.built_file_must_exist(os.path.basename(copies_name), chdir=CHDIR)
+test.pass_test()
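
A note on the repr() rewriting above: .gyp files use Python literal syntax,
so repr() of the absolute path yields a correctly quoted and escaped string
to splice into the file. For instance:

    path = r'C:\work\colon\file.c'
    print(repr(path))  # prints 'C:\\work\\colon\\file.c', safe to paste into a .gyp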
diff --git a/third_party/python/gyp/test/exclusion/exclusion.gyp b/third_party/python/gyp/test/exclusion/exclusion.gyp
new file mode 100644
index 0000000000..1232dabaef
--- /dev/null
+++ b/third_party/python/gyp/test/exclusion/exclusion.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ 'bogus.c',
+ 'also/not/real.c',
+ 'also/not/real2.c',
+ ],
+ 'sources!': [
+ 'bogus.c',
+ 'also/not/real.c',
+ 'also/not/real2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/exclusion/gyptest-exclusion.py b/third_party/python/gyp/test/exclusion/gyptest-exclusion.py
new file mode 100755
index 0000000000..1fc32bf871
--- /dev/null
+++ b/third_party/python/gyp/test/exclusion/gyptest-exclusion.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that exclusions (e.g. sources!) are respected. Excluded sources
+that do not exist should not prevent the build from succeeding.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('exclusion.gyp')
+test.build('exclusion.gyp')
+
+# executables
+test.built_file_must_exist('hello' + test._exe, test.EXECUTABLE, bare=True)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/exclusion/hello.c b/third_party/python/gyp/test/exclusion/hello.c
new file mode 100644
index 0000000000..6e7dc8e419
--- /dev/null
+++ b/third_party/python/gyp/test/exclusion/hello.c
@@ -0,0 +1,15 @@
+/* Copyright (c) 2010 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int func1(void) {
+ return 42;
+}
+
+int main(void) {
+ printf("Hello, world!\n");
+ printf("%d\n", func1());
+ return 0;
+}
diff --git a/third_party/python/gyp/test/external-cross-compile/gyptest-cross.py b/third_party/python/gyp/test/external-cross-compile/gyptest-cross.py
new file mode 100755
index 0000000000..a837ec57dc
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/gyptest-cross.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that actions plus a source scanner can be used to implement
+cross-compiles (for Native Client at this point).
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('cross.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('cross.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+From test1.cc
+From test2.c
+From test3.cc
+From test4.c
+"""
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/external-cross-compile/src/bogus1.cc b/third_party/python/gyp/test/external-cross-compile/src/bogus1.cc
new file mode 100644
index 0000000000..1b8d01199b
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/bogus1.cc
@@ -0,0 +1 @@
+From bogus1.cc
diff --git a/third_party/python/gyp/test/external-cross-compile/src/bogus2.c b/third_party/python/gyp/test/external-cross-compile/src/bogus2.c
new file mode 100644
index 0000000000..cbf4a123c4
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/bogus2.c
@@ -0,0 +1 @@
+From bogus2.c
diff --git a/third_party/python/gyp/test/external-cross-compile/src/cross.gyp b/third_party/python/gyp/test/external-cross-compile/src/cross.gyp
new file mode 100644
index 0000000000..aeda76b5bd
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/cross.gyp
@@ -0,0 +1,83 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': ['cross_compile.gypi'],
+ 'target_defaults': {
+ 'variables': {
+ 'nix_lame%': 0,
+ },
+ 'target_conditions': [
+ ['nix_lame==1', {
+ 'sources/': [
+ ['exclude', 'lame'],
+ ],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'dependencies': [
+ 'program_inc',
+ ],
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'sources': [
+ 'program.cc',
+ ],
+ },
+ {
+ 'target_name': 'program_inc',
+ 'type': 'none',
+ 'dependencies': ['cross_program'],
+ 'actions': [
+ {
+ 'action_name': 'program_inc',
+ 'inputs': ['<(SHARED_INTERMEDIATE_DIR)/cross_program.fake'],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/cross_program.h'],
+ 'action': ['python', 'tochar.py', '<@(_inputs)', '<@(_outputs)'],
+ },
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'target_name': 'cross_program',
+ 'type': 'none',
+ 'variables': {
+ 'cross': 1,
+ 'nix_lame': 1,
+ },
+ 'dependencies': ['cross_lib'],
+ 'sources': [
+ 'test1.cc',
+ 'test2.c',
+ 'very_lame.cc',
+ '<(SHARED_INTERMEDIATE_DIR)/cross_lib.fake',
+ ],
+ },
+ {
+ 'target_name': 'cross_lib',
+ 'type': 'none',
+ 'variables': {
+ 'cross': 1,
+ 'nix_lame': 1,
+ },
+ 'sources': [
+ 'test3.cc',
+ 'test4.c',
+ 'bogus1.cc',
+ 'bogus2.c',
+ 'sort_of_lame.cc',
+ ],
+ 'sources!': [
+ 'bogus1.cc',
+ 'bogus2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/external-cross-compile/src/cross_compile.gypi b/third_party/python/gyp/test/external-cross-compile/src/cross_compile.gypi
new file mode 100644
index 0000000000..36e651903f
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/cross_compile.gypi
@@ -0,0 +1,23 @@
+{
+ 'target_defaults': {
+ 'variables': {
+ 'cross%': 0,
+ },
+ 'target_conditions': [
+ ['cross==1', {
+ 'actions': [
+ {
+ 'action_name': 'cross compile >(_target_name)',
+ 'inputs': ['^@(_sources)'],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/>(_target_name).fake'],
+ 'action': [
+ 'python', 'fake_cross.py', '>@(_outputs)', '^@(_sources)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ }],
+ ],
+ },
+}
diff --git a/third_party/python/gyp/test/external-cross-compile/src/fake_cross.py b/third_party/python/gyp/test/external-cross-compile/src/fake_cross.py
new file mode 100644
index 0000000000..05eacc6a63
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/fake_cross.py
@@ -0,0 +1,18 @@
+#!/usr/bin/python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+fh = open(sys.argv[1], 'w')
+
+filenames = sys.argv[2:]
+
+for filename in filenames:
+ subfile = open(filename)
+ data = subfile.read()
+ subfile.close()
+ fh.write(data)
+
+fh.close()
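fake_cross.py above simply concatenates its inputs into the single output named on the command line, standing in for a real cross-compiler. The same behaviour written with context managers — a sketch, not the vendored script:

    import sys

    def fake_cross(out_path, in_paths):
        # Concatenate every input file into the output, in order.
        with open(out_path, 'w') as out:
            for path in in_paths:
                with open(path) as src:
                    out.write(src.read())

    if __name__ == '__main__':
        fake_cross(sys.argv[1], sys.argv[2:])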
diff --git a/third_party/python/gyp/test/external-cross-compile/src/program.cc b/third_party/python/gyp/test/external-cross-compile/src/program.cc
new file mode 100644
index 0000000000..5172ae90fe
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/program.cc
@@ -0,0 +1,16 @@
+/*
+ * Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+static char data[] = {
+#include "cross_program.h"
+};
+
+int main(void) {
+ fwrite(data, 1, sizeof(data), stdout);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/external-cross-compile/src/test1.cc b/third_party/python/gyp/test/external-cross-compile/src/test1.cc
new file mode 100644
index 0000000000..b584c31d15
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/test1.cc
@@ -0,0 +1 @@
+From test1.cc
diff --git a/third_party/python/gyp/test/external-cross-compile/src/test2.c b/third_party/python/gyp/test/external-cross-compile/src/test2.c
new file mode 100644
index 0000000000..367ae19ea0
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/test2.c
@@ -0,0 +1 @@
+From test2.c
diff --git a/third_party/python/gyp/test/external-cross-compile/src/test3.cc b/third_party/python/gyp/test/external-cross-compile/src/test3.cc
new file mode 100644
index 0000000000..9eb64735b8
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/test3.cc
@@ -0,0 +1 @@
+From test3.cc
diff --git a/third_party/python/gyp/test/external-cross-compile/src/test4.c b/third_party/python/gyp/test/external-cross-compile/src/test4.c
new file mode 100644
index 0000000000..8ecc33ec16
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/test4.c
@@ -0,0 +1 @@
+From test4.c
diff --git a/third_party/python/gyp/test/external-cross-compile/src/tochar.py b/third_party/python/gyp/test/external-cross-compile/src/tochar.py
new file mode 100644
index 0000000000..c0780d984f
--- /dev/null
+++ b/third_party/python/gyp/test/external-cross-compile/src/tochar.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+src = open(sys.argv[1])
+dst = open(sys.argv[2], 'w')
+for ch in src.read():
+ dst.write('%d,\n' % ord(ch))
+src.close()
+dst.close()
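tochar.py rewrites each byte of its input as a decimal literal followed by a comma and newline, so the generated header can sit inside a C char-array initializer (which is exactly how program.cc consumes cross_program.h). A hypothetical helper showing the transformation:

    def to_char_lines(text):
        # 'Hi' -> '72,\n105,\n', suitable for: static char data[] = { ... };
        return ''.join('%d,\n' % ord(ch) for ch in text)

    assert to_char_lines('Hi') == '72,\n105,\n'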
diff --git a/third_party/python/gyp/test/generator-output/actions/actions.gyp b/third_party/python/gyp/test/generator-output/actions/actions.gyp
new file mode 100644
index 0000000000..dded59aff3
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/actions.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'pull_in_all_actions',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir1/executable.gyp:*',
+ 'subdir2/none.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/actions/build/README.txt b/third_party/python/gyp/test/generator-output/actions/build/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir1/actions-out/README.txt b/third_party/python/gyp/test/generator-output/actions/subdir1/actions-out/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir1/actions-out/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir1/build/README.txt b/third_party/python/gyp/test/generator-output/actions/subdir1/build/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir1/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir1/executable.gyp b/third_party/python/gyp/test/generator-output/actions/subdir1/executable.gyp
new file mode 100644
index 0000000000..6bdd60a1fb
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir1/executable.gyp
@@ -0,0 +1,44 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'program.c',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'make-prog1',
+ 'inputs': [
+ 'make-prog1.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/prog1.c',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ {
+ 'action_name': 'make-prog2',
+ 'inputs': [
+ 'make-prog2.py',
+ ],
+ 'outputs': [
+ 'actions-out/prog2.c',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir1/make-prog1.py b/third_party/python/gyp/test/generator-output/actions/subdir1/make-prog1.py
new file mode 100755
index 0000000000..7ea1d8a2d4
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir1/make-prog1.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = r"""
+#include <stdio.h>
+
+void prog1(void)
+{
+ printf("Hello from make-prog1.py\n");
+}
+"""
+
+open(sys.argv[1], 'w').write(contents)
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir1/make-prog2.py b/third_party/python/gyp/test/generator-output/actions/subdir1/make-prog2.py
new file mode 100755
index 0000000000..0bfe4973c2
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir1/make-prog2.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = r"""
+#include <stdio.h>
+
+void prog2(void)
+{
+ printf("Hello from make-prog2.py\n");
+}
+"""
+
+open(sys.argv[1], 'w').write(contents)
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir1/program.c b/third_party/python/gyp/test/generator-output/actions/subdir1/program.c
new file mode 100644
index 0000000000..c0931534eb
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir1/program.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+
+extern void prog1(void);
+extern void prog2(void);
+
+int main(void)
+{
+ printf("Hello from program.c\n");
+ prog1();
+ prog2();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir2/actions-out/README.txt b/third_party/python/gyp/test/generator-output/actions/subdir2/actions-out/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir2/actions-out/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir2/build/README.txt b/third_party/python/gyp/test/generator-output/actions/subdir2/build/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir2/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir2/make-file.py b/third_party/python/gyp/test/generator-output/actions/subdir2/make-file.py
new file mode 100755
index 0000000000..088a05e0b0
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir2/make-file.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = "Hello from make-file.py\n"
+
+open(sys.argv[1], 'w').write(contents)
diff --git a/third_party/python/gyp/test/generator-output/actions/subdir2/none.gyp b/third_party/python/gyp/test/generator-output/actions/subdir2/none.gyp
new file mode 100644
index 0000000000..f98f52753d
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/actions/subdir2/none.gyp
@@ -0,0 +1,31 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'file',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'actions': [
+ {
+ 'action_name': 'make-file',
+ 'inputs': [
+ 'make-file.py',
+ ],
+ 'outputs': [
+ 'actions-out/file.out',
+ # TODO: enhance testing infrastructure to test this
+ # without having to hard-code the intermediate dir paths.
+ #'<(INTERMEDIATE_DIR)/file.out',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ }
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/copies/build/README.txt b/third_party/python/gyp/test/generator-output/copies/build/README.txt
new file mode 100644
index 0000000000..90ef886193
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/copies/copies-out/README.txt b/third_party/python/gyp/test/generator-output/copies/copies-out/README.txt
new file mode 100644
index 0000000000..90ef886193
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/copies-out/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/copies/copies.gyp b/third_party/python/gyp/test/generator-output/copies/copies.gyp
new file mode 100644
index 0000000000..479a3d9b6e
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/copies.gyp
@@ -0,0 +1,50 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'pull_in_subdir',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir/subdir.gyp:*',
+ ],
+ },
+ {
+ 'target_name': 'copies1',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': 'copies-out',
+ 'files': [
+ 'file1',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'copies2',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out',
+ 'files': [
+ 'file2',
+ ],
+ },
+ ],
+ },
+ # Verify that a null 'files' list doesn't gag the generators.
+ {
+ 'target_name': 'copies_null',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-null',
+ 'files': [],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/copies/file1 b/third_party/python/gyp/test/generator-output/copies/file1
new file mode 100644
index 0000000000..84d55c5759
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/file1
@@ -0,0 +1 @@
+file1 contents
diff --git a/third_party/python/gyp/test/generator-output/copies/file2 b/third_party/python/gyp/test/generator-output/copies/file2
new file mode 100644
index 0000000000..af1b8ae35d
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/file2
@@ -0,0 +1 @@
+file2 contents
diff --git a/third_party/python/gyp/test/generator-output/copies/subdir/build/README.txt b/third_party/python/gyp/test/generator-output/copies/subdir/build/README.txt
new file mode 100644
index 0000000000..90ef886193
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/subdir/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/copies/subdir/copies-out/README.txt b/third_party/python/gyp/test/generator-output/copies/subdir/copies-out/README.txt
new file mode 100644
index 0000000000..90ef886193
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/subdir/copies-out/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/copies/subdir/file3 b/third_party/python/gyp/test/generator-output/copies/subdir/file3
new file mode 100644
index 0000000000..43f16f3522
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/subdir/file3
@@ -0,0 +1 @@
+file3 contents
diff --git a/third_party/python/gyp/test/generator-output/copies/subdir/file4 b/third_party/python/gyp/test/generator-output/copies/subdir/file4
new file mode 100644
index 0000000000..5f7270a084
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/subdir/file4
@@ -0,0 +1 @@
+file4 contents
diff --git a/third_party/python/gyp/test/generator-output/copies/subdir/subdir.gyp b/third_party/python/gyp/test/generator-output/copies/subdir/subdir.gyp
new file mode 100644
index 0000000000..af031d283a
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/copies/subdir/subdir.gyp
@@ -0,0 +1,32 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'copies3',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': 'copies-out',
+ 'files': [
+ 'file3',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'copies4',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/copies-out',
+ 'files': [
+ 'file4',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/gyptest-actions.py b/third_party/python/gyp/test/generator-output/gyptest-actions.py
new file mode 100755
index 0000000000..47121d0770
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-actions.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies --generator-output= behavior when using actions.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# All the generated files should go under 'gypfiles'. The source directory
+# ('actions') should be untouched.
+test.writable(test.workpath('actions'), False)
+test.run_gyp('actions.gyp',
+ '--generator-output=' + test.workpath('gypfiles'),
+ chdir='actions')
+
+test.writable(test.workpath('actions'), True)
+
+test.relocate('actions', 'relocate/actions')
+test.relocate('gypfiles', 'relocate/gypfiles')
+
+test.writable(test.workpath('relocate/actions'), False)
+
+# Some of the action outputs use "pure" relative paths (i.e. without prefixes
+# like <(INTERMEDIATE_DIR) or <(PRODUCT_DIR)). Even though we are building under
+# 'gypfiles', such outputs will still be created relative to the original .gyp
+# sources. Projects normally wouldn't do this, since it largely defeats the
+# purpose of '--generator-output', but it is supported behaviour.
+test.writable(test.workpath('relocate/actions/build'), True)
+test.writable(test.workpath('relocate/actions/subdir1/build'), True)
+test.writable(test.workpath('relocate/actions/subdir1/actions-out'), True)
+test.writable(test.workpath('relocate/actions/subdir2/build'), True)
+test.writable(test.workpath('relocate/actions/subdir2/actions-out'), True)
+
+test.build('actions.gyp', test.ALL, chdir='relocate/gypfiles')
+
+expect = """\
+Hello from program.c
+Hello from make-prog1.py
+Hello from make-prog2.py
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/actions/subdir1'
+else:
+ chdir = 'relocate/gypfiles'
+test.run_built_executable('program', chdir=chdir, stdout=expect)
+
+test.must_match('relocate/actions/subdir2/actions-out/file.out',
+ "Hello from make-file.py\n")
+
+test.pass_test()
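These tests rely heavily on TestGyp's writable() helper to prove that generators never write into the source tree. An approximation of what such a helper does, using only the standard library (the real TestGyp implementation may differ):

    import os
    import stat

    def set_writable(top, writable):
        # Toggle the owner write bit on 'top' and everything under it.
        paths = [top]
        for dirpath, dirnames, filenames in os.walk(top):
            paths += [os.path.join(dirpath, n) for n in dirnames + filenames]
        for path in paths:
            mode = os.stat(path).st_mode
            if writable:
                os.chmod(path, mode | stat.S_IWUSR)
            else:
                os.chmod(path, mode & ~stat.S_IWUSR)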
diff --git a/third_party/python/gyp/test/generator-output/gyptest-copies.py b/third_party/python/gyp/test/generator-output/gyptest-copies.py
new file mode 100755
index 0000000000..262dfc30fa
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-copies.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies file copies with --generator-output using an explicit build
+target of 'all'.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.writable(test.workpath('copies'), False)
+
+test.run_gyp('copies.gyp',
+ '--generator-output=' + test.workpath('gypfiles'),
+ '-G', 'xcode_ninja_target_pattern=^(?!copies_null)',
+ chdir='copies')
+
+test.writable(test.workpath('copies'), True)
+
+test.relocate('copies', 'relocate/copies')
+test.relocate('gypfiles', 'relocate/gypfiles')
+
+test.writable(test.workpath('relocate/copies'), False)
+
+test.writable(test.workpath('relocate/copies/build'), True)
+test.writable(test.workpath('relocate/copies/copies-out'), True)
+test.writable(test.workpath('relocate/copies/subdir/build'), True)
+test.writable(test.workpath('relocate/copies/subdir/copies-out'), True)
+
+test.build('copies.gyp', test.ALL, chdir='relocate/gypfiles')
+
+test.must_match(['relocate', 'copies', 'copies-out', 'file1'],
+ "file1 contents\n")
+
+if test.format == 'xcode':
+ chdir = 'relocate/copies/build'
+elif test.format in ['make', 'ninja', 'xcode-ninja', 'cmake']:
+ chdir = 'relocate/gypfiles/out'
+else:
+ chdir = 'relocate/gypfiles'
+test.must_match([chdir, 'Default', 'copies-out', 'file2'], "file2 contents\n")
+
+test.must_match(['relocate', 'copies', 'subdir', 'copies-out', 'file3'],
+ "file3 contents\n")
+
+if test.format == 'xcode':
+ chdir = 'relocate/copies/subdir/build'
+elif test.format in ['make', 'ninja', 'xcode-ninja', 'cmake']:
+ chdir = 'relocate/gypfiles/out'
+else:
+ chdir = 'relocate/gypfiles'
+test.must_match([chdir, 'Default', 'copies-out', 'file4'], "file4 contents\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/generator-output/gyptest-depth.py b/third_party/python/gyp/test/generator-output/gyptest-depth.py
new file mode 100755
index 0000000000..ee59a11f04
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-depth.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a project hierarchy created when the --generator-output=
+and --depth= options are used to put the build configuration files in a
+separate directory tree.
+"""
+
+import TestGyp
+import os
+
+# This is a regression test for the make generator only.
+test = TestGyp.TestGyp(formats=['make'])
+
+test.writable(test.workpath('src'), False)
+
+toplevel_dir = os.path.basename(test.workpath())
+
+test.run_gyp(os.path.join(toplevel_dir, 'src', 'prog1.gyp'),
+ '-Dset_symroot=1',
+ '--generator-output=gypfiles',
+ depth=toplevel_dir,
+ chdir='..')
+
+test.writable(test.workpath('src/build'), True)
+test.writable(test.workpath('src/subdir2/build'), True)
+test.writable(test.workpath('src/subdir3/build'), True)
+
+test.build('prog1.gyp', test.ALL, chdir='gypfiles')
+
+chdir = 'gypfiles'
+
+expect = """\
+Hello from %s
+Hello from inc.h
+Hello from inc1/include1.h
+Hello from inc2/include2.h
+Hello from inc3/include3.h
+Hello from subdir2/deeper/deeper.h
+"""
+
+if test.format == 'xcode':
+ chdir = 'src'
+test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
+
+if test.format == 'xcode':
+ chdir = 'src/subdir2'
+test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
+
+if test.format == 'xcode':
+ chdir = 'src/subdir3'
+test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/generator-output/gyptest-mac-bundle.py b/third_party/python/gyp/test/generator-output/gyptest-mac-bundle.py
new file mode 100644
index 0000000000..14597d8de2
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-mac-bundle.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that Mac bundles work with --generator-output.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=[])
+
+ MAC_BUNDLE_DIR = 'mac-bundle'
+ GYPFILES_DIR = 'gypfiles'
+ test.writable(test.workpath(MAC_BUNDLE_DIR), False)
+ test.run_gyp('test.gyp',
+ '--generator-output=' + test.workpath(GYPFILES_DIR),
+ chdir=MAC_BUNDLE_DIR)
+ test.writable(test.workpath(MAC_BUNDLE_DIR), True)
+
+ test.build('test.gyp', test.ALL, chdir=GYPFILES_DIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/generator-output/gyptest-relocate.py b/third_party/python/gyp/test/generator-output/gyptest-relocate.py
new file mode 100755
index 0000000000..b867a6cffb
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-relocate.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a project hierarchy created with the --generator-output=
+option can be built even when it's relocated to a different path.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.writable(test.workpath('src'), False)
+
+test.run_gyp('prog1.gyp',
+ '-Dset_symroot=1',
+ '--generator-output=' + test.workpath('gypfiles'),
+ chdir='src')
+
+test.writable(test.workpath('src'), True)
+
+test.relocate('src', 'relocate/src')
+test.relocate('gypfiles', 'relocate/gypfiles')
+
+test.writable(test.workpath('relocate/src'), False)
+
+test.writable(test.workpath('relocate/src/build'), True)
+test.writable(test.workpath('relocate/src/subdir2/build'), True)
+test.writable(test.workpath('relocate/src/subdir3/build'), True)
+
+test.build('prog1.gyp', test.ALL, chdir='relocate/gypfiles')
+
+chdir = 'relocate/gypfiles'
+
+expect = """\
+Hello from %s
+Hello from inc.h
+Hello from inc1/include1.h
+Hello from inc2/include2.h
+Hello from inc3/include3.h
+Hello from subdir2/deeper/deeper.h
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/src'
+test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir2'
+test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir3'
+test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/generator-output/gyptest-rules.py b/third_party/python/gyp/test/generator-output/gyptest-rules.py
new file mode 100755
index 0000000000..a3ff8bd858
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-rules.py
@@ -0,0 +1,58 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies --generator-output= behavior when using rules.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.writable(test.workpath('rules'), False)
+
+test.run_gyp('rules.gyp',
+ '--generator-output=' + test.workpath('gypfiles'),
+ chdir='rules')
+
+test.writable(test.workpath('rules'), True)
+
+test.relocate('rules', 'relocate/rules')
+test.relocate('gypfiles', 'relocate/gypfiles')
+
+test.writable(test.workpath('relocate/rules'), False)
+
+test.writable(test.workpath('relocate/rules/build'), True)
+test.writable(test.workpath('relocate/rules/subdir1/build'), True)
+test.writable(test.workpath('relocate/rules/subdir2/build'), True)
+test.writable(test.workpath('relocate/rules/subdir2/rules-out'), True)
+
+test.build('rules.gyp', test.ALL, chdir='relocate/gypfiles')
+
+expect = """\
+Hello from program.c
+Hello from function1.in1
+Hello from function2.in1
+Hello from define3.in0
+Hello from define4.in0
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/rules/subdir1'
+else:
+ chdir = 'relocate/gypfiles'
+test.run_built_executable('program', chdir=chdir, stdout=expect)
+
+test.must_match('relocate/rules/subdir2/rules-out/file1.out',
+ "Hello from file1.in0\n")
+test.must_match('relocate/rules/subdir2/rules-out/file2.out',
+ "Hello from file2.in0\n")
+test.must_match('relocate/rules/subdir2/rules-out/file3.out',
+ "Hello from file3.in1\n")
+test.must_match('relocate/rules/subdir2/rules-out/file4.out',
+ "Hello from file4.in1\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/generator-output/gyptest-subdir2-deep.py b/third_party/python/gyp/test/generator-output/gyptest-subdir2-deep.py
new file mode 100755
index 0000000000..ec7862ddd9
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-subdir2-deep.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a target from a .gyp file a few subdirectories
+deep when the --generator-output= option is used to put the build
+configuration files in a separate directory tree.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.writable(test.workpath('src'), False)
+
+test.writable(test.workpath('src/subdir2/deeper/build'), True)
+
+test.run_gyp('deeper.gyp',
+ '-Dset_symroot=1',
+ '--generator-output=' + test.workpath('gypfiles'),
+ chdir='src/subdir2/deeper')
+
+test.build('deeper.gyp', test.ALL, chdir='gypfiles')
+
+chdir = 'gypfiles'
+
+if test.format == 'xcode':
+ chdir = 'src/subdir2/deeper'
+test.run_built_executable('deeper',
+ chdir=chdir,
+ stdout="Hello from deeper.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/generator-output/gyptest-symlink.py b/third_party/python/gyp/test/generator-output/gyptest-symlink.py
new file mode 100755
index 0000000000..d7fe05830f
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-symlink.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a target when the --generator-output= option is used to put
+the build configuration files in a separate directory tree referenced by a
+symlink.
+"""
+
+import TestGyp
+import os
+import sys
+
+test = TestGyp.TestGyp()
+if not hasattr(os, 'symlink') or sys.platform == 'win32':
+  # Python 3 on Windows has os.symlink, but it doesn't work reliably.
+ test.skip_test('Missing or bad os.symlink -- skipping test.\n')
+
+test.writable(test.workpath('src'), False)
+
+test.writable(test.workpath('src/subdir2/deeper/build'), True)
+
+test.subdir(test.workpath('build'))
+test.subdir(test.workpath('build/deeper'))
+test.symlink('build/deeper', test.workpath('symlink'))
+
+test.writable(test.workpath('build/deeper'), True)
+test.run_gyp('deeper.gyp',
+ '-Dset_symroot=2',
+ '--generator-output=' + test.workpath('symlink'),
+ chdir='src/subdir2/deeper')
+
+chdir = 'symlink'
+test.build('deeper.gyp', test.ALL, chdir=chdir)
+
+if test.format == 'xcode':
+ chdir = 'src/subdir2/deeper'
+test.run_built_executable('deeper',
+ chdir=chdir,
+ stdout="Hello from deeper.c\n")
+test.pass_test()
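The guard above only checks for the presence of os.symlink and skips Windows outright. A stricter probe would actually attempt a link and fall back on failure; a Python 3 sketch with a helper name of our choosing:

    import os
    import sys
    import tempfile

    def symlinks_usable():
        # os.symlink can exist (Python 3 on Windows) yet still fail
        # without the right privileges, so actually try creating one.
        if not hasattr(os, 'symlink'):
            return False
        with tempfile.TemporaryDirectory() as tmp:
            target = os.path.join(tmp, 'target')
            open(target, 'w').close()
            try:
                os.symlink(target, os.path.join(tmp, 'link'))
            except (OSError, NotImplementedError):
                return False
        return True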
diff --git a/third_party/python/gyp/test/generator-output/gyptest-top-all.py b/third_party/python/gyp/test/generator-output/gyptest-top-all.py
new file mode 100755
index 0000000000..b1776776ea
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/gyptest-top-all.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a project hierarchy created when the --generator-output=
+option is used to put the build configuration files in a separate
+directory tree.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.writable(test.workpath('src'), False)
+
+test.run_gyp('prog1.gyp',
+ '-Dset_symroot=1',
+ '--generator-output=' + test.workpath('gypfiles'),
+ chdir='src')
+
+test.writable(test.workpath('src/build'), True)
+test.writable(test.workpath('src/subdir2/build'), True)
+test.writable(test.workpath('src/subdir3/build'), True)
+
+test.build('prog1.gyp', test.ALL, chdir='gypfiles')
+
+chdir = 'gypfiles'
+
+expect = """\
+Hello from %s
+Hello from inc.h
+Hello from inc1/include1.h
+Hello from inc2/include2.h
+Hello from inc3/include3.h
+Hello from subdir2/deeper/deeper.h
+"""
+
+if test.format == 'xcode':
+ chdir = 'src'
+test.run_built_executable('prog1', chdir=chdir, stdout=expect % 'prog1.c')
+
+if test.format == 'xcode':
+ chdir = 'src/subdir2'
+test.run_built_executable('prog2', chdir=chdir, stdout=expect % 'prog2.c')
+
+if test.format == 'xcode':
+ chdir = 'src/subdir3'
+test.run_built_executable('prog3', chdir=chdir, stdout=expect % 'prog3.c')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/generator-output/mac-bundle/Info.plist b/third_party/python/gyp/test/generator-output/mac-bundle/Info.plist
new file mode 100644
index 0000000000..8cb142e9f5
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/mac-bundle/Info.plist
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>ause</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSMinimumSystemVersion</key>
+ <string>${MACOSX_DEPLOYMENT_TARGET}</string>
+ <key>NSMainNibFile</key>
+ <string>MainMenu</string>
+ <key>NSPrincipalClass</key>
+ <string>NSApplication</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/generator-output/mac-bundle/app.order b/third_party/python/gyp/test/generator-output/mac-bundle/app.order
new file mode 100644
index 0000000000..4eb9e89d39
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/mac-bundle/app.order
@@ -0,0 +1 @@
+_main
diff --git a/third_party/python/gyp/test/generator-output/mac-bundle/header.h b/third_party/python/gyp/test/generator-output/mac-bundle/header.h
new file mode 100644
index 0000000000..7ed7775122
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/mac-bundle/header.h
@@ -0,0 +1 @@
+int f();
diff --git a/third_party/python/gyp/test/generator-output/mac-bundle/main.c b/third_party/python/gyp/test/generator-output/mac-bundle/main.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/mac-bundle/main.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/generator-output/mac-bundle/resource.sb b/third_party/python/gyp/test/generator-output/mac-bundle/resource.sb
new file mode 100644
index 0000000000..731befc457
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/mac-bundle/resource.sb
@@ -0,0 +1 @@
+A text file.
diff --git a/third_party/python/gyp/test/generator-output/mac-bundle/test.gyp b/third_party/python/gyp/test/generator-output/mac-bundle/test.gyp
new file mode 100644
index 0000000000..35ac674f6d
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/mac-bundle/test.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App Gyp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'main.c',
+ ],
+ 'mac_bundle_resources': [
+ 'resource.sb',
+ ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ 'ORDER_FILE': 'app.order',
+ 'GCC_PREFIX_HEADER': 'header.h',
+ 'GCC_PRECOMPILE_PREFIX_HEADER': 'YES',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/rules/build/README.txt b/third_party/python/gyp/test/generator-output/rules/build/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/rules/copy-file.py b/third_party/python/gyp/test/generator-output/rules/copy-file.py
new file mode 100755
index 0000000000..80c6749f93
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/copy-file.py
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+contents = open(sys.argv[1], 'r').read()
+open(sys.argv[2], 'w').write(contents)
+
+sys.exit(0)
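copy-file.py reads the whole input and writes it back out; the standard library's shutil.copyfile would have the same effect in one call. A sketch, not the vendored rule script:

    import shutil
    import sys

    # Same effect as the read/write pair above, byte-for-byte.
    shutil.copyfile(sys.argv[1], sys.argv[2])
    sys.exit(0)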
diff --git a/third_party/python/gyp/test/generator-output/rules/rules.gyp b/third_party/python/gyp/test/generator-output/rules/rules.gyp
new file mode 100644
index 0000000000..dded59aff3
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/rules.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'pull_in_all_actions',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir1/executable.gyp:*',
+ 'subdir2/none.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir1/build/README.txt b/third_party/python/gyp/test/generator-output/rules/subdir1/build/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir1/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir1/define3.in0 b/third_party/python/gyp/test/generator-output/rules/subdir1/define3.in0
new file mode 100644
index 0000000000..cc29c643f3
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir1/define3.in0
@@ -0,0 +1 @@
+#define STRING3 "Hello from define3.in0\n"
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir1/define4.in0 b/third_party/python/gyp/test/generator-output/rules/subdir1/define4.in0
new file mode 100644
index 0000000000..c9b0467b32
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir1/define4.in0
@@ -0,0 +1 @@
+#define STRING4 "Hello from define4.in0\n"
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir1/executable.gyp b/third_party/python/gyp/test/generator-output/rules/subdir1/executable.gyp
new file mode 100644
index 0000000000..42bee4d746
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir1/executable.gyp
@@ -0,0 +1,59 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'program.c',
+ 'function1.in1',
+ 'function2.in1',
+ 'define3.in0',
+ 'define4.in0',
+ ],
+ 'include_dirs': [
+ '<(INTERMEDIATE_DIR)',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file_0',
+ 'extension': 'in0',
+ 'inputs': [
+ '../copy-file.py',
+ ],
+ 'outputs': [
+ # TODO: fix Make to support generated files not
+ # in a variable-named path like <(INTERMEDIATE_DIR)
+ #'<(RULE_INPUT_ROOT).c',
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).h',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 0,
+ },
+ {
+ 'rule_name': 'copy_file_1',
+ 'extension': 'in1',
+ 'inputs': [
+ '../copy-file.py',
+ ],
+ 'outputs': [
+ # TODO: fix Make to support generated files not
+ # in a variable-named path like <(INTERMEDIATE_DIR)
+ #'<(RULE_INPUT_ROOT).c',
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).c',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
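A gyp 'rules' entry fires once per source file whose extension matches, substituting RULE_INPUT_PATH and RULE_INPUT_ROOT (the basename without its extension) for each file. A rough model of that expansion — names here are ours, and this ignores gyp's other RULE_INPUT_* variables:

    import os

    def expand_rule(rule, sources):
        # One (input, outputs) step per source matching the extension.
        steps = []
        for src in sources:
            root, ext = os.path.splitext(src)
            if ext.lstrip('.') != rule['extension']:
                continue
            base = os.path.basename(root)
            outputs = [o.replace('<(RULE_INPUT_ROOT)', base)
                       for o in rule['outputs']]
            steps.append((src, outputs))
        return steps

    rule = {'extension': 'in1',
            'outputs': ['<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).c']}
    print(expand_rule(rule, ['program.c', 'function1.in1', 'function2.in1']))
    # -> [('function1.in1', ['<(INTERMEDIATE_DIR)/function1.c']),
    #     ('function2.in1', ['<(INTERMEDIATE_DIR)/function2.c'])]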
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir1/function1.in1 b/third_party/python/gyp/test/generator-output/rules/subdir1/function1.in1
new file mode 100644
index 0000000000..545e7ca16b
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir1/function1.in1
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void function1(void)
+{
+ printf("Hello from function1.in1\n");
+}
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir1/function2.in1 b/third_party/python/gyp/test/generator-output/rules/subdir1/function2.in1
new file mode 100644
index 0000000000..6bad43f9cf
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir1/function2.in1
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void function2(void)
+{
+ printf("Hello from function2.in1\n");
+}
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir1/program.c b/third_party/python/gyp/test/generator-output/rules/subdir1/program.c
new file mode 100644
index 0000000000..56b320632a
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir1/program.c
@@ -0,0 +1,18 @@
+#include <stdio.h>
+#include "define3.h"
+#include "define4.h"
+
+extern void function1(void);
+extern void function2(void);
+extern void function3(void);
+extern void function4(void);
+
+int main(void)
+{
+ printf("Hello from program.c\n");
+ function1();
+ function2();
+ printf("%s", STRING3);
+ printf("%s", STRING4);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir2/build/README.txt b/third_party/python/gyp/test/generator-output/rules/subdir2/build/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir2/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir2/file1.in0 b/third_party/python/gyp/test/generator-output/rules/subdir2/file1.in0
new file mode 100644
index 0000000000..7aca64f4ce
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir2/file1.in0
@@ -0,0 +1 @@
+Hello from file1.in0
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir2/file2.in0 b/third_party/python/gyp/test/generator-output/rules/subdir2/file2.in0
new file mode 100644
index 0000000000..80a281a2a9
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir2/file2.in0
@@ -0,0 +1 @@
+Hello from file2.in0
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir2/file3.in1 b/third_party/python/gyp/test/generator-output/rules/subdir2/file3.in1
new file mode 100644
index 0000000000..60ae2e7931
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir2/file3.in1
@@ -0,0 +1 @@
+Hello from file3.in1
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir2/file4.in1 b/third_party/python/gyp/test/generator-output/rules/subdir2/file4.in1
new file mode 100644
index 0000000000..5a3c30720e
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir2/file4.in1
@@ -0,0 +1 @@
+Hello from file4.in1
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir2/none.gyp b/third_party/python/gyp/test/generator-output/rules/subdir2/none.gyp
new file mode 100644
index 0000000000..664cbd9cb7
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir2/none.gyp
@@ -0,0 +1,49 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'files',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'file1.in0',
+ 'file2.in0',
+ 'file3.in1',
+ 'file4.in1',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file_0',
+ 'extension': 'in0',
+ 'inputs': [
+ '../copy-file.py',
+ ],
+ 'outputs': [
+ 'rules-out/<(RULE_INPUT_ROOT).out',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 0,
+ },
+ {
+ 'rule_name': 'copy_file_1',
+ 'extension': 'in1',
+ 'inputs': [
+ '../copy-file.py',
+ ],
+ 'outputs': [
+ 'rules-out/<(RULE_INPUT_ROOT).out',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/rules/subdir2/rules-out/README.txt b/third_party/python/gyp/test/generator-output/rules/subdir2/rules-out/README.txt
new file mode 100644
index 0000000000..1b052c9a24
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/rules/subdir2/rules-out/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/src/build/README.txt b/third_party/python/gyp/test/generator-output/src/build/README.txt
new file mode 100644
index 0000000000..90ef886193
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/src/inc.h b/third_party/python/gyp/test/generator-output/src/inc.h
new file mode 100644
index 0000000000..57aa1a5a74
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/inc.h
@@ -0,0 +1 @@
+#define INC_STRING "inc.h"
diff --git a/third_party/python/gyp/test/generator-output/src/inc1/include1.h b/third_party/python/gyp/test/generator-output/src/inc1/include1.h
new file mode 100644
index 0000000000..1d59065fc9
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/inc1/include1.h
@@ -0,0 +1 @@
+#define INCLUDE1_STRING "inc1/include1.h"
diff --git a/third_party/python/gyp/test/generator-output/src/prog1.c b/third_party/python/gyp/test/generator-output/src/prog1.c
new file mode 100644
index 0000000000..bf7c2a17bd
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/prog1.c
@@ -0,0 +1,18 @@
+#include <stdio.h>
+
+#include "inc.h"
+#include "include1.h"
+#include "include2.h"
+#include "include3.h"
+#include "deeper.h"
+
+int main(void)
+{
+ printf("Hello from prog1.c\n");
+ printf("Hello from %s\n", INC_STRING);
+ printf("Hello from %s\n", INCLUDE1_STRING);
+ printf("Hello from %s\n", INCLUDE2_STRING);
+ printf("Hello from %s\n", INCLUDE3_STRING);
+ printf("Hello from %s\n", DEEPER_STRING);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/generator-output/src/prog1.gyp b/third_party/python/gyp/test/generator-output/src/prog1.gyp
new file mode 100644
index 0000000000..d50e6fb0a7
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/prog1.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'symroot.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog1',
+ 'type': 'executable',
+ 'dependencies': [
+ 'subdir2/prog2.gyp:prog2',
+ ],
+ 'include_dirs': [
+ '.',
+ 'inc1',
+ 'subdir2/inc2',
+ 'subdir3/inc3',
+ 'subdir2/deeper',
+ ],
+ 'sources': [
+ 'prog1.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/src/subdir2/build/README.txt b/third_party/python/gyp/test/generator-output/src/subdir2/build/README.txt
new file mode 100644
index 0000000000..90ef886193
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir2/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/src/subdir2/deeper/build/README.txt b/third_party/python/gyp/test/generator-output/src/subdir2/deeper/build/README.txt
new file mode 100644
index 0000000000..90ef886193
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir2/deeper/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.c b/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.c
new file mode 100644
index 0000000000..843505cd11
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from deeper.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.gyp b/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.gyp
new file mode 100644
index 0000000000..8648770872
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../../symroot.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'deeper',
+ 'type': 'executable',
+ 'sources': [
+ 'deeper.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.h b/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.h
new file mode 100644
index 0000000000..f6484a0fe5
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir2/deeper/deeper.h
@@ -0,0 +1 @@
+#define DEEPER_STRING "subdir2/deeper/deeper.h"
diff --git a/third_party/python/gyp/test/generator-output/src/subdir2/inc2/include2.h b/third_party/python/gyp/test/generator-output/src/subdir2/inc2/include2.h
new file mode 100644
index 0000000000..1ccfa5dea7
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir2/inc2/include2.h
@@ -0,0 +1 @@
+#define INCLUDE2_STRING "inc2/include2.h"
diff --git a/third_party/python/gyp/test/generator-output/src/subdir2/prog2.c b/third_party/python/gyp/test/generator-output/src/subdir2/prog2.c
new file mode 100644
index 0000000000..d80d871984
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir2/prog2.c
@@ -0,0 +1,18 @@
+#include <stdio.h>
+
+#include "inc.h"
+#include "include1.h"
+#include "include2.h"
+#include "include3.h"
+#include "deeper.h"
+
+int main(void)
+{
+ printf("Hello from prog2.c\n");
+ printf("Hello from %s\n", INC_STRING);
+ printf("Hello from %s\n", INCLUDE1_STRING);
+ printf("Hello from %s\n", INCLUDE2_STRING);
+ printf("Hello from %s\n", INCLUDE3_STRING);
+ printf("Hello from %s\n", DEEPER_STRING);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/generator-output/src/subdir2/prog2.gyp b/third_party/python/gyp/test/generator-output/src/subdir2/prog2.gyp
new file mode 100644
index 0000000000..7176ed8be7
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir2/prog2.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../symroot.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog2',
+ 'type': 'executable',
+ 'include_dirs': [
+ '..',
+ '../inc1',
+ 'inc2',
+ '../subdir3/inc3',
+ 'deeper',
+ ],
+ 'dependencies': [
+ '../subdir3/prog3.gyp:prog3',
+ ],
+ 'sources': [
+ 'prog2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/src/subdir3/build/README.txt b/third_party/python/gyp/test/generator-output/src/subdir3/build/README.txt
new file mode 100644
index 0000000000..90ef886193
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir3/build/README.txt
@@ -0,0 +1,4 @@
+A place-holder for this Xcode build output directory, so that the
+test script can verify that .xcodeproj files are not created in
+their normal location by making the src/ read-only, and then
+selectively making this build directory writable.
diff --git a/third_party/python/gyp/test/generator-output/src/subdir3/inc3/include3.h b/third_party/python/gyp/test/generator-output/src/subdir3/inc3/include3.h
new file mode 100644
index 0000000000..bf53bf1f00
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir3/inc3/include3.h
@@ -0,0 +1 @@
+#define INCLUDE3_STRING "inc3/include3.h"
diff --git a/third_party/python/gyp/test/generator-output/src/subdir3/prog3.c b/third_party/python/gyp/test/generator-output/src/subdir3/prog3.c
new file mode 100644
index 0000000000..c72233da19
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir3/prog3.c
@@ -0,0 +1,18 @@
+#include <stdio.h>
+
+#include "inc.h"
+#include "include1.h"
+#include "include2.h"
+#include "include3.h"
+#include "deeper.h"
+
+int main(void)
+{
+ printf("Hello from prog3.c\n");
+ printf("Hello from %s\n", INC_STRING);
+ printf("Hello from %s\n", INCLUDE1_STRING);
+ printf("Hello from %s\n", INCLUDE2_STRING);
+ printf("Hello from %s\n", INCLUDE3_STRING);
+ printf("Hello from %s\n", DEEPER_STRING);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/generator-output/src/subdir3/prog3.gyp b/third_party/python/gyp/test/generator-output/src/subdir3/prog3.gyp
new file mode 100644
index 0000000000..46c5e000a2
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/subdir3/prog3.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../symroot.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog3',
+ 'type': 'executable',
+ 'include_dirs': [
+ '..',
+ '../inc1',
+ '../subdir2/inc2',
+ 'inc3',
+ '../subdir2/deeper',
+ ],
+ 'sources': [
+ 'prog3.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/generator-output/src/symroot.gypi b/third_party/python/gyp/test/generator-output/src/symroot.gypi
new file mode 100644
index 0000000000..519916427c
--- /dev/null
+++ b/third_party/python/gyp/test/generator-output/src/symroot.gypi
@@ -0,0 +1,16 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'set_symroot%': 0,
+ },
+ 'conditions': [
+ ['set_symroot == 1', {
+ 'xcode_settings': {
+ 'SYMROOT': '<(DEPTH)/build',
+ },
+ }],
+ ],
+}
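The trailing '%' in 'set_symroot%' marks a gyp default: the value applies only when nothing else (a -D flag or an including .gyp file) has already set the variable, which is how -Dset_symroot=1 in the tests above overrides the 0 here. A tiny model of that resolution (function name is ours):

    def with_defaults(defaults, provided):
        # A 'name%' default loses to any explicitly provided value.
        resolved = dict(defaults)
        resolved.update(provided)
        return resolved

    assert with_defaults({'set_symroot': 0}, {})['set_symroot'] == 0
    assert with_defaults({'set_symroot': 0}, {'set_symroot': 1})['set_symroot'] == 1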
diff --git a/third_party/python/gyp/test/gyp-defines/defines.gyp b/third_party/python/gyp/test/gyp-defines/defines.gyp
new file mode 100644
index 0000000000..f59bbd20d2
--- /dev/null
+++ b/third_party/python/gyp/test/gyp-defines/defines.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_target',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'test_action',
+ 'inputs': [],
+ 'outputs': [ 'action.txt' ],
+ 'action': [
+ 'python',
+ 'echo.py',
+ '<(key)',
+ '<(_outputs)',
+ ],
+ 'msvs_cygwin_shell': 0,
+ }
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/gyp-defines/echo.py b/third_party/python/gyp/test/gyp-defines/echo.py
new file mode 100644
index 0000000000..b85add12f6
--- /dev/null
+++ b/third_party/python/gyp/test/gyp-defines/echo.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+f = open(sys.argv[2], 'w+')
+f.write(sys.argv[1])
+f.close()
diff --git a/third_party/python/gyp/test/gyp-defines/gyptest-multiple-values.py b/third_party/python/gyp/test/gyp-defines/gyptest-multiple-values.py
new file mode 100644
index 0000000000..67735cce6a
--- /dev/null
+++ b/third_party/python/gyp/test/gyp-defines/gyptest-multiple-values.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that when multiple values are supplied for a gyp define, the last one
+is used.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+os.environ['GYP_DEFINES'] = 'key=value1 key=value2 key=value3'
+test.run_gyp('defines.gyp')
+
+test.build('defines.gyp')
+test.must_contain('action.txt', 'value3')
+
+# The last occurrence of a repeated define should take precedence over
+# other values.
+os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value'
+test.run_gyp('defines.gyp')
+
+if test.format == 'msvs' and not test.uses_msbuild:
+ # msvs versions before 2010 only notice build rule changes that show up
+ # in file system timestamps, so rebuild to pick up the difference.
+ test.build('defines.gyp', rebuild=True)
+else:
+ test.build('defines.gyp')
+test.must_contain('action.txt', 'repeated_value')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/gyp-defines/gyptest-regyp.py b/third_party/python/gyp/test/gyp-defines/gyptest-regyp.py
new file mode 100644
index 0000000000..0895d81d4f
--- /dev/null
+++ b/third_party/python/gyp/test/gyp-defines/gyptest-regyp.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that when the same value is repeated for a gyp define, duplicates are
+stripped from the regeneration rule.
+"""
+
+import os
+import TestGyp
+
+# Regenerating build files when a gyp file changes is currently only supported
+# by the make generator.
+test = TestGyp.TestGyp(formats=['make'])
+
+os.environ['GYP_DEFINES'] = 'key=repeated_value key=value1 key=repeated_value'
+test.run_gyp('defines.gyp')
+test.build('defines.gyp')
+
+# The last occurrence of a repeated define should take precedence over
+# other values. See gyptest-multiple-values.py.
+test.must_contain('action.txt', 'repeated_value')
+
+# So the regeneration rule needs to list the deduplicated defines in the
+# correct last-wins order.
+test.must_not_contain(
+ 'Makefile', '"-Dkey=repeated_value" "-Dkey=value1" "-Dkey=repeated_value"')
+test.must_contain('Makefile', '"-Dkey=value1" "-Dkey=repeated_value"')
+
+# Sleep so that the changed gyp file will have a newer timestamp than the
+# previously generated build files.
+test.sleep()
+os.utime("defines.gyp", None)
+
+test.build('defines.gyp')
+test.must_contain('action.txt', 'repeated_value')
+
+test.pass_test()
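Both gyp-defines tests depend on GYP_DEFINES being parsed left to right, with a later value for a key overwriting an earlier one. A minimal sketch of that last-one-wins parse (illustrative only, not gyp's actual parser):

    import shlex

    def parse_gyp_defines(env_value):
        # Later occurrences of a key overwrite earlier ones, so
        # 'key=value1 key=value2 key=value3' resolves to 'value3'.
        defines = {}
        for item in shlex.split(env_value):
            key, _, value = item.partition('=')
            defines[key] = value
        return defines

    assert parse_gyp_defines('key=value1 key=value2 key=value3') == \
        {'key': 'value3'}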
diff --git a/third_party/python/gyp/test/hard_dependency/gyptest-exported-hard-dependency.py b/third_party/python/gyp/test/hard_dependency/gyptest-exported-hard-dependency.py
new file mode 100755
index 0000000000..ba51528800
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/gyptest-exported-hard-dependency.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that a hard_dependency that is exported is pulled in as a dependency
+for a target if the target is a static library and if the generator will
+remove dependencies between static libraries.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+if test.format == 'dump_dependency_json':
+ test.skip_test('Skipping test; dependency JSON does not adjust ' \
+ 'static libraries.\n')
+
+test.run_gyp('hard_dependency.gyp', chdir='src')
+
+chdir = 'relocate/src'
+test.relocate('src', chdir)
+
+test.build('hard_dependency.gyp', 'c', chdir=chdir)
+
+# The 'a' static library should be built, as it has actions with side-effects
+# that are necessary to compile 'c'. Even though 'c' does not directly depend
+# on 'a', because 'a' is a hard_dependency that 'b' exports, 'c' should import
+# it as a hard_dependency and ensure it is built before building 'c'.
+test.built_file_must_exist('a', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_exist('c', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_not_exist('d', type=test.STATIC_LIB, chdir=chdir)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/hard_dependency/gyptest-no-exported-hard-dependency.py b/third_party/python/gyp/test/hard_dependency/gyptest-no-exported-hard-dependency.py
new file mode 100755
index 0000000000..10774ca2a0
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/gyptest-no-exported-hard-dependency.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that a hard_dependency that is not exported is not pulled in as a
+dependency for a target if the target does not explicitly specify a dependency
+and none of its dependencies export the hard_dependency.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+if test.format == 'dump_dependency_json':
+ test.skip_test('Skipping test; dependency JSON does not adjust ' \
+ 'static libraries.\n')
+
+test.run_gyp('hard_dependency.gyp', chdir='src')
+
+chdir = 'relocate/src'
+test.relocate('src', chdir)
+
+test.build('hard_dependency.gyp', 'd', chdir=chdir)
+
+# Because 'c' does not export a hard_dependency, only the target 'd' should
+# be built. This is because the 'd' target does not need the generated headers
+# in order to be compiled.
+test.built_file_must_not_exist('a', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_not_exist('c', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_exist('d', type=test.STATIC_LIB, chdir=chdir)
+
+test.pass_test()
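Between them, the two hard_dependency tests pin down the pull-in rule: a hard_dependency is added to a target's build only when one of the target's direct dependencies re-exports it via export_dependent_settings. A toy model of that rule for the a -> b -> c -> d chain used here (an illustrative sketch, not gyp's dependency resolver):

    deps = {'a': [], 'b': ['a'], 'c': ['b'], 'd': ['c']}
    hard = {'a'}               # targets marked hard_dependency: 1
    exports = {'b': ['a']}     # export_dependent_settings per target

    def pulled_in_hard_deps(target):
        # Only hard dependencies that a direct dependency exports get built.
        return {h for dep in deps[target]
                for h in exports.get(dep, []) if h in hard}

    assert pulled_in_hard_deps('c') == {'a'}   # exported: 'a' gets built
    assert pulled_in_hard_deps('d') == set()   # not exported: only 'd' builds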
diff --git a/third_party/python/gyp/test/hard_dependency/src/a.c b/third_party/python/gyp/test/hard_dependency/src/a.c
new file mode 100644
index 0000000000..0fa0223c97
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/a.c
@@ -0,0 +1,9 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include "a.h"
+
+int funcA() {
+ return 42;
+}
diff --git a/third_party/python/gyp/test/hard_dependency/src/a.h b/third_party/python/gyp/test/hard_dependency/src/a.h
new file mode 100644
index 0000000000..854a06504a
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/a.h
@@ -0,0 +1,12 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#ifndef A_H_
+#define A_H_
+
+#include "generated.h"
+
+int funcA();
+
+#endif // A_H_
diff --git a/third_party/python/gyp/test/hard_dependency/src/b.c b/third_party/python/gyp/test/hard_dependency/src/b.c
new file mode 100644
index 0000000000..0baace929e
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/b.c
@@ -0,0 +1,9 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include "a.h"
+
+int funcB() {
+ return funcA();
+}
diff --git a/third_party/python/gyp/test/hard_dependency/src/b.h b/third_party/python/gyp/test/hard_dependency/src/b.h
new file mode 100644
index 0000000000..22b48cefe2
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/b.h
@@ -0,0 +1,12 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#ifndef B_H_
+#define B_H_
+
+#include "a.h"
+
+int funcB();
+
+#endif // B_H_
diff --git a/third_party/python/gyp/test/hard_dependency/src/c.c b/third_party/python/gyp/test/hard_dependency/src/c.c
new file mode 100644
index 0000000000..7d0068208e
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/c.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include "b.h"
+#include "c.h"
+
+int funcC() {
+ return funcB();
+}
diff --git a/third_party/python/gyp/test/hard_dependency/src/c.h b/third_party/python/gyp/test/hard_dependency/src/c.h
new file mode 100644
index 0000000000..f4ea7fefa2
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/c.h
@@ -0,0 +1,10 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#ifndef C_H_
+#define C_H_
+
+int funcC();
+
+#endif // C_H_
diff --git a/third_party/python/gyp/test/hard_dependency/src/d.c b/third_party/python/gyp/test/hard_dependency/src/d.c
new file mode 100644
index 0000000000..d016c3ce71
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/d.c
@@ -0,0 +1,9 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include "c.h"
+
+int funcD() {
+ return funcC();
+}
diff --git a/third_party/python/gyp/test/hard_dependency/src/emit.py b/third_party/python/gyp/test/hard_dependency/src/emit.py
new file mode 100755
index 0000000000..8ed12f7393
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/emit.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+f = open(sys.argv[1], 'w')
+f.write('/* Hello World */\n')
+f.close()
diff --git a/third_party/python/gyp/test/hard_dependency/src/hard_dependency.gyp b/third_party/python/gyp/test/hard_dependency/src/hard_dependency.gyp
new file mode 100644
index 0000000000..4479c5f045
--- /dev/null
+++ b/third_party/python/gyp/test/hard_dependency/src/hard_dependency.gyp
@@ -0,0 +1,78 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'static_library',
+ 'sources': [
+ 'a.c',
+ 'a.h',
+ ],
+ 'hard_dependency': 1,
+ 'actions': [
+ {
+ 'action_name': 'generate_headers',
+ 'inputs': [
+ 'emit.py'
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/generated.h'
+ ],
+ 'action': [
+ 'python',
+ 'emit.py',
+ '<(SHARED_INTERMEDIATE_DIR)/generated.h',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ },
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'static_library',
+ 'sources': [
+ 'b.c',
+ 'b.h',
+ ],
+ 'dependencies': [
+ 'a',
+ ],
+ 'export_dependent_settings': [
+ 'a',
+ ],
+ },
+ {
+ 'target_name': 'c',
+ 'type': 'static_library',
+ 'sources': [
+ 'c.c',
+ 'c.h',
+ ],
+ 'dependencies': [
+ 'b',
+ ],
+ },
+ {
+ 'target_name': 'd',
+ 'type': 'static_library',
+ 'sources': [
+ 'd.c',
+ ],
+ 'dependencies': [
+ 'c',
+ ],
+ }
+ ],
+}
diff --git a/third_party/python/gyp/test/hello/gyptest-all.py b/third_party/python/gyp/test/hello/gyptest-all.py
new file mode 100755
index 0000000000..1739b6886e
--- /dev/null
+++ b/third_party/python/gyp/test/hello/gyptest-all.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simplest-possible build of a "Hello, world!" program
+using an explicit build target of 'all'.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_all')
+
+test.run_gyp('hello.gyp')
+
+test.build('hello.gyp', test.ALL)
+
+test.run_built_executable('hello', stdout="Hello, world!\n")
+
+test.up_to_date('hello.gyp', test.ALL)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/hello/gyptest-default.py b/third_party/python/gyp/test/hello/gyptest-default.py
new file mode 100755
index 0000000000..22377e7ac5
--- /dev/null
+++ b/third_party/python/gyp/test/hello/gyptest-default.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simplest-possible build of a "Hello, world!" program
+using the default build target.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_default')
+
+test.run_gyp('hello.gyp')
+
+test.build('hello.gyp')
+
+test.run_built_executable('hello', stdout="Hello, world!\n")
+
+test.up_to_date('hello.gyp', test.DEFAULT)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/hello/gyptest-disable-regyp.py b/third_party/python/gyp/test/hello/gyptest-disable-regyp.py
new file mode 100755
index 0000000000..1e4b306674
--- /dev/null
+++ b/third_party/python/gyp/test/hello/gyptest-disable-regyp.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that Makefiles don't get rebuilt when a source gyp file changes and
+the disable_regeneration generator flag is set.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('hello.gyp', '-Gauto_regeneration=0')
+
+test.build('hello.gyp', test.ALL)
+
+test.run_built_executable('hello', stdout="Hello, world!\n")
+
+# Sleep so that the changed gyp file will have a newer timestamp than the
+# previously generated build files.
+test.sleep()
+test.write('hello.gyp', test.read('hello2.gyp'))
+
+test.build('hello.gyp', test.ALL)
+
+# Should still be the old executable, as regeneration was disabled.
+test.run_built_executable('hello', stdout="Hello, world!\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/hello/gyptest-regyp-output.py b/third_party/python/gyp/test/hello/gyptest-regyp-output.py
new file mode 100644
index 0000000000..fd88a85503
--- /dev/null
+++ b/third_party/python/gyp/test/hello/gyptest-regyp-output.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that Makefiles get rebuilt when a source gyp file changes and
+--generator-output is used.
+"""
+
+import TestGyp
+
+# Regenerating build files when a gyp file changes is currently only supported
+# by the make generator, and --generator-output is not supported by ninja, so we
+# can only test for make.
+test = TestGyp.TestGyp(formats=['make'])
+
+CHDIR = 'generator-output'
+
+test.run_gyp('hello.gyp', '--generator-output=%s' % CHDIR)
+
+test.build('hello.gyp', test.ALL, chdir=CHDIR)
+
+test.run_built_executable('hello', stdout="Hello, world!\n", chdir=CHDIR)
+
+# Sleep so that the changed gyp file will have a newer timestamp than the
+# previously generated build files.
+test.sleep()
+test.write('hello.gyp', test.read('hello2.gyp'))
+
+test.build('hello.gyp', test.ALL, chdir=CHDIR)
+
+test.run_built_executable('hello', stdout="Hello, two!\n", chdir=CHDIR)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/hello/gyptest-regyp.py b/third_party/python/gyp/test/hello/gyptest-regyp.py
new file mode 100755
index 0000000000..b513edcd07
--- /dev/null
+++ b/third_party/python/gyp/test/hello/gyptest-regyp.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that Makefiles get rebuilt when a source gyp file changes.
+"""
+
+import TestGyp
+
+# Regenerating build files when a gyp file changes is currently only supported
+# by the make generator.
+test = TestGyp.TestGyp(formats=['make'])
+
+test.run_gyp('hello.gyp')
+
+test.build('hello.gyp', test.ALL)
+
+test.run_built_executable('hello', stdout="Hello, world!\n")
+
+# Sleep so that the changed gyp file will have a newer timestamp than the
+# previously generated build files.
+test.sleep()
+test.write('hello.gyp', test.read('hello2.gyp'))
+
+test.build('hello.gyp', test.ALL)
+
+test.run_built_executable('hello', stdout="Hello, two!\n")
+
+test.pass_test()
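The sleep-then-rewrite pattern in the regyp tests works because the make generator emits a rule that reruns gyp whenever an input .gyp file is newer than the generated Makefile. The underlying check is just an mtime comparison (a sketch, not the generated rule itself):

    import os

    def needs_regyp(gyp_file, makefile):
        # True when the gyp input is newer than the generated output,
        # which is why the tests sleep before rewriting hello.gyp.
        return os.path.getmtime(gyp_file) > os.path.getmtime(makefile)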
diff --git a/third_party/python/gyp/test/hello/gyptest-target.py b/third_party/python/gyp/test/hello/gyptest-target.py
new file mode 100755
index 0000000000..1abaf7057b
--- /dev/null
+++ b/third_party/python/gyp/test/hello/gyptest-target.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simplest-possible build of a "Hello, world!" program
+using an explicit build target of 'hello'.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_target')
+
+test.run_gyp('hello.gyp')
+
+test.build('hello.gyp', 'hello')
+
+test.run_built_executable('hello', stdout="Hello, world!\n")
+
+test.up_to_date('hello.gyp', 'hello')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/hello/hello.c b/third_party/python/gyp/test/hello/hello.c
new file mode 100644
index 0000000000..0a4c806019
--- /dev/null
+++ b/third_party/python/gyp/test/hello/hello.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/hello/hello.gyp b/third_party/python/gyp/test/hello/hello.gyp
new file mode 100644
index 0000000000..1974d51ccd
--- /dev/null
+++ b/third_party/python/gyp/test/hello/hello.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/hello/hello2.c b/third_party/python/gyp/test/hello/hello2.c
new file mode 100644
index 0000000000..b14299cae0
--- /dev/null
+++ b/third_party/python/gyp/test/hello/hello2.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello, two!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/hello/hello2.gyp b/third_party/python/gyp/test/hello/hello2.gyp
new file mode 100644
index 0000000000..25b08caf3c
--- /dev/null
+++ b/third_party/python/gyp/test/hello/hello2.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-config-arg.py b/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-config-arg.py
new file mode 100755
index 0000000000..82e39f9d07
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-config-arg.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies inclusion of $HOME/.gyp/include.gypi works when --config-dir is
+specified.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+os.environ['HOME'] = os.path.abspath('home2')
+
+test.run_gyp('all.gyp', '--config-dir=~/.gyp_new', chdir='src')
+
+# After relocating, we should still be able to build (the build file
+# shouldn't contain a relative reference to ~/.gyp_new/include.gypi).
+test.relocate('src', 'relocate/src')
+
+test.build('all.gyp', test.ALL, chdir='relocate/src')
+
+test.run_built_executable('printfoo',
+ chdir='relocate/src',
+ stdout='FOO is fromhome3\n')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-config-env.py b/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-config-env.py
new file mode 100755
index 0000000000..6f4b299ede
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-config-env.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies inclusion of $HOME/.gyp_new/include.gypi works when GYP_CONFIG_DIR
+is set.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+os.environ['HOME'] = os.path.abspath('home')
+os.environ['GYP_CONFIG_DIR'] = os.path.join(os.path.abspath('home2'),
+ '.gyp_new')
+
+test.run_gyp('all.gyp', chdir='src')
+
+# After relocating, we should still be able to build (build file shouldn't
+# contain relative reference to ~/.gyp_new/include.gypi)
+test.relocate('src', 'relocate/src')
+
+test.build('all.gyp', test.ALL, chdir='relocate/src')
+
+test.run_built_executable('printfoo',
+ chdir='relocate/src',
+ stdout='FOO is fromhome3\n')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-regyp.py b/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-regyp.py
new file mode 100755
index 0000000000..fdf8b14464
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes-regyp.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies inclusion of $HOME/.gyp/include.gypi works properly with relocation
+and with regeneration.
+"""
+
+import os
+import TestGyp
+
+# Regenerating build files when a gyp file changes is currently only supported
+# by the make generator.
+test = TestGyp.TestGyp(formats=['make'])
+
+os.environ['HOME'] = os.path.abspath('home')
+
+test.run_gyp('all.gyp', chdir='src')
+
+# After relocating, we should still be able to build (build file shouldn't
+# contain relative reference to ~/.gyp/include.gypi)
+test.relocate('src', 'relocate/src')
+
+test.build('all.gyp', test.ALL, chdir='relocate/src')
+
+test.run_built_executable('printfoo',
+ chdir='relocate/src',
+ stdout='FOO is fromhome\n')
+
+# Building should notice any changes to ~/.gyp/include.gypi and regyp.
+test.sleep()
+
+test.write('home/.gyp/include.gypi', test.read('home2/.gyp/include.gypi'))
+
+test.build('all.gyp', test.ALL, chdir='relocate/src')
+
+test.run_built_executable('printfoo',
+ chdir='relocate/src',
+ stdout='FOO is fromhome2\n')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes.py b/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes.py
new file mode 100755
index 0000000000..8ad52556be
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/gyptest-home-includes.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies inclusion of $HOME/.gyp/include.gypi works.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+os.environ['HOME'] = os.path.abspath('home')
+
+test.run_gyp('all.gyp', chdir='src')
+
+# After relocating, we should still be able to build (build file shouldn't
+# contain relative reference to ~/.gyp/include.gypi)
+test.relocate('src', 'relocate/src')
+
+test.build('all.gyp', test.ALL, chdir='relocate/src')
+
+test.run_built_executable('printfoo',
+ chdir='relocate/src',
+ stdout='FOO is fromhome\n')
+
+test.pass_test()
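The four home_dot_gyp tests vary where include.gypi is looked up: plain $HOME/.gyp, a --config-dir flag, or the GYP_CONFIG_DIR environment variable. A sketch of the resolution they imply (treating the flag and the environment variable as equivalent overrides of ~/.gyp is an assumption here, and the helper name is illustrative):

    import os

    def config_include(config_dir_flag=None):
        # An explicit config dir replaces the default ~/.gyp location;
        # include.gypi is only picked up if it actually exists there.
        base = (os.environ.get('GYP_CONFIG_DIR') or
                os.path.expanduser(config_dir_flag or '~/.gyp'))
        path = os.path.join(base, 'include.gypi')
        return path if os.path.exists(path) else None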
diff --git a/third_party/python/gyp/test/home_dot_gyp/home/.gyp/include.gypi b/third_party/python/gyp/test/home_dot_gyp/home/.gyp/include.gypi
new file mode 100644
index 0000000000..fcfb39befd
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/home/.gyp/include.gypi
@@ -0,0 +1,5 @@
+{
+ 'variables': {
+ 'foo': '"fromhome"',
+ },
+}
diff --git a/third_party/python/gyp/test/home_dot_gyp/home2/.gyp/include.gypi b/third_party/python/gyp/test/home_dot_gyp/home2/.gyp/include.gypi
new file mode 100644
index 0000000000..f0d84b31ad
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/home2/.gyp/include.gypi
@@ -0,0 +1,5 @@
+{
+ 'variables': {
+ 'foo': '"fromhome2"',
+ },
+}
diff --git a/third_party/python/gyp/test/home_dot_gyp/home2/.gyp_new/include.gypi b/third_party/python/gyp/test/home_dot_gyp/home2/.gyp_new/include.gypi
new file mode 100644
index 0000000000..4094dfd2f8
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/home2/.gyp_new/include.gypi
@@ -0,0 +1,5 @@
+{
+ 'variables': {
+ 'foo': '"fromhome3"',
+ },
+}
diff --git a/third_party/python/gyp/test/home_dot_gyp/src/all.gyp b/third_party/python/gyp/test/home_dot_gyp/src/all.gyp
new file mode 100644
index 0000000000..14b6aea285
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/src/all.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'foo%': '"fromdefault"',
+ },
+ 'targets': [
+ {
+ 'target_name': 'printfoo',
+ 'type': 'executable',
+ 'sources': [
+ 'printfoo.c',
+ ],
+ 'defines': [
+ 'FOO=<(foo)',
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/home_dot_gyp/src/printfoo.c b/third_party/python/gyp/test/home_dot_gyp/src/printfoo.c
new file mode 100644
index 0000000000..9bb67181b9
--- /dev/null
+++ b/third_party/python/gyp/test/home_dot_gyp/src/printfoo.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("FOO is %s\n", FOO);
+ return 0;
+}
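Note the nested quoting in the include.gypi variants and all.gyp above: foo is set to '"fromhome"' with the double quotes inside the value, so 'FOO=<(foo)' expands to a define whose value is a C string literal, which is what lets printfoo.c pass FOO straight to printf's %s. Illustratively:

    foo = '"fromhome"'       # gyp variable value, quotes included
    define = 'FOO=' + foo    # what 'defines': ['FOO=<(foo)'] expands to
    assert define == 'FOO="fromhome"'  # FOO is the string "fromhome" in C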
diff --git a/third_party/python/gyp/test/include_dirs/gyptest-all.py b/third_party/python/gyp/test/include_dirs/gyptest-all.py
new file mode 100755
index 0000000000..d64bc6a9ca
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/gyptest-all.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies use of include_dirs when using an explicit build target of 'all'.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('includes.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('includes.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from includes.c
+Hello from inc.h
+Hello from include1.h
+Hello from subdir/inc2/include2.h
+Hello from shadow2/shadow.h
+"""
+test.run_built_executable('includes', stdout=expect, chdir='relocate/src')
+
+if test.format == 'xcode':
+ chdir='relocate/src/subdir'
+else:
+ chdir='relocate/src'
+
+expect = """\
+Hello from subdir/subdir_includes.c
+Hello from subdir/inc.h
+Hello from include1.h
+Hello from subdir/inc2/include2.h
+"""
+test.run_built_executable('subdir_includes', stdout=expect, chdir=chdir)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/include_dirs/gyptest-default.py b/third_party/python/gyp/test/include_dirs/gyptest-default.py
new file mode 100755
index 0000000000..fc6141587e
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/gyptest-default.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies use of include_dirs when using the default build target.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('includes.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('includes.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from includes.c
+Hello from inc.h
+Hello from include1.h
+Hello from subdir/inc2/include2.h
+Hello from shadow2/shadow.h
+"""
+test.run_built_executable('includes', stdout=expect, chdir='relocate/src')
+
+if test.format == 'xcode':
+ chdir='relocate/src/subdir'
+else:
+ chdir='relocate/src'
+
+expect = """\
+Hello from subdir/subdir_includes.c
+Hello from subdir/inc.h
+Hello from include1.h
+Hello from subdir/inc2/include2.h
+"""
+test.run_built_executable('subdir_includes', stdout=expect, chdir=chdir)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/include_dirs/src/inc.h b/third_party/python/gyp/test/include_dirs/src/inc.h
new file mode 100644
index 0000000000..0398d6915f
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/inc.h
@@ -0,0 +1 @@
+#define INC_STRING "inc.h"
diff --git a/third_party/python/gyp/test/include_dirs/src/inc1/include1.h b/third_party/python/gyp/test/include_dirs/src/inc1/include1.h
new file mode 100644
index 0000000000..43356b5f47
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/inc1/include1.h
@@ -0,0 +1 @@
+#define INCLUDE1_STRING "include1.h"
diff --git a/third_party/python/gyp/test/include_dirs/src/includes.c b/third_party/python/gyp/test/include_dirs/src/includes.c
new file mode 100644
index 0000000000..6e2a23cdff
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/includes.c
@@ -0,0 +1,19 @@
+#include <stdio.h>
+
+#include "inc.h"
+#include "include1.h"
+#include "include2.h"
+#include "shadow.h"
+
+int main(void)
+{
+ printf("Hello from includes.c\n");
+ printf("Hello from %s\n", INC_STRING);
+ printf("Hello from %s\n", INCLUDE1_STRING);
+ printf("Hello from %s\n", INCLUDE2_STRING);
+ /* Test that include_dirs happen first: The gyp file has a -Ishadow1
+ cflag and an include_dir of shadow2. Including shadow.h should get
+ the shadow.h from the include_dir. */
+ printf("Hello from %s\n", SHADOW_STRING);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/include_dirs/src/includes.gyp b/third_party/python/gyp/test/include_dirs/src/includes.gyp
new file mode 100644
index 0000000000..3592690208
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/includes.gyp
@@ -0,0 +1,27 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'includes',
+ 'type': 'executable',
+ 'dependencies': [
+ 'subdir/subdir_includes.gyp:subdir_includes',
+ ],
+ 'cflags': [
+ '-Ishadow1',
+ ],
+ 'include_dirs': [
+ '.',
+ 'inc1',
+ 'shadow2',
+ 'subdir/inc2',
+ ],
+ 'sources': [
+ 'includes.c',
+ ],
+ },
+ ],
+}
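The shadow1/shadow2 pair in includes.gyp only resolves correctly if the generator emits include_dirs ahead of raw cflags, so that -Ishadow2 precedes -Ishadow1 on the compile line and shadow2/shadow.h wins. Roughly (an illustrative sketch of the ordering, not any generator's real code):

    include_dirs = ['.', 'inc1', 'shadow2', 'subdir/inc2']
    cflags = ['-Ishadow1']
    # include_dirs come first, so shadow2/shadow.h shadows shadow1/shadow.h,
    # matching the SHADOW_STRING value the test expects.
    cmd = (['cc'] + ['-I' + d for d in include_dirs] + cflags
           + ['-c', 'includes.c'])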
diff --git a/third_party/python/gyp/test/include_dirs/src/shadow1/shadow.h b/third_party/python/gyp/test/include_dirs/src/shadow1/shadow.h
new file mode 100644
index 0000000000..80f6de20b8
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/shadow1/shadow.h
@@ -0,0 +1 @@
+#define SHADOW_STRING "shadow1/shadow.h"
diff --git a/third_party/python/gyp/test/include_dirs/src/shadow2/shadow.h b/third_party/python/gyp/test/include_dirs/src/shadow2/shadow.h
new file mode 100644
index 0000000000..fad5ccd085
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/shadow2/shadow.h
@@ -0,0 +1 @@
+#define SHADOW_STRING "shadow2/shadow.h"
diff --git a/third_party/python/gyp/test/include_dirs/src/subdir/inc.h b/third_party/python/gyp/test/include_dirs/src/subdir/inc.h
new file mode 100644
index 0000000000..0a68d7b36a
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/subdir/inc.h
@@ -0,0 +1 @@
+#define INC_STRING "subdir/inc.h"
diff --git a/third_party/python/gyp/test/include_dirs/src/subdir/inc2/include2.h b/third_party/python/gyp/test/include_dirs/src/subdir/inc2/include2.h
new file mode 100644
index 0000000000..721577effb
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/subdir/inc2/include2.h
@@ -0,0 +1 @@
+#define INCLUDE2_STRING "subdir/inc2/include2.h"
diff --git a/third_party/python/gyp/test/include_dirs/src/subdir/subdir_includes.c b/third_party/python/gyp/test/include_dirs/src/subdir/subdir_includes.c
new file mode 100644
index 0000000000..4623543c43
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/subdir/subdir_includes.c
@@ -0,0 +1,14 @@
+#include <stdio.h>
+
+#include "inc.h"
+#include "include1.h"
+#include "include2.h"
+
+int main(void)
+{
+ printf("Hello from subdir/subdir_includes.c\n");
+ printf("Hello from %s\n", INC_STRING);
+ printf("Hello from %s\n", INCLUDE1_STRING);
+ printf("Hello from %s\n", INCLUDE2_STRING);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/include_dirs/src/subdir/subdir_includes.gyp b/third_party/python/gyp/test/include_dirs/src/subdir/subdir_includes.gyp
new file mode 100644
index 0000000000..257d052c3c
--- /dev/null
+++ b/third_party/python/gyp/test/include_dirs/src/subdir/subdir_includes.gyp
@@ -0,0 +1,20 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'subdir_includes',
+ 'type': 'executable',
+ 'include_dirs': [
+ '.',
+ '../inc1',
+ 'inc2',
+ ],
+ 'sources': [
+ 'subdir_includes.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/intermediate_dir/gyptest-intermediate-dir.py b/third_party/python/gyp/test/intermediate_dir/gyptest-intermediate-dir.py
new file mode 100755
index 0000000000..bf4b91a2fc
--- /dev/null
+++ b/third_party/python/gyp/test/intermediate_dir/gyptest-intermediate-dir.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that targets have independent INTERMEDIATE_DIRs.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('test.gyp', chdir='src')
+
+test.build('test.gyp', 'target1', chdir='src')
+# Check stuff exists.
+intermediate_file1 = test.read('src/outfile.txt')
+test.must_contain(intermediate_file1, 'target1')
+
+shared_intermediate_file1 = test.read('src/shared_outfile.txt')
+test.must_contain(shared_intermediate_file1, 'shared_target1')
+
+test.run_gyp('test2.gyp', chdir='src')
+
+# Force the shared intermediate to be rebuilt.
+test.sleep()
+test.touch('src/shared_infile.txt')
+test.build('test2.gyp', 'target2', chdir='src')
+# Check INTERMEDIATE_DIR file didn't get overwritten but SHARED_INTERMEDIATE_DIR
+# file did.
+intermediate_file2 = test.read('src/outfile.txt')
+test.must_contain(intermediate_file1, 'target1')
+test.must_contain(intermediate_file2, 'target2')
+
+shared_intermediate_file2 = test.read('src/shared_outfile.txt')
+if shared_intermediate_file1 != shared_intermediate_file2:
+ test.fail_test(shared_intermediate_file1 + ' != ' + shared_intermediate_file2)
+
+test.must_contain(shared_intermediate_file1, 'shared_target2')
+test.must_contain(shared_intermediate_file2, 'shared_target2')
+
+test.pass_test()
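The assertions above hinge on INTERMEDIATE_DIR being private to each target while SHARED_INTERMEDIATE_DIR is a single directory shared across .gyp files, so the second build overwrites the shared output but not the per-target one. A toy model of that behaviour (illustrative only):

    per_target = {}   # INTERMEDIATE_DIR: isolated per target
    shared = {}       # SHARED_INTERMEDIATE_DIR: last writer wins

    for target in ('target1', 'target2'):
        per_target[(target, 'intermediate_out.txt')] = target
        shared['intermediate_out.txt'] = 'shared_' + target

    assert per_target[('target1', 'intermediate_out.txt')] == 'target1'
    assert shared['intermediate_out.txt'] == 'shared_target2'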
diff --git a/third_party/python/gyp/test/intermediate_dir/src/script.py b/third_party/python/gyp/test/intermediate_dir/src/script.py
new file mode 100755
index 0000000000..2eb73ac206
--- /dev/null
+++ b/third_party/python/gyp/test/intermediate_dir/src/script.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Takes three arguments: writes the first argument into the file named by
+# the second argument, and writes the absolute path of that file into the
+# file named by the third argument.
+
+import os
+import shlex
+import sys
+
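+# Some generators pass the two paths from <(_outputs) as a single
+# space-joined argument; if so, split it back apart (doubling backslashes
+# so shlex does not eat Windows path separators).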
+if len(sys.argv) == 3 and ' ' in sys.argv[2]:
+ sys.argv[2], fourth = shlex.split(sys.argv[2].replace('\\', '\\\\'))
+ sys.argv.append(fourth)
+
+with open(sys.argv[2], 'w') as f:
+ f.write(sys.argv[1])
+
+with open(sys.argv[3], 'w') as f:
+ f.write(os.path.abspath(sys.argv[2]))
diff --git a/third_party/python/gyp/test/intermediate_dir/src/shared_infile.txt b/third_party/python/gyp/test/intermediate_dir/src/shared_infile.txt
new file mode 100644
index 0000000000..e2aba15d04
--- /dev/null
+++ b/third_party/python/gyp/test/intermediate_dir/src/shared_infile.txt
@@ -0,0 +1 @@
+dummy input
diff --git a/third_party/python/gyp/test/intermediate_dir/src/test.gyp b/third_party/python/gyp/test/intermediate_dir/src/test.gyp
new file mode 100644
index 0000000000..b61e7e8ea5
--- /dev/null
+++ b/third_party/python/gyp/test/intermediate_dir/src/test.gyp
@@ -0,0 +1,42 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'target1',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'intermediate',
+ 'inputs': [],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/intermediate_out.txt',
+ 'outfile.txt',
+ ],
+ 'action': [
+ 'python', 'script.py', 'target1', '<(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'shared_intermediate',
+ 'inputs': [
+ 'shared_infile.txt',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/intermediate_out.txt',
+ 'shared_outfile.txt',
+ ],
+ 'action': [
+ 'python', 'script.py', 'shared_target1', '<(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/intermediate_dir/src/test2.gyp b/third_party/python/gyp/test/intermediate_dir/src/test2.gyp
new file mode 100644
index 0000000000..41f5564663
--- /dev/null
+++ b/third_party/python/gyp/test/intermediate_dir/src/test2.gyp
@@ -0,0 +1,42 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'target2',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'intermediate',
+ 'inputs': [],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/intermediate_out.txt',
+ 'outfile.txt',
+ ],
+ 'action': [
+ 'python', 'script.py', 'target2', '<(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'shared_intermediate',
+ 'inputs': [
+ 'shared_infile.txt',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/intermediate_out.txt',
+ 'shared_outfile.txt',
+ ],
+ 'action': [
+ 'python', 'script.py', 'shared_target2', '<(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/InfoPlist-error.strings b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/InfoPlist-error.strings
new file mode 100644
index 0000000000..452e7fabf9
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/InfoPlist-error.strings
@@ -0,0 +1,3 @@
+/* Localized versions of Info.plist keys */
+
+NSHumanReadableCopyright = "Copyright ©2011 Google Inc."
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/InfoPlist.strings b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/InfoPlist.strings
new file mode 100644
index 0000000000..35bd33a96e
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/InfoPlist.strings
@@ -0,0 +1,3 @@
+/* Localized versions of Info.plist keys */
+
+NSHumanReadableCopyright = "Copyright ©2011 Google Inc.";
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/LanguageMap.plist b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/LanguageMap.plist
new file mode 100644
index 0000000000..6b94882328
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/LanguageMap.plist
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>en</key>
+ <string>en</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/MainMenu.xib b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/MainMenu.xib
new file mode 100644
index 0000000000..21b60448ad
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/MainMenu.xib
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="3.0" toolsVersion="9060" systemVersion="15B42" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" launchScreen="YES" useTraitCollections="YES">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="9051"/>
+ <capability name="Aspect ratio constraints" minToolsVersion="5.1"/>
+ <capability name="Constraints with non-1.0 multipliers" minToolsVersion="5.1"/>
+ </dependencies>
+ <objects>
+ <placeholder placeholderIdentifier="IBFilesOwner" id="-1" userLabel="File's Owner"/>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="-2" customClass="UIResponder"/>
+ <viewController id="Ssz-5V-cv2">
+ <view key="view" contentMode="scaleToFill" id="tRS-Cx-RH3">
+ </view>
+ <point key="canvasLocation" x="548" y="1086"/>
+ </viewController>
+ </objects>
+</document>
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/Main_iPhone.storyboard b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/Main_iPhone.storyboard
new file mode 100644
index 0000000000..723bc85122
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/English.lproj/Main_iPhone.storyboard
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="1.0" toolsVersion="1906" systemVersion="11A511" targetRuntime="iOS.CocoaTouch" nextObjectID="6" propertyAccessControl="none" initialViewController="2">
+ <dependencies>
+ <development defaultVersion="4200" identifier="xcode"/>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="902"/>
+ </dependencies>
+ <scenes>
+ <scene sceneID="5">
+ <objects>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="4" sceneMemberID="firstResponder"/>
+ <viewController id="2" customClass="ViewController" sceneMemberID="viewController">
+ <view key="view" contentMode="scaleToFill" id="3">
+ <rect key="frame" x="0.0" y="20" width="320" height="460"/>
+ <autoresizingMask key="autoresizingMask" flexibleMaxX="YES" flexibleMaxY="YES"/>
+ <subviews/>
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
+ </view>
+ </viewController>
+ </objects>
+ </scene>
+ </scenes>
+ <simulatedMetricsContainer key="defaultSimulatedMetrics">
+ <simulatedStatusBarMetrics key="statusBar"/>
+ <simulatedOrientationMetrics key="orientation"/>
+ <simulatedScreenMetrics key="destination"/>
+ </simulatedMetricsContainer>
+</document>
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/AppIcon.appiconset/Contents.json b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/AppIcon.appiconset/Contents.json
new file mode 100644
index 0000000000..2db2b1c7c6
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/AppIcon.appiconset/Contents.json
@@ -0,0 +1,58 @@
+{
+ "images" : [
+ {
+ "idiom" : "mac",
+ "size" : "16x16",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "16x16",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "32x32",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "32x32",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "128x128",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "128x128",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "256x256",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "256x256",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "512x512",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "512x512",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+} \ No newline at end of file
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/Contents.json b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/Contents.json
new file mode 100644
index 0000000000..0a87b6edc6
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/Contents.json
@@ -0,0 +1,23 @@
+{
+ "images" : [
+ {
+ "idiom" : "universal",
+ "scale" : "1x",
+ "filename" : "super_sylvain.png"
+ },
+ {
+ "idiom" : "universal",
+ "scale" : "2x",
+ "filename" : "super_sylvain@2x.png"
+ },
+ {
+ "idiom" : "universal",
+ "scale" : "3x",
+ "filename" : "super_sylvain@3x.png"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+} \ No newline at end of file
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain.png b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain.png
new file mode 100644
index 0000000000..0ba769182f
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain.png
Binary files differ
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@2x.png b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@2x.png
new file mode 100644
index 0000000000..edfa6a5682
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@2x.png
Binary files differ
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@3x.png b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@3x.png
new file mode 100644
index 0000000000..e0652efc72
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@3x.png
Binary files differ
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/TestApp-Info.plist b/third_party/python/gyp/test/ios/app-bundle/TestApp/TestApp-Info.plist
new file mode 100644
index 0000000000..bb90043682
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/TestApp-Info.plist
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>ause</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>NSMainNibFile</key>
+ <string>MainMenu</string>
+ <key>NSPrincipalClass</key>
+ <string>NSApplication</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/check_no_signature.py b/third_party/python/gyp/test/ios/app-bundle/TestApp/check_no_signature.py
new file mode 100644
index 0000000000..4f6e340072
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/check_no_signature.py
@@ -0,0 +1,13 @@
+#!/usr/bin/python
+
+import os
+import subprocess
+import sys
+
+p = os.path.join(os.environ['BUILT_PRODUCTS_DIR'], os.environ['EXECUTABLE_PATH'])
+proc = subprocess.Popen(['codesign', '-v', p],
+                        stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
+# Decode so the substring check works under both Python 2 and 3.
+o = proc.communicate()[0].decode('utf-8').strip()
+if "code object is not signed at all" not in o:
+ sys.stderr.write('File should not already be signed.')
+ sys.exit(1)
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/main.m b/third_party/python/gyp/test/ios/app-bundle/TestApp/main.m
new file mode 100644
index 0000000000..ec93e0e237
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/main.m
@@ -0,0 +1,13 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+int main(int argc, char *argv[])
+{
+ NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
+ int retVal = UIApplicationMain(argc, argv, nil, nil);
+ [pool release];
+ return retVal;
+}
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/only-compile-in-32-bits.m b/third_party/python/gyp/test/ios/app-bundle/TestApp/only-compile-in-32-bits.m
new file mode 100644
index 0000000000..28bb117788
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/only-compile-in-32-bits.m
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if defined(__LP64__)
+# error 64-bit build
+#endif
diff --git a/third_party/python/gyp/test/ios/app-bundle/TestApp/only-compile-in-64-bits.m b/third_party/python/gyp/test/ios/app-bundle/TestApp/only-compile-in-64-bits.m
new file mode 100644
index 0000000000..e6d2558418
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/TestApp/only-compile-in-64-bits.m
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !defined(__LP64__)
+# error 32-bit build
+#endif
diff --git a/third_party/python/gyp/test/ios/app-bundle/test-archs.gyp b/third_party/python/gyp/test/ios/app-bundle/test-archs.gyp
new file mode 100644
index 0000000000..fa935c4fb4
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/test-archs.gyp
@@ -0,0 +1,109 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ],
+ 'target_defaults': {
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist.strings',
+ 'TestApp/English.lproj/MainMenu.xib',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'CODE_SIGNING_REQUIRED': 'NO',
+ 'SDKROOT': 'iphoneos', # -isysroot
+ 'TARGETED_DEVICE_FAMILY': '1,2',
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'TestNoArchs',
+ 'product_name': 'TestNoArchs',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ 'TestApp/only-compile-in-32-bits.m',
+ ],
+ 'xcode_settings': {
+ 'VALID_ARCHS': [
+ 'i386',
+ 'x86_64',
+ 'arm64',
+ 'armv7',
+ ],
+ }
+ },
+ {
+ 'target_name': 'TestArch32Bits',
+ 'product_name': 'TestArch32Bits',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ 'TestApp/only-compile-in-32-bits.m',
+ ],
+ 'xcode_settings': {
+ 'ARCHS': [
+ '$(ARCHS_STANDARD)',
+ ],
+ 'VALID_ARCHS': [
+ 'i386',
+ 'armv7',
+ ],
+ },
+ },
+ {
+ 'target_name': 'TestArch64Bits',
+ 'product_name': 'TestArch64Bits',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ 'TestApp/only-compile-in-64-bits.m',
+ ],
+ 'xcode_settings': {
+ 'ARCHS': [
+ '$(ARCHS_STANDARD_INCLUDING_64_BIT)',
+ ],
+ 'VALID_ARCHS': [
+ 'x86_64',
+ 'arm64',
+ ],
+ },
+ },
+ {
+ 'target_name': 'TestMultiArchs',
+ 'product_name': 'TestMultiArchs',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ ],
+ 'xcode_settings': {
+ 'ARCHS': [
+ '$(ARCHS_STANDARD_INCLUDING_64_BIT)',
+ ],
+ 'VALID_ARCHS': [
+ 'x86_64',
+ 'i386',
+ 'arm64',
+ 'armv7',
+ ],
+ }
+ },
+ ],
+}
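The four targets in test-archs.gyp probe how ARCHS and VALID_ARCHS interact: Xcode compiles the intersection of the two lists, and the only-compile-in-32/64-bits sources then break the build if an unexpected width slips through. The filter, as a sketch:

    def effective_archs(archs, valid_archs):
        # Xcode builds only the architectures present in both lists.
        return [a for a in archs if a in valid_archs]

    # e.g. TestArch32Bits sets ARCHS to $(ARCHS_STANDARD) but limits
    # VALID_ARCHS to i386/armv7, so only 32-bit slices are compiled.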
diff --git a/third_party/python/gyp/test/ios/app-bundle/test-assets-catalog.gyp b/third_party/python/gyp/test/ios/app-bundle/test-assets-catalog.gyp
new file mode 100644
index 0000000000..9a12d07af7
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/test-assets-catalog.gyp
@@ -0,0 +1,45 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['"<(GENERATOR)"=="ninja"', {
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+ }],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App Assets Catalog Gyp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ ],
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist.strings',
+ 'TestApp/English.lproj/MainMenu.xib',
+ 'TestApp/English.lproj/Main_iPhone.storyboard',
+ 'TestApp/Images.xcassets',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ 'SDKROOT': 'iphonesimulator', # -isysroot
+ 'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ios/app-bundle/test-crosscompile.gyp b/third_party/python/gyp/test/ios/app-bundle/test-crosscompile.gyp
new file mode 100644
index 0000000000..d9049588ba
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/test-crosscompile.gyp
@@ -0,0 +1,47 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ],
+ 'targets': [
+ # This target will not be built, but is here so that ninja Xcode emulation
+ # understands that this is a multi-platform (ios + mac) build.
+ {
+ 'target_name': 'TestDummy',
+ 'product_name': 'TestDummy',
+ 'toolsets': ['target'],
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'tool_main.cc',
+ ],
+ 'xcode_settings': {
+ 'SDKROOT': 'iphonesimulator', # -isysroot
+ 'TARGETED_DEVICE_FAMILY': '1,2',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
+ },
+ },
+ {
+ 'target_name': 'TestHost',
+ 'product_name': 'TestHost',
+ 'toolsets': ['host'],
+ 'type': 'executable',
+ 'mac_bundle': 0,
+ 'sources': [
+ 'tool_main.cc',
+ ],
+ 'xcode_settings': {
+ 'SDKROOT': 'macosx',
+ 'ARCHS': [
+ '$(ARCHS_STANDARD)',
+ 'x86_64',
+ ],
+ 'VALID_ARCHS': [
+ 'x86_64',
+ ],
+ }
+ }
+ ],
+}
diff --git a/third_party/python/gyp/test/ios/app-bundle/test-device.gyp b/third_party/python/gyp/test/ios/app-bundle/test-device.gyp
new file mode 100644
index 0000000000..a0cfff7cdb
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/test-device.gyp
@@ -0,0 +1,109 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['"<(GENERATOR)"=="xcode"', {
+ 'target_defaults': {
+ 'configurations': {
+ 'Default': {
+ 'xcode_settings': {
+ 'SDKROOT': 'iphonesimulator',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ }
+ },
+ 'Default-iphoneos': {
+ 'xcode_settings': {
+ 'SDKROOT': 'iphoneos',
+ 'CONFIGURATION_BUILD_DIR':'build/Default-iphoneos',
+ }
+ },
+ },
+ },
+ }, {
+ 'target_defaults': {
+ 'configurations': {
+ 'Default': {
+ 'xcode_settings': {
+ 'SDKROOT': 'iphonesimulator',
+ }
+ },
+ },
+ },
+ }],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App Gyp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ ],
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist.strings',
+ 'TestApp/English.lproj/MainMenu.xib',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'SDKROOT': 'iphonesimulator', # -isysroot
+ 'TARGETED_DEVICE_FAMILY': '1,2',
+ 'INFOPLIST_OUTPUT_FORMAT':'xml',
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '8.0',
+ 'CODE_SIGNING_REQUIRED': 'NO',
+ 'CODE_SIGN_IDENTITY[sdk=iphoneos*]': '',
+
+ },
+ },
+ {
+ 'target_name': 'sig_test',
+ 'product_name': 'sigtest',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ ],
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist.strings',
+ 'TestApp/English.lproj/MainMenu.xib',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Verify no signature',
+ 'action': [
+ 'python',
+ 'TestApp/check_no_signature.py'
+ ],
+ },
+ ],
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'SDKROOT': 'iphonesimulator', # -isysroot
+ 'CODE_SIGN_IDENTITY[sdk=iphoneos*]': 'iPhone Developer',
+ 'INFOPLIST_OUTPUT_FORMAT':'xml',
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '8.0',
+ 'CONFIGURATION_BUILD_DIR':'buildsig/Default',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ios/app-bundle/test.gyp b/third_party/python/gyp/test/ios/app-bundle/test.gyp
new file mode 100644
index 0000000000..544c589f60
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/test.gyp
@@ -0,0 +1,75 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['"<(GENERATOR)"=="ninja"', {
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+ }],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App Gyp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ ],
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist.strings',
+ 'TestApp/English.lproj/LanguageMap.plist',
+ 'TestApp/English.lproj/MainMenu.xib',
+ 'TestApp/English.lproj/Main_iPhone.storyboard',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ 'SDKROOT': 'iphonesimulator', # -isysroot
+ 'IPHONEOS_DEPLOYMENT_TARGET': '5.0',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ },
+ },
+ {
+ 'target_name': 'test_app_xml',
+ 'product_name': 'Test App Gyp XML',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ ],
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist.strings',
+ 'TestApp/English.lproj/MainMenu.xib',
+ 'TestApp/English.lproj/Main_iPhone.storyboard',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ 'INFOPLIST_OUTPUT_FORMAT':'xml',
+ 'SDKROOT': 'iphonesimulator', # -isysroot
+ 'IPHONEOS_DEPLOYMENT_TARGET': '5.0',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ios/app-bundle/tool_main.cc b/third_party/python/gyp/test/ios/app-bundle/tool_main.cc
new file mode 100644
index 0000000000..9dc3c94f34
--- /dev/null
+++ b/third_party/python/gyp/test/ios/app-bundle/tool_main.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/Info.plist b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/Info.plist
new file mode 100644
index 0000000000..a0985c3e4d
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/Info.plist
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIdentifier</key>
+ <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/copies-with-xcode-envvars.gyp b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/copies-with-xcode-envvars.gyp
new file mode 100644
index 0000000000..217dbb0479
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/copies-with-xcode-envvars.gyp
@@ -0,0 +1,97 @@
+# Copyright (c) 2016 Mark Callow. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# For testing use of the UI settings & environment variables
+# available in Xcode's PBXCopyFilesBuildPhase.
+{
+ 'targets': [
+ {
+ 'target_name': 'copies-with-xcode-envvars',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'xcode_settings': {
+ 'SDKROOT': 'iphoneos',
+ 'TARGETED_DEVICE_FAMILY': '1,2',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
+ 'CODE_SIGNING_REQUIRED': 'NO',
+ 'INFOPLIST_FILE': 'Info.plist',
+ # This is where the test framework looks for results. Without
+ # this line the result will be in build/Default-iphoneos.
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ },
+ 'sources': [ 'empty.c' ],
+ 'conditions': [
+ ['OS == "ios" or OS == "mac"', {
+ 'copies': [{
+ 'destination': '$(BUILT_PRODUCTS_DIR)',
+ 'files': [
+ 'file0',
+ ],
+ }, {
+ 'destination': '$(BUILT_PRODUCTS_DIR)/$(WRAPPER_NAME)',
+ 'files': [
+ 'file1',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(EXECUTABLE_FOLDER_PATH)',
+ 'files': [
+ 'file2',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(UNLOCALIZED_RESOURCES_FOLDER_PATH)',
+ 'files': [
+ 'file3',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(UNLOCALIZED_RESOURCES_FOLDER_PATH)/testimages',
+ 'files': [
+ 'file4',
+ ],
+ }, {
+ 'destination': '$(BUILT_PRODUCTS_DIR)/$(JAVA_FOLDER_PATH)',
+ 'files': [
+ 'file5',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(FRAMEWORKS_FOLDER_PATH)',
+ 'files': [
+ 'file6',
+ ],
+ }, {
+ # NOTE: This is not an Xcode macro name but
+ # xcodeproj_file.py recognizes it and sends
+ # the output to the same place as
+ # $(FRAMEWORKS_FOLDER_PATH). xcode_emulation.py
+ # sets its value to an absolute path.
+ 'destination': '$(BUILT_FRAMEWORKS_DIR)',
+ 'files': [
+ 'file7',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(SHARED_FRAMEWORKS_FOLDER_PATH)',
+ 'files': [
+ 'file8',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(SHARED_SUPPORT_FOLDER_PATH)',
+ 'files': [
+ 'file9',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(PLUGINS_FOLDER_PATH)',
+ 'files': [
+ 'file10',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(XPCSERVICES_FOLDER_PATH)',
+ 'files': [
+ 'file11',
+ ],
+ }], # copies
+ }], # OS == "ios" or OS == "mac"
+ ], # conditions
+ }], # targets
+}
+
+# vim:ai:ts=4:sts=4:sw=2:expandtab:textwidth=70
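On iOS the bundle layout is shallow, so the wrapper, executable-folder, and unlocalized-resources macros above all resolve to the .app root; gyptest-copies-with-xcode-envvars.py later in this patch asserts the resulting paths. A sketch of the mapping, with values read off that test's assertions rather than Xcode documentation:

    # file -> destination relative to the built products dir, per the test.
    expected = {
        'file0': '',                                        # BUILT_PRODUCTS_DIR
        'file1': 'copies-with-xcode-envvars.app/',          # WRAPPER_NAME
        'file2': 'copies-with-xcode-envvars.app/',          # EXECUTABLE_FOLDER_PATH
        'file3': 'copies-with-xcode-envvars.app/',          # UNLOCALIZED_RESOURCES_FOLDER_PATH
        'file4': 'copies-with-xcode-envvars.app/testimages/',
        'file5': 'copies-with-xcode-envvars.app/Java/',
        'file6': 'copies-with-xcode-envvars.app/Frameworks/',
        'file7': 'copies-with-xcode-envvars.app/Frameworks/',  # BUILT_FRAMEWORKS_DIR
        'file8': 'copies-with-xcode-envvars.app/SharedFrameworks/',
        'file9': 'copies-with-xcode-envvars.app/SharedSupport/',
        'file10': 'copies-with-xcode-envvars.app/PlugIns/',
        'file11': 'copies-with-xcode-envvars.app/XPCServices/',
    }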
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/empty.c b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/empty.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/empty.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file0 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file0
new file mode 100644
index 0000000000..117889361f
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file0
@@ -0,0 +1 @@
+file0 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file1 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file1
new file mode 100644
index 0000000000..84d55c5759
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file1
@@ -0,0 +1 @@
+file1 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file10 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file10
new file mode 100644
index 0000000000..372e992ef9
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file10
@@ -0,0 +1 @@
+file10 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file11 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file11
new file mode 100644
index 0000000000..923e760e1f
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file11
@@ -0,0 +1 @@
+file11 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file2 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file2
new file mode 100644
index 0000000000..af1b8ae35d
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file2
@@ -0,0 +1 @@
+file2 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file3 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file3
new file mode 100644
index 0000000000..43f16f3522
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file3
@@ -0,0 +1 @@
+file3 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file4 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file4
new file mode 100644
index 0000000000..5f7270a084
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file4
@@ -0,0 +1 @@
+file4 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file5 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file5
new file mode 100644
index 0000000000..41f47186bd
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file5
@@ -0,0 +1 @@
+file5 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file6 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file6
new file mode 100644
index 0000000000..f5d5757348
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file6
@@ -0,0 +1 @@
+file6 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file7 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file7
new file mode 100644
index 0000000000..90dbe6e9e1
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file7
@@ -0,0 +1 @@
+file7 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file8 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file8
new file mode 100644
index 0000000000..9eb613fabb
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file8
@@ -0,0 +1 @@
+file8 contents
diff --git a/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file9 b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file9
new file mode 100644
index 0000000000..e37ac72ada
--- /dev/null
+++ b/third_party/python/gyp/test/ios/copies-with-xcode-envvars/file9
@@ -0,0 +1 @@
+file9 contents
diff --git a/third_party/python/gyp/test/ios/deployment-target/check-version-min.c b/third_party/python/gyp/test/ios/deployment-target/check-version-min.c
new file mode 100644
index 0000000000..761c529085
--- /dev/null
+++ b/third_party/python/gyp/test/ios/deployment-target/check-version-min.c
@@ -0,0 +1,33 @@
+/* Copyright (c) 2013 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <Availability.h>
+
+/* GYPTEST_MAC_VERSION_MIN: should be set to the value corresponding to the
+ * Xcode setting 'MACOSX_DEPLOYMENT_TARGET'; otherwise both should be
+ * left undefined.
+ *
+ * GYPTEST_IOS_VERSION_MIN: should be set to the value corresponding to the
+ * Xcode setting 'IPHONEOS_DEPLOYMENT_TARGET'; otherwise both should be
+ * left undefined.
+ */
+
+#if defined(GYPTEST_MAC_VERSION_MIN)
+# if GYPTEST_MAC_VERSION_MIN != __MAC_OS_X_VERSION_MIN_REQUIRED
+# error __MAC_OS_X_VERSION_MIN_REQUIRED has wrong value
+# endif
+#elif defined(__MAC_OS_X_VERSION_MIN_REQUIRED)
+# error __MAC_OS_X_VERSION_MIN_REQUIRED should be undefined
+#endif
+
+#if defined(GYPTEST_IOS_VERSION_MIN)
+# if GYPTEST_IOS_VERSION_MIN != __IPHONE_OS_VERSION_MIN_REQUIRED
+# error __IPHONE_OS_VERSION_MIN_REQUIRED has wrong value
+# endif
+#elif defined(__IPHONE_OS_VERSION_MIN_REQUIRED)
+# error __IPHONE_OS_VERSION_MIN_REQUIRED should be undefined
+#endif
+
+int main() { return 0; }
+
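The GYPTEST_*_VERSION_MIN values compared above use the Availability.h integer encoding, two decimal digits per component: MAJOR*10000 + MINOR*100 + PATCH. That is why deployment-target.gyp below pairs an IPHONEOS_DEPLOYMENT_TARGET of '4.3' with 40300 and '5.0' with 50000. A small sketch of the conversion (helper name is illustrative):

    def deployment_target_to_min(version):
        # '4.3' -> 40300, '5.0' -> 50000, '10.11.2' -> 101102.
        parts = ([int(p) for p in version.split('.')] + [0, 0])[:3]
        major, minor, patch = parts
        return major * 10000 + minor * 100 + patch

    assert deployment_target_to_min('4.3') == 40300
    assert deployment_target_to_min('5.0') == 50000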
diff --git a/third_party/python/gyp/test/ios/deployment-target/deployment-target.gyp b/third_party/python/gyp/test/ios/deployment-target/deployment-target.gyp
new file mode 100644
index 0000000000..bdc1439b5e
--- /dev/null
+++ b/third_party/python/gyp/test/ios/deployment-target/deployment-target.gyp
@@ -0,0 +1,34 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'version-min-4.3',
+ 'type': 'static_library',
+ 'sources': [ 'check-version-min.c', ],
+ 'defines': [ 'GYPTEST_IOS_VERSION_MIN=40300', ],
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'SDKROOT': 'iphoneos',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '4.3',
+ },
+ },
+ {
+ 'target_name': 'version-min-5.0',
+ 'type': 'static_library',
+ 'sources': [ 'check-version-min.c', ],
+ 'defines': [ 'GYPTEST_IOS_VERSION_MIN=50000', ],
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'SDKROOT': 'iphoneos',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '5.0',
+ },
+ }
+ ],
+}
+
diff --git a/third_party/python/gyp/test/ios/extension/ActionExtension/ActionViewController.h b/third_party/python/gyp/test/ios/extension/ActionExtension/ActionViewController.h
new file mode 100644
index 0000000000..1c92509029
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ActionExtension/ActionViewController.h
@@ -0,0 +1,9 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+@interface ActionViewController : UIViewController
+
+@end
diff --git a/third_party/python/gyp/test/ios/extension/ActionExtension/ActionViewController.m b/third_party/python/gyp/test/ios/extension/ActionExtension/ActionViewController.m
new file mode 100644
index 0000000000..d37bacdae1
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ActionExtension/ActionViewController.m
@@ -0,0 +1,31 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "ActionViewController.h"
+#import <MobileCoreServices/MobileCoreServices.h>
+
+@interface ActionViewController ()
+
+@end
+
+@implementation ActionViewController
+
+- (void)viewDidLoad {
+ [super viewDidLoad];
+}
+
+- (void)didReceiveMemoryWarning {
+ [super didReceiveMemoryWarning];
+ // Dispose of any resources that can be recreated.
+}
+
+- (IBAction)done {
+ // Return any edited content to the host app.
+  // This template doesn't do anything, so we just echo the passed-in items.
+ [self.extensionContext
+ completeRequestReturningItems:self.extensionContext.inputItems
+ completionHandler:nil];
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/extension/ActionExtension/Info.plist b/third_party/python/gyp/test/ios/extension/ActionExtension/Info.plist
new file mode 100644
index 0000000000..f89cd790bc
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ActionExtension/Info.plist
@@ -0,0 +1,42 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleDisplayName</key>
+ <string>ActionExtension</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.gyptest.extension.ActionExtension</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>XPC!</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>NSExtension</key>
+ <dict>
+ <key>NSExtensionAttributes</key>
+ <dict>
+ <key>NSExtensionActivationRule</key>
+ <string>TRUEPREDICATE</string>
+ <key>NSExtensionPointName</key>
+ <string>com.apple.ui-services</string>
+ <key>NSExtensionPointVersion</key>
+ <string>1.0</string>
+ </dict>
+ <key>NSExtensionMainStoryboard</key>
+ <string>MainInterface</string>
+ <key>NSExtensionPointIdentifier</key>
+ <string>com.apple.ui-services</string>
+ </dict>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/extension/ActionExtension/MainInterface.storyboard b/third_party/python/gyp/test/ios/extension/ActionExtension/MainInterface.storyboard
new file mode 100644
index 0000000000..5aa58184e8
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ActionExtension/MainInterface.storyboard
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="6148" systemVersion="14A229a" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" initialViewController="ObA-dk-sSI">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="6147"/>
+ </dependencies>
+ <scenes>
+ <!--Action View Controller - Image-->
+ <scene sceneID="7MM-of-jgj">
+ <objects>
+ <viewController title="Image" id="ObA-dk-sSI" customClass="ActionViewController" customModuleProvider="" sceneMemberID="viewController">
+ <layoutGuides>
+ <viewControllerLayoutGuide type="top" id="qkL-Od-lgU"/>
+ <viewControllerLayoutGuide type="bottom" id="n38-gi-rB5"/>
+ </layoutGuides>
+ <view key="view" contentMode="scaleToFill" id="zMn-AG-sqS">
+ <rect key="frame" x="0.0" y="0.0" width="320" height="528"/>
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
+ <subviews>
+ <imageView userInteractionEnabled="NO" contentMode="scaleAspectFit" horizontalHuggingPriority="251" verticalHuggingPriority="251" translatesAutoresizingMaskIntoConstraints="NO" id="9ga-4F-77Z">
+ <rect key="frame" x="0.0" y="64" width="320" height="464"/>
+ </imageView>
+ <navigationBar contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="NOA-Dm-cuz">
+ <rect key="frame" x="0.0" y="20" width="320" height="44"/>
+ <items>
+ <navigationItem id="3HJ-uW-3hn">
+ <barButtonItem key="leftBarButtonItem" title="Done" style="done" id="WYi-yp-eM6">
+ <connections>
+ <action selector="done" destination="ObA-dk-sSI" id="Qdu-qn-U6V"/>
+ </connections>
+ </barButtonItem>
+ </navigationItem>
+ </items>
+ </navigationBar>
+ </subviews>
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
+ <constraints>
+ <constraint firstAttribute="trailing" secondItem="NOA-Dm-cuz" secondAttribute="trailing" id="A05-Pj-hrr"/>
+ <constraint firstItem="9ga-4F-77Z" firstAttribute="top" secondItem="NOA-Dm-cuz" secondAttribute="bottom" id="Fps-3D-QQW"/>
+ <constraint firstItem="NOA-Dm-cuz" firstAttribute="leading" secondItem="zMn-AG-sqS" secondAttribute="leading" id="HxO-8t-aoh"/>
+ <constraint firstAttribute="trailing" secondItem="9ga-4F-77Z" secondAttribute="trailing" id="Ozw-Hg-0yh"/>
+ <constraint firstItem="9ga-4F-77Z" firstAttribute="leading" secondItem="zMn-AG-sqS" secondAttribute="leading" id="XH5-ld-ONA"/>
+ <constraint firstItem="n38-gi-rB5" firstAttribute="top" secondItem="9ga-4F-77Z" secondAttribute="bottom" id="eQg-nn-Zy4"/>
+ <constraint firstItem="NOA-Dm-cuz" firstAttribute="top" secondItem="qkL-Od-lgU" secondAttribute="bottom" id="we0-1t-bgp"/>
+ </constraints>
+ </view>
+ <freeformSimulatedSizeMetrics key="simulatedDestinationMetrics"/>
+ <size key="freeformSize" width="320" height="528"/>
+ <connections>
+ <outlet property="imageView" destination="9ga-4F-77Z" id="5y6-5w-9QO"/>
+ <outlet property="view" destination="zMn-AG-sqS" id="Qma-de-2ek"/>
+ </connections>
+ </viewController>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="X47-rx-isc" userLabel="First Responder" sceneMemberID="firstResponder"/>
+ </objects>
+ <point key="canvasLocation" x="252" y="-124"/>
+ </scene>
+ </scenes>
+ <simulatedMetricsContainer key="defaultSimulatedMetrics">
+ <simulatedStatusBarMetrics key="statusBar"/>
+ <simulatedOrientationMetrics key="orientation"/>
+ <simulatedScreenMetrics key="destination" type="retina4"/>
+ </simulatedMetricsContainer>
+</document>
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/AppDelegate.h b/third_party/python/gyp/test/ios/extension/ExtensionContainer/AppDelegate.h
new file mode 100644
index 0000000000..510e2300b1
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/AppDelegate.h
@@ -0,0 +1,12 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+@interface AppDelegate : UIResponder <UIApplicationDelegate>
+
+@property (strong, nonatomic) UIWindow *window;
+
+@end
+
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/AppDelegate.m b/third_party/python/gyp/test/ios/extension/ExtensionContainer/AppDelegate.m
new file mode 100644
index 0000000000..1197bc1bbc
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/AppDelegate.m
@@ -0,0 +1,19 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "AppDelegate.h"
+
+@interface AppDelegate ()
+
+@end
+
+@implementation AppDelegate
+
+- (BOOL)application:(UIApplication*)application
+ didFinishLaunchingWithOptions:(NSDictionary*)launchOptions {
+ // Override point for customization after application launch.
+ return YES;
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/Base.lproj/Main.storyboard b/third_party/python/gyp/test/ios/extension/ExtensionContainer/Base.lproj/Main.storyboard
new file mode 100644
index 0000000000..e8f3cfb40c
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/Base.lproj/Main.storyboard
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="6162" systemVersion="14A238h" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" initialViewController="BYZ-38-t0r">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="6160"/>
+ </dependencies>
+ <scenes>
+ <!--View Controller-->
+ <scene sceneID="tne-QT-ifu">
+ <objects>
+ <viewController id="BYZ-38-t0r" customClass="ViewController" customModuleProvider="" sceneMemberID="viewController">
+ <layoutGuides>
+ <viewControllerLayoutGuide type="top" id="y3c-jy-aDJ"/>
+ <viewControllerLayoutGuide type="bottom" id="wfy-db-euE"/>
+ </layoutGuides>
+ <view key="view" contentMode="scaleToFill" id="8bC-Xf-vdC">
+ <rect key="frame" x="0.0" y="0.0" width="480" height="480"/>
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
+ </view>
+ </viewController>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="dkx-z0-nzr" sceneMemberID="firstResponder"/>
+ </objects>
+ </scene>
+ </scenes>
+</document>
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/Images.xcassets/AppIcon.appiconset/Contents.json b/third_party/python/gyp/test/ios/extension/ExtensionContainer/Images.xcassets/AppIcon.appiconset/Contents.json
new file mode 100644
index 0000000000..f697f61f4a
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/Images.xcassets/AppIcon.appiconset/Contents.json
@@ -0,0 +1,53 @@
+{
+ "images" : [
+ {
+ "idiom" : "iphone",
+ "size" : "29x29",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "40x40",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "60x60",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "29x29",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "29x29",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "40x40",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "40x40",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "76x76",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "76x76",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/Images.xcassets/LaunchImage.launchimage/Contents.json b/third_party/python/gyp/test/ios/extension/ExtensionContainer/Images.xcassets/LaunchImage.launchimage/Contents.json
new file mode 100644
index 0000000000..4458b40c05
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/Images.xcassets/LaunchImage.launchimage/Contents.json
@@ -0,0 +1,51 @@
+{
+ "images" : [
+ {
+ "orientation" : "portrait",
+ "idiom" : "iphone",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "iphone",
+ "subtype" : "retina4",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "1x"
+ },
+ {
+ "orientation" : "landscape",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "1x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "landscape",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/Info.plist b/third_party/python/gyp/test/ios/extension/ExtensionContainer/Info.plist
new file mode 100644
index 0000000000..31ccf4cc82
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/Info.plist
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleExecutable</key>
+ <string>ExtensionContainer</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.gyptest.extension</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSRequiresIPhoneOS</key>
+ <true/>
+ <key>UIMainStoryboardFile</key>
+ <string>Main</string>
+ <key>UIRequiredDeviceCapabilities</key>
+ <array>
+ <string>armv7</string>
+ </array>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/ViewController.h b/third_party/python/gyp/test/ios/extension/ExtensionContainer/ViewController.h
new file mode 100644
index 0000000000..fad7754714
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/ViewController.h
@@ -0,0 +1,11 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+@interface ViewController : UIViewController
+
+
+@end
+
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/ViewController.m b/third_party/python/gyp/test/ios/extension/ExtensionContainer/ViewController.m
new file mode 100644
index 0000000000..3810fa9cba
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/ViewController.m
@@ -0,0 +1,24 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "ViewController.h"
+
+@interface ViewController ()
+
+
+@end
+
+@implementation ViewController
+
+- (void)viewDidLoad {
+ [super viewDidLoad];
+ // Do any additional setup after loading the view, typically from a nib.
+}
+
+- (void)didReceiveMemoryWarning {
+ [super didReceiveMemoryWarning];
+ // Dispose of any resources that can be recreated.
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/extension/ExtensionContainer/main.m b/third_party/python/gyp/test/ios/extension/ExtensionContainer/main.m
new file mode 100644
index 0000000000..47aecb5148
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/ExtensionContainer/main.m
@@ -0,0 +1,13 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#import <UIKit/UIKit.h>
+#import "AppDelegate.h"
+
+int main(int argc, char* argv[]) {
+ @autoreleasepool {
+ return UIApplicationMain(argc, argv, nil,
+ NSStringFromClass([AppDelegate class]));
+ }
+}
diff --git a/third_party/python/gyp/test/ios/extension/extension.gyp b/third_party/python/gyp/test/ios/extension/extension.gyp
new file mode 100644
index 0000000000..91c068413d
--- /dev/null
+++ b/third_party/python/gyp/test/ios/extension/extension.gyp
@@ -0,0 +1,91 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'ExtensionContainer',
+ 'product_name': 'ExtensionContainer',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'mac_bundle_resources': [
+ 'ExtensionContainer/Base.lproj/Main.storyboard',
+ ],
+ 'sources': [
+ 'ExtensionContainer/AppDelegate.h',
+ 'ExtensionContainer/AppDelegate.m',
+ 'ExtensionContainer/ViewController.h',
+ 'ExtensionContainer/ViewController.m',
+ 'ExtensionContainer/main.m',
+ ],
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/ExtensionContainer.app/PlugIns',
+ 'files': [
+ '<(PRODUCT_DIR)/ActionExtension.appex',
+ ]}],
+ 'dependencies': [
+ 'ActionExtension'
+ ],
+
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'INFOPLIST_FILE': 'ExtensionContainer/Info.plist',
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'ARCHS': [ 'armv7' ],
+ 'SDKROOT': 'iphoneos',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
+ 'CODE_SIGNING_REQUIRED': 'NO',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ },
+ },
+ {
+ 'target_name': 'ActionExtension',
+ 'product_name': 'ActionExtension',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'ios_app_extension': 1,
+ 'sources': [
+ 'ActionExtension/ActionViewController.h',
+ 'ActionExtension/ActionViewController.m',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ '$(SDKROOT)/System/Library/Frameworks/MobileCoreServices.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'INFOPLIST_FILE': 'ActionExtension/Info.plist',
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'ARCHS': [ 'armv7' ],
+ 'SDKROOT': 'iphoneos',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '7.0',
+ 'CODE_SIGNING_REQUIRED': 'NO',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ },
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/ios/framework/framework.gyp b/third_party/python/gyp/test/ios/framework/framework.gyp
new file mode 100644
index 0000000000..2c6fdd5b27
--- /dev/null
+++ b/third_party/python/gyp/test/ios/framework/framework.gyp
@@ -0,0 +1,43 @@
+# Copyright 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'iOSFramework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'iOSFramework/iOSFramework.h',
+ 'iOSFramework/Thing.h',
+ 'iOSFramework/Thing.m',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'mac_framework_headers': [
+ # Using two headers here tests mac_tool.py NextGreaterPowerOf2.
+ 'iOSFramework/iOSFramework.h',
+ 'iOSFramework/Thing.h',
+ ],
+ 'mac_framework_dirs': [
+ '$(SDKROOT)/../../Library/Frameworks',
+ ],
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'INFOPLIST_FILE': 'iOSFramework/Info.plist',
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'SDKROOT': 'iphoneos',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '8.0',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ 'CODE_SIGN_IDENTITY[sdk=iphoneos*]': 'iPhone Developer',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ios/framework/iOSFramework/Info.plist b/third_party/python/gyp/test/ios/framework/iOSFramework/Info.plist
new file mode 100644
index 0000000000..d3de8eefb6
--- /dev/null
+++ b/third_party/python/gyp/test/ios/framework/iOSFramework/Info.plist
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleExecutable</key>
+ <string>$(EXECUTABLE_NAME)</string>
+ <key>CFBundleIdentifier</key>
+ <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>$(PRODUCT_NAME)</string>
+ <key>CFBundlePackageType</key>
+ <string>FMWK</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>$(CURRENT_PROJECT_VERSION)</string>
+ <key>NSPrincipalClass</key>
+ <string></string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/framework/iOSFramework/Thing.h b/third_party/python/gyp/test/ios/framework/iOSFramework/Thing.h
new file mode 100644
index 0000000000..a34e908fc9
--- /dev/null
+++ b/third_party/python/gyp/test/ios/framework/iOSFramework/Thing.h
@@ -0,0 +1,10 @@
+#import <Foundation/Foundation.h>
+#import <UIKit/UIKit.h>
+
+@interface Thing : NSObject
+
++ (instancetype)thing;
+
+- (void)sayHello;
+
+@end
diff --git a/third_party/python/gyp/test/ios/framework/iOSFramework/Thing.m b/third_party/python/gyp/test/ios/framework/iOSFramework/Thing.m
new file mode 100644
index 0000000000..5b2b54925e
--- /dev/null
+++ b/third_party/python/gyp/test/ios/framework/iOSFramework/Thing.m
@@ -0,0 +1,22 @@
+#import "Thing.h"
+
+@interface Thing ()
+
+@end
+
+@implementation Thing
+
++ (instancetype)thing {
+ static Thing* thing = nil;
+ static dispatch_once_t onceToken;
+ dispatch_once(&onceToken, ^{
+ thing = [[[self class] alloc] init];
+ });
+ return thing;
+}
+
+- (void)sayHello {
+ NSLog(@"Hello World");
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/framework/iOSFramework/iOSFramework.h b/third_party/python/gyp/test/ios/framework/iOSFramework/iOSFramework.h
new file mode 100644
index 0000000000..e86b524d17
--- /dev/null
+++ b/third_party/python/gyp/test/ios/framework/iOSFramework/iOSFramework.h
@@ -0,0 +1,9 @@
+#import <UIKit/UIKit.h>
+
+//! Project version number for iOSFramework.
+FOUNDATION_EXPORT double iOSFrameworkVersionNumber;
+
+//! Project version string for iOSFramework.
+FOUNDATION_EXPORT const unsigned char iOSFrameworkVersionString[];
+
+#import <iOSFramework/Thing.h>
diff --git a/third_party/python/gyp/test/ios/gyptest-app-ios-assets-catalog.py b/third_party/python/gyp/test/ios/gyptest-app-ios-assets-catalog.py
new file mode 100755
index 0000000000..efd96ac752
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-app-ios-assets-catalog.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ios app bundles are built correctly.
+"""
+
+import TestGyp
+import TestMac
+
+import os.path
+import sys
+
+# Xcode support for asset catalogs was introduced in Xcode 6.0.
+if sys.platform == 'darwin' and TestMac.Xcode.Version() >= '0600':
+ test_gyp_path = 'test-assets-catalog.gyp'
+ test_app_path = 'Test App Assets Catalog Gyp.app'
+
+ test = TestGyp.TestGyp(formats=['xcode', 'ninja'])
+ test.run_gyp(test_gyp_path, chdir='app-bundle')
+ test.build(test_gyp_path, test.ALL, chdir='app-bundle')
+
+  # Test that the extension is .app
+ test.built_file_must_exist(
+ os.path.join(test_app_path, 'Test App Assets Catalog Gyp'),
+ chdir='app-bundle')
+
+ # Info.plist
+ info_plist = test.built_file_path(
+ os.path.join(test_app_path, 'Info.plist'),
+ chdir='app-bundle')
+ # Resources
+ test.built_file_must_exist(
+ os.path.join(test_app_path, 'English.lproj/InfoPlist.strings'),
+ chdir='app-bundle')
+ test.built_file_must_exist(
+ os.path.join(test_app_path, 'English.lproj/MainMenu.nib'),
+ chdir='app-bundle')
+ test.built_file_must_exist(
+ os.path.join(test_app_path, 'English.lproj/Main_iPhone.storyboardc'),
+ chdir='app-bundle')
+ test.built_file_must_exist(
+ os.path.join(test_app_path, 'Assets.car'),
+ chdir='app-bundle')
+
+ # Packaging
+ test.built_file_must_exist(
+ os.path.join(test_app_path, 'PkgInfo'),
+ chdir='app-bundle')
+ test.built_file_must_match(
+ os.path.join(test_app_path, 'PkgInfo'), 'APPLause',
+ chdir='app-bundle')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/ios/gyptest-app-ios.py b/third_party/python/gyp/test/ios/gyptest-app-ios.py
new file mode 100755
index 0000000000..99f9e865dc
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-app-ios.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ios app bundles are built correctly.
+"""
+
+import TestGyp
+
+import subprocess
+import sys
+
+def CheckFileXMLPropertyList(file):
+ output = subprocess.check_output(['file', file])
+  if 'XML 1.0 document text' not in output:
+ print('File: Expected XML 1.0 document text, got %s' % output)
+ test.fail_test()
+
+def CheckFileBinaryPropertyList(file):
+ output = subprocess.check_output(['file', file])
+  if 'Apple binary property list' not in output:
+ print('File: Expected Apple binary property list, got %s' % output)
+ test.fail_test()
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['xcode', 'ninja'])
+
+ test.run_gyp('test.gyp', chdir='app-bundle')
+
+ test.build('test.gyp', test.ALL, chdir='app-bundle')
+
+  # Test that the extension is .app
+ test.built_file_must_exist('Test App Gyp.app/Test App Gyp',
+ chdir='app-bundle')
+
+ # Info.plist
+ info_plist = test.built_file_path('Test App Gyp.app/Info.plist',
+ chdir='app-bundle')
+ test.built_file_must_exist(info_plist)
+ CheckFileBinaryPropertyList(info_plist)
+
+ # XML Info.plist
+ info_plist = test.built_file_path('Test App Gyp XML.app/Info.plist',
+ chdir='app-bundle')
+ CheckFileXMLPropertyList(info_plist)
+
+ # Resources
+ strings_file = test.built_file_path(
+ 'Test App Gyp.app/English.lproj/InfoPlist.strings',
+ chdir='app-bundle')
+ test.built_file_must_exist(strings_file)
+ CheckFileBinaryPropertyList(strings_file)
+
+ extra_plist_file = test.built_file_path(
+ 'Test App Gyp.app/English.lproj/LanguageMap.plist',
+ chdir='app-bundle')
+ test.built_file_must_exist(extra_plist_file)
+ CheckFileBinaryPropertyList(extra_plist_file)
+
+ test.built_file_must_exist(
+ 'Test App Gyp.app/English.lproj/MainMenu.nib',
+ chdir='app-bundle')
+ test.built_file_must_exist(
+ 'Test App Gyp.app/English.lproj/Main_iPhone.storyboardc',
+ chdir='app-bundle')
+
+ # Packaging
+ test.built_file_must_exist('Test App Gyp.app/PkgInfo',
+ chdir='app-bundle')
+ test.built_file_must_match('Test App Gyp.app/PkgInfo', 'APPLause',
+ chdir='app-bundle')
+
+ test.pass_test()
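These test helpers predate Python 3, where subprocess.check_output() returns bytes and the substring checks above would raise TypeError. A sketch of a variant that works on either interpreter (the helper name is illustrative, not part of the suite):

    def check_file_kind(path, expected):
        # Decode so the 'in' test compares str to str on Python 3 as well.
        output = subprocess.check_output(['file', path]).decode('utf-8')
        if expected not in output:
            print('File: Expected %s, got %s' % (expected, output))
            test.fail_test()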
diff --git a/third_party/python/gyp/test/ios/gyptest-archs.py b/third_party/python/gyp/test/ios/gyptest-archs.py
new file mode 100644
index 0000000000..c3340431bd
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-archs.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that device and simulator bundles are built correctly.
+"""
+
+import TestGyp
+import TestMac
+
+import collections
+import sys
+
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
+
+ if test.format == 'xcode':
+ # This test appears to hang flakily.
+ test.skip_test() # bug=532
+
+ test_cases = [
+ ('Default', 'TestArch32Bits', ['i386']),
+ ('Default-iphoneos', 'TestArch32Bits', ['armv7']),
+ ]
+
+ if TestMac.Xcode.Version() < '0510':
+ test_cases.extend([
+ ('Default', 'TestNoArchs', ['i386']),
+ ('Default-iphoneos', 'TestNoArchs', ['armv7'])])
+
+ if TestMac.Xcode.Version() >= '0500':
+ test_cases.extend([
+ ('Default', 'TestArch64Bits', ['x86_64']),
+ ('Default', 'TestMultiArchs', ['i386', 'x86_64']),
+ ('Default-iphoneos', 'TestArch64Bits', ['arm64']),
+ ('Default-iphoneos', 'TestMultiArchs', ['armv7', 'arm64'])])
+
+ test.run_gyp('test-archs.gyp', chdir='app-bundle')
+ for configuration, target, archs in test_cases:
+ is_device_build = configuration.endswith('-iphoneos')
+
+ kwds = collections.defaultdict(list)
+ if test.format == 'xcode':
+ if is_device_build:
+ configuration, sdk = configuration.split('-')
+ kwds['arguments'].extend(['-sdk', sdk])
+ if TestMac.Xcode.Version() < '0500':
+ kwds['arguments'].extend(['-arch', archs[0]])
+
+ test.set_configuration(configuration)
+ filename = '%s.app/%s' % (target, target)
+ test.build('test-archs.gyp', target, chdir='app-bundle', **kwds)
+ result_file = test.built_file_path(filename, chdir='app-bundle')
+
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, archs)
+
+ test.pass_test()
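The version gates in this test rely on TestMac.Xcode.Version() returning a zero-padded string ('0510' for Xcode 5.1, '0720' for 7.2, as the comparisons here and in later tests assume), so plain string comparison orders releases correctly:

    # Zero-padded version strings sort lexicographically in release order.
    assert '0500' < '0510' < '0600' < '0720'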
diff --git a/third_party/python/gyp/test/ios/gyptest-copies-with-xcode-envvars.py b/third_party/python/gyp/test/ios/gyptest-copies-with-xcode-envvars.py
new file mode 100644
index 0000000000..88d9e028b8
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-copies-with-xcode-envvars.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Mark Callow. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that files are copied to the correct destinations when those
+destinations are specified using environment variables available in
+Xcode's PBXCopyFilesBuildPhase.
+"""
+
+import TestGyp
+
+import os
+import stat
+import sys
+
+
+test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
+
+if sys.platform == 'darwin':
+ test.run_gyp('copies-with-xcode-envvars.gyp',
+ chdir='copies-with-xcode-envvars')
+
+ test.build('copies-with-xcode-envvars.gyp', chdir='copies-with-xcode-envvars')
+
+ wrapper_name = 'copies-with-xcode-envvars.app/'
+ contents_path = wrapper_name
+ out_path = test.built_file_path('file0', chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file0 contents\n')
+ out_path = test.built_file_path(wrapper_name + 'file1',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file1 contents\n')
+ out_path = test.built_file_path(contents_path + 'file2',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file2 contents\n')
+ out_path = test.built_file_path(contents_path + 'file3',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file3 contents\n')
+ out_path = test.built_file_path(contents_path + 'testimages/file4',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file4 contents\n')
+ out_path = test.built_file_path(contents_path + 'Java/file5',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file5 contents\n')
+ out_path = test.built_file_path(contents_path + 'Frameworks/file6',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file6 contents\n')
+ out_path = test.built_file_path(contents_path + 'Frameworks/file7',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file7 contents\n')
+ out_path = test.built_file_path(contents_path + 'SharedFrameworks/file8',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file8 contents\n')
+ out_path = test.built_file_path(contents_path + 'SharedSupport/file9',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file9 contents\n')
+ out_path = test.built_file_path(contents_path + 'PlugIns/file10',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file10 contents\n')
+ out_path = test.built_file_path(contents_path + 'XPCServices/file11',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file11 contents\n')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/ios/gyptest-crosscompile.py b/third_party/python/gyp/test/ios/gyptest-crosscompile.py
new file mode 100644
index 0000000000..a0816836e5
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-crosscompile.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that tools are built correctly.
+"""
+
+import TestGyp
+import TestMac
+
+import sys
+import os
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
+
+ oldenv = os.environ.copy()
+ try:
+ os.environ['GYP_CROSSCOMPILE'] = '1'
+ test.run_gyp('test-crosscompile.gyp', chdir='app-bundle')
+ finally:
+ os.environ.clear()
+ os.environ.update(oldenv)
+
+ test.set_configuration('Default')
+ test.build('test-crosscompile.gyp', 'TestHost', chdir='app-bundle')
+ result_file = test.built_file_path('TestHost', chdir='app-bundle')
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, ['x86_64'])
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/ios/gyptest-deployment-target.py b/third_party/python/gyp/test/ios/gyptest-deployment-target.py
new file mode 100644
index 0000000000..6c09d9dc04
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-deployment-target.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that IPHONEOS_DEPLOYMENT_TARGET works.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode'])
+
+ test.run_gyp('deployment-target.gyp', chdir='deployment-target')
+
+ test.build('deployment-target.gyp', test.ALL, chdir='deployment-target')
+
+ test.pass_test()
+
diff --git a/third_party/python/gyp/test/ios/gyptest-extension.py b/third_party/python/gyp/test/ios/gyptest-extension.py
new file mode 100755
index 0000000000..bb239ae5b8
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-extension.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ios app extensions are built correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import TestMac
+import subprocess
+import sys
+
+def CheckStrip(p, expected):
+  if expected not in subprocess.check_output(['nm', '-gU', p]):
+ print(expected + " shouldn't get stripped out.")
+ test.fail_test()
+
+def CheckEntrypoint(p, expected):
+ if expected not in subprocess.check_output(['nm', p]):
+    print(expected + " not found.")
+ test.fail_test()
+
+if sys.platform == 'darwin' and TestMac.Xcode.Version() >= "0600":
+
+ test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
+
+ if test.format in ('ninja', 'xcode-ninja'):
+ test.skip_test() # bug=534
+
+ test.run_gyp('extension.gyp', chdir='extension')
+
+ test.build('extension.gyp', 'ExtensionContainer', chdir='extension')
+
+ # Test that the extension is .appex
+ test.built_file_must_exist(
+ 'ExtensionContainer.app/PlugIns/ActionExtension.appex',
+ chdir='extension')
+
+ path = test.built_file_path(
+ 'ExtensionContainer.app/PlugIns/ActionExtension.appex/ActionExtension',
+ chdir='extension')
+ CheckStrip(path, "ActionViewController")
+ CheckEntrypoint(path, "_NSExtensionMain")
+
+ test.pass_test()
+
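CheckStrip above leans on nm's flags: -g lists only external (exported) symbols and -U omits undefined ones, so the check asks whether the class symbol is still defined and exported after the DEPLOYMENT_POSTPROCESSING/STRIP_INSTALLED_PRODUCT settings stripped the binary. A standalone sketch of the same probe (illustrative helper, not part of the suite):

    def symbol_survives_strip(binary, symbol):
        # 'nm -gU' prints global, defined symbols only.
        out = subprocess.check_output(['nm', '-gU', binary]).decode('utf-8')
        return symbol in out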
diff --git a/third_party/python/gyp/test/ios/gyptest-framework.py b/third_party/python/gyp/test/ios/gyptest-framework.py
new file mode 100755
index 0000000000..a6dd857b20
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-framework.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright 2016 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ios app frameworks are built correctly.
+"""
+
+import TestGyp
+import TestMac
+import subprocess
+import sys
+
+if sys.platform == 'darwin' and TestMac.Xcode.Version() >= "0700":
+
+ test = TestGyp.TestGyp(formats=['ninja'])
+ if test.format == 'xcode-ninja':
+ test.skip_test()
+
+ test.run_gyp('framework.gyp', chdir='framework')
+
+ test.build('framework.gyp', 'iOSFramework', chdir='framework')
+
+ test.built_file_must_exist(
+ 'iOSFramework.framework/Headers/iOSFramework.h',
+ chdir='framework')
+ test.built_file_must_exist(
+ 'iOSFramework.framework/Headers/Thing.h',
+ chdir='framework')
+ test.built_file_must_exist(
+ 'iOSFramework.framework/iOSFramework',
+ chdir='framework')
+
+ test.pass_test()
+
diff --git a/third_party/python/gyp/test/ios/gyptest-per-config-settings.py b/third_party/python/gyp/test/ios/gyptest-per-config-settings.py
new file mode 100644
index 0000000000..c3a22e0562
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-per-config-settings.py
@@ -0,0 +1,190 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that device and simulator bundles are built correctly.
+"""
+
+from __future__ import print_function
+
+import plistlib
+import TestGyp
+import os
+import struct
+import subprocess
+import sys
+import tempfile
+import TestMac
+
+print("This test is currently disabled: https://crbug.com/483696.")
+sys.exit(0)
+
+def CheckFileType(file, expected):
+ proc = subprocess.Popen(['lipo', '-info', file], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].strip()
+ assert not proc.returncode
+  if expected not in o:
+ print('File: Expected %s, got %s' % (expected, o))
+ test.fail_test()
+
+def HasCerts():
+  # The bots do not have signing certs, so skip the signature checks when
+  # no certs are available.
+ proc = subprocess.Popen(['security','find-identity','-p', 'codesigning',
+ '-v'], stdout=subprocess.PIPE)
+ return "0 valid identities found" not in proc.communicate()[0].strip()
+
+def CheckSignature(file):
+ proc = subprocess.Popen(['codesign', '-v', file], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].strip()
+ assert not proc.returncode
+ if "code object is not signed at all" in o:
+ print('File %s not properly signed.' % (file))
+ test.fail_test()
+
+def CheckEntitlements(file, expected_entitlements):
+ with tempfile.NamedTemporaryFile() as temp:
+ proc = subprocess.Popen(['codesign', '--display', '--entitlements',
+ temp.name, file], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].strip()
+ assert not proc.returncode
+ data = temp.read()
+ entitlements = ParseEntitlements(data)
+ if not entitlements:
+ print('No valid entitlements found in %s.' % (file))
+ test.fail_test()
+ if entitlements != expected_entitlements:
+ print('Unexpected entitlements found in %s.' % (file))
+ test.fail_test()
+
+def ParseEntitlements(data):
+ if len(data) < 8:
+ return None
+ magic, length = struct.unpack('>II', data[:8])
+ if magic != 0xfade7171 or length != len(data):
+ return None
+ return data[8:]
+
+def GetXcodeVersionValue(type):
+ args = ['xcodebuild', '-version', '-sdk', 'iphoneos', type]
+ job = subprocess.Popen(args, stdout=subprocess.PIPE)
+ return job.communicate()[0].strip()
+
+def GetMachineBuild():
+ args = ['sw_vers', '-buildVersion']
+ job = subprocess.Popen(args, stdout=subprocess.PIPE)
+ return job.communicate()[0].strip()
+
+def CheckPlistvalue(plist, key, expected):
+ if key not in plist:
+ print('%s not set in plist' % key)
+ test.fail_test()
+ return
+ actual = plist[key]
+ if actual != expected:
+ print('File: Expected %s, got %s for %s' % (expected, actual, key))
+ test.fail_test()
+
+def CheckPlistNotSet(plist, key):
+ if key in plist:
+ print('%s should not be set in plist' % key)
+ test.fail_test()
+ return
+
+def ConvertBinaryPlistToXML(path):
+ proc = subprocess.call(['plutil', '-convert', 'xml1', path],
+ stdout=subprocess.PIPE)
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
+
+ test.run_gyp('test-device.gyp', chdir='app-bundle')
+
+ test_configs = ['Default-iphoneos', 'Default']
+ for configuration in test_configs:
+ test.set_configuration(configuration)
+ test.build('test-device.gyp', 'test_app', chdir='app-bundle')
+ result_file = test.built_file_path('Test App Gyp.app/Test App Gyp',
+ chdir='app-bundle')
+ test.must_exist(result_file)
+ info_plist = test.built_file_path('Test App Gyp.app/Info.plist',
+ chdir='app-bundle')
+ plist = plistlib.readPlist(info_plist)
+ xcode_version = TestMac.Xcode.Version()
+ if xcode_version >= '0720':
+ if len(plist) != 23:
+ print('plist should have 23 entries, but it has %s' % len(plist))
+ test.fail_test()
+
+ # Values that will hopefully never change.
+ CheckPlistvalue(plist, 'CFBundleDevelopmentRegion', 'English')
+ CheckPlistvalue(plist, 'CFBundleExecutable', 'Test App Gyp')
+ CheckPlistvalue(plist, 'CFBundleIdentifier', 'com.google.Test App Gyp')
+ CheckPlistvalue(plist, 'CFBundleInfoDictionaryVersion', '6.0')
+ CheckPlistvalue(plist, 'CFBundleName', 'Test App Gyp')
+ CheckPlistvalue(plist, 'CFBundlePackageType', 'APPL')
+ CheckPlistvalue(plist, 'CFBundleShortVersionString', '1.0')
+ CheckPlistvalue(plist, 'CFBundleSignature', 'ause')
+ CheckPlistvalue(plist, 'CFBundleVersion', '1')
+ CheckPlistvalue(plist, 'NSMainNibFile', 'MainMenu')
+ CheckPlistvalue(plist, 'NSPrincipalClass', 'NSApplication')
+ CheckPlistvalue(plist, 'UIDeviceFamily', [1, 2])
+
+ # Values that get pulled from xcodebuild.
+ machine_build = GetMachineBuild()
+ platform_version = GetXcodeVersionValue('ProductVersion')
+ sdk_build = GetXcodeVersionValue('ProductBuildVersion')
+ xcode_build = TestMac.Xcode.Build()
+
+ # Xcode keeps changing what gets included in executable plists, and it
+ # changes between device and simulator builds. Allow the strictest tests for
+ # Xcode 7.2 and above.
+ if xcode_version >= '0720':
+ CheckPlistvalue(plist, 'BuildMachineOSBuild', machine_build)
+ CheckPlistvalue(plist, 'DTCompiler', 'com.apple.compilers.llvm.clang.1_0')
+ CheckPlistvalue(plist, 'DTPlatformVersion', platform_version)
+ CheckPlistvalue(plist, 'DTSDKBuild', sdk_build)
+ CheckPlistvalue(plist, 'DTXcode', xcode_version)
+ CheckPlistvalue(plist, 'DTXcodeBuild', xcode_build)
+ CheckPlistvalue(plist, 'MinimumOSVersion', '8.0')
+
+
+ if configuration == 'Default-iphoneos':
+ platform_name = 'iphoneos'
+ CheckFileType(result_file, 'armv7')
+ CheckPlistvalue(plist, 'CFBundleSupportedPlatforms', ['iPhoneOS'])
+ # Apple keeps changing their mind.
+ if xcode_version >= '0720':
+ CheckPlistvalue(plist, 'DTPlatformBuild', sdk_build)
+ else:
+ platform_name = 'iphonesimulator'
+ CheckFileType(result_file, 'i386')
+ CheckPlistvalue(plist, 'CFBundleSupportedPlatforms', ['iPhoneSimulator'])
+ if xcode_version >= '0720':
+ CheckPlistvalue(plist, 'DTPlatformBuild', '')
+
+ CheckPlistvalue(plist, 'DTPlatformName', platform_name)
+ CheckPlistvalue(plist, 'DTSDKName', platform_name + platform_version)
+
+
+ if HasCerts() and configuration == 'Default-iphoneos':
+ test.build('test-device.gyp', 'sig_test', chdir='app-bundle')
+ result_file = test.built_file_path('sigtest.app/sigtest',
+ chdir='app-bundle')
+ CheckSignature(result_file)
+ info_plist = test.built_file_path('sigtest.app/Info.plist',
+ chdir='app-bundle')
+
+ plist = plistlib.readPlist(info_plist)
+ CheckPlistvalue(plist, 'UIDeviceFamily', [1])
+
+ entitlements_file = test.built_file_path('sig_test.xcent',
+ chdir='app-bundle')
+ if os.path.isfile(entitlements_file):
+ expected_entitlements = open(entitlements_file).read()
+ CheckEntitlements(result_file, expected_entitlements)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/ios/gyptest-watch.py b/third_party/python/gyp/test/ios/gyptest-watch.py
new file mode 100755
index 0000000000..39bab49bb0
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-watch.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ios watch extensions and apps are built correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import TestMac
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+if sys.platform == 'darwin' and TestMac.Xcode.Version() >= "0620":
+ test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
+
+ test.run_gyp('watch.gyp', chdir='watch')
+
+ test.build(
+ 'watch.gyp',
+ 'WatchContainer',
+ chdir='watch')
+
+ # Test that the extension exists
+ test.built_file_must_exist(
+ 'WatchContainer.app/PlugIns/WatchKitExtension.appex',
+ chdir='watch')
+
+ # Test that the watch app exists
+ test.built_file_must_exist(
+ 'WatchContainer.app/PlugIns/WatchKitExtension.appex/WatchApp.app',
+ chdir='watch')
+
+ test.pass_test()
+
diff --git a/third_party/python/gyp/test/ios/gyptest-xcode-ninja.py b/third_party/python/gyp/test/ios/gyptest-xcode-ninja.py
new file mode 100644
index 0000000000..609db8c98f
--- /dev/null
+++ b/third_party/python/gyp/test/ios/gyptest-xcode-ninja.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that the xcode-ninja GYP_GENERATOR runs and builds correctly.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['xcode'])
+
+ # Run ninja and xcode-ninja
+ test.formats = ['ninja', 'xcode-ninja']
+ test.run_gyp('test.gyp', chdir='app-bundle')
+
+ # If it builds the target, it works.
+ test.build('test.ninja.gyp', chdir='app-bundle')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/ios/watch/WatchApp/Images.xcassets/AppIcon.appiconset/Contents.json b/third_party/python/gyp/test/ios/watch/WatchApp/Images.xcassets/AppIcon.appiconset/Contents.json
new file mode 100644
index 0000000000..562c5eff97
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchApp/Images.xcassets/AppIcon.appiconset/Contents.json
@@ -0,0 +1,62 @@
+{
+ "images" : [
+ {
+ "size" : "14.5x14.5",
+ "idiom" : "watch",
+ "scale" : "2x",
+ "role" : "notificationCenter",
+ "subtype" : "38mm"
+ },
+ {
+ "size" : "18x18",
+ "idiom" : "watch",
+ "scale" : "2x",
+ "role" : "notificationCenter",
+ "subtype" : "42mm"
+ },
+ {
+ "size" : "29x29",
+ "idiom" : "watch",
+ "role" : "companionSettings",
+ "scale" : "2x"
+ },
+ {
+ "size" : "29.3x29.3",
+ "idiom" : "watch",
+ "role" : "companionSettings",
+ "scale" : "3x"
+ },
+ {
+ "size" : "40x40",
+ "idiom" : "watch",
+ "scale" : "2x",
+ "role" : "appLauncher",
+ "subtype" : "38mm"
+ },
+ {
+ "size" : "44x44",
+ "idiom" : "watch",
+ "scale" : "2x",
+ "role" : "appLauncher",
+ "subtype" : "42mm"
+ },
+ {
+ "size" : "86x86",
+ "idiom" : "watch",
+ "scale" : "2x",
+ "role" : "quickLook",
+ "subtype" : "38mm"
+ },
+ {
+ "size" : "98x98",
+ "idiom" : "watch",
+ "scale" : "2x",
+ "role" : "quickLook",
+ "subtype" : "42mm"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
diff --git a/third_party/python/gyp/test/ios/watch/WatchApp/Images.xcassets/LaunchImage.launchimage/Contents.json b/third_party/python/gyp/test/ios/watch/WatchApp/Images.xcassets/LaunchImage.launchimage/Contents.json
new file mode 100644
index 0000000000..ed123feff6
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchApp/Images.xcassets/LaunchImage.launchimage/Contents.json
@@ -0,0 +1,24 @@
+{
+ "images" : [
+ {
+ "orientation" : "portrait",
+ "idiom" : "watch",
+ "extent" : "full-screen",
+ "minimum-system-version" : "8.0",
+ "subtype" : "38mm",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "watch",
+ "extent" : "full-screen",
+ "minimum-system-version" : "8.0",
+ "subtype" : "42mm",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
diff --git a/third_party/python/gyp/test/ios/watch/WatchApp/Info.plist b/third_party/python/gyp/test/ios/watch/WatchApp/Info.plist
new file mode 100644
index 0000000000..3cf65b8285
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchApp/Info.plist
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleDisplayName</key>
+ <string>WatchApp</string>
+ <key>CFBundleExecutable</key>
+ <string>$(EXECUTABLE_NAME)</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.gyptest.watch.watchapp</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>$(PRODUCT_NAME)</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>UISupportedInterfaceOrientations</key>
+ <array>
+ <string>UIInterfaceOrientationPortrait</string>
+ <string>UIInterfaceOrientationPortraitUpsideDown</string>
+ </array>
+ <key>WKCompanionAppBundleIdentifier</key>
+ <string>com.google.gyptest.watch</string>
+ <key>WKWatchKitApp</key>
+ <true/>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/watch/WatchApp/Interface.storyboard b/third_party/python/gyp/test/ios/watch/WatchApp/Interface.storyboard
new file mode 100644
index 0000000000..5f52cb6c90
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchApp/Interface.storyboard
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder.WatchKit.Storyboard" version="3.0" toolsVersion="6221" systemVersion="13E28" targetRuntime="watchKit" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" initialViewController="AgC-eL-Hgc">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="6213"/>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBWatchKitPlugin" version="3733"/>
+ </dependencies>
+ <scenes>
+ <!--Interface Controller-->
+ <scene sceneID="aou-V4-d1y">
+ <objects>
+ <controller id="AgC-eL-Hgc" customClass="InterfaceController" customModuleProvider=""/>
+ </objects>
+ </scene>
+ </scenes>
+</document>
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/AppDelegate.h b/third_party/python/gyp/test/ios/watch/WatchContainer/AppDelegate.h
new file mode 100644
index 0000000000..510e2300b1
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/AppDelegate.h
@@ -0,0 +1,12 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+@interface AppDelegate : UIResponder <UIApplicationDelegate>
+
+@property (strong, nonatomic) UIWindow *window;
+
+@end
+
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/AppDelegate.m b/third_party/python/gyp/test/ios/watch/WatchContainer/AppDelegate.m
new file mode 100644
index 0000000000..1197bc1bbc
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/AppDelegate.m
@@ -0,0 +1,19 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "AppDelegate.h"
+
+@interface AppDelegate ()
+
+@end
+
+@implementation AppDelegate
+
+- (BOOL)application:(UIApplication*)application
+ didFinishLaunchingWithOptions:(NSDictionary*)launchOptions {
+ // Override point for customization after application launch.
+ return YES;
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/Base.lproj/Main.storyboard b/third_party/python/gyp/test/ios/watch/WatchContainer/Base.lproj/Main.storyboard
new file mode 100644
index 0000000000..e8f3cfb40c
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/Base.lproj/Main.storyboard
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="6162" systemVersion="14A238h" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" initialViewController="BYZ-38-t0r">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="6160"/>
+ </dependencies>
+ <scenes>
+ <!--View Controller-->
+ <scene sceneID="tne-QT-ifu">
+ <objects>
+ <viewController id="BYZ-38-t0r" customClass="ViewController" customModuleProvider="" sceneMemberID="viewController">
+ <layoutGuides>
+ <viewControllerLayoutGuide type="top" id="y3c-jy-aDJ"/>
+ <viewControllerLayoutGuide type="bottom" id="wfy-db-euE"/>
+ </layoutGuides>
+ <view key="view" contentMode="scaleToFill" id="8bC-Xf-vdC">
+ <rect key="frame" x="0.0" y="0.0" width="480" height="480"/>
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
+ </view>
+ </viewController>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="dkx-z0-nzr" sceneMemberID="firstResponder"/>
+ </objects>
+ </scene>
+ </scenes>
+</document>
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/Images.xcassets/AppIcon.appiconset/Contents.json b/third_party/python/gyp/test/ios/watch/WatchContainer/Images.xcassets/AppIcon.appiconset/Contents.json
new file mode 100644
index 0000000000..f697f61f4a
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/Images.xcassets/AppIcon.appiconset/Contents.json
@@ -0,0 +1,53 @@
+{
+ "images" : [
+ {
+ "idiom" : "iphone",
+ "size" : "29x29",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "40x40",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "60x60",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "29x29",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "29x29",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "40x40",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "40x40",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "76x76",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "76x76",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/Images.xcassets/LaunchImage.launchimage/Contents.json b/third_party/python/gyp/test/ios/watch/WatchContainer/Images.xcassets/LaunchImage.launchimage/Contents.json
new file mode 100644
index 0000000000..4458b40c05
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/Images.xcassets/LaunchImage.launchimage/Contents.json
@@ -0,0 +1,51 @@
+{
+ "images" : [
+ {
+ "orientation" : "portrait",
+ "idiom" : "iphone",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "iphone",
+ "subtype" : "retina4",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "1x"
+ },
+ {
+ "orientation" : "landscape",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "1x"
+ },
+ {
+ "orientation" : "portrait",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ },
+ {
+ "orientation" : "landscape",
+ "idiom" : "ipad",
+ "extent" : "full-screen",
+ "minimum-system-version" : "7.0",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/Info.plist b/third_party/python/gyp/test/ios/watch/WatchContainer/Info.plist
new file mode 100644
index 0000000000..a40319c78e
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/Info.plist
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleExecutable</key>
+ <string>WatchContainer</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.gyptest.watch</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSRequiresIPhoneOS</key>
+ <true/>
+ <key>UIMainStoryboardFile</key>
+ <string>Main</string>
+ <key>UIRequiredDeviceCapabilities</key>
+ <array>
+ <string>armv7</string>
+ </array>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/ViewController.h b/third_party/python/gyp/test/ios/watch/WatchContainer/ViewController.h
new file mode 100644
index 0000000000..fad7754714
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/ViewController.h
@@ -0,0 +1,11 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+@interface ViewController : UIViewController
+
+
+@end
+
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/ViewController.m b/third_party/python/gyp/test/ios/watch/WatchContainer/ViewController.m
new file mode 100644
index 0000000000..3810fa9cba
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/ViewController.m
@@ -0,0 +1,24 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "ViewController.h"
+
+@interface ViewController ()
+
+
+@end
+
+@implementation ViewController
+
+- (void)viewDidLoad {
+ [super viewDidLoad];
+ // Do any additional setup after loading the view, typically from a nib.
+}
+
+- (void)didReceiveMemoryWarning {
+ [super didReceiveMemoryWarning];
+ // Dispose of any resources that can be recreated.
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/watch/WatchContainer/main.m b/third_party/python/gyp/test/ios/watch/WatchContainer/main.m
new file mode 100644
index 0000000000..47aecb5148
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchContainer/main.m
@@ -0,0 +1,13 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+#import <UIKit/UIKit.h>
+#import "AppDelegate.h"
+
+int main(int argc, char* argv[]) {
+ @autoreleasepool {
+ return UIApplicationMain(argc, argv, nil,
+ NSStringFromClass([AppDelegate class]));
+ }
+}
diff --git a/third_party/python/gyp/test/ios/watch/WatchKitExtension/Images.xcassets/MyImage.imageset/Contents.json b/third_party/python/gyp/test/ios/watch/WatchKitExtension/Images.xcassets/MyImage.imageset/Contents.json
new file mode 100644
index 0000000000..f80d950868
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchKitExtension/Images.xcassets/MyImage.imageset/Contents.json
@@ -0,0 +1,20 @@
+{
+ "images" : [
+ {
+ "idiom" : "universal",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "universal",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "universal",
+ "scale" : "3x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
diff --git a/third_party/python/gyp/test/ios/watch/WatchKitExtension/Info.plist b/third_party/python/gyp/test/ios/watch/WatchKitExtension/Info.plist
new file mode 100644
index 0000000000..7a354643ef
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchKitExtension/Info.plist
@@ -0,0 +1,38 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleDisplayName</key>
+ <string>WatchContainer WatchKit Extension</string>
+ <key>CFBundleExecutable</key>
+ <string>$(EXECUTABLE_NAME)</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.gyptest.watch.watchkitextension</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>$(PRODUCT_NAME)</string>
+ <key>CFBundlePackageType</key>
+ <string>XPC!</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.0</string>
+ <key>NSExtension</key>
+ <dict>
+ <key>NSExtensionAttributes</key>
+ <dict>
+ <key>WKAppBundleIdentifier</key>
+ <string>com.google.gyptest.watch.watchapp</string>
+ </dict>
+ <key>NSExtensionPointIdentifier</key>
+ <string>com.apple.watchkit</string>
+ </dict>
+ <key>RemoteInterfacePrincipalClass</key>
+ <string>InterfaceController</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/watch/WatchKitExtension/InterfaceController.h b/third_party/python/gyp/test/ios/watch/WatchKitExtension/InterfaceController.h
new file mode 100644
index 0000000000..c3395eb484
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchKitExtension/InterfaceController.h
@@ -0,0 +1,10 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Foundation/Foundation.h>
+#import <WatchKit/WatchKit.h>
+
+@interface InterfaceController : WKInterfaceController
+@end
+
diff --git a/third_party/python/gyp/test/ios/watch/WatchKitExtension/InterfaceController.m b/third_party/python/gyp/test/ios/watch/WatchKitExtension/InterfaceController.m
new file mode 100644
index 0000000000..564b7d1da5
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchKitExtension/InterfaceController.m
@@ -0,0 +1,25 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "InterfaceController.h"
+
+@implementation InterfaceController
+
+- (instancetype)initWithContext:(id)context {
+ if ((self = [super initWithContext:context])) {
+ // -initWithContext:
+ }
+ return self;
+}
+
+- (void)willActivate {
+ // -willActivate
+}
+
+- (void)didDeactivate {
+ // -didDeactivate
+}
+
+@end
+
diff --git a/third_party/python/gyp/test/ios/watch/WatchKitExtension/MainInterface.storyboard b/third_party/python/gyp/test/ios/watch/WatchKitExtension/MainInterface.storyboard
new file mode 100644
index 0000000000..5aa58184e8
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/WatchKitExtension/MainInterface.storyboard
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="6148" systemVersion="14A229a" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" initialViewController="ObA-dk-sSI">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="6147"/>
+ </dependencies>
+ <scenes>
+ <!--Action View Controller - Image-->
+ <scene sceneID="7MM-of-jgj">
+ <objects>
+ <viewController title="Image" id="ObA-dk-sSI" customClass="ActionViewController" customModuleProvider="" sceneMemberID="viewController">
+ <layoutGuides>
+ <viewControllerLayoutGuide type="top" id="qkL-Od-lgU"/>
+ <viewControllerLayoutGuide type="bottom" id="n38-gi-rB5"/>
+ </layoutGuides>
+ <view key="view" contentMode="scaleToFill" id="zMn-AG-sqS">
+ <rect key="frame" x="0.0" y="0.0" width="320" height="528"/>
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
+ <subviews>
+ <imageView userInteractionEnabled="NO" contentMode="scaleAspectFit" horizontalHuggingPriority="251" verticalHuggingPriority="251" translatesAutoresizingMaskIntoConstraints="NO" id="9ga-4F-77Z">
+ <rect key="frame" x="0.0" y="64" width="320" height="464"/>
+ </imageView>
+ <navigationBar contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="NOA-Dm-cuz">
+ <rect key="frame" x="0.0" y="20" width="320" height="44"/>
+ <items>
+ <navigationItem id="3HJ-uW-3hn">
+ <barButtonItem key="leftBarButtonItem" title="Done" style="done" id="WYi-yp-eM6">
+ <connections>
+ <action selector="done" destination="ObA-dk-sSI" id="Qdu-qn-U6V"/>
+ </connections>
+ </barButtonItem>
+ </navigationItem>
+ </items>
+ </navigationBar>
+ </subviews>
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="calibratedWhite"/>
+ <constraints>
+ <constraint firstAttribute="trailing" secondItem="NOA-Dm-cuz" secondAttribute="trailing" id="A05-Pj-hrr"/>
+ <constraint firstItem="9ga-4F-77Z" firstAttribute="top" secondItem="NOA-Dm-cuz" secondAttribute="bottom" id="Fps-3D-QQW"/>
+ <constraint firstItem="NOA-Dm-cuz" firstAttribute="leading" secondItem="zMn-AG-sqS" secondAttribute="leading" id="HxO-8t-aoh"/>
+ <constraint firstAttribute="trailing" secondItem="9ga-4F-77Z" secondAttribute="trailing" id="Ozw-Hg-0yh"/>
+ <constraint firstItem="9ga-4F-77Z" firstAttribute="leading" secondItem="zMn-AG-sqS" secondAttribute="leading" id="XH5-ld-ONA"/>
+ <constraint firstItem="n38-gi-rB5" firstAttribute="top" secondItem="9ga-4F-77Z" secondAttribute="bottom" id="eQg-nn-Zy4"/>
+ <constraint firstItem="NOA-Dm-cuz" firstAttribute="top" secondItem="qkL-Od-lgU" secondAttribute="bottom" id="we0-1t-bgp"/>
+ </constraints>
+ </view>
+ <freeformSimulatedSizeMetrics key="simulatedDestinationMetrics"/>
+ <size key="freeformSize" width="320" height="528"/>
+ <connections>
+ <outlet property="imageView" destination="9ga-4F-77Z" id="5y6-5w-9QO"/>
+ <outlet property="view" destination="zMn-AG-sqS" id="Qma-de-2ek"/>
+ </connections>
+ </viewController>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="X47-rx-isc" userLabel="First Responder" sceneMemberID="firstResponder"/>
+ </objects>
+ <point key="canvasLocation" x="252" y="-124"/>
+ </scene>
+ </scenes>
+ <simulatedMetricsContainer key="defaultSimulatedMetrics">
+ <simulatedStatusBarMetrics key="statusBar"/>
+ <simulatedOrientationMetrics key="orientation"/>
+ <simulatedScreenMetrics key="destination" type="retina4"/>
+ </simulatedMetricsContainer>
+</document>
diff --git a/third_party/python/gyp/test/ios/watch/watch.gyp b/third_party/python/gyp/test/ios/watch/watch.gyp
new file mode 100644
index 0000000000..49be5554ee
--- /dev/null
+++ b/third_party/python/gyp/test/ios/watch/watch.gyp
@@ -0,0 +1,105 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+ 'target_defaults': {
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'SDKROOT': 'iphoneos',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '8.2',
+ 'CODE_SIGN_IDENTITY[sdk=iphoneos*]': 'iPhone Developer',
+ }
+ },
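+ # WatchKit 1.0 layout: the container app copies the built extension into
+ # its PlugIns directory, and the extension bundles WatchApp.app as a
+ # resource.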
+ 'targets': [
+ {
+ 'target_name': 'WatchContainer',
+ 'product_name': 'WatchContainer',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'mac_bundle_resources': [
+ 'WatchContainer/Base.lproj/Main.storyboard',
+ ],
+ 'sources': [
+ 'WatchContainer/AppDelegate.h',
+ 'WatchContainer/AppDelegate.m',
+ 'WatchContainer/ViewController.h',
+ 'WatchContainer/ViewController.m',
+ 'WatchContainer/main.m',
+ ],
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/WatchContainer.app/PlugIns',
+ 'files': [
+ '<(PRODUCT_DIR)/WatchKitExtension.appex',
+ ]}],
+ 'dependencies': [
+ 'WatchKitExtension'
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'WatchContainer/Info.plist',
+ },
+ },
+ {
+ 'target_name': 'WatchKitExtension',
+ 'product_name': 'WatchKitExtension',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'ios_watchkit_extension': 1,
+ 'sources': [
+ 'WatchKitExtension/InterfaceController.h',
+ 'WatchKitExtension/InterfaceController.m',
+ ],
+ 'mac_bundle_resources': [
+ 'WatchKitExtension/Images.xcassets',
+ '<(PRODUCT_DIR)/WatchApp.app',
+ ],
+ 'dependencies': [
+ 'WatchApp'
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/WatchKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'WatchKitExtension/Info.plist',
+ 'SKIP_INSTALL': 'YES',
+ 'COPY_PHASE_STRIP': 'NO',
+ },
+ },
+ {
+ 'target_name': 'WatchApp',
+ 'product_name': 'WatchApp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'ios_watch_app': 1,
+ 'mac_bundle_resources': [
+ 'WatchApp/Images.xcassets',
+ 'WatchApp/Interface.storyboard',
+ ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'WatchApp/Info.plist',
+ 'SKIP_INSTALL': 'YES',
+ 'COPY_PHASE_STRIP': 'NO',
+ 'TARGETED_DEVICE_FAMILY': '4',
+ 'TARGETED_DEVICE_FAMILY[sdk=iphonesimulator*]': '1,4',
+ },
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/ios/xctests/App/AppDelegate.h b/third_party/python/gyp/test/ios/xctests/App/AppDelegate.h
new file mode 100644
index 0000000000..f8efce97ed
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/AppDelegate.h
@@ -0,0 +1,11 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+@interface AppDelegate : UIResponder<UIApplicationDelegate>
+
+@property(strong, nonatomic) UIWindow* window;
+
+@end
diff --git a/third_party/python/gyp/test/ios/xctests/App/AppDelegate.m b/third_party/python/gyp/test/ios/xctests/App/AppDelegate.m
new file mode 100644
index 0000000000..825dda75b7
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/AppDelegate.m
@@ -0,0 +1,18 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "AppDelegate.h"
+
+@interface AppDelegate ()
+
+@end
+
+@implementation AppDelegate
+
+- (BOOL)application:(UIApplication*)application
+ didFinishLaunchingWithOptions:(NSDictionary*)launchOptions {
+ return YES;
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/xctests/App/Base.lproj/LaunchScreen.xib b/third_party/python/gyp/test/ios/xctests/App/Base.lproj/LaunchScreen.xib
new file mode 100644
index 0000000000..063dc5ea79
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/Base.lproj/LaunchScreen.xib
@@ -0,0 +1,41 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.XIB" version="3.0" toolsVersion="6214" systemVersion="14A314h" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" launchScreen="YES" useTraitCollections="YES">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="6207"/>
+ <capability name="Constraints with non-1.0 multipliers" minToolsVersion="5.1"/>
+ </dependencies>
+ <objects>
+ <placeholder placeholderIdentifier="IBFilesOwner" id="-1" userLabel="File's Owner"/>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="-2" customClass="UIResponder"/>
+ <view contentMode="scaleToFill" id="iN0-l3-epB">
+ <rect key="frame" x="0.0" y="0.0" width="480" height="480"/>
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
+ <subviews>
+ <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text=" Copyright (c) 2014 Google. All rights reserved." textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" minimumFontSize="9" translatesAutoresizingMaskIntoConstraints="NO" id="8ie-xW-0ye">
+ <rect key="frame" x="20" y="439" width="441" height="21"/>
+ <fontDescription key="fontDescription" type="system" pointSize="17"/>
+ <color key="textColor" cocoaTouchSystemColor="darkTextColor"/>
+ <nil key="highlightedColor"/>
+ </label>
+ <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="App" textAlignment="center" lineBreakMode="middleTruncation" baselineAdjustment="alignBaselines" minimumFontSize="18" translatesAutoresizingMaskIntoConstraints="NO" id="kId-c2-rCX">
+ <rect key="frame" x="20" y="140" width="441" height="43"/>
+ <fontDescription key="fontDescription" type="boldSystem" pointSize="36"/>
+ <color key="textColor" cocoaTouchSystemColor="darkTextColor"/>
+ <nil key="highlightedColor"/>
+ </label>
+ </subviews>
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
+ <constraints>
+ <constraint firstItem="kId-c2-rCX" firstAttribute="centerY" secondItem="iN0-l3-epB" secondAttribute="bottom" multiplier="1/3" constant="1" id="5cJ-9S-tgC"/>
+ <constraint firstAttribute="centerX" secondItem="kId-c2-rCX" secondAttribute="centerX" id="Koa-jz-hwk"/>
+ <constraint firstAttribute="bottom" secondItem="8ie-xW-0ye" secondAttribute="bottom" constant="20" id="Kzo-t9-V3l"/>
+ <constraint firstItem="8ie-xW-0ye" firstAttribute="leading" secondItem="iN0-l3-epB" secondAttribute="leading" constant="20" symbolic="YES" id="MfP-vx-nX0"/>
+ <constraint firstAttribute="centerX" secondItem="8ie-xW-0ye" secondAttribute="centerX" id="ZEH-qu-HZ9"/>
+ <constraint firstItem="kId-c2-rCX" firstAttribute="leading" secondItem="iN0-l3-epB" secondAttribute="leading" constant="20" symbolic="YES" id="fvb-Df-36g"/>
+ </constraints>
+ <nil key="simulatedStatusBarMetrics"/>
+ <freeformSimulatedSizeMetrics key="simulatedDestinationMetrics"/>
+ <point key="canvasLocation" x="548" y="455"/>
+ </view>
+ </objects>
+</document>
diff --git a/third_party/python/gyp/test/ios/xctests/App/Base.lproj/Main.storyboard b/third_party/python/gyp/test/ios/xctests/App/Base.lproj/Main.storyboard
new file mode 100644
index 0000000000..f56d2f3bb5
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/Base.lproj/Main.storyboard
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="6211" systemVersion="14A298i" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" initialViewController="BYZ-38-t0r">
+ <dependencies>
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="6204"/>
+ </dependencies>
+ <scenes>
+ <!--View Controller-->
+ <scene sceneID="tne-QT-ifu">
+ <objects>
+ <viewController id="BYZ-38-t0r" customClass="ViewController" customModuleProvider="" sceneMemberID="viewController">
+ <layoutGuides>
+ <viewControllerLayoutGuide type="top" id="y3c-jy-aDJ"/>
+ <viewControllerLayoutGuide type="bottom" id="wfy-db-euE"/>
+ </layoutGuides>
+ <view key="view" contentMode="scaleToFill" id="8bC-Xf-vdC">
+ <rect key="frame" x="0.0" y="0.0" width="600" height="600"/>
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="calibratedWhite"/>
+ </view>
+ </viewController>
+ <placeholder placeholderIdentifier="IBFirstResponder" id="dkx-z0-nzr" sceneMemberID="firstResponder"/>
+ </objects>
+ </scene>
+ </scenes>
+</document>
diff --git a/third_party/python/gyp/test/ios/xctests/App/Images.xcassets/AppIcon.appiconset/Contents.json b/third_party/python/gyp/test/ios/xctests/App/Images.xcassets/AppIcon.appiconset/Contents.json
new file mode 100644
index 0000000000..36d2c80d88
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/Images.xcassets/AppIcon.appiconset/Contents.json
@@ -0,0 +1,68 @@
+{
+ "images" : [
+ {
+ "idiom" : "iphone",
+ "size" : "29x29",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "29x29",
+ "scale" : "3x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "40x40",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "40x40",
+ "scale" : "3x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "60x60",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "iphone",
+ "size" : "60x60",
+ "scale" : "3x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "29x29",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "29x29",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "40x40",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "40x40",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "76x76",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "ipad",
+ "size" : "76x76",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+} \ No newline at end of file
diff --git a/third_party/python/gyp/test/ios/xctests/App/Info.plist b/third_party/python/gyp/test/ios/xctests/App/Info.plist
new file mode 100644
index 0000000000..3f938f60f4
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/Info.plist
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.gyptest.App</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSRequiresIPhoneOS</key>
+ <true/>
+ <key>UILaunchStoryboardName</key>
+ <string>LaunchScreen</string>
+ <key>UIMainStoryboardFile</key>
+ <string>Main</string>
+ <key>UIRequiredDeviceCapabilities</key>
+ <array>
+ <string>armv7</string>
+ </array>
+ <key>UISupportedInterfaceOrientations</key>
+ <array>
+ <string>UIInterfaceOrientationPortrait</string>
+ <string>UIInterfaceOrientationLandscapeLeft</string>
+ <string>UIInterfaceOrientationLandscapeRight</string>
+ </array>
+ <key>UISupportedInterfaceOrientations~ipad</key>
+ <array>
+ <string>UIInterfaceOrientationPortrait</string>
+ <string>UIInterfaceOrientationPortraitUpsideDown</string>
+ <string>UIInterfaceOrientationLandscapeLeft</string>
+ <string>UIInterfaceOrientationLandscapeRight</string>
+ </array>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/xctests/App/ViewController.h b/third_party/python/gyp/test/ios/xctests/App/ViewController.h
new file mode 100644
index 0000000000..95a281e8d8
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/ViewController.h
@@ -0,0 +1,9 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+@interface ViewController : UIViewController
+
+@end
diff --git a/third_party/python/gyp/test/ios/xctests/App/ViewController.m b/third_party/python/gyp/test/ios/xctests/App/ViewController.m
new file mode 100644
index 0000000000..d38e3c5bb7
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/ViewController.m
@@ -0,0 +1,21 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "ViewController.h"
+
+@interface ViewController ()
+
+@end
+
+@implementation ViewController
+
+- (void)viewDidLoad {
+ [super viewDidLoad];
+}
+
+- (void)didReceiveMemoryWarning {
+ [super didReceiveMemoryWarning];
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/xctests/App/main.m b/third_party/python/gyp/test/ios/xctests/App/main.m
new file mode 100644
index 0000000000..83368075cd
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/App/main.m
@@ -0,0 +1,13 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+#import "AppDelegate.h"
+
+int main(int argc, char* argv[]) {
+ @autoreleasepool {
+ return UIApplicationMain(
+ argc, argv, nil, NSStringFromClass([AppDelegate class]));
+ }
+}
diff --git a/third_party/python/gyp/test/ios/xctests/AppTests/AppTests.m b/third_party/python/gyp/test/ios/xctests/AppTests/AppTests.m
new file mode 100644
index 0000000000..22121b089d
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/AppTests/AppTests.m
@@ -0,0 +1,31 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+#import <XCTest/XCTest.h>
+
+@interface AppTests : XCTestCase
+
+@end
+
+@implementation AppTests
+
+- (void)setUp {
+ [super setUp];
+}
+
+- (void)tearDown {
+ [super tearDown];
+}
+
+- (void)testExample {
+ XCTAssert(YES, @"Pass");
+}
+
+- (void)testPerformanceExample {
+ [self measureBlock:^{
+ }];
+}
+
+@end
diff --git a/third_party/python/gyp/test/ios/xctests/AppTests/Info.plist b/third_party/python/gyp/test/ios/xctests/AppTests/Info.plist
new file mode 100644
index 0000000000..d43ff4ba8d
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/AppTests/Info.plist
@@ -0,0 +1,24 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>en</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.gyptest.AppTests</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>BNDL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/ios/xctests/gyptest-xctests.py b/third_party/python/gyp/test/ios/xctests/gyptest-xctests.py
new file mode 100644
index 0000000000..6642cfb938
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/gyptest-xctests.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that iOS XCTests can be built correctly.
+"""
+
+import TestGyp
+
+import os
+import subprocess
+import sys
+
+def HasCerts():
+ # The bots do not have signing certs, so skip the signing checks when no
+ # certs are available.
+ proc = subprocess.Popen(['security','find-identity','-p', 'codesigning',
+ '-v'], stdout=subprocess.PIPE)
+ return "0 valid identities found" not in proc.communicate()[0].strip()
+
+if sys.platform == "darwin":
+ # This test appears to be flaky and hangs some of the time.
+ sys.exit(2) # bug=531
+
+ test = TestGyp.TestGyp(formats=['xcode', 'ninja'])
+ test.run_gyp('xctests.gyp')
+ test_configs = ['Default']
+ # TODO(crbug.com/557418): Enable this once xcodebuild works for iOS devices.
+ #if HasCerts() and test.format == 'xcode':
+ # test_configs.append('Default-iphoneos')
+ for config in test_configs:
+ test.set_configuration(config)
+ test.build('xctests.gyp', test.ALL)
+ test.built_file_must_exist('app_under_test.app/app_under_test')
+ test.built_file_must_exist('app_tests.xctest/app_tests')
+ if 'ninja' in test.format:
+ test.built_file_must_exist('obj/AppTests/app_tests.AppTests.i386.o')
+ test.built_file_must_exist('obj/AppTests/app_tests.AppTests.x86_64.o')
+ elif test.format == 'xcode':
+ xcode_object_path = os.path.join('..', 'xctests.build',
+ 'Default-iphonesimulator',
+ 'app_tests.build', 'Objects-normal',
+ '%s', 'AppTests.o')
+ test.built_file_must_exist(xcode_object_path % 'i386')
+ test.built_file_must_exist(xcode_object_path % 'x86_64')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/ios/xctests/xctests.gyp b/third_party/python/gyp/test/ios/xctests/xctests.gyp
new file mode 100644
index 0000000000..8d4d6393b7
--- /dev/null
+++ b/third_party/python/gyp/test/ios/xctests/xctests.gyp
@@ -0,0 +1,74 @@
+# Copyright 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'conditions': [
+ ['"<(GENERATOR)"=="ninja"', {
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+ }]
+ ],
+ 'target_defaults': {
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-fobjc-abi-version=2',
+ ],
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'SDKROOT': 'iphonesimulator', # -isysroot
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ 'IPHONEOS_DEPLOYMENT_TARGET': '9.0',
+ 'CODE_SIGN_IDENTITY[sdk=iphoneos*]': 'iPhone Developer',
+ }
+ },
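+ # app_tests is an application-hosted XCTest bundle: BUNDLE_LOADER points
+ # at the app_under_test binary and TEST_HOST reuses it, so the tests load
+ # inside the running app.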
+ 'targets': [
+ {
+ 'target_name': 'app_under_test',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'mac_bundle_resources': [
+ 'App/Base.lproj/LaunchScreen.xib',
+ 'App/Base.lproj/Main.storyboard',
+ ],
+ 'sources': [
+ 'App/AppDelegate.h',
+ 'App/AppDelegate.m',
+ 'App/ViewController.h',
+ 'App/ViewController.m',
+ 'App/main.m',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'App/Info.plist',
+ },
+ },
+ {
+ 'target_name': 'app_tests',
+ 'type': 'loadable_module',
+ 'mac_xctest_bundle': 1,
+ 'sources': [
+ 'AppTests/AppTests.m',
+ ],
+ 'dependencies': [
+ 'app_under_test'
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'WRAPPER_EXTENSION': 'xctest',
+ 'INFOPLIST_FILE': 'AppTests/Info.plist',
+ 'BUNDLE_LOADER': '$(BUILT_PRODUCTS_DIR)/app_under_test.app/app_under_test',
+ 'TEST_HOST': '$(BUNDLE_LOADER)',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/lib/README.txt b/third_party/python/gyp/test/lib/README.txt
new file mode 100644
index 0000000000..b3d724574e
--- /dev/null
+++ b/third_party/python/gyp/test/lib/README.txt
@@ -0,0 +1,17 @@
+Supporting modules for GYP testing.
+
+ TestCmd.py
+ TestCommon.py
+
+ Modules for generic testing of command-line utilities,
+ specifically including the ability to copy a test configuration
+ to temporary directories (with default cleanup on exit) as part
+ of running test scripts that invoke commands, compare actual
+ against expected output, etc.
+
+ Our copies of these come from the SCons project,
+ http://www.scons.org/.
+
+ TestGyp.py
+
+ Modules for GYP-specific tests, of course.
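+
+ A minimal test script built on these modules might look like the
+ following (the .gyp file name here is hypothetical):
+
+ import TestGyp
+ test = TestGyp.TestGyp()
+ test.run_gyp('hello.gyp')
+ test.build('hello.gyp', test.ALL)
+ test.pass_test()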
diff --git a/third_party/python/gyp/test/lib/TestCmd.py b/third_party/python/gyp/test/lib/TestCmd.py
new file mode 100644
index 0000000000..1ec50933a4
--- /dev/null
+++ b/third_party/python/gyp/test/lib/TestCmd.py
@@ -0,0 +1,1597 @@
+# Copyright (c) 2018 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+TestCmd.py: a testing framework for commands and scripts.
+
+The TestCmd module provides a framework for portable automated testing
+of executable commands and scripts (in any language, not just Python),
+especially commands and scripts that require file system interaction.
+
+In addition to running tests and evaluating conditions, the TestCmd
+module manages and cleans up one or more temporary workspace
+directories, and provides methods for creating files and directories in
+those workspace directories from in-line data (here-documents), allowing
+tests to be completely self-contained.
+
+A TestCmd environment object is created via the usual invocation:
+
+ import TestCmd
+ test = TestCmd.TestCmd()
+
+There are a bunch of keyword arguments available at instantiation:
+
+ test = TestCmd.TestCmd(description = 'string',
+ program = 'program_or_script_to_test',
+ interpreter = 'script_interpreter',
+ workdir = 'prefix',
+ subdir = 'subdir',
+ verbose = Boolean,
+ match = default_match_function,
+ diff = default_diff_function,
+ combine = Boolean)
+
+There are a bunch of methods that let you do different things:
+
+ test.verbose_set(1)
+
+ test.description_set('string')
+
+ test.program_set('program_or_script_to_test')
+
+ test.interpreter_set('script_interpreter')
+ test.interpreter_set(['script_interpreter', 'arg'])
+
+ test.workdir_set('prefix')
+ test.workdir_set('')
+
+ test.workpath('file')
+ test.workpath('subdir', 'file')
+
+ test.subdir('subdir', ...)
+
+ test.rmdir('subdir', ...)
+
+ test.write('file', "contents\n")
+ test.write(['subdir', 'file'], "contents\n")
+
+ test.read('file')
+ test.read(['subdir', 'file'])
+ test.read('file', mode)
+ test.read(['subdir', 'file'], mode)
+
+ test.writable('dir', 1)
+ test.writable('dir', None)
+
+ test.preserve(condition, ...)
+
+ test.cleanup(condition)
+
+ test.command_args(program = 'program_or_script_to_run',
+ interpreter = 'script_interpreter',
+ arguments = 'arguments to pass to program')
+
+ test.run(program = 'program_or_script_to_run',
+ interpreter = 'script_interpreter',
+ arguments = 'arguments to pass to program',
+ chdir = 'directory_to_chdir_to',
+ stdin = 'input to feed to the program\n',
+ universal_newlines = True)
+
+ p = test.start(program = 'program_or_script_to_run',
+ interpreter = 'script_interpreter',
+ arguments = 'arguments to pass to program',
+ universal_newlines = None)
+
+ test.finish(self, p)
+
+ test.pass_test()
+ test.pass_test(condition)
+ test.pass_test(condition, function)
+
+ test.fail_test()
+ test.fail_test(condition)
+ test.fail_test(condition, function)
+ test.fail_test(condition, function, skip)
+
+ test.no_result()
+ test.no_result(condition)
+ test.no_result(condition, function)
+ test.no_result(condition, function, skip)
+
+ test.stdout()
+ test.stdout(run)
+
+ test.stderr()
+ test.stderr(run)
+
+ test.symlink(target, link)
+
+ test.banner(string)
+ test.banner(string, width)
+
+ test.diff(actual, expected)
+
+ test.match(actual, expected)
+
+ test.match_exact("actual 1\nactual 2\n", "expected 1\nexpected 2\n")
+ test.match_exact(["actual 1\n", "actual 2\n"],
+ ["expected 1\n", "expected 2\n"])
+
+ test.match_re("actual 1\nactual 2\n", regex_string)
+ test.match_re(["actual 1\n", "actual 2\n"], list_of_regexes)
+
+ test.match_re_dotall("actual 1\nactual 2\n", regex_string)
+ test.match_re_dotall(["actual 1\n", "actual 2\n"], list_of_regexes)
+
+ test.tempdir()
+ test.tempdir('temporary-directory')
+
+ test.sleep()
+ test.sleep(seconds)
+
+ test.where_is('foo')
+ test.where_is('foo', 'PATH1:PATH2')
+ test.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
+
+ test.unlink('file')
+ test.unlink('subdir', 'file')
+
+The TestCmd module provides pass_test(), fail_test(), and no_result()
+unbound functions that report test results for use with the Aegis change
+management system. These methods terminate the test immediately,
+reporting PASSED, FAILED, or NO RESULT respectively, and exiting with
+status 0 (success), 1 or 2 respectively. This allows for a distinction
+between an actual failed test and a test that could not be properly
+evaluated because of an external condition (such as a full file system
+or incorrect permissions).
+
+ import TestCmd
+
+ TestCmd.pass_test()
+ TestCmd.pass_test(condition)
+ TestCmd.pass_test(condition, function)
+
+ TestCmd.fail_test()
+ TestCmd.fail_test(condition)
+ TestCmd.fail_test(condition, function)
+ TestCmd.fail_test(condition, function, skip)
+
+ TestCmd.no_result()
+ TestCmd.no_result(condition)
+ TestCmd.no_result(condition, function)
+ TestCmd.no_result(condition, function, skip)
+
+The TestCmd module also provides unbound functions that handle matching
+in the same way as the match_*() methods described above.
+
+ import TestCmd
+
+ test = TestCmd.TestCmd(match = TestCmd.match_exact)
+
+ test = TestCmd.TestCmd(match = TestCmd.match_re)
+
+ test = TestCmd.TestCmd(match = TestCmd.match_re_dotall)
+
+The TestCmd module provides unbound functions that can be used for the
+"diff" argument to TestCmd.TestCmd instantiation:
+
+ import TestCmd
+
+ test = TestCmd.TestCmd(match = TestCmd.match_re,
+ diff = TestCmd.diff_re)
+
+ test = TestCmd.TestCmd(diff = TestCmd.simple_diff)
+
+The "diff" argument can also be used with standard difflib functions:
+
+ import difflib
+
+ test = TestCmd.TestCmd(diff = difflib.context_diff)
+
+ test = TestCmd.TestCmd(diff = difflib.unified_diff)
+
+Lastly, the where_is() method also exists in an unbound function
+version.
+
+ import TestCmd
+
+ TestCmd.where_is('foo')
+ TestCmd.where_is('foo', 'PATH1:PATH2')
+ TestCmd.where_is('foo', 'PATH1;PATH2', '.suffix3;.suffix4')
+"""
+
+# Copyright 2000-2010 Steven Knight
+# This module is free software, and you may redistribute it and/or modify
+# it under the same terms as Python itself, so long as this copyright message
+# and disclaimer are retained in their original form.
+#
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#
+# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+from __future__ import print_function
+
+__author__ = "Steven Knight <knight at baldmt dot com>"
+__revision__ = "TestCmd.py 0.37.D001 2010/01/11 16:55:50 knight"
+__version__ = "0.37"
+
+import errno
+import os
+import os.path
+import re
+import shutil
+import stat
+import sys
+import tempfile
+import time
+import traceback
+try:
+ from UserList import UserList
+except ImportError:
+ from collections import UserList
+
+__all__ = [
+ 'diff_re',
+ 'fail_test',
+ 'no_result',
+ 'pass_test',
+ 'match_exact',
+ 'match_re',
+ 'match_re_dotall',
+ 'python_executable',
+ 'TestCmd'
+]
+
+try:
+ import difflib
+except ImportError:
+ __all__.append('simple_diff')
+
+def is_List(e):
+ return (type(e) is list) or isinstance(e, UserList)
+
+try:
+ from UserString import UserString
+except ImportError:
+ try:
+ from collections import UserString
+ except ImportError:
+ class UserString:
+ pass
+
+try:
+ # basestring was removed in python3.
+ basestring
+except NameError:
+ basestring = str
+
+def is_String(e):
+ return isinstance(e, basestring) or isinstance(e, UserString)
+
+if os.name in ('posix', 'nt'):
+ tempfile.template = 'testcmd.' + str(os.getpid()) + '.'
+else:
+ tempfile.template = 'testcmd.'
+
+re_space = re.compile(r'\s')
+
+_Cleanup = []
+
+_chain_to_exitfunc = None
+
+def _clean():
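+ # atexit hook: clean up every registered TestCmd instance in reverse
+ # creation order, then chain to any pre-existing sys.exitfunc.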
+ global _Cleanup
+ for test in reversed(_Cleanup):
+ if test:
+ test.cleanup()
+ del _Cleanup[:]
+ if _chain_to_exitfunc:
+ _chain_to_exitfunc()
+
+try:
+ import atexit
+except ImportError:
+ # TODO(1.5): atexit requires python 2.0, so chain sys.exitfunc
+ try:
+ _chain_to_exitfunc = sys.exitfunc
+ except AttributeError:
+ pass
+ sys.exitfunc = _clean
+else:
+ atexit.register(_clean)
+
+try:
+ zip
+except NameError:
+ def zip(*lists):
+ result = []
+ for i in range(min(map(len, lists))):
+ result.append(tuple(map(lambda l, i=i: l[i], lists)))
+ return result
+
+class Collector:
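+ # Visitor with the os.path.walk callback signature: records 'top' plus
+ # every name reported for each directory visited.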
+ def __init__(self, top):
+ self.entries = [top]
+ def __call__(self, arg, dirname, names):
+ pathjoin = lambda n, d=dirname: os.path.join(d, n)
+ self.entries.extend(map(pathjoin, names))
+
+def _caller(tblist, skip):
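+ # Format an extracted traceback for the failure report, dropping the
+ # innermost frames that live in TestCmd.py itself and honoring 'skip'.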
+ string = ""
+ arr = []
+ for file, line, name, text in tblist:
+ if file[-10:] == "TestCmd.py":
+ break
+ arr = [(file, line, name, text)] + arr
+ atfrom = "at"
+ for file, line, name, text in arr[skip:]:
+ if name in ("?", "<module>"):
+ name = ""
+ else:
+ name = " (" + name + ")"
+ string = string + ("%s line %d of %s%s\n" % (atfrom, line, file, name))
+ atfrom = "\tfrom"
+ return string
+
+def fail_test(self = None, condition = 1, function = None, skip = 0):
+ """Cause the test to fail.
+
+ By default, the fail_test() method reports that the test FAILED
+ and exits with a status of 1. If a condition argument is supplied,
+ the test fails only if the condition is true.
+ """
+ if not condition:
+ return
+ if function is not None:
+ function()
+ of = ""
+ desc = ""
+ sep = " "
+ if self is not None:
+ if self.program:
+ of = " of " + self.program
+ sep = "\n\t"
+ if self.description:
+ desc = " [" + self.description + "]"
+ sep = "\n\t"
+
+ at = _caller(traceback.extract_stack(), skip)
+ sys.stderr.write("FAILED test" + of + desc + sep + at)
+
+ sys.exit(1)
+
+def no_result(self = None, condition = 1, function = None, skip = 0):
+ """Causes a test to exit with no valid result.
+
+ By default, the no_result() method reports NO RESULT for the test
+ and exits with a status of 2. If a condition argument is supplied,
+ the test fails only if the condition is true.
+ """
+ if not condition:
+ return
+    if function is not None:
+        function()
+    of = ""
+    desc = ""
+    sep = " "
+    if self is not None:
+ if self.program:
+ of = " of " + self.program
+ sep = "\n\t"
+ if self.description:
+ desc = " [" + self.description + "]"
+ sep = "\n\t"
+
+ if os.environ.get('TESTCMD_DEBUG_SKIPS'):
+ at = _caller(traceback.extract_stack(), skip)
+ sys.stderr.write("NO RESULT for test" + of + desc + sep + at)
+ else:
+ sys.stderr.write("NO RESULT\n")
+
+ sys.exit(2)
+
+def pass_test(self = None, condition = 1, function = None):
+ """Causes a test to pass.
+
+ By default, the pass_test() method reports PASSED for the test
+ and exits with a status of 0. If a condition argument is supplied,
+ the test passes only if the condition is true.
+ """
+ if not condition:
+ return
+    if function is not None:
+ function()
+ sys.stderr.write("PASSED\n")
+ sys.exit(0)
+
+def match_exact(lines = None, matches = None):
+ """
+ """
+ if not is_List(lines):
+ lines = lines.split("\n")
+ if not is_List(matches):
+ matches = matches.split("\n")
+ if len(lines) != len(matches):
+ return
+ for i in range(len(lines)):
+ if lines[i] != matches[i]:
+ return
+ return 1
+
+def match_re(lines = None, res = None):
+ """
+ """
+ if not is_List(lines):
+ lines = lines.split("\n")
+ if not is_List(res):
+ res = res.split("\n")
+ if len(lines) != len(res):
+ return
+ for i in range(len(lines)):
+ s = "^" + res[i] + "$"
+ try:
+ expr = re.compile(s)
+ except re.error as e:
+ msg = "Regular expression error in %s: %s"
+            raise re.error(msg % (repr(s), e))
+ if not expr.search(lines[i]):
+ return
+ return 1
+
+def match_re_dotall(lines = None, res = None):
+ """
+ """
+    if not is_String(lines):
+        lines = "\n".join(lines)
+    if not is_String(res):
+ res = "\n".join(res)
+ s = "^" + res + "$"
+ try:
+ expr = re.compile(s, re.DOTALL)
+ except re.error as e:
+ msg = "Regular expression error in %s: %s"
+        raise re.error(msg % (repr(s), e))
+ if expr.match(lines):
+ return 1
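+
+# Comment-only examples of the match functions above; the results follow
+# directly from the code (1 on a match, None otherwise):
+#
+#   match_exact("a\nb", "a\nb")      -> 1
+#   match_exact("a\nb", "a\nc")      -> None
+#   match_re("abc", ["a.c"])         -> 1   (each line anchored as ^...$)
+#   match_re_dotall("a\nb", "a.*b")  -> 1   (re.DOTALL lets . span newlines)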
+
+try:
+ import difflib
+except ImportError:
+ pass
+else:
+ def simple_diff(a, b, fromfile='', tofile='',
+ fromfiledate='', tofiledate='', n=3, lineterm='\n'):
+ """
+    A function with the same calling signature as difflib.context_diff
+    (diff -c) and difflib.unified_diff (diff -u) but which produces
+    output like the simple, unadorned 'diff' command.
+ """
+ sm = difflib.SequenceMatcher(None, a, b)
+ def comma(x1, x2):
+            return str(x2) if x1+1 == x2 else '%s,%s' % (x1+1, x2)
+ result = []
+ for op, a1, a2, b1, b2 in sm.get_opcodes():
+ if op == 'delete':
+ result.append("%sd%d" % (comma(a1, a2), b1))
+ result.extend(map(lambda l: '< ' + l, a[a1:a2]))
+ elif op == 'insert':
+ result.append("%da%s" % (a1, comma(b1, b2)))
+ result.extend(map(lambda l: '> ' + l, b[b1:b2]))
+ elif op == 'replace':
+ result.append("%sc%s" % (comma(a1, a2), comma(b1, b2)))
+ result.extend(map(lambda l: '< ' + l, a[a1:a2]))
+ result.append('---')
+ result.extend(map(lambda l: '> ' + l, b[b1:b2]))
+ return result
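+
+# Comment-only example of simple_diff output, derived from the opcode
+# handling above:
+#
+#   simple_diff(['a', 'b', 'c'], ['a', 'x', 'c'])
+#   -> ['2c2', '< b', '---', '> x']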
+
+def diff_re(a, b, fromfile='', tofile='',
+ fromfiledate='', tofiledate='', n=3, lineterm='\n'):
+ """
+ A simple "diff" of two sets of lines when the expected lines
+ are regular expressions. This is a really dumb thing that
+ just compares each line in turn, so it doesn't look for
+ chunks of matching lines and the like--but at least it lets
+    you know exactly which line first didn't compare correctly.
+ """
+ result = []
+ diff = len(a) - len(b)
+ if diff < 0:
+ a = a + ['']*(-diff)
+ elif diff > 0:
+ b = b + ['']*diff
+ i = 0
+ for aline, bline in zip(a, b):
+ s = "^" + aline + "$"
+ try:
+ expr = re.compile(s)
+ except re.error as e:
+ msg = "Regular expression error in %s: %s"
+            raise re.error(msg % (repr(s), e))
+ if not expr.search(bline):
+ result.append("%sc%s" % (i+1, i+1))
+ result.append('< ' + repr(a[i]))
+ result.append('---')
+ result.append('> ' + repr(b[i]))
+ i = i+1
+ return result
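+
+# Comment-only sketch of diff_re(): the expected lines in `a` are regular
+# expressions compared one-to-one against the actual lines in `b`:
+#
+#   diff_re(['a.c'], ['abc'])  -> []   (the regex matches; no difference)
+#   diff_re(['a.c'], ['xyz'])  -> ['1c1', "< 'a.c'", '---', "> 'xyz'"]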
+
+if os.name == 'java':
+
+ python_executable = os.path.join(sys.prefix, 'jython')
+
+else:
+
+ python_executable = sys.executable
+
+if sys.platform == 'win32':
+
+ default_sleep_seconds = 2
+
+ def where_is(file, path=None, pathext=None):
+ if path is None:
+ path = os.environ['PATH']
+ if is_String(path):
+ path = path.split(os.pathsep)
+ if pathext is None:
+ pathext = os.environ['PATHEXT']
+ if is_String(pathext):
+ pathext = pathext.split(os.pathsep)
+ for ext in pathext:
+ if ext.lower() == file[-len(ext):].lower():
+ pathext = ['']
+ break
+ for dir in path:
+ f = os.path.join(dir, file)
+ for ext in pathext:
+ fext = f + ext
+ if os.path.isfile(fext):
+ return fext
+ return None
+
+else:
+
+ def where_is(file, path=None, pathext=None):
+ if path is None:
+ path = os.environ['PATH']
+ if is_String(path):
+ path = path.split(os.pathsep)
+ for dir in path:
+ f = os.path.join(dir, file)
+ if os.path.isfile(f):
+ try:
+ st = os.stat(f)
+ except OSError:
+ continue
+ if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
+ return f
+ return None
+
+ default_sleep_seconds = 1
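+
+# Comment-only example of where_is(); the result depends on the host
+# system, so the path shown is purely illustrative:
+#
+#   where_is('python')  -> e.g. '/usr/bin/python', or None if not found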
+
+
+
+try:
+ import subprocess
+except ImportError:
+ # The subprocess module doesn't exist in this version of Python,
+ # so we're going to cobble up something that looks just enough
+ # like its API for our purposes below.
+ import new
+
+ subprocess = new.module('subprocess')
+
+ subprocess.PIPE = 'PIPE'
+ subprocess.STDOUT = 'STDOUT'
+ subprocess.mswindows = (sys.platform == 'win32')
+
+ try:
+ import popen2
+ popen2.Popen3
+ except AttributeError:
+ class Popen3:
+ universal_newlines = 1
+ def __init__(self, command, **kw):
+ if sys.platform == 'win32' and command[0] == '"':
+ command = '"' + command + '"'
+ (stdin, stdout, stderr) = os.popen3(' ' + command)
+ self.stdin = stdin
+ self.stdout = stdout
+ self.stderr = stderr
+ def close_output(self):
+ self.stdout.close()
+ self.resultcode = self.stderr.close()
+ def wait(self):
+ resultcode = self.resultcode
+ if os.WIFEXITED(resultcode):
+ return os.WEXITSTATUS(resultcode)
+ elif os.WIFSIGNALED(resultcode):
+ return os.WTERMSIG(resultcode)
+ else:
+ return None
+
+ else:
+ try:
+ popen2.Popen4
+ except AttributeError:
+ # A cribbed Popen4 class, with some retrofitted code from
+ # the Python 1.5 Popen3 class methods to do certain things
+ # by hand.
+ class Popen4(popen2.Popen3):
+ childerr = None
+
+ def __init__(self, cmd, bufsize=-1):
+ p2cread, p2cwrite = os.pipe()
+ c2pread, c2pwrite = os.pipe()
+ self.pid = os.fork()
+ if self.pid == 0:
+ # Child
+ os.dup2(p2cread, 0)
+ os.dup2(c2pwrite, 1)
+ os.dup2(c2pwrite, 2)
+ for i in range(3, popen2.MAXFD):
+ try:
+ os.close(i)
+ except: pass
+ try:
+ os.execvp(cmd[0], cmd)
+ finally:
+ os._exit(1)
+ # Shouldn't come here, I guess
+ os._exit(1)
+ os.close(p2cread)
+ self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
+ os.close(c2pwrite)
+ self.fromchild = os.fdopen(c2pread, 'r', bufsize)
+ popen2._active.append(self)
+
+ popen2.Popen4 = Popen4
+
+ class Popen3(popen2.Popen3, popen2.Popen4):
+ universal_newlines = 1
+ def __init__(self, command, **kw):
+ if kw.get('stderr') == 'STDOUT':
+ popen2.Popen4.__init__(self, command, 1)
+ else:
+ popen2.Popen3.__init__(self, command, 1)
+ self.stdin = self.tochild
+ self.stdout = self.fromchild
+ self.stderr = self.childerr
+ def wait(self, *args, **kw):
+ resultcode = popen2.Popen3.wait(self, *args, **kw)
+ if os.WIFEXITED(resultcode):
+ return os.WEXITSTATUS(resultcode)
+ elif os.WIFSIGNALED(resultcode):
+ return os.WTERMSIG(resultcode)
+ else:
+ return None
+
+ subprocess.Popen = Popen3
+
+
+
+# From Josiah Carlson,
+# ASPN : Python Cookbook : Module to allow Asynchronous subprocess use on Windows and Posix platforms
+# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/440554
+
+PIPE = subprocess.PIPE
+
+if sys.platform == 'win32':
+ from win32file import ReadFile, WriteFile
+ from win32pipe import PeekNamedPipe
+ import msvcrt
+else:
+ import select
+ import fcntl
+
+ try: fcntl.F_GETFL
+ except AttributeError: fcntl.F_GETFL = 3
+
+ try: fcntl.F_SETFL
+ except AttributeError: fcntl.F_SETFL = 4
+
+class Popen(subprocess.Popen):
+ def recv(self, maxsize=None):
+ return self._recv('stdout', maxsize)
+
+ def recv_err(self, maxsize=None):
+ return self._recv('stderr', maxsize)
+
+ def send_recv(self, input='', maxsize=None):
+ return self.send(input), self.recv(maxsize), self.recv_err(maxsize)
+
+ def get_conn_maxsize(self, which, maxsize):
+ if maxsize is None:
+ maxsize = 1024
+ elif maxsize < 1:
+ maxsize = 1
+ return getattr(self, which), maxsize
+
+ def _close(self, which):
+ getattr(self, which).close()
+ setattr(self, which, None)
+
+ if sys.platform == 'win32':
+ def send(self, input):
+ if not self.stdin:
+ return None
+
+ try:
+ x = msvcrt.get_osfhandle(self.stdin.fileno())
+ (errCode, written) = WriteFile(x, input)
+ except ValueError:
+ return self._close('stdin')
+ except (subprocess.pywintypes.error, Exception) as why:
+                if why.args[0] in (109, errno.ESHUTDOWN):
+ return self._close('stdin')
+ raise
+
+ return written
+
+ def _recv(self, which, maxsize):
+ conn, maxsize = self.get_conn_maxsize(which, maxsize)
+ if conn is None:
+ return None
+
+ try:
+ x = msvcrt.get_osfhandle(conn.fileno())
+ (read, nAvail, nMessage) = PeekNamedPipe(x, 0)
+ if maxsize < nAvail:
+ nAvail = maxsize
+ if nAvail > 0:
+ (errCode, read) = ReadFile(x, nAvail, None)
+ except ValueError:
+ return self._close(which)
+ except (subprocess.pywintypes.error, Exception) as why:
+                if why.args[0] in (109, errno.ESHUTDOWN):
+ return self._close(which)
+ raise
+
+ #if self.universal_newlines:
+ # read = self._translate_newlines(read)
+ return read
+
+ else:
+ def send(self, input):
+ if not self.stdin:
+ return None
+
+ if not select.select([], [self.stdin], [], 0)[1]:
+ return 0
+
+ try:
+ written = os.write(self.stdin.fileno(), input)
+ except OSError as why:
+                if why.errno == errno.EPIPE: # broken pipe
+ return self._close('stdin')
+ raise
+
+ return written
+
+ def _recv(self, which, maxsize):
+ conn, maxsize = self.get_conn_maxsize(which, maxsize)
+ if conn is None:
+ return None
+
+ try:
+ flags = fcntl.fcntl(conn, fcntl.F_GETFL)
+ except TypeError:
+ flags = None
+ else:
+ if not conn.closed:
+                fcntl.fcntl(conn, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+ try:
+ if not select.select([conn], [], [], 0)[0]:
+ return ''
+
+ r = conn.read(maxsize)
+ if not r:
+ return self._close(which)
+
+ #if self.universal_newlines:
+ # r = self._translate_newlines(r)
+ return r
+ finally:
+            if not conn.closed and flags is not None:
+ fcntl.fcntl(conn, fcntl.F_SETFL, flags)
+
+disconnect_message = "Other end disconnected!"
+
+def recv_some(p, t=.1, e=1, tr=5, stderr=0):
+ if tr < 1:
+ tr = 1
+ x = time.time()+t
+ y = []
+ r = ''
+ pr = p.recv
+ if stderr:
+ pr = p.recv_err
+ while time.time() < x or r:
+ r = pr()
+ if r is None:
+ if e:
+ raise Exception(disconnect_message)
+ else:
+ break
+ elif r:
+ y.append(r)
+ else:
+ time.sleep(max((x-time.time())/tr, 0))
+ return ''.join(y)
+
+def send_all(p, data):
+ data = memoryview(data)
+ while len(data):
+ sent = p.send(data)
+ if sent is None:
+ raise Exception(disconnect_message)
+ data = data[sent:]
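+
+# Comment-only usage sketch for the asynchronous Popen wrapper above
+# (assumes a 'cat' binary on PATH; whether bytes or str flow through the
+# pipes depends on the Python version and universal_newlines, so treat
+# this as illustrative rather than definitive):
+#
+#   p = Popen(['cat'], stdin=PIPE, stdout=PIPE, universal_newlines=True)
+#   send_all(p, b'hello\n')   # send() writes raw bytes via os.write()
+#   echoed = recv_some(p)     # collects output until the timeout lapses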
+
+
+
+class TestCmd(object):
+ """Class TestCmd
+ """
+
+ def __init__(self, description = None,
+ program = None,
+ interpreter = None,
+ workdir = None,
+ subdir = None,
+ verbose = None,
+ match = None,
+ diff = None,
+ combine = 0,
+ universal_newlines = 1):
+ self._cwd = os.getcwd()
+ self.description_set(description)
+ self.program_set(program)
+ self.interpreter_set(interpreter)
+ if verbose is None:
+ try:
+                verbose = max(0, int(os.environ.get('TESTCMD_VERBOSE', 0)))
+ except ValueError:
+ verbose = 0
+ self.verbose_set(verbose)
+ self.combine = combine
+ self.universal_newlines = universal_newlines
+ if match is not None:
+ self.match_function = match
+ else:
+ self.match_function = match_re
+ if diff is not None:
+ self.diff_function = diff
+ else:
+ try:
+ difflib
+ except NameError:
+ pass
+ else:
+ self.diff_function = simple_diff
+ #self.diff_function = difflib.context_diff
+ #self.diff_function = difflib.unified_diff
+ self._dirlist = []
+ self._preserve = {'pass_test': 0, 'fail_test': 0, 'no_result': 0}
+        if 'PRESERVE' in os.environ and os.environ['PRESERVE'] != '':
+ self._preserve['pass_test'] = os.environ['PRESERVE']
+ self._preserve['fail_test'] = os.environ['PRESERVE']
+ self._preserve['no_result'] = os.environ['PRESERVE']
+ else:
+ try:
+ self._preserve['pass_test'] = os.environ['PRESERVE_PASS']
+ except KeyError:
+ pass
+ try:
+ self._preserve['fail_test'] = os.environ['PRESERVE_FAIL']
+ except KeyError:
+ pass
+ try:
+ self._preserve['no_result'] = os.environ['PRESERVE_NO_RESULT']
+ except KeyError:
+ pass
+ self._stdout = []
+ self._stderr = []
+ self.status = None
+ self.condition = 'no_result'
+ self.workdir_set(workdir)
+ self.subdir(subdir)
+
+ def __del__(self):
+ self.cleanup()
+
+ def __repr__(self):
+ return "%x" % id(self)
+
+ banner_char = '='
+ banner_width = 80
+
+ def banner(self, s, width=None):
+ if width is None:
+ width = self.banner_width
+ return s + self.banner_char * (width - len(s))
+
+ if os.name == 'posix':
+
+ def escape(self, arg):
+ "escape shell special characters"
+ slash = '\\'
+ special = '"$'
+
+ arg = arg.replace(slash, slash+slash)
+ for c in special:
+ arg = arg.replace(c, slash+c)
+
+ if re_space.search(arg):
+ arg = '"' + arg + '"'
+ return arg
+
+ else:
+
+ # Windows does not allow special characters in file names
+ # anyway, so no need for an escape function, we will just quote
+ # the arg.
+ def escape(self, arg):
+ if re_space.search(arg):
+ arg = '"' + arg + '"'
+ return arg
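+
+    # Comment-only example of the POSIX escape() above, following its
+    # substitutions (backslash and '"$' get backslash-escaped; arguments
+    # containing whitespace get double-quoted):
+    #
+    #   escape('a b')  -> '"a b"'
+    #   escape('a"b')  -> 'a\\"b'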
+
+ def canonicalize(self, path):
+ if is_List(path):
+ path = os.path.join(*path)
+ if not os.path.isabs(path):
+ path = os.path.join(self.workdir, path)
+ return path
+
+ def chmod(self, path, mode):
+ """Changes permissions on the specified file or directory
+ path name."""
+ path = self.canonicalize(path)
+ os.chmod(path, mode)
+
+ def cleanup(self, condition = None):
+ """Removes any temporary working directories for the specified
+ TestCmd environment. If the environment variable PRESERVE was
+ set when the TestCmd environment was created, temporary working
+ directories are not removed. If any of the environment variables
+ PRESERVE_PASS, PRESERVE_FAIL, or PRESERVE_NO_RESULT were set
+ when the TestCmd environment was created, then temporary working
+ directories are not removed if the test passed, failed, or had
+ no result, respectively. Temporary working directories are also
+ preserved for conditions specified via the preserve method.
+
+ Typically, this method is not called directly, but is used when
+ the script exits to clean up temporary working directories as
+ appropriate for the exit status.
+ """
+ if not self._dirlist:
+ return
+ os.chdir(self._cwd)
+ self.workdir = None
+ if condition is None:
+ condition = self.condition
+ if self._preserve[condition]:
+ for dir in self._dirlist:
+ print("Preserved directory", dir)
+ else:
+ list = self._dirlist[:]
+ list.reverse()
+ for dir in list:
+ self.writable(dir, 1)
+ shutil.rmtree(dir, ignore_errors = 1)
+ self._dirlist = []
+
+ try:
+ global _Cleanup
+ _Cleanup.remove(self)
+ except (AttributeError, ValueError):
+ pass
+
+ def command_args(self, program = None,
+ interpreter = None,
+ arguments = None):
+ if program:
+ if type(program) == type('') and not os.path.isabs(program):
+ program = os.path.join(self._cwd, program)
+ else:
+ program = self.program
+ if not interpreter:
+ interpreter = self.interpreter
+ if not type(program) in [type([]), type(())]:
+ program = [program]
+ cmd = list(program)
+ if interpreter:
+ if not type(interpreter) in [type([]), type(())]:
+ interpreter = [interpreter]
+ cmd = list(interpreter) + cmd
+ if arguments:
+ if type(arguments) == type(''):
+ arguments = arguments.split()
+ cmd.extend(arguments)
+ return cmd
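+
+    # Comment-only sketch of command_args() per the logic above (the
+    # working-directory path shown is hypothetical):
+    #
+    #   test.command_args('prog', 'python', '-v')
+    #   -> ['python', '/path/to/cwd/prog', '-v']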
+
+ def description_set(self, description):
+ """Set the description of the functionality being tested.
+ """
+ self.description = description
+
+ try:
+ difflib
+ except NameError:
+ def diff(self, a, b, name, *args, **kw):
+ print(self.banner('Expected %s' % name))
+ print(a)
+ print(self.banner('Actual %s' % name))
+ print(b)
+ else:
+ def diff(self, a, b, name, *args, **kw):
+ print(self.banner(name))
+ args = (a.splitlines(), b.splitlines()) + args
+ lines = self.diff_function(*args, **kw)
+ for l in lines:
+ print(l)
+
+ def fail_test(self, condition = 1, function = None, skip = 0):
+ """Cause the test to fail.
+ """
+ if not condition:
+ return
+ self.condition = 'fail_test'
+ fail_test(self = self,
+ condition = condition,
+ function = function,
+ skip = skip)
+
+ def interpreter_set(self, interpreter):
+ """Set the program to be used to interpret the program
+ under test as a script.
+ """
+ self.interpreter = interpreter
+
+ def match(self, lines, matches):
+ """Compare actual and expected file contents.
+ """
+ return self.match_function(lines, matches)
+
+ def match_exact(self, lines, matches):
+ """Compare actual and expected file contents.
+ """
+ return match_exact(lines, matches)
+
+ def match_re(self, lines, res):
+ """Compare actual and expected file contents.
+ """
+ return match_re(lines, res)
+
+ def match_re_dotall(self, lines, res):
+ """Compare actual and expected file contents.
+ """
+ return match_re_dotall(lines, res)
+
+ def no_result(self, condition = 1, function = None, skip = 0):
+ """Report that the test could not be run.
+ """
+ if not condition:
+ return
+ self.condition = 'no_result'
+ no_result(self = self,
+ condition = condition,
+ function = function,
+ skip = skip)
+
+ def pass_test(self, condition = 1, function = None):
+ """Cause the test to pass.
+ """
+ if not condition:
+ return
+ self.condition = 'pass_test'
+ pass_test(self = self, condition = condition, function = function)
+
+ def preserve(self, *conditions):
+ """Arrange for the temporary working directories for the
+ specified TestCmd environment to be preserved for one or more
+ conditions. If no conditions are specified, arranges for
+ the temporary working directories to be preserved for all
+ conditions.
+ """
+        if conditions == ():
+ conditions = ('pass_test', 'fail_test', 'no_result')
+ for cond in conditions:
+ self._preserve[cond] = 1
+
+ def program_set(self, program):
+ """Set the executable program or script to be tested.
+ """
+ if program and not os.path.isabs(program):
+ program = os.path.join(self._cwd, program)
+ self.program = program
+
+ def read(self, file, mode = 'r'):
+ """Reads and returns the contents of the specified file name.
+ The file name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The file is
+ assumed to be under the temporary working directory unless it
+ is an absolute path name. The I/O mode for the file may
+ be specified; it must begin with an 'r'. The default is
+ 'r' (string read).
+ """
+ file = self.canonicalize(file)
+ if mode[0] != 'r':
+ raise ValueError("mode must begin with 'r'")
+ with open(file, mode) as f:
+ result = f.read()
+ return result
+
+ def rmdir(self, dir):
+ """Removes the specified dir name.
+ The dir name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The dir is
+ assumed to be under the temporary working directory unless it
+ is an absolute path name.
+ The dir must be empty.
+ """
+ dir = self.canonicalize(dir)
+ os.rmdir(dir)
+
+ def start(self, program = None,
+ interpreter = None,
+ arguments = None,
+ universal_newlines = None,
+ **kw):
+ """
+ Starts a program or script for the test environment.
+
+ The specified program will have the original directory
+ prepended unless it is enclosed in a [list].
+ """
+ cmd = self.command_args(program, interpreter, arguments)
+ cmd_string = ' '.join(map(self.escape, cmd))
+ if self.verbose:
+ sys.stderr.write(cmd_string + "\n")
+ if universal_newlines is None:
+ universal_newlines = self.universal_newlines
+
+ # On Windows, if we make stdin a pipe when we plan to send
+ # no input, and the test program exits before
+ # Popen calls msvcrt.open_osfhandle, that call will fail.
+ # So don't use a pipe for stdin if we don't need one.
+ stdin = kw.get('stdin', None)
+ if stdin is not None:
+ stdin = subprocess.PIPE
+
+ combine = kw.get('combine', self.combine)
+ if combine:
+ stderr_value = subprocess.STDOUT
+ else:
+ stderr_value = subprocess.PIPE
+
+ return Popen(cmd,
+ stdin=stdin,
+ stdout=subprocess.PIPE,
+ stderr=stderr_value,
+ universal_newlines=universal_newlines)
+
+ def finish(self, popen, **kw):
+ """
+ Finishes and waits for the process being run under control of
+ the specified popen argument, recording the exit status,
+ standard output and error output.
+ """
+ popen.stdin.close()
+ self.status = popen.wait()
+ if not self.status:
+ self.status = 0
+ self._stdout.append(popen.stdout.read())
+ if popen.stderr:
+ stderr = popen.stderr.read()
+ else:
+ stderr = ''
+ self._stderr.append(stderr)
+
+ def run(self, program = None,
+ interpreter = None,
+ arguments = None,
+ chdir = None,
+ stdin = None,
+ universal_newlines = None):
+ """Runs a test of the program or script for the test
+ environment. Standard output and error output are saved for
+ future retrieval via the stdout() and stderr() methods.
+
+ The specified program will have the original directory
+ prepended unless it is enclosed in a [list].
+ """
+ if chdir:
+ oldcwd = os.getcwd()
+ if not os.path.isabs(chdir):
+                chdir = self.workpath(chdir)
+ if self.verbose:
+ sys.stderr.write("chdir(" + chdir + ")\n")
+ os.chdir(chdir)
+ p = self.start(program,
+ interpreter,
+ arguments,
+ universal_newlines,
+ stdin=stdin)
+ if stdin:
+ if is_List(stdin):
+ for line in stdin:
+ p.stdin.write(line)
+ else:
+ p.stdin.write(stdin)
+ p.stdin.close()
+
+ out = p.stdout.read()
+ if p.stderr is None:
+ err = ''
+ else:
+ err = p.stderr.read()
+ try:
+ close_output = p.close_output
+ except AttributeError:
+ p.stdout.close()
+            if p.stderr is not None:
+ p.stderr.close()
+ else:
+ close_output()
+
+ self._stdout.append(out)
+ self._stderr.append(err)
+
+ self.status = p.wait()
+ if not self.status:
+ self.status = 0
+
+ if chdir:
+ os.chdir(oldcwd)
+ if self.verbose >= 2:
+ write = sys.stdout.write
+ write('============ STATUS: %d\n' % self.status)
+ out = self.stdout()
+ if out or self.verbose >= 3:
+ write('============ BEGIN STDOUT (len=%d):\n' % len(out))
+ write(out)
+ write('============ END STDOUT\n')
+ err = self.stderr()
+ if err or self.verbose >= 3:
+ write('============ BEGIN STDERR (len=%d)\n' % len(err))
+ write(err)
+ write('============ END STDERR\n')
+
+ def sleep(self, seconds = default_sleep_seconds):
+ """Sleeps at least the specified number of seconds. If no
+ number is specified, sleeps at least the minimum number of
+ seconds necessary to advance file time stamps on the current
+ system. Sleeping more seconds is all right.
+ """
+ time.sleep(seconds)
+
+ def stderr(self, run = None):
+ """Returns the error output from the specified run number.
+ If there is no specified run number, then returns the error
+ output of the last run. If the run number is less than zero,
+ then returns the error output from that many runs back from the
+ current run.
+ """
+ if not run:
+ run = len(self._stderr)
+ elif run < 0:
+ run = len(self._stderr) + run
+ run = run - 1
+ return self._stderr[run]
+
+ def stdout(self, run = None):
+ """Returns the standard output from the specified run number.
+ If there is no specified run number, then returns the standard
+ output of the last run. If the run number is less than zero,
+ then returns the standard output from that many runs back from
+ the current run.
+ """
+ if not run:
+ run = len(self._stdout)
+ elif run < 0:
+ run = len(self._stdout) + run
+ run = run - 1
+ return self._stdout[run]
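+
+    # Comment-only illustration of the run-indexing convention shared by
+    # stdout() and stderr() above:
+    #
+    #   test.stdout()    -> standard output of the most recent run
+    #   test.stdout(1)   -> standard output of the first run
+    #   test.stdout(-1)  -> standard output of the next-to-last run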
+
+ def subdir(self, *subdirs):
+ """Create new subdirectories under the temporary working
+ directory, one for each argument. An argument may be a list,
+ in which case the list elements are concatenated using the
+ os.path.join() method. Subdirectories multiple levels deep
+ must be created using a separate argument for each level:
+
+ test.subdir('sub', ['sub', 'dir'], ['sub', 'dir', 'ectory'])
+
+ Returns the number of subdirectories actually created.
+ """
+ count = 0
+ for sub in subdirs:
+ if sub is None:
+ continue
+ if is_List(sub):
+ sub = os.path.join(*sub)
+ new = os.path.join(self.workdir, sub)
+ try:
+ os.mkdir(new)
+ except OSError:
+ pass
+ else:
+ count = count + 1
+ return count
+
+ def symlink(self, target, link):
+ """Creates a symlink to the specified target.
+ The link name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The link is
+ assumed to be under the temporary working directory unless it
+ is an absolute path name. The target is *not* assumed to be
+ under the temporary working directory.
+ """
+ link = self.canonicalize(link)
+ os.symlink(target, link)
+
+ def tempdir(self, path=None):
+ """Creates a temporary directory.
+ A unique directory name is generated if no path name is specified.
+ The directory is created, and will be removed when the TestCmd
+ object is destroyed.
+ """
+ if path is None:
+ try:
+ path = tempfile.mktemp(prefix=tempfile.template)
+ except TypeError:
+ path = tempfile.mktemp()
+ os.mkdir(path)
+
+ # Symlinks in the path will report things
+ # differently from os.getcwd(), so chdir there
+ # and back to fetch the canonical path.
+ cwd = os.getcwd()
+ try:
+ os.chdir(path)
+ path = os.getcwd()
+ finally:
+ os.chdir(cwd)
+
+ # Uppercase the drive letter since the case of drive
+ # letters is pretty much random on win32:
+        drive, rest = os.path.splitdrive(path)
+ if drive:
+ path = drive.upper() + rest
+
+ #
+ self._dirlist.append(path)
+ global _Cleanup
+ try:
+ _Cleanup.index(self)
+ except ValueError:
+ _Cleanup.append(self)
+
+ return path
+
+ def touch(self, path, mtime=None):
+ """Updates the modification time on the specified file or
+ directory path name. The default is to update to the
+ current time if no explicit modification time is specified.
+ """
+ path = self.canonicalize(path)
+ atime = os.path.getatime(path)
+ if mtime is None:
+ mtime = time.time()
+ os.utime(path, (atime, mtime))
+
+ def unlink(self, file):
+ """Unlinks the specified file name.
+ The file name may be a list, in which case the elements are
+ concatenated with the os.path.join() method. The file is
+ assumed to be under the temporary working directory unless it
+ is an absolute path name.
+ """
+ file = self.canonicalize(file)
+ os.unlink(file)
+
+ def verbose_set(self, verbose):
+ """Set the verbose level.
+ """
+ self.verbose = verbose
+
+ def where_is(self, file, path=None, pathext=None):
+ """Find an executable file.
+ """
+ if is_List(file):
+ file = os.path.join(*file)
+ if not os.path.isabs(file):
+ file = where_is(file, path, pathext)
+ return file
+
+ def workdir_set(self, path):
+ """Creates a temporary working directory with the specified
+ path name. If the path is a null string (''), a unique
+ directory name is created.
+ """
+        if path is not None:
+ if path == '':
+ path = None
+ path = self.tempdir(path)
+ self.workdir = path
+
+ def workpath(self, *args):
+ """Returns the absolute path name to a subdirectory or file
+ within the current temporary working directory. Concatenates
+ the temporary working directory name with the specified
+ arguments using the os.path.join() method.
+ """
+ return os.path.join(self.workdir, *args)
+
+ def readable(self, top, read=1):
+ """Make the specified directory tree readable (read == 1)
+ or not (read == None).
+
+ This method has no effect on Windows systems, which use a
+ completely different mechanism to control file readability.
+ """
+
+ if sys.platform == 'win32':
+ return
+
+ if read:
+ def do_chmod(fname):
+ try: st = os.stat(fname)
+ except OSError: pass
+ else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IREAD))
+ else:
+ def do_chmod(fname):
+ try: st = os.stat(fname)
+ except OSError: pass
+ else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IREAD))
+
+ if os.path.isfile(top):
+ # If it's a file, that's easy, just chmod it.
+ do_chmod(top)
+ elif read:
+ # It's a directory and we're trying to turn on read
+ # permission, so it's also pretty easy, just chmod the
+ # directory and then chmod every entry on our walk down the
+ # tree. Because os.walk() is top-down, we'll enable
+ # read permission on any directories that have it disabled
+ # before os.walk() tries to list their contents.
+ do_chmod(top)
+
+            for dirpath, dirnames, filenames in os.walk(top):
+                for n in dirnames + filenames:
+                    do_chmod(os.path.join(dirpath, n))
+ else:
+ # It's a directory and we're trying to turn off read
+            # permission, which means we have to chmod the directories
+ # in the tree bottom-up, lest disabling read permission from
+ # the top down get in the way of being able to get at lower
+ # parts of the tree. But os.walk() visits things top
+ # down, so we just use an object to collect a list of all
+ # of the entries in the tree, reverse the list, and then
+ # chmod the reversed (bottom-up) list.
+            col = Collector(top)
+            for dirpath, dirnames, filenames in os.walk(top):
+                col(None, dirpath, dirnames + filenames)
+ col.entries.reverse()
+ for d in col.entries: do_chmod(d)
+
+ def writable(self, top, write=1):
+ """Make the specified directory tree writable (write == 1)
+ or not (write == None).
+ """
+
+ if sys.platform == 'win32':
+
+ if write:
+ def do_chmod(fname):
+ try: os.chmod(fname, stat.S_IWRITE)
+ except OSError: pass
+ else:
+ def do_chmod(fname):
+ try: os.chmod(fname, stat.S_IREAD)
+ except OSError: pass
+
+ else:
+
+ if write:
+ def do_chmod(fname):
+ try: st = os.stat(fname)
+ except OSError: pass
+ else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|0o200))
+ else:
+ def do_chmod(fname):
+ try: st = os.stat(fname)
+ except OSError: pass
+ else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~0o200))
+
+ if os.path.isfile(top):
+ do_chmod(top)
+ else:
+            col = Collector(top)
+            for dirpath, dirnames, filenames in os.walk(top):
+                col(None, dirpath, dirnames + filenames)
+ for d in col.entries: do_chmod(d)
+
+ def executable(self, top, execute=1):
+ """Make the specified directory tree executable (execute == 1)
+ or not (execute == None).
+
+ This method has no effect on Windows systems, which use a
+ completely different mechanism to control file executability.
+ """
+
+ if sys.platform == 'win32':
+ return
+
+ if execute:
+ def do_chmod(fname):
+ try: st = os.stat(fname)
+ except OSError: pass
+ else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]|stat.S_IEXEC))
+ else:
+ def do_chmod(fname):
+ try: st = os.stat(fname)
+ except OSError: pass
+ else: os.chmod(fname, stat.S_IMODE(st[stat.ST_MODE]&~stat.S_IEXEC))
+
+ if os.path.isfile(top):
+ # If it's a file, that's easy, just chmod it.
+ do_chmod(top)
+ elif execute:
+ # It's a directory and we're trying to turn on execute
+ # permission, so it's also pretty easy, just chmod the
+ # directory and then chmod every entry on our walk down the
+ # tree. Because os.walk() is top-down, we'll enable
+ # execute permission on any directories that have it disabled
+ # before os.walk() tries to list their contents.
+ do_chmod(top)
+
+            for dirpath, dirnames, filenames in os.walk(top):
+                for n in dirnames + filenames:
+                    do_chmod(os.path.join(dirpath, n))
+ else:
+ # It's a directory and we're trying to turn off execute
+ # permission, which means we have to chmod the directories
+ # in the tree bottom-up, lest disabling execute permission from
+ # the top down get in the way of being able to get at lower
+ # parts of the tree. But os.walk() visits things top
+ # down, so we just use an object to collect a list of all
+ # of the entries in the tree, reverse the list, and then
+ # chmod the reversed (bottom-up) list.
+            col = Collector(top)
+            for dirpath, dirnames, filenames in os.walk(top):
+                col(None, dirpath, dirnames + filenames)
+ col.entries.reverse()
+ for d in col.entries: do_chmod(d)
+
+ def write(self, file, content, mode = 'w'):
+ """Writes the specified content text (second argument) to the
+ specified file name (first argument). The file name may be
+ a list, in which case the elements are concatenated with the
+ os.path.join() method. The file is created under the temporary
+ working directory. Any subdirectories in the path must already
+ exist. The I/O mode for the file may be specified; it must
+        begin with a 'w'.  The default is 'w' (text write).
+ """
+ file = self.canonicalize(file)
+ if mode[0] != 'w':
+ raise ValueError("mode must begin with 'w'")
+ with open(file, mode) as f:
+ f.write(content)
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/third_party/python/gyp/test/lib/TestCommon.py b/third_party/python/gyp/test/lib/TestCommon.py
new file mode 100644
index 0000000000..6850ce9ada
--- /dev/null
+++ b/third_party/python/gyp/test/lib/TestCommon.py
@@ -0,0 +1,591 @@
+# Copyright (c) 2017 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+TestCommon.py: a testing framework for commands and scripts
+ with commonly useful error handling
+
+The TestCommon module provides a simple, high-level interface for writing
+tests of executable commands and scripts, especially commands and scripts
+that interact with the file system. All methods throw exceptions and
+exit on failure, with useful error messages. This makes a number of
+explicit checks unnecessary, making the test scripts themselves simpler
+to write and easier to read.
+
+The TestCommon class is a subclass of the TestCmd class. In essence,
+TestCommon is a wrapper that handles common TestCmd error conditions in
+useful ways. You can use TestCommon directly, or subclass it for your
+program and add additional (or override) methods to tailor it to your
+program's specific needs. Alternatively, the TestCommon class serves
+as a useful example of how to define your own TestCmd subclass.
+
+As a subclass of TestCmd, TestCommon provides access to all of the
+variables and methods from the TestCmd module. Consequently, you can
+use any variable or method documented in the TestCmd module without
+having to explicitly import TestCmd.
+
+A TestCommon environment object is created via the usual invocation:
+
+ import TestCommon
+ test = TestCommon.TestCommon()
+
+You can use all of the TestCmd keyword arguments when instantiating a
+TestCommon object; see the TestCmd documentation for details.
+
+Here is an overview of the methods and keyword arguments that are
+provided by the TestCommon class:
+
+ test.must_be_writable('file1', ['file2', ...])
+
+ test.must_contain('file', 'required text\n')
+
+ test.must_contain_all_lines(output, lines, ['title', find])
+
+ test.must_contain_any_line(output, lines, ['title', find])
+
+ test.must_exist('file1', ['file2', ...])
+
+ test.must_match('file', "expected contents\n")
+
+ test.must_not_be_writable('file1', ['file2', ...])
+
+ test.must_not_contain('file', 'banned text\n')
+
+ test.must_not_contain_any_line(output, lines, ['title', find])
+
+ test.must_not_exist('file1', ['file2', ...])
+
+ test.run(options = "options to be prepended to arguments",
+ stdout = "expected standard output from the program",
+ stderr = "expected error output from the program",
+ status = expected_status,
+ match = match_function)
+
+The TestCommon module also provides the following variables:
+
+ TestCommon.python_executable
+ TestCommon.exe_suffix
+ TestCommon.obj_suffix
+ TestCommon.shobj_prefix
+ TestCommon.shobj_suffix
+ TestCommon.lib_prefix
+ TestCommon.lib_suffix
+ TestCommon.dll_prefix
+ TestCommon.dll_suffix
+
+"""
+
+# Copyright 2000-2010 Steven Knight
+# This module is free software, and you may redistribute it and/or modify
+# it under the same terms as Python itself, so long as this copyright message
+# and disclaimer are retained in their original form.
+#
+# IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+# SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
+# THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+# DAMAGE.
+#
+# THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+# PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
+# AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
+# SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+
+from __future__ import print_function
+
+__author__ = "Steven Knight <knight at baldmt dot com>"
+__revision__ = "TestCommon.py 0.37.D001 2010/01/11 16:55:50 knight"
+__version__ = "0.37"
+
+import copy
+import os
+import os.path
+import stat
+import sys
+try:
+ from UserList import UserList
+except ImportError:
+ from collections import UserList
+
+from TestCmd import *
+from TestCmd import __all__
+
+__all__.extend([ 'TestCommon',
+ 'exe_suffix',
+ 'obj_suffix',
+ 'shobj_prefix',
+ 'shobj_suffix',
+ 'lib_prefix',
+ 'lib_suffix',
+ 'dll_prefix',
+ 'dll_suffix',
+ ])
+
+# Variables that describe the prefixes and suffixes on this system.
+if sys.platform == 'win32':
+ exe_suffix = '.exe'
+ obj_suffix = '.obj'
+ shobj_suffix = '.obj'
+ shobj_prefix = ''
+ lib_prefix = ''
+ lib_suffix = '.lib'
+ dll_prefix = ''
+ dll_suffix = '.dll'
+ module_prefix = ''
+ module_suffix = '.dll'
+elif sys.platform == 'cygwin':
+ exe_suffix = '.exe'
+ obj_suffix = '.o'
+ shobj_suffix = '.os'
+ shobj_prefix = ''
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = ''
+ dll_suffix = '.dll'
+ module_prefix = ''
+ module_suffix = '.dll'
+elif sys.platform.find('irix') != -1:
+ exe_suffix = ''
+ obj_suffix = '.o'
+ shobj_suffix = '.o'
+ shobj_prefix = ''
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'lib'
+ dll_suffix = '.so'
+ module_prefix = 'lib'
+    module_suffix = '.so'
+elif sys.platform.find('darwin') != -1:
+ exe_suffix = ''
+ obj_suffix = '.o'
+ shobj_suffix = '.os'
+ shobj_prefix = ''
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'lib'
+ dll_suffix = '.dylib'
+ module_prefix = ''
+ module_suffix = '.so'
+elif sys.platform.find('sunos') != -1:
+ exe_suffix = ''
+ obj_suffix = '.o'
+ shobj_suffix = '.os'
+ shobj_prefix = 'so_'
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'lib'
+    dll_suffix = '.so'
+ module_prefix = ''
+ module_suffix = '.so'
+else:
+ exe_suffix = ''
+ obj_suffix = '.o'
+ shobj_suffix = '.os'
+ shobj_prefix = ''
+ lib_prefix = 'lib'
+ lib_suffix = '.a'
+ dll_prefix = 'lib'
+ dll_suffix = '.so'
+ module_prefix = 'lib'
+ module_suffix = '.so'
+
+def is_List(e):
+ return type(e) is list \
+ or isinstance(e, UserList)
+
+def is_writable(f):
+ mode = os.stat(f)[stat.ST_MODE]
+ return mode & stat.S_IWUSR
+
+def separate_files(flist):
+ existing = []
+ missing = []
+ for f in flist:
+ if os.path.exists(f):
+ existing.append(f)
+ else:
+ missing.append(f)
+ return existing, missing
+
+def _failed(self, status = 0):
+ if self.status is None or status is None:
+ return None
+ try:
+ return _status(self) not in status
+ except TypeError:
+ # status wasn't an iterable
+ return _status(self) != status
+
+def _status(self):
+ return self.status
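+
+# Comment-only illustration of _failed(), given the logic above (status
+# may be a single value, an iterable of acceptable values, or None):
+#
+#   self.status = 1;    _failed(self, 0)      -> True   (1 != 0)
+#   self.status = 2;    _failed(self, [2, 3]) -> False  (2 is acceptable)
+#   self.status = None; _failed(self, 0)      -> None   (nothing to test)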
+
+class TestCommon(TestCmd):
+
+ # Additional methods from the Perl Test::Cmd::Common module
+ # that we may wish to add in the future:
+ #
+ # $test->subdir('subdir', ...);
+ #
+ # $test->copy('src_file', 'dst_file');
+
+ def __init__(self, **kw):
+ """Initialize a new TestCommon instance. This involves just
+ calling the base class initialization, and then changing directory
+ to the workdir.
+ """
+ TestCmd.__init__(self, **kw)
+ os.chdir(self.workdir)
+
+ def must_be_writable(self, *files):
+ """Ensures that the specified file(s) exist and are writable.
+ An individual file can be specified as a list of directory names,
+ in which case the pathname will be constructed by concatenating
+ them. Exits FAILED if any of the files does not exist or is
+ not writable.
+ """
+ files = map((lambda x: os.path.join(*x) if is_List(x) else x), files)
+ existing, missing = separate_files(files)
+ unwritable = [x for x in existing if not is_writable(x)]
+ if missing:
+ print("Missing files: `%s'" % "', `".join(missing))
+ if unwritable:
+ print("Unwritable files: `%s'" % "', `".join(unwritable))
+ self.fail_test(missing + unwritable)
+
+ def must_contain(self, file, required, mode = 'r'):
+ """Ensures that the specified file contains the required text.
+ """
+ file_contents = self.read(file, mode)
+ contains = (file_contents.find(required) != -1)
+ if not contains:
+ print("File `%s' does not contain required string." % file)
+ print(self.banner('Required string '))
+ print(required)
+ print(self.banner('%s contents ' % file))
+ print(file_contents)
+ self.fail_test(not contains)
+
+ def must_contain_all_lines(self, output, lines, title=None, find=None):
+ """Ensures that the specified output string (first argument)
+ contains all of the specified lines (second argument).
+
+ An optional third argument can be used to describe the type
+ of output being searched, and only shows up in failure output.
+
+ An optional fourth argument can be used to supply a different
+        function, of the form "find(line, output)", to use when searching
+ for lines in the output.
+ """
+ if find is None:
+ find = lambda o, l: o.find(l) != -1
+ missing = []
+ for line in lines:
+ if not find(output, line):
+ missing.append(line)
+
+ if missing:
+ if title is None:
+ title = 'output'
+ sys.stdout.write("Missing expected lines from %s:\n" % title)
+ for line in missing:
+ sys.stdout.write(' ' + repr(line) + '\n')
+ sys.stdout.write(self.banner(title + ' '))
+ sys.stdout.write(output)
+ self.fail_test()
+
+ def must_contain_any_line(self, output, lines, title=None, find=None):
+ """Ensures that the specified output string (first argument)
+ contains at least one of the specified lines (second argument).
+
+ An optional third argument can be used to describe the type
+ of output being searched, and only shows up in failure output.
+
+ An optional fourth argument can be used to supply a different
+        function, of the form "find(line, output)", to use when searching
+ for lines in the output.
+ """
+ if find is None:
+ find = lambda o, l: o.find(l) != -1
+ for line in lines:
+ if find(output, line):
+ return
+
+ if title is None:
+ title = 'output'
+ sys.stdout.write("Missing any expected line from %s:\n" % title)
+ for line in lines:
+ sys.stdout.write(' ' + repr(line) + '\n')
+ sys.stdout.write(self.banner(title + ' '))
+ sys.stdout.write(output)
+ self.fail_test()
+
+ def must_contain_lines(self, lines, output, title=None):
+ # Deprecated; retain for backwards compatibility.
+ return self.must_contain_all_lines(output, lines, title)
+
+ def must_exist(self, *files):
+ """Ensures that the specified file(s) must exist. An individual
+ file be specified as a list of directory names, in which case the
+ pathname will be constructed by concatenating them. Exits FAILED
+ if any of the files does not exist.
+ """
+ files = map((lambda x: os.path.join(*x) if is_List(x) else x), files)
+ missing = [f for f in files if not os.path.exists(f)]
+ if missing:
+ print("Missing files: `%s'" % "', `".join(missing))
+ self.fail_test(missing)
+
+ def must_match(self, file, expect, mode = 'r'):
+ """Matches the contents of the specified file (first argument)
+ against the expected contents (second argument). The expected
+ contents are a list of lines or a string which will be split
+ on newlines.
+ """
+ file_contents = self.read(file, mode)
+ try:
+ self.fail_test(not self.match(file_contents, expect))
+ except KeyboardInterrupt:
+ raise
+ except:
+ print("Unexpected contents of `%s'" % file)
+ self.diff(expect, file_contents, 'contents ')
+ raise
+
+ def must_not_contain(self, file, banned, mode = 'r'):
+ """Ensures that the specified file doesn't contain the banned text.
+ """
+ file_contents = self.read(file, mode)
+ contains = (file_contents.find(banned) != -1)
+ if contains:
+ print("File `%s' contains banned string." % file)
+ print(self.banner('Banned string '))
+ print(banned)
+ print(self.banner('%s contents ' % file))
+ print(file_contents)
+ self.fail_test(contains)
+
+ def must_not_contain_any_line(self, output, lines, title=None, find=None):
+ """Ensures that the specified output string (first argument)
+ does not contain any of the specified lines (second argument).
+
+ An optional third argument can be used to describe the type
+ of output being searched, and only shows up in failure output.
+
+ An optional fourth argument can be used to supply a different
+        function, of the form "find(line, output)", to use when searching
+ for lines in the output.
+ """
+ if find is None:
+ find = lambda o, l: o.find(l) != -1
+ unexpected = []
+ for line in lines:
+ if find(output, line):
+ unexpected.append(line)
+
+ if unexpected:
+ if title is None:
+ title = 'output'
+ sys.stdout.write("Unexpected lines in %s:\n" % title)
+ for line in unexpected:
+ sys.stdout.write(' ' + repr(line) + '\n')
+ sys.stdout.write(self.banner(title + ' '))
+ sys.stdout.write(output)
+ self.fail_test()
+
+ def must_not_contain_lines(self, lines, output, title=None):
+ return self.must_not_contain_any_line(output, lines, title)
+
+ def must_not_exist(self, *files):
+ """Ensures that the specified file(s) must not exist.
+ An individual file be specified as a list of directory names, in
+ which case the pathname will be constructed by concatenating them.
+ Exits FAILED if any of the files exists.
+ """
+ files = map((lambda x: os.path.join(*x) if is_List(x) else x), files)
+ existing = [f for f in files if os.path.exists(f)]
+ if existing:
+ print("Unexpected files exist: `%s'" % "', `".join(existing))
+ self.fail_test(existing)
+
+ def must_not_be_writable(self, *files):
+ """Ensures that the specified file(s) exist and are not writable.
+ An individual file can be specified as a list of directory names,
+ in which case the pathname will be constructed by concatenating
+ them. Exits FAILED if any of the files does not exist or is
+ writable.
+ """
+ files = map((lambda x: os.path.join(*x) if is_List(x) else x), files)
+ existing, missing = separate_files(files)
+ writable = [x for x in existing if is_writable(x)]
+ if missing:
+ print("Missing files: `%s'" % "', `".join(missing))
+ if writable:
+ print("Writable files: `%s'" % "', `".join(writable))
+ self.fail_test(missing + writable)
+
+ def _complete(self, actual_stdout, expected_stdout,
+ actual_stderr, expected_stderr, status, match):
+ """
+ Post-processes running a subcommand, checking for failure
+ status and displaying output appropriately.
+ """
+ if _failed(self, status):
+ expect = ''
+ if status != 0:
+ expect = " (expected %s)" % str(status)
+ print("%s returned %s%s" % (self.program, str(_status(self)),
+ expect))
+ print(self.banner('STDOUT '))
+ print(actual_stdout)
+ print(self.banner('STDERR '))
+ print(actual_stderr)
+ self.fail_test()
+        if expected_stdout is not None and not match(actual_stdout,
+ expected_stdout):
+ self.diff(expected_stdout, actual_stdout, 'STDOUT ')
+ if actual_stderr:
+ print(self.banner('STDERR '))
+ print(actual_stderr)
+ self.fail_test()
+        if expected_stderr is not None and not match(actual_stderr,
+ expected_stderr):
+ print(self.banner('STDOUT '))
+ print(actual_stdout)
+ self.diff(expected_stderr, actual_stderr, 'STDERR ')
+ self.fail_test()
+
+ def start(self, program = None,
+ interpreter = None,
+ arguments = None,
+ universal_newlines = None,
+ **kw):
+ """
+ Starts a program or script for the test environment.
+
+ This handles the "options" keyword argument and exceptions.
+ """
+ options = kw.pop('options', None)
+ if options:
+ if arguments is None:
+ arguments = options
+ else:
+ arguments = options + " " + arguments
+
+ try:
+ return TestCmd.start(self, program, interpreter, arguments,
+ universal_newlines, **kw)
+ except KeyboardInterrupt:
+ raise
+ except Exception as e:
+ print(self.banner('STDOUT '))
+ try:
+ print(self.stdout())
+ except IndexError:
+ pass
+ print(self.banner('STDERR '))
+ try:
+ print(self.stderr())
+ except IndexError:
+ pass
+ cmd_args = self.command_args(program, interpreter, arguments)
+ sys.stderr.write('Exception trying to execute: %s\n' % cmd_args)
+ raise e
+
+ def finish(self, popen, stdout = None, stderr = '', status = 0, **kw):
+ """
+ Finishes and waits for the process being run under control of
+ the specified popen argument. Additional arguments are similar
+ to those of the run() method:
+
+ stdout The expected standard output from
+ the command. A value of None means
+ don't test standard output.
+
+ stderr The expected error output from
+ the command. A value of None means
+ don't test error output.
+
+ status The expected exit status from the
+ command. A value of None means don't
+ test exit status.
+ """
+ TestCmd.finish(self, popen, **kw)
+ match = kw.get('match', self.match)
+ self._complete(self.stdout(), stdout,
+ self.stderr(), stderr, status, match)
+
+ def run(self, options = None, arguments = None,
+ stdout = None, stderr = '', status = 0, **kw):
+ """Runs the program under test, checking that the test succeeded.
+
+ The arguments are the same as the base TestCmd.run() method,
+ with the addition of:
+
+ options Extra options that get appended to the beginning
+ of the arguments.
+
+ stdout The expected standard output from
+ the command. A value of None means
+ don't test standard output.
+
+ stderr The expected error output from
+ the command. A value of None means
+ don't test error output.
+
+ status The expected exit status from the
+ command. A value of None means don't
+ test exit status.
+
+ By default, this expects a successful exit (status = 0), does
+ not test standard output (stdout = None), and expects that error
+ output is empty (stderr = "").
+ """
+ if options:
+ if arguments is None:
+ arguments = options
+ else:
+ arguments = options + " " + arguments
+ kw['arguments'] = arguments
+ match = kw.pop('match', self.match)
+ TestCmd.run(self, **kw)
+ self._complete(self.stdout(), stdout,
+ self.stderr(), stderr, status, match)
+
+ def skip_test(self, message="Skipping test.\n"):
+ """Skips a test.
+
+ Proper test-skipping behavior is dependent on the external
+ TESTCOMMON_PASS_SKIPS environment variable. If set, we treat
+ the skip as a PASS (exit 0), and otherwise treat it as NO RESULT.
+ In either case, we print the specified message as an indication
+ that the substance of the test was skipped.
+
+ (This was originally added to support development under Aegis.
+ Technically, skipping a test is a NO RESULT, but Aegis would
+ treat that as a test failure and prevent the change from going to
+        the next step.  Since we didn't want to force anyone using Aegis
+ to have to install absolutely every tool used by the tests, we
+ would actually report to Aegis that a skipped test has PASSED
+ so that the workflow isn't held up.)
+ """
+ if message:
+ sys.stdout.write(message)
+ sys.stdout.flush()
+ pass_skips = os.environ.get('TESTCOMMON_PASS_SKIPS')
+ if pass_skips in [None, 0, '0']:
+ # skip=1 means skip this function when showing where this
+ # result came from. They only care about the line where the
+ # script called test.skip_test(), not the line number where
+ # we call test.no_result().
+ self.no_result(skip=1)
+ else:
+ # We're under the development directory for this change,
+ # so this is an Aegis invocation; pass the test (exit 0).
+ self.pass_test()
+
+# Local Variables:
+# tab-width:4
+# indent-tabs-mode:nil
+# End:
+# vim: set expandtab tabstop=4 shiftwidth=4:
diff --git a/third_party/python/gyp/test/lib/TestGyp.py b/third_party/python/gyp/test/lib/TestGyp.py
new file mode 100644
index 0000000000..2aa78684f5
--- /dev/null
+++ b/third_party/python/gyp/test/lib/TestGyp.py
@@ -0,0 +1,1260 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+TestGyp.py: a testing framework for GYP integration tests.
+"""
+from __future__ import print_function
+
+import errno
+import itertools
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+
+from contextlib import contextmanager
+
+from six.moves import collections_abc
+
+import TestCmd
+import TestCommon
+from TestCommon import __all__
+
+__all__.extend([
+ 'TestGyp',
+])
+
+
+def remove_debug_line_numbers(contents):
+ """Function to remove the line numbers from the debug output
+ of gyp and thus reduce the extreme fragility of the stdout
+ comparison tests.
+ """
+ lines = contents.splitlines()
+ # split each line on ":"
+ lines = [l.split(":", 3) for l in lines]
+  # keep only the message portion, dropping the level/file/line prefix
+  # (the line number in the 3rd column is what makes the output fragile)
+  lines = [":".join(l[3:]) if len(l) > 3 else l for l in lines]
+ return "\n".join(lines)
+
+
+def match_modulo_line_numbers(contents_a, contents_b):
+ """File contents matcher that ignores line numbers."""
+ contents_a = remove_debug_line_numbers(contents_a)
+ contents_b = remove_debug_line_numbers(contents_b)
+ return TestCommon.match_exact(contents_a, contents_b)
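+
+# Comment-only example of remove_debug_line_numbers(), per the split/join
+# above; gyp debug lines are assumed to look like "LEVEL:FILE:LINE:MESSAGE":
+#
+#   remove_debug_line_numbers("VARIABLES:input.py:10:Expanding 'x'")
+#   -> "Expanding 'x'"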
+
+
+@contextmanager
+def LocalEnv(local_env):
+ """Context manager to provide a local OS environment."""
+ old_env = os.environ.copy()
+ os.environ.update(local_env)
+ try:
+ yield
+ finally:
+ os.environ.clear()
+ os.environ.update(old_env)
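+
+# Comment-only usage sketch for LocalEnv (the key and value shown are
+# illustrative):
+#
+#   with LocalEnv({'GYP_DEFINES': 'component=shared_library'}):
+#     ...  # os.environ carries the override inside the block
+#   # the previous environment is restored on exit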
+
+
+class TestGypBase(TestCommon.TestCommon):
+ """
+ Class for controlling end-to-end tests of gyp generators.
+
+ Instantiating this class will create a temporary directory and
+ arrange for its destruction (via the TestCmd superclass) and
+ copy all of the non-gyptest files in the directory hierarchy of the
+ executing script.
+
+ The default behavior is to test the 'gyp' or 'gyp.bat' file in the
+ current directory. An alternative may be specified explicitly on
+ instantiation, or by setting the TESTGYP_GYP environment variable.
+
+ This class should be subclassed for each supported gyp generator
+ (format). Various abstract methods below define calling signatures
+ used by the test scripts to invoke builds on the generated build
+ configuration and to run executables generated by those builds.
+ """
+
+ formats = []
+ build_tool = None
+ build_tool_list = []
+
+ _exe = TestCommon.exe_suffix
+ _obj = TestCommon.obj_suffix
+ shobj_ = TestCommon.shobj_prefix
+ _shobj = TestCommon.shobj_suffix
+ lib_ = TestCommon.lib_prefix
+ _lib = TestCommon.lib_suffix
+ dll_ = TestCommon.dll_prefix
+ _dll = TestCommon.dll_suffix
+ module_ = TestCommon.module_prefix
+ _module = TestCommon.module_suffix
+
+ # Constants to represent different targets.
+ ALL = '__all__'
+ DEFAULT = '__default__'
+
+ # Constants for different target types.
+ EXECUTABLE = '__executable__'
+ STATIC_LIB = '__static_lib__'
+ SHARED_LIB = '__shared_lib__'
+ LOADABLE_MODULE = '__loadable_module__'
+
+ def __init__(self, gyp=None, *args, **kw):
+ self.origin_cwd = os.path.abspath(os.path.dirname(sys.argv[0]))
+ self.extra_args = sys.argv[1:]
+
+ if not gyp:
+ gyp = os.environ.get('TESTGYP_GYP')
+ if not gyp:
+ if sys.platform == 'win32':
+ gyp = 'gyp.bat'
+ else:
+ gyp = 'gyp'
+ self.gyp = os.path.abspath(gyp)
+ self.no_parallel = False
+
+ self.formats = [self.format]
+
+ self.initialize_build_tool()
+
+ kw.setdefault('match', TestCommon.match_exact)
+
+ # Put test output in out/testworkarea by default.
+ # Use temporary names so there are no collisions.
+ workdir = os.path.join('out', kw.get('workdir', 'testworkarea'))
+ # Create work area if it doesn't already exist.
+ if not os.path.isdir(workdir):
+ os.makedirs(workdir)
+
+ kw['workdir'] = tempfile.mktemp(prefix='testgyp.', dir=workdir)
+
+ formats = kw.pop('formats', [])
+
+ super(TestGypBase, self).__init__(*args, **kw)
+
+ real_format = self.format.split('-')[-1]
+ excluded_formats = set([f for f in formats if f[0] == '!'])
+ included_formats = set(formats) - excluded_formats
+ if ('!'+real_format in excluded_formats or
+ included_formats and real_format not in included_formats):
+ msg = 'Invalid test for %r format; skipping test.\n'
+ self.skip_test(msg % self.format)
+
+ self.copy_test_configuration(self.origin_cwd, self.workdir)
+ self.set_configuration(None)
+
+ # Set $HOME so that gyp doesn't read the user's actual
+ # ~/.gyp/include.gypi file, which may contain variables
+ # and other settings that would change the output.
+ os.environ['HOME'] = self.workpath()
+ # Clear $GYP_DEFINES for the same reason.
+ if 'GYP_DEFINES' in os.environ:
+ del os.environ['GYP_DEFINES']
+ # Override the user's language settings, which could
+ # otherwise make the output vary from what is expected.
+ os.environ['LC_ALL'] = 'C'
+
+ def built_file_must_exist(self, name, type=None, **kw):
+ """
+ Fails the test if the specified built file name does not exist.
+ """
+ return self.must_exist(self.built_file_path(name, type, **kw))
+
+ def built_file_must_not_exist(self, name, type=None, **kw):
+ """
+ Fails the test if the specified built file name exists.
+ """
+ return self.must_not_exist(self.built_file_path(name, type, **kw))
+
+ def built_file_must_match(self, name, contents, **kw):
+ """
+ Fails the test if the contents of the specified built file name
+ do not match the specified contents.
+ """
+ return self.must_match(self.built_file_path(name, **kw), contents)
+
+ def built_file_must_not_match(self, name, contents, **kw):
+ """
+ Fails the test if the contents of the specified built file name
+ match the specified contents.
+ """
+ return self.must_not_match(self.built_file_path(name, **kw), contents)
+
+ def built_file_must_not_contain(self, name, contents, **kw):
+ """
+ Fails the test if the specified built file name contains the specified
+ contents.
+ """
+ return self.must_not_contain(self.built_file_path(name, **kw), contents)
+
+ def copy_test_configuration(self, source_dir, dest_dir):
+ """
+ Copies the test configuration from the specified source_dir
+ (the directory in which the test script lives) to the
+ specified dest_dir (a temporary working directory).
+
+ This ignores all files and directories that begin with
+ the string 'gyptest', and all '.svn' subdirectories.
+ """
+ for root, dirs, files in os.walk(source_dir):
+ if '.svn' in dirs:
+ dirs.remove('.svn')
+      dirs = [d for d in dirs if not d.startswith('gyptest')]
+      files = [f for f in files if not f.startswith('gyptest')]
+ for dirname in dirs:
+ source = os.path.join(root, dirname)
+ destination = source.replace(source_dir, dest_dir)
+ os.mkdir(destination)
+ if sys.platform != 'win32':
+ shutil.copystat(source, destination)
+ for filename in files:
+ source = os.path.join(root, filename)
+ destination = source.replace(source_dir, dest_dir)
+ shutil.copy2(source, destination)
+
+    # The gyp tests are run with HOME pointing to |dest_dir| to provide a
+    # hermetic environment. Symlink login.keychain and the 'Provisioning
+    # Profiles' folder so that codesign can access the data required for
+    # signing binaries.
+ if sys.platform == 'darwin':
+ old_keychain = GetDefaultKeychainPath()
+ old_provisioning_profiles = os.path.join(
+ os.environ['HOME'], 'Library', 'MobileDevice',
+ 'Provisioning Profiles')
+
+ new_keychain = os.path.join(dest_dir, 'Library', 'Keychains')
+ MakeDirs(new_keychain)
+ os.symlink(old_keychain, os.path.join(new_keychain, 'login.keychain'))
+
+ if os.path.exists(old_provisioning_profiles):
+ new_provisioning_profiles = os.path.join(
+ dest_dir, 'Library', 'MobileDevice')
+ MakeDirs(new_provisioning_profiles)
+ os.symlink(old_provisioning_profiles,
+ os.path.join(new_provisioning_profiles, 'Provisioning Profiles'))
+
+ def initialize_build_tool(self):
+ """
+ Initializes the .build_tool attribute.
+
+    Searches .build_tool_list for an executable found on the user's
+    $PATH. If none of the tools is found there, the first tool on the
+    list is used as-is.
+ """
+ for build_tool in self.build_tool_list:
+ if not build_tool:
+ continue
+ if os.path.isabs(build_tool):
+ self.build_tool = build_tool
+ return
+ build_tool = self.where_is(build_tool)
+ if build_tool:
+ self.build_tool = build_tool
+ return
+
+ if self.build_tool_list:
+ self.build_tool = self.build_tool_list[0]
+
+ def relocate(self, source, destination):
+ """
+ Renames (relocates) the specified source (usually a directory)
+ to the specified destination, creating the destination directory
+ first if necessary.
+
+    Note: Don't use this as a generic "rename" operation. In the
+    future, "relocating" parts of a GYP tree may update the state of
+    the test object and modify the behavior of later method calls.
+ """
+ destination_dir = os.path.dirname(destination)
+ if not os.path.exists(destination_dir):
+ self.subdir(destination_dir)
+ os.rename(source, destination)
+
+ def report_not_up_to_date(self):
+ """
+ Reports that a build is not up-to-date.
+
+ This provides common reporting for formats that have complicated
+    conditions for checking whether a build is up-to-date. Formats
+    that expect exact output from the command (such as make) can
+    just set stdout= when they call the build() method.
+ """
+ print("Build is not up-to-date:")
+ print(self.banner('STDOUT '))
+ print(self.stdout())
+ stderr = self.stderr()
+ if stderr:
+ print(self.banner('STDERR '))
+ print(stderr)
+
+ def run_gyp(self, gyp_file, *args, **kw):
+ """
+ Runs gyp against the specified gyp_file with the specified args.
+ """
+
+    # When running gyp and comparing its output, we use a comparator
+    # that ignores the line numbers that gyp logs in its debug output.
+ if kw.pop('ignore_line_numbers', False):
+ kw.setdefault('match', match_modulo_line_numbers)
+
+ # TODO: --depth=. works around Chromium-specific tree climbing.
+ depth = kw.pop('depth', '.')
+ run_args = ['--depth='+depth]
+ run_args.extend(['--format='+f for f in self.formats])
+ run_args.append(gyp_file)
+ if self.no_parallel:
+ run_args += ['--no-parallel']
+ # TODO: if extra_args contains a '--build' flag
+ # we really want that to only apply to the last format (self.format).
+ run_args.extend(self.extra_args)
+ # Default xcode_ninja_target_pattern to ^.*$ to fix xcode-ninja tests
+ xcode_ninja_target_pattern = kw.pop('xcode_ninja_target_pattern', '.*')
+    if isinstance(self, TestGypXcodeNinja):
+ run_args.extend(
+ ['-G', 'xcode_ninja_target_pattern=%s' % xcode_ninja_target_pattern])
+ run_args.extend(args)
+ return self.run(program=self.gyp, arguments=run_args, **kw)
+
+ def run(self, *args, **kw):
+ """
+ Executes a program by calling the superclass .run() method.
+
+ This exists to provide a common place to filter out keyword
+ arguments implemented in this layer, without having to update
+ the tool-specific subclasses or clutter the tests themselves
+ with platform-specific code.
+ """
+ if 'SYMROOT' in kw:
+ del kw['SYMROOT']
+ super(TestGypBase, self).run(*args, **kw)
+
+ def set_configuration(self, configuration):
+ """
+ Sets the configuration, to be used for invoking the build
+ tool and testing potential built output.
+ """
+ self.configuration = configuration
+
+ def configuration_dirname(self):
+ if self.configuration:
+ return self.configuration.split('|')[0]
+ else:
+ return 'Default'
+
+ def configuration_buildname(self):
+ if self.configuration:
+ return self.configuration
+ else:
+ return 'Default'
+
+ #
+ # Abstract methods to be defined by format-specific subclasses.
+ #
+
+ def build(self, gyp_file, target=None, **kw):
+ """
+ Runs a build of the specified target against the configuration
+ generated from the specified gyp_file.
+
+ A 'target' argument of None or the special value TestGyp.DEFAULT
+ specifies the default argument for the underlying build tool.
+ A 'target' argument of TestGyp.ALL specifies the 'all' target
+ (if any) of the underlying build tool.
+ """
+ raise NotImplementedError
+
+ def built_file_path(self, name, type=None, **kw):
+ """
+ Returns a path to the specified file name, of the specified type.
+ """
+ raise NotImplementedError
+
+ def built_file_basename(self, name, type=None, **kw):
+ """
+ Returns the base name of the specified file name, of the specified type.
+
+ A bare=True keyword argument specifies that prefixes and suffixes shouldn't
+ be applied.
+ """
+ if not kw.get('bare'):
+ if type == self.EXECUTABLE:
+ name = name + self._exe
+ elif type == self.STATIC_LIB:
+ name = self.lib_ + name + self._lib
+ elif type == self.SHARED_LIB:
+ name = self.dll_ + name + self._dll
+ elif type == self.LOADABLE_MODULE:
+ name = self.module_ + name + self._module
+ return name
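+
+  # For example, with the usual TestCommon platform constants, on Linux
+  # built_file_basename('foo', SHARED_LIB) returns 'libfoo.so', while
+  # passing bare=True returns 'foo' unchanged; on Windows the same calls
+  # yield 'foo.dll' and 'foo'.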
+
+ def run_built_executable(self, name, *args, **kw):
+ """
+ Runs an executable program built from a gyp-generated configuration.
+
+ The specified name should be independent of any particular generator.
+ Subclasses should find the output executable in the appropriate
+ output build directory, tack on any necessary executable suffix, etc.
+ """
+ raise NotImplementedError
+
+ def up_to_date(self, gyp_file, target=None, **kw):
+ """
+ Verifies that a build of the specified target is up to date.
+
+ The subclass should implement this by calling build()
+ (or a reasonable equivalent), checking whatever conditions
+ will tell it the build was an "up to date" null build, and
+ failing if it isn't.
+ """
+ raise NotImplementedError
+
+
+class TestGypGypd(TestGypBase):
+ """
+ Subclass for testing the GYP 'gypd' generator (spit out the
+ internal data structure as pretty-printed Python).
+ """
+ format = 'gypd'
+ def __init__(self, gyp=None, *args, **kw):
+ super(TestGypGypd, self).__init__(*args, **kw)
+    # gypd implies the use of 'golden' files, which conflicts with running
+    # in parallel because parallelization changes the output ordering.
+ self.no_parallel = True
+
+
+class TestGypCustom(TestGypBase):
+ """
+  Subclass for testing GYP with a custom generator.
+ """
+
+ def __init__(self, gyp=None, *args, **kw):
+ self.format = kw.pop("format")
+ super(TestGypCustom, self).__init__(*args, **kw)
+
+
+class TestGypCMake(TestGypBase):
+ """
+ Subclass for testing the GYP CMake generator, using cmake's ninja backend.
+ """
+ format = 'cmake'
+ build_tool_list = ['cmake']
+ ALL = 'all'
+
+ def cmake_build(self, gyp_file, target=None, **kw):
+ arguments = kw.get('arguments', [])[:]
+
+ self.build_tool_list = ['cmake']
+ self.initialize_build_tool()
+
+ chdir = os.path.join(kw.get('chdir', '.'),
+ 'out',
+ self.configuration_dirname())
+ kw['chdir'] = chdir
+
+ arguments.append('-G')
+ arguments.append('Ninja')
+
+ kw['arguments'] = arguments
+
+ stderr = kw.get('stderr', None)
+ if stderr:
+ kw['stderr'] = stderr.split('$$$')[0]
+
+ self.run(program=self.build_tool, **kw)
+
+ def ninja_build(self, gyp_file, target=None, **kw):
+ arguments = kw.get('arguments', [])[:]
+
+ self.build_tool_list = ['ninja']
+ self.initialize_build_tool()
+
+ # Add a -C output/path to the command line.
+ arguments.append('-C')
+ arguments.append(os.path.join('out', self.configuration_dirname()))
+
+ if target not in (None, self.DEFAULT):
+ arguments.append(target)
+
+ kw['arguments'] = arguments
+
+ stderr = kw.get('stderr', None)
+ if stderr:
+ stderrs = stderr.split('$$$')
+ kw['stderr'] = stderrs[1] if len(stderrs) > 1 else ''
+
+ return self.run(program=self.build_tool, **kw)
+
+ def build(self, gyp_file, target=None, status=0, **kw):
+    # Two tools must be run to build: cmake and then ninja.
+ # Allow cmake to succeed when the overall expectation is to fail.
+ if status is None:
+ kw['status'] = None
+ else:
+      if not isinstance(status, collections_abc.Iterable):
+        status = (status,)
+ kw['status'] = list(itertools.chain((0,), status))
+ self.cmake_build(gyp_file, target, **kw)
+ kw['status'] = status
+ self.ninja_build(gyp_file, target, **kw)
+
+ def run_built_executable(self, name, *args, **kw):
+ # Enclosing the name in a list avoids prepending the original dir.
+ program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
+ if sys.platform == 'darwin':
+ configuration = self.configuration_dirname()
+ os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
+ return self.run(program=program, *args, **kw)
+
+ def built_file_path(self, name, type=None, **kw):
+ result = []
+ chdir = kw.get('chdir')
+ if chdir:
+ result.append(chdir)
+ result.append('out')
+ result.append(self.configuration_dirname())
+ if type == self.STATIC_LIB:
+ if sys.platform != 'darwin':
+ result.append('obj.target')
+ elif type == self.SHARED_LIB:
+ if sys.platform != 'darwin' and sys.platform != 'win32':
+ result.append('lib.target')
+ subdir = kw.get('subdir')
+ if subdir and type != self.SHARED_LIB:
+ result.append(subdir)
+ result.append(self.built_file_basename(name, type, **kw))
+ return self.workpath(*result)
+
+ def up_to_date(self, gyp_file, target=None, **kw):
+ result = self.ninja_build(gyp_file, target, **kw)
+ if not result:
+ stdout = self.stdout()
+ if 'ninja: no work to do' not in stdout:
+ self.report_not_up_to_date()
+ self.fail_test()
+ return result
+
+
+class TestGypMake(TestGypBase):
+ """
+ Subclass for testing the GYP Make generator.
+ """
+ format = 'make'
+ build_tool_list = ['make']
+ ALL = 'all'
+ def build(self, gyp_file, target=None, **kw):
+ """
+ Runs a Make build using the Makefiles generated from the specified
+ gyp_file.
+ """
+ arguments = kw.get('arguments', [])[:]
+ if self.configuration:
+ arguments.append('BUILDTYPE=' + self.configuration)
+ if target not in (None, self.DEFAULT):
+ arguments.append(target)
+ # Sub-directory builds provide per-gyp Makefiles (i.e.
+ # Makefile.gyp_filename), so use that if there is no Makefile.
+ chdir = kw.get('chdir', '')
+ if not os.path.exists(os.path.join(chdir, 'Makefile')):
+      print("No Makefile found at " + os.path.join(chdir, 'Makefile'))
+ arguments.insert(0, '-f')
+ arguments.insert(1, os.path.splitext(gyp_file)[0] + '.Makefile')
+ kw['arguments'] = arguments
+ return self.run(program=self.build_tool, **kw)
+ def up_to_date(self, gyp_file, target=None, **kw):
+ """
+ Verifies that a build of the specified Make target is up to date.
+ """
+ if target in (None, self.DEFAULT):
+ message_target = 'all'
+ else:
+ message_target = target
+ kw['stdout'] = "make: Nothing to be done for '%s'.\n" % message_target
+ return self.build(gyp_file, target, **kw)
+ def run_built_executable(self, name, *args, **kw):
+ """
+ Runs an executable built by Make.
+ """
+ configuration = self.configuration_dirname()
+ libdir = os.path.join('out', configuration, 'lib')
+ # TODO(piman): when everything is cross-compile safe, remove lib.target
+    if sys.platform == 'darwin':
+      # Mac puts target shared libraries right in the product directory.
+      os.environ['DYLD_LIBRARY_PATH'] = (
+          libdir + '.host:' + os.path.join('out', configuration))
+ else:
+ os.environ['LD_LIBRARY_PATH'] = libdir + '.host:' + libdir + '.target'
+ # Enclosing the name in a list avoids prepending the original dir.
+ program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
+ return self.run(program=program, *args, **kw)
+ def built_file_path(self, name, type=None, **kw):
+ """
+ Returns a path to the specified file name, of the specified type,
+ as built by Make.
+
+ Built files are in the subdirectory 'out/{configuration}'.
+ The default is 'out/Default'.
+
+ A chdir= keyword argument specifies the source directory
+ relative to which the output subdirectory can be found.
+
+ "type" values of STATIC_LIB or SHARED_LIB append the necessary
+ prefixes and suffixes to a platform-independent library base name.
+
+ A subdir= keyword argument specifies a library subdirectory within
+ the default 'obj.target'.
+ """
+ result = []
+ chdir = kw.get('chdir')
+ if chdir:
+ result.append(chdir)
+ configuration = self.configuration_dirname()
+ result.extend(['out', configuration])
+ if type == self.STATIC_LIB and sys.platform != 'darwin':
+ result.append('obj.target')
+ elif type == self.SHARED_LIB and sys.platform != 'darwin':
+ result.append('lib.target')
+ subdir = kw.get('subdir')
+ if subdir and type != self.SHARED_LIB:
+ result.append(subdir)
+ result.append(self.built_file_basename(name, type, **kw))
+ return self.workpath(*result)
+
+
+def ConvertToCygpath(path):
+ """Convert to cygwin path if we are using cygwin."""
+ if sys.platform == 'cygwin':
+ p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
+    path = p.communicate()[0].decode('utf-8').strip()
+ return path
+
+
+def MakeDirs(new_dir):
+ """A wrapper around os.makedirs() that emulates "mkdir -p"."""
+ try:
+ os.makedirs(new_dir)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+def GetDefaultKeychainPath():
+  """Returns the default keychain path, for use before HOME is updated."""
+ assert sys.platform == 'darwin'
+ # Format is:
+ # $ security default-keychain
+ # "/Some/Path/To/default.keychain"
+ path = subprocess.check_output(['security', 'default-keychain']).decode(
+ 'utf-8', 'ignore').strip()
+ return path[1:-1]
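+
+# Example (illustrative): copy_test_configuration() above calls this while
+# HOME still points at the user's real home directory; the returned path
+# might look like "/Users/me/Library/Keychains/login.keychain".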
+
+def FindMSBuildInstallation(msvs_version = 'auto'):
+ """Returns path to MSBuild for msvs_version or latest available.
+
+ Looks in the registry to find install location of MSBuild.
+ MSBuild before v4.0 will not build c++ projects, so only use newer versions.
+ """
+ import TestWin
+ registry = TestWin.Registry()
+
+ msvs_to_msbuild = {
+ '2013': r'12.0',
+ '2012': r'4.0', # Really v4.0.30319 which comes with .NET 4.5.
+ '2010': r'4.0'}
+
+ msbuild_basekey = r'HKLM\SOFTWARE\Microsoft\MSBuild\ToolsVersions'
+ if not registry.KeyExists(msbuild_basekey):
+ print('Error: could not find MSBuild base registry entry')
+ return None
+
+ msbuild_version = None
+ if msvs_version in msvs_to_msbuild:
+ msbuild_test_version = msvs_to_msbuild[msvs_version]
+ if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
+ msbuild_version = msbuild_test_version
+ else:
+      print('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
+            'but corresponding MSBuild "%s" was not found.' %
+            (msvs_version, msbuild_test_version))
+ if not msbuild_version:
+ for msvs_version in sorted(msvs_to_msbuild, reverse=True):
+ msbuild_test_version = msvs_to_msbuild[msvs_version]
+ if registry.KeyExists(msbuild_basekey + '\\' + msbuild_test_version):
+ msbuild_version = msbuild_test_version
+ break
+ if not msbuild_version:
+ print('Error: could not find MSBuild registry entry')
+ return None
+
+ msbuild_path = registry.GetValue(msbuild_basekey + '\\' + msbuild_version,
+ 'MSBuildToolsPath')
+ if not msbuild_path:
+ print('Error: could not get MSBuild registry entry value')
+ return None
+
+ return os.path.join(msbuild_path, 'MSBuild.exe')
+
+
+def FindVisualStudioInstallation():
+  """Returns appropriate values for the .build_tool, .uses_msbuild, and
+  .msbuild_path fields of TestGypBase for Visual Studio.
+
+ We use the value specified by GYP_MSVS_VERSION. If not specified, we
+ search %PATH% and %PATHEXT% for a devenv.{exe,bat,...} executable.
+ Failing that, we search for likely deployment paths.
+ """
+ override_build_tool = os.environ.get('GYP_BUILD_TOOL')
+ if override_build_tool:
+ return override_build_tool, True, override_build_tool
+
+ possible_roots = ['%s:\\Program Files%s' % (chr(drive), suffix)
+ for drive in range(ord('C'), ord('Z') + 1)
+ for suffix in ['', ' (x86)']]
+ possible_paths = {
+ '2017': r'Microsoft Visual Studio\2017',
+ '2015': r'Microsoft Visual Studio 14.0\Common7\IDE\devenv.com',
+ '2013': r'Microsoft Visual Studio 12.0\Common7\IDE\devenv.com',
+ '2012': r'Microsoft Visual Studio 11.0\Common7\IDE\devenv.com',
+ '2010': r'Microsoft Visual Studio 10.0\Common7\IDE\devenv.com',
+ '2008': r'Microsoft Visual Studio 9.0\Common7\IDE\devenv.com',
+ '2005': r'Microsoft Visual Studio 8\Common7\IDE\devenv.com'}
+
+ possible_roots = [ConvertToCygpath(r) for r in possible_roots]
+
+ msvs_version = 'auto'
+ for flag in (f for f in sys.argv if f.startswith('msvs_version=')):
+ msvs_version = flag.split('=')[-1]
+ msvs_version = os.environ.get('GYP_MSVS_VERSION', msvs_version)
+
+ if msvs_version in ['2017', 'auto']:
+ msbuild_exes = []
+ try:
+ path = possible_paths['2017']
+ for r in possible_roots:
+ build_tool = os.path.join(r, path)
+ if os.path.exists(build_tool):
+          break
+ else:
+ build_tool = None
+ if not build_tool:
+        args1 = ['reg', 'query',
+                 r'HKLM\Software\Microsoft\VisualStudio\SxS\VS7',
+                 '/v', '15.0', '/reg:32']
+        build_tool = subprocess.check_output(args1).decode(
+            'utf-8', 'ignore').strip().split('\r\n').pop().split(' ').pop()
+ if build_tool:
+ args2 = ['cmd.exe', '/d', '/c',
+ 'cd', '/d', build_tool,
+ '&', 'dir', '/b', '/s', 'msbuild.exe']
+ msbuild_exes = subprocess.check_output(args2).strip().split(b'\r\n')
+ msbuild_exes = [m.decode('utf-8') for m in msbuild_exes]
+        if msbuild_exes:
+          msbuild_path = os.path.join(build_tool, msbuild_exes[0])
+          if os.path.exists(msbuild_path):
+            os.environ['GYP_MSVS_VERSION'] = '2017'
+            os.environ['GYP_BUILD_TOOL'] = msbuild_path
+            return msbuild_path, True, msbuild_path
+    except Exception:
+      pass
+
+ if msvs_version in possible_paths:
+ # Check that the path to the specified GYP_MSVS_VERSION exists.
+ path = possible_paths[msvs_version]
+ for r in possible_roots:
+ build_tool = os.path.join(r, path)
+ if os.path.exists(build_tool):
+ uses_msbuild = msvs_version >= '2010'
+ msbuild_path = FindMSBuildInstallation(msvs_version)
+ return build_tool, uses_msbuild, msbuild_path
+ else:
+ print('Warning: Environment variable GYP_MSVS_VERSION specifies "%s" '
+ 'but corresponding "%s" was not found.' % (msvs_version, path))
+ # Neither GYP_MSVS_VERSION nor the path help us out. Iterate through
+ # the choices looking for a match.
+ for version in sorted(possible_paths, reverse=True):
+ path = possible_paths[version]
+ for r in possible_roots:
+ build_tool = os.path.join(r, path)
+ if os.path.exists(build_tool):
+        uses_msbuild = version >= '2010'
+        msbuild_path = FindMSBuildInstallation(version)
+ return build_tool, uses_msbuild, msbuild_path
+ print('Error: could not find devenv')
+ sys.exit(1)
+
+class TestGypOnMSToolchain(TestGypBase):
+ """
+ Common subclass for testing generators that target the Microsoft Visual
+ Studio toolchain (cl, link, dumpbin, etc.)
+ """
+ @staticmethod
+ def _ComputeVsvarsPath(devenv_path):
+ devenv_dir = os.path.split(devenv_path)[0]
+
+ # Check for location of Community install (in VS2017, at least).
+ vcvars_path = os.path.join(devenv_path, '..', '..', '..', '..', 'VC',
+ 'Auxiliary', 'Build', 'vcvars32.bat')
+ if os.path.exists(vcvars_path):
+ return os.path.abspath(vcvars_path)
+
+ vsvars_path = os.path.join(devenv_path, '..', '..', 'Tools',
+ 'vsvars32.bat')
+ return os.path.abspath(vsvars_path)
+
+ def initialize_build_tool(self):
+ super(TestGypOnMSToolchain, self).initialize_build_tool()
+ if sys.platform in ('win32', 'cygwin'):
+ build_tools = FindVisualStudioInstallation()
+ self.devenv_path, self.uses_msbuild, self.msbuild_path = build_tools
+ self.vsvars_path = TestGypOnMSToolchain._ComputeVsvarsPath(
+ self.devenv_path)
+
+ def run_dumpbin(self, *dumpbin_args):
+    """Runs the dumpbin tool with the specified arguments, capturing and
+    returning stdout."""
+ assert sys.platform in ('win32', 'cygwin')
+ cmd = os.environ.get('COMSPEC', 'cmd.exe')
+ arguments = [cmd, '/c', self.vsvars_path, '&&', 'dumpbin']
+ arguments.extend(dumpbin_args)
+ proc = subprocess.Popen(arguments, stdout=subprocess.PIPE)
+ output = proc.communicate()[0].decode('utf-8', 'ignore')
+ assert not proc.returncode
+ return output
+
+class TestGypNinja(TestGypOnMSToolchain):
+ """
+ Subclass for testing the GYP Ninja generator.
+ """
+ format = 'ninja'
+ build_tool_list = ['ninja']
+ ALL = 'all'
+ DEFAULT = 'all'
+
+ def run_gyp(self, gyp_file, *args, **kw):
+ TestGypBase.run_gyp(self, gyp_file, *args, **kw)
+
+ def build(self, gyp_file, target=None, **kw):
+ arguments = kw.get('arguments', [])[:]
+
+ # Add a -C output/path to the command line.
+ arguments.append('-C')
+ arguments.append(os.path.join('out', self.configuration_dirname()))
+
+ if target is None:
+ target = 'all'
+ arguments.append(target)
+
+ kw['arguments'] = arguments
+ return self.run(program=self.build_tool, **kw)
+
+ def run_built_executable(self, name, *args, **kw):
+ # Enclosing the name in a list avoids prepending the original dir.
+ program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
+ if sys.platform == 'darwin':
+ configuration = self.configuration_dirname()
+ os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
+ return self.run(program=program, *args, **kw)
+
+ def built_file_path(self, name, type=None, **kw):
+ result = []
+ chdir = kw.get('chdir')
+ if chdir:
+ result.append(chdir)
+ result.append('out')
+ result.append(self.configuration_dirname())
+ if type == self.STATIC_LIB:
+ if sys.platform != 'darwin':
+ result.append('obj')
+ elif type == self.SHARED_LIB:
+ if sys.platform != 'darwin' and sys.platform != 'win32':
+ result.append('lib')
+ subdir = kw.get('subdir')
+ if subdir and type != self.SHARED_LIB:
+ result.append(subdir)
+ result.append(self.built_file_basename(name, type, **kw))
+ return self.workpath(*result)
+
+ def up_to_date(self, gyp_file, target=None, **kw):
+ result = self.build(gyp_file, target, **kw)
+ if not result:
+ stdout = self.stdout()
+ if 'ninja: no work to do' not in stdout:
+ self.report_not_up_to_date()
+ self.fail_test()
+ return result
+
+
+class TestGypMSVS(TestGypOnMSToolchain):
+ """
+ Subclass for testing the GYP Visual Studio generator.
+ """
+ format = 'msvs'
+
+ u = r'=== Build: 0 succeeded, 0 failed, (\d+) up-to-date, 0 skipped ==='
+ up_to_date_re = re.compile(u, re.M)
+
+ # Initial None element will indicate to our .initialize_build_tool()
+ # method below that 'devenv' was not found on %PATH%.
+ #
+ # Note: we must use devenv.com to be able to capture build output.
+ # Directly executing devenv.exe only sends output to BuildLog.htm.
+ build_tool_list = [None, 'devenv.com']
+
+ def initialize_build_tool(self):
+ super(TestGypMSVS, self).initialize_build_tool()
+ self.build_tool = self.devenv_path
+
+ def build(self, gyp_file, target=None, rebuild=False, clean=False, **kw):
+ """
+ Runs a Visual Studio build using the configuration generated
+ from the specified gyp_file.
+ """
+ if '15.0' in self.build_tool:
+ configuration = '/p:Configuration=' + (
+ self.configuration or self.configuration_buildname())
+ build = '/t'
+ if target not in (None, self.ALL, self.DEFAULT):
+ build += ':' + target
+ if clean:
+ build += ':Clean'
+ elif rebuild:
+ build += ':Rebuild'
+ elif ':' not in build:
+ build += ':Build'
+ arguments = kw.get('arguments', [])[:]
+ arguments.extend([gyp_file.replace('.gyp', '.sln'),
+ build, configuration])
+ else:
+ configuration = self.configuration_buildname()
+ if clean:
+ build = '/Clean'
+ elif rebuild:
+ build = '/Rebuild'
+ else:
+ build = '/Build'
+ arguments = kw.get('arguments', [])[:]
+ arguments.extend([gyp_file.replace('.gyp', '.sln'),
+ build, configuration])
+ # Note: the Visual Studio generator doesn't add an explicit 'all'
+ # target, so we just treat it the same as the default.
+ if target not in (None, self.ALL, self.DEFAULT):
+ arguments.extend(['/Project', target])
+ if self.configuration:
+ arguments.extend(['/ProjectConfig', self.configuration])
+ kw['arguments'] = arguments
+ return self.run(program=self.build_tool, **kw)
+ def up_to_date(self, gyp_file, target=None, **kw):
+ r"""
+ Verifies that a build of the specified Visual Studio target is up to date.
+
+ Beware that VS2010 will behave strangely if you build under
+    C:\USERS\yourname\AppData\Local. It will cause needless work. The output
+ will be "1 succeeded and 0 up to date". MSBuild tracing reveals that:
+ "Project 'C:\Users\...\AppData\Local\...vcxproj' not up to date because
+ 'C:\PROGRAM FILES (X86)\MICROSOFT VISUAL STUDIO 10.0\VC\BIN\1033\CLUI.DLL'
+ was modified at 02/21/2011 17:03:30, which is newer than '' which was
+ modified at 01/01/0001 00:00:00.
+
+ The workaround is to specify a workdir when instantiating the test, e.g.
+ test = TestGyp.TestGyp(workdir='workarea')
+ """
+ result = self.build(gyp_file, target, **kw)
+ if not result:
+ stdout = self.stdout()
+
+ m = self.up_to_date_re.search(stdout)
+ up_to_date = m and int(m.group(1)) > 0
+ if not up_to_date:
+ self.report_not_up_to_date()
+ self.fail_test()
+ return result
+ def run_built_executable(self, name, *args, **kw):
+ """
+ Runs an executable built by Visual Studio.
+ """
+ configuration = self.configuration_dirname()
+ # Enclosing the name in a list avoids prepending the original dir.
+ program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
+ return self.run(program=program, *args, **kw)
+ def built_file_path(self, name, type=None, **kw):
+ """
+ Returns a path to the specified file name, of the specified type,
+ as built by Visual Studio.
+
+ Built files are in a subdirectory that matches the configuration
+ name. The default is 'Default'.
+
+ A chdir= keyword argument specifies the source directory
+ relative to which the output subdirectory can be found.
+
+ "type" values of STATIC_LIB or SHARED_LIB append the necessary
+ prefixes and suffixes to a platform-independent library base name.
+ """
+ result = []
+ chdir = kw.get('chdir')
+ if chdir:
+ result.append(chdir)
+ result.append(self.configuration_dirname())
+ if type == self.STATIC_LIB:
+ result.append('lib')
+ result.append(self.built_file_basename(name, type, **kw))
+ return self.workpath(*result)
+
+
+class TestGypMSVSNinja(TestGypNinja):
+ """
+ Subclass for testing the GYP Visual Studio Ninja generator.
+ """
+ format = 'msvs-ninja'
+
+ def initialize_build_tool(self):
+ super(TestGypMSVSNinja, self).initialize_build_tool()
+ # When using '--build', make sure ninja is first in the format list.
+ self.formats.insert(0, 'ninja')
+
+ def build(self, gyp_file, target=None, rebuild=False, clean=False, **kw):
+ """
+ Runs a Visual Studio build using the configuration generated
+ from the specified gyp_file.
+ """
+ arguments = kw.get('arguments', [])[:]
+ if target in (None, self.ALL, self.DEFAULT):
+ # Note: the Visual Studio generator doesn't add an explicit 'all' target.
+ # This will build each project. This will work if projects are hermetic,
+ # but may fail if they are not (a project may run more than once).
+ # It would be nice to supply an all.metaproj for MSBuild.
+ arguments.extend([gyp_file.replace('.gyp', '.sln')])
+ else:
+      # MSBuild documentation claims that one can specify a .sln and then
+      # build a project target like 'msbuild a.sln /t:proj:target', but that
+      # form only supports 'Clean', 'Rebuild', and 'Publish' (with none
+      # meaning Default). The limitation is due to the .sln -> .sln.metaproj
+      # conversion; the ':' is not special, as 'proj:target' is simply a
+      # target name in the metaproj.
+ arguments.extend([target+'.vcxproj'])
+
+ if clean:
+ build = 'Clean'
+ elif rebuild:
+ build = 'Rebuild'
+ else:
+ build = 'Build'
+ arguments.extend(['/target:'+build])
+ configuration = self.configuration_buildname()
+ config = configuration.split('|')
+ arguments.extend(['/property:Configuration='+config[0]])
+ if len(config) > 1:
+ arguments.extend(['/property:Platform='+config[1]])
+ arguments.extend(['/property:BuildInParallel=false'])
+ arguments.extend(['/verbosity:minimal'])
+
+ kw['arguments'] = arguments
+ return self.run(program=self.msbuild_path, **kw)
+
+
+class TestGypXcode(TestGypBase):
+ """
+ Subclass for testing the GYP Xcode generator.
+ """
+ format = 'xcode'
+ build_tool_list = ['xcodebuild']
+
+ phase_script_execution = ("\n"
+ "PhaseScriptExecution /\\S+/Script-[0-9A-F]+\\.sh\n"
+ " cd /\\S+\n"
+ " /bin/sh -c /\\S+/Script-[0-9A-F]+\\.sh\n"
+ "(make: Nothing to be done for .all.\\.\n)?")
+
+ strip_up_to_date_expressions = [
+ # Various actions or rules can run even when the overall build target
+ # is up to date. Strip those phases' GYP-generated output.
+ re.compile(phase_script_execution, re.S),
+
+ # The message from distcc_pump can trail the "BUILD SUCCEEDED"
+ # message, so strip that, too.
+ re.compile('__________Shutting down distcc-pump include server\n', re.S),
+ ]
+
+ up_to_date_endings = (
+ 'Checking Dependencies...\n** BUILD SUCCEEDED **\n', # Xcode 3.0/3.1
+ 'Check dependencies\n** BUILD SUCCEEDED **\n\n', # Xcode 3.2
+ 'Check dependencies\n\n\n** BUILD SUCCEEDED **\n\n', # Xcode 4.2
+ 'Check dependencies\n\n** BUILD SUCCEEDED **\n\n', # Xcode 5.0
+ )
+
+ def build(self, gyp_file, target=None, **kw):
+ """
+ Runs an xcodebuild using the .xcodeproj generated from the specified
+ gyp_file.
+ """
+ # Be sure we're working with a copy of 'arguments' since we modify it.
+ # The caller may not be expecting it to be modified.
+ arguments = kw.get('arguments', [])[:]
+ arguments.extend(['-project', gyp_file.replace('.gyp', '.xcodeproj')])
+ if target == self.ALL:
+ arguments.append('-alltargets',)
+ elif target not in (None, self.DEFAULT):
+ arguments.extend(['-target', target])
+ if self.configuration:
+ arguments.extend(['-configuration', self.configuration])
+ symroot = kw.get('SYMROOT', '$SRCROOT/build')
+ if symroot:
+ arguments.append('SYMROOT='+symroot)
+ kw['arguments'] = arguments
+
+ # Work around spurious stderr output from Xcode 4, http://crbug.com/181012
+ match = kw.pop('match', self.match)
+ def match_filter_xcode(actual, expected):
+ if actual:
+ if not TestCmd.is_List(actual):
+ actual = actual.split('\n')
+ if not TestCmd.is_List(expected):
+ expected = expected.split('\n')
+ actual = [a for a in actual
+ if 'No recorder, buildTask: <Xcode3BuildTask:' not in a and
+ 'Beginning test session' not in a and
+ 'Writing diagnostic log' not in a and
+ 'Logs/Test/' not in a]
+ return match(actual, expected)
+ kw['match'] = match_filter_xcode
+
+ return self.run(program=self.build_tool, **kw)
+ def up_to_date(self, gyp_file, target=None, **kw):
+ """
+ Verifies that a build of the specified Xcode target is up to date.
+ """
+ result = self.build(gyp_file, target, **kw)
+ if not result:
+ output = self.stdout()
+ for expression in self.strip_up_to_date_expressions:
+ output = expression.sub('', output)
+ if not output.endswith(self.up_to_date_endings):
+ self.report_not_up_to_date()
+ self.fail_test()
+ return result
+ def run_built_executable(self, name, *args, **kw):
+ """
+ Runs an executable built by xcodebuild.
+ """
+ configuration = self.configuration_dirname()
+ os.environ['DYLD_LIBRARY_PATH'] = os.path.join('build', configuration)
+ # Enclosing the name in a list avoids prepending the original dir.
+ program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
+ return self.run(program=program, *args, **kw)
+ def built_file_path(self, name, type=None, **kw):
+ """
+ Returns a path to the specified file name, of the specified type,
+ as built by Xcode.
+
+ Built files are in the subdirectory 'build/{configuration}'.
+ The default is 'build/Default'.
+
+ A chdir= keyword argument specifies the source directory
+ relative to which the output subdirectory can be found.
+
+ "type" values of STATIC_LIB or SHARED_LIB append the necessary
+ prefixes and suffixes to a platform-independent library base name.
+ """
+ result = []
+ chdir = kw.get('chdir')
+ if chdir:
+ result.append(chdir)
+ configuration = self.configuration_dirname()
+ result.extend(['build', configuration])
+ result.append(self.built_file_basename(name, type, **kw))
+ return self.workpath(*result)
+
+
+class TestGypXcodeNinja(TestGypXcode):
+ """
+ Subclass for testing the GYP Xcode Ninja generator.
+ """
+ format = 'xcode-ninja'
+
+ def initialize_build_tool(self):
+ super(TestGypXcodeNinja, self).initialize_build_tool()
+ # When using '--build', make sure ninja is first in the format list.
+ self.formats.insert(0, 'ninja')
+
+ def build(self, gyp_file, target=None, **kw):
+ """
+ Runs an xcodebuild using the .xcodeproj generated from the specified
+ gyp_file.
+ """
+ build_config = self.configuration
+ if build_config and build_config.endswith(('-iphoneos',
+ '-iphonesimulator')):
+ build_config, sdk = self.configuration.split('-')
+ kw['arguments'] = kw.get('arguments', []) + ['-sdk', sdk]
+
+ with self._build_configuration(build_config):
+ return super(TestGypXcodeNinja, self).build(
+ gyp_file.replace('.gyp', '.ninja.gyp'), target, **kw)
+
+ @contextmanager
+ def _build_configuration(self, build_config):
+ config = self.configuration
+ self.configuration = build_config
+ try:
+ yield
+ finally:
+ self.configuration = config
+
+ def built_file_path(self, name, type=None, **kw):
+ result = []
+ chdir = kw.get('chdir')
+ if chdir:
+ result.append(chdir)
+ result.append('out')
+ result.append(self.configuration_dirname())
+ subdir = kw.get('subdir')
+ if subdir and type != self.SHARED_LIB:
+ result.append(subdir)
+ result.append(self.built_file_basename(name, type, **kw))
+ return self.workpath(*result)
+
+ def up_to_date(self, gyp_file, target=None, **kw):
+ result = self.build(gyp_file, target, **kw)
+ if not result:
+ stdout = self.stdout()
+ if 'ninja: no work to do' not in stdout:
+ self.report_not_up_to_date()
+ self.fail_test()
+ return result
+
+ def run_built_executable(self, name, *args, **kw):
+ """
+ Runs an executable built by xcodebuild + ninja.
+ """
+ configuration = self.configuration_dirname()
+ os.environ['DYLD_LIBRARY_PATH'] = os.path.join('out', configuration)
+ # Enclosing the name in a list avoids prepending the original dir.
+ program = [self.built_file_path(name, type=self.EXECUTABLE, **kw)]
+ return self.run(program=program, *args, **kw)
+
+
+format_class_list = [
+ TestGypGypd,
+ TestGypCMake,
+ TestGypMake,
+ TestGypMSVS,
+ TestGypMSVSNinja,
+ TestGypNinja,
+ TestGypXcode,
+ TestGypXcodeNinja,
+]
+
+def TestGyp(*args, **kw):
+ """
+ Returns an appropriate TestGyp* instance for a specified GYP format.
+ """
+ format = kw.pop('format', os.environ.get('TESTGYP_FORMAT'))
+ for format_class in format_class_list:
+ if format == format_class.format:
+ return format_class(*args, **kw)
+ raise Exception("unknown format %r" % format)
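+
+
+# Example (illustrative): a test script typically lets the TESTGYP_FORMAT
+# environment variable pick the generator and restricts itself to the
+# formats it supports, e.g.:
+#
+#   import TestGyp
+#   test = TestGyp.TestGyp(formats=['make', 'ninja'])
+#   test.run_gyp('hello.gyp')
+#   test.build('hello.gyp', test.ALL)
+#   test.pass_test()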
diff --git a/third_party/python/gyp/test/lib/TestMac.py b/third_party/python/gyp/test/lib/TestMac.py
new file mode 100644
index 0000000000..d13afd5781
--- /dev/null
+++ b/third_party/python/gyp/test/lib/TestMac.py
@@ -0,0 +1,76 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+TestMac.py: a collection of helper functions shared between tests on Mac OS X.
+"""
+
+from __future__ import print_function
+
+import re
+import subprocess
+
+__all__ = ['Xcode', 'CheckFileType']
+
+
+def CheckFileType(test, file, archs):
+  """Checks that |file| contains exactly the architectures |archs|,
+  failing |test| otherwise."""
+ proc = subprocess.Popen(['lipo', '-info', file], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].decode('utf-8').strip()
+ assert not proc.returncode
+ if len(archs) == 1:
+ pattern = re.compile('^Non-fat file: (.*) is architecture: (.*)$')
+ else:
+ pattern = re.compile('^Architectures in the fat file: (.*) are: (.*)$')
+ match = pattern.match(o)
+ if match is None:
+    print('Output does not match expected pattern: %s' % pattern.pattern)
+ test.fail_test()
+ else:
+ found_file, found_archs = match.groups()
+ if found_file != file or set(found_archs.split()) != set(archs):
+ print('Expected file %s with arch %s, got %s with arch %s' % (
+ file, ' '.join(archs), found_file, found_archs))
+ test.fail_test()
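+
+# Example (illustrative): after building a universal binary, a test could
+# assert its architectures with something like:
+#
+#   CheckFileType(test, test.built_file_path('program', test.EXECUTABLE),
+#                 ['i386', 'x86_64'])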
+
+
+class XcodeInfo(object):
+  """Simplifies access to Xcode version information."""
+
+ def __init__(self):
+ self._cache = {}
+
+ def _XcodeVersion(self):
+ lines = subprocess.check_output(['xcodebuild', '-version']).splitlines()
+ version = ''.join(lines[0].decode('utf-8').split()[-1].split('.'))
+ version = (version + '0' * (3 - len(version))).zfill(4)
+    return version, lines[-1].decode('utf-8').split()[-1]
+
+ def Version(self):
+ if 'Version' not in self._cache:
+ self._cache['Version'], self._cache['Build'] = self._XcodeVersion()
+ return self._cache['Version']
+
+ def Build(self):
+ if 'Build' not in self._cache:
+ self._cache['Version'], self._cache['Build'] = self._XcodeVersion()
+ return self._cache['Build']
+
+ def SDKBuild(self):
+ if 'SDKBuild' not in self._cache:
+ self._cache['SDKBuild'] = subprocess.check_output(
+ ['xcodebuild', '-version', '-sdk', '', 'ProductBuildVersion'])
+ self._cache['SDKBuild'] = self._cache['SDKBuild'].decode('utf-8')
+ self._cache['SDKBuild'] = self._cache['SDKBuild'].rstrip('\n')
+ return self._cache['SDKBuild']
+
+ def SDKVersion(self):
+ if 'SDKVersion' not in self._cache:
+ self._cache['SDKVersion'] = subprocess.check_output(
+ ['xcodebuild', '-version', '-sdk', '', 'SDKVersion'])
+      self._cache['SDKVersion'] = self._cache['SDKVersion'].decode('utf-8')
+      self._cache['SDKVersion'] = self._cache['SDKVersion'].rstrip('\n')
+ return self._cache['SDKVersion']
+
+
+Xcode = XcodeInfo()
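+
+# Example (illustrative): tests can gate behavior on the installed Xcode,
+# e.g. TestMac.Xcode.Version() >= '0500' checks for Xcode 5.0 or newer,
+# '0500' being the zero-padded encoding produced by _XcodeVersion() above.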
diff --git a/third_party/python/gyp/test/lib/TestWin.py b/third_party/python/gyp/test/lib/TestWin.py
new file mode 100644
index 0000000000..ef676db121
--- /dev/null
+++ b/third_party/python/gyp/test/lib/TestWin.py
@@ -0,0 +1,101 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+TestWin.py: a collection of helpers for testing on Windows.
+"""
+
+import errno
+import os
+import re
+import sys
+import subprocess
+
+class Registry(object):
+ def _QueryBase(self, sysdir, key, value):
+ """Use reg.exe to read a particular key.
+
+    While ideally we might use the win32 module, we would like gyp to be
+    Python-neutral; cygwin Python, for instance, lacks that module.
+
+ Arguments:
+ sysdir: The system subdirectory to attempt to launch reg.exe from.
+ key: The registry key to read from.
+ value: The particular value to read.
+ Return:
+ stdout from reg.exe, or None for failure.
+ """
+    # Skip if not on Windows, where reg.exe is unavailable.
+ if sys.platform not in ('win32', 'cygwin'):
+ return None
+    # Set up params to pass to reg.exe and attempt to launch it.
+ cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
+ 'query', key]
+ if value:
+ cmd.extend(['/v', value])
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ # Get the stdout from reg.exe, reading to the end so p.returncode is valid
+ # Note that the error text may be in [1] in some cases
+ text = p.communicate()[0].decode('utf-8', 'ignore')
+ # Check return code from reg.exe; officially 0==success and 1==error
+ if p.returncode:
+ return None
+ return text
+
+ def Query(self, key, value=None):
+ r"""Use reg.exe to read a particular key through _QueryBase.
+
+    First tries to launch reg.exe from %WinDir%\Sysnative to avoid WoW64
+    redirection, falling back to System32 if that fails. Sysnative is
+    available on Vista and up, and on Windows Server 2003 and XP through KB
+    patch 942589. Note that Sysnative always fails under 64-bit Python,
+    because it is a virtual directory visible only to 32-bit processes; in
+    that case System32 works correctly in the first place.
+
+ KB 942589 - http://support.microsoft.com/kb/942589/en-us.
+
+ Arguments:
+ key: The registry key.
+ value: The particular registry value to read (optional).
+ Return:
+ stdout from reg.exe, or None for failure.
+ """
+ text = None
+ try:
+ text = self._QueryBase('Sysnative', key, value)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ text = self._QueryBase('System32', key, value)
+ else:
+ raise
+ return text
+
+ def GetValue(self, key, value):
+ """Use reg.exe to obtain the value of a registry key.
+
+ Args:
+ key: The registry key.
+ value: The particular registry value to read.
+ Return:
+ contents of the registry key's value, or None on failure.
+ """
+ text = self.Query(key, value)
+ if not text:
+ return None
+ # Extract value.
+ match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
+ if not match:
+ return None
+ return match.group(1)
+
+ def KeyExists(self, key):
+ """Use reg.exe to see if a key exists.
+
+ Args:
+ key: The registry key to check.
+ Return:
+      True if the key exists, False otherwise.
+ """
+ if not self.Query(key):
+ return False
+ return True
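+
+
+# Example (illustrative): reading the MSBuild tools path the same way
+# FindMSBuildInstallation() in TestGyp.py does:
+#
+#   registry = Registry()
+#   path = registry.GetValue(
+#       r'HKLM\SOFTWARE\Microsoft\MSBuild\ToolsVersions\4.0',
+#       'MSBuildToolsPath')
+#   # path is None when the key or value is missing.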
diff --git a/third_party/python/gyp/test/library/gyptest-shared-obj-install-path.py b/third_party/python/gyp/test/library/gyptest-shared-obj-install-path.py
new file mode 100755
index 0000000000..af335365f9
--- /dev/null
+++ b/third_party/python/gyp/test/library/gyptest-shared-obj-install-path.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that .so files that are order-only dependencies are specified by
+their install location rather than by their alias.
+"""
+
+# Python 2.5 needs this for the with statement.
+from __future__ import with_statement
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['make'])
+
+test.run_gyp('shared_dependency.gyp',
+ chdir='src')
+test.relocate('src', 'relocate/src')
+
+test.build('shared_dependency.gyp', test.ALL, chdir='relocate/src')
+
+makefile_path = 'relocate/src/Makefile'
+
+with open(makefile_path) as makefile:
+ make_contents = makefile.read()
+
+# If we remove the code to generate lib1, Make should still be able
+# to build lib2 since lib1.so already exists.
+make_contents = make_contents.replace('include lib1.target.mk', '')
+with open(makefile_path, 'w') as makefile:
+ makefile.write(make_contents)
+
+test.build('shared_dependency.gyp', test.ALL, chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/library/gyptest-shared.py b/third_party/python/gyp/test/library/gyptest-shared.py
new file mode 100755
index 0000000000..a1d2985d91
--- /dev/null
+++ b/third_party/python/gyp/test/library/gyptest-shared.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simple build of a "Hello, world!" program with shared libraries,
+including verifying that libraries are rebuilt correctly when functions
+move between libraries.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('library.gyp',
+ '-Dlibrary=shared_library',
+ '-Dmoveable_function=lib1',
+ chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('library.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from program.c
+Hello from lib1.c
+Hello from lib2.c
+Hello from lib1_moveable.c
+"""
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+test.run_gyp('library.gyp',
+ '-Dlibrary=shared_library',
+ '-Dmoveable_function=lib2',
+ chdir='relocate/src')
+
+# Update program.c to force a rebuild.
+test.sleep()
+contents = test.read('relocate/src/program.c')
+contents = contents.replace('Hello', 'Hello again')
+test.write('relocate/src/program.c', contents)
+
+test.build('library.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello again from program.c
+Hello from lib1.c
+Hello from lib2.c
+Hello from lib2_moveable.c
+"""
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+test.run_gyp('library.gyp',
+ '-Dlibrary=shared_library',
+ '-Dmoveable_function=lib1',
+ chdir='relocate/src')
+
+# Update program.c to force a rebuild.
+test.sleep()
+contents = test.read('relocate/src/program.c')
+contents = contents.replace('again', 'again again')
+test.write('relocate/src/program.c', contents)
+
+# TODO(sgk): we have to force a rebuild of lib2 so that it weeds out
+# the "moved" module. This should be done in gyp by adding a dependency
+# on the generated .vcproj file itself.
+test.touch('relocate/src/lib2.c')
+
+test.build('library.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello again again from program.c
+Hello from lib1.c
+Hello from lib2.c
+Hello from lib1_moveable.c
+"""
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/library/gyptest-static.py b/third_party/python/gyp/test/library/gyptest-static.py
new file mode 100755
index 0000000000..4bc71c4962
--- /dev/null
+++ b/third_party/python/gyp/test/library/gyptest-static.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simple build of a "Hello, world!" program with static libraries,
+including verifying that libraries are rebuilt correctly when functions
+move between libraries.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('library.gyp',
+ '-Dlibrary=static_library',
+ '-Dmoveable_function=lib1',
+ chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('library.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from program.c
+Hello from lib1.c
+Hello from lib2.c
+Hello from lib1_moveable.c
+"""
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+test.run_gyp('library.gyp',
+ '-Dlibrary=static_library',
+ '-Dmoveable_function=lib2',
+ chdir='relocate/src')
+
+# Update program.c to force a rebuild.
+test.sleep()
+contents = test.read('relocate/src/program.c')
+contents = contents.replace('Hello', 'Hello again')
+test.write('relocate/src/program.c', contents)
+
+test.build('library.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello again from program.c
+Hello from lib1.c
+Hello from lib2.c
+Hello from lib2_moveable.c
+"""
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+test.run_gyp('library.gyp',
+ '-Dlibrary=static_library',
+ '-Dmoveable_function=lib1',
+ chdir='relocate/src')
+
+# Update program.c to force a rebuild.
+test.sleep()
+contents = test.read('relocate/src/program.c')
+contents = contents.replace('again', 'again again')
+test.write('relocate/src/program.c', contents)
+
+# TODO(sgk): we have to force a rebuild of lib2 so that it weeds out
+# the "moved" module. This should be done in gyp by adding a dependency
+# on the generated .vcproj file itself.
+test.touch('relocate/src/lib2.c')
+
+test.build('library.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello again again from program.c
+Hello from lib1.c
+Hello from lib2.c
+Hello from lib1_moveable.c
+"""
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/library/src/lib1.c b/third_party/python/gyp/test/library/src/lib1.c
new file mode 100644
index 0000000000..3866b1b845
--- /dev/null
+++ b/third_party/python/gyp/test/library/src/lib1.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+void lib1_function(void)
+{
+ fprintf(stdout, "Hello from lib1.c\n");
+ fflush(stdout);
+}
diff --git a/third_party/python/gyp/test/library/src/lib1_moveable.c b/third_party/python/gyp/test/library/src/lib1_moveable.c
new file mode 100644
index 0000000000..5d3cc1d9aa
--- /dev/null
+++ b/third_party/python/gyp/test/library/src/lib1_moveable.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+void moveable_function(void)
+{
+ fprintf(stdout, "Hello from lib1_moveable.c\n");
+ fflush(stdout);
+}
diff --git a/third_party/python/gyp/test/library/src/lib2.c b/third_party/python/gyp/test/library/src/lib2.c
new file mode 100644
index 0000000000..21dda72653
--- /dev/null
+++ b/third_party/python/gyp/test/library/src/lib2.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+void lib2_function(void)
+{
+ fprintf(stdout, "Hello from lib2.c\n");
+ fflush(stdout);
+}
diff --git a/third_party/python/gyp/test/library/src/lib2_moveable.c b/third_party/python/gyp/test/library/src/lib2_moveable.c
new file mode 100644
index 0000000000..f645071d1e
--- /dev/null
+++ b/third_party/python/gyp/test/library/src/lib2_moveable.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+void moveable_function(void)
+{
+ fprintf(stdout, "Hello from lib2_moveable.c\n");
+ fflush(stdout);
+}
diff --git a/third_party/python/gyp/test/library/src/library.gyp b/third_party/python/gyp/test/library/src/library.gyp
new file mode 100644
index 0000000000..bc35516426
--- /dev/null
+++ b/third_party/python/gyp/test/library/src/library.gyp
@@ -0,0 +1,58 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'moveable_function%': 0,
+ },
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'dependencies': [
+ 'lib1',
+ 'lib2',
+ ],
+ 'sources': [
+ 'program.c',
+ ],
+ },
+ {
+ 'target_name': 'lib1',
+ 'type': '<(library)',
+ 'sources': [
+ 'lib1.c',
+ ],
+ 'conditions': [
+ ['moveable_function=="lib1"', {
+ 'sources': [
+ 'lib1_moveable.c',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'lib2',
+ 'type': '<(library)',
+ 'sources': [
+ 'lib2.c',
+ ],
+ 'conditions': [
+ ['moveable_function=="lib2"', {
+ 'sources': [
+ 'lib2_moveable.c',
+ ],
+ }],
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="linux"', {
+ 'target_defaults': {
+ # Support 64-bit shared libs (also works fine for 32-bit).
+ 'cflags': ['-fPIC'],
+ },
+ }],
+ ],
+}
diff --git a/third_party/python/gyp/test/library/src/program.c b/third_party/python/gyp/test/library/src/program.c
new file mode 100644
index 0000000000..d460f60e40
--- /dev/null
+++ b/third_party/python/gyp/test/library/src/program.c
@@ -0,0 +1,15 @@
+#include <stdio.h>
+
+extern void lib1_function(void);
+extern void lib2_function(void);
+extern void moveable_function(void);
+
+int main(void)
+{
+ fprintf(stdout, "Hello from program.c\n");
+ fflush(stdout);
+ lib1_function();
+ lib2_function();
+ moveable_function();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/library/src/shared_dependency.gyp b/third_party/python/gyp/test/library/src/shared_dependency.gyp
new file mode 100644
index 0000000000..7d29f5de59
--- /dev/null
+++ b/third_party/python/gyp/test/library/src/shared_dependency.gyp
@@ -0,0 +1,33 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib1',
+ 'type': 'shared_library',
+ 'sources': [
+ 'lib1.c',
+ ],
+ },
+ {
+ 'target_name': 'lib2',
+ 'type': 'shared_library',
+ 'sources': [
+ 'lib2.c',
+ ],
+ 'dependencies': [
+ 'lib1',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="linux"', {
+ 'target_defaults': {
+ # Support 64-bit shared libs (also works fine for 32-bit).
+ 'cflags': ['-fPIC'],
+ },
+ }],
+ ],
+}
diff --git a/third_party/python/gyp/test/library_dirs/gyptest-library-dirs.py b/third_party/python/gyp/test/library_dirs/gyptest-library-dirs.py
new file mode 100644
index 0000000000..e725dd1176
--- /dev/null
+++ b/third_party/python/gyp/test/library_dirs/gyptest-library-dirs.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies library_dirs (in link_settings) are properly found.
+"""
+
+import sys
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+lib_dir = test.tempdir('secret_location')
+
+test.run_gyp('test.gyp',
+ '-D', 'abs_path_to_secret_library_location={0}'.format(lib_dir),
+ chdir='subdir')
+
+# Must build each target independently, since they are not in each other's
+# 'dependencies' (test.ALL does NOT work here for some builders, and in any
+# case would not ensure the correct ordering).
+test.build('test.gyp', 'mylib', chdir='subdir')
+test.build('test.gyp', 'libraries-search-path-test', chdir='subdir')
+
+expect = """Hello world
+"""
+test.run_built_executable(
+ 'libraries-search-path-test', chdir='subdir', stdout=expect)
+
+if sys.platform in ('win32', 'cygwin'):
+ test.run_gyp('test-win.gyp',
+ '-D',
+ 'abs_path_to_secret_library_location={0}'.format(lib_dir),
+ chdir='subdir')
+
+ test.build('test.gyp', 'mylib', chdir='subdir')
+ test.build('test-win.gyp',
+ 'libraries-search-path-test-lib-suffix',
+ chdir='subdir')
+
+ test.run_built_executable(
+ 'libraries-search-path-test-lib-suffix', chdir='subdir', stdout=expect)
+
+
+test.pass_test()
+test.cleanup()
diff --git a/third_party/python/gyp/test/library_dirs/subdir/README.txt b/third_party/python/gyp/test/library_dirs/subdir/README.txt
new file mode 100644
index 0000000000..4031ded85f
--- /dev/null
+++ b/third_party/python/gyp/test/library_dirs/subdir/README.txt
@@ -0,0 +1 @@
+Make things live in a subdirectory, to make sure that DEPTH works correctly.
diff --git a/third_party/python/gyp/test/library_dirs/subdir/hello.cc b/third_party/python/gyp/test/library_dirs/subdir/hello.cc
new file mode 100644
index 0000000000..5dbbd48d34
--- /dev/null
+++ b/third_party/python/gyp/test/library_dirs/subdir/hello.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iostream>
+#include "mylib.h"
+
+int main() {
+ std::cout << "Hello " << my_foo(99) << std::endl;
+ return 0;
+}
diff --git a/third_party/python/gyp/test/library_dirs/subdir/mylib.cc b/third_party/python/gyp/test/library_dirs/subdir/mylib.cc
new file mode 100644
index 0000000000..654f3d0e6c
--- /dev/null
+++ b/third_party/python/gyp/test/library_dirs/subdir/mylib.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "mylib.h"
+
+std::string my_foo(int x) {
+ return std::string("world");
+}
diff --git a/third_party/python/gyp/test/library_dirs/subdir/mylib.h b/third_party/python/gyp/test/library_dirs/subdir/mylib.h
new file mode 100644
index 0000000000..84b4022e7b
--- /dev/null
+++ b/third_party/python/gyp/test/library_dirs/subdir/mylib.h
@@ -0,0 +1,12 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef TEST_LIBRARY_DIRS_SUBDIR_MYLIB_H
+#define TEST_LIBRARY_DIRS_SUBDIR_MYLIB_H
+
+#include <string>
+
+std::string my_foo(int);
+
+#endif // TEST_LIBRARY_DIRS_SUBDIR_MYLIB_H
diff --git a/third_party/python/gyp/test/library_dirs/subdir/test-win.gyp b/third_party/python/gyp/test/library_dirs/subdir/test-win.gyp
new file mode 100644
index 0000000000..033b6f7fdd
--- /dev/null
+++ b/third_party/python/gyp/test/library_dirs/subdir/test-win.gyp
@@ -0,0 +1,60 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ # This creates a static library and puts it in a nonstandard location for
+ # libraries-search-path-test.
+ 'target_name': 'mylib',
+ 'type': 'static_library',
+ 'standalone_static_library': 1,
+ # This directory is NOT in the default library search locations. It also
+ # MUST be passed in on the gyp command line:
+ #
+ # -D abs_path_to_secret_library_location=/some_absolute_path
+ #
+ # The gyptest itself (../gyptest-library-dirs.py) provides this.
+ 'product_dir': '<(abs_path_to_secret_library_location)',
+ 'sources': [
+ 'mylib.cc',
+ ],
+ },
+ {
+ 'target_name': 'libraries-search-path-test-lib-suffix',
+ 'type': 'executable',
+ 'dependencies': [
+        # It is important to NOT list mylib as a dependency here, because
+ # some build systems will track it down based on its product_dir,
+ # such that the link succeeds even without the library_dirs below.
+ #
+ # The point of this weird structuring is to ensure that 'library_dirs'
+ # works as advertised, such that just '-lmylib' (or its equivalent)
+ # works based on the directories that library_dirs puts in the library
+ # link path.
+ #
+        # If 'mylib' were listed as a proper dependency here, the build system
+ # would find it and link with its path on disk.
+ #
+ # Note that this implies 'mylib' must already be built when building
+ # 'libraries-search-path-test' (see ../gyptest-library-dirs.py).
+ #
+ #'mylib',
+ ],
+ 'sources': [
+ 'hello.cc',
+ ],
+      # Note that without this, the mylib library would not be found and
+      # linked successfully.
+ 'library_dirs': [
+ '<(abs_path_to_secret_library_location)',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-lmylib.lib',
+ ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/library_dirs/subdir/test.gyp b/third_party/python/gyp/test/library_dirs/subdir/test.gyp
new file mode 100644
index 0000000000..f83d7f2bf1
--- /dev/null
+++ b/third_party/python/gyp/test/library_dirs/subdir/test.gyp
@@ -0,0 +1,68 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ # This creates a static library and puts it in a nonstandard location for
+ # libraries-search-path-test.
+ 'target_name': 'mylib',
+ 'type': 'static_library',
+ 'standalone_static_library': 1,
+ # This directory is NOT in the default library search locations. It also
+ # MUST be passed in on the gyp command line:
+ #
+ # -D abs_path_to_secret_library_location=/some_absolute_path
+ #
+ # The gyptest itself (../gyptest-library-dirs.py) provides this.
+ 'product_dir': '<(abs_path_to_secret_library_location)',
+ 'sources': [
+ 'mylib.cc',
+ ],
+ },
+ {
+ 'target_name': 'libraries-search-path-test',
+ 'type': 'executable',
+ 'dependencies': [
+        # It is important to NOT list mylib as a dependency here, because
+ # some build systems will track it down based on its product_dir,
+ # such that the link succeeds even without the library_dirs below.
+ #
+ # The point of this weird structuring is to ensure that 'library_dirs'
+ # works as advertised, such that just '-lmylib' (or its equivalent)
+ # works based on the directories that library_dirs puts in the library
+ # link path.
+ #
+        # If 'mylib' were listed as a proper dependency here, the build system
+ # would find it and link with its path on disk.
+ #
+ # Note that this implies 'mylib' must already be built when building
+ # 'libraries-search-path-test' (see ../gyptest-library-dirs.py).
+ #
+ #'mylib',
+ ],
+ 'sources': [
+ 'hello.cc',
+ ],
+      # Note that without this, the mylib library would not be found and
+      # linked successfully.
+ 'library_dirs': [
+ '<(abs_path_to_secret_library_location)',
+ ],
+ 'link_settings': {
+ 'conditions': [
+ ['OS=="linux"', {
+ 'libraries': [
+ '-lmylib',
+ ],
+ }, { # else
+ 'libraries': [
+ '<(STATIC_LIB_PREFIX)mylib<(STATIC_LIB_SUFFIX)',
+ ],
+ }],
+ ], # conditions
+ },
+ },
+ ],
+}
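
For readers tracing the mechanics: entries in 'library_dirs' become -L search-path flags, while the 'libraries' list passes through to the linker, so on the OS=="linux" branch the final link step has roughly the shape sketched below (the compiler name, object path, and secret directory are illustrative assumptions, not taken from the test):

    # Hypothetical shape of the link command produced for the target above;
    # /tmp/secret_location stands in for <(abs_path_to_secret_library_location).
    link_cmd = [
        'g++', 'obj/hello.o',
        '-L/tmp/secret_location',  # from 'library_dirs'
        '-lmylib',                 # from the OS=="linux" 'libraries' branch
        '-o', 'libraries-search-path-test',
    ]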
diff --git a/third_party/python/gyp/test/link-dependency/gyptest-link-dependency.py b/third_party/python/gyp/test/link-dependency/gyptest-link-dependency.py
new file mode 100755
index 0000000000..3a8300d44e
--- /dev/null
+++ b/third_party/python/gyp/test/link-dependency/gyptest-link-dependency.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that a target marked with 'link_dependency': 1 isn't pulled into
+the 'none' target's dependencies (which would otherwise lead to a dependency
+cycle in ninja).
+"""
+
+import TestGyp
+
+# See https://codereview.chromium.org/177043010/#msg15 for why this doesn't
+# work with cmake.
+test = TestGyp.TestGyp(formats=['!cmake'])
+
+test.run_gyp('test.gyp')
+test.build('test.gyp', 'main')
+
+# If running gyp worked, all is well.
+test.pass_test()
diff --git a/third_party/python/gyp/test/link-dependency/main.c b/third_party/python/gyp/test/link-dependency/main.c
new file mode 100644
index 0000000000..543d8b6951
--- /dev/null
+++ b/third_party/python/gyp/test/link-dependency/main.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+#include <stdlib.h>
+int main() {
+ void *p = malloc(1);
+ printf("p: %p\n", p);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/link-dependency/mymalloc.c b/third_party/python/gyp/test/link-dependency/mymalloc.c
new file mode 100644
index 0000000000..f80bc02a62
--- /dev/null
+++ b/third_party/python/gyp/test/link-dependency/mymalloc.c
@@ -0,0 +1,12 @@
+#include <stdlib.h>
+
+// The Windows ninja generator expects an import library to be generated,
+// but none is produced if there are no exports.
+#ifdef _MSC_VER
+__declspec(dllexport) void foo() {}
+#endif
+
+void *malloc(size_t size) {
+ (void)size;
+ return (void*)0xdeadbeef;
+}
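
The gyptest for this directory only asserts that the build succeeds; a hedged way to additionally confirm that the interposing malloc was linked in is to run the produced binary and look for the sentinel pointer (the binary path below is an assumption):

    import subprocess

    # Hypothetical post-build check: mymalloc.c's malloc always returns
    # (void*)0xdeadbeef, so main.c should print that value if the shared
    # library's symbol interposed the libc one.
    out = subprocess.run(['./out/Default/main'], capture_output=True,
                         text=True).stdout
    assert 'deadbeef' in out.lower()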
diff --git a/third_party/python/gyp/test/link-dependency/test.gyp b/third_party/python/gyp/test/link-dependency/test.gyp
new file mode 100644
index 0000000000..47cec15005
--- /dev/null
+++ b/third_party/python/gyp/test/link-dependency/test.gyp
@@ -0,0 +1,37 @@
+{
+ 'variables': {
+ 'custom_malloc%' : 1,
+ },
+ 'target_defaults': {
+ 'conditions': [
+ ['custom_malloc==1', {
+ 'dependencies': [
+ 'malloc',
+ ],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'main',
+ 'type': 'none',
+ 'dependencies': [ 'main_initial',],
+ },
+ {
+ 'target_name': 'main_initial',
+ 'type': 'executable',
+ 'product_name': 'main',
+ 'sources': [ 'main.c' ],
+ },
+ {
+ 'target_name': 'malloc',
+ 'type': 'shared_library',
+ 'variables': {
+ 'prune_self_dependency': 1,
+ # Targets with type 'none' won't depend on this target.
+ 'link_dependency': 1,
+ },
+ 'sources': [ 'mymalloc.c' ],
+ },
+ ],
+}
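
The '%' suffix on 'custom_malloc%' marks it as an overridable default, so the malloc interposition can be switched off at generation time; a minimal sketch within the same harness, under that assumption:

    # Hypothetical run: '-D custom_malloc=0' overrides the 'custom_malloc%'
    # default, so target_defaults no longer injects 'malloc' into every
    # target's dependencies.
    test.run_gyp('test.gyp', '-D', 'custom_malloc=0')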
diff --git a/third_party/python/gyp/test/link-objects/base.c b/third_party/python/gyp/test/link-objects/base.c
new file mode 100644
index 0000000000..3327459205
--- /dev/null
+++ b/third_party/python/gyp/test/link-objects/base.c
@@ -0,0 +1,6 @@
+void extra();
+
+int main(void) {
+ extra();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/link-objects/extra.c b/third_party/python/gyp/test/link-objects/extra.c
new file mode 100644
index 0000000000..1d7ee09b10
--- /dev/null
+++ b/third_party/python/gyp/test/link-objects/extra.c
@@ -0,0 +1,5 @@
+#include <stdio.h>
+
+void extra() {
+ printf("PASS\n");
+}
diff --git a/third_party/python/gyp/test/link-objects/gyptest-all.py b/third_party/python/gyp/test/link-objects/gyptest-all.py
new file mode 100755
index 0000000000..45bd6e1891
--- /dev/null
+++ b/third_party/python/gyp/test/link-objects/gyptest-all.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Put an object file on the sources list.
+Expect the result to link ok.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform != 'darwin':
+  # Currently only works under the Linux make build.
+ test = TestGyp.TestGyp(formats=['make'])
+
+ test.run_gyp('link-objects.gyp')
+
+ test.build('link-objects.gyp', test.ALL)
+
+ test.run_built_executable('link-objects', stdout="PASS\n")
+
+ test.up_to_date('link-objects.gyp', test.ALL)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/link-objects/link-objects.gyp b/third_party/python/gyp/test/link-objects/link-objects.gyp
new file mode 100644
index 0000000000..ab72855531
--- /dev/null
+++ b/third_party/python/gyp/test/link-objects/link-objects.gyp
@@ -0,0 +1,24 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'link-objects',
+ 'type': 'executable',
+ 'actions': [
+ {
+ 'action_name': 'build extra object',
+ 'inputs': ['extra.c'],
+ 'outputs': ['extra.o'],
+ 'action': ['gcc', '-o', 'extra.o', '-c', 'extra.c'],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ 'sources': [
+ 'base.c',
+ ],
+ },
+ ],
+}
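
Because of 'process_outputs_as_sources', the action's extra.o output is appended to the target's sources before linking, so the build reduces to roughly the two steps below (only the first command is taken from the file; the link line is an illustrative assumption):

    import subprocess

    # Step 1: the action compiles the extra object (verbatim from the action).
    subprocess.check_call(['gcc', '-o', 'extra.o', '-c', 'extra.c'])
    # Step 2 (hypothetical): the generator links base.c together with the
    # action's output, since extra.o was promoted into 'sources'.
    subprocess.check_call(['gcc', 'base.c', 'extra.o', '-o', 'link-objects'])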
diff --git a/third_party/python/gyp/test/linux/gyptest-implicit-rpath.py b/third_party/python/gyp/test/linux/gyptest-implicit-rpath.py
new file mode 100644
index 0000000000..8e17a3f16d
--- /dev/null
+++ b/third_party/python/gyp/test/linux/gyptest-implicit-rpath.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that the implicit rpath is added only when needed.
+"""
+
+import TestGyp
+
+import re
+import subprocess
+import sys
+
+if sys.platform.startswith('linux'):
+ test = TestGyp.TestGyp(formats=['ninja', 'make'])
+
+ CHDIR = 'implicit-rpath'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ def GetRpaths(p):
+ p = test.built_file_path(p, chdir=CHDIR)
+ r = re.compile(r'Library rpath: \[([^\]]+)\]')
+ proc = subprocess.Popen(['readelf', '-d', p], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].decode('utf-8')
+ assert not proc.returncode
+ return r.findall(o)
+
+ if test.format == 'ninja':
+ expect = '$ORIGIN/lib/'
+ elif test.format == 'make':
+ expect = '$ORIGIN/lib.target/'
+ else:
+ test.fail_test()
+
+ if GetRpaths('shared_executable') != [expect]:
+ test.fail_test()
+
+ if GetRpaths('shared_executable_no_so_suffix') != [expect]:
+ test.fail_test()
+
+ if GetRpaths('static_executable'):
+ test.fail_test()
+
+ test.pass_test()
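
GetRpaths above works by scraping readelf's dynamic-section dump; the same inspection can be done standalone, as in this sketch (the built-binary path is a placeholder):

    import re
    import subprocess

    # Hypothetical standalone variant of GetRpaths: list any
    # 'Library rpath: [...]' entries readelf reports for a binary.
    out = subprocess.check_output(
        ['readelf', '-d', 'out/Default/shared_executable'], text=True)
    print(re.findall(r'Library rpath: \[([^\]]+)\]', out))
    # e.g. ['$ORIGIN/lib/'] under the ninja generator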
diff --git a/third_party/python/gyp/test/linux/gyptest-ldflags-duplicates.py b/third_party/python/gyp/test/linux/gyptest-ldflags-duplicates.py
new file mode 100644
index 0000000000..43a4607adf
--- /dev/null
+++ b/third_party/python/gyp/test/linux/gyptest-ldflags-duplicates.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies duplicate ldflags are not removed.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform.startswith('linux'):
+ test = TestGyp.TestGyp()
+
+ CHDIR = 'ldflags-duplicates'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/linux/gyptest-ldflags-from-environment.py b/third_party/python/gyp/test/linux/gyptest-ldflags-from-environment.py
new file mode 100644
index 0000000000..4aea193e4b
--- /dev/null
+++ b/third_party/python/gyp/test/linux/gyptest-ldflags-from-environment.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2017 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies the use of linker flags in environment variables.
+
+In this test, gyp and the build both run in the same local environment.
+"""
+
+import TestGyp
+
+import re
+import subprocess
+import sys
+
+FORMATS = ('make', 'ninja')
+
+if sys.platform.startswith('linux'):
+ test = TestGyp.TestGyp(formats=FORMATS)
+
+ CHDIR = 'ldflags-from-environment'
+ with TestGyp.LocalEnv({'LDFLAGS': '-Wl,--dynamic-linker=/target',
+ 'LDFLAGS_host': '-Wl,--dynamic-linker=/host',
+ 'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', chdir=CHDIR)
+
+ def GetDynamicLinker(p):
+ p = test.built_file_path(p, chdir=CHDIR)
+ r = re.compile(r'\[Requesting program interpreter: ([^\]]+)\]')
+ proc = subprocess.Popen(['readelf', '-l', p], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].decode('utf-8')
+ assert not proc.returncode
+ return r.search(o).group(1)
+
+ if GetDynamicLinker('ldflags') != '/target':
+ test.fail_test()
+
+ if GetDynamicLinker('ldflags_host') != '/host':
+ test.fail_test()
+
+ test.pass_test()
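
Outside the TestGyp harness, the same plumbing can be exercised by exporting the variables before generating; a minimal sketch assuming a gyp executable is available on PATH:

    import os
    import subprocess

    # Hypothetical manual equivalent of the LocalEnv block above: LDFLAGS
    # feeds the target toolset and LDFLAGS_host the host toolset once
    # GYP_CROSSCOMPILE is set.
    env = dict(os.environ,
               LDFLAGS='-Wl,--dynamic-linker=/target',
               LDFLAGS_host='-Wl,--dynamic-linker=/host',
               GYP_CROSSCOMPILE='1')
    subprocess.check_call(['gyp', 'test.gyp'],
                          cwd='ldflags-from-environment', env=env)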
diff --git a/third_party/python/gyp/test/linux/gyptest-target-rpath.py b/third_party/python/gyp/test/linux/gyptest-target-rpath.py
new file mode 100644
index 0000000000..f275caaece
--- /dev/null
+++ b/third_party/python/gyp/test/linux/gyptest-target-rpath.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Check the target_rpath generator flag for ninja.
+"""
+
+import TestGyp
+
+import re
+import subprocess
+import sys
+
+if sys.platform.startswith('linux'):
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'target-rpath'
+ test.run_gyp('test.gyp', '-G', 'target_rpath=/usr/lib/gyptest/', chdir=CHDIR)
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ def GetRpaths(p):
+ p = test.built_file_path(p, chdir=CHDIR)
+ r = re.compile(r'Library rpath: \[([^\]]+)\]')
+ proc = subprocess.Popen(['readelf', '-d', p], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].decode('utf-8')
+ assert not proc.returncode
+ return r.findall(o)
+
+ expect = '/usr/lib/gyptest/'
+
+ if GetRpaths('shared_executable') != [expect]:
+ test.fail_test()
+
+ if GetRpaths('shared_executable_no_so_suffix') != [expect]:
+ test.fail_test()
+
+ if GetRpaths('static_executable'):
+ test.fail_test()
+
+ test.pass_test()
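
Note that target_rpath is a generator flag rather than a gyp variable, so it travels via -G (not -D); a hedged command-line equivalent of the run_gyp call above, again assuming a gyp executable on PATH:

    import subprocess

    # Hypothetical direct invocation mirroring test.run_gyp(...) above.
    subprocess.check_call(
        ['gyp', '-G', 'target_rpath=/usr/lib/gyptest/', 'test.gyp'],
        cwd='target-rpath')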
diff --git a/third_party/python/gyp/test/linux/implicit-rpath/file.c b/third_party/python/gyp/test/linux/implicit-rpath/file.c
new file mode 100644
index 0000000000..56757a701b
--- /dev/null
+++ b/third_party/python/gyp/test/linux/implicit-rpath/file.c
@@ -0,0 +1 @@
+void f() {}
diff --git a/third_party/python/gyp/test/linux/implicit-rpath/main.c b/third_party/python/gyp/test/linux/implicit-rpath/main.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/linux/implicit-rpath/main.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/linux/implicit-rpath/test.gyp b/third_party/python/gyp/test/linux/implicit-rpath/test.gyp
new file mode 100644
index 0000000000..b546106986
--- /dev/null
+++ b/third_party/python/gyp/test/linux/implicit-rpath/test.gyp
@@ -0,0 +1,47 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'shared',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'shared_no_so_suffix',
+ 'product_extension': 'so.0.1',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'static',
+ 'type': 'static_library',
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'shared_executable',
+ 'type': 'executable',
+ 'sources': [ 'main.c' ],
+ 'dependencies': [
+ 'shared',
+ ]
+ },
+ {
+ 'target_name': 'shared_executable_no_so_suffix',
+ 'type': 'executable',
+ 'sources': [ 'main.c' ],
+ 'dependencies': [
+ 'shared_no_so_suffix',
+ ]
+ },
+ {
+ 'target_name': 'static_executable',
+ 'type': 'executable',
+ 'sources': [ 'main.c' ],
+ 'dependencies': [
+ 'static',
+ ]
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/linux/ldflags-duplicates/check-ldflags.py b/third_party/python/gyp/test/linux/ldflags-duplicates/check-ldflags.py
new file mode 100755
index 0000000000..ef1029529d
--- /dev/null
+++ b/third_party/python/gyp/test/linux/ldflags-duplicates/check-ldflags.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies duplicate ldflags are not removed.
+"""
+
+from __future__ import print_function
+
+import sys
+
+def CheckContainsFlags(args, substring):
+  # str.find() returns -1 when the substring is absent; compare with ==,
+  # not 'is' (identity on ints is an implementation detail).
+  if args.find(substring) == -1:
+    print('ERROR: Linker arguments "%s" are missing in "%s"' % (substring,
+      args))
+    return False
+  return True
+
+if __name__ == '__main__':
+  args = " ".join(sys.argv)
+  print("args = " + args)
+  if not CheckContainsFlags(args, 'lib1.a -Wl,--no-whole-archive') \
+    or not CheckContainsFlags(args, 'lib2.a -Wl,--no-whole-archive'):
+    sys.exit(1)
+  sys.exit(0)
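
As wired up in the test.gyp below, LINK_wrapper prefixes this script to the link command, so sys.argv carries the entire linker invocation; a wrapper that validates and still performs the link could exec its arguments, as in this hedged sketch:

    import os
    import sys

    # Hypothetical forwarding wrapper: after inspecting the link command
    # (sys.argv[1:]), replace this process with the real invocation.
    print('link args:', ' '.join(sys.argv[1:]))
    os.execvp(sys.argv[1], sys.argv[1:])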
diff --git a/third_party/python/gyp/test/linux/ldflags-duplicates/lib1.c b/third_party/python/gyp/test/linux/ldflags-duplicates/lib1.c
new file mode 100644
index 0000000000..a1322e7395
--- /dev/null
+++ b/third_party/python/gyp/test/linux/ldflags-duplicates/lib1.c
@@ -0,0 +1,6 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void foo() {
+}
diff --git a/third_party/python/gyp/test/linux/ldflags-duplicates/lib2.c b/third_party/python/gyp/test/linux/ldflags-duplicates/lib2.c
new file mode 100644
index 0000000000..8e7a082820
--- /dev/null
+++ b/third_party/python/gyp/test/linux/ldflags-duplicates/lib2.c
@@ -0,0 +1,6 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void bar() {
+}
diff --git a/third_party/python/gyp/test/linux/ldflags-duplicates/main.c b/third_party/python/gyp/test/linux/ldflags-duplicates/main.c
new file mode 100644
index 0000000000..b3039ace96
--- /dev/null
+++ b/third_party/python/gyp/test/linux/ldflags-duplicates/main.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/linux/ldflags-duplicates/test.gyp b/third_party/python/gyp/test/linux/ldflags-duplicates/test.gyp
new file mode 100644
index 0000000000..c36835b18f
--- /dev/null
+++ b/third_party/python/gyp/test/linux/ldflags-duplicates/test.gyp
@@ -0,0 +1,45 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'make_global_settings': [
+ ['LINK_wrapper', './check-ldflags.py'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'test',
+ 'type': 'executable',
+ 'ldflags': [
+ '-Wl,--whole-archive <(PRODUCT_DIR)/lib1.a',
+ '-Wl,--no-whole-archive',
+
+ '-Wl,--whole-archive <(PRODUCT_DIR)/lib2.a',
+ '-Wl,--no-whole-archive',
+ ],
+ 'dependencies': [
+ 'lib1',
+ 'lib2',
+ ],
+ 'sources': [
+ 'main.c',
+ ],
+ },
+ {
+ 'target_name': 'lib1',
+ 'type': 'static_library',
+ 'standalone_static_library': 1,
+ 'sources': [
+ 'lib1.c',
+ ],
+ },
+ {
+ 'target_name': 'lib2',
+ 'type': 'static_library',
+ 'standalone_static_library': 1,
+ 'sources': [
+ 'lib2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/linux/ldflags-from-environment/main.c b/third_party/python/gyp/test/linux/ldflags-from-environment/main.c
new file mode 100644
index 0000000000..1b8742a107
--- /dev/null
+++ b/third_party/python/gyp/test/linux/ldflags-from-environment/main.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2017 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/linux/ldflags-from-environment/test.gyp b/third_party/python/gyp/test/linux/ldflags-from-environment/test.gyp
new file mode 100644
index 0000000000..7ed1d07ed5
--- /dev/null
+++ b/third_party/python/gyp/test/linux/ldflags-from-environment/test.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2017 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'ldflags',
+ 'type': 'executable',
+ 'sources': [
+ 'main.c',
+ ],
+ },
+ {
+ 'target_name': 'ldflags_host',
+ 'toolsets': ['host'],
+ 'type': 'executable',
+ 'sources': [
+ 'main.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/linux/target-rpath/file.c b/third_party/python/gyp/test/linux/target-rpath/file.c
new file mode 100644
index 0000000000..56757a701b
--- /dev/null
+++ b/third_party/python/gyp/test/linux/target-rpath/file.c
@@ -0,0 +1 @@
+void f() {}
diff --git a/third_party/python/gyp/test/linux/target-rpath/main.c b/third_party/python/gyp/test/linux/target-rpath/main.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/linux/target-rpath/main.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/linux/target-rpath/test.gyp b/third_party/python/gyp/test/linux/target-rpath/test.gyp
new file mode 100644
index 0000000000..b546106986
--- /dev/null
+++ b/third_party/python/gyp/test/linux/target-rpath/test.gyp
@@ -0,0 +1,47 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'shared',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'shared_no_so_suffix',
+ 'product_extension': 'so.0.1',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'static',
+ 'type': 'static_library',
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'shared_executable',
+ 'type': 'executable',
+ 'sources': [ 'main.c' ],
+ 'dependencies': [
+ 'shared',
+ ]
+ },
+ {
+ 'target_name': 'shared_executable_no_so_suffix',
+ 'type': 'executable',
+ 'sources': [ 'main.c' ],
+ 'dependencies': [
+ 'shared_no_so_suffix',
+ ]
+ },
+ {
+ 'target_name': 'static_executable',
+ 'type': 'executable',
+ 'sources': [ 'main.c' ],
+ 'dependencies': [
+ 'static',
+ ]
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/action-envvars/action/action.gyp b/third_party/python/gyp/test/mac/action-envvars/action/action.gyp
new file mode 100644
index 0000000000..d9d65745ca
--- /dev/null
+++ b/third_party/python/gyp/test/mac/action-envvars/action/action.gyp
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'action',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'inputs': [ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/result',
+ '<(SHARED_INTERMEDIATE_DIR)/tempfile',
+ ],
+ 'action_name': 'Test action',
+ 'action': ['./action.sh', '<(SHARED_INTERMEDIATE_DIR)/tempfile' ],
+ },
+ {
+ 'inputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/tempfile',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/other_result',
+ ],
+ 'action_name': 'Other test action',
+ 'action': ['cp', '<(SHARED_INTERMEDIATE_DIR)/tempfile',
+ '<(PRODUCT_DIR)/other_result' ],
+ },
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/action-envvars/action/action.sh b/third_party/python/gyp/test/mac/action-envvars/action/action.sh
new file mode 100755
index 0000000000..48d5f6bf86
--- /dev/null
+++ b/third_party/python/gyp/test/mac/action-envvars/action/action.sh
@@ -0,0 +1,8 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+echo 'Test output' > "${BUILT_PRODUCTS_DIR}/result"
+echo 'Other output' > "$1"
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/InfoPlist-error.strings b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/InfoPlist-error.strings
new file mode 100644
index 0000000000..452e7fabf9
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/InfoPlist-error.strings
@@ -0,0 +1,3 @@
+/* Localized versions of Info.plist keys */
+
+NSHumanReadableCopyright = "Copyright ©2011 Google Inc."
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/InfoPlist.strings b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/InfoPlist.strings
new file mode 100644
index 0000000000..35bd33a96e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/InfoPlist.strings
@@ -0,0 +1,3 @@
+/* Localized versions of Info.plist keys */
+
+NSHumanReadableCopyright = "Copyright ©2011 Google Inc.";
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/MainMenu.xib b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/MainMenu.xib
new file mode 100644
index 0000000000..4524596787
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/MainMenu.xib
@@ -0,0 +1,4119 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<archive type="com.apple.InterfaceBuilder3.Cocoa.XIB" version="7.10">
+ <data>
+ <int key="IBDocument.SystemTarget">1060</int>
+ <string key="IBDocument.SystemVersion">10A324</string>
+ <string key="IBDocument.InterfaceBuilderVersion">719</string>
+ <string key="IBDocument.AppKitVersion">1015</string>
+ <string key="IBDocument.HIToolboxVersion">418.00</string>
+ <object class="NSMutableDictionary" key="IBDocument.PluginVersions">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string key="NS.object.0">719</string>
+ </object>
+ <object class="NSMutableArray" key="IBDocument.EditedObjectIDs">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <integer value="371"/>
+ <integer value="29"/>
+ </object>
+ <object class="NSArray" key="IBDocument.PluginDependencies">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ </object>
+ <object class="NSMutableDictionary" key="IBDocument.Metadata">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys" id="0">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ </object>
+ <object class="NSMutableArray" key="IBDocument.RootObjects" id="1048">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSCustomObject" id="1021">
+ <string key="NSClassName">NSApplication</string>
+ </object>
+ <object class="NSCustomObject" id="1014">
+ <string key="NSClassName">FirstResponder</string>
+ </object>
+ <object class="NSCustomObject" id="1050">
+ <string key="NSClassName">NSApplication</string>
+ </object>
+ <object class="NSMenu" id="649796088">
+ <string key="NSTitle">AMainMenu</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="694149608">
+ <reference key="NSMenu" ref="649796088"/>
+ <string key="NSTitle">TestApp</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <object class="NSCustomResource" key="NSOnImage" id="35465992">
+ <string key="NSClassName">NSImage</string>
+ <string key="NSResourceName">NSMenuCheckmark</string>
+ </object>
+ <object class="NSCustomResource" key="NSMixedImage" id="502551668">
+ <string key="NSClassName">NSImage</string>
+ <string key="NSResourceName">NSMenuMixedState</string>
+ </object>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="110575045">
+ <string key="NSTitle">TestApp</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="238522557">
+ <reference key="NSMenu" ref="110575045"/>
+ <string key="NSTitle">About TestApp</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="304266470">
+ <reference key="NSMenu" ref="110575045"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="609285721">
+ <reference key="NSMenu" ref="110575045"/>
+ <string key="NSTitle">Preferences…</string>
+ <string key="NSKeyEquiv">,</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="481834944">
+ <reference key="NSMenu" ref="110575045"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1046388886">
+ <reference key="NSMenu" ref="110575045"/>
+ <string key="NSTitle">Services</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="752062318">
+ <string key="NSTitle">Services</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ <string key="NSName">_NSServicesMenu</string>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="646227648">
+ <reference key="NSMenu" ref="110575045"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="755159360">
+ <reference key="NSMenu" ref="110575045"/>
+ <string key="NSTitle">Hide TestApp</string>
+ <string key="NSKeyEquiv">h</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="342932134">
+ <reference key="NSMenu" ref="110575045"/>
+ <string key="NSTitle">Hide Others</string>
+ <string key="NSKeyEquiv">h</string>
+ <int key="NSKeyEquivModMask">1572864</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="908899353">
+ <reference key="NSMenu" ref="110575045"/>
+ <string key="NSTitle">Show All</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1056857174">
+ <reference key="NSMenu" ref="110575045"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="632727374">
+ <reference key="NSMenu" ref="110575045"/>
+ <string key="NSTitle">Quit TestApp</string>
+ <string key="NSKeyEquiv">q</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ <string key="NSName">_NSAppleMenu</string>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="379814623">
+ <reference key="NSMenu" ref="649796088"/>
+ <string key="NSTitle">File</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="720053764">
+ <string key="NSTitle">File</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="705341025">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">New</string>
+ <string key="NSKeyEquiv">n</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="722745758">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">Open…</string>
+ <string key="NSKeyEquiv">o</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1025936716">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">Open Recent</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="1065607017">
+ <string key="NSTitle">Open Recent</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="759406840">
+ <reference key="NSMenu" ref="1065607017"/>
+ <string key="NSTitle">Clear Menu</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ <string key="NSName">_NSRecentDocumentsMenu</string>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="425164168">
+ <reference key="NSMenu" ref="720053764"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="776162233">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">Close</string>
+ <string key="NSKeyEquiv">w</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1023925487">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">Save</string>
+ <string key="NSKeyEquiv">s</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="117038363">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">Save As…</string>
+ <string key="NSKeyEquiv">S</string>
+ <int key="NSKeyEquivModMask">1179648</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="579971712">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">Revert to Saved</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1010469920">
+ <reference key="NSMenu" ref="720053764"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="294629803">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">Page Setup...</string>
+ <string key="NSKeyEquiv">P</string>
+ <int key="NSKeyEquivModMask">1179648</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSToolTip"/>
+ </object>
+ <object class="NSMenuItem" id="49223823">
+ <reference key="NSMenu" ref="720053764"/>
+ <string key="NSTitle">Print…</string>
+ <string key="NSKeyEquiv">p</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="952259628">
+ <reference key="NSMenu" ref="649796088"/>
+ <string key="NSTitle">Edit</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="789758025">
+ <string key="NSTitle">Edit</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="1058277027">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Undo</string>
+ <string key="NSKeyEquiv">z</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="790794224">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Redo</string>
+ <string key="NSKeyEquiv">Z</string>
+ <int key="NSKeyEquivModMask">1179648</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1040322652">
+ <reference key="NSMenu" ref="789758025"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="296257095">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Cut</string>
+ <string key="NSKeyEquiv">x</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="860595796">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Copy</string>
+ <string key="NSKeyEquiv">c</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="29853731">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Paste</string>
+ <string key="NSKeyEquiv">v</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="82994268">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Paste and Match Style</string>
+ <string key="NSKeyEquiv">V</string>
+ <int key="NSKeyEquivModMask">1572864</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="437104165">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Delete</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="583158037">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Select All</string>
+ <string key="NSKeyEquiv">a</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="212016141">
+ <reference key="NSMenu" ref="789758025"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="892235320">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Find</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="963351320">
+ <string key="NSTitle">Find</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="447796847">
+ <reference key="NSMenu" ref="963351320"/>
+ <string key="NSTitle">Find…</string>
+ <string key="NSKeyEquiv">f</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">1</int>
+ </object>
+ <object class="NSMenuItem" id="326711663">
+ <reference key="NSMenu" ref="963351320"/>
+ <string key="NSTitle">Find Next</string>
+ <string key="NSKeyEquiv">g</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">2</int>
+ </object>
+ <object class="NSMenuItem" id="270902937">
+ <reference key="NSMenu" ref="963351320"/>
+ <string key="NSTitle">Find Previous</string>
+ <string key="NSKeyEquiv">G</string>
+ <int key="NSKeyEquivModMask">1179648</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">3</int>
+ </object>
+ <object class="NSMenuItem" id="159080638">
+ <reference key="NSMenu" ref="963351320"/>
+ <string key="NSTitle">Use Selection for Find</string>
+ <string key="NSKeyEquiv">e</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">7</int>
+ </object>
+ <object class="NSMenuItem" id="88285865">
+ <reference key="NSMenu" ref="963351320"/>
+ <string key="NSTitle">Jump to Selection</string>
+ <string key="NSKeyEquiv">j</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="972420730">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Spelling and Grammar</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="769623530">
+ <string key="NSTitle">Spelling and Grammar</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="679648819">
+ <reference key="NSMenu" ref="769623530"/>
+ <string key="NSTitle">Show Spelling and Grammar</string>
+ <string key="NSKeyEquiv">:</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="96193923">
+ <reference key="NSMenu" ref="769623530"/>
+ <string key="NSTitle">Check Document Now</string>
+ <string key="NSKeyEquiv">;</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="859480356">
+ <reference key="NSMenu" ref="769623530"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="948374510">
+ <reference key="NSMenu" ref="769623530"/>
+ <string key="NSTitle">Check Spelling While Typing</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="967646866">
+ <reference key="NSMenu" ref="769623530"/>
+ <string key="NSTitle">Check Grammar With Spelling</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="795346622">
+ <reference key="NSMenu" ref="769623530"/>
+ <string key="NSTitle">Correct Spelling Automatically</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="507821607">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Substitutions</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="698887838">
+ <string key="NSTitle">Substitutions</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="65139061">
+ <reference key="NSMenu" ref="698887838"/>
+ <string key="NSTitle">Show Substitutions</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="19036812">
+ <reference key="NSMenu" ref="698887838"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="605118523">
+ <reference key="NSMenu" ref="698887838"/>
+ <string key="NSTitle">Smart Copy/Paste</string>
+ <string key="NSKeyEquiv">f</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">1</int>
+ </object>
+ <object class="NSMenuItem" id="197661976">
+ <reference key="NSMenu" ref="698887838"/>
+ <string key="NSTitle">Smart Quotes</string>
+ <string key="NSKeyEquiv">g</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">2</int>
+ </object>
+ <object class="NSMenuItem" id="672708820">
+ <reference key="NSMenu" ref="698887838"/>
+ <string key="NSTitle">Smart Dashes</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="708854459">
+ <reference key="NSMenu" ref="698887838"/>
+ <string key="NSTitle">Smart Links</string>
+ <string key="NSKeyEquiv">G</string>
+ <int key="NSKeyEquivModMask">1179648</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">3</int>
+ </object>
+ <object class="NSMenuItem" id="537092702">
+ <reference key="NSMenu" ref="698887838"/>
+ <string key="NSTitle">Text Replacement</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="288088188">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Transformations</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="579392910">
+ <string key="NSTitle">Transformations</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="1060694897">
+ <reference key="NSMenu" ref="579392910"/>
+ <string key="NSTitle">Make Upper Case</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="879586729">
+ <reference key="NSMenu" ref="579392910"/>
+ <string key="NSTitle">Make Lower Case</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="56570060">
+ <reference key="NSMenu" ref="579392910"/>
+ <string key="NSTitle">Capitalize</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="676164635">
+ <reference key="NSMenu" ref="789758025"/>
+ <string key="NSTitle">Speech</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="785027613">
+ <string key="NSTitle">Speech</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="731782645">
+ <reference key="NSMenu" ref="785027613"/>
+ <string key="NSTitle">Start Speaking</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="680220178">
+ <reference key="NSMenu" ref="785027613"/>
+ <string key="NSTitle">Stop Speaking</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="302598603">
+ <reference key="NSMenu" ref="649796088"/>
+ <string key="NSTitle">Format</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="941447902">
+ <string key="NSTitle">Format</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="792887677">
+ <reference key="NSMenu" ref="941447902"/>
+ <string key="NSTitle">Font</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="786677654">
+ <string key="NSTitle">Font</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="159677712">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Show Fonts</string>
+ <string key="NSKeyEquiv">t</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="305399458">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Bold</string>
+ <string key="NSKeyEquiv">b</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">2</int>
+ </object>
+ <object class="NSMenuItem" id="814362025">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Italic</string>
+ <string key="NSKeyEquiv">i</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">1</int>
+ </object>
+ <object class="NSMenuItem" id="330926929">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Underline</string>
+ <string key="NSKeyEquiv">u</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="533507878">
+ <reference key="NSMenu" ref="786677654"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="158063935">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Bigger</string>
+ <string key="NSKeyEquiv">+</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">3</int>
+ </object>
+ <object class="NSMenuItem" id="885547335">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Smaller</string>
+ <string key="NSKeyEquiv">-</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <int key="NSTag">4</int>
+ </object>
+ <object class="NSMenuItem" id="901062459">
+ <reference key="NSMenu" ref="786677654"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="767671776">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Kern</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="175441468">
+ <string key="NSTitle">Kern</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="252969304">
+ <reference key="NSMenu" ref="175441468"/>
+ <string key="NSTitle">Use Default</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="766922938">
+ <reference key="NSMenu" ref="175441468"/>
+ <string key="NSTitle">Use None</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="677519740">
+ <reference key="NSMenu" ref="175441468"/>
+ <string key="NSTitle">Tighten</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="238351151">
+ <reference key="NSMenu" ref="175441468"/>
+ <string key="NSTitle">Loosen</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="691570813">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Ligature</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="1058217995">
+ <string key="NSTitle">Ligature</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="706297211">
+ <reference key="NSMenu" ref="1058217995"/>
+ <string key="NSTitle">Use Default</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="568384683">
+ <reference key="NSMenu" ref="1058217995"/>
+ <string key="NSTitle">Use None</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="663508465">
+ <reference key="NSMenu" ref="1058217995"/>
+ <string key="NSTitle">Use All</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="769124883">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Baseline</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="18263474">
+ <string key="NSTitle">Baseline</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="257962622">
+ <reference key="NSMenu" ref="18263474"/>
+ <string key="NSTitle">Use Default</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="644725453">
+ <reference key="NSMenu" ref="18263474"/>
+ <string key="NSTitle">Superscript</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1037576581">
+ <reference key="NSMenu" ref="18263474"/>
+ <string key="NSTitle">Subscript</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="941806246">
+ <reference key="NSMenu" ref="18263474"/>
+ <string key="NSTitle">Raise</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1045724900">
+ <reference key="NSMenu" ref="18263474"/>
+ <string key="NSTitle">Lower</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="739652853">
+ <reference key="NSMenu" ref="786677654"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="1012600125">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Show Colors</string>
+ <string key="NSKeyEquiv">C</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="214559597">
+ <reference key="NSMenu" ref="786677654"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="596732606">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Copy Style</string>
+ <string key="NSKeyEquiv">c</string>
+ <int key="NSKeyEquivModMask">1572864</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="393423671">
+ <reference key="NSMenu" ref="786677654"/>
+ <string key="NSTitle">Paste Style</string>
+ <string key="NSKeyEquiv">v</string>
+ <int key="NSKeyEquivModMask">1572864</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ <string key="NSName">_NSFontMenu</string>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="215659978">
+ <reference key="NSMenu" ref="941447902"/>
+ <string key="NSTitle">Text</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="446991534">
+ <string key="NSTitle">Text</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="875092757">
+ <reference key="NSMenu" ref="446991534"/>
+ <string key="NSTitle">Align Left</string>
+ <string key="NSKeyEquiv">{</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="630155264">
+ <reference key="NSMenu" ref="446991534"/>
+ <string key="NSTitle">Center</string>
+ <string key="NSKeyEquiv">|</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="945678886">
+ <reference key="NSMenu" ref="446991534"/>
+ <string key="NSTitle">Justify</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="512868991">
+ <reference key="NSMenu" ref="446991534"/>
+ <string key="NSTitle">Align Right</string>
+ <string key="NSKeyEquiv">}</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="163117631">
+ <reference key="NSMenu" ref="446991534"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="31516759">
+ <reference key="NSMenu" ref="446991534"/>
+ <string key="NSTitle">Writing Direction</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="956096989">
+ <string key="NSTitle">Writing Direction</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="257099033">
+ <reference key="NSMenu" ref="956096989"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <string key="NSTitle">Paragraph</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="551969625">
+ <reference key="NSMenu" ref="956096989"/>
+ <string type="base64-UTF8" key="NSTitle">CURlZmF1bHQ</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="249532473">
+ <reference key="NSMenu" ref="956096989"/>
+ <string type="base64-UTF8" key="NSTitle">CUxlZnQgdG8gUmlnaHQ</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="607364498">
+ <reference key="NSMenu" ref="956096989"/>
+ <string type="base64-UTF8" key="NSTitle">CVJpZ2h0IHRvIExlZnQ</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="508151438">
+ <reference key="NSMenu" ref="956096989"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="981751889">
+ <reference key="NSMenu" ref="956096989"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <string key="NSTitle">Selection</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="380031999">
+ <reference key="NSMenu" ref="956096989"/>
+ <string type="base64-UTF8" key="NSTitle">CURlZmF1bHQ</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="825984362">
+ <reference key="NSMenu" ref="956096989"/>
+ <string type="base64-UTF8" key="NSTitle">CUxlZnQgdG8gUmlnaHQ</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="560145579">
+ <reference key="NSMenu" ref="956096989"/>
+ <string type="base64-UTF8" key="NSTitle">CVJpZ2h0IHRvIExlZnQ</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="908105787">
+ <reference key="NSMenu" ref="446991534"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="644046920">
+ <reference key="NSMenu" ref="446991534"/>
+ <string key="NSTitle">Show Ruler</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="231811626">
+ <reference key="NSMenu" ref="446991534"/>
+ <string key="NSTitle">Copy Ruler</string>
+ <string key="NSKeyEquiv">c</string>
+ <int key="NSKeyEquivModMask">1310720</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="883618387">
+ <reference key="NSMenu" ref="446991534"/>
+ <string key="NSTitle">Paste Ruler</string>
+ <string key="NSKeyEquiv">v</string>
+ <int key="NSKeyEquivModMask">1310720</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="586577488">
+ <reference key="NSMenu" ref="649796088"/>
+ <string key="NSTitle">View</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="466310130">
+ <string key="NSTitle">View</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="102151532">
+ <reference key="NSMenu" ref="466310130"/>
+ <string key="NSTitle">Show Toolbar</string>
+ <string key="NSKeyEquiv">t</string>
+ <int key="NSKeyEquivModMask">1572864</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="237841660">
+ <reference key="NSMenu" ref="466310130"/>
+ <string key="NSTitle">Customize Toolbar…</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="713487014">
+ <reference key="NSMenu" ref="649796088"/>
+ <string key="NSTitle">Window</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="835318025">
+ <string key="NSTitle">Window</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="1011231497">
+ <reference key="NSMenu" ref="835318025"/>
+ <string key="NSTitle">Minimize</string>
+ <string key="NSKeyEquiv">m</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="575023229">
+ <reference key="NSMenu" ref="835318025"/>
+ <string key="NSTitle">Zoom</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="299356726">
+ <reference key="NSMenu" ref="835318025"/>
+ <bool key="NSIsDisabled">YES</bool>
+ <bool key="NSIsSeparator">YES</bool>
+ <string key="NSTitle"/>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ <object class="NSMenuItem" id="625202149">
+ <reference key="NSMenu" ref="835318025"/>
+ <string key="NSTitle">Bring All to Front</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ <string key="NSName">_NSWindowsMenu</string>
+ </object>
+ </object>
+ <object class="NSMenuItem" id="448692316">
+ <reference key="NSMenu" ref="649796088"/>
+ <string key="NSTitle">Help</string>
+ <string key="NSKeyEquiv"/>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ <string key="NSAction">submenuAction:</string>
+ <object class="NSMenu" key="NSSubmenu" id="992780483">
+ <string key="NSTitle">Help</string>
+ <object class="NSMutableArray" key="NSMenuItems">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSMenuItem" id="105068016">
+ <reference key="NSMenu" ref="992780483"/>
+ <string key="NSTitle">TestApp Help</string>
+ <string key="NSKeyEquiv">?</string>
+ <int key="NSKeyEquivModMask">1048576</int>
+ <int key="NSMnemonicLoc">2147483647</int>
+ <reference key="NSOnImage" ref="35465992"/>
+ <reference key="NSMixedImage" ref="502551668"/>
+ </object>
+ </object>
+ <string key="NSName">_NSHelpMenu</string>
+ </object>
+ </object>
+ </object>
+ <string key="NSName">_NSMainMenu</string>
+ </object>
+ <object class="NSWindowTemplate" id="972006081">
+ <int key="NSWindowStyleMask">15</int>
+ <int key="NSWindowBacking">2</int>
+ <string key="NSWindowRect">{{335, 390}, {480, 360}}</string>
+ <int key="NSWTFlags">1954021376</int>
+ <string key="NSWindowTitle">TestApp</string>
+ <string key="NSWindowClass">NSWindow</string>
+ <nil key="NSViewClass"/>
+ <string key="NSWindowContentMaxSize">{1.79769e+308, 1.79769e+308}</string>
+ <object class="NSView" key="NSWindowView" id="439893737">
+ <reference key="NSNextResponder"/>
+ <int key="NSvFlags">256</int>
+ <string key="NSFrameSize">{480, 360}</string>
+ <reference key="NSSuperview"/>
+ </object>
+ <string key="NSScreenRect">{{0, 0}, {1920, 1178}}</string>
+ <string key="NSMaxSize">{1.79769e+308, 1.79769e+308}</string>
+ </object>
+ <object class="NSCustomObject" id="976324537">
+ <string key="NSClassName">TestAppAppDelegate</string>
+ </object>
+ <object class="NSCustomObject" id="755631768">
+ <string key="NSClassName">NSFontManager</string>
+ </object>
+ </object>
+ <object class="IBObjectContainer" key="IBDocument.Objects">
+ <object class="NSMutableArray" key="connectionRecords">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">performMiniaturize:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="1011231497"/>
+ </object>
+ <int key="connectionID">37</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">arrangeInFront:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="625202149"/>
+ </object>
+ <int key="connectionID">39</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">print:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="49223823"/>
+ </object>
+ <int key="connectionID">86</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">runPageLayout:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="294629803"/>
+ </object>
+ <int key="connectionID">87</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">clearRecentDocuments:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="759406840"/>
+ </object>
+ <int key="connectionID">127</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">orderFrontStandardAboutPanel:</string>
+ <reference key="source" ref="1021"/>
+ <reference key="destination" ref="238522557"/>
+ </object>
+ <int key="connectionID">142</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">performClose:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="776162233"/>
+ </object>
+ <int key="connectionID">193</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleContinuousSpellChecking:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="948374510"/>
+ </object>
+ <int key="connectionID">222</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">undo:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="1058277027"/>
+ </object>
+ <int key="connectionID">223</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">copy:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="860595796"/>
+ </object>
+ <int key="connectionID">224</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">checkSpelling:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="96193923"/>
+ </object>
+ <int key="connectionID">225</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">paste:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="29853731"/>
+ </object>
+ <int key="connectionID">226</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">stopSpeaking:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="680220178"/>
+ </object>
+ <int key="connectionID">227</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">cut:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="296257095"/>
+ </object>
+ <int key="connectionID">228</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">showGuessPanel:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="679648819"/>
+ </object>
+ <int key="connectionID">230</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">redo:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="790794224"/>
+ </object>
+ <int key="connectionID">231</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">selectAll:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="583158037"/>
+ </object>
+ <int key="connectionID">232</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">startSpeaking:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="731782645"/>
+ </object>
+ <int key="connectionID">233</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">delete:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="437104165"/>
+ </object>
+ <int key="connectionID">235</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">performZoom:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="575023229"/>
+ </object>
+ <int key="connectionID">240</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">performFindPanelAction:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="447796847"/>
+ </object>
+ <int key="connectionID">241</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">centerSelectionInVisibleArea:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="88285865"/>
+ </object>
+ <int key="connectionID">245</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleGrammarChecking:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="967646866"/>
+ </object>
+ <int key="connectionID">347</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleSmartInsertDelete:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="605118523"/>
+ </object>
+ <int key="connectionID">355</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleAutomaticQuoteSubstitution:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="197661976"/>
+ </object>
+ <int key="connectionID">356</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleAutomaticLinkDetection:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="708854459"/>
+ </object>
+ <int key="connectionID">357</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">saveDocument:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="1023925487"/>
+ </object>
+ <int key="connectionID">362</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">saveDocumentAs:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="117038363"/>
+ </object>
+ <int key="connectionID">363</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">revertDocumentToSaved:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="579971712"/>
+ </object>
+ <int key="connectionID">364</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">runToolbarCustomizationPalette:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="237841660"/>
+ </object>
+ <int key="connectionID">365</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleToolbarShown:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="102151532"/>
+ </object>
+ <int key="connectionID">366</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">hide:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="755159360"/>
+ </object>
+ <int key="connectionID">367</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">hideOtherApplications:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="342932134"/>
+ </object>
+ <int key="connectionID">368</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">unhideAllApplications:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="908899353"/>
+ </object>
+ <int key="connectionID">370</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">newDocument:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="705341025"/>
+ </object>
+ <int key="connectionID">373</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">openDocument:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="722745758"/>
+ </object>
+ <int key="connectionID">374</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">addFontTrait:</string>
+ <reference key="source" ref="755631768"/>
+ <reference key="destination" ref="305399458"/>
+ </object>
+ <int key="connectionID">421</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">addFontTrait:</string>
+ <reference key="source" ref="755631768"/>
+ <reference key="destination" ref="814362025"/>
+ </object>
+ <int key="connectionID">422</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">modifyFont:</string>
+ <reference key="source" ref="755631768"/>
+ <reference key="destination" ref="885547335"/>
+ </object>
+ <int key="connectionID">423</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">orderFrontFontPanel:</string>
+ <reference key="source" ref="755631768"/>
+ <reference key="destination" ref="159677712"/>
+ </object>
+ <int key="connectionID">424</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">modifyFont:</string>
+ <reference key="source" ref="755631768"/>
+ <reference key="destination" ref="158063935"/>
+ </object>
+ <int key="connectionID">425</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">raiseBaseline:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="941806246"/>
+ </object>
+ <int key="connectionID">426</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">lowerBaseline:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="1045724900"/>
+ </object>
+ <int key="connectionID">427</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">copyFont:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="596732606"/>
+ </object>
+ <int key="connectionID">428</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">subscript:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="1037576581"/>
+ </object>
+ <int key="connectionID">429</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">superscript:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="644725453"/>
+ </object>
+ <int key="connectionID">430</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">tightenKerning:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="677519740"/>
+ </object>
+ <int key="connectionID">431</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">underline:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="330926929"/>
+ </object>
+ <int key="connectionID">432</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">orderFrontColorPanel:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="1012600125"/>
+ </object>
+ <int key="connectionID">433</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">useAllLigatures:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="663508465"/>
+ </object>
+ <int key="connectionID">434</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">loosenKerning:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="238351151"/>
+ </object>
+ <int key="connectionID">435</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">pasteFont:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="393423671"/>
+ </object>
+ <int key="connectionID">436</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">unscript:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="257962622"/>
+ </object>
+ <int key="connectionID">437</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">useStandardKerning:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="252969304"/>
+ </object>
+ <int key="connectionID">438</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">useStandardLigatures:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="706297211"/>
+ </object>
+ <int key="connectionID">439</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">turnOffLigatures:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="568384683"/>
+ </object>
+ <int key="connectionID">440</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">turnOffKerning:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="766922938"/>
+ </object>
+ <int key="connectionID">441</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">terminate:</string>
+ <reference key="source" ref="1050"/>
+ <reference key="destination" ref="632727374"/>
+ </object>
+ <int key="connectionID">449</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleAutomaticSpellingCorrection:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="795346622"/>
+ </object>
+ <int key="connectionID">456</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">orderFrontSubstitutionsPanel:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="65139061"/>
+ </object>
+ <int key="connectionID">458</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleAutomaticDashSubstitution:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="672708820"/>
+ </object>
+ <int key="connectionID">461</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleAutomaticTextReplacement:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="537092702"/>
+ </object>
+ <int key="connectionID">463</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">uppercaseWord:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="1060694897"/>
+ </object>
+ <int key="connectionID">464</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">capitalizeWord:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="56570060"/>
+ </object>
+ <int key="connectionID">467</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">lowercaseWord:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="879586729"/>
+ </object>
+ <int key="connectionID">468</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">pasteAsPlainText:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="82994268"/>
+ </object>
+ <int key="connectionID">486</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">performFindPanelAction:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="326711663"/>
+ </object>
+ <int key="connectionID">487</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">performFindPanelAction:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="270902937"/>
+ </object>
+ <int key="connectionID">488</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">performFindPanelAction:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="159080638"/>
+ </object>
+ <int key="connectionID">489</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">showHelp:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="105068016"/>
+ </object>
+ <int key="connectionID">493</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBOutletConnection" key="connection">
+ <string key="label">delegate</string>
+ <reference key="source" ref="1021"/>
+ <reference key="destination" ref="976324537"/>
+ </object>
+ <int key="connectionID">495</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">alignCenter:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="630155264"/>
+ </object>
+ <int key="connectionID">518</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">pasteRuler:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="883618387"/>
+ </object>
+ <int key="connectionID">519</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">toggleRuler:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="644046920"/>
+ </object>
+ <int key="connectionID">520</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">alignRight:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="512868991"/>
+ </object>
+ <int key="connectionID">521</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">copyRuler:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="231811626"/>
+ </object>
+ <int key="connectionID">522</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">alignJustified:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="945678886"/>
+ </object>
+ <int key="connectionID">523</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">alignLeft:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="875092757"/>
+ </object>
+ <int key="connectionID">524</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">makeBaseWritingDirectionNatural:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="551969625"/>
+ </object>
+ <int key="connectionID">525</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">makeBaseWritingDirectionLeftToRight:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="249532473"/>
+ </object>
+ <int key="connectionID">526</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">makeBaseWritingDirectionRightToLeft:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="607364498"/>
+ </object>
+ <int key="connectionID">527</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">makeTextWritingDirectionNatural:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="380031999"/>
+ </object>
+ <int key="connectionID">528</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">makeTextWritingDirectionLeftToRight:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="825984362"/>
+ </object>
+ <int key="connectionID">529</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBActionConnection" key="connection">
+ <string key="label">makeTextWritingDirectionRightToLeft:</string>
+ <reference key="source" ref="1014"/>
+ <reference key="destination" ref="560145579"/>
+ </object>
+ <int key="connectionID">530</int>
+ </object>
+ <object class="IBConnectionRecord">
+ <object class="IBOutletConnection" key="connection">
+ <string key="label">window</string>
+ <reference key="source" ref="976324537"/>
+ <reference key="destination" ref="972006081"/>
+ </object>
+ <int key="connectionID">532</int>
+ </object>
+ </object>
+ <object class="IBMutableOrderedSet" key="objectRecords">
+ <object class="NSArray" key="orderedObjects">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBObjectRecord">
+ <int key="objectID">0</int>
+ <reference key="object" ref="0"/>
+ <reference key="children" ref="1048"/>
+ <nil key="parent"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">-2</int>
+ <reference key="object" ref="1021"/>
+ <reference key="parent" ref="0"/>
+ <string key="objectName">File's Owner</string>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">-1</int>
+ <reference key="object" ref="1014"/>
+ <reference key="parent" ref="0"/>
+ <string key="objectName">First Responder</string>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">-3</int>
+ <reference key="object" ref="1050"/>
+ <reference key="parent" ref="0"/>
+ <string key="objectName">Application</string>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">29</int>
+ <reference key="object" ref="649796088"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="713487014"/>
+ <reference ref="694149608"/>
+ <reference ref="952259628"/>
+ <reference ref="379814623"/>
+ <reference ref="586577488"/>
+ <reference ref="302598603"/>
+ <reference ref="448692316"/>
+ </object>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">19</int>
+ <reference key="object" ref="713487014"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="835318025"/>
+ </object>
+ <reference key="parent" ref="649796088"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">56</int>
+ <reference key="object" ref="694149608"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="110575045"/>
+ </object>
+ <reference key="parent" ref="649796088"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">217</int>
+ <reference key="object" ref="952259628"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="789758025"/>
+ </object>
+ <reference key="parent" ref="649796088"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">83</int>
+ <reference key="object" ref="379814623"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="720053764"/>
+ </object>
+ <reference key="parent" ref="649796088"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">81</int>
+ <reference key="object" ref="720053764"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="1023925487"/>
+ <reference ref="117038363"/>
+ <reference ref="49223823"/>
+ <reference ref="722745758"/>
+ <reference ref="705341025"/>
+ <reference ref="1025936716"/>
+ <reference ref="294629803"/>
+ <reference ref="776162233"/>
+ <reference ref="425164168"/>
+ <reference ref="579971712"/>
+ <reference ref="1010469920"/>
+ </object>
+ <reference key="parent" ref="379814623"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">75</int>
+ <reference key="object" ref="1023925487"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">80</int>
+ <reference key="object" ref="117038363"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">78</int>
+ <reference key="object" ref="49223823"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">72</int>
+ <reference key="object" ref="722745758"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">82</int>
+ <reference key="object" ref="705341025"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">124</int>
+ <reference key="object" ref="1025936716"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="1065607017"/>
+ </object>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">77</int>
+ <reference key="object" ref="294629803"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">73</int>
+ <reference key="object" ref="776162233"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">79</int>
+ <reference key="object" ref="425164168"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">112</int>
+ <reference key="object" ref="579971712"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">74</int>
+ <reference key="object" ref="1010469920"/>
+ <reference key="parent" ref="720053764"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">125</int>
+ <reference key="object" ref="1065607017"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="759406840"/>
+ </object>
+ <reference key="parent" ref="1025936716"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">126</int>
+ <reference key="object" ref="759406840"/>
+ <reference key="parent" ref="1065607017"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">205</int>
+ <reference key="object" ref="789758025"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="437104165"/>
+ <reference ref="583158037"/>
+ <reference ref="1058277027"/>
+ <reference ref="212016141"/>
+ <reference ref="296257095"/>
+ <reference ref="29853731"/>
+ <reference ref="860595796"/>
+ <reference ref="1040322652"/>
+ <reference ref="790794224"/>
+ <reference ref="892235320"/>
+ <reference ref="972420730"/>
+ <reference ref="676164635"/>
+ <reference ref="507821607"/>
+ <reference ref="288088188"/>
+ <reference ref="82994268"/>
+ </object>
+ <reference key="parent" ref="952259628"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">202</int>
+ <reference key="object" ref="437104165"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">198</int>
+ <reference key="object" ref="583158037"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">207</int>
+ <reference key="object" ref="1058277027"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">214</int>
+ <reference key="object" ref="212016141"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">199</int>
+ <reference key="object" ref="296257095"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">203</int>
+ <reference key="object" ref="29853731"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">197</int>
+ <reference key="object" ref="860595796"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">206</int>
+ <reference key="object" ref="1040322652"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">215</int>
+ <reference key="object" ref="790794224"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">218</int>
+ <reference key="object" ref="892235320"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="963351320"/>
+ </object>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">216</int>
+ <reference key="object" ref="972420730"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="769623530"/>
+ </object>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">200</int>
+ <reference key="object" ref="769623530"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="948374510"/>
+ <reference ref="96193923"/>
+ <reference ref="679648819"/>
+ <reference ref="967646866"/>
+ <reference ref="859480356"/>
+ <reference ref="795346622"/>
+ </object>
+ <reference key="parent" ref="972420730"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">219</int>
+ <reference key="object" ref="948374510"/>
+ <reference key="parent" ref="769623530"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">201</int>
+ <reference key="object" ref="96193923"/>
+ <reference key="parent" ref="769623530"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">204</int>
+ <reference key="object" ref="679648819"/>
+ <reference key="parent" ref="769623530"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">220</int>
+ <reference key="object" ref="963351320"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="270902937"/>
+ <reference ref="88285865"/>
+ <reference ref="159080638"/>
+ <reference ref="326711663"/>
+ <reference ref="447796847"/>
+ </object>
+ <reference key="parent" ref="892235320"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">213</int>
+ <reference key="object" ref="270902937"/>
+ <reference key="parent" ref="963351320"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">210</int>
+ <reference key="object" ref="88285865"/>
+ <reference key="parent" ref="963351320"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">221</int>
+ <reference key="object" ref="159080638"/>
+ <reference key="parent" ref="963351320"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">208</int>
+ <reference key="object" ref="326711663"/>
+ <reference key="parent" ref="963351320"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">209</int>
+ <reference key="object" ref="447796847"/>
+ <reference key="parent" ref="963351320"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">57</int>
+ <reference key="object" ref="110575045"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="238522557"/>
+ <reference ref="755159360"/>
+ <reference ref="908899353"/>
+ <reference ref="632727374"/>
+ <reference ref="646227648"/>
+ <reference ref="609285721"/>
+ <reference ref="481834944"/>
+ <reference ref="304266470"/>
+ <reference ref="1046388886"/>
+ <reference ref="1056857174"/>
+ <reference ref="342932134"/>
+ </object>
+ <reference key="parent" ref="694149608"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">58</int>
+ <reference key="object" ref="238522557"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">134</int>
+ <reference key="object" ref="755159360"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">150</int>
+ <reference key="object" ref="908899353"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">136</int>
+ <reference key="object" ref="632727374"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">144</int>
+ <reference key="object" ref="646227648"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">129</int>
+ <reference key="object" ref="609285721"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">143</int>
+ <reference key="object" ref="481834944"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">236</int>
+ <reference key="object" ref="304266470"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">131</int>
+ <reference key="object" ref="1046388886"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="752062318"/>
+ </object>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">149</int>
+ <reference key="object" ref="1056857174"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">145</int>
+ <reference key="object" ref="342932134"/>
+ <reference key="parent" ref="110575045"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">130</int>
+ <reference key="object" ref="752062318"/>
+ <reference key="parent" ref="1046388886"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">24</int>
+ <reference key="object" ref="835318025"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="299356726"/>
+ <reference ref="625202149"/>
+ <reference ref="575023229"/>
+ <reference ref="1011231497"/>
+ </object>
+ <reference key="parent" ref="713487014"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">92</int>
+ <reference key="object" ref="299356726"/>
+ <reference key="parent" ref="835318025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">5</int>
+ <reference key="object" ref="625202149"/>
+ <reference key="parent" ref="835318025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">239</int>
+ <reference key="object" ref="575023229"/>
+ <reference key="parent" ref="835318025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">23</int>
+ <reference key="object" ref="1011231497"/>
+ <reference key="parent" ref="835318025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">295</int>
+ <reference key="object" ref="586577488"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="466310130"/>
+ </object>
+ <reference key="parent" ref="649796088"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">296</int>
+ <reference key="object" ref="466310130"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="102151532"/>
+ <reference ref="237841660"/>
+ </object>
+ <reference key="parent" ref="586577488"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">297</int>
+ <reference key="object" ref="102151532"/>
+ <reference key="parent" ref="466310130"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">298</int>
+ <reference key="object" ref="237841660"/>
+ <reference key="parent" ref="466310130"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">211</int>
+ <reference key="object" ref="676164635"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="785027613"/>
+ </object>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">212</int>
+ <reference key="object" ref="785027613"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="680220178"/>
+ <reference ref="731782645"/>
+ </object>
+ <reference key="parent" ref="676164635"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">195</int>
+ <reference key="object" ref="680220178"/>
+ <reference key="parent" ref="785027613"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">196</int>
+ <reference key="object" ref="731782645"/>
+ <reference key="parent" ref="785027613"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">346</int>
+ <reference key="object" ref="967646866"/>
+ <reference key="parent" ref="769623530"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">348</int>
+ <reference key="object" ref="507821607"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="698887838"/>
+ </object>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">349</int>
+ <reference key="object" ref="698887838"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="605118523"/>
+ <reference ref="197661976"/>
+ <reference ref="708854459"/>
+ <reference ref="65139061"/>
+ <reference ref="19036812"/>
+ <reference ref="672708820"/>
+ <reference ref="537092702"/>
+ </object>
+ <reference key="parent" ref="507821607"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">350</int>
+ <reference key="object" ref="605118523"/>
+ <reference key="parent" ref="698887838"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">351</int>
+ <reference key="object" ref="197661976"/>
+ <reference key="parent" ref="698887838"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">354</int>
+ <reference key="object" ref="708854459"/>
+ <reference key="parent" ref="698887838"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">371</int>
+ <reference key="object" ref="972006081"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="439893737"/>
+ </object>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">372</int>
+ <reference key="object" ref="439893737"/>
+ <reference key="parent" ref="972006081"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">375</int>
+ <reference key="object" ref="302598603"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="941447902"/>
+ </object>
+ <reference key="parent" ref="649796088"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">376</int>
+ <reference key="object" ref="941447902"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="792887677"/>
+ <reference ref="215659978"/>
+ </object>
+ <reference key="parent" ref="302598603"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">377</int>
+ <reference key="object" ref="792887677"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="786677654"/>
+ </object>
+ <reference key="parent" ref="941447902"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">388</int>
+ <reference key="object" ref="786677654"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="159677712"/>
+ <reference ref="305399458"/>
+ <reference ref="814362025"/>
+ <reference ref="330926929"/>
+ <reference ref="533507878"/>
+ <reference ref="158063935"/>
+ <reference ref="885547335"/>
+ <reference ref="901062459"/>
+ <reference ref="767671776"/>
+ <reference ref="691570813"/>
+ <reference ref="769124883"/>
+ <reference ref="739652853"/>
+ <reference ref="1012600125"/>
+ <reference ref="214559597"/>
+ <reference ref="596732606"/>
+ <reference ref="393423671"/>
+ </object>
+ <reference key="parent" ref="792887677"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">389</int>
+ <reference key="object" ref="159677712"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">390</int>
+ <reference key="object" ref="305399458"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">391</int>
+ <reference key="object" ref="814362025"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">392</int>
+ <reference key="object" ref="330926929"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">393</int>
+ <reference key="object" ref="533507878"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">394</int>
+ <reference key="object" ref="158063935"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">395</int>
+ <reference key="object" ref="885547335"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">396</int>
+ <reference key="object" ref="901062459"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">397</int>
+ <reference key="object" ref="767671776"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="175441468"/>
+ </object>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">398</int>
+ <reference key="object" ref="691570813"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="1058217995"/>
+ </object>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">399</int>
+ <reference key="object" ref="769124883"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="18263474"/>
+ </object>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">400</int>
+ <reference key="object" ref="739652853"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">401</int>
+ <reference key="object" ref="1012600125"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">402</int>
+ <reference key="object" ref="214559597"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">403</int>
+ <reference key="object" ref="596732606"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">404</int>
+ <reference key="object" ref="393423671"/>
+ <reference key="parent" ref="786677654"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">405</int>
+ <reference key="object" ref="18263474"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="257962622"/>
+ <reference ref="644725453"/>
+ <reference ref="1037576581"/>
+ <reference ref="941806246"/>
+ <reference ref="1045724900"/>
+ </object>
+ <reference key="parent" ref="769124883"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">406</int>
+ <reference key="object" ref="257962622"/>
+ <reference key="parent" ref="18263474"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">407</int>
+ <reference key="object" ref="644725453"/>
+ <reference key="parent" ref="18263474"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">408</int>
+ <reference key="object" ref="1037576581"/>
+ <reference key="parent" ref="18263474"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">409</int>
+ <reference key="object" ref="941806246"/>
+ <reference key="parent" ref="18263474"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">410</int>
+ <reference key="object" ref="1045724900"/>
+ <reference key="parent" ref="18263474"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">411</int>
+ <reference key="object" ref="1058217995"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="706297211"/>
+ <reference ref="568384683"/>
+ <reference ref="663508465"/>
+ </object>
+ <reference key="parent" ref="691570813"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">412</int>
+ <reference key="object" ref="706297211"/>
+ <reference key="parent" ref="1058217995"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">413</int>
+ <reference key="object" ref="568384683"/>
+ <reference key="parent" ref="1058217995"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">414</int>
+ <reference key="object" ref="663508465"/>
+ <reference key="parent" ref="1058217995"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">415</int>
+ <reference key="object" ref="175441468"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="252969304"/>
+ <reference ref="766922938"/>
+ <reference ref="677519740"/>
+ <reference ref="238351151"/>
+ </object>
+ <reference key="parent" ref="767671776"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">416</int>
+ <reference key="object" ref="252969304"/>
+ <reference key="parent" ref="175441468"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">417</int>
+ <reference key="object" ref="766922938"/>
+ <reference key="parent" ref="175441468"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">418</int>
+ <reference key="object" ref="677519740"/>
+ <reference key="parent" ref="175441468"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">419</int>
+ <reference key="object" ref="238351151"/>
+ <reference key="parent" ref="175441468"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">420</int>
+ <reference key="object" ref="755631768"/>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">450</int>
+ <reference key="object" ref="288088188"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="579392910"/>
+ </object>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">451</int>
+ <reference key="object" ref="579392910"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="1060694897"/>
+ <reference ref="879586729"/>
+ <reference ref="56570060"/>
+ </object>
+ <reference key="parent" ref="288088188"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">452</int>
+ <reference key="object" ref="1060694897"/>
+ <reference key="parent" ref="579392910"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">453</int>
+ <reference key="object" ref="859480356"/>
+ <reference key="parent" ref="769623530"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">454</int>
+ <reference key="object" ref="795346622"/>
+ <reference key="parent" ref="769623530"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">457</int>
+ <reference key="object" ref="65139061"/>
+ <reference key="parent" ref="698887838"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">459</int>
+ <reference key="object" ref="19036812"/>
+ <reference key="parent" ref="698887838"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">460</int>
+ <reference key="object" ref="672708820"/>
+ <reference key="parent" ref="698887838"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">462</int>
+ <reference key="object" ref="537092702"/>
+ <reference key="parent" ref="698887838"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">465</int>
+ <reference key="object" ref="879586729"/>
+ <reference key="parent" ref="579392910"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">466</int>
+ <reference key="object" ref="56570060"/>
+ <reference key="parent" ref="579392910"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">485</int>
+ <reference key="object" ref="82994268"/>
+ <reference key="parent" ref="789758025"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">490</int>
+ <reference key="object" ref="448692316"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="992780483"/>
+ </object>
+ <reference key="parent" ref="649796088"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">491</int>
+ <reference key="object" ref="992780483"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="105068016"/>
+ </object>
+ <reference key="parent" ref="448692316"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">492</int>
+ <reference key="object" ref="105068016"/>
+ <reference key="parent" ref="992780483"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">494</int>
+ <reference key="object" ref="976324537"/>
+ <reference key="parent" ref="0"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">496</int>
+ <reference key="object" ref="215659978"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="446991534"/>
+ </object>
+ <reference key="parent" ref="941447902"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">497</int>
+ <reference key="object" ref="446991534"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="875092757"/>
+ <reference ref="630155264"/>
+ <reference ref="945678886"/>
+ <reference ref="512868991"/>
+ <reference ref="163117631"/>
+ <reference ref="31516759"/>
+ <reference ref="908105787"/>
+ <reference ref="644046920"/>
+ <reference ref="231811626"/>
+ <reference ref="883618387"/>
+ </object>
+ <reference key="parent" ref="215659978"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">498</int>
+ <reference key="object" ref="875092757"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">499</int>
+ <reference key="object" ref="630155264"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">500</int>
+ <reference key="object" ref="945678886"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">501</int>
+ <reference key="object" ref="512868991"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">502</int>
+ <reference key="object" ref="163117631"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">503</int>
+ <reference key="object" ref="31516759"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="956096989"/>
+ </object>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">504</int>
+ <reference key="object" ref="908105787"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">505</int>
+ <reference key="object" ref="644046920"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">506</int>
+ <reference key="object" ref="231811626"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">507</int>
+ <reference key="object" ref="883618387"/>
+ <reference key="parent" ref="446991534"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">508</int>
+ <reference key="object" ref="956096989"/>
+ <object class="NSMutableArray" key="children">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference ref="257099033"/>
+ <reference ref="551969625"/>
+ <reference ref="249532473"/>
+ <reference ref="607364498"/>
+ <reference ref="508151438"/>
+ <reference ref="981751889"/>
+ <reference ref="380031999"/>
+ <reference ref="825984362"/>
+ <reference ref="560145579"/>
+ </object>
+ <reference key="parent" ref="31516759"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">509</int>
+ <reference key="object" ref="257099033"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">510</int>
+ <reference key="object" ref="551969625"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">511</int>
+ <reference key="object" ref="249532473"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">512</int>
+ <reference key="object" ref="607364498"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">513</int>
+ <reference key="object" ref="508151438"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">514</int>
+ <reference key="object" ref="981751889"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">515</int>
+ <reference key="object" ref="380031999"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">516</int>
+ <reference key="object" ref="825984362"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ <object class="IBObjectRecord">
+ <int key="objectID">517</int>
+ <reference key="object" ref="560145579"/>
+ <reference key="parent" ref="956096989"/>
+ </object>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="flattenedProperties">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>-3.IBPluginDependency</string>
+ <string>112.IBPluginDependency</string>
+ <string>112.ImportedFromIB2</string>
+ <string>124.IBPluginDependency</string>
+ <string>124.ImportedFromIB2</string>
+ <string>125.IBPluginDependency</string>
+ <string>125.ImportedFromIB2</string>
+ <string>125.editorWindowContentRectSynchronizationRect</string>
+ <string>126.IBPluginDependency</string>
+ <string>126.ImportedFromIB2</string>
+ <string>129.IBPluginDependency</string>
+ <string>129.ImportedFromIB2</string>
+ <string>130.IBPluginDependency</string>
+ <string>130.ImportedFromIB2</string>
+ <string>130.editorWindowContentRectSynchronizationRect</string>
+ <string>131.IBPluginDependency</string>
+ <string>131.ImportedFromIB2</string>
+ <string>134.IBPluginDependency</string>
+ <string>134.ImportedFromIB2</string>
+ <string>136.IBPluginDependency</string>
+ <string>136.ImportedFromIB2</string>
+ <string>143.IBPluginDependency</string>
+ <string>143.ImportedFromIB2</string>
+ <string>144.IBPluginDependency</string>
+ <string>144.ImportedFromIB2</string>
+ <string>145.IBPluginDependency</string>
+ <string>145.ImportedFromIB2</string>
+ <string>149.IBPluginDependency</string>
+ <string>149.ImportedFromIB2</string>
+ <string>150.IBPluginDependency</string>
+ <string>150.ImportedFromIB2</string>
+ <string>19.IBPluginDependency</string>
+ <string>19.ImportedFromIB2</string>
+ <string>195.IBPluginDependency</string>
+ <string>195.ImportedFromIB2</string>
+ <string>196.IBPluginDependency</string>
+ <string>196.ImportedFromIB2</string>
+ <string>197.IBPluginDependency</string>
+ <string>197.ImportedFromIB2</string>
+ <string>198.IBPluginDependency</string>
+ <string>198.ImportedFromIB2</string>
+ <string>199.IBPluginDependency</string>
+ <string>199.ImportedFromIB2</string>
+ <string>200.IBEditorWindowLastContentRect</string>
+ <string>200.IBPluginDependency</string>
+ <string>200.ImportedFromIB2</string>
+ <string>200.editorWindowContentRectSynchronizationRect</string>
+ <string>201.IBPluginDependency</string>
+ <string>201.ImportedFromIB2</string>
+ <string>202.IBPluginDependency</string>
+ <string>202.ImportedFromIB2</string>
+ <string>203.IBPluginDependency</string>
+ <string>203.ImportedFromIB2</string>
+ <string>204.IBPluginDependency</string>
+ <string>204.ImportedFromIB2</string>
+ <string>205.IBEditorWindowLastContentRect</string>
+ <string>205.IBPluginDependency</string>
+ <string>205.ImportedFromIB2</string>
+ <string>205.editorWindowContentRectSynchronizationRect</string>
+ <string>206.IBPluginDependency</string>
+ <string>206.ImportedFromIB2</string>
+ <string>207.IBPluginDependency</string>
+ <string>207.ImportedFromIB2</string>
+ <string>208.IBPluginDependency</string>
+ <string>208.ImportedFromIB2</string>
+ <string>209.IBPluginDependency</string>
+ <string>209.ImportedFromIB2</string>
+ <string>210.IBPluginDependency</string>
+ <string>210.ImportedFromIB2</string>
+ <string>211.IBPluginDependency</string>
+ <string>211.ImportedFromIB2</string>
+ <string>212.IBPluginDependency</string>
+ <string>212.ImportedFromIB2</string>
+ <string>212.editorWindowContentRectSynchronizationRect</string>
+ <string>213.IBPluginDependency</string>
+ <string>213.ImportedFromIB2</string>
+ <string>214.IBPluginDependency</string>
+ <string>214.ImportedFromIB2</string>
+ <string>215.IBPluginDependency</string>
+ <string>215.ImportedFromIB2</string>
+ <string>216.IBPluginDependency</string>
+ <string>216.ImportedFromIB2</string>
+ <string>217.IBPluginDependency</string>
+ <string>217.ImportedFromIB2</string>
+ <string>218.IBPluginDependency</string>
+ <string>218.ImportedFromIB2</string>
+ <string>219.IBPluginDependency</string>
+ <string>219.ImportedFromIB2</string>
+ <string>220.IBEditorWindowLastContentRect</string>
+ <string>220.IBPluginDependency</string>
+ <string>220.ImportedFromIB2</string>
+ <string>220.editorWindowContentRectSynchronizationRect</string>
+ <string>221.IBPluginDependency</string>
+ <string>221.ImportedFromIB2</string>
+ <string>23.IBPluginDependency</string>
+ <string>23.ImportedFromIB2</string>
+ <string>236.IBPluginDependency</string>
+ <string>236.ImportedFromIB2</string>
+ <string>239.IBPluginDependency</string>
+ <string>239.ImportedFromIB2</string>
+ <string>24.IBEditorWindowLastContentRect</string>
+ <string>24.IBPluginDependency</string>
+ <string>24.ImportedFromIB2</string>
+ <string>24.editorWindowContentRectSynchronizationRect</string>
+ <string>29.IBEditorWindowLastContentRect</string>
+ <string>29.IBPluginDependency</string>
+ <string>29.ImportedFromIB2</string>
+ <string>29.WindowOrigin</string>
+ <string>29.editorWindowContentRectSynchronizationRect</string>
+ <string>295.IBPluginDependency</string>
+ <string>296.IBEditorWindowLastContentRect</string>
+ <string>296.IBPluginDependency</string>
+ <string>296.editorWindowContentRectSynchronizationRect</string>
+ <string>297.IBPluginDependency</string>
+ <string>298.IBPluginDependency</string>
+ <string>346.IBPluginDependency</string>
+ <string>346.ImportedFromIB2</string>
+ <string>348.IBPluginDependency</string>
+ <string>348.ImportedFromIB2</string>
+ <string>349.IBEditorWindowLastContentRect</string>
+ <string>349.IBPluginDependency</string>
+ <string>349.ImportedFromIB2</string>
+ <string>349.editorWindowContentRectSynchronizationRect</string>
+ <string>350.IBPluginDependency</string>
+ <string>350.ImportedFromIB2</string>
+ <string>351.IBPluginDependency</string>
+ <string>351.ImportedFromIB2</string>
+ <string>354.IBPluginDependency</string>
+ <string>354.ImportedFromIB2</string>
+ <string>371.IBEditorWindowLastContentRect</string>
+ <string>371.IBPluginDependency</string>
+ <string>371.IBWindowTemplateEditedContentRect</string>
+ <string>371.NSWindowTemplate.visibleAtLaunch</string>
+ <string>371.editorWindowContentRectSynchronizationRect</string>
+ <string>371.windowTemplate.maxSize</string>
+ <string>372.IBPluginDependency</string>
+ <string>375.IBPluginDependency</string>
+ <string>376.IBEditorWindowLastContentRect</string>
+ <string>376.IBPluginDependency</string>
+ <string>377.IBPluginDependency</string>
+ <string>388.IBEditorWindowLastContentRect</string>
+ <string>388.IBPluginDependency</string>
+ <string>389.IBPluginDependency</string>
+ <string>390.IBPluginDependency</string>
+ <string>391.IBPluginDependency</string>
+ <string>392.IBPluginDependency</string>
+ <string>393.IBPluginDependency</string>
+ <string>394.IBPluginDependency</string>
+ <string>395.IBPluginDependency</string>
+ <string>396.IBPluginDependency</string>
+ <string>397.IBPluginDependency</string>
+ <string>398.IBPluginDependency</string>
+ <string>399.IBPluginDependency</string>
+ <string>400.IBPluginDependency</string>
+ <string>401.IBPluginDependency</string>
+ <string>402.IBPluginDependency</string>
+ <string>403.IBPluginDependency</string>
+ <string>404.IBPluginDependency</string>
+ <string>405.IBPluginDependency</string>
+ <string>406.IBPluginDependency</string>
+ <string>407.IBPluginDependency</string>
+ <string>408.IBPluginDependency</string>
+ <string>409.IBPluginDependency</string>
+ <string>410.IBPluginDependency</string>
+ <string>411.IBPluginDependency</string>
+ <string>412.IBPluginDependency</string>
+ <string>413.IBPluginDependency</string>
+ <string>414.IBPluginDependency</string>
+ <string>415.IBPluginDependency</string>
+ <string>416.IBPluginDependency</string>
+ <string>417.IBPluginDependency</string>
+ <string>418.IBPluginDependency</string>
+ <string>419.IBPluginDependency</string>
+ <string>450.IBPluginDependency</string>
+ <string>451.IBEditorWindowLastContentRect</string>
+ <string>451.IBPluginDependency</string>
+ <string>452.IBPluginDependency</string>
+ <string>453.IBPluginDependency</string>
+ <string>454.IBPluginDependency</string>
+ <string>457.IBPluginDependency</string>
+ <string>459.IBPluginDependency</string>
+ <string>460.IBPluginDependency</string>
+ <string>462.IBPluginDependency</string>
+ <string>465.IBPluginDependency</string>
+ <string>466.IBPluginDependency</string>
+ <string>485.IBPluginDependency</string>
+ <string>490.IBPluginDependency</string>
+ <string>491.IBEditorWindowLastContentRect</string>
+ <string>491.IBPluginDependency</string>
+ <string>492.IBPluginDependency</string>
+ <string>496.IBPluginDependency</string>
+ <string>497.IBEditorWindowLastContentRect</string>
+ <string>497.IBPluginDependency</string>
+ <string>498.IBPluginDependency</string>
+ <string>499.IBPluginDependency</string>
+ <string>5.IBPluginDependency</string>
+ <string>5.ImportedFromIB2</string>
+ <string>500.IBPluginDependency</string>
+ <string>501.IBPluginDependency</string>
+ <string>502.IBPluginDependency</string>
+ <string>503.IBPluginDependency</string>
+ <string>504.IBPluginDependency</string>
+ <string>505.IBPluginDependency</string>
+ <string>506.IBPluginDependency</string>
+ <string>507.IBPluginDependency</string>
+ <string>508.IBEditorWindowLastContentRect</string>
+ <string>508.IBPluginDependency</string>
+ <string>509.IBPluginDependency</string>
+ <string>510.IBPluginDependency</string>
+ <string>511.IBPluginDependency</string>
+ <string>512.IBPluginDependency</string>
+ <string>513.IBPluginDependency</string>
+ <string>514.IBPluginDependency</string>
+ <string>515.IBPluginDependency</string>
+ <string>516.IBPluginDependency</string>
+ <string>517.IBPluginDependency</string>
+ <string>56.IBPluginDependency</string>
+ <string>56.ImportedFromIB2</string>
+ <string>57.IBEditorWindowLastContentRect</string>
+ <string>57.IBPluginDependency</string>
+ <string>57.ImportedFromIB2</string>
+ <string>57.editorWindowContentRectSynchronizationRect</string>
+ <string>58.IBPluginDependency</string>
+ <string>58.ImportedFromIB2</string>
+ <string>72.IBPluginDependency</string>
+ <string>72.ImportedFromIB2</string>
+ <string>73.IBPluginDependency</string>
+ <string>73.ImportedFromIB2</string>
+ <string>74.IBPluginDependency</string>
+ <string>74.ImportedFromIB2</string>
+ <string>75.IBPluginDependency</string>
+ <string>75.ImportedFromIB2</string>
+ <string>77.IBPluginDependency</string>
+ <string>77.ImportedFromIB2</string>
+ <string>78.IBPluginDependency</string>
+ <string>78.ImportedFromIB2</string>
+ <string>79.IBPluginDependency</string>
+ <string>79.ImportedFromIB2</string>
+ <string>80.IBPluginDependency</string>
+ <string>80.ImportedFromIB2</string>
+ <string>81.IBEditorWindowLastContentRect</string>
+ <string>81.IBPluginDependency</string>
+ <string>81.ImportedFromIB2</string>
+ <string>81.editorWindowContentRectSynchronizationRect</string>
+ <string>82.IBPluginDependency</string>
+ <string>82.ImportedFromIB2</string>
+ <string>83.IBPluginDependency</string>
+ <string>83.ImportedFromIB2</string>
+ <string>92.IBPluginDependency</string>
+ <string>92.ImportedFromIB2</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{522, 812}, {146, 23}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{436, 809}, {64, 6}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{753, 187}, {275, 113}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{608, 612}, {275, 83}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{547, 180}, {254, 283}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{187, 434}, {243, 243}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{608, 612}, {167, 43}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{753, 217}, {238, 103}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{608, 612}, {241, 103}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{654, 239}, {194, 73}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{525, 802}, {197, 73}}</string>
+ <string>{{380, 836}, {512, 20}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{74, 862}</string>
+ <string>{{6, 978}, {478, 20}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{604, 269}, {231, 43}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{475, 832}, {234, 43}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{746, 287}, {220, 133}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{608, 612}, {215, 63}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{380, 496}, {480, 360}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{380, 496}, {480, 360}}</string>
+ <integer value="1"/>
+ <string>{{33, 99}, {480, 360}}</string>
+ <string>{3.40282e+38, 3.40282e+38}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{591, 420}, {83, 43}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{523, 2}, {178, 283}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{753, 197}, {170, 63}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{725, 289}, {246, 23}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{674, 260}, {204, 183}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>{{878, 180}, {164, 173}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{286, 129}, {275, 183}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{23, 794}, {245, 183}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{452, 109}, {196, 203}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>{{145, 474}, {199, 203}}</string>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ <string>com.apple.InterfaceBuilder.CocoaPlugin</string>
+ <integer value="1"/>
+ </object>
+ </object>
+ <object class="NSMutableDictionary" key="unlocalizedProperties">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference key="dict.sortedKeys" ref="0"/>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ </object>
+ <nil key="activeLocalization"/>
+ <object class="NSMutableDictionary" key="localizations">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <reference key="dict.sortedKeys" ref="0"/>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ </object>
+ </object>
+ <nil key="sourceID"/>
+ <int key="maxID">532</int>
+ </object>
+ <object class="IBClassDescriber" key="IBDocument.Classes">
+ <object class="NSMutableArray" key="referencedPartialClassDescriptions">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBPartialClassDescription">
+ <string key="className">TestAppAppDelegate</string>
+ <string key="superclassName">NSObject</string>
+ <object class="NSMutableDictionary" key="outlets">
+ <string key="NS.key.0">window</string>
+ <string key="NS.object.0">NSWindow</string>
+ </object>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBProjectSource</string>
+ <string key="minorKey">TestAppAppDelegate.h</string>
+ </object>
+ </object>
+ </object>
+ <object class="NSMutableArray" key="referencedPartialClassDescriptionsV3.2+">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSApplication</string>
+ <string key="superclassName">NSResponder</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="822405504">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSApplication.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSApplication</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="850738725">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSApplicationScripting.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSApplication</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="624831158">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSColorPanel.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSApplication</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSHelpManager.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSApplication</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSPageLayout.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSApplication</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSUserInterfaceItemSearching.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSBrowser</string>
+ <string key="superclassName">NSControl</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSBrowser.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSControl</string>
+ <string key="superclassName">NSView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="310914472">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSControl.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSDocument</string>
+ <string key="superclassName">NSObject</string>
+ <object class="NSMutableDictionary" key="actions">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>printDocument:</string>
+ <string>revertDocumentToSaved:</string>
+ <string>runPageLayout:</string>
+ <string>saveDocument:</string>
+ <string>saveDocumentAs:</string>
+ <string>saveDocumentTo:</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>id</string>
+ <string>id</string>
+ <string>id</string>
+ <string>id</string>
+ <string>id</string>
+ <string>id</string>
+ </object>
+ </object>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSDocument.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSDocument</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSDocumentScripting.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSDocumentController</string>
+ <string key="superclassName">NSObject</string>
+ <object class="NSMutableDictionary" key="actions">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <object class="NSArray" key="dict.sortedKeys">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>clearRecentDocuments:</string>
+ <string>newDocument:</string>
+ <string>openDocument:</string>
+ <string>saveAllDocuments:</string>
+ </object>
+ <object class="NSMutableArray" key="dict.values">
+ <bool key="EncodedWithXMLCoder">YES</bool>
+ <string>id</string>
+ <string>id</string>
+ <string>id</string>
+ <string>id</string>
+ </object>
+ </object>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSDocumentController.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSFontManager</string>
+ <string key="superclassName">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="946436764">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSFontManager.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSFormatter</string>
+ <string key="superclassName">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSFormatter.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSMatrix</string>
+ <string key="superclassName">NSControl</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSMatrix.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSMenu</string>
+ <string key="superclassName">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="1056362899">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSMenu.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSMenuItem</string>
+ <string key="superclassName">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="472958451">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSMenuItem.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSMovieView</string>
+ <string key="superclassName">NSView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSMovieView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSAccessibility.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <reference key="sourceIdentifier" ref="822405504"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <reference key="sourceIdentifier" ref="850738725"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <reference key="sourceIdentifier" ref="624831158"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <reference key="sourceIdentifier" ref="310914472"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSDictionaryController.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSDragging.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <reference key="sourceIdentifier" ref="946436764"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSFontPanel.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSKeyValueBinding.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <reference key="sourceIdentifier" ref="1056362899"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSNibLoading.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSOutlineView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSPasteboard.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSSavePanel.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="809545482">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSTableView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSToolbarItem.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier" id="260078765">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSArchiver.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSClassDescription.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSError.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSFileManager.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSKeyValueCoding.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSKeyValueObserving.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSKeyedArchiver.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSObject.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSObjectScripting.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSPortCoder.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSRunLoop.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSScriptClassDescription.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSScriptKeyValueCoding.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSScriptObjectSpecifiers.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSScriptWhoseTests.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSThread.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSURL.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSURLConnection.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">Foundation.framework/Headers/NSURLDownload.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSResponder</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSInterfaceStyle.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSResponder</string>
+ <string key="superclassName">NSObject</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSResponder.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSTableView</string>
+ <string key="superclassName">NSControl</string>
+ <reference key="sourceIdentifier" ref="809545482"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSText</string>
+ <string key="superclassName">NSView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSText.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSTextView</string>
+ <string key="superclassName">NSText</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSTextView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSClipView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSView</string>
+ <reference key="sourceIdentifier" ref="472958451"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSView</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSRulerView.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSView</string>
+ <string key="superclassName">NSResponder</string>
+ <reference key="sourceIdentifier" ref="260078765"/>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSWindow</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSDrawer.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSWindow</string>
+ <string key="superclassName">NSResponder</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSWindow.h</string>
+ </object>
+ </object>
+ <object class="IBPartialClassDescription">
+ <string key="className">NSWindow</string>
+ <object class="IBClassDescriptionSource" key="sourceIdentifier">
+ <string key="majorKey">IBFrameworkSource</string>
+ <string key="minorKey">AppKit.framework/Headers/NSWindowScripting.h</string>
+ </object>
+ </object>
+ </object>
+ </object>
+ <int key="IBDocument.localizationMode">0</int>
+ <object class="NSMutableDictionary" key="IBDocument.PluginDeclaredDependencyDefaults">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.CocoaPlugin.macosx</string>
+ <integer value="1060" key="NS.object.0"/>
+ </object>
+ <object class="NSMutableDictionary" key="IBDocument.PluginDeclaredDevelopmentDependencies">
+ <string key="NS.key.0">com.apple.InterfaceBuilder.CocoaPlugin.InterfaceBuilder3</string>
+ <integer value="3000" key="NS.object.0"/>
+ </object>
+ <bool key="IBDocument.PluginDeclaredDependenciesTrackSystemTargetVersion">YES</bool>
+ <string key="IBDocument.LastKnownRelativeProjectPath">../TestApp.xcodeproj</string>
+ <int key="IBDocument.defaultPropertyAccessControl">3</int>
+ </data>
+</archive>
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/utf-16be.strings b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/utf-16be.strings
new file mode 100644
index 0000000000..580783735f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/utf-16be.strings
Binary files differ
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/utf-16le.strings b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/utf-16le.strings
new file mode 100644
index 0000000000..eeb383784c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/English.lproj/utf-16le.strings
Binary files differ
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/AppIcon.appiconset/Contents.json b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/AppIcon.appiconset/Contents.json
new file mode 100644
index 0000000000..2db2b1c7c6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/AppIcon.appiconset/Contents.json
@@ -0,0 +1,58 @@
+{
+ "images" : [
+ {
+ "idiom" : "mac",
+ "size" : "16x16",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "16x16",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "32x32",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "32x32",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "128x128",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "128x128",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "256x256",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "256x256",
+ "scale" : "2x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "512x512",
+ "scale" : "1x"
+ },
+ {
+ "idiom" : "mac",
+ "size" : "512x512",
+ "scale" : "2x"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
\ No newline at end of file
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/Contents.json b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/Contents.json
new file mode 100644
index 0000000000..0a87b6edc6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/Contents.json
@@ -0,0 +1,23 @@
+{
+ "images" : [
+ {
+ "idiom" : "universal",
+ "scale" : "1x",
+ "filename" : "super_sylvain.png"
+ },
+ {
+ "idiom" : "universal",
+ "scale" : "2x",
+ "filename" : "super_sylvain@2x.png"
+ },
+ {
+ "idiom" : "universal",
+ "scale" : "3x",
+ "filename" : "super_sylvain@3x.png"
+ }
+ ],
+ "info" : {
+ "version" : 1,
+ "author" : "xcode"
+ }
+}
\ No newline at end of file
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain.png b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain.png
new file mode 100644
index 0000000000..0ba769182f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain.png
Binary files differ
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@2x.png b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@2x.png
new file mode 100644
index 0000000000..edfa6a5682
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@2x.png
Binary files differ
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@3x.png b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@3x.png
new file mode 100644
index 0000000000..e0652efc72
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/Images.xcassets/image.imageset/super_sylvain@3x.png
Binary files differ
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/TestApp-Info.plist b/third_party/python/gyp/test/mac/app-bundle/TestApp/TestApp-Info.plist
new file mode 100644
index 0000000000..e005852f9f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/TestApp-Info.plist
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>BuildMachineOSBuild</key>
+ <string>Doesn't matter, will be overwritten</string>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.${PRODUCT_NAME:rfc1034identifier}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>ause</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSMinimumSystemVersion</key>
+ <string>${MACOSX_DEPLOYMENT_TARGET}</string>
+ <key>NSMainNibFile</key>
+ <string>MainMenu</string>
+ <key>NSPrincipalClass</key>
+ <string>NSApplication</string>
+</dict>
+</plist>
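
The ${EXECUTABLE_NAME}-style placeholders in the plist above are expanded by gyp's Xcode emulation at build time; the :rfc1034identifier modifier additionally sanitizes the value into a DNS-label-safe string. A minimal Python sketch of that modifier, assuming it simply replaces disallowed characters with '-' (the exact character set is an assumption, not taken from gyp's code):

import re

def rfc1034identifier(value):
    # Assumed behaviour: anything outside [0-9A-Za-z-] becomes '-'.
    return re.sub(r'[^0-9A-Za-z-]', '-', value)

# 'Test App Gyp' -> 'Test-App-Gyp', giving a bundle identifier like
# com.google.Test-App-Gyp once the plist above is expanded.
print(rfc1034identifier('Test App Gyp'))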
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/TestAppAppDelegate.h b/third_party/python/gyp/test/mac/app-bundle/TestApp/TestAppAppDelegate.h
new file mode 100644
index 0000000000..518645eae9
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/TestAppAppDelegate.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Cocoa/Cocoa.h>
+
+@interface TestAppAppDelegate : NSObject <NSApplicationDelegate> {
+ NSWindow *window;
+}
+
+@property (assign) IBOutlet NSWindow *window;
+
+@end
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/TestAppAppDelegate.m b/third_party/python/gyp/test/mac/app-bundle/TestApp/TestAppAppDelegate.m
new file mode 100644
index 0000000000..9aafa42000
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/TestAppAppDelegate.m
@@ -0,0 +1,15 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "TestAppAppDelegate.h"
+
+@implementation TestAppAppDelegate
+
+@synthesize window;
+
+- (void)applicationDidFinishLaunching:(NSNotification *)aNotification {
+ // Insert code here to initialize your application
+}
+
+@end
diff --git a/third_party/python/gyp/test/mac/app-bundle/TestApp/main.m b/third_party/python/gyp/test/mac/app-bundle/TestApp/main.m
new file mode 100644
index 0000000000..df6a12d065
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/TestApp/main.m
@@ -0,0 +1,10 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Cocoa/Cocoa.h>
+
+int main(int argc, char *argv[])
+{
+ return NSApplicationMain(argc, (const char **) argv);
+}
diff --git a/third_party/python/gyp/test/mac/app-bundle/empty.c b/third_party/python/gyp/test/mac/app-bundle/empty.c
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/empty.c
diff --git a/third_party/python/gyp/test/mac/app-bundle/test-assets-catalog.gyp b/third_party/python/gyp/test/mac/app-bundle/test-assets-catalog.gyp
new file mode 100644
index 0000000000..25f94a12d0
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/test-assets-catalog.gyp
@@ -0,0 +1,43 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'dep_framework',
+ 'product_name': 'Dependency Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'empty.c', ],
+ },
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App Assets Catalog Gyp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'dependencies': [ 'dep_framework', ],
+ 'sources': [
+ 'TestApp/main.m',
+ 'TestApp/TestApp_Prefix.pch',
+ 'TestApp/TestAppAppDelegate.h',
+ 'TestApp/TestAppAppDelegate.m',
+ ],
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist.strings', # UTF-8
+ 'TestApp/English.lproj/utf-16be.strings',
+ 'TestApp/English.lproj/utf-16le.strings',
+ 'TestApp/English.lproj/MainMenu.xib',
+ 'TestApp/Images.xcassets',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ 'MACOSX_DEPLOYMENT_TARGET': '10.9',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/app-bundle/test-error.gyp b/third_party/python/gyp/test/mac/app-bundle/test-error.gyp
new file mode 100644
index 0000000000..370772cc31
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/test-error.gyp
@@ -0,0 +1,31 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App Gyp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'TestApp/main.m',
+ 'TestApp/TestApp_Prefix.pch',
+ 'TestApp/TestAppAppDelegate.h',
+ 'TestApp/TestAppAppDelegate.m',
+ ],
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist-error.strings',
+ 'TestApp/English.lproj/MainMenu.xib',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/app-bundle/test.gyp b/third_party/python/gyp/test/mac/app-bundle/test.gyp
new file mode 100644
index 0000000000..21973c3623
--- /dev/null
+++ b/third_party/python/gyp/test/mac/app-bundle/test.gyp
@@ -0,0 +1,41 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'dep_framework',
+ 'product_name': 'Dependency Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'empty.c', ],
+ },
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App Gyp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'dependencies': [ 'dep_framework', ],
+ 'sources': [
+ 'TestApp/main.m',
+ 'TestApp/TestApp_Prefix.pch',
+ 'TestApp/TestAppAppDelegate.h',
+ 'TestApp/TestAppAppDelegate.m',
+ ],
+ 'mac_bundle_resources': [
+ 'TestApp/English.lproj/InfoPlist.strings', # UTF-8
+ 'TestApp/English.lproj/utf-16be.strings',
+ 'TestApp/English.lproj/utf-16le.strings',
+ 'TestApp/English.lproj/MainMenu.xib',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'TestApp/TestApp-Info.plist',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/archs/empty_main.cc b/third_party/python/gyp/test/mac/archs/empty_main.cc
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/empty_main.cc
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/mac/archs/file.mm b/third_party/python/gyp/test/mac/archs/file.mm
new file mode 100644
index 0000000000..d0b39d1f6d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/file.mm
@@ -0,0 +1 @@
+MyInt f() { return 0; }
diff --git a/third_party/python/gyp/test/mac/archs/file_a.cc b/third_party/python/gyp/test/mac/archs/file_a.cc
new file mode 100644
index 0000000000..7307873c83
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/file_a.cc
@@ -0,0 +1,8 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "file_a.h"
+
+void DependentFunctionA() {
+}
diff --git a/third_party/python/gyp/test/mac/archs/file_a.h b/third_party/python/gyp/test/mac/archs/file_a.h
new file mode 100644
index 0000000000..7439d13182
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/file_a.h
@@ -0,0 +1,10 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _INCLUDED_TEST_MAC_DEPENDENCIES_FILE_A_H_
+#define _INCLUDED_TEST_MAC_DEPENDENCIES_FILE_A_H_
+
+void DependentFunctionA();
+
+#endif // _INCLUDED_TEST_MAC_DEPENDENCIES_FILE_A_H_
diff --git a/third_party/python/gyp/test/mac/archs/file_b.cc b/third_party/python/gyp/test/mac/archs/file_b.cc
new file mode 100644
index 0000000000..72d59cbfb4
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/file_b.cc
@@ -0,0 +1,8 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "file_b.h"
+
+void DependentFunctionB() {
+}
diff --git a/third_party/python/gyp/test/mac/archs/file_b.h b/third_party/python/gyp/test/mac/archs/file_b.h
new file mode 100644
index 0000000000..eb272ece55
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/file_b.h
@@ -0,0 +1,10 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _INCLUDED_TEST_MAC_DEPENDENCIES_FILE_B_H_
+#define _INCLUDED_TEST_MAC_DEPENDENCIES_FILE_B_H_
+
+void DependentFunctionB();
+
+#endif // _INCLUDED_TEST_MAC_DEPENDENCIES_FILE_B_H_
diff --git a/third_party/python/gyp/test/mac/archs/file_c.cc b/third_party/python/gyp/test/mac/archs/file_c.cc
new file mode 100644
index 0000000000..ca39f7a671
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/file_c.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "file_a.h"
+#include "file_b.h"
+
+void PublicFunctionC() {
+ DependentFunctionA();
+ DependentFunctionB();
+}
diff --git a/third_party/python/gyp/test/mac/archs/file_d.cc b/third_party/python/gyp/test/mac/archs/file_d.cc
new file mode 100644
index 0000000000..c40911cdca
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/file_d.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "file_a.h"
+#include "file_b.h"
+
+void PublicFunctionD() {
+ DependentFunctionA();
+ DependentFunctionB();
+}
diff --git a/third_party/python/gyp/test/mac/archs/header.h b/third_party/python/gyp/test/mac/archs/header.h
new file mode 100644
index 0000000000..0716e500c5
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/header.h
@@ -0,0 +1 @@
+typedef int MyInt;
diff --git a/third_party/python/gyp/test/mac/archs/my_file.cc b/third_party/python/gyp/test/mac/archs/my_file.cc
new file mode 100644
index 0000000000..94216a74df
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/my_file.cc
@@ -0,0 +1,4 @@
+/* Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+int x = 1;
diff --git a/third_party/python/gyp/test/mac/archs/my_main_file.cc b/third_party/python/gyp/test/mac/archs/my_main_file.cc
new file mode 100644
index 0000000000..f1fa06f276
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/my_main_file.cc
@@ -0,0 +1,9 @@
+/* Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+#include <stdio.h>
+extern int x;
+int main() {
+ printf("hello, world %d\n", x);
+}
+
diff --git a/third_party/python/gyp/test/mac/archs/test-archs-multiarch.gyp b/third_party/python/gyp/test/mac/archs/test-archs-multiarch.gyp
new file mode 100644
index 0000000000..567e8a6653
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/test-archs-multiarch.gyp
@@ -0,0 +1,92 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'static_32_64',
+ 'type': 'static_library',
+ 'sources': [ 'my_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'i386', 'x86_64' ],
+ },
+ },
+ {
+ 'target_name': 'shared_32_64',
+ 'type': 'shared_library',
+ 'sources': [ 'my_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'i386', 'x86_64' ],
+ },
+ },
+ {
+ 'target_name': 'shared_32_64_bundle',
+ 'product_name': 'My Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'my_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'i386', 'x86_64' ],
+ },
+ },
+ {
+ 'target_name': 'module_32_64',
+ 'type': 'loadable_module',
+ 'sources': [ 'my_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'i386', 'x86_64' ],
+ },
+ },
+ {
+ 'target_name': 'module_32_64_bundle',
+ 'product_name': 'My Bundle',
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ 'sources': [ 'my_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'i386', 'x86_64' ],
+ },
+ },
+ {
+ 'target_name': 'exe_32_64',
+ 'type': 'executable',
+ 'sources': [ 'empty_main.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'i386', 'x86_64' ],
+ },
+ },
+ {
+ 'target_name': 'exe_32_64_bundle',
+ 'product_name': 'Test App',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'empty_main.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'i386', 'x86_64' ],
+ },
+ },
+ # This only needs to compile.
+ {
+ 'target_name': 'precompiled_prefix_header_mm_32_64',
+ 'type': 'shared_library',
+ 'sources': [ 'file.mm', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ 'GCC_PRECOMPILE_PREFIX_HEADER': 'YES',
+ },
+ },
+ # This does not compile but should not cause generation errors.
+ {
+ 'target_name': 'exe_32_64_no_sources',
+ 'type': 'executable',
+ 'dependencies': [
+ 'static_32_64',
+ ],
+ 'sources': [],
+ 'xcode_settings': {
+ 'ARCHS': ['i386', 'x86_64'],
+ },
+ },
+ ]
+}
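
Each target above lists two entries in ARCHS, so every produced Mach-O should be a fat binary containing both slices. A quick way to verify that after a build, sketched in Python around Apple's lipo tool (the output path is hypothetical and depends on the generator used):

import subprocess

def architectures(path):
    # 'lipo -info' prints the architecture slices in a Mach-O file; a
    # binary built with ARCHS=['i386', 'x86_64'] should list both.
    return subprocess.check_output(['lipo', '-info', path], text=True).strip()

print(architectures('out/Default/exe_32_64'))  # hypothetical output path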
diff --git a/third_party/python/gyp/test/mac/archs/test-archs-x86_64.gyp b/third_party/python/gyp/test/mac/archs/test-archs-x86_64.gyp
new file mode 100644
index 0000000000..d11a896273
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/test-archs-x86_64.gyp
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib',
+ 'product_name': 'Test64',
+ 'type': 'static_library',
+ 'sources': [ 'my_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'x86_64' ],
+ },
+ },
+ {
+ 'target_name': 'exe',
+ 'product_name': 'Test64',
+ 'type': 'executable',
+ 'dependencies': [ 'lib' ],
+ 'sources': [ 'my_main_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'x86_64' ],
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/mac/archs/test-dependencies.gyp b/third_party/python/gyp/test/mac/archs/test-dependencies.gyp
new file mode 100644
index 0000000000..0431f5f2f4
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/test-dependencies.gyp
@@ -0,0 +1,92 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'xcode_settings': {
+ 'ARCHS': ['i386', 'x86_64'],
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'target_a',
+ 'type': 'static_library',
+ 'sources': [
+ 'file_a.cc',
+ 'file_a.h',
+ ],
+ },
+ {
+ 'target_name': 'target_b',
+ 'type': 'static_library',
+ 'sources': [
+ 'file_b.cc',
+ 'file_b.h',
+ ],
+ },
+ {
+ 'target_name': 'target_c_standalone_helper',
+ 'type': 'loadable_module',
+ 'hard_dependency': 1,
+ 'dependencies': [
+ 'target_a',
+ 'target_b',
+ ],
+ 'sources': [
+ 'file_c.cc',
+ ],
+ },
+ {
+ 'target_name': 'target_c_standalone',
+ 'type': 'none',
+ 'dependencies': [
+ 'target_c_standalone_helper',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'Package C',
+ 'inputs': [],
+ 'outputs': [
+ '<(PRODUCT_DIR)/libc_standalone.a',
+ ],
+ 'action': [
+ 'touch',
+ '<@(_outputs)',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'target_d_standalone_helper',
+ 'type': 'shared_library',
+ 'dependencies': [
+ 'target_a',
+ 'target_b',
+ ],
+ 'sources': [
+ 'file_d.cc',
+ ],
+ },
+ {
+ 'target_name': 'target_d_standalone',
+ 'type': 'none',
+ 'dependencies': [
+ 'target_d_standalone_helper',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'Package D',
+ 'inputs': [],
+ 'outputs': [
+ '<(PRODUCT_DIR)/libd_standalone.a',
+ ],
+ 'action': [
+ 'touch',
+ '<@(_outputs)',
+ ],
+ },
+ ],
+ }
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/archs/test-no-archs.gyp b/third_party/python/gyp/test/mac/archs/test-no-archs.gyp
new file mode 100644
index 0000000000..8f3b6b47cc
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/test-no-archs.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib',
+ 'product_name': 'Test',
+ 'type': 'static_library',
+ 'sources': [ 'my_file.cc' ],
+ },
+ {
+ 'target_name': 'exe',
+ 'product_name': 'Test',
+ 'type': 'executable',
+ 'dependencies': [ 'lib' ],
+ 'sources': [ 'my_main_file.cc' ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/mac/archs/test-valid-archs.gyp b/third_party/python/gyp/test/mac/archs/test-valid-archs.gyp
new file mode 100644
index 0000000000..c90ec1fe9b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/archs/test-valid-archs.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib',
+ 'product_name': 'Test',
+ 'type': 'static_library',
+ 'sources': [ 'my_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': ['i386', 'x86_64', 'unknown-arch'],
+ 'VALID_ARCHS': ['x86_64'],
+ },
+ },
+ {
+ 'target_name': 'exe',
+ 'product_name': 'Test',
+ 'type': 'executable',
+ 'dependencies': [ 'lib' ],
+ 'sources': [ 'my_main_file.cc' ],
+ 'xcode_settings': {
+ 'ARCHS': ['i386', 'x86_64', 'unknown-arch'],
+ 'VALID_ARCHS': ['x86_64'],
+ },
+ }]
+}
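
Both targets request three architectures but whitelist only one, so the build should come out x86_64-only. A sketch of the filtering this exercises, assuming gyp takes the simple intersection of ARCHS and VALID_ARCHS:

def active_archs(archs, valid_archs):
    # Keep only ARCHS entries that also appear in VALID_ARCHS; here that
    # drops both 'i386' and the bogus 'unknown-arch'.
    return [a for a in archs if a in valid_archs]

print(active_archs(['i386', 'x86_64', 'unknown-arch'], ['x86_64']))  # ['x86_64']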
diff --git a/third_party/python/gyp/test/mac/bundle-resources/change.sh b/third_party/python/gyp/test/mac/bundle-resources/change.sh
new file mode 100755
index 0000000000..6d0fe6c7c2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/bundle-resources/change.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+tr a-z A-Z < "${1}" > "${2}"
diff --git a/third_party/python/gyp/test/mac/bundle-resources/executable-file.sh b/third_party/python/gyp/test/mac/bundle-resources/executable-file.sh
new file mode 100755
index 0000000000..796953a1a2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/bundle-resources/executable-file.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo echo echo echo cho ho o o
diff --git a/third_party/python/gyp/test/mac/bundle-resources/secret.txt b/third_party/python/gyp/test/mac/bundle-resources/secret.txt
new file mode 100644
index 0000000000..8baef1b4ab
--- /dev/null
+++ b/third_party/python/gyp/test/mac/bundle-resources/secret.txt
@@ -0,0 +1 @@
+abc
diff --git a/third_party/python/gyp/test/mac/bundle-resources/test.gyp b/third_party/python/gyp/test/mac/bundle-resources/test.gyp
new file mode 100644
index 0000000000..af034ce3f4
--- /dev/null
+++ b/third_party/python/gyp/test/mac/bundle-resources/test.gyp
@@ -0,0 +1,59 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'resource',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'mac_bundle_resources': [
+ 'secret.txt',
+ 'executable-file.sh',
+ ],
+ },
+ # A rule with process_outputs_as_mac_bundle_resources should copy files
+ # into the Resources folder.
+ {
+ 'target_name': 'source_rule',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'secret.txt',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'bundlerule',
+ 'extension': 'txt',
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).txt',
+ ],
+ 'action': ['./change.sh', '<(RULE_INPUT_PATH)', '<@(_outputs)'],
+ 'message': 'Running rule on <(RULE_INPUT_PATH)',
+ 'process_outputs_as_mac_bundle_resources': 1,
+ },
+ ],
+ },
+ # So should an ordinary rule acting on mac_bundle_resources.
+ {
+ 'target_name': 'resource_rule',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'mac_bundle_resources': [
+ 'secret.txt',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'bundlerule',
+ 'extension': 'txt',
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).txt',
+ ],
+ 'action': ['./change.sh', '<(RULE_INPUT_PATH)', '<@(_outputs)'],
+ 'message': 'Running rule on <(RULE_INPUT_PATH)',
+ },
+ ],
+ },
+ ],
+}
+
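For every source matching a rule's extension, gyp expands the RULE_INPUT_* variables and runs the action (here change.sh, which uppercases its input). A loose Python model of one such expansion, not gyp's actual implementation:

import os

def expand_rule(action, input_path, intermediate_dir):
    # RULE_INPUT_PATH is the matched source; RULE_INPUT_ROOT is its
    # basename without the extension.
    root = os.path.splitext(os.path.basename(input_path))[0]
    subst = {'<(RULE_INPUT_PATH)': input_path,
             '<(RULE_INPUT_ROOT)': root,
             '<(INTERMEDIATE_DIR)': intermediate_dir}
    def sub(arg):
        for key, val in subst.items():
            arg = arg.replace(key, val)
        return arg
    return [sub(arg) for arg in action]

print(expand_rule(['./change.sh', '<(RULE_INPUT_PATH)',
                   '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).txt'],
                  'secret.txt', 'gen'))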
diff --git a/third_party/python/gyp/test/mac/cflags/ccfile.cc b/third_party/python/gyp/test/mac/cflags/ccfile.cc
new file mode 100644
index 0000000000..1a54d18eec
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/ccfile.cc
@@ -0,0 +1,7 @@
+#ifdef CFLAG
+#error CFLAG should not be set
+#endif
+
+#ifndef CCFLAG
+#error CCFLAG should be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/ccfile_withcflags.cc b/third_party/python/gyp/test/mac/cflags/ccfile_withcflags.cc
new file mode 100644
index 0000000000..de078a0641
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/ccfile_withcflags.cc
@@ -0,0 +1,7 @@
+#ifndef CFLAG
+#error CFLAG should be set
+#endif
+
+#ifndef CCFLAG
+#error CCFLAG should be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/cfile.c b/third_party/python/gyp/test/mac/cflags/cfile.c
new file mode 100644
index 0000000000..0af9d0af5d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/cfile.c
@@ -0,0 +1,7 @@
+#ifndef CFLAG
+#error CFLAG should be set
+#endif
+
+#ifdef CCFLAG
+#error CCFLAG should not be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/cppfile.cpp b/third_party/python/gyp/test/mac/cflags/cppfile.cpp
new file mode 100644
index 0000000000..1a54d18eec
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/cppfile.cpp
@@ -0,0 +1,7 @@
+#ifdef CFLAG
+#error CFLAG should not be set
+#endif
+
+#ifndef CCFLAG
+#error CCFLAG should be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/cppfile_withcflags.cpp b/third_party/python/gyp/test/mac/cflags/cppfile_withcflags.cpp
new file mode 100644
index 0000000000..de078a0641
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/cppfile_withcflags.cpp
@@ -0,0 +1,7 @@
+#ifndef CFLAG
+#error CFLAG should be set
+#endif
+
+#ifndef CCFLAG
+#error CCFLAG should be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/cxxfile.cxx b/third_party/python/gyp/test/mac/cflags/cxxfile.cxx
new file mode 100644
index 0000000000..1a54d18eec
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/cxxfile.cxx
@@ -0,0 +1,7 @@
+#ifdef CFLAG
+#error CFLAG should not be set
+#endif
+
+#ifndef CCFLAG
+#error CCFLAG should be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/cxxfile_withcflags.cxx b/third_party/python/gyp/test/mac/cflags/cxxfile_withcflags.cxx
new file mode 100644
index 0000000000..de078a0641
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/cxxfile_withcflags.cxx
@@ -0,0 +1,7 @@
+#ifndef CFLAG
+#error CFLAG should be set
+#endif
+
+#ifndef CCFLAG
+#error CCFLAG should be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/mfile.m b/third_party/python/gyp/test/mac/cflags/mfile.m
new file mode 100644
index 0000000000..0af9d0af5d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/mfile.m
@@ -0,0 +1,7 @@
+#ifndef CFLAG
+#error CFLAG should be set
+#endif
+
+#ifdef CCFLAG
+#error CCFLAG should not be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/mmfile.mm b/third_party/python/gyp/test/mac/cflags/mmfile.mm
new file mode 100644
index 0000000000..1a54d18eec
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/mmfile.mm
@@ -0,0 +1,7 @@
+#ifdef CFLAG
+#error CFLAG should not be set
+#endif
+
+#ifndef CCFLAG
+#error CCFLAG should be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/mmfile_withcflags.mm b/third_party/python/gyp/test/mac/cflags/mmfile_withcflags.mm
new file mode 100644
index 0000000000..de078a0641
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/mmfile_withcflags.mm
@@ -0,0 +1,7 @@
+#ifndef CFLAG
+#error CFLAG should be set
+#endif
+
+#ifndef CCFLAG
+#error CCFLAG should be set
+#endif
diff --git a/third_party/python/gyp/test/mac/cflags/test.gyp b/third_party/python/gyp/test/mac/cflags/test.gyp
new file mode 100644
index 0000000000..d330a548f6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/cflags/test.gyp
@@ -0,0 +1,132 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'mytarget',
+ 'type': 'shared_library',
+ 'sources': [
+ 'cfile.c',
+ 'mfile.m',
+ 'ccfile.cc',
+ 'cppfile.cpp',
+ 'cxxfile.cxx',
+ 'mmfile.mm',
+ ],
+ 'xcode_settings': {
+ # Normally, defines would go in 'defines' instead. This is just for
+ # testing.
+ 'OTHER_CFLAGS': [
+ '-DCFLAG',
+ ],
+ 'OTHER_CPLUSPLUSFLAGS': [
+ '-DCCFLAG',
+ ],
+ 'GCC_C_LANGUAGE_STANDARD': 'c99',
+ },
+ },
+ {
+ 'target_name': 'mytarget_reuse_cflags',
+ 'type': 'shared_library',
+ 'sources': [
+ 'cfile.c',
+ 'mfile.m',
+ 'ccfile_withcflags.cc',
+ 'cppfile_withcflags.cpp',
+ 'cxxfile_withcflags.cxx',
+ 'mmfile_withcflags.mm',
+ ],
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-DCFLAG',
+ ],
+ 'OTHER_CPLUSPLUSFLAGS': [
+ '$OTHER_CFLAGS',
+ '-DCCFLAG',
+ ],
+ # This is a C-only flag, to check these don't get added to C++ files.
+ 'GCC_C_LANGUAGE_STANDARD': 'c99',
+ },
+ },
+ {
+ 'target_name': 'mytarget_inherit_cflags',
+ 'type': 'shared_library',
+ 'sources': [
+ 'cfile.c',
+ 'mfile.m',
+ 'ccfile_withcflags.cc',
+ 'cppfile_withcflags.cpp',
+ 'cxxfile_withcflags.cxx',
+ 'mmfile_withcflags.mm',
+ ],
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-DCFLAG',
+ ],
+ 'OTHER_CPLUSPLUSFLAGS': [
+ '$inherited',
+ '-DCCFLAG',
+ ],
+ 'GCC_C_LANGUAGE_STANDARD': 'c99',
+ },
+ },
+ {
+ 'target_name': 'mytarget_inherit_cflags_parens',
+ 'type': 'shared_library',
+ 'sources': [
+ 'cfile.c',
+ 'mfile.m',
+ 'ccfile_withcflags.cc',
+ 'cppfile_withcflags.cpp',
+ 'cxxfile_withcflags.cxx',
+ 'mmfile_withcflags.mm',
+ ],
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-DCFLAG',
+ ],
+ 'OTHER_CPLUSPLUSFLAGS': [
+ '$(inherited)',
+ '-DCCFLAG',
+ ],
+ 'GCC_C_LANGUAGE_STANDARD': 'c99',
+ },
+ },
+ {
+ 'target_name': 'mytarget_inherit_cflags_braces',
+ 'type': 'shared_library',
+ 'sources': [
+ 'cfile.c',
+ 'mfile.m',
+ 'ccfile_withcflags.cc',
+ 'cppfile_withcflags.cpp',
+ 'cxxfile_withcflags.cxx',
+ 'mmfile_withcflags.mm',
+ ],
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-DCFLAG',
+ ],
+ 'OTHER_CPLUSPLUSFLAGS': [
+ '${inherited}',
+ '-DCCFLAG',
+ ],
+ 'GCC_C_LANGUAGE_STANDARD': 'c99',
+ },
+ },
+ {
+ 'target_name': 'ansi_standard',
+ 'type': 'shared_library',
+ 'sources': [
+ 'cfile.c',
+ ],
+ 'xcode_settings': {
+ 'OTHER_CFLAGS': [
+ '-DCFLAG',
+ ],
+ 'GCC_C_LANGUAGE_STANDARD': 'ansi',
+ },
+ },
+ ],
+}
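
The four mytarget_* variants above all expect the same result: the C-only -DCFLAG reaches C++ compiles too, whether it is spliced in explicitly via $OTHER_CFLAGS or via any of the three $inherited spellings. A sketch of that merge, under the assumption that all four spellings behave identically here:

def cplusplus_flags(settings):
    inherited = {'$OTHER_CFLAGS', '$inherited', '$(inherited)', '${inherited}'}
    flags = []
    for flag in settings.get('OTHER_CPLUSPLUSFLAGS', []):
        if flag in inherited:
            # Splice the C flags into the C++ flag list in place.
            flags.extend(settings.get('OTHER_CFLAGS', []))
        else:
            flags.append(flag)
    return flags

print(cplusplus_flags({'OTHER_CFLAGS': ['-DCFLAG'],
                       'OTHER_CPLUSPLUSFLAGS': ['$(inherited)', '-DCCFLAG']}))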
diff --git a/third_party/python/gyp/test/mac/clang-cxx-language-standard/c++11.cc b/third_party/python/gyp/test/mac/clang-cxx-language-standard/c++11.cc
new file mode 100644
index 0000000000..756dc1c7e6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/clang-cxx-language-standard/c++11.cc
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+static_assert(__cplusplus == 201103L, "wrong c++ standard version");
+
+int main() { return 0; }
+
diff --git a/third_party/python/gyp/test/mac/clang-cxx-language-standard/c++98.cc b/third_party/python/gyp/test/mac/clang-cxx-language-standard/c++98.cc
new file mode 100644
index 0000000000..a6a00c70f4
--- /dev/null
+++ b/third_party/python/gyp/test/mac/clang-cxx-language-standard/c++98.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if __cplusplus != 199711L
+#error wrong c++ standard version
+#endif
+
+enum cxx11_keywords {
+ alignas,
+ alignof,
+ char16_t,
+ char32_t,
+ constexpr,
+ decltype,
+ noexcept,
+ nullptr,
+ override,
+ static_assert,
+ thread_local,
+};
+
+int main() { return 0; }
+
diff --git a/third_party/python/gyp/test/mac/clang-cxx-language-standard/clang-cxx-language-standard.gyp b/third_party/python/gyp/test/mac/clang-cxx-language-standard/clang-cxx-language-standard.gyp
new file mode 100644
index 0000000000..eb60bbd0e8
--- /dev/null
+++ b/third_party/python/gyp/test/mac/clang-cxx-language-standard/clang-cxx-language-standard.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'c++98',
+ 'type': 'executable',
+ 'sources': [ 'c++98.cc', ],
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'CLANG_CXX_LANGUAGE_STANDARD': 'c++98',
+ },
+ },
+ {
+ 'target_name': 'c++11',
+ 'type': 'executable',
+ 'sources': [ 'c++11.cc', ],
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'CLANG_CXX_LANGUAGE_STANDARD': 'c++0x',
+ },
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/clang-cxx-library/clang-cxx-library.gyp b/third_party/python/gyp/test/mac/clang-cxx-library/clang-cxx-library.gyp
new file mode 100644
index 0000000000..67006e50a0
--- /dev/null
+++ b/third_party/python/gyp/test/mac/clang-cxx-library/clang-cxx-library.gyp
@@ -0,0 +1,32 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'libc++',
+ 'type': 'executable',
+ 'sources': [ 'libc++.cc', ],
+ 'xcode_settings': {
+ 'CC': 'clang',
+ # libc++ requires OS X 10.7+.
+ 'MACOSX_DEPLOYMENT_TARGET': '10.7',
+ 'CLANG_CXX_LIBRARY': 'libc++',
+ },
+ },
+ {
+ 'target_name': 'libstdc++',
+ 'type': 'executable',
+ 'sources': [ 'libstdc++.cc', ],
+ 'xcode_settings': {
+ 'CC': 'clang',
+ 'CLANG_CXX_LIBRARY': 'libstdc++',
+ },
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/clang-cxx-library/libc++.cc b/third_party/python/gyp/test/mac/clang-cxx-library/libc++.cc
new file mode 100644
index 0000000000..b8d6e6b3e2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/clang-cxx-library/libc++.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+#ifndef _LIBCPP_VERSION
+#error expected std library: libc++
+#endif
+
+int main() { std::string x; return x.size(); }
+
diff --git a/third_party/python/gyp/test/mac/clang-cxx-library/libstdc++.cc b/third_party/python/gyp/test/mac/clang-cxx-library/libstdc++.cc
new file mode 100644
index 0000000000..474dbf350d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/clang-cxx-library/libstdc++.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string>
+#ifndef __GLIBCXX__
+#error expected std library: libstdc++
+#endif
+
+int main() { std::string x; return x.size(); }
+
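CLANG_CXX_LIBRARY translates to clang's -stdlib= flag, and the two translation units above then verify the selection through the library-specific macros (_LIBCPP_VERSION for libc++, __GLIBCXX__ for libstdc++). A one-function sketch of that translation, assuming a direct mapping:

def stdlib_flag(settings):
    # 'libc++' -> ['-stdlib=libc++'], 'libstdc++' -> ['-stdlib=libstdc++'].
    lib = settings.get('CLANG_CXX_LIBRARY')
    return ['-stdlib=' + lib] if lib else []

print(stdlib_flag({'CLANG_CXX_LIBRARY': 'libc++'}))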
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/copies-with-xcode-envvars.gyp b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/copies-with-xcode-envvars.gyp
new file mode 100644
index 0000000000..c1b1241fb7
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/copies-with-xcode-envvars.gyp
@@ -0,0 +1,87 @@
+# Copyright (c) 2016 Mark Callow. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+#
+# For testing use of the UI settings & environment variables
+# available in Xcode's PBXCopyFilesBuildPhase.
+{
+'targets': [
+ {
+ 'target_name': 'copies-with-xcode-envvars',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'empty.c' ],
+ 'conditions': [
+ ['OS == "ios" or OS == "mac"', {
+ 'copies': [{
+ 'destination': '$(BUILT_PRODUCTS_DIR)',
+ 'files': [
+ 'file0',
+ ],
+ }, {
+ 'destination': '$(BUILT_PRODUCTS_DIR)/$(WRAPPER_NAME)',
+ 'files': [
+ 'file1',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(EXECUTABLE_FOLDER_PATH)',
+ 'files': [
+ 'file2',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(UNLOCALIZED_RESOURCES_FOLDER_PATH)',
+ 'files': [
+ 'file3',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(UNLOCALIZED_RESOURCES_FOLDER_PATH)/testimages',
+ 'files': [
+ 'file4',
+ ],
+ }, {
+ 'destination': '$(BUILT_PRODUCTS_DIR)/$(JAVA_FOLDER_PATH)',
+ 'files': [
+ 'file5',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(FRAMEWORKS_FOLDER_PATH)',
+ 'files': [
+ 'file6',
+ ],
+ }, {
+ # NOTE: This is not an Xcode macro name but
+ # xcodeproj_file.py recognizes it and sends
+ # the output to the same place as
+ # $(FRAMEWORKS_FOLDER_PATH). xcode_emulation.py
+ # sets its value to an absolute path.
+ 'destination': '$(BUILT_FRAMEWORKS_DIR)',
+ 'files': [
+ 'file7',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(SHARED_FRAMEWORKS_FOLDER_PATH)',
+ 'files': [
+ 'file8',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(SHARED_SUPPORT_FOLDER_PATH)',
+ 'files': [
+ 'file9',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(PLUGINS_FOLDER_PATH)',
+ 'files': [
+ 'file10',
+ ],
+ }, {
+ 'destination': '<(PRODUCT_DIR)/$(XPCSERVICES_FOLDER_PATH)',
+ 'files': [
+ 'file11',
+ ],
+ }], # copies
+ }], # OS == "ios" or OS == "mac"
+ ], # conditions
+ }], # targets
+}
+
+# vim:ai:ts=4:sts=4:sw=2:expandtab:textwidth=70
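
The destinations above mix generation-time gyp variables (<(PRODUCT_DIR)) with build-time Xcode macros ($(BUILT_PRODUCTS_DIR), $(WRAPPER_NAME), and friends). For a macOS app bundle the macros resolve to the standard Contents/ subfolders; a hypothetical sketch of a few of the mappings being tested (the values are illustrative, not taken from gyp's code):

# Where each macro should land for an app product named
# 'copies-with-xcode-envvars' (illustrative values only).
DESTINATIONS = {
    '$(BUILT_PRODUCTS_DIR)': 'out/Default',
    '$(WRAPPER_NAME)': 'copies-with-xcode-envvars.app',
    '$(UNLOCALIZED_RESOURCES_FOLDER_PATH)':
        'copies-with-xcode-envvars.app/Contents/Resources',
    '$(FRAMEWORKS_FOLDER_PATH)':
        'copies-with-xcode-envvars.app/Contents/Frameworks',
}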
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/empty.c b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/empty.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/empty.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file0 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file0
new file mode 100644
index 0000000000..117889361f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file0
@@ -0,0 +1 @@
+file0 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file1 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file1
new file mode 100644
index 0000000000..84d55c5759
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file1
@@ -0,0 +1 @@
+file1 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file10 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file10
new file mode 100644
index 0000000000..372e992ef9
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file10
@@ -0,0 +1 @@
+file10 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file11 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file11
new file mode 100644
index 0000000000..923e760e1f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file11
@@ -0,0 +1 @@
+file11 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file2 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file2
new file mode 100644
index 0000000000..af1b8ae35d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file2
@@ -0,0 +1 @@
+file2 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file3 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file3
new file mode 100644
index 0000000000..43f16f3522
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file3
@@ -0,0 +1 @@
+file3 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file4 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file4
new file mode 100644
index 0000000000..5f7270a084
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file4
@@ -0,0 +1 @@
+file4 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file5 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file5
new file mode 100644
index 0000000000..41f47186bd
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file5
@@ -0,0 +1 @@
+file5 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file6 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file6
new file mode 100644
index 0000000000..f5d5757348
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file6
@@ -0,0 +1 @@
+file6 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file7 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file7
new file mode 100644
index 0000000000..90dbe6e9e1
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file7
@@ -0,0 +1 @@
+file7 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file8 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file8
new file mode 100644
index 0000000000..9eb613fabb
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file8
@@ -0,0 +1 @@
+file8 contents
diff --git a/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file9 b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file9
new file mode 100644
index 0000000000..e37ac72ada
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copies-with-xcode-envvars/file9
@@ -0,0 +1 @@
+file9 contents
diff --git a/third_party/python/gyp/test/mac/copy-dylib/empty.c b/third_party/python/gyp/test/mac/copy-dylib/empty.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copy-dylib/empty.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/mac/copy-dylib/test.gyp b/third_party/python/gyp/test/mac/copy-dylib/test.gyp
new file mode 100644
index 0000000000..4210c51463
--- /dev/null
+++ b/third_party/python/gyp/test/mac/copy-dylib/test.gyp
@@ -0,0 +1,31 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'my_dylib',
+ 'type': 'shared_library',
+ 'sources': [ 'empty.c', ],
+ },
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'dependencies': [ 'my_dylib', ],
+ 'sources': [
+ 'empty.c',
+ ],
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/Test App.app/Contents/Resources',
+ 'files': [
+ '<(PRODUCT_DIR)/libmy_dylib.dylib',
+ ],
+ },
+ ],
+ },
+ ],
+}
+
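
As a sketch (not part of this commit): the 'copies' step above refers to the
dylib by its built product name, which for a plain 'shared_library' target gyp
derives as 'lib' + target_name + '.dylib' unless 'product_name' overrides it.
Illustrative Python helper:

    def dylib_product_name(target_name):
        # Default mac shared_library naming: lib<target_name>.dylib.
        return 'lib' + target_name + '.dylib'

    assert dylib_product_name('my_dylib') == 'libmy_dylib.dylib'
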
diff --git a/third_party/python/gyp/test/mac/debuginfo/file.c b/third_party/python/gyp/test/mac/debuginfo/file.c
new file mode 100644
index 0000000000..9cddaf1b0b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/debuginfo/file.c
@@ -0,0 +1,6 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void f() {}
+int main() {}
diff --git a/third_party/python/gyp/test/mac/debuginfo/test.gyp b/third_party/python/gyp/test/mac/debuginfo/test.gyp
new file mode 100644
index 0000000000..3faf6b5c76
--- /dev/null
+++ b/third_party/python/gyp/test/mac/debuginfo/test.gyp
@@ -0,0 +1,82 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'nonbundle_static_library',
+ 'type': 'static_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEBUG_INFORMATION_FORMAT': 'dwarf-with-dsym',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+ {
+ 'target_name': 'nonbundle_shared_library',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEBUG_INFORMATION_FORMAT': 'dwarf-with-dsym',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+ {
+ 'target_name': 'nonbundle_loadable_module',
+ 'type': 'loadable_module',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEBUG_INFORMATION_FORMAT': 'dwarf-with-dsym',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+ {
+ 'target_name': 'nonbundle_executable',
+ 'type': 'executable',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEBUG_INFORMATION_FORMAT': 'dwarf-with-dsym',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+
+ {
+ 'target_name': 'bundle_shared_library',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEBUG_INFORMATION_FORMAT': 'dwarf-with-dsym',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+ {
+ 'target_name': 'bundle_loadable_module',
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEBUG_INFORMATION_FORMAT': 'dwarf-with-dsym',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+ {
+ 'target_name': 'my_app',
+ 'product_name': 'My App',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEBUG_INFORMATION_FORMAT': 'dwarf-with-dsym',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+ ],
+}
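
The same three xcode_settings are repeated for every target above. As an
illustration only (not part of this commit), gyp also accepts the block
factored into 'target_defaults', which merges into each target in the file:

    {
      'target_defaults': {
        'xcode_settings': {
          'DEBUG_INFORMATION_FORMAT': 'dwarf-with-dsym',
          'DEPLOYMENT_POSTPROCESSING': 'YES',
          'STRIP_INSTALLED_PRODUCT': 'YES',
        },
      },
      'targets': [
        # ... the targets above, minus the repeated xcode_settings ...
      ],
    }
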
diff --git a/third_party/python/gyp/test/mac/depend-on-bundle/English.lproj/InfoPlist.strings b/third_party/python/gyp/test/mac/depend-on-bundle/English.lproj/InfoPlist.strings
new file mode 100644
index 0000000000..b92732c79e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/depend-on-bundle/English.lproj/InfoPlist.strings
@@ -0,0 +1 @@
+/* Localized versions of Info.plist keys */
diff --git a/third_party/python/gyp/test/mac/depend-on-bundle/Info.plist b/third_party/python/gyp/test/mac/depend-on-bundle/Info.plist
new file mode 100644
index 0000000000..5e05a5190c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/depend-on-bundle/Info.plist
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.yourcompany.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>FMWK</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>NSPrincipalClass</key>
+ <string></string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/depend-on-bundle/bundle.c b/third_party/python/gyp/test/mac/depend-on-bundle/bundle.c
new file mode 100644
index 0000000000..d64ff8ca23
--- /dev/null
+++ b/third_party/python/gyp/test/mac/depend-on-bundle/bundle.c
@@ -0,0 +1 @@
+int f() { return 42; }
diff --git a/third_party/python/gyp/test/mac/depend-on-bundle/executable.c b/third_party/python/gyp/test/mac/depend-on-bundle/executable.c
new file mode 100644
index 0000000000..931bce637e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/depend-on-bundle/executable.c
@@ -0,0 +1,4 @@
+int f();
+int main() {
+ return f();
+}
diff --git a/third_party/python/gyp/test/mac/depend-on-bundle/test.gyp b/third_party/python/gyp/test/mac/depend-on-bundle/test.gyp
new file mode 100644
index 0000000000..e00b105415
--- /dev/null
+++ b/third_party/python/gyp/test/mac/depend-on-bundle/test.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'my_bundle',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'bundle.c' ],
+ 'mac_bundle_resources': [
+ 'English.lproj/InfoPlist.strings',
+ ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ }
+ },
+ {
+ 'target_name': 'dependent_on_bundle',
+ 'type': 'executable',
+ 'sources': [ 'executable.c' ],
+ 'dependencies': [
+ 'my_bundle',
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/deployment-target/check-version-min.c b/third_party/python/gyp/test/mac/deployment-target/check-version-min.c
new file mode 100644
index 0000000000..761c529085
--- /dev/null
+++ b/third_party/python/gyp/test/mac/deployment-target/check-version-min.c
@@ -0,0 +1,33 @@
+/* Copyright (c) 2013 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <Availability.h>
+
+/* GYPTEST_MAC_VERSION_MIN: should be set to the corresponding value of
+ * xcode setting 'MACOSX_DEPLOYMENT_TARGET', otherwise both should be
+ * left undefined.
+ *
+ * GYPTEST_IOS_VERSION_MIN: should be set to the corresponding value of
+ * xcode setting 'IPHONEOS_DEPLOYMENT_TARGET', otherwise both should be
+ * left undefined.
+ */
+
+#if defined(GYPTEST_MAC_VERSION_MIN)
+# if GYPTEST_MAC_VERSION_MIN != __MAC_OS_X_VERSION_MIN_REQUIRED
+# error __MAC_OS_X_VERSION_MIN_REQUIRED has wrong value
+# endif
+#elif defined(__MAC_OS_X_VERSION_MIN_REQUIRED)
+# error __MAC_OS_X_VERSION_MIN_REQUIRED should be undefined
+#endif
+
+#if defined(GYPTEST_IOS_VERSION_MIN)
+# if GYPTEST_IOS_VERSION_MIN != __IPHONE_OS_VERSION_MIN_REQUIRED
+# error __IPHONE_OS_VERSION_MIN_REQUIRED has wrong value
+# endif
+#elif defined(__IPHONE_OS_VERSION_MIN_REQUIRED)
+# error __IPHONE_OS_VERSION_MIN_REQUIRED should be undefined
+#endif
+
+int main() { return 0; }
+
diff --git a/third_party/python/gyp/test/mac/deployment-target/deployment-target.gyp b/third_party/python/gyp/test/mac/deployment-target/deployment-target.gyp
new file mode 100644
index 0000000000..47e0565c0c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/deployment-target/deployment-target.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'macosx-version-min-10.5',
+ 'type': 'executable',
+ 'sources': [ 'check-version-min.c', ],
+ 'defines': [ 'GYPTEST_MAC_VERSION_MIN=1050', ],
+ 'xcode_settings': {
+ 'SDKROOT': 'macosx',
+ 'MACOSX_DEPLOYMENT_TARGET': '10.5',
+ },
+ },
+ {
+ 'target_name': 'macosx-version-min-10.6',
+ 'type': 'executable',
+ 'sources': [ 'check-version-min.c', ],
+ 'defines': [ 'GYPTEST_MAC_VERSION_MIN=1060', ],
+ 'xcode_settings': {
+ 'SDKROOT': 'macosx',
+ 'MACOSX_DEPLOYMENT_TARGET': '10.6',
+ },
+ },
+ ],
+}
+
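
For orientation (sketch only, not part of this commit): the generators turn
MACOSX_DEPLOYMENT_TARGET into clang's -mmacosx-version-min flag, which is what
drives the __MAC_OS_X_VERSION_MIN_REQUIRED value checked by
check-version-min.c. A rough Python equivalent of what the
'macosx-version-min-10.5' target compiles, assuming clang from an Xcode
toolchain is on PATH:

    import subprocess

    subprocess.check_call([
        'clang', '-mmacosx-version-min=10.5',
        '-DGYPTEST_MAC_VERSION_MIN=1050',
        'check-version-min.c', '-o', 'check-version-min'])
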
diff --git a/third_party/python/gyp/test/mac/framework-dirs/calculate.c b/third_party/python/gyp/test/mac/framework-dirs/calculate.c
new file mode 100644
index 0000000000..7dc9d2d8b4
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework-dirs/calculate.c
@@ -0,0 +1,15 @@
+/* Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+int CalculatePerformExpression(char* expr,
+ int significantDigits,
+ int flags,
+ char* answer);
+
+int main() {
+ char buffer[1024];
+ return CalculatePerformExpression("42", 1, 0, buffer);
+}
+
diff --git a/third_party/python/gyp/test/mac/framework-dirs/framework-dirs.gyp b/third_party/python/gyp/test/mac/framework-dirs/framework-dirs.gyp
new file mode 100644
index 0000000000..bf1cbde4de
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework-dirs/framework-dirs.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'calculate',
+ 'type': 'executable',
+ 'sources': [
+ 'calculate.c',
+ ],
+ 'libraries': [
+ '/System/Library/PrivateFrameworks/Calculate.framework',
+ ],
+ 'mac_framework_dirs': [
+ '/System/Library/PrivateFrameworks',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/framework-headers/myframework.h b/third_party/python/gyp/test/mac/framework-headers/myframework.h
new file mode 100644
index 0000000000..961fc701bc
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework-headers/myframework.h
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Foundation/Foundation.h>
+
+@interface TestObject : NSObject
+@end
diff --git a/third_party/python/gyp/test/mac/framework-headers/myframework.m b/third_party/python/gyp/test/mac/framework-headers/myframework.m
new file mode 100644
index 0000000000..13d53a37ab
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework-headers/myframework.m
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "myframework.h"
+
+@implementation TestObject
+@end
diff --git a/third_party/python/gyp/test/mac/framework-headers/test.gyp b/third_party/python/gyp/test/mac/framework-headers/test.gyp
new file mode 100644
index 0000000000..70ed00715c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework-headers/test.gyp
@@ -0,0 +1,44 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_framework_headers_framework',
+ 'product_name': 'TestFramework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'myframework.h',
+ 'myframework.m',
+ ],
+ 'mac_framework_headers': [
+ 'myframework.h',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ ],
+ },
+ },{
+ 'target_name': 'test_framework_headers_static',
+ 'product_name': 'TestLibrary',
+ 'type': 'static_library',
+ 'xcode_settings': {
+ 'PUBLIC_HEADERS_FOLDER_PATH': 'include',
+ },
+ 'sources': [
+ 'myframework.h',
+ 'myframework.m',
+ ],
+ 'mac_framework_headers': [
+ 'myframework.h',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/framework/TestFramework/English.lproj/InfoPlist.strings b/third_party/python/gyp/test/mac/framework/TestFramework/English.lproj/InfoPlist.strings
new file mode 100644
index 0000000000..88f65cf6ea
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework/TestFramework/English.lproj/InfoPlist.strings
@@ -0,0 +1,2 @@
+/* Localized versions of Info.plist keys */
+
diff --git a/third_party/python/gyp/test/mac/framework/TestFramework/Info.plist b/third_party/python/gyp/test/mac/framework/TestFramework/Info.plist
new file mode 100644
index 0000000000..a791b3ee48
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework/TestFramework/Info.plist
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.yourcompany.${PRODUCT_NAME:identifier}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>FMWK</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>NSPrincipalClass</key>
+ <string></string>
+</dict>
+</plist>
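
The ':identifier' modifier in CFBundleIdentifier above is gyp's plist
expansion feature for mapping characters that are invalid in identifiers to
underscores; gyptest-framework.py below expects 'Test Framework' to expand to
'com.yourcompany.Test_Framework'. A Python approximation of that mapping (the
regex is our guess, not gyp's exact code):

    import re

    def identifier(value):
        # Replace anything outside [A-Za-z0-9_] with '_'.
        return re.sub(r'[^A-Za-z0-9_]', '_', value)

    assert identifier('Test Framework') == 'Test_Framework'
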
diff --git a/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVector.h b/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVector.h
new file mode 100644
index 0000000000..c2450960cd
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVector.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Cocoa/Cocoa.h>
+
+#ifdef __cplusplus
+struct ObjCVectorImp;
+#else
+typedef struct _ObjCVectorImpT ObjCVectorImp;
+#endif
+
+@interface ObjCVector : NSObject {
+ @private
+ ObjCVectorImp* imp_;
+}
+
+- (id)init;
+
+- (void)addObject:(id)obj;
+- (void)addObject:(id)obj atIndex:(NSUInteger)index;
+
+- (void)removeObject:(id)obj;
+- (void)removeObjectAtIndex:(NSUInteger)index;
+
+- (id)objectAtIndex:(NSUInteger)index;
+
+@end
diff --git a/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVector.mm b/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVector.mm
new file mode 100644
index 0000000000..cbf431f28d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVector.mm
@@ -0,0 +1,63 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "ObjCVectorInternal.h"
+#import "ObjCVector.h"
+
+#include <vector>
+
+@interface ObjCVector (Private)
+- (std::vector<id>::iterator)makeIterator:(NSUInteger)index;
+@end
+
+@implementation ObjCVector
+
+- (id)init {
+ if ((self = [super init])) {
+ imp_ = new ObjCVectorImp();
+ }
+ return self;
+}
+
+- (void)dealloc {
+ delete imp_;
+ [super dealloc];
+}
+
+- (void)addObject:(id)obj {
+ imp_->v.push_back([obj retain]);
+}
+
+- (void)addObject:(id)obj atIndex:(NSUInteger)index {
+ imp_->v.insert([self makeIterator:index], [obj retain]);
+}
+
+- (void)removeObject:(id)obj {
+ for (std::vector<id>::iterator it = imp_->v.begin();
+ it != imp_->v.end();
+ ++it) {
+ if ([*it isEqual:obj]) {
+ [*it autorelease];
+ imp_->v.erase(it);
+ return;
+ }
+ }
+}
+
+- (void)removeObjectAtIndex:(NSUInteger)index {
+ [imp_->v[index] autorelease];
+ imp_->v.erase([self makeIterator:index]);
+}
+
+- (id)objectAtIndex:(NSUInteger)index {
+ return imp_->v[index];
+}
+
+- (std::vector<id>::iterator)makeIterator:(NSUInteger)index {
+ std::vector<id>::iterator it = imp_->v.begin();
+ it += index;
+ return it;
+}
+
+@end
diff --git a/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVectorInternal.h b/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVectorInternal.h
new file mode 100644
index 0000000000..fb6c98258b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework/TestFramework/ObjCVectorInternal.h
@@ -0,0 +1,9 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+struct ObjCVectorImp {
+ std::vector<id> v;
+};
diff --git a/third_party/python/gyp/test/mac/framework/TestFramework/TestFramework_Prefix.pch b/third_party/python/gyp/test/mac/framework/TestFramework/TestFramework_Prefix.pch
new file mode 100644
index 0000000000..394f41d957
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework/TestFramework/TestFramework_Prefix.pch
@@ -0,0 +1,7 @@
+//
+// Prefix header for all source files of the 'TestFramework' target in the 'TestFramework' project.
+//
+
+#ifdef __OBJC__
+ #import <Cocoa/Cocoa.h>
+#endif
diff --git a/third_party/python/gyp/test/mac/framework/empty.c b/third_party/python/gyp/test/mac/framework/empty.c
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework/empty.c
diff --git a/third_party/python/gyp/test/mac/framework/framework.gyp b/third_party/python/gyp/test/mac/framework/framework.gyp
new file mode 100644
index 0000000000..52b4f37be9
--- /dev/null
+++ b/third_party/python/gyp/test/mac/framework/framework.gyp
@@ -0,0 +1,108 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'dep_framework',
+ 'product_name': 'Dependency Bundle',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'empty.c', ],
+ },
+ {
+ 'target_name': 'test_framework',
+ 'product_name': 'Test Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'dependencies': [ 'dep_framework', ],
+ 'sources': [
+ 'TestFramework/ObjCVector.h',
+ 'TestFramework/ObjCVectorInternal.h',
+ 'TestFramework/ObjCVector.mm',
+ ],
+ 'mac_bundle_resources': [
+ 'TestFramework/English.lproj/InfoPlist.strings',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Cocoa.framework',
+ ],
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'TestFramework/Info.plist',
+ 'GCC_DYNAMIC_NO_PIC': 'NO',
+ },
+ 'copies': [
+ # Test copying to a file that has envvars in its dest path.
+ # Needs to be in a mac_bundle target, else CONTENTS_FOLDER_PATH isn't
+ # set.
+ {
+ 'destination': '<(PRODUCT_DIR)/$(CONTENTS_FOLDER_PATH)/Libraries',
+ 'files': [
+ 'empty.c',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'copy_target',
+ 'type': 'none',
+ 'dependencies': [ 'test_framework', 'dep_framework', ],
+ 'copies': [
+ # Test copying directories with spaces in src and dest paths.
+ {
+ 'destination': '<(PRODUCT_DIR)/Test Framework.framework/foo',
+ 'files': [
+ '<(PRODUCT_DIR)/Dependency Bundle.framework',
+ ],
+ },
+ ],
+ 'actions': [
+ {
+ 'action_name': 'aektschn',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/touched_file'],
+ 'action': ['touch', '${BUILT_PRODUCTS_DIR}/action_file'],
+ },
+ ],
+ },
+ {
+ 'target_name': 'copy_embedded',
+ 'type': 'none',
+ 'dependencies': [ 'test_framework' ],
+ 'copies': [
+ # Test copying framework to FRAMEWORK directory.
+ {
+ 'destination': '$(BUILT_FRAMEWORKS_DIR)/Embedded',
+ 'files': [
+ '<(PRODUCT_DIR)/Test Framework.framework',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'copy_target_code_sign',
+ 'type': 'none',
+ 'dependencies': [ 'test_framework', 'dep_framework', ],
+ 'copies': [
+ # Test copying directories with spaces in src and dest paths.
+ {
+ 'destination': '<(PRODUCT_DIR)/Test Framework.framework/foo',
+ 'files': [
+ '<(PRODUCT_DIR)/Dependency Bundle.framework',
+ ],
+ 'xcode_code_sign': 1,
+ },
+ ],
+ 'actions': [
+ {
+ 'action_name': 'aektschn',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/touched_file'],
+ 'action': ['touch', '${BUILT_PRODUCTS_DIR}/action_file'],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/global-settings/src/dir1/dir1.gyp b/third_party/python/gyp/test/mac/global-settings/src/dir1/dir1.gyp
new file mode 100644
index 0000000000..153e34ddd6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/global-settings/src/dir1/dir1.gyp
@@ -0,0 +1,11 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'dir1_target',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/global-settings/src/dir2/dir2.gyp b/third_party/python/gyp/test/mac/global-settings/src/dir2/dir2.gyp
new file mode 100644
index 0000000000..cda46c839b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/global-settings/src/dir2/dir2.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'dir2_target',
+ 'type': 'none',
+ 'dependencies': [
+ '../dir1/dir1.gyp:dir1_target',
+ ],
+ 'actions': [
+ {
+ 'inputs': [ ],
+ 'outputs': [ '<(PRODUCT_DIR)/file.txt' ],
+ 'action_name': 'Test action',
+ 'action': ['cp', 'file.txt', '${BUILT_PRODUCTS_DIR}/file.txt' ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/global-settings/src/dir2/file.txt b/third_party/python/gyp/test/mac/global-settings/src/dir2/file.txt
new file mode 100644
index 0000000000..58da2d8e9a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/global-settings/src/dir2/file.txt
@@ -0,0 +1 @@
+File.
diff --git a/third_party/python/gyp/test/mac/gyptest-action-envvars.py b/third_party/python/gyp/test/mac/gyptest-action-envvars.py
new file mode 100644
index 0000000000..c84eeaa465
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-action-envvars.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that env vars work with actions, with relative directory paths.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+  # The xcode-ninja generator mishandles gypfiles that are not at the
+  # project root.
+ # cf. https://code.google.com/p/gyp/issues/detail?id=460
+ if test.format == 'xcode-ninja':
+ test.skip_test()
+
+ CHDIR = 'action-envvars'
+ test.run_gyp('action/action.gyp', chdir=CHDIR)
+ test.build('action/action.gyp', 'action', chdir=CHDIR, SYMROOT='../build')
+
+ result_file = test.built_file_path('result', chdir=CHDIR)
+ test.must_exist(result_file)
+ test.must_contain(result_file, 'Test output')
+
+ other_result_file = test.built_file_path('other_result', chdir=CHDIR)
+ test.must_exist(other_result_file)
+ test.must_contain(other_result_file, 'Other output')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-app-assets-catalog.py b/third_party/python/gyp/test/mac/gyptest-app-assets-catalog.py
new file mode 100755
index 0000000000..7b1c0f67de
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-app-assets-catalog.py
@@ -0,0 +1,125 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that app bundles are built correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import TestMac
+
+import os
+import plistlib
+import subprocess
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+def ExpectEq(expected, actual):
+ if expected != actual:
+ print('Expected "%s", got "%s"' % (expected, actual), file=sys.stderr)
+ test.fail_test()
+
+def ls(path):
+ '''Returns a list of all files in a directory, relative to the directory.'''
+ result = []
+ for dirpath, _, files in os.walk(path):
+ for f in files:
+ result.append(os.path.join(dirpath, f)[len(path) + 1:])
+ return result
+
+# Xcode support for asset catalogs was introduced in Xcode 6.0
+if sys.platform == 'darwin' and TestMac.Xcode.Version() >= '0600':
+ test_gyp_path = 'test-assets-catalog.gyp'
+ test_app_path = 'Test App Assets Catalog Gyp.app'
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+ test.run_gyp(test_gyp_path, chdir='app-bundle')
+ test.build(test_gyp_path, test.ALL, chdir='app-bundle')
+
+ # Binary
+ test.built_file_must_exist(
+ os.path.join(test_app_path, 'Contents/MacOS/Test App Assets Catalog Gyp'),
+ chdir='app-bundle')
+
+ # Info.plist
+ info_plist = test.built_file_path(
+ os.path.join(test_app_path, 'Contents/Info.plist'),
+ chdir='app-bundle')
+ test.must_exist(info_plist)
+ test.must_contain(
+ info_plist,
+ 'com.google.Test-App-Assets-Catalog-Gyp') # Variable expansion
+  test.must_not_contain(info_plist, '${MACOSX_DEPLOYMENT_TARGET}')
+
+ if test.format != 'make':
+ # TODO: Synthesized plist entries aren't hooked up in the make generator.
+ machine = subprocess.check_output(['sw_vers', '-buildVersion']).rstrip('\n')
+ plist = plistlib.readPlist(info_plist)
+ ExpectEq(machine, plist['BuildMachineOSBuild'])
+
+ expected = ''
+ version = TestMac.Xcode.SDKVersion()
+ expected = 'macosx' + version
+ ExpectEq(expected, plist['DTSDKName'])
+ sdkbuild = TestMac.Xcode.SDKBuild()
+ if not sdkbuild:
+ # Above command doesn't work in Xcode 4.2.
+ sdkbuild = plist['BuildMachineOSBuild']
+ ExpectEq(sdkbuild, plist['DTSDKBuild'])
+ ExpectEq(TestMac.Xcode.Version(), plist['DTXcode'])
+ ExpectEq(TestMac.Xcode.Build(), plist['DTXcodeBuild'])
+
+ # Resources
+ strings_files = ['InfoPlist.strings', 'utf-16be.strings', 'utf-16le.strings']
+ for f in strings_files:
+ strings = test.built_file_path(
+ os.path.join(test_app_path, 'Contents/Resources/English.lproj', f),
+ chdir='app-bundle')
+ test.must_exist(strings)
+    # Xcode writes UTF-16LE with a BOM.
+ contents = open(strings, 'rb').read()
+ if not contents.startswith('\xff\xfe' + '/* Localized'.encode('utf-16le')):
+ test.fail_test()
+
+ test.built_file_must_exist(
+ os.path.join(
+ test_app_path, 'Contents/Resources/English.lproj/MainMenu.nib'),
+ chdir='app-bundle')
+
+  # make does not support .xcassets files
+ extra_content_files = []
+ if test.format != 'make':
+ extra_content_files = ['Contents/Resources/Assets.car']
+ for f in extra_content_files:
+ test.built_file_must_exist(
+ os.path.join(test_app_path, f),
+ chdir='app-bundle')
+
+ # Packaging
+ test.built_file_must_exist(
+ os.path.join(test_app_path, 'Contents/PkgInfo'),
+ chdir='app-bundle')
+ test.built_file_must_match(
+ os.path.join(test_app_path, 'Contents/PkgInfo'), 'APPLause',
+ chdir='app-bundle')
+
+ # Check that no other files get added to the bundle.
+ if set(ls(test.built_file_path(test_app_path, chdir='app-bundle'))) != \
+ set(['Contents/MacOS/Test App Assets Catalog Gyp',
+ 'Contents/Info.plist',
+ 'Contents/Resources/English.lproj/MainMenu.nib',
+ 'Contents/PkgInfo',
+ ] + extra_content_files +
+ [os.path.join('Contents/Resources/English.lproj', f)
+ for f in strings_files]):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-app-error.py b/third_party/python/gyp/test/mac/gyptest-app-error.py
new file mode 100755
index 0000000000..df0781d455
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-app-error.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that invalid strings files cause the build to fail.
+"""
+
+from __future__ import print_function
+
+import TestCmd
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ expected_error = 'Old-style plist parser: missing semicolon in dictionary'
+ saw_expected_error = [False] # Python2 has no "nonlocal" keyword.
+ def match(a, b):
+ if a == b:
+ return True
+ if not TestCmd.is_List(a):
+ a = a.split('\n')
+ if not TestCmd.is_List(b):
+ b = b.split('\n')
+ if expected_error in '\n'.join(a) + '\n'.join(b):
+ saw_expected_error[0] = True
+ return True
+ return False
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'], match=match)
+
+ test.run_gyp('test-error.gyp', chdir='app-bundle')
+
+ test.build('test-error.gyp', test.ALL, chdir='app-bundle')
+
+ # Ninja pipes stderr of subprocesses to stdout.
+ if test.format in ['ninja', 'xcode-ninja'] \
+ and expected_error in test.stdout():
+ saw_expected_error[0] = True
+
+ if saw_expected_error[0]:
+ test.pass_test()
+ else:
+ test.fail_test()
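
The one-element list above is a Python 2 closure workaround, as its comment
notes. For comparison only (not part of this commit), the same matcher in a
Python 3 shape using 'nonlocal':

    def make_match(expected_error):
        saw_expected_error = False

        def match(a, b):
            nonlocal saw_expected_error
            if a == b:
                return True
            text = '\n'.join(a if isinstance(a, list) else a.split('\n'))
            text += '\n'.join(b if isinstance(b, list) else b.split('\n'))
            if expected_error in text:
                saw_expected_error = True
                return True
            return False

        def saw():
            return saw_expected_error

        return match, saw
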
diff --git a/third_party/python/gyp/test/mac/gyptest-app.py b/third_party/python/gyp/test/mac/gyptest-app.py
new file mode 100755
index 0000000000..16c9640373
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-app.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that app bundles are built correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import TestMac
+
+import os
+import plistlib
+import subprocess
+import sys
+
+
+if sys.platform in ('darwin', 'win32'):
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+def CheckFileXMLPropertyList(file):
+ output = subprocess.check_output(['file', file])
+ # The double space after XML is intentional.
+  if not 'XML  document text' in output:
+ print('File: Expected XML document text, got %s' % output)
+ test.fail_test()
+
+def ExpectEq(expected, actual):
+ if expected != actual:
+ print('Expected "%s", got "%s"' % (expected, actual), file=sys.stderr)
+ test.fail_test()
+
+def ls(path):
+ '''Returns a list of all files in a directory, relative to the directory.'''
+ result = []
+ for dirpath, _, files in os.walk(path):
+ for f in files:
+ result.append(os.path.join(dirpath, f)[len(path) + 1:])
+ return result
+
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp', chdir='app-bundle')
+
+ test.build('test.gyp', test.ALL, chdir='app-bundle')
+
+ # Binary
+ test.built_file_must_exist('Test App Gyp.app/Contents/MacOS/Test App Gyp',
+ chdir='app-bundle')
+
+ # Info.plist
+ info_plist = test.built_file_path('Test App Gyp.app/Contents/Info.plist',
+ chdir='app-bundle')
+ test.must_exist(info_plist)
+ test.must_contain(info_plist, 'com.google.Test-App-Gyp') # Variable expansion
+  test.must_not_contain(info_plist, '${MACOSX_DEPLOYMENT_TARGET}')
+ CheckFileXMLPropertyList(info_plist)
+
+ if test.format != 'make':
+ # TODO: Synthesized plist entries aren't hooked up in the make generator.
+ machine = subprocess.check_output(['sw_vers', '-buildVersion']).rstrip('\n')
+ plist = plistlib.readPlist(info_plist)
+ ExpectEq(machine, plist['BuildMachineOSBuild'])
+
+ # Prior to Xcode 5.0.0, SDKROOT (and thus DTSDKName) was only defined if
+ # set in the Xcode project file. Starting with that version, it is always
+ # defined.
+ expected = ''
+ if TestMac.Xcode.Version() >= '0500':
+ version = TestMac.Xcode.SDKVersion()
+ expected = 'macosx' + version
+ ExpectEq(expected, plist['DTSDKName'])
+ sdkbuild = TestMac.Xcode.SDKBuild()
+ if not sdkbuild:
+ # Above command doesn't work in Xcode 4.2.
+ sdkbuild = plist['BuildMachineOSBuild']
+ ExpectEq(sdkbuild, plist['DTSDKBuild'])
+ ExpectEq(TestMac.Xcode.Version(), plist['DTXcode'])
+ ExpectEq(TestMac.Xcode.Build(), plist['DTXcodeBuild'])
+
+ # Resources
+ strings_files = ['InfoPlist.strings', 'utf-16be.strings', 'utf-16le.strings']
+ for f in strings_files:
+ strings = test.built_file_path(
+ os.path.join('Test App Gyp.app/Contents/Resources/English.lproj', f),
+ chdir='app-bundle')
+ test.must_exist(strings)
+    # Xcode writes UTF-16LE with a BOM.
+ contents = open(strings, 'rb').read()
+ if not contents.startswith('\xff\xfe' + '/* Localized'.encode('utf-16le')):
+ test.fail_test()
+
+ test.built_file_must_exist(
+ 'Test App Gyp.app/Contents/Resources/English.lproj/MainMenu.nib',
+ chdir='app-bundle')
+
+ # Packaging
+ test.built_file_must_exist('Test App Gyp.app/Contents/PkgInfo',
+ chdir='app-bundle')
+ test.built_file_must_match('Test App Gyp.app/Contents/PkgInfo', 'APPLause',
+ chdir='app-bundle')
+
+ # Check that no other files get added to the bundle.
+ if set(ls(test.built_file_path('Test App Gyp.app', chdir='app-bundle'))) != \
+ set(['Contents/MacOS/Test App Gyp',
+ 'Contents/Info.plist',
+ 'Contents/Resources/English.lproj/MainMenu.nib',
+ 'Contents/PkgInfo',
+ ] +
+ [os.path.join('Contents/Resources/English.lproj', f)
+ for f in strings_files]):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-archs.py b/third_party/python/gyp/test/mac/gyptest-archs.py
new file mode 100644
index 0000000000..c56f20c4d6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-archs.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests things related to ARCHS.
+"""
+
+import TestGyp
+import TestMac
+
+import re
+import subprocess
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test-no-archs.gyp', chdir='archs')
+ test.build('test-no-archs.gyp', test.ALL, chdir='archs')
+ result_file = test.built_file_path('Test', chdir='archs')
+ test.must_exist(result_file)
+
+ if TestMac.Xcode.Version() >= '0500':
+ expected_type = ['x86_64']
+ else:
+ expected_type = ['i386']
+ TestMac.CheckFileType(test, result_file, expected_type)
+
+ test.run_gyp('test-valid-archs.gyp', chdir='archs')
+ test.build('test-valid-archs.gyp', test.ALL, chdir='archs')
+ result_file = test.built_file_path('Test', chdir='archs')
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, ['x86_64'])
+
+ test.run_gyp('test-archs-x86_64.gyp', chdir='archs')
+ test.build('test-archs-x86_64.gyp', test.ALL, chdir='archs')
+ result_file = test.built_file_path('Test64', chdir='archs')
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, ['x86_64'])
+
+ test.run_gyp('test-dependencies.gyp', chdir='archs')
+ test.build('test-dependencies.gyp', target=test.ALL, chdir='archs')
+ products = ['c_standalone', 'd_standalone']
+ for product in products:
+ result_file = test.built_file_path(
+ product, chdir='archs', type=test.STATIC_LIB)
+ test.must_exist(result_file)
+
+ if test.format != 'make':
+    # Build all targets except 'exe_32_64_no_sources'; that target does build
+    # but should not cause an error when the ninja files are generated.
+ targets = [
+ 'static_32_64', 'shared_32_64', 'shared_32_64_bundle',
+ 'module_32_64', 'module_32_64_bundle',
+ 'exe_32_64', 'exe_32_64_bundle', 'precompiled_prefix_header_mm_32_64',
+ ]
+
+ test.run_gyp('test-archs-multiarch.gyp', chdir='archs')
+
+ for target in targets:
+ test.build('test-archs-multiarch.gyp', target=target, chdir='archs')
+
+ result_file = test.built_file_path(
+ 'static_32_64', chdir='archs', type=test.STATIC_LIB)
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
+
+ result_file = test.built_file_path(
+ 'shared_32_64', chdir='archs', type=test.SHARED_LIB)
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
+
+ result_file = test.built_file_path('My Framework.framework/My Framework',
+ chdir='archs')
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
+ # Check that symbol "_x" made it into both versions of the binary:
+ if not all(['D _x' in subprocess.check_output(
+ ['nm', '-arch', arch, result_file]).decode('utf-8')
+ for arch in ['i386', 'x86_64']]):
+      # This can only fail flakily, due to process-ordering issues. If it
+      # does fail, something is broken; it's not the test that's at fault.
+ test.fail_test()
+
+ result_file = test.built_file_path(
+ 'exe_32_64', chdir='archs', type=test.EXECUTABLE)
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
+
+ result_file = test.built_file_path('Test App.app/Contents/MacOS/Test App',
+ chdir='archs')
+ test.must_exist(result_file)
+ TestMac.CheckFileType(test, result_file, ['i386', 'x86_64'])
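
The universal-binary checks above go through TestMac.CheckFileType. As a
standalone illustration of the same kind of inspection (the helper is ours;
assumes the Xcode command-line tool 'lipo' is installed):

    import subprocess

    def archs_in(binary):
        out = subprocess.check_output(['lipo', '-info', binary])
        # 'lipo -info' ends its report with the architecture list,
        # e.g. '... are: i386 x86_64', for thin and fat files alike.
        return out.decode('utf-8').strip().split(':')[-1].split()

A fat binary produced by one of the *_32_64 targets would be expected to
report both i386 and x86_64.
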
diff --git a/third_party/python/gyp/test/mac/gyptest-bundle-resources.py b/third_party/python/gyp/test/mac/gyptest-bundle-resources.py
new file mode 100644
index 0000000000..275cdbaa54
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-bundle-resources.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies things related to bundle resources.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import stat
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+def check_attribs(path, expected_exec_bit):
+ out_path = test.built_file_path(
+ os.path.join('resource.app/Contents/Resources', path), chdir=CHDIR)
+
+ in_stat = os.stat(os.path.join(CHDIR, path))
+ out_stat = os.stat(out_path)
+ if in_stat.st_mtime == out_stat.st_mtime:
+ test.fail_test()
+ if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
+ test.fail_test()
+
+
+if sys.platform == 'darwin':
+ # set |match| to ignore build stderr output.
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'bundle-resources'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ test.built_file_must_match('resource.app/Contents/Resources/secret.txt',
+ 'abc\n', chdir=CHDIR)
+ test.built_file_must_match('source_rule.app/Contents/Resources/secret.txt',
+ 'ABC\n', chdir=CHDIR)
+
+ test.built_file_must_match(
+ 'resource.app/Contents/Resources/executable-file.sh',
+ '#!/bin/bash\n'
+ '\n'
+ 'echo echo echo echo cho ho o o\n', chdir=CHDIR)
+
+ check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
+ check_attribs('secret.txt', expected_exec_bit=0)
+
+ # TODO(thakis): This currently fails with make.
+ if test.format != 'make':
+ test.built_file_must_match(
+ 'resource_rule.app/Contents/Resources/secret.txt', 'ABC\n', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-cflags.py b/third_party/python/gyp/test/mac/gyptest-cflags.py
new file mode 100644
index 0000000000..17afd15665
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-cflags.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that compile-time flags work.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+ CHDIR = 'cflags'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-clang-cxx-language-standard.py b/third_party/python/gyp/test/mac/gyptest-clang-cxx-language-standard.py
new file mode 100644
index 0000000000..75c6c74c97
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-clang-cxx-language-standard.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that CLANG_CXX_LANGUAGE_STANDARD works.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode'])
+
+ test.run_gyp('clang-cxx-language-standard.gyp',
+ chdir='clang-cxx-language-standard')
+
+ test.build('clang-cxx-language-standard.gyp', test.ALL,
+ chdir='clang-cxx-language-standard')
+
+ test.pass_test()
+
diff --git a/third_party/python/gyp/test/mac/gyptest-clang-cxx-library.py b/third_party/python/gyp/test/mac/gyptest-clang-cxx-library.py
new file mode 100644
index 0000000000..177d6376ff
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-clang-cxx-library.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that CLANG_CXX_LIBRARY works.
+"""
+
+import TestGyp
+import TestMac
+
+import sys
+
+if sys.platform == 'darwin':
+ # Xcode 4.2 on OS X 10.6 doesn't install the libc++ headers, don't run this
+ # test there.
+ if TestMac.Xcode.Version() <= '0420':
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode'])
+
+ if test.format == 'make':
+ # This is failing because of a deprecation warning for libstdc++.
+ test.skip_test() # bug=533
+
+ test.run_gyp('clang-cxx-library.gyp', chdir='clang-cxx-library')
+ test.build('clang-cxx-library.gyp', test.ALL, chdir='clang-cxx-library')
+
+ test.pass_test()
+
diff --git a/third_party/python/gyp/test/mac/gyptest-copies-with-xcode-envvars.py b/third_party/python/gyp/test/mac/gyptest-copies-with-xcode-envvars.py
new file mode 100644
index 0000000000..80b0ecfa28
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-copies-with-xcode-envvars.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Mark Callow. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that files are copied to the correct destinations when those
+destinations are specified using environment variables available in
+Xcode's PBXCopyFilesBuildPhase.
+"""
+
+import TestGyp
+
+import os
+import stat
+import sys
+
+
+test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode'])
+
+if sys.platform == 'darwin':
+ test.run_gyp('copies-with-xcode-envvars.gyp',
+ chdir='copies-with-xcode-envvars')
+
+ test.build('copies-with-xcode-envvars.gyp', chdir='copies-with-xcode-envvars')
+
+ wrapper_name = 'copies-with-xcode-envvars.app/'
+ contents_path = wrapper_name + 'Contents/'
+ out_path = test.built_file_path('file0', chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file0 contents\n')
+ out_path = test.built_file_path(wrapper_name + 'file1',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file1 contents\n')
+ out_path = test.built_file_path(contents_path + 'MacOS/file2',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file2 contents\n')
+ out_path = test.built_file_path(contents_path + 'Resources/file3',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file3 contents\n')
+ out_path = test.built_file_path(contents_path + 'Resources/testimages/file4',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file4 contents\n')
+ out_path = test.built_file_path(contents_path + 'Resources/Java/file5',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file5 contents\n')
+ out_path = test.built_file_path(contents_path + 'Frameworks/file6',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file6 contents\n')
+ out_path = test.built_file_path(contents_path + 'Frameworks/file7',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file7 contents\n')
+ out_path = test.built_file_path(contents_path + 'SharedFrameworks/file8',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file8 contents\n')
+ out_path = test.built_file_path(contents_path + 'SharedSupport/file9',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file9 contents\n')
+ out_path = test.built_file_path(contents_path + 'PlugIns/file10',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file10 contents\n')
+ out_path = test.built_file_path(contents_path + 'XPCServices/file11',
+ chdir='copies-with-xcode-envvars')
+ test.must_contain(out_path, 'file11 contents\n')
+ test.pass_test()
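
The file1..file11 checks above all follow one pattern. A table-driven sketch
of the same assertions (illustrative only), mapping each file to the Xcode
copy-phase destination it exercises:

    destinations = {
        'file1':  wrapper_name,
        'file2':  contents_path + 'MacOS/',
        'file3':  contents_path + 'Resources/',
        'file4':  contents_path + 'Resources/testimages/',
        'file5':  contents_path + 'Resources/Java/',
        'file6':  contents_path + 'Frameworks/',
        'file7':  contents_path + 'Frameworks/',
        'file8':  contents_path + 'SharedFrameworks/',
        'file9':  contents_path + 'SharedSupport/',
        'file10': contents_path + 'PlugIns/',
        'file11': contents_path + 'XPCServices/',
    }
    for name, prefix in destinations.items():
        out_path = test.built_file_path(prefix + name,
                                        chdir='copies-with-xcode-envvars')
        test.must_contain(out_path, name + ' contents\n')
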
diff --git a/third_party/python/gyp/test/mac/gyptest-copies.py b/third_party/python/gyp/test/mac/gyptest-copies.py
new file mode 100755
index 0000000000..838c62dc37
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-copies.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that 'copies' with app bundles are handled correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import sys
+import time
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('framework.gyp', chdir='framework')
+
+ test.build('framework.gyp', 'copy_target', chdir='framework')
+
+ # Check that the copy succeeded.
+ test.built_file_must_exist(
+ 'Test Framework.framework/foo/Dependency Bundle.framework',
+ chdir='framework')
+ test.built_file_must_exist(
+ 'Test Framework.framework/foo/Dependency Bundle.framework/Versions/A',
+ chdir='framework')
+ test.built_file_must_exist(
+ 'Test Framework.framework/Versions/A/Libraries/empty.c',
+ chdir='framework')
+
+ # Verify BUILT_FRAMEWORKS_DIR is set and working.
+ test.build('framework.gyp', 'copy_embedded', chdir='framework')
+
+ test.built_file_must_exist(
+ 'Embedded/Test Framework.framework', chdir='framework')
+
+ # Check that rebuilding the target a few times works.
+ dep_bundle = test.built_file_path('Dependency Bundle.framework',
+ chdir='framework')
+ mtime = os.path.getmtime(dep_bundle)
+ atime = os.path.getatime(dep_bundle)
+ for i in range(3):
+ os.utime(dep_bundle, (atime + i * 1000, mtime + i * 1000))
+ test.build('framework.gyp', 'copy_target', chdir='framework')
+
+
+ # Check that actions ran.
+ test.built_file_must_exist('action_file', chdir='framework')
+
+ # Test that a copy with the "Code Sign on Copy" flag on succeeds.
+ test.build('framework.gyp', 'copy_target_code_sign', chdir='framework')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-copy-dylib.py b/third_party/python/gyp/test/mac/gyptest-copy-dylib.py
new file mode 100644
index 0000000000..253623d1c6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-copy-dylib.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that dylibs can be copied into app bundles.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp', chdir='copy-dylib')
+
+ test.build('test.gyp', 'test_app', chdir='copy-dylib')
+
+ test.built_file_must_exist(
+ 'Test App.app/Contents/Resources/libmy_dylib.dylib', chdir='copy-dylib')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-debuginfo.py b/third_party/python/gyp/test/mac/gyptest-debuginfo.py
new file mode 100755
index 0000000000..a0e9438e2a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-debuginfo.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests things related to debug information generation.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp', chdir='debuginfo')
+
+ test.build('test.gyp', test.ALL, chdir='debuginfo')
+
+ test.built_file_must_exist('libnonbundle_shared_library.dylib.dSYM',
+ chdir='debuginfo')
+ test.built_file_must_exist('nonbundle_loadable_module.so.dSYM',
+ chdir='debuginfo')
+ test.built_file_must_exist('nonbundle_executable.dSYM',
+ chdir='debuginfo')
+
+ test.built_file_must_exist('bundle_shared_library.framework.dSYM',
+ chdir='debuginfo')
+ test.built_file_must_exist('bundle_loadable_module.bundle.dSYM',
+ chdir='debuginfo')
+ test.built_file_must_exist('My App.app.dSYM',
+ chdir='debuginfo')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-depend-on-bundle.py b/third_party/python/gyp/test/mac/gyptest-depend-on-bundle.py
new file mode 100644
index 0000000000..b8b06d4cc0
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-depend-on-bundle.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a dependency on a bundle causes the whole bundle to be built.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp', chdir='depend-on-bundle')
+
+ test.build('test.gyp', 'dependent_on_bundle', chdir='depend-on-bundle')
+
+ # Binary itself.
+ test.built_file_must_exist('dependent_on_bundle', chdir='depend-on-bundle')
+
+ # Bundle dependency.
+ test.built_file_must_exist(
+ 'my_bundle.framework/Versions/A/my_bundle',
+ chdir='depend-on-bundle')
+ test.built_file_must_exist( # package_framework
+ 'my_bundle.framework/my_bundle',
+ chdir='depend-on-bundle')
+ test.built_file_must_exist( # plist
+ 'my_bundle.framework/Versions/A/Resources/Info.plist',
+ chdir='depend-on-bundle')
+ test.built_file_must_exist(
+ 'my_bundle.framework/Versions/A/Resources/English.lproj/' # Resources
+ 'InfoPlist.strings',
+ chdir='depend-on-bundle')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-deployment-target.py b/third_party/python/gyp/test/mac/gyptest-deployment-target.py
new file mode 100644
index 0000000000..c7eabde6fe
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-deployment-target.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that MACOSX_DEPLOYMENT_TARGET works.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode'])
+
+ if test.format == 'make':
+ # This is failing because of a deprecation warning for libstdc++.
+ test.skip_test() # bug=533
+
+ test.run_gyp('deployment-target.gyp', chdir='deployment-target')
+
+ test.build('deployment-target.gyp', test.ALL, chdir='deployment-target')
+
+ test.pass_test()
+
diff --git a/third_party/python/gyp/test/mac/gyptest-framework-dirs.py b/third_party/python/gyp/test/mac/gyptest-framework-dirs.py
new file mode 100644
index 0000000000..a1ae54c57f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-framework-dirs.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that it is possible to build an object that depends on a
+PrivateFramework.
+"""
+
+import os
+import sys
+import TestGyp
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'framework-dirs'
+ test.run_gyp('framework-dirs.gyp', chdir=CHDIR)
+ test.build('framework-dirs.gyp', 'calculate', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-framework-headers.py b/third_party/python/gyp/test/mac/gyptest-framework-headers.py
new file mode 100644
index 0000000000..aa13a742cd
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-framework-headers.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that mac_framework_headers works properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ # TODO(thakis): Make this work with ninja, make. http://crbug.com/129013
+ test = TestGyp.TestGyp(formats=['xcode'])
+
+ CHDIR = 'framework-headers'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ # Test that headers are installed for frameworks
+ test.build('test.gyp', 'test_framework_headers_framework', chdir=CHDIR)
+
+ test.built_file_must_exist(
+ 'TestFramework.framework/Versions/A/TestFramework', chdir=CHDIR)
+
+ test.built_file_must_exist(
+ 'TestFramework.framework/Versions/A/Headers/myframework.h', chdir=CHDIR)
+
+ # Test that headers are installed for static libraries.
+ test.build('test.gyp', 'test_framework_headers_static', chdir=CHDIR)
+
+ test.built_file_must_exist('libTestLibrary.a', chdir=CHDIR)
+
+ test.built_file_must_exist('include/myframework.h', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-framework.py b/third_party/python/gyp/test/mac/gyptest-framework.py
new file mode 100755
index 0000000000..faf05cf313
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-framework.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that app bundles are built correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+def ls(path):
+ '''Returns a list of all files in a directory, relative to the directory.'''
+ result = []
+ for dirpath, _, files in os.walk(path):
+ for f in files:
+ result.append(os.path.join(dirpath, f)[len(path) + 1:])
+ return result
+
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('framework.gyp', chdir='framework')
+
+ test.build('framework.gyp', 'test_framework', chdir='framework')
+
+ # Binary
+ test.built_file_must_exist(
+ 'Test Framework.framework/Versions/A/Test Framework',
+ chdir='framework')
+
+ # Info.plist
+ info_plist = test.built_file_path(
+ 'Test Framework.framework/Versions/A/Resources/Info.plist',
+ chdir='framework')
+ test.must_exist(info_plist)
+ test.must_contain(info_plist, 'com.yourcompany.Test_Framework')
+
+ # Resources
+ test.built_file_must_exist(
+ 'Test Framework.framework/Versions/A/Resources/English.lproj/'
+ 'InfoPlist.strings',
+ chdir='framework')
+
+ # Symlinks created by packaging process
+ test.built_file_must_exist('Test Framework.framework/Versions/Current',
+ chdir='framework')
+ test.built_file_must_exist('Test Framework.framework/Resources',
+ chdir='framework')
+ test.built_file_must_exist('Test Framework.framework/Test Framework',
+ chdir='framework')
+ # PkgInfo.
+ test.built_file_must_not_exist(
+ 'Test Framework.framework/Versions/A/Resources/PkgInfo',
+ chdir='framework')
+
+ # Check that no other files get added to the bundle.
+ if set(ls(test.built_file_path('Test Framework.framework',
+ chdir='framework'))) != \
+ set(['Versions/A/Test Framework',
+ 'Versions/A/Resources/Info.plist',
+ 'Versions/A/Resources/English.lproj/InfoPlist.strings',
+ 'Test Framework',
+ 'Versions/A/Libraries/empty.c', # Written by a gyp action.
+ ]):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-global-settings.py b/third_party/python/gyp/test/mac/gyptest-global-settings.py
new file mode 100644
index 0000000000..f4ed16630e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-global-settings.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that the global xcode_settings processing doesn't throw.
+Regression test for http://crbug.com/109163
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+  # The xcode-ninja generator incorrectly handles gypfiles which are not
+  # at the project root.
+ # cf. https://code.google.com/p/gyp/issues/detail?id=460
+ if test.format == 'xcode-ninja':
+ test.skip_test()
+
+ test.run_gyp('src/dir2/dir2.gyp', chdir='global-settings', depth='src')
+ # run_gyp shouldn't throw.
+
+ # Check that BUILT_PRODUCTS_DIR was set correctly, too.
+ test.build('dir2/dir2.gyp', 'dir2_target', chdir='global-settings/src',
+ SYMROOT='../build')
+ test.built_file_must_exist('file.txt', chdir='global-settings/src')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-identical-name.py b/third_party/python/gyp/test/mac/gyptest-identical-name.py
new file mode 100644
index 0000000000..0d358df921
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-identical-name.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies libraries (in identical-name) are properly handled by Xcode.
+
+The names for all libraries participating in this build are:
+libtestlib.a - identical-name/testlib
+libtestlib.a - identical-name/proxy/testlib
+libproxy.a - identical-name/proxy
+The first two libs produce a hash collision in Xcode when Gyp is executed,
+because they have the same name and would be copied to the same directory with
+Xcode default settings.
+For this scenario to work, one needs to change the Xcode variables SYMROOT and
+CONFIGURATION_BUILD_DIR. Setting these to per-lib-unique directories avoids
+copying the libs into the same directory.
+
+The test consists of two steps. The first one verifies that by setting both
+vars, there is no hash collision anymore during Gyp execution and that the libs
+can actually be built. The second one verifies that there is still a hash
+collision if the vars are not set and thus the current behavior is preserved.
+"""
+
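+# For reference, a hedged sketch of the per-library settings the passing
+# fixture is assumed to use (values illustrative, not copied from test.gyp):
+#
+#   'xcode_settings': {
+#     'SYMROOT': '<(DEPTH)/build/<(_target_name)',
+#     'CONFIGURATION_BUILD_DIR': '$(SYMROOT)/$(CONFIGURATION)',
+#   },
+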
+import TestGyp
+
+import sys
+
+def IgnoreOutput(string, expected_string):
+ return True
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['xcode'])
+
+
+ test.run_gyp('test.gyp', chdir='identical-name')
+ test.build('test.gyp', test.ALL, chdir='identical-name')
+
+ test.run_gyp('test-should-fail.gyp', chdir='identical-name')
+ test.built_file_must_not_exist('test-should-fail.xcodeproj')
+
+ test.pass_test()
+
diff --git a/third_party/python/gyp/test/mac/gyptest-infoplist-process.py b/third_party/python/gyp/test/mac/gyptest-infoplist-process.py
new file mode 100755
index 0000000000..24260e1c34
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-infoplist-process.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies the Info.plist preprocessor functionality.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'infoplist-process'
+ INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
+
+ # First process both keys.
+ test.set_configuration('One')
+ test.run_gyp('test1.gyp', chdir=CHDIR)
+ test.build('test1.gyp', test.ALL, chdir=CHDIR)
+ info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
+ test.must_exist(info_plist)
+ test.must_contain(info_plist, 'Foo')
+ test.must_contain(info_plist, 'Bar')
+
+ # Then process a single key.
+ test.set_configuration('Two')
+ test.run_gyp('test2.gyp', chdir=CHDIR)
+ test.build('test2.gyp', chdir=CHDIR)
+ info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
+ test.must_exist(info_plist)
+ test.must_contain(info_plist, 'com.google.Test') # Normal expansion works.
+ test.must_contain(info_plist, 'Foo (Bar)')
+ test.must_contain(info_plist, 'PROCESSED_KEY2')
+
+ # Then turn off the processor.
+ test.set_configuration('Three')
+ test.run_gyp('test3.gyp', chdir=CHDIR)
+ test.build('test3.gyp', chdir=CHDIR)
+ info_plist = test.built_file_path('Test App.app/Contents/Info.plist',
+ chdir=CHDIR)
+ test.must_exist(info_plist)
+ test.must_contain(info_plist, 'com.google.Test') # Normal expansion works.
+ test.must_contain(info_plist, 'PROCESSED_KEY1')
+ test.must_contain(info_plist, 'PROCESSED_KEY2')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-installname.py b/third_party/python/gyp/test/mac/gyptest-installname.py
new file mode 100644
index 0000000000..17831aeaf4
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-installname.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that LD_DYLIB_INSTALL_NAME and DYLIB_INSTALL_NAME_BASE are handled
+correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import re
+import subprocess
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'installname'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ def GetInstallname(p):
+ p = test.built_file_path(p, chdir=CHDIR)
+ r = re.compile(r'cmd LC_ID_DYLIB.*?name (.*?) \(offset \d+\)', re.DOTALL)
+ proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
+    o = proc.communicate()[0].decode('utf-8')
+ assert not proc.returncode
+ m = r.search(o)
+ assert m
+ return m.group(1)
+
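+  # The load command matched above looks roughly like this in `otool -l`
+  # output (illustrative sample):
+  #          cmd LC_ID_DYLIB
+  #      cmdsize 56
+  #         name /usr/local/lib/libdefault_installname.dylib (offset 24)
+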
+ if (GetInstallname('libdefault_installname.dylib') !=
+ '/usr/local/lib/libdefault_installname.dylib'):
+ test.fail_test()
+
+ if (GetInstallname('My Framework.framework/My Framework') !=
+ '/Library/Frameworks/My Framework.framework/'
+ 'Versions/A/My Framework'):
+ test.fail_test()
+
+ if (GetInstallname('libexplicit_installname.dylib') !=
+ 'Trapped in a dynamiclib factory'):
+ test.fail_test()
+
+ if (GetInstallname('libexplicit_installname_base.dylib') !=
+ '@executable_path/../../../libexplicit_installname_base.dylib'):
+ test.fail_test()
+
+ if (GetInstallname('My Other Framework.framework/My Other Framework') !=
+ '@executable_path/../../../My Other Framework.framework/'
+ 'Versions/A/My Other Framework'):
+ test.fail_test()
+
+ if (GetInstallname('libexplicit_installname_with_base.dylib') !=
+ '/usr/local/lib/libexplicit_installname_with_base.dylib'):
+ test.fail_test()
+
+ if (GetInstallname('libexplicit_installname_with_explicit_base.dylib') !=
+ '@executable_path/../libexplicit_installname_with_explicit_base.dylib'):
+ test.fail_test()
+
+ if (GetInstallname('libboth_base_and_installname.dylib') !=
+ 'Still trapped in a dynamiclib factory'):
+ test.fail_test()
+
+ if (GetInstallname('install_name_with_info_plist.framework/'
+ 'install_name_with_info_plist') !=
+ '/Library/Frameworks/install_name_with_info_plist.framework/'
+ 'Versions/A/install_name_with_info_plist'):
+ test.fail_test()
+
+ if ('DYLIB_INSTALL_NAME_BASE:standardizepath: command not found' in
+ test.stdout()):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-kext.py b/third_party/python/gyp/test/mac/gyptest-kext.py
new file mode 100755
index 0000000000..56790bdb65
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-kext.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that kext bundles are built correctly.
+"""
+
+import TestGyp
+import TestMac
+
+import os
+import plistlib
+import subprocess
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['xcode'])
+ test.run_gyp('kext.gyp', chdir='kext')
+ test.build('kext.gyp', test.ALL, chdir='kext')
+ test.built_file_must_exist('GypKext.kext/Contents/MacOS/GypKext',
+ chdir='kext')
+ test.built_file_must_exist('GypKext.kext/Contents/Info.plist',
+ chdir='kext')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-ldflags-passed-to-libtool.py b/third_party/python/gyp/test/mac/gyptest-ldflags-passed-to-libtool.py
new file mode 100644
index 0000000000..e24e305d9f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-ldflags-passed-to-libtool.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that OTHER_LDFLAGS is passed to libtool.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
+ match = lambda a, b: True)
+
+ build_error_code = {
+ 'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
+ 'make': 2,
+ 'ninja': 1,
+ 'xcode-ninja': [1, 65],
+ }[test.format]
+
+ CHDIR = 'ldflags-libtool'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', 'ldflags_passed_to_libtool', chdir=CHDIR,
+ status=build_error_code)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-ldflags.py b/third_party/python/gyp/test/mac/gyptest-ldflags.py
new file mode 100644
index 0000000000..af44b8c5a0
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-ldflags.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that filenames passed to various linker flags are converted into
+build-directory relative paths correctly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+  # The xcode-ninja generator incorrectly handles gypfiles which are not
+  # at the project root.
+ # cf. https://code.google.com/p/gyp/issues/detail?id=460
+ if test.format == 'xcode-ninja':
+ test.skip_test()
+
+ CHDIR = 'ldflags'
+ test.run_gyp('subdirectory/test.gyp', chdir=CHDIR)
+
+ test.build('subdirectory/test.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
+
+
+# These flags from `man ld` could show up in OTHER_LDFLAGS and need path
+# translation.
+#
+# Done:
+# -exported_symbols_list filename
+# -unexported_symbols_list file
+# -reexported_symbols_list file
+# -sectcreate segname sectname file
+#
+# Will be done on demand:
+# -weak_library path_to_library
+# -reexport_library path_to_library
+# -lazy_library path_to_library
+# -upward_library path_to_library
+# -syslibroot rootdir
+# -framework name[,suffix]
+# -weak_framework name[,suffix]
+# -reexport_framework name[,suffix]
+# -lazy_framework name[,suffix]
+# -upward_framework name[,suffix]
+# -force_load path_to_archive
+# -filelist file[,dirname]
+# -dtrace file
+# -order_file file # should use ORDER_FILE
+# -exported_symbols_order file
+# -bundle_loader executable # should use BUNDLE_LOADER
+# -alias_list filename
+# -seg_addr_table filename
+# -dylib_file install_name:file_name
+# -interposable_list filename
+# -object_path_lto filename
+#
+#
+# obsolete:
+# -sectorder segname sectname orderfile
+# -seg_addr_table_filename path
+#
+#
+# ??:
+# -map map_file_path
+# -sub_library library_name
+# -sub_umbrella framework_name
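+#
+# A hedged sketch of the rewriting the "Done" flags above rely on (the real
+# logic lives in the generators; names here are illustrative, not gyp API):
+#
+#   def map_ldflag_paths(ldflags, to_build_relative):
+#     out, expect_path = [], False
+#     for tok in ldflags:
+#       out.append(to_build_relative(tok) if expect_path else tok)
+#       # Note: -sectcreate takes 'segname sectname file'; only its third
+#       # argument is a path, so it needs extra state beyond this sketch.
+#       expect_path = tok in ('-exported_symbols_list',
+#                             '-unexported_symbols_list',
+#                             '-reexported_symbols_list')
+#     return out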
diff --git a/third_party/python/gyp/test/mac/gyptest-libraries.py b/third_party/python/gyp/test/mac/gyptest-libraries.py
new file mode 100755
index 0000000000..5ea4faf6ea
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-libraries.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies libraries (in link_settings) are properly found.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+  # The xcode-ninja generator incorrectly handles gypfiles which are not
+  # at the project root.
+ # cf. https://code.google.com/p/gyp/issues/detail?id=460
+ if test.format == 'xcode-ninja':
+ test.skip_test()
+
+ test.skip_test() # bug=535
+
+ test.run_gyp('subdir/test.gyp', chdir='libraries')
+
+ test.build('subdir/test.gyp', test.ALL, chdir='libraries')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-libtool-zero.py b/third_party/python/gyp/test/mac/gyptest-libtool-zero.py
new file mode 100644
index 0000000000..ae5b7e635b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-libtool-zero.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that libraries have a proper mtime.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'libtool-zero'
+
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', 'mylib', chdir=CHDIR)
+
+ test.up_to_date('test.gyp', 'mylib', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-loadable-module-bundle-product-extension.py b/third_party/python/gyp/test/mac/gyptest-loadable-module-bundle-product-extension.py
new file mode 100644
index 0000000000..7a60ca2d17
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-loadable-module-bundle-product-extension.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests that loadable_modules don't collide when using the same name with
+different file extensions.
+"""
+
+import TestGyp
+
+import os
+import struct
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'loadable-module-bundle-product-extension'
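+  # '-G' passes a generator-specific flag; xcode_ninja_target_pattern controls
+  # which targets the xcode-ninja wrapper project includes (here, all of them).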
+ test.run_gyp('test.gyp',
+ '-G', 'xcode_ninja_target_pattern=^.*$',
+ chdir=CHDIR)
+
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ test.must_exist(test.built_file_path('Collide.foo', chdir=CHDIR))
+ test.must_exist(test.built_file_path('Collide.bar', chdir=CHDIR))
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-loadable-module.py b/third_party/python/gyp/test/mac/gyptest-loadable-module.py
new file mode 100755
index 0000000000..77dde1d6cd
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-loadable-module.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests that a loadable_module target is built correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import struct
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'loadable-module'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ # Binary.
+ binary = test.built_file_path(
+ 'test_loadable_module.plugin/Contents/MacOS/test_loadable_module',
+ chdir=CHDIR)
+ test.must_exist(binary)
+ MH_BUNDLE = 8
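+  # A Mach-O header starts with magic, cputype, cpusubtype, filetype, ...;
+  # word 3 (0-based) of the '4I' unpack is the filetype, and MH_BUNDLE marks
+  # a loadable bundle.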
+ if struct.unpack('4I', open(binary, 'rb').read(16))[3] != MH_BUNDLE:
+ test.fail_test()
+
+ # Info.plist.
+ info_plist = test.built_file_path(
+ 'test_loadable_module.plugin/Contents/Info.plist', chdir=CHDIR)
+ test.must_exist(info_plist)
+ test.must_contain(info_plist, """
+ <key>CFBundleExecutable</key>
+ <string>test_loadable_module</string>
+""")
+
+ # PkgInfo.
+ test.built_file_must_not_exist(
+ 'test_loadable_module.plugin/Contents/PkgInfo', chdir=CHDIR)
+ test.built_file_must_not_exist(
+ 'test_loadable_module.plugin/Contents/Resources', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-lto.py b/third_party/python/gyp/test/mac/gyptest-lto.py
new file mode 100644
index 0000000000..d37068f336
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-lto.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that LTO flags work.
+"""
+
+import TestGyp
+
+import os
+import re
+import subprocess
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'lto'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ def ObjPath(srcpath, target):
+ # TODO: Move this into TestGyp if it's needed elsewhere.
+ if test.format == 'xcode':
+ return os.path.join(CHDIR, 'build', 'test.build', 'Default',
+ target + '.build', 'Objects-normal', 'x86_64',
+ srcpath + '.o')
+ elif 'ninja' in test.format: # ninja, xcode-ninja
+ return os.path.join(CHDIR, 'out', 'Default', 'obj',
+ target + '.' + srcpath + '.o')
+ elif test.format == 'make':
+ return os.path.join(CHDIR, 'out', 'Default', 'obj.target',
+ target, srcpath + '.o')
+
+ def ObjType(p, t_expected):
+ o = subprocess.check_output(['file', p]).decode('utf-8')
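+    # `file` prints lines like 'foo.o: LLVM bitcode' or
+    # 'foo.o: Mach-O 64-bit object x86_64' (illustrative samples).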
+ objtype = 'unknown'
+ if ': Mach-O ' in o:
+ objtype = 'mach-o'
+ elif ': LLVM bitcode' in o:
+ objtype = 'llvm'
+ if objtype != t_expected:
+ print('Expected %s, got %s' % (t_expected, objtype))
+ test.fail_test()
+
+ ObjType(ObjPath('cfile', 'lto'), 'llvm')
+ ObjType(ObjPath('ccfile', 'lto'), 'llvm')
+ ObjType(ObjPath('mfile', 'lto'), 'llvm')
+ ObjType(ObjPath('mmfile', 'lto'), 'llvm')
+ ObjType(ObjPath('asmfile', 'lto'), 'mach-o')
+
+ ObjType(ObjPath('cfile', 'lto_static'), 'llvm')
+ ObjType(ObjPath('ccfile', 'lto_static'), 'llvm')
+ ObjType(ObjPath('mfile', 'lto_static'), 'llvm')
+ ObjType(ObjPath('mmfile', 'lto_static'), 'llvm')
+ ObjType(ObjPath('asmfile', 'lto_static'), 'mach-o')
+
+ test.pass_test()
+
+  # TODO: Probably test for -object_path_lto too; without it, dsymutil may
+  # not be useful.
diff --git a/third_party/python/gyp/test/mac/gyptest-missing-cfbundlesignature.py b/third_party/python/gyp/test/mac/gyptest-missing-cfbundlesignature.py
new file mode 100644
index 0000000000..be66492467
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-missing-cfbundlesignature.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that an Info.plist with CFBundleSignature works.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp', chdir='missing-cfbundlesignature')
+ test.build('test.gyp', test.ALL, chdir='missing-cfbundlesignature')
+
+ test.built_file_must_match('mytarget.app/Contents/PkgInfo', 'APPL????',
+ chdir='missing-cfbundlesignature')
+
+ test.built_file_must_match('myothertarget.app/Contents/PkgInfo', 'APPL????',
+ chdir='missing-cfbundlesignature')
+
+ test.built_file_must_match('thirdtarget.app/Contents/PkgInfo', 'APPL????',
+ chdir='missing-cfbundlesignature')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-non-strs-flattened-to-env.py b/third_party/python/gyp/test/mac/gyptest-non-strs-flattened-to-env.py
new file mode 100644
index 0000000000..cb42a939d4
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-non-strs-flattened-to-env.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that list xcode_settings are flattened before being exported to the
+environment.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'non-strs-flattened-to-env'
+ INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
+
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+ info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
+ test.must_exist(info_plist)
+ test.must_contain(info_plist, '''\
+\t<key>My Variable</key>
+\t<string>some expansion</string>''')
+ test.must_contain(info_plist, '''\
+\t<key>CFlags</key>
+\t<string>-fstack-protector-all -fno-strict-aliasing -DS="A Space"</string>''')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-objc-arc.py b/third_party/python/gyp/test/mac/gyptest-objc-arc.py
new file mode 100755
index 0000000000..dc15a0e821
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-objc-arc.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ARC objc settings are handled correctly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ # set |match| to ignore build stderr output.
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
+ match = lambda a, b: True)
+
+ CHDIR = 'objc-arc'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', 'arc_enabled', chdir=CHDIR)
+ test.build('test.gyp', 'weak_enabled', chdir=CHDIR)
+ test.build('test.gyp', 'arc_disabled', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-objc-gc.py b/third_party/python/gyp/test/mac/gyptest-objc-gc.py
new file mode 100644
index 0000000000..0cec458983
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-objc-gc.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that GC objc settings are handled correctly.
+"""
+
+import TestGyp
+import TestMac
+
+import sys
+
+if sys.platform == 'darwin':
+ # set |match| to ignore build stderr output.
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
+ match = lambda a, b: True)
+
+ # Xcode 5.1 removed support for garbage-collection:
+ # error: garbage collection is no longer supported
+ if TestMac.Xcode.Version() < '0510':
+
+ CHDIR = 'objc-gc'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ build_error_code = {
+ 'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
+ 'make': 2,
+ 'ninja': 1,
+ }[test.format]
+
+ test.build('test.gyp', 'gc_exe_fails', chdir=CHDIR, status=build_error_code)
+ test.build(
+ 'test.gyp', 'gc_off_exe_req_lib', chdir=CHDIR, status=build_error_code)
+
+ test.build('test.gyp', 'gc_req_exe', chdir=CHDIR)
+ test.run_built_executable('gc_req_exe', chdir=CHDIR, stdout="gc on: 1\n")
+
+ test.build('test.gyp', 'gc_exe_req_lib', chdir=CHDIR)
+ test.run_built_executable(
+ 'gc_exe_req_lib', chdir=CHDIR, stdout="gc on: 1\n")
+
+ test.build('test.gyp', 'gc_exe', chdir=CHDIR)
+ test.run_built_executable('gc_exe', chdir=CHDIR, stdout="gc on: 1\n")
+
+ test.build('test.gyp', 'gc_off_exe', chdir=CHDIR)
+ test.run_built_executable('gc_off_exe', chdir=CHDIR, stdout="gc on: 0\n")
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-postbuild-copy-bundle.py b/third_party/python/gyp/test/mac/gyptest-postbuild-copy-bundle.py
new file mode 100644
index 0000000000..1f04d1cb36
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-postbuild-copy-bundle.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a postbuild copying a dependent framework into an app bundle is
+rerun if the resources in the framework change.
+"""
+
+import TestGyp
+
+import os.path
+import sys
+
+if sys.platform == 'darwin':
+ # TODO(thakis): Make this pass with the make generator, http://crbug.com/95529
+ test = TestGyp.TestGyp(formats=['ninja', 'xcode'])
+
+ CHDIR = 'postbuild-copy-bundle'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ app_bundle_dir = test.built_file_path('Test App.app', chdir=CHDIR)
+ bundled_framework_dir = os.path.join(
+ app_bundle_dir, 'Contents', 'My Framework.framework', 'Resources')
+ final_plist_path = os.path.join(bundled_framework_dir, 'Info.plist')
+ final_resource_path = os.path.join(bundled_framework_dir, 'resource_file.sb')
+ final_copies_path = os.path.join(
+ app_bundle_dir, 'Contents', 'My Framework.framework', 'Versions', 'A',
+ 'Libraries', 'copied.txt')
+
+ # Check that the dependency was built and copied into the app bundle:
+ test.build('test.gyp', 'test_app', chdir=CHDIR)
+ test.must_exist(final_resource_path)
+ test.must_match(final_resource_path,
+ 'This is included in the framework bundle.\n')
+
+ test.must_exist(final_plist_path)
+ test.must_contain(final_plist_path, '''\
+\t<key>RandomKey</key>
+\t<string>RandomValue</string>''')
+
+ # Touch the dependency's bundle resource, and check that the modification
+ # makes it all the way into the app bundle:
+ test.sleep()
+ test.write('postbuild-copy-bundle/resource_file.sb', 'New text\n')
+ test.build('test.gyp', 'test_app', chdir=CHDIR)
+
+ test.must_exist(final_resource_path)
+ test.must_match(final_resource_path, 'New text\n')
+
+ # Check the same for the plist file.
+ test.sleep()
+ contents = test.read('postbuild-copy-bundle/Framework-Info.plist')
+ contents = contents.replace('RandomValue', 'NewRandomValue')
+ test.write('postbuild-copy-bundle/Framework-Info.plist', contents)
+ test.build('test.gyp', 'test_app', chdir=CHDIR)
+
+ test.must_exist(final_plist_path)
+ test.must_contain(final_plist_path, '''\
+\t<key>RandomKey</key>
+\t<string>NewRandomValue</string>''')
+
+ # Check the same for the copies section, test for http://crbug.com/157077
+ test.sleep()
+ contents = test.read('postbuild-copy-bundle/copied.txt')
+ contents = contents.replace('old', 'new')
+ test.write('postbuild-copy-bundle/copied.txt', contents)
+ test.build('test.gyp', 'test_app', chdir=CHDIR)
+
+ test.must_exist(final_copies_path)
+ test.must_contain(final_copies_path, 'new copied file')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-postbuild-defaults.py b/third_party/python/gyp/test/mac/gyptest-postbuild-defaults.py
new file mode 100644
index 0000000000..0f7d25bd89
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-postbuild-defaults.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a postbuild invoking |defaults| works.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'postbuild-defaults'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ result_file = test.built_file_path('result', chdir=CHDIR)
+ test.must_exist(result_file)
+ test.must_contain(result_file, '''\
+Test
+${PRODUCT_NAME}
+''')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-postbuild-fail.py b/third_party/python/gyp/test/mac/gyptest-postbuild-fail.py
new file mode 100755
index 0000000000..1a229df695
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-postbuild-fail.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a failing postbuild step causes the build to fail.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ # set |match| to ignore build stderr output.
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
+ match = lambda a, b: True)
+
+ test.run_gyp('test.gyp', chdir='postbuild-fail')
+
+ build_error_code = {
+ 'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
+ 'make': 2,
+ 'ninja': 1,
+ 'xcode-ninja': [1, 65],
+ }[test.format]
+
+
+ # If a postbuild fails, all postbuilds should be re-run on the next build.
+ # In Xcode 3, even if the first postbuild fails the other postbuilds were
+ # still executed. In Xcode 4, postbuilds are stopped after the first
+ # failing postbuild. This test checks for the Xcode 4 behavior.
+
+ # Ignore this test on Xcode 3.
+ import subprocess
+ job = subprocess.Popen(['xcodebuild', '-version'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out, _ = job.communicate()
+ out = out.decode('utf-8')
+ if job.returncode != 0:
+ print(out)
+ raise Exception('Error %d running xcodebuild' % job.returncode)
+ if out.startswith('Xcode 3.'):
+ test.pass_test()
+
+ # Non-bundles
+ test.build('test.gyp', 'nonbundle', chdir='postbuild-fail',
+ status=build_error_code)
+ test.built_file_must_not_exist('static_touch',
+ chdir='postbuild-fail')
+ # Check for non-up-to-date-ness by checking if building again produces an
+ # error.
+ test.build('test.gyp', 'nonbundle', chdir='postbuild-fail',
+ status=build_error_code)
+
+
+ # Bundles
+ test.build('test.gyp', 'bundle', chdir='postbuild-fail',
+ status=build_error_code)
+ test.built_file_must_not_exist('dynamic_touch',
+ chdir='postbuild-fail')
+ # Check for non-up-to-date-ness by checking if building again produces an
+ # error.
+ test.build('test.gyp', 'bundle', chdir='postbuild-fail',
+ status=build_error_code)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-postbuild-multiple-configurations.py b/third_party/python/gyp/test/mac/gyptest-postbuild-multiple-configurations.py
new file mode 100644
index 0000000000..84694f36cc
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-postbuild-multiple-configurations.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that postbuilds work in projects with multiple configurations.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'postbuild-multiple-configurations'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ for configuration in ['Debug', 'Release']:
+ test.set_configuration(configuration)
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+ test.built_file_must_exist('postbuild-file', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-postbuild-static-library.py b/third_party/python/gyp/test/mac/gyptest-postbuild-static-library.py
new file mode 100644
index 0000000000..8f9a6ebcb0
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-postbuild-static-library.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that postbuilds on static libraries work, and that sourceless
+libraries don't cause failures at gyp time.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['make', 'xcode'])
+
+ CHDIR = 'postbuild-static-library'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', 'my_lib', chdir=CHDIR)
+ # Building my_sourceless_lib doesn't work with make. gyp should probably
+ # forbid sourceless static libraries, since they're pretty pointless.
+ # But they shouldn't cause gyp time exceptions.
+
+ test.built_file_must_exist('postbuild-file', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-postbuild.py b/third_party/python/gyp/test/mac/gyptest-postbuild.py
new file mode 100755
index 0000000000..684e7b8426
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-postbuild.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that postbuild steps work.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp', chdir='postbuilds')
+
+ test.build('test.gyp', test.ALL, chdir='postbuilds')
+
+ # See comment in test/subdirectory/gyptest-subdir-default.py
+ if test.format == 'xcode':
+ chdir = 'postbuilds/subdirectory'
+ else:
+ chdir = 'postbuilds'
+
+ # Created by the postbuild scripts
+ test.built_file_must_exist('el.a_touch',
+ type=test.STATIC_LIB,
+ chdir='postbuilds')
+ test.built_file_must_exist('el.a_gyp_touch',
+ type=test.STATIC_LIB,
+ chdir='postbuilds')
+ test.built_file_must_exist('nest_el.a_touch',
+ type=test.STATIC_LIB,
+ chdir=chdir)
+ test.built_file_must_exist(
+ 'dyna.framework/Versions/A/dyna_touch',
+ chdir='postbuilds')
+ test.built_file_must_exist(
+ 'dyna.framework/Versions/A/dyna_gyp_touch',
+ chdir='postbuilds')
+ test.built_file_must_exist(
+ 'nest_dyna.framework/Versions/A/nest_dyna_touch',
+ chdir=chdir)
+ test.built_file_must_exist('dyna_standalone.dylib_gyp_touch',
+ type=test.SHARED_LIB,
+ chdir='postbuilds')
+ test.built_file_must_exist('copied_file.txt', chdir='postbuilds')
+ test.built_file_must_exist('copied_file_2.txt', chdir=chdir)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-prefixheader.py b/third_party/python/gyp/test/mac/gyptest-prefixheader.py
new file mode 100755
index 0000000000..0cf85f9422
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-prefixheader.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that GCC_PREFIX_HEADER works.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+ test.run_gyp('test.gyp', chdir='prefixheader')
+
+ test.build('test.gyp', test.ALL, chdir='prefixheader')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-rebuild.py b/third_party/python/gyp/test/mac/gyptest-rebuild.py
new file mode 100755
index 0000000000..c7d8cad02d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-rebuild.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that app bundles are rebuilt correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'rebuild'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', 'test_app', chdir=CHDIR)
+
+ # Touch a source file, rebuild, and check that the app target is up-to-date.
+ test.touch('rebuild/main.c')
+ test.build('test.gyp', 'test_app', chdir=CHDIR)
+
+ test.up_to_date('test.gyp', 'test_app', chdir=CHDIR)
+
+ # Xcode runs postbuilds on every build, so targets with postbuilds are
+ # never marked as up_to_date.
+ if test.format != 'xcode':
+ # Same for a framework bundle.
+ test.build('test.gyp', 'test_framework_postbuilds', chdir=CHDIR)
+ test.up_to_date('test.gyp', 'test_framework_postbuilds', chdir=CHDIR)
+
+ # Test that an app bundle with a postbuild that touches the app binary needs
+ # to be built only once.
+ test.build('test.gyp', 'test_app_postbuilds', chdir=CHDIR)
+ test.up_to_date('test.gyp', 'test_app_postbuilds', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-rpath.py b/third_party/python/gyp/test/mac/gyptest-rpath.py
new file mode 100644
index 0000000000..a82e2fd562
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-rpath.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that rpath settings are handled correctly.
+"""
+
+import TestGyp
+
+import re
+import subprocess
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'rpath'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ def GetRpaths(p):
+ p = test.built_file_path(p, chdir=CHDIR)
+ r = re.compile(r'cmd LC_RPATH.*?path (.*?) \(offset \d+\)', re.DOTALL)
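+    # The regex above matches LC_RPATH load commands, which look roughly like
+    # this in `otool -l` output (illustrative sample):
+    #          cmd LC_RPATH
+    #      cmdsize 32
+    #         path @executable_path/. (offset 12)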
+ proc = subprocess.Popen(['otool', '-l', p], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].decode('utf-8')
+ assert not proc.returncode
+ return r.findall(o)
+
+ if GetRpaths('libdefault_rpath.dylib') != []:
+ test.fail_test()
+
+ if GetRpaths('libexplicit_rpath.dylib') != ['@executable_path/.']:
+ test.fail_test()
+
+ if (GetRpaths('libexplicit_rpaths_escaped.dylib') !=
+ ['First rpath', 'Second rpath']):
+ test.fail_test()
+
+ if GetRpaths('My Framework.framework/My Framework') != ['@loader_path/.']:
+ test.fail_test()
+
+ if GetRpaths('executable') != ['@executable_path/.']:
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-sdkroot.py b/third_party/python/gyp/test/mac/gyptest-sdkroot.py
new file mode 100644
index 0000000000..f8edbaa583
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-sdkroot.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that setting SDKROOT works.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import subprocess
+import sys
+
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ def GetSDKPath(sdk):
+ """Return SDKROOT if the SDK version |sdk| is installed or empty string."""
+ DEVNULL = open(os.devnull, 'wb')
+ try:
+ proc = subprocess.Popen(
+ ['xcodebuild', '-version', '-sdk', 'macosx' + sdk, 'Path'],
+ stdout=subprocess.PIPE, stderr=DEVNULL)
+      return proc.communicate()[0].decode('utf-8').rstrip('\n')
+ finally:
+ DEVNULL.close()
+
+ def SelectSDK():
+ """Select the oldest SDK installed (greater than 10.6)."""
+ for sdk in ['10.6', '10.7', '10.8', '10.9']:
+ path = GetSDKPath(sdk)
+ if path:
+ return True, sdk, path
+ return False, '', ''
+
+ # Make sure this works on the bots, which only have the 10.6 sdk, and on
+ # dev machines which usually don't have the 10.6 sdk.
+ sdk_found, sdk, sdk_path = SelectSDK()
+ if not sdk_found:
+ test.fail_test()
+
+ test.write('sdkroot/test.gyp', test.read('sdkroot/test.gyp') % sdk)
+
+ test.run_gyp('test.gyp', '-D', 'sdk_path=%s' % sdk_path,
+ chdir='sdkroot')
+ test.build('test.gyp', test.ALL, chdir='sdkroot')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-sourceless-module.py b/third_party/python/gyp/test/mac/gyptest-sourceless-module.py
new file mode 100644
index 0000000000..f2801c20aa
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-sourceless-module.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that bundles that have no 'sources' (pure resource containers) work.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp', chdir='sourceless-module')
+
+ # Just needs to build without errors.
+ test.build('test.gyp', 'empty_bundle', chdir='sourceless-module')
+ test.built_file_must_not_exist(
+ 'empty_bundle.bundle', chdir='sourceless-module')
+
+ # Needs to build, and contain a resource.
+ test.build('test.gyp', 'resource_bundle', chdir='sourceless-module')
+
+ test.built_file_must_exist(
+ 'resource_bundle.bundle/Contents/Resources/foo.manifest',
+ chdir='sourceless-module')
+ test.built_file_must_not_exist(
+ 'resource_bundle.bundle/Contents/MacOS/resource_bundle',
+ chdir='sourceless-module')
+
+ # Build an app containing an actionless bundle.
+ test.build(
+ 'test.gyp',
+ 'bundle_dependent_on_resource_bundle_no_actions',
+ chdir='sourceless-module')
+
+ test.built_file_must_exist(
+ 'bundle_dependent_on_resource_bundle_no_actions.app/Contents/Resources/'
+ 'mac_resource_bundle_no_actions.bundle/Contents/Resources/empty.txt',
+ chdir='sourceless-module')
+
+ # Needs to build and cause the bundle to be built.
+ test.build(
+ 'test.gyp', 'dependent_on_resource_bundle', chdir='sourceless-module')
+
+ test.built_file_must_exist(
+ 'resource_bundle.bundle/Contents/Resources/foo.manifest',
+ chdir='sourceless-module')
+ test.built_file_must_not_exist(
+ 'resource_bundle.bundle/Contents/MacOS/resource_bundle',
+ chdir='sourceless-module')
+
+ # TODO(thakis): shared_libraries that have no sources but depend on static
+ # libraries currently only work with the ninja generator. This is used by
+ # chrome/mac's components build.
+ if test.format == 'ninja':
+ # Check that an executable depending on a resource framework links fine too.
+ test.build(
+ 'test.gyp', 'dependent_on_resource_framework', chdir='sourceless-module')
+
+ test.built_file_must_exist(
+ 'resource_framework.framework/Resources/foo.manifest',
+ chdir='sourceless-module')
+ test.built_file_must_exist(
+ 'resource_framework.framework/resource_framework',
+ chdir='sourceless-module')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-strip-default.py b/third_party/python/gyp/test/mac/gyptest-strip-default.py
new file mode 100644
index 0000000000..b851782fd5
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-strip-default.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that the default STRIP_STYLEs match between different generators.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import re
+import subprocess
+import sys
+import time
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+  CHDIR = 'strip'
+ test.run_gyp('test-defaults.gyp', chdir=CHDIR)
+
+ test.build('test-defaults.gyp', test.ALL, chdir=CHDIR)
+
+ # Lightweight check if stripping was done.
+ def OutPath(s):
+ return test.built_file_path(s, chdir=CHDIR)
+
+ def CheckNsyms(p, o_expected):
+ proc = subprocess.Popen(['nm', '-aU', p], stdout=subprocess.PIPE)
+ o = proc.communicate()[0].decode('utf-8')
+
+ # Filter out mysterious "00 0000 OPT radr://5614542" symbol which
+ # is apparently only printed on the bots (older toolchain?).
+ # Yes, "radr", not "rdar".
+ o = ''.join(filter(lambda s: 'radr://5614542' not in s, o.splitlines(True)))
+
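+    # Normalize the remaining output: fold 'A' (absolute) symbol types into
+    # 'T' and mask the addresses, so the literal comparisons below are stable.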
+ o = o.replace('A', 'T')
+ o = re.sub(r'^[a-fA-F0-9]+', 'XXXXXXXX', o, flags=re.MULTILINE)
+ assert not proc.returncode
+ if o != o_expected:
+ print('Stripping: Expected symbols """\n%s""", got """\n%s"""' % (
+ o_expected, o))
+ test.fail_test()
+
+ CheckNsyms(OutPath('libsingle_dylib.dylib'),
+"""\
+XXXXXXXX S _ci
+XXXXXXXX S _i
+XXXXXXXX T _the_function
+XXXXXXXX t _the_hidden_function
+XXXXXXXX T _the_used_function
+XXXXXXXX T _the_visible_function
+""")
+ CheckNsyms(OutPath('single_so.so'),
+"""\
+XXXXXXXX S _ci
+XXXXXXXX S _i
+XXXXXXXX T _the_function
+XXXXXXXX t _the_hidden_function
+XXXXXXXX T _the_used_function
+XXXXXXXX T _the_visible_function
+""")
+ CheckNsyms(OutPath('single_exe'),
+"""\
+XXXXXXXX T __mh_execute_header
+""")
+
+ CheckNsyms(test.built_file_path(
+ 'bundle_dylib.framework/Versions/A/bundle_dylib', chdir=CHDIR),
+"""\
+XXXXXXXX S _ci
+XXXXXXXX S _i
+XXXXXXXX T _the_function
+XXXXXXXX t _the_hidden_function
+XXXXXXXX T _the_used_function
+XXXXXXXX T _the_visible_function
+""")
+ CheckNsyms(test.built_file_path(
+ 'bundle_so.bundle/Contents/MacOS/bundle_so', chdir=CHDIR),
+"""\
+XXXXXXXX S _ci
+XXXXXXXX S _i
+XXXXXXXX T _the_function
+XXXXXXXX T _the_used_function
+XXXXXXXX T _the_visible_function
+""")
+ CheckNsyms(test.built_file_path(
+ 'bundle_exe.app/Contents/MacOS/bundle_exe', chdir=CHDIR),
+"""\
+XXXXXXXX T __mh_execute_header
+""")
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-strip.py b/third_party/python/gyp/test/mac/gyptest-strip.py
new file mode 100755
index 0000000000..d4694834ac
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-strip.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that stripping works.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import TestMac
+
+import re
+import subprocess
+import sys
+import time
+
+print("This test is currently disabled: https://crbug.com/483696.")
+sys.exit(0)
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp', chdir='strip')
+
+ test.build('test.gyp', test.ALL, chdir='strip')
+
+ # Lightweight check if stripping was done.
+ def OutPath(s):
+ return test.built_file_path(s, type=test.SHARED_LIB, chdir='strip')
+
+ def CheckNsyms(p, n_expected):
+ r = re.compile(r'nsyms\s+(\d+)')
+    o = subprocess.check_output(['otool', '-l', p]).decode('utf-8')
+ m = r.search(o)
+ n = int(m.group(1))
+ if n != n_expected:
+ print('Stripping: Expected %d symbols, got %d' % (n_expected, n))
+ test.fail_test()
+
+  # Starting with Xcode 5.0, clang adds an additional symbol to the compiled
+ # file when using a relative path to the input file. So when using ninja
+ # with Xcode 5.0 or higher, take this additional symbol into consideration
+ # for unstripped builds (it is stripped by all strip commands).
+ expected_extra_symbol_count = 0
+ if test.format in ['ninja', 'xcode-ninja'] \
+ and TestMac.Xcode.Version() >= '0500':
+ expected_extra_symbol_count = 1
+
+  # The actual numbers here are not interesting; they just need to be the same
+ # in both the xcode and the make build.
+ CheckNsyms(OutPath('no_postprocess'), 29 + expected_extra_symbol_count)
+ CheckNsyms(OutPath('no_strip'), 29 + expected_extra_symbol_count)
+ CheckNsyms(OutPath('strip_all'), 0)
+ CheckNsyms(OutPath('strip_nonglobal'), 6)
+ CheckNsyms(OutPath('strip_debugging'), 7)
+ CheckNsyms(OutPath('strip_all_custom_flags'), 0)
+ CheckNsyms(test.built_file_path(
+ 'strip_all_bundle.framework/Versions/A/strip_all_bundle', chdir='strip'),
+ 0)
+ CheckNsyms(OutPath('strip_save'), 7)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-swift-library.py b/third_party/python/gyp/test/mac/gyptest-swift-library.py
new file mode 100644
index 0000000000..d3433753fd
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-swift-library.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a Swift framework builds correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import TestMac
+
+import collections
+import sys
+import subprocess
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['xcode'])
+
+  # Ensures that the given symbol is present in the given file by running nm.
+ def CheckHasSymbolName(path, symbol):
+    output = subprocess.check_output(['nm', '-j', path]).decode('utf-8')
+ idx = output.find(symbol)
+ if idx == -1:
+      print('Swift: Could not find symbol: %s' % symbol)
+ test.fail_test()
+
+ test_cases = []
+
+  # Run this for iOS on Xcode 6.0 or greater.
+ if TestMac.Xcode.Version() >= '0600':
+ test_cases.append(('Default', 'iphoneos'))
+ test_cases.append(('Default', 'iphonesimulator'))
+
+  # Run it for Mac on Xcode 6.1 or greater.
+ if TestMac.Xcode.Version() >= '0610':
+ test_cases.append(('Default', None))
+
+ # Generate the project.
+ test.run_gyp('test.gyp', chdir='swift-library')
+
+ # Build and verify for each configuration.
+ for configuration, sdk in test_cases:
+ kwds = collections.defaultdict(list)
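+    # defaultdict(list) lets the '-sdk' arguments be appended below without
+    # first creating the 'arguments' key; test.build() receives them via
+    # **kwds.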
+ if test.format == 'xcode':
+ if sdk is not None:
+ kwds['arguments'].extend(['-sdk', sdk])
+
+ test.set_configuration(configuration)
+ test.build('test.gyp', 'SwiftFramework', chdir='swift-library', **kwds)
+
+ filename = 'SwiftFramework.framework/SwiftFramework'
+ result_file = test.built_file_path(filename, chdir='swift-library')
+
+ test.must_exist(result_file)
+
+    # Check that our Swift class (GypSwiftTest) is present in the built
+    # binary.
+ CheckHasSymbolName(result_file, "C14SwiftFramework12GypSwiftTest")
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-type-envvars.py b/third_party/python/gyp/test/mac/gyptest-type-envvars.py
new file mode 100755
index 0000000000..a5203c5f1e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-type-envvars.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test that MACH_O_TYPE etc. are set correctly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ test.run_gyp('test.gyp',
+ '-G', 'xcode_ninja_target_pattern=^(?!nonbundle_none).*$',
+ chdir='type_envvars')
+
+ test.build('test.gyp', test.ALL, chdir='type_envvars')
+
+ # The actual test is done by postbuild scripts during |test.build()|.
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-unicode-settings.py b/third_party/python/gyp/test/mac/gyptest-unicode-settings.py
new file mode 100644
index 0000000000..a71b3bd9a3
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-unicode-settings.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that unicode strings in 'xcode_settings' work.
+Also checks that ASCII control characters are escaped properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['xcode'])
+ test.run_gyp('test.gyp', chdir='unicode-settings')
+ test.build('test.gyp', test.ALL, chdir='unicode-settings')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-xcode-env-order.py b/third_party/python/gyp/test/mac/gyptest-xcode-env-order.py
new file mode 100755
index 0000000000..bda19988b2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-xcode-env-order.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that dependent Xcode settings are processed correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import TestMac
+
+import subprocess
+import sys
+
+if sys.platform == 'darwin':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'xcode-env-order'
+ INFO_PLIST_PATH = 'Test.app/Contents/Info.plist'
+
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+ # Env vars in 'copies' filenames.
+ test.built_file_must_exist('Test-copy-brace/main.c', chdir=CHDIR)
+ test.built_file_must_exist('Test-copy-paren/main.c', chdir=CHDIR)
+ test.built_file_must_exist('Test-copy-bare/main.c', chdir=CHDIR)
+
+ # Env vars in 'actions' filenames and inline actions
+ test.built_file_must_exist('action-copy-brace.txt', chdir=CHDIR)
+ test.built_file_must_exist('action-copy-paren.txt', chdir=CHDIR)
+ test.built_file_must_exist('action-copy-bare.txt', chdir=CHDIR)
+
+ # Env vars in 'rules' filenames and inline actions
+ test.built_file_must_exist('rule-copy-brace.txt', chdir=CHDIR)
+ test.built_file_must_exist('rule-copy-paren.txt', chdir=CHDIR)
+ # TODO: see comment in test.gyp for this file.
+ #test.built_file_must_exist('rule-copy-bare.txt', chdir=CHDIR)
+
+ # Env vars in Info.plist.
+ info_plist = test.built_file_path(INFO_PLIST_PATH, chdir=CHDIR)
+ test.must_exist(info_plist)
+
+ test.must_contain(info_plist, '''\
+\t<key>BraceProcessedKey1</key>
+\t<string>D:/Source/Project/Test</string>''')
+ test.must_contain(info_plist, '''\
+\t<key>BraceProcessedKey2</key>
+\t<string>/Source/Project/Test</string>''')
+ test.must_contain(info_plist, '''\
+\t<key>BraceProcessedKey3</key>
+\t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''')
+
+ test.must_contain(info_plist, '''\
+\t<key>ParenProcessedKey1</key>
+\t<string>D:/Source/Project/Test</string>''')
+ test.must_contain(info_plist, '''\
+\t<key>ParenProcessedKey2</key>
+\t<string>/Source/Project/Test</string>''')
+ test.must_contain(info_plist, '''\
+\t<key>ParenProcessedKey3</key>
+\t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''')
+
+ test.must_contain(info_plist, '''\
+\t<key>BareProcessedKey1</key>
+\t<string>D:/Source/Project/Test</string>''')
+ test.must_contain(info_plist, '''\
+\t<key>BareProcessedKey2</key>
+\t<string>/Source/Project/Test</string>''')
+ # NOTE: For bare variables, $PRODUCT_TYPE is not replaced! It _is_ replaced
+ # if it's not right at the start of the string (e.g. ':$PRODUCT_TYPE'), so
+ # this looks like an Xcode bug. This bug isn't emulated (yet?), so check this
+ # only for Xcode.
+ if test.format == 'xcode' and TestMac.Xcode.Version() < '0500':
+ test.must_contain(info_plist, '''\
+\t<key>BareProcessedKey3</key>
+\t<string>$PRODUCT_TYPE:D:/Source/Project/Test</string>''')
+ else:
+ # The bug has been fixed by Xcode version 5.0.0.
+ test.must_contain(info_plist, '''\
+\t<key>BareProcessedKey3</key>
+\t<string>com.apple.product-type.application:D:/Source/Project/Test</string>''')
+
+ test.must_contain(info_plist, '''\
+\t<key>MixedProcessedKey</key>
+\t<string>/Source/Project:Test:mh_execute</string>''')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-xcode-gcc-clang.py b/third_party/python/gyp/test/mac/gyptest-xcode-gcc-clang.py
new file mode 100644
index 0000000000..981c3fc564
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-xcode-gcc-clang.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that xcode-style GCC_... settings that require clang are handled
+properly.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ CHDIR = 'xcode-gcc'
+ test.run_gyp('test-clang.gyp', chdir=CHDIR)
+
+ test.build('test-clang.gyp', 'aliasing_yes', chdir=CHDIR)
+ test.run_built_executable('aliasing_yes', chdir=CHDIR, stdout="1\n")
+ test.build('test-clang.gyp', 'aliasing_no', chdir=CHDIR)
+ test.run_built_executable('aliasing_no', chdir=CHDIR, stdout="0\n")
+
+ # The default behavior changed: strict aliasing used to be off, now it's on
+ # by default. The important part is that this is identical for all generators
+ # (which it is). TODO(thakis): Enable this once the bots have a newer Xcode.
+ #test.build('test-clang.gyp', 'aliasing_default', chdir=CHDIR)
+ #test.run_built_executable('aliasing_default', chdir=CHDIR, stdout="1\n")
+ # For now, just check the generated ninja file:
+ if test.format == 'ninja':
+ contents = open(test.built_file_path('obj/aliasing_default.ninja',
+ chdir=CHDIR)).read()
+ if 'strict-aliasing' in contents:
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-xcode-gcc.py b/third_party/python/gyp/test/mac/gyptest-xcode-gcc.py
new file mode 100644
index 0000000000..a1d201ae03
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-xcode-gcc.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that xcode-style GCC_... settings are handled properly.
+"""
+
+import TestGyp
+
+import os
+import subprocess
+import sys
+
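+# Match callback passed to test.build() below; accepting any output lets the
+# '-fail' targets build (and fail) without tripping the harness on stderr.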
+def IgnoreOutput(string, expected_string):
+ return True
+
+def CompilerVersion(compiler):
+ stdout = subprocess.check_output([compiler, '-v'], stderr=subprocess.STDOUT)
+ stdout = stdout.decode('utf-8')
+ return stdout.rstrip('\n')
+
+def CompilerSupportsWarnAboutInvalidOffsetOfMacro(test):
+ # "clang" does not support the "-Winvalid-offsetof" flag, and silently
+ # ignore it. Starting with Xcode 5.0.0, "gcc" is just a "clang" binary with
+ # some hard-coded include path hack, so use the output of "-v" to detect if
+ # the compiler supports the flag or not.
+ return 'clang' not in CompilerVersion('/usr/bin/cc')
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+ if test.format == 'xcode-ninja':
+ test.skip_test()
+
+ CHDIR = 'xcode-gcc'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+
+ # Targets that are expected to build successfully. For each, a target of
+ # the same name with '-fail' appended is expected to fail to build.
+ targets = [
+ 'warn_about_missing_newline',
+ ]
+
+ # clang doesn't warn on invalid offsetofs; it silently ignores
+ # -Wno-invalid-offsetof.
+ if CompilerSupportsWarnAboutInvalidOffsetOfMacro(test):
+ targets.append('warn_about_invalid_offsetof_macro')
+
+ for target in targets:
+ test.build('test.gyp', target, chdir=CHDIR)
+ test.built_file_must_exist(target, chdir=CHDIR)
+ fail_target = target + '-fail'
+ test.build('test.gyp', fail_target, chdir=CHDIR, status=None,
+ stderr=None, match=IgnoreOutput)
+ test.built_file_must_not_exist(fail_target, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-xcode-support-actions.py b/third_party/python/gyp/test/mac/gyptest-xcode-support-actions.py
new file mode 100755
index 0000000000..ecc1402972
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-xcode-support-actions.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that support actions are properly created.
+"""
+
+import TestGyp
+
+import os
+import subprocess
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['xcode'])
+
+ CHDIR = 'xcode-support-actions'
+
+ test.run_gyp('test.gyp', '-Gsupport_target_suffix=_customsuffix', chdir=CHDIR)
+ test.build('test.gyp', target='target_customsuffix', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-xctest.py b/third_party/python/gyp/test/mac/gyptest-xctest.py
new file mode 100644
index 0000000000..fb478bb31c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-xctest.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that xctest targets are correctly configured.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['xcode'])
+
+ # This test appears to be flaky.
+ test.skip_test() # bug=531
+
+ # Ignore this test if Xcode 5 or newer is not installed.
+ import subprocess
+ job = subprocess.Popen(['xcodebuild', '-version'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out, err = job.communicate()
+ if job.returncode != 0:
+ raise Exception('Error %d running xcodebuild' % job.returncode)
+ xcode_version, build_number = out.decode('utf-8').splitlines()
+ # Convert the version string from 'Xcode 5.0' to ['5', '0'].
+ xcode_version = xcode_version.split()[-1].split('.')
+ if int(xcode_version[0]) < 5:
+ test.pass_test()
+
+ CHDIR = 'xctest'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', chdir=CHDIR, arguments=['-scheme', 'classes', 'test'])
+
+ test.built_file_must_match('tests.xctest/Contents/Resources/resource.txt',
+ 'foo\n', chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/gyptest-xcuitest.py b/third_party/python/gyp/test/mac/gyptest-xcuitest.py
new file mode 100755
index 0000000000..410de297d0
--- /dev/null
+++ b/third_party/python/gyp/test/mac/gyptest-xcuitest.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that xcuitest targets are correctly configured.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'darwin':
+ test = TestGyp.TestGyp(formats=['xcode'])
+
+ # Ignore this test if Xcode 7 or newer is not installed.
+ import subprocess
+ job = subprocess.Popen(['xcodebuild', '-version'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT)
+ out, err = job.communicate()
+ if job.returncode != 0:
+ raise Exception('Error %d running xcodebuild' % job.returncode)
+ xcode_version, build_number = out.decode('utf-8').splitlines()
+ # Convert the version string from 'Xcode 7.0' to ['7', '0'].
+ xcode_version = xcode_version.split()[-1].split('.')
+ if int(xcode_version[0]) < 7:
+ test.pass_test()
+
+ CHDIR = 'xcuitest'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', chdir=CHDIR, arguments=[
+ '-target', 'tests',
+ '-sdk', 'iphonesimulator',
+ ])
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/mac/identical-name/proxy/proxy.cc b/third_party/python/gyp/test/mac/identical-name/proxy/proxy.cc
new file mode 100644
index 0000000000..8e1782da63
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/proxy/proxy.cc
@@ -0,0 +1,2 @@
+// Empty file
+
diff --git a/third_party/python/gyp/test/mac/identical-name/proxy/proxy.gyp b/third_party/python/gyp/test/mac/identical-name/proxy/proxy.gyp
new file mode 100644
index 0000000000..38f44af1b5
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/proxy/proxy.gyp
@@ -0,0 +1,9 @@
+{
+ 'includes': ['../test.gypi'],
+ 'targets': [{
+ 'target_name': 'testlib',
+ 'type': 'none',
+ 'dependencies': ['testlib/testlib.gyp:testlib'],
+ 'sources': ['proxy.cc'],
+ }],
+}
diff --git a/third_party/python/gyp/test/mac/identical-name/proxy/testlib/testlib.cc b/third_party/python/gyp/test/mac/identical-name/proxy/testlib/testlib.cc
new file mode 100644
index 0000000000..8e1782da63
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/proxy/testlib/testlib.cc
@@ -0,0 +1,2 @@
+// Empty file
+
diff --git a/third_party/python/gyp/test/mac/identical-name/proxy/testlib/testlib.gyp b/third_party/python/gyp/test/mac/identical-name/proxy/testlib/testlib.gyp
new file mode 100644
index 0000000000..ed1c62e982
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/proxy/testlib/testlib.gyp
@@ -0,0 +1,8 @@
+{
+ 'includes': ['../../test.gypi'],
+ 'targets': [{
+ 'target_name': 'testlib',
+ 'type': 'static_library',
+ 'sources': ['testlib.cc'],
+ }],
+}
diff --git a/third_party/python/gyp/test/mac/identical-name/test-should-fail.gyp b/third_party/python/gyp/test/mac/identical-name/test-should-fail.gyp
new file mode 100644
index 0000000000..72bfc7af0f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/test-should-fail.gyp
@@ -0,0 +1,10 @@
+{
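+ # Unlike test.gyp, this file does not include test.gypi (which derives
+ # SYMROOT per project), so the identically named outputs should collide.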
+ 'targets': [{
+ 'target_name': 'test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'testlib/testlib.gyp:proxy',
+ 'proxy/proxy.gyp:testlib',
+ ],
+ }],
+}
diff --git a/third_party/python/gyp/test/mac/identical-name/test.gyp b/third_party/python/gyp/test/mac/identical-name/test.gyp
new file mode 100644
index 0000000000..717220e866
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/test.gyp
@@ -0,0 +1,11 @@
+{
+ 'includes': ['test.gypi'],
+ 'targets': [{
+ 'target_name': 'test',
+ 'type': 'executable',
+ 'dependencies': [
+ 'testlib/testlib.gyp:proxy',
+ 'proxy/proxy.gyp:testlib',
+ ],
+ }],
+}
\ No newline at end of file
diff --git a/third_party/python/gyp/test/mac/identical-name/test.gypi b/third_party/python/gyp/test/mac/identical-name/test.gypi
new file mode 100644
index 0000000000..61b7c2badf
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/test.gypi
@@ -0,0 +1,7 @@
+{
+ 'target_defaults': {
+ 'xcode_settings': {
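+ # SYMROOT incorporates each project's own $SRCROOT, giving every
+ # sub-project a distinct build directory despite the identical target names.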
+ 'SYMROOT': '<(DEPTH)/$SRCROOT/',
+ },
+ },
+}
diff --git a/third_party/python/gyp/test/mac/identical-name/testlib/main.cc b/third_party/python/gyp/test/mac/identical-name/testlib/main.cc
new file mode 100644
index 0000000000..5c2fa9bb6a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/testlib/main.cc
@@ -0,0 +1,3 @@
+int main(int argc, char **argv) {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/mac/identical-name/testlib/testlib.gyp b/third_party/python/gyp/test/mac/identical-name/testlib/testlib.gyp
new file mode 100644
index 0000000000..aa8b851004
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/testlib/testlib.gyp
@@ -0,0 +1,14 @@
+{
+ 'includes': ['../test.gypi'],
+ 'targets': [{
+ 'target_name': 'proxy',
+ 'type': 'static_library',
+ 'sources': ['void.cc'],
+ 'dependencies': ['testlib'],
+ 'export_dependent_settings': ['testlib'],
+ }, {
+ 'target_name': 'testlib',
+ 'type': 'static_library',
+ 'sources': ['main.cc'],
+ }],
+}
diff --git a/third_party/python/gyp/test/mac/identical-name/testlib/void.cc b/third_party/python/gyp/test/mac/identical-name/testlib/void.cc
new file mode 100644
index 0000000000..8e1782da63
--- /dev/null
+++ b/third_party/python/gyp/test/mac/identical-name/testlib/void.cc
@@ -0,0 +1,2 @@
+// Empty file
+
diff --git a/third_party/python/gyp/test/mac/infoplist-process/Info.plist b/third_party/python/gyp/test/mac/infoplist-process/Info.plist
new file mode 100644
index 0000000000..cb65721f43
--- /dev/null
+++ b/third_party/python/gyp/test/mac/infoplist-process/Info.plist
@@ -0,0 +1,36 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSMinimumSystemVersion</key>
+ <string>${MACOSX_DEPLOYMENT_TARGET}</string>
+ <key>NSMainNibFile</key>
+ <string>MainMenu</string>
+ <key>NSPrincipalClass</key>
+ <string>NSApplication</string>
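+ <!-- The two keys below are rewritten by the Info.plist preprocessor when
+ INFOPLIST_PREPROCESS is YES. -->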
+ <key>ProcessedKey1</key>
+ <string>PROCESSED_KEY1</string>
+ <key>ProcessedKey2</key>
+ <string>PROCESSED_KEY2</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/infoplist-process/main.c b/third_party/python/gyp/test/mac/infoplist-process/main.c
new file mode 100644
index 0000000000..1bf4b2a11a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/infoplist-process/main.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/mac/infoplist-process/test1.gyp b/third_party/python/gyp/test/mac/infoplist-process/test1.gyp
new file mode 100644
index 0000000000..bc625a968b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/infoplist-process/test1.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'main.c',
+ ],
+ 'configurations': {
+ 'One': {
+ },
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ 'INFOPLIST_PREPROCESS': 'YES',
+ 'INFOPLIST_PREPROCESSOR_DEFINITIONS': 'PROCESSED_KEY1=Foo PROCESSED_KEY2=Bar',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/infoplist-process/test2.gyp b/third_party/python/gyp/test/mac/infoplist-process/test2.gyp
new file mode 100644
index 0000000000..ecfbc9f64c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/infoplist-process/test2.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'main.c',
+ ],
+ 'configurations': {
+ 'Two': {
+ },
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ 'INFOPLIST_PREPROCESS': 'YES',
+ 'INFOPLIST_PREPROCESSOR_DEFINITIONS': 'PROCESSED_KEY1="Foo (Bar)"',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/infoplist-process/test3.gyp b/third_party/python/gyp/test/mac/infoplist-process/test3.gyp
new file mode 100644
index 0000000000..be8fe75a53
--- /dev/null
+++ b/third_party/python/gyp/test/mac/infoplist-process/test3.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'main.c',
+ ],
+ 'configurations': {
+ 'Three': {
+ },
+ },
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
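+ # Preprocessing is explicitly disabled, so the PROCESSED_KEY* tokens in
+ # Info.plist should pass through literally even though a definition is set.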
+ 'INFOPLIST_PREPROCESS': 'NO',
+ 'INFOPLIST_PREPROCESSOR_DEFINITIONS': 'PROCESSED_KEY1=Foo',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/installname/Info.plist b/third_party/python/gyp/test/mac/installname/Info.plist
new file mode 100644
index 0000000000..5e05a5190c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/installname/Info.plist
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.yourcompany.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>FMWK</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>NSPrincipalClass</key>
+ <string></string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/installname/file.c b/third_party/python/gyp/test/mac/installname/file.c
new file mode 100644
index 0000000000..a39fce095f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/installname/file.c
@@ -0,0 +1 @@
+int f() { return 0; }
diff --git a/third_party/python/gyp/test/mac/installname/main.c b/third_party/python/gyp/test/mac/installname/main.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/mac/installname/main.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/mac/installname/test.gyp b/third_party/python/gyp/test/mac/installname/test.gyp
new file mode 100644
index 0000000000..60c867ff12
--- /dev/null
+++ b/third_party/python/gyp/test/mac/installname/test.gyp
@@ -0,0 +1,93 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'default_installname',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'default_bundle_installname',
+ 'product_name': 'My Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'explicit_installname',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ 'LD_DYLIB_INSTALL_NAME': 'Trapped in a dynamiclib factory',
+ },
+ },
+ {
+ 'target_name': 'explicit_installname_base',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ 'DYLIB_INSTALL_NAME_BASE': '@executable_path/../../..',
+
+ },
+ },
+ {
+ 'target_name': 'explicit_installname_base_bundle',
+ 'product_name': 'My Other Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ 'DYLIB_INSTALL_NAME_BASE': '@executable_path/../../..',
+
+ },
+ },
+ {
+ 'target_name': 'both_base_and_installname',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ # LD_DYLIB_INSTALL_NAME wins.
+ 'LD_DYLIB_INSTALL_NAME': 'Still trapped in a dynamiclib factory',
+ 'DYLIB_INSTALL_NAME_BASE': '@executable_path/../../..',
+ },
+ },
+ {
+ 'target_name': 'explicit_installname_with_base',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ 'LD_DYLIB_INSTALL_NAME': '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)',
+ },
+ },
+ {
+ 'target_name': 'explicit_installname_with_explicit_base',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ 'DYLIB_INSTALL_NAME_BASE': '@executable_path/..',
+ 'LD_DYLIB_INSTALL_NAME': '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)',
+ },
+ },
+ {
+ 'target_name': 'executable',
+ 'type': 'executable',
+ 'sources': [ 'main.c' ],
+ 'xcode_settings': {
+ 'LD_DYLIB_INSTALL_NAME': 'Should be ignored for not shared_lib',
+ },
+ },
+ # Regression test for http://crbug.com/113918
+ {
+ 'target_name': 'install_name_with_info_plist',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ 'LD_DYLIB_INSTALL_NAME': '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/kext/GypKext/GypKext-Info.plist b/third_party/python/gyp/test/mac/kext/GypKext/GypKext-Info.plist
new file mode 100644
index 0000000000..84226099c1
--- /dev/null
+++ b/third_party/python/gyp/test/mac/kext/GypKext/GypKext-Info.plist
@@ -0,0 +1,35 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>BuildMachineOSBuild</key>
+ <string>Doesn't matter, will be overwritten</string>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.${PRODUCT_NAME:rfc1034identifier}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>KEXT</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>ause</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSMinimumSystemVersion</key>
+ <string>${MACOSX_DEPLOYMENT_TARGET}</string>
+ <key>OSBundleLibraries</key>
+ <dict>
+ <key>com.apple.kpi.libkern</key>
+ <string>10.0</string>
+ </dict>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/kext/GypKext/GypKext.c b/third_party/python/gyp/test/mac/kext/GypKext/GypKext.c
new file mode 100644
index 0000000000..9b611b0dc5
--- /dev/null
+++ b/third_party/python/gyp/test/mac/kext/GypKext/GypKext.c
@@ -0,0 +1,16 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sys/systm.h>
+#include <mach/mach_types.h>
+
+kern_return_t GypKext_start(kmod_info_t* ki, void* d) {
+ printf("GypKext has started.\n");
+ return KERN_SUCCESS;
+}
+
+kern_return_t GypKext_stop(kmod_info_t* ki, void* d) {
+ printf("GypKext has stopped.\n");
+ return KERN_SUCCESS;
+}
diff --git a/third_party/python/gyp/test/mac/kext/kext.gyp b/third_party/python/gyp/test/mac/kext/kext.gyp
new file mode 100644
index 0000000000..5b93087543
--- /dev/null
+++ b/third_party/python/gyp/test/mac/kext/kext.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'gypkext',
+ 'product_name': 'GypKext',
+ 'type': 'mac_kernel_extension',
+ 'sources': [
+ 'GypKext/GypKext.c',
+ ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'GypKext/GypKext-Info.plist',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/ldflags-libtool/file.c b/third_party/python/gyp/test/mac/ldflags-libtool/file.c
new file mode 100644
index 0000000000..56757a701b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/ldflags-libtool/file.c
@@ -0,0 +1 @@
+void f() {}
diff --git a/third_party/python/gyp/test/mac/ldflags-libtool/test.gyp b/third_party/python/gyp/test/mac/ldflags-libtool/test.gyp
new file mode 100644
index 0000000000..4e7aa07106
--- /dev/null
+++ b/third_party/python/gyp/test/mac/ldflags-libtool/test.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'ldflags_passed_to_libtool',
+ 'type': 'static_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-fblorfen-horf-does-not-exist',
+ ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/ldflags/subdirectory/Info.plist b/third_party/python/gyp/test/mac/ldflags/subdirectory/Info.plist
new file mode 100644
index 0000000000..5f5e9abfbb
--- /dev/null
+++ b/third_party/python/gyp/test/mac/ldflags/subdirectory/Info.plist
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/ldflags/subdirectory/file.c b/third_party/python/gyp/test/mac/ldflags/subdirectory/file.c
new file mode 100644
index 0000000000..90c45543bf
--- /dev/null
+++ b/third_party/python/gyp/test/mac/ldflags/subdirectory/file.c
@@ -0,0 +1,2 @@
+void f() {}
+void g() {}
diff --git a/third_party/python/gyp/test/mac/ldflags/subdirectory/symbol_list.def b/third_party/python/gyp/test/mac/ldflags/subdirectory/symbol_list.def
new file mode 100644
index 0000000000..0ab7543b1f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/ldflags/subdirectory/symbol_list.def
@@ -0,0 +1 @@
+_f
diff --git a/third_party/python/gyp/test/mac/ldflags/subdirectory/test.gyp b/third_party/python/gyp/test/mac/ldflags/subdirectory/test.gyp
new file mode 100644
index 0000000000..db00c7465c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/ldflags/subdirectory/test.gyp
@@ -0,0 +1,66 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'raw',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
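+ # Flag and file argument are passed as one space-separated string here;
+ # see the wl_space and wl_comma variants below for the -Wl forms.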
+ '-exported_symbols_list symbol_list.def',
+ '-sectcreate __TEXT __info_plist Info.plist',
+ ],
+ },
+ },
+ # TODO(thakis): This form should ideally be supported, too. (But
+ # -Wl,foo,bar,baz is cleaner so people should use that anyway.)
+ #{
+ # 'target_name': 'raw_sep',
+ # 'type': 'shared_library',
+ # 'sources': [ 'file.c', ],
+ # 'xcode_settings': {
+ # 'OTHER_LDFLAGS': [
+ # '-exported_symbols_list', 'symbol_list.def',
+ # '-sectcreate', '__TEXT', '__info_plist', 'Info.plist',
+ # ],
+ # },
+ #},
+ {
+ 'target_name': 'wl_space',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ # Works because clang passes unknown files on to the linker.
+ '-Wl,-exported_symbols_list symbol_list.def',
+ ],
+ },
+ },
+ # TODO(thakis): This form should ideally be supported, too. (But
+ # -Wl,foo,bar,baz is cleaner so people should use that anyway.)
+ #{
+ # 'target_name': 'wl_space_sep',
+ # 'type': 'shared_library',
+ # 'sources': [ 'file.c', ],
+ # 'xcode_settings': {
+ # 'OTHER_LDFLAGS': [
+ # # Works because clang passes unknown files on to the linker.
+ # '-Wl,-exported_symbols_list', 'symbol_list.def',
+ # ],
+ # },
+ #},
+ {
+ 'target_name': 'wl_comma',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'OTHER_LDFLAGS': [
+ '-Wl,-exported_symbols_list,symbol_list.def',
+ '-Wl,-sectcreate,__TEXT,__info_plist,Info.plist',
+ ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/libraries/subdir/README.txt b/third_party/python/gyp/test/mac/libraries/subdir/README.txt
new file mode 100644
index 0000000000..4031ded85f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/libraries/subdir/README.txt
@@ -0,0 +1 @@
+Make things live in a subdirectory, to make sure that DEPTH works correctly.
diff --git a/third_party/python/gyp/test/mac/libraries/subdir/hello.cc b/third_party/python/gyp/test/mac/libraries/subdir/hello.cc
new file mode 100644
index 0000000000..a43554c8ca
--- /dev/null
+++ b/third_party/python/gyp/test/mac/libraries/subdir/hello.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <iostream>
+
+int main() {
+ std::cout << "Hello, world!" << std::endl;
+ return 0;
+}
diff --git a/third_party/python/gyp/test/mac/libraries/subdir/mylib.c b/third_party/python/gyp/test/mac/libraries/subdir/mylib.c
new file mode 100644
index 0000000000..e771991e83
--- /dev/null
+++ b/third_party/python/gyp/test/mac/libraries/subdir/mylib.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int my_foo(int x) {
+ return x + 1;
+}
diff --git a/third_party/python/gyp/test/mac/libraries/subdir/test.gyp b/third_party/python/gyp/test/mac/libraries/subdir/test.gyp
new file mode 100644
index 0000000000..59fef51017
--- /dev/null
+++ b/third_party/python/gyp/test/mac/libraries/subdir/test.gyp
@@ -0,0 +1,65 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'libraries-test',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cc',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ 'libcrypto.dylib',
+ ],
+ },
+ },
+ {
+ # This creates a static library and puts it in a nonstandard location for
+ # libraries-search-path-test.
+ 'target_name': 'mylib',
+ 'type': 'static_library',
+ 'sources': [
+ 'mylib.c',
+ ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Make a secret location',
+ 'action': [
+ 'mkdir',
+ '-p',
+ '${SRCROOT}/../secret_location',
+ ],
+ },
+ {
+ 'postbuild_name': 'Copy to secret location, with secret name',
+ 'action': [
+ 'cp',
+ '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}',
+ '${SRCROOT}/../secret_location/libmysecretlib.a',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'libraries-search-path-test',
+ 'type': 'executable',
+ 'dependencies': [ 'mylib' ],
+ 'sources': [
+ 'hello.cc',
+ ],
+ 'xcode_settings': {
+ 'LIBRARY_SEARCH_PATHS': [
+ '<(DEPTH)/secret_location',
+ ],
+ },
+ 'link_settings': {
+ 'libraries': [
+ 'libmysecretlib.a',
+ ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/libtool-zero/mylib.c b/third_party/python/gyp/test/mac/libtool-zero/mylib.c
new file mode 100644
index 0000000000..b26d61bd6b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/libtool-zero/mylib.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int my_foo(int x) {
+ return x + 1;
+}
diff --git a/third_party/python/gyp/test/mac/libtool-zero/test.gyp b/third_party/python/gyp/test/mac/libtool-zero/test.gyp
new file mode 100644
index 0000000000..0d6ee5535e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/libtool-zero/test.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'mylib',
+ 'type': 'static_library',
+ 'sources': [
+ 'mylib.c',
+ ],
+ 'xcode_settings': {
+ 'ARCHS': [ 'i386', 'x86_64' ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/loadable-module-bundle-product-extension/src.cc b/third_party/python/gyp/test/mac/loadable-module-bundle-product-extension/src.cc
new file mode 100644
index 0000000000..3d878e9697
--- /dev/null
+++ b/third_party/python/gyp/test/mac/loadable-module-bundle-product-extension/src.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int test() {
+ return 1337;
+}
diff --git a/third_party/python/gyp/test/mac/loadable-module-bundle-product-extension/test.gyp b/third_party/python/gyp/test/mac/loadable-module-bundle-product-extension/test.gyp
new file mode 100644
index 0000000000..684a2c02aa
--- /dev/null
+++ b/third_party/python/gyp/test/mac/loadable-module-bundle-product-extension/test.gyp
@@ -0,0 +1,24 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [{
+ 'target_name': 'test',
+ 'type': 'none',
+ 'dependencies': ['child_one', 'child_two'],
+ }, {
+ 'target_name': 'child_one',
+ 'product_name': 'Collide',
+ 'product_extension': 'bar',
+ 'sources': ['src.cc'],
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ }, {
+ 'target_name': 'child_two',
+ 'product_name': 'Collide',
+ 'product_extension': 'foo',
+ 'sources': ['src.cc'],
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ }],
+}
diff --git a/third_party/python/gyp/test/mac/loadable-module/Info.plist b/third_party/python/gyp/test/mac/loadable-module/Info.plist
new file mode 100644
index 0000000000..f6607aebd9
--- /dev/null
+++ b/third_party/python/gyp/test/mac/loadable-module/Info.plist
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.test_loadable_module</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>BRPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1.0</string>
+ <key>CFPlugInDynamicRegisterFunction</key>
+ <string></string>
+ <key>CFPlugInDynamicRegistration</key>
+ <string>NO</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/loadable-module/module.c b/third_party/python/gyp/test/mac/loadable-module/module.c
new file mode 100644
index 0000000000..9584538347
--- /dev/null
+++ b/third_party/python/gyp/test/mac/loadable-module/module.c
@@ -0,0 +1,11 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int SuperFly() {
+ return 42;
+}
+
+const char* SuperFoo() {
+ return "Hello World";
+}
diff --git a/third_party/python/gyp/test/mac/loadable-module/test.gyp b/third_party/python/gyp/test/mac/loadable-module/test.gyp
new file mode 100644
index 0000000000..3c8a5309d2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/loadable-module/test.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_loadable_module',
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ 'sources': [ 'module.c' ],
+ 'product_extension': 'plugin',
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/lto/asmfile.S b/third_party/python/gyp/test/mac/lto/asmfile.S
new file mode 100644
index 0000000000..ea23759a39
--- /dev/null
+++ b/third_party/python/gyp/test/mac/lto/asmfile.S
@@ -0,0 +1,2 @@
+.globl _asfun
+_asfun:
+ret
diff --git a/third_party/python/gyp/test/mac/lto/ccfile.cc b/third_party/python/gyp/test/mac/lto/ccfile.cc
new file mode 100644
index 0000000000..2503afd7b1
--- /dev/null
+++ b/third_party/python/gyp/test/mac/lto/ccfile.cc
@@ -0,0 +1 @@
+void ccfun() {}
diff --git a/third_party/python/gyp/test/mac/lto/cfile.c b/third_party/python/gyp/test/mac/lto/cfile.c
new file mode 100644
index 0000000000..d02ef4b8d6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/lto/cfile.c
@@ -0,0 +1 @@
+void cfun() {}
diff --git a/third_party/python/gyp/test/mac/lto/mfile.m b/third_party/python/gyp/test/mac/lto/mfile.m
new file mode 100644
index 0000000000..85b7d93afe
--- /dev/null
+++ b/third_party/python/gyp/test/mac/lto/mfile.m
@@ -0,0 +1 @@
+void mfun() {}
diff --git a/third_party/python/gyp/test/mac/lto/mmfile.mm b/third_party/python/gyp/test/mac/lto/mmfile.mm
new file mode 100644
index 0000000000..beaa3595f8
--- /dev/null
+++ b/third_party/python/gyp/test/mac/lto/mmfile.mm
@@ -0,0 +1 @@
+void mmfun() {}
diff --git a/third_party/python/gyp/test/mac/lto/test.gyp b/third_party/python/gyp/test/mac/lto/test.gyp
new file mode 100644
index 0000000000..0a8e85183d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/lto/test.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'lto',
+ 'type': 'shared_library',
+ 'sources': [
+ 'cfile.c',
+ 'mfile.m',
+ 'ccfile.cc',
+ 'mmfile.mm',
+ 'asmfile.S',
+ ],
+ 'xcode_settings': {
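+ # LLVM_LTO should add -flto to every compile step; the mixed
+ # C/ObjC/C++/ObjC++/assembly sources above cover each tool invocation.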
+ 'LLVM_LTO': 'YES',
+ },
+ },
+ {
+ 'target_name': 'lto_static',
+ 'type': 'static_library',
+ 'sources': [
+ 'cfile.c',
+ 'mfile.m',
+ 'ccfile.cc',
+ 'mmfile.mm',
+ 'asmfile.S',
+ ],
+ 'xcode_settings': {
+ 'LLVM_LTO': 'YES',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/missing-cfbundlesignature/Info.plist b/third_party/python/gyp/test/mac/missing-cfbundlesignature/Info.plist
new file mode 100644
index 0000000000..0c31674884
--- /dev/null
+++ b/third_party/python/gyp/test/mac/missing-cfbundlesignature/Info.plist
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/missing-cfbundlesignature/Other-Info.plist b/third_party/python/gyp/test/mac/missing-cfbundlesignature/Other-Info.plist
new file mode 100644
index 0000000000..47095281c8
--- /dev/null
+++ b/third_party/python/gyp/test/mac/missing-cfbundlesignature/Other-Info.plist
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleSignature</key>
+ <string>F</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/missing-cfbundlesignature/Third-Info.plist b/third_party/python/gyp/test/mac/missing-cfbundlesignature/Third-Info.plist
new file mode 100644
index 0000000000..5b61fe2664
--- /dev/null
+++ b/third_party/python/gyp/test/mac/missing-cfbundlesignature/Third-Info.plist
@@ -0,0 +1,12 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleSignature</key>
+ <string>some really long string</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/missing-cfbundlesignature/file.c b/third_party/python/gyp/test/mac/missing-cfbundlesignature/file.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/mac/missing-cfbundlesignature/file.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/mac/missing-cfbundlesignature/test.gyp b/third_party/python/gyp/test/mac/missing-cfbundlesignature/test.gyp
new file mode 100644
index 0000000000..b50cc2791a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/missing-cfbundlesignature/test.gyp
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'mytarget',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ },
+ },
+ {
+ 'target_name': 'myothertarget',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Other-Info.plist',
+ },
+ },
+ {
+ 'target_name': 'thirdtarget',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Third-Info.plist',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/non-strs-flattened-to-env/Info.plist b/third_party/python/gyp/test/mac/non-strs-flattened-to-env/Info.plist
new file mode 100644
index 0000000000..11fc4b660d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/non-strs-flattened-to-env/Info.plist
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <!-- Not a valid plist file since it's missing so much. That's fine. -->
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>My Variable</key>
+ <string>${MY_VAR}</string>
+ <key>CFlags</key>
+ <string>${OTHER_CFLAGS}</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/non-strs-flattened-to-env/main.c b/third_party/python/gyp/test/mac/non-strs-flattened-to-env/main.c
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/mac/non-strs-flattened-to-env/main.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/mac/non-strs-flattened-to-env/test.gyp b/third_party/python/gyp/test/mac/non-strs-flattened-to-env/test.gyp
new file mode 100644
index 0000000000..aaf821c925
--- /dev/null
+++ b/third_party/python/gyp/test/mac/non-strs-flattened-to-env/test.gyp
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'main.c', ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ 'MY_VAR': 'some expansion',
+ 'OTHER_CFLAGS': [
+ # Just some (more than one) random flags.
+ '-fstack-protector-all',
+ '-fno-strict-aliasing',
+ '-DS="A Space"', # Would normally be in 'defines'
+ ],
+ },
+ 'include_dirs': [
+ '$(SDKROOT)/usr/include/libxml2',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/objc-arc/c-file.c b/third_party/python/gyp/test/mac/objc-arc/c-file.c
new file mode 100644
index 0000000000..208935946f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/c-file.c
@@ -0,0 +1,5 @@
+#if __has_feature(objc_arc) || __has_feature(objc_arc_weak)
+#error "C files shouldn't be ARC'd!"
+#endif
+
+void c_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-arc/cc-file.cc b/third_party/python/gyp/test/mac/objc-arc/cc-file.cc
new file mode 100644
index 0000000000..70deb60b9a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/cc-file.cc
@@ -0,0 +1,5 @@
+#if __has_feature(objc_arc) || __has_feature(objc_arc_weak)
+#error "C++ files shouldn't be ARC'd!"
+#endif
+
+void cc_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-arc/m-file-arc-weak.m b/third_party/python/gyp/test/mac/objc-arc/m-file-arc-weak.m
new file mode 100644
index 0000000000..4ab7c51a82
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/m-file-arc-weak.m
@@ -0,0 +1,9 @@
+#if __has_feature(objc_arc)
+#error "ObjC files without CLANG_ENABLE_OBJC_ARC should not be ARC'd!"
+#endif
+
+#if !__has_feature(objc_arc_weak)
+#error "With CLANG_ENABLE_OBJC_WEAK, weak references should be enabled."
+#endif
+
+void m_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-arc/m-file-no-arc.m b/third_party/python/gyp/test/mac/objc-arc/m-file-no-arc.m
new file mode 100644
index 0000000000..4f067abb67
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/m-file-no-arc.m
@@ -0,0 +1,9 @@
+#if __has_feature(objc_arc)
+#error "ObjC files without CLANG_ENABLE_OBJC_ARC should not be ARC'd!"
+#endif
+
+#if __has_feature(objc_arc_weak)
+#error "Without CLANG_ENABLE_OBJC_WEAK, weak references should be disabled."
+#endif
+
+void m_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-arc/m-file.m b/third_party/python/gyp/test/mac/objc-arc/m-file.m
new file mode 100644
index 0000000000..8234ba582d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/m-file.m
@@ -0,0 +1,9 @@
+#if !__has_feature(objc_arc)
+#error "ObjC files with CLANG_ENABLE_OBJC_ARC should be ARC'd!"
+#endif
+
+#if !__has_feature(objc_arc_weak)
+#error "Weak references should always be enabled for ARC."
+#endif
+
+void m_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-arc/mm-file-arc-weak.mm b/third_party/python/gyp/test/mac/objc-arc/mm-file-arc-weak.mm
new file mode 100644
index 0000000000..f9d4b6759a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/mm-file-arc-weak.mm
@@ -0,0 +1,9 @@
+#if __has_feature(objc_arc)
+#error "ObjC++ files without CLANG_ENABLE_OBJC_ARC should not be ARC'd!"
+#endif
+
+#if !__has_feature(objc_arc_weak)
+#error "With CLANG_ENABLE_OBJC_WEAK, weak references should be enabled."
+#endif
+
+void mm_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-arc/mm-file-no-arc.mm b/third_party/python/gyp/test/mac/objc-arc/mm-file-no-arc.mm
new file mode 100644
index 0000000000..823f52feba
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/mm-file-no-arc.mm
@@ -0,0 +1,9 @@
+#if __has_feature(objc_arc)
+#error "ObjC++ files without CLANG_ENABLE_OBJC_ARC should not be ARC'd!"
+#endif
+
+#if __has_feature(objc_arc_weak)
+#error "Without CLANG_ENABLE_OBJC_WEAK, weak references should be disabled."
+#endif
+
+void mm_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-arc/mm-file.mm b/third_party/python/gyp/test/mac/objc-arc/mm-file.mm
new file mode 100644
index 0000000000..90150ef0b6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/mm-file.mm
@@ -0,0 +1,9 @@
+#if !__has_feature(objc_arc)
+#error "ObjC++ files with CLANG_ENABLE_OBJC_ARC should be ARC'd!"
+#endif
+
+#if !__has_feature(objc_arc_weak)
+#error "Weak references should always be enabled for ARC."
+#endif
+
+void mm_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-arc/test.gyp b/third_party/python/gyp/test/mac/objc-arc/test.gyp
new file mode 100644
index 0000000000..87ddda962e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-arc/test.gyp
@@ -0,0 +1,53 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
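+ # The sources check ARC and weak-reference support via clang's
+ # __has_feature, so pin both compilers to clang explicitly.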
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+
+ 'targets': [
+ {
+ 'target_name': 'arc_enabled',
+ 'type': 'static_library',
+ 'sources': [
+ 'c-file.c',
+ 'cc-file.cc',
+ 'm-file.m',
+ 'mm-file.mm',
+ ],
+ 'xcode_settings': {
+ 'CLANG_ENABLE_OBJC_ARC': 'YES',
+ },
+ },
+
+ {
+ 'target_name': 'weak_enabled',
+ 'type': 'static_library',
+ 'sources': [
+ 'c-file.c',
+ 'cc-file.cc',
+ 'm-file-arc-weak.m',
+ 'mm-file-arc-weak.mm',
+ ],
+ 'xcode_settings': {
+ 'CLANG_ENABLE_OBJC_WEAK': 'YES',
+ },
+ },
+
+ {
+ 'target_name': 'arc_disabled',
+ 'type': 'static_library',
+ 'sources': [
+ 'c-file.c',
+ 'cc-file.cc',
+ 'm-file-no-arc.m',
+ 'mm-file-no-arc.mm',
+ ],
+ 'xcode_settings': {
+ },
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/objc-gc/c-file.c b/third_party/python/gyp/test/mac/objc-gc/c-file.c
new file mode 100644
index 0000000000..2855a00eaa
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-gc/c-file.c
@@ -0,0 +1 @@
+void c_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-gc/cc-file.cc b/third_party/python/gyp/test/mac/objc-gc/cc-file.cc
new file mode 100644
index 0000000000..71e47a0126
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-gc/cc-file.cc
@@ -0,0 +1 @@
+void cc_fun() {}
diff --git a/third_party/python/gyp/test/mac/objc-gc/main.m b/third_party/python/gyp/test/mac/objc-gc/main.m
new file mode 100644
index 0000000000..1a87f8e70f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-gc/main.m
@@ -0,0 +1,6 @@
+#import <Foundation/Foundation.h>
+
+int main() {
+ printf("gc on: %d\n", [NSGarbageCollector defaultCollector] != NULL);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/mac/objc-gc/needs-gc-mm.mm b/third_party/python/gyp/test/mac/objc-gc/needs-gc-mm.mm
new file mode 100644
index 0000000000..fc3fee9f34
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-gc/needs-gc-mm.mm
@@ -0,0 +1 @@
+void objcpp_fun() { }
diff --git a/third_party/python/gyp/test/mac/objc-gc/needs-gc.m b/third_party/python/gyp/test/mac/objc-gc/needs-gc.m
new file mode 100644
index 0000000000..ca77976b1d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-gc/needs-gc.m
@@ -0,0 +1 @@
+void objc_fun() { }
diff --git a/third_party/python/gyp/test/mac/objc-gc/test.gyp b/third_party/python/gyp/test/mac/objc-gc/test.gyp
new file mode 100644
index 0000000000..4d827c1b39
--- /dev/null
+++ b/third_party/python/gyp/test/mac/objc-gc/test.gyp
@@ -0,0 +1,102 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ # For some reason, when static_library targets built with gc=required are
+ # linked into executables that don't use gc, the linker doesn't complain.
+ # For shared_libraries it does, so use that type here.
+ {
+ 'target_name': 'no_gc_lib',
+ 'type': 'shared_library',
+ 'sources': [
+ 'c-file.c',
+ 'cc-file.cc',
+ 'needs-gc-mm.mm',
+ 'needs-gc.m',
+ ],
+ },
+ {
+ 'target_name': 'gc_lib',
+ 'type': 'shared_library',
+ 'sources': [
+ 'c-file.c',
+ 'cc-file.cc',
+ 'needs-gc-mm.mm',
+ 'needs-gc.m',
+ ],
+ 'xcode_settings': {
+ 'GCC_ENABLE_OBJC_GC': 'supported',
+ },
+ },
+ {
+ 'target_name': 'gc_req_lib',
+ 'type': 'shared_library',
+ 'sources': [
+ 'c-file.c',
+ 'cc-file.cc',
+ 'needs-gc-mm.mm',
+ 'needs-gc.m',
+ ],
+ 'xcode_settings': {
+ 'GCC_ENABLE_OBJC_GC': 'required',
+ },
+ },
+
+ {
+ 'target_name': 'gc_exe_fails',
+ 'type': 'executable',
+ 'sources': [ 'main.m' ],
+ 'dependencies': [ 'no_gc_lib' ],
+ 'xcode_settings': {
+ 'GCC_ENABLE_OBJC_GC': 'required',
+ },
+ 'libraries': [ 'Foundation.framework' ],
+ },
+ {
+ 'target_name': 'gc_req_exe',
+ 'type': 'executable',
+ 'sources': [ 'main.m' ],
+ 'dependencies': [ 'gc_lib' ],
+ 'xcode_settings': {
+ 'GCC_ENABLE_OBJC_GC': 'required',
+ },
+ 'libraries': [ 'Foundation.framework' ],
+ },
+ {
+ 'target_name': 'gc_exe_req_lib',
+ 'type': 'executable',
+ 'sources': [ 'main.m' ],
+ 'dependencies': [ 'gc_req_lib' ],
+ 'xcode_settings': {
+ 'GCC_ENABLE_OBJC_GC': 'supported',
+ },
+ 'libraries': [ 'Foundation.framework' ],
+ },
+ {
+ 'target_name': 'gc_exe',
+ 'type': 'executable',
+ 'sources': [ 'main.m' ],
+ 'dependencies': [ 'gc_lib' ],
+ 'xcode_settings': {
+ 'GCC_ENABLE_OBJC_GC': 'supported',
+ },
+ 'libraries': [ 'Foundation.framework' ],
+ },
+ {
+ 'target_name': 'gc_off_exe_req_lib',
+ 'type': 'executable',
+ 'sources': [ 'main.m' ],
+ 'dependencies': [ 'gc_req_lib' ],
+ 'libraries': [ 'Foundation.framework' ],
+ },
+ {
+ 'target_name': 'gc_off_exe',
+ 'type': 'executable',
+ 'sources': [ 'main.m' ],
+ 'dependencies': [ 'gc_lib' ],
+ 'libraries': [ 'Foundation.framework' ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/postbuild-copy-bundle/Framework-Info.plist b/third_party/python/gyp/test/mac/postbuild-copy-bundle/Framework-Info.plist
new file mode 100644
index 0000000000..ec36829c08
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-copy-bundle/Framework-Info.plist
@@ -0,0 +1,30 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.yourcompany.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>FMWK</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>NSPrincipalClass</key>
+ <string></string>
+ <key>RandomKey</key>
+ <string>RandomValue</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/postbuild-copy-bundle/TestApp-Info.plist b/third_party/python/gyp/test/mac/postbuild-copy-bundle/TestApp-Info.plist
new file mode 100644
index 0000000000..98fd515200
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-copy-bundle/TestApp-Info.plist
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSMinimumSystemVersion</key>
+ <string>${MACOSX_DEPLOYMENT_TARGET}</string>
+ <key>NSMainNibFile</key>
+ <string>MainMenu</string>
+ <key>NSPrincipalClass</key>
+ <string>NSApplication</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/postbuild-copy-bundle/copied.txt b/third_party/python/gyp/test/mac/postbuild-copy-bundle/copied.txt
new file mode 100644
index 0000000000..178413886a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-copy-bundle/copied.txt
@@ -0,0 +1 @@
+old copied file
diff --git a/third_party/python/gyp/test/mac/postbuild-copy-bundle/empty.c b/third_party/python/gyp/test/mac/postbuild-copy-bundle/empty.c
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-copy-bundle/empty.c
diff --git a/third_party/python/gyp/test/mac/postbuild-copy-bundle/main.c b/third_party/python/gyp/test/mac/postbuild-copy-bundle/main.c
new file mode 100644
index 0000000000..21c1963526
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-copy-bundle/main.c
@@ -0,0 +1,4 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+int main() {}
diff --git a/third_party/python/gyp/test/mac/postbuild-copy-bundle/postbuild-copy-framework.sh b/third_party/python/gyp/test/mac/postbuild-copy-bundle/postbuild-copy-framework.sh
new file mode 100755
index 0000000000..930fec6612
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-copy-bundle/postbuild-copy-framework.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
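+# rsync flags: -a archive mode, -c compare by checksum, -C CVS-style
+# excludes, --delete prune destination files that vanished from the source.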
+rsync -acC --delete "$1" "$2"
diff --git a/third_party/python/gyp/test/mac/postbuild-copy-bundle/resource_file.sb b/third_party/python/gyp/test/mac/postbuild-copy-bundle/resource_file.sb
new file mode 100644
index 0000000000..42057fa235
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-copy-bundle/resource_file.sb
@@ -0,0 +1 @@
+This is included in the framework bundle.
diff --git a/third_party/python/gyp/test/mac/postbuild-copy-bundle/test.gyp b/third_party/python/gyp/test/mac/postbuild-copy-bundle/test.gyp
new file mode 100644
index 0000000000..a03e6432f2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-copy-bundle/test.gyp
@@ -0,0 +1,49 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_bundle',
+ 'product_name': 'My Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'empty.c', ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Framework-Info.plist',
+ },
+ 'mac_bundle_resources': [
+ 'resource_file.sb',
+ ],
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/$(CONTENTS_FOLDER_PATH)/Libraries',
+ 'files': [ 'copied.txt' ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'dependencies': [
+ 'test_bundle',
+ ],
+ 'sources': [ 'main.c', ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'TestApp-Info.plist',
+ },
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Copy dependent framework into app',
+ 'action': [
+ './postbuild-copy-framework.sh',
+ '${BUILT_PRODUCTS_DIR}/My Framework.framework',
+ '${BUILT_PRODUCTS_DIR}/${CONTENTS_FOLDER_PATH}/',
+ ],
+ },
+ ],
+ },
+ ],
+}
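
The postbuild above runs after Test App.app is assembled: gyp expands the Xcode-style ${VAR} references and invokes the script, which rsyncs the framework into the app's Contents folder (rsync -a archive mode, -c checksum-based comparison, -C CVS-style excludes, --delete to prune stale files). A minimal Python sketch of that expansion step — not gyp's actual implementation, and it assumes the build settings are present in the environment:

    import os
    import subprocess

    action = ['./postbuild-copy-framework.sh',
              '${BUILT_PRODUCTS_DIR}/My Framework.framework',
              '${BUILT_PRODUCTS_DIR}/${CONTENTS_FOLDER_PATH}/']
    # os.path.expandvars resolves ${VAR} from the environment, mirroring how
    # the postbuild sees Xcode's build settings; each list element stays a
    # single argv entry, so the space in 'My Framework' survives intact.
    argv = [os.path.expandvars(arg) for arg in action]
    subprocess.check_call(argv)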
diff --git a/third_party/python/gyp/test/mac/postbuild-defaults/Info.plist b/third_party/python/gyp/test/mac/postbuild-defaults/Info.plist
new file mode 100644
index 0000000000..d3f54d76cd
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-defaults/Info.plist
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <!-- Not a valid plist file since it's missing so much. That's fine. -->
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/postbuild-defaults/main.c b/third_party/python/gyp/test/mac/postbuild-defaults/main.c
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-defaults/main.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/mac/postbuild-defaults/postbuild-defaults.sh b/third_party/python/gyp/test/mac/postbuild-defaults/postbuild-defaults.sh
new file mode 100755
index 0000000000..56af2a8329
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-defaults/postbuild-defaults.sh
@@ -0,0 +1,15 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+# This is the built Info.plist in the output directory.
+PLIST="${BUILT_PRODUCTS_DIR}"/Test.app/Contents/Info # No trailing .plist
+echo "$(defaults read "${PLIST}" "CFBundleName")" > "${BUILT_PRODUCTS_DIR}/result"
+
+# This is the source Info.plist next to this script file.
+PLIST="${SRCROOT}"/Info # No trailing .plist
+echo "$(defaults read "${PLIST}" "CFBundleName")" \
+  >> "${BUILT_PRODUCTS_DIR}/result"
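
As the comments in the script note, `defaults read` takes the plist path without its .plist extension. A small Python equivalent of the two reads above, for illustration only (the helper name is made up):

    import subprocess

    def read_bundle_name(plist_path_no_ext):
        # 'defaults read' expects the path *minus* the trailing .plist.
        out = subprocess.check_output(
            ['defaults', 'read', plist_path_no_ext, 'CFBundleName'])
        return out.decode().strip()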
diff --git a/third_party/python/gyp/test/mac/postbuild-defaults/test.gyp b/third_party/python/gyp/test/mac/postbuild-defaults/test.gyp
new file mode 100644
index 0000000000..be0a075efc
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-defaults/test.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'main.c', ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ },
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Postbuild that calls defaults',
+ 'action': [
+ './postbuild-defaults.sh',
+ '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/postbuild-fail/file.c b/third_party/python/gyp/test/mac/postbuild-fail/file.c
new file mode 100644
index 0000000000..91695b10c6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-fail/file.c
@@ -0,0 +1,6 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// That's right, this is copyrighted.
+void f() {}
diff --git a/third_party/python/gyp/test/mac/postbuild-fail/postbuild-fail.sh b/third_party/python/gyp/test/mac/postbuild-fail/postbuild-fail.sh
new file mode 100755
index 0000000000..dc1a60d987
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-fail/postbuild-fail.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+exit 1
diff --git a/third_party/python/gyp/test/mac/postbuild-fail/test.gyp b/third_party/python/gyp/test/mac/postbuild-fail/test.gyp
new file mode 100644
index 0000000000..e63283db03
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-fail/test.gyp
@@ -0,0 +1,38 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'nonbundle',
+ 'type': 'static_library',
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Postbuild Fail',
+ 'action': [ './postbuild-fail.sh', ],
+ },
+ {
+ 'postbuild_name': 'Runs after failing postbuild',
+ 'action': [ './touch-static.sh', ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'bundle',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Postbuild Fail',
+ 'action': [ './postbuild-fail.sh', ],
+ },
+ {
+ 'postbuild_name': 'Runs after failing postbuild',
+ 'action': [ './touch-dynamic.sh', ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/postbuild-fail/touch-dynamic.sh b/third_party/python/gyp/test/mac/postbuild-fail/touch-dynamic.sh
new file mode 100755
index 0000000000..a388a64102
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-fail/touch-dynamic.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+touch "${BUILT_PRODUCTS_DIR}/dynamic_touch"
diff --git a/third_party/python/gyp/test/mac/postbuild-fail/touch-static.sh b/third_party/python/gyp/test/mac/postbuild-fail/touch-static.sh
new file mode 100755
index 0000000000..97ecaa6868
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-fail/touch-static.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+touch "${BUILT_PRODUCTS_DIR}/static_touch"
diff --git a/third_party/python/gyp/test/mac/postbuild-multiple-configurations/main.c b/third_party/python/gyp/test/mac/postbuild-multiple-configurations/main.c
new file mode 100644
index 0000000000..21c1963526
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-multiple-configurations/main.c
@@ -0,0 +1,4 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+int main() {}
diff --git a/third_party/python/gyp/test/mac/postbuild-multiple-configurations/postbuild-touch-file.sh b/third_party/python/gyp/test/mac/postbuild-multiple-configurations/postbuild-touch-file.sh
new file mode 100755
index 0000000000..b6170cf7a7
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-multiple-configurations/postbuild-touch-file.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+touch "${BUILT_PRODUCTS_DIR}/postbuild-file"
diff --git a/third_party/python/gyp/test/mac/postbuild-multiple-configurations/test.gyp b/third_party/python/gyp/test/mac/postbuild-multiple-configurations/test.gyp
new file mode 100644
index 0000000000..c350b20d68
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-multiple-configurations/test.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'target_defaults': {
+ 'configurations': {
+ 'Debug': {},
+ 'Release': {},
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'random_target',
+ 'type': 'executable',
+ 'sources': [ 'main.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Touch a file.',
+ 'action': [
+ './postbuild-touch-file.sh',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/postbuild-static-library/empty.c b/third_party/python/gyp/test/mac/postbuild-static-library/empty.c
new file mode 100644
index 0000000000..9554336c0c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-static-library/empty.c
@@ -0,0 +1,4 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+void f() {}
diff --git a/third_party/python/gyp/test/mac/postbuild-static-library/postbuild-touch-file.sh b/third_party/python/gyp/test/mac/postbuild-static-library/postbuild-touch-file.sh
new file mode 100755
index 0000000000..37de4de4f6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-static-library/postbuild-touch-file.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+touch "${BUILT_PRODUCTS_DIR}/$1"
diff --git a/third_party/python/gyp/test/mac/postbuild-static-library/test.gyp b/third_party/python/gyp/test/mac/postbuild-static-library/test.gyp
new file mode 100644
index 0000000000..9ef55a0afa
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuild-static-library/test.gyp
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'my_lib',
+ 'type': 'static_library',
+ 'sources': [ 'empty.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Postbuild that touches a file',
+ 'action': [
+ './postbuild-touch-file.sh', 'postbuild-file'
+ ],
+ },
+ ],
+ },
+
+ {
+ 'target_name': 'my_sourceless_lib',
+ 'type': 'static_library',
+ 'dependencies': [ 'my_lib' ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Postbuild that touches a file',
+ 'action': [
+ './postbuild-touch-file.sh', 'postbuild-file-sourceless'
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/postbuilds/copy.sh b/third_party/python/gyp/test/mac/postbuilds/copy.sh
new file mode 100755
index 0000000000..ecad0381db
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/copy.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+cp "$@"
diff --git a/third_party/python/gyp/test/mac/postbuilds/file.c b/third_party/python/gyp/test/mac/postbuilds/file.c
new file mode 100644
index 0000000000..653e71ff7e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/file.c
@@ -0,0 +1,4 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+void f() {}
diff --git a/third_party/python/gyp/test/mac/postbuilds/file_g.c b/third_party/python/gyp/test/mac/postbuilds/file_g.c
new file mode 100644
index 0000000000..0f7849d208
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/file_g.c
@@ -0,0 +1,4 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+void g() {}
diff --git a/third_party/python/gyp/test/mac/postbuilds/file_h.c b/third_party/python/gyp/test/mac/postbuilds/file_h.c
new file mode 100644
index 0000000000..521d1f4d56
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/file_h.c
@@ -0,0 +1,4 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+void h() {}
diff --git a/third_party/python/gyp/test/mac/postbuilds/script/shared_library_postbuild.sh b/third_party/python/gyp/test/mac/postbuilds/script/shared_library_postbuild.sh
new file mode 100755
index 0000000000..c623c8bf21
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/script/shared_library_postbuild.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+lib="${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}"
+nm "${lib}" > /dev/null # Just make sure this works.
+
+pattern="${1}"
+
+if [ "${pattern}" != "a|b" ]; then
+ echo "Parameter quoting is broken"
+ exit 1
+fi
+
+if [ "${2}" != "arg with spaces" ]; then
+ echo "Parameter space escaping is broken"
+ exit 1
+fi
+
+touch "${lib}"_touch
diff --git a/third_party/python/gyp/test/mac/postbuilds/script/static_library_postbuild.sh b/third_party/python/gyp/test/mac/postbuilds/script/static_library_postbuild.sh
new file mode 100755
index 0000000000..2bf09b34e1
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/script/static_library_postbuild.sh
@@ -0,0 +1,23 @@
+#!/bin/bash
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+lib="${BUILT_PRODUCTS_DIR}/${FULL_PRODUCT_NAME}"
+nm "${lib}" > /dev/null # Just make sure this works.
+
+pattern="${1}"
+
+if [ "${pattern}" != "a|b" ]; then
+ echo "Parameter quote escaping is broken"
+ exit 1
+fi
+
+if [ "${2}" != "arg with spaces" ]; then
+ echo "Parameter space escaping is broken"
+ exit 1
+fi
+
+touch "${lib}"_touch.a
diff --git a/third_party/python/gyp/test/mac/postbuilds/subdirectory/copied_file.txt b/third_party/python/gyp/test/mac/postbuilds/subdirectory/copied_file.txt
new file mode 100644
index 0000000000..a634f85b6c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/subdirectory/copied_file.txt
@@ -0,0 +1 @@
+This file should be copied to the products dir.
diff --git a/third_party/python/gyp/test/mac/postbuilds/subdirectory/nested_target.gyp b/third_party/python/gyp/test/mac/postbuilds/subdirectory/nested_target.gyp
new file mode 100644
index 0000000000..6d4f2395e3
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/subdirectory/nested_target.gyp
@@ -0,0 +1,53 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'nest_el',
+ 'type': 'static_library',
+ 'sources': [ '../file_g.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Static library postbuild',
+ 'variables': {
+ 'some_regex': 'a|b',
+ },
+ 'action': [
+ '../script/static_library_postbuild.sh',
+ '<(some_regex)',
+ 'arg with spaces',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'nest_dyna',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ '../file_h.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Dynamic library postbuild',
+ 'variables': {
+ 'some_regex': 'a|b',
+ },
+ 'action': [
+ '../script/shared_library_postbuild.sh',
+ '<(some_regex)',
+ 'arg with spaces',
+ ],
+ },
+ {
+ 'postbuild_name': 'Test paths relative to gyp file',
+ 'action': [
+ '../copy.sh',
+ './copied_file.txt',
+ '${BUILT_PRODUCTS_DIR}/copied_file_2.txt',
+ ],
+ },
+ ],
+ },
+ ],
+}
+
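These postbuilds exercise gyp's '<(name)' variable expansion together with argument quoting: 'a|b' and 'arg with spaces' must each reach the scripts as a single, unmangled parameter. A minimal sketch of the expansion as used here — not gyp's real expander, which also handles command and list forms:

    import re

    def expand_angle_vars(value, variables):
        # '<(some_regex)' with {'some_regex': 'a|b'} becomes the single
        # argument 'a|b', which the postbuild scripts then verify.
        return re.sub(r'<\((\w+)\)', lambda m: variables[m.group(1)], value)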
diff --git a/third_party/python/gyp/test/mac/postbuilds/test.gyp b/third_party/python/gyp/test/mac/postbuilds/test.gyp
new file mode 100644
index 0000000000..7c0b523f86
--- /dev/null
+++ b/third_party/python/gyp/test/mac/postbuilds/test.gyp
@@ -0,0 +1,93 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'el',
+ 'type': 'static_library',
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Static library postbuild',
+ 'variables': {
+ 'some_regex': 'a|b',
+ },
+ 'action': [
+ 'script/static_library_postbuild.sh',
+ '<(some_regex)',
+ 'arg with spaces',
+ ],
+ },
+ {
+ 'postbuild_name': 'Test variable in gyp file',
+ 'action': [
+ 'cp',
+ '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}',
+ '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}_gyp_touch.a',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'dyna',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'dependencies': [
+ 'subdirectory/nested_target.gyp:nest_dyna',
+ 'subdirectory/nested_target.gyp:nest_el',
+ ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Dynamic library postbuild',
+ 'variables': {
+ 'some_regex': 'a|b',
+ },
+ 'action': [
+ 'script/shared_library_postbuild.sh',
+ '<(some_regex)',
+ 'arg with spaces',
+ ],
+ },
+ {
+ 'postbuild_name': 'Test variable in gyp file',
+ 'action': [
+ 'cp',
+ '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}',
+ '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}_gyp_touch',
+ ],
+ },
+ {
+ 'postbuild_name': 'Test paths relative to gyp file',
+ 'action': [
+ './copy.sh',
+ 'subdirectory/copied_file.txt',
+ '${BUILT_PRODUCTS_DIR}',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'dyna_standalone',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Test variable in gyp file',
+ 'action': [
+ 'cp',
+ '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}',
+ '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}_gyp_touch.dylib',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'EmptyBundle',
+ 'product_extension': 'bundle',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/prefixheader/file.c b/third_party/python/gyp/test/mac/prefixheader/file.c
new file mode 100644
index 0000000000..d0b39d1f6d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/prefixheader/file.c
@@ -0,0 +1 @@
+MyInt f() { return 0; }
diff --git a/third_party/python/gyp/test/mac/prefixheader/file.cc b/third_party/python/gyp/test/mac/prefixheader/file.cc
new file mode 100644
index 0000000000..d0b39d1f6d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/prefixheader/file.cc
@@ -0,0 +1 @@
+MyInt f() { return 0; }
diff --git a/third_party/python/gyp/test/mac/prefixheader/file.m b/third_party/python/gyp/test/mac/prefixheader/file.m
new file mode 100644
index 0000000000..d0b39d1f6d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/prefixheader/file.m
@@ -0,0 +1 @@
+MyInt f() { return 0; }
diff --git a/third_party/python/gyp/test/mac/prefixheader/file.mm b/third_party/python/gyp/test/mac/prefixheader/file.mm
new file mode 100644
index 0000000000..d0b39d1f6d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/prefixheader/file.mm
@@ -0,0 +1 @@
+MyInt f() { return 0; }
diff --git a/third_party/python/gyp/test/mac/prefixheader/header.h b/third_party/python/gyp/test/mac/prefixheader/header.h
new file mode 100644
index 0000000000..0716e500c5
--- /dev/null
+++ b/third_party/python/gyp/test/mac/prefixheader/header.h
@@ -0,0 +1 @@
+typedef int MyInt;
diff --git a/third_party/python/gyp/test/mac/prefixheader/test.gyp b/third_party/python/gyp/test/mac/prefixheader/test.gyp
new file mode 100644
index 0000000000..7e6b1af807
--- /dev/null
+++ b/third_party/python/gyp/test/mac/prefixheader/test.gyp
@@ -0,0 +1,82 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'prefix_header_c',
+ 'type': 'static_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ },
+ },
+ {
+ 'target_name': 'precompiled_prefix_header_c',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ 'GCC_PRECOMPILE_PREFIX_HEADER': 'YES',
+ },
+ },
+
+ {
+ 'target_name': 'prefix_header_cc',
+ 'type': 'static_library',
+ 'sources': [ 'file.cc', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ },
+ },
+ {
+ 'target_name': 'precompiled_prefix_header_cc',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.cc', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ 'GCC_PRECOMPILE_PREFIX_HEADER': 'YES',
+ },
+ },
+
+ {
+ 'target_name': 'prefix_header_m',
+ 'type': 'static_library',
+ 'sources': [ 'file.m', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ },
+ },
+ {
+ 'target_name': 'precompiled_prefix_header_m',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.m', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ 'GCC_PRECOMPILE_PREFIX_HEADER': 'YES',
+ },
+ },
+
+ {
+ 'target_name': 'prefix_header_mm',
+ 'type': 'static_library',
+ 'sources': [ 'file.mm', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ },
+ },
+ {
+ 'target_name': 'precompiled_prefix_header_mm',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.mm', ],
+ 'xcode_settings': {
+ 'GCC_PREFIX_HEADER': 'header.h',
+ 'GCC_PRECOMPILE_PREFIX_HEADER': 'YES',
+ },
+ },
+ ],
+}
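
GCC_PREFIX_HEADER makes the compiler treat header.h as if it were #included at the top of every source file, which is why each file here can use MyInt without including anything; GCC_PRECOMPILE_PREFIX_HEADER=YES additionally precompiles the header once and reuses it. Roughly the flags involved, as a sketch (exact driver behavior varies by toolchain):

    def prefix_header_flags(header, pch=None):
        # Plain prefix header: inject '-include header.h' into every compile.
        # Precompiled: build the .pch first, then point clang at it with
        # '-include-pch'.
        return ['-include-pch', pch] if pch else ['-include', header]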
diff --git a/third_party/python/gyp/test/mac/rebuild/TestApp-Info.plist b/third_party/python/gyp/test/mac/rebuild/TestApp-Info.plist
new file mode 100644
index 0000000000..98fd515200
--- /dev/null
+++ b/third_party/python/gyp/test/mac/rebuild/TestApp-Info.plist
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSMinimumSystemVersion</key>
+ <string>${MACOSX_DEPLOYMENT_TARGET}</string>
+ <key>NSMainNibFile</key>
+ <string>MainMenu</string>
+ <key>NSPrincipalClass</key>
+ <string>NSApplication</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/rebuild/delay-touch.sh b/third_party/python/gyp/test/mac/rebuild/delay-touch.sh
new file mode 100755
index 0000000000..7caf105b6e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/rebuild/delay-touch.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+set -e
+
+sleep 1 # mtime resolution is 1 sec on unix.
+touch "$1"
diff --git a/third_party/python/gyp/test/mac/rebuild/empty.c b/third_party/python/gyp/test/mac/rebuild/empty.c
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/rebuild/empty.c
diff --git a/third_party/python/gyp/test/mac/rebuild/main.c b/third_party/python/gyp/test/mac/rebuild/main.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/mac/rebuild/main.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/mac/rebuild/test.gyp b/third_party/python/gyp/test/mac/rebuild/test.gyp
new file mode 100644
index 0000000000..15b4e4ef2f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/rebuild/test.gyp
@@ -0,0 +1,56 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test App',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'main.c',
+ ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'TestApp-Info.plist',
+ },
+ },
+ {
+ 'target_name': 'test_app_postbuilds',
+ 'product_name': 'Test App 2',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'main.c',
+ ],
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'TestApp-Info.plist',
+ },
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Postbuild that touches the app binary',
+ 'action': [
+ './delay-touch.sh', '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'test_framework_postbuilds',
+ 'product_name': 'Test Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'empty.c',
+ ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Postbuild that touches the framework binary',
+ 'action': [
+ './delay-touch.sh', '${BUILT_PRODUCTS_DIR}/${EXECUTABLE_PATH}',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/rpath/file.c b/third_party/python/gyp/test/mac/rpath/file.c
new file mode 100644
index 0000000000..56757a701b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/rpath/file.c
@@ -0,0 +1 @@
+void f() {}
diff --git a/third_party/python/gyp/test/mac/rpath/main.c b/third_party/python/gyp/test/mac/rpath/main.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/mac/rpath/main.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/mac/rpath/test.gyp b/third_party/python/gyp/test/mac/rpath/test.gyp
new file mode 100644
index 0000000000..7255cb7cd2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/rpath/test.gyp
@@ -0,0 +1,48 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'default_rpath',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ },
+ {
+ 'target_name': 'explicit_rpath',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ 'LD_RUNPATH_SEARCH_PATHS': ['@executable_path/.'],
+ },
+ },
+ {
+ 'target_name': 'explicit_rpaths_escaped',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ # Xcode requires spaces to be escaped, else it ends up adding two
+ # independent rpaths.
+ 'LD_RUNPATH_SEARCH_PATHS': ['First\\ rpath', 'Second\\ rpath'],
+ },
+ },
+ {
+ 'target_name': 'explicit_rpaths_bundle',
+ 'product_name': 'My Framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c' ],
+ 'xcode_settings': {
+ 'LD_RUNPATH_SEARCH_PATHS': ['@loader_path/.'],
+ },
+ },
+ {
+ 'target_name': 'executable',
+ 'type': 'executable',
+ 'sources': [ 'main.c' ],
+ 'xcode_settings': {
+ 'LD_RUNPATH_SEARCH_PATHS': ['@executable_path/.'],
+ },
+ },
+ ],
+}
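
The comment in explicit_rpaths_escaped is the key detail: Xcode splits LD_RUNPATH_SEARCH_PATHS on unescaped spaces, so 'First rpath' would otherwise become two rpaths. A tiny sketch of the escaping (helper name illustrative):

    def escape_rpath(path):
        # 'First rpath' -> 'First\ rpath', kept as one LC_RPATH entry;
        # without the escape Xcode records 'First' and 'rpath' separately.
        return path.replace(' ', '\\ ')

The resulting entries can be inspected in the built binary with `otool -l binary | grep -A2 LC_RPATH`.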
diff --git a/third_party/python/gyp/test/mac/sdkroot/file.cc b/third_party/python/gyp/test/mac/sdkroot/file.cc
new file mode 100644
index 0000000000..13ae971040
--- /dev/null
+++ b/third_party/python/gyp/test/mac/sdkroot/file.cc
@@ -0,0 +1,5 @@
+#include <map>
+using std::map;
+
+int main() {
+}
diff --git a/third_party/python/gyp/test/mac/sdkroot/test.gyp b/third_party/python/gyp/test/mac/sdkroot/test.gyp
new file mode 100644
index 0000000000..2fc11a0280
--- /dev/null
+++ b/third_party/python/gyp/test/mac/sdkroot/test.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'mytarget',
+ 'type': 'executable',
+ 'sources': [ 'file.cc', ],
+ 'xcode_settings': {
+ 'SDKROOT': 'macosx%s',
+ },
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_shorthand.sh', ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'absolute',
+ 'type': 'executable',
+ 'sources': [ 'file.cc', ],
+ 'xcode_settings': {
+ 'SDKROOT': '<(sdk_path)',
+ },
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_shorthand.sh', ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/sdkroot/test_shorthand.sh b/third_party/python/gyp/test/mac/sdkroot/test_shorthand.sh
new file mode 100755
index 0000000000..ac4ac229ae
--- /dev/null
+++ b/third_party/python/gyp/test/mac/sdkroot/test_shorthand.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+found=false
+for sdk in 10.6 10.7 10.8 10.9 ; do
+ if expected=$(xcodebuild -version -sdk macosx$sdk Path 2>/dev/null) ; then
+ found=true
+ break
+ fi
+done
+if ! $found ; then
+ echo >&2 "cannot find installed SDK"
+ exit 1
+fi
+
+test "${SDKROOT}" = "${expected}"
diff --git a/third_party/python/gyp/test/mac/sourceless-module/empty.c b/third_party/python/gyp/test/mac/sourceless-module/empty.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/mac/sourceless-module/empty.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/mac/sourceless-module/empty.txt b/third_party/python/gyp/test/mac/sourceless-module/empty.txt
new file mode 100644
index 0000000000..139597f9cb
--- /dev/null
+++ b/third_party/python/gyp/test/mac/sourceless-module/empty.txt
@@ -0,0 +1,2 @@
+
+
diff --git a/third_party/python/gyp/test/mac/sourceless-module/fun.c b/third_party/python/gyp/test/mac/sourceless-module/fun.c
new file mode 100644
index 0000000000..d64ff8ca23
--- /dev/null
+++ b/third_party/python/gyp/test/mac/sourceless-module/fun.c
@@ -0,0 +1 @@
+int f() { return 42; }
diff --git a/third_party/python/gyp/test/mac/sourceless-module/test.gyp b/third_party/python/gyp/test/mac/sourceless-module/test.gyp
new file mode 100644
index 0000000000..cbbe63df02
--- /dev/null
+++ b/third_party/python/gyp/test/mac/sourceless-module/test.gyp
@@ -0,0 +1,96 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'empty_bundle',
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ },
+ {
+ 'target_name': 'resource_bundle',
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ 'actions': [
+ {
+ 'action_name': 'Add Resource',
+ 'inputs': [],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/app_manifest/foo.manifest',
+ ],
+ 'action': [
+ 'touch', '<(INTERMEDIATE_DIR)/app_manifest/foo.manifest',
+ ],
+ 'process_outputs_as_mac_bundle_resources': 1,
+ },
+ ],
+ },
+ {
+ 'target_name': 'dependent_on_resource_bundle',
+ 'type': 'executable',
+ 'sources': [ 'empty.c' ],
+ 'dependencies': [
+ 'resource_bundle',
+ ],
+ },
+
+ {
+ 'target_name': 'alib',
+ 'type': 'static_library',
+ 'sources': [ 'fun.c' ]
+ },
+ { # No sources, but depends on a static_library so must be linked.
+ 'target_name': 'resource_framework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'dependencies': [
+ 'alib',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'Add Resource',
+ 'inputs': [],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/app_manifest/foo.manifest',
+ ],
+ 'action': [
+ 'touch', '<(INTERMEDIATE_DIR)/app_manifest/foo.manifest',
+ ],
+ 'process_outputs_as_mac_bundle_resources': 1,
+ },
+ ],
+ },
+ {
+ 'target_name': 'dependent_on_resource_framework',
+ 'type': 'executable',
+ 'sources': [ 'empty.c' ],
+ 'dependencies': [
+ 'resource_framework',
+ ],
+ },
+
+ { # No actions, but still have resources.
+ 'target_name': 'mac_resource_bundle_no_actions',
+ 'product_extension': 'bundle',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'mac_bundle_resources': [
+ 'empty.txt',
+ ],
+ },
+ {
+ 'target_name': 'bundle_dependent_on_resource_bundle_no_actions',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'empty.c' ],
+ 'dependencies': [
+ 'mac_resource_bundle_no_actions',
+ ],
+ 'mac_bundle_resources': [
+ '<(PRODUCT_DIR)/mac_resource_bundle_no_actions.bundle',
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/strip/file.c b/third_party/python/gyp/test/mac/strip/file.c
new file mode 100644
index 0000000000..a4c504de71
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/file.c
@@ -0,0 +1,22 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+static void the_static_function() {}
+__attribute__((used)) void the_used_function() {}
+
+__attribute__((visibility("hidden"))) __attribute__((used))
+ void the_hidden_function() {}
+__attribute__((visibility("default"))) __attribute__((used))
+ void the_visible_function() {}
+
+extern const int eci;
+__attribute__((used)) int i;
+__attribute__((used)) const int ci = 34623;
+
+void the_function() {
+ the_static_function();
+ the_used_function();
+ the_hidden_function();
+ the_visible_function();
+}
diff --git a/third_party/python/gyp/test/mac/strip/main.c b/third_party/python/gyp/test/mac/strip/main.c
new file mode 100644
index 0000000000..b2291a6b09
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/main.c
@@ -0,0 +1,25 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+static void the_static_function() {}
+__attribute__((used)) void the_used_function() {}
+
+__attribute__((visibility("hidden"))) __attribute__((used))
+void the_hidden_function() {}
+__attribute__((visibility("default"))) __attribute__((used))
+void the_visible_function() {}
+
+void the_function() {}
+
+extern const int eci;
+__attribute__((used)) int i;
+__attribute__((used)) const int ci = 34623;
+
+int main() {
+ the_function();
+ the_static_function();
+ the_used_function();
+ the_hidden_function();
+ the_visible_function();
+}
diff --git a/third_party/python/gyp/test/mac/strip/strip.saves b/third_party/python/gyp/test/mac/strip/strip.saves
new file mode 100644
index 0000000000..b60ca62857
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/strip.saves
@@ -0,0 +1,5 @@
+# Copyright (c) 2011 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file would list symbols that should not be stripped.
diff --git a/third_party/python/gyp/test/mac/strip/subdirectory/nested_file.c b/third_party/python/gyp/test/mac/strip/subdirectory/nested_file.c
new file mode 100644
index 0000000000..50daa6c13b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/subdirectory/nested_file.c
@@ -0,0 +1 @@
+void nested_f() {}
diff --git a/third_party/python/gyp/test/mac/strip/subdirectory/nested_strip.saves b/third_party/python/gyp/test/mac/strip/subdirectory/nested_strip.saves
new file mode 100644
index 0000000000..d434c0ef45
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/subdirectory/nested_strip.saves
@@ -0,0 +1,5 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file would list symbols that should not be stripped.
diff --git a/third_party/python/gyp/test/mac/strip/subdirectory/subdirectory.gyp b/third_party/python/gyp/test/mac/strip/subdirectory/subdirectory.gyp
new file mode 100644
index 0000000000..5d0d190914
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/subdirectory/subdirectory.gyp
@@ -0,0 +1,38 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'nested_strip_save',
+ 'type': 'shared_library',
+ 'sources': [ 'nested_file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'STRIPFLAGS': '-s $(CHROMIUM_STRIP_SAVE_FILE)',
+ 'CHROMIUM_STRIP_SAVE_FILE': 'nested_strip.saves',
+ },
+ },
+ {
+ 'target_name': 'nested_strip_save_postbuild',
+ 'type': 'shared_library',
+ 'sources': [ 'nested_file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'STRIPFLAGS': '-s $(CHROMIUM_STRIP_SAVE_FILE)',
+ 'CHROMIUM_STRIP_SAVE_FILE': 'nested_strip.saves',
+ },
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'Action that reads CHROMIUM_STRIP_SAVE_FILE',
+ 'action': [
+ './test_reading_save_file_from_postbuild.sh',
+ ],
+ },
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/strip/subdirectory/test_reading_save_file_from_postbuild.sh b/third_party/python/gyp/test/mac/strip/subdirectory/test_reading_save_file_from_postbuild.sh
new file mode 100755
index 0000000000..976943680e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/subdirectory/test_reading_save_file_from_postbuild.sh
@@ -0,0 +1,5 @@
+#!/bin/bash
+
+set -e
+
+test -f "${CHROMIUM_STRIP_SAVE_FILE}"
diff --git a/third_party/python/gyp/test/mac/strip/test-defaults.gyp b/third_party/python/gyp/test/mac/strip/test-defaults.gyp
new file mode 100644
index 0000000000..e688b955a7
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/test-defaults.gyp
@@ -0,0 +1,51 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ],
+ 'target_defaults': {
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'single_dylib',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ },
+ {
+ 'target_name': 'single_so',
+ 'type': 'loadable_module',
+ 'sources': [ 'file.c', ],
+ },
+ {
+ 'target_name': 'single_exe',
+ 'type': 'executable',
+ 'sources': [ 'main.c', ],
+ },
+
+ {
+ 'target_name': 'bundle_dylib',
+ 'type': 'shared_library',
+ 'mac_bundle': '1',
+ 'sources': [ 'file.c', ],
+ },
+ {
+ 'target_name': 'bundle_so',
+ 'type': 'loadable_module',
+ 'mac_bundle': '1',
+ 'sources': [ 'file.c', ],
+ },
+ {
+ 'target_name': 'bundle_exe',
+ 'type': 'executable',
+ 'mac_bundle': '1',
+ 'sources': [ 'main.c', ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/strip/test.gyp b/third_party/python/gyp/test/mac/strip/test.gyp
new file mode 100644
index 0000000000..2558aa91bb
--- /dev/null
+++ b/third_party/python/gyp/test/mac/strip/test.gyp
@@ -0,0 +1,119 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# These xcode_settings affect stripping:
+# "Deployment postprocessing involves stripping the binary, and setting
+# its file mode, owner, and group."
+#'DEPLOYMENT_POSTPROCESSING': 'YES',
+
+# "Specifies whether to strip symbol information from the binary.
+# Prerequisite: $DEPLOYMENT_POSTPROCESSING = YES" "Default Value: 'NO'"
+#'STRIP_INSTALLED_PRODUCT': 'YES',
+
+# "Values:
+# * all: Strips the binary completely, removing the symbol table and
+# relocation information
+# * non-global: Strips nonglobal symbols but saves external symbols.
+# * debugging: Strips debugging symbols but saves local and global
+# symbols."
+# (maps to no flag, -x, -S in that order)
+#'STRIP_STYLE': 'non-global',
+
+# "Additional strip flags"
+#'STRIPFLAGS': '-c',
+
+# "YES: Copied binaries are stripped of debugging symbols. This does
+# not cause the binary produced by the linker to be stripped. Use
+# 'STRIP_INSTALLED_PRODUCT (Strip Linked Product)' to have the linker
+# strip the binary."
+#'COPY_PHASE_STRIP': 'NO',
+{
+ 'targets': [
+ {
+ 'target_name': 'no_postprocess',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'NO',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ },
+ },
+ {
+ 'target_name': 'no_strip',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'NO',
+ },
+ },
+ {
+ 'target_name': 'strip_all',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'STRIP_STYLE': 'all',
+ },
+ },
+ {
+ 'target_name': 'strip_nonglobal',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'STRIP_STYLE': 'non-global',
+ },
+ },
+ {
+ 'target_name': 'strip_debugging',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'STRIP_STYLE': 'debugging',
+ },
+ },
+ {
+ 'target_name': 'strip_all_custom_flags',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'STRIP_STYLE': 'all',
+ 'STRIPFLAGS': '-c',
+ },
+ },
+ {
+ 'target_name': 'strip_all_bundle',
+ 'type': 'shared_library',
+ 'mac_bundle': '1',
+ 'sources': [ 'file.c', ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'STRIP_STYLE': 'all',
+ },
+ },
+ {
+ 'target_name': 'strip_save',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'dependencies': [
+ 'subdirectory/subdirectory.gyp:nested_strip_save',
+ 'subdirectory/subdirectory.gyp:nested_strip_save_postbuild',
+ ],
+ 'xcode_settings': {
+ 'DEPLOYMENT_POSTPROCESSING': 'YES',
+ 'STRIP_INSTALLED_PRODUCT': 'YES',
+ 'STRIPFLAGS': '-s $(CHROMIUM_STRIP_SAVE_FILE)',
+ 'CHROMIUM_STRIP_SAVE_FILE': 'strip.saves',
+ },
+ },
+ ],
+}
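
The header comment's mapping of STRIP_STYLE to strip(1) flags, written out as a sketch of the table (not gyp's code):

    STRIP_STYLE_FLAGS = {
        'all': [],            # strip everything; no extra flag
        'non-global': ['-x'], # strip non-global symbols, keep externals
        'debugging': ['-S'],  # strip only debugging symbols
    }

    def strip_argv(binary, style, stripflags=None):
        # e.g. strip_argv('libfoo.dylib', 'all', ['-c']) ->
        #   ['strip', '-c', 'libfoo.dylib']
        return ['strip'] + STRIP_STYLE_FLAGS[style] + (stripflags or []) + [binary]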
diff --git a/third_party/python/gyp/test/mac/swift-library/Info.plist b/third_party/python/gyp/test/mac/swift-library/Info.plist
new file mode 100644
index 0000000000..804990ca5e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/swift-library/Info.plist
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.yourcompany.${PRODUCT_NAME:identifier}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>FMWK</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>NSPrincipalClass</key>
+ <string></string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/swift-library/file.swift b/third_party/python/gyp/test/mac/swift-library/file.swift
new file mode 100644
index 0000000000..88db7da5c6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/swift-library/file.swift
@@ -0,0 +1,9 @@
+import Foundation
+
+public class GypSwiftTest {
+  let myProperty: Bool  // assigned exactly once in init; a 'let' with a default value cannot be reassigned
+
+ init() {
+ self.myProperty = true
+ }
+}
\ No newline at end of file
diff --git a/third_party/python/gyp/test/mac/swift-library/test.gyp b/third_party/python/gyp/test/mac/swift-library/test.gyp
new file mode 100644
index 0000000000..373a677cbd
--- /dev/null
+++ b/third_party/python/gyp/test/mac/swift-library/test.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'SwiftFramework',
+ 'product_name': 'SwiftFramework',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ 'CODE_SIGNING_REQUIRED': 'NO',
+ 'CONFIGURATION_BUILD_DIR':'build/Default',
+ },
+ 'sources': [
+ 'file.swift',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/type_envvars/file.c b/third_party/python/gyp/test/mac/type_envvars/file.c
new file mode 100644
index 0000000000..9cddaf1b0b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/file.c
@@ -0,0 +1,6 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void f() {}
+int main() {}
diff --git a/third_party/python/gyp/test/mac/type_envvars/test.gyp b/third_party/python/gyp/test/mac/type_envvars/test.gyp
new file mode 100644
index 0000000000..465670056b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test.gyp
@@ -0,0 +1,100 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'my_app',
+ 'product_name': 'My App',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_bundle_executable.sh', ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'bundle_loadable_module',
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_bundle_loadable_module.sh', ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'bundle_shared_library',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_bundle_shared_library.sh', ],
+ },
+ ],
+ },
+ # Types 'static_library' and 'none' can't exist as bundles.
+
+ {
+ 'target_name': 'nonbundle_executable',
+ 'type': 'executable',
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_nonbundle_executable.sh', ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'nonbundle_loadable_module',
+ 'type': 'loadable_module',
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_nonbundle_loadable_module.sh', ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'nonbundle_shared_library',
+ 'type': 'shared_library',
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_nonbundle_shared_library.sh', ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'nonbundle_static_library',
+ 'type': 'static_library',
+ 'sources': [ 'file.c', ],
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_nonbundle_static_library.sh', ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'nonbundle_none',
+ 'type': 'none',
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_nonbundle_none.sh', ],
+ },
+ ],
+ },
+ ],
+}
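
The postbuild scripts that follow pin down, per target type, which Xcode environment variables gyp must set. Their assertions condense to this table (values copied from the test_*.sh files below; None means the variable must be unset):

    EXPECTED = {
        # (gyp type, mac_bundle): (MACH_O_TYPE, PRODUCT_TYPE)
        ('executable', True):       ('mh_execute', 'com.apple.product-type.application'),
        ('loadable_module', True):  ('mh_bundle',  'com.apple.product-type.bundle'),
        ('shared_library', True):   ('mh_dylib',   'com.apple.product-type.framework'),
        ('executable', False):      (None,         'com.apple.product-type.tool'),
        ('loadable_module', False): ('mh_bundle',  'com.apple.product-type.library.dynamic'),
        ('shared_library', False):  ('mh_dylib',   'com.apple.product-type.library.dynamic'),
    }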
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_bundle_executable.sh b/third_party/python/gyp/test/mac/type_envvars/test_bundle_executable.sh
new file mode 100755
index 0000000000..9a08c8f0cc
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_bundle_executable.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+test $MACH_O_TYPE = mh_execute
+test $PRODUCT_TYPE = com.apple.product-type.application
+test "${PRODUCT_NAME}" = "My App"
+test "${FULL_PRODUCT_NAME}" = "My App.app"
+
+test "${EXECUTABLE_NAME}" = "My App"
+test "${EXECUTABLE_PATH}" = "My App.app/Contents/MacOS/My App"
+test "${WRAPPER_NAME}" = "My App.app"
+
+test "${CONTENTS_FOLDER_PATH}" = "My App.app/Contents"
+test "${EXECUTABLE_FOLDER_PATH}" = "My App.app/Contents/MacOS"
+test "${UNLOCALIZED_RESOURCES_FOLDER_PATH}" = "My App.app/Contents/Resources"
+test "${JAVA_FOLDER_PATH}" = "My App.app/Contents/Resources/Java"
+test "${FRAMEWORKS_FOLDER_PATH}" = "My App.app/Contents/Frameworks"
+test "${SHARED_FRAMEWORKS_FOLDER_PATH}" = "My App.app/Contents/SharedFrameworks"
+test "${SHARED_SUPPORT_FOLDER_PATH}" = "My App.app/Contents/SharedSupport"
+test "${PLUGINS_FOLDER_PATH}" = "My App.app/Contents/PlugIns"
+test "${XPCSERVICES_FOLDER_PATH}" = "My App.app/Contents/XPCServices"
+
+[[ ! $DYLIB_INSTALL_NAME_BASE && ${DYLIB_INSTALL_NAME_BASE-_} ]]
+[[ ! $LD_DYLIB_INSTALL_NAME && ${LD_DYLIB_INSTALL_NAME-_} ]]
+
+"$(dirname "$0")/test_check_sdkroot.sh"
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_bundle_loadable_module.sh b/third_party/python/gyp/test/mac/type_envvars/test_bundle_loadable_module.sh
new file mode 100755
index 0000000000..b5c7638293
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_bundle_loadable_module.sh
@@ -0,0 +1,35 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+test $MACH_O_TYPE = mh_bundle
+test $PRODUCT_TYPE = com.apple.product-type.bundle
+test $PRODUCT_NAME = bundle_loadable_module
+test $FULL_PRODUCT_NAME = bundle_loadable_module.bundle
+
+test $EXECUTABLE_NAME = bundle_loadable_module
+test $EXECUTABLE_PATH = \
+ "bundle_loadable_module.bundle/Contents/MacOS/bundle_loadable_module"
+test $WRAPPER_NAME = bundle_loadable_module.bundle
+
+test $CONTENTS_FOLDER_PATH = bundle_loadable_module.bundle/Contents
+test $EXECUTABLE_FOLDER_PATH = bundle_loadable_module.bundle/Contents/MacOS
+test $UNLOCALIZED_RESOURCES_FOLDER_PATH = \
+ bundle_loadable_module.bundle/Contents/Resources
+test $JAVA_FOLDER_PATH = bundle_loadable_module.bundle/Contents/Resources/Java
+test $FRAMEWORKS_FOLDER_PATH = bundle_loadable_module.bundle/Contents/Frameworks
+test $SHARED_FRAMEWORKS_FOLDER_PATH = \
+ bundle_loadable_module.bundle/Contents/SharedFrameworks
+test $SHARED_SUPPORT_FOLDER_PATH = \
+ bundle_loadable_module.bundle/Contents/SharedSupport
+test $PLUGINS_FOLDER_PATH = bundle_loadable_module.bundle/Contents/PlugIns
+test $XPCSERVICES_FOLDER_PATH = \
+ bundle_loadable_module.bundle/Contents/XPCServices
+
+[[ ! $DYLIB_INSTALL_NAME_BASE && ${DYLIB_INSTALL_NAME_BASE-_} ]]
+[[ ! $LD_DYLIB_INSTALL_NAME && ${LD_DYLIB_INSTALL_NAME-_} ]]
+
+"$(dirname "$0")/test_check_sdkroot.sh"
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_bundle_shared_library.sh b/third_party/python/gyp/test/mac/type_envvars/test_bundle_shared_library.sh
new file mode 100755
index 0000000000..9c2dc0626f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_bundle_shared_library.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+test $MACH_O_TYPE = mh_dylib
+test $PRODUCT_TYPE = com.apple.product-type.framework
+test $PRODUCT_NAME = bundle_shared_library
+test $FULL_PRODUCT_NAME = bundle_shared_library.framework
+
+test $EXECUTABLE_NAME = bundle_shared_library
+test $EXECUTABLE_PATH = \
+ "bundle_shared_library.framework/Versions/A/bundle_shared_library"
+test $WRAPPER_NAME = bundle_shared_library.framework
+
+test $CONTENTS_FOLDER_PATH = bundle_shared_library.framework/Versions/A
+test $EXECUTABLE_FOLDER_PATH = bundle_shared_library.framework/Versions/A
+test $UNLOCALIZED_RESOURCES_FOLDER_PATH = \
+ bundle_shared_library.framework/Versions/A/Resources
+test $JAVA_FOLDER_PATH = \
+ bundle_shared_library.framework/Versions/A/Resources/Java
+test $FRAMEWORKS_FOLDER_PATH = \
+ bundle_shared_library.framework/Versions/A/Frameworks
+test $SHARED_FRAMEWORKS_FOLDER_PATH = \
+ bundle_shared_library.framework/Versions/A/SharedFrameworks
+test $SHARED_SUPPORT_FOLDER_PATH = \
+ bundle_shared_library.framework/Versions/A/Resources
+test $PLUGINS_FOLDER_PATH = bundle_shared_library.framework/Versions/A/PlugIns
+test $XPCSERVICES_FOLDER_PATH = \
+ bundle_shared_library.framework/Versions/A/XPCServices
+
+test $DYLIB_INSTALL_NAME_BASE = "/Library/Frameworks"
+test $LD_DYLIB_INSTALL_NAME = \
+ "/Library/Frameworks/bundle_shared_library.framework/Versions/A/bundle_shared_library"
+
+"$(dirname "$0")/test_check_sdkroot.sh"
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_check_sdkroot.sh b/third_party/python/gyp/test/mac/type_envvars/test_check_sdkroot.sh
new file mode 100755
index 0000000000..1297dbeff1
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_check_sdkroot.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+# `xcodebuild -version` output looks like
+# Xcode 4.6.3
+# Build version 4H1503
+# or like
+# Xcode 4.2
+# Build version 4C199
+# or like
+# Xcode 3.2.6
+# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
+# BuildVersion: 10M2518
+# Convert that to '0463', '0420' and '0326' respectively.
+function xcodeversion() {
+ xcodebuild -version | awk '/Xcode ([0-9]+\.[0-9]+(\.[0-9]+)?)/ {
+ version = $2
+ gsub(/\./, "", version)
+ if (length(version) < 3) {
+ version = version "0"
+ }
+ if (length(version) < 4) {
+ version = "0" version
+ }
+ }
+ END { print version }'
+}
+
+# Returns true if |string1| is smaller than |string2|.
+# This function assumes that both strings represent Xcode version numbers
+# as returned by |xcodeversion|.
+function smaller() {
+ local min="$(echo -ne "${1}\n${2}\n" | sort -n | head -n1)"
+ test "${min}" != "${2}"
+}
+
+if [[ "$(xcodeversion)" < "0500" ]]; then
+ # Xcode version is older than 5.0, check that SDKROOT is set but empty.
+ [[ -z "${SDKROOT}" && -z "${SDKROOT-_}" ]]
+else
+  # Xcode version is 5.0 or newer, check that SDKROOT is set.
+ [[ "${SDKROOT}" == "$(xcodebuild -version -sdk '' Path)" ]]
+fi
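
The awk function above packs an Xcode version into a fixed-width string so a plain lexicographic compare orders releases correctly. The same normalization in Python, as a direct transcription:

    def normalize_xcode_version(version):
        # '4.6.3' -> '0463', '4.2' -> '0420', '3.2.6' -> '0326'
        v = version.replace('.', '')
        if len(v) < 3:
            v += '0'
        if len(v) < 4:
            v = '0' + v
        return v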
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_executable.sh b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_executable.sh
new file mode 100755
index 0000000000..9fbbd95b8d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_executable.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+# For some reason, Xcode doesn't set MACH_O_TYPE for non-bundle executables.
+# Check for "not set", not just "empty":
+[[ ! $MACH_O_TYPE && ${MACH_O_TYPE-_} ]]
+test $PRODUCT_TYPE = com.apple.product-type.tool
+test $PRODUCT_NAME = nonbundle_executable
+test $FULL_PRODUCT_NAME = nonbundle_executable
+
+test $EXECUTABLE_NAME = nonbundle_executable
+test $EXECUTABLE_PATH = nonbundle_executable
+[[ ! $WRAPPER_NAME && ${WRAPPER_NAME-_} ]]
+
+[[ ! $CONTENTS_FOLDER_PATH && ${CONTENTS_FOLDER_PATH-_} ]]
+[[ ! $EXECUTABLE_FOLDER_PATH && ${EXECUTABLE_FOLDER_PATH-_} ]]
+[[ ! $UNLOCALIZED_RESOURCES_FOLDER_PATH \
+ && ${UNLOCALIZED_RESOURCES_FOLDER_PATH-_} ]]
+[[ ! $JAVA_FOLDER_PATH && ${JAVA_FOLDER_PATH-_} ]]
+[[ ! $FRAMEWORKS_FOLDER_PATH && ${FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_FRAMEWORKS_FOLDER_PATH && ${SHARED_FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_SUPPORT_FOLDER_PATH && ${SHARED_SUPPORT_FOLDER_PATH-_} ]]
+[[ ! $PLUGINS_FOLDER_PATH && ${PLUGINS_FOLDER_PATH-_} ]]
+[[ ! $XPCSERVICES_FOLDER_PATH && ${XPCSERVICES_FOLDER_PATH-_} ]]
+
+[[ ! $DYLIB_INSTALL_NAME_BASE && ${DYLIB_INSTALL_NAME_BASE-_} ]]
+[[ ! $LD_DYLIB_INSTALL_NAME && ${LD_DYLIB_INSTALL_NAME-_} ]]
+
+"$(dirname "$0")/test_check_sdkroot.sh"
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_loadable_module.sh b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_loadable_module.sh
new file mode 100755
index 0000000000..b4c3ba976e
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_loadable_module.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+test $MACH_O_TYPE = mh_bundle
+test $PRODUCT_TYPE = com.apple.product-type.library.dynamic
+test $PRODUCT_NAME = nonbundle_loadable_module
+test $FULL_PRODUCT_NAME = nonbundle_loadable_module.so
+
+test $EXECUTABLE_NAME = nonbundle_loadable_module.so
+test $EXECUTABLE_PATH = nonbundle_loadable_module.so
+[[ ! $WRAPPER_NAME && ${WRAPPER_NAME-_} ]]
+
+[[ ! $CONTENTS_FOLDER_PATH && ${CONTENTS_FOLDER_PATH-_} ]]
+[[ ! $EXECUTABLE_FOLDER_PATH && ${EXECUTABLE_FOLDER_PATH-_} ]]
+[[ ! $UNLOCALIZED_RESOURCES_FOLDER_PATH \
+ && ${UNLOCALIZED_RESOURCES_FOLDER_PATH-_} ]]
+[[ ! $JAVA_FOLDER_PATH && ${JAVA_FOLDER_PATH-_} ]]
+[[ ! $FRAMEWORKS_FOLDER_PATH && ${FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_FRAMEWORKS_FOLDER_PATH && ${SHARED_FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_SUPPORT_FOLDER_PATH && ${SHARED_SUPPORT_FOLDER_PATH-_} ]]
+[[ ! $PLUGINS_FOLDER_PATH && ${PLUGINS_FOLDER_PATH-_} ]]
+[[ ! $XPCSERVICES_FOLDER_PATH && ${XPCSERVICES_FOLDER_PATH-_} ]]
+
+test $DYLIB_INSTALL_NAME_BASE = "/usr/local/lib"
+test $LD_DYLIB_INSTALL_NAME = "/usr/local/lib/nonbundle_loadable_module.so"
+
+"$(dirname "$0")/test_check_sdkroot.sh"
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_none.sh b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_none.sh
new file mode 100755
index 0000000000..e2dc7fd9cd
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_none.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+# Check for "not set", not just "empty":
+[[ ! $MACH_O_TYPE && ${MACH_O_TYPE-_} ]]
+[[ ! $PRODUCT_TYPE && ${PRODUCT_TYPE-_} ]]
+test $PRODUCT_NAME = nonbundle_none
+[[ ! $FULL_PRODUCT_NAME && ${FULL_PRODUCT_NAME-_} ]]
+
+[[ ! $EXECUTABLE_NAME && ${EXECUTABLE_NAME-_} ]]
+[[ ! $EXECUTABLE_PATH && ${EXECUTABLE_PATH-_} ]]
+[[ ! $WRAPPER_NAME && ${WRAPPER_NAME-_} ]]
+
+[[ ! $CONTENTS_FOLDER_PATH && ${CONTENTS_FOLDER_PATH-_} ]]
+[[ ! $EXECUTABLE_FOLDER_PATH && ${EXECUTABLE_FOLDER_PATH-_} ]]
+[[ ! $UNLOCALIZED_RESOURCES_FOLDER_PATH \
+ && ${UNLOCALIZED_RESOURCES_FOLDER_PATH-_} ]]
+[[ ! $JAVA_FOLDER_PATH && ${JAVA_FOLDER_PATH-_} ]]
+[[ ! $FRAMEWORKS_FOLDER_PATH && ${FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_FRAMEWORKS_FOLDER_PATH && ${SHARED_FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_SUPPORT_FOLDER_PATH && ${SHARED_SUPPORT_FOLDER_PATH-_} ]]
+[[ ! $PLUGINS_FOLDER_PATH && ${PLUGINS_FOLDER_PATH-_} ]]
+[[ ! $XPCSERVICES_FOLDER_PATH && ${XPCSERVICES_FOLDER_PATH-_} ]]
+
+[[ ! $DYLIB_INSTALL_NAME_BASE && ${DYLIB_INSTALL_NAME_BASE-_} ]]
+[[ ! $LD_DYLIB_INSTALL_NAME && ${LD_DYLIB_INSTALL_NAME-_} ]]
+
+"$(dirname "$0")/test_check_sdkroot.sh"
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_shared_library.sh b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_shared_library.sh
new file mode 100755
index 0000000000..ba63ec1a48
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_shared_library.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+test $MACH_O_TYPE = mh_dylib
+test $PRODUCT_TYPE = com.apple.product-type.library.dynamic
+test $PRODUCT_NAME = nonbundle_shared_library
+test $FULL_PRODUCT_NAME = libnonbundle_shared_library.dylib
+
+test $EXECUTABLE_NAME = libnonbundle_shared_library.dylib
+test $EXECUTABLE_PATH = libnonbundle_shared_library.dylib
+[[ ! $WRAPPER_NAME && ${WRAPPER_NAME-_} ]]
+
+[[ ! $CONTENTS_FOLDER_PATH && ${CONTENTS_FOLDER_PATH-_} ]]
+[[ ! $EXECUTABLE_FOLDER_PATH && ${EXECUTABLE_FOLDER_PATH-_} ]]
+[[ ! $UNLOCALIZED_RESOURCES_FOLDER_PATH && \
+ ${UNLOCALIZED_RESOURCES_FOLDER_PATH-_} ]]
+[[ ! $JAVA_FOLDER_PATH && ${JAVA_FOLDER_PATH-_} ]]
+[[ ! $FRAMEWORKS_FOLDER_PATH && ${FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_FRAMEWORKS_FOLDER_PATH && ${SHARED_FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_SUPPORT_FOLDER_PATH && ${SHARED_SUPPORT_FOLDER_PATH-_} ]]
+[[ ! $PLUGINS_FOLDER_PATH && ${PLUGINS_FOLDER_PATH-_} ]]
+[[ ! $XPCSERVICES_FOLDER_PATH && ${XPCSERVICES_FOLDER_PATH-_} ]]
+
+test $DYLIB_INSTALL_NAME_BASE = "/usr/local/lib"
+test $LD_DYLIB_INSTALL_NAME = "/usr/local/lib/libnonbundle_shared_library.dylib"
+
+"$(dirname "$0")/test_check_sdkroot.sh"
diff --git a/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_static_library.sh b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_static_library.sh
new file mode 100755
index 0000000000..63aac57b0b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/type_envvars/test_nonbundle_static_library.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+set -e
+
+test $MACH_O_TYPE = staticlib
+test $PRODUCT_TYPE = com.apple.product-type.library.static
+test $PRODUCT_NAME = nonbundle_static_library
+test $FULL_PRODUCT_NAME = libnonbundle_static_library.a
+
+test $EXECUTABLE_NAME = libnonbundle_static_library.a
+test $EXECUTABLE_PATH = libnonbundle_static_library.a
+[[ ! $WRAPPER_NAME && ${WRAPPER_NAME-_} ]]
+
+[[ ! $CONTENTS_FOLDER_PATH && ${CONTENTS_FOLDER_PATH-_} ]]
+[[ ! $EXECUTABLE_FOLDER_PATH && ${EXECUTABLE_FOLDER_PATH-_} ]]
+[[ ! $UNLOCALIZED_RESOURCES_FOLDER_PATH && \
+ ${UNLOCALIZED_RESOURCES_FOLDER_PATH-_} ]]
+[[ ! $JAVA_FOLDER_PATH && ${JAVA_FOLDER_PATH-_} ]]
+[[ ! $FRAMEWORKS_FOLDER_PATH && ${FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_FRAMEWORKS_FOLDER_PATH && ${SHARED_FRAMEWORKS_FOLDER_PATH-_} ]]
+[[ ! $SHARED_SUPPORT_FOLDER_PATH && ${SHARED_SUPPORT_FOLDER_PATH-_} ]]
+[[ ! $PLUGINS_FOLDER_PATH && ${PLUGINS_FOLDER_PATH-_} ]]
+[[ ! $XPCSERVICES_FOLDER_PATH && ${XPCSERVICES_FOLDER_PATH-_} ]]
+
+[[ ! $DYLIB_INSTALL_NAME_BASE && ${DYLIB_INSTALL_NAME_BASE-_} ]]
+[[ ! $LD_DYLIB_INSTALL_NAME && ${LD_DYLIB_INSTALL_NAME-_} ]]
+
+"$(dirname "$0")/test_check_sdkroot.sh"
diff --git a/third_party/python/gyp/test/mac/unicode-settings/file.cc b/third_party/python/gyp/test/mac/unicode-settings/file.cc
new file mode 100644
index 0000000000..b2f997621b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/unicode-settings/file.cc
@@ -0,0 +1,2 @@
+int main() {
+}
diff --git a/third_party/python/gyp/test/mac/unicode-settings/test.gyp b/third_party/python/gyp/test/mac/unicode-settings/test.gyp
new file mode 100644
index 0000000000..b331ae453f
--- /dev/null
+++ b/third_party/python/gyp/test/mac/unicode-settings/test.gyp
@@ -0,0 +1,23 @@
+# Copyright 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'myapp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [ 'file.cc', ],
+ 'xcode_settings': {
+ 'BUNDLE_DISPLAY_NAME': 'α\011',
+ },
+ 'postbuilds': [
+ {
+ 'postbuild_name': 'envtest',
+ 'action': [ './test_bundle_display_name.sh', ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/unicode-settings/test_bundle_display_name.sh b/third_party/python/gyp/test/mac/unicode-settings/test_bundle_display_name.sh
new file mode 100755
index 0000000000..95dd6267a3
--- /dev/null
+++ b/third_party/python/gyp/test/mac/unicode-settings/test_bundle_display_name.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+# Copyright 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+test "${BUNDLE_DISPLAY_NAME}" = 'α '
diff --git a/third_party/python/gyp/test/mac/xcode-env-order/Info.plist b/third_party/python/gyp/test/mac/xcode-env-order/Info.plist
new file mode 100644
index 0000000000..e11f21e52d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-env-order/Info.plist
@@ -0,0 +1,56 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.google.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>APPL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>LSMinimumSystemVersion</key>
+ <string>${MACOSX_DEPLOYMENT_TARGET}</string>
+ <key>NSMainNibFile</key>
+ <string>MainMenu</string>
+ <key>NSPrincipalClass</key>
+ <string>NSApplication</string>
+
+ <key>BraceProcessedKey1</key>
+ <string>${BRACE_DEPENDENT_KEY1}</string>
+ <key>BraceProcessedKey2</key>
+ <string>${BRACE_DEPENDENT_KEY2}</string>
+ <key>BraceProcessedKey3</key>
+ <string>${BRACE_DEPENDENT_KEY3}</string>
+
+ <key>ParenProcessedKey1</key>
+ <string>${PAREN_DEPENDENT_KEY1}</string>
+ <key>ParenProcessedKey2</key>
+ <string>${PAREN_DEPENDENT_KEY2}</string>
+ <key>ParenProcessedKey3</key>
+ <string>${PAREN_DEPENDENT_KEY3}</string>
+
+ <key>BareProcessedKey1</key>
+ <string>${BARE_DEPENDENT_KEY1}</string>
+ <key>BareProcessedKey2</key>
+ <string>${BARE_DEPENDENT_KEY2}</string>
+ <key>BareProcessedKey3</key>
+ <string>${BARE_DEPENDENT_KEY3}</string>
+
+ <key>MixedProcessedKey</key>
+ <string>${MIXED_DEPENDENT_KEY}</string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/xcode-env-order/file.ext1 b/third_party/python/gyp/test/mac/xcode-env-order/file.ext1
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-env-order/file.ext1
diff --git a/third_party/python/gyp/test/mac/xcode-env-order/file.ext2 b/third_party/python/gyp/test/mac/xcode-env-order/file.ext2
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-env-order/file.ext2
diff --git a/third_party/python/gyp/test/mac/xcode-env-order/file.ext3 b/third_party/python/gyp/test/mac/xcode-env-order/file.ext3
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-env-order/file.ext3
diff --git a/third_party/python/gyp/test/mac/xcode-env-order/main.c b/third_party/python/gyp/test/mac/xcode-env-order/main.c
new file mode 100644
index 0000000000..1bf4b2a11a
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-env-order/main.c
@@ -0,0 +1,7 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/mac/xcode-env-order/test.gyp b/third_party/python/gyp/test/mac/xcode-env-order/test.gyp
new file mode 100644
index 0000000000..8f975f7d6b
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-env-order/test.gyp
@@ -0,0 +1,121 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'test_app',
+ 'product_name': 'Test',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'main.c',
+ 'file.ext1',
+ 'file.ext2',
+ 'file.ext3',
+ ],
+ # Env vars in copies.
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/${PRODUCT_NAME}-copy-brace',
+ 'files': [ 'main.c', ], # ${SOURCE_ROOT} doesn't work with xcode
+ },
+ {
+ 'destination': '<(PRODUCT_DIR)/$(PRODUCT_NAME)-copy-paren',
+ 'files': [ '$(SOURCE_ROOT)/main.c', ],
+ },
+ {
+ 'destination': '<(PRODUCT_DIR)/$PRODUCT_NAME-copy-bare',
+ 'files': [ 'main.c', ], # $SOURCE_ROOT doesn't work with xcode
+ },
+ ],
+ # Env vars in actions. The $FOO's are here to test that env vars that
+ # aren't defined are handled in some way that doesn't break the build.
+ 'actions': [
+ {
+ 'action_name': 'Action copy braces ${PRODUCT_NAME} ${FOO}',
+ 'description': 'Action copy braces ${PRODUCT_NAME} ${FOO}',
+ 'inputs': [ '${SOURCE_ROOT}/main.c' ],
+ # Referencing ${PRODUCT_NAME} in action outputs doesn't work with
+ # the Xcode generator (PRODUCT_NAME expands to "Test Support").
+ 'outputs': [ '<(PRODUCT_DIR)/action-copy-brace.txt' ],
+ 'action': [ 'cp', '${SOURCE_ROOT}/main.c',
+ '<(PRODUCT_DIR)/action-copy-brace.txt' ],
+ },
+ {
+ 'action_name': 'Action copy parens $(PRODUCT_NAME) $(FOO)',
+ 'description': 'Action copy parens $(PRODUCT_NAME) $(FOO)',
+ 'inputs': [ '$(SOURCE_ROOT)/main.c' ],
+ # Referencing $(PRODUCT_NAME) in action outputs doesn't work with
+ # the Xcode generator (PRODUCT_NAME expands to "Test Support").
+ 'outputs': [ '<(PRODUCT_DIR)/action-copy-paren.txt' ],
+ 'action': [ 'cp', '$(SOURCE_ROOT)/main.c',
+ '<(PRODUCT_DIR)/action-copy-paren.txt' ],
+ },
+ {
+ 'action_name': 'Action copy bare $PRODUCT_NAME $FOO',
+ 'description': 'Action copy bare $PRODUCT_NAME $FOO',
+ 'inputs': [ '$SOURCE_ROOT/main.c' ],
+ # Referencing $PRODUCT_NAME in action outputs doesn't work with
+ # the Xcode generator (PRODUCT_NAME expands to "Test Support").
+ 'outputs': [ '<(PRODUCT_DIR)/action-copy-bare.txt' ],
+ 'action': [ 'cp', '$SOURCE_ROOT/main.c',
+ '<(PRODUCT_DIR)/action-copy-bare.txt' ],
+ },
+ ],
+ # Env vars in xcode_settings.
+ 'xcode_settings': {
+ 'INFOPLIST_FILE': 'Info.plist',
+ 'STRING_KEY': '/Source/Project',
+
+ 'BRACE_DEPENDENT_KEY2': '${STRING_KEY}/${PRODUCT_NAME}',
+ 'BRACE_DEPENDENT_KEY1': 'D:${BRACE_DEPENDENT_KEY2}',
+ 'BRACE_DEPENDENT_KEY3': '${PRODUCT_TYPE}:${BRACE_DEPENDENT_KEY1}',
+
+ 'PAREN_DEPENDENT_KEY2': '$(STRING_KEY)/$(PRODUCT_NAME)',
+ 'PAREN_DEPENDENT_KEY1': 'D:$(PAREN_DEPENDENT_KEY2)',
+ 'PAREN_DEPENDENT_KEY3': '$(PRODUCT_TYPE):$(PAREN_DEPENDENT_KEY1)',
+
+ 'BARE_DEPENDENT_KEY2': '$STRING_KEY/$PRODUCT_NAME',
+ 'BARE_DEPENDENT_KEY1': 'D:$BARE_DEPENDENT_KEY2',
+ 'BARE_DEPENDENT_KEY3': '$PRODUCT_TYPE:$BARE_DEPENDENT_KEY1',
+
+ 'MIXED_DEPENDENT_KEY': '${STRING_KEY}:$(PRODUCT_NAME):$MACH_O_TYPE',
+ },
+ # Env vars in rules. The $FOO's are here to test that env vars that
+ # aren't defined are handled in some way that doesn't break the build.
+ 'rules': [
+ {
+ 'rule_name': 'brace_rule',
+ 'message': 'Rule braces ${PRODUCT_NAME} ${FOO} <(RULE_INPUT_NAME)',
+ 'extension': 'ext1',
+ 'inputs': [ '${SOURCE_ROOT}/main.c' ],
+ 'outputs': [ '<(PRODUCT_DIR)/rule-copy-brace.txt' ],
+ 'action': [ 'cp', '${SOURCE_ROOT}/main.c',
+ '<(PRODUCT_DIR)/rule-copy-brace.txt' ],
+ },
+ {
+ 'rule_name': 'paren_rule',
+ 'message': 'Rule parens $(PRODUCT_NAME) $(FOO) <(RULE_INPUT_NAME)',
+ 'extension': 'ext2',
+ 'inputs': [ '$(SOURCE_ROOT)/main.c' ],
+ 'outputs': [ '<(PRODUCT_DIR)/rule-copy-paren.txt' ],
+ 'action': [ 'cp', '$(SOURCE_ROOT)/main.c',
+ '<(PRODUCT_DIR)/rule-copy-paren.txt' ],
+ },
+      # TODO: Fails in xcode. This looks like a bug in the xcode generator,
+      # though (it uses makefiles for rules, and treats $PRODUCT_NAME as
+      # $(P)RODUCT_NAME).
+ #{
+ # 'rule_name': 'bare_rule',
+ # 'message': 'Rule copy bare $PRODUCT_NAME $FOO',
+ # 'extension': 'ext3',
+ # 'inputs': [ '$SOURCE_ROOT/main.c' ],
+ # 'outputs': [ '<(PRODUCT_DIR)/rule-copy-bare.txt' ],
+ # 'action': [ 'cp', '$SOURCE_ROOT/main.c',
+ # '<(PRODUCT_DIR)/rule-copy-bare.txt' ],
+ #},
+ ],
+ },
+ ],
+}
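
The *_DEPENDENT_KEY settings above are deliberately declared out of dependency order (KEY1 refers to KEY2, which refers to STRING_KEY), so whatever expands them must resolve references recursively rather than top to bottom. A minimal sketch of such an expander for the ${...} style, reusing names from this test (illustrative only, not gyp's or Xcode's actual resolver):

    import re

    settings = {
        'STRING_KEY': '/Source/Project',
        'PRODUCT_NAME': 'Test',
        'BRACE_DEPENDENT_KEY2': '${STRING_KEY}/${PRODUCT_NAME}',
        'BRACE_DEPENDENT_KEY1': 'D:${BRACE_DEPENDENT_KEY2}',
    }

    def expand(value, env):
        # Substitute ${NAME} references until none remain; unknown names
        # collapse to the empty string, like the undefined $FOO above.
        ref = re.compile(r'\$\{(\w+)\}')
        while ref.search(value):
            value = ref.sub(lambda m: env.get(m.group(1), ''), value)
        return value

    assert expand(settings['BRACE_DEPENDENT_KEY1'], settings) == 'D:/Source/Project/Test'
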
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/aliasing.cc b/third_party/python/gyp/test/mac/xcode-gcc/aliasing.cc
new file mode 100644
index 0000000000..16a41efb15
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/aliasing.cc
@@ -0,0 +1,13 @@
+#include <stdio.h>
+
+void check(int* h, long* k) {
+ *h = 1;
+ *k = 0;
+ printf("%d\n", *h);
+}
+
+int main(void) {
+ long k;
+ check((int*)&k, &k);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/test-clang.gyp b/third_party/python/gyp/test/mac/xcode-gcc/test-clang.gyp
new file mode 100644
index 0000000000..9f4a98ae73
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/test-clang.gyp
@@ -0,0 +1,42 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'make_global_settings': [
+ ['CC', '/usr/bin/clang'],
+ ['CXX', '/usr/bin/clang++'],
+ ],
+
+ 'targets': [
+ {
+ 'target_name': 'aliasing_yes',
+ 'type': 'executable',
+ 'sources': [ 'aliasing.cc', ],
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'GCC_STRICT_ALIASING': 'YES',
+ 'GCC_OPTIMIZATION_LEVEL': 2,
+ },
+ },
+ {
+ 'target_name': 'aliasing_no',
+ 'type': 'executable',
+ 'sources': [ 'aliasing.cc', ],
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'GCC_STRICT_ALIASING': 'NO',
+ 'GCC_OPTIMIZATION_LEVEL': 2,
+ },
+ },
+ {
+ 'target_name': 'aliasing_default',
+ 'type': 'executable',
+ 'sources': [ 'aliasing.cc', ],
+ 'xcode_settings': {
+ 'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
+ 'GCC_OPTIMIZATION_LEVEL': 2,
+ },
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/test.gyp b/third_party/python/gyp/test/mac/xcode-gcc/test.gyp
new file mode 100644
index 0000000000..1ca8b215d8
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/test.gyp
@@ -0,0 +1,60 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'target_defaults': {
+ 'xcode_settings': {
+ 'GCC_TREAT_WARNINGS_AS_ERRORS': 'YES',
+ },
+ },
+
+ 'variables': {
+ # Non-failing tests should check that these trivial files in every language
+ # still compile correctly.
+ 'valid_sources': [
+ 'valid_c.c',
+ 'valid_cc.cc',
+ 'valid_m.m',
+ 'valid_mm.mm',
+ ],
+ },
+
+ # Targets come in pairs: 'foo' and 'foo-fail', with the former building with
+ # no warnings and the latter not.
+ 'targets': [
+ # GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO (default: YES):
+ {
+ 'target_name': 'warn_about_invalid_offsetof_macro',
+ 'type': 'executable',
+ 'sources': [
+ 'warn_about_invalid_offsetof_macro.cc',
+ '<@(valid_sources)',
+ ],
+ 'xcode_settings': {
+ 'GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO': 'NO',
+ },
+ },
+ {
+ 'target_name': 'warn_about_invalid_offsetof_macro-fail',
+ 'type': 'executable',
+ 'sources': [ 'warn_about_invalid_offsetof_macro.cc', ],
+ },
+ # GCC_WARN_ABOUT_MISSING_NEWLINE (default: NO):
+ {
+ 'target_name': 'warn_about_missing_newline',
+ 'type': 'executable',
+ 'sources': [
+ 'warn_about_missing_newline.c',
+ '<@(valid_sources)',
+ ],
+ },
+ {
+ 'target_name': 'warn_about_missing_newline-fail',
+ 'type': 'executable',
+ 'sources': [ 'warn_about_missing_newline.c', ],
+ 'xcode_settings': {
+ 'GCC_WARN_ABOUT_MISSING_NEWLINE': 'YES',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/valid_c.c b/third_party/python/gyp/test/mac/xcode-gcc/valid_c.c
new file mode 100644
index 0000000000..2b10ac3ed7
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/valid_c.c
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file exists to test that valid C files compile correctly.
+
+void FunctionInCFile(void) {
+}
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/valid_cc.cc b/third_party/python/gyp/test/mac/xcode-gcc/valid_cc.cc
new file mode 100644
index 0000000000..31cddc3c9c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/valid_cc.cc
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file exists to test that valid C++ files compile correctly.
+
+void FunctionInCCFile() {
+}
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/valid_m.m b/third_party/python/gyp/test/mac/xcode-gcc/valid_m.m
new file mode 100644
index 0000000000..95bddb2723
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/valid_m.m
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file exists to test that valid Objective-C files compile correctly.
+
+void FunctionInMFile(void) {
+}
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/valid_mm.mm b/third_party/python/gyp/test/mac/xcode-gcc/valid_mm.mm
new file mode 100644
index 0000000000..a7db7e3ad6
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/valid_mm.mm
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file exists to test that valid Objective-C++ files compile correctly.
+
+void FunctionInMMFile() {
+}
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/warn_about_invalid_offsetof_macro.cc b/third_party/python/gyp/test/mac/xcode-gcc/warn_about_invalid_offsetof_macro.cc
new file mode 100644
index 0000000000..4a4612be0d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/warn_about_invalid_offsetof_macro.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#define offsetof(st, m) ((unsigned)((char*)&((st*)0)->m - (char*)0))
+
+struct MyStruct {
+ virtual void MyFunc() = 0;
+ int my_member;
+};
+
+int main() {
+ unsigned x = offsetof(MyStruct, my_member);
+ return x ? 0 : 1;
+}
diff --git a/third_party/python/gyp/test/mac/xcode-gcc/warn_about_missing_newline.c b/third_party/python/gyp/test/mac/xcode-gcc/warn_about_missing_newline.c
new file mode 100644
index 0000000000..6faf0895db
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-gcc/warn_about_missing_newline.c
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Important: Don't terminate this file with a newline.
+int main() {
+ return 0;
+} \ No newline at end of file
diff --git a/third_party/python/gyp/test/mac/xcode-support-actions/source.c b/third_party/python/gyp/test/mac/xcode-support-actions/source.c
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-support-actions/source.c
diff --git a/third_party/python/gyp/test/mac/xcode-support-actions/test.gyp b/third_party/python/gyp/test/mac/xcode-support-actions/test.gyp
new file mode 100644
index 0000000000..ad81b8c456
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcode-support-actions/test.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'target',
+ 'product_name': 'Product',
+ 'type': 'shared_library',
+ 'mac_bundle': 1,
+ 'sources': [
+ '<(PRODUCT_DIR)/copy.c',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'Helper',
+ 'description': 'Helps',
+ 'inputs': [ 'source.c' ],
+ 'outputs': [ '<(PRODUCT_DIR)/copy.c' ],
+ 'action': [ 'cp', '${SOURCE_ROOT}/source.c',
+ '<(PRODUCT_DIR)/copy.c' ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/mac/xctest/MyClass.h b/third_party/python/gyp/test/mac/xctest/MyClass.h
new file mode 100644
index 0000000000..dde13aa33d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xctest/MyClass.h
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <Foundation/Foundation.h>
+
+@interface MyClass : NSObject
+@end
diff --git a/third_party/python/gyp/test/mac/xctest/MyClass.m b/third_party/python/gyp/test/mac/xctest/MyClass.m
new file mode 100644
index 0000000000..df11471b07
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xctest/MyClass.m
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "MyClass.h"
+
+@implementation MyClass
+@end
diff --git a/third_party/python/gyp/test/mac/xctest/TestCase.m b/third_party/python/gyp/test/mac/xctest/TestCase.m
new file mode 100644
index 0000000000..36846a1fda
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xctest/TestCase.m
@@ -0,0 +1,16 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <XCTest/XCTest.h>
+#import "MyClass.h"
+
+@interface TestCase : XCTestCase
+@end
+
+@implementation TestCase
+- (void)testFoo {
+ MyClass *foo = [[MyClass alloc] init];
+ XCTAssertNotNil(foo, @"expected non-nil object");
+}
+@end
diff --git a/third_party/python/gyp/test/mac/xctest/resource.txt b/third_party/python/gyp/test/mac/xctest/resource.txt
new file mode 100644
index 0000000000..257cc5642c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xctest/resource.txt
@@ -0,0 +1 @@
+foo
diff --git a/third_party/python/gyp/test/mac/xctest/test.gyp b/third_party/python/gyp/test/mac/xctest/test.gyp
new file mode 100644
index 0000000000..ac25656b35
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xctest/test.gyp
@@ -0,0 +1,47 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'classes',
+ 'type': 'static_library',
+ 'sources': [
+ 'MyClass.h',
+ 'MyClass.m',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ ],
+ },
+ },
+ {
+ 'target_name': 'tests',
+ 'type': 'loadable_module',
+ 'mac_xctest_bundle': 1,
+ 'sources': [
+ 'TestCase.m',
+ ],
+ 'dependencies': [
+ 'classes',
+ ],
+ 'mac_bundle_resources': [
+ 'resource.txt',
+ ],
+ 'xcode_settings': {
+ 'WRAPPER_EXTENSION': 'xctest',
+ 'FRAMEWORK_SEARCH_PATHS': [
+ '$(inherited)',
+ '$(DEVELOPER_FRAMEWORKS_DIR)',
+ ],
+ 'OTHER_LDFLAGS': [
+ '$(inherited)',
+ '-ObjC',
+ ],
+ },
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/mac/xctest/test.xcodeproj/xcshareddata/xcschemes/classes.xcscheme b/third_party/python/gyp/test/mac/xctest/test.xcodeproj/xcshareddata/xcschemes/classes.xcscheme
new file mode 100644
index 0000000000..6bd1bb9696
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xctest/test.xcodeproj/xcshareddata/xcschemes/classes.xcscheme
@@ -0,0 +1,69 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Scheme
+ LastUpgradeVersion = "0500"
+ version = "1.3">
+ <BuildAction
+ parallelizeBuildables = "YES"
+ buildImplicitDependencies = "YES">
+ <BuildActionEntries>
+ <BuildActionEntry
+ buildForTesting = "YES"
+ buildForRunning = "YES"
+ buildForProfiling = "YES"
+ buildForArchiving = "YES"
+ buildForAnalyzing = "YES">
+ <BuildableReference
+ BuildableIdentifier = "primary"
+ BlueprintIdentifier = "D3B79173B4570A3C70A902FF"
+ BuildableName = "libclasses.a"
+ BlueprintName = "classes"
+ ReferencedContainer = "container:test.xcodeproj">
+ </BuildableReference>
+ </BuildActionEntry>
+ </BuildActionEntries>
+ </BuildAction>
+ <TestAction
+ selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+ selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+ shouldUseLaunchSchemeArgsEnv = "YES"
+ buildConfiguration = "Default">
+ <Testables>
+ <TestableReference
+ skipped = "NO">
+ <BuildableReference
+ BuildableIdentifier = "primary"
+ BlueprintIdentifier = "2ACDAB234B9E5D65CACBCF9C"
+ BuildableName = "tests.xctest"
+ BlueprintName = "tests"
+ ReferencedContainer = "container:test.xcodeproj">
+ </BuildableReference>
+ </TestableReference>
+ </Testables>
+ </TestAction>
+ <LaunchAction
+ selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
+ selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+ launchStyle = "0"
+ useCustomWorkingDirectory = "NO"
+ buildConfiguration = "Default"
+ ignoresPersistentStateOnLaunch = "NO"
+ debugDocumentVersioning = "YES"
+ allowLocationSimulation = "YES">
+ <AdditionalOptions>
+ </AdditionalOptions>
+ </LaunchAction>
+ <ProfileAction
+ shouldUseLaunchSchemeArgsEnv = "YES"
+ savedToolIdentifier = ""
+ useCustomWorkingDirectory = "NO"
+ buildConfiguration = "Default"
+ debugDocumentVersioning = "YES">
+ </ProfileAction>
+ <AnalyzeAction
+ buildConfiguration = "Default">
+ </AnalyzeAction>
+ <ArchiveAction
+ buildConfiguration = "Default"
+ revealArchiveInOrganizer = "YES">
+ </ArchiveAction>
+</Scheme>
diff --git a/third_party/python/gyp/test/mac/xcuitest/Info.plist b/third_party/python/gyp/test/mac/xcuitest/Info.plist
new file mode 100644
index 0000000000..ae8852b836
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcuitest/Info.plist
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>CFBundleDevelopmentRegion</key>
+ <string>English</string>
+ <key>CFBundleExecutable</key>
+ <string>${EXECUTABLE_NAME}</string>
+ <key>CFBundleIconFile</key>
+ <string></string>
+ <key>CFBundleIdentifier</key>
+ <string>com.yourcompany.${PRODUCT_NAME}</string>
+ <key>CFBundleInfoDictionaryVersion</key>
+ <string>6.0</string>
+ <key>CFBundleName</key>
+ <string>${PRODUCT_NAME}</string>
+ <key>CFBundlePackageType</key>
+ <string>BNDL</string>
+ <key>CFBundleShortVersionString</key>
+ <string>1.0</string>
+ <key>CFBundleSignature</key>
+ <string>????</string>
+ <key>CFBundleVersion</key>
+ <string>1</string>
+ <key>NSPrincipalClass</key>
+ <string></string>
+</dict>
+</plist>
diff --git a/third_party/python/gyp/test/mac/xcuitest/MyAppDelegate.h b/third_party/python/gyp/test/mac/xcuitest/MyAppDelegate.h
new file mode 100644
index 0000000000..445be2cb42
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcuitest/MyAppDelegate.h
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+@interface MyAppDelegate : NSObject<UIApplicationDelegate>
+@end
diff --git a/third_party/python/gyp/test/mac/xcuitest/MyAppDelegate.m b/third_party/python/gyp/test/mac/xcuitest/MyAppDelegate.m
new file mode 100644
index 0000000000..6ad60fa9d4
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcuitest/MyAppDelegate.m
@@ -0,0 +1,19 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import "MyAppDelegate.h"
+
+
+@implementation MyAppDelegate
+@synthesize window;
+
+- (BOOL)application:(UIApplication *)application
+ didFinishLaunchingWithOptions:(NSDictionary *)launchOptions {
+ self.window = [[UIWindow alloc] init];
+ self.window.rootViewController = [[UIViewController alloc] init];
+ [self.window makeKeyAndVisible];
+ return YES;
+}
+
+@end
diff --git a/third_party/python/gyp/test/mac/xcuitest/TestCase.m b/third_party/python/gyp/test/mac/xcuitest/TestCase.m
new file mode 100644
index 0000000000..1f32b7af74
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcuitest/TestCase.m
@@ -0,0 +1,15 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <XCTest/XCTest.h>
+
+@interface TestCase : XCTestCase
+@end
+
+@implementation TestCase
+- (void)testFoo {
+ XCUIApplication *foo = [[XCUIApplication alloc] init];
+ XCTAssertNotNil(foo, @"expected non-nil object");
+}
+@end
diff --git a/third_party/python/gyp/test/mac/xcuitest/main.m b/third_party/python/gyp/test/mac/xcuitest/main.m
new file mode 100644
index 0000000000..e7cb62e639
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcuitest/main.m
@@ -0,0 +1,15 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#import <UIKit/UIKit.h>
+
+#import "MyAppDelegate.h"
+
+int main(int argc, char * argv[]) {
+ @autoreleasepool {
+ UIApplicationMain(argc, argv,
+ nil, NSStringFromClass([MyAppDelegate class]));
+ }
+ return 1;
+}
diff --git a/third_party/python/gyp/test/mac/xcuitest/resource.txt b/third_party/python/gyp/test/mac/xcuitest/resource.txt
new file mode 100644
index 0000000000..257cc5642c
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcuitest/resource.txt
@@ -0,0 +1 @@
+foo
diff --git a/third_party/python/gyp/test/mac/xcuitest/test.gyp b/third_party/python/gyp/test/mac/xcuitest/test.gyp
new file mode 100644
index 0000000000..80cdf9032d
--- /dev/null
+++ b/third_party/python/gyp/test/mac/xcuitest/test.gyp
@@ -0,0 +1,69 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'xcode_settings': {
+ 'SDKROOT': 'iphoneos',
+ 'FRAMEWORK_SEARCH_PATHS': [
+ '$(inherited)',
+ '$(DEVELOPER_FRAMEWORKS_DIR)',
+ ],
+ 'OTHER_LDFLAGS': [
+ '$(inherited)',
+ '-ObjC',
+ ],
+ 'GCC_PREFIX_HEADER': '',
+ 'CLANG_ENABLE_OBJC_ARC': 'YES',
+ 'INFOPLIST_FILE': 'Info.plist',
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'testApp',
+ 'type': 'executable',
+ 'mac_bundle': 1,
+ 'sources': [
+ 'MyAppDelegate.h',
+ 'MyAppDelegate.m',
+ 'main.m',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
+ '$(SDKROOT)/System/Library/Frameworks/UIKit.framework',
+ ],
+ },
+ },
+ {
+ 'target_name': 'tests',
+ 'type': 'loadable_module',
+ 'mac_bundle': 1,
+ 'mac_xcuitest_bundle': 1,
+ 'sources': [
+ 'TestCase.m',
+ ],
+ 'dependencies': [
+ 'testApp',
+ ],
+ 'mac_bundle_resources': [
+ 'resource.txt',
+ ],
+ 'variables': {
+ # This must *not* be set for xctest ui tests.
+ 'xctest_host': '',
+ },
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/XCTest.framework',
+ ]
+ },
+ 'xcode_settings': {
+ 'WRAPPER_EXTENSION': 'xctest',
+ 'TEST_TARGET_NAME': 'testApp',
+ },
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/make/dependencies.gyp b/third_party/python/gyp/test/make/dependencies.gyp
new file mode 100644
index 0000000000..e2bee24fce
--- /dev/null
+++ b/third_party/python/gyp/test/make/dependencies.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'main',
+ 'type': 'executable',
+ 'sources': [
+ 'main.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/make/gyptest-dependencies.py b/third_party/python/gyp/test/make/gyptest-dependencies.py
new file mode 100755
index 0000000000..d215f76782
--- /dev/null
+++ b/third_party/python/gyp/test/make/gyptest-dependencies.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that .d files and all.deps are properly generated.
+"""
+
+import TestGyp
+
+# .d files are only used by the make build.
+test = TestGyp.TestGyp(formats=['make'])
+
+test.run_gyp('dependencies.gyp')
+
+test.build('dependencies.gyp', test.ALL)
+
+deps_file = test.built_file_path(".deps/out/Default/obj.target/main/main.o.d")
+test.must_contain(deps_file, "main.h")
+
+# Build a second time to make sure we generate all.deps.
+test.build('dependencies.gyp', test.ALL)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/make/gyptest-noload.py b/third_party/python/gyp/test/make/gyptest-noload.py
new file mode 100755
index 0000000000..1f5103315c
--- /dev/null
+++ b/third_party/python/gyp/test/make/gyptest-noload.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests the use of the NO_LOAD flag which makes loading sub .mk files
+optional.
+"""
+
+# Python 2.5 needs this for the with statement.
+from __future__ import with_statement
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['make'])
+
+test.run_gyp('all.gyp', chdir='noload')
+
+test.relocate('noload', 'relocate/noload')
+
+test.build('build/all.gyp', test.ALL, chdir='relocate/noload')
+test.run_built_executable('exe', chdir='relocate/noload',
+ stdout='Hello from shared.c.\n')
+
+# Just sanity test that NO_LOAD=lib doesn't break anything.
+test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
+ arguments=['NO_LOAD=lib'])
+test.run_built_executable('exe', chdir='relocate/noload',
+ stdout='Hello from shared.c.\n')
+test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
+ arguments=['NO_LOAD=z'])
+test.run_built_executable('exe', chdir='relocate/noload',
+ stdout='Hello from shared.c.\n')
+
+# Make sure we can rebuild without reloading the sub .mk file.
+with open('relocate/noload/main.c', 'a') as src_file:
+ src_file.write("\n")
+test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
+ arguments=['NO_LOAD=lib'])
+test.run_built_executable('exe', chdir='relocate/noload',
+ stdout='Hello from shared.c.\n')
+
+# Change shared.c, but verify that it doesn't get rebuilt if we don't load it.
+with open('relocate/noload/lib/shared.c', 'w') as shared_file:
+ shared_file.write(
+ '#include "shared.h"\n'
+ 'const char kSharedStr[] = "modified";\n'
+ )
+test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
+ arguments=['NO_LOAD=lib'])
+test.run_built_executable('exe', chdir='relocate/noload',
+ stdout='Hello from shared.c.\n')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/make/main.cc b/third_party/python/gyp/test/make/main.cc
new file mode 100644
index 0000000000..3b9a705c24
--- /dev/null
+++ b/third_party/python/gyp/test/make/main.cc
@@ -0,0 +1,12 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+#include "main.h"
+
+int main(void) {
+ printf("hello world\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/make/main.h b/third_party/python/gyp/test/make/main.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/make/main.h
diff --git a/third_party/python/gyp/test/make/noload/all.gyp b/third_party/python/gyp/test/make/noload/all.gyp
new file mode 100644
index 0000000000..1617a9e97c
--- /dev/null
+++ b/third_party/python/gyp/test/make/noload/all.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'exe',
+ 'type': 'executable',
+ 'sources': [
+ 'main.c',
+ ],
+ 'dependencies': [
+ 'lib/shared.gyp:shared',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/make/noload/lib/shared.c b/third_party/python/gyp/test/make/noload/lib/shared.c
new file mode 100644
index 0000000000..51776c5acf
--- /dev/null
+++ b/third_party/python/gyp/test/make/noload/lib/shared.c
@@ -0,0 +1,3 @@
+#include "shared.h"
+
+const char kSharedStr[] = "shared.c";
diff --git a/third_party/python/gyp/test/make/noload/lib/shared.gyp b/third_party/python/gyp/test/make/noload/lib/shared.gyp
new file mode 100644
index 0000000000..8a8841b3a0
--- /dev/null
+++ b/third_party/python/gyp/test/make/noload/lib/shared.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'shared',
+ 'type': 'shared_library',
+ 'sources': [
+ 'shared.c',
+ 'shared.h',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/make/noload/lib/shared.h b/third_party/python/gyp/test/make/noload/lib/shared.h
new file mode 100644
index 0000000000..a21da7538b
--- /dev/null
+++ b/third_party/python/gyp/test/make/noload/lib/shared.h
@@ -0,0 +1 @@
+extern const char kSharedStr[];
diff --git a/third_party/python/gyp/test/make/noload/main.c b/third_party/python/gyp/test/make/noload/main.c
new file mode 100644
index 0000000000..26ec1889ad
--- /dev/null
+++ b/third_party/python/gyp/test/make/noload/main.c
@@ -0,0 +1,9 @@
+#include <stdio.h>
+
+#include "lib/shared.h"
+
+int main(void)
+{
+ printf("Hello from %s.\n", kSharedStr);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/make_global_settings/ar/gyptest-make_global_settings_ar.py b/third_party/python/gyp/test/make_global_settings/ar/gyptest-make_global_settings_ar.py
new file mode 100644
index 0000000000..aabc5618d5
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/ar/gyptest-make_global_settings_ar.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies 'AR' in make_global_settings.
+"""
+
+import os
+import sys
+import TestGyp
+
+def resolve_path(test, path):
+ if path is None:
+ return None
+ elif test.format == 'make':
+ return '$(abspath %s)' % path
+ elif test.format in ['ninja', 'xcode-ninja']:
+ return os.path.join('..', '..', path)
+ else:
+ test.fail_test()
+
+
+def verify_ar_target(test, ar=None, rel_path=False):
+ if rel_path:
+ ar_expected = resolve_path(test, ar)
+ else:
+ ar_expected = ar
+ # Resolve default values
+ if ar_expected is None:
+ if test.format == 'make':
+      # The Make generator doesn't set a default value for AR.
+      # You can remove the following assertion as long as doing so doesn't
+      # break existing projects.
+ test.must_not_contain('Makefile', 'AR ?= ')
+ return
+ elif test.format in ['ninja', 'xcode-ninja']:
+ if sys.platform == 'win32':
+ ar_expected = 'lib.exe'
+ else:
+ ar_expected = 'ar'
+ if test.format == 'make':
+ test.must_contain('Makefile', 'AR ?= %s' % ar_expected)
+ elif test.format in ['ninja', 'xcode-ninja']:
+ test.must_contain('out/Default/build.ninja', 'ar = %s' % ar_expected)
+ else:
+ test.fail_test()
+
+
+def verify_ar_host(test, ar=None, rel_path=False):
+ if rel_path:
+ ar_expected = resolve_path(test, ar)
+ else:
+ ar_expected = ar
+ # Resolve default values
+ if ar_expected is None:
+ if sys.platform == 'win32':
+ ar_expected = 'lib.exe'
+ else:
+ ar_expected = 'ar'
+ if test.format == 'make':
+ test.must_contain('Makefile', 'AR.host ?= %s' % ar_expected)
+ elif test.format in ['ninja', 'xcode-ninja']:
+ test.must_contain('out/Default/build.ninja', 'ar_host = %s' % ar_expected)
+ else:
+ test.fail_test()
+
+
+test_format = ['ninja']
+if sys.platform.startswith('linux') or sys.platform == 'darwin':
+ test_format += ['make']
+
+test = TestGyp.TestGyp(formats=test_format)
+
+# Check default values
+test.run_gyp('make_global_settings_ar.gyp')
+verify_ar_target(test)
+
+
+# Check default values with GYP_CROSSCOMPILE enabled.
+with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('make_global_settings_ar.gyp')
+verify_ar_target(test)
+verify_ar_host(test)
+
+
+# Test 'AR' in 'make_global_settings'.
+with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('make_global_settings_ar.gyp', '-Dcustom_ar_target=my_ar')
+verify_ar_target(test, ar='my_ar', rel_path=True)
+
+
+# Test 'AR'/'AR.host' in 'make_global_settings'.
+with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('make_global_settings_ar.gyp',
+ '-Dcustom_ar_target=my_ar_target1',
+ '-Dcustom_ar_host=my_ar_host1')
+verify_ar_target(test, ar='my_ar_target1', rel_path=True)
+verify_ar_host(test, ar='my_ar_host1', rel_path=True)
+
+
+# Test $AR and $AR_host environment variables.
+with TestGyp.LocalEnv({'AR': 'my_ar_target2',
+ 'AR_host': 'my_ar_host2'}):
+ test.run_gyp('make_global_settings_ar.gyp')
+# The Ninja generator resolves $AR in the gyp phase; the Make generator doesn't.
+if test.format == 'ninja':
+ if sys.platform == 'win32':
+    # TODO(yukawa): Check whether this is the expected result or not.
+ verify_ar_target(test, ar='lib.exe', rel_path=False)
+ else:
+ verify_ar_target(test, ar='my_ar_target2', rel_path=False)
+verify_ar_host(test, ar='my_ar_host2', rel_path=False)
+
+
+# Test 'AR' in 'make_global_settings' with $AR_host environment variable.
+with TestGyp.LocalEnv({'AR_host': 'my_ar_host3'}):
+ test.run_gyp('make_global_settings_ar.gyp',
+ '-Dcustom_ar_target=my_ar_target3')
+verify_ar_target(test, ar='my_ar_target3', rel_path=True)
+verify_ar_host(test, ar='my_ar_host3', rel_path=False)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/make_global_settings/ar/make_global_settings_ar.gyp b/third_party/python/gyp/test/make_global_settings/ar/make_global_settings_ar.gyp
new file mode 100644
index 0000000000..3430d82a51
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/ar/make_global_settings_ar.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'custom_ar_target%': '',
+ 'custom_ar_host%': '',
+ },
+ 'conditions': [
+ ['"<(custom_ar_target)"!=""', {
+ 'make_global_settings': [
+ ['AR', '<(custom_ar_target)'],
+ ],
+ }],
+ ['"<(custom_ar_host)"!=""', {
+ 'make_global_settings': [
+ ['AR.host', '<(custom_ar_host)'],
+ ],
+ }],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'make_global_settings_ar_test',
+ 'type': 'static_library',
+ 'sources': [ 'foo.c' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/make_global_settings/basics/gyptest-make_global_settings.py b/third_party/python/gyp/test/make_global_settings/basics/gyptest-make_global_settings.py
new file mode 100644
index 0000000000..8f48875967
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/basics/gyptest-make_global_settings.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies make_global_settings.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+import TestGyp
+
+print("This test is currently disabled: https://crbug.com/483696.")
+sys.exit(0)
+
+test_format = ['ninja']
+if sys.platform.startswith('linux') or sys.platform == 'darwin':
+ test_format += ['make']
+
+test = TestGyp.TestGyp(formats=test_format)
+
+test.run_gyp('make_global_settings.gyp')
+
+if test.format == 'make':
+ cc_expected = """ifneq (,$(filter $(origin CC), undefined default))
+ CC = $(abspath clang)
+endif
+"""
+ if sys.platform.startswith('linux'):
+ link_expected = """
+LINK ?= $(abspath clang)
+"""
+ elif sys.platform == 'darwin':
+ link_expected = """
+LINK ?= $(abspath clang)
+"""
+ test.must_contain('Makefile', cc_expected)
+ test.must_contain('Makefile', link_expected)
+if test.format == 'ninja':
+ cc_expected = 'cc = ' + os.path.join('..', '..', 'clang')
+ ld_expected = 'ld = $cc'
+ if sys.platform == 'win32':
+ ld_expected = 'link.exe'
+ test.must_contain('out/Default/build.ninja', cc_expected)
+ test.must_contain('out/Default/build.ninja', ld_expected)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/make_global_settings/basics/make_global_settings.gyp b/third_party/python/gyp/test/make_global_settings/basics/make_global_settings.gyp
new file mode 100644
index 0000000000..47dbc8570f
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/basics/make_global_settings.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'make_global_settings': [
+ ['CC', 'clang'],
+ ['LINK', 'clang'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'test',
+ 'type': 'static_library',
+ 'sources': [ 'foo.c' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/make_global_settings/env-wrapper/gyptest-wrapper.py b/third_party/python/gyp/test/make_global_settings/env-wrapper/gyptest-wrapper.py
new file mode 100644
index 0000000000..409799e315
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/env-wrapper/gyptest-wrapper.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies *_wrapper in environment.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+import TestGyp
+
+print("This test is currently disabled: https://crbug.com/483696.")
+sys.exit(0)
+
+test_format = ['ninja']
+
+os.environ['CC_wrapper'] = 'distcc'
+os.environ['LINK_wrapper'] = 'distlink'
+os.environ['CC.host_wrapper'] = 'ccache'
+
+test = TestGyp.TestGyp(formats=test_format)
+
+old_env = dict(os.environ)
+os.environ['GYP_CROSSCOMPILE'] = '1'
+test.run_gyp('wrapper.gyp')
+os.environ.clear()
+os.environ.update(old_env)
+
+if test.format == 'ninja':
+ cc_expected = ('cc = ' + os.path.join('..', '..', 'distcc') + ' ' +
+ os.path.join('..', '..', 'clang'))
+ cc_host_expected = ('cc_host = ' + os.path.join('..', '..', 'ccache') + ' ' +
+ os.path.join('..', '..', 'clang'))
+ ld_expected = 'ld = ../../distlink $cc'
+ if sys.platform != 'win32':
+ ldxx_expected = 'ldxx = ../../distlink $cxx'
+
+ if sys.platform == 'win32':
+ ld_expected = 'link.exe'
+ test.must_contain('out/Default/build.ninja', cc_expected)
+ test.must_contain('out/Default/build.ninja', cc_host_expected)
+ test.must_contain('out/Default/build.ninja', ld_expected)
+ if sys.platform != 'win32':
+ test.must_contain('out/Default/build.ninja', ldxx_expected)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/make_global_settings/env-wrapper/wrapper.gyp b/third_party/python/gyp/test/make_global_settings/env-wrapper/wrapper.gyp
new file mode 100644
index 0000000000..1698d71dd4
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/env-wrapper/wrapper.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'make_global_settings': [
+ ['CC', 'clang'],
+ ['CC.host', 'clang'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'test',
+ 'type': 'static_library',
+ 'sources': [ 'foo.c' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/make_global_settings/full-toolchain/bar.cc b/third_party/python/gyp/test/make_global_settings/full-toolchain/bar.cc
new file mode 100644
index 0000000000..afb422ba1a
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/full-toolchain/bar.cc
@@ -0,0 +1 @@
+#error Not a real source file
diff --git a/third_party/python/gyp/test/make_global_settings/full-toolchain/foo.c b/third_party/python/gyp/test/make_global_settings/full-toolchain/foo.c
new file mode 100644
index 0000000000..afb422ba1a
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/full-toolchain/foo.c
@@ -0,0 +1 @@
+#error Not a real source file
diff --git a/third_party/python/gyp/test/make_global_settings/full-toolchain/gyptest-make_global_settings.py b/third_party/python/gyp/test/make_global_settings/full-toolchain/gyptest-make_global_settings.py
new file mode 100644
index 0000000000..542fd631c2
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/full-toolchain/gyptest-make_global_settings.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies make_global_settings works with the full toolchain.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+import TestGyp
+
+if sys.platform == 'win32':
+  # Cross-compiling is not supported by ninja on Windows,
+  # and make is not supported on Windows at all.
+ sys.exit(0)
+
+print("This test is currently disabled: https://crbug.com/483696.")
+sys.exit(0)
+
+test = TestGyp.TestGyp(formats=['ninja'])
+# Must set the test format to something with a flavor (the part after the '-')
+# in order to test the desired behavior. Since we want to run a non-host
+# toolchain, we have to set the flavor to something that the ninja generator
+# doesn't know about, so it doesn't default to the host-specific tools (e.g.,
+# 'otool' on mac to generate the .TOC).
+#
+# Note that we can't just pass format=['ninja-some_toolchain'] to the
+# constructor above, because then this test wouldn't be recognized as a ninja
+# format test.
+test.formats = ['ninja-my_flavor' if f == 'ninja' else f for f in test.formats]
+
+gyp_file = 'make_global_settings.gyp'
+
+test.run_gyp(gyp_file,
+ # Teach the .gyp file about the location of my_nm.py and
+ # my_readelf.py, and the python executable.
+ '-Dworkdir=%s' % test.workdir,
+ '-Dpython=%s' % sys.executable)
+test.build(gyp_file,
+ arguments=['-v'] if test.format == 'ninja-my_flavor' else [])
+
+expected = ['MY_CC', 'MY_CXX']
+test.must_contain_all_lines(test.stdout(), expected)
+
+test.must_contain(test.built_file_path('RAN_MY_NM'), 'RAN_MY_NM')
+test.must_contain(test.built_file_path('RAN_MY_READELF'), 'RAN_MY_READELF')
+
+test.pass_test()
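
The flavor trick described in the comment above hinges on how gyp splits a format string: everything after the first '-' is treated as the flavor. A sketch of that split, assuming it mirrors gyp's behavior:

    fmt = 'ninja-my_flavor'
    generator, _, flavor = fmt.partition('-')
    assert (generator, flavor) == ('ninja', 'my_flavor')
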
diff --git a/third_party/python/gyp/test/make_global_settings/full-toolchain/make_global_settings.gyp b/third_party/python/gyp/test/make_global_settings/full-toolchain/make_global_settings.gyp
new file mode 100644
index 0000000000..2c3266322d
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/full-toolchain/make_global_settings.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'make_global_settings': [
+ ['CC', '/bin/echo MY_CC'],
+ ['CXX', '/bin/echo MY_CXX'],
+ ['NM', '<(python) <(workdir)/my_nm.py'],
+ ['READELF', '<(python) <(workdir)/my_readelf.py'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'test',
+ 'type': 'shared_library',
+ 'sources': [
+ 'foo.c',
+ 'bar.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/make_global_settings/full-toolchain/my_nm.py b/third_party/python/gyp/test/make_global_settings/full-toolchain/my_nm.py
new file mode 100755
index 0000000000..2c4e678110
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/full-toolchain/my_nm.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import print_function
+import sys
+print(sys.argv)
+with open('RAN_MY_NM', 'w') as f:
+ f.write('RAN_MY_NM')
diff --git a/third_party/python/gyp/test/make_global_settings/full-toolchain/my_readelf.py b/third_party/python/gyp/test/make_global_settings/full-toolchain/my_readelf.py
new file mode 100755
index 0000000000..626665435e
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/full-toolchain/my_readelf.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+from __future__ import print_function
+import sys
+print(sys.argv)
+with open('RAN_MY_READELF', 'w') as f:
+ f.write('RAN_MY_READELF')
diff --git a/third_party/python/gyp/test/make_global_settings/ld/gyptest-make_global_settings_ld.py b/third_party/python/gyp/test/make_global_settings/ld/gyptest-make_global_settings_ld.py
new file mode 100644
index 0000000000..e5f50fbb5b
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/ld/gyptest-make_global_settings_ld.py
@@ -0,0 +1,130 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies 'LD' in make_global_settings.
+"""
+
+import os
+import sys
+import TestGyp
+
+def resolve_path(test, path):
+ if path is None:
+ return None
+ elif test.format == 'make':
+ return '$(abspath %s)' % path
+ elif test.format in ['ninja', 'xcode-ninja']:
+ return os.path.join('..', '..', path)
+ else:
+ test.fail_test()
+
+
+def verify_ld_target(test, ld=None, rel_path=False):
+ if rel_path:
+ ld_expected = resolve_path(test, ld)
+ else:
+ ld_expected = ld
+ # Resolve default values
+ if ld_expected is None:
+ if test.format == 'make':
+      # The Make generator doesn't set a default value for LD.
+      # You can remove the following assertion as long as doing so doesn't
+      # break existing projects.
+ test.must_not_contain('Makefile', 'LD ?= ')
+ return
+ elif test.format in ['ninja', 'xcode-ninja']:
+ if sys.platform == 'win32':
+ ld_expected = 'link.exe'
+ else:
+ ld_expected = '$cc'
+ if test.format == 'make':
+ test.must_contain('Makefile', 'LD ?= %s' % ld_expected)
+ elif test.format in ['ninja', 'xcode-ninja']:
+ test.must_contain('out/Default/build.ninja', 'ld = %s' % ld_expected)
+ else:
+ test.fail_test()
+
+
+def verify_ld_host(test, ld=None, rel_path=False):
+ if rel_path:
+ ld_expected = resolve_path(test, ld)
+ else:
+ ld_expected = ld
+ # Resolve default values
+ if ld_expected is None:
+ if test.format == 'make':
+ # The make generator doesn't set a default value for LD.host.
+ # This assertion can be removed as long as doing so doesn't
+ # break existing projects.
+ test.must_not_contain('Makefile', 'LD.host ?= ')
+ return
+ elif test.format in ['ninja', 'xcode-ninja']:
+ if sys.platform == 'win32':
+ ld_expected = '$ld'
+ else:
+ ld_expected = '$cc_host'
+ if test.format == 'make':
+ test.must_contain('Makefile', 'LD.host ?= %s' % ld_expected)
+ elif test.format in ['ninja', 'xcode-ninja']:
+ test.must_contain('out/Default/build.ninja', 'ld_host = %s' % ld_expected)
+ else:
+ test.fail_test()
+
+
+test_format = ['ninja']
+if sys.platform.startswith('linux') or sys.platform == 'darwin':
+ test_format += ['make']
+
+test = TestGyp.TestGyp(formats=test_format)
+
+# Check default values
+test.run_gyp('make_global_settings_ld.gyp')
+verify_ld_target(test)
+
+
+# Check default values with GYP_CROSSCOMPILE enabled.
+with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('make_global_settings_ld.gyp')
+verify_ld_target(test)
+verify_ld_host(test)
+
+
+# Test 'LD' in 'make_global_settings'.
+with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('make_global_settings_ld.gyp', '-Dcustom_ld_target=my_ld')
+verify_ld_target(test, ld='my_ld', rel_path=True)
+
+
+# Test 'LD'/'LD.host' in 'make_global_settings'.
+with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1'}):
+ test.run_gyp('make_global_settings_ld.gyp',
+ '-Dcustom_ld_target=my_ld_target1',
+ '-Dcustom_ld_host=my_ld_host1')
+verify_ld_target(test, ld='my_ld_target1', rel_path=True)
+verify_ld_host(test, ld='my_ld_host1', rel_path=True)
+
+
+# Unlike other environment variables such as $AR/$AR_host, $CC/$CC_host,
+# and $CXX/$CXX_host, neither the make generator nor the ninja generator
+# recognizes the $LD/$LD_host environment variables as of r1935. This may or
+# may not be intentional, but this test case verifies the behavior for the
+# record.
+# If you want to support $LD/$LD_host, please revise the following test case as
+# well as the generator.
+with TestGyp.LocalEnv({'GYP_CROSSCOMPILE': '1',
+ 'LD': 'my_ld_target2',
+ 'LD_host': 'my_ld_host2'}):
+ test.run_gyp('make_global_settings_ld.gyp')
+if test.format == 'make':
+ test.must_not_contain('Makefile', 'my_ld_target2')
+ test.must_not_contain('Makefile', 'my_ld_host2')
+elif test.format == 'ninja':
+ test.must_not_contain('out/Default/build.ninja', 'my_ld_target2')
+ test.must_not_contain('out/Default/build.ninja', 'my_ld_host2')
+
+
+test.pass_test()
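For reference, the per-generator path mapping that resolve_path() applies above, restated as a standalone runnable sketch (values mirror the function; 'my_ld' is just an example path):

import os

def resolve_path_demo(fmt, path):
  # make wraps the path in $(abspath ...); ninja rewrites it relative to
  # the build directory, which sits two levels below the source root.
  if fmt == 'make':
    return '$(abspath %s)' % path
  if fmt in ('ninja', 'xcode-ninja'):
    return os.path.join('..', '..', path)
  raise ValueError('unexpected format: %s' % fmt)

assert resolve_path_demo('make', 'my_ld') == '$(abspath my_ld)'
assert resolve_path_demo('ninja', 'my_ld') == os.path.join('..', '..', 'my_ld')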
diff --git a/third_party/python/gyp/test/make_global_settings/ld/make_global_settings_ld.gyp b/third_party/python/gyp/test/make_global_settings/ld/make_global_settings_ld.gyp
new file mode 100644
index 0000000000..6837c77326
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/ld/make_global_settings_ld.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'custom_ld_target%': '',
+ 'custom_ld_host%': '',
+ },
+ 'conditions': [
+ ['"<(custom_ld_target)"!=""', {
+ 'make_global_settings': [
+ ['LD', '<(custom_ld_target)'],
+ ],
+ }],
+ ['"<(custom_ld_host)"!=""', {
+ 'make_global_settings': [
+ ['LD.host', '<(custom_ld_host)'],
+ ],
+ }],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'make_global_settings_ld_test',
+ 'type': 'static_library',
+ 'sources': [ 'foo.c' ],
+ },
+ ],
+}
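The %-suffixed variables above are defaults that a -D command-line define overrides; that is how the test injects custom linkers. An illustrative direct invocation (assumes the gyp executable is on PATH):

import subprocess

# Generate ninja files with a custom target linker. LD.host stays unset
# because custom_ld_host keeps its empty default.
subprocess.check_call(['gyp', 'make_global_settings_ld.gyp',
                       '-f', 'ninja', '-Dcustom_ld_target=my_ld'])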
diff --git a/third_party/python/gyp/test/make_global_settings/wrapper/gyptest-wrapper.py b/third_party/python/gyp/test/make_global_settings/wrapper/gyptest-wrapper.py
new file mode 100644
index 0000000000..7ef4314b3e
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/wrapper/gyptest-wrapper.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies *_wrapper in make_global_settings.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+import TestGyp
+
+print("This test is currently disabled: https://crbug.com/483696.")
+sys.exit(0)
+
+test_format = ['ninja']
+if sys.platform.startswith('linux') or sys.platform == 'darwin':
+ test_format += ['make']
+
+test = TestGyp.TestGyp(formats=test_format)
+
+old_env = dict(os.environ)
+os.environ['GYP_CROSSCOMPILE'] = '1'
+test.run_gyp('wrapper.gyp')
+os.environ.clear()
+os.environ.update(old_env)
+
+if test.format == 'make':
+ cc_expected = """ifneq (,$(filter $(origin CC), undefined default))
+ CC = $(abspath distcc) $(abspath clang)
+endif
+"""
+ link_expected = 'LINK ?= $(abspath distlink) $(abspath clang++)'
+ test.must_contain('Makefile', cc_expected)
+ test.must_contain('Makefile', link_expected)
+if test.format == 'ninja':
+ cc_expected = ('cc = ' + os.path.join('..', '..', 'distcc') + ' ' +
+ os.path.join('..', '..', 'clang'))
+ cc_host_expected = ('cc_host = ' + os.path.join('..', '..', 'ccache') + ' ' +
+ os.path.join('..', '..', 'clang'))
+ ld_expected = 'ld = ../../distlink $cc'
+ if sys.platform == 'win32':
+ ld_expected = 'link.exe'
+ test.must_contain('out/Default/build.ninja', cc_expected)
+ test.must_contain('out/Default/build.ninja', cc_host_expected)
+ test.must_contain('out/Default/build.ninja', ld_expected)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/make_global_settings/wrapper/wrapper.gyp b/third_party/python/gyp/test/make_global_settings/wrapper/wrapper.gyp
new file mode 100644
index 0000000000..3d4cd04b16
--- /dev/null
+++ b/third_party/python/gyp/test/make_global_settings/wrapper/wrapper.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'make_global_settings': [
+ ['CC', 'clang'],
+ ['CC_wrapper', 'distcc'],
+ ['LINK', 'clang++'],
+ ['LINK_wrapper', 'distlink'],
+ ['CC.host', 'clang'],
+ ['CC.host_wrapper', 'ccache'],
+ ],
+ 'targets': [
+ {
+ 'target_name': 'test',
+ 'type': 'static_library',
+ 'sources': [ 'foo.c' ],
+ },
+ ],
+}
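A *_wrapper entry is prepended to the command line of the tool it wraps rather than replacing it. A sketch of the effective compile command produced by the settings above (file names illustrative):

cc, cc_wrapper = 'clang', 'distcc'
compile_cmd = [cc_wrapper, cc, '-c', 'foo.c', '-o', 'foo.o']
print(' '.join(compile_cmd))  # distcc clang -c foo.c -o foo.o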
diff --git a/third_party/python/gyp/test/many-actions/file0 b/third_party/python/gyp/test/many-actions/file0
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/file0
diff --git a/third_party/python/gyp/test/many-actions/file1 b/third_party/python/gyp/test/many-actions/file1
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/file1
diff --git a/third_party/python/gyp/test/many-actions/file2 b/third_party/python/gyp/test/many-actions/file2
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/file2
diff --git a/third_party/python/gyp/test/many-actions/file3 b/third_party/python/gyp/test/many-actions/file3
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/file3
diff --git a/third_party/python/gyp/test/many-actions/file4 b/third_party/python/gyp/test/many-actions/file4
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/file4
diff --git a/third_party/python/gyp/test/many-actions/gyptest-many-actions-unsorted.py b/third_party/python/gyp/test/many-actions/gyptest-many-actions-unsorted.py
new file mode 100644
index 0000000000..6927d1c7a7
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/gyptest-many-actions-unsorted.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure lots of actions in the same target don't exceed the command-line
+length limit.
+"""
+
+from __future__ import print_function
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('many-actions-unsorted.gyp')
+
+test.build('many-actions-unsorted.gyp', test.ALL)
+for i in range(15):
+ test.built_file_must_exist('generated_%d.h' % i)
+
+# Make sure the optimized cygwin setup doesn't cause problems for incremental
+# builds.
+test.touch('file1')
+test.build('many-actions-unsorted.gyp', test.ALL)
+
+test.touch('file0')
+test.build('many-actions-unsorted.gyp', test.ALL)
+
+test.touch('file2')
+test.touch('file3')
+test.touch('file4')
+test.build('many-actions-unsorted.gyp', test.ALL)
+
+test.pass_test()
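Because the 15 actions in the gyp file that follows cycle through only five input files, touching a single file should re-run every fifth action; a sketch of that mapping:

# do_i reads file(i % 5), so e.g. touching file1 re-runs do_1, do_6, do_11.
for i in range(15):
  print('do_%d reads file%d -> regenerates generated_%d.h' % (i, i % 5, i))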
diff --git a/third_party/python/gyp/test/many-actions/gyptest-many-actions.py b/third_party/python/gyp/test/many-actions/gyptest-many-actions.py
new file mode 100644
index 0000000000..4a525d32d6
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/gyptest-many-actions.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure lots of actions in the same target don't exceed the command-line
+length limit.
+"""
+
+from __future__ import print_function
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('many-actions.gyp')
+test.build('many-actions.gyp', test.ALL)
+for i in range(200):
+ test.built_file_must_exist('generated_%d.h' % i)
+test.pass_test()
diff --git a/third_party/python/gyp/test/many-actions/many-actions-unsorted.gyp b/third_party/python/gyp/test/many-actions/many-actions-unsorted.gyp
new file mode 100644
index 0000000000..eec79fe8d8
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/many-actions-unsorted.gyp
@@ -0,0 +1,154 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'msvs_cygwin_dirs': ['../../../../<(DEPTH)/third_party/cygwin'],
+ },
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'none',
+ 'actions': [
+ # Notice that the inputs go 0, 1, ..., 0, 1, .... This guards against
+ # a regression in the msvs generator's _AddActions.
+ {
+ 'action_name': 'do_0',
+ 'inputs': ['file0'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_0.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_0.h',
+ ],
+ },
+ {
+ 'action_name': 'do_1',
+ 'inputs': ['file1'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_1.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_1.h',
+ ],
+ },
+ {
+ 'action_name': 'do_2',
+ 'inputs': ['file2'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_2.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_2.h',
+ ],
+ },
+ {
+ 'action_name': 'do_3',
+ 'inputs': ['file3'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_3.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_3.h',
+ ],
+ },
+ {
+ 'action_name': 'do_4',
+ 'inputs': ['file4'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_4.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_4.h',
+ ],
+ },
+ {
+ 'action_name': 'do_5',
+ 'inputs': ['file0'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_5.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_5.h',
+ ],
+ },
+ {
+ 'action_name': 'do_6',
+ 'inputs': ['file1'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_6.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_6.h',
+ ],
+ },
+ {
+ 'action_name': 'do_7',
+ 'inputs': ['file2'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_7.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_7.h',
+ ],
+ },
+ {
+ 'action_name': 'do_8',
+ 'inputs': ['file3'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_8.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_8.h',
+ ],
+ },
+ {
+ 'action_name': 'do_9',
+ 'inputs': ['file4'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_9.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_9.h',
+ ],
+ },
+ {
+ 'action_name': 'do_10',
+ 'inputs': ['file0'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_10.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_10.h',
+ ],
+ },
+ {
+ 'action_name': 'do_11',
+ 'inputs': ['file1'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_11.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_11.h',
+ ],
+ },
+ {
+ 'action_name': 'do_12',
+ 'inputs': ['file2'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_12.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_12.h',
+ ],
+ },
+ {
+ 'action_name': 'do_13',
+ 'inputs': ['file3'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_13.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_13.h',
+ ],
+ },
+ {
+ 'action_name': 'do_14',
+ 'inputs': ['file4'],
+ 'outputs': ['<(PRODUCT_DIR)/generated_14.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_14.h',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/many-actions/many-actions.gyp b/third_party/python/gyp/test/many-actions/many-actions.gyp
new file mode 100644
index 0000000000..38545d2d88
--- /dev/null
+++ b/third_party/python/gyp/test/many-actions/many-actions.gyp
@@ -0,0 +1,1817 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'msvs_cygwin_dirs': ['../../../../<(DEPTH)/third_party/cygwin'],
+ },
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'do_0',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_0.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_0.h',
+ ],
+ },
+ {
+ 'action_name': 'do_1',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_1.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_1.h',
+ ],
+ },
+ {
+ 'action_name': 'do_2',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_2.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_2.h',
+ ],
+ },
+ {
+ 'action_name': 'do_3',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_3.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_3.h',
+ ],
+ },
+ {
+ 'action_name': 'do_4',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_4.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_4.h',
+ ],
+ },
+ {
+ 'action_name': 'do_5',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_5.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_5.h',
+ ],
+ },
+ {
+ 'action_name': 'do_6',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_6.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_6.h',
+ ],
+ },
+ {
+ 'action_name': 'do_7',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_7.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_7.h',
+ ],
+ },
+ {
+ 'action_name': 'do_8',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_8.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_8.h',
+ ],
+ },
+ {
+ 'action_name': 'do_9',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_9.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_9.h',
+ ],
+ },
+ {
+ 'action_name': 'do_10',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_10.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_10.h',
+ ],
+ },
+ {
+ 'action_name': 'do_11',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_11.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_11.h',
+ ],
+ },
+ {
+ 'action_name': 'do_12',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_12.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_12.h',
+ ],
+ },
+ {
+ 'action_name': 'do_13',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_13.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_13.h',
+ ],
+ },
+ {
+ 'action_name': 'do_14',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_14.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_14.h',
+ ],
+ },
+ {
+ 'action_name': 'do_15',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_15.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_15.h',
+ ],
+ },
+ {
+ 'action_name': 'do_16',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_16.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_16.h',
+ ],
+ },
+ {
+ 'action_name': 'do_17',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_17.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_17.h',
+ ],
+ },
+ {
+ 'action_name': 'do_18',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_18.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_18.h',
+ ],
+ },
+ {
+ 'action_name': 'do_19',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_19.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_19.h',
+ ],
+ },
+ {
+ 'action_name': 'do_20',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_20.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_20.h',
+ ],
+ },
+ {
+ 'action_name': 'do_21',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_21.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_21.h',
+ ],
+ },
+ {
+ 'action_name': 'do_22',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_22.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_22.h',
+ ],
+ },
+ {
+ 'action_name': 'do_23',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_23.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_23.h',
+ ],
+ },
+ {
+ 'action_name': 'do_24',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_24.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_24.h',
+ ],
+ },
+ {
+ 'action_name': 'do_25',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_25.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_25.h',
+ ],
+ },
+ {
+ 'action_name': 'do_26',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_26.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_26.h',
+ ],
+ },
+ {
+ 'action_name': 'do_27',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_27.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_27.h',
+ ],
+ },
+ {
+ 'action_name': 'do_28',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_28.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_28.h',
+ ],
+ },
+ {
+ 'action_name': 'do_29',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_29.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_29.h',
+ ],
+ },
+ {
+ 'action_name': 'do_30',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_30.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_30.h',
+ ],
+ },
+ {
+ 'action_name': 'do_31',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_31.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_31.h',
+ ],
+ },
+ {
+ 'action_name': 'do_32',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_32.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_32.h',
+ ],
+ },
+ {
+ 'action_name': 'do_33',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_33.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_33.h',
+ ],
+ },
+ {
+ 'action_name': 'do_34',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_34.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_34.h',
+ ],
+ },
+ {
+ 'action_name': 'do_35',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_35.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_35.h',
+ ],
+ },
+ {
+ 'action_name': 'do_36',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_36.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_36.h',
+ ],
+ },
+ {
+ 'action_name': 'do_37',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_37.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_37.h',
+ ],
+ },
+ {
+ 'action_name': 'do_38',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_38.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_38.h',
+ ],
+ },
+ {
+ 'action_name': 'do_39',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_39.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_39.h',
+ ],
+ },
+ {
+ 'action_name': 'do_40',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_40.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_40.h',
+ ],
+ },
+ {
+ 'action_name': 'do_41',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_41.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_41.h',
+ ],
+ },
+ {
+ 'action_name': 'do_42',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_42.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_42.h',
+ ],
+ },
+ {
+ 'action_name': 'do_43',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_43.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_43.h',
+ ],
+ },
+ {
+ 'action_name': 'do_44',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_44.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_44.h',
+ ],
+ },
+ {
+ 'action_name': 'do_45',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_45.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_45.h',
+ ],
+ },
+ {
+ 'action_name': 'do_46',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_46.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_46.h',
+ ],
+ },
+ {
+ 'action_name': 'do_47',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_47.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_47.h',
+ ],
+ },
+ {
+ 'action_name': 'do_48',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_48.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_48.h',
+ ],
+ },
+ {
+ 'action_name': 'do_49',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_49.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_49.h',
+ ],
+ },
+ {
+ 'action_name': 'do_50',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_50.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_50.h',
+ ],
+ },
+ {
+ 'action_name': 'do_51',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_51.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_51.h',
+ ],
+ },
+ {
+ 'action_name': 'do_52',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_52.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_52.h',
+ ],
+ },
+ {
+ 'action_name': 'do_53',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_53.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_53.h',
+ ],
+ },
+ {
+ 'action_name': 'do_54',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_54.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_54.h',
+ ],
+ },
+ {
+ 'action_name': 'do_55',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_55.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_55.h',
+ ],
+ },
+ {
+ 'action_name': 'do_56',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_56.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_56.h',
+ ],
+ },
+ {
+ 'action_name': 'do_57',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_57.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_57.h',
+ ],
+ },
+ {
+ 'action_name': 'do_58',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_58.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_58.h',
+ ],
+ },
+ {
+ 'action_name': 'do_59',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_59.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_59.h',
+ ],
+ },
+ {
+ 'action_name': 'do_60',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_60.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_60.h',
+ ],
+ },
+ {
+ 'action_name': 'do_61',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_61.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_61.h',
+ ],
+ },
+ {
+ 'action_name': 'do_62',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_62.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_62.h',
+ ],
+ },
+ {
+ 'action_name': 'do_63',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_63.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_63.h',
+ ],
+ },
+ {
+ 'action_name': 'do_64',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_64.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_64.h',
+ ],
+ },
+ {
+ 'action_name': 'do_65',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_65.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_65.h',
+ ],
+ },
+ {
+ 'action_name': 'do_66',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_66.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_66.h',
+ ],
+ },
+ {
+ 'action_name': 'do_67',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_67.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_67.h',
+ ],
+ },
+ {
+ 'action_name': 'do_68',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_68.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_68.h',
+ ],
+ },
+ {
+ 'action_name': 'do_69',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_69.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_69.h',
+ ],
+ },
+ {
+ 'action_name': 'do_70',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_70.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_70.h',
+ ],
+ },
+ {
+ 'action_name': 'do_71',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_71.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_71.h',
+ ],
+ },
+ {
+ 'action_name': 'do_72',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_72.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_72.h',
+ ],
+ },
+ {
+ 'action_name': 'do_73',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_73.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_73.h',
+ ],
+ },
+ {
+ 'action_name': 'do_74',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_74.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_74.h',
+ ],
+ },
+ {
+ 'action_name': 'do_75',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_75.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_75.h',
+ ],
+ },
+ {
+ 'action_name': 'do_76',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_76.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_76.h',
+ ],
+ },
+ {
+ 'action_name': 'do_77',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_77.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_77.h',
+ ],
+ },
+ {
+ 'action_name': 'do_78',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_78.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_78.h',
+ ],
+ },
+ {
+ 'action_name': 'do_79',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_79.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_79.h',
+ ],
+ },
+ {
+ 'action_name': 'do_80',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_80.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_80.h',
+ ],
+ },
+ {
+ 'action_name': 'do_81',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_81.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_81.h',
+ ],
+ },
+ {
+ 'action_name': 'do_82',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_82.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_82.h',
+ ],
+ },
+ {
+ 'action_name': 'do_83',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_83.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_83.h',
+ ],
+ },
+ {
+ 'action_name': 'do_84',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_84.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_84.h',
+ ],
+ },
+ {
+ 'action_name': 'do_85',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_85.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_85.h',
+ ],
+ },
+ {
+ 'action_name': 'do_86',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_86.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_86.h',
+ ],
+ },
+ {
+ 'action_name': 'do_87',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_87.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_87.h',
+ ],
+ },
+ {
+ 'action_name': 'do_88',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_88.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_88.h',
+ ],
+ },
+ {
+ 'action_name': 'do_89',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_89.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_89.h',
+ ],
+ },
+ {
+ 'action_name': 'do_90',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_90.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_90.h',
+ ],
+ },
+ {
+ 'action_name': 'do_91',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_91.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_91.h',
+ ],
+ },
+ {
+ 'action_name': 'do_92',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_92.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_92.h',
+ ],
+ },
+ {
+ 'action_name': 'do_93',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_93.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_93.h',
+ ],
+ },
+ {
+ 'action_name': 'do_94',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_94.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_94.h',
+ ],
+ },
+ {
+ 'action_name': 'do_95',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_95.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_95.h',
+ ],
+ },
+ {
+ 'action_name': 'do_96',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_96.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_96.h',
+ ],
+ },
+ {
+ 'action_name': 'do_97',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_97.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_97.h',
+ ],
+ },
+ {
+ 'action_name': 'do_98',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_98.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_98.h',
+ ],
+ },
+ {
+ 'action_name': 'do_99',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_99.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_99.h',
+ ],
+ },
+ {
+ 'action_name': 'do_100',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_100.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_100.h',
+ ],
+ },
+ {
+ 'action_name': 'do_101',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_101.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_101.h',
+ ],
+ },
+ {
+ 'action_name': 'do_102',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_102.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_102.h',
+ ],
+ },
+ {
+ 'action_name': 'do_103',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_103.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_103.h',
+ ],
+ },
+ {
+ 'action_name': 'do_104',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_104.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_104.h',
+ ],
+ },
+ {
+ 'action_name': 'do_105',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_105.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_105.h',
+ ],
+ },
+ {
+ 'action_name': 'do_106',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_106.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_106.h',
+ ],
+ },
+ {
+ 'action_name': 'do_107',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_107.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_107.h',
+ ],
+ },
+ {
+ 'action_name': 'do_108',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_108.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_108.h',
+ ],
+ },
+ {
+ 'action_name': 'do_109',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_109.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_109.h',
+ ],
+ },
+ {
+ 'action_name': 'do_110',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_110.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_110.h',
+ ],
+ },
+ {
+ 'action_name': 'do_111',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_111.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_111.h',
+ ],
+ },
+ {
+ 'action_name': 'do_112',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_112.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_112.h',
+ ],
+ },
+ {
+ 'action_name': 'do_113',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_113.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_113.h',
+ ],
+ },
+ {
+ 'action_name': 'do_114',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_114.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_114.h',
+ ],
+ },
+ {
+ 'action_name': 'do_115',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_115.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_115.h',
+ ],
+ },
+ {
+ 'action_name': 'do_116',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_116.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_116.h',
+ ],
+ },
+ {
+ 'action_name': 'do_117',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_117.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_117.h',
+ ],
+ },
+ {
+ 'action_name': 'do_118',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_118.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_118.h',
+ ],
+ },
+ {
+ 'action_name': 'do_119',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_119.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_119.h',
+ ],
+ },
+ {
+ 'action_name': 'do_120',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_120.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_120.h',
+ ],
+ },
+ {
+ 'action_name': 'do_121',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_121.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_121.h',
+ ],
+ },
+ {
+ 'action_name': 'do_122',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_122.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_122.h',
+ ],
+ },
+ {
+ 'action_name': 'do_123',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_123.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_123.h',
+ ],
+ },
+ {
+ 'action_name': 'do_124',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_124.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_124.h',
+ ],
+ },
+ {
+ 'action_name': 'do_125',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_125.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_125.h',
+ ],
+ },
+ {
+ 'action_name': 'do_126',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_126.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_126.h',
+ ],
+ },
+ {
+ 'action_name': 'do_127',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_127.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_127.h',
+ ],
+ },
+ {
+ 'action_name': 'do_128',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_128.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_128.h',
+ ],
+ },
+ {
+ 'action_name': 'do_129',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_129.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_129.h',
+ ],
+ },
+ {
+ 'action_name': 'do_130',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_130.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_130.h',
+ ],
+ },
+ {
+ 'action_name': 'do_131',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_131.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_131.h',
+ ],
+ },
+ {
+ 'action_name': 'do_132',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_132.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_132.h',
+ ],
+ },
+ {
+ 'action_name': 'do_133',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_133.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_133.h',
+ ],
+ },
+ {
+ 'action_name': 'do_134',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_134.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_134.h',
+ ],
+ },
+ {
+ 'action_name': 'do_135',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_135.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_135.h',
+ ],
+ },
+ {
+ 'action_name': 'do_136',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_136.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_136.h',
+ ],
+ },
+ {
+ 'action_name': 'do_137',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_137.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_137.h',
+ ],
+ },
+ {
+ 'action_name': 'do_138',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_138.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_138.h',
+ ],
+ },
+ {
+ 'action_name': 'do_139',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_139.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_139.h',
+ ],
+ },
+ {
+ 'action_name': 'do_140',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_140.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_140.h',
+ ],
+ },
+ {
+ 'action_name': 'do_141',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_141.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_141.h',
+ ],
+ },
+ {
+ 'action_name': 'do_142',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_142.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_142.h',
+ ],
+ },
+ {
+ 'action_name': 'do_143',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_143.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_143.h',
+ ],
+ },
+ {
+ 'action_name': 'do_144',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_144.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_144.h',
+ ],
+ },
+ {
+ 'action_name': 'do_145',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_145.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_145.h',
+ ],
+ },
+ {
+ 'action_name': 'do_146',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_146.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_146.h',
+ ],
+ },
+ {
+ 'action_name': 'do_147',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_147.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_147.h',
+ ],
+ },
+ {
+ 'action_name': 'do_148',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_148.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_148.h',
+ ],
+ },
+ {
+ 'action_name': 'do_149',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_149.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_149.h',
+ ],
+ },
+ {
+ 'action_name': 'do_150',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_150.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_150.h',
+ ],
+ },
+ {
+ 'action_name': 'do_151',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_151.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_151.h',
+ ],
+ },
+ {
+ 'action_name': 'do_152',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_152.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_152.h',
+ ],
+ },
+ {
+ 'action_name': 'do_153',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_153.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_153.h',
+ ],
+ },
+ {
+ 'action_name': 'do_154',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_154.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_154.h',
+ ],
+ },
+ {
+ 'action_name': 'do_155',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_155.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_155.h',
+ ],
+ },
+ {
+ 'action_name': 'do_156',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_156.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_156.h',
+ ],
+ },
+ {
+ 'action_name': 'do_157',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_157.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_157.h',
+ ],
+ },
+ {
+ 'action_name': 'do_158',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_158.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_158.h',
+ ],
+ },
+ {
+ 'action_name': 'do_159',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_159.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_159.h',
+ ],
+ },
+ {
+ 'action_name': 'do_160',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_160.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_160.h',
+ ],
+ },
+ {
+ 'action_name': 'do_161',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_161.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_161.h',
+ ],
+ },
+ {
+ 'action_name': 'do_162',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_162.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_162.h',
+ ],
+ },
+ {
+ 'action_name': 'do_163',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_163.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_163.h',
+ ],
+ },
+ {
+ 'action_name': 'do_164',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_164.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_164.h',
+ ],
+ },
+ {
+ 'action_name': 'do_165',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_165.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_165.h',
+ ],
+ },
+ {
+ 'action_name': 'do_166',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_166.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_166.h',
+ ],
+ },
+ {
+ 'action_name': 'do_167',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_167.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_167.h',
+ ],
+ },
+ {
+ 'action_name': 'do_168',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_168.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_168.h',
+ ],
+ },
+ {
+ 'action_name': 'do_169',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_169.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_169.h',
+ ],
+ },
+ {
+ 'action_name': 'do_170',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_170.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_170.h',
+ ],
+ },
+ {
+ 'action_name': 'do_171',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_171.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_171.h',
+ ],
+ },
+ {
+ 'action_name': 'do_172',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_172.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_172.h',
+ ],
+ },
+ {
+ 'action_name': 'do_173',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_173.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_173.h',
+ ],
+ },
+ {
+ 'action_name': 'do_174',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_174.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_174.h',
+ ],
+ },
+ {
+ 'action_name': 'do_175',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_175.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_175.h',
+ ],
+ },
+ {
+ 'action_name': 'do_176',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_176.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_176.h',
+ ],
+ },
+ {
+ 'action_name': 'do_177',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_177.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_177.h',
+ ],
+ },
+ {
+ 'action_name': 'do_178',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_178.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_178.h',
+ ],
+ },
+ {
+ 'action_name': 'do_179',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_179.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_179.h',
+ ],
+ },
+ {
+ 'action_name': 'do_180',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_180.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_180.h',
+ ],
+ },
+ {
+ 'action_name': 'do_181',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_181.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_181.h',
+ ],
+ },
+ {
+ 'action_name': 'do_182',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_182.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_182.h',
+ ],
+ },
+ {
+ 'action_name': 'do_183',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_183.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_183.h',
+ ],
+ },
+ {
+ 'action_name': 'do_184',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_184.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_184.h',
+ ],
+ },
+ {
+ 'action_name': 'do_185',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_185.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_185.h',
+ ],
+ },
+ {
+ 'action_name': 'do_186',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_186.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_186.h',
+ ],
+ },
+ {
+ 'action_name': 'do_187',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_187.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_187.h',
+ ],
+ },
+ {
+ 'action_name': 'do_188',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_188.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_188.h',
+ ],
+ },
+ {
+ 'action_name': 'do_189',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_189.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_189.h',
+ ],
+ },
+ {
+ 'action_name': 'do_190',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_190.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_190.h',
+ ],
+ },
+ {
+ 'action_name': 'do_191',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_191.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_191.h',
+ ],
+ },
+ {
+ 'action_name': 'do_192',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_192.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_192.h',
+ ],
+ },
+ {
+ 'action_name': 'do_193',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_193.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_193.h',
+ ],
+ },
+ {
+ 'action_name': 'do_194',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_194.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_194.h',
+ ],
+ },
+ {
+ 'action_name': 'do_195',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_195.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_195.h',
+ ],
+ },
+ {
+ 'action_name': 'do_196',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_196.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_196.h',
+ ],
+ },
+ {
+ 'action_name': 'do_197',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_197.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_197.h',
+ ],
+ },
+ {
+ 'action_name': 'do_198',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_198.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_198.h',
+ ],
+ },
+ {
+ 'action_name': 'do_199',
+ 'inputs': [],
+ 'outputs': ['<(PRODUCT_DIR)/generated_199.h'],
+ 'action': [
+ 'touch',
+ '<(PRODUCT_DIR)/generated_199.h',
+ ],
+ },
+ ],
+ },
+ ],
+}
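The 200 near-identical actions above are plainly mechanical; a sketch of the loop that could regenerate the list (an assumption -- no generator script ships with this test):

actions = []
for i in range(200):
  header = '<(PRODUCT_DIR)/generated_%d.h' % i
  actions.append({
    'action_name': 'do_%d' % i,
    'inputs': [],
    'outputs': [header],
    'action': ['touch', header],
  })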
diff --git a/third_party/python/gyp/test/module/gyptest-default.py b/third_party/python/gyp/test/module/gyptest-default.py
new file mode 100755
index 0000000000..7fecf3ca4d
--- /dev/null
+++ b/third_party/python/gyp/test/module/gyptest-default.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies a simple build of a "Hello, world!" program with loadable modules.
+On all platforms, the default should be to output the loadable modules to the
+same path as the executable.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('module.gyp', chdir='src')
+
+test.build('module.gyp', test.ALL, chdir='src')
+
+expect = """\
+Hello from program.c
+Hello from lib1.c
+Hello from lib2.c
+"""
+test.run_built_executable('program', chdir='src', stdout=expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/module/src/lib1.c b/third_party/python/gyp/test/module/src/lib1.c
new file mode 100644
index 0000000000..8de0e94bee
--- /dev/null
+++ b/third_party/python/gyp/test/module/src/lib1.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+void module_main(void)
+{
+ fprintf(stdout, "Hello from lib1.c\n");
+ fflush(stdout);
+}
diff --git a/third_party/python/gyp/test/module/src/lib2.c b/third_party/python/gyp/test/module/src/lib2.c
new file mode 100644
index 0000000000..266396dc91
--- /dev/null
+++ b/third_party/python/gyp/test/module/src/lib2.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+#ifdef _WIN32
+__declspec(dllexport)
+#endif
+void module_main(void)
+{
+ fprintf(stdout, "Hello from lib2.c\n");
+ fflush(stdout);
+}
diff --git a/third_party/python/gyp/test/module/src/module.gyp b/third_party/python/gyp/test/module/src/module.gyp
new file mode 100644
index 0000000000..2bc398bb3b
--- /dev/null
+++ b/third_party/python/gyp/test/module/src/module.gyp
@@ -0,0 +1,53 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['PLATFORM_WIN'],
+ }],
+ ['OS=="mac" or OS=="ios"', {
+ 'defines': ['PLATFORM_MAC'],
+ }],
+ ['OS=="linux"', {
+ 'defines': ['PLATFORM_LINUX'],
+ # Support 64-bit shared libs (also works fine for 32-bit).
+ 'cflags': ['-fPIC'],
+ 'libraries': ['-ldl'],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'dependencies': [
+ 'lib1',
+ 'lib2',
+ ],
+ 'sources': [
+ 'program.c',
+ ],
+ },
+ {
+ 'target_name': 'lib1',
+ 'type': 'loadable_module',
+ 'product_name': 'lib1',
+ 'product_prefix': '',
+ 'sources': [
+ 'lib1.c',
+ ],
+ },
+ {
+ 'target_name': 'lib2',
+ 'product_name': 'lib2',
+ 'product_prefix': '',
+ 'type': 'loadable_module',
+ 'sources': [
+ 'lib2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/module/src/program.c b/third_party/python/gyp/test/module/src/program.c
new file mode 100644
index 0000000000..7cc3dd3466
--- /dev/null
+++ b/third_party/python/gyp/test/module/src/program.c
@@ -0,0 +1,111 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+#if defined(PLATFORM_WIN)
+#include <windows.h>
+#elif defined(PLATFORM_MAC) || defined(PLATFORM_LINUX)
+#include <dlfcn.h>
+#include <libgen.h>
+#include <string.h>
+#include <sys/param.h>
+#define MAX_PATH PATH_MAX
+#endif
+
+#if defined(PLATFORM_WIN)
+#define MODULE_SUFFIX ".dll"
+#elif defined(PLATFORM_MAC)
+#define MODULE_SUFFIX ".so"
+#elif defined(PLATFORM_LINUX)
+#define MODULE_SUFFIX ".so"
+#endif
+
+typedef void (*module_symbol)(void);
+char bin_path[MAX_PATH + 1];
+
+
+void CallModule(const char* module) {
+ char module_path[MAX_PATH + 1];
+ const char* module_function = "module_main";
+ module_symbol funcptr;
+#if defined(PLATFORM_WIN)
+ HMODULE dl;
+ char drive[_MAX_DRIVE];
+ char dir[_MAX_DIR];
+
+ if (_splitpath_s(bin_path, drive, _MAX_DRIVE, dir, _MAX_DIR,
+ NULL, 0, NULL, 0)) {
+ fprintf(stderr, "Failed to split executable path.\n");
+ return;
+ }
+ if (_makepath_s(module_path, MAX_PATH, drive, dir, module, MODULE_SUFFIX)) {
+ fprintf(stderr, "Failed to calculate module path.\n");
+ return;
+ }
+
+ dl = LoadLibrary(module_path);
+ if (!dl) {
+ fprintf(stderr, "Failed to open module: %s\n", module_path);
+ return;
+ }
+
+ funcptr = (module_symbol) GetProcAddress(dl, module_function);
+ if (!funcptr) {
+ fprintf(stderr, "Failed to find symbol: %s\n", module_function);
+ return;
+ }
+ funcptr();
+
+ FreeLibrary(dl);
+#elif defined(PLATFORM_MAC) || defined(PLATFORM_LINUX)
+ void* dl;
+ char* path_copy = strdup(bin_path);
+ char* bin_dir = dirname(path_copy);
+ int path_size = snprintf(module_path, MAX_PATH, "%s/%s%s", bin_dir, module,
+ MODULE_SUFFIX);
+ free(path_copy);
+ if (path_size < 0 || path_size >= MAX_PATH) {
+ fprintf(stderr, "Failed to calculate module path.\n");
+ return;
+ }
+ module_path[path_size] = 0;
+
+ dl = dlopen(module_path, RTLD_LAZY);
+ if (!dl) {
+ fprintf(stderr, "Failed to open module: %s\n", module_path);
+ return;
+ }
+
+ funcptr = dlsym(dl, module_function);
+ if (!funcptr) {
+ fprintf(stderr, "Failed to find symbol: %s\n", module_function);
+ return;
+ }
+ funcptr();
+
+ dlclose(dl);
+#endif
+}
+
+int main(int argc, char *argv[])
+{
+ fprintf(stdout, "Hello from program.c\n");
+ fflush(stdout);
+
+#if defined(PLATFORM_WIN)
+ if (!GetModuleFileName(NULL, bin_path, MAX_PATH)) {
+ fprintf(stderr, "Failed to determine executable path.\n");
+ return 1;
+ }
+#elif defined(PLATFORM_MAC) || defined(PLATFORM_LINUX)
+ // Using argv[0] is OK here since we control how the tests run and can
+ // avoid the exec-related issues that would make it unreliable.
+ if (!realpath(argv[0], bin_path)) {
+ fprintf(stderr, "Failed to determine executable path (%s).\n", argv[0]);
+ return 1;
+ }
+#endif
+
+ CallModule("lib1");
+ CallModule("lib2");
+ return 0;
+}
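CallModule() above resolves each module next to the executable and loads it with LoadLibrary()/dlopen(). A Python analogue using ctypes, illustrative only (module names match the test; the suffix logic mirrors MODULE_SUFFIX):

import ctypes
import os
import sys

# Like realpath(argv[0]) in program.c: the directory holding this script.
bin_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
suffix = '.dll' if sys.platform == 'win32' else '.so'
for name in ('lib1', 'lib2'):
  lib = ctypes.CDLL(os.path.join(bin_dir, name + suffix))
  lib.module_main()  # the same exported entry point the C test calls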
diff --git a/third_party/python/gyp/test/msvs/buildevents/buildevents.gyp b/third_party/python/gyp/test/msvs/buildevents/buildevents.gyp
new file mode 100644
index 0000000000..e0304dd5c6
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/buildevents/buildevents.gyp
@@ -0,0 +1,14 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'main',
+ 'type': 'executable',
+ 'sources': [ 'main.cc', ],
+ 'msvs_prebuild': r'echo starting',
+ 'msvs_postbuild': r'echo finished',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/buildevents/gyptest-msbuild-supports-prepostbuild.py b/third_party/python/gyp/test/msvs/buildevents/gyptest-msbuild-supports-prepostbuild.py
new file mode 100755
index 0000000000..208f434560
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/buildevents/gyptest-msbuild-supports-prepostbuild.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that msvs_prebuild and msvs_postbuild can be specified in both
+VS 2008 and 2010.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
+
+test.run_gyp('buildevents.gyp', '-G', 'msvs_version=2008')
+test.must_contain('main.vcproj', 'Name="VCPreBuildEventTool"')
+test.must_contain('main.vcproj', 'Name="VCPostBuildEventTool"')
+
+test.run_gyp('buildevents.gyp', '-G', 'msvs_version=2010')
+test.must_contain('main.vcxproj', '<PreBuildEvent>')
+test.must_contain('main.vcxproj', '<PostBuildEvent>')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/msvs/buildevents/gyptest-ninja-warnings.py b/third_party/python/gyp/test/msvs/buildevents/gyptest-ninja-warnings.py
new file mode 100755
index 0000000000..be4ec9921a
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/buildevents/gyptest-ninja-warnings.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ninja errors out when encountering msvs_prebuild/msvs_postbuild.
+"""
+
+import sys
+import TestCmd
+import TestGyp
+
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ test.run_gyp('buildevents.gyp',
+ status=1,
+ stderr=r'.*msvs_prebuild not supported \(target main\).*',
+ match=TestCmd.match_re_dotall)
+
+ test.run_gyp('buildevents.gyp',
+ status=1,
+ stderr=r'.*msvs_postbuild not supported \(target main\).*',
+ match=TestCmd.match_re_dotall)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/msvs/buildevents/main.cc b/third_party/python/gyp/test/msvs/buildevents/main.cc
new file mode 100644
index 0000000000..03c0285a6c
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/buildevents/main.cc
@@ -0,0 +1,5 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {}
diff --git a/third_party/python/gyp/test/msvs/config_attrs/gyptest-config_attrs.py b/third_party/python/gyp/test/msvs/config_attrs/gyptest-config_attrs.py
new file mode 100644
index 0000000000..29a8022bd4
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/config_attrs/gyptest-config_attrs.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that msvs_configuration_attributes and
+msbuild_configuration_attributes are applied by using
+them to set the OutputDirectory.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import os
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+
+test = TestGyp.TestGyp(workdir='workarea_all', formats=['msvs'])
+
+vc_version = 'VC90'
+
+if os.getenv('GYP_MSVS_VERSION'):
+ vc_version = ['VC90', 'VC100'][int(os.getenv('GYP_MSVS_VERSION')) >= 2010]
+
+expected_exe_file = os.path.join(test.workdir, vc_version, 'hello.exe')
+
+test.run_gyp('hello.gyp')
+
+test.build('hello.gyp')
+
+test.must_exist(expected_exe_file)
+
+test.pass_test()
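
The ['VC90', 'VC100'][...] line above indexes a two-element list with the boolean result of the comparison, and it assumes GYP_MSVS_VERSION is purely numeric; a value such as '2012e' would raise ValueError. A slightly more defensive version of the same selection (illustrative only):

import os

version = os.getenv('GYP_MSVS_VERSION', '2008')
digits = ''.join(ch for ch in version if ch.isdigit())  # tolerates '2012e'
vc_version = 'VC100' if digits and int(digits) >= 2010 else 'VC90'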
diff --git a/third_party/python/gyp/test/msvs/config_attrs/hello.c b/third_party/python/gyp/test/msvs/config_attrs/hello.c
new file mode 100644
index 0000000000..faadc75e2c
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/config_attrs/hello.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/msvs/config_attrs/hello.gyp b/third_party/python/gyp/test/msvs/config_attrs/hello.gyp
new file mode 100644
index 0000000000..810a80edd8
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/config_attrs/hello.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ 'msvs_configuration_attributes': {
+ 'OutputDirectory':'$(SolutionDir)VC90/'
+ },
+ 'msbuild_configuration_attributes': {
+ 'OutputDirectory':'$(SolutionDir)VC100/',
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/express/base/base.gyp b/third_party/python/gyp/test/msvs/express/base/base.gyp
new file mode 100644
index 0000000000..b7c9fc6d81
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/express/base/base.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'static_library',
+ 'sources': [
+ 'a.c',
+ ],
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'static_library',
+ 'sources': [
+ 'b.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/express/express.gyp b/third_party/python/gyp/test/msvs/express/express.gyp
new file mode 100644
index 0000000000..917abe2cc0
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/express/express.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'express',
+ 'type': 'executable',
+ 'dependencies': [
+ 'base/base.gyp:a',
+ 'base/base.gyp:b',
+ ],
+ 'sources': [
+ 'main.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/express/gyptest-express.py b/third_party/python/gyp/test/msvs/express/gyptest-express.py
new file mode 100755
index 0000000000..54c06f664a
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/express/gyptest-express.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that flat solutions get generated for Express versions of
+Visual Studio.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['msvs'])
+
+test.run_gyp('express.gyp', '-G', 'msvs_version=2005')
+test.must_contain('express.sln', '(base)')
+
+test.run_gyp('express.gyp', '-G', 'msvs_version=2008')
+test.must_contain('express.sln', '(base)')
+
+test.run_gyp('express.gyp', '-G', 'msvs_version=2005e')
+test.must_not_contain('express.sln', '(base)')
+
+test.run_gyp('express.gyp', '-G', 'msvs_version=2008e')
+test.must_not_contain('express.sln', '(base)')
+
+
+test.pass_test()
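
The '(base)' token checked above is the name of the solution folder that non-Express Visual Studio uses to group the projects from the base/ subdirectory; Express editions do not support solution folders, so the generated .sln must stay flat. A more direct probe for folders, as a sketch (the GUID is the well-known solution-folder project-type id):

FOLDER_TYPE_GUID = '{2150E333-8FDC-42A3-9474-1A3956D46DE8}'

def has_solution_folders(sln_path):
  # Solution folders appear as Project entries using the folder type GUID.
  with open(sln_path) as f:
    return FOLDER_TYPE_GUID in f.read()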
diff --git a/third_party/python/gyp/test/msvs/external_builder/external.gyp b/third_party/python/gyp/test/msvs/external_builder/external.gyp
new file mode 100644
index 0000000000..abe5b5889c
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/external_builder/external.gyp
@@ -0,0 +1,68 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ # the test driver switches this flag when testing external builder
+ 'use_external_builder%': 0,
+ },
+ 'targets': [
+ {
+ 'target_name': 'external',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cpp',
+ 'hello.z',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'test_rule',
+ 'extension': 'z',
+ 'outputs': [
+ 'msbuild_rule.out',
+ ],
+ 'action': [
+ 'python',
+ 'msbuild_rule.py',
+ '<(RULE_INPUT_PATH)',
+ 'a', 'b', 'c',
+ ],
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ 'actions': [
+ {
+ 'action_name': 'test action',
+ 'inputs': [
+ 'msbuild_action.py',
+ ],
+ 'outputs': [
+ 'msbuild_action.out',
+ ],
+ 'action': [
+ 'python',
+ '<@(_inputs)',
+ 'x', 'y', 'z',
+ ],
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ 'conditions': [
+ ['use_external_builder==1', {
+ 'msvs_external_builder': 'test',
+ 'msvs_external_builder_build_cmd': [
+ 'python',
+ 'external_builder.py',
+ 'build', '1', '2', '3',
+ ],
+ 'msvs_external_builder_clean_cmd': [
+ 'python',
+ 'external_builder.py',
+ 'clean', '4', '5',
+ ],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/external_builder/external_builder.py b/third_party/python/gyp/test/msvs/external_builder/external_builder.py
new file mode 100644
index 0000000000..ddfc1e5e33
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/external_builder/external_builder.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+with open('external_builder.out', 'w') as f:
+ f.write(' '.join(sys.argv))
+
diff --git a/third_party/python/gyp/test/msvs/external_builder/gyptest-all.py b/third_party/python/gyp/test/msvs/external_builder/gyptest-all.py
new file mode 100644
index 0000000000..72faa7ab7f
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/external_builder/gyptest-all.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that msvs_external_builder being set will invoke the provided
+msvs_external_builder_build_cmd and msvs_external_builder_clean_cmd, and will
+not invoke MSBuild actions and rules.
+"""
+
+import os
+import sys
+import TestGyp
+
+if int(os.environ.get('GYP_MSVS_VERSION', 0)) < 2010:
+ sys.exit(0)
+
+test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
+
+# without the flag set
+test.run_gyp('external.gyp')
+test.build('external.gyp', target='external')
+test.must_not_exist('external_builder.out')
+test.must_exist('msbuild_rule.out')
+test.must_exist('msbuild_action.out')
+test.must_match('msbuild_rule.out', 'msbuild_rule.py hello.z a b c')
+test.must_match('msbuild_action.out', 'msbuild_action.py x y z')
+os.remove('msbuild_rule.out')
+os.remove('msbuild_action.out')
+
+# with the flag set, using Build
+try:
+ os.environ['GYP_DEFINES'] = 'use_external_builder=1'
+ test.run_gyp('external.gyp')
+ test.build('external.gyp', target='external')
+finally:
+ del os.environ['GYP_DEFINES']
+test.must_not_exist('msbuild_rule.out')
+test.must_not_exist('msbuild_action.out')
+test.must_exist('external_builder.out')
+test.must_match('external_builder.out', 'external_builder.py build 1 2 3')
+os.remove('external_builder.out')
+
+# with the flag set, using Clean
+try:
+ os.environ['GYP_DEFINES'] = 'use_external_builder=1'
+ test.run_gyp('external.gyp')
+ test.build('external.gyp', target='external', clean=True)
+finally:
+ del os.environ['GYP_DEFINES']
+test.must_not_exist('msbuild_rule.out')
+test.must_not_exist('msbuild_action.out')
+test.must_exist('external_builder.out')
+test.must_match('external_builder.out', 'external_builder.py clean 4 5')
+os.remove('external_builder.out')
+
+test.pass_test()
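
GYP_DEFINES feeds gyp's variable layer: the 'use_external_builder%': 0 entry in external.gyp (the '%' suffix marks the value as an overridable default) is what the environment variable flips here. Conceptually, the override behaves like this simplified model:

import os

variables = {'use_external_builder': 0}  # the '%' default from external.gyp
for pair in os.environ.get('GYP_DEFINES', '').split():
  key, _, value = pair.partition('=')
  variables[key] = value  # 'use_external_builder=1' enables the condition block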
diff --git a/third_party/python/gyp/test/msvs/external_builder/hello.cpp b/third_party/python/gyp/test/msvs/external_builder/hello.cpp
new file mode 100644
index 0000000000..bc0c0265b5
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/external_builder/hello.cpp
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+int main(void) {
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/msvs/external_builder/hello.z b/third_party/python/gyp/test/msvs/external_builder/hello.z
new file mode 100644
index 0000000000..aa478827b5
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/external_builder/hello.z
@@ -0,0 +1,6 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+This file will be passed to the test rule.
+
diff --git a/third_party/python/gyp/test/msvs/external_builder/msbuild_action.py b/third_party/python/gyp/test/msvs/external_builder/msbuild_action.py
new file mode 100644
index 0000000000..632d786922
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/external_builder/msbuild_action.py
@@ -0,0 +1,9 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+with open('msbuild_action.out', 'w') as f:
+ f.write(' '.join(sys.argv))
+
diff --git a/third_party/python/gyp/test/msvs/external_builder/msbuild_rule.py b/third_party/python/gyp/test/msvs/external_builder/msbuild_rule.py
new file mode 100644
index 0000000000..0d6e315775
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/external_builder/msbuild_rule.py
@@ -0,0 +1,11 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys, os.path
+
+sys.argv[1] = os.path.basename(sys.argv[1])
+
+with open('msbuild_rule.out', 'w') as f:
+ f.write(' '.join(sys.argv))
+
diff --git a/third_party/python/gyp/test/msvs/filters/filters.gyp b/third_party/python/gyp/test/msvs/filters/filters.gyp
new file mode 100644
index 0000000000..a4106dc8eb
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/filters/filters.gyp
@@ -0,0 +1,47 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'no_source_files',
+ 'type': 'none',
+ 'sources': [ ],
+ },
+ {
+ 'target_name': 'one_source_file',
+ 'type': 'executable',
+ 'sources': [
+ '../folder/a.c',
+ ],
+ },
+ {
+ 'target_name': 'two_source_files',
+ 'type': 'executable',
+ 'sources': [
+ '../folder/a.c',
+ '../folder/b.c',
+ ],
+ },
+ {
+ 'target_name': 'three_files_in_two_folders',
+ 'type': 'executable',
+ 'sources': [
+ '../folder1/a.c',
+ '../folder1/b.c',
+ '../folder2/c.c',
+ ],
+ },
+ {
+ 'target_name': 'nested_folders',
+ 'type': 'executable',
+ 'sources': [
+ '../folder1/nested/a.c',
+ '../folder2/d.c',
+ '../folder1/nested/b.c',
+ '../folder1/other/c.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/filters/gyptest-filters-2008.py b/third_party/python/gyp/test/msvs/filters/gyptest-filters-2008.py
new file mode 100644
index 0000000000..41ca085823
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/filters/gyptest-filters-2008.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that extra filters are pruned correctly for Visual Studio 2008.
+"""
+
+import re
+import TestGyp
+
+
+def strip_ws(s):
+ return re.sub('^ +', '', s, flags=re.M).replace('\n', '')
+
+
+test = TestGyp.TestGyp(formats=['msvs'])
+
+test.run_gyp('filters.gyp', '-G', 'standalone', '-G', 'msvs_version=2008')
+
+test.must_contain('no_source_files.vcproj', '<Files/>')
+
+test.must_contain('one_source_file.vcproj', strip_ws('''\
+<Files>
+ <File RelativePath="..\\folder\\a.c"/>
+</Files>
+'''))
+
+test.must_contain('two_source_files.vcproj', strip_ws('''\
+<Files>
+ <File RelativePath="..\\folder\\a.c"/>
+ <File RelativePath="..\\folder\\b.c"/>
+</Files>
+'''))
+
+test.must_contain('three_files_in_two_folders.vcproj', strip_ws('''\
+<Files>
+ <Filter Name="folder1">
+ <File RelativePath="..\\folder1\\a.c"/>
+ <File RelativePath="..\\folder1\\b.c"/>
+ </Filter>
+ <Filter Name="folder2">
+ <File RelativePath="..\\folder2\\c.c"/>
+ </Filter>
+</Files>
+'''))
+
+test.must_contain('nested_folders.vcproj', strip_ws('''\
+<Files>
+ <Filter Name="folder1">
+ <Filter Name="nested">
+ <File RelativePath="..\\folder1\\nested\\a.c"/>
+ <File RelativePath="..\\folder1\\nested\\b.c"/>
+ </Filter>
+ <Filter Name="other">
+ <File RelativePath="..\\folder1\\other\\c.c"/>
+ </Filter>
+ </Filter>
+ <Filter Name="folder2">
+ <File RelativePath="..\\folder2\\d.c"/>
+ </Filter>
+</Files>
+'''))
+
+
+test.pass_test()
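
The strip_ws helper exists because the .vcproj writer indents and wraps its XML; collapsing leading whitespace and newlines lets the expectations above compare structure rather than formatting. A self-contained demonstration of what it does:

import re

def strip_ws(s):
  return re.sub('^ +', '', s, flags=re.M).replace('\n', '')

assert strip_ws('<Files>\n  <File/>\n</Files>\n') == '<Files><File/></Files>'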
diff --git a/third_party/python/gyp/test/msvs/filters/gyptest-filters-2010.py b/third_party/python/gyp/test/msvs/filters/gyptest-filters-2010.py
new file mode 100644
index 0000000000..d8131d5d7b
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/filters/gyptest-filters-2010.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that extra filters are pruned correctly for Visual Studio 2010
+and later.
+"""
+
+import TestGyp
+
+
+test = TestGyp.TestGyp(formats=['msvs'])
+
+test.run_gyp('filters.gyp', '-G', 'standalone', '-G', 'msvs_version=2010')
+
+test.must_not_exist('no_source_files.vcxproj.filters')
+
+test.must_not_exist('one_source_file.vcxproj.filters')
+
+test.must_not_exist('two_source_files.vcxproj.filters')
+
+test.must_contain('three_files_in_two_folders.vcxproj.filters', '''\
+ <ItemGroup>
+ <ClCompile Include="..\\folder1\\a.c">
+ <Filter>folder1</Filter>
+ </ClCompile>
+ <ClCompile Include="..\\folder1\\b.c">
+ <Filter>folder1</Filter>
+ </ClCompile>
+ <ClCompile Include="..\\folder2\\c.c">
+ <Filter>folder2</Filter>
+ </ClCompile>
+ </ItemGroup>
+'''.replace('\n', '\r\n'))
+
+test.must_contain('nested_folders.vcxproj.filters', '''\
+ <ItemGroup>
+ <ClCompile Include="..\\folder1\\nested\\a.c">
+ <Filter>folder1\\nested</Filter>
+ </ClCompile>
+ <ClCompile Include="..\\folder2\\d.c">
+ <Filter>folder2</Filter>
+ </ClCompile>
+ <ClCompile Include="..\\folder1\\nested\\b.c">
+ <Filter>folder1\\nested</Filter>
+ </ClCompile>
+ <ClCompile Include="..\\folder1\\other\\c.c">
+ <Filter>folder1\\other</Filter>
+ </ClCompile>
+ </ItemGroup>
+'''.replace('\n', '\r\n'))
+
+
+test.pass_test()
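
Taken together, the 2008 and 2010 tests pin down the pruning rule: the sources' common directory prefix contributes no filters, and when nothing remains after stripping it, no .filters file is emitted at all. A rough executable model of that rule (an approximation for illustration, not gyp's actual implementation):

def common_dir(paths):
  # Component-wise longest directory prefix shared by all sources.
  split = [p.split('/')[:-1] for p in paths]
  prefix = []
  for comps in zip(*split):
    if len(set(comps)) != 1:
      break
    prefix.append(comps[0])
  return prefix

def filter_names(sources):
  # Filters are the directory paths that remain once the common prefix is
  # stripped; an empty result means no .vcxproj.filters file is emitted.
  base = len(common_dir(sources))
  names = set()
  for s in sources:
    comps = s.split('/')[base:-1]
    if comps:
      names.add('\\'.join(comps))
  return sorted(names)

assert filter_names(['../folder/a.c', '../folder/b.c']) == []
assert filter_names(['../folder1/nested/a.c', '../folder2/d.c',
                     '../folder1/nested/b.c', '../folder1/other/c.c']) == [
                        'folder1\\nested', 'folder1\\other', 'folder2']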
diff --git a/third_party/python/gyp/test/msvs/list_excluded/gyptest-all.py b/third_party/python/gyp/test/msvs/list_excluded/gyptest-all.py
new file mode 100644
index 0000000000..5a370f6b47
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/list_excluded/gyptest-all.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that msvs_list_excluded_files=0 doesn't list files that would
+normally be in _excluded_files, and that if that flag is not set, then they
+are still listed.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
+
+
+# with the flag set to 0
+try:
+ os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_list_excluded_files=0'
+ test.run_gyp('hello_exclude.gyp')
+finally:
+ del os.environ['GYP_GENERATOR_FLAGS']
+if test.uses_msbuild:
+ test.must_not_contain('hello.vcxproj', 'hello_mac')
+else:
+ test.must_not_contain('hello.vcproj', 'hello_mac')
+
+
+# with the flag not set
+test.run_gyp('hello_exclude.gyp')
+if test.uses_msbuild:
+ test.must_contain('hello.vcxproj', 'hello_mac')
+else:
+ test.must_contain('hello.vcproj', 'hello_mac')
+
+
+# with the flag explicitly set to 1
+try:
+ os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_list_excluded_files=1'
+ test.run_gyp('hello_exclude.gyp')
+finally:
+ del os.environ['GYP_GENERATOR_FLAGS']
+if test.uses_msbuild:
+ test.must_contain('hello.vcxproj', 'hello_mac')
+else:
+ test.must_contain('hello.vcproj', 'hello_mac')
+
+
+test.pass_test()
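
GYP_GENERATOR_FLAGS is a space-separated list of key=value pairs consumed by the generator itself rather than by the input files. A sketch of how a flag like msvs_list_excluded_files would be read (illustrative, not the generator's exact parsing code):

import os

flags = dict(pair.partition('=')[::2]
             for pair in os.environ.get('GYP_GENERATOR_FLAGS', '').split())
list_excluded = flags.get('msvs_list_excluded_files', '1') != '0'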
diff --git a/third_party/python/gyp/test/msvs/list_excluded/hello.cpp b/third_party/python/gyp/test/msvs/list_excluded/hello.cpp
new file mode 100644
index 0000000000..bc0c0265b5
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/list_excluded/hello.cpp
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+int main(void) {
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/msvs/list_excluded/hello_exclude.gyp b/third_party/python/gyp/test/msvs/list_excluded/hello_exclude.gyp
new file mode 100644
index 0000000000..aa160f2367
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/list_excluded/hello_exclude.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cpp',
+ 'hello_mac.cpp',
+ ],
+ 'conditions': [
+ ['OS!="mac"', {'sources!': ['hello_mac.cpp']}],
+ ]
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/list_excluded/hello_mac.cpp b/third_party/python/gyp/test/msvs/list_excluded/hello_mac.cpp
new file mode 100644
index 0000000000..b9f6242c4b
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/list_excluded/hello_mac.cpp
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+int hello2() {
+ printf("Hello, two!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/msvs/missing_sources/gyptest-missing.py b/third_party/python/gyp/test/msvs/missing_sources/gyptest-missing.py
new file mode 100644
index 0000000000..62a99ef0f1
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/missing_sources/gyptest-missing.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that missing 'sources' files are treated as fatal errors when the
+generator flag 'msvs_error_on_missing_sources' is set.
+"""
+
+import TestGyp
+import os
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'], workdir='workarea_all')
+
+ # With the flag not set
+ test.run_gyp('hello_missing.gyp')
+
+ # With the flag explicitly set to 0
+ try:
+ os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_error_on_missing_sources=0'
+ test.run_gyp('hello_missing.gyp')
+ finally:
+ del os.environ['GYP_GENERATOR_FLAGS']
+
+ # With the flag explicitly set to 1
+ try:
+ os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_error_on_missing_sources=1'
+ # Test to make sure GYP raises an exception (exit status 1). Since this will
+ # also print a backtrace, ensure that TestGyp is not checking that stderr is
+ # empty by specifying None, which means do not perform any checking.
+ # Instead, stderr is checked below to ensure it contains the expected
+ # output.
+ test.run_gyp('hello_missing.gyp', status=1, stderr=None)
+ finally:
+ del os.environ['GYP_GENERATOR_FLAGS']
+ test.must_contain_any_line(test.stderr(),
+ ["Missing input files:"])
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/msvs/missing_sources/hello_missing.gyp b/third_party/python/gyp/test/msvs/missing_sources/hello_missing.gyp
new file mode 100644
index 0000000000..c08926bbff
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/missing_sources/hello_missing.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello_missing.cpp',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/multiple_actions_error_handling/action_fail.py b/third_party/python/gyp/test/msvs/multiple_actions_error_handling/action_fail.py
new file mode 100644
index 0000000000..286fc4e132
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/multiple_actions_error_handling/action_fail.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+sys.exit(1)
diff --git a/third_party/python/gyp/test/msvs/multiple_actions_error_handling/action_succeed.py b/third_party/python/gyp/test/msvs/multiple_actions_error_handling/action_succeed.py
new file mode 100644
index 0000000000..3554373197
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/multiple_actions_error_handling/action_succeed.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/msvs/multiple_actions_error_handling/actions.gyp b/third_party/python/gyp/test/msvs/multiple_actions_error_handling/actions.gyp
new file mode 100644
index 0000000000..ab99e929e2
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/multiple_actions_error_handling/actions.gyp
@@ -0,0 +1,40 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'actions-test',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'first action (fails)',
+ 'inputs': [
+ 'action_fail.py',
+ ],
+ 'outputs': [
+ 'ALWAYS_OUT_OF_DATE',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)'
+ ],
+ 'msvs_cygwin_shell': 0,
+ },
+ {
+ 'action_name': 'second action (succeeds)',
+ 'inputs': [
+ 'action_succeed.py',
+ ],
+ 'outputs': [
+ 'ALWAYS_OUT_OF_DATE',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)'
+ ],
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/multiple_actions_error_handling/gyptest.py b/third_party/python/gyp/test/msvs/multiple_actions_error_handling/gyptest.py
new file mode 100644
index 0000000000..3aa6b8fdb2
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/multiple_actions_error_handling/gyptest.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that failing actions make the build fail reliably, even when there
+are multiple actions in one project.
+"""
+
+import os
+import sys
+import TestGyp
+import TestCmd
+
+test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
+
+test.run_gyp('actions.gyp')
+test.build('actions.gyp',
+ target='actions-test',
+ status=1,
+ stdout=r'.*"cmd\.exe" exited with code 1\..*',
+ match=TestCmd.match_re_dotall)
+
+test.pass_test()
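
What the regex assertion encodes, in miniature: every action's exit status must stop the build, even when a later action in the same project would succeed. A standalone sketch of that contract (illustrative; not literally what the generated MSBuild step runs):

import subprocess

for step in (['python', 'action_fail.py'], ['python', 'action_succeed.py']):
  rc = subprocess.call(step)
  if rc != 0:
    raise SystemExit(rc)  # the first failing action must fail the whole build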
diff --git a/third_party/python/gyp/test/msvs/props/AppName.props b/third_party/python/gyp/test/msvs/props/AppName.props
new file mode 100644
index 0000000000..b688f663d5
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/props/AppName.props
@@ -0,0 +1,14 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup Label="UserMacros">
+ <AppName>Greet</AppName>
+ </PropertyGroup>
+ <PropertyGroup>
+ <_ProjectFileVersion>10.0.40219.1</_ProjectFileVersion>
+ </PropertyGroup>
+ <ItemGroup>
+ <BuildMacro Include="AppName">
+ <Value>$(AppName)</Value>
+ </BuildMacro>
+ </ItemGroup>
+</Project>
diff --git a/third_party/python/gyp/test/msvs/props/AppName.vsprops b/third_party/python/gyp/test/msvs/props/AppName.vsprops
new file mode 100644
index 0000000000..84b9af3800
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/props/AppName.vsprops
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="Windows-1252"?>
+<VisualStudioPropertySheet
+ ProjectType="Visual C++"
+ Version="8.00"
+ Name="Common"
+ >
+ <UserMacro
+ Name="AppName"
+ Value="Greet"
+ />
+</VisualStudioPropertySheet>
diff --git a/third_party/python/gyp/test/msvs/props/gyptest-props.py b/third_party/python/gyp/test/msvs/props/gyptest-props.py
new file mode 100644
index 0000000000..abd4df2241
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/props/gyptest-props.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that props files are added to the generated project by using a
+props file to set the name of the built executable.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_all', formats=['msvs'])
+
+test.run_gyp('hello.gyp')
+
+test.build('hello.gyp')
+
+test.built_file_must_exist('Greet.exe')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/msvs/props/hello.c b/third_party/python/gyp/test/msvs/props/hello.c
new file mode 100644
index 0000000000..faadc75e2c
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/props/hello.c
@@ -0,0 +1,11 @@
+/* Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/msvs/props/hello.gyp b/third_party/python/gyp/test/msvs/props/hello.gyp
new file mode 100644
index 0000000000..5a58317fa7
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/props/hello.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'product_name': '$(AppName)',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ 'msvs_props': [
+ '$(SolutionDir)AppName.vsprops'
+ ],
+ 'msbuild_props': [
+ '$(SolutionDir)AppName.props'
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/msvs/rules_stdout_stderr/dummy.bar b/third_party/python/gyp/test/msvs/rules_stdout_stderr/dummy.bar
new file mode 100644
index 0000000000..25178696d2
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/rules_stdout_stderr/dummy.bar
@@ -0,0 +1,5 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+A dummy file with the .bar extension (used for stderr rule).
diff --git a/third_party/python/gyp/test/msvs/rules_stdout_stderr/dummy.foo b/third_party/python/gyp/test/msvs/rules_stdout_stderr/dummy.foo
new file mode 100644
index 0000000000..6a7990bb98
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/rules_stdout_stderr/dummy.foo
@@ -0,0 +1,5 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+A dummy file with the .foo extension (used for stdout rule).
diff --git a/third_party/python/gyp/test/msvs/rules_stdout_stderr/gyptest-rules-stdout-stderr.py b/third_party/python/gyp/test/msvs/rules_stdout_stderr/gyptest-rules-stdout-stderr.py
new file mode 100644
index 0000000000..804505a23d
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/rules_stdout_stderr/gyptest-rules-stdout-stderr.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Verifies that stdout and stderr from rules get logged in the build's
+stdout."""
+
+import sys
+import TestGyp
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs'])
+
+ test.run_gyp('rules-stdout-stderr.gyp')
+ test.build('rules-stdout-stderr.gyp', test.ALL)
+
+ expected_stdout_lines = [
+ 'testing stdout',
+ 'This will go to stdout',
+
+ # Note: stderr output from rules will go to the build's stdout.
+ 'testing stderr',
+ 'This will go to stderr',
+ ]
+ test.must_contain_all_lines(test.stdout(), expected_stdout_lines)
+
+ test.pass_test()
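
The assertions read only test.stdout() because the generated build runs each rule with its stderr merged into the build's own stdout stream. The equivalent plumbing in plain Python, for illustration:

import subprocess

proc = subprocess.Popen(['python', 'rule_stderr.py', 'dummy.bar'],
                        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                        universal_newlines=True)
merged, _ = proc.communicate()
# 'This will go to stderr' now appears in merged next to any stdout text.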
diff --git a/third_party/python/gyp/test/msvs/rules_stdout_stderr/rule_stderr.py b/third_party/python/gyp/test/msvs/rules_stdout_stderr/rule_stderr.py
new file mode 100644
index 0000000000..2081513ec8
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/rules_stdout_stderr/rule_stderr.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+import sys
+print("This will go to stderr", file=sys.stderr)
diff --git a/third_party/python/gyp/test/msvs/rules_stdout_stderr/rule_stdout.py b/third_party/python/gyp/test/msvs/rules_stdout_stderr/rule_stdout.py
new file mode 100644
index 0000000000..4c073ebc45
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/rules_stdout_stderr/rule_stdout.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+print("This will go to stdout")
diff --git a/third_party/python/gyp/test/msvs/rules_stdout_stderr/rules-stdout-stderr.gyp b/third_party/python/gyp/test/msvs/rules_stdout_stderr/rules-stdout-stderr.gyp
new file mode 100644
index 0000000000..ce93643f8e
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/rules_stdout_stderr/rules-stdout-stderr.gyp
@@ -0,0 +1,52 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test',
+ 'type': 'none',
+ 'sources': [
+ 'dummy.foo',
+ 'dummy.bar',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'test_stdout',
+ 'extension': 'foo',
+ 'message': 'testing stdout',
+ 'msvs_cygwin_shell': 0,
+ 'inputs': [
+ 'rule_stdout.py',
+ ],
+ 'outputs': [
+ 'dummy.foo_output',
+ ],
+ 'action': [
+ 'python',
+ 'rule_stdout.py',
+ '<(RULE_INPUT_PATH)',
+ ],
+ },
+ {
+ 'rule_name': 'test_stderr',
+ 'extension': 'bar',
+ 'message': 'testing stderr',
+ 'msvs_cygwin_shell': 0,
+ 'inputs': [
+ 'rule_stderr.py',
+ ],
+ 'outputs': [
+ 'dummy.bar_output',
+ ],
+ 'action': [
+ 'python',
+ 'rule_stderr.py',
+ '<(RULE_INPUT_PATH)',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/shared_output/common.gypi b/third_party/python/gyp/test/msvs/shared_output/common.gypi
new file mode 100644
index 0000000000..c6fa341d68
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/shared_output/common.gypi
@@ -0,0 +1,17 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'default_configuration': 'Baz',
+ 'configurations': {
+ 'Baz': {
+ 'msvs_configuration_attributes': {
+ 'OutputDirectory': '<(DEPTH)/foo',
+ 'IntermediateDirectory': '$(OutDir)/bar',
+ },
+ },
+ },
+ },
+}
diff --git a/third_party/python/gyp/test/msvs/shared_output/gyptest-shared_output.py b/third_party/python/gyp/test/msvs/shared_output/gyptest-shared_output.py
new file mode 100644
index 0000000000..270b280e6b
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/shared_output/gyptest-shared_output.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test checking that IntermediateDirectory can be defined in terms of
+OutputDirectory. We previously emitted the definition of
+IntermediateDirectory before the definition of OutputDirectory, which
+broke this; the ordering is required so that $(IntDir) can use $(OutDir).
+"""
+
+import TestGyp
+import os
+
+# NOTE: This test really is vcbuild/msbuild specific (not applicable to windows
+# ninja), as it is testing the msvs output location when opening an .sln
+# other than all.sln.
+test = TestGyp.TestGyp(workdir='workarea_shared_output', formats=['msvs'])
+
+test.run_gyp('hello.gyp')
+test.set_configuration('Baz')
+
+test.build('there/there.gyp', test.ALL)
+test.must_exist(os.path.join(test.workdir, 'foo', 'there.exe'))
+test.must_exist(os.path.join(test.workdir, 'foo', 'bar', 'there.obj'))
+
+test.build('hello.gyp', test.ALL)
+test.must_exist(os.path.join(test.workdir, 'foo', 'hello.exe'))
+test.must_exist(os.path.join(test.workdir, 'foo', 'bar', 'hello.obj'))
+
+if test.format == 'msvs':
+ if test.uses_msbuild:
+ test.must_contain('pull_in_there.vcxproj',
+ '<IntDir>$(OutDir)bar\\</IntDir>')
+ else:
+ test.must_contain('pull_in_there.vcproj',
+ 'IntermediateDirectory="$(OutDir)bar\\"')
+
+test.pass_test()
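
The ordering requirement is easiest to see as plain macro expansion: $(IntDir) can only reference $(OutDir) if OutputDirectory has already been defined when IntermediateDirectory is evaluated. A toy model of the two definitions (illustrative only):

macros = {}
macros['OutDir'] = 'foo/'                     # OutputDirectory, defined first
macros['IntDir'] = macros['OutDir'] + 'bar/'  # '$(OutDir)bar/' now resolves
assert macros['IntDir'] == 'foo/bar/'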
diff --git a/third_party/python/gyp/test/msvs/shared_output/hello.c b/third_party/python/gyp/test/msvs/shared_output/hello.c
new file mode 100644
index 0000000000..698e4fd36c
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/shared_output/hello.c
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+int main(void) {
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/msvs/shared_output/hello.gyp b/third_party/python/gyp/test/msvs/shared_output/hello.gyp
new file mode 100644
index 0000000000..f80e5cfca1
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/shared_output/hello.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': ['common.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'pull_in_there',
+ 'type': 'none',
+ 'dependencies': ['there/there.gyp:*'],
+ },
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/shared_output/there/there.c b/third_party/python/gyp/test/msvs/shared_output/there/there.c
new file mode 100644
index 0000000000..698e4fd36c
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/shared_output/there/there.c
@@ -0,0 +1,12 @@
+/*
+ * Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+int main(void) {
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/msvs/shared_output/there/there.gyp b/third_party/python/gyp/test/msvs/shared_output/there/there.gyp
new file mode 100644
index 0000000000..56feff326c
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/shared_output/there/there.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': ['../common.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'there',
+ 'type': 'executable',
+ 'sources': [
+ 'there.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/uldi2010/gyptest-all.py b/third_party/python/gyp/test/msvs/uldi2010/gyptest-all.py
new file mode 100644
index 0000000000..cc248fbd63
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/uldi2010/gyptest-all.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ULDI (UseLibraryDependencyInputs) can be disabled per project reference in VS 2010.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')
+
+test.run_gyp('hello.gyp')
+
+if test.uses_msbuild:
+ test.must_contain('hello.vcxproj', '<UseLibraryDependencyInputs>false')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/msvs/uldi2010/hello.c b/third_party/python/gyp/test/msvs/uldi2010/hello.c
new file mode 100644
index 0000000000..06e6a02905
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/uldi2010/hello.c
@@ -0,0 +1,13 @@
+/* Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+extern int hello2();
+
+int main(void) {
+ printf("Hello, world!\n");
+ hello2();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/msvs/uldi2010/hello.gyp b/third_party/python/gyp/test/msvs/uldi2010/hello.gyp
new file mode 100644
index 0000000000..a2bf2badb1
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/uldi2010/hello.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ 'dependencies': [
+ 'hellolib',
+ ]
+ },
+ {
+ 'target_name': 'hellolib',
+ 'type': 'static_library',
+ 'sources': [
+ 'hello2.c',
+ ],
+ 'msvs_2010_disable_uldi_when_referenced': 1,
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/msvs/uldi2010/hello2.c b/third_party/python/gyp/test/msvs/uldi2010/hello2.c
new file mode 100644
index 0000000000..e2f23238d1
--- /dev/null
+++ b/third_party/python/gyp/test/msvs/uldi2010/hello2.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int hello2() {
+ printf("Hello, two!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/multiple-targets/gyptest-all.py b/third_party/python/gyp/test/multiple-targets/gyptest-all.py
new file mode 100755
index 0000000000..3ef50090d1
--- /dev/null
+++ b/third_party/python/gyp/test/multiple-targets/gyptest-all.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('multiple.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('multiple.gyp', test.ALL, chdir='relocate/src', stderr=None)
+
+expect1 = """\
+hello from prog1.c
+hello from common.c
+"""
+
+expect2 = """\
+hello from prog2.c
+hello from common.c
+"""
+
+test.run_built_executable('prog1', stdout=expect1, chdir='relocate/src')
+test.run_built_executable('prog2', stdout=expect2, chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/multiple-targets/gyptest-default.py b/third_party/python/gyp/test/multiple-targets/gyptest-default.py
new file mode 100755
index 0000000000..db15d794c3
--- /dev/null
+++ b/third_party/python/gyp/test/multiple-targets/gyptest-default.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('multiple.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('multiple.gyp', chdir='relocate/src')
+
+expect1 = """\
+hello from prog1.c
+hello from common.c
+"""
+
+expect2 = """\
+hello from prog2.c
+hello from common.c
+"""
+
+test.run_built_executable('prog1', stdout=expect1, chdir='relocate/src')
+test.run_built_executable('prog2', stdout=expect2, chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/multiple-targets/src/common.c b/third_party/python/gyp/test/multiple-targets/src/common.c
new file mode 100644
index 0000000000..f1df7c1431
--- /dev/null
+++ b/third_party/python/gyp/test/multiple-targets/src/common.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+void common(void)
+{
+ printf("hello from common.c\n");
+ return;
+}
diff --git a/third_party/python/gyp/test/multiple-targets/src/multiple.gyp b/third_party/python/gyp/test/multiple-targets/src/multiple.gyp
new file mode 100644
index 0000000000..3db4ea30cd
--- /dev/null
+++ b/third_party/python/gyp/test/multiple-targets/src/multiple.gyp
@@ -0,0 +1,24 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'prog1',
+ 'type': 'executable',
+ 'sources': [
+ 'prog1.c',
+ 'common.c',
+ ],
+ },
+ {
+ 'target_name': 'prog2',
+ 'type': 'executable',
+ 'sources': [
+ 'prog2.c',
+ 'common.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/multiple-targets/src/prog1.c b/third_party/python/gyp/test/multiple-targets/src/prog1.c
new file mode 100644
index 0000000000..fbf8d4cd7c
--- /dev/null
+++ b/third_party/python/gyp/test/multiple-targets/src/prog1.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+extern void common(void);
+
+int main(void)
+{
+ printf("hello from prog1.c\n");
+ common();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/multiple-targets/src/prog2.c b/third_party/python/gyp/test/multiple-targets/src/prog2.c
new file mode 100644
index 0000000000..a94b5c155e
--- /dev/null
+++ b/third_party/python/gyp/test/multiple-targets/src/prog2.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+extern void common(void);
+
+int main(void)
+{
+ printf("hello from prog2.c\n");
+ common();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/ninja/action-rule-hash/gyptest-action-rule-hash.py b/third_party/python/gyp/test/ninja/action-rule-hash/gyptest-action-rule-hash.py
new file mode 100644
index 0000000000..7147fd2fc3
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action-rule-hash/gyptest-action-rule-hash.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that running gyp in a different directory does not cause actions and
+rules to rerun.
+"""
+
+import os
+import sys
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['ninja'])
+# The xcode-ninja generator mishandles gypfiles that are not at the
+# project root.
+# cf. https://code.google.com/p/gyp/issues/detail?id=460
+if test.format == 'xcode-ninja':
+ test.skip_test()
+
+test.run_gyp('subdir/action-rule-hash.gyp')
+test.build('subdir/action-rule-hash.gyp', test.ALL)
+test.up_to_date('subdir/action-rule-hash.gyp')
+
+# Verify that everything is still up-to-date when we re-invoke gyp from a
+# different directory.
+test.run_gyp('action-rule-hash.gyp', '--depth=../', chdir='subdir')
+test.up_to_date('subdir/action-rule-hash.gyp')
+
+test.pass_test()
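
Ninja reruns an action whenever the command line it hashed changes, so gyp has to emit commands that do not depend on the directory gyp was invoked from. The failure mode this test guards against, in miniature (hypothetical command strings for illustration):

# If gyp embedded invocation-relative paths, regenerating from 'subdir' would
# produce a different command string and invalidate otherwise up-to-date edges:
cmd_from_root = 'python subdir/emit.py gen/main.cc'
cmd_from_subdir = 'python emit.py gen/main.cc'
assert cmd_from_root != cmd_from_subdir  # the drift the generator must avoid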
diff --git a/third_party/python/gyp/test/ninja/action-rule-hash/subdir/action-rule-hash.gyp b/third_party/python/gyp/test/ninja/action-rule-hash/subdir/action-rule-hash.gyp
new file mode 100644
index 0000000000..0e88a3019f
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action-rule-hash/subdir/action-rule-hash.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [
+ '<(INTERMEDIATE_DIR)/main.cc',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'emit_main_cc',
+ 'inputs': ['emit.py'],
+ 'outputs': ['<(INTERMEDIATE_DIR)/main.cc'],
+ 'action': [
+ 'python',
+ 'emit.py',
+ '<(INTERMEDIATE_DIR)/main.cc',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/action-rule-hash/subdir/emit.py b/third_party/python/gyp/test/ninja/action-rule-hash/subdir/emit.py
new file mode 100644
index 0000000000..6b17125574
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action-rule-hash/subdir/emit.py
@@ -0,0 +1,13 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+# Write a trivial C++ translation unit to the path given on the command line.
+with open(sys.argv[1], 'w') as f:
+ f.write('int main() {\n')
+ f.write(' return 0;\n')
+ f.write('}\n')
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/gyptest-action-dependencies.py b/third_party/python/gyp/test/ninja/action_dependencies/gyptest-action-dependencies.py
new file mode 100755
index 0000000000..89813bab17
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/gyptest-action-dependencies.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that building an object file correctly depends on running actions in
+dependent targets, but not the targets themselves.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+import TestGyp
+
+# NOTE(piman): This test will not work with other generators because:
+# - it explicitly tests the optimization, which is not implemented (yet?) in
+# other generators;
+# - it relies on the exact path to output object files, which is generator
+# dependent, and on the ability to build a single object file, which not
+# all generators support.
+# TODO(piman): Extend to other generators when possible.
+test = TestGyp.TestGyp(formats=['ninja'])
+# xcode-ninja doesn't support building single object files by design.
+if test.format == 'xcode-ninja':
+ test.skip_test()
+
+test.run_gyp('action_dependencies.gyp', chdir='src')
+
+chdir = 'relocate/src'
+test.relocate('src', chdir)
+
+objext = '.obj' if sys.platform == 'win32' else '.o'
+
+test.build('action_dependencies.gyp',
+ os.path.join('obj', 'b.b' + objext),
+ chdir=chdir)
+
+# The 'a' actions should be run (letting b.c compile), but the a static library
+# should not be built.
+test.built_file_must_not_exist('a', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_not_exist('b', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_exist(os.path.join('obj', 'b.b' + objext), chdir=chdir)
+
+test.build('action_dependencies.gyp',
+ os.path.join('obj', 'c.c' + objext),
+ chdir=chdir)
+
+# 'a' and 'b' should be built, so that the 'c' action succeeds, letting c.c
+# compile
+test.built_file_must_exist('a', type=test.STATIC_LIB, chdir=chdir)
+test.built_file_must_exist('b', type=test.EXECUTABLE, chdir=chdir)
+test.built_file_must_exist(os.path.join('obj', 'c.c' + objext), chdir=chdir)
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/src/a.c b/third_party/python/gyp/test/ninja/action_dependencies/src/a.c
new file mode 100644
index 0000000000..4d7af9b26c
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/src/a.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "a.h"
+
+int funcA() {
+ return 42;
+}
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/src/a.h b/third_party/python/gyp/test/ninja/action_dependencies/src/a.h
new file mode 100644
index 0000000000..335db56739
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/src/a.h
@@ -0,0 +1,13 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef A_H_
+#define A_H_
+
+#include "a/generated.h"
+
+int funcA();
+
+#endif // A_H_
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/src/action_dependencies.gyp b/third_party/python/gyp/test/ninja/action_dependencies/src/action_dependencies.gyp
new file mode 100644
index 0000000000..5baa7a7d47
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/src/action_dependencies.gyp
@@ -0,0 +1,88 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'static_library',
+ 'sources': [
+ 'a.c',
+ 'a.h',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'generate_headers',
+ 'inputs': [
+ 'emit.py'
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/a/generated.h'
+ ],
+ 'action': [
+ 'python',
+ 'emit.py',
+ '<(SHARED_INTERMEDIATE_DIR)/a/generated.h',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ },
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'executable',
+ 'sources': [
+ 'b.c',
+ 'b.h',
+ ],
+ 'dependencies': [
+ 'a',
+ ],
+ },
+ {
+ 'target_name': 'c',
+ 'type': 'static_library',
+ 'sources': [
+ 'c.c',
+ 'c.h',
+ ],
+ 'dependencies': [
+ 'b',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'generate_headers',
+ 'inputs': [
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/c/generated.h'
+ ],
+ 'action': [
+ '<(PRODUCT_DIR)/b',
+ '<(SHARED_INTERMEDIATE_DIR)/c/generated.h',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/src/b.c b/third_party/python/gyp/test/ninja/action_dependencies/src/b.c
new file mode 100644
index 0000000000..824464695a
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/src/b.c
@@ -0,0 +1,18 @@
+/* Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+#include "b.h"
+
+int main(int argc, char** argv) {
+ FILE* f;
+ if (argc < 2)
+ return 1;
+ f = fopen(argv[1], "wt");
+ fprintf(f, "#define VALUE %d\n", funcA());
+ fclose(f);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/src/b.h b/third_party/python/gyp/test/ninja/action_dependencies/src/b.h
new file mode 100644
index 0000000000..91362cd899
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/src/b.h
@@ -0,0 +1,13 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef B_H_
+#define B_H_
+
+#include "a.h"
+
+int funcB();
+
+#endif // B_H_
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/src/c.c b/third_party/python/gyp/test/ninja/action_dependencies/src/c.c
new file mode 100644
index 0000000000..b412087ec8
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/src/c.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include "c.h"
+
+int funcC() {
+ return VALUE;
+}
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/src/c.h b/third_party/python/gyp/test/ninja/action_dependencies/src/c.h
new file mode 100644
index 0000000000..c81a45bbe7
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/src/c.h
@@ -0,0 +1,13 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#ifndef C_H_
+#define C_H_
+
+#include "c/generated.h"
+
+int funcC();
+
+#endif // C_H_
diff --git a/third_party/python/gyp/test/ninja/action_dependencies/src/emit.py b/third_party/python/gyp/test/ninja/action_dependencies/src/emit.py
new file mode 100755
index 0000000000..8ed12f7393
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/action_dependencies/src/emit.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+# Emit a placeholder generated header at the requested path.
+with open(sys.argv[1], 'w') as f:
+ f.write('/* Hello World */\n')
diff --git a/third_party/python/gyp/test/ninja/chained-dependency/chained-dependency.gyp b/third_party/python/gyp/test/ninja/chained-dependency/chained-dependency.gyp
new file mode 100644
index 0000000000..3fe68ae85a
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/chained-dependency/chained-dependency.gyp
@@ -0,0 +1,53 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # This first target generates a header.
+ {
+ 'target_name': 'generate_header',
+ 'type': 'none',
+ 'msvs_cygwin_shell': '0',
+ 'actions': [
+ {
+ 'action_name': 'generate header',
+ 'inputs': [],
+ 'outputs': ['<(SHARED_INTERMEDIATE_DIR)/generated/header.h'],
+ 'action': [
+ 'python', '-c', 'open(<(_outputs), "w")'
+ ]
+ },
+ ],
+ 'all_dependent_settings': {
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ },
+ },
+
+ # This intermediate target does nothing other than pull in a
+ # dependency on the above generated target.
+ {
+ 'target_name': 'chain',
+ 'type': 'none',
+ 'dependencies': [
+ 'generate_header',
+ ],
+ },
+
+ # This final target is:
+ # - a static library (so gyp doesn't transitively pull in dependencies);
+ # - that relies on the generated file two dependencies away.
+ {
+ 'target_name': 'chained',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'chain',
+ ],
+ 'sources': [
+ 'chained.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/chained-dependency/chained.c b/third_party/python/gyp/test/ninja/chained-dependency/chained.c
new file mode 100644
index 0000000000..c1ff1a7b12
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/chained-dependency/chained.c
@@ -0,0 +1,5 @@
+#include "generated/header.h"
+
+int main(void) {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/ninja/chained-dependency/gyptest-chained-dependency.py b/third_party/python/gyp/test/ninja/chained-dependency/gyptest-chained-dependency.py
new file mode 100755
index 0000000000..d8763f1d3d
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/chained-dependency/gyptest-chained-dependency.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that files generated by two-steps-removed actions are built before
+dependent compile steps.
+"""
+
+import os
+import sys
+import TestGyp
+
+# This test is Ninja-specific in that:
+# - the bug only showed nondeterministically in parallel builds;
+# - it relies on a ninja-specific output file path.
+
+test = TestGyp.TestGyp(formats=['ninja'])
+# xcode-ninja doesn't support building single object files by design.
+if test.format == 'xcode-ninja':
+ test.skip_test()
+
+test.run_gyp('chained-dependency.gyp')
+objext = '.obj' if sys.platform == 'win32' else '.o'
+test.build('chained-dependency.gyp',
+ os.path.join('obj', 'chained.chained' + objext))
+# The test passes if the .o file builds successfully.
+test.pass_test()
diff --git a/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/gyptest-empty-and-non-empty-duplicate-name.py b/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/gyptest-empty-and-non-empty-duplicate-name.py
new file mode 100644
index 0000000000..0bdca66cdc
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/gyptest-empty-and-non-empty-duplicate-name.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a phony edge isn't emitted for a target when a real build edge
+with the same name has already been emitted.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['ninja'])
+
+# Reset xcode_ninja_target_pattern to its default for this test.
+test.run_gyp('test.gyp', '-G', 'xcode_ninja_target_pattern=^$')
+
+# Check for both \r and \n to cover both Windows and Linux.
+test.must_not_contain('out/Default/build.ninja', 'build empty_target: phony\r')
+test.must_not_contain('out/Default/build.ninja', 'build empty_target: phony\n')
+
+test.pass_test()
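
The two must_not_contain() calls above boil down to scanning build.ninja for a
phony edge named after the duplicated target. A rough stand-alone equivalent,
assuming ninja's "build <name>: phony" edge syntax (contains_phony is a
hypothetical helper):

    def contains_phony(build_ninja_text, name):
        # True if any edge in the manifest declares `name` as a phony output.
        prefix = 'build %s: phony' % name
        return any(line.startswith(prefix)
                   for line in build_ninja_text.splitlines())

    assert not contains_phony('build empty_target: cc foo.o\n', 'empty_target')
    assert contains_phony('build empty_target: phony\n', 'empty_target')
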
diff --git a/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/subdir/included.gyp b/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/subdir/included.gyp
new file mode 100644
index 0000000000..1b9fc42f3f
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/subdir/included.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'empty_target',
+ 'type': 'executable',
+ 'sources': [
+ 'test.cc',
+ ],
+ },
+ {
+ 'target_name': 'included_empty_target',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/test.gyp b/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/test.gyp
new file mode 100644
index 0000000000..9aa6287c7c
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/empty-and-non-empty-duplicate-name/test.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'All',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir/included.gyp:included_empty_target'
+ ]
+ },
+ {
+ 'target_name': 'empty_target',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/normalize-paths-win/gyptest-normalize-paths.py b/third_party/python/gyp/test/ninja/normalize-paths-win/gyptest-normalize-paths.py
new file mode 100644
index 0000000000..f56dbe5921
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/normalize-paths-win/gyptest-normalize-paths.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure paths are normalized with VS macros properly expanded on Windows.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ test.run_gyp('normalize-paths.gyp')
+
+  # We can't use existence tests because, on a case-insensitive filesystem,
+  # any path case will pass; so we check the contents of the generated ninja
+  # files directly, since that's what we're most concerned with anyway.
+ subninja = open(test.built_file_path('obj/some_target.ninja')).read()
+ if '$!product_dir' in subninja:
+ test.fail_test()
+ if 'out\\Default' in subninja:
+ test.fail_test()
+
+ second = open(test.built_file_path('obj/second.ninja')).read()
+ if ('..\\..\\things\\AnotherName.exe' in second or
+ 'AnotherName.exe' not in second):
+ test.fail_test()
+
+ copytarget = open(test.built_file_path('obj/copy_target.ninja')).read()
+ if '$(VSInstallDir)' in copytarget:
+ test.fail_test()
+
+ action = open(test.built_file_path('obj/action.ninja')).read()
+ if '..\\..\\out\\Default' in action:
+ test.fail_test()
+ if '..\\..\\SomethingElse' in action or 'SomethingElse' not in action:
+ test.fail_test()
+ if '..\\..\\SomeOtherInput' in action or 'SomeOtherInput' not in action:
+ test.fail_test()
+
+ test.pass_test()
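
What the generator is expected to do for these files is expand the VS macros
and normalize the result into a build-directory-relative Windows path. A
simplified sketch of that step follows; the macro table is illustrative only
(the real generator derives these values per configuration), and
expand_and_normalize is a hypothetical helper.

    import ntpath

    # Illustrative values only, not what gyp actually computes.
    VS_MACROS = {
        '$(OutDir)': 'out\\Default',
        '$(IntDir)': 'obj',
        '$(VSInstallDir)': 'C:\\VS',
    }

    def expand_and_normalize(path, macros=VS_MACROS):
        for macro, value in macros.items():
            path = path.replace(macro, value)
        return ntpath.normpath(path)

    assert (expand_and_normalize('$(OutDir)\\things\\AnotherName.exe')
            == 'out\\Default\\things\\AnotherName.exe')
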
diff --git a/third_party/python/gyp/test/ninja/normalize-paths-win/hello.cc b/third_party/python/gyp/test/ninja/normalize-paths-win/hello.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/normalize-paths-win/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/ninja/normalize-paths-win/normalize-paths.gyp b/third_party/python/gyp/test/ninja/normalize-paths-win/normalize-paths.gyp
new file mode 100644
index 0000000000..544d06456d
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/normalize-paths-win/normalize-paths.gyp
@@ -0,0 +1,68 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'Some_Target',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '<(PRODUCT_DIR)/stuff/AnotherName.exe',
+ },
+ },
+ 'sources': [
+ 'HeLLo.cc',
+ 'blOrP.idl',
+ ],
+ },
+ {
+ 'target_name': 'second',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(OutDir)\\things\\AnotherName.exe',
+ },
+ },
+ 'sources': [
+ 'HeLLo.cc',
+ ],
+ },
+ {
+ 'target_name': 'Copy_Target',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)',
+ 'files': [
+ '$(VSInstallDir)\\bin\\cl.exe',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'action',
+ 'type': 'none',
+ 'msvs_cygwin_shell': '0',
+ 'actions': [
+ {
+ 'inputs': [
+ '$(IntDir)\\SomeInput',
+ '$(OutDir)\\SomeOtherInput',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/ReSuLt',
+ '<(SHARED_INTERMEDIATE_DIR)/TempFile',
+            '$(OutDir)\\SomethingElse',
+ ],
+ 'action_name': 'Test action',
+          # Unfortunately, we can't normalize this field because it's
+          # free-form. Fortunately, ninja doesn't inspect it at all (only the
+          # inputs and outputs), so normalizing it isn't mandatory.
+ 'action': [],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/s-needs-no-depfiles/empty.s b/third_party/python/gyp/test/ninja/s-needs-no-depfiles/empty.s
new file mode 100644
index 0000000000..218d8921e5
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/s-needs-no-depfiles/empty.s
@@ -0,0 +1 @@
+# This file intentionally left blank.
diff --git a/third_party/python/gyp/test/ninja/s-needs-no-depfiles/gyptest-s-needs-no-depfiles.py b/third_party/python/gyp/test/ninja/s-needs-no-depfiles/gyptest-s-needs-no-depfiles.py
new file mode 100755
index 0000000000..77a3245d46
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/s-needs-no-depfiles/gyptest-s-needs-no-depfiles.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that .s files don't always trigger a rebuild, as would happen if depfiles
+were used for them (since clang & gcc ignore -MMD when building .s->.o on
+Linux).
+"""
+
+import os
+import sys
+import TestCommon
+import TestGyp
+
+# NOTE(fischman): Each generator uses depfiles (or not) differently, so this is
+# a ninja-specific test.
+test = TestGyp.TestGyp(formats=['ninja'])
+
+if sys.platform == 'win32':
+ # This test is about clang/gcc vs. depfiles; VS gets a pass.
+ test.pass_test()
+ sys.exit(0)
+
+test.run_gyp('s-needs-no-depfiles.gyp')
+
+# Build the library, grab its timestamp, rebuild the library, ensure timestamp
+# hasn't changed.
+test.build('s-needs-no-depfiles.gyp', 'empty')
+empty_dll = test.built_file_path('empty', test.SHARED_LIB)
+test.built_file_must_exist(empty_dll)
+pre_stat = os.stat(empty_dll)
+test.sleep()
+test.build('s-needs-no-depfiles.gyp', 'empty')
+post_stat = os.stat(empty_dll)
+
+if pre_stat.st_mtime != post_stat.st_mtime:
+ test.fail_test()
+else:
+ test.pass_test()
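
The pre/post stat comparison above is the heart of the test: rebuild, then
require a byte-identical modification time. The same pattern in isolation (a
generic sketch; mtime_is_stable is a hypothetical helper):

    import os

    def mtime_is_stable(path, rebuild, sleep):
        # Sleep long enough that a rewrite would be visible in the mtime,
        # then rebuild and compare timestamps.
        before = os.stat(path).st_mtime
        sleep()
        rebuild()
        return os.stat(path).st_mtime == before
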
diff --git a/third_party/python/gyp/test/ninja/s-needs-no-depfiles/s-needs-no-depfiles.gyp b/third_party/python/gyp/test/ninja/s-needs-no-depfiles/s-needs-no-depfiles.gyp
new file mode 100644
index 0000000000..bd66b1a70a
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/s-needs-no-depfiles/s-needs-no-depfiles.gyp
@@ -0,0 +1,13 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'empty',
+ 'type': 'shared_library',
+ 'sources': [ 'empty.s' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/solibs_avoid_relinking/gyptest-solibs-avoid-relinking.py b/third_party/python/gyp/test/ninja/solibs_avoid_relinking/gyptest-solibs-avoid-relinking.py
new file mode 100755
index 0000000000..fd4470ac23
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/solibs_avoid_relinking/gyptest-solibs-avoid-relinking.py
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that relinking a solib doesn't relink a dependent executable if the
+solib's public API hasn't changed.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+import TestCommon
+import TestGyp
+
+# NOTE(fischman): This test will not work with other generators because the
+# API-hash-based-mtime-preservation optimization is only implemented in
+# ninja.py. It could probably be extended to the make.py generator without
+# much work. (It also tests ninja-specific output paths, which would have to
+# be generalized if it were extended to other generators.)
+test = TestGyp.TestGyp(formats=['ninja'])
+
+if not os.environ.get('ProgramFiles(x86)'):
+  # TODO(scottmg): http://crbug.com/365833
+  print('Skipping test: ProgramFiles(x86) is not set.')
+ test.pass_test()
+
+test.run_gyp('solibs_avoid_relinking.gyp')
+
+# Build the executable, grab its timestamp, touch the solib's source, rebuild
+# executable, ensure timestamp hasn't changed.
+test.build('solibs_avoid_relinking.gyp', 'b')
+test.built_file_must_exist('b' + TestCommon.exe_suffix)
+pre_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
+os.utime(os.path.join(test.workdir, 'solib.cc'),
+ (pre_stat.st_atime, pre_stat.st_mtime + 100))
+test.sleep()
+test.build('solibs_avoid_relinking.gyp', 'b')
+post_stat = os.stat(test.built_file_path('b' + TestCommon.exe_suffix))
+
+if pre_stat.st_mtime != post_stat.st_mtime:
+ test.fail_test()
+else:
+ test.pass_test()
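
The optimization this test exercises keys off a hash of the solib's exported
interface: if relinking the library reproduces the same interface hash, the
dependent executable is left alone. A loose sketch of such a hash, assuming
the interface is summarized as a sorted list of exported symbol names (an
assumed shape, not ninja.py's actual implementation):

    import hashlib

    def interface_hash(exported_symbols):
        # Implementation-only edits keep the exported set, and thus the
        # digest, unchanged, so dependents need not relink.
        digest = hashlib.sha1()
        for symbol in sorted(exported_symbols):
            digest.update(symbol.encode('utf-8') + b'\n')
        return digest.hexdigest()

    assert interface_hash(['foo']) == interface_hash(['foo'])
    assert interface_hash(['foo']) != interface_hash(['foo', 'bar'])
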
diff --git a/third_party/python/gyp/test/ninja/solibs_avoid_relinking/main.cc b/third_party/python/gyp/test/ninja/solibs_avoid_relinking/main.cc
new file mode 100644
index 0000000000..2cd74d3c77
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/solibs_avoid_relinking/main.cc
@@ -0,0 +1,5 @@
+extern int foo();
+
+int main() {
+ return foo();
+}
diff --git a/third_party/python/gyp/test/ninja/solibs_avoid_relinking/solib.cc b/third_party/python/gyp/test/ninja/solibs_avoid_relinking/solib.cc
new file mode 100644
index 0000000000..0856cd4e00
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/solibs_avoid_relinking/solib.cc
@@ -0,0 +1,8 @@
+#ifdef _MSC_VER
+__declspec(dllexport)
+#else
+__attribute__((visibility("default")))
+#endif
+int foo() {
+ return 42;
+}
diff --git a/third_party/python/gyp/test/ninja/solibs_avoid_relinking/solibs_avoid_relinking.gyp b/third_party/python/gyp/test/ninja/solibs_avoid_relinking/solibs_avoid_relinking.gyp
new file mode 100644
index 0000000000..e816351d68
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/solibs_avoid_relinking/solibs_avoid_relinking.gyp
@@ -0,0 +1,38 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'shared_library',
+ 'sources': [ 'solib.cc' ],
+ # Incremental linking enabled so that .lib timestamp is maintained when
+ # exports are unchanged.
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '2',
+ }
+ },
+ },
+ {
+ 'target_name': 'b',
+ 'type': 'executable',
+ 'sources': [ 'main.cc' ],
+ 'dependencies': [ 'a' ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '2',
+ }
+ },
+ },
+ ],
+ 'conditions': [
+ ['OS=="linux"', {
+ 'target_defaults': {
+ 'cflags': ['-fPIC'],
+ },
+ }],
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/use-console/foo.bar b/third_party/python/gyp/test/ninja/use-console/foo.bar
new file mode 100644
index 0000000000..07c476a866
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/use-console/foo.bar
@@ -0,0 +1,5 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+This is a dummy file for rule/action input.
diff --git a/third_party/python/gyp/test/ninja/use-console/gyptest-use-console.py b/third_party/python/gyp/test/ninja/use-console/gyptest-use-console.py
new file mode 100644
index 0000000000..f76fcd9829
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/use-console/gyptest-use-console.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure 'ninja_use_console' is supported in actions and rules.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['ninja'])
+
+test.run_gyp('use-console.gyp')
+
+no_pool = open(test.built_file_path('obj/no_pool.ninja')).read()
+if 'pool =' in no_pool:
+ test.fail_test()
+
+action_pool = open(test.built_file_path('obj/action_pool.ninja')).read()
+if 'pool = console' not in action_pool:
+ test.fail_test()
+
+rule_pool = open(test.built_file_path('obj/rule_pool.ninja')).read()
+if 'pool = console' not in rule_pool:
+ test.fail_test()
+
+test.pass_test()
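
'ninja_use_console' is expected to turn into a "pool = console" binding on the
generated edge, which is exactly the string the checks above grep for. A
minimal sketch of emitting such an edge (emit_build_edge is a hypothetical
helper, not the generator's real writer):

    def emit_build_edge(rule, outputs, inputs, use_console=False):
        lines = ['build %s: %s %s' % (' '.join(outputs), rule,
                                      ' '.join(inputs))]
        if use_console:
            # The console pool serializes the action and hands it the
            # real terminal.
            lines.append('  pool = console')
        return '\n'.join(lines) + '\n'

    assert 'pool = console' in emit_build_edge('do', ['dummy'], ['foo.bar'],
                                               use_console=True)
    assert 'pool =' not in emit_build_edge('do', ['dummy'], ['foo.bar'])
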
diff --git a/third_party/python/gyp/test/ninja/use-console/use-console.gyp b/third_party/python/gyp/test/ninja/use-console/use-console.gyp
new file mode 100644
index 0000000000..84e63184c6
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/use-console/use-console.gyp
@@ -0,0 +1,60 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'no_pool',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'some_action',
+ 'action': ['echo', 'hello'],
+ 'inputs': ['foo.bar'],
+ 'outputs': ['dummy'],
+ },
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'some_rule',
+ 'extension': 'bar',
+ 'action': ['echo', 'hello'],
+ 'outputs': ['dummy'],
+ },
+ ],
+ 'sources': [
+ 'foo.bar',
+ ],
+ },
+ {
+ 'target_name': 'action_pool',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'some_action',
+ 'action': ['echo', 'hello'],
+ 'inputs': ['foo.bar'],
+ 'outputs': ['dummy'],
+ 'ninja_use_console': 1,
+ },
+ ],
+ },
+ {
+ 'target_name': 'rule_pool',
+ 'type': 'none',
+ 'rules': [
+ {
+ 'rule_name': 'some_rule',
+ 'extension': 'bar',
+ 'action': ['echo', 'hello'],
+ 'outputs': ['dummy'],
+ 'ninja_use_console': 1,
+ },
+ ],
+ 'sources': [
+ 'foo.bar',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/ninja/use-custom-environment-files/gyptest-use-custom-environment-files.py b/third_party/python/gyp/test/ninja/use-custom-environment-files/gyptest-use-custom-environment-files.py
new file mode 100644
index 0000000000..0c44b1d1c9
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/use-custom-environment-files/gyptest-use-custom-environment-files.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure environment files can be suppressed.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ test.run_gyp('use-custom-environment-files.gyp',
+ '-G', 'ninja_use_custom_environment_files')
+
+ # Make sure environment files do not exist.
+ if os.path.exists(test.built_file_path('environment.x86')):
+ test.fail_test()
+ if os.path.exists(test.built_file_path('environment.x64')):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/ninja/use-custom-environment-files/use-custom-environment-files.cc b/third_party/python/gyp/test/ninja/use-custom-environment-files/use-custom-environment-files.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/use-custom-environment-files/use-custom-environment-files.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/ninja/use-custom-environment-files/use-custom-environment-files.gyp b/third_party/python/gyp/test/ninja/use-custom-environment-files/use-custom-environment-files.gyp
new file mode 100644
index 0000000000..dbc95a9439
--- /dev/null
+++ b/third_party/python/gyp/test/ninja/use-custom-environment-files/use-custom-environment-files.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_use_custom_environment_files',
+ 'type': 'executable',
+ 'sources': [
+ 'use-custom-environment-files.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/no-cpp/gyptest-no-cpp.py b/third_party/python/gyp/test/no-cpp/gyptest-no-cpp.py
new file mode 100644
index 0000000000..a5d64512af
--- /dev/null
+++ b/third_party/python/gyp/test/no-cpp/gyptest-no-cpp.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Checks that C-only targets aren't linked against libstdc++.
+"""
+
+import TestGyp
+
+import re
+import subprocess
+import sys
+
+# Set |match| to ignore build stderr output.
+test = TestGyp.TestGyp(match=lambda a, b: True)
+if (sys.platform != 'win32' and
+ not (sys.platform == 'darwin' and test.format == 'make')):
+ # TODO: Does a test like this make sense with Windows?
+
+ CHDIR = 'src'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', 'no_cpp', chdir=CHDIR)
+
+ def LinksLibStdCpp(path):
+ path = test.built_file_path(path, chdir=CHDIR)
+ if sys.platform == 'darwin':
+ proc = subprocess.Popen(['otool', '-L', path], stdout=subprocess.PIPE)
+ else:
+ proc = subprocess.Popen(['ldd', path], stdout=subprocess.PIPE)
+ output = proc.communicate()[0].decode('utf-8')
+ assert not proc.returncode
+ return 'libstdc++' in output or 'libc++' in output
+
+ if LinksLibStdCpp('no_cpp'):
+ test.fail_test()
+
+  # Make, ninja, and CMake pick the link driver (cc vs. c++) by checking the
+  # transitive set of sources; Xcode doesn't, so linking the C-only
+  # executable against the C++ library fails there.
+ build_error_code = {
+ 'xcode': 65, # EX_DATAERR, see `man sysexits`
+ 'make': 0,
+ 'ninja': 0,
+ 'cmake': 0,
+ 'xcode-ninja': 0,
+ }[test.format]
+
+ test.build('test.gyp', 'no_cpp_dep_on_cc_lib', chdir=CHDIR,
+ status=build_error_code)
+
+ test.pass_test()
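
The transitive check mentioned in the comment above can be pictured as: scan
every source in the executable's dependency closure and switch to the C++
driver if any C++ file appears. A sketch under that assumption (link_driver is
a hypothetical helper):

    def link_driver(transitive_sources):
        # Any C++ translation unit anywhere in the closure forces the C++
        # driver so that libstdc++/libc++ gets linked in.
        cpp_exts = ('.cc', '.cpp', '.cxx')
        if any(source.endswith(cpp_exts) for source in transitive_sources):
            return 'c++'
        return 'cc'

    assert link_driver(['empty-main.c']) == 'cc'
    assert link_driver(['call-f-main.c', 'f.cc']) == 'c++'
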
diff --git a/third_party/python/gyp/test/no-cpp/src/call-f-main.c b/third_party/python/gyp/test/no-cpp/src/call-f-main.c
new file mode 100644
index 0000000000..8b95c5910e
--- /dev/null
+++ b/third_party/python/gyp/test/no-cpp/src/call-f-main.c
@@ -0,0 +1,2 @@
+void* f();
+int main() { f(); }
diff --git a/third_party/python/gyp/test/no-cpp/src/empty-main.c b/third_party/python/gyp/test/no-cpp/src/empty-main.c
new file mode 100644
index 0000000000..237c8ce181
--- /dev/null
+++ b/third_party/python/gyp/test/no-cpp/src/empty-main.c
@@ -0,0 +1 @@
+int main() {}
diff --git a/third_party/python/gyp/test/no-cpp/src/f.cc b/third_party/python/gyp/test/no-cpp/src/f.cc
new file mode 100644
index 0000000000..02f50f21a0
--- /dev/null
+++ b/third_party/python/gyp/test/no-cpp/src/f.cc
@@ -0,0 +1,3 @@
+extern "C" { void* f(); }
+
+void* f() { return new int; }
diff --git a/third_party/python/gyp/test/no-cpp/src/test.gyp b/third_party/python/gyp/test/no-cpp/src/test.gyp
new file mode 100644
index 0000000000..417015ec80
--- /dev/null
+++ b/third_party/python/gyp/test/no-cpp/src/test.gyp
@@ -0,0 +1,25 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'no_cpp',
+ 'type': 'executable',
+ 'sources': [ 'empty-main.c' ],
+ },
+ # A static_library with a cpp file and a linkable with only .c files
+ # depending on it causes a linker error:
+ {
+ 'target_name': 'cpp_lib',
+ 'type': 'static_library',
+ 'sources': [ 'f.cc' ],
+ },
+ {
+ 'target_name': 'no_cpp_dep_on_cc_lib',
+ 'type': 'executable',
+ 'dependencies': [ 'cpp_lib' ],
+ 'sources': [ 'call-f-main.c' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/no-output/gyptest-no-output.py b/third_party/python/gyp/test/no-output/gyptest-no-output.py
new file mode 100755
index 0000000000..bf9a0b5aaa
--- /dev/null
+++ b/third_party/python/gyp/test/no-output/gyptest-no-output.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that things don't explode when there are targets without outputs.
+"""
+
+import TestGyp
+
+# TODO(evan): in ninja when there are no targets, there is no 'all'
+# target either. Disabling this test for now.
+test = TestGyp.TestGyp(formats=['!ninja'])
+
+test.run_gyp('nooutput.gyp', chdir='src')
+test.relocate('src', 'relocate/src')
+test.build('nooutput.gyp', chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/no-output/src/nooutput.gyp b/third_party/python/gyp/test/no-output/src/nooutput.gyp
new file mode 100644
index 0000000000..c40124efc1
--- /dev/null
+++ b/third_party/python/gyp/test/no-output/src/nooutput.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'no_output',
+ 'type': 'none',
+ 'direct_dependent_settings': {
+ 'defines': [
+ 'NADA',
+ ],
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/product/gyptest-product.py b/third_party/python/gyp/test/product/gyptest-product.py
new file mode 100755
index 0000000000..53eb5c376b
--- /dev/null
+++ b/third_party/python/gyp/test/product/gyptest-product.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that product_prefix, product_name, product_extension and product_dir
+settings produce the expected output file names, using the default build
+target.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('product.gyp')
+test.build('product.gyp')
+
+# executables
+test.built_file_must_exist('alt1' + test._exe, test.EXECUTABLE, bare=True)
+test.built_file_must_exist('hello2.stuff', test.EXECUTABLE, bare=True)
+test.built_file_must_exist('yoalt3.stuff', test.EXECUTABLE, bare=True)
+
+# shared libraries
+test.built_file_must_exist(test.dll_ + 'alt4' + test._dll,
+ test.SHARED_LIB, bare=True)
+test.built_file_must_exist(test.dll_ + 'hello5.stuff',
+ test.SHARED_LIB, bare=True)
+test.built_file_must_exist('yoalt6.stuff', test.SHARED_LIB, bare=True)
+
+# static libraries
+test.built_file_must_exist(test.lib_ + 'alt7' + test._lib,
+ test.STATIC_LIB, bare=True)
+test.built_file_must_exist(test.lib_ + 'hello8.stuff',
+ test.STATIC_LIB, bare=True)
+test.built_file_must_exist('yoalt9.stuff', test.STATIC_LIB, bare=True)
+
+# alternate product_dir
+test.built_file_must_exist('bob/yoalt10.stuff', test.EXECUTABLE, bare=True)
+test.built_file_must_exist('bob/yoalt11.stuff', test.EXECUTABLE, bare=True)
+test.built_file_must_exist('bob/yoalt12.stuff', test.EXECUTABLE, bare=True)
+
+test.pass_test()
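
Every expected file name above follows one rule: product_prefix +
(product_name or target_name) + ('.' + product_extension, or the type's
default suffix). A POSIX-flavored sketch of that rule; the default table is an
assumption for illustration (msvs and mac defaults differ), and
product_filename is a hypothetical helper.

    def product_filename(target_name, target_type, product_name=None,
                         product_prefix=None, product_extension=None):
        # Default (prefix, extension) per target type on Linux-like hosts.
        defaults = {
            'executable': ('', ''),
            'shared_library': ('lib', '.so'),
            'static_library': ('lib', '.a'),
        }
        prefix, ext = defaults[target_type]
        if product_prefix is not None:
            prefix = product_prefix
        if product_extension is not None:
            ext = '.' + product_extension
        return prefix + (product_name or target_name) + ext

    assert product_filename('hello3', 'executable', 'alt3', 'yo',
                            'stuff') == 'yoalt3.stuff'
    assert product_filename('hello5', 'shared_library',
                            product_extension='stuff') == 'libhello5.stuff'
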
diff --git a/third_party/python/gyp/test/product/hello.c b/third_party/python/gyp/test/product/hello.c
new file mode 100644
index 0000000000..41fdff0e38
--- /dev/null
+++ b/third_party/python/gyp/test/product/hello.c
@@ -0,0 +1,15 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int func1(void) {
+ return 42;
+}
+
+int main(void) {
+ printf("Hello, world!\n");
+ printf("%d\n", func1());
+ return 0;
+}
diff --git a/third_party/python/gyp/test/product/product.gyp b/third_party/python/gyp/test/product/product.gyp
new file mode 100644
index 0000000000..c25eaaacb5
--- /dev/null
+++ b/third_party/python/gyp/test/product/product.gyp
@@ -0,0 +1,128 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello1',
+ 'product_name': 'alt1',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello2',
+ 'product_extension': 'stuff',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello3',
+ 'product_name': 'alt3',
+ 'product_extension': 'stuff',
+ 'product_prefix': 'yo',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+
+ {
+ 'target_name': 'hello4',
+ 'product_name': 'alt4',
+ 'type': 'shared_library',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello5',
+ 'product_extension': 'stuff',
+ 'type': 'shared_library',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello6',
+ 'product_name': 'alt6',
+ 'product_extension': 'stuff',
+ 'product_prefix': 'yo',
+ 'type': 'shared_library',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+
+ {
+ 'target_name': 'hello7',
+ 'product_name': 'alt7',
+ 'type': 'static_library',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello8',
+ 'product_extension': 'stuff',
+ 'type': 'static_library',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello9',
+ 'product_name': 'alt9',
+ 'product_extension': 'stuff',
+ 'product_prefix': 'yo',
+ 'type': 'static_library',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello10',
+ 'product_name': 'alt10',
+ 'product_extension': 'stuff',
+ 'product_prefix': 'yo',
+ 'product_dir': '<(PRODUCT_DIR)/bob',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello11',
+ 'product_name': 'alt11',
+ 'product_extension': 'stuff',
+ 'product_prefix': 'yo',
+ 'product_dir': '<(PRODUCT_DIR)/bob',
+ 'type': 'shared_library',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello12',
+ 'product_name': 'alt12',
+ 'product_extension': 'stuff',
+ 'product_prefix': 'yo',
+ 'product_dir': '<(PRODUCT_DIR)/bob',
+ 'type': 'static_library',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="linux"', {
+ 'target_defaults': {
+ 'cflags': ['-fPIC'],
+ },
+ }],
+ ],
+}
diff --git a/third_party/python/gyp/test/prune_targets/gyptest-prune-targets.py b/third_party/python/gyp/test/prune_targets/gyptest-prune-targets.py
new file mode 100644
index 0000000000..b2c90f717e
--- /dev/null
+++ b/third_party/python/gyp/test/prune_targets/gyptest-prune-targets.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies --root-target removes the unnecessary targets.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+# The xcode-ninja generator has its own logic for which targets to include
+if test.format == 'xcode-ninja':
+ test.skip_test()
+
+build_error_code = {
+ 'cmake': 1,
+ 'make': 2,
+ 'msvs': 1,
+ 'ninja': 1,
+ 'xcode': 65,
+}[test.format]
+
+# By default, everything will be included.
+test.run_gyp('test1.gyp')
+test.build('test2.gyp', 'lib1')
+test.build('test2.gyp', 'lib2')
+test.build('test2.gyp', 'lib3')
+test.build('test2.gyp', 'lib_indirect')
+test.build('test1.gyp', 'program1')
+test.build('test1.gyp', 'program2')
+test.build('test1.gyp', 'program3')
+
+# With deep dependencies of program1 only.
+test.run_gyp('test1.gyp', '--root-target=program1')
+test.build('test2.gyp', 'lib1')
+test.build('test2.gyp', 'lib2', status=build_error_code, stderr=None)
+test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
+test.build('test2.gyp', 'lib_indirect')
+test.build('test1.gyp', 'program1')
+test.build('test1.gyp', 'program2', status=build_error_code, stderr=None)
+test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
+
+# With deep dependencies of program2 only.
+test.run_gyp('test1.gyp', '--root-target=program2')
+test.build('test2.gyp', 'lib1', status=build_error_code, stderr=None)
+test.build('test2.gyp', 'lib2')
+test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
+test.build('test2.gyp', 'lib_indirect')
+test.build('test1.gyp', 'program1', status=build_error_code, stderr=None)
+test.build('test1.gyp', 'program2')
+test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
+
+# With deep dependencies of program1 and program2.
+test.run_gyp('test1.gyp', '--root-target=program1', '--root-target=program2')
+test.build('test2.gyp', 'lib1')
+test.build('test2.gyp', 'lib2')
+test.build('test2.gyp', 'lib3', status=build_error_code, stderr=None)
+test.build('test2.gyp', 'lib_indirect')
+test.build('test1.gyp', 'program1')
+test.build('test1.gyp', 'program2')
+test.build('test1.gyp', 'program3', status=build_error_code, stderr=None)
+
+test.pass_test()
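
--root-target prunes the build down to the transitive dependency closure of
the requested roots, which is exactly why each block above expects some
targets to build and the rest to fail. A small reachability sketch over the
dependency edges declared in test1.gyp and test2.gyp:

    def reachable(roots, deps):
        # Walk the dependency graph from the roots; everything visited
        # survives pruning, everything else is dropped.
        keep, stack = set(), list(roots)
        while stack:
            target = stack.pop()
            if target in keep:
                continue
            keep.add(target)
            stack.extend(deps.get(target, []))
        return keep

    deps = {
        'program1': ['lib1'], 'program2': ['lib2'], 'program3': ['lib3'],
        'lib1': ['lib_indirect'], 'lib2': ['lib_indirect'],
    }
    assert reachable(['program1'], deps) == {'program1', 'lib1',
                                             'lib_indirect'}
    assert reachable(['program1', 'program2'], deps) == {
        'program1', 'program2', 'lib1', 'lib2', 'lib_indirect'}
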
diff --git a/third_party/python/gyp/test/prune_targets/lib1.cc b/third_party/python/gyp/test/prune_targets/lib1.cc
new file mode 100644
index 0000000000..692b7de6d8
--- /dev/null
+++ b/third_party/python/gyp/test/prune_targets/lib1.cc
@@ -0,0 +1,6 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void libfunc1() {
+}
diff --git a/third_party/python/gyp/test/prune_targets/lib2.cc b/third_party/python/gyp/test/prune_targets/lib2.cc
new file mode 100644
index 0000000000..aed394afcf
--- /dev/null
+++ b/third_party/python/gyp/test/prune_targets/lib2.cc
@@ -0,0 +1,6 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void libfunc2() {
+}
diff --git a/third_party/python/gyp/test/prune_targets/lib3.cc b/third_party/python/gyp/test/prune_targets/lib3.cc
new file mode 100644
index 0000000000..af0f717b02
--- /dev/null
+++ b/third_party/python/gyp/test/prune_targets/lib3.cc
@@ -0,0 +1,6 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void libfunc3() {
+}
diff --git a/third_party/python/gyp/test/prune_targets/lib_indirect.cc b/third_party/python/gyp/test/prune_targets/lib_indirect.cc
new file mode 100644
index 0000000000..92d9ea40db
--- /dev/null
+++ b/third_party/python/gyp/test/prune_targets/lib_indirect.cc
@@ -0,0 +1,6 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void libfunc_indirect() {
+}
diff --git a/third_party/python/gyp/test/prune_targets/program.cc b/third_party/python/gyp/test/prune_targets/program.cc
new file mode 100644
index 0000000000..c9ac070ecd
--- /dev/null
+++ b/third_party/python/gyp/test/prune_targets/program.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/prune_targets/test1.gyp b/third_party/python/gyp/test/prune_targets/test1.gyp
new file mode 100644
index 0000000000..b65ec19fa4
--- /dev/null
+++ b/third_party/python/gyp/test/prune_targets/test1.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program1',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'dependencies': [ 'test2.gyp:lib1' ],
+ },
+ {
+ 'target_name': 'program2',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'dependencies': [ 'test2.gyp:lib2' ],
+ },
+ {
+ 'target_name': 'program3',
+ 'type': 'executable',
+ 'sources': [ 'program.cc' ],
+ 'dependencies': [ 'test2.gyp:lib3' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/prune_targets/test2.gyp b/third_party/python/gyp/test/prune_targets/test2.gyp
new file mode 100644
index 0000000000..16f0fd3290
--- /dev/null
+++ b/third_party/python/gyp/test/prune_targets/test2.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib1',
+ 'type': 'static_library',
+ 'sources': [ 'lib1.cc' ],
+ 'dependencies': [ 'lib_indirect' ],
+ },
+ {
+ 'target_name': 'lib2',
+ 'type': 'static_library',
+ 'sources': [ 'lib2.cc' ],
+ 'dependencies': [ 'lib_indirect' ],
+ },
+ {
+ 'target_name': 'lib3',
+ 'type': 'static_library',
+ 'sources': [ 'lib3.cc' ],
+ },
+ {
+ 'target_name': 'lib_indirect',
+ 'type': 'static_library',
+ 'sources': [ 'lib_indirect.cc' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/relative/foo/a/a.cc b/third_party/python/gyp/test/relative/foo/a/a.cc
new file mode 100644
index 0000000000..7d1c953448
--- /dev/null
+++ b/third_party/python/gyp/test/relative/foo/a/a.cc
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/relative/foo/a/a.gyp b/third_party/python/gyp/test/relative/foo/a/a.gyp
new file mode 100644
index 0000000000..66316ac681
--- /dev/null
+++ b/third_party/python/gyp/test/relative/foo/a/a.gyp
@@ -0,0 +1,13 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'executable',
+ 'sources': ['a.cc'],
+ 'dependencies': [
+ '../../foo/b/b.gyp:b',
+ 'c/c.gyp:c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/relative/foo/a/c/c.cc b/third_party/python/gyp/test/relative/foo/a/c/c.cc
new file mode 100644
index 0000000000..9d22471684
--- /dev/null
+++ b/third_party/python/gyp/test/relative/foo/a/c/c.cc
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+int func() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/relative/foo/a/c/c.gyp b/third_party/python/gyp/test/relative/foo/a/c/c.gyp
new file mode 100644
index 0000000000..c1f087db99
--- /dev/null
+++ b/third_party/python/gyp/test/relative/foo/a/c/c.gyp
@@ -0,0 +1,12 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'c',
+ 'type': 'static_library',
+ 'sources': ['c.cc'],
+ 'dependencies': [
+ '../../b/b.gyp:b',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/relative/foo/b/b.cc b/third_party/python/gyp/test/relative/foo/b/b.cc
new file mode 100644
index 0000000000..011d59cebb
--- /dev/null
+++ b/third_party/python/gyp/test/relative/foo/b/b.cc
@@ -0,0 +1,9 @@
+/*
+ * Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+int func2() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/relative/foo/b/b.gyp b/third_party/python/gyp/test/relative/foo/b/b.gyp
new file mode 100644
index 0000000000..0ebe4533d3
--- /dev/null
+++ b/third_party/python/gyp/test/relative/foo/b/b.gyp
@@ -0,0 +1,9 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'b',
+ 'type': 'static_library',
+ 'sources': ['b.cc'],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/relative/gyptest-default.py b/third_party/python/gyp/test/relative/gyptest-default.py
new file mode 100755
index 0000000000..685cdfd75e
--- /dev/null
+++ b/third_party/python/gyp/test/relative/gyptest-default.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that dependencies expressed as paths relative to the referencing
+.gyp file are resolved correctly when gyp is run from a subdirectory
+(msvs only).
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_default', formats=['msvs'])
+
+# Run from down in foo.
+test.run_gyp('a.gyp', chdir='foo/a')
+sln = test.workpath('foo/a/a.sln')
+sln_data = open(sln, 'rb').read().decode('utf-8', 'ignore')
+vcproj = sln_data.count('b.vcproj')
+vcxproj = sln_data.count('b.vcxproj')
+if (vcproj, vcxproj) not in [(1, 0), (0, 1)]:
+ test.fail_test()
+
+test.pass_test()
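
The substance of this test is that dependency references are resolved relative
to the .gyp file that declares them, so '../../foo/b/b.gyp:b' (from foo/a) and
'../../b/b.gyp:b' (from foo/a/c) must land on the same project file exactly
once in the solution. A sketch of that resolution (resolve_dependency is a
hypothetical helper):

    import posixpath

    def resolve_dependency(declaring_gyp, dependency):
        # 'relative/path.gyp:target' resolved against the declaring file.
        dep_path, target = dependency.rsplit(':', 1)
        base = posixpath.dirname(declaring_gyp)
        return posixpath.normpath(posixpath.join(base, dep_path)), target

    assert (resolve_dependency('foo/a/a.gyp', '../../foo/b/b.gyp:b')
            == ('foo/b/b.gyp', 'b'))
    assert (resolve_dependency('foo/a/c/c.gyp', '../../b/b.gyp:b')
            == ('foo/b/b.gyp', 'b'))
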
diff --git a/third_party/python/gyp/test/rename/filecase/file.c b/third_party/python/gyp/test/rename/filecase/file.c
new file mode 100644
index 0000000000..76e8197013
--- /dev/null
+++ b/third_party/python/gyp/test/rename/filecase/file.c
@@ -0,0 +1 @@
+int main() { return 0; }
diff --git a/third_party/python/gyp/test/rename/filecase/test-casesensitive.gyp b/third_party/python/gyp/test/rename/filecase/test-casesensitive.gyp
new file mode 100644
index 0000000000..48eaa6eb67
--- /dev/null
+++ b/third_party/python/gyp/test/rename/filecase/test-casesensitive.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'filecaserename_sensitive',
+ 'type': 'executable',
+ 'sources': [
+ 'FiLe.c',
+ 'fIlE.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rename/filecase/test.gyp b/third_party/python/gyp/test/rename/filecase/test.gyp
new file mode 100644
index 0000000000..eaee9337b6
--- /dev/null
+++ b/third_party/python/gyp/test/rename/filecase/test.gyp
@@ -0,0 +1,14 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'filecaserename',
+ 'type': 'executable',
+ 'sources': [
+ 'file.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rename/gyptest-filecase.py b/third_party/python/gyp/test/rename/gyptest-filecase.py
new file mode 100644
index 0000000000..daed5180d3
--- /dev/null
+++ b/third_party/python/gyp/test/rename/gyptest-filecase.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Checks that files whose file case changes get rebuilt correctly.
+"""
+
+import os
+import TestGyp
+
+test = TestGyp.TestGyp()
+CHDIR = 'filecase'
+test.run_gyp('test.gyp', chdir=CHDIR)
+test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+os.rename('filecase/file.c', 'filecase/fIlE.c')
+test.write('filecase/test.gyp',
+ test.read('filecase/test.gyp').replace('file.c', 'fIlE.c'))
+test.run_gyp('test.gyp', chdir=CHDIR)
+test.build('test.gyp', test.ALL, chdir=CHDIR)
+
+
+# Check that having files that differ only in case still works on
+# case-sensitive file systems.
+test.write('filecase/FiLe.c', 'int f(); int main() { return f(); }')
+test.write('filecase/fIlE.c', 'int f() { return 42; }')
+is_case_sensitive = test.read('filecase/FiLe.c') != test.read('filecase/fIlE.c')
+if is_case_sensitive:
+ test.run_gyp('test-casesensitive.gyp', chdir=CHDIR)
+ test.build('test-casesensitive.gyp', test.ALL, chdir=CHDIR)
+
+test.pass_test()
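
The is_case_sensitive probe above works because, on a case-insensitive
filesystem, both writes land in one file and the two reads come back
identical. The same probe in isolation (a sketch that uses a scratch
directory instead of the test workdir):

    import os
    import tempfile

    def filesystem_is_case_sensitive(directory=None):
        directory = directory or tempfile.mkdtemp()
        upper = os.path.join(directory, 'FiLe.probe')
        lower = os.path.join(directory, 'fIlE.probe')
        open(upper, 'w').write('A')
        open(lower, 'w').write('B')
        # Case-insensitive: the second write clobbered the first.
        sensitive = open(upper).read() != open(lower).read()
        os.remove(lower)
        if sensitive:
            os.remove(upper)
        return sensitive
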
diff --git a/third_party/python/gyp/test/restat/gyptest-restat.py b/third_party/python/gyp/test/restat/gyptest-restat.py
new file mode 100644
index 0000000000..87379044dd
--- /dev/null
+++ b/third_party/python/gyp/test/restat/gyptest-restat.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that dependent rules are executed iff a dependency action modifies its
+outputs.
+"""
+
+import TestGyp
+import os
+
+test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
+
+test.run_gyp('restat.gyp', chdir='src')
+
+chdir = 'relocate/src'
+test.relocate('src', chdir)
+
+# Building 'dependent' the first time generates 'side_effect', but building it
+# the second time doesn't, because 'create_intermediate' doesn't update its
+# output.
+test.build('restat.gyp', 'dependent', chdir=chdir)
+test.built_file_must_exist('side_effect', chdir=chdir)
+os.remove(test.built_file_path('side_effect', chdir=chdir))
+test.build('restat.gyp', 'dependent', chdir=chdir)
+test.built_file_must_not_exist('side_effect', chdir=chdir)
+
+test.pass_test()
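
This is ninja's restat behavior: after an edge runs, ninja re-stats the
declared outputs, and an output whose mtime did not move does not dirty the
edges that depend on it. A sketch of that bookkeeping
(restat_changed_outputs is a hypothetical helper):

    import os

    def restat_changed_outputs(outputs, run_action):
        before = {}
        for out in outputs:
            before[out] = (os.stat(out).st_mtime
                           if os.path.exists(out) else None)
        run_action()
        # Only outputs whose mtime moved should trigger dependent edges.
        return [out for out in outputs
                if os.path.exists(out)
                and os.stat(out).st_mtime != before[out]]
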
diff --git a/third_party/python/gyp/test/restat/src/create_intermediate.py b/third_party/python/gyp/test/restat/src/create_intermediate.py
new file mode 100644
index 0000000000..a4d7450371
--- /dev/null
+++ b/third_party/python/gyp/test/restat/src/create_intermediate.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Creates argv[1] iff it doesn't already exist.
+"""
+
+import os
+import sys
+
+outfile = sys.argv[1]
+if os.path.exists(outfile):
+ sys.exit()
+open(outfile, "wb").close()
diff --git a/third_party/python/gyp/test/restat/src/restat.gyp b/third_party/python/gyp/test/restat/src/restat.gyp
new file mode 100644
index 0000000000..ff020e0ce6
--- /dev/null
+++ b/third_party/python/gyp/test/restat/src/restat.gyp
@@ -0,0 +1,50 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'create_intermediate',
+ 'type': 'none',
+ 'msvs_cygwin_shell': '0',
+ 'actions': [
+ {
+ 'action_name': 'create_intermediate',
+ 'inputs': [
+ 'create_intermediate.py',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/intermediate',
+ 'ALWAYS.run.ALWAYS',
+ ],
+ 'action': [
+ 'python', 'create_intermediate.py', '<(PRODUCT_DIR)/intermediate',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'dependent',
+ 'type': 'none',
+ 'msvs_cygwin_shell': '0',
+ 'dependencies': [
+ 'create_intermediate',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'dependent',
+ 'inputs': [
+ '<(PRODUCT_DIR)/intermediate',
+ ],
+ 'outputs': [
+ '<(PRODUCT_DIR)/dependent'
+ ],
+ 'action': [
+ 'python', 'touch.py', '<(PRODUCT_DIR)/dependent', '<(PRODUCT_DIR)/side_effect',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/restat/src/touch.py b/third_party/python/gyp/test/restat/src/touch.py
new file mode 100644
index 0000000000..7cd781a90c
--- /dev/null
+++ b/third_party/python/gyp/test/restat/src/touch.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+#
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Cross-platform touch."""
+
+import os
+import sys
+
+for fname in sys.argv[1:]:
+ if os.path.exists(fname):
+ os.utime(fname, None)
+ else:
+ open(fname, 'w').close()
diff --git a/third_party/python/gyp/test/rules-dirname/gyptest-dirname.py b/third_party/python/gyp/test/rules-dirname/gyptest-dirname.py
new file mode 100755
index 0000000000..da5429cbad
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/gyptest-dirname.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies rules that use RULE_INPUT_DIRNAME and RULE_INPUT_PATH expansions
+when using an explicit build target of 'all'.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import os
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+test = TestGyp.TestGyp(formats=['make', 'ninja', 'xcode', 'msvs'])
+
+test.run_gyp('actions.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('actions.gyp', chdir='relocate/src')
+
+expect = """\
+no dir here
+hi c
+hello baz
+"""
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('gencc_int_output', chdir=chdir, stdout=expect)
+if test.format == 'msvs':
+ test.run_built_executable('gencc_int_output_external', chdir=chdir,
+ stdout=expect)
+
+test.must_match('relocate/src/subdir/foo/bar/baz.dirname',
+ os.path.join('foo', 'bar'))
+test.must_match('relocate/src/subdir/a/b/c.dirname',
+ os.path.join('a', 'b'))
+
+# FIXME the xcode and make generators incorrectly convert RULE_INPUT_PATH
+# to an absolute path, making the tests below fail!
+if test.format != 'xcode' and test.format != 'make':
+ test.must_match('relocate/src/subdir/foo/bar/baz.path',
+ os.path.join('foo', 'bar', 'baz.printvars'))
+ test.must_match('relocate/src/subdir/a/b/c.path',
+ os.path.join('a', 'b', 'c.printvars'))
+
+test.pass_test()
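
The rules in this test lean on gyp's RULE_INPUT_* expansions: for a source
like 'foo/bar/baz.printvars', DIRNAME expands to 'foo/bar' and ROOT to 'baz',
which is what the must_match() calls above assert. A sketch of those
expansions (rule_input_vars is a hypothetical helper):

    import os

    def rule_input_vars(path):
        # RULE_INPUT_* values for one rule source file.
        root, ext = os.path.splitext(os.path.basename(path))
        return {
            'RULE_INPUT_PATH': path,
            'RULE_INPUT_DIRNAME': os.path.dirname(path),
            'RULE_INPUT_ROOT': root,
            'RULE_INPUT_EXT': ext,
        }

    expanded = rule_input_vars('foo/bar/baz.printvars')
    assert expanded['RULE_INPUT_DIRNAME'] == 'foo/bar'
    assert expanded['RULE_INPUT_ROOT'] == 'baz'
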
diff --git a/third_party/python/gyp/test/rules-dirname/src/actions.gyp b/third_party/python/gyp/test/rules-dirname/src/actions.gyp
new file mode 100644
index 0000000000..c5693c6c9e
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/actions.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'pull_in_all_actions',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir/input-rule-dirname.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules-dirname/src/copy-file.py b/third_party/python/gyp/test/rules-dirname/src/copy-file.py
new file mode 100755
index 0000000000..271a72b6b1
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/copy-file.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import sys
+
+contents = open(sys.argv[1], 'r').read()
+open(sys.argv[2], 'w').write(contents)
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/rules-dirname/src/subdir/a/b/c.gencc b/third_party/python/gyp/test/rules-dirname/src/subdir/a/b/c.gencc
new file mode 100644
index 0000000000..29cb5f79ad
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/subdir/a/b/c.gencc
@@ -0,0 +1,8 @@
+// -*- mode: c++ -*-
+#include <stdio.h>
+
+namespace gen {
+ void c() {
+ printf("hi c\n");
+ }
+}
diff --git a/third_party/python/gyp/test/rules-dirname/src/subdir/a/b/c.printvars b/third_party/python/gyp/test/rules-dirname/src/subdir/a/b/c.printvars
new file mode 100644
index 0000000000..cc4561dc41
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/subdir/a/b/c.printvars
@@ -0,0 +1 @@
+# Empty file for testing build rules
diff --git a/third_party/python/gyp/test/rules-dirname/src/subdir/foo/bar/baz.gencc b/third_party/python/gyp/test/rules-dirname/src/subdir/foo/bar/baz.gencc
new file mode 100644
index 0000000000..90b4ce9243
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/subdir/foo/bar/baz.gencc
@@ -0,0 +1,8 @@
+// -*- mode: c++ -*-
+#include <stdio.h>
+
+namespace gen {
+ void baz() {
+ printf("hello baz\n");
+ }
+}
diff --git a/third_party/python/gyp/test/rules-dirname/src/subdir/foo/bar/baz.printvars b/third_party/python/gyp/test/rules-dirname/src/subdir/foo/bar/baz.printvars
new file mode 100644
index 0000000000..cc4561dc41
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/subdir/foo/bar/baz.printvars
@@ -0,0 +1 @@
+# Empty file for testing build rules
diff --git a/third_party/python/gyp/test/rules-dirname/src/subdir/input-rule-dirname.gyp b/third_party/python/gyp/test/rules-dirname/src/subdir/input-rule-dirname.gyp
new file mode 100644
index 0000000000..da749a2231
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/subdir/input-rule-dirname.gyp
@@ -0,0 +1,140 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'print_rule_input_dirname',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'foo/bar/baz.printvars',
+ 'a/b/c.printvars',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'printvars',
+ 'extension': 'printvars',
+ 'inputs': [
+ 'printvars.py',
+ ],
+ 'outputs': [
+ '<(RULE_INPUT_DIRNAME)/<(RULE_INPUT_ROOT).dirname',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(RULE_INPUT_DIRNAME)', '<@(_outputs)',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'print_rule_input_path',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'foo/bar/baz.printvars',
+ 'a/b/c.printvars',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'printvars',
+ 'extension': 'printvars',
+ 'inputs': [
+ 'printvars.py',
+ ],
+ 'outputs': [
+ '<(RULE_INPUT_DIRNAME)/<(RULE_INPUT_ROOT).path',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'gencc_int_output',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'nodir.gencc',
+ 'foo/bar/baz.gencc',
+ 'a/b/c.gencc',
+ 'main.cc',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'gencc',
+ 'extension': 'gencc',
+ 'inputs': [
+ '<(DEPTH)/copy-file.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_DIRNAME)/<(RULE_INPUT_ROOT).cc',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'targets': [
+ {
+ 'target_name': 'gencc_int_output_external',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'msvs_cygwin_dirs': ['../../../../../../<(DEPTH)/third_party/cygwin'],
+ 'sources': [
+ 'nodir.gencc',
+ 'foo/bar/baz.gencc',
+ 'a/b/c.gencc',
+ 'main.cc',
+ ],
+ 'dependencies': [
+ 'cygwin',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'gencc',
+ 'extension': 'gencc',
+ 'msvs_external_rule': 1,
+ 'inputs': [
+ '<(DEPTH)/copy-file.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_DIRNAME)/<(RULE_INPUT_ROOT).cc',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ {
+ 'target_name': 'cygwin',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'setup_mount',
+ 'msvs_cygwin_shell': 0,
+ 'inputs': [
+ '../../../../../../<(DEPTH)/third_party/cygwin/setup_mount.bat',
+ ],
+ # Visual Studio requires an output file, or else the
+ # custom build step won't run.
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/_always_run_setup_mount.marker',
+ ],
+ 'action': ['<@(_inputs)'],
+ },
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/third_party/python/gyp/test/rules-dirname/src/subdir/main.cc b/third_party/python/gyp/test/rules-dirname/src/subdir/main.cc
new file mode 100644
index 0000000000..3bb8e01395
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/subdir/main.cc
@@ -0,0 +1,14 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+namespace gen {
+ extern void nodir();
+ extern void c();
+ extern void baz();
+}
+
+int main() {
+ gen::nodir();
+ gen::c();
+ gen::baz();
+}
diff --git a/third_party/python/gyp/test/rules-dirname/src/subdir/nodir.gencc b/third_party/python/gyp/test/rules-dirname/src/subdir/nodir.gencc
new file mode 100644
index 0000000000..720f589bc2
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/subdir/nodir.gencc
@@ -0,0 +1,8 @@
+// -*- mode: c++ -*-
+#include <stdio.h>
+
+namespace gen {
+ void nodir() {
+ printf("no dir here\n");
+ }
+}
diff --git a/third_party/python/gyp/test/rules-dirname/src/subdir/printvars.py b/third_party/python/gyp/test/rules-dirname/src/subdir/printvars.py
new file mode 100755
index 0000000000..ef3d92e8cf
--- /dev/null
+++ b/third_party/python/gyp/test/rules-dirname/src/subdir/printvars.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Prints interesting vars
+"""
+
+import sys;
+
+out = open(sys.argv[2], 'w')
+out.write(sys.argv[1]);
diff --git a/third_party/python/gyp/test/rules-rebuild/gyptest-all.py b/third_party/python/gyp/test/rules-rebuild/gyptest-all.py
new file mode 100755
index 0000000000..aaaa2a6e6f
--- /dev/null
+++ b/third_party/python/gyp/test/rules-rebuild/gyptest-all.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a rule that generates multiple outputs rebuilds
+correctly when the inputs change.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_all')
+
+test.run_gyp('same_target.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+
+test.build('same_target.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from main.c
+Hello from prog1.in!
+Hello from prog2.in!
+"""
+
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
+
+
+test.sleep()
+contents = test.read(['relocate', 'src', 'prog1.in'])
+contents = contents.replace('!', ' AGAIN!')
+test.write(['relocate', 'src', 'prog1.in'], contents)
+
+test.build('same_target.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from main.c
+Hello from prog1.in AGAIN!
+Hello from prog2.in!
+"""
+
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
+
+
+test.sleep()
+contents = test.read(['relocate', 'src', 'prog2.in'])
+contents = contents.replace('!', ' AGAIN!')
+test.write(['relocate', 'src', 'prog2.in'], contents)
+
+test.build('same_target.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from main.c
+Hello from prog1.in AGAIN!
+Hello from prog2.in AGAIN!
+"""
+
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/rules-rebuild/gyptest-default.py b/third_party/python/gyp/test/rules-rebuild/gyptest-default.py
new file mode 100755
index 0000000000..ac3f0209aa
--- /dev/null
+++ b/third_party/python/gyp/test/rules-rebuild/gyptest-default.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a rule that generates multiple outputs rebuilds
+correctly when the inputs change.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(workdir='workarea_default')
+
+test.run_gyp('same_target.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+
+test.build('same_target.gyp', chdir='relocate/src')
+
+expect = """\
+Hello from main.c
+Hello from prog1.in!
+Hello from prog2.in!
+"""
+
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
+
+
+test.sleep()
+contents = test.read(['relocate', 'src', 'prog1.in'])
+contents = contents.replace('!', ' AGAIN!')
+test.write(['relocate', 'src', 'prog1.in'], contents)
+
+test.build('same_target.gyp', chdir='relocate/src')
+
+expect = """\
+Hello from main.c
+Hello from prog1.in AGAIN!
+Hello from prog2.in!
+"""
+
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
+
+
+test.sleep()
+contents = test.read(['relocate', 'src', 'prog2.in'])
+contents = contents.replace('!', ' AGAIN!')
+test.write(['relocate', 'src', 'prog2.in'], contents)
+
+test.build('same_target.gyp', chdir='relocate/src')
+
+expect = """\
+Hello from main.c
+Hello from prog1.in AGAIN!
+Hello from prog2.in AGAIN!
+"""
+
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
+
+
+# Test that modifying a rule's inputs (specifically, make-sources.py) causes
+# the targets to be built.
+
+test.sleep()
+contents = test.read(['relocate', 'src', 'make-sources.py'])
+contents = contents.replace('%s', 'the amazing %s')
+test.write(['relocate', 'src', 'make-sources.py'], contents)
+
+test.build('same_target.gyp', chdir='relocate/src')
+
+expect = """\
+Hello from main.c
+Hello from the amazing prog1.in AGAIN!
+Hello from the amazing prog2.in AGAIN!
+"""
+
+test.run_built_executable('program', chdir='relocate/src', stdout=expect)
+
+test.up_to_date('same_target.gyp', 'program', chdir='relocate/src')
+
+
+test.pass_test()
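Aside: the last step above leans on rule-input invalidation. Because the rule in same_target.gyp (further below in this patch) lists make-sources.py in its 'inputs', editing that script must dirty every source the rule matches. A sketch of that fan-out, using the file names from this patch:

    # Editing a rule 'input' dirties every matching source, so both
    # generated .c/.h pairs are regenerated and the binary is relinked.
    rule_inputs = ['make-sources.py']
    matching_sources = ['prog1.in', 'prog2.in']   # extension == 'in'
    regenerated = [s.replace('.in', ext) for s in matching_sources
                   for ext in ('.c', '.h')]
    # ['prog1.c', 'prog1.h', 'prog2.c', 'prog2.h']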
diff --git a/third_party/python/gyp/test/rules-rebuild/src/main.c b/third_party/python/gyp/test/rules-rebuild/src/main.c
new file mode 100644
index 0000000000..bd8fbb20ea
--- /dev/null
+++ b/third_party/python/gyp/test/rules-rebuild/src/main.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+
+extern void prog1(void);
+extern void prog2(void);
+
+int main(void)
+{
+ printf("Hello from main.c\n");
+ prog1();
+ prog2();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/rules-rebuild/src/make-sources.py b/third_party/python/gyp/test/rules-rebuild/src/make-sources.py
new file mode 100755
index 0000000000..dd9e52856e
--- /dev/null
+++ b/third_party/python/gyp/test/rules-rebuild/src/make-sources.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+assert len(sys.argv) == 4, sys.argv
+
+(in_file, c_file, h_file) = sys.argv[1:]
+
+def write_file(filename, contents):
+ open(filename, 'w').write(contents)
+
+write_file(c_file, open(in_file, 'r').read())
+
+write_file(h_file, '#define NAME "%s"\n' % in_file)
+
+sys.exit(0)
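As a quick illustration of what this script emits (a sketch; real invocations place the outputs under <(INTERMEDIATE_DIR), so the paths here are illustrative):

    import subprocess

    # python make-sources.py <in_file> <c_file> <h_file>
    subprocess.check_call(
        ['python', 'make-sources.py', 'prog1.in', 'prog1.c', 'prog1.h'])
    # prog1.c is a verbatim copy of prog1.in; prog1.h contains:
    #   #define NAME "prog1.in"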
diff --git a/third_party/python/gyp/test/rules-rebuild/src/prog1.in b/third_party/python/gyp/test/rules-rebuild/src/prog1.in
new file mode 100644
index 0000000000..191b00ef1e
--- /dev/null
+++ b/third_party/python/gyp/test/rules-rebuild/src/prog1.in
@@ -0,0 +1,7 @@
+#include <stdio.h>
+#include "prog1.h"
+
+void prog1(void)
+{
+ printf("Hello from %s!\n", NAME);
+}
diff --git a/third_party/python/gyp/test/rules-rebuild/src/prog2.in b/third_party/python/gyp/test/rules-rebuild/src/prog2.in
new file mode 100644
index 0000000000..7bfac5104c
--- /dev/null
+++ b/third_party/python/gyp/test/rules-rebuild/src/prog2.in
@@ -0,0 +1,7 @@
+#include <stdio.h>
+#include "prog2.h"
+
+void prog2(void)
+{
+ printf("Hello from %s!\n", NAME);
+}
diff --git a/third_party/python/gyp/test/rules-rebuild/src/same_target.gyp b/third_party/python/gyp/test/rules-rebuild/src/same_target.gyp
new file mode 100644
index 0000000000..22ba56056d
--- /dev/null
+++ b/third_party/python/gyp/test/rules-rebuild/src/same_target.gyp
@@ -0,0 +1,31 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'main.c',
+ 'prog1.in',
+ 'prog2.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'make_sources',
+ 'extension': 'in',
+ 'inputs': [
+ 'make-sources.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).c',
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).h',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_NAME)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
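For readers unfamiliar with gyp rules, here is a rough sketch of how the make_sources rule above expands for one matching source; 'out/gen' stands in for <(INTERMEDIATE_DIR), whose real value is generator-specific:

    import os

    def expand_make_sources(source, intermediate_dir='out/gen'):
        root, _ = os.path.splitext(os.path.basename(source))  # 'prog1'
        outputs = [os.path.join(intermediate_dir, root + '.c'),
                   os.path.join(intermediate_dir, root + '.h')]
        # action: ['python', <(_inputs), <(RULE_INPUT_NAME), <@(_outputs)]
        return ['python', 'make-sources.py',
                os.path.basename(source)] + outputs

    print(expand_make_sources('prog1.in'))
    # ['python', 'make-sources.py', 'prog1.in',
    #  'out/gen/prog1.c', 'out/gen/prog1.h']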
diff --git a/third_party/python/gyp/test/rules-use-built-dependencies/gyptest-use-built-dependencies.py b/third_party/python/gyp/test/rules-use-built-dependencies/gyptest-use-built-dependencies.py
new file mode 100755
index 0000000000..a57c36d5b0
--- /dev/null
+++ b/third_party/python/gyp/test/rules-use-built-dependencies/gyptest-use-built-dependencies.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that rules which use built dependencies work correctly.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('use-built-dependencies-rule.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+test.build('use-built-dependencies-rule.gyp', chdir='relocate/src')
+
+test.built_file_must_exist('main_output', chdir='relocate/src')
+test.built_file_must_match('main_output', 'output', chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/rules-use-built-dependencies/src/main.cc b/third_party/python/gyp/test/rules-use-built-dependencies/src/main.cc
new file mode 100644
index 0000000000..937d284599
--- /dev/null
+++ b/third_party/python/gyp/test/rules-use-built-dependencies/src/main.cc
@@ -0,0 +1,17 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+#include <stdio.h>
+
+int main(int argc, char *argv[]) {
+ if (argc < 2) {
+ return 2;
+ }
+ FILE* file;
+ file = fopen(argv[1], "wb");
+ const char output[] = "output";
+ fwrite(output, 1, sizeof(output) - 1, file);
+ fclose(file);
+ return 0;
+}
+
diff --git a/third_party/python/gyp/test/rules-use-built-dependencies/src/use-built-dependencies-rule.gyp b/third_party/python/gyp/test/rules-use-built-dependencies/src/use-built-dependencies-rule.gyp
new file mode 100644
index 0000000000..92bfeda392
--- /dev/null
+++ b/third_party/python/gyp/test/rules-use-built-dependencies/src/use-built-dependencies-rule.gyp
@@ -0,0 +1,42 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'main',
+ 'toolsets': ['host'],
+ 'type': 'executable',
+ 'sources': [
+ 'main.cc',
+ ],
+ },
+ {
+ 'target_name': 'post',
+ 'toolsets': ['host'],
+ 'type': 'none',
+ 'dependencies': [
+ 'main',
+ ],
+ 'sources': [
+        # As this test is written, it could easily be made into an action.
+        # An actual use case would have a number of these 'sources'.
+ '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)main<(EXECUTABLE_SUFFIX)',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'generate_output',
+ 'extension': '<(EXECUTABLE_SUFFIX)',
+ 'outputs': [ '<(RULE_INPUT_DIRNAME)/<(RULE_INPUT_ROOT)_output', ],
+ 'msvs_cygwin_shell': 0,
+ 'action': [
+ '<(RULE_INPUT_PATH)',
+ '<(RULE_INPUT_DIRNAME)/<(RULE_INPUT_ROOT)_output',
+ ],
+ 'message': 'Generating output for <(RULE_INPUT_ROOT)'
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules-variables/gyptest-rules-variables.py b/third_party/python/gyp/test/rules-variables/gyptest-rules-variables.py
new file mode 100755
index 0000000000..16afc22ef9
--- /dev/null
+++ b/third_party/python/gyp/test/rules-variables/gyptest-rules-variables.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that rule-related variables are expanded.
+"""
+
+from __future__ import print_function
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['ninja'])
+
+test.relocate('src', 'relocate/src')
+
+test.run_gyp('variables.gyp', chdir='relocate/src')
+
+test.build('variables.gyp', chdir='relocate/src')
+
+test.run_built_executable('all_rule_variables',
+ chdir='relocate/src',
+ stdout="input_root\ninput_dirname\ninput_path\n" +
+ "input_ext\ninput_name\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/rules-variables/src/input_ext.c b/third_party/python/gyp/test/rules-variables/src/input_ext.c
new file mode 100644
index 0000000000..f41e73ef8a
--- /dev/null
+++ b/third_party/python/gyp/test/rules-variables/src/input_ext.c
@@ -0,0 +1,9 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+void input_ext() {
+ printf("input_ext\n");
+}
diff --git a/third_party/python/gyp/test/rules-variables/src/input_name/test.c b/third_party/python/gyp/test/rules-variables/src/input_name/test.c
new file mode 100644
index 0000000000..e28b74d115
--- /dev/null
+++ b/third_party/python/gyp/test/rules-variables/src/input_name/test.c
@@ -0,0 +1,9 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+void input_name() {
+ printf("input_name\n");
+}
diff --git a/third_party/python/gyp/test/rules-variables/src/input_path/subdir/test.c b/third_party/python/gyp/test/rules-variables/src/input_path/subdir/test.c
new file mode 100644
index 0000000000..403dbbda4c
--- /dev/null
+++ b/third_party/python/gyp/test/rules-variables/src/input_path/subdir/test.c
@@ -0,0 +1,9 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+void input_path() {
+ printf("input_path\n");
+}
diff --git a/third_party/python/gyp/test/rules-variables/src/subdir/input_dirname.c b/third_party/python/gyp/test/rules-variables/src/subdir/input_dirname.c
new file mode 100644
index 0000000000..40cecd87d9
--- /dev/null
+++ b/third_party/python/gyp/test/rules-variables/src/subdir/input_dirname.c
@@ -0,0 +1,9 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+void input_dirname() {
+ printf("input_dirname\n");
+}
diff --git a/third_party/python/gyp/test/rules-variables/src/subdir/test.c b/third_party/python/gyp/test/rules-variables/src/subdir/test.c
new file mode 100644
index 0000000000..6c0280b8ad
--- /dev/null
+++ b/third_party/python/gyp/test/rules-variables/src/subdir/test.c
@@ -0,0 +1,18 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern void input_root();
+extern void input_dirname();
+extern void input_path();
+extern void input_ext();
+extern void input_name();
+
+int main() {
+ input_root();
+ input_dirname();
+ input_path();
+ input_ext();
+ input_name();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/rules-variables/src/test.input_root.c b/third_party/python/gyp/test/rules-variables/src/test.input_root.c
new file mode 100644
index 0000000000..33a7740a5c
--- /dev/null
+++ b/third_party/python/gyp/test/rules-variables/src/test.input_root.c
@@ -0,0 +1,9 @@
+// Copyright (c) 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+void input_root() {
+ printf("input_root\n");
+}
diff --git a/third_party/python/gyp/test/rules-variables/src/variables.gyp b/third_party/python/gyp/test/rules-variables/src/variables.gyp
new file mode 100644
index 0000000000..6debba12e3
--- /dev/null
+++ b/third_party/python/gyp/test/rules-variables/src/variables.gyp
@@ -0,0 +1,40 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ # This test shouldn't ever actually need to execute its rules: there's no
+ # command line that generates any output anyway. However, there's something
+    # slightly broken either in ninja or (maybe more likely?) on the win32 VM
+ # gypbots that breaks dependency checking and causes this rule to want to
+ # run. When it does run, the cygwin path is wrong, so the do-nothing step
+ # fails.
+ # TODO: Investigate and fix whatever's actually failing and remove this.
+ 'msvs_cygwin_dirs': ['../../../../../../<(DEPTH)/third_party/cygwin'],
+ },
+ 'targets': [
+ {
+ 'target_name': 'all_rule_variables',
+ 'type': 'executable',
+ 'sources': [
+ 'subdir/test.c',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'rule_variable',
+ 'extension': 'c',
+ 'outputs': [
+ '<(RULE_INPUT_ROOT).input_root.c',
+ '<(RULE_INPUT_DIRNAME)/input_dirname.c',
+ 'input_path/<(RULE_INPUT_PATH)',
+ 'input_ext<(RULE_INPUT_EXT)',
+ 'input_name/<(RULE_INPUT_NAME)',
+ ],
+ 'action': [],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
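A small sketch of the values gyp substitutes for the single source 'subdir/test.c' above, which is why the checked-in companion files in this patch carry the names they do:

    import posixpath

    path = 'subdir/test.c'                   # RULE_INPUT_PATH
    dirname, name = posixpath.split(path)    # RULE_INPUT_DIRNAME, RULE_INPUT_NAME
    root, ext = posixpath.splitext(name)     # RULE_INPUT_ROOT, RULE_INPUT_EXT
    assert (dirname, name, root, ext) == ('subdir', 'test.c', 'test', '.c')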
diff --git a/third_party/python/gyp/test/rules/gyptest-all.py b/third_party/python/gyp/test/rules/gyptest-all.py
new file mode 100755
index 0000000000..0520c2f6a0
--- /dev/null
+++ b/third_party/python/gyp/test/rules/gyptest-all.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simple rules when using an explicit build target of 'all'.
+"""
+
+from __future__ import print_function
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('no_action_with_rules_fails.gyp', chdir='src/noaction', status=1,
+ stderr=None)
+
+test.run_gyp('actions.gyp',
+ '-G', 'xcode_ninja_target_pattern=^pull_in_all_actions$',
+ chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('actions.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from program.c
+Hello from function1.in
+Hello from function2.in
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir1'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('program', chdir=chdir, stdout=expect)
+
+expect = """\
+Hello from program.c
+Hello from function3.in
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir3'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('program2', chdir=chdir, stdout=expect)
+
+test.must_match('relocate/src/subdir2/file1.out', 'Hello from file1.in\n')
+test.must_match('relocate/src/subdir2/file2.out', 'Hello from file2.in\n')
+
+test.must_match('relocate/src/subdir2/file1.out2', 'Hello from file1.in\n')
+test.must_match('relocate/src/subdir2/file2.out2', 'Hello from file2.in\n')
+
+test.must_match('relocate/src/subdir2/file1.out4', 'Hello from file1.in\n')
+test.must_match('relocate/src/subdir2/file2.out4', 'Hello from file2.in\n')
+test.must_match('relocate/src/subdir2/file1.copy', 'Hello from file1.in\n')
+
+test.must_match('relocate/src/external/file1.external_rules.out',
+ 'Hello from file1.in\n')
+test.must_match('relocate/src/external/file2.external_rules.out',
+ 'Hello from file2.in\n')
+
+expect = """\
+Hello from program.c
+Got 41.
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir4'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('program4', chdir=chdir, stdout=expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/rules/gyptest-default.py b/third_party/python/gyp/test/rules/gyptest-default.py
new file mode 100755
index 0000000000..5d01094197
--- /dev/null
+++ b/third_party/python/gyp/test/rules/gyptest-default.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies simple rules when using the default build target.
+"""
+
+from __future__ import print_function
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('actions.gyp',
+ '-G', 'xcode_ninja_target_pattern=^pull_in_all_actions$',
+ chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('actions.gyp', chdir='relocate/src')
+
+expect = """\
+Hello from program.c
+Hello from function1.in
+Hello from function2.in
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir1'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('program', chdir=chdir, stdout=expect)
+
+expect = """\
+Hello from program.c
+Hello from function3.in
+"""
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir3'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('program2', chdir=chdir, stdout=expect)
+
+test.must_match('relocate/src/subdir2/file1.out', 'Hello from file1.in\n')
+test.must_match('relocate/src/subdir2/file2.out', 'Hello from file2.in\n')
+
+test.must_match('relocate/src/subdir2/file1.out2', 'Hello from file1.in\n')
+test.must_match('relocate/src/subdir2/file2.out2', 'Hello from file2.in\n')
+
+test.must_match('relocate/src/subdir2/file1.out4', 'Hello from file1.in\n')
+test.must_match('relocate/src/subdir2/file2.out4', 'Hello from file2.in\n')
+test.must_match('relocate/src/subdir2/file1.copy', 'Hello from file1.in\n')
+
+test.must_match('relocate/src/external/file1.external_rules.out',
+ 'Hello from file1.in\n')
+test.must_match('relocate/src/external/file2.external_rules.out',
+ 'Hello from file2.in\n')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/rules/gyptest-input-root.py b/third_party/python/gyp/test/rules/gyptest-input-root.py
new file mode 100755
index 0000000000..92bade6d48
--- /dev/null
+++ b/third_party/python/gyp/test/rules/gyptest-input-root.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that RULE_INPUT_ROOT isn't turned into a path in rule actions.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('input-root.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('input-root.gyp', target='test', chdir='relocate/src')
+
+expect = """\
+Hello somefile
+"""
+
+test.run_built_executable('test', chdir='relocate/src', stdout=expect)
+test.pass_test()
diff --git a/third_party/python/gyp/test/rules/gyptest-special-variables.py b/third_party/python/gyp/test/rules/gyptest-special-variables.py
new file mode 100644
index 0000000000..05ea7cee16
--- /dev/null
+++ b/third_party/python/gyp/test/rules/gyptest-special-variables.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+""" Verifies that VS variables that require special variables are expanded
+correctly. """
+
+import sys
+import TestGyp
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp()
+
+ test.run_gyp('special-variables.gyp', chdir='src')
+ test.build('special-variables.gyp', test.ALL, chdir='src')
+ test.pass_test()
diff --git a/third_party/python/gyp/test/rules/src/actions.gyp b/third_party/python/gyp/test/rules/src/actions.gyp
new file mode 100644
index 0000000000..84376a7193
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/actions.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'pull_in_all_actions',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir1/executable.gyp:*',
+ 'subdir2/both_rule_and_action_input.gyp:*',
+ 'subdir2/never_used.gyp:*',
+ 'subdir2/no_inputs.gyp:*',
+ 'subdir2/no_action.gyp:*',
+ 'subdir2/none.gyp:*',
+ 'subdir3/executable2.gyp:*',
+ 'subdir4/build-asm.gyp:*',
+ 'external/external.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/an_asm.S b/third_party/python/gyp/test/rules/src/an_asm.S
new file mode 100644
index 0000000000..eeb1345550
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/an_asm.S
@@ -0,0 +1,6 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Fake asm file.
+int main() {}
diff --git a/third_party/python/gyp/test/rules/src/as.bat b/third_party/python/gyp/test/rules/src/as.bat
new file mode 100644
index 0000000000..903c31a726
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/as.bat
@@ -0,0 +1,7 @@
+@echo off
+:: Copyright (c) 2011 Google Inc. All rights reserved.
+:: Use of this source code is governed by a BSD-style license that can be
+:: found in the LICENSE file.
+
+:: Fake assembler for Windows
+cl /TP /c %1 /Fo%2
diff --git a/third_party/python/gyp/test/rules/src/copy-file.py b/third_party/python/gyp/test/rules/src/copy-file.py
new file mode 100755
index 0000000000..7bdfbfd4bd
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/copy-file.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import sys
+
+contents = open(sys.argv[1], 'r').read()
+open(sys.argv[2], 'w').write(contents)
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/rules/src/external/external.gyp b/third_party/python/gyp/test/rules/src/external/external.gyp
new file mode 100644
index 0000000000..b28174f57c
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/external/external.gyp
@@ -0,0 +1,66 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Test the case where there are no inputs (other than the
+# file the rule applies to).
+{
+ 'target_defaults': {
+ 'msvs_cygwin_dirs': ['../../../../../../<(DEPTH)/third_party/cygwin'],
+ },
+ 'targets': [
+ {
+ 'target_name': 'external_rules',
+ 'type': 'none',
+ 'sources': [
+ 'file1.in',
+ 'file2.in',
+ ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'dependencies': [
+ 'cygwin',
+ ],
+ }],
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file',
+ 'extension': 'in',
+ 'msvs_external_rule': 1,
+ 'outputs': [
+ '<(RULE_INPUT_ROOT).external_rules.out',
+ ],
+ 'action': [
+ 'python', '../copy-file.py', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ },
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'targets': [
+ {
+ 'target_name': 'cygwin',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'setup_mount',
+ 'msvs_cygwin_shell': 0,
+ 'inputs': [
+ '../../../../../../<(DEPTH)/third_party/cygwin/setup_mount.bat',
+ ],
+ # Visual Studio requires an output file, or else the
+ # custom build step won't run.
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/_always_run_setup_mount.marker',
+ ],
+ 'action': ['<@(_inputs)'],
+ },
+ ],
+ },
+ ],
+ }],
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/external/file1.in b/third_party/python/gyp/test/rules/src/external/file1.in
new file mode 100644
index 0000000000..86ac3ad389
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/external/file1.in
@@ -0,0 +1 @@
+Hello from file1.in
diff --git a/third_party/python/gyp/test/rules/src/external/file2.in b/third_party/python/gyp/test/rules/src/external/file2.in
new file mode 100644
index 0000000000..bf83d8ecec
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/external/file2.in
@@ -0,0 +1 @@
+Hello from file2.in
diff --git a/third_party/python/gyp/test/rules/src/input-root.gyp b/third_party/python/gyp/test/rules/src/input-root.gyp
new file mode 100644
index 0000000000..b6600e767c
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/input-root.gyp
@@ -0,0 +1,24 @@
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test',
+ 'type': 'executable',
+ 'sources': [ 'somefile.ext', ],
+ 'rules': [{
+ 'rule_name': 'rule',
+ 'extension': 'ext',
+ 'inputs': [ 'rule.py', ],
+ 'outputs': [ '<(RULE_INPUT_ROOT).cc', ],
+ 'action': [ 'python', 'rule.py', '<(RULE_INPUT_ROOT)', ],
+ 'message': 'Processing <(RULE_INPUT_PATH)',
+ 'process_outputs_as_sources': 1,
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ }],
+ },
+ ],
+}
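To make the data flow concrete: for somefile.ext the rule runs 'python rule.py somefile', and rule.py (further below in this patch) writes somefile.cc, so the executable prints exactly what gyptest-input-root.py asserts. A sketch:

    # rule.py receives RULE_INPUT_ROOT ('somefile'), not a path, and
    # emits somefile.cc containing: puts("Hello somefile");
    expected_stdout = 'Hello somefile\n'   # checked by gyptest-input-root.py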
diff --git a/third_party/python/gyp/test/rules/src/noaction/file1.in b/third_party/python/gyp/test/rules/src/noaction/file1.in
new file mode 100644
index 0000000000..86ac3ad389
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/noaction/file1.in
@@ -0,0 +1 @@
+Hello from file1.in
diff --git a/third_party/python/gyp/test/rules/src/noaction/no_action_with_rules_fails.gyp b/third_party/python/gyp/test/rules/src/noaction/no_action_with_rules_fails.gyp
new file mode 100644
index 0000000000..9b6a65629f
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/noaction/no_action_with_rules_fails.gyp
@@ -0,0 +1,37 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Test that the case where there's no action but there are inputs the rule
+# should process results in a gyp failure.
+{
+ 'targets': [
+ {
+ 'target_name': 'extension_does_match_sources_but_no_action',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'file1.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'assembled',
+ 'extension': 'in',
+ 'outputs': [
+ '<(RULE_INPUT_ROOT).in',
+ ],
+ 'conditions': [
+          # Always false, so the rule never gets an action.
+ [ '"true"=="false"', {
+ 'action': [
+ 'python', '../copy-file.py', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ 'message': 'test_rule',
+ }],
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/rule.py b/third_party/python/gyp/test/rules/src/rule.py
new file mode 100755
index 0000000000..8a1f36dedb
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/rule.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+f = open(sys.argv[1] + ".cc", "w")
+f.write("""\
+#include <stdio.h>
+
+int main() {
+ puts("Hello %s");
+ return 0;
+}
+""" % sys.argv[1])
+f.close()
diff --git a/third_party/python/gyp/test/rules/src/somefile.ext b/third_party/python/gyp/test/rules/src/somefile.ext
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/somefile.ext
diff --git a/third_party/python/gyp/test/rules/src/special-variables.gyp b/third_party/python/gyp/test/rules/src/special-variables.gyp
new file mode 100644
index 0000000000..d1443af5ba
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/special-variables.gyp
@@ -0,0 +1,34 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'rules': [
+ {
+ 'rule_name': 'assembler (gnu-compatible)',
+ 'msvs_cygwin_shell': 0,
+ 'msvs_quote_cmd': 0,
+ 'extension': 'S',
+ 'inputs': [
+ 'as.bat',
+ ],
+ 'outputs': [
+ '$(IntDir)/$(InputName).obj',
+ ],
+ 'action': [
+ 'as.bat',
+ '$(InputPath)',
+ '$(IntDir)/$(InputName).obj',
+ ],
+ 'message': 'Building assembly language file $(InputPath)',
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ 'target_name': 'test',
+ 'type': 'static_library',
+ 'sources': [ 'an_asm.S' ],
+ },
+ ],
+}
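For context, the $(...) names above are classic Visual Studio custom-build macros rather than gyp variables. Their commonly documented meanings, with rough gyp analogues, are sketched here (exact semantics vary by MSVS version):

    vs_macros = {
        '$(InputPath)': 'full path of the rule input   (~ RULE_INPUT_PATH)',
        '$(InputName)': 'input base name, no extension (~ RULE_INPUT_ROOT)',
        '$(IntDir)':    'intermediate build directory  (~ INTERMEDIATE_DIR)',
    }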
diff --git a/third_party/python/gyp/test/rules/src/subdir1/executable.gyp b/third_party/python/gyp/test/rules/src/subdir1/executable.gyp
new file mode 100644
index 0000000000..c34cce5a92
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir1/executable.gyp
@@ -0,0 +1,37 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'program.c',
+ 'function1.in',
+ 'function2.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file',
+ 'extension': 'in',
+ 'inputs': [
+ '../copy-file.py',
+ ],
+ 'outputs': [
+ # TODO: fix Make to support generated files not
+ # in a variable-named path like <(INTERMEDIATE_DIR)
+ #'<(RULE_INPUT_ROOT).c',
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).c',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir1/function1.in b/third_party/python/gyp/test/rules/src/subdir1/function1.in
new file mode 100644
index 0000000000..60ff28949b
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir1/function1.in
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void function1(void)
+{
+ printf("Hello from function1.in\n");
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir1/function2.in b/third_party/python/gyp/test/rules/src/subdir1/function2.in
new file mode 100644
index 0000000000..0fcfc03fdb
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir1/function2.in
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void function2(void)
+{
+ printf("Hello from function2.in\n");
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir1/program.c b/third_party/python/gyp/test/rules/src/subdir1/program.c
new file mode 100644
index 0000000000..6b11ff9f67
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir1/program.c
@@ -0,0 +1,12 @@
+#include <stdio.h>
+
+extern void function1(void);
+extern void function2(void);
+
+int main(void)
+{
+ printf("Hello from program.c\n");
+ function1();
+ function2();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir2/both_rule_and_action_input.gyp b/third_party/python/gyp/test/rules/src/subdir2/both_rule_and_action_input.gyp
new file mode 100644
index 0000000000..e5e6f3ec2b
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir2/both_rule_and_action_input.gyp
@@ -0,0 +1,50 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Tests that if a rule input is also an action input, both the rule and action
+# are executed.
+{
+ 'targets': [
+ {
+ 'target_name': 'files_both_rule_and_action_input',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'program.c',
+ 'file1.in',
+ 'file2.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file',
+ 'extension': 'in',
+ 'inputs': [
+ '../copy-file.py',
+ ],
+ 'outputs': [
+ '<(RULE_INPUT_ROOT).out4',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ },
+ ],
+ 'actions': [
+ {
+ 'action_name': 'copy_file1_in',
+ 'inputs': [
+ '../copy-file.py',
+ 'file1.in',
+ ],
+ 'outputs': [
+ 'file1.copy',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(_outputs)'
+ ],
+ },
+ ],
+ },
+ ],
+}
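To summarize the graph this target sets up (a sketch using the file names above): file1.in feeds two independent build edges, one through the rule and one through the action, and the test expects both outputs.

    edges = {
        'file1.in': ['file1.out4',    # via the copy_file rule
                     'file1.copy'],   # via the copy_file1_in action
        'file2.in': ['file2.out4'],   # rule only
    }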
diff --git a/third_party/python/gyp/test/rules/src/subdir2/file1.in b/third_party/python/gyp/test/rules/src/subdir2/file1.in
new file mode 100644
index 0000000000..86ac3ad389
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir2/file1.in
@@ -0,0 +1 @@
+Hello from file1.in
diff --git a/third_party/python/gyp/test/rules/src/subdir2/file2.in b/third_party/python/gyp/test/rules/src/subdir2/file2.in
new file mode 100644
index 0000000000..bf83d8ecec
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir2/file2.in
@@ -0,0 +1 @@
+Hello from file2.in
diff --git a/third_party/python/gyp/test/rules/src/subdir2/never_used.gyp b/third_party/python/gyp/test/rules/src/subdir2/never_used.gyp
new file mode 100644
index 0000000000..17f6f55371
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir2/never_used.gyp
@@ -0,0 +1,31 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Test the case where there is a rule that doesn't apply to anything.
+{
+ 'targets': [
+ {
+ 'target_name': 'files_no_input2',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'file1.in',
+ 'file2.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file3',
+ 'extension': 'in2',
+ 'outputs': [
+ '<(RULE_INPUT_ROOT).out3',
+ ],
+ 'action': [
+ 'python', '../copy-file.py', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir2/no_action.gyp b/third_party/python/gyp/test/rules/src/subdir2/no_action.gyp
new file mode 100644
index 0000000000..ffa1cefe18
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir2/no_action.gyp
@@ -0,0 +1,38 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Test that the case where an action is specified only under a conditional is
+# evaluated appropriately.
+{
+ 'targets': [
+ {
+ 'target_name': 'extension_does_not_match_sources_and_no_action',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'file1.in',
+ 'file2.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'assemble',
+ 'extension': 'asm',
+ 'outputs': [
+ '<(RULE_INPUT_ROOT).fail',
+ ],
+ 'conditions': [
+          # Always false, so the rule never gets an action.
+ [ '"true"=="false"', {
+ 'action': [
+ 'python', '../copy-file.py', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ 'message': 'test_rule',
+ }],
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir2/no_inputs.gyp b/third_party/python/gyp/test/rules/src/subdir2/no_inputs.gyp
new file mode 100644
index 0000000000..e61a1a3ff6
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir2/no_inputs.gyp
@@ -0,0 +1,32 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Test the case where there are no inputs (other than the
+# file the rule applies to).
+{
+ 'targets': [
+ {
+ 'target_name': 'files_no_input',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'file1.in',
+ 'file2.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file2',
+ 'extension': 'in',
+ 'outputs': [
+ '<(RULE_INPUT_ROOT).out2',
+ ],
+ 'action': [
+ 'python', '../copy-file.py', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir2/none.gyp b/third_party/python/gyp/test/rules/src/subdir2/none.gyp
new file mode 100644
index 0000000000..38bcdabdf6
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir2/none.gyp
@@ -0,0 +1,33 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'files',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'file1.in',
+ 'file2.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file',
+ 'extension': 'in',
+ 'inputs': [
+ '../copy-file.py',
+ ],
+ 'outputs': [
+ '<(RULE_INPUT_ROOT).out',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir2/program.c b/third_party/python/gyp/test/rules/src/subdir2/program.c
new file mode 100644
index 0000000000..e5db175148
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir2/program.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2014 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from program.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir3/executable2.gyp b/third_party/python/gyp/test/rules/src/subdir3/executable2.gyp
new file mode 100644
index 0000000000..a2a528fc7b
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir3/executable2.gyp
@@ -0,0 +1,37 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This one tests that rules are properly written if extensions are different
+# between the target's sources (program.c) and the generated files
+# (function3.cc).
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program2',
+ 'type': 'executable',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'program.c',
+ 'function3.in',
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'copy_file',
+ 'extension': 'in',
+ 'inputs': [
+ '../copy-file.py',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).cc',
+ ],
+ 'action': [
+ 'python', '<(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir3/function3.in b/third_party/python/gyp/test/rules/src/subdir3/function3.in
new file mode 100644
index 0000000000..99f46ab05e
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir3/function3.in
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+extern "C" void function3(void)
+{
+ printf("Hello from function3.in\n");
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir3/program.c b/third_party/python/gyp/test/rules/src/subdir3/program.c
new file mode 100644
index 0000000000..c38eead50e
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir3/program.c
@@ -0,0 +1,10 @@
+#include <stdio.h>
+
+extern void function3(void);
+
+int main(void)
+{
+ printf("Hello from program.c\n");
+ function3();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir4/asm-function.assem b/third_party/python/gyp/test/rules/src/subdir4/asm-function.assem
new file mode 100644
index 0000000000..ed47cade95
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir4/asm-function.assem
@@ -0,0 +1,10 @@
+#if PLATFORM_WINDOWS || PLATFORM_MAC
+# define IDENTIFIER(n) _##n
+#else /* Linux */
+# define IDENTIFIER(n) n
+#endif
+
+.globl IDENTIFIER(asm_function)
+IDENTIFIER(asm_function):
+ movl $41, %eax
+ ret
diff --git a/third_party/python/gyp/test/rules/src/subdir4/build-asm.gyp b/third_party/python/gyp/test/rules/src/subdir4/build-asm.gyp
new file mode 100644
index 0000000000..fe0fe93787
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir4/build-asm.gyp
@@ -0,0 +1,49 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This one tests that assembly files ending in .s and .S are compiled.
+
+{
+ 'target_defaults': {
+ 'conditions': [
+ ['OS=="win"', {
+ 'defines': ['PLATFORM_WIN'],
+ }],
+ ['OS=="mac"', {
+ 'defines': ['PLATFORM_MAC'],
+ }],
+ ['OS=="linux"', {
+ 'defines': ['PLATFORM_LINUX'],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'program4',
+ 'type': 'executable',
+ 'sources': [
+ 'asm-function.assem',
+ 'program.c',
+ ],
+ 'conditions': [
+ ['OS=="linux" or OS=="mac"', {
+ 'rules': [
+ {
+ 'rule_name': 'convert_assem',
+ 'extension': 'assem',
+ 'inputs': [],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).S',
+ ],
+ 'action': [
+ 'bash', '-c', 'cp <(RULE_INPUT_PATH) <@(_outputs)',
+ ],
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/rules/src/subdir4/program.c b/third_party/python/gyp/test/rules/src/subdir4/program.c
new file mode 100644
index 0000000000..ad647f4eb9
--- /dev/null
+++ b/third_party/python/gyp/test/rules/src/subdir4/program.c
@@ -0,0 +1,19 @@
+#include <stdio.h>
+
+// Use the assembly function on Linux and Mac, where it is built.
+#if PLATFORM_LINUX || PLATFORM_MAC
+extern int asm_function(void);
+#else
+int asm_function() {
+ return 41;
+}
+#endif
+
+int main(void)
+{
+ fprintf(stdout, "Hello from program.c\n");
+ fflush(stdout);
+ fprintf(stdout, "Got %d.\n", asm_function());
+ fflush(stdout);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/same-gyp-name/gyptest-all.py b/third_party/python/gyp/test/same-gyp-name/gyptest-all.py
new file mode 100755
index 0000000000..cda1a72d4d
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/gyptest-all.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Build a .gyp that depends on 2 gyp files with the same name.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('all.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('all.gyp', test.ALL, chdir='relocate/src')
+
+expect1 = """\
+Hello from main1.cc
+"""
+
+expect2 = """\
+Hello from main2.cc
+"""
+
+if test.format == 'xcode':
+ chdir1 = 'relocate/src/subdir1'
+ chdir2 = 'relocate/src/subdir2'
+else:
+ chdir1 = chdir2 = 'relocate/src'
+
+test.run_built_executable('program1', chdir=chdir1, stdout=expect1)
+test.run_built_executable('program2', chdir=chdir2, stdout=expect2)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-gyp-name/gyptest-default.py b/third_party/python/gyp/test/same-gyp-name/gyptest-default.py
new file mode 100755
index 0000000000..5e4bba0012
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/gyptest-default.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Build a .gyp that depends on 2 gyp files with the same name.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('all.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('all.gyp', chdir='relocate/src')
+
+expect1 = """\
+Hello from main1.cc
+"""
+
+expect2 = """\
+Hello from main2.cc
+"""
+
+if test.format == 'xcode':
+ chdir1 = 'relocate/src/subdir1'
+ chdir2 = 'relocate/src/subdir2'
+else:
+ chdir1 = chdir2 = 'relocate/src'
+
+test.run_built_executable('program1', chdir=chdir1, stdout=expect1)
+test.run_built_executable('program2', chdir=chdir2, stdout=expect2)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-gyp-name/gyptest-library.py b/third_party/python/gyp/test/same-gyp-name/gyptest-library.py
new file mode 100644
index 0000000000..957a4a52d6
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/gyptest-library.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a dependency on two gyp files with the same name does not create
+a uid collision in the resulting generated xcode file.
+"""
+
+import TestGyp
+
+import sys
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('test.gyp', chdir='library')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-gyp-name/library/one/sub.gyp b/third_party/python/gyp/test/same-gyp-name/library/one/sub.gyp
new file mode 100644
index 0000000000..1bed941e54
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/library/one/sub.gyp
@@ -0,0 +1,11 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'one',
+ 'type': 'static_library',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-gyp-name/library/test.gyp b/third_party/python/gyp/test/same-gyp-name/library/test.gyp
new file mode 100644
index 0000000000..552a77ed7e
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/library/test.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'duplicate_names',
+ 'type': 'shared_library',
+ 'dependencies': [
+ 'one/sub.gyp:one',
+ 'two/sub.gyp:two',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-gyp-name/library/two/sub.gyp b/third_party/python/gyp/test/same-gyp-name/library/two/sub.gyp
new file mode 100644
index 0000000000..934c98a496
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/library/two/sub.gyp
@@ -0,0 +1,11 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+{
+ 'targets': [
+ {
+ 'target_name': 'two',
+ 'type': 'static_library',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-gyp-name/src/all.gyp b/third_party/python/gyp/test/same-gyp-name/src/all.gyp
new file mode 100644
index 0000000000..229f02ea84
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/src/all.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'all_exes',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir1/executable.gyp:*',
+ 'subdir2/executable.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-gyp-name/src/subdir1/executable.gyp b/third_party/python/gyp/test/same-gyp-name/src/subdir1/executable.gyp
new file mode 100644
index 0000000000..82483b4c69
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/src/subdir1/executable.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program1',
+ 'type': 'executable',
+ 'sources': [
+ 'main1.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-gyp-name/src/subdir1/main1.cc b/third_party/python/gyp/test/same-gyp-name/src/subdir1/main1.cc
new file mode 100644
index 0000000000..3645558324
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/src/subdir1/main1.cc
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+int main() {
+ printf("Hello from main1.cc\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/same-gyp-name/src/subdir2/executable.gyp b/third_party/python/gyp/test/same-gyp-name/src/subdir2/executable.gyp
new file mode 100644
index 0000000000..e3537013eb
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/src/subdir2/executable.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program2',
+ 'type': 'executable',
+ 'sources': [
+ 'main2.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-gyp-name/src/subdir2/main2.cc b/third_party/python/gyp/test/same-gyp-name/src/subdir2/main2.cc
new file mode 100644
index 0000000000..0c724dee35
--- /dev/null
+++ b/third_party/python/gyp/test/same-gyp-name/src/subdir2/main2.cc
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+int main() {
+ printf("Hello from main2.cc\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/same-rule-output-file-name/gyptest-all.py b/third_party/python/gyp/test/same-rule-output-file-name/gyptest-all.py
new file mode 100644
index 0000000000..964e6b7721
--- /dev/null
+++ b/third_party/python/gyp/test/same-rule-output-file-name/gyptest-all.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Tests the use of rules with the same output file name.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('subdirs.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('subdirs.gyp', test.ALL, chdir='relocate/src')
+test.must_exist('relocate/src/subdir1/rule.txt')
+test.must_exist('relocate/src/subdir2/rule.txt')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-rule-output-file-name/src/subdir1/subdir1.gyp b/third_party/python/gyp/test/same-rule-output-file-name/src/subdir1/subdir1.gyp
new file mode 100644
index 0000000000..bff381a5a5
--- /dev/null
+++ b/third_party/python/gyp/test/same-rule-output-file-name/src/subdir1/subdir1.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'target1',
+ 'type': 'none',
+ 'sources': [
+ '../touch.py'
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'rule1',
+ 'extension': 'py',
+ 'inputs': [],
+ 'outputs': [
+ 'rule.txt',
+ ],
+ 'action': [
+ 'python', '../touch.py', '<(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-rule-output-file-name/src/subdir2/subdir2.gyp b/third_party/python/gyp/test/same-rule-output-file-name/src/subdir2/subdir2.gyp
new file mode 100644
index 0000000000..12a35600a3
--- /dev/null
+++ b/third_party/python/gyp/test/same-rule-output-file-name/src/subdir2/subdir2.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'target2',
+ 'type': 'none',
+ 'sources': [
+ '../touch.py'
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'rule2',
+ 'extension': 'py',
+ 'inputs': [],
+ 'outputs': [
+ 'rule.txt',
+ ],
+ 'action': [
+ 'python', '../touch.py', '<(_outputs)',
+ ],
+ # Allows the test to run without hermetic cygwin on windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-rule-output-file-name/src/subdirs.gyp b/third_party/python/gyp/test/same-rule-output-file-name/src/subdirs.gyp
new file mode 100644
index 0000000000..25259a38f4
--- /dev/null
+++ b/third_party/python/gyp/test/same-rule-output-file-name/src/subdirs.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'subdirs',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir1/subdir1.gyp:*',
+ 'subdir2/subdir2.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-rule-output-file-name/src/touch.py b/third_party/python/gyp/test/same-rule-output-file-name/src/touch.py
new file mode 100644
index 0000000000..2291e9cc56
--- /dev/null
+++ b/third_party/python/gyp/test/same-rule-output-file-name/src/touch.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+f = open(sys.argv[1], 'w+')
+f.write('Hello from touch.py\n')
+f.close()
diff --git a/third_party/python/gyp/test/same-source-file-name/gyptest-all.py b/third_party/python/gyp/test/same-source-file-name/gyptest-all.py
new file mode 100755
index 0000000000..4c215027c2
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/gyptest-all.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Build a .gyp with two targets that share a common .c source file.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('all.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('all.gyp', test.ALL, chdir='relocate/src')
+
+expect1 = """\
+Hello from prog1.c
+Hello prog1 from func.c
+"""
+
+expect2 = """\
+Hello from prog2.c
+Hello prog2 from func.c
+"""
+
+test.run_built_executable('prog1', chdir='relocate/src', stdout=expect1)
+test.run_built_executable('prog2', chdir='relocate/src', stdout=expect2)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-source-file-name/gyptest-default.py b/third_party/python/gyp/test/same-source-file-name/gyptest-default.py
new file mode 100755
index 0000000000..98757c2697
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/gyptest-default.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Build a .gyp with two targets that share a common .c source file.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('all.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('all.gyp', chdir='relocate/src')
+
+expect1 = """\
+Hello from prog1.c
+Hello prog1 from func.c
+"""
+
+expect2 = """\
+Hello from prog2.c
+Hello prog2 from func.c
+"""
+
+test.run_built_executable('prog1', chdir='relocate/src', stdout=expect1)
+test.run_built_executable('prog2', chdir='relocate/src', stdout=expect2)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-source-file-name/gyptest-pass-executable.py b/third_party/python/gyp/test/same-source-file-name/gyptest-pass-executable.py
new file mode 100755
index 0000000000..1a3dcda23d
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/gyptest-pass-executable.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Checks that gyp does not fail on executable targets which have several files
+with the same basename.
+"""
+
+import TestGyp
+
+# While MSVS supports building executables that contain several files with the
+# same name, the msvs gyp generator does not.
+test = TestGyp.TestGyp(formats=['!msvs'])
+
+test.run_gyp('double-executable.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('double-executable.gyp', test.ALL, chdir='relocate/src')
+
+expect = """\
+Hello from prog3.c
+Hello prog3 from func.c
+Hello prog3 from subdir1/func.c
+Hello prog3 from subdir2/func.c
+"""
+
+test.run_built_executable('prog3', chdir='relocate/src', stdout=expect)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-source-file-name/gyptest-pass-shared.py b/third_party/python/gyp/test/same-source-file-name/gyptest-pass-shared.py
new file mode 100755
index 0000000000..a498f1a846
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/gyptest-pass-shared.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Checks that gyp does not fail on shared_library targets which have several files
+with the same basename.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('double-shared.gyp', chdir='src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-source-file-name/gyptest-static.py b/third_party/python/gyp/test/same-source-file-name/gyptest-static.py
new file mode 100755
index 0000000000..7fa2772040
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/gyptest-static.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Checks that gyp fails on static_library targets which have several files with
+the same basename.
+"""
+
+import os
+import sys
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# Fails by default for compatibility with legacy generators such as
+# VCProj generator for Visual C++ 2008 and Makefile generator on Mac.
+# TODO: Update expected behavior when these legacy generators are deprecated.
+test.run_gyp('double-static.gyp', chdir='src', status=1, stderr=None)
+
+if ((test.format == 'make' and sys.platform == 'darwin') or
+ (test.format == 'msvs' and
+ int(os.environ.get('GYP_MSVS_VERSION', 2010)) < 2010)):
+ test.run_gyp('double-static.gyp', '--no-duplicate-basename-check',
+ chdir='src', status=1, stderr=None)
+else:
+ test.run_gyp('double-static.gyp', '--no-duplicate-basename-check',
+ chdir='src')
+ test.build('double-static.gyp', test.ALL, chdir='src')
+
+test.pass_test()
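The check toggled by --no-duplicate-basename-check amounts to spotting repeated basenames within one target's sources. A hypothetical helper (not gyp's actual implementation) illustrating the idea on the sources from double-static.gyp:

    import collections
    import os

    def has_duplicate_basenames(sources):
        counts = collections.Counter(os.path.basename(s) for s in sources)
        return any(n > 1 for n in counts.values())

    assert has_duplicate_basenames(
        ['prog1.c', 'func.c', 'subdir1/func.c', 'subdir2/func.c'])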
diff --git a/third_party/python/gyp/test/same-source-file-name/src/all.gyp b/third_party/python/gyp/test/same-source-file-name/src/all.gyp
new file mode 100644
index 0000000000..4fe052c668
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/all.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'prog1',
+ 'type': 'executable',
+ 'defines': [
+ 'PROG="prog1"',
+ ],
+ 'sources': [
+ 'prog1.c',
+ 'func.c',
+ ],
+ },
+ {
+ 'target_name': 'prog2',
+ 'type': 'executable',
+ 'defines': [
+ 'PROG="prog2"',
+ ],
+ 'sources': [
+ 'prog2.c',
+ 'func.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/double-executable.gyp b/third_party/python/gyp/test/same-source-file-name/src/double-executable.gyp
new file mode 100644
index 0000000000..477bd87e0d
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/double-executable.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'prog3',
+ 'type': 'executable',
+ 'sources': [
+ 'prog3.c',
+ 'func.c',
+ 'subdir1/func.c',
+ 'subdir2/func.c',
+ ],
+ 'defines': [
+ 'PROG="prog3"',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/double-shared.gyp b/third_party/python/gyp/test/same-source-file-name/src/double-shared.gyp
new file mode 100644
index 0000000000..438b50f3f1
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/double-shared.gyp
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib',
+ 'product_name': 'test_shared_lib',
+ 'type': 'shared_library',
+ 'sources': [
+ 'prog2.c',
+ 'func.c',
+ 'subdir1/func.c',
+ 'subdir2/func.c',
+ ],
+ 'defines': [
+ 'PROG="prog2"',
+ ],
+ 'conditions': [
+ ['OS=="linux"', {
+ 'cflags': ['-fPIC'],
+ }],
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/double-static.gyp b/third_party/python/gyp/test/same-source-file-name/src/double-static.gyp
new file mode 100644
index 0000000000..e49c0e1251
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/double-static.gyp
@@ -0,0 +1,22 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib',
+ 'product_name': 'test_static_lib',
+ 'type': 'static_library',
+ 'sources': [
+ 'prog1.c',
+ 'func.c',
+ 'subdir1/func.c',
+ 'subdir2/func.c',
+ ],
+ 'defines': [
+ 'PROG="prog1"',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/func.c b/third_party/python/gyp/test/same-source-file-name/src/func.c
new file mode 100644
index 0000000000..e069c692a6
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/func.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void func(void)
+{
+ printf("Hello %s from func.c\n", PROG);
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/prog1.c b/third_party/python/gyp/test/same-source-file-name/src/prog1.c
new file mode 100644
index 0000000000..604e2b9c98
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/prog1.c
@@ -0,0 +1,16 @@
+#include <stdio.h>
+
+extern void func(void);
+
+int main(void)
+{
+ printf("Hello from prog1.c\n");
+ func();
+ /*
+ * Uncomment to test same-named files in different directories,
+ * which Visual Studio doesn't support.
+ subdir1_func();
+ subdir2_func();
+ */
+ return 0;
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/prog2.c b/third_party/python/gyp/test/same-source-file-name/src/prog2.c
new file mode 100644
index 0000000000..466ee35003
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/prog2.c
@@ -0,0 +1,16 @@
+#include <stdio.h>
+
+extern void func(void);
+
+int main(void)
+{
+ printf("Hello from prog2.c\n");
+ func();
+ /*
+ * Uncomment to test same-named files in different directories,
+ * which Visual Studio doesn't support.
+ subdir1_func();
+ subdir2_func();
+ */
+ return 0;
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/prog3.c b/third_party/python/gyp/test/same-source-file-name/src/prog3.c
new file mode 100644
index 0000000000..34d495ce08
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/prog3.c
@@ -0,0 +1,18 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+extern void func(void);
+extern void subdir1_func(void);
+extern void subdir2_func(void);
+
+int main(void)
+{
+ printf("Hello from prog3.c\n");
+ func();
+ subdir1_func();
+ subdir2_func();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/subdir1/func.c b/third_party/python/gyp/test/same-source-file-name/src/subdir1/func.c
new file mode 100644
index 0000000000..b73450d105
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/subdir1/func.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void subdir1_func(void)
+{
+ printf("Hello %s from subdir1/func.c\n", PROG);
+}
diff --git a/third_party/python/gyp/test/same-source-file-name/src/subdir2/func.c b/third_party/python/gyp/test/same-source-file-name/src/subdir2/func.c
new file mode 100644
index 0000000000..0248b5720e
--- /dev/null
+++ b/third_party/python/gyp/test/same-source-file-name/src/subdir2/func.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+void subdir2_func(void)
+{
+ printf("Hello %s from subdir2/func.c\n", PROG);
+}
diff --git a/third_party/python/gyp/test/same-target-name-different-directory/gyptest-all.py b/third_party/python/gyp/test/same-target-name-different-directory/gyptest-all.py
new file mode 100644
index 0000000000..755691b576
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name-different-directory/gyptest-all.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test cases when multiple targets in different directories have the same name.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['ninja', 'make'])
+
+# xcode-ninja fails to generate a project due to id collisions
+# cf. https://code.google.com/p/gyp/issues/detail?id=461
+if test.format == 'xcode-ninja':
+ test.skip_test()
+
+test.run_gyp('subdirs.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+# Test that we build all targets.
+test.build('subdirs.gyp', 'target', chdir='relocate/src')
+test.must_exist('relocate/src/subdir1/action1.txt')
+test.must_exist('relocate/src/subdir2/action2.txt')
+
+# Test that we build all targets using the correct actions, even if they have
+# the same names.
+test.build('subdirs.gyp', 'target_same_action_name', chdir='relocate/src')
+test.must_exist('relocate/src/subdir1/action.txt')
+test.must_exist('relocate/src/subdir2/action.txt')
+
+# Test that we build all targets using the correct rules, even if they have
+# the same names.
+test.build('subdirs.gyp', 'target_same_rule_name', chdir='relocate/src')
+test.must_exist('relocate/src/subdir1/rule.txt')
+test.must_exist('relocate/src/subdir2/rule.txt')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-target-name-different-directory/src/subdir1/subdir1.gyp b/third_party/python/gyp/test/same-target-name-different-directory/src/subdir1/subdir1.gyp
new file mode 100644
index 0000000000..d4ec2e679a
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name-different-directory/src/subdir1/subdir1.gyp
@@ -0,0 +1,66 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'target',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action1',
+ 'inputs': [],
+ 'outputs': [
+ 'action1.txt',
+ ],
+ 'action': [
+ 'python', '../touch.py', '<(_outputs)',
+ ],
+          # Allows the test to run without hermetic Cygwin on Windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'target_same_action_name',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action',
+ 'inputs': [],
+ 'outputs': [
+ 'action.txt',
+ ],
+ 'action': [
+ 'python', '../touch.py', '<(_outputs)',
+ ],
+          # Allows the test to run without hermetic Cygwin on Windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'target_same_rule_name',
+ 'type': 'none',
+ 'sources': [
+ '../touch.py'
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'rule',
+ 'extension': 'py',
+ 'inputs': [],
+ 'outputs': [
+ 'rule.txt',
+ ],
+ 'action': [
+ 'python', '../touch.py', '<(_outputs)',
+ ],
+          # Allows the test to run without hermetic Cygwin on Windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-target-name-different-directory/src/subdir2/subdir2.gyp b/third_party/python/gyp/test/same-target-name-different-directory/src/subdir2/subdir2.gyp
new file mode 100644
index 0000000000..9006d450b2
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name-different-directory/src/subdir2/subdir2.gyp
@@ -0,0 +1,66 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'target',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action2',
+ 'inputs': [],
+ 'outputs': [
+ 'action2.txt',
+ ],
+ 'action': [
+ 'python', '../touch.py', '<(_outputs)',
+ ],
+          # Allows the test to run without hermetic Cygwin on Windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'target_same_action_name',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'action',
+ 'inputs': [],
+ 'outputs': [
+ 'action.txt',
+ ],
+ 'action': [
+ 'python', '../touch.py', '<(_outputs)',
+ ],
+          # Allows the test to run without hermetic Cygwin on Windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ {
+ 'target_name': 'target_same_rule_name',
+ 'type': 'none',
+ 'sources': [
+ '../touch.py'
+ ],
+ 'rules': [
+ {
+ 'rule_name': 'rule',
+ 'extension': 'py',
+ 'inputs': [],
+ 'outputs': [
+ 'rule.txt',
+ ],
+ 'action': [
+ 'python', '../touch.py', '<(_outputs)',
+ ],
+          # Allows the test to run without hermetic Cygwin on Windows.
+ 'msvs_cygwin_shell': 0,
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-target-name-different-directory/src/subdirs.gyp b/third_party/python/gyp/test/same-target-name-different-directory/src/subdirs.gyp
new file mode 100644
index 0000000000..65413e73b2
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name-different-directory/src/subdirs.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'subdirs',
+ 'type': 'none',
+ 'dependencies': [
+ 'subdir1/subdir1.gyp:*',
+ 'subdir2/subdir2.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-target-name-different-directory/src/touch.py b/third_party/python/gyp/test/same-target-name-different-directory/src/touch.py
new file mode 100644
index 0000000000..2291e9cc56
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name-different-directory/src/touch.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+with open(sys.argv[1], 'w+') as f:
+  f.write('Hello from touch.py\n')
diff --git a/third_party/python/gyp/test/same-target-name/gyptest-same-target-name.py b/third_party/python/gyp/test/same-target-name/gyptest-same-target-name.py
new file mode 100755
index 0000000000..bfe5540f31
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name/gyptest-same-target-name.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Check that duplicate targets in a directory give an error.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# Require that gyp files with duplicate targets spit out an error.
+test.run_gyp('all.gyp', chdir='src', status=1, stderr=None)
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/same-target-name/src/all.gyp b/third_party/python/gyp/test/same-target-name/src/all.gyp
new file mode 100644
index 0000000000..ac16976da6
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name/src/all.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'all_exes',
+ 'type': 'none',
+ 'dependencies': [
+ 'executable1.gyp:*',
+ 'executable2.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-target-name/src/executable1.gyp b/third_party/python/gyp/test/same-target-name/src/executable1.gyp
new file mode 100644
index 0000000000..3c492c1b37
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name/src/executable1.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [
+ 'main1.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/same-target-name/src/executable2.gyp b/third_party/python/gyp/test/same-target-name/src/executable2.gyp
new file mode 100644
index 0000000000..41e84a61c6
--- /dev/null
+++ b/third_party/python/gyp/test/same-target-name/src/executable2.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [
+ 'main2.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/sanitize-rule-names/blah.S b/third_party/python/gyp/test/sanitize-rule-names/blah.S
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/sanitize-rule-names/blah.S
diff --git a/third_party/python/gyp/test/sanitize-rule-names/gyptest-sanitize-rule-names.py b/third_party/python/gyp/test/sanitize-rule-names/gyptest-sanitize-rule-names.py
new file mode 100644
index 0000000000..968a0ce5ce
--- /dev/null
+++ b/third_party/python/gyp/test/sanitize-rule-names/gyptest-sanitize-rule-names.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure rule names with non-"normal" characters in them don't produce
+broken build files. This scenario originally generated broken .ninja files.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+test.run_gyp('sanitize-rule-names.gyp')
+test.build('sanitize-rule-names.gyp', test.ALL)
+test.pass_test()
diff --git a/third_party/python/gyp/test/sanitize-rule-names/hello.cc b/third_party/python/gyp/test/sanitize-rule-names/hello.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/sanitize-rule-names/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/sanitize-rule-names/sanitize-rule-names.gyp b/third_party/python/gyp/test/sanitize-rule-names/sanitize-rule-names.gyp
new file mode 100644
index 0000000000..184253e966
--- /dev/null
+++ b/third_party/python/gyp/test/sanitize-rule-names/sanitize-rule-names.gyp
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 's_test',
+ 'type': 'executable',
+ 'rules': [
+ {
+ # Make sure this rule name doesn't cause an invalid ninja file.
+ 'rule_name': 'rule name with odd characters ()/',
+ 'extension': 'S',
+ 'outputs': ['outfile'],
+ 'msvs_cygwin_shell': 0,
+ 'msvs_quote_cmd': 0,
+ 'action': ['python', 'script.py', '<(RULE_INPUT_PATH)', 'outfile'],
+ },
+ ],
+ 'sources': [
+ 'blah.S',
+ 'hello.cc',
+ ],
+ },
+ ],
+}
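
The rule name above deliberately contains spaces, parentheses, and a slash.
What a generator has to do with such a name is roughly this kind of
normalization (a sketch of the idea, not gyp's actual code):

    import re

    def sanitize_rule_name(name):
      # Keep only characters that are safe to embed in generated build
      # files; everything else becomes an underscore.
      return re.sub(r'[^a-zA-Z0-9_]', '_', name)

    # sanitize_rule_name('rule name with odd characters ()/')
    # returns 'rule_name_with_odd_characters____'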
diff --git a/third_party/python/gyp/test/sanitize-rule-names/script.py b/third_party/python/gyp/test/sanitize-rule-names/script.py
new file mode 100644
index 0000000000..ae2efa1df4
--- /dev/null
+++ b/third_party/python/gyp/test/sanitize-rule-names/script.py
@@ -0,0 +1,10 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import shutil
+import sys
+
+shutil.copyfile(*sys.argv[1:])
diff --git a/third_party/python/gyp/test/self-dependency/common.gypi b/third_party/python/gyp/test/self-dependency/common.gypi
new file mode 100644
index 0000000000..aae221a5dd
--- /dev/null
+++ b/third_party/python/gyp/test/self-dependency/common.gypi
@@ -0,0 +1,13 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# A common file that other .gyp files include.
+# Makes every target in the project depend on dep.gyp:dep.
+{
+ 'target_defaults': {
+ 'dependencies': [
+ 'dep.gyp:dep',
+ ],
+ },
+}
diff --git a/third_party/python/gyp/test/self-dependency/dep.gyp b/third_party/python/gyp/test/self-dependency/dep.gyp
new file mode 100644
index 0000000000..2b6c9dda85
--- /dev/null
+++ b/third_party/python/gyp/test/self-dependency/dep.gyp
@@ -0,0 +1,23 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# dep.gyp contains a target named dep, on which all the targets in the
+# project depend. This means dep ends up depending on itself, a cycle that
+# is pruned by setting prune_self_dependency to 1.
+
+{
+ 'includes': [
+ 'common.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'dep',
+ 'type': 'none',
+ 'variables': {
+        # Without this, GYP will report a cycle in the dependency graph.
+ 'prune_self_dependency': 1,
+ },
+ },
+ ],
+}
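
Conceptually, pruning a self-dependency just means dropping edges from a
target to itself before cycle detection runs. A toy illustration (the data
shape is hypothetical, not gyp's internal graph representation):

    def prune_self_edges(deps):
      # deps maps each target to the set of targets it depends on.
      return dict((t, set(d for d in ds if d != t))
                  for t, ds in deps.items())

    # prune_self_edges({'dep': set(['dep']), 'a': set(['dep'])})
    # returns {'dep': set(), 'a': set(['dep'])}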
diff --git a/third_party/python/gyp/test/self-dependency/gyptest-self-dependency.py b/third_party/python/gyp/test/self-dependency/gyptest-self-dependency.py
new file mode 100755
index 0000000000..82fab271c5
--- /dev/null
+++ b/third_party/python/gyp/test/self-dependency/gyptest-self-dependency.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that pulling in a dependency a second time in a conditional works for
+shared_library targets. Regression test for http://crbug.com/122588
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('self_dependency.gyp')
+
+# If running gyp worked, all is well.
+test.pass_test()
diff --git a/third_party/python/gyp/test/self-dependency/self_dependency.gyp b/third_party/python/gyp/test/self-dependency/self_dependency.gyp
new file mode 100644
index 0000000000..0ca76c669b
--- /dev/null
+++ b/third_party/python/gyp/test/self-dependency/self_dependency.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'common.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'a',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/sibling/gyptest-all.py b/third_party/python/gyp/test/sibling/gyptest-all.py
new file mode 100755
index 0000000000..318e1a3d84
--- /dev/null
+++ b/third_party/python/gyp/test/sibling/gyptest-all.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# The xcode-ninja generator mishandles gypfiles that are not at the
+# project root.
+# cf. https://code.google.com/p/gyp/issues/detail?id=460
+if test.format == 'xcode-ninja':
+ test.skip_test()
+
+test.run_gyp('build/all.gyp', chdir='src')
+
+test.build('build/all.gyp', test.ALL, chdir='src')
+
+chdir = 'src/build'
+
+# The top-level Makefile is in the directory where gyp was run.
+# TODO(mmoss) Should the Makefile go in the directory of the passed-in .gyp
+# file? What about when passing in multiple .gyp files? Would sub-project
+# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
+if test.format in ('make', 'ninja', 'cmake'):
+ chdir = 'src'
+
+if test.format == 'xcode':
+ chdir = 'src/prog1'
+test.run_built_executable('program1',
+ chdir=chdir,
+ stdout="Hello from prog1.c\n")
+
+if test.format == 'xcode':
+ chdir = 'src/prog2'
+test.run_built_executable('program2',
+ chdir=chdir,
+ stdout="Hello from prog2.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/sibling/gyptest-relocate.py b/third_party/python/gyp/test/sibling/gyptest-relocate.py
new file mode 100755
index 0000000000..05fa9d96fe
--- /dev/null
+++ b/third_party/python/gyp/test/sibling/gyptest-relocate.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# The xcode-ninja generator mishandles gypfiles that are not at the
+# project root.
+# cf. https://code.google.com/p/gyp/issues/detail?id=460
+if test.format == 'xcode-ninja':
+ test.skip_test()
+
+test.run_gyp('build/all.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('build/all.gyp', test.ALL, chdir='relocate/src')
+
+chdir = 'relocate/src/build'
+
+# The top-level Makefile is in the directory where gyp was run.
+# TODO(mmoss) Should the Makefile go in the directory of the passed-in .gyp
+# file? What about when passing in multiple .gyp files? Would sub-project
+# Makefiles (see http://codereview.chromium.org/340008 comments) solve this?
+if test.format in ('make', 'ninja', 'cmake'):
+ chdir = 'relocate/src'
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/prog1'
+test.run_built_executable('program1',
+ chdir=chdir,
+ stdout="Hello from prog1.c\n")
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/prog2'
+test.run_built_executable('program2',
+ chdir=chdir,
+ stdout="Hello from prog2.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/sibling/src/build/all.gyp b/third_party/python/gyp/test/sibling/src/build/all.gyp
new file mode 100644
index 0000000000..79c80c9363
--- /dev/null
+++ b/third_party/python/gyp/test/sibling/src/build/all.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'All',
+ 'type': 'none',
+ 'dependencies': [
+ '../prog1/prog1.gyp:*',
+ '../prog2/prog2.gyp:*',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/sibling/src/prog1/prog1.c b/third_party/python/gyp/test/sibling/src/prog1/prog1.c
new file mode 100644
index 0000000000..218e99401c
--- /dev/null
+++ b/third_party/python/gyp/test/sibling/src/prog1/prog1.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from prog1.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/sibling/src/prog1/prog1.gyp b/third_party/python/gyp/test/sibling/src/prog1/prog1.gyp
new file mode 100644
index 0000000000..4532e4be10
--- /dev/null
+++ b/third_party/python/gyp/test/sibling/src/prog1/prog1.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program1',
+ 'type': 'executable',
+ 'sources': [
+ 'prog1.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/sibling/src/prog2/prog2.c b/third_party/python/gyp/test/sibling/src/prog2/prog2.c
new file mode 100644
index 0000000000..12a31883b9
--- /dev/null
+++ b/third_party/python/gyp/test/sibling/src/prog2/prog2.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from prog2.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/sibling/src/prog2/prog2.gyp b/third_party/python/gyp/test/sibling/src/prog2/prog2.gyp
new file mode 100644
index 0000000000..4cf7f6eb2f
--- /dev/null
+++ b/third_party/python/gyp/test/sibling/src/prog2/prog2.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program2',
+ 'type': 'executable',
+ 'sources': [
+ 'prog2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/small/gyptest-small.py b/third_party/python/gyp/test/small/gyptest-small.py
new file mode 100755
index 0000000000..e6cb25f3a7
--- /dev/null
+++ b/third_party/python/gyp/test/small/gyptest-small.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Runs small tests.
+"""
+
+import imp
+import os
+import platform
+import sys
+import unittest
+
+import TestGyp
+
+
+test = TestGyp.TestGyp()
+
+# Add pylib to the import path (so tests can import their dependencies).
+# This is consistent with the path.append done in the top-level "gyp" file.
+sys.path.insert(0, os.path.join(test._cwd, 'pylib'))
+
+# Add new test suites here.
+files_to_test = [
+ 'pylib/gyp/MSVSSettings_test.py',
+ 'pylib/gyp/easy_xml_test.py',
+ 'pylib/gyp/generator/msvs_test.py',
+ 'pylib/gyp/generator/ninja_test.py',
+ 'pylib/gyp/generator/xcode_test.py',
+ 'pylib/gyp/common_test.py',
+ 'pylib/gyp/input_test.py',
+]
+
+# Collect all the suites from the above files.
+suites = []
+for filename in files_to_test:
+ # Carve the module name out of the path.
+ name = os.path.splitext(os.path.split(filename)[1])[0]
+ # Find the complete module path.
+ full_filename = os.path.join(test._cwd, filename)
+ # Load the module.
+ module = imp.load_source(name, full_filename)
+ # Add it to the list of test suites.
+ suites.append(unittest.defaultTestLoader.loadTestsFromModule(module))
+# Create combined suite.
+all_tests = unittest.TestSuite(suites)
+
+# Run all the tests.
+result = unittest.TextTestRunner(verbosity=2).run(all_tests)
+if result.failures or result.errors:
+ test.fail_test()
+
+test.pass_test()
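
Note that the imp module used above is deprecated; on Python 3.5+ the same
per-file module loading can be done with importlib. A rough equivalent, for
reference:

    import importlib.util

    def load_source(name, path):
      # importlib-based replacement for the deprecated imp.load_source.
      spec = importlib.util.spec_from_file_location(name, path)
      module = importlib.util.module_from_spec(spec)
      spec.loader.exec_module(module)
      return module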
diff --git a/third_party/python/gyp/test/standalone-static-library/gyptest-standalone-static-library.py b/third_party/python/gyp/test/standalone-static-library/gyptest-standalone-static-library.py
new file mode 100644
index 0000000000..50535abfc7
--- /dev/null
+++ b/third_party/python/gyp/test/standalone-static-library/gyptest-standalone-static-library.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies build of a static_library with the standalone_static_library flag set.
+"""
+
+import os
+import subprocess
+import sys
+import TestGyp
+
+# standalone_static_library currently means two things: a specific output
+# location for the built target and non-thin archive files.
+test = TestGyp.TestGyp()
+
+# Verify that types other than static_library cause a failure.
+test.run_gyp('invalid.gyp', status=1, stderr=None)
+target_str = 'invalid.gyp:bad#target'
+err = ['gyp: Target %s has type executable but standalone_static_library flag '
+ 'is only valid for static_library type.' % target_str]
+test.must_contain_all_lines(test.stderr(), err)
+
+# Build a valid standalone_static_library.
+test.run_gyp('mylib.gyp')
+test.build('mylib.gyp', target='prog')
+
+# Verify that the static library is copied to the correct location.
+# We expect the library to be copied to $PRODUCT_DIR.
+standalone_static_library_dir = test.EXECUTABLE
+path_to_lib = os.path.split(
+ test.built_file_path('mylib', type=standalone_static_library_dir))[0]
+lib_name = test.built_file_basename('mylib', type=test.STATIC_LIB)
+path = os.path.join(path_to_lib, lib_name)
+test.must_exist(path)
+
+# Verify that the program runs properly.
+expect = 'hello from mylib.c\n'
+test.run_built_executable('prog', stdout=expect)
+
+# Verify that libmylib.a contains symbols. "ar -x" fails on a 'thin' archive.
+supports_thick = ('make', 'ninja', 'cmake')
+if test.format in supports_thick and sys.platform.startswith('linux'):
+ retcode = subprocess.call(['ar', '-x', path])
+ assert retcode == 0
+
+test.pass_test()
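
The 'ar -x' call above doubles as a thin-archive check only because
extraction fails on thin archives. A more direct check could read the
archive magic instead; GNU ar writes '!<thin>\n' for thin archives and
'!<arch>\n' for regular ones (a sketch, assuming a GNU toolchain):

    def is_thin_archive(path):
      # The first eight bytes of an ar archive identify its flavor.
      with open(path, 'rb') as f:
        return f.read(8) == b'!<thin>\n'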
diff --git a/third_party/python/gyp/test/standalone-static-library/invalid.gyp b/third_party/python/gyp/test/standalone-static-library/invalid.gyp
new file mode 100644
index 0000000000..54b32117e0
--- /dev/null
+++ b/third_party/python/gyp/test/standalone-static-library/invalid.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'bad',
+ 'type': 'executable',
+ 'standalone_static_library': 1,
+ 'sources': [
+ 'prog.c',
+ ],
+ },
+ ],
+}
\ No newline at end of file
diff --git a/third_party/python/gyp/test/standalone-static-library/mylib.c b/third_party/python/gyp/test/standalone-static-library/mylib.c
new file mode 100644
index 0000000000..108be618c2
--- /dev/null
+++ b/third_party/python/gyp/test/standalone-static-library/mylib.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+void print(void)
+{
+ printf("hello from mylib.c\n");
+ return;
+}
diff --git a/third_party/python/gyp/test/standalone-static-library/mylib.gyp b/third_party/python/gyp/test/standalone-static-library/mylib.gyp
new file mode 100644
index 0000000000..2d191de319
--- /dev/null
+++ b/third_party/python/gyp/test/standalone-static-library/mylib.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'mylib',
+ 'type': 'static_library',
+ 'standalone_static_library': 1,
+ 'sources': [
+ 'mylib.c',
+ ],
+ },
+ {
+ 'target_name': 'prog',
+ 'type': 'executable',
+ 'sources': [
+ 'prog.c',
+ ],
+ 'dependencies': [
+ 'mylib',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/standalone-static-library/prog.c b/third_party/python/gyp/test/standalone-static-library/prog.c
new file mode 100644
index 0000000000..8af5c90844
--- /dev/null
+++ b/third_party/python/gyp/test/standalone-static-library/prog.c
@@ -0,0 +1,7 @@
+extern void print(void);
+
+int main(void)
+{
+ print();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/standalone/gyptest-standalone.py b/third_party/python/gyp/test/standalone/gyptest-standalone.py
new file mode 100644
index 0000000000..0581d53879
--- /dev/null
+++ b/third_party/python/gyp/test/standalone/gyptest-standalone.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that a project generated with '-Gstandalone' does not reference the
+source .gyp file anywhere in its generated output.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+import os
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('standalone.gyp', '-Gstandalone')
+
+# Look at all the files in the tree to make sure none
+# of them reference the gyp file.
+for root, dirs, files in os.walk("."):
+ for file in files:
+    # Ignore this test script itself.
+ if os.path.splitext(__file__)[0] in file:
+ continue
+ file = os.path.join(root, file)
+ contents = open(file, 'rb').read().decode('utf-8', 'ignore')
+ if 'standalone.gyp' in contents:
+ print('gyp file referenced in generated output: %s' % file)
+ test.fail_test()
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/standalone/standalone.gyp b/third_party/python/gyp/test/standalone/standalone.gyp
new file mode 100644
index 0000000000..b2a6785430
--- /dev/null
+++ b/third_party/python/gyp/test/standalone/standalone.gyp
@@ -0,0 +1,12 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name' : 'foo',
+ 'type' : 'executable'
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/subdirectory/gyptest-SYMROOT-all.py b/third_party/python/gyp/test/subdirectory/gyptest-SYMROOT-all.py
new file mode 100755
index 0000000000..9dfb8b05d4
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/gyptest-SYMROOT-all.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a target and a subsidiary dependent target from a
+.gyp file in a subdirectory, without specifying an explicit output build
+directory, and using the generated solution or project file at the top
+of the tree as the entry point.
+
+The configuration sets the Xcode SYMROOT variable and uses --depth=
+to make Xcode behave like the other build tools--that is, put all
+built targets in a single output build directory at the top of the tree.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+# Suppress the test infrastructure's setting SYMROOT on the command line.
+test.build('prog1.gyp', test.ALL, SYMROOT=None, chdir='relocate/src')
+
+test.run_built_executable('prog1',
+ stdout="Hello from prog1.c\n",
+ chdir='relocate/src')
+test.run_built_executable('prog2',
+ stdout="Hello from prog2.c\n",
+ chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/subdirectory/gyptest-SYMROOT-default.py b/third_party/python/gyp/test/subdirectory/gyptest-SYMROOT-default.py
new file mode 100755
index 0000000000..8796650905
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/gyptest-SYMROOT-default.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a target and a subsidiary dependent target from a
+.gyp file in a subdirectory, without specifying an explicit output build
+directory, and using the generated solution or project file at the top
+of the tree as the entry point.
+
+The configuration sets the Xcode SYMROOT variable and uses --depth=
+to make Xcode behave like the other build tools--that is, put all
+built targets in a single output build directory at the top of the tree.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+# Suppress the test infrastructure's setting SYMROOT on the command line.
+test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
+
+test.run_built_executable('prog1',
+ stdout="Hello from prog1.c\n",
+ chdir='relocate/src')
+
+test.run_built_executable('prog2',
+ stdout="Hello from prog2.c\n",
+ chdir='relocate/src')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/subdirectory/gyptest-subdir-all.py b/third_party/python/gyp/test/subdirectory/gyptest-subdir-all.py
new file mode 100755
index 0000000000..d5c458454e
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/gyptest-subdir-all.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a subsidiary dependent target from a .gyp file in a
+subdirectory, without specifying an explicit output build directory,
+and using the subdirectory's solution or project file as the entry point.
+"""
+
+import TestGyp
+
+# Ninja doesn't support relocation.
+# CMake produces a single CMakeLists.txt in the output directory.
+test = TestGyp.TestGyp(formats=['!ninja', '!cmake'])
+
+test.run_gyp('prog1.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+chdir = 'relocate/src/subdir'
+target = test.ALL
+
+test.build('prog2.gyp', target, chdir=chdir)
+
+test.built_file_must_not_exist('prog1', type=test.EXECUTABLE, chdir=chdir)
+
+test.run_built_executable('prog2',
+ chdir=chdir,
+ stdout="Hello from prog2.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/subdirectory/gyptest-subdir-default.py b/third_party/python/gyp/test/subdirectory/gyptest-subdir-default.py
new file mode 100755
index 0000000000..2cb6659beb
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/gyptest-subdir-default.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a subsidiary dependent target from a .gyp file in a
+subdirectory, without specifying an explicit output build directory,
+and using the subdirectory's solution or project file as the entry point.
+"""
+
+import TestGyp
+import errno
+
+# Ninja doesn't support relocation.
+# CMake produces a single CMakeLists.txt in the output directory.
+test = TestGyp.TestGyp(formats=['!ninja', '!cmake'])
+
+test.run_gyp('prog1.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+chdir = 'relocate/src/subdir'
+
+test.build('prog2.gyp', chdir=chdir)
+
+test.built_file_must_not_exist('prog1', type=test.EXECUTABLE, chdir=chdir)
+
+test.run_built_executable('prog2',
+ chdir=chdir,
+ stdout="Hello from prog2.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/subdirectory/gyptest-subdir2-deep.py b/third_party/python/gyp/test/subdirectory/gyptest-subdir2-deep.py
new file mode 100755
index 0000000000..48548982f8
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/gyptest-subdir2-deep.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that building a project rooted several layers under src_dir works.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('prog3.gyp', chdir='src/subdir/subdir2')
+
+test.relocate('src', 'relocate/src')
+
+test.build('prog3.gyp', test.ALL, chdir='relocate/src/subdir/subdir2')
+
+test.run_built_executable('prog3',
+ chdir='relocate/src/subdir/subdir2',
+ stdout="Hello from prog3.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/subdirectory/gyptest-top-all.py b/third_party/python/gyp/test/subdirectory/gyptest-top-all.py
new file mode 100755
index 0000000000..b3c25b1f8d
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/gyptest-top-all.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a target and a subsidiary dependent target from a
+.gyp file in a subdirectory, without specifying an explicit output build
+directory, and using the generated solution or project file at the top
+of the tree as the entry point.
+
+There is a difference here in the default behavior of the underlying
+build tools. Specifically, when building the entire "solution", Xcode
+puts the output of each project relative to the .xcodeproj directory,
+while Visual Studio (and our implementation of Make) puts it
+in a build directory relative to the "solution"--that is, the entry point
+from which you built the entire tree.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('prog1.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('prog1.gyp', test.ALL, chdir='relocate/src')
+
+test.run_built_executable('prog1',
+ stdout="Hello from prog1.c\n",
+ chdir='relocate/src')
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('prog2',
+ chdir=chdir,
+ stdout="Hello from prog2.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/subdirectory/gyptest-top-default.py b/third_party/python/gyp/test/subdirectory/gyptest-top-default.py
new file mode 100755
index 0000000000..2448dd98ea
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/gyptest-top-default.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a target and a subsidiary dependent target from a
+.gyp file in a subdirectory, without specifying an explicit output build
+directory, and using the generated solution or project file at the top
+of the tree as the entry point.
+
+There is a difference here in the default behavior of the underlying
+build tools. Specifically, when building the entire "solution", Xcode
+puts the output of each project relative to the .xcodeproj directory,
+while Visual Studio (and our implementation of Make) puts it
+in a build directory relative to the "solution"--that is, the entry point
+from which you built the entire tree.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('prog1.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('prog1.gyp', chdir='relocate/src')
+
+test.run_built_executable('prog1',
+ stdout="Hello from prog1.c\n",
+ chdir='relocate/src')
+
+if test.format == 'xcode':
+ chdir = 'relocate/src/subdir'
+else:
+ chdir = 'relocate/src'
+test.run_built_executable('prog2',
+ chdir=chdir,
+ stdout="Hello from prog2.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/subdirectory/src/prog1.c b/third_party/python/gyp/test/subdirectory/src/prog1.c
new file mode 100644
index 0000000000..218e99401c
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/src/prog1.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from prog1.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/subdirectory/src/prog1.gyp b/third_party/python/gyp/test/subdirectory/src/prog1.gyp
new file mode 100644
index 0000000000..2aa66ce7d7
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/src/prog1.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ 'symroot.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog1',
+ 'type': 'executable',
+ 'dependencies': [
+ 'subdir/prog2.gyp:prog2',
+ ],
+ 'sources': [
+ 'prog1.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/subdirectory/src/subdir/prog2.c b/third_party/python/gyp/test/subdirectory/src/subdir/prog2.c
new file mode 100644
index 0000000000..12a31883b9
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/src/subdir/prog2.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from prog2.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/subdirectory/src/subdir/prog2.gyp b/third_party/python/gyp/test/subdirectory/src/subdir/prog2.gyp
new file mode 100644
index 0000000000..c6cd35f7f8
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/src/subdir/prog2.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../symroot.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog2',
+ 'type': 'executable',
+ 'sources': [
+ 'prog2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/subdirectory/src/subdir/subdir2/prog3.c b/third_party/python/gyp/test/subdirectory/src/subdir/subdir2/prog3.c
new file mode 100644
index 0000000000..a326dc61b6
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/src/subdir/subdir2/prog3.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from prog3.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/subdirectory/src/subdir/subdir2/prog3.gyp b/third_party/python/gyp/test/subdirectory/src/subdir/subdir2/prog3.gyp
new file mode 100644
index 0000000000..b49fb59113
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/src/subdir/subdir2/prog3.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': [
+ '../../symroot.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'prog3',
+ 'type': 'executable',
+ 'sources': [
+ 'prog3.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/subdirectory/src/symroot.gypi b/third_party/python/gyp/test/subdirectory/src/symroot.gypi
new file mode 100644
index 0000000000..519916427c
--- /dev/null
+++ b/third_party/python/gyp/test/subdirectory/src/symroot.gypi
@@ -0,0 +1,16 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'set_symroot%': 0,
+ },
+ 'conditions': [
+ ['set_symroot == 1', {
+ 'xcode_settings': {
+ 'SYMROOT': '<(DEPTH)/build',
+ },
+ }],
+ ],
+}
diff --git a/third_party/python/gyp/test/symlinks/gyptest-symlinks.py b/third_party/python/gyp/test/symlinks/gyptest-symlinks.py
new file mode 100755
index 0000000000..278818a992
--- /dev/null
+++ b/third_party/python/gyp/test/symlinks/gyptest-symlinks.py
@@ -0,0 +1,66 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test that RelativePath(s, d) doesn't return a path starting with '..' when
+s is textually below d, but is also a symlink to a file that is not below d.
+
+Returning '..' in this case would break the Ninja generator, because it
+computes output directories by concatenating paths, and concatenating a
+path starting with '..' can unexpectedly erase other parts of the path. It's
+difficult to test this directly since the test harness assumes toplevel_dir is
+the root of the repository, but this test should at least verify that the
+required behavior doesn't change.
+"""
+
+import TestGyp
+import os
+import sys
+import tempfile
+
+if sys.platform != 'win32':
+ test = TestGyp.TestGyp()
+
+ # Copy hello.gyp and hello.c to temporary named files, which will then be
+ # symlinked back and processed. Note that we don't ask gyp to touch the
+ # original files at all; they are only there as source material for the copy.
+ # That's why hello.gyp references symlink_hello.c instead of hello.c.
+ with tempfile.NamedTemporaryFile(mode='w+') as gyp_file:
+ with tempfile.NamedTemporaryFile(mode='w+') as c_file:
+ with open('hello.gyp') as orig_gyp_file:
+ gyp_file.write(orig_gyp_file.read())
+ gyp_file.flush()
+ with open('hello.c') as orig_c_file:
+ c_file.write(orig_c_file.read())
+ c_file.flush()
+ # We need to flush the files because we want to read them before closing
+ # them, since when they are closed they will be deleted.
+
+ # Don't proceed with the test on a system that doesn't let you read from
+ # a still-open temporary file.
+ if os.path.getsize(gyp_file.name) == 0:
+ raise OSError("Copy to temporary file didn't work.")
+
+ symlink_gyp = test.built_file_path('symlink_hello.gyp')
+ symlink_c = test.built_file_path('symlink_hello.c')
+ outdir = os.path.dirname(symlink_gyp)
+
+ # Make sure the outdir exists.
+ try:
+ os.makedirs(outdir)
+ except OSError:
+ if not os.path.isdir(outdir):
+ raise
+ os.symlink(gyp_file.name, symlink_gyp)
+ os.symlink(c_file.name, symlink_c)
+
+ # Run gyp on the symlinked files.
+ test.run_gyp(symlink_gyp, chdir=outdir)
+ test.build(symlink_gyp, chdir=outdir)
+ test.run_built_executable('symlink_hello', stdout="Hello, world!\n",
+ chdir=outdir)
+
+ test.pass_test()
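
The property under test can be restated with plain os.path.relpath: a path
that is textually below the base directory should yield a relative path
with no leading '..', regardless of where a symlink at that path actually
points. A small illustration (the paths are hypothetical):

    import os

    base = '/repo/out'
    textual = '/repo/out/symlink_hello.c'  # may be a symlink into /tmp
    assert not os.path.relpath(textual, base).startswith('..')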
diff --git a/third_party/python/gyp/test/symlinks/hello.c b/third_party/python/gyp/test/symlinks/hello.c
new file mode 100644
index 0000000000..c63204b948
--- /dev/null
+++ b/third_party/python/gyp/test/symlinks/hello.c
@@ -0,0 +1,12 @@
+/* Copyright (c) 2015 Google Inc. All rights reserved.
+ Use of this source code is governed by a BSD-style license that can be
+ found in the LICENSE file.
+*/
+
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello, world!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/symlinks/hello.gyp b/third_party/python/gyp/test/symlinks/hello.gyp
new file mode 100644
index 0000000000..81d9f18e09
--- /dev/null
+++ b/third_party/python/gyp/test/symlinks/hello.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'symlink_hello',
+ 'type': 'executable',
+ 'sources': [
+ 'symlink_hello.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/target/gyptest-target.py b/third_party/python/gyp/test/target/gyptest-target.py
new file mode 100644
index 0000000000..4338db739c
--- /dev/null
+++ b/third_party/python/gyp/test/target/gyptest-target.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies the simplest-possible build of a "Hello, world!" program
+using a non-default extension. In particular, verifies how
+target_extension is used to avoid MSB8012 for msvs.
+"""
+
+import sys
+import TestGyp
+
+if sys.platform in ('win32', 'cygwin'):
+ test = TestGyp.TestGyp()
+
+ test.run_gyp('target.gyp')
+ test.build('target.gyp')
+
+ # executables
+ test.built_file_must_exist('hello1.stuff', test.EXECUTABLE, bare=True)
+ test.built_file_must_exist('hello2.exe', test.EXECUTABLE, bare=True)
+ test.built_file_must_not_exist('hello2.stuff', test.EXECUTABLE, bare=True)
+
+ # check msvs log for errors
+ if test.format == "msvs":
+ log_file = "obj\\hello1\\hello1.log"
+ test.built_file_must_exist(log_file)
+ test.built_file_must_not_contain(log_file, "MSB8012")
+
+ log_file = "obj\\hello2\\hello2.log"
+ test.built_file_must_exist(log_file)
+ test.built_file_must_not_contain(log_file, "MSB8012")
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/target/hello.c b/third_party/python/gyp/test/target/hello.c
new file mode 100644
index 0000000000..3d535d3ec6
--- /dev/null
+++ b/third_party/python/gyp/test/target/hello.c
@@ -0,0 +1,10 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+int main(void) {
+  printf("Hello, world!\n");
+  return 0;
+}
diff --git a/third_party/python/gyp/test/target/target.gyp b/third_party/python/gyp/test/target/target.gyp
new file mode 100644
index 0000000000..c87e30f533
--- /dev/null
+++ b/third_party/python/gyp/test/target/target.gyp
@@ -0,0 +1,24 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello1',
+ 'product_extension': 'stuff',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ },
+ {
+ 'target_name': 'hello2',
+ 'target_extension': 'stuff',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ ],
+ }
+ ]
+}
diff --git a/third_party/python/gyp/test/toolsets/gyptest-toolsets.py b/third_party/python/gyp/test/toolsets/gyptest-toolsets.py
new file mode 100755
index 0000000000..f80fce70a2
--- /dev/null
+++ b/third_party/python/gyp/test/toolsets/gyptest-toolsets.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that toolsets are correctly applied.
+"""
+import os
+import sys
+import TestGyp
+
+if sys.platform.startswith('linux'):
+
+ test = TestGyp.TestGyp(formats=['make', 'ninja'])
+
+ oldenv = os.environ.copy()
+ try:
+ os.environ['GYP_CROSSCOMPILE'] = '1'
+ test.run_gyp('toolsets.gyp')
+ finally:
+ os.environ.clear()
+ os.environ.update(oldenv)
+
+ test.build('toolsets.gyp', test.ALL)
+
+ test.run_built_executable('host-main', stdout="Host\nShared: Host\n")
+ test.run_built_executable('target-main', stdout="Target\nShared: Target\n")
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/toolsets/main.cc b/third_party/python/gyp/test/toolsets/main.cc
new file mode 100644
index 0000000000..bc47da9978
--- /dev/null
+++ b/third_party/python/gyp/test/toolsets/main.cc
@@ -0,0 +1,13 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+#include <stdio.h>
+
+const char *GetToolset();
+const char *GetToolsetShared();
+
+int main(void) {
+ printf("%s\n", GetToolset());
+ printf("Shared: %s\n", GetToolsetShared());
+}
diff --git a/third_party/python/gyp/test/toolsets/toolsets.cc b/third_party/python/gyp/test/toolsets/toolsets.cc
new file mode 100644
index 0000000000..a45fa029cb
--- /dev/null
+++ b/third_party/python/gyp/test/toolsets/toolsets.cc
@@ -0,0 +1,11 @@
+/* Copyright (c) 2009 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+const char *GetToolset() {
+#ifdef TARGET
+ return "Target";
+#else
+ return "Host";
+#endif
+}
diff --git a/third_party/python/gyp/test/toolsets/toolsets.gyp b/third_party/python/gyp/test/toolsets/toolsets.gyp
new file mode 100644
index 0000000000..3bc3a784ea
--- /dev/null
+++ b/third_party/python/gyp/test/toolsets/toolsets.gyp
@@ -0,0 +1,62 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'target_conditions': [
+ ['_toolset=="target"', {'defines': ['TARGET']}]
+ ]
+ },
+ 'targets': [
+ {
+ 'target_name': 'toolsets',
+ 'type': 'static_library',
+ 'toolsets': ['target', 'host'],
+ 'sources': [
+ 'toolsets.cc',
+ ],
+ },
+ {
+ 'target_name': 'host-main',
+ 'type': 'executable',
+ 'toolsets': ['host'],
+ 'dependencies': ['toolsets', 'toolsets_shared'],
+ 'sources': [
+ 'main.cc',
+ ],
+ },
+ {
+ 'target_name': 'target-main',
+ 'type': 'executable',
+ 'dependencies': ['toolsets', 'toolsets_shared'],
+ 'sources': [
+ 'main.cc',
+ ],
+ },
+  # This tests that build systems can handle a shared library being built for
+ # both host and target.
+ {
+ 'target_name': 'janus',
+ 'type': 'shared_library',
+ 'toolsets': ['target', 'host'],
+ 'sources': [
+ 'toolsets.cc',
+ ],
+ 'cflags': [ '-fPIC' ],
+ },
+ {
+ 'target_name': 'toolsets_shared',
+ 'type': 'shared_library',
+ 'toolsets': ['target', 'host'],
+ 'target_conditions': [
+ # Ensure target and host have different shared_library names
+ ['_toolset=="host"', {'product_extension': 'host'}],
+ ],
+ 'sources': [
+ 'toolsets_shared.cc',
+ ],
+ 'cflags': [ '-fPIC' ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/toolsets/toolsets_shared.cc b/third_party/python/gyp/test/toolsets/toolsets_shared.cc
new file mode 100644
index 0000000000..794af2c0bd
--- /dev/null
+++ b/third_party/python/gyp/test/toolsets/toolsets_shared.cc
@@ -0,0 +1,11 @@
+/* Copyright (c) 2013 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+const char *GetToolsetShared() {
+#ifdef TARGET
+ return "Target";
+#else
+ return "Host";
+#endif
+}
diff --git a/third_party/python/gyp/test/toplevel-dir/gyptest-toplevel-dir.py b/third_party/python/gyp/test/toplevel-dir/gyptest-toplevel-dir.py
new file mode 100755
index 0000000000..9e69512dd3
--- /dev/null
+++ b/third_party/python/gyp/test/toplevel-dir/gyptest-toplevel-dir.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies building a subsidiary dependent target from a .gyp file in a
+subdirectory, without specifying an explicit output build directory,
+and using the subdirectory's solution or project file as the entry point.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp(formats=['ninja', 'make'])
+
+# We want our Makefile to be one dir up from main.gyp.
+test.run_gyp('main.gyp', '--toplevel-dir=..', chdir='src/sub1')
+
+toplevel_dir = 'src'
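+# --toplevel-dir is interpreted relative to the chdir above, so '..' from
+# src/sub1 makes 'src' the top-level directory used for building below.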
+
+test.build('sub1/main.gyp', test.ALL, chdir=toplevel_dir)
+
+test.built_file_must_exist('prog1', type=test.EXECUTABLE, chdir=toplevel_dir)
+
+test.run_built_executable('prog1',
+ chdir=toplevel_dir,
+ stdout="Hello from prog1.c\n")
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/toplevel-dir/src/sub1/main.gyp b/third_party/python/gyp/test/toplevel-dir/src/sub1/main.gyp
new file mode 100644
index 0000000000..33219010e4
--- /dev/null
+++ b/third_party/python/gyp/test/toplevel-dir/src/sub1/main.gyp
@@ -0,0 +1,18 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'prog1',
+ 'type': 'executable',
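+      # <(DEPTH) is gyp's relative path back to the top-level directory,
+      # letting this target reach sub2 as a sibling of sub1.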
+ 'dependencies': [
+ '<(DEPTH)/../sub2/prog2.gyp:prog2',
+ ],
+ 'sources': [
+ 'prog1.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/toplevel-dir/src/sub1/prog1.c b/third_party/python/gyp/test/toplevel-dir/src/sub1/prog1.c
new file mode 100644
index 0000000000..218e99401c
--- /dev/null
+++ b/third_party/python/gyp/test/toplevel-dir/src/sub1/prog1.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from prog1.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/toplevel-dir/src/sub2/prog2.c b/third_party/python/gyp/test/toplevel-dir/src/sub2/prog2.c
new file mode 100644
index 0000000000..12a31883b9
--- /dev/null
+++ b/third_party/python/gyp/test/toplevel-dir/src/sub2/prog2.c
@@ -0,0 +1,7 @@
+#include <stdio.h>
+
+int main(void)
+{
+ printf("Hello from prog2.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/toplevel-dir/src/sub2/prog2.gyp b/third_party/python/gyp/test/toplevel-dir/src/sub2/prog2.gyp
new file mode 100644
index 0000000000..5934548369
--- /dev/null
+++ b/third_party/python/gyp/test/toplevel-dir/src/sub2/prog2.gyp
@@ -0,0 +1,15 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'prog2',
+ 'type': 'executable',
+ 'sources': [
+ 'prog2.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/commands/commands-repeated.gyp b/third_party/python/gyp/test/variables/commands/commands-repeated.gyp
new file mode 100644
index 0000000000..1f52e75936
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/commands-repeated.gyp
@@ -0,0 +1,128 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This is a simple test file to make sure that variable substitution
+# happens correctly. Run "run_tests.py" using python to generate the
+# output from this gyp file.
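+#
+# A quick reference for the expansion forms exercised in these test files:
+#   <(var)   expands a variable into a string
+#   <@(var)  expands a list variable into separate list items
+#   <!(cmd)  runs a command and substitutes its captured output
+#   <!@(cmd) runs a command and splits its output into a list
+# A command written as a Python-style list, e.g. <!(["python", "-c", ...]),
+# is executed directly, without a shell.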
+
+{
+ 'variables': {
+ 'pi': 'import math; print(math.pi)',
+ 'third_letters': "<(other_letters)HIJK",
+ 'letters_list': 'ABCD',
+ 'other_letters': '<(letters_list)EFG',
+ 'check_included': '<(included_variable)',
+ 'check_lists': [
+ '<(included_variable)',
+ '<(third_letters)',
+ ],
+ 'check_int': 5,
+ 'check_str_int': '6',
+ 'check_list_int': [
+ 7,
+ '8',
+ 9,
+ ],
+ 'not_int_1': ' 10',
+ 'not_int_2': '11 ',
+ 'not_int_3': '012',
+ 'not_int_4': '13.0',
+ 'not_int_5': '+14',
+ 'negative_int': '-15',
+ 'zero_int': '0',
+ },
+ 'includes': [
+ 'commands.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'none',
+ 'variables': {
+ 'var1': '<!(["python", "-c", "<(pi)"])',
+ 'var2': '<!(python -c "print(\'<!(python -c "<(pi)") <(letters_list)\')")',
+ 'var3': '<!(python -c "print(\'<(letters_list)\')")',
+ 'var4': '<(<!(python -c "print(\'letters_list\')"))',
+ 'var5': 'letters_',
+ 'var6': 'list',
+ 'var7': '<(check_int)',
+ 'var8': '<(check_int)blah',
+ 'var9': '<(check_str_int)',
+ 'var10': '<(check_list_int)',
+ 'var11': ['<@(check_list_int)'],
+ 'var12': '<(not_int_1)',
+ 'var13': '<(not_int_2)',
+ 'var14': '<(not_int_3)',
+ 'var15': '<(not_int_4)',
+ 'var16': '<(not_int_5)',
+ 'var17': '<(negative_int)',
+ 'var18': '<(zero_int)',
+ # A second set with different names to make sure they only execute the
+ # commands once.
+ 'var1prime': '<!(["python", "-c", "<(pi)"])',
+ 'var2prime': '<!(python -c "print(\'<!(python -c "<(pi)") <(letters_list)\')")',
+ 'var3prime': '<!(python -c "print(\'<(letters_list)\')")',
+ 'var4prime': '<(<!(python -c "print(\'letters_list\')"))',
+ },
+ 'actions': [
+ {
+ 'action_name': 'test_action',
+ 'variables': {
+ 'var7': '<!(echo <(var5)<(var6))',
+ },
+          'inputs': [
+ '<(var2)',
+ ],
+ 'outputs': [
+ '<(var4)',
+ '<(var7)',
+ ],
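+          # <(_inputs) and <(_outputs) expand to this action's own
+          # 'inputs' and 'outputs' lists.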
+ 'action': [
+ 'echo',
+ '<(_inputs)',
+ '<(_outputs)',
+ ],
+ },
+ # Again with the same vars to make sure the right things happened.
+ {
+ 'action_name': 'test_action_prime',
+ 'variables': {
+ 'var7': '<!(echo <(var5)<(var6))',
+ },
+          'inputs': [
+ '<(var2)',
+ ],
+ 'outputs': [
+ '<(var4)',
+ '<(var7)',
+ ],
+ 'action': [
+ 'echo',
+ '<(_inputs)',
+ '<(_outputs)',
+ ],
+ },
+ # And one more time with the other vars...
+ {
+ 'action_name': 'test_action_prime_prime',
+ 'variables': {
+ 'var7': '<!(echo <(var5)<(var6))',
+ },
+          'inputs': [
+ '<(var2prime)',
+ ],
+ 'outputs': [
+ '<(var4prime)',
+ '<(var7)',
+ ],
+ 'action': [
+ 'echo',
+ '<(_inputs)',
+ '<(_outputs)',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/commands/commands-repeated.gyp.stdout b/third_party/python/gyp/test/variables/commands/commands-repeated.gyp.stdout
new file mode 100644
index 0000000000..00bba88e4f
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/commands-repeated.gyp.stdout
@@ -0,0 +1,136 @@
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'other_letters', 'is_array': '', 'replace': '<(other_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(letters_list)EFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFG', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'included_variable', 'is_array': '', 'replace': '<(included_variable)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'XYZ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'included_variable', 'is_array': '', 'replace': '<(included_variable)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'XYZ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'third_letters', 'is_array': '', 'replace': '<(third_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(other_letters)HIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'other_letters', 'is_array': '', 'replace': '<(other_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(letters_list)EFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'<!(python -c "<(pi', 'is_array': '', 'replace': '<!(python -c "print(\'<!(python -c "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "<(pi', 'is_array': '', 'replace': '<!(python -c "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'pi', 'is_array': '', 'replace': '<(pi)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "import math; print(math.pi)"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "import math; print(math.pi)"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "print(\'3.14159265359 ABCD\')"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('3.14159265359 ABCD')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '"python", "-c", "<(pi', 'is_array': '[', 'replace': '<!(["python", "-c", "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'pi', 'is_array': '', 'replace': '<(pi)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '["python", "-c", "import math; print(math.pi)"]', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command '['python', '-c', 'import math; print(math.pi)']' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '<!(python -c "print(\'letters_list\'', 'is_array': '', 'replace': '<(<!(python -c "print(\'letters_list\')', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'letters_list\'', 'is_array': '', 'replace': '<!(python -c "print(\'letters_list\')', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('letters_list')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_int', 'is_array': '', 'replace': '<(check_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '5', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '"python", "-c", "<(pi', 'is_array': '[', 'replace': '<!(["python", "-c", "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'pi', 'is_array': '', 'replace': '<(pi)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '["python", "-c", "import math; print(math.pi)"]', recursing.
+VARIABLES:input.py:889:ExpandVariables Had cache value for command '['python', '-c', 'import math; print(math.pi)']' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'<(letters_list', 'is_array': '', 'replace': '<!(python -c "print(\'<(letters_list)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "print(\'ABCD\')"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('ABCD')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'<!(python -c "<(pi', 'is_array': '', 'replace': '<!(python -c "print(\'<!(python -c "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "<(pi', 'is_array': '', 'replace': '<!(python -c "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'pi', 'is_array': '', 'replace': '<(pi)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "import math; print(math.pi)"', recursing.
+VARIABLES:input.py:889:ExpandVariables Had cache value for command 'python -c "import math; print(math.pi)"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "print(\'3.14159265359 ABCD\')"', recursing.
+VARIABLES:input.py:889:ExpandVariables Had cache value for command 'python -c "print('3.14159265359 ABCD')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_str_int', 'is_array': '', 'replace': '<(check_str_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '6', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_int', 'is_array': '', 'replace': '<(check_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '5blah', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '<!(python -c "print(\'letters_list\'', 'is_array': '', 'replace': '<(<!(python -c "print(\'letters_list\')', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'letters_list\'', 'is_array': '', 'replace': '<!(python -c "print(\'letters_list\')', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:889:ExpandVariables Had cache value for command 'python -c "print('letters_list')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'<(letters_list', 'is_array': '', 'replace': '<!(python -c "print(\'<(letters_list)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "print(\'ABCD\')"', recursing.
+VARIABLES:input.py:889:ExpandVariables Had cache value for command 'python -c "print('ABCD')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_4', 'is_array': '', 'replace': '<(not_int_4)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '13.0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_3', 'is_array': '', 'replace': '<(not_int_3)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '012', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'negative_int', 'is_array': '', 'replace': '<(negative_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '-15', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_5', 'is_array': '', 'replace': '<(not_int_5)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '+14', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_list_int', 'is_array': '', 'replace': '<(check_list_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '7 8 9', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_2', 'is_array': '', 'replace': '<(not_int_2)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '11 ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_1', 'is_array': '', 'replace': '<(not_int_1)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output ' 10', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'zero_int', 'is_array': '', 'replace': '<(zero_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_list_int', 'is_array': '', 'replace': '<@(check_list_int)', 'type': '<@', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output [7, 8, 9], recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var6', 'is_array': '', 'replace': '<(var6)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'echo <(var5', 'is_array': '', 'replace': '<!(echo <(var5)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var5', 'is_array': '', 'replace': '<(var5)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'echo letters_list', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'echo letters_list' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_inputs', 'is_array': '', 'replace': '<(_inputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var2', 'is_array': '', 'replace': '<(var2)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output '"3.14159265359 ABCD"', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_outputs', 'is_array': '', 'replace': '<(_outputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var4', 'is_array': '', 'replace': '<(var4)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var7', 'is_array': '', 'replace': '<(var7)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD letters_list', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var6', 'is_array': '', 'replace': '<(var6)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'echo <(var5', 'is_array': '', 'replace': '<!(echo <(var5)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var5', 'is_array': '', 'replace': '<(var5)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'echo letters_list', recursing.
+VARIABLES:input.py:889:ExpandVariables Had cache value for command 'echo letters_list' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_inputs', 'is_array': '', 'replace': '<(_inputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var2', 'is_array': '', 'replace': '<(var2)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output '"3.14159265359 ABCD"', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_outputs', 'is_array': '', 'replace': '<(_outputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var4', 'is_array': '', 'replace': '<(var4)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var7', 'is_array': '', 'replace': '<(var7)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD letters_list', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var6', 'is_array': '', 'replace': '<(var6)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'echo <(var5', 'is_array': '', 'replace': '<!(echo <(var5)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var5', 'is_array': '', 'replace': '<(var5)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'echo letters_list', recursing.
+VARIABLES:input.py:889:ExpandVariables Had cache value for command 'echo letters_list' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_inputs', 'is_array': '', 'replace': '<(_inputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var2prime', 'is_array': '', 'replace': '<(var2prime)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output '"3.14159265359 ABCD"', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_outputs', 'is_array': '', 'replace': '<(_outputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var4prime', 'is_array': '', 'replace': '<(var4prime)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var7', 'is_array': '', 'replace': '<(var7)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD letters_list', recursing.
diff --git a/third_party/python/gyp/test/variables/commands/commands-repeated.gypd.golden b/third_party/python/gyp/test/variables/commands/commands-repeated.gypd.golden
new file mode 100644
index 0000000000..fa4c53716f
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/commands-repeated.gypd.golden
@@ -0,0 +1,77 @@
+{'_DEPTH': '.',
+ 'included_files': ['commands-repeated.gyp', 'commands.gypi'],
+ 'targets': [{'actions': [{'action': ['echo',
+ '"3.14159265359 ABCD"',
+ 'ABCD letters_list'],
+ 'action_name': 'test_action',
+ 'inputs': ['3.14159265359 ABCD'],
+ 'outputs': ['ABCD', 'letters_list'],
+ 'variables': {'var7': 'letters_list'}},
+ {'action': ['echo',
+ '"3.14159265359 ABCD"',
+ 'ABCD letters_list'],
+ 'action_name': 'test_action_prime',
+ 'inputs': ['3.14159265359 ABCD'],
+ 'outputs': ['ABCD', 'letters_list'],
+ 'variables': {'var7': 'letters_list'}},
+ {'action': ['echo',
+ '"3.14159265359 ABCD"',
+ 'ABCD letters_list'],
+ 'action_name': 'test_action_prime_prime',
+ 'inputs': ['3.14159265359 ABCD'],
+ 'outputs': ['ABCD', 'letters_list'],
+ 'variables': {'var7': 'letters_list'}}],
+ 'configurations': {'Default': {}},
+ 'default_configuration': 'Default',
+ 'target_name': 'foo',
+ 'toolset': 'target',
+ 'type': 'none',
+ 'variables': {'var1': '3.14159265359',
+ 'var10': '7 8 9',
+ 'var11': ['7', '8', '9'],
+ 'var12': ' 10',
+ 'var13': '11 ',
+ 'var14': '012',
+ 'var15': '13.0',
+ 'var16': '+14',
+ 'var17': '-15',
+ 'var18': '0',
+ 'var1prime': '3.14159265359',
+ 'var2': '3.14159265359 ABCD',
+ 'var2prime': '3.14159265359 ABCD',
+ 'var3': 'ABCD',
+ 'var3prime': 'ABCD',
+ 'var4': 'ABCD',
+ 'var4prime': 'ABCD',
+ 'var5': 'letters_',
+ 'var6': 'list',
+ 'var7': '5',
+ 'var8': '5blah',
+ 'var9': '6'}},
+ {'configurations': {'Default': {}},
+ 'default_configuration': 'Default',
+ 'target_name': 'dummy',
+ 'toolset': 'target',
+ 'type': 'none'}],
+ 'variables': {'check_included': 'XYZ',
+ 'check_int': '5',
+ 'check_list_int': ['7', '8', '9'],
+ 'check_lists': ['XYZ', 'ABCDEFGHIJK'],
+ 'check_str_int': '6',
+ 'default_empty_files%': '',
+ 'default_empty_str%': '',
+ 'default_int%': '0',
+ 'default_int_files%': '0',
+ 'default_str%': 'my_str',
+ 'included_variable': 'XYZ',
+ 'letters_list': 'ABCD',
+ 'negative_int': '-15',
+ 'not_int_1': ' 10',
+ 'not_int_2': '11 ',
+ 'not_int_3': '012',
+ 'not_int_4': '13.0',
+ 'not_int_5': '+14',
+ 'other_letters': 'ABCDEFG',
+ 'pi': 'import math; print(math.pi)',
+ 'third_letters': 'ABCDEFGHIJK',
+ 'zero_int': '0'}}
diff --git a/third_party/python/gyp/test/variables/commands/commands.gyp b/third_party/python/gyp/test/variables/commands/commands.gyp
new file mode 100644
index 0000000000..7d460b1810
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/commands.gyp
@@ -0,0 +1,91 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This is a simple test file to make sure that variable substitution
+# happens correctly. Run "run_tests.py" using python to generate the
+# output from this gyp file.
+
+{
+ 'variables': {
+ 'pi': 'import math; print(math.pi)',
+ 'third_letters': "<(other_letters)HIJK",
+ 'letters_list': 'ABCD',
+ 'other_letters': '<(letters_list)EFG',
+ 'check_included': '<(included_variable)',
+ 'check_lists': [
+ '<(included_variable)',
+ '<(third_letters)',
+ ],
+ 'check_int': 5,
+ 'check_str_int': '6',
+ 'check_list_int': [
+ 7,
+ '8',
+ 9,
+ ],
+ 'not_int_1': ' 10',
+ 'not_int_2': '11 ',
+ 'not_int_3': '012',
+ 'not_int_4': '13.0',
+ 'not_int_5': '+14',
+ 'negative_int': '-15',
+ 'zero_int': '0',
+ },
+ 'includes': [
+ 'commands.gypi',
+ ],
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'none',
+ 'variables': {
+ 'var1': '<!(["python", "-c", "<(pi)"])',
+ 'var2': '<!(python -c "print(\'<!(python -c "<(pi)") <(letters_list)\')")',
+ 'var3': '<!(python -c "print(\'<(letters_list)\')")',
+ 'var4': '<(<!(python -c "print(\'letters_list\')"))',
+ 'var5': 'letters_',
+ 'var6': 'list',
+ 'var7': '<(check_int)',
+ 'var8': '<(check_int)blah',
+ 'var9': '<(check_str_int)',
+ 'var10': '<(check_list_int)',
+ 'var11': ['<@(check_list_int)'],
+ 'var12': '<(not_int_1)',
+ 'var13': '<(not_int_2)',
+ 'var14': '<(not_int_3)',
+ 'var15': '<(not_int_4)',
+ 'var16': '<(not_int_5)',
+ 'var17': '<(negative_int)',
+ 'var18': '<(zero_int)',
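+        # <! captures command output as a single string, while <!@ splits
+        # the output into a list (compare var20 and var19).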
+ 'var19': ['<!@(python test.py)'],
+ 'var20': '<!(python test.py)',
+ 'var21': '<(default_str)',
+ 'var22': '<(default_empty_str)',
+ 'var23': '<(default_int)',
+ 'var24': '<(default_empty_files)',
+ 'var25': '<(default_int_files)',
+ },
+ 'actions': [
+ {
+ 'action_name': 'test_action',
+ 'variables': {
+ 'var7': '<!(echo <(var5)<(var6))',
+ },
+          'inputs': [
+ '<(var2)',
+ ],
+ 'outputs': [
+ '<(var4)',
+ '<(var7)',
+ ],
+ 'action': [
+ 'echo',
+ '<(_inputs)',
+ '<(_outputs)',
+ ],
+ },
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/commands/commands.gyp.ignore-env.stdout b/third_party/python/gyp/test/variables/commands/commands.gyp.ignore-env.stdout
new file mode 100644
index 0000000000..1b721d805b
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/commands.gyp.ignore-env.stdout
@@ -0,0 +1,96 @@
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'other_letters', 'is_array': '', 'replace': '<(other_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(letters_list)EFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFG', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'included_variable', 'is_array': '', 'replace': '<(included_variable)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'XYZ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'included_variable', 'is_array': '', 'replace': '<(included_variable)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'XYZ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'third_letters', 'is_array': '', 'replace': '<(third_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(other_letters)HIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'other_letters', 'is_array': '', 'replace': '<(other_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(letters_list)EFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_empty_files', 'is_array': '', 'replace': '<(default_empty_files)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_int_files', 'is_array': '', 'replace': '<(default_int_files)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python test.py', 'is_array': '', 'replace': '<!(python test.py)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:838:ExpandVariables Executing command 'python test.py' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'sample\\path\\foo.cpp', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_str', 'is_array': '', 'replace': '<(default_str)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'my_str', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_empty_str', 'is_array': '', 'replace': '<(default_empty_str)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_int', 'is_array': '', 'replace': '<(default_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '<!(python -c "print(\'letters_list\'', 'is_array': '', 'replace': '<(<!(python -c "print(\'letters_list\')', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'letters_list\'', 'is_array': '', 'replace': '<!(python -c "print(\'letters_list\')', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('letters_list')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_int', 'is_array': '', 'replace': '<(check_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '5', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '"python", "-c", "<(pi', 'is_array': '[', 'replace': '<!(["python", "-c", "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'pi', 'is_array': '', 'replace': '<(pi)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '["python", "-c", "import math; print(math.pi)"]', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command '['python', '-c', 'import math; print(math.pi)']' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'<(letters_list', 'is_array': '', 'replace': '<!(python -c "print(\'<(letters_list)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "print(\'ABCD\')"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('ABCD')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'<!(python -c "<(pi', 'is_array': '', 'replace': '<!(python -c "print(\'<!(python -c "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "<(pi', 'is_array': '', 'replace': '<!(python -c "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'pi', 'is_array': '', 'replace': '<(pi)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "import math; print(math.pi)"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "import math; print(math.pi)"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "print(\'3.14159265359 ABCD\')"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('3.14159265359 ABCD')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_str_int', 'is_array': '', 'replace': '<(check_str_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '6', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_int', 'is_array': '', 'replace': '<(check_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '5blah', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_4', 'is_array': '', 'replace': '<(not_int_4)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '13.0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_3', 'is_array': '', 'replace': '<(not_int_3)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '012', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'negative_int', 'is_array': '', 'replace': '<(negative_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '-15', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_5', 'is_array': '', 'replace': '<(not_int_5)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '+14', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_list_int', 'is_array': '', 'replace': '<(check_list_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '7 8 9', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_2', 'is_array': '', 'replace': '<(not_int_2)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '11 ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_1', 'is_array': '', 'replace': '<(not_int_1)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output ' 10', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'zero_int', 'is_array': '', 'replace': '<(zero_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_list_int', 'is_array': '', 'replace': '<@(check_list_int)', 'type': '<@', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output [7, 8, 9], recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python test.py', 'is_array': '', 'replace': '<!@(python test.py)', 'type': '<!@', 'command_string': None}
+VARIABLES:input.py:889:ExpandVariables Had cache value for command 'python test.py' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output ['samplepathfoo.cpp'], recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var6', 'is_array': '', 'replace': '<(var6)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'echo <(var5', 'is_array': '', 'replace': '<!(echo <(var5)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var5', 'is_array': '', 'replace': '<(var5)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'echo letters_list', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'echo letters_list' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_inputs', 'is_array': '', 'replace': '<(_inputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var2', 'is_array': '', 'replace': '<(var2)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output '"3.14159265359 ABCD"', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_outputs', 'is_array': '', 'replace': '<(_outputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var4', 'is_array': '', 'replace': '<(var4)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var7', 'is_array': '', 'replace': '<(var7)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD letters_list', recursing.
diff --git a/third_party/python/gyp/test/variables/commands/commands.gyp.stdout b/third_party/python/gyp/test/variables/commands/commands.gyp.stdout
new file mode 100644
index 0000000000..1b721d805b
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/commands.gyp.stdout
@@ -0,0 +1,96 @@
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'other_letters', 'is_array': '', 'replace': '<(other_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(letters_list)EFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFG', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'included_variable', 'is_array': '', 'replace': '<(included_variable)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'XYZ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'included_variable', 'is_array': '', 'replace': '<(included_variable)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'XYZ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'third_letters', 'is_array': '', 'replace': '<(third_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(other_letters)HIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'other_letters', 'is_array': '', 'replace': '<(other_letters)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '<(letters_list)EFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCDEFGHIJK', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_empty_files', 'is_array': '', 'replace': '<(default_empty_files)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_int_files', 'is_array': '', 'replace': '<(default_int_files)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python test.py', 'is_array': '', 'replace': '<!(python test.py)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:838:ExpandVariables Executing command 'python test.py' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'sample\\path\\foo.cpp', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_str', 'is_array': '', 'replace': '<(default_str)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'my_str', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_empty_str', 'is_array': '', 'replace': '<(default_empty_str)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'default_int', 'is_array': '', 'replace': '<(default_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '<!(python -c "print(\'letters_list\'', 'is_array': '', 'replace': '<(<!(python -c "print(\'letters_list\')', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'letters_list\'', 'is_array': '', 'replace': '<!(python -c "print(\'letters_list\')', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('letters_list')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_int', 'is_array': '', 'replace': '<(check_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '5', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '"python", "-c", "<(pi', 'is_array': '[', 'replace': '<!(["python", "-c", "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'pi', 'is_array': '', 'replace': '<(pi)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '["python", "-c", "import math; print(math.pi)"]', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command '['python', '-c', 'import math; print(math.pi)']' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'<(letters_list', 'is_array': '', 'replace': '<!(python -c "print(\'<(letters_list)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "print(\'ABCD\')"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('ABCD')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'letters_list', 'is_array': '', 'replace': '<(letters_list)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "print(\'<!(python -c "<(pi', 'is_array': '', 'replace': '<!(python -c "print(\'<!(python -c "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python -c "<(pi', 'is_array': '', 'replace': '<!(python -c "<(pi)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'pi', 'is_array': '', 'replace': '<(pi)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "import math; print(math.pi)"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "import math; print(math.pi)"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'python -c "print(\'3.14159265359 ABCD\')"', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'python -c "print('3.14159265359 ABCD')"' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_str_int', 'is_array': '', 'replace': '<(check_str_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '6', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_int', 'is_array': '', 'replace': '<(check_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '5blah', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_4', 'is_array': '', 'replace': '<(not_int_4)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '13.0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_3', 'is_array': '', 'replace': '<(not_int_3)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '012', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'negative_int', 'is_array': '', 'replace': '<(negative_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '-15', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_5', 'is_array': '', 'replace': '<(not_int_5)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '+14', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_list_int', 'is_array': '', 'replace': '<(check_list_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '7 8 9', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_2', 'is_array': '', 'replace': '<(not_int_2)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '11 ', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'not_int_1', 'is_array': '', 'replace': '<(not_int_1)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output ' 10', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'zero_int', 'is_array': '', 'replace': '<(zero_int)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '0', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'check_list_int', 'is_array': '', 'replace': '<@(check_list_int)', 'type': '<@', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output [7, 8, 9], recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'python test.py', 'is_array': '', 'replace': '<!@(python test.py)', 'type': '<!@', 'command_string': None}
+VARIABLES:input.py:889:ExpandVariables Had cache value for command 'python test.py' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output ['samplepathfoo.cpp'], recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var6', 'is_array': '', 'replace': '<(var6)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'echo <(var5', 'is_array': '', 'replace': '<!(echo <(var5)', 'type': '<!', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var5', 'is_array': '', 'replace': '<(var5)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'echo letters_list', recursing.
+VARIABLES:input.py:838:ExpandVariables Executing command 'echo letters_list' in directory 'None'
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_inputs', 'is_array': '', 'replace': '<(_inputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var2', 'is_array': '', 'replace': '<(var2)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output '3.14159265359 ABCD', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output '"3.14159265359 ABCD"', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': '_outputs', 'is_array': '', 'replace': '<(_outputs)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var4', 'is_array': '', 'replace': '<(var4)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD', recursing.
+VARIABLES:input.py:724:ExpandVariables Matches: {'content': 'var7', 'is_array': '', 'replace': '<(var7)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:964:ExpandVariables Found output 'letters_list', recursing.
+VARIABLES:input.py:964:ExpandVariables Found output 'ABCD letters_list', recursing.
diff --git a/third_party/python/gyp/test/variables/commands/commands.gypd.golden b/third_party/python/gyp/test/variables/commands/commands.gypd.golden
new file mode 100644
index 0000000000..13abba26c4
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/commands.gypd.golden
@@ -0,0 +1,66 @@
+{'_DEPTH': '.',
+ 'included_files': ['commands.gyp', 'commands.gypi'],
+ 'targets': [{'actions': [{'action': ['echo',
+ '"3.14159265359 ABCD"',
+ 'ABCD letters_list'],
+ 'action_name': 'test_action',
+ 'inputs': ['3.14159265359 ABCD'],
+ 'outputs': ['ABCD', 'letters_list'],
+ 'variables': {'var7': 'letters_list'}}],
+ 'configurations': {'Default': {}},
+ 'default_configuration': 'Default',
+ 'target_name': 'foo',
+ 'toolset': 'target',
+ 'type': 'none',
+ 'variables': {'var1': '3.14159265359',
+ 'var10': '7 8 9',
+ 'var11': ['7', '8', '9'],
+ 'var12': ' 10',
+ 'var13': '11 ',
+ 'var14': '012',
+ 'var15': '13.0',
+ 'var16': '+14',
+ 'var17': '-15',
+ 'var18': '0',
+ 'var19': ['samplepathfoo.cpp'],
+ 'var2': '3.14159265359 ABCD',
+ 'var20': 'sample\\path\\foo.cpp',
+ 'var21': 'my_str',
+ 'var22': '',
+ 'var23': '0',
+ 'var24': '',
+ 'var25': '0',
+ 'var3': 'ABCD',
+ 'var4': 'ABCD',
+ 'var5': 'letters_',
+ 'var6': 'list',
+ 'var7': '5',
+ 'var8': '5blah',
+ 'var9': '6'}},
+ {'configurations': {'Default': {}},
+ 'default_configuration': 'Default',
+ 'target_name': 'dummy',
+ 'toolset': 'target',
+ 'type': 'none'}],
+ 'variables': {'check_included': 'XYZ',
+ 'check_int': '5',
+ 'check_list_int': ['7', '8', '9'],
+ 'check_lists': ['XYZ', 'ABCDEFGHIJK'],
+ 'check_str_int': '6',
+ 'default_empty_files%': '',
+ 'default_empty_str%': '',
+ 'default_int%': '0',
+ 'default_int_files%': '0',
+ 'default_str%': 'my_str',
+ 'included_variable': 'XYZ',
+ 'letters_list': 'ABCD',
+ 'negative_int': '-15',
+ 'not_int_1': ' 10',
+ 'not_int_2': '11 ',
+ 'not_int_3': '012',
+ 'not_int_4': '13.0',
+ 'not_int_5': '+14',
+ 'other_letters': 'ABCDEFG',
+ 'pi': 'import math; print(math.pi)',
+ 'third_letters': 'ABCDEFGHIJK',
+ 'zero_int': '0'}}
diff --git a/third_party/python/gyp/test/variables/commands/commands.gypi b/third_party/python/gyp/test/variables/commands/commands.gypi
new file mode 100644
index 0000000000..839cb30b7e
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/commands.gypi
@@ -0,0 +1,23 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This file is included from commands.gyp to test evaluation order of includes.
+{
+ 'variables': {
+ 'included_variable': 'XYZ',
+
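+    # The trailing '%' marks a default: the assignment applies only if the
+    # variable has not already been defined (e.g. via the command line).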
+ 'default_str%': 'my_str',
+ 'default_empty_str%': '',
+ 'default_int%': 0,
+
+ 'default_empty_files%': '',
+ 'default_int_files%': 0,
+ },
+ 'targets': [
+ {
+ 'target_name': 'dummy',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/commands/gyptest-commands-ignore-env.py b/third_party/python/gyp/test/variables/commands/gyptest-commands-ignore-env.py
new file mode 100755
index 0000000000..5470d5ce36
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/gyptest-commands-ignore-env.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test that environment variables are ignored when --ignore-environment is
+specified.
+"""
+
+from __future__ import print_function
+import os
+
+import TestGyp
+
+test = TestGyp.TestGyp(format='gypd')
+
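+# None of these settings should influence the output once
+# --ignore-environment is passed below.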
+os.environ['GYP_DEFINES'] = 'FOO=BAR'
+os.environ['GYP_GENERATORS'] = 'foo'
+os.environ['GYP_GENERATOR_FLAGS'] = 'genflag=foo'
+os.environ['GYP_GENERATOR_OUTPUT'] = 'somedir'
+
+expect = test.read('commands.gyp.ignore-env.stdout').replace('\r\n', '\n')
+
+test.run_gyp('commands.gyp',
+ '--debug', 'variables',
+ '--ignore-environment',
+ stdout=expect, ignore_line_numbers=True)
+
+# Verify the commands.gypd against the checked-in expected contents.
+#
+# Normally we would canonicalize line endings in the expected
+# contents file by setting the Subversion svn:eol-style property to
+# native, but that would still fail if multiple systems were sharing a
+# single workspace on a network-mounted file system. Consequently, we
+# massage the Windows line endings ('\r\n') in the output to the
+# checked-in UNIX endings ('\n').
+
+contents = test.read('commands.gypd').replace('\r', '')
+expect = test.read('commands.gypd.golden').replace('\r', '')
+if not test.match(contents, expect):
+ print("Unexpected contents of `commands.gypd'")
+ test.diff(expect, contents, 'commands.gypd ')
+ test.fail_test()
+
+test.pass_test()
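
The golden-file check above is a pattern these gyptest scripts repeat. A minimal sketch of it in plain Python, using illustrative file names rather than the TestGyp helpers:

    def read_normalized(path):
        # Collapse Windows '\r\n' line endings to the checked-in UNIX '\n'
        # so the comparison passes regardless of how the workspace was
        # checked out.
        with open(path) as f:
            return f.read().replace('\r\n', '\n')

    # Roughly what the block above does:
    # if read_normalized('commands.gypd') != read_normalized('commands.gypd.golden'):
    #     raise AssertionError("Unexpected contents of `commands.gypd'")
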
diff --git a/third_party/python/gyp/test/variables/commands/gyptest-commands-repeated-multidir.py b/third_party/python/gyp/test/variables/commands/gyptest-commands-repeated-multidir.py
new file mode 100755
index 0000000000..21e0487565
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/gyptest-commands-repeated-multidir.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test variable expansion of '<!()' syntax commands where they are evaluated
+more than once from different directories.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+# This tests GYP's cache of commands, ensuring that the directory a command is
+# run from is part of its cache key. Parallelism may lead to multiple cache
+# lookups failing, resulting in the command being run multiple times by
+# chance, not by GYP's logic. Turn off parallelism to ensure that the logic is
+# being tested.
+test.run_gyp('repeated_multidir/main.gyp', '--no-parallel')
+
+test.pass_test()
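
The cache behavior this test pins down can be sketched in a few lines. This is an illustrative stand-in, not GYP's actual cache (the real one lives in input.py's ExpandVariables):

    import subprocess

    _command_cache = {}

    def run_command_cached(command, cwd):
        # The working directory must be part of the key: the same command
        # string run from dir_1 and dir_2 legitimately produces different
        # output, so sharing one cache entry across directories would be wrong.
        key = (command, cwd)
        if key not in _command_cache:
            _command_cache[key] = subprocess.check_output(
                command, cwd=cwd, shell=True)
        return _command_cache[key]
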
diff --git a/third_party/python/gyp/test/variables/commands/gyptest-commands-repeated.py b/third_party/python/gyp/test/variables/commands/gyptest-commands-repeated.py
new file mode 100755
index 0000000000..fcf98ee02a
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/gyptest-commands-repeated.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test variable expansion of '<!()' syntax commands where they are evaluated
+more than once.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+test = TestGyp.TestGyp(format='gypd')
+
+expect = test.read('commands-repeated.gyp.stdout').replace('\r\n', '\n')
+
+test.run_gyp('commands-repeated.gyp',
+ '--debug', 'variables',
+ stdout=expect, ignore_line_numbers=True)
+
+# Verify the commands-repeated.gypd against the checked-in expected contents.
+#
+# Normally, we should canonicalize line endings in the expected
+# contents file by setting the Subversion svn:eol-style to native,
+# but that would still fail if multiple systems are sharing a single
+# workspace on a network-mounted file system. Consequently, we
+# massage the Windows line endings ('\r\n') in the output to the
+# checked-in UNIX endings ('\n').
+
+contents = test.read('commands-repeated.gypd').replace('\r\n', '\n')
+expect = test.read('commands-repeated.gypd.golden').replace('\r\n', '\n')
+if not test.match(contents, expect):
+ print("Unexpected contents of `commands-repeated.gypd'")
+ test.diff(expect, contents, 'commands-repeated.gypd ')
+ test.fail_test()
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/variables/commands/gyptest-commands.py b/third_party/python/gyp/test/variables/commands/gyptest-commands.py
new file mode 100755
index 0000000000..3251120ff4
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/gyptest-commands.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test variable expansion of '<!()' syntax commands.
+"""
+
+from __future__ import print_function
+import os
+
+import TestGyp
+
+test = TestGyp.TestGyp(format='gypd')
+
+expect = test.read('commands.gyp.stdout').replace('\r', '')
+
+test.run_gyp('commands.gyp',
+ '--debug', 'variables',
+ stdout=expect, ignore_line_numbers=True)
+
+# Verify the commands.gypd against the checked-in expected contents.
+#
+# Normally, we should canonicalize line endings in the expected
+# contents file by setting the Subversion svn:eol-style to native,
+# but that would still fail if multiple systems are sharing a single
+# workspace on a network-mounted file system. Consequently, we
+# massage the Windows line endings ('\r\n') in the output to the
+# checked-in UNIX endings ('\n').
+
+contents = test.read('commands.gypd').replace('\r', '')
+expect = test.read('commands.gypd.golden').replace('\r', '')
+if not test.match(contents, expect):
+ print("Unexpected contents of `commands.gypd'")
+ test.diff(expect, contents, 'commands.gypd ')
+ test.fail_test()
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/variables/commands/repeated_multidir/dir_1/test_1.gyp b/third_party/python/gyp/test/variables/commands/repeated_multidir/dir_1/test_1.gyp
new file mode 100644
index 0000000000..328fc306cd
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/repeated_multidir/dir_1/test_1.gyp
@@ -0,0 +1,13 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'expected_value': 'dir_1',
+ 'target_name': 'target_1',
+ },
+ 'includes': [
+ '../repeated_command_common.gypi',
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/commands/repeated_multidir/dir_2/test_2.gyp b/third_party/python/gyp/test/variables/commands/repeated_multidir/dir_2/test_2.gyp
new file mode 100644
index 0000000000..18e0c62c93
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/repeated_multidir/dir_2/test_2.gyp
@@ -0,0 +1,13 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'expected_value': 'dir_2',
+ 'target_name': 'target_2',
+ },
+ 'includes': [
+ '../repeated_command_common.gypi',
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/commands/repeated_multidir/main.gyp b/third_party/python/gyp/test/variables/commands/repeated_multidir/main.gyp
new file mode 100644
index 0000000000..5beeeb7244
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/repeated_multidir/main.gyp
@@ -0,0 +1,16 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'main',
+ 'type': 'none',
+ 'dependencies': [
+ 'dir_1/test_1.gyp:target_1',
+ 'dir_2/test_2.gyp:target_2',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/commands/repeated_multidir/print_cwd_basename.py b/third_party/python/gyp/test/variables/commands/repeated_multidir/print_cwd_basename.py
new file mode 100755
index 0000000000..9b206bb3d4
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/repeated_multidir/print_cwd_basename.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+import os
+import os.path
+
+print(os.path.basename(os.getcwd()))
diff --git a/third_party/python/gyp/test/variables/commands/repeated_multidir/repeated_command_common.gypi b/third_party/python/gyp/test/variables/commands/repeated_multidir/repeated_command_common.gypi
new file mode 100644
index 0000000000..74366771aa
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/repeated_multidir/repeated_command_common.gypi
@@ -0,0 +1,25 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ # This command will be run from the directories of the .gyp files that
+ # include this .gypi, the subdirectories dir_1 and dir_2, so use a
+ # relative path from those directories to the script.
+ 'observed_value': '<!(python ../print_cwd_basename.py)',
+ },
+ 'targets': [
+ {
+ 'target_name': '<(target_name)',
+ 'type': 'none',
+ 'conditions': [
+ ['observed_value != expected_value', {
+ # Attempt to expand an undefined variable. This triggers a GYP
+ # error.
+ 'assertion': '<(observed_value_must_equal_expected_value)',
+ }],
+ ],
+ },
+ ],
+}
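
A sketch of what the '<!(...)' expansion above evaluates to, mimicking GYP running the command once per including directory (assumes it is run from repeated_multidir; the assertion mirrors the expected_value check in the .gypi):

    import subprocess

    for d in ('dir_1', 'dir_2'):
        # From dir_N, '../print_cwd_basename.py' resolves to the script above.
        out = subprocess.check_output(
            ['python', '../print_cwd_basename.py'], cwd=d)
        assert out.strip() == d.encode()  # the expected_value in test_N.gyp
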
diff --git a/third_party/python/gyp/test/variables/commands/test.py b/third_party/python/gyp/test/variables/commands/test.py
new file mode 100644
index 0000000000..eb64f95383
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/test.py
@@ -0,0 +1,7 @@
+# Copyright (c) 2017 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+print("sample\\path\\foo.cpp")
diff --git a/third_party/python/gyp/test/variables/commands/update_golden b/third_party/python/gyp/test/variables/commands/update_golden
new file mode 100755
index 0000000000..4fcf1eb961
--- /dev/null
+++ b/third_party/python/gyp/test/variables/commands/update_golden
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+python ../../../gyp --debug variables --format gypd --depth . commands.gyp > commands.gyp.stdout
+python ../../../gyp --ignore-environment --debug variables --format gypd --depth . commands.gyp > commands.gyp.ignore-env.stdout
+cp -f commands.gypd commands.gypd.golden
+python ../../../gyp --debug variables --format gypd --depth . commands-repeated.gyp > commands-repeated.gyp.stdout
+cp -f commands-repeated.gypd commands-repeated.gypd.golden
diff --git a/third_party/python/gyp/test/variables/empty/empty.gyp b/third_party/python/gyp/test/variables/empty/empty.gyp
new file mode 100644
index 0000000000..207be06fe7
--- /dev/null
+++ b/third_party/python/gyp/test/variables/empty/empty.gyp
@@ -0,0 +1,13 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'includes': ['empty.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'empty',
+ 'type': 'none',
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/empty/empty.gypi b/third_party/python/gyp/test/variables/empty/empty.gypi
new file mode 100644
index 0000000000..e95031fca5
--- /dev/null
+++ b/third_party/python/gyp/test/variables/empty/empty.gypi
@@ -0,0 +1,9 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ '': '',
+ },
+}
diff --git a/third_party/python/gyp/test/variables/empty/gyptest-empty.py b/third_party/python/gyp/test/variables/empty/gyptest-empty.py
new file mode 100755
index 0000000000..4cbe166fdc
--- /dev/null
+++ b/third_party/python/gyp/test/variables/empty/gyptest-empty.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test that empty variable names don't cause infinite loops.
+"""
+
+import os
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('empty.gyp')
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/variables/filelist/filelist.gyp.stdout b/third_party/python/gyp/test/variables/filelist/filelist.gyp.stdout
new file mode 100644
index 0000000000..595a19c684
--- /dev/null
+++ b/third_party/python/gyp/test/variables/filelist/filelist.gyp.stdout
@@ -0,0 +1,26 @@
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'names.txt <@(names', 'is_array': '', 'replace': '<|(names.txt <@(names)', 'type': '<|', 'command_string': None}
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'names', 'is_array': '', 'replace': '<@(names)', 'type': '<@', 'command_string': None}
+VARIABLES:input.py:797:ExpandVariables Found output 'names.txt John Jacob Jingleheimer Schmidt', recursing.
+VARIABLES:input.py:797:ExpandVariables Found output 'names.txt', recursing.
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'names_listfile', 'is_array': '', 'replace': '<(names_listfile)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:797:ExpandVariables Found output 'names.txt', recursing.
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'names_listfile', 'is_array': '', 'replace': '<(names_listfile)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:797:ExpandVariables Found output 'names.txt', recursing.
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'cat <(names_listfile', 'is_array': '', 'replace': '<!@(cat <(names_listfile)', 'type': '<!@', 'command_string': None}
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'names_listfile', 'is_array': '', 'replace': '<(names_listfile)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:797:ExpandVariables Found output 'cat names.txt', recursing.
+VARIABLES:input.py:676:ExpandVariables Executing command 'cat names.txt' in directory 'src'
+VARIABLES:input.py:797:ExpandVariables Found output ['John', 'Jacob', 'Jingleheimer', 'Schmidt'], recursing.
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'sources.txt <@(_sources', 'is_array': '', 'replace': '<|(sources.txt <@(_sources)', 'type': '<|', 'command_string': None}
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': '_sources', 'is_array': '', 'replace': '<@(_sources)', 'type': '<@', 'command_string': None}
+VARIABLES:input.py:797:ExpandVariables Found output 'sources.txt John Jacob Jingleheimer Schmidt', recursing.
+VARIABLES:input.py:797:ExpandVariables Found output 'sources.txt', recursing.
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'sources_listfile', 'is_array': '', 'replace': '<(sources_listfile)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:797:ExpandVariables Found output 'sources.txt', recursing.
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'sources_listfile', 'is_array': '', 'replace': '<(sources_listfile)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:797:ExpandVariables Found output 'sources.txt', recursing.
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'cat <(sources_listfile', 'is_array': '', 'replace': '<!@(cat <(sources_listfile)', 'type': '<!@', 'command_string': None}
+VARIABLES:input.py:562:ExpandVariables Matches: {'content': 'sources_listfile', 'is_array': '', 'replace': '<(sources_listfile)', 'type': '<', 'command_string': None}
+VARIABLES:input.py:797:ExpandVariables Found output 'cat sources.txt', recursing.
+VARIABLES:input.py:676:ExpandVariables Executing command 'cat sources.txt' in directory 'src'
+VARIABLES:input.py:797:ExpandVariables Found output ['John', 'Jacob', 'Jingleheimer', 'Schmidt'], recursing.
diff --git a/third_party/python/gyp/test/variables/filelist/filelist.gypd.golden b/third_party/python/gyp/test/variables/filelist/filelist.gypd.golden
new file mode 100644
index 0000000000..09d9116047
--- /dev/null
+++ b/third_party/python/gyp/test/variables/filelist/filelist.gypd.golden
@@ -0,0 +1,43 @@
+{'_DEPTH': '.',
+ 'included_files': ['filelist.gyp'],
+ 'targets': [{'actions': [{'action': ['python', 'dummy.py', 'names.txt'],
+ 'action_name': 'test_action',
+ 'inputs': ['names.txt',
+ 'John',
+ 'Jacob',
+ 'Jingleheimer',
+ 'Schmidt'],
+ 'outputs': ['dummy_foo']}],
+ 'configurations': {'Default': {}},
+ 'default_configuration': 'Default',
+ 'target_name': 'foo',
+ 'toolset': 'target',
+ 'type': 'none',
+ 'variables': {'names_listfile': 'names.txt'}},
+ {'actions': [{'action': ['python', 'dummy.py', 'sources.txt'],
+ 'action_name': 'test_action',
+ 'inputs': ['sources.txt',
+ 'John',
+ 'Jacob',
+ 'Jingleheimer',
+ 'Schmidt'],
+ 'outputs': ['dummy_foo']}],
+ 'configurations': {'Default': {}},
+ 'default_configuration': 'Default',
+ 'sources': ['John', 'Jacob', 'Jingleheimer', 'Schmidt'],
+ 'sources_excluded': ['Astor', 'Jerome', 'Schultz'],
+ 'target_name': 'bar',
+ 'toolset': 'target',
+ 'type': 'none',
+ 'variables': {'sources_listfile': 'sources.txt'}}],
+ 'variables': {'names': ['John',
+ 'Jacob',
+ 'Astor',
+ 'Jingleheimer',
+ 'Jerome',
+ 'Schmidt',
+ 'Schultz'],
+ 'names!': ['Astor'],
+ 'names/': [['exclude', 'Sch.*'],
+ ['include', '.*dt'],
+ ['exclude', 'Jer.*']]}}
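
The 'names!' and 'names/' entries above drive GYP's list filtering. A simplified model (assuming match-from-start regex semantics) that reproduces the filtered 'inputs' and 'sources' lists in this golden:

    import re

    names = ['John', 'Jacob', 'Astor', 'Jingleheimer', 'Jerome',
             'Schmidt', 'Schultz']
    excluded = {'Astor'}                            # from 'names!'
    for action, pattern in [('exclude', 'Sch.*'),   # from 'names/'
                            ('include', '.*dt'),
                            ('exclude', 'Jer.*')]:
        for name in names:
            if re.match(pattern, name):
                if action == 'exclude':
                    excluded.add(name)
                else:
                    excluded.discard(name)
    print([n for n in names if n not in excluded])
    # -> ['John', 'Jacob', 'Jingleheimer', 'Schmidt']
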
diff --git a/third_party/python/gyp/test/variables/filelist/gyptest-filelist-golden.py b/third_party/python/gyp/test/variables/filelist/gyptest-filelist-golden.py
new file mode 100644
index 0000000000..3ddc6698f8
--- /dev/null
+++ b/third_party/python/gyp/test/variables/filelist/gyptest-filelist-golden.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test variable expansion of '<|(list.txt ...)' syntax commands.
+"""
+
+from __future__ import print_function
+
+import os
+import sys
+
+import TestGyp
+
+test = TestGyp.TestGyp(format='gypd')
+
+expect = test.read('filelist.gyp.stdout')
+if sys.platform == 'win32':
+ expect = expect.replace('/', r'\\').replace('\r\n', '\n')
+
+test.run_gyp('src/filelist.gyp',
+ '--debug', 'variables',
+ stdout=expect, ignore_line_numbers=True)
+
+# Verify the filelist.gypd against the checked-in expected contents.
+#
+# Normally, we should canonicalize line endings in the expected
+# contents file by setting the Subversion svn:eol-style to native,
+# but that would still fail if multiple systems are sharing a single
+# workspace on a network-mounted file system. Consequently, we
+# massage the Windows line endings ('\r\n') in the output to the
+# checked-in UNIX endings ('\n').
+
+contents = test.read('src/filelist.gypd').replace(
+ '\r', '').replace('\\\\', '/')
+expect = test.read('filelist.gypd.golden').replace('\r', '')
+if not test.match(contents, expect):
+ print("Unexpected contents of `src/filelist.gypd'")
+ test.diff(expect, contents, 'src/filelist.gypd ')
+ test.fail_test()
+
+contents = test.read('src/names.txt')
+expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
+if not test.match(contents, expect):
+ print("Unexpected contents of `src/names.txt'")
+ test.diff(expect, contents, 'src/names.txt ')
+ test.fail_test()
+
+test.pass_test()
+
diff --git a/third_party/python/gyp/test/variables/filelist/gyptest-filelist.py b/third_party/python/gyp/test/variables/filelist/gyptest-filelist.py
new file mode 100755
index 0000000000..b12084c21c
--- /dev/null
+++ b/third_party/python/gyp/test/variables/filelist/gyptest-filelist.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test variable expansion of '<|(list.txt ...)' syntax commands.
+"""
+
+import os
+import sys
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+CHDIR = 'src'
+test.run_gyp('filelist2.gyp', chdir=CHDIR)
+
+test.build('filelist2.gyp', 'foo', chdir=CHDIR)
+contents = test.read('src/dummy_foo').replace('\r', '')
+expect = 'John\nJacob\nJingleheimer\nSchmidt\n'
+if not test.match(contents, expect):
+ print("Unexpected contents of `src/dummy_foo'")
+ test.diff(expect, contents, 'src/dummy_foo')
+ test.fail_test()
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/variables/filelist/src/dummy.py b/third_party/python/gyp/test/variables/filelist/src/dummy.py
new file mode 100644
index 0000000000..e41fc9f8e4
--- /dev/null
+++ b/third_party/python/gyp/test/variables/filelist/src/dummy.py
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+
+import sys
+
+open(sys.argv[1], 'w').write(open(sys.argv[2]).read())
diff --git a/third_party/python/gyp/test/variables/filelist/src/filelist.gyp b/third_party/python/gyp/test/variables/filelist/src/filelist.gyp
new file mode 100644
index 0000000000..df48eb3e4a
--- /dev/null
+++ b/third_party/python/gyp/test/variables/filelist/src/filelist.gyp
@@ -0,0 +1,93 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This is a test to make sure that <|(foo.txt a b c) generates
+# a pre-calculated file list at gyp time and returns foo.txt.
+# This feature is useful to work around limits in the number of arguments that
+# can be passed to rule/action.
+
+{
+ 'variables': {
+ 'names': [
+ 'John',
+ 'Jacob',
+ 'Astor',
+ 'Jingleheimer',
+ 'Jerome',
+ 'Schmidt',
+ 'Schultz',
+ ],
+ 'names!': [
+ 'Astor',
+ ],
+ 'names/': [
+ ['exclude', 'Sch.*'],
+ ['include', '.*dt'],
+ ['exclude', 'Jer.*'],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'none',
+ 'variables': {
+ 'names_listfile': '<|(names.txt <@(names))',
+ },
+ 'actions': [
+ {
+ 'action_name': 'test_action',
+ 'inputs' : [
+ '<(names_listfile)',
+ '<!@(cat <(names_listfile))',
+ ],
+ 'outputs': [
+ 'dummy_foo',
+ ],
+ 'action': [
+ 'python', 'dummy.py', '<(names_listfile)',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'bar',
+ 'type': 'none',
+ 'sources': [
+ 'John',
+ 'Jacob',
+ 'Astor',
+ 'Jingleheimer',
+ 'Jerome',
+ 'Schmidt',
+ 'Schultz',
+ ],
+ 'sources!': [
+ 'Astor',
+ ],
+ 'sources/': [
+ ['exclude', 'Sch.*'],
+ ['include', '.*dt'],
+ ['exclude', 'Jer.*'],
+ ],
+ 'variables': {
+ 'sources_listfile': '<|(sources.txt <@(_sources))',
+ },
+ 'actions': [
+ {
+ 'action_name': 'test_action',
+ 'inputs' : [
+ '<(sources_listfile)',
+ '<!@(cat <(sources_listfile))',
+ ],
+ 'outputs': [
+ 'dummy_foo',
+ ],
+ 'action': [
+ 'python', 'dummy.py', '<(sources_listfile)',
+ ],
+ },
+ ],
+ },
+ ],
+}
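
A minimal sketch of the '<|(...)' behavior described in the comment at the top of this file: the list is flushed to a file at gyp time and the expression expands to just the file name (GYP's real implementation is in input.py):

    def expand_listfile(filename, items):
        # Write the expanded list one entry per line at gyp time...
        with open(filename, 'w') as f:
            for item in items:
                f.write(item + '\n')
        # ...and hand back only the file name, so an action can take
        # '<(names_listfile)' in place of an arbitrarily long argument list.
        return filename

    # '<|(names.txt <@(names))' then behaves roughly like:
    # names_listfile = expand_listfile('names.txt', names)
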
diff --git a/third_party/python/gyp/test/variables/filelist/src/filelist2.gyp b/third_party/python/gyp/test/variables/filelist/src/filelist2.gyp
new file mode 100644
index 0000000000..ec215dbb76
--- /dev/null
+++ b/third_party/python/gyp/test/variables/filelist/src/filelist2.gyp
@@ -0,0 +1,40 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# This is a test to make sure that <|(foo.txt a b c) generates
+# a pre-calculated file list at gyp time and returns foo.txt.
+# This feature is useful to work around limits in the number of arguments that
+# can be passed to rule/action.
+
+{
+ 'variables': {
+ 'names': [
+ 'John',
+ 'Jacob',
+ 'Jingleheimer',
+ 'Schmidt',
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'none',
+ 'variables': {
+ 'names_listfile': '<|(names.txt <@(names))',
+ },
+ 'actions': [
+ {
+ 'action_name': 'test_action',
+ 'msvs_cygwin_shell': 0,
+ 'inputs' : [ '<(names_listfile)' ],
+ 'outputs': [ 'dummy_foo' ],
+ 'action': [
+ 'python', 'dummy.py', '<@(_outputs)', '<(names_listfile)',
+ ],
+ },
+ ],
+ },
+ ],
+}
+
diff --git a/third_party/python/gyp/test/variables/filelist/update_golden b/third_party/python/gyp/test/variables/filelist/update_golden
new file mode 100755
index 0000000000..b4d489a342
--- /dev/null
+++ b/third_party/python/gyp/test/variables/filelist/update_golden
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+python ../../../gyp --debug variables --debug general --format gypd --depth . src/filelist.gyp > filelist.gyp.stdout
+cp -f src/filelist.gypd filelist.gypd.golden
diff --git a/third_party/python/gyp/test/variables/latelate/gyptest-latelate.py b/third_party/python/gyp/test/variables/latelate/gyptest-latelate.py
new file mode 100755
index 0000000000..2d77dfec5e
--- /dev/null
+++ b/third_party/python/gyp/test/variables/latelate/gyptest-latelate.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ^(latelate) style variables work.
+"""
+
+import TestGyp
+
+test = TestGyp.TestGyp()
+
+test.run_gyp('latelate.gyp', chdir='src')
+
+test.relocate('src', 'relocate/src')
+
+test.build('latelate.gyp', test.ALL, chdir='relocate/src')
+
+test.run_built_executable(
+ 'program', chdir='relocate/src', stdout='program.cc\n')
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/variables/latelate/src/latelate.gyp b/third_party/python/gyp/test/variables/latelate/src/latelate.gyp
new file mode 100644
index 0000000000..312f3765b6
--- /dev/null
+++ b/third_party/python/gyp/test/variables/latelate/src/latelate.gyp
@@ -0,0 +1,34 @@
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'target_conditions': [
+ ['has_lame==1', {
+ 'sources/': [
+ ['exclude', 'lame'],
+ ],
+ }],
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'variables': {
+ 'has_lame': 1,
+ },
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)',
+ ],
+ 'defines': [
+ 'FOO="^(_sources)"',
+ ],
+ 'sources': [
+ 'program.cc',
+ 'this_is_lame.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/variables/latelate/src/program.cc b/third_party/python/gyp/test/variables/latelate/src/program.cc
new file mode 100644
index 0000000000..97c98ae5b9
--- /dev/null
+++ b/third_party/python/gyp/test/variables/latelate/src/program.cc
@@ -0,0 +1,13 @@
+/*
+ * Copyright (c) 2012 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+#include <stdio.h>
+
+
+int main(void) {
+ printf(FOO "\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/variables/variable-in-path/C1/hello.cc b/third_party/python/gyp/test/variables/variable-in-path/C1/hello.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/variables/variable-in-path/C1/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/variables/variable-in-path/gyptest-variable-in-path.py b/third_party/python/gyp/test/variables/variable-in-path/gyptest-variable-in-path.py
new file mode 100644
index 0000000000..b73a279da7
--- /dev/null
+++ b/third_party/python/gyp/test/variables/variable-in-path/gyptest-variable-in-path.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure the <(CONFIGURATION_NAME) variable is correctly expanded.
+"""
+
+import TestGyp
+
+import sys
+
+test = TestGyp.TestGyp()
+test.set_configuration('C1')
+
+test.run_gyp('variable-in-path.gyp')
+test.build('variable-in-path.gyp', 'hello1')
+test.build('variable-in-path.gyp', 'hello2')
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/variables/variable-in-path/variable-in-path.gyp b/third_party/python/gyp/test/variables/variable-in-path/variable-in-path.gyp
new file mode 100644
index 0000000000..908d21eb66
--- /dev/null
+++ b/third_party/python/gyp/test/variables/variable-in-path/variable-in-path.gyp
@@ -0,0 +1,31 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello1',
+ 'type': 'executable',
+ 'sources': [
+ '<(CONFIGURATION_NAME)/hello.cc',
+ ],
+ },
+ {
+ 'target_name': 'hello2',
+ 'type': 'executable',
+ 'sources': [
+ './<(CONFIGURATION_NAME)/hello.cc',
+ ],
+ },
+ ],
+ 'target_defaults': {
+ 'default_configuration': 'C1',
+ 'configurations': {
+ 'C1': {
+ },
+ 'C2': {
+ },
+ },
+ },
+}
diff --git a/third_party/python/gyp/test/win/asm-files/asm-files.gyp b/third_party/python/gyp/test/win/asm-files/asm-files.gyp
new file mode 100644
index 0000000000..b1f132ceea
--- /dev/null
+++ b/third_party/python/gyp/test/win/asm-files/asm-files.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'sources_with_asm',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cc',
+ 'b.s',
+ 'c.S',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/asm-files/b.s b/third_party/python/gyp/test/win/asm-files/b.s
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/win/asm-files/b.s
diff --git a/third_party/python/gyp/test/win/asm-files/c.S b/third_party/python/gyp/test/win/asm-files/c.S
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/win/asm-files/c.S
diff --git a/third_party/python/gyp/test/win/asm-files/hello.cc b/third_party/python/gyp/test/win/asm-files/hello.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/win/asm-files/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/batch-file-action/batch-file-action.gyp b/third_party/python/gyp/test/win/batch-file-action/batch-file-action.gyp
new file mode 100644
index 0000000000..e4db9af9d3
--- /dev/null
+++ b/third_party/python/gyp/test/win/batch-file-action/batch-file-action.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_batch',
+ 'type': 'none',
+ 'actions': [
+ {
+ 'action_name': 'copy_to_output',
+ 'inputs': ['infile'],
+ 'outputs': ['outfile'],
+ 'action': ['somecmd.bat', 'infile', 'outfile'],
+ 'msvs_cygwin_shell': 0,
+ }
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/batch-file-action/infile b/third_party/python/gyp/test/win/batch-file-action/infile
new file mode 100644
index 0000000000..3f9177e45e
--- /dev/null
+++ b/third_party/python/gyp/test/win/batch-file-action/infile
@@ -0,0 +1 @@
+input
diff --git a/third_party/python/gyp/test/win/batch-file-action/somecmd.bat b/third_party/python/gyp/test/win/batch-file-action/somecmd.bat
new file mode 100644
index 0000000000..d487753743
--- /dev/null
+++ b/third_party/python/gyp/test/win/batch-file-action/somecmd.bat
@@ -0,0 +1,5 @@
+@echo off
+:: The redirections to nul are important. %2 can end up being an unterminated
+:: quoted string, so the remainder of the command line becomes the target file
+:: name, which in turn fails because it's a file name containing >, nul, etc.
+copy /y %1 %2 >nul 2>nul
diff --git a/third_party/python/gyp/test/win/command-quote/a.S b/third_party/python/gyp/test/win/command-quote/a.S
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/win/command-quote/a.S
diff --git a/third_party/python/gyp/test/win/command-quote/bat with spaces.bat b/third_party/python/gyp/test/win/command-quote/bat with spaces.bat
new file mode 100644
index 0000000000..dc3508f9a9
--- /dev/null
+++ b/third_party/python/gyp/test/win/command-quote/bat with spaces.bat
@@ -0,0 +1,7 @@
+@echo off
+
+:: Copyright (c) 2012 Google Inc. All rights reserved.
+:: Use of this source code is governed by a BSD-style license that can be
+:: found in the LICENSE file.
+
+copy %1 %2
diff --git a/third_party/python/gyp/test/win/command-quote/command-quote.gyp b/third_party/python/gyp/test/win/command-quote/command-quote.gyp
new file mode 100644
index 0000000000..faf724674f
--- /dev/null
+++ b/third_party/python/gyp/test/win/command-quote/command-quote.gyp
@@ -0,0 +1,79 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'target_defaults': {
+ 'msvs_cygwin_dirs': ['../../../../../<(DEPTH)/third_party/cygwin'],
+ },
+ 'targets': [
+ {
+ 'target_name': 'test_batch',
+ 'type': 'none',
+ 'rules': [
+ {
+ 'rule_name': 'build_with_batch',
+ 'msvs_cygwin_shell': 0,
+ 'extension': 'S',
+ 'outputs': ['output.obj'],
+ 'action': ['call go.bat', '<(RULE_INPUT_PATH)', 'output.obj'],
+ },],
+ 'sources': ['a.S'],
+ },
+ {
+ 'target_name': 'test_call_separate',
+ 'type': 'none',
+ 'rules': [
+ {
+ 'rule_name': 'build_with_batch2',
+ 'msvs_cygwin_shell': 0,
+ 'extension': 'S',
+ 'outputs': ['output2.obj'],
+ 'action': ['call', 'go.bat', '<(RULE_INPUT_PATH)', 'output2.obj'],
+ },],
+ 'sources': ['a.S'],
+ },
+ {
+ 'target_name': 'test_with_spaces',
+ 'type': 'none',
+ 'rules': [
+ {
+ 'rule_name': 'build_with_batch3',
+ 'msvs_cygwin_shell': 0,
+ 'extension': 'S',
+ 'outputs': ['output3.obj'],
+ 'action': ['bat with spaces.bat', '<(RULE_INPUT_PATH)', 'output3.obj'],
+ },],
+ 'sources': ['a.S'],
+ },
+ {
+ 'target_name': 'test_with_double_quotes',
+ 'type': 'none',
+ 'rules': [
+ {
+ 'rule_name': 'build_with_batch3',
+ 'msvs_cygwin_shell': 1,
+ 'extension': 'S',
+ 'outputs': ['output4.obj'],
+ 'arguments': ['-v'],
+ 'action': ['python', '-c', 'import shutil; '
+ 'shutil.copy("<(RULE_INPUT_PATH)", "output4.obj")'],
+ },],
+ 'sources': ['a.S'],
+ },
+ {
+ 'target_name': 'test_with_single_quotes',
+ 'type': 'none',
+ 'rules': [
+ {
+ 'rule_name': 'build_with_batch3',
+ 'msvs_cygwin_shell': 1,
+ 'extension': 'S',
+ 'outputs': ['output5.obj'],
+ 'action': ['python', '-c', "import shutil; "
+ "shutil.copy('<(RULE_INPUT_PATH)', 'output5.obj')"],
+ },],
+ 'sources': ['a.S'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/command-quote/go.bat b/third_party/python/gyp/test/win/command-quote/go.bat
new file mode 100644
index 0000000000..dc3508f9a9
--- /dev/null
+++ b/third_party/python/gyp/test/win/command-quote/go.bat
@@ -0,0 +1,7 @@
+@echo off
+
+:: Copyright (c) 2012 Google Inc. All rights reserved.
+:: Use of this source code is governed by a BSD-style license that can be
+:: found in the LICENSE file.
+
+copy %1 %2
diff --git a/third_party/python/gyp/test/win/command-quote/subdir/and/another/in-subdir.gyp b/third_party/python/gyp/test/win/command-quote/subdir/and/another/in-subdir.gyp
new file mode 100644
index 0000000000..3dff4c40b9
--- /dev/null
+++ b/third_party/python/gyp/test/win/command-quote/subdir/and/another/in-subdir.gyp
@@ -0,0 +1,27 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_batch_depth',
+ 'type': 'none',
+ 'variables': {
+ # Taken from native_client/build/common.gypi. Seems unintentional (a
+ # string in a one-element list)? But since it works on other generators,
+ # I guess it should work here too.
+ 'filepath': [ 'call <(DEPTH)/../../../go.bat' ],
+ },
+ 'rules': [
+ {
+ 'rule_name': 'build_with_batch4',
+ 'msvs_cygwin_shell': 0,
+ 'extension': 'S',
+ 'outputs': ['output4.obj'],
+ 'action': ['<@(filepath)', '<(RULE_INPUT_PATH)', 'output4.obj'],
+ },],
+ 'sources': ['<(DEPTH)\\..\\..\\..\\a.S'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/additional-include-dirs.cc b/third_party/python/gyp/test/win/compiler-flags/additional-include-dirs.cc
new file mode 100644
index 0000000000..f1e11dd12d
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/additional-include-dirs.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// No path qualification to test compiler include dir specification.
+#include "header.h"
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/additional-include-dirs.gyp b/third_party/python/gyp/test/win/compiler-flags/additional-include-dirs.gyp
new file mode 100644
index 0000000000..42c7e849f6
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/additional-include-dirs.gyp
@@ -0,0 +1,20 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_incs',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalIncludeDirectories': [
+ 'subdir',
+ ],
+ }
+ },
+ 'sources': ['additional-include-dirs.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/additional-options.cc b/third_party/python/gyp/test/win/compiler-flags/additional-options.cc
new file mode 100644
index 0000000000..c79572bafa
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/additional-options.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ // Generate a warning that will appear at level 4, but not level 1
+ // (truncation and unused local).
+ char c = 123456;
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/additional-options.gyp b/third_party/python/gyp/test/win/compiler-flags/additional-options.gyp
new file mode 100644
index 0000000000..6a365a2062
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/additional-options.gyp
@@ -0,0 +1,31 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_additional_none',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '4',
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['additional-options.cc'],
+ },
+ {
+ 'target_name': 'test_additional_one',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '4',
+ 'WarnAsError': 'true',
+ 'AdditionalOptions': [ '/W1' ],
+ }
+ },
+ 'sources': ['additional-options.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/analysis.gyp b/third_party/python/gyp/test/win/compiler-flags/analysis.gyp
new file mode 100644
index 0000000000..97e942258f
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/analysis.gyp
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_analysis_on',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnablePREfast': 'true',
+ 'WarnAsError': 'true',
+ },
+ },
+ 'sources': ['uninit.cc'],
+ },
+ {
+ 'target_name': 'test_analysis_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnablePREfast': 'false',
+ 'WarnAsError': 'true',
+ },
+ },
+ 'sources': ['uninit.cc'],
+ },
+ {
+ 'target_name': 'test_analysis_unspec',
+ 'type': 'executable',
+ 'sources': ['uninit.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarnAsError': 'true',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/buffer-security-check.gyp b/third_party/python/gyp/test/win/compiler-flags/buffer-security-check.gyp
new file mode 100644
index 0000000000..cc5a12b953
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/buffer-security-check.gyp
@@ -0,0 +1,51 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # Turn debug information on so that we can see the name of the buffer
+ # security check cookie in the disassembly.
+ {
+ 'target_name': 'test_bsc_unset',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ 'sources': ['buffer-security.cc'],
+ },
+ {
+ 'target_name': 'test_bsc_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'BufferSecurityCheck': 'false',
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ 'sources': ['buffer-security.cc'],
+ },
+ {
+ 'target_name': 'test_bsc_on',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'BufferSecurityCheck': 'true',
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ 'sources': ['buffer-security.cc'],
+ },
+ ]
+}
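
One way to observe the difference between these targets, assuming a VS developer prompt with dumpbin on PATH (symbol names vary by toolchain, so this is only a sketch):

    import subprocess

    # /GS-protected code calls __security_check_cookie on function exit;
    # the test_bsc_off binary should not reference it.
    disasm = subprocess.check_output(['dumpbin', '/disasm', 'test_bsc_on.exe'])
    print(b'__security_check_cookie' in disasm)  # expected: True
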
diff --git a/third_party/python/gyp/test/win/compiler-flags/buffer-security.cc b/third_party/python/gyp/test/win/compiler-flags/buffer-security.cc
new file mode 100644
index 0000000000..e8a48a2a67
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/buffer-security.cc
@@ -0,0 +1,12 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <malloc.h>
+#include <string.h>
+
+int main() {
+ char* stuff = reinterpret_cast<char*>(_alloca(256));
+ strcpy(stuff, "blah");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/calling-convention-cdecl.def b/third_party/python/gyp/test/win/compiler-flags/calling-convention-cdecl.def
new file mode 100644
index 0000000000..dc1dba055a
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/calling-convention-cdecl.def
@@ -0,0 +1,6 @@
+; Copyright (c) 2014 Google Inc. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+EXPORTS
+ foo
diff --git a/third_party/python/gyp/test/win/compiler-flags/calling-convention-fastcall.def b/third_party/python/gyp/test/win/compiler-flags/calling-convention-fastcall.def
new file mode 100644
index 0000000000..2c61afe208
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/calling-convention-fastcall.def
@@ -0,0 +1,6 @@
+; Copyright (c) 2014 Google Inc. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+EXPORTS
+ @foo@0
diff --git a/third_party/python/gyp/test/win/compiler-flags/calling-convention-stdcall.def b/third_party/python/gyp/test/win/compiler-flags/calling-convention-stdcall.def
new file mode 100644
index 0000000000..6c7e05e9ea
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/calling-convention-stdcall.def
@@ -0,0 +1,6 @@
+; Copyright (c) 2014 Google Inc. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+EXPORTS
+ _foo@0
diff --git a/third_party/python/gyp/test/win/compiler-flags/calling-convention-vectorcall.def b/third_party/python/gyp/test/win/compiler-flags/calling-convention-vectorcall.def
new file mode 100644
index 0000000000..4ef119c3e3
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/calling-convention-vectorcall.def
@@ -0,0 +1,6 @@
+; Copyright (c) 2014 Google Inc. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+EXPORTS
+ foo@@0
diff --git a/third_party/python/gyp/test/win/compiler-flags/calling-convention.cc b/third_party/python/gyp/test/win/compiler-flags/calling-convention.cc
new file mode 100644
index 0000000000..0d78a0cc05
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/calling-convention.cc
@@ -0,0 +1,6 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern "C" void foo() {
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/calling-convention.gyp b/third_party/python/gyp/test/win/compiler-flags/calling-convention.gyp
new file mode 100644
index 0000000000..5069c552bc
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/calling-convention.gyp
@@ -0,0 +1,66 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_cdecl',
+ 'type': 'loadable_module',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'CallingConvention': 0,
+ },
+ },
+ 'sources': [
+ 'calling-convention.cc',
+ 'calling-convention-cdecl.def',
+ ],
+ },
+ {
+ 'target_name': 'test_fastcall',
+ 'type': 'loadable_module',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'CallingConvention': 1,
+ },
+ },
+ 'sources': [
+ 'calling-convention.cc',
+ 'calling-convention-fastcall.def',
+ ],
+ },
+ {
+ 'target_name': 'test_stdcall',
+ 'type': 'loadable_module',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'CallingConvention': 2,
+ },
+ },
+ 'sources': [
+ 'calling-convention.cc',
+ 'calling-convention-stdcall.def',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['MSVS_VERSION[0:4]>="2013"', {
+ 'targets': [
+ {
+ 'target_name': 'test_vectorcall',
+ 'type': 'loadable_module',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'CallingConvention': 3,
+ },
+ },
+ 'sources': [
+ 'calling-convention.cc',
+ 'calling-convention-vectorcall.def',
+ ],
+ },
+ ],
+ }],
+ ],
+}
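
Collected from the four .def files above, the decorated names MSVC's x86 calling conventions produce for 'extern "C" void foo()':

    # Keyed by the VCCLCompilerTool CallingConvention value used above.
    DECORATED_EXPORTS = {
        0: 'foo',     # __cdecl
        1: '@foo@0',  # __fastcall
        2: '_foo@0',  # __stdcall
        3: 'foo@@0',  # __vectorcall (VS 2013+)
    }
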
diff --git a/third_party/python/gyp/test/win/compiler-flags/character-set-mbcs.cc b/third_party/python/gyp/test/win/compiler-flags/character-set-mbcs.cc
new file mode 100644
index 0000000000..3286304730
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/character-set-mbcs.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _MBCS
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/character-set-unicode.cc b/third_party/python/gyp/test/win/compiler-flags/character-set-unicode.cc
new file mode 100644
index 0000000000..32e69724a5
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/character-set-unicode.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _UNICODE
+#error
+#endif
+
+#ifndef UNICODE
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/character-set.gyp b/third_party/python/gyp/test/win/compiler-flags/character-set.gyp
new file mode 100644
index 0000000000..3dc45557d9
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/character-set.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_cs_notset',
+ 'product_name': 'test_cs_notset',
+ 'type': 'executable',
+ 'msvs_configuration_attributes': {
+ 'CharacterSet': '0'
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_cs_unicode',
+ 'product_name': 'test_cs_unicode',
+ 'type': 'executable',
+ 'msvs_configuration_attributes': {
+ 'CharacterSet': '1'
+ },
+ 'sources': ['character-set-unicode.cc'],
+ },
+ {
+ 'target_name': 'test_cs_mbcs',
+ 'product_name': 'test_cs_mbcs',
+ 'type': 'executable',
+ 'msvs_configuration_attributes': {
+ 'CharacterSet': '2'
+ },
+ 'sources': ['character-set-mbcs.cc'],
+ },
+ ]
+}
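
The preprocessor checks in character-set-unicode.cc and character-set-mbcs.cc imply the defines each CharacterSet value turns on; summarized as a sketch:

    # Assumed mapping from the msvs_configuration_attributes value to the
    # defines the two .cc files above assert on.
    CHARACTER_SET_DEFINES = {
        '0': [],                       # NotSet
        '1': ['UNICODE', '_UNICODE'],  # Unicode
        '2': ['_MBCS'],                # MBCS
    }
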
diff --git a/third_party/python/gyp/test/win/compiler-flags/compile-as-managed.cc b/third_party/python/gyp/test/win/compiler-flags/compile-as-managed.cc
new file mode 100644
index 0000000000..a29c71ee1d
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/compile-as-managed.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vcclr.h>
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/compile-as-managed.gyp b/third_party/python/gyp/test/win/compiler-flags/compile-as-managed.gyp
new file mode 100644
index 0000000000..3bacbbc135
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/compile-as-managed.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test-compile-as-managed',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'CompileAsManaged': 'true',
+ 'ExceptionHandling': '0' # /clr is incompatible with /EHs
+ }
+ },
+ 'sources': ['compile-as-managed.cc'],
+ },
+ {
+ 'target_name': 'test-compile-as-unmanaged',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'CompileAsManaged': 'false',
+ }
+ },
+ 'sources': ['compile-as-managed.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/compile-as-winrt.cc b/third_party/python/gyp/test/win/compiler-flags/compile-as-winrt.cc
new file mode 100644
index 0000000000..da9954f8fb
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/compile-as-winrt.cc
@@ -0,0 +1,12 @@
+// Copyright (c) 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+using namespace Platform;
+
+int main() {
+ wchar_t msg[] = L"Test";
+ String^ str1 = ref new String(msg);
+ auto str2 = String::Concat(str1, " Concat");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/compile-as-winrt.gyp b/third_party/python/gyp/test/win/compiler-flags/compile-as-winrt.gyp
new file mode 100644
index 0000000000..8978e5059d
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/compile-as-winrt.gyp
@@ -0,0 +1,20 @@
+# Copyright (c) 2016 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test-compile-as-winrt',
+ 'type': 'executable',
+ 'msvs_windows_sdk_version': 'v10.0',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalUsingDirectories': ['$(VCInstallDir)vcpackages;$(WindowsSdkDir)UnionMetadata;%(AdditionalUsingDirectories)'],
+ 'CompileAsWinRT': 'true'
+ }
+ },
+ 'sources': ['compile-as-winrt.cc']
+ }
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/debug-format.gyp b/third_party/python/gyp/test/win/compiler-flags/debug-format.gyp
new file mode 100644
index 0000000000..daaed23ff1
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/debug-format.gyp
@@ -0,0 +1,48 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test-debug-format-off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '0'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test-debug-format-oldstyle',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '1'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test-debug-format-pdb',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test-debug-format-editcontinue',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '4'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
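
For reference, the DebugInformationFormat values exercised above map to cl.exe flags roughly as follows (the flag names are an assumption from the MSVS toolchain; the target names hint at the same mapping):

    DEBUG_FORMAT_FLAGS = {
        '0': [],       # off
        '1': ['/Z7'],  # old style, debug info embedded in the .obj
        '3': ['/Zi'],  # PDB
        '4': ['/ZI'],  # PDB with edit and continue
    }
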
diff --git a/third_party/python/gyp/test/win/compiler-flags/default-char-is-unsigned.cc b/third_party/python/gyp/test/win/compiler-flags/default-char-is-unsigned.cc
new file mode 100644
index 0000000000..beeca2aa15
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/default-char-is-unsigned.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+template <bool>
+struct CompileAssert {
+};
+
+#define COMPILE_ASSERT(expr, msg) \
+ typedef CompileAssert<(bool(expr))> msg[bool(expr) ? 1 : -1]
+
+int main() {
+ COMPILE_ASSERT(char(-1) > 0, default_char_is_unsigned);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/default-char-is-unsigned.gyp b/third_party/python/gyp/test/win/compiler-flags/default-char-is-unsigned.gyp
new file mode 100644
index 0000000000..941e5810dd
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/default-char-is-unsigned.gyp
@@ -0,0 +1,20 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_default_char_is_unsigned',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DefaultCharIsUnsigned': 'true',
+ },
+ },
+ 'sources': [
+ 'default-char-is-unsigned.cc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/disable-specific-warnings.cc b/third_party/python/gyp/test/win/compiler-flags/disable-specific-warnings.cc
new file mode 100644
index 0000000000..d312f5f481
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/disable-specific-warnings.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ // Causes level 1 warning (C4700)
+ int i;
+ return i;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/disable-specific-warnings.gyp b/third_party/python/gyp/test/win/compiler-flags/disable-specific-warnings.gyp
new file mode 100644
index 0000000000..d81d694c62
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/disable-specific-warnings.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_disable_specific_warnings_set',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarnAsError': 'true',
+ 'DisableSpecificWarnings': ['4700']
+ }
+ },
+ 'sources': ['disable-specific-warnings.cc']
+ },
+ {
+ 'target_name': 'test_disable_specific_warnings_unset',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarnAsError': 'true'
+ }
+ },
+ 'sources': ['disable-specific-warnings.cc']
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/enable-enhanced-instruction-set.cc b/third_party/python/gyp/test/win/compiler-flags/enable-enhanced-instruction-set.cc
new file mode 100644
index 0000000000..432ef54eda
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/enable-enhanced-instruction-set.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+static const char* GetArchOption() {
+#if _M_IX86_FP == 0
+ return "IA32";
+#elif _M_IX86_FP == 1
+ return "SSE";
+#elif _M_IX86_FP == 2
+# if defined(__AVX2__)
+ return "AVX2";
+# elif defined(__AVX__)
+ return "AVX";
+# else
+ return "SSE2";
+# endif
+#else
+ return "UNSUPPORTED OPTION";
+#endif
+}
+
+int main() {
+ printf("/arch:%s\n", GetArchOption());
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/enable-enhanced-instruction-set.gyp b/third_party/python/gyp/test/win/compiler-flags/enable-enhanced-instruction-set.gyp
new file mode 100644
index 0000000000..9c49edc7ff
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/enable-enhanced-instruction-set.gyp
@@ -0,0 +1,68 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'sse_extensions',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableEnhancedInstructionSet': '1', # StreamingSIMDExtensions
+ }
+ },
+ 'sources': ['enable-enhanced-instruction-set.cc'],
+ },
+ {
+ 'target_name': 'sse2_extensions',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableEnhancedInstructionSet': '2', # StreamingSIMDExtensions2
+ }
+ },
+ 'sources': ['enable-enhanced-instruction-set.cc'],
+ },
+ ],
+ 'conditions': [
+ ['MSVS_VERSION[0:4]>"2010"', {
+ 'targets': [
+ {
+ 'target_name': 'avx_extensions',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableEnhancedInstructionSet': '3', # AdvancedVectorExtensions
+ }
+ },
+ 'sources': ['enable-enhanced-instruction-set.cc'],
+ },
+ {
+ 'target_name': 'no_extensions',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableEnhancedInstructionSet': '4', # NoExtensions
+ }
+ },
+ 'sources': ['enable-enhanced-instruction-set.cc'],
+ },
+ ],
+ }],
+ ['MSVS_VERSION[0:4]>="2013"', {
+ 'targets': [
+ {
+ 'target_name': 'avx2_extensions',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableEnhancedInstructionSet': '5', # AdvancedVectorExtensions2
+ }
+ },
+ 'sources': ['enable-enhanced-instruction-set.cc'],
+ },
+ ],
+ }],
+ ],
+}
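+
+# For reference, the EnableEnhancedInstructionSet values used above map to
+# these /arch switches (assuming the usual MSBuild enumeration; the companion
+# gyptest checks the printed /arch values):
+#   '1' StreamingSIMDExtensions   -> /arch:SSE
+#   '2' StreamingSIMDExtensions2  -> /arch:SSE2
+#   '3' AdvancedVectorExtensions  -> /arch:AVX   (gated above to newer VS)
+#   '4' NoExtensions              -> /arch:IA32  (gated above to newer VS)
+#   '5' AdvancedVectorExtensions2 -> /arch:AVX2  (VS2013+)
+# The companion .cc reports the active /arch via the _M_IX86_FP and
+# __AVX__/__AVX2__ predefined macros.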
diff --git a/third_party/python/gyp/test/win/compiler-flags/exception-handling-on.cc b/third_party/python/gyp/test/win/compiler-flags/exception-handling-on.cc
new file mode 100644
index 0000000000..5d9a3af77d
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/exception-handling-on.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <excpt.h>
+#include <stdlib.h>
+
+void fail() {
+ try {
+ int i = 0, j = 1;
+ j /= i;
+ } catch(...) {
+ exit(1);
+ }
+}
+
+int main() {
+ __try {
+ fail();
+ } __except(EXCEPTION_EXECUTE_HANDLER) {
+ return 2;
+ }
+ return 3;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/exception-handling.gyp b/third_party/python/gyp/test/win/compiler-flags/exception-handling.gyp
new file mode 100644
index 0000000000..c266768dda
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/exception-handling.gyp
@@ -0,0 +1,46 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # Optimization disabled so that the exception-causing code is not removed
+ # (divide by zero was getting optimized away in VS2010).
+ {
+ 'target_name': 'test_eh_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'ExceptionHandling': '0',
+ 'WarnAsError': 'true',
+ 'Optimization': '0',
+ }
+ },
+ 'sources': ['exception-handling-on.cc'],
+ },
+ {
+ 'target_name': 'test_eh_s',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'ExceptionHandling': '1',
+ 'WarnAsError': 'true',
+ 'Optimization': '0',
+ }
+ },
+ 'sources': ['exception-handling-on.cc'],
+ },
+ {
+ 'target_name': 'test_eh_a',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'ExceptionHandling': '2',
+ 'WarnAsError': 'true',
+ 'Optimization': '0',
+ }
+ },
+ 'sources': ['exception-handling-on.cc'],
+ },
+ ]
+}
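+
+# For reference: ExceptionHandling '0' emits no /EH switch, '1' maps to /EHsc
+# (C++ exceptions only) and '2' to /EHa (C++ plus SEH), assuming the standard
+# MSVC mapping. The companion .cc exits 1 when catch(...) sees the hardware
+# fault (/EHa) and 2 when only the __except handler does (/EHsc), which is
+# what the companion gyptest asserts.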
diff --git a/third_party/python/gyp/test/win/compiler-flags/floating-point-model-fast.cc b/third_party/python/gyp/test/win/compiler-flags/floating-point-model-fast.cc
new file mode 100644
index 0000000000..9d22152f5e
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/floating-point-model-fast.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef _M_FP_PRECISE
+#error
+#endif
+
+#ifdef _M_FP_STRICT
+#error
+#endif
+
+#ifndef _M_FP_FAST
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/floating-point-model-precise.cc b/third_party/python/gyp/test/win/compiler-flags/floating-point-model-precise.cc
new file mode 100644
index 0000000000..1191a74ed1
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/floating-point-model-precise.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _M_FP_PRECISE
+#error
+#endif
+
+#ifdef _M_FP_STRICT
+#error
+#endif
+
+#ifdef _M_FP_FAST
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/floating-point-model-strict.cc b/third_party/python/gyp/test/win/compiler-flags/floating-point-model-strict.cc
new file mode 100644
index 0000000000..1ffde36e0c
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/floating-point-model-strict.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef _M_FP_PRECISE
+#error
+#endif
+
+#ifndef _M_FP_STRICT
+#error
+#endif
+
+#ifdef _M_FP_FAST
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/floating-point-model.gyp b/third_party/python/gyp/test/win/compiler-flags/floating-point-model.gyp
new file mode 100644
index 0000000000..857b275a8f
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/floating-point-model.gyp
@@ -0,0 +1,43 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test-floating-point-model-default',
+ 'type': 'executable',
+ 'sources': ['floating-point-model-precise.cc'],
+ },
+ {
+ 'target_name': 'test-floating-point-model-precise',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'FloatingPointModel': '0'
+ }
+ },
+ 'sources': ['floating-point-model-precise.cc'],
+ },
+ {
+ 'target_name': 'test-floating-point-model-strict',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'FloatingPointModel': '1'
+ }
+ },
+ 'sources': ['floating-point-model-strict.cc'],
+ },
+ {
+ 'target_name': 'test-floating-point-model-fast',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'FloatingPointModel': '2'
+ }
+ },
+ 'sources': ['floating-point-model-fast.cc'],
+ },
+ ]
+}
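+
+# For reference, FloatingPointModel values map to the /fp switches (standard
+# MSVC mapping): '0' -> /fp:precise (also the default), '1' -> /fp:strict,
+# '2' -> /fp:fast. Each mode predefines exactly one of _M_FP_PRECISE,
+# _M_FP_STRICT or _M_FP_FAST, which is what the companion .cc files assert.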
diff --git a/third_party/python/gyp/test/win/compiler-flags/force-include-files-with-precompiled.cc b/third_party/python/gyp/test/win/compiler-flags/force-include-files-with-precompiled.cc
new file mode 100644
index 0000000000..85cb0f32a6
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/force-include-files-with-precompiled.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+int main() {
+ std::string s;
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/force-include-files.cc b/third_party/python/gyp/test/win/compiler-flags/force-include-files.cc
new file mode 100644
index 0000000000..4a93de55d4
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/force-include-files.cc
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ std::list<std::vector<std::string> > l;
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/force-include-files.gyp b/third_party/python/gyp/test/win/compiler-flags/force-include-files.gyp
new file mode 100644
index 0000000000..2031546cc5
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/force-include-files.gyp
@@ -0,0 +1,36 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_force_include_files',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'ForcedIncludeFiles': ['string', 'vector', 'list'],
+ },
+ },
+ 'sources': [
+ 'force-include-files.cc',
+ ],
+ },
+ {
+ 'target_name': 'test_force_include_with_precompiled',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'ForcedIncludeFiles': ['string'],
+ },
+ },
+ 'msvs_precompiled_header': 'stdio.h',
+ 'msvs_precompiled_source': 'precomp.cc',
+ 'msvs_disabled_warnings': [ 4530, ],
+ 'sources': [
+ 'force-include-files-with-precompiled.cc',
+ 'precomp.cc',
+ ],
+ },
+ ],
+}
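+
+# For reference: each ForcedIncludeFiles entry becomes a /FI<header> switch,
+# e.g. (a sketch, assuming the standard mapping)
+#   cl /FIstring /FIvector /FIlist force-include-files.cc
+# which is why the sources can use std::string etc. without any #includes.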
diff --git a/third_party/python/gyp/test/win/compiler-flags/function-level-linking.cc b/third_party/python/gyp/test/win/compiler-flags/function-level-linking.cc
new file mode 100644
index 0000000000..4952272817
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/function-level-linking.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int comdat_function() {
+ return 1;
+}
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/function-level-linking.gyp b/third_party/python/gyp/test/win/compiler-flags/function-level-linking.gyp
new file mode 100644
index 0000000000..5858586a24
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/function-level-linking.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_fll_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFunctionLevelLinking': 'false'
+ }
+ },
+ 'sources': ['function-level-linking.cc'],
+ },
+ {
+ 'target_name': 'test_fll_on',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFunctionLevelLinking': 'true',
+ }
+ },
+ 'sources': ['function-level-linking.cc'],
+ },
+ ]
+}
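+
+# For reference: EnableFunctionLevelLinking 'true' maps to /Gy and 'false' to
+# /Gy- (standard MSVC mapping). With /Gy each function is packaged as its own
+# COMDAT section, which the companion gyptest verifies via dumpbin /headers.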
diff --git a/third_party/python/gyp/test/win/compiler-flags/hello.cc b/third_party/python/gyp/test/win/compiler-flags/hello.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/optimizations.gyp b/third_party/python/gyp/test/win/compiler-flags/optimizations.gyp
new file mode 100644
index 0000000000..e63096f0f7
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/optimizations.gyp
@@ -0,0 +1,207 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_opt_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'Optimization': '0'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_lev_size',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'Optimization': '1'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_lev_speed',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'Optimization': '2'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_lev_max',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'Optimization': '3'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_unset',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_fpo',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'OmitFramePointers': 'true'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_fpo_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'OmitFramePointers': 'false'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_intrinsic',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableIntrinsicFunctions': 'true'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_intrinsic_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableIntrinsicFunctions': 'false'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_inline_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'InlineFunctionExpansion': '0'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_inline_manual',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'InlineFunctionExpansion': '1'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_inline_auto',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'InlineFunctionExpansion': '2'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_neither',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'FavorSizeOrSpeed': '0'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_speed',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'FavorSizeOrSpeed': '1'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_size',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'FavorSizeOrSpeed': '2'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_wpo',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WholeProgramOptimization': 'true'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_sp',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'StringPooling': 'true'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_sp_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'StringPooling': 'false'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_fso',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFiberSafeOptimizations': 'true'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_opt_fso_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFiberSafeOptimizations': 'false'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
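+
+# For reference, the settings above map to cl switches roughly as follows
+# (the Optimization mapping is also asserted by the companion gyptest):
+#   Optimization '0'-'3'            -> /Od, /O1, /O2, /Ox
+#   FavorSizeOrSpeed '1'/'2'        -> /Ot (speed), /Os (size)
+#   InlineFunctionExpansion '0'-'2' -> /Ob0, /Ob1, /Ob2
+#   OmitFramePointers               -> /Oy or /Oy-
+#   EnableIntrinsicFunctions        -> /Oi or /Oi-
+#   WholeProgramOptimization 'true' -> /GL
+#   StringPooling                   -> /GF or /GF-
+#   EnableFiberSafeOptimizations    -> /GT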
diff --git a/third_party/python/gyp/test/win/compiler-flags/pdbname-override.gyp b/third_party/python/gyp/test/win/compiler-flags/pdbname-override.gyp
new file mode 100644
index 0000000000..dad20e01fd
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/pdbname-override.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_pdbname',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cc',
+ 'pdbname.cc',
+ ],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ 'ProgramDataBaseFileName': '<(PRODUCT_DIR)/compiler_generated.pdb',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': '<(PRODUCT_DIR)/linker_generated.pdb',
+ },
+ },
+ },
+ ]
+}
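+
+# For reference (standard MSVC mapping): ProgramDataBaseFileName maps to the
+# compiler's /Fd<path> switch and ProgramDatabaseFile to the linker's
+# /PDB:<path>, so this target should produce both compiler_generated.pdb and
+# linker_generated.pdb in the product directory.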
diff --git a/third_party/python/gyp/test/win/compiler-flags/pdbname.cc b/third_party/python/gyp/test/win/compiler-flags/pdbname.cc
new file mode 100644
index 0000000000..0fe05d5afb
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/pdbname.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int some_function() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/pdbname.gyp b/third_party/python/gyp/test/win/compiler-flags/pdbname.gyp
new file mode 100644
index 0000000000..8fcf754727
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/pdbname.gyp
@@ -0,0 +1,24 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_pdbname',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cc',
+ 'pdbname.cc',
+ ],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/precomp.cc b/third_party/python/gyp/test/win/compiler-flags/precomp.cc
new file mode 100644
index 0000000000..d16bac890f
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/precomp.cc
@@ -0,0 +1,6 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <stdio.h>
diff --git a/third_party/python/gyp/test/win/compiler-flags/rtti-on.cc b/third_party/python/gyp/test/win/compiler-flags/rtti-on.cc
new file mode 100644
index 0000000000..2d3ad03ae4
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/rtti-on.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _CPPRTTI
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/rtti.gyp b/third_party/python/gyp/test/win/compiler-flags/rtti.gyp
new file mode 100644
index 0000000000..704cd58f5a
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/rtti.gyp
@@ -0,0 +1,37 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_rtti_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeTypeInfo': 'false',
+ 'WarnAsError': 'true'
+ }
+ },
+ 'sources': ['rtti-on.cc'],
+ },
+ {
+ 'target_name': 'test_rtti_on',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeTypeInfo': 'true',
+ 'WarnAsError': 'true'
+ }
+ },
+ 'sources': ['rtti-on.cc'],
+ },
+ {
+ 'target_name': 'test_rtti_unset',
+ 'type': 'executable',
+ 'msvs_settings': {
+ },
+ 'sources': ['rtti-on.cc'],
+ },
+ ]
+}
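+
+# For reference: RuntimeTypeInfo 'true' maps to /GR and 'false' to /GR-
+# (standard MSVC mapping). /GR predefines _CPPRTTI, which rtti-on.cc asserts,
+# so test_rtti_off is expected to fail to compile while the /GR and unset
+# (default-on) targets succeed.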
diff --git a/third_party/python/gyp/test/win/compiler-flags/runtime-checks.cc b/third_party/python/gyp/test/win/compiler-flags/runtime-checks.cc
new file mode 100644
index 0000000000..fdb811da87
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/runtime-checks.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef __MSVC_RUNTIME_CHECKS
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/runtime-checks.gyp b/third_party/python/gyp/test/win/compiler-flags/runtime-checks.gyp
new file mode 100644
index 0000000000..8ea3092057
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/runtime-checks.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_brc_none',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'Optimization': '0',
+ }
+ },
+ 'sources': ['runtime-checks.cc'],
+ },
+ {
+ 'target_name': 'test_brc_1',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'Optimization': '0',
+ 'BasicRuntimeChecks': '3'
+ }
+ },
+ 'sources': ['runtime-checks.cc'],
+ },
+ ]
+}
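+
+# For reference: BasicRuntimeChecks '3' maps to /RTC1 (stack-frame plus
+# uninitialized-variable checks, standard MSVC mapping), which predefines
+# __MSVC_RUNTIME_CHECKS; runtime-checks.cc #errors when that macro is absent,
+# so only the test_brc_1 target is expected to compile.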
diff --git a/third_party/python/gyp/test/win/compiler-flags/runtime-library-md.cc b/third_party/python/gyp/test/win/compiler-flags/runtime-library-md.cc
new file mode 100644
index 0000000000..87c83021d4
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/runtime-library-md.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _MT
+#error
+#endif
+
+#ifdef _DEBUG
+#error
+#endif
+
+#ifndef _DLL
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/runtime-library-mdd.cc b/third_party/python/gyp/test/win/compiler-flags/runtime-library-mdd.cc
new file mode 100644
index 0000000000..9f175e493e
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/runtime-library-mdd.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _MT
+#error
+#endif
+
+#ifndef _DEBUG
+#error
+#endif
+
+#ifndef _DLL
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/runtime-library-mt.cc b/third_party/python/gyp/test/win/compiler-flags/runtime-library-mt.cc
new file mode 100644
index 0000000000..27e62b63db
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/runtime-library-mt.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _MT
+#error
+#endif
+
+#ifdef _DEBUG
+#error
+#endif
+
+#ifdef _DLL
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/runtime-library-mtd.cc b/third_party/python/gyp/test/win/compiler-flags/runtime-library-mtd.cc
new file mode 100644
index 0000000000..a9921db9e2
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/runtime-library-mtd.cc
@@ -0,0 +1,19 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _MT
+#error
+#endif
+
+#ifndef _DEBUG
+#error
+#endif
+
+#ifdef _DLL
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/runtime-library.gyp b/third_party/python/gyp/test/win/compiler-flags/runtime-library.gyp
new file mode 100644
index 0000000000..04afc391c7
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/runtime-library.gyp
@@ -0,0 +1,48 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_rl_md',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeLibrary': '2'
+ }
+ },
+ 'sources': ['runtime-library-md.cc'],
+ },
+ {
+ 'target_name': 'test_rl_mdd',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeLibrary': '3'
+ }
+ },
+ 'sources': ['runtime-library-mdd.cc'],
+ },
+ {
+ 'target_name': 'test_rl_mt',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeLibrary': '0'
+ }
+ },
+ 'sources': ['runtime-library-mt.cc'],
+ },
+ {
+ 'target_name': 'test_rl_mtd',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'RuntimeLibrary': '1'
+ }
+ },
+ 'sources': ['runtime-library-mtd.cc'],
+ },
+ ]
+}
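+
+# For reference, RuntimeLibrary values map to the CRT switches (standard MSVC
+# mapping): '0' -> /MT, '1' -> /MTd, '2' -> /MD, '3' -> /MDd. The companion
+# .cc files assert the predefined macros: _MT is set for all four, _DLL only
+# for the DLL CRTs (/MD, /MDd) and _DEBUG only for the debug variants.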
diff --git a/third_party/python/gyp/test/win/compiler-flags/spectre-mitigation.gyp b/third_party/python/gyp/test/win/compiler-flags/spectre-mitigation.gyp
new file mode 100644
index 0000000000..dad9cbd2c9
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/spectre-mitigation.gyp
@@ -0,0 +1,44 @@
+# Copyright (c) 2023 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_sm_notset',
+ 'product_name': 'test_sm_notset',
+ 'type': 'executable',
+ 'msvs_configuration_attributes': {
+ 'SpectreMitigation': 'false'
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_sm_spectre',
+ 'product_name': 'test_sm_spectre',
+ 'type': 'executable',
+ 'msvs_configuration_attributes': {
+ 'SpectreMitigation': 'Spectre'
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_sm_spectre_load',
+ 'product_name': 'test_sm_spectre_load',
+ 'type': 'executable',
+ 'msvs_configuration_attributes': {
+ 'SpectreMitigation': 'SpectreLoad'
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_sm_spectre_load_cf',
+ 'product_name': 'test_sm_spectre_load_cf',
+ 'type': 'executable',
+ 'msvs_configuration_attributes': {
+ 'SpectreMitigation': 'SpectreLoadCF'
+ },
+ 'sources': ['hello.cc'],
+ }
+ ]
+}
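+
+# For reference (an assumption based on the MSVC documentation, not asserted
+# by this test): the SpectreMitigation configuration values map to /Qspectre
+# ('Spectre'), /Qspectre-load ('SpectreLoad') and /Qspectre-load-cf
+# ('SpectreLoadCF'); 'false' emits no mitigation switch.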
diff --git a/third_party/python/gyp/test/win/compiler-flags/subdir/header.h b/third_party/python/gyp/test/win/compiler-flags/subdir/header.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/subdir/header.h
diff --git a/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type.gyp b/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type.gyp
new file mode 100644
index 0000000000..456fe047d0
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type.gyp
@@ -0,0 +1,33 @@
+# Copyright (c) 2010 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_treat_wchar_t_as_built_in_type_negative',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'TreatWChar_tAsBuiltInType': 'false',
+ },
+ },
+ 'sources': [
+ 'treat-wchar-t-as-built-in-type1.cc',
+ ],
+ },
+ {
+ 'target_name': 'test_treat_wchar_t_as_built_in_type_positive',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'TreatWChar_tAsBuiltInType': 'true',
+ },
+ },
+ 'sources': [
+ 'treat-wchar-t-as-built-in-type2.cc',
+ ],
+ },
+
+ ],
+}
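+
+# For reference: TreatWChar_tAsBuiltInType 'true' maps to /Zc:wchar_t and
+# 'false' to /Zc:wchar_t- (standard MSVC mapping). The switch controls the
+# _NATIVE_WCHAR_T_DEFINED macro that the two companion .cc files assert.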
diff --git a/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type1.cc b/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type1.cc
new file mode 100644
index 0000000000..fc1ed0b7ea
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type1.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifdef _NATIVE_WCHAR_T_DEFINED
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type2.cc b/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type2.cc
new file mode 100644
index 0000000000..28ab94f742
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/treat-wchar-t-as-built-in-type2.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _NATIVE_WCHAR_T_DEFINED
+#error
+#endif
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/uninit.cc b/third_party/python/gyp/test/win/compiler-flags/uninit.cc
new file mode 100644
index 0000000000..a9d5f5d483
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/uninit.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Should trigger C6001: using uninitialized memory <variable> for |i|.
+int f(bool b) {
+ int i;
+ if (b)
+ i = 0;
+ return i;
+}
+
+int main() {}
diff --git a/third_party/python/gyp/test/win/compiler-flags/warning-as-error.cc b/third_party/python/gyp/test/win/compiler-flags/warning-as-error.cc
new file mode 100644
index 0000000000..fd2130aca5
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/warning-as-error.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ // Cause a warning, even at /W1
+ int export;
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/warning-as-error.gyp b/third_party/python/gyp/test/win/compiler-flags/warning-as-error.gyp
new file mode 100644
index 0000000000..d71f261141
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/warning-as-error.gyp
@@ -0,0 +1,37 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_warn_as_error_false',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarnAsError': 'false'
+ }
+ },
+ 'sources': ['warning-as-error.cc']
+ },
+ {
+ 'target_name': 'test_warn_as_error_true',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarnAsError': 'true'
+ }
+ },
+ 'sources': ['warning-as-error.cc']
+ },
+ {
+ 'target_name': 'test_warn_as_error_unset',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ }
+ },
+ 'sources': ['warning-as-error.cc']
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/warning-level.gyp b/third_party/python/gyp/test/win/compiler-flags/warning-level.gyp
new file mode 100644
index 0000000000..2297aa7cac
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/warning-level.gyp
@@ -0,0 +1,115 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # Level 1
+ {
+ 'target_name': 'test_wl1_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '1',
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['warning-level1.cc'],
+ },
+ {
+ 'target_name': 'test_wl1_pass',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '1',
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['warning-level2.cc'],
+ },
+
+ # Level 2
+ {
+ 'target_name': 'test_wl2_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '2',
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['warning-level2.cc'],
+ },
+ {
+ 'target_name': 'test_wl2_pass',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '2',
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['warning-level3.cc'],
+ },
+
+ # Level 3
+ {
+ 'target_name': 'test_wl3_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '3',
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['warning-level3.cc'],
+ },
+ {
+ 'target_name': 'test_wl3_pass',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '3',
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['warning-level4.cc'],
+ },
+
+
+ # Level 4
+ {
+ 'target_name': 'test_wl4_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '4',
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['warning-level4.cc'],
+ },
+
+ # Default level
+ {
+ 'target_name': 'test_def_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarnAsError': 'true',
+ }
+ },
+ 'sources': ['warning-level1.cc'],
+ },
+ {
+ 'target_name': 'test_def_pass',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ }
+ },
+ 'sources': ['warning-level2.cc'],
+ },
+
+ ]
+}
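+
+# For reference: WarningLevel '1'-'4' map to /W1-/W4, and cl defaults to /W1
+# when no level is given, which is why test_def_fail (a level 1 warning
+# compiled with WarnAsError and no explicit level) is expected to fail.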
diff --git a/third_party/python/gyp/test/win/compiler-flags/warning-level1.cc b/third_party/python/gyp/test/win/compiler-flags/warning-level1.cc
new file mode 100644
index 0000000000..119578d694
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/warning-level1.cc
@@ -0,0 +1,8 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ int export; // Cause a level 1 warning (C4237).
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/warning-level2.cc b/third_party/python/gyp/test/win/compiler-flags/warning-level2.cc
new file mode 100644
index 0000000000..9a26703180
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/warning-level2.cc
@@ -0,0 +1,14 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int f(int x) {
+ return 0;
+}
+
+int main() {
+ double x = 10.1;
+ // Cause a level 2 warning (C4243).
+ return f(x);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/warning-level3.cc b/third_party/python/gyp/test/win/compiler-flags/warning-level3.cc
new file mode 100644
index 0000000000..e0a9f3cdd9
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/warning-level3.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Cause a level 3 warning (C4359).
+struct __declspec(align(8)) C8 { __int64 i; };
+struct __declspec(align(4)) C4 { C8 m8; };
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/compiler-flags/warning-level4.cc b/third_party/python/gyp/test/win/compiler-flags/warning-level4.cc
new file mode 100644
index 0000000000..48a4fb7018
--- /dev/null
+++ b/third_party/python/gyp/test/win/compiler-flags/warning-level4.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ const int i = -1;
+ // Cause a level 4 warning (C4245).
+ unsigned int j = i;
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/enable-winrt/dllmain.cc b/third_party/python/gyp/test/win/enable-winrt/dllmain.cc
new file mode 100644
index 0000000000..dedd83c3f6
--- /dev/null
+++ b/third_party/python/gyp/test/win/enable-winrt/dllmain.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <wrl.h>
+#include <wrl/wrappers/corewrappers.h>
+#include <windows.graphics.display.h>
+
+using namespace Microsoft::WRL;
+using namespace Microsoft::WRL::Wrappers;
+using namespace ABI::Windows::Foundation;
+using namespace ABI::Windows::Graphics::Display;
+
+bool TryToUseSomeWinRT() {
+ ComPtr<IDisplayPropertiesStatics> dp;
+ HStringReference s(RuntimeClass_Windows_Graphics_Display_DisplayProperties);
+ HRESULT hr = GetActivationFactory(s.Get(), dp.GetAddressOf());
+ if (SUCCEEDED(hr)) {
+ float dpi = 96.0f;
+ if (SUCCEEDED(dp->get_LogicalDpi(&dpi))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+BOOL WINAPI DllMain(HINSTANCE hinstance, DWORD reason, LPVOID reserved) {
+ return TRUE;
+}
diff --git a/third_party/python/gyp/test/win/enable-winrt/enable-winrt.gyp b/third_party/python/gyp/test/win/enable-winrt/enable-winrt.gyp
new file mode 100644
index 0000000000..69f70189db
--- /dev/null
+++ b/third_party/python/gyp/test/win/enable-winrt/enable-winrt.gyp
@@ -0,0 +1,39 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'enable_winrt_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ },
+ {
+ 'target_name': 'enable_winrt_missing_dll',
+ 'type': 'shared_library',
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ },
+ {
+ 'target_name': 'enable_winrt_winphone_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'msvs_enable_winphone': 1,
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': [
+ '%(AdditionalDependencies)',
+ ],
+ },
+ },
+ },
+ ]
+}
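+
+# For reference: '%(AdditionalDependencies)' is the MSBuild item-metadata
+# inheritance token; listing it keeps the platform's default libraries on the
+# link line for the winphone configuration (an assumption based on MSBuild
+# conventions, not asserted by this test).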
diff --git a/third_party/python/gyp/test/win/generator-output-different-drive/gyptest-generator-output-different-drive.py b/third_party/python/gyp/test/win/generator-output-different-drive/gyptest-generator-output-different-drive.py
new file mode 100644
index 0000000000..96a30ec5b9
--- /dev/null
+++ b/third_party/python/gyp/test/win/generator-output-different-drive/gyptest-generator-output-different-drive.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test that the generator output can be written to a different drive on Windows.
+"""
+
+import os
+import TestGyp
+import string
+import subprocess
+import sys
+
+
+if sys.platform == 'win32':
+ import win32api
+
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ def GetFirstFreeDriveLetter():
+ """ Returns the first unused Windows drive letter in [A, Z] """
+ all_letters = [c for c in string.ascii_uppercase]
+ in_use = win32api.GetLogicalDriveStrings()
+ free = list(set(all_letters) - set(in_use))
+ return free[0]
+
+ output_dir = os.path.join('different-drive', 'output')
+ if not os.path.isdir(os.path.abspath(output_dir)):
+ os.makedirs(os.path.abspath(output_dir))
+ output_drive = GetFirstFreeDriveLetter()
+ subprocess.call(['subst', '%c:' % output_drive, os.path.abspath(output_dir)])
+ try:
+ test.run_gyp('prog.gyp', '--generator-output=%s' % (
+ os.path.join(output_drive, 'output')))
+ test.build('prog.gyp', test.ALL, chdir=os.path.join(output_drive, 'output'))
+ test.built_file_must_exist('program', chdir=os.path.join(output_drive,
+ 'output'),
+ type=test.EXECUTABLE)
+ test.pass_test()
+ finally:
+ subprocess.call(['subst', '%c:' % output_drive, '/D'])
diff --git a/third_party/python/gyp/test/win/generator-output-different-drive/prog.c b/third_party/python/gyp/test/win/generator-output-different-drive/prog.c
new file mode 100644
index 0000000000..7937f5d07a
--- /dev/null
+++ b/third_party/python/gyp/test/win/generator-output-different-drive/prog.c
@@ -0,0 +1,10 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdio.h>
+
+int main(void) {
+ printf("Hello from prog.c\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/generator-output-different-drive/prog.gyp b/third_party/python/gyp/test/win/generator-output-different-drive/prog.gyp
new file mode 100644
index 0000000000..92f53e5da5
--- /dev/null
+++ b/third_party/python/gyp/test/win/generator-output-different-drive/prog.gyp
@@ -0,0 +1,15 @@
+# Copyright 2013 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [
+ 'prog.c',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/win/gyptest-asm-files.py b/third_party/python/gyp/test/win/gyptest-asm-files.py
new file mode 100644
index 0000000000..007b52eb26
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-asm-files.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure .s files aren't passed to cl.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'asm-files'
+ test.run_gyp('asm-files.gyp', chdir=CHDIR)
+  # The compiler will error out if it's passed the .s files, so just make sure
+  # the build succeeds. The compiler doesn't directly support building
+  # assembler files on Windows; they have to be built explicitly with a
+  # third-party tool.
+ test.build('asm-files.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-additional-include-dirs.py b/third_party/python/gyp/test/win/gyptest-cl-additional-include-dirs.py
new file mode 100644
index 0000000000..1fabfa9752
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-additional-include-dirs.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure additional include dirs are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('additional-include-dirs.gyp', chdir=CHDIR)
+ test.build('additional-include-dirs.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-additional-options.py b/third_party/python/gyp/test/win/gyptest-cl-additional-options.py
new file mode 100644
index 0000000000..e9aea10dc9
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-additional-options.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure additional manual compiler flags are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('additional-options.gyp', chdir=CHDIR)
+
+  # Warning level not overridden, must fail.
+ test.build('additional-options.gyp', 'test_additional_none', chdir=CHDIR,
+ status=1)
+
+ # Warning level is overridden, must succeed.
+ test.build('additional-options.gyp', 'test_additional_one', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-analysis.py b/third_party/python/gyp/test/win/gyptest-cl-analysis.py
new file mode 100644
index 0000000000..7b3b9897f5
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-analysis.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure PREfast (code analysis) setting is extracted properly.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if (sys.platform == 'win32' and
+ int(os.environ.get('GYP_MSVS_VERSION', 0)) >= 2012):
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('analysis.gyp', chdir=CHDIR)
+
+ # Analysis enabled, should fail.
+ test.build('analysis.gyp', 'test_analysis_on', chdir=CHDIR, status=1)
+
+ # Analysis not enabled, or unspecified, should pass.
+ test.build('analysis.gyp', 'test_analysis_off', chdir=CHDIR)
+ test.build('analysis.gyp', 'test_analysis_unspec', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-buffer-security-check.py b/third_party/python/gyp/test/win/gyptest-cl-buffer-security-check.py
new file mode 100644
index 0000000000..e22869c3d3
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-buffer-security-check.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure buffer security check setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('buffer-security-check.gyp', chdir=CHDIR)
+ test.build('buffer-security-check.gyp', chdir=CHDIR)
+
+ def GetDisassemblyOfMain(exe):
+ # The standard library uses buffer security checks independent of our
+ # buffer security settings, so we extract just our code (i.e. main()) to
+ # check against.
+ full_path = test.built_file_path(exe, chdir=CHDIR)
+ output = test.run_dumpbin('/disasm', full_path)
+ result = []
+ in_main = False
+ for line in output.splitlines():
+ if line == '_main:':
+ in_main = True
+ elif in_main:
+ # Disassembly of next function starts.
+ if line.startswith('_'):
+ break
+ result.append(line)
+ return '\n'.join(result)
+
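+  # For reference: BufferSecurityCheck 'true' maps to /GS (also the compiler
+  # default) and 'false' to /GS-, assuming the standard MSVC mapping; /GS is
+  # what emits the security_cookie references checked below.
+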
+  # Buffer security checks are on by default, so make sure security_cookie
+  # appears in the disassembly of our code.
+ if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_unset.exe'):
+ test.fail_test()
+
+ # Explicitly on.
+ if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_on.exe'):
+ test.fail_test()
+
+ # Explicitly off, shouldn't be a reference to the security cookie.
+ if 'security_cookie' in GetDisassemblyOfMain('test_bsc_off.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-calling-convention.py b/third_party/python/gyp/test/win/gyptest-cl-calling-convention.py
new file mode 100644
index 0000000000..b5fdc47744
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-calling-convention.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure calling convention setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('calling-convention.gyp', chdir=CHDIR)
+ test.build('calling-convention.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-character-set.py b/third_party/python/gyp/test/win/gyptest-cl-character-set.py
new file mode 100644
index 0000000000..7fabb6722a
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-character-set.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure character set setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('character-set.gyp', chdir=CHDIR)
+ test.build('character-set.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-compile-as-managed.py b/third_party/python/gyp/test/win/gyptest-cl-compile-as-managed.py
new file mode 100644
index 0000000000..0d7b420485
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-compile-as-managed.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure compile as managed (clr) settings are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp()
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('compile-as-managed.gyp', chdir=CHDIR)
+ test.build('compile-as-managed.gyp', "test-compile-as-managed", chdir=CHDIR)
+ # Must fail.
+ test.build('compile-as-managed.gyp', "test-compile-as-unmanaged",
+ chdir=CHDIR, status=1)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-compile-as-winrt.py b/third_party/python/gyp/test/win/gyptest-cl-compile-as-winrt.py
new file mode 100644
index 0000000000..3e0168b678
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-compile-as-winrt.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2016 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import TestGyp
+
+import os
+import sys
+
+if (sys.platform == 'win32' and
+ int(os.environ.get('GYP_MSVS_VERSION', 0)) >= 2015):
+ test = TestGyp.TestGyp(formats=['msvs'])
+
+ CHDIR = 'compiler-flags'
+
+ test.run_gyp('compile-as-winrt.gyp', chdir=CHDIR)
+
+ test.build('compile-as-winrt.gyp', 'test-compile-as-winrt', chdir=CHDIR)
+
+  test.pass_test()
\ No newline at end of file
diff --git a/third_party/python/gyp/test/win/gyptest-cl-debug-format.py b/third_party/python/gyp/test/win/gyptest-cl-debug-format.py
new file mode 100644
index 0000000000..6c68a619be
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-debug-format.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure debug format settings are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('debug-format.gyp', chdir=CHDIR)
+
+  # While there are ways to check via the .pdb contents, the .pdb doesn't
+  # record which style the debug information was created from, so we resort
+  # to just verifying that the flags are correct on the command line.
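+  #
+  # For reference (standard MSVC mapping): DebugInformationFormat '1' maps to
+  # /Z7 (old-style, embedded), '3' to /Zi (PDB) and '4' to /ZI (PDB with
+  # edit-and-continue), which is what the checks below assert.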
+
+ ninja_file = test.built_file_path('obj/test-debug-format-off.ninja',
+ chdir=CHDIR)
+ test.must_not_contain(ninja_file, '/Z7')
+ test.must_not_contain(ninja_file, '/Zi')
+ test.must_not_contain(ninja_file, '/ZI')
+
+ ninja_file = test.built_file_path('obj/test-debug-format-oldstyle.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Z7')
+
+ ninja_file = test.built_file_path('obj/test-debug-format-pdb.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Zi')
+
+ ninja_file = test.built_file_path('obj/test-debug-format-editcontinue.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/ZI')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-default-char-is-unsigned.py b/third_party/python/gyp/test/win/gyptest-cl-default-char-is-unsigned.py
new file mode 100644
index 0000000000..d20f6742f5
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-default-char-is-unsigned.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure DefaultCharIsUnsigned option is functional.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('default-char-is-unsigned.gyp', chdir=CHDIR)
+ test.build('default-char-is-unsigned.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-disable-specific-warnings.py b/third_party/python/gyp/test/win/gyptest-cl-disable-specific-warnings.py
new file mode 100644
index 0000000000..cb253aff62
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-disable-specific-warnings.py
@@ -0,0 +1,32 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure disable specific warnings is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('disable-specific-warnings.gyp', chdir=CHDIR)
+
+  # The source file contains a warning, so if WarnAsError is true and
+  # DisableSpecificWarnings for the warning in question is set, then the build
+  # should succeed; otherwise it must fail.
+
+ test.build('disable-specific-warnings.gyp',
+ 'test_disable_specific_warnings_set',
+ chdir=CHDIR)
+ test.build('disable-specific-warnings.gyp',
+ 'test_disable_specific_warnings_unset',
+ chdir=CHDIR, status=1)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-enable-enhanced-instruction-set.py b/third_party/python/gyp/test/win/gyptest-cl-enable-enhanced-instruction-set.py
new file mode 100644
index 0000000000..f34e671125
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-enable-enhanced-instruction-set.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test VCCLCompilerTool EnableEnhancedInstructionSet setting.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp()
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('enable-enhanced-instruction-set.gyp', chdir=CHDIR)
+
+ test.build('enable-enhanced-instruction-set.gyp', test.ALL, chdir=CHDIR)
+
+ test.run_built_executable('sse_extensions', chdir=CHDIR,
+ stdout='/arch:SSE\n')
+ test.run_built_executable('sse2_extensions', chdir=CHDIR,
+ stdout='/arch:SSE2\n')
+
+ # /arch:AVX introduced in VS2010, but MSBuild support lagged until 2012.
+ if os.path.exists(test.built_file_path('avx_extensions')):
+ test.run_built_executable('avx_extensions', chdir=CHDIR,
+ stdout='/arch:AVX\n')
+
+ # /arch:IA32 introduced in VS2012.
+ if os.path.exists(test.built_file_path('no_extensions')):
+ test.run_built_executable('no_extensions', chdir=CHDIR,
+ stdout='/arch:IA32\n')
+
+ # /arch:AVX2 introduced in VS2013r2.
+ if os.path.exists(test.built_file_path('avx2_extensions')):
+ test.run_built_executable('avx2_extensions', chdir=CHDIR,
+ stdout='/arch:AVX2\n')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-exception-handling.py b/third_party/python/gyp/test/win/gyptest-cl-exception-handling.py
new file mode 100644
index 0000000000..5738a54071
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-exception-handling.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure exception handling settings are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('exception-handling.gyp', chdir=CHDIR)
+
+ # Must fail.
+ test.build('exception-handling.gyp', 'test_eh_off', chdir=CHDIR,
+ status=1)
+
+ # Must succeed.
+ test.build('exception-handling.gyp', 'test_eh_s', chdir=CHDIR)
+ test.build('exception-handling.gyp', 'test_eh_a', chdir=CHDIR)
+
+ # Error code must be 1 if EHa, and 2 if EHsc.
+ test.run_built_executable('test_eh_a', chdir=CHDIR, status=1)
+ test.run_built_executable('test_eh_s', chdir=CHDIR, status=2)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-floating-point-model.py b/third_party/python/gyp/test/win/gyptest-cl-floating-point-model.py
new file mode 100644
index 0000000000..86ff4785a0
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-floating-point-model.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure floating point model settings are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp()
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('floating-point-model.gyp', chdir=CHDIR)
+ test.build('floating-point-model.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-force-include-files.py b/third_party/python/gyp/test/win/gyptest-cl-force-include-files.py
new file mode 100644
index 0000000000..b73b8bd503
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-force-include-files.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure ForcedIncludeFiles option is functional.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('force-include-files.gyp', chdir=CHDIR)
+ test.build('force-include-files.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-function-level-linking.py b/third_party/python/gyp/test/win/gyptest-cl-function-level-linking.py
new file mode 100644
index 0000000000..6ad7b8c484
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-function-level-linking.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure function-level linking setting is extracted properly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('function-level-linking.gyp', chdir=CHDIR)
+ test.build('function-level-linking.gyp', test.ALL, chdir=CHDIR)
+
+ def CheckForSectionString(binary, search_for, should_exist):
+ output = test.run_dumpbin('/headers', binary)
+ if should_exist and search_for not in output:
+ print('Did not find "%s" in %s' % (search_for, binary))
+ test.fail_test()
+ elif not should_exist and search_for in output:
+ print('Found "%s" in %s (and shouldn\'t have)' % (search_for, binary))
+ test.fail_test()
+
+ def Object(proj, obj):
+ sep = '.' if test.format == 'ninja' else '\\'
+ return 'obj\\%s%s%s' % (proj, sep, obj)
+
+ look_for = '''COMDAT; sym= "int __cdecl comdat_function'''
+
+ # When function level linking is on, the functions should be listed as
+ # separate comdat entries.
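+  # For reference (not asserted verbatim), dumpbin /headers on a /Gy object
+  # prints one section header per function, along the lines of:
+  #   SECTION HEADER #4
+  #     ...
+  #     COMDAT; sym= "int __cdecl comdat_function(void)"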
+
+ CheckForSectionString(
+ test.built_file_path(Object('test_fll_on', 'function-level-linking.obj'),
+ chdir=CHDIR),
+ look_for,
+ should_exist=True)
+
+ CheckForSectionString(
+ test.built_file_path(Object('test_fll_off', 'function-level-linking.obj'),
+ chdir=CHDIR),
+ look_for,
+ should_exist=False)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-optimizations.py b/third_party/python/gyp/test/win/gyptest-cl-optimizations.py
new file mode 100644
index 0000000000..31341f7dd7
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-optimizations.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure optimization settings are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('optimizations.gyp', chdir=CHDIR)
+
+  # It's hard to map flags to output contents in a non-fragile way (especially
+  # handling both 2008 and 2010), so just verify that the generated ninja
+  # command lines contain the expected flags.
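+  # For example, obj/test_opt_off.ninja is expected to contain a line such as
+  # "cflags = /Od ..." (illustrative; only the flag under test is asserted).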
+
+ ninja_file = test.built_file_path('obj/test_opt_off.ninja', chdir=CHDIR)
+ test.must_contain(ninja_file, 'cflags = /Od')
+
+ ninja_file = test.built_file_path('obj/test_opt_lev_size.ninja', chdir=CHDIR)
+ test.must_contain(ninja_file, 'cflags = /O1')
+
+ ninja_file = test.built_file_path('obj/test_opt_lev_speed.ninja', chdir=CHDIR)
+ test.must_contain(ninja_file, 'cflags = /O2')
+
+ ninja_file = test.built_file_path('obj/test_opt_lev_max.ninja', chdir=CHDIR)
+ test.must_contain(ninja_file, 'cflags = /Ox')
+
+ ninja_file = test.built_file_path('obj/test_opt_unset.ninja', chdir=CHDIR)
+ test.must_not_contain(ninja_file, '/Od')
+ test.must_not_contain(ninja_file, '/O1')
+ test.must_not_contain(ninja_file, '/Ox')
+ # Set by default if none specified.
+ test.must_contain(ninja_file, '/O2')
+
+ ninja_file = test.built_file_path('obj/test_opt_fpo.ninja', chdir=CHDIR)
+ test.must_contain(ninja_file, '/Oy')
+ test.must_not_contain(ninja_file, '/Oy-')
+
+ ninja_file = test.built_file_path('obj/test_opt_fpo_off.ninja', chdir=CHDIR)
+ test.must_contain(ninja_file, '/Oy-')
+
+ ninja_file = test.built_file_path('obj/test_opt_intrinsic.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Oi')
+ test.must_not_contain(ninja_file, '/Oi-')
+
+ ninja_file = test.built_file_path('obj/test_opt_intrinsic_off.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Oi-')
+
+ ninja_file = test.built_file_path('obj/test_opt_inline_off.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Ob0')
+
+ ninja_file = test.built_file_path('obj/test_opt_inline_manual.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Ob1')
+
+ ninja_file = test.built_file_path('obj/test_opt_inline_auto.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Ob2')
+
+ ninja_file = test.built_file_path('obj/test_opt_neither.ninja',
+ chdir=CHDIR)
+ test.must_not_contain(ninja_file, '/Os')
+ test.must_not_contain(ninja_file, '/Ot')
+
+ ninja_file = test.built_file_path('obj/test_opt_size.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Os')
+
+ ninja_file = test.built_file_path('obj/test_opt_speed.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/Ot')
+
+ ninja_file = test.built_file_path('obj/test_opt_wpo.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/GL')
+
+ ninja_file = test.built_file_path('obj/test_opt_sp.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/GF')
+
+ ninja_file = test.built_file_path('obj/test_opt_sp_off.ninja',
+ chdir=CHDIR)
+ test.must_not_contain(ninja_file, '/GF')
+
+ ninja_file = test.built_file_path('obj/test_opt_fso.ninja',
+ chdir=CHDIR)
+ test.must_contain(ninja_file, '/GT')
+
+ ninja_file = test.built_file_path('obj/test_opt_fso_off.ninja',
+ chdir=CHDIR)
+ test.must_not_contain(ninja_file, '/GT')
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-pdbname-override.py b/third_party/python/gyp/test/win/gyptest-cl-pdbname-override.py
new file mode 100644
index 0000000000..da9b49af16
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-pdbname-override.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure pdb is named as expected (shared between .cc files).
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp()
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('pdbname-override.gyp', chdir=CHDIR)
+ test.build('pdbname-override.gyp', test.ALL, chdir=CHDIR)
+
+ # Confirm that the pdb generated by the compiler was renamed (and we also
+ # have the linker generated one).
+ test.built_file_must_exist('compiler_generated.pdb', chdir=CHDIR)
+ test.built_file_must_exist('linker_generated.pdb', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-pdbname.py b/third_party/python/gyp/test/win/gyptest-cl-pdbname.py
new file mode 100644
index 0000000000..f09ac233cd
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-pdbname.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure pdb is named as expected (shared between .cc files).
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('pdbname.gyp', chdir=CHDIR)
+ test.build('pdbname.gyp', test.ALL, chdir=CHDIR)
+
+ # Confirm that the default behaviour is to name the .pdb per-target (rather
+ # than per .cc file).
+ test.built_file_must_exist('obj/test_pdbname.cc.pdb', chdir=CHDIR)
+
+  # Confirm that there is a .pdb alongside the executable.
+ test.built_file_must_exist('test_pdbname.exe', chdir=CHDIR)
+ test.built_file_must_exist('test_pdbname.exe.pdb', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-rtti.py b/third_party/python/gyp/test/win/gyptest-cl-rtti.py
new file mode 100644
index 0000000000..d49a094379
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-rtti.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure RTTI setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('rtti.gyp', chdir=CHDIR)
+
+ # Must fail.
+ test.build('rtti.gyp', 'test_rtti_off', chdir=CHDIR, status=1)
+
+ # Must succeed.
+ test.build('rtti.gyp', 'test_rtti_on', chdir=CHDIR)
+
+ # Must succeed.
+ test.build('rtti.gyp', 'test_rtti_unset', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-runtime-checks.py b/third_party/python/gyp/test/win/gyptest-cl-runtime-checks.py
new file mode 100644
index 0000000000..4fd529f892
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-runtime-checks.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure RTC setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('runtime-checks.gyp', chdir=CHDIR)
+
+ # Runtime checks disabled, should fail.
+ test.build('runtime-checks.gyp', 'test_brc_none', chdir=CHDIR, status=1)
+
+ # Runtime checks enabled, should pass.
+ test.build('runtime-checks.gyp', 'test_brc_1', chdir=CHDIR)
+
+ # TODO(scottmg): There are other less frequently used/partial options, but
+ # it's not clear how to verify them, so ignore for now.
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-runtime-library.py b/third_party/python/gyp/test/win/gyptest-cl-runtime-library.py
new file mode 100644
index 0000000000..53c149297b
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-runtime-library.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure runtime C library setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('runtime-library.gyp', chdir=CHDIR)
+ test.build('runtime-library.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-treat-wchar-t-as-built-in-type.py b/third_party/python/gyp/test/win/gyptest-cl-treat-wchar-t-as-built-in-type.py
new file mode 100644
index 0000000000..ca35fb55a0
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-treat-wchar-t-as-built-in-type.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure TreatWChar_tAsBuiltInType option is functional.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('treat-wchar-t-as-built-in-type.gyp', chdir=CHDIR)
+ test.build('treat-wchar-t-as-built-in-type.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-warning-as-error.py b/third_party/python/gyp/test/win/gyptest-cl-warning-as-error.py
new file mode 100644
index 0000000000..d4ef1b362b
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-warning-as-error.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure warning-as-error is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('warning-as-error.gyp', chdir=CHDIR)
+
+ # The source file contains a warning, so if WarnAsError is false (or
+ # default, which is also false), then the build should succeed, otherwise it
+ # must fail.
+
+ test.build('warning-as-error.gyp', 'test_warn_as_error_false', chdir=CHDIR)
+ test.build('warning-as-error.gyp', 'test_warn_as_error_unset', chdir=CHDIR)
+ test.build('warning-as-error.gyp', 'test_warn_as_error_true', chdir=CHDIR,
+ status=1)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-cl-warning-level.py b/third_party/python/gyp/test/win/gyptest-cl-warning-level.py
new file mode 100644
index 0000000000..62a5b39b6a
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-cl-warning-level.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure warning level is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'compiler-flags'
+ test.run_gyp('warning-level.gyp', chdir=CHDIR)
+
+  # There is a separate pair of targets for each warning level: one that
+  # passes (compiling a file whose only warning is above the specified level)
+  # and one that fails (compiling a file that warns at the specified level).
+  # There is no pass target for level 4, of course, since that would require a
+  # file with no warnings at all. The default warning level is equivalent to
+  # level 1.
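+  # (WarningLevel n in the .gyp is assumed to map onto the cl.exe /Wn switch.)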
+
+ test.build('warning-level.gyp', 'test_wl1_fail', chdir=CHDIR, status=1)
+ test.build('warning-level.gyp', 'test_wl1_pass', chdir=CHDIR)
+
+ test.build('warning-level.gyp', 'test_wl2_fail', chdir=CHDIR, status=1)
+ test.build('warning-level.gyp', 'test_wl2_pass', chdir=CHDIR)
+
+ test.build('warning-level.gyp', 'test_wl3_fail', chdir=CHDIR, status=1)
+ test.build('warning-level.gyp', 'test_wl3_pass', chdir=CHDIR)
+
+ test.build('warning-level.gyp', 'test_wl4_fail', chdir=CHDIR, status=1)
+
+ test.build('warning-level.gyp', 'test_def_fail', chdir=CHDIR, status=1)
+ test.build('warning-level.gyp', 'test_def_pass', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-command-quote.py b/third_party/python/gyp/test/win/gyptest-command-quote.py
new file mode 100644
index 0000000000..656a69da53
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-command-quote.py
@@ -0,0 +1,42 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+
+Make sure the program in a command can be a called batch file, or an
+application in the path. Specifically, this means not quoting something like
+"call x.bat", lest the shell look for a program named "call x.bat", rather
+than calling "x.bat".
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+ CHDIR = 'command-quote'
+ test.run_gyp('command-quote.gyp', chdir=CHDIR)
+
+ test.build('command-quote.gyp', 'test_batch', chdir=CHDIR)
+ test.build('command-quote.gyp', 'test_call_separate', chdir=CHDIR)
+ test.build('command-quote.gyp', 'test_with_double_quotes', chdir=CHDIR)
+ test.build('command-quote.gyp', 'test_with_single_quotes', chdir=CHDIR)
+
+  # We confirm that this fails: other generators don't handle spaces in
+  # inputs, so it's preferable not to have it work here either.
+ test.build('command-quote.gyp', 'test_with_spaces', chdir=CHDIR, status=1)
+
+ CHDIR = 'command-quote/subdir/and/another'
+ test.run_gyp('in-subdir.gyp', chdir=CHDIR)
+ test.build('in-subdir.gyp', 'test_batch_depth', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-crosscompile-ar.py b/third_party/python/gyp/test/win/gyptest-crosscompile-ar.py
new file mode 100644
index 0000000000..dc75d96a84
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-crosscompile-ar.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that ar_host is set correctly when enabling cross-compilation on
+Windows.
+"""
+
+import TestGyp
+
+import sys
+import os
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'lib-crosscompile'
+ oldenv = os.environ.copy()
+ try:
+ os.environ['GYP_CROSSCOMPILE'] = '1'
+ test.run_gyp('use_host_ar.gyp', chdir=CHDIR)
+ finally:
+ os.environ.clear()
+ os.environ.update(oldenv)
+
+ test.build('use_host_ar.gyp', test.ALL, chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-lib-ltcg.py b/third_party/python/gyp/test/win/gyptest-lib-ltcg.py
new file mode 100644
index 0000000000..d1d7bad840
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-lib-ltcg.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure LTCG setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'lib-flags'
+ test.run_gyp('ltcg.gyp', chdir=CHDIR)
+ test.build('ltcg.gyp', test.ALL, chdir=CHDIR)
+ test.must_not_contain_any_line(test.stdout(), ['restarting link with /LTCG'])
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-additional-deps.py b/third_party/python/gyp/test/win/gyptest-link-additional-deps.py
new file mode 100644
index 0000000000..62c57366f9
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-additional-deps.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure additional library dependencies are handled.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('additional-deps.gyp', chdir=CHDIR)
+ test.build('additional-deps.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-additional-options.py b/third_party/python/gyp/test/win/gyptest-link-additional-options.py
new file mode 100644
index 0000000000..7e57ae4764
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-additional-options.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure additional options are handled.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('additional-options.gyp', chdir=CHDIR)
+ test.build('additional-options.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-aslr.py b/third_party/python/gyp/test/win/gyptest-link-aslr.py
new file mode 100644
index 0000000000..e765017d3b
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-aslr.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure aslr setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('aslr.gyp', chdir=CHDIR)
+ test.build('aslr.gyp', test.ALL, chdir=CHDIR)
+
+ def HasDynamicBase(exe):
+ full_path = test.built_file_path(exe, chdir=CHDIR)
+ output = test.run_dumpbin('/headers', full_path)
+ return ' Dynamic base' in output
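+  # (dumpbin /headers reports "Dynamic base" among the DLL characteristics
+  # when the image is linked with /DYNAMICBASE; wording assumed stable.)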
+
+ # Default is to be on.
+ if not HasDynamicBase('test_aslr_default.exe'):
+ test.fail_test()
+ if HasDynamicBase('test_aslr_no.exe'):
+ test.fail_test()
+ if not HasDynamicBase('test_aslr_yes.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-base-address.py b/third_party/python/gyp/test/win/gyptest-link-base-address.py
new file mode 100644
index 0000000000..d58527ad7e
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-base-address.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+# Copyright 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure the base address setting is extracted properly.
+"""
+
+import TestGyp
+
+import re
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('base-address.gyp', chdir=CHDIR)
+ test.build('base-address.gyp', test.ALL, chdir=CHDIR)
+
+ def GetHeaders(exe):
+ full_path = test.built_file_path(exe, chdir=CHDIR)
+ return test.run_dumpbin('/headers', full_path)
+
+ # Extract the image base address from the headers output.
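+  # dumpbin prints it as a line like (illustrative):
+  #           400000 image base (00400000 to 0040FFFF)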
+ image_base_reg_ex = re.compile(r'.*\s+([0-9]+) image base.*', re.DOTALL)
+
+ exe_headers = GetHeaders('test_base_specified_exe.exe')
+ exe_match = image_base_reg_ex.match(exe_headers)
+
+ if not exe_match or not exe_match.group(1):
+ test.fail_test()
+ if exe_match.group(1) != '420000':
+ test.fail_test()
+
+ dll_headers = GetHeaders('test_base_specified_dll.dll')
+ dll_match = image_base_reg_ex.match(dll_headers)
+
+ if not dll_match or not dll_match.group(1):
+ test.fail_test()
+ if dll_match.group(1) != '10420000':
+ test.fail_test()
+
+ default_exe_headers = GetHeaders('test_base_default_exe.exe')
+ default_exe_match = image_base_reg_ex.match(default_exe_headers)
+
+ if not default_exe_match or not default_exe_match.group(1):
+ test.fail_test()
+ if default_exe_match.group(1) != '400000':
+ test.fail_test()
+
+ default_dll_headers = GetHeaders('test_base_default_dll.dll')
+ default_dll_match = image_base_reg_ex.match(default_dll_headers)
+
+ if not default_dll_match or not default_dll_match.group(1):
+ test.fail_test()
+ if default_dll_match.group(1) != '10000000':
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-debug-info.py b/third_party/python/gyp/test/win/gyptest-link-debug-info.py
new file mode 100644
index 0000000000..33e8ac48bf
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-debug-info.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure debug info setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('debug-info.gyp', chdir=CHDIR)
+ test.build('debug-info.gyp', test.ALL, chdir=CHDIR)
+
+ suffix = '.exe.pdb' if test.format == 'ninja' else '.pdb'
+ test.built_file_must_not_exist('test_debug_off%s' % suffix, chdir=CHDIR)
+ test.built_file_must_exist('test_debug_on%s' % suffix, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-default-libs.py b/third_party/python/gyp/test/win/gyptest-link-default-libs.py
new file mode 100644
index 0000000000..5edf467913
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-default-libs.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure we include the default libs.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('no-default-libs.gyp', chdir=CHDIR)
+ test.build('no-default-libs.gyp', test.ALL, chdir=CHDIR, status=1)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-deffile.py b/third_party/python/gyp/test/win/gyptest-link-deffile.py
new file mode 100644
index 0000000000..94df874f85
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-deffile.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure a .def file is handled in the link.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+
+  # Multiple .def files make no sense; this should fail at generate time.
+ test.run_gyp('deffile-multiple.gyp', chdir=CHDIR, stderr=None, status=1)
+
+ test.run_gyp('deffile.gyp', chdir=CHDIR)
+ test.build('deffile.gyp', test.ALL, chdir=CHDIR)
+
+ def HasExport(binary, export):
+ full_path = test.built_file_path(binary, chdir=CHDIR)
+ output = test.run_dumpbin('/exports', full_path)
+ return export in output
+
+ # Make sure we only have the export when the .def file is in use.
+
+ if HasExport('test_deffile_dll_notexported.dll', 'AnExportedFunction'):
+ test.fail_test()
+ if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
+ test.fail_test()
+
+ if HasExport('test_deffile_exe_notexported.exe', 'AnExportedFunction'):
+ test.fail_test()
+ if not HasExport('test_deffile_exe_ok.exe', 'AnExportedFunction'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-defrelink.py b/third_party/python/gyp/test/win/gyptest-link-defrelink.py
new file mode 100644
index 0000000000..cb3f86bb0e
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-defrelink.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure a relink is performed when a .def file is touched.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ target = 'test_deffile_dll_ok'
+ def_contents = test.read('linker-flags/deffile.def')
+
+ # This first build makes sure everything is up to date.
+ test.run_gyp('deffile.gyp', chdir=CHDIR)
+ test.build('deffile.gyp', target, chdir=CHDIR)
+ test.up_to_date('deffile.gyp', target, chdir=CHDIR)
+
+ def HasExport(binary, export):
+ full_path = test.built_file_path(binary, chdir=CHDIR)
+ output = test.run_dumpbin('/exports', full_path)
+ return export in output
+
+ # Verify that only one function is exported.
+ if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
+ test.fail_test()
+ if HasExport('test_deffile_dll_ok.dll', 'AnotherExportedFunction'):
+ test.fail_test()
+
+ # Add AnotherExportedFunction to the def file, then rebuild. If it doesn't
+ # relink the DLL, then the subsequent check for AnotherExportedFunction will
+ # fail.
+ new_def_contents = def_contents + "\n AnotherExportedFunction"
+ test.write('linker-flags/deffile.def', new_def_contents)
+ test.build('deffile.gyp', target, chdir=CHDIR)
+ test.up_to_date('deffile.gyp', target, chdir=CHDIR)
+
+ if not HasExport('test_deffile_dll_ok.dll', 'AnExportedFunction'):
+ test.fail_test()
+ if not HasExport('test_deffile_dll_ok.dll', 'AnotherExportedFunction'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-delay-load-dlls.py b/third_party/python/gyp/test/win/gyptest-link-delay-load-dlls.py
new file mode 100644
index 0000000000..3880247b4a
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-delay-load-dlls.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure delay load setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('delay-load-dlls.gyp', chdir=CHDIR)
+ test.build('delay-load-dlls.gyp', test.ALL, chdir=CHDIR)
+
+ prefix = 'contains the following delay load imports:'
+ shell32_look_for = prefix + '\r\n\r\n SHELL32.dll'
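+  # dumpbin /all renders that section roughly as (illustrative):
+  #   Section contains the following delay load imports:
+  #
+  #     SHELL32.dll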
+
+ output = test.run_dumpbin(
+ '/all', test.built_file_path('test_dld_none.exe', chdir=CHDIR))
+ if prefix in output:
+ test.fail_test()
+
+ output = test.run_dumpbin(
+ '/all', test.built_file_path('test_dld_shell32.exe', chdir=CHDIR))
+ if shell32_look_for not in output:
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-embed-manifest.py b/third_party/python/gyp/test/win/gyptest-link-embed-manifest.py
new file mode 100644
index 0000000000..0e2b628be1
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-embed-manifest.py
@@ -0,0 +1,100 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Yandex LLC. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure manifests are embedded in binaries properly. Handling of
+AdditionalManifestFiles is tested too.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ import pywintypes
+ import win32api
+ import winerror
+
+ RT_MANIFEST = 24
+
+ class LoadLibrary(object):
+ """Context manager for loading and releasing binaries in Windows.
+ Yields the handle of the binary loaded."""
+ def __init__(self, path):
+ self._path = path
+ self._handle = None
+
+ def __enter__(self):
+ self._handle = win32api.LoadLibrary(self._path)
+ return self._handle
+
+ def __exit__(self, type, value, traceback):
+ win32api.FreeLibrary(self._handle)
+
+
+ def extract_manifest(path, resource_name):
+ """Reads manifest from |path| and returns it as a string.
+    Returns None if there is no such manifest."""
+ with LoadLibrary(path) as handle:
+ try:
+ return win32api.LoadResource(
+ handle, RT_MANIFEST, resource_name).decode('utf-8', 'ignore')
+ except pywintypes.error as error:
+ if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
+ return None
+ else:
+ raise
+
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+ CHDIR = 'linker-flags'
+ test.run_gyp('embed-manifest.gyp', chdir=CHDIR)
+ test.build('embed-manifest.gyp', test.ALL, chdir=CHDIR)
+
+ # The following binaries must contain a manifest embedded.
+ test.fail_test(not extract_manifest(test.built_file_path(
+ 'test_manifest_exe.exe', chdir=CHDIR), 1))
+ test.fail_test(not extract_manifest(test.built_file_path(
+ 'test_manifest_exe_inc.exe', chdir=CHDIR), 1))
+ test.fail_test(not extract_manifest(test.built_file_path(
+ 'test_manifest_dll.dll', chdir=CHDIR), 2))
+ test.fail_test(not extract_manifest(test.built_file_path(
+ 'test_manifest_dll_inc.dll', chdir=CHDIR), 2))
+
+ # Must contain the Win7 support GUID, but not the Vista one (from
+ # extra2.manifest).
+ test.fail_test(
+ '35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
+ extract_manifest(test.built_file_path('test_manifest_extra1.exe',
+ chdir=CHDIR), 1))
+ test.fail_test(
+ 'e2011457-1546-43c5-a5fe-008deee3d3f0' in
+ extract_manifest(test.built_file_path('test_manifest_extra1.exe',
+ chdir=CHDIR), 1))
+ # Must contain both.
+ test.fail_test(
+ '35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
+ extract_manifest(test.built_file_path('test_manifest_extra2.exe',
+ chdir=CHDIR), 1))
+ test.fail_test(
+ 'e2011457-1546-43c5-a5fe-008deee3d3f0' not in
+ extract_manifest(test.built_file_path('test_manifest_extra2.exe',
+ chdir=CHDIR), 1))
+
+ # Same as extra2, but using list syntax instead.
+ test.fail_test(
+ '35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in
+ extract_manifest(test.built_file_path('test_manifest_extra_list.exe',
+ chdir=CHDIR), 1))
+ test.fail_test(
+ 'e2011457-1546-43c5-a5fe-008deee3d3f0' not in
+ extract_manifest(test.built_file_path('test_manifest_extra_list.exe',
+ chdir=CHDIR), 1))
+
+ # Test that incremental linking doesn't force manifest embedding.
+ test.fail_test(extract_manifest(test.built_file_path(
+ 'test_manifest_exe_inc_no_embed.exe', chdir=CHDIR), 1))
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-enable-uac.py b/third_party/python/gyp/test/win/gyptest-link-enable-uac.py
new file mode 100644
index 0000000000..0ddbde5fa5
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-enable-uac.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+
+# Copyright 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that embedding UAC information into the manifest works.
+"""
+
+import TestGyp
+
+import sys
+from xml.dom.minidom import parseString
+
+if sys.platform == 'win32':
+ import pywintypes
+ import win32api
+ import winerror
+
+ RT_MANIFEST = 24
+
+ class LoadLibrary(object):
+ """Context manager for loading and releasing binaries in Windows.
+ Yields the handle of the binary loaded."""
+ def __init__(self, path):
+ self._path = path
+ self._handle = None
+
+ def __enter__(self):
+ self._handle = win32api.LoadLibrary(self._path)
+ return self._handle
+
+ def __exit__(self, type, value, traceback):
+ win32api.FreeLibrary(self._handle)
+
+
+ def extract_manifest(path, resource_name):
+ """Reads manifest from |path| and returns it as a string.
+    Returns None if there is no such manifest."""
+ with LoadLibrary(path) as handle:
+ try:
+ return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
+ except pywintypes.error as error:
+ if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
+ return None
+ else:
+ raise
+
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+ CHDIR = 'linker-flags'
+ test.run_gyp('enable-uac.gyp', chdir=CHDIR)
+ test.build('enable-uac.gyp', test.ALL, chdir=CHDIR)
+
+ # The following binaries must contain a manifest embedded.
+ test.fail_test(not extract_manifest(test.built_file_path(
+ 'enable_uac.exe', chdir=CHDIR), 1))
+ test.fail_test(not extract_manifest(test.built_file_path(
+ 'enable_uac_no.exe', chdir=CHDIR), 1))
+ test.fail_test(not extract_manifest(test.built_file_path(
+ 'enable_uac_admin.exe', chdir=CHDIR), 1))
+
+ # Verify that <requestedExecutionLevel level="asInvoker" uiAccess="false" />
+ # is present.
+ manifest = parseString(extract_manifest(
+ test.built_file_path('enable_uac.exe', chdir=CHDIR), 1))
+ execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
+ test.fail_test(len(execution_level) != 1)
+ execution_level = execution_level[0].attributes
+
+ def _has_key(node, key):
+ # 'in' doesn't work with the NamedNodeMap interface in Python2,
+ # but 'has_key' was removed from it in Python3, so we need to
+ # shim things :(.
+ if hasattr(node, 'has_key'):
+ return node.has_key(key)
+ return key in node
+
+ test.fail_test(not (
+ _has_key(execution_level, 'level') and
+ _has_key(execution_level, 'uiAccess') and
+ execution_level['level'].nodeValue == 'asInvoker' and
+ execution_level['uiAccess'].nodeValue == 'false'))
+
+  # Verify that <requestedExecutionLevel> is not in the manifest.
+ manifest = parseString(extract_manifest(
+ test.built_file_path('enable_uac_no.exe', chdir=CHDIR), 1))
+ execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
+ test.fail_test(len(execution_level) != 0)
+
+ # Verify that <requestedExecutionLevel level="requireAdministrator"
+ # uiAccess="true" /> is present.
+ manifest = parseString(extract_manifest(
+ test.built_file_path('enable_uac_admin.exe', chdir=CHDIR), 1))
+ execution_level = manifest.getElementsByTagName('requestedExecutionLevel')
+ test.fail_test(len(execution_level) != 1)
+ execution_level = execution_level[0].attributes
+ test.fail_test(not (
+ _has_key(execution_level, 'level') and
+ _has_key(execution_level, 'uiAccess') and
+ execution_level['level'].nodeValue == 'requireAdministrator' and
+ execution_level['uiAccess'].nodeValue == 'true'))
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-enable-winrt-app-revision.py b/third_party/python/gyp/test/win/gyptest-link-enable-winrt-app-revision.py
new file mode 100644
index 0000000000..45d86e1c69
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-enable-winrt-app-revision.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure msvs_application_type_revision works correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import sys
+import struct
+
+CHDIR = 'winrt-app-type-revision'
+
+print('This test is not currently working on the bots: '
+      'https://code.google.com/p/gyp/issues/detail?id=466')
+sys.exit(0)
+
+if (sys.platform == 'win32' and
+ int(os.environ.get('GYP_MSVS_VERSION', 0)) == 2013):
+ test = TestGyp.TestGyp(formats=['msvs'])
+
+ test.run_gyp('winrt-app-type-revision.gyp', chdir=CHDIR)
+
+ test.build('winrt-app-type-revision.gyp', 'enable_winrt_81_revision_dll',
+ chdir=CHDIR)
+
+  # Revision is set to 8.2, which is invalid for 2013 projects, so compilation
+  # must fail.
+ test.build('winrt-app-type-revision.gyp', 'enable_winrt_82_revision_dll',
+ chdir=CHDIR, status=1)
+
+  # Revision is set to an invalid value for 2013 projects, so compilation
+ # must fail.
+ test.build('winrt-app-type-revision.gyp', 'enable_winrt_invalid_revision_dll',
+ chdir=CHDIR, status=1)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-enable-winrt-target-platform-version.py b/third_party/python/gyp/test/win/gyptest-link-enable-winrt-target-platform-version.py
new file mode 100644
index 0000000000..877cb68f8c
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-enable-winrt-target-platform-version.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure msvs_target_platform_version works correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import sys
+import struct
+
+CHDIR = 'winrt-target-platform-version'
+
+print('This test is not currently working on the bots: '
+      'https://code.google.com/p/gyp/issues/detail?id=466')
+sys.exit(0)
+
+if (sys.platform == 'win32' and
+ int(os.environ.get('GYP_MSVS_VERSION', 0)) == 2015):
+ test = TestGyp.TestGyp(formats=['msvs'])
+
+ test.run_gyp('winrt-target-platform-version.gyp', chdir=CHDIR)
+
+ test.build('winrt-target-platform-version.gyp',
+ 'enable_winrt_10_platversion_dll', chdir=CHDIR)
+
+  # A Target Platform version without a Minimum Target Platform version
+  # defaults to a valid minimum version and compiles.
+ test.build('winrt-target-platform-version.gyp',
+ 'enable_winrt_10_platversion_nominver_dll', chdir=CHDIR)
+
+  # Target Platform is set to 9.0, which is invalid for 2015 projects, so
+ # compilation must fail.
+ test.build('winrt-target-platform-version.gyp',
+ 'enable_winrt_9_platversion_dll', chdir=CHDIR, status=1)
+
+ # Missing Target Platform for 2015 projects must fail.
+ test.build('winrt-target-platform-version.gyp',
+ 'enable_winrt_missing_platversion_dll', chdir=CHDIR, status=1)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-enable-winrt.py b/third_party/python/gyp/test/win/gyptest-link-enable-winrt.py
new file mode 100644
index 0000000000..5e0493aade
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-enable-winrt.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure msvs_enable_winrt works correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import sys
+import struct
+
+CHDIR = 'enable-winrt'
+
+print('This test is not currently working on the bots: '
+      'https://code.google.com/p/gyp/issues/detail?id=466')
+sys.exit(0)
+
+if (sys.platform == 'win32' and
+ int(os.environ.get('GYP_MSVS_VERSION', 0)) >= 2013):
+ test = TestGyp.TestGyp(formats=['msvs'])
+
+ test.run_gyp('enable-winrt.gyp', chdir=CHDIR)
+
+ test.build('enable-winrt.gyp', 'enable_winrt_dll', chdir=CHDIR)
+
+ test.build('enable-winrt.gyp', 'enable_winrt_missing_dll', chdir=CHDIR,
+ status=1)
+
+ test.build('enable-winrt.gyp', 'enable_winrt_winphone_dll', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-entrypointsymbol.py b/third_party/python/gyp/test/win/gyptest-link-entrypointsymbol.py
new file mode 100644
index 0000000000..e88174a085
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-entrypointsymbol.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure entrypointsymbol setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('entrypointsymbol.gyp', chdir=CHDIR)
+
+ test.build('entrypointsymbol.gyp', 'test_ok', chdir=CHDIR)
+ test.build('entrypointsymbol.gyp', 'test_fail', chdir=CHDIR, status=1)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-fixed-base.py b/third_party/python/gyp/test/win/gyptest-link-fixed-base.py
new file mode 100644
index 0000000000..725a87028a
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-fixed-base.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure fixed base setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('fixed-base.gyp', chdir=CHDIR)
+ test.build('fixed-base.gyp', test.ALL, chdir=CHDIR)
+
+ def GetHeaders(exe):
+ full_path = test.built_file_path(exe, chdir=CHDIR)
+ return test.run_dumpbin('/headers', full_path)
+
+  # For an exe the default is fixed; for a dll it's not.
+ if 'Relocations stripped' not in GetHeaders('test_fixed_default_exe.exe'):
+ test.fail_test()
+ if 'Relocations stripped' in GetHeaders('test_fixed_default_dll.dll'):
+ test.fail_test()
+
+ # Explicitly not fixed.
+ if 'Relocations stripped' in GetHeaders('test_fixed_no.exe'):
+ test.fail_test()
+
+ # Explicitly fixed.
+ if 'Relocations stripped' not in GetHeaders('test_fixed_yes.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-force-symbol-reference.py b/third_party/python/gyp/test/win/gyptest-link-force-symbol-reference.py
new file mode 100644
index 0000000000..235e94f71b
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-force-symbol-reference.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure ForceSymbolReference is translated properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('force-symbol-reference.gyp', chdir=CHDIR)
+ test.build('force-symbol-reference.gyp', test.ALL, chdir=CHDIR)
+
+ output = test.run_dumpbin(
+ '/disasm', test.built_file_path('test_force_reference.exe', chdir=CHDIR))
+ if '?x@@YAHXZ:' not in output or '?y@@YAHXZ:' not in output:
+ test.fail_test()
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-generate-manifest.py b/third_party/python/gyp/test/win/gyptest-link-generate-manifest.py
new file mode 100644
index 0000000000..77c9228ee5
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-generate-manifest.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure we generate a manifest file when linking binaries, including
+handling AdditionalManifestFiles.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ import pywintypes
+ import win32api
+ import winerror
+
+ RT_MANIFEST = 24
+
+ class LoadLibrary(object):
+ """Context manager for loading and releasing binaries in Windows.
+ Yields the handle of the binary loaded."""
+ def __init__(self, path):
+ self._path = path
+ self._handle = None
+
+ def __enter__(self):
+ self._handle = win32api.LoadLibrary(self._path)
+ return self._handle
+
+ def __exit__(self, type, value, traceback):
+ win32api.FreeLibrary(self._handle)
+
+ def extract_manifest(path, resource_name):
+ """Reads manifest from |path| and returns it as a string.
+    Returns None if there is no such manifest."""
+ with LoadLibrary(path) as handle:
+ try:
+ return win32api.LoadResource(handle, RT_MANIFEST, resource_name)
+ except pywintypes.error as error:
+ if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
+ return None
+ else:
+ raise
+
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('generate-manifest.gyp', chdir=CHDIR)
+ test.build('generate-manifest.gyp', test.ALL, chdir=CHDIR)
+
+ # Make sure that generation of .generated.manifest does not cause a relink.
+ test.run_gyp('generate-manifest.gyp', chdir=CHDIR)
+ test.up_to_date('generate-manifest.gyp', test.ALL, chdir=CHDIR)
+
+ def test_manifest(filename, generate_manifest, embedded_manifest,
+ extra_manifest):
+ exe_file = test.built_file_path(filename, chdir=CHDIR)
+ if not generate_manifest:
+ test.must_not_exist(exe_file + '.manifest')
+ manifest = extract_manifest(exe_file, 1)
+ test.fail_test(manifest)
+ return
+ if embedded_manifest:
+ manifest = extract_manifest(exe_file, 1)
+ test.fail_test(not manifest)
+ else:
+ test.must_exist(exe_file + '.manifest')
+ manifest = test.read(exe_file + '.manifest')
+ test.fail_test(not manifest)
+ test.fail_test(extract_manifest(exe_file, 1))
+    if generate_manifest:
+      # must_contain_any_line() takes a list of lines; passing a bare string
+      # would iterate it character by character and match trivially.
+      test.must_contain_any_line(manifest, ['requestedExecutionLevel'])
+    if extra_manifest:
+      test.must_contain_any_line(manifest,
+                                 ['35138b9a-5d96-4fbd-8e2d-a2440225f93a'])
+      test.must_contain_any_line(manifest,
+                                 ['e2011457-1546-43c5-a5fe-008deee3d3f0'])
+
+ test_manifest('test_generate_manifest_true.exe',
+ generate_manifest=True,
+ embedded_manifest=False,
+ extra_manifest=False)
+ test_manifest('test_generate_manifest_false.exe',
+ generate_manifest=False,
+ embedded_manifest=False,
+ extra_manifest=False)
+ test_manifest('test_generate_manifest_default.exe',
+ generate_manifest=True,
+ embedded_manifest=False,
+ extra_manifest=False)
+ test_manifest('test_generate_manifest_true_as_embedded.exe',
+ generate_manifest=True,
+ embedded_manifest=True,
+ extra_manifest=False)
+ test_manifest('test_generate_manifest_false_as_embedded.exe',
+ generate_manifest=False,
+ embedded_manifest=True,
+ extra_manifest=False)
+ test_manifest('test_generate_manifest_default_as_embedded.exe',
+ generate_manifest=True,
+ embedded_manifest=True,
+ extra_manifest=False)
+ test_manifest('test_generate_manifest_true_with_extra_manifest.exe',
+ generate_manifest=True,
+ embedded_manifest=False,
+ extra_manifest=True)
+ test_manifest('test_generate_manifest_false_with_extra_manifest.exe',
+ generate_manifest=False,
+ embedded_manifest=False,
+ extra_manifest=True)
+ test_manifest('test_generate_manifest_true_with_extra_manifest_list.exe',
+ generate_manifest=True,
+ embedded_manifest=False,
+ extra_manifest=True)
+ test_manifest('test_generate_manifest_false_with_extra_manifest_list.exe',
+ generate_manifest=False,
+ embedded_manifest=False,
+ extra_manifest=True)
+ test_manifest('test_generate_manifest_default_embed_default.exe',
+ generate_manifest=True,
+ embedded_manifest=True,
+ extra_manifest=False)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-incremental.py b/third_party/python/gyp/test/win/gyptest-link-incremental.py
new file mode 100644
index 0000000000..e7184e162c
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-incremental.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure incremental linking setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('incremental.gyp', chdir=CHDIR)
+ test.build('incremental.gyp', test.ALL, chdir=CHDIR)
+
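+  # Incremental linking inserts Incremental Link Table thunks whose "@ILT+"
+  # labels show up in the disassembly (assumed stable marker for /INCREMENTAL).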
+ def HasILTTables(exe):
+ full_path = test.built_file_path(exe, chdir=CHDIR)
+ output = test.run_dumpbin('/disasm', full_path)
+ return '@ILT+' in output
+
+ # Default or unset is to be on.
+ if not HasILTTables('test_incremental_unset.exe'):
+ test.fail_test()
+ if not HasILTTables('test_incremental_default.exe'):
+ test.fail_test()
+ if HasILTTables('test_incremental_no.exe'):
+ test.fail_test()
+ if not HasILTTables('test_incremental_yes.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-large-address-aware.py b/third_party/python/gyp/test/win/gyptest-link-large-address-aware.py
new file mode 100644
index 0000000000..ea433f2099
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-large-address-aware.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure largeaddressaware setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('large-address-aware.gyp', chdir=CHDIR)
+ test.build('large-address-aware.gyp', test.ALL, chdir=CHDIR)
+
+ def GetHeaders(exe):
+ return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
+
+ MARKER = 'Application can handle large (>2GB) addresses'
+
+ # Explicitly off.
+ if MARKER in GetHeaders('test_large_address_aware_no.exe'):
+ test.fail_test()
+
+ # Explicitly on.
+ if MARKER not in GetHeaders('test_large_address_aware_yes.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-large-pdb.py b/third_party/python/gyp/test/win/gyptest-link-large-pdb.py
new file mode 100644
index 0000000000..1fb27b0ab7
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-large-pdb.py
@@ -0,0 +1,76 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure msvs_large_pdb works correctly.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import struct
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+
+CHDIR = 'large-pdb'
+
+
+def CheckImageAndPdb(test, image_basename, expected_page_size,
+ pdb_basename=None):
+ if not pdb_basename:
+ pdb_basename = image_basename + '.pdb'
+ test.built_file_must_exist(image_basename, chdir=CHDIR)
+ test.built_file_must_exist(pdb_basename, chdir=CHDIR)
+
+ # We expect the PDB to have the given page size. For full details of the
+ # header look here: https://code.google.com/p/pdbparser/wiki/MSF_Format
+ # We read the little-endian 4-byte unsigned integer at position 32 of the
+ # file.
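+  # (MSF 7.0 layout, per the docs above: a 32-byte signature, then the page
+  # size as a little-endian uint32 -- commonly 1024 by default and 4096 when
+  # msvs_large_pdb is set.)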
+ pdb_path = test.built_file_path(pdb_basename, chdir=CHDIR)
+ pdb_file = open(pdb_path, 'rb')
+ pdb_file.seek(32, 0)
+ page_size = struct.unpack('<I', pdb_file.read(4))[0]
+  if page_size != expected_page_size:
+    print("Expected page size of %d, got %d for PDB file `%s'." % (
+        expected_page_size, page_size, pdb_path))
+    test.fail_test()
+
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ test.run_gyp('large-pdb.gyp', chdir=CHDIR)
+
+ test.build('large-pdb.gyp', 'large_pdb_exe', chdir=CHDIR)
+ CheckImageAndPdb(test, 'large_pdb_exe.exe', 4096)
+
+ test.build('large-pdb.gyp', 'small_pdb_exe', chdir=CHDIR)
+ CheckImageAndPdb(test, 'small_pdb_exe.exe', 1024)
+
+ test.build('large-pdb.gyp', 'large_pdb_dll', chdir=CHDIR)
+ CheckImageAndPdb(test, 'large_pdb_dll.dll', 4096)
+
+ test.build('large-pdb.gyp', 'small_pdb_dll', chdir=CHDIR)
+ CheckImageAndPdb(test, 'small_pdb_dll.dll', 1024)
+
+ test.build('large-pdb.gyp', 'large_pdb_implicit_exe', chdir=CHDIR)
+ CheckImageAndPdb(test, 'large_pdb_implicit_exe.exe', 4096)
+
+ # This target has a different PDB name because it uses an
+ # 'msvs_large_pdb_path' variable.
+ test.build('large-pdb.gyp', 'large_pdb_variable_exe', chdir=CHDIR)
+ CheckImageAndPdb(test, 'large_pdb_variable_exe.exe', 4096,
+ pdb_basename='foo.pdb')
+
+ # This target has a different output name because it uses 'product_name'.
+ test.build('large-pdb.gyp', 'large_pdb_product_exe', chdir=CHDIR)
+ CheckImageAndPdb(test, 'bar.exe', 4096)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-library-adjust.py b/third_party/python/gyp/test/win/gyptest-link-library-adjust.py
new file mode 100644
index 0000000000..71d1c09360
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-library-adjust.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure link_settings containing -lblah.lib is remapped to just blah.lib.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('library-adjust.gyp', chdir=CHDIR)
+ test.build('library-adjust.gyp', test.ALL, chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-library-directories.py b/third_party/python/gyp/test/win/gyptest-link-library-directories.py
new file mode 100644
index 0000000000..8308e14fcb
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-library-directories.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure libpath is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+
+ # Build subdirectory library.
+ test.run_gyp('subdir/library.gyp', chdir=CHDIR)
+ test.build('subdir/library.gyp', test.ALL, chdir=CHDIR)
+
+ # And then try to link the main project against the library using only
+ # LIBPATH to find it.
+ test.run_gyp('library-directories.gyp', chdir=CHDIR)
+
+ # Without additional paths specified, should fail.
+ test.build('library-directories.gyp', 'test_libdirs_none', chdir=CHDIR,
+ status=1)
+
+ # With the additional library directory, should pass.
+ test.build('library-directories.gyp', 'test_libdirs_with', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-ltcg.py b/third_party/python/gyp/test/win/gyptest-link-ltcg.py
new file mode 100644
index 0000000000..5271e099d7
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-ltcg.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure LTCG is working properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('ltcg.gyp', chdir=CHDIR)
+
+ # Here we expect LTCG to be able to inline functions across compilation
+ # units. Note: this marker is embedded in 'inline_test_main.cc'.
+ INLINE_MARKER = '==== inlined ===='
+
+ # link.exe generates the following lines when LTCG is enabled.
+ # Note: a future link.exe may or may not generate them; update as needed.
+ LTCG_LINKER_MESSAGES = ['Generating code', 'Finished generating code']
+
+ # test 'LinkTimeCodeGenerationOptionDefault'
+ test.build('ltcg.gyp', 'test_ltcg_off', chdir=CHDIR)
+ test.run_built_executable('test_ltcg_off', chdir=CHDIR)
+ test.must_not_contain_any_line(test.stdout(), [INLINE_MARKER])
+
+ # test 'LinkTimeCodeGenerationOptionUse'
+ test.build('ltcg.gyp', 'test_ltcg_on', chdir=CHDIR)
+ if test.format == 'ninja':
+ # Make sure ninja win_tool.py filters out noisy lines.
+ test.must_not_contain_any_line(test.stdout(), LTCG_LINKER_MESSAGES)
+ elif test.format == 'msvs':
+ test.must_contain_any_line(test.stdout(), LTCG_LINKER_MESSAGES)
+ test.run_built_executable('test_ltcg_on', chdir=CHDIR)
+ test.must_contain_any_line(test.stdout(), [INLINE_MARKER])
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-mapfile.py b/third_party/python/gyp/test/win/gyptest-link-mapfile.py
new file mode 100644
index 0000000000..00c1dea9e9
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-mapfile.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure mapfile settings are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('mapfile.gyp', chdir=CHDIR)
+ test.build('mapfile.gyp', test.ALL, chdir=CHDIR)
+
+ map_file = test.built_file_path('test_mapfile_unset.map', chdir=CHDIR)
+ test.must_not_exist(map_file)
+
+ map_file = test.built_file_path('test_mapfile_generate.map', chdir=CHDIR)
+ test.must_exist(map_file)
+ test.must_contain(map_file, '?AnExportedFunction@@YAXXZ')
+ test.must_not_contain(map_file, 'void __cdecl AnExportedFunction(void)')
+
+ map_file = test.built_file_path('test_mapfile_generate_exports.map',
+ chdir=CHDIR)
+ test.must_exist(map_file)
+ test.must_contain(map_file, 'void __cdecl AnExportedFunction(void)')
+
+ map_file = test.built_file_path('test_mapfile_generate_filename.map',
+ chdir=CHDIR)
+ test.must_not_exist(map_file)
+
+ map_file = test.built_file_path('custom_file_name.map', chdir=CHDIR)
+ test.must_exist(map_file)
+ test.must_contain(map_file, '?AnExportedFunction@@YAXXZ')
+ test.must_not_contain(map_file, 'void __cdecl AnExportedFunction(void)')
+
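+ # For reference, these outcomes correspond to VCLinkerTool settings along
+ # the lines of (illustrative; the exact mapfile.gyp contents may differ):
+ #   'GenerateMapFile': 'true'  -> /MAP (map with decorated names),
+ #   'MapExports': 'true'       -> /MAPINFO:EXPORTS (undecorated export names),
+ #   'MapFileName': 'custom_file_name.map' -> a custom /MAP:<file> path.
+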
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-nodefaultlib.py b/third_party/python/gyp/test/win/gyptest-link-nodefaultlib.py
new file mode 100644
index 0000000000..f00760b882
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-nodefaultlib.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure nodefaultlib setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('nodefaultlib.gyp', chdir=CHDIR)
+
+ test.build('nodefaultlib.gyp', 'test_ok', chdir=CHDIR)
+ test.build('nodefaultlib.gyp', 'test_fail', chdir=CHDIR, status=1)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-noimportlib.py b/third_party/python/gyp/test/win/gyptest-link-noimportlib.py
new file mode 100644
index 0000000000..d12e0ad3ed
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-noimportlib.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure that the (custom) NoImportLibrary flag is handled correctly.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'importlib'
+ test.run_gyp('noimplib.gyp', chdir=CHDIR)
+ test.build('noimplib.gyp', test.ALL, chdir=CHDIR)
+
+ # The target has an entry point, but no exports. Ordinarily, ninja expects
+ # all DLLs to export some symbols (with the exception of /NOENTRY resource-
+ # only DLLs). When the NoImportLibrary flag is set, this expectation is
+ # suppressed. If this is not working correctly, the .lib will be expected
+ # but never generated, so the build will never be up to date.
+ test.up_to_date('noimplib.gyp', test.ALL, chdir=CHDIR)
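+
+ # The flag is presumably spelled in noimplib.gyp roughly as (assumed; it
+ # is a gyp-level custom setting honored only by the ninja generator):
+ #   'msvs_settings': {'NoImportLibrary': 'true'},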
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-nxcompat.py b/third_party/python/gyp/test/win/gyptest-link-nxcompat.py
new file mode 100644
index 0000000000..660074397c
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-nxcompat.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure nxcompat setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('nxcompat.gyp', chdir=CHDIR)
+ test.build('nxcompat.gyp', test.ALL, chdir=CHDIR)
+
+ def GetHeaders(exe):
+ return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
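+
+ # For reference, dumpbin /headers prints the DLL characteristics of the
+ # PE optional header; an /NXCOMPAT image includes a line reading exactly
+ # 'NX compatible' (illustrative excerpt):
+ #   8160 DLL characteristics
+ #          NX compatible
+ #          Terminal Server Aware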
+
+ # NXCOMPAT is on by default.
+ if 'NX compatible' not in GetHeaders('test_nxcompat_default.exe'):
+ test.fail_test()
+
+ # Explicitly off, should not be marked NX compatible.
+ if 'NX compatible' in GetHeaders('test_nxcompat_no.exe'):
+ test.fail_test()
+
+ # Explicitly on.
+ if 'NX compatible' not in GetHeaders('test_nxcompat_yes.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-opt-icf.py b/third_party/python/gyp/test/win/gyptest-link-opt-icf.py
new file mode 100644
index 0000000000..3c48ef6eb9
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-opt-icf.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure comdat folding optimization setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('opt-icf.gyp', chdir=CHDIR)
+ test.build('opt-icf.gyp', chdir=CHDIR)
+
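+ # Assumed shape of the test source: several functions with identical
+ # bodies, e.g.
+ #   int similar_function_0() { return 42; }   // and _1, _2 alike
+ # /OPT:ICF folds such identical COMDATs into one definition, so the
+ # disassembly retains one definition plus the three annotated call sites.
+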
+ # We're specifying /DEBUG, so the default is to not merge identical
+ # functions; all of the similar_function copies should be preserved.
+ output = test.run_dumpbin(
+ '/disasm', test.built_file_path('test_opticf_default.exe', chdir=CHDIR))
+ if output.count('similar_function') != 6: # 3 definitions, 3 calls.
+ test.fail_test()
+
+ # Explicitly off, all functions preserved separately.
+ output = test.run_dumpbin(
+ '/disasm', test.built_file_path('test_opticf_no.exe', chdir=CHDIR))
+ if output.count('similar_function') != 6: # 3 definitions, 3 calls.
+ test.fail_test()
+
+ # Explicitly on, all but one removed.
+ output = test.run_dumpbin(
+ '/disasm', test.built_file_path('test_opticf_yes.exe', chdir=CHDIR))
+ if output.count('similar_function') != 4: # 1 definition, 3 calls.
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-opt-ref.py b/third_party/python/gyp/test/win/gyptest-link-opt-ref.py
new file mode 100644
index 0000000000..586b7afcd4
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-opt-ref.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure reference optimization setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('opt-ref.gyp', chdir=CHDIR)
+ test.build('opt-ref.gyp', chdir=CHDIR)
+
+ # We're specifying /DEBUG so the default is to not remove unused functions.
+ output = test.run_dumpbin(
+ '/disasm', test.built_file_path('test_optref_default.exe', chdir=CHDIR))
+ if 'unused_function' not in output:
+ test.fail_test()
+
+ # Explicitly off, unused_function preserved.
+ output = test.run_dumpbin(
+ '/disasm', test.built_file_path('test_optref_no.exe', chdir=CHDIR))
+ if 'unused_function' not in output:
+ test.fail_test()
+
+ # Explicitly on, should be removed.
+ output = test.run_dumpbin(
+ '/disasm', test.built_file_path('test_optref_yes.exe', chdir=CHDIR))
+ if 'unused_function' in output:
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-ordering.py b/third_party/python/gyp/test/win/gyptest-link-ordering.py
new file mode 100644
index 0000000000..ed8ee98c9e
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-ordering.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure the link order of object files is the same between msvs and ninja.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('link-ordering.gyp', chdir=CHDIR)
+ test.build('link-ordering.gyp', test.ALL, chdir=CHDIR)
+
+ def GetDisasm(exe):
+ full_path = test.built_file_path(exe, chdir=CHDIR)
+ # Get disassembly and drop int3 padding between functions.
+ return '\n'.join(
+ x for x in test.run_dumpbin('/disasm', full_path).splitlines()
+ if 'CC' not in x)
+
+ # This is the full dump that we expect. The source files in the .gyp match
+ # this order which is what determines the ordering in the binary.
+
+ expected_disasm_basic = '''
+_mainCRTStartup:
+ 00401000: B8 05 00 00 00 mov eax,5
+ 00401005: C3 ret
+?z@@YAHXZ:
+ 00401010: B8 03 00 00 00 mov eax,3
+ 00401015: C3 ret
+?x@@YAHXZ:
+ 00401020: B8 01 00 00 00 mov eax,1
+ 00401025: C3 ret
+?y@@YAHXZ:
+ 00401030: B8 02 00 00 00 mov eax,2
+ 00401035: C3 ret
+_main:
+ 00401040: 33 C0 xor eax,eax
+ 00401042: C3 ret
+'''
+
+ if expected_disasm_basic not in GetDisasm('test_ordering_exe.exe'):
+ print(GetDisasm('test_ordering_exe.exe'))
+ test.fail_test()
+
+ # Similar to above. The VS generator handles subdirectories differently.
+
+ expected_disasm_subdirs = '''
+_mainCRTStartup:
+ 00401000: B8 05 00 00 00 mov eax,5
+ 00401005: C3 ret
+_main:
+ 00401010: 33 C0 xor eax,eax
+ 00401012: C3 ret
+?y@@YAHXZ:
+ 00401020: B8 02 00 00 00 mov eax,2
+ 00401025: C3 ret
+?z@@YAHXZ:
+ 00401030: B8 03 00 00 00 mov eax,3
+ 00401035: C3 ret
+'''
+
+ if expected_disasm_subdirs not in GetDisasm('test_ordering_subdirs.exe'):
+ print(GetDisasm('test_ordering_subdirs.exe'))
+ test.fail_test()
+
+ # Similar, but with directories mixed into folders (crt and main at the same
+ # level, but with a subdir in the middle).
+
+ expected_disasm_subdirs_mixed = '''
+_mainCRTStartup:
+ 00401000: B8 05 00 00 00 mov eax,5
+ 00401005: C3 ret
+?x@@YAHXZ:
+ 00401010: B8 01 00 00 00 mov eax,1
+ 00401015: C3 ret
+_main:
+ 00401020: 33 C0 xor eax,eax
+ 00401022: C3 ret
+?z@@YAHXZ:
+ 00401030: B8 03 00 00 00 mov eax,3
+ 00401035: C3 ret
+?y@@YAHXZ:
+ 00401040: B8 02 00 00 00 mov eax,2
+ 00401045: C3 ret
+'''
+
+ if (expected_disasm_subdirs_mixed not in
+ GetDisasm('test_ordering_subdirs_mixed.exe')):
+ print(GetDisasm('test_ordering_subdirs_mixed.exe'))
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-outputfile.py b/third_party/python/gyp/test/win/gyptest-link-outputfile.py
new file mode 100644
index 0000000000..b98cdff0f0
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-outputfile.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure linker OutputFile setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('outputfile.gyp', chdir=CHDIR)
+ test.build('outputfile.gyp', test.ALL, chdir=CHDIR)
+
+ test.built_file_must_exist('blorp.exe', chdir=CHDIR)
+ test.built_file_must_exist('blorp.dll', chdir=CHDIR)
+ test.built_file_must_exist('subdir/blorp.exe', chdir=CHDIR)
+ test.built_file_must_exist('blorp.lib', chdir=CHDIR)
+ test.built_file_must_exist('subdir/blorp.lib', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-pdb-no-output.py b/third_party/python/gyp/test/win/gyptest-link-pdb-no-output.py
new file mode 100644
index 0000000000..6da0aeae98
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-pdb-no-output.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Ensure that when debug information is not output, a pdb is not expected.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp()
+ CHDIR = 'linker-flags'
+ test.run_gyp('pdb-output.gyp', chdir=CHDIR)
+ test.build('pdb-output.gyp', 'test_pdb_output_disabled', chdir=CHDIR)
+ # Make sure that the build doesn't expect a PDB to be generated when there
+ # will be none.
+ test.up_to_date('pdb-output.gyp', 'test_pdb_output_disabled', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-pdb-output.py b/third_party/python/gyp/test/win/gyptest-link-pdb-output.py
new file mode 100644
index 0000000000..27245f7ec8
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-pdb-output.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Ensure that ninja includes the .pdb as an output file from linking.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+ CHDIR = 'linker-flags'
+ test.run_gyp('pdb-output.gyp', chdir=CHDIR)
+ # Note: build the .pdb outputs directly, rather than ALL or a gyp target.
+ test.build('pdb-output.gyp', 'output_exe.pdb', chdir=CHDIR)
+ test.build('pdb-output.gyp', 'output_dll.pdb', chdir=CHDIR)
+
+ def FindFile(pdb):
+ full_path = test.built_file_path(pdb, chdir=CHDIR)
+ return os.path.isfile(full_path)
+
+ if not FindFile('output_exe.pdb'):
+ test.fail_test()
+ if not FindFile('output_dll.pdb'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-pdb.py b/third_party/python/gyp/test/win/gyptest-link-pdb.py
new file mode 100644
index 0000000000..26d744d0b7
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-pdb.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that the 'ProgramDatabaseFile' attribute in VCLinkerTool is
+extracted properly.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+ CHDIR = 'linker-flags'
+ test.run_gyp('program-database.gyp', chdir=CHDIR)
+ test.build('program-database.gyp', test.ALL, chdir=CHDIR)
+
+ def FindFile(pdb):
+ full_path = test.built_file_path(pdb, chdir=CHDIR)
+ return os.path.isfile(full_path)
+
+ # Verify the specified PDB is created when ProgramDatabaseFile
+ # is provided.
+ if not FindFile('name_outdir.pdb'):
+ test.fail_test()
+ if not FindFile('name_proddir.pdb'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-pgo.py b/third_party/python/gyp/test/win/gyptest-link-pgo.py
new file mode 100644
index 0000000000..d742047ac3
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-pgo.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure PGO is working properly.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('pgo.gyp', chdir=CHDIR)
+
+ def IsPGOAvailable():
+ """Returns true if the Visual Studio available here supports PGO."""
+ test.build('pgo.gyp', 'gen_linker_option', chdir=CHDIR)
+ tmpfile = test.read(test.built_file_path('linker_options.txt', chdir=CHDIR))
+ # str.find() returns -1 (truthy) when the substring is absent, so an
+ # any(line.find(...)) check would effectively always be true; test for
+ # the substring directly instead.
+ return 'PGOPTIMIZE' in tmpfile
+
+ # Test generated build files look fine.
+ if test.format == 'ninja':
+ ninja = test.built_file_path('obj/test_pgo_instrument.ninja', chdir=CHDIR)
+ test.must_contain(ninja, '/LTCG:PGINSTRUMENT')
+ test.must_contain(ninja, 'test_pgo.pgd')
+ ninja = test.built_file_path('obj/test_pgo_optimize.ninja', chdir=CHDIR)
+ test.must_contain(ninja, '/LTCG:PGOPTIMIZE')
+ test.must_contain(ninja, 'test_pgo.pgd')
+ ninja = test.built_file_path('obj/test_pgo_update.ninja', chdir=CHDIR)
+ test.must_contain(ninja, '/LTCG:PGUPDATE')
+ test.must_contain(ninja, 'test_pgo.pgd')
+ elif test.format == 'msvs':
+ LTCG_FORMAT = '<LinkTimeCodeGeneration>%s</LinkTimeCodeGeneration>'
+ vcproj = test.workpath('linker-flags/test_pgo_instrument.vcxproj')
+ test.must_contain(vcproj, LTCG_FORMAT % 'PGInstrument')
+ test.must_contain(vcproj, 'test_pgo.pgd')
+ vcproj = test.workpath('linker-flags/test_pgo_optimize.vcxproj')
+ test.must_contain(vcproj, LTCG_FORMAT % 'PGOptimization')
+ test.must_contain(vcproj, 'test_pgo.pgd')
+ vcproj = test.workpath('linker-flags/test_pgo_update.vcxproj')
+ test.must_contain(vcproj, LTCG_FORMAT % 'PGUpdate')
+ test.must_contain(vcproj, 'test_pgo.pgd')
+
+ # When PGO is available, try building binaries with PGO.
+ if IsPGOAvailable():
+ pgd_path = test.built_file_path('test_pgo.pgd', chdir=CHDIR)
+
+ # Test if 'PGInstrument' generates PGD (Profile-Guided Database) file.
+ if os.path.exists(pgd_path):
+ test.unlink(pgd_path)
+ test.must_not_exist(pgd_path)
+ test.build('pgo.gyp', 'test_pgo_instrument', chdir=CHDIR)
+ test.must_exist(pgd_path)
+
+ # Test if 'PGOptimize' works well
+ test.build('pgo.gyp', 'test_pgo_optimize', chdir=CHDIR)
+ test.must_contain_any_line(test.stdout(), ['profiled functions'])
+
+ # Test if 'PGUpdate' works well
+ test.build('pgo.gyp', 'test_pgo_update', chdir=CHDIR)
+ # With 'PGUpdate', the linker should not complain that sources have changed
+ # since the previous training run.
+ test.touch(test.workpath('linker-flags/inline_test_main.cc'))
+ test.unlink(test.built_file_path('test_pgo_update.exe', chdir=CHDIR))
+ test.build('pgo.gyp', 'test_pgo_update', chdir=CHDIR)
+ test.must_contain_any_line(test.stdout(), ['profiled functions'])
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-profile.py b/third_party/python/gyp/test/win/gyptest-link-profile.py
new file mode 100644
index 0000000000..4dbc9ae4ce
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-profile.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that the 'Profile' attribute in VCLinkerTool is extracted properly.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+ CHDIR = 'linker-flags'
+ test.run_gyp('profile.gyp', chdir=CHDIR)
+ test.build('profile.gyp', test.ALL, chdir=CHDIR)
+
+ def GetSummary(exe):
+ full_path = test.built_file_path(exe, chdir=CHDIR)
+ return test.run_dumpbin(full_path)
+
+ # '.idata' section will be missing when /PROFILE is enabled.
+ if '.idata' in GetSummary('test_profile_true.exe'):
+ test.fail_test()
+
+ if not '.idata' in GetSummary('test_profile_false.exe'):
+ test.fail_test()
+
+ if not '.idata' in GetSummary('test_profile_default.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-restat-importlib.py b/third_party/python/gyp/test/win/gyptest-link-restat-importlib.py
new file mode 100644
index 0000000000..d249e0a6bd
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-restat-importlib.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure we don't cause unnecessary builds due to import libs appearing
+to be out of date.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import sys
+import time
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ if not os.environ.get('ProgramFiles(x86)'):
+ # TODO(scottmg)
+ print('Skipping test on x86, http://crbug.com/365833')
+ test.pass_test()
+
+ CHDIR = 'importlib'
+ test.run_gyp('importlib.gyp', chdir=CHDIR)
+ test.build('importlib.gyp', test.ALL, chdir=CHDIR)
+
+ # Delay briefly so that the following touch doesn't end up with the same
+ # timestamp as the previous run.
+ test.sleep()
+
+ # Touch the .cc file; the .dll will rebuild, but the import libs timestamp
+ # won't be updated.
+ test.touch('importlib/has-exports.cc')
+ test.build('importlib.gyp', 'test_importlib', chdir=CHDIR)
+
+ # This is the important part. The .dll above will relink and have an updated
+ # timestamp; however, the import .lib's timestamp won't be updated. So we
+ # have to handle re-stat'ing inputs in ninja so that the final binary doesn't
+ # continually relink (due to thinking the .lib isn't up to date).
+ test.up_to_date('importlib.gyp', test.ALL, chdir=CHDIR)
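+
+ # In ninja terms this relies on the link rule being declared with
+ # 'restat = 1' (illustrative rule shape, not gyp's literal output):
+ #   rule link
+ #     command = $ld @$out.rsp /DLL ...
+ #     restat = 1
+ # After the command runs, ninja re-stats the outputs and, where a
+ # timestamp is unchanged, prunes the dependents that would otherwise
+ # rebuild.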
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-safeseh.py b/third_party/python/gyp/test/win/gyptest-link-safeseh.py
new file mode 100644
index 0000000000..31a25673f4
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-safeseh.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure safeseh setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp()
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('safeseh.gyp', chdir=CHDIR)
+ test.build('safeseh.gyp', test.ALL, chdir=CHDIR)
+
+ def HasSafeExceptionHandlers(exe):
+ full_path = test.built_file_path(exe, chdir=CHDIR)
+ output = test.run_dumpbin('/LOADCONFIG', full_path)
+ return ' Safe Exception Handler Table' in output
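+
+ # For reference, dumpbin /LOADCONFIG prints the image's load configuration
+ # directory; a SafeSEH image includes lines like (illustrative):
+ #   Safe Exception Handler Table
+ #   Safe Exception Handler Count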
+
+ # From MSDN: http://msdn.microsoft.com/en-us/library/9a89h429.aspx
+ # If /SAFESEH is not specified, the linker will produce an image with a
+ # table of safe exception handlers if all modules are compatible with
+ # the safe exception handling feature. If any modules were not
+ # compatible with the safe exception handling feature, the resulting image
+ # will not contain a table of safe exception handlers.
+ # However, the msvs IDE passes /SAFESEH to the linker by default, if
+ # ImageHasSafeExceptionHandlers is not set to false in the vcxproj file.
+ # We emulate this behavior in msvs_emulation.py, so 'test_safeseh_default'
+ # and 'test_safeseh_yes' are built identically.
+ if not HasSafeExceptionHandlers('test_safeseh_default.exe'):
+ test.fail_test()
+ if HasSafeExceptionHandlers('test_safeseh_no.exe'):
+ test.fail_test()
+ if not HasSafeExceptionHandlers('test_safeseh_yes.exe'):
+ test.fail_test()
+ if HasSafeExceptionHandlers('test_safeseh_x64.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-shard.py b/third_party/python/gyp/test/win/gyptest-link-shard.py
new file mode 100644
index 0000000000..9af93284a7
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-shard.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure msvs_shard works correctly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'shard'
+ test.run_gyp('shard.gyp', chdir=CHDIR)
+ test.build('shard.gyp', test.ALL, chdir=CHDIR)
+
+ test.built_file_must_exist('shard_0.lib', chdir=CHDIR)
+ test.built_file_must_exist('shard_1.lib', chdir=CHDIR)
+ test.built_file_must_exist('shard_2.lib', chdir=CHDIR)
+ test.built_file_must_exist('shard_3.lib', chdir=CHDIR)
+
+ test.run_gyp('shard_ref.gyp', chdir=CHDIR)
+ test.build('shard_ref.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-stacksize.py b/third_party/python/gyp/test/win/gyptest-link-stacksize.py
new file mode 100644
index 0000000000..2e952d2c73
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-stacksize.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure StackReserveSize and StackCommitSize settings are extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('stacksize.gyp', chdir=CHDIR)
+ test.build('stacksize.gyp', test.ALL, chdir=CHDIR)
+
+ def GetHeaders(exe):
+ return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
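+
+ # Quick sanity check of the hex figures quoted in the comments below
+ # (pure arithmetic, no assumptions about the toolchain):
+ assert 0x100000 == 1024 * 1024   # 1MB default stack reserve
+ assert 0x1000 == 4 * 1024        # 4KB default stack commit
+ assert 0x200000 == 2 * 1024 * 1024 and 0x2000 == 8 * 1024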
+
+ # Verify default sizes as reported by dumpbin:
+ # 100000h = 1MB
+ # 1000h = 4KB
+ default_headers = GetHeaders('test_default.exe')
+ if '100000 size of stack reserve' not in default_headers:
+ test.fail_test()
+ if '1000 size of stack commit' not in default_headers:
+ test.fail_test()
+
+ # Verify that reserved size is changed, but commit size is unchanged:
+ # 200000h = 2MB
+ # 1000h = 4KB
+ set_reserved_size_headers = GetHeaders('test_set_reserved_size.exe')
+ if '200000 size of stack reserve' not in set_reserved_size_headers:
+ test.fail_test()
+ if '1000 size of stack commit' not in set_reserved_size_headers:
+ test.fail_test()
+
+ # Verify that setting the commit size, without the reserve size, has no
+ # effect:
+ # 100000h = 1MB
+ # 1000h = 4KB
+ set_commit_size_headers = GetHeaders('test_set_commit_size.exe')
+ if '100000 size of stack reserve' not in set_commit_size_headers:
+ test.fail_test()
+ if '1000 size of stack commit' not in set_commit_size_headers:
+ test.fail_test()
+
+ # Verify that setting both works:
+ # 200000h = 2MB
+ # 2000h = 8KB
+ set_both_headers = GetHeaders('test_set_both.exe')
+ if '200000 size of stack reserve' not in set_both_headers:
+ test.fail_test()
+ if '2000 size of stack commit' not in set_both_headers:
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-subsystem.py b/third_party/python/gyp/test/win/gyptest-link-subsystem.py
new file mode 100644
index 0000000000..a94ba36856
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-subsystem.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure subsystem setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('subsystem.gyp', chdir=CHDIR)
+
+ test.build('subsystem.gyp', 'test_console_ok', chdir=CHDIR)
+ test.build('subsystem.gyp', 'test_console_fail', chdir=CHDIR, status=1)
+ test.build('subsystem.gyp', 'test_windows_ok', chdir=CHDIR)
+ test.build('subsystem.gyp', 'test_windows_fail', chdir=CHDIR, status=1)
+
+ test.build('subsystem.gyp', 'test_console_xp', chdir=CHDIR)
+ test.build('subsystem.gyp', 'test_windows_xp', chdir=CHDIR)
+ # Make sure we are targeting XP.
+ def GetHeaders(exe):
+ return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
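+ # Subsystem version 5.01 is the PE optional-header value that
+ # /SUBSYSTEM:CONSOLE,5.01 (or WINDOWS,5.01) stamps for Windows XP.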
+ if '5.01 subsystem version' not in GetHeaders('test_console_xp.exe'):
+ test.fail_test()
+ if '5.01 subsystem version' not in GetHeaders('test_windows_xp.exe'):
+ test.fail_test()
+
+ # TODO(scottmg): There are other subsystems (WinCE, etc.) that we don't use.
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-target-machine.py b/third_party/python/gyp/test/win/gyptest-link-target-machine.py
new file mode 100644
index 0000000000..5a15f3f4dc
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-target-machine.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure TargetMachine setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('target-machine.gyp', chdir=CHDIR)
+ # The .cc file is compiled as x86 (the default), so the x64 link/lib
+ # targets must fail.
+ test.build('target-machine.gyp', 'test_target_link_x86', chdir=CHDIR)
+ test.build(
+ 'target-machine.gyp', 'test_target_link_x64', chdir=CHDIR, status=1)
+ test.build('target-machine.gyp', 'test_target_lib_x86', chdir=CHDIR)
+ test.build('target-machine.gyp', 'test_target_lib_x64', chdir=CHDIR, status=1)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-tsaware.py b/third_party/python/gyp/test/win/gyptest-link-tsaware.py
new file mode 100644
index 0000000000..d34b3c24a5
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-tsaware.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure tsaware setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('tsaware.gyp', chdir=CHDIR)
+ test.build('tsaware.gyp', test.ALL, chdir=CHDIR)
+
+ def GetHeaders(exe):
+ return test.run_dumpbin('/headers', test.built_file_path(exe, chdir=CHDIR))
+
+ # Explicitly off, should not be marked Terminal Server Aware.
+ if 'Terminal Server Aware' in GetHeaders('test_tsaware_no.exe'):
+ test.fail_test()
+
+ # Explicitly on.
+ if 'Terminal Server Aware' not in GetHeaders('test_tsaware_yes.exe'):
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-uldi-depending-on-module.py b/third_party/python/gyp/test/win/gyptest-link-uldi-depending-on-module.py
new file mode 100644
index 0000000000..75c9503dc4
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-uldi-depending-on-module.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure that when ULDI is on, depending on a component's .objs causes the
+downstream modules to get built as well.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'uldi'
+ test.run_gyp('uldi-depending-on-module.gyp', chdir=CHDIR)
+ test.build('uldi-depending-on-module.gyp', 'an_exe', chdir=CHDIR)
+ test.built_file_must_exist('a_module.dll', chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-uldi.py b/third_party/python/gyp/test/win/gyptest-link-uldi.py
new file mode 100644
index 0000000000..62c5892c50
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-uldi.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure that when ULDI is on, we link .objs that make up .libs rather than
+the .libs themselves.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'uldi'
+ test.run_gyp('uldi.gyp', chdir=CHDIR)
+ # When linking with ULDI, the duplicated function from the lib will be an
+ # error.
+ test.build('uldi.gyp', 'final_uldi', chdir=CHDIR, status=1)
+ # And when in libs, the duplicated function will be silently dropped, so the
+ # build succeeds.
+ test.build('uldi.gyp', 'final_no_uldi', chdir=CHDIR)
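+ # Illustrative failure mode: if the exe and the lib both define, say,
+ # 'int duplicated()', linking the component .objs directly typically
+ # fails with LNK2005 ('symbol already defined'), whereas linking via the
+ # .lib only pulls members needed to resolve externals, so the duplicate
+ # is never brought in and the build succeeds.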
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-unsupported-manifest.py b/third_party/python/gyp/test/win/gyptest-link-unsupported-manifest.py
new file mode 100644
index 0000000000..8f7e12bc8c
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-unsupported-manifest.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure we error out if #pragma comments are used to modify manifests.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ # This assertion only applies to the ninja build.
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('unsupported-manifest.gyp', chdir=CHDIR)
+
+ # Just needs to fail to build.
+ test.build('unsupported-manifest.gyp',
+ 'test_unsupported', chdir=CHDIR, status=1)
+ test.must_not_exist(test.built_file_path('test_unsupported.exe', chdir=CHDIR))
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-link-update-manifest.py b/third_party/python/gyp/test/win/gyptest-link-update-manifest.py
new file mode 100644
index 0000000000..7bad1eca77
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-update-manifest.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure binary is relinked when manifest settings are changed.
+"""
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ import pywintypes
+ import win32api
+ import winerror
+
+ RT_MANIFEST = 24
+
+ class LoadLibrary(object):
+ """Context manager for loading and releasing binaries in Windows.
+ Yields the handle of the binary loaded."""
+ def __init__(self, path):
+ self._path = path
+ self._handle = None
+
+ def __enter__(self):
+ self._handle = win32api.LoadLibrary(self._path)
+ return self._handle
+
+ def __exit__(self, type, value, traceback):
+ win32api.FreeLibrary(self._handle)
+
+ def extract_manifest(path, resource_name):
+ """Reads manifest from |path| and returns it as a string.
+ Returns None if there is no such manifest."""
+ with LoadLibrary(path) as handle:
+ try:
+ return win32api.LoadResource(
+ handle, RT_MANIFEST, resource_name).decode('utf-8', 'ignore')
+ except pywintypes.error as error:
+ if error.args[0] == winerror.ERROR_RESOURCE_DATA_NOT_FOUND:
+ return None
+ else:
+ raise
+
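+ # Illustrative use (resource id 1 is the conventional manifest resource
+ # id for an executable):
+ #   xml = extract_manifest('some_binary.exe', 1)
+ #   # -> the manifest text, or None if the binary embeds no manifest.
+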
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+
+ gyp_template = '''
+{
+ 'targets': [
+ {
+ 'target_name': 'test_update_manifest',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'UACExecutionLevel': '%(uac_execution_level)d',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ 'AdditionalManifestFiles': '%(additional_manifest_files)s',
+ },
+ },
+ },
+ ],
+}
+'''
+
+ gypfile = 'update-manifest.gyp'
+
+ def WriteAndUpdate(uac_execution_level, additional_manifest_files, do_build):
+ with open(os.path.join(CHDIR, gypfile), 'w') as f:
+ f.write(gyp_template % {
+ 'uac_execution_level': uac_execution_level,
+ 'additional_manifest_files': additional_manifest_files,
+ })
+ test.run_gyp(gypfile, chdir=CHDIR)
+ if do_build:
+ test.build(gypfile, chdir=CHDIR)
+ exe_file = test.built_file_path('test_update_manifest.exe', chdir=CHDIR)
+ return extract_manifest(exe_file, 1)
+
+ manifest = WriteAndUpdate(0, '', True)
+ test.fail_test('asInvoker' not in manifest)
+ test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' in manifest)
+
+ # Make sure that updating .gyp and regenerating doesn't cause a rebuild.
+ WriteAndUpdate(0, '', False)
+ test.up_to_date(gypfile, test.ALL, chdir=CHDIR)
+
+ # But make sure that changing a manifest property does cause a relink.
+ manifest = WriteAndUpdate(2, '', True)
+ test.fail_test('requireAdministrator' not in manifest)
+
+ # Adding a manifest causes a rebuild.
+ manifest = WriteAndUpdate(2, 'extra.manifest', True)
+ test.fail_test('35138b9a-5d96-4fbd-8e2d-a2440225f93a' not in manifest)
diff --git a/third_party/python/gyp/test/win/gyptest-link-warnings-as-errors.py b/third_party/python/gyp/test/win/gyptest-link-warnings-as-errors.py
new file mode 100644
index 0000000000..d6a64736fb
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-link-warnings-as-errors.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure linker warnings-as-errors setting is extracted properly.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'linker-flags'
+ test.run_gyp('warn-as-error.gyp', chdir=CHDIR)
+
+ test.build('warn-as-error.gyp', 'test_on', chdir=CHDIR, status=1)
+ test.build('warn-as-error.gyp', 'test_off', chdir=CHDIR)
+ test.build('warn-as-error.gyp', 'test_default', chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-long-command-line.py b/third_party/python/gyp/test/win/gyptest-long-command-line.py
new file mode 100644
index 0000000000..8f8b7a3bbd
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-long-command-line.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure long command lines work.
+"""
+
+import TestGyp
+
+import subprocess
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja', 'msvs'])
+
+ CHDIR = 'long-command-line'
+ test.run_gyp('long-command-line.gyp', chdir=CHDIR)
+ test.build('long-command-line.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-macro-projectname.py b/third_party/python/gyp/test/win/gyptest-macro-projectname.py
new file mode 100644
index 0000000000..e411cc04a4
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-macro-projectname.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure macro expansion of $(ProjectName) is handled.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'vs-macros'
+ test.run_gyp('projectname.gyp', chdir=CHDIR)
+ test.build('projectname.gyp', test.ALL, chdir=CHDIR)
+ test.built_file_must_exist('test_expansions_plus_something.exe', chdir=CHDIR)
+ test.built_file_must_exist(
+ 'test_with_product_name_plus_something.exe', chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-macro-targetext.py b/third_party/python/gyp/test/win/gyptest-macro-targetext.py
new file mode 100644
index 0000000000..450710d631
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-macro-targetext.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure macro expansion of $(TargetExt) is handled.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'vs-macros'
+ test.run_gyp('targetext.gyp', chdir=CHDIR)
+ test.build('targetext.gyp', test.ALL, chdir=CHDIR)
+ test.built_file_must_exist('executable.exe', chdir=CHDIR)
+ test.built_file_must_exist('loadable_module.dll', chdir=CHDIR)
+ test.built_file_must_exist('shared_library.dll', chdir=CHDIR)
+ test.built_file_must_exist('static_library.lib', chdir=CHDIR)
+ test.built_file_must_exist('product_extension.library', chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-macro-targetfilename.py b/third_party/python/gyp/test/win/gyptest-macro-targetfilename.py
new file mode 100644
index 0000000000..759e26c566
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-macro-targetfilename.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure macro expansion of $(TargetFileName) is handled.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import os
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+ if not (test.format == 'msvs' and
+ int(os.environ.get('GYP_MSVS_VERSION', 0)) == 2013):
+ CHDIR = 'vs-macros'
+ test.run_gyp('targetfilename.gyp', chdir=CHDIR)
+ test.build('targetfilename.gyp', test.ALL, chdir=CHDIR)
+ test.built_file_must_exist('test_targetfilename_executable.exe', chdir=CHDIR)
+ test.built_file_must_exist('test_targetfilename_loadable_module.dll',
+ chdir=CHDIR)
+ test.built_file_must_exist('test_targetfilename_shared_library.dll',
+ chdir=CHDIR)
+ test.built_file_must_exist('test_targetfilename_static_library.lib',
+ chdir=CHDIR)
+ test.built_file_must_exist('test_targetfilename_product_extension.foo',
+ chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-macro-targetname.py b/third_party/python/gyp/test/win/gyptest-macro-targetname.py
new file mode 100644
index 0000000000..b1118019a3
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-macro-targetname.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure macro expansion of $(TargetName) and $(TargetDir) are handled.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'vs-macros'
+ test.run_gyp('targetname.gyp', chdir=CHDIR)
+ test.build('targetname.gyp', test.ALL, chdir=CHDIR)
+ test.built_file_must_exist('test_targetname_plus_something1.exe',
+ chdir=CHDIR)
+ test.built_file_must_exist(
+ 'prod_prefixtest_targetname_with_prefix_plus_something2.exe',
+ chdir=CHDIR)
+ test.built_file_must_exist('prod_name_plus_something3.exe', chdir=CHDIR)
+ test.built_file_must_exist('prod_prefixprod_name_plus_something4.exe',
+ chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-macro-targetpath.py b/third_party/python/gyp/test/win/gyptest-macro-targetpath.py
new file mode 100644
index 0000000000..fe7eac1834
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-macro-targetpath.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure macro expansion of $(TargetPath) is handled.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'vs-macros'
+ test.run_gyp('targetpath.gyp', chdir=CHDIR)
+ test.build('targetpath.gyp', test.ALL, chdir=CHDIR)
+ test.built_file_must_exist('test_targetpath_executable.exe', chdir=CHDIR)
+ test.built_file_must_exist('test_targetpath_loadable_module.dll',
+ chdir=CHDIR)
+ test.built_file_must_exist('test_targetpath_shared_library.dll',
+ chdir=CHDIR)
+ test.built_file_must_exist('test_targetpath_static_library.lib',
+ chdir=CHDIR)
+ test.built_file_must_exist('test_targetpath_product_extension.foo',
+ chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-macro-vcinstalldir.py b/third_party/python/gyp/test/win/gyptest-macro-vcinstalldir.py
new file mode 100644
index 0000000000..37396e161a
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-macro-vcinstalldir.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure macro expansion of $(VCInstallDir) is handled, and specifically
+that it is always '/'-terminated for compatibility.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'vs-macros'
+ test.run_gyp('vcinstalldir.gyp', chdir=CHDIR)
+ # This fails on VS because the trailing slash escapes the trailing quote.
+ test.build('vcinstalldir.gyp', 'test_slash_trailing', chdir=CHDIR, status=1)
+ test.build('vcinstalldir.gyp', 'test_slash_dir', chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-macros-containing-gyp.py b/third_party/python/gyp/test/win/gyptest-macros-containing-gyp.py
new file mode 100644
index 0000000000..f6eaf63dbb
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-macros-containing-gyp.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Handle VS macro expansion containing gyp variables.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'vs-macros'
+ test.run_gyp('containing-gyp.gyp', chdir=CHDIR)
+ test.build('containing-gyp.gyp', test.ALL, chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-macros-in-inputs-and-outputs.py b/third_party/python/gyp/test/win/gyptest-macros-in-inputs-and-outputs.py
new file mode 100644
index 0000000000..3d6fa74e43
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-macros-in-inputs-and-outputs.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Handle macro expansion in inputs and outputs of rules.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'vs-macros'
+ test.run_gyp('input-output-macros.gyp', chdir=CHDIR)
+
+ test.build('input-output-macros.gyp', 'test_expansions', chdir=CHDIR)
+
+ test.built_file_must_exist('stuff.blah.something',
+ content='Random data file.\nModified.',
+ chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-midl-excluded.py b/third_party/python/gyp/test/win/gyptest-midl-excluded.py
new file mode 100644
index 0000000000..70059ab64b
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-midl-excluded.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Test that .idl files in actions and non-native rules are excluded.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'idl-excluded'
+ test.run_gyp('idl-excluded.gyp', chdir=CHDIR)
+ test.build('idl-excluded.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-midl-includedirs.py b/third_party/python/gyp/test/win/gyptest-midl-includedirs.py
new file mode 100644
index 0000000000..05f6370409
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-midl-includedirs.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verify that 'midl_include_dirs' is handled.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'idl-includedirs'
+ test.run_gyp('idl-includedirs.gyp', chdir=CHDIR)
+ test.build('idl-includedirs.gyp', test.ALL, chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-midl-rules.py b/third_party/python/gyp/test/win/gyptest-midl-rules.py
new file mode 100644
index 0000000000..591a507320
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-midl-rules.py
@@ -0,0 +1,28 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Handle default .idl build rules.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'idl-rules'
+ test.run_gyp('basic-idl.gyp', chdir=CHDIR)
+ for platform in ['Win32', 'x64']:
+ test.set_configuration('Debug|%s' % platform)
+ test.build('basic-idl.gyp', test.ALL, chdir=CHDIR)
+
+ # Make sure ninja win_tool.py filters out noisy lines.
+ if test.format == 'ninja' and 'Processing' in test.stdout():
+ test.fail_test()
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-ml-safeseh.py b/third_party/python/gyp/test/win/gyptest-ml-safeseh.py
new file mode 100644
index 0000000000..ec702b9df2
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-ml-safeseh.py
@@ -0,0 +1,22 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure the /safeseh option can be passed to ml.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ CHDIR = 'ml-safeseh'
+ test.run_gyp('ml-safeseh.gyp', chdir=CHDIR)
+ test.build('ml-safeseh.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-quoting-commands.py b/third_party/python/gyp/test/win/gyptest-quoting-commands.py
new file mode 100644
index 0000000000..b40f99f088
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-quoting-commands.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure batch files run as actions. This is a regression test for a
+previously missing trailing quote on the command line: cmd will typically
+insert a missing quote implicitly, but if the command already ends in a
+quote it will not insert another, so the command can become unterminated.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'batch-file-action'
+ test.run_gyp('batch-file-action.gyp', chdir=CHDIR)
+ test.build('batch-file-action.gyp', test.ALL, chdir=CHDIR)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-rc-build.py b/third_party/python/gyp/test/win/gyptest-rc-build.py
new file mode 100644
index 0000000000..4df33ab5d9
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-rc-build.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure we build and include .rc files.
+"""
+
+from __future__ import print_function
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ print("This test is currently disabled: https://crbug.com/483696.")
+ sys.exit(0)
+
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'rc-build'
+ test.run_gyp('hello.gyp', chdir=CHDIR)
+ test.build('hello.gyp', test.ALL, chdir=CHDIR)
+ test.up_to_date('hello.gyp', 'resource_only_dll', chdir=CHDIR)
+ test.run_built_executable('with_resources', chdir=CHDIR, status=4)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-sys.py b/third_party/python/gyp/test/win/gyptest-sys.py
new file mode 100644
index 0000000000..aceb25428e
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-sys.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that Windows drivers are built correctly.
+"""
+
+import TestGyp
+import TestCmd
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs'])
+
+ CHDIR = 'win-driver-target-type'
+ test.run_gyp('win-driver-target-type.gyp', chdir=CHDIR)
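+ # The WDK may not be installed on the machine running the test, so accept
+ # either a successful build or a failure, provided the output mentions the
+ # kernel-mode toolset or overall success (the assumed intent of the regex
+ # and status=[0, 1] below).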
+ maybe_missing = r'[\s\S]+?(WindowsKernelModeDriver|Build succeeded.)[\s\S]+?'
+ test.build('win-driver-target-type.gyp', 'win_driver_target_type',
+ chdir=CHDIR, stdout=maybe_missing,
+ status=[0, 1], match=TestCmd.match_re_dotall)
+
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/gyptest-system-include.py b/third_party/python/gyp/test/win/gyptest-system-include.py
new file mode 100644
index 0000000000..9a47d98538
--- /dev/null
+++ b/third_party/python/gyp/test/win/gyptest-system-include.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Checks that msvs_system_include_dirs works.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
+
+ CHDIR = 'system-include'
+ test.run_gyp('test.gyp', chdir=CHDIR)
+ test.build('test.gyp', test.ALL, chdir=CHDIR)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/idl-excluded/bad.idl b/third_party/python/gyp/test/win/idl-excluded/bad.idl
new file mode 100644
index 0000000000..38554e9635
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-excluded/bad.idl
@@ -0,0 +1,6 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+This is a dummy .idl file that will trigger an error if it is not excluded from
+the build.
diff --git a/third_party/python/gyp/test/win/idl-excluded/copy-file.py b/third_party/python/gyp/test/win/idl-excluded/copy-file.py
new file mode 100644
index 0000000000..7bdfbfd4bd
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-excluded/copy-file.py
@@ -0,0 +1,11 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+import sys
+
+# Copy argv[1] to argv[2], closing both files deterministically.
+with open(sys.argv[1], 'r') as infile, open(sys.argv[2], 'w') as outfile:
+  outfile.write(infile.read())
+
+sys.exit(0)
diff --git a/third_party/python/gyp/test/win/idl-excluded/idl-excluded.gyp b/third_party/python/gyp/test/win/idl-excluded/idl-excluded.gyp
new file mode 100644
index 0000000000..972b7dedac
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-excluded/idl-excluded.gyp
@@ -0,0 +1,58 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'exclude_with_action',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'actions': [{
+ 'action_name': 'copy_action',
+ 'inputs': [
+ 'copy-file.py',
+ 'bad.idl',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/bad.idl',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<@(_outputs)',
+ ],
+ }],
+ },
+ {
+ 'target_name': 'exclude_with_rule',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'sources': [
+ 'bad.idl',
+ ],
+ 'rules': [{
+ 'rule_name': 'copy_rule',
+ 'extension': 'idl',
+ 'inputs': [
+ 'copy-file.py',
+ ],
+ 'outputs': [
+ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).idl',
+ ],
+ 'action': [
+ 'python', '<@(_inputs)', '<(RULE_INPUT_PATH)', '<@(_outputs)',
+ ],
+ }],
+ },
+ {
+ 'target_name': 'program',
+ 'type': 'executable',
+ 'sources': [
+ 'program.cc',
+ ],
+ 'dependencies': [
+ 'exclude_with_action',
+ 'exclude_with_rule',
+ ],
+ },
+ ],
+}
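
Note on the two exclusion targets above: listing bad.idl as an action input (or feeding it through a rule with extension 'idl') keeps it away from the default MIDL handling, which is the point of the test. The `<@(_inputs)` and `<@(_outputs)` expansions splice the target's own lists into the command. A minimal sketch of the expanded copy_action command line, with an illustrative stand-in for <(INTERMEDIATE_DIR):

    # Sketch: roughly how copy_action's command line expands (paths illustrative).
    inputs = ['copy-file.py', 'bad.idl']
    outputs = ['$(IntDir)/bad.idl']  # stand-in for <(INTERMEDIATE_DIR)/bad.idl
    print(' '.join(['python'] + inputs + outputs))
    # python copy-file.py bad.idl $(IntDir)/bad.idl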
diff --git a/third_party/python/gyp/test/win/idl-excluded/program.cc b/third_party/python/gyp/test/win/idl-excluded/program.cc
new file mode 100644
index 0000000000..9dc3c94f34
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-excluded/program.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/idl-includedirs/hello.cc b/third_party/python/gyp/test/win/idl-includedirs/hello.cc
new file mode 100644
index 0000000000..9dc3c94f34
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-includedirs/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/idl-includedirs/idl-includedirs.gyp b/third_party/python/gyp/test/win/idl-includedirs/idl-includedirs.gyp
new file mode 100644
index 0000000000..fcec063a98
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-includedirs/idl-includedirs.gyp
@@ -0,0 +1,26 @@
+# Copyright (c) 2014 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_midl_include_dirs',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cc',
+ 'subdir/foo.idl',
+ 'subdir/bar.idl',
+ ],
+ 'midl_include_dirs': [
+ 'subdir',
+ ],
+ 'msvs_settings': {
+ 'VCMIDLTool': {
+ 'OutputDirectory': '<(INTERMEDIATE_DIR)',
+ 'DLLDataFileName': '$(InputName)_dlldata.h',
+ },
+ },
+ },
+ ],
+}
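
midl_include_dirs adds extra include search paths to the MIDL invocation, which is what lets subdir/foo.idl's `import "bar.idl"` resolve even though the compiler runs from the project directory. A hedged sketch of the expansion (the /I spelling follows MIDL's documented option; the generator's exact command may differ):

    # Sketch: midl_include_dirs entries become /I search paths for midl.exe.
    midl_include_dirs = ['subdir']
    print(['/I' + d for d in midl_include_dirs])  # ['/Isubdir']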
diff --git a/third_party/python/gyp/test/win/idl-includedirs/subdir/bar.idl b/third_party/python/gyp/test/win/idl-includedirs/subdir/bar.idl
new file mode 100644
index 0000000000..d4e6cbb3eb
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-includedirs/subdir/bar.idl
@@ -0,0 +1,13 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "oaidl.idl";
+
+[
+ object,
+ uuid(A03D1421-B1EC-11D0-8C3A-00C04FC31D3F),
+]
+interface Bar : IUnknown {
+ HRESULT BarFunction();
+};
diff --git a/third_party/python/gyp/test/win/idl-includedirs/subdir/foo.idl b/third_party/python/gyp/test/win/idl-includedirs/subdir/foo.idl
new file mode 100644
index 0000000000..c8c65b9be6
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-includedirs/subdir/foo.idl
@@ -0,0 +1,14 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "oaidl.idl";
+import "bar.idl";
+
+[
+ object,
+ uuid(9C1100DD-51D4-4827-AE9F-3B8FAC4AED72),
+]
+interface Foo : IUnknown {
+ HRESULT FooFunction(Bar* bar);
+};
diff --git a/third_party/python/gyp/test/win/idl-rules/Window.idl b/third_party/python/gyp/test/win/idl-rules/Window.idl
new file mode 100644
index 0000000000..d8ea01bee3
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-rules/Window.idl
@@ -0,0 +1,9 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+[
+ WillBeGarbageCollected,
+] interface Window {
+ void alert();
+};
diff --git a/third_party/python/gyp/test/win/idl-rules/basic-idl.gyp b/third_party/python/gyp/test/win/idl-rules/basic-idl.gyp
new file mode 100644
index 0000000000..b74622adea
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-rules/basic-idl.gyp
@@ -0,0 +1,67 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'midl_out_dir': '<(SHARED_INTERMEDIATE_DIR)',
+ },
+ 'target_defaults': {
+ 'configurations': {
+ 'Debug': {
+ 'msvs_configuration_platform': 'Win32',
+ },
+ 'Debug_x64': {
+ 'inherit_from': ['Debug'],
+ 'msvs_configuration_platform': 'x64',
+ },
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'idl_test',
+ 'type': 'executable',
+ 'sources': [
+ 'history_indexer.idl',
+ '<(midl_out_dir)/history_indexer.h',
+ '<(midl_out_dir)/history_indexer_i.c',
+ 'history_indexer_user.cc',
+ ],
+ 'libraries': ['ole32.lib'],
+ 'include_dirs': [
+ '<(midl_out_dir)',
+ ],
+ 'msvs_settings': {
+ 'VCMIDLTool': {
+ 'OutputDirectory': '<(midl_out_dir)',
+ 'HeaderFileName': '<(RULE_INPUT_ROOT).h',
+ },
+ },
+ },
+ {
+ 'target_name': 'idl_explicit_action',
+ 'type': 'none',
+ 'sources': [
+ 'Window.idl',
+ ],
+ 'actions': [{
+ 'action_name': 'blink_idl',
+ 'explicit_idl_action': 1,
+ 'msvs_cygwin_shell': 0,
+ 'inputs': [
+ 'Window.idl',
+ 'idl_compiler.py',
+ ],
+ 'outputs': [
+ 'Window.cpp',
+ 'Window.h',
+ ],
+ 'action': [
+ 'python',
+ 'idl_compiler.py',
+ 'Window.idl',
+ ],
+ }],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/win/idl-rules/history_indexer.idl b/third_party/python/gyp/test/win/idl-rules/history_indexer.idl
new file mode 100644
index 0000000000..e866ce6d90
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-rules/history_indexer.idl
@@ -0,0 +1,17 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+import "oaidl.idl";
+import "ocidl.idl";
+
+[
+ object,
+ uuid(9C1100DD-51D4-4827-AE9F-3B8FAC4AED72),
+ oleautomation,
+ nonextensible,
+ pointer_default(unique)
+]
+interface IChromeHistoryIndexer : IUnknown {
+ HRESULT SomeFunction([in] VARIANT begin_time, [in] VARIANT end_time);
+};
diff --git a/third_party/python/gyp/test/win/idl-rules/history_indexer_user.cc b/third_party/python/gyp/test/win/idl-rules/history_indexer_user.cc
new file mode 100644
index 0000000000..071a9ffef5
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-rules/history_indexer_user.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "history_indexer.h"
+
+// Use the thing in the IDL.
+int main() {
+ IChromeHistoryIndexer** indexer = 0;
+ IID fake_iid;
+ CoCreateInstance(fake_iid, NULL, CLSCTX_INPROC,
+ __uuidof(IChromeHistoryIndexer),
+ reinterpret_cast<void**>(indexer));
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/idl-rules/idl_compiler.py b/third_party/python/gyp/test/win/idl-rules/idl_compiler.py
new file mode 100644
index 0000000000..a12b274d67
--- /dev/null
+++ b/third_party/python/gyp/test/win/idl-rules/idl_compiler.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Mock IDL compiler: just writes stub .h/.cpp files for the input .idl.
+
+import os
+import sys
+
+if len(sys.argv) == 2:
+ basename, ext = os.path.splitext(sys.argv[1])
+ with open('%s.h' % basename, 'w') as f:
+ f.write('// %s.h\n' % basename)
+ with open('%s.cpp' % basename, 'w') as f:
+ f.write('// %s.cpp\n' % basename)
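
Run as `python idl_compiler.py Window.idl`, the mock above writes Window.h and Window.cpp next to the input, matching the outputs declared by the blink_idl action in basic-idl.gyp. A small usage sketch, assuming both files sit in the current directory:

    # Usage sketch for the mock compiler above.
    import subprocess
    subprocess.check_call(['python', 'idl_compiler.py', 'Window.idl'])
    print(open('Window.h').read())    # prints "// Window.h"
    print(open('Window.cpp').read())  # prints "// Window.cpp"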
diff --git a/third_party/python/gyp/test/win/importlib/dll_no_exports.cc b/third_party/python/gyp/test/win/importlib/dll_no_exports.cc
new file mode 100644
index 0000000000..96dd7970b7
--- /dev/null
+++ b/third_party/python/gyp/test/win/importlib/dll_no_exports.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+BOOL APIENTRY DllMain(HMODULE module, DWORD reason, LPVOID reserved) {
+ return TRUE;
+}
diff --git a/third_party/python/gyp/test/win/importlib/has-exports.cc b/third_party/python/gyp/test/win/importlib/has-exports.cc
new file mode 100644
index 0000000000..3f62d6c60d
--- /dev/null
+++ b/third_party/python/gyp/test/win/importlib/has-exports.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+__declspec(dllexport) void some_function() {
+}
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/importlib/hello.cc b/third_party/python/gyp/test/win/importlib/hello.cc
new file mode 100644
index 0000000000..66ff68c113
--- /dev/null
+++ b/third_party/python/gyp/test/win/importlib/hello.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+__declspec(dllimport) void some_function();
+
+int main() {
+ some_function();
+}
diff --git a/third_party/python/gyp/test/win/importlib/importlib.gyp b/third_party/python/gyp/test/win/importlib/importlib.gyp
new file mode 100644
index 0000000000..ab15b1893d
--- /dev/null
+++ b/third_party/python/gyp/test/win/importlib/importlib.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_importlib',
+ 'type': 'shared_library',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '2',
+ }
+ },
+ 'sources': ['has-exports.cc'],
+ },
+
+ {
+ 'target_name': 'test_linkagainst',
+ 'type': 'executable',
+ 'dependencies': ['test_importlib'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '2',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/importlib/noimplib.gyp b/third_party/python/gyp/test/win/importlib/noimplib.gyp
new file mode 100644
index 0000000000..0245058a99
--- /dev/null
+++ b/third_party/python/gyp/test/win/importlib/noimplib.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'no_import_library',
+ 'type': 'loadable_module',
+ 'msvs_settings': {
+ 'NoImportLibrary': 'true',
+ },
+ 'sources': ['dll_no_exports.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/large-pdb/dllmain.cc b/third_party/python/gyp/test/win/large-pdb/dllmain.cc
new file mode 100644
index 0000000000..14875623e8
--- /dev/null
+++ b/third_party/python/gyp/test/win/large-pdb/dllmain.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+BOOL WINAPI DllMain(HINSTANCE hinstance, DWORD reason, LPVOID reserved) {
+ return TRUE;
+}
diff --git a/third_party/python/gyp/test/win/large-pdb/large-pdb.gyp b/third_party/python/gyp/test/win/large-pdb/large-pdb.gyp
new file mode 100644
index 0000000000..2a241a5623
--- /dev/null
+++ b/third_party/python/gyp/test/win/large-pdb/large-pdb.gyp
@@ -0,0 +1,98 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'large_pdb_exe',
+ 'type': 'executable',
+ 'msvs_large_pdb': 1,
+ 'sources': [
+ 'main.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': '<(PRODUCT_DIR)/large_pdb_exe.exe.pdb',
+ },
+ },
+ },
+ {
+ 'target_name': 'small_pdb_exe',
+ 'type': 'executable',
+ 'msvs_large_pdb': 0,
+ 'sources': [
+ 'main.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': '<(PRODUCT_DIR)/small_pdb_exe.exe.pdb',
+ },
+ },
+ },
+ {
+ 'target_name': 'large_pdb_dll',
+ 'type': 'shared_library',
+ 'msvs_large_pdb': 1,
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': '<(PRODUCT_DIR)/large_pdb_dll.dll.pdb',
+ },
+ },
+ },
+ {
+ 'target_name': 'small_pdb_dll',
+ 'type': 'shared_library',
+ 'msvs_large_pdb': 0,
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': '<(PRODUCT_DIR)/small_pdb_dll.dll.pdb',
+ },
+ },
+ },
+ {
+ 'target_name': 'large_pdb_implicit_exe',
+ 'type': 'executable',
+ 'msvs_large_pdb': 1,
+ 'sources': [
+ 'main.cc',
+ ],
+ # No PDB file is specified. However, the msvs_large_pdb mechanism should
+ # default to the appropriate <(PRODUCT_DIR)/<(TARGET_NAME).exe.pdb.
+ },
+ {
+ 'target_name': 'large_pdb_variable_exe',
+ 'type': 'executable',
+ 'msvs_large_pdb': 1,
+ 'sources': [
+ 'main.cc',
+ ],
+ # No PDB file is specified. However, the msvs_large_pdb_path variable
+ # explicitly sets one.
+ 'variables': {
+ 'msvs_large_pdb_path': '<(PRODUCT_DIR)/foo.pdb',
+ },
+ },
+ {
+ 'target_name': 'large_pdb_product_exe',
+ 'product_name': 'bar',
+ 'type': 'executable',
+ 'msvs_large_pdb': 1,
+ 'sources': [
+ 'main.cc',
+ ],
+ # No PDB file is specified. However, we've specified a product name so
+ # it should use <(PRODUCT_DIR)/bar.exe.pdb.
+ },
+ ]
+}
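
The comments in large-pdb.gyp imply a precedence order for the PDB name under msvs_large_pdb: an explicit ProgramDatabaseFile wins, then the msvs_large_pdb_path variable, then product_name, then the target name. A sketch of that resolution, derived from the comments above rather than from gyp's implementation:

    # Sketch: PDB-name precedence exercised by the targets above.
    def large_pdb_name(target_name, ext, explicit=None, var_path=None,
                       product_name=None):
        if explicit:   # VCLinkerTool.ProgramDatabaseFile is set
            return explicit
        if var_path:   # msvs_large_pdb_path variable is set
            return var_path
        return '$(OutDir)/%s%s.pdb' % (product_name or target_name, ext)

    print(large_pdb_name('large_pdb_product_exe', '.exe', product_name='bar'))
    # $(OutDir)/bar.exe.pdb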
diff --git a/third_party/python/gyp/test/win/large-pdb/main.cc b/third_party/python/gyp/test/win/large-pdb/main.cc
new file mode 100644
index 0000000000..c3da8e9219
--- /dev/null
+++ b/third_party/python/gyp/test/win/large-pdb/main.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main(void) {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/lib-crosscompile/answer.cc b/third_party/python/gyp/test/win/lib-crosscompile/answer.cc
new file mode 100644
index 0000000000..a6ffa16862
--- /dev/null
+++ b/third_party/python/gyp/test/win/lib-crosscompile/answer.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "answer.h"
+
+int answer() {
+ return 42;
+}
diff --git a/third_party/python/gyp/test/win/lib-crosscompile/answer.h b/third_party/python/gyp/test/win/lib-crosscompile/answer.h
new file mode 100644
index 0000000000..82312d54b8
--- /dev/null
+++ b/third_party/python/gyp/test/win/lib-crosscompile/answer.h
@@ -0,0 +1,5 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int answer();
\ No newline at end of file
diff --git a/third_party/python/gyp/test/win/lib-crosscompile/use_host_ar.gyp b/third_party/python/gyp/test/win/lib-crosscompile/use_host_ar.gyp
new file mode 100644
index 0000000000..4747bc6445
--- /dev/null
+++ b/third_party/python/gyp/test/win/lib-crosscompile/use_host_ar.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2015 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib_answer',
+ 'type': 'static_library',
+ 'toolsets': ['host'],
+ 'msvs_settings': {
+ 'msvs_cygwin_shell': 0,
+ },
+ 'sources': ['answer.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/lib-flags/answer.cc b/third_party/python/gyp/test/win/lib-flags/answer.cc
new file mode 100644
index 0000000000..a6ffa16862
--- /dev/null
+++ b/third_party/python/gyp/test/win/lib-flags/answer.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "answer.h"
+
+int answer() {
+ return 42;
+}
diff --git a/third_party/python/gyp/test/win/lib-flags/answer.h b/third_party/python/gyp/test/win/lib-flags/answer.h
new file mode 100644
index 0000000000..82312d54b8
--- /dev/null
+++ b/third_party/python/gyp/test/win/lib-flags/answer.h
@@ -0,0 +1,5 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int answer();
\ No newline at end of file
diff --git a/third_party/python/gyp/test/win/lib-flags/ltcg.gyp b/third_party/python/gyp/test/win/lib-flags/ltcg.gyp
new file mode 100644
index 0000000000..c183107730
--- /dev/null
+++ b/third_party/python/gyp/test/win/lib-flags/ltcg.gyp
@@ -0,0 +1,21 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib_answer',
+ 'type': 'static_library',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WholeProgramOptimization': 'true', # /GL
+ },
+ 'VCLibrarianTool': {
+ 'LinkTimeCodeGeneration': 'true', # /LTCG
+ },
+ },
+ 'sources': ['answer.cc'],
+ },
+ ]
+}
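
The two settings above travel together: objects compiled with /GL hold intermediate rather than machine code, so the librarian must be told /LTCG to consume them. As a plain mapping (flag spellings are the documented MSVC switches the inline comments already name):

    # The /GL + /LTCG pairing exercised by lib_answer above.
    settings_to_flags = [
        ("VCCLCompilerTool.WholeProgramOptimization='true'", '/GL'),
        ("VCLibrarianTool.LinkTimeCodeGeneration='true'", '/LTCG'),
    ]
    for setting, flag in settings_to_flags:
        print('%s -> %s' % (setting, flag))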
diff --git a/third_party/python/gyp/test/win/linker-flags/a/x.cc b/third_party/python/gyp/test/win/linker-flags/a/x.cc
new file mode 100644
index 0000000000..f5f763b095
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/a/x.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int x() {
+ return 1;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/a/z.cc b/third_party/python/gyp/test/win/linker-flags/a/z.cc
new file mode 100644
index 0000000000..8a43501270
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/a/z.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int z() {
+ return 3;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/additional-deps.cc b/third_party/python/gyp/test/win/linker-flags/additional-deps.cc
new file mode 100644
index 0000000000..7dfb589d26
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/additional-deps.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <winsock2.h>
+
+int main() {
+ WSAStartup(0, 0);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/additional-deps.gyp b/third_party/python/gyp/test/win/linker-flags/additional-deps.gyp
new file mode 100644
index 0000000000..55afe64fb0
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/additional-deps.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_deps_none',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_deps_few',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': [
+ 'wininet.lib',
+ 'ws2_32.lib',
+ ]
+ }
+ },
+ 'sources': ['additional-deps.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/additional-options.gyp b/third_party/python/gyp/test/win/linker-flags/additional-options.gyp
new file mode 100644
index 0000000000..cab3994cd1
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/additional-options.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_additional_none',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_additional_few',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalOptions': [
+ '/dynamicbase:no',
+ ]
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/aslr.gyp b/third_party/python/gyp/test/win/linker-flags/aslr.gyp
new file mode 100644
index 0000000000..b3aefd50b7
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/aslr.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_aslr_default',
+ 'type': 'executable',
+ 'msvs_settings': {
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_aslr_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'RandomizedBaseAddress': '1',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_aslr_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'RandomizedBaseAddress': '2',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
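
RandomizedBaseAddress is an enumerated setting: '1' disables ASLR and '2' enables it, with the unset case taking the toolchain default. A quick reference, using the documented linker switch spellings:

    # RandomizedBaseAddress values used above and their linker switches.
    randomized_base_address = {
        '1': '/DYNAMICBASE:NO',  # test_aslr_no
        '2': '/DYNAMICBASE',     # test_aslr_yes
    }
    print(randomized_base_address)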
diff --git a/third_party/python/gyp/test/win/linker-flags/b/y.cc b/third_party/python/gyp/test/win/linker-flags/b/y.cc
new file mode 100644
index 0000000000..bd884119fc
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/b/y.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int y() {
+ return 2;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/base-address.gyp b/third_party/python/gyp/test/win/linker-flags/base-address.gyp
new file mode 100644
index 0000000000..873ebfea3f
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/base-address.gyp
@@ -0,0 +1,38 @@
+# Copyright 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_base_specified_exe',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'BaseAddress': '0x00420000',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_base_specified_dll',
+ 'type': 'shared_library',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'BaseAddress': '0x10420000',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_base_default_exe',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_base_default_dll',
+ 'type': 'shared_library',
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/debug-info.gyp b/third_party/python/gyp/test/win/linker-flags/debug-info.gyp
new file mode 100644
index 0000000000..d47d0ecced
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/debug-info.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_debug_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'false'
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_debug_on',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true'
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/deffile-multiple.gyp b/third_party/python/gyp/test/win/linker-flags/deffile-multiple.gyp
new file mode 100644
index 0000000000..c74a9af20a
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/deffile-multiple.gyp
@@ -0,0 +1,17 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_deffile_multiple_fail',
+ 'type': 'shared_library',
+ 'sources': [
+ 'deffile.cc',
+ 'deffile.def',
+ 'deffile2.def',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/deffile.cc b/third_party/python/gyp/test/win/linker-flags/deffile.cc
new file mode 100644
index 0000000000..fa203b34c8
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/deffile.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void AnExportedFunction() {
+}
+
+void AnotherExportedFunction() {
+}
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/deffile.def b/third_party/python/gyp/test/win/linker-flags/deffile.def
new file mode 100644
index 0000000000..ba9d399bd6
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/deffile.def
@@ -0,0 +1,8 @@
+; Copyright (c) 2012 Google Inc. All rights reserved.
+; Use of this source code is governed by a BSD-style license that can be
+; found in the LICENSE file.
+
+LIBRARY test_deffile_ok
+
+EXPORTS
+ AnExportedFunction
diff --git a/third_party/python/gyp/test/win/linker-flags/deffile.gyp b/third_party/python/gyp/test/win/linker-flags/deffile.gyp
new file mode 100644
index 0000000000..7b241d5e3a
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/deffile.gyp
@@ -0,0 +1,38 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_deffile_dll_ok',
+ 'type': 'shared_library',
+ 'sources': [
+ 'deffile.cc',
+ 'deffile.def',
+ ],
+ },
+ {
+ 'target_name': 'test_deffile_dll_notexported',
+ 'type': 'shared_library',
+ 'sources': [
+ 'deffile.cc',
+ ],
+ },
+ {
+ 'target_name': 'test_deffile_exe_ok',
+ 'type': 'executable',
+ 'sources': [
+ 'deffile.cc',
+ 'deffile.def',
+ ],
+ },
+ {
+ 'target_name': 'test_deffile_exe_notexported',
+ 'type': 'executable',
+ 'sources': [
+ 'deffile.cc',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/delay-load-dlls.gyp b/third_party/python/gyp/test/win/linker-flags/delay-load-dlls.gyp
new file mode 100644
index 0000000000..671cbaa802
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/delay-load-dlls.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_dld_none',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ }
+ },
+ 'sources': ['delay-load.cc'],
+ 'libraries': [
+ 'delayimp.lib',
+ 'shell32.lib',
+ ],
+ },
+ {
+ 'target_name': 'test_dld_shell32',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'DelayLoadDLLs': ['shell32.dll']
+ }
+ },
+ 'sources': ['delay-load.cc'],
+ 'libraries': [
+ 'delayimp.lib',
+ 'shell32.lib',
+ ],
+ },
+ ]
+}
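
Each DelayLoadDLLs entry becomes a /DELAYLOAD:<dll> switch, and delay loading also needs the delayimp.lib helper, which is why both targets list it; only the setting itself differs between them. Sketch:

    # Sketch: DelayLoadDLLs entries map to /DELAYLOAD switches.
    delay_load_dlls = ['shell32.dll']
    print(['/DELAYLOAD:' + dll for dll in delay_load_dlls])
    # ['/DELAYLOAD:shell32.dll']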
diff --git a/third_party/python/gyp/test/win/linker-flags/delay-load.cc b/third_party/python/gyp/test/win/linker-flags/delay-load.cc
new file mode 100644
index 0000000000..2be34aa876
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/delay-load.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <shlobj.h>
+
+int main() {
+ SHCreateDirectory(0, 0);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/embed-manifest.gyp b/third_party/python/gyp/test/win/linker-flags/embed-manifest.gyp
new file mode 100644
index 0000000000..fefb2f56d8
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/embed-manifest.gyp
@@ -0,0 +1,109 @@
+# Copyright (c) 2013 Yandex LLC. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_manifest_exe',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '1',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ }
+ },
+ },
+ {
+ 'target_name': 'test_manifest_dll',
+ 'type': 'loadable_module',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '1',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ }
+ },
+ },
+ {
+ 'target_name': 'test_manifest_extra1',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ 'AdditionalManifestFiles': 'extra.manifest',
+ }
+ },
+ },
+ {
+ 'target_name': 'test_manifest_extra2',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ 'AdditionalManifestFiles': 'extra.manifest;extra2.manifest',
+ }
+ },
+ },
+ {
+ 'target_name': 'test_manifest_extra_list',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ 'AdditionalManifestFiles': [
+ 'extra.manifest',
+ 'extra2.manifest'
+ ],
+ }
+ },
+ },
+ {
+ 'target_name': 'test_manifest_dll_inc',
+ 'type': 'loadable_module',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '2',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ }
+ },
+ },
+ {
+ 'target_name': 'test_manifest_exe_inc',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '2',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ }
+ },
+ },
+ {
+ 'target_name': 'test_manifest_exe_inc_no_embed',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkIncremental': '2',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'false',
+ }
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/enable-uac.gyp b/third_party/python/gyp/test/win/linker-flags/enable-uac.gyp
new file mode 100644
index 0000000000..4e58c86ec8
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/enable-uac.gyp
@@ -0,0 +1,45 @@
+# Copyright 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'enable_uac',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ }
+ },
+ },
+ {
+ 'target_name': 'enable_uac_no',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'false',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ }
+ },
+ },
+ {
+ 'target_name': 'enable_uac_admin',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'UACExecutionLevel': 2,
+ 'UACUIAccess': 'true',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ }
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/entrypointsymbol.cc b/third_party/python/gyp/test/win/linker-flags/entrypointsymbol.cc
new file mode 100644
index 0000000000..b567bc87b3
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/entrypointsymbol.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The entry point specified by the link.exe /ENTRY option.
+extern "C" void MainEntryPoint() {
+}
+
+// Still needed because the linker checks for existence of one of main, wmain,
+// WinMain, or wMain to offer informative diagnostics.
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/entrypointsymbol.gyp b/third_party/python/gyp/test/win/linker-flags/entrypointsymbol.gyp
new file mode 100644
index 0000000000..7f2c14252d
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/entrypointsymbol.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_ok',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EntryPointSymbol': 'MainEntryPoint',
+ }
+ },
+ 'sources': ['entrypointsymbol.cc'],
+ },
+ {
+ 'target_name': 'test_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EntryPointSymbol': 'MainEntryPoint',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/extra.manifest b/third_party/python/gyp/test/win/linker-flags/extra.manifest
new file mode 100644
index 0000000000..2e436dc251
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/extra.manifest
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+
+ <compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
+ <application>
+ <!--This Id value indicates the application supports Windows 7 functionality-->
+ <supportedOS Id="{35138b9a-5d96-4fbd-8e2d-a2440225f93a}"/>
+ </application>
+ </compatibility>
+
+</assembly>
diff --git a/third_party/python/gyp/test/win/linker-flags/extra2.manifest b/third_party/python/gyp/test/win/linker-flags/extra2.manifest
new file mode 100644
index 0000000000..bfb570ca59
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/extra2.manifest
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+
+ <compatibility xmlns="urn:schemas-microsoft-com:compatibility.v1">
+ <application>
+ <!--This Id value indicates the application supports Windows Vista functionality -->
+ <supportedOS Id="{e2011457-1546-43c5-a5fe-008deee3d3f0}"/>
+ </application>
+ </compatibility>
+
+</assembly>
diff --git a/third_party/python/gyp/test/win/linker-flags/fixed-base.gyp b/third_party/python/gyp/test/win/linker-flags/fixed-base.gyp
new file mode 100644
index 0000000000..cc2982eb27
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/fixed-base.gyp
@@ -0,0 +1,52 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # Disable DYNAMICBASE for these tests: in some cases it implies (or
+ # doesn't imply) FIXED, which complicates testing FIXED in isolation.
+ {
+ 'target_name': 'test_fixed_default_exe',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'RandomizedBaseAddress': '1',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_fixed_default_dll',
+ 'type': 'shared_library',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'RandomizedBaseAddress': '1',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_fixed_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'FixedBaseAddress': '1',
+ 'RandomizedBaseAddress': '1',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_fixed_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'FixedBaseAddress': '2',
+ 'RandomizedBaseAddress': '1',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
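
FixedBaseAddress is enumerated the same way: '1' emits /FIXED:NO and '2' emits /FIXED, and because /DYNAMICBASE interacts with /FIXED the file pins RandomizedBaseAddress to '1' first, as its leading comment explains. Reference sketch:

    # FixedBaseAddress values used above and their linker switches.
    fixed_base_address = {
        '1': '/FIXED:NO',  # test_fixed_no
        '2': '/FIXED',     # test_fixed_yes
    }
    print(fixed_base_address)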
diff --git a/third_party/python/gyp/test/win/linker-flags/force-symbol-reference.gyp b/third_party/python/gyp/test/win/linker-flags/force-symbol-reference.gyp
new file mode 100644
index 0000000000..d6d02a6848
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/force-symbol-reference.gyp
@@ -0,0 +1,39 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_force_reference_lib',
+ 'type': 'static_library',
+ 'sources': ['x.cc', 'y.cc'],
+ },
+ {
+ 'target_name': 'test_force_reference',
+ 'type': 'executable',
+ # Turn on debug info so the test code's symbols appear in the disassembly,
+ # and turn on /OPT:REF to drop unused symbols, making sure the forced
+ # symbols would not otherwise be present.
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'AdditionalOptions': [
+ '/OPT:REF',
+ ],
+ 'ForceSymbolReferences': [
+ '?x@@YAHXZ',
+ '?y@@YAHXZ',
+ ],
+ },
+ },
+ 'sources': ['hello.cc'],
+ 'dependencies': [
+ 'test_force_reference_lib',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/generate-manifest.gyp b/third_party/python/gyp/test/win/linker-flags/generate-manifest.gyp
new file mode 100644
index 0000000000..34a68d1a48
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/generate-manifest.gyp
@@ -0,0 +1,166 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_generate_manifest_true',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'GenerateManifest': 'true',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'false',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_false',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'GenerateManifest': 'false',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'false',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_default',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'false',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_true_as_embedded',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'GenerateManifest': 'true',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_false_as_embedded',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'GenerateManifest': 'false',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_default_as_embedded',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'true',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_true_with_extra_manifest',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'GenerateManifest': 'true',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'false',
+ 'AdditionalManifestFiles': 'extra.manifest;extra2.manifest',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_false_with_extra_manifest',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'GenerateManifest': 'false',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'false',
+ 'AdditionalManifestFiles': 'extra.manifest;extra2.manifest',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_true_with_extra_manifest_list',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'GenerateManifest': 'true',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'false',
+ 'AdditionalManifestFiles': [
+ 'extra.manifest',
+ 'extra2.manifest',
+ ],
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_false_with_extra_manifest_list',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ 'GenerateManifest': 'false',
+ },
+ 'VCManifestTool': {
+ 'EmbedManifest': 'false',
+ 'AdditionalManifestFiles': [
+ 'extra.manifest',
+ 'extra2.manifest',
+ ],
+ },
+ },
+ },
+ {
+ 'target_name': 'test_generate_manifest_default_embed_default',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'EnableUAC': 'true',
+ },
+ },
+ },
+ ]
+}
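
The targets above walk the GenerateManifest x EmbedManifest matrix: GenerateManifest decides whether the linker produces a manifest at all, and EmbedManifest decides whether it lands as an embedded resource or as an external .manifest file. One plausible reading of the expected outcomes (the accompanying test, not this sketch, is the authority):

    # Plausible outcome per GenerateManifest/EmbedManifest combination.
    def manifest_outcome(generate, embed):
        if not generate:
            return 'no manifest produced'
        return 'embedded as a resource' if embed else 'external .manifest file'

    print(manifest_outcome(generate=True, embed=False))
    # external .manifest file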
diff --git a/third_party/python/gyp/test/win/linker-flags/hello.cc b/third_party/python/gyp/test/win/linker-flags/hello.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/incremental.gyp b/third_party/python/gyp/test/win/linker-flags/incremental.gyp
new file mode 100644
index 0000000000..59f3103253
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/incremental.gyp
@@ -0,0 +1,65 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # Turn on debug information so the incremental linking tables have a
+ # visible symbolic name in the disassembly.
+ {
+ 'target_name': 'test_incremental_unset',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_incremental_default',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'LinkIncremental': '0',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_incremental_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'LinkIncremental': '1',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_incremental_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'LinkIncremental': '2',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
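
LinkIncremental is enumerated as well: '0' means use the default (no explicit switch), '1' maps to /INCREMENTAL:NO, and '2' maps to /INCREMENTAL; debug info is enabled so the incremental-linking thunk tables are identifiable in the disassembly, as the leading comment notes. Quick reference:

    # LinkIncremental values used above and their linker switches.
    link_incremental = {
        '0': None,               # explicit default, no switch emitted
        '1': '/INCREMENTAL:NO',  # test_incremental_no
        '2': '/INCREMENTAL',     # test_incremental_yes
    }
    print(link_incremental['1'])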
diff --git a/third_party/python/gyp/test/win/linker-flags/inline_test.cc b/third_party/python/gyp/test/win/linker-flags/inline_test.cc
new file mode 100644
index 0000000000..a9f177e476
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/inline_test.cc
@@ -0,0 +1,12 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "inline_test.h"
+
+#include <intrin.h>
+#pragma intrinsic(_ReturnAddress)
+
+bool IsFunctionInlined(void* caller_return_address) {
+ return _ReturnAddress() == caller_return_address;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/inline_test.h b/third_party/python/gyp/test/win/linker-flags/inline_test.h
new file mode 100644
index 0000000000..117913c4f5
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/inline_test.h
@@ -0,0 +1,5 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+bool IsFunctionInlined(void* current_return_address);
diff --git a/third_party/python/gyp/test/win/linker-flags/inline_test_main.cc b/third_party/python/gyp/test/win/linker-flags/inline_test_main.cc
new file mode 100644
index 0000000000..23cafe8f94
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/inline_test_main.cc
@@ -0,0 +1,15 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "inline_test.h"
+
+#include <intrin.h>
+#include <stdio.h>
+
+#pragma intrinsic(_ReturnAddress)
+
+int main() {
+ if (IsFunctionInlined(_ReturnAddress()))
+ puts("==== inlined ====\n");
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/large-address-aware.gyp b/third_party/python/gyp/test/win/linker-flags/large-address-aware.gyp
new file mode 100644
index 0000000000..fa56d3789c
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/large-address-aware.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_large_address_aware_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LargeAddressAware': '1',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_large_address_aware_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LargeAddressAware': '2',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/library-adjust.cc b/third_party/python/gyp/test/win/linker-flags/library-adjust.cc
new file mode 100644
index 0000000000..7dfb589d26
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/library-adjust.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <winsock2.h>
+
+int main() {
+ WSAStartup(0, 0);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/library-adjust.gyp b/third_party/python/gyp/test/win/linker-flags/library-adjust.gyp
new file mode 100644
index 0000000000..10e9996f5c
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/library-adjust.gyp
@@ -0,0 +1,16 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_adjust',
+ 'type': 'executable',
+ 'libraries': [
+ '-lws2_32.lib'
+ ],
+ 'sources': ['library-adjust.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/library-directories-define.cc b/third_party/python/gyp/test/win/linker-flags/library-directories-define.cc
new file mode 100644
index 0000000000..211ef062c1
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/library-directories-define.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int library_function() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/library-directories-reference.cc b/third_party/python/gyp/test/win/linker-flags/library-directories-reference.cc
new file mode 100644
index 0000000000..335097839a
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/library-directories-reference.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern int library_function();
+
+int main() {
+ library_function();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/library-directories.gyp b/third_party/python/gyp/test/win/linker-flags/library-directories.gyp
new file mode 100644
index 0000000000..25395d6c87
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/library-directories.gyp
@@ -0,0 +1,42 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_libdirs_none',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': [
+ 'test_lib.lib',
+ ],
+ },
+ },
+ 'sources': ['library-directories-reference.cc'],
+ },
+ {
+ 'target_name': 'test_libdirs_with',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ # NOTE: Don't use this for general dependencies between gyp
+ # libraries (use 'dependencies' instead). This is done here only for
+ # testing.
+ #
+ # This setting should only be used to depend on third party prebuilt
+ # libraries that are stored as binaries at a known location.
+ 'AdditionalLibraryDirectories': [
+ '<(DEPTH)/out/Default/obj/subdir', # ninja style
+ '<(DEPTH)/subdir/Default/lib', # msvs style
+ ],
+ 'AdditionalDependencies': [
+ 'test_lib.lib',
+ ],
+ },
+ },
+ 'sources': ['library-directories-reference.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/link-ordering.gyp b/third_party/python/gyp/test/win/linker-flags/link-ordering.gyp
new file mode 100644
index 0000000000..66f44309d1
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/link-ordering.gyp
@@ -0,0 +1,95 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_ordering_exe',
+ 'type': 'executable',
+ # These are so the names of the functions appear in the disassembly.
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ 'Optimization': '2',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'LinkIncremental': '1',
+ 'GenerateManifest': 'false',
+ # Minimize the disassembly to just our code.
+ 'AdditionalOptions': [
+ '/NODEFAULTLIB',
+ ],
+ },
+ },
+ 'sources': [
+ # Explicitly sorted the same way as the disassembly in the test .py.
+ 'main-crt.c',
+ 'z.cc',
+ 'x.cc',
+ 'y.cc',
+ 'hello.cc',
+ ],
+ },
+
+ {
+ 'target_name': 'test_ordering_subdirs',
+ 'type': 'executable',
+ # These are so the names of the functions appear in the disassembly.
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ 'Optimization': '2',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'LinkIncremental': '1',
+ 'GenerateManifest': 'false',
+ # Minimize the disassembly to just our code.
+ 'AdditionalOptions': [
+ '/NODEFAULTLIB',
+ ],
+ },
+ },
+ 'sources': [
+ # Explicitly sorted the same way as the disassembly in the test .py.
+ 'main-crt.c',
+ 'hello.cc',
+ 'b/y.cc',
+ 'a/z.cc',
+ ],
+ },
+
+
+ {
+ 'target_name': 'test_ordering_subdirs_mixed',
+ 'type': 'executable',
+ # These are so the names of the functions appear in the disassembly.
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ 'Optimization': '2',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'LinkIncremental': '1',
+ 'GenerateManifest': 'false',
+ # Minimize the disassembly to just our code.
+ 'AdditionalOptions': [
+ '/NODEFAULTLIB',
+ ],
+ },
+ },
+ 'sources': [
+ # Explicitly sorted the same way as the disassembly in the test .py.
+ 'main-crt.c',
+ 'a/x.cc',
+ 'hello.cc',
+ 'a/z.cc',
+ 'y.cc',
+ ],
+ },
+
+ ]
+}
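
All three targets strip the default libraries and supply mainCRTStartup from main-crt.c, so the disassembly contains only the test's own functions, listed in the order the test expects to encounter them. A rough sketch of how an ordering check over a disassembly could look (illustrative only; the real check lives in the accompanying test .py):

    # Sketch: check that names appear in a disassembly in the given order.
    def in_order(disasm, names):
        positions = [disasm.find(name) for name in names]
        return min(positions) >= 0 and positions == sorted(positions)

    disasm = 'mainCRTStartup: ... z: ... x: ... y: ...'
    print(in_order(disasm, ['mainCRTStartup', 'z', 'x', 'y']))  # True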
diff --git a/third_party/python/gyp/test/win/linker-flags/link-warning.cc b/third_party/python/gyp/test/win/linker-flags/link-warning.cc
new file mode 100644
index 0000000000..4b34277ba3
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/link-warning.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This will cause LNK4254.
+#pragma comment(linker, "/merge:.data=.text")
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/ltcg.gyp b/third_party/python/gyp/test/win/linker-flags/ltcg.gyp
new file mode 100644
index 0000000000..ddb0d9b4e2
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/ltcg.gyp
@@ -0,0 +1,42 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_ltcg_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WholeProgramOptimization': 'false',
+ },
+ 'VCLinkerTool': {
+ 'LinkTimeCodeGeneration': '0',
+ },
+ },
+ 'sources': [
+ 'inline_test.h',
+ 'inline_test.cc',
+ 'inline_test_main.cc',
+ ],
+ },
+ {
+ 'target_name': 'test_ltcg_on',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WholeProgramOptimization': 'true', # /GL
+ },
+ 'VCLinkerTool': {
+ 'LinkTimeCodeGeneration': '1', # /LTCG
+ },
+ },
+ 'sources': [
+ 'inline_test.h',
+ 'inline_test.cc',
+ 'inline_test_main.cc',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/main-crt.c b/third_party/python/gyp/test/win/linker-flags/main-crt.c
new file mode 100644
index 0000000000..bdc80c54fd
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/main-crt.c
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Stub so we can link with /NODEFAULTLIB when checking disasm.
+int mainCRTStartup() {
+ return 5;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/manifest-in-comment.cc b/third_party/python/gyp/test/win/linker-flags/manifest-in-comment.cc
new file mode 100644
index 0000000000..ae54ae5462
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/manifest-in-comment.cc
@@ -0,0 +1,13 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#pragma comment(linker, \
+ "\"/manifestdependency:type='Win32' " \
+ "name='Test.Research.SampleAssembly' version='6.0.0.0' " \
+ "processorArchitecture='X86' " \
+ "publicKeyToken='0000000000000000' language='*'\"")
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/mapfile.cc b/third_party/python/gyp/test/win/linker-flags/mapfile.cc
new file mode 100644
index 0000000000..cebccb264a
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/mapfile.cc
@@ -0,0 +1,12 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+__declspec(dllexport)
+void AnExportedFunction() {
+ // We need an exported function to verify that /MAPINFO:EXPORTS works.
+}
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/mapfile.gyp b/third_party/python/gyp/test/win/linker-flags/mapfile.gyp
new file mode 100644
index 0000000000..14206fe28d
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/mapfile.gyp
@@ -0,0 +1,45 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_mapfile_unset',
+ 'type': 'executable',
+ 'sources': ['mapfile.cc'],
+ },
+ {
+ 'target_name': 'test_mapfile_generate',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateMapFile': 'true',
+ },
+ },
+ 'sources': ['mapfile.cc'],
+ },
+ {
+ 'target_name': 'test_mapfile_generate_exports',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateMapFile': 'true',
+ 'MapExports': 'true',
+ },
+ },
+ 'sources': ['mapfile.cc'],
+ },
+ {
+ 'target_name': 'test_mapfile_generate_filename',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'GenerateMapFile': 'true',
+ 'MapFileName': '<(PRODUCT_DIR)/custom_file_name.map',
+ },
+ },
+ 'sources': ['mapfile.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/no-default-libs.cc b/third_party/python/gyp/test/win/linker-flags/no-default-libs.cc
new file mode 100644
index 0000000000..e306846987
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/no-default-libs.cc
@@ -0,0 +1,18 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Reference something in kernel32.dll. This will fail to link, verifying that
+// GYP provides no default import library configuration.
+// Note that we don't include Windows.h, as that will result in generating
+// linker directives in the object file through #pragma comment(lib, ...).
+typedef int BOOL;
+
+extern "C" __declspec(dllimport)
+BOOL CopyFileW(const wchar_t*, const wchar_t*, BOOL);
+
+
+int main() {
+ CopyFileW(0, 0, 0); // kernel32
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/no-default-libs.gyp b/third_party/python/gyp/test/win/linker-flags/no-default-libs.gyp
new file mode 100644
index 0000000000..77838ce8c4
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/no-default-libs.gyp
@@ -0,0 +1,13 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_default',
+ 'type': 'executable',
+ 'sources': ['no-default-libs.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/nodefaultlib.cc b/third_party/python/gyp/test/win/linker-flags/nodefaultlib.cc
new file mode 100644
index 0000000000..24b6eca438
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/nodefaultlib.cc
@@ -0,0 +1,13 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Define the entry point ourselves, since removing the C runtime libraries excludes theirs.
+extern "C" void mainCRTStartup() {
+}
+
+// Still needed because the linker checks for the existence of one of main,
+// wmain, WinMain, or wWinMain to offer informative diagnostics.
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/nodefaultlib.gyp b/third_party/python/gyp/test/win/linker-flags/nodefaultlib.gyp
new file mode 100644
index 0000000000..4fb452a18b
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/nodefaultlib.gyp
@@ -0,0 +1,30 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
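+# Each entry in 'IgnoreDefaultLibraryNames' becomes a /NODEFAULTLIB:<library>
+# switch on the link line.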
+{
+ 'targets': [
+ {
+ 'target_name': 'test_ok',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'IgnoreDefaultLibraryNames':
+ ['libcmtd.lib', 'libcmt.lib', 'msvcrt.lib', 'msvcrtd.lib'],
+ }
+ },
+ 'sources': ['nodefaultlib.cc'],
+ },
+ {
+ 'target_name': 'test_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'IgnoreDefaultLibraryNames':
+ ['libcmtd.lib', 'libcmt.lib', 'msvcrt.lib', 'msvcrtd.lib'],
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/nxcompat.gyp b/third_party/python/gyp/test/win/linker-flags/nxcompat.gyp
new file mode 100644
index 0000000000..fa4118cbd7
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/nxcompat.gyp
@@ -0,0 +1,35 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
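+# 'DataExecutionPrevention' maps to the /NXCOMPAT linker switch: '1' passes
+# /NXCOMPAT:NO and '2' passes /NXCOMPAT.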
+{
+ 'targets': [
+ {
+ 'target_name': 'test_nxcompat_default',
+ 'type': 'executable',
+ 'msvs_settings': {
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_nxcompat_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'DataExecutionPrevention': '1',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_nxcompat_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'DataExecutionPrevention': '2',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/opt-icf.cc b/third_party/python/gyp/test/win/linker-flags/opt-icf.cc
new file mode 100644
index 0000000000..1f12156b7f
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/opt-icf.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+void similar_function0(char* x) {
+ while (*x) {
+ ++x;
+ }
+}
+
+void similar_function1(char* p) {
+ while (*p) {
+ ++p;
+ }
+}
+
+void similar_function2(char* q) {
+ while (*q) {
+ ++q;
+ }
+}
+
+int main() {
+  char x[] = "hello";
+ similar_function0(x);
+ similar_function1(x);
+ similar_function2(x);
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/opt-icf.gyp b/third_party/python/gyp/test/win/linker-flags/opt-icf.gyp
new file mode 100644
index 0000000000..effe8021c3
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/opt-icf.gyp
@@ -0,0 +1,63 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # Have to turn on function level linking here to get the function packaged
+ # as a COMDAT so that it's eligible for merging. Also turn on debug
+ # information so that the symbol names for the code appear in the dump.
+ # Finally, specify non-incremental linking so that there's not a bunch of
+ # extra "similar_function"s in the output (the ILT jump table).
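+    # For reference: 'EnableCOMDATFolding' maps to /OPT:ICF ('2') or
+    # /OPT:NOICF ('1'), and 'LinkIncremental': '1' maps to /INCREMENTAL:NO.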
+ {
+ 'target_name': 'test_opticf_default',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFunctionLevelLinking': 'true',
+ 'DebugInformationFormat': '3',
+ 'Optimization': '0',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'LinkIncremental': '1',
+ },
+ },
+ 'sources': ['opt-icf.cc'],
+ },
+ {
+ 'target_name': 'test_opticf_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFunctionLevelLinking': 'true',
+ 'DebugInformationFormat': '3',
+ 'Optimization': '0',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'EnableCOMDATFolding': '1',
+ 'LinkIncremental': '1',
+ },
+ },
+ 'sources': ['opt-icf.cc'],
+ },
+ {
+ 'target_name': 'test_opticf_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFunctionLevelLinking': 'true',
+ 'DebugInformationFormat': '3',
+ 'Optimization': '0',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'EnableCOMDATFolding': '2',
+ 'LinkIncremental': '1',
+ },
+ },
+ 'sources': ['opt-icf.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/opt-ref.cc b/third_party/python/gyp/test/win/linker-flags/opt-ref.cc
new file mode 100644
index 0000000000..afaa328a5d
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/opt-ref.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int unused_function() {
+ return 0;
+}
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/opt-ref.gyp b/third_party/python/gyp/test/win/linker-flags/opt-ref.gyp
new file mode 100644
index 0000000000..69d0281a08
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/opt-ref.gyp
@@ -0,0 +1,56 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ # Have to turn on function level linking here to get the function packaged
+ # as a COMDAT so that it's eligible for optimizing away. Also turn on
+ # debug information so that the symbol names for the code appear in the
+ # dump (so we can verify if they are included in the final exe).
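+  # For reference: 'OptimizeReferences' maps to /OPT:REF ('2') or /OPT:NOREF
+  # ('1').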
+ {
+ 'target_name': 'test_optref_default',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFunctionLevelLinking': 'true',
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ 'sources': ['opt-ref.cc'],
+ },
+ {
+ 'target_name': 'test_optref_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFunctionLevelLinking': 'true',
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'OptimizeReferences': '1',
+ },
+ },
+ 'sources': ['opt-ref.cc'],
+ },
+ {
+ 'target_name': 'test_optref_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'EnableFunctionLevelLinking': 'true',
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'OptimizeReferences': '2',
+ },
+ },
+ 'sources': ['opt-ref.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/outputfile.gyp b/third_party/python/gyp/test/win/linker-flags/outputfile.gyp
new file mode 100644
index 0000000000..1022ec2e20
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/outputfile.gyp
@@ -0,0 +1,58 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
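+# 'OutputFile' maps to /OUT:<filename> for both the linker and the
+# librarian.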
+{
+ 'targets': [
+ {
+ 'target_name': 'test_output_exe',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(OutDir)\\blorp.exe'
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_output_exe2',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(OutDir)\\subdir\\blorp.exe'
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_output_dll',
+ 'type': 'shared_library',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(OutDir)\\blorp.dll'
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_output_lib',
+ 'type': 'static_library',
+ 'msvs_settings': {
+ 'VCLibrarianTool': {
+ 'OutputFile': '$(OutDir)\\blorp.lib'
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_output_lib2',
+ 'type': 'static_library',
+ 'msvs_settings': {
+ 'VCLibrarianTool': {
+ 'OutputFile': '$(OutDir)\\subdir\\blorp.lib'
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/pdb-output.gyp b/third_party/python/gyp/test/win/linker-flags/pdb-output.gyp
new file mode 100644
index 0000000000..1a03c67cc0
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/pdb-output.gyp
@@ -0,0 +1,49 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
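+# 'ProgramDatabaseFile' maps to /PDB:<filename> and 'GenerateDebugInformation'
+# to /DEBUG; the compiler-side 'DebugInformationFormat': '3' is /Zi.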
+{
+ 'targets': [
+ {
+ 'target_name': 'test_pdb_output_exe',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3'
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': 'output_exe.pdb',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_pdb_output_dll',
+ 'type': 'shared_library',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3'
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': 'output_dll.pdb',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_pdb_output_disabled',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '0'
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'false',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/pgo.gyp b/third_party/python/gyp/test/win/linker-flags/pgo.gyp
new file mode 100644
index 0000000000..da32639973
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/pgo.gyp
@@ -0,0 +1,143 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'pgd_basename': 'test_pgo',
+ },
+ 'targets': [
+  # In the PGO (Profile-Guided Optimization) build flow, we need to build the
+  # target binary multiple times. To implement this flow with gyp, we define
+  # multiple 'executable' targets here, each of which represents one
+  # particular build/profile stage. One tricky part is that these
+  # 'executable' targets should share the code itself so that profile data
+  # can be reused among them. In other words, the only differences among the
+  # 'executable' targets below are:
+  # 1) the PGO (Profile-Guided Optimization) database, and
+  # 2) the linker options.
+  # The following static library contains all the logic, including the entry
+  # point. Basically we don't need to rebuild this target once we enter the
+  # profiling phase of PGO.
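+  # For reference, 'LinkTimeCodeGeneration' values map to the PGO linker
+  # switches: '2' is /LTCG:PGINSTRUMENT, '3' is /LTCG:PGOPTIMIZE, and '4' is
+  # /LTCG:PGUPDATE.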
+ {
+ 'target_name': 'test_pgo_main',
+ 'type': 'static_library',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WholeProgramOptimization': 'true', # /GL
+ },
+ 'VCLibrarianTool': {
+ 'LinkTimeCodeGeneration': 'true',
+ },
+ },
+ 'link_settings': {
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'ProfileGuidedDatabase': '$(OutDir)\\<(pgd_basename).pgd',
+ 'TargetMachine': '1', # x86 - 32
+ 'SubSystem': '1', # /SUBSYSTEM:CONSOLE
+          # Tell the ninja generator not to pass the /ManifestFile:<filename>
+          # option to the linker; it causes error LNK1268 in PGO builds.
+ 'GenerateManifest': 'false',
+ # We need to specify 'libcmt.lib' here so that the linker can pick
+ # up a valid entry point.
+ 'AdditionalDependencies': [
+ 'libcmt.lib',
+ ],
+ },
+ },
+ },
+ 'sources': [
+ 'inline_test.h',
+ 'inline_test.cc',
+ 'inline_test_main.cc',
+ ],
+ },
+ {
+ 'target_name': 'test_pgo_instrument',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkTimeCodeGeneration': '2',
+ },
+ },
+ 'dependencies': [
+ 'test_pgo_main',
+ ],
+ },
+ {
+ 'target_name': 'gen_profile_guided_database',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'actions': [
+ {
+ 'action_name': 'action_main',
+ 'inputs': [],
+ 'outputs': [
+ '$(OutDir)\\<(pgd_basename).pgd',
+ ],
+ 'action': [
+ 'python', 'update_pgd.py',
+ '--vcbindir', '$(VCInstallDir)bin',
+ '--exe', '$(OutDir)\\test_pgo_instrument.exe',
+ '--pgd', '$(OutDir)\\<(pgd_basename).pgd',
+ ],
+ },
+ ],
+ 'dependencies': [
+ 'test_pgo_instrument',
+ ],
+ },
+ {
+ 'target_name': 'test_pgo_optimize',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkTimeCodeGeneration': '3',
+ },
+ },
+ 'sources': [
+ '$(OutDir)\\<(pgd_basename).pgd',
+ ],
+ 'dependencies': [
+ 'test_pgo_main',
+ 'gen_profile_guided_database',
+ ],
+ },
+ {
+ 'target_name': 'test_pgo_update',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'LinkTimeCodeGeneration': '4',
+ },
+ },
+ 'sources': [
+ '$(OutDir)\\<(pgd_basename).pgd',
+ ],
+ 'dependencies': [
+ 'test_pgo_main',
+ ],
+ },
+  # A helper target to dump link.exe's command line options. We can use the
+  # output to determine whether PGO (Profile-Guided Optimization) is available
+  # in the test environment.
+ {
+ 'target_name': 'gen_linker_option',
+ 'type': 'none',
+ 'msvs_cygwin_shell': 0,
+ 'actions': [
+ {
+ 'action_name': 'action_main',
+ 'inputs': [],
+ 'outputs': [
+ '$(OutDir)\\linker_options.txt',
+ ],
+ 'action': [
+ 'cmd.exe', '/c link.exe > $(OutDir)\\linker_options.txt & exit 0',
+ ],
+ },
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/profile.gyp b/third_party/python/gyp/test/win/linker-flags/profile.gyp
new file mode 100644
index 0000000000..d60a700fbb
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/profile.gyp
@@ -0,0 +1,50 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
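+# 'Profile': 'true' maps to the /PROFILE linker switch; 'false' simply omits
+# it.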
+{
+ 'targets': [
+    # Verify that the 'Profile' option correctly makes it to the LINK step in Ninja
+ {
+ 'target_name': 'test_profile_true',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3'
+ },
+ 'VCLinkerTool': {
+ 'Profile': 'true',
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_profile_false',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3'
+ },
+ 'VCLinkerTool': {
+ 'Profile': 'false',
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_profile_default',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3'
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/program-database.gyp b/third_party/python/gyp/test/win/linker-flags/program-database.gyp
new file mode 100644
index 0000000000..6e60ac0dc9
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/program-database.gyp
@@ -0,0 +1,40 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+    # Verify that the 'ProgramDatabaseFile' option correctly makes it to the
+    # LINK step in Ninja.
+ {
+ # Verify that VC macros and windows paths work correctly.
+ 'target_name': 'test_pdb_outdir',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3'
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': '$(OutDir)\\name_outdir.pdb',
+ },
+ },
+ },
+ {
+ # Verify that GYP macros and POSIX paths work correctly.
+ 'target_name': 'test_pdb_proddir',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3'
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ 'ProgramDatabaseFile': '<(PRODUCT_DIR)/name_proddir.pdb',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/safeseh.gyp b/third_party/python/gyp/test/win/linker-flags/safeseh.gyp
new file mode 100644
index 0000000000..d4a62074b8
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/safeseh.gyp
@@ -0,0 +1,79 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
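+# 'ImageHasSafeExceptionHandlers' maps to /SAFESEH ('true') or /SAFESEH:NO
+# ('false'); the MASM-level 'UseSafeExceptionHandlers' passes /safeseh to
+# ml.exe so the assembled objects register their handlers as safe.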
+{
+ 'target_defaults': {
+ 'configurations': {
+ 'Default': {
+ 'msvs_configuration_platform': 'Win32',
+ },
+ 'Default_x64': {
+ 'inherit_from': ['Default'],
+ 'msvs_configuration_platform': 'x64',
+ },
+ },
+ },
+ 'targets': [
+ {
+ 'target_name': 'test_safeseh_default',
+ 'type': 'executable',
+ 'msvs_settings': {
+ # By default, msvs passes /SAFESEH for Link, but not for MASM. In
+ # order for test_safeseh_default to link successfully, we need to
+ # explicitly specify /SAFESEH for MASM.
+ 'MASM': {
+ 'UseSafeExceptionHandlers': 'true',
+ },
+ },
+ 'sources': [
+ 'safeseh_hello.cc',
+ 'safeseh_zero.asm',
+ ],
+ },
+ {
+ 'target_name': 'test_safeseh_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'ImageHasSafeExceptionHandlers': 'false',
+ },
+ },
+ 'sources': [
+ 'safeseh_hello.cc',
+ 'safeseh_zero.asm',
+ ],
+ },
+ {
+ 'target_name': 'test_safeseh_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'ImageHasSafeExceptionHandlers': 'true',
+ },
+ 'MASM': {
+ 'UseSafeExceptionHandlers': 'true',
+ },
+ },
+ 'sources': [
+ 'safeseh_hello.cc',
+ 'safeseh_zero.asm',
+ ],
+ },
+ {
+ # x64 targets cannot have ImageHasSafeExceptionHandlers or
+ # UseSafeExceptionHandlers set.
+ 'target_name': 'test_safeseh_x64',
+ 'type': 'executable',
+ 'configurations': {
+ 'Default': {
+ 'msvs_target_platform': 'x64',
+ },
+ },
+ 'sources': [
+ 'safeseh_hello.cc',
+ 'safeseh_zero64.asm',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/safeseh_hello.cc b/third_party/python/gyp/test/win/linker-flags/safeseh_hello.cc
new file mode 100644
index 0000000000..6141300d2c
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/safeseh_hello.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern "C" {
+int zero(void);
+}
+
+int main() {
+ return zero();
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/safeseh_zero.asm b/third_party/python/gyp/test/win/linker-flags/safeseh_zero.asm
new file mode 100644
index 0000000000..62da0df4f3
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/safeseh_zero.asm
@@ -0,0 +1,10 @@
+.MODEL FLAT, C
+.CODE
+
+PUBLIC zero
+zero PROC
+ xor eax, eax
+ ret 0
+zero ENDP
+
+END
diff --git a/third_party/python/gyp/test/win/linker-flags/safeseh_zero64.asm b/third_party/python/gyp/test/win/linker-flags/safeseh_zero64.asm
new file mode 100644
index 0000000000..a4740c0dfb
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/safeseh_zero64.asm
@@ -0,0 +1,9 @@
+.CODE
+
+PUBLIC zero
+zero PROC
+ xor eax, eax
+ ret 0
+zero ENDP
+
+END
diff --git a/third_party/python/gyp/test/win/linker-flags/stacksize.gyp b/third_party/python/gyp/test/win/linker-flags/stacksize.gyp
new file mode 100644
index 0000000000..bba44ca4a7
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/stacksize.gyp
@@ -0,0 +1,44 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
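+# 'StackReserveSize' and 'StackCommitSize' together map to the
+# /STACK:reserve[,commit] linker switch.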
+{
+ 'targets': [
+ {
+ 'target_name': 'test_default',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_set_reserved_size',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'StackReserveSize': 2097152, # 2MB
+ }
+ },
+ },
+ {
+ 'target_name': 'test_set_commit_size',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'StackCommitSize': 8192, # 8KB
+ }
+ },
+ },
+ {
+ 'target_name': 'test_set_both',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'StackReserveSize': 2097152, # 2MB
+ 'StackCommitSize': 8192, # 8KB
+ }
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/subdir/library.gyp b/third_party/python/gyp/test/win/linker-flags/subdir/library.gyp
new file mode 100644
index 0000000000..519577f0d7
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/subdir/library.gyp
@@ -0,0 +1,13 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_lib',
+ 'type': 'static_library',
+ 'sources': ['../library-directories-define.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/subsystem-windows.cc b/third_party/python/gyp/test/win/linker-flags/subsystem-windows.cc
new file mode 100644
index 0000000000..ac99da808e
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/subsystem-windows.cc
@@ -0,0 +1,9 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+int CALLBACK WinMain(HINSTANCE, HINSTANCE, LPSTR, int) {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/subsystem.gyp b/third_party/python/gyp/test/win/linker-flags/subsystem.gyp
new file mode 100644
index 0000000000..63f072a206
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/subsystem.gyp
@@ -0,0 +1,70 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
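+# 'SubSystem' maps to /SUBSYSTEM:CONSOLE ('1') or /SUBSYSTEM:WINDOWS ('2');
+# 'MinimumRequiredVersion' is appended, e.g. /SUBSYSTEM:CONSOLE,5.01.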
+{
+ 'targets': [
+ {
+ 'target_name': 'test_console_ok',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '1'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_console_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '1'
+ }
+ },
+ 'sources': ['subsystem-windows.cc'],
+ },
+ {
+ 'target_name': 'test_windows_ok',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2'
+ }
+ },
+ 'sources': ['subsystem-windows.cc'],
+ },
+ {
+ 'target_name': 'test_windows_fail',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2'
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_console_xp',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '1',
+ 'MinimumRequiredVersion': '5.01', # XP.
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_windows_xp',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2',
+ 'MinimumRequiredVersion': '5.01', # XP.
+ }
+ },
+ 'sources': ['subsystem-windows.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/target-machine.gyp b/third_party/python/gyp/test/win/linker-flags/target-machine.gyp
new file mode 100644
index 0000000000..30271926c9
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/target-machine.gyp
@@ -0,0 +1,48 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
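+# 'TargetMachine' maps to /MACHINE:X86 ('1') or /MACHINE:X64 ('17'), for the
+# librarian as well as the linker.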
+{
+ 'targets': [
+ {
+ 'target_name': 'test_target_link_x86',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'TargetMachine': '1',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_target_link_x64',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'TargetMachine': '17',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_target_lib_x86',
+ 'type': 'static_library',
+ 'msvs_settings': {
+ 'VCLibrarianTool': {
+ 'TargetMachine': '1',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_target_lib_x64',
+ 'type': 'static_library',
+ 'msvs_settings': {
+ 'VCLibrarianTool': {
+ 'TargetMachine': '17',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/tsaware.gyp b/third_party/python/gyp/test/win/linker-flags/tsaware.gyp
new file mode 100644
index 0000000000..7ffc7426bb
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/tsaware.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
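+# 'TerminalServerAware' maps to /TSAWARE:NO ('1') or /TSAWARE ('2').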
+{
+ 'targets': [
+ {
+ 'target_name': 'test_tsaware_no',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'TerminalServerAware': '1',
+ }
+ },
+ 'sources': ['hello.cc'],
+ },
+ {
+ 'target_name': 'test_tsaware_yes',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'TerminalServerAware': '2',
+ },
+ },
+ 'sources': ['hello.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/unsupported-manifest.gyp b/third_party/python/gyp/test/win/linker-flags/unsupported-manifest.gyp
new file mode 100644
index 0000000000..5549e7cb9b
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/unsupported-manifest.gyp
@@ -0,0 +1,13 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_unsupported',
+ 'type': 'executable',
+ 'sources': ['manifest-in-comment.cc'],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/update_pgd.py b/third_party/python/gyp/test/win/linker-flags/update_pgd.py
new file mode 100644
index 0000000000..176e9e5472
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/update_pgd.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from optparse import OptionParser
+import glob
+import os
+import subprocess
+
+parser = OptionParser()
+parser.add_option('--exe', dest='exe')
+parser.add_option('--vcbindir', dest='vcbindir')
+parser.add_option('--pgd', dest='pgd')
+(options, args) = parser.parse_args()
+
+# Instrumented binaries fail to run unless Visual C++'s bin directory is
+# included in the PATH environment variable.
+os.environ['PATH'] = os.environ['PATH'] + os.pathsep + options.vcbindir
+
+# Run the instrumented binary. The profile will be recorded into a *.pgc file.
+subprocess.call([options.exe])
+
+# Merge *.pgc files into a *.pgd (Profile-Guided Database) file.
+subprocess.call(['pgomgr', '/merge', options.pgd])
+
+# The *.pgc files are no longer necessary. Delete all of them.
+pgd_file = os.path.abspath(options.pgd)
+pgd_dir = os.path.dirname(pgd_file)
+(pgd_basename, _) = os.path.splitext(os.path.basename(pgd_file))
+pgc_filepattern = os.path.join(pgd_dir, '%s!*.pgc' % pgd_basename)
+pgc_files = glob.glob(pgc_filepattern)
+for pgc_file in pgc_files:
+ os.unlink(pgc_file)
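+
+# Example invocation, mirroring the gyp action in pgo.gyp:
+#   python update_pgd.py --vcbindir "$(VCInstallDir)bin" \
+#     --exe "$(OutDir)\test_pgo_instrument.exe" --pgd "$(OutDir)\test_pgo.pgd"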
diff --git a/third_party/python/gyp/test/win/linker-flags/warn-as-error.gyp b/third_party/python/gyp/test/win/linker-flags/warn-as-error.gyp
new file mode 100644
index 0000000000..83c67e9df1
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/warn-as-error.gyp
@@ -0,0 +1,33 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
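+# 'TreatLinkerWarningAsErrors' maps to the linker's /WX ('true') or /WX:NO
+# ('false').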
+{
+ 'targets': [
+ {
+ 'target_name': 'test_on',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'TreatLinkerWarningAsErrors': 'true',
+ }
+ },
+ 'sources': ['link-warning.cc'],
+ },
+ {
+ 'target_name': 'test_off',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'TreatLinkerWarningAsErrors': 'false',
+ }
+ },
+ 'sources': ['link-warning.cc'],
+ },
+ {
+ 'target_name': 'test_default',
+ 'type': 'executable',
+ 'sources': ['link-warning.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/x.cc b/third_party/python/gyp/test/win/linker-flags/x.cc
new file mode 100644
index 0000000000..f5f763b095
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/x.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int x() {
+ return 1;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/y.cc b/third_party/python/gyp/test/win/linker-flags/y.cc
new file mode 100644
index 0000000000..bd884119fc
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/y.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int y() {
+ return 2;
+}
diff --git a/third_party/python/gyp/test/win/linker-flags/z.cc b/third_party/python/gyp/test/win/linker-flags/z.cc
new file mode 100644
index 0000000000..8a43501270
--- /dev/null
+++ b/third_party/python/gyp/test/win/linker-flags/z.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int z() {
+ return 3;
+}
diff --git a/third_party/python/gyp/test/win/long-command-line/function.cc b/third_party/python/gyp/test/win/long-command-line/function.cc
new file mode 100644
index 0000000000..af44b2cabd
--- /dev/null
+++ b/third_party/python/gyp/test/win/long-command-line/function.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int func() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/long-command-line/hello.cc b/third_party/python/gyp/test/win/long-command-line/hello.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/win/long-command-line/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/long-command-line/long-command-line.gyp b/third_party/python/gyp/test/win/long-command-line/long-command-line.gyp
new file mode 100644
index 0000000000..964c94fa94
--- /dev/null
+++ b/third_party/python/gyp/test/win/long-command-line/long-command-line.gyp
@@ -0,0 +1,54 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'longexe',
+ 'type': 'executable',
+ 'msvs_settings': {
+ # Use this as a simple way to get a long command.
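+          # 8000 copies of '/nologo ' is roughly 64K characters, well past
+          # the ~32K Windows command-line limit, so the generator has to
+          # fall back to a response file.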
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': '/nologo ' * 8000,
+ },
+ 'VCLinkerTool': {
+ 'AdditionalOptions': '/nologo ' * 8000,
+ },
+ },
+ 'sources': [
+ 'hello.cc',
+ ],
+ },
+ {
+ 'target_name': 'longlib',
+ 'type': 'static_library',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': '/nologo ' * 8000,
+ },
+ 'VCLibrarianTool': {
+ 'AdditionalOptions': '/nologo ' * 8000,
+ },
+ },
+ 'sources': [
+ 'function.cc',
+ ],
+ },
+ {
+ 'target_name': 'longdll',
+ 'type': 'shared_library',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'AdditionalOptions': '/nologo ' * 8000,
+ },
+ 'VCLinkerTool': {
+ 'AdditionalOptions': '/nologo ' * 8000,
+ },
+ },
+ 'sources': [
+ 'hello.cc',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/ml-safeseh/a.asm b/third_party/python/gyp/test/win/ml-safeseh/a.asm
new file mode 100644
index 0000000000..62da0df4f3
--- /dev/null
+++ b/third_party/python/gyp/test/win/ml-safeseh/a.asm
@@ -0,0 +1,10 @@
+.MODEL FLAT, C
+.CODE
+
+PUBLIC zero
+zero PROC
+ xor eax, eax
+ ret 0
+zero ENDP
+
+END
diff --git a/third_party/python/gyp/test/win/ml-safeseh/hello.cc b/third_party/python/gyp/test/win/ml-safeseh/hello.cc
new file mode 100644
index 0000000000..6141300d2c
--- /dev/null
+++ b/third_party/python/gyp/test/win/ml-safeseh/hello.cc
@@ -0,0 +1,11 @@
+// Copyright (c) 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern "C" {
+int zero(void);
+}
+
+int main() {
+ return zero();
+}
diff --git a/third_party/python/gyp/test/win/ml-safeseh/ml-safeseh.gyp b/third_party/python/gyp/test/win/ml-safeseh/ml-safeseh.gyp
new file mode 100644
index 0000000000..bf8618f865
--- /dev/null
+++ b/third_party/python/gyp/test/win/ml-safeseh/ml-safeseh.gyp
@@ -0,0 +1,24 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'ml_safeseh',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cc',
+ 'a.asm',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'ImageHasSafeExceptionHandlers': 'true',
+ },
+ 'MASM': {
+ 'UseSafeExceptionHandlers': 'true',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/precompiled/gyptest-all.py b/third_party/python/gyp/test/win/precompiled/gyptest-all.py
new file mode 100644
index 0000000000..9fb5e62edf
--- /dev/null
+++ b/third_party/python/gyp/test/win/precompiled/gyptest-all.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that precompiled headers can be specified.
+"""
+
+import TestGyp
+
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['msvs', 'ninja'], workdir='workarea_all')
+ test.run_gyp('hello.gyp')
+ test.build('hello.gyp', 'hello')
+ test.run_built_executable('hello', stdout="Hello, world!\nHello, two!\n")
+ test.up_to_date('hello.gyp', test.ALL)
+ test.pass_test()
diff --git a/third_party/python/gyp/test/win/precompiled/hello.c b/third_party/python/gyp/test/win/precompiled/hello.c
new file mode 100644
index 0000000000..ffb47bf822
--- /dev/null
+++ b/third_party/python/gyp/test/win/precompiled/hello.c
@@ -0,0 +1,14 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+// Note the absence of a stdio.h include. It is supplied by the precompiled
+// header.
+
+extern int hello2();
+
+int main(void) {
+ printf("Hello, world!\n");
+ hello2();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/precompiled/hello.gyp b/third_party/python/gyp/test/win/precompiled/hello.gyp
new file mode 100644
index 0000000000..5f82c53593
--- /dev/null
+++ b/third_party/python/gyp/test/win/precompiled/hello.gyp
@@ -0,0 +1,28 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.c',
+ 'hello2.c',
+ 'precomp.c',
+ ],
+ 'msvs_precompiled_header': 'stdio.h',
+ 'msvs_precompiled_source': 'precomp.c',
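+    # precomp.c is built with /Yc to create the PCH; the remaining sources
+    # are built with /Yu to consume it.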
+
+ # Required so that the printf actually causes a build failure
+ # if the pch isn't included.
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '3',
+ 'WarnAsError': 'true',
+ },
+ },
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/win/precompiled/hello2.c b/third_party/python/gyp/test/win/precompiled/hello2.c
new file mode 100644
index 0000000000..d6d53111fb
--- /dev/null
+++ b/third_party/python/gyp/test/win/precompiled/hello2.c
@@ -0,0 +1,13 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+// Unlike hello.c, this file specifies the headers.
+
+#include <windows.h>
+#include <stdio.h>
+
+int hello2() {
+ printf("Hello, two!\n");
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/precompiled/precomp.c b/third_party/python/gyp/test/win/precompiled/precomp.c
new file mode 100644
index 0000000000..517c61a36b
--- /dev/null
+++ b/third_party/python/gyp/test/win/precompiled/precomp.c
@@ -0,0 +1,8 @@
+/* Copyright (c) 2011 Google Inc. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file. */
+
+// The precompiled header does not have to be the first include in the file.
+
+#include <windows.h>
+#include <stdio.h>
diff --git a/third_party/python/gyp/test/win/rc-build/Resource.h b/third_party/python/gyp/test/win/rc-build/Resource.h
new file mode 100644
index 0000000000..137acf39b5
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/Resource.h
@@ -0,0 +1,26 @@
+//{{NO_DEPENDENCIES}}
+// Microsoft Visual C++ generated include file.
+// Used by hello.rc
+//
+
+#define IDS_APP_TITLE 103
+
+#define IDR_MAINFRAME 128
+#define IDI_HELLO 107
+#define IDI_SMALL 108
+#define IDC_HELLO 109
+#ifndef IDC_STATIC
+#define IDC_STATIC -1
+#endif
+// Next default values for new objects
+//
+#ifdef APSTUDIO_INVOKED
+#ifndef APSTUDIO_READONLY_SYMBOLS
+
+#define _APS_NO_MFC 130
+#define _APS_NEXT_RESOURCE_VALUE 129
+#define _APS_NEXT_COMMAND_VALUE 32771
+#define _APS_NEXT_CONTROL_VALUE 1000
+#define _APS_NEXT_SYMED_VALUE 110
+#endif
+#endif
diff --git a/third_party/python/gyp/test/win/rc-build/hello.cpp b/third_party/python/gyp/test/win/rc-build/hello.cpp
new file mode 100644
index 0000000000..f552ca1591
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/hello.cpp
@@ -0,0 +1,30 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <tchar.h>
+
+#include "resource.h"
+
+#define MAX_LOADSTRING 100
+
+TCHAR szTitle[MAX_LOADSTRING];
+TCHAR szWindowClass[MAX_LOADSTRING];
+
+int APIENTRY _tWinMain(
+ HINSTANCE hInstance,
+ HINSTANCE hPrevInstance,
+ LPTSTR lpCmdLine,
+ int nCmdShow) {
+ // Make sure we can load some resources.
+ int count = 0;
+ LoadString(hInstance, IDS_APP_TITLE, szTitle, MAX_LOADSTRING);
+ if (szTitle[0] != 0) ++count;
+ LoadString(hInstance, IDC_HELLO, szWindowClass, MAX_LOADSTRING);
+ if (szWindowClass[0] != 0) ++count;
+ if (LoadIcon(hInstance, MAKEINTRESOURCE(IDI_SMALL)) != NULL) ++count;
+ if (LoadIcon(hInstance, MAKEINTRESOURCE(IDI_HELLO)) != NULL) ++count;
+ return count;
+}
diff --git a/third_party/python/gyp/test/win/rc-build/hello.gyp b/third_party/python/gyp/test/win/rc-build/hello.gyp
new file mode 100644
index 0000000000..3a66357dd4
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/hello.gyp
@@ -0,0 +1,92 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
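+# 'Culture': '1033' (0x409) is the en-US LANGID, handed to rc.exe via /l.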
+{
+ 'targets': [
+ {
+ 'target_name': 'with_resources',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ 'VCResourceCompilerTool': {
+ 'Culture' : '1033',
+ },
+ },
+ 'sources': [
+ 'hello.cpp',
+ 'hello.rc',
+ ],
+ 'libraries': [
+ 'kernel32.lib',
+ 'user32.lib',
+ ],
+ },
+ {
+ 'target_name': 'with_resources_subdir',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ 'VCResourceCompilerTool': {
+ 'Culture' : '1033',
+ },
+ },
+ 'sources': [
+ 'hello.cpp',
+ 'subdir/hello2.rc',
+ ],
+ 'libraries': [
+ 'kernel32.lib',
+ 'user32.lib',
+ ],
+ },
+ {
+ 'target_name': 'with_include_subdir',
+ 'type': 'executable',
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'DebugInformationFormat': '3',
+ },
+ 'VCLinkerTool': {
+ 'GenerateDebugInformation': 'true',
+ },
+ 'VCResourceCompilerTool': {
+ 'Culture' : '1033',
+ },
+ },
+ 'resource_include_dirs': [
+ '$(ProjectDir)\\subdir',
+ ],
+ 'sources': [
+ 'hello.cpp',
+ 'hello3.rc',
+ ],
+ 'libraries': [
+ 'kernel32.lib',
+ 'user32.lib',
+ ],
+ },
+ {
+ 'target_name': 'resource_only_dll',
+ 'type': 'shared_library',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'ResourceOnlyDLL': 'true',
+ },
+ },
+ 'sources': [
+ 'hello.rc',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/win/rc-build/hello.h b/third_party/python/gyp/test/win/rc-build/hello.h
new file mode 100644
index 0000000000..e60f2eb7ed
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/hello.h
@@ -0,0 +1,3 @@
+#pragma once
+
+#include "resource.h"
diff --git a/third_party/python/gyp/test/win/rc-build/hello.ico b/third_party/python/gyp/test/win/rc-build/hello.ico
new file mode 100644
index 0000000000..d551aa3aaf
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/hello.ico
Binary files differ
diff --git a/third_party/python/gyp/test/win/rc-build/hello.rc b/third_party/python/gyp/test/win/rc-build/hello.rc
new file mode 100644
index 0000000000..c9a7af6a07
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/hello.rc
@@ -0,0 +1,86 @@
+//Microsoft Visual C++ generated resource script.
+//
+#include "resource.h"
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#ifndef APSTUDIO_INVOKED
+#include "targetver.h"
+#endif
+#define APSTUDIO_HIDDEN_SYMBOLS
+#include "windows.h"
+#undef APSTUDIO_HIDDEN_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
+LANGUAGE 9, 1
+#pragma code_page(932)
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Icon
+//
+
+// Icon with lowest ID value placed first to ensure application icon
+// remains consistent on all systems.
+
+IDI_HELLO ICON "hello.ico"
+IDI_SMALL ICON "small.ico"
+
+#ifdef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// TEXTINCLUDE
+//
+1 TEXTINCLUDE
+BEGIN
+ "resource.h\0"
+END
+
+2 TEXTINCLUDE
+BEGIN
+ "#ifndef APSTUDIO_INVOKED\r\n"
+ "#include ""targetver.h""\r\n"
+ "#endif\r\n"
+ "#define APSTUDIO_HIDDEN_SYMBOLS\r\n"
+ "#include ""windows.h""\r\n"
+ "#undef APSTUDIO_HIDDEN_SYMBOLS\r\n"
+ "\0"
+END
+
+3 TEXTINCLUDE
+BEGIN
+ "\r\n"
+ "\0"
+END
+
+#endif // APSTUDIO_INVOKED
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// String Table
+//
+
+STRINGTABLE
+BEGIN
+ IDC_HELLO "HELLO"
+ IDS_APP_TITLE "hello"
+END
+
+#endif
+/////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+
+/////////////////////////////////////////////////////////////////////////////
+#endif // not APSTUDIO_INVOKED
diff --git a/third_party/python/gyp/test/win/rc-build/hello3.rc b/third_party/python/gyp/test/win/rc-build/hello3.rc
new file mode 100644
index 0000000000..c74dede576
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/hello3.rc
@@ -0,0 +1,87 @@
+//Microsoft Visual C++ generated resource script.
+//
+#include "include.h"
+#include "resource.h"
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#ifndef APSTUDIO_INVOKED
+#include "targetver.h"
+#endif
+#define APSTUDIO_HIDDEN_SYMBOLS
+#include "windows.h"
+#undef APSTUDIO_HIDDEN_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
+LANGUAGE 9, 1
+#pragma code_page(932)
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Icon
+//
+
+// Icon with lowest ID value placed first to ensure application icon
+// remains consistent on all systems.
+
+IDI_HELLO ICON "hello.ico"
+IDI_SMALL ICON "small.ico"
+
+#ifdef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// TEXTINCLUDE
+//
+1 TEXTINCLUDE
+BEGIN
+ "resource.h\0"
+END
+
+2 TEXTINCLUDE
+BEGIN
+ "#ifndef APSTUDIO_INVOKED\r\n"
+ "#include ""targetver.h""\r\n"
+ "#endif\r\n"
+ "#define APSTUDIO_HIDDEN_SYMBOLS\r\n"
+ "#include ""windows.h""\r\n"
+ "#undef APSTUDIO_HIDDEN_SYMBOLS\r\n"
+ "\0"
+END
+
+3 TEXTINCLUDE
+BEGIN
+ "\r\n"
+ "\0"
+END
+
+#endif // APSTUDIO_INVOKED
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// String Table
+//
+
+STRINGTABLE
+BEGIN
+ IDC_HELLO "HELLO"
+ IDS_APP_TITLE "hello"
+END
+
+#endif
+/////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+
+/////////////////////////////////////////////////////////////////////////////
+#endif // not APSTUDIO_INVOKED
diff --git a/third_party/python/gyp/test/win/rc-build/small.ico b/third_party/python/gyp/test/win/rc-build/small.ico
new file mode 100644
index 0000000000..d551aa3aaf
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/small.ico
Binary files differ
diff --git a/third_party/python/gyp/test/win/rc-build/subdir/hello2.rc b/third_party/python/gyp/test/win/rc-build/subdir/hello2.rc
new file mode 100644
index 0000000000..4c8eab109e
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/subdir/hello2.rc
@@ -0,0 +1,87 @@
+//Microsoft Visual C++ generated resource script.
+//
+#include "subdir/include.h"
+#include "resource.h"
+
+#define APSTUDIO_READONLY_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 2 resource.
+//
+#ifndef APSTUDIO_INVOKED
+#include "targetver.h"
+#endif
+#define APSTUDIO_HIDDEN_SYMBOLS
+#include "windows.h"
+#undef APSTUDIO_HIDDEN_SYMBOLS
+/////////////////////////////////////////////////////////////////////////////
+#undef APSTUDIO_READONLY_SYMBOLS
+
+#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU)
+LANGUAGE 9, 1
+#pragma code_page(932)
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// Icon
+//
+
+// Icon with lowest ID value placed first to ensure application icon
+// remains consistent on all systems.
+
+IDI_HELLO ICON "hello.ico"
+IDI_SMALL ICON "small.ico"
+
+#ifdef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// TEXTINCLUDE
+//
+1 TEXTINCLUDE
+BEGIN
+ "resource.h\0"
+END
+
+2 TEXTINCLUDE
+BEGIN
+ "#ifndef APSTUDIO_INVOKED\r\n"
+ "#include ""targetver.h""\r\n"
+ "#endif\r\n"
+ "#define APSTUDIO_HIDDEN_SYMBOLS\r\n"
+ "#include ""windows.h""\r\n"
+ "#undef APSTUDIO_HIDDEN_SYMBOLS\r\n"
+ "\0"
+END
+
+3 TEXTINCLUDE
+BEGIN
+ "\r\n"
+ "\0"
+END
+
+#endif // APSTUDIO_INVOKED
+
+/////////////////////////////////////////////////////////////////////////////
+//
+// String Table
+//
+
+STRINGTABLE
+BEGIN
+ IDC_HELLO "HELLO"
+ IDS_APP_TITLE "hello"
+END
+
+#endif
+/////////////////////////////////////////////////////////////////////////////
+
+
+
+#ifndef APSTUDIO_INVOKED
+/////////////////////////////////////////////////////////////////////////////
+//
+// Generated from the TEXTINCLUDE 3 resource.
+//
+
+/////////////////////////////////////////////////////////////////////////////
+#endif // not APSTUDIO_INVOKED
diff --git a/third_party/python/gyp/test/win/rc-build/subdir/include.h b/third_party/python/gyp/test/win/rc-build/subdir/include.h
new file mode 100644
index 0000000000..f15c48b422
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/subdir/include.h
@@ -0,0 +1 @@
+// Just exists to make sure it can be included.
diff --git a/third_party/python/gyp/test/win/rc-build/targetver.h b/third_party/python/gyp/test/win/rc-build/targetver.h
new file mode 100644
index 0000000000..f583181dfd
--- /dev/null
+++ b/third_party/python/gyp/test/win/rc-build/targetver.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// The following macros define the minimum required platform. The minimum required platform
+// is the earliest version of Windows, Internet Explorer etc. that has the necessary features to run
+// your application. The macros work by enabling all features available on platform versions up to and
+// including the version specified.
+
+// Modify the following defines if you have to target a platform prior to the ones specified below.
+// Refer to MSDN for the latest info on corresponding values for different platforms.
+#ifndef WINVER // Specifies that the minimum required platform is Windows Vista.
+#define WINVER 0x0600 // Change this to the appropriate value to target other versions of Windows.
+#endif
+
+#ifndef _WIN32_WINNT // Specifies that the minimum required platform is Windows Vista.
+#define _WIN32_WINNT 0x0600 // Change this to the appropriate value to target other versions of Windows.
+#endif
+
+#ifndef _WIN32_WINDOWS // Specifies that the minimum required platform is Windows 98.
+#define _WIN32_WINDOWS 0x0410 // Change this to the appropriate value to target Windows Me or later.
+#endif
+
+#ifndef _WIN32_IE // Specifies that the minimum required platform is Internet Explorer 7.0.
+#define _WIN32_IE 0x0700 // Change this to the appropriate value to target other versions of IE.
+#endif
diff --git a/third_party/python/gyp/test/win/shard/hello.cc b/third_party/python/gyp/test/win/shard/hello.cc
new file mode 100644
index 0000000000..a9dce62453
--- /dev/null
+++ b/third_party/python/gyp/test/win/shard/hello.cc
@@ -0,0 +1,7 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/shard/hello1.cc b/third_party/python/gyp/test/win/shard/hello1.cc
new file mode 100644
index 0000000000..0eccf2861d
--- /dev/null
+++ b/third_party/python/gyp/test/win/shard/hello1.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int f1() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/shard/hello2.cc b/third_party/python/gyp/test/win/shard/hello2.cc
new file mode 100644
index 0000000000..23fcb546cb
--- /dev/null
+++ b/third_party/python/gyp/test/win/shard/hello2.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int f2() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/shard/hello3.cc b/third_party/python/gyp/test/win/shard/hello3.cc
new file mode 100644
index 0000000000..a72e2efb5a
--- /dev/null
+++ b/third_party/python/gyp/test/win/shard/hello3.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int f3() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/shard/hello4.cc b/third_party/python/gyp/test/win/shard/hello4.cc
new file mode 100644
index 0000000000..a94df19499
--- /dev/null
+++ b/third_party/python/gyp/test/win/shard/hello4.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int f4() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/shard/shard.gyp b/third_party/python/gyp/test/win/shard/shard.gyp
new file mode 100644
index 0000000000..eac45fcff7
--- /dev/null
+++ b/third_party/python/gyp/test/win/shard/shard.gyp
@@ -0,0 +1,31 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'shard',
+ 'type': 'static_library',
+ 'msvs_shard': 4,
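+      # msvs_shard splits this library into 4 smaller .lib files so they can
+      # be built in parallel; targets that depend on 'shard' are rewritten to
+      # link against all of the pieces.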
+ 'sources': [
+ 'hello1.cc',
+ 'hello2.cc',
+ 'hello3.cc',
+ 'hello4.cc',
+ ],
+ 'product_dir': '<(PRODUCT_DIR)',
+ },
+ {
+ 'target_name': 'refs_to_shard',
+ 'type': 'executable',
+ 'dependencies': [
+ # Make sure references are correctly updated.
+ 'shard',
+ ],
+ 'sources': [
+ 'hello.cc',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/shard/shard_ref.gyp b/third_party/python/gyp/test/win/shard/shard_ref.gyp
new file mode 100644
index 0000000000..3ec8d76f99
--- /dev/null
+++ b/third_party/python/gyp/test/win/shard/shard_ref.gyp
@@ -0,0 +1,41 @@
+# Copyright 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'refs_to_shard_external_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ # Make sure references in other files are updated correctly.
+ 'shard.gyp:shard',
+ ],
+ 'sources': [
+ 'hello.cc',
+ ],
+ },
+ {
+ 'target_name': 'refs_to_shard_external_exe',
+ 'type': 'executable',
+ 'dependencies': [
+ # Make sure references in other files are updated correctly.
+ 'shard.gyp:shard',
+ ],
+ 'sources': [
+ 'hello.cc',
+ ],
+ },
+ {
+ 'target_name': 'refs_to_shard_external_dll',
+ 'type': 'shared_library',
+ 'dependencies': [
+ # Make sure references in other files are updated correctly.
+ 'shard.gyp:shard',
+ ],
+ 'sources': [
+ 'hello.cc',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/system-include/bar/header.h b/third_party/python/gyp/test/win/system-include/bar/header.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/win/system-include/bar/header.h
diff --git a/third_party/python/gyp/test/win/system-include/common/commonheader.h b/third_party/python/gyp/test/win/system-include/common/commonheader.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/win/system-include/common/commonheader.h
diff --git a/third_party/python/gyp/test/win/system-include/foo/header.h b/third_party/python/gyp/test/win/system-include/foo/header.h
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/win/system-include/foo/header.h
diff --git a/third_party/python/gyp/test/win/system-include/main.cc b/third_party/python/gyp/test/win/system-include/main.cc
new file mode 100644
index 0000000000..b04ea8a530
--- /dev/null
+++ b/third_party/python/gyp/test/win/system-include/main.cc
@@ -0,0 +1,4 @@
+#include <commonheader.h>
+#include <header.h>
+
+int main() {}
diff --git a/third_party/python/gyp/test/win/system-include/test.gyp b/third_party/python/gyp/test/win/system-include/test.gyp
new file mode 100644
index 0000000000..07f2636543
--- /dev/null
+++ b/third_party/python/gyp/test/win/system-include/test.gyp
@@ -0,0 +1,26 @@
+{
+ 'target_defaults': {
+ 'msvs_settings': {
+ 'VCCLCompilerTool': {
+ 'WarningLevel': '4',
+ 'WarnAsError': 'true',
+ },
+ },
+ 'msvs_system_include_dirs': [
+ '$(ProjectName)', # Different for each target
+ 'common', # Same for all targets
+ ],
+ },
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'executable',
+ 'sources': [ 'main.cc', ],
+ },
+ {
+ 'target_name': 'bar',
+ 'type': 'executable',
+ 'sources': [ 'main.cc', ],
+ },
+ ],
+}
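The comments in 'msvs_system_include_dirs' above hinge on $(ProjectName) expanding per target; a minimal sketch of the expected expansion, assuming the macro resolves to the target's name:

    # Sketch: expand the per-target macro in 'msvs_system_include_dirs',
    # assuming $(ProjectName) resolves to the target name.
    def expand_include_dirs(target_name, dirs):
        return [d.replace('$(ProjectName)', target_name) for d in dirs]

    dirs = ['$(ProjectName)', 'common']
    print(expand_include_dirs('foo', dirs))  # ['foo', 'common']
    print(expand_include_dirs('bar', dirs))  # ['bar', 'common']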
diff --git a/third_party/python/gyp/test/win/uldi/a.cc b/third_party/python/gyp/test/win/uldi/a.cc
new file mode 100644
index 0000000000..0fe05d5afb
--- /dev/null
+++ b/third_party/python/gyp/test/win/uldi/a.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int some_function() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/uldi/b.cc b/third_party/python/gyp/test/win/uldi/b.cc
new file mode 100644
index 0000000000..0fe05d5afb
--- /dev/null
+++ b/third_party/python/gyp/test/win/uldi/b.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int some_function() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/uldi/dll.cc b/third_party/python/gyp/test/win/uldi/dll.cc
new file mode 100644
index 0000000000..93a6c19003
--- /dev/null
+++ b/third_party/python/gyp/test/win/uldi/dll.cc
@@ -0,0 +1,6 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+__declspec(dllexport) void SomeFunction() {
+}
diff --git a/third_party/python/gyp/test/win/uldi/exe.cc b/third_party/python/gyp/test/win/uldi/exe.cc
new file mode 100644
index 0000000000..b3039ace96
--- /dev/null
+++ b/third_party/python/gyp/test/win/uldi/exe.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/uldi/main.cc b/third_party/python/gyp/test/win/uldi/main.cc
new file mode 100644
index 0000000000..81b46d863a
--- /dev/null
+++ b/third_party/python/gyp/test/win/uldi/main.cc
@@ -0,0 +1,10 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+extern int some_function();
+
+int main() {
+ some_function();
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/uldi/uldi-depending-on-module.gyp b/third_party/python/gyp/test/win/uldi/uldi-depending-on-module.gyp
new file mode 100644
index 0000000000..3e34de8418
--- /dev/null
+++ b/third_party/python/gyp/test/win/uldi/uldi-depending-on-module.gyp
@@ -0,0 +1,42 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'an_exe',
+ 'type': 'executable',
+ 'sources': ['exe.cc'],
+ 'dependencies': [
+ 'a_dll',
+ ],
+ },
+ {
+ 'target_name': 'a_dll',
+ 'type': 'shared_library',
+ 'sources': ['dll.cc'],
+ 'dependencies': [
+ 'a_lib',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'UseLibraryDependencyInputs': 'true'
+ },
+ },
+ },
+ {
+ 'target_name': 'a_lib',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'a_module',
+ ],
+ 'sources': ['a.cc'],
+ },
+ {
+ 'target_name': 'a_module',
+ 'type': 'loadable_module',
+ 'sources': ['a.cc'],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/uldi/uldi.gyp b/third_party/python/gyp/test/win/uldi/uldi.gyp
new file mode 100644
index 0000000000..c32f5e0956
--- /dev/null
+++ b/third_party/python/gyp/test/win/uldi/uldi.gyp
@@ -0,0 +1,45 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'lib1',
+ 'type': 'static_library',
+ 'sources': ['a.cc'],
+ },
+ {
+ 'target_name': 'final_uldi',
+ 'type': 'executable',
+ 'dependencies': [
+ 'lib1',
+ 'lib2',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'UseLibraryDependencyInputs': 'true'
+ },
+ },
+ 'sources': ['main.cc'],
+ },
+ {
+ 'target_name': 'final_no_uldi',
+ 'type': 'executable',
+ 'dependencies': [
+ 'lib1',
+ 'lib2',
+ ],
+ 'sources': ['main.cc'],
+ },
+ {
+ 'target_name': 'lib2',
+ 'type': 'static_library',
+      # b.cc defines a function with the same name as the one in a.cc, but
+      # uses a different file name so that its .obj gets a different name.
+      # If the .obj files had the same name, the linker would discard one of
+      # them, invalidating the test.
+ 'sources': ['b.cc'],
+ },
+ ]
+}
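The lib2 comment comes down to object-file naming: with UseLibraryDependencyInputs the linker is fed the libraries' .obj files directly, and .obj names derive from source basenames. A small sketch of why a.cc and b.cc stay distinct:

    # Sketch: .obj names come from source basenames, so a.cc and b.cc
    # produce distinct objects, while two files both named a.cc collide.
    import os

    def obj_names(sources):
        return [os.path.splitext(os.path.basename(s))[0] + '.obj'
                for s in sources]

    print(obj_names(['a.cc', 'b.cc']))            # ['a.obj', 'b.obj']
    print(obj_names(['lib1/a.cc', 'lib2/a.cc']))  # ['a.obj', 'a.obj'] (clash)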
diff --git a/third_party/python/gyp/test/win/vs-macros/as.py b/third_party/python/gyp/test/win/vs-macros/as.py
new file mode 100644
index 0000000000..806c91d926
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/as.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from __future__ import print_function
+
+from optparse import OptionParser
+
+parser = OptionParser()
+parser.add_option('-a', dest='platform')
+parser.add_option('-o', dest='output')
+parser.add_option('-p', dest='path')
+(options, args) = parser.parse_args()
+
+f = open(options.output, 'w')
+print('options', options, file=f)
+print('args', args, file=f)
+f.close()
diff --git a/third_party/python/gyp/test/win/vs-macros/containing-gyp.gyp b/third_party/python/gyp/test/win/vs-macros/containing-gyp.gyp
new file mode 100644
index 0000000000..c07b639ff1
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/containing-gyp.gyp
@@ -0,0 +1,39 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_expansions',
+ 'msvs_cygwin_shell': 0,
+ 'type': 'none',
+ 'rules': [
+ {
+ 'rule_name': 'assembler (gnu-compatible)',
+ 'msvs_cygwin_shell': 0,
+ 'msvs_quote_cmd': 0,
+ 'extension': 'S',
+ 'inputs': [
+ 'as.py',
+ ],
+ 'outputs': [
+ '$(IntDir)/$(InputName).obj',
+ ],
+ 'action':
+ ['python',
+ 'as.py',
+ '-a', '$(PlatformName)',
+ '-o', '$(IntDir)/$(InputName).obj',
+ '-p', '<(DEPTH)',
+ '$(InputPath)'],
+ 'message': 'Building assembly language file $(InputPath)',
+ 'process_outputs_as_sources': 1,
+ },
+ ],
+ 'sources': [
+ 'input.S',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/vs-macros/do_stuff.py b/third_party/python/gyp/test/win/vs-macros/do_stuff.py
new file mode 100644
index 0000000000..4669d3139b
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/do_stuff.py
@@ -0,0 +1,8 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+input = open(sys.argv[1], "r").read()
+open(sys.argv[2], "w").write(input + "Modified.")
diff --git a/third_party/python/gyp/test/win/vs-macros/hello.cc b/third_party/python/gyp/test/win/vs-macros/hello.cc
new file mode 100644
index 0000000000..1711567ef5
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/hello.cc
@@ -0,0 +1,7 @@
+// Copyright (c) 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/win/vs-macros/input-output-macros.gyp b/third_party/python/gyp/test/win/vs-macros/input-output-macros.gyp
new file mode 100644
index 0000000000..b4520f8cb8
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/input-output-macros.gyp
@@ -0,0 +1,32 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_expansions',
+ 'msvs_cygwin_shell': 0,
+ 'type': 'none',
+ 'rules': [
+ {
+ 'rule_name': 'generate_file',
+ 'extension': 'blah',
+ 'inputs': [
+ 'do_stuff.py',
+ ],
+ 'outputs': [
+ '$(OutDir)\\<(RULE_INPUT_NAME).something',
+ ],
+ 'action': ['python',
+ 'do_stuff.py',
+ '<(RULE_INPUT_PATH)',
+ '$(OutDir)\\<(RULE_INPUT_NAME).something',],
+ },
+ ],
+ 'sources': [
+ 'stuff.blah',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/vs-macros/input.S b/third_party/python/gyp/test/win/vs-macros/input.S
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/input.S
diff --git a/third_party/python/gyp/test/win/vs-macros/projectname.gyp b/third_party/python/gyp/test/win/vs-macros/projectname.gyp
new file mode 100644
index 0000000000..625a177643
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/projectname.gyp
@@ -0,0 +1,29 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_expansions',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(OutDir)\\$(ProjectName)_plus_something.exe',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_with_product_name',
+ 'product_name': 'prod_name',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(OutDir)\\$(ProjectName)_plus_something.exe',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/vs-macros/stuff.blah b/third_party/python/gyp/test/win/vs-macros/stuff.blah
new file mode 100644
index 0000000000..d438b4a787
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/stuff.blah
@@ -0,0 +1 @@
+Random data file.
diff --git a/third_party/python/gyp/test/win/vs-macros/targetext.gyp b/third_party/python/gyp/test/win/vs-macros/targetext.gyp
new file mode 100644
index 0000000000..11f580e4a6
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/targetext.gyp
@@ -0,0 +1,59 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_targetext_executable',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\executable$(TargetExt)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetext_loadable_module',
+ 'type': 'loadable_module',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\loadable_module$(TargetExt)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetext_shared_library',
+ 'type': 'shared_library',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\shared_library$(TargetExt)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetext_static_library',
+ 'type': 'static_library',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLibrarianTool': {
+ 'OutputFile': '$(TargetDir)\\static_library$(TargetExt)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetext_product_extension',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'product_extension': 'library',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\product_extension$(TargetExt)',
+ },
+ },
+ },
+ ]
+}
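Each target above pins OutputFile to $(TargetExt) so the test can check the per-type extension; a minimal sketch of the mapping being exercised, assuming product_extension, when set, overrides the default:

    # Sketch: default extension per target type; 'product_extension'
    # is assumed to override it when present.
    DEFAULT_EXT = {
        'executable': '.exe',
        'shared_library': '.dll',
        'loadable_module': '.dll',
        'static_library': '.lib',
    }

    def target_ext(target_type, product_extension=None):
        if product_extension:
            return '.' + product_extension
        return DEFAULT_EXT[target_type]

    print(target_ext('executable'))             # .exe
    print(target_ext('executable', 'library'))  # .library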
diff --git a/third_party/python/gyp/test/win/vs-macros/targetfilename.gyp b/third_party/python/gyp/test/win/vs-macros/targetfilename.gyp
new file mode 100644
index 0000000000..8287320278
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/targetfilename.gyp
@@ -0,0 +1,59 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_targetfilename_executable',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetFileName)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetfilename_loadable_module',
+ 'type': 'loadable_module',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetFileName)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetfilename_shared_library',
+      'type': 'shared_library',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetFileName)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetfilename_static_library',
+ 'type': 'static_library',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLibrarianTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetFileName)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetfilename_product_extension',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'product_extension': 'foo',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetFileName)',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/vs-macros/targetname.gyp b/third_party/python/gyp/test/win/vs-macros/targetname.gyp
new file mode 100644
index 0000000000..a53d3c0aa3
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/targetname.gyp
@@ -0,0 +1,52 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_targetname',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetName)_plus_something1.exe',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetname_with_prefix',
+ 'product_prefix': 'prod_prefix',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetName)_plus_something2.exe',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetname_with_prodname',
+ 'product_name': 'prod_name',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetName)_plus_something3.exe',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetname_with_prodname_with_prefix',
+ 'product_name': 'prod_name',
+ 'product_prefix': 'prod_prefix',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetDir)\\$(TargetName)_plus_something4.exe',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/vs-macros/targetpath.gyp b/third_party/python/gyp/test/win/vs-macros/targetpath.gyp
new file mode 100644
index 0000000000..a8699ffb25
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/targetpath.gyp
@@ -0,0 +1,59 @@
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_targetpath_executable',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetPath)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetpath_loadable_module',
+ 'type': 'loadable_module',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetPath)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetpath_shared_library',
+      'type': 'shared_library',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetPath)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetpath_static_library',
+ 'type': 'static_library',
+ 'sources': ['hello.cc'],
+ 'msvs_settings': {
+ 'VCLibrarianTool': {
+ 'OutputFile': '$(TargetPath)',
+ },
+ },
+ },
+ {
+ 'target_name': 'test_targetpath_product_extension',
+ 'type': 'executable',
+ 'sources': ['hello.cc'],
+ 'product_extension': 'foo',
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'OutputFile': '$(TargetPath)',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/vs-macros/test_exists.py b/third_party/python/gyp/test/win/vs-macros/test_exists.py
new file mode 100644
index 0000000000..297b1b7d9f
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/test_exists.py
@@ -0,0 +1,10 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import os
+import sys
+
+if not os.path.exists(sys.argv[1]):
+ raise Exception()
+open(sys.argv[2], 'w').close()
diff --git a/third_party/python/gyp/test/win/vs-macros/vcinstalldir.gyp b/third_party/python/gyp/test/win/vs-macros/vcinstalldir.gyp
new file mode 100644
index 0000000000..3763a4eb18
--- /dev/null
+++ b/third_party/python/gyp/test/win/vs-macros/vcinstalldir.gyp
@@ -0,0 +1,41 @@
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'test_slash_trailing',
+ 'type': 'none',
+ 'msvs_cygwin_shell': '0',
+ 'actions': [
+ {
+ 'action_name': 'root',
+ 'inputs': [],
+ 'outputs': ['out1'],
+ 'action': ['python', 'test_exists.py', '$(VCInstallDir)', 'out1']
+ },
+ ],
+ },
+ {
+ 'target_name': 'test_slash_dir',
+ 'type': 'none',
+ 'msvs_cygwin_shell': '0',
+ 'actions': [
+ {
+ 'action_name': 'bin',
+ 'inputs': [],
+ 'outputs': ['out2'],
+ 'action': ['python', 'test_exists.py', '$(VCInstallDir)bin', 'out2'],
+ },
+ {
+ 'action_name': 'compiler',
+ 'inputs': [],
+ 'outputs': ['out3'],
+ 'action': [
+ 'python', 'test_exists.py', '$(VCInstallDir)bin\\cl.exe', 'out3'],
+ },
+ ],
+ },
+ ]
+}
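Both 'test_slash_dir' actions assume $(VCInstallDir) expands with a trailing separator, so plain concatenation yields a valid path; a tiny sketch with a stand-in value:

    # Sketch: '$(VCInstallDir)' is expected to expand with a trailing
    # backslash. The value below is a stand-in, not a real install path.
    vc_install_dir = 'C:\\Program Files\\Microsoft Visual Studio\\VC\\'
    print(vc_install_dir + 'bin')          # ...\VC\bin
    print(vc_install_dir + 'bin\\cl.exe')  # ...\VC\bin\cl.exe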
diff --git a/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.c b/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.c
new file mode 100644
index 0000000000..a6bee029ab
--- /dev/null
+++ b/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.c
@@ -0,0 +1,10 @@
+// Copyright (c) 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "win-driver-target-type.h"
+
+NTSTATUS DriverEntry(_In_ struct _DRIVER_OBJECT *DriverObject,
+ _In_ PUNICODE_STRING RegistryPath) {
+ return STATUS_SUCCESS;
+}
diff --git a/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.gyp b/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.gyp
new file mode 100644
index 0000000000..5da9cc9bd8
--- /dev/null
+++ b/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.gyp
@@ -0,0 +1,32 @@
+# Copyright (c) 2016 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'win_driver_target_type',
+ 'type': 'windows_driver',
+ 'msvs_target_version': 'Windows7',
+ 'sources': [
+ 'win-driver-target-type.c',
+ 'win-driver-target-type.h',
+ 'win-driver-target-type.rc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': [
+ 'wdmsec.lib',
+ 'ntoskrnl.lib',
+ 'hal.lib',
+ 'wmilib.lib',
+ 'bufferoverflowfastfailk.lib',
+ ],
+ },
+ 'VCCLCompilerTool': {
+ 'WarnAsError': 'false',
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.h b/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.h
new file mode 100644
index 0000000000..5bbffd2373
--- /dev/null
+++ b/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef _WIN_DRIVER_TARGET_TYPE_H_
+#define _WIN_DRIVER_TARGET_TYPE_H_
+
+#include <ntifs.h>
+#include <ntdddisk.h>
+
+DRIVER_INITIALIZE DriverEntry;
+
+#endif
diff --git a/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.rc b/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.rc
new file mode 100644
index 0000000000..7a037ef736
--- /dev/null
+++ b/third_party/python/gyp/test/win/win-driver-target-type/win-driver-target-type.rc
@@ -0,0 +1,14 @@
+// Copyright (c) 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <ntverp.h>
+
+#define VER_FILETYPE VFT_DRV
+#define VER_FILESUBTYPE VFT2_DRV_SYSTEM
+#define VER_FILEDESCRIPTION_STR "Windows Driver GYP target type"
+#define VER_INTERNALNAME_STR "win-driver-target-type.sys"
+#define VER_ORIGINALFILENAME_STR "win-driver-target-type.sys"
+
+#include "common.ver"
diff --git a/third_party/python/gyp/test/win/win-tool/copies_readonly_files.gyp b/third_party/python/gyp/test/win/win-tool/copies_readonly_files.gyp
new file mode 100644
index 0000000000..3cd7e69f1a
--- /dev/null
+++ b/third_party/python/gyp/test/win/win-tool/copies_readonly_files.gyp
@@ -0,0 +1,29 @@
+{
+ 'targets': [
+ {
+ 'target_name': 'foo',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/dest',
+ 'files': [
+ 'read-only-file',
+ ],
+ },
+ ],
+ }, # target: foo
+
+ {
+ 'target_name': 'bar',
+ 'type': 'none',
+ 'copies': [
+ {
+ 'destination': '<(PRODUCT_DIR)/dest',
+ 'files': [
+ 'subdir/',
+ ],
+ },
+ ],
+ }, # target: bar
+ ],
+}
diff --git a/third_party/python/gyp/test/win/win-tool/gyptest-win-tool-handles-readonly-files.py b/third_party/python/gyp/test/win/win-tool/gyptest-win-tool-handles-readonly-files.py
new file mode 100644
index 0000000000..951b952775
--- /dev/null
+++ b/third_party/python/gyp/test/win/win-tool/gyptest-win-tool-handles-readonly-files.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2014 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Make sure overwriting read-only files works as expected (via win-tool).
+"""
+
+import TestGyp
+
+import filecmp
+import os
+import stat
+import sys
+
+if sys.platform == 'win32':
+ test = TestGyp.TestGyp(formats=['ninja'])
+
+ # First, create the source files.
+ os.makedirs('subdir')
+ read_only_files = ['read-only-file', 'subdir/A', 'subdir/B', 'subdir/C']
+ for f in read_only_files:
+ test.write(f, 'source_contents')
+ test.chmod(f, stat.S_IREAD)
+ if os.access(f, os.W_OK):
+ test.fail_test()
+
+  # Second, create the read-only destination files. Note that we create them
+  # where ninja and win-tool will copy to, in order to test that copies
+  # overwrite read-only destination files.
+ os.makedirs(test.built_file_path('dest/subdir'))
+ for f in read_only_files:
+ f = os.path.join('dest', f)
+ test.write(test.built_file_path(f), 'SHOULD BE OVERWRITTEN')
+ test.chmod(test.built_file_path(f), stat.S_IREAD)
+ # Ensure not writable.
+ if os.access(test.built_file_path(f), os.W_OK):
+ test.fail_test()
+
+ test.run_gyp('copies_readonly_files.gyp')
+ test.build('copies_readonly_files.gyp')
+
+ # Check the destination files were overwritten by ninja.
+ for f in read_only_files:
+ f = os.path.join('dest', f)
+ test.must_contain(test.built_file_path(f), 'source_contents')
+
+ # This will fail if the files are not the same mode or contents.
+ for f in read_only_files:
+ if not filecmp.cmp(f, test.built_file_path(os.path.join('dest', f))):
+ test.fail_test()
+
+ test.pass_test()
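For reference, overwriting a read-only destination on Windows requires clearing the read-only bit before the copy; a minimal sketch of that step (the helper name is illustrative, not win-tool's actual code):

    # Sketch: make a read-only destination writable, then overwrite it.
    import os
    import shutil
    import stat

    def force_copy(src, dst):
        if os.path.exists(dst) and not os.access(dst, os.W_OK):
            os.chmod(dst, stat.S_IWRITE)  # drop the read-only attribute
        shutil.copy2(src, dst)            # copies contents and mode bits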
diff --git a/third_party/python/gyp/test/win/winrt-app-type-revision/dllmain.cc b/third_party/python/gyp/test/win/winrt-app-type-revision/dllmain.cc
new file mode 100644
index 0000000000..dedd83c3f6
--- /dev/null
+++ b/third_party/python/gyp/test/win/winrt-app-type-revision/dllmain.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <wrl.h>
+#include <wrl/wrappers/corewrappers.h>
+#include <windows.graphics.display.h>
+
+using namespace Microsoft::WRL;
+using namespace Microsoft::WRL::Wrappers;
+using namespace ABI::Windows::Foundation;
+using namespace ABI::Windows::Graphics::Display;
+
+bool TryToUseSomeWinRT() {
+ ComPtr<IDisplayPropertiesStatics> dp;
+ HStringReference s(RuntimeClass_Windows_Graphics_Display_DisplayProperties);
+ HRESULT hr = GetActivationFactory(s.Get(), dp.GetAddressOf());
+ if (SUCCEEDED(hr)) {
+ float dpi = 96.0f;
+ if (SUCCEEDED(dp->get_LogicalDpi(&dpi))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+BOOL WINAPI DllMain(HINSTANCE hinstance, DWORD reason, LPVOID reserved) {
+ return TRUE;
+}
diff --git a/third_party/python/gyp/test/win/winrt-app-type-revision/winrt-app-type-revison.gyp b/third_party/python/gyp/test/win/winrt-app-type-revision/winrt-app-type-revison.gyp
new file mode 100644
index 0000000000..5f37b5a2ab
--- /dev/null
+++ b/third_party/python/gyp/test/win/winrt-app-type-revision/winrt-app-type-revison.gyp
@@ -0,0 +1,43 @@
+# Copyright (c) 2013 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'enable_winrt_81_revision_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'msvs_application_type_revision': '8.1',
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ },
+ {
+ 'target_name': 'enable_winrt_82_revision_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'msvs_application_type_revision': '8.2',
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ },
+ {
+ 'target_name': 'enable_winrt_invalid_revision_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'msvs_application_type_revision': '999',
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'AdditionalDependencies': [
+ '%(AdditionalDependencies)',
+ ],
+ },
+ },
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/win/winrt-target-platform-version/dllmain.cc b/third_party/python/gyp/test/win/winrt-target-platform-version/dllmain.cc
new file mode 100644
index 0000000000..d71460c924
--- /dev/null
+++ b/third_party/python/gyp/test/win/winrt-target-platform-version/dllmain.cc
@@ -0,0 +1,30 @@
+// Copyright (c) 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+#include <wrl.h>
+#include <wrl/wrappers/corewrappers.h>
+#include <windows.graphics.display.h>
+
+using namespace Microsoft::WRL;
+using namespace Microsoft::WRL::Wrappers;
+using namespace ABI::Windows::Foundation;
+using namespace ABI::Windows::Graphics::Display;
+
+bool TryToUseSomeWinRT() {
+ ComPtr<IDisplayPropertiesStatics> dp;
+ HStringReference s(RuntimeClass_Windows_Graphics_Display_DisplayProperties);
+ HRESULT hr = GetActivationFactory(s.Get(), dp.GetAddressOf());
+ if (SUCCEEDED(hr)) {
+ float dpi = 96.0f;
+ if (SUCCEEDED(dp->get_LogicalDpi(&dpi))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+BOOL WINAPI DllMain(HINSTANCE hinstance, DWORD reason, LPVOID reserved) {
+ return TRUE;
+}
diff --git a/third_party/python/gyp/test/win/winrt-target-platform-version/winrt-target-platform-version.gyp b/third_party/python/gyp/test/win/winrt-target-platform-version/winrt-target-platform-version.gyp
new file mode 100644
index 0000000000..dbcfac6962
--- /dev/null
+++ b/third_party/python/gyp/test/win/winrt-target-platform-version/winrt-target-platform-version.gyp
@@ -0,0 +1,49 @@
+# Copyright (c) 2015 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'enable_winrt_10_platversion_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'msvs_application_type_revision': '10.0',
+ 'msvs_target_platform_version':'10.0.10240.0',
+ 'msvs_target_platform_minversion':'10.0.10240.0',
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ },
+ {
+ 'target_name': 'enable_winrt_10_platversion_nominver_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'msvs_application_type_revision': '10.0',
+ 'msvs_target_platform_version':'10.0.10240.0',
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ },
+ {
+ 'target_name': 'enable_winrt_9_platversion_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'msvs_application_type_revision': '10.0',
+ 'msvs_target_platform_version':'9.0.0.0',
+ 'msvs_target_platform_minversion':'9.0.0.0',
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ },
+ {
+ 'target_name': 'enable_winrt_missing_platversion_dll',
+ 'type': 'shared_library',
+ 'msvs_enable_winrt': 1,
+ 'msvs_application_type_revision': '10.0',
+ 'sources': [
+ 'dllmain.cc',
+ ],
+ },
+ ]
+}
diff --git a/third_party/python/gyp/test/xcode-ninja/list_excluded/gyptest-all.py b/third_party/python/gyp/test/xcode-ninja/list_excluded/gyptest-all.py
new file mode 100644
index 0000000000..2d6378a7a2
--- /dev/null
+++ b/third_party/python/gyp/test/xcode-ninja/list_excluded/gyptest-all.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Verifies that excluded files are listed in sources_for_indexing.xcodeproj by
+default, and that the generator flag xcode_ninja_list_excluded_files can be
+used to override the default behavior.
+"""
+
+import os
+import TestGyp
+
+
+test = TestGyp.TestGyp()
+
+if test.format != 'xcode-ninja':
+ test.skip_test()
+
+
+# With the generator flag not set.
+test.run_gyp('hello_exclude.gyp')
+test.must_contain(
+ 'sources_for_indexing.xcodeproj/project.pbxproj', 'hello_excluded.cpp')
+
+
+# With the generator flag set to 0.
+try:
+ os.environ['GYP_GENERATOR_FLAGS'] = 'xcode_ninja_list_excluded_files=0'
+ test.run_gyp('hello_exclude.gyp')
+finally:
+ del os.environ['GYP_GENERATOR_FLAGS']
+test.must_not_contain(
+ 'sources_for_indexing.xcodeproj/project.pbxproj', 'hello_excluded.cpp')
+
+
+# With the generator flag explicitly set to 1.
+try:
+ os.environ['GYP_GENERATOR_FLAGS'] = 'xcode_ninja_list_excluded_files=1'
+ test.run_gyp('hello_exclude.gyp')
+finally:
+ del os.environ['GYP_GENERATOR_FLAGS']
+test.must_contain(
+ 'sources_for_indexing.xcodeproj/project.pbxproj', 'hello_excluded.cpp')
+
+
+test.pass_test()
diff --git a/third_party/python/gyp/test/xcode-ninja/list_excluded/hello.cpp b/third_party/python/gyp/test/xcode-ninja/list_excluded/hello.cpp
new file mode 100644
index 0000000000..cd409dabf9
--- /dev/null
+++ b/third_party/python/gyp/test/xcode-ninja/list_excluded/hello.cpp
@@ -0,0 +1,7 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 0;
+}
diff --git a/third_party/python/gyp/test/xcode-ninja/list_excluded/hello_exclude.gyp b/third_party/python/gyp/test/xcode-ninja/list_excluded/hello_exclude.gyp
new file mode 100644
index 0000000000..f5f0e8eafd
--- /dev/null
+++ b/third_party/python/gyp/test/xcode-ninja/list_excluded/hello_exclude.gyp
@@ -0,0 +1,19 @@
+# Copyright (c) 2016 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'targets': [
+ {
+ 'target_name': 'hello',
+ 'type': 'executable',
+ 'sources': [
+ 'hello.cpp',
+ 'hello_excluded.cpp',
+ ],
+ 'sources!': [
+ 'hello_excluded.cpp',
+ ],
+ },
+ ],
+}
diff --git a/third_party/python/gyp/test/xcode-ninja/list_excluded/hello_excluded.cpp b/third_party/python/gyp/test/xcode-ninja/list_excluded/hello_excluded.cpp
new file mode 100644
index 0000000000..2115529542
--- /dev/null
+++ b/third_party/python/gyp/test/xcode-ninja/list_excluded/hello_excluded.cpp
@@ -0,0 +1,7 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+int main() {
+ return 42;
+}
diff --git a/third_party/python/gyp/tools/README b/third_party/python/gyp/tools/README
new file mode 100644
index 0000000000..712e4efbb7
--- /dev/null
+++ b/third_party/python/gyp/tools/README
@@ -0,0 +1,15 @@
+pretty_vcproj:
+ Usage: pretty_vcproj.py "c:\path\to\vcproj.vcproj" [key1=value1] [key2=value2]
+
+ The key/value pairs are used to resolve vsprops names.
+
+ For example, if I want to diff the base.vcproj project:
+
+ pretty_vcproj.py z:\dev\src-chrome\src\base\build\base.vcproj "$(SolutionDir)=z:\dev\src-chrome\src\chrome\\" "$(CHROMIUM_BUILD)=" "$(CHROME_BUILD_TYPE)=" > original.txt
+ pretty_vcproj.py z:\dev\src-chrome\src\base\base_gyp.vcproj "$(SolutionDir)=z:\dev\src-chrome\src\chrome\\" "$(CHROMIUM_BUILD)=" "$(CHROME_BUILD_TYPE)=" > gyp.txt
+
+ And you can use your favorite diff tool to see the changes.
+
+ Note: In the case of base.vcproj, the original vcproj is one level above the generated one.
+ I suggest you search for '"..\' and replace it with '"' in original.txt
+ before you perform the diff. \ No newline at end of file
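The key/value pairs amount to literal macro substitution over the vcproj text; a minimal sketch, assuming each key is the exact '$(NAME)' token as it appears in the file:

    # Sketch: resolve '$(NAME)=value' pairs by literal substitution.
    def resolve_macros(text, pairs):
        for key, value in pairs:
            text = text.replace(key, value)
        return text

    pairs = [('$(SolutionDir)', 'z:\\dev\\src-chrome\\src\\chrome\\'),
             ('$(CHROMIUM_BUILD)', ''),
             ('$(CHROME_BUILD_TYPE)', '')]
    print(resolve_macros('OutputDirectory="$(SolutionDir)Debug"', pairs))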
diff --git a/third_party/python/gyp/tools/Xcode/README b/third_party/python/gyp/tools/Xcode/README
new file mode 100644
index 0000000000..2492a2c2f8
--- /dev/null
+++ b/third_party/python/gyp/tools/Xcode/README
@@ -0,0 +1,5 @@
+The Specifications directory contains syntax formatters for Xcode 3; they do not appear to be supported on Xcode 4 yet. To use them with Xcode 3, please install both the gyp.pbfilespec and gyp.xclangspec files in
+
+~/Library/Application Support/Developer/Shared/Xcode/Specifications/
+
+and restart Xcode. \ No newline at end of file
diff --git a/third_party/python/gyp/tools/Xcode/Specifications/gyp.pbfilespec b/third_party/python/gyp/tools/Xcode/Specifications/gyp.pbfilespec
new file mode 100644
index 0000000000..85e2e268a5
--- /dev/null
+++ b/third_party/python/gyp/tools/Xcode/Specifications/gyp.pbfilespec
@@ -0,0 +1,27 @@
+/*
+ gyp.pbfilespec
+ GYP source file spec for Xcode 3
+
+ There is not much documentation available regarding the format
+ of .pbfilespec files. As a starting point, see for instance the
+ outdated documentation at:
+ http://maxao.free.fr/xcode-plugin-interface/specifications.html
+ and the files in:
+ /Developer/Library/PrivateFrameworks/XcodeEdit.framework/Versions/A/Resources/
+
+ Place this file in directory:
+ ~/Library/Application Support/Developer/Shared/Xcode/Specifications/
+*/
+
+(
+ {
+ Identifier = sourcecode.gyp;
+ BasedOn = sourcecode;
+ Name = "GYP Files";
+ Extensions = ("gyp", "gypi");
+ MIMETypes = ("text/gyp");
+ Language = "xcode.lang.gyp";
+ IsTextFile = YES;
+ IsSourceFile = YES;
+ }
+)
diff --git a/third_party/python/gyp/tools/Xcode/Specifications/gyp.xclangspec b/third_party/python/gyp/tools/Xcode/Specifications/gyp.xclangspec
new file mode 100644
index 0000000000..3b3506d319
--- /dev/null
+++ b/third_party/python/gyp/tools/Xcode/Specifications/gyp.xclangspec
@@ -0,0 +1,226 @@
+/*
+ Copyright (c) 2011 Google Inc. All rights reserved.
+ Use of this source code is governed by a BSD-style license that can be
+ found in the LICENSE file.
+
+ gyp.xclangspec
+ GYP language specification for Xcode 3
+
+ There is not much documentation available regarding the format
+ of .xclangspec files. As a starting point, see for instance the
+ outdated documentation at:
+ http://maxao.free.fr/xcode-plugin-interface/specifications.html
+ and the files in:
+ /Developer/Library/PrivateFrameworks/XcodeEdit.framework/Versions/A/Resources/
+
+ Place this file in directory:
+ ~/Library/Application Support/Developer/Shared/Xcode/Specifications/
+*/
+
+(
+
+ {
+ Identifier = "xcode.lang.gyp.keyword";
+ Syntax = {
+ Words = (
+ "and",
+ "or",
+ "<!",
+ "<",
+ );
+ Type = "xcode.syntax.keyword";
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.target.declarator";
+ Syntax = {
+ Words = (
+ "'target_name'",
+ );
+ Type = "xcode.syntax.identifier.type";
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.string.singlequote";
+ Syntax = {
+ IncludeRules = (
+ "xcode.lang.string",
+ "xcode.lang.gyp.keyword",
+ "xcode.lang.number",
+ );
+ Start = "'";
+ End = "'";
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.comma";
+ Syntax = {
+ Words = ( ",", );
+
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp";
+ Description = "GYP Coloring";
+ BasedOn = "xcode.lang.simpleColoring";
+ IncludeInMenu = YES;
+ Name = "GYP";
+ Syntax = {
+ Tokenizer = "xcode.lang.gyp.lexer.toplevel";
+ IncludeRules = (
+ "xcode.lang.gyp.dictionary",
+ );
+ Type = "xcode.syntax.plain";
+ };
+ },
+
+ // The following rule returns tokens to the other rules
+ {
+ Identifier = "xcode.lang.gyp.lexer";
+ Syntax = {
+ IncludeRules = (
+ "xcode.lang.gyp.comment",
+ "xcode.lang.string",
+ "xcode.lang.gyp.target.declarator",
+ "xcode.lang.gyp.string.singlequote",
+ "xcode.lang.number",
+ "xcode.lang.gyp.comma",
+ );
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.lexer.toplevel";
+ Syntax = {
+ IncludeRules = (
+ "xcode.lang.gyp.comment",
+ );
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.assignment";
+ Syntax = {
+ Tokenizer = "xcode.lang.gyp.lexer";
+ Rules = (
+ "xcode.lang.gyp.assignment.lhs",
+ ":",
+ "xcode.lang.gyp.assignment.rhs",
+ );
+ };
+
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.target.declaration";
+ Syntax = {
+ Tokenizer = "xcode.lang.gyp.lexer";
+ Rules = (
+ "xcode.lang.gyp.target.declarator",
+ ":",
+ "xcode.lang.gyp.target.name",
+ );
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.target.name";
+ Syntax = {
+ Tokenizer = "xcode.lang.gyp.lexer";
+ Rules = (
+ "xcode.lang.gyp.string.singlequote",
+ );
+ Type = "xcode.syntax.definition.function";
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.assignment.lhs";
+ Syntax = {
+ Tokenizer = "xcode.lang.gyp.lexer";
+ Rules = (
+ "xcode.lang.gyp.string.singlequote",
+ );
+ Type = "xcode.syntax.identifier.type";
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.assignment.rhs";
+ Syntax = {
+ Tokenizer = "xcode.lang.gyp.lexer";
+ Rules = (
+ "xcode.lang.gyp.string.singlequote?",
+ "xcode.lang.gyp.array?",
+ "xcode.lang.gyp.dictionary?",
+ "xcode.lang.number?",
+ );
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.dictionary";
+ Syntax = {
+ Tokenizer = "xcode.lang.gyp.lexer";
+ Start = "{";
+ End = "}";
+ Foldable = YES;
+ Recursive = YES;
+ IncludeRules = (
+ "xcode.lang.gyp.target.declaration",
+ "xcode.lang.gyp.assignment",
+ );
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.array";
+ Syntax = {
+ Tokenizer = "xcode.lang.gyp.lexer";
+ Start = "[";
+ End = "]";
+ Foldable = YES;
+ Recursive = YES;
+ IncludeRules = (
+ "xcode.lang.gyp.array",
+ "xcode.lang.gyp.dictionary",
+ "xcode.lang.gyp.string.singlequote",
+ );
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.todo.mark";
+ Syntax = {
+ StartChars = "T";
+ Match = (
+ "^\(TODO\(.*\):[ \t]+.*\)$", // include "TODO: " in the markers list
+ );
+ // This is the order of captures. All of the match strings above need the same order.
+ CaptureTypes = (
+ "xcode.syntax.mark"
+ );
+ Type = "xcode.syntax.comment";
+ };
+ },
+
+ {
+ Identifier = "xcode.lang.gyp.comment";
+ BasedOn = "xcode.lang.comment"; // for text macros
+ Syntax = {
+ Start = "#";
+ End = "\n";
+ IncludeRules = (
+ "xcode.lang.url",
+ "xcode.lang.url.mail",
+ "xcode.lang.comment.mark",
+ "xcode.lang.gyp.todo.mark",
+ );
+ Type = "xcode.syntax.comment";
+ };
+ },
+)
diff --git a/third_party/python/gyp/tools/emacs/README b/third_party/python/gyp/tools/emacs/README
new file mode 100644
index 0000000000..eeef39f41b
--- /dev/null
+++ b/third_party/python/gyp/tools/emacs/README
@@ -0,0 +1,12 @@
+How to install gyp-mode for emacs:
+
+Add the following to your ~/.emacs (replace ... with the path to your gyp
+checkout).
+
+(setq load-path (cons ".../tools/emacs" load-path))
+(require 'gyp)
+
+Restart emacs (or eval-region the added lines) and you should be all set.
+
+Please note that ert is required for running the tests; it is included in
+Emacs 24 and available separately from https://github.com/ohler/ert
diff --git a/third_party/python/gyp/tools/emacs/gyp-tests.el b/third_party/python/gyp/tools/emacs/gyp-tests.el
new file mode 100644
index 0000000000..11b8497886
--- /dev/null
+++ b/third_party/python/gyp/tools/emacs/gyp-tests.el
@@ -0,0 +1,63 @@
+;;; gyp-tests.el - unit tests for gyp-mode.
+
+;; Copyright (c) 2012 Google Inc. All rights reserved.
+;; Use of this source code is governed by a BSD-style license that can be
+;; found in the LICENSE file.
+
+;; The recommended way to run these tests is from the command line, with the
+;; run-unit-tests.sh script.
+
+(require 'cl)
+(require 'ert)
+(require 'gyp)
+
+(defconst samples (directory-files "testdata" t ".gyp$")
+ "List of golden samples to check")
+
+(defun fontify (filename)
+ (with-temp-buffer
+ (insert-file-contents-literally filename)
+ (gyp-mode)
+ (font-lock-fontify-buffer)
+ (buffer-string)))
+
+(defun read-golden-sample (filename)
+ (with-temp-buffer
+ (insert-file-contents-literally (concat filename ".fontified"))
+ (read (current-buffer))))
+
+(defun equivalent-face (face)
+ "For the purposes of face comparison, we're not interested in the
+ differences between certain faces. For example, the difference between
+ font-lock-comment-delimiter and font-lock-comment-face."
+ (case face
+ ((font-lock-comment-delimiter-face) font-lock-comment-face)
+ (t face)))
+
+(defun text-face-properties (s)
+ "Extract the text properties from s"
+ (let ((result (list t)))
+ (dotimes (i (length s))
+ (setq result (cons (equivalent-face (get-text-property i 'face s))
+ result)))
+ (nreverse result)))
+
+(ert-deftest test-golden-samples ()
+ "Check that fontification produces the same results as the golden samples"
+ (dolist (sample samples)
+ (let ((golden (read-golden-sample sample))
+ (fontified (fontify sample)))
+ (should (equal golden fontified))
+ (should (equal (text-face-properties golden)
+ (text-face-properties fontified))))))
+
+(defun create-golden-sample (filename)
+ "Create a golden sample by fontifying filename and writing out the printable
+ representation of the fontified buffer (with text properties) to the
+ FILENAME.fontified"
+ (with-temp-file (concat filename ".fontified")
+ (print (fontify filename) (current-buffer))))
+
+(defun create-golden-samples ()
+ "Recreate the golden samples"
+ (dolist (sample samples) (create-golden-sample sample)))
diff --git a/third_party/python/gyp/tools/emacs/gyp.el b/third_party/python/gyp/tools/emacs/gyp.el
new file mode 100644
index 0000000000..b98b155ced
--- /dev/null
+++ b/third_party/python/gyp/tools/emacs/gyp.el
@@ -0,0 +1,275 @@
+;;; gyp.el - font-lock-mode support for gyp files.
+
+;; Copyright (c) 2012 Google Inc. All rights reserved.
+;; Use of this source code is governed by a BSD-style license that can be
+;; found in the LICENSE file.
+
+;; Put this somewhere in your load-path and
+;; (require 'gyp)
+
+(require 'python)
+(require 'cl)
+
+(when (string-match "python-mode.el" (symbol-file 'python-mode 'defun))
+ (error (concat "python-mode must be loaded from python.el (bundled with "
+ "recent emacsen), not from the older and less maintained "
+ "python-mode.el")))
+
+(defadvice python-indent-calculate-levels (after gyp-outdent-closing-parens
+ activate)
+ "De-indent closing parens, braces, and brackets in gyp-mode."
+ (when (and (eq major-mode 'gyp-mode)
+ (string-match "^ *[])}][],)}]* *$"
+ (buffer-substring-no-properties
+ (line-beginning-position) (line-end-position))))
+ (setf (first python-indent-levels)
+ (- (first python-indent-levels) python-continuation-offset))))
+
+(defadvice python-indent-guess-indent-offset (around
+ gyp-indent-guess-indent-offset
+ activate)
+ "Guess correct indent offset in gyp-mode."
+ (or (and (not (eq major-mode 'gyp-mode))
+ ad-do-it)
+ (save-excursion
+ (save-restriction
+ (widen)
+ (goto-char (point-min))
+ ;; Find first line ending with an opening brace that is not a comment.
+ (or (and (re-search-forward "\\(^[[{]$\\|^.*[^#].*[[{]$\\)")
+ (forward-line)
+ (/= (current-indentation) 0)
+ (set (make-local-variable 'python-indent-offset)
+ (current-indentation))
+ (set (make-local-variable 'python-continuation-offset)
+ (current-indentation)))
+ (message "Can't guess gyp indent offset, using default: %s"
+ python-continuation-offset))))))
+
+(define-derived-mode gyp-mode python-mode "Gyp"
+ "Major mode for editing .gyp files. See http://code.google.com/p/gyp/"
+ ;; gyp-parse-history is a stack of (POSITION . PARSE-STATE) tuples,
+ ;; with greater positions at the top of the stack. PARSE-STATE
+ ;; is a list of section symbols (see gyp-section-name and gyp-parse-to)
+ ;; with most nested section symbol at the front of the list.
+ (set (make-local-variable 'gyp-parse-history) '((1 . (list))))
+ (gyp-add-font-lock-keywords))
+
+(defun gyp-set-indentation ()
+ "Hook function to configure python indentation to suit gyp mode."
+ (set (make-local-variable 'python-indent-offset) 2)
+ (set (make-local-variable 'python-continuation-offset) 2)
+ (set (make-local-variable 'python-indent-guess-indent-offset) t)
+ (python-indent-guess-indent-offset))
+
+(add-hook 'gyp-mode-hook 'gyp-set-indentation)
+
+(add-to-list 'auto-mode-alist '("\\.gyp\\'" . gyp-mode))
+(add-to-list 'auto-mode-alist '("\\.gypi\\'" . gyp-mode))
+(add-to-list 'auto-mode-alist '("/\\.gclient\\'" . gyp-mode))
+
+;;; Font-lock support
+
+(defconst gyp-dependencies-regexp
+ (regexp-opt (list "dependencies" "export_dependent_settings"))
+ "Regular expression to introduce 'dependencies' section")
+
+(defconst gyp-sources-regexp
+ (regexp-opt (list "action" "files" "include_dirs" "includes" "inputs"
+ "libraries" "outputs" "sources"))
+ "Regular expression to introduce 'sources' sections")
+
+(defconst gyp-conditions-regexp
+ (regexp-opt (list "conditions" "target_conditions"))
+ "Regular expression to introduce conditions sections")
+
+(defconst gyp-variables-regexp
+ "^variables"
+ "Regular expression to introduce variables sections")
+
+(defconst gyp-defines-regexp
+ "^defines"
+ "Regular expression to introduce 'defines' sections")
+
+(defconst gyp-targets-regexp
+ "^targets"
+ "Regular expression to introduce 'targets' sections")
+
+(defun gyp-section-name (section)
+ "Map the sections we are interested in from SECTION to symbol.
+
+ SECTION is a string from the buffer that introduces a section. The result is
+ a symbol representing the kind of section.
+
+ This allows us to treat (for the purposes of font-lock) several different
+ section names as the same kind of section. For example, a 'sources section
+ can be introduced by any of the 'sources', 'inputs', or 'outputs' keywords.
+
+ 'other is the default section kind when a more specific match is not made."
+ (cond ((string-match-p gyp-dependencies-regexp section) 'dependencies)
+ ((string-match-p gyp-sources-regexp section) 'sources)
+ ((string-match-p gyp-variables-regexp section) 'variables)
+ ((string-match-p gyp-conditions-regexp section) 'conditions)
+ ((string-match-p gyp-targets-regexp section) 'targets)
+ ((string-match-p gyp-defines-regexp section) 'defines)
+ (t 'other)))
+
+(defun gyp-invalidate-parse-states-after (target-point)
+ "Erase any parse information after target-point."
+ (while (> (caar gyp-parse-history) target-point)
+ (setq gyp-parse-history (cdr gyp-parse-history))))
+
+(defun gyp-parse-point ()
+ "The point of the last parse state added by gyp-parse-to."
+ (caar gyp-parse-history))
+
+(defun gyp-parse-sections ()
+ "A list of section symbols holding at the last parse state point."
+ (cdar gyp-parse-history))
+
+(defun gyp-inside-dictionary-p ()
+ "Predicate returning true if the parser is inside a dictionary."
+ (not (eq (cadar gyp-parse-history) 'list)))
+
+(defun gyp-add-parse-history (point sections)
+ "Add parse state SECTIONS to the parse history at POINT so that parsing can be
+ resumed instantly."
+ (while (>= (caar gyp-parse-history) point)
+ (setq gyp-parse-history (cdr gyp-parse-history)))
+ (setq gyp-parse-history (cons (cons point sections) gyp-parse-history)))
+
+(defun gyp-parse-to (target-point)
+ "Parses from (point) to TARGET-POINT adding the parse state information to
+ gyp-parse-state-history. Parsing stops if TARGET-POINT is reached or if a
+ string literal has been parsed. Returns nil if no further parsing can be
+ done, otherwise returns the position of the start of a parsed string, leaving
+ the point at the end of the string."
+ (let ((parsing t)
+ string-start)
+ (while parsing
+ (setq string-start nil)
+ ;; Parse up to a character that starts a sexp, or if the nesting
+ ;; level decreases.
+ (let ((state (parse-partial-sexp (gyp-parse-point)
+ target-point
+ -1
+ t))
+ (sections (gyp-parse-sections)))
+ (if (= (nth 0 state) -1)
+ (setq sections (cdr sections)) ; pop out a level
+ (cond ((looking-at-p "['\"]") ; a string
+ (setq string-start (point))
+ (goto-char (scan-sexps (point) 1))
+ (if (gyp-inside-dictionary-p)
+ ;; Look for sections inside a dictionary
+ (let ((section (gyp-section-name
+ (buffer-substring-no-properties
+ (+ 1 string-start)
+ (- (point) 1)))))
+ (setq sections (cons section (cdr sections)))))
+ ;; Stop after the string so it can be fontified.
+ (setq target-point (point)))
+ ((looking-at-p "{")
+ ;; Inside a dictionary. Increase nesting.
+ (forward-char 1)
+ (setq sections (cons 'unknown sections)))
+ ((looking-at-p "\\[")
+ ;; Inside a list. Increase nesting
+ (forward-char 1)
+ (setq sections (cons 'list sections)))
+ ((not (eobp))
+ ;; other
+ (forward-char 1))))
+ (gyp-add-parse-history (point) sections)
+ (setq parsing (< (point) target-point))))
+ string-start))
+
+(defun gyp-section-at-point ()
+ "Transform the last parse state, which is a list of nested sections and return
+ the section symbol that should be used to determine font-lock information for
+ the string. Can return nil indicating the string should not have any attached
+ section."
+ (let ((sections (gyp-parse-sections)))
+ (cond
+ ((eq (car sections) 'conditions)
+ ;; conditions can occur in a variables section, but we still want to
+ ;; highlight it as a keyword.
+ nil)
+ ((and (eq (car sections) 'list)
+ (eq (cadr sections) 'list))
+ ;; conditions and sources can have items in [[ ]]
+ (caddr sections))
+ (t (cadr sections)))))
+
+(defun gyp-section-match (limit)
+ "Parse from (point) to LIMIT returning by means of match data what was
+ matched. The group of the match indicates what style font-lock should apply.
+ See also `gyp-add-font-lock-keywords'."
+ (gyp-invalidate-parse-states-after (point))
+ (let ((group nil)
+ (string-start t))
+ (while (and (< (point) limit)
+ (not group)
+ string-start)
+ (setq string-start (gyp-parse-to limit))
+ (if string-start
+ (setq group (case (gyp-section-at-point)
+ ('dependencies 1)
+ ('variables 2)
+ ('conditions 2)
+ ('sources 3)
+ ('defines 4)
+ (nil nil)))))
+ (if group
+ (progn
+ ;; Set the match data to indicate to the font-lock mechanism the
+ ;; highlighting to be performed.
+ (set-match-data (append (list string-start (point))
+ (make-list (* (1- group) 2) nil)
+ (list (1+ string-start) (1- (point)))))
+ t))))
+
+;;; Please see http://code.google.com/p/gyp/wiki/GypLanguageSpecification for
+;;; canonical list of keywords.
+(defun gyp-add-font-lock-keywords ()
+ "Add gyp-mode keywords to font-lock mechanism."
+ ;; TODO(jknotten): Move all the keyword highlighting into gyp-section-match
+ ;; so that we can do the font-locking in a single font-lock pass.
+ (font-lock-add-keywords
+ nil
+ (list
+ ;; Top-level keywords
+ (list (concat "['\"]\\("
+ (regexp-opt (list "action" "action_name" "actions" "cflags"
+ "cflags_cc" "conditions" "configurations"
+ "copies" "defines" "dependencies" "destination"
+ "direct_dependent_settings"
+ "export_dependent_settings" "extension" "files"
+ "include_dirs" "includes" "inputs" "ldflags" "libraries"
+ "link_settings" "mac_bundle" "message"
+ "msvs_external_rule" "outputs" "product_name"
+ "process_outputs_as_sources" "rules" "rule_name"
+ "sources" "suppress_wildcard"
+ "target_conditions" "target_defaults"
+ "target_defines" "target_name" "toolsets"
+ "targets" "type" "variables" "xcode_settings"))
+ "[!/+=]?\\)") 1 'font-lock-keyword-face t)
+ ;; Type of target
+ (list (concat "['\"]\\("
+ (regexp-opt (list "loadable_module" "static_library"
+ "shared_library" "executable" "none"))
+ "\\)") 1 'font-lock-type-face t)
+ (list "\\(?:target\\|action\\)_name['\"]\\s-*:\\s-*['\"]\\([^ '\"]*\\)" 1
+ 'font-lock-function-name-face t)
+ (list 'gyp-section-match
+ (list 1 'font-lock-function-name-face t t) ; dependencies
+ (list 2 'font-lock-variable-name-face t t) ; variables, conditions
+ (list 3 'font-lock-constant-face t t) ; sources
+ (list 4 'font-lock-preprocessor-face t t)) ; preprocessor
+ ;; Variable expansion
+ (list "<@?(\\([^\n )]+\\))" 1 'font-lock-variable-name-face t)
+ ;; Command expansion
+ (list "<!@?(\\([^\n )]+\\))" 1 'font-lock-variable-name-face t)
+ )))
+
+(provide 'gyp)
diff --git a/third_party/python/gyp/tools/emacs/run-unit-tests.sh b/third_party/python/gyp/tools/emacs/run-unit-tests.sh
new file mode 100755
index 0000000000..6e62b9b28c
--- /dev/null
+++ b/third_party/python/gyp/tools/emacs/run-unit-tests.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+emacs --no-site-file --no-init-file --batch \
+ --load ert.el --load gyp.el --load gyp-tests.el \
+ -f ert-run-tests-batch-and-exit
diff --git a/third_party/python/gyp/tools/emacs/testdata/media.gyp b/third_party/python/gyp/tools/emacs/testdata/media.gyp
new file mode 100644
index 0000000000..29300fe1b8
--- /dev/null
+++ b/third_party/python/gyp/tools/emacs/testdata/media.gyp
@@ -0,0 +1,1105 @@
+# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ # Override to dynamically link the PulseAudio library.
+ 'use_pulseaudio%': 0,
+ # Override to dynamically link the cras (ChromeOS audio) library.
+ 'use_cras%': 0,
+ },
+ 'targets': [
+ {
+ 'target_name': 'media',
+ 'type': '<(component)',
+ 'dependencies': [
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ '../build/temp_gyp/googleurl.gyp:googleurl',
+ '../crypto/crypto.gyp:crypto',
+ '../third_party/openmax/openmax.gyp:il',
+ '../ui/ui.gyp:ui',
+ ],
+ 'defines': [
+ 'MEDIA_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'audio/android/audio_manager_android.cc',
+ 'audio/android/audio_manager_android.h',
+ 'audio/android/audio_track_output_android.cc',
+ 'audio/android/audio_track_output_android.h',
+ 'audio/android/opensles_input.cc',
+ 'audio/android/opensles_input.h',
+ 'audio/android/opensles_output.cc',
+ 'audio/android/opensles_output.h',
+ 'audio/async_socket_io_handler.h',
+ 'audio/async_socket_io_handler_posix.cc',
+ 'audio/async_socket_io_handler_win.cc',
+ 'audio/audio_buffers_state.cc',
+ 'audio/audio_buffers_state.h',
+ 'audio/audio_io.h',
+ 'audio/audio_input_controller.cc',
+ 'audio/audio_input_controller.h',
+ 'audio/audio_input_stream_impl.cc',
+ 'audio/audio_input_stream_impl.h',
+ 'audio/audio_device_name.cc',
+ 'audio/audio_device_name.h',
+ 'audio/audio_manager.cc',
+ 'audio/audio_manager.h',
+ 'audio/audio_manager_base.cc',
+ 'audio/audio_manager_base.h',
+ 'audio/audio_output_controller.cc',
+ 'audio/audio_output_controller.h',
+ 'audio/audio_output_dispatcher.cc',
+ 'audio/audio_output_dispatcher.h',
+ 'audio/audio_output_dispatcher_impl.cc',
+ 'audio/audio_output_dispatcher_impl.h',
+ 'audio/audio_output_mixer.cc',
+ 'audio/audio_output_mixer.h',
+ 'audio/audio_output_proxy.cc',
+ 'audio/audio_output_proxy.h',
+ 'audio/audio_parameters.cc',
+ 'audio/audio_parameters.h',
+ 'audio/audio_util.cc',
+ 'audio/audio_util.h',
+ 'audio/cross_process_notification.cc',
+ 'audio/cross_process_notification.h',
+ 'audio/cross_process_notification_win.cc',
+ 'audio/cross_process_notification_posix.cc',
+ 'audio/fake_audio_input_stream.cc',
+ 'audio/fake_audio_input_stream.h',
+ 'audio/fake_audio_output_stream.cc',
+ 'audio/fake_audio_output_stream.h',
+ 'audio/linux/audio_manager_linux.cc',
+ 'audio/linux/audio_manager_linux.h',
+ 'audio/linux/alsa_input.cc',
+ 'audio/linux/alsa_input.h',
+ 'audio/linux/alsa_output.cc',
+ 'audio/linux/alsa_output.h',
+ 'audio/linux/alsa_util.cc',
+ 'audio/linux/alsa_util.h',
+ 'audio/linux/alsa_wrapper.cc',
+ 'audio/linux/alsa_wrapper.h',
+ 'audio/linux/cras_output.cc',
+ 'audio/linux/cras_output.h',
+ 'audio/openbsd/audio_manager_openbsd.cc',
+ 'audio/openbsd/audio_manager_openbsd.h',
+ 'audio/mac/audio_input_mac.cc',
+ 'audio/mac/audio_input_mac.h',
+ 'audio/mac/audio_low_latency_input_mac.cc',
+ 'audio/mac/audio_low_latency_input_mac.h',
+ 'audio/mac/audio_low_latency_output_mac.cc',
+ 'audio/mac/audio_low_latency_output_mac.h',
+ 'audio/mac/audio_manager_mac.cc',
+ 'audio/mac/audio_manager_mac.h',
+ 'audio/mac/audio_output_mac.cc',
+ 'audio/mac/audio_output_mac.h',
+ 'audio/null_audio_sink.cc',
+ 'audio/null_audio_sink.h',
+ 'audio/pulse/pulse_output.cc',
+ 'audio/pulse/pulse_output.h',
+ 'audio/sample_rates.cc',
+ 'audio/sample_rates.h',
+ 'audio/simple_sources.cc',
+ 'audio/simple_sources.h',
+ 'audio/win/audio_low_latency_input_win.cc',
+ 'audio/win/audio_low_latency_input_win.h',
+ 'audio/win/audio_low_latency_output_win.cc',
+ 'audio/win/audio_low_latency_output_win.h',
+ 'audio/win/audio_manager_win.cc',
+ 'audio/win/audio_manager_win.h',
+ 'audio/win/avrt_wrapper_win.cc',
+ 'audio/win/avrt_wrapper_win.h',
+ 'audio/win/device_enumeration_win.cc',
+ 'audio/win/device_enumeration_win.h',
+ 'audio/win/wavein_input_win.cc',
+ 'audio/win/wavein_input_win.h',
+ 'audio/win/waveout_output_win.cc',
+ 'audio/win/waveout_output_win.h',
+ 'base/android/media_jni_registrar.cc',
+ 'base/android/media_jni_registrar.h',
+ 'base/audio_decoder.cc',
+ 'base/audio_decoder.h',
+ 'base/audio_decoder_config.cc',
+ 'base/audio_decoder_config.h',
+ 'base/audio_renderer.h',
+ 'base/audio_renderer_mixer.cc',
+ 'base/audio_renderer_mixer.h',
+ 'base/audio_renderer_mixer_input.cc',
+ 'base/audio_renderer_mixer_input.h',
+ 'base/bitstream_buffer.h',
+ 'base/buffers.cc',
+ 'base/buffers.h',
+ 'base/byte_queue.cc',
+ 'base/byte_queue.h',
+ 'base/channel_layout.cc',
+ 'base/channel_layout.h',
+ 'base/clock.cc',
+ 'base/clock.h',
+ 'base/composite_filter.cc',
+ 'base/composite_filter.h',
+ 'base/data_buffer.cc',
+ 'base/data_buffer.h',
+ 'base/data_source.cc',
+ 'base/data_source.h',
+ 'base/decoder_buffer.cc',
+ 'base/decoder_buffer.h',
+ 'base/decrypt_config.cc',
+ 'base/decrypt_config.h',
+ 'base/decryptor.h',
+ 'base/decryptor_client.h',
+ 'base/demuxer.cc',
+ 'base/demuxer.h',
+ 'base/demuxer_stream.cc',
+ 'base/demuxer_stream.h',
+ 'base/djb2.cc',
+ 'base/djb2.h',
+ 'base/filter_collection.cc',
+ 'base/filter_collection.h',
+ 'base/filter_host.h',
+ 'base/filters.cc',
+ 'base/filters.h',
+ 'base/h264_bitstream_converter.cc',
+ 'base/h264_bitstream_converter.h',
+ 'base/media.h',
+ 'base/media_android.cc',
+ 'base/media_export.h',
+ 'base/media_log.cc',
+ 'base/media_log.h',
+ 'base/media_log_event.h',
+ 'base/media_posix.cc',
+ 'base/media_switches.cc',
+ 'base/media_switches.h',
+ 'base/media_win.cc',
+ 'base/message_loop_factory.cc',
+ 'base/message_loop_factory.h',
+ 'base/pipeline.cc',
+ 'base/pipeline.h',
+ 'base/pipeline_status.cc',
+ 'base/pipeline_status.h',
+ 'base/ranges.cc',
+ 'base/ranges.h',
+ 'base/seekable_buffer.cc',
+ 'base/seekable_buffer.h',
+ 'base/state_matrix.cc',
+ 'base/state_matrix.h',
+ 'base/stream_parser.cc',
+ 'base/stream_parser.h',
+ 'base/stream_parser_buffer.cc',
+ 'base/stream_parser_buffer.h',
+ 'base/video_decoder.cc',
+ 'base/video_decoder.h',
+ 'base/video_decoder_config.cc',
+ 'base/video_decoder_config.h',
+ 'base/video_frame.cc',
+ 'base/video_frame.h',
+ 'base/video_renderer.h',
+ 'base/video_util.cc',
+ 'base/video_util.h',
+ 'crypto/aes_decryptor.cc',
+ 'crypto/aes_decryptor.h',
+ 'ffmpeg/ffmpeg_common.cc',
+ 'ffmpeg/ffmpeg_common.h',
+ 'ffmpeg/file_protocol.cc',
+ 'ffmpeg/file_protocol.h',
+ 'filters/audio_file_reader.cc',
+ 'filters/audio_file_reader.h',
+ 'filters/audio_renderer_algorithm.cc',
+ 'filters/audio_renderer_algorithm.h',
+ 'filters/audio_renderer_impl.cc',
+ 'filters/audio_renderer_impl.h',
+ 'filters/bitstream_converter.cc',
+ 'filters/bitstream_converter.h',
+ 'filters/chunk_demuxer.cc',
+ 'filters/chunk_demuxer.h',
+ 'filters/chunk_demuxer_client.h',
+ 'filters/dummy_demuxer.cc',
+ 'filters/dummy_demuxer.h',
+ 'filters/ffmpeg_audio_decoder.cc',
+ 'filters/ffmpeg_audio_decoder.h',
+ 'filters/ffmpeg_demuxer.cc',
+ 'filters/ffmpeg_demuxer.h',
+ 'filters/ffmpeg_h264_bitstream_converter.cc',
+ 'filters/ffmpeg_h264_bitstream_converter.h',
+ 'filters/ffmpeg_glue.cc',
+ 'filters/ffmpeg_glue.h',
+ 'filters/ffmpeg_video_decoder.cc',
+ 'filters/ffmpeg_video_decoder.h',
+ 'filters/file_data_source.cc',
+ 'filters/file_data_source.h',
+ 'filters/gpu_video_decoder.cc',
+ 'filters/gpu_video_decoder.h',
+ 'filters/in_memory_url_protocol.cc',
+ 'filters/in_memory_url_protocol.h',
+ 'filters/source_buffer_stream.cc',
+ 'filters/source_buffer_stream.h',
+ 'filters/video_frame_generator.cc',
+ 'filters/video_frame_generator.h',
+ 'filters/video_renderer_base.cc',
+ 'filters/video_renderer_base.h',
+ 'video/capture/fake_video_capture_device.cc',
+ 'video/capture/fake_video_capture_device.h',
+ 'video/capture/linux/video_capture_device_linux.cc',
+ 'video/capture/linux/video_capture_device_linux.h',
+ 'video/capture/mac/video_capture_device_mac.h',
+ 'video/capture/mac/video_capture_device_mac.mm',
+ 'video/capture/mac/video_capture_device_qtkit_mac.h',
+ 'video/capture/mac/video_capture_device_qtkit_mac.mm',
+ 'video/capture/video_capture.h',
+ 'video/capture/video_capture_device.h',
+ 'video/capture/video_capture_device_dummy.cc',
+ 'video/capture/video_capture_device_dummy.h',
+ 'video/capture/video_capture_proxy.cc',
+ 'video/capture/video_capture_proxy.h',
+ 'video/capture/video_capture_types.h',
+ 'video/capture/win/filter_base_win.cc',
+ 'video/capture/win/filter_base_win.h',
+ 'video/capture/win/pin_base_win.cc',
+ 'video/capture/win/pin_base_win.h',
+ 'video/capture/win/sink_filter_observer_win.h',
+ 'video/capture/win/sink_filter_win.cc',
+ 'video/capture/win/sink_filter_win.h',
+ 'video/capture/win/sink_input_pin_win.cc',
+ 'video/capture/win/sink_input_pin_win.h',
+ 'video/capture/win/video_capture_device_win.cc',
+ 'video/capture/win/video_capture_device_win.h',
+ 'video/picture.cc',
+ 'video/picture.h',
+ 'video/video_decode_accelerator.cc',
+ 'video/video_decode_accelerator.h',
+ 'webm/webm_constants.h',
+ 'webm/webm_cluster_parser.cc',
+ 'webm/webm_cluster_parser.h',
+ 'webm/webm_content_encodings.cc',
+ 'webm/webm_content_encodings.h',
+ 'webm/webm_content_encodings_client.cc',
+ 'webm/webm_content_encodings_client.h',
+ 'webm/webm_info_parser.cc',
+ 'webm/webm_info_parser.h',
+ 'webm/webm_parser.cc',
+ 'webm/webm_parser.h',
+ 'webm/webm_stream_parser.cc',
+ 'webm/webm_stream_parser.h',
+ 'webm/webm_tracks_parser.cc',
+ 'webm/webm_tracks_parser.h',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ 'conditions': [
+ # Android doesn't use ffmpeg, so make the dependency conditional
+ # and exclude the sources which depend on ffmpeg.
+ ['OS != "android"', {
+ 'dependencies': [
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ }],
+ ['OS == "android"', {
+ 'sources!': [
+ 'base/media_posix.cc',
+ 'ffmpeg/ffmpeg_common.cc',
+ 'ffmpeg/ffmpeg_common.h',
+ 'ffmpeg/file_protocol.cc',
+ 'ffmpeg/file_protocol.h',
+ 'filters/audio_file_reader.cc',
+ 'filters/audio_file_reader.h',
+ 'filters/bitstream_converter.cc',
+ 'filters/bitstream_converter.h',
+ 'filters/chunk_demuxer.cc',
+ 'filters/chunk_demuxer.h',
+ 'filters/chunk_demuxer_client.h',
+ 'filters/ffmpeg_audio_decoder.cc',
+ 'filters/ffmpeg_audio_decoder.h',
+ 'filters/ffmpeg_demuxer.cc',
+ 'filters/ffmpeg_demuxer.h',
+ 'filters/ffmpeg_h264_bitstream_converter.cc',
+ 'filters/ffmpeg_h264_bitstream_converter.h',
+ 'filters/ffmpeg_glue.cc',
+ 'filters/ffmpeg_glue.h',
+ 'filters/ffmpeg_video_decoder.cc',
+ 'filters/ffmpeg_video_decoder.h',
+ 'filters/gpu_video_decoder.cc',
+ 'filters/gpu_video_decoder.h',
+ 'webm/webm_cluster_parser.cc',
+ 'webm/webm_cluster_parser.h',
+ 'webm/webm_stream_parser.cc',
+ 'webm/webm_stream_parser.h',
+ ],
+ }],
+ # The below 'android' conditions were added temporarily and should be
+ # removed downstream, because there is no Java environment set up
+ # upstream yet.
+ ['OS == "android"', {
+ 'sources!':[
+ 'audio/android/audio_track_output_android.cc',
+ ],
+ 'sources':[
+ 'audio/android/audio_track_output_stub_android.cc',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-lOpenSLES',
+ ],
+ },
+ }],
+ ['OS=="linux" or OS=="freebsd" or OS=="solaris"', {
+ 'link_settings': {
+ 'libraries': [
+ '-lasound',
+ ],
+ },
+ }],
+ ['OS=="openbsd"', {
+ 'sources/': [ ['exclude', '/alsa_' ],
+ ['exclude', '/audio_manager_linux' ] ],
+ 'link_settings': {
+ 'libraries': [
+ ],
+ },
+ }],
+ ['OS!="openbsd"', {
+ 'sources!': [
+ 'audio/openbsd/audio_manager_openbsd.cc',
+ 'audio/openbsd/audio_manager_openbsd.h',
+ ],
+ }],
+ ['OS=="linux"', {
+ 'variables': {
+ 'conditions': [
+ ['sysroot!=""', {
+ 'pkg-config': '../build/linux/pkg-config-wrapper "<(sysroot)" "<(target_arch)"',
+ }, {
+ 'pkg-config': 'pkg-config'
+ }],
+ ],
+ },
+ 'conditions': [
+ ['use_cras == 1', {
+ 'cflags': [
+ '<!@(<(pkg-config) --cflags libcras)',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '<!@(<(pkg-config) --libs libcras)',
+ ],
+ },
+ 'defines': [
+ 'USE_CRAS',
+ ],
+ }, { # else: use_cras == 0
+ 'sources!': [
+ 'audio/linux/cras_output.cc',
+ 'audio/linux/cras_output.h',
+ ],
+ }],
+ ],
+ }],
+ ['os_posix == 1', {
+ 'conditions': [
+ ['use_pulseaudio == 1', {
+ 'cflags': [
+ '<!@(pkg-config --cflags libpulse)',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '<!@(pkg-config --libs-only-l libpulse)',
+ ],
+ },
+ 'defines': [
+ 'USE_PULSEAUDIO',
+ ],
+ }, { # else: use_pulseaudio == 0
+ 'sources!': [
+ 'audio/pulse/pulse_output.cc',
+ 'audio/pulse/pulse_output.h',
+ ],
+ }],
+ ],
+ }],
+ ['os_posix == 1 and OS != "android"', {
+ # Video capture isn't supported in Android yet.
+ 'sources!': [
+ 'video/capture/video_capture_device_dummy.cc',
+ 'video/capture/video_capture_device_dummy.h',
+ ],
+ }],
+ ['OS=="mac"', {
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/AudioUnit.framework',
+ '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreVideo.framework',
+ '$(SDKROOT)/System/Library/Frameworks/QTKit.framework',
+ ],
+ },
+ }],
+ ['OS=="win"', {
+ 'sources!': [
+ 'audio/pulse/pulse_output.cc',
+ 'audio/pulse/pulse_output.h',
+ 'video/capture/video_capture_device_dummy.cc',
+ 'video/capture/video_capture_device_dummy.h',
+ ],
+ }],
+ ['proprietary_codecs==1 or branding=="Chrome"', {
+ 'sources': [
+ 'mp4/avc.cc',
+ 'mp4/avc.h',
+ 'mp4/box_definitions.cc',
+ 'mp4/box_definitions.h',
+ 'mp4/box_reader.cc',
+ 'mp4/box_reader.h',
+ 'mp4/cenc.cc',
+ 'mp4/cenc.h',
+ 'mp4/mp4_stream_parser.cc',
+ 'mp4/mp4_stream_parser.h',
+ 'mp4/offset_byte_queue.cc',
+ 'mp4/offset_byte_queue.h',
+ 'mp4/track_run_iterator.cc',
+ 'mp4/track_run_iterator.h',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'yuv_convert',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '..',
+ ],
+ 'conditions': [
+ ['order_profiling != 0', {
+ 'target_conditions' : [
+ ['_toolset=="target"', {
+ 'cflags!': [ '-finstrument-functions' ],
+ }],
+ ],
+ }],
+ [ 'target_arch == "ia32" or target_arch == "x64"', {
+ 'dependencies': [
+ 'yuv_convert_simd_x86',
+ ],
+ }],
+ [ 'target_arch == "arm"', {
+ 'dependencies': [
+ 'yuv_convert_simd_arm',
+ ],
+ }],
+ ],
+ 'sources': [
+ 'base/yuv_convert.cc',
+ 'base/yuv_convert.h',
+ ],
+ },
+ {
+ 'target_name': 'yuv_convert_simd_x86',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'base/simd/convert_rgb_to_yuv_c.cc',
+ 'base/simd/convert_rgb_to_yuv_sse2.cc',
+ 'base/simd/convert_rgb_to_yuv_ssse3.asm',
+ 'base/simd/convert_rgb_to_yuv_ssse3.cc',
+ 'base/simd/convert_rgb_to_yuv_ssse3.inc',
+ 'base/simd/convert_yuv_to_rgb_c.cc',
+ 'base/simd/convert_yuv_to_rgb_x86.cc',
+ 'base/simd/convert_yuv_to_rgb_mmx.asm',
+ 'base/simd/convert_yuv_to_rgb_mmx.inc',
+ 'base/simd/convert_yuv_to_rgb_sse.asm',
+ 'base/simd/filter_yuv.h',
+ 'base/simd/filter_yuv_c.cc',
+ 'base/simd/filter_yuv_mmx.cc',
+ 'base/simd/filter_yuv_sse2.cc',
+ 'base/simd/linear_scale_yuv_to_rgb_mmx.asm',
+ 'base/simd/linear_scale_yuv_to_rgb_mmx.inc',
+ 'base/simd/linear_scale_yuv_to_rgb_sse.asm',
+ 'base/simd/scale_yuv_to_rgb_mmx.asm',
+ 'base/simd/scale_yuv_to_rgb_mmx.inc',
+ 'base/simd/scale_yuv_to_rgb_sse.asm',
+ 'base/simd/yuv_to_rgb_table.cc',
+ 'base/simd/yuv_to_rgb_table.h',
+ ],
+ 'conditions': [
+ ['order_profiling != 0', {
+ 'target_conditions' : [
+ ['_toolset=="target"', {
+ 'cflags!': [ '-finstrument-functions' ],
+ }],
+ ],
+ }],
+ [ 'target_arch == "x64"', {
+ # Source files optimized for X64 systems.
+ 'sources': [
+ 'base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm',
+ 'base/simd/scale_yuv_to_rgb_sse2_x64.asm',
+ ],
+ }],
+ [ 'os_posix == 1 and OS != "mac" and OS != "android"', {
+ 'cflags': [
+ '-msse2',
+ ],
+ }],
+ [ 'OS == "mac"', {
+ 'configurations': {
+ 'Debug': {
+ 'xcode_settings': {
+ # gcc on the mac builds horribly unoptimized sse code in debug
+ # mode. Since this is rarely going to be debugged, run with full
+ # optimizations in Debug as well as Release.
+ 'GCC_OPTIMIZATION_LEVEL': '3', # -O3
+ },
+ },
+ },
+ }],
+ [ 'OS=="win"', {
+ 'variables': {
+ 'yasm_flags': [
+ '-DWIN32',
+ '-DMSVC',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ },
+ }],
+ [ 'OS=="mac"', {
+ 'variables': {
+ 'yasm_flags': [
+ '-DPREFIX',
+ '-DMACHO',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ },
+ }],
+ [ 'os_posix==1 and OS!="mac"', {
+ 'variables': {
+ 'conditions': [
+ [ 'target_arch=="ia32"', {
+ 'yasm_flags': [
+ '-DX86_32',
+ '-DELF',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ }, {
+ 'yasm_flags': [
+ '-DARCH_X86_64',
+ '-DELF',
+ '-DPIC',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ }],
+ ],
+ },
+ }],
+ ],
+ 'variables': {
+ 'yasm_output_path': '<(SHARED_INTERMEDIATE_DIR)/media',
+ },
+ 'msvs_2010_disable_uldi_when_referenced': 1,
+ 'includes': [
+ '../third_party/yasm/yasm_compile.gypi',
+ ],
+ },
+ {
+ 'target_name': 'yuv_convert_simd_arm',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'base/simd/convert_rgb_to_yuv_c.cc',
+ 'base/simd/convert_rgb_to_yuv.h',
+ 'base/simd/convert_yuv_to_rgb_c.cc',
+ 'base/simd/convert_yuv_to_rgb.h',
+ 'base/simd/filter_yuv.h',
+ 'base/simd/filter_yuv_c.cc',
+ 'base/simd/yuv_to_rgb_table.cc',
+ 'base/simd/yuv_to_rgb_table.h',
+ ],
+ },
+ {
+ 'target_name': 'media_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'media_test_support',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../base/base.gyp:base_i18n',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../ui/ui.gyp:ui',
+ ],
+ 'sources': [
+ 'audio/async_socket_io_handler_unittest.cc',
+ 'audio/audio_input_controller_unittest.cc',
+ 'audio/audio_input_device_unittest.cc',
+ 'audio/audio_input_unittest.cc',
+ 'audio/audio_input_volume_unittest.cc',
+ 'audio/audio_low_latency_input_output_unittest.cc',
+ 'audio/audio_output_controller_unittest.cc',
+ 'audio/audio_output_proxy_unittest.cc',
+ 'audio/audio_parameters_unittest.cc',
+ 'audio/audio_util_unittest.cc',
+ 'audio/cross_process_notification_unittest.cc',
+ 'audio/linux/alsa_output_unittest.cc',
+ 'audio/mac/audio_low_latency_input_mac_unittest.cc',
+ 'audio/mac/audio_output_mac_unittest.cc',
+ 'audio/simple_sources_unittest.cc',
+ 'audio/win/audio_low_latency_input_win_unittest.cc',
+ 'audio/win/audio_low_latency_output_win_unittest.cc',
+ 'audio/win/audio_output_win_unittest.cc',
+ 'base/audio_renderer_mixer_unittest.cc',
+ 'base/audio_renderer_mixer_input_unittest.cc',
+ 'base/buffers_unittest.cc',
+ 'base/clock_unittest.cc',
+ 'base/composite_filter_unittest.cc',
+ 'base/data_buffer_unittest.cc',
+ 'base/decoder_buffer_unittest.cc',
+ 'base/djb2_unittest.cc',
+ 'base/fake_audio_render_callback.cc',
+ 'base/fake_audio_render_callback.h',
+ 'base/filter_collection_unittest.cc',
+ 'base/h264_bitstream_converter_unittest.cc',
+ 'base/pipeline_unittest.cc',
+ 'base/ranges_unittest.cc',
+ 'base/run_all_unittests.cc',
+ 'base/seekable_buffer_unittest.cc',
+ 'base/state_matrix_unittest.cc',
+ 'base/test_data_util.cc',
+ 'base/test_data_util.h',
+ 'base/video_frame_unittest.cc',
+ 'base/video_util_unittest.cc',
+ 'base/yuv_convert_unittest.cc',
+ 'crypto/aes_decryptor_unittest.cc',
+ 'ffmpeg/ffmpeg_common_unittest.cc',
+ 'filters/audio_renderer_algorithm_unittest.cc',
+ 'filters/audio_renderer_impl_unittest.cc',
+ 'filters/bitstream_converter_unittest.cc',
+ 'filters/chunk_demuxer_unittest.cc',
+ 'filters/ffmpeg_audio_decoder_unittest.cc',
+ 'filters/ffmpeg_decoder_unittest.h',
+ 'filters/ffmpeg_demuxer_unittest.cc',
+ 'filters/ffmpeg_glue_unittest.cc',
+ 'filters/ffmpeg_h264_bitstream_converter_unittest.cc',
+ 'filters/ffmpeg_video_decoder_unittest.cc',
+ 'filters/file_data_source_unittest.cc',
+ 'filters/pipeline_integration_test.cc',
+ 'filters/pipeline_integration_test_base.cc',
+ 'filters/source_buffer_stream_unittest.cc',
+ 'filters/video_renderer_base_unittest.cc',
+ 'video/capture/video_capture_device_unittest.cc',
+ 'webm/cluster_builder.cc',
+ 'webm/cluster_builder.h',
+ 'webm/webm_cluster_parser_unittest.cc',
+ 'webm/webm_content_encodings_client_unittest.cc',
+ 'webm/webm_parser_unittest.cc',
+ ],
+ 'conditions': [
+ ['os_posix==1 and OS!="mac"', {
+ 'conditions': [
+ ['linux_use_tcmalloc==1', {
+ 'dependencies': [
+ '../base/allocator/allocator.gyp:allocator',
+ ],
+ }],
+ ],
+ }],
+ ['OS != "android"', {
+ 'dependencies': [
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ }],
+ ['OS == "android"', {
+ 'sources!': [
+ 'audio/audio_input_volume_unittest.cc',
+ 'base/test_data_util.cc',
+ 'base/test_data_util.h',
+ 'ffmpeg/ffmpeg_common_unittest.cc',
+ 'filters/ffmpeg_audio_decoder_unittest.cc',
+ 'filters/bitstream_converter_unittest.cc',
+ 'filters/chunk_demuxer_unittest.cc',
+ 'filters/ffmpeg_demuxer_unittest.cc',
+ 'filters/ffmpeg_glue_unittest.cc',
+ 'filters/ffmpeg_h264_bitstream_converter_unittest.cc',
+ 'filters/ffmpeg_video_decoder_unittest.cc',
+ 'filters/pipeline_integration_test.cc',
+ 'filters/pipeline_integration_test_base.cc',
+ 'mp4/mp4_stream_parser_unittest.cc',
+ 'webm/webm_cluster_parser_unittest.cc',
+ ],
+ }],
+ ['OS == "linux"', {
+ 'conditions': [
+ ['use_cras == 1', {
+ 'sources': [
+ 'audio/linux/cras_output_unittest.cc',
+ ],
+ 'defines': [
+ 'USE_CRAS',
+ ],
+ }],
+ ],
+ }],
+ [ 'target_arch=="ia32" or target_arch=="x64"', {
+ 'sources': [
+ 'base/simd/convert_rgb_to_yuv_unittest.cc',
+ ],
+ }],
+ ['proprietary_codecs==1 or branding=="Chrome"', {
+ 'sources': [
+ 'mp4/avc_unittest.cc',
+ 'mp4/box_reader_unittest.cc',
+ 'mp4/mp4_stream_parser_unittest.cc',
+ 'mp4/offset_byte_queue_unittest.cc',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'media_test_support',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'audio/test_audio_input_controller_factory.cc',
+ 'audio/test_audio_input_controller_factory.h',
+ 'base/mock_callback.cc',
+ 'base/mock_callback.h',
+ 'base/mock_data_source_host.cc',
+ 'base/mock_data_source_host.h',
+ 'base/mock_demuxer_host.cc',
+ 'base/mock_demuxer_host.h',
+ 'base/mock_filter_host.cc',
+ 'base/mock_filter_host.h',
+ 'base/mock_filters.cc',
+ 'base/mock_filters.h',
+ ],
+ },
+ {
+ 'target_name': 'scaler_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../skia/skia.gyp:skia',
+ ],
+ 'sources': [
+ 'tools/scaler_bench/scaler_bench.cc',
+ ],
+ },
+ {
+ 'target_name': 'qt_faststart',
+ 'type': 'executable',
+ 'sources': [
+ 'tools/qt_faststart/qt_faststart.c'
+ ],
+ },
+ {
+ 'target_name': 'seek_tester',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ ],
+ 'sources': [
+ 'tools/seek_tester/seek_tester.cc',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS=="win"', {
+ 'targets': [
+ {
+ 'target_name': 'player_wtl',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ '../ui/ui.gyp:ui',
+ ],
+ 'include_dirs': [
+ '<(DEPTH)/third_party/wtl/include',
+ ],
+ 'sources': [
+ 'tools/player_wtl/list.h',
+ 'tools/player_wtl/mainfrm.h',
+ 'tools/player_wtl/movie.cc',
+ 'tools/player_wtl/movie.h',
+ 'tools/player_wtl/player_wtl.cc',
+ 'tools/player_wtl/player_wtl.rc',
+ 'tools/player_wtl/props.h',
+ 'tools/player_wtl/seek.h',
+ 'tools/player_wtl/resource.h',
+ 'tools/player_wtl/view.h',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
+ },
+ },
+ 'defines': [
+ '_CRT_SECURE_NO_WARNINGS=1',
+ ],
+ },
+ ],
+ }],
+ ['OS == "win" or toolkit_uses_gtk == 1', {
+ 'targets': [
+ {
+ 'target_name': 'shader_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../ui/gl/gl.gyp:gl',
+ ],
+ 'sources': [
+ 'tools/shader_bench/shader_bench.cc',
+ 'tools/shader_bench/cpu_color_painter.cc',
+ 'tools/shader_bench/cpu_color_painter.h',
+ 'tools/shader_bench/gpu_color_painter.cc',
+ 'tools/shader_bench/gpu_color_painter.h',
+ 'tools/shader_bench/gpu_painter.cc',
+ 'tools/shader_bench/gpu_painter.h',
+ 'tools/shader_bench/painter.cc',
+ 'tools/shader_bench/painter.h',
+ 'tools/shader_bench/window.cc',
+ 'tools/shader_bench/window.h',
+ ],
+ 'conditions': [
+ ['toolkit_uses_gtk == 1', {
+ 'dependencies': [
+ '../build/linux/system.gyp:gtk',
+ ],
+ 'sources': [
+ 'tools/shader_bench/window_linux.cc',
+ ],
+ }],
+ ['OS=="win"', {
+ 'dependencies': [
+ '../third_party/angle/src/build_angle.gyp:libEGL',
+ '../third_party/angle/src/build_angle.gyp:libGLESv2',
+ ],
+ 'sources': [
+ 'tools/shader_bench/window_win.cc',
+ ],
+ }],
+ ],
+ },
+ ],
+ }],
+ ['OS == "linux" and target_arch != "arm"', {
+ 'targets': [
+ {
+ 'target_name': 'tile_render_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../ui/gl/gl.gyp:gl',
+ ],
+ 'libraries': [
+ '-lGL',
+ '-ldl',
+ ],
+ 'sources': [
+ 'tools/tile_render_bench/tile_render_bench.cc',
+ ],
+ },
+ ],
+ }],
+ ['os_posix == 1 and OS != "mac" and OS != "android"', {
+ 'targets': [
+ {
+ 'target_name': 'player_x11',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../ui/gl/gl.gyp:gl',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-ldl',
+ '-lX11',
+ '-lXrender',
+ '-lXext',
+ ],
+ },
+ 'sources': [
+ 'tools/player_x11/data_source_logger.cc',
+ 'tools/player_x11/data_source_logger.h',
+ 'tools/player_x11/gl_video_renderer.cc',
+ 'tools/player_x11/gl_video_renderer.h',
+ 'tools/player_x11/player_x11.cc',
+ 'tools/player_x11/x11_video_renderer.cc',
+ 'tools/player_x11/x11_video_renderer.h',
+ ],
+ },
+ ],
+ }],
+ ['OS == "android"', {
+ 'targets': [
+ {
+ 'target_name': 'player_android',
+ 'type': 'static_library',
+ 'sources': [
+ 'base/android/media_player_bridge.cc',
+ 'base/android/media_player_bridge.h',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ ],
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)/media',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'generate-jni-headers',
+ 'inputs': [
+ '../base/android/jni_generator/jni_generator.py',
+ 'base/android/java/src/org/chromium/media/MediaPlayerListener.java',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/media/jni/media_player_listener_jni.h',
+ ],
+ 'action': [
+ 'python',
+ '<(DEPTH)/base/android/jni_generator/jni_generator.py',
+ '-o',
+ '<@(_inputs)',
+ '<@(_outputs)',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'media_java',
+ 'type': 'none',
+ 'dependencies': [ '../base/base.gyp:base_java' ],
+ 'variables': {
+ 'package_name': 'media',
+ 'java_in_dir': 'base/android/java',
+ },
+ 'includes': [ '../build/java.gypi' ],
+ },
+
+ ],
+ }, { # OS != "android"'
+ # Android does not use ffmpeg, so disable the targets which require it.
+ 'targets': [
+ {
+ 'target_name': 'ffmpeg_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'media_test_support',
+ '../base/base.gyp:base',
+ '../base/base.gyp:base_i18n',
+ '../base/base.gyp:test_support_base',
+ '../base/base.gyp:test_support_perf',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'ffmpeg/ffmpeg_unittest.cc',
+ ],
+ 'conditions': [
+ ['toolkit_uses_gtk == 1', {
+ 'dependencies': [
+ # Needed for the following #include chain:
+ # base/run_all_unittests.cc
+ # ../base/test_suite.h
+ # gtk/gtk.h
+ '../build/linux/system.gyp:gtk',
+ ],
+ 'conditions': [
+ ['linux_use_tcmalloc==1', {
+ 'dependencies': [
+ '../base/allocator/allocator.gyp:allocator',
+ ],
+ }],
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'ffmpeg_regression_tests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'media_test_support',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'base/test_data_util.cc',
+ 'base/run_all_unittests.cc',
+ 'ffmpeg/ffmpeg_regression_tests.cc',
+ 'filters/pipeline_integration_test_base.cc',
+ ],
+ 'conditions': [
+ ['os_posix==1 and OS!="mac"', {
+ 'conditions': [
+ ['linux_use_tcmalloc==1', {
+ 'dependencies': [
+ '../base/allocator/allocator.gyp:allocator',
+ ],
+ }],
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'ffmpeg_tests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'test/ffmpeg_tests/ffmpeg_tests.cc',
+ ],
+ },
+ {
+ 'target_name': 'media_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'tools/media_bench/media_bench.cc',
+ ],
+ },
+ ],
+ }]
+ ],
+}
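Because GYP files such as the media.gyp fixture above are essentially Python
dictionary literals, they can be loaded without GYP itself. A minimal sketch,
assuming the fixture's relative path inside a gyp checkout (adjust to taste):

    import ast

    # ast.literal_eval accepts dict/list/string/int literals and the tokenizer
    # ignores '#' comments, which is all this fixture contains.
    with open("tools/emacs/testdata/media.gyp") as f:
        data = ast.literal_eval(f.read())

    for target in data["targets"]:
        print(target["target_name"], target["type"])
    # -> media <(component), yuv_convert static_library, ...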
diff --git a/third_party/python/gyp/tools/emacs/testdata/media.gyp.fontified b/third_party/python/gyp/tools/emacs/testdata/media.gyp.fontified
new file mode 100644
index 0000000000..962b7b2c43
--- /dev/null
+++ b/third_party/python/gyp/tools/emacs/testdata/media.gyp.fontified
@@ -0,0 +1,1107 @@
+
+#("# Copyright (c) 2012 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+ 'variables': {
+ 'chromium_code': 1,
+ # Override to dynamically link the PulseAudio library.
+ 'use_pulseaudio%': 0,
+ # Override to dynamically link the cras (ChromeOS audio) library.
+ 'use_cras%': 0,
+ },
+ 'targets': [
+ {
+ 'target_name': 'media',
+ 'type': '<(component)',
+ 'dependencies': [
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ '../build/temp_gyp/googleurl.gyp:googleurl',
+ '../crypto/crypto.gyp:crypto',
+ '../third_party/openmax/openmax.gyp:il',
+ '../ui/ui.gyp:ui',
+ ],
+ 'defines': [
+ 'MEDIA_IMPLEMENTATION',
+ ],
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'audio/android/audio_manager_android.cc',
+ 'audio/android/audio_manager_android.h',
+ 'audio/android/audio_track_output_android.cc',
+ 'audio/android/audio_track_output_android.h',
+ 'audio/android/opensles_input.cc',
+ 'audio/android/opensles_input.h',
+ 'audio/android/opensles_output.cc',
+ 'audio/android/opensles_output.h',
+ 'audio/async_socket_io_handler.h',
+ 'audio/async_socket_io_handler_posix.cc',
+ 'audio/async_socket_io_handler_win.cc',
+ 'audio/audio_buffers_state.cc',
+ 'audio/audio_buffers_state.h',
+ 'audio/audio_io.h',
+ 'audio/audio_input_controller.cc',
+ 'audio/audio_input_controller.h',
+ 'audio/audio_input_stream_impl.cc',
+ 'audio/audio_input_stream_impl.h',
+ 'audio/audio_device_name.cc',
+ 'audio/audio_device_name.h',
+ 'audio/audio_manager.cc',
+ 'audio/audio_manager.h',
+ 'audio/audio_manager_base.cc',
+ 'audio/audio_manager_base.h',
+ 'audio/audio_output_controller.cc',
+ 'audio/audio_output_controller.h',
+ 'audio/audio_output_dispatcher.cc',
+ 'audio/audio_output_dispatcher.h',
+ 'audio/audio_output_dispatcher_impl.cc',
+ 'audio/audio_output_dispatcher_impl.h',
+ 'audio/audio_output_mixer.cc',
+ 'audio/audio_output_mixer.h',
+ 'audio/audio_output_proxy.cc',
+ 'audio/audio_output_proxy.h',
+ 'audio/audio_parameters.cc',
+ 'audio/audio_parameters.h',
+ 'audio/audio_util.cc',
+ 'audio/audio_util.h',
+ 'audio/cross_process_notification.cc',
+ 'audio/cross_process_notification.h',
+ 'audio/cross_process_notification_win.cc',
+ 'audio/cross_process_notification_posix.cc',
+ 'audio/fake_audio_input_stream.cc',
+ 'audio/fake_audio_input_stream.h',
+ 'audio/fake_audio_output_stream.cc',
+ 'audio/fake_audio_output_stream.h',
+ 'audio/linux/audio_manager_linux.cc',
+ 'audio/linux/audio_manager_linux.h',
+ 'audio/linux/alsa_input.cc',
+ 'audio/linux/alsa_input.h',
+ 'audio/linux/alsa_output.cc',
+ 'audio/linux/alsa_output.h',
+ 'audio/linux/alsa_util.cc',
+ 'audio/linux/alsa_util.h',
+ 'audio/linux/alsa_wrapper.cc',
+ 'audio/linux/alsa_wrapper.h',
+ 'audio/linux/cras_output.cc',
+ 'audio/linux/cras_output.h',
+ 'audio/openbsd/audio_manager_openbsd.cc',
+ 'audio/openbsd/audio_manager_openbsd.h',
+ 'audio/mac/audio_input_mac.cc',
+ 'audio/mac/audio_input_mac.h',
+ 'audio/mac/audio_low_latency_input_mac.cc',
+ 'audio/mac/audio_low_latency_input_mac.h',
+ 'audio/mac/audio_low_latency_output_mac.cc',
+ 'audio/mac/audio_low_latency_output_mac.h',
+ 'audio/mac/audio_manager_mac.cc',
+ 'audio/mac/audio_manager_mac.h',
+ 'audio/mac/audio_output_mac.cc',
+ 'audio/mac/audio_output_mac.h',
+ 'audio/null_audio_sink.cc',
+ 'audio/null_audio_sink.h',
+ 'audio/pulse/pulse_output.cc',
+ 'audio/pulse/pulse_output.h',
+ 'audio/sample_rates.cc',
+ 'audio/sample_rates.h',
+ 'audio/simple_sources.cc',
+ 'audio/simple_sources.h',
+ 'audio/win/audio_low_latency_input_win.cc',
+ 'audio/win/audio_low_latency_input_win.h',
+ 'audio/win/audio_low_latency_output_win.cc',
+ 'audio/win/audio_low_latency_output_win.h',
+ 'audio/win/audio_manager_win.cc',
+ 'audio/win/audio_manager_win.h',
+ 'audio/win/avrt_wrapper_win.cc',
+ 'audio/win/avrt_wrapper_win.h',
+ 'audio/win/device_enumeration_win.cc',
+ 'audio/win/device_enumeration_win.h',
+ 'audio/win/wavein_input_win.cc',
+ 'audio/win/wavein_input_win.h',
+ 'audio/win/waveout_output_win.cc',
+ 'audio/win/waveout_output_win.h',
+ 'base/android/media_jni_registrar.cc',
+ 'base/android/media_jni_registrar.h',
+ 'base/audio_decoder.cc',
+ 'base/audio_decoder.h',
+ 'base/audio_decoder_config.cc',
+ 'base/audio_decoder_config.h',
+ 'base/audio_renderer.h',
+ 'base/audio_renderer_mixer.cc',
+ 'base/audio_renderer_mixer.h',
+ 'base/audio_renderer_mixer_input.cc',
+ 'base/audio_renderer_mixer_input.h',
+ 'base/bitstream_buffer.h',
+ 'base/buffers.cc',
+ 'base/buffers.h',
+ 'base/byte_queue.cc',
+ 'base/byte_queue.h',
+ 'base/channel_layout.cc',
+ 'base/channel_layout.h',
+ 'base/clock.cc',
+ 'base/clock.h',
+ 'base/composite_filter.cc',
+ 'base/composite_filter.h',
+ 'base/data_buffer.cc',
+ 'base/data_buffer.h',
+ 'base/data_source.cc',
+ 'base/data_source.h',
+ 'base/decoder_buffer.cc',
+ 'base/decoder_buffer.h',
+ 'base/decrypt_config.cc',
+ 'base/decrypt_config.h',
+ 'base/decryptor.h',
+ 'base/decryptor_client.h',
+ 'base/demuxer.cc',
+ 'base/demuxer.h',
+ 'base/demuxer_stream.cc',
+ 'base/demuxer_stream.h',
+ 'base/djb2.cc',
+ 'base/djb2.h',
+ 'base/filter_collection.cc',
+ 'base/filter_collection.h',
+ 'base/filter_host.h',
+ 'base/filters.cc',
+ 'base/filters.h',
+ 'base/h264_bitstream_converter.cc',
+ 'base/h264_bitstream_converter.h',
+ 'base/media.h',
+ 'base/media_android.cc',
+ 'base/media_export.h',
+ 'base/media_log.cc',
+ 'base/media_log.h',
+ 'base/media_log_event.h',
+ 'base/media_posix.cc',
+ 'base/media_switches.cc',
+ 'base/media_switches.h',
+ 'base/media_win.cc',
+ 'base/message_loop_factory.cc',
+ 'base/message_loop_factory.h',
+ 'base/pipeline.cc',
+ 'base/pipeline.h',
+ 'base/pipeline_status.cc',
+ 'base/pipeline_status.h',
+ 'base/ranges.cc',
+ 'base/ranges.h',
+ 'base/seekable_buffer.cc',
+ 'base/seekable_buffer.h',
+ 'base/state_matrix.cc',
+ 'base/state_matrix.h',
+ 'base/stream_parser.cc',
+ 'base/stream_parser.h',
+ 'base/stream_parser_buffer.cc',
+ 'base/stream_parser_buffer.h',
+ 'base/video_decoder.cc',
+ 'base/video_decoder.h',
+ 'base/video_decoder_config.cc',
+ 'base/video_decoder_config.h',
+ 'base/video_frame.cc',
+ 'base/video_frame.h',
+ 'base/video_renderer.h',
+ 'base/video_util.cc',
+ 'base/video_util.h',
+ 'crypto/aes_decryptor.cc',
+ 'crypto/aes_decryptor.h',
+ 'ffmpeg/ffmpeg_common.cc',
+ 'ffmpeg/ffmpeg_common.h',
+ 'ffmpeg/file_protocol.cc',
+ 'ffmpeg/file_protocol.h',
+ 'filters/audio_file_reader.cc',
+ 'filters/audio_file_reader.h',
+ 'filters/audio_renderer_algorithm.cc',
+ 'filters/audio_renderer_algorithm.h',
+ 'filters/audio_renderer_impl.cc',
+ 'filters/audio_renderer_impl.h',
+ 'filters/bitstream_converter.cc',
+ 'filters/bitstream_converter.h',
+ 'filters/chunk_demuxer.cc',
+ 'filters/chunk_demuxer.h',
+ 'filters/chunk_demuxer_client.h',
+ 'filters/dummy_demuxer.cc',
+ 'filters/dummy_demuxer.h',
+ 'filters/ffmpeg_audio_decoder.cc',
+ 'filters/ffmpeg_audio_decoder.h',
+ 'filters/ffmpeg_demuxer.cc',
+ 'filters/ffmpeg_demuxer.h',
+ 'filters/ffmpeg_h264_bitstream_converter.cc',
+ 'filters/ffmpeg_h264_bitstream_converter.h',
+ 'filters/ffmpeg_glue.cc',
+ 'filters/ffmpeg_glue.h',
+ 'filters/ffmpeg_video_decoder.cc',
+ 'filters/ffmpeg_video_decoder.h',
+ 'filters/file_data_source.cc',
+ 'filters/file_data_source.h',
+ 'filters/gpu_video_decoder.cc',
+ 'filters/gpu_video_decoder.h',
+ 'filters/in_memory_url_protocol.cc',
+ 'filters/in_memory_url_protocol.h',
+ 'filters/source_buffer_stream.cc',
+ 'filters/source_buffer_stream.h',
+ 'filters/video_frame_generator.cc',
+ 'filters/video_frame_generator.h',
+ 'filters/video_renderer_base.cc',
+ 'filters/video_renderer_base.h',
+ 'video/capture/fake_video_capture_device.cc',
+ 'video/capture/fake_video_capture_device.h',
+ 'video/capture/linux/video_capture_device_linux.cc',
+ 'video/capture/linux/video_capture_device_linux.h',
+ 'video/capture/mac/video_capture_device_mac.h',
+ 'video/capture/mac/video_capture_device_mac.mm',
+ 'video/capture/mac/video_capture_device_qtkit_mac.h',
+ 'video/capture/mac/video_capture_device_qtkit_mac.mm',
+ 'video/capture/video_capture.h',
+ 'video/capture/video_capture_device.h',
+ 'video/capture/video_capture_device_dummy.cc',
+ 'video/capture/video_capture_device_dummy.h',
+ 'video/capture/video_capture_proxy.cc',
+ 'video/capture/video_capture_proxy.h',
+ 'video/capture/video_capture_types.h',
+ 'video/capture/win/filter_base_win.cc',
+ 'video/capture/win/filter_base_win.h',
+ 'video/capture/win/pin_base_win.cc',
+ 'video/capture/win/pin_base_win.h',
+ 'video/capture/win/sink_filter_observer_win.h',
+ 'video/capture/win/sink_filter_win.cc',
+ 'video/capture/win/sink_filter_win.h',
+ 'video/capture/win/sink_input_pin_win.cc',
+ 'video/capture/win/sink_input_pin_win.h',
+ 'video/capture/win/video_capture_device_win.cc',
+ 'video/capture/win/video_capture_device_win.h',
+ 'video/picture.cc',
+ 'video/picture.h',
+ 'video/video_decode_accelerator.cc',
+ 'video/video_decode_accelerator.h',
+ 'webm/webm_constants.h',
+ 'webm/webm_cluster_parser.cc',
+ 'webm/webm_cluster_parser.h',
+ 'webm/webm_content_encodings.cc',
+ 'webm/webm_content_encodings.h',
+ 'webm/webm_content_encodings_client.cc',
+ 'webm/webm_content_encodings_client.h',
+ 'webm/webm_info_parser.cc',
+ 'webm/webm_info_parser.h',
+ 'webm/webm_parser.cc',
+ 'webm/webm_parser.h',
+ 'webm/webm_stream_parser.cc',
+ 'webm/webm_stream_parser.h',
+ 'webm/webm_tracks_parser.cc',
+ 'webm/webm_tracks_parser.h',
+ ],
+ 'direct_dependent_settings': {
+ 'include_dirs': [
+ '..',
+ ],
+ },
+ 'conditions': [
+ # Android doesn't use ffmpeg, so make the dependency conditional
+ # and exclude the sources which depend on ffmpeg.
+ ['OS != \"android\"', {
+ 'dependencies': [
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ }],
+ ['OS == \"android\"', {
+ 'sources!': [
+ 'base/media_posix.cc',
+ 'ffmpeg/ffmpeg_common.cc',
+ 'ffmpeg/ffmpeg_common.h',
+ 'ffmpeg/file_protocol.cc',
+ 'ffmpeg/file_protocol.h',
+ 'filters/audio_file_reader.cc',
+ 'filters/audio_file_reader.h',
+ 'filters/bitstream_converter.cc',
+ 'filters/bitstream_converter.h',
+ 'filters/chunk_demuxer.cc',
+ 'filters/chunk_demuxer.h',
+ 'filters/chunk_demuxer_client.h',
+ 'filters/ffmpeg_audio_decoder.cc',
+ 'filters/ffmpeg_audio_decoder.h',
+ 'filters/ffmpeg_demuxer.cc',
+ 'filters/ffmpeg_demuxer.h',
+ 'filters/ffmpeg_h264_bitstream_converter.cc',
+ 'filters/ffmpeg_h264_bitstream_converter.h',
+ 'filters/ffmpeg_glue.cc',
+ 'filters/ffmpeg_glue.h',
+ 'filters/ffmpeg_video_decoder.cc',
+ 'filters/ffmpeg_video_decoder.h',
+ 'filters/gpu_video_decoder.cc',
+ 'filters/gpu_video_decoder.h',
+ 'webm/webm_cluster_parser.cc',
+ 'webm/webm_cluster_parser.h',
+ 'webm/webm_stream_parser.cc',
+ 'webm/webm_stream_parser.h',
+ ],
+ }],
+ # The below 'android' conditions were added temporarily and should be
+ # removed downstream, because there is no Java environment set up
+ # upstream yet.
+ ['OS == \"android\"', {
+ 'sources!':[
+ 'audio/android/audio_track_output_android.cc',
+ ],
+ 'sources':[
+ 'audio/android/audio_track_output_stub_android.cc',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-lOpenSLES',
+ ],
+ },
+ }],
+ ['OS==\"linux\" or OS==\"freebsd\" or OS==\"solaris\"', {
+ 'link_settings': {
+ 'libraries': [
+ '-lasound',
+ ],
+ },
+ }],
+ ['OS==\"openbsd\"', {
+ 'sources/': [ ['exclude', '/alsa_' ],
+ ['exclude', '/audio_manager_linux' ] ],
+ 'link_settings': {
+ 'libraries': [
+ ],
+ },
+ }],
+ ['OS!=\"openbsd\"', {
+ 'sources!': [
+ 'audio/openbsd/audio_manager_openbsd.cc',
+ 'audio/openbsd/audio_manager_openbsd.h',
+ ],
+ }],
+ ['OS==\"linux\"', {
+ 'variables': {
+ 'conditions': [
+ ['sysroot!=\"\"', {
+ 'pkg-config': '../build/linux/pkg-config-wrapper \"<(sysroot)\" \"<(target_arch)\"',
+ }, {
+ 'pkg-config': 'pkg-config'
+ }],
+ ],
+ },
+ 'conditions': [
+ ['use_cras == 1', {
+ 'cflags': [
+ '<!@(<(pkg-config) --cflags libcras)',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '<!@(<(pkg-config) --libs libcras)',
+ ],
+ },
+ 'defines': [
+ 'USE_CRAS',
+ ],
+ }, { # else: use_cras == 0
+ 'sources!': [
+ 'audio/linux/cras_output.cc',
+ 'audio/linux/cras_output.h',
+ ],
+ }],
+ ],
+ }],
+ ['os_posix == 1', {
+ 'conditions': [
+ ['use_pulseaudio == 1', {
+ 'cflags': [
+ '<!@(pkg-config --cflags libpulse)',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '<!@(pkg-config --libs-only-l libpulse)',
+ ],
+ },
+ 'defines': [
+ 'USE_PULSEAUDIO',
+ ],
+ }, { # else: use_pulseaudio == 0
+ 'sources!': [
+ 'audio/pulse/pulse_output.cc',
+ 'audio/pulse/pulse_output.h',
+ ],
+ }],
+ ],
+ }],
+ ['os_posix == 1 and OS != \"android\"', {
+ # Video capture isn't supported in Android yet.
+ 'sources!': [
+ 'video/capture/video_capture_device_dummy.cc',
+ 'video/capture/video_capture_device_dummy.h',
+ ],
+ }],
+ ['OS==\"mac\"', {
+ 'link_settings': {
+ 'libraries': [
+ '$(SDKROOT)/System/Library/Frameworks/AudioUnit.framework',
+ '$(SDKROOT)/System/Library/Frameworks/AudioToolbox.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreAudio.framework',
+ '$(SDKROOT)/System/Library/Frameworks/CoreVideo.framework',
+ '$(SDKROOT)/System/Library/Frameworks/QTKit.framework',
+ ],
+ },
+ }],
+ ['OS==\"win\"', {
+ 'sources!': [
+ 'audio/pulse/pulse_output.cc',
+ 'audio/pulse/pulse_output.h',
+ 'video/capture/video_capture_device_dummy.cc',
+ 'video/capture/video_capture_device_dummy.h',
+ ],
+ }],
+ ['proprietary_codecs==1 or branding==\"Chrome\"', {
+ 'sources': [
+ 'mp4/avc.cc',
+ 'mp4/avc.h',
+ 'mp4/box_definitions.cc',
+ 'mp4/box_definitions.h',
+ 'mp4/box_reader.cc',
+ 'mp4/box_reader.h',
+ 'mp4/cenc.cc',
+ 'mp4/cenc.h',
+ 'mp4/mp4_stream_parser.cc',
+ 'mp4/mp4_stream_parser.h',
+ 'mp4/offset_byte_queue.cc',
+ 'mp4/offset_byte_queue.h',
+ 'mp4/track_run_iterator.cc',
+ 'mp4/track_run_iterator.h',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'yuv_convert',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '..',
+ ],
+ 'conditions': [
+ ['order_profiling != 0', {
+ 'target_conditions' : [
+ ['_toolset==\"target\"', {
+ 'cflags!': [ '-finstrument-functions' ],
+ }],
+ ],
+ }],
+ [ 'target_arch == \"ia32\" or target_arch == \"x64\"', {
+ 'dependencies': [
+ 'yuv_convert_simd_x86',
+ ],
+ }],
+ [ 'target_arch == \"arm\"', {
+ 'dependencies': [
+ 'yuv_convert_simd_arm',
+ ],
+ }],
+ ],
+ 'sources': [
+ 'base/yuv_convert.cc',
+ 'base/yuv_convert.h',
+ ],
+ },
+ {
+ 'target_name': 'yuv_convert_simd_x86',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'base/simd/convert_rgb_to_yuv_c.cc',
+ 'base/simd/convert_rgb_to_yuv_sse2.cc',
+ 'base/simd/convert_rgb_to_yuv_ssse3.asm',
+ 'base/simd/convert_rgb_to_yuv_ssse3.cc',
+ 'base/simd/convert_rgb_to_yuv_ssse3.inc',
+ 'base/simd/convert_yuv_to_rgb_c.cc',
+ 'base/simd/convert_yuv_to_rgb_x86.cc',
+ 'base/simd/convert_yuv_to_rgb_mmx.asm',
+ 'base/simd/convert_yuv_to_rgb_mmx.inc',
+ 'base/simd/convert_yuv_to_rgb_sse.asm',
+ 'base/simd/filter_yuv.h',
+ 'base/simd/filter_yuv_c.cc',
+ 'base/simd/filter_yuv_mmx.cc',
+ 'base/simd/filter_yuv_sse2.cc',
+ 'base/simd/linear_scale_yuv_to_rgb_mmx.asm',
+ 'base/simd/linear_scale_yuv_to_rgb_mmx.inc',
+ 'base/simd/linear_scale_yuv_to_rgb_sse.asm',
+ 'base/simd/scale_yuv_to_rgb_mmx.asm',
+ 'base/simd/scale_yuv_to_rgb_mmx.inc',
+ 'base/simd/scale_yuv_to_rgb_sse.asm',
+ 'base/simd/yuv_to_rgb_table.cc',
+ 'base/simd/yuv_to_rgb_table.h',
+ ],
+ 'conditions': [
+ ['order_profiling != 0', {
+ 'target_conditions' : [
+ ['_toolset==\"target\"', {
+ 'cflags!': [ '-finstrument-functions' ],
+ }],
+ ],
+ }],
+ [ 'target_arch == \"x64\"', {
+ # Source files optimized for X64 systems.
+ 'sources': [
+ 'base/simd/linear_scale_yuv_to_rgb_mmx_x64.asm',
+ 'base/simd/scale_yuv_to_rgb_sse2_x64.asm',
+ ],
+ }],
+ [ 'os_posix == 1 and OS != \"mac\" and OS != \"android\"', {
+ 'cflags': [
+ '-msse2',
+ ],
+ }],
+ [ 'OS == \"mac\"', {
+ 'configurations': {
+ 'Debug': {
+ 'xcode_settings': {
+ # gcc on the mac builds horribly unoptimized sse code in debug
+ # mode. Since this is rarely going to be debugged, run with full
+ # optimizations in Debug as well as Release.
+ 'GCC_OPTIMIZATION_LEVEL': '3', # -O3
+ },
+ },
+ },
+ }],
+ [ 'OS==\"win\"', {
+ 'variables': {
+ 'yasm_flags': [
+ '-DWIN32',
+ '-DMSVC',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ },
+ }],
+ [ 'OS==\"mac\"', {
+ 'variables': {
+ 'yasm_flags': [
+ '-DPREFIX',
+ '-DMACHO',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ },
+ }],
+ [ 'os_posix==1 and OS!=\"mac\"', {
+ 'variables': {
+ 'conditions': [
+ [ 'target_arch==\"ia32\"', {
+ 'yasm_flags': [
+ '-DX86_32',
+ '-DELF',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ }, {
+ 'yasm_flags': [
+ '-DARCH_X86_64',
+ '-DELF',
+ '-DPIC',
+ '-DCHROMIUM',
+ '-Isimd',
+ ],
+ }],
+ ],
+ },
+ }],
+ ],
+ 'variables': {
+ 'yasm_output_path': '<(SHARED_INTERMEDIATE_DIR)/media',
+ },
+ 'msvs_2010_disable_uldi_when_referenced': 1,
+ 'includes': [
+ '../third_party/yasm/yasm_compile.gypi',
+ ],
+ },
+ {
+ 'target_name': 'yuv_convert_simd_arm',
+ 'type': 'static_library',
+ 'include_dirs': [
+ '..',
+ ],
+ 'sources': [
+ 'base/simd/convert_rgb_to_yuv_c.cc',
+ 'base/simd/convert_rgb_to_yuv.h',
+ 'base/simd/convert_yuv_to_rgb_c.cc',
+ 'base/simd/convert_yuv_to_rgb.h',
+ 'base/simd/filter_yuv.h',
+ 'base/simd/filter_yuv_c.cc',
+ 'base/simd/yuv_to_rgb_table.cc',
+ 'base/simd/yuv_to_rgb_table.h',
+ ],
+ },
+ {
+ 'target_name': 'media_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'media_test_support',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../base/base.gyp:base_i18n',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../ui/ui.gyp:ui',
+ ],
+ 'sources': [
+ 'audio/async_socket_io_handler_unittest.cc',
+ 'audio/audio_input_controller_unittest.cc',
+ 'audio/audio_input_device_unittest.cc',
+ 'audio/audio_input_unittest.cc',
+ 'audio/audio_input_volume_unittest.cc',
+ 'audio/audio_low_latency_input_output_unittest.cc',
+ 'audio/audio_output_controller_unittest.cc',
+ 'audio/audio_output_proxy_unittest.cc',
+ 'audio/audio_parameters_unittest.cc',
+ 'audio/audio_util_unittest.cc',
+ 'audio/cross_process_notification_unittest.cc',
+ 'audio/linux/alsa_output_unittest.cc',
+ 'audio/mac/audio_low_latency_input_mac_unittest.cc',
+ 'audio/mac/audio_output_mac_unittest.cc',
+ 'audio/simple_sources_unittest.cc',
+ 'audio/win/audio_low_latency_input_win_unittest.cc',
+ 'audio/win/audio_low_latency_output_win_unittest.cc',
+ 'audio/win/audio_output_win_unittest.cc',
+ 'base/audio_renderer_mixer_unittest.cc',
+ 'base/audio_renderer_mixer_input_unittest.cc',
+ 'base/buffers_unittest.cc',
+ 'base/clock_unittest.cc',
+ 'base/composite_filter_unittest.cc',
+ 'base/data_buffer_unittest.cc',
+ 'base/decoder_buffer_unittest.cc',
+ 'base/djb2_unittest.cc',
+ 'base/fake_audio_render_callback.cc',
+ 'base/fake_audio_render_callback.h',
+ 'base/filter_collection_unittest.cc',
+ 'base/h264_bitstream_converter_unittest.cc',
+ 'base/pipeline_unittest.cc',
+ 'base/ranges_unittest.cc',
+ 'base/run_all_unittests.cc',
+ 'base/seekable_buffer_unittest.cc',
+ 'base/state_matrix_unittest.cc',
+ 'base/test_data_util.cc',
+ 'base/test_data_util.h',
+ 'base/video_frame_unittest.cc',
+ 'base/video_util_unittest.cc',
+ 'base/yuv_convert_unittest.cc',
+ 'crypto/aes_decryptor_unittest.cc',
+ 'ffmpeg/ffmpeg_common_unittest.cc',
+ 'filters/audio_renderer_algorithm_unittest.cc',
+ 'filters/audio_renderer_impl_unittest.cc',
+ 'filters/bitstream_converter_unittest.cc',
+ 'filters/chunk_demuxer_unittest.cc',
+ 'filters/ffmpeg_audio_decoder_unittest.cc',
+ 'filters/ffmpeg_decoder_unittest.h',
+ 'filters/ffmpeg_demuxer_unittest.cc',
+ 'filters/ffmpeg_glue_unittest.cc',
+ 'filters/ffmpeg_h264_bitstream_converter_unittest.cc',
+ 'filters/ffmpeg_video_decoder_unittest.cc',
+ 'filters/file_data_source_unittest.cc',
+ 'filters/pipeline_integration_test.cc',
+ 'filters/pipeline_integration_test_base.cc',
+ 'filters/source_buffer_stream_unittest.cc',
+ 'filters/video_renderer_base_unittest.cc',
+ 'video/capture/video_capture_device_unittest.cc',
+ 'webm/cluster_builder.cc',
+ 'webm/cluster_builder.h',
+ 'webm/webm_cluster_parser_unittest.cc',
+ 'webm/webm_content_encodings_client_unittest.cc',
+ 'webm/webm_parser_unittest.cc',
+ ],
+ 'conditions': [
+ ['os_posix==1 and OS!=\"mac\"', {
+ 'conditions': [
+ ['linux_use_tcmalloc==1', {
+ 'dependencies': [
+ '../base/allocator/allocator.gyp:allocator',
+ ],
+ }],
+ ],
+ }],
+ ['OS != \"android\"', {
+ 'dependencies': [
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ }],
+ ['OS == \"android\"', {
+ 'sources!': [
+ 'audio/audio_input_volume_unittest.cc',
+ 'base/test_data_util.cc',
+ 'base/test_data_util.h',
+ 'ffmpeg/ffmpeg_common_unittest.cc',
+ 'filters/ffmpeg_audio_decoder_unittest.cc',
+ 'filters/bitstream_converter_unittest.cc',
+ 'filters/chunk_demuxer_unittest.cc',
+ 'filters/ffmpeg_demuxer_unittest.cc',
+ 'filters/ffmpeg_glue_unittest.cc',
+ 'filters/ffmpeg_h264_bitstream_converter_unittest.cc',
+ 'filters/ffmpeg_video_decoder_unittest.cc',
+ 'filters/pipeline_integration_test.cc',
+ 'filters/pipeline_integration_test_base.cc',
+ 'mp4/mp4_stream_parser_unittest.cc',
+ 'webm/webm_cluster_parser_unittest.cc',
+ ],
+ }],
+ ['OS == \"linux\"', {
+ 'conditions': [
+ ['use_cras == 1', {
+ 'sources': [
+ 'audio/linux/cras_output_unittest.cc',
+ ],
+ 'defines': [
+ 'USE_CRAS',
+ ],
+ }],
+ ],
+ }],
+ [ 'target_arch==\"ia32\" or target_arch==\"x64\"', {
+ 'sources': [
+ 'base/simd/convert_rgb_to_yuv_unittest.cc',
+ ],
+ }],
+ ['proprietary_codecs==1 or branding==\"Chrome\"', {
+ 'sources': [
+ 'mp4/avc_unittest.cc',
+ 'mp4/box_reader_unittest.cc',
+ 'mp4/mp4_stream_parser_unittest.cc',
+ 'mp4/offset_byte_queue_unittest.cc',
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'media_test_support',
+ 'type': 'static_library',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ ],
+ 'sources': [
+ 'audio/test_audio_input_controller_factory.cc',
+ 'audio/test_audio_input_controller_factory.h',
+ 'base/mock_callback.cc',
+ 'base/mock_callback.h',
+ 'base/mock_data_source_host.cc',
+ 'base/mock_data_source_host.h',
+ 'base/mock_demuxer_host.cc',
+ 'base/mock_demuxer_host.h',
+ 'base/mock_filter_host.cc',
+ 'base/mock_filter_host.h',
+ 'base/mock_filters.cc',
+ 'base/mock_filters.h',
+ ],
+ },
+ {
+ 'target_name': 'scaler_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../skia/skia.gyp:skia',
+ ],
+ 'sources': [
+ 'tools/scaler_bench/scaler_bench.cc',
+ ],
+ },
+ {
+ 'target_name': 'qt_faststart',
+ 'type': 'executable',
+ 'sources': [
+ 'tools/qt_faststart/qt_faststart.c'
+ ],
+ },
+ {
+ 'target_name': 'seek_tester',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ ],
+ 'sources': [
+ 'tools/seek_tester/seek_tester.cc',
+ ],
+ },
+ ],
+ 'conditions': [
+ ['OS==\"win\"', {
+ 'targets': [
+ {
+ 'target_name': 'player_wtl',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
+ '../ui/ui.gyp:ui',
+ ],
+ 'include_dirs': [
+ '<(DEPTH)/third_party/wtl/include',
+ ],
+ 'sources': [
+ 'tools/player_wtl/list.h',
+ 'tools/player_wtl/mainfrm.h',
+ 'tools/player_wtl/movie.cc',
+ 'tools/player_wtl/movie.h',
+ 'tools/player_wtl/player_wtl.cc',
+ 'tools/player_wtl/player_wtl.rc',
+ 'tools/player_wtl/props.h',
+ 'tools/player_wtl/seek.h',
+ 'tools/player_wtl/resource.h',
+ 'tools/player_wtl/view.h',
+ ],
+ 'msvs_settings': {
+ 'VCLinkerTool': {
+ 'SubSystem': '2', # Set /SUBSYSTEM:WINDOWS
+ },
+ },
+ 'defines': [
+ '_CRT_SECURE_NO_WARNINGS=1',
+ ],
+ },
+ ],
+ }],
+ ['OS == \"win\" or toolkit_uses_gtk == 1', {
+ 'targets': [
+ {
+ 'target_name': 'shader_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../ui/gl/gl.gyp:gl',
+ ],
+ 'sources': [
+ 'tools/shader_bench/shader_bench.cc',
+ 'tools/shader_bench/cpu_color_painter.cc',
+ 'tools/shader_bench/cpu_color_painter.h',
+ 'tools/shader_bench/gpu_color_painter.cc',
+ 'tools/shader_bench/gpu_color_painter.h',
+ 'tools/shader_bench/gpu_painter.cc',
+ 'tools/shader_bench/gpu_painter.h',
+ 'tools/shader_bench/painter.cc',
+ 'tools/shader_bench/painter.h',
+ 'tools/shader_bench/window.cc',
+ 'tools/shader_bench/window.h',
+ ],
+ 'conditions': [
+ ['toolkit_uses_gtk == 1', {
+ 'dependencies': [
+ '../build/linux/system.gyp:gtk',
+ ],
+ 'sources': [
+ 'tools/shader_bench/window_linux.cc',
+ ],
+ }],
+ ['OS==\"win\"', {
+ 'dependencies': [
+ '../third_party/angle/src/build_angle.gyp:libEGL',
+ '../third_party/angle/src/build_angle.gyp:libGLESv2',
+ ],
+ 'sources': [
+ 'tools/shader_bench/window_win.cc',
+ ],
+ }],
+ ],
+ },
+ ],
+ }],
+ ['OS == \"linux\" and target_arch != \"arm\"', {
+ 'targets': [
+ {
+ 'target_name': 'tile_render_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ '../base/base.gyp:base',
+ '../ui/gl/gl.gyp:gl',
+ ],
+ 'libraries': [
+ '-lGL',
+ '-ldl',
+ ],
+ 'sources': [
+ 'tools/tile_render_bench/tile_render_bench.cc',
+ ],
+ },
+ ],
+ }],
+ ['os_posix == 1 and OS != \"mac\" and OS != \"android\"', {
+ 'targets': [
+ {
+ 'target_name': 'player_x11',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'yuv_convert',
+ '../base/base.gyp:base',
+ '../ui/gl/gl.gyp:gl',
+ ],
+ 'link_settings': {
+ 'libraries': [
+ '-ldl',
+ '-lX11',
+ '-lXrender',
+ '-lXext',
+ ],
+ },
+ 'sources': [
+ 'tools/player_x11/data_source_logger.cc',
+ 'tools/player_x11/data_source_logger.h',
+ 'tools/player_x11/gl_video_renderer.cc',
+ 'tools/player_x11/gl_video_renderer.h',
+ 'tools/player_x11/player_x11.cc',
+ 'tools/player_x11/x11_video_renderer.cc',
+ 'tools/player_x11/x11_video_renderer.h',
+ ],
+ },
+ ],
+ }],
+ ['OS == \"android\"', {
+ 'targets': [
+ {
+ 'target_name': 'player_android',
+ 'type': 'static_library',
+ 'sources': [
+ 'base/android/media_player_bridge.cc',
+ 'base/android/media_player_bridge.h',
+ ],
+ 'dependencies': [
+ '../base/base.gyp:base',
+ ],
+ 'include_dirs': [
+ '<(SHARED_INTERMEDIATE_DIR)/media',
+ ],
+ 'actions': [
+ {
+ 'action_name': 'generate-jni-headers',
+ 'inputs': [
+ '../base/android/jni_generator/jni_generator.py',
+ 'base/android/java/src/org/chromium/media/MediaPlayerListener.java',
+ ],
+ 'outputs': [
+ '<(SHARED_INTERMEDIATE_DIR)/media/jni/media_player_listener_jni.h',
+ ],
+ 'action': [
+ 'python',
+ '<(DEPTH)/base/android/jni_generator/jni_generator.py',
+ '-o',
+ '<@(_inputs)',
+ '<@(_outputs)',
+ ],
+ },
+ ],
+ },
+ {
+ 'target_name': 'media_java',
+ 'type': 'none',
+ 'dependencies': [ '../base/base.gyp:base_java' ],
+ 'variables': {
+ 'package_name': 'media',
+ 'java_in_dir': 'base/android/java',
+ },
+ 'includes': [ '../build/java.gypi' ],
+ },
+
+ ],
+ }, { # OS != \"android\"'
+ # Android does not use ffmpeg, so disable the targets which require it.
+ 'targets': [
+ {
+ 'target_name': 'ffmpeg_unittests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'media_test_support',
+ '../base/base.gyp:base',
+ '../base/base.gyp:base_i18n',
+ '../base/base.gyp:test_support_base',
+ '../base/base.gyp:test_support_perf',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'ffmpeg/ffmpeg_unittest.cc',
+ ],
+ 'conditions': [
+ ['toolkit_uses_gtk == 1', {
+ 'dependencies': [
+ # Needed for the following #include chain:
+ # base/run_all_unittests.cc
+ # ../base/test_suite.h
+ # gtk/gtk.h
+ '../build/linux/system.gyp:gtk',
+ ],
+ 'conditions': [
+ ['linux_use_tcmalloc==1', {
+ 'dependencies': [
+ '../base/allocator/allocator.gyp:allocator',
+ ],
+ }],
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'ffmpeg_regression_tests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ 'media_test_support',
+ '../base/base.gyp:test_support_base',
+ '../testing/gmock.gyp:gmock',
+ '../testing/gtest.gyp:gtest',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'base/test_data_util.cc',
+ 'base/run_all_unittests.cc',
+ 'ffmpeg/ffmpeg_regression_tests.cc',
+ 'filters/pipeline_integration_test_base.cc',
+ ],
+ 'conditions': [
+ ['os_posix==1 and OS!=\"mac\"', {
+ 'conditions': [
+ ['linux_use_tcmalloc==1', {
+ 'dependencies': [
+ '../base/allocator/allocator.gyp:allocator',
+ ],
+ }],
+ ],
+ }],
+ ],
+ },
+ {
+ 'target_name': 'ffmpeg_tests',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'test/ffmpeg_tests/ffmpeg_tests.cc',
+ ],
+ },
+ {
+ 'target_name': 'media_bench',
+ 'type': 'executable',
+ 'dependencies': [
+ 'media',
+ '../base/base.gyp:base',
+ '../third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
+ ],
+ 'sources': [
+ 'tools/media_bench/media_bench.cc',
+ ],
+ },
+ ],
+ }]
+ ],
+}
+" 0 64 (face font-lock-comment-face) 64 137 (face font-lock-comment-face) 137 166 (face font-lock-comment-face) 166 171 nil 171 172 (face font-lock-string-face) 172 181 (face font-lock-keyword-face) 181 182 (face font-lock-string-face) 182 190 nil 190 191 (face font-lock-string-face) 191 204 (face font-lock-variable-name-face) 204 205 (face font-lock-string-face) 205 214 nil 214 269 (face font-lock-comment-face) 269 273 nil 273 274 (face font-lock-string-face) 274 289 (face font-lock-variable-name-face) 289 290 (face font-lock-string-face) 290 299 nil 299 365 (face font-lock-comment-face) 365 369 nil 369 370 (face font-lock-string-face) 370 379 (face font-lock-variable-name-face) 379 380 (face font-lock-string-face) 380 392 nil 392 393 (face font-lock-string-face) 393 400 (face font-lock-keyword-face) 400 401 (face font-lock-string-face) 401 417 nil 417 418 (face font-lock-string-face) 418 429 (face font-lock-keyword-face) 429 430 (face font-lock-string-face) 430 432 nil 432 433 (face font-lock-string-face) 433 438 (face font-lock-function-name-face) 438 439 (face font-lock-string-face) 439 447 nil 447 448 (face font-lock-string-face) 448 452 (face font-lock-keyword-face) 452 453 (face font-lock-string-face) 453 455 nil 455 458 (face font-lock-string-face) 458 467 (face font-lock-variable-name-face) 467 469 (face font-lock-string-face) 469 477 nil 477 478 (face font-lock-string-face) 478 490 (face font-lock-keyword-face) 490 491 (face font-lock-string-face) 491 503 nil 503 504 (face font-lock-string-face) 504 515 (face font-lock-function-name-face) 515 516 (face font-lock-string-face) 516 526 nil 526 527 (face font-lock-string-face) 527 548 (face font-lock-function-name-face) 548 549 (face font-lock-string-face) 549 559 nil 559 560 (face font-lock-string-face) 560 643 (face font-lock-function-name-face) 643 644 (face font-lock-string-face) 644 654 nil 654 655 (face font-lock-string-face) 655 696 (face font-lock-function-name-face) 696 697 (face font-lock-string-face) 697 707 nil 707 708 (face font-lock-string-face) 708 735 (face font-lock-function-name-face) 735 736 (face font-lock-string-face) 736 746 nil 746 747 (face font-lock-string-face) 747 784 (face font-lock-function-name-face) 784 785 (face font-lock-string-face) 785 795 nil 795 796 (face font-lock-string-face) 796 811 (face font-lock-function-name-face) 811 812 (face font-lock-string-face) 812 829 nil 829 830 (face font-lock-string-face) 830 837 (face font-lock-keyword-face) 837 838 (face font-lock-string-face) 838 850 nil 850 851 (face font-lock-string-face) 851 871 (face font-lock-preprocessor-face) 871 872 (face font-lock-string-face) 872 889 nil 889 890 (face font-lock-string-face) 890 902 (face font-lock-keyword-face) 902 903 (face font-lock-string-face) 903 915 nil 915 916 (face font-lock-string-face) 916 918 (face font-lock-constant-face) 918 919 (face font-lock-string-face) 919 936 nil 936 937 (face font-lock-string-face) 937 944 (face font-lock-keyword-face) 944 945 (face font-lock-string-face) 945 957 nil 957 958 (face font-lock-string-face) 958 996 (face font-lock-constant-face) 996 997 (face font-lock-string-face) 997 1007 nil 1007 1008 (face font-lock-string-face) 1008 1045 (face font-lock-constant-face) 1045 1046 (face font-lock-string-face) 1046 1056 nil 1056 1057 (face font-lock-string-face) 1057 1100 (face font-lock-constant-face) 1100 1101 (face font-lock-string-face) 1101 1111 nil 1111 1112 (face font-lock-string-face) 1112 1154 (face font-lock-constant-face) 1154 1155 (face font-lock-string-face) 1155 1165 
nil 1165 1166 (face font-lock-string-face) 1166 1197 (face font-lock-constant-face) 1197 1198 (face font-lock-string-face) 1198 1208 nil 1208 1209 (face font-lock-string-face) 1209 1239 (face font-lock-constant-face) 1239 1240 (face font-lock-string-face) 1240 1250 nil 1250 1251 (face font-lock-string-face) 1251 1283 (face font-lock-constant-face) 1283 1284 (face font-lock-string-face) 1284 1294 nil 1294 1295 (face font-lock-string-face) 1295 1326 (face font-lock-constant-face) 1326 1327 (face font-lock-string-face) 1327 1337 nil 1337 1338 (face font-lock-string-face) 1338 1369 (face font-lock-constant-face) 1369 1370 (face font-lock-string-face) 1370 1380 nil 1380 1381 (face font-lock-string-face) 1381 1419 (face font-lock-constant-face) 1419 1420 (face font-lock-string-face) 1420 1430 nil 1430 1431 (face font-lock-string-face) 1431 1467 (face font-lock-constant-face) 1467 1468 (face font-lock-string-face) 1468 1478 nil 1478 1479 (face font-lock-string-face) 1479 1507 (face font-lock-constant-face) 1507 1508 (face font-lock-string-face) 1508 1518 nil 1518 1519 (face font-lock-string-face) 1519 1546 (face font-lock-constant-face) 1546 1547 (face font-lock-string-face) 1547 1557 nil 1557 1558 (face font-lock-string-face) 1558 1574 (face font-lock-constant-face) 1574 1575 (face font-lock-string-face) 1575 1585 nil 1585 1586 (face font-lock-string-face) 1586 1617 (face font-lock-constant-face) 1617 1618 (face font-lock-string-face) 1618 1628 nil 1628 1629 (face font-lock-string-face) 1629 1659 (face font-lock-constant-face) 1659 1660 (face font-lock-string-face) 1660 1670 nil 1670 1671 (face font-lock-string-face) 1671 1703 (face font-lock-constant-face) 1703 1704 (face font-lock-string-face) 1704 1714 nil 1714 1715 (face font-lock-string-face) 1715 1746 (face font-lock-constant-face) 1746 1747 (face font-lock-string-face) 1747 1757 nil 1757 1758 (face font-lock-string-face) 1758 1784 (face font-lock-constant-face) 1784 1785 (face font-lock-string-face) 1785 1795 nil 1795 1796 (face font-lock-string-face) 1796 1821 (face font-lock-constant-face) 1821 1822 (face font-lock-string-face) 1822 1832 nil 1832 1833 (face font-lock-string-face) 1833 1855 (face font-lock-constant-face) 1855 1856 (face font-lock-string-face) 1856 1866 nil 1866 1867 (face font-lock-string-face) 1867 1888 (face font-lock-constant-face) 1888 1889 (face font-lock-string-face) 1889 1899 nil 1899 1900 (face font-lock-string-face) 1900 1927 (face font-lock-constant-face) 1927 1928 (face font-lock-string-face) 1928 1938 nil 1938 1939 (face font-lock-string-face) 1939 1965 (face font-lock-constant-face) 1965 1966 (face font-lock-string-face) 1966 1976 nil 1976 1977 (face font-lock-string-face) 1977 2009 (face font-lock-constant-face) 2009 2010 (face font-lock-string-face) 2010 2020 nil 2020 2021 (face font-lock-string-face) 2021 2052 (face font-lock-constant-face) 2052 2053 (face font-lock-string-face) 2053 2063 nil 2063 2064 (face font-lock-string-face) 2064 2096 (face font-lock-constant-face) 2096 2097 (face font-lock-string-face) 2097 2107 nil 2107 2108 (face font-lock-string-face) 2108 2139 (face font-lock-constant-face) 2139 2140 (face font-lock-string-face) 2140 2150 nil 2150 2151 (face font-lock-string-face) 2151 2188 (face font-lock-constant-face) 2188 2189 (face font-lock-string-face) 2189 2199 nil 2199 2200 (face font-lock-string-face) 2200 2236 (face font-lock-constant-face) 2236 2237 (face font-lock-string-face) 2237 2247 nil 2247 2248 (face font-lock-string-face) 2248 2275 (face font-lock-constant-face) 2275 2276 
(face font-lock-string-face) 2276 2286 nil 2286 2287 (face font-lock-string-face) 2287 2313 (face font-lock-constant-face) 2313 2314 (face font-lock-string-face) 2314 2324 nil 2324 2325 (face font-lock-string-face) 2325 2352 (face font-lock-constant-face) 2352 2353 (face font-lock-string-face) 2353 2363 nil 2363 2364 (face font-lock-string-face) 2364 2390 (face font-lock-constant-face) 2390 2391 (face font-lock-string-face) 2391 2401 nil 2401 2402 (face font-lock-string-face) 2402 2427 (face font-lock-constant-face) 2427 2428 (face font-lock-string-face) 2428 2438 nil 2438 2439 (face font-lock-string-face) 2439 2463 (face font-lock-constant-face) 2463 2464 (face font-lock-string-face) 2464 2474 nil 2474 2475 (face font-lock-string-face) 2475 2494 (face font-lock-constant-face) 2494 2495 (face font-lock-string-face) 2495 2505 nil 2505 2506 (face font-lock-string-face) 2506 2524 (face font-lock-constant-face) 2524 2525 (face font-lock-string-face) 2525 2535 nil 2535 2536 (face font-lock-string-face) 2536 2571 (face font-lock-constant-face) 2571 2572 (face font-lock-string-face) 2572 2582 nil 2582 2583 (face font-lock-string-face) 2583 2617 (face font-lock-constant-face) 2617 2618 (face font-lock-string-face) 2618 2628 nil 2628 2629 (face font-lock-string-face) 2629 2668 (face font-lock-constant-face) 2668 2669 (face font-lock-string-face) 2669 2679 nil 2679 2680 (face font-lock-string-face) 2680 2721 (face font-lock-constant-face) 2721 2722 (face font-lock-string-face) 2722 2732 nil 2732 2733 (face font-lock-string-face) 2733 2765 (face font-lock-constant-face) 2765 2766 (face font-lock-string-face) 2766 2776 nil 2776 2777 (face font-lock-string-face) 2777 2808 (face font-lock-constant-face) 2808 2809 (face font-lock-string-face) 2809 2819 nil 2819 2820 (face font-lock-string-face) 2820 2853 (face font-lock-constant-face) 2853 2854 (face font-lock-string-face) 2854 2864 nil 2864 2865 (face font-lock-string-face) 2865 2897 (face font-lock-constant-face) 2897 2898 (face font-lock-string-face) 2898 2908 nil 2908 2909 (face font-lock-string-face) 2909 2943 (face font-lock-constant-face) 2943 2944 (face font-lock-string-face) 2944 2954 nil 2954 2955 (face font-lock-string-face) 2955 2988 (face font-lock-constant-face) 2988 2989 (face font-lock-string-face) 2989 2999 nil 2999 3000 (face font-lock-string-face) 3000 3025 (face font-lock-constant-face) 3025 3026 (face font-lock-string-face) 3026 3036 nil 3036 3037 (face font-lock-string-face) 3037 3061 (face font-lock-constant-face) 3061 3062 (face font-lock-string-face) 3062 3072 nil 3072 3073 (face font-lock-string-face) 3073 3099 (face font-lock-constant-face) 3099 3100 (face font-lock-string-face) 3100 3110 nil 3110 3111 (face font-lock-string-face) 3111 3136 (face font-lock-constant-face) 3136 3137 (face font-lock-string-face) 3137 3147 nil 3147 3148 (face font-lock-string-face) 3148 3172 (face font-lock-constant-face) 3172 3173 (face font-lock-string-face) 3173 3183 nil 3183 3184 (face font-lock-string-face) 3184 3207 (face font-lock-constant-face) 3207 3208 (face font-lock-string-face) 3208 3218 nil 3218 3219 (face font-lock-string-face) 3219 3246 (face font-lock-constant-face) 3246 3247 (face font-lock-string-face) 3247 3257 nil 3257 3258 (face font-lock-string-face) 3258 3284 (face font-lock-constant-face) 3284 3285 (face font-lock-string-face) 3285 3295 nil 3295 3296 (face font-lock-string-face) 3296 3322 (face font-lock-constant-face) 3322 3323 (face font-lock-string-face) 3323 3333 nil 3333 3334 (face font-lock-string-face) 3334 3359 
(face font-lock-constant-face) 3359 3360 (face font-lock-string-face) 3360 3370 nil 3370 3371 (face font-lock-string-face) 3371 3409 (face font-lock-constant-face) 3409 3410 (face font-lock-string-face) 3410 3420 nil 3420 3421 (face font-lock-string-face) 3421 3458 (face font-lock-constant-face) 3458 3459 (face font-lock-string-face) 3459 3469 nil 3469 3470 (face font-lock-string-face) 3470 3498 (face font-lock-constant-face) 3498 3499 (face font-lock-string-face) 3499 3509 nil 3509 3510 (face font-lock-string-face) 3510 3537 (face font-lock-constant-face) 3537 3538 (face font-lock-string-face) 3538 3548 nil 3548 3549 (face font-lock-string-face) 3549 3589 (face font-lock-constant-face) 3589 3590 (face font-lock-string-face) 3590 3600 nil 3600 3601 (face font-lock-string-face) 3601 3640 (face font-lock-constant-face) 3640 3641 (face font-lock-string-face) 3641 3651 nil 3651 3652 (face font-lock-string-face) 3652 3693 (face font-lock-constant-face) 3693 3694 (face font-lock-string-face) 3694 3704 nil 3704 3705 (face font-lock-string-face) 3705 3745 (face font-lock-constant-face) 3745 3746 (face font-lock-string-face) 3746 3756 nil 3756 3757 (face font-lock-string-face) 3757 3787 (face font-lock-constant-face) 3787 3788 (face font-lock-string-face) 3788 3798 nil 3798 3799 (face font-lock-string-face) 3799 3828 (face font-lock-constant-face) 3828 3829 (face font-lock-string-face) 3829 3839 nil 3839 3840 (face font-lock-string-face) 3840 3869 (face font-lock-constant-face) 3869 3870 (face font-lock-string-face) 3870 3880 nil 3880 3881 (face font-lock-string-face) 3881 3909 (face font-lock-constant-face) 3909 3910 (face font-lock-string-face) 3910 3920 nil 3920 3921 (face font-lock-string-face) 3921 3945 (face font-lock-constant-face) 3945 3946 (face font-lock-string-face) 3946 3956 nil 3956 3957 (face font-lock-string-face) 3957 3980 (face font-lock-constant-face) 3980 3981 (face font-lock-string-face) 3981 3991 nil 3991 3992 (face font-lock-string-face) 3992 4019 (face font-lock-constant-face) 4019 4020 (face font-lock-string-face) 4020 4030 nil 4030 4031 (face font-lock-string-face) 4031 4057 (face font-lock-constant-face) 4057 4058 (face font-lock-string-face) 4058 4068 nil 4068 4069 (face font-lock-string-face) 4069 4090 (face font-lock-constant-face) 4090 4091 (face font-lock-string-face) 4091 4101 nil 4101 4102 (face font-lock-string-face) 4102 4122 (face font-lock-constant-face) 4122 4123 (face font-lock-string-face) 4123 4133 nil 4133 4134 (face font-lock-string-face) 4134 4157 (face font-lock-constant-face) 4157 4158 (face font-lock-string-face) 4158 4168 nil 4168 4169 (face font-lock-string-face) 4169 4191 (face font-lock-constant-face) 4191 4192 (face font-lock-string-face) 4192 4202 nil 4202 4203 (face font-lock-string-face) 4203 4243 (face font-lock-constant-face) 4243 4244 (face font-lock-string-face) 4244 4254 nil 4254 4255 (face font-lock-string-face) 4255 4294 (face font-lock-constant-face) 4294 4295 (face font-lock-string-face) 4295 4305 nil 4305 4306 (face font-lock-string-face) 4306 4347 (face font-lock-constant-face) 4347 4348 (face font-lock-string-face) 4348 4358 nil 4358 4359 (face font-lock-string-face) 4359 4399 (face font-lock-constant-face) 4399 4400 (face font-lock-string-face) 4400 4410 nil 4410 4411 (face font-lock-string-face) 4411 4441 (face font-lock-constant-face) 4441 4442 (face font-lock-string-face) 4442 4452 nil 4452 4453 (face font-lock-string-face) 4453 4482 (face font-lock-constant-face) 4482 4483 (face font-lock-string-face) 4483 4493 nil 4493 4494 
(face font-lock-string-face) 4494 4523 (face font-lock-constant-face) 4523 4524 (face font-lock-string-face) 4524 4534 nil 4534 4535 (face font-lock-string-face) 4535 4563 (face font-lock-constant-face) 4563 4564 (face font-lock-string-face) 4564 4574 nil 4574 4575 (face font-lock-string-face) 4575 4610 (face font-lock-constant-face) 4610 4611 (face font-lock-string-face) 4611 4621 nil 4621 4622 (face font-lock-string-face) 4622 4656 (face font-lock-constant-face) 4656 4657 (face font-lock-string-face) 4657 4667 nil 4667 4668 (face font-lock-string-face) 4668 4697 (face font-lock-constant-face) 4697 4698 (face font-lock-string-face) 4698 4708 nil 4708 4709 (face font-lock-string-face) 4709 4737 (face font-lock-constant-face) 4737 4738 (face font-lock-string-face) 4738 4748 nil 4748 4749 (face font-lock-string-face) 4749 4780 (face font-lock-constant-face) 4780 4781 (face font-lock-string-face) 4781 4791 nil 4791 4792 (face font-lock-string-face) 4792 4822 (face font-lock-constant-face) 4822 4823 (face font-lock-string-face) 4823 4833 nil 4833 4834 (face font-lock-string-face) 4834 4869 (face font-lock-constant-face) 4869 4870 (face font-lock-string-face) 4870 4880 nil 4880 4881 (face font-lock-string-face) 4881 4915 (face font-lock-constant-face) 4915 4916 (face font-lock-string-face) 4916 4926 nil 4926 4927 (face font-lock-string-face) 4927 4948 (face font-lock-constant-face) 4948 4949 (face font-lock-string-face) 4949 4959 nil 4959 4960 (face font-lock-string-face) 4960 4980 (face font-lock-constant-face) 4980 4981 (face font-lock-string-face) 4981 4991 nil 4991 4992 (face font-lock-string-face) 4992 5020 (face font-lock-constant-face) 5020 5021 (face font-lock-string-face) 5021 5031 nil 5031 5032 (face font-lock-string-face) 5032 5059 (face font-lock-constant-face) 5059 5060 (face font-lock-string-face) 5060 5070 nil 5070 5071 (face font-lock-string-face) 5071 5092 (face font-lock-constant-face) 5092 5093 (face font-lock-string-face) 5093 5103 nil 5103 5104 (face font-lock-string-face) 5104 5132 (face font-lock-constant-face) 5132 5133 (face font-lock-string-face) 5133 5143 nil 5143 5144 (face font-lock-string-face) 5144 5171 (face font-lock-constant-face) 5171 5172 (face font-lock-string-face) 5172 5182 nil 5182 5183 (face font-lock-string-face) 5183 5217 (face font-lock-constant-face) 5217 5218 (face font-lock-string-face) 5218 5228 nil 5228 5229 (face font-lock-string-face) 5229 5262 (face font-lock-constant-face) 5262 5263 (face font-lock-string-face) 5263 5273 nil 5273 5274 (face font-lock-string-face) 5274 5297 (face font-lock-constant-face) 5297 5298 (face font-lock-string-face) 5298 5308 nil 5308 5309 (face font-lock-string-face) 5309 5324 (face font-lock-constant-face) 5324 5325 (face font-lock-string-face) 5325 5335 nil 5335 5336 (face font-lock-string-face) 5336 5350 (face font-lock-constant-face) 5350 5351 (face font-lock-string-face) 5351 5361 nil 5361 5362 (face font-lock-string-face) 5362 5380 (face font-lock-constant-face) 5380 5381 (face font-lock-string-face) 5381 5391 nil 5391 5392 (face font-lock-string-face) 5392 5409 (face font-lock-constant-face) 5409 5410 (face font-lock-string-face) 5410 5420 nil 5420 5421 (face font-lock-string-face) 5421 5443 (face font-lock-constant-face) 5443 5444 (face font-lock-string-face) 5444 5454 nil 5454 5455 (face font-lock-string-face) 5455 5476 (face font-lock-constant-face) 5476 5477 (face font-lock-string-face) 5477 5487 nil 5487 5488 (face font-lock-string-face) 5488 5501 (face font-lock-constant-face) 5501 5502 (face 
font-lock-string-face) 5502 5512 nil 5512 5513 (face font-lock-string-face) 5513 5525 (face font-lock-constant-face) 5525 5526 (face font-lock-string-face) 5526 5536 nil 5536 5537 (face font-lock-string-face) 5537 5561 (face font-lock-constant-face) 5561 5562 (face font-lock-string-face) 5562 5572 nil 5572 5573 (face font-lock-string-face) 5573 5596 (face font-lock-constant-face) 5596 5597 (face font-lock-string-face) 5597 5607 nil 5607 5608 (face font-lock-string-face) 5608 5627 (face font-lock-constant-face) 5627 5628 (face font-lock-string-face) 5628 5638 nil 5638 5639 (face font-lock-string-face) 5639 5657 (face font-lock-constant-face) 5657 5658 (face font-lock-string-face) 5658 5668 nil 5668 5669 (face font-lock-string-face) 5669 5688 (face font-lock-constant-face) 5688 5689 (face font-lock-string-face) 5689 5699 nil 5699 5700 (face font-lock-string-face) 5700 5718 (face font-lock-constant-face) 5718 5719 (face font-lock-string-face) 5719 5729 nil 5729 5730 (face font-lock-string-face) 5730 5752 (face font-lock-constant-face) 5752 5753 (face font-lock-string-face) 5753 5763 nil 5763 5764 (face font-lock-string-face) 5764 5785 (face font-lock-constant-face) 5785 5786 (face font-lock-string-face) 5786 5796 nil 5796 5797 (face font-lock-string-face) 5797 5819 (face font-lock-constant-face) 5819 5820 (face font-lock-string-face) 5820 5830 nil 5830 5831 (face font-lock-string-face) 5831 5852 (face font-lock-constant-face) 5852 5853 (face font-lock-string-face) 5853 5863 nil 5863 5864 (face font-lock-string-face) 5864 5880 (face font-lock-constant-face) 5880 5881 (face font-lock-string-face) 5881 5891 nil 5891 5892 (face font-lock-string-face) 5892 5915 (face font-lock-constant-face) 5915 5916 (face font-lock-string-face) 5916 5926 nil 5926 5927 (face font-lock-string-face) 5927 5942 (face font-lock-constant-face) 5942 5943 (face font-lock-string-face) 5943 5953 nil 5953 5954 (face font-lock-string-face) 5954 5968 (face font-lock-constant-face) 5968 5969 (face font-lock-string-face) 5969 5979 nil 5979 5980 (face font-lock-string-face) 5980 6002 (face font-lock-constant-face) 6002 6003 (face font-lock-string-face) 6003 6013 nil 6013 6014 (face font-lock-string-face) 6014 6035 (face font-lock-constant-face) 6035 6036 (face font-lock-string-face) 6036 6046 nil 6046 6047 (face font-lock-string-face) 6047 6059 (face font-lock-constant-face) 6059 6060 (face font-lock-string-face) 6060 6070 nil 6070 6071 (face font-lock-string-face) 6071 6082 (face font-lock-constant-face) 6082 6083 (face font-lock-string-face) 6083 6093 nil 6093 6094 (face font-lock-string-face) 6094 6119 (face font-lock-constant-face) 6119 6120 (face font-lock-string-face) 6120 6130 nil 6130 6131 (face font-lock-string-face) 6131 6155 (face font-lock-constant-face) 6155 6156 (face font-lock-string-face) 6156 6166 nil 6166 6167 (face font-lock-string-face) 6167 6185 (face font-lock-constant-face) 6185 6186 (face font-lock-string-face) 6186 6196 nil 6196 6197 (face font-lock-string-face) 6197 6212 (face font-lock-constant-face) 6212 6213 (face font-lock-string-face) 6213 6223 nil 6223 6224 (face font-lock-string-face) 6224 6238 (face font-lock-constant-face) 6238 6239 (face font-lock-string-face) 6239 6249 nil 6249 6250 (face font-lock-string-face) 6250 6282 (face font-lock-constant-face) 6282 6283 (face font-lock-string-face) 6283 6293 nil 6293 6294 (face font-lock-string-face) 6294 6325 (face font-lock-constant-face) 6325 6326 (face font-lock-string-face) 6326 6336 nil 6336 6337 (face font-lock-string-face) 6337 6349 (face 
font-lock-constant-face) 6349 6350 (face font-lock-string-face) 6350 6360 nil 6360 6361 (face font-lock-string-face) 6361 6382 (face font-lock-constant-face) 6382 6383 (face font-lock-string-face) 6383 6393 nil 6393 6394 (face font-lock-string-face) 6394 6413 (face font-lock-constant-face) 6413 6414 (face font-lock-string-face) 6414 6424 nil 6424 6425 (face font-lock-string-face) 6425 6442 (face font-lock-constant-face) 6442 6443 (face font-lock-string-face) 6443 6453 nil 6453 6454 (face font-lock-string-face) 6454 6470 (face font-lock-constant-face) 6470 6471 (face font-lock-string-face) 6471 6481 nil 6481 6482 (face font-lock-string-face) 6482 6504 (face font-lock-constant-face) 6504 6505 (face font-lock-string-face) 6505 6515 nil 6515 6516 (face font-lock-string-face) 6516 6535 (face font-lock-constant-face) 6535 6536 (face font-lock-string-face) 6536 6546 nil 6546 6547 (face font-lock-string-face) 6547 6569 (face font-lock-constant-face) 6569 6570 (face font-lock-string-face) 6570 6580 nil 6580 6581 (face font-lock-string-face) 6581 6602 (face font-lock-constant-face) 6602 6603 (face font-lock-string-face) 6603 6613 nil 6613 6614 (face font-lock-string-face) 6614 6631 (face font-lock-constant-face) 6631 6632 (face font-lock-string-face) 6632 6642 nil 6642 6643 (face font-lock-string-face) 6643 6671 (face font-lock-constant-face) 6671 6672 (face font-lock-string-face) 6672 6682 nil 6682 6683 (face font-lock-string-face) 6683 6710 (face font-lock-constant-face) 6710 6711 (face font-lock-string-face) 6711 6721 nil 6721 6722 (face font-lock-string-face) 6722 6738 (face font-lock-constant-face) 6738 6739 (face font-lock-string-face) 6739 6749 nil 6749 6750 (face font-lock-string-face) 6750 6765 (face font-lock-constant-face) 6765 6766 (face font-lock-string-face) 6766 6776 nil 6776 6777 (face font-lock-string-face) 6777 6800 (face font-lock-constant-face) 6800 6801 (face font-lock-string-face) 6801 6811 nil 6811 6812 (face font-lock-string-face) 6812 6834 (face font-lock-constant-face) 6834 6835 (face font-lock-string-face) 6835 6845 nil 6845 6846 (face font-lock-string-face) 6846 6860 (face font-lock-constant-face) 6860 6861 (face font-lock-string-face) 6861 6871 nil 6871 6872 (face font-lock-string-face) 6872 6885 (face font-lock-constant-face) 6885 6886 (face font-lock-string-face) 6886 6896 nil 6896 6897 (face font-lock-string-face) 6897 6920 (face font-lock-constant-face) 6920 6921 (face font-lock-string-face) 6921 6931 nil 6931 6932 (face font-lock-string-face) 6932 6954 (face font-lock-constant-face) 6954 6955 (face font-lock-string-face) 6955 6965 nil 6965 6966 (face font-lock-string-face) 6966 6986 (face font-lock-constant-face) 6986 6987 (face font-lock-string-face) 6987 6997 nil 6997 6998 (face font-lock-string-face) 6998 7017 (face font-lock-constant-face) 7017 7018 (face font-lock-string-face) 7018 7028 nil 7028 7029 (face font-lock-string-face) 7029 7050 (face font-lock-constant-face) 7050 7051 (face font-lock-string-face) 7051 7061 nil 7061 7062 (face font-lock-string-face) 7062 7082 (face font-lock-constant-face) 7082 7083 (face font-lock-string-face) 7083 7093 nil 7093 7094 (face font-lock-string-face) 7094 7122 (face font-lock-constant-face) 7122 7123 (face font-lock-string-face) 7123 7133 nil 7133 7134 (face font-lock-string-face) 7134 7161 (face font-lock-constant-face) 7161 7162 (face font-lock-string-face) 7162 7172 nil 7172 7173 (face font-lock-string-face) 7173 7194 (face font-lock-constant-face) 7194 7195 (face font-lock-string-face) 7195 7205 nil 7205 7206 (face 
font-lock-string-face) 7206 7226 (face font-lock-constant-face) 7226 7227 (face font-lock-string-face) 7227 7237 nil 7237 7238 (face font-lock-string-face) 7238 7266 (face font-lock-constant-face) 7266 7267 (face font-lock-string-face) 7267 7277 nil 7277 7278 (face font-lock-string-face) 7278 7305 (face font-lock-constant-face) 7305 7306 (face font-lock-string-face) 7306 7316 nil 7316 7317 (face font-lock-string-face) 7317 7336 (face font-lock-constant-face) 7336 7337 (face font-lock-string-face) 7337 7347 nil 7347 7348 (face font-lock-string-face) 7348 7366 (face font-lock-constant-face) 7366 7367 (face font-lock-string-face) 7367 7377 nil 7377 7378 (face font-lock-string-face) 7378 7399 (face font-lock-constant-face) 7399 7400 (face font-lock-string-face) 7400 7410 nil 7410 7411 (face font-lock-string-face) 7411 7429 (face font-lock-constant-face) 7429 7430 (face font-lock-string-face) 7430 7440 nil 7440 7441 (face font-lock-string-face) 7441 7458 (face font-lock-constant-face) 7458 7459 (face font-lock-string-face) 7459 7469 nil 7469 7470 (face font-lock-string-face) 7470 7493 (face font-lock-constant-face) 7493 7494 (face font-lock-string-face) 7494 7504 nil 7504 7505 (face font-lock-string-face) 7505 7527 (face font-lock-constant-face) 7527 7528 (face font-lock-string-face) 7528 7538 nil 7538 7539 (face font-lock-string-face) 7539 7562 (face font-lock-constant-face) 7562 7563 (face font-lock-string-face) 7563 7573 nil 7573 7574 (face font-lock-string-face) 7574 7596 (face font-lock-constant-face) 7596 7597 (face font-lock-string-face) 7597 7607 nil 7607 7608 (face font-lock-string-face) 7608 7631 (face font-lock-constant-face) 7631 7632 (face font-lock-string-face) 7632 7642 nil 7642 7643 (face font-lock-string-face) 7643 7665 (face font-lock-constant-face) 7665 7666 (face font-lock-string-face) 7666 7676 nil 7676 7677 (face font-lock-string-face) 7677 7705 (face font-lock-constant-face) 7705 7706 (face font-lock-string-face) 7706 7716 nil 7716 7717 (face font-lock-string-face) 7717 7744 (face font-lock-constant-face) 7744 7745 (face font-lock-string-face) 7745 7755 nil 7755 7756 (face font-lock-string-face) 7756 7791 (face font-lock-constant-face) 7791 7792 (face font-lock-string-face) 7792 7802 nil 7802 7803 (face font-lock-string-face) 7803 7837 (face font-lock-constant-face) 7837 7838 (face font-lock-string-face) 7838 7848 nil 7848 7849 (face font-lock-string-face) 7849 7879 (face font-lock-constant-face) 7879 7880 (face font-lock-string-face) 7880 7890 nil 7890 7891 (face font-lock-string-face) 7891 7920 (face font-lock-constant-face) 7920 7921 (face font-lock-string-face) 7921 7931 nil 7931 7932 (face font-lock-string-face) 7932 7962 (face font-lock-constant-face) 7962 7963 (face font-lock-string-face) 7963 7973 nil 7973 7974 (face font-lock-string-face) 7974 8003 (face font-lock-constant-face) 8003 8004 (face font-lock-string-face) 8004 8014 nil 8014 8015 (face font-lock-string-face) 8015 8039 (face font-lock-constant-face) 8039 8040 (face font-lock-string-face) 8040 8050 nil 8050 8051 (face font-lock-string-face) 8051 8074 (face font-lock-constant-face) 8074 8075 (face font-lock-string-face) 8075 8085 nil 8085 8086 (face font-lock-string-face) 8086 8116 (face font-lock-constant-face) 8116 8117 (face font-lock-string-face) 8117 8127 nil 8127 8128 (face font-lock-string-face) 8128 8152 (face font-lock-constant-face) 8152 8153 (face font-lock-string-face) 8153 8163 nil 8163 8164 (face font-lock-string-face) 8164 8187 (face font-lock-constant-face) 8187 8188 (face 
font-lock-string-face) 8188 8198 nil 8198 8199 (face font-lock-string-face) 8199 8230 (face font-lock-constant-face) 8230 8231 (face font-lock-string-face) 8231 8241 nil 8241 8242 (face font-lock-string-face) 8242 8272 (face font-lock-constant-face) 8272 8273 (face font-lock-string-face) 8273 8283 nil 8283 8284 (face font-lock-string-face) 8284 8309 (face font-lock-constant-face) 8309 8310 (face font-lock-string-face) 8310 8320 nil 8320 8321 (face font-lock-string-face) 8321 8345 (face font-lock-constant-face) 8345 8346 (face font-lock-string-face) 8346 8356 nil 8356 8357 (face font-lock-string-face) 8357 8399 (face font-lock-constant-face) 8399 8400 (face font-lock-string-face) 8400 8410 nil 8410 8411 (face font-lock-string-face) 8411 8452 (face font-lock-constant-face) 8452 8453 (face font-lock-string-face) 8453 8463 nil 8463 8464 (face font-lock-string-face) 8464 8486 (face font-lock-constant-face) 8486 8487 (face font-lock-string-face) 8487 8497 nil 8497 8498 (face font-lock-string-face) 8498 8519 (face font-lock-constant-face) 8519 8520 (face font-lock-string-face) 8520 8530 nil 8530 8531 (face font-lock-string-face) 8531 8562 (face font-lock-constant-face) 8562 8563 (face font-lock-string-face) 8563 8573 nil 8573 8574 (face font-lock-string-face) 8574 8604 (face font-lock-constant-face) 8604 8605 (face font-lock-string-face) 8605 8615 nil 8615 8616 (face font-lock-string-face) 8616 8643 (face font-lock-constant-face) 8643 8644 (face font-lock-string-face) 8644 8654 nil 8654 8655 (face font-lock-string-face) 8655 8681 (face font-lock-constant-face) 8681 8682 (face font-lock-string-face) 8682 8692 nil 8692 8693 (face font-lock-string-face) 8693 8721 (face font-lock-constant-face) 8721 8722 (face font-lock-string-face) 8722 8732 nil 8732 8733 (face font-lock-string-face) 8733 8760 (face font-lock-constant-face) 8760 8761 (face font-lock-string-face) 8761 8771 nil 8771 8772 (face font-lock-string-face) 8772 8805 (face font-lock-constant-face) 8805 8806 (face font-lock-string-face) 8806 8816 nil 8816 8817 (face font-lock-string-face) 8817 8849 (face font-lock-constant-face) 8849 8850 (face font-lock-string-face) 8850 8860 nil 8860 8861 (face font-lock-string-face) 8861 8892 (face font-lock-constant-face) 8892 8893 (face font-lock-string-face) 8893 8903 nil 8903 8904 (face font-lock-string-face) 8904 8934 (face font-lock-constant-face) 8934 8935 (face font-lock-string-face) 8935 8945 nil 8945 8946 (face font-lock-string-face) 8946 8978 (face font-lock-constant-face) 8978 8979 (face font-lock-string-face) 8979 8989 nil 8989 8990 (face font-lock-string-face) 8990 9021 (face font-lock-constant-face) 9021 9022 (face font-lock-string-face) 9022 9032 nil 9032 9033 (face font-lock-string-face) 9033 9063 (face font-lock-constant-face) 9063 9064 (face font-lock-string-face) 9064 9074 nil 9074 9075 (face font-lock-string-face) 9075 9104 (face font-lock-constant-face) 9104 9105 (face font-lock-string-face) 9105 9115 nil 9115 9116 (face font-lock-string-face) 9116 9158 (face font-lock-constant-face) 9158 9159 (face font-lock-string-face) 9159 9169 nil 9169 9170 (face font-lock-string-face) 9170 9211 (face font-lock-constant-face) 9211 9212 (face font-lock-string-face) 9212 9222 nil 9222 9223 (face font-lock-string-face) 9223 9272 (face font-lock-constant-face) 9272 9273 (face font-lock-string-face) 9273 9283 nil 9283 9284 (face font-lock-string-face) 9284 9332 (face font-lock-constant-face) 9332 9333 (face font-lock-string-face) 9333 9343 nil 9343 9344 (face font-lock-string-face) 9344 9388 (face 
font-lock-constant-face) 9388 9389 (face font-lock-string-face) 9389 9399 nil 9399 9400 (face font-lock-string-face) 9400 9445 (face font-lock-constant-face) 9445 9446 (face font-lock-string-face) 9446 9456 nil 9456 9457 (face font-lock-string-face) 9457 9507 (face font-lock-constant-face) 9507 9508 (face font-lock-string-face) 9508 9518 nil 9518 9519 (face font-lock-string-face) 9519 9570 (face font-lock-constant-face) 9570 9571 (face font-lock-string-face) 9571 9581 nil 9581 9582 (face font-lock-string-face) 9582 9611 (face font-lock-constant-face) 9611 9612 (face font-lock-string-face) 9612 9622 nil 9622 9623 (face font-lock-string-face) 9623 9659 (face font-lock-constant-face) 9659 9660 (face font-lock-string-face) 9660 9670 nil 9670 9671 (face font-lock-string-face) 9671 9714 (face font-lock-constant-face) 9714 9715 (face font-lock-string-face) 9715 9725 nil 9725 9726 (face font-lock-string-face) 9726 9768 (face font-lock-constant-face) 9768 9769 (face font-lock-string-face) 9769 9779 nil 9779 9780 (face font-lock-string-face) 9780 9816 (face font-lock-constant-face) 9816 9817 (face font-lock-string-face) 9817 9827 nil 9827 9828 (face font-lock-string-face) 9828 9863 (face font-lock-constant-face) 9863 9864 (face font-lock-string-face) 9864 9874 nil 9874 9875 (face font-lock-string-face) 9875 9910 (face font-lock-constant-face) 9910 9911 (face font-lock-string-face) 9911 9921 nil 9921 9922 (face font-lock-string-face) 9922 9958 (face font-lock-constant-face) 9958 9959 (face font-lock-string-face) 9959 9969 nil 9969 9970 (face font-lock-string-face) 9970 10005 (face font-lock-constant-face) 10005 10006 (face font-lock-string-face) 10006 10016 nil 10016 10017 (face font-lock-string-face) 10017 10050 (face font-lock-constant-face) 10050 10051 (face font-lock-string-face) 10051 10061 nil 10061 10062 (face font-lock-string-face) 10062 10094 (face font-lock-constant-face) 10094 10095 (face font-lock-string-face) 10095 10105 nil 10105 10106 (face font-lock-string-face) 10106 10150 (face font-lock-constant-face) 10150 10151 (face font-lock-string-face) 10151 10161 nil 10161 10162 (face font-lock-string-face) 10162 10198 (face font-lock-constant-face) 10198 10199 (face font-lock-string-face) 10199 10209 nil 10209 10210 (face font-lock-string-face) 10210 10245 (face font-lock-constant-face) 10245 10246 (face font-lock-string-face) 10246 10256 nil 10256 10257 (face font-lock-string-face) 10257 10296 (face font-lock-constant-face) 10296 10297 (face font-lock-string-face) 10297 10307 nil 10307 10308 (face font-lock-string-face) 10308 10346 (face font-lock-constant-face) 10346 10347 (face font-lock-string-face) 10347 10357 nil 10357 10358 (face font-lock-string-face) 10358 10403 (face font-lock-constant-face) 10403 10404 (face font-lock-string-face) 10404 10414 nil 10414 10415 (face font-lock-string-face) 10415 10459 (face font-lock-constant-face) 10459 10460 (face font-lock-string-face) 10460 10470 nil 10470 10471 (face font-lock-string-face) 10471 10487 (face font-lock-constant-face) 10487 10488 (face font-lock-string-face) 10488 10498 nil 10498 10499 (face font-lock-string-face) 10499 10514 (face font-lock-constant-face) 10514 10515 (face font-lock-string-face) 10515 10525 nil 10525 10526 (face font-lock-string-face) 10526 10559 (face font-lock-constant-face) 10559 10560 (face font-lock-string-face) 10560 10570 nil 10570 10571 (face font-lock-string-face) 10571 10603 (face font-lock-constant-face) 10603 10604 (face font-lock-string-face) 10604 10614 nil 10614 10615 (face font-lock-string-face) 
10615 10636 (face font-lock-constant-face) 10636 10637 (face font-lock-string-face) 10637 10647 nil 10647 10648 (face font-lock-string-face) 10648 10675 (face font-lock-constant-face) 10675 10676 (face font-lock-string-face) 10676 10686 nil 10686 10687 (face font-lock-string-face) 10687 10713 (face font-lock-constant-face) 10713 10714 (face font-lock-string-face) 10714 10724 nil 10724 10725 (face font-lock-string-face) 10725 10755 (face font-lock-constant-face) 10755 10756 (face font-lock-string-face) 10756 10766 nil 10766 10767 (face font-lock-string-face) 10767 10796 (face font-lock-constant-face) 10796 10797 (face font-lock-string-face) 10797 10807 nil 10807 10808 (face font-lock-string-face) 10808 10845 (face font-lock-constant-face) 10845 10846 (face font-lock-string-face) 10846 10856 nil 10856 10857 (face font-lock-string-face) 10857 10893 (face font-lock-constant-face) 10893 10894 (face font-lock-string-face) 10894 10904 nil 10904 10905 (face font-lock-string-face) 10905 10929 (face font-lock-constant-face) 10929 10930 (face font-lock-string-face) 10930 10940 nil 10940 10941 (face font-lock-string-face) 10941 10964 (face font-lock-constant-face) 10964 10965 (face font-lock-string-face) 10965 10975 nil 10975 10976 (face font-lock-string-face) 10976 10995 (face font-lock-constant-face) 10995 10996 (face font-lock-string-face) 10996 11006 nil 11006 11007 (face font-lock-string-face) 11007 11025 (face font-lock-constant-face) 11025 11026 (face font-lock-string-face) 11026 11036 nil 11036 11037 (face font-lock-string-face) 11037 11063 (face font-lock-constant-face) 11063 11064 (face font-lock-string-face) 11064 11074 nil 11074 11075 (face font-lock-string-face) 11075 11100 (face font-lock-constant-face) 11100 11101 (face font-lock-string-face) 11101 11111 nil 11111 11112 (face font-lock-string-face) 11112 11138 (face font-lock-constant-face) 11138 11139 (face font-lock-string-face) 11139 11149 nil 11149 11150 (face font-lock-string-face) 11150 11175 (face font-lock-constant-face) 11175 11176 (face font-lock-string-face) 11176 11193 nil 11193 11194 (face font-lock-string-face) 11194 11219 (face font-lock-keyword-face) 11219 11220 (face font-lock-string-face) 11220 11232 nil 11232 11233 (face font-lock-string-face) 11233 11245 (face font-lock-keyword-face) 11245 11246 (face font-lock-string-face) 11246 11260 nil 11260 11261 (face font-lock-string-face) 11261 11263 (face font-lock-constant-face) 11263 11264 (face font-lock-string-face) 11264 11292 nil 11292 11293 (face font-lock-string-face) 11293 11303 (face font-lock-keyword-face) 11303 11304 (face font-lock-string-face) 11304 11316 nil 11316 11381 (face font-lock-comment-face) 11381 11389 nil 11389 11439 (face font-lock-comment-face) 11439 11448 nil 11448 11449 (face font-lock-string-face) 11449 11464 (face font-lock-variable-name-face) 11464 11465 (face font-lock-string-face) 11465 11479 nil 11479 11480 (face font-lock-string-face) 11480 11492 (face font-lock-keyword-face) 11492 11493 (face font-lock-string-face) 11493 11509 nil 11509 11510 (face font-lock-string-face) 11510 11549 (face font-lock-function-name-face) 11549 11550 (face font-lock-string-face) 11550 11586 nil 11586 11587 (face font-lock-string-face) 11587 11602 (face font-lock-variable-name-face) 11602 11603 (face font-lock-string-face) 11603 11617 nil 11617 11618 (face font-lock-string-face) 11618 11626 (face font-lock-keyword-face) 11626 11627 (face font-lock-string-face) 11627 11643 nil 11643 11644 (face font-lock-string-face) 11644 11663 (face font-lock-constant-face) 
11663 11664 (face font-lock-string-face) 11664 11678 nil 11678 11679 (face font-lock-string-face) 11679 11702 (face font-lock-constant-face) 11702 11703 (face font-lock-string-face) 11703 11717 nil 11717 11718 (face font-lock-string-face) 11718 11740 (face font-lock-constant-face) 11740 11741 (face font-lock-string-face) 11741 11755 nil 11755 11756 (face font-lock-string-face) 11756 11779 (face font-lock-constant-face) 11779 11780 (face font-lock-string-face) 11780 11794 nil 11794 11795 (face font-lock-string-face) 11795 11817 (face font-lock-constant-face) 11817 11818 (face font-lock-string-face) 11818 11832 nil 11832 11833 (face font-lock-string-face) 11833 11861 (face font-lock-constant-face) 11861 11862 (face font-lock-string-face) 11862 11876 nil 11876 11877 (face font-lock-string-face) 11877 11904 (face font-lock-constant-face) 11904 11905 (face font-lock-string-face) 11905 11919 nil 11919 11920 (face font-lock-string-face) 11920 11950 (face font-lock-constant-face) 11950 11951 (face font-lock-string-face) 11951 11965 nil 11965 11966 (face font-lock-string-face) 11966 11995 (face font-lock-constant-face) 11995 11996 (face font-lock-string-face) 11996 12010 nil 12010 12011 (face font-lock-string-face) 12011 12035 (face font-lock-constant-face) 12035 12036 (face font-lock-string-face) 12036 12050 nil 12050 12051 (face font-lock-string-face) 12051 12074 (face font-lock-constant-face) 12074 12075 (face font-lock-string-face) 12075 12089 nil 12089 12090 (face font-lock-string-face) 12090 12120 (face font-lock-constant-face) 12120 12121 (face font-lock-string-face) 12121 12135 nil 12135 12136 (face font-lock-string-face) 12136 12167 (face font-lock-constant-face) 12167 12168 (face font-lock-string-face) 12168 12182 nil 12182 12183 (face font-lock-string-face) 12183 12213 (face font-lock-constant-face) 12213 12214 (face font-lock-string-face) 12214 12228 nil 12228 12229 (face font-lock-string-face) 12229 12254 (face font-lock-constant-face) 12254 12255 (face font-lock-string-face) 12255 12269 nil 12269 12270 (face font-lock-string-face) 12270 12294 (face font-lock-constant-face) 12294 12295 (face font-lock-string-face) 12295 12309 nil 12309 12310 (face font-lock-string-face) 12310 12352 (face font-lock-constant-face) 12352 12353 (face font-lock-string-face) 12353 12367 nil 12367 12368 (face font-lock-string-face) 12368 12409 (face font-lock-constant-face) 12409 12410 (face font-lock-string-face) 12410 12424 nil 12424 12425 (face font-lock-string-face) 12425 12447 (face font-lock-constant-face) 12447 12448 (face font-lock-string-face) 12448 12462 nil 12462 12463 (face font-lock-string-face) 12463 12484 (face font-lock-constant-face) 12484 12485 (face font-lock-string-face) 12485 12499 nil 12499 12500 (face font-lock-string-face) 12500 12531 (face font-lock-constant-face) 12531 12532 (face font-lock-string-face) 12532 12546 nil 12546 12547 (face font-lock-string-face) 12547 12577 (face font-lock-constant-face) 12577 12578 (face font-lock-string-face) 12578 12592 nil 12592 12593 (face font-lock-string-face) 12593 12621 (face font-lock-constant-face) 12621 12622 (face font-lock-string-face) 12622 12636 nil 12636 12637 (face font-lock-string-face) 12637 12664 (face font-lock-constant-face) 12664 12665 (face font-lock-string-face) 12665 12679 nil 12679 12680 (face font-lock-string-face) 12680 12707 (face font-lock-constant-face) 12707 12708 (face font-lock-string-face) 12708 12722 nil 12722 12723 (face font-lock-string-face) 12723 12749 (face font-lock-constant-face) 12749 12750 (face 
font-lock-string-face) 12750 12764 nil 12764 12765 (face font-lock-string-face) 12765 12791 (face font-lock-constant-face) 12791 12792 (face font-lock-string-face) 12792 12806 nil 12806 12807 (face font-lock-string-face) 12807 12832 (face font-lock-constant-face) 12832 12833 (face font-lock-string-face) 12833 12868 nil 12868 12937 (face font-lock-comment-face) 12937 12945 nil 12945 13016 (face font-lock-comment-face) 13016 13024 nil 13024 13040 (face font-lock-comment-face) 13040 13049 nil 13049 13050 (face font-lock-string-face) 13050 13065 (face font-lock-variable-name-face) 13065 13066 (face font-lock-string-face) 13066 13080 nil 13080 13081 (face font-lock-string-face) 13081 13089 (face font-lock-keyword-face) 13089 13090 (face font-lock-string-face) 13090 13105 nil 13105 13106 (face font-lock-string-face) 13106 13149 (face font-lock-constant-face) 13149 13150 (face font-lock-string-face) 13150 13175 nil 13175 13176 (face font-lock-string-face) 13176 13183 (face font-lock-keyword-face) 13183 13184 (face font-lock-string-face) 13184 13199 nil 13199 13200 (face font-lock-string-face) 13200 13248 (face font-lock-constant-face) 13248 13249 (face font-lock-string-face) 13249 13274 nil 13274 13275 (face font-lock-string-face) 13275 13288 (face font-lock-keyword-face) 13288 13289 (face font-lock-string-face) 13289 13305 nil 13305 13306 (face font-lock-string-face) 13306 13315 (face font-lock-keyword-face) 13315 13316 (face font-lock-string-face) 13316 13334 nil 13334 13335 (face font-lock-string-face) 13335 13345 (face font-lock-constant-face) 13345 13346 (face font-lock-string-face) 13346 13397 nil 13397 13398 (face font-lock-string-face) 13398 13443 (face font-lock-variable-name-face) 13443 13444 (face font-lock-string-face) 13444 13458 nil 13458 13459 (face font-lock-string-face) 13459 13472 (face font-lock-keyword-face) 13472 13473 (face font-lock-string-face) 13473 13489 nil 13489 13490 (face font-lock-string-face) 13490 13499 (face font-lock-keyword-face) 13499 13500 (face font-lock-string-face) 13500 13518 nil 13518 13519 (face font-lock-string-face) 13519 13527 (face font-lock-constant-face) 13527 13528 (face font-lock-string-face) 13528 13579 nil 13579 13580 (face font-lock-string-face) 13580 13593 (face font-lock-variable-name-face) 13593 13594 (face font-lock-string-face) 13594 13608 nil 13608 13609 (face font-lock-string-face) 13609 13617 (face font-lock-keyword-face) 13617 13618 (face font-lock-string-face) 13618 13623 nil 13623 13624 (face font-lock-string-face) 13624 13631 (face font-lock-constant-face) 13631 13632 (face font-lock-string-face) 13632 13634 nil 13634 13635 (face font-lock-string-face) 13635 13641 (face font-lock-constant-face) 13641 13642 (face font-lock-string-face) 13642 13671 nil 13671 13672 (face font-lock-string-face) 13672 13679 (face font-lock-constant-face) 13679 13680 (face font-lock-string-face) 13680 13682 nil 13682 13683 (face font-lock-string-face) 13683 13703 (face font-lock-constant-face) 13703 13704 (face font-lock-string-face) 13704 13720 nil 13720 13721 (face font-lock-string-face) 13721 13734 (face font-lock-keyword-face) 13734 13735 (face font-lock-string-face) 13735 13751 nil 13751 13752 (face font-lock-string-face) 13752 13761 (face font-lock-keyword-face) 13761 13762 (face font-lock-string-face) 13762 13815 nil 13815 13816 (face font-lock-string-face) 13816 13829 (face font-lock-variable-name-face) 13829 13830 (face font-lock-string-face) 13830 13844 nil 13844 13845 (face font-lock-string-face) 13845 13853 (face font-lock-keyword-face) 
13853 13854 (face font-lock-string-face) 13854 13870 nil 13870 13871 (face font-lock-string-face) 13871 13909 (face font-lock-constant-face) 13909 13910 (face font-lock-string-face) 13910 13924 nil 13924 13925 (face font-lock-string-face) 13925 13962 (face font-lock-constant-face) 13962 13963 (face font-lock-string-face) 13963 13999 nil 13999 14000 (face font-lock-string-face) 14000 14011 (face font-lock-variable-name-face) 14011 14012 (face font-lock-string-face) 14012 14026 nil 14026 14027 (face font-lock-string-face) 14027 14036 (face font-lock-keyword-face) 14036 14037 (face font-lock-string-face) 14037 14053 nil 14053 14054 (face font-lock-string-face) 14054 14064 (face font-lock-keyword-face) 14064 14065 (face font-lock-string-face) 14065 14084 nil 14084 14085 (face font-lock-string-face) 14085 14096 (face font-lock-variable-name-face) 14096 14097 (face font-lock-string-face) 14097 14117 nil 14117 14129 (face font-lock-string-face) 14129 14131 nil 14131 14169 (face font-lock-string-face) 14169 14176 (face font-lock-variable-name-face) 14176 14182 (face font-lock-string-face) 14182 14193 (face font-lock-variable-name-face) 14193 14196 (face font-lock-string-face) 14196 14233 nil 14233 14245 (face font-lock-string-face) 14245 14247 nil 14247 14259 (face font-lock-string-face) 14259 14316 nil 14316 14317 (face font-lock-string-face) 14317 14327 (face font-lock-keyword-face) 14327 14328 (face font-lock-string-face) 14328 14345 nil 14345 14346 (face font-lock-string-face) 14346 14359 (face font-lock-variable-name-face) 14359 14360 (face font-lock-string-face) 14360 14378 nil 14378 14379 (face font-lock-string-face) 14379 14385 (face font-lock-keyword-face) 14385 14386 (face font-lock-string-face) 14386 14406 nil 14406 14411 (face font-lock-string-face) 14411 14413 (face font-lock-variable-name-face) 14413 14423 (face font-lock-variable-name-face) 14423 14443 (face font-lock-string-face) 14443 14476 nil 14476 14477 (face font-lock-string-face) 14477 14490 (face font-lock-keyword-face) 14490 14491 (face font-lock-string-face) 14491 14511 nil 14511 14512 (face font-lock-string-face) 14512 14521 (face font-lock-keyword-face) 14521 14522 (face font-lock-string-face) 14522 14544 nil 14544 14545 (face font-lock-string-face) 14545 14549 (face font-lock-constant-face) 14549 14551 (face font-lock-variable-name-face) 14551 14561 (face font-lock-variable-name-face) 14561 14578 (face font-lock-constant-face) 14578 14579 (face font-lock-string-face) 14579 14631 nil 14631 14632 (face font-lock-string-face) 14632 14639 (face font-lock-keyword-face) 14639 14640 (face font-lock-string-face) 14640 14660 nil 14660 14661 (face font-lock-string-face) 14661 14669 (face font-lock-preprocessor-face) 14669 14670 (face font-lock-string-face) 14670 14707 nil 14707 14729 (face font-lock-comment-face) 14729 14743 nil 14743 14744 (face font-lock-string-face) 14744 14752 (face font-lock-keyword-face) 14752 14753 (face font-lock-string-face) 14753 14773 nil 14773 14774 (face font-lock-string-face) 14774 14800 (face font-lock-constant-face) 14800 14801 (face font-lock-string-face) 14801 14819 nil 14819 14820 (face font-lock-string-face) 14820 14845 (face font-lock-constant-face) 14845 14846 (face font-lock-string-face) 14846 14915 nil 14915 14916 (face font-lock-string-face) 14916 14929 (face font-lock-variable-name-face) 14929 14930 (face font-lock-string-face) 14930 14944 nil 14944 14945 (face font-lock-string-face) 14945 14955 (face font-lock-keyword-face) 14955 14956 (face font-lock-string-face) 14956 14973 nil 14973 
14974 (face font-lock-string-face) 14974 14993 (face font-lock-variable-name-face) 14993 14994 (face font-lock-string-face) 14994 15012 nil 15012 15013 (face font-lock-string-face) 15013 15019 (face font-lock-keyword-face) 15019 15020 (face font-lock-string-face) 15020 15040 nil 15040 15075 (face font-lock-string-face) 15075 15108 nil 15108 15109 (face font-lock-string-face) 15109 15122 (face font-lock-keyword-face) 15122 15123 (face font-lock-string-face) 15123 15143 nil 15143 15144 (face font-lock-string-face) 15144 15153 (face font-lock-keyword-face) 15153 15154 (face font-lock-string-face) 15154 15176 nil 15176 15177 (face font-lock-string-face) 15177 15215 (face font-lock-constant-face) 15215 15216 (face font-lock-string-face) 15216 15268 nil 15268 15269 (face font-lock-string-face) 15269 15276 (face font-lock-keyword-face) 15276 15277 (face font-lock-string-face) 15277 15297 nil 15297 15298 (face font-lock-string-face) 15298 15312 (face font-lock-preprocessor-face) 15312 15313 (face font-lock-string-face) 15313 15350 nil 15350 15378 (face font-lock-comment-face) 15378 15392 nil 15392 15393 (face font-lock-string-face) 15393 15401 (face font-lock-keyword-face) 15401 15402 (face font-lock-string-face) 15402 15422 nil 15422 15423 (face font-lock-string-face) 15423 15450 (face font-lock-constant-face) 15450 15451 (face font-lock-string-face) 15451 15469 nil 15469 15470 (face font-lock-string-face) 15470 15496 (face font-lock-constant-face) 15496 15497 (face font-lock-string-face) 15497 15566 nil 15566 15567 (face font-lock-string-face) 15567 15600 (face font-lock-variable-name-face) 15600 15601 (face font-lock-string-face) 15601 15615 nil 15615 15663 (face font-lock-comment-face) 15663 15673 nil 15673 15674 (face font-lock-string-face) 15674 15682 (face font-lock-keyword-face) 15682 15683 (face font-lock-string-face) 15683 15699 nil 15699 15700 (face font-lock-string-face) 15700 15743 (face font-lock-constant-face) 15743 15744 (face font-lock-string-face) 15744 15758 nil 15758 15759 (face font-lock-string-face) 15759 15801 (face font-lock-constant-face) 15801 15802 (face font-lock-string-face) 15802 15838 nil 15838 15839 (face font-lock-string-face) 15839 15848 (face font-lock-variable-name-face) 15848 15849 (face font-lock-string-face) 15849 15863 nil 15863 15864 (face font-lock-string-face) 15864 15877 (face font-lock-keyword-face) 15877 15878 (face font-lock-string-face) 15878 15894 nil 15894 15895 (face font-lock-string-face) 15895 15904 (face font-lock-keyword-face) 15904 15905 (face font-lock-string-face) 15905 15923 nil 15923 15924 (face font-lock-string-face) 15924 15980 (face font-lock-constant-face) 15980 15981 (face font-lock-string-face) 15981 15997 nil 15997 15998 (face font-lock-string-face) 15998 16057 (face font-lock-constant-face) 16057 16058 (face font-lock-string-face) 16058 16074 nil 16074 16075 (face font-lock-string-face) 16075 16131 (face font-lock-constant-face) 16131 16132 (face font-lock-string-face) 16132 16148 nil 16148 16149 (face font-lock-string-face) 16149 16205 (face font-lock-constant-face) 16205 16206 (face font-lock-string-face) 16206 16222 nil 16222 16223 (face font-lock-string-face) 16223 16275 (face font-lock-constant-face) 16275 16276 (face font-lock-string-face) 16276 16327 nil 16327 16328 (face font-lock-string-face) 16328 16337 (face font-lock-variable-name-face) 16337 16338 (face font-lock-string-face) 16338 16352 nil 16352 16353 (face font-lock-string-face) 16353 16361 (face font-lock-keyword-face) 16361 16362 (face font-lock-string-face) 
16362 16378 nil 16378 16379 (face font-lock-string-face) 16379 16406 (face font-lock-constant-face) 16406 16407 (face font-lock-string-face) 16407 16421 nil 16421 16422 (face font-lock-string-face) 16422 16448 (face font-lock-constant-face) 16448 16449 (face font-lock-string-face) 16449 16463 nil 16463 16464 (face font-lock-string-face) 16464 16507 (face font-lock-constant-face) 16507 16508 (face font-lock-string-face) 16508 16522 nil 16522 16523 (face font-lock-string-face) 16523 16565 (face font-lock-constant-face) 16565 16566 (face font-lock-string-face) 16566 16602 nil 16602 16603 (face font-lock-string-face) 16603 16646 (face font-lock-variable-name-face) 16646 16647 (face font-lock-string-face) 16647 16661 nil 16661 16662 (face font-lock-string-face) 16662 16669 (face font-lock-keyword-face) 16669 16670 (face font-lock-string-face) 16670 16686 nil 16686 16687 (face font-lock-string-face) 16687 16697 (face font-lock-constant-face) 16697 16698 (face font-lock-string-face) 16698 16712 nil 16712 16713 (face font-lock-string-face) 16713 16722 (face font-lock-constant-face) 16722 16723 (face font-lock-string-face) 16723 16737 nil 16737 16738 (face font-lock-string-face) 16738 16760 (face font-lock-constant-face) 16760 16761 (face font-lock-string-face) 16761 16775 nil 16775 16776 (face font-lock-string-face) 16776 16797 (face font-lock-constant-face) 16797 16798 (face font-lock-string-face) 16798 16812 nil 16812 16813 (face font-lock-string-face) 16813 16830 (face font-lock-constant-face) 16830 16831 (face font-lock-string-face) 16831 16845 nil 16845 16846 (face font-lock-string-face) 16846 16862 (face font-lock-constant-face) 16862 16863 (face font-lock-string-face) 16863 16877 nil 16877 16878 (face font-lock-string-face) 16878 16889 (face font-lock-constant-face) 16889 16890 (face font-lock-string-face) 16890 16904 nil 16904 16905 (face font-lock-string-face) 16905 16915 (face font-lock-constant-face) 16915 16916 (face font-lock-string-face) 16916 16930 nil 16930 16931 (face font-lock-string-face) 16931 16955 (face font-lock-constant-face) 16955 16956 (face font-lock-string-face) 16956 16970 nil 16970 16971 (face font-lock-string-face) 16971 16994 (face font-lock-constant-face) 16994 16995 (face font-lock-string-face) 16995 17009 nil 17009 17010 (face font-lock-string-face) 17010 17034 (face font-lock-constant-face) 17034 17035 (face font-lock-string-face) 17035 17049 nil 17049 17050 (face font-lock-string-face) 17050 17073 (face font-lock-constant-face) 17073 17074 (face font-lock-string-face) 17074 17088 nil 17088 17089 (face font-lock-string-face) 17089 17114 (face font-lock-constant-face) 17114 17115 (face font-lock-string-face) 17115 17129 nil 17129 17130 (face font-lock-string-face) 17130 17154 (face font-lock-constant-face) 17154 17155 (face font-lock-string-face) 17155 17210 nil 17210 17211 (face font-lock-string-face) 17211 17222 (face font-lock-keyword-face) 17222 17223 (face font-lock-string-face) 17223 17225 nil 17225 17226 (face font-lock-string-face) 17226 17237 (face font-lock-function-name-face) 17237 17238 (face font-lock-string-face) 17238 17246 nil 17246 17247 (face font-lock-string-face) 17247 17251 (face font-lock-keyword-face) 17251 17252 (face font-lock-string-face) 17252 17254 nil 17254 17255 (face font-lock-string-face) 17255 17269 (face font-lock-type-face) 17269 17270 (face font-lock-string-face) 17270 17278 nil 17278 17279 (face font-lock-string-face) 17279 17291 (face font-lock-keyword-face) 17291 17292 (face font-lock-string-face) 17292 17304 nil 17304 17305 
[~20 KB of machine-generated Emacs font-lock face/position data elided here; it appears to be the remainder of a *.fontified test-fixture diff under third_party/python/gyp/tools/emacs/testdata/.]
diff --git a/third_party/python/gyp/tools/graphviz.py b/third_party/python/gyp/tools/graphviz.py
new file mode 100755
index 0000000000..538b059da4
--- /dev/null
+++ b/third_party/python/gyp/tools/graphviz.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2011 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Using the JSON dumped by the dump-dependency-json generator,
+generate input suitable for graphviz to render a dependency graph of
+targets."""
+
+from __future__ import print_function
+
+import collections
+import json
+import sys
+
+
+def ParseTarget(target):
+ target, _, suffix = target.partition('#')
+ filename, _, target = target.partition(':')
+ return filename, target, suffix
+
+
+def LoadEdges(filename, targets):
+ """Load the edges map from the dump file, and filter it to only
+ show targets in |targets| and their dependents."""
+
+ with open(filename) as f:
+  edges = json.load(f)
+
+ # Copy out only the edges we're interested in from the full edge list.
+ target_edges = {}
+ to_visit = targets[:]
+ while to_visit:
+ src = to_visit.pop()
+ if src in target_edges:
+ continue
+ target_edges[src] = edges[src]
+ to_visit.extend(edges[src])
+
+ return target_edges
+
+
+def WriteGraph(edges):
+ """Print a graphviz graph to stdout.
+ |edges| is a map of target to a list of other targets it depends on."""
+
+ # Bucket targets by file.
+ files = collections.defaultdict(list)
+ for src, dst in edges.items():
+ build_file, target_name, toolset = ParseTarget(src)
+ files[build_file].append(src)
+
+ print('digraph D {')
+ print(' fontsize=8') # Used by subgraphs.
+ print(' node [fontsize=8]')
+
+ # Output nodes by file. We must first write out each node within
+ # its file grouping before writing out any edges that may refer
+ # to those nodes.
+ for filename, targets in files.items():
+ if len(targets) == 1:
+ # If there's only one node for this file, simplify
+ # the display by making it a box without an internal node.
+ target = targets[0]
+ build_file, target_name, toolset = ParseTarget(target)
+ print(' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
+ target_name))
+ else:
+ # Group multiple nodes together in a subgraph.
+ print(' subgraph "cluster_%s" {' % filename)
+ print(' label = "%s"' % filename)
+ for target in targets:
+ build_file, target_name, toolset = ParseTarget(target)
+ print(' "%s" [label="%s"]' % (target, target_name))
+ print(' }')
+
+ # Now that we've placed all the nodes within subgraphs, output all
+ # the edges between nodes.
+ for src, dsts in edges.items():
+ for dst in dsts:
+ print(' "%s" -> "%s"' % (src, dst))
+
+ print('}')
+
+
+def main():
+ if len(sys.argv) < 2:
+ print(__doc__, file=sys.stderr)
+ print(file=sys.stderr)
+ print('usage: %s target1 target2...' % (sys.argv[0]), file=sys.stderr)
+ return 1
+
+ edges = LoadEdges('dump.json', sys.argv[1:])
+
+ WriteGraph(edges)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/third_party/python/gyp/tools/pretty_gyp.py b/third_party/python/gyp/tools/pretty_gyp.py
new file mode 100755
index 0000000000..5060d1d9e2
--- /dev/null
+++ b/third_party/python/gyp/tools/pretty_gyp.py
@@ -0,0 +1,156 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Pretty-prints the contents of a GYP file."""
+
+from __future__ import print_function
+
+import sys
+import re
+
+
+# Regex to remove comments when we're counting braces.
+COMMENT_RE = re.compile(r'\s*#.*')
+
+# Regex to remove quoted strings when we're counting braces.
+# It takes into account quoted quotes, and makes sure that the quotes match.
+# NOTE: It does not handle quotes that span more than one line, or
+# cases where an escaped quote is preceded by an escaped backslash.
+QUOTE_RE_STR = r'(?P<q>[\'"])(.*?)(?<![^\\][\\])(?P=q)'
+QUOTE_RE = re.compile(QUOTE_RE_STR)
+
+
+def comment_replace(matchobj):
+ return matchobj.group(1) + matchobj.group(2) + '#' * len(matchobj.group(3))
+
+
+def mask_comments(input):
+ """Mask the quoted strings so we skip braces inside quoted strings."""
+ search_re = re.compile(r'(.*?)(#)(.*)')
+ return [search_re.sub(comment_replace, line) for line in input]
+
+
+def quote_replace(matchobj):
+ return "%s%s%s%s" % (matchobj.group(1),
+ matchobj.group(2),
+ 'x'*len(matchobj.group(3)),
+ matchobj.group(2))
+
+
+def mask_quotes(input):
+ """Mask the quoted strings so we skip braces inside quoted strings."""
+ search_re = re.compile(r'(.*?)' + QUOTE_RE_STR)
+ return [search_re.sub(quote_replace, line) for line in input]
+
+
+def do_split(input, masked_input, search_re):
+ output = []
+ mask_output = []
+ for (line, masked_line) in zip(input, masked_input):
+ m = search_re.match(masked_line)
+ while m:
+ split = len(m.group(1))
+ line = line[:split] + r'\n' + line[split:]
+ masked_line = masked_line[:split] + r'\n' + masked_line[split:]
+ m = search_re.match(masked_line)
+ output.extend(line.split(r'\n'))
+ mask_output.extend(masked_line.split(r'\n'))
+ return (output, mask_output)
+
+
+def split_double_braces(input):
+ """Masks out the quotes and comments, and then splits appropriate
+ lines (lines that matche the double_*_brace re's above) before
+ indenting them below.
+
+ These are used to split lines which have multiple braces on them, so
+ that the indentation looks prettier when all laid out (e.g. closing
+ braces make a nice diagonal line).
+ """
+ double_open_brace_re = re.compile(r'(.*?[\[\{\(,])(\s*)([\[\{\(])')
+ double_close_brace_re = re.compile(r'(.*?[\]\}\)],?)(\s*)([\]\}\)])')
+
+ masked_input = mask_quotes(input)
+ masked_input = mask_comments(masked_input)
+
+ (output, mask_output) = do_split(input, masked_input, double_open_brace_re)
+ (output, mask_output) = do_split(output, mask_output, double_close_brace_re)
+
+ return output
+
+
+def count_braces(line):
+ """keeps track of the number of braces on a given line and returns the result.
+
+ It starts at zero and subtracts for closed braces, and adds for open braces.
+ """
+ open_braces = ['[', '(', '{']
+ close_braces = [']', ')', '}']
+ closing_prefix_re = re.compile(r'(.*?[^\s\]\}\)]+.*?)([\]\}\)],?)\s*$')
+ cnt = 0
+ stripline = COMMENT_RE.sub(r'', line)
+ stripline = QUOTE_RE.sub(r"''", stripline)
+ for char in stripline:
+ for brace in open_braces:
+ if char == brace:
+ cnt += 1
+ for brace in close_braces:
+ if char == brace:
+ cnt -= 1
+
+ after = False
+ if cnt > 0:
+ after = True
+
+ # This catches the special case of a closing brace having something
+ # other than just whitespace ahead of it -- we don't want to
+ # unindent that until after this line is printed so it stays with
+ # the previous indentation level.
+ if cnt < 0 and closing_prefix_re.match(stripline):
+ after = True
+ return (cnt, after)
+
+
+def prettyprint_input(lines):
+ """Does the main work of indenting the input based on the brace counts."""
+ indent = 0
+ basic_offset = 2
+ last_line = ""
+ for line in lines:
+ line = line.strip('\r\n\t ') # Otherwise doesn't strip \r on Unix.
+ if len(line) > 0:
+ brace_diff = 0
+ if not COMMENT_RE.match(line):
+ (brace_diff, after) = count_braces(line)
+ if brace_diff != 0:
+ if after:
+ print(" " * (basic_offset * indent) + line)
+ indent += brace_diff
+ else:
+ indent += brace_diff
+ print(" " * (basic_offset * indent) + line)
+ else:
+ print(" " * (basic_offset * indent) + line)
+ else:
+ print("")
+ last_line = line
+
+
+def main():
+ if len(sys.argv) > 1:
+ data = open(sys.argv[1]).read().splitlines()
+ else:
+ data = sys.stdin.read().splitlines()
+ # Split up the double braces.
+ lines = split_double_braces(data)
+
+ # Indent and print the output.
+ prettyprint_input(lines)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/third_party/python/gyp/tools/pretty_sln.py b/third_party/python/gyp/tools/pretty_sln.py
new file mode 100755
index 0000000000..12a6dadd17
--- /dev/null
+++ b/third_party/python/gyp/tools/pretty_sln.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Prints the information in a sln file in a diffable way.
+
+ It first outputs each project in alphabetical order with its
+ dependencies.
+
+ Then it outputs a possible build order.
+"""
+
+from __future__ import print_function
+
+__author__ = 'nsylvain (Nicolas Sylvain)'
+
+import os
+import re
+import sys
+import pretty_vcproj
+
+def BuildProject(project, built, projects, deps):
+ # If all dependencies are already built, we can build this project;
+ # otherwise, build the missing dependencies first.
+ # This is not infinite-recursion proof.
+ for dep in deps[project]:
+ if dep not in built:
+ BuildProject(dep, built, projects, deps)
+ print(project)
+ built.append(project)
+
+def ParseSolution(solution_file):
+ # All projects, their clsid and paths.
+ projects = dict()
+
+ # A list of dependencies associated with a project.
+ dependencies = dict()
+
+ # Regular expressions that match the SLN format.
+ # The first line of a project definition.
+ begin_project = re.compile(r'^Project\("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942'
+ r'}"\) = "(.*)", "(.*)", "(.*)"$')
+ # The last line of a project definition.
+ end_project = re.compile('^EndProject$')
+ # The first line of a dependency list.
+ begin_dep = re.compile(
+ r'ProjectSection\(ProjectDependencies\) = postProject$')
+ # The last line of a dependency list.
+ end_dep = re.compile('EndProjectSection$')
+ # A line describing a dependency.
+ dep_line = re.compile(' *({.*}) = ({.*})$')
+
+ in_deps = False
+ solution = open(solution_file)
+ for line in solution:
+ results = begin_project.search(line)
+ if results:
+ # Hack to remove icu because the diff is too different.
+ if results.group(1).find('icu') != -1:
+ continue
+ # We remove "_gyp" from the names because it helps to diff them.
+ current_project = results.group(1).replace('_gyp', '')
+ projects[current_project] = [results.group(2).replace('_gyp', ''),
+ results.group(3),
+ results.group(2)]
+ dependencies[current_project] = []
+ continue
+
+ results = end_project.search(line)
+ if results:
+ current_project = None
+ continue
+
+ results = begin_dep.search(line)
+ if results:
+ in_deps = True
+ continue
+
+ results = end_dep.search(line)
+ if results:
+ in_deps = False
+ continue
+
+ results = dep_line.search(line)
+ if results and in_deps and current_project:
+ dependencies[current_project].append(results.group(1))
+ continue
+
+ # Change all dependencies clsid to name instead.
+ for project in dependencies:
+ # For each dependency in this project,
+ new_dep_array = []
+ for dep in dependencies[project]:
+ # Look for the project name matching this clsid.
+ for project_info in projects:
+ if projects[project_info][1] == dep:
+ new_dep_array.append(project_info)
+ dependencies[project] = sorted(new_dep_array)
+
+ return (projects, dependencies)
+
+def PrintDependencies(projects, deps):
+ print("---------------------------------------")
+ print("Dependencies for all projects")
+ print("---------------------------------------")
+ print("-- --")
+
+ for (project, dep_list) in sorted(deps.items()):
+ print("Project : %s" % project)
+ print("Path : %s" % projects[project][0])
+ if dep_list:
+ for dep in dep_list:
+ print(" - %s" % dep)
+ print("")
+
+ print("-- --")
+
+def PrintBuildOrder(projects, deps):
+ print("---------------------------------------")
+ print("Build order ")
+ print("---------------------------------------")
+ print("-- --")
+
+ built = []
+ for (project, _) in sorted(deps.items()):
+ if project not in built:
+ BuildProject(project, built, projects, deps)
+
+ print("-- --")
+
+def PrintVCProj(projects):
+
+ for project in projects:
+ print("-------------------------------------")
+ print("-------------------------------------")
+ print(project)
+ print(project)
+ print(project)
+ print("-------------------------------------")
+ print("-------------------------------------")
+
+ project_path = os.path.abspath(os.path.join(os.path.dirname(sys.argv[1]),
+ projects[project][2]))
+
+ pretty = pretty_vcproj
+ argv = [ '',
+ project_path,
+ '$(SolutionDir)=%s\\' % os.path.dirname(sys.argv[1]),
+ ]
+ argv.extend(sys.argv[3:])
+ pretty.main(argv)
+
+def main():
+ # Check that we were given at least one parameter (the solution file).
+ if len(sys.argv) < 2:
+ print('Usage: %s "c:\\path\\to\\project.sln"' % sys.argv[0])
+ return 1
+
+ (projects, deps) = ParseSolution(sys.argv[1])
+ PrintDependencies(projects, deps)
+ PrintBuildOrder(projects, deps)
+
+ if '--recursive' in sys.argv:
+ PrintVCProj(projects)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/third_party/python/gyp/tools/pretty_vcproj.py b/third_party/python/gyp/tools/pretty_vcproj.py
new file mode 100755
index 0000000000..4454d9b2b9
--- /dev/null
+++ b/third_party/python/gyp/tools/pretty_vcproj.py
@@ -0,0 +1,337 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""Make the format of a vcproj really pretty.
+
+ This script normalizes and sorts an XML file. It also fetches all the
+ properties inside linked vsprops and includes them explicitly in the vcproj.
+
+ It outputs the resulting xml to stdout.
+"""
+
+from __future__ import print_function
+
+__author__ = 'nsylvain (Nicolas Sylvain)'
+
+import os
+import sys
+
+from xml.dom.minidom import parse
+from xml.dom.minidom import Node
+
+try:
+ # cmp was removed in python3.
+ cmp
+except NameError:
+ def cmp(a, b):
+ return (a > b) - (a < b)
+
+REPLACEMENTS = dict()
+ARGUMENTS = None
+
+
+class CmpTuple(object):
+ """Compare function between 2 tuple."""
+ def __call__(self, x, y):
+ return cmp(x[0], y[0])
+
+
+class CmpNode(object):
+ """Compare function between 2 xml nodes."""
+
+ def __call__(self, x, y):
+ def get_string(node):
+ node_string = "node"
+ node_string += node.nodeName
+ if node.nodeValue:
+ node_string += node.nodeValue
+
+ if node.attributes:
+ # We first sort by name, if present.
+ node_string += node.getAttribute("Name")
+
+ all_nodes = []
+ for (name, value) in node.attributes.items():
+ all_nodes.append((name, value))
+
+ all_nodes.sort(CmpTuple())
+ for (name, value) in all_nodes:
+ node_string += name
+ node_string += value
+
+ return node_string
+
+ return cmp(get_string(x), get_string(y))
+
+
+def PrettyPrintNode(node, indent=0):
+ if node.nodeType == Node.TEXT_NODE:
+ if node.data.strip():
+ print('%s%s' % (' '*indent, node.data.strip()))
+ return
+
+ if node.childNodes:
+ node.normalize()
+ # Get the number of attributes
+ attr_count = 0
+ if node.attributes:
+ attr_count = node.attributes.length
+
+ # Print the main tag
+ if attr_count == 0:
+ print('%s<%s>' % (' '*indent, node.nodeName))
+ else:
+ print('%s<%s' % (' '*indent, node.nodeName))
+
+ all_attributes = []
+ for (name, value) in node.attributes.items():
+ all_attributes.append((name, value))
+ all_attributes.sort(key=(lambda attr: attr[0]))
+ for (name, value) in all_attributes:
+ print('%s %s="%s"' % (' '*indent, name, value))
+ print('%s>' % (' '*indent))
+ if node.nodeValue:
+ print('%s %s' % (' '*indent, node.nodeValue))
+
+ for sub_node in node.childNodes:
+ PrettyPrintNode(sub_node, indent=indent+2)
+ print('%s</%s>' % (' '*indent, node.nodeName))
+
+
+def FlattenFilter(node):
+ """Returns a list of all the node and sub nodes."""
+ node_list = []
+
+ if (node.attributes and
+ node.getAttribute('Name') == '_excluded_files'):
+ # We don't add the "_excluded_files" filter.
+ return []
+
+ for current in node.childNodes:
+ if current.nodeName == 'Filter':
+ node_list.extend(FlattenFilter(current))
+ else:
+ node_list.append(current)
+
+ return node_list
+
+
+def FixFilenames(filenames, current_directory):
+ new_list = []
+ for filename in filenames:
+ if filename:
+ for key in REPLACEMENTS:
+ filename = filename.replace(key, REPLACEMENTS[key])
+ os.chdir(current_directory)
+ filename = filename.strip('"\' ')
+ if filename.startswith('$'):
+ new_list.append(filename)
+ else:
+ new_list.append(os.path.abspath(filename))
+ return new_list
+
+
+def AbsoluteNode(node):
+ """Makes all the properties we know about in this node absolute."""
+ if node.attributes:
+ for (name, value) in node.attributes.items():
+ if name in ['InheritedPropertySheets', 'RelativePath',
+ 'AdditionalIncludeDirectories',
+ 'IntermediateDirectory', 'OutputDirectory',
+ 'AdditionalLibraryDirectories']:
+ # We want to fix up these paths
+ path_list = value.split(';')
+ new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
+ node.setAttribute(name, ';'.join(new_list))
+ if not value:
+ node.removeAttribute(name)
+
+
+def CleanupVcproj(node):
+ """For each sub node, we call recursively this function."""
+ for sub_node in node.childNodes:
+ AbsoluteNode(sub_node)
+ CleanupVcproj(sub_node)
+
+ # Normalize the node, and remove all extraneous whitespace.
+ for sub_node in node.childNodes:
+ if sub_node.nodeType == Node.TEXT_NODE:
+ sub_node.data = sub_node.data.replace("\r", "")
+ sub_node.data = sub_node.data.replace("\n", "")
+ sub_node.data = sub_node.data.rstrip()
+
+ # Sort all the semicolon-separated attribute values and remove the
+ # duplicates.
+ if node.attributes:
+ for (name, value) in node.attributes.items():
+ sorted_list = sorted(value.split(';'))
+ unique_list = []
+ for i in sorted_list:
+ if not unique_list.count(i):
+ unique_list.append(i)
+ node.setAttribute(name, ';'.join(unique_list))
+ if not value:
+ node.removeAttribute(name)
+
+ if node.childNodes:
+ node.normalize()
+
+ # For each node, take a copy, and remove it from the list.
+ node_array = []
+ while node.childNodes and node.childNodes[0]:
+ # Take a copy of the node and remove it from the list.
+ current = node.childNodes[0]
+ node.removeChild(current)
+
+ # If the child is a filter, we want to append all its children
+ # to this same list.
+ if current.nodeName == 'Filter':
+ node_array.extend(FlattenFilter(current))
+ else:
+ node_array.append(current)
+
+
+ # Sort the list.
+ node_array.sort(CmpNode())
+
+ # Insert the nodes in the correct order.
+ for new_node in node_array:
+ # But don't append empty Tool nodes.
+ if new_node.nodeName == 'Tool':
+ if new_node.attributes and new_node.attributes.length == 1:
+ # This one was empty.
+ continue
+ if new_node.nodeName == 'UserMacro':
+ continue
+ node.appendChild(new_node)
+
+
+def GetConfiguationNodes(vcproj):
+ #TODO(nsylvain): Find a better way to navigate the xml.
+ nodes = []
+ for node in vcproj.childNodes:
+ if node.nodeName == "Configurations":
+ for sub_node in node.childNodes:
+ if sub_node.nodeName == "Configuration":
+ nodes.append(sub_node)
+
+ return nodes
+
+
+def GetChildrenVsprops(filename):
+ dom = parse(filename)
+ if dom.documentElement.attributes:
+ vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
+ return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
+ return []
+
+def SeekToNode(node1, child2):
+ # A text node does not have properties.
+ if child2.nodeType == Node.TEXT_NODE:
+ return None
+
+ # Get the name of the current node.
+ current_name = child2.getAttribute("Name")
+ if not current_name:
+ # There is no name. We don't know how to merge.
+ return None
+
+ # Look through all the nodes to find a match.
+ for sub_node in node1.childNodes:
+ if sub_node.nodeName == child2.nodeName:
+ name = sub_node.getAttribute("Name")
+ if name == current_name:
+ return sub_node
+
+ # No match. We give up.
+ return None
+
+
+def MergeAttributes(node1, node2):
+ # No attributes to merge?
+ if not node2.attributes:
+ return
+
+ for (name, value2) in node2.attributes.items():
+ # Don't merge the 'Name' attribute.
+ if name == 'Name':
+ continue
+ value1 = node1.getAttribute(name)
+ if value1:
+ # The attribute exists in the main node. If the values are equal, we
+ # leave it untouched; otherwise, we concatenate them.
+ if value1 != value2:
+ node1.setAttribute(name, ';'.join([value1, value2]))
+ else:
+ # The attribute does not exist in the main node. We append this one.
+ node1.setAttribute(name, value2)
+
+ # If the attribute is a property sheet reference, we remove it, since
+ # it is useless once the properties have been merged in.
+ if name == 'InheritedPropertySheets':
+ node1.removeAttribute(name)
+
+
+def MergeProperties(node1, node2):
+ MergeAttributes(node1, node2)
+ for child2 in node2.childNodes:
+ child1 = SeekToNode(node1, child2)
+ if child1:
+ MergeProperties(child1, child2)
+ else:
+ node1.appendChild(child2.cloneNode(True))
+
+
+def main(argv):
+ """Main function of this vcproj prettifier."""
+ global ARGUMENTS
+ ARGUMENTS = argv
+
+ # Check that we were given at least one parameter (the vcproj file).
+ if len(argv) < 2:
+ print('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
+ '[key2=value2]' % argv[0])
+ return 1
+
+ # Parse the key=value replacement arguments.
+ for i in range(2, len(argv)):
+ (key, value) = argv[i].split('=')
+ REPLACEMENTS[key] = value
+
+ # Open the vcproj and parse the xml.
+ dom = parse(argv[1])
+
+ # The first thing we need to do is find the Configuration nodes and merge
+ # them with the vsprops they include.
+ for configuration_node in GetConfiguationNodes(dom.documentElement):
+ # Get the property sheets associated with this configuration.
+ vsprops = configuration_node.getAttribute('InheritedPropertySheets')
+
+ # Fix the filenames to be absolute.
+ vsprops_list = FixFilenames(vsprops.strip().split(';'),
+ os.path.dirname(argv[1]))
+
+ # Extend the list of vsprops with all vsprops contained in the current
+ # vsprops.
+ for current_vsprops in vsprops_list:
+ vsprops_list.extend(GetChildrenVsprops(current_vsprops))
+
+ # Now that we have all the vsprops, we need to merge them.
+ for current_vsprops in vsprops_list:
+ MergeProperties(configuration_node,
+ parse(current_vsprops).documentElement)
+
+ # Now that everything is merged, we need to clean up the XML.
+ CleanupVcproj(dom.documentElement)
+
+ # Finally, we use the pretty-print function to print the vcproj back to
+ # the user.
+ PrettyPrintNode(dom.documentElement)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
diff --git a/third_party/python/idna/idna-2.10.dist-info/LICENSE.rst b/third_party/python/idna/idna-2.10.dist-info/LICENSE.rst
new file mode 100644
index 0000000000..63664b82e7
--- /dev/null
+++ b/third_party/python/idna/idna-2.10.dist-info/LICENSE.rst
@@ -0,0 +1,34 @@
+License
+-------
+
+License: bsd-3-clause
+
+Copyright (c) 2013-2020, Kim Davies. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+#. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+#. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided with
+ the distribution.
+
+#. Neither the name of the copyright holder nor the names of the
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+#. THIS SOFTWARE IS PROVIDED BY THE CONTRIBUTORS "AS IS" AND ANY
+ EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ DAMAGE.
diff --git a/third_party/python/idna/idna-2.10.dist-info/METADATA b/third_party/python/idna/idna-2.10.dist-info/METADATA
new file mode 100644
index 0000000000..f73c0ffefe
--- /dev/null
+++ b/third_party/python/idna/idna-2.10.dist-info/METADATA
@@ -0,0 +1,243 @@
+Metadata-Version: 2.1
+Name: idna
+Version: 2.10
+Summary: Internationalized Domain Names in Applications (IDNA)
+Home-page: https://github.com/kjd/idna
+Author: Kim Davies
+Author-email: kim@cynosure.com.au
+License: BSD-like
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Internet :: Name Service (DNS)
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Utilities
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+
+Internationalized Domain Names in Applications (IDNA)
+=====================================================
+
+Support for the Internationalised Domain Names in Applications
+(IDNA) protocol as specified in `RFC 5891 <http://tools.ietf.org/html/rfc5891>`_.
+This is the latest version of the protocol and is sometimes referred to as
+“IDNA 2008”.
+
+This library also provides support for Unicode Technical Standard 46,
+`Unicode IDNA Compatibility Processing <http://unicode.org/reports/tr46/>`_.
+
+This acts as a suitable replacement for the “encodings.idna” module that
+comes with the Python standard library, which only supports the
+old, deprecated IDNA specification (`RFC 3490 <http://tools.ietf.org/html/rfc3490>`_).
+
+Basic functions are simply executed:
+
+.. code-block:: pycon
+
+ # Python 3
+ >>> import idna
+ >>> idna.encode('ドメイン.テスト')
+ b'xn--eckwd4c7c.xn--zckzah'
+ >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
+ ドメイン.テスト
+
+ # Python 2
+ >>> import idna
+ >>> idna.encode(u'ドメイン.テスト')
+ 'xn--eckwd4c7c.xn--zckzah'
+ >>> print idna.decode('xn--eckwd4c7c.xn--zckzah')
+ ドメイン.テスト
+
+Packages
+--------
+
+The latest tagged release version is published in the PyPI repository:
+
+.. image:: https://badge.fury.io/py/idna.svg
+ :target: http://badge.fury.io/py/idna
+
+
+Installation
+------------
+
+To install this library, you can use pip:
+
+.. code-block:: bash
+
+ $ pip install idna
+
+Alternatively, you can install the package using the bundled setup script:
+
+.. code-block:: bash
+
+ $ python setup.py install
+
+This library works with Python 2.7 and Python 3.4 or later.
+
+
+Usage
+-----
+
+For typical usage, the ``encode`` and ``decode`` functions will take a domain
+name argument and perform a conversion to A-labels or U-labels respectively.
+
+.. code-block:: pycon
+
+ # Python 3
+ >>> import idna
+ >>> idna.encode('ドメイン.テスト')
+ b'xn--eckwd4c7c.xn--zckzah'
+ >>> print(idna.decode('xn--eckwd4c7c.xn--zckzah'))
+ ドメイン.テスト
+
+You may use the codec encoding and decoding methods via the
+``idna.codec`` module:
+
+.. code-block:: pycon
+
+ # Python 2
+ >>> import idna.codec
+ >>> print u'домена.испытание'.encode('idna')
+ xn--80ahd1agd.xn--80akhbyknj4f
+ >>> print 'xn--80ahd1agd.xn--80akhbyknj4f'.decode('idna')
+ домена.испытание
+
+Conversions can be applied on a per-label basis using the ``ulabel`` or ``alabel``
+functions if necessary:
+
+.. code-block:: pycon
+
+ # Python 2
+ >>> idna.alabel(u'测试')
+ 'xn--0zwm56d'
+
+Compatibility Mapping (UTS #46)
++++++++++++++++++++++++++++++++
+
+As described in `RFC 5895 <http://tools.ietf.org/html/rfc5895>`_, the IDNA
+specification no longer normalizes input from different potential ways a user
+may input a domain name. This functionality, known as a “mapping”, is now
+considered by the specification to be a local user-interface issue distinct
+from IDNA conversion functionality.
+
+This library provides one such mapping, which was developed by the Unicode
+Consortium. Known as `Unicode IDNA Compatibility Processing <http://unicode.org/reports/tr46/>`_,
+it provides for both a regular mapping for typical applications, as well as
+a transitional mapping to help migrate from older IDNA 2003 applications.
+
+For example, “Königsgäßchen” is not a permissible label as *LATIN CAPITAL
+LETTER K* is not allowed (nor are capital letters in general). UTS 46 will
+convert this into lower case prior to applying the IDNA conversion.
+
+.. code-block:: pycon
+
+ # Python 3
+ >>> import idna
+ >>> idna.encode(u'Königsgäßchen')
+ ...
+ idna.core.InvalidCodepoint: Codepoint U+004B at position 1 of 'Königsgäßchen' not allowed
+ >>> idna.encode('Königsgäßchen', uts46=True)
+ b'xn--knigsgchen-b4a3dun'
+ >>> print(idna.decode('xn--knigsgchen-b4a3dun'))
+ königsgäßchen
+
+Transitional processing provides conversions to help transition from the older
+2003 standard to the current standard. For example, in the original IDNA
+specification, the *LATIN SMALL LETTER SHARP S* (ß) was converted into two
+*LATIN SMALL LETTER S* (ss), whereas in the current IDNA specification this
+conversion is not performed.
+
+.. code-block:: pycon
+
+ # Python 2
+ >>> idna.encode(u'Königsgäßchen', uts46=True, transitional=True)
+ 'xn--knigsgsschen-lcb0w'
+
+Implementors should use transitional processing with caution, only in rare
+cases where conversion from legacy labels to current labels must be performed
+(i.e. IDNA implementations that pre-date 2008). For typical applications
+that just need to convert labels, transitional processing is unlikely to be
+beneficial and could produce unexpected incompatible results.
+
+``encodings.idna`` Compatibility
+++++++++++++++++++++++++++++++++
+
+Function calls from the Python built-in ``encodings.idna`` module are
+mapped to their IDNA 2008 equivalents using the ``idna.compat`` module.
+Simply substitute the ``import`` clause in your code to refer to the
+new module name.
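+
+For example (a minimal, illustrative session; the functions mirror the
+built-in names):
+
+.. code-block:: pycon
+
+ # Python 3
+ >>> import idna.compat
+ >>> idna.compat.ToASCII('ドメイン.テスト')
+ b'xn--eckwd4c7c.xn--zckzah'
+ >>> print(idna.compat.ToUnicode('xn--eckwd4c7c.xn--zckzah'))
+ ドメイン.テスト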
+
+Exceptions
+----------
+
+All errors raised during the conversion following the specification should
+raise an exception derived from the ``idna.IDNAError`` base class.
+
+More specific exceptions may be generated: ``idna.IDNABidiError``
+when the error reflects an illegal combination of left-to-right and right-to-left
+characters in a label; ``idna.InvalidCodepoint`` when a specific codepoint is
+an illegal character in an IDN label (i.e. INVALID); and ``idna.InvalidCodepointContext``
+when the codepoint is illegal based on its positional context (i.e. it is CONTEXTO
+or CONTEXTJ but the contextual requirements are not satisfied).
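+
+For example (illustrative), a disallowed codepoint surfaces as a subclass of
+``idna.IDNAError``:
+
+.. code-block:: pycon
+
+ # Python 3
+ >>> import idna
+ >>> try:
+ ...     idna.encode('Königsgäßchen')
+ ... except idna.IDNAError as exc:
+ ...     print(type(exc).__name__)
+ InvalidCodepoint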
+
+Building and Diagnostics
+------------------------
+
+The IDNA and UTS 46 functionality relies upon pre-calculated lookup tables for
+performance. These tables are derived from computing against eligibility criteria
+in the respective standards. These tables are computed using the command-line
+script ``tools/idna-data``.
+
+This tool will fetch relevant tables from the Unicode Consortium and perform the
+required calculations to identify eligibility. It has three main modes:
+
+* ``idna-data make-libdata``. Generates ``idnadata.py`` and ``uts46data.py``,
+ the pre-calculated lookup tables used for IDNA and UTS 46 conversions. Implementors
+ who wish to track this library against a different Unicode version may use this tool
+ to manually generate a different version of the ``idnadata.py`` and ``uts46data.py``
+ files.
+
+* ``idna-data make-table``. Generates a table of the IDNA disposition
+ (e.g. PVALID, CONTEXTJ, CONTEXTO) in the format found in Appendix B.1 of RFC
+ 5892 and the pre-computed tables published by `IANA <http://iana.org/>`_.
+
+* ``idna-data U+0061``. Prints debugging output on the various properties
+ associated with an individual Unicode codepoint (in this case, U+0061) that are
+ used to assess the IDNA and UTS 46 status of a codepoint. This is helpful in debugging
+ or analysis.
+
+The tool accepts a number of arguments, described using ``idna-data -h``. Most notably,
+the ``--version`` argument allows the specification of the version of Unicode to use
+in computing the table data. For example, ``idna-data --version 9.0.0 make-libdata``
+will generate library data against Unicode 9.0.0.
+
+Note that this script requires Python 3, but all generated library data will work
+in Python 2.7.
+
+
+Testing
+-------
+
+The library has a test suite based on each rule of the IDNA specification, as
+well as tests that are provided as part of the Unicode Technical Standard 46,
+`Unicode IDNA Compatibility Processing <http://unicode.org/reports/tr46/>`_.
+
+The tests are run automatically on each commit at Travis CI:
+
+.. image:: https://travis-ci.org/kjd/idna.svg?branch=master
+ :target: https://travis-ci.org/kjd/idna
+
+
diff --git a/third_party/python/idna/idna-2.10.dist-info/RECORD b/third_party/python/idna/idna-2.10.dist-info/RECORD
new file mode 100644
index 0000000000..fd40d65382
--- /dev/null
+++ b/third_party/python/idna/idna-2.10.dist-info/RECORD
@@ -0,0 +1,13 @@
+idna/__init__.py,sha256=9Nt7xpyet3DmOrPUGooDdAwmHZZu1qUAy2EaJ93kGiQ,58
+idna/codec.py,sha256=lvYb7yu7PhAqFaAIAdWcwgaWI2UmgseUua-1c0AsG0A,3299
+idna/compat.py,sha256=R-h29D-6mrnJzbXxymrWUW7iZUvy-26TQwZ0ij57i4U,232
+idna/core.py,sha256=jCoaLb3bA2tS_DDx9PpGuNTEZZN2jAzB369aP-IHYRE,11951
+idna/idnadata.py,sha256=gmzFwZWjdms3kKZ_M_vwz7-LP_SCgYfSeE03B21Qpsk,42350
+idna/intranges.py,sha256=TY1lpxZIQWEP6tNqjZkFA5hgoMWOj1OBmnUG8ihT87E,1749
+idna/package_data.py,sha256=bxBjpLnE06_1jSYKEy5svOMu1zM3OMztXVUb1tPlcp0,22
+idna/uts46data.py,sha256=lMdw2zdjkH1JUWXPPEfFUSYT3Fyj60bBmfLvvy5m7ko,202084
+idna-2.10.dist-info/LICENSE.rst,sha256=QSAUQg0kc9ugYRfD1Nng7sqm3eDKMM2VH07CvjlCbzI,1565
+idna-2.10.dist-info/METADATA,sha256=ZWCaQDBjdmSvx5EU7Cv6ORC-9NUQ6nXh1eXx38ySe40,9104
+idna-2.10.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+idna-2.10.dist-info/top_level.txt,sha256=jSag9sEDqvSPftxOQy-ABfGV_RSy7oFh4zZJpODV8k0,5
+idna-2.10.dist-info/RECORD,,
diff --git a/third_party/python/idna/idna-2.10.dist-info/WHEEL b/third_party/python/idna/idna-2.10.dist-info/WHEEL
new file mode 100644
index 0000000000..8b701e93c2
--- /dev/null
+++ b/third_party/python/idna/idna-2.10.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/idna/idna-2.10.dist-info/top_level.txt b/third_party/python/idna/idna-2.10.dist-info/top_level.txt
new file mode 100644
index 0000000000..c40472e6fc
--- /dev/null
+++ b/third_party/python/idna/idna-2.10.dist-info/top_level.txt
@@ -0,0 +1 @@
+idna
diff --git a/third_party/python/idna/idna/__init__.py b/third_party/python/idna/idna/__init__.py
new file mode 100644
index 0000000000..847bf93547
--- /dev/null
+++ b/third_party/python/idna/idna/__init__.py
@@ -0,0 +1,2 @@
+from .package_data import __version__
+from .core import *
diff --git a/third_party/python/idna/idna/codec.py b/third_party/python/idna/idna/codec.py
new file mode 100644
index 0000000000..98c65ead14
--- /dev/null
+++ b/third_party/python/idna/idna/codec.py
@@ -0,0 +1,118 @@
+from .core import encode, decode, alabel, ulabel, IDNAError
+import codecs
+import re
+
+_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
+
+class Codec(codecs.Codec):
+
+ def encode(self, data, errors='strict'):
+
+ if errors != 'strict':
+ raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
+
+ if not data:
+ return "", 0
+
+ return encode(data), len(data)
+
+ def decode(self, data, errors='strict'):
+
+ if errors != 'strict':
+ raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
+
+ if not data:
+ return u"", 0
+
+ return decode(data), len(data)
+
+class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
+ def _buffer_encode(self, data, errors, final):
+ if errors != 'strict':
+ raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
+
+ if not data:
+ return ("", 0)
+
+ labels = _unicode_dots_re.split(data)
+ trailing_dot = u''
+ if labels:
+ if not labels[-1]:
+ trailing_dot = '.'
+ del labels[-1]
+ elif not final:
+ # Keep potentially unfinished label until the next call
+ del labels[-1]
+ if labels:
+ trailing_dot = '.'
+
+ result = []
+ size = 0
+ for label in labels:
+ result.append(alabel(label))
+ if size:
+ size += 1
+ size += len(label)
+
+ # Join with U+002E
+ result = ".".join(result) + trailing_dot
+ size += len(trailing_dot)
+ return (result, size)
+
+class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
+ def _buffer_decode(self, data, errors, final):
+ if errors != 'strict':
+ raise IDNAError("Unsupported error handling \"{0}\"".format(errors))
+
+ if not data:
+ return (u"", 0)
+
+ # IDNA allows decoding to operate on Unicode strings, too.
+ if isinstance(data, (bytes, bytearray)):
+ # Must be an ASCII byte string; decode it before splitting.
+ data = data.decode('ascii')
+ labels = data.split('.')
+ else:
+ labels = _unicode_dots_re.split(data)
+
+ trailing_dot = u''
+ if labels:
+ if not labels[-1]:
+ trailing_dot = u'.'
+ del labels[-1]
+ elif not final:
+ # Keep potentially unfinished label until the next call
+ del labels[-1]
+ if labels:
+ trailing_dot = u'.'
+
+ result = []
+ size = 0
+ for label in labels:
+ result.append(ulabel(label))
+ if size:
+ size += 1
+ size += len(label)
+
+ result = u".".join(result) + trailing_dot
+ size += len(trailing_dot)
+ return (result, size)
+
+
+class StreamWriter(Codec, codecs.StreamWriter):
+ pass
+
+class StreamReader(Codec, codecs.StreamReader):
+ pass
+
+def getregentry():
+ return codecs.CodecInfo(
+ name='idna',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamwriter=StreamWriter,
+ streamreader=StreamReader,
+ )
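+
+# Illustrative (an assumption, not part of this module): to expose this codec
+# through the standard codecs machinery under a custom name, a search
+# function could be registered, e.g.:
+#   codecs.register(lambda name: getregentry() if name == 'idna2008' else None)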
diff --git a/third_party/python/idna/idna/compat.py b/third_party/python/idna/idna/compat.py
new file mode 100644
index 0000000000..4d47f336db
--- /dev/null
+++ b/third_party/python/idna/idna/compat.py
@@ -0,0 +1,12 @@
+from .core import *
+from .codec import *
+
+def ToASCII(label):
+ return encode(label)
+
+def ToUnicode(label):
+ return decode(label)
+
+def nameprep(s):
+ raise NotImplementedError("IDNA 2008 does not utilise nameprep protocol")
+
diff --git a/third_party/python/idna/idna/core.py b/third_party/python/idna/idna/core.py
new file mode 100644
index 0000000000..41ec5c711d
--- /dev/null
+++ b/third_party/python/idna/idna/core.py
@@ -0,0 +1,400 @@
+from . import idnadata
+import bisect
+import unicodedata
+import re
+import sys
+from .intranges import intranges_contain
+
+_virama_combining_class = 9
+_alabel_prefix = b'xn--'
+_unicode_dots_re = re.compile(u'[\u002e\u3002\uff0e\uff61]')
+
+if sys.version_info[0] >= 3:
+ unicode = str
+ unichr = chr
+
+class IDNAError(UnicodeError):
+ """ Base exception for all IDNA-encoding related problems """
+ pass
+
+
+class IDNABidiError(IDNAError):
+ """ Exception when bidirectional requirements are not satisfied """
+ pass
+
+
+class InvalidCodepoint(IDNAError):
+ """ Exception when a disallowed or unallocated codepoint is used """
+ pass
+
+
+class InvalidCodepointContext(IDNAError):
+ """ Exception when the codepoint is not valid in the context it is used """
+ pass
+
+
+def _combining_class(cp):
+ v = unicodedata.combining(unichr(cp))
+ if v == 0:
+ if not unicodedata.name(unichr(cp)):
+ raise ValueError("Unknown character in unicodedata")
+ return v
+
+def _is_script(cp, script):
+ return intranges_contain(ord(cp), idnadata.scripts[script])
+
+def _punycode(s):
+ return s.encode('punycode')
+
+def _unot(s):
+ return 'U+{0:04X}'.format(s)
+
+
+def valid_label_length(label):
+
+ if len(label) > 63:
+ return False
+ return True
+
+
+def valid_string_length(label, trailing_dot):
+
+ if len(label) > (254 if trailing_dot else 253):
+ return False
+ return True
+
+
+def check_bidi(label, check_ltr=False):
+
+ # Bidi rules should only be applied if string contains RTL characters
+ bidi_label = False
+ for (idx, cp) in enumerate(label, 1):
+ direction = unicodedata.bidirectional(cp)
+ if direction == '':
+ # String likely comes from a newer version of Unicode
+ raise IDNABidiError('Unknown directionality in label {0} at position {1}'.format(repr(label), idx))
+ if direction in ['R', 'AL', 'AN']:
+ bidi_label = True
+ if not bidi_label and not check_ltr:
+ return True
+
+ # Bidi rule 1
+ direction = unicodedata.bidirectional(label[0])
+ if direction in ['R', 'AL']:
+ rtl = True
+ elif direction == 'L':
+ rtl = False
+ else:
+ raise IDNABidiError('First codepoint in label {0} must be directionality L, R or AL'.format(repr(label)))
+
+ valid_ending = False
+ number_type = False
+ for (idx, cp) in enumerate(label, 1):
+ direction = unicodedata.bidirectional(cp)
+
+ if rtl:
+ # Bidi rule 2
+ if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
+ raise IDNABidiError('Invalid direction for codepoint at position {0} in a right-to-left label'.format(idx))
+ # Bidi rule 3
+ if direction in ['R', 'AL', 'EN', 'AN']:
+ valid_ending = True
+ elif direction != 'NSM':
+ valid_ending = False
+ # Bidi rule 4
+ if direction in ['AN', 'EN']:
+ if not number_type:
+ number_type = direction
+ else:
+ if number_type != direction:
+ raise IDNABidiError('Can not mix numeral types in a right-to-left label')
+ else:
+ # Bidi rule 5
+ if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
+ raise IDNABidiError('Invalid direction for codepoint at position {0} in a left-to-right label'.format(idx))
+ # Bidi rule 6
+ if direction in ['L', 'EN']:
+ valid_ending = True
+ elif direction != 'NSM':
+ valid_ending = False
+
+ if not valid_ending:
+ raise IDNABidiError('Label ends with illegal codepoint directionality')
+
+ return True
+
+
+def check_initial_combiner(label):
+
+ if unicodedata.category(label[0])[0] == 'M':
+ raise IDNAError('Label begins with an illegal combining character')
+ return True
+
+
+def check_hyphen_ok(label):
+
+ if label[2:4] == '--':
+ raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
+ if label[0] == '-' or label[-1] == '-':
+ raise IDNAError('Label must not start or end with a hyphen')
+ return True
+
+
+def check_nfc(label):
+
+ if unicodedata.normalize('NFC', label) != label:
+ raise IDNAError('Label must be in Normalization Form C')
+
+
+def valid_contextj(label, pos):
+
+ cp_value = ord(label[pos])
+
+ if cp_value == 0x200c:
+
+ if pos > 0:
+ if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
+ return True
+
+ ok = False
+ for i in range(pos-1, -1, -1):
+ joining_type = idnadata.joining_types.get(ord(label[i]))
+ if joining_type == ord('T'):
+ continue
+ if joining_type in [ord('L'), ord('D')]:
+ ok = True
+ break
+
+ if not ok:
+ return False
+
+ ok = False
+ for i in range(pos+1, len(label)):
+ joining_type = idnadata.joining_types.get(ord(label[i]))
+ if joining_type == ord('T'):
+ continue
+ if joining_type in [ord('R'), ord('D')]:
+ ok = True
+ break
+ return ok
+
+ if cp_value == 0x200d:
+
+ if pos > 0:
+ if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
+ return True
+ return False
+
+ else:
+
+ return False
+
+
+def valid_contexto(label, pos, exception=False):
+
+ cp_value = ord(label[pos])
+
+ if cp_value == 0x00b7:
+ if 0 < pos < len(label)-1:
+ if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
+ return True
+ return False
+
+ elif cp_value == 0x0375:
+ if pos < len(label)-1 and len(label) > 1:
+ return _is_script(label[pos + 1], 'Greek')
+ return False
+
+ elif cp_value == 0x05f3 or cp_value == 0x05f4:
+ if pos > 0:
+ return _is_script(label[pos - 1], 'Hebrew')
+ return False
+
+ elif cp_value == 0x30fb:
+ for cp in label:
+ if cp == u'\u30fb':
+ continue
+ if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
+ return True
+ return False
+
+ elif 0x660 <= cp_value <= 0x669:
+ for cp in label:
+ if 0x6f0 <= ord(cp) <= 0x06f9:
+ return False
+ return True
+
+ elif 0x6f0 <= cp_value <= 0x6f9:
+ for cp in label:
+ if 0x660 <= ord(cp) <= 0x0669:
+ return False
+ return True
+
+
+def check_label(label):
+
+ if isinstance(label, (bytes, bytearray)):
+ label = label.decode('utf-8')
+ if len(label) == 0:
+ raise IDNAError('Empty Label')
+
+ check_nfc(label)
+ check_hyphen_ok(label)
+ check_initial_combiner(label)
+
+ for (pos, cp) in enumerate(label):
+ cp_value = ord(cp)
+ if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
+ continue
+ elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
+ try:
+ if not valid_contextj(label, pos):
+ raise InvalidCodepointContext('Joiner {0} not allowed at position {1} in {2}'.format(
+ _unot(cp_value), pos+1, repr(label)))
+ except ValueError:
+ raise IDNAError('Unknown codepoint adjacent to joiner {0} at position {1} in {2}'.format(
+ _unot(cp_value), pos+1, repr(label)))
+ elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
+ if not valid_contexto(label, pos):
+ raise InvalidCodepointContext('Codepoint {0} not allowed at position {1} in {2}'.format(_unot(cp_value), pos+1, repr(label)))
+ else:
+ raise InvalidCodepoint('Codepoint {0} at position {1} of {2} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
+
+ check_bidi(label)
+
+
+def alabel(label):
+
+ try:
+ label = label.encode('ascii')
+ ulabel(label)
+ if not valid_label_length(label):
+ raise IDNAError('Label too long')
+ return label
+ except UnicodeEncodeError:
+ pass
+
+ if not label:
+ raise IDNAError('No Input')
+
+ label = unicode(label)
+ check_label(label)
+ label = _punycode(label)
+ label = _alabel_prefix + label
+
+ if not valid_label_length(label):
+ raise IDNAError('Label too long')
+
+ return label
+
+
+def ulabel(label):
+
+ if not isinstance(label, (bytes, bytearray)):
+ try:
+ label = label.encode('ascii')
+ except UnicodeEncodeError:
+ check_label(label)
+ return label
+
+ label = label.lower()
+ if label.startswith(_alabel_prefix):
+ label = label[len(_alabel_prefix):]
+ if not label:
+ raise IDNAError('Malformed A-label, no Punycode eligible content found')
+ if label.decode('ascii')[-1] == '-':
+ raise IDNAError('A-label must not end with a hyphen')
+ else:
+ check_label(label)
+ return label.decode('ascii')
+
+ label = label.decode('punycode')
+ check_label(label)
+ return label
+
+
+def uts46_remap(domain, std3_rules=True, transitional=False):
+ """Re-map the characters in the string according to UTS46 processing."""
+ from .uts46data import uts46data
+ output = u""
+ try:
+ for pos, char in enumerate(domain):
+ code_point = ord(char)
+ uts46row = uts46data[code_point if code_point < 256 else
+ bisect.bisect_left(uts46data, (code_point, "Z")) - 1]
+ status = uts46row[1]
+ replacement = uts46row[2] if len(uts46row) == 3 else None
+ if (status == "V" or
+ (status == "D" and not transitional) or
+ (status == "3" and not std3_rules and replacement is None)):
+ output += char
+ elif replacement is not None and (status == "M" or
+ (status == "3" and not std3_rules) or
+ (status == "D" and transitional)):
+ output += replacement
+ elif status != "I":
+ raise IndexError()
+ return unicodedata.normalize("NFC", output)
+ except IndexError:
+ raise InvalidCodepoint(
+ "Codepoint {0} not allowed at position {1} in {2}".format(
+ _unot(code_point), pos + 1, repr(domain)))
+
+
+def encode(s, strict=False, uts46=False, std3_rules=False, transitional=False):
+
+ if isinstance(s, (bytes, bytearray)):
+ s = s.decode("ascii")
+ if uts46:
+ s = uts46_remap(s, std3_rules, transitional)
+ trailing_dot = False
+ result = []
+ if strict:
+ labels = s.split('.')
+ else:
+ labels = _unicode_dots_re.split(s)
+ if not labels or labels == ['']:
+ raise IDNAError('Empty domain')
+ if labels[-1] == '':
+ del labels[-1]
+ trailing_dot = True
+ for label in labels:
+ s = alabel(label)
+ if s:
+ result.append(s)
+ else:
+ raise IDNAError('Empty label')
+ if trailing_dot:
+ result.append(b'')
+ s = b'.'.join(result)
+ if not valid_string_length(s, trailing_dot):
+ raise IDNAError('Domain too long')
+ return s
+
+
+def decode(s, strict=False, uts46=False, std3_rules=False):
+
+ if isinstance(s, (bytes, bytearray)):
+ s = s.decode("ascii")
+ if uts46:
+ s = uts46_remap(s, std3_rules, False)
+ trailing_dot = False
+ result = []
+ if not strict:
+ labels = _unicode_dots_re.split(s)
+ else:
+ labels = s.split(u'.')
+ if not labels or labels == ['']:
+ raise IDNAError('Empty domain')
+ if not labels[-1]:
+ del labels[-1]
+ trailing_dot = True
+ for label in labels:
+ s = ulabel(label)
+ if s:
+ result.append(s)
+ else:
+ raise IDNAError('Empty label')
+ if trailing_dot:
+ result.append(u'')
+ return u'.'.join(result)
diff --git a/third_party/python/idna/idna/idnadata.py b/third_party/python/idna/idna/idnadata.py
new file mode 100644
index 0000000000..a284e4c84a
--- /dev/null
+++ b/third_party/python/idna/idna/idnadata.py
@@ -0,0 +1,2050 @@
+# This file is automatically generated by tools/idna-data
+
+__version__ = "13.0.0"
+scripts = {
+ 'Greek': (
+ 0x37000000374,
+ 0x37500000378,
+ 0x37a0000037e,
+ 0x37f00000380,
+ 0x38400000385,
+ 0x38600000387,
+ 0x3880000038b,
+ 0x38c0000038d,
+ 0x38e000003a2,
+ 0x3a3000003e2,
+ 0x3f000000400,
+ 0x1d2600001d2b,
+ 0x1d5d00001d62,
+ 0x1d6600001d6b,
+ 0x1dbf00001dc0,
+ 0x1f0000001f16,
+ 0x1f1800001f1e,
+ 0x1f2000001f46,
+ 0x1f4800001f4e,
+ 0x1f5000001f58,
+ 0x1f5900001f5a,
+ 0x1f5b00001f5c,
+ 0x1f5d00001f5e,
+ 0x1f5f00001f7e,
+ 0x1f8000001fb5,
+ 0x1fb600001fc5,
+ 0x1fc600001fd4,
+ 0x1fd600001fdc,
+ 0x1fdd00001ff0,
+ 0x1ff200001ff5,
+ 0x1ff600001fff,
+ 0x212600002127,
+ 0xab650000ab66,
+ 0x101400001018f,
+ 0x101a0000101a1,
+ 0x1d2000001d246,
+ ),
+ 'Han': (
+ 0x2e8000002e9a,
+ 0x2e9b00002ef4,
+ 0x2f0000002fd6,
+ 0x300500003006,
+ 0x300700003008,
+ 0x30210000302a,
+ 0x30380000303c,
+ 0x340000004dc0,
+ 0x4e0000009ffd,
+ 0xf9000000fa6e,
+ 0xfa700000fada,
+ 0x16ff000016ff2,
+ 0x200000002a6de,
+ 0x2a7000002b735,
+ 0x2b7400002b81e,
+ 0x2b8200002cea2,
+ 0x2ceb00002ebe1,
+ 0x2f8000002fa1e,
+ 0x300000003134b,
+ ),
+ 'Hebrew': (
+ 0x591000005c8,
+ 0x5d0000005eb,
+ 0x5ef000005f5,
+ 0xfb1d0000fb37,
+ 0xfb380000fb3d,
+ 0xfb3e0000fb3f,
+ 0xfb400000fb42,
+ 0xfb430000fb45,
+ 0xfb460000fb50,
+ ),
+ 'Hiragana': (
+ 0x304100003097,
+ 0x309d000030a0,
+ 0x1b0010001b11f,
+ 0x1b1500001b153,
+ 0x1f2000001f201,
+ ),
+ 'Katakana': (
+ 0x30a1000030fb,
+ 0x30fd00003100,
+ 0x31f000003200,
+ 0x32d0000032ff,
+ 0x330000003358,
+ 0xff660000ff70,
+ 0xff710000ff9e,
+ 0x1b0000001b001,
+ 0x1b1640001b168,
+ ),
+}
+joining_types = {
+ 0x600: 85,
+ 0x601: 85,
+ 0x602: 85,
+ 0x603: 85,
+ 0x604: 85,
+ 0x605: 85,
+ 0x608: 85,
+ 0x60b: 85,
+ 0x620: 68,
+ 0x621: 85,
+ 0x622: 82,
+ 0x623: 82,
+ 0x624: 82,
+ 0x625: 82,
+ 0x626: 68,
+ 0x627: 82,
+ 0x628: 68,
+ 0x629: 82,
+ 0x62a: 68,
+ 0x62b: 68,
+ 0x62c: 68,
+ 0x62d: 68,
+ 0x62e: 68,
+ 0x62f: 82,
+ 0x630: 82,
+ 0x631: 82,
+ 0x632: 82,
+ 0x633: 68,
+ 0x634: 68,
+ 0x635: 68,
+ 0x636: 68,
+ 0x637: 68,
+ 0x638: 68,
+ 0x639: 68,
+ 0x63a: 68,
+ 0x63b: 68,
+ 0x63c: 68,
+ 0x63d: 68,
+ 0x63e: 68,
+ 0x63f: 68,
+ 0x640: 67,
+ 0x641: 68,
+ 0x642: 68,
+ 0x643: 68,
+ 0x644: 68,
+ 0x645: 68,
+ 0x646: 68,
+ 0x647: 68,
+ 0x648: 82,
+ 0x649: 68,
+ 0x64a: 68,
+ 0x66e: 68,
+ 0x66f: 68,
+ 0x671: 82,
+ 0x672: 82,
+ 0x673: 82,
+ 0x674: 85,
+ 0x675: 82,
+ 0x676: 82,
+ 0x677: 82,
+ 0x678: 68,
+ 0x679: 68,
+ 0x67a: 68,
+ 0x67b: 68,
+ 0x67c: 68,
+ 0x67d: 68,
+ 0x67e: 68,
+ 0x67f: 68,
+ 0x680: 68,
+ 0x681: 68,
+ 0x682: 68,
+ 0x683: 68,
+ 0x684: 68,
+ 0x685: 68,
+ 0x686: 68,
+ 0x687: 68,
+ 0x688: 82,
+ 0x689: 82,
+ 0x68a: 82,
+ 0x68b: 82,
+ 0x68c: 82,
+ 0x68d: 82,
+ 0x68e: 82,
+ 0x68f: 82,
+ 0x690: 82,
+ 0x691: 82,
+ 0x692: 82,
+ 0x693: 82,
+ 0x694: 82,
+ 0x695: 82,
+ 0x696: 82,
+ 0x697: 82,
+ 0x698: 82,
+ 0x699: 82,
+ 0x69a: 68,
+ 0x69b: 68,
+ 0x69c: 68,
+ 0x69d: 68,
+ 0x69e: 68,
+ 0x69f: 68,
+ 0x6a0: 68,
+ 0x6a1: 68,
+ 0x6a2: 68,
+ 0x6a3: 68,
+ 0x6a4: 68,
+ 0x6a5: 68,
+ 0x6a6: 68,
+ 0x6a7: 68,
+ 0x6a8: 68,
+ 0x6a9: 68,
+ 0x6aa: 68,
+ 0x6ab: 68,
+ 0x6ac: 68,
+ 0x6ad: 68,
+ 0x6ae: 68,
+ 0x6af: 68,
+ 0x6b0: 68,
+ 0x6b1: 68,
+ 0x6b2: 68,
+ 0x6b3: 68,
+ 0x6b4: 68,
+ 0x6b5: 68,
+ 0x6b6: 68,
+ 0x6b7: 68,
+ 0x6b8: 68,
+ 0x6b9: 68,
+ 0x6ba: 68,
+ 0x6bb: 68,
+ 0x6bc: 68,
+ 0x6bd: 68,
+ 0x6be: 68,
+ 0x6bf: 68,
+ 0x6c0: 82,
+ 0x6c1: 68,
+ 0x6c2: 68,
+ 0x6c3: 82,
+ 0x6c4: 82,
+ 0x6c5: 82,
+ 0x6c6: 82,
+ 0x6c7: 82,
+ 0x6c8: 82,
+ 0x6c9: 82,
+ 0x6ca: 82,
+ 0x6cb: 82,
+ 0x6cc: 68,
+ 0x6cd: 82,
+ 0x6ce: 68,
+ 0x6cf: 82,
+ 0x6d0: 68,
+ 0x6d1: 68,
+ 0x6d2: 82,
+ 0x6d3: 82,
+ 0x6d5: 82,
+ 0x6dd: 85,
+ 0x6ee: 82,
+ 0x6ef: 82,
+ 0x6fa: 68,
+ 0x6fb: 68,
+ 0x6fc: 68,
+ 0x6ff: 68,
+ 0x70f: 84,
+ 0x710: 82,
+ 0x712: 68,
+ 0x713: 68,
+ 0x714: 68,
+ 0x715: 82,
+ 0x716: 82,
+ 0x717: 82,
+ 0x718: 82,
+ 0x719: 82,
+ 0x71a: 68,
+ 0x71b: 68,
+ 0x71c: 68,
+ 0x71d: 68,
+ 0x71e: 82,
+ 0x71f: 68,
+ 0x720: 68,
+ 0x721: 68,
+ 0x722: 68,
+ 0x723: 68,
+ 0x724: 68,
+ 0x725: 68,
+ 0x726: 68,
+ 0x727: 68,
+ 0x728: 82,
+ 0x729: 68,
+ 0x72a: 82,
+ 0x72b: 68,
+ 0x72c: 82,
+ 0x72d: 68,
+ 0x72e: 68,
+ 0x72f: 82,
+ 0x74d: 82,
+ 0x74e: 68,
+ 0x74f: 68,
+ 0x750: 68,
+ 0x751: 68,
+ 0x752: 68,
+ 0x753: 68,
+ 0x754: 68,
+ 0x755: 68,
+ 0x756: 68,
+ 0x757: 68,
+ 0x758: 68,
+ 0x759: 82,
+ 0x75a: 82,
+ 0x75b: 82,
+ 0x75c: 68,
+ 0x75d: 68,
+ 0x75e: 68,
+ 0x75f: 68,
+ 0x760: 68,
+ 0x761: 68,
+ 0x762: 68,
+ 0x763: 68,
+ 0x764: 68,
+ 0x765: 68,
+ 0x766: 68,
+ 0x767: 68,
+ 0x768: 68,
+ 0x769: 68,
+ 0x76a: 68,
+ 0x76b: 82,
+ 0x76c: 82,
+ 0x76d: 68,
+ 0x76e: 68,
+ 0x76f: 68,
+ 0x770: 68,
+ 0x771: 82,
+ 0x772: 68,
+ 0x773: 82,
+ 0x774: 82,
+ 0x775: 68,
+ 0x776: 68,
+ 0x777: 68,
+ 0x778: 82,
+ 0x779: 82,
+ 0x77a: 68,
+ 0x77b: 68,
+ 0x77c: 68,
+ 0x77d: 68,
+ 0x77e: 68,
+ 0x77f: 68,
+ 0x7ca: 68,
+ 0x7cb: 68,
+ 0x7cc: 68,
+ 0x7cd: 68,
+ 0x7ce: 68,
+ 0x7cf: 68,
+ 0x7d0: 68,
+ 0x7d1: 68,
+ 0x7d2: 68,
+ 0x7d3: 68,
+ 0x7d4: 68,
+ 0x7d5: 68,
+ 0x7d6: 68,
+ 0x7d7: 68,
+ 0x7d8: 68,
+ 0x7d9: 68,
+ 0x7da: 68,
+ 0x7db: 68,
+ 0x7dc: 68,
+ 0x7dd: 68,
+ 0x7de: 68,
+ 0x7df: 68,
+ 0x7e0: 68,
+ 0x7e1: 68,
+ 0x7e2: 68,
+ 0x7e3: 68,
+ 0x7e4: 68,
+ 0x7e5: 68,
+ 0x7e6: 68,
+ 0x7e7: 68,
+ 0x7e8: 68,
+ 0x7e9: 68,
+ 0x7ea: 68,
+ 0x7fa: 67,
+ 0x840: 82,
+ 0x841: 68,
+ 0x842: 68,
+ 0x843: 68,
+ 0x844: 68,
+ 0x845: 68,
+ 0x846: 82,
+ 0x847: 82,
+ 0x848: 68,
+ 0x849: 82,
+ 0x84a: 68,
+ 0x84b: 68,
+ 0x84c: 68,
+ 0x84d: 68,
+ 0x84e: 68,
+ 0x84f: 68,
+ 0x850: 68,
+ 0x851: 68,
+ 0x852: 68,
+ 0x853: 68,
+ 0x854: 82,
+ 0x855: 68,
+ 0x856: 82,
+ 0x857: 82,
+ 0x858: 82,
+ 0x860: 68,
+ 0x861: 85,
+ 0x862: 68,
+ 0x863: 68,
+ 0x864: 68,
+ 0x865: 68,
+ 0x866: 85,
+ 0x867: 82,
+ 0x868: 68,
+ 0x869: 82,
+ 0x86a: 82,
+ 0x8a0: 68,
+ 0x8a1: 68,
+ 0x8a2: 68,
+ 0x8a3: 68,
+ 0x8a4: 68,
+ 0x8a5: 68,
+ 0x8a6: 68,
+ 0x8a7: 68,
+ 0x8a8: 68,
+ 0x8a9: 68,
+ 0x8aa: 82,
+ 0x8ab: 82,
+ 0x8ac: 82,
+ 0x8ad: 85,
+ 0x8ae: 82,
+ 0x8af: 68,
+ 0x8b0: 68,
+ 0x8b1: 82,
+ 0x8b2: 82,
+ 0x8b3: 68,
+ 0x8b4: 68,
+ 0x8b6: 68,
+ 0x8b7: 68,
+ 0x8b8: 68,
+ 0x8b9: 82,
+ 0x8ba: 68,
+ 0x8bb: 68,
+ 0x8bc: 68,
+ 0x8bd: 68,
+ 0x8be: 68,
+ 0x8bf: 68,
+ 0x8c0: 68,
+ 0x8c1: 68,
+ 0x8c2: 68,
+ 0x8c3: 68,
+ 0x8c4: 68,
+ 0x8c5: 68,
+ 0x8c6: 68,
+ 0x8c7: 68,
+ 0x8e2: 85,
+ 0x1806: 85,
+ 0x1807: 68,
+ 0x180a: 67,
+ 0x180e: 85,
+ 0x1820: 68,
+ 0x1821: 68,
+ 0x1822: 68,
+ 0x1823: 68,
+ 0x1824: 68,
+ 0x1825: 68,
+ 0x1826: 68,
+ 0x1827: 68,
+ 0x1828: 68,
+ 0x1829: 68,
+ 0x182a: 68,
+ 0x182b: 68,
+ 0x182c: 68,
+ 0x182d: 68,
+ 0x182e: 68,
+ 0x182f: 68,
+ 0x1830: 68,
+ 0x1831: 68,
+ 0x1832: 68,
+ 0x1833: 68,
+ 0x1834: 68,
+ 0x1835: 68,
+ 0x1836: 68,
+ 0x1837: 68,
+ 0x1838: 68,
+ 0x1839: 68,
+ 0x183a: 68,
+ 0x183b: 68,
+ 0x183c: 68,
+ 0x183d: 68,
+ 0x183e: 68,
+ 0x183f: 68,
+ 0x1840: 68,
+ 0x1841: 68,
+ 0x1842: 68,
+ 0x1843: 68,
+ 0x1844: 68,
+ 0x1845: 68,
+ 0x1846: 68,
+ 0x1847: 68,
+ 0x1848: 68,
+ 0x1849: 68,
+ 0x184a: 68,
+ 0x184b: 68,
+ 0x184c: 68,
+ 0x184d: 68,
+ 0x184e: 68,
+ 0x184f: 68,
+ 0x1850: 68,
+ 0x1851: 68,
+ 0x1852: 68,
+ 0x1853: 68,
+ 0x1854: 68,
+ 0x1855: 68,
+ 0x1856: 68,
+ 0x1857: 68,
+ 0x1858: 68,
+ 0x1859: 68,
+ 0x185a: 68,
+ 0x185b: 68,
+ 0x185c: 68,
+ 0x185d: 68,
+ 0x185e: 68,
+ 0x185f: 68,
+ 0x1860: 68,
+ 0x1861: 68,
+ 0x1862: 68,
+ 0x1863: 68,
+ 0x1864: 68,
+ 0x1865: 68,
+ 0x1866: 68,
+ 0x1867: 68,
+ 0x1868: 68,
+ 0x1869: 68,
+ 0x186a: 68,
+ 0x186b: 68,
+ 0x186c: 68,
+ 0x186d: 68,
+ 0x186e: 68,
+ 0x186f: 68,
+ 0x1870: 68,
+ 0x1871: 68,
+ 0x1872: 68,
+ 0x1873: 68,
+ 0x1874: 68,
+ 0x1875: 68,
+ 0x1876: 68,
+ 0x1877: 68,
+ 0x1878: 68,
+ 0x1880: 85,
+ 0x1881: 85,
+ 0x1882: 85,
+ 0x1883: 85,
+ 0x1884: 85,
+ 0x1885: 84,
+ 0x1886: 84,
+ 0x1887: 68,
+ 0x1888: 68,
+ 0x1889: 68,
+ 0x188a: 68,
+ 0x188b: 68,
+ 0x188c: 68,
+ 0x188d: 68,
+ 0x188e: 68,
+ 0x188f: 68,
+ 0x1890: 68,
+ 0x1891: 68,
+ 0x1892: 68,
+ 0x1893: 68,
+ 0x1894: 68,
+ 0x1895: 68,
+ 0x1896: 68,
+ 0x1897: 68,
+ 0x1898: 68,
+ 0x1899: 68,
+ 0x189a: 68,
+ 0x189b: 68,
+ 0x189c: 68,
+ 0x189d: 68,
+ 0x189e: 68,
+ 0x189f: 68,
+ 0x18a0: 68,
+ 0x18a1: 68,
+ 0x18a2: 68,
+ 0x18a3: 68,
+ 0x18a4: 68,
+ 0x18a5: 68,
+ 0x18a6: 68,
+ 0x18a7: 68,
+ 0x18a8: 68,
+ 0x18aa: 68,
+ 0x200c: 85,
+ 0x200d: 67,
+ 0x202f: 85,
+ 0x2066: 85,
+ 0x2067: 85,
+ 0x2068: 85,
+ 0x2069: 85,
+ 0xa840: 68,
+ 0xa841: 68,
+ 0xa842: 68,
+ 0xa843: 68,
+ 0xa844: 68,
+ 0xa845: 68,
+ 0xa846: 68,
+ 0xa847: 68,
+ 0xa848: 68,
+ 0xa849: 68,
+ 0xa84a: 68,
+ 0xa84b: 68,
+ 0xa84c: 68,
+ 0xa84d: 68,
+ 0xa84e: 68,
+ 0xa84f: 68,
+ 0xa850: 68,
+ 0xa851: 68,
+ 0xa852: 68,
+ 0xa853: 68,
+ 0xa854: 68,
+ 0xa855: 68,
+ 0xa856: 68,
+ 0xa857: 68,
+ 0xa858: 68,
+ 0xa859: 68,
+ 0xa85a: 68,
+ 0xa85b: 68,
+ 0xa85c: 68,
+ 0xa85d: 68,
+ 0xa85e: 68,
+ 0xa85f: 68,
+ 0xa860: 68,
+ 0xa861: 68,
+ 0xa862: 68,
+ 0xa863: 68,
+ 0xa864: 68,
+ 0xa865: 68,
+ 0xa866: 68,
+ 0xa867: 68,
+ 0xa868: 68,
+ 0xa869: 68,
+ 0xa86a: 68,
+ 0xa86b: 68,
+ 0xa86c: 68,
+ 0xa86d: 68,
+ 0xa86e: 68,
+ 0xa86f: 68,
+ 0xa870: 68,
+ 0xa871: 68,
+ 0xa872: 76,
+ 0xa873: 85,
+ 0x10ac0: 68,
+ 0x10ac1: 68,
+ 0x10ac2: 68,
+ 0x10ac3: 68,
+ 0x10ac4: 68,
+ 0x10ac5: 82,
+ 0x10ac6: 85,
+ 0x10ac7: 82,
+ 0x10ac8: 85,
+ 0x10ac9: 82,
+ 0x10aca: 82,
+ 0x10acb: 85,
+ 0x10acc: 85,
+ 0x10acd: 76,
+ 0x10ace: 82,
+ 0x10acf: 82,
+ 0x10ad0: 82,
+ 0x10ad1: 82,
+ 0x10ad2: 82,
+ 0x10ad3: 68,
+ 0x10ad4: 68,
+ 0x10ad5: 68,
+ 0x10ad6: 68,
+ 0x10ad7: 76,
+ 0x10ad8: 68,
+ 0x10ad9: 68,
+ 0x10ada: 68,
+ 0x10adb: 68,
+ 0x10adc: 68,
+ 0x10add: 82,
+ 0x10ade: 68,
+ 0x10adf: 68,
+ 0x10ae0: 68,
+ 0x10ae1: 82,
+ 0x10ae2: 85,
+ 0x10ae3: 85,
+ 0x10ae4: 82,
+ 0x10aeb: 68,
+ 0x10aec: 68,
+ 0x10aed: 68,
+ 0x10aee: 68,
+ 0x10aef: 82,
+ 0x10b80: 68,
+ 0x10b81: 82,
+ 0x10b82: 68,
+ 0x10b83: 82,
+ 0x10b84: 82,
+ 0x10b85: 82,
+ 0x10b86: 68,
+ 0x10b87: 68,
+ 0x10b88: 68,
+ 0x10b89: 82,
+ 0x10b8a: 68,
+ 0x10b8b: 68,
+ 0x10b8c: 82,
+ 0x10b8d: 68,
+ 0x10b8e: 82,
+ 0x10b8f: 82,
+ 0x10b90: 68,
+ 0x10b91: 82,
+ 0x10ba9: 82,
+ 0x10baa: 82,
+ 0x10bab: 82,
+ 0x10bac: 82,
+ 0x10bad: 68,
+ 0x10bae: 68,
+ 0x10baf: 85,
+ 0x10d00: 76,
+ 0x10d01: 68,
+ 0x10d02: 68,
+ 0x10d03: 68,
+ 0x10d04: 68,
+ 0x10d05: 68,
+ 0x10d06: 68,
+ 0x10d07: 68,
+ 0x10d08: 68,
+ 0x10d09: 68,
+ 0x10d0a: 68,
+ 0x10d0b: 68,
+ 0x10d0c: 68,
+ 0x10d0d: 68,
+ 0x10d0e: 68,
+ 0x10d0f: 68,
+ 0x10d10: 68,
+ 0x10d11: 68,
+ 0x10d12: 68,
+ 0x10d13: 68,
+ 0x10d14: 68,
+ 0x10d15: 68,
+ 0x10d16: 68,
+ 0x10d17: 68,
+ 0x10d18: 68,
+ 0x10d19: 68,
+ 0x10d1a: 68,
+ 0x10d1b: 68,
+ 0x10d1c: 68,
+ 0x10d1d: 68,
+ 0x10d1e: 68,
+ 0x10d1f: 68,
+ 0x10d20: 68,
+ 0x10d21: 68,
+ 0x10d22: 82,
+ 0x10d23: 68,
+ 0x10f30: 68,
+ 0x10f31: 68,
+ 0x10f32: 68,
+ 0x10f33: 82,
+ 0x10f34: 68,
+ 0x10f35: 68,
+ 0x10f36: 68,
+ 0x10f37: 68,
+ 0x10f38: 68,
+ 0x10f39: 68,
+ 0x10f3a: 68,
+ 0x10f3b: 68,
+ 0x10f3c: 68,
+ 0x10f3d: 68,
+ 0x10f3e: 68,
+ 0x10f3f: 68,
+ 0x10f40: 68,
+ 0x10f41: 68,
+ 0x10f42: 68,
+ 0x10f43: 68,
+ 0x10f44: 68,
+ 0x10f45: 85,
+ 0x10f51: 68,
+ 0x10f52: 68,
+ 0x10f53: 68,
+ 0x10f54: 82,
+ 0x10fb0: 68,
+ 0x10fb1: 85,
+ 0x10fb2: 68,
+ 0x10fb3: 68,
+ 0x10fb4: 82,
+ 0x10fb5: 82,
+ 0x10fb6: 82,
+ 0x10fb7: 85,
+ 0x10fb8: 68,
+ 0x10fb9: 82,
+ 0x10fba: 82,
+ 0x10fbb: 68,
+ 0x10fbc: 68,
+ 0x10fbd: 82,
+ 0x10fbe: 68,
+ 0x10fbf: 68,
+ 0x10fc0: 85,
+ 0x10fc1: 68,
+ 0x10fc2: 82,
+ 0x10fc3: 82,
+ 0x10fc4: 68,
+ 0x10fc5: 85,
+ 0x10fc6: 85,
+ 0x10fc7: 85,
+ 0x10fc8: 85,
+ 0x10fc9: 82,
+ 0x10fca: 68,
+ 0x10fcb: 76,
+ 0x110bd: 85,
+ 0x110cd: 85,
+ 0x1e900: 68,
+ 0x1e901: 68,
+ 0x1e902: 68,
+ 0x1e903: 68,
+ 0x1e904: 68,
+ 0x1e905: 68,
+ 0x1e906: 68,
+ 0x1e907: 68,
+ 0x1e908: 68,
+ 0x1e909: 68,
+ 0x1e90a: 68,
+ 0x1e90b: 68,
+ 0x1e90c: 68,
+ 0x1e90d: 68,
+ 0x1e90e: 68,
+ 0x1e90f: 68,
+ 0x1e910: 68,
+ 0x1e911: 68,
+ 0x1e912: 68,
+ 0x1e913: 68,
+ 0x1e914: 68,
+ 0x1e915: 68,
+ 0x1e916: 68,
+ 0x1e917: 68,
+ 0x1e918: 68,
+ 0x1e919: 68,
+ 0x1e91a: 68,
+ 0x1e91b: 68,
+ 0x1e91c: 68,
+ 0x1e91d: 68,
+ 0x1e91e: 68,
+ 0x1e91f: 68,
+ 0x1e920: 68,
+ 0x1e921: 68,
+ 0x1e922: 68,
+ 0x1e923: 68,
+ 0x1e924: 68,
+ 0x1e925: 68,
+ 0x1e926: 68,
+ 0x1e927: 68,
+ 0x1e928: 68,
+ 0x1e929: 68,
+ 0x1e92a: 68,
+ 0x1e92b: 68,
+ 0x1e92c: 68,
+ 0x1e92d: 68,
+ 0x1e92e: 68,
+ 0x1e92f: 68,
+ 0x1e930: 68,
+ 0x1e931: 68,
+ 0x1e932: 68,
+ 0x1e933: 68,
+ 0x1e934: 68,
+ 0x1e935: 68,
+ 0x1e936: 68,
+ 0x1e937: 68,
+ 0x1e938: 68,
+ 0x1e939: 68,
+ 0x1e93a: 68,
+ 0x1e93b: 68,
+ 0x1e93c: 68,
+ 0x1e93d: 68,
+ 0x1e93e: 68,
+ 0x1e93f: 68,
+ 0x1e940: 68,
+ 0x1e941: 68,
+ 0x1e942: 68,
+ 0x1e943: 68,
+ 0x1e94b: 84,
+}
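A note on the integer values in joining_types above: they are the ASCII codes of the single-letter Unicode Joining_Type categories (68 = 'D' dual-joining, 82 = 'R' right-joining, 76 = 'L' left-joining, 85 = 'U' non-joining, 84 = 'T' transparent, 67 = 'C' join-causing), which the CONTEXTJ checks in idna/core.py consult. A minimal illustration; the JOINING_TYPE_NAMES mapping here is ours, not part of the module, and the two-entry excerpt copies rows from the table above:

    # Illustrative decoding of joining_types values (assumed helper, not in idna).
    JOINING_TYPE_NAMES = {
        ord('D'): 'Dual_Joining',
        ord('R'): 'Right_Joining',
        ord('L'): 'Left_Joining',
        ord('U'): 'Non_Joining',
        ord('T'): 'Transparent',
        ord('C'): 'Join_Causing',
    }

    joining_types = {0x200C: 85, 0x200D: 67}  # excerpt from the table above
    assert chr(joining_types[0x200C]) == 'U'  # ZERO WIDTH NON-JOINER
    assert JOINING_TYPE_NAMES[joining_types[0x200D]] == 'Join_Causing'  # ZWJ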
+codepoint_classes = {
+ 'PVALID': (
+ 0x2d0000002e,
+ 0x300000003a,
+ 0x610000007b,
+ 0xdf000000f7,
+ 0xf800000100,
+ 0x10100000102,
+ 0x10300000104,
+ 0x10500000106,
+ 0x10700000108,
+ 0x1090000010a,
+ 0x10b0000010c,
+ 0x10d0000010e,
+ 0x10f00000110,
+ 0x11100000112,
+ 0x11300000114,
+ 0x11500000116,
+ 0x11700000118,
+ 0x1190000011a,
+ 0x11b0000011c,
+ 0x11d0000011e,
+ 0x11f00000120,
+ 0x12100000122,
+ 0x12300000124,
+ 0x12500000126,
+ 0x12700000128,
+ 0x1290000012a,
+ 0x12b0000012c,
+ 0x12d0000012e,
+ 0x12f00000130,
+ 0x13100000132,
+ 0x13500000136,
+ 0x13700000139,
+ 0x13a0000013b,
+ 0x13c0000013d,
+ 0x13e0000013f,
+ 0x14200000143,
+ 0x14400000145,
+ 0x14600000147,
+ 0x14800000149,
+ 0x14b0000014c,
+ 0x14d0000014e,
+ 0x14f00000150,
+ 0x15100000152,
+ 0x15300000154,
+ 0x15500000156,
+ 0x15700000158,
+ 0x1590000015a,
+ 0x15b0000015c,
+ 0x15d0000015e,
+ 0x15f00000160,
+ 0x16100000162,
+ 0x16300000164,
+ 0x16500000166,
+ 0x16700000168,
+ 0x1690000016a,
+ 0x16b0000016c,
+ 0x16d0000016e,
+ 0x16f00000170,
+ 0x17100000172,
+ 0x17300000174,
+ 0x17500000176,
+ 0x17700000178,
+ 0x17a0000017b,
+ 0x17c0000017d,
+ 0x17e0000017f,
+ 0x18000000181,
+ 0x18300000184,
+ 0x18500000186,
+ 0x18800000189,
+ 0x18c0000018e,
+ 0x19200000193,
+ 0x19500000196,
+ 0x1990000019c,
+ 0x19e0000019f,
+ 0x1a1000001a2,
+ 0x1a3000001a4,
+ 0x1a5000001a6,
+ 0x1a8000001a9,
+ 0x1aa000001ac,
+ 0x1ad000001ae,
+ 0x1b0000001b1,
+ 0x1b4000001b5,
+ 0x1b6000001b7,
+ 0x1b9000001bc,
+ 0x1bd000001c4,
+ 0x1ce000001cf,
+ 0x1d0000001d1,
+ 0x1d2000001d3,
+ 0x1d4000001d5,
+ 0x1d6000001d7,
+ 0x1d8000001d9,
+ 0x1da000001db,
+ 0x1dc000001de,
+ 0x1df000001e0,
+ 0x1e1000001e2,
+ 0x1e3000001e4,
+ 0x1e5000001e6,
+ 0x1e7000001e8,
+ 0x1e9000001ea,
+ 0x1eb000001ec,
+ 0x1ed000001ee,
+ 0x1ef000001f1,
+ 0x1f5000001f6,
+ 0x1f9000001fa,
+ 0x1fb000001fc,
+ 0x1fd000001fe,
+ 0x1ff00000200,
+ 0x20100000202,
+ 0x20300000204,
+ 0x20500000206,
+ 0x20700000208,
+ 0x2090000020a,
+ 0x20b0000020c,
+ 0x20d0000020e,
+ 0x20f00000210,
+ 0x21100000212,
+ 0x21300000214,
+ 0x21500000216,
+ 0x21700000218,
+ 0x2190000021a,
+ 0x21b0000021c,
+ 0x21d0000021e,
+ 0x21f00000220,
+ 0x22100000222,
+ 0x22300000224,
+ 0x22500000226,
+ 0x22700000228,
+ 0x2290000022a,
+ 0x22b0000022c,
+ 0x22d0000022e,
+ 0x22f00000230,
+ 0x23100000232,
+ 0x2330000023a,
+ 0x23c0000023d,
+ 0x23f00000241,
+ 0x24200000243,
+ 0x24700000248,
+ 0x2490000024a,
+ 0x24b0000024c,
+ 0x24d0000024e,
+ 0x24f000002b0,
+ 0x2b9000002c2,
+ 0x2c6000002d2,
+ 0x2ec000002ed,
+ 0x2ee000002ef,
+ 0x30000000340,
+ 0x34200000343,
+ 0x3460000034f,
+ 0x35000000370,
+ 0x37100000372,
+ 0x37300000374,
+ 0x37700000378,
+ 0x37b0000037e,
+ 0x39000000391,
+ 0x3ac000003cf,
+ 0x3d7000003d8,
+ 0x3d9000003da,
+ 0x3db000003dc,
+ 0x3dd000003de,
+ 0x3df000003e0,
+ 0x3e1000003e2,
+ 0x3e3000003e4,
+ 0x3e5000003e6,
+ 0x3e7000003e8,
+ 0x3e9000003ea,
+ 0x3eb000003ec,
+ 0x3ed000003ee,
+ 0x3ef000003f0,
+ 0x3f3000003f4,
+ 0x3f8000003f9,
+ 0x3fb000003fd,
+ 0x43000000460,
+ 0x46100000462,
+ 0x46300000464,
+ 0x46500000466,
+ 0x46700000468,
+ 0x4690000046a,
+ 0x46b0000046c,
+ 0x46d0000046e,
+ 0x46f00000470,
+ 0x47100000472,
+ 0x47300000474,
+ 0x47500000476,
+ 0x47700000478,
+ 0x4790000047a,
+ 0x47b0000047c,
+ 0x47d0000047e,
+ 0x47f00000480,
+ 0x48100000482,
+ 0x48300000488,
+ 0x48b0000048c,
+ 0x48d0000048e,
+ 0x48f00000490,
+ 0x49100000492,
+ 0x49300000494,
+ 0x49500000496,
+ 0x49700000498,
+ 0x4990000049a,
+ 0x49b0000049c,
+ 0x49d0000049e,
+ 0x49f000004a0,
+ 0x4a1000004a2,
+ 0x4a3000004a4,
+ 0x4a5000004a6,
+ 0x4a7000004a8,
+ 0x4a9000004aa,
+ 0x4ab000004ac,
+ 0x4ad000004ae,
+ 0x4af000004b0,
+ 0x4b1000004b2,
+ 0x4b3000004b4,
+ 0x4b5000004b6,
+ 0x4b7000004b8,
+ 0x4b9000004ba,
+ 0x4bb000004bc,
+ 0x4bd000004be,
+ 0x4bf000004c0,
+ 0x4c2000004c3,
+ 0x4c4000004c5,
+ 0x4c6000004c7,
+ 0x4c8000004c9,
+ 0x4ca000004cb,
+ 0x4cc000004cd,
+ 0x4ce000004d0,
+ 0x4d1000004d2,
+ 0x4d3000004d4,
+ 0x4d5000004d6,
+ 0x4d7000004d8,
+ 0x4d9000004da,
+ 0x4db000004dc,
+ 0x4dd000004de,
+ 0x4df000004e0,
+ 0x4e1000004e2,
+ 0x4e3000004e4,
+ 0x4e5000004e6,
+ 0x4e7000004e8,
+ 0x4e9000004ea,
+ 0x4eb000004ec,
+ 0x4ed000004ee,
+ 0x4ef000004f0,
+ 0x4f1000004f2,
+ 0x4f3000004f4,
+ 0x4f5000004f6,
+ 0x4f7000004f8,
+ 0x4f9000004fa,
+ 0x4fb000004fc,
+ 0x4fd000004fe,
+ 0x4ff00000500,
+ 0x50100000502,
+ 0x50300000504,
+ 0x50500000506,
+ 0x50700000508,
+ 0x5090000050a,
+ 0x50b0000050c,
+ 0x50d0000050e,
+ 0x50f00000510,
+ 0x51100000512,
+ 0x51300000514,
+ 0x51500000516,
+ 0x51700000518,
+ 0x5190000051a,
+ 0x51b0000051c,
+ 0x51d0000051e,
+ 0x51f00000520,
+ 0x52100000522,
+ 0x52300000524,
+ 0x52500000526,
+ 0x52700000528,
+ 0x5290000052a,
+ 0x52b0000052c,
+ 0x52d0000052e,
+ 0x52f00000530,
+ 0x5590000055a,
+ 0x56000000587,
+ 0x58800000589,
+ 0x591000005be,
+ 0x5bf000005c0,
+ 0x5c1000005c3,
+ 0x5c4000005c6,
+ 0x5c7000005c8,
+ 0x5d0000005eb,
+ 0x5ef000005f3,
+ 0x6100000061b,
+ 0x62000000640,
+ 0x64100000660,
+ 0x66e00000675,
+ 0x679000006d4,
+ 0x6d5000006dd,
+ 0x6df000006e9,
+ 0x6ea000006f0,
+ 0x6fa00000700,
+ 0x7100000074b,
+ 0x74d000007b2,
+ 0x7c0000007f6,
+ 0x7fd000007fe,
+ 0x8000000082e,
+ 0x8400000085c,
+ 0x8600000086b,
+ 0x8a0000008b5,
+ 0x8b6000008c8,
+ 0x8d3000008e2,
+ 0x8e300000958,
+ 0x96000000964,
+ 0x96600000970,
+ 0x97100000984,
+ 0x9850000098d,
+ 0x98f00000991,
+ 0x993000009a9,
+ 0x9aa000009b1,
+ 0x9b2000009b3,
+ 0x9b6000009ba,
+ 0x9bc000009c5,
+ 0x9c7000009c9,
+ 0x9cb000009cf,
+ 0x9d7000009d8,
+ 0x9e0000009e4,
+ 0x9e6000009f2,
+ 0x9fc000009fd,
+ 0x9fe000009ff,
+ 0xa0100000a04,
+ 0xa0500000a0b,
+ 0xa0f00000a11,
+ 0xa1300000a29,
+ 0xa2a00000a31,
+ 0xa3200000a33,
+ 0xa3500000a36,
+ 0xa3800000a3a,
+ 0xa3c00000a3d,
+ 0xa3e00000a43,
+ 0xa4700000a49,
+ 0xa4b00000a4e,
+ 0xa5100000a52,
+ 0xa5c00000a5d,
+ 0xa6600000a76,
+ 0xa8100000a84,
+ 0xa8500000a8e,
+ 0xa8f00000a92,
+ 0xa9300000aa9,
+ 0xaaa00000ab1,
+ 0xab200000ab4,
+ 0xab500000aba,
+ 0xabc00000ac6,
+ 0xac700000aca,
+ 0xacb00000ace,
+ 0xad000000ad1,
+ 0xae000000ae4,
+ 0xae600000af0,
+ 0xaf900000b00,
+ 0xb0100000b04,
+ 0xb0500000b0d,
+ 0xb0f00000b11,
+ 0xb1300000b29,
+ 0xb2a00000b31,
+ 0xb3200000b34,
+ 0xb3500000b3a,
+ 0xb3c00000b45,
+ 0xb4700000b49,
+ 0xb4b00000b4e,
+ 0xb5500000b58,
+ 0xb5f00000b64,
+ 0xb6600000b70,
+ 0xb7100000b72,
+ 0xb8200000b84,
+ 0xb8500000b8b,
+ 0xb8e00000b91,
+ 0xb9200000b96,
+ 0xb9900000b9b,
+ 0xb9c00000b9d,
+ 0xb9e00000ba0,
+ 0xba300000ba5,
+ 0xba800000bab,
+ 0xbae00000bba,
+ 0xbbe00000bc3,
+ 0xbc600000bc9,
+ 0xbca00000bce,
+ 0xbd000000bd1,
+ 0xbd700000bd8,
+ 0xbe600000bf0,
+ 0xc0000000c0d,
+ 0xc0e00000c11,
+ 0xc1200000c29,
+ 0xc2a00000c3a,
+ 0xc3d00000c45,
+ 0xc4600000c49,
+ 0xc4a00000c4e,
+ 0xc5500000c57,
+ 0xc5800000c5b,
+ 0xc6000000c64,
+ 0xc6600000c70,
+ 0xc8000000c84,
+ 0xc8500000c8d,
+ 0xc8e00000c91,
+ 0xc9200000ca9,
+ 0xcaa00000cb4,
+ 0xcb500000cba,
+ 0xcbc00000cc5,
+ 0xcc600000cc9,
+ 0xcca00000cce,
+ 0xcd500000cd7,
+ 0xcde00000cdf,
+ 0xce000000ce4,
+ 0xce600000cf0,
+ 0xcf100000cf3,
+ 0xd0000000d0d,
+ 0xd0e00000d11,
+ 0xd1200000d45,
+ 0xd4600000d49,
+ 0xd4a00000d4f,
+ 0xd5400000d58,
+ 0xd5f00000d64,
+ 0xd6600000d70,
+ 0xd7a00000d80,
+ 0xd8100000d84,
+ 0xd8500000d97,
+ 0xd9a00000db2,
+ 0xdb300000dbc,
+ 0xdbd00000dbe,
+ 0xdc000000dc7,
+ 0xdca00000dcb,
+ 0xdcf00000dd5,
+ 0xdd600000dd7,
+ 0xdd800000de0,
+ 0xde600000df0,
+ 0xdf200000df4,
+ 0xe0100000e33,
+ 0xe3400000e3b,
+ 0xe4000000e4f,
+ 0xe5000000e5a,
+ 0xe8100000e83,
+ 0xe8400000e85,
+ 0xe8600000e8b,
+ 0xe8c00000ea4,
+ 0xea500000ea6,
+ 0xea700000eb3,
+ 0xeb400000ebe,
+ 0xec000000ec5,
+ 0xec600000ec7,
+ 0xec800000ece,
+ 0xed000000eda,
+ 0xede00000ee0,
+ 0xf0000000f01,
+ 0xf0b00000f0c,
+ 0xf1800000f1a,
+ 0xf2000000f2a,
+ 0xf3500000f36,
+ 0xf3700000f38,
+ 0xf3900000f3a,
+ 0xf3e00000f43,
+ 0xf4400000f48,
+ 0xf4900000f4d,
+ 0xf4e00000f52,
+ 0xf5300000f57,
+ 0xf5800000f5c,
+ 0xf5d00000f69,
+ 0xf6a00000f6d,
+ 0xf7100000f73,
+ 0xf7400000f75,
+ 0xf7a00000f81,
+ 0xf8200000f85,
+ 0xf8600000f93,
+ 0xf9400000f98,
+ 0xf9900000f9d,
+ 0xf9e00000fa2,
+ 0xfa300000fa7,
+ 0xfa800000fac,
+ 0xfad00000fb9,
+ 0xfba00000fbd,
+ 0xfc600000fc7,
+ 0x10000000104a,
+ 0x10500000109e,
+ 0x10d0000010fb,
+ 0x10fd00001100,
+ 0x120000001249,
+ 0x124a0000124e,
+ 0x125000001257,
+ 0x125800001259,
+ 0x125a0000125e,
+ 0x126000001289,
+ 0x128a0000128e,
+ 0x1290000012b1,
+ 0x12b2000012b6,
+ 0x12b8000012bf,
+ 0x12c0000012c1,
+ 0x12c2000012c6,
+ 0x12c8000012d7,
+ 0x12d800001311,
+ 0x131200001316,
+ 0x13180000135b,
+ 0x135d00001360,
+ 0x138000001390,
+ 0x13a0000013f6,
+ 0x14010000166d,
+ 0x166f00001680,
+ 0x16810000169b,
+ 0x16a0000016eb,
+ 0x16f1000016f9,
+ 0x17000000170d,
+ 0x170e00001715,
+ 0x172000001735,
+ 0x174000001754,
+ 0x17600000176d,
+ 0x176e00001771,
+ 0x177200001774,
+ 0x1780000017b4,
+ 0x17b6000017d4,
+ 0x17d7000017d8,
+ 0x17dc000017de,
+ 0x17e0000017ea,
+ 0x18100000181a,
+ 0x182000001879,
+ 0x1880000018ab,
+ 0x18b0000018f6,
+ 0x19000000191f,
+ 0x19200000192c,
+ 0x19300000193c,
+ 0x19460000196e,
+ 0x197000001975,
+ 0x1980000019ac,
+ 0x19b0000019ca,
+ 0x19d0000019da,
+ 0x1a0000001a1c,
+ 0x1a2000001a5f,
+ 0x1a6000001a7d,
+ 0x1a7f00001a8a,
+ 0x1a9000001a9a,
+ 0x1aa700001aa8,
+ 0x1ab000001abe,
+ 0x1abf00001ac1,
+ 0x1b0000001b4c,
+ 0x1b5000001b5a,
+ 0x1b6b00001b74,
+ 0x1b8000001bf4,
+ 0x1c0000001c38,
+ 0x1c4000001c4a,
+ 0x1c4d00001c7e,
+ 0x1cd000001cd3,
+ 0x1cd400001cfb,
+ 0x1d0000001d2c,
+ 0x1d2f00001d30,
+ 0x1d3b00001d3c,
+ 0x1d4e00001d4f,
+ 0x1d6b00001d78,
+ 0x1d7900001d9b,
+ 0x1dc000001dfa,
+ 0x1dfb00001e00,
+ 0x1e0100001e02,
+ 0x1e0300001e04,
+ 0x1e0500001e06,
+ 0x1e0700001e08,
+ 0x1e0900001e0a,
+ 0x1e0b00001e0c,
+ 0x1e0d00001e0e,
+ 0x1e0f00001e10,
+ 0x1e1100001e12,
+ 0x1e1300001e14,
+ 0x1e1500001e16,
+ 0x1e1700001e18,
+ 0x1e1900001e1a,
+ 0x1e1b00001e1c,
+ 0x1e1d00001e1e,
+ 0x1e1f00001e20,
+ 0x1e2100001e22,
+ 0x1e2300001e24,
+ 0x1e2500001e26,
+ 0x1e2700001e28,
+ 0x1e2900001e2a,
+ 0x1e2b00001e2c,
+ 0x1e2d00001e2e,
+ 0x1e2f00001e30,
+ 0x1e3100001e32,
+ 0x1e3300001e34,
+ 0x1e3500001e36,
+ 0x1e3700001e38,
+ 0x1e3900001e3a,
+ 0x1e3b00001e3c,
+ 0x1e3d00001e3e,
+ 0x1e3f00001e40,
+ 0x1e4100001e42,
+ 0x1e4300001e44,
+ 0x1e4500001e46,
+ 0x1e4700001e48,
+ 0x1e4900001e4a,
+ 0x1e4b00001e4c,
+ 0x1e4d00001e4e,
+ 0x1e4f00001e50,
+ 0x1e5100001e52,
+ 0x1e5300001e54,
+ 0x1e5500001e56,
+ 0x1e5700001e58,
+ 0x1e5900001e5a,
+ 0x1e5b00001e5c,
+ 0x1e5d00001e5e,
+ 0x1e5f00001e60,
+ 0x1e6100001e62,
+ 0x1e6300001e64,
+ 0x1e6500001e66,
+ 0x1e6700001e68,
+ 0x1e6900001e6a,
+ 0x1e6b00001e6c,
+ 0x1e6d00001e6e,
+ 0x1e6f00001e70,
+ 0x1e7100001e72,
+ 0x1e7300001e74,
+ 0x1e7500001e76,
+ 0x1e7700001e78,
+ 0x1e7900001e7a,
+ 0x1e7b00001e7c,
+ 0x1e7d00001e7e,
+ 0x1e7f00001e80,
+ 0x1e8100001e82,
+ 0x1e8300001e84,
+ 0x1e8500001e86,
+ 0x1e8700001e88,
+ 0x1e8900001e8a,
+ 0x1e8b00001e8c,
+ 0x1e8d00001e8e,
+ 0x1e8f00001e90,
+ 0x1e9100001e92,
+ 0x1e9300001e94,
+ 0x1e9500001e9a,
+ 0x1e9c00001e9e,
+ 0x1e9f00001ea0,
+ 0x1ea100001ea2,
+ 0x1ea300001ea4,
+ 0x1ea500001ea6,
+ 0x1ea700001ea8,
+ 0x1ea900001eaa,
+ 0x1eab00001eac,
+ 0x1ead00001eae,
+ 0x1eaf00001eb0,
+ 0x1eb100001eb2,
+ 0x1eb300001eb4,
+ 0x1eb500001eb6,
+ 0x1eb700001eb8,
+ 0x1eb900001eba,
+ 0x1ebb00001ebc,
+ 0x1ebd00001ebe,
+ 0x1ebf00001ec0,
+ 0x1ec100001ec2,
+ 0x1ec300001ec4,
+ 0x1ec500001ec6,
+ 0x1ec700001ec8,
+ 0x1ec900001eca,
+ 0x1ecb00001ecc,
+ 0x1ecd00001ece,
+ 0x1ecf00001ed0,
+ 0x1ed100001ed2,
+ 0x1ed300001ed4,
+ 0x1ed500001ed6,
+ 0x1ed700001ed8,
+ 0x1ed900001eda,
+ 0x1edb00001edc,
+ 0x1edd00001ede,
+ 0x1edf00001ee0,
+ 0x1ee100001ee2,
+ 0x1ee300001ee4,
+ 0x1ee500001ee6,
+ 0x1ee700001ee8,
+ 0x1ee900001eea,
+ 0x1eeb00001eec,
+ 0x1eed00001eee,
+ 0x1eef00001ef0,
+ 0x1ef100001ef2,
+ 0x1ef300001ef4,
+ 0x1ef500001ef6,
+ 0x1ef700001ef8,
+ 0x1ef900001efa,
+ 0x1efb00001efc,
+ 0x1efd00001efe,
+ 0x1eff00001f08,
+ 0x1f1000001f16,
+ 0x1f2000001f28,
+ 0x1f3000001f38,
+ 0x1f4000001f46,
+ 0x1f5000001f58,
+ 0x1f6000001f68,
+ 0x1f7000001f71,
+ 0x1f7200001f73,
+ 0x1f7400001f75,
+ 0x1f7600001f77,
+ 0x1f7800001f79,
+ 0x1f7a00001f7b,
+ 0x1f7c00001f7d,
+ 0x1fb000001fb2,
+ 0x1fb600001fb7,
+ 0x1fc600001fc7,
+ 0x1fd000001fd3,
+ 0x1fd600001fd8,
+ 0x1fe000001fe3,
+ 0x1fe400001fe8,
+ 0x1ff600001ff7,
+ 0x214e0000214f,
+ 0x218400002185,
+ 0x2c3000002c5f,
+ 0x2c6100002c62,
+ 0x2c6500002c67,
+ 0x2c6800002c69,
+ 0x2c6a00002c6b,
+ 0x2c6c00002c6d,
+ 0x2c7100002c72,
+ 0x2c7300002c75,
+ 0x2c7600002c7c,
+ 0x2c8100002c82,
+ 0x2c8300002c84,
+ 0x2c8500002c86,
+ 0x2c8700002c88,
+ 0x2c8900002c8a,
+ 0x2c8b00002c8c,
+ 0x2c8d00002c8e,
+ 0x2c8f00002c90,
+ 0x2c9100002c92,
+ 0x2c9300002c94,
+ 0x2c9500002c96,
+ 0x2c9700002c98,
+ 0x2c9900002c9a,
+ 0x2c9b00002c9c,
+ 0x2c9d00002c9e,
+ 0x2c9f00002ca0,
+ 0x2ca100002ca2,
+ 0x2ca300002ca4,
+ 0x2ca500002ca6,
+ 0x2ca700002ca8,
+ 0x2ca900002caa,
+ 0x2cab00002cac,
+ 0x2cad00002cae,
+ 0x2caf00002cb0,
+ 0x2cb100002cb2,
+ 0x2cb300002cb4,
+ 0x2cb500002cb6,
+ 0x2cb700002cb8,
+ 0x2cb900002cba,
+ 0x2cbb00002cbc,
+ 0x2cbd00002cbe,
+ 0x2cbf00002cc0,
+ 0x2cc100002cc2,
+ 0x2cc300002cc4,
+ 0x2cc500002cc6,
+ 0x2cc700002cc8,
+ 0x2cc900002cca,
+ 0x2ccb00002ccc,
+ 0x2ccd00002cce,
+ 0x2ccf00002cd0,
+ 0x2cd100002cd2,
+ 0x2cd300002cd4,
+ 0x2cd500002cd6,
+ 0x2cd700002cd8,
+ 0x2cd900002cda,
+ 0x2cdb00002cdc,
+ 0x2cdd00002cde,
+ 0x2cdf00002ce0,
+ 0x2ce100002ce2,
+ 0x2ce300002ce5,
+ 0x2cec00002ced,
+ 0x2cee00002cf2,
+ 0x2cf300002cf4,
+ 0x2d0000002d26,
+ 0x2d2700002d28,
+ 0x2d2d00002d2e,
+ 0x2d3000002d68,
+ 0x2d7f00002d97,
+ 0x2da000002da7,
+ 0x2da800002daf,
+ 0x2db000002db7,
+ 0x2db800002dbf,
+ 0x2dc000002dc7,
+ 0x2dc800002dcf,
+ 0x2dd000002dd7,
+ 0x2dd800002ddf,
+ 0x2de000002e00,
+ 0x2e2f00002e30,
+ 0x300500003008,
+ 0x302a0000302e,
+ 0x303c0000303d,
+ 0x304100003097,
+ 0x30990000309b,
+ 0x309d0000309f,
+ 0x30a1000030fb,
+ 0x30fc000030ff,
+ 0x310500003130,
+ 0x31a0000031c0,
+ 0x31f000003200,
+ 0x340000004dc0,
+ 0x4e0000009ffd,
+ 0xa0000000a48d,
+ 0xa4d00000a4fe,
+ 0xa5000000a60d,
+ 0xa6100000a62c,
+ 0xa6410000a642,
+ 0xa6430000a644,
+ 0xa6450000a646,
+ 0xa6470000a648,
+ 0xa6490000a64a,
+ 0xa64b0000a64c,
+ 0xa64d0000a64e,
+ 0xa64f0000a650,
+ 0xa6510000a652,
+ 0xa6530000a654,
+ 0xa6550000a656,
+ 0xa6570000a658,
+ 0xa6590000a65a,
+ 0xa65b0000a65c,
+ 0xa65d0000a65e,
+ 0xa65f0000a660,
+ 0xa6610000a662,
+ 0xa6630000a664,
+ 0xa6650000a666,
+ 0xa6670000a668,
+ 0xa6690000a66a,
+ 0xa66b0000a66c,
+ 0xa66d0000a670,
+ 0xa6740000a67e,
+ 0xa67f0000a680,
+ 0xa6810000a682,
+ 0xa6830000a684,
+ 0xa6850000a686,
+ 0xa6870000a688,
+ 0xa6890000a68a,
+ 0xa68b0000a68c,
+ 0xa68d0000a68e,
+ 0xa68f0000a690,
+ 0xa6910000a692,
+ 0xa6930000a694,
+ 0xa6950000a696,
+ 0xa6970000a698,
+ 0xa6990000a69a,
+ 0xa69b0000a69c,
+ 0xa69e0000a6e6,
+ 0xa6f00000a6f2,
+ 0xa7170000a720,
+ 0xa7230000a724,
+ 0xa7250000a726,
+ 0xa7270000a728,
+ 0xa7290000a72a,
+ 0xa72b0000a72c,
+ 0xa72d0000a72e,
+ 0xa72f0000a732,
+ 0xa7330000a734,
+ 0xa7350000a736,
+ 0xa7370000a738,
+ 0xa7390000a73a,
+ 0xa73b0000a73c,
+ 0xa73d0000a73e,
+ 0xa73f0000a740,
+ 0xa7410000a742,
+ 0xa7430000a744,
+ 0xa7450000a746,
+ 0xa7470000a748,
+ 0xa7490000a74a,
+ 0xa74b0000a74c,
+ 0xa74d0000a74e,
+ 0xa74f0000a750,
+ 0xa7510000a752,
+ 0xa7530000a754,
+ 0xa7550000a756,
+ 0xa7570000a758,
+ 0xa7590000a75a,
+ 0xa75b0000a75c,
+ 0xa75d0000a75e,
+ 0xa75f0000a760,
+ 0xa7610000a762,
+ 0xa7630000a764,
+ 0xa7650000a766,
+ 0xa7670000a768,
+ 0xa7690000a76a,
+ 0xa76b0000a76c,
+ 0xa76d0000a76e,
+ 0xa76f0000a770,
+ 0xa7710000a779,
+ 0xa77a0000a77b,
+ 0xa77c0000a77d,
+ 0xa77f0000a780,
+ 0xa7810000a782,
+ 0xa7830000a784,
+ 0xa7850000a786,
+ 0xa7870000a789,
+ 0xa78c0000a78d,
+ 0xa78e0000a790,
+ 0xa7910000a792,
+ 0xa7930000a796,
+ 0xa7970000a798,
+ 0xa7990000a79a,
+ 0xa79b0000a79c,
+ 0xa79d0000a79e,
+ 0xa79f0000a7a0,
+ 0xa7a10000a7a2,
+ 0xa7a30000a7a4,
+ 0xa7a50000a7a6,
+ 0xa7a70000a7a8,
+ 0xa7a90000a7aa,
+ 0xa7af0000a7b0,
+ 0xa7b50000a7b6,
+ 0xa7b70000a7b8,
+ 0xa7b90000a7ba,
+ 0xa7bb0000a7bc,
+ 0xa7bd0000a7be,
+ 0xa7bf0000a7c0,
+ 0xa7c30000a7c4,
+ 0xa7c80000a7c9,
+ 0xa7ca0000a7cb,
+ 0xa7f60000a7f8,
+ 0xa7fa0000a828,
+ 0xa82c0000a82d,
+ 0xa8400000a874,
+ 0xa8800000a8c6,
+ 0xa8d00000a8da,
+ 0xa8e00000a8f8,
+ 0xa8fb0000a8fc,
+ 0xa8fd0000a92e,
+ 0xa9300000a954,
+ 0xa9800000a9c1,
+ 0xa9cf0000a9da,
+ 0xa9e00000a9ff,
+ 0xaa000000aa37,
+ 0xaa400000aa4e,
+ 0xaa500000aa5a,
+ 0xaa600000aa77,
+ 0xaa7a0000aac3,
+ 0xaadb0000aade,
+ 0xaae00000aaf0,
+ 0xaaf20000aaf7,
+ 0xab010000ab07,
+ 0xab090000ab0f,
+ 0xab110000ab17,
+ 0xab200000ab27,
+ 0xab280000ab2f,
+ 0xab300000ab5b,
+ 0xab600000ab6a,
+ 0xabc00000abeb,
+ 0xabec0000abee,
+ 0xabf00000abfa,
+ 0xac000000d7a4,
+ 0xfa0e0000fa10,
+ 0xfa110000fa12,
+ 0xfa130000fa15,
+ 0xfa1f0000fa20,
+ 0xfa210000fa22,
+ 0xfa230000fa25,
+ 0xfa270000fa2a,
+ 0xfb1e0000fb1f,
+ 0xfe200000fe30,
+ 0xfe730000fe74,
+ 0x100000001000c,
+ 0x1000d00010027,
+ 0x100280001003b,
+ 0x1003c0001003e,
+ 0x1003f0001004e,
+ 0x100500001005e,
+ 0x10080000100fb,
+ 0x101fd000101fe,
+ 0x102800001029d,
+ 0x102a0000102d1,
+ 0x102e0000102e1,
+ 0x1030000010320,
+ 0x1032d00010341,
+ 0x103420001034a,
+ 0x103500001037b,
+ 0x103800001039e,
+ 0x103a0000103c4,
+ 0x103c8000103d0,
+ 0x104280001049e,
+ 0x104a0000104aa,
+ 0x104d8000104fc,
+ 0x1050000010528,
+ 0x1053000010564,
+ 0x1060000010737,
+ 0x1074000010756,
+ 0x1076000010768,
+ 0x1080000010806,
+ 0x1080800010809,
+ 0x1080a00010836,
+ 0x1083700010839,
+ 0x1083c0001083d,
+ 0x1083f00010856,
+ 0x1086000010877,
+ 0x108800001089f,
+ 0x108e0000108f3,
+ 0x108f4000108f6,
+ 0x1090000010916,
+ 0x109200001093a,
+ 0x10980000109b8,
+ 0x109be000109c0,
+ 0x10a0000010a04,
+ 0x10a0500010a07,
+ 0x10a0c00010a14,
+ 0x10a1500010a18,
+ 0x10a1900010a36,
+ 0x10a3800010a3b,
+ 0x10a3f00010a40,
+ 0x10a6000010a7d,
+ 0x10a8000010a9d,
+ 0x10ac000010ac8,
+ 0x10ac900010ae7,
+ 0x10b0000010b36,
+ 0x10b4000010b56,
+ 0x10b6000010b73,
+ 0x10b8000010b92,
+ 0x10c0000010c49,
+ 0x10cc000010cf3,
+ 0x10d0000010d28,
+ 0x10d3000010d3a,
+ 0x10e8000010eaa,
+ 0x10eab00010ead,
+ 0x10eb000010eb2,
+ 0x10f0000010f1d,
+ 0x10f2700010f28,
+ 0x10f3000010f51,
+ 0x10fb000010fc5,
+ 0x10fe000010ff7,
+ 0x1100000011047,
+ 0x1106600011070,
+ 0x1107f000110bb,
+ 0x110d0000110e9,
+ 0x110f0000110fa,
+ 0x1110000011135,
+ 0x1113600011140,
+ 0x1114400011148,
+ 0x1115000011174,
+ 0x1117600011177,
+ 0x11180000111c5,
+ 0x111c9000111cd,
+ 0x111ce000111db,
+ 0x111dc000111dd,
+ 0x1120000011212,
+ 0x1121300011238,
+ 0x1123e0001123f,
+ 0x1128000011287,
+ 0x1128800011289,
+ 0x1128a0001128e,
+ 0x1128f0001129e,
+ 0x1129f000112a9,
+ 0x112b0000112eb,
+ 0x112f0000112fa,
+ 0x1130000011304,
+ 0x113050001130d,
+ 0x1130f00011311,
+ 0x1131300011329,
+ 0x1132a00011331,
+ 0x1133200011334,
+ 0x113350001133a,
+ 0x1133b00011345,
+ 0x1134700011349,
+ 0x1134b0001134e,
+ 0x1135000011351,
+ 0x1135700011358,
+ 0x1135d00011364,
+ 0x113660001136d,
+ 0x1137000011375,
+ 0x114000001144b,
+ 0x114500001145a,
+ 0x1145e00011462,
+ 0x11480000114c6,
+ 0x114c7000114c8,
+ 0x114d0000114da,
+ 0x11580000115b6,
+ 0x115b8000115c1,
+ 0x115d8000115de,
+ 0x1160000011641,
+ 0x1164400011645,
+ 0x116500001165a,
+ 0x11680000116b9,
+ 0x116c0000116ca,
+ 0x117000001171b,
+ 0x1171d0001172c,
+ 0x117300001173a,
+ 0x118000001183b,
+ 0x118c0000118ea,
+ 0x118ff00011907,
+ 0x119090001190a,
+ 0x1190c00011914,
+ 0x1191500011917,
+ 0x1191800011936,
+ 0x1193700011939,
+ 0x1193b00011944,
+ 0x119500001195a,
+ 0x119a0000119a8,
+ 0x119aa000119d8,
+ 0x119da000119e2,
+ 0x119e3000119e5,
+ 0x11a0000011a3f,
+ 0x11a4700011a48,
+ 0x11a5000011a9a,
+ 0x11a9d00011a9e,
+ 0x11ac000011af9,
+ 0x11c0000011c09,
+ 0x11c0a00011c37,
+ 0x11c3800011c41,
+ 0x11c5000011c5a,
+ 0x11c7200011c90,
+ 0x11c9200011ca8,
+ 0x11ca900011cb7,
+ 0x11d0000011d07,
+ 0x11d0800011d0a,
+ 0x11d0b00011d37,
+ 0x11d3a00011d3b,
+ 0x11d3c00011d3e,
+ 0x11d3f00011d48,
+ 0x11d5000011d5a,
+ 0x11d6000011d66,
+ 0x11d6700011d69,
+ 0x11d6a00011d8f,
+ 0x11d9000011d92,
+ 0x11d9300011d99,
+ 0x11da000011daa,
+ 0x11ee000011ef7,
+ 0x11fb000011fb1,
+ 0x120000001239a,
+ 0x1248000012544,
+ 0x130000001342f,
+ 0x1440000014647,
+ 0x1680000016a39,
+ 0x16a4000016a5f,
+ 0x16a6000016a6a,
+ 0x16ad000016aee,
+ 0x16af000016af5,
+ 0x16b0000016b37,
+ 0x16b4000016b44,
+ 0x16b5000016b5a,
+ 0x16b6300016b78,
+ 0x16b7d00016b90,
+ 0x16e6000016e80,
+ 0x16f0000016f4b,
+ 0x16f4f00016f88,
+ 0x16f8f00016fa0,
+ 0x16fe000016fe2,
+ 0x16fe300016fe5,
+ 0x16ff000016ff2,
+ 0x17000000187f8,
+ 0x1880000018cd6,
+ 0x18d0000018d09,
+ 0x1b0000001b11f,
+ 0x1b1500001b153,
+ 0x1b1640001b168,
+ 0x1b1700001b2fc,
+ 0x1bc000001bc6b,
+ 0x1bc700001bc7d,
+ 0x1bc800001bc89,
+ 0x1bc900001bc9a,
+ 0x1bc9d0001bc9f,
+ 0x1da000001da37,
+ 0x1da3b0001da6d,
+ 0x1da750001da76,
+ 0x1da840001da85,
+ 0x1da9b0001daa0,
+ 0x1daa10001dab0,
+ 0x1e0000001e007,
+ 0x1e0080001e019,
+ 0x1e01b0001e022,
+ 0x1e0230001e025,
+ 0x1e0260001e02b,
+ 0x1e1000001e12d,
+ 0x1e1300001e13e,
+ 0x1e1400001e14a,
+ 0x1e14e0001e14f,
+ 0x1e2c00001e2fa,
+ 0x1e8000001e8c5,
+ 0x1e8d00001e8d7,
+ 0x1e9220001e94c,
+ 0x1e9500001e95a,
+ 0x1fbf00001fbfa,
+ 0x200000002a6de,
+ 0x2a7000002b735,
+ 0x2b7400002b81e,
+ 0x2b8200002cea2,
+ 0x2ceb00002ebe1,
+ 0x300000003134b,
+ ),
+ 'CONTEXTJ': (
+ 0x200c0000200e,
+ ),
+ 'CONTEXTO': (
+ 0xb7000000b8,
+ 0x37500000376,
+ 0x5f3000005f5,
+ 0x6600000066a,
+ 0x6f0000006fa,
+ 0x30fb000030fc,
+ ),
+}
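Each entry in the codepoint_classes tuples above packs a half-open code point range into one integer as (start << 32) | end; the intranges helpers added in the next file decode and search these. A quick check, with the decoder copied verbatim from intranges.py below:

    def _decode_range(r):
        return (r >> 32), (r & ((1 << 32) - 1))

    assert _decode_range(0x2d0000002e) == (0x2D, 0x2E)  # '-' is PVALID
    assert _decode_range(0x300000003a) == (0x30, 0x3A)  # digits '0'-'9' are PVALID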
diff --git a/third_party/python/idna/idna/intranges.py b/third_party/python/idna/idna/intranges.py
new file mode 100644
index 0000000000..fa8a735662
--- /dev/null
+++ b/third_party/python/idna/idna/intranges.py
@@ -0,0 +1,53 @@
+"""
+Given a list of integers, made up of (hopefully) a small number of long runs
+of consecutive integers, compute a representation of the form
+((start1, end1), (start2, end2) ...). Then answer the question "was x present
+in the original list?" in time O(log(# runs)).
+"""
+
+import bisect
+
+def intranges_from_list(list_):
+    """Represent a list of integers as a sequence of ranges:
+    ((start_0, end_0), (start_1, end_1), ...), such that the original
+    integers are exactly those x such that start_i <= x < end_i for some i.
+
+    Ranges are encoded as single integers (start << 32 | end), not as tuples.
+    """
+
+    sorted_list = sorted(list_)
+    ranges = []
+    last_write = -1
+    for i in range(len(sorted_list)):
+        if i+1 < len(sorted_list):
+            if sorted_list[i] == sorted_list[i+1]-1:
+                continue
+        current_range = sorted_list[last_write+1:i+1]
+        ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
+        last_write = i
+
+    return tuple(ranges)
+
+def _encode_range(start, end):
+    return (start << 32) | end
+
+def _decode_range(r):
+    return (r >> 32), (r & ((1 << 32) - 1))
+
+
+def intranges_contain(int_, ranges):
+    """Determine if `int_` falls into one of the ranges in `ranges`."""
+    tuple_ = _encode_range(int_, 0)
+    pos = bisect.bisect_left(ranges, tuple_)
+    # we could be immediately ahead of a range (start, end)
+    # with start < int_ < end (ranges are half-open)
+    if pos > 0:
+        left, right = _decode_range(ranges[pos-1])
+        if left <= int_ < right:
+            return True
+    # or we could sit exactly at the start of a range (int_, end)
+    if pos < len(ranges):
+        left, _ = _decode_range(ranges[pos])
+        if left == int_:
+            return True
+    return False
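For review purposes, a short round-trip of the two public helpers above; the expected values are worked out by hand from the code:

    from idna.intranges import intranges_from_list, intranges_contain

    runs = intranges_from_list([1, 2, 3, 10, 11, 12])
    # Two half-open runs, [1, 4) and [10, 13), each packed as start << 32 | end.
    assert runs == ((1 << 32) | 4, (10 << 32) | 13)

    assert intranges_contain(2, runs)
    assert intranges_contain(10, runs)
    assert not intranges_contain(4, runs)  # range ends are exclusive
    assert not intranges_contain(7, runs)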
diff --git a/third_party/python/idna/idna/package_data.py b/third_party/python/idna/idna/package_data.py
new file mode 100644
index 0000000000..ce1c521d23
--- /dev/null
+++ b/third_party/python/idna/idna/package_data.py
@@ -0,0 +1,2 @@
+__version__ = '2.10'
+
diff --git a/third_party/python/idna/idna/uts46data.py b/third_party/python/idna/idna/uts46data.py
new file mode 100644
index 0000000000..3766dd49f6
--- /dev/null
+++ b/third_party/python/idna/idna/uts46data.py
@@ -0,0 +1,8357 @@
+# This file is automatically generated by tools/idna-data
+# vim: set fileencoding=utf-8 :
+
+"""IDNA Mapping Table from UTS46."""
+
+
+__version__ = "13.0.0"
+def _seg_0():
+ return [
+ (0x0, '3'),
+ (0x1, '3'),
+ (0x2, '3'),
+ (0x3, '3'),
+ (0x4, '3'),
+ (0x5, '3'),
+ (0x6, '3'),
+ (0x7, '3'),
+ (0x8, '3'),
+ (0x9, '3'),
+ (0xA, '3'),
+ (0xB, '3'),
+ (0xC, '3'),
+ (0xD, '3'),
+ (0xE, '3'),
+ (0xF, '3'),
+ (0x10, '3'),
+ (0x11, '3'),
+ (0x12, '3'),
+ (0x13, '3'),
+ (0x14, '3'),
+ (0x15, '3'),
+ (0x16, '3'),
+ (0x17, '3'),
+ (0x18, '3'),
+ (0x19, '3'),
+ (0x1A, '3'),
+ (0x1B, '3'),
+ (0x1C, '3'),
+ (0x1D, '3'),
+ (0x1E, '3'),
+ (0x1F, '3'),
+ (0x20, '3'),
+ (0x21, '3'),
+ (0x22, '3'),
+ (0x23, '3'),
+ (0x24, '3'),
+ (0x25, '3'),
+ (0x26, '3'),
+ (0x27, '3'),
+ (0x28, '3'),
+ (0x29, '3'),
+ (0x2A, '3'),
+ (0x2B, '3'),
+ (0x2C, '3'),
+ (0x2D, 'V'),
+ (0x2E, 'V'),
+ (0x2F, '3'),
+ (0x30, 'V'),
+ (0x31, 'V'),
+ (0x32, 'V'),
+ (0x33, 'V'),
+ (0x34, 'V'),
+ (0x35, 'V'),
+ (0x36, 'V'),
+ (0x37, 'V'),
+ (0x38, 'V'),
+ (0x39, 'V'),
+ (0x3A, '3'),
+ (0x3B, '3'),
+ (0x3C, '3'),
+ (0x3D, '3'),
+ (0x3E, '3'),
+ (0x3F, '3'),
+ (0x40, '3'),
+ (0x41, 'M', u'a'),
+ (0x42, 'M', u'b'),
+ (0x43, 'M', u'c'),
+ (0x44, 'M', u'd'),
+ (0x45, 'M', u'e'),
+ (0x46, 'M', u'f'),
+ (0x47, 'M', u'g'),
+ (0x48, 'M', u'h'),
+ (0x49, 'M', u'i'),
+ (0x4A, 'M', u'j'),
+ (0x4B, 'M', u'k'),
+ (0x4C, 'M', u'l'),
+ (0x4D, 'M', u'm'),
+ (0x4E, 'M', u'n'),
+ (0x4F, 'M', u'o'),
+ (0x50, 'M', u'p'),
+ (0x51, 'M', u'q'),
+ (0x52, 'M', u'r'),
+ (0x53, 'M', u's'),
+ (0x54, 'M', u't'),
+ (0x55, 'M', u'u'),
+ (0x56, 'M', u'v'),
+ (0x57, 'M', u'w'),
+ (0x58, 'M', u'x'),
+ (0x59, 'M', u'y'),
+ (0x5A, 'M', u'z'),
+ (0x5B, '3'),
+ (0x5C, '3'),
+ (0x5D, '3'),
+ (0x5E, '3'),
+ (0x5F, '3'),
+ (0x60, '3'),
+ (0x61, 'V'),
+ (0x62, 'V'),
+ (0x63, 'V'),
+ ]
+
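The status letters in these rows follow UTS #46: 'V' valid, 'M' mapped to the string that follows, 'D' deviation, 'I' ignored, 'X' disallowed, and '3' disallowed_STD3_valid (or disallowed_STD3_mapped when a mapping is given). Each row governs every code point from its own start up to the next row's start, so the table is searched by bisection. A simplified sketch of the lookup; lookup here is illustrative, not the real consumer (idna.core.uts46_remap, which also applies STD3 and transitional handling):

    from bisect import bisect_left

    def lookup(table, code_point):
        # The last row whose start is <= code_point governs it.
        return table[bisect_left(table, (code_point + 1,)) - 1]

    table = _seg_0()
    assert lookup(table, 0x41)[1:] == ('M', u'a')  # 'A' maps to 'a'
    assert lookup(table, 0x61)[1] == 'V'           # 'a' is valid as-is
    assert lookup(table, 0x2F)[1] == '3'           # '/' is disallowed_STD3_valid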
+def _seg_1():
+ return [
+ (0x64, 'V'),
+ (0x65, 'V'),
+ (0x66, 'V'),
+ (0x67, 'V'),
+ (0x68, 'V'),
+ (0x69, 'V'),
+ (0x6A, 'V'),
+ (0x6B, 'V'),
+ (0x6C, 'V'),
+ (0x6D, 'V'),
+ (0x6E, 'V'),
+ (0x6F, 'V'),
+ (0x70, 'V'),
+ (0x71, 'V'),
+ (0x72, 'V'),
+ (0x73, 'V'),
+ (0x74, 'V'),
+ (0x75, 'V'),
+ (0x76, 'V'),
+ (0x77, 'V'),
+ (0x78, 'V'),
+ (0x79, 'V'),
+ (0x7A, 'V'),
+ (0x7B, '3'),
+ (0x7C, '3'),
+ (0x7D, '3'),
+ (0x7E, '3'),
+ (0x7F, '3'),
+ (0x80, 'X'),
+ (0x81, 'X'),
+ (0x82, 'X'),
+ (0x83, 'X'),
+ (0x84, 'X'),
+ (0x85, 'X'),
+ (0x86, 'X'),
+ (0x87, 'X'),
+ (0x88, 'X'),
+ (0x89, 'X'),
+ (0x8A, 'X'),
+ (0x8B, 'X'),
+ (0x8C, 'X'),
+ (0x8D, 'X'),
+ (0x8E, 'X'),
+ (0x8F, 'X'),
+ (0x90, 'X'),
+ (0x91, 'X'),
+ (0x92, 'X'),
+ (0x93, 'X'),
+ (0x94, 'X'),
+ (0x95, 'X'),
+ (0x96, 'X'),
+ (0x97, 'X'),
+ (0x98, 'X'),
+ (0x99, 'X'),
+ (0x9A, 'X'),
+ (0x9B, 'X'),
+ (0x9C, 'X'),
+ (0x9D, 'X'),
+ (0x9E, 'X'),
+ (0x9F, 'X'),
+ (0xA0, '3', u' '),
+ (0xA1, 'V'),
+ (0xA2, 'V'),
+ (0xA3, 'V'),
+ (0xA4, 'V'),
+ (0xA5, 'V'),
+ (0xA6, 'V'),
+ (0xA7, 'V'),
+ (0xA8, '3', u' ̈'),
+ (0xA9, 'V'),
+ (0xAA, 'M', u'a'),
+ (0xAB, 'V'),
+ (0xAC, 'V'),
+ (0xAD, 'I'),
+ (0xAE, 'V'),
+ (0xAF, '3', u' ̄'),
+ (0xB0, 'V'),
+ (0xB1, 'V'),
+ (0xB2, 'M', u'2'),
+ (0xB3, 'M', u'3'),
+ (0xB4, '3', u' ́'),
+ (0xB5, 'M', u'μ'),
+ (0xB6, 'V'),
+ (0xB7, 'V'),
+ (0xB8, '3', u' ̧'),
+ (0xB9, 'M', u'1'),
+ (0xBA, 'M', u'o'),
+ (0xBB, 'V'),
+ (0xBC, 'M', u'1⁄4'),
+ (0xBD, 'M', u'1⁄2'),
+ (0xBE, 'M', u'3⁄4'),
+ (0xBF, 'V'),
+ (0xC0, 'M', u'à'),
+ (0xC1, 'M', u'á'),
+ (0xC2, 'M', u'â'),
+ (0xC3, 'M', u'ã'),
+ (0xC4, 'M', u'ä'),
+ (0xC5, 'M', u'å'),
+ (0xC6, 'M', u'æ'),
+ (0xC7, 'M', u'ç'),
+ ]
+
+def _seg_2():
+ return [
+ (0xC8, 'M', u'è'),
+ (0xC9, 'M', u'é'),
+ (0xCA, 'M', u'ê'),
+ (0xCB, 'M', u'ë'),
+ (0xCC, 'M', u'ì'),
+ (0xCD, 'M', u'í'),
+ (0xCE, 'M', u'î'),
+ (0xCF, 'M', u'ï'),
+ (0xD0, 'M', u'ð'),
+ (0xD1, 'M', u'ñ'),
+ (0xD2, 'M', u'ò'),
+ (0xD3, 'M', u'ó'),
+ (0xD4, 'M', u'ô'),
+ (0xD5, 'M', u'õ'),
+ (0xD6, 'M', u'ö'),
+ (0xD7, 'V'),
+ (0xD8, 'M', u'ø'),
+ (0xD9, 'M', u'ù'),
+ (0xDA, 'M', u'ú'),
+ (0xDB, 'M', u'û'),
+ (0xDC, 'M', u'ü'),
+ (0xDD, 'M', u'ý'),
+ (0xDE, 'M', u'þ'),
+ (0xDF, 'D', u'ss'),
+ (0xE0, 'V'),
+ (0xE1, 'V'),
+ (0xE2, 'V'),
+ (0xE3, 'V'),
+ (0xE4, 'V'),
+ (0xE5, 'V'),
+ (0xE6, 'V'),
+ (0xE7, 'V'),
+ (0xE8, 'V'),
+ (0xE9, 'V'),
+ (0xEA, 'V'),
+ (0xEB, 'V'),
+ (0xEC, 'V'),
+ (0xED, 'V'),
+ (0xEE, 'V'),
+ (0xEF, 'V'),
+ (0xF0, 'V'),
+ (0xF1, 'V'),
+ (0xF2, 'V'),
+ (0xF3, 'V'),
+ (0xF4, 'V'),
+ (0xF5, 'V'),
+ (0xF6, 'V'),
+ (0xF7, 'V'),
+ (0xF8, 'V'),
+ (0xF9, 'V'),
+ (0xFA, 'V'),
+ (0xFB, 'V'),
+ (0xFC, 'V'),
+ (0xFD, 'V'),
+ (0xFE, 'V'),
+ (0xFF, 'V'),
+ (0x100, 'M', u'ā'),
+ (0x101, 'V'),
+ (0x102, 'M', u'ă'),
+ (0x103, 'V'),
+ (0x104, 'M', u'ą'),
+ (0x105, 'V'),
+ (0x106, 'M', u'ć'),
+ (0x107, 'V'),
+ (0x108, 'M', u'ĉ'),
+ (0x109, 'V'),
+ (0x10A, 'M', u'ċ'),
+ (0x10B, 'V'),
+ (0x10C, 'M', u'č'),
+ (0x10D, 'V'),
+ (0x10E, 'M', u'ď'),
+ (0x10F, 'V'),
+ (0x110, 'M', u'đ'),
+ (0x111, 'V'),
+ (0x112, 'M', u'ē'),
+ (0x113, 'V'),
+ (0x114, 'M', u'ĕ'),
+ (0x115, 'V'),
+ (0x116, 'M', u'ė'),
+ (0x117, 'V'),
+ (0x118, 'M', u'ę'),
+ (0x119, 'V'),
+ (0x11A, 'M', u'ě'),
+ (0x11B, 'V'),
+ (0x11C, 'M', u'ĝ'),
+ (0x11D, 'V'),
+ (0x11E, 'M', u'ğ'),
+ (0x11F, 'V'),
+ (0x120, 'M', u'ġ'),
+ (0x121, 'V'),
+ (0x122, 'M', u'ģ'),
+ (0x123, 'V'),
+ (0x124, 'M', u'ĥ'),
+ (0x125, 'V'),
+ (0x126, 'M', u'ħ'),
+ (0x127, 'V'),
+ (0x128, 'M', u'ĩ'),
+ (0x129, 'V'),
+ (0x12A, 'M', u'ī'),
+ (0x12B, 'V'),
+ ]
+
+def _seg_3():
+ return [
+ (0x12C, 'M', u'ĭ'),
+ (0x12D, 'V'),
+ (0x12E, 'M', u'į'),
+ (0x12F, 'V'),
+ (0x130, 'M', u'i̇'),
+ (0x131, 'V'),
+ (0x132, 'M', u'ij'),
+ (0x134, 'M', u'ĵ'),
+ (0x135, 'V'),
+ (0x136, 'M', u'ķ'),
+ (0x137, 'V'),
+ (0x139, 'M', u'ĺ'),
+ (0x13A, 'V'),
+ (0x13B, 'M', u'ļ'),
+ (0x13C, 'V'),
+ (0x13D, 'M', u'ľ'),
+ (0x13E, 'V'),
+ (0x13F, 'M', u'l·'),
+ (0x141, 'M', u'ł'),
+ (0x142, 'V'),
+ (0x143, 'M', u'ń'),
+ (0x144, 'V'),
+ (0x145, 'M', u'ņ'),
+ (0x146, 'V'),
+ (0x147, 'M', u'ň'),
+ (0x148, 'V'),
+ (0x149, 'M', u'ʼn'),
+ (0x14A, 'M', u'ŋ'),
+ (0x14B, 'V'),
+ (0x14C, 'M', u'ō'),
+ (0x14D, 'V'),
+ (0x14E, 'M', u'ŏ'),
+ (0x14F, 'V'),
+ (0x150, 'M', u'ő'),
+ (0x151, 'V'),
+ (0x152, 'M', u'œ'),
+ (0x153, 'V'),
+ (0x154, 'M', u'ŕ'),
+ (0x155, 'V'),
+ (0x156, 'M', u'ŗ'),
+ (0x157, 'V'),
+ (0x158, 'M', u'ř'),
+ (0x159, 'V'),
+ (0x15A, 'M', u'ś'),
+ (0x15B, 'V'),
+ (0x15C, 'M', u'ŝ'),
+ (0x15D, 'V'),
+ (0x15E, 'M', u'ş'),
+ (0x15F, 'V'),
+ (0x160, 'M', u'š'),
+ (0x161, 'V'),
+ (0x162, 'M', u'ţ'),
+ (0x163, 'V'),
+ (0x164, 'M', u'ť'),
+ (0x165, 'V'),
+ (0x166, 'M', u'ŧ'),
+ (0x167, 'V'),
+ (0x168, 'M', u'ũ'),
+ (0x169, 'V'),
+ (0x16A, 'M', u'ū'),
+ (0x16B, 'V'),
+ (0x16C, 'M', u'ŭ'),
+ (0x16D, 'V'),
+ (0x16E, 'M', u'ů'),
+ (0x16F, 'V'),
+ (0x170, 'M', u'ű'),
+ (0x171, 'V'),
+ (0x172, 'M', u'ų'),
+ (0x173, 'V'),
+ (0x174, 'M', u'ŵ'),
+ (0x175, 'V'),
+ (0x176, 'M', u'ŷ'),
+ (0x177, 'V'),
+ (0x178, 'M', u'ÿ'),
+ (0x179, 'M', u'ź'),
+ (0x17A, 'V'),
+ (0x17B, 'M', u'ż'),
+ (0x17C, 'V'),
+ (0x17D, 'M', u'ž'),
+ (0x17E, 'V'),
+ (0x17F, 'M', u's'),
+ (0x180, 'V'),
+ (0x181, 'M', u'ɓ'),
+ (0x182, 'M', u'ƃ'),
+ (0x183, 'V'),
+ (0x184, 'M', u'ƅ'),
+ (0x185, 'V'),
+ (0x186, 'M', u'ɔ'),
+ (0x187, 'M', u'ƈ'),
+ (0x188, 'V'),
+ (0x189, 'M', u'ɖ'),
+ (0x18A, 'M', u'ɗ'),
+ (0x18B, 'M', u'ƌ'),
+ (0x18C, 'V'),
+ (0x18E, 'M', u'ǝ'),
+ (0x18F, 'M', u'ə'),
+ (0x190, 'M', u'ɛ'),
+ (0x191, 'M', u'ƒ'),
+ (0x192, 'V'),
+ (0x193, 'M', u'ɠ'),
+ ]
+
+def _seg_4():
+ return [
+ (0x194, 'M', u'ɣ'),
+ (0x195, 'V'),
+ (0x196, 'M', u'ɩ'),
+ (0x197, 'M', u'ɨ'),
+ (0x198, 'M', u'ƙ'),
+ (0x199, 'V'),
+ (0x19C, 'M', u'ɯ'),
+ (0x19D, 'M', u'ɲ'),
+ (0x19E, 'V'),
+ (0x19F, 'M', u'ɵ'),
+ (0x1A0, 'M', u'ơ'),
+ (0x1A1, 'V'),
+ (0x1A2, 'M', u'ƣ'),
+ (0x1A3, 'V'),
+ (0x1A4, 'M', u'ƥ'),
+ (0x1A5, 'V'),
+ (0x1A6, 'M', u'ʀ'),
+ (0x1A7, 'M', u'ƨ'),
+ (0x1A8, 'V'),
+ (0x1A9, 'M', u'ʃ'),
+ (0x1AA, 'V'),
+ (0x1AC, 'M', u'ƭ'),
+ (0x1AD, 'V'),
+ (0x1AE, 'M', u'ʈ'),
+ (0x1AF, 'M', u'ư'),
+ (0x1B0, 'V'),
+ (0x1B1, 'M', u'ʊ'),
+ (0x1B2, 'M', u'ʋ'),
+ (0x1B3, 'M', u'ƴ'),
+ (0x1B4, 'V'),
+ (0x1B5, 'M', u'ƶ'),
+ (0x1B6, 'V'),
+ (0x1B7, 'M', u'ʒ'),
+ (0x1B8, 'M', u'ƹ'),
+ (0x1B9, 'V'),
+ (0x1BC, 'M', u'ƽ'),
+ (0x1BD, 'V'),
+ (0x1C4, 'M', u'dž'),
+ (0x1C7, 'M', u'lj'),
+ (0x1CA, 'M', u'nj'),
+ (0x1CD, 'M', u'ǎ'),
+ (0x1CE, 'V'),
+ (0x1CF, 'M', u'ǐ'),
+ (0x1D0, 'V'),
+ (0x1D1, 'M', u'ǒ'),
+ (0x1D2, 'V'),
+ (0x1D3, 'M', u'ǔ'),
+ (0x1D4, 'V'),
+ (0x1D5, 'M', u'ǖ'),
+ (0x1D6, 'V'),
+ (0x1D7, 'M', u'ǘ'),
+ (0x1D8, 'V'),
+ (0x1D9, 'M', u'ǚ'),
+ (0x1DA, 'V'),
+ (0x1DB, 'M', u'ǜ'),
+ (0x1DC, 'V'),
+ (0x1DE, 'M', u'ǟ'),
+ (0x1DF, 'V'),
+ (0x1E0, 'M', u'ǡ'),
+ (0x1E1, 'V'),
+ (0x1E2, 'M', u'ǣ'),
+ (0x1E3, 'V'),
+ (0x1E4, 'M', u'ǥ'),
+ (0x1E5, 'V'),
+ (0x1E6, 'M', u'ǧ'),
+ (0x1E7, 'V'),
+ (0x1E8, 'M', u'ǩ'),
+ (0x1E9, 'V'),
+ (0x1EA, 'M', u'ǫ'),
+ (0x1EB, 'V'),
+ (0x1EC, 'M', u'ǭ'),
+ (0x1ED, 'V'),
+ (0x1EE, 'M', u'ǯ'),
+ (0x1EF, 'V'),
+ (0x1F1, 'M', u'dz'),
+ (0x1F4, 'M', u'ǵ'),
+ (0x1F5, 'V'),
+ (0x1F6, 'M', u'ƕ'),
+ (0x1F7, 'M', u'ƿ'),
+ (0x1F8, 'M', u'ǹ'),
+ (0x1F9, 'V'),
+ (0x1FA, 'M', u'ǻ'),
+ (0x1FB, 'V'),
+ (0x1FC, 'M', u'ǽ'),
+ (0x1FD, 'V'),
+ (0x1FE, 'M', u'ǿ'),
+ (0x1FF, 'V'),
+ (0x200, 'M', u'ȁ'),
+ (0x201, 'V'),
+ (0x202, 'M', u'ȃ'),
+ (0x203, 'V'),
+ (0x204, 'M', u'ȅ'),
+ (0x205, 'V'),
+ (0x206, 'M', u'ȇ'),
+ (0x207, 'V'),
+ (0x208, 'M', u'ȉ'),
+ (0x209, 'V'),
+ (0x20A, 'M', u'ȋ'),
+ (0x20B, 'V'),
+ (0x20C, 'M', u'ȍ'),
+ ]
+
+def _seg_5():
+ return [
+ (0x20D, 'V'),
+ (0x20E, 'M', u'ȏ'),
+ (0x20F, 'V'),
+ (0x210, 'M', u'ȑ'),
+ (0x211, 'V'),
+ (0x212, 'M', u'ȓ'),
+ (0x213, 'V'),
+ (0x214, 'M', u'ȕ'),
+ (0x215, 'V'),
+ (0x216, 'M', u'ȗ'),
+ (0x217, 'V'),
+ (0x218, 'M', u'ș'),
+ (0x219, 'V'),
+ (0x21A, 'M', u'ț'),
+ (0x21B, 'V'),
+ (0x21C, 'M', u'ȝ'),
+ (0x21D, 'V'),
+ (0x21E, 'M', u'ȟ'),
+ (0x21F, 'V'),
+ (0x220, 'M', u'ƞ'),
+ (0x221, 'V'),
+ (0x222, 'M', u'ȣ'),
+ (0x223, 'V'),
+ (0x224, 'M', u'ȥ'),
+ (0x225, 'V'),
+ (0x226, 'M', u'ȧ'),
+ (0x227, 'V'),
+ (0x228, 'M', u'ȩ'),
+ (0x229, 'V'),
+ (0x22A, 'M', u'ȫ'),
+ (0x22B, 'V'),
+ (0x22C, 'M', u'ȭ'),
+ (0x22D, 'V'),
+ (0x22E, 'M', u'ȯ'),
+ (0x22F, 'V'),
+ (0x230, 'M', u'ȱ'),
+ (0x231, 'V'),
+ (0x232, 'M', u'ȳ'),
+ (0x233, 'V'),
+ (0x23A, 'M', u'ⱥ'),
+ (0x23B, 'M', u'ȼ'),
+ (0x23C, 'V'),
+ (0x23D, 'M', u'ƚ'),
+ (0x23E, 'M', u'ⱦ'),
+ (0x23F, 'V'),
+ (0x241, 'M', u'ɂ'),
+ (0x242, 'V'),
+ (0x243, 'M', u'ƀ'),
+ (0x244, 'M', u'ʉ'),
+ (0x245, 'M', u'ʌ'),
+ (0x246, 'M', u'ɇ'),
+ (0x247, 'V'),
+ (0x248, 'M', u'ɉ'),
+ (0x249, 'V'),
+ (0x24A, 'M', u'ɋ'),
+ (0x24B, 'V'),
+ (0x24C, 'M', u'ɍ'),
+ (0x24D, 'V'),
+ (0x24E, 'M', u'ɏ'),
+ (0x24F, 'V'),
+ (0x2B0, 'M', u'h'),
+ (0x2B1, 'M', u'ɦ'),
+ (0x2B2, 'M', u'j'),
+ (0x2B3, 'M', u'r'),
+ (0x2B4, 'M', u'ɹ'),
+ (0x2B5, 'M', u'ɻ'),
+ (0x2B6, 'M', u'ʁ'),
+ (0x2B7, 'M', u'w'),
+ (0x2B8, 'M', u'y'),
+ (0x2B9, 'V'),
+ (0x2D8, '3', u' ̆'),
+ (0x2D9, '3', u' ̇'),
+ (0x2DA, '3', u' ̊'),
+ (0x2DB, '3', u' ̨'),
+ (0x2DC, '3', u' ̃'),
+ (0x2DD, '3', u' ̋'),
+ (0x2DE, 'V'),
+ (0x2E0, 'M', u'ɣ'),
+ (0x2E1, 'M', u'l'),
+ (0x2E2, 'M', u's'),
+ (0x2E3, 'M', u'x'),
+ (0x2E4, 'M', u'ʕ'),
+ (0x2E5, 'V'),
+ (0x340, 'M', u'̀'),
+ (0x341, 'M', u'́'),
+ (0x342, 'V'),
+ (0x343, 'M', u'̓'),
+ (0x344, 'M', u'̈́'),
+ (0x345, 'M', u'ι'),
+ (0x346, 'V'),
+ (0x34F, 'I'),
+ (0x350, 'V'),
+ (0x370, 'M', u'ͱ'),
+ (0x371, 'V'),
+ (0x372, 'M', u'ͳ'),
+ (0x373, 'V'),
+ (0x374, 'M', u'ʹ'),
+ (0x375, 'V'),
+ (0x376, 'M', u'ͷ'),
+ (0x377, 'V'),
+ ]
+
+def _seg_6():
+ return [
+ (0x378, 'X'),
+ (0x37A, '3', u' ι'),
+ (0x37B, 'V'),
+ (0x37E, '3', u';'),
+ (0x37F, 'M', u'ϳ'),
+ (0x380, 'X'),
+ (0x384, '3', u' ́'),
+ (0x385, '3', u' ̈́'),
+ (0x386, 'M', u'ά'),
+ (0x387, 'M', u'·'),
+ (0x388, 'M', u'έ'),
+ (0x389, 'M', u'ή'),
+ (0x38A, 'M', u'ί'),
+ (0x38B, 'X'),
+ (0x38C, 'M', u'ό'),
+ (0x38D, 'X'),
+ (0x38E, 'M', u'ύ'),
+ (0x38F, 'M', u'ώ'),
+ (0x390, 'V'),
+ (0x391, 'M', u'α'),
+ (0x392, 'M', u'β'),
+ (0x393, 'M', u'γ'),
+ (0x394, 'M', u'δ'),
+ (0x395, 'M', u'ε'),
+ (0x396, 'M', u'ζ'),
+ (0x397, 'M', u'η'),
+ (0x398, 'M', u'θ'),
+ (0x399, 'M', u'ι'),
+ (0x39A, 'M', u'κ'),
+ (0x39B, 'M', u'λ'),
+ (0x39C, 'M', u'μ'),
+ (0x39D, 'M', u'ν'),
+ (0x39E, 'M', u'ξ'),
+ (0x39F, 'M', u'ο'),
+ (0x3A0, 'M', u'π'),
+ (0x3A1, 'M', u'ρ'),
+ (0x3A2, 'X'),
+ (0x3A3, 'M', u'σ'),
+ (0x3A4, 'M', u'τ'),
+ (0x3A5, 'M', u'υ'),
+ (0x3A6, 'M', u'φ'),
+ (0x3A7, 'M', u'χ'),
+ (0x3A8, 'M', u'ψ'),
+ (0x3A9, 'M', u'ω'),
+ (0x3AA, 'M', u'ϊ'),
+ (0x3AB, 'M', u'ϋ'),
+ (0x3AC, 'V'),
+ (0x3C2, 'D', u'σ'),
+ (0x3C3, 'V'),
+ (0x3CF, 'M', u'ϗ'),
+ (0x3D0, 'M', u'β'),
+ (0x3D1, 'M', u'θ'),
+ (0x3D2, 'M', u'υ'),
+ (0x3D3, 'M', u'ύ'),
+ (0x3D4, 'M', u'ϋ'),
+ (0x3D5, 'M', u'φ'),
+ (0x3D6, 'M', u'π'),
+ (0x3D7, 'V'),
+ (0x3D8, 'M', u'ϙ'),
+ (0x3D9, 'V'),
+ (0x3DA, 'M', u'ϛ'),
+ (0x3DB, 'V'),
+ (0x3DC, 'M', u'ϝ'),
+ (0x3DD, 'V'),
+ (0x3DE, 'M', u'ϟ'),
+ (0x3DF, 'V'),
+ (0x3E0, 'M', u'ϡ'),
+ (0x3E1, 'V'),
+ (0x3E2, 'M', u'ϣ'),
+ (0x3E3, 'V'),
+ (0x3E4, 'M', u'ϥ'),
+ (0x3E5, 'V'),
+ (0x3E6, 'M', u'ϧ'),
+ (0x3E7, 'V'),
+ (0x3E8, 'M', u'ϩ'),
+ (0x3E9, 'V'),
+ (0x3EA, 'M', u'ϫ'),
+ (0x3EB, 'V'),
+ (0x3EC, 'M', u'ϭ'),
+ (0x3ED, 'V'),
+ (0x3EE, 'M', u'ϯ'),
+ (0x3EF, 'V'),
+ (0x3F0, 'M', u'κ'),
+ (0x3F1, 'M', u'ρ'),
+ (0x3F2, 'M', u'σ'),
+ (0x3F3, 'V'),
+ (0x3F4, 'M', u'θ'),
+ (0x3F5, 'M', u'ε'),
+ (0x3F6, 'V'),
+ (0x3F7, 'M', u'ϸ'),
+ (0x3F8, 'V'),
+ (0x3F9, 'M', u'σ'),
+ (0x3FA, 'M', u'ϻ'),
+ (0x3FB, 'V'),
+ (0x3FD, 'M', u'ͻ'),
+ (0x3FE, 'M', u'ͼ'),
+ (0x3FF, 'M', u'ͽ'),
+ (0x400, 'M', u'ѐ'),
+ (0x401, 'M', u'ё'),
+ (0x402, 'M', u'ђ'),
+ ]
+
+def _seg_7():
+ return [
+ (0x403, 'M', u'ѓ'),
+ (0x404, 'M', u'є'),
+ (0x405, 'M', u'ѕ'),
+ (0x406, 'M', u'і'),
+ (0x407, 'M', u'ї'),
+ (0x408, 'M', u'ј'),
+ (0x409, 'M', u'љ'),
+ (0x40A, 'M', u'њ'),
+ (0x40B, 'M', u'ћ'),
+ (0x40C, 'M', u'ќ'),
+ (0x40D, 'M', u'ѝ'),
+ (0x40E, 'M', u'ў'),
+ (0x40F, 'M', u'џ'),
+ (0x410, 'M', u'а'),
+ (0x411, 'M', u'б'),
+ (0x412, 'M', u'в'),
+ (0x413, 'M', u'г'),
+ (0x414, 'M', u'д'),
+ (0x415, 'M', u'е'),
+ (0x416, 'M', u'ж'),
+ (0x417, 'M', u'з'),
+ (0x418, 'M', u'и'),
+ (0x419, 'M', u'й'),
+ (0x41A, 'M', u'к'),
+ (0x41B, 'M', u'л'),
+ (0x41C, 'M', u'м'),
+ (0x41D, 'M', u'н'),
+ (0x41E, 'M', u'о'),
+ (0x41F, 'M', u'п'),
+ (0x420, 'M', u'р'),
+ (0x421, 'M', u'с'),
+ (0x422, 'M', u'т'),
+ (0x423, 'M', u'у'),
+ (0x424, 'M', u'ф'),
+ (0x425, 'M', u'х'),
+ (0x426, 'M', u'ц'),
+ (0x427, 'M', u'ч'),
+ (0x428, 'M', u'ш'),
+ (0x429, 'M', u'щ'),
+ (0x42A, 'M', u'ъ'),
+ (0x42B, 'M', u'ы'),
+ (0x42C, 'M', u'ь'),
+ (0x42D, 'M', u'э'),
+ (0x42E, 'M', u'ю'),
+ (0x42F, 'M', u'я'),
+ (0x430, 'V'),
+ (0x460, 'M', u'ѡ'),
+ (0x461, 'V'),
+ (0x462, 'M', u'ѣ'),
+ (0x463, 'V'),
+ (0x464, 'M', u'ѥ'),
+ (0x465, 'V'),
+ (0x466, 'M', u'ѧ'),
+ (0x467, 'V'),
+ (0x468, 'M', u'ѩ'),
+ (0x469, 'V'),
+ (0x46A, 'M', u'ѫ'),
+ (0x46B, 'V'),
+ (0x46C, 'M', u'ѭ'),
+ (0x46D, 'V'),
+ (0x46E, 'M', u'ѯ'),
+ (0x46F, 'V'),
+ (0x470, 'M', u'ѱ'),
+ (0x471, 'V'),
+ (0x472, 'M', u'ѳ'),
+ (0x473, 'V'),
+ (0x474, 'M', u'ѵ'),
+ (0x475, 'V'),
+ (0x476, 'M', u'ѷ'),
+ (0x477, 'V'),
+ (0x478, 'M', u'ѹ'),
+ (0x479, 'V'),
+ (0x47A, 'M', u'ѻ'),
+ (0x47B, 'V'),
+ (0x47C, 'M', u'ѽ'),
+ (0x47D, 'V'),
+ (0x47E, 'M', u'ѿ'),
+ (0x47F, 'V'),
+ (0x480, 'M', u'ҁ'),
+ (0x481, 'V'),
+ (0x48A, 'M', u'ҋ'),
+ (0x48B, 'V'),
+ (0x48C, 'M', u'ҍ'),
+ (0x48D, 'V'),
+ (0x48E, 'M', u'ҏ'),
+ (0x48F, 'V'),
+ (0x490, 'M', u'ґ'),
+ (0x491, 'V'),
+ (0x492, 'M', u'ғ'),
+ (0x493, 'V'),
+ (0x494, 'M', u'ҕ'),
+ (0x495, 'V'),
+ (0x496, 'M', u'җ'),
+ (0x497, 'V'),
+ (0x498, 'M', u'ҙ'),
+ (0x499, 'V'),
+ (0x49A, 'M', u'қ'),
+ (0x49B, 'V'),
+ (0x49C, 'M', u'ҝ'),
+ (0x49D, 'V'),
+ ]
+
+def _seg_8():
+ return [
+ (0x49E, 'M', u'ҟ'),
+ (0x49F, 'V'),
+ (0x4A0, 'M', u'ҡ'),
+ (0x4A1, 'V'),
+ (0x4A2, 'M', u'ң'),
+ (0x4A3, 'V'),
+ (0x4A4, 'M', u'ҥ'),
+ (0x4A5, 'V'),
+ (0x4A6, 'M', u'ҧ'),
+ (0x4A7, 'V'),
+ (0x4A8, 'M', u'ҩ'),
+ (0x4A9, 'V'),
+ (0x4AA, 'M', u'ҫ'),
+ (0x4AB, 'V'),
+ (0x4AC, 'M', u'ҭ'),
+ (0x4AD, 'V'),
+ (0x4AE, 'M', u'ү'),
+ (0x4AF, 'V'),
+ (0x4B0, 'M', u'ұ'),
+ (0x4B1, 'V'),
+ (0x4B2, 'M', u'ҳ'),
+ (0x4B3, 'V'),
+ (0x4B4, 'M', u'ҵ'),
+ (0x4B5, 'V'),
+ (0x4B6, 'M', u'ҷ'),
+ (0x4B7, 'V'),
+ (0x4B8, 'M', u'ҹ'),
+ (0x4B9, 'V'),
+ (0x4BA, 'M', u'һ'),
+ (0x4BB, 'V'),
+ (0x4BC, 'M', u'ҽ'),
+ (0x4BD, 'V'),
+ (0x4BE, 'M', u'ҿ'),
+ (0x4BF, 'V'),
+ (0x4C0, 'X'),
+ (0x4C1, 'M', u'ӂ'),
+ (0x4C2, 'V'),
+ (0x4C3, 'M', u'ӄ'),
+ (0x4C4, 'V'),
+ (0x4C5, 'M', u'ӆ'),
+ (0x4C6, 'V'),
+ (0x4C7, 'M', u'ӈ'),
+ (0x4C8, 'V'),
+ (0x4C9, 'M', u'ӊ'),
+ (0x4CA, 'V'),
+ (0x4CB, 'M', u'ӌ'),
+ (0x4CC, 'V'),
+ (0x4CD, 'M', u'ӎ'),
+ (0x4CE, 'V'),
+ (0x4D0, 'M', u'ӑ'),
+ (0x4D1, 'V'),
+ (0x4D2, 'M', u'ӓ'),
+ (0x4D3, 'V'),
+ (0x4D4, 'M', u'ӕ'),
+ (0x4D5, 'V'),
+ (0x4D6, 'M', u'ӗ'),
+ (0x4D7, 'V'),
+ (0x4D8, 'M', u'ә'),
+ (0x4D9, 'V'),
+ (0x4DA, 'M', u'ӛ'),
+ (0x4DB, 'V'),
+ (0x4DC, 'M', u'ӝ'),
+ (0x4DD, 'V'),
+ (0x4DE, 'M', u'ӟ'),
+ (0x4DF, 'V'),
+ (0x4E0, 'M', u'ӡ'),
+ (0x4E1, 'V'),
+ (0x4E2, 'M', u'ӣ'),
+ (0x4E3, 'V'),
+ (0x4E4, 'M', u'ӥ'),
+ (0x4E5, 'V'),
+ (0x4E6, 'M', u'ӧ'),
+ (0x4E7, 'V'),
+ (0x4E8, 'M', u'ө'),
+ (0x4E9, 'V'),
+ (0x4EA, 'M', u'ӫ'),
+ (0x4EB, 'V'),
+ (0x4EC, 'M', u'ӭ'),
+ (0x4ED, 'V'),
+ (0x4EE, 'M', u'ӯ'),
+ (0x4EF, 'V'),
+ (0x4F0, 'M', u'ӱ'),
+ (0x4F1, 'V'),
+ (0x4F2, 'M', u'ӳ'),
+ (0x4F3, 'V'),
+ (0x4F4, 'M', u'ӵ'),
+ (0x4F5, 'V'),
+ (0x4F6, 'M', u'ӷ'),
+ (0x4F7, 'V'),
+ (0x4F8, 'M', u'ӹ'),
+ (0x4F9, 'V'),
+ (0x4FA, 'M', u'ӻ'),
+ (0x4FB, 'V'),
+ (0x4FC, 'M', u'ӽ'),
+ (0x4FD, 'V'),
+ (0x4FE, 'M', u'ӿ'),
+ (0x4FF, 'V'),
+ (0x500, 'M', u'ԁ'),
+ (0x501, 'V'),
+ (0x502, 'M', u'ԃ'),
+ ]
+
+def _seg_9():
+ return [
+ (0x503, 'V'),
+ (0x504, 'M', u'ԅ'),
+ (0x505, 'V'),
+ (0x506, 'M', u'ԇ'),
+ (0x507, 'V'),
+ (0x508, 'M', u'ԉ'),
+ (0x509, 'V'),
+ (0x50A, 'M', u'ԋ'),
+ (0x50B, 'V'),
+ (0x50C, 'M', u'ԍ'),
+ (0x50D, 'V'),
+ (0x50E, 'M', u'ԏ'),
+ (0x50F, 'V'),
+ (0x510, 'M', u'ԑ'),
+ (0x511, 'V'),
+ (0x512, 'M', u'ԓ'),
+ (0x513, 'V'),
+ (0x514, 'M', u'ԕ'),
+ (0x515, 'V'),
+ (0x516, 'M', u'ԗ'),
+ (0x517, 'V'),
+ (0x518, 'M', u'ԙ'),
+ (0x519, 'V'),
+ (0x51A, 'M', u'ԛ'),
+ (0x51B, 'V'),
+ (0x51C, 'M', u'ԝ'),
+ (0x51D, 'V'),
+ (0x51E, 'M', u'ԟ'),
+ (0x51F, 'V'),
+ (0x520, 'M', u'ԡ'),
+ (0x521, 'V'),
+ (0x522, 'M', u'ԣ'),
+ (0x523, 'V'),
+ (0x524, 'M', u'ԥ'),
+ (0x525, 'V'),
+ (0x526, 'M', u'ԧ'),
+ (0x527, 'V'),
+ (0x528, 'M', u'ԩ'),
+ (0x529, 'V'),
+ (0x52A, 'M', u'ԫ'),
+ (0x52B, 'V'),
+ (0x52C, 'M', u'ԭ'),
+ (0x52D, 'V'),
+ (0x52E, 'M', u'ԯ'),
+ (0x52F, 'V'),
+ (0x530, 'X'),
+ (0x531, 'M', u'ա'),
+ (0x532, 'M', u'բ'),
+ (0x533, 'M', u'գ'),
+ (0x534, 'M', u'դ'),
+ (0x535, 'M', u'ե'),
+ (0x536, 'M', u'զ'),
+ (0x537, 'M', u'է'),
+ (0x538, 'M', u'ը'),
+ (0x539, 'M', u'թ'),
+ (0x53A, 'M', u'ժ'),
+ (0x53B, 'M', u'ի'),
+ (0x53C, 'M', u'լ'),
+ (0x53D, 'M', u'խ'),
+ (0x53E, 'M', u'ծ'),
+ (0x53F, 'M', u'կ'),
+ (0x540, 'M', u'հ'),
+ (0x541, 'M', u'ձ'),
+ (0x542, 'M', u'ղ'),
+ (0x543, 'M', u'ճ'),
+ (0x544, 'M', u'մ'),
+ (0x545, 'M', u'յ'),
+ (0x546, 'M', u'ն'),
+ (0x547, 'M', u'շ'),
+ (0x548, 'M', u'ո'),
+ (0x549, 'M', u'չ'),
+ (0x54A, 'M', u'պ'),
+ (0x54B, 'M', u'ջ'),
+ (0x54C, 'M', u'ռ'),
+ (0x54D, 'M', u'ս'),
+ (0x54E, 'M', u'վ'),
+ (0x54F, 'M', u'տ'),
+ (0x550, 'M', u'ր'),
+ (0x551, 'M', u'ց'),
+ (0x552, 'M', u'ւ'),
+ (0x553, 'M', u'փ'),
+ (0x554, 'M', u'ք'),
+ (0x555, 'M', u'օ'),
+ (0x556, 'M', u'ֆ'),
+ (0x557, 'X'),
+ (0x559, 'V'),
+ (0x587, 'M', u'եւ'),
+ (0x588, 'V'),
+ (0x58B, 'X'),
+ (0x58D, 'V'),
+ (0x590, 'X'),
+ (0x591, 'V'),
+ (0x5C8, 'X'),
+ (0x5D0, 'V'),
+ (0x5EB, 'X'),
+ (0x5EF, 'V'),
+ (0x5F5, 'X'),
+ (0x606, 'V'),
+ (0x61C, 'X'),
+ (0x61E, 'V'),
+ ]
+
+def _seg_10():
+ return [
+ (0x675, 'M', u'اٴ'),
+ (0x676, 'M', u'وٴ'),
+ (0x677, 'M', u'ۇٴ'),
+ (0x678, 'M', u'يٴ'),
+ (0x679, 'V'),
+ (0x6DD, 'X'),
+ (0x6DE, 'V'),
+ (0x70E, 'X'),
+ (0x710, 'V'),
+ (0x74B, 'X'),
+ (0x74D, 'V'),
+ (0x7B2, 'X'),
+ (0x7C0, 'V'),
+ (0x7FB, 'X'),
+ (0x7FD, 'V'),
+ (0x82E, 'X'),
+ (0x830, 'V'),
+ (0x83F, 'X'),
+ (0x840, 'V'),
+ (0x85C, 'X'),
+ (0x85E, 'V'),
+ (0x85F, 'X'),
+ (0x860, 'V'),
+ (0x86B, 'X'),
+ (0x8A0, 'V'),
+ (0x8B5, 'X'),
+ (0x8B6, 'V'),
+ (0x8C8, 'X'),
+ (0x8D3, 'V'),
+ (0x8E2, 'X'),
+ (0x8E3, 'V'),
+ (0x958, 'M', u'क़'),
+ (0x959, 'M', u'ख़'),
+ (0x95A, 'M', u'ग़'),
+ (0x95B, 'M', u'ज़'),
+ (0x95C, 'M', u'ड़'),
+ (0x95D, 'M', u'ढ़'),
+ (0x95E, 'M', u'फ़'),
+ (0x95F, 'M', u'य़'),
+ (0x960, 'V'),
+ (0x984, 'X'),
+ (0x985, 'V'),
+ (0x98D, 'X'),
+ (0x98F, 'V'),
+ (0x991, 'X'),
+ (0x993, 'V'),
+ (0x9A9, 'X'),
+ (0x9AA, 'V'),
+ (0x9B1, 'X'),
+ (0x9B2, 'V'),
+ (0x9B3, 'X'),
+ (0x9B6, 'V'),
+ (0x9BA, 'X'),
+ (0x9BC, 'V'),
+ (0x9C5, 'X'),
+ (0x9C7, 'V'),
+ (0x9C9, 'X'),
+ (0x9CB, 'V'),
+ (0x9CF, 'X'),
+ (0x9D7, 'V'),
+ (0x9D8, 'X'),
+ (0x9DC, 'M', u'ড়'),
+ (0x9DD, 'M', u'ঢ়'),
+ (0x9DE, 'X'),
+ (0x9DF, 'M', u'য়'),
+ (0x9E0, 'V'),
+ (0x9E4, 'X'),
+ (0x9E6, 'V'),
+ (0x9FF, 'X'),
+ (0xA01, 'V'),
+ (0xA04, 'X'),
+ (0xA05, 'V'),
+ (0xA0B, 'X'),
+ (0xA0F, 'V'),
+ (0xA11, 'X'),
+ (0xA13, 'V'),
+ (0xA29, 'X'),
+ (0xA2A, 'V'),
+ (0xA31, 'X'),
+ (0xA32, 'V'),
+ (0xA33, 'M', u'ਲ਼'),
+ (0xA34, 'X'),
+ (0xA35, 'V'),
+ (0xA36, 'M', u'ਸ਼'),
+ (0xA37, 'X'),
+ (0xA38, 'V'),
+ (0xA3A, 'X'),
+ (0xA3C, 'V'),
+ (0xA3D, 'X'),
+ (0xA3E, 'V'),
+ (0xA43, 'X'),
+ (0xA47, 'V'),
+ (0xA49, 'X'),
+ (0xA4B, 'V'),
+ (0xA4E, 'X'),
+ (0xA51, 'V'),
+ (0xA52, 'X'),
+ (0xA59, 'M', u'ਖ਼'),
+ (0xA5A, 'M', u'ਗ਼'),
+ (0xA5B, 'M', u'ਜ਼'),
+ ]
+
+def _seg_11():
+ return [
+ (0xA5C, 'V'),
+ (0xA5D, 'X'),
+ (0xA5E, 'M', u'ਫ਼'),
+ (0xA5F, 'X'),
+ (0xA66, 'V'),
+ (0xA77, 'X'),
+ (0xA81, 'V'),
+ (0xA84, 'X'),
+ (0xA85, 'V'),
+ (0xA8E, 'X'),
+ (0xA8F, 'V'),
+ (0xA92, 'X'),
+ (0xA93, 'V'),
+ (0xAA9, 'X'),
+ (0xAAA, 'V'),
+ (0xAB1, 'X'),
+ (0xAB2, 'V'),
+ (0xAB4, 'X'),
+ (0xAB5, 'V'),
+ (0xABA, 'X'),
+ (0xABC, 'V'),
+ (0xAC6, 'X'),
+ (0xAC7, 'V'),
+ (0xACA, 'X'),
+ (0xACB, 'V'),
+ (0xACE, 'X'),
+ (0xAD0, 'V'),
+ (0xAD1, 'X'),
+ (0xAE0, 'V'),
+ (0xAE4, 'X'),
+ (0xAE6, 'V'),
+ (0xAF2, 'X'),
+ (0xAF9, 'V'),
+ (0xB00, 'X'),
+ (0xB01, 'V'),
+ (0xB04, 'X'),
+ (0xB05, 'V'),
+ (0xB0D, 'X'),
+ (0xB0F, 'V'),
+ (0xB11, 'X'),
+ (0xB13, 'V'),
+ (0xB29, 'X'),
+ (0xB2A, 'V'),
+ (0xB31, 'X'),
+ (0xB32, 'V'),
+ (0xB34, 'X'),
+ (0xB35, 'V'),
+ (0xB3A, 'X'),
+ (0xB3C, 'V'),
+ (0xB45, 'X'),
+ (0xB47, 'V'),
+ (0xB49, 'X'),
+ (0xB4B, 'V'),
+ (0xB4E, 'X'),
+ (0xB55, 'V'),
+ (0xB58, 'X'),
+ (0xB5C, 'M', u'ଡ଼'),
+ (0xB5D, 'M', u'ଢ଼'),
+ (0xB5E, 'X'),
+ (0xB5F, 'V'),
+ (0xB64, 'X'),
+ (0xB66, 'V'),
+ (0xB78, 'X'),
+ (0xB82, 'V'),
+ (0xB84, 'X'),
+ (0xB85, 'V'),
+ (0xB8B, 'X'),
+ (0xB8E, 'V'),
+ (0xB91, 'X'),
+ (0xB92, 'V'),
+ (0xB96, 'X'),
+ (0xB99, 'V'),
+ (0xB9B, 'X'),
+ (0xB9C, 'V'),
+ (0xB9D, 'X'),
+ (0xB9E, 'V'),
+ (0xBA0, 'X'),
+ (0xBA3, 'V'),
+ (0xBA5, 'X'),
+ (0xBA8, 'V'),
+ (0xBAB, 'X'),
+ (0xBAE, 'V'),
+ (0xBBA, 'X'),
+ (0xBBE, 'V'),
+ (0xBC3, 'X'),
+ (0xBC6, 'V'),
+ (0xBC9, 'X'),
+ (0xBCA, 'V'),
+ (0xBCE, 'X'),
+ (0xBD0, 'V'),
+ (0xBD1, 'X'),
+ (0xBD7, 'V'),
+ (0xBD8, 'X'),
+ (0xBE6, 'V'),
+ (0xBFB, 'X'),
+ (0xC00, 'V'),
+ (0xC0D, 'X'),
+ (0xC0E, 'V'),
+ (0xC11, 'X'),
+ (0xC12, 'V'),
+ ]
+
+def _seg_12():
+ return [
+ (0xC29, 'X'),
+ (0xC2A, 'V'),
+ (0xC3A, 'X'),
+ (0xC3D, 'V'),
+ (0xC45, 'X'),
+ (0xC46, 'V'),
+ (0xC49, 'X'),
+ (0xC4A, 'V'),
+ (0xC4E, 'X'),
+ (0xC55, 'V'),
+ (0xC57, 'X'),
+ (0xC58, 'V'),
+ (0xC5B, 'X'),
+ (0xC60, 'V'),
+ (0xC64, 'X'),
+ (0xC66, 'V'),
+ (0xC70, 'X'),
+ (0xC77, 'V'),
+ (0xC8D, 'X'),
+ (0xC8E, 'V'),
+ (0xC91, 'X'),
+ (0xC92, 'V'),
+ (0xCA9, 'X'),
+ (0xCAA, 'V'),
+ (0xCB4, 'X'),
+ (0xCB5, 'V'),
+ (0xCBA, 'X'),
+ (0xCBC, 'V'),
+ (0xCC5, 'X'),
+ (0xCC6, 'V'),
+ (0xCC9, 'X'),
+ (0xCCA, 'V'),
+ (0xCCE, 'X'),
+ (0xCD5, 'V'),
+ (0xCD7, 'X'),
+ (0xCDE, 'V'),
+ (0xCDF, 'X'),
+ (0xCE0, 'V'),
+ (0xCE4, 'X'),
+ (0xCE6, 'V'),
+ (0xCF0, 'X'),
+ (0xCF1, 'V'),
+ (0xCF3, 'X'),
+ (0xD00, 'V'),
+ (0xD0D, 'X'),
+ (0xD0E, 'V'),
+ (0xD11, 'X'),
+ (0xD12, 'V'),
+ (0xD45, 'X'),
+ (0xD46, 'V'),
+ (0xD49, 'X'),
+ (0xD4A, 'V'),
+ (0xD50, 'X'),
+ (0xD54, 'V'),
+ (0xD64, 'X'),
+ (0xD66, 'V'),
+ (0xD80, 'X'),
+ (0xD81, 'V'),
+ (0xD84, 'X'),
+ (0xD85, 'V'),
+ (0xD97, 'X'),
+ (0xD9A, 'V'),
+ (0xDB2, 'X'),
+ (0xDB3, 'V'),
+ (0xDBC, 'X'),
+ (0xDBD, 'V'),
+ (0xDBE, 'X'),
+ (0xDC0, 'V'),
+ (0xDC7, 'X'),
+ (0xDCA, 'V'),
+ (0xDCB, 'X'),
+ (0xDCF, 'V'),
+ (0xDD5, 'X'),
+ (0xDD6, 'V'),
+ (0xDD7, 'X'),
+ (0xDD8, 'V'),
+ (0xDE0, 'X'),
+ (0xDE6, 'V'),
+ (0xDF0, 'X'),
+ (0xDF2, 'V'),
+ (0xDF5, 'X'),
+ (0xE01, 'V'),
+ (0xE33, 'M', u'ํา'),
+ (0xE34, 'V'),
+ (0xE3B, 'X'),
+ (0xE3F, 'V'),
+ (0xE5C, 'X'),
+ (0xE81, 'V'),
+ (0xE83, 'X'),
+ (0xE84, 'V'),
+ (0xE85, 'X'),
+ (0xE86, 'V'),
+ (0xE8B, 'X'),
+ (0xE8C, 'V'),
+ (0xEA4, 'X'),
+ (0xEA5, 'V'),
+ (0xEA6, 'X'),
+ (0xEA7, 'V'),
+ (0xEB3, 'M', u'ໍາ'),
+ (0xEB4, 'V'),
+ ]
+
+def _seg_13():
+ return [
+ (0xEBE, 'X'),
+ (0xEC0, 'V'),
+ (0xEC5, 'X'),
+ (0xEC6, 'V'),
+ (0xEC7, 'X'),
+ (0xEC8, 'V'),
+ (0xECE, 'X'),
+ (0xED0, 'V'),
+ (0xEDA, 'X'),
+ (0xEDC, 'M', u'ຫນ'),
+ (0xEDD, 'M', u'ຫມ'),
+ (0xEDE, 'V'),
+ (0xEE0, 'X'),
+ (0xF00, 'V'),
+ (0xF0C, 'M', u'་'),
+ (0xF0D, 'V'),
+ (0xF43, 'M', u'གྷ'),
+ (0xF44, 'V'),
+ (0xF48, 'X'),
+ (0xF49, 'V'),
+ (0xF4D, 'M', u'ཌྷ'),
+ (0xF4E, 'V'),
+ (0xF52, 'M', u'དྷ'),
+ (0xF53, 'V'),
+ (0xF57, 'M', u'བྷ'),
+ (0xF58, 'V'),
+ (0xF5C, 'M', u'ཛྷ'),
+ (0xF5D, 'V'),
+ (0xF69, 'M', u'ཀྵ'),
+ (0xF6A, 'V'),
+ (0xF6D, 'X'),
+ (0xF71, 'V'),
+ (0xF73, 'M', u'ཱི'),
+ (0xF74, 'V'),
+ (0xF75, 'M', u'ཱུ'),
+ (0xF76, 'M', u'ྲྀ'),
+ (0xF77, 'M', u'ྲཱྀ'),
+ (0xF78, 'M', u'ླྀ'),
+ (0xF79, 'M', u'ླཱྀ'),
+ (0xF7A, 'V'),
+ (0xF81, 'M', u'ཱྀ'),
+ (0xF82, 'V'),
+ (0xF93, 'M', u'ྒྷ'),
+ (0xF94, 'V'),
+ (0xF98, 'X'),
+ (0xF99, 'V'),
+ (0xF9D, 'M', u'ྜྷ'),
+ (0xF9E, 'V'),
+ (0xFA2, 'M', u'ྡྷ'),
+ (0xFA3, 'V'),
+ (0xFA7, 'M', u'ྦྷ'),
+ (0xFA8, 'V'),
+ (0xFAC, 'M', u'ྫྷ'),
+ (0xFAD, 'V'),
+ (0xFB9, 'M', u'ྐྵ'),
+ (0xFBA, 'V'),
+ (0xFBD, 'X'),
+ (0xFBE, 'V'),
+ (0xFCD, 'X'),
+ (0xFCE, 'V'),
+ (0xFDB, 'X'),
+ (0x1000, 'V'),
+ (0x10A0, 'X'),
+ (0x10C7, 'M', u'ⴧ'),
+ (0x10C8, 'X'),
+ (0x10CD, 'M', u'ⴭ'),
+ (0x10CE, 'X'),
+ (0x10D0, 'V'),
+ (0x10FC, 'M', u'ნ'),
+ (0x10FD, 'V'),
+ (0x115F, 'X'),
+ (0x1161, 'V'),
+ (0x1249, 'X'),
+ (0x124A, 'V'),
+ (0x124E, 'X'),
+ (0x1250, 'V'),
+ (0x1257, 'X'),
+ (0x1258, 'V'),
+ (0x1259, 'X'),
+ (0x125A, 'V'),
+ (0x125E, 'X'),
+ (0x1260, 'V'),
+ (0x1289, 'X'),
+ (0x128A, 'V'),
+ (0x128E, 'X'),
+ (0x1290, 'V'),
+ (0x12B1, 'X'),
+ (0x12B2, 'V'),
+ (0x12B6, 'X'),
+ (0x12B8, 'V'),
+ (0x12BF, 'X'),
+ (0x12C0, 'V'),
+ (0x12C1, 'X'),
+ (0x12C2, 'V'),
+ (0x12C6, 'X'),
+ (0x12C8, 'V'),
+ (0x12D7, 'X'),
+ (0x12D8, 'V'),
+ (0x1311, 'X'),
+ (0x1312, 'V'),
+ ]
+
+def _seg_14():
+ return [
+ (0x1316, 'X'),
+ (0x1318, 'V'),
+ (0x135B, 'X'),
+ (0x135D, 'V'),
+ (0x137D, 'X'),
+ (0x1380, 'V'),
+ (0x139A, 'X'),
+ (0x13A0, 'V'),
+ (0x13F6, 'X'),
+ (0x13F8, 'M', u'Ᏸ'),
+ (0x13F9, 'M', u'Ᏹ'),
+ (0x13FA, 'M', u'Ᏺ'),
+ (0x13FB, 'M', u'Ᏻ'),
+ (0x13FC, 'M', u'Ᏼ'),
+ (0x13FD, 'M', u'Ᏽ'),
+ (0x13FE, 'X'),
+ (0x1400, 'V'),
+ (0x1680, 'X'),
+ (0x1681, 'V'),
+ (0x169D, 'X'),
+ (0x16A0, 'V'),
+ (0x16F9, 'X'),
+ (0x1700, 'V'),
+ (0x170D, 'X'),
+ (0x170E, 'V'),
+ (0x1715, 'X'),
+ (0x1720, 'V'),
+ (0x1737, 'X'),
+ (0x1740, 'V'),
+ (0x1754, 'X'),
+ (0x1760, 'V'),
+ (0x176D, 'X'),
+ (0x176E, 'V'),
+ (0x1771, 'X'),
+ (0x1772, 'V'),
+ (0x1774, 'X'),
+ (0x1780, 'V'),
+ (0x17B4, 'X'),
+ (0x17B6, 'V'),
+ (0x17DE, 'X'),
+ (0x17E0, 'V'),
+ (0x17EA, 'X'),
+ (0x17F0, 'V'),
+ (0x17FA, 'X'),
+ (0x1800, 'V'),
+ (0x1806, 'X'),
+ (0x1807, 'V'),
+ (0x180B, 'I'),
+ (0x180E, 'X'),
+ (0x1810, 'V'),
+ (0x181A, 'X'),
+ (0x1820, 'V'),
+ (0x1879, 'X'),
+ (0x1880, 'V'),
+ (0x18AB, 'X'),
+ (0x18B0, 'V'),
+ (0x18F6, 'X'),
+ (0x1900, 'V'),
+ (0x191F, 'X'),
+ (0x1920, 'V'),
+ (0x192C, 'X'),
+ (0x1930, 'V'),
+ (0x193C, 'X'),
+ (0x1940, 'V'),
+ (0x1941, 'X'),
+ (0x1944, 'V'),
+ (0x196E, 'X'),
+ (0x1970, 'V'),
+ (0x1975, 'X'),
+ (0x1980, 'V'),
+ (0x19AC, 'X'),
+ (0x19B0, 'V'),
+ (0x19CA, 'X'),
+ (0x19D0, 'V'),
+ (0x19DB, 'X'),
+ (0x19DE, 'V'),
+ (0x1A1C, 'X'),
+ (0x1A1E, 'V'),
+ (0x1A5F, 'X'),
+ (0x1A60, 'V'),
+ (0x1A7D, 'X'),
+ (0x1A7F, 'V'),
+ (0x1A8A, 'X'),
+ (0x1A90, 'V'),
+ (0x1A9A, 'X'),
+ (0x1AA0, 'V'),
+ (0x1AAE, 'X'),
+ (0x1AB0, 'V'),
+ (0x1AC1, 'X'),
+ (0x1B00, 'V'),
+ (0x1B4C, 'X'),
+ (0x1B50, 'V'),
+ (0x1B7D, 'X'),
+ (0x1B80, 'V'),
+ (0x1BF4, 'X'),
+ (0x1BFC, 'V'),
+ (0x1C38, 'X'),
+ (0x1C3B, 'V'),
+ (0x1C4A, 'X'),
+ (0x1C4D, 'V'),
+ ]
+
+def _seg_15():
+ return [
+ (0x1C80, 'M', u'в'),
+ (0x1C81, 'M', u'д'),
+ (0x1C82, 'M', u'о'),
+ (0x1C83, 'M', u'с'),
+ (0x1C84, 'M', u'т'),
+ (0x1C86, 'M', u'ъ'),
+ (0x1C87, 'M', u'ѣ'),
+ (0x1C88, 'M', u'ꙋ'),
+ (0x1C89, 'X'),
+ (0x1C90, 'M', u'ა'),
+ (0x1C91, 'M', u'ბ'),
+ (0x1C92, 'M', u'გ'),
+ (0x1C93, 'M', u'დ'),
+ (0x1C94, 'M', u'ე'),
+ (0x1C95, 'M', u'ვ'),
+ (0x1C96, 'M', u'ზ'),
+ (0x1C97, 'M', u'თ'),
+ (0x1C98, 'M', u'ი'),
+ (0x1C99, 'M', u'კ'),
+ (0x1C9A, 'M', u'ლ'),
+ (0x1C9B, 'M', u'მ'),
+ (0x1C9C, 'M', u'ნ'),
+ (0x1C9D, 'M', u'ო'),
+ (0x1C9E, 'M', u'პ'),
+ (0x1C9F, 'M', u'ჟ'),
+ (0x1CA0, 'M', u'რ'),
+ (0x1CA1, 'M', u'ს'),
+ (0x1CA2, 'M', u'ტ'),
+ (0x1CA3, 'M', u'უ'),
+ (0x1CA4, 'M', u'ფ'),
+ (0x1CA5, 'M', u'ქ'),
+ (0x1CA6, 'M', u'ღ'),
+ (0x1CA7, 'M', u'ყ'),
+ (0x1CA8, 'M', u'შ'),
+ (0x1CA9, 'M', u'ჩ'),
+ (0x1CAA, 'M', u'ც'),
+ (0x1CAB, 'M', u'ძ'),
+ (0x1CAC, 'M', u'წ'),
+ (0x1CAD, 'M', u'ჭ'),
+ (0x1CAE, 'M', u'ხ'),
+ (0x1CAF, 'M', u'ჯ'),
+ (0x1CB0, 'M', u'ჰ'),
+ (0x1CB1, 'M', u'ჱ'),
+ (0x1CB2, 'M', u'ჲ'),
+ (0x1CB3, 'M', u'ჳ'),
+ (0x1CB4, 'M', u'ჴ'),
+ (0x1CB5, 'M', u'ჵ'),
+ (0x1CB6, 'M', u'ჶ'),
+ (0x1CB7, 'M', u'ჷ'),
+ (0x1CB8, 'M', u'ჸ'),
+ (0x1CB9, 'M', u'ჹ'),
+ (0x1CBA, 'M', u'ჺ'),
+ (0x1CBB, 'X'),
+ (0x1CBD, 'M', u'ჽ'),
+ (0x1CBE, 'M', u'ჾ'),
+ (0x1CBF, 'M', u'ჿ'),
+ (0x1CC0, 'V'),
+ (0x1CC8, 'X'),
+ (0x1CD0, 'V'),
+ (0x1CFB, 'X'),
+ (0x1D00, 'V'),
+ (0x1D2C, 'M', u'a'),
+ (0x1D2D, 'M', u'æ'),
+ (0x1D2E, 'M', u'b'),
+ (0x1D2F, 'V'),
+ (0x1D30, 'M', u'd'),
+ (0x1D31, 'M', u'e'),
+ (0x1D32, 'M', u'ǝ'),
+ (0x1D33, 'M', u'g'),
+ (0x1D34, 'M', u'h'),
+ (0x1D35, 'M', u'i'),
+ (0x1D36, 'M', u'j'),
+ (0x1D37, 'M', u'k'),
+ (0x1D38, 'M', u'l'),
+ (0x1D39, 'M', u'm'),
+ (0x1D3A, 'M', u'n'),
+ (0x1D3B, 'V'),
+ (0x1D3C, 'M', u'o'),
+ (0x1D3D, 'M', u'ȣ'),
+ (0x1D3E, 'M', u'p'),
+ (0x1D3F, 'M', u'r'),
+ (0x1D40, 'M', u't'),
+ (0x1D41, 'M', u'u'),
+ (0x1D42, 'M', u'w'),
+ (0x1D43, 'M', u'a'),
+ (0x1D44, 'M', u'ɐ'),
+ (0x1D45, 'M', u'ɑ'),
+ (0x1D46, 'M', u'ᴂ'),
+ (0x1D47, 'M', u'b'),
+ (0x1D48, 'M', u'd'),
+ (0x1D49, 'M', u'e'),
+ (0x1D4A, 'M', u'ə'),
+ (0x1D4B, 'M', u'ɛ'),
+ (0x1D4C, 'M', u'ɜ'),
+ (0x1D4D, 'M', u'g'),
+ (0x1D4E, 'V'),
+ (0x1D4F, 'M', u'k'),
+ (0x1D50, 'M', u'm'),
+ (0x1D51, 'M', u'ŋ'),
+ (0x1D52, 'M', u'o'),
+ ]
+
+def _seg_16():
+ return [
+ (0x1D53, 'M', u'ɔ'),
+ (0x1D54, 'M', u'ᴖ'),
+ (0x1D55, 'M', u'ᴗ'),
+ (0x1D56, 'M', u'p'),
+ (0x1D57, 'M', u't'),
+ (0x1D58, 'M', u'u'),
+ (0x1D59, 'M', u'ᴝ'),
+ (0x1D5A, 'M', u'ɯ'),
+ (0x1D5B, 'M', u'v'),
+ (0x1D5C, 'M', u'ᴥ'),
+ (0x1D5D, 'M', u'β'),
+ (0x1D5E, 'M', u'γ'),
+ (0x1D5F, 'M', u'δ'),
+ (0x1D60, 'M', u'φ'),
+ (0x1D61, 'M', u'χ'),
+ (0x1D62, 'M', u'i'),
+ (0x1D63, 'M', u'r'),
+ (0x1D64, 'M', u'u'),
+ (0x1D65, 'M', u'v'),
+ (0x1D66, 'M', u'β'),
+ (0x1D67, 'M', u'γ'),
+ (0x1D68, 'M', u'ρ'),
+ (0x1D69, 'M', u'φ'),
+ (0x1D6A, 'M', u'χ'),
+ (0x1D6B, 'V'),
+ (0x1D78, 'M', u'н'),
+ (0x1D79, 'V'),
+ (0x1D9B, 'M', u'ɒ'),
+ (0x1D9C, 'M', u'c'),
+ (0x1D9D, 'M', u'ɕ'),
+ (0x1D9E, 'M', u'ð'),
+ (0x1D9F, 'M', u'ɜ'),
+ (0x1DA0, 'M', u'f'),
+ (0x1DA1, 'M', u'ɟ'),
+ (0x1DA2, 'M', u'ɡ'),
+ (0x1DA3, 'M', u'ɥ'),
+ (0x1DA4, 'M', u'ɨ'),
+ (0x1DA5, 'M', u'ɩ'),
+ (0x1DA6, 'M', u'ɪ'),
+ (0x1DA7, 'M', u'ᵻ'),
+ (0x1DA8, 'M', u'ʝ'),
+ (0x1DA9, 'M', u'ɭ'),
+ (0x1DAA, 'M', u'ᶅ'),
+ (0x1DAB, 'M', u'ʟ'),
+ (0x1DAC, 'M', u'ɱ'),
+ (0x1DAD, 'M', u'ɰ'),
+ (0x1DAE, 'M', u'ɲ'),
+ (0x1DAF, 'M', u'ɳ'),
+ (0x1DB0, 'M', u'ɴ'),
+ (0x1DB1, 'M', u'ɵ'),
+ (0x1DB2, 'M', u'ɸ'),
+ (0x1DB3, 'M', u'ʂ'),
+ (0x1DB4, 'M', u'ʃ'),
+ (0x1DB5, 'M', u'ƫ'),
+ (0x1DB6, 'M', u'ʉ'),
+ (0x1DB7, 'M', u'ʊ'),
+ (0x1DB8, 'M', u'ᴜ'),
+ (0x1DB9, 'M', u'ʋ'),
+ (0x1DBA, 'M', u'ʌ'),
+ (0x1DBB, 'M', u'z'),
+ (0x1DBC, 'M', u'ʐ'),
+ (0x1DBD, 'M', u'ʑ'),
+ (0x1DBE, 'M', u'ʒ'),
+ (0x1DBF, 'M', u'θ'),
+ (0x1DC0, 'V'),
+ (0x1DFA, 'X'),
+ (0x1DFB, 'V'),
+ (0x1E00, 'M', u'ḁ'),
+ (0x1E01, 'V'),
+ (0x1E02, 'M', u'ḃ'),
+ (0x1E03, 'V'),
+ (0x1E04, 'M', u'ḅ'),
+ (0x1E05, 'V'),
+ (0x1E06, 'M', u'ḇ'),
+ (0x1E07, 'V'),
+ (0x1E08, 'M', u'ḉ'),
+ (0x1E09, 'V'),
+ (0x1E0A, 'M', u'ḋ'),
+ (0x1E0B, 'V'),
+ (0x1E0C, 'M', u'ḍ'),
+ (0x1E0D, 'V'),
+ (0x1E0E, 'M', u'ḏ'),
+ (0x1E0F, 'V'),
+ (0x1E10, 'M', u'ḑ'),
+ (0x1E11, 'V'),
+ (0x1E12, 'M', u'ḓ'),
+ (0x1E13, 'V'),
+ (0x1E14, 'M', u'ḕ'),
+ (0x1E15, 'V'),
+ (0x1E16, 'M', u'ḗ'),
+ (0x1E17, 'V'),
+ (0x1E18, 'M', u'ḙ'),
+ (0x1E19, 'V'),
+ (0x1E1A, 'M', u'ḛ'),
+ (0x1E1B, 'V'),
+ (0x1E1C, 'M', u'ḝ'),
+ (0x1E1D, 'V'),
+ (0x1E1E, 'M', u'ḟ'),
+ (0x1E1F, 'V'),
+ (0x1E20, 'M', u'ḡ'),
+ ]
+
+def _seg_17():
+ return [
+ (0x1E21, 'V'),
+ (0x1E22, 'M', u'ḣ'),
+ (0x1E23, 'V'),
+ (0x1E24, 'M', u'ḥ'),
+ (0x1E25, 'V'),
+ (0x1E26, 'M', u'ḧ'),
+ (0x1E27, 'V'),
+ (0x1E28, 'M', u'ḩ'),
+ (0x1E29, 'V'),
+ (0x1E2A, 'M', u'ḫ'),
+ (0x1E2B, 'V'),
+ (0x1E2C, 'M', u'ḭ'),
+ (0x1E2D, 'V'),
+ (0x1E2E, 'M', u'ḯ'),
+ (0x1E2F, 'V'),
+ (0x1E30, 'M', u'ḱ'),
+ (0x1E31, 'V'),
+ (0x1E32, 'M', u'ḳ'),
+ (0x1E33, 'V'),
+ (0x1E34, 'M', u'ḵ'),
+ (0x1E35, 'V'),
+ (0x1E36, 'M', u'ḷ'),
+ (0x1E37, 'V'),
+ (0x1E38, 'M', u'ḹ'),
+ (0x1E39, 'V'),
+ (0x1E3A, 'M', u'ḻ'),
+ (0x1E3B, 'V'),
+ (0x1E3C, 'M', u'ḽ'),
+ (0x1E3D, 'V'),
+ (0x1E3E, 'M', u'ḿ'),
+ (0x1E3F, 'V'),
+ (0x1E40, 'M', u'ṁ'),
+ (0x1E41, 'V'),
+ (0x1E42, 'M', u'ṃ'),
+ (0x1E43, 'V'),
+ (0x1E44, 'M', u'ṅ'),
+ (0x1E45, 'V'),
+ (0x1E46, 'M', u'ṇ'),
+ (0x1E47, 'V'),
+ (0x1E48, 'M', u'ṉ'),
+ (0x1E49, 'V'),
+ (0x1E4A, 'M', u'ṋ'),
+ (0x1E4B, 'V'),
+ (0x1E4C, 'M', u'ṍ'),
+ (0x1E4D, 'V'),
+ (0x1E4E, 'M', u'ṏ'),
+ (0x1E4F, 'V'),
+ (0x1E50, 'M', u'ṑ'),
+ (0x1E51, 'V'),
+ (0x1E52, 'M', u'ṓ'),
+ (0x1E53, 'V'),
+ (0x1E54, 'M', u'ṕ'),
+ (0x1E55, 'V'),
+ (0x1E56, 'M', u'ṗ'),
+ (0x1E57, 'V'),
+ (0x1E58, 'M', u'ṙ'),
+ (0x1E59, 'V'),
+ (0x1E5A, 'M', u'ṛ'),
+ (0x1E5B, 'V'),
+ (0x1E5C, 'M', u'ṝ'),
+ (0x1E5D, 'V'),
+ (0x1E5E, 'M', u'ṟ'),
+ (0x1E5F, 'V'),
+ (0x1E60, 'M', u'ṡ'),
+ (0x1E61, 'V'),
+ (0x1E62, 'M', u'ṣ'),
+ (0x1E63, 'V'),
+ (0x1E64, 'M', u'ṥ'),
+ (0x1E65, 'V'),
+ (0x1E66, 'M', u'ṧ'),
+ (0x1E67, 'V'),
+ (0x1E68, 'M', u'ṩ'),
+ (0x1E69, 'V'),
+ (0x1E6A, 'M', u'ṫ'),
+ (0x1E6B, 'V'),
+ (0x1E6C, 'M', u'ṭ'),
+ (0x1E6D, 'V'),
+ (0x1E6E, 'M', u'ṯ'),
+ (0x1E6F, 'V'),
+ (0x1E70, 'M', u'ṱ'),
+ (0x1E71, 'V'),
+ (0x1E72, 'M', u'ṳ'),
+ (0x1E73, 'V'),
+ (0x1E74, 'M', u'ṵ'),
+ (0x1E75, 'V'),
+ (0x1E76, 'M', u'ṷ'),
+ (0x1E77, 'V'),
+ (0x1E78, 'M', u'ṹ'),
+ (0x1E79, 'V'),
+ (0x1E7A, 'M', u'ṻ'),
+ (0x1E7B, 'V'),
+ (0x1E7C, 'M', u'ṽ'),
+ (0x1E7D, 'V'),
+ (0x1E7E, 'M', u'ṿ'),
+ (0x1E7F, 'V'),
+ (0x1E80, 'M', u'ẁ'),
+ (0x1E81, 'V'),
+ (0x1E82, 'M', u'ẃ'),
+ (0x1E83, 'V'),
+ (0x1E84, 'M', u'ẅ'),
+ ]
+
+def _seg_18():
+ return [
+ (0x1E85, 'V'),
+ (0x1E86, 'M', u'ẇ'),
+ (0x1E87, 'V'),
+ (0x1E88, 'M', u'ẉ'),
+ (0x1E89, 'V'),
+ (0x1E8A, 'M', u'ẋ'),
+ (0x1E8B, 'V'),
+ (0x1E8C, 'M', u'ẍ'),
+ (0x1E8D, 'V'),
+ (0x1E8E, 'M', u'ẏ'),
+ (0x1E8F, 'V'),
+ (0x1E90, 'M', u'ẑ'),
+ (0x1E91, 'V'),
+ (0x1E92, 'M', u'ẓ'),
+ (0x1E93, 'V'),
+ (0x1E94, 'M', u'ẕ'),
+ (0x1E95, 'V'),
+ (0x1E9A, 'M', u'aʾ'),
+ (0x1E9B, 'M', u'ṡ'),
+ (0x1E9C, 'V'),
+ (0x1E9E, 'M', u'ss'),
+ (0x1E9F, 'V'),
+ (0x1EA0, 'M', u'ạ'),
+ (0x1EA1, 'V'),
+ (0x1EA2, 'M', u'ả'),
+ (0x1EA3, 'V'),
+ (0x1EA4, 'M', u'ấ'),
+ (0x1EA5, 'V'),
+ (0x1EA6, 'M', u'ầ'),
+ (0x1EA7, 'V'),
+ (0x1EA8, 'M', u'ẩ'),
+ (0x1EA9, 'V'),
+ (0x1EAA, 'M', u'ẫ'),
+ (0x1EAB, 'V'),
+ (0x1EAC, 'M', u'ậ'),
+ (0x1EAD, 'V'),
+ (0x1EAE, 'M', u'ắ'),
+ (0x1EAF, 'V'),
+ (0x1EB0, 'M', u'ằ'),
+ (0x1EB1, 'V'),
+ (0x1EB2, 'M', u'ẳ'),
+ (0x1EB3, 'V'),
+ (0x1EB4, 'M', u'ẵ'),
+ (0x1EB5, 'V'),
+ (0x1EB6, 'M', u'ặ'),
+ (0x1EB7, 'V'),
+ (0x1EB8, 'M', u'ẹ'),
+ (0x1EB9, 'V'),
+ (0x1EBA, 'M', u'ẻ'),
+ (0x1EBB, 'V'),
+ (0x1EBC, 'M', u'ẽ'),
+ (0x1EBD, 'V'),
+ (0x1EBE, 'M', u'ế'),
+ (0x1EBF, 'V'),
+ (0x1EC0, 'M', u'ề'),
+ (0x1EC1, 'V'),
+ (0x1EC2, 'M', u'ể'),
+ (0x1EC3, 'V'),
+ (0x1EC4, 'M', u'ễ'),
+ (0x1EC5, 'V'),
+ (0x1EC6, 'M', u'ệ'),
+ (0x1EC7, 'V'),
+ (0x1EC8, 'M', u'ỉ'),
+ (0x1EC9, 'V'),
+ (0x1ECA, 'M', u'ị'),
+ (0x1ECB, 'V'),
+ (0x1ECC, 'M', u'ọ'),
+ (0x1ECD, 'V'),
+ (0x1ECE, 'M', u'ỏ'),
+ (0x1ECF, 'V'),
+ (0x1ED0, 'M', u'ố'),
+ (0x1ED1, 'V'),
+ (0x1ED2, 'M', u'ồ'),
+ (0x1ED3, 'V'),
+ (0x1ED4, 'M', u'ổ'),
+ (0x1ED5, 'V'),
+ (0x1ED6, 'M', u'ỗ'),
+ (0x1ED7, 'V'),
+ (0x1ED8, 'M', u'ộ'),
+ (0x1ED9, 'V'),
+ (0x1EDA, 'M', u'ớ'),
+ (0x1EDB, 'V'),
+ (0x1EDC, 'M', u'ờ'),
+ (0x1EDD, 'V'),
+ (0x1EDE, 'M', u'ở'),
+ (0x1EDF, 'V'),
+ (0x1EE0, 'M', u'ỡ'),
+ (0x1EE1, 'V'),
+ (0x1EE2, 'M', u'ợ'),
+ (0x1EE3, 'V'),
+ (0x1EE4, 'M', u'ụ'),
+ (0x1EE5, 'V'),
+ (0x1EE6, 'M', u'ủ'),
+ (0x1EE7, 'V'),
+ (0x1EE8, 'M', u'ứ'),
+ (0x1EE9, 'V'),
+ (0x1EEA, 'M', u'ừ'),
+ (0x1EEB, 'V'),
+ (0x1EEC, 'M', u'ử'),
+ (0x1EED, 'V'),
+ ]
+
+def _seg_19():
+ return [
+ (0x1EEE, 'M', u'ữ'),
+ (0x1EEF, 'V'),
+ (0x1EF0, 'M', u'ự'),
+ (0x1EF1, 'V'),
+ (0x1EF2, 'M', u'ỳ'),
+ (0x1EF3, 'V'),
+ (0x1EF4, 'M', u'ỵ'),
+ (0x1EF5, 'V'),
+ (0x1EF6, 'M', u'ỷ'),
+ (0x1EF7, 'V'),
+ (0x1EF8, 'M', u'ỹ'),
+ (0x1EF9, 'V'),
+ (0x1EFA, 'M', u'ỻ'),
+ (0x1EFB, 'V'),
+ (0x1EFC, 'M', u'ỽ'),
+ (0x1EFD, 'V'),
+ (0x1EFE, 'M', u'ỿ'),
+ (0x1EFF, 'V'),
+ (0x1F08, 'M', u'ἀ'),
+ (0x1F09, 'M', u'ἁ'),
+ (0x1F0A, 'M', u'ἂ'),
+ (0x1F0B, 'M', u'ἃ'),
+ (0x1F0C, 'M', u'ἄ'),
+ (0x1F0D, 'M', u'ἅ'),
+ (0x1F0E, 'M', u'ἆ'),
+ (0x1F0F, 'M', u'ἇ'),
+ (0x1F10, 'V'),
+ (0x1F16, 'X'),
+ (0x1F18, 'M', u'ἐ'),
+ (0x1F19, 'M', u'ἑ'),
+ (0x1F1A, 'M', u'ἒ'),
+ (0x1F1B, 'M', u'ἓ'),
+ (0x1F1C, 'M', u'ἔ'),
+ (0x1F1D, 'M', u'ἕ'),
+ (0x1F1E, 'X'),
+ (0x1F20, 'V'),
+ (0x1F28, 'M', u'ἠ'),
+ (0x1F29, 'M', u'ἡ'),
+ (0x1F2A, 'M', u'ἢ'),
+ (0x1F2B, 'M', u'ἣ'),
+ (0x1F2C, 'M', u'ἤ'),
+ (0x1F2D, 'M', u'ἥ'),
+ (0x1F2E, 'M', u'ἦ'),
+ (0x1F2F, 'M', u'ἧ'),
+ (0x1F30, 'V'),
+ (0x1F38, 'M', u'ἰ'),
+ (0x1F39, 'M', u'ἱ'),
+ (0x1F3A, 'M', u'ἲ'),
+ (0x1F3B, 'M', u'ἳ'),
+ (0x1F3C, 'M', u'ἴ'),
+ (0x1F3D, 'M', u'ἵ'),
+ (0x1F3E, 'M', u'ἶ'),
+ (0x1F3F, 'M', u'ἷ'),
+ (0x1F40, 'V'),
+ (0x1F46, 'X'),
+ (0x1F48, 'M', u'ὀ'),
+ (0x1F49, 'M', u'ὁ'),
+ (0x1F4A, 'M', u'ὂ'),
+ (0x1F4B, 'M', u'ὃ'),
+ (0x1F4C, 'M', u'ὄ'),
+ (0x1F4D, 'M', u'ὅ'),
+ (0x1F4E, 'X'),
+ (0x1F50, 'V'),
+ (0x1F58, 'X'),
+ (0x1F59, 'M', u'ὑ'),
+ (0x1F5A, 'X'),
+ (0x1F5B, 'M', u'ὓ'),
+ (0x1F5C, 'X'),
+ (0x1F5D, 'M', u'ὕ'),
+ (0x1F5E, 'X'),
+ (0x1F5F, 'M', u'ὗ'),
+ (0x1F60, 'V'),
+ (0x1F68, 'M', u'ὠ'),
+ (0x1F69, 'M', u'ὡ'),
+ (0x1F6A, 'M', u'ὢ'),
+ (0x1F6B, 'M', u'ὣ'),
+ (0x1F6C, 'M', u'ὤ'),
+ (0x1F6D, 'M', u'ὥ'),
+ (0x1F6E, 'M', u'ὦ'),
+ (0x1F6F, 'M', u'ὧ'),
+ (0x1F70, 'V'),
+ (0x1F71, 'M', u'ά'),
+ (0x1F72, 'V'),
+ (0x1F73, 'M', u'έ'),
+ (0x1F74, 'V'),
+ (0x1F75, 'M', u'ή'),
+ (0x1F76, 'V'),
+ (0x1F77, 'M', u'ί'),
+ (0x1F78, 'V'),
+ (0x1F79, 'M', u'ό'),
+ (0x1F7A, 'V'),
+ (0x1F7B, 'M', u'ύ'),
+ (0x1F7C, 'V'),
+ (0x1F7D, 'M', u'ώ'),
+ (0x1F7E, 'X'),
+ (0x1F80, 'M', u'ἀι'),
+ (0x1F81, 'M', u'ἁι'),
+ (0x1F82, 'M', u'ἂι'),
+ (0x1F83, 'M', u'ἃι'),
+ (0x1F84, 'M', u'ἄι'),
+ ]
+
+def _seg_20():
+ return [
+ (0x1F85, 'M', u'ἅι'),
+ (0x1F86, 'M', u'ἆι'),
+ (0x1F87, 'M', u'ἇι'),
+ (0x1F88, 'M', u'ἀι'),
+ (0x1F89, 'M', u'ἁι'),
+ (0x1F8A, 'M', u'ἂι'),
+ (0x1F8B, 'M', u'ἃι'),
+ (0x1F8C, 'M', u'ἄι'),
+ (0x1F8D, 'M', u'ἅι'),
+ (0x1F8E, 'M', u'ἆι'),
+ (0x1F8F, 'M', u'ἇι'),
+ (0x1F90, 'M', u'ἠι'),
+ (0x1F91, 'M', u'ἡι'),
+ (0x1F92, 'M', u'ἢι'),
+ (0x1F93, 'M', u'ἣι'),
+ (0x1F94, 'M', u'ἤι'),
+ (0x1F95, 'M', u'ἥι'),
+ (0x1F96, 'M', u'ἦι'),
+ (0x1F97, 'M', u'ἧι'),
+ (0x1F98, 'M', u'ἠι'),
+ (0x1F99, 'M', u'ἡι'),
+ (0x1F9A, 'M', u'ἢι'),
+ (0x1F9B, 'M', u'ἣι'),
+ (0x1F9C, 'M', u'ἤι'),
+ (0x1F9D, 'M', u'ἥι'),
+ (0x1F9E, 'M', u'ἦι'),
+ (0x1F9F, 'M', u'ἧι'),
+ (0x1FA0, 'M', u'ὠι'),
+ (0x1FA1, 'M', u'ὡι'),
+ (0x1FA2, 'M', u'ὢι'),
+ (0x1FA3, 'M', u'ὣι'),
+ (0x1FA4, 'M', u'ὤι'),
+ (0x1FA5, 'M', u'ὥι'),
+ (0x1FA6, 'M', u'ὦι'),
+ (0x1FA7, 'M', u'ὧι'),
+ (0x1FA8, 'M', u'ὠι'),
+ (0x1FA9, 'M', u'ὡι'),
+ (0x1FAA, 'M', u'ὢι'),
+ (0x1FAB, 'M', u'ὣι'),
+ (0x1FAC, 'M', u'ὤι'),
+ (0x1FAD, 'M', u'ὥι'),
+ (0x1FAE, 'M', u'ὦι'),
+ (0x1FAF, 'M', u'ὧι'),
+ (0x1FB0, 'V'),
+ (0x1FB2, 'M', u'ὰι'),
+ (0x1FB3, 'M', u'αι'),
+ (0x1FB4, 'M', u'άι'),
+ (0x1FB5, 'X'),
+ (0x1FB6, 'V'),
+ (0x1FB7, 'M', u'ᾶι'),
+ (0x1FB8, 'M', u'ᾰ'),
+ (0x1FB9, 'M', u'ᾱ'),
+ (0x1FBA, 'M', u'ὰ'),
+ (0x1FBB, 'M', u'ά'),
+ (0x1FBC, 'M', u'αι'),
+ (0x1FBD, '3', u' ̓'),
+ (0x1FBE, 'M', u'ι'),
+ (0x1FBF, '3', u' ̓'),
+ (0x1FC0, '3', u' ͂'),
+ (0x1FC1, '3', u' ̈͂'),
+ (0x1FC2, 'M', u'ὴι'),
+ (0x1FC3, 'M', u'ηι'),
+ (0x1FC4, 'M', u'ήι'),
+ (0x1FC5, 'X'),
+ (0x1FC6, 'V'),
+ (0x1FC7, 'M', u'ῆι'),
+ (0x1FC8, 'M', u'ὲ'),
+ (0x1FC9, 'M', u'έ'),
+ (0x1FCA, 'M', u'ὴ'),
+ (0x1FCB, 'M', u'ή'),
+ (0x1FCC, 'M', u'ηι'),
+ (0x1FCD, '3', u' ̓̀'),
+ (0x1FCE, '3', u' ̓́'),
+ (0x1FCF, '3', u' ̓͂'),
+ (0x1FD0, 'V'),
+ (0x1FD3, 'M', u'ΐ'),
+ (0x1FD4, 'X'),
+ (0x1FD6, 'V'),
+ (0x1FD8, 'M', u'ῐ'),
+ (0x1FD9, 'M', u'ῑ'),
+ (0x1FDA, 'M', u'ὶ'),
+ (0x1FDB, 'M', u'ί'),
+ (0x1FDC, 'X'),
+ (0x1FDD, '3', u' ̔̀'),
+ (0x1FDE, '3', u' ̔́'),
+ (0x1FDF, '3', u' ̔͂'),
+ (0x1FE0, 'V'),
+ (0x1FE3, 'M', u'ΰ'),
+ (0x1FE4, 'V'),
+ (0x1FE8, 'M', u'ῠ'),
+ (0x1FE9, 'M', u'ῡ'),
+ (0x1FEA, 'M', u'ὺ'),
+ (0x1FEB, 'M', u'ύ'),
+ (0x1FEC, 'M', u'ῥ'),
+ (0x1FED, '3', u' ̈̀'),
+ (0x1FEE, '3', u' ̈́'),
+ (0x1FEF, '3', u'`'),
+ (0x1FF0, 'X'),
+ (0x1FF2, 'M', u'ὼι'),
+ (0x1FF3, 'M', u'ωι'),
+ ]
+
+def _seg_21():
+ return [
+ (0x1FF4, 'M', u'ώι'),
+ (0x1FF5, 'X'),
+ (0x1FF6, 'V'),
+ (0x1FF7, 'M', u'ῶι'),
+ (0x1FF8, 'M', u'ὸ'),
+ (0x1FF9, 'M', u'ό'),
+ (0x1FFA, 'M', u'ὼ'),
+ (0x1FFB, 'M', u'ώ'),
+ (0x1FFC, 'M', u'ωι'),
+ (0x1FFD, '3', u' ́'),
+ (0x1FFE, '3', u' ̔'),
+ (0x1FFF, 'X'),
+ (0x2000, '3', u' '),
+ (0x200B, 'I'),
+ (0x200C, 'D', u''),
+ (0x200E, 'X'),
+ (0x2010, 'V'),
+ (0x2011, 'M', u'‐'),
+ (0x2012, 'V'),
+ (0x2017, '3', u' ̳'),
+ (0x2018, 'V'),
+ (0x2024, 'X'),
+ (0x2027, 'V'),
+ (0x2028, 'X'),
+ (0x202F, '3', u' '),
+ (0x2030, 'V'),
+ (0x2033, 'M', u'′′'),
+ (0x2034, 'M', u'′′′'),
+ (0x2035, 'V'),
+ (0x2036, 'M', u'‵‵'),
+ (0x2037, 'M', u'‵‵‵'),
+ (0x2038, 'V'),
+ (0x203C, '3', u'!!'),
+ (0x203D, 'V'),
+ (0x203E, '3', u' ̅'),
+ (0x203F, 'V'),
+ (0x2047, '3', u'??'),
+ (0x2048, '3', u'?!'),
+ (0x2049, '3', u'!?'),
+ (0x204A, 'V'),
+ (0x2057, 'M', u'′′′′'),
+ (0x2058, 'V'),
+ (0x205F, '3', u' '),
+ (0x2060, 'I'),
+ (0x2061, 'X'),
+ (0x2064, 'I'),
+ (0x2065, 'X'),
+ (0x2070, 'M', u'0'),
+ (0x2071, 'M', u'i'),
+ (0x2072, 'X'),
+ (0x2074, 'M', u'4'),
+ (0x2075, 'M', u'5'),
+ (0x2076, 'M', u'6'),
+ (0x2077, 'M', u'7'),
+ (0x2078, 'M', u'8'),
+ (0x2079, 'M', u'9'),
+ (0x207A, '3', u'+'),
+ (0x207B, 'M', u'−'),
+ (0x207C, '3', u'='),
+ (0x207D, '3', u'('),
+ (0x207E, '3', u')'),
+ (0x207F, 'M', u'n'),
+ (0x2080, 'M', u'0'),
+ (0x2081, 'M', u'1'),
+ (0x2082, 'M', u'2'),
+ (0x2083, 'M', u'3'),
+ (0x2084, 'M', u'4'),
+ (0x2085, 'M', u'5'),
+ (0x2086, 'M', u'6'),
+ (0x2087, 'M', u'7'),
+ (0x2088, 'M', u'8'),
+ (0x2089, 'M', u'9'),
+ (0x208A, '3', u'+'),
+ (0x208B, 'M', u'−'),
+ (0x208C, '3', u'='),
+ (0x208D, '3', u'('),
+ (0x208E, '3', u')'),
+ (0x208F, 'X'),
+ (0x2090, 'M', u'a'),
+ (0x2091, 'M', u'e'),
+ (0x2092, 'M', u'o'),
+ (0x2093, 'M', u'x'),
+ (0x2094, 'M', u'ə'),
+ (0x2095, 'M', u'h'),
+ (0x2096, 'M', u'k'),
+ (0x2097, 'M', u'l'),
+ (0x2098, 'M', u'm'),
+ (0x2099, 'M', u'n'),
+ (0x209A, 'M', u'p'),
+ (0x209B, 'M', u's'),
+ (0x209C, 'M', u't'),
+ (0x209D, 'X'),
+ (0x20A0, 'V'),
+ (0x20A8, 'M', u'rs'),
+ (0x20A9, 'V'),
+ (0x20C0, 'X'),
+ (0x20D0, 'V'),
+ (0x20F1, 'X'),
+ (0x2100, '3', u'a/c'),
+ (0x2101, '3', u'a/s'),
+ ]
+
+def _seg_22():
+ return [
+ (0x2102, 'M', u'c'),
+ (0x2103, 'M', u'°c'),
+ (0x2104, 'V'),
+ (0x2105, '3', u'c/o'),
+ (0x2106, '3', u'c/u'),
+ (0x2107, 'M', u'ɛ'),
+ (0x2108, 'V'),
+ (0x2109, 'M', u'°f'),
+ (0x210A, 'M', u'g'),
+ (0x210B, 'M', u'h'),
+ (0x210F, 'M', u'ħ'),
+ (0x2110, 'M', u'i'),
+ (0x2112, 'M', u'l'),
+ (0x2114, 'V'),
+ (0x2115, 'M', u'n'),
+ (0x2116, 'M', u'no'),
+ (0x2117, 'V'),
+ (0x2119, 'M', u'p'),
+ (0x211A, 'M', u'q'),
+ (0x211B, 'M', u'r'),
+ (0x211E, 'V'),
+ (0x2120, 'M', u'sm'),
+ (0x2121, 'M', u'tel'),
+ (0x2122, 'M', u'tm'),
+ (0x2123, 'V'),
+ (0x2124, 'M', u'z'),
+ (0x2125, 'V'),
+ (0x2126, 'M', u'ω'),
+ (0x2127, 'V'),
+ (0x2128, 'M', u'z'),
+ (0x2129, 'V'),
+ (0x212A, 'M', u'k'),
+ (0x212B, 'M', u'å'),
+ (0x212C, 'M', u'b'),
+ (0x212D, 'M', u'c'),
+ (0x212E, 'V'),
+ (0x212F, 'M', u'e'),
+ (0x2131, 'M', u'f'),
+ (0x2132, 'X'),
+ (0x2133, 'M', u'm'),
+ (0x2134, 'M', u'o'),
+ (0x2135, 'M', u'א'),
+ (0x2136, 'M', u'ב'),
+ (0x2137, 'M', u'ג'),
+ (0x2138, 'M', u'ד'),
+ (0x2139, 'M', u'i'),
+ (0x213A, 'V'),
+ (0x213B, 'M', u'fax'),
+ (0x213C, 'M', u'π'),
+ (0x213D, 'M', u'γ'),
+ (0x213F, 'M', u'π'),
+ (0x2140, 'M', u'∑'),
+ (0x2141, 'V'),
+ (0x2145, 'M', u'd'),
+ (0x2147, 'M', u'e'),
+ (0x2148, 'M', u'i'),
+ (0x2149, 'M', u'j'),
+ (0x214A, 'V'),
+ (0x2150, 'M', u'1⁄7'),
+ (0x2151, 'M', u'1⁄9'),
+ (0x2152, 'M', u'1⁄10'),
+ (0x2153, 'M', u'1⁄3'),
+ (0x2154, 'M', u'2⁄3'),
+ (0x2155, 'M', u'1⁄5'),
+ (0x2156, 'M', u'2⁄5'),
+ (0x2157, 'M', u'3⁄5'),
+ (0x2158, 'M', u'4⁄5'),
+ (0x2159, 'M', u'1⁄6'),
+ (0x215A, 'M', u'5⁄6'),
+ (0x215B, 'M', u'1⁄8'),
+ (0x215C, 'M', u'3⁄8'),
+ (0x215D, 'M', u'5⁄8'),
+ (0x215E, 'M', u'7⁄8'),
+ (0x215F, 'M', u'1⁄'),
+ (0x2160, 'M', u'i'),
+ (0x2161, 'M', u'ii'),
+ (0x2162, 'M', u'iii'),
+ (0x2163, 'M', u'iv'),
+ (0x2164, 'M', u'v'),
+ (0x2165, 'M', u'vi'),
+ (0x2166, 'M', u'vii'),
+ (0x2167, 'M', u'viii'),
+ (0x2168, 'M', u'ix'),
+ (0x2169, 'M', u'x'),
+ (0x216A, 'M', u'xi'),
+ (0x216B, 'M', u'xii'),
+ (0x216C, 'M', u'l'),
+ (0x216D, 'M', u'c'),
+ (0x216E, 'M', u'd'),
+ (0x216F, 'M', u'm'),
+ (0x2170, 'M', u'i'),
+ (0x2171, 'M', u'ii'),
+ (0x2172, 'M', u'iii'),
+ (0x2173, 'M', u'iv'),
+ (0x2174, 'M', u'v'),
+ (0x2175, 'M', u'vi'),
+ (0x2176, 'M', u'vii'),
+ (0x2177, 'M', u'viii'),
+ (0x2178, 'M', u'ix'),
+ (0x2179, 'M', u'x'),
+ ]
+
+def _seg_23():
+ return [
+ (0x217A, 'M', u'xi'),
+ (0x217B, 'M', u'xii'),
+ (0x217C, 'M', u'l'),
+ (0x217D, 'M', u'c'),
+ (0x217E, 'M', u'd'),
+ (0x217F, 'M', u'm'),
+ (0x2180, 'V'),
+ (0x2183, 'X'),
+ (0x2184, 'V'),
+ (0x2189, 'M', u'0⁄3'),
+ (0x218A, 'V'),
+ (0x218C, 'X'),
+ (0x2190, 'V'),
+ (0x222C, 'M', u'∫∫'),
+ (0x222D, 'M', u'∫∫∫'),
+ (0x222E, 'V'),
+ (0x222F, 'M', u'∮∮'),
+ (0x2230, 'M', u'∮∮∮'),
+ (0x2231, 'V'),
+ (0x2260, '3'),
+ (0x2261, 'V'),
+ (0x226E, '3'),
+ (0x2270, 'V'),
+ (0x2329, 'M', u'〈'),
+ (0x232A, 'M', u'〉'),
+ (0x232B, 'V'),
+ (0x2427, 'X'),
+ (0x2440, 'V'),
+ (0x244B, 'X'),
+ (0x2460, 'M', u'1'),
+ (0x2461, 'M', u'2'),
+ (0x2462, 'M', u'3'),
+ (0x2463, 'M', u'4'),
+ (0x2464, 'M', u'5'),
+ (0x2465, 'M', u'6'),
+ (0x2466, 'M', u'7'),
+ (0x2467, 'M', u'8'),
+ (0x2468, 'M', u'9'),
+ (0x2469, 'M', u'10'),
+ (0x246A, 'M', u'11'),
+ (0x246B, 'M', u'12'),
+ (0x246C, 'M', u'13'),
+ (0x246D, 'M', u'14'),
+ (0x246E, 'M', u'15'),
+ (0x246F, 'M', u'16'),
+ (0x2470, 'M', u'17'),
+ (0x2471, 'M', u'18'),
+ (0x2472, 'M', u'19'),
+ (0x2473, 'M', u'20'),
+ (0x2474, '3', u'(1)'),
+ (0x2475, '3', u'(2)'),
+ (0x2476, '3', u'(3)'),
+ (0x2477, '3', u'(4)'),
+ (0x2478, '3', u'(5)'),
+ (0x2479, '3', u'(6)'),
+ (0x247A, '3', u'(7)'),
+ (0x247B, '3', u'(8)'),
+ (0x247C, '3', u'(9)'),
+ (0x247D, '3', u'(10)'),
+ (0x247E, '3', u'(11)'),
+ (0x247F, '3', u'(12)'),
+ (0x2480, '3', u'(13)'),
+ (0x2481, '3', u'(14)'),
+ (0x2482, '3', u'(15)'),
+ (0x2483, '3', u'(16)'),
+ (0x2484, '3', u'(17)'),
+ (0x2485, '3', u'(18)'),
+ (0x2486, '3', u'(19)'),
+ (0x2487, '3', u'(20)'),
+ (0x2488, 'X'),
+ (0x249C, '3', u'(a)'),
+ (0x249D, '3', u'(b)'),
+ (0x249E, '3', u'(c)'),
+ (0x249F, '3', u'(d)'),
+ (0x24A0, '3', u'(e)'),
+ (0x24A1, '3', u'(f)'),
+ (0x24A2, '3', u'(g)'),
+ (0x24A3, '3', u'(h)'),
+ (0x24A4, '3', u'(i)'),
+ (0x24A5, '3', u'(j)'),
+ (0x24A6, '3', u'(k)'),
+ (0x24A7, '3', u'(l)'),
+ (0x24A8, '3', u'(m)'),
+ (0x24A9, '3', u'(n)'),
+ (0x24AA, '3', u'(o)'),
+ (0x24AB, '3', u'(p)'),
+ (0x24AC, '3', u'(q)'),
+ (0x24AD, '3', u'(r)'),
+ (0x24AE, '3', u'(s)'),
+ (0x24AF, '3', u'(t)'),
+ (0x24B0, '3', u'(u)'),
+ (0x24B1, '3', u'(v)'),
+ (0x24B2, '3', u'(w)'),
+ (0x24B3, '3', u'(x)'),
+ (0x24B4, '3', u'(y)'),
+ (0x24B5, '3', u'(z)'),
+ (0x24B6, 'M', u'a'),
+ (0x24B7, 'M', u'b'),
+ (0x24B8, 'M', u'c'),
+ (0x24B9, 'M', u'd'),
+ ]
+
+def _seg_24():
+ return [
+ (0x24BA, 'M', u'e'),
+ (0x24BB, 'M', u'f'),
+ (0x24BC, 'M', u'g'),
+ (0x24BD, 'M', u'h'),
+ (0x24BE, 'M', u'i'),
+ (0x24BF, 'M', u'j'),
+ (0x24C0, 'M', u'k'),
+ (0x24C1, 'M', u'l'),
+ (0x24C2, 'M', u'm'),
+ (0x24C3, 'M', u'n'),
+ (0x24C4, 'M', u'o'),
+ (0x24C5, 'M', u'p'),
+ (0x24C6, 'M', u'q'),
+ (0x24C7, 'M', u'r'),
+ (0x24C8, 'M', u's'),
+ (0x24C9, 'M', u't'),
+ (0x24CA, 'M', u'u'),
+ (0x24CB, 'M', u'v'),
+ (0x24CC, 'M', u'w'),
+ (0x24CD, 'M', u'x'),
+ (0x24CE, 'M', u'y'),
+ (0x24CF, 'M', u'z'),
+ (0x24D0, 'M', u'a'),
+ (0x24D1, 'M', u'b'),
+ (0x24D2, 'M', u'c'),
+ (0x24D3, 'M', u'd'),
+ (0x24D4, 'M', u'e'),
+ (0x24D5, 'M', u'f'),
+ (0x24D6, 'M', u'g'),
+ (0x24D7, 'M', u'h'),
+ (0x24D8, 'M', u'i'),
+ (0x24D9, 'M', u'j'),
+ (0x24DA, 'M', u'k'),
+ (0x24DB, 'M', u'l'),
+ (0x24DC, 'M', u'm'),
+ (0x24DD, 'M', u'n'),
+ (0x24DE, 'M', u'o'),
+ (0x24DF, 'M', u'p'),
+ (0x24E0, 'M', u'q'),
+ (0x24E1, 'M', u'r'),
+ (0x24E2, 'M', u's'),
+ (0x24E3, 'M', u't'),
+ (0x24E4, 'M', u'u'),
+ (0x24E5, 'M', u'v'),
+ (0x24E6, 'M', u'w'),
+ (0x24E7, 'M', u'x'),
+ (0x24E8, 'M', u'y'),
+ (0x24E9, 'M', u'z'),
+ (0x24EA, 'M', u'0'),
+ (0x24EB, 'V'),
+ (0x2A0C, 'M', u'∫∫∫∫'),
+ (0x2A0D, 'V'),
+ (0x2A74, '3', u'::='),
+ (0x2A75, '3', u'=='),
+ (0x2A76, '3', u'==='),
+ (0x2A77, 'V'),
+ (0x2ADC, 'M', u'⫝̸'),
+ (0x2ADD, 'V'),
+ (0x2B74, 'X'),
+ (0x2B76, 'V'),
+ (0x2B96, 'X'),
+ (0x2B97, 'V'),
+ (0x2C00, 'M', u'ⰰ'),
+ (0x2C01, 'M', u'ⰱ'),
+ (0x2C02, 'M', u'ⰲ'),
+ (0x2C03, 'M', u'ⰳ'),
+ (0x2C04, 'M', u'ⰴ'),
+ (0x2C05, 'M', u'ⰵ'),
+ (0x2C06, 'M', u'ⰶ'),
+ (0x2C07, 'M', u'ⰷ'),
+ (0x2C08, 'M', u'ⰸ'),
+ (0x2C09, 'M', u'ⰹ'),
+ (0x2C0A, 'M', u'ⰺ'),
+ (0x2C0B, 'M', u'ⰻ'),
+ (0x2C0C, 'M', u'ⰼ'),
+ (0x2C0D, 'M', u'ⰽ'),
+ (0x2C0E, 'M', u'ⰾ'),
+ (0x2C0F, 'M', u'ⰿ'),
+ (0x2C10, 'M', u'ⱀ'),
+ (0x2C11, 'M', u'ⱁ'),
+ (0x2C12, 'M', u'ⱂ'),
+ (0x2C13, 'M', u'ⱃ'),
+ (0x2C14, 'M', u'ⱄ'),
+ (0x2C15, 'M', u'ⱅ'),
+ (0x2C16, 'M', u'ⱆ'),
+ (0x2C17, 'M', u'ⱇ'),
+ (0x2C18, 'M', u'ⱈ'),
+ (0x2C19, 'M', u'ⱉ'),
+ (0x2C1A, 'M', u'ⱊ'),
+ (0x2C1B, 'M', u'ⱋ'),
+ (0x2C1C, 'M', u'ⱌ'),
+ (0x2C1D, 'M', u'ⱍ'),
+ (0x2C1E, 'M', u'ⱎ'),
+ (0x2C1F, 'M', u'ⱏ'),
+ (0x2C20, 'M', u'ⱐ'),
+ (0x2C21, 'M', u'ⱑ'),
+ (0x2C22, 'M', u'ⱒ'),
+ (0x2C23, 'M', u'ⱓ'),
+ (0x2C24, 'M', u'ⱔ'),
+ (0x2C25, 'M', u'ⱕ'),
+ ]
+
+def _seg_25():
+ return [
+ (0x2C26, 'M', u'ⱖ'),
+ (0x2C27, 'M', u'ⱗ'),
+ (0x2C28, 'M', u'ⱘ'),
+ (0x2C29, 'M', u'ⱙ'),
+ (0x2C2A, 'M', u'ⱚ'),
+ (0x2C2B, 'M', u'ⱛ'),
+ (0x2C2C, 'M', u'ⱜ'),
+ (0x2C2D, 'M', u'ⱝ'),
+ (0x2C2E, 'M', u'ⱞ'),
+ (0x2C2F, 'X'),
+ (0x2C30, 'V'),
+ (0x2C5F, 'X'),
+ (0x2C60, 'M', u'ⱡ'),
+ (0x2C61, 'V'),
+ (0x2C62, 'M', u'ɫ'),
+ (0x2C63, 'M', u'ᵽ'),
+ (0x2C64, 'M', u'ɽ'),
+ (0x2C65, 'V'),
+ (0x2C67, 'M', u'ⱨ'),
+ (0x2C68, 'V'),
+ (0x2C69, 'M', u'ⱪ'),
+ (0x2C6A, 'V'),
+ (0x2C6B, 'M', u'ⱬ'),
+ (0x2C6C, 'V'),
+ (0x2C6D, 'M', u'ɑ'),
+ (0x2C6E, 'M', u'ɱ'),
+ (0x2C6F, 'M', u'ɐ'),
+ (0x2C70, 'M', u'ɒ'),
+ (0x2C71, 'V'),
+ (0x2C72, 'M', u'ⱳ'),
+ (0x2C73, 'V'),
+ (0x2C75, 'M', u'ⱶ'),
+ (0x2C76, 'V'),
+ (0x2C7C, 'M', u'j'),
+ (0x2C7D, 'M', u'v'),
+ (0x2C7E, 'M', u'ȿ'),
+ (0x2C7F, 'M', u'ɀ'),
+ (0x2C80, 'M', u'ⲁ'),
+ (0x2C81, 'V'),
+ (0x2C82, 'M', u'ⲃ'),
+ (0x2C83, 'V'),
+ (0x2C84, 'M', u'ⲅ'),
+ (0x2C85, 'V'),
+ (0x2C86, 'M', u'ⲇ'),
+ (0x2C87, 'V'),
+ (0x2C88, 'M', u'ⲉ'),
+ (0x2C89, 'V'),
+ (0x2C8A, 'M', u'ⲋ'),
+ (0x2C8B, 'V'),
+ (0x2C8C, 'M', u'ⲍ'),
+ (0x2C8D, 'V'),
+ (0x2C8E, 'M', u'ⲏ'),
+ (0x2C8F, 'V'),
+ (0x2C90, 'M', u'ⲑ'),
+ (0x2C91, 'V'),
+ (0x2C92, 'M', u'ⲓ'),
+ (0x2C93, 'V'),
+ (0x2C94, 'M', u'ⲕ'),
+ (0x2C95, 'V'),
+ (0x2C96, 'M', u'ⲗ'),
+ (0x2C97, 'V'),
+ (0x2C98, 'M', u'ⲙ'),
+ (0x2C99, 'V'),
+ (0x2C9A, 'M', u'ⲛ'),
+ (0x2C9B, 'V'),
+ (0x2C9C, 'M', u'ⲝ'),
+ (0x2C9D, 'V'),
+ (0x2C9E, 'M', u'ⲟ'),
+ (0x2C9F, 'V'),
+ (0x2CA0, 'M', u'ⲡ'),
+ (0x2CA1, 'V'),
+ (0x2CA2, 'M', u'ⲣ'),
+ (0x2CA3, 'V'),
+ (0x2CA4, 'M', u'ⲥ'),
+ (0x2CA5, 'V'),
+ (0x2CA6, 'M', u'ⲧ'),
+ (0x2CA7, 'V'),
+ (0x2CA8, 'M', u'ⲩ'),
+ (0x2CA9, 'V'),
+ (0x2CAA, 'M', u'ⲫ'),
+ (0x2CAB, 'V'),
+ (0x2CAC, 'M', u'ⲭ'),
+ (0x2CAD, 'V'),
+ (0x2CAE, 'M', u'ⲯ'),
+ (0x2CAF, 'V'),
+ (0x2CB0, 'M', u'ⲱ'),
+ (0x2CB1, 'V'),
+ (0x2CB2, 'M', u'ⲳ'),
+ (0x2CB3, 'V'),
+ (0x2CB4, 'M', u'ⲵ'),
+ (0x2CB5, 'V'),
+ (0x2CB6, 'M', u'ⲷ'),
+ (0x2CB7, 'V'),
+ (0x2CB8, 'M', u'ⲹ'),
+ (0x2CB9, 'V'),
+ (0x2CBA, 'M', u'ⲻ'),
+ (0x2CBB, 'V'),
+ (0x2CBC, 'M', u'ⲽ'),
+ (0x2CBD, 'V'),
+ (0x2CBE, 'M', u'ⲿ'),
+ ]
+
+def _seg_26():
+ return [
+ (0x2CBF, 'V'),
+ (0x2CC0, 'M', u'ⳁ'),
+ (0x2CC1, 'V'),
+ (0x2CC2, 'M', u'ⳃ'),
+ (0x2CC3, 'V'),
+ (0x2CC4, 'M', u'ⳅ'),
+ (0x2CC5, 'V'),
+ (0x2CC6, 'M', u'ⳇ'),
+ (0x2CC7, 'V'),
+ (0x2CC8, 'M', u'ⳉ'),
+ (0x2CC9, 'V'),
+ (0x2CCA, 'M', u'ⳋ'),
+ (0x2CCB, 'V'),
+ (0x2CCC, 'M', u'ⳍ'),
+ (0x2CCD, 'V'),
+ (0x2CCE, 'M', u'ⳏ'),
+ (0x2CCF, 'V'),
+ (0x2CD0, 'M', u'ⳑ'),
+ (0x2CD1, 'V'),
+ (0x2CD2, 'M', u'ⳓ'),
+ (0x2CD3, 'V'),
+ (0x2CD4, 'M', u'ⳕ'),
+ (0x2CD5, 'V'),
+ (0x2CD6, 'M', u'ⳗ'),
+ (0x2CD7, 'V'),
+ (0x2CD8, 'M', u'ⳙ'),
+ (0x2CD9, 'V'),
+ (0x2CDA, 'M', u'ⳛ'),
+ (0x2CDB, 'V'),
+ (0x2CDC, 'M', u'ⳝ'),
+ (0x2CDD, 'V'),
+ (0x2CDE, 'M', u'ⳟ'),
+ (0x2CDF, 'V'),
+ (0x2CE0, 'M', u'ⳡ'),
+ (0x2CE1, 'V'),
+ (0x2CE2, 'M', u'ⳣ'),
+ (0x2CE3, 'V'),
+ (0x2CEB, 'M', u'ⳬ'),
+ (0x2CEC, 'V'),
+ (0x2CED, 'M', u'ⳮ'),
+ (0x2CEE, 'V'),
+ (0x2CF2, 'M', u'ⳳ'),
+ (0x2CF3, 'V'),
+ (0x2CF4, 'X'),
+ (0x2CF9, 'V'),
+ (0x2D26, 'X'),
+ (0x2D27, 'V'),
+ (0x2D28, 'X'),
+ (0x2D2D, 'V'),
+ (0x2D2E, 'X'),
+ (0x2D30, 'V'),
+ (0x2D68, 'X'),
+ (0x2D6F, 'M', u'ⵡ'),
+ (0x2D70, 'V'),
+ (0x2D71, 'X'),
+ (0x2D7F, 'V'),
+ (0x2D97, 'X'),
+ (0x2DA0, 'V'),
+ (0x2DA7, 'X'),
+ (0x2DA8, 'V'),
+ (0x2DAF, 'X'),
+ (0x2DB0, 'V'),
+ (0x2DB7, 'X'),
+ (0x2DB8, 'V'),
+ (0x2DBF, 'X'),
+ (0x2DC0, 'V'),
+ (0x2DC7, 'X'),
+ (0x2DC8, 'V'),
+ (0x2DCF, 'X'),
+ (0x2DD0, 'V'),
+ (0x2DD7, 'X'),
+ (0x2DD8, 'V'),
+ (0x2DDF, 'X'),
+ (0x2DE0, 'V'),
+ (0x2E53, 'X'),
+ (0x2E80, 'V'),
+ (0x2E9A, 'X'),
+ (0x2E9B, 'V'),
+ (0x2E9F, 'M', u'母'),
+ (0x2EA0, 'V'),
+ (0x2EF3, 'M', u'龟'),
+ (0x2EF4, 'X'),
+ (0x2F00, 'M', u'一'),
+ (0x2F01, 'M', u'丨'),
+ (0x2F02, 'M', u'丶'),
+ (0x2F03, 'M', u'丿'),
+ (0x2F04, 'M', u'乙'),
+ (0x2F05, 'M', u'亅'),
+ (0x2F06, 'M', u'二'),
+ (0x2F07, 'M', u'亠'),
+ (0x2F08, 'M', u'人'),
+ (0x2F09, 'M', u'儿'),
+ (0x2F0A, 'M', u'入'),
+ (0x2F0B, 'M', u'八'),
+ (0x2F0C, 'M', u'冂'),
+ (0x2F0D, 'M', u'冖'),
+ (0x2F0E, 'M', u'冫'),
+ (0x2F0F, 'M', u'几'),
+ (0x2F10, 'M', u'凵'),
+ (0x2F11, 'M', u'刀'),
+ ]
+
+def _seg_27():
+ return [
+ (0x2F12, 'M', u'力'),
+ (0x2F13, 'M', u'勹'),
+ (0x2F14, 'M', u'匕'),
+ (0x2F15, 'M', u'匚'),
+ (0x2F16, 'M', u'匸'),
+ (0x2F17, 'M', u'十'),
+ (0x2F18, 'M', u'卜'),
+ (0x2F19, 'M', u'卩'),
+ (0x2F1A, 'M', u'厂'),
+ (0x2F1B, 'M', u'厶'),
+ (0x2F1C, 'M', u'又'),
+ (0x2F1D, 'M', u'口'),
+ (0x2F1E, 'M', u'囗'),
+ (0x2F1F, 'M', u'土'),
+ (0x2F20, 'M', u'士'),
+ (0x2F21, 'M', u'夂'),
+ (0x2F22, 'M', u'夊'),
+ (0x2F23, 'M', u'夕'),
+ (0x2F24, 'M', u'大'),
+ (0x2F25, 'M', u'女'),
+ (0x2F26, 'M', u'子'),
+ (0x2F27, 'M', u'宀'),
+ (0x2F28, 'M', u'寸'),
+ (0x2F29, 'M', u'小'),
+ (0x2F2A, 'M', u'尢'),
+ (0x2F2B, 'M', u'尸'),
+ (0x2F2C, 'M', u'屮'),
+ (0x2F2D, 'M', u'山'),
+ (0x2F2E, 'M', u'巛'),
+ (0x2F2F, 'M', u'工'),
+ (0x2F30, 'M', u'己'),
+ (0x2F31, 'M', u'巾'),
+ (0x2F32, 'M', u'干'),
+ (0x2F33, 'M', u'幺'),
+ (0x2F34, 'M', u'广'),
+ (0x2F35, 'M', u'廴'),
+ (0x2F36, 'M', u'廾'),
+ (0x2F37, 'M', u'弋'),
+ (0x2F38, 'M', u'弓'),
+ (0x2F39, 'M', u'彐'),
+ (0x2F3A, 'M', u'彡'),
+ (0x2F3B, 'M', u'彳'),
+ (0x2F3C, 'M', u'心'),
+ (0x2F3D, 'M', u'戈'),
+ (0x2F3E, 'M', u'戶'),
+ (0x2F3F, 'M', u'手'),
+ (0x2F40, 'M', u'支'),
+ (0x2F41, 'M', u'攴'),
+ (0x2F42, 'M', u'文'),
+ (0x2F43, 'M', u'斗'),
+ (0x2F44, 'M', u'斤'),
+ (0x2F45, 'M', u'方'),
+ (0x2F46, 'M', u'无'),
+ (0x2F47, 'M', u'日'),
+ (0x2F48, 'M', u'曰'),
+ (0x2F49, 'M', u'月'),
+ (0x2F4A, 'M', u'木'),
+ (0x2F4B, 'M', u'欠'),
+ (0x2F4C, 'M', u'止'),
+ (0x2F4D, 'M', u'歹'),
+ (0x2F4E, 'M', u'殳'),
+ (0x2F4F, 'M', u'毋'),
+ (0x2F50, 'M', u'比'),
+ (0x2F51, 'M', u'毛'),
+ (0x2F52, 'M', u'氏'),
+ (0x2F53, 'M', u'气'),
+ (0x2F54, 'M', u'水'),
+ (0x2F55, 'M', u'火'),
+ (0x2F56, 'M', u'爪'),
+ (0x2F57, 'M', u'父'),
+ (0x2F58, 'M', u'爻'),
+ (0x2F59, 'M', u'爿'),
+ (0x2F5A, 'M', u'片'),
+ (0x2F5B, 'M', u'牙'),
+ (0x2F5C, 'M', u'牛'),
+ (0x2F5D, 'M', u'犬'),
+ (0x2F5E, 'M', u'玄'),
+ (0x2F5F, 'M', u'玉'),
+ (0x2F60, 'M', u'瓜'),
+ (0x2F61, 'M', u'瓦'),
+ (0x2F62, 'M', u'甘'),
+ (0x2F63, 'M', u'生'),
+ (0x2F64, 'M', u'用'),
+ (0x2F65, 'M', u'田'),
+ (0x2F66, 'M', u'疋'),
+ (0x2F67, 'M', u'疒'),
+ (0x2F68, 'M', u'癶'),
+ (0x2F69, 'M', u'白'),
+ (0x2F6A, 'M', u'皮'),
+ (0x2F6B, 'M', u'皿'),
+ (0x2F6C, 'M', u'目'),
+ (0x2F6D, 'M', u'矛'),
+ (0x2F6E, 'M', u'矢'),
+ (0x2F6F, 'M', u'石'),
+ (0x2F70, 'M', u'示'),
+ (0x2F71, 'M', u'禸'),
+ (0x2F72, 'M', u'禾'),
+ (0x2F73, 'M', u'穴'),
+ (0x2F74, 'M', u'立'),
+ (0x2F75, 'M', u'竹'),
+ ]
+
+def _seg_28():
+ return [
+ (0x2F76, 'M', u'米'),
+ (0x2F77, 'M', u'糸'),
+ (0x2F78, 'M', u'缶'),
+ (0x2F79, 'M', u'网'),
+ (0x2F7A, 'M', u'羊'),
+ (0x2F7B, 'M', u'羽'),
+ (0x2F7C, 'M', u'老'),
+ (0x2F7D, 'M', u'而'),
+ (0x2F7E, 'M', u'耒'),
+ (0x2F7F, 'M', u'耳'),
+ (0x2F80, 'M', u'聿'),
+ (0x2F81, 'M', u'肉'),
+ (0x2F82, 'M', u'臣'),
+ (0x2F83, 'M', u'自'),
+ (0x2F84, 'M', u'至'),
+ (0x2F85, 'M', u'臼'),
+ (0x2F86, 'M', u'舌'),
+ (0x2F87, 'M', u'舛'),
+ (0x2F88, 'M', u'舟'),
+ (0x2F89, 'M', u'艮'),
+ (0x2F8A, 'M', u'色'),
+ (0x2F8B, 'M', u'艸'),
+ (0x2F8C, 'M', u'虍'),
+ (0x2F8D, 'M', u'虫'),
+ (0x2F8E, 'M', u'血'),
+ (0x2F8F, 'M', u'行'),
+ (0x2F90, 'M', u'衣'),
+ (0x2F91, 'M', u'襾'),
+ (0x2F92, 'M', u'見'),
+ (0x2F93, 'M', u'角'),
+ (0x2F94, 'M', u'言'),
+ (0x2F95, 'M', u'谷'),
+ (0x2F96, 'M', u'豆'),
+ (0x2F97, 'M', u'豕'),
+ (0x2F98, 'M', u'豸'),
+ (0x2F99, 'M', u'貝'),
+ (0x2F9A, 'M', u'赤'),
+ (0x2F9B, 'M', u'走'),
+ (0x2F9C, 'M', u'足'),
+ (0x2F9D, 'M', u'身'),
+ (0x2F9E, 'M', u'車'),
+ (0x2F9F, 'M', u'辛'),
+ (0x2FA0, 'M', u'辰'),
+ (0x2FA1, 'M', u'辵'),
+ (0x2FA2, 'M', u'邑'),
+ (0x2FA3, 'M', u'酉'),
+ (0x2FA4, 'M', u'釆'),
+ (0x2FA5, 'M', u'里'),
+ (0x2FA6, 'M', u'金'),
+ (0x2FA7, 'M', u'長'),
+ (0x2FA8, 'M', u'門'),
+ (0x2FA9, 'M', u'阜'),
+ (0x2FAA, 'M', u'隶'),
+ (0x2FAB, 'M', u'隹'),
+ (0x2FAC, 'M', u'雨'),
+ (0x2FAD, 'M', u'靑'),
+ (0x2FAE, 'M', u'非'),
+ (0x2FAF, 'M', u'面'),
+ (0x2FB0, 'M', u'革'),
+ (0x2FB1, 'M', u'韋'),
+ (0x2FB2, 'M', u'韭'),
+ (0x2FB3, 'M', u'音'),
+ (0x2FB4, 'M', u'頁'),
+ (0x2FB5, 'M', u'風'),
+ (0x2FB6, 'M', u'飛'),
+ (0x2FB7, 'M', u'食'),
+ (0x2FB8, 'M', u'首'),
+ (0x2FB9, 'M', u'香'),
+ (0x2FBA, 'M', u'馬'),
+ (0x2FBB, 'M', u'骨'),
+ (0x2FBC, 'M', u'高'),
+ (0x2FBD, 'M', u'髟'),
+ (0x2FBE, 'M', u'鬥'),
+ (0x2FBF, 'M', u'鬯'),
+ (0x2FC0, 'M', u'鬲'),
+ (0x2FC1, 'M', u'鬼'),
+ (0x2FC2, 'M', u'魚'),
+ (0x2FC3, 'M', u'鳥'),
+ (0x2FC4, 'M', u'鹵'),
+ (0x2FC5, 'M', u'鹿'),
+ (0x2FC6, 'M', u'麥'),
+ (0x2FC7, 'M', u'麻'),
+ (0x2FC8, 'M', u'黃'),
+ (0x2FC9, 'M', u'黍'),
+ (0x2FCA, 'M', u'黑'),
+ (0x2FCB, 'M', u'黹'),
+ (0x2FCC, 'M', u'黽'),
+ (0x2FCD, 'M', u'鼎'),
+ (0x2FCE, 'M', u'鼓'),
+ (0x2FCF, 'M', u'鼠'),
+ (0x2FD0, 'M', u'鼻'),
+ (0x2FD1, 'M', u'齊'),
+ (0x2FD2, 'M', u'齒'),
+ (0x2FD3, 'M', u'龍'),
+ (0x2FD4, 'M', u'龜'),
+ (0x2FD5, 'M', u'龠'),
+ (0x2FD6, 'X'),
+ (0x3000, '3', u' '),
+ (0x3001, 'V'),
+ (0x3002, 'M', u'.'),
+ ]
+
+def _seg_29():
+ return [
+ (0x3003, 'V'),
+ (0x3036, 'M', u'〒'),
+ (0x3037, 'V'),
+ (0x3038, 'M', u'十'),
+ (0x3039, 'M', u'卄'),
+ (0x303A, 'M', u'卅'),
+ (0x303B, 'V'),
+ (0x3040, 'X'),
+ (0x3041, 'V'),
+ (0x3097, 'X'),
+ (0x3099, 'V'),
+ (0x309B, '3', u' ゙'),
+ (0x309C, '3', u' ゚'),
+ (0x309D, 'V'),
+ (0x309F, 'M', u'より'),
+ (0x30A0, 'V'),
+ (0x30FF, 'M', u'コト'),
+ (0x3100, 'X'),
+ (0x3105, 'V'),
+ (0x3130, 'X'),
+ (0x3131, 'M', u'ᄀ'),
+ (0x3132, 'M', u'ᄁ'),
+ (0x3133, 'M', u'ᆪ'),
+ (0x3134, 'M', u'ᄂ'),
+ (0x3135, 'M', u'ᆬ'),
+ (0x3136, 'M', u'ᆭ'),
+ (0x3137, 'M', u'ᄃ'),
+ (0x3138, 'M', u'ᄄ'),
+ (0x3139, 'M', u'ᄅ'),
+ (0x313A, 'M', u'ᆰ'),
+ (0x313B, 'M', u'ᆱ'),
+ (0x313C, 'M', u'ᆲ'),
+ (0x313D, 'M', u'ᆳ'),
+ (0x313E, 'M', u'ᆴ'),
+ (0x313F, 'M', u'ᆵ'),
+ (0x3140, 'M', u'ᄚ'),
+ (0x3141, 'M', u'ᄆ'),
+ (0x3142, 'M', u'ᄇ'),
+ (0x3143, 'M', u'ᄈ'),
+ (0x3144, 'M', u'ᄡ'),
+ (0x3145, 'M', u'ᄉ'),
+ (0x3146, 'M', u'ᄊ'),
+ (0x3147, 'M', u'ᄋ'),
+ (0x3148, 'M', u'ᄌ'),
+ (0x3149, 'M', u'ᄍ'),
+ (0x314A, 'M', u'ᄎ'),
+ (0x314B, 'M', u'ᄏ'),
+ (0x314C, 'M', u'ᄐ'),
+ (0x314D, 'M', u'ᄑ'),
+ (0x314E, 'M', u'ᄒ'),
+ (0x314F, 'M', u'ᅡ'),
+ (0x3150, 'M', u'ᅢ'),
+ (0x3151, 'M', u'ᅣ'),
+ (0x3152, 'M', u'ᅤ'),
+ (0x3153, 'M', u'ᅥ'),
+ (0x3154, 'M', u'ᅦ'),
+ (0x3155, 'M', u'ᅧ'),
+ (0x3156, 'M', u'ᅨ'),
+ (0x3157, 'M', u'ᅩ'),
+ (0x3158, 'M', u'ᅪ'),
+ (0x3159, 'M', u'ᅫ'),
+ (0x315A, 'M', u'ᅬ'),
+ (0x315B, 'M', u'ᅭ'),
+ (0x315C, 'M', u'ᅮ'),
+ (0x315D, 'M', u'ᅯ'),
+ (0x315E, 'M', u'ᅰ'),
+ (0x315F, 'M', u'ᅱ'),
+ (0x3160, 'M', u'ᅲ'),
+ (0x3161, 'M', u'ᅳ'),
+ (0x3162, 'M', u'ᅴ'),
+ (0x3163, 'M', u'ᅵ'),
+ (0x3164, 'X'),
+ (0x3165, 'M', u'ᄔ'),
+ (0x3166, 'M', u'ᄕ'),
+ (0x3167, 'M', u'ᇇ'),
+ (0x3168, 'M', u'ᇈ'),
+ (0x3169, 'M', u'ᇌ'),
+ (0x316A, 'M', u'ᇎ'),
+ (0x316B, 'M', u'ᇓ'),
+ (0x316C, 'M', u'ᇗ'),
+ (0x316D, 'M', u'ᇙ'),
+ (0x316E, 'M', u'ᄜ'),
+ (0x316F, 'M', u'ᇝ'),
+ (0x3170, 'M', u'ᇟ'),
+ (0x3171, 'M', u'ᄝ'),
+ (0x3172, 'M', u'ᄞ'),
+ (0x3173, 'M', u'ᄠ'),
+ (0x3174, 'M', u'ᄢ'),
+ (0x3175, 'M', u'ᄣ'),
+ (0x3176, 'M', u'ᄧ'),
+ (0x3177, 'M', u'ᄩ'),
+ (0x3178, 'M', u'ᄫ'),
+ (0x3179, 'M', u'ᄬ'),
+ (0x317A, 'M', u'ᄭ'),
+ (0x317B, 'M', u'ᄮ'),
+ (0x317C, 'M', u'ᄯ'),
+ (0x317D, 'M', u'ᄲ'),
+ (0x317E, 'M', u'ᄶ'),
+ (0x317F, 'M', u'ᅀ'),
+ (0x3180, 'M', u'ᅇ'),
+ ]
+
+def _seg_30():
+ return [
+ (0x3181, 'M', u'ᅌ'),
+ (0x3182, 'M', u'ᇱ'),
+ (0x3183, 'M', u'ᇲ'),
+ (0x3184, 'M', u'ᅗ'),
+ (0x3185, 'M', u'ᅘ'),
+ (0x3186, 'M', u'ᅙ'),
+ (0x3187, 'M', u'ᆄ'),
+ (0x3188, 'M', u'ᆅ'),
+ (0x3189, 'M', u'ᆈ'),
+ (0x318A, 'M', u'ᆑ'),
+ (0x318B, 'M', u'ᆒ'),
+ (0x318C, 'M', u'ᆔ'),
+ (0x318D, 'M', u'ᆞ'),
+ (0x318E, 'M', u'ᆡ'),
+ (0x318F, 'X'),
+ (0x3190, 'V'),
+ (0x3192, 'M', u'一'),
+ (0x3193, 'M', u'二'),
+ (0x3194, 'M', u'三'),
+ (0x3195, 'M', u'四'),
+ (0x3196, 'M', u'上'),
+ (0x3197, 'M', u'中'),
+ (0x3198, 'M', u'下'),
+ (0x3199, 'M', u'甲'),
+ (0x319A, 'M', u'乙'),
+ (0x319B, 'M', u'丙'),
+ (0x319C, 'M', u'丁'),
+ (0x319D, 'M', u'天'),
+ (0x319E, 'M', u'地'),
+ (0x319F, 'M', u'人'),
+ (0x31A0, 'V'),
+ (0x31E4, 'X'),
+ (0x31F0, 'V'),
+ (0x3200, '3', u'(ᄀ)'),
+ (0x3201, '3', u'(ᄂ)'),
+ (0x3202, '3', u'(ᄃ)'),
+ (0x3203, '3', u'(ᄅ)'),
+ (0x3204, '3', u'(ᄆ)'),
+ (0x3205, '3', u'(ᄇ)'),
+ (0x3206, '3', u'(ᄉ)'),
+ (0x3207, '3', u'(ᄋ)'),
+ (0x3208, '3', u'(ᄌ)'),
+ (0x3209, '3', u'(ᄎ)'),
+ (0x320A, '3', u'(ᄏ)'),
+ (0x320B, '3', u'(ᄐ)'),
+ (0x320C, '3', u'(ᄑ)'),
+ (0x320D, '3', u'(ᄒ)'),
+ (0x320E, '3', u'(가)'),
+ (0x320F, '3', u'(나)'),
+ (0x3210, '3', u'(다)'),
+ (0x3211, '3', u'(라)'),
+ (0x3212, '3', u'(마)'),
+ (0x3213, '3', u'(바)'),
+ (0x3214, '3', u'(사)'),
+ (0x3215, '3', u'(아)'),
+ (0x3216, '3', u'(자)'),
+ (0x3217, '3', u'(차)'),
+ (0x3218, '3', u'(카)'),
+ (0x3219, '3', u'(타)'),
+ (0x321A, '3', u'(파)'),
+ (0x321B, '3', u'(하)'),
+ (0x321C, '3', u'(주)'),
+ (0x321D, '3', u'(오전)'),
+ (0x321E, '3', u'(오후)'),
+ (0x321F, 'X'),
+ (0x3220, '3', u'(一)'),
+ (0x3221, '3', u'(二)'),
+ (0x3222, '3', u'(三)'),
+ (0x3223, '3', u'(四)'),
+ (0x3224, '3', u'(五)'),
+ (0x3225, '3', u'(六)'),
+ (0x3226, '3', u'(七)'),
+ (0x3227, '3', u'(八)'),
+ (0x3228, '3', u'(九)'),
+ (0x3229, '3', u'(十)'),
+ (0x322A, '3', u'(月)'),
+ (0x322B, '3', u'(火)'),
+ (0x322C, '3', u'(水)'),
+ (0x322D, '3', u'(木)'),
+ (0x322E, '3', u'(金)'),
+ (0x322F, '3', u'(土)'),
+ (0x3230, '3', u'(日)'),
+ (0x3231, '3', u'(株)'),
+ (0x3232, '3', u'(有)'),
+ (0x3233, '3', u'(社)'),
+ (0x3234, '3', u'(名)'),
+ (0x3235, '3', u'(特)'),
+ (0x3236, '3', u'(財)'),
+ (0x3237, '3', u'(祝)'),
+ (0x3238, '3', u'(労)'),
+ (0x3239, '3', u'(代)'),
+ (0x323A, '3', u'(呼)'),
+ (0x323B, '3', u'(学)'),
+ (0x323C, '3', u'(監)'),
+ (0x323D, '3', u'(企)'),
+ (0x323E, '3', u'(資)'),
+ (0x323F, '3', u'(協)'),
+ (0x3240, '3', u'(祭)'),
+ (0x3241, '3', u'(休)'),
+ (0x3242, '3', u'(自)'),
+ ]
+
+def _seg_31():
+ return [
+ (0x3243, '3', u'(至)'),
+ (0x3244, 'M', u'問'),
+ (0x3245, 'M', u'幼'),
+ (0x3246, 'M', u'文'),
+ (0x3247, 'M', u'箏'),
+ (0x3248, 'V'),
+ (0x3250, 'M', u'pte'),
+ (0x3251, 'M', u'21'),
+ (0x3252, 'M', u'22'),
+ (0x3253, 'M', u'23'),
+ (0x3254, 'M', u'24'),
+ (0x3255, 'M', u'25'),
+ (0x3256, 'M', u'26'),
+ (0x3257, 'M', u'27'),
+ (0x3258, 'M', u'28'),
+ (0x3259, 'M', u'29'),
+ (0x325A, 'M', u'30'),
+ (0x325B, 'M', u'31'),
+ (0x325C, 'M', u'32'),
+ (0x325D, 'M', u'33'),
+ (0x325E, 'M', u'34'),
+ (0x325F, 'M', u'35'),
+ (0x3260, 'M', u'ᄀ'),
+ (0x3261, 'M', u'ᄂ'),
+ (0x3262, 'M', u'ᄃ'),
+ (0x3263, 'M', u'ᄅ'),
+ (0x3264, 'M', u'ᄆ'),
+ (0x3265, 'M', u'ᄇ'),
+ (0x3266, 'M', u'ᄉ'),
+ (0x3267, 'M', u'ᄋ'),
+ (0x3268, 'M', u'ᄌ'),
+ (0x3269, 'M', u'ᄎ'),
+ (0x326A, 'M', u'ᄏ'),
+ (0x326B, 'M', u'ᄐ'),
+ (0x326C, 'M', u'ᄑ'),
+ (0x326D, 'M', u'ᄒ'),
+ (0x326E, 'M', u'가'),
+ (0x326F, 'M', u'나'),
+ (0x3270, 'M', u'다'),
+ (0x3271, 'M', u'라'),
+ (0x3272, 'M', u'마'),
+ (0x3273, 'M', u'바'),
+ (0x3274, 'M', u'사'),
+ (0x3275, 'M', u'아'),
+ (0x3276, 'M', u'자'),
+ (0x3277, 'M', u'차'),
+ (0x3278, 'M', u'카'),
+ (0x3279, 'M', u'타'),
+ (0x327A, 'M', u'파'),
+ (0x327B, 'M', u'하'),
+ (0x327C, 'M', u'참고'),
+ (0x327D, 'M', u'주의'),
+ (0x327E, 'M', u'우'),
+ (0x327F, 'V'),
+ (0x3280, 'M', u'一'),
+ (0x3281, 'M', u'二'),
+ (0x3282, 'M', u'三'),
+ (0x3283, 'M', u'四'),
+ (0x3284, 'M', u'五'),
+ (0x3285, 'M', u'六'),
+ (0x3286, 'M', u'七'),
+ (0x3287, 'M', u'八'),
+ (0x3288, 'M', u'九'),
+ (0x3289, 'M', u'十'),
+ (0x328A, 'M', u'月'),
+ (0x328B, 'M', u'火'),
+ (0x328C, 'M', u'水'),
+ (0x328D, 'M', u'木'),
+ (0x328E, 'M', u'金'),
+ (0x328F, 'M', u'土'),
+ (0x3290, 'M', u'日'),
+ (0x3291, 'M', u'株'),
+ (0x3292, 'M', u'有'),
+ (0x3293, 'M', u'社'),
+ (0x3294, 'M', u'名'),
+ (0x3295, 'M', u'特'),
+ (0x3296, 'M', u'財'),
+ (0x3297, 'M', u'祝'),
+ (0x3298, 'M', u'労'),
+ (0x3299, 'M', u'秘'),
+ (0x329A, 'M', u'男'),
+ (0x329B, 'M', u'女'),
+ (0x329C, 'M', u'適'),
+ (0x329D, 'M', u'優'),
+ (0x329E, 'M', u'印'),
+ (0x329F, 'M', u'注'),
+ (0x32A0, 'M', u'項'),
+ (0x32A1, 'M', u'休'),
+ (0x32A2, 'M', u'写'),
+ (0x32A3, 'M', u'正'),
+ (0x32A4, 'M', u'上'),
+ (0x32A5, 'M', u'中'),
+ (0x32A6, 'M', u'下'),
+ (0x32A7, 'M', u'左'),
+ (0x32A8, 'M', u'右'),
+ (0x32A9, 'M', u'医'),
+ (0x32AA, 'M', u'宗'),
+ (0x32AB, 'M', u'学'),
+ (0x32AC, 'M', u'監'),
+ (0x32AD, 'M', u'企'),
+ ]
+
+def _seg_32():
+ return [
+ (0x32AE, 'M', u'資'),
+ (0x32AF, 'M', u'協'),
+ (0x32B0, 'M', u'夜'),
+ (0x32B1, 'M', u'36'),
+ (0x32B2, 'M', u'37'),
+ (0x32B3, 'M', u'38'),
+ (0x32B4, 'M', u'39'),
+ (0x32B5, 'M', u'40'),
+ (0x32B6, 'M', u'41'),
+ (0x32B7, 'M', u'42'),
+ (0x32B8, 'M', u'43'),
+ (0x32B9, 'M', u'44'),
+ (0x32BA, 'M', u'45'),
+ (0x32BB, 'M', u'46'),
+ (0x32BC, 'M', u'47'),
+ (0x32BD, 'M', u'48'),
+ (0x32BE, 'M', u'49'),
+ (0x32BF, 'M', u'50'),
+ (0x32C0, 'M', u'1月'),
+ (0x32C1, 'M', u'2月'),
+ (0x32C2, 'M', u'3月'),
+ (0x32C3, 'M', u'4月'),
+ (0x32C4, 'M', u'5月'),
+ (0x32C5, 'M', u'6月'),
+ (0x32C6, 'M', u'7月'),
+ (0x32C7, 'M', u'8月'),
+ (0x32C8, 'M', u'9月'),
+ (0x32C9, 'M', u'10月'),
+ (0x32CA, 'M', u'11月'),
+ (0x32CB, 'M', u'12月'),
+ (0x32CC, 'M', u'hg'),
+ (0x32CD, 'M', u'erg'),
+ (0x32CE, 'M', u'ev'),
+ (0x32CF, 'M', u'ltd'),
+ (0x32D0, 'M', u'ア'),
+ (0x32D1, 'M', u'イ'),
+ (0x32D2, 'M', u'ウ'),
+ (0x32D3, 'M', u'エ'),
+ (0x32D4, 'M', u'オ'),
+ (0x32D5, 'M', u'カ'),
+ (0x32D6, 'M', u'キ'),
+ (0x32D7, 'M', u'ク'),
+ (0x32D8, 'M', u'ケ'),
+ (0x32D9, 'M', u'コ'),
+ (0x32DA, 'M', u'サ'),
+ (0x32DB, 'M', u'シ'),
+ (0x32DC, 'M', u'ス'),
+ (0x32DD, 'M', u'セ'),
+ (0x32DE, 'M', u'ソ'),
+ (0x32DF, 'M', u'タ'),
+ (0x32E0, 'M', u'チ'),
+ (0x32E1, 'M', u'ツ'),
+ (0x32E2, 'M', u'テ'),
+ (0x32E3, 'M', u'ト'),
+ (0x32E4, 'M', u'ナ'),
+ (0x32E5, 'M', u'ニ'),
+ (0x32E6, 'M', u'ヌ'),
+ (0x32E7, 'M', u'ネ'),
+ (0x32E8, 'M', u'ノ'),
+ (0x32E9, 'M', u'ハ'),
+ (0x32EA, 'M', u'ヒ'),
+ (0x32EB, 'M', u'フ'),
+ (0x32EC, 'M', u'ヘ'),
+ (0x32ED, 'M', u'ホ'),
+ (0x32EE, 'M', u'マ'),
+ (0x32EF, 'M', u'ミ'),
+ (0x32F0, 'M', u'ム'),
+ (0x32F1, 'M', u'メ'),
+ (0x32F2, 'M', u'モ'),
+ (0x32F3, 'M', u'ヤ'),
+ (0x32F4, 'M', u'ユ'),
+ (0x32F5, 'M', u'ヨ'),
+ (0x32F6, 'M', u'ラ'),
+ (0x32F7, 'M', u'リ'),
+ (0x32F8, 'M', u'ル'),
+ (0x32F9, 'M', u'レ'),
+ (0x32FA, 'M', u'ロ'),
+ (0x32FB, 'M', u'ワ'),
+ (0x32FC, 'M', u'ヰ'),
+ (0x32FD, 'M', u'ヱ'),
+ (0x32FE, 'M', u'ヲ'),
+ (0x32FF, 'M', u'令和'),
+ (0x3300, 'M', u'アパート'),
+ (0x3301, 'M', u'アルファ'),
+ (0x3302, 'M', u'アンペア'),
+ (0x3303, 'M', u'アール'),
+ (0x3304, 'M', u'イニング'),
+ (0x3305, 'M', u'インチ'),
+ (0x3306, 'M', u'ウォン'),
+ (0x3307, 'M', u'エスクード'),
+ (0x3308, 'M', u'エーカー'),
+ (0x3309, 'M', u'オンス'),
+ (0x330A, 'M', u'オーム'),
+ (0x330B, 'M', u'カイリ'),
+ (0x330C, 'M', u'カラット'),
+ (0x330D, 'M', u'カロリー'),
+ (0x330E, 'M', u'ガロン'),
+ (0x330F, 'M', u'ガンマ'),
+ (0x3310, 'M', u'ギガ'),
+ (0x3311, 'M', u'ギニー'),
+ ]
+
+def _seg_33():
+ return [
+ (0x3312, 'M', u'キュリー'),
+ (0x3313, 'M', u'ギルダー'),
+ (0x3314, 'M', u'キロ'),
+ (0x3315, 'M', u'キログラム'),
+ (0x3316, 'M', u'キロメートル'),
+ (0x3317, 'M', u'キロワット'),
+ (0x3318, 'M', u'グラム'),
+ (0x3319, 'M', u'グラムトン'),
+ (0x331A, 'M', u'クルゼイロ'),
+ (0x331B, 'M', u'クローネ'),
+ (0x331C, 'M', u'ケース'),
+ (0x331D, 'M', u'コルナ'),
+ (0x331E, 'M', u'コーポ'),
+ (0x331F, 'M', u'サイクル'),
+ (0x3320, 'M', u'サンチーム'),
+ (0x3321, 'M', u'シリング'),
+ (0x3322, 'M', u'センチ'),
+ (0x3323, 'M', u'セント'),
+ (0x3324, 'M', u'ダース'),
+ (0x3325, 'M', u'デシ'),
+ (0x3326, 'M', u'ドル'),
+ (0x3327, 'M', u'トン'),
+ (0x3328, 'M', u'ナノ'),
+ (0x3329, 'M', u'ノット'),
+ (0x332A, 'M', u'ハイツ'),
+ (0x332B, 'M', u'パーセント'),
+ (0x332C, 'M', u'パーツ'),
+ (0x332D, 'M', u'バーレル'),
+ (0x332E, 'M', u'ピアストル'),
+ (0x332F, 'M', u'ピクル'),
+ (0x3330, 'M', u'ピコ'),
+ (0x3331, 'M', u'ビル'),
+ (0x3332, 'M', u'ファラッド'),
+ (0x3333, 'M', u'フィート'),
+ (0x3334, 'M', u'ブッシェル'),
+ (0x3335, 'M', u'フラン'),
+ (0x3336, 'M', u'ヘクタール'),
+ (0x3337, 'M', u'ペソ'),
+ (0x3338, 'M', u'ペニヒ'),
+ (0x3339, 'M', u'ヘルツ'),
+ (0x333A, 'M', u'ペンス'),
+ (0x333B, 'M', u'ページ'),
+ (0x333C, 'M', u'ベータ'),
+ (0x333D, 'M', u'ポイント'),
+ (0x333E, 'M', u'ボルト'),
+ (0x333F, 'M', u'ホン'),
+ (0x3340, 'M', u'ポンド'),
+ (0x3341, 'M', u'ホール'),
+ (0x3342, 'M', u'ホーン'),
+ (0x3343, 'M', u'マイクロ'),
+ (0x3344, 'M', u'マイル'),
+ (0x3345, 'M', u'マッハ'),
+ (0x3346, 'M', u'マルク'),
+ (0x3347, 'M', u'マンション'),
+ (0x3348, 'M', u'ミクロン'),
+ (0x3349, 'M', u'ミリ'),
+ (0x334A, 'M', u'ミリバール'),
+ (0x334B, 'M', u'メガ'),
+ (0x334C, 'M', u'メガトン'),
+ (0x334D, 'M', u'メートル'),
+ (0x334E, 'M', u'ヤード'),
+ (0x334F, 'M', u'ヤール'),
+ (0x3350, 'M', u'ユアン'),
+ (0x3351, 'M', u'リットル'),
+ (0x3352, 'M', u'リラ'),
+ (0x3353, 'M', u'ルピー'),
+ (0x3354, 'M', u'ルーブル'),
+ (0x3355, 'M', u'レム'),
+ (0x3356, 'M', u'レントゲン'),
+ (0x3357, 'M', u'ワット'),
+ (0x3358, 'M', u'0点'),
+ (0x3359, 'M', u'1点'),
+ (0x335A, 'M', u'2点'),
+ (0x335B, 'M', u'3点'),
+ (0x335C, 'M', u'4点'),
+ (0x335D, 'M', u'5点'),
+ (0x335E, 'M', u'6点'),
+ (0x335F, 'M', u'7点'),
+ (0x3360, 'M', u'8点'),
+ (0x3361, 'M', u'9点'),
+ (0x3362, 'M', u'10点'),
+ (0x3363, 'M', u'11点'),
+ (0x3364, 'M', u'12点'),
+ (0x3365, 'M', u'13点'),
+ (0x3366, 'M', u'14点'),
+ (0x3367, 'M', u'15点'),
+ (0x3368, 'M', u'16点'),
+ (0x3369, 'M', u'17点'),
+ (0x336A, 'M', u'18点'),
+ (0x336B, 'M', u'19点'),
+ (0x336C, 'M', u'20点'),
+ (0x336D, 'M', u'21点'),
+ (0x336E, 'M', u'22点'),
+ (0x336F, 'M', u'23点'),
+ (0x3370, 'M', u'24点'),
+ (0x3371, 'M', u'hpa'),
+ (0x3372, 'M', u'da'),
+ (0x3373, 'M', u'au'),
+ (0x3374, 'M', u'bar'),
+ (0x3375, 'M', u'ov'),
+ ]
+
+def _seg_34():
+ return [
+ (0x3376, 'M', u'pc'),
+ (0x3377, 'M', u'dm'),
+ (0x3378, 'M', u'dm2'),
+ (0x3379, 'M', u'dm3'),
+ (0x337A, 'M', u'iu'),
+ (0x337B, 'M', u'平成'),
+ (0x337C, 'M', u'昭和'),
+ (0x337D, 'M', u'大正'),
+ (0x337E, 'M', u'明治'),
+ (0x337F, 'M', u'株式会社'),
+ (0x3380, 'M', u'pa'),
+ (0x3381, 'M', u'na'),
+ (0x3382, 'M', u'μa'),
+ (0x3383, 'M', u'ma'),
+ (0x3384, 'M', u'ka'),
+ (0x3385, 'M', u'kb'),
+ (0x3386, 'M', u'mb'),
+ (0x3387, 'M', u'gb'),
+ (0x3388, 'M', u'cal'),
+ (0x3389, 'M', u'kcal'),
+ (0x338A, 'M', u'pf'),
+ (0x338B, 'M', u'nf'),
+ (0x338C, 'M', u'μf'),
+ (0x338D, 'M', u'μg'),
+ (0x338E, 'M', u'mg'),
+ (0x338F, 'M', u'kg'),
+ (0x3390, 'M', u'hz'),
+ (0x3391, 'M', u'khz'),
+ (0x3392, 'M', u'mhz'),
+ (0x3393, 'M', u'ghz'),
+ (0x3394, 'M', u'thz'),
+ (0x3395, 'M', u'μl'),
+ (0x3396, 'M', u'ml'),
+ (0x3397, 'M', u'dl'),
+ (0x3398, 'M', u'kl'),
+ (0x3399, 'M', u'fm'),
+ (0x339A, 'M', u'nm'),
+ (0x339B, 'M', u'μm'),
+ (0x339C, 'M', u'mm'),
+ (0x339D, 'M', u'cm'),
+ (0x339E, 'M', u'km'),
+ (0x339F, 'M', u'mm2'),
+ (0x33A0, 'M', u'cm2'),
+ (0x33A1, 'M', u'm2'),
+ (0x33A2, 'M', u'km2'),
+ (0x33A3, 'M', u'mm3'),
+ (0x33A4, 'M', u'cm3'),
+ (0x33A5, 'M', u'm3'),
+ (0x33A6, 'M', u'km3'),
+ (0x33A7, 'M', u'm∕s'),
+ (0x33A8, 'M', u'm∕s2'),
+ (0x33A9, 'M', u'pa'),
+ (0x33AA, 'M', u'kpa'),
+ (0x33AB, 'M', u'mpa'),
+ (0x33AC, 'M', u'gpa'),
+ (0x33AD, 'M', u'rad'),
+ (0x33AE, 'M', u'rad∕s'),
+ (0x33AF, 'M', u'rad∕s2'),
+ (0x33B0, 'M', u'ps'),
+ (0x33B1, 'M', u'ns'),
+ (0x33B2, 'M', u'μs'),
+ (0x33B3, 'M', u'ms'),
+ (0x33B4, 'M', u'pv'),
+ (0x33B5, 'M', u'nv'),
+ (0x33B6, 'M', u'μv'),
+ (0x33B7, 'M', u'mv'),
+ (0x33B8, 'M', u'kv'),
+ (0x33B9, 'M', u'mv'),
+ (0x33BA, 'M', u'pw'),
+ (0x33BB, 'M', u'nw'),
+ (0x33BC, 'M', u'μw'),
+ (0x33BD, 'M', u'mw'),
+ (0x33BE, 'M', u'kw'),
+ (0x33BF, 'M', u'mw'),
+ (0x33C0, 'M', u'kω'),
+ (0x33C1, 'M', u'mω'),
+ (0x33C2, 'X'),
+ (0x33C3, 'M', u'bq'),
+ (0x33C4, 'M', u'cc'),
+ (0x33C5, 'M', u'cd'),
+ (0x33C6, 'M', u'c∕kg'),
+ (0x33C7, 'X'),
+ (0x33C8, 'M', u'db'),
+ (0x33C9, 'M', u'gy'),
+ (0x33CA, 'M', u'ha'),
+ (0x33CB, 'M', u'hp'),
+ (0x33CC, 'M', u'in'),
+ (0x33CD, 'M', u'kk'),
+ (0x33CE, 'M', u'km'),
+ (0x33CF, 'M', u'kt'),
+ (0x33D0, 'M', u'lm'),
+ (0x33D1, 'M', u'ln'),
+ (0x33D2, 'M', u'log'),
+ (0x33D3, 'M', u'lx'),
+ (0x33D4, 'M', u'mb'),
+ (0x33D5, 'M', u'mil'),
+ (0x33D6, 'M', u'mol'),
+ (0x33D7, 'M', u'ph'),
+ (0x33D8, 'X'),
+ (0x33D9, 'M', u'ppm'),
+ ]
+
+def _seg_35():
+ return [
+ (0x33DA, 'M', u'pr'),
+ (0x33DB, 'M', u'sr'),
+ (0x33DC, 'M', u'sv'),
+ (0x33DD, 'M', u'wb'),
+ (0x33DE, 'M', u'v∕m'),
+ (0x33DF, 'M', u'a∕m'),
+ (0x33E0, 'M', u'1日'),
+ (0x33E1, 'M', u'2日'),
+ (0x33E2, 'M', u'3日'),
+ (0x33E3, 'M', u'4日'),
+ (0x33E4, 'M', u'5日'),
+ (0x33E5, 'M', u'6日'),
+ (0x33E6, 'M', u'7日'),
+ (0x33E7, 'M', u'8日'),
+ (0x33E8, 'M', u'9日'),
+ (0x33E9, 'M', u'10日'),
+ (0x33EA, 'M', u'11日'),
+ (0x33EB, 'M', u'12日'),
+ (0x33EC, 'M', u'13日'),
+ (0x33ED, 'M', u'14日'),
+ (0x33EE, 'M', u'15日'),
+ (0x33EF, 'M', u'16日'),
+ (0x33F0, 'M', u'17日'),
+ (0x33F1, 'M', u'18日'),
+ (0x33F2, 'M', u'19日'),
+ (0x33F3, 'M', u'20日'),
+ (0x33F4, 'M', u'21日'),
+ (0x33F5, 'M', u'22日'),
+ (0x33F6, 'M', u'23日'),
+ (0x33F7, 'M', u'24日'),
+ (0x33F8, 'M', u'25日'),
+ (0x33F9, 'M', u'26日'),
+ (0x33FA, 'M', u'27日'),
+ (0x33FB, 'M', u'28日'),
+ (0x33FC, 'M', u'29日'),
+ (0x33FD, 'M', u'30日'),
+ (0x33FE, 'M', u'31日'),
+ (0x33FF, 'M', u'gal'),
+ (0x3400, 'V'),
+ (0x9FFD, 'X'),
+ (0xA000, 'V'),
+ (0xA48D, 'X'),
+ (0xA490, 'V'),
+ (0xA4C7, 'X'),
+ (0xA4D0, 'V'),
+ (0xA62C, 'X'),
+ (0xA640, 'M', u'ꙁ'),
+ (0xA641, 'V'),
+ (0xA642, 'M', u'ꙃ'),
+ (0xA643, 'V'),
+ (0xA644, 'M', u'ꙅ'),
+ (0xA645, 'V'),
+ (0xA646, 'M', u'ꙇ'),
+ (0xA647, 'V'),
+ (0xA648, 'M', u'ꙉ'),
+ (0xA649, 'V'),
+ (0xA64A, 'M', u'ꙋ'),
+ (0xA64B, 'V'),
+ (0xA64C, 'M', u'ꙍ'),
+ (0xA64D, 'V'),
+ (0xA64E, 'M', u'ꙏ'),
+ (0xA64F, 'V'),
+ (0xA650, 'M', u'ꙑ'),
+ (0xA651, 'V'),
+ (0xA652, 'M', u'ꙓ'),
+ (0xA653, 'V'),
+ (0xA654, 'M', u'ꙕ'),
+ (0xA655, 'V'),
+ (0xA656, 'M', u'ꙗ'),
+ (0xA657, 'V'),
+ (0xA658, 'M', u'ꙙ'),
+ (0xA659, 'V'),
+ (0xA65A, 'M', u'ꙛ'),
+ (0xA65B, 'V'),
+ (0xA65C, 'M', u'ꙝ'),
+ (0xA65D, 'V'),
+ (0xA65E, 'M', u'ꙟ'),
+ (0xA65F, 'V'),
+ (0xA660, 'M', u'ꙡ'),
+ (0xA661, 'V'),
+ (0xA662, 'M', u'ꙣ'),
+ (0xA663, 'V'),
+ (0xA664, 'M', u'ꙥ'),
+ (0xA665, 'V'),
+ (0xA666, 'M', u'ꙧ'),
+ (0xA667, 'V'),
+ (0xA668, 'M', u'ꙩ'),
+ (0xA669, 'V'),
+ (0xA66A, 'M', u'ꙫ'),
+ (0xA66B, 'V'),
+ (0xA66C, 'M', u'ꙭ'),
+ (0xA66D, 'V'),
+ (0xA680, 'M', u'ꚁ'),
+ (0xA681, 'V'),
+ (0xA682, 'M', u'ꚃ'),
+ (0xA683, 'V'),
+ (0xA684, 'M', u'ꚅ'),
+ (0xA685, 'V'),
+ (0xA686, 'M', u'ꚇ'),
+ (0xA687, 'V'),
+ ]
+
+def _seg_36():
+ return [
+ (0xA688, 'M', u'ꚉ'),
+ (0xA689, 'V'),
+ (0xA68A, 'M', u'ꚋ'),
+ (0xA68B, 'V'),
+ (0xA68C, 'M', u'ꚍ'),
+ (0xA68D, 'V'),
+ (0xA68E, 'M', u'ꚏ'),
+ (0xA68F, 'V'),
+ (0xA690, 'M', u'ꚑ'),
+ (0xA691, 'V'),
+ (0xA692, 'M', u'ꚓ'),
+ (0xA693, 'V'),
+ (0xA694, 'M', u'ꚕ'),
+ (0xA695, 'V'),
+ (0xA696, 'M', u'ꚗ'),
+ (0xA697, 'V'),
+ (0xA698, 'M', u'ꚙ'),
+ (0xA699, 'V'),
+ (0xA69A, 'M', u'ꚛ'),
+ (0xA69B, 'V'),
+ (0xA69C, 'M', u'ъ'),
+ (0xA69D, 'M', u'ь'),
+ (0xA69E, 'V'),
+ (0xA6F8, 'X'),
+ (0xA700, 'V'),
+ (0xA722, 'M', u'ꜣ'),
+ (0xA723, 'V'),
+ (0xA724, 'M', u'ꜥ'),
+ (0xA725, 'V'),
+ (0xA726, 'M', u'ꜧ'),
+ (0xA727, 'V'),
+ (0xA728, 'M', u'ꜩ'),
+ (0xA729, 'V'),
+ (0xA72A, 'M', u'ꜫ'),
+ (0xA72B, 'V'),
+ (0xA72C, 'M', u'ꜭ'),
+ (0xA72D, 'V'),
+ (0xA72E, 'M', u'ꜯ'),
+ (0xA72F, 'V'),
+ (0xA732, 'M', u'ꜳ'),
+ (0xA733, 'V'),
+ (0xA734, 'M', u'ꜵ'),
+ (0xA735, 'V'),
+ (0xA736, 'M', u'ꜷ'),
+ (0xA737, 'V'),
+ (0xA738, 'M', u'ꜹ'),
+ (0xA739, 'V'),
+ (0xA73A, 'M', u'ꜻ'),
+ (0xA73B, 'V'),
+ (0xA73C, 'M', u'ꜽ'),
+ (0xA73D, 'V'),
+ (0xA73E, 'M', u'ꜿ'),
+ (0xA73F, 'V'),
+ (0xA740, 'M', u'ꝁ'),
+ (0xA741, 'V'),
+ (0xA742, 'M', u'ꝃ'),
+ (0xA743, 'V'),
+ (0xA744, 'M', u'ꝅ'),
+ (0xA745, 'V'),
+ (0xA746, 'M', u'ꝇ'),
+ (0xA747, 'V'),
+ (0xA748, 'M', u'ꝉ'),
+ (0xA749, 'V'),
+ (0xA74A, 'M', u'ꝋ'),
+ (0xA74B, 'V'),
+ (0xA74C, 'M', u'ꝍ'),
+ (0xA74D, 'V'),
+ (0xA74E, 'M', u'ꝏ'),
+ (0xA74F, 'V'),
+ (0xA750, 'M', u'ꝑ'),
+ (0xA751, 'V'),
+ (0xA752, 'M', u'ꝓ'),
+ (0xA753, 'V'),
+ (0xA754, 'M', u'ꝕ'),
+ (0xA755, 'V'),
+ (0xA756, 'M', u'ꝗ'),
+ (0xA757, 'V'),
+ (0xA758, 'M', u'ꝙ'),
+ (0xA759, 'V'),
+ (0xA75A, 'M', u'ꝛ'),
+ (0xA75B, 'V'),
+ (0xA75C, 'M', u'ꝝ'),
+ (0xA75D, 'V'),
+ (0xA75E, 'M', u'ꝟ'),
+ (0xA75F, 'V'),
+ (0xA760, 'M', u'ꝡ'),
+ (0xA761, 'V'),
+ (0xA762, 'M', u'ꝣ'),
+ (0xA763, 'V'),
+ (0xA764, 'M', u'ꝥ'),
+ (0xA765, 'V'),
+ (0xA766, 'M', u'ꝧ'),
+ (0xA767, 'V'),
+ (0xA768, 'M', u'ꝩ'),
+ (0xA769, 'V'),
+ (0xA76A, 'M', u'ꝫ'),
+ (0xA76B, 'V'),
+ (0xA76C, 'M', u'ꝭ'),
+ (0xA76D, 'V'),
+ (0xA76E, 'M', u'ꝯ'),
+ ]
+
+def _seg_37():
+ return [
+ (0xA76F, 'V'),
+ (0xA770, 'M', u'ꝯ'),
+ (0xA771, 'V'),
+ (0xA779, 'M', u'ꝺ'),
+ (0xA77A, 'V'),
+ (0xA77B, 'M', u'ꝼ'),
+ (0xA77C, 'V'),
+ (0xA77D, 'M', u'ᵹ'),
+ (0xA77E, 'M', u'ꝿ'),
+ (0xA77F, 'V'),
+ (0xA780, 'M', u'ꞁ'),
+ (0xA781, 'V'),
+ (0xA782, 'M', u'ꞃ'),
+ (0xA783, 'V'),
+ (0xA784, 'M', u'ꞅ'),
+ (0xA785, 'V'),
+ (0xA786, 'M', u'ꞇ'),
+ (0xA787, 'V'),
+ (0xA78B, 'M', u'ꞌ'),
+ (0xA78C, 'V'),
+ (0xA78D, 'M', u'ɥ'),
+ (0xA78E, 'V'),
+ (0xA790, 'M', u'ꞑ'),
+ (0xA791, 'V'),
+ (0xA792, 'M', u'ꞓ'),
+ (0xA793, 'V'),
+ (0xA796, 'M', u'ꞗ'),
+ (0xA797, 'V'),
+ (0xA798, 'M', u'ꞙ'),
+ (0xA799, 'V'),
+ (0xA79A, 'M', u'ꞛ'),
+ (0xA79B, 'V'),
+ (0xA79C, 'M', u'ꞝ'),
+ (0xA79D, 'V'),
+ (0xA79E, 'M', u'ꞟ'),
+ (0xA79F, 'V'),
+ (0xA7A0, 'M', u'ꞡ'),
+ (0xA7A1, 'V'),
+ (0xA7A2, 'M', u'ꞣ'),
+ (0xA7A3, 'V'),
+ (0xA7A4, 'M', u'ꞥ'),
+ (0xA7A5, 'V'),
+ (0xA7A6, 'M', u'ꞧ'),
+ (0xA7A7, 'V'),
+ (0xA7A8, 'M', u'ꞩ'),
+ (0xA7A9, 'V'),
+ (0xA7AA, 'M', u'ɦ'),
+ (0xA7AB, 'M', u'ɜ'),
+ (0xA7AC, 'M', u'ɡ'),
+ (0xA7AD, 'M', u'ɬ'),
+ (0xA7AE, 'M', u'ɪ'),
+ (0xA7AF, 'V'),
+ (0xA7B0, 'M', u'ʞ'),
+ (0xA7B1, 'M', u'ʇ'),
+ (0xA7B2, 'M', u'ʝ'),
+ (0xA7B3, 'M', u'ꭓ'),
+ (0xA7B4, 'M', u'ꞵ'),
+ (0xA7B5, 'V'),
+ (0xA7B6, 'M', u'ꞷ'),
+ (0xA7B7, 'V'),
+ (0xA7B8, 'M', u'ꞹ'),
+ (0xA7B9, 'V'),
+ (0xA7BA, 'M', u'ꞻ'),
+ (0xA7BB, 'V'),
+ (0xA7BC, 'M', u'ꞽ'),
+ (0xA7BD, 'V'),
+ (0xA7BE, 'M', u'ꞿ'),
+ (0xA7BF, 'V'),
+ (0xA7C0, 'X'),
+ (0xA7C2, 'M', u'ꟃ'),
+ (0xA7C3, 'V'),
+ (0xA7C4, 'M', u'ꞔ'),
+ (0xA7C5, 'M', u'ʂ'),
+ (0xA7C6, 'M', u'ᶎ'),
+ (0xA7C7, 'M', u'ꟈ'),
+ (0xA7C8, 'V'),
+ (0xA7C9, 'M', u'ꟊ'),
+ (0xA7CA, 'V'),
+ (0xA7CB, 'X'),
+ (0xA7F5, 'M', u'ꟶ'),
+ (0xA7F6, 'V'),
+ (0xA7F8, 'M', u'ħ'),
+ (0xA7F9, 'M', u'œ'),
+ (0xA7FA, 'V'),
+ (0xA82D, 'X'),
+ (0xA830, 'V'),
+ (0xA83A, 'X'),
+ (0xA840, 'V'),
+ (0xA878, 'X'),
+ (0xA880, 'V'),
+ (0xA8C6, 'X'),
+ (0xA8CE, 'V'),
+ (0xA8DA, 'X'),
+ (0xA8E0, 'V'),
+ (0xA954, 'X'),
+ (0xA95F, 'V'),
+ (0xA97D, 'X'),
+ (0xA980, 'V'),
+ (0xA9CE, 'X'),
+ (0xA9CF, 'V'),
+ ]
+
+def _seg_38():
+ return [
+ (0xA9DA, 'X'),
+ (0xA9DE, 'V'),
+ (0xA9FF, 'X'),
+ (0xAA00, 'V'),
+ (0xAA37, 'X'),
+ (0xAA40, 'V'),
+ (0xAA4E, 'X'),
+ (0xAA50, 'V'),
+ (0xAA5A, 'X'),
+ (0xAA5C, 'V'),
+ (0xAAC3, 'X'),
+ (0xAADB, 'V'),
+ (0xAAF7, 'X'),
+ (0xAB01, 'V'),
+ (0xAB07, 'X'),
+ (0xAB09, 'V'),
+ (0xAB0F, 'X'),
+ (0xAB11, 'V'),
+ (0xAB17, 'X'),
+ (0xAB20, 'V'),
+ (0xAB27, 'X'),
+ (0xAB28, 'V'),
+ (0xAB2F, 'X'),
+ (0xAB30, 'V'),
+ (0xAB5C, 'M', u'ꜧ'),
+ (0xAB5D, 'M', u'ꬷ'),
+ (0xAB5E, 'M', u'ɫ'),
+ (0xAB5F, 'M', u'ꭒ'),
+ (0xAB60, 'V'),
+ (0xAB69, 'M', u'ʍ'),
+ (0xAB6A, 'V'),
+ (0xAB6C, 'X'),
+ (0xAB70, 'M', u'Ꭰ'),
+ (0xAB71, 'M', u'Ꭱ'),
+ (0xAB72, 'M', u'Ꭲ'),
+ (0xAB73, 'M', u'Ꭳ'),
+ (0xAB74, 'M', u'Ꭴ'),
+ (0xAB75, 'M', u'Ꭵ'),
+ (0xAB76, 'M', u'Ꭶ'),
+ (0xAB77, 'M', u'Ꭷ'),
+ (0xAB78, 'M', u'Ꭸ'),
+ (0xAB79, 'M', u'Ꭹ'),
+ (0xAB7A, 'M', u'Ꭺ'),
+ (0xAB7B, 'M', u'Ꭻ'),
+ (0xAB7C, 'M', u'Ꭼ'),
+ (0xAB7D, 'M', u'Ꭽ'),
+ (0xAB7E, 'M', u'Ꭾ'),
+ (0xAB7F, 'M', u'Ꭿ'),
+ (0xAB80, 'M', u'Ꮀ'),
+ (0xAB81, 'M', u'Ꮁ'),
+ (0xAB82, 'M', u'Ꮂ'),
+ (0xAB83, 'M', u'Ꮃ'),
+ (0xAB84, 'M', u'Ꮄ'),
+ (0xAB85, 'M', u'Ꮅ'),
+ (0xAB86, 'M', u'Ꮆ'),
+ (0xAB87, 'M', u'Ꮇ'),
+ (0xAB88, 'M', u'Ꮈ'),
+ (0xAB89, 'M', u'Ꮉ'),
+ (0xAB8A, 'M', u'Ꮊ'),
+ (0xAB8B, 'M', u'Ꮋ'),
+ (0xAB8C, 'M', u'Ꮌ'),
+ (0xAB8D, 'M', u'Ꮍ'),
+ (0xAB8E, 'M', u'Ꮎ'),
+ (0xAB8F, 'M', u'Ꮏ'),
+ (0xAB90, 'M', u'Ꮐ'),
+ (0xAB91, 'M', u'Ꮑ'),
+ (0xAB92, 'M', u'Ꮒ'),
+ (0xAB93, 'M', u'Ꮓ'),
+ (0xAB94, 'M', u'Ꮔ'),
+ (0xAB95, 'M', u'Ꮕ'),
+ (0xAB96, 'M', u'Ꮖ'),
+ (0xAB97, 'M', u'Ꮗ'),
+ (0xAB98, 'M', u'Ꮘ'),
+ (0xAB99, 'M', u'Ꮙ'),
+ (0xAB9A, 'M', u'Ꮚ'),
+ (0xAB9B, 'M', u'Ꮛ'),
+ (0xAB9C, 'M', u'Ꮜ'),
+ (0xAB9D, 'M', u'Ꮝ'),
+ (0xAB9E, 'M', u'Ꮞ'),
+ (0xAB9F, 'M', u'Ꮟ'),
+ (0xABA0, 'M', u'Ꮠ'),
+ (0xABA1, 'M', u'Ꮡ'),
+ (0xABA2, 'M', u'Ꮢ'),
+ (0xABA3, 'M', u'Ꮣ'),
+ (0xABA4, 'M', u'Ꮤ'),
+ (0xABA5, 'M', u'Ꮥ'),
+ (0xABA6, 'M', u'Ꮦ'),
+ (0xABA7, 'M', u'Ꮧ'),
+ (0xABA8, 'M', u'Ꮨ'),
+ (0xABA9, 'M', u'Ꮩ'),
+ (0xABAA, 'M', u'Ꮪ'),
+ (0xABAB, 'M', u'Ꮫ'),
+ (0xABAC, 'M', u'Ꮬ'),
+ (0xABAD, 'M', u'Ꮭ'),
+ (0xABAE, 'M', u'Ꮮ'),
+ (0xABAF, 'M', u'Ꮯ'),
+ (0xABB0, 'M', u'Ꮰ'),
+ (0xABB1, 'M', u'Ꮱ'),
+ (0xABB2, 'M', u'Ꮲ'),
+ (0xABB3, 'M', u'Ꮳ'),
+ ]
+
+def _seg_39():
+ return [
+ (0xABB4, 'M', u'Ꮴ'),
+ (0xABB5, 'M', u'Ꮵ'),
+ (0xABB6, 'M', u'Ꮶ'),
+ (0xABB7, 'M', u'Ꮷ'),
+ (0xABB8, 'M', u'Ꮸ'),
+ (0xABB9, 'M', u'Ꮹ'),
+ (0xABBA, 'M', u'Ꮺ'),
+ (0xABBB, 'M', u'Ꮻ'),
+ (0xABBC, 'M', u'Ꮼ'),
+ (0xABBD, 'M', u'Ꮽ'),
+ (0xABBE, 'M', u'Ꮾ'),
+ (0xABBF, 'M', u'Ꮿ'),
+ (0xABC0, 'V'),
+ (0xABEE, 'X'),
+ (0xABF0, 'V'),
+ (0xABFA, 'X'),
+ (0xAC00, 'V'),
+ (0xD7A4, 'X'),
+ (0xD7B0, 'V'),
+ (0xD7C7, 'X'),
+ (0xD7CB, 'V'),
+ (0xD7FC, 'X'),
+ (0xF900, 'M', u'豈'),
+ (0xF901, 'M', u'更'),
+ (0xF902, 'M', u'車'),
+ (0xF903, 'M', u'賈'),
+ (0xF904, 'M', u'滑'),
+ (0xF905, 'M', u'串'),
+ (0xF906, 'M', u'句'),
+ (0xF907, 'M', u'龜'),
+ (0xF909, 'M', u'契'),
+ (0xF90A, 'M', u'金'),
+ (0xF90B, 'M', u'喇'),
+ (0xF90C, 'M', u'奈'),
+ (0xF90D, 'M', u'懶'),
+ (0xF90E, 'M', u'癩'),
+ (0xF90F, 'M', u'羅'),
+ (0xF910, 'M', u'蘿'),
+ (0xF911, 'M', u'螺'),
+ (0xF912, 'M', u'裸'),
+ (0xF913, 'M', u'邏'),
+ (0xF914, 'M', u'樂'),
+ (0xF915, 'M', u'洛'),
+ (0xF916, 'M', u'烙'),
+ (0xF917, 'M', u'珞'),
+ (0xF918, 'M', u'落'),
+ (0xF919, 'M', u'酪'),
+ (0xF91A, 'M', u'駱'),
+ (0xF91B, 'M', u'亂'),
+ (0xF91C, 'M', u'卵'),
+ (0xF91D, 'M', u'欄'),
+ (0xF91E, 'M', u'爛'),
+ (0xF91F, 'M', u'蘭'),
+ (0xF920, 'M', u'鸞'),
+ (0xF921, 'M', u'嵐'),
+ (0xF922, 'M', u'濫'),
+ (0xF923, 'M', u'藍'),
+ (0xF924, 'M', u'襤'),
+ (0xF925, 'M', u'拉'),
+ (0xF926, 'M', u'臘'),
+ (0xF927, 'M', u'蠟'),
+ (0xF928, 'M', u'廊'),
+ (0xF929, 'M', u'朗'),
+ (0xF92A, 'M', u'浪'),
+ (0xF92B, 'M', u'狼'),
+ (0xF92C, 'M', u'郎'),
+ (0xF92D, 'M', u'來'),
+ (0xF92E, 'M', u'冷'),
+ (0xF92F, 'M', u'勞'),
+ (0xF930, 'M', u'擄'),
+ (0xF931, 'M', u'櫓'),
+ (0xF932, 'M', u'爐'),
+ (0xF933, 'M', u'盧'),
+ (0xF934, 'M', u'老'),
+ (0xF935, 'M', u'蘆'),
+ (0xF936, 'M', u'虜'),
+ (0xF937, 'M', u'路'),
+ (0xF938, 'M', u'露'),
+ (0xF939, 'M', u'魯'),
+ (0xF93A, 'M', u'鷺'),
+ (0xF93B, 'M', u'碌'),
+ (0xF93C, 'M', u'祿'),
+ (0xF93D, 'M', u'綠'),
+ (0xF93E, 'M', u'菉'),
+ (0xF93F, 'M', u'錄'),
+ (0xF940, 'M', u'鹿'),
+ (0xF941, 'M', u'論'),
+ (0xF942, 'M', u'壟'),
+ (0xF943, 'M', u'弄'),
+ (0xF944, 'M', u'籠'),
+ (0xF945, 'M', u'聾'),
+ (0xF946, 'M', u'牢'),
+ (0xF947, 'M', u'磊'),
+ (0xF948, 'M', u'賂'),
+ (0xF949, 'M', u'雷'),
+ (0xF94A, 'M', u'壘'),
+ (0xF94B, 'M', u'屢'),
+ (0xF94C, 'M', u'樓'),
+ (0xF94D, 'M', u'淚'),
+ (0xF94E, 'M', u'漏'),
+ ]
+
+def _seg_40():
+ return [
+ (0xF94F, 'M', u'累'),
+ (0xF950, 'M', u'縷'),
+ (0xF951, 'M', u'陋'),
+ (0xF952, 'M', u'勒'),
+ (0xF953, 'M', u'肋'),
+ (0xF954, 'M', u'凜'),
+ (0xF955, 'M', u'凌'),
+ (0xF956, 'M', u'稜'),
+ (0xF957, 'M', u'綾'),
+ (0xF958, 'M', u'菱'),
+ (0xF959, 'M', u'陵'),
+ (0xF95A, 'M', u'讀'),
+ (0xF95B, 'M', u'拏'),
+ (0xF95C, 'M', u'樂'),
+ (0xF95D, 'M', u'諾'),
+ (0xF95E, 'M', u'丹'),
+ (0xF95F, 'M', u'寧'),
+ (0xF960, 'M', u'怒'),
+ (0xF961, 'M', u'率'),
+ (0xF962, 'M', u'異'),
+ (0xF963, 'M', u'北'),
+ (0xF964, 'M', u'磻'),
+ (0xF965, 'M', u'便'),
+ (0xF966, 'M', u'復'),
+ (0xF967, 'M', u'不'),
+ (0xF968, 'M', u'泌'),
+ (0xF969, 'M', u'數'),
+ (0xF96A, 'M', u'索'),
+ (0xF96B, 'M', u'參'),
+ (0xF96C, 'M', u'塞'),
+ (0xF96D, 'M', u'省'),
+ (0xF96E, 'M', u'葉'),
+ (0xF96F, 'M', u'說'),
+ (0xF970, 'M', u'殺'),
+ (0xF971, 'M', u'辰'),
+ (0xF972, 'M', u'沈'),
+ (0xF973, 'M', u'拾'),
+ (0xF974, 'M', u'若'),
+ (0xF975, 'M', u'掠'),
+ (0xF976, 'M', u'略'),
+ (0xF977, 'M', u'亮'),
+ (0xF978, 'M', u'兩'),
+ (0xF979, 'M', u'凉'),
+ (0xF97A, 'M', u'梁'),
+ (0xF97B, 'M', u'糧'),
+ (0xF97C, 'M', u'良'),
+ (0xF97D, 'M', u'諒'),
+ (0xF97E, 'M', u'量'),
+ (0xF97F, 'M', u'勵'),
+ (0xF980, 'M', u'呂'),
+ (0xF981, 'M', u'女'),
+ (0xF982, 'M', u'廬'),
+ (0xF983, 'M', u'旅'),
+ (0xF984, 'M', u'濾'),
+ (0xF985, 'M', u'礪'),
+ (0xF986, 'M', u'閭'),
+ (0xF987, 'M', u'驪'),
+ (0xF988, 'M', u'麗'),
+ (0xF989, 'M', u'黎'),
+ (0xF98A, 'M', u'力'),
+ (0xF98B, 'M', u'曆'),
+ (0xF98C, 'M', u'歷'),
+ (0xF98D, 'M', u'轢'),
+ (0xF98E, 'M', u'年'),
+ (0xF98F, 'M', u'憐'),
+ (0xF990, 'M', u'戀'),
+ (0xF991, 'M', u'撚'),
+ (0xF992, 'M', u'漣'),
+ (0xF993, 'M', u'煉'),
+ (0xF994, 'M', u'璉'),
+ (0xF995, 'M', u'秊'),
+ (0xF996, 'M', u'練'),
+ (0xF997, 'M', u'聯'),
+ (0xF998, 'M', u'輦'),
+ (0xF999, 'M', u'蓮'),
+ (0xF99A, 'M', u'連'),
+ (0xF99B, 'M', u'鍊'),
+ (0xF99C, 'M', u'列'),
+ (0xF99D, 'M', u'劣'),
+ (0xF99E, 'M', u'咽'),
+ (0xF99F, 'M', u'烈'),
+ (0xF9A0, 'M', u'裂'),
+ (0xF9A1, 'M', u'說'),
+ (0xF9A2, 'M', u'廉'),
+ (0xF9A3, 'M', u'念'),
+ (0xF9A4, 'M', u'捻'),
+ (0xF9A5, 'M', u'殮'),
+ (0xF9A6, 'M', u'簾'),
+ (0xF9A7, 'M', u'獵'),
+ (0xF9A8, 'M', u'令'),
+ (0xF9A9, 'M', u'囹'),
+ (0xF9AA, 'M', u'寧'),
+ (0xF9AB, 'M', u'嶺'),
+ (0xF9AC, 'M', u'怜'),
+ (0xF9AD, 'M', u'玲'),
+ (0xF9AE, 'M', u'瑩'),
+ (0xF9AF, 'M', u'羚'),
+ (0xF9B0, 'M', u'聆'),
+ (0xF9B1, 'M', u'鈴'),
+ (0xF9B2, 'M', u'零'),
+ ]
+
+def _seg_41():
+ return [
+ (0xF9B3, 'M', u'靈'),
+ (0xF9B4, 'M', u'領'),
+ (0xF9B5, 'M', u'例'),
+ (0xF9B6, 'M', u'禮'),
+ (0xF9B7, 'M', u'醴'),
+ (0xF9B8, 'M', u'隸'),
+ (0xF9B9, 'M', u'惡'),
+ (0xF9BA, 'M', u'了'),
+ (0xF9BB, 'M', u'僚'),
+ (0xF9BC, 'M', u'寮'),
+ (0xF9BD, 'M', u'尿'),
+ (0xF9BE, 'M', u'料'),
+ (0xF9BF, 'M', u'樂'),
+ (0xF9C0, 'M', u'燎'),
+ (0xF9C1, 'M', u'療'),
+ (0xF9C2, 'M', u'蓼'),
+ (0xF9C3, 'M', u'遼'),
+ (0xF9C4, 'M', u'龍'),
+ (0xF9C5, 'M', u'暈'),
+ (0xF9C6, 'M', u'阮'),
+ (0xF9C7, 'M', u'劉'),
+ (0xF9C8, 'M', u'杻'),
+ (0xF9C9, 'M', u'柳'),
+ (0xF9CA, 'M', u'流'),
+ (0xF9CB, 'M', u'溜'),
+ (0xF9CC, 'M', u'琉'),
+ (0xF9CD, 'M', u'留'),
+ (0xF9CE, 'M', u'硫'),
+ (0xF9CF, 'M', u'紐'),
+ (0xF9D0, 'M', u'類'),
+ (0xF9D1, 'M', u'六'),
+ (0xF9D2, 'M', u'戮'),
+ (0xF9D3, 'M', u'陸'),
+ (0xF9D4, 'M', u'倫'),
+ (0xF9D5, 'M', u'崙'),
+ (0xF9D6, 'M', u'淪'),
+ (0xF9D7, 'M', u'輪'),
+ (0xF9D8, 'M', u'律'),
+ (0xF9D9, 'M', u'慄'),
+ (0xF9DA, 'M', u'栗'),
+ (0xF9DB, 'M', u'率'),
+ (0xF9DC, 'M', u'隆'),
+ (0xF9DD, 'M', u'利'),
+ (0xF9DE, 'M', u'吏'),
+ (0xF9DF, 'M', u'履'),
+ (0xF9E0, 'M', u'易'),
+ (0xF9E1, 'M', u'李'),
+ (0xF9E2, 'M', u'梨'),
+ (0xF9E3, 'M', u'泥'),
+ (0xF9E4, 'M', u'理'),
+ (0xF9E5, 'M', u'痢'),
+ (0xF9E6, 'M', u'罹'),
+ (0xF9E7, 'M', u'裏'),
+ (0xF9E8, 'M', u'裡'),
+ (0xF9E9, 'M', u'里'),
+ (0xF9EA, 'M', u'離'),
+ (0xF9EB, 'M', u'匿'),
+ (0xF9EC, 'M', u'溺'),
+ (0xF9ED, 'M', u'吝'),
+ (0xF9EE, 'M', u'燐'),
+ (0xF9EF, 'M', u'璘'),
+ (0xF9F0, 'M', u'藺'),
+ (0xF9F1, 'M', u'隣'),
+ (0xF9F2, 'M', u'鱗'),
+ (0xF9F3, 'M', u'麟'),
+ (0xF9F4, 'M', u'林'),
+ (0xF9F5, 'M', u'淋'),
+ (0xF9F6, 'M', u'臨'),
+ (0xF9F7, 'M', u'立'),
+ (0xF9F8, 'M', u'笠'),
+ (0xF9F9, 'M', u'粒'),
+ (0xF9FA, 'M', u'狀'),
+ (0xF9FB, 'M', u'炙'),
+ (0xF9FC, 'M', u'識'),
+ (0xF9FD, 'M', u'什'),
+ (0xF9FE, 'M', u'茶'),
+ (0xF9FF, 'M', u'刺'),
+ (0xFA00, 'M', u'切'),
+ (0xFA01, 'M', u'度'),
+ (0xFA02, 'M', u'拓'),
+ (0xFA03, 'M', u'糖'),
+ (0xFA04, 'M', u'宅'),
+ (0xFA05, 'M', u'洞'),
+ (0xFA06, 'M', u'暴'),
+ (0xFA07, 'M', u'輻'),
+ (0xFA08, 'M', u'行'),
+ (0xFA09, 'M', u'降'),
+ (0xFA0A, 'M', u'見'),
+ (0xFA0B, 'M', u'廓'),
+ (0xFA0C, 'M', u'兀'),
+ (0xFA0D, 'M', u'嗀'),
+ (0xFA0E, 'V'),
+ (0xFA10, 'M', u'塚'),
+ (0xFA11, 'V'),
+ (0xFA12, 'M', u'晴'),
+ (0xFA13, 'V'),
+ (0xFA15, 'M', u'凞'),
+ (0xFA16, 'M', u'猪'),
+ (0xFA17, 'M', u'益'),
+ (0xFA18, 'M', u'礼'),
+ ]
+
+def _seg_42():
+ return [
+ (0xFA19, 'M', u'神'),
+ (0xFA1A, 'M', u'祥'),
+ (0xFA1B, 'M', u'福'),
+ (0xFA1C, 'M', u'靖'),
+ (0xFA1D, 'M', u'精'),
+ (0xFA1E, 'M', u'羽'),
+ (0xFA1F, 'V'),
+ (0xFA20, 'M', u'蘒'),
+ (0xFA21, 'V'),
+ (0xFA22, 'M', u'諸'),
+ (0xFA23, 'V'),
+ (0xFA25, 'M', u'逸'),
+ (0xFA26, 'M', u'都'),
+ (0xFA27, 'V'),
+ (0xFA2A, 'M', u'飯'),
+ (0xFA2B, 'M', u'飼'),
+ (0xFA2C, 'M', u'館'),
+ (0xFA2D, 'M', u'鶴'),
+ (0xFA2E, 'M', u'郞'),
+ (0xFA2F, 'M', u'隷'),
+ (0xFA30, 'M', u'侮'),
+ (0xFA31, 'M', u'僧'),
+ (0xFA32, 'M', u'免'),
+ (0xFA33, 'M', u'勉'),
+ (0xFA34, 'M', u'勤'),
+ (0xFA35, 'M', u'卑'),
+ (0xFA36, 'M', u'喝'),
+ (0xFA37, 'M', u'嘆'),
+ (0xFA38, 'M', u'器'),
+ (0xFA39, 'M', u'塀'),
+ (0xFA3A, 'M', u'墨'),
+ (0xFA3B, 'M', u'層'),
+ (0xFA3C, 'M', u'屮'),
+ (0xFA3D, 'M', u'悔'),
+ (0xFA3E, 'M', u'慨'),
+ (0xFA3F, 'M', u'憎'),
+ (0xFA40, 'M', u'懲'),
+ (0xFA41, 'M', u'敏'),
+ (0xFA42, 'M', u'既'),
+ (0xFA43, 'M', u'暑'),
+ (0xFA44, 'M', u'梅'),
+ (0xFA45, 'M', u'海'),
+ (0xFA46, 'M', u'渚'),
+ (0xFA47, 'M', u'漢'),
+ (0xFA48, 'M', u'煮'),
+ (0xFA49, 'M', u'爫'),
+ (0xFA4A, 'M', u'琢'),
+ (0xFA4B, 'M', u'碑'),
+ (0xFA4C, 'M', u'社'),
+ (0xFA4D, 'M', u'祉'),
+ (0xFA4E, 'M', u'祈'),
+ (0xFA4F, 'M', u'祐'),
+ (0xFA50, 'M', u'祖'),
+ (0xFA51, 'M', u'祝'),
+ (0xFA52, 'M', u'禍'),
+ (0xFA53, 'M', u'禎'),
+ (0xFA54, 'M', u'穀'),
+ (0xFA55, 'M', u'突'),
+ (0xFA56, 'M', u'節'),
+ (0xFA57, 'M', u'練'),
+ (0xFA58, 'M', u'縉'),
+ (0xFA59, 'M', u'繁'),
+ (0xFA5A, 'M', u'署'),
+ (0xFA5B, 'M', u'者'),
+ (0xFA5C, 'M', u'臭'),
+ (0xFA5D, 'M', u'艹'),
+ (0xFA5F, 'M', u'著'),
+ (0xFA60, 'M', u'褐'),
+ (0xFA61, 'M', u'視'),
+ (0xFA62, 'M', u'謁'),
+ (0xFA63, 'M', u'謹'),
+ (0xFA64, 'M', u'賓'),
+ (0xFA65, 'M', u'贈'),
+ (0xFA66, 'M', u'辶'),
+ (0xFA67, 'M', u'逸'),
+ (0xFA68, 'M', u'難'),
+ (0xFA69, 'M', u'響'),
+ (0xFA6A, 'M', u'頻'),
+ (0xFA6B, 'M', u'恵'),
+ (0xFA6C, 'M', u'𤋮'),
+ (0xFA6D, 'M', u'舘'),
+ (0xFA6E, 'X'),
+ (0xFA70, 'M', u'並'),
+ (0xFA71, 'M', u'况'),
+ (0xFA72, 'M', u'全'),
+ (0xFA73, 'M', u'侀'),
+ (0xFA74, 'M', u'充'),
+ (0xFA75, 'M', u'冀'),
+ (0xFA76, 'M', u'勇'),
+ (0xFA77, 'M', u'勺'),
+ (0xFA78, 'M', u'喝'),
+ (0xFA79, 'M', u'啕'),
+ (0xFA7A, 'M', u'喙'),
+ (0xFA7B, 'M', u'嗢'),
+ (0xFA7C, 'M', u'塚'),
+ (0xFA7D, 'M', u'墳'),
+ (0xFA7E, 'M', u'奄'),
+ (0xFA7F, 'M', u'奔'),
+ (0xFA80, 'M', u'婢'),
+ (0xFA81, 'M', u'嬨'),
+ ]
+
+def _seg_43():
+ return [
+ (0xFA82, 'M', u'廒'),
+ (0xFA83, 'M', u'廙'),
+ (0xFA84, 'M', u'彩'),
+ (0xFA85, 'M', u'徭'),
+ (0xFA86, 'M', u'惘'),
+ (0xFA87, 'M', u'慎'),
+ (0xFA88, 'M', u'愈'),
+ (0xFA89, 'M', u'憎'),
+ (0xFA8A, 'M', u'慠'),
+ (0xFA8B, 'M', u'懲'),
+ (0xFA8C, 'M', u'戴'),
+ (0xFA8D, 'M', u'揄'),
+ (0xFA8E, 'M', u'搜'),
+ (0xFA8F, 'M', u'摒'),
+ (0xFA90, 'M', u'敖'),
+ (0xFA91, 'M', u'晴'),
+ (0xFA92, 'M', u'朗'),
+ (0xFA93, 'M', u'望'),
+ (0xFA94, 'M', u'杖'),
+ (0xFA95, 'M', u'歹'),
+ (0xFA96, 'M', u'殺'),
+ (0xFA97, 'M', u'流'),
+ (0xFA98, 'M', u'滛'),
+ (0xFA99, 'M', u'滋'),
+ (0xFA9A, 'M', u'漢'),
+ (0xFA9B, 'M', u'瀞'),
+ (0xFA9C, 'M', u'煮'),
+ (0xFA9D, 'M', u'瞧'),
+ (0xFA9E, 'M', u'爵'),
+ (0xFA9F, 'M', u'犯'),
+ (0xFAA0, 'M', u'猪'),
+ (0xFAA1, 'M', u'瑱'),
+ (0xFAA2, 'M', u'甆'),
+ (0xFAA3, 'M', u'画'),
+ (0xFAA4, 'M', u'瘝'),
+ (0xFAA5, 'M', u'瘟'),
+ (0xFAA6, 'M', u'益'),
+ (0xFAA7, 'M', u'盛'),
+ (0xFAA8, 'M', u'直'),
+ (0xFAA9, 'M', u'睊'),
+ (0xFAAA, 'M', u'着'),
+ (0xFAAB, 'M', u'磌'),
+ (0xFAAC, 'M', u'窱'),
+ (0xFAAD, 'M', u'節'),
+ (0xFAAE, 'M', u'类'),
+ (0xFAAF, 'M', u'絛'),
+ (0xFAB0, 'M', u'練'),
+ (0xFAB1, 'M', u'缾'),
+ (0xFAB2, 'M', u'者'),
+ (0xFAB3, 'M', u'荒'),
+ (0xFAB4, 'M', u'華'),
+ (0xFAB5, 'M', u'蝹'),
+ (0xFAB6, 'M', u'襁'),
+ (0xFAB7, 'M', u'覆'),
+ (0xFAB8, 'M', u'視'),
+ (0xFAB9, 'M', u'調'),
+ (0xFABA, 'M', u'諸'),
+ (0xFABB, 'M', u'請'),
+ (0xFABC, 'M', u'謁'),
+ (0xFABD, 'M', u'諾'),
+ (0xFABE, 'M', u'諭'),
+ (0xFABF, 'M', u'謹'),
+ (0xFAC0, 'M', u'變'),
+ (0xFAC1, 'M', u'贈'),
+ (0xFAC2, 'M', u'輸'),
+ (0xFAC3, 'M', u'遲'),
+ (0xFAC4, 'M', u'醙'),
+ (0xFAC5, 'M', u'鉶'),
+ (0xFAC6, 'M', u'陼'),
+ (0xFAC7, 'M', u'難'),
+ (0xFAC8, 'M', u'靖'),
+ (0xFAC9, 'M', u'韛'),
+ (0xFACA, 'M', u'響'),
+ (0xFACB, 'M', u'頋'),
+ (0xFACC, 'M', u'頻'),
+ (0xFACD, 'M', u'鬒'),
+ (0xFACE, 'M', u'龜'),
+ (0xFACF, 'M', u'𢡊'),
+ (0xFAD0, 'M', u'𢡄'),
+ (0xFAD1, 'M', u'𣏕'),
+ (0xFAD2, 'M', u'㮝'),
+ (0xFAD3, 'M', u'䀘'),
+ (0xFAD4, 'M', u'䀹'),
+ (0xFAD5, 'M', u'𥉉'),
+ (0xFAD6, 'M', u'𥳐'),
+ (0xFAD7, 'M', u'𧻓'),
+ (0xFAD8, 'M', u'齃'),
+ (0xFAD9, 'M', u'龎'),
+ (0xFADA, 'X'),
+ (0xFB00, 'M', u'ff'),
+ (0xFB01, 'M', u'fi'),
+ (0xFB02, 'M', u'fl'),
+ (0xFB03, 'M', u'ffi'),
+ (0xFB04, 'M', u'ffl'),
+ (0xFB05, 'M', u'st'),
+ (0xFB07, 'X'),
+ (0xFB13, 'M', u'մն'),
+ (0xFB14, 'M', u'մե'),
+ (0xFB15, 'M', u'մի'),
+ (0xFB16, 'M', u'վն'),
+ ]
+
+def _seg_44():
+ return [
+ (0xFB17, 'M', u'մխ'),
+ (0xFB18, 'X'),
+ (0xFB1D, 'M', u'יִ'),
+ (0xFB1E, 'V'),
+ (0xFB1F, 'M', u'ײַ'),
+ (0xFB20, 'M', u'ע'),
+ (0xFB21, 'M', u'א'),
+ (0xFB22, 'M', u'ד'),
+ (0xFB23, 'M', u'ה'),
+ (0xFB24, 'M', u'כ'),
+ (0xFB25, 'M', u'ל'),
+ (0xFB26, 'M', u'ם'),
+ (0xFB27, 'M', u'ר'),
+ (0xFB28, 'M', u'ת'),
+ (0xFB29, '3', u'+'),
+ (0xFB2A, 'M', u'שׁ'),
+ (0xFB2B, 'M', u'שׂ'),
+ (0xFB2C, 'M', u'שּׁ'),
+ (0xFB2D, 'M', u'שּׂ'),
+ (0xFB2E, 'M', u'אַ'),
+ (0xFB2F, 'M', u'אָ'),
+ (0xFB30, 'M', u'אּ'),
+ (0xFB31, 'M', u'בּ'),
+ (0xFB32, 'M', u'גּ'),
+ (0xFB33, 'M', u'דּ'),
+ (0xFB34, 'M', u'הּ'),
+ (0xFB35, 'M', u'וּ'),
+ (0xFB36, 'M', u'זּ'),
+ (0xFB37, 'X'),
+ (0xFB38, 'M', u'טּ'),
+ (0xFB39, 'M', u'יּ'),
+ (0xFB3A, 'M', u'ךּ'),
+ (0xFB3B, 'M', u'כּ'),
+ (0xFB3C, 'M', u'לּ'),
+ (0xFB3D, 'X'),
+ (0xFB3E, 'M', u'מּ'),
+ (0xFB3F, 'X'),
+ (0xFB40, 'M', u'נּ'),
+ (0xFB41, 'M', u'סּ'),
+ (0xFB42, 'X'),
+ (0xFB43, 'M', u'ףּ'),
+ (0xFB44, 'M', u'פּ'),
+ (0xFB45, 'X'),
+ (0xFB46, 'M', u'צּ'),
+ (0xFB47, 'M', u'קּ'),
+ (0xFB48, 'M', u'רּ'),
+ (0xFB49, 'M', u'שּ'),
+ (0xFB4A, 'M', u'תּ'),
+ (0xFB4B, 'M', u'וֹ'),
+ (0xFB4C, 'M', u'בֿ'),
+ (0xFB4D, 'M', u'כֿ'),
+ (0xFB4E, 'M', u'פֿ'),
+ (0xFB4F, 'M', u'אל'),
+ (0xFB50, 'M', u'ٱ'),
+ (0xFB52, 'M', u'ٻ'),
+ (0xFB56, 'M', u'پ'),
+ (0xFB5A, 'M', u'ڀ'),
+ (0xFB5E, 'M', u'ٺ'),
+ (0xFB62, 'M', u'ٿ'),
+ (0xFB66, 'M', u'ٹ'),
+ (0xFB6A, 'M', u'ڤ'),
+ (0xFB6E, 'M', u'ڦ'),
+ (0xFB72, 'M', u'ڄ'),
+ (0xFB76, 'M', u'ڃ'),
+ (0xFB7A, 'M', u'چ'),
+ (0xFB7E, 'M', u'ڇ'),
+ (0xFB82, 'M', u'ڍ'),
+ (0xFB84, 'M', u'ڌ'),
+ (0xFB86, 'M', u'ڎ'),
+ (0xFB88, 'M', u'ڈ'),
+ (0xFB8A, 'M', u'ژ'),
+ (0xFB8C, 'M', u'ڑ'),
+ (0xFB8E, 'M', u'ک'),
+ (0xFB92, 'M', u'گ'),
+ (0xFB96, 'M', u'ڳ'),
+ (0xFB9A, 'M', u'ڱ'),
+ (0xFB9E, 'M', u'ں'),
+ (0xFBA0, 'M', u'ڻ'),
+ (0xFBA4, 'M', u'ۀ'),
+ (0xFBA6, 'M', u'ہ'),
+ (0xFBAA, 'M', u'ھ'),
+ (0xFBAE, 'M', u'ے'),
+ (0xFBB0, 'M', u'ۓ'),
+ (0xFBB2, 'V'),
+ (0xFBC2, 'X'),
+ (0xFBD3, 'M', u'ڭ'),
+ (0xFBD7, 'M', u'ۇ'),
+ (0xFBD9, 'M', u'ۆ'),
+ (0xFBDB, 'M', u'ۈ'),
+ (0xFBDD, 'M', u'ۇٴ'),
+ (0xFBDE, 'M', u'ۋ'),
+ (0xFBE0, 'M', u'ۅ'),
+ (0xFBE2, 'M', u'ۉ'),
+ (0xFBE4, 'M', u'ې'),
+ (0xFBE8, 'M', u'ى'),
+ (0xFBEA, 'M', u'ئا'),
+ (0xFBEC, 'M', u'ئە'),
+ (0xFBEE, 'M', u'ئو'),
+ (0xFBF0, 'M', u'ئۇ'),
+ (0xFBF2, 'M', u'ئۆ'),
+ ]
+
+def _seg_45():
+ return [
+ (0xFBF4, 'M', u'ئۈ'),
+ (0xFBF6, 'M', u'ئې'),
+ (0xFBF9, 'M', u'ئى'),
+ (0xFBFC, 'M', u'ی'),
+ (0xFC00, 'M', u'ئج'),
+ (0xFC01, 'M', u'ئح'),
+ (0xFC02, 'M', u'ئم'),
+ (0xFC03, 'M', u'ئى'),
+ (0xFC04, 'M', u'ئي'),
+ (0xFC05, 'M', u'بج'),
+ (0xFC06, 'M', u'بح'),
+ (0xFC07, 'M', u'بخ'),
+ (0xFC08, 'M', u'بم'),
+ (0xFC09, 'M', u'بى'),
+ (0xFC0A, 'M', u'بي'),
+ (0xFC0B, 'M', u'تج'),
+ (0xFC0C, 'M', u'تح'),
+ (0xFC0D, 'M', u'تخ'),
+ (0xFC0E, 'M', u'تم'),
+ (0xFC0F, 'M', u'تى'),
+ (0xFC10, 'M', u'تي'),
+ (0xFC11, 'M', u'ثج'),
+ (0xFC12, 'M', u'ثم'),
+ (0xFC13, 'M', u'ثى'),
+ (0xFC14, 'M', u'ثي'),
+ (0xFC15, 'M', u'جح'),
+ (0xFC16, 'M', u'جم'),
+ (0xFC17, 'M', u'حج'),
+ (0xFC18, 'M', u'حم'),
+ (0xFC19, 'M', u'خج'),
+ (0xFC1A, 'M', u'خح'),
+ (0xFC1B, 'M', u'خم'),
+ (0xFC1C, 'M', u'سج'),
+ (0xFC1D, 'M', u'سح'),
+ (0xFC1E, 'M', u'سخ'),
+ (0xFC1F, 'M', u'سم'),
+ (0xFC20, 'M', u'صح'),
+ (0xFC21, 'M', u'صم'),
+ (0xFC22, 'M', u'ضج'),
+ (0xFC23, 'M', u'ضح'),
+ (0xFC24, 'M', u'ضخ'),
+ (0xFC25, 'M', u'ضم'),
+ (0xFC26, 'M', u'طح'),
+ (0xFC27, 'M', u'طم'),
+ (0xFC28, 'M', u'ظم'),
+ (0xFC29, 'M', u'عج'),
+ (0xFC2A, 'M', u'عم'),
+ (0xFC2B, 'M', u'غج'),
+ (0xFC2C, 'M', u'غم'),
+ (0xFC2D, 'M', u'فج'),
+ (0xFC2E, 'M', u'فح'),
+ (0xFC2F, 'M', u'فخ'),
+ (0xFC30, 'M', u'فم'),
+ (0xFC31, 'M', u'فى'),
+ (0xFC32, 'M', u'في'),
+ (0xFC33, 'M', u'قح'),
+ (0xFC34, 'M', u'قم'),
+ (0xFC35, 'M', u'قى'),
+ (0xFC36, 'M', u'قي'),
+ (0xFC37, 'M', u'كا'),
+ (0xFC38, 'M', u'كج'),
+ (0xFC39, 'M', u'كح'),
+ (0xFC3A, 'M', u'كخ'),
+ (0xFC3B, 'M', u'كل'),
+ (0xFC3C, 'M', u'كم'),
+ (0xFC3D, 'M', u'كى'),
+ (0xFC3E, 'M', u'كي'),
+ (0xFC3F, 'M', u'لج'),
+ (0xFC40, 'M', u'لح'),
+ (0xFC41, 'M', u'لخ'),
+ (0xFC42, 'M', u'لم'),
+ (0xFC43, 'M', u'لى'),
+ (0xFC44, 'M', u'لي'),
+ (0xFC45, 'M', u'مج'),
+ (0xFC46, 'M', u'مح'),
+ (0xFC47, 'M', u'مخ'),
+ (0xFC48, 'M', u'مم'),
+ (0xFC49, 'M', u'مى'),
+ (0xFC4A, 'M', u'مي'),
+ (0xFC4B, 'M', u'نج'),
+ (0xFC4C, 'M', u'نح'),
+ (0xFC4D, 'M', u'نخ'),
+ (0xFC4E, 'M', u'نم'),
+ (0xFC4F, 'M', u'نى'),
+ (0xFC50, 'M', u'ني'),
+ (0xFC51, 'M', u'هج'),
+ (0xFC52, 'M', u'هم'),
+ (0xFC53, 'M', u'هى'),
+ (0xFC54, 'M', u'هي'),
+ (0xFC55, 'M', u'يج'),
+ (0xFC56, 'M', u'يح'),
+ (0xFC57, 'M', u'يخ'),
+ (0xFC58, 'M', u'يم'),
+ (0xFC59, 'M', u'يى'),
+ (0xFC5A, 'M', u'يي'),
+ (0xFC5B, 'M', u'ذٰ'),
+ (0xFC5C, 'M', u'رٰ'),
+ (0xFC5D, 'M', u'ىٰ'),
+ (0xFC5E, '3', u' ٌّ'),
+ (0xFC5F, '3', u' ٍّ'),
+ ]
+
+def _seg_46():
+ return [
+ (0xFC60, '3', u' َّ'),
+ (0xFC61, '3', u' ُّ'),
+ (0xFC62, '3', u' ِّ'),
+ (0xFC63, '3', u' ّٰ'),
+ (0xFC64, 'M', u'ئر'),
+ (0xFC65, 'M', u'ئز'),
+ (0xFC66, 'M', u'ئم'),
+ (0xFC67, 'M', u'ئن'),
+ (0xFC68, 'M', u'ئى'),
+ (0xFC69, 'M', u'ئي'),
+ (0xFC6A, 'M', u'بر'),
+ (0xFC6B, 'M', u'بز'),
+ (0xFC6C, 'M', u'بم'),
+ (0xFC6D, 'M', u'بن'),
+ (0xFC6E, 'M', u'بى'),
+ (0xFC6F, 'M', u'بي'),
+ (0xFC70, 'M', u'تر'),
+ (0xFC71, 'M', u'تز'),
+ (0xFC72, 'M', u'تم'),
+ (0xFC73, 'M', u'تن'),
+ (0xFC74, 'M', u'تى'),
+ (0xFC75, 'M', u'تي'),
+ (0xFC76, 'M', u'ثر'),
+ (0xFC77, 'M', u'ثز'),
+ (0xFC78, 'M', u'ثم'),
+ (0xFC79, 'M', u'ثن'),
+ (0xFC7A, 'M', u'ثى'),
+ (0xFC7B, 'M', u'ثي'),
+ (0xFC7C, 'M', u'فى'),
+ (0xFC7D, 'M', u'في'),
+ (0xFC7E, 'M', u'قى'),
+ (0xFC7F, 'M', u'قي'),
+ (0xFC80, 'M', u'كا'),
+ (0xFC81, 'M', u'كل'),
+ (0xFC82, 'M', u'كم'),
+ (0xFC83, 'M', u'كى'),
+ (0xFC84, 'M', u'كي'),
+ (0xFC85, 'M', u'لم'),
+ (0xFC86, 'M', u'لى'),
+ (0xFC87, 'M', u'لي'),
+ (0xFC88, 'M', u'ما'),
+ (0xFC89, 'M', u'مم'),
+ (0xFC8A, 'M', u'نر'),
+ (0xFC8B, 'M', u'نز'),
+ (0xFC8C, 'M', u'نم'),
+ (0xFC8D, 'M', u'نن'),
+ (0xFC8E, 'M', u'نى'),
+ (0xFC8F, 'M', u'ني'),
+ (0xFC90, 'M', u'ىٰ'),
+ (0xFC91, 'M', u'ير'),
+ (0xFC92, 'M', u'يز'),
+ (0xFC93, 'M', u'يم'),
+ (0xFC94, 'M', u'ين'),
+ (0xFC95, 'M', u'يى'),
+ (0xFC96, 'M', u'يي'),
+ (0xFC97, 'M', u'ئج'),
+ (0xFC98, 'M', u'ئح'),
+ (0xFC99, 'M', u'ئخ'),
+ (0xFC9A, 'M', u'ئم'),
+ (0xFC9B, 'M', u'ئه'),
+ (0xFC9C, 'M', u'بج'),
+ (0xFC9D, 'M', u'بح'),
+ (0xFC9E, 'M', u'بخ'),
+ (0xFC9F, 'M', u'بم'),
+ (0xFCA0, 'M', u'به'),
+ (0xFCA1, 'M', u'تج'),
+ (0xFCA2, 'M', u'تح'),
+ (0xFCA3, 'M', u'تخ'),
+ (0xFCA4, 'M', u'تم'),
+ (0xFCA5, 'M', u'ته'),
+ (0xFCA6, 'M', u'ثم'),
+ (0xFCA7, 'M', u'جح'),
+ (0xFCA8, 'M', u'جم'),
+ (0xFCA9, 'M', u'حج'),
+ (0xFCAA, 'M', u'حم'),
+ (0xFCAB, 'M', u'خج'),
+ (0xFCAC, 'M', u'خم'),
+ (0xFCAD, 'M', u'سج'),
+ (0xFCAE, 'M', u'سح'),
+ (0xFCAF, 'M', u'سخ'),
+ (0xFCB0, 'M', u'سم'),
+ (0xFCB1, 'M', u'صح'),
+ (0xFCB2, 'M', u'صخ'),
+ (0xFCB3, 'M', u'صم'),
+ (0xFCB4, 'M', u'ضج'),
+ (0xFCB5, 'M', u'ضح'),
+ (0xFCB6, 'M', u'ضخ'),
+ (0xFCB7, 'M', u'ضم'),
+ (0xFCB8, 'M', u'طح'),
+ (0xFCB9, 'M', u'ظم'),
+ (0xFCBA, 'M', u'عج'),
+ (0xFCBB, 'M', u'عم'),
+ (0xFCBC, 'M', u'غج'),
+ (0xFCBD, 'M', u'غم'),
+ (0xFCBE, 'M', u'فج'),
+ (0xFCBF, 'M', u'فح'),
+ (0xFCC0, 'M', u'فخ'),
+ (0xFCC1, 'M', u'فم'),
+ (0xFCC2, 'M', u'قح'),
+ (0xFCC3, 'M', u'قم'),
+ ]
+
+def _seg_47():
+ return [
+ (0xFCC4, 'M', u'كج'),
+ (0xFCC5, 'M', u'كح'),
+ (0xFCC6, 'M', u'كخ'),
+ (0xFCC7, 'M', u'كل'),
+ (0xFCC8, 'M', u'كم'),
+ (0xFCC9, 'M', u'لج'),
+ (0xFCCA, 'M', u'لح'),
+ (0xFCCB, 'M', u'لخ'),
+ (0xFCCC, 'M', u'لم'),
+ (0xFCCD, 'M', u'له'),
+ (0xFCCE, 'M', u'مج'),
+ (0xFCCF, 'M', u'مح'),
+ (0xFCD0, 'M', u'مخ'),
+ (0xFCD1, 'M', u'مم'),
+ (0xFCD2, 'M', u'نج'),
+ (0xFCD3, 'M', u'نح'),
+ (0xFCD4, 'M', u'نخ'),
+ (0xFCD5, 'M', u'نم'),
+ (0xFCD6, 'M', u'نه'),
+ (0xFCD7, 'M', u'هج'),
+ (0xFCD8, 'M', u'هم'),
+ (0xFCD9, 'M', u'هٰ'),
+ (0xFCDA, 'M', u'يج'),
+ (0xFCDB, 'M', u'يح'),
+ (0xFCDC, 'M', u'يخ'),
+ (0xFCDD, 'M', u'يم'),
+ (0xFCDE, 'M', u'يه'),
+ (0xFCDF, 'M', u'ئم'),
+ (0xFCE0, 'M', u'ئه'),
+ (0xFCE1, 'M', u'بم'),
+ (0xFCE2, 'M', u'به'),
+ (0xFCE3, 'M', u'تم'),
+ (0xFCE4, 'M', u'ته'),
+ (0xFCE5, 'M', u'ثم'),
+ (0xFCE6, 'M', u'ثه'),
+ (0xFCE7, 'M', u'سم'),
+ (0xFCE8, 'M', u'سه'),
+ (0xFCE9, 'M', u'شم'),
+ (0xFCEA, 'M', u'شه'),
+ (0xFCEB, 'M', u'كل'),
+ (0xFCEC, 'M', u'كم'),
+ (0xFCED, 'M', u'لم'),
+ (0xFCEE, 'M', u'نم'),
+ (0xFCEF, 'M', u'نه'),
+ (0xFCF0, 'M', u'يم'),
+ (0xFCF1, 'M', u'يه'),
+ (0xFCF2, 'M', u'ـَّ'),
+ (0xFCF3, 'M', u'ـُّ'),
+ (0xFCF4, 'M', u'ـِّ'),
+ (0xFCF5, 'M', u'طى'),
+ (0xFCF6, 'M', u'طي'),
+ (0xFCF7, 'M', u'عى'),
+ (0xFCF8, 'M', u'عي'),
+ (0xFCF9, 'M', u'غى'),
+ (0xFCFA, 'M', u'غي'),
+ (0xFCFB, 'M', u'سى'),
+ (0xFCFC, 'M', u'سي'),
+ (0xFCFD, 'M', u'شى'),
+ (0xFCFE, 'M', u'شي'),
+ (0xFCFF, 'M', u'حى'),
+ (0xFD00, 'M', u'حي'),
+ (0xFD01, 'M', u'جى'),
+ (0xFD02, 'M', u'جي'),
+ (0xFD03, 'M', u'خى'),
+ (0xFD04, 'M', u'خي'),
+ (0xFD05, 'M', u'صى'),
+ (0xFD06, 'M', u'صي'),
+ (0xFD07, 'M', u'ضى'),
+ (0xFD08, 'M', u'ضي'),
+ (0xFD09, 'M', u'شج'),
+ (0xFD0A, 'M', u'شح'),
+ (0xFD0B, 'M', u'شخ'),
+ (0xFD0C, 'M', u'شم'),
+ (0xFD0D, 'M', u'شر'),
+ (0xFD0E, 'M', u'سر'),
+ (0xFD0F, 'M', u'صر'),
+ (0xFD10, 'M', u'ضر'),
+ (0xFD11, 'M', u'طى'),
+ (0xFD12, 'M', u'طي'),
+ (0xFD13, 'M', u'عى'),
+ (0xFD14, 'M', u'عي'),
+ (0xFD15, 'M', u'غى'),
+ (0xFD16, 'M', u'غي'),
+ (0xFD17, 'M', u'سى'),
+ (0xFD18, 'M', u'سي'),
+ (0xFD19, 'M', u'شى'),
+ (0xFD1A, 'M', u'شي'),
+ (0xFD1B, 'M', u'حى'),
+ (0xFD1C, 'M', u'حي'),
+ (0xFD1D, 'M', u'جى'),
+ (0xFD1E, 'M', u'جي'),
+ (0xFD1F, 'M', u'خى'),
+ (0xFD20, 'M', u'خي'),
+ (0xFD21, 'M', u'صى'),
+ (0xFD22, 'M', u'صي'),
+ (0xFD23, 'M', u'ضى'),
+ (0xFD24, 'M', u'ضي'),
+ (0xFD25, 'M', u'شج'),
+ (0xFD26, 'M', u'شح'),
+ (0xFD27, 'M', u'شخ'),
+ ]
+
+def _seg_48():
+ return [
+ (0xFD28, 'M', u'شم'),
+ (0xFD29, 'M', u'شر'),
+ (0xFD2A, 'M', u'سر'),
+ (0xFD2B, 'M', u'صر'),
+ (0xFD2C, 'M', u'ضر'),
+ (0xFD2D, 'M', u'شج'),
+ (0xFD2E, 'M', u'شح'),
+ (0xFD2F, 'M', u'شخ'),
+ (0xFD30, 'M', u'شم'),
+ (0xFD31, 'M', u'سه'),
+ (0xFD32, 'M', u'شه'),
+ (0xFD33, 'M', u'طم'),
+ (0xFD34, 'M', u'سج'),
+ (0xFD35, 'M', u'سح'),
+ (0xFD36, 'M', u'سخ'),
+ (0xFD37, 'M', u'شج'),
+ (0xFD38, 'M', u'شح'),
+ (0xFD39, 'M', u'شخ'),
+ (0xFD3A, 'M', u'طم'),
+ (0xFD3B, 'M', u'ظم'),
+ (0xFD3C, 'M', u'اً'),
+ (0xFD3E, 'V'),
+ (0xFD40, 'X'),
+ (0xFD50, 'M', u'تجم'),
+ (0xFD51, 'M', u'تحج'),
+ (0xFD53, 'M', u'تحم'),
+ (0xFD54, 'M', u'تخم'),
+ (0xFD55, 'M', u'تمج'),
+ (0xFD56, 'M', u'تمح'),
+ (0xFD57, 'M', u'تمخ'),
+ (0xFD58, 'M', u'جمح'),
+ (0xFD5A, 'M', u'حمي'),
+ (0xFD5B, 'M', u'حمى'),
+ (0xFD5C, 'M', u'سحج'),
+ (0xFD5D, 'M', u'سجح'),
+ (0xFD5E, 'M', u'سجى'),
+ (0xFD5F, 'M', u'سمح'),
+ (0xFD61, 'M', u'سمج'),
+ (0xFD62, 'M', u'سمم'),
+ (0xFD64, 'M', u'صحح'),
+ (0xFD66, 'M', u'صمم'),
+ (0xFD67, 'M', u'شحم'),
+ (0xFD69, 'M', u'شجي'),
+ (0xFD6A, 'M', u'شمخ'),
+ (0xFD6C, 'M', u'شمم'),
+ (0xFD6E, 'M', u'ضحى'),
+ (0xFD6F, 'M', u'ضخم'),
+ (0xFD71, 'M', u'طمح'),
+ (0xFD73, 'M', u'طمم'),
+ (0xFD74, 'M', u'طمي'),
+ (0xFD75, 'M', u'عجم'),
+ (0xFD76, 'M', u'عمم'),
+ (0xFD78, 'M', u'عمى'),
+ (0xFD79, 'M', u'غمم'),
+ (0xFD7A, 'M', u'غمي'),
+ (0xFD7B, 'M', u'غمى'),
+ (0xFD7C, 'M', u'فخم'),
+ (0xFD7E, 'M', u'قمح'),
+ (0xFD7F, 'M', u'قمم'),
+ (0xFD80, 'M', u'لحم'),
+ (0xFD81, 'M', u'لحي'),
+ (0xFD82, 'M', u'لحى'),
+ (0xFD83, 'M', u'لجج'),
+ (0xFD85, 'M', u'لخم'),
+ (0xFD87, 'M', u'لمح'),
+ (0xFD89, 'M', u'محج'),
+ (0xFD8A, 'M', u'محم'),
+ (0xFD8B, 'M', u'محي'),
+ (0xFD8C, 'M', u'مجح'),
+ (0xFD8D, 'M', u'مجم'),
+ (0xFD8E, 'M', u'مخج'),
+ (0xFD8F, 'M', u'مخم'),
+ (0xFD90, 'X'),
+ (0xFD92, 'M', u'مجخ'),
+ (0xFD93, 'M', u'همج'),
+ (0xFD94, 'M', u'همم'),
+ (0xFD95, 'M', u'نحم'),
+ (0xFD96, 'M', u'نحى'),
+ (0xFD97, 'M', u'نجم'),
+ (0xFD99, 'M', u'نجى'),
+ (0xFD9A, 'M', u'نمي'),
+ (0xFD9B, 'M', u'نمى'),
+ (0xFD9C, 'M', u'يمم'),
+ (0xFD9E, 'M', u'بخي'),
+ (0xFD9F, 'M', u'تجي'),
+ (0xFDA0, 'M', u'تجى'),
+ (0xFDA1, 'M', u'تخي'),
+ (0xFDA2, 'M', u'تخى'),
+ (0xFDA3, 'M', u'تمي'),
+ (0xFDA4, 'M', u'تمى'),
+ (0xFDA5, 'M', u'جمي'),
+ (0xFDA6, 'M', u'جحى'),
+ (0xFDA7, 'M', u'جمى'),
+ (0xFDA8, 'M', u'سخى'),
+ (0xFDA9, 'M', u'صحي'),
+ (0xFDAA, 'M', u'شحي'),
+ (0xFDAB, 'M', u'ضحي'),
+ (0xFDAC, 'M', u'لجي'),
+ (0xFDAD, 'M', u'لمي'),
+ (0xFDAE, 'M', u'يحي'),
+ ]
+
+def _seg_49():
+ return [
+ (0xFDAF, 'M', u'يجي'),
+ (0xFDB0, 'M', u'يمي'),
+ (0xFDB1, 'M', u'ممي'),
+ (0xFDB2, 'M', u'قمي'),
+ (0xFDB3, 'M', u'نحي'),
+ (0xFDB4, 'M', u'قمح'),
+ (0xFDB5, 'M', u'لحم'),
+ (0xFDB6, 'M', u'عمي'),
+ (0xFDB7, 'M', u'كمي'),
+ (0xFDB8, 'M', u'نجح'),
+ (0xFDB9, 'M', u'مخي'),
+ (0xFDBA, 'M', u'لجم'),
+ (0xFDBB, 'M', u'كمم'),
+ (0xFDBC, 'M', u'لجم'),
+ (0xFDBD, 'M', u'نجح'),
+ (0xFDBE, 'M', u'جحي'),
+ (0xFDBF, 'M', u'حجي'),
+ (0xFDC0, 'M', u'مجي'),
+ (0xFDC1, 'M', u'فمي'),
+ (0xFDC2, 'M', u'بحي'),
+ (0xFDC3, 'M', u'كمم'),
+ (0xFDC4, 'M', u'عجم'),
+ (0xFDC5, 'M', u'صمم'),
+ (0xFDC6, 'M', u'سخي'),
+ (0xFDC7, 'M', u'نجي'),
+ (0xFDC8, 'X'),
+ (0xFDF0, 'M', u'صلے'),
+ (0xFDF1, 'M', u'قلے'),
+ (0xFDF2, 'M', u'الله'),
+ (0xFDF3, 'M', u'اكبر'),
+ (0xFDF4, 'M', u'محمد'),
+ (0xFDF5, 'M', u'صلعم'),
+ (0xFDF6, 'M', u'رسول'),
+ (0xFDF7, 'M', u'عليه'),
+ (0xFDF8, 'M', u'وسلم'),
+ (0xFDF9, 'M', u'صلى'),
+ (0xFDFA, '3', u'صلى الله عليه وسلم'),
+ (0xFDFB, '3', u'جل جلاله'),
+ (0xFDFC, 'M', u'ریال'),
+ (0xFDFD, 'V'),
+ (0xFDFE, 'X'),
+ (0xFE00, 'I'),
+ (0xFE10, '3', u','),
+ (0xFE11, 'M', u'、'),
+ (0xFE12, 'X'),
+ (0xFE13, '3', u':'),
+ (0xFE14, '3', u';'),
+ (0xFE15, '3', u'!'),
+ (0xFE16, '3', u'?'),
+ (0xFE17, 'M', u'〖'),
+ (0xFE18, 'M', u'〗'),
+ (0xFE19, 'X'),
+ (0xFE20, 'V'),
+ (0xFE30, 'X'),
+ (0xFE31, 'M', u'—'),
+ (0xFE32, 'M', u'–'),
+ (0xFE33, '3', u'_'),
+ (0xFE35, '3', u'('),
+ (0xFE36, '3', u')'),
+ (0xFE37, '3', u'{'),
+ (0xFE38, '3', u'}'),
+ (0xFE39, 'M', u'〔'),
+ (0xFE3A, 'M', u'〕'),
+ (0xFE3B, 'M', u'【'),
+ (0xFE3C, 'M', u'】'),
+ (0xFE3D, 'M', u'《'),
+ (0xFE3E, 'M', u'》'),
+ (0xFE3F, 'M', u'〈'),
+ (0xFE40, 'M', u'〉'),
+ (0xFE41, 'M', u'「'),
+ (0xFE42, 'M', u'」'),
+ (0xFE43, 'M', u'『'),
+ (0xFE44, 'M', u'』'),
+ (0xFE45, 'V'),
+ (0xFE47, '3', u'['),
+ (0xFE48, '3', u']'),
+ (0xFE49, '3', u' ̅'),
+ (0xFE4D, '3', u'_'),
+ (0xFE50, '3', u','),
+ (0xFE51, 'M', u'、'),
+ (0xFE52, 'X'),
+ (0xFE54, '3', u';'),
+ (0xFE55, '3', u':'),
+ (0xFE56, '3', u'?'),
+ (0xFE57, '3', u'!'),
+ (0xFE58, 'M', u'—'),
+ (0xFE59, '3', u'('),
+ (0xFE5A, '3', u')'),
+ (0xFE5B, '3', u'{'),
+ (0xFE5C, '3', u'}'),
+ (0xFE5D, 'M', u'〔'),
+ (0xFE5E, 'M', u'〕'),
+ (0xFE5F, '3', u'#'),
+ (0xFE60, '3', u'&'),
+ (0xFE61, '3', u'*'),
+ (0xFE62, '3', u'+'),
+ (0xFE63, 'M', u'-'),
+ (0xFE64, '3', u'<'),
+ (0xFE65, '3', u'>'),
+ (0xFE66, '3', u'='),
+ ]
+
+def _seg_50():
+ return [
+ (0xFE67, 'X'),
+ (0xFE68, '3', u'\\'),
+ (0xFE69, '3', u'$'),
+ (0xFE6A, '3', u'%'),
+ (0xFE6B, '3', u'@'),
+ (0xFE6C, 'X'),
+ (0xFE70, '3', u' ً'),
+ (0xFE71, 'M', u'ـً'),
+ (0xFE72, '3', u' ٌ'),
+ (0xFE73, 'V'),
+ (0xFE74, '3', u' ٍ'),
+ (0xFE75, 'X'),
+ (0xFE76, '3', u' َ'),
+ (0xFE77, 'M', u'ـَ'),
+ (0xFE78, '3', u' ُ'),
+ (0xFE79, 'M', u'ـُ'),
+ (0xFE7A, '3', u' ِ'),
+ (0xFE7B, 'M', u'ـِ'),
+ (0xFE7C, '3', u' ّ'),
+ (0xFE7D, 'M', u'ـّ'),
+ (0xFE7E, '3', u' ْ'),
+ (0xFE7F, 'M', u'ـْ'),
+ (0xFE80, 'M', u'ء'),
+ (0xFE81, 'M', u'آ'),
+ (0xFE83, 'M', u'أ'),
+ (0xFE85, 'M', u'ؤ'),
+ (0xFE87, 'M', u'إ'),
+ (0xFE89, 'M', u'ئ'),
+ (0xFE8D, 'M', u'ا'),
+ (0xFE8F, 'M', u'ب'),
+ (0xFE93, 'M', u'ة'),
+ (0xFE95, 'M', u'ت'),
+ (0xFE99, 'M', u'ث'),
+ (0xFE9D, 'M', u'ج'),
+ (0xFEA1, 'M', u'ح'),
+ (0xFEA5, 'M', u'خ'),
+ (0xFEA9, 'M', u'د'),
+ (0xFEAB, 'M', u'ذ'),
+ (0xFEAD, 'M', u'ر'),
+ (0xFEAF, 'M', u'ز'),
+ (0xFEB1, 'M', u'س'),
+ (0xFEB5, 'M', u'ش'),
+ (0xFEB9, 'M', u'ص'),
+ (0xFEBD, 'M', u'ض'),
+ (0xFEC1, 'M', u'ط'),
+ (0xFEC5, 'M', u'ظ'),
+ (0xFEC9, 'M', u'ع'),
+ (0xFECD, 'M', u'غ'),
+ (0xFED1, 'M', u'ف'),
+ (0xFED5, 'M', u'ق'),
+ (0xFED9, 'M', u'ك'),
+ (0xFEDD, 'M', u'ل'),
+ (0xFEE1, 'M', u'م'),
+ (0xFEE5, 'M', u'ن'),
+ (0xFEE9, 'M', u'ه'),
+ (0xFEED, 'M', u'و'),
+ (0xFEEF, 'M', u'ى'),
+ (0xFEF1, 'M', u'ي'),
+ (0xFEF5, 'M', u'لآ'),
+ (0xFEF7, 'M', u'لأ'),
+ (0xFEF9, 'M', u'لإ'),
+ (0xFEFB, 'M', u'لا'),
+ (0xFEFD, 'X'),
+ (0xFEFF, 'I'),
+ (0xFF00, 'X'),
+ (0xFF01, '3', u'!'),
+ (0xFF02, '3', u'"'),
+ (0xFF03, '3', u'#'),
+ (0xFF04, '3', u'$'),
+ (0xFF05, '3', u'%'),
+ (0xFF06, '3', u'&'),
+ (0xFF07, '3', u'\''),
+ (0xFF08, '3', u'('),
+ (0xFF09, '3', u')'),
+ (0xFF0A, '3', u'*'),
+ (0xFF0B, '3', u'+'),
+ (0xFF0C, '3', u','),
+ (0xFF0D, 'M', u'-'),
+ (0xFF0E, 'M', u'.'),
+ (0xFF0F, '3', u'/'),
+ (0xFF10, 'M', u'0'),
+ (0xFF11, 'M', u'1'),
+ (0xFF12, 'M', u'2'),
+ (0xFF13, 'M', u'3'),
+ (0xFF14, 'M', u'4'),
+ (0xFF15, 'M', u'5'),
+ (0xFF16, 'M', u'6'),
+ (0xFF17, 'M', u'7'),
+ (0xFF18, 'M', u'8'),
+ (0xFF19, 'M', u'9'),
+ (0xFF1A, '3', u':'),
+ (0xFF1B, '3', u';'),
+ (0xFF1C, '3', u'<'),
+ (0xFF1D, '3', u'='),
+ (0xFF1E, '3', u'>'),
+ (0xFF1F, '3', u'?'),
+ (0xFF20, '3', u'@'),
+ (0xFF21, 'M', u'a'),
+ (0xFF22, 'M', u'b'),
+ (0xFF23, 'M', u'c'),
+ ]
+
+def _seg_51():
+ return [
+ (0xFF24, 'M', u'd'),
+ (0xFF25, 'M', u'e'),
+ (0xFF26, 'M', u'f'),
+ (0xFF27, 'M', u'g'),
+ (0xFF28, 'M', u'h'),
+ (0xFF29, 'M', u'i'),
+ (0xFF2A, 'M', u'j'),
+ (0xFF2B, 'M', u'k'),
+ (0xFF2C, 'M', u'l'),
+ (0xFF2D, 'M', u'm'),
+ (0xFF2E, 'M', u'n'),
+ (0xFF2F, 'M', u'o'),
+ (0xFF30, 'M', u'p'),
+ (0xFF31, 'M', u'q'),
+ (0xFF32, 'M', u'r'),
+ (0xFF33, 'M', u's'),
+ (0xFF34, 'M', u't'),
+ (0xFF35, 'M', u'u'),
+ (0xFF36, 'M', u'v'),
+ (0xFF37, 'M', u'w'),
+ (0xFF38, 'M', u'x'),
+ (0xFF39, 'M', u'y'),
+ (0xFF3A, 'M', u'z'),
+ (0xFF3B, '3', u'['),
+ (0xFF3C, '3', u'\\'),
+ (0xFF3D, '3', u']'),
+ (0xFF3E, '3', u'^'),
+ (0xFF3F, '3', u'_'),
+ (0xFF40, '3', u'`'),
+ (0xFF41, 'M', u'a'),
+ (0xFF42, 'M', u'b'),
+ (0xFF43, 'M', u'c'),
+ (0xFF44, 'M', u'd'),
+ (0xFF45, 'M', u'e'),
+ (0xFF46, 'M', u'f'),
+ (0xFF47, 'M', u'g'),
+ (0xFF48, 'M', u'h'),
+ (0xFF49, 'M', u'i'),
+ (0xFF4A, 'M', u'j'),
+ (0xFF4B, 'M', u'k'),
+ (0xFF4C, 'M', u'l'),
+ (0xFF4D, 'M', u'm'),
+ (0xFF4E, 'M', u'n'),
+ (0xFF4F, 'M', u'o'),
+ (0xFF50, 'M', u'p'),
+ (0xFF51, 'M', u'q'),
+ (0xFF52, 'M', u'r'),
+ (0xFF53, 'M', u's'),
+ (0xFF54, 'M', u't'),
+ (0xFF55, 'M', u'u'),
+ (0xFF56, 'M', u'v'),
+ (0xFF57, 'M', u'w'),
+ (0xFF58, 'M', u'x'),
+ (0xFF59, 'M', u'y'),
+ (0xFF5A, 'M', u'z'),
+ (0xFF5B, '3', u'{'),
+ (0xFF5C, '3', u'|'),
+ (0xFF5D, '3', u'}'),
+ (0xFF5E, '3', u'~'),
+ (0xFF5F, 'M', u'⦅'),
+ (0xFF60, 'M', u'⦆'),
+ (0xFF61, 'M', u'.'),
+ (0xFF62, 'M', u'「'),
+ (0xFF63, 'M', u'」'),
+ (0xFF64, 'M', u'、'),
+ (0xFF65, 'M', u'・'),
+ (0xFF66, 'M', u'ヲ'),
+ (0xFF67, 'M', u'ァ'),
+ (0xFF68, 'M', u'ィ'),
+ (0xFF69, 'M', u'ゥ'),
+ (0xFF6A, 'M', u'ェ'),
+ (0xFF6B, 'M', u'ォ'),
+ (0xFF6C, 'M', u'ャ'),
+ (0xFF6D, 'M', u'ュ'),
+ (0xFF6E, 'M', u'ョ'),
+ (0xFF6F, 'M', u'ッ'),
+ (0xFF70, 'M', u'ー'),
+ (0xFF71, 'M', u'ア'),
+ (0xFF72, 'M', u'イ'),
+ (0xFF73, 'M', u'ウ'),
+ (0xFF74, 'M', u'エ'),
+ (0xFF75, 'M', u'オ'),
+ (0xFF76, 'M', u'カ'),
+ (0xFF77, 'M', u'キ'),
+ (0xFF78, 'M', u'ク'),
+ (0xFF79, 'M', u'ケ'),
+ (0xFF7A, 'M', u'コ'),
+ (0xFF7B, 'M', u'サ'),
+ (0xFF7C, 'M', u'シ'),
+ (0xFF7D, 'M', u'ス'),
+ (0xFF7E, 'M', u'セ'),
+ (0xFF7F, 'M', u'ソ'),
+ (0xFF80, 'M', u'タ'),
+ (0xFF81, 'M', u'チ'),
+ (0xFF82, 'M', u'ツ'),
+ (0xFF83, 'M', u'テ'),
+ (0xFF84, 'M', u'ト'),
+ (0xFF85, 'M', u'ナ'),
+ (0xFF86, 'M', u'ニ'),
+ (0xFF87, 'M', u'ヌ'),
+ ]
+
+def _seg_52():
+ return [
+ (0xFF88, 'M', u'ネ'),
+ (0xFF89, 'M', u'ノ'),
+ (0xFF8A, 'M', u'ハ'),
+ (0xFF8B, 'M', u'ヒ'),
+ (0xFF8C, 'M', u'フ'),
+ (0xFF8D, 'M', u'ヘ'),
+ (0xFF8E, 'M', u'ホ'),
+ (0xFF8F, 'M', u'マ'),
+ (0xFF90, 'M', u'ミ'),
+ (0xFF91, 'M', u'ム'),
+ (0xFF92, 'M', u'メ'),
+ (0xFF93, 'M', u'モ'),
+ (0xFF94, 'M', u'ヤ'),
+ (0xFF95, 'M', u'ユ'),
+ (0xFF96, 'M', u'ヨ'),
+ (0xFF97, 'M', u'ラ'),
+ (0xFF98, 'M', u'リ'),
+ (0xFF99, 'M', u'ル'),
+ (0xFF9A, 'M', u'レ'),
+ (0xFF9B, 'M', u'ロ'),
+ (0xFF9C, 'M', u'ワ'),
+ (0xFF9D, 'M', u'ン'),
+ (0xFF9E, 'M', u'゙'),
+ (0xFF9F, 'M', u'゚'),
+ (0xFFA0, 'X'),
+ (0xFFA1, 'M', u'ᄀ'),
+ (0xFFA2, 'M', u'ᄁ'),
+ (0xFFA3, 'M', u'ᆪ'),
+ (0xFFA4, 'M', u'ᄂ'),
+ (0xFFA5, 'M', u'ᆬ'),
+ (0xFFA6, 'M', u'ᆭ'),
+ (0xFFA7, 'M', u'ᄃ'),
+ (0xFFA8, 'M', u'ᄄ'),
+ (0xFFA9, 'M', u'ᄅ'),
+ (0xFFAA, 'M', u'ᆰ'),
+ (0xFFAB, 'M', u'ᆱ'),
+ (0xFFAC, 'M', u'ᆲ'),
+ (0xFFAD, 'M', u'ᆳ'),
+ (0xFFAE, 'M', u'ᆴ'),
+ (0xFFAF, 'M', u'ᆵ'),
+ (0xFFB0, 'M', u'ᄚ'),
+ (0xFFB1, 'M', u'ᄆ'),
+ (0xFFB2, 'M', u'ᄇ'),
+ (0xFFB3, 'M', u'ᄈ'),
+ (0xFFB4, 'M', u'ᄡ'),
+ (0xFFB5, 'M', u'ᄉ'),
+ (0xFFB6, 'M', u'ᄊ'),
+ (0xFFB7, 'M', u'ᄋ'),
+ (0xFFB8, 'M', u'ᄌ'),
+ (0xFFB9, 'M', u'ᄍ'),
+ (0xFFBA, 'M', u'ᄎ'),
+ (0xFFBB, 'M', u'ᄏ'),
+ (0xFFBC, 'M', u'ᄐ'),
+ (0xFFBD, 'M', u'ᄑ'),
+ (0xFFBE, 'M', u'ᄒ'),
+ (0xFFBF, 'X'),
+ (0xFFC2, 'M', u'ᅡ'),
+ (0xFFC3, 'M', u'ᅢ'),
+ (0xFFC4, 'M', u'ᅣ'),
+ (0xFFC5, 'M', u'ᅤ'),
+ (0xFFC6, 'M', u'ᅥ'),
+ (0xFFC7, 'M', u'ᅦ'),
+ (0xFFC8, 'X'),
+ (0xFFCA, 'M', u'ᅧ'),
+ (0xFFCB, 'M', u'ᅨ'),
+ (0xFFCC, 'M', u'ᅩ'),
+ (0xFFCD, 'M', u'ᅪ'),
+ (0xFFCE, 'M', u'ᅫ'),
+ (0xFFCF, 'M', u'ᅬ'),
+ (0xFFD0, 'X'),
+ (0xFFD2, 'M', u'ᅭ'),
+ (0xFFD3, 'M', u'ᅮ'),
+ (0xFFD4, 'M', u'ᅯ'),
+ (0xFFD5, 'M', u'ᅰ'),
+ (0xFFD6, 'M', u'ᅱ'),
+ (0xFFD7, 'M', u'ᅲ'),
+ (0xFFD8, 'X'),
+ (0xFFDA, 'M', u'ᅳ'),
+ (0xFFDB, 'M', u'ᅴ'),
+ (0xFFDC, 'M', u'ᅵ'),
+ (0xFFDD, 'X'),
+ (0xFFE0, 'M', u'¢'),
+ (0xFFE1, 'M', u'£'),
+ (0xFFE2, 'M', u'¬'),
+ (0xFFE3, '3', u' ̄'),
+ (0xFFE4, 'M', u'¦'),
+ (0xFFE5, 'M', u'¥'),
+ (0xFFE6, 'M', u'₩'),
+ (0xFFE7, 'X'),
+ (0xFFE8, 'M', u'│'),
+ (0xFFE9, 'M', u'←'),
+ (0xFFEA, 'M', u'↑'),
+ (0xFFEB, 'M', u'→'),
+ (0xFFEC, 'M', u'↓'),
+ (0xFFED, 'M', u'■'),
+ (0xFFEE, 'M', u'○'),
+ (0xFFEF, 'X'),
+ (0x10000, 'V'),
+ (0x1000C, 'X'),
+ (0x1000D, 'V'),
+ ]
+
+def _seg_53():
+ return [
+ (0x10027, 'X'),
+ (0x10028, 'V'),
+ (0x1003B, 'X'),
+ (0x1003C, 'V'),
+ (0x1003E, 'X'),
+ (0x1003F, 'V'),
+ (0x1004E, 'X'),
+ (0x10050, 'V'),
+ (0x1005E, 'X'),
+ (0x10080, 'V'),
+ (0x100FB, 'X'),
+ (0x10100, 'V'),
+ (0x10103, 'X'),
+ (0x10107, 'V'),
+ (0x10134, 'X'),
+ (0x10137, 'V'),
+ (0x1018F, 'X'),
+ (0x10190, 'V'),
+ (0x1019D, 'X'),
+ (0x101A0, 'V'),
+ (0x101A1, 'X'),
+ (0x101D0, 'V'),
+ (0x101FE, 'X'),
+ (0x10280, 'V'),
+ (0x1029D, 'X'),
+ (0x102A0, 'V'),
+ (0x102D1, 'X'),
+ (0x102E0, 'V'),
+ (0x102FC, 'X'),
+ (0x10300, 'V'),
+ (0x10324, 'X'),
+ (0x1032D, 'V'),
+ (0x1034B, 'X'),
+ (0x10350, 'V'),
+ (0x1037B, 'X'),
+ (0x10380, 'V'),
+ (0x1039E, 'X'),
+ (0x1039F, 'V'),
+ (0x103C4, 'X'),
+ (0x103C8, 'V'),
+ (0x103D6, 'X'),
+ (0x10400, 'M', u'𐐨'),
+ (0x10401, 'M', u'𐐩'),
+ (0x10402, 'M', u'𐐪'),
+ (0x10403, 'M', u'𐐫'),
+ (0x10404, 'M', u'𐐬'),
+ (0x10405, 'M', u'𐐭'),
+ (0x10406, 'M', u'𐐮'),
+ (0x10407, 'M', u'𐐯'),
+ (0x10408, 'M', u'𐐰'),
+ (0x10409, 'M', u'𐐱'),
+ (0x1040A, 'M', u'𐐲'),
+ (0x1040B, 'M', u'𐐳'),
+ (0x1040C, 'M', u'𐐴'),
+ (0x1040D, 'M', u'𐐵'),
+ (0x1040E, 'M', u'𐐶'),
+ (0x1040F, 'M', u'𐐷'),
+ (0x10410, 'M', u'𐐸'),
+ (0x10411, 'M', u'𐐹'),
+ (0x10412, 'M', u'𐐺'),
+ (0x10413, 'M', u'𐐻'),
+ (0x10414, 'M', u'𐐼'),
+ (0x10415, 'M', u'𐐽'),
+ (0x10416, 'M', u'𐐾'),
+ (0x10417, 'M', u'𐐿'),
+ (0x10418, 'M', u'𐑀'),
+ (0x10419, 'M', u'𐑁'),
+ (0x1041A, 'M', u'𐑂'),
+ (0x1041B, 'M', u'𐑃'),
+ (0x1041C, 'M', u'𐑄'),
+ (0x1041D, 'M', u'𐑅'),
+ (0x1041E, 'M', u'𐑆'),
+ (0x1041F, 'M', u'𐑇'),
+ (0x10420, 'M', u'𐑈'),
+ (0x10421, 'M', u'𐑉'),
+ (0x10422, 'M', u'𐑊'),
+ (0x10423, 'M', u'𐑋'),
+ (0x10424, 'M', u'𐑌'),
+ (0x10425, 'M', u'𐑍'),
+ (0x10426, 'M', u'𐑎'),
+ (0x10427, 'M', u'𐑏'),
+ (0x10428, 'V'),
+ (0x1049E, 'X'),
+ (0x104A0, 'V'),
+ (0x104AA, 'X'),
+ (0x104B0, 'M', u'𐓘'),
+ (0x104B1, 'M', u'𐓙'),
+ (0x104B2, 'M', u'𐓚'),
+ (0x104B3, 'M', u'𐓛'),
+ (0x104B4, 'M', u'𐓜'),
+ (0x104B5, 'M', u'𐓝'),
+ (0x104B6, 'M', u'𐓞'),
+ (0x104B7, 'M', u'𐓟'),
+ (0x104B8, 'M', u'𐓠'),
+ (0x104B9, 'M', u'𐓡'),
+ (0x104BA, 'M', u'𐓢'),
+ (0x104BB, 'M', u'𐓣'),
+ (0x104BC, 'M', u'𐓤'),
+ (0x104BD, 'M', u'𐓥'),
+ (0x104BE, 'M', u'𐓦'),
+ ]
+
+def _seg_54():
+ return [
+ (0x104BF, 'M', u'𐓧'),
+ (0x104C0, 'M', u'𐓨'),
+ (0x104C1, 'M', u'𐓩'),
+ (0x104C2, 'M', u'𐓪'),
+ (0x104C3, 'M', u'𐓫'),
+ (0x104C4, 'M', u'𐓬'),
+ (0x104C5, 'M', u'𐓭'),
+ (0x104C6, 'M', u'𐓮'),
+ (0x104C7, 'M', u'𐓯'),
+ (0x104C8, 'M', u'𐓰'),
+ (0x104C9, 'M', u'𐓱'),
+ (0x104CA, 'M', u'𐓲'),
+ (0x104CB, 'M', u'𐓳'),
+ (0x104CC, 'M', u'𐓴'),
+ (0x104CD, 'M', u'𐓵'),
+ (0x104CE, 'M', u'𐓶'),
+ (0x104CF, 'M', u'𐓷'),
+ (0x104D0, 'M', u'𐓸'),
+ (0x104D1, 'M', u'𐓹'),
+ (0x104D2, 'M', u'𐓺'),
+ (0x104D3, 'M', u'𐓻'),
+ (0x104D4, 'X'),
+ (0x104D8, 'V'),
+ (0x104FC, 'X'),
+ (0x10500, 'V'),
+ (0x10528, 'X'),
+ (0x10530, 'V'),
+ (0x10564, 'X'),
+ (0x1056F, 'V'),
+ (0x10570, 'X'),
+ (0x10600, 'V'),
+ (0x10737, 'X'),
+ (0x10740, 'V'),
+ (0x10756, 'X'),
+ (0x10760, 'V'),
+ (0x10768, 'X'),
+ (0x10800, 'V'),
+ (0x10806, 'X'),
+ (0x10808, 'V'),
+ (0x10809, 'X'),
+ (0x1080A, 'V'),
+ (0x10836, 'X'),
+ (0x10837, 'V'),
+ (0x10839, 'X'),
+ (0x1083C, 'V'),
+ (0x1083D, 'X'),
+ (0x1083F, 'V'),
+ (0x10856, 'X'),
+ (0x10857, 'V'),
+ (0x1089F, 'X'),
+ (0x108A7, 'V'),
+ (0x108B0, 'X'),
+ (0x108E0, 'V'),
+ (0x108F3, 'X'),
+ (0x108F4, 'V'),
+ (0x108F6, 'X'),
+ (0x108FB, 'V'),
+ (0x1091C, 'X'),
+ (0x1091F, 'V'),
+ (0x1093A, 'X'),
+ (0x1093F, 'V'),
+ (0x10940, 'X'),
+ (0x10980, 'V'),
+ (0x109B8, 'X'),
+ (0x109BC, 'V'),
+ (0x109D0, 'X'),
+ (0x109D2, 'V'),
+ (0x10A04, 'X'),
+ (0x10A05, 'V'),
+ (0x10A07, 'X'),
+ (0x10A0C, 'V'),
+ (0x10A14, 'X'),
+ (0x10A15, 'V'),
+ (0x10A18, 'X'),
+ (0x10A19, 'V'),
+ (0x10A36, 'X'),
+ (0x10A38, 'V'),
+ (0x10A3B, 'X'),
+ (0x10A3F, 'V'),
+ (0x10A49, 'X'),
+ (0x10A50, 'V'),
+ (0x10A59, 'X'),
+ (0x10A60, 'V'),
+ (0x10AA0, 'X'),
+ (0x10AC0, 'V'),
+ (0x10AE7, 'X'),
+ (0x10AEB, 'V'),
+ (0x10AF7, 'X'),
+ (0x10B00, 'V'),
+ (0x10B36, 'X'),
+ (0x10B39, 'V'),
+ (0x10B56, 'X'),
+ (0x10B58, 'V'),
+ (0x10B73, 'X'),
+ (0x10B78, 'V'),
+ (0x10B92, 'X'),
+ (0x10B99, 'V'),
+ (0x10B9D, 'X'),
+ (0x10BA9, 'V'),
+ (0x10BB0, 'X'),
+ ]
+
+def _seg_55():
+ return [
+ (0x10C00, 'V'),
+ (0x10C49, 'X'),
+ (0x10C80, 'M', u'𐳀'),
+ (0x10C81, 'M', u'𐳁'),
+ (0x10C82, 'M', u'𐳂'),
+ (0x10C83, 'M', u'𐳃'),
+ (0x10C84, 'M', u'𐳄'),
+ (0x10C85, 'M', u'𐳅'),
+ (0x10C86, 'M', u'𐳆'),
+ (0x10C87, 'M', u'𐳇'),
+ (0x10C88, 'M', u'𐳈'),
+ (0x10C89, 'M', u'𐳉'),
+ (0x10C8A, 'M', u'𐳊'),
+ (0x10C8B, 'M', u'𐳋'),
+ (0x10C8C, 'M', u'𐳌'),
+ (0x10C8D, 'M', u'𐳍'),
+ (0x10C8E, 'M', u'𐳎'),
+ (0x10C8F, 'M', u'𐳏'),
+ (0x10C90, 'M', u'𐳐'),
+ (0x10C91, 'M', u'𐳑'),
+ (0x10C92, 'M', u'𐳒'),
+ (0x10C93, 'M', u'𐳓'),
+ (0x10C94, 'M', u'𐳔'),
+ (0x10C95, 'M', u'𐳕'),
+ (0x10C96, 'M', u'𐳖'),
+ (0x10C97, 'M', u'𐳗'),
+ (0x10C98, 'M', u'𐳘'),
+ (0x10C99, 'M', u'𐳙'),
+ (0x10C9A, 'M', u'𐳚'),
+ (0x10C9B, 'M', u'𐳛'),
+ (0x10C9C, 'M', u'𐳜'),
+ (0x10C9D, 'M', u'𐳝'),
+ (0x10C9E, 'M', u'𐳞'),
+ (0x10C9F, 'M', u'𐳟'),
+ (0x10CA0, 'M', u'𐳠'),
+ (0x10CA1, 'M', u'𐳡'),
+ (0x10CA2, 'M', u'𐳢'),
+ (0x10CA3, 'M', u'𐳣'),
+ (0x10CA4, 'M', u'𐳤'),
+ (0x10CA5, 'M', u'𐳥'),
+ (0x10CA6, 'M', u'𐳦'),
+ (0x10CA7, 'M', u'𐳧'),
+ (0x10CA8, 'M', u'𐳨'),
+ (0x10CA9, 'M', u'𐳩'),
+ (0x10CAA, 'M', u'𐳪'),
+ (0x10CAB, 'M', u'𐳫'),
+ (0x10CAC, 'M', u'𐳬'),
+ (0x10CAD, 'M', u'𐳭'),
+ (0x10CAE, 'M', u'𐳮'),
+ (0x10CAF, 'M', u'𐳯'),
+ (0x10CB0, 'M', u'𐳰'),
+ (0x10CB1, 'M', u'𐳱'),
+ (0x10CB2, 'M', u'𐳲'),
+ (0x10CB3, 'X'),
+ (0x10CC0, 'V'),
+ (0x10CF3, 'X'),
+ (0x10CFA, 'V'),
+ (0x10D28, 'X'),
+ (0x10D30, 'V'),
+ (0x10D3A, 'X'),
+ (0x10E60, 'V'),
+ (0x10E7F, 'X'),
+ (0x10E80, 'V'),
+ (0x10EAA, 'X'),
+ (0x10EAB, 'V'),
+ (0x10EAE, 'X'),
+ (0x10EB0, 'V'),
+ (0x10EB2, 'X'),
+ (0x10F00, 'V'),
+ (0x10F28, 'X'),
+ (0x10F30, 'V'),
+ (0x10F5A, 'X'),
+ (0x10FB0, 'V'),
+ (0x10FCC, 'X'),
+ (0x10FE0, 'V'),
+ (0x10FF7, 'X'),
+ (0x11000, 'V'),
+ (0x1104E, 'X'),
+ (0x11052, 'V'),
+ (0x11070, 'X'),
+ (0x1107F, 'V'),
+ (0x110BD, 'X'),
+ (0x110BE, 'V'),
+ (0x110C2, 'X'),
+ (0x110D0, 'V'),
+ (0x110E9, 'X'),
+ (0x110F0, 'V'),
+ (0x110FA, 'X'),
+ (0x11100, 'V'),
+ (0x11135, 'X'),
+ (0x11136, 'V'),
+ (0x11148, 'X'),
+ (0x11150, 'V'),
+ (0x11177, 'X'),
+ (0x11180, 'V'),
+ (0x111E0, 'X'),
+ (0x111E1, 'V'),
+ (0x111F5, 'X'),
+ (0x11200, 'V'),
+ (0x11212, 'X'),
+ ]
+
+def _seg_56():
+ return [
+ (0x11213, 'V'),
+ (0x1123F, 'X'),
+ (0x11280, 'V'),
+ (0x11287, 'X'),
+ (0x11288, 'V'),
+ (0x11289, 'X'),
+ (0x1128A, 'V'),
+ (0x1128E, 'X'),
+ (0x1128F, 'V'),
+ (0x1129E, 'X'),
+ (0x1129F, 'V'),
+ (0x112AA, 'X'),
+ (0x112B0, 'V'),
+ (0x112EB, 'X'),
+ (0x112F0, 'V'),
+ (0x112FA, 'X'),
+ (0x11300, 'V'),
+ (0x11304, 'X'),
+ (0x11305, 'V'),
+ (0x1130D, 'X'),
+ (0x1130F, 'V'),
+ (0x11311, 'X'),
+ (0x11313, 'V'),
+ (0x11329, 'X'),
+ (0x1132A, 'V'),
+ (0x11331, 'X'),
+ (0x11332, 'V'),
+ (0x11334, 'X'),
+ (0x11335, 'V'),
+ (0x1133A, 'X'),
+ (0x1133B, 'V'),
+ (0x11345, 'X'),
+ (0x11347, 'V'),
+ (0x11349, 'X'),
+ (0x1134B, 'V'),
+ (0x1134E, 'X'),
+ (0x11350, 'V'),
+ (0x11351, 'X'),
+ (0x11357, 'V'),
+ (0x11358, 'X'),
+ (0x1135D, 'V'),
+ (0x11364, 'X'),
+ (0x11366, 'V'),
+ (0x1136D, 'X'),
+ (0x11370, 'V'),
+ (0x11375, 'X'),
+ (0x11400, 'V'),
+ (0x1145C, 'X'),
+ (0x1145D, 'V'),
+ (0x11462, 'X'),
+ (0x11480, 'V'),
+ (0x114C8, 'X'),
+ (0x114D0, 'V'),
+ (0x114DA, 'X'),
+ (0x11580, 'V'),
+ (0x115B6, 'X'),
+ (0x115B8, 'V'),
+ (0x115DE, 'X'),
+ (0x11600, 'V'),
+ (0x11645, 'X'),
+ (0x11650, 'V'),
+ (0x1165A, 'X'),
+ (0x11660, 'V'),
+ (0x1166D, 'X'),
+ (0x11680, 'V'),
+ (0x116B9, 'X'),
+ (0x116C0, 'V'),
+ (0x116CA, 'X'),
+ (0x11700, 'V'),
+ (0x1171B, 'X'),
+ (0x1171D, 'V'),
+ (0x1172C, 'X'),
+ (0x11730, 'V'),
+ (0x11740, 'X'),
+ (0x11800, 'V'),
+ (0x1183C, 'X'),
+ (0x118A0, 'M', u'𑣀'),
+ (0x118A1, 'M', u'𑣁'),
+ (0x118A2, 'M', u'𑣂'),
+ (0x118A3, 'M', u'𑣃'),
+ (0x118A4, 'M', u'𑣄'),
+ (0x118A5, 'M', u'𑣅'),
+ (0x118A6, 'M', u'𑣆'),
+ (0x118A7, 'M', u'𑣇'),
+ (0x118A8, 'M', u'𑣈'),
+ (0x118A9, 'M', u'𑣉'),
+ (0x118AA, 'M', u'𑣊'),
+ (0x118AB, 'M', u'𑣋'),
+ (0x118AC, 'M', u'𑣌'),
+ (0x118AD, 'M', u'𑣍'),
+ (0x118AE, 'M', u'𑣎'),
+ (0x118AF, 'M', u'𑣏'),
+ (0x118B0, 'M', u'𑣐'),
+ (0x118B1, 'M', u'𑣑'),
+ (0x118B2, 'M', u'𑣒'),
+ (0x118B3, 'M', u'𑣓'),
+ (0x118B4, 'M', u'𑣔'),
+ (0x118B5, 'M', u'𑣕'),
+ (0x118B6, 'M', u'𑣖'),
+ (0x118B7, 'M', u'𑣗'),
+ ]
+
+def _seg_57():
+ return [
+ (0x118B8, 'M', u'𑣘'),
+ (0x118B9, 'M', u'𑣙'),
+ (0x118BA, 'M', u'𑣚'),
+ (0x118BB, 'M', u'𑣛'),
+ (0x118BC, 'M', u'𑣜'),
+ (0x118BD, 'M', u'𑣝'),
+ (0x118BE, 'M', u'𑣞'),
+ (0x118BF, 'M', u'𑣟'),
+ (0x118C0, 'V'),
+ (0x118F3, 'X'),
+ (0x118FF, 'V'),
+ (0x11907, 'X'),
+ (0x11909, 'V'),
+ (0x1190A, 'X'),
+ (0x1190C, 'V'),
+ (0x11914, 'X'),
+ (0x11915, 'V'),
+ (0x11917, 'X'),
+ (0x11918, 'V'),
+ (0x11936, 'X'),
+ (0x11937, 'V'),
+ (0x11939, 'X'),
+ (0x1193B, 'V'),
+ (0x11947, 'X'),
+ (0x11950, 'V'),
+ (0x1195A, 'X'),
+ (0x119A0, 'V'),
+ (0x119A8, 'X'),
+ (0x119AA, 'V'),
+ (0x119D8, 'X'),
+ (0x119DA, 'V'),
+ (0x119E5, 'X'),
+ (0x11A00, 'V'),
+ (0x11A48, 'X'),
+ (0x11A50, 'V'),
+ (0x11AA3, 'X'),
+ (0x11AC0, 'V'),
+ (0x11AF9, 'X'),
+ (0x11C00, 'V'),
+ (0x11C09, 'X'),
+ (0x11C0A, 'V'),
+ (0x11C37, 'X'),
+ (0x11C38, 'V'),
+ (0x11C46, 'X'),
+ (0x11C50, 'V'),
+ (0x11C6D, 'X'),
+ (0x11C70, 'V'),
+ (0x11C90, 'X'),
+ (0x11C92, 'V'),
+ (0x11CA8, 'X'),
+ (0x11CA9, 'V'),
+ (0x11CB7, 'X'),
+ (0x11D00, 'V'),
+ (0x11D07, 'X'),
+ (0x11D08, 'V'),
+ (0x11D0A, 'X'),
+ (0x11D0B, 'V'),
+ (0x11D37, 'X'),
+ (0x11D3A, 'V'),
+ (0x11D3B, 'X'),
+ (0x11D3C, 'V'),
+ (0x11D3E, 'X'),
+ (0x11D3F, 'V'),
+ (0x11D48, 'X'),
+ (0x11D50, 'V'),
+ (0x11D5A, 'X'),
+ (0x11D60, 'V'),
+ (0x11D66, 'X'),
+ (0x11D67, 'V'),
+ (0x11D69, 'X'),
+ (0x11D6A, 'V'),
+ (0x11D8F, 'X'),
+ (0x11D90, 'V'),
+ (0x11D92, 'X'),
+ (0x11D93, 'V'),
+ (0x11D99, 'X'),
+ (0x11DA0, 'V'),
+ (0x11DAA, 'X'),
+ (0x11EE0, 'V'),
+ (0x11EF9, 'X'),
+ (0x11FB0, 'V'),
+ (0x11FB1, 'X'),
+ (0x11FC0, 'V'),
+ (0x11FF2, 'X'),
+ (0x11FFF, 'V'),
+ (0x1239A, 'X'),
+ (0x12400, 'V'),
+ (0x1246F, 'X'),
+ (0x12470, 'V'),
+ (0x12475, 'X'),
+ (0x12480, 'V'),
+ (0x12544, 'X'),
+ (0x13000, 'V'),
+ (0x1342F, 'X'),
+ (0x14400, 'V'),
+ (0x14647, 'X'),
+ (0x16800, 'V'),
+ (0x16A39, 'X'),
+ (0x16A40, 'V'),
+ (0x16A5F, 'X'),
+ ]
+
+def _seg_58():
+ return [
+ (0x16A60, 'V'),
+ (0x16A6A, 'X'),
+ (0x16A6E, 'V'),
+ (0x16A70, 'X'),
+ (0x16AD0, 'V'),
+ (0x16AEE, 'X'),
+ (0x16AF0, 'V'),
+ (0x16AF6, 'X'),
+ (0x16B00, 'V'),
+ (0x16B46, 'X'),
+ (0x16B50, 'V'),
+ (0x16B5A, 'X'),
+ (0x16B5B, 'V'),
+ (0x16B62, 'X'),
+ (0x16B63, 'V'),
+ (0x16B78, 'X'),
+ (0x16B7D, 'V'),
+ (0x16B90, 'X'),
+ (0x16E40, 'M', u'𖹠'),
+ (0x16E41, 'M', u'𖹡'),
+ (0x16E42, 'M', u'𖹢'),
+ (0x16E43, 'M', u'𖹣'),
+ (0x16E44, 'M', u'𖹤'),
+ (0x16E45, 'M', u'𖹥'),
+ (0x16E46, 'M', u'𖹦'),
+ (0x16E47, 'M', u'𖹧'),
+ (0x16E48, 'M', u'𖹨'),
+ (0x16E49, 'M', u'𖹩'),
+ (0x16E4A, 'M', u'𖹪'),
+ (0x16E4B, 'M', u'𖹫'),
+ (0x16E4C, 'M', u'𖹬'),
+ (0x16E4D, 'M', u'𖹭'),
+ (0x16E4E, 'M', u'𖹮'),
+ (0x16E4F, 'M', u'𖹯'),
+ (0x16E50, 'M', u'𖹰'),
+ (0x16E51, 'M', u'𖹱'),
+ (0x16E52, 'M', u'𖹲'),
+ (0x16E53, 'M', u'𖹳'),
+ (0x16E54, 'M', u'𖹴'),
+ (0x16E55, 'M', u'𖹵'),
+ (0x16E56, 'M', u'𖹶'),
+ (0x16E57, 'M', u'𖹷'),
+ (0x16E58, 'M', u'𖹸'),
+ (0x16E59, 'M', u'𖹹'),
+ (0x16E5A, 'M', u'𖹺'),
+ (0x16E5B, 'M', u'𖹻'),
+ (0x16E5C, 'M', u'𖹼'),
+ (0x16E5D, 'M', u'𖹽'),
+ (0x16E5E, 'M', u'𖹾'),
+ (0x16E5F, 'M', u'𖹿'),
+ (0x16E60, 'V'),
+ (0x16E9B, 'X'),
+ (0x16F00, 'V'),
+ (0x16F4B, 'X'),
+ (0x16F4F, 'V'),
+ (0x16F88, 'X'),
+ (0x16F8F, 'V'),
+ (0x16FA0, 'X'),
+ (0x16FE0, 'V'),
+ (0x16FE5, 'X'),
+ (0x16FF0, 'V'),
+ (0x16FF2, 'X'),
+ (0x17000, 'V'),
+ (0x187F8, 'X'),
+ (0x18800, 'V'),
+ (0x18CD6, 'X'),
+ (0x18D00, 'V'),
+ (0x18D09, 'X'),
+ (0x1B000, 'V'),
+ (0x1B11F, 'X'),
+ (0x1B150, 'V'),
+ (0x1B153, 'X'),
+ (0x1B164, 'V'),
+ (0x1B168, 'X'),
+ (0x1B170, 'V'),
+ (0x1B2FC, 'X'),
+ (0x1BC00, 'V'),
+ (0x1BC6B, 'X'),
+ (0x1BC70, 'V'),
+ (0x1BC7D, 'X'),
+ (0x1BC80, 'V'),
+ (0x1BC89, 'X'),
+ (0x1BC90, 'V'),
+ (0x1BC9A, 'X'),
+ (0x1BC9C, 'V'),
+ (0x1BCA0, 'I'),
+ (0x1BCA4, 'X'),
+ (0x1D000, 'V'),
+ (0x1D0F6, 'X'),
+ (0x1D100, 'V'),
+ (0x1D127, 'X'),
+ (0x1D129, 'V'),
+ (0x1D15E, 'M', u'𝅗𝅥'),
+ (0x1D15F, 'M', u'𝅘𝅥'),
+ (0x1D160, 'M', u'𝅘𝅥𝅮'),
+ (0x1D161, 'M', u'𝅘𝅥𝅯'),
+ (0x1D162, 'M', u'𝅘𝅥𝅰'),
+ (0x1D163, 'M', u'𝅘𝅥𝅱'),
+ (0x1D164, 'M', u'𝅘𝅥𝅲'),
+ (0x1D165, 'V'),
+ ]
+
+def _seg_59():
+ return [
+ (0x1D173, 'X'),
+ (0x1D17B, 'V'),
+ (0x1D1BB, 'M', u'𝆹𝅥'),
+ (0x1D1BC, 'M', u'𝆺𝅥'),
+ (0x1D1BD, 'M', u'𝆹𝅥𝅮'),
+ (0x1D1BE, 'M', u'𝆺𝅥𝅮'),
+ (0x1D1BF, 'M', u'𝆹𝅥𝅯'),
+ (0x1D1C0, 'M', u'𝆺𝅥𝅯'),
+ (0x1D1C1, 'V'),
+ (0x1D1E9, 'X'),
+ (0x1D200, 'V'),
+ (0x1D246, 'X'),
+ (0x1D2E0, 'V'),
+ (0x1D2F4, 'X'),
+ (0x1D300, 'V'),
+ (0x1D357, 'X'),
+ (0x1D360, 'V'),
+ (0x1D379, 'X'),
+ (0x1D400, 'M', u'a'),
+ (0x1D401, 'M', u'b'),
+ (0x1D402, 'M', u'c'),
+ (0x1D403, 'M', u'd'),
+ (0x1D404, 'M', u'e'),
+ (0x1D405, 'M', u'f'),
+ (0x1D406, 'M', u'g'),
+ (0x1D407, 'M', u'h'),
+ (0x1D408, 'M', u'i'),
+ (0x1D409, 'M', u'j'),
+ (0x1D40A, 'M', u'k'),
+ (0x1D40B, 'M', u'l'),
+ (0x1D40C, 'M', u'm'),
+ (0x1D40D, 'M', u'n'),
+ (0x1D40E, 'M', u'o'),
+ (0x1D40F, 'M', u'p'),
+ (0x1D410, 'M', u'q'),
+ (0x1D411, 'M', u'r'),
+ (0x1D412, 'M', u's'),
+ (0x1D413, 'M', u't'),
+ (0x1D414, 'M', u'u'),
+ (0x1D415, 'M', u'v'),
+ (0x1D416, 'M', u'w'),
+ (0x1D417, 'M', u'x'),
+ (0x1D418, 'M', u'y'),
+ (0x1D419, 'M', u'z'),
+ (0x1D41A, 'M', u'a'),
+ (0x1D41B, 'M', u'b'),
+ (0x1D41C, 'M', u'c'),
+ (0x1D41D, 'M', u'd'),
+ (0x1D41E, 'M', u'e'),
+ (0x1D41F, 'M', u'f'),
+ (0x1D420, 'M', u'g'),
+ (0x1D421, 'M', u'h'),
+ (0x1D422, 'M', u'i'),
+ (0x1D423, 'M', u'j'),
+ (0x1D424, 'M', u'k'),
+ (0x1D425, 'M', u'l'),
+ (0x1D426, 'M', u'm'),
+ (0x1D427, 'M', u'n'),
+ (0x1D428, 'M', u'o'),
+ (0x1D429, 'M', u'p'),
+ (0x1D42A, 'M', u'q'),
+ (0x1D42B, 'M', u'r'),
+ (0x1D42C, 'M', u's'),
+ (0x1D42D, 'M', u't'),
+ (0x1D42E, 'M', u'u'),
+ (0x1D42F, 'M', u'v'),
+ (0x1D430, 'M', u'w'),
+ (0x1D431, 'M', u'x'),
+ (0x1D432, 'M', u'y'),
+ (0x1D433, 'M', u'z'),
+ (0x1D434, 'M', u'a'),
+ (0x1D435, 'M', u'b'),
+ (0x1D436, 'M', u'c'),
+ (0x1D437, 'M', u'd'),
+ (0x1D438, 'M', u'e'),
+ (0x1D439, 'M', u'f'),
+ (0x1D43A, 'M', u'g'),
+ (0x1D43B, 'M', u'h'),
+ (0x1D43C, 'M', u'i'),
+ (0x1D43D, 'M', u'j'),
+ (0x1D43E, 'M', u'k'),
+ (0x1D43F, 'M', u'l'),
+ (0x1D440, 'M', u'm'),
+ (0x1D441, 'M', u'n'),
+ (0x1D442, 'M', u'o'),
+ (0x1D443, 'M', u'p'),
+ (0x1D444, 'M', u'q'),
+ (0x1D445, 'M', u'r'),
+ (0x1D446, 'M', u's'),
+ (0x1D447, 'M', u't'),
+ (0x1D448, 'M', u'u'),
+ (0x1D449, 'M', u'v'),
+ (0x1D44A, 'M', u'w'),
+ (0x1D44B, 'M', u'x'),
+ (0x1D44C, 'M', u'y'),
+ (0x1D44D, 'M', u'z'),
+ (0x1D44E, 'M', u'a'),
+ (0x1D44F, 'M', u'b'),
+ (0x1D450, 'M', u'c'),
+ (0x1D451, 'M', u'd'),
+ ]
+
+def _seg_60():
+ return [
+ (0x1D452, 'M', u'e'),
+ (0x1D453, 'M', u'f'),
+ (0x1D454, 'M', u'g'),
+ (0x1D455, 'X'),
+ (0x1D456, 'M', u'i'),
+ (0x1D457, 'M', u'j'),
+ (0x1D458, 'M', u'k'),
+ (0x1D459, 'M', u'l'),
+ (0x1D45A, 'M', u'm'),
+ (0x1D45B, 'M', u'n'),
+ (0x1D45C, 'M', u'o'),
+ (0x1D45D, 'M', u'p'),
+ (0x1D45E, 'M', u'q'),
+ (0x1D45F, 'M', u'r'),
+ (0x1D460, 'M', u's'),
+ (0x1D461, 'M', u't'),
+ (0x1D462, 'M', u'u'),
+ (0x1D463, 'M', u'v'),
+ (0x1D464, 'M', u'w'),
+ (0x1D465, 'M', u'x'),
+ (0x1D466, 'M', u'y'),
+ (0x1D467, 'M', u'z'),
+ (0x1D468, 'M', u'a'),
+ (0x1D469, 'M', u'b'),
+ (0x1D46A, 'M', u'c'),
+ (0x1D46B, 'M', u'd'),
+ (0x1D46C, 'M', u'e'),
+ (0x1D46D, 'M', u'f'),
+ (0x1D46E, 'M', u'g'),
+ (0x1D46F, 'M', u'h'),
+ (0x1D470, 'M', u'i'),
+ (0x1D471, 'M', u'j'),
+ (0x1D472, 'M', u'k'),
+ (0x1D473, 'M', u'l'),
+ (0x1D474, 'M', u'm'),
+ (0x1D475, 'M', u'n'),
+ (0x1D476, 'M', u'o'),
+ (0x1D477, 'M', u'p'),
+ (0x1D478, 'M', u'q'),
+ (0x1D479, 'M', u'r'),
+ (0x1D47A, 'M', u's'),
+ (0x1D47B, 'M', u't'),
+ (0x1D47C, 'M', u'u'),
+ (0x1D47D, 'M', u'v'),
+ (0x1D47E, 'M', u'w'),
+ (0x1D47F, 'M', u'x'),
+ (0x1D480, 'M', u'y'),
+ (0x1D481, 'M', u'z'),
+ (0x1D482, 'M', u'a'),
+ (0x1D483, 'M', u'b'),
+ (0x1D484, 'M', u'c'),
+ (0x1D485, 'M', u'd'),
+ (0x1D486, 'M', u'e'),
+ (0x1D487, 'M', u'f'),
+ (0x1D488, 'M', u'g'),
+ (0x1D489, 'M', u'h'),
+ (0x1D48A, 'M', u'i'),
+ (0x1D48B, 'M', u'j'),
+ (0x1D48C, 'M', u'k'),
+ (0x1D48D, 'M', u'l'),
+ (0x1D48E, 'M', u'm'),
+ (0x1D48F, 'M', u'n'),
+ (0x1D490, 'M', u'o'),
+ (0x1D491, 'M', u'p'),
+ (0x1D492, 'M', u'q'),
+ (0x1D493, 'M', u'r'),
+ (0x1D494, 'M', u's'),
+ (0x1D495, 'M', u't'),
+ (0x1D496, 'M', u'u'),
+ (0x1D497, 'M', u'v'),
+ (0x1D498, 'M', u'w'),
+ (0x1D499, 'M', u'x'),
+ (0x1D49A, 'M', u'y'),
+ (0x1D49B, 'M', u'z'),
+ (0x1D49C, 'M', u'a'),
+ (0x1D49D, 'X'),
+ (0x1D49E, 'M', u'c'),
+ (0x1D49F, 'M', u'd'),
+ (0x1D4A0, 'X'),
+ (0x1D4A2, 'M', u'g'),
+ (0x1D4A3, 'X'),
+ (0x1D4A5, 'M', u'j'),
+ (0x1D4A6, 'M', u'k'),
+ (0x1D4A7, 'X'),
+ (0x1D4A9, 'M', u'n'),
+ (0x1D4AA, 'M', u'o'),
+ (0x1D4AB, 'M', u'p'),
+ (0x1D4AC, 'M', u'q'),
+ (0x1D4AD, 'X'),
+ (0x1D4AE, 'M', u's'),
+ (0x1D4AF, 'M', u't'),
+ (0x1D4B0, 'M', u'u'),
+ (0x1D4B1, 'M', u'v'),
+ (0x1D4B2, 'M', u'w'),
+ (0x1D4B3, 'M', u'x'),
+ (0x1D4B4, 'M', u'y'),
+ (0x1D4B5, 'M', u'z'),
+ (0x1D4B6, 'M', u'a'),
+ (0x1D4B7, 'M', u'b'),
+ (0x1D4B8, 'M', u'c'),
+ ]
+
+def _seg_61():
+ return [
+ (0x1D4B9, 'M', u'd'),
+ (0x1D4BA, 'X'),
+ (0x1D4BB, 'M', u'f'),
+ (0x1D4BC, 'X'),
+ (0x1D4BD, 'M', u'h'),
+ (0x1D4BE, 'M', u'i'),
+ (0x1D4BF, 'M', u'j'),
+ (0x1D4C0, 'M', u'k'),
+ (0x1D4C1, 'M', u'l'),
+ (0x1D4C2, 'M', u'm'),
+ (0x1D4C3, 'M', u'n'),
+ (0x1D4C4, 'X'),
+ (0x1D4C5, 'M', u'p'),
+ (0x1D4C6, 'M', u'q'),
+ (0x1D4C7, 'M', u'r'),
+ (0x1D4C8, 'M', u's'),
+ (0x1D4C9, 'M', u't'),
+ (0x1D4CA, 'M', u'u'),
+ (0x1D4CB, 'M', u'v'),
+ (0x1D4CC, 'M', u'w'),
+ (0x1D4CD, 'M', u'x'),
+ (0x1D4CE, 'M', u'y'),
+ (0x1D4CF, 'M', u'z'),
+ (0x1D4D0, 'M', u'a'),
+ (0x1D4D1, 'M', u'b'),
+ (0x1D4D2, 'M', u'c'),
+ (0x1D4D3, 'M', u'd'),
+ (0x1D4D4, 'M', u'e'),
+ (0x1D4D5, 'M', u'f'),
+ (0x1D4D6, 'M', u'g'),
+ (0x1D4D7, 'M', u'h'),
+ (0x1D4D8, 'M', u'i'),
+ (0x1D4D9, 'M', u'j'),
+ (0x1D4DA, 'M', u'k'),
+ (0x1D4DB, 'M', u'l'),
+ (0x1D4DC, 'M', u'm'),
+ (0x1D4DD, 'M', u'n'),
+ (0x1D4DE, 'M', u'o'),
+ (0x1D4DF, 'M', u'p'),
+ (0x1D4E0, 'M', u'q'),
+ (0x1D4E1, 'M', u'r'),
+ (0x1D4E2, 'M', u's'),
+ (0x1D4E3, 'M', u't'),
+ (0x1D4E4, 'M', u'u'),
+ (0x1D4E5, 'M', u'v'),
+ (0x1D4E6, 'M', u'w'),
+ (0x1D4E7, 'M', u'x'),
+ (0x1D4E8, 'M', u'y'),
+ (0x1D4E9, 'M', u'z'),
+ (0x1D4EA, 'M', u'a'),
+ (0x1D4EB, 'M', u'b'),
+ (0x1D4EC, 'M', u'c'),
+ (0x1D4ED, 'M', u'd'),
+ (0x1D4EE, 'M', u'e'),
+ (0x1D4EF, 'M', u'f'),
+ (0x1D4F0, 'M', u'g'),
+ (0x1D4F1, 'M', u'h'),
+ (0x1D4F2, 'M', u'i'),
+ (0x1D4F3, 'M', u'j'),
+ (0x1D4F4, 'M', u'k'),
+ (0x1D4F5, 'M', u'l'),
+ (0x1D4F6, 'M', u'm'),
+ (0x1D4F7, 'M', u'n'),
+ (0x1D4F8, 'M', u'o'),
+ (0x1D4F9, 'M', u'p'),
+ (0x1D4FA, 'M', u'q'),
+ (0x1D4FB, 'M', u'r'),
+ (0x1D4FC, 'M', u's'),
+ (0x1D4FD, 'M', u't'),
+ (0x1D4FE, 'M', u'u'),
+ (0x1D4FF, 'M', u'v'),
+ (0x1D500, 'M', u'w'),
+ (0x1D501, 'M', u'x'),
+ (0x1D502, 'M', u'y'),
+ (0x1D503, 'M', u'z'),
+ (0x1D504, 'M', u'a'),
+ (0x1D505, 'M', u'b'),
+ (0x1D506, 'X'),
+ (0x1D507, 'M', u'd'),
+ (0x1D508, 'M', u'e'),
+ (0x1D509, 'M', u'f'),
+ (0x1D50A, 'M', u'g'),
+ (0x1D50B, 'X'),
+ (0x1D50D, 'M', u'j'),
+ (0x1D50E, 'M', u'k'),
+ (0x1D50F, 'M', u'l'),
+ (0x1D510, 'M', u'm'),
+ (0x1D511, 'M', u'n'),
+ (0x1D512, 'M', u'o'),
+ (0x1D513, 'M', u'p'),
+ (0x1D514, 'M', u'q'),
+ (0x1D515, 'X'),
+ (0x1D516, 'M', u's'),
+ (0x1D517, 'M', u't'),
+ (0x1D518, 'M', u'u'),
+ (0x1D519, 'M', u'v'),
+ (0x1D51A, 'M', u'w'),
+ (0x1D51B, 'M', u'x'),
+ (0x1D51C, 'M', u'y'),
+ (0x1D51D, 'X'),
+ ]
+
+def _seg_62():
+ return [
+ (0x1D51E, 'M', u'a'),
+ (0x1D51F, 'M', u'b'),
+ (0x1D520, 'M', u'c'),
+ (0x1D521, 'M', u'd'),
+ (0x1D522, 'M', u'e'),
+ (0x1D523, 'M', u'f'),
+ (0x1D524, 'M', u'g'),
+ (0x1D525, 'M', u'h'),
+ (0x1D526, 'M', u'i'),
+ (0x1D527, 'M', u'j'),
+ (0x1D528, 'M', u'k'),
+ (0x1D529, 'M', u'l'),
+ (0x1D52A, 'M', u'm'),
+ (0x1D52B, 'M', u'n'),
+ (0x1D52C, 'M', u'o'),
+ (0x1D52D, 'M', u'p'),
+ (0x1D52E, 'M', u'q'),
+ (0x1D52F, 'M', u'r'),
+ (0x1D530, 'M', u's'),
+ (0x1D531, 'M', u't'),
+ (0x1D532, 'M', u'u'),
+ (0x1D533, 'M', u'v'),
+ (0x1D534, 'M', u'w'),
+ (0x1D535, 'M', u'x'),
+ (0x1D536, 'M', u'y'),
+ (0x1D537, 'M', u'z'),
+ (0x1D538, 'M', u'a'),
+ (0x1D539, 'M', u'b'),
+ (0x1D53A, 'X'),
+ (0x1D53B, 'M', u'd'),
+ (0x1D53C, 'M', u'e'),
+ (0x1D53D, 'M', u'f'),
+ (0x1D53E, 'M', u'g'),
+ (0x1D53F, 'X'),
+ (0x1D540, 'M', u'i'),
+ (0x1D541, 'M', u'j'),
+ (0x1D542, 'M', u'k'),
+ (0x1D543, 'M', u'l'),
+ (0x1D544, 'M', u'm'),
+ (0x1D545, 'X'),
+ (0x1D546, 'M', u'o'),
+ (0x1D547, 'X'),
+ (0x1D54A, 'M', u's'),
+ (0x1D54B, 'M', u't'),
+ (0x1D54C, 'M', u'u'),
+ (0x1D54D, 'M', u'v'),
+ (0x1D54E, 'M', u'w'),
+ (0x1D54F, 'M', u'x'),
+ (0x1D550, 'M', u'y'),
+ (0x1D551, 'X'),
+ (0x1D552, 'M', u'a'),
+ (0x1D553, 'M', u'b'),
+ (0x1D554, 'M', u'c'),
+ (0x1D555, 'M', u'd'),
+ (0x1D556, 'M', u'e'),
+ (0x1D557, 'M', u'f'),
+ (0x1D558, 'M', u'g'),
+ (0x1D559, 'M', u'h'),
+ (0x1D55A, 'M', u'i'),
+ (0x1D55B, 'M', u'j'),
+ (0x1D55C, 'M', u'k'),
+ (0x1D55D, 'M', u'l'),
+ (0x1D55E, 'M', u'm'),
+ (0x1D55F, 'M', u'n'),
+ (0x1D560, 'M', u'o'),
+ (0x1D561, 'M', u'p'),
+ (0x1D562, 'M', u'q'),
+ (0x1D563, 'M', u'r'),
+ (0x1D564, 'M', u's'),
+ (0x1D565, 'M', u't'),
+ (0x1D566, 'M', u'u'),
+ (0x1D567, 'M', u'v'),
+ (0x1D568, 'M', u'w'),
+ (0x1D569, 'M', u'x'),
+ (0x1D56A, 'M', u'y'),
+ (0x1D56B, 'M', u'z'),
+ (0x1D56C, 'M', u'a'),
+ (0x1D56D, 'M', u'b'),
+ (0x1D56E, 'M', u'c'),
+ (0x1D56F, 'M', u'd'),
+ (0x1D570, 'M', u'e'),
+ (0x1D571, 'M', u'f'),
+ (0x1D572, 'M', u'g'),
+ (0x1D573, 'M', u'h'),
+ (0x1D574, 'M', u'i'),
+ (0x1D575, 'M', u'j'),
+ (0x1D576, 'M', u'k'),
+ (0x1D577, 'M', u'l'),
+ (0x1D578, 'M', u'm'),
+ (0x1D579, 'M', u'n'),
+ (0x1D57A, 'M', u'o'),
+ (0x1D57B, 'M', u'p'),
+ (0x1D57C, 'M', u'q'),
+ (0x1D57D, 'M', u'r'),
+ (0x1D57E, 'M', u's'),
+ (0x1D57F, 'M', u't'),
+ (0x1D580, 'M', u'u'),
+ (0x1D581, 'M', u'v'),
+ (0x1D582, 'M', u'w'),
+ (0x1D583, 'M', u'x'),
+ ]
+
+def _seg_63():
+ return [
+ (0x1D584, 'M', u'y'),
+ (0x1D585, 'M', u'z'),
+ (0x1D586, 'M', u'a'),
+ (0x1D587, 'M', u'b'),
+ (0x1D588, 'M', u'c'),
+ (0x1D589, 'M', u'd'),
+ (0x1D58A, 'M', u'e'),
+ (0x1D58B, 'M', u'f'),
+ (0x1D58C, 'M', u'g'),
+ (0x1D58D, 'M', u'h'),
+ (0x1D58E, 'M', u'i'),
+ (0x1D58F, 'M', u'j'),
+ (0x1D590, 'M', u'k'),
+ (0x1D591, 'M', u'l'),
+ (0x1D592, 'M', u'm'),
+ (0x1D593, 'M', u'n'),
+ (0x1D594, 'M', u'o'),
+ (0x1D595, 'M', u'p'),
+ (0x1D596, 'M', u'q'),
+ (0x1D597, 'M', u'r'),
+ (0x1D598, 'M', u's'),
+ (0x1D599, 'M', u't'),
+ (0x1D59A, 'M', u'u'),
+ (0x1D59B, 'M', u'v'),
+ (0x1D59C, 'M', u'w'),
+ (0x1D59D, 'M', u'x'),
+ (0x1D59E, 'M', u'y'),
+ (0x1D59F, 'M', u'z'),
+ (0x1D5A0, 'M', u'a'),
+ (0x1D5A1, 'M', u'b'),
+ (0x1D5A2, 'M', u'c'),
+ (0x1D5A3, 'M', u'd'),
+ (0x1D5A4, 'M', u'e'),
+ (0x1D5A5, 'M', u'f'),
+ (0x1D5A6, 'M', u'g'),
+ (0x1D5A7, 'M', u'h'),
+ (0x1D5A8, 'M', u'i'),
+ (0x1D5A9, 'M', u'j'),
+ (0x1D5AA, 'M', u'k'),
+ (0x1D5AB, 'M', u'l'),
+ (0x1D5AC, 'M', u'm'),
+ (0x1D5AD, 'M', u'n'),
+ (0x1D5AE, 'M', u'o'),
+ (0x1D5AF, 'M', u'p'),
+ (0x1D5B0, 'M', u'q'),
+ (0x1D5B1, 'M', u'r'),
+ (0x1D5B2, 'M', u's'),
+ (0x1D5B3, 'M', u't'),
+ (0x1D5B4, 'M', u'u'),
+ (0x1D5B5, 'M', u'v'),
+ (0x1D5B6, 'M', u'w'),
+ (0x1D5B7, 'M', u'x'),
+ (0x1D5B8, 'M', u'y'),
+ (0x1D5B9, 'M', u'z'),
+ (0x1D5BA, 'M', u'a'),
+ (0x1D5BB, 'M', u'b'),
+ (0x1D5BC, 'M', u'c'),
+ (0x1D5BD, 'M', u'd'),
+ (0x1D5BE, 'M', u'e'),
+ (0x1D5BF, 'M', u'f'),
+ (0x1D5C0, 'M', u'g'),
+ (0x1D5C1, 'M', u'h'),
+ (0x1D5C2, 'M', u'i'),
+ (0x1D5C3, 'M', u'j'),
+ (0x1D5C4, 'M', u'k'),
+ (0x1D5C5, 'M', u'l'),
+ (0x1D5C6, 'M', u'm'),
+ (0x1D5C7, 'M', u'n'),
+ (0x1D5C8, 'M', u'o'),
+ (0x1D5C9, 'M', u'p'),
+ (0x1D5CA, 'M', u'q'),
+ (0x1D5CB, 'M', u'r'),
+ (0x1D5CC, 'M', u's'),
+ (0x1D5CD, 'M', u't'),
+ (0x1D5CE, 'M', u'u'),
+ (0x1D5CF, 'M', u'v'),
+ (0x1D5D0, 'M', u'w'),
+ (0x1D5D1, 'M', u'x'),
+ (0x1D5D2, 'M', u'y'),
+ (0x1D5D3, 'M', u'z'),
+ (0x1D5D4, 'M', u'a'),
+ (0x1D5D5, 'M', u'b'),
+ (0x1D5D6, 'M', u'c'),
+ (0x1D5D7, 'M', u'd'),
+ (0x1D5D8, 'M', u'e'),
+ (0x1D5D9, 'M', u'f'),
+ (0x1D5DA, 'M', u'g'),
+ (0x1D5DB, 'M', u'h'),
+ (0x1D5DC, 'M', u'i'),
+ (0x1D5DD, 'M', u'j'),
+ (0x1D5DE, 'M', u'k'),
+ (0x1D5DF, 'M', u'l'),
+ (0x1D5E0, 'M', u'm'),
+ (0x1D5E1, 'M', u'n'),
+ (0x1D5E2, 'M', u'o'),
+ (0x1D5E3, 'M', u'p'),
+ (0x1D5E4, 'M', u'q'),
+ (0x1D5E5, 'M', u'r'),
+ (0x1D5E6, 'M', u's'),
+ (0x1D5E7, 'M', u't'),
+ ]
+
+def _seg_64():
+ return [
+ (0x1D5E8, 'M', u'u'),
+ (0x1D5E9, 'M', u'v'),
+ (0x1D5EA, 'M', u'w'),
+ (0x1D5EB, 'M', u'x'),
+ (0x1D5EC, 'M', u'y'),
+ (0x1D5ED, 'M', u'z'),
+ (0x1D5EE, 'M', u'a'),
+ (0x1D5EF, 'M', u'b'),
+ (0x1D5F0, 'M', u'c'),
+ (0x1D5F1, 'M', u'd'),
+ (0x1D5F2, 'M', u'e'),
+ (0x1D5F3, 'M', u'f'),
+ (0x1D5F4, 'M', u'g'),
+ (0x1D5F5, 'M', u'h'),
+ (0x1D5F6, 'M', u'i'),
+ (0x1D5F7, 'M', u'j'),
+ (0x1D5F8, 'M', u'k'),
+ (0x1D5F9, 'M', u'l'),
+ (0x1D5FA, 'M', u'm'),
+ (0x1D5FB, 'M', u'n'),
+ (0x1D5FC, 'M', u'o'),
+ (0x1D5FD, 'M', u'p'),
+ (0x1D5FE, 'M', u'q'),
+ (0x1D5FF, 'M', u'r'),
+ (0x1D600, 'M', u's'),
+ (0x1D601, 'M', u't'),
+ (0x1D602, 'M', u'u'),
+ (0x1D603, 'M', u'v'),
+ (0x1D604, 'M', u'w'),
+ (0x1D605, 'M', u'x'),
+ (0x1D606, 'M', u'y'),
+ (0x1D607, 'M', u'z'),
+ (0x1D608, 'M', u'a'),
+ (0x1D609, 'M', u'b'),
+ (0x1D60A, 'M', u'c'),
+ (0x1D60B, 'M', u'd'),
+ (0x1D60C, 'M', u'e'),
+ (0x1D60D, 'M', u'f'),
+ (0x1D60E, 'M', u'g'),
+ (0x1D60F, 'M', u'h'),
+ (0x1D610, 'M', u'i'),
+ (0x1D611, 'M', u'j'),
+ (0x1D612, 'M', u'k'),
+ (0x1D613, 'M', u'l'),
+ (0x1D614, 'M', u'm'),
+ (0x1D615, 'M', u'n'),
+ (0x1D616, 'M', u'o'),
+ (0x1D617, 'M', u'p'),
+ (0x1D618, 'M', u'q'),
+ (0x1D619, 'M', u'r'),
+ (0x1D61A, 'M', u's'),
+ (0x1D61B, 'M', u't'),
+ (0x1D61C, 'M', u'u'),
+ (0x1D61D, 'M', u'v'),
+ (0x1D61E, 'M', u'w'),
+ (0x1D61F, 'M', u'x'),
+ (0x1D620, 'M', u'y'),
+ (0x1D621, 'M', u'z'),
+ (0x1D622, 'M', u'a'),
+ (0x1D623, 'M', u'b'),
+ (0x1D624, 'M', u'c'),
+ (0x1D625, 'M', u'd'),
+ (0x1D626, 'M', u'e'),
+ (0x1D627, 'M', u'f'),
+ (0x1D628, 'M', u'g'),
+ (0x1D629, 'M', u'h'),
+ (0x1D62A, 'M', u'i'),
+ (0x1D62B, 'M', u'j'),
+ (0x1D62C, 'M', u'k'),
+ (0x1D62D, 'M', u'l'),
+ (0x1D62E, 'M', u'm'),
+ (0x1D62F, 'M', u'n'),
+ (0x1D630, 'M', u'o'),
+ (0x1D631, 'M', u'p'),
+ (0x1D632, 'M', u'q'),
+ (0x1D633, 'M', u'r'),
+ (0x1D634, 'M', u's'),
+ (0x1D635, 'M', u't'),
+ (0x1D636, 'M', u'u'),
+ (0x1D637, 'M', u'v'),
+ (0x1D638, 'M', u'w'),
+ (0x1D639, 'M', u'x'),
+ (0x1D63A, 'M', u'y'),
+ (0x1D63B, 'M', u'z'),
+ (0x1D63C, 'M', u'a'),
+ (0x1D63D, 'M', u'b'),
+ (0x1D63E, 'M', u'c'),
+ (0x1D63F, 'M', u'd'),
+ (0x1D640, 'M', u'e'),
+ (0x1D641, 'M', u'f'),
+ (0x1D642, 'M', u'g'),
+ (0x1D643, 'M', u'h'),
+ (0x1D644, 'M', u'i'),
+ (0x1D645, 'M', u'j'),
+ (0x1D646, 'M', u'k'),
+ (0x1D647, 'M', u'l'),
+ (0x1D648, 'M', u'm'),
+ (0x1D649, 'M', u'n'),
+ (0x1D64A, 'M', u'o'),
+ (0x1D64B, 'M', u'p'),
+ ]
+
+def _seg_65():
+ return [
+ (0x1D64C, 'M', u'q'),
+ (0x1D64D, 'M', u'r'),
+ (0x1D64E, 'M', u's'),
+ (0x1D64F, 'M', u't'),
+ (0x1D650, 'M', u'u'),
+ (0x1D651, 'M', u'v'),
+ (0x1D652, 'M', u'w'),
+ (0x1D653, 'M', u'x'),
+ (0x1D654, 'M', u'y'),
+ (0x1D655, 'M', u'z'),
+ (0x1D656, 'M', u'a'),
+ (0x1D657, 'M', u'b'),
+ (0x1D658, 'M', u'c'),
+ (0x1D659, 'M', u'd'),
+ (0x1D65A, 'M', u'e'),
+ (0x1D65B, 'M', u'f'),
+ (0x1D65C, 'M', u'g'),
+ (0x1D65D, 'M', u'h'),
+ (0x1D65E, 'M', u'i'),
+ (0x1D65F, 'M', u'j'),
+ (0x1D660, 'M', u'k'),
+ (0x1D661, 'M', u'l'),
+ (0x1D662, 'M', u'm'),
+ (0x1D663, 'M', u'n'),
+ (0x1D664, 'M', u'o'),
+ (0x1D665, 'M', u'p'),
+ (0x1D666, 'M', u'q'),
+ (0x1D667, 'M', u'r'),
+ (0x1D668, 'M', u's'),
+ (0x1D669, 'M', u't'),
+ (0x1D66A, 'M', u'u'),
+ (0x1D66B, 'M', u'v'),
+ (0x1D66C, 'M', u'w'),
+ (0x1D66D, 'M', u'x'),
+ (0x1D66E, 'M', u'y'),
+ (0x1D66F, 'M', u'z'),
+ (0x1D670, 'M', u'a'),
+ (0x1D671, 'M', u'b'),
+ (0x1D672, 'M', u'c'),
+ (0x1D673, 'M', u'd'),
+ (0x1D674, 'M', u'e'),
+ (0x1D675, 'M', u'f'),
+ (0x1D676, 'M', u'g'),
+ (0x1D677, 'M', u'h'),
+ (0x1D678, 'M', u'i'),
+ (0x1D679, 'M', u'j'),
+ (0x1D67A, 'M', u'k'),
+ (0x1D67B, 'M', u'l'),
+ (0x1D67C, 'M', u'm'),
+ (0x1D67D, 'M', u'n'),
+ (0x1D67E, 'M', u'o'),
+ (0x1D67F, 'M', u'p'),
+ (0x1D680, 'M', u'q'),
+ (0x1D681, 'M', u'r'),
+ (0x1D682, 'M', u's'),
+ (0x1D683, 'M', u't'),
+ (0x1D684, 'M', u'u'),
+ (0x1D685, 'M', u'v'),
+ (0x1D686, 'M', u'w'),
+ (0x1D687, 'M', u'x'),
+ (0x1D688, 'M', u'y'),
+ (0x1D689, 'M', u'z'),
+ (0x1D68A, 'M', u'a'),
+ (0x1D68B, 'M', u'b'),
+ (0x1D68C, 'M', u'c'),
+ (0x1D68D, 'M', u'd'),
+ (0x1D68E, 'M', u'e'),
+ (0x1D68F, 'M', u'f'),
+ (0x1D690, 'M', u'g'),
+ (0x1D691, 'M', u'h'),
+ (0x1D692, 'M', u'i'),
+ (0x1D693, 'M', u'j'),
+ (0x1D694, 'M', u'k'),
+ (0x1D695, 'M', u'l'),
+ (0x1D696, 'M', u'm'),
+ (0x1D697, 'M', u'n'),
+ (0x1D698, 'M', u'o'),
+ (0x1D699, 'M', u'p'),
+ (0x1D69A, 'M', u'q'),
+ (0x1D69B, 'M', u'r'),
+ (0x1D69C, 'M', u's'),
+ (0x1D69D, 'M', u't'),
+ (0x1D69E, 'M', u'u'),
+ (0x1D69F, 'M', u'v'),
+ (0x1D6A0, 'M', u'w'),
+ (0x1D6A1, 'M', u'x'),
+ (0x1D6A2, 'M', u'y'),
+ (0x1D6A3, 'M', u'z'),
+ (0x1D6A4, 'M', u'ı'),
+ (0x1D6A5, 'M', u'ȷ'),
+ (0x1D6A6, 'X'),
+ (0x1D6A8, 'M', u'α'),
+ (0x1D6A9, 'M', u'β'),
+ (0x1D6AA, 'M', u'γ'),
+ (0x1D6AB, 'M', u'δ'),
+ (0x1D6AC, 'M', u'ε'),
+ (0x1D6AD, 'M', u'ζ'),
+ (0x1D6AE, 'M', u'η'),
+ (0x1D6AF, 'M', u'θ'),
+ (0x1D6B0, 'M', u'ι'),
+ ]
+
+def _seg_66():
+ return [
+ (0x1D6B1, 'M', u'κ'),
+ (0x1D6B2, 'M', u'λ'),
+ (0x1D6B3, 'M', u'μ'),
+ (0x1D6B4, 'M', u'ν'),
+ (0x1D6B5, 'M', u'ξ'),
+ (0x1D6B6, 'M', u'ο'),
+ (0x1D6B7, 'M', u'π'),
+ (0x1D6B8, 'M', u'ρ'),
+ (0x1D6B9, 'M', u'θ'),
+ (0x1D6BA, 'M', u'σ'),
+ (0x1D6BB, 'M', u'τ'),
+ (0x1D6BC, 'M', u'υ'),
+ (0x1D6BD, 'M', u'φ'),
+ (0x1D6BE, 'M', u'χ'),
+ (0x1D6BF, 'M', u'ψ'),
+ (0x1D6C0, 'M', u'ω'),
+ (0x1D6C1, 'M', u'∇'),
+ (0x1D6C2, 'M', u'α'),
+ (0x1D6C3, 'M', u'β'),
+ (0x1D6C4, 'M', u'γ'),
+ (0x1D6C5, 'M', u'δ'),
+ (0x1D6C6, 'M', u'ε'),
+ (0x1D6C7, 'M', u'ζ'),
+ (0x1D6C8, 'M', u'η'),
+ (0x1D6C9, 'M', u'θ'),
+ (0x1D6CA, 'M', u'ι'),
+ (0x1D6CB, 'M', u'κ'),
+ (0x1D6CC, 'M', u'λ'),
+ (0x1D6CD, 'M', u'μ'),
+ (0x1D6CE, 'M', u'ν'),
+ (0x1D6CF, 'M', u'ξ'),
+ (0x1D6D0, 'M', u'ο'),
+ (0x1D6D1, 'M', u'π'),
+ (0x1D6D2, 'M', u'ρ'),
+ (0x1D6D3, 'M', u'σ'),
+ (0x1D6D5, 'M', u'τ'),
+ (0x1D6D6, 'M', u'υ'),
+ (0x1D6D7, 'M', u'φ'),
+ (0x1D6D8, 'M', u'χ'),
+ (0x1D6D9, 'M', u'ψ'),
+ (0x1D6DA, 'M', u'ω'),
+ (0x1D6DB, 'M', u'∂'),
+ (0x1D6DC, 'M', u'ε'),
+ (0x1D6DD, 'M', u'θ'),
+ (0x1D6DE, 'M', u'κ'),
+ (0x1D6DF, 'M', u'φ'),
+ (0x1D6E0, 'M', u'ρ'),
+ (0x1D6E1, 'M', u'π'),
+ (0x1D6E2, 'M', u'α'),
+ (0x1D6E3, 'M', u'β'),
+ (0x1D6E4, 'M', u'γ'),
+ (0x1D6E5, 'M', u'δ'),
+ (0x1D6E6, 'M', u'ε'),
+ (0x1D6E7, 'M', u'ζ'),
+ (0x1D6E8, 'M', u'η'),
+ (0x1D6E9, 'M', u'θ'),
+ (0x1D6EA, 'M', u'ι'),
+ (0x1D6EB, 'M', u'κ'),
+ (0x1D6EC, 'M', u'λ'),
+ (0x1D6ED, 'M', u'μ'),
+ (0x1D6EE, 'M', u'ν'),
+ (0x1D6EF, 'M', u'ξ'),
+ (0x1D6F0, 'M', u'ο'),
+ (0x1D6F1, 'M', u'π'),
+ (0x1D6F2, 'M', u'ρ'),
+ (0x1D6F3, 'M', u'θ'),
+ (0x1D6F4, 'M', u'σ'),
+ (0x1D6F5, 'M', u'τ'),
+ (0x1D6F6, 'M', u'υ'),
+ (0x1D6F7, 'M', u'φ'),
+ (0x1D6F8, 'M', u'χ'),
+ (0x1D6F9, 'M', u'ψ'),
+ (0x1D6FA, 'M', u'ω'),
+ (0x1D6FB, 'M', u'∇'),
+ (0x1D6FC, 'M', u'α'),
+ (0x1D6FD, 'M', u'β'),
+ (0x1D6FE, 'M', u'γ'),
+ (0x1D6FF, 'M', u'δ'),
+ (0x1D700, 'M', u'ε'),
+ (0x1D701, 'M', u'ζ'),
+ (0x1D702, 'M', u'η'),
+ (0x1D703, 'M', u'θ'),
+ (0x1D704, 'M', u'ι'),
+ (0x1D705, 'M', u'κ'),
+ (0x1D706, 'M', u'λ'),
+ (0x1D707, 'M', u'μ'),
+ (0x1D708, 'M', u'ν'),
+ (0x1D709, 'M', u'ξ'),
+ (0x1D70A, 'M', u'ο'),
+ (0x1D70B, 'M', u'π'),
+ (0x1D70C, 'M', u'ρ'),
+ (0x1D70D, 'M', u'σ'),
+ (0x1D70F, 'M', u'τ'),
+ (0x1D710, 'M', u'υ'),
+ (0x1D711, 'M', u'φ'),
+ (0x1D712, 'M', u'χ'),
+ (0x1D713, 'M', u'ψ'),
+ (0x1D714, 'M', u'ω'),
+ (0x1D715, 'M', u'∂'),
+ (0x1D716, 'M', u'ε'),
+ ]
+
+def _seg_67():
+ return [
+ (0x1D717, 'M', u'θ'),
+ (0x1D718, 'M', u'κ'),
+ (0x1D719, 'M', u'φ'),
+ (0x1D71A, 'M', u'ρ'),
+ (0x1D71B, 'M', u'π'),
+ (0x1D71C, 'M', u'α'),
+ (0x1D71D, 'M', u'β'),
+ (0x1D71E, 'M', u'γ'),
+ (0x1D71F, 'M', u'δ'),
+ (0x1D720, 'M', u'ε'),
+ (0x1D721, 'M', u'ζ'),
+ (0x1D722, 'M', u'η'),
+ (0x1D723, 'M', u'θ'),
+ (0x1D724, 'M', u'ι'),
+ (0x1D725, 'M', u'κ'),
+ (0x1D726, 'M', u'λ'),
+ (0x1D727, 'M', u'μ'),
+ (0x1D728, 'M', u'ν'),
+ (0x1D729, 'M', u'ξ'),
+ (0x1D72A, 'M', u'ο'),
+ (0x1D72B, 'M', u'π'),
+ (0x1D72C, 'M', u'ρ'),
+ (0x1D72D, 'M', u'θ'),
+ (0x1D72E, 'M', u'σ'),
+ (0x1D72F, 'M', u'τ'),
+ (0x1D730, 'M', u'υ'),
+ (0x1D731, 'M', u'φ'),
+ (0x1D732, 'M', u'χ'),
+ (0x1D733, 'M', u'ψ'),
+ (0x1D734, 'M', u'ω'),
+ (0x1D735, 'M', u'∇'),
+ (0x1D736, 'M', u'α'),
+ (0x1D737, 'M', u'β'),
+ (0x1D738, 'M', u'γ'),
+ (0x1D739, 'M', u'δ'),
+ (0x1D73A, 'M', u'ε'),
+ (0x1D73B, 'M', u'ζ'),
+ (0x1D73C, 'M', u'η'),
+ (0x1D73D, 'M', u'θ'),
+ (0x1D73E, 'M', u'ι'),
+ (0x1D73F, 'M', u'κ'),
+ (0x1D740, 'M', u'λ'),
+ (0x1D741, 'M', u'μ'),
+ (0x1D742, 'M', u'ν'),
+ (0x1D743, 'M', u'ξ'),
+ (0x1D744, 'M', u'ο'),
+ (0x1D745, 'M', u'π'),
+ (0x1D746, 'M', u'ρ'),
+ (0x1D747, 'M', u'σ'),
+ (0x1D749, 'M', u'τ'),
+ (0x1D74A, 'M', u'υ'),
+ (0x1D74B, 'M', u'φ'),
+ (0x1D74C, 'M', u'χ'),
+ (0x1D74D, 'M', u'ψ'),
+ (0x1D74E, 'M', u'ω'),
+ (0x1D74F, 'M', u'∂'),
+ (0x1D750, 'M', u'ε'),
+ (0x1D751, 'M', u'θ'),
+ (0x1D752, 'M', u'κ'),
+ (0x1D753, 'M', u'φ'),
+ (0x1D754, 'M', u'ρ'),
+ (0x1D755, 'M', u'π'),
+ (0x1D756, 'M', u'α'),
+ (0x1D757, 'M', u'β'),
+ (0x1D758, 'M', u'γ'),
+ (0x1D759, 'M', u'δ'),
+ (0x1D75A, 'M', u'ε'),
+ (0x1D75B, 'M', u'ζ'),
+ (0x1D75C, 'M', u'η'),
+ (0x1D75D, 'M', u'θ'),
+ (0x1D75E, 'M', u'ι'),
+ (0x1D75F, 'M', u'κ'),
+ (0x1D760, 'M', u'λ'),
+ (0x1D761, 'M', u'μ'),
+ (0x1D762, 'M', u'ν'),
+ (0x1D763, 'M', u'ξ'),
+ (0x1D764, 'M', u'ο'),
+ (0x1D765, 'M', u'π'),
+ (0x1D766, 'M', u'ρ'),
+ (0x1D767, 'M', u'θ'),
+ (0x1D768, 'M', u'σ'),
+ (0x1D769, 'M', u'τ'),
+ (0x1D76A, 'M', u'υ'),
+ (0x1D76B, 'M', u'φ'),
+ (0x1D76C, 'M', u'χ'),
+ (0x1D76D, 'M', u'ψ'),
+ (0x1D76E, 'M', u'ω'),
+ (0x1D76F, 'M', u'∇'),
+ (0x1D770, 'M', u'α'),
+ (0x1D771, 'M', u'β'),
+ (0x1D772, 'M', u'γ'),
+ (0x1D773, 'M', u'δ'),
+ (0x1D774, 'M', u'ε'),
+ (0x1D775, 'M', u'ζ'),
+ (0x1D776, 'M', u'η'),
+ (0x1D777, 'M', u'θ'),
+ (0x1D778, 'M', u'ι'),
+ (0x1D779, 'M', u'κ'),
+ (0x1D77A, 'M', u'λ'),
+ (0x1D77B, 'M', u'μ'),
+ ]
+
+def _seg_68():
+ return [
+ (0x1D77C, 'M', u'ν'),
+ (0x1D77D, 'M', u'ξ'),
+ (0x1D77E, 'M', u'ο'),
+ (0x1D77F, 'M', u'π'),
+ (0x1D780, 'M', u'ρ'),
+ (0x1D781, 'M', u'σ'),
+ (0x1D783, 'M', u'τ'),
+ (0x1D784, 'M', u'υ'),
+ (0x1D785, 'M', u'φ'),
+ (0x1D786, 'M', u'χ'),
+ (0x1D787, 'M', u'ψ'),
+ (0x1D788, 'M', u'ω'),
+ (0x1D789, 'M', u'∂'),
+ (0x1D78A, 'M', u'ε'),
+ (0x1D78B, 'M', u'θ'),
+ (0x1D78C, 'M', u'κ'),
+ (0x1D78D, 'M', u'φ'),
+ (0x1D78E, 'M', u'ρ'),
+ (0x1D78F, 'M', u'π'),
+ (0x1D790, 'M', u'α'),
+ (0x1D791, 'M', u'β'),
+ (0x1D792, 'M', u'γ'),
+ (0x1D793, 'M', u'δ'),
+ (0x1D794, 'M', u'ε'),
+ (0x1D795, 'M', u'ζ'),
+ (0x1D796, 'M', u'η'),
+ (0x1D797, 'M', u'θ'),
+ (0x1D798, 'M', u'ι'),
+ (0x1D799, 'M', u'κ'),
+ (0x1D79A, 'M', u'λ'),
+ (0x1D79B, 'M', u'μ'),
+ (0x1D79C, 'M', u'ν'),
+ (0x1D79D, 'M', u'ξ'),
+ (0x1D79E, 'M', u'ο'),
+ (0x1D79F, 'M', u'π'),
+ (0x1D7A0, 'M', u'ρ'),
+ (0x1D7A1, 'M', u'θ'),
+ (0x1D7A2, 'M', u'σ'),
+ (0x1D7A3, 'M', u'τ'),
+ (0x1D7A4, 'M', u'υ'),
+ (0x1D7A5, 'M', u'φ'),
+ (0x1D7A6, 'M', u'χ'),
+ (0x1D7A7, 'M', u'ψ'),
+ (0x1D7A8, 'M', u'ω'),
+ (0x1D7A9, 'M', u'∇'),
+ (0x1D7AA, 'M', u'α'),
+ (0x1D7AB, 'M', u'β'),
+ (0x1D7AC, 'M', u'γ'),
+ (0x1D7AD, 'M', u'δ'),
+ (0x1D7AE, 'M', u'ε'),
+ (0x1D7AF, 'M', u'ζ'),
+ (0x1D7B0, 'M', u'η'),
+ (0x1D7B1, 'M', u'θ'),
+ (0x1D7B2, 'M', u'ι'),
+ (0x1D7B3, 'M', u'κ'),
+ (0x1D7B4, 'M', u'λ'),
+ (0x1D7B5, 'M', u'μ'),
+ (0x1D7B6, 'M', u'ν'),
+ (0x1D7B7, 'M', u'ξ'),
+ (0x1D7B8, 'M', u'ο'),
+ (0x1D7B9, 'M', u'π'),
+ (0x1D7BA, 'M', u'ρ'),
+ (0x1D7BB, 'M', u'σ'),
+ (0x1D7BD, 'M', u'τ'),
+ (0x1D7BE, 'M', u'υ'),
+ (0x1D7BF, 'M', u'φ'),
+ (0x1D7C0, 'M', u'χ'),
+ (0x1D7C1, 'M', u'ψ'),
+ (0x1D7C2, 'M', u'ω'),
+ (0x1D7C3, 'M', u'∂'),
+ (0x1D7C4, 'M', u'ε'),
+ (0x1D7C5, 'M', u'θ'),
+ (0x1D7C6, 'M', u'κ'),
+ (0x1D7C7, 'M', u'φ'),
+ (0x1D7C8, 'M', u'ρ'),
+ (0x1D7C9, 'M', u'π'),
+ (0x1D7CA, 'M', u'ϝ'),
+ (0x1D7CC, 'X'),
+ (0x1D7CE, 'M', u'0'),
+ (0x1D7CF, 'M', u'1'),
+ (0x1D7D0, 'M', u'2'),
+ (0x1D7D1, 'M', u'3'),
+ (0x1D7D2, 'M', u'4'),
+ (0x1D7D3, 'M', u'5'),
+ (0x1D7D4, 'M', u'6'),
+ (0x1D7D5, 'M', u'7'),
+ (0x1D7D6, 'M', u'8'),
+ (0x1D7D7, 'M', u'9'),
+ (0x1D7D8, 'M', u'0'),
+ (0x1D7D9, 'M', u'1'),
+ (0x1D7DA, 'M', u'2'),
+ (0x1D7DB, 'M', u'3'),
+ (0x1D7DC, 'M', u'4'),
+ (0x1D7DD, 'M', u'5'),
+ (0x1D7DE, 'M', u'6'),
+ (0x1D7DF, 'M', u'7'),
+ (0x1D7E0, 'M', u'8'),
+ (0x1D7E1, 'M', u'9'),
+ (0x1D7E2, 'M', u'0'),
+ (0x1D7E3, 'M', u'1'),
+ ]
+
+def _seg_69():
+ return [
+ (0x1D7E4, 'M', u'2'),
+ (0x1D7E5, 'M', u'3'),
+ (0x1D7E6, 'M', u'4'),
+ (0x1D7E7, 'M', u'5'),
+ (0x1D7E8, 'M', u'6'),
+ (0x1D7E9, 'M', u'7'),
+ (0x1D7EA, 'M', u'8'),
+ (0x1D7EB, 'M', u'9'),
+ (0x1D7EC, 'M', u'0'),
+ (0x1D7ED, 'M', u'1'),
+ (0x1D7EE, 'M', u'2'),
+ (0x1D7EF, 'M', u'3'),
+ (0x1D7F0, 'M', u'4'),
+ (0x1D7F1, 'M', u'5'),
+ (0x1D7F2, 'M', u'6'),
+ (0x1D7F3, 'M', u'7'),
+ (0x1D7F4, 'M', u'8'),
+ (0x1D7F5, 'M', u'9'),
+ (0x1D7F6, 'M', u'0'),
+ (0x1D7F7, 'M', u'1'),
+ (0x1D7F8, 'M', u'2'),
+ (0x1D7F9, 'M', u'3'),
+ (0x1D7FA, 'M', u'4'),
+ (0x1D7FB, 'M', u'5'),
+ (0x1D7FC, 'M', u'6'),
+ (0x1D7FD, 'M', u'7'),
+ (0x1D7FE, 'M', u'8'),
+ (0x1D7FF, 'M', u'9'),
+ (0x1D800, 'V'),
+ (0x1DA8C, 'X'),
+ (0x1DA9B, 'V'),
+ (0x1DAA0, 'X'),
+ (0x1DAA1, 'V'),
+ (0x1DAB0, 'X'),
+ (0x1E000, 'V'),
+ (0x1E007, 'X'),
+ (0x1E008, 'V'),
+ (0x1E019, 'X'),
+ (0x1E01B, 'V'),
+ (0x1E022, 'X'),
+ (0x1E023, 'V'),
+ (0x1E025, 'X'),
+ (0x1E026, 'V'),
+ (0x1E02B, 'X'),
+ (0x1E100, 'V'),
+ (0x1E12D, 'X'),
+ (0x1E130, 'V'),
+ (0x1E13E, 'X'),
+ (0x1E140, 'V'),
+ (0x1E14A, 'X'),
+ (0x1E14E, 'V'),
+ (0x1E150, 'X'),
+ (0x1E2C0, 'V'),
+ (0x1E2FA, 'X'),
+ (0x1E2FF, 'V'),
+ (0x1E300, 'X'),
+ (0x1E800, 'V'),
+ (0x1E8C5, 'X'),
+ (0x1E8C7, 'V'),
+ (0x1E8D7, 'X'),
+ (0x1E900, 'M', u'𞤢'),
+ (0x1E901, 'M', u'𞤣'),
+ (0x1E902, 'M', u'𞤤'),
+ (0x1E903, 'M', u'𞤥'),
+ (0x1E904, 'M', u'𞤦'),
+ (0x1E905, 'M', u'𞤧'),
+ (0x1E906, 'M', u'𞤨'),
+ (0x1E907, 'M', u'𞤩'),
+ (0x1E908, 'M', u'𞤪'),
+ (0x1E909, 'M', u'𞤫'),
+ (0x1E90A, 'M', u'𞤬'),
+ (0x1E90B, 'M', u'𞤭'),
+ (0x1E90C, 'M', u'𞤮'),
+ (0x1E90D, 'M', u'𞤯'),
+ (0x1E90E, 'M', u'𞤰'),
+ (0x1E90F, 'M', u'𞤱'),
+ (0x1E910, 'M', u'𞤲'),
+ (0x1E911, 'M', u'𞤳'),
+ (0x1E912, 'M', u'𞤴'),
+ (0x1E913, 'M', u'𞤵'),
+ (0x1E914, 'M', u'𞤶'),
+ (0x1E915, 'M', u'𞤷'),
+ (0x1E916, 'M', u'𞤸'),
+ (0x1E917, 'M', u'𞤹'),
+ (0x1E918, 'M', u'𞤺'),
+ (0x1E919, 'M', u'𞤻'),
+ (0x1E91A, 'M', u'𞤼'),
+ (0x1E91B, 'M', u'𞤽'),
+ (0x1E91C, 'M', u'𞤾'),
+ (0x1E91D, 'M', u'𞤿'),
+ (0x1E91E, 'M', u'𞥀'),
+ (0x1E91F, 'M', u'𞥁'),
+ (0x1E920, 'M', u'𞥂'),
+ (0x1E921, 'M', u'𞥃'),
+ (0x1E922, 'V'),
+ (0x1E94C, 'X'),
+ (0x1E950, 'V'),
+ (0x1E95A, 'X'),
+ (0x1E95E, 'V'),
+ (0x1E960, 'X'),
+ ]
+
+def _seg_70():
+ return [
+ (0x1EC71, 'V'),
+ (0x1ECB5, 'X'),
+ (0x1ED01, 'V'),
+ (0x1ED3E, 'X'),
+ (0x1EE00, 'M', u'ا'),
+ (0x1EE01, 'M', u'ب'),
+ (0x1EE02, 'M', u'ج'),
+ (0x1EE03, 'M', u'د'),
+ (0x1EE04, 'X'),
+ (0x1EE05, 'M', u'و'),
+ (0x1EE06, 'M', u'ز'),
+ (0x1EE07, 'M', u'ح'),
+ (0x1EE08, 'M', u'ط'),
+ (0x1EE09, 'M', u'ي'),
+ (0x1EE0A, 'M', u'ك'),
+ (0x1EE0B, 'M', u'ل'),
+ (0x1EE0C, 'M', u'م'),
+ (0x1EE0D, 'M', u'ن'),
+ (0x1EE0E, 'M', u'س'),
+ (0x1EE0F, 'M', u'ع'),
+ (0x1EE10, 'M', u'ف'),
+ (0x1EE11, 'M', u'ص'),
+ (0x1EE12, 'M', u'ق'),
+ (0x1EE13, 'M', u'ر'),
+ (0x1EE14, 'M', u'ش'),
+ (0x1EE15, 'M', u'ت'),
+ (0x1EE16, 'M', u'ث'),
+ (0x1EE17, 'M', u'خ'),
+ (0x1EE18, 'M', u'ذ'),
+ (0x1EE19, 'M', u'ض'),
+ (0x1EE1A, 'M', u'ظ'),
+ (0x1EE1B, 'M', u'غ'),
+ (0x1EE1C, 'M', u'ٮ'),
+ (0x1EE1D, 'M', u'ں'),
+ (0x1EE1E, 'M', u'ڡ'),
+ (0x1EE1F, 'M', u'ٯ'),
+ (0x1EE20, 'X'),
+ (0x1EE21, 'M', u'ب'),
+ (0x1EE22, 'M', u'ج'),
+ (0x1EE23, 'X'),
+ (0x1EE24, 'M', u'ه'),
+ (0x1EE25, 'X'),
+ (0x1EE27, 'M', u'ح'),
+ (0x1EE28, 'X'),
+ (0x1EE29, 'M', u'ي'),
+ (0x1EE2A, 'M', u'ك'),
+ (0x1EE2B, 'M', u'ل'),
+ (0x1EE2C, 'M', u'م'),
+ (0x1EE2D, 'M', u'ن'),
+ (0x1EE2E, 'M', u'س'),
+ (0x1EE2F, 'M', u'ع'),
+ (0x1EE30, 'M', u'ف'),
+ (0x1EE31, 'M', u'ص'),
+ (0x1EE32, 'M', u'ق'),
+ (0x1EE33, 'X'),
+ (0x1EE34, 'M', u'ش'),
+ (0x1EE35, 'M', u'ت'),
+ (0x1EE36, 'M', u'ث'),
+ (0x1EE37, 'M', u'خ'),
+ (0x1EE38, 'X'),
+ (0x1EE39, 'M', u'ض'),
+ (0x1EE3A, 'X'),
+ (0x1EE3B, 'M', u'غ'),
+ (0x1EE3C, 'X'),
+ (0x1EE42, 'M', u'ج'),
+ (0x1EE43, 'X'),
+ (0x1EE47, 'M', u'ح'),
+ (0x1EE48, 'X'),
+ (0x1EE49, 'M', u'ي'),
+ (0x1EE4A, 'X'),
+ (0x1EE4B, 'M', u'ل'),
+ (0x1EE4C, 'X'),
+ (0x1EE4D, 'M', u'ن'),
+ (0x1EE4E, 'M', u'س'),
+ (0x1EE4F, 'M', u'ع'),
+ (0x1EE50, 'X'),
+ (0x1EE51, 'M', u'ص'),
+ (0x1EE52, 'M', u'ق'),
+ (0x1EE53, 'X'),
+ (0x1EE54, 'M', u'ش'),
+ (0x1EE55, 'X'),
+ (0x1EE57, 'M', u'خ'),
+ (0x1EE58, 'X'),
+ (0x1EE59, 'M', u'ض'),
+ (0x1EE5A, 'X'),
+ (0x1EE5B, 'M', u'غ'),
+ (0x1EE5C, 'X'),
+ (0x1EE5D, 'M', u'ں'),
+ (0x1EE5E, 'X'),
+ (0x1EE5F, 'M', u'ٯ'),
+ (0x1EE60, 'X'),
+ (0x1EE61, 'M', u'ب'),
+ (0x1EE62, 'M', u'ج'),
+ (0x1EE63, 'X'),
+ (0x1EE64, 'M', u'ه'),
+ (0x1EE65, 'X'),
+ (0x1EE67, 'M', u'ح'),
+ (0x1EE68, 'M', u'ط'),
+ (0x1EE69, 'M', u'ي'),
+ (0x1EE6A, 'M', u'ك'),
+ ]
+
+def _seg_71():
+ return [
+ (0x1EE6B, 'X'),
+ (0x1EE6C, 'M', u'م'),
+ (0x1EE6D, 'M', u'ن'),
+ (0x1EE6E, 'M', u'س'),
+ (0x1EE6F, 'M', u'ع'),
+ (0x1EE70, 'M', u'ف'),
+ (0x1EE71, 'M', u'ص'),
+ (0x1EE72, 'M', u'ق'),
+ (0x1EE73, 'X'),
+ (0x1EE74, 'M', u'ش'),
+ (0x1EE75, 'M', u'ت'),
+ (0x1EE76, 'M', u'ث'),
+ (0x1EE77, 'M', u'خ'),
+ (0x1EE78, 'X'),
+ (0x1EE79, 'M', u'ض'),
+ (0x1EE7A, 'M', u'ظ'),
+ (0x1EE7B, 'M', u'غ'),
+ (0x1EE7C, 'M', u'ٮ'),
+ (0x1EE7D, 'X'),
+ (0x1EE7E, 'M', u'ڡ'),
+ (0x1EE7F, 'X'),
+ (0x1EE80, 'M', u'ا'),
+ (0x1EE81, 'M', u'ب'),
+ (0x1EE82, 'M', u'ج'),
+ (0x1EE83, 'M', u'د'),
+ (0x1EE84, 'M', u'ه'),
+ (0x1EE85, 'M', u'و'),
+ (0x1EE86, 'M', u'ز'),
+ (0x1EE87, 'M', u'ح'),
+ (0x1EE88, 'M', u'ط'),
+ (0x1EE89, 'M', u'ي'),
+ (0x1EE8A, 'X'),
+ (0x1EE8B, 'M', u'ل'),
+ (0x1EE8C, 'M', u'م'),
+ (0x1EE8D, 'M', u'ن'),
+ (0x1EE8E, 'M', u'س'),
+ (0x1EE8F, 'M', u'ع'),
+ (0x1EE90, 'M', u'ف'),
+ (0x1EE91, 'M', u'ص'),
+ (0x1EE92, 'M', u'ق'),
+ (0x1EE93, 'M', u'ر'),
+ (0x1EE94, 'M', u'ش'),
+ (0x1EE95, 'M', u'ت'),
+ (0x1EE96, 'M', u'ث'),
+ (0x1EE97, 'M', u'خ'),
+ (0x1EE98, 'M', u'ذ'),
+ (0x1EE99, 'M', u'ض'),
+ (0x1EE9A, 'M', u'ظ'),
+ (0x1EE9B, 'M', u'غ'),
+ (0x1EE9C, 'X'),
+ (0x1EEA1, 'M', u'ب'),
+ (0x1EEA2, 'M', u'ج'),
+ (0x1EEA3, 'M', u'د'),
+ (0x1EEA4, 'X'),
+ (0x1EEA5, 'M', u'و'),
+ (0x1EEA6, 'M', u'ز'),
+ (0x1EEA7, 'M', u'ح'),
+ (0x1EEA8, 'M', u'ط'),
+ (0x1EEA9, 'M', u'ي'),
+ (0x1EEAA, 'X'),
+ (0x1EEAB, 'M', u'ل'),
+ (0x1EEAC, 'M', u'م'),
+ (0x1EEAD, 'M', u'ن'),
+ (0x1EEAE, 'M', u'س'),
+ (0x1EEAF, 'M', u'ع'),
+ (0x1EEB0, 'M', u'ف'),
+ (0x1EEB1, 'M', u'ص'),
+ (0x1EEB2, 'M', u'ق'),
+ (0x1EEB3, 'M', u'ر'),
+ (0x1EEB4, 'M', u'ش'),
+ (0x1EEB5, 'M', u'ت'),
+ (0x1EEB6, 'M', u'ث'),
+ (0x1EEB7, 'M', u'خ'),
+ (0x1EEB8, 'M', u'ذ'),
+ (0x1EEB9, 'M', u'ض'),
+ (0x1EEBA, 'M', u'ظ'),
+ (0x1EEBB, 'M', u'غ'),
+ (0x1EEBC, 'X'),
+ (0x1EEF0, 'V'),
+ (0x1EEF2, 'X'),
+ (0x1F000, 'V'),
+ (0x1F02C, 'X'),
+ (0x1F030, 'V'),
+ (0x1F094, 'X'),
+ (0x1F0A0, 'V'),
+ (0x1F0AF, 'X'),
+ (0x1F0B1, 'V'),
+ (0x1F0C0, 'X'),
+ (0x1F0C1, 'V'),
+ (0x1F0D0, 'X'),
+ (0x1F0D1, 'V'),
+ (0x1F0F6, 'X'),
+ (0x1F101, '3', u'0,'),
+ (0x1F102, '3', u'1,'),
+ (0x1F103, '3', u'2,'),
+ (0x1F104, '3', u'3,'),
+ (0x1F105, '3', u'4,'),
+ (0x1F106, '3', u'5,'),
+ (0x1F107, '3', u'6,'),
+ (0x1F108, '3', u'7,'),
+ ]
+
+def _seg_72():
+ return [
+ (0x1F109, '3', u'8,'),
+ (0x1F10A, '3', u'9,'),
+ (0x1F10B, 'V'),
+ (0x1F110, '3', u'(a)'),
+ (0x1F111, '3', u'(b)'),
+ (0x1F112, '3', u'(c)'),
+ (0x1F113, '3', u'(d)'),
+ (0x1F114, '3', u'(e)'),
+ (0x1F115, '3', u'(f)'),
+ (0x1F116, '3', u'(g)'),
+ (0x1F117, '3', u'(h)'),
+ (0x1F118, '3', u'(i)'),
+ (0x1F119, '3', u'(j)'),
+ (0x1F11A, '3', u'(k)'),
+ (0x1F11B, '3', u'(l)'),
+ (0x1F11C, '3', u'(m)'),
+ (0x1F11D, '3', u'(n)'),
+ (0x1F11E, '3', u'(o)'),
+ (0x1F11F, '3', u'(p)'),
+ (0x1F120, '3', u'(q)'),
+ (0x1F121, '3', u'(r)'),
+ (0x1F122, '3', u'(s)'),
+ (0x1F123, '3', u'(t)'),
+ (0x1F124, '3', u'(u)'),
+ (0x1F125, '3', u'(v)'),
+ (0x1F126, '3', u'(w)'),
+ (0x1F127, '3', u'(x)'),
+ (0x1F128, '3', u'(y)'),
+ (0x1F129, '3', u'(z)'),
+ (0x1F12A, 'M', u'〔s〕'),
+ (0x1F12B, 'M', u'c'),
+ (0x1F12C, 'M', u'r'),
+ (0x1F12D, 'M', u'cd'),
+ (0x1F12E, 'M', u'wz'),
+ (0x1F12F, 'V'),
+ (0x1F130, 'M', u'a'),
+ (0x1F131, 'M', u'b'),
+ (0x1F132, 'M', u'c'),
+ (0x1F133, 'M', u'd'),
+ (0x1F134, 'M', u'e'),
+ (0x1F135, 'M', u'f'),
+ (0x1F136, 'M', u'g'),
+ (0x1F137, 'M', u'h'),
+ (0x1F138, 'M', u'i'),
+ (0x1F139, 'M', u'j'),
+ (0x1F13A, 'M', u'k'),
+ (0x1F13B, 'M', u'l'),
+ (0x1F13C, 'M', u'm'),
+ (0x1F13D, 'M', u'n'),
+ (0x1F13E, 'M', u'o'),
+ (0x1F13F, 'M', u'p'),
+ (0x1F140, 'M', u'q'),
+ (0x1F141, 'M', u'r'),
+ (0x1F142, 'M', u's'),
+ (0x1F143, 'M', u't'),
+ (0x1F144, 'M', u'u'),
+ (0x1F145, 'M', u'v'),
+ (0x1F146, 'M', u'w'),
+ (0x1F147, 'M', u'x'),
+ (0x1F148, 'M', u'y'),
+ (0x1F149, 'M', u'z'),
+ (0x1F14A, 'M', u'hv'),
+ (0x1F14B, 'M', u'mv'),
+ (0x1F14C, 'M', u'sd'),
+ (0x1F14D, 'M', u'ss'),
+ (0x1F14E, 'M', u'ppv'),
+ (0x1F14F, 'M', u'wc'),
+ (0x1F150, 'V'),
+ (0x1F16A, 'M', u'mc'),
+ (0x1F16B, 'M', u'md'),
+ (0x1F16C, 'M', u'mr'),
+ (0x1F16D, 'V'),
+ (0x1F190, 'M', u'dj'),
+ (0x1F191, 'V'),
+ (0x1F1AE, 'X'),
+ (0x1F1E6, 'V'),
+ (0x1F200, 'M', u'ほか'),
+ (0x1F201, 'M', u'ココ'),
+ (0x1F202, 'M', u'サ'),
+ (0x1F203, 'X'),
+ (0x1F210, 'M', u'手'),
+ (0x1F211, 'M', u'字'),
+ (0x1F212, 'M', u'双'),
+ (0x1F213, 'M', u'デ'),
+ (0x1F214, 'M', u'二'),
+ (0x1F215, 'M', u'多'),
+ (0x1F216, 'M', u'解'),
+ (0x1F217, 'M', u'天'),
+ (0x1F218, 'M', u'交'),
+ (0x1F219, 'M', u'映'),
+ (0x1F21A, 'M', u'無'),
+ (0x1F21B, 'M', u'料'),
+ (0x1F21C, 'M', u'前'),
+ (0x1F21D, 'M', u'後'),
+ (0x1F21E, 'M', u'再'),
+ (0x1F21F, 'M', u'新'),
+ (0x1F220, 'M', u'初'),
+ (0x1F221, 'M', u'終'),
+ (0x1F222, 'M', u'生'),
+ (0x1F223, 'M', u'販'),
+ ]
+
+def _seg_73():
+ return [
+ (0x1F224, 'M', u'声'),
+ (0x1F225, 'M', u'吹'),
+ (0x1F226, 'M', u'演'),
+ (0x1F227, 'M', u'投'),
+ (0x1F228, 'M', u'捕'),
+ (0x1F229, 'M', u'一'),
+ (0x1F22A, 'M', u'三'),
+ (0x1F22B, 'M', u'遊'),
+ (0x1F22C, 'M', u'左'),
+ (0x1F22D, 'M', u'中'),
+ (0x1F22E, 'M', u'右'),
+ (0x1F22F, 'M', u'指'),
+ (0x1F230, 'M', u'走'),
+ (0x1F231, 'M', u'打'),
+ (0x1F232, 'M', u'禁'),
+ (0x1F233, 'M', u'空'),
+ (0x1F234, 'M', u'合'),
+ (0x1F235, 'M', u'満'),
+ (0x1F236, 'M', u'有'),
+ (0x1F237, 'M', u'月'),
+ (0x1F238, 'M', u'申'),
+ (0x1F239, 'M', u'割'),
+ (0x1F23A, 'M', u'営'),
+ (0x1F23B, 'M', u'配'),
+ (0x1F23C, 'X'),
+ (0x1F240, 'M', u'〔本〕'),
+ (0x1F241, 'M', u'〔三〕'),
+ (0x1F242, 'M', u'〔二〕'),
+ (0x1F243, 'M', u'〔安〕'),
+ (0x1F244, 'M', u'〔点〕'),
+ (0x1F245, 'M', u'〔打〕'),
+ (0x1F246, 'M', u'〔盗〕'),
+ (0x1F247, 'M', u'〔勝〕'),
+ (0x1F248, 'M', u'〔敗〕'),
+ (0x1F249, 'X'),
+ (0x1F250, 'M', u'得'),
+ (0x1F251, 'M', u'可'),
+ (0x1F252, 'X'),
+ (0x1F260, 'V'),
+ (0x1F266, 'X'),
+ (0x1F300, 'V'),
+ (0x1F6D8, 'X'),
+ (0x1F6E0, 'V'),
+ (0x1F6ED, 'X'),
+ (0x1F6F0, 'V'),
+ (0x1F6FD, 'X'),
+ (0x1F700, 'V'),
+ (0x1F774, 'X'),
+ (0x1F780, 'V'),
+ (0x1F7D9, 'X'),
+ (0x1F7E0, 'V'),
+ (0x1F7EC, 'X'),
+ (0x1F800, 'V'),
+ (0x1F80C, 'X'),
+ (0x1F810, 'V'),
+ (0x1F848, 'X'),
+ (0x1F850, 'V'),
+ (0x1F85A, 'X'),
+ (0x1F860, 'V'),
+ (0x1F888, 'X'),
+ (0x1F890, 'V'),
+ (0x1F8AE, 'X'),
+ (0x1F8B0, 'V'),
+ (0x1F8B2, 'X'),
+ (0x1F900, 'V'),
+ (0x1F979, 'X'),
+ (0x1F97A, 'V'),
+ (0x1F9CC, 'X'),
+ (0x1F9CD, 'V'),
+ (0x1FA54, 'X'),
+ (0x1FA60, 'V'),
+ (0x1FA6E, 'X'),
+ (0x1FA70, 'V'),
+ (0x1FA75, 'X'),
+ (0x1FA78, 'V'),
+ (0x1FA7B, 'X'),
+ (0x1FA80, 'V'),
+ (0x1FA87, 'X'),
+ (0x1FA90, 'V'),
+ (0x1FAA9, 'X'),
+ (0x1FAB0, 'V'),
+ (0x1FAB7, 'X'),
+ (0x1FAC0, 'V'),
+ (0x1FAC3, 'X'),
+ (0x1FAD0, 'V'),
+ (0x1FAD7, 'X'),
+ (0x1FB00, 'V'),
+ (0x1FB93, 'X'),
+ (0x1FB94, 'V'),
+ (0x1FBCB, 'X'),
+ (0x1FBF0, 'M', u'0'),
+ (0x1FBF1, 'M', u'1'),
+ (0x1FBF2, 'M', u'2'),
+ (0x1FBF3, 'M', u'3'),
+ (0x1FBF4, 'M', u'4'),
+ (0x1FBF5, 'M', u'5'),
+ (0x1FBF6, 'M', u'6'),
+ (0x1FBF7, 'M', u'7'),
+ (0x1FBF8, 'M', u'8'),
+ (0x1FBF9, 'M', u'9'),
+ ]
+
+def _seg_74():
+ return [
+ (0x1FBFA, 'X'),
+ (0x20000, 'V'),
+ (0x2A6DE, 'X'),
+ (0x2A700, 'V'),
+ (0x2B735, 'X'),
+ (0x2B740, 'V'),
+ (0x2B81E, 'X'),
+ (0x2B820, 'V'),
+ (0x2CEA2, 'X'),
+ (0x2CEB0, 'V'),
+ (0x2EBE1, 'X'),
+ (0x2F800, 'M', u'丽'),
+ (0x2F801, 'M', u'丸'),
+ (0x2F802, 'M', u'乁'),
+ (0x2F803, 'M', u'𠄢'),
+ (0x2F804, 'M', u'你'),
+ (0x2F805, 'M', u'侮'),
+ (0x2F806, 'M', u'侻'),
+ (0x2F807, 'M', u'倂'),
+ (0x2F808, 'M', u'偺'),
+ (0x2F809, 'M', u'備'),
+ (0x2F80A, 'M', u'僧'),
+ (0x2F80B, 'M', u'像'),
+ (0x2F80C, 'M', u'㒞'),
+ (0x2F80D, 'M', u'𠘺'),
+ (0x2F80E, 'M', u'免'),
+ (0x2F80F, 'M', u'兔'),
+ (0x2F810, 'M', u'兤'),
+ (0x2F811, 'M', u'具'),
+ (0x2F812, 'M', u'𠔜'),
+ (0x2F813, 'M', u'㒹'),
+ (0x2F814, 'M', u'內'),
+ (0x2F815, 'M', u'再'),
+ (0x2F816, 'M', u'𠕋'),
+ (0x2F817, 'M', u'冗'),
+ (0x2F818, 'M', u'冤'),
+ (0x2F819, 'M', u'仌'),
+ (0x2F81A, 'M', u'冬'),
+ (0x2F81B, 'M', u'况'),
+ (0x2F81C, 'M', u'𩇟'),
+ (0x2F81D, 'M', u'凵'),
+ (0x2F81E, 'M', u'刃'),
+ (0x2F81F, 'M', u'㓟'),
+ (0x2F820, 'M', u'刻'),
+ (0x2F821, 'M', u'剆'),
+ (0x2F822, 'M', u'割'),
+ (0x2F823, 'M', u'剷'),
+ (0x2F824, 'M', u'㔕'),
+ (0x2F825, 'M', u'勇'),
+ (0x2F826, 'M', u'勉'),
+ (0x2F827, 'M', u'勤'),
+ (0x2F828, 'M', u'勺'),
+ (0x2F829, 'M', u'包'),
+ (0x2F82A, 'M', u'匆'),
+ (0x2F82B, 'M', u'北'),
+ (0x2F82C, 'M', u'卉'),
+ (0x2F82D, 'M', u'卑'),
+ (0x2F82E, 'M', u'博'),
+ (0x2F82F, 'M', u'即'),
+ (0x2F830, 'M', u'卽'),
+ (0x2F831, 'M', u'卿'),
+ (0x2F834, 'M', u'𠨬'),
+ (0x2F835, 'M', u'灰'),
+ (0x2F836, 'M', u'及'),
+ (0x2F837, 'M', u'叟'),
+ (0x2F838, 'M', u'𠭣'),
+ (0x2F839, 'M', u'叫'),
+ (0x2F83A, 'M', u'叱'),
+ (0x2F83B, 'M', u'吆'),
+ (0x2F83C, 'M', u'咞'),
+ (0x2F83D, 'M', u'吸'),
+ (0x2F83E, 'M', u'呈'),
+ (0x2F83F, 'M', u'周'),
+ (0x2F840, 'M', u'咢'),
+ (0x2F841, 'M', u'哶'),
+ (0x2F842, 'M', u'唐'),
+ (0x2F843, 'M', u'啓'),
+ (0x2F844, 'M', u'啣'),
+ (0x2F845, 'M', u'善'),
+ (0x2F847, 'M', u'喙'),
+ (0x2F848, 'M', u'喫'),
+ (0x2F849, 'M', u'喳'),
+ (0x2F84A, 'M', u'嗂'),
+ (0x2F84B, 'M', u'圖'),
+ (0x2F84C, 'M', u'嘆'),
+ (0x2F84D, 'M', u'圗'),
+ (0x2F84E, 'M', u'噑'),
+ (0x2F84F, 'M', u'噴'),
+ (0x2F850, 'M', u'切'),
+ (0x2F851, 'M', u'壮'),
+ (0x2F852, 'M', u'城'),
+ (0x2F853, 'M', u'埴'),
+ (0x2F854, 'M', u'堍'),
+ (0x2F855, 'M', u'型'),
+ (0x2F856, 'M', u'堲'),
+ (0x2F857, 'M', u'報'),
+ (0x2F858, 'M', u'墬'),
+ (0x2F859, 'M', u'𡓤'),
+ (0x2F85A, 'M', u'売'),
+ (0x2F85B, 'M', u'壷'),
+ ]
+
+def _seg_75():
+ return [
+ (0x2F85C, 'M', u'夆'),
+ (0x2F85D, 'M', u'多'),
+ (0x2F85E, 'M', u'夢'),
+ (0x2F85F, 'M', u'奢'),
+ (0x2F860, 'M', u'𡚨'),
+ (0x2F861, 'M', u'𡛪'),
+ (0x2F862, 'M', u'姬'),
+ (0x2F863, 'M', u'娛'),
+ (0x2F864, 'M', u'娧'),
+ (0x2F865, 'M', u'姘'),
+ (0x2F866, 'M', u'婦'),
+ (0x2F867, 'M', u'㛮'),
+ (0x2F868, 'X'),
+ (0x2F869, 'M', u'嬈'),
+ (0x2F86A, 'M', u'嬾'),
+ (0x2F86C, 'M', u'𡧈'),
+ (0x2F86D, 'M', u'寃'),
+ (0x2F86E, 'M', u'寘'),
+ (0x2F86F, 'M', u'寧'),
+ (0x2F870, 'M', u'寳'),
+ (0x2F871, 'M', u'𡬘'),
+ (0x2F872, 'M', u'寿'),
+ (0x2F873, 'M', u'将'),
+ (0x2F874, 'X'),
+ (0x2F875, 'M', u'尢'),
+ (0x2F876, 'M', u'㞁'),
+ (0x2F877, 'M', u'屠'),
+ (0x2F878, 'M', u'屮'),
+ (0x2F879, 'M', u'峀'),
+ (0x2F87A, 'M', u'岍'),
+ (0x2F87B, 'M', u'𡷤'),
+ (0x2F87C, 'M', u'嵃'),
+ (0x2F87D, 'M', u'𡷦'),
+ (0x2F87E, 'M', u'嵮'),
+ (0x2F87F, 'M', u'嵫'),
+ (0x2F880, 'M', u'嵼'),
+ (0x2F881, 'M', u'巡'),
+ (0x2F882, 'M', u'巢'),
+ (0x2F883, 'M', u'㠯'),
+ (0x2F884, 'M', u'巽'),
+ (0x2F885, 'M', u'帨'),
+ (0x2F886, 'M', u'帽'),
+ (0x2F887, 'M', u'幩'),
+ (0x2F888, 'M', u'㡢'),
+ (0x2F889, 'M', u'𢆃'),
+ (0x2F88A, 'M', u'㡼'),
+ (0x2F88B, 'M', u'庰'),
+ (0x2F88C, 'M', u'庳'),
+ (0x2F88D, 'M', u'庶'),
+ (0x2F88E, 'M', u'廊'),
+ (0x2F88F, 'M', u'𪎒'),
+ (0x2F890, 'M', u'廾'),
+ (0x2F891, 'M', u'𢌱'),
+ (0x2F893, 'M', u'舁'),
+ (0x2F894, 'M', u'弢'),
+ (0x2F896, 'M', u'㣇'),
+ (0x2F897, 'M', u'𣊸'),
+ (0x2F898, 'M', u'𦇚'),
+ (0x2F899, 'M', u'形'),
+ (0x2F89A, 'M', u'彫'),
+ (0x2F89B, 'M', u'㣣'),
+ (0x2F89C, 'M', u'徚'),
+ (0x2F89D, 'M', u'忍'),
+ (0x2F89E, 'M', u'志'),
+ (0x2F89F, 'M', u'忹'),
+ (0x2F8A0, 'M', u'悁'),
+ (0x2F8A1, 'M', u'㤺'),
+ (0x2F8A2, 'M', u'㤜'),
+ (0x2F8A3, 'M', u'悔'),
+ (0x2F8A4, 'M', u'𢛔'),
+ (0x2F8A5, 'M', u'惇'),
+ (0x2F8A6, 'M', u'慈'),
+ (0x2F8A7, 'M', u'慌'),
+ (0x2F8A8, 'M', u'慎'),
+ (0x2F8A9, 'M', u'慌'),
+ (0x2F8AA, 'M', u'慺'),
+ (0x2F8AB, 'M', u'憎'),
+ (0x2F8AC, 'M', u'憲'),
+ (0x2F8AD, 'M', u'憤'),
+ (0x2F8AE, 'M', u'憯'),
+ (0x2F8AF, 'M', u'懞'),
+ (0x2F8B0, 'M', u'懲'),
+ (0x2F8B1, 'M', u'懶'),
+ (0x2F8B2, 'M', u'成'),
+ (0x2F8B3, 'M', u'戛'),
+ (0x2F8B4, 'M', u'扝'),
+ (0x2F8B5, 'M', u'抱'),
+ (0x2F8B6, 'M', u'拔'),
+ (0x2F8B7, 'M', u'捐'),
+ (0x2F8B8, 'M', u'𢬌'),
+ (0x2F8B9, 'M', u'挽'),
+ (0x2F8BA, 'M', u'拼'),
+ (0x2F8BB, 'M', u'捨'),
+ (0x2F8BC, 'M', u'掃'),
+ (0x2F8BD, 'M', u'揤'),
+ (0x2F8BE, 'M', u'𢯱'),
+ (0x2F8BF, 'M', u'搢'),
+ (0x2F8C0, 'M', u'揅'),
+ (0x2F8C1, 'M', u'掩'),
+ (0x2F8C2, 'M', u'㨮'),
+ ]
+
+def _seg_76():
+ return [
+ (0x2F8C3, 'M', u'摩'),
+ (0x2F8C4, 'M', u'摾'),
+ (0x2F8C5, 'M', u'撝'),
+ (0x2F8C6, 'M', u'摷'),
+ (0x2F8C7, 'M', u'㩬'),
+ (0x2F8C8, 'M', u'敏'),
+ (0x2F8C9, 'M', u'敬'),
+ (0x2F8CA, 'M', u'𣀊'),
+ (0x2F8CB, 'M', u'旣'),
+ (0x2F8CC, 'M', u'書'),
+ (0x2F8CD, 'M', u'晉'),
+ (0x2F8CE, 'M', u'㬙'),
+ (0x2F8CF, 'M', u'暑'),
+ (0x2F8D0, 'M', u'㬈'),
+ (0x2F8D1, 'M', u'㫤'),
+ (0x2F8D2, 'M', u'冒'),
+ (0x2F8D3, 'M', u'冕'),
+ (0x2F8D4, 'M', u'最'),
+ (0x2F8D5, 'M', u'暜'),
+ (0x2F8D6, 'M', u'肭'),
+ (0x2F8D7, 'M', u'䏙'),
+ (0x2F8D8, 'M', u'朗'),
+ (0x2F8D9, 'M', u'望'),
+ (0x2F8DA, 'M', u'朡'),
+ (0x2F8DB, 'M', u'杞'),
+ (0x2F8DC, 'M', u'杓'),
+ (0x2F8DD, 'M', u'𣏃'),
+ (0x2F8DE, 'M', u'㭉'),
+ (0x2F8DF, 'M', u'柺'),
+ (0x2F8E0, 'M', u'枅'),
+ (0x2F8E1, 'M', u'桒'),
+ (0x2F8E2, 'M', u'梅'),
+ (0x2F8E3, 'M', u'𣑭'),
+ (0x2F8E4, 'M', u'梎'),
+ (0x2F8E5, 'M', u'栟'),
+ (0x2F8E6, 'M', u'椔'),
+ (0x2F8E7, 'M', u'㮝'),
+ (0x2F8E8, 'M', u'楂'),
+ (0x2F8E9, 'M', u'榣'),
+ (0x2F8EA, 'M', u'槪'),
+ (0x2F8EB, 'M', u'檨'),
+ (0x2F8EC, 'M', u'𣚣'),
+ (0x2F8ED, 'M', u'櫛'),
+ (0x2F8EE, 'M', u'㰘'),
+ (0x2F8EF, 'M', u'次'),
+ (0x2F8F0, 'M', u'𣢧'),
+ (0x2F8F1, 'M', u'歔'),
+ (0x2F8F2, 'M', u'㱎'),
+ (0x2F8F3, 'M', u'歲'),
+ (0x2F8F4, 'M', u'殟'),
+ (0x2F8F5, 'M', u'殺'),
+ (0x2F8F6, 'M', u'殻'),
+ (0x2F8F7, 'M', u'𣪍'),
+ (0x2F8F8, 'M', u'𡴋'),
+ (0x2F8F9, 'M', u'𣫺'),
+ (0x2F8FA, 'M', u'汎'),
+ (0x2F8FB, 'M', u'𣲼'),
+ (0x2F8FC, 'M', u'沿'),
+ (0x2F8FD, 'M', u'泍'),
+ (0x2F8FE, 'M', u'汧'),
+ (0x2F8FF, 'M', u'洖'),
+ (0x2F900, 'M', u'派'),
+ (0x2F901, 'M', u'海'),
+ (0x2F902, 'M', u'流'),
+ (0x2F903, 'M', u'浩'),
+ (0x2F904, 'M', u'浸'),
+ (0x2F905, 'M', u'涅'),
+ (0x2F906, 'M', u'𣴞'),
+ (0x2F907, 'M', u'洴'),
+ (0x2F908, 'M', u'港'),
+ (0x2F909, 'M', u'湮'),
+ (0x2F90A, 'M', u'㴳'),
+ (0x2F90B, 'M', u'滋'),
+ (0x2F90C, 'M', u'滇'),
+ (0x2F90D, 'M', u'𣻑'),
+ (0x2F90E, 'M', u'淹'),
+ (0x2F90F, 'M', u'潮'),
+ (0x2F910, 'M', u'𣽞'),
+ (0x2F911, 'M', u'𣾎'),
+ (0x2F912, 'M', u'濆'),
+ (0x2F913, 'M', u'瀹'),
+ (0x2F914, 'M', u'瀞'),
+ (0x2F915, 'M', u'瀛'),
+ (0x2F916, 'M', u'㶖'),
+ (0x2F917, 'M', u'灊'),
+ (0x2F918, 'M', u'災'),
+ (0x2F919, 'M', u'灷'),
+ (0x2F91A, 'M', u'炭'),
+ (0x2F91B, 'M', u'𠔥'),
+ (0x2F91C, 'M', u'煅'),
+ (0x2F91D, 'M', u'𤉣'),
+ (0x2F91E, 'M', u'熜'),
+ (0x2F91F, 'X'),
+ (0x2F920, 'M', u'爨'),
+ (0x2F921, 'M', u'爵'),
+ (0x2F922, 'M', u'牐'),
+ (0x2F923, 'M', u'𤘈'),
+ (0x2F924, 'M', u'犀'),
+ (0x2F925, 'M', u'犕'),
+ (0x2F926, 'M', u'𤜵'),
+ ]
+
+def _seg_77():
+ return [
+ (0x2F927, 'M', u'𤠔'),
+ (0x2F928, 'M', u'獺'),
+ (0x2F929, 'M', u'王'),
+ (0x2F92A, 'M', u'㺬'),
+ (0x2F92B, 'M', u'玥'),
+ (0x2F92C, 'M', u'㺸'),
+ (0x2F92E, 'M', u'瑇'),
+ (0x2F92F, 'M', u'瑜'),
+ (0x2F930, 'M', u'瑱'),
+ (0x2F931, 'M', u'璅'),
+ (0x2F932, 'M', u'瓊'),
+ (0x2F933, 'M', u'㼛'),
+ (0x2F934, 'M', u'甤'),
+ (0x2F935, 'M', u'𤰶'),
+ (0x2F936, 'M', u'甾'),
+ (0x2F937, 'M', u'𤲒'),
+ (0x2F938, 'M', u'異'),
+ (0x2F939, 'M', u'𢆟'),
+ (0x2F93A, 'M', u'瘐'),
+ (0x2F93B, 'M', u'𤾡'),
+ (0x2F93C, 'M', u'𤾸'),
+ (0x2F93D, 'M', u'𥁄'),
+ (0x2F93E, 'M', u'㿼'),
+ (0x2F93F, 'M', u'䀈'),
+ (0x2F940, 'M', u'直'),
+ (0x2F941, 'M', u'𥃳'),
+ (0x2F942, 'M', u'𥃲'),
+ (0x2F943, 'M', u'𥄙'),
+ (0x2F944, 'M', u'𥄳'),
+ (0x2F945, 'M', u'眞'),
+ (0x2F946, 'M', u'真'),
+ (0x2F948, 'M', u'睊'),
+ (0x2F949, 'M', u'䀹'),
+ (0x2F94A, 'M', u'瞋'),
+ (0x2F94B, 'M', u'䁆'),
+ (0x2F94C, 'M', u'䂖'),
+ (0x2F94D, 'M', u'𥐝'),
+ (0x2F94E, 'M', u'硎'),
+ (0x2F94F, 'M', u'碌'),
+ (0x2F950, 'M', u'磌'),
+ (0x2F951, 'M', u'䃣'),
+ (0x2F952, 'M', u'𥘦'),
+ (0x2F953, 'M', u'祖'),
+ (0x2F954, 'M', u'𥚚'),
+ (0x2F955, 'M', u'𥛅'),
+ (0x2F956, 'M', u'福'),
+ (0x2F957, 'M', u'秫'),
+ (0x2F958, 'M', u'䄯'),
+ (0x2F959, 'M', u'穀'),
+ (0x2F95A, 'M', u'穊'),
+ (0x2F95B, 'M', u'穏'),
+ (0x2F95C, 'M', u'𥥼'),
+ (0x2F95D, 'M', u'𥪧'),
+ (0x2F95F, 'X'),
+ (0x2F960, 'M', u'䈂'),
+ (0x2F961, 'M', u'𥮫'),
+ (0x2F962, 'M', u'篆'),
+ (0x2F963, 'M', u'築'),
+ (0x2F964, 'M', u'䈧'),
+ (0x2F965, 'M', u'𥲀'),
+ (0x2F966, 'M', u'糒'),
+ (0x2F967, 'M', u'䊠'),
+ (0x2F968, 'M', u'糨'),
+ (0x2F969, 'M', u'糣'),
+ (0x2F96A, 'M', u'紀'),
+ (0x2F96B, 'M', u'𥾆'),
+ (0x2F96C, 'M', u'絣'),
+ (0x2F96D, 'M', u'䌁'),
+ (0x2F96E, 'M', u'緇'),
+ (0x2F96F, 'M', u'縂'),
+ (0x2F970, 'M', u'繅'),
+ (0x2F971, 'M', u'䌴'),
+ (0x2F972, 'M', u'𦈨'),
+ (0x2F973, 'M', u'𦉇'),
+ (0x2F974, 'M', u'䍙'),
+ (0x2F975, 'M', u'𦋙'),
+ (0x2F976, 'M', u'罺'),
+ (0x2F977, 'M', u'𦌾'),
+ (0x2F978, 'M', u'羕'),
+ (0x2F979, 'M', u'翺'),
+ (0x2F97A, 'M', u'者'),
+ (0x2F97B, 'M', u'𦓚'),
+ (0x2F97C, 'M', u'𦔣'),
+ (0x2F97D, 'M', u'聠'),
+ (0x2F97E, 'M', u'𦖨'),
+ (0x2F97F, 'M', u'聰'),
+ (0x2F980, 'M', u'𣍟'),
+ (0x2F981, 'M', u'䏕'),
+ (0x2F982, 'M', u'育'),
+ (0x2F983, 'M', u'脃'),
+ (0x2F984, 'M', u'䐋'),
+ (0x2F985, 'M', u'脾'),
+ (0x2F986, 'M', u'媵'),
+ (0x2F987, 'M', u'𦞧'),
+ (0x2F988, 'M', u'𦞵'),
+ (0x2F989, 'M', u'𣎓'),
+ (0x2F98A, 'M', u'𣎜'),
+ (0x2F98B, 'M', u'舁'),
+ (0x2F98C, 'M', u'舄'),
+ (0x2F98D, 'M', u'辞'),
+ ]
+
+def _seg_78():
+ return [
+ (0x2F98E, 'M', u'䑫'),
+ (0x2F98F, 'M', u'芑'),
+ (0x2F990, 'M', u'芋'),
+ (0x2F991, 'M', u'芝'),
+ (0x2F992, 'M', u'劳'),
+ (0x2F993, 'M', u'花'),
+ (0x2F994, 'M', u'芳'),
+ (0x2F995, 'M', u'芽'),
+ (0x2F996, 'M', u'苦'),
+ (0x2F997, 'M', u'𦬼'),
+ (0x2F998, 'M', u'若'),
+ (0x2F999, 'M', u'茝'),
+ (0x2F99A, 'M', u'荣'),
+ (0x2F99B, 'M', u'莭'),
+ (0x2F99C, 'M', u'茣'),
+ (0x2F99D, 'M', u'莽'),
+ (0x2F99E, 'M', u'菧'),
+ (0x2F99F, 'M', u'著'),
+ (0x2F9A0, 'M', u'荓'),
+ (0x2F9A1, 'M', u'菊'),
+ (0x2F9A2, 'M', u'菌'),
+ (0x2F9A3, 'M', u'菜'),
+ (0x2F9A4, 'M', u'𦰶'),
+ (0x2F9A5, 'M', u'𦵫'),
+ (0x2F9A6, 'M', u'𦳕'),
+ (0x2F9A7, 'M', u'䔫'),
+ (0x2F9A8, 'M', u'蓱'),
+ (0x2F9A9, 'M', u'蓳'),
+ (0x2F9AA, 'M', u'蔖'),
+ (0x2F9AB, 'M', u'𧏊'),
+ (0x2F9AC, 'M', u'蕤'),
+ (0x2F9AD, 'M', u'𦼬'),
+ (0x2F9AE, 'M', u'䕝'),
+ (0x2F9AF, 'M', u'䕡'),
+ (0x2F9B0, 'M', u'𦾱'),
+ (0x2F9B1, 'M', u'𧃒'),
+ (0x2F9B2, 'M', u'䕫'),
+ (0x2F9B3, 'M', u'虐'),
+ (0x2F9B4, 'M', u'虜'),
+ (0x2F9B5, 'M', u'虧'),
+ (0x2F9B6, 'M', u'虩'),
+ (0x2F9B7, 'M', u'蚩'),
+ (0x2F9B8, 'M', u'蚈'),
+ (0x2F9B9, 'M', u'蜎'),
+ (0x2F9BA, 'M', u'蛢'),
+ (0x2F9BB, 'M', u'蝹'),
+ (0x2F9BC, 'M', u'蜨'),
+ (0x2F9BD, 'M', u'蝫'),
+ (0x2F9BE, 'M', u'螆'),
+ (0x2F9BF, 'X'),
+ (0x2F9C0, 'M', u'蟡'),
+ (0x2F9C1, 'M', u'蠁'),
+ (0x2F9C2, 'M', u'䗹'),
+ (0x2F9C3, 'M', u'衠'),
+ (0x2F9C4, 'M', u'衣'),
+ (0x2F9C5, 'M', u'𧙧'),
+ (0x2F9C6, 'M', u'裗'),
+ (0x2F9C7, 'M', u'裞'),
+ (0x2F9C8, 'M', u'䘵'),
+ (0x2F9C9, 'M', u'裺'),
+ (0x2F9CA, 'M', u'㒻'),
+ (0x2F9CB, 'M', u'𧢮'),
+ (0x2F9CC, 'M', u'𧥦'),
+ (0x2F9CD, 'M', u'䚾'),
+ (0x2F9CE, 'M', u'䛇'),
+ (0x2F9CF, 'M', u'誠'),
+ (0x2F9D0, 'M', u'諭'),
+ (0x2F9D1, 'M', u'變'),
+ (0x2F9D2, 'M', u'豕'),
+ (0x2F9D3, 'M', u'𧲨'),
+ (0x2F9D4, 'M', u'貫'),
+ (0x2F9D5, 'M', u'賁'),
+ (0x2F9D6, 'M', u'贛'),
+ (0x2F9D7, 'M', u'起'),
+ (0x2F9D8, 'M', u'𧼯'),
+ (0x2F9D9, 'M', u'𠠄'),
+ (0x2F9DA, 'M', u'跋'),
+ (0x2F9DB, 'M', u'趼'),
+ (0x2F9DC, 'M', u'跰'),
+ (0x2F9DD, 'M', u'𠣞'),
+ (0x2F9DE, 'M', u'軔'),
+ (0x2F9DF, 'M', u'輸'),
+ (0x2F9E0, 'M', u'𨗒'),
+ (0x2F9E1, 'M', u'𨗭'),
+ (0x2F9E2, 'M', u'邔'),
+ (0x2F9E3, 'M', u'郱'),
+ (0x2F9E4, 'M', u'鄑'),
+ (0x2F9E5, 'M', u'𨜮'),
+ (0x2F9E6, 'M', u'鄛'),
+ (0x2F9E7, 'M', u'鈸'),
+ (0x2F9E8, 'M', u'鋗'),
+ (0x2F9E9, 'M', u'鋘'),
+ (0x2F9EA, 'M', u'鉼'),
+ (0x2F9EB, 'M', u'鏹'),
+ (0x2F9EC, 'M', u'鐕'),
+ (0x2F9ED, 'M', u'𨯺'),
+ (0x2F9EE, 'M', u'開'),
+ (0x2F9EF, 'M', u'䦕'),
+ (0x2F9F0, 'M', u'閷'),
+ (0x2F9F1, 'M', u'𨵷'),
+ ]
+
+def _seg_79():
+ return [
+ (0x2F9F2, 'M', u'䧦'),
+ (0x2F9F3, 'M', u'雃'),
+ (0x2F9F4, 'M', u'嶲'),
+ (0x2F9F5, 'M', u'霣'),
+ (0x2F9F6, 'M', u'𩅅'),
+ (0x2F9F7, 'M', u'𩈚'),
+ (0x2F9F8, 'M', u'䩮'),
+ (0x2F9F9, 'M', u'䩶'),
+ (0x2F9FA, 'M', u'韠'),
+ (0x2F9FB, 'M', u'𩐊'),
+ (0x2F9FC, 'M', u'䪲'),
+ (0x2F9FD, 'M', u'𩒖'),
+ (0x2F9FE, 'M', u'頋'),
+ (0x2FA00, 'M', u'頩'),
+ (0x2FA01, 'M', u'𩖶'),
+ (0x2FA02, 'M', u'飢'),
+ (0x2FA03, 'M', u'䬳'),
+ (0x2FA04, 'M', u'餩'),
+ (0x2FA05, 'M', u'馧'),
+ (0x2FA06, 'M', u'駂'),
+ (0x2FA07, 'M', u'駾'),
+ (0x2FA08, 'M', u'䯎'),
+ (0x2FA09, 'M', u'𩬰'),
+ (0x2FA0A, 'M', u'鬒'),
+ (0x2FA0B, 'M', u'鱀'),
+ (0x2FA0C, 'M', u'鳽'),
+ (0x2FA0D, 'M', u'䳎'),
+ (0x2FA0E, 'M', u'䳭'),
+ (0x2FA0F, 'M', u'鵧'),
+ (0x2FA10, 'M', u'𪃎'),
+ (0x2FA11, 'M', u'䳸'),
+ (0x2FA12, 'M', u'𪄅'),
+ (0x2FA13, 'M', u'𪈎'),
+ (0x2FA14, 'M', u'𪊑'),
+ (0x2FA15, 'M', u'麻'),
+ (0x2FA16, 'M', u'䵖'),
+ (0x2FA17, 'M', u'黹'),
+ (0x2FA18, 'M', u'黾'),
+ (0x2FA19, 'M', u'鼅'),
+ (0x2FA1A, 'M', u'鼏'),
+ (0x2FA1B, 'M', u'鼖'),
+ (0x2FA1C, 'M', u'鼻'),
+ (0x2FA1D, 'M', u'𪘀'),
+ (0x2FA1E, 'X'),
+ (0x30000, 'V'),
+ (0x3134B, 'X'),
+ (0xE0100, 'I'),
+ (0xE01F0, 'X'),
+ ]
+
+uts46data = tuple(
+ _seg_0()
+ + _seg_1()
+ + _seg_2()
+ + _seg_3()
+ + _seg_4()
+ + _seg_5()
+ + _seg_6()
+ + _seg_7()
+ + _seg_8()
+ + _seg_9()
+ + _seg_10()
+ + _seg_11()
+ + _seg_12()
+ + _seg_13()
+ + _seg_14()
+ + _seg_15()
+ + _seg_16()
+ + _seg_17()
+ + _seg_18()
+ + _seg_19()
+ + _seg_20()
+ + _seg_21()
+ + _seg_22()
+ + _seg_23()
+ + _seg_24()
+ + _seg_25()
+ + _seg_26()
+ + _seg_27()
+ + _seg_28()
+ + _seg_29()
+ + _seg_30()
+ + _seg_31()
+ + _seg_32()
+ + _seg_33()
+ + _seg_34()
+ + _seg_35()
+ + _seg_36()
+ + _seg_37()
+ + _seg_38()
+ + _seg_39()
+ + _seg_40()
+ + _seg_41()
+ + _seg_42()
+ + _seg_43()
+ + _seg_44()
+ + _seg_45()
+ + _seg_46()
+ + _seg_47()
+ + _seg_48()
+ + _seg_49()
+ + _seg_50()
+ + _seg_51()
+ + _seg_52()
+ + _seg_53()
+ + _seg_54()
+ + _seg_55()
+ + _seg_56()
+ + _seg_57()
+ + _seg_58()
+ + _seg_59()
+ + _seg_60()
+ + _seg_61()
+ + _seg_62()
+ + _seg_63()
+ + _seg_64()
+ + _seg_65()
+ + _seg_66()
+ + _seg_67()
+ + _seg_68()
+ + _seg_69()
+ + _seg_70()
+ + _seg_71()
+ + _seg_72()
+ + _seg_73()
+ + _seg_74()
+ + _seg_75()
+ + _seg_76()
+ + _seg_77()
+ + _seg_78()
+ + _seg_79()
+)
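+
+# Illustrative sketch, not part of upstream idna: how a sorted range table
+# like ``uts46data`` is typically consulted. Each entry is
+# (start_codepoint, status[, mapping]), where the status is e.g. 'V' (valid),
+# 'M' (mapped), 'X' (disallowed), 'I' (ignored) or '3' (disallowed under
+# STD3 rules, mapped otherwise); a code point is governed by the entry with
+# the greatest start value not exceeding it. The data is split across the
+# _seg_NN() helpers above, presumably to keep any single function's constant
+# table small. The helper name ``_lookup_status`` below is hypothetical.
+import bisect as _bisect
+
+def _lookup_status(code_point):
+    # 'Z' sorts after every status letter, so bisect_left lands just past
+    # the last entry whose start is <= code_point; step back one to get it.
+    row = uts46data[_bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]
+    status = row[1]
+    mapping = row[2] if len(row) == 3 else None
+    return status, mapping
+
+# For example, _lookup_status(0x1D5A8) returns ('M', u'i'): the mathematical
+# sans-serif capital I is mapped to plain ASCII 'i' for IDNA processing.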
diff --git a/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/LICENSE b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/METADATA b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/METADATA
new file mode 100644
index 0000000000..663c0c8720
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/METADATA
@@ -0,0 +1,135 @@
+Metadata-Version: 2.1
+Name: importlib-metadata
+Version: 6.0.0
+Summary: Read metadata from Python packages
+Home-page: https://github.com/python/importlib_metadata
+Author: Jason R. Coombs
+Author-email: jaraco@jaraco.com
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: zipp (>=0.5)
+Requires-Dist: typing-extensions (>=3.6.4) ; python_version < "3.8"
+Provides-Extra: docs
+Requires-Dist: sphinx (>=3.5) ; extra == 'docs'
+Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs'
+Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
+Requires-Dist: furo ; extra == 'docs'
+Requires-Dist: sphinx-lint ; extra == 'docs'
+Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs'
+Provides-Extra: perf
+Requires-Dist: ipython ; extra == 'perf'
+Provides-Extra: testing
+Requires-Dist: pytest (>=6) ; extra == 'testing'
+Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing'
+Requires-Dist: flake8 (<5) ; extra == 'testing'
+Requires-Dist: pytest-cov ; extra == 'testing'
+Requires-Dist: pytest-enabler (>=1.3) ; extra == 'testing'
+Requires-Dist: packaging ; extra == 'testing'
+Requires-Dist: pyfakefs ; extra == 'testing'
+Requires-Dist: flufl.flake8 ; extra == 'testing'
+Requires-Dist: pytest-perf (>=0.9.2) ; extra == 'testing'
+Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-flake8 ; (python_version < "3.12") and extra == 'testing'
+Requires-Dist: importlib-resources (>=1.3) ; (python_version < "3.9") and extra == 'testing'
+
+.. image:: https://img.shields.io/pypi/v/importlib_metadata.svg
+ :target: https://pypi.org/project/importlib_metadata
+
+.. image:: https://img.shields.io/pypi/pyversions/importlib_metadata.svg
+
+.. image:: https://github.com/python/importlib_metadata/workflows/tests/badge.svg
+ :target: https://github.com/python/importlib_metadata/actions?query=workflow%3A%22tests%22
+ :alt: tests
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+ :alt: Code style: Black
+
+.. image:: https://readthedocs.org/projects/importlib-metadata/badge/?version=latest
+ :target: https://importlib-metadata.readthedocs.io/en/latest/?badge=latest
+
+.. image:: https://img.shields.io/badge/skeleton-2022-informational
+ :target: https://blog.jaraco.com/skeleton
+
+.. image:: https://tidelift.com/badges/package/pypi/importlib-metadata
+ :target: https://tidelift.com/subscription/pkg/pypi-importlib-metadata?utm_source=pypi-importlib-metadata&utm_medium=readme
+
+Library to access the metadata for a Python package.
+
+This package supplies third-party access to the functionality of
+`importlib.metadata <https://docs.python.org/3/library/importlib.metadata.html>`_
+including improvements added to subsequent Python versions.
+
+
+Compatibility
+=============
+
+New features are introduced in this third-party library and later merged
+into CPython. The following table indicates which versions of this library
+were contributed to different versions of the standard library:
+
+.. list-table::
+ :header-rows: 1
+
+ * - importlib_metadata
+ - stdlib
+ * - 5.0
+ - 3.12
+ * - 4.13
+ - 3.11
+ * - 4.6
+ - 3.10
+ * - 1.4
+ - 3.8
+
+
+Usage
+=====
+
+See the `online documentation <https://importlib_metadata.readthedocs.io/>`_
+for usage details.
+
+`Finder authors
+<https://docs.python.org/3/reference/import.html#finders-and-loaders>`_ can
+also add support for custom package installers. See the above documentation
+for details.
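+
+As a minimal sketch (the documentation above is authoritative), the core
+query functions exported by this package can be used like so::
+
+    from importlib_metadata import version, metadata, requires
+
+    version('importlib-metadata')              # e.g. '6.0.0'
+    metadata('importlib-metadata')['Summary']  # 'Read metadata from Python packages'
+    requires('importlib-metadata')             # list of requirement strings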
+
+
+Caveats
+=======
+
+This project primarily supports third-party packages installed by PyPA
+tools (or other conforming packages). It does not support:
+
+- Packages in the stdlib.
+- Packages installed without metadata.
+
+Project details
+===============
+
+ * Project home: https://github.com/python/importlib_metadata
+ * Report bugs at: https://github.com/python/importlib_metadata/issues
+ * Code hosting: https://github.com/python/importlib_metadata
+ * Documentation: https://importlib_metadata.readthedocs.io/
+
+For Enterprise
+==============
+
+Available as part of the Tidelift Subscription.
+
+This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
+
+`Learn more <https://tidelift.com/subscription/pkg/pypi-importlib-metadata?utm_source=pypi-importlib-metadata&utm_medium=referral&utm_campaign=github>`_.
+
+Security Contact
+================
+
+To report a security vulnerability, please use the
+`Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.
diff --git a/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/RECORD b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/RECORD
new file mode 100644
index 0000000000..f95c14991f
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/RECORD
@@ -0,0 +1,15 @@
+importlib_metadata/__init__.py,sha256=wiMJxNXXhPtRRHSX2N9gGLnTh0YszmE1rn3uKYRrNcs,26490
+importlib_metadata/_adapters.py,sha256=i8S6Ib1OQjcILA-l4gkzktMZe18TaeUNI49PLRp6OBU,2454
+importlib_metadata/_collections.py,sha256=CJ0OTCHIjWA0ZIVS4voORAsn2R4R2cQBEtPsZEJpASY,743
+importlib_metadata/_compat.py,sha256=9zOKf0eDgkCMnnaEhU5kQVxHd1P8BIYV7Stso7av5h8,1857
+importlib_metadata/_functools.py,sha256=PsY2-4rrKX4RVeRC1oGp1lB1pmC9eKN88_f-bD9uOoA,2895
+importlib_metadata/_itertools.py,sha256=cvr_2v8BRbxcIl5x5ldfqdHjhI8Yi8s8yk50G_nm6jQ,2068
+importlib_metadata/_meta.py,sha256=v5e1ZDG7yZTH3h7TjbS5bM5p8AGzMPVOu8skDMv4h6k,1165
+importlib_metadata/_py39compat.py,sha256=2Tk5twb_VgLCY-1NEAQjdZp_S9OFMC-pUzP2isuaPsQ,1098
+importlib_metadata/_text.py,sha256=HCsFksZpJLeTP3NEk_ngrAeXVRRtTrtyh9eOABoRP4A,2166
+importlib_metadata/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_metadata-6.0.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+importlib_metadata-6.0.0.dist-info/METADATA,sha256=tZIEx9HdEXD34SWuitkNXaYBqSnyNukx2l4FKQAz9hY,4958
+importlib_metadata-6.0.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+importlib_metadata-6.0.0.dist-info/top_level.txt,sha256=CO3fD9yylANiXkrMo4qHLV_mqXL2sC5JFKgt1yWAT-A,19
+importlib_metadata-6.0.0.dist-info/RECORD,,
diff --git a/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/WHEEL b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/WHEEL
new file mode 100644
index 0000000000..57e3d840d5
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/top_level.txt b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..bbb07547a1
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata-6.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+importlib_metadata
diff --git a/third_party/python/importlib_metadata/importlib_metadata/__init__.py b/third_party/python/importlib_metadata/importlib_metadata/__init__.py
new file mode 100644
index 0000000000..9a36a8e621
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/__init__.py
@@ -0,0 +1,904 @@
+import os
+import re
+import abc
+import csv
+import sys
+import zipp
+import email
+import pathlib
+import operator
+import textwrap
+import warnings
+import functools
+import itertools
+import posixpath
+import collections
+
+from . import _adapters, _meta, _py39compat
+from ._collections import FreezableDefaultDict, Pair
+from ._compat import (
+ NullFinder,
+ install,
+ pypy_partial,
+)
+from ._functools import method_cache, pass_none
+from ._itertools import always_iterable, unique_everseen
+from ._meta import PackageMetadata, SimplePath
+
+from contextlib import suppress
+from importlib import import_module
+from importlib.abc import MetaPathFinder
+from itertools import starmap
+from typing import List, Mapping, Optional
+
+
+__all__ = [
+ 'Distribution',
+ 'DistributionFinder',
+ 'PackageMetadata',
+ 'PackageNotFoundError',
+ 'distribution',
+ 'distributions',
+ 'entry_points',
+ 'files',
+ 'metadata',
+ 'packages_distributions',
+ 'requires',
+ 'version',
+]
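+# These public names mirror the stdlib ``importlib.metadata`` API surface.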
+
+
+class PackageNotFoundError(ModuleNotFoundError):
+ """The package was not found."""
+
+ def __str__(self):
+ return f"No package metadata was found for {self.name}"
+
+ @property
+ def name(self):
+ (name,) = self.args
+ return name
+
+
+class Sectioned:
+ """
+ A simple entry point config parser for performance
+
+ >>> for item in Sectioned.read(Sectioned._sample):
+ ... print(item)
+ Pair(name='sec1', value='# comments ignored')
+ Pair(name='sec1', value='a = 1')
+ Pair(name='sec1', value='b = 2')
+ Pair(name='sec2', value='a = 2')
+
+ >>> res = Sectioned.section_pairs(Sectioned._sample)
+ >>> item = next(res)
+ >>> item.name
+ 'sec1'
+ >>> item.value
+ Pair(name='a', value='1')
+ >>> item = next(res)
+ >>> item.value
+ Pair(name='b', value='2')
+ >>> item = next(res)
+ >>> item.name
+ 'sec2'
+ >>> item.value
+ Pair(name='a', value='2')
+ >>> list(res)
+ []
+ """
+
+ _sample = textwrap.dedent(
+ """
+ [sec1]
+ # comments ignored
+ a = 1
+ b = 2
+
+ [sec2]
+ a = 2
+ """
+ ).lstrip()
+
+ @classmethod
+ def section_pairs(cls, text):
+ return (
+ section._replace(value=Pair.parse(section.value))
+ for section in cls.read(text, filter_=cls.valid)
+ if section.name is not None
+ )
+
+ @staticmethod
+ def read(text, filter_=None):
+ lines = filter(filter_, map(str.strip, text.splitlines()))
+ name = None
+ for value in lines:
+ section_match = value.startswith('[') and value.endswith(']')
+ if section_match:
+ name = value.strip('[]')
+ continue
+ yield Pair(name, value)
+
+ @staticmethod
+ def valid(line):
+ return line and not line.startswith('#')
+
+
+class DeprecatedTuple:
+ """
+ Provide subscript item access for backward compatibility.
+
+ >>> recwarn = getfixture('recwarn')
+ >>> ep = EntryPoint(name='name', value='value', group='group')
+ >>> ep[:]
+ ('name', 'value', 'group')
+ >>> ep[0]
+ 'name'
+ >>> len(recwarn)
+ 1
+ """
+
+ # Do not remove prior to 2023-05-01 or Python 3.13
+ _warn = functools.partial(
+ warnings.warn,
+ "EntryPoint tuple interface is deprecated. Access members by name.",
+ DeprecationWarning,
+ stacklevel=pypy_partial(2),
+ )
+
+ def __getitem__(self, item):
+ self._warn()
+ return self._key()[item]
+
+
+class EntryPoint(DeprecatedTuple):
+ """An entry point as defined by Python packaging conventions.
+
+ See `the packaging docs on entry points
+ <https://packaging.python.org/specifications/entry-points/>`_
+ for more information.
+
+ >>> ep = EntryPoint(
+ ... name=None, group=None, value='package.module:attr [extra1, extra2]')
+ >>> ep.module
+ 'package.module'
+ >>> ep.attr
+ 'attr'
+ >>> ep.extras
+ ['extra1', 'extra2']
+ """
+
+ pattern = re.compile(
+ r'(?P<module>[\w.]+)\s*'
+ r'(:\s*(?P<attr>[\w.]+)\s*)?'
+ r'((?P<extras>\[.*\])\s*)?$'
+ )
+ """
+ A regular expression describing the syntax for an entry point,
+ which might look like:
+
+ - module
+ - package.module
+ - package.module:attribute
+ - package.module:object.attribute
+ - package.module:attr [extra1, extra2]
+
+ Other combinations are possible as well.
+
+ The expression is lenient about whitespace around the ':',
+ following the attr, and following any extras.
+ """
+
+ name: str
+ value: str
+ group: str
+
+ dist: Optional['Distribution'] = None
+
+ def __init__(self, name, value, group):
+ vars(self).update(name=name, value=value, group=group)
+
+ def load(self):
+ """Load the entry point from its definition. If only a module
+ is indicated by the value, return that module. Otherwise,
+ return the named object.
+ """
+ match = self.pattern.match(self.value)
+ module = import_module(match.group('module'))
+ attrs = filter(None, (match.group('attr') or '').split('.'))
+ return functools.reduce(getattr, attrs, module)
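+
+    # Illustrative sketch (hypothetical names): constructing an entry
+    # point by hand and loading it.  Assumes a module ``mypkg.cli``
+    # exposing a ``main`` callable.
+    #
+    #   >>> ep = EntryPoint(
+    #   ...     name='main', value='mypkg.cli:main', group='console_scripts')
+    #   >>> ep.load()  # imports mypkg.cli and returns its ``main`` attribute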
+
+ @property
+ def module(self):
+ match = self.pattern.match(self.value)
+ return match.group('module')
+
+ @property
+ def attr(self):
+ match = self.pattern.match(self.value)
+ return match.group('attr')
+
+ @property
+ def extras(self):
+ match = self.pattern.match(self.value)
+ return re.findall(r'\w+', match.group('extras') or '')
+
+ def _for(self, dist):
+ vars(self).update(dist=dist)
+ return self
+
+ def matches(self, **params):
+ """
+        Whether the EntryPoint matches the given parameters.
+
+ >>> ep = EntryPoint(group='foo', name='bar', value='bing:bong [extra1, extra2]')
+ >>> ep.matches(group='foo')
+ True
+ >>> ep.matches(name='bar', value='bing:bong [extra1, extra2]')
+ True
+ >>> ep.matches(group='foo', name='other')
+ False
+ >>> ep.matches()
+ True
+ >>> ep.matches(extras=['extra1', 'extra2'])
+ True
+ >>> ep.matches(module='bing')
+ True
+ >>> ep.matches(attr='bong')
+ True
+ """
+ attrs = (getattr(self, param) for param in params)
+ return all(map(operator.eq, params.values(), attrs))
+
+ def _key(self):
+ return self.name, self.value, self.group
+
+ def __lt__(self, other):
+ return self._key() < other._key()
+
+ def __eq__(self, other):
+ return self._key() == other._key()
+
+ def __setattr__(self, name, value):
+ raise AttributeError("EntryPoint objects are immutable.")
+
+ def __repr__(self):
+ return (
+ f'EntryPoint(name={self.name!r}, value={self.value!r}, '
+ f'group={self.group!r})'
+ )
+
+ def __hash__(self):
+ return hash(self._key())
+
+
+class EntryPoints(tuple):
+ """
+ An immutable collection of selectable EntryPoint objects.
+ """
+
+ __slots__ = ()
+
+ def __getitem__(self, name): # -> EntryPoint:
+ """
+ Get the EntryPoint in self matching name.
+ """
+ try:
+ return next(iter(self.select(name=name)))
+ except StopIteration:
+ raise KeyError(name)
+
+ def select(self, **params):
+ """
+ Select entry points from self that match the
+ given parameters (typically group and/or name).
+ """
+ return EntryPoints(ep for ep in self if _py39compat.ep_matches(ep, **params))
+
+ @property
+ def names(self):
+ """
+ Return the set of all names of all entry points.
+ """
+ return {ep.name for ep in self}
+
+ @property
+ def groups(self):
+ """
+ Return the set of all groups of all entry points.
+ """
+ return {ep.group for ep in self}
+
+ @classmethod
+ def _from_text_for(cls, text, dist):
+ return cls(ep._for(dist) for ep in cls._from_text(text))
+
+ @staticmethod
+ def _from_text(text):
+ return (
+ EntryPoint(name=item.value.name, value=item.value.value, group=item.name)
+ for item in Sectioned.section_pairs(text or '')
+ )
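+
+# Usage sketch (hypothetical group/name; results depend on the installed
+# environment): selecting from an EntryPoints collection.
+#
+#   >>> eps = entry_points(group='console_scripts')
+#   >>> 'pip' in eps.names     # membership via the .names set
+#   >>> eps['pip'].load()      # raises KeyError if 'pip' is absent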
+
+
+class PackagePath(pathlib.PurePosixPath):
+ """A reference to a path in a package"""
+
+ def read_text(self, encoding='utf-8'):
+ with self.locate().open(encoding=encoding) as stream:
+ return stream.read()
+
+ def read_binary(self):
+ with self.locate().open('rb') as stream:
+ return stream.read()
+
+ def locate(self):
+ """Return a path-like object for this path"""
+ return self.dist.locate_file(self)
+
+
+class FileHash:
+ def __init__(self, spec):
+ self.mode, _, self.value = spec.partition('=')
+
+ def __repr__(self):
+ return f'<FileHash mode: {self.mode} value: {self.value}>'
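+
+    # Sketch: RECORD stores each file hash as ``<mode>=<value>``, e.g.
+    #
+    #   >>> FileHash('sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU')
+    #   <FileHash mode: sha256 value: 47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU>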
+
+
+class Distribution(metaclass=abc.ABCMeta):
+ """A Python distribution package."""
+
+ @abc.abstractmethod
+ def read_text(self, filename):
+ """Attempt to load metadata file given by the name.
+
+ :param filename: The name of the file in the distribution info.
+ :return: The text if found, otherwise None.
+ """
+
+ @abc.abstractmethod
+ def locate_file(self, path):
+ """
+ Given a path to a file in this distribution, return a path
+ to it.
+ """
+
+ @classmethod
+ def from_name(cls, name: str):
+ """Return the Distribution for the given package name.
+
+ :param name: The name of the distribution package to search for.
+ :return: The Distribution instance (or subclass thereof) for the named
+ package, if found.
+ :raises PackageNotFoundError: When the named package's distribution
+ metadata cannot be found.
+ :raises ValueError: When an invalid value is supplied for name.
+ """
+ if not name:
+ raise ValueError("A distribution name is required.")
+ try:
+ return next(cls.discover(name=name))
+ except StopIteration:
+ raise PackageNotFoundError(name)
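+
+    # Sketch (assumes 'pip' is installed in the environment):
+    #
+    #   >>> dist = Distribution.from_name('pip')
+    #   >>> dist.version       # the 'Version' metadata, e.g. '23.0.1'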
+
+ @classmethod
+ def discover(cls, **kwargs):
+ """Return an iterable of Distribution objects for all packages.
+
+ Pass a ``context`` or pass keyword arguments for constructing
+ a context.
+
+        :param context: A ``DistributionFinder.Context`` object.
+ :return: Iterable of Distribution objects for all packages.
+ """
+ context = kwargs.pop('context', None)
+ if context and kwargs:
+ raise ValueError("cannot accept context and kwargs")
+ context = context or DistributionFinder.Context(**kwargs)
+ return itertools.chain.from_iterable(
+ resolver(context) for resolver in cls._discover_resolvers()
+ )
+
+ @staticmethod
+ def at(path):
+ """Return a Distribution for the indicated metadata path
+
+ :param path: a string or path-like object
+ :return: a concrete Distribution instance for the path
+ """
+ return PathDistribution(pathlib.Path(path))
+
+ @staticmethod
+ def _discover_resolvers():
+ """Search the meta_path for resolvers."""
+ declared = (
+ getattr(finder, 'find_distributions', None) for finder in sys.meta_path
+ )
+ return filter(None, declared)
+
+ @property
+ def metadata(self) -> _meta.PackageMetadata:
+ """Return the parsed metadata for this Distribution.
+
+ The returned object will have keys that name the various bits of
+ metadata. See PEP 566 for details.
+ """
+ text = (
+ self.read_text('METADATA')
+ or self.read_text('PKG-INFO')
+            # This last clause is here to support old egg-info files. Its
+            # effect is simply to use the PathDistribution's self._path
+            # attribute (which points to the egg-info file) unchanged.
+ or self.read_text('')
+ )
+ return _adapters.Message(email.message_from_string(text))
+
+ @property
+ def name(self):
+ """Return the 'Name' metadata for the distribution package."""
+ return self.metadata['Name']
+
+ @property
+ def _normalized_name(self):
+ """Return a normalized version of the name."""
+ return Prepared.normalize(self.name)
+
+ @property
+ def version(self):
+ """Return the 'Version' metadata for the distribution package."""
+ return self.metadata['Version']
+
+ @property
+ def entry_points(self):
+ return EntryPoints._from_text_for(self.read_text('entry_points.txt'), self)
+
+ @property
+ def files(self):
+ """Files in this distribution.
+
+ :return: List of PackagePath for this distribution or None
+
+ Result is `None` if the metadata file that enumerates files
+ (i.e. RECORD for dist-info or SOURCES.txt for egg-info) is
+ missing.
+ Result may be empty if the metadata exists but is empty.
+ """
+
+ def make_file(name, hash=None, size_str=None):
+ result = PackagePath(name)
+ result.hash = FileHash(hash) if hash else None
+ result.size = int(size_str) if size_str else None
+ result.dist = self
+ return result
+
+ @pass_none
+ def make_files(lines):
+ return list(starmap(make_file, csv.reader(lines)))
+
+ return make_files(self._read_files_distinfo() or self._read_files_egginfo())
+
+ def _read_files_distinfo(self):
+ """
+ Read the lines of RECORD
+ """
+ text = self.read_text('RECORD')
+ return text and text.splitlines()
+
+ def _read_files_egginfo(self):
+ """
+ SOURCES.txt might contain literal commas, so wrap each line
+ in quotes.
+ """
+ text = self.read_text('SOURCES.txt')
+ return text and map('"{}"'.format, text.splitlines())
+
+ @property
+ def requires(self):
+ """Generated requirements specified for this Distribution"""
+ reqs = self._read_dist_info_reqs() or self._read_egg_info_reqs()
+ return reqs and list(reqs)
+
+ def _read_dist_info_reqs(self):
+ return self.metadata.get_all('Requires-Dist')
+
+ def _read_egg_info_reqs(self):
+ source = self.read_text('requires.txt')
+ return pass_none(self._deps_from_requires_text)(source)
+
+ @classmethod
+ def _deps_from_requires_text(cls, source):
+ return cls._convert_egg_info_reqs_to_simple_reqs(Sectioned.read(source))
+
+ @staticmethod
+ def _convert_egg_info_reqs_to_simple_reqs(sections):
+ """
+ Historically, setuptools would solicit and store 'extra'
+ requirements, including those with environment markers,
+ in separate sections. More modern tools expect each
+ dependency to be defined separately, with any relevant
+ extras and environment markers attached directly to that
+ requirement. This method converts the former to the
+ latter. See _test_deps_from_requires_text for an example.
+ """
+
+ def make_condition(name):
+ return name and f'extra == "{name}"'
+
+ def quoted_marker(section):
+ section = section or ''
+ extra, sep, markers = section.partition(':')
+ if extra and markers:
+ markers = f'({markers})'
+ conditions = list(filter(None, [markers, make_condition(extra)]))
+ return '; ' + ' and '.join(conditions) if conditions else ''
+
+ def url_req_space(req):
+ """
+ PEP 508 requires a space between the url_spec and the quoted_marker.
+ Ref python/importlib_metadata#357.
+ """
+ # '@' is uniquely indicative of a url_req.
+ return ' ' * ('@' in req)
+
+ for section in sections:
+ space = url_req_space(section.value)
+ yield section.value + space + quoted_marker(section.name)
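+
+        # Sketch of the conversion, assuming a requires.txt with a section
+        # ``[test:python_version < "3.9"]`` containing ``pytest``; the
+        # generator then yields:
+        #
+        #   pytest; (python_version < "3.9") and extra == "test"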
+
+
+class DistributionFinder(MetaPathFinder):
+ """
+ A MetaPathFinder capable of discovering installed distributions.
+ """
+
+ class Context:
+ """
+ Keyword arguments presented by the caller to
+ ``distributions()`` or ``Distribution.discover()``
+ to narrow the scope of a search for distributions
+ in all DistributionFinders.
+
+ Each DistributionFinder may expect any parameters
+ and should attempt to honor the canonical
+ parameters defined below when appropriate.
+ """
+
+ name = None
+ """
+ Specific name for which a distribution finder should match.
+ A name of ``None`` matches all distributions.
+ """
+
+ def __init__(self, **kwargs):
+ vars(self).update(kwargs)
+
+ @property
+ def path(self):
+ """
+            The sequence of directory paths that a distribution finder
+ should search.
+
+ Typically refers to Python installed package paths such as
+ "site-packages" directories and defaults to ``sys.path``.
+ """
+ return vars(self).get('path', sys.path)
+
+ @abc.abstractmethod
+ def find_distributions(self, context=Context()):
+ """
+ Find distributions.
+
+ Return an iterable of all Distribution instances capable of
+ loading the metadata for packages matching the ``context``,
+ a DistributionFinder.Context instance.
+ """
+
+
+class FastPath:
+ """
+ Micro-optimized class for searching a path for
+ children.
+
+ >>> FastPath('').children()
+ ['...']
+ """
+
+ @functools.lru_cache() # type: ignore
+ def __new__(cls, root):
+ return super().__new__(cls)
+
+ def __init__(self, root):
+ self.root = root
+
+ def joinpath(self, child):
+ return pathlib.Path(self.root, child)
+
+ def children(self):
+ with suppress(Exception):
+ return os.listdir(self.root or '.')
+ with suppress(Exception):
+ return self.zip_children()
+ return []
+
+ def zip_children(self):
+ zip_path = zipp.Path(self.root)
+ names = zip_path.root.namelist()
+ self.joinpath = zip_path.joinpath
+
+ return dict.fromkeys(child.split(posixpath.sep, 1)[0] for child in names)
+
+ def search(self, name):
+ return self.lookup(self.mtime).search(name)
+
+ @property
+ def mtime(self):
+ with suppress(OSError):
+ return os.stat(self.root).st_mtime
+ self.lookup.cache_clear()
+
+ @method_cache
+ def lookup(self, mtime):
+ return Lookup(self)
+
+
+class Lookup:
+ def __init__(self, path: FastPath):
+ base = os.path.basename(path.root).lower()
+ base_is_egg = base.endswith(".egg")
+ self.infos = FreezableDefaultDict(list)
+ self.eggs = FreezableDefaultDict(list)
+
+ for child in path.children():
+ low = child.lower()
+ if low.endswith((".dist-info", ".egg-info")):
+ # rpartition is faster than splitext and suitable for this purpose.
+ name = low.rpartition(".")[0].partition("-")[0]
+ normalized = Prepared.normalize(name)
+ self.infos[normalized].append(path.joinpath(child))
+ elif base_is_egg and low == "egg-info":
+ name = base.rpartition(".")[0].partition("-")[0]
+ legacy_normalized = Prepared.legacy_normalize(name)
+ self.eggs[legacy_normalized].append(path.joinpath(child))
+
+ self.infos.freeze()
+ self.eggs.freeze()
+
+ def search(self, prepared):
+ infos = (
+ self.infos[prepared.normalized]
+ if prepared
+ else itertools.chain.from_iterable(self.infos.values())
+ )
+ eggs = (
+ self.eggs[prepared.legacy_normalized]
+ if prepared
+ else itertools.chain.from_iterable(self.eggs.values())
+ )
+ return itertools.chain(infos, eggs)
+
+
+class Prepared:
+ """
+ A prepared search for metadata on a possibly-named package.
+ """
+
+ normalized = None
+ legacy_normalized = None
+
+ def __init__(self, name):
+ self.name = name
+ if name is None:
+ return
+ self.normalized = self.normalize(name)
+ self.legacy_normalized = self.legacy_normalize(name)
+
+ @staticmethod
+ def normalize(name):
+ """
+ PEP 503 normalization plus dashes as underscores.
+ """
+ return re.sub(r"[-_.]+", "-", name).lower().replace('-', '_')
+
+ @staticmethod
+ def legacy_normalize(name):
+ """
+        Normalize the package name according to the convention used
+        by older packaging tools and specs.
+ """
+ return name.lower().replace('-', '_')
+
+ def __bool__(self):
+ return bool(self.name)
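+
+    # Sketch: the two normalizations applied to the same name.
+    #
+    #   >>> Prepared.normalize('Pip-Something.Else')
+    #   'pip_something_else'
+    #   >>> Prepared.legacy_normalize('Pip-Something.Else')
+    #   'pip_something.else'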
+
+
+@install
+class MetadataPathFinder(NullFinder, DistributionFinder):
+ """A degenerate finder for distribution packages on the file system.
+
+ This finder supplies only a find_distributions() method for versions
+ of Python that do not have a PathFinder find_distributions().
+ """
+
+ def find_distributions(self, context=DistributionFinder.Context()):
+ """
+ Find distributions.
+
+ Return an iterable of all Distribution instances capable of
+ loading the metadata for packages matching ``context.name``
+ (or all names if ``None`` indicated) along the paths in the list
+ of directories ``context.path``.
+ """
+ found = self._search_paths(context.name, context.path)
+ return map(PathDistribution, found)
+
+ @classmethod
+ def _search_paths(cls, name, paths):
+ """Find metadata directories in paths heuristically."""
+ prepared = Prepared(name)
+ return itertools.chain.from_iterable(
+ path.search(prepared) for path in map(FastPath, paths)
+ )
+
+ def invalidate_caches(cls):
+ FastPath.__new__.cache_clear()
+
+
+class PathDistribution(Distribution):
+ def __init__(self, path: SimplePath):
+ """Construct a distribution.
+
+ :param path: SimplePath indicating the metadata directory.
+ """
+ self._path = path
+
+ def read_text(self, filename):
+ with suppress(
+ FileNotFoundError,
+ IsADirectoryError,
+ KeyError,
+ NotADirectoryError,
+ PermissionError,
+ ):
+ return self._path.joinpath(filename).read_text(encoding='utf-8')
+
+ read_text.__doc__ = Distribution.read_text.__doc__
+
+ def locate_file(self, path):
+ return self._path.parent / path
+
+ @property
+ def _normalized_name(self):
+ """
+ Performance optimization: where possible, resolve the
+ normalized name from the file system path.
+ """
+ stem = os.path.basename(str(self._path))
+ return (
+ pass_none(Prepared.normalize)(self._name_from_stem(stem))
+ or super()._normalized_name
+ )
+
+ @staticmethod
+ def _name_from_stem(stem):
+ """
+ >>> PathDistribution._name_from_stem('foo-3.0.egg-info')
+ 'foo'
+ >>> PathDistribution._name_from_stem('CherryPy-3.0.dist-info')
+ 'CherryPy'
+ >>> PathDistribution._name_from_stem('face.egg-info')
+ 'face'
+ >>> PathDistribution._name_from_stem('foo.bar')
+ """
+ filename, ext = os.path.splitext(stem)
+ if ext not in ('.dist-info', '.egg-info'):
+ return
+ name, sep, rest = filename.partition('-')
+ return name
+
+
+def distribution(distribution_name):
+ """Get the ``Distribution`` instance for the named package.
+
+ :param distribution_name: The name of the distribution package as a string.
+ :return: A ``Distribution`` instance (or subclass thereof).
+ """
+ return Distribution.from_name(distribution_name)
+
+
+def distributions(**kwargs):
+ """Get all ``Distribution`` instances in the current environment.
+
+ :return: An iterable of ``Distribution`` instances.
+ """
+ return Distribution.discover(**kwargs)
+
+
+def metadata(distribution_name) -> _meta.PackageMetadata:
+ """Get the metadata for the named package.
+
+ :param distribution_name: The name of the distribution package to query.
+ :return: A PackageMetadata containing the parsed metadata.
+ """
+ return Distribution.from_name(distribution_name).metadata
+
+
+def version(distribution_name):
+ """Get the version string for the named package.
+
+ :param distribution_name: The name of the distribution package to query.
+ :return: The version string for the package as defined in the package's
+ "Version" metadata key.
+ """
+ return distribution(distribution_name).version
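+
+# Sketch of the query API (assumes 'pip' is installed; values vary by
+# environment):
+#
+#   >>> version('pip')                  # e.g. '23.0.1'
+#   >>> metadata('pip')['Summary']      # one header from the metadata
+#   >>> requires('pip')                 # list of requirement strings, or None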
+
+
+_unique = functools.partial(
+ unique_everseen,
+ key=_py39compat.normalized_name,
+)
+"""
+Wrapper for ``distributions`` to return unique distributions by name.
+"""
+
+
+def entry_points(**params) -> EntryPoints:
+ """Return EntryPoint objects for all installed packages.
+
+ Pass selection parameters (group or name) to filter the
+ result to entry points matching those properties (see
+ EntryPoints.select()).
+
+ :return: EntryPoints for all installed packages.
+ """
+ eps = itertools.chain.from_iterable(
+ dist.entry_points for dist in _unique(distributions())
+ )
+ return EntryPoints(eps).select(**params)
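+
+# Sketch: iterating installed console scripts (group name per the entry
+# points spec; output depends on the environment).
+#
+#   >>> for ep in entry_points(group='console_scripts'):
+#   ...     print(ep.name, '->', ep.value)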
+
+
+def files(distribution_name):
+ """Return a list of files for the named package.
+
+ :param distribution_name: The name of the distribution package to query.
+ :return: List of files composing the distribution.
+ """
+ return distribution(distribution_name).files
+
+
+def requires(distribution_name):
+ """
+ Return a list of requirements for the named package.
+
+ :return: An iterator of requirements, suitable for
+ packaging.requirement.Requirement.
+ """
+ return distribution(distribution_name).requires
+
+
+def packages_distributions() -> Mapping[str, List[str]]:
+ """
+ Return a mapping of top-level packages to their
+ distributions.
+
+ >>> import collections.abc
+ >>> pkgs = packages_distributions()
+ >>> all(isinstance(dist, collections.abc.Sequence) for dist in pkgs.values())
+ True
+ """
+ pkg_to_dist = collections.defaultdict(list)
+ for dist in distributions():
+ for pkg in _top_level_declared(dist) or _top_level_inferred(dist):
+ pkg_to_dist[pkg].append(dist.metadata['Name'])
+ return dict(pkg_to_dist)
+
+
+def _top_level_declared(dist):
+ return (dist.read_text('top_level.txt') or '').split()
+
+
+def _top_level_inferred(dist):
+ return {
+ f.parts[0] if len(f.parts) > 1 else f.with_suffix('').name
+ for f in always_iterable(dist.files)
+ if f.suffix == ".py"
+ }
diff --git a/third_party/python/importlib_metadata/importlib_metadata/_adapters.py b/third_party/python/importlib_metadata/importlib_metadata/_adapters.py
new file mode 100644
index 0000000000..e33cba5e44
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/_adapters.py
@@ -0,0 +1,90 @@
+import functools
+import warnings
+import re
+import textwrap
+import email.message
+
+from ._text import FoldedCase
+from ._compat import pypy_partial
+
+
+# Do not remove prior to 2024-01-01 or Python 3.14
+_warn = functools.partial(
+ warnings.warn,
+ "Implicit None on return values is deprecated and will raise KeyErrors.",
+ DeprecationWarning,
+ stacklevel=pypy_partial(2),
+)
+
+
+class Message(email.message.Message):
+ multiple_use_keys = set(
+ map(
+ FoldedCase,
+ [
+ 'Classifier',
+ 'Obsoletes-Dist',
+ 'Platform',
+ 'Project-URL',
+ 'Provides-Dist',
+ 'Provides-Extra',
+ 'Requires-Dist',
+ 'Requires-External',
+ 'Supported-Platform',
+ 'Dynamic',
+ ],
+ )
+ )
+ """
+ Keys that may be indicated multiple times per PEP 566.
+ """
+
+ def __new__(cls, orig: email.message.Message):
+ res = super().__new__(cls)
+ vars(res).update(vars(orig))
+ return res
+
+ def __init__(self, *args, **kwargs):
+ self._headers = self._repair_headers()
+
+ # suppress spurious error from mypy
+ def __iter__(self):
+ return super().__iter__()
+
+ def __getitem__(self, item):
+ """
+ Warn users that a ``KeyError`` can be expected when a
+        missing key is supplied. Ref python/importlib_metadata#371.
+ """
+ res = super().__getitem__(item)
+ if res is None:
+ _warn()
+ return res
+
+ def _repair_headers(self):
+ def redent(value):
+ "Correct for RFC822 indentation"
+ if not value or '\n' not in value:
+ return value
+ return textwrap.dedent(' ' * 8 + value)
+
+ headers = [(key, redent(value)) for key, value in vars(self)['_headers']]
+ if self._payload:
+ headers.append(('Description', self.get_payload()))
+ return headers
+
+ @property
+ def json(self):
+ """
+ Convert PackageMetadata to a JSON-compatible format
+        per PEP 566.
+ """
+
+ def transform(key):
+ value = self.get_all(key) if key in self.multiple_use_keys else self[key]
+ if key == 'Keywords':
+ value = re.split(r'\s+', value)
+ tk = key.lower().replace('-', '_')
+ return tk, value
+
+ return dict(map(transform, map(FoldedCase, self)))
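+
+    # Sketch: the JSON form uses lower-cased, underscored keys per PEP 566,
+    # e.g. given any installed distribution's metadata ``md``:
+    #
+    #   >>> md.json['name'], md.json['metadata_version']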
diff --git a/third_party/python/importlib_metadata/importlib_metadata/_collections.py b/third_party/python/importlib_metadata/importlib_metadata/_collections.py
new file mode 100644
index 0000000000..cf0954e1a3
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/_collections.py
@@ -0,0 +1,30 @@
+import collections
+
+
+# from jaraco.collections 3.3
+class FreezableDefaultDict(collections.defaultdict):
+ """
+ Often it is desirable to prevent the mutation of
+ a default dict after its initial construction, such
+ as to prevent mutation during iteration.
+
+ >>> dd = FreezableDefaultDict(list)
+ >>> dd[0].append('1')
+ >>> dd.freeze()
+ >>> dd[1]
+ []
+ >>> len(dd)
+ 1
+ """
+
+ def __missing__(self, key):
+ return getattr(self, '_frozen', super().__missing__)(key)
+
+ def freeze(self):
+ self._frozen = lambda key: self.default_factory()
+
+
+class Pair(collections.namedtuple('Pair', 'name value')):
+ @classmethod
+ def parse(cls, text):
+ return cls(*map(str.strip, text.split("=", 1)))
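+
+    # Sketch:
+    #
+    #   >>> Pair.parse('a = 1')
+    #   Pair(name='a', value='1')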
diff --git a/third_party/python/importlib_metadata/importlib_metadata/_compat.py b/third_party/python/importlib_metadata/importlib_metadata/_compat.py
new file mode 100644
index 0000000000..3d78566ea3
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/_compat.py
@@ -0,0 +1,72 @@
+import sys
+import platform
+
+
+__all__ = ['install', 'NullFinder', 'Protocol']
+
+
+try:
+ from typing import Protocol
+except ImportError: # pragma: no cover
+ # Python 3.7 compatibility
+ from typing_extensions import Protocol # type: ignore
+
+
+def install(cls):
+ """
+ Class decorator for installation on sys.meta_path.
+
+ Adds the backport DistributionFinder to sys.meta_path and
+ attempts to disable the finder functionality of the stdlib
+ DistributionFinder.
+ """
+ sys.meta_path.append(cls())
+ disable_stdlib_finder()
+ return cls
+
+
+def disable_stdlib_finder():
+ """
+ Give the backport primacy for discovering path-based distributions
+    by monkey-patching the stdlib.
+
+    See #91 for more background on the rationale for this sketchy
+ behavior.
+ """
+
+ def matches(finder):
+ return getattr(
+ finder, '__module__', None
+ ) == '_frozen_importlib_external' and hasattr(finder, 'find_distributions')
+
+ for finder in filter(matches, sys.meta_path): # pragma: nocover
+ del finder.find_distributions
+
+
+class NullFinder:
+ """
+ A "Finder" (aka "MetaClassFinder") that never finds any modules,
+ but may find distributions.
+ """
+
+ @staticmethod
+ def find_spec(*args, **kwargs):
+ return None
+
+ # In Python 2, the import system requires finders
+ # to have a find_module() method, but this usage
+ # is deprecated in Python 3 in favor of find_spec().
+ # For the purposes of this finder (i.e. being present
+ # on sys.meta_path but having no other import
+ # system functionality), the two methods are identical.
+ find_module = find_spec
+
+
+def pypy_partial(val):
+ """
+ Adjust for variable stacklevel on partial under PyPy.
+
+ Workaround for #327.
+ """
+ is_pypy = platform.python_implementation() == 'PyPy'
+ return val + is_pypy
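+
+# Sketch: pypy_partial(2) evaluates to 2 on CPython and 3 on PyPy,
+# compensating for the extra stack frame functools.partial adds there.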
diff --git a/third_party/python/importlib_metadata/importlib_metadata/_functools.py b/third_party/python/importlib_metadata/importlib_metadata/_functools.py
new file mode 100644
index 0000000000..71f66bd03c
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/_functools.py
@@ -0,0 +1,104 @@
+import types
+import functools
+
+
+# from jaraco.functools 3.3
+def method_cache(method, cache_wrapper=None):
+ """
+ Wrap lru_cache to support storing the cache data in the object instances.
+
+ Abstracts the common paradigm where the method explicitly saves an
+ underscore-prefixed protected property on first call and returns that
+ subsequently.
+
+ >>> class MyClass:
+ ... calls = 0
+ ...
+ ... @method_cache
+ ... def method(self, value):
+ ... self.calls += 1
+ ... return value
+
+ >>> a = MyClass()
+ >>> a.method(3)
+ 3
+ >>> for x in range(75):
+ ... res = a.method(x)
+ >>> a.calls
+ 75
+
+ Note that the apparent behavior will be exactly like that of lru_cache
+ except that the cache is stored on each instance, so values in one
+ instance will not flush values from another, and when an instance is
+ deleted, so are the cached values for that instance.
+
+ >>> b = MyClass()
+ >>> for x in range(35):
+ ... res = b.method(x)
+ >>> b.calls
+ 35
+ >>> a.method(0)
+ 0
+ >>> a.calls
+ 75
+
+ Note that if method had been decorated with ``functools.lru_cache()``,
+ a.calls would have been 76 (due to the cached value of 0 having been
+ flushed by the 'b' instance).
+
+ Clear the cache with ``.cache_clear()``
+
+ >>> a.method.cache_clear()
+
+ Same for a method that hasn't yet been called.
+
+ >>> c = MyClass()
+ >>> c.method.cache_clear()
+
+ Another cache wrapper may be supplied:
+
+ >>> cache = functools.lru_cache(maxsize=2)
+ >>> MyClass.method2 = method_cache(lambda self: 3, cache_wrapper=cache)
+ >>> a = MyClass()
+ >>> a.method2()
+ 3
+
+ Caution - do not subsequently wrap the method with another decorator, such
+ as ``@property``, which changes the semantics of the function.
+
+ See also
+ http://code.activestate.com/recipes/577452-a-memoize-decorator-for-instance-methods/
+ for another implementation and additional justification.
+ """
+ cache_wrapper = cache_wrapper or functools.lru_cache()
+
+ def wrapper(self, *args, **kwargs):
+ # it's the first call, replace the method with a cached, bound method
+ bound_method = types.MethodType(method, self)
+ cached_method = cache_wrapper(bound_method)
+ setattr(self, method.__name__, cached_method)
+ return cached_method(*args, **kwargs)
+
+ # Support cache clear even before cache has been created.
+ wrapper.cache_clear = lambda: None
+
+ return wrapper
+
+
+# From jaraco.functools 3.3
+def pass_none(func):
+ """
+ Wrap func so it's not called if its first param is None
+
+ >>> print_text = pass_none(print)
+ >>> print_text('text')
+ text
+ >>> print_text(None)
+ """
+
+ @functools.wraps(func)
+ def wrapper(param, *args, **kwargs):
+ if param is not None:
+ return func(param, *args, **kwargs)
+
+ return wrapper
diff --git a/third_party/python/importlib_metadata/importlib_metadata/_itertools.py b/third_party/python/importlib_metadata/importlib_metadata/_itertools.py
new file mode 100644
index 0000000000..d4ca9b9140
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/_itertools.py
@@ -0,0 +1,73 @@
+from itertools import filterfalse
+
+
+def unique_everseen(iterable, key=None):
+ "List unique elements, preserving order. Remember all elements ever seen."
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+ # unique_everseen('ABBCcAD', str.lower) --> A B C D
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
+
+
+# copied from more_itertools 8.8
+def always_iterable(obj, base_type=(str, bytes)):
+ """If *obj* is iterable, return an iterator over its items::
+
+ >>> obj = (1, 2, 3)
+ >>> list(always_iterable(obj))
+ [1, 2, 3]
+
+ If *obj* is not iterable, return a one-item iterable containing *obj*::
+
+ >>> obj = 1
+ >>> list(always_iterable(obj))
+ [1]
+
+ If *obj* is ``None``, return an empty iterable:
+
+ >>> obj = None
+ >>> list(always_iterable(None))
+ []
+
+ By default, binary and text strings are not considered iterable::
+
+ >>> obj = 'foo'
+ >>> list(always_iterable(obj))
+ ['foo']
+
+ If *base_type* is set, objects for which ``isinstance(obj, base_type)``
+ returns ``True`` won't be considered iterable.
+
+ >>> obj = {'a': 1}
+ >>> list(always_iterable(obj)) # Iterate over the dict's keys
+ ['a']
+ >>> list(always_iterable(obj, base_type=dict)) # Treat dicts as a unit
+ [{'a': 1}]
+
+ Set *base_type* to ``None`` to avoid any special handling and treat objects
+ Python considers iterable as iterable:
+
+ >>> obj = 'foo'
+ >>> list(always_iterable(obj, base_type=None))
+ ['f', 'o', 'o']
+ """
+ if obj is None:
+ return iter(())
+
+ if (base_type is not None) and isinstance(obj, base_type):
+ return iter((obj,))
+
+ try:
+ return iter(obj)
+ except TypeError:
+ return iter((obj,))
diff --git a/third_party/python/importlib_metadata/importlib_metadata/_meta.py b/third_party/python/importlib_metadata/importlib_metadata/_meta.py
new file mode 100644
index 0000000000..259b15ba19
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/_meta.py
@@ -0,0 +1,49 @@
+from ._compat import Protocol
+from typing import Any, Dict, Iterator, List, TypeVar, Union
+
+
+_T = TypeVar("_T")
+
+
+class PackageMetadata(Protocol):
+ def __len__(self) -> int:
+ ... # pragma: no cover
+
+ def __contains__(self, item: str) -> bool:
+ ... # pragma: no cover
+
+ def __getitem__(self, key: str) -> str:
+ ... # pragma: no cover
+
+ def __iter__(self) -> Iterator[str]:
+ ... # pragma: no cover
+
+ def get_all(self, name: str, failobj: _T = ...) -> Union[List[Any], _T]:
+ """
+ Return all values associated with a possibly multi-valued key.
+ """
+
+ @property
+ def json(self) -> Dict[str, Union[str, List[str]]]:
+ """
+ A JSON-compatible form of the metadata.
+ """
+
+
+class SimplePath(Protocol[_T]):
+ """
+ A minimal subset of pathlib.Path required by PathDistribution.
+ """
+
+ def joinpath(self) -> _T:
+ ... # pragma: no cover
+
+ def __truediv__(self, other: Union[str, _T]) -> _T:
+ ... # pragma: no cover
+
+ @property
+ def parent(self) -> _T:
+ ... # pragma: no cover
+
+ def read_text(self) -> str:
+ ... # pragma: no cover
diff --git a/third_party/python/importlib_metadata/importlib_metadata/_py39compat.py b/third_party/python/importlib_metadata/importlib_metadata/_py39compat.py
new file mode 100644
index 0000000000..cde4558fbb
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/_py39compat.py
@@ -0,0 +1,35 @@
+"""
+Compatibility layer with Python 3.8/3.9
+"""
+from typing import TYPE_CHECKING, Any, Optional
+
+if TYPE_CHECKING: # pragma: no cover
+    # Prevent circular imports at runtime.
+ from . import Distribution, EntryPoint
+else:
+ Distribution = EntryPoint = Any
+
+
+def normalized_name(dist: Distribution) -> Optional[str]:
+ """
+ Honor name normalization for distributions that don't provide ``_normalized_name``.
+ """
+ try:
+ return dist._normalized_name
+ except AttributeError:
+ from . import Prepared # -> delay to prevent circular imports.
+
+ return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name'])
+
+
+def ep_matches(ep: EntryPoint, **params) -> bool:
+ """
+ Workaround for ``EntryPoint`` objects without the ``matches`` method.
+ """
+ try:
+ return ep.matches(**params)
+ except AttributeError:
+ from . import EntryPoint # -> delay to prevent circular imports.
+
+ # Reconstruct the EntryPoint object to make sure it is compatible.
+ return EntryPoint(ep.name, ep.value, ep.group).matches(**params)
diff --git a/third_party/python/importlib_metadata/importlib_metadata/_text.py b/third_party/python/importlib_metadata/importlib_metadata/_text.py
new file mode 100644
index 0000000000..c88cfbb234
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/_text.py
@@ -0,0 +1,99 @@
+import re
+
+from ._functools import method_cache
+
+
+# from jaraco.text 3.5
+class FoldedCase(str):
+ """
+    A case-insensitive string class; behaves just like str
+    except that it compares equal when the only variation is case.
+
+ >>> s = FoldedCase('hello world')
+
+ >>> s == 'Hello World'
+ True
+
+ >>> 'Hello World' == s
+ True
+
+ >>> s != 'Hello World'
+ False
+
+ >>> s.index('O')
+ 4
+
+ >>> s.split('O')
+ ['hell', ' w', 'rld']
+
+ >>> sorted(map(FoldedCase, ['GAMMA', 'alpha', 'Beta']))
+ ['alpha', 'Beta', 'GAMMA']
+
+ Sequence membership is straightforward.
+
+ >>> "Hello World" in [s]
+ True
+ >>> s in ["Hello World"]
+ True
+
+ You may test for set inclusion, but candidate and elements
+ must both be folded.
+
+ >>> FoldedCase("Hello World") in {s}
+ True
+ >>> s in {FoldedCase("Hello World")}
+ True
+
+ String inclusion works as long as the FoldedCase object
+ is on the right.
+
+ >>> "hello" in FoldedCase("Hello World")
+ True
+
+ But not if the FoldedCase object is on the left:
+
+ >>> FoldedCase('hello') in 'Hello World'
+ False
+
+ In that case, use in_:
+
+ >>> FoldedCase('hello').in_('Hello World')
+ True
+
+ >>> FoldedCase('hello') > FoldedCase('Hello')
+ False
+ """
+
+ def __lt__(self, other):
+ return self.lower() < other.lower()
+
+ def __gt__(self, other):
+ return self.lower() > other.lower()
+
+ def __eq__(self, other):
+ return self.lower() == other.lower()
+
+ def __ne__(self, other):
+ return self.lower() != other.lower()
+
+ def __hash__(self):
+ return hash(self.lower())
+
+ def __contains__(self, other):
+ return super().lower().__contains__(other.lower())
+
+ def in_(self, other):
+ "Does self appear in other?"
+ return self in FoldedCase(other)
+
+ # cache lower since it's likely to be called frequently.
+ @method_cache
+ def lower(self):
+ return super().lower()
+
+ def index(self, sub):
+ return self.lower().index(sub.lower())
+
+ def split(self, splitter=' ', maxsplit=0):
+ pattern = re.compile(re.escape(splitter), re.I)
+ return pattern.split(self, maxsplit)
diff --git a/third_party/python/importlib_metadata/importlib_metadata/py.typed b/third_party/python/importlib_metadata/importlib_metadata/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/importlib_metadata/importlib_metadata/py.typed
diff --git a/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/LICENSE b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/METADATA b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/METADATA
new file mode 100644
index 0000000000..038059d306
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/METADATA
@@ -0,0 +1,104 @@
+Metadata-Version: 2.1
+Name: importlib-resources
+Version: 5.12.0
+Summary: Read resources from Python packages
+Home-page: https://github.com/python/importlib_resources
+Author: Barry Warsaw
+Author-email: barry@python.org
+Project-URL: Documentation, https://importlib-resources.readthedocs.io/
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Requires-Python: >=3.7
+License-File: LICENSE
+Requires-Dist: zipp (>=3.1.0) ; python_version < "3.10"
+Provides-Extra: docs
+Requires-Dist: sphinx (>=3.5) ; extra == 'docs'
+Requires-Dist: jaraco.packaging (>=9) ; extra == 'docs'
+Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
+Requires-Dist: furo ; extra == 'docs'
+Requires-Dist: sphinx-lint ; extra == 'docs'
+Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs'
+Provides-Extra: testing
+Requires-Dist: pytest (>=6) ; extra == 'testing'
+Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing'
+Requires-Dist: flake8 (<5) ; extra == 'testing'
+Requires-Dist: pytest-cov ; extra == 'testing'
+Requires-Dist: pytest-enabler (>=1.3) ; extra == 'testing'
+Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-flake8 ; (python_version < "3.12") and extra == 'testing'
+
+.. image:: https://img.shields.io/pypi/v/importlib_resources.svg
+ :target: https://pypi.org/project/importlib_resources
+
+.. image:: https://img.shields.io/pypi/pyversions/importlib_resources.svg
+
+.. image:: https://github.com/python/importlib_resources/workflows/tests/badge.svg
+ :target: https://github.com/python/importlib_resources/actions?query=workflow%3A%22tests%22
+ :alt: tests
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+ :alt: Code style: Black
+
+.. image:: https://readthedocs.org/projects/importlib-resources/badge/?version=latest
+ :target: https://importlib-resources.readthedocs.io/en/latest/?badge=latest
+
+.. image:: https://img.shields.io/badge/skeleton-2023-informational
+ :target: https://blog.jaraco.com/skeleton
+
+.. image:: https://tidelift.com/badges/package/pypi/importlib-resources
+ :target: https://tidelift.com/subscription/pkg/pypi-importlib-resources?utm_source=pypi-importlib-resources&utm_medium=readme
+
+``importlib_resources`` is a backport of the Python standard library's
+`importlib.resources
+<https://docs.python.org/3/library/importlib.html#module-importlib.resources>`_
+module for older Pythons.
+
+The key goal of this module is to replace parts of `pkg_resources
+<https://setuptools.readthedocs.io/en/latest/pkg_resources.html>`_ with a
+solution in Python's stdlib that relies on well-defined APIs. This makes
+reading resources included in packages easier, with more stable and consistent
+semantics.
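+
+A minimal usage sketch (the package and resource names here are
+illustrative only)::
+
+    from importlib_resources import files
+
+    text = files('mypkg').joinpath('data.txt').read_text(encoding='utf-8')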
+
+Compatibility
+=============
+
+New features are introduced in this third-party library and later merged
+into CPython. The following table indicates which versions of this library
+were incorporated into which versions of the standard library:
+
+.. list-table::
+ :header-rows: 1
+
+ * - importlib_resources
+ - stdlib
+ * - 5.9
+ - 3.12
+ * - 5.7
+ - 3.11
+ * - 5.0
+ - 3.10
+ * - 1.3
+ - 3.9
+ * - 0.5 (?)
+ - 3.7
+
+For Enterprise
+==============
+
+Available as part of the Tidelift Subscription.
+
+This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
+
+`Learn more <https://tidelift.com/subscription/pkg/pypi-importlib-resources?utm_source=pypi-importlib-resources&utm_medium=referral&utm_campaign=github>`_.
+
+Security Contact
+================
+
+To report a security vulnerability, please use the
+`Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.
diff --git a/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/RECORD b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/RECORD
new file mode 100644
index 0000000000..2babad09e7
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/RECORD
@@ -0,0 +1,48 @@
+importlib_resources/__init__.py,sha256=evPm12kLgYqTm-pbzm60bOuumumT8IpBNWFp0uMyrzE,506
+importlib_resources/_adapters.py,sha256=vprJGbUeHbajX6XCuMP6J3lMrqCi-P_MTlziJUR7jfk,4482
+importlib_resources/_common.py,sha256=jSC4xfLdcMNbtbWHtpzbFkNa0W7kvf__nsYn14C_AEU,5457
+importlib_resources/_compat.py,sha256=4oDJPpo63eH_3l5BkBHmkjAQW4HGs5qvYd2-ziLA_ck,2935
+importlib_resources/_itertools.py,sha256=eDisV6RqiNZOogLSXf6LOGHOYc79FGgPrKNLzFLmCrU,1277
+importlib_resources/_legacy.py,sha256=0TKdZixxLWA-xwtAZw4HcpqJmj4Xprx1Zkcty0gTRZY,3481
+importlib_resources/abc.py,sha256=Icr2IJ2QtH7vvAB9vC5WRJ9KBoaDyJa7KUs8McuROzo,5140
+importlib_resources/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/readers.py,sha256=i80n49L2rBAtlB9bU0zAeQHiEXxcdP99-pWR6ED-ypY,4312
+importlib_resources/simple.py,sha256=0__2TQBTQoqkajYmNPt1HxERcReAT6boVKJA328pr04,2576
+importlib_resources/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/_compat.py,sha256=YTSB0U1R9oADnh6GrQcOCgojxcF_N6H1LklymEWf9SQ,708
+importlib_resources/tests/_path.py,sha256=nkv3ek7D1U898v921rYbldDCtKri2oyYOi3EJqGjEGU,1289
+importlib_resources/tests/test_compatibilty_files.py,sha256=95N_R7aik8cvnE6sBJpsxmP0K5plOWRIJDgbalD-Hpw,3314
+importlib_resources/tests/test_contents.py,sha256=V1Xfk3lqTDdvUsZuV18Kndf0CT_tkM2oEIwk9Vv0rhg,968
+importlib_resources/tests/test_custom.py,sha256=jVYg9idEVdUN6idHUfDDlZ-zDWl56qYNbj5QrcZO76Y,1124
+importlib_resources/tests/test_files.py,sha256=W5XoBWSTr84Ke15UtjqWLet2iUDUyJfQxbST4PDlj2w,3283
+importlib_resources/tests/test_open.py,sha256=9qvdC6Eu2Kn3mh3xDR5HUEQoePSKIecTxU4vnH9veO8,2671
+importlib_resources/tests/test_path.py,sha256=XR5RI7_zndI_Nqw9eHU1tDmSGIo29N1GP8INodPc584,2142
+importlib_resources/tests/test_read.py,sha256=BYdRqZEEJE17NHPArpZW9VsIwMlna1BpHyWkgCvEKWk,2512
+importlib_resources/tests/test_reader.py,sha256=YS1RHDzSIo7Dy3AhoK7sY-cFWIFnfkMNfQR3xlXsgio,4990
+importlib_resources/tests/test_resource.py,sha256=cPHz7VLwq6bFznZ-JDYE3f_4VJthQztRHKhiA9SriT0,8270
+importlib_resources/tests/update-zips.py,sha256=x-SrO5v87iLLUMXyefxDwAd3imAs_slI94sLWvJ6N40,1417
+importlib_resources/tests/util.py,sha256=TQz12vSkHNjGlF3hB0OR4kx2sCR-xcj0wI2esDyHR9I,5001
+importlib_resources/tests/data01/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/data01/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4
+importlib_resources/tests/data01/utf-16.file,sha256=t5q9qhxX0rYqItBOM8D3ylwG-RHrnOYteTLtQr6sF7g,44
+importlib_resources/tests/data01/utf-8.file,sha256=kwWgYG4yQ-ZF2X_WA66EjYPmxJRn-w8aSOiS9e8tKYY,20
+importlib_resources/tests/data01/subdirectory/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/data01/subdirectory/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4
+importlib_resources/tests/data02/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/data02/one/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/data02/one/resource1.txt,sha256=10flKac7c-XXFzJ3t-AB5MJjlBy__dSZvPE_dOm2q6U,13
+importlib_resources/tests/data02/subdirectory/subsubdir/resource.txt,sha256=jnrBBztxYrtQck7cmVnc4xQVO4-agzAZDGSFkAWtlFw,10
+importlib_resources/tests/data02/two/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/data02/two/resource2.txt,sha256=lt2jbN3TMn9QiFKM832X39bU_62UptDdUkoYzkvEbl0,13
+importlib_resources/tests/namespacedata01/binary.file,sha256=BU7ewdAhH2JP7Qy8qdT5QAsOSRxDdCryxbCr6_DJkNg,4
+importlib_resources/tests/namespacedata01/utf-16.file,sha256=t5q9qhxX0rYqItBOM8D3ylwG-RHrnOYteTLtQr6sF7g,44
+importlib_resources/tests/namespacedata01/utf-8.file,sha256=kwWgYG4yQ-ZF2X_WA66EjYPmxJRn-w8aSOiS9e8tKYY,20
+importlib_resources/tests/zipdata01/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/zipdata01/ziptestdata.zip,sha256=z5Of4dsv3T0t-46B0MsVhxlhsPGMz28aUhJDWpj3_oY,876
+importlib_resources/tests/zipdata02/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+importlib_resources/tests/zipdata02/ziptestdata.zip,sha256=ydI-_j-xgQ7tDxqBp9cjOqXBGxUp6ZBbwVJu6Xj-nrY,698
+importlib_resources-5.12.0.dist-info/LICENSE,sha256=z8d0m5b2O9McPEK1xHG_dWgUBT6EfBDz6wA0F7xSPTA,11358
+importlib_resources-5.12.0.dist-info/METADATA,sha256=uEY10nhKI-5nXImnXgsNt7BDYf7u2Qw8-BO2K2hmlJA,4111
+importlib_resources-5.12.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+importlib_resources-5.12.0.dist-info/top_level.txt,sha256=fHIjHU1GZwAjvcydpmUnUrTnbvdiWjG4OEVZK8by0TQ,20
+importlib_resources-5.12.0.dist-info/RECORD,,
diff --git a/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/WHEEL b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/WHEEL
new file mode 100644
index 0000000000..57e3d840d5
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/top_level.txt b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..58ad1bd333
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources-5.12.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+importlib_resources
diff --git a/third_party/python/importlib_resources/importlib_resources/__init__.py b/third_party/python/importlib_resources/importlib_resources/__init__.py
new file mode 100644
index 0000000000..34e3a9950c
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/__init__.py
@@ -0,0 +1,36 @@
+"""Read resources contained within a package."""
+
+from ._common import (
+ as_file,
+ files,
+ Package,
+)
+
+from ._legacy import (
+ contents,
+ open_binary,
+ read_binary,
+ open_text,
+ read_text,
+ is_resource,
+ path,
+ Resource,
+)
+
+from .abc import ResourceReader
+
+
+__all__ = [
+ 'Package',
+ 'Resource',
+ 'ResourceReader',
+ 'as_file',
+ 'contents',
+ 'files',
+ 'is_resource',
+ 'open_binary',
+ 'open_text',
+ 'path',
+ 'read_binary',
+ 'read_text',
+]
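+
+
+# Illustrative usage sketch (not part of the upstream module); 'mypkg' and
+# 'data.txt' are hypothetical names:
+#
+#     from importlib_resources import as_file, files
+#
+#     text = files('mypkg').joinpath('data.txt').read_text(encoding='utf-8')
+#     with as_file(files('mypkg') / 'data.txt') as path:
+#         ...  # 'path' is a concrete pathlib.Path while the context is open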
diff --git a/third_party/python/importlib_resources/importlib_resources/_adapters.py b/third_party/python/importlib_resources/importlib_resources/_adapters.py
new file mode 100644
index 0000000000..50688fbb66
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/_adapters.py
@@ -0,0 +1,168 @@
+from contextlib import suppress
+from io import TextIOWrapper
+
+from . import abc
+
+
+class SpecLoaderAdapter:
+ """
+    Adapt a package spec so that the underlying loader can be adapted.
+ """
+
+ def __init__(self, spec, adapter=lambda spec: spec.loader):
+ self.spec = spec
+ self.loader = adapter(spec)
+
+ def __getattr__(self, name):
+ return getattr(self.spec, name)
+
+
+class TraversableResourcesLoader:
+ """
+ Adapt a loader to provide TraversableResources.
+ """
+
+ def __init__(self, spec):
+ self.spec = spec
+
+ def get_resource_reader(self, name):
+ return CompatibilityFiles(self.spec)._native()
+
+
+def _io_wrapper(file, mode='r', *args, **kwargs):
+ if mode == 'r':
+ return TextIOWrapper(file, *args, **kwargs)
+ elif mode == 'rb':
+ return file
+ raise ValueError(f"Invalid mode value '{mode}', only 'r' and 'rb' are supported")
+
+
+class CompatibilityFiles:
+ """
+ Adapter for an existing or non-existent resource reader
+ to provide a compatibility .files().
+ """
+
+ class SpecPath(abc.Traversable):
+ """
+ Path tied to a module spec.
+ Can be read and exposes the resource reader children.
+ """
+
+ def __init__(self, spec, reader):
+ self._spec = spec
+ self._reader = reader
+
+ def iterdir(self):
+ if not self._reader:
+ return iter(())
+ return iter(
+ CompatibilityFiles.ChildPath(self._reader, path)
+ for path in self._reader.contents()
+ )
+
+ def is_file(self):
+ return False
+
+ is_dir = is_file
+
+ def joinpath(self, other):
+ if not self._reader:
+ return CompatibilityFiles.OrphanPath(other)
+ return CompatibilityFiles.ChildPath(self._reader, other)
+
+ @property
+ def name(self):
+ return self._spec.name
+
+ def open(self, mode='r', *args, **kwargs):
+ return _io_wrapper(self._reader.open_resource(None), mode, *args, **kwargs)
+
+ class ChildPath(abc.Traversable):
+ """
+ Path tied to a resource reader child.
+ Can be read but doesn't expose any meaningful children.
+ """
+
+ def __init__(self, reader, name):
+ self._reader = reader
+ self._name = name
+
+ def iterdir(self):
+ return iter(())
+
+ def is_file(self):
+ return self._reader.is_resource(self.name)
+
+ def is_dir(self):
+ return not self.is_file()
+
+ def joinpath(self, other):
+ return CompatibilityFiles.OrphanPath(self.name, other)
+
+ @property
+ def name(self):
+ return self._name
+
+ def open(self, mode='r', *args, **kwargs):
+ return _io_wrapper(
+ self._reader.open_resource(self.name), mode, *args, **kwargs
+ )
+
+ class OrphanPath(abc.Traversable):
+ """
+ Orphan path, not tied to a module spec or resource reader.
+ Can't be read and doesn't expose any meaningful children.
+ """
+
+ def __init__(self, *path_parts):
+ if len(path_parts) < 1:
+ raise ValueError('Need at least one path part to construct a path')
+ self._path = path_parts
+
+ def iterdir(self):
+ return iter(())
+
+ def is_file(self):
+ return False
+
+ is_dir = is_file
+
+ def joinpath(self, other):
+ return CompatibilityFiles.OrphanPath(*self._path, other)
+
+ @property
+ def name(self):
+ return self._path[-1]
+
+ def open(self, mode='r', *args, **kwargs):
+ raise FileNotFoundError("Can't open orphan path")
+
+ def __init__(self, spec):
+ self.spec = spec
+
+ @property
+ def _reader(self):
+ with suppress(AttributeError):
+ return self.spec.loader.get_resource_reader(self.spec.name)
+
+ def _native(self):
+ """
+ Return the native reader if it supports files().
+ """
+ reader = self._reader
+ return reader if hasattr(reader, 'files') else self
+
+ def __getattr__(self, attr):
+ return getattr(self._reader, attr)
+
+ def files(self):
+ return CompatibilityFiles.SpecPath(self.spec, self._reader)
+
+
+def wrap_spec(package):
+ """
+ Construct a package spec with traversable compatibility
+ on the spec/loader/reader.
+ """
+ return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
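+
+
+# Illustrative sketch (not part of the upstream module): wrap_spec() yields a
+# spec whose loader always exposes a reader with a .files() method, falling
+# back to CompatibilityFiles when the native reader lacks one.
+#
+#     import email  # any importable package will do
+#     spec = wrap_spec(email)
+#     reader = spec.loader.get_resource_reader(spec.name)
+#     root = reader.files()  # a Traversable, whatever the underlying loader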
diff --git a/third_party/python/importlib_resources/importlib_resources/_common.py b/third_party/python/importlib_resources/importlib_resources/_common.py
new file mode 100644
index 0000000000..3c6de1cfb2
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/_common.py
@@ -0,0 +1,207 @@
+import os
+import pathlib
+import tempfile
+import functools
+import contextlib
+import types
+import importlib
+import inspect
+import warnings
+import itertools
+
+from typing import Union, Optional, cast
+from .abc import ResourceReader, Traversable
+
+from ._compat import wrap_spec
+
+Package = Union[types.ModuleType, str]
+Anchor = Package
+
+
+def package_to_anchor(func):
+ """
+    Replace the 'package' parameter with 'anchor' and warn about the change.
+
+ Other errors should fall through.
+
+ >>> files('a', 'b')
+ Traceback (most recent call last):
+ TypeError: files() takes from 0 to 1 positional arguments but 2 were given
+ """
+ undefined = object()
+
+ @functools.wraps(func)
+ def wrapper(anchor=undefined, package=undefined):
+ if package is not undefined:
+ if anchor is not undefined:
+ return func(anchor, package)
+ warnings.warn(
+ "First parameter to files is renamed to 'anchor'",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return func(package)
+ elif anchor is undefined:
+ return func()
+ return func(anchor)
+
+ return wrapper
+
+
+@package_to_anchor
+def files(anchor: Optional[Anchor] = None) -> Traversable:
+ """
+ Get a Traversable resource for an anchor.
+ """
+ return from_package(resolve(anchor))
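+
+
+# Illustrative sketch (not part of the upstream module): the anchor may be a
+# module object, a dotted module name, or omitted entirely.
+#
+#     import email
+#     files(email)    # module object anchor
+#     files('email')  # dotted-name string anchor
+#     files()         # no anchor: inferred from the caller's module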
+
+
+def get_resource_reader(package: types.ModuleType) -> Optional[ResourceReader]:
+ """
+ Return the package's loader if it's a ResourceReader.
+ """
+    # We can't use an issubclass() check here because apparently
+    # abc.ABCMeta's __subclasscheck__() hook wants to create a weak
+    # reference to the object, but zipimport.zipimporter does not support
+    # weak references, resulting in a TypeError. That seems terrible.
+ spec = package.__spec__
+ reader = getattr(spec.loader, 'get_resource_reader', None) # type: ignore
+ if reader is None:
+ return None
+ return reader(spec.name) # type: ignore
+
+
+@functools.singledispatch
+def resolve(cand: Optional[Anchor]) -> types.ModuleType:
+ return cast(types.ModuleType, cand)
+
+
+@resolve.register
+def _(cand: str) -> types.ModuleType:
+ return importlib.import_module(cand)
+
+
+@resolve.register
+def _(cand: None) -> types.ModuleType:
+ return resolve(_infer_caller().f_globals['__name__'])
+
+
+def _infer_caller():
+ """
+ Walk the stack and find the frame of the first caller not in this module.
+ """
+
+ def is_this_file(frame_info):
+ return frame_info.filename == __file__
+
+ def is_wrapper(frame_info):
+ return frame_info.function == 'wrapper'
+
+ not_this_file = itertools.filterfalse(is_this_file, inspect.stack())
+ # also exclude 'wrapper' due to singledispatch in the call stack
+ callers = itertools.filterfalse(is_wrapper, not_this_file)
+ return next(callers).frame
+
+
+def from_package(package: types.ModuleType):
+ """
+ Return a Traversable object for the given package.
+
+ """
+ spec = wrap_spec(package)
+ reader = spec.loader.get_resource_reader(spec.name)
+ return reader.files()
+
+
+@contextlib.contextmanager
+def _tempfile(
+ reader,
+ suffix='',
+ # gh-93353: Keep a reference to call os.remove() in late Python
+ # finalization.
+ *,
+ _os_remove=os.remove,
+):
+ # Not using tempfile.NamedTemporaryFile as it leads to deeper 'try'
+ # blocks due to the need to close the temporary file to work on Windows
+ # properly.
+ fd, raw_path = tempfile.mkstemp(suffix=suffix)
+ try:
+ try:
+ os.write(fd, reader())
+ finally:
+ os.close(fd)
+ del reader
+ yield pathlib.Path(raw_path)
+ finally:
+ try:
+ _os_remove(raw_path)
+ except FileNotFoundError:
+ pass
+
+
+def _temp_file(path):
+ return _tempfile(path.read_bytes, suffix=path.name)
+
+
+def _is_present_dir(path: Traversable) -> bool:
+ """
+ Some Traversables implement ``is_dir()`` to raise an
+ exception (i.e. ``FileNotFoundError``) when the
+ directory doesn't exist. This function wraps that call
+ to always return a boolean and only return True
+ if there's a dir and it exists.
+ """
+ with contextlib.suppress(FileNotFoundError):
+ return path.is_dir()
+ return False
+
+
+@functools.singledispatch
+def as_file(path):
+ """
+ Given a Traversable object, return that object as a
+ path on the local file system in a context manager.
+ """
+ return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)
+
+
+@as_file.register(pathlib.Path)
+@contextlib.contextmanager
+def _(path):
+ """
+ Degenerate behavior for pathlib.Path objects.
+ """
+ yield path
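+
+
+# Illustrative sketch (not part of the upstream module): as_file() yields a
+# real filesystem path even when the resource lives inside a zip, copying to
+# a temporary file (or directory tree) only when necessary; 'mypkg' and
+# consume_certificate() are hypothetical.
+#
+#     with as_file(files('mypkg') / 'cert.pem') as path:
+#         consume_certificate(path)  # a consumer that needs a real path
+#     # any temporary copy is removed when the block exits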
+
+
+@contextlib.contextmanager
+def _temp_path(dir: tempfile.TemporaryDirectory):
+ """
+    Wrap tempfile.TemporaryDirectory to return a pathlib object.
+ """
+ with dir as result:
+ yield pathlib.Path(result)
+
+
+@contextlib.contextmanager
+def _temp_dir(path):
+ """
+ Given a traversable dir, recursively replicate the whole tree
+ to the file system in a context manager.
+ """
+ assert path.is_dir()
+ with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
+ yield _write_contents(temp_dir, path)
+
+
+def _write_contents(target, source):
+ child = target.joinpath(source.name)
+ if source.is_dir():
+ child.mkdir()
+ for item in source.iterdir():
+ _write_contents(child, item)
+ else:
+ child.write_bytes(source.read_bytes())
+ return child
diff --git a/third_party/python/importlib_resources/importlib_resources/_compat.py b/third_party/python/importlib_resources/importlib_resources/_compat.py
new file mode 100644
index 0000000000..a93a88263b
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/_compat.py
@@ -0,0 +1,109 @@
+# flake8: noqa
+
+import abc
+import os
+import sys
+import pathlib
+from contextlib import suppress
+from typing import Union
+
+
+if sys.version_info >= (3, 10):
+ from zipfile import Path as ZipPath # type: ignore
+else:
+ from zipp import Path as ZipPath # type: ignore
+
+
+try:
+ from typing import runtime_checkable # type: ignore
+except ImportError:
+
+ def runtime_checkable(cls): # type: ignore
+ return cls
+
+
+try:
+ from typing import Protocol # type: ignore
+except ImportError:
+ Protocol = abc.ABC # type: ignore
+
+
+class TraversableResourcesLoader:
+ """
+ Adapt loaders to provide TraversableResources and other
+ compatibility.
+
+ Used primarily for Python 3.9 and earlier where the native
+ loaders do not yet implement TraversableResources.
+ """
+
+ def __init__(self, spec):
+ self.spec = spec
+
+ @property
+ def path(self):
+ return self.spec.origin
+
+ def get_resource_reader(self, name):
+ from . import readers, _adapters
+
+ def _zip_reader(spec):
+ with suppress(AttributeError):
+ return readers.ZipReader(spec.loader, spec.name)
+
+ def _namespace_reader(spec):
+ with suppress(AttributeError, ValueError):
+ return readers.NamespaceReader(spec.submodule_search_locations)
+
+ def _available_reader(spec):
+ with suppress(AttributeError):
+ return spec.loader.get_resource_reader(spec.name)
+
+ def _native_reader(spec):
+ reader = _available_reader(spec)
+ return reader if hasattr(reader, 'files') else None
+
+ def _file_reader(spec):
+ try:
+ path = pathlib.Path(self.path)
+ except TypeError:
+ return None
+ if path.exists():
+ return readers.FileReader(self)
+
+ return (
+ # local ZipReader if a zip module
+ _zip_reader(self.spec)
+ or
+ # local NamespaceReader if a namespace module
+ _namespace_reader(self.spec)
+ or
+ # local FileReader
+ _file_reader(self.spec)
+ or
+ # native reader if it supplies 'files'
+ _native_reader(self.spec)
+ or
+ # fallback - adapt the spec ResourceReader to TraversableReader
+ _adapters.CompatibilityFiles(self.spec)
+ )
+
+
+def wrap_spec(package):
+ """
+ Construct a package spec with traversable compatibility
+ on the spec/loader/reader.
+
+ Supersedes _adapters.wrap_spec to use TraversableResourcesLoader
+ from above for older Python compatibility (<3.10).
+ """
+ from . import _adapters
+
+ return _adapters.SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader)
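+
+
+# Illustrative sketch (not part of the upstream module): for a hypothetical
+# package imported from a zip file on sys.path, the reader chain above stops
+# at _zip_reader, so files() is backed by a zipfile Path rather than a
+# temporary extraction.
+#
+#     import mypkg  # hypothetical package living inside a .zip
+#     spec = wrap_spec(mypkg)
+#     spec.loader.get_resource_reader(spec.name)  # -> readers.ZipReader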
+
+
+if sys.version_info >= (3, 9):
+ StrPath = Union[str, os.PathLike[str]]
+else:
+ # PathLike is only subscriptable at runtime in 3.9+
+ StrPath = Union[str, "os.PathLike[str]"]
diff --git a/third_party/python/importlib_resources/importlib_resources/_itertools.py b/third_party/python/importlib_resources/importlib_resources/_itertools.py
new file mode 100644
index 0000000000..7b775ef5ae
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/_itertools.py
@@ -0,0 +1,38 @@
+# from more_itertools 9.0
+def only(iterable, default=None, too_long=None):
+ """If *iterable* has only one item, return it.
+ If it has zero items, return *default*.
+ If it has more than one item, raise the exception given by *too_long*,
+ which is ``ValueError`` by default.
+ >>> only([], default='missing')
+ 'missing'
+ >>> only([1])
+ 1
+ >>> only([1, 2]) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValueError: Expected exactly one item in iterable, but got 1, 2,
+    and perhaps more.
+ >>> only([1, 2], too_long=TypeError) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ TypeError
+ Note that :func:`only` attempts to advance *iterable* twice to ensure there
+ is only one item. See :func:`spy` or :func:`peekable` to check
+ iterable contents less destructively.
+ """
+ it = iter(iterable)
+ first_value = next(it, default)
+
+ try:
+ second_value = next(it)
+ except StopIteration:
+ pass
+ else:
+ msg = (
+ 'Expected exactly one item in iterable, but got {!r}, {!r}, '
+ 'and perhaps more.'.format(first_value, second_value)
+ )
+ raise too_long or ValueError(msg)
+
+ return first_value
diff --git a/third_party/python/importlib_resources/importlib_resources/_legacy.py b/third_party/python/importlib_resources/importlib_resources/_legacy.py
new file mode 100644
index 0000000000..b1ea8105da
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/_legacy.py
@@ -0,0 +1,120 @@
+import functools
+import os
+import pathlib
+import types
+import warnings
+
+from typing import Union, Iterable, ContextManager, BinaryIO, TextIO, Any
+
+from . import _common
+
+Package = Union[types.ModuleType, str]
+Resource = str
+
+
+def deprecated(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ warnings.warn(
+ f"{func.__name__} is deprecated. Use files() instead. "
+ "Refer to https://importlib-resources.readthedocs.io"
+ "/en/latest/using.html#migrating-from-legacy for migration advice.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return func(*args, **kwargs)
+
+ return wrapper
+
+
+def normalize_path(path: Any) -> str:
+ """Normalize a path by ensuring it is a string.
+
+ If the resulting string contains path separators, an exception is raised.
+ """
+ str_path = str(path)
+ parent, file_name = os.path.split(str_path)
+ if parent:
+ raise ValueError(f'{path!r} must be only a file name')
+ return file_name
+
+
+@deprecated
+def open_binary(package: Package, resource: Resource) -> BinaryIO:
+ """Return a file-like object opened for binary reading of the resource."""
+ return (_common.files(package) / normalize_path(resource)).open('rb')
+
+
+@deprecated
+def read_binary(package: Package, resource: Resource) -> bytes:
+ """Return the binary contents of the resource."""
+ return (_common.files(package) / normalize_path(resource)).read_bytes()
+
+
+@deprecated
+def open_text(
+ package: Package,
+ resource: Resource,
+ encoding: str = 'utf-8',
+ errors: str = 'strict',
+) -> TextIO:
+ """Return a file-like object opened for text reading of the resource."""
+ return (_common.files(package) / normalize_path(resource)).open(
+ 'r', encoding=encoding, errors=errors
+ )
+
+
+@deprecated
+def read_text(
+ package: Package,
+ resource: Resource,
+ encoding: str = 'utf-8',
+ errors: str = 'strict',
+) -> str:
+ """Return the decoded string of the resource.
+
+ The decoding-related arguments have the same semantics as those of
+ bytes.decode().
+ """
+ with open_text(package, resource, encoding, errors) as fp:
+ return fp.read()
+
+
+@deprecated
+def contents(package: Package) -> Iterable[str]:
+ """Return an iterable of entries in `package`.
+
+ Note that not all entries are resources. Specifically, directories are
+ not considered resources. Use `is_resource()` on each entry returned here
+ to check if it is a resource or not.
+ """
+ return [path.name for path in _common.files(package).iterdir()]
+
+
+@deprecated
+def is_resource(package: Package, name: str) -> bool:
+ """True if `name` is a resource inside `package`.
+
+ Directories are *not* resources.
+ """
+ resource = normalize_path(name)
+ return any(
+ traversable.name == resource and traversable.is_file()
+ for traversable in _common.files(package).iterdir()
+ )
+
+
+@deprecated
+def path(
+ package: Package,
+ resource: Resource,
+) -> ContextManager[pathlib.Path]:
+ """A context manager providing a file path object to the resource.
+
+ If the resource does not already exist on its own on the file system,
+ a temporary file will be created. If the file was created, the file
+ will be deleted upon exiting the context manager (no exception is
+ raised if the file was deleted prior to the context manager
+ exiting).
+ """
+ return _common.as_file(_common.files(package) / normalize_path(resource))
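+
+
+# Illustrative migration sketch (not part of the upstream module), using a
+# hypothetical package 'mypkg':
+#
+#     read_text('mypkg', 'data.txt')                        # deprecated
+#     _common.files('mypkg').joinpath('data.txt').read_text(encoding='utf-8')
+#
+#     path('mypkg', 'data.bin')                             # deprecated
+#     _common.as_file(_common.files('mypkg') / 'data.bin')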
diff --git a/third_party/python/importlib_resources/importlib_resources/abc.py b/third_party/python/importlib_resources/importlib_resources/abc.py
new file mode 100644
index 0000000000..23b6aeafe4
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/abc.py
@@ -0,0 +1,170 @@
+import abc
+import io
+import itertools
+import pathlib
+from typing import Any, BinaryIO, Iterable, Iterator, NoReturn, Text, Optional
+
+from ._compat import runtime_checkable, Protocol, StrPath
+
+
+__all__ = ["ResourceReader", "Traversable", "TraversableResources"]
+
+
+class ResourceReader(metaclass=abc.ABCMeta):
+ """Abstract base class for loaders to provide resource reading support."""
+
+ @abc.abstractmethod
+ def open_resource(self, resource: Text) -> BinaryIO:
+ """Return an opened, file-like object for binary reading.
+
+ The 'resource' argument is expected to represent only a file name.
+ If the resource cannot be found, FileNotFoundError is raised.
+ """
+ # This deliberately raises FileNotFoundError instead of
+ # NotImplementedError so that if this method is accidentally called,
+ # it'll still do the right thing.
+ raise FileNotFoundError
+
+ @abc.abstractmethod
+ def resource_path(self, resource: Text) -> Text:
+ """Return the file system path to the specified resource.
+
+ The 'resource' argument is expected to represent only a file name.
+ If the resource does not exist on the file system, raise
+ FileNotFoundError.
+ """
+ # This deliberately raises FileNotFoundError instead of
+ # NotImplementedError so that if this method is accidentally called,
+ # it'll still do the right thing.
+ raise FileNotFoundError
+
+ @abc.abstractmethod
+ def is_resource(self, path: Text) -> bool:
+ """Return True if the named 'path' is a resource.
+
+ Files are resources, directories are not.
+ """
+ raise FileNotFoundError
+
+ @abc.abstractmethod
+ def contents(self) -> Iterable[str]:
+ """Return an iterable of entries in `package`."""
+ raise FileNotFoundError
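+
+
+# Illustrative sketch (not part of the upstream module): a minimal in-memory
+# ResourceReader, with hypothetical data, implementing the four abstract
+# methods.
+#
+#     import io
+#
+#     class DictReader(ResourceReader):
+#         _data = {'hello.txt': b'hi'}
+#
+#         def open_resource(self, resource):
+#             try:
+#                 return io.BytesIO(self._data[resource])
+#             except KeyError:
+#                 raise FileNotFoundError(resource)
+#
+#         def resource_path(self, resource):
+#             raise FileNotFoundError(resource)  # nothing exists on disk
+#
+#         def is_resource(self, path):
+#             return path in self._data
+#
+#         def contents(self):
+#             return iter(self._data)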
+
+
+class TraversalError(Exception):
+ pass
+
+
+@runtime_checkable
+class Traversable(Protocol):
+ """
+ An object with a subset of pathlib.Path methods suitable for
+ traversing directories and opening files.
+
+ Any exceptions that occur when accessing the backing resource
+ may propagate unaltered.
+ """
+
+ @abc.abstractmethod
+ def iterdir(self) -> Iterator["Traversable"]:
+ """
+ Yield Traversable objects in self
+ """
+
+ def read_bytes(self) -> bytes:
+ """
+ Read contents of self as bytes
+ """
+ with self.open('rb') as strm:
+ return strm.read()
+
+ def read_text(self, encoding: Optional[str] = None) -> str:
+ """
+ Read contents of self as text
+ """
+ with self.open(encoding=encoding) as strm:
+ return strm.read()
+
+ @abc.abstractmethod
+ def is_dir(self) -> bool:
+ """
+ Return True if self is a directory
+ """
+
+ @abc.abstractmethod
+ def is_file(self) -> bool:
+ """
+ Return True if self is a file
+ """
+
+ def joinpath(self, *descendants: StrPath) -> "Traversable":
+ """
+ Return Traversable resolved with any descendants applied.
+
+ Each descendant should be a path segment relative to self
+ and each may contain multiple levels separated by
+ ``posixpath.sep`` (``/``).
+ """
+ if not descendants:
+ return self
+ names = itertools.chain.from_iterable(
+ path.parts for path in map(pathlib.PurePosixPath, descendants)
+ )
+ target = next(names)
+ matches = (
+ traversable for traversable in self.iterdir() if traversable.name == target
+ )
+ try:
+ match = next(matches)
+ except StopIteration:
+ raise TraversalError(
+ "Target not found during traversal.", target, list(names)
+ )
+ return match.joinpath(*names)
+
+ def __truediv__(self, child: StrPath) -> "Traversable":
+ """
+ Return Traversable child in self
+ """
+ return self.joinpath(child)
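+
+    # Illustrative note (not part of the upstream class): joinpath() accepts
+    # multi-level segments, so for a Traversable 'root' the following are
+    # equivalent:
+    #
+    #     root.joinpath('sub', 'file.txt')
+    #     root.joinpath('sub/file.txt')
+    #     root / 'sub' / 'file.txt'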
+
+ @abc.abstractmethod
+ def open(self, mode='r', *args, **kwargs):
+ """
+ mode may be 'r' or 'rb' to open as text or binary. Return a handle
+ suitable for reading (same as pathlib.Path.open).
+
+ When opening as text, accepts encoding parameters such as those
+ accepted by io.TextIOWrapper.
+ """
+
+ @property
+ @abc.abstractmethod
+ def name(self) -> str:
+ """
+ The base name of this object without any parent references.
+ """
+
+
+class TraversableResources(ResourceReader):
+ """
+ The required interface for providing traversable
+ resources.
+ """
+
+ @abc.abstractmethod
+ def files(self) -> "Traversable":
+ """Return a Traversable object for the loaded package."""
+
+ def open_resource(self, resource: StrPath) -> io.BufferedReader:
+ return self.files().joinpath(resource).open('rb')
+
+ def resource_path(self, resource: Any) -> NoReturn:
+ raise FileNotFoundError(resource)
+
+ def is_resource(self, path: StrPath) -> bool:
+ return self.files().joinpath(path).is_file()
+
+ def contents(self) -> Iterator[str]:
+ return (item.name for item in self.files().iterdir())
diff --git a/third_party/python/importlib_resources/importlib_resources/py.typed b/third_party/python/importlib_resources/importlib_resources/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/py.typed
diff --git a/third_party/python/importlib_resources/importlib_resources/readers.py b/third_party/python/importlib_resources/importlib_resources/readers.py
new file mode 100644
index 0000000000..51d030a654
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/readers.py
@@ -0,0 +1,144 @@
+import collections
+import itertools
+import pathlib
+import operator
+
+from . import abc
+
+from ._itertools import only
+from ._compat import ZipPath
+
+
+def remove_duplicates(items):
+ return iter(collections.OrderedDict.fromkeys(items))
+
+
+class FileReader(abc.TraversableResources):
+ def __init__(self, loader):
+ self.path = pathlib.Path(loader.path).parent
+
+ def resource_path(self, resource):
+ """
+ Return the file system path to prevent
+ `resources.path()` from creating a temporary
+ copy.
+ """
+ return str(self.path.joinpath(resource))
+
+ def files(self):
+ return self.path
+
+
+class ZipReader(abc.TraversableResources):
+ def __init__(self, loader, module):
+ _, _, name = module.rpartition('.')
+ self.prefix = loader.prefix.replace('\\', '/') + name + '/'
+ self.archive = loader.archive
+
+ def open_resource(self, resource):
+ try:
+ return super().open_resource(resource)
+ except KeyError as exc:
+ raise FileNotFoundError(exc.args[0])
+
+ def is_resource(self, path):
+ """
+ Workaround for `zipfile.Path.is_file` returning true
+ for non-existent paths.
+ """
+ target = self.files().joinpath(path)
+ return target.is_file() and target.exists()
+
+ def files(self):
+ return ZipPath(self.archive, self.prefix)
+
+
+class MultiplexedPath(abc.Traversable):
+ """
+ Given a series of Traversable objects, implement a merged
+ version of the interface across all objects. Useful for
+ namespace packages which may be multihomed at a single
+ name.
+ """
+
+ def __init__(self, *paths):
+ self._paths = list(map(pathlib.Path, remove_duplicates(paths)))
+ if not self._paths:
+ message = 'MultiplexedPath must contain at least one path'
+ raise FileNotFoundError(message)
+ if not all(path.is_dir() for path in self._paths):
+ raise NotADirectoryError('MultiplexedPath only supports directories')
+
+ def iterdir(self):
+ children = (child for path in self._paths for child in path.iterdir())
+ by_name = operator.attrgetter('name')
+ groups = itertools.groupby(sorted(children, key=by_name), key=by_name)
+ return map(self._follow, (locs for name, locs in groups))
+
+ def read_bytes(self):
+ raise FileNotFoundError(f'{self} is not a file')
+
+ def read_text(self, *args, **kwargs):
+ raise FileNotFoundError(f'{self} is not a file')
+
+ def is_dir(self):
+ return True
+
+ def is_file(self):
+ return False
+
+ def joinpath(self, *descendants):
+ try:
+ return super().joinpath(*descendants)
+ except abc.TraversalError:
+ # One of the paths did not resolve (a directory does not exist).
+ # Just return something that will not exist.
+ return self._paths[0].joinpath(*descendants)
+
+ @classmethod
+ def _follow(cls, children):
+ """
+ Construct a MultiplexedPath if needed.
+
+        If children contains a sole element, return it.
+        Otherwise, return a MultiplexedPath over the items, unless one of
+        the items is not a directory, in which case return the first item.
+ """
+ subdirs, one_dir, one_file = itertools.tee(children, 3)
+
+ try:
+ return only(one_dir)
+ except ValueError:
+ try:
+ return cls(*subdirs)
+ except NotADirectoryError:
+ return next(one_file)
+
+ def open(self, *args, **kwargs):
+ raise FileNotFoundError(f'{self} is not a file')
+
+ @property
+ def name(self):
+ return self._paths[0].name
+
+ def __repr__(self):
+ paths = ', '.join(f"'{path}'" for path in self._paths)
+ return f'MultiplexedPath({paths})'
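+
+    # Illustrative sketch (not part of the upstream class): merging two
+    # hypothetical portions of a namespace package.
+    #
+    #     merged = MultiplexedPath('/site-a/pkg', '/site-b/pkg')
+    #     names = {child.name for child in merged.iterdir()}
+    #     # children of both directories, de-duplicated by name via _follow()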
+
+
+class NamespaceReader(abc.TraversableResources):
+ def __init__(self, namespace_path):
+ if 'NamespacePath' not in str(namespace_path):
+ raise ValueError('Invalid path')
+ self.path = MultiplexedPath(*list(namespace_path))
+
+ def resource_path(self, resource):
+ """
+ Return the file system path to prevent
+ `resources.path()` from creating a temporary
+ copy.
+ """
+ return str(self.path.joinpath(resource))
+
+ def files(self):
+ return self.path
diff --git a/third_party/python/importlib_resources/importlib_resources/simple.py b/third_party/python/importlib_resources/importlib_resources/simple.py
new file mode 100644
index 0000000000..7770c922c8
--- /dev/null
+++ b/third_party/python/importlib_resources/importlib_resources/simple.py
@@ -0,0 +1,106 @@
+"""
+Interface adapters for low-level readers.
+"""
+
+import abc
+import io
+import itertools
+from typing import BinaryIO, List
+
+from .abc import Traversable, TraversableResources
+
+
+class SimpleReader(abc.ABC):
+ """
+ The minimum, low-level interface required from a resource
+ provider.
+ """
+
+ @property
+ @abc.abstractmethod
+ def package(self) -> str:
+ """
+ The name of the package for which this reader loads resources.
+ """
+
+ @abc.abstractmethod
+ def children(self) -> List['SimpleReader']:
+ """
+ Obtain an iterable of SimpleReader for available
+ child containers (e.g. directories).
+ """
+
+ @abc.abstractmethod
+ def resources(self) -> List[str]:
+ """
+ Obtain available named resources for this virtual package.
+ """
+
+ @abc.abstractmethod
+ def open_binary(self, resource: str) -> BinaryIO:
+ """
+ Obtain a File-like for a named resource.
+ """
+
+ @property
+ def name(self):
+ return self.package.split('.')[-1]
+
+
+class ResourceContainer(Traversable):
+ """
+ Traversable container for a package's resources via its reader.
+ """
+
+ def __init__(self, reader: SimpleReader):
+ self.reader = reader
+
+ def is_dir(self):
+ return True
+
+ def is_file(self):
+ return False
+
+ def iterdir(self):
+        files = (ResourceHandle(self, name) for name in self.reader.resources())
+ dirs = map(ResourceContainer, self.reader.children())
+ return itertools.chain(files, dirs)
+
+ def open(self, *args, **kwargs):
+ raise IsADirectoryError()
+
+
+class ResourceHandle(Traversable):
+ """
+ Handle to a named resource in a ResourceReader.
+ """
+
+ def __init__(self, parent: ResourceContainer, name: str):
+ self.parent = parent
+ self.name = name # type: ignore
+
+ def is_file(self):
+ return True
+
+ def is_dir(self):
+ return False
+
+ def open(self, mode='r', *args, **kwargs):
+ stream = self.parent.reader.open_binary(self.name)
+ if 'b' not in mode:
+            stream = io.TextIOWrapper(stream, *args, **kwargs)
+ return stream
+
+ def joinpath(self, name):
+ raise RuntimeError("Cannot traverse into a resource")
+
+
+class TraversableReader(TraversableResources, SimpleReader):
+ """
+ A TraversableResources based on SimpleReader. Resource providers
+ may derive from this class to provide the TraversableResources
+ interface by supplying the SimpleReader interface.
+ """
+
+ def files(self):
+ return ResourceContainer(self)
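+
+
+# Illustrative sketch (not part of the upstream module): a minimal provider
+# built on TraversableReader, serving hypothetical in-memory data.
+#
+#     class MemReader(TraversableReader):
+#         _data = {'greeting.txt': b'hello'}
+#
+#         @property
+#         def package(self):
+#             return 'mempkg'
+#
+#         def children(self):
+#             return []
+#
+#         def resources(self):
+#             return list(self._data)
+#
+#         def open_binary(self, resource):
+#             return io.BytesIO(self._data[resource])
+#
+#     handle = next(iter(MemReader().files().iterdir()))
+#     handle.read_text()  # 'hello'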
diff --git a/third_party/python/jinxed/jinxed-1.2.0.dist-info/LICENSE b/third_party/python/jinxed/jinxed-1.2.0.dist-info/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/jinxed/jinxed-1.2.0.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/jinxed/jinxed-1.2.0.dist-info/METADATA b/third_party/python/jinxed/jinxed-1.2.0.dist-info/METADATA
new file mode 100644
index 0000000000..ae98a1b6fb
--- /dev/null
+++ b/third_party/python/jinxed/jinxed-1.2.0.dist-info/METADATA
@@ -0,0 +1,112 @@
+Metadata-Version: 2.1
+Name: jinxed
+Version: 1.2.0
+Summary: Jinxed Terminal Library
+Home-page: https://github.com/Rockhopper-Technologies/jinxed
+Author: Avram Lubkin
+Author-email: avylove@rockhopper.net
+Maintainer: Avram Lubkin
+Maintainer-email: avylove@rockhopper.net
+License: MPLv2.0
+Keywords: terminal console blessed curses
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Terminals
+License-File: LICENSE
+Requires-Dist: ansicon ; platform_system == "Windows"
+
+.. start-badges
+
+| |docs| |appveyor| |travis| |codecov|
+| |pypi| |supported-versions| |supported-implementations|
+| |linux| |windows| |mac| |bsd|
+
+.. |docs| image:: https://img.shields.io/readthedocs/jinxed.svg?style=plastic&logo=read-the-docs
+ :target: https://jinxed.readthedocs.org
+ :alt: Documentation Status
+
+.. |appveyor| image:: https://img.shields.io/appveyor/ci/Rockhopper-Technologies/jinxed.svg?style=plastic&logo=appveyor
+ :target: https://ci.appveyor.com/project/Rockhopper-Technologies/jinxed
+ :alt: Appveyor Build Status
+
+.. |travis| image:: https://img.shields.io/travis/com/Rockhopper-Technologies/jinxed.svg?style=plastic&logo=travis
+ :target: https://travis-ci.com/Rockhopper-Technologies/jinxed
+ :alt: Travis-CI Build Status
+
+.. |codecov| image:: https://img.shields.io/codecov/c/github/Rockhopper-Technologies/jinxed.svg?style=plastic&logo=codecov
+ :target: https://codecov.io/gh/Rockhopper-Technologies/jinxed
+ :alt: Coverage Status
+
+.. |pypi| image:: https://img.shields.io/pypi/v/jinxed.svg?style=plastic&logo=pypi
+ :alt: PyPI Package latest release
+ :target: https://pypi.python.org/pypi/jinxed
+
+.. |supported-versions| image:: https://img.shields.io/pypi/pyversions/jinxed.svg?style=plastic&logo=pypi
+ :alt: Supported versions
+ :target: https://pypi.python.org/pypi/jinxed
+
+.. |supported-implementations| image:: https://img.shields.io/pypi/implementation/jinxed.svg?style=plastic&logo=pypi
+ :alt: Supported implementations
+ :target: https://pypi.python.org/pypi/jinxed
+
+.. |linux| image:: https://img.shields.io/badge/Linux-yes-success?style=plastic&logo=linux
+ :alt: Linux supported
+ :target: https://pypi.python.org/pypi/jinxed
+
+.. |windows| image:: https://img.shields.io/badge/Windows-yes-success?style=plastic&logo=windows
+ :alt: Windows supported
+ :target: https://pypi.python.org/pypi/jinxed
+
+.. |mac| image:: https://img.shields.io/badge/MacOS-yes-success?style=plastic&logo=apple
+ :alt: MacOS supported
+ :target: https://pypi.python.org/pypi/jinxed
+
+.. |bsd| image:: https://img.shields.io/badge/BSD-yes-success?style=plastic&logo=freebsd
+ :alt: BSD supported
+ :target: https://pypi.python.org/pypi/jinxed
+
+.. end-badges
+
+
+Overview
+========
+
+Jinxed is an implementation of a subset of the Python curses_ library.
+It provides pure Python implementations of terminfo functions such as `tigetstr()`_
+and `tparm()`_ as well as convenience methods for working with Windows terminals.
+
+Jinxed was initially written to support Blessed_ on Windows, but will work on all platforms.
+
+
+Installation
+============
+
+.. code-block:: console
+
+ $ pip install jinxed
+
+
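+Usage
+=====
+
+As a brief, illustrative sketch (not from the upstream README), terminfo
+lookups mirror the curses API:
+
+.. code-block:: python
+
+    import jinxed
+
+    jinxed.setupterm()                # detect and initialize the terminal
+    clear = jinxed.tigetstr('clear')  # look up a capability, returns bytes
+
+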
+Documentation
+=============
+
+Jinxed documentation can be found on `Read the Docs <https://jinxed.readthedocs.io/en/stable/>`_.
+
+.. _Blessed: https://pypi.org/project/blessed
+.. _curses: https://docs.python.org/library/curses.html
+.. _tigetstr(): https://docs.python.org/library/curses.html#curses.tigetstr
+.. _tparm(): https://docs.python.org/library/curses.html#curses.tparm
+
diff --git a/third_party/python/jinxed/jinxed-1.2.0.dist-info/RECORD b/third_party/python/jinxed/jinxed-1.2.0.dist-info/RECORD
new file mode 100644
index 0000000000..2929aa63a9
--- /dev/null
+++ b/third_party/python/jinxed/jinxed-1.2.0.dist-info/RECORD
@@ -0,0 +1,18 @@
+jinxed/__init__.py,sha256=SiIICY1hCXKflXonUVMsB6tnX0HlBOqmTvStTGJlkWU,1047
+jinxed/_keys.py,sha256=2UeEOVCPBgy5fv6HKaW_TxmHI03QwqEv_TiKGB5lSCI,2535
+jinxed/_terminal.py,sha256=kdYG-8E9rIgMCXxzEVA19-q01EYm2TQWxvr5vCPnnyY,3330
+jinxed/_tparm.py,sha256=sn1P8_4VsSsgHYgco1va-Bk2eQTColtPTj3aAa3QR7A,8833
+jinxed/_util.py,sha256=kcxjcHhGX7cqrlYlBYc03dsez3RCx899OzPgQ_L4jmE,1462
+jinxed/has_key.py,sha256=J9nU62s2KZcndOud1b8x_B3uBsZonGfqPLWan9Kh5Jw,4269
+jinxed/win32.py,sha256=oIpfwPRAAq4kBJGXA5pnaapaYW7ubO72sF2HEZeY2uM,10256
+jinxed/terminfo/__init__.py,sha256=63dZbYG1TkSJwWbm_2rEfmBjMalOO-gBj1pHEvCaTNg,5088
+jinxed/terminfo/ansicon.py,sha256=X3dLufLBpFwX8ouKJMt7Ia3Xu7rCKxKI9pQEYFlAD5E,4313
+jinxed/terminfo/vtwin10.py,sha256=W4sqWtH0p-lzd-5u0q_wkePNhbKtJX_UeDksPBnYp5o,2057
+jinxed/terminfo/xterm.py,sha256=NIPuVWIWvhF4ClQxewGlRnVxBKq2j1iV1knKdG7WA1I,30525
+jinxed/terminfo/xterm_256color.py,sha256=Xi6I7LbIy2F4hmpzXf51YK1utdWWKUNqEWlpHb39isM,792
+jinxed/terminfo/xterm_256colors.py,sha256=02ci_cybpc_qNw-guktEth-JduVTspDCats4QaEtOjQ,793
+jinxed-1.2.0.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+jinxed-1.2.0.dist-info/METADATA,sha256=pXE3x-jb9pFccBO8qpKUp2HSbBc_JmNoJ4XfcOVPxgQ,4272
+jinxed-1.2.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+jinxed-1.2.0.dist-info/top_level.txt,sha256=B6kZZ8ObbPHjOIuhpS6zxE9lGfnpHp5KthpsDuXIXR0,7
+jinxed-1.2.0.dist-info/RECORD,,
diff --git a/third_party/python/jinxed/jinxed-1.2.0.dist-info/WHEEL b/third_party/python/jinxed/jinxed-1.2.0.dist-info/WHEEL
new file mode 100644
index 0000000000..01b8fc7d4a
--- /dev/null
+++ b/third_party/python/jinxed/jinxed-1.2.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/jinxed/jinxed-1.2.0.dist-info/top_level.txt b/third_party/python/jinxed/jinxed-1.2.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..6f38c7da7f
--- /dev/null
+++ b/third_party/python/jinxed/jinxed-1.2.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+jinxed
diff --git a/third_party/python/jinxed/jinxed/__init__.py b/third_party/python/jinxed/jinxed/__init__.py
new file mode 100644
index 0000000000..19b826930b
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/__init__.py
@@ -0,0 +1,39 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 - 2021 Avram Lubkin, All Rights Reserved
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Jinxed Terminal Library
+
+Jinxed is an implementation of a subset of the Python curses library for Windows
+
+Other libraries implement the full curses stack. Jinxed is intended primarily for libraries
+that need to access terminfo functions such as tigetstr() and tparm().
+"""
+
+# flake8: noqa: F401
+
+from jinxed._keys import *
+from jinxed._tparm import tparm
+from jinxed._terminal import setupterm, tigetflag, tigetnum, tigetstr
+from jinxed._util import error, IS_WINDOWS
+
+if IS_WINDOWS: # pragma: no branch
+ from jinxed.win32 import get_term # pragma: no cover
+else:
+ from jinxed._util import get_term
+
+
+__version__ = '1.2.0'
+
+COLOR_BLACK = 0
+COLOR_RED = 1
+COLOR_GREEN = 2
+COLOR_YELLOW = 3
+COLOR_BLUE = 4
+COLOR_MAGENTA = 5
+COLOR_CYAN = 6
+COLOR_WHITE = 7
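+
+
+# Illustrative sketch (not part of the upstream module): the color constants
+# plug into tparm() just as they do with curses.
+#
+#     setupterm()
+#     setaf = tigetstr('setaf')       # "set ANSI foreground" capability
+#     red = tparm(setaf, COLOR_RED)   # byte sequence switching output to red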
diff --git a/third_party/python/jinxed/jinxed/_keys.py b/third_party/python/jinxed/jinxed/_keys.py
new file mode 100644
index 0000000000..559e0f5879
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/_keys.py
@@ -0,0 +1,164 @@
+"""
+Key code constants
+
+Most of this information came from the terminfo man pages, part of ncurses
+More information on ncurses can be found at:
+https://www.gnu.org/software/ncurses/ncurses.html
+"""
+
+KEY_A1 = 348
+KEY_A3 = 349
+KEY_B2 = 350
+KEY_BACKSPACE = 263
+KEY_BEG = 354
+KEY_BREAK = 257
+KEY_BTAB = 353
+KEY_C1 = 351
+KEY_C3 = 352
+KEY_CANCEL = 355
+KEY_CATAB = 342
+KEY_CLEAR = 333
+KEY_CLOSE = 356
+KEY_COMMAND = 357
+KEY_COPY = 358
+KEY_CREATE = 359
+KEY_CTAB = 341
+KEY_DC = 330
+KEY_DL = 328
+KEY_DOWN = 258
+KEY_EIC = 332
+KEY_END = 360
+KEY_ENTER = 343
+KEY_EOL = 335
+KEY_EOS = 334
+KEY_EXIT = 361
+KEY_F0 = 264
+KEY_F1 = 265
+KEY_F10 = 274
+KEY_F11 = 275
+KEY_F12 = 276
+KEY_F13 = 277
+KEY_F14 = 278
+KEY_F15 = 279
+KEY_F16 = 280
+KEY_F17 = 281
+KEY_F18 = 282
+KEY_F19 = 283
+KEY_F2 = 266
+KEY_F20 = 284
+KEY_F21 = 285
+KEY_F22 = 286
+KEY_F23 = 287
+KEY_F24 = 288
+KEY_F25 = 289
+KEY_F26 = 290
+KEY_F27 = 291
+KEY_F28 = 292
+KEY_F29 = 293
+KEY_F3 = 267
+KEY_F30 = 294
+KEY_F31 = 295
+KEY_F32 = 296
+KEY_F33 = 297
+KEY_F34 = 298
+KEY_F35 = 299
+KEY_F36 = 300
+KEY_F37 = 301
+KEY_F38 = 302
+KEY_F39 = 303
+KEY_F4 = 268
+KEY_F40 = 304
+KEY_F41 = 305
+KEY_F42 = 306
+KEY_F43 = 307
+KEY_F44 = 308
+KEY_F45 = 309
+KEY_F46 = 310
+KEY_F47 = 311
+KEY_F48 = 312
+KEY_F49 = 313
+KEY_F5 = 269
+KEY_F50 = 314
+KEY_F51 = 315
+KEY_F52 = 316
+KEY_F53 = 317
+KEY_F54 = 318
+KEY_F55 = 319
+KEY_F56 = 320
+KEY_F57 = 321
+KEY_F58 = 322
+KEY_F59 = 323
+KEY_F6 = 270
+KEY_F60 = 324
+KEY_F61 = 325
+KEY_F62 = 326
+KEY_F63 = 327
+KEY_F7 = 271
+KEY_F8 = 272
+KEY_F9 = 273
+KEY_FIND = 362
+KEY_HELP = 363
+KEY_HOME = 262
+KEY_IC = 331
+KEY_IL = 329
+KEY_LEFT = 260
+KEY_LL = 347
+KEY_MARK = 364
+KEY_MAX = 511
+KEY_MESSAGE = 365
+KEY_MIN = 257
+KEY_MOUSE = 409
+KEY_MOVE = 366
+KEY_NEXT = 367
+KEY_NPAGE = 338
+KEY_OPEN = 368
+KEY_OPTIONS = 369
+KEY_PPAGE = 339
+KEY_PREVIOUS = 370
+KEY_PRINT = 346
+KEY_REDO = 371
+KEY_REFERENCE = 372
+KEY_REFRESH = 373
+KEY_REPLACE = 374
+KEY_RESET = 345
+KEY_RESIZE = 410
+KEY_RESTART = 375
+KEY_RESUME = 376
+KEY_RIGHT = 261
+KEY_SAVE = 377
+KEY_SBEG = 378
+KEY_SCANCEL = 379
+KEY_SCOMMAND = 380
+KEY_SCOPY = 381
+KEY_SCREATE = 382
+KEY_SDC = 383
+KEY_SDL = 384
+KEY_SELECT = 385
+KEY_SEND = 386
+KEY_SEOL = 387
+KEY_SEXIT = 388
+KEY_SF = 336
+KEY_SFIND = 389
+KEY_SHELP = 390
+KEY_SHOME = 391
+KEY_SIC = 392
+KEY_SLEFT = 393
+KEY_SMESSAGE = 394
+KEY_SMOVE = 395
+KEY_SNEXT = 396
+KEY_SOPTIONS = 397
+KEY_SPREVIOUS = 398
+KEY_SPRINT = 399
+KEY_SR = 337
+KEY_SREDO = 400
+KEY_SREPLACE = 401
+KEY_SRESET = 344
+KEY_SRIGHT = 402
+KEY_SRSUME = 403
+KEY_SSAVE = 404
+KEY_SSUSPEND = 405
+KEY_STAB = 340
+KEY_SUNDO = 406
+KEY_SUSPEND = 407
+KEY_UNDO = 408
+KEY_UP = 259
diff --git a/third_party/python/jinxed/jinxed/_terminal.py b/third_party/python/jinxed/jinxed/_terminal.py
new file mode 100644
index 0000000000..524a67afc6
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/_terminal.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Avram Lubkin, All Rights Reserved
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Provides a Terminal class, primarily for accessing functions that depend on a
+terminal which has previously been set up, and module-level wrappers for them
+"""
+
+import importlib
+import io
+import platform
+import sys
+
+from jinxed.terminfo import BOOL_CAPS, NUM_CAPS
+from jinxed._util import BASESTRING, error, raise_from_none
+
+if platform.system() == 'Windows': # pragma: no branch
+ from jinxed.win32 import get_term # pragma: no cover
+else:
+ from jinxed._util import get_term
+
+
+TERM = None
+
+
+class Terminal(object):
+ """
+ Persistent terminal object for functions that require a previously configured state
+ """
+
+ def __init__(self, term=None, fd=-1): # pylint: disable=invalid-name
+
+ # Type check for term
+ if term is not None and not isinstance(term, BASESTRING):
+ raise TypeError('term must be a string or None, not %s' % type(term).__name__)
+
+ # Type check and default handling for fd
+ if fd == -1:
+ try:
+ self.stream_fd = sys.stdout.fileno()
+ except (AttributeError, TypeError, io.UnsupportedOperation):
+ self.stream_fd = None
+ elif not isinstance(fd, int):
+ raise TypeError('fd must be an integer, not %s' % type(fd).__name__)
+ else:
+ self.stream_fd = fd
+
+ # Try to dynamically determine terminal type
+ if term is None:
+ term = get_term(self.stream_fd)
+
+ try:
+ self.terminfo = importlib.import_module('jinxed.terminfo.%s' % term.replace('-', '_'))
+ except ImportError:
+ raise_from_none(error('Could not find terminal %s' % term))
+
+ def tigetstr(self, capname):
+ """
+ Reimplementation of curses.tigetstr()
+ """
+
+ return self.terminfo.STR_CAPS.get(capname, None)
+
+ def tigetnum(self, capname):
+ """
+ Reimplementation of curses.tigetnum()
+ """
+
+ return self.terminfo.NUM_CAPS.get(capname, -1 if capname in NUM_CAPS else -2)
+
+ def tigetflag(self, capname):
+ """
+ Reimplementation of curses.tigetflag()
+ """
+
+ if capname in self.terminfo.BOOL_CAPS:
+ return 1
+ if capname in BOOL_CAPS:
+ return 0
+ return -1
+
+
+def setupterm(term=None, fd=-1): # pylint: disable=invalid-name
+ """
+ Reimplementation of :py:func:`curses.setupterm`
+ """
+
+ global TERM # pylint: disable=global-statement
+ TERM = Terminal(term, fd)
+
+
+def tigetflag(capname):
+ """
+ Reimplementation of :py:func:`curses.tigetflag`
+ """
+
+ if TERM is None:
+ raise error('Must call setupterm() first')
+ return TERM.tigetflag(capname)
+
+
+def tigetnum(capname):
+ """
+ Reimplementation of :py:func:`curses.tigetnum`
+ """
+
+ if TERM is None:
+ raise error('Must call setupterm() first')
+ return TERM.tigetnum(capname)
+
+
+def tigetstr(capname):
+ """
+ Reimplementation of :py:func:`curses.tigetstr`
+ """
+
+ if TERM is None:
+ raise error('Must call setupterm() first')
+ return TERM.tigetstr(capname)
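
A short sketch of the module-level state above: setupterm() must run before the
tiget* wrappers, which otherwise raise the jinxed error class:

    import jinxed

    try:
        jinxed.tigetnum('colors')
    except jinxed.error as exc:
        print(exc)                    # Must call setupterm() first

    jinxed.setupterm('xterm')
    print(jinxed.tigetnum('colors'))  # 8
    print(jinxed.tigetstr('cuu1'))    # b'\x1b[A'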
diff --git a/third_party/python/jinxed/jinxed/_tparm.py b/third_party/python/jinxed/jinxed/_tparm.py
new file mode 100644
index 0000000000..8874397a8f
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/_tparm.py
@@ -0,0 +1,291 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 Avram Lubkin, All Rights Reserved
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+A pure Python implementation of tparm
+Based on documentation in man(5) terminfo and comparing behavior of curses.tparm
+"""
+
+from collections import deque
+import operator
+import re
+
+
+OPERATORS = {b'+': operator.add,
+ b'-': operator.sub,
+ b'*': operator.mul,
+ b'/': operator.floordiv,
+ b'm': operator.mod,
+ b'&': operator.and_,
+ b'|': operator.or_,
+ b'^': operator.xor,
+ b'=': operator.eq,
+ b'>': operator.gt,
+ b'<': operator.lt,
+ b'~': operator.inv,
+ b'!': operator.not_,
+ b'A': lambda x, y: bool(x and y),
+ b'O': lambda x, y: bool(x or y)}
+
+FILTERS = (('_literal_percent', br'%%'),
+ ('_pop_c', br'%c'),
+ ('_increment_one_two', br'%i'),
+ ('_binary_op', br'%([\+\-\*/m\&\|\^=><AO])'),
+ ('_unary_op', br'%([\~!])'),
+ ('_push_param', br'%p([1-9]\d*)'),
+ ('_set_dynamic', br'%P([a-z])'),
+ ('_get_dynamic', br'%g([a-z])'),
+ ('_set_static', br'%P([A-Z])'),
+ ('_get_static', br'%g([A-Z])'),
+ ('_char_constant', br"%'(.)'"),
+ ('_int_constant', br'%{(\d+)}'),
+ ('_push_len', br'%l'),
+ ('_cond_if', br'%\?(.+?)(?=%t)'),
+ ('_cond_then_else', br'%t(.+?)(?=%e)'),
+ ('_cond_then_fi', br'%t(.+?)(?=%;)'),
+ ('_cond_else', br'%e(.+?)(?=%;|$)'),
+ ('_cond_fi', br'%;'),
+ ('_printf', br'%:?[^%]*?[doxXs]'),
+ ('_unmatched', br'%.'),
+ ('_literal', br'[^%]+'))
+
+PATTERNS = tuple((re.compile(pattern), filter_) for filter_, pattern in FILTERS)
+NULL = type('Null', (int,), {})(0)
+
+
+class TParm(object): # pylint: disable=useless-object-inheritance
+ """
+ Class to hold tparm methods and persist variables between calls
+ """
+
+ def __init__(self, *params, **kwargs):
+
+ self.rtn = b''
+ self.stack = deque()
+
+        # The spec for tparm allows C string parameters, but most implementations don't
+ # The reference code makes a best effort to determine which parameters require strings
+ # We'll allow them without trying to predict
+ for param in params:
+ if not isinstance(param, (int, bytes)):
+ raise TypeError('Parameters must be integers or bytes, not %s' %
+ type(param).__name__)
+ self.params = list(params)
+
+ static = kwargs.get('static', None)
+ self.static = {} if static is None else static
+        dynamic = kwargs.get('dynamic', None)
+ self.dynamic = {} if dynamic is None else dynamic
+
+ def __call__(self, string, *params):
+ return self.child(*params).parse(string)
+
+ def _literal_percent(self, group): # pylint: disable=unused-argument
+ """
+ Literal percent sign
+ """
+ self.rtn += b'%'
+
+ def _pop_c(self, group): # pylint: disable=unused-argument
+ """
+ Return pop() like %c in printf
+ """
+
+ try:
+ value = self.stack.pop()
+ except IndexError:
+ value = NULL
+
+ # Treat null as 0x80
+ if value is NULL:
+ value = 0x80
+
+ self.rtn += b'%c' % value
+
+ def _increment_one_two(self, group): # pylint: disable=unused-argument
+ """
+ Add 1 to first two parameters
+ Missing parameters are treated as 0's
+ """
+ for index in (0, 1):
+ try:
+ self.params[index] += 1
+ except IndexError:
+ self.params.append(1)
+
+ def _binary_op(self, group):
+ """
+ Perform a binary operation on the last two items on the stack
+ The order of evaluation is the order the items were placed on the stack
+ """
+ second_val = self.stack.pop()
+ self.stack.append(OPERATORS[group](self.stack.pop(), second_val))
+
+ def _unary_op(self, group):
+ """
+ Perform a unary operation on the last item on the stack
+ """
+ self.stack.append(OPERATORS[group](self.stack.pop()))
+
+ def _push_param(self, group):
+ """
+ Push a parameter onto the stack
+ If the parameter is missing, push Null
+ """
+ try:
+ self.stack.append(self.params[int(group) - 1])
+ except IndexError:
+ self.stack.append(NULL)
+
+ def _set_dynamic(self, group):
+ """
+        Set a dynamic variable to pop()
+ """
+ self.dynamic[group] = self.stack.pop()
+
+ def _get_dynamic(self, group):
+ """
+ Push the value of a dynamic variable onto the stack
+ """
+ self.stack.append(self.dynamic.get(group, NULL))
+
+ def _set_static(self, group):
+ """
+        Set a static variable to pop()
+ """
+ self.static[group] = self.stack.pop()
+
+ def _get_static(self, group):
+ """
+ Push the value of a static variable onto the stack
+ """
+ self.stack.append(self.static.get(group, NULL))
+
+ def _char_constant(self, group):
+ """
+        Push a character constant onto the stack
+ """
+ self.stack.append(ord(group))
+
+ def _int_constant(self, group):
+ """
+ Push an integer constant onto the stack
+ """
+ self.stack.append(int(group))
+
+ def _push_len(self, group): # pylint: disable=unused-argument
+ """
+ Replace the last item on the stack with its length
+ """
+ self.stack.append(len(self.stack.pop()))
+
+ def _cond_if(self, group):
+ """
+ Recursively evaluate the body of the if statement
+ """
+ self.parse(group)
+
+ def _cond_then_else(self, group):
+ """
+ If the last item on the stack is True,
+        recursively evaluate the then clause
+
+ Do not consume last item on stack
+ """
+ if self.stack[-1]:
+ self.parse(group)
+
+ def _cond_then_fi(self, group):
+ """
+ If the last item on the stack is True,
+        recursively evaluate the then clause
+
+ Always consume last item on stack
+ """
+ if self.stack.pop():
+ self.parse(group)
+
+ def _cond_else(self, group):
+ """
+ If the last item on the stack is False,
+        recursively evaluate the body of the else clause
+
+ Always consume last item on stack
+ """
+ if not self.stack.pop():
+ self.parse(group)
+
+ def _cond_fi(self, group): # pylint: disable=unused-argument
+ """
+ End if statement
+ """
+
+ def _printf(self, group):
+ """
+ Subset of printf-like formatting
+ """
+
+ # : is an escape to prevent flags from being treated as % operators, ignore
+ # Python 2 returns as ':', Python 3 returns as 58
+ if group[1] in (b':', 58):
+ group = b'%' + group[2:]
+
+ try:
+ value = self.stack.pop()
+ except IndexError:
+ value = NULL
+
+ # Treat null as empty string when string formatting
+ # Python 2 returns as 's', Python 3 returns as 115
+ if value is NULL and group[-1] in (b's', 115):
+ value = b''
+
+ self.rtn += group % value
+
+ def _unmatched(self, group): # pylint: disable=unused-argument
+ """
+ Escape pattern with no spec is skipped
+ """
+
+ def _literal(self, group):
+ """
+ Anything not prefaced with a known pattern spec is treated literally
+ """
+ self.rtn += group
+
+ def parse(self, string):
+ """
+ Parsing loop
+ Evaluate regex patterns in order until a pattern is matched
+ """
+
+ if not isinstance(string, bytes):
+ raise TypeError("A bytes-like object is required, not '%s'" % type(string).__name__)
+
+ index = 0
+ length = len(string)
+
+ while index < length:
+ for filt, meth in PATTERNS: # pragma: no branch
+ match = re.match(filt, string[index:])
+ if match:
+ group = match.groups()[-1] if match.groups() else match.group(0)
+ getattr(self, meth)(group)
+ index += match.end()
+ break
+
+ return self.rtn
+
+ def child(self, *params):
+ """
+ Return a new instance with the same variables, but different parameters
+ """
+ return self.__class__(*params, static=self.static, dynamic=self.dynamic)
+
+
+tparm = TParm() # pylint: disable=invalid-name
+"""Reimplementation of :py:func:`curses.tparm`"""
diff --git a/third_party/python/jinxed/jinxed/_util.py b/third_party/python/jinxed/jinxed/_util.py
new file mode 100644
index 0000000000..7d9dd2e255
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/_util.py
@@ -0,0 +1,52 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 - 2021 Avram Lubkin, All Rights Reserved
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Utility objects
+"""
+
+import os
+import platform
+import sys
+
+if sys.version_info[:2] < (3, 3): # pragma: no branch
+ import mock # pragma: no cover # pylint: disable=import-error, unused-import
+else:
+ from unittest import mock # noqa: F401 # pylint: disable=unused-import
+
+if sys.version_info[0] < 3: # pragma: no branch
+ BASESTRING = basestring # pragma: no cover # noqa: F821 # pylint: disable=undefined-variable
+else:
+ BASESTRING = str
+
+IS_WINDOWS = platform.system() == 'Windows'
+
+
+class error(Exception): # pylint: disable=invalid-name
+ """
+ Generic class for Jinxed errors
+ """
+
+
+def get_term(*args, **kwargs): # pylint: disable=unused-argument
+ """
+ A lightweight stand-in for win32.get_term() for non-Windows platforms
+    Returns the value of the TERM environment variable, or 'unknown'
+ """
+
+ return os.environ.get('TERM', 'unknown')
+
+
+def raise_from_none(exc): # pragma: no cover
+ """
+ Convenience function to raise from None in a Python 2/3 compatible manner
+ """
+ raise exc
+
+
+if sys.version_info[0] >= 3: # pragma: no branch
+ exec('def raise_from_none(exc):\n raise exc from None') # pylint: disable=exec-used
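
On Python 3 the exec() above rebinds raise_from_none so that chained-exception
context is suppressed; a quick sketch of the observable difference:

    from jinxed._util import error, raise_from_none

    try:
        try:
            raise ImportError('no such terminfo module')
        except ImportError:
            raise_from_none(error('Could not find terminal foo'))
    except error as exc:
        print(exc.__cause__)             # None
        print(exc.__suppress_context__)  # True, so the ImportError is hidden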
diff --git a/third_party/python/jinxed/jinxed/has_key.py b/third_party/python/jinxed/jinxed/has_key.py
new file mode 100644
index 0000000000..2c9225444d
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/has_key.py
@@ -0,0 +1,158 @@
+"""
+Mapping of numeric key constants to terminfo capability names
+"""
+
+from jinxed import _keys
+
+
+_capability_names = { # pylint: disable=invalid-name
+ _keys.KEY_A1: 'ka1',
+ _keys.KEY_A3: 'ka3',
+ _keys.KEY_B2: 'kb2',
+ _keys.KEY_BACKSPACE: 'kbs',
+ _keys.KEY_BEG: 'kbeg',
+ _keys.KEY_BTAB: 'kcbt',
+ _keys.KEY_C1: 'kc1',
+ _keys.KEY_C3: 'kc3',
+ _keys.KEY_CANCEL: 'kcan',
+ _keys.KEY_CATAB: 'ktbc',
+ _keys.KEY_CLEAR: 'kclr',
+ _keys.KEY_CLOSE: 'kclo',
+ _keys.KEY_COMMAND: 'kcmd',
+ _keys.KEY_COPY: 'kcpy',
+ _keys.KEY_CREATE: 'kcrt',
+ _keys.KEY_CTAB: 'kctab',
+ _keys.KEY_DC: 'kdch1',
+ _keys.KEY_DL: 'kdl1',
+ _keys.KEY_DOWN: 'kcud1',
+ _keys.KEY_EIC: 'krmir',
+ _keys.KEY_END: 'kend',
+ _keys.KEY_ENTER: 'kent',
+ _keys.KEY_EOL: 'kel',
+ _keys.KEY_EOS: 'ked',
+ _keys.KEY_EXIT: 'kext',
+ _keys.KEY_F0: 'kf0',
+ _keys.KEY_F1: 'kf1',
+ _keys.KEY_F10: 'kf10',
+ _keys.KEY_F11: 'kf11',
+ _keys.KEY_F12: 'kf12',
+ _keys.KEY_F13: 'kf13',
+ _keys.KEY_F14: 'kf14',
+ _keys.KEY_F15: 'kf15',
+ _keys.KEY_F16: 'kf16',
+ _keys.KEY_F17: 'kf17',
+ _keys.KEY_F18: 'kf18',
+ _keys.KEY_F19: 'kf19',
+ _keys.KEY_F2: 'kf2',
+ _keys.KEY_F20: 'kf20',
+ _keys.KEY_F21: 'kf21',
+ _keys.KEY_F22: 'kf22',
+ _keys.KEY_F23: 'kf23',
+ _keys.KEY_F24: 'kf24',
+ _keys.KEY_F25: 'kf25',
+ _keys.KEY_F26: 'kf26',
+ _keys.KEY_F27: 'kf27',
+ _keys.KEY_F28: 'kf28',
+ _keys.KEY_F29: 'kf29',
+ _keys.KEY_F3: 'kf3',
+ _keys.KEY_F30: 'kf30',
+ _keys.KEY_F31: 'kf31',
+ _keys.KEY_F32: 'kf32',
+ _keys.KEY_F33: 'kf33',
+ _keys.KEY_F34: 'kf34',
+ _keys.KEY_F35: 'kf35',
+ _keys.KEY_F36: 'kf36',
+ _keys.KEY_F37: 'kf37',
+ _keys.KEY_F38: 'kf38',
+ _keys.KEY_F39: 'kf39',
+ _keys.KEY_F4: 'kf4',
+ _keys.KEY_F40: 'kf40',
+ _keys.KEY_F41: 'kf41',
+ _keys.KEY_F42: 'kf42',
+ _keys.KEY_F43: 'kf43',
+ _keys.KEY_F44: 'kf44',
+ _keys.KEY_F45: 'kf45',
+ _keys.KEY_F46: 'kf46',
+ _keys.KEY_F47: 'kf47',
+ _keys.KEY_F48: 'kf48',
+ _keys.KEY_F49: 'kf49',
+ _keys.KEY_F5: 'kf5',
+ _keys.KEY_F50: 'kf50',
+ _keys.KEY_F51: 'kf51',
+ _keys.KEY_F52: 'kf52',
+ _keys.KEY_F53: 'kf53',
+ _keys.KEY_F54: 'kf54',
+ _keys.KEY_F55: 'kf55',
+ _keys.KEY_F56: 'kf56',
+ _keys.KEY_F57: 'kf57',
+ _keys.KEY_F58: 'kf58',
+ _keys.KEY_F59: 'kf59',
+ _keys.KEY_F6: 'kf6',
+ _keys.KEY_F60: 'kf60',
+ _keys.KEY_F61: 'kf61',
+ _keys.KEY_F62: 'kf62',
+ _keys.KEY_F63: 'kf63',
+ _keys.KEY_F7: 'kf7',
+ _keys.KEY_F8: 'kf8',
+ _keys.KEY_F9: 'kf9',
+ _keys.KEY_FIND: 'kfnd',
+ _keys.KEY_HELP: 'khlp',
+ _keys.KEY_HOME: 'khome',
+ _keys.KEY_IC: 'kich1',
+ _keys.KEY_IL: 'kil1',
+ _keys.KEY_LEFT: 'kcub1',
+ _keys.KEY_LL: 'kll',
+ _keys.KEY_MARK: 'kmrk',
+ _keys.KEY_MESSAGE: 'kmsg',
+ _keys.KEY_MOVE: 'kmov',
+ _keys.KEY_NEXT: 'knxt',
+ _keys.KEY_NPAGE: 'knp',
+ _keys.KEY_OPEN: 'kopn',
+ _keys.KEY_OPTIONS: 'kopt',
+ _keys.KEY_PPAGE: 'kpp',
+ _keys.KEY_PREVIOUS: 'kprv',
+ _keys.KEY_PRINT: 'kprt',
+ _keys.KEY_REDO: 'krdo',
+ _keys.KEY_REFERENCE: 'kref',
+ _keys.KEY_REFRESH: 'krfr',
+ _keys.KEY_REPLACE: 'krpl',
+ _keys.KEY_RESTART: 'krst',
+ _keys.KEY_RESUME: 'kres',
+ _keys.KEY_RIGHT: 'kcuf1',
+ _keys.KEY_SAVE: 'ksav',
+ _keys.KEY_SBEG: 'kBEG',
+ _keys.KEY_SCANCEL: 'kCAN',
+ _keys.KEY_SCOMMAND: 'kCMD',
+ _keys.KEY_SCOPY: 'kCPY',
+ _keys.KEY_SCREATE: 'kCRT',
+ _keys.KEY_SDC: 'kDC',
+ _keys.KEY_SDL: 'kDL',
+ _keys.KEY_SELECT: 'kslt',
+ _keys.KEY_SEND: 'kEND',
+ _keys.KEY_SEOL: 'kEOL',
+ _keys.KEY_SEXIT: 'kEXT',
+ _keys.KEY_SF: 'kind',
+ _keys.KEY_SFIND: 'kFND',
+ _keys.KEY_SHELP: 'kHLP',
+ _keys.KEY_SHOME: 'kHOM',
+ _keys.KEY_SIC: 'kIC',
+ _keys.KEY_SLEFT: 'kLFT',
+ _keys.KEY_SMESSAGE: 'kMSG',
+ _keys.KEY_SMOVE: 'kMOV',
+ _keys.KEY_SNEXT: 'kNXT',
+ _keys.KEY_SOPTIONS: 'kOPT',
+ _keys.KEY_SPREVIOUS: 'kPRV',
+ _keys.KEY_SPRINT: 'kPRT',
+ _keys.KEY_SR: 'kri',
+ _keys.KEY_SREDO: 'kRDO',
+ _keys.KEY_SREPLACE: 'kRPL',
+ _keys.KEY_SRIGHT: 'kRIT',
+ _keys.KEY_SRSUME: 'kRES',
+ _keys.KEY_SSAVE: 'kSAV',
+ _keys.KEY_SSUSPEND: 'kSPD',
+ _keys.KEY_STAB: 'khts',
+ _keys.KEY_SUNDO: 'kUND',
+ _keys.KEY_SUSPEND: 'kspd',
+ _keys.KEY_UNDO: 'kund',
+ _keys.KEY_UP: 'kcuu1'
+ }
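
A sketch tying the table above back to the key constants and terminfo data;
_capability_names is module-private, so reaching into it here is purely
illustrative:

    import jinxed
    from jinxed.has_key import _capability_names

    jinxed.setupterm('xterm')
    cap = _capability_names[jinxed.KEY_LEFT]  # 'kcub1'
    print(cap, jinxed.tigetstr(cap))          # kcub1 b'\x1bOD'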
diff --git a/third_party/python/jinxed/jinxed/terminfo/__init__.py b/third_party/python/jinxed/jinxed/terminfo/__init__.py
new file mode 100644
index 0000000000..8a304e9f44
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/terminfo/__init__.py
@@ -0,0 +1,87 @@
+"""
+jinxed terminal info library
+
+Most of this information came from the terminfo man pages, part of ncurses
+More information on ncurses can be found at:
+https://www.gnu.org/software/ncurses/ncurses.html
+
+Boolean and numeric capabilities are listed here to support tigetnum() and tigetflag()
+"""
+
+# pylint: disable=wrong-spelling-in-comment
+
+BOOL_CAPS = [
+ 'am', # (auto_right_margin) terminal has automatic margins
+ 'bce', # (back_color_erase) screen erased with background color
+ 'bw', # (auto_left_margin) cub1 wraps from column 0 to last column
+ 'ccc', # (can_change) terminal can re-define existing colors
+ 'chts', # (hard_cursor) cursor is hard to see
+ 'cpix', # (cpi_changes_res) changing character pitch changes resolution
+ 'crxm', # (cr_cancels_micro_mode) using cr turns off micro mode
+ 'daisy', # (has_print_wheel) printer needs operator to change character set
+ 'da', # (memory_above) display may be retained above the screen
+ 'db', # (memory_below) display may be retained below the screen
+ 'eo', # (erase_overstrike) can erase overstrikes with a blank
+ 'eslok', # (status_line_esc_ok) escape can be used on the status line
+ 'gn', # (generic_type) generic line type
+ 'hc', # (hard_copy) hardcopy terminal
+ 'hls', # (hue_lightness_saturation) terminal uses only HLS color notation (Tektronix)
+ 'hs', # (has_status_line) has extra status line
+ 'hz', # (tilde_glitch) cannot print ~'s (Hazeltine)
+ 'in', # (insert_null_glitch) insert mode distinguishes nulls
+ 'km', # (has_meta_key) Has a meta key (i.e., sets 8th-bit)
+ 'lpix', # (lpi_changes_res) changing line pitch changes resolution
+ 'mc5i', # (prtr_silent) printer will not echo on screen
+ 'mir', # (move_insert_mode) safe to move while in insert mode
+ 'msgr', # (move_standout_mode) safe to move while in standout mode
+ 'ndscr', # (non_dest_scroll_region) scrolling region is non-destructive
+ 'npc', # (no_pad_char) pad character does not exist
+ 'nrrmc', # (non_rev_rmcup) smcup does not reverse rmcup
+ 'nxon', # (needs_xon_xoff) padding will not work, xon/xoff required
+ 'os', # (over_strike) terminal can overstrike
+ 'sam', # (semi_auto_right_margin) printing in last column causes cr
+ 'ul', # (transparent_underline) underline character overstrikes
+ 'xenl', # (eat_newline_glitch) newline ignored after 80 cols (concept)
+ 'xhpa', # (col_addr_glitch) only positive motion for hpa/mhpa caps
+ 'xhp', # (ceol_standout_glitch) standout not erased by overwriting (hp)
+ 'xon', # (xon_xoff) terminal uses xon/xoff handshaking
+ 'xsb', # (no_esc_ctlc) beehive (f1=escape, f2=ctrl C)
+ 'xt', # (dest_tabs_magic_smso) tabs destructive, magic so char (t1061)
+ 'xvpa', # (row_addr_glitch) only positive motion for vpa/mvpa caps
+]
+
+NUM_CAPS = [
+ 'bitwin', # (bit_image_entwining) number of passes for each bit-image row
+ 'bitype', # (bit_image_type) type of bit-image device
+ 'btns', # (buttons) number of buttons on mouse
+ 'bufsz', # (buffer_capacity) numbers of bytes buffered before printing
+ 'colors', # (max_colors) maximum number of colors on screen
+ 'cols', # (columns) number of columns in a line
+ 'cps', # (print_rate) print rate in characters per second
+ 'it', # (init_tabs) tabs initially every # spaces
+ 'lh', # (label_height) rows in each label
+ 'lines', # (lines) number of lines on screen or page
+ 'lm', # (lines_of_memory) lines of memory if > line. 0 means varies
+ 'lw', # (label_width) columns in each label
+ 'ma', # (max_attributes) maximum combined attributes terminal can handle
+ 'maddr', # (max_micro_address) maximum value in micro_..._address
+ 'mcs', # (micro_col_size) character step size when in micro mode
+ 'mjump', # (max_micro_jump) maximum value in parm_..._micro
+ 'mls', # (micro_line_size) line step size when in micro mode
+ 'ncv', # (no_color_video) video attributes that cannot be used with colors
+ 'nlab', # (num_labels) number of labels on screen
+ 'npins', # (number_of_pins) numbers of pins in print-head
+ 'orc', # (output_res_char) horizontal resolution in units per line
+ 'orhi', # (output_res_horz_inch) horizontal resolution in units per inch
+ 'orl', # (output_res_line) vertical resolution in units per line
+ 'orvi', # (output_res_vert_inch) vertical resolution in units per inch
+ 'pairs', # (max_pairs) maximum number of color-pairs on the screen
+ 'pb', # (padding_baud_rate) lowest baud rate where padding needed
+ 'spinh', # (dot_horz_spacing) spacing of dots horizontally in dots per inch
+ 'spinv', # (dot_vert_spacing) spacing of pins vertically in pins per inch
+ 'vt', # (virtual_terminal) virtual terminal number (CB/unix)
+ 'widcs', # (wide_char_size) character step size when in double wide mode
+ 'wnum', # (maximum_windows) maximum number of definable windows
+ 'wsl', # (width_status_line) number of columns in status line
+ 'xmc', # (magic_cookie_glitch) number of blank characters left by smso or rmso
+]
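
These master lists let tigetflag() and tigetnum() distinguish a capability a
terminal merely lacks from a name that is not a capability at all; a sketch of
that membership logic:

    from jinxed.terminfo import BOOL_CAPS, NUM_CAPS, xterm

    # Known flag, absent from xterm -> tigetflag() returns 0 rather than -1
    print('bw' in BOOL_CAPS, 'bw' in xterm.BOOL_CAPS)    # True False
    # Known numeric cap, absent from xterm -> tigetnum() returns -1, not -2
    print('btns' in NUM_CAPS, 'btns' in xterm.NUM_CAPS)  # True False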
diff --git a/third_party/python/jinxed/jinxed/terminfo/ansicon.py b/third_party/python/jinxed/jinxed/terminfo/ansicon.py
new file mode 100644
index 0000000000..d4ee079e40
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/terminfo/ansicon.py
@@ -0,0 +1,158 @@
+"""
+Ansicon virtual terminal codes
+
+Information sourced from:
+ https://github.com/adoxa/ansicon/blob/master/sequences.txt
+
+A best effort has been made, but not all information was available
+"""
+
+from .xterm_256color import BOOL_CAPS, NUM_CAPS, STR_CAPS
+
+BOOL_CAPS = BOOL_CAPS[:]
+NUM_CAPS = NUM_CAPS.copy()
+STR_CAPS = STR_CAPS.copy()
+
+
+# Added
+STR_CAPS['cht'] = b'\x1b[%p1%dI'
+STR_CAPS['cnl'] = b'\x1b[%p1%dE'
+STR_CAPS['cpl'] = b'\x1b[%p1%dF'
+STR_CAPS['da1'] = b'\x1b[0c'
+STR_CAPS['dsr'] = b'\x1b[5n'
+STR_CAPS['hvp'] = b'\x1b[%i%p1%d;%p2%df' # Same as cup
+STR_CAPS['setb'] = b'\x1b[48;5;%p1%dm'
+STR_CAPS['setf'] = b'\x1b[38;5;%p1%dm'
+
+# Removed - These do not appear to be supported
+del STR_CAPS['dim']
+del STR_CAPS['flash']
+del STR_CAPS['invis']
+del STR_CAPS['kcbt']
+del STR_CAPS['kEND']
+del STR_CAPS['kf37']
+del STR_CAPS['kf38']
+del STR_CAPS['kf39']
+del STR_CAPS['kf40']
+del STR_CAPS['kf41']
+del STR_CAPS['kf42']
+del STR_CAPS['kf43']
+del STR_CAPS['kf44']
+del STR_CAPS['kf45']
+del STR_CAPS['kf46']
+del STR_CAPS['kf47']
+del STR_CAPS['kf48']
+del STR_CAPS['kf61']
+del STR_CAPS['kf62']
+del STR_CAPS['kf63']
+del STR_CAPS['kIC']
+del STR_CAPS['kind']
+del STR_CAPS['kLFT']
+del STR_CAPS['kmous']
+del STR_CAPS['kNXT']
+del STR_CAPS['kPRV']
+del STR_CAPS['kri']
+del STR_CAPS['kRIT']
+del STR_CAPS['meml']
+del STR_CAPS['memu']
+del STR_CAPS['ritm']
+del STR_CAPS['rmam']
+del STR_CAPS['rmcup']
+del STR_CAPS['rmir']
+del STR_CAPS['rmkx']
+del STR_CAPS['rmm']
+del STR_CAPS['sitm']
+del STR_CAPS['smam']
+del STR_CAPS['smcup']
+del STR_CAPS['smir']
+del STR_CAPS['smkx']
+del STR_CAPS['smm']
+
+# Modified
+NUM_CAPS['colors'] = 16
+NUM_CAPS['cols'] = 80
+NUM_CAPS['lines'] = 30
+NUM_CAPS['pairs'] = 256
+STR_CAPS['cbt'] = b'\x1b[%p1%dZ'
+STR_CAPS['cnorm'] = b'\x1b[?25h'
+STR_CAPS['csr'] = b'\x1b[%p1%{1}%+%d;%?%p2%t%p2%{1}%+%dr'
+STR_CAPS['cub1'] = b'\x1b[D'
+STR_CAPS['cud1'] = b'\x1b[B'
+STR_CAPS['cvvis'] = b'\x1b[?25h'
+STR_CAPS['initc'] = b'\x1b]4;%p1%d;rgb:%p2%d/%p3%d/%p4%d\x1b\\'
+STR_CAPS['is2'] = b'\x1b[!p\x1b>'
+STR_CAPS['ka1'] = b'\x00G' # upper left of keypad
+STR_CAPS['ka3'] = b'\x00I' # upper right of keypad
+STR_CAPS['kbs'] = b'\x08'
+STR_CAPS['kc1'] = b'\x00O' # lower left of keypad
+STR_CAPS['kc3'] = b'\x00Q' # lower right of keypad
+STR_CAPS['kcub1'] = b'\xe0K'
+STR_CAPS['kcud1'] = b'\xe0P'
+STR_CAPS['kcuf1'] = b'\xe0M'
+STR_CAPS['kcuu1'] = b'\xe0H'
+STR_CAPS['kDC'] = b'\xe0S'
+STR_CAPS['kdch1'] = b'\x0eQ'
+STR_CAPS['kend'] = b'\xe0O'
+STR_CAPS['kent'] = b'\r'
+STR_CAPS['kf1'] = b'\x00;'
+STR_CAPS['kf2'] = b'\x00<'
+STR_CAPS['kf3'] = b'\x00='
+STR_CAPS['kf4'] = b'\x00>'
+STR_CAPS['kf5'] = b'\x00?'
+STR_CAPS['kf6'] = b'\x00@'
+STR_CAPS['kf7'] = b'\x00A'
+STR_CAPS['kf8'] = b'\x00B'
+STR_CAPS['kf9'] = b'\x00C'
+STR_CAPS['kf10'] = b'\x00D'
+STR_CAPS['kf11'] = b'\xe0\x85'
+STR_CAPS['kf12'] = b'\xe0\x86'
+STR_CAPS['kf13'] = b'\x00T'
+STR_CAPS['kf14'] = b'\x00U'
+STR_CAPS['kf15'] = b'\x00V'
+STR_CAPS['kf16'] = b'\x00W'
+STR_CAPS['kf17'] = b'\x00X'
+STR_CAPS['kf18'] = b'\x00Y'
+STR_CAPS['kf19'] = b'\x00Z'
+STR_CAPS['kf20'] = b'\x00['
+STR_CAPS['kf21'] = b'\x00\\'
+STR_CAPS['kf22'] = b'\x00]'
+STR_CAPS['kf23'] = b'\xe0\x87'
+STR_CAPS['kf24'] = b'\xe0\x88'
+STR_CAPS['kf25'] = b'\x00^'
+STR_CAPS['kf26'] = b'\x00_'
+STR_CAPS['kf27'] = b'\x00`'
+STR_CAPS['kf28'] = b'\x00a'
+STR_CAPS['kf29'] = b'\x00b'
+STR_CAPS['kf30'] = b'\x00c'
+STR_CAPS['kf31'] = b'\x00d'
+STR_CAPS['kf32'] = b'\x00e'
+STR_CAPS['kf33'] = b'\x00f'
+STR_CAPS['kf34'] = b'\x00g'
+STR_CAPS['kf35'] = b'\xe0\x89'
+STR_CAPS['kf36'] = b'\xe0\x8a'
+# Missing F37 - F48
+STR_CAPS['kf49'] = b'\x00h'
+STR_CAPS['kf50'] = b'\x00i'
+STR_CAPS['kf51'] = b'\x00j'
+STR_CAPS['kf52'] = b'\x00k'
+STR_CAPS['kf53'] = b'\x00l'
+STR_CAPS['kf54'] = b'\x00m'
+STR_CAPS['kf55'] = b'\x00n'
+STR_CAPS['kf56'] = b'\x00o'
+STR_CAPS['kf57'] = b'\x00p'
+STR_CAPS['kf58'] = b'\x00q'
+STR_CAPS['kf59'] = b'\xe0\x8b'
+STR_CAPS['kf60'] = b'\xe0\x8c'
+# Missing F61 - F63
+STR_CAPS['khome'] = b'\xe0G'
+STR_CAPS['kich1'] = b'\xe0R'
+STR_CAPS['knp'] = b'\xe0Q'
+STR_CAPS['kpp'] = b'\xe0I'
+STR_CAPS['rs1'] = b'\x1bc\x1b]104\x1b\\'
+STR_CAPS['rs2'] = b'\x1b[!p'
+STR_CAPS['sgr'] = b'\x1b[%p1%d%?%p2%t;%p2%d%;%?%p3%t;%p3%d%;%?%p4%t;%p4%d%;%?%p5%t;%p5%d%;' \
+ b'%?%p6%t;%p6%d%;%?%p7%t;%p7%d%;%?%p8%t;%p8%d%;%?%p9%t;%p9%d%;m'
+
+# Need info - Left in, but unsure
+# acsc (covers some, but maybe not all)
+# mc0/mc4/mc5 (print screen/off/on)
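
Because this module copies the parent tables before mutating them (BOOL_CAPS[:],
NUM_CAPS.copy(), STR_CAPS.copy()), xterm_256color itself is left untouched; a
sketch, assuming xterm_256color reports 256 colors as its name suggests:

    from jinxed.terminfo import ansicon, xterm_256color

    print(ansicon.NUM_CAPS['colors'])          # 16
    print(xterm_256color.NUM_CAPS['colors'])   # 256, assumed per its name
    print('dim' in ansicon.STR_CAPS)           # False, deleted above
    print('dim' in xterm_256color.STR_CAPS)    # True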
diff --git a/third_party/python/jinxed/jinxed/terminfo/vtwin10.py b/third_party/python/jinxed/jinxed/terminfo/vtwin10.py
new file mode 100644
index 0000000000..6cfb0deb69
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/terminfo/vtwin10.py
@@ -0,0 +1,68 @@
+"""
+Windows 10 virtual terminal codes
+
+Information sourced from:
+ https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences
+
+A best effort has been made, but not all information was available
+"""
+
+from .xterm_256color import BOOL_CAPS, NUM_CAPS, STR_CAPS
+
+BOOL_CAPS = BOOL_CAPS[:]
+NUM_CAPS = NUM_CAPS.copy()
+STR_CAPS = STR_CAPS.copy()
+
+# Added
+STR_CAPS['cht'] = b'\x1b[%p1%dI'
+STR_CAPS['cnl'] = b'\x1b[%p1%dE'
+STR_CAPS['cpl'] = b'\x1b[%p1%dF'
+STR_CAPS['hvp'] = b'\x1b[%i%p1%d;%p2%df' # Same as cup
+STR_CAPS['ka1'] = b'\x1bOH' # upper left of keypad
+STR_CAPS['ka3'] = b'\x1b[5~' # upper right of keypad
+STR_CAPS['setb'] = b'\x1b[48;5;%p1%dm'
+STR_CAPS['setf'] = b'\x1b[38;5;%p1%dm'
+
+# Removed - These do not appear to be supported
+del STR_CAPS['blink']
+del STR_CAPS['dim']
+del STR_CAPS['flash']
+del STR_CAPS['invis']
+del STR_CAPS['kmous']
+del STR_CAPS['meml']
+del STR_CAPS['memu']
+del STR_CAPS['ritm']
+del STR_CAPS['rmam']
+del STR_CAPS['rmir']
+del STR_CAPS['rmm']
+del STR_CAPS['sitm']
+del STR_CAPS['smam']
+del STR_CAPS['smir']
+del STR_CAPS['smm']
+
+# Modified
+NUM_CAPS['colors'] = 256
+NUM_CAPS['cols'] = 120
+NUM_CAPS['lines'] = 30
+NUM_CAPS['pairs'] = 65536
+STR_CAPS['cbt'] = b'\x1b[%p1%dZ'
+STR_CAPS['csr'] = b'\x1b[%p1%{1}%+%d;%?%p2%t%p2%{1}%+%dr'
+STR_CAPS['cub1'] = b'\x1b[D'
+STR_CAPS['cud1'] = b'\x1b[B'
+STR_CAPS['cvvis'] = b'\x1b[?25h'
+STR_CAPS['initc'] = b'\x1b]4;%p1%d;rgb:%p2%d/%p3%d/%p4%d\x1b\\'
+STR_CAPS['is2'] = b'\x1b[!p\x1b>'
+STR_CAPS['kbs'] = b'\x7f'
+STR_CAPS['kc1'] = b'\x1bOF' # lower left of keypad
+STR_CAPS['kc3'] = b'\x1b[6~' # lower right of keypad
+STR_CAPS['kent'] = b'\r'
+STR_CAPS['rmcup'] = b'\x1b[?1049l'
+STR_CAPS['rs2'] = b'\x1b[!p\x1b>' # DECSTR
+STR_CAPS['sgr'] = b'\x1b[%p1%d%?%p2%t;%p2%d%;%?%p3%t;%p3%d%;%?%p4%t;%p4%d%;%?%p5%t;%p5%d%;' \
+ b'%?%p6%t;%p6%d%;%?%p7%t;%p7%d%;%?%p8%t;%p8%d%;%?%p9%t;%p9%d%;m'
+STR_CAPS['smcup'] = b'\x1b[?1049h'
+STR_CAPS['u9'] = b'\x1b[0c'
+
+# Need info - Left in, but unsure
+# acsc (covers some, but maybe not all)
+# mc0/mc4/mc5 (print screen/off/on)
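
Selecting this entry explicitly; on Windows, win32.get_term() would normally
detect the appropriate terminal at runtime, so the hard-coded name below is
only for demonstration:

    import jinxed

    jinxed.setupterm('vtwin10')
    print(jinxed.tigetnum('colors'))  # 256
    print(jinxed.tigetstr('smcup'))   # b'\x1b[?1049h'
    print(jinxed.tigetstr('blink'))   # None, removed above as unsupported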
diff --git a/third_party/python/jinxed/jinxed/terminfo/xterm.py b/third_party/python/jinxed/jinxed/terminfo/xterm.py
new file mode 100644
index 0000000000..1a43c5184e
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/terminfo/xterm.py
@@ -0,0 +1,482 @@
+"""
+xterm terminal info
+
+Since most of the Windows virtual terminal processing schemes are based on
+xterm, this file is intended to be sourced and includes the man page descriptions
+
+Most of this information came from the terminfo man pages, part of ncurses
+More information on ncurses can be found at:
+https://www.gnu.org/software/ncurses/ncurses.html
+
+The values are as reported by infocmp on Fedora 30 with ncurses 6.1
+"""
+
+# pylint: disable=wrong-spelling-in-comment,line-too-long
+# flake8: noqa: E501
+
+BOOL_CAPS = [
+ 'am', # (auto_right_margin) terminal has automatic margins
+ 'bce', # (back_color_erase) screen erased with background color
+ # 'bw', # (auto_left_margin) cub1 wraps from column 0 to last column
+ # 'ccc', # (can_change) terminal can re-define existing colors
+ # 'chts', # (hard_cursor) cursor is hard to see
+ # 'cpix', # (cpi_changes_res) changing character pitch changes resolution
+ # 'crxm', # (cr_cancels_micro_mode) using cr turns off micro mode
+ # 'daisy', # (has_print_wheel) printer needs operator to change character set
+ # 'da', # (memory_above) display may be retained above the screen
+ # 'db', # (memory_below) display may be retained below the screen
+ # 'eo', # (erase_overstrike) can erase overstrikes with a blank
+ # 'eslok', # (status_line_esc_ok) escape can be used on the status line
+ # 'gn', # (generic_type) generic line type
+ # 'hc', # (hard_copy) hardcopy terminal
+ # 'hls', # (hue_lightness_saturation) terminal uses only HLS color notation (Tektronix)
+ # 'hs', # (has_status_line) has extra status line
+ # 'hz', # (tilde_glitch) cannot print ~'s (Hazeltine)
+ # 'in', # (insert_null_glitch) insert mode distinguishes nulls
+ 'km', # (has_meta_key) Has a meta key (i.e., sets 8th-bit)
+ # 'lpix', # (lpi_changes_res) changing line pitch changes resolution
+ 'mc5i', # (prtr_silent) printer will not echo on screen
+ 'mir', # (move_insert_mode) safe to move while in insert mode
+ 'msgr', # (move_standout_mode) safe to move while in standout mode
+ # 'ndscr', # (non_dest_scroll_region) scrolling region is non-destructive
+ 'npc', # (no_pad_char) pad character does not exist
+ # 'nrrmc', # (non_rev_rmcup) smcup does not reverse rmcup
+ # 'nxon', # (needs_xon_xoff) padding will not work, xon/xoff required
+ # 'os', # (over_strike) terminal can overstrike
+ # 'sam', # (semi_auto_right_margin) printing in last column causes cr
+ # 'ul', # (transparent_underline) underline character overstrikes
+ 'xenl', # (eat_newline_glitch) newline ignored after 80 cols (concept)
+ # 'xhpa', # (col_addr_glitch) only positive motion for hpa/mhpa caps
+ # 'xhp', # (ceol_standout_glitch) standout not erased by overwriting (hp)
+ # 'xon', # (xon_xoff) terminal uses xon/xoff handshaking
+ # 'xsb', # (no_esc_ctlc) beehive (f1=escape, f2=ctrl C)
+ # 'xt', # (dest_tabs_magic_smso) tabs destructive, magic so char (t1061)
+ # 'xvpa', # (row_addr_glitch) only positive motion for vpa/mvpa caps
+]
+
+NUM_CAPS = {
+ # 'bitwin': 0, # (bit_image_entwining) number of passes for each bit-image row
+ # 'bitype': 0, # (bit_image_type) type of bit-image device
+ # 'btns': 0, # (buttons) number of buttons on mouse
+ # 'bufsz': 0, # (buffer_capacity) numbers of bytes buffered before printing
+ 'colors': 8, # (max_colors) maximum number of colors on screen
+ 'cols': 80, # (columns) number of columns in a line
+ # 'cps': 0, # (print_rate) print rate in characters per second
+ 'it': 8, # (init_tabs) tabs initially every # spaces
+ # 'lh': 0, # (label_height) rows in each label
+ 'lines': 24, # (lines) number of lines on screen or page
+ # 'lm': 0, # (lines_of_memory) lines of memory if > line. 0 means varies
+ # 'lw': 0, # (label_width) columns in each label
+ # 'ma': 0, # (max_attributes) maximum combined attributes terminal can handle
+ # 'maddr': 0, # (max_micro_address) maximum value in micro_..._address
+ # 'mcs': 0, # (micro_col_size) character step size when in micro mode
+ # 'mjump': 0, # (max_micro_jump) maximum value in parm_..._micro
+ # 'mls': 0, # (micro_line_size) line step size when in micro mode
+ # 'ncv': 0, # (no_color_video) video attributes that cannot be used with colors
+ # 'nlab': 0, # (num_labels) number of labels on screen
+ # 'npins': 0, # (number_of_pins) numbers of pins in print-head
+ # 'orc': 0, # (output_res_char) horizontal resolution in units per line
+ # 'orhi': 0, # (output_res_horz_inch) horizontal resolution in units per inch
+ # 'orl': 0, # (output_res_line) vertical resolution in units per line
+ # 'orvi': 0, # (output_res_vert_inch) vertical resolution in units per inch
+ 'pairs': 64, # (max_pairs) maximum number of color-pairs on the screen
+ # 'pb': 0, # (padding_baud_rate) lowest baud rate where padding needed
+ # 'spinh': 0, # (dot_horz_spacing) spacing of dots horizontally in dots per inch
+ # 'spinv': 0, # (dot_vert_spacing) spacing of pins vertically in pins per inch
+ # 'vt': 0, # (virtual_terminal) virtual terminal number (CB/unix)
+ # 'widcs': 0, # (wide_char_size) character step size when in double wide mode
+ # 'wnum': 0, # (maximum_windows) maximum number of definable windows
+ # 'wsl': 0, # (width_status_line) number of columns in status line
+ # 'xmc': 0, # (magic_cookie_glitch) number of blank characters left by smso or rmso
+}
+
+STR_CAPS = {
+ 'acsc': b'``aaffggiijjkkllmmnnooppqqrrssttuuvvwwxxyyzz{{||}}~~', # (acs_chars) graphics charset pairs, based on vt100
+ 'bel': b'^G', # (bell) audible signal (bell) (P)
+ # 'bicr': b'', # (bit_image_carriage_return) Move to beginning of same row
+ # 'binel': b'', # (bit_image_newline) Move to next row of the bit image
+ # 'birep': b'', # (bit_image_repeat) Repeat bit image cell #1 #2 times
+ 'blink': b'\x1b[5m', # (enter_blink_mode) turn on blinking
+ 'bold': b'\x1b[1m', # (enter_bold_mode) turn on bold (extra bright) mode
+ 'cbt': b'\x1b[Z', # (back_tab) back tab (P)
+ # 'chr': b'', # (change_res_horz) Change horizontal resolution to #1
+ 'civis': b'\x1b[?25l', # (cursor_invisible) make cursor invisible
+ 'clear': b'\x1b[H\x1b[2J', # (clear_screen) clear screen and home cursor (P*)
+ # 'cmdch': b'', # (command_character) terminal settable cmd character in prototype !?
+ 'cnorm': b'\x1b[?12l\x1b[?25h', # (cursor_normal) make cursor appear normal (undo civis/cvvis)
+ # 'colornm': b'', # (color_names) Give name for color #1
+ # 'cpi': b'', # (change_char_pitch) Change number of characters per inch to #1
+ 'cr': b'\r', # (carriage_return) carriage return (P*) (P*)
+ # 'csin': b'', # (code_set_init) Init sequence for multiple codesets
+ # 'csnm': b'', # (char_set_names) Produce #1'th item from list of character set names
+ 'csr': b'\x1b[%i%p1%d;%p2%dr', # (change_scroll_region) change region to line #1 to line #2 (P)
+ 'cub1': b'^H', # (cursor_left) move left one space
+ 'cub': b'\x1b[%p1%dD', # (parm_left_cursor) move #1 characters to the left (P)
+ 'cud1': b'\n', # (cursor_down) down one line
+ 'cud': b'\x1b[%p1%dB', # (parm_down_cursor) down #1 lines (P*)
+ 'cuf1': b'\x1b[C', # (cursor_right) non-destructive space (move right one space)
+ 'cuf': b'\x1b[%p1%dC', # (parm_right_cursor) move #1 characters to the right (P*)
+ 'cup': b'\x1b[%i%p1%d;%p2%dH', # (cursor_address) move to row #1 columns #2
+ 'cuu1': b'\x1b[A', # (cursor_up) up one line
+ 'cuu': b'\x1b[%p1%dA', # (parm_up_cursor) up #1 lines (P*)
+ # 'cvr': b'', # (change_res_vert) Change vertical resolution to #1
+ 'cvvis': b'\x1b[?12;25h', # (cursor_visible) make cursor very visible
+ # 'cwin': b'', # (create_window) define a window #1 from #2,#3 to #4,#5
+ 'dch1': b'\x1b[P', # (delete_character) delete character (P*)
+ 'dch': b'\x1b[%p1%dP', # (parm_dch) delete #1 characters (P*)
+ # 'dclk': b'', # (display_clock) display clock
+ # 'defbi': b'', # (define_bit_image_region) Define rectangular bit image region
+ # 'defc': b'', # (define_char) Define a character #1, #2 dots wide, descender #3
+ # 'devt': b'', # (device_type) Indicate language/codeset support
+ # 'dial': b'', # (dial_phone) dial number #1
+ 'dim': b'\x1b[2m', # (enter_dim_mode) turn on half-bright mode
+ # 'dispc': b'', # (display_pc_char) Display PC character #1
+ 'dl1': b'\x1b[M', # (delete_line) delete line (P*)
+ 'dl': b'\x1b[%p1%dM', # (parm_delete_line) delete #1 lines (P*)
+ # 'docr': b'', # (these_cause_cr) Printing any of these characters causes CR
+ # 'dsl': b'', # (dis_status_line) disable status line
+ 'ech': b'\x1b[%p1%dX', # (erase_chars) erase #1 characters (P)
+ 'ed': b'\x1b[J', # (clr_eos) clear to end of screen (P*)
+ 'el1': b'\x1b[1K', # (clr_bol) Clear to beginning of line
+ 'el': b'\x1b[K', # (clr_eol) clear to end of line (P)
+ # 'enacs': b'', # (ena_acs) enable alternate char set
+ # 'endbi': b'', # (end_bit_image_region) End a bit-image region
+ # 'ff': b'', # (form_feed) hardcopy terminal page eject (P*)
+ 'flash': b'\x1b[?5h$<100/>\x1b[?5l', # (flash_screen) visible bell (may not move cursor)
+ # 'fln': b'', # (label_format) label format
+ # 'fsl': b'', # (from_status_line) return from status line
+ # 'getm': b'', # (get_mouse) Curses should get button events, parameter #1 not documented.
+ # 'hd': b'', # (down_half_line) half a line down
+ 'home': b'\x1b[H', # (cursor_home) home cursor (if no cup)
+ # 'hook': b'', # (flash_hook) flash switch hook
+ 'hpa': b'\x1b[%i%p1%dG', # (column_address) horizontal position #1, absolute (P)
+ 'ht': b'^I', # (tab) tab to next 8-space hardware tab stop
+ 'hts': b'\x1bH', # (set_tab) set a tab in every row, current columns
+ # 'hu': b'', # (up_half_line) half a line up
+ # 'hup': b'', # (hangup) hang-up phone
+ # 'ich1': b'', # (insert_character) insert character (P)
+ 'ich': b'\x1b[%p1%d@', # (parm_ich) insert #1 characters (P*)
+ # 'if': b'', # (init_file) name of initialization file
+ 'il1': b'\x1b[L', # (insert_line) insert line (P*)
+ 'il': b'\x1b[%p1%dL', # (parm_insert_line) insert #1 lines (P*)
+ 'ind': b'\n', # (scroll_forward) scroll text up (P)
+ 'indn': b'\x1b[%p1%dS', # (parm_index) scroll forward #1 lines (P)
+ # 'initc': b'', # (initialize_color) initialize color #1 to (#2,#3,#4)
+ # 'initp': b'', # (initialize_pair) Initialize color pair #1 to fg=(#2,#3,#4), bg=(#5,#6,#7)
+ 'invis': b'\x1b[8m', # (enter_secure_mode) turn on blank mode (characters invisible)
+ # 'ip': b'', # (insert_padding) insert padding after inserted character
+ # 'iprog': b'', # (init_prog) path name of program for initialization
+ # 'is1': b'', # (init_1string) initialization string
+ 'is2': b'\x1b[!p\x1b[?3;4l\x1b[4l\x1b>', # (init_2string) initialization string
+ # 'is3': b'', # (init_3string) initialization string
+ # 'ka1': b'', # (key_a1) upper left of keypad
+ # 'ka3': b'', # (key_a3) upper right of keypad
+ 'kb2': b'\x1bOE', # (key_b2) center of keypad
+ # 'kbeg': b'', # (key_beg) begin key
+ # 'kBEG': b'', # (key_sbeg) shifted begin key
+ 'kbs': b'^?', # (key_backspace) backspace key
+ # 'kc1': b'', # (key_c1) lower left of keypad
+ # 'kc3': b'', # (key_c3) lower right of keypad
+ # 'kcan': b'', # (key_cancel) cancel key
+ # 'kCAN': b'', # (key_scancel) shifted cancel key
+ 'kcbt': b'\x1b[Z', # (key_btab) back-tab key
+ # 'kclo': b'', # (key_close) close key
+ # 'kclr': b'', # (key_clear) clear-screen or erase key
+ # 'kcmd': b'', # (key_command) command key
+ # 'kCMD': b'', # (key_scommand) shifted command key
+ # 'kcpy': b'', # (key_copy) copy key
+ # 'kCPY': b'', # (key_scopy) shifted copy key
+ # 'kcrt': b'', # (key_create) create key
+ # 'kCRT': b'', # (key_screate) shifted create key
+ # 'kctab': b'', # (key_ctab) clear-tab key
+ 'kcub1': b'\x1bOD', # (key_left) left-arrow key
+ 'kcud1': b'\x1bOB', # (key_down) down-arrow key
+ 'kcuf1': b'\x1bOC', # (key_right) right-arrow key
+ 'kcuu1': b'\x1bOA', # (key_up) up-arrow key
+ 'kDC': b'\x1b[3;2~', # (key_sdc) shifted delete- character key
+ 'kdch1': b'\x1b[3~', # (key_dc) delete-character key
+ # 'kdl1': b'', # (key_dl) delete-line key
+ # 'kDL': b'', # (key_sdl) shifted delete-line key
+ # 'ked': b'', # (key_eos) clear-to-end-of- screen key
+ # 'kel': b'', # (key_eol) clear-to-end-of-line key
+ 'kEND': b'\x1b[1;2F', # (key_send) shifted end key
+ 'kend': b'\x1bOF', # (key_end) end key
+ 'kent': b'\x1bOM', # (key_enter) enter/send key
+ # 'kEOL': b'', # (key_seol) shifted clear-to- end-of-line key
+ # 'kext': b'', # (key_exit) exit key
+ # 'kEXT': b'', # (key_sexit) shifted exit key
+ # 'kf0': b'', # (key_f0) F0 function key
+ 'kf1': b'\x1bOP', # (key_f1) F1 function key
+ 'kf2': b'\x1bOQ', # (key_f2) F2 function key
+ 'kf3': b'\x1bOR', # (key_f3) F3 function key
+ 'kf4': b'\x1bOS', # (key_f4) F4 function key
+ 'kf5': b'\x1b[15~', # (key_f5) F5 function key
+ 'kf6': b'\x1b[17~', # (key_f6) F6 function key
+ 'kf7': b'\x1b[18~', # (key_f7) F7 function key
+ 'kf8': b'\x1b[19~', # (key_f8) F8 function key
+ 'kf9': b'\x1b[20~', # (key_f9) F9 function key
+ 'kf10': b'\x1b[21~', # (key_f10) F10 function key
+ 'kf11': b'\x1b[23~', # (key_f11) F11 function key
+ 'kf12': b'\x1b[24~', # (key_f12) F12 function key
+ 'kf13': b'\x1b[1;2P', # (key_f13) F13 function key
+ 'kf14': b'\x1b[1;2Q', # (key_f14) F14 function key
+ 'kf15': b'\x1b[1;2R', # (key_f15) F15 function key
+ 'kf16': b'\x1b[1;2S', # (key_f16) F16 function key
+ 'kf17': b'\x1b[15;2~', # (key_f17) F17 function key
+ 'kf18': b'\x1b[17;2~', # (key_f18) F18 function key
+ 'kf19': b'\x1b[18;2~', # (key_f19) F19 function key
+ 'kf20': b'\x1b[19;2~', # (key_f20) F20 function key
+ 'kf21': b'\x1b[20;2~', # (key_f21) F21 function key
+ 'kf22': b'\x1b[21;2~', # (key_f22) F22 function key
+ 'kf23': b'\x1b[23;2~', # (key_f23) F23 function key
+ 'kf24': b'\x1b[24;2~', # (key_f24) F24 function key
+ 'kf25': b'\x1b[1;5P', # (key_f25) F25 function key
+ 'kf26': b'\x1b[1;5Q', # (key_f26) F26 function key
+ 'kf27': b'\x1b[1;5R', # (key_f27) F27 function key
+ 'kf28': b'\x1b[1;5S', # (key_f28) F28 function key
+ 'kf29': b'\x1b[15;5~', # (key_f29) F29 function key
+ 'kf30': b'\x1b[17;5~', # (key_f30) F30 function key
+ 'kf31': b'\x1b[18;5~', # (key_f31) F31 function key
+ 'kf32': b'\x1b[19;5~', # (key_f32) F32 function key
+ 'kf33': b'\x1b[20;5~', # (key_f33) F33 function key
+ 'kf34': b'\x1b[21;5~', # (key_f34) F34 function key
+ 'kf35': b'\x1b[23;5~', # (key_f35) F35 function key
+ 'kf36': b'\x1b[24;5~', # (key_f36) F36 function key
+ 'kf37': b'\x1b[1;6P', # (key_f37) F37 function key
+ 'kf38': b'\x1b[1;6Q', # (key_f38) F38 function key
+ 'kf39': b'\x1b[1;6R', # (key_f39) F39 function key
+ 'kf40': b'\x1b[1;6S', # (key_f40) F40 function key
+ 'kf41': b'\x1b[15;6~', # (key_f41) F41 function key
+ 'kf42': b'\x1b[17;6~', # (key_f42) F42 function key
+ 'kf43': b'\x1b[18;6~', # (key_f43) F43 function key
+ 'kf44': b'\x1b[19;6~', # (key_f44) F44 function key
+ 'kf45': b'\x1b[20;6~', # (key_f45) F45 function key
+ 'kf46': b'\x1b[21;6~', # (key_f46) F46 function key
+ 'kf47': b'\x1b[23;6~', # (key_f47) F47 function key
+ 'kf48': b'\x1b[24;6~', # (key_f48) F48 function key
+ 'kf49': b'\x1b[1;3P', # (key_f49) F49 function key
+ 'kf50': b'\x1b[1;3Q', # (key_f50) F50 function key
+ 'kf51': b'\x1b[1;3R', # (key_f51) F51 function key
+ 'kf52': b'\x1b[1;3S', # (key_f52) F52 function key
+ 'kf53': b'\x1b[15;3~', # (key_f53) F53 function key
+ 'kf54': b'\x1b[17;3~', # (key_f54) F54 function key
+ 'kf55': b'\x1b[18;3~', # (key_f55) F55 function key
+ 'kf56': b'\x1b[19;3~', # (key_f56) F56 function key
+ 'kf57': b'\x1b[20;3~', # (key_f57) F57 function key
+ 'kf58': b'\x1b[21;3~', # (key_f58) F58 function key
+ 'kf59': b'\x1b[23;3~', # (key_f59) F59 function key
+ 'kf60': b'\x1b[24;3~', # (key_f60) F60 function key
+ 'kf61': b'\x1b[1;4P', # (key_f61) F61 function key
+ 'kf62': b'\x1b[1;4Q', # (key_f62) F62 function key
+ 'kf63': b'\x1b[1;4R', # (key_f63) F63 function key
+ # 'kfnd': b'', # (key_find) find key
+ # 'kFND': b'', # (key_sfind) shifted find key
+ # 'khlp': b'', # (key_help) help key
+ # 'kHLP': b'', # (key_shelp) shifted help key
+ 'kHOM': b'\x1b[1;2H', # (key_shome) shifted home key
+ 'khome': b'\x1bOH', # (key_home) home key
+ # 'khts': b'', # (key_stab) set-tab key
+ 'kIC': b'\x1b[2;2~', # (key_sic) shifted insert- character key
+ 'kich1': b'\x1b[2~', # (key_ic) insert-character key
+ # 'kil1': b'', # (key_il) insert-line key
+ 'kind': b'\x1b[1;2B', # (key_sf) scroll-forward key
+ 'kLFT': b'\x1b[1;2D', # (key_sleft) shifted left-arrow key
+ # 'kll': b'', # (key_ll) lower-left key (home down)
+ 'kmous': b'\x1b[<', # (key_mouse) Mouse event has occurred
+ # 'kmov': b'', # (key_move) move key
+ # 'kMOV': b'', # (key_smove) shifted move key
+ # 'kmrk': b'', # (key_mark) mark key
+ # 'kmsg': b'', # (key_message) message key
+ # 'kMSG': b'', # (key_smessage) shifted message key
+ 'knp': b'\x1b[6~', # (key_npage) next-page key
+ # 'knxt': b'', # (key_next) next key
+ 'kNXT': b'\x1b[6;2~', # (key_snext) shifted next key
+ # 'kopn': b'', # (key_open) open key
+ # 'kopt': b'', # (key_options) options key
+ # 'kOPT': b'', # (key_soptions) shifted options key
+ 'kpp': b'\x1b[5~', # (key_ppage) previous-page key
+ # 'kprt': b'', # (key_print) print key
+ # 'kPRT': b'', # (key_sprint) shifted print key
+ # 'kprv': b'', # (key_previous) previous key
+ 'kPRV': b'\x1b[5;2~', # (key_sprevious) shifted previous key
+ # 'krdo': b'', # (key_redo) redo key
+ # 'kRDO': b'', # (key_sredo) shifted redo key
+ # 'kref': b'', # (key_reference) reference key
+ # 'kres': b'', # (key_resume) resume key
+ # 'kRES': b'', # (key_srsume) shifted resume key
+ # 'krfr': b'', # (key_refresh) refresh key
+ 'kri': b'\x1b[1;2A', # (key_sr) scroll-backward key
+ 'kRIT': b'\x1b[1;2C', # (key_sright) shifted right-arrow key
+ # 'krmir': b'', # (key_eic) sent by rmir or smir in insert mode
+ # 'krpl': b'', # (key_replace) replace key
+ # 'kRPL': b'', # (key_sreplace) shifted replace key
+ # 'krst': b'', # (key_restart) restart key
+ # 'ksav': b'', # (key_save) save key
+ # 'kSAV': b'', # (key_ssave) shifted save key
+ # 'kslt': b'', # (key_select) select key
+ # 'kSPD': b'', # (key_ssuspend) shifted suspend key
+ # 'kspd': b'', # (key_suspend) suspend key
+ # 'ktbc': b'', # (key_catab) clear-all-tabs key
+ # 'kUND': b'', # (key_sundo) shifted undo key
+ # 'kund': b'', # (key_undo) undo key
+ # 'lf0': b'', # (lab_f0) label on function key f0 if not f0
+ # 'lf10': b'', # (lab_f10) label on function key f10 if not f10
+ # 'lf1': b'', # (lab_f1) label on function key f1 if not f1
+ # 'lf2': b'', # (lab_f2) label on function key f2 if not f2
+ # 'lf3': b'', # (lab_f3) label on function key f3 if not f3
+ # 'lf4': b'', # (lab_f4) label on function key f4 if not f4
+ # 'lf5': b'', # (lab_f5) label on function key f5 if not f5
+ # 'lf6': b'', # (lab_f6) label on function key f6 if not f6
+ # 'lf7': b'', # (lab_f7) label on function key f7 if not f7
+ # 'lf8': b'', # (lab_f8) label on function key f8 if not f8
+ # 'lf9': b'', # (lab_f9) label on function key f9 if not f9
+ # 'll': b'', # (cursor_to_ll) last line, first column (if no cup)
+ # 'lpi': b'', # (change_line_pitch) Change number of lines per inch to #1
+    'meml': b'\x1bl', # (memory_lock) lock memory above the cursor
+    'memu': b'\x1bm', # (memory_unlock) unlock memory above the cursor
+ 'mc0': b'\x1b[i', # (print_screen) print contents of screen
+ 'mc4': b'\x1b[4i', # (prtr_off) turn off printer
+ 'mc5': b'\x1b[5i', # (prtr_on) turn on printer
+ # 'mc5p': b'', # (prtr_non) turn on printer for #1 bytes
+ # 'mcub1': b'', # (micro_left) Like cursor_left in micro mode
+ # 'mcub': b'', # (parm_left_micro) Like parm_left_cursor in micro mode
+ # 'mcud1': b'', # (micro_down) Like cursor_down in micro mode
+ # 'mcud': b'', # (parm_down_micro) Like parm_down_cursor in micro mode
+ # 'mcuf1': b'', # (micro_right) Like cursor_right in micro mode
+ # 'mcuf': b'', # (parm_right_micro) Like parm_right_cursor in micro mode
+ # 'mcuu1': b'', # (micro_up) Like cursor_up in micro mode
+ # 'mcuu': b'', # (parm_up_micro) Like parm_up_cursor in micro mode
+ # 'mgc': b'', # (clear_margins) clear right and left soft margins
+ # 'mhpa': b'', # (micro_column_address) Like column_address in micro mode
+ # 'minfo': b'', # (mouse_info) Mouse status information
+ # 'mrcup': b'', # (cursor_mem_address) memory relative cursor addressing, move to row #1 columns #2
+ # 'mvpa': b'', # (micro_row_address) Like row_address #1 in micro mode
+ # 'nel': b'', # (newline) newline (behave like cr followed by lf)
+ # 'oc': b'', # (orig_colors) Set all color pairs to the original ones
+ 'op': b'\x1b[39;49m', # (orig_pair) Set default pair to its original value
+ # 'pad': b'', # (pad_char) padding char (instead of null)
+ # 'pause': b'', # (fixed_pause) pause for 2-3 seconds
+ # 'pctrm': b'', # (pc_term_options) PC terminal options
+ # 'pfkey': b'', # (pkey_key) program function key #1 to type string #2
+ # 'pfloc': b'', # (pkey_local) program function key #1 to execute string #2
+ # 'pfx': b'', # (pkey_xmit) program function key #1 to transmit string #2
+ # 'pfxl': b'', # (pkey_plab) Program function key #1 to type string #2 and show string #3
+ # 'pln': b'', # (plab_norm) program label #1 to show string #2
+ # 'porder': b'', # (order_of_pins) Match software bits to print-head pins
+ # 'prot': b'', # (enter_protected_mode) turn on protected mode
+ # 'pulse': b'', # (pulse) select pulse dialing
+ # 'qdial': b'', # (quick_dial) dial number #1 without checking
+ # 'rbim': b'', # (stop_bit_image) Stop printing bit image graphics
+ 'rc': b'\x1b8', # (restore_cursor) restore cursor to position of last save_cursor
+ # 'rcsd': b'', # (stop_char_set_def) End definition of character set #1
+ 'rep': b'%p1%c\x1b[%p2%{1}%-%db', # (repeat_char) repeat char #1 #2 times (P*)
+ # 'reqmp': b'', # (req_mouse_pos) Request mouse position
+ 'rev': b'\x1b[7m', # (enter_reverse_mode) turn on reverse video mode
+ # 'rf': b'', # (reset_file) name of reset file
+ # 'rfi': b'', # (req_for_input) send next input char (for ptys)
+ 'ri': b'\x1bM', # (scroll_reverse) scroll text down (P)
+ 'rin': b'\x1b[%p1%dT', # (parm_rindex) scroll back #1 lines (P)
+ 'ritm': b'\x1b[23m', # (exit_italics_mode) End italic mode
+ # 'rlm': b'', # (exit_leftward_mode) End left-motion mode
+ 'rmacs': b'\x1b(B', # (exit_alt_charset_mode) end alternate character set (P)
+ 'rmam': b'\x1b[?7l', # (exit_am_mode) turn off automatic margins
+ # 'rmclk': b'', # (remove_clock) remove clock
+ 'rmcup': b'\x1b[?1049l\x1b[23;0;0t', # (exit_ca_mode) strings to end programs using cup
+ # 'rmdc': b'', # (exit_delete_mode) end delete mode
+ # 'rmicm': b'', # (exit_micro_mode) End micro-motion mode
+ 'rmir': b'\x1b[4l', # (exit_insert_mode) exit insert mode
+ 'rmkx': b'\x1b[?1l\x1b>', # (keypad_local) leave 'keyboard_transmit' mode
+ # 'rmln': b'', # (label_off) turn off soft labels
+ 'rmm': b'\x1b[?1034l', # (meta_off) turn off meta mode
+ # 'rmp': b'', # (char_padding) like ip but when in insert mode
+ # 'rmpch': b'', # (exit_pc_charset_mode) Exit PC character display mode
+ # 'rmsc': b'', # (exit_scancode_mode) Exit PC scancode mode
+ 'rmso': b'\x1b[27m', # (exit_standout_mode) exit standout mode
+ 'rmul': b'\x1b[24m', # (exit_underline_mode) exit underline mode
+ # 'rmxon': b'', # (exit_xon_mode) turn off xon/xoff handshaking
+ 'rs1': b'\x1bc', # (reset_1string) reset string
+ 'rs2': b'\x1b[!p\x1b[?3;4l\x1b[4l\x1b>', # (reset_2string) reset string
+ # 'rs3': b'', # (reset_3string) reset string
+ # 'rshm': b'', # (exit_shadow_mode) End shadow-print mode
+ # 'rsubm': b'', # (exit_subscript_mode) End subscript mode
+ # 'rsupm': b'', # (exit_superscript_mode) End superscript mode
+ # 'rum': b'', # (exit_upward_mode) End reverse character motion
+ # 'rwidm': b'', # (exit_doublewide_mode) End double-wide mode
+ # 's0ds': b'', # (set0_des_seq) Shift to codeset 0 (EUC set 0, ASCII)
+ # 's1ds': b'', # (set1_des_seq) Shift to codeset 1
+ # 's2ds': b'', # (set2_des_seq) Shift to codeset 2
+ # 's3ds': b'', # (set3_des_seq) Shift to codeset 3
+ # 'sbim': b'', # (start_bit_image) Start printing bit image graphics
+ 'sc': b'\x1b7', # (save_cursor) save current cursor position (P)
+ # 'scesa': b'', # (alt_scancode_esc) Alternate escape for scancode emulation
+ # 'scesc': b'', # (scancode_escape) Escape for scancode emulation
+ # 'sclk': b'', # (set_clock) set clock, #1 hrs #2 mins #3 secs
+ # 'scp': b'', # (set_color_pair) Set current color pair to #1
+ # 'scs': b'', # (select_char_set) Select character set, #1
+ # 'scsd': b'', # (start_char_set_def) Start character set definition #1, with #2 characters in the set
+ # 'sdrfq': b'', # (enter_draft_quality) Enter draft-quality mode
+ 'setab': b'\x1b[4%p1%dm', # (set_a_background) Set background color to #1, using ANSI escape
+ 'setaf': b'\x1b[3%p1%dm', # (set_a_foreground) Set foreground color to #1, using ANSI escape
+ 'setb': b'\x1b[4%?%p1%{1}%=%t4%e%p1%{3}%=%t6%e%p1%{4}%=%t1%e%p1%{6}%=%t3%e%p1%d%;m', # (set_background) Set background color #1
+ # 'setcolor': b'', # (set_color_band) Change to ribbon color #1
+ 'setf': b'\x1b[3%?%p1%{1}%=%t4%e%p1%{3}%=%t6%e%p1%{4}%=%t1%e%p1%{6}%=%t3%e%p1%d%;m', # (set_foreground) Set foreground color #1
+ 'sgr0': b'\x1b(B\x1b[m', # (exit_attribute_mode) turn off all attributes
+ 'sgr': b'%?%p9%t\x1b(0%e\x1b(B%;\x1b[0%?%p6%t;1%;%?%p5%t;2%;%?%p2%t;4%;%?%p1%p3%|%t;7%;%?%p4%t;5%;%?%p7%t;8%;m', # (set_attributes) define video attributes #1-#9 (PG9)
+ 'sitm': b'\x1b[3m', # (enter_italics_mode) Enter italic mode
+ # 'slines': b'', # (set_page_length) Set page length to #1 lines
+ # 'slm': b'', # (enter_leftward_mode) Start leftward carriage motion
+ 'smacs': b'\x1b(0', # (enter_alt_charset_mode) start alternate character set (P)
+ 'smam': b'\x1b[?7h', # (enter_am_mode) turn on automatic margins
+ 'smcup': b'\x1b[?1049h\x1b[22;0;0t', # (enter_ca_mode) string to start programs using cup
+ # 'smdc': b'', # (enter_delete_mode) enter delete mode
+ # 'smgb': b'', # (set_bottom_margin) Set bottom margin at current line
+ # 'smgbp': b'', # (set_bottom_margin_parm) Set bottom margin at line #1 or (if smgtp is not given) #2 lines from bottom
+ # 'smgl': b'', # (set_left_margin) set left soft margin at current column. See smgl. (ML is not in BSD termcap).
+ # 'smglp': b'', # (set_left_margin_parm) Set left (right) margin at column #1
+ # 'smglr': b'', # (set_lr_margin) Set both left and right margins to #1, #2. (ML is not in BSD termcap).
+ # 'smgr': b'', # (set_right_margin) set right soft margin at current column
+ # 'smgrp': b'', # (set_right_margin_parm) Set right margin at column #1
+ # 'smgtb': b'', # (set_tb_margin) Sets both top and bottom margins to #1, #2
+ # 'smgt': b'', # (set_top_margin) Set top margin at current line
+ # 'smgtp': b'', # (set_top_margin_parm) Set top (bottom) margin at row #1
+ # 'smicm': b'', # (enter_micro_mode) Start micro-motion mode
+ 'smir': b'\x1b[4h', # (enter_insert_mode) enter insert mode
+ 'smkx': b'\x1b[?1h\x1b=', # (keypad_xmit) enter 'keyboard_transmit' mode
+ # 'smln': b'', # (label_on) turn on soft labels
+ 'smm': b'\x1b[?1034h', # (meta_on) turn on meta mode (8th-bit on)
+ # 'smpch': b'', # (enter_pc_charset_mode) Enter PC character display mode
+ # 'smsc': b'', # (enter_scancode_mode) Enter PC scancode mode
+ 'smso': b'\x1b[7m', # (enter_standout_mode) begin standout mode
+ 'smul': b'\x1b[4m', # (enter_underline_mode) begin underline mode
+ # 'smxon': b'', # (enter_xon_mode) turn on xon/xoff handshaking
+ # 'snlq': b'', # (enter_near_letter_quality) Enter NLQ mode
+ # 'snrmq': b'', # (enter_normal_quality) Enter normal-quality mode
+ # 'sshm': b'', # (enter_shadow_mode) Enter shadow-print mode
+ # 'ssubm': b'', # (enter_subscript_mode) Enter subscript mode
+ # 'ssupm': b'', # (enter_superscript_mode) Enter superscript mode
+ # 'subcs': b'', # (subscript_characters) List of subscriptable characters
+ # 'sum': b'', # (enter_upward_mode) Start upward carriage motion
+ # 'supcs': b'', # (superscript_characters) List of superscriptable characters
+ # 'swidm': b'', # (enter_doublewide_mode) Enter double-wide mode
+ 'tbc': b'\x1b[3g', # (clear_all_tabs) clear all tab stops (P)
+ # 'tone': b'', # (tone) select touch tone dialing
+ # 'tsl': b'', # (to_status_line) move to status line, column #1
+ # 'u0': b'', # (user0) User string #0
+ # 'u1': b'', # (user1) User string #1
+ # 'u2': b'', # (user2) User string #2
+ # 'u3': b'', # (user3) User string #3
+ # 'u4': b'', # (user4) User string #4
+ # 'u5': b'', # (user5) User string #5
+ 'u6': b'\x1b[%i%d;%dR', # (user6) User string #6 [cursor position report (equiv. to ANSI/ECMA-48 CPR)]
+ 'u7': b'\x1b[6n', # (user7) User string #7 [cursor position request (equiv. to VT100/ANSI/ECMA-48 DSR 6)]
+ 'u8': b'\x1b[?%[;0123456789]c', # (user8) User string #8 [terminal answerback description]
+ 'u9': b'\x1b[c', # (user9) User string #9 [terminal enquire string (equiv. to ANSI/ECMA-48 DA)]
+ # 'uc': b'', # (underline_char) underline char and move past it
+ 'vpa': b'\x1b[%i%p1%dd', # (row_address) vertical position #1 absolute (P)
+ # 'wait': b'', # (wait_tone) wait for dial-tone
+ # 'wind': b'', # (set_window) current window is lines #1-#2 cols #3-#4
+ # 'wingo': b'', # (goto_window) go to window #1
+ # 'xoffc': b'', # (xoff_character) XOFF character
+ # 'xonc': b'', # (xon_character) XON character
+ # 'zerom': b'', # (zero_motion) No motion for subsequent character
+}
diff --git a/third_party/python/jinxed/jinxed/terminfo/xterm_256color.py b/third_party/python/jinxed/jinxed/terminfo/xterm_256color.py
new file mode 100644
index 0000000000..6c7e2c16ac
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/terminfo/xterm_256color.py
@@ -0,0 +1,28 @@
+"""
+xterm-256color terminal info
+
+The values are as reported by infocmp on Fedora 30 with ncurses 6.1
+"""
+
+from .xterm import BOOL_CAPS, NUM_CAPS, STR_CAPS
+
+BOOL_CAPS = BOOL_CAPS[:]
+NUM_CAPS = NUM_CAPS.copy()
+STR_CAPS = STR_CAPS.copy()
+
+# Added
+BOOL_CAPS.append('ccc')
+STR_CAPS['initc'] = b'\x1b]4;%p1%d;rgb\x5c:%p2%{255}%*%{1000}%/%2.2X/' \
+ b'%p3%{255}%*%{1000}%/%2.2X/%p4%{255}%*%{1000}%/%2.2X\x1b\x5c'
+STR_CAPS['oc'] = b'\x1b]104\007'
+
+# Removed
+del STR_CAPS['setb']
+del STR_CAPS['setf']
+
+# Modified
+NUM_CAPS['colors'] = 256
+NUM_CAPS['pairs'] = 65536
+STR_CAPS['rs1'] = b'\x1bc\x1b]104\007'
+STR_CAPS['setab'] = b'\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m'
+STR_CAPS['setaf'] = b'\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m'
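
The three tables exported above are ordinary Python containers (a list and two dicts), so a consumer can look up capabilities directly instead of going through a terminfo database. A minimal sketch, assuming the module path shown in this diff::

    from jinxed.terminfo.xterm_256color import BOOL_CAPS, NUM_CAPS, STR_CAPS

    NUM_CAPS['colors']   # -> 256, per the "Modified" block above
    'ccc' in BOOL_CAPS   # -> True: palette entries can be redefined
    STR_CAPS['setaf']    # -> parameterized 256-color foreground sequence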
diff --git a/third_party/python/jinxed/jinxed/terminfo/xterm_256colors.py b/third_party/python/jinxed/jinxed/terminfo/xterm_256colors.py
new file mode 100644
index 0000000000..1e005a1bd9
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/terminfo/xterm_256colors.py
@@ -0,0 +1,28 @@
+"""
+xterm-256colors terminal info
+
+The values are as reported by infocmp on Fedora 30 with ncurses 6.1
+"""
+
+from .xterm import BOOL_CAPS, NUM_CAPS, STR_CAPS
+
+BOOL_CAPS = BOOL_CAPS[:]
+NUM_CAPS = NUM_CAPS.copy()
+STR_CAPS = STR_CAPS.copy()
+
+# Added
+BOOL_CAPS.append('ccc')
+STR_CAPS['initc'] = b'\x1b]4;%p1%d;rgb\x5c:%p2%{255}%*%{1000}%/%2.2X/' \
+ b'%p3%{255}%*%{1000}%/%2.2X/%p4%{255}%*%{1000}%/%2.2X\x1b\x5c'
+STR_CAPS['oc'] = b'\x1b]104\007'
+
+# Removed
+del STR_CAPS['setb']
+del STR_CAPS['setf']
+
+# Modified
+NUM_CAPS['colors'] = 256
+NUM_CAPS['pairs'] = 65536
+STR_CAPS['rs1'] = b'\x1bc\x1b]104\007'
+STR_CAPS['setab'] = b'\x1b[%?%p1%{8}%<%t4%p1%d%e%p1%{16}%<%t10%p1%{8}%-%d%e48;5;%p1%d%;m'
+STR_CAPS['setaf'] = b'\x1b[%?%p1%{8}%<%t3%p1%d%e%p1%{16}%<%t9%p1%{8}%-%d%e38;5;%p1%d%;m'
diff --git a/third_party/python/jinxed/jinxed/win32.py b/third_party/python/jinxed/jinxed/win32.py
new file mode 100644
index 0000000000..593a275a0a
--- /dev/null
+++ b/third_party/python/jinxed/jinxed/win32.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 - 2021 Avram Lubkin, All Rights Reserved
+
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Support functions and wrappers for calls to the Windows API
+"""
+
+import atexit
+import codecs
+from collections import namedtuple
+import ctypes
+from ctypes import wintypes
+import io
+import msvcrt # pylint: disable=import-error
+import os
+import platform
+import sys
+
+from jinxed._util import mock, IS_WINDOWS
+
+# Workaround for auto-doc generation on Linux
+if not IS_WINDOWS:
+ ctypes = mock.Mock() # noqa: F811
+
+LPDWORD = ctypes.POINTER(wintypes.DWORD)
+COORD = wintypes._COORD # pylint: disable=protected-access
+
+# Console input modes
+ENABLE_ECHO_INPUT = 0x0004
+ENABLE_EXTENDED_FLAGS = 0x0080
+ENABLE_INSERT_MODE = 0x0020
+ENABLE_LINE_INPUT = 0x0002
+ENABLE_MOUSE_INPUT = 0x0010
+ENABLE_PROCESSED_INPUT = 0x0001
+ENABLE_QUICK_EDIT_MODE = 0x0040
+ENABLE_WINDOW_INPUT = 0x0008
+ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
+
+# Console output modes
+ENABLE_PROCESSED_OUTPUT = 0x0001
+ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
+ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+DISABLE_NEWLINE_AUTO_RETURN = 0x0008
+ENABLE_LVB_GRID_WORLDWIDE = 0x0010
+
+if IS_WINDOWS and tuple(int(num) for num in platform.version().split('.')) >= (10, 0, 10586):
+ VTMODE_SUPPORTED = True
+ CBREAK_MODE = ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_INPUT
+ RAW_MODE = ENABLE_VIRTUAL_TERMINAL_INPUT
+else:
+ VTMODE_SUPPORTED = False
+ CBREAK_MODE = ENABLE_PROCESSED_INPUT
+ RAW_MODE = 0
+
+GTS_SUPPORTED = hasattr(os, 'get_terminal_size')
+TerminalSize = namedtuple('TerminalSize', ('columns', 'lines'))
+
+
+class ConsoleScreenBufferInfo(ctypes.Structure): # pylint: disable=too-few-public-methods
+ """
+ Python representation of CONSOLE_SCREEN_BUFFER_INFO structure
+ https://docs.microsoft.com/en-us/windows/console/console-screen-buffer-info-str
+ """
+
+ _fields_ = [('dwSize', COORD),
+ ('dwCursorPosition', COORD),
+ ('wAttributes', wintypes.WORD),
+ ('srWindow', wintypes.SMALL_RECT),
+ ('dwMaximumWindowSize', COORD)]
+
+
+CSBIP = ctypes.POINTER(ConsoleScreenBufferInfo)
+
+
+def _check_bool(result, func, args):  # pylint: disable=unused-argument
+    """
+    Used as an ``errcheck`` error handler for Windows API calls
+    Raises :py:exc:`OSError` with the last Windows error if the call was unsuccessful
+    """
+
+ if not result:
+ raise ctypes.WinError(ctypes.get_last_error())
+ return args
+
+
+KERNEL32 = ctypes.WinDLL('kernel32', use_last_error=True)
+
+KERNEL32.GetConsoleCP.errcheck = _check_bool
+KERNEL32.GetConsoleCP.argtypes = tuple()
+
+KERNEL32.GetConsoleMode.errcheck = _check_bool
+KERNEL32.GetConsoleMode.argtypes = (wintypes.HANDLE, LPDWORD)
+
+KERNEL32.SetConsoleMode.errcheck = _check_bool
+KERNEL32.SetConsoleMode.argtypes = (wintypes.HANDLE, wintypes.DWORD)
+
+KERNEL32.GetConsoleScreenBufferInfo.errcheck = _check_bool
+KERNEL32.GetConsoleScreenBufferInfo.argtypes = (wintypes.HANDLE, CSBIP)
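+
+# With ``errcheck`` installed, a failing call raises OSError instead of
+# silently returning zero. For example (hypothetical invalid handle):
+#
+#     mode = wintypes.DWORD()
+#     KERNEL32.GetConsoleMode(wintypes.HANDLE(-1), ctypes.byref(mode))
+#     # -> OSError: [WinError 6] The handle is invalid.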
+
+
+def get_csbi(filehandle=None):
+ """
+ Args:
+ filehandle(int): Windows filehandle object as returned by :py:func:`msvcrt.get_osfhandle`
+
+ Returns:
+ :py:class:`ConsoleScreenBufferInfo`: CONSOLE_SCREEN_BUFFER_INFO_ structure
+
+ Wrapper for GetConsoleScreenBufferInfo_
+
+ If ``filehandle`` is :py:data:`None`, uses the filehandle of :py:data:`sys.__stdout__`.
+
+ """
+
+ if filehandle is None:
+ filehandle = msvcrt.get_osfhandle(sys.__stdout__.fileno())
+
+ csbi = ConsoleScreenBufferInfo()
+ KERNEL32.GetConsoleScreenBufferInfo(filehandle, ctypes.byref(csbi))
+ return csbi
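+
+# A minimal usage sketch (Windows only; assumes stdout is attached to a
+# real console):
+#
+#     csbi = get_csbi()
+#     cursor = (csbi.dwCursorPosition.X, csbi.dwCursorPosition.Y)
+#     width = csbi.srWindow.Right - csbi.srWindow.Left + 1
+#     height = csbi.srWindow.Bottom - csbi.srWindow.Top + 1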
+
+
+def get_console_input_encoding():
+    """
+    Returns:
+        str: Python encoding name for the console input code page, or :py:data:`None`
+
+    Query for the console input code page and provide an encoding
+
+    If the code page cannot be resolved to a Python encoding, :py:data:`None` is returned.
+    """
+
+ try:
+ encoding = 'cp%d' % KERNEL32.GetConsoleCP()
+ except OSError:
+ return None
+
+ try:
+ codecs.lookup(encoding)
+ except LookupError:
+ return None
+
+ return encoding
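+
+# Example (result is system-dependent):
+#
+#     get_console_input_encoding()  # -> e.g. 'cp437', or None if the code
+#                                   #    page has no matching Python codec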
+
+
+def get_console_mode(filehandle):
+ """
+ Args:
+ filehandle(int): Windows filehandle object as returned by :py:func:`msvcrt.get_osfhandle`
+
+ Returns:
+ int: Current console mode
+
+ Raises:
+ OSError: Error calling Windows API
+
+ Wrapper for GetConsoleMode_
+ """
+
+ mode = wintypes.DWORD()
+ KERNEL32.GetConsoleMode(filehandle, ctypes.byref(mode))
+ return mode.value
+
+
+def set_console_mode(filehandle, mode):
+    """
+    Args:
+        filehandle(int): Windows filehandle object as returned by :py:func:`msvcrt.get_osfhandle`
+        mode(int): Desired console mode
+
+    Returns:
+        bool: True on success (a failed call raises :py:exc:`OSError` via ``errcheck``)
+
+    Raises:
+        OSError: Error calling Windows API
+
+ Wrapper for SetConsoleMode_
+ """
+
+ return bool(KERNEL32.SetConsoleMode(filehandle, mode))
+
+
+def setcbreak(filehandle):
+ """
+ Args:
+ filehandle(int): Windows filehandle object as returned by :py:func:`msvcrt.get_osfhandle`
+
+ Raises:
+ OSError: Error calling Windows API
+
+ Convenience function which mimics :py:func:`tty.setcbreak` behavior
+
+ All console input options are disabled except ``ENABLE_PROCESSED_INPUT``
+ and, if supported, ``ENABLE_VIRTUAL_TERMINAL_INPUT``
+ """
+
+ set_console_mode(filehandle, CBREAK_MODE)
+
+
+def setraw(filehandle):
+ """
+ Args:
+ filehandle(int): Windows filehandle object as returned by :py:func:`msvcrt.get_osfhandle`
+
+ Raises:
+ OSError: Error calling Windows API
+
+ Convenience function which mimics :py:func:`tty.setraw` behavior
+
+ All console input options are disabled except, if supported, ``ENABLE_VIRTUAL_TERMINAL_INPUT``
+ """
+
+ set_console_mode(filehandle, RAW_MODE)
+
+
+def enable_vt_mode(filehandle=None):
+ """
+ Args:
+ filehandle(int): Windows filehandle object as returned by :py:func:`msvcrt.get_osfhandle`
+
+ Raises:
+ OSError: Error calling Windows API
+
+ Enables virtual terminal processing mode for the given console
+
+ If ``filehandle`` is :py:data:`None`, uses the filehandle of :py:data:`sys.__stdout__`.
+ """
+
+ if filehandle is None:
+ filehandle = msvcrt.get_osfhandle(sys.__stdout__.fileno())
+
+ mode = get_console_mode(filehandle)
+ mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING
+ set_console_mode(filehandle, mode)
+
+
+def get_terminal_size(fd): # pylint: disable=invalid-name
+ """
+ Args:
+ fd(int): Python file descriptor
+
+ Returns:
+ :py:class:`os.terminal_size`: Named tuple representing terminal size
+
+ Convenience function for getting terminal size
+
+ In Python 3.3 and above, this is a wrapper for :py:func:`os.get_terminal_size`.
+ In older versions of Python, this function calls GetConsoleScreenBufferInfo_.
+ """
+
+ # In Python 3.3+ we can let the standard library handle this
+ if GTS_SUPPORTED:
+ return os.get_terminal_size(fd)
+
+ handle = msvcrt.get_osfhandle(fd)
+ window = get_csbi(handle).srWindow
+ return TerminalSize(window.Right - window.Left + 1, window.Bottom - window.Top + 1)
+
+
+def flush_and_set_console(fd, mode):  # pylint: disable=invalid-name
+    """
+    Args:
+        fd(int): Python file descriptor
+        mode(int): Desired console mode
+
+ Attempts to set console to specified mode, but will not raise on failure
+
+ If the file descriptor is STDOUT or STDERR, attempts to flush first
+ """
+
+ try:
+ if fd in (sys.__stdout__.fileno(), sys.__stderr__.fileno()):
+ sys.__stdout__.flush()
+ sys.__stderr__.flush()
+ except (AttributeError, TypeError, io.UnsupportedOperation):
+ pass
+
+ try:
+ filehandle = msvcrt.get_osfhandle(fd)
+ set_console_mode(filehandle, mode)
+ except OSError:
+ pass
+
+
+def get_term(fd, fallback=True):  # pylint: disable=invalid-name
+    """
+    Args:
+        fd(int): Python file descriptor
+        fallback(bool): Use fallback terminal type if the type cannot be determined
+
+    Returns:
+ str: Terminal type
+
+ Attempts to determine and enable the current terminal type
+
+ The current logic is:
+
+ - If TERM is defined in the environment, the value is returned
+ - Else, if ANSICON is defined in the environment, ``'ansicon'`` is returned
+ - Else, if virtual terminal mode is natively supported,
+ it is enabled and ``'vtwin10'`` is returned
+ - Else, if ``fallback`` is ``True``, Ansicon is loaded, and ``'ansicon'`` is returned
+ - If no other conditions are satisfied, ``'unknown'`` is returned
+
+ This logic may change in the future as additional terminal types are added.
+ """
+
+ # First try TERM
+ term = os.environ.get('TERM', None)
+
+ if term is None:
+
+ # See if ansicon is enabled
+ if os.environ.get('ANSICON', None):
+ term = 'ansicon'
+
+ # See if Windows Terminal is being used
+ elif os.environ.get('WT_SESSION', None):
+ term = 'vtwin10'
+
+ # See if the version of Windows supports VTMODE
+ elif VTMODE_SUPPORTED:
+ try:
+ filehandle = msvcrt.get_osfhandle(fd)
+ mode = get_console_mode(filehandle)
+ except OSError:
+ term = 'unknown'
+ else:
+ atexit.register(flush_and_set_console, fd, mode)
+ # pylint: disable=unsupported-binary-operation
+ set_console_mode(filehandle, mode | ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ term = 'vtwin10'
+
+ # Currently falling back to Ansicon for older versions of Windows
+ elif fallback:
+ import ansicon # pylint: disable=import-error,import-outside-toplevel
+ ansicon.load()
+
+ try:
+ filehandle = msvcrt.get_osfhandle(fd)
+ mode = get_console_mode(filehandle)
+ except OSError:
+ term = 'unknown'
+ else:
+ atexit.register(flush_and_set_console, fd, mode)
+ set_console_mode(filehandle, mode ^ ENABLE_WRAP_AT_EOL_OUTPUT)
+ term = 'ansicon'
+
+ else:
+ term = 'unknown'
+
+ return term
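
Putting the pieces together: a typical caller resolves the terminal type once, then temporarily switches the input console out of line-buffered mode. A sketch under the assumption of a real Windows console, using only the functions defined above::

    import sys
    import msvcrt
    from jinxed import win32

    fd = sys.__stdout__.fileno()
    term = win32.get_term(fd)          # e.g. 'vtwin10', 'ansicon' or 'unknown'

    stdin_handle = msvcrt.get_osfhandle(sys.__stdin__.fileno())
    old_mode = win32.get_console_mode(stdin_handle)
    try:
        win32.setcbreak(stdin_handle)  # processed, unbuffered input
        cols, lines = win32.get_terminal_size(fd)
    finally:
        win32.set_console_mode(stdin_handle, old_mode)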
diff --git a/third_party/python/jsmin/CHANGELOG.txt b/third_party/python/jsmin/CHANGELOG.txt
new file mode 100644
index 0000000000..1adfa9b9a7
--- /dev/null
+++ b/third_party/python/jsmin/CHANGELOG.txt
@@ -0,0 +1,79 @@
+Changelog
+=========
+
+v3.0.0 (2021-09-08) Ben Bradshaw
+--------------------------------
+
+- Breaking Change: Removed support for Python 2
+
+- Removed usage of use_2to3 in setup.py
+
+v2.2.2 (2017-05-01) Tikitu de Jager
+-----------------------------------
+
+- Add license headers to code files (fixes i#17)
+
+- Remove mercurial files (fixes #20)
+
+v2.2.1 (2016-03-06) Tikitu de Jager
+-----------------------------------
+
+- Fix #14: Infinite loop on `return x / 1;`
+
+v2.2.0 (2015-12-19) Tikitu de Jager
+-----------------------------------
+
+- Merge #13: Preserve "loud comments" starting with `/*!`
+
+ These are commonly used for copyright notices, and are preserved by various
+ other minifiers (e.g. YUI Compressor).
+
+v2.1.6 (2015-10-14) Tikitu de Jager
+-----------------------------------
+
+- Fix #12: Newline following a regex literal should not be elided.
+
+v2.1.5 (2015-10-11) Tikitu de Jager
+-----------------------------------
+
+- Fix #9: Premature end of statement caused by multi-line comment not
+ adding newline.
+
+- Fix #10: Removing multiline comment separating tokens must leave a space.
+
+- Refactor comment handling for maintainability.
+
+v2.1.4 (2015-08-23) Tikitu de Jager
+-----------------------------------
+
+- Fix #6: regex literal matching comment was not correctly matched.
+
+- Refactor regex literal handling for robustness.
+
+v2.1.3 (2015-08-09) Tikitu de Jager
+-----------------------------------
+
+- Reset issue numbering: issues live in github from now on.
+
+- Fix #1: regex literal was not recognised when occurring directly after `{`.
+
+v2.1.2 (2015-07-12) Tikitu de Jager
+-----------------------------------
+
+- Issue numbers here and below refer to the bitbucket repository.
+
+- Fix #17: bug when JS starts with comment then literal regex.
+
+v2.1.1 (2015-02-14) Tikitu de Jager
+-----------------------------------
+
+- Fix #16: bug returning a literal regex containing escaped forward-slashes.
+
+v2.1.0 (2014-12-24) Tikitu de Jager
+-----------------------------------
+
+- First changelog entries; see README.rst for prior contributors.
+
+- Expose quote_chars parameter to provide just enough unofficial Harmony
+ support to be useful.
+
diff --git a/third_party/python/jsmin/LICENSE.txt b/third_party/python/jsmin/LICENSE.txt
new file mode 100644
index 0000000000..193a85326d
--- /dev/null
+++ b/third_party/python/jsmin/LICENSE.txt
@@ -0,0 +1,23 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Dave St.Germain
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
diff --git a/third_party/python/jsmin/MANIFEST.in b/third_party/python/jsmin/MANIFEST.in
new file mode 100644
index 0000000000..ab30e9acee
--- /dev/null
+++ b/third_party/python/jsmin/MANIFEST.in
@@ -0,0 +1 @@
+include *.txt
diff --git a/third_party/python/jsmin/PKG-INFO b/third_party/python/jsmin/PKG-INFO
new file mode 100644
index 0000000000..c4bbd566aa
--- /dev/null
+++ b/third_party/python/jsmin/PKG-INFO
@@ -0,0 +1,196 @@
+Metadata-Version: 1.2
+Name: jsmin
+Version: 3.0.0
+Summary: JavaScript minifier.
+Home-page: https://github.com/tikitu/jsmin/
+Author: Dave St.Germain
+Author-email: dave@st.germa.in
+Maintainer: Tikitu de Jager
+Maintainer-email: tikitu+jsmin@logophile.org
+License: MIT License
+Description: =====
+ jsmin
+ =====
+
+ JavaScript minifier.
+
+ Usage
+ =====
+
+ .. code:: python
+
+ from jsmin import jsmin
+ with open('myfile.js') as js_file:
+ minified = jsmin(js_file.read())
+
+        You can also run it as a command-line tool::
+
+ python -m jsmin myfile.js
+
+ NB: ``jsmin`` makes no attempt to be compatible with
+ `ECMAScript 6 / ES.next / Harmony <http://wiki.ecmascript.org/doku.php?id=harmony:specification_drafts>`_.
+ The current maintainer does not intend to add ES6-compatibility. If you would
+ like to take over maintenance and update ``jsmin`` for ES6, please contact
+ `Tikitu de Jager <mailto:tikitu+jsmin@logophile.org>`_. Pull requests are also
+ welcome, of course, but my time to review them is somewhat limited these days.
+
+ If you're using ``jsmin`` on ES6 code, though, you might find the ``quote_chars``
+ parameter useful:
+
+ .. code:: python
+
+ from jsmin import jsmin
+ with open('myfile.js') as js_file:
+ minified = jsmin(js_file.read(), quote_chars="'\"`")
+
+
+ Where to get it
+ ===============
+
+ * install the package `from pypi <https://pypi.python.org/pypi/jsmin/>`_
+ * get the latest release `from latest-release on github <https://github.com/tikitu/jsmin/tree/latest-release/jsmin>`_
+ * get the development version `from master on github <https://github.com/tikitu/jsmin/>`_
+
+
+ Python 2 support removed
+ ========================
+
+ Python 2 support was removed in version 3.0.0. If you need to support Python 2, please use version 2.2.2 with setuptools<58.
+
+ Contributing
+ ============
+
+ `Issues <https://github.com/tikitu/jsmin/issues>`_ and `Pull requests <https://github.com/tikitu/jsmin/pulls>`_
+ will be gratefully received on Github. The project used to be hosted
+ `on bitbucket <https://bitbucket.org/dcs/jsmin/>`_ and old issues can still be
+ found there.
+
+ If possible, please make separate pull requests for tests and for code: tests will be added to the `latest-release` branch while code will go to `master`.
+
+        Unless you request otherwise, your Github identity will be added to the contributors' list below; if you prefer a
+ different name feel free to add it in your pull request instead. (If you prefer not to be mentioned you'll have to let
+ the maintainer know somehow.)
+
+ Build/test status
+ =================
+
+ Both branches are tested with Travis: https://travis-ci.org/tikitu/jsmin
+
+ The `latest-release` branch (the version on PyPI plus any new tests) is tested against CPython 3.
+ Currently:
+
+ .. image:: https://travis-ci.org/tikitu/jsmin.png?branch=latest-release
+
+ If that branch is failing that means there's a new test that fails on *the latest released version on pypi*, with no fix yet
+ released.
+
+ The `master` branch (development version, might be ahead of latest released version) is tested against CPython 3.
+ Currently:
+
+ .. image:: https://travis-ci.org/tikitu/jsmin.png?branch=master
+
+ If `master` is failing don't use it, but as long as `latest-release` is passing the pypi release should be ok.
+
+ Contributors (chronological commit order)
+ =========================================
+
+ * `Dave St.Germain <https://bitbucket.org/dcs>`_ (original author)
+ * `Hans weltar <https://bitbucket.org/hansweltar>`_
+ * `Tikitu de Jager <mailto:tikitu+jsmin@logophile.org>`_ (current maintainer)
+ * https://bitbucket.org/rennat
+ * `Nick Alexander <https://bitbucket.org/ncalexan>`_
+ * `Gennady Kovshenin <https://github.com/soulseekah>`_
+ * `Matt Molyneaux <https://github.com/moggers87>`_
+ * `Albert Wang <https://github.com/albertyw>`_
+ * `Ben Bradshaw <https://github.com/serenecloud>`_
+
+ Changelog
+ =========
+
+ v3.0.0 (2021-09-08) Ben Bradshaw
+ --------------------------------
+
+ - Breaking Change: Removed support for Python 2
+
+ - Removed usage of use_2to3 in setup.py
+
+ v2.2.2 (2017-05-01) Tikitu de Jager
+ -----------------------------------
+
+ - Add license headers to code files (fixes i#17)
+
+ - Remove mercurial files (fixes #20)
+
+ v2.2.1 (2016-03-06) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #14: Infinite loop on `return x / 1;`
+
+ v2.2.0 (2015-12-19) Tikitu de Jager
+ -----------------------------------
+
+ - Merge #13: Preserve "loud comments" starting with `/*!`
+
+ These are commonly used for copyright notices, and are preserved by various
+ other minifiers (e.g. YUI Compressor).
+
+ v2.1.6 (2015-10-14) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #12: Newline following a regex literal should not be elided.
+
+ v2.1.5 (2015-10-11) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #9: Premature end of statement caused by multi-line comment not
+ adding newline.
+
+ - Fix #10: Removing multiline comment separating tokens must leave a space.
+
+ - Refactor comment handling for maintainability.
+
+ v2.1.4 (2015-08-23) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #6: regex literal matching comment was not correctly matched.
+
+ - Refactor regex literal handling for robustness.
+
+ v2.1.3 (2015-08-09) Tikitu de Jager
+ -----------------------------------
+
+ - Reset issue numbering: issues live in github from now on.
+
+ - Fix #1: regex literal was not recognised when occurring directly after `{`.
+
+ v2.1.2 (2015-07-12) Tikitu de Jager
+ -----------------------------------
+
+ - Issue numbers here and below refer to the bitbucket repository.
+
+ - Fix #17: bug when JS starts with comment then literal regex.
+
+ v2.1.1 (2015-02-14) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #16: bug returning a literal regex containing escaped forward-slashes.
+
+ v2.1.0 (2014-12-24) Tikitu de Jager
+ -----------------------------------
+
+ - First changelog entries; see README.rst for prior contributors.
+
+ - Expose quote_chars parameter to provide just enough unofficial Harmony
+ support to be useful.
+
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Pre-processors
+Classifier: Topic :: Text Processing :: Filters
diff --git a/third_party/python/jsmin/README.rst b/third_party/python/jsmin/README.rst
new file mode 100644
index 0000000000..8f6a9bdec5
--- /dev/null
+++ b/third_party/python/jsmin/README.rst
@@ -0,0 +1,95 @@
+=====
+jsmin
+=====
+
+JavaScript minifier.
+
+Usage
+=====
+
+.. code:: python
+
+ from jsmin import jsmin
+ with open('myfile.js') as js_file:
+ minified = jsmin(js_file.read())
+
+You can also run it as a command-line tool::
+
+ python -m jsmin myfile.js
+
+NB: ``jsmin`` makes no attempt to be compatible with
+`ECMAScript 6 / ES.next / Harmony <http://wiki.ecmascript.org/doku.php?id=harmony:specification_drafts>`_.
+The current maintainer does not intend to add ES6-compatibility. If you would
+like to take over maintenance and update ``jsmin`` for ES6, please contact
+`Tikitu de Jager <mailto:tikitu+jsmin@logophile.org>`_. Pull requests are also
+welcome, of course, but my time to review them is somewhat limited these days.
+
+If you're using ``jsmin`` on ES6 code, though, you might find the ``quote_chars``
+parameter useful:
+
+.. code:: python
+
+ from jsmin import jsmin
+ with open('myfile.js') as js_file:
+ minified = jsmin(js_file.read(), quote_chars="'\"`")
+
+
+Where to get it
+===============
+
+* install the package `from pypi <https://pypi.python.org/pypi/jsmin/>`_
+* get the latest release `from latest-release on github <https://github.com/tikitu/jsmin/tree/latest-release/jsmin>`_
+* get the development version `from master on github <https://github.com/tikitu/jsmin/>`_
+
+
+Python 2 support removed
+========================
+
+Python 2 support was removed in version 3.0.0. If you need to support Python 2, please use version 2.2.2 with setuptools<58.
+
+Contributing
+============
+
+`Issues <https://github.com/tikitu/jsmin/issues>`_ and `Pull requests <https://github.com/tikitu/jsmin/pulls>`_
+will be gratefully received on Github. The project used to be hosted
+`on bitbucket <https://bitbucket.org/dcs/jsmin/>`_ and old issues can still be
+found there.
+
+If possible, please make separate pull requests for tests and for code: tests will be added to the `latest-release` branch while code will go to `master`.
+
+Unless you request otherwise, your Github identity will be added to the contributors' list below; if you prefer a
+different name feel free to add it in your pull request instead. (If you prefer not to be mentioned you'll have to let
+the maintainer know somehow.)
+
+Build/test status
+=================
+
+Both branches are tested with Travis: https://travis-ci.org/tikitu/jsmin
+
+The `latest-release` branch (the version on PyPI plus any new tests) is tested against CPython 3.
+Currently:
+
+.. image:: https://travis-ci.org/tikitu/jsmin.png?branch=latest-release
+
+If that branch is failing that means there's a new test that fails on *the latest released version on pypi*, with no fix yet
+released.
+
+The `master` branch (development version, might be ahead of latest released version) is tested against CPython 3.
+Currently:
+
+.. image:: https://travis-ci.org/tikitu/jsmin.png?branch=master
+
+If `master` is failing don't use it, but as long as `latest-release` is passing the pypi release should be ok.
+
+Contributors (chronological commit order)
+=========================================
+
+* `Dave St.Germain <https://bitbucket.org/dcs>`_ (original author)
+* `Hans weltar <https://bitbucket.org/hansweltar>`_
+* `Tikitu de Jager <mailto:tikitu+jsmin@logophile.org>`_ (current maintainer)
+* https://bitbucket.org/rennat
+* `Nick Alexander <https://bitbucket.org/ncalexan>`_
+* `Gennady Kovshenin <https://github.com/soulseekah>`_
+* `Matt Molyneaux <https://github.com/moggers87>`_
+* `Albert Wang <https://github.com/albertyw>`_
+* `Ben Bradshaw <https://github.com/serenecloud>`_
diff --git a/third_party/python/jsmin/jsmin.egg-info/PKG-INFO b/third_party/python/jsmin/jsmin.egg-info/PKG-INFO
new file mode 100644
index 0000000000..c4bbd566aa
--- /dev/null
+++ b/third_party/python/jsmin/jsmin.egg-info/PKG-INFO
@@ -0,0 +1,196 @@
+Metadata-Version: 1.2
+Name: jsmin
+Version: 3.0.0
+Summary: JavaScript minifier.
+Home-page: https://github.com/tikitu/jsmin/
+Author: Dave St.Germain
+Author-email: dave@st.germa.in
+Maintainer: Tikitu de Jager
+Maintainer-email: tikitu+jsmin@logophile.org
+License: MIT License
+Description: =====
+ jsmin
+ =====
+
+ JavaScript minifier.
+
+ Usage
+ =====
+
+ .. code:: python
+
+ from jsmin import jsmin
+ with open('myfile.js') as js_file:
+ minified = jsmin(js_file.read())
+
+        You can also run it as a command-line tool::
+
+ python -m jsmin myfile.js
+
+ NB: ``jsmin`` makes no attempt to be compatible with
+ `ECMAScript 6 / ES.next / Harmony <http://wiki.ecmascript.org/doku.php?id=harmony:specification_drafts>`_.
+ The current maintainer does not intend to add ES6-compatibility. If you would
+ like to take over maintenance and update ``jsmin`` for ES6, please contact
+ `Tikitu de Jager <mailto:tikitu+jsmin@logophile.org>`_. Pull requests are also
+ welcome, of course, but my time to review them is somewhat limited these days.
+
+ If you're using ``jsmin`` on ES6 code, though, you might find the ``quote_chars``
+ parameter useful:
+
+ .. code:: python
+
+ from jsmin import jsmin
+ with open('myfile.js') as js_file:
+ minified = jsmin(js_file.read(), quote_chars="'\"`")
+
+
+ Where to get it
+ ===============
+
+ * install the package `from pypi <https://pypi.python.org/pypi/jsmin/>`_
+ * get the latest release `from latest-release on github <https://github.com/tikitu/jsmin/tree/latest-release/jsmin>`_
+ * get the development version `from master on github <https://github.com/tikitu/jsmin/>`_
+
+
+ Python 2 support removed
+ ========================
+
+ Python 2 support was removed in version 3.0.0. If you need to support Python 2, please use version 2.2.2 with setuptools<58.
+
+ Contributing
+ ============
+
+ `Issues <https://github.com/tikitu/jsmin/issues>`_ and `Pull requests <https://github.com/tikitu/jsmin/pulls>`_
+ will be gratefully received on Github. The project used to be hosted
+ `on bitbucket <https://bitbucket.org/dcs/jsmin/>`_ and old issues can still be
+ found there.
+
+ If possible, please make separate pull requests for tests and for code: tests will be added to the `latest-release` branch while code will go to `master`.
+
+        Unless you request otherwise, your Github identity will be added to the contributors' list below; if you prefer a
+ different name feel free to add it in your pull request instead. (If you prefer not to be mentioned you'll have to let
+ the maintainer know somehow.)
+
+ Build/test status
+ =================
+
+ Both branches are tested with Travis: https://travis-ci.org/tikitu/jsmin
+
+ The `latest-release` branch (the version on PyPI plus any new tests) is tested against CPython 3.
+ Currently:
+
+ .. image:: https://travis-ci.org/tikitu/jsmin.png?branch=latest-release
+
+ If that branch is failing that means there's a new test that fails on *the latest released version on pypi*, with no fix yet
+ released.
+
+ The `master` branch (development version, might be ahead of latest released version) is tested against CPython 3.
+ Currently:
+
+ .. image:: https://travis-ci.org/tikitu/jsmin.png?branch=master
+
+ If `master` is failing don't use it, but as long as `latest-release` is passing the pypi release should be ok.
+
+ Contributors (chronological commit order)
+ =========================================
+
+ * `Dave St.Germain <https://bitbucket.org/dcs>`_ (original author)
+ * `Hans weltar <https://bitbucket.org/hansweltar>`_
+ * `Tikitu de Jager <mailto:tikitu+jsmin@logophile.org>`_ (current maintainer)
+ * https://bitbucket.org/rennat
+ * `Nick Alexander <https://bitbucket.org/ncalexan>`_
+ * `Gennady Kovshenin <https://github.com/soulseekah>`_
+ * `Matt Molyneaux <https://github.com/moggers87>`_
+ * `Albert Wang <https://github.com/albertyw>`_
+ * `Ben Bradshaw <https://github.com/serenecloud>`_
+
+ Changelog
+ =========
+
+ v3.0.0 (2021-09-08) Ben Bradshaw
+ --------------------------------
+
+ - Breaking Change: Removed support for Python 2
+
+ - Removed usage of use_2to3 in setup.py
+
+ v2.2.2 (2017-05-01) Tikitu de Jager
+ -----------------------------------
+
+ - Add license headers to code files (fixes i#17)
+
+ - Remove mercurial files (fixes #20)
+
+ v2.2.1 (2016-03-06) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #14: Infinite loop on `return x / 1;`
+
+ v2.2.0 (2015-12-19) Tikitu de Jager
+ -----------------------------------
+
+ - Merge #13: Preserve "loud comments" starting with `/*!`
+
+ These are commonly used for copyright notices, and are preserved by various
+ other minifiers (e.g. YUI Compressor).
+
+ v2.1.6 (2015-10-14) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #12: Newline following a regex literal should not be elided.
+
+ v2.1.5 (2015-10-11) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #9: Premature end of statement caused by multi-line comment not
+ adding newline.
+
+ - Fix #10: Removing multiline comment separating tokens must leave a space.
+
+ - Refactor comment handling for maintainability.
+
+ v2.1.4 (2015-08-23) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #6: regex literal matching comment was not correctly matched.
+
+ - Refactor regex literal handling for robustness.
+
+ v2.1.3 (2015-08-09) Tikitu de Jager
+ -----------------------------------
+
+ - Reset issue numbering: issues live in github from now on.
+
+ - Fix #1: regex literal was not recognised when occurring directly after `{`.
+
+ v2.1.2 (2015-07-12) Tikitu de Jager
+ -----------------------------------
+
+ - Issue numbers here and below refer to the bitbucket repository.
+
+ - Fix #17: bug when JS starts with comment then literal regex.
+
+ v2.1.1 (2015-02-14) Tikitu de Jager
+ -----------------------------------
+
+ - Fix #16: bug returning a literal regex containing escaped forward-slashes.
+
+ v2.1.0 (2014-12-24) Tikitu de Jager
+ -----------------------------------
+
+ - First changelog entries; see README.rst for prior contributors.
+
+ - Expose quote_chars parameter to provide just enough unofficial Harmony
+ support to be useful.
+
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Pre-processors
+Classifier: Topic :: Text Processing :: Filters
diff --git a/third_party/python/jsmin/jsmin.egg-info/SOURCES.txt b/third_party/python/jsmin/jsmin.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..ae208f4b60
--- /dev/null
+++ b/third_party/python/jsmin/jsmin.egg-info/SOURCES.txt
@@ -0,0 +1,13 @@
+CHANGELOG.txt
+LICENSE.txt
+MANIFEST.in
+README.rst
+setup.cfg
+setup.py
+jsmin/__init__.py
+jsmin/__main__.py
+jsmin/test.py
+jsmin.egg-info/PKG-INFO
+jsmin.egg-info/SOURCES.txt
+jsmin.egg-info/dependency_links.txt
+jsmin.egg-info/top_level.txt \ No newline at end of file
diff --git a/third_party/python/jsmin/jsmin.egg-info/dependency_links.txt b/third_party/python/jsmin/jsmin.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/jsmin/jsmin.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/jsmin/jsmin.egg-info/top_level.txt b/third_party/python/jsmin/jsmin.egg-info/top_level.txt
new file mode 100644
index 0000000000..79abaa99ee
--- /dev/null
+++ b/third_party/python/jsmin/jsmin.egg-info/top_level.txt
@@ -0,0 +1 @@
+jsmin
diff --git a/third_party/python/jsmin/jsmin/__init__.py b/third_party/python/jsmin/jsmin/__init__.py
new file mode 100644
index 0000000000..c906d14a03
--- /dev/null
+++ b/third_party/python/jsmin/jsmin/__init__.py
@@ -0,0 +1,252 @@
+# vim: set fileencoding=utf-8 :
+
+# This code is original from jsmin by Douglas Crockford, it was translated to
+# Python by Baruch Even. It was rewritten by Dave St.Germain for speed.
+#
+# The MIT License (MIT)
+#
+# Copyright (c) 2013 Dave St.Germain
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+
+import io
+
+__all__ = ['jsmin', 'JavascriptMinify']
+__version__ = '3.0.0'
+
+
+def jsmin(js, **kwargs):
+    """
+    Returns a minified version of the JavaScript string
+    """
+ klass = io.StringIO
+ ins = klass(js)
+ outs = klass()
+ JavascriptMinify(ins, outs, **kwargs).minify()
+ return outs.getvalue()
+
+
+class JavascriptMinify(object):
+ """
+ Minify an input stream of javascript, writing
+ to an output stream
+ """
+
+ def __init__(self, instream=None, outstream=None, quote_chars="'\""):
+ self.ins = instream
+ self.outs = outstream
+ self.quote_chars = quote_chars
+
+ def minify(self, instream=None, outstream=None):
+ if instream and outstream:
+ self.ins, self.outs = instream, outstream
+
+ self.is_return = False
+ self.return_buf = ''
+
+ def write(char):
+ # all of this is to support literal regular expressions.
+ # sigh
+ if char in 'return':
+ self.return_buf += char
+ self.is_return = self.return_buf == 'return'
+ else:
+ self.return_buf = ''
+ self.is_return = self.is_return and char < '!'
+ self.outs.write(char)
+ if self.is_return:
+ self.return_buf = ''
+
+ read = self.ins.read
+
+ space_strings = "abcdefghijklmnopqrstuvwxyz"\
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$\\"
+ self.space_strings = space_strings
+ starters, enders = '{[(+-', '}])+-/' + self.quote_chars
+ newlinestart_strings = starters + space_strings + self.quote_chars
+ newlineend_strings = enders + space_strings + self.quote_chars
+ self.newlinestart_strings = newlinestart_strings
+ self.newlineend_strings = newlineend_strings
+
+ do_newline = False
+ do_space = False
+ escape_slash_count = 0
+ in_quote = ''
+ quote_buf = []
+
+ previous = ';'
+ previous_non_space = ';'
+ next1 = read(1)
+
+ while next1:
+ next2 = read(1)
+ if in_quote:
+ quote_buf.append(next1)
+
+ if next1 == in_quote:
+ numslashes = 0
+ for c in reversed(quote_buf[:-1]):
+ if c != '\\':
+ break
+ else:
+ numslashes += 1
+ if numslashes % 2 == 0:
+ in_quote = ''
+ write(''.join(quote_buf))
+ elif next1 in '\r\n':
+ next2, do_newline = self.newline(
+ previous_non_space, next2, do_newline)
+ elif next1 < '!':
+ if (previous_non_space in space_strings \
+ or previous_non_space > '~') \
+ and (next2 in space_strings or next2 > '~'):
+ do_space = True
+ elif previous_non_space in '-+' and next2 == previous_non_space:
+ # protect against + ++ or - -- sequences
+ do_space = True
+ elif self.is_return and next2 == '/':
+ # returning a regex...
+ write(' ')
+ elif next1 == '/':
+ if do_space:
+ write(' ')
+ if next2 == '/':
+ # Line comment: treat it as a newline, but skip it
+ next2 = self.line_comment(next1, next2)
+ next1 = '\n'
+ next2, do_newline = self.newline(
+ previous_non_space, next2, do_newline)
+ elif next2 == '*':
+ self.block_comment(next1, next2)
+ next2 = read(1)
+ if previous_non_space in space_strings:
+ do_space = True
+ next1 = previous
+ else:
+ if previous_non_space in '{(,=:[?!&|;' or self.is_return:
+ self.regex_literal(next1, next2)
+ # hackish: after regex literal next1 is still /
+ # (it was the initial /, now it's the last /)
+ next2 = read(1)
+ else:
+ write('/')
+ else:
+ if do_newline:
+ write('\n')
+ do_newline = False
+ do_space = False
+ if do_space:
+ do_space = False
+ write(' ')
+
+ write(next1)
+ if next1 in self.quote_chars:
+ in_quote = next1
+ quote_buf = []
+
+ if next1 >= '!':
+ previous_non_space = next1
+
+ if next1 == '\\':
+ escape_slash_count += 1
+ else:
+ escape_slash_count = 0
+
+ previous = next1
+ next1 = next2
+
+ def regex_literal(self, next1, next2):
+ assert next1 == '/' # otherwise we should not be called!
+
+ self.return_buf = ''
+
+ read = self.ins.read
+ write = self.outs.write
+
+ in_char_class = False
+
+ write('/')
+
+ next = next2
+ while next and (next != '/' or in_char_class):
+ write(next)
+ if next == '\\':
+ write(read(1)) # whatever is next is escaped
+ elif next == '[':
+ write(read(1)) # character class cannot be empty
+ in_char_class = True
+ elif next == ']':
+ in_char_class = False
+ next = read(1)
+
+ write('/')
+
+ def line_comment(self, next1, next2):
+ assert next1 == next2 == '/'
+
+ read = self.ins.read
+
+ while next1 and next1 not in '\r\n':
+ next1 = read(1)
+ while next1 and next1 in '\r\n':
+ next1 = read(1)
+
+ return next1
+
+ def block_comment(self, next1, next2):
+ assert next1 == '/'
+ assert next2 == '*'
+
+ read = self.ins.read
+
+ # Skip past first /* and avoid catching on /*/...*/
+ next1 = read(1)
+ next2 = read(1)
+
+ comment_buffer = '/*'
+ while next1 != '*' or next2 != '/':
+ comment_buffer += next1
+ next1 = next2
+ next2 = read(1)
+
+ if comment_buffer.startswith("/*!"):
+ # comment needs preserving
+ self.outs.write(comment_buffer)
+ self.outs.write("*/\n")
+
+
+ def newline(self, previous_non_space, next2, do_newline):
+ read = self.ins.read
+
+ if previous_non_space and (
+ previous_non_space in self.newlineend_strings
+ or previous_non_space > '~'):
+ while 1:
+ if next2 < '!':
+ next2 = read(1)
+ if not next2:
+ break
+ else:
+ if next2 in self.newlinestart_strings \
+ or next2 > '~' or next2 == '/':
+ do_newline = True
+ break
+
+ return next2, do_newline
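
The two entry points above compose: ``jsmin()`` is a convenience wrapper that feeds in-memory streams to ``JavascriptMinify``. A short sketch of the stream interface (the input text is illustrative)::

    from io import StringIO
    from jsmin import JavascriptMinify

    ins = StringIO('function foo() { return 1; }  // trailing comment')
    outs = StringIO()
    JavascriptMinify(ins, outs).minify()
    outs.getvalue()   # -> 'function foo(){return 1;}'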
diff --git a/third_party/python/jsmin/jsmin/__main__.py b/third_party/python/jsmin/jsmin/__main__.py
new file mode 100644
index 0000000000..6d37a3eb7b
--- /dev/null
+++ b/third_party/python/jsmin/jsmin/__main__.py
@@ -0,0 +1,37 @@
+# vim: set fileencoding=utf-8 :
+
+# This code is original from jsmin by Douglas Crockford, it was translated to
+# Python by Baruch Even. It was rewritten by Dave St.Germain for speed.
+#
+# The MIT License (MIT)
+#
+# Copyright (c) 2013 Dave St.Germain
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import sys
+from jsmin import JavascriptMinify
+
+for f in sys.argv[1:]:
+ with open(f, 'r') as js:
+ minifier = JavascriptMinify(js, sys.stdout)
+ minifier.minify()
+ sys.stdout.write('\n')
+
+
diff --git a/third_party/python/jsmin/jsmin/test.py b/third_party/python/jsmin/jsmin/test.py
new file mode 100644
index 0000000000..74fbaadeae
--- /dev/null
+++ b/third_party/python/jsmin/jsmin/test.py
@@ -0,0 +1,644 @@
+# vim: set fileencoding=utf-8 :
+
+# This code is original from jsmin by Douglas Crockford, it was translated to
+# Python by Baruch Even. It was rewritten by Dave St.Germain for speed.
+#
+# The MIT License (MIT)
+#
+# Copyright (c) 2013 Dave St.Germain
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+import unittest
+import jsmin
+
+
+class JsTests(unittest.TestCase):
+ def _minify(self, js):
+ return jsmin.jsmin(js)
+
+ def assertEqual(self, thing1, thing2):
+ if thing1 != thing2:
+ print(repr(thing1), repr(thing2))
+ raise AssertionError
+ return True
+
+ def assertMinified(self, js_input, expected, **kwargs):
+ minified = jsmin.jsmin(js_input, **kwargs)
+ assert minified == expected, "\ngot: %r\nexp: %r" % (minified, expected)
+
+ def testQuoted(self):
+ js = r'''
+ Object.extend(String, {
+ interpret: function(value) {
+ return value == null ? '' : String(value);
+ },
+ specialChar: {
+ '\b': '\\b',
+ '\t': '\\t',
+ '\n': '\\n',
+ '\f': '\\f',
+ '\r': '\\r',
+ '\\': '\\\\'
+ }
+ });
+
+ '''
+ expected = r"""Object.extend(String,{interpret:function(value){return value==null?'':String(value);},specialChar:{'\b':'\\b','\t':'\\t','\n':'\\n','\f':'\\f','\r':'\\r','\\':'\\\\'}});"""
+ self.assertMinified(js, expected)
+
+ def testSingleComment(self):
+ js = r'''// use native browser JS 1.6 implementation if available
+ if (Object.isFunction(Array.prototype.forEach))
+ Array.prototype._each = Array.prototype.forEach;
+
+ if (!Array.prototype.indexOf) Array.prototype.indexOf = function(item, i) {
+
+ // hey there
+ function() {// testing comment
+ foo;
+ //something something
+
+ location = 'http://foo.com;'; // goodbye
+ }
+ //bye
+ '''
+ expected = r"""if(Object.isFunction(Array.prototype.forEach))
+Array.prototype._each=Array.prototype.forEach;if(!Array.prototype.indexOf)Array.prototype.indexOf=function(item,i){function(){foo;location='http://foo.com;';}"""
+ self.assertMinified(js, expected)
+
+ def testEmpty(self):
+ self.assertMinified('', '')
+ self.assertMinified(' ', '')
+ self.assertMinified('\n', '')
+ self.assertMinified('\r\n', '')
+ self.assertMinified('\t', '')
+
+
+ def testMultiComment(self):
+ js = r"""
+ function foo() {
+ print('hey');
+ }
+ /*
+ if(this.options.zindex) {
+ this.originalZ = parseInt(Element.getStyle(this.element,'z-index') || 0);
+ this.element.style.zIndex = this.options.zindex;
+ }
+ */
+ another thing;
+ """
+ expected = r"""function foo(){print('hey');}
+another thing;"""
+ self.assertMinified(js, expected)
+
+ def testLeadingComment(self):
+ js = r"""/* here is a comment at the top
+
+ it ends here */
+ function foo() {
+ alert('crud');
+ }
+
+ """
+ expected = r"""function foo(){alert('crud');}"""
+ self.assertMinified(js, expected)
+
+ def testBlockCommentStartingWithSlash(self):
+ self.assertMinified('A; /*/ comment */ B', 'A;B')
+
+ def testBlockCommentEndingWithSlash(self):
+ self.assertMinified('A; /* comment /*/ B', 'A;B')
+
+ def testLeadingBlockCommentStartingWithSlash(self):
+ self.assertMinified('/*/ comment */ A', 'A')
+
+ def testLeadingBlockCommentEndingWithSlash(self):
+ self.assertMinified('/* comment /*/ A', 'A')
+
+ def testEmptyBlockComment(self):
+ self.assertMinified('/**/ A', 'A')
+
+ def testBlockCommentMultipleOpen(self):
+ self.assertMinified('/* A /* B */ C', 'C')
+
+ def testJustAComment(self):
+ self.assertMinified(' // a comment', '')
+
+ def test_issue_bitbucket_10(self):
+ js = '''
+ files = [{name: value.replace(/^.*\\\\/, '')}];
+ // comment
+ A
+ '''
+ expected = '''files=[{name:value.replace(/^.*\\\\/,'')}];A'''
+ self.assertMinified(js, expected)
+
+ def test_issue_bitbucket_10_without_semicolon(self):
+ js = '''
+ files = [{name: value.replace(/^.*\\\\/, '')}]
+ // comment
+ A
+ '''
+ expected = '''files=[{name:value.replace(/^.*\\\\/,'')}]\nA'''
+ self.assertMinified(js, expected)
+
+ def testRe(self):
+ js = r'''
+ var str = this.replace(/\\./g, '@').replace(/"[^"\\\n\r]*"/g, '');
+ return (/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);
+ });'''
+ expected = r"""var str=this.replace(/\\./g,'@').replace(/"[^"\\\n\r]*"/g,'');return(/^[,:{}\[\]0-9.\-+Eaeflnr-u \n\r\t]*$/).test(str);});"""
+ self.assertMinified(js, expected)
+
+ def testIgnoreComment(self):
+ js = r"""
+ var options_for_droppable = {
+ overlap: options.overlap,
+ containment: options.containment,
+ tree: options.tree,
+ hoverclass: options.hoverclass,
+ onHover: Sortable.onHover
+ }
+
+ var options_for_tree = {
+ onHover: Sortable.onEmptyHover,
+ overlap: options.overlap,
+ containment: options.containment,
+ hoverclass: options.hoverclass
+ }
+
+ // fix for gecko engine
+ Element.cleanWhitespace(element);
+ """
+ expected = r"""var options_for_droppable={overlap:options.overlap,containment:options.containment,tree:options.tree,hoverclass:options.hoverclass,onHover:Sortable.onHover}
+var options_for_tree={onHover:Sortable.onEmptyHover,overlap:options.overlap,containment:options.containment,hoverclass:options.hoverclass}
+Element.cleanWhitespace(element);"""
+ self.assertMinified(js, expected)
+
+ def testHairyRe(self):
+ js = r"""
+ inspect: function(useDoubleQuotes) {
+ var escapedString = this.gsub(/[\x00-\x1f\\]/, function(match) {
+ var character = String.specialChar[match[0]];
+ return character ? character : '\\u00' + match[0].charCodeAt().toPaddedString(2, 16);
+ });
+ if (useDoubleQuotes) return '"' + escapedString.replace(/"/g, '\\"') + '"';
+ return "'" + escapedString.replace(/'/g, '\\\'') + "'";
+ },
+
+ toJSON: function() {
+ return this.inspect(true);
+ },
+
+ unfilterJSON: function(filter) {
+ return this.sub(filter || Prototype.JSONFilter, '#{1}');
+ },
+ """
+ expected = r"""inspect:function(useDoubleQuotes){var escapedString=this.gsub(/[\x00-\x1f\\]/,function(match){var character=String.specialChar[match[0]];return character?character:'\\u00'+match[0].charCodeAt().toPaddedString(2,16);});if(useDoubleQuotes)return'"'+escapedString.replace(/"/g,'\\"')+'"';return"'"+escapedString.replace(/'/g,'\\\'')+"'";},toJSON:function(){return this.inspect(true);},unfilterJSON:function(filter){return this.sub(filter||Prototype.JSONFilter,'#{1}');},"""
+ self.assertMinified(js, expected)
+
+ def testLiteralRe(self):
+ js = r"""
+ myString.replace(/\\/g, '/');
+ console.log("hi");
+ """
+ expected = r"""myString.replace(/\\/g,'/');console.log("hi");"""
+ self.assertMinified(js, expected)
+
+ js = r''' return /^data:image\//i.test(url) ||
+ /^(https?|ftp|file|about|chrome|resource):/.test(url);
+ '''
+ expected = r'''return /^data:image\//i.test(url)||/^(https?|ftp|file|about|chrome|resource):/.test(url);'''
+ self.assertMinified(js, expected)
+
+ def testNoBracesWithComment(self):
+ js = r"""
+ onSuccess: function(transport) {
+ var js = transport.responseText.strip();
+ if (!/^\[.*\]$/.test(js)) // TODO: improve sanity check
+ throw 'Server returned an invalid collection representation.';
+ this._collection = eval(js);
+ this.checkForExternalText();
+ }.bind(this),
+ onFailure: this.onFailure
+ });
+ """
+ expected = r"""onSuccess:function(transport){var js=transport.responseText.strip();if(!/^\[.*\]$/.test(js))
+throw'Server returned an invalid collection representation.';this._collection=eval(js);this.checkForExternalText();}.bind(this),onFailure:this.onFailure});"""
+ self.assertMinified(js, expected)
+ js_without_comment = r"""
+ onSuccess: function(transport) {
+ var js = transport.responseText.strip();
+ if (!/^\[.*\]$/.test(js))
+ throw 'Server returned an invalid collection representation.';
+ this._collection = eval(js);
+ this.checkForExternalText();
+ }.bind(this),
+ onFailure: this.onFailure
+ });
+ """
+ self.assertMinified(js_without_comment, expected)
+
+ def testSpaceInRe(self):
+ js = r"""
+ num = num.replace(/ /g,'');
+ """
+ self.assertMinified(js, "num=num.replace(/ /g,'');")
+
+ def testEmptyString(self):
+ js = r'''
+ function foo('') {
+
+ }
+ '''
+ self.assertMinified(js, "function foo(''){}")
+
+ def testDoubleSpace(self):
+ js = r'''
+var foo = "hey";
+ '''
+ self.assertMinified(js, 'var foo="hey";')
+
+ def testLeadingRegex(self):
+ js = r'/[d]+/g '
+ self.assertMinified(js, js.strip())
+
+ def testLeadingString(self):
+ js = r"'a string in the middle of nowhere'; // and a comment"
+ self.assertMinified(js, "'a string in the middle of nowhere';")
+
+ def testSingleCommentEnd(self):
+ js = r'// a comment\n'
+ self.assertMinified(js, '')
+
+ def testInputStream(self):
+ try:
+ from StringIO import StringIO
+ except ImportError:
+ from io import StringIO
+
+ ins = StringIO(r'''
+ function foo('') {
+
+ }
+ ''')
+ outs = StringIO()
+ m = jsmin.JavascriptMinify()
+ m.minify(ins, outs)
+ output = outs.getvalue()
+ assert output == "function foo(''){}"
+
+ def testUnicode(self):
+ instr = u'\u4000 //foo'
+ expected = u'\u4000'
+ output = jsmin.jsmin(instr)
+ self.assertEqual(output, expected)
+
+ def testCommentBeforeEOF(self):
+ self.assertMinified("//test\r\n", "")
+
+ def testCommentInObj(self):
+ self.assertMinified("""{
+ a: 1,//comment
+ }""", "{a:1,}")
+
+ def testCommentInObj2(self):
+ self.assertMinified("{a: 1//comment\r\n}", "{a:1}")
+
+ def testImplicitSemicolon(self):
+        # return \n 1 is equivalent to return; 1
+ # so best make sure jsmin retains the newline
+ self.assertMinified("return\na", "return\na")
+
+ def test_explicit_semicolon(self):
+ self.assertMinified("return;//comment\r\na", "return;a")
+
+ def testImplicitSemicolon2(self):
+ self.assertMinified("return//comment...\r\nar", "return\nar")
+
+ def testImplicitSemicolon3(self):
+ self.assertMinified("return//comment...\r\na", "return\na")
+
+ def testSingleComment2(self):
+ self.assertMinified('x.replace(/\//, "_")// slash to underscore',
+ 'x.replace(/\//,"_")')
+
+ def testSlashesNearComments(self):
+ original = '''
+ { a: n / 2, }
+ // comment
+ '''
+ expected = '''{a:n/2,}'''
+ self.assertMinified(original, expected)
+
+ def testReturn(self):
+ original = '''
+ return foo;//comment
+ return bar;'''
+ expected = 'return foo;return bar;'
+ self.assertMinified(original, expected)
+ original = '''
+ return foo
+ return bar;'''
+ expected = 'return foo\nreturn bar;'
+ self.assertMinified(original, expected)
+
+ def test_space_plus(self):
+ original = '"s" + ++e + "s"'
+ expected = '"s"+ ++e+"s"'
+ self.assertMinified(original, expected)
+
+ def test_no_final_newline(self):
+ original = '"s"'
+ expected = '"s"'
+ self.assertMinified(original, expected)
+
+ def test_space_with_regex_repeats(self):
+ original = '/(NaN| {2}|^$)/.test(a)&&(a="M 0 0");'
+ self.assertMinified(original, original) # there should be nothing jsmin can do here
+
+ def test_space_with_regex_repeats_not_at_start(self):
+ original = 'aaa;/(NaN| {2}|^$)/.test(a)&&(a="M 0 0");'
+ self.assertMinified(original, original) # there should be nothing jsmin can do here
+
+ def test_space_in_regex(self):
+ original = '/a (a)/.test("a")'
+ self.assertMinified(original, original)
+
+ def test_brackets_around_slashed_regex(self):
+ original = 'function a() { /\//.test("a") }'
+ expected = 'function a(){/\//.test("a")}'
+ self.assertMinified(original, expected)
+
+ def test_angular_1(self):
+ original = '''var /** holds major version number for IE or NaN for real browsers */
+ msie,
+ jqLite, // delay binding since jQuery could be loaded after us.'''
+ minified = jsmin.jsmin(original)
+ self.assertTrue('var\nmsie' in minified)
+
+ def test_angular_2(self):
+ original = 'var/* comment */msie;'
+ expected = 'var msie;'
+ self.assertMinified(original, expected)
+
+ def test_angular_3(self):
+ original = 'var /* comment */msie;'
+ expected = 'var msie;'
+ self.assertMinified(original, expected)
+
+ def test_angular_4(self):
+ original = 'var /* comment */ msie;'
+ expected = 'var msie;'
+ self.assertMinified(original, expected)
+
+ def test_angular_5(self):
+ original = 'a/b'
+ self.assertMinified(original, original)
+
+ def testBackticks(self):
+ original = '`test`'
+ self.assertMinified(original, original, quote_chars="'\"`")
+
+ original = '` test with leading whitespace`'
+ self.assertMinified(original, original, quote_chars="'\"`")
+
+ original = '`test with trailing whitespace `'
+ self.assertMinified(original, original, quote_chars="'\"`")
+
+ original = '''`test
+with a new line`'''
+ self.assertMinified(original, original, quote_chars="'\"`")
+
+ original = '''dumpAvStats: function(stats) {
+ var statsString = "";
+ if (stats.mozAvSyncDelay) {
+ statsString += `A/V sync: ${stats.mozAvSyncDelay} ms `;
+ }
+ if (stats.mozJitterBufferDelay) {
+ statsString += `Jitter-buffer delay: ${stats.mozJitterBufferDelay} ms`;
+ }
+
+ return React.DOM.div(null, statsString);'''
+ expected = 'dumpAvStats:function(stats){var statsString="";if(stats.mozAvSyncDelay){statsString+=`A/V sync: ${stats.mozAvSyncDelay} ms `;}\nif(stats.mozJitterBufferDelay){statsString+=`Jitter-buffer delay: ${stats.mozJitterBufferDelay} ms`;}\nreturn React.DOM.div(null,statsString);'
+ self.assertMinified(original, expected, quote_chars="'\"`")
+
+ def testBackticksExpressions(self):
+ original = '`Fifteen is ${a + b} and not ${2 * a + b}.`'
+ self.assertMinified(original, original, quote_chars="'\"`")
+
+ original = '''`Fifteen is ${a +
+b} and not ${2 * a + "b"}.`'''
+ self.assertMinified(original, original, quote_chars="'\"`")
+
+ def testBackticksTagged(self):
+ original = 'tag`Hello ${ a + b } world ${ a * b}`;'
+ self.assertMinified(original, original, quote_chars="'\"`")
+
+ def test_issue_bitbucket_16(self):
+ original = """
+ f = function() {
+ return /DataTree\/(.*)\//.exec(this._url)[1];
+ }
+ """
+ self.assertMinified(
+ original,
+ 'f=function(){return /DataTree\/(.*)\//.exec(this._url)[1];}')
+
+ def test_issue_bitbucket_17(self):
+ original = "// hi\n/^(get|post|head|put)$/i.test('POST')"
+ self.assertMinified(original,
+ "/^(get|post|head|put)$/i.test('POST')")
+
+ def test_issue_6(self):
+ original = '''
+ respond.regex = {
+ comments: /\/\*[^*]*\*+([^/][^*]*\*+)*\//gi,
+ urls: 'whatever'
+ };
+ '''
+ expected = original.replace(' ', '').replace('\n', '')
+ self.assertMinified(original, expected)
+
+ def test_issue_9(self):
+ original = '\n'.join([
+ 'var a = \'hi\' // this is a comment',
+ 'var a = \'hi\' /* this is also a comment */',
+ 'console.log(1) // this is a comment',
+ 'console.log(1) /* this is also a comment */',
+ '1 // this is a comment',
+ '1 /* this is also a comment */',
+ '{} // this is a comment',
+ '{} /* this is also a comment */',
+ '"YOLO" /* this is a comment */',
+ '"YOLO" // this is a comment',
+ '(1 + 2) // comment',
+ '(1 + 2) /* yup still comment */',
+ 'var b'
+ ])
+ expected = '\n'.join([
+ 'var a=\'hi\'',
+ 'var a=\'hi\'',
+ 'console.log(1)',
+ 'console.log(1)',
+ '1',
+ '1',
+ '{}',
+ '{}',
+ '"YOLO"',
+ '"YOLO"',
+ '(1+2)',
+ '(1+2)',
+ 'var b'
+ ])
+ self.assertMinified(expected, expected)
+ self.assertMinified(original, expected)
+
+ def test_newline_between_strings(self):
+ self.assertMinified('"yolo"\n"loyo"', '"yolo"\n"loyo"')
+
+ def test_issue_10_comments_between_tokens(self):
+ self.assertMinified('var/* comment */a', 'var a')
+
+ def test_ends_with_string(self):
+ self.assertMinified('var s = "s"', 'var s="s"')
+
+ def test_short_comment(self):
+ self.assertMinified('a;/**/b', 'a;b')
+
+ def test_shorter_comment(self):
+ self.assertMinified('a;/*/*/b', 'a;b')
+
+ def test_block_comment_with_semicolon(self):
+ self.assertMinified('a;/**/\nb', 'a;b')
+
+ def test_block_comment_With_implicit_semicolon(self):
+ self.assertMinified('a/**/\nvar b', 'a\nvar b')
+
+ def test_issue_9_single_comments(self):
+ original = '''
+ var a = "hello" // this is a comment
+ a += " world"
+ '''
+ self.assertMinified(original, 'var a="hello"\na+=" world"')
+
+ def test_issue_9_multi_comments(self):
+ original = '''
+ var a = "hello" /* this is a comment */
+ a += " world"
+ '''
+ self.assertMinified(original, 'var a="hello"\na+=" world"')
+
+ def test_issue_12_re_nl_if(self):
+ original = '''
+ var re = /\d{4}/
+ if (1) { console.log(2); }'''
+ self.assertMinified(
+ original, 'var re=/\d{4}/\nif(1){console.log(2);}')
+
+ def test_issue_12_re_nl_other(self):
+ original = '''
+ var re = /\d{4}/
+ g = 10'''
+ self.assertMinified(original , 'var re=/\d{4}/\ng=10')
+
+ def test_preserve_copyright(self):
+ original = '''
+ function this() {
+ /*! Copyright year person */
+ console.log('hello!');
+ }
+
+ /*! Copyright blah blah
+ *
+ * Some other text
+ */
+
+ var a;
+ '''
+ expected = """function this(){/*! Copyright year person */
+console.log('hello!');}/*! Copyright blah blah
+ *
+ * Some other text
+ */\n\nvar a;"""
+ self.assertMinified(original, expected)
+
+ def test_issue_14(self):
+ original = 'return x / 1;'
+ self.assertMinified(original, 'return x/1;')
+
+ def test_issue_14_with_char_from_return(self):
+ original = 'return r / 1;'
+ self.assertMinified(original, 'return r/1;')
+
+
+class RegexTests(unittest.TestCase):
+
+ def regex_recognise(self, js):
+ if not jsmin.is_3:
+ if jsmin.cStringIO and not isinstance(js, unicode):
+ # strings can use cStringIO for a 3x performance
+ # improvement, but unicode (in python2) cannot
+ klass = jsmin.cStringIO.StringIO
+ else:
+ klass = jsmin.StringIO.StringIO
+ else:
+ klass = jsmin.io.StringIO
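+        # regex_literal() is handed the first two characters of the literal
+        # directly and reads the rest from the input stream, so only js[2:]
+        # goes into the stream.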
+ ins = klass(js[2:])
+ outs = klass()
+ jsmin.JavascriptMinify(ins, outs).regex_literal(js[0], js[1])
+ return outs.getvalue()
+
+ def assert_regex(self, js_input, expected):
+ assert js_input[0] == '/' # otherwise we should not be testing!
+ recognised = self.regex_recognise(js_input)
+ assert recognised == expected, "\n in: %r\ngot: %r\nexp: %r" % (js_input, recognised, expected)
+
+ def test_simple(self):
+ self.assert_regex('/123/g', '/123/')
+
+ def test_character_class(self):
+ self.assert_regex('/a[0-9]b/g', '/a[0-9]b/')
+
+ def test_character_class_with_slash(self):
+ self.assert_regex('/a[/]b/g', '/a[/]b/')
+
+ def test_escaped_forward_slash(self):
+ self.assert_regex(r'/a\/b/g', r'/a\/b/')
+
+ def test_escaped_back_slash(self):
+ self.assert_regex(r'/a\\/g', r'/a\\/')
+
+ def test_empty_character_class(self):
+        # This one is subtle: an empty character class is not allowed, as far
+        # as we can tell from http://regexpal.com/ (Chrome Version
+        # 44.0.2403.155 (64-bit), Mac), so this char class is interpreted as
+        # containing ]/ and *not* as an empty class [] followed by
+        # end-of-regex /.
+ self.assert_regex('/a[]/]b/g', '/a[]/]b/')
+
+ def test_precedence_of_parens(self):
+        # Judging from http://regexpal.com/ (Chrome Version 44.0.2403.155
+        # (64-bit), Mac), () have lower precedence than [].
+ self.assert_regex('/a([)])b/g', '/a([)])b/')
+ self.assert_regex('/a[(]b/g', '/a[(]b/')
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/third_party/python/jsmin/setup.cfg b/third_party/python/jsmin/setup.cfg
new file mode 100644
index 0000000000..861a9f5542
--- /dev/null
+++ b/third_party/python/jsmin/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/third_party/python/jsmin/setup.py b/third_party/python/jsmin/setup.py
new file mode 100644
index 0000000000..7968aecdd0
--- /dev/null
+++ b/third_party/python/jsmin/setup.py
@@ -0,0 +1,36 @@
+from setuptools import setup
+
+import os, sys, re
+
+os.environ['COPYFILE_DISABLE'] = 'true' # this disables including resource forks in tar files on os x
+
+
+def long_description():
+ return open('README.rst').read() + '\n' + open('CHANGELOG.txt').read()
+
+
+setup(
+ name="jsmin",
+ version=re.search(r'__version__ = ["\']([^"\']+)', open('jsmin/__init__.py').read()).group(1),
+ packages=['jsmin'],
+ description='JavaScript minifier.',
+ long_description=long_description(),
+ author='Dave St.Germain',
+ author_email='dave@st.germa.in',
+ maintainer='Tikitu de Jager',
+ maintainer_email='tikitu+jsmin@logophile.org',
+ test_suite='jsmin.test',
+ license='MIT License',
+ url='https://github.com/tikitu/jsmin/',
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: Web Environment',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: MIT License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 3 :: Only',
+ 'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
+ 'Topic :: Software Development :: Pre-processors',
+ 'Topic :: Text Processing :: Filters',
+ ]
+)
diff --git a/third_party/python/json-e/MANIFEST.in b/third_party/python/json-e/MANIFEST.in
new file mode 100644
index 0000000000..a6995977cb
--- /dev/null
+++ b/third_party/python/json-e/MANIFEST.in
@@ -0,0 +1,3 @@
+include jsone *.py
+include package.json
+recursive-exclude test *
diff --git a/third_party/python/json-e/PKG-INFO b/third_party/python/json-e/PKG-INFO
new file mode 100644
index 0000000000..bf41f82167
--- /dev/null
+++ b/third_party/python/json-e/PKG-INFO
@@ -0,0 +1,11 @@
+Metadata-Version: 2.1
+Name: json-e
+Version: 2.7.0
+Summary: A data-structure parameterization system written for embedding context in JSON objects
+Home-page: https://taskcluster.github.io/json-e/
+Author: Dustin J. Mitchell
+Author-email: dustin@mozilla.com
+License: MPL2
+Description: UNKNOWN
+Platform: UNKNOWN
+Provides-Extra: release
diff --git a/third_party/python/json-e/README.md b/third_party/python/json-e/README.md
new file mode 100644
index 0000000000..155b2e6ded
--- /dev/null
+++ b/third_party/python/json-e/README.md
@@ -0,0 +1,730 @@
+* [Full documentation](https://taskcluster.github.io/json-e)
+
+# JSON-e
+
+JSON-e is a data-structure parameterization system for embedding context in
+JSON objects.
+
+The central idea is to treat a data structure as a "template" and transform it,
+using another data structure as context, to produce an output data structure.
+
+There are countless libraries to do this with strings, such as
+[mustache](https://mustache.github.io/). What makes JSON-e unique is that it
+operates on data structures, not on their textual representation. This allows
+input to be written in a number of formats (JSON, YAML, etc.) or even generated
+dynamically. It also means that the output cannot be "invalid", even when
+including large chunks of contextual data.
+
+JSON-e is also designed to be safe for use on untrusted data. It never uses
+`eval` or any other function that might result in arbitrary code execution. It
+also disallows unbounded iteration, so any JSON-e rendering operation will
+finish in finite time.
+
+## Changes
+
+See
+[CHANGELOG.rst](https://github.com/taskcluster/json-e/blob/master/CHANGELOG.rst)
+for the changes in each version of this library.
+
+# Interface
+
+## JavaScript
+
+The JS module is installed with either of
+
+```shell
+npm install --save json-e
+yarn add json-e
+```
+
+The module exposes the following interface:
+
+```javascript
+import jsone from 'json-e';
+
+var template = {a: {$eval: "foo.bar"}};
+var context = {foo: {bar: "zoo"}};
+console.log(jsone(template, context));
+// -> { a: 'zoo' }
+```
+
+Note that the context can contain functions, and those functions can be called
+from the template:
+
+```javascript
+var template = {$eval: "foo(1)"};
+var context = {"foo": function(x) { return x + 2; }};
+console.log(jsone(template, context)); // -> 3
+```
+
+*NOTE*: Context functions are called synchronously. Any complex asynchronous
+operations should be handled before rendering the template.
+
+*NOTE*: If the template is untrusted, it can pass arbitrary data to functions
+in the context, which must guard against such behavior.
+
+### Browser
+
+JSON-e is distributed as a CommonJS package and is not designed to be included
+directly in a browser with `<script>`. Instead, it must be incorporated using
+a tool that understands CommonJS, such as Webpack. See
+[Neutrino](https://neutrino.js.org/) for an easy, configuration-free way to
+build such applications.
+
+## Python
+
+The Python distribution exposes a `render` function:
+
+```python
+import jsone
+
+template = {"a": {"$eval": "foo.bar"}}
+context = {"foo": {"bar": "zoo"}}
+print(jsone.render(template, context)) # -> {"a": "zoo"}
+```
+
+and also allows custom functions in the context:
+
+```python
+template = {"$eval": "foo(1)"}
+context = {"foo": lambda x: x + 2}
+print(jsone.render(template, context)) # -> 3
+```
+
+## Go (golang)
+
+The [golang package for json-e](https://godoc.org/github.com/taskcluster/json-e) exposes a `Render` function:
+
+```golang
+import (
+	"fmt"
+
+	"github.com/taskcluster/json-e"
+)
+
+func main() {
+	// The template must be given using the types
+	// map[string]interface{}, []interface{}, float64, string, bool, nil --
+	// the same types that json.Unmarshal() creates when targeting an interface{}.
+	template := map[string]interface{}{
+		"result": map[string]interface{}{
+			"$eval": "f() + 5",
+		},
+	}
+	// The context can hold JSON types just like the template, but may also
+	// contain functions; these can take JSON types as arguments, and return
+	// a value and optionally an error.
+	context := map[string]interface{}{
+		"f": func() int { return 37 },
+	}
+
+	value, err := jsone.Render(template, context)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%#v\n", value)
+}
+```
+
+## CLI
+
+You can use the 3rd party package [rjsone](https://wryun.github.io/rjsone/) to template
+JSON-e from the command line, passing templates/contexts as files or arguments and using
+stdout for the result.
+
+
+# Language Reference
+
+The examples here are given in YAML for ease of reading. Of course, the
+rendering operation takes place on the parsed data, so the input format is
+irrelevant to its operation.
+
+## Simple Operations
+
+All JSON-e directives involve the `$` character, so a template without any directives is
+rendered unchanged:
+
+```yaml
+template: {key: [1,2,{key2: 'val', key3: 1}, true], f: false}
+context: {}
+result: {key: [1,2,{key2: 'val', key3: 1}, true], f: false}
+```
+
+## String Interpolation
+
+The simplest form of substitution occurs within strings, using `${..}`:
+
+```yaml
+template: {message: 'hello ${key}', 'k=${num}': true}
+context: {key: 'world', num: 1}
+result: {message: 'hello world', 'k=1': true}
+```
+
+The bit inside the `${..}` is an expression, and must evaluate to something
+that interpolates obviously into a string (so: a string, number, or boolean).
+If it is null, the expression interpolates into an empty string.
+The expression syntax is described in more detail below.
+
+Values interpolate as their JSON literal values:
+
+```yaml
+template: ["number: ${num}", "booleans: ${t} ${f}", "null: ${nil}"]
+context: {num: 3, t: true, f: false, nil: null}
+result: ["number: 3", "booleans: true false", "null: "]
+```
+
+Note that object keys can be interpolated, too:
+
+```yaml
+template: {"tc_${name}": "${value}"}
+context: {name: 'foo', value: 'bar'}
+result: {"tc_foo": "bar"}
+```
+
+The string `${` can be escaped as `$${`.
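+
+For instance (a small illustrative template, not from the upstream
+documentation):
+
+```yaml
+template: {message: 'use $${braces} to interpolate ${key}'}
+context: {key: 'values'}
+result: {message: 'use ${braces} to interpolate values'}
+```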
+
+## Operators
+
+JSON-e defines a bunch of operators. Each is represented as an object with a
+property beginning with `$`. This object can be buried deeply within the
+template. Some operators take additional arguments as properties of the same
+object.
+
+### `$eval`
+
+The `$eval` operator evaluates the given expression and is replaced with the
+result of that evaluation. Unlike with string interpolation, the result need
+not be a string, but can be an arbitrary data structure.
+
+```yaml
+template: {config: {$eval: 'settings.staging'}}
+context:
+ settings:
+ staging:
+ transactionBackend: mock
+ production:
+ transactionBackend: customerdb
+result: {config: {transactionBackend: 'mock'}}
+```
+
+The expression syntax is described in more detail below.
+
+Note that `$eval`'s value must be a string. "Metaprogramming" by providing a
+calculated value to eval is not allowed. For example, `{$eval: {$eval:
+"${var1} + ${var2}"}}` is not valid JSON-e.
+
+### `$json`
+
+The `$json` operator formats the given value as JSON with sorted keys. It does
+not evaluate the value (use `$eval` for that). While this can be useful in some
+cases, it is an unusual case to include a JSON string in a larger data
+structure.
+
+```yaml
+template: {$json: [a, b, {$eval: 'a+b'}, 4]}
+context: {a: 1, b: 2}
+result: '["a", "b", 3, 4]'
+```
+
+### `$if` - `then` - `else`
+
+The `$if` operator supports conditionals. It evaluates the given value and
+replaces itself with the `then` or `else` property, as appropriate. If the
+selected property is omitted, the `$if` construct is omitted from the parent
+object.
+
+```yaml
+template: {key: {$if: 'cond', then: 1}, k2: 3}
+context: {cond: true}
+result: {key: 1, k2: 3}
+```
+
+```yaml
+template: {$if: 'x > 5', then: 1, else: -1}
+context: {x: 10}
+result: 1
+```
+
+```yaml
+template: [1, {$if: 'cond', else: 2}, 3]
+context: {cond: false}
+result: [1,2,3]
+```
+
+```yaml
+template: {key: {$if: 'cond', then: 2}, other: 3}
+context: {cond: false}
+result: {other: 3}
+```
+
+### `$flatten`
+
+The `$flatten` operator flattens an array of arrays into one array.
+
+```yaml
+template: {$flatten: [[1, 2], [3, 4], [5]]}
+context: {}
+result: [1, 2, 3, 4, 5]
+```
+
+### `$flattenDeep`
+
+The `$flattenDeep` operator deeply flattens an array of arrays into one array.
+
+```yaml
+template: {$flattenDeep: [[1, [2, [3]]]]}
+context: {}
+result: [1, 2, 3]
+```
+
+### `$fromNow`
+
+The `$fromNow` operator is a shorthand for the built-in function `fromNow`. It
+creates a JSON (ISO 8601) datestamp for a time relative to the current time
+(see the `now` builtin, below) or, if `from` is given, relative to that time.
+The offset is specified by a sequence of number/unit pairs in a string. For
+example:
+
+```yaml
+template: {$fromNow: '2 days 1 hour'}
+context: {}
+result: '2017-01-19T16:27:20.974Z'
+```
+
+```yaml
+template: {$fromNow: '1 hour', from: '2017-01-19T16:27:20.974Z'}
+context: {}
+result: '2017-01-19T17:27:20.974Z'
+```
+
+The available units are `day`, `hour`, and `minute`, for all of which a plural
+is also accepted.
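+
+For example, plural units can be combined (an illustrative template; the
+result follows from adding the offset to the `from` time):
+
+```yaml
+template: {$fromNow: '2 hours 30 minutes', from: '2017-01-19T16:27:20.974Z'}
+context: {}
+result: '2017-01-19T18:57:20.974Z'
+```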
+
+### `$let`
+
+The `$let` operator evaluates an expression using a context amended with the
+given values. It is analogous to the Haskell `where` clause.
+
+```yaml
+template: {$let: {ts: 100, foo: 200},
+ in: [{$eval: "ts+foo"}, {$eval: "ts-foo"}, {$eval: "ts*foo"}]}
+context: {}
+result: [300, -100, 20000]
+```
+
+The `$let` operator here added the `ts` and `foo` variables to the scope of
+the context and accordingly evaluated the `in` clause using those variables
+to return the correct result.
+
+The variable names in the `$let` value must be valid context variable names and
+must be written literally. That is, an expression like `{$let: {$eval:
+"extraVariables"}, in : ..}` is not allowed.
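+
+The values in `$let` are themselves rendered before being bound, so they may
+use operators (a small illustrative template):
+
+```yaml
+template: {$let: {larger: {$eval: 'x + 1'}}, in: {$eval: 'larger * 2'}}
+context: {x: 10}
+result: 22
+```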
+
+### `$map`
+
+The `$map` operator evaluates an expression for each value of the given array or object,
+constructing the result as an array or object of the evaluated values.
+
+When given an array, map always returns an array.
+
+```yaml
+template:
+ $map: [2, 4, 6]
+ each(x): {$eval: 'x + a'}
+context: {a: 1}
+result: [3, 5, 7]
+```
+The array or object is the value of the `$map` property, and the expression to evaluate
+is given by `each(var)` where `var` is the name of the variable containing each
+element. In the case of iterating over an object, `var` will be an object with two keys:
+`key` and `val`. These keys correspond to a key in the object and its corresponding value.
+
+When `$map` is given an object, the expression defined by `each(var)` must
+evaluate to an object for each key/value pair (`key` and `val`). The objects
+constructed by each `each(var)` are then merged internally to give the
+resulting object, with later keys overwriting earlier ones. Any other result
+makes the expression invalid for the `$map` operator.
+
+```yaml
+template:
+ $map: {a: 1, b: 2, c: 3}
+ each(y): {'${y.key}x': {$eval: 'y.val + 1'}}
+context: {}
+result: {ax: 2, bx: 3, cx: 4}
+```
+
+### `$match`
+
+The `$match` operator provides a simple form of pattern matching. It takes an
+object whose keys are expressions that evaluate to `true` or `false` based on
+the context. The result is an array (of values of any type) containing the
+values whose keys evaluated to `true`, in arbitrary order. If nothing matches,
+the result is an empty array.
+
+```yaml
+template: {$match: {"x == 10": "ten", "x == 20": "twenty"}}
+context: {x: 10}
+result: ["ten"]
+```
+
+```yaml
+template: {$match: {"x == 10 || x == 20": "tens", "x == 10": "ten"}}
+context: {x: 10}
+one possible result: ["tens", "ten"]
+another possible result: ["ten", "tens"]
+```
+
+```yaml
+template: {$match: {"x < 10": "tens"}}
+context: {x: 10}
+result: []
+```
+
+### `$merge`
+
+The `$merge` operator merges an array of objects, returning a single object
+that combines all of the objects in the array, where the right-side objects
+overwrite the values of the left-side ones.
+
+```yaml
+template: {$merge: [{a: 1, b: 1}, {b: 2, c: 3}, {d: 4}]}
+context: {}
+result: {a: 1, b: 2, c: 3, d: 4}
+```
+
+### `$mergeDeep`
+
+The `$mergeDeep` operator is like `$merge`, but it recurses into objects to
+combine their contents property by property. Arrays are concatenated.
+
+```yaml
+template:
+ $mergeDeep:
+ - task:
+ payload:
+ command: [a, b]
+ - task:
+ extra:
+ foo: bar
+ - task:
+ payload:
+ command: [c]
+context: {}
+result:
+ task:
+ extra:
+ foo: bar
+ payload:
+ command: [a, b, c]
+```
+
+### `$sort`
+
+The `$sort` operator sorts the given array. It takes a `by(var)` property which
+should evaluate to a comparable value for each element. The `by(var)` property
+defaults to the identity function.
+
+```yaml
+template:
+ $sort: [{a: 2}, {a: 1, b: []}, {a: 3}]
+ by(x): 'x.a'
+context: {}
+result: [{a: 1, b: []}, {a: 2}, {a: 3}]
+```
+
+### `$reverse`
+
+The `$reverse` operator simply reverses the given array.
+
+```yaml
+template: {$reverse: [3, 4, 1, 2]}
+context: {}
+result: [2, 1, 4, 3]
+```
+
+### Escaping operators
+
+All property names starting with `$` are reserved for JSON-e.
+You can use `$$` to escape such properties:
+
+```yaml
+template: {$$reverse: [3, 2, {$$eval: '2 - 1'}, 0]}
+context: {}
+result: {$reverse: [3, 2, {$eval: '2 - 1'}, 0]}
+```
+
+## Truthiness
+
+Many values can be evaluated in contexts where booleans are required,
+not just booleans themselves. JSON-e defines the following values as false;
+anything else is true.
+
+```yaml
+template: {$if: 'a || b || c || d || e || f', then: "uh oh", else: "falsy" }
+context: {a: null, b: [], c: {}, d: "", e: 0, f: false}
+result: "falsy"
+```
+
+## Expression Syntax
+
+Expressions are given in a simple Python- or JavaScript-like expression
+language. Its data types are limited to JSON types plus function objects.
+
+### Literals
+
+Literals are similar to those for JSON. Numeric literals only accept integer
+and decimal notation. Strings do not support any kind of escaping. The use of
+`\n` and `\t` in the example below depends on the YAML parser to expand the
+escapes.
+
+```yaml
+template:
+ - {$eval: "1.3"}
+ - {$eval: "'abc'"}
+ - {$eval: '"abc"'}
+ - {$eval: "'\n\t'"}
+context: {}
+result:
+ - 1.3
+ - "abc"
+ - "abc"
+ - "\n\t"
+```
+
+Array and object literals also look much like JSON, with bare identifiers
+allowed as keys like in Javascript:
+
+```yaml
+template:
+ - {$eval: '[1, 2, "three"]'}
+ - {$eval: '{foo: 1, "bar": 2}'}
+context: {}
+result:
+ - [1, 2, "three"]
+ - {"foo": 1, "bar": 2}
+```
+
+### Context References
+
+Bare identifiers refer to items from the context or to built-ins (described below).
+
+```yaml
+template: {$eval: '[x, z, x+z]'}
+context: {x: 'quick', z: 'sort'}
+result: ['quick', 'sort', 'quicksort']
+```
+
+### Arithmetic Operations
+
+The usual arithmetic operators are all defined, with typical associativity and
+precedence:
+
+```yaml
+template:
+ - {$eval: 'x + z'}
+ - {$eval: 's + t'}
+ - {$eval: 'z - x'}
+ - {$eval: 'x * z'}
+ - {$eval: 'z / x'}
+ - {$eval: 'z ** 2'}
+ - {$eval: '(z / x) ** 2'}
+context: {x: 10, z: 20, s: "face", t: "plant"}
+result:
+ - 30
+ - "faceplant"
+ - 10
+ - 200
+ - 2
+ - 400
+ - 4
+```
+
+Note that strings can be concatenated with `+`, but none of the other operators
+apply.
+
+### Comparison Operations
+
+Comparisons work as expected. Equality is "deep" in the sense of doing
+comparisons of the contents of data structures.
+
+```yaml
+template:
+ - {$eval: 'x < z'}
+ - {$eval: 'x <= z'}
+ - {$eval: 'x > z'}
+ - {$eval: 'x >= z'}
+ - {$eval: 'deep == [1, [3, {a: 5}]]'}
+ - {$eval: 'deep != [1, [3, {a: 5}]]'}
+context: {x: -10, z: 10, deep: [1, [3, {a: 5}]]}
+result: [true, true, false, false, true, false]
+```
+
+### Boolean Operations
+
+Boolean operations use C- and Javascript-style symbols `||`, `&&`, and `!`:
+
+```yaml
+template: {$eval: '!(false || false) && true'}
+context: {}
+result: true
+```
+
+### Object Property Access
+
+Like Javascript, object properties can be accessed either with array-index
+syntax or with dot syntax. Unlike Javascript, `obj.prop` is an error if `obj`
+does not have `prop`, while `obj['prop']` will evaluate to `null`.
+
+```yaml
+template: {$eval: 'v.a + v["b"]'}
+context: {v: {a: 'apple', b: 'banana', c: 'carrot'}}
+result: 'applebanana'
+```
+
+### Indexing and Slicing
+
+Strings and arrays can be indexed and sliced using a Python-like indexing
+scheme. Negative indexes are counted from the end of the value. Slices are
+treated as "half-open", meaning that the result contains the first index and
+does not contain the second index. A "backward" slice with the start index
+greater than the end index is treated as empty.
+
+```yaml
+template:
+ - {$eval: '[array[1], string[1]]'}
+ - {$eval: '[array[1:4], string[1:4]]'}
+ - {$eval: '[array[2:], string[2:]]'}
+ - {$eval: '[array[:2], string[:2]]'}
+ - {$eval: '[array[4:2], string[4:2]]'}
+ - {$eval: '[array[-2], string[-2]]'}
+ - {$eval: '[array[-2:], string[-2:]]'}
+ - {$eval: '[array[:-3], string[:-3]]'}
+context: {array: ['a', 'b', 'c', 'd', 'e'], string: 'abcde'}
+result:
+ - ['b', 'b']
+ - [['b', 'c', 'd'], 'bcd']
+ - [['c', 'd', 'e'], 'cde']
+ - [['a', 'b'], 'ab']
+ - [[], '']
+ - ['d', 'd']
+ - [['d', 'e'], 'de']
+ - [['a', 'b'], 'ab']
+```
+
+### Containment Operation
+
+The `in` keyword can be used to check for containment: a property in an object,
+an element in an array, or a substring in a string.
+
+```yaml
+template:
+ - {$eval: '"foo" in {foo: 1, bar: 2}'}
+ - {$eval: '"foo" in ["foo", "bar"]'}
+ - {$eval: '"foo" in "foobar"'}
+context: {}
+result: [true, true, true]
+```
+
+### Function Invocation
+
+Function calls are made with the usual `fn(arg1, arg2)` syntax. Functions are
+not JSON data, so they cannot be created in JSON-e, but they can be provided as
+built-ins or supplied in the context and called from JSON-e.
+
+### Built-In Functions and Variables
+
+The expression language provides a laundry-list of built-in
+functions/variables. Library users can easily add additional
+functions/variables, or override the built-ins, as part of the context.
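+
+For example, supplying `now` in the context overrides the built-in value of
+the same name (an illustrative template):
+
+```yaml
+template: {$eval: 'now'}
+context: {now: '2013-05-18T12:00:00.000Z'}
+result: '2013-05-18T12:00:00.000Z'
+```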
+
+#### Time
+
+The built-in context value `now` is set to the current time at the start of
+evaluation of the template, and used as the default "from" value for `$fromNow`
+and the built-in `fromNow()`.
+
+```yaml
+template:
+ - {$eval: 'now'}
+ - {$eval: 'fromNow("1 minute")'}
+ - {$eval: 'fromNow("1 minute", "2017-01-19T16:27:20.974Z")'}
+context: {}
+result:
+  - '2017-01-19T16:27:20.974Z'
+  - '2017-01-19T16:28:20.974Z'
+  - '2017-01-19T16:28:20.974Z'
+```
+
+#### Math
+
+```yaml
+template:
+ # the smallest of the arguments
+ - {$eval: 'min(1, 3, 5)'}
+ # the largest of the arguments
+ - {$eval: 'max(2, 4, 6)'}
+ # mathematical functions
+ - {$eval: 'sqrt(16)'}
+ - {$eval: 'ceil(0.3)'}
+ - {$eval: 'floor(0.3)'}
+ - {$eval: 'abs(-0.3)'}
+context: {}
+result:
+ - 1
+ - 6
+ - 4
+ - 1
+ - 0
+ - 0.3
+```
+
+#### Strings
+
+```yaml
+template:
+ # convert string case
+ - {$eval: 'lowercase("Fools!")'}
+ - {$eval: 'uppercase("Fools!")'}
+ # convert string, number, boolean, or array to string
+ - {$eval: 'str(130)'}
+ # strip whitespace from left, right, or both ends of a string
+ - {$eval: 'lstrip(" room ")'}
+ - {$eval: 'rstrip(" room ")'}
+ - {$eval: 'strip(" room ")'}
+context: {}
+result:
+ - "fools!"
+ - "FOOLS!"
+ - "130"
+ - "room "
+ - " room"
+ - room
+```
+
+#### Type
+
+The `typeof()` built-in returns the type of an object. Its behavior around
+`null` is reminiscent of JavaScript.
+
+```yaml
+template:
+ - "${typeof('abc')}"
+ - "${typeof(42)}"
+ - "${typeof(42.0)}"
+ - "${typeof(true)}"
+ - "${typeof([])}"
+ - "${typeof({})}"
+ - "${typeof(typeof)}"
+ - {$eval: "typeof(null)"}
+ - "${typeof(null)}"
+context: {}
+result:
+ - string
+ - number
+ - number
+ - boolean
+ - array
+ - object
+ - function
+ - null # note: the value null, not the string "null"
+ - '' # .. which interpolates to an empty string
+```
+
+#### Length
+
+The `len()` built-in returns the length of a string or array.
+
+```yaml
+template: {$eval: 'len([1, 2, 3])'}
+context: {}
+result: 3
+```
+
diff --git a/third_party/python/json-e/json_e.egg-info/PKG-INFO b/third_party/python/json-e/json_e.egg-info/PKG-INFO
new file mode 100644
index 0000000000..bf41f82167
--- /dev/null
+++ b/third_party/python/json-e/json_e.egg-info/PKG-INFO
@@ -0,0 +1,11 @@
+Metadata-Version: 2.1
+Name: json-e
+Version: 2.7.0
+Summary: A data-structure parameterization system written for embedding context in JSON objects
+Home-page: https://taskcluster.github.io/json-e/
+Author: Dustin J. Mitchell
+Author-email: dustin@mozilla.com
+License: MPL2
+Description: UNKNOWN
+Platform: UNKNOWN
+Provides-Extra: release
diff --git a/third_party/python/json-e/json_e.egg-info/SOURCES.txt b/third_party/python/json-e/json_e.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..ec2e4f666b
--- /dev/null
+++ b/third_party/python/json-e/json_e.egg-info/SOURCES.txt
@@ -0,0 +1,17 @@
+MANIFEST.in
+README.md
+package.json
+setup.cfg
+setup.py
+json_e.egg-info/PKG-INFO
+json_e.egg-info/SOURCES.txt
+json_e.egg-info/dependency_links.txt
+json_e.egg-info/requires.txt
+json_e.egg-info/top_level.txt
+jsone/__init__.py
+jsone/builtins.py
+jsone/interpreter.py
+jsone/prattparser.py
+jsone/render.py
+jsone/shared.py
+jsone/six.py \ No newline at end of file
diff --git a/third_party/python/json-e/json_e.egg-info/dependency_links.txt b/third_party/python/json-e/json_e.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/json-e/json_e.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/json-e/json_e.egg-info/requires.txt b/third_party/python/json-e/json_e.egg-info/requires.txt
new file mode 100644
index 0000000000..f25a637d71
--- /dev/null
+++ b/third_party/python/json-e/json_e.egg-info/requires.txt
@@ -0,0 +1,3 @@
+
+[release]
+towncrier
diff --git a/third_party/python/json-e/json_e.egg-info/top_level.txt b/third_party/python/json-e/json_e.egg-info/top_level.txt
new file mode 100644
index 0000000000..afe8caa74c
--- /dev/null
+++ b/third_party/python/json-e/json_e.egg-info/top_level.txt
@@ -0,0 +1 @@
+jsone
diff --git a/third_party/python/json-e/jsone/__init__.py b/third_party/python/json-e/jsone/__init__.py
new file mode 100644
index 0000000000..943674e672
--- /dev/null
+++ b/third_party/python/json-e/jsone/__init__.py
@@ -0,0 +1,21 @@
+from __future__ import absolute_import, print_function, unicode_literals
+
+import re
+from .render import renderValue
+from .shared import JSONTemplateError, DeleteMarker, TemplateError, fromNow
+from . import builtins
+
+_context_re = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*$')
+
+
+def render(template, context):
+ if not all(_context_re.match(c) for c in context):
+ raise TemplateError('top level keys of context must follow '
+ '/[a-zA-Z_][a-zA-Z0-9_]*/')
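+    # Seed the context with `now`, add the builtins (which close over this
+    # dict), then layer the user-supplied context on top so it can override
+    # both.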
+ full_context = {'now': fromNow('0 seconds', None)}
+ full_context.update(builtins.build(full_context))
+ full_context.update(context)
+ rv = renderValue(template, full_context)
+ if rv is DeleteMarker:
+ return None
+ return rv
diff --git a/third_party/python/json-e/jsone/builtins.py b/third_party/python/json-e/jsone/builtins.py
new file mode 100644
index 0000000000..751ee2dc04
--- /dev/null
+++ b/third_party/python/json-e/jsone/builtins.py
@@ -0,0 +1,121 @@
+from __future__ import absolute_import, print_function, unicode_literals
+
+import math
+from .shared import string, to_str, fromNow, JSONTemplateError
+
+
+class BuiltinError(JSONTemplateError):
+ pass
+
+
+def build(context):
+ builtins = {}
+
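+    # `builtin` registers a function under `name`, wrapping it so that its
+    # arguments are validated first: `variadic` applies one test to every
+    # argument (with an optional minimum count), while `argument_tests`
+    # gives one test per positional argument.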
+ def builtin(name, variadic=None, argument_tests=None, minArgs=None):
+ def wrap(fn):
+ def bad(reason=None):
+ raise BuiltinError(
+ (reason or 'invalid arguments to builtin: {}').format(name))
+ if variadic:
+ def invoke(*args):
+ if minArgs:
+ if len(args) < minArgs:
+ bad("too few arguments to {}")
+ for arg in args:
+ if not variadic(arg):
+ bad()
+ return fn(*args)
+
+ elif argument_tests:
+ def invoke(*args):
+ if len(args) != len(argument_tests):
+ bad()
+ for t, arg in zip(argument_tests, args):
+ if not t(arg):
+ bad()
+ return fn(*args)
+
+ else:
+ def invoke(*args):
+ return fn(*args)
+
+ builtins[name] = invoke
+ return fn
+ return wrap
+
+ def is_number(v):
+ return isinstance(v, (int, float)) and not isinstance(v, bool)
+
+ def is_string(v):
+ return isinstance(v, string)
+
+ def is_string_or_array(v):
+ return isinstance(v, (string, list))
+
+ def anything_except_array(v):
+ return isinstance(v, (string, int, float, bool)) or v is None
+
+ def anything(v):
+ return isinstance(v, (string, int, float, list, dict)) or v is None or callable(v)
+
+ # ---
+
+ builtin('min', variadic=is_number, minArgs=1)(min)
+ builtin('max', variadic=is_number, minArgs=1)(max)
+ builtin('sqrt', argument_tests=[is_number])(math.sqrt)
+ builtin('abs', argument_tests=[is_number])(abs)
+
+ @builtin('ceil', argument_tests=[is_number])
+ def ceil(v):
+ return int(math.ceil(v))
+
+ @builtin('floor', argument_tests=[is_number])
+ def floor(v):
+ return int(math.floor(v))
+
+ @builtin('lowercase', argument_tests=[is_string])
+ def lowercase(v):
+ return v.lower()
+
+ @builtin('uppercase', argument_tests=[is_string])
+    def uppercase(v):
+ return v.upper()
+
+ builtin('len', argument_tests=[is_string_or_array])(len)
+ builtin('str', argument_tests=[anything_except_array])(to_str)
+ builtin('number', variadic=is_string, minArgs=1)(float)
+
+ @builtin('strip', argument_tests=[is_string])
+ def strip(s):
+ return s.strip()
+
+ @builtin('rstrip', argument_tests=[is_string])
+ def rstrip(s):
+ return s.rstrip()
+
+ @builtin('lstrip', argument_tests=[is_string])
+ def lstrip(s):
+ return s.lstrip()
+
+ @builtin('fromNow', variadic=is_string, minArgs=1)
+ def fromNow_builtin(offset, reference=None):
+ return fromNow(offset, reference or context.get('now'))
+
+ @builtin('typeof', argument_tests=[anything])
+ def typeof(v):
+ if isinstance(v, bool):
+ return 'boolean'
+ elif isinstance(v, string):
+ return 'string'
+ elif isinstance(v, (int, float)):
+ return 'number'
+ elif isinstance(v, list):
+ return 'array'
+ elif isinstance(v, dict):
+ return 'object'
+ elif v is None:
+ return None
+ elif callable(v):
+ return 'function'
+
+ return builtins
diff --git a/third_party/python/json-e/jsone/interpreter.py b/third_party/python/json-e/jsone/interpreter.py
new file mode 100644
index 0000000000..eb38a9c85b
--- /dev/null
+++ b/third_party/python/json-e/jsone/interpreter.py
@@ -0,0 +1,289 @@
+from __future__ import absolute_import, print_function, unicode_literals
+
+from .prattparser import PrattParser, infix, prefix
+from .shared import TemplateError, InterpreterError, string
+import operator
+import json
+
+OPERATORS = {
+ '-': operator.sub,
+ '*': operator.mul,
+ '/': operator.truediv,
+ '**': operator.pow,
+ '==': operator.eq,
+ '!=': operator.ne,
+ '<=': operator.le,
+ '<': operator.lt,
+ '>': operator.gt,
+ '>=': operator.ge,
+ '&&': lambda a, b: bool(a and b),
+ '||': lambda a, b: bool(a or b),
+}
+
+
+def infixExpectationError(operator, expected):
+ return InterpreterError('infix: {} expects {} {} {}'.
+ format(operator, expected, operator, expected))
+
+
+class ExpressionEvaluator(PrattParser):
+
+ ignore = '\\s+'
+ patterns = {
+ 'number': '[0-9]+(?:\\.[0-9]+)?',
+ 'identifier': '[a-zA-Z_][a-zA-Z_0-9]*',
+ 'string': '\'[^\']*\'|"[^"]*"',
+ # avoid matching these as prefixes of identifiers e.g., `insinutations`
+ 'true': 'true(?![a-zA-Z_0-9])',
+ 'false': 'false(?![a-zA-Z_0-9])',
+ 'in': 'in(?![a-zA-Z_0-9])',
+ 'null': 'null(?![a-zA-Z_0-9])',
+ }
+ tokens = [
+ '**', '+', '-', '*', '/', '[', ']', '.', '(', ')', '{', '}', ':', ',',
+ '>=', '<=', '<', '>', '==', '!=', '!', '&&', '||', 'true', 'false', 'in',
+ 'null', 'number', 'identifier', 'string',
+ ]
+ precedence = [
+ ['||'],
+ ['&&'],
+ ['in'],
+ ['==', '!='],
+ ['>=', '<=', '<', '>'],
+ ['+', '-'],
+ ['*', '/'],
+ ['**-right-associative'],
+ ['**'],
+ ['[', '.'],
+ ['('],
+ ['unary'],
+ ]
+
+ def __init__(self, context):
+ super(ExpressionEvaluator, self).__init__()
+ self.context = context
+
+ def parse(self, expression):
+ if not isinstance(expression, string):
+ raise TemplateError('expression to be evaluated must be a string')
+ return super(ExpressionEvaluator, self).parse(expression)
+
+ @prefix('number')
+ def number(self, token, pc):
+ v = token.value
+ return float(v) if '.' in v else int(v)
+
+ @prefix("!")
+ def bang(self, token, pc):
+ return not pc.parse('unary')
+
+ @prefix("-")
+ def uminus(self, token, pc):
+ v = pc.parse('unary')
+ if not isNumber(v):
+ raise InterpreterError('{} expects {}'.format('unary -', 'number'))
+ return -v
+
+ @prefix("+")
+ def uplus(self, token, pc):
+ v = pc.parse('unary')
+ if not isNumber(v):
+ raise InterpreterError('{} expects {}'.format('unary +', 'number'))
+ return v
+
+ @prefix("identifier")
+ def identifier(self, token, pc):
+ try:
+ return self.context[token.value]
+ except KeyError:
+ raise InterpreterError(
+ 'unknown context value {}'.format(token.value))
+
+ @prefix("null")
+ def null(self, token, pc):
+ return None
+
+ @prefix("[")
+ def array_bracket(self, token, pc):
+ return parseList(pc, ',', ']')
+
+ @prefix("(")
+ def grouping_paren(self, token, pc):
+ rv = pc.parse()
+ pc.require(')')
+ return rv
+
+ @prefix("{")
+ def object_brace(self, token, pc):
+ return parseObject(pc)
+
+ @prefix("string")
+ def string(self, token, pc):
+ return parseString(token.value)
+
+ @prefix("true")
+ def true(self, token, pc):
+ return True
+
+ @prefix("false")
+ def false(self, token, ps):
+ return False
+
+ @infix("+")
+ def plus(self, left, token, pc):
+ if not isinstance(left, (string, int, float)) or isinstance(left, bool):
+ raise infixExpectationError('+', 'number/string')
+ right = pc.parse(token.kind)
+ if not isinstance(right, (string, int, float)) or isinstance(right, bool):
+ raise infixExpectationError('+', 'number/string')
+ if type(right) != type(left) and \
+ (isinstance(left, string) or isinstance(right, string)):
+ raise infixExpectationError('+', 'numbers/strings')
+ return left + right
+
+ @infix('-', '*', '/', '**')
+ def arith(self, left, token, pc):
+ op = token.kind
+ if not isNumber(left):
+ raise infixExpectationError(op, 'number')
+ right = pc.parse({'**': '**-right-associative'}.get(op))
+ if not isNumber(right):
+ raise infixExpectationError(op, 'number')
+ return OPERATORS[op](left, right)
+
+ @infix("[")
+ def index_slice(self, left, token, pc):
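+        # Handles both indexing `a[i]` and slicing (`a[i:j]`, `a[:j]`,
+        # `a[i:]`); a bare leading ':' starts the slice at index 0.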
+ a = None
+ b = None
+ is_interval = False
+ if pc.attempt(':'):
+ a = 0
+ is_interval = True
+ else:
+ a = pc.parse()
+ if pc.attempt(':'):
+ is_interval = True
+
+ if is_interval and not pc.attempt(']'):
+ b = pc.parse()
+ pc.require(']')
+
+ if not is_interval:
+ pc.require(']')
+
+ return accessProperty(left, a, b, is_interval)
+
+ @infix(".")
+ def property_dot(self, left, token, pc):
+ if not isinstance(left, dict):
+ raise infixExpectationError('.', 'object')
+ k = pc.require('identifier').value
+ try:
+ return left[k]
+ except KeyError:
+ raise TemplateError(
+ '{} not found in {}'.format(k, json.dumps(left)))
+
+ @infix("(")
+ def function_call(self, left, token, pc):
+ if not callable(left):
+            raise TemplateError('function call requires a callable')
+ args = parseList(pc, ',', ')')
+ return left(*args)
+
+ @infix('==', '!=', '||', '&&')
+ def equality_and_logic(self, left, token, pc):
+ op = token.kind
+ right = pc.parse(op)
+ return OPERATORS[op](left, right)
+
+ @infix('<=', '<', '>', '>=')
+ def inequality(self, left, token, pc):
+ op = token.kind
+ right = pc.parse(op)
+ if type(left) != type(right) or \
+ not (isinstance(left, (int, float, string)) and not isinstance(left, bool)):
+ raise infixExpectationError(op, 'numbers/strings')
+ return OPERATORS[op](left, right)
+
+ @infix("in")
+ def contains(self, left, token, pc):
+ right = pc.parse(token.kind)
+ if isinstance(right, dict):
+ if not isinstance(left, string):
+ raise infixExpectationError('in-object', 'string on left side')
+ elif isinstance(right, string):
+ if not isinstance(left, string):
+ raise infixExpectationError('in-string', 'string on left side')
+ elif not isinstance(right, list):
+ raise infixExpectationError(
+ 'in', 'Array, string, or object on right side')
+ try:
+ return left in right
+ except TypeError:
+ raise infixExpectationError('in', 'scalar value, collection')
+
+
+def isNumber(v):
+ return isinstance(v, (int, float)) and not isinstance(v, bool)
+
+
+def parseString(v):
+ return v[1:-1]
+
+
+def parseList(pc, separator, terminator):
+ rv = []
+ if not pc.attempt(terminator):
+ while True:
+ rv.append(pc.parse())
+ if not pc.attempt(separator):
+ break
+ pc.require(terminator)
+ return rv
+
+
+def parseObject(pc):
+ rv = {}
+ if not pc.attempt('}'):
+ while True:
+ k = pc.require('identifier', 'string')
+ if k.kind == 'string':
+ k = parseString(k.value)
+ else:
+ k = k.value
+ pc.require(':')
+ v = pc.parse()
+ rv[k] = v
+ if not pc.attempt(','):
+ break
+ pc.require('}')
+ return rv
+
+
+def accessProperty(value, a, b, is_interval):
+ if isinstance(value, (list, string)):
+ if is_interval:
+ if b is None:
+ b = len(value)
+ try:
+ return value[a:b]
+ except TypeError:
+ raise infixExpectationError('[..]', 'integer')
+ else:
+ try:
+ return value[a]
+ except IndexError:
+ raise TemplateError('index out of bounds')
+ except TypeError:
+ raise infixExpectationError('[..]', 'integer')
+
+ if not isinstance(value, dict):
+ raise infixExpectationError('[..]', 'object, array, or string')
+ if not isinstance(a, string):
+ raise infixExpectationError('[..]', 'string index')
+
+ try:
+ return value[a]
+ except KeyError:
+ return None
diff --git a/third_party/python/json-e/jsone/prattparser.py b/third_party/python/json-e/jsone/prattparser.py
new file mode 100644
index 0000000000..5bf250a816
--- /dev/null
+++ b/third_party/python/json-e/jsone/prattparser.py
@@ -0,0 +1,191 @@
+from __future__ import absolute_import, print_function, unicode_literals
+
+import re
+from collections import namedtuple
+from .shared import TemplateError
+from .six import with_metaclass, viewitems
+
+
+class SyntaxError(TemplateError):
+
+ @classmethod
+ def unexpected(cls, got, exp):
+ exp = ', '.join(sorted(exp))
+ return cls('Found {}, expected {}'.format(got.value, exp))
+
+
+Token = namedtuple('Token', ['kind', 'value', 'start', 'end'])
+
+
+def prefix(*kinds):
+ """Decorate a method as handling prefix tokens of the given kinds"""
+ def wrap(fn):
+ try:
+ fn.prefix_kinds.extend(kinds)
+ except AttributeError:
+ fn.prefix_kinds = list(kinds)
+ return fn
+ return wrap
+
+
+def infix(*kinds):
+ """Decorate a method as handling infix tokens of the given kinds"""
+ def wrap(fn):
+ try:
+ fn.infix_kinds.extend(kinds)
+ except AttributeError:
+ fn.infix_kinds = list(kinds)
+ return fn
+ return wrap
+
+
+class PrattParserMeta(type):
+
+ def __init__(cls, name, bases, body):
+ # set up rules based on decorated methods
+ infix_rules = cls.infix_rules = {}
+ prefix_rules = cls.prefix_rules = {}
+ for prop, value in viewitems(body):
+ if hasattr(value, 'prefix_kinds'):
+ for kind in value.prefix_kinds:
+ prefix_rules[kind] = value
+ delattr(cls, prop)
+ if hasattr(value, 'infix_kinds'):
+ for kind in value.infix_kinds:
+ infix_rules[kind] = value
+ delattr(cls, prop)
+
+ # build a regular expression to generate a sequence of tokens
+ token_patterns = [
+ '({})'.format(cls.patterns.get(t, re.escape(t)))
+ for t in cls.tokens]
+ if cls.ignore:
+ token_patterns.append('(?:{})'.format(cls.ignore))
+ cls.token_re = re.compile('^(?:' + '|'.join(token_patterns) + ')')
+
+ # build a map from token kind to precedence level
+ cls.precedence_map = {
+ kind: prec + 1
+ for (prec, row) in enumerate(cls.precedence)
+ for kind in row
+ }
+
+
+class PrattParser(with_metaclass(PrattParserMeta, object)):
+
+ # regular expression for ignored input (e.g., whitespace)
+ ignore = None
+
+ # regular expressions for tokens that do not match themselves
+ patterns = {}
+
+ # all token kinds (note that order matters - the first matching token
+ # will be returned)
+ tokens = []
+
+ # precedence of tokens, as a list of lists, from lowest to highest
+ precedence = []
+
+ def parse(self, source):
+ pc = ParseContext(self, source, self._generate_tokens(source))
+ result = pc.parse()
+ # if there are any tokens remaining, that's an error..
+ token = pc.attempt()
+ if token:
+ raise SyntaxError.unexpected(token, self.infix_rules)
+ return result
+
+ def parseUntilTerminator(self, source, terminator):
+ pc = ParseContext(self, source, self._generate_tokens(source))
+ result = pc.parse()
+        token = pc.attempt()
+        if not token:
+            raise SyntaxError('Unexpected end of input')
+        if token.kind != terminator:
+            raise SyntaxError.unexpected(token, [terminator])
+ return (result, token.start)
+
+ def _generate_tokens(self, source):
+ offset = 0
+ while True:
+ start = offset
+ remainder = source[offset:]
+ mo = self.token_re.match(remainder)
+ if not mo:
+ if remainder:
+ raise SyntaxError(
+ "Unexpected input: '{}'".format(remainder))
+ break
+ offset += mo.end()
+
+ # figure out which token matched (note that idx is 0-based)
+ indexes = list(
+ filter(lambda x: x[1] is not None, enumerate(mo.groups())))
+ if indexes:
+ idx = indexes[0][0]
+ yield Token(
+ kind=self.tokens[idx],
+ value=mo.group(idx + 1), # (mo.group is 1-based)
+ start=start,
+ end=offset)
+
+
+class ParseContext(object):
+
+ def __init__(self, parser, source, token_generator):
+ self.parser = parser
+ self.source = source
+
+ self._tokens = token_generator
+ self._error = None
+
+ self._advance()
+
+ def _advance(self):
+ try:
+ self.next_token = next(self._tokens)
+ except StopIteration:
+ self.next_token = None
+ except SyntaxError as exc:
+ self._error = exc
+
+ def attempt(self, *kinds):
+ """Try to get the next token if it matches one of the kinds given,
+ otherwise returning None. If no kinds are given, any kind is
+ accepted."""
+ if self._error:
+ raise self._error
+ token = self.next_token
+ if not token:
+ return None
+ if kinds and token.kind not in kinds:
+ return None
+ self._advance()
+ return token
+
+ def require(self, *kinds):
+ """Get the next token, raising an exception if it doesn't match one of
+ the given kinds, or the input ends. If no kinds are given, returns the
+ next token of any kind."""
+ token = self.attempt()
+ if not token:
+ raise SyntaxError('Unexpected end of input')
+ if kinds and token.kind not in kinds:
+ raise SyntaxError.unexpected(token, kinds)
+ return token
+
+ def parse(self, precedence=None):
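+        # Pratt parsing: consume one prefix expression, then keep folding in
+        # infix operators for as long as their precedence is strictly higher
+        # than the level at which this call was made.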
+ parser = self.parser
+ precedence = parser.precedence_map[precedence] if precedence else 0
+ token = self.require()
+ prefix_rule = parser.prefix_rules.get(token.kind)
+ if not prefix_rule:
+ raise SyntaxError.unexpected(token, parser.prefix_rules)
+ left = prefix_rule(parser, token, self)
+ while self.next_token:
+ kind = self.next_token.kind
+ if kind not in parser.infix_rules:
+ break
+ if precedence >= parser.precedence_map[kind]:
+ break
+ token = self.require()
+ left = parser.infix_rules[kind](parser, left, token, self)
+ return left
diff --git a/third_party/python/json-e/jsone/render.py b/third_party/python/json-e/jsone/render.py
new file mode 100644
index 0000000000..e820da1ec2
--- /dev/null
+++ b/third_party/python/json-e/jsone/render.py
@@ -0,0 +1,354 @@
+from __future__ import absolute_import, print_function, unicode_literals
+
+import re
+import json as json
+from .shared import JSONTemplateError, TemplateError, DeleteMarker, string, to_str
+from . import shared
+from .interpreter import ExpressionEvaluator
+from .six import viewitems
+import functools
+
+operators = {}
+IDENTIFIER_RE = re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*$')
+
+
+def operator(name):
+ def wrap(fn):
+ operators[name] = fn
+ return fn
+ return wrap
+
+
+def evaluateExpression(expr, context):
+ evaluator = ExpressionEvaluator(context)
+ return evaluator.parse(expr)
+
+
+_interpolation_start_re = re.compile(r'\$?\${')
+
+
+def interpolate(string, context):
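+    # Replace each `${expr}` in `string` with the stringified result of
+    # evaluating `expr` against the context; the escape `$${` yields a
+    # literal `${` and is not evaluated.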
+ mo = _interpolation_start_re.search(string)
+ if not mo:
+ return string
+
+ result = []
+ evaluator = ExpressionEvaluator(context)
+
+ while True:
+ result.append(string[:mo.start()])
+ if mo.group() != '$${':
+ string = string[mo.end():]
+ parsed, offset = evaluator.parseUntilTerminator(string, '}')
+ if isinstance(parsed, (list, dict)):
+ raise TemplateError(
+ "interpolation of '{}' produced an array or object".format(string[:offset]))
+ if to_str(parsed) == "null":
+ result.append("")
+ else:
+ result.append(to_str(parsed))
+ string = string[offset + 1:]
+ else: # found `$${`
+ result.append('${')
+ string = string[mo.end():]
+
+ mo = _interpolation_start_re.search(string)
+ if not mo:
+ result.append(string)
+ break
+
+ return ''.join(result)
+
+
+def checkUndefinedProperties(template, allowed):
+ unknownKeys = []
+ combined = "|".join(allowed) + "$"
+ unknownKeys = [key for key in sorted(template)
+ if not re.match(combined, key)]
+ if unknownKeys:
+ raise TemplateError(allowed[0].replace('\\', '') +
+ " has undefined properties: " + " ".join(unknownKeys))
+
+
+@operator('$eval')
+def eval(template, context):
+ checkUndefinedProperties(template, ['\$eval'])
+ if not isinstance(template['$eval'], string):
+ raise TemplateError("$eval must be given a string expression")
+ return evaluateExpression(template['$eval'], context)
+
+
+@operator('$flatten')
+def flatten(template, context):
+ checkUndefinedProperties(template, ['\$flatten'])
+ value = renderValue(template['$flatten'], context)
+ if not isinstance(value, list):
+ raise TemplateError('$flatten value must evaluate to an array')
+
+ def gen():
+ for e in value:
+ if isinstance(e, list):
+ for e2 in e:
+ yield e2
+ else:
+ yield e
+ return list(gen())
+
+
+@operator('$flattenDeep')
+def flattenDeep(template, context):
+ checkUndefinedProperties(template, ['\$flattenDeep'])
+ value = renderValue(template['$flattenDeep'], context)
+ if not isinstance(value, list):
+ raise TemplateError('$flattenDeep value must evaluate to an array')
+
+ def gen(value):
+ if isinstance(value, list):
+ for e in value:
+ for sub in gen(e):
+ yield sub
+ else:
+ yield value
+
+ return list(gen(value))
+
+
+@operator('$fromNow')
+def fromNow(template, context):
+ checkUndefinedProperties(template, ['\$fromNow', 'from'])
+ offset = renderValue(template['$fromNow'], context)
+ reference = renderValue(
+ template['from'], context) if 'from' in template else context.get('now')
+
+ if not isinstance(offset, string):
+ raise TemplateError("$fromNow expects a string")
+ return shared.fromNow(offset, reference)
+
+
+@operator('$if')
+def ifConstruct(template, context):
+ checkUndefinedProperties(template, ['\$if', 'then', 'else'])
+ condition = evaluateExpression(template['$if'], context)
+ try:
+ if condition:
+ rv = template['then']
+ else:
+ rv = template['else']
+ except KeyError:
+ return DeleteMarker
+ return renderValue(rv, context)
+
+
+@operator('$json')
+def jsonConstruct(template, context):
+ checkUndefinedProperties(template, ['\$json'])
+ value = renderValue(template['$json'], context)
+ return json.dumps(value, separators=(',', ':'), sort_keys=True, ensure_ascii=False)
+
+
+@operator('$let')
+def let(template, context):
+ checkUndefinedProperties(template, ['\$let', 'in'])
+ if not isinstance(template['$let'], dict):
+ raise TemplateError("$let value must be an object")
+
+ subcontext = context.copy()
+ for k, v in template['$let'].items():
+ if not IDENTIFIER_RE.match(k):
+ raise TemplateError('top level keys of $let must follow /[a-zA-Z_][a-zA-Z0-9_]*/')
+ subcontext[k] = renderValue(v, context)
+
+ try:
+ in_expression = template['in']
+ except KeyError:
+ raise TemplateError("$let operator requires an `in` clause")
+ return renderValue(in_expression, subcontext)
+
+
+@operator('$map')
+def map(template, context):
+ EACH_RE = 'each\([a-zA-Z_][a-zA-Z0-9_]*\)'
+ checkUndefinedProperties(template, ['\$map', EACH_RE])
+ value = renderValue(template['$map'], context)
+ if not isinstance(value, list) and not isinstance(value, dict):
+ raise TemplateError("$map value must evaluate to an array or object")
+
+ is_obj = isinstance(value, dict)
+
+ each_keys = [k for k in template if k.startswith('each(')]
+ if len(each_keys) != 1:
+ raise TemplateError(
+ "$map requires exactly one other property, each(..)")
+ each_key = each_keys[0]
+ each_var = each_key[5:-1]
+ each_template = template[each_key]
+
+ def gen(val):
+ subcontext = context.copy()
+ for elt in val:
+ subcontext[each_var] = elt
+ elt = renderValue(each_template, subcontext)
+ if elt is not DeleteMarker:
+ yield elt
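+    # For objects, each key/value pair is exposed to the template as
+    # {key, val}; the rendered objects are then merged, with later keys
+    # overwriting earlier ones.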
+ if is_obj:
+ value = [{'key': v[0], 'val': v[1]} for v in value.items()]
+ v = dict()
+ for e in gen(value):
+ if not isinstance(e, dict):
+ raise TemplateError(
+ "$map on objects expects {0} to evaluate to an object".format(each_key))
+ v.update(e)
+ return v
+ else:
+ return list(gen(value))
+
+
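+# Illustrative example: with context {'x': 1},
+# {'$match': {'x == 1': 'one', 'x > 5': 'big'}} renders to ['one'].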
+@operator('$match')
+def matchConstruct(template, context):
+    checkUndefinedProperties(template, [r'\$match'])
+
+ if not isinstance(template['$match'], dict):
+ raise TemplateError("$match can evaluate objects only")
+
+ result = []
+ for condition in template['$match']:
+ if evaluateExpression(condition, context):
+ result.append(renderValue(template['$match'][condition], context))
+
+ return result
+
+
+@operator('$merge')
+def merge(template, context):
+    checkUndefinedProperties(template, [r'\$merge'])
+ value = renderValue(template['$merge'], context)
+ if not isinstance(value, list) or not all(isinstance(e, dict) for e in value):
+ raise TemplateError(
+ "$merge value must evaluate to an array of objects")
+ v = dict()
+ for e in value:
+ v.update(e)
+ return v
+
+
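+# Illustrative example: {'$mergeDeep': [{'a': {'b': 1}}, {'a': {'c': 2}}]}
+# renders to {'a': {'b': 1, 'c': 2}}; nested lists are concatenated instead.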
+@operator('$mergeDeep')
+def mergeDeep(template, context):
+    checkUndefinedProperties(template, [r'\$mergeDeep'])
+ value = renderValue(template['$mergeDeep'], context)
+ if not isinstance(value, list) or not all(isinstance(e, dict) for e in value):
+ raise TemplateError(
+ "$mergeDeep value must evaluate to an array of objects")
+
+ def merge(l, r):
+ if isinstance(l, list) and isinstance(r, list):
+ return l + r
+ if isinstance(l, dict) and isinstance(r, dict):
+ res = l.copy()
+ for k, v in viewitems(r):
+ if k in l:
+ res[k] = merge(l[k], v)
+ else:
+ res[k] = v
+ return res
+ return r
+ if len(value) == 0:
+ return {}
+ return functools.reduce(merge, value[1:], value[0])
+
+
+@operator('$reverse')
+def reverse(template, context):
+    checkUndefinedProperties(template, [r'\$reverse'])
+ value = renderValue(template['$reverse'], context)
+ if not isinstance(value, list):
+ raise TemplateError("$reverse value must evaluate to an array of objects")
+ return list(reversed(value))
+
+
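+# Illustrative example: {'$sort': [3, 1, 2]} renders to [1, 2, 3];
+# {'$sort': [{'a': 2}, {'a': 1}], 'by(e)': 'e.a'} sorts by the computed key.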
+@operator('$sort')
+def sort(template, context):
+    BY_RE = r'by\([a-zA-Z_][a-zA-Z0-9_]*\)'
+    checkUndefinedProperties(template, [r'\$sort', BY_RE])
+ value = renderValue(template['$sort'], context)
+ if not isinstance(value, list):
+        raise TemplateError('$sort value must evaluate to an array')
+
+ # handle by(..) if given, applying the schwartzian transform
+ by_keys = [k for k in template if k.startswith('by(')]
+ if len(by_keys) == 1:
+ by_key = by_keys[0]
+ by_var = by_key[3:-1]
+ by_expr = template[by_key]
+
+ def xform():
+ subcontext = context.copy()
+ for e in value:
+ subcontext[by_var] = e
+ yield evaluateExpression(by_expr, subcontext), e
+ to_sort = list(xform())
+ elif len(by_keys) == 0:
+ to_sort = [(e, e) for e in value]
+ else:
+ raise TemplateError('only one by(..) is allowed')
+
+ # check types
+ try:
+ eltype = type(to_sort[0][0])
+ except IndexError:
+ return []
+    if eltype in (list, dict, bool, type(None)):
+        raise TemplateError('$sort values to be sorted must have the same type')
+    if not all(isinstance(e[0], eltype) for e in to_sort):
+        raise TemplateError('$sort values to be sorted must have the same type')
+
+    # unzip the schwartzian transform, sorting on the computed key only
+    return list(e[1] for e in sorted(to_sort, key=lambda e: e[0]))
+
+
+def renderValue(template, context):
+ if isinstance(template, string):
+ return interpolate(template, context)
+
+ elif isinstance(template, dict):
+ matches = [k for k in template if k in operators]
+ if matches:
+ if len(matches) > 1:
+ raise TemplateError("only one operator allowed")
+ return operators[matches[0]](template, context)
+
+ def updated():
+ for k, v in viewitems(template):
+ if k.startswith('$$'):
+ k = k[1:]
+ elif k.startswith('$') and IDENTIFIER_RE.match(k[1:]):
+ raise TemplateError(
+                    '$<identifier> is reserved; use $$<identifier>')
+ else:
+ k = interpolate(k, context)
+
+ try:
+ v = renderValue(v, context)
+ except JSONTemplateError as e:
+ if IDENTIFIER_RE.match(k):
+ e.add_location('.{}'.format(k))
+ else:
+ e.add_location('[{}]'.format(json.dumps(k)))
+ raise
+ if v is not DeleteMarker:
+ yield k, v
+ return dict(updated())
+
+ elif isinstance(template, list):
+ def updated():
+ for i, e in enumerate(template):
+ try:
+ v = renderValue(e, context)
+ if v is not DeleteMarker:
+ yield v
+ except JSONTemplateError as e:
+ e.add_location('[{}]'.format(i))
+ raise
+ return list(updated())
+
+ else:
+ return template
diff --git a/third_party/python/json-e/jsone/shared.py b/third_party/python/json-e/jsone/shared.py
new file mode 100644
index 0000000000..0e70e21f81
--- /dev/null
+++ b/third_party/python/json-e/jsone/shared.py
@@ -0,0 +1,131 @@
+from __future__ import absolute_import, print_function, unicode_literals
+
+import re
+import datetime
+
+
+class DeleteMarker:
+ pass
+
+
+class JSONTemplateError(Exception):
+ def __init__(self, message):
+ super(JSONTemplateError, self).__init__(message)
+ self.location = []
+
+ def add_location(self, loc):
+ self.location.insert(0, loc)
+
+ def __str__(self):
+ location = ' at template' + ''.join(self.location)
+ return "{}{}: {}".format(
+ self.__class__.__name__,
+ location if self.location else '',
+ self.args[0])
+
+
+class TemplateError(JSONTemplateError):
+ pass
+
+
+class InterpreterError(JSONTemplateError):
+ pass
+
+
+# Regular expression matching offsets like:
+#   X years Y months Z weeks N days M hours P minutes Q seconds
+# todo: support hr, wk, yr
+FROMNOW_RE = re.compile(''.join([
+    r'^(\s*(?P<years>\d+)\s*y(ears?)?)?',
+    r'(\s*(?P<months>\d+)\s*mo(nths?)?)?',
+    r'(\s*(?P<weeks>\d+)\s*w(eeks?)?)?',
+    r'(\s*(?P<days>\d+)\s*d(ays?)?)?',
+    r'(\s*(?P<hours>\d+)\s*h(ours?)?)?',
+    r'(\s*(?P<minutes>\d+)\s*m(in(utes?)?)?)?\s*',
+    r'(\s*(?P<seconds>\d+)\s*s(ec(onds?)?)?)?\s*$',
+]))
+
+
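+# Illustrative example: fromNow('1 day 2 hours', '2023-01-01T00:00:00.000Z')
+# returns '2023-01-02T02:00:00.000Z'; a leading '-' offsets into the past.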
+def fromNow(offset, reference):
+ # copied from taskcluster-client.py
+ # We want to handle past dates as well as future
+ future = True
+ offset = offset.lstrip()
+ if offset.startswith('-'):
+ future = False
+ offset = offset[1:].lstrip()
+ if offset.startswith('+'):
+ offset = offset[1:].lstrip()
+
+ # Parse offset
+ m = FROMNOW_RE.match(offset)
+ if m is None:
+ raise ValueError("offset string: '%s' does not parse" % offset)
+
+ # In order to calculate years and months we need to calculate how many days
+ # to offset the offset by, since timedelta only goes as high as weeks
+ days = 0
+ hours = 0
+ minutes = 0
+ seconds = 0
+ if m.group('years'):
+ # forget leap years, a year is 365 days
+ years = int(m.group('years'))
+ days += 365 * years
+ if m.group('months'):
+ # assume "month" means 30 days
+ months = int(m.group('months'))
+ days += 30 * months
+ days += int(m.group('days') or 0)
+ hours += int(m.group('hours') or 0)
+ minutes += int(m.group('minutes') or 0)
+ seconds += int(m.group('seconds') or 0)
+
+ # Offset datetime from utc
+ delta = datetime.timedelta(
+ weeks=int(m.group('weeks') or 0),
+ days=days,
+ hours=hours,
+ minutes=minutes,
+ seconds=seconds,
+ )
+
+ if isinstance(reference, string):
+ reference = datetime.datetime.strptime(
+ reference, '%Y-%m-%dT%H:%M:%S.%fZ')
+ elif reference is None:
+ reference = datetime.datetime.utcnow()
+ return stringDate(reference + delta if future else reference - delta)
+
+
+datefmt_re = re.compile(r'(\.[0-9]{3})[0-9]*(\+00:00)?')
+
+
+def to_str(v):
+ if isinstance(v, bool):
+ return {True: 'true', False: 'false'}[v]
+ elif isinstance(v, list):
+ return ','.join(to_str(e) for e in v)
+ elif v is None:
+ return 'null'
+ else:
+ return str(v)
+
+
+def stringDate(date):
+ # Convert to isoFormat
+ try:
+ string = date.isoformat(timespec='microseconds')
+    # Python 2.7 through 3.5 do not support the timespec argument
+    except TypeError:
+ string = date.isoformat()
+ if string.find('.') == -1:
+ string += '.000'
+ string = datefmt_re.sub(r'\1Z', string)
+ return string
+
+
+# the base class for strings, regardless of python version
+try:
+ string = basestring
+except NameError:
+ string = str
diff --git a/third_party/python/json-e/jsone/six.py b/third_party/python/json-e/jsone/six.py
new file mode 100644
index 0000000000..1ab9cd2d7d
--- /dev/null
+++ b/third_party/python/json-e/jsone/six.py
@@ -0,0 +1,23 @@
+import sys
+import operator
+
+# https://github.com/benjaminp/six/blob/2c3492a9f16d294cd5e6b43d6110c5a3a2e58b4c/six.py#L818
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta):
+
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+ return type.__new__(metaclass, 'temporary_class', (), {})
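+
+
+# Illustrative usage: class Base(with_metaclass(Meta, object)): ... gives Base
+# the metaclass Meta under both Python 2 and Python 3.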
+
+
+# https://github.com/benjaminp/six/blob/2c3492a9f16d294cd5e6b43d6110c5a3a2e58b4c/six.py#L578
+if sys.version_info[0] == 3:
+ viewitems = operator.methodcaller("items")
+else:
+ viewitems = operator.methodcaller("viewitems")
diff --git a/third_party/python/json-e/package.json b/third_party/python/json-e/package.json
new file mode 100644
index 0000000000..0c388d57db
--- /dev/null
+++ b/third_party/python/json-e/package.json
@@ -0,0 +1,35 @@
+{
+ "name": "json-e",
+ "version": "2.7.0",
+ "description": "json parameterization module inspired from json-parameterization",
+ "main": "./src/index.js",
+ "scripts": {
+ "lint": "eslint src/*.js test/*.js",
+ "test": "yarn lint && mocha test/*_test.js",
+ "build-demo": "cd demo && yarn && yarn build",
+ "start-demo": "cd demo && yarn && yarn start"
+ },
+ "files": [
+ "src"
+ ],
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/taskcluster/json-e"
+ },
+ "author": "",
+ "license": "MPL-2.0",
+ "dependencies": {
+ "json-stable-stringify": "^1.0.1"
+ },
+ "devDependencies": {
+ "assume": "^1.5.2",
+ "browserify": "^14.5.0",
+ "eslint-config-taskcluster": "^3.0.0",
+ "mocha": "^4.0.1",
+ "source-map-support": "^0.5.0",
+ "timekeeper": "^2.0.0"
+ },
+ "engines": {
+ "node": ">=6.4.0"
+ }
+}
diff --git a/third_party/python/json-e/setup.cfg b/third_party/python/json-e/setup.cfg
new file mode 100644
index 0000000000..6410597b69
--- /dev/null
+++ b/third_party/python/json-e/setup.cfg
@@ -0,0 +1,8 @@
+[pep8]
+max-line-length = 100
+select = E,W
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/third_party/python/json-e/setup.py b/third_party/python/json-e/setup.py
new file mode 100644
index 0000000000..76299df9be
--- /dev/null
+++ b/third_party/python/json-e/setup.py
@@ -0,0 +1,31 @@
+import json
+import os
+from setuptools import setup, find_packages
+
+package_json = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'package.json')
+with open(package_json) as f:
+ version = json.load(f)['version']
+
+setup(name='json-e',
+ version=version,
+ description='A data-structure parameterization system written for embedding context in JSON objects',
+ author='Dustin J. Mitchell',
+ url='https://taskcluster.github.io/json-e/',
+ author_email='dustin@mozilla.com',
+ packages=['jsone'],
+ test_suite='nose.collector',
+ license='MPL2',
+ extras_require={
+ 'release': [
+ 'towncrier',
+ ],
+ },
+ tests_require=[
+ "freezegun",
+ "hypothesis",
+ "nose",
+ "PyYAML",
+ "python-dateutil",
+ 'pep8',
+ ]
+)
diff --git a/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/METADATA b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/METADATA
new file mode 100644
index 0000000000..07ec3f119b
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/METADATA
@@ -0,0 +1,195 @@
+Metadata-Version: 2.1
+Name: jsonschema
+Version: 4.17.3
+Summary: An implementation of JSON Schema validation for Python
+Project-URL: Homepage, https://github.com/python-jsonschema/jsonschema
+Project-URL: Documentation, https://python-jsonschema.readthedocs.io/
+Project-URL: Issues, https://github.com/python-jsonschema/jsonschema/issues/
+Project-URL: Funding, https://github.com/sponsors/Julian
+Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=pypi-link
+Project-URL: Changelog, https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst
+Project-URL: Source, https://github.com/python-jsonschema/jsonschema
+Author: Julian Berman
+Author-email: Julian+jsonschema@GrayVines.com
+License: MIT
+License-File: COPYING
+Keywords: data validation,json,jsonschema,validation
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7
+Requires-Dist: attrs>=17.4.0
+Requires-Dist: importlib-metadata; python_version < '3.8'
+Requires-Dist: importlib-resources>=1.4.0; python_version < '3.9'
+Requires-Dist: pkgutil-resolve-name>=1.3.10; python_version < '3.9'
+Requires-Dist: pyrsistent!=0.17.0,!=0.17.1,!=0.17.2,>=0.14.0
+Requires-Dist: typing-extensions; python_version < '3.8'
+Provides-Extra: format
+Requires-Dist: fqdn; extra == 'format'
+Requires-Dist: idna; extra == 'format'
+Requires-Dist: isoduration; extra == 'format'
+Requires-Dist: jsonpointer>1.13; extra == 'format'
+Requires-Dist: rfc3339-validator; extra == 'format'
+Requires-Dist: rfc3987; extra == 'format'
+Requires-Dist: uri-template; extra == 'format'
+Requires-Dist: webcolors>=1.11; extra == 'format'
+Provides-Extra: format-nongpl
+Requires-Dist: fqdn; extra == 'format-nongpl'
+Requires-Dist: idna; extra == 'format-nongpl'
+Requires-Dist: isoduration; extra == 'format-nongpl'
+Requires-Dist: jsonpointer>1.13; extra == 'format-nongpl'
+Requires-Dist: rfc3339-validator; extra == 'format-nongpl'
+Requires-Dist: rfc3986-validator>0.1.0; extra == 'format-nongpl'
+Requires-Dist: uri-template; extra == 'format-nongpl'
+Requires-Dist: webcolors>=1.11; extra == 'format-nongpl'
+Description-Content-Type: text/x-rst
+
+==========
+jsonschema
+==========
+
+|PyPI| |Pythons| |CI| |ReadTheDocs| |Precommit| |Zenodo|
+
+.. |PyPI| image:: https://img.shields.io/pypi/v/jsonschema.svg
+ :alt: PyPI version
+ :target: https://pypi.org/project/jsonschema/
+
+.. |Pythons| image:: https://img.shields.io/pypi/pyversions/jsonschema.svg
+ :alt: Supported Python versions
+ :target: https://pypi.org/project/jsonschema/
+
+.. |CI| image:: https://github.com/python-jsonschema/jsonschema/workflows/CI/badge.svg
+ :alt: Build status
+ :target: https://github.com/python-jsonschema/jsonschema/actions?query=workflow%3ACI
+
+.. |ReadTheDocs| image:: https://readthedocs.org/projects/python-jsonschema/badge/?version=stable&style=flat
+ :alt: ReadTheDocs status
+ :target: https://python-jsonschema.readthedocs.io/en/stable/
+
+.. |Precommit| image:: https://results.pre-commit.ci/badge/github/python-jsonschema/jsonschema/main.svg
+ :alt: pre-commit.ci status
+ :target: https://results.pre-commit.ci/latest/github/python-jsonschema/jsonschema/main
+
+.. |Zenodo| image:: https://zenodo.org/badge/3072629.svg
+ :target: https://zenodo.org/badge/latestdoi/3072629
+
+
+``jsonschema`` is an implementation of the `JSON Schema
+<https://json-schema.org>`_ specification for Python.
+
+.. code-block:: python
+
+ >>> from jsonschema import validate
+
+ >>> # A sample schema, like what we'd get from json.load()
+ >>> schema = {
+ ... "type" : "object",
+ ... "properties" : {
+ ... "price" : {"type" : "number"},
+ ... "name" : {"type" : "string"},
+ ... },
+ ... }
+
+ >>> # If no exception is raised by validate(), the instance is valid.
+ >>> validate(instance={"name" : "Eggs", "price" : 34.99}, schema=schema)
+
+ >>> validate(
+ ... instance={"name" : "Eggs", "price" : "Invalid"}, schema=schema,
+ ... ) # doctest: +IGNORE_EXCEPTION_DETAIL
+ Traceback (most recent call last):
+ ...
+ ValidationError: 'Invalid' is not of type 'number'
+
+It can also be used from the console:
+
+.. code-block:: bash
+
+ $ jsonschema --instance sample.json sample.schema
+
+Features
+--------
+
+* Partial support for
+ `Draft 2020-12 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft202012Validator>`_ and
+ `Draft 2019-09 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft201909Validator>`_,
+ except for ``dynamicRef`` / ``recursiveRef`` and ``$vocabulary`` (in-progress).
+ Full support for
+ `Draft 7 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft7Validator>`_,
+ `Draft 6 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft6Validator>`_,
+ `Draft 4 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft4Validator>`_
+ and
+ `Draft 3 <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/validators/#jsonschema.validators.Draft3Validator>`_
+
+* `Lazy validation <https://python-jsonschema.readthedocs.io/en/latest/api/jsonschema/protocols/#jsonschema.protocols.Validator.iter_errors>`_
+ that can iteratively report *all* validation errors.
+
+* `Programmatic querying <https://python-jsonschema.readthedocs.io/en/latest/errors/>`_
+ of which properties or items failed validation.
+
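+For example, lazy validation can report every error in one pass instead of
+stopping at the first failure (an illustrative sketch of the API above):
+
+.. code-block:: python
+
+    >>> from jsonschema import Draft202012Validator
+    >>> validator = Draft202012Validator({"maxItems": 2, "uniqueItems": True})
+    >>> for error in validator.iter_errors([1, 1, 1]):
+    ...     print(error.message)
+    [1, 1, 1] is too long
+    [1, 1, 1] has non-unique elements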
+
+Installation
+------------
+
+``jsonschema`` is available on `PyPI <https://pypi.org/project/jsonschema/>`_. You can install using `pip <https://pip.pypa.io/en/stable/>`_:
+
+.. code-block:: bash
+
+ $ pip install jsonschema
+
+
+Extras
+======
+
+Two extras are available when installing the package, both currently related to ``format`` validation:
+
+ * ``format``
+ * ``format-nongpl``
+
+They can be used when installing in order to include additional dependencies, e.g.:
+
+.. code-block:: bash
+
+ $ pip install jsonschema'[format]'
+
+Be aware that the mere presence of these dependencies – or even the specification of ``format`` checks in a schema – does *not* activate format checks (as per the specification).
+Please read the `format validation documentation <https://python-jsonschema.readthedocs.io/en/latest/validate/#validating-formats>`_ for further details.
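+
+For example (an illustrative sketch), format validation is opted into by
+passing a format checker explicitly:
+
+.. code-block:: python
+
+    >>> from jsonschema import Draft202012Validator, validate
+    >>> validate(
+    ...     instance="not-an-email",
+    ...     schema={"format": "email"},
+    ...     format_checker=Draft202012Validator.FORMAT_CHECKER,
+    ... )  # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+        ...
+    ValidationError: 'not-an-email' is not a 'email'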
+
+About
+-----
+
+I'm Julian Berman.
+
+``jsonschema`` is on `GitHub <https://github.com/python-jsonschema/jsonschema>`_.
+
+Get in touch, via GitHub or otherwise, if you've got something to contribute,
+it'd be most welcome!
+
+You can also generally find me on Libera (nick: ``Julian``) in various
+channels, including ``#python``.
+
+If you feel overwhelmingly grateful, you can also `sponsor me
+<https://github.com/sponsors/Julian/>`_.
+
+And for companies who appreciate ``jsonschema`` and its continued support
+and growth, ``jsonschema`` is also now supportable via `TideLift
+<https://tidelift.com/subscription/pkg/pypi-jsonschema?utm_source=pypi-jsonschema&utm_medium=referral&utm_campaign=readme>`_.
+
+
+Release Information
+-------------------
+
+v4.17.3
+=======
+
+* Fix instantiating validators with cached refs to boolean schemas
+ rather than objects (#1018).
diff --git a/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/RECORD b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/RECORD
new file mode 100644
index 0000000000..22b5fecf4a
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/RECORD
@@ -0,0 +1,52 @@
+jsonschema/__init__.py,sha256=FRdJDXN8-AFk-Fj1qclckQsZNeGQB__r_QuMjtRoze4,2187
+jsonschema/__main__.py,sha256=Sfz1ZNeogymj_KZxq6JXY3F6O_1v28sLIiskusifQ5s,40
+jsonschema/_format.py,sha256=P7gjEZzWa1dU0wDu3NbyG1cNW6drM3i454a_7sJE8TY,14575
+jsonschema/_legacy_validators.py,sha256=0KDc3X0gTzuW52NRQJ123ZW_IBLcIDsBaVFVfMo_vNY,10549
+jsonschema/_types.py,sha256=YgKkjzf97pKRsiuc5RN76jJPGCpmdMHJlhcOVWjCW78,5425
+jsonschema/_utils.py,sha256=D3oRGTSk6llsZDFdDAVQWBEi62Uy_4SWVbjPiG9PJcs,10429
+jsonschema/_validators.py,sha256=4H0TI2BIXhmJrmF3iC1VzKgxwhCfiGreJfBWBtP20HY,15956
+jsonschema/cli.py,sha256=DJhWQs6X5nsmWfUqrnSjmAdzg5Sbd3igcRGguHJAEQU,8518
+jsonschema/exceptions.py,sha256=asZ8nwPwuQlMCXTb7Ztyi1OuxkqRiPnxK9TuwiDqGRo,11336
+jsonschema/protocols.py,sha256=zfj-Rmc2rsNIQjEIFnnMsHlYEoS-rprnBHTG5pgvCy0,7295
+jsonschema/validators.py,sha256=zwfdaiQFvH-Z9EoEH-G_Bh8hCQzOirJg6-_AFoT8_ow,38150
+jsonschema/benchmarks/__init__.py,sha256=A0sQrxDBVHSyQ-8ru3L11hMXf3q9gVuB9x_YgHb4R9M,70
+jsonschema/benchmarks/issue232.py,sha256=GKQBwm03sf-pPSxBxc4YDvBBnMYknOk6m-WtTntN5VE,506
+jsonschema/benchmarks/json_schema_test_suite.py,sha256=PvfabpUYcF4_7csYDTcTauED8rnFEGYbdY5RqTXD08s,320
+jsonschema/benchmarks/issue232/issue.json,sha256=eaPOZjMRu5u8RpKrsA9uk7ucPZS5tkKG4D_hkOTQ3Hk,117105
+jsonschema/schemas/draft2019-09.json,sha256=e3YbPhIfCgyh6ioLjizIVrz4AWBLgmjXG6yqICvAwTs,1785
+jsonschema/schemas/draft2020-12.json,sha256=Qdp29a-3zgYtJI92JGOpL3ykfk4PkFsiS6av7vkd7Q8,2452
+jsonschema/schemas/draft3.json,sha256=LPdfZENvtb43Si6qJ6uLfh_WUcm0ba6mxnsC_WTiRYs,2600
+jsonschema/schemas/draft4.json,sha256=4UidC0dV8CeTMCWR0_y48Htok6gqlPJIlfjk7fEbguI,4357
+jsonschema/schemas/draft6.json,sha256=wp386fVINcOgbAOzxdXsDtp3cGVo-cTffPvHVmpRAG0,4437
+jsonschema/schemas/draft7.json,sha256=PVOSCIJhYGxVm2A_OFMpyfGrRbXWZ-uZBodFOwVdQF4,4819
+jsonschema/schemas/vocabularies/draft2019-09/applicator,sha256=aJUQDplyb7sQcFhRK77D7P1LJOj9L6zuPlBe5ysNTDE,1860
+jsonschema/schemas/vocabularies/draft2019-09/content,sha256=m31PVaTi_bAsQwBo_f-rxzKt3OI42j8d8mkCScM1MnQ,517
+jsonschema/schemas/vocabularies/draft2019-09/core,sha256=taLElX9kldClCB8ECevooU5BOayyA_x0hHH47eKvWyw,1531
+jsonschema/schemas/vocabularies/draft2019-09/meta-data,sha256=1H4kRd1qgicaKY2DzGxsuNSuHhXg3Fa-zTehY-zwEoY,892
+jsonschema/schemas/vocabularies/draft2019-09/validation,sha256=HlJsHTNac0gF_ILPV5jBK5YK19olF8Zs2lobCTWcPBw,2834
+jsonschema/schemas/vocabularies/draft2020-12/applicator,sha256=xKbkFHuR_vf-ptwFjLG_k0AvdBS3ZXiosWqvHa1qrO8,1659
+jsonschema/schemas/vocabularies/draft2020-12/content,sha256=CDQ3R3ZOSlgUJieTz01lIFenkThjxZUNQyl-jh_axbY,519
+jsonschema/schemas/vocabularies/draft2020-12/core,sha256=wtEqjk3RHTNt_IOj9mOqTGnwtJs76wlP_rJbUxb0gD0,1564
+jsonschema/schemas/vocabularies/draft2020-12/format,sha256=UOu_55BhGoSbjMQAoJwdDg-2q1wNQ6DyIgH9NiUFa_Q,403
+jsonschema/schemas/vocabularies/draft2020-12/format-annotation,sha256=q8d1rf79idIjWBcNm_k_Tr0jSVY7u-3WDwK-98gSvMA,448
+jsonschema/schemas/vocabularies/draft2020-12/format-assertion,sha256=xSJCuaG7eGsmw-gset1CjDH5yW5XXc6Z5W6l_qptogw,445
+jsonschema/schemas/vocabularies/draft2020-12/meta-data,sha256=j3bW4U9Bubku-TO3CM3FFEyLUmhlGtEZGEhfsXVPHHY,892
+jsonschema/schemas/vocabularies/draft2020-12/unevaluated,sha256=Lb-8tzmUtnCwl2SSre4f_7RsIWgnhNL1pMpWH54tDLQ,506
+jsonschema/schemas/vocabularies/draft2020-12/validation,sha256=cBCjHlQfMtK-ch4t40jfdcmzaHaj7TBId_wKvaHTelg,2834
+jsonschema/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+jsonschema/tests/_helpers.py,sha256=yoWpWVYq4auuTPPd1_8FXV77RlczQtiyutSh6c191ZM,618
+jsonschema/tests/_suite.py,sha256=EyzqI3EGlJiMX_o3C-W51J2m4JVRU0uqrWFjdmCspgI,7052
+jsonschema/tests/fuzz_validate.py,sha256=fUA7yTJIihaCwJplkUehZeyB84HcXEcqtY5oPJXIO7I,1114
+jsonschema/tests/test_cli.py,sha256=e9kjwfVp1sSBCF9ffGH7FwgcRlTaBZUehcWiM-BmmLk,28857
+jsonschema/tests/test_deprecations.py,sha256=uf0ct-i9V3UBc3zm8DRcgxCitURKl2-Qt3wpAUoK0GU,9284
+jsonschema/tests/test_exceptions.py,sha256=vvh4zfHVvE9egGn4qc1jQo2s8fh09oVt1WcCRpaKivM,19639
+jsonschema/tests/test_format.py,sha256=h79gKpcKV58qOQOn8TsJNf2HFkrUYzNZ3Uai_cc2nQE,3810
+jsonschema/tests/test_jsonschema_test_suite.py,sha256=-sUumFw_1MTcfMGrG5-Zw3_yldVHCto6SxPh-aoh64g,18090
+jsonschema/tests/test_types.py,sha256=_m7chMGj9U5G57jB_CmUuP5mSit3PET584ehoIlKkf4,6983
+jsonschema/tests/test_utils.py,sha256=lJRVYyQeZQTUCTU_M3BhlkxPMgjsc8KQCd7U_Qkook8,3749
+jsonschema/tests/test_validators.py,sha256=JW4Y5mzVMuumYdBCEFhxPO7EIqN-k2MnbwzBXHq7LIE,78785
+jsonschema-4.17.3.dist-info/METADATA,sha256=YnDEs-j6S4At_Fq54u7PqfYG4gQT8E5nrp8vu2PXeXw,7879
+jsonschema-4.17.3.dist-info/WHEEL,sha256=NaLmgHHW_f9jTvv_wRh9vcK7c7EK9o5fwsIXMOzoGgM,87
+jsonschema-4.17.3.dist-info/entry_points.txt,sha256=vO7rX4Fs_xIVJy2pnAtKgTSxfpnozAVQ0DjCmpMxnWE,51
+jsonschema-4.17.3.dist-info/licenses/COPYING,sha256=T5KgFaE8TRoEC-8BiqE0MLTxvHO0Gxa7hGw0Z2bedDk,1057
+jsonschema-4.17.3.dist-info/RECORD,,
diff --git a/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/WHEEL b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/WHEEL
new file mode 100644
index 0000000000..6d803659b7
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: hatchling 1.11.1
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/entry_points.txt b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/entry_points.txt
new file mode 100644
index 0000000000..eecef9d8fa
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[console_scripts]
+jsonschema = jsonschema.cli:main
diff --git a/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/licenses/COPYING b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/licenses/COPYING
new file mode 100644
index 0000000000..af9cfbdb13
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema-4.17.3.dist-info/licenses/COPYING
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Julian Berman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/third_party/python/jsonschema/jsonschema/__init__.py b/third_party/python/jsonschema/jsonschema/__init__.py
new file mode 100644
index 0000000000..6628fc7eb9
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/__init__.py
@@ -0,0 +1,71 @@
+"""
+An implementation of JSON Schema for Python
+
+The main functionality is provided by the validator classes for each of the
+supported JSON Schema versions.
+
+Most commonly, `jsonschema.validators.validate` is the quickest way to simply
+validate a given instance under a schema, and will create a validator
+for you.
+"""
+import warnings
+
+from jsonschema._format import FormatChecker
+from jsonschema._types import TypeChecker
+from jsonschema.exceptions import (
+ ErrorTree,
+ FormatError,
+ RefResolutionError,
+ SchemaError,
+ ValidationError,
+)
+from jsonschema.protocols import Validator
+from jsonschema.validators import (
+ Draft3Validator,
+ Draft4Validator,
+ Draft6Validator,
+ Draft7Validator,
+ Draft201909Validator,
+ Draft202012Validator,
+ RefResolver,
+ validate,
+)
+
+
+def __getattr__(name):
+ if name == "__version__":
+ warnings.warn(
+ "Accessing jsonschema.__version__ is deprecated and will be "
+ "removed in a future release. Use importlib.metadata directly "
+ "to query for jsonschema's version.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ try:
+ from importlib import metadata
+ except ImportError:
+ import importlib_metadata as metadata
+
+ return metadata.version("jsonschema")
+
+ format_checkers = {
+ "draft3_format_checker": Draft3Validator,
+ "draft4_format_checker": Draft4Validator,
+ "draft6_format_checker": Draft6Validator,
+ "draft7_format_checker": Draft7Validator,
+ "draft201909_format_checker": Draft201909Validator,
+ "draft202012_format_checker": Draft202012Validator,
+ }
+ ValidatorForFormat = format_checkers.get(name)
+ if ValidatorForFormat is not None:
+ warnings.warn(
+ f"Accessing jsonschema.{name} is deprecated and will be "
+ "removed in a future release. Instead, use the FORMAT_CHECKER "
+ "attribute on the corresponding Validator.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return ValidatorForFormat.FORMAT_CHECKER
+
+ raise AttributeError(f"module {__name__} has no attribute {name}")
diff --git a/third_party/python/jsonschema/jsonschema/__main__.py b/third_party/python/jsonschema/jsonschema/__main__.py
new file mode 100644
index 0000000000..fdc21e2306
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/__main__.py
@@ -0,0 +1,3 @@
+from jsonschema.cli import main
+
+main()
diff --git a/third_party/python/jsonschema/jsonschema/_format.py b/third_party/python/jsonschema/jsonschema/_format.py
new file mode 100644
index 0000000000..5ec97977ad
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/_format.py
@@ -0,0 +1,518 @@
+from __future__ import annotations
+
+from contextlib import suppress
+from uuid import UUID
+import datetime
+import ipaddress
+import re
+import typing
+import warnings
+
+from jsonschema.exceptions import FormatError
+
+_FormatCheckCallable = typing.Callable[[object], bool]
+_F = typing.TypeVar("_F", bound=_FormatCheckCallable)
+_RaisesType = typing.Union[
+ typing.Type[Exception], typing.Tuple[typing.Type[Exception], ...],
+]
+
+
+class FormatChecker:
+ """
+ A ``format`` property checker.
+
+ JSON Schema does not mandate that the ``format`` property actually do any
+ validation. If validation is desired however, instances of this class can
+ be hooked into validators to enable format validation.
+
+ `FormatChecker` objects always return ``True`` when asked about
+ formats that they do not know how to validate.
+
+ To add a check for a custom format use the `FormatChecker.checks`
+ decorator.
+
+ Arguments:
+
+ formats:
+
+ The known formats to validate. This argument can be used to
+ limit which formats will be used during validation.
+ """
+
+ checkers: dict[
+ str,
+ tuple[_FormatCheckCallable, _RaisesType],
+ ] = {}
+
+ def __init__(self, formats: typing.Iterable[str] | None = None):
+ if formats is None:
+ formats = self.checkers.keys()
+ self.checkers = {k: self.checkers[k] for k in formats}
+
+ def __repr__(self):
+ return "<FormatChecker checkers={}>".format(sorted(self.checkers))
+
+ def checks(
+ self, format: str, raises: _RaisesType = (),
+ ) -> typing.Callable[[_F], _F]:
+ """
+ Register a decorated function as validating a new format.
+
+ Arguments:
+
+ format:
+
+ The format that the decorated function will check.
+
+ raises:
+
+ The exception(s) raised by the decorated function when an
+ invalid instance is found.
+
+ The exception object will be accessible as the
+ `jsonschema.exceptions.ValidationError.cause` attribute of the
+ resulting validation error.
+ """
+
+ def _checks(func: _F) -> _F:
+ self.checkers[format] = (func, raises)
+ return func
+
+ return _checks
+
+ @classmethod
+ def cls_checks(
+ cls, format: str, raises: _RaisesType = (),
+ ) -> typing.Callable[[_F], _F]:
+ warnings.warn(
+ (
+ "FormatChecker.cls_checks is deprecated. Call "
+ "FormatChecker.checks on a specific FormatChecker instance "
+ "instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return cls._cls_checks(format=format, raises=raises)
+
+ @classmethod
+ def _cls_checks(
+ cls, format: str, raises: _RaisesType = (),
+ ) -> typing.Callable[[_F], _F]:
+ def _checks(func: _F) -> _F:
+ cls.checkers[format] = (func, raises)
+ return func
+
+ return _checks
+
+ def check(self, instance: object, format: str) -> None:
+ """
+ Check whether the instance conforms to the given format.
+
+ Arguments:
+
+ instance (*any primitive type*, i.e. str, number, bool):
+
+ The instance to check
+
+ format:
+
+ The format that instance should conform to
+
+ Raises:
+
+ FormatError:
+
+ if the instance does not conform to ``format``
+ """
+
+ if format not in self.checkers:
+ return
+
+ func, raises = self.checkers[format]
+ result, cause = None, None
+ try:
+ result = func(instance)
+ except raises as e:
+ cause = e
+ if not result:
+ raise FormatError(f"{instance!r} is not a {format!r}", cause=cause)
+
+ def conforms(self, instance: object, format: str) -> bool:
+ """
+ Check whether the instance conforms to the given format.
+
+ Arguments:
+
+ instance (*any primitive type*, i.e. str, number, bool):
+
+ The instance to check
+
+ format:
+
+ The format that instance should conform to
+
+ Returns:
+
+ bool: whether it conformed
+ """
+
+ try:
+ self.check(instance, format)
+ except FormatError:
+ return False
+ else:
+ return True
+
+
+draft3_format_checker = FormatChecker()
+draft4_format_checker = FormatChecker()
+draft6_format_checker = FormatChecker()
+draft7_format_checker = FormatChecker()
+draft201909_format_checker = FormatChecker()
+draft202012_format_checker = FormatChecker()
+
+_draft_checkers: dict[str, FormatChecker] = dict(
+ draft3=draft3_format_checker,
+ draft4=draft4_format_checker,
+ draft6=draft6_format_checker,
+ draft7=draft7_format_checker,
+ draft201909=draft201909_format_checker,
+ draft202012=draft202012_format_checker,
+)
+
+
+def _checks_drafts(
+ name=None,
+ draft3=None,
+ draft4=None,
+ draft6=None,
+ draft7=None,
+ draft201909=None,
+ draft202012=None,
+ raises=(),
+) -> typing.Callable[[_F], _F]:
+ draft3 = draft3 or name
+ draft4 = draft4 or name
+ draft6 = draft6 or name
+ draft7 = draft7 or name
+ draft201909 = draft201909 or name
+ draft202012 = draft202012 or name
+
+ def wrap(func: _F) -> _F:
+ if draft3:
+ func = _draft_checkers["draft3"].checks(draft3, raises)(func)
+ if draft4:
+ func = _draft_checkers["draft4"].checks(draft4, raises)(func)
+ if draft6:
+ func = _draft_checkers["draft6"].checks(draft6, raises)(func)
+ if draft7:
+ func = _draft_checkers["draft7"].checks(draft7, raises)(func)
+ if draft201909:
+ func = _draft_checkers["draft201909"].checks(draft201909, raises)(
+ func,
+ )
+ if draft202012:
+ func = _draft_checkers["draft202012"].checks(draft202012, raises)(
+ func,
+ )
+
+ # Oy. This is bad global state, but relied upon for now, until
+ # deprecation. See #519 and test_format_checkers_come_with_defaults
+ FormatChecker._cls_checks(
+ draft202012 or draft201909 or draft7 or draft6 or draft4 or draft3,
+ raises,
+ )(func)
+ return func
+
+ return wrap
+
+
+@_checks_drafts(name="idn-email")
+@_checks_drafts(name="email")
+def is_email(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return "@" in instance
+
+
+@_checks_drafts(
+ draft3="ip-address",
+ draft4="ipv4",
+ draft6="ipv4",
+ draft7="ipv4",
+ draft201909="ipv4",
+ draft202012="ipv4",
+ raises=ipaddress.AddressValueError,
+)
+def is_ipv4(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return bool(ipaddress.IPv4Address(instance))
+
+
+@_checks_drafts(name="ipv6", raises=ipaddress.AddressValueError)
+def is_ipv6(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ address = ipaddress.IPv6Address(instance)
+ return not getattr(address, "scope_id", "")
+
+
+with suppress(ImportError):
+ from fqdn import FQDN
+
+ @_checks_drafts(
+ draft3="host-name",
+ draft4="hostname",
+ draft6="hostname",
+ draft7="hostname",
+ draft201909="hostname",
+ draft202012="hostname",
+ )
+ def is_host_name(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return FQDN(instance).is_valid
+
+
+with suppress(ImportError):
+    # The built-in `idna` codec only implements RFC 3490, so we go elsewhere.
+ import idna
+
+ @_checks_drafts(
+ draft7="idn-hostname",
+ draft201909="idn-hostname",
+ draft202012="idn-hostname",
+ raises=(idna.IDNAError, UnicodeError),
+ )
+ def is_idn_host_name(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ idna.encode(instance)
+ return True
+
+
+try:
+ import rfc3987
+except ImportError:
+ with suppress(ImportError):
+ from rfc3986_validator import validate_rfc3986
+
+ @_checks_drafts(name="uri")
+ def is_uri(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return validate_rfc3986(instance, rule="URI")
+
+ @_checks_drafts(
+ draft6="uri-reference",
+ draft7="uri-reference",
+ draft201909="uri-reference",
+ draft202012="uri-reference",
+ raises=ValueError,
+ )
+ def is_uri_reference(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return validate_rfc3986(instance, rule="URI_reference")
+
+else:
+
+ @_checks_drafts(
+ draft7="iri",
+ draft201909="iri",
+ draft202012="iri",
+ raises=ValueError,
+ )
+ def is_iri(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return rfc3987.parse(instance, rule="IRI")
+
+ @_checks_drafts(
+ draft7="iri-reference",
+ draft201909="iri-reference",
+ draft202012="iri-reference",
+ raises=ValueError,
+ )
+ def is_iri_reference(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return rfc3987.parse(instance, rule="IRI_reference")
+
+ @_checks_drafts(name="uri", raises=ValueError)
+ def is_uri(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return rfc3987.parse(instance, rule="URI")
+
+ @_checks_drafts(
+ draft6="uri-reference",
+ draft7="uri-reference",
+ draft201909="uri-reference",
+ draft202012="uri-reference",
+ raises=ValueError,
+ )
+ def is_uri_reference(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return rfc3987.parse(instance, rule="URI_reference")
+
+
+with suppress(ImportError):
+ from rfc3339_validator import validate_rfc3339
+
+ @_checks_drafts(name="date-time")
+ def is_datetime(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return validate_rfc3339(instance.upper())
+
+ @_checks_drafts(
+ draft7="time",
+ draft201909="time",
+ draft202012="time",
+ )
+ def is_time(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return is_datetime("1970-01-01T" + instance)
+
+
+@_checks_drafts(name="regex", raises=re.error)
+def is_regex(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return bool(re.compile(instance))
+
+
+@_checks_drafts(
+ draft3="date",
+ draft7="date",
+ draft201909="date",
+ draft202012="date",
+ raises=ValueError,
+)
+def is_date(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return bool(instance.isascii() and datetime.date.fromisoformat(instance))
+
+
+@_checks_drafts(draft3="time", raises=ValueError)
+def is_draft3_time(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return bool(datetime.datetime.strptime(instance, "%H:%M:%S"))
+
+
+with suppress(ImportError):
+ from webcolors import CSS21_NAMES_TO_HEX
+ import webcolors
+
+ def is_css_color_code(instance: object) -> bool:
+        return bool(webcolors.normalize_hex(instance))
+
+ @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
+ def is_css21_color(instance: object) -> bool:
+ if (
+ not isinstance(instance, str)
+ or instance.lower() in CSS21_NAMES_TO_HEX
+ ):
+ return True
+ return is_css_color_code(instance)
+
+
+with suppress(ImportError):
+ import jsonpointer
+
+ @_checks_drafts(
+ draft6="json-pointer",
+ draft7="json-pointer",
+ draft201909="json-pointer",
+ draft202012="json-pointer",
+ raises=jsonpointer.JsonPointerException,
+ )
+ def is_json_pointer(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return bool(jsonpointer.JsonPointer(instance))
+
+ # TODO: I don't want to maintain this, so it
+ # needs to go either into jsonpointer (pending
+ # https://github.com/stefankoegl/python-json-pointer/issues/34) or
+ # into a new external library.
+ @_checks_drafts(
+ draft7="relative-json-pointer",
+ draft201909="relative-json-pointer",
+ draft202012="relative-json-pointer",
+ raises=jsonpointer.JsonPointerException,
+ )
+ def is_relative_json_pointer(instance: object) -> bool:
+ # Definition taken from:
+ # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3
+ if not isinstance(instance, str):
+ return True
+ if not instance:
+ return False
+
+ non_negative_integer, rest = [], ""
+ for i, character in enumerate(instance):
+ if character.isdigit():
+ # digits with a leading "0" are not allowed
+                if i > 0 and instance[0] == "0":
+ return False
+
+ non_negative_integer.append(character)
+ continue
+
+ if not non_negative_integer:
+ return False
+
+ rest = instance[i:]
+ break
+ return (rest == "#") or bool(jsonpointer.JsonPointer(rest))
+
+
+with suppress(ImportError):
+ import uri_template
+
+ @_checks_drafts(
+ draft6="uri-template",
+ draft7="uri-template",
+ draft201909="uri-template",
+ draft202012="uri-template",
+ )
+ def is_uri_template(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ return uri_template.validate(instance)
+
+
+with suppress(ImportError):
+ import isoduration
+
+ @_checks_drafts(
+ draft201909="duration",
+ draft202012="duration",
+ raises=isoduration.DurationParsingException,
+ )
+ def is_duration(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ isoduration.parse_duration(instance)
+ # FIXME: See bolsote/isoduration#25 and bolsote/isoduration#21
+ return instance.endswith(tuple("DMYWHMS"))
+
+
+@_checks_drafts(
+ draft201909="uuid",
+ draft202012="uuid",
+ raises=ValueError,
+)
+def is_uuid(instance: object) -> bool:
+ if not isinstance(instance, str):
+ return True
+ UUID(instance)
+ return all(instance[position] == "-" for position in (8, 13, 18, 23))
diff --git a/third_party/python/jsonschema/jsonschema/_legacy_validators.py b/third_party/python/jsonschema/jsonschema/_legacy_validators.py
new file mode 100644
index 0000000000..cc5e3f44c1
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/_legacy_validators.py
@@ -0,0 +1,319 @@
+from jsonschema import _utils
+from jsonschema.exceptions import ValidationError
+
+
+def id_of_ignore_ref(property="$id"):
+ def id_of(schema):
+ """
+ Ignore an ``$id`` sibling of ``$ref`` if it is present.
+
+ Otherwise, return the ID of the given schema.
+ """
+ if schema is True or schema is False or "$ref" in schema:
+ return ""
+ return schema.get(property, "")
+ return id_of
+
+
+def ignore_ref_siblings(schema):
+ """
+ Ignore siblings of ``$ref`` if it is present.
+
+ Otherwise, return all keywords.
+
+ Suitable for use with `create`'s ``applicable_validators`` argument.
+ """
+ ref = schema.get("$ref")
+ if ref is not None:
+ return [("$ref", ref)]
+ else:
+ return schema.items()
+
+
+def dependencies_draft3(validator, dependencies, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, dependency in dependencies.items():
+ if property not in instance:
+ continue
+
+ if validator.is_type(dependency, "object"):
+ yield from validator.descend(
+ instance, dependency, schema_path=property,
+ )
+ elif validator.is_type(dependency, "string"):
+ if dependency not in instance:
+ message = f"{dependency!r} is a dependency of {property!r}"
+ yield ValidationError(message)
+ else:
+ for each in dependency:
+ if each not in instance:
+ message = f"{each!r} is a dependency of {property!r}"
+ yield ValidationError(message)
+
+
+def dependencies_draft4_draft6_draft7(
+ validator,
+ dependencies,
+ instance,
+ schema,
+):
+ """
+ Support for the ``dependencies`` keyword from pre-draft 2019-09.
+
+ In later drafts, the keyword was split into separate
+ ``dependentRequired`` and ``dependentSchemas`` validators.
+ """
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, dependency in dependencies.items():
+ if property not in instance:
+ continue
+
+ if validator.is_type(dependency, "array"):
+ for each in dependency:
+ if each not in instance:
+ message = f"{each!r} is a dependency of {property!r}"
+ yield ValidationError(message)
+ else:
+ yield from validator.descend(
+ instance, dependency, schema_path=property,
+ )
+
+
+def disallow_draft3(validator, disallow, instance, schema):
+ for disallowed in _utils.ensure_list(disallow):
+ if validator.evolve(schema={"type": [disallowed]}).is_valid(instance):
+ message = f"{disallowed!r} is disallowed for {instance!r}"
+ yield ValidationError(message)
+
+
+def extends_draft3(validator, extends, instance, schema):
+ if validator.is_type(extends, "object"):
+ yield from validator.descend(instance, extends)
+ return
+ for index, subschema in enumerate(extends):
+ yield from validator.descend(instance, subschema, schema_path=index)
+
+
+def items_draft3_draft4(validator, items, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if validator.is_type(items, "object"):
+ for index, item in enumerate(instance):
+ yield from validator.descend(item, items, path=index)
+ else:
+ for (index, item), subschema in zip(enumerate(instance), items):
+ yield from validator.descend(
+ item, subschema, path=index, schema_path=index,
+ )
+
+
+def items_draft6_draft7_draft201909(validator, items, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if validator.is_type(items, "array"):
+ for (index, item), subschema in zip(enumerate(instance), items):
+ yield from validator.descend(
+ item, subschema, path=index, schema_path=index,
+ )
+ else:
+ for index, item in enumerate(instance):
+ yield from validator.descend(item, items, path=index)
+
+
+def minimum_draft3_draft4(validator, minimum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if schema.get("exclusiveMinimum", False):
+ failed = instance <= minimum
+ cmp = "less than or equal to"
+ else:
+ failed = instance < minimum
+ cmp = "less than"
+
+ if failed:
+ message = f"{instance!r} is {cmp} the minimum of {minimum!r}"
+ yield ValidationError(message)
+
+
+def maximum_draft3_draft4(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if schema.get("exclusiveMaximum", False):
+ failed = instance >= maximum
+ cmp = "greater than or equal to"
+ else:
+ failed = instance > maximum
+ cmp = "greater than"
+
+ if failed:
+ message = f"{instance!r} is {cmp} the maximum of {maximum!r}"
+ yield ValidationError(message)
+
+
+def properties_draft3(validator, properties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, subschema in properties.items():
+ if property in instance:
+ yield from validator.descend(
+ instance[property],
+ subschema,
+ path=property,
+ schema_path=property,
+ )
+ elif subschema.get("required", False):
+ error = ValidationError(f"{property!r} is a required property")
+ error._set(
+ validator="required",
+ validator_value=subschema["required"],
+ instance=instance,
+ schema=schema,
+ )
+ error.path.appendleft(property)
+ error.schema_path.extend([property, "required"])
+ yield error
+
+
+def type_draft3(validator, types, instance, schema):
+ types = _utils.ensure_list(types)
+
+ all_errors = []
+ for index, type in enumerate(types):
+ if validator.is_type(type, "object"):
+ errors = list(validator.descend(instance, type, schema_path=index))
+ if not errors:
+ return
+ all_errors.extend(errors)
+ else:
+ if validator.is_type(instance, type):
+ return
+ else:
+ reprs = []
+ for type in types:
+ try:
+ reprs.append(repr(type["name"]))
+ except Exception:
+ reprs.append(repr(type))
+ yield ValidationError(
+ f"{instance!r} is not of type {', '.join(reprs)}",
+ context=all_errors,
+ )
+
+
+def contains_draft6_draft7(validator, contains, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ if not any(
+ validator.evolve(schema=contains).is_valid(element)
+ for element in instance
+ ):
+ yield ValidationError(
+ f"None of {instance!r} are valid under the given schema",
+ )
+
+
+def recursiveRef(validator, recursiveRef, instance, schema):
+ lookup_url, target = validator.resolver.resolution_scope, validator.schema
+
+ for each in reversed(validator.resolver._scopes_stack[1:]):
+ lookup_url, next_target = validator.resolver.resolve(each)
+ if next_target.get("$recursiveAnchor"):
+ target = next_target
+ else:
+ break
+
+ fragment = recursiveRef.lstrip("#")
+ subschema = validator.resolver.resolve_fragment(target, fragment)
+ # FIXME: This is gutted (and not calling .descend) because it can trigger
+ # recursion errors, so there's a bug here. Re-enable the tests to
+ # see it.
+ subschema
+ return []
+
+
+def find_evaluated_item_indexes_by_schema(validator, instance, schema):
+ """
+ Get all indexes of items that get evaluated under the current schema
+
+ Covers all keywords related to unevaluatedItems: items, prefixItems, if,
+ then, else, contains, unevaluatedItems, allOf, oneOf, anyOf
+ """
+ if validator.is_type(schema, "boolean"):
+ return []
+ evaluated_indexes = []
+
+ if "additionalItems" in schema:
+ return list(range(0, len(instance)))
+
+ if "$ref" in schema:
+ scope, resolved = validator.resolver.resolve(schema["$ref"])
+ validator.resolver.push_scope(scope)
+
+ try:
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, resolved,
+ )
+ finally:
+ validator.resolver.pop_scope()
+
+ if "items" in schema:
+ if validator.is_type(schema["items"], "object"):
+ return list(range(0, len(instance)))
+ evaluated_indexes += list(range(0, len(schema["items"])))
+
+ if "if" in schema:
+ if validator.evolve(schema=schema["if"]).is_valid(instance):
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, schema["if"],
+ )
+ if "then" in schema:
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, schema["then"],
+ )
+ else:
+ if "else" in schema:
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, schema["else"],
+ )
+
+ for keyword in ["contains", "unevaluatedItems"]:
+ if keyword in schema:
+ for k, v in enumerate(instance):
+ if validator.evolve(schema=schema[keyword]).is_valid(v):
+ evaluated_indexes.append(k)
+
+ for keyword in ["allOf", "oneOf", "anyOf"]:
+ if keyword in schema:
+ for subschema in schema[keyword]:
+ errs = list(validator.descend(instance, subschema))
+ if not errs:
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, subschema,
+ )
+
+ return evaluated_indexes
+
+
+def unevaluatedItems_draft2019(validator, unevaluatedItems, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+ evaluated_item_indexes = find_evaluated_item_indexes_by_schema(
+ validator, instance, schema,
+ )
+ unevaluated_items = [
+ item for index, item in enumerate(instance)
+ if index not in evaluated_item_indexes
+ ]
+ if unevaluated_items:
+ error = "Unevaluated items are not allowed (%s %s unexpected)"
+ yield ValidationError(error % _utils.extras_msg(unevaluated_items))
diff --git a/third_party/python/jsonschema/jsonschema/_types.py b/third_party/python/jsonschema/jsonschema/_types.py
new file mode 100644
index 0000000000..5b543f71bb
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/_types.py
@@ -0,0 +1,203 @@
+from __future__ import annotations
+
+import numbers
+import typing
+
+from pyrsistent import pmap
+from pyrsistent.typing import PMap
+import attr
+
+from jsonschema.exceptions import UndefinedTypeCheck
+
+
+# unfortunately, the type of pmap is generic, and if used as the attr.ib
+# converter, the generic type is presented to mypy, which then fails to match
+# the concrete type of a type checker mapping
+# this "do nothing" wrapper presents the correct information to mypy
+def _typed_pmap_converter(
+ init_val: typing.Mapping[
+ str,
+ typing.Callable[["TypeChecker", typing.Any], bool],
+ ],
+) -> PMap[str, typing.Callable[["TypeChecker", typing.Any], bool]]:
+ return pmap(init_val)
+
+
+def is_array(checker, instance):
+ return isinstance(instance, list)
+
+
+def is_bool(checker, instance):
+ return isinstance(instance, bool)
+
+
+def is_integer(checker, instance):
+ # bool inherits from int, so ensure bools aren't reported as ints
+ if isinstance(instance, bool):
+ return False
+ return isinstance(instance, int)
+
+
+def is_null(checker, instance):
+ return instance is None
+
+
+def is_number(checker, instance):
+ # bool inherits from int, so ensure bools aren't reported as ints
+ if isinstance(instance, bool):
+ return False
+ return isinstance(instance, numbers.Number)
+
+
+def is_object(checker, instance):
+ return isinstance(instance, dict)
+
+
+def is_string(checker, instance):
+ return isinstance(instance, str)
+
+
+def is_any(checker, instance):
+ return True
+
+
+@attr.s(frozen=True, repr=False)
+class TypeChecker:
+ """
+ A :kw:`type` property checker.
+
+ A `TypeChecker` performs type checking for a `Validator`, converting
+ between the defined JSON Schema types and some associated Python types or
+ objects.
+
+ Modifying the behavior just mentioned by redefining which Python objects
+ are considered to be of which JSON Schema types can be done using
+ `TypeChecker.redefine` or `TypeChecker.redefine_many`, and types can be
+ removed via `TypeChecker.remove`. Each of these return a new `TypeChecker`.
+
+ Arguments:
+
+ type_checkers:
+
+ The initial mapping of types to their checking functions.
+ """
+
+ _type_checkers: PMap[
+ str, typing.Callable[["TypeChecker", typing.Any], bool],
+ ] = attr.ib(
+ default=pmap(),
+ converter=_typed_pmap_converter,
+ )
+
+ def __repr__(self):
+ types = ", ".join(repr(k) for k in sorted(self._type_checkers))
+ return f"<{self.__class__.__name__} types={{{types}}}>"
+
+ def is_type(self, instance, type: str) -> bool:
+ """
+ Check if the instance is of the appropriate type.
+
+ Arguments:
+
+ instance:
+
+ The instance to check
+
+ type:
+
+ The name of the type that is expected.
+
+ Raises:
+
+ `jsonschema.exceptions.UndefinedTypeCheck`:
+
+ if ``type`` is unknown to this object.
+ """
+ try:
+ fn = self._type_checkers[type]
+ except KeyError:
+ raise UndefinedTypeCheck(type) from None
+
+ return fn(self, instance)
+
+ def redefine(self, type: str, fn) -> "TypeChecker":
+ """
+ Produce a new checker with the given type redefined.
+
+ Arguments:
+
+ type:
+
+ The name of the type to check.
+
+ fn (collections.abc.Callable):
+
+ A callable taking exactly two parameters - the type
+ checker calling the function and the instance to check.
+ The function should return true if instance is of this
+ type and false otherwise.
+ """
+ return self.redefine_many({type: fn})
+
+ def redefine_many(self, definitions=()) -> "TypeChecker":
+ """
+ Produce a new checker with the given types redefined.
+
+ Arguments:
+
+ definitions (dict):
+
+ A dictionary mapping types to their checking functions.
+ """
+ type_checkers = self._type_checkers.update(definitions)
+ return attr.evolve(self, type_checkers=type_checkers)
+
+ def remove(self, *types) -> "TypeChecker":
+ """
+ Produce a new checker with the given types forgotten.
+
+ Arguments:
+
+ types:
+
+ the names of the types to remove.
+
+ Raises:
+
+ `jsonschema.exceptions.UndefinedTypeCheck`:
+
+ if any given type is unknown to this object
+ """
+
+ type_checkers = self._type_checkers
+ for each in types:
+ try:
+ type_checkers = type_checkers.remove(each)
+ except KeyError:
+ raise UndefinedTypeCheck(each)
+ return attr.evolve(self, type_checkers=type_checkers)
+
+
+draft3_type_checker = TypeChecker(
+ {
+ "any": is_any,
+ "array": is_array,
+ "boolean": is_bool,
+ "integer": is_integer,
+ "object": is_object,
+ "null": is_null,
+ "number": is_number,
+ "string": is_string,
+ },
+)
+draft4_type_checker = draft3_type_checker.remove("any")
+draft6_type_checker = draft4_type_checker.redefine(
+ "integer",
+ lambda checker, instance: (
+ is_integer(checker, instance)
+ or isinstance(instance, float) and instance.is_integer()
+ ),
+)
+draft7_type_checker = draft6_type_checker
+draft201909_type_checker = draft7_type_checker
+draft202012_type_checker = draft201909_type_checker
diff --git a/third_party/python/jsonschema/jsonschema/_utils.py b/third_party/python/jsonschema/jsonschema/_utils.py
new file mode 100644
index 0000000000..418348ce1c
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/_utils.py
@@ -0,0 +1,349 @@
+from collections.abc import Mapping, MutableMapping, Sequence
+from urllib.parse import urlsplit
+import itertools
+import json
+import re
+import sys
+
+# The files() API was added in Python 3.9.
+if sys.version_info >= (3, 9): # pragma: no cover
+ from importlib import resources
+else: # pragma: no cover
+ import importlib_resources as resources # type: ignore
+
+
+class URIDict(MutableMapping):
+ """
+ Dictionary which uses normalized URIs as keys.
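+
+    A small doctest-style sketch (the URL is illustrative only):
+    ``urlsplit(...).geturl()`` drops an empty fragment, so both spellings
+    below normalize to the same key.
+
+        >>> d = URIDict()
+        >>> d["http://example.com/schema.json"] = 1
+        >>> d["http://example.com/schema.json#"] = 2
+        >>> len(d)
+        1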
+ """
+
+ def normalize(self, uri):
+ return urlsplit(uri).geturl()
+
+ def __init__(self, *args, **kwargs):
+ self.store = dict()
+ self.store.update(*args, **kwargs)
+
+ def __getitem__(self, uri):
+ return self.store[self.normalize(uri)]
+
+ def __setitem__(self, uri, value):
+ self.store[self.normalize(uri)] = value
+
+ def __delitem__(self, uri):
+ del self.store[self.normalize(uri)]
+
+ def __iter__(self):
+ return iter(self.store)
+
+ def __len__(self):
+ return len(self.store)
+
+ def __repr__(self):
+ return repr(self.store)
+
+
+class Unset:
+ """
+ An as-of-yet unset attribute or unprovided default parameter.
+ """
+
+ def __repr__(self):
+ return "<unset>"
+
+
+def load_schema(name):
+ """
+ Load a schema from ./schemas/``name``.json and return it.
+ """
+
+ path = resources.files(__package__).joinpath(f"schemas/{name}.json")
+ data = path.read_text(encoding="utf-8")
+ return json.loads(data)
+
+
+def format_as_index(container, indices):
+ """
+ Construct a single string containing indexing operations for the indices.
+
+    For example:
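+
+        >>> format_as_index("bar", [1, 2, "foo"])
+        "bar[1][2]['foo']"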
+
+ Arguments:
+
+ container (str):
+
+ A word to use for the thing being indexed
+
+ indices (sequence):
+
+ The indices to format.
+ """
+
+ if not indices:
+ return container
+ return f"{container}[{']['.join(repr(index) for index in indices)}]"
+
+
+def find_additional_properties(instance, schema):
+ """
+    Yield each additional property for the given ``instance``.
+
+    Weeds out properties that should have been validated by ``properties``
+    and/or ``patternProperties``.
+
+ Assumes ``instance`` is dict-like already.
+ """
+
+ properties = schema.get("properties", {})
+ patterns = "|".join(schema.get("patternProperties", {}))
+ for property in instance:
+ if property not in properties:
+ if patterns and re.search(patterns, property):
+ continue
+ yield property
+
+
+def extras_msg(extras):
+ """
+ Create an error message for extra items or properties.
+ """
+
+ if len(extras) == 1:
+ verb = "was"
+ else:
+ verb = "were"
+ return ", ".join(repr(extra) for extra in sorted(extras)), verb
+
+
+def ensure_list(thing):
+ """
+ Wrap ``thing`` in a list if it's a single str.
+
+ Otherwise, return it unchanged.
+ """
+
+ if isinstance(thing, str):
+ return [thing]
+ return thing
+
+
+def _mapping_equal(one, two):
+ """
+ Check if two mappings are equal using the semantics of `equal`.
+ """
+ if len(one) != len(two):
+ return False
+ return all(
+ key in two and equal(value, two[key])
+ for key, value in one.items()
+ )
+
+
+def _sequence_equal(one, two):
+ """
+ Check if two sequences are equal using the semantics of `equal`.
+ """
+ if len(one) != len(two):
+ return False
+ return all(equal(i, j) for i, j in zip(one, two))
+
+
+def equal(one, two):
+ """
+    Check if two things are equal, evading some Python type hierarchy
+    semantics.
+
+    Specifically, evade the fact that `bool` inherits from `int` (so that
+    ``True == 1`` in Python but not in JSON Schema), recursing into
+    sequences and mappings to do the same.
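+
+    For example:
+
+        >>> True == 1
+        True
+        >>> equal(True, 1)
+        False
+        >>> equal([True], [1])
+        False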
+ """
+ if isinstance(one, str) or isinstance(two, str):
+ return one == two
+ if isinstance(one, Sequence) and isinstance(two, Sequence):
+ return _sequence_equal(one, two)
+ if isinstance(one, Mapping) and isinstance(two, Mapping):
+ return _mapping_equal(one, two)
+ return unbool(one) == unbool(two)
+
+
+def unbool(element, true=object(), false=object()):
+ """
+ A hack to make True and 1 and False and 0 unique for ``uniq``.
+ """
+
+ if element is True:
+ return true
+ elif element is False:
+ return false
+ return element
+
+
+def uniq(container):
+ """
+ Check if all of a container's elements are unique.
+
+ Tries to rely on the container being recursively sortable, or otherwise
+ falls back on (slow) brute force.
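+
+    Comparison goes through `unbool`, so booleans and numbers stay
+    distinct, e.g.:
+
+        >>> uniq([0, False, 1, True])
+        True
+        >>> uniq([1, 1.0])
+        False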
+ """
+ try:
+ sort = sorted(unbool(i) for i in container)
+ sliced = itertools.islice(sort, 1, None)
+
+ for i, j in zip(sort, sliced):
+ if equal(i, j):
+ return False
+
+ except (NotImplementedError, TypeError):
+ seen = []
+ for e in container:
+ e = unbool(e)
+
+ for i in seen:
+ if equal(i, e):
+ return False
+
+ seen.append(e)
+ return True
+
+
+def find_evaluated_item_indexes_by_schema(validator, instance, schema):
+ """
+    Get all indexes of items that get evaluated under the current schema.
+
+    Covers all keywords related to unevaluatedItems: items, prefixItems, if,
+    then, else, contains, unevaluatedItems, allOf, oneOf, anyOf.
+ """
+ if validator.is_type(schema, "boolean"):
+ return []
+ evaluated_indexes = []
+
+ if "items" in schema:
+ return list(range(0, len(instance)))
+
+ if "$ref" in schema:
+ scope, resolved = validator.resolver.resolve(schema["$ref"])
+ validator.resolver.push_scope(scope)
+
+ try:
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, resolved,
+ )
+ finally:
+ validator.resolver.pop_scope()
+
+ if "prefixItems" in schema:
+ evaluated_indexes += list(range(0, len(schema["prefixItems"])))
+
+ if "if" in schema:
+ if validator.evolve(schema=schema["if"]).is_valid(instance):
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, schema["if"],
+ )
+ if "then" in schema:
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, schema["then"],
+ )
+ else:
+ if "else" in schema:
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, schema["else"],
+ )
+
+ for keyword in ["contains", "unevaluatedItems"]:
+ if keyword in schema:
+ for k, v in enumerate(instance):
+ if validator.evolve(schema=schema[keyword]).is_valid(v):
+ evaluated_indexes.append(k)
+
+ for keyword in ["allOf", "oneOf", "anyOf"]:
+ if keyword in schema:
+ for subschema in schema[keyword]:
+ errs = list(validator.descend(instance, subschema))
+ if not errs:
+ evaluated_indexes += find_evaluated_item_indexes_by_schema(
+ validator, instance, subschema,
+ )
+
+ return evaluated_indexes
+
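+# For example (illustrative): against the schema ``{"prefixItems": [{}, {}]}``
+# and the instance ``[1, 2, 3]``, the evaluated indexes are ``[0, 1]``;
+# index 2 is what ``unevaluatedItems`` would then be applied to.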
+
+def find_evaluated_property_keys_by_schema(validator, instance, schema):
+ """
+    Get all property keys that get evaluated under the current schema.
+
+    Covers all keywords related to unevaluatedProperties: properties,
+    additionalProperties, unevaluatedProperties, patternProperties,
+    dependentSchemas, allOf, oneOf, anyOf, if, then, else.
+ """
+ if validator.is_type(schema, "boolean"):
+ return []
+ evaluated_keys = []
+
+ if "$ref" in schema:
+ scope, resolved = validator.resolver.resolve(schema["$ref"])
+ validator.resolver.push_scope(scope)
+
+ try:
+ evaluated_keys += find_evaluated_property_keys_by_schema(
+ validator, instance, resolved,
+ )
+ finally:
+ validator.resolver.pop_scope()
+
+ for keyword in [
+ "properties", "additionalProperties", "unevaluatedProperties",
+ ]:
+ if keyword in schema:
+ if validator.is_type(schema[keyword], "boolean"):
+ for property, value in instance.items():
+ if validator.evolve(schema=schema[keyword]).is_valid(
+ {property: value},
+ ):
+ evaluated_keys.append(property)
+
+ if validator.is_type(schema[keyword], "object"):
+ for property, subschema in schema[keyword].items():
+ if property in instance and validator.evolve(
+ schema=subschema,
+ ).is_valid(instance[property]):
+ evaluated_keys.append(property)
+
+ if "patternProperties" in schema:
+ for property, value in instance.items():
+ for pattern, _ in schema["patternProperties"].items():
+ if re.search(pattern, property) and validator.evolve(
+ schema=schema["patternProperties"],
+ ).is_valid({property: value}):
+ evaluated_keys.append(property)
+
+ if "dependentSchemas" in schema:
+ for property, subschema in schema["dependentSchemas"].items():
+ if property not in instance:
+ continue
+ evaluated_keys += find_evaluated_property_keys_by_schema(
+ validator, instance, subschema,
+ )
+
+ for keyword in ["allOf", "oneOf", "anyOf"]:
+ if keyword in schema:
+ for subschema in schema[keyword]:
+ errs = list(validator.descend(instance, subschema))
+ if not errs:
+ evaluated_keys += find_evaluated_property_keys_by_schema(
+ validator, instance, subschema,
+ )
+
+ if "if" in schema:
+ if validator.evolve(schema=schema["if"]).is_valid(instance):
+ evaluated_keys += find_evaluated_property_keys_by_schema(
+ validator, instance, schema["if"],
+ )
+ if "then" in schema:
+ evaluated_keys += find_evaluated_property_keys_by_schema(
+ validator, instance, schema["then"],
+ )
+ else:
+ if "else" in schema:
+ evaluated_keys += find_evaluated_property_keys_by_schema(
+ validator, instance, schema["else"],
+ )
+
+ return evaluated_keys
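+
+# For example (illustrative): against the schema ``{"properties": {"a": {}}}``
+# and the instance ``{"a": 1, "b": 2}``, the evaluated keys are ``["a"]``;
+# key "b" is what ``unevaluatedProperties`` would then be applied to.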
diff --git a/third_party/python/jsonschema/jsonschema/_validators.py b/third_party/python/jsonschema/jsonschema/_validators.py
new file mode 100644
index 0000000000..8542a879c8
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/_validators.py
@@ -0,0 +1,476 @@
+from fractions import Fraction
+from urllib.parse import urldefrag, urljoin
+import re
+
+from jsonschema._utils import (
+ ensure_list,
+ equal,
+ extras_msg,
+ find_additional_properties,
+ find_evaluated_item_indexes_by_schema,
+ find_evaluated_property_keys_by_schema,
+ unbool,
+ uniq,
+)
+from jsonschema.exceptions import FormatError, ValidationError
+
+
+def patternProperties(validator, patternProperties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for pattern, subschema in patternProperties.items():
+ for k, v in instance.items():
+ if re.search(pattern, k):
+ yield from validator.descend(
+ v, subschema, path=k, schema_path=pattern,
+ )
+
+
+def propertyNames(validator, propertyNames, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property in instance:
+ yield from validator.descend(instance=property, schema=propertyNames)
+
+
+def additionalProperties(validator, aP, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ extras = set(find_additional_properties(instance, schema))
+
+ if validator.is_type(aP, "object"):
+ for extra in extras:
+ yield from validator.descend(instance[extra], aP, path=extra)
+ elif not aP and extras:
+ if "patternProperties" in schema:
+ if len(extras) == 1:
+ verb = "does"
+ else:
+ verb = "do"
+
+ joined = ", ".join(repr(each) for each in sorted(extras))
+ patterns = ", ".join(
+ repr(each) for each in sorted(schema["patternProperties"])
+ )
+ error = f"{joined} {verb} not match any of the regexes: {patterns}"
+ yield ValidationError(error)
+ else:
+ error = "Additional properties are not allowed (%s %s unexpected)"
+ yield ValidationError(error % extras_msg(extras))
+
+
+def items(validator, items, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ prefix = len(schema.get("prefixItems", []))
+ total = len(instance)
+ if items is False and total > prefix:
+ message = f"Expected at most {prefix} items, but found {total}"
+ yield ValidationError(message)
+ else:
+ for index in range(prefix, total):
+ yield from validator.descend(
+ instance=instance[index],
+ schema=items,
+ path=index,
+ )
+
+
+def additionalItems(validator, aI, instance, schema):
+ if (
+ not validator.is_type(instance, "array")
+ or validator.is_type(schema.get("items", {}), "object")
+ ):
+ return
+
+ len_items = len(schema.get("items", []))
+ if validator.is_type(aI, "object"):
+ for index, item in enumerate(instance[len_items:], start=len_items):
+ yield from validator.descend(item, aI, path=index)
+ elif not aI and len(instance) > len(schema.get("items", [])):
+ error = "Additional items are not allowed (%s %s unexpected)"
+ yield ValidationError(
+ error % extras_msg(instance[len(schema.get("items", [])):]),
+ )
+
+
+def const(validator, const, instance, schema):
+ if not equal(instance, const):
+ yield ValidationError(f"{const!r} was expected")
+
+
+def contains(validator, contains, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ matches = 0
+ min_contains = schema.get("minContains", 1)
+ max_contains = schema.get("maxContains", len(instance))
+
+ for each in instance:
+ if validator.evolve(schema=contains).is_valid(each):
+ matches += 1
+ if matches > max_contains:
+ yield ValidationError(
+ "Too many items match the given schema "
+ f"(expected at most {max_contains})",
+ validator="maxContains",
+ validator_value=max_contains,
+ )
+ return
+
+ if matches < min_contains:
+ if not matches:
+ yield ValidationError(
+ f"{instance!r} does not contain items "
+ "matching the given schema",
+ )
+ else:
+ yield ValidationError(
+ "Too few items match the given schema (expected at least "
+ f"{min_contains} but only {matches} matched)",
+ validator="minContains",
+ validator_value=min_contains,
+ )
+
+
+def exclusiveMinimum(validator, minimum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance <= minimum:
+ yield ValidationError(
+ f"{instance!r} is less than or equal to "
+ f"the minimum of {minimum!r}",
+ )
+
+
+def exclusiveMaximum(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance >= maximum:
+ yield ValidationError(
+ f"{instance!r} is greater than or equal "
+ f"to the maximum of {maximum!r}",
+ )
+
+
+def minimum(validator, minimum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance < minimum:
+ message = f"{instance!r} is less than the minimum of {minimum!r}"
+ yield ValidationError(message)
+
+
+def maximum(validator, maximum, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if instance > maximum:
+ message = f"{instance!r} is greater than the maximum of {maximum!r}"
+ yield ValidationError(message)
+
+
+def multipleOf(validator, dB, instance, schema):
+ if not validator.is_type(instance, "number"):
+ return
+
+ if isinstance(dB, float):
+ quotient = instance / dB
+ try:
+ failed = int(quotient) != quotient
+ except OverflowError:
+ # When `instance` is large and `dB` is less than one,
+ # quotient can overflow to infinity; and then casting to int
+ # raises an error.
+ #
+ # In this case we fall back to Fraction logic, which is
+ # exact and cannot overflow. The performance is also
+ # acceptable: we try the fast all-float option first, and
+            # we know that Fraction(dB) can have at most a few hundred
+ # digits in each part. The worst-case slowdown is therefore
+ # for already-slow enormous integers or Decimals.
+ failed = (Fraction(instance) / Fraction(dB)).denominator != 1
+ else:
+ failed = instance % dB
+
+ if failed:
+ yield ValidationError(f"{instance!r} is not a multiple of {dB}")
+
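+# An illustrative trigger for the Fraction fallback above (values assumed):
+# with ``instance = 1e308`` and ``dB = 1e-308`` the quotient overflows to
+# ``inf``, so ``int(inf)`` raises OverflowError and the exact path is taken.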
+
+def minItems(validator, mI, instance, schema):
+ if validator.is_type(instance, "array") and len(instance) < mI:
+ yield ValidationError(f"{instance!r} is too short")
+
+
+def maxItems(validator, mI, instance, schema):
+ if validator.is_type(instance, "array") and len(instance) > mI:
+ yield ValidationError(f"{instance!r} is too long")
+
+
+def uniqueItems(validator, uI, instance, schema):
+ if (
+ uI
+ and validator.is_type(instance, "array")
+ and not uniq(instance)
+ ):
+ yield ValidationError(f"{instance!r} has non-unique elements")
+
+
+def pattern(validator, patrn, instance, schema):
+ if (
+ validator.is_type(instance, "string")
+ and not re.search(patrn, instance)
+ ):
+ yield ValidationError(f"{instance!r} does not match {patrn!r}")
+
+
+def format(validator, format, instance, schema):
+ if validator.format_checker is not None:
+ try:
+ validator.format_checker.check(instance, format)
+ except FormatError as error:
+ yield ValidationError(error.message, cause=error.cause)
+
+
+def minLength(validator, mL, instance, schema):
+ if validator.is_type(instance, "string") and len(instance) < mL:
+ yield ValidationError(f"{instance!r} is too short")
+
+
+def maxLength(validator, mL, instance, schema):
+ if validator.is_type(instance, "string") and len(instance) > mL:
+ yield ValidationError(f"{instance!r} is too long")
+
+
+def dependentRequired(validator, dependentRequired, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, dependency in dependentRequired.items():
+ if property not in instance:
+ continue
+
+ for each in dependency:
+ if each not in instance:
+ message = f"{each!r} is a dependency of {property!r}"
+ yield ValidationError(message)
+
+
+def dependentSchemas(validator, dependentSchemas, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, dependency in dependentSchemas.items():
+ if property not in instance:
+ continue
+ yield from validator.descend(
+ instance, dependency, schema_path=property,
+ )
+
+
+def enum(validator, enums, instance, schema):
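+    # In Python ``True == 1`` and ``False == 0``, so instances equal to 0 or
+    # 1 go through `unbool` to keep booleans and numbers distinct as JSON
+    # Schema requires.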
+ if instance == 0 or instance == 1:
+ unbooled = unbool(instance)
+ if all(unbooled != unbool(each) for each in enums):
+ yield ValidationError(f"{instance!r} is not one of {enums!r}")
+ elif instance not in enums:
+ yield ValidationError(f"{instance!r} is not one of {enums!r}")
+
+
+def ref(validator, ref, instance, schema):
+ resolve = getattr(validator.resolver, "resolve", None)
+ if resolve is None:
+ with validator.resolver.resolving(ref) as resolved:
+ yield from validator.descend(instance, resolved)
+ else:
+ scope, resolved = validator.resolver.resolve(ref)
+ validator.resolver.push_scope(scope)
+
+ try:
+ yield from validator.descend(instance, resolved)
+ finally:
+ validator.resolver.pop_scope()
+
+
+def dynamicRef(validator, dynamicRef, instance, schema):
+ _, fragment = urldefrag(dynamicRef)
+
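+    # Walk the active resolution scopes looking for a subschema whose
+    # ``$dynamicAnchor`` matches the fragment; the ``for`` loop's ``else``
+    # falls back to resolving the reference like an ordinary ``$ref``.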
+ for url in validator.resolver._scopes_stack:
+ lookup_url = urljoin(url, dynamicRef)
+ with validator.resolver.resolving(lookup_url) as subschema:
+ if ("$dynamicAnchor" in subschema
+ and fragment == subschema["$dynamicAnchor"]):
+ yield from validator.descend(instance, subschema)
+ break
+ else:
+ with validator.resolver.resolving(dynamicRef) as subschema:
+ yield from validator.descend(instance, subschema)
+
+
+def type(validator, types, instance, schema):
+ types = ensure_list(types)
+
+ if not any(validator.is_type(instance, type) for type in types):
+ reprs = ", ".join(repr(type) for type in types)
+ yield ValidationError(f"{instance!r} is not of type {reprs}")
+
+
+def properties(validator, properties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+
+ for property, subschema in properties.items():
+ if property in instance:
+ yield from validator.descend(
+ instance[property],
+ subschema,
+ path=property,
+ schema_path=property,
+ )
+
+
+def required(validator, required, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+ for property in required:
+ if property not in instance:
+ yield ValidationError(f"{property!r} is a required property")
+
+
+def minProperties(validator, mP, instance, schema):
+ if validator.is_type(instance, "object") and len(instance) < mP:
+ yield ValidationError(f"{instance!r} does not have enough properties")
+
+
+def maxProperties(validator, mP, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+    if len(instance) > mP:
+ yield ValidationError(f"{instance!r} has too many properties")
+
+
+def allOf(validator, allOf, instance, schema):
+ for index, subschema in enumerate(allOf):
+ yield from validator.descend(instance, subschema, schema_path=index)
+
+
+def anyOf(validator, anyOf, instance, schema):
+ all_errors = []
+ for index, subschema in enumerate(anyOf):
+ errs = list(validator.descend(instance, subschema, schema_path=index))
+ if not errs:
+ break
+ all_errors.extend(errs)
+ else:
+ yield ValidationError(
+ f"{instance!r} is not valid under any of the given schemas",
+ context=all_errors,
+ )
+
+
+def oneOf(validator, oneOf, instance, schema):
+ subschemas = enumerate(oneOf)
+ all_errors = []
+ for index, subschema in subschemas:
+ errs = list(validator.descend(instance, subschema, schema_path=index))
+ if not errs:
+ first_valid = subschema
+ break
+ all_errors.extend(errs)
+ else:
+ yield ValidationError(
+ f"{instance!r} is not valid under any of the given schemas",
+ context=all_errors,
+ )
+
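+    # ``subschemas`` is the same iterator the loop above consumed, so this
+    # resumes after the first valid subschema instead of re-descending into
+    # the ones that already failed.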
+ more_valid = [
+ each for _, each in subschemas
+ if validator.evolve(schema=each).is_valid(instance)
+ ]
+ if more_valid:
+ more_valid.append(first_valid)
+ reprs = ", ".join(repr(schema) for schema in more_valid)
+ yield ValidationError(f"{instance!r} is valid under each of {reprs}")
+
+
+def not_(validator, not_schema, instance, schema):
+ if validator.evolve(schema=not_schema).is_valid(instance):
+ message = f"{instance!r} should not be valid under {not_schema!r}"
+ yield ValidationError(message)
+
+
+def if_(validator, if_schema, instance, schema):
+ if validator.evolve(schema=if_schema).is_valid(instance):
+ if "then" in schema:
+ then = schema["then"]
+ yield from validator.descend(instance, then, schema_path="then")
+ elif "else" in schema:
+ else_ = schema["else"]
+ yield from validator.descend(instance, else_, schema_path="else")
+
+
+def unevaluatedItems(validator, unevaluatedItems, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+ evaluated_item_indexes = find_evaluated_item_indexes_by_schema(
+ validator, instance, schema,
+ )
+ unevaluated_items = [
+ item for index, item in enumerate(instance)
+ if index not in evaluated_item_indexes
+ ]
+ if unevaluated_items:
+ error = "Unevaluated items are not allowed (%s %s unexpected)"
+ yield ValidationError(error % extras_msg(unevaluated_items))
+
+
+def unevaluatedProperties(validator, unevaluatedProperties, instance, schema):
+ if not validator.is_type(instance, "object"):
+ return
+ evaluated_keys = find_evaluated_property_keys_by_schema(
+ validator, instance, schema,
+ )
+ unevaluated_keys = []
+ for property in instance:
+ if property not in evaluated_keys:
+ for _ in validator.descend(
+ instance[property],
+ unevaluatedProperties,
+ path=property,
+ schema_path=property,
+ ):
+ # FIXME: Include context for each unevaluated property
+ # indicating why it's invalid under the subschema.
+ unevaluated_keys.append(property)
+
+ if unevaluated_keys:
+ if unevaluatedProperties is False:
+ error = "Unevaluated properties are not allowed (%s %s unexpected)"
+ yield ValidationError(error % extras_msg(unevaluated_keys))
+ else:
+ error = (
+ "Unevaluated properties are not valid under "
+ "the given schema (%s %s unevaluated and invalid)"
+ )
+ yield ValidationError(error % extras_msg(unevaluated_keys))
+
+
+def prefixItems(validator, prefixItems, instance, schema):
+ if not validator.is_type(instance, "array"):
+ return
+
+ for (index, item), subschema in zip(enumerate(instance), prefixItems):
+ yield from validator.descend(
+ instance=item,
+ schema=subschema,
+ schema_path=index,
+ path=index,
+ )
diff --git a/third_party/python/jsonschema/jsonschema/benchmarks/__init__.py b/third_party/python/jsonschema/jsonschema/benchmarks/__init__.py
new file mode 100644
index 0000000000..e3dcc68993
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/benchmarks/__init__.py
@@ -0,0 +1,5 @@
+"""
+Benchmarks for validation.
+
+This package is *not* public API.
+"""
diff --git a/third_party/python/jsonschema/jsonschema/benchmarks/issue232.py b/third_party/python/jsonschema/jsonschema/benchmarks/issue232.py
new file mode 100644
index 0000000000..bf357e9116
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/benchmarks/issue232.py
@@ -0,0 +1,25 @@
+"""
+A performance benchmark using the example from issue #232.
+
+See https://github.com/python-jsonschema/jsonschema/pull/232.
+"""
+from pathlib import Path
+
+from pyperf import Runner
+from pyrsistent import m
+
+from jsonschema.tests._suite import Version
+import jsonschema
+
+issue232 = Version(
+ path=Path(__file__).parent / "issue232",
+ remotes=m(),
+ name="issue232",
+)
+
+
+if __name__ == "__main__":
+ issue232.benchmark(
+ runner=Runner(),
+ Validator=jsonschema.Draft4Validator,
+ )
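+
+# Typical invocation (the flag below is pyperf's standard CLI, assumed
+# available via the Runner above):
+#
+#     python -m jsonschema.benchmarks.issue232 -o results.json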
diff --git a/third_party/python/jsonschema/jsonschema/benchmarks/issue232/issue.json b/third_party/python/jsonschema/jsonschema/benchmarks/issue232/issue.json
new file mode 100644
index 0000000000..804c340845
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/benchmarks/issue232/issue.json
@@ -0,0 +1,2653 @@
+[
+ {
+ "description": "Petstore",
+ "schema": {
+ "title": "A JSON Schema for Swagger 2.0 API.",
+ "id": "http://swagger.io/v2/schema.json#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "type": "object",
+ "required": [
+ "swagger",
+ "info",
+ "paths"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "swagger": {
+ "type": "string",
+ "enum": [
+ "2.0"
+ ],
+ "description": "The Swagger version of this document."
+ },
+ "info": {
+ "$ref": "#/definitions/info"
+ },
+ "host": {
+ "type": "string",
+ "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$",
+ "description": "The host (name or ip) of the API. Example: 'swagger.io'"
+ },
+ "basePath": {
+ "type": "string",
+ "pattern": "^/",
+ "description": "The base path to the API. Example: '/api'."
+ },
+ "schemes": {
+ "$ref": "#/definitions/schemesList"
+ },
+ "consumes": {
+ "description": "A list of MIME types accepted by the API.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "produces": {
+ "description": "A list of MIME types the API can produce.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "paths": {
+ "$ref": "#/definitions/paths"
+ },
+ "definitions": {
+ "$ref": "#/definitions/definitions"
+ },
+ "parameters": {
+ "$ref": "#/definitions/parameterDefinitions"
+ },
+ "responses": {
+ "$ref": "#/definitions/responseDefinitions"
+ },
+ "security": {
+ "$ref": "#/definitions/security"
+ },
+ "securityDefinitions": {
+ "$ref": "#/definitions/securityDefinitions"
+ },
+ "tags": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/tag"
+ },
+ "uniqueItems": true
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ }
+ },
+ "definitions": {
+ "info": {
+ "type": "object",
+ "description": "General information about the API.",
+ "required": [
+ "version",
+ "title"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "title": {
+ "type": "string",
+ "description": "A unique and precise title of the API."
+ },
+ "version": {
+ "type": "string",
+ "description": "A semantic version number of the API."
+ },
+ "description": {
+ "type": "string",
+ "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed."
+ },
+ "termsOfService": {
+ "type": "string",
+ "description": "The terms of service for the API."
+ },
+ "contact": {
+ "$ref": "#/definitions/contact"
+ },
+ "license": {
+ "$ref": "#/definitions/license"
+ }
+ }
+ },
+ "contact": {
+ "type": "object",
+ "description": "Contact information for the owners of the API.",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The identifying name of the contact person/organization."
+ },
+ "url": {
+ "type": "string",
+ "description": "The URL pointing to the contact information.",
+ "format": "uri"
+ },
+ "email": {
+ "type": "string",
+ "description": "The email address of the contact person/organization.",
+ "format": "email"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "license": {
+ "type": "object",
+ "required": [
+ "name"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string",
+ "description": "The name of the license type. It's encouraged to use an OSI compatible license."
+ },
+ "url": {
+ "type": "string",
+ "description": "The URL pointing to the license.",
+ "format": "uri"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "paths": {
+ "type": "object",
+ "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ },
+ "^/": {
+ "$ref": "#/definitions/pathItem"
+ }
+ },
+ "additionalProperties": false
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema"
+ },
+ "description": "One or more JSON objects describing the schemas being consumed and produced by the API."
+ },
+ "parameterDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/parameter"
+ },
+ "description": "One or more JSON representations for parameters"
+ },
+ "responseDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/response"
+ },
+      "description": "One or more JSON representations for responses"
+ },
+ "externalDocs": {
+ "type": "object",
+ "additionalProperties": false,
+ "description": "information about external documentation",
+ "required": [
+ "url"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string",
+ "format": "uri"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "examples": {
+ "type": "object",
+ "additionalProperties": true
+ },
+ "mimeType": {
+ "type": "string",
+ "description": "The MIME type of the HTTP message."
+ },
+ "operation": {
+ "type": "object",
+ "required": [
+ "responses"
+ ],
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "tags": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "summary": {
+ "type": "string",
+ "description": "A brief summary of the operation."
+ },
+ "description": {
+ "type": "string",
+ "description": "A longer description of the operation, GitHub Flavored Markdown is allowed."
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "operationId": {
+ "type": "string",
+ "description": "A unique identifier of the operation."
+ },
+ "produces": {
+ "description": "A list of MIME types the API can produce.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "consumes": {
+ "description": "A list of MIME types the API can consume.",
+ "allOf": [
+ {
+ "$ref": "#/definitions/mediaTypeList"
+ }
+ ]
+ },
+ "parameters": {
+ "$ref": "#/definitions/parametersList"
+ },
+ "responses": {
+ "$ref": "#/definitions/responses"
+ },
+ "schemes": {
+ "$ref": "#/definitions/schemesList"
+ },
+ "deprecated": {
+ "type": "boolean",
+ "default": false
+ },
+ "security": {
+ "$ref": "#/definitions/security"
+ }
+ }
+ },
+ "pathItem": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "$ref": {
+ "type": "string"
+ },
+ "get": {
+ "$ref": "#/definitions/operation"
+ },
+ "put": {
+ "$ref": "#/definitions/operation"
+ },
+ "post": {
+ "$ref": "#/definitions/operation"
+ },
+ "delete": {
+ "$ref": "#/definitions/operation"
+ },
+ "options": {
+ "$ref": "#/definitions/operation"
+ },
+ "head": {
+ "$ref": "#/definitions/operation"
+ },
+ "patch": {
+ "$ref": "#/definitions/operation"
+ },
+ "parameters": {
+ "$ref": "#/definitions/parametersList"
+ }
+ }
+ },
+ "responses": {
+ "type": "object",
+      "description": "Response object names can either be any valid HTTP status code or 'default'.",
+ "minProperties": 1,
+ "additionalProperties": false,
+ "patternProperties": {
+ "^([0-9]{3})$|^(default)$": {
+ "$ref": "#/definitions/responseValue"
+ },
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "not": {
+ "type": "object",
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ }
+ },
+ "responseValue": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/response"
+ },
+ {
+ "$ref": "#/definitions/jsonReference"
+ }
+ ]
+ },
+ "response": {
+ "type": "object",
+ "required": [
+ "description"
+ ],
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "schema": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "$ref": "#/definitions/fileSchema"
+ }
+ ]
+ },
+ "headers": {
+ "$ref": "#/definitions/headers"
+ },
+ "examples": {
+ "$ref": "#/definitions/examples"
+ }
+ },
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "headers": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/header"
+ }
+ },
+ "header": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "vendorExtension": {
+ "description": "Any property starting with x- is valid.",
+ "additionalProperties": true,
+ "additionalItems": true
+ },
+ "bodyParameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "in",
+ "schema"
+ ],
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "body"
+ ]
+ },
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "schema": {
+ "$ref": "#/definitions/schema"
+ }
+ },
+ "additionalProperties": false
+ },
+ "headerParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "header"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "queryParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "query"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "allowEmptyValue": {
+ "type": "boolean",
+ "default": false,
+ "description": "allows sending a parameter by name only or with an empty value."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormatWithMulti"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "formDataParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "description": "Determines whether or not this parameter is required or optional.",
+ "default": false
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "formData"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "allowEmptyValue": {
+ "type": "boolean",
+ "default": false,
+ "description": "allows sending a parameter by name only or with an empty value."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array",
+ "file"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormatWithMulti"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "pathParameterSubSchema": {
+ "additionalProperties": false,
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "required": [
+ "required"
+ ],
+ "properties": {
+ "required": {
+ "type": "boolean",
+ "enum": [
+ true
+ ],
+ "description": "Determines whether or not this parameter is required or optional."
+ },
+ "in": {
+ "type": "string",
+ "description": "Determines the location of the parameter.",
+ "enum": [
+ "path"
+ ]
+ },
+ "description": {
+ "type": "string",
+ "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the parameter."
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "boolean",
+ "integer",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ }
+ },
+ "nonBodyParameter": {
+ "type": "object",
+ "required": [
+ "name",
+ "in",
+ "type"
+ ],
+ "oneOf": [
+ {
+ "$ref": "#/definitions/headerParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/formDataParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/queryParameterSubSchema"
+ },
+ {
+ "$ref": "#/definitions/pathParameterSubSchema"
+ }
+ ]
+ },
+ "parameter": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/bodyParameter"
+ },
+ {
+ "$ref": "#/definitions/nonBodyParameter"
+ }
+ ]
+ },
+ "schema": {
+ "type": "object",
+ "description": "A deterministic version of a JSON Schema object.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "properties": {
+ "$ref": {
+ "type": "string"
+ },
+ "format": {
+ "type": "string"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "multipleOf": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+ },
+ "maximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "pattern": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+ },
+ "maxItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "uniqueItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+ },
+ "maxProperties": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minProperties": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "required": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+ },
+ "enum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+ },
+ "additionalProperties": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "type": "boolean"
+ }
+ ],
+ "default": {}
+ },
+ "type": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/type"
+ },
+ "items": {
+ "anyOf": [
+ {
+ "$ref": "#/definitions/schema"
+ },
+ {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "$ref": "#/definitions/schema"
+ }
+ }
+ ],
+ "default": {}
+ },
+ "allOf": {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "$ref": "#/definitions/schema"
+ }
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/schema"
+ },
+ "default": {}
+ },
+ "discriminator": {
+ "type": "string"
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "xml": {
+ "$ref": "#/definitions/xml"
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "example": {}
+ },
+ "additionalProperties": false
+ },
+ "fileSchema": {
+ "type": "object",
+ "description": "A deterministic version of a JSON Schema object.",
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ },
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "format": {
+ "type": "string"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "required": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray"
+ },
+ "type": {
+ "type": "string",
+ "enum": [
+ "file"
+ ]
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ },
+ "example": {}
+ },
+ "additionalProperties": false
+ },
+ "primitivesItems": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "string",
+ "number",
+ "integer",
+ "boolean",
+ "array"
+ ]
+ },
+ "format": {
+ "type": "string"
+ },
+ "items": {
+ "$ref": "#/definitions/primitivesItems"
+ },
+ "collectionFormat": {
+ "$ref": "#/definitions/collectionFormat"
+ },
+ "default": {
+ "$ref": "#/definitions/default"
+ },
+ "maximum": {
+ "$ref": "#/definitions/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "#/definitions/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "#/definitions/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "#/definitions/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "#/definitions/maxLength"
+ },
+ "minLength": {
+ "$ref": "#/definitions/minLength"
+ },
+ "pattern": {
+ "$ref": "#/definitions/pattern"
+ },
+ "maxItems": {
+ "$ref": "#/definitions/maxItems"
+ },
+ "minItems": {
+ "$ref": "#/definitions/minItems"
+ },
+ "uniqueItems": {
+ "$ref": "#/definitions/uniqueItems"
+ },
+ "enum": {
+ "$ref": "#/definitions/enum"
+ },
+ "multipleOf": {
+ "$ref": "#/definitions/multipleOf"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "security": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/securityRequirement"
+ },
+ "uniqueItems": true
+ },
+ "securityRequirement": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ },
+ "xml": {
+ "type": "object",
+ "additionalProperties": false,
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "namespace": {
+ "type": "string"
+ },
+ "prefix": {
+ "type": "string"
+ },
+ "attribute": {
+ "type": "boolean",
+ "default": false
+ },
+ "wrapped": {
+ "type": "boolean",
+ "default": false
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "tag": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "name"
+ ],
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "externalDocs": {
+ "$ref": "#/definitions/externalDocs"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "securityDefinitions": {
+ "type": "object",
+ "additionalProperties": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/basicAuthenticationSecurity"
+ },
+ {
+ "$ref": "#/definitions/apiKeySecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2ImplicitSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2PasswordSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2ApplicationSecurity"
+ },
+ {
+ "$ref": "#/definitions/oauth2AccessCodeSecurity"
+ }
+ ]
+ }
+ },
+ "basicAuthenticationSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "basic"
+ ]
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "apiKeySecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "name",
+ "in"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "apiKey"
+ ]
+ },
+ "name": {
+ "type": "string"
+ },
+ "in": {
+ "type": "string",
+ "enum": [
+ "header",
+ "query"
+ ]
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2ImplicitSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "authorizationUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "implicit"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "authorizationUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2PasswordSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "password"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2ApplicationSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "application"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2AccessCodeSecurity": {
+ "type": "object",
+ "additionalProperties": false,
+ "required": [
+ "type",
+ "flow",
+ "authorizationUrl",
+ "tokenUrl"
+ ],
+ "properties": {
+ "type": {
+ "type": "string",
+ "enum": [
+ "oauth2"
+ ]
+ },
+ "flow": {
+ "type": "string",
+ "enum": [
+ "accessCode"
+ ]
+ },
+ "scopes": {
+ "$ref": "#/definitions/oauth2Scopes"
+ },
+ "authorizationUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "tokenUrl": {
+ "type": "string",
+ "format": "uri"
+ },
+ "description": {
+ "type": "string"
+ }
+ },
+ "patternProperties": {
+ "^x-": {
+ "$ref": "#/definitions/vendorExtension"
+ }
+ }
+ },
+ "oauth2Scopes": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "mediaTypeList": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/mimeType"
+ },
+ "uniqueItems": true
+ },
+ "parametersList": {
+ "type": "array",
+ "description": "The parameters needed to send a valid API call.",
+ "additionalItems": false,
+ "items": {
+ "oneOf": [
+ {
+ "$ref": "#/definitions/parameter"
+ },
+ {
+ "$ref": "#/definitions/jsonReference"
+ }
+ ]
+ },
+ "uniqueItems": true
+ },
+ "schemesList": {
+ "type": "array",
+ "description": "The transfer protocol of the API.",
+ "items": {
+ "type": "string",
+ "enum": [
+ "http",
+ "https",
+ "ws",
+ "wss"
+ ]
+ },
+ "uniqueItems": true
+ },
+ "collectionFormat": {
+ "type": "string",
+ "enum": [
+ "csv",
+ "ssv",
+ "tsv",
+ "pipes"
+ ],
+ "default": "csv"
+ },
+ "collectionFormatWithMulti": {
+ "type": "string",
+ "enum": [
+ "csv",
+ "ssv",
+ "tsv",
+ "pipes",
+ "multi"
+ ],
+ "default": "csv"
+ },
+ "title": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/title"
+ },
+ "description": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/description"
+ },
+ "default": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/default"
+ },
+ "multipleOf": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf"
+ },
+ "maximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum"
+ },
+ "exclusiveMaximum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum"
+ },
+ "minimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum"
+ },
+ "exclusiveMinimum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum"
+ },
+ "maxLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minLength": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "pattern": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern"
+ },
+ "maxItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger"
+ },
+ "minItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0"
+ },
+ "uniqueItems": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems"
+ },
+ "enum": {
+ "$ref": "http://json-schema.org/draft-04/schema#/properties/enum"
+ },
+ "jsonReference": {
+ "type": "object",
+ "required": [
+ "$ref"
+ ],
+ "additionalProperties": false,
+ "properties": {
+ "$ref": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "tests": [
+ {
+      "description": "Example petstore",
+ "data": {
+ "swagger": "2.0",
+ "info": {
+ "description": "This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). For this sample, you can use the api key `special-key` to test the authorization filters.",
+ "version": "1.0.0",
+ "title": "Swagger Petstore",
+ "termsOfService": "http://swagger.io/terms/",
+ "contact": {
+ "email": "apiteam@swagger.io"
+ },
+ "license": {
+ "name": "Apache 2.0",
+ "url": "http://www.apache.org/licenses/LICENSE-2.0.html"
+ }
+ },
+ "host": "petstore.swagger.io",
+ "basePath": "/v2",
+ "tags": [
+ {
+ "name": "pet",
+ "description": "Everything about your Pets",
+ "externalDocs": {
+ "description": "Find out more",
+ "url": "http://swagger.io"
+ }
+ },
+ {
+ "name": "store",
+ "description": "Access to Petstore orders"
+ },
+ {
+ "name": "user",
+ "description": "Operations about user",
+ "externalDocs": {
+ "description": "Find out more about our store",
+ "url": "http://swagger.io"
+ }
+ }
+ ],
+ "schemes": [
+ "http"
+ ],
+ "paths": {
+ "/pet": {
+ "post": {
+ "tags": [
+ "pet"
+ ],
+ "summary": "Add a new pet to the store",
+ "description": "",
+ "operationId": "addPet",
+ "consumes": [
+ "application/json",
+ "application/xml"
+ ],
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "Pet object that needs to be added to the store",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/Pet"
+ }
+ }
+ ],
+ "responses": {
+ "405": {
+ "description": "Invalid input"
+ }
+ },
+ "security": [
+ {
+ "petstore_auth": [
+ "write:pets",
+ "read:pets"
+ ]
+ }
+ ]
+ },
+ "put": {
+ "tags": [
+ "pet"
+ ],
+ "summary": "Update an existing pet",
+ "description": "",
+ "operationId": "updatePet",
+ "consumes": [
+ "application/json",
+ "application/xml"
+ ],
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "Pet object that needs to be added to the store",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/Pet"
+ }
+ }
+ ],
+ "responses": {
+ "400": {
+ "description": "Invalid ID supplied"
+ },
+ "404": {
+ "description": "Pet not found"
+ },
+ "405": {
+ "description": "Validation exception"
+ }
+ },
+ "security": [
+ {
+ "petstore_auth": [
+ "write:pets",
+ "read:pets"
+ ]
+ }
+ ]
+ }
+ },
+ "/pet/findByStatus": {
+ "get": {
+ "tags": [
+ "pet"
+ ],
+ "summary": "Finds Pets by status",
+ "description": "Multiple status values can be provided with comma separated strings",
+ "operationId": "findPetsByStatus",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "status",
+ "in": "query",
+ "description": "Status values that need to be considered for filter",
+ "required": true,
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "available",
+ "pending",
+ "sold"
+ ],
+ "default": "available"
+ },
+ "collectionFormat": "multi"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Pet"
+ }
+ }
+ },
+ "400": {
+ "description": "Invalid status value"
+ }
+ },
+ "security": [
+ {
+ "petstore_auth": [
+ "write:pets",
+ "read:pets"
+ ]
+ }
+ ]
+ }
+ },
+ "/pet/findByTags": {
+ "get": {
+ "tags": [
+ "pet"
+ ],
+ "summary": "Finds Pets by tags",
+ "description": "Muliple tags can be provided with comma separated strings. Use tag1, tag2, tag3 for testing.",
+ "operationId": "findPetsByTags",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "tags",
+ "in": "query",
+ "description": "Tags to filter by",
+ "required": true,
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "collectionFormat": "multi"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/Pet"
+ }
+ }
+ },
+ "400": {
+ "description": "Invalid tag value"
+ }
+ },
+ "security": [
+ {
+ "petstore_auth": [
+ "write:pets",
+ "read:pets"
+ ]
+ }
+ ],
+ "deprecated": true
+ }
+ },
+ "/pet/{petId}": {
+ "get": {
+ "tags": [
+ "pet"
+ ],
+ "summary": "Find pet by ID",
+ "description": "Returns a single pet",
+ "operationId": "getPetById",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "petId",
+ "in": "path",
+ "description": "ID of pet to return",
+ "required": true,
+ "type": "integer",
+ "format": "int64"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "$ref": "#/definitions/Pet"
+ }
+ },
+ "400": {
+ "description": "Invalid ID supplied"
+ },
+ "404": {
+ "description": "Pet not found"
+ }
+ },
+ "security": [
+ {
+ "api_key": []
+ }
+ ]
+ },
+ "post": {
+ "tags": [
+ "pet"
+ ],
+ "summary": "Updates a pet in the store with form data",
+ "description": "",
+ "operationId": "updatePetWithForm",
+ "consumes": [
+ "application/x-www-form-urlencoded"
+ ],
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "petId",
+ "in": "path",
+ "description": "ID of pet that needs to be updated",
+ "required": true,
+ "type": "integer",
+ "format": "int64"
+ },
+ {
+ "name": "name",
+ "in": "formData",
+ "description": "Updated name of the pet",
+ "required": false,
+ "type": "string"
+ },
+ {
+ "name": "status",
+ "in": "formData",
+ "description": "Updated status of the pet",
+ "required": false,
+ "type": "string"
+ }
+ ],
+ "responses": {
+ "405": {
+ "description": "Invalid input"
+ }
+ },
+ "security": [
+ {
+ "petstore_auth": [
+ "write:pets",
+ "read:pets"
+ ]
+ }
+ ]
+ },
+ "delete": {
+ "tags": [
+ "pet"
+ ],
+ "summary": "Deletes a pet",
+ "description": "",
+ "operationId": "deletePet",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "api_key",
+ "in": "header",
+ "required": false,
+ "type": "string"
+ },
+ {
+ "name": "petId",
+ "in": "path",
+ "description": "Pet id to delete",
+ "required": true,
+ "type": "integer",
+ "format": "int64"
+ }
+ ],
+ "responses": {
+ "400": {
+ "description": "Invalid ID supplied"
+ },
+ "404": {
+ "description": "Pet not found"
+ }
+ },
+ "security": [
+ {
+ "petstore_auth": [
+ "write:pets",
+ "read:pets"
+ ]
+ }
+ ]
+ }
+ },
+ "/pet/{petId}/uploadImage": {
+ "post": {
+ "tags": [
+ "pet"
+ ],
+ "summary": "uploads an image",
+ "description": "",
+ "operationId": "uploadFile",
+ "consumes": [
+ "multipart/form-data"
+ ],
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "petId",
+ "in": "path",
+ "description": "ID of pet to update",
+ "required": true,
+ "type": "integer",
+ "format": "int64"
+ },
+ {
+ "name": "additionalMetadata",
+ "in": "formData",
+ "description": "Additional data to pass to server",
+ "required": false,
+ "type": "string"
+ },
+ {
+ "name": "file",
+ "in": "formData",
+ "description": "file to upload",
+ "required": false,
+ "type": "file"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "$ref": "#/definitions/ApiResponse"
+ }
+ }
+ },
+ "security": [
+ {
+ "petstore_auth": [
+ "write:pets",
+ "read:pets"
+ ]
+ }
+ ]
+ }
+ },
+ "/store/inventory": {
+ "get": {
+ "tags": [
+ "store"
+ ],
+ "summary": "Returns pet inventories by status",
+ "description": "Returns a map of status codes to quantities",
+ "operationId": "getInventory",
+ "produces": [
+ "application/json"
+ ],
+ "parameters": [],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "integer",
+ "format": "int32"
+ }
+ }
+ }
+ },
+ "security": [
+ {
+ "api_key": []
+ }
+ ]
+ }
+ },
+ "/store/order": {
+ "post": {
+ "tags": [
+ "store"
+ ],
+ "summary": "Place an order for a pet",
+ "description": "",
+ "operationId": "placeOrder",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "order placed for purchasing the pet",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/Order"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "$ref": "#/definitions/Order"
+ }
+ },
+ "400": {
+ "description": "Invalid Order"
+ }
+ }
+ }
+ },
+ "/store/order/{orderId}": {
+ "get": {
+ "tags": [
+ "store"
+ ],
+ "summary": "Find purchase order by ID",
+ "description": "For valid response try integer IDs with value >= 1 and <= 10. Other values will generated exceptions",
+ "operationId": "getOrderById",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "orderId",
+ "in": "path",
+ "description": "ID of pet that needs to be fetched",
+ "required": true,
+ "type": "integer",
+ "maximum": 10.0,
+ "minimum": 1.0,
+ "format": "int64"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "$ref": "#/definitions/Order"
+ }
+ },
+ "400": {
+ "description": "Invalid ID supplied"
+ },
+ "404": {
+ "description": "Order not found"
+ }
+ }
+ },
+ "delete": {
+ "tags": [
+ "store"
+ ],
+ "summary": "Delete purchase order by ID",
+ "description": "For valid response try integer IDs with positive integer value. Negative or non-integer values will generate API errors",
+ "operationId": "deleteOrder",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "orderId",
+ "in": "path",
+ "description": "ID of the order that needs to be deleted",
+ "required": true,
+ "type": "integer",
+ "minimum": 1.0,
+ "format": "int64"
+ }
+ ],
+ "responses": {
+ "400": {
+ "description": "Invalid ID supplied"
+ },
+ "404": {
+ "description": "Order not found"
+ }
+ }
+ }
+ },
+ "/user": {
+ "post": {
+ "tags": [
+ "user"
+ ],
+ "summary": "Create user",
+ "description": "This can only be done by the logged in user.",
+ "operationId": "createUser",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "Created user object",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/User"
+ }
+ }
+ ],
+ "responses": {
+ "default": {
+ "description": "successful operation"
+ }
+ }
+ }
+ },
+ "/user/createWithArray": {
+ "post": {
+ "tags": [
+ "user"
+ ],
+ "summary": "Creates list of users with given input array",
+ "description": "",
+ "operationId": "createUsersWithArrayInput",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "List of user object",
+ "required": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/User"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "default": {
+ "description": "successful operation"
+ }
+ }
+ }
+ },
+ "/user/createWithList": {
+ "post": {
+ "tags": [
+ "user"
+ ],
+ "summary": "Creates list of users with given input array",
+ "description": "",
+ "operationId": "createUsersWithListInput",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "in": "body",
+ "name": "body",
+ "description": "List of user object",
+ "required": true,
+ "schema": {
+ "type": "array",
+ "items": {
+ "$ref": "#/definitions/User"
+ }
+ }
+ }
+ ],
+ "responses": {
+ "default": {
+ "description": "successful operation"
+ }
+ }
+ }
+ },
+ "/user/login": {
+ "get": {
+ "tags": [
+ "user"
+ ],
+ "summary": "Logs user into the system",
+ "description": "",
+ "operationId": "loginUser",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "username",
+ "in": "query",
+ "description": "The user name for login",
+ "required": true,
+ "type": "string"
+ },
+ {
+ "name": "password",
+ "in": "query",
+ "description": "The password for login in clear text",
+ "required": true,
+ "type": "string"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "type": "string"
+ },
+ "headers": {
+ "X-Rate-Limit": {
+ "type": "integer",
+ "format": "int32",
+ "description": "calls per hour allowed by the user"
+ },
+ "X-Expires-After": {
+ "type": "string",
+ "format": "date-time",
+ "description": "date in UTC when token expires"
+ }
+ }
+ },
+ "400": {
+ "description": "Invalid username/password supplied"
+ }
+ }
+ }
+ },
+ "/user/logout": {
+ "get": {
+ "tags": [
+ "user"
+ ],
+ "summary": "Logs out current logged in user session",
+ "description": "",
+ "operationId": "logoutUser",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [],
+ "responses": {
+ "default": {
+ "description": "successful operation"
+ }
+ }
+ }
+ },
+ "/user/{username}": {
+ "get": {
+ "tags": [
+ "user"
+ ],
+ "summary": "Get user by user name",
+ "description": "",
+ "operationId": "getUserByName",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "username",
+ "in": "path",
+ "description": "The name that needs to be fetched. Use user1 for testing. ",
+ "required": true,
+ "type": "string"
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "successful operation",
+ "schema": {
+ "$ref": "#/definitions/User"
+ }
+ },
+ "400": {
+ "description": "Invalid username supplied"
+ },
+ "404": {
+ "description": "User not found"
+ }
+ }
+ },
+ "put": {
+ "tags": [
+ "user"
+ ],
+ "summary": "Updated user",
+ "description": "This can only be done by the logged in user.",
+ "operationId": "updateUser",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "username",
+ "in": "path",
+ "description": "name that need to be updated",
+ "required": true,
+ "type": "string"
+ },
+ {
+ "in": "body",
+ "name": "body",
+ "description": "Updated user object",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/User"
+ }
+ }
+ ],
+ "responses": {
+ "400": {
+ "description": "Invalid user supplied"
+ },
+ "404": {
+ "description": "User not found"
+ }
+ }
+ },
+ "delete": {
+ "tags": [
+ "user"
+ ],
+ "summary": "Delete user",
+ "description": "This can only be done by the logged in user.",
+ "operationId": "deleteUser",
+ "produces": [
+ "application/xml",
+ "application/json"
+ ],
+ "parameters": [
+ {
+ "name": "username",
+ "in": "path",
+ "description": "The name that needs to be deleted",
+ "required": true,
+ "type": "string"
+ }
+ ],
+ "responses": {
+ "400": {
+ "description": "Invalid username supplied"
+ },
+ "404": {
+ "description": "User not found"
+ }
+ }
+ }
+ }
+ },
+ "securityDefinitions": {
+ "petstore_auth": {
+ "type": "oauth2",
+ "authorizationUrl": "http://petstore.swagger.io/oauth/dialog",
+ "flow": "implicit",
+ "scopes": {
+ "write:pets": "modify pets in your account",
+ "read:pets": "read your pets"
+ }
+ },
+ "api_key": {
+ "type": "apiKey",
+ "name": "api_key",
+ "in": "header"
+ }
+ },
+ "definitions": {
+ "Order": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "petId": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "quantity": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "shipDate": {
+ "type": "string",
+ "format": "date-time"
+ },
+ "status": {
+ "type": "string",
+ "description": "Order Status",
+ "enum": [
+ "placed",
+ "approved",
+ "delivered"
+ ]
+ },
+ "complete": {
+ "type": "boolean",
+ "default": false
+ }
+ },
+ "xml": {
+ "name": "Order"
+ }
+ },
+ "Category": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string"
+ }
+ },
+ "xml": {
+ "name": "Category"
+ }
+ },
+ "User": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "username": {
+ "type": "string"
+ },
+ "firstName": {
+ "type": "string"
+ },
+ "lastName": {
+ "type": "string"
+ },
+ "email": {
+ "type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "phone": {
+ "type": "string"
+ },
+ "userStatus": {
+ "type": "integer",
+ "format": "int32",
+ "description": "User Status"
+ }
+ },
+ "xml": {
+ "name": "User"
+ }
+ },
+ "Tag": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string"
+ }
+ },
+ "xml": {
+ "name": "Tag"
+ }
+ },
+ "Pet": {
+ "type": "object",
+ "required": [
+ "name",
+ "photoUrls"
+ ],
+ "properties": {
+ "id": {
+ "type": "integer",
+ "format": "int64"
+ },
+ "category": {
+ "$ref": "#/definitions/Category"
+ },
+ "name": {
+ "type": "string",
+ "example": "doggie"
+ },
+ "photoUrls": {
+ "type": "array",
+ "xml": {
+ "name": "photoUrl",
+ "wrapped": true
+ },
+ "items": {
+ "type": "string"
+ }
+ },
+ "tags": {
+ "type": "array",
+ "xml": {
+ "name": "tag",
+ "wrapped": true
+ },
+ "items": {
+ "$ref": "#/definitions/Tag"
+ }
+ },
+ "status": {
+ "type": "string",
+ "description": "pet status in the store",
+ "enum": [
+ "available",
+ "pending",
+ "sold"
+ ]
+ }
+ },
+ "xml": {
+ "name": "Pet"
+ }
+ },
+ "ApiResponse": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "integer",
+ "format": "int32"
+ },
+ "type": {
+ "type": "string"
+ },
+ "message": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "externalDocs": {
+ "description": "Find out more about Swagger",
+ "url": "http://swagger.io"
+ }
+ },
+ "valid": true
+ }
+ ]
+ }
+]
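A quick way to exercise this fixture outside the test harness is to feed the same instance through jsonschema by hand. The sketch below is illustrative only: the file names are placeholders, and since the schema above reaches the draft-04 meta-schema through remote $refs, validation may fetch those URLs over the network unless they are pre-registered with a resolver.

    import json
    from jsonschema import Draft4Validator

    # Placeholder file names: assume the remote-ref schema and the
    # "Example petstore" instance above were saved locally.
    with open("swagger-2.0-schema.json") as f:
        schema = json.load(f)
    with open("petstore.json") as f:
        instance = json.load(f)

    # The Swagger 2.0 schema is itself written against draft-04.
    Draft4Validator.check_schema(schema)
    Draft4Validator(schema).validate(instance)  # raises ValidationError if invalid
    print("valid")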
diff --git a/third_party/python/jsonschema/jsonschema/benchmarks/json_schema_test_suite.py b/third_party/python/jsonschema/jsonschema/benchmarks/json_schema_test_suite.py
new file mode 100644
index 0000000000..905fb6a3b8
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/benchmarks/json_schema_test_suite.py
@@ -0,0 +1,12 @@
+"""
+A performance benchmark using the official test suite.
+
+This benchmarks jsonschema using every valid example in the
+JSON-Schema-Test-Suite. It will take some time to complete.
+"""
+from pyperf import Runner
+
+from jsonschema.tests._suite import Suite
+
+if __name__ == "__main__":
+ Suite().benchmark(runner=Runner())
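pyperf's Runner parses its own command-line options, so the benchmark is meant to be launched as a script. A minimal sketch of driving it from another program, assuming the vendored package is importable and the JSON-Schema-Test-Suite data is present (-o is pyperf's standard flag for writing results to a file):

    import subprocess
    import sys

    # Run the benchmark in a fresh interpreter and record results to suite.json.
    subprocess.run(
        [sys.executable, "-m", "jsonschema.benchmarks.json_schema_test_suite",
         "-o", "suite.json"],
        check=True,
    )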
diff --git a/third_party/python/jsonschema/jsonschema/cli.py b/third_party/python/jsonschema/jsonschema/cli.py
new file mode 100644
index 0000000000..f93b5c5a08
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/cli.py
@@ -0,0 +1,299 @@
+"""
+The ``jsonschema`` command line.
+"""
+
+from json import JSONDecodeError
+from textwrap import dedent
+import argparse
+import json
+import sys
+import traceback
+import warnings
+
+try:
+ from importlib import metadata
+except ImportError:
+ import importlib_metadata as metadata # type: ignore
+
+try:
+ from pkgutil import resolve_name
+except ImportError:
+ from pkgutil_resolve_name import resolve_name # type: ignore
+
+import attr
+
+from jsonschema.exceptions import SchemaError
+from jsonschema.validators import RefResolver, validator_for
+
+warnings.warn(
+ (
+ "The jsonschema CLI is deprecated and will be removed in a future "
+ "version. Please use check-jsonschema instead, which can be installed "
+ "from https://pypi.org/project/check-jsonschema/"
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+)
+
+
+class _CannotLoadFile(Exception):
+ pass
+
+
+@attr.s
+class _Outputter:
+
+ _formatter = attr.ib()
+ _stdout = attr.ib()
+ _stderr = attr.ib()
+
+ @classmethod
+ def from_arguments(cls, arguments, stdout, stderr):
+ if arguments["output"] == "plain":
+ formatter = _PlainFormatter(arguments["error_format"])
+ elif arguments["output"] == "pretty":
+ formatter = _PrettyFormatter()
+ return cls(formatter=formatter, stdout=stdout, stderr=stderr)
+
+ def load(self, path):
+ try:
+ file = open(path)
+ except FileNotFoundError:
+ self.filenotfound_error(path=path, exc_info=sys.exc_info())
+ raise _CannotLoadFile()
+
+ with file:
+ try:
+ return json.load(file)
+ except JSONDecodeError:
+ self.parsing_error(path=path, exc_info=sys.exc_info())
+ raise _CannotLoadFile()
+
+ def filenotfound_error(self, **kwargs):
+ self._stderr.write(self._formatter.filenotfound_error(**kwargs))
+
+ def parsing_error(self, **kwargs):
+ self._stderr.write(self._formatter.parsing_error(**kwargs))
+
+ def validation_error(self, **kwargs):
+ self._stderr.write(self._formatter.validation_error(**kwargs))
+
+ def validation_success(self, **kwargs):
+ self._stdout.write(self._formatter.validation_success(**kwargs))
+
+
+@attr.s
+class _PrettyFormatter:
+
+ _ERROR_MSG = dedent(
+ """\
+ ===[{type}]===({path})===
+
+ {body}
+ -----------------------------
+ """,
+ )
+ _SUCCESS_MSG = "===[SUCCESS]===({path})===\n"
+
+ def filenotfound_error(self, path, exc_info):
+ return self._ERROR_MSG.format(
+ path=path,
+ type="FileNotFoundError",
+ body="{!r} does not exist.".format(path),
+ )
+
+ def parsing_error(self, path, exc_info):
+ exc_type, exc_value, exc_traceback = exc_info
+ exc_lines = "".join(
+ traceback.format_exception(exc_type, exc_value, exc_traceback),
+ )
+ return self._ERROR_MSG.format(
+ path=path,
+ type=exc_type.__name__,
+ body=exc_lines,
+ )
+
+ def validation_error(self, instance_path, error):
+ return self._ERROR_MSG.format(
+ path=instance_path,
+ type=error.__class__.__name__,
+ body=error,
+ )
+
+ def validation_success(self, instance_path):
+ return self._SUCCESS_MSG.format(path=instance_path)
+
+
+@attr.s
+class _PlainFormatter:
+
+ _error_format = attr.ib()
+
+ def filenotfound_error(self, path, exc_info):
+ return "{!r} does not exist.\n".format(path)
+
+ def parsing_error(self, path, exc_info):
+ return "Failed to parse {}: {}\n".format(
+ "<stdin>" if path == "<stdin>" else repr(path),
+ exc_info[1],
+ )
+
+ def validation_error(self, instance_path, error):
+ return self._error_format.format(file_name=instance_path, error=error)
+
+ def validation_success(self, instance_path):
+ return ""
+
+
+def _resolve_name_with_default(name):
+ if "." not in name:
+ name = "jsonschema." + name
+ return resolve_name(name)
+
+
+parser = argparse.ArgumentParser(
+ description="JSON Schema Validation CLI",
+)
+parser.add_argument(
+ "-i", "--instance",
+ action="append",
+ dest="instances",
+ help="""
+ a path to a JSON instance (e.g. filename.json) to validate (may
+ be specified multiple times). If no instances are provided via this
+ option, one will be expected on standard input.
+ """,
+)
+parser.add_argument(
+ "-F", "--error-format",
+ help="""
+ the format to use for each validation error message, specified
+ in a form suitable for str.format. This string will be passed
+ one formatted object named 'error' for each ValidationError.
+ Only provide this option when using --output=plain, which is the
+ default. If this argument is unprovided and --output=plain is
+ used, a simple default representation will be used.
+ """,
+)
+parser.add_argument(
+ "-o", "--output",
+ choices=["plain", "pretty"],
+ default="plain",
+ help="""
+ an output format to use. 'plain' (default) will produce minimal
+ text with one line for each error, while 'pretty' will produce
+ more detailed human-readable output on multiple lines.
+ """,
+)
+parser.add_argument(
+ "-V", "--validator",
+ type=_resolve_name_with_default,
+ help="""
+ the fully qualified object name of a validator to use, or, for
+ validators that are registered with jsonschema, simply the name
+ of the class.
+ """,
+)
+parser.add_argument(
+ "--base-uri",
+ help="""
+ a base URI to assign to the provided schema, even if it does not
+ declare one (via e.g. $id). This option can be used if you wish to
+ resolve relative references to a particular URI (or local path)
+ """,
+)
+parser.add_argument(
+ "--version",
+ action="version",
+ version=metadata.version("jsonschema"),
+)
+parser.add_argument(
+ "schema",
+ help="the path to a JSON Schema to validate with (i.e. schema.json)",
+)
+
+
+def parse_args(args):
+ arguments = vars(parser.parse_args(args=args or ["--help"]))
+ if arguments["output"] != "plain" and arguments["error_format"]:
+ raise parser.error(
+ "--error-format can only be used with --output plain",
+ )
+ if arguments["output"] == "plain" and arguments["error_format"] is None:
+ arguments["error_format"] = "{error.instance}: {error.message}\n"
+ return arguments
+
+
+def _validate_instance(instance_path, instance, validator, outputter):
+ invalid = False
+ for error in validator.iter_errors(instance):
+ invalid = True
+ outputter.validation_error(instance_path=instance_path, error=error)
+
+ if not invalid:
+ outputter.validation_success(instance_path=instance_path)
+ return invalid
+
+
+def main(args=sys.argv[1:]):
+ sys.exit(run(arguments=parse_args(args=args)))
+
+
+def run(arguments, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin):
+ outputter = _Outputter.from_arguments(
+ arguments=arguments,
+ stdout=stdout,
+ stderr=stderr,
+ )
+
+ try:
+ schema = outputter.load(arguments["schema"])
+ except _CannotLoadFile:
+ return 1
+
+ if arguments["validator"] is None:
+ arguments["validator"] = validator_for(schema)
+
+ try:
+ arguments["validator"].check_schema(schema)
+ except SchemaError as error:
+ outputter.validation_error(
+ instance_path=arguments["schema"],
+ error=error,
+ )
+ return 1
+
+ if arguments["instances"]:
+ load, instances = outputter.load, arguments["instances"]
+ else:
+ def load(_):
+ try:
+ return json.load(stdin)
+ except JSONDecodeError:
+ outputter.parsing_error(
+ path="<stdin>", exc_info=sys.exc_info(),
+ )
+ raise _CannotLoadFile()
+ instances = ["<stdin>"]
+
+ resolver = RefResolver(
+ base_uri=arguments["base_uri"],
+ referrer=schema,
+ ) if arguments["base_uri"] is not None else None
+
+ validator = arguments["validator"](schema, resolver=resolver)
+ exit_code = 0
+ for each in instances:
+ try:
+ instance = load(each)
+ except _CannotLoadFile:
+ exit_code = 1
+ else:
+ exit_code |= _validate_instance(
+ instance_path=each,
+ instance=instance,
+ validator=validator,
+ outputter=outputter,
+ )
+
+ return exit_code
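Although deprecated (importing the module emits the DeprecationWarning above), the CLI can still be driven programmatically with the two functions just defined. A minimal sketch; the file names are placeholders:

    from jsonschema import cli

    # parse_args() returns the plain dict of options that run() expects,
    # and run() returns the process exit code.
    arguments = cli.parse_args(
        ["-i", "instance.json", "--output", "pretty", "schema.json"],
    )
    raise SystemExit(cli.run(arguments=arguments))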
diff --git a/third_party/python/jsonschema/jsonschema/exceptions.py b/third_party/python/jsonschema/jsonschema/exceptions.py
new file mode 100644
index 0000000000..87db3df3a6
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/exceptions.py
@@ -0,0 +1,396 @@
+"""
+Validation errors, and some surrounding helpers.
+"""
+from __future__ import annotations
+
+from collections import defaultdict, deque
+from pprint import pformat
+from textwrap import dedent, indent
+import heapq
+import itertools
+
+import attr
+
+from jsonschema import _utils
+
+WEAK_MATCHES: frozenset[str] = frozenset(["anyOf", "oneOf"])
+STRONG_MATCHES: frozenset[str] = frozenset()
+
+_unset = _utils.Unset()
+
+
+class _Error(Exception):
+ def __init__(
+ self,
+ message,
+ validator=_unset,
+ path=(),
+ cause=None,
+ context=(),
+ validator_value=_unset,
+ instance=_unset,
+ schema=_unset,
+ schema_path=(),
+ parent=None,
+ type_checker=_unset,
+ ):
+ super(_Error, self).__init__(
+ message,
+ validator,
+ path,
+ cause,
+ context,
+ validator_value,
+ instance,
+ schema,
+ schema_path,
+ parent,
+ )
+ self.message = message
+ self.path = self.relative_path = deque(path)
+ self.schema_path = self.relative_schema_path = deque(schema_path)
+ self.context = list(context)
+ self.cause = self.__cause__ = cause
+ self.validator = validator
+ self.validator_value = validator_value
+ self.instance = instance
+ self.schema = schema
+ self.parent = parent
+ self._type_checker = type_checker
+
+ for error in context:
+ error.parent = self
+
+ def __repr__(self):
+ return f"<{self.__class__.__name__}: {self.message!r}>"
+
+ def __str__(self):
+ essential_for_verbose = (
+ self.validator, self.validator_value, self.instance, self.schema,
+ )
+ if any(m is _unset for m in essential_for_verbose):
+ return self.message
+
+ schema_path = _utils.format_as_index(
+ container=self._word_for_schema_in_error_message,
+ indices=list(self.relative_schema_path)[:-1],
+ )
+ instance_path = _utils.format_as_index(
+ container=self._word_for_instance_in_error_message,
+ indices=self.relative_path,
+ )
+ prefix = 16 * " "
+
+ return dedent(
+ f"""\
+ {self.message}
+
+ Failed validating {self.validator!r} in {schema_path}:
+ {indent(pformat(self.schema, width=72), prefix).lstrip()}
+
+ On {instance_path}:
+ {indent(pformat(self.instance, width=72), prefix).lstrip()}
+ """.rstrip(),
+ )
+
+ @classmethod
+ def create_from(cls, other):
+ return cls(**other._contents())
+
+ @property
+ def absolute_path(self):
+ parent = self.parent
+ if parent is None:
+ return self.relative_path
+
+ path = deque(self.relative_path)
+ path.extendleft(reversed(parent.absolute_path))
+ return path
+
+ @property
+ def absolute_schema_path(self):
+ parent = self.parent
+ if parent is None:
+ return self.relative_schema_path
+
+ path = deque(self.relative_schema_path)
+ path.extendleft(reversed(parent.absolute_schema_path))
+ return path
+
+ @property
+ def json_path(self):
+ path = "$"
+ for elem in self.absolute_path:
+ if isinstance(elem, int):
+ path += "[" + str(elem) + "]"
+ else:
+ path += "." + elem
+ return path
+
+ def _set(self, type_checker=None, **kwargs):
+ if type_checker is not None and self._type_checker is _unset:
+ self._type_checker = type_checker
+
+ for k, v in kwargs.items():
+ if getattr(self, k) is _unset:
+ setattr(self, k, v)
+
+ def _contents(self):
+ attrs = (
+ "message", "cause", "context", "validator", "validator_value",
+ "path", "schema_path", "instance", "schema", "parent",
+ )
+ return dict((attr, getattr(self, attr)) for attr in attrs)
+
+ def _matches_type(self):
+ try:
+ expected = self.schema["type"]
+ except (KeyError, TypeError):
+ return False
+
+ if isinstance(expected, str):
+ return self._type_checker.is_type(self.instance, expected)
+
+ return any(
+ self._type_checker.is_type(self.instance, expected_type)
+ for expected_type in expected
+ )
+
+
+class ValidationError(_Error):
+ """
+ An instance was invalid under a provided schema.
+ """
+
+ _word_for_schema_in_error_message = "schema"
+ _word_for_instance_in_error_message = "instance"
+
+
+class SchemaError(_Error):
+ """
+ A schema was invalid under its corresponding metaschema.
+ """
+
+ _word_for_schema_in_error_message = "metaschema"
+ _word_for_instance_in_error_message = "schema"
+
+
+@attr.s(hash=True)
+class RefResolutionError(Exception):
+ """
+ A ref could not be resolved.
+ """
+
+ _cause = attr.ib()
+
+ def __str__(self):
+ return str(self._cause)
+
+
+class UndefinedTypeCheck(Exception):
+ """
+ A type checker was asked to check a type it did not have registered.
+ """
+
+ def __init__(self, type):
+ self.type = type
+
+ def __str__(self):
+ return f"Type {self.type!r} is unknown to this type checker"
+
+
+class UnknownType(Exception):
+ """
+ A validator was asked to validate an instance against an unknown type.
+ """
+
+ def __init__(self, type, instance, schema):
+ self.type = type
+ self.instance = instance
+ self.schema = schema
+
+ def __str__(self):
+ prefix = 16 * " "
+
+ return dedent(
+ f"""\
+ Unknown type {self.type!r} for validator with schema:
+ {indent(pformat(self.schema, width=72), prefix).lstrip()}
+
+ While checking instance:
+ {indent(pformat(self.instance, width=72), prefix).lstrip()}
+ """.rstrip(),
+ )
+
+
+class FormatError(Exception):
+ """
+ Validating a format failed.
+ """
+
+ def __init__(self, message, cause=None):
+ super(FormatError, self).__init__(message, cause)
+ self.message = message
+ self.cause = self.__cause__ = cause
+
+ def __str__(self):
+ return self.message
+
+
+class ErrorTree:
+ """
+ ErrorTrees make it easier to check which validations failed.
+ """
+
+ _instance = _unset
+
+ def __init__(self, errors=()):
+ self.errors = {}
+ self._contents = defaultdict(self.__class__)
+
+ for error in errors:
+ container = self
+ for element in error.path:
+ container = container[element]
+ container.errors[error.validator] = error
+
+ container._instance = error.instance
+
+ def __contains__(self, index):
+ """
+ Check whether ``instance[index]`` has any errors.
+ """
+
+ return index in self._contents
+
+ def __getitem__(self, index):
+ """
+ Retrieve the child tree one level down at the given ``index``.
+
+ If the index is not in the instance that this tree corresponds
+ to and is not known by this tree, whatever error would be raised
+ by ``instance.__getitem__`` will be propagated (usually this is
+        some subclass of `LookupError`).
+ """
+
+ if self._instance is not _unset and index not in self:
+ self._instance[index]
+ return self._contents[index]
+
+ def __setitem__(self, index, value):
+ """
+ Add an error to the tree at the given ``index``.
+ """
+ self._contents[index] = value
+
+ def __iter__(self):
+ """
+ Iterate (non-recursively) over the indices in the instance with errors.
+ """
+
+ return iter(self._contents)
+
+ def __len__(self):
+ """
+ Return the `total_errors`.
+ """
+ return self.total_errors
+
+ def __repr__(self):
+ total = len(self)
+ errors = "error" if total == 1 else "errors"
+ return f"<{self.__class__.__name__} ({total} total {errors})>"
+
+ @property
+ def total_errors(self):
+ """
+ The total number of errors in the entire tree, including children.
+ """
+
+ child_errors = sum(len(tree) for _, tree in self._contents.items())
+ return len(self.errors) + child_errors
+
+
+def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
+ """
+ Create a key function that can be used to sort errors by relevance.
+
+ Arguments:
+ weak (set):
+ a collection of validation keywords to consider to be
+ "weak". If there are two errors at the same level of the
+ instance and one is in the set of weak validation keywords,
+ the other error will take priority. By default, :kw:`anyOf`
+ and :kw:`oneOf` are considered weak keywords and will be
+ superseded by other same-level validation errors.
+
+ strong (set):
+ a collection of validation keywords to consider to be
+ "strong"
+ """
+ def relevance(error):
+ validator = error.validator
+ return (
+ -len(error.path),
+ validator not in weak,
+ validator in strong,
+ not error._matches_type(),
+ )
+ return relevance
+
+
+relevance = by_relevance()
+
+
+def best_match(errors, key=relevance):
+ """
+ Try to find an error that appears to be the best match among given errors.
+
+ In general, errors that are higher up in the instance (i.e. for which
+ `ValidationError.path` is shorter) are considered better matches,
+ since they indicate "more" is wrong with the instance.
+
+ If the resulting match is either :kw:`oneOf` or :kw:`anyOf`, the
+ *opposite* assumption is made -- i.e. the deepest error is picked,
+ since these keywords only need to match once, and any other errors
+ may not be relevant.
+
+ Arguments:
+ errors (collections.abc.Iterable):
+
+ the errors to select from. Do not provide a mixture of
+ errors from different validation attempts (i.e. from
+ different instances or schemas), since it won't produce
+            sensible output.
+
+ key (collections.abc.Callable):
+
+ the key to use when sorting errors. See `relevance` and
+ transitively `by_relevance` for more details (the default is
+ to sort with the defaults of that function). Changing the
+ default is only useful if you want to change the function
+ that rates errors but still want the error context descent
+ done by this function.
+
+ Returns:
+ the best matching error, or ``None`` if the iterable was empty
+
+ .. note::
+
+ This function is a heuristic. Its return value may change for a given
+ set of inputs from version to version if better heuristics are added.
+ """
+ errors = iter(errors)
+ best = next(errors, None)
+ if best is None:
+ return
+ best = max(itertools.chain([best], errors), key=key)
+
+ while best.context:
+ # Calculate the minimum via nsmallest, because we don't recurse if
+ # all nested errors have the same relevance (i.e. if min == max == all)
+ smallest = heapq.nsmallest(2, best.context, key=key)
+ if len(smallest) == 2 and key(smallest[0]) == key(smallest[1]):
+ return best
+ best = smallest[0]
+ return best
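A short sketch of the two main entry points defined above, best_match and ErrorTree; the schema and instance are invented for illustration:

    from jsonschema import Draft7Validator
    from jsonschema.exceptions import ErrorTree, best_match

    validator = Draft7Validator(
        {"type": "object", "properties": {"x": {"type": "integer"}}},
    )
    errors = list(validator.iter_errors({"x": "not an int"}))

    # best_match() applies the relevance heuristic described above.
    error = best_match(errors)
    if error is not None:
        print(error.json_path, error.message)  # $.x ... is not of type 'integer'

    # ErrorTree indexes the same errors by their location in the instance.
    tree = ErrorTree(errors)
    assert "x" in tree
    print(tree["x"].errors["type"].message)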
diff --git a/third_party/python/jsonschema/jsonschema/protocols.py b/third_party/python/jsonschema/jsonschema/protocols.py
new file mode 100644
index 0000000000..5f52166faf
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/protocols.py
@@ -0,0 +1,225 @@
+"""
+typing.Protocol classes for jsonschema interfaces.
+"""
+
+# for reference material on Protocols, see
+# https://www.python.org/dev/peps/pep-0544/
+
+from __future__ import annotations
+
+from collections.abc import Callable, Mapping
+from typing import TYPE_CHECKING, Any, ClassVar, Iterable
+import sys
+
+# doing these imports with `try ... except ImportError` doesn't pass mypy
+# checking because mypy sees `typing._SpecialForm` and
+# `typing_extensions._SpecialForm` as incompatible
+#
+# see:
+# https://mypy.readthedocs.io/en/stable/runtime_troubles.html#using-new-additions-to-the-typing-module
+# https://github.com/python/mypy/issues/4427
+if sys.version_info >= (3, 8):
+ from typing import Protocol, runtime_checkable
+else:
+ from typing_extensions import Protocol, runtime_checkable
+
+# in order for Sphinx to resolve references accurately from type annotations,
+# it needs to see names like `jsonschema.TypeChecker`
+# therefore, only import at type-checking time (to avoid circular references),
+# but use `jsonschema` for any types which will otherwise not be resolvable
+if TYPE_CHECKING:
+ import jsonschema
+ import jsonschema.validators
+
+from jsonschema.exceptions import ValidationError
+
+# For code authors working on the validator protocol, these are the three
+# use-cases which should be kept in mind:
+#
+# 1. As a protocol class, it can be used in type annotations to describe the
+# available methods and attributes of a validator
+# 2. It is the source of autodoc for the validator documentation
+# 3. It is runtime_checkable, meaning that it can be used in isinstance()
+# checks.
+#
+# Since protocols are not base classes, isinstance() checking is limited in
+# its capabilities. See docs on runtime_checkable for detail
+
+
+@runtime_checkable
+class Validator(Protocol):
+ """
+ The protocol to which all validator classes adhere.
+
+ Arguments:
+
+ schema:
+
+ The schema that the validator object will validate with.
+ It is assumed to be valid, and providing
+ an invalid schema can lead to undefined behavior. See
+ `Validator.check_schema` to validate a schema first.
+
+ resolver:
+
+ a resolver that will be used to resolve :kw:`$ref`
+ properties (JSON references). If unprovided, one will be created.
+
+ format_checker:
+
+ if provided, a checker which will be used to assert about
+ :kw:`format` properties present in the schema. If unprovided,
+ *no* format validation is done, and the presence of format
+ within schemas is strictly informational. Certain formats
+ require additional packages to be installed in order to assert
+ against instances. Ensure you've installed `jsonschema` with
+ its `extra (optional) dependencies <index:extras>` when
+ invoking ``pip``.
+
+ .. deprecated:: v4.12.0
+
+ Subclassing validator classes now explicitly warns this is not part of
+ their public API.
+ """
+
+ #: An object representing the validator's meta schema (the schema that
+ #: describes valid schemas in the given version).
+ META_SCHEMA: ClassVar[Mapping]
+
+ #: A mapping of validation keywords (`str`\s) to functions that
+ #: validate the keyword with that name. For more information see
+ #: `creating-validators`.
+ VALIDATORS: ClassVar[Mapping]
+
+ #: A `jsonschema.TypeChecker` that will be used when validating
+ #: :kw:`type` keywords in JSON schemas.
+ TYPE_CHECKER: ClassVar[jsonschema.TypeChecker]
+
+ #: A `jsonschema.FormatChecker` that will be used when validating
+ #: :kw:`format` keywords in JSON schemas.
+ FORMAT_CHECKER: ClassVar[jsonschema.FormatChecker]
+
+ #: A function which given a schema returns its ID.
+ ID_OF: Callable[[Any], str | None]
+
+ #: The schema that will be used to validate instances
+ schema: Mapping | bool
+
+ def __init__(
+ self,
+ schema: Mapping | bool,
+ resolver: jsonschema.validators.RefResolver | None = None,
+ format_checker: jsonschema.FormatChecker | None = None,
+ ) -> None:
+ ...
+
+ @classmethod
+ def check_schema(cls, schema: Mapping | bool) -> None:
+ """
+ Validate the given schema against the validator's `META_SCHEMA`.
+
+ Raises:
+
+ `jsonschema.exceptions.SchemaError`:
+
+ if the schema is invalid
+ """
+
+ def is_type(self, instance: Any, type: str) -> bool:
+ """
+ Check if the instance is of the given (JSON Schema) type.
+
+ Arguments:
+
+ instance:
+
+ the value to check
+
+ type:
+
+ the name of a known (JSON Schema) type
+
+ Returns:
+
+ whether the instance is of the given type
+
+ Raises:
+
+ `jsonschema.exceptions.UnknownType`:
+
+ if ``type`` is not a known type
+ """
+
+ def is_valid(self, instance: Any) -> bool:
+ """
+ Check if the instance is valid under the current `schema`.
+
+ Returns:
+
+ whether the instance is valid or not
+
+ >>> schema = {"maxItems" : 2}
+ >>> Draft202012Validator(schema).is_valid([2, 3, 4])
+ False
+ """
+
+ def iter_errors(self, instance: Any) -> Iterable[ValidationError]:
+ r"""
+ Lazily yield each of the validation errors in the given instance.
+
+ >>> schema = {
+ ... "type" : "array",
+ ... "items" : {"enum" : [1, 2, 3]},
+ ... "maxItems" : 2,
+ ... }
+ >>> v = Draft202012Validator(schema)
+ >>> for error in sorted(v.iter_errors([2, 3, 4]), key=str):
+ ... print(error.message)
+ 4 is not one of [1, 2, 3]
+ [2, 3, 4] is too long
+
+ .. deprecated:: v4.0.0
+
+ Calling this function with a second schema argument is deprecated.
+ Use `Validator.evolve` instead.
+ """
+
+ def validate(self, instance: Any) -> None:
+ """
+ Check if the instance is valid under the current `schema`.
+
+ Raises:
+
+ `jsonschema.exceptions.ValidationError`:
+
+ if the instance is invalid
+
+ >>> schema = {"maxItems" : 2}
+ >>> Draft202012Validator(schema).validate([2, 3, 4])
+ Traceback (most recent call last):
+ ...
+ ValidationError: [2, 3, 4] is too long
+ """
+
+ def evolve(self, **kwargs) -> "Validator":
+ """
+ Create a new validator like this one, but with given changes.
+
+ Preserves all other attributes, so can be used to e.g. create a
+ validator with a different schema but with the same :kw:`$ref`
+ resolution behavior.
+
+ >>> validator = Draft202012Validator({})
+ >>> validator.evolve(schema={"type": "number"})
+ Draft202012Validator(schema={'type': 'number'}, format_checker=None)
+
+ The returned object satisfies the validator protocol, but may not
+ be of the same concrete class! In particular this occurs
+ when a :kw:`$ref` occurs to a schema with a different
+ :kw:`$schema` than this one (i.e. for a different draft).
+
+ >>> validator.evolve(
+ ... schema={"$schema": Draft7Validator.META_SCHEMA["$id"]}
+ ... )
+ Draft7Validator(schema=..., format_checker=None)
+ """
diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft2019-09.json b/third_party/python/jsonschema/jsonschema/schemas/draft2019-09.json
new file mode 100644
index 0000000000..2248a0c80b
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/draft2019-09.json
@@ -0,0 +1,42 @@
+{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/schema",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/core": true,
+ "https://json-schema.org/draft/2019-09/vocab/applicator": true,
+ "https://json-schema.org/draft/2019-09/vocab/validation": true,
+ "https://json-schema.org/draft/2019-09/vocab/meta-data": true,
+ "https://json-schema.org/draft/2019-09/vocab/format": false,
+ "https://json-schema.org/draft/2019-09/vocab/content": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Core and Validation specifications meta-schema",
+ "allOf": [
+ {"$ref": "meta/core"},
+ {"$ref": "meta/applicator"},
+ {"$ref": "meta/validation"},
+ {"$ref": "meta/meta-data"},
+ {"$ref": "meta/format"},
+ {"$ref": "meta/content"}
+ ],
+ "type": ["object", "boolean"],
+ "properties": {
+ "definitions": {
+ "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.",
+ "type": "object",
+ "additionalProperties": { "$recursiveRef": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"",
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$recursiveRef": "#" },
+ { "$ref": "meta/validation#/$defs/stringArray" }
+ ]
+ }
+ }
+ }
+}
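Bundling this file lets jsonschema validate 2019-09 schemas without fetching the meta-schema over the network. A minimal sketch, assuming the package's Draft201909Validator:

    from jsonschema import Draft201909Validator

    schema = {
        "$schema": "https://json-schema.org/draft/2019-09/schema",
        "type": "object",
        "properties": {"n": {"type": "integer", "minimum": 0}},
    }
    # check_schema() validates against the bundled meta-schema above.
    Draft201909Validator.check_schema(schema)
    assert Draft201909Validator(schema).is_valid({"n": 3})
    assert not Draft201909Validator(schema).is_valid({"n": -1})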
diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft2020-12.json b/third_party/python/jsonschema/jsonschema/schemas/draft2020-12.json
new file mode 100644
index 0000000000..d5e2d31c3c
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/draft2020-12.json
@@ -0,0 +1,58 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/schema",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/core": true,
+ "https://json-schema.org/draft/2020-12/vocab/applicator": true,
+ "https://json-schema.org/draft/2020-12/vocab/unevaluated": true,
+ "https://json-schema.org/draft/2020-12/vocab/validation": true,
+ "https://json-schema.org/draft/2020-12/vocab/meta-data": true,
+ "https://json-schema.org/draft/2020-12/vocab/format-annotation": true,
+ "https://json-schema.org/draft/2020-12/vocab/content": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Core and Validation specifications meta-schema",
+ "allOf": [
+ {"$ref": "meta/core"},
+ {"$ref": "meta/applicator"},
+ {"$ref": "meta/unevaluated"},
+ {"$ref": "meta/validation"},
+ {"$ref": "meta/meta-data"},
+ {"$ref": "meta/format-annotation"},
+ {"$ref": "meta/content"}
+ ],
+ "type": ["object", "boolean"],
+ "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.",
+ "properties": {
+ "definitions": {
+ "$comment": "\"definitions\" has been replaced by \"$defs\".",
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "deprecated": true,
+ "default": {}
+ },
+ "dependencies": {
+ "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.",
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$dynamicRef": "#meta" },
+ { "$ref": "meta/validation#/$defs/stringArray" }
+ ]
+ },
+ "deprecated": true,
+ "default": {}
+ },
+ "$recursiveAnchor": {
+ "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".",
+ "$ref": "meta/core#/$defs/anchorString",
+ "deprecated": true
+ },
+ "$recursiveRef": {
+ "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".",
+ "$ref": "meta/core#/$defs/uriReferenceString",
+ "deprecated": true
+ }
+ }
+}
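As the $comment entries explain, this meta-schema deliberately keeps pre-2020 keywords valid (merely deprecated) so legacy schemas do not suddenly fail. A sketch of that tolerance, assuming Draft202012Validator:

    from jsonschema import Draft202012Validator

    # "definitions" is deprecated in 2020-12, but the meta-schema above
    # still accepts it, and $refs into it keep resolving.
    legacy = {
        "definitions": {"positive": {"type": "integer", "minimum": 1}},
        "$ref": "#/definitions/positive",
    }
    Draft202012Validator.check_schema(legacy)  # deprecated, not invalid
    assert Draft202012Validator(legacy).is_valid(2)
    assert not Draft202012Validator(legacy).is_valid(0)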
diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft3.json b/third_party/python/jsonschema/jsonschema/schemas/draft3.json
new file mode 100644
index 0000000000..8b26b1f89f
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/draft3.json
@@ -0,0 +1,172 @@
+{
+ "$schema" : "http://json-schema.org/draft-03/schema#",
+ "id" : "http://json-schema.org/draft-03/schema#",
+ "type" : "object",
+
+ "properties" : {
+ "type" : {
+ "type" : ["string", "array"],
+ "items" : {
+ "type" : ["string", {"$ref" : "#"}]
+ },
+ "uniqueItems" : true,
+ "default" : "any"
+ },
+
+ "properties" : {
+ "type" : "object",
+ "additionalProperties" : {"$ref" : "#"},
+ "default" : {}
+ },
+
+ "patternProperties" : {
+ "type" : "object",
+ "additionalProperties" : {"$ref" : "#"},
+ "default" : {}
+ },
+
+ "additionalProperties" : {
+ "type" : [{"$ref" : "#"}, "boolean"],
+ "default" : {}
+ },
+
+ "items" : {
+ "type" : [{"$ref" : "#"}, "array"],
+ "items" : {"$ref" : "#"},
+ "default" : {}
+ },
+
+ "additionalItems" : {
+ "type" : [{"$ref" : "#"}, "boolean"],
+ "default" : {}
+ },
+
+ "required" : {
+ "type" : "boolean",
+ "default" : false
+ },
+
+ "dependencies" : {
+ "type" : "object",
+ "additionalProperties" : {
+ "type" : ["string", "array", {"$ref" : "#"}],
+ "items" : {
+ "type" : "string"
+ }
+ },
+ "default" : {}
+ },
+
+ "minimum" : {
+ "type" : "number"
+ },
+
+ "maximum" : {
+ "type" : "number"
+ },
+
+ "exclusiveMinimum" : {
+ "type" : "boolean",
+ "default" : false
+ },
+
+ "exclusiveMaximum" : {
+ "type" : "boolean",
+ "default" : false
+ },
+
+ "minItems" : {
+ "type" : "integer",
+ "minimum" : 0,
+ "default" : 0
+ },
+
+ "maxItems" : {
+ "type" : "integer",
+ "minimum" : 0
+ },
+
+ "uniqueItems" : {
+ "type" : "boolean",
+ "default" : false
+ },
+
+ "pattern" : {
+ "type" : "string",
+ "format" : "regex"
+ },
+
+ "minLength" : {
+ "type" : "integer",
+ "minimum" : 0,
+ "default" : 0
+ },
+
+ "maxLength" : {
+ "type" : "integer"
+ },
+
+ "enum" : {
+ "type" : "array",
+ "minItems" : 1,
+ "uniqueItems" : true
+ },
+
+ "default" : {
+ "type" : "any"
+ },
+
+ "title" : {
+ "type" : "string"
+ },
+
+ "description" : {
+ "type" : "string"
+ },
+
+ "format" : {
+ "type" : "string"
+ },
+
+ "divisibleBy" : {
+ "type" : "number",
+ "minimum" : 0,
+ "exclusiveMinimum" : true,
+ "default" : 1
+ },
+
+ "disallow" : {
+ "type" : ["string", "array"],
+ "items" : {
+ "type" : ["string", {"$ref" : "#"}]
+ },
+ "uniqueItems" : true
+ },
+
+ "extends" : {
+ "type" : [{"$ref" : "#"}, "array"],
+ "items" : {"$ref" : "#"},
+ "default" : {}
+ },
+
+ "id" : {
+ "type" : "string"
+ },
+
+ "$ref" : {
+ "type" : "string"
+ },
+
+ "$schema" : {
+ "type" : "string",
+ "format" : "uri"
+ }
+ },
+
+ "dependencies" : {
+ "exclusiveMinimum" : "minimum",
+ "exclusiveMaximum" : "maximum"
+ },
+
+ "default" : {}
+}
diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft4.json b/third_party/python/jsonschema/jsonschema/schemas/draft4.json
new file mode 100644
index 0000000000..bcbb84743e
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/draft4.json
@@ -0,0 +1,149 @@
+{
+ "id": "http://json-schema.org/draft-04/schema#",
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "description": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "positiveInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "positiveIntegerDefault0": {
+ "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ]
+ },
+ "simpleTypes": {
+ "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ },
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string"
+ },
+ "$schema": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "multipleOf": {
+ "type": "number",
+ "minimum": 0,
+ "exclusiveMinimum": true
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "boolean",
+ "default": false
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxLength": { "$ref": "#/definitions/positiveInteger" },
+ "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/positiveInteger" },
+ "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxProperties": { "$ref": "#/definitions/positiveInteger" },
+ "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": {
+ "anyOf": [
+ { "type": "boolean" },
+ { "$ref": "#" }
+ ],
+ "default": {}
+ },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "enum": {
+ "type": "array",
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "dependencies": {
+ "exclusiveMaximum": [ "maximum" ],
+ "exclusiveMinimum": [ "minimum" ]
+ },
+ "default": {}
+}
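Note the draft-04 idiom visible here: exclusive bounds are booleans that modify maximum/minimum, and the top-level "dependencies" entry requires the paired keyword to be present. A sketch, assuming Draft4Validator:

    from jsonschema import Draft4Validator
    from jsonschema.exceptions import SchemaError

    schema = {"type": "number", "maximum": 10, "exclusiveMaximum": True}
    Draft4Validator.check_schema(schema)

    v = Draft4Validator(schema)
    assert v.is_valid(9.5)
    assert not v.is_valid(10)  # excluded by the boolean flag

    # Per the "dependencies" section above, exclusiveMaximum without
    # maximum is itself an invalid draft-04 schema.
    try:
        Draft4Validator.check_schema({"exclusiveMaximum": True})
    except SchemaError:
        pass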
diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft6.json b/third_party/python/jsonschema/jsonschema/schemas/draft6.json
new file mode 100644
index 0000000000..a0d2bf7896
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/draft6.json
@@ -0,0 +1,153 @@
+{
+ "$schema": "http://json-schema.org/draft-06/schema#",
+ "$id": "http://json-schema.org/draft-06/schema#",
+ "title": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "allOf": [
+ { "$ref": "#/definitions/nonNegativeInteger" },
+ { "default": 0 }
+ ]
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ },
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": {},
+ "examples": {
+ "type": "array",
+ "items": {}
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": { "$ref": "#" },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": {}
+ },
+ "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "contains": { "$ref": "#" },
+ "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": { "$ref": "#" },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "propertyNames": { "$ref": "#" },
+ "const": {},
+ "enum": {
+ "type": "array"
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "default": {}
+}
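Contrast with draft-04 above: from draft-06 onward, exclusiveMaximum and exclusiveMinimum are standalone numbers rather than boolean modifiers. A sketch, assuming Draft6Validator:

    from jsonschema import Draft6Validator

    schema = {"type": "number", "exclusiveMaximum": 10}
    Draft6Validator.check_schema(schema)

    v = Draft6Validator(schema)
    assert v.is_valid(9.99)
    assert not v.is_valid(10)  # the bound itself is excluded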
diff --git a/third_party/python/jsonschema/jsonschema/schemas/draft7.json b/third_party/python/jsonschema/jsonschema/schemas/draft7.json
new file mode 100644
index 0000000000..746cde9690
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/draft7.json
@@ -0,0 +1,166 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "$id": "http://json-schema.org/draft-07/schema#",
+ "title": "Core schema meta-schema",
+ "definitions": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$ref": "#" }
+ },
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "allOf": [
+ { "$ref": "#/definitions/nonNegativeInteger" },
+ { "default": 0 }
+ ]
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ },
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$comment": {
+ "type": "string"
+ },
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": true,
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "examples": {
+ "type": "array",
+ "items": true
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "additionalItems": { "$ref": "#" },
+ "items": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/schemaArray" }
+ ],
+ "default": true
+ },
+ "maxItems": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "contains": { "$ref": "#" },
+ "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/definitions/stringArray" },
+ "additionalProperties": { "$ref": "#" },
+ "definitions": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$ref": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependencies": {
+ "type": "object",
+ "additionalProperties": {
+ "anyOf": [
+ { "$ref": "#" },
+ { "$ref": "#/definitions/stringArray" }
+ ]
+ }
+ },
+ "propertyNames": { "$ref": "#" },
+ "const": true,
+ "enum": {
+ "type": "array",
+ "items": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/definitions/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/definitions/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "format": { "type": "string" },
+ "contentMediaType": { "type": "string" },
+ "contentEncoding": { "type": "string" },
+ "if": {"$ref": "#"},
+ "then": {"$ref": "#"},
+ "else": {"$ref": "#"},
+ "allOf": { "$ref": "#/definitions/schemaArray" },
+ "anyOf": { "$ref": "#/definitions/schemaArray" },
+ "oneOf": { "$ref": "#/definitions/schemaArray" },
+ "not": { "$ref": "#" }
+ },
+ "default": true
+}
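Draft-07 extends draft-06 chiefly with the `if`/`then`/`else` conditional applicators and the `content*` annotations. A short sketch of the conditionals, using the vendored `Draft7Validator`:

```python
from jsonschema import Draft7Validator

schema = {
    "if": {"properties": {"kind": {"const": "circle"}}, "required": ["kind"]},
    "then": {"required": ["radius"]},
    "else": {"required": ["width", "height"]},
}
v = Draft7Validator(schema)
assert v.is_valid({"kind": "circle", "radius": 2})
assert not v.is_valid({"kind": "circle"})                   # "then" applies
assert v.is_valid({"kind": "rect", "width": 1, "height": 2})  # "else" applies
```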
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/applicator b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/applicator
new file mode 100644
index 0000000000..24a1cc4f4f
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/applicator
@@ -0,0 +1,56 @@
+{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/applicator",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/applicator": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Applicator vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "additionalItems": { "$recursiveRef": "#" },
+ "unevaluatedItems": { "$recursiveRef": "#" },
+ "items": {
+ "anyOf": [
+ { "$recursiveRef": "#" },
+ { "$ref": "#/$defs/schemaArray" }
+ ]
+ },
+ "contains": { "$recursiveRef": "#" },
+ "additionalProperties": { "$recursiveRef": "#" },
+ "unevaluatedProperties": { "$recursiveRef": "#" },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$recursiveRef": "#" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$recursiveRef": "#" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependentSchemas": {
+ "type": "object",
+ "additionalProperties": {
+ "$recursiveRef": "#"
+ }
+ },
+ "propertyNames": { "$recursiveRef": "#" },
+ "if": { "$recursiveRef": "#" },
+ "then": { "$recursiveRef": "#" },
+ "else": { "$recursiveRef": "#" },
+ "allOf": { "$ref": "#/$defs/schemaArray" },
+ "anyOf": { "$ref": "#/$defs/schemaArray" },
+ "oneOf": { "$ref": "#/$defs/schemaArray" },
+ "not": { "$recursiveRef": "#" }
+ },
+ "$defs": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$recursiveRef": "#" }
+ }
+ }
+}
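The 2019-09 applicator vocabulary replaces the schema form of draft-07 `dependencies` with `dependentSchemas` and routes all subschema recursion through `$recursiveRef`/`$recursiveAnchor`. A sketch of `dependentSchemas` with the vendored `Draft201909Validator`:

```python
from jsonschema import Draft201909Validator

schema = {
    "dependentSchemas": {
        "credit_card": {"required": ["billing_address"]},
    },
}
v = Draft201909Validator(schema)
assert v.is_valid({"name": "anon"})                 # trigger property absent
assert not v.is_valid({"credit_card": "4111-...."})  # dependent schema applies
```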
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/content b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/content
new file mode 100644
index 0000000000..f6752a8efb
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/content
@@ -0,0 +1,17 @@
+{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/content",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/content": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Content vocabulary meta-schema",
+
+ "type": ["object", "boolean"],
+ "properties": {
+ "contentMediaType": { "type": "string" },
+ "contentEncoding": { "type": "string" },
+ "contentSchema": { "$recursiveRef": "#" }
+ }
+}
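The content vocabulary is annotation-only; consistently, the validator tables later in this patch register no callables for `contentMediaType`, `contentEncoding`, or `contentSchema`, so these keywords never fail validation on their own. A sketch:

```python
from jsonschema import Draft201909Validator

schema = {"contentMediaType": "application/json",
          "contentEncoding": "base64"}
# Annotation only: the instance is neither decoded nor parsed.
assert Draft201909Validator(schema).is_valid("definitely not base64 JSON")
```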
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/core b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/core
new file mode 100644
index 0000000000..eb708a5604
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/core
@@ -0,0 +1,57 @@
+{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/core",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/core": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Core vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "type": "string",
+ "format": "uri-reference",
+ "$comment": "Non-empty fragments not allowed.",
+ "pattern": "^[^#]*#?$"
+ },
+ "$schema": {
+ "type": "string",
+ "format": "uri"
+ },
+ "$anchor": {
+ "type": "string",
+ "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$"
+ },
+ "$ref": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$recursiveRef": {
+ "type": "string",
+ "format": "uri-reference"
+ },
+ "$recursiveAnchor": {
+ "type": "boolean",
+ "default": false
+ },
+ "$vocabulary": {
+ "type": "object",
+ "propertyNames": {
+ "type": "string",
+ "format": "uri"
+ },
+ "additionalProperties": {
+ "type": "boolean"
+ }
+ },
+ "$comment": {
+ "type": "string"
+ },
+ "$defs": {
+ "type": "object",
+ "additionalProperties": { "$recursiveRef": "#" },
+ "default": {}
+ }
+ }
+}
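The core vocabulary renames `definitions` to `$defs` and, starting with 2019-09, lets `$ref` sit beside other keywords rather than overriding them (which is why the 2019-09 and 2020-12 validators below omit the `ignore_ref_siblings` shim used for drafts 3 through 7). A minimal `$defs`/`$ref` sketch:

```python
from jsonschema import Draft201909Validator

schema = {
    "$defs": {"positive": {"type": "integer", "exclusiveMinimum": 0}},
    "$ref": "#/$defs/positive",
}
v = Draft201909Validator(schema)
assert v.is_valid(3)
assert not v.is_valid(-3)
```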
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/meta-data b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/meta-data
new file mode 100644
index 0000000000..da04cff6d3
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/meta-data
@@ -0,0 +1,37 @@
+{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/meta-data",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/meta-data": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Meta-data vocabulary meta-schema",
+
+ "type": ["object", "boolean"],
+ "properties": {
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": true,
+ "deprecated": {
+ "type": "boolean",
+ "default": false
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "writeOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "examples": {
+ "type": "array",
+ "items": true
+ }
+ }
+}
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/validation b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/validation
new file mode 100644
index 0000000000..9f59677b30
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2019-09/validation
@@ -0,0 +1,98 @@
+{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/validation",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/validation": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Validation vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "maxItems": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxContains": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minContains": {
+ "$ref": "#/$defs/nonNegativeInteger",
+ "default": 1
+ },
+ "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/$defs/stringArray" },
+ "dependentRequired": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/$defs/stringArray"
+ }
+ },
+ "const": true,
+ "enum": {
+ "type": "array",
+ "items": true
+ },
+ "type": {
+ "anyOf": [
+ { "$ref": "#/$defs/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/$defs/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ }
+ },
+ "$defs": {
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "$ref": "#/$defs/nonNegativeInteger",
+ "default": 0
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ }
+}
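`maxContains`/`minContains` are new in 2019-09. No separate keyword entries appear in the validator tables below, so the sketch assumes the vendored release folds them into its `contains` callable, as recent jsonschema 4.x releases do:

```python
from jsonschema import Draft201909Validator

schema = {"contains": {"type": "integer"}, "minContains": 2}
v = Draft201909Validator(schema)
assert v.is_valid([1, "a", 2])    # two integer matches
assert not v.is_valid([1, "a"])   # only one match
```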
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/applicator b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/applicator
new file mode 100644
index 0000000000..ca69923096
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/applicator
@@ -0,0 +1,48 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/applicator",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/applicator": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Applicator vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "prefixItems": { "$ref": "#/$defs/schemaArray" },
+ "items": { "$dynamicRef": "#meta" },
+ "contains": { "$dynamicRef": "#meta" },
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "properties": {
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "default": {}
+ },
+ "patternProperties": {
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "propertyNames": { "format": "regex" },
+ "default": {}
+ },
+ "dependentSchemas": {
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" },
+ "default": {}
+ },
+ "propertyNames": { "$dynamicRef": "#meta" },
+ "if": { "$dynamicRef": "#meta" },
+ "then": { "$dynamicRef": "#meta" },
+ "else": { "$dynamicRef": "#meta" },
+ "allOf": { "$ref": "#/$defs/schemaArray" },
+ "anyOf": { "$ref": "#/$defs/schemaArray" },
+ "oneOf": { "$ref": "#/$defs/schemaArray" },
+ "not": { "$dynamicRef": "#meta" }
+ },
+ "$defs": {
+ "schemaArray": {
+ "type": "array",
+ "minItems": 1,
+ "items": { "$dynamicRef": "#meta" }
+ }
+ }
+}
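The 2020-12 applicator vocabulary splits tuple validation out of `items` into the new `prefixItems` keyword; `items` now constrains only the elements after the prefix. A sketch with the vendored `Draft202012Validator`:

```python
from jsonschema import Draft202012Validator

schema = {
    "prefixItems": [{"type": "integer"}, {"type": "string"}],
    "items": False,  # nothing is allowed beyond the prefix
}
v = Draft202012Validator(schema)
assert v.is_valid([1, "a"])
assert not v.is_valid([1, "a", 2.5])   # extra element past the prefix
assert not v.is_valid(["a", 1])        # positions swapped
```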
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/content b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/content
new file mode 100644
index 0000000000..2f6e056a9a
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/content
@@ -0,0 +1,17 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/content",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/content": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Content vocabulary meta-schema",
+
+ "type": ["object", "boolean"],
+ "properties": {
+ "contentEncoding": { "type": "string" },
+ "contentMediaType": { "type": "string" },
+ "contentSchema": { "$dynamicRef": "#meta" }
+ }
+}
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/core b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/core
new file mode 100644
index 0000000000..dfc092d964
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/core
@@ -0,0 +1,51 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/core",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/core": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Core vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "$id": {
+ "$ref": "#/$defs/uriReferenceString",
+ "$comment": "Non-empty fragments not allowed.",
+ "pattern": "^[^#]*#?$"
+ },
+ "$schema": { "$ref": "#/$defs/uriString" },
+ "$ref": { "$ref": "#/$defs/uriReferenceString" },
+ "$anchor": { "$ref": "#/$defs/anchorString" },
+ "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" },
+ "$dynamicAnchor": { "$ref": "#/$defs/anchorString" },
+ "$vocabulary": {
+ "type": "object",
+ "propertyNames": { "$ref": "#/$defs/uriString" },
+ "additionalProperties": {
+ "type": "boolean"
+ }
+ },
+ "$comment": {
+ "type": "string"
+ },
+ "$defs": {
+ "type": "object",
+ "additionalProperties": { "$dynamicRef": "#meta" }
+ }
+ },
+ "$defs": {
+ "anchorString": {
+ "type": "string",
+ "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$"
+ },
+ "uriString": {
+ "type": "string",
+ "format": "uri"
+ },
+ "uriReferenceString": {
+ "type": "string",
+ "format": "uri-reference"
+ }
+ }
+}
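The `anchorString` pattern above is asserted whenever a schema is checked against the meta-schema, which works offline here because `_store_schema_list()` in `validators.py` preloads these vocabulary documents by their `$id`. A sketch, assuming `check_schema` resolves the vocabulary references as described:

```python
from jsonschema import Draft202012Validator
from jsonschema.exceptions import SchemaError

Draft202012Validator.check_schema({"$anchor": "valid_anchor"})  # passes
try:
    Draft202012Validator.check_schema({"$anchor": "1-not-valid"})
except SchemaError as err:
    print(err.message)  # violates the anchorString pattern above
```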
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format
new file mode 100644
index 0000000000..09bbfdda97
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format
@@ -0,0 +1,14 @@
+{
+ "$schema": "https://json-schema.org/draft/2019-09/schema",
+ "$id": "https://json-schema.org/draft/2019-09/meta/format",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2019-09/vocab/format": true
+ },
+ "$recursiveAnchor": true,
+
+ "title": "Format vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "format": { "type": "string" }
+ }
+}
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format-annotation b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format-annotation
new file mode 100644
index 0000000000..51ef7ea118
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format-annotation
@@ -0,0 +1,14 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/format-annotation": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Format vocabulary meta-schema for annotation results",
+ "type": ["object", "boolean"],
+ "properties": {
+ "format": { "type": "string" }
+ }
+}
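Under the format-annotation vocabulary, `format` is an annotation by default and only asserts when a format checker is attached. A sketch using the vendored `FormatChecker` (its built-in `date` check needs no extra dependencies):

```python
from jsonschema import Draft202012Validator, FormatChecker

schema = {"type": "string", "format": "date"}
assert Draft202012Validator(schema).is_valid("not a date")  # annotation only

checking = Draft202012Validator(schema, format_checker=FormatChecker())
assert not checking.is_valid("not a date")
assert checking.is_valid("2023-01-31")
```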
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format-assertion b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format-assertion
new file mode 100644
index 0000000000..5e73fd7571
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/format-assertion
@@ -0,0 +1,14 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/format-assertion": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Format vocabulary meta-schema for assertion results",
+ "type": ["object", "boolean"],
+ "properties": {
+ "format": { "type": "string" }
+ }
+}
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/meta-data b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/meta-data
new file mode 100644
index 0000000000..05cbc22afd
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/meta-data
@@ -0,0 +1,37 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/meta-data",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/meta-data": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Meta-data vocabulary meta-schema",
+
+ "type": ["object", "boolean"],
+ "properties": {
+ "title": {
+ "type": "string"
+ },
+ "description": {
+ "type": "string"
+ },
+ "default": true,
+ "deprecated": {
+ "type": "boolean",
+ "default": false
+ },
+ "readOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "writeOnly": {
+ "type": "boolean",
+ "default": false
+ },
+ "examples": {
+ "type": "array",
+ "items": true
+ }
+ }
+}
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/unevaluated b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/unevaluated
new file mode 100644
index 0000000000..5f62a3ffa2
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/unevaluated
@@ -0,0 +1,15 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/unevaluated": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Unevaluated applicator vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "unevaluatedItems": { "$dynamicRef": "#meta" },
+ "unevaluatedProperties": { "$dynamicRef": "#meta" }
+ }
+}
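`unevaluatedItems`/`unevaluatedProperties` see through applicators such as `allOf`, rejecting whatever no subschema evaluated. A sketch:

```python
from jsonschema import Draft202012Validator

schema = {
    "allOf": [{"properties": {"a": {"type": "integer"}}}],
    "unevaluatedProperties": False,
}
v = Draft202012Validator(schema)
assert v.is_valid({"a": 1})
assert not v.is_valid({"a": 1, "b": 2})  # "b" evaluated by no subschema
```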
diff --git a/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/validation b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/validation
new file mode 100644
index 0000000000..606b87ba2e
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/schemas/vocabularies/draft2020-12/validation
@@ -0,0 +1,98 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://json-schema.org/draft/2020-12/meta/validation",
+ "$vocabulary": {
+ "https://json-schema.org/draft/2020-12/vocab/validation": true
+ },
+ "$dynamicAnchor": "meta",
+
+ "title": "Validation vocabulary meta-schema",
+ "type": ["object", "boolean"],
+ "properties": {
+ "type": {
+ "anyOf": [
+ { "$ref": "#/$defs/simpleTypes" },
+ {
+ "type": "array",
+ "items": { "$ref": "#/$defs/simpleTypes" },
+ "minItems": 1,
+ "uniqueItems": true
+ }
+ ]
+ },
+ "const": true,
+ "enum": {
+ "type": "array",
+ "items": true
+ },
+ "multipleOf": {
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "maximum": {
+ "type": "number"
+ },
+ "exclusiveMaximum": {
+ "type": "number"
+ },
+ "minimum": {
+ "type": "number"
+ },
+ "exclusiveMinimum": {
+ "type": "number"
+ },
+ "maxLength": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "pattern": {
+ "type": "string",
+ "format": "regex"
+ },
+ "maxItems": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "uniqueItems": {
+ "type": "boolean",
+ "default": false
+ },
+ "maxContains": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minContains": {
+ "$ref": "#/$defs/nonNegativeInteger",
+ "default": 1
+ },
+ "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" },
+ "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" },
+ "required": { "$ref": "#/$defs/stringArray" },
+ "dependentRequired": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/$defs/stringArray"
+ }
+ }
+ },
+ "$defs": {
+ "nonNegativeInteger": {
+ "type": "integer",
+ "minimum": 0
+ },
+ "nonNegativeIntegerDefault0": {
+ "$ref": "#/$defs/nonNegativeInteger",
+ "default": 0
+ },
+ "simpleTypes": {
+ "enum": [
+ "array",
+ "boolean",
+ "integer",
+ "null",
+ "number",
+ "object",
+ "string"
+ ]
+ },
+ "stringArray": {
+ "type": "array",
+ "items": { "type": "string" },
+ "uniqueItems": true,
+ "default": []
+ }
+ }
+}
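The 2020-12 validation vocabulary carries over `dependentRequired`, the property-list half of the old `dependencies` keyword. A sketch:

```python
from jsonschema import Draft202012Validator

schema = {"dependentRequired": {"credit_card": ["billing_address"]}}
v = Draft202012Validator(schema)
assert v.is_valid({"credit_card": "4111-....",
                   "billing_address": "555 Debtor's Lane"})
assert not v.is_valid({"credit_card": "4111-...."})
```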
diff --git a/third_party/python/jsonschema/jsonschema/validators.py b/third_party/python/jsonschema/jsonschema/validators.py
new file mode 100644
index 0000000000..66e803ea2d
--- /dev/null
+++ b/third_party/python/jsonschema/jsonschema/validators.py
@@ -0,0 +1,1161 @@
+"""
+Creation and extension of validators, with implementations for existing drafts.
+"""
+from __future__ import annotations
+
+from collections import deque
+from collections.abc import Mapping, Sequence
+from functools import lru_cache
+from operator import methodcaller
+from urllib.parse import unquote, urldefrag, urljoin, urlsplit
+from urllib.request import urlopen
+from warnings import warn
+import contextlib
+import json
+import reprlib
+import typing
+import warnings
+
+from pyrsistent import m
+import attr
+
+from jsonschema import (
+ _format,
+ _legacy_validators,
+ _types,
+ _utils,
+ _validators,
+ exceptions,
+)
+
+_UNSET = _utils.Unset()
+
+_VALIDATORS: dict[str, typing.Any] = {}
+_META_SCHEMAS = _utils.URIDict()
+_VOCABULARIES: list[tuple[str, typing.Any]] = []
+
+
+def __getattr__(name):
+ if name == "ErrorTree":
+ warnings.warn(
+ "Importing ErrorTree from jsonschema.validators is deprecated. "
+ "Instead import it from jsonschema.exceptions.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ from jsonschema.exceptions import ErrorTree
+ return ErrorTree
+ elif name == "validators":
+ warnings.warn(
+ "Accessing jsonschema.validators.validators is deprecated. "
+ "Use jsonschema.validators.validator_for with a given schema.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _VALIDATORS
+ elif name == "meta_schemas":
+ warnings.warn(
+ "Accessing jsonschema.validators.meta_schemas is deprecated. "
+ "Use jsonschema.validators.validator_for with a given schema.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _META_SCHEMAS
+ raise AttributeError(f"module {__name__} has no attribute {name}")
+
+
+def validates(version):
+ """
+ Register the decorated validator for a ``version`` of the specification.
+
+ Registered validators and their meta schemas will be considered when
+ parsing :kw:`$schema` keywords' URIs.
+
+ Arguments:
+
+ version (str):
+
+ An identifier to use as the version's name
+
+ Returns:
+
+ collections.abc.Callable:
+
+ a class decorator to decorate the validator with the version
+ """
+
+ def _validates(cls):
+ _VALIDATORS[version] = cls
+ meta_schema_id = cls.ID_OF(cls.META_SCHEMA)
+ _META_SCHEMAS[meta_schema_id] = cls
+ return cls
+ return _validates
+
+
+def _id_of(schema):
+ """
+ Return the ID of a schema for recent JSON Schema drafts.
+ """
+ if schema is True or schema is False:
+ return ""
+ return schema.get("$id", "")
+
+
+def _store_schema_list():
+ if not _VOCABULARIES:
+ package = _utils.resources.files(__package__)
+ for version in package.joinpath("schemas", "vocabularies").iterdir():
+ for path in version.iterdir():
+ vocabulary = json.loads(path.read_text())
+ _VOCABULARIES.append((vocabulary["$id"], vocabulary))
+ return [
+ (id, validator.META_SCHEMA) for id, validator in _META_SCHEMAS.items()
+ ] + _VOCABULARIES
+
+
+def create(
+ meta_schema,
+ validators=(),
+ version=None,
+ type_checker=_types.draft202012_type_checker,
+ format_checker=_format.draft202012_format_checker,
+ id_of=_id_of,
+ applicable_validators=methodcaller("items"),
+):
+ """
+ Create a new validator class.
+
+ Arguments:
+
+ meta_schema (collections.abc.Mapping):
+
+ the meta schema for the new validator class
+
+ validators (collections.abc.Mapping):
+
+ a mapping from names to callables, where each callable will
+ validate the schema property with the given name.
+
+ Each callable should take 4 arguments:
+
+ 1. a validator instance,
+ 2. the value of the property being validated within the
+ instance
+ 3. the instance
+ 4. the schema
+
+ version (str):
+
+ an identifier for the version that this validator class will
+ validate. If provided, the returned validator class will
+ have its ``__name__`` set to include the version, and also
+ will have `jsonschema.validators.validates` automatically
+ called for the given version.
+
+ type_checker (jsonschema.TypeChecker):
+
+ a type checker, used when applying the :kw:`type` keyword.
+
+ If unprovided, a `jsonschema.TypeChecker` will be created
+ with a set of default types typical of JSON Schema drafts.
+
+ format_checker (jsonschema.FormatChecker):
+
+ a format checker, used when applying the :kw:`format` keyword.
+
+ If unprovided, a `jsonschema.FormatChecker` will be created
+ with a set of default formats typical of JSON Schema drafts.
+
+ id_of (collections.abc.Callable):
+
+ A function that given a schema, returns its ID.
+
+ applicable_validators (collections.abc.Callable):
+
+ A function that given a schema, returns the list of
+ applicable validators (validation keywords and callables)
+ which will be used to validate the instance.
+
+ Returns:
+
+ a new `jsonschema.protocols.Validator` class
+ """
+ # preemptively don't shadow the `Validator.format_checker` local
+ format_checker_arg = format_checker
+
+ @attr.s
+ class Validator:
+
+ VALIDATORS = dict(validators)
+ META_SCHEMA = dict(meta_schema)
+ TYPE_CHECKER = type_checker
+ FORMAT_CHECKER = format_checker_arg
+ ID_OF = staticmethod(id_of)
+
+ schema = attr.ib(repr=reprlib.repr)
+ resolver = attr.ib(default=None, repr=False)
+ format_checker = attr.ib(default=None)
+
+ def __init_subclass__(cls):
+ warnings.warn(
+ (
+ "Subclassing validator classes is not intended to "
+ "be part of their public API. A future version "
+ "will make doing so an error, as the behavior of "
+ "subclasses isn't guaranteed to stay the same "
+ "between releases of jsonschema. Instead, prefer "
+ "composition of validators, wrapping them in an object "
+ "owned entirely by the downstream library."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+
+ def __attrs_post_init__(self):
+ if self.resolver is None:
+ self.resolver = RefResolver.from_schema(
+ self.schema,
+ id_of=id_of,
+ )
+
+ @classmethod
+ def check_schema(cls, schema, format_checker=_UNSET):
+ Validator = validator_for(cls.META_SCHEMA, default=cls)
+ if format_checker is _UNSET:
+ format_checker = Validator.FORMAT_CHECKER
+ validator = Validator(
+ schema=cls.META_SCHEMA,
+ format_checker=format_checker,
+ )
+ for error in validator.iter_errors(schema):
+ raise exceptions.SchemaError.create_from(error)
+
+ def evolve(self, **changes):
+ # Essentially reproduces attr.evolve, but may involve instantiating
+ # a different class than this one.
+ cls = self.__class__
+
+ schema = changes.setdefault("schema", self.schema)
+ NewValidator = validator_for(schema, default=cls)
+
+ for field in attr.fields(cls):
+ if not field.init:
+ continue
+ attr_name = field.name # To deal with private attributes.
+ init_name = attr_name if attr_name[0] != "_" else attr_name[1:]
+ if init_name not in changes:
+ changes[init_name] = getattr(self, attr_name)
+
+ return NewValidator(**changes)
+
+ def iter_errors(self, instance, _schema=None):
+ if _schema is not None:
+ warnings.warn(
+ (
+ "Passing a schema to Validator.iter_errors "
+ "is deprecated and will be removed in a future "
+ "release. Call validator.evolve(schema=new_schema)."
+ "iter_errors(...) instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ else:
+ _schema = self.schema
+
+ if _schema is True:
+ return
+ elif _schema is False:
+ yield exceptions.ValidationError(
+ f"False schema does not allow {instance!r}",
+ validator=None,
+ validator_value=None,
+ instance=instance,
+ schema=_schema,
+ )
+ return
+
+ scope = id_of(_schema)
+ if scope:
+ self.resolver.push_scope(scope)
+ try:
+ for k, v in applicable_validators(_schema):
+ validator = self.VALIDATORS.get(k)
+ if validator is None:
+ continue
+
+ errors = validator(self, v, instance, _schema) or ()
+ for error in errors:
+ # set details if not already set by the called fn
+ error._set(
+ validator=k,
+ validator_value=v,
+ instance=instance,
+ schema=_schema,
+ type_checker=self.TYPE_CHECKER,
+ )
+ if k not in {"if", "$ref"}:
+ error.schema_path.appendleft(k)
+ yield error
+ finally:
+ if scope:
+ self.resolver.pop_scope()
+
+ def descend(self, instance, schema, path=None, schema_path=None):
+ for error in self.evolve(schema=schema).iter_errors(instance):
+ if path is not None:
+ error.path.appendleft(path)
+ if schema_path is not None:
+ error.schema_path.appendleft(schema_path)
+ yield error
+
+ def validate(self, *args, **kwargs):
+ for error in self.iter_errors(*args, **kwargs):
+ raise error
+
+ def is_type(self, instance, type):
+ try:
+ return self.TYPE_CHECKER.is_type(instance, type)
+ except exceptions.UndefinedTypeCheck:
+ raise exceptions.UnknownType(type, instance, self.schema)
+
+ def is_valid(self, instance, _schema=None):
+ if _schema is not None:
+ warnings.warn(
+ (
+ "Passing a schema to Validator.is_valid is deprecated "
+ "and will be removed in a future release. Call "
+ "validator.evolve(schema=new_schema).is_valid(...) "
+ "instead."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ self = self.evolve(schema=_schema)
+
+ error = next(self.iter_errors(instance), None)
+ return error is None
+
+ if version is not None:
+ safe = version.title().replace(" ", "").replace("-", "")
+ Validator.__name__ = Validator.__qualname__ = f"{safe}Validator"
+ Validator = validates(version)(Validator)
+
+ return Validator
+
+
+def extend(
+ validator,
+ validators=(),
+ version=None,
+ type_checker=None,
+ format_checker=None,
+):
+ """
+ Create a new validator class by extending an existing one.
+
+ Arguments:
+
+ validator (jsonschema.protocols.Validator):
+
+ an existing validator class
+
+ validators (collections.abc.Mapping):
+
+ a mapping of new validator callables to extend with, whose
+ structure is as in `create`.
+
+ .. note::
+
+ Any validator callables with the same name as an
+ existing one will (silently) replace the old validator
+ callable entirely, effectively overriding any validation
+ done in the "parent" validator class.
+
+ If you wish to instead extend the behavior of a parent's
+ validator callable, delegate and call it directly in
+ the new validator function by retrieving it using
+ ``OldValidator.VALIDATORS["validation_keyword_name"]``.
+
+ version (str):
+
+ a version for the new validator class
+
+ type_checker (jsonschema.TypeChecker):
+
+ a type checker, used when applying the :kw:`type` keyword.
+
+ If unprovided, the type checker of the extended
+ `jsonschema.protocols.Validator` will be carried along.
+
+ format_checker (jsonschema.FormatChecker):
+
+ a format checker, used when applying the :kw:`format` keyword.
+
+ If unprovided, the format checker of the extended
+ `jsonschema.protocols.Validator` will be carried along.
+
+ Returns:
+
+ a new `jsonschema.protocols.Validator` class extending the one
+ provided
+
+ .. note:: Meta Schemas
+
+ The new validator class will have its parent's meta schema.
+
+ If you wish to change or extend the meta schema in the new
+ validator class, modify ``META_SCHEMA`` directly on the returned
+ class. Note that no implicit copying is done, so a copy should
+ likely be made before modifying it, in order to not affect the
+ old validator.
+ """
+
+ all_validators = dict(validator.VALIDATORS)
+ all_validators.update(validators)
+
+ if type_checker is None:
+ type_checker = validator.TYPE_CHECKER
+ if format_checker is None:
+ format_checker = validator.FORMAT_CHECKER
+ return create(
+ meta_schema=validator.META_SCHEMA,
+ validators=all_validators,
+ version=version,
+ type_checker=type_checker,
+ format_checker=format_checker,
+ id_of=validator.ID_OF,
+ )
+
+
+Draft3Validator = create(
+ meta_schema=_utils.load_schema("draft3"),
+ validators={
+ "$ref": _validators.ref,
+ "additionalItems": _validators.additionalItems,
+ "additionalProperties": _validators.additionalProperties,
+ "dependencies": _legacy_validators.dependencies_draft3,
+ "disallow": _legacy_validators.disallow_draft3,
+ "divisibleBy": _validators.multipleOf,
+ "enum": _validators.enum,
+ "extends": _legacy_validators.extends_draft3,
+ "format": _validators.format,
+ "items": _legacy_validators.items_draft3_draft4,
+ "maxItems": _validators.maxItems,
+ "maxLength": _validators.maxLength,
+ "maximum": _legacy_validators.maximum_draft3_draft4,
+ "minItems": _validators.minItems,
+ "minLength": _validators.minLength,
+ "minimum": _legacy_validators.minimum_draft3_draft4,
+ "pattern": _validators.pattern,
+ "patternProperties": _validators.patternProperties,
+ "properties": _legacy_validators.properties_draft3,
+ "type": _legacy_validators.type_draft3,
+ "uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft3_type_checker,
+ format_checker=_format.draft3_format_checker,
+ version="draft3",
+ id_of=_legacy_validators.id_of_ignore_ref(property="id"),
+ applicable_validators=_legacy_validators.ignore_ref_siblings,
+)
+
+Draft4Validator = create(
+ meta_schema=_utils.load_schema("draft4"),
+ validators={
+ "$ref": _validators.ref,
+ "additionalItems": _validators.additionalItems,
+ "additionalProperties": _validators.additionalProperties,
+ "allOf": _validators.allOf,
+ "anyOf": _validators.anyOf,
+ "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7,
+ "enum": _validators.enum,
+ "format": _validators.format,
+ "items": _legacy_validators.items_draft3_draft4,
+ "maxItems": _validators.maxItems,
+ "maxLength": _validators.maxLength,
+ "maxProperties": _validators.maxProperties,
+ "maximum": _legacy_validators.maximum_draft3_draft4,
+ "minItems": _validators.minItems,
+ "minLength": _validators.minLength,
+ "minProperties": _validators.minProperties,
+ "minimum": _legacy_validators.minimum_draft3_draft4,
+ "multipleOf": _validators.multipleOf,
+ "not": _validators.not_,
+ "oneOf": _validators.oneOf,
+ "pattern": _validators.pattern,
+ "patternProperties": _validators.patternProperties,
+ "properties": _validators.properties,
+ "required": _validators.required,
+ "type": _validators.type,
+ "uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft4_type_checker,
+ format_checker=_format.draft4_format_checker,
+ version="draft4",
+ id_of=_legacy_validators.id_of_ignore_ref(property="id"),
+ applicable_validators=_legacy_validators.ignore_ref_siblings,
+)
+
+Draft6Validator = create(
+ meta_schema=_utils.load_schema("draft6"),
+ validators={
+ "$ref": _validators.ref,
+ "additionalItems": _validators.additionalItems,
+ "additionalProperties": _validators.additionalProperties,
+ "allOf": _validators.allOf,
+ "anyOf": _validators.anyOf,
+ "const": _validators.const,
+ "contains": _legacy_validators.contains_draft6_draft7,
+ "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7,
+ "enum": _validators.enum,
+ "exclusiveMaximum": _validators.exclusiveMaximum,
+ "exclusiveMinimum": _validators.exclusiveMinimum,
+ "format": _validators.format,
+ "items": _legacy_validators.items_draft6_draft7_draft201909,
+ "maxItems": _validators.maxItems,
+ "maxLength": _validators.maxLength,
+ "maxProperties": _validators.maxProperties,
+ "maximum": _validators.maximum,
+ "minItems": _validators.minItems,
+ "minLength": _validators.minLength,
+ "minProperties": _validators.minProperties,
+ "minimum": _validators.minimum,
+ "multipleOf": _validators.multipleOf,
+ "not": _validators.not_,
+ "oneOf": _validators.oneOf,
+ "pattern": _validators.pattern,
+ "patternProperties": _validators.patternProperties,
+ "properties": _validators.properties,
+ "propertyNames": _validators.propertyNames,
+ "required": _validators.required,
+ "type": _validators.type,
+ "uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft6_type_checker,
+ format_checker=_format.draft6_format_checker,
+ version="draft6",
+ id_of=_legacy_validators.id_of_ignore_ref(),
+ applicable_validators=_legacy_validators.ignore_ref_siblings,
+)
+
+Draft7Validator = create(
+ meta_schema=_utils.load_schema("draft7"),
+ validators={
+ "$ref": _validators.ref,
+ "additionalItems": _validators.additionalItems,
+ "additionalProperties": _validators.additionalProperties,
+ "allOf": _validators.allOf,
+ "anyOf": _validators.anyOf,
+ "const": _validators.const,
+ "contains": _legacy_validators.contains_draft6_draft7,
+ "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7,
+ "enum": _validators.enum,
+ "exclusiveMaximum": _validators.exclusiveMaximum,
+ "exclusiveMinimum": _validators.exclusiveMinimum,
+ "format": _validators.format,
+ "if": _validators.if_,
+ "items": _legacy_validators.items_draft6_draft7_draft201909,
+ "maxItems": _validators.maxItems,
+ "maxLength": _validators.maxLength,
+ "maxProperties": _validators.maxProperties,
+ "maximum": _validators.maximum,
+ "minItems": _validators.minItems,
+ "minLength": _validators.minLength,
+ "minProperties": _validators.minProperties,
+ "minimum": _validators.minimum,
+ "multipleOf": _validators.multipleOf,
+ "not": _validators.not_,
+ "oneOf": _validators.oneOf,
+ "pattern": _validators.pattern,
+ "patternProperties": _validators.patternProperties,
+ "properties": _validators.properties,
+ "propertyNames": _validators.propertyNames,
+ "required": _validators.required,
+ "type": _validators.type,
+ "uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft7_type_checker,
+ format_checker=_format.draft7_format_checker,
+ version="draft7",
+ id_of=_legacy_validators.id_of_ignore_ref(),
+ applicable_validators=_legacy_validators.ignore_ref_siblings,
+)
+
+Draft201909Validator = create(
+ meta_schema=_utils.load_schema("draft2019-09"),
+ validators={
+ "$recursiveRef": _legacy_validators.recursiveRef,
+ "$ref": _validators.ref,
+ "additionalItems": _validators.additionalItems,
+ "additionalProperties": _validators.additionalProperties,
+ "allOf": _validators.allOf,
+ "anyOf": _validators.anyOf,
+ "const": _validators.const,
+ "contains": _validators.contains,
+ "dependentRequired": _validators.dependentRequired,
+ "dependentSchemas": _validators.dependentSchemas,
+ "enum": _validators.enum,
+ "exclusiveMaximum": _validators.exclusiveMaximum,
+ "exclusiveMinimum": _validators.exclusiveMinimum,
+ "format": _validators.format,
+ "if": _validators.if_,
+ "items": _legacy_validators.items_draft6_draft7_draft201909,
+ "maxItems": _validators.maxItems,
+ "maxLength": _validators.maxLength,
+ "maxProperties": _validators.maxProperties,
+ "maximum": _validators.maximum,
+ "minItems": _validators.minItems,
+ "minLength": _validators.minLength,
+ "minProperties": _validators.minProperties,
+ "minimum": _validators.minimum,
+ "multipleOf": _validators.multipleOf,
+ "not": _validators.not_,
+ "oneOf": _validators.oneOf,
+ "pattern": _validators.pattern,
+ "patternProperties": _validators.patternProperties,
+ "properties": _validators.properties,
+ "propertyNames": _validators.propertyNames,
+ "required": _validators.required,
+ "type": _validators.type,
+ "unevaluatedItems": _legacy_validators.unevaluatedItems_draft2019,
+ "unevaluatedProperties": _validators.unevaluatedProperties,
+ "uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft201909_type_checker,
+ format_checker=_format.draft201909_format_checker,
+ version="draft2019-09",
+)
+
+Draft202012Validator = create(
+ meta_schema=_utils.load_schema("draft2020-12"),
+ validators={
+ "$dynamicRef": _validators.dynamicRef,
+ "$ref": _validators.ref,
+ "additionalItems": _validators.additionalItems,
+ "additionalProperties": _validators.additionalProperties,
+ "allOf": _validators.allOf,
+ "anyOf": _validators.anyOf,
+ "const": _validators.const,
+ "contains": _validators.contains,
+ "dependentRequired": _validators.dependentRequired,
+ "dependentSchemas": _validators.dependentSchemas,
+ "enum": _validators.enum,
+ "exclusiveMaximum": _validators.exclusiveMaximum,
+ "exclusiveMinimum": _validators.exclusiveMinimum,
+ "format": _validators.format,
+ "if": _validators.if_,
+ "items": _validators.items,
+ "maxItems": _validators.maxItems,
+ "maxLength": _validators.maxLength,
+ "maxProperties": _validators.maxProperties,
+ "maximum": _validators.maximum,
+ "minItems": _validators.minItems,
+ "minLength": _validators.minLength,
+ "minProperties": _validators.minProperties,
+ "minimum": _validators.minimum,
+ "multipleOf": _validators.multipleOf,
+ "not": _validators.not_,
+ "oneOf": _validators.oneOf,
+ "pattern": _validators.pattern,
+ "patternProperties": _validators.patternProperties,
+ "prefixItems": _validators.prefixItems,
+ "properties": _validators.properties,
+ "propertyNames": _validators.propertyNames,
+ "required": _validators.required,
+ "type": _validators.type,
+ "unevaluatedItems": _validators.unevaluatedItems,
+ "unevaluatedProperties": _validators.unevaluatedProperties,
+ "uniqueItems": _validators.uniqueItems,
+ },
+ type_checker=_types.draft202012_type_checker,
+ format_checker=_format.draft202012_format_checker,
+ version="draft2020-12",
+)
+
+_LATEST_VERSION = Draft202012Validator
+
+
+class RefResolver:
+ """
+ Resolve JSON References.
+
+ Arguments:
+
+ base_uri (str):
+
+ The URI of the referring document
+
+ referrer:
+
+ The actual referring document
+
+ store (dict):
+
+ A mapping from URIs to documents to cache
+
+ cache_remote (bool):
+
+ Whether remote refs should be cached after first resolution
+
+ handlers (dict):
+
+ A mapping from URI schemes to functions that should be used
+ to retrieve them
+
+ urljoin_cache (:func:`functools.lru_cache`):
+
+ A cache that will be used for caching the results of joining
+ the resolution scope to subscopes.
+
+ remote_cache (:func:`functools.lru_cache`):
+
+ A cache that will be used for caching the results of
+ resolved remote URLs.
+
+ Attributes:
+
+ cache_remote (bool):
+
+ Whether remote refs should be cached after first resolution
+ """
+
+ def __init__(
+ self,
+ base_uri,
+ referrer,
+ store=m(),
+ cache_remote=True,
+ handlers=(),
+ urljoin_cache=None,
+ remote_cache=None,
+ ):
+ if urljoin_cache is None:
+ urljoin_cache = lru_cache(1024)(urljoin)
+ if remote_cache is None:
+ remote_cache = lru_cache(1024)(self.resolve_from_url)
+
+ self.referrer = referrer
+ self.cache_remote = cache_remote
+ self.handlers = dict(handlers)
+
+ self._scopes_stack = [base_uri]
+
+ self.store = _utils.URIDict(_store_schema_list())
+ self.store.update(store)
+ self.store.update(
+ (schema["$id"], schema)
+ for schema in store.values()
+ if isinstance(schema, Mapping) and "$id" in schema
+ )
+ self.store[base_uri] = referrer
+
+ self._urljoin_cache = urljoin_cache
+ self._remote_cache = remote_cache
+
+ @classmethod
+ def from_schema(cls, schema, id_of=_id_of, *args, **kwargs):
+ """
+ Construct a resolver from a JSON schema object.
+
+ Arguments:
+
+ schema:
+
+ the referring schema
+
+ Returns:
+
+ `RefResolver`
+ """
+
+ return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs) # noqa: B026, E501
+
+ def push_scope(self, scope):
+ """
+ Enter a given sub-scope.
+
+ Treats further dereferences as being performed underneath the
+ given scope.
+ """
+ self._scopes_stack.append(
+ self._urljoin_cache(self.resolution_scope, scope),
+ )
+
+ def pop_scope(self):
+ """
+ Exit the most recently entered scope.
+
+ Treats further dereferences as being performed underneath the
+ original scope.
+
+ Don't call this method more times than `push_scope` has been
+ called.
+ """
+ try:
+ self._scopes_stack.pop()
+ except IndexError:
+ raise exceptions.RefResolutionError(
+ "Failed to pop the scope from an empty stack. "
+ "`pop_scope()` should only be called once for every "
+ "`push_scope()`",
+ )
+
+ @property
+ def resolution_scope(self):
+ """
+ Retrieve the current resolution scope.
+ """
+ return self._scopes_stack[-1]
+
+ @property
+ def base_uri(self):
+ """
+ Retrieve the current base URI, not including any fragment.
+ """
+ uri, _ = urldefrag(self.resolution_scope)
+ return uri
+
+ @contextlib.contextmanager
+ def in_scope(self, scope):
+ """
+ Temporarily enter the given scope for the duration of the context.
+
+ .. deprecated:: v4.0.0
+ """
+ warnings.warn(
+ "jsonschema.RefResolver.in_scope is deprecated and will be "
+ "removed in a future release.",
+ DeprecationWarning,
+ stacklevel=3,
+ )
+ self.push_scope(scope)
+ try:
+ yield
+ finally:
+ self.pop_scope()
+
+ @contextlib.contextmanager
+ def resolving(self, ref):
+ """
+ Resolve the given ``ref`` and enter its resolution scope.
+
+ Exits the scope on exit of this context manager.
+
+ Arguments:
+
+ ref (str):
+
+ The reference to resolve
+ """
+
+ url, resolved = self.resolve(ref)
+ self.push_scope(url)
+ try:
+ yield resolved
+ finally:
+ self.pop_scope()
+
+ def _find_in_referrer(self, key):
+ return self._get_subschemas_cache()[key]
+
+ @lru_cache() # noqa: B019
+ def _get_subschemas_cache(self):
+ cache = {key: [] for key in _SUBSCHEMAS_KEYWORDS}
+ for keyword, subschema in _search_schema(
+ self.referrer, _match_subschema_keywords,
+ ):
+ cache[keyword].append(subschema)
+ return cache
+
+ @lru_cache() # noqa: B019
+ def _find_in_subschemas(self, url):
+ subschemas = self._get_subschemas_cache()["$id"]
+ if not subschemas:
+ return None
+ uri, fragment = urldefrag(url)
+ for subschema in subschemas:
+ target_uri = self._urljoin_cache(
+ self.resolution_scope, subschema["$id"],
+ )
+ if target_uri.rstrip("/") == uri.rstrip("/"):
+ if fragment:
+ subschema = self.resolve_fragment(subschema, fragment)
+ self.store[url] = subschema
+ return url, subschema
+ return None
+
+ def resolve(self, ref):
+ """
+ Resolve the given reference.
+ """
+ url = self._urljoin_cache(self.resolution_scope, ref).rstrip("/")
+
+ match = self._find_in_subschemas(url)
+ if match is not None:
+ return match
+
+ return url, self._remote_cache(url)
+
+ def resolve_from_url(self, url):
+ """
+ Resolve the given URL.
+ """
+ url, fragment = urldefrag(url)
+ if not url:
+ url = self.base_uri
+
+ try:
+ document = self.store[url]
+ except KeyError:
+ try:
+ document = self.resolve_remote(url)
+ except Exception as exc:
+ raise exceptions.RefResolutionError(exc)
+
+ return self.resolve_fragment(document, fragment)
+
+ def resolve_fragment(self, document, fragment):
+ """
+ Resolve a ``fragment`` within the referenced ``document``.
+
+ Arguments:
+
+ document:
+
+ The referent document
+
+ fragment (str):
+
+ a URI fragment to resolve within it
+ """
+
+ fragment = fragment.lstrip("/")
+
+ if not fragment:
+ return document
+
+ if document is self.referrer:
+ find = self._find_in_referrer
+ else:
+
+ def find(key):
+ yield from _search_schema(document, _match_keyword(key))
+
+ for keyword in ["$anchor", "$dynamicAnchor"]:
+ for subschema in find(keyword):
+ if fragment == subschema[keyword]:
+ return subschema
+ for keyword in ["id", "$id"]:
+ for subschema in find(keyword):
+ if "#" + fragment == subschema[keyword]:
+ return subschema
+
+ # Resolve via path
+ parts = unquote(fragment).split("/") if fragment else []
+ for part in parts:
+ part = part.replace("~1", "/").replace("~0", "~")
+
+ if isinstance(document, Sequence):
+ # Array indexes should be turned into integers
+ try:
+ part = int(part)
+ except ValueError:
+ pass
+ try:
+ document = document[part]
+ except (TypeError, LookupError):
+ raise exceptions.RefResolutionError(
+ f"Unresolvable JSON pointer: {fragment!r}",
+ )
+
+ return document
+
+ def resolve_remote(self, uri):
+ """
+ Resolve a remote ``uri``.
+
+ If called directly, does not check the store first, but after
+ retrieving the document at the specified URI it will be saved in
+ the store if :attr:`cache_remote` is True.
+
+ .. note::
+
+ If the requests_ library is present, ``jsonschema`` will use it to
+ request the remote ``uri``, so that the correct encoding is
+ detected and used.
+
+ If it isn't, or if the scheme of the ``uri`` is not ``http`` or
+ ``https``, UTF-8 is assumed.
+
+ Arguments:
+
+ uri (str):
+
+ The URI to resolve
+
+ Returns:
+
+ The retrieved document
+
+ .. _requests: https://pypi.org/project/requests/
+ """
+ try:
+ import requests
+ except ImportError:
+ requests = None
+
+ scheme = urlsplit(uri).scheme
+
+ if scheme in self.handlers:
+ result = self.handlers[scheme](uri)
+ elif scheme in ["http", "https"] and requests:
+ # Requests has support for detecting the correct encoding of
+ # json over http
+ result = requests.get(uri).json()
+ else:
+ # Otherwise, pass off to urllib and assume utf-8
+ with urlopen(uri) as url:
+ result = json.loads(url.read().decode("utf-8"))
+
+ if self.cache_remote:
+ self.store[uri] = result
+ return result
+
+
+_SUBSCHEMAS_KEYWORDS = ("$id", "id", "$anchor", "$dynamicAnchor")
+
+
+def _match_keyword(keyword):
+
+ def matcher(value):
+ if keyword in value:
+ yield value
+
+ return matcher
+
+
+def _match_subschema_keywords(value):
+ for keyword in _SUBSCHEMAS_KEYWORDS:
+ if keyword in value:
+ yield keyword, value
+
+
+def _search_schema(schema, matcher):
+ """Breadth-first search routine."""
+ values = deque([schema])
+ while values:
+ value = values.pop()
+ if not isinstance(value, dict):
+ continue
+ yield from matcher(value)
+ values.extendleft(value.values())
+
+
+def validate(instance, schema, cls=None, *args, **kwargs):
+ """
+ Validate an instance under the given schema.
+
+ >>> validate([2, 3, 4], {"maxItems": 2})
+ Traceback (most recent call last):
+ ...
+ ValidationError: [2, 3, 4] is too long
+
+ :func:`~jsonschema.validators.validate` will first verify that the
+ provided schema is itself valid, since not doing so can lead to less
+ obvious error messages and fail in less obvious or consistent ways.
+
+ If you know you have a valid schema already, especially
+ if you intend to validate multiple instances with
+ the same schema, you likely would prefer using the
+ `jsonschema.protocols.Validator.validate` method directly on a
+ specific validator (e.g. ``Draft202012Validator.validate``).
+
+
+ Arguments:
+
+ instance:
+
+ The instance to validate
+
+ schema:
+
+ The schema to validate with
+
+ cls (jsonschema.protocols.Validator):
+
+ The class that will be used to validate the instance.
+
+ If the ``cls`` argument is not provided, two things will happen
+ in accordance with the specification. First, if the schema has a
+ :kw:`$schema` keyword containing a known meta-schema [#]_ then the
+ proper validator will be used. The specification recommends that
+ all schemas contain :kw:`$schema` properties for this reason. If no
+ :kw:`$schema` property is found, the default validator class is the
+ latest released draft.
+
+ Any other provided positional and keyword arguments will be passed
+ on when instantiating the ``cls``.
+
+ Raises:
+
+ `jsonschema.exceptions.ValidationError`:
+
+ if the instance is invalid
+
+ `jsonschema.exceptions.SchemaError`:
+
+ if the schema itself is invalid
+
+ .. rubric:: Footnotes
+ .. [#] known by a validator registered with
+ `jsonschema.validators.validates`
+ """
+ if cls is None:
+ cls = validator_for(schema)
+
+ cls.check_schema(schema)
+ validator = cls(schema, *args, **kwargs)
+ error = exceptions.best_match(validator.iter_errors(instance))
+ if error is not None:
+ raise error
+
+
+def validator_for(schema, default=_UNSET):
+ """
+ Retrieve the validator class appropriate for validating the given schema.
+
+ Uses the :kw:`$schema` keyword that should be present in the given
+ schema to look up the appropriate validator class.
+
+ Arguments:
+
+ schema (collections.abc.Mapping or bool):
+
+ the schema to look at
+
+ default:
+
+ the default to return if the appropriate validator class
+ cannot be determined.
+
+ If unprovided, the default is to return the latest supported
+ draft.
+ """
+
+ DefaultValidator = _LATEST_VERSION if default is _UNSET else default
+
+ if schema is True or schema is False or "$schema" not in schema:
+ return DefaultValidator
+ if schema["$schema"] not in _META_SCHEMAS:
+ if default is _UNSET:
+ warn(
+ (
+ "The metaschema specified by $schema was not found. "
+ "Using the latest draft to validate, but this will raise "
+ "an error in the future."
+ ),
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _META_SCHEMAS.get(schema["$schema"], DefaultValidator)
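`create` and `extend` above are the supported hooks for custom keywords. A sketch wiring a hypothetical `even` keyword (name and semantics invented for illustration) into a draft 2020-12 validator, following the four-argument callable signature documented in `create`, plus the `$schema` dispatch that `validate` performs via `validator_for`:

```python
from jsonschema import exceptions, validators

def even(validator, is_even, instance, schema):
    # (validator, keyword value, instance, schema), per create()'s docstring
    if is_even and validator.is_type(instance, "integer") and instance % 2:
        yield exceptions.ValidationError(f"{instance!r} is not even")

EvenValidator = validators.extend(
    validators.Draft202012Validator,
    validators={"even": even},
)

v = EvenValidator({"type": "integer", "even": True})
assert v.is_valid(4)
assert not v.is_valid(3)

# validator_for() picks the class from $schema, as validate() does internally.
cls = validators.validator_for(
    {"$schema": "https://json-schema.org/draft/2020-12/schema"},
)
assert cls is validators.Draft202012Validator
```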
diff --git a/third_party/python/looseversion/looseversion-1.0.1.dist-info/LICENSE b/third_party/python/looseversion/looseversion-1.0.1.dist-info/LICENSE
new file mode 100644
index 0000000000..a9b2196fd1
--- /dev/null
+++ b/third_party/python/looseversion/looseversion-1.0.1.dist-info/LICENSE
@@ -0,0 +1,48 @@
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
diff --git a/third_party/python/looseversion/looseversion-1.0.1.dist-info/METADATA b/third_party/python/looseversion/looseversion-1.0.1.dist-info/METADATA
new file mode 100644
index 0000000000..2ba9f265d0
--- /dev/null
+++ b/third_party/python/looseversion/looseversion-1.0.1.dist-info/METADATA
@@ -0,0 +1,56 @@
+Metadata-Version: 2.1
+Name: looseversion
+Version: 1.0.1
+Summary: Version numbering for anarchists and software realists
+Home-page: https://github.com/effigies/looseversion
+Author: Chris Markiewicz
+Author-email: effigies@gmail.com
+License: PSF-2.0
+Classifier: Programming Language :: Python :: 3
+Classifier: Development Status :: 6 - Mature
+Classifier: License :: OSI Approved :: Python Software Foundation License
+Requires-Python: >=3
+Description-Content-Type: text/markdown
+License-File: LICENSE
+
+# looseversion - Version numbering for anarchists and software realists
+
+A backwards/forwards-compatible fork of `distutils.version.LooseVersion`,
+for times when PEP-440 isn't what you need.
+
+The goal of this package is to be a drop-in replacement for the original `LooseVersion`.
+It implements an identical interface and comparison logic to `LooseVersion`.
+The only major change is that a `looseversion.LooseVersion` is comparable to a
+`distutils.version.LooseVersion`, which means tools should not need to worry whether
+all dependencies that use LooseVersion have migrated.
+
+If you are simply comparing versions of Python packages, consider moving to
+[packaging.version.Version](https://packaging.pypa.io/en/latest/version.html#packaging.version.Version),
+which follows [PEP-440](https://peps.python.org/pep-0440).
+`LooseVersion` is better suited to interacting with heterogeneous version schemes that
+do not follow PEP-440.
+
+## Installation
+
+### From PyPI
+
+```
+pip install looseversion
+```
+
+### From source
+
+```
+git clone https://github.com/effigies/looseversion.git
+pip install looseversion/
+```
+
+## Usage
+
+```Python
+>>> from looseversion import LooseVersion
+>>> LooseVersion("1.0.0") < LooseVersion("2.0.0")
+True
+>>> LooseVersion("1.0.0") < "2"
+True
+```
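+
+Because a `looseversion.LooseVersion` compares directly against a
+`distutils.version.LooseVersion`, mixed comparisons also work. A quick
+sketch (this assumes `distutils` is still importable, which is no longer
+the case on Python 3.12+):
+
+```Python
+>>> from distutils.version import LooseVersion as OldLooseVersion
+>>> from looseversion import LooseVersion
+>>> LooseVersion("1.0.0") < OldLooseVersion("2.0.0")
+True
+```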
diff --git a/third_party/python/looseversion/looseversion-1.0.1.dist-info/RECORD b/third_party/python/looseversion/looseversion-1.0.1.dist-info/RECORD
new file mode 100644
index 0000000000..8cdae607b9
--- /dev/null
+++ b/third_party/python/looseversion/looseversion-1.0.1.dist-info/RECORD
@@ -0,0 +1,6 @@
+looseversion.py,sha256=ZcTnLvMPdx3yVGbgcaUuwK3-s40QkaOR0_usF_VbrHU,8029
+looseversion-1.0.1.dist-info/LICENSE,sha256=9PgMmBYfVjIATURxO1y5XkABRbQMvAKX8fUMJ7VL79s,2490
+looseversion-1.0.1.dist-info/METADATA,sha256=-c48feSKsGGyLOWaWQfPNMawhA6OGKNoy5PjUhLlCk8,1757
+looseversion-1.0.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+looseversion-1.0.1.dist-info/top_level.txt,sha256=gZsH8AUlCFqOEpKD_foyCUB2uKao5ePwjMqWWO7hpoM,13
+looseversion-1.0.1.dist-info/RECORD,,
diff --git a/third_party/python/looseversion/looseversion-1.0.1.dist-info/WHEEL b/third_party/python/looseversion/looseversion-1.0.1.dist-info/WHEEL
new file mode 100644
index 0000000000..becc9a66ea
--- /dev/null
+++ b/third_party/python/looseversion/looseversion-1.0.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/looseversion/looseversion-1.0.1.dist-info/top_level.txt b/third_party/python/looseversion/looseversion-1.0.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..c08202104d
--- /dev/null
+++ b/third_party/python/looseversion/looseversion-1.0.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+looseversion
diff --git a/third_party/python/looseversion/looseversion.py b/third_party/python/looseversion/looseversion.py
new file mode 100644
index 0000000000..c75bdf3878
--- /dev/null
+++ b/third_party/python/looseversion/looseversion.py
@@ -0,0 +1,204 @@
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering). There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+
+Every version number class implements the following interface:
+ * the 'parse' method takes a string and parses it to some internal
+ representation; if the string is an invalid version number,
+ 'parse' raises a ValueError exception
+ * the class constructor takes an optional string argument which,
+ if supplied, is passed to 'parse'
+ * __str__ reconstructs the string that was passed to 'parse' (or
+ an equivalent string -- ie. one that will generate an equivalent
+ version number instance)
+ * __repr__ generates Python code to recreate the version number instance
+ * _cmp compares the current instance with either another instance
+ of the same class or a string (which will be parsed to an instance
+ of the same class, thus must follow the same rules)
+"""
+
+import sys
+import re
+
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separated by a period or by
+# sequences of letters. If only periods, then these are compared
+# left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+# compared lexicographically
+# 3) recognize the numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison. This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave. There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+# - indicating a "pre-release" version
+# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+# - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking him.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number. The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison. This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct. Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind. The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in distutils' StrictVersion class. This works great as long
+# as everyone can go along with bondage and discipline. Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination. The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings. This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++". I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2". That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers. It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception). But I'd rather
+# have a conception that matches common notions about version numbers.
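+#
+# For illustration (values obtained from the component_re split below):
+# "1.5.2b2" parses to [1, 5, 2, 'b', 2] and "0.99pl14" to
+# [0, 99, 'pl', 14], so LooseVersion("0.99") < LooseVersion("0.99pl14")
+# < LooseVersion("1.0") holds by ordinary tuple comparison.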
+
+
+class LooseVersion:
+
+ """Version numbering for anarchists and software realists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of a series of numbers,
+ separated by either periods or strings of letters. When comparing
+ version numbers, the numeric components will be compared
+ numerically, and the alphabetic components lexically. The following
+ are all valid version numbers, in no particular order:
+
+ 1.5.1
+ 1.5.2b2
+ 161
+ 3.10a
+ 8.02
+ 3.4j
+ 1996.07.12
+ 3.2.pl0
+ 3.1.1.6
+ 2g6
+ 11g
+ 0.960923
+ 2.2beta29
+ 1.13++
+ 5.5.kw
+ 2.0b1pl0
+
+ In fact, there is no such thing as an invalid version number under
+ this scheme; the rules for comparison are simple and predictable,
+ but may not always give the results you want (for some definition
+ of "want").
+ """
+
+ component_re = re.compile(r"(\d+ | [a-z]+ | \.)", re.VERBOSE)
+
+ def __init__(self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def __eq__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c == 0
+
+ def __lt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c < 0
+
+ def __le__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c <= 0
+
+ def __gt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c > 0
+
+ def __ge__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c >= 0
+
+ def parse(self, vstring):
+ # I've given up on thinking I can reconstruct the version string
+ # from the parsed tuple -- so I just store the string here for
+ # use by __str__
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring) if x and x != "."]
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ self.version = components
+
+ def __str__(self):
+ return self.vstring
+
+ def __repr__(self):
+ return "LooseVersion ('%s')" % str(self)
+
+ def _cmp(self, other):
+ other = self._coerce(other)
+ if other is NotImplemented:
+ return NotImplemented
+
+ if self.version == other.version:
+ return 0
+ if self.version < other.version:
+ return -1
+ if self.version > other.version:
+ return 1
+
+ @staticmethod
+ def _coerce(other):
+ if isinstance(other, LooseVersion):
+ return other
+ elif isinstance(other, str):
+ return LooseVersion(other)
+ elif "distutils" in sys.modules:
+            # Only coerce distutils' LooseVersion when distutils is already
+            # imported; importing it here would trigger its deprecation warning.
+ try:
+ from distutils.version import LooseVersion as deprecated
+ except ImportError:
+ return NotImplemented
+ if isinstance(other, deprecated):
+ return LooseVersion(str(other))
+ return NotImplemented
diff --git a/third_party/python/mohawk/PKG-INFO b/third_party/python/mohawk/PKG-INFO
new file mode 100644
index 0000000000..131f03cfc5
--- /dev/null
+++ b/third_party/python/mohawk/PKG-INFO
@@ -0,0 +1,19 @@
+Metadata-Version: 1.1
+Name: mohawk
+Version: 0.3.4
+Summary: Library for Hawk HTTP authorization
+Home-page: https://github.com/kumar303/mohawk
+Author: Kumar McMillan, Austin King
+Author-email: kumar.mcmillan@gmail.com
+License: MPL 2.0 (Mozilla Public License)
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Topic :: Internet :: WWW/HTTP
diff --git a/third_party/python/mohawk/README.rst b/third_party/python/mohawk/README.rst
new file mode 100644
index 0000000000..e53a8f7e3e
--- /dev/null
+++ b/third_party/python/mohawk/README.rst
@@ -0,0 +1,25 @@
+======
+Mohawk
+======
+.. image:: https://img.shields.io/pypi/v/mohawk.svg
+ :target: https://pypi.python.org/pypi/mohawk
+ :alt: Latest PyPI release
+
+.. image:: https://img.shields.io/pypi/dm/mohawk.svg
+ :target: https://pypi.python.org/pypi/mohawk
+ :alt: PyPI monthly download stats
+
+.. image:: https://travis-ci.org/kumar303/mohawk.svg?branch=master
+ :target: https://travis-ci.org/kumar303/mohawk
+ :alt: Travis master branch status
+
+.. image:: https://readthedocs.org/projects/mohawk/badge/?version=latest
+ :target: https://mohawk.readthedocs.io/en/latest/?badge=latest
+ :alt: Documentation status
+
+Mohawk is an alternate Python implementation of the
+`Hawk HTTP authorization scheme`_.
+
+Full documentation: https://mohawk.readthedocs.io/
+
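+A minimal request/response round trip (a sketch mirroring this package's
+own test suite; the credential values are illustrative):
+
+.. code-block:: python
+
+    from mohawk import Receiver, Sender
+
+    credentials = {'id': 'some-id', 'key': 'some secret', 'algorithm': 'sha256'}
+    url, method = 'https://my-site.com/', 'POST'
+
+    # The client signs the request; the header goes in Authorization.
+    sender = Sender(credentials, url, method,
+                    content='foo=bar',
+                    content_type='application/x-www-form-urlencoded')
+
+    # The server looks up credentials by id and verifies the request.
+    receiver = Receiver(lambda id: credentials, sender.request_header,
+                        url, method,
+                        content='foo=bar',
+                        content_type='application/x-www-form-urlencoded')
+
+    # The server signs its response; the client verifies it.
+    receiver.respond(content='ok', content_type='text/plain')
+    sender.accept_response(receiver.response_header,
+                           content='ok', content_type='text/plain')
+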
+.. _`Hawk HTTP authorization scheme`: https://github.com/hueniverse/hawk
diff --git a/third_party/python/mohawk/mohawk.egg-info/PKG-INFO b/third_party/python/mohawk/mohawk.egg-info/PKG-INFO
new file mode 100644
index 0000000000..131f03cfc5
--- /dev/null
+++ b/third_party/python/mohawk/mohawk.egg-info/PKG-INFO
@@ -0,0 +1,19 @@
+Metadata-Version: 1.1
+Name: mohawk
+Version: 0.3.4
+Summary: Library for Hawk HTTP authorization
+Home-page: https://github.com/kumar303/mohawk
+Author: Kumar McMillan, Austin King
+Author-email: kumar.mcmillan@gmail.com
+License: MPL 2.0 (Mozilla Public License)
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Topic :: Internet :: WWW/HTTP
diff --git a/third_party/python/mohawk/mohawk.egg-info/SOURCES.txt b/third_party/python/mohawk/mohawk.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..880beddbc4
--- /dev/null
+++ b/third_party/python/mohawk/mohawk.egg-info/SOURCES.txt
@@ -0,0 +1,15 @@
+README.rst
+setup.py
+mohawk/__init__.py
+mohawk/base.py
+mohawk/bewit.py
+mohawk/exc.py
+mohawk/receiver.py
+mohawk/sender.py
+mohawk/tests.py
+mohawk/util.py
+mohawk.egg-info/PKG-INFO
+mohawk.egg-info/SOURCES.txt
+mohawk.egg-info/dependency_links.txt
+mohawk.egg-info/requires.txt
+mohawk.egg-info/top_level.txt \ No newline at end of file
diff --git a/third_party/python/mohawk/mohawk.egg-info/dependency_links.txt b/third_party/python/mohawk/mohawk.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/mohawk/mohawk.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/mohawk/mohawk.egg-info/requires.txt b/third_party/python/mohawk/mohawk.egg-info/requires.txt
new file mode 100644
index 0000000000..ffe2fce498
--- /dev/null
+++ b/third_party/python/mohawk/mohawk.egg-info/requires.txt
@@ -0,0 +1 @@
+six
diff --git a/third_party/python/mohawk/mohawk.egg-info/top_level.txt b/third_party/python/mohawk/mohawk.egg-info/top_level.txt
new file mode 100644
index 0000000000..2b859fd06c
--- /dev/null
+++ b/third_party/python/mohawk/mohawk.egg-info/top_level.txt
@@ -0,0 +1 @@
+mohawk
diff --git a/third_party/python/mohawk/mohawk/__init__.py b/third_party/python/mohawk/mohawk/__init__.py
new file mode 100644
index 0000000000..a79e7b7164
--- /dev/null
+++ b/third_party/python/mohawk/mohawk/__init__.py
@@ -0,0 +1,2 @@
+from .sender import *
+from .receiver import *
diff --git a/third_party/python/mohawk/mohawk/base.py b/third_party/python/mohawk/mohawk/base.py
new file mode 100644
index 0000000000..4935110568
--- /dev/null
+++ b/third_party/python/mohawk/mohawk/base.py
@@ -0,0 +1,230 @@
+import logging
+import math
+import pprint
+
+import six
+from six.moves.urllib.parse import urlparse
+
+from .exc import (AlreadyProcessed,
+ MacMismatch,
+ MisComputedContentHash,
+ TokenExpired)
+from .util import (calculate_mac,
+ calculate_payload_hash,
+ calculate_ts_mac,
+ prepare_header_val,
+ random_string,
+ strings_match,
+ utc_now)
+
+default_ts_skew_in_seconds = 60
+log = logging.getLogger(__name__)
+
+
+class HawkAuthority:
+
+ def _authorize(self, mac_type, parsed_header, resource,
+ their_timestamp=None,
+ timestamp_skew_in_seconds=default_ts_skew_in_seconds,
+ localtime_offset_in_seconds=0,
+ accept_untrusted_content=False):
+
+ now = utc_now(offset_in_seconds=localtime_offset_in_seconds)
+
+ their_hash = parsed_header.get('hash', '')
+ their_mac = parsed_header.get('mac', '')
+ mac = calculate_mac(mac_type, resource, their_hash)
+ if not strings_match(mac, their_mac):
+ raise MacMismatch('MACs do not match; ours: {ours}; '
+ 'theirs: {theirs}'
+ .format(ours=mac, theirs=their_mac))
+
+ if 'hash' not in parsed_header and accept_untrusted_content:
+ # The request did not hash its content.
+            log.debug('NOT calculating/verifying payload hash '
+ '(no hash in header)')
+ check_hash = False
+ content_hash = None
+ else:
+ check_hash = True
+ content_hash = resource.gen_content_hash()
+
+ if check_hash and not their_hash:
+ log.info('request unexpectedly did not hash its content')
+
+ if check_hash:
+ if not strings_match(content_hash, their_hash):
+ # The hash declared in the header is incorrect.
+ # Content could have been tampered with.
+ log.debug('mismatched content: {content}'
+ .format(content=repr(resource.content)))
+ log.debug('mismatched content-type: {typ}'
+ .format(typ=repr(resource.content_type)))
+ raise MisComputedContentHash(
+ 'Our hash {ours} ({algo}) did not '
+ 'match theirs {theirs}'
+ .format(ours=content_hash,
+ theirs=their_hash,
+ algo=resource.credentials['algorithm']))
+
+ if resource.seen_nonce:
+ if resource.seen_nonce(resource.credentials['id'],
+ parsed_header['nonce'],
+ parsed_header['ts']):
+ raise AlreadyProcessed('Nonce {nonce} with timestamp {ts} '
+ 'has already been processed for {id}'
+ .format(nonce=parsed_header['nonce'],
+ ts=parsed_header['ts'],
+ id=resource.credentials['id']))
+ else:
+            log.warning('seen_nonce was None; not checking nonce. '
+                        'You may be vulnerable to replay attacks')
+
+ their_ts = int(their_timestamp or parsed_header['ts'])
+
+ if math.fabs(their_ts - now) > timestamp_skew_in_seconds:
+ message = ('token with UTC timestamp {ts} has expired; '
+ 'it was compared to {now}'
+ .format(ts=their_ts, now=now))
+ tsm = calculate_ts_mac(now, resource.credentials)
+ if isinstance(tsm, six.binary_type):
+ tsm = tsm.decode('ascii')
+ www_authenticate = ('Hawk ts="{ts}", tsm="{tsm}", error="{error}"'
+ .format(ts=now, tsm=tsm, error=message))
+ raise TokenExpired(message,
+ localtime_in_seconds=now,
+ www_authenticate=www_authenticate)
+
+ log.debug('authorized OK')
+
+ def _make_header(self, resource, mac, additional_keys=None):
+ keys = additional_keys
+ if not keys:
+ # These are the default header keys that you'd send with a
+ # request header. Response headers are odd because they
+ # exclude a bunch of keys.
+ keys = ('id', 'ts', 'nonce', 'ext', 'app', 'dlg')
+
+ header = u'Hawk mac="{mac}"'.format(mac=prepare_header_val(mac))
+
+ if resource.content_hash:
+ header = u'{header}, hash="{hash}"'.format(
+ header=header,
+ hash=prepare_header_val(resource.content_hash))
+
+ if 'id' in keys:
+ header = u'{header}, id="{id}"'.format(
+ header=header,
+ id=prepare_header_val(resource.credentials['id']))
+
+ if 'ts' in keys:
+ header = u'{header}, ts="{ts}"'.format(
+ header=header, ts=prepare_header_val(resource.timestamp))
+
+ if 'nonce' in keys:
+ header = u'{header}, nonce="{nonce}"'.format(
+ header=header, nonce=prepare_header_val(resource.nonce))
+
+ # These are optional so we need to check if they have values first.
+
+ if 'ext' in keys and resource.ext:
+ header = u'{header}, ext="{ext}"'.format(
+ header=header, ext=prepare_header_val(resource.ext))
+
+ if 'app' in keys and resource.app:
+ header = u'{header}, app="{app}"'.format(
+ header=header, app=prepare_header_val(resource.app))
+
+ if 'dlg' in keys and resource.dlg:
+ header = u'{header}, dlg="{dlg}"'.format(
+ header=header, dlg=prepare_header_val(resource.dlg))
+
+ log.debug('Hawk header for URL={url} method={method}: {header}'
+ .format(url=resource.url, method=resource.method,
+ header=header))
+ return header
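+
+    # For reference, a full request header produced above looks roughly
+    # like this (values are illustrative, not real credentials or MACs):
+    #   Hawk mac="dh3K...", hash="Xw42...", id="my-app", ts="1353832234",
+    #   nonce="j4h3g2", ext="some-app-ext-data"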
+
+
+class Resource:
+ """
+ Normalized request/response resource.
+ """
+
+ def __init__(self, **kw):
+ self.credentials = kw.pop('credentials')
+ self.method = kw.pop('method').upper()
+ self.content = kw.pop('content', None)
+ self.content_type = kw.pop('content_type', None)
+ self.always_hash_content = kw.pop('always_hash_content', True)
+ self.ext = kw.pop('ext', None)
+ self.app = kw.pop('app', None)
+ self.dlg = kw.pop('dlg', None)
+
+ self.timestamp = str(kw.pop('timestamp', None) or utc_now())
+
+ self.nonce = kw.pop('nonce', None)
+ if self.nonce is None:
+ self.nonce = random_string(6)
+
+ # This is a lookup function for checking nonces.
+ self.seen_nonce = kw.pop('seen_nonce', None)
+
+ self.url = kw.pop('url')
+ if not self.url:
+ raise ValueError('url was empty')
+ url_parts = self.parse_url(self.url)
+ log.debug('parsed URL parts: \n{parts}'
+ .format(parts=pprint.pformat(url_parts)))
+
+ self.name = url_parts['resource'] or ''
+ self.host = url_parts['hostname'] or ''
+ self.port = str(url_parts['port'])
+
+ if kw.keys():
+ raise TypeError('Unknown keyword argument(s): {0}'
+ .format(kw.keys()))
+
+ @property
+ def content_hash(self):
+ if not hasattr(self, '_content_hash'):
+ raise AttributeError(
+ 'Cannot access content_hash because it has not been generated')
+ return self._content_hash
+
+ def gen_content_hash(self):
+ if self.content is None or self.content_type is None:
+ if self.always_hash_content:
+ # Be really strict about allowing developers to skip content
+                # hashing. If they get this far they may be unintentionally
+ # skipping it.
+ raise ValueError(
+ 'payload content and/or content_type cannot be '
+ 'empty without an explicit allowance')
+ log.debug('NOT hashing content')
+ self._content_hash = None
+ else:
+ self._content_hash = calculate_payload_hash(
+ self.content, self.credentials['algorithm'],
+ self.content_type)
+ return self.content_hash
+
+ def parse_url(self, url):
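+        # For example (illustrative), 'http://site.com:8000/foo?bar=1'
+        # yields hostname 'site.com', port 8000, and resource
+        # '/foo?bar=1' (path plus query); default ports 80/443 are
+        # filled in below when the URL omits one.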
+ url_parts = urlparse(url)
+ url_dict = {
+ 'scheme': url_parts.scheme,
+ 'hostname': url_parts.hostname,
+ 'port': url_parts.port,
+ 'path': url_parts.path,
+ 'resource': url_parts.path,
+ 'query': url_parts.query,
+ }
+ if len(url_dict['query']) > 0:
+ url_dict['resource'] = '%s?%s' % (url_dict['resource'],
+ url_dict['query'])
+
+ if url_parts.port is None:
+ if url_parts.scheme == 'http':
+ url_dict['port'] = 80
+ elif url_parts.scheme == 'https':
+ url_dict['port'] = 443
+ return url_dict
diff --git a/third_party/python/mohawk/mohawk/bewit.py b/third_party/python/mohawk/mohawk/bewit.py
new file mode 100644
index 0000000000..ec83923655
--- /dev/null
+++ b/third_party/python/mohawk/mohawk/bewit.py
@@ -0,0 +1,167 @@
+from base64 import urlsafe_b64encode, b64decode
+from collections import namedtuple
+import logging
+import re
+
+import six
+
+from .base import Resource
+from .util import (calculate_mac,
+ utc_now)
+from .exc import (CredentialsLookupError,
+ InvalidBewit,
+ MacMismatch,
+ TokenExpired)
+
+log = logging.getLogger(__name__)
+
+
+def get_bewit(resource):
+ """
+ Returns a bewit identifier for the resource as a string.
+
+ :param resource:
+ Resource to generate a bewit for
+ :type resource: `mohawk.base.Resource`
+ """
+ if resource.method != 'GET':
+ raise ValueError('bewits can only be generated for GET requests')
+ if resource.nonce != '':
+ raise ValueError('bewits must use an empty nonce')
+ mac = calculate_mac(
+ 'bewit',
+ resource,
+ None,
+ )
+
+ if isinstance(mac, six.binary_type):
+ mac = mac.decode('ascii')
+
+ if resource.ext is None:
+ ext = ''
+ else:
+ ext = resource.ext
+
+ # Strip out \ from the client id
+ # since that can break parsing the response
+ # NB that the canonical implementation does not do this as of
+ # Oct 28, 2015, so this could break compat.
+ # We can leave \ in ext since validators can limit how many \ they split
+ # on (although again, the canonical implementation does not do this)
+ client_id = six.text_type(resource.credentials['id'])
+ if "\\" in client_id:
+ log.warn("Stripping backslash character(s) '\\' from client_id")
+ client_id = client_id.replace("\\", "")
+
+ # b64encode works only with bytes in python3, but all of our parameters are
+ # in unicode, so we need to encode them. The cleanest way to do this that
+ # works in both python 2 and 3 is to use string formatting to get a
+ # unicode string, and then explicitly encode it to bytes.
+ inner_bewit = u"{id}\\{exp}\\{mac}\\{ext}".format(
+ id=client_id,
+ exp=resource.timestamp,
+ mac=mac,
+ ext=ext,
+ )
+ inner_bewit_bytes = inner_bewit.encode('ascii')
+ bewit_bytes = urlsafe_b64encode(inner_bewit_bytes)
+ # Now decode the resulting bytes back to a unicode string
+ return bewit_bytes.decode('ascii')
+
+
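+# Decoded, a bewit holds four backslash-separated fields, id\exp\mac\ext;
+# e.g. (taken from this package's tests, with an empty ext):
+#   123456\1356420707\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\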
+bewittuple = namedtuple('bewittuple', 'id expiration mac ext')
+
+
+def parse_bewit(bewit):
+ """
+ Returns a `bewittuple` representing the parts of an encoded bewit string.
+ This has the following named attributes:
+ (id, expiration, mac, ext)
+
+ :param bewit:
+ A base64 encoded bewit string
+ :type bewit: str
+ """
+ decoded_bewit = b64decode(bewit).decode('ascii')
+ bewit_parts = decoded_bewit.split("\\", 3)
+ if len(bewit_parts) != 4:
+ raise InvalidBewit('Expected 4 parts to bewit: %s' % decoded_bewit)
+ return bewittuple(*decoded_bewit.split("\\", 3))
+
+
+def strip_bewit(url):
+ """
+ Strips the bewit parameter out of a url.
+
+ Returns (encoded_bewit, stripped_url)
+
+ Raises InvalidBewit if no bewit found.
+
+ :param url:
+ The url containing a bewit parameter
+ :type url: str
+ """
+ m = re.search('[?&]bewit=([^&]+)', url)
+ if not m:
+ raise InvalidBewit('no bewit data found')
+ bewit = m.group(1)
+ stripped_url = url[:m.start()] + url[m.end():]
+ return bewit, stripped_url
+
+
+def check_bewit(url, credential_lookup, now=None):
+ """
+ Validates the given bewit.
+
+ Returns True if the resource has a valid bewit parameter attached,
+ or raises a subclass of HawkFail otherwise.
+
+ :param credential_lookup:
+ Callable to look up the credentials dict by sender ID.
+ The credentials dict must have the keys:
+ ``id``, ``key``, and ``algorithm``.
+ See :ref:`receiving-request` for an example.
+ :type credential_lookup: callable
+
+ :param now=None:
+ Unix epoch time for the current time to determine if bewit has expired.
+ If None, then the current time as given by utc_now() is used.
+ :type now=None: integer
+ """
+ raw_bewit, stripped_url = strip_bewit(url)
+ bewit = parse_bewit(raw_bewit)
+ try:
+ credentials = credential_lookup(bewit.id)
+ except LookupError:
+ raise CredentialsLookupError('Could not find credentials for ID {0}'
+ .format(bewit.id))
+
+ res = Resource(url=stripped_url,
+ method='GET',
+ credentials=credentials,
+ timestamp=bewit.expiration,
+ nonce='',
+ ext=bewit.ext,
+ )
+ mac = calculate_mac('bewit', res, None)
+ mac = mac.decode('ascii')
+
+ if mac != bewit.mac:
+ raise MacMismatch('bewit with mac {bewit_mac} did not match expected mac {expected_mac}'
+ .format(bewit_mac=bewit.mac,
+ expected_mac=mac))
+
+ # Check that the timestamp isn't expired
+ if now is None:
+ # TODO: Add offset/skew
+ now = utc_now()
+ if int(bewit.expiration) < now:
+ # TODO: Refactor TokenExpired to handle this better
+ raise TokenExpired('bewit with UTC timestamp {ts} has expired; '
+ 'it was compared to {now}'
+ .format(ts=bewit.expiration, now=now),
+ localtime_in_seconds=now,
+ www_authenticate=''
+ )
+
+ return True
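+
+
+# Typical verification (a sketch; 'lookup' stands for any callable that
+# maps a sender id to a credentials dict with 'id', 'key' and 'algorithm'):
+#
+#   check_bewit('https://site.com/doc?bewit=...', credential_lookup=lookup)
+#   # -> True, or raises a mohawk.exc.HawkFail subclass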
diff --git a/third_party/python/mohawk/mohawk/exc.py b/third_party/python/mohawk/mohawk/exc.py
new file mode 100644
index 0000000000..9376995f2c
--- /dev/null
+++ b/third_party/python/mohawk/mohawk/exc.py
@@ -0,0 +1,98 @@
+"""
+If you want to catch any exception that might be raised,
+catch :class:`mohawk.exc.HawkFail`.
+"""
+
+
+class HawkFail(Exception):
+ """
+ All Mohawk exceptions derive from this base.
+ """
+
+
+class MissingAuthorization(HawkFail):
+ """
+ No authorization header was sent by the client.
+ """
+
+
+class InvalidCredentials(HawkFail):
+ """
+ The specified Hawk credentials are invalid.
+
+ For example, the dict could be formatted incorrectly.
+ """
+
+
+class CredentialsLookupError(HawkFail):
+ """
+ A :class:`mohawk.Receiver` could not look up the
+ credentials for an incoming request.
+ """
+
+
+class BadHeaderValue(HawkFail):
+ """
+ There was an error with an attribute or value when parsing
+ or creating a Hawk header.
+ """
+
+
+class MacMismatch(HawkFail):
+ """
+ The locally calculated MAC did not match the MAC that was sent.
+ """
+
+
+class MisComputedContentHash(HawkFail):
+ """
+ The signature of the content did not match the actual content.
+ """
+
+
+class TokenExpired(HawkFail):
+ """
+ The timestamp on a message received has expired.
+
+ You may also receive this message if your server clock is out of sync.
+ Consider synchronizing it with something like `TLSdate`_.
+
+ If you are unable to synchronize your clock universally,
+    the `Hawk`_ spec mentions how you can `adjust`_
+ your sender's time to match that of the receiver in the case
+ of unexpected expiration.
+
+ The ``www_authenticate`` attribute of this exception is a header
+ that can be returned to the client. If the value is not None, it
+ will include a timestamp HMAC'd with the sender's credentials.
+ This will allow the client
+ to verify the value and safely apply an offset.
+
+ .. _`Hawk`: https://github.com/hueniverse/hawk
+ .. _`adjust`: https://github.com/hueniverse/hawk#future-time-manipulation
+ .. _`TLSdate`: http://linux-audit.com/tlsdate-the-secure-alternative-for-ntpd-ntpdate-and-rdate/
+ """
+ #: Current local time in seconds that was used to compare timestamps.
+ localtime_in_seconds = None
+    #: A header containing an HMAC'd server timestamp that the sender can verify.
+ www_authenticate = None
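+    # (formatted by mohawk.base as 'Hawk ts="...", tsm="...", error="..."')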
+
+ def __init__(self, *args, **kw):
+ self.localtime_in_seconds = kw.pop('localtime_in_seconds')
+ self.www_authenticate = kw.pop('www_authenticate')
+        super(TokenExpired, self).__init__(*args, **kw)
+
+
+class AlreadyProcessed(HawkFail):
+ """
+ The message has already been processed and cannot be re-processed.
+
+ See :ref:`nonce` for details.
+ """
+
+
+class InvalidBewit(HawkFail):
+ """
+ The bewit is invalid; e.g. it doesn't contain the right number of
+ parameters.
+ """
diff --git a/third_party/python/mohawk/mohawk/receiver.py b/third_party/python/mohawk/mohawk/receiver.py
new file mode 100644
index 0000000000..509729ea8d
--- /dev/null
+++ b/third_party/python/mohawk/mohawk/receiver.py
@@ -0,0 +1,170 @@
+import logging
+import sys
+
+from .base import default_ts_skew_in_seconds, HawkAuthority, Resource
+from .exc import CredentialsLookupError, MissingAuthorization
+from .util import (calculate_mac,
+ parse_authorization_header,
+ validate_credentials)
+
+__all__ = ['Receiver']
+log = logging.getLogger(__name__)
+
+
+class Receiver(HawkAuthority):
+ """
+ A Hawk authority that will receive and respond to requests.
+
+ :param credentials_map:
+ Callable to look up the credentials dict by sender ID.
+ The credentials dict must have the keys:
+ ``id``, ``key``, and ``algorithm``.
+ See :ref:`receiving-request` for an example.
+ :type credentials_map: callable
+
+ :param request_header:
+ A `Hawk`_ ``Authorization`` header
+ such as one created by :class:`mohawk.Sender`.
+ :type request_header: str
+
+ :param url: Absolute URL of the request.
+ :type url: str
+
+    :param method: Method of the request, e.g. POST or GET.
+ :type method: str
+
+ :param content=None: Byte string of request body.
+ :type content=None: str
+
+ :param content_type=None: content-type header value for request.
+ :type content_type=None: str
+
+ :param accept_untrusted_content=False:
+ When True, allow requests that do not hash their content or
+ allow None type ``content`` and ``content_type``
+ arguments. Read :ref:`skipping-content-checks`
+ to learn more.
+ :type accept_untrusted_content=False: bool
+
+ :param localtime_offset_in_seconds=0:
+ Seconds to add to local time in case it's out of sync.
+ :type localtime_offset_in_seconds=0: float
+
+ :param timestamp_skew_in_seconds=60:
+ Max seconds until a message expires. Upon expiry,
+ :class:`mohawk.exc.TokenExpired` is raised.
+ :type timestamp_skew_in_seconds=60: float
+
+ .. _`Hawk`: https://github.com/hueniverse/hawk
+ """
+ #: Value suitable for a ``Server-Authorization`` header.
+ response_header = None
+
+ def __init__(self,
+ credentials_map,
+ request_header,
+ url,
+ method,
+ content=None,
+ content_type=None,
+ seen_nonce=None,
+ localtime_offset_in_seconds=0,
+ accept_untrusted_content=False,
+ timestamp_skew_in_seconds=default_ts_skew_in_seconds,
+ **auth_kw):
+
+ self.response_header = None # make into property that can raise exc?
+ self.credentials_map = credentials_map
+ self.seen_nonce = seen_nonce
+
+ log.debug('accepting request {header}'.format(header=request_header))
+
+ if not request_header:
+ raise MissingAuthorization()
+
+ parsed_header = parse_authorization_header(request_header)
+
+ try:
+ credentials = self.credentials_map(parsed_header['id'])
+ except LookupError:
+ etype, val, tb = sys.exc_info()
+ log.debug('Catching {etype}: {val}'.format(etype=etype, val=val))
+ raise CredentialsLookupError(
+ 'Could not find credentials for ID {0}'
+ .format(parsed_header['id']))
+ validate_credentials(credentials)
+
+ resource = Resource(url=url,
+ method=method,
+ ext=parsed_header.get('ext', None),
+ app=parsed_header.get('app', None),
+ dlg=parsed_header.get('dlg', None),
+ credentials=credentials,
+ nonce=parsed_header['nonce'],
+ seen_nonce=self.seen_nonce,
+ content=content,
+ timestamp=parsed_header['ts'],
+ content_type=content_type)
+
+ self._authorize(
+ 'header', parsed_header, resource,
+ timestamp_skew_in_seconds=timestamp_skew_in_seconds,
+ localtime_offset_in_seconds=localtime_offset_in_seconds,
+ accept_untrusted_content=accept_untrusted_content,
+ **auth_kw)
+
+ # Now that we verified an incoming request, we can re-use some of its
+ # properties to build our response header.
+
+ self.parsed_header = parsed_header
+ self.resource = resource
+
+ def respond(self,
+ content=None,
+ content_type=None,
+ always_hash_content=True,
+ ext=None):
+ """
+ Respond to the request.
+
+ This generates the :attr:`mohawk.Receiver.response_header`
+ attribute.
+
+ :param content=None: Byte string of response body that will be sent.
+ :type content=None: str
+
+ :param content_type=None: content-type header value for response.
+ :type content_type=None: str
+
+ :param always_hash_content=True:
+ When True, ``content`` and ``content_type`` cannot be None.
+ Read :ref:`skipping-content-checks` to learn more.
+ :type always_hash_content=True: bool
+
+ :param ext=None:
+ An external `Hawk`_ string. If not None, this value will be
+ signed so that the sender can trust it.
+ :type ext=None: str
+
+ .. _`Hawk`: https://github.com/hueniverse/hawk
+ """
+
+ log.debug('generating response header')
+
+ resource = Resource(url=self.resource.url,
+ credentials=self.resource.credentials,
+ ext=ext,
+ app=self.parsed_header.get('app', None),
+ dlg=self.parsed_header.get('dlg', None),
+ method=self.resource.method,
+ content=content,
+ content_type=content_type,
+ always_hash_content=always_hash_content,
+ nonce=self.parsed_header['nonce'],
+ timestamp=self.parsed_header['ts'])
+
+ mac = calculate_mac('response', resource, resource.gen_content_hash())
+
+ self.response_header = self._make_header(resource, mac,
+ additional_keys=['ext'])
+ return self.response_header
diff --git a/third_party/python/mohawk/mohawk/sender.py b/third_party/python/mohawk/mohawk/sender.py
new file mode 100644
index 0000000000..b6f3edc170
--- /dev/null
+++ b/third_party/python/mohawk/mohawk/sender.py
@@ -0,0 +1,178 @@
+import logging
+
+from .base import default_ts_skew_in_seconds, HawkAuthority, Resource
+from .util import (calculate_mac,
+ parse_authorization_header,
+ validate_credentials)
+
+__all__ = ['Sender']
+log = logging.getLogger(__name__)
+
+
+class Sender(HawkAuthority):
+ """
+ A Hawk authority that will emit requests and verify responses.
+
+ :param credentials: Dict of credentials with keys ``id``, ``key``,
+ and ``algorithm``. See :ref:`usage` for an example.
+ :type credentials: dict
+
+ :param url: Absolute URL of the request.
+ :type url: str
+
+    :param method: Method of the request, e.g. POST or GET.
+ :type method: str
+
+ :param content=None: Byte string of request body.
+ :type content=None: str
+
+ :param content_type=None: content-type header value for request.
+ :type content_type=None: str
+
+ :param always_hash_content=True:
+ When True, ``content`` and ``content_type`` cannot be None.
+ Read :ref:`skipping-content-checks` to learn more.
+ :type always_hash_content=True: bool
+
+ :param nonce=None:
+ A string that when coupled with the timestamp will
+ uniquely identify this request to prevent replays.
+ If None, a nonce will be generated for you.
+ :type nonce=None: str
+
+ :param ext=None:
+ An external `Hawk`_ string. If not None, this value will be signed
+ so that the receiver can trust it.
+ :type ext=None: str
+
+ :param app=None:
+ A `Hawk`_ application string. If not None, this value will be signed
+ so that the receiver can trust it.
+ :type app=None: str
+
+ :param dlg=None:
+ A `Hawk`_ delegation string. If not None, this value will be signed
+ so that the receiver can trust it.
+ :type dlg=None: str
+
+ :param seen_nonce=None:
+ A callable that returns True if a nonce has been seen.
+ See :ref:`nonce` for details.
+ :type seen_nonce=None: callable
+
+ .. _`Hawk`: https://github.com/hueniverse/hawk
+ """
+ #: Value suitable for an ``Authorization`` header.
+ request_header = None
+
+ def __init__(self, credentials,
+ url,
+ method,
+ content=None,
+ content_type=None,
+ always_hash_content=True,
+ nonce=None,
+ ext=None,
+ app=None,
+ dlg=None,
+ seen_nonce=None,
+ # For easier testing:
+ _timestamp=None):
+
+ self.reconfigure(credentials)
+ self.request_header = None
+ self.seen_nonce = seen_nonce
+
+ log.debug('generating request header')
+ self.req_resource = Resource(url=url,
+ credentials=self.credentials,
+ ext=ext,
+ app=app,
+ dlg=dlg,
+ nonce=nonce,
+ method=method,
+ content=content,
+ always_hash_content=always_hash_content,
+ timestamp=_timestamp,
+ content_type=content_type)
+
+ mac = calculate_mac('header', self.req_resource,
+ self.req_resource.gen_content_hash())
+ self.request_header = self._make_header(self.req_resource, mac)
+
+ def accept_response(self,
+ response_header,
+ content=None,
+ content_type=None,
+ accept_untrusted_content=False,
+ localtime_offset_in_seconds=0,
+ timestamp_skew_in_seconds=default_ts_skew_in_seconds,
+ **auth_kw):
+ """
+ Accept a response to this request.
+
+ :param response_header:
+ A `Hawk`_ ``Server-Authorization`` header
+ such as one created by :class:`mohawk.Receiver`.
+ :type response_header: str
+
+ :param content=None: Byte string of the response body received.
+ :type content=None: str
+
+ :param content_type=None:
+ Content-Type header value of the response received.
+ :type content_type=None: str
+
+ :param accept_untrusted_content=False:
+ When True, allow responses that do not hash their content or
+ allow None type ``content`` and ``content_type``
+ arguments. Read :ref:`skipping-content-checks`
+ to learn more.
+ :type accept_untrusted_content=False: bool
+
+ :param localtime_offset_in_seconds=0:
+ Seconds to add to local time in case it's out of sync.
+ :type localtime_offset_in_seconds=0: float
+
+ :param timestamp_skew_in_seconds=60:
+ Max seconds until a message expires. Upon expiry,
+ :class:`mohawk.exc.TokenExpired` is raised.
+ :type timestamp_skew_in_seconds=60: float
+
+ .. _`Hawk`: https://github.com/hueniverse/hawk
+ """
+ log.debug('accepting response {header}'
+ .format(header=response_header))
+
+ parsed_header = parse_authorization_header(response_header)
+
+ resource = Resource(ext=parsed_header.get('ext', None),
+ content=content,
+ content_type=content_type,
+ # The following response attributes are
+ # in reference to the original request,
+                            # not to the response header:
+ timestamp=self.req_resource.timestamp,
+ nonce=self.req_resource.nonce,
+ url=self.req_resource.url,
+ method=self.req_resource.method,
+ app=self.req_resource.app,
+ dlg=self.req_resource.dlg,
+ credentials=self.credentials,
+ seen_nonce=self.seen_nonce)
+
+ self._authorize(
+ 'response', parsed_header, resource,
+ # Per Node lib, a responder macs the *sender's* timestamp.
+ # It does not create its own timestamp.
+ # I suppose a slow response could time out here. Maybe only check
+ # mac failures, not timeouts?
+ their_timestamp=resource.timestamp,
+ timestamp_skew_in_seconds=timestamp_skew_in_seconds,
+ localtime_offset_in_seconds=localtime_offset_in_seconds,
+ accept_untrusted_content=accept_untrusted_content,
+ **auth_kw)
+
+ def reconfigure(self, credentials):
+ validate_credentials(credentials)
+ self.credentials = credentials
diff --git a/third_party/python/mohawk/mohawk/tests.py b/third_party/python/mohawk/mohawk/tests.py
new file mode 100644
index 0000000000..eeb71506d1
--- /dev/null
+++ b/third_party/python/mohawk/mohawk/tests.py
@@ -0,0 +1,823 @@
+import sys
+from unittest import TestCase
+from base64 import b64decode, urlsafe_b64encode
+
+import mock
+from nose.tools import eq_, raises
+import six
+
+from . import Receiver, Sender
+from .base import Resource
+from .exc import (AlreadyProcessed,
+ BadHeaderValue,
+ CredentialsLookupError,
+ InvalidCredentials,
+ MacMismatch,
+ MisComputedContentHash,
+ MissingAuthorization,
+ TokenExpired,
+ InvalidBewit)
+from .util import (parse_authorization_header,
+ utc_now,
+ calculate_ts_mac,
+ validate_credentials)
+from .bewit import (get_bewit,
+ check_bewit,
+ strip_bewit,
+ parse_bewit)
+
+
+class Base(TestCase):
+
+ def setUp(self):
+ self.credentials = {
+ 'id': 'my-hawk-id',
+ 'key': 'my hAwK sekret',
+ 'algorithm': 'sha256',
+ }
+
+ # This callable might be replaced by tests.
+ def seen_nonce(id, nonce, ts):
+ return False
+ self.seen_nonce = seen_nonce
+
+ def credentials_map(self, id):
+        # Pretend this is doing something more interesting, like looking
+        # up credentials by ID in a database.
+ if self.credentials['id'] != id:
+            raise LookupError('No credentials configured for Hawk ID {id}'
+ .format(id=id))
+ return self.credentials
+
+
+class TestConfig(Base):
+
+ @raises(InvalidCredentials)
+ def test_no_id(self):
+ c = self.credentials.copy()
+ del c['id']
+ validate_credentials(c)
+
+ @raises(InvalidCredentials)
+ def test_no_key(self):
+ c = self.credentials.copy()
+ del c['key']
+ validate_credentials(c)
+
+ @raises(InvalidCredentials)
+ def test_no_algo(self):
+ c = self.credentials.copy()
+ del c['algorithm']
+ validate_credentials(c)
+
+ @raises(InvalidCredentials)
+ def test_no_credentials(self):
+ validate_credentials(None)
+
+ def test_non_dict_credentials(self):
+ class WeirdThing(object):
+ def __getitem__(self, key):
+ return 'whatever'
+ validate_credentials(WeirdThing())
+
+
+class TestSender(Base):
+
+ def setUp(self):
+ super(TestSender, self).setUp()
+ self.url = 'http://site.com/foo?bar=1'
+
+ def Sender(self, method='GET', **kw):
+ credentials = kw.pop('credentials', self.credentials)
+ kw.setdefault('content', '')
+ kw.setdefault('content_type', '')
+ sender = Sender(credentials, self.url, method, **kw)
+ return sender
+
+ def receive(self, request_header, url=None, method='GET', **kw):
+ credentials_map = kw.pop('credentials_map', self.credentials_map)
+ kw.setdefault('content', '')
+ kw.setdefault('content_type', '')
+ kw.setdefault('seen_nonce', self.seen_nonce)
+ return Receiver(credentials_map, request_header,
+ url or self.url, method, **kw)
+
+ def test_get_ok(self):
+ method = 'GET'
+ sn = self.Sender(method=method)
+ self.receive(sn.request_header, method=method)
+
+ def test_post_ok(self):
+ method = 'POST'
+ sn = self.Sender(method=method)
+ self.receive(sn.request_header, method=method)
+
+ def test_post_content_ok(self):
+ method = 'POST'
+ content = 'foo=bar&baz=2'
+ sn = self.Sender(method=method, content=content)
+ self.receive(sn.request_header, method=method, content=content)
+
+ def test_post_content_type_ok(self):
+ method = 'POST'
+ content = '{"bar": "foobs"}'
+ content_type = 'application/json'
+ sn = self.Sender(method=method, content=content,
+ content_type=content_type)
+ self.receive(sn.request_header, method=method, content=content,
+ content_type=content_type)
+
+ def test_post_content_type_with_trailing_charset(self):
+ method = 'POST'
+ content = '{"bar": "foobs"}'
+ content_type = 'application/json; charset=utf8'
+ sn = self.Sender(method=method, content=content,
+ content_type=content_type)
+ self.receive(sn.request_header, method=method, content=content,
+ content_type='application/json; charset=other')
+
+ @raises(ValueError)
+ def test_missing_payload_details(self):
+ self.Sender(method='POST', content=None, content_type=None)
+
+ def test_skip_payload_hashing(self):
+ method = 'POST'
+ content = '{"bar": "foobs"}'
+ content_type = 'application/json'
+ sn = self.Sender(method=method, content=None, content_type=None,
+ always_hash_content=False)
+ self.receive(sn.request_header, method=method, content=content,
+ content_type=content_type,
+ accept_untrusted_content=True)
+
+ @raises(ValueError)
+ def test_cannot_skip_content_only(self):
+ self.Sender(method='POST', content=None,
+ content_type='application/json')
+
+ @raises(ValueError)
+ def test_cannot_skip_content_type_only(self):
+ self.Sender(method='POST', content='{"foo": "bar"}',
+ content_type=None)
+
+ @raises(MacMismatch)
+ def test_tamper_with_host(self):
+ sn = self.Sender()
+ self.receive(sn.request_header, url='http://TAMPERED-WITH.com')
+
+ @raises(MacMismatch)
+ def test_tamper_with_method(self):
+ sn = self.Sender(method='GET')
+ self.receive(sn.request_header, method='POST')
+
+ @raises(MacMismatch)
+ def test_tamper_with_path(self):
+ sn = self.Sender()
+ self.receive(sn.request_header,
+ url='http://site.com/TAMPERED?bar=1')
+
+ @raises(MacMismatch)
+ def test_tamper_with_query(self):
+ sn = self.Sender()
+ self.receive(sn.request_header,
+ url='http://site.com/foo?bar=TAMPERED')
+
+ @raises(MacMismatch)
+ def test_tamper_with_scheme(self):
+ sn = self.Sender()
+ self.receive(sn.request_header, url='https://site.com/foo?bar=1')
+
+ @raises(MacMismatch)
+ def test_tamper_with_port(self):
+ sn = self.Sender()
+ self.receive(sn.request_header,
+ url='http://site.com:8000/foo?bar=1')
+
+ @raises(MisComputedContentHash)
+ def test_tamper_with_content(self):
+ sn = self.Sender()
+ self.receive(sn.request_header, content='stuff=nope')
+
+ def test_non_ascii_content(self):
+ content = u'Ivan Kristi\u0107'
+ sn = self.Sender(content=content)
+ self.receive(sn.request_header, content=content)
+
+ @raises(MacMismatch)
+ def test_tamper_with_content_type(self):
+ sn = self.Sender(method='POST')
+ self.receive(sn.request_header, content_type='application/json')
+
+ @raises(AlreadyProcessed)
+ def test_nonce_fail(self):
+
+ def seen_nonce(id, nonce, ts):
+ return True
+
+ sn = self.Sender()
+
+ self.receive(sn.request_header, seen_nonce=seen_nonce)
+
+ def test_nonce_ok(self):
+
+ def seen_nonce(id, nonce, ts):
+ return False
+
+ sn = self.Sender(seen_nonce=seen_nonce)
+ self.receive(sn.request_header)
+
+ @raises(TokenExpired)
+ def test_expired_ts(self):
+ now = utc_now() - 120
+ sn = self.Sender(_timestamp=now)
+ self.receive(sn.request_header)
+
+ def test_expired_exception_reports_localtime(self):
+ now = utc_now()
+ ts = now - 120
+ sn = self.Sender(_timestamp=ts) # force expiry
+
+ exc = None
+ with mock.patch('mohawk.base.utc_now') as fake_now:
+ fake_now.return_value = now
+ try:
+ self.receive(sn.request_header)
+            except Exception:
+ etype, exc, tb = sys.exc_info()
+
+ eq_(type(exc), TokenExpired)
+ eq_(exc.localtime_in_seconds, now)
+
+ def test_localtime_offset(self):
+ now = utc_now() - 120
+ sn = self.Sender(_timestamp=now)
+ # Without an offset this will raise an expired exception.
+ self.receive(sn.request_header, localtime_offset_in_seconds=-120)
+
+ def test_localtime_skew(self):
+ now = utc_now() - 120
+ sn = self.Sender(_timestamp=now)
+ # Without an offset this will raise an expired exception.
+ self.receive(sn.request_header, timestamp_skew_in_seconds=120)
+
+ @raises(MacMismatch)
+ def test_hash_tampering(self):
+ sn = self.Sender()
+ header = sn.request_header.replace('hash="', 'hash="nope')
+ self.receive(header)
+
+ @raises(MacMismatch)
+ def test_bad_secret(self):
+ cfg = {
+ 'id': 'my-hawk-id',
+ 'key': 'INCORRECT; YOU FAIL',
+ 'algorithm': 'sha256',
+ }
+ sn = self.Sender(credentials=cfg)
+ self.receive(sn.request_header)
+
+ @raises(MacMismatch)
+ def test_unexpected_algorithm(self):
+ cr = self.credentials.copy()
+ cr['algorithm'] = 'sha512'
+ sn = self.Sender(credentials=cr)
+
+ # Validate with mismatched credentials (sha256).
+ self.receive(sn.request_header)
+
+ @raises(InvalidCredentials)
+ def test_invalid_credentials(self):
+ cfg = self.credentials.copy()
+ # Create an invalid credentials.
+ del cfg['algorithm']
+
+ self.Sender(credentials=cfg)
+
+ @raises(CredentialsLookupError)
+ def test_unknown_id(self):
+ cr = self.credentials.copy()
+ cr['id'] = 'someone-else'
+ sn = self.Sender(credentials=cr)
+
+ self.receive(sn.request_header)
+
+ @raises(MacMismatch)
+ def test_bad_ext(self):
+ sn = self.Sender(ext='my external data')
+
+ header = sn.request_header.replace('my external data', 'TAMPERED')
+ self.receive(header)
+
+ def test_ext_with_quotes(self):
+ sn = self.Sender(ext='quotes=""')
+ self.receive(sn.request_header)
+ parsed = parse_authorization_header(sn.request_header)
+ eq_(parsed['ext'], 'quotes=""')
+
+ def test_ext_with_new_line(self):
+ sn = self.Sender(ext="new line \n in the middle")
+ self.receive(sn.request_header)
+ parsed = parse_authorization_header(sn.request_header)
+ eq_(parsed['ext'], "new line \n in the middle")
+
+ def test_ext_with_equality_sign(self):
+ sn = self.Sender(ext="foo=bar&foo2=bar2;foo3=bar3")
+ self.receive(sn.request_header)
+ parsed = parse_authorization_header(sn.request_header)
+ eq_(parsed['ext'], "foo=bar&foo2=bar2;foo3=bar3")
+
+ @raises(BadHeaderValue)
+ def test_ext_with_illegal_chars(self):
+ self.Sender(ext="something like \t is illegal")
+
+ @raises(BadHeaderValue)
+ def test_ext_with_illegal_unicode(self):
+ self.Sender(ext=u'Ivan Kristi\u0107')
+
+ @raises(BadHeaderValue)
+ def test_ext_with_illegal_utf8(self):
+ # This isn't allowed because the escaped byte chars are out of
+ # range. It's a little odd but this is what the Node lib does
+ # implicitly with its regex.
+ self.Sender(ext=u'Ivan Kristi\u0107'.encode('utf8'))
+
+ def test_app_ok(self):
+ app = 'custom-app'
+ sn = self.Sender(app=app)
+ self.receive(sn.request_header)
+ parsed = parse_authorization_header(sn.request_header)
+ eq_(parsed['app'], app)
+
+ @raises(MacMismatch)
+ def test_tampered_app(self):
+ app = 'custom-app'
+ sn = self.Sender(app=app)
+ header = sn.request_header.replace(app, 'TAMPERED-WITH')
+ self.receive(header)
+
+ def test_dlg_ok(self):
+ dlg = 'custom-dlg'
+ sn = self.Sender(dlg=dlg)
+ self.receive(sn.request_header)
+ parsed = parse_authorization_header(sn.request_header)
+ eq_(parsed['dlg'], dlg)
+
+ @raises(MacMismatch)
+ def test_tampered_dlg(self):
+ dlg = 'custom-dlg'
+ sn = self.Sender(dlg=dlg, app='some-app')
+ header = sn.request_header.replace(dlg, 'TAMPERED-WITH')
+ self.receive(header)
+
+
+class TestReceiver(Base):
+
+ def setUp(self):
+ super(TestReceiver, self).setUp()
+ self.url = 'http://site.com/'
+ self.sender = None
+ self.receiver = None
+
+ def receive(self, method='GET', **kw):
+ url = kw.pop('url', self.url)
+ sender = kw.pop('sender', None)
+ sender_kw = kw.pop('sender_kw', {})
+ sender_kw.setdefault('content', '')
+ sender_kw.setdefault('content_type', '')
+ sender_url = kw.pop('sender_url', url)
+
+ credentials_map = kw.pop('credentials_map',
+ lambda id: self.credentials)
+
+ if sender:
+ self.sender = sender
+ else:
+ self.sender = Sender(self.credentials, sender_url, method,
+ **sender_kw)
+
+ kw.setdefault('content', '')
+ kw.setdefault('content_type', '')
+ self.receiver = Receiver(credentials_map,
+ self.sender.request_header, url, method,
+ **kw)
+
+ def respond(self, **kw):
+ accept_kw = kw.pop('accept_kw', {})
+ accept_kw.setdefault('content', '')
+ accept_kw.setdefault('content_type', '')
+ receiver = kw.pop('receiver', self.receiver)
+
+ kw.setdefault('content', '')
+ kw.setdefault('content_type', '')
+ receiver.respond(**kw)
+ self.sender.accept_response(receiver.response_header, **accept_kw)
+
+ return receiver.response_header
+
+ @raises(InvalidCredentials)
+ def test_invalid_credentials_lookup(self):
+ # Return invalid credentials.
+ self.receive(credentials_map=lambda *a: {})
+
+ def test_get_ok(self):
+ method = 'GET'
+ self.receive(method=method)
+ self.respond()
+
+ def test_post_ok(self):
+ method = 'POST'
+ self.receive(method=method)
+ self.respond()
+
+ @raises(MisComputedContentHash)
+ def test_respond_with_wrong_content(self):
+ self.receive()
+ self.respond(content='real content',
+ accept_kw=dict(content='TAMPERED WITH'))
+
+ @raises(MisComputedContentHash)
+ def test_respond_with_wrong_content_type(self):
+ self.receive()
+ self.respond(content_type='text/html',
+ accept_kw=dict(content_type='application/json'))
+
+ @raises(MissingAuthorization)
+ def test_missing_authorization(self):
+ Receiver(lambda id: self.credentials, None, '/', 'GET')
+
+ @raises(MacMismatch)
+ def test_respond_with_wrong_url(self):
+ self.receive(url='http://fakesite.com')
+ wrong_receiver = self.receiver
+
+ self.receive(url='http://realsite.com')
+
+ self.respond(receiver=wrong_receiver)
+
+ @raises(MacMismatch)
+ def test_respond_with_wrong_method(self):
+ self.receive(method='GET')
+ wrong_receiver = self.receiver
+
+ self.receive(method='POST')
+
+ self.respond(receiver=wrong_receiver)
+
+ @raises(MacMismatch)
+ def test_respond_with_wrong_nonce(self):
+ self.receive(sender_kw=dict(nonce='another-nonce'))
+ wrong_receiver = self.receiver
+
+ self.receive()
+
+ # The nonce must match the one sent in the original request.
+ self.respond(receiver=wrong_receiver)
+
+ def test_respond_with_unhashed_content(self):
+ self.receive()
+
+ self.respond(always_hash_content=False, content=None,
+ content_type=None,
+ accept_kw=dict(accept_untrusted_content=True))
+
+ @raises(TokenExpired)
+ def test_respond_with_expired_ts(self):
+ self.receive()
+ hdr = self.receiver.respond(content='', content_type='')
+
+ with mock.patch('mohawk.base.utc_now') as fn:
+ fn.return_value = 0 # force an expiry
+ try:
+ self.sender.accept_response(hdr, content='', content_type='')
+ except TokenExpired:
+ etype, exc, tb = sys.exc_info()
+ hdr = parse_authorization_header(exc.www_authenticate)
+ calculated = calculate_ts_mac(fn(), self.credentials)
+ if isinstance(calculated, six.binary_type):
+ calculated = calculated.decode('ascii')
+ eq_(hdr['tsm'], calculated)
+ raise
+
+ def test_respond_with_bad_ts_skew_ok(self):
+ now = utc_now() - 120
+
+ self.receive()
+ hdr = self.receiver.respond(content='', content_type='')
+
+ with mock.patch('mohawk.base.utc_now') as fn:
+ fn.return_value = now
+
+ # Without an offset this will raise an expired exception.
+ self.sender.accept_response(hdr, content='', content_type='',
+ timestamp_skew_in_seconds=120)
+
+ def test_respond_with_ext(self):
+ self.receive()
+
+ ext = 'custom-ext'
+ self.respond(ext=ext)
+ header = parse_authorization_header(self.receiver.response_header)
+ eq_(header['ext'], ext)
+
+ @raises(MacMismatch)
+ def test_respond_with_wrong_app(self):
+ self.receive(sender_kw=dict(app='TAMPERED-WITH', dlg='delegation'))
+ self.receiver.respond(content='', content_type='')
+ wrong_receiver = self.receiver
+
+ self.receive(sender_kw=dict(app='real-app', dlg='delegation'))
+
+ self.sender.accept_response(wrong_receiver.response_header,
+ content='', content_type='')
+
+ @raises(MacMismatch)
+ def test_respond_with_wrong_dlg(self):
+ self.receive(sender_kw=dict(app='app', dlg='TAMPERED-WITH'))
+ self.receiver.respond(content='', content_type='')
+ wrong_receiver = self.receiver
+
+ self.receive(sender_kw=dict(app='app', dlg='real-dlg'))
+
+ self.sender.accept_response(wrong_receiver.response_header,
+ content='', content_type='')
+
+ @raises(MacMismatch)
+ def test_receive_wrong_method(self):
+ self.receive(method='GET')
+ wrong_sender = self.sender
+ self.receive(method='POST', sender=wrong_sender)
+
+ @raises(MacMismatch)
+ def test_receive_wrong_url(self):
+ self.receive(url='http://fakesite.com/')
+ wrong_sender = self.sender
+ self.receive(url='http://realsite.com/', sender=wrong_sender)
+
+ @raises(MisComputedContentHash)
+ def test_receive_wrong_content(self):
+ self.receive(sender_kw=dict(content='real request'),
+ content='real request')
+ wrong_sender = self.sender
+ self.receive(content='TAMPERED WITH', sender=wrong_sender)
+
+ @raises(MisComputedContentHash)
+ def test_unexpected_unhashed_content(self):
+ self.receive(sender_kw=dict(content=None, content_type=None,
+ always_hash_content=False))
+
+ @raises(ValueError)
+ def test_cannot_receive_empty_content_only(self):
+ content_type = 'text/plain'
+ self.receive(sender_kw=dict(content='<content>',
+ content_type=content_type),
+ content=None, content_type=content_type)
+
+ @raises(ValueError)
+ def test_cannot_receive_empty_content_type_only(self):
+ content = '<content>'
+ self.receive(sender_kw=dict(content=content,
+ content_type='text/plain'),
+ content=content, content_type=None)
+
+ @raises(MisComputedContentHash)
+ def test_receive_wrong_content_type(self):
+ self.receive(sender_kw=dict(content_type='text/html'),
+ content_type='text/html')
+ wrong_sender = self.sender
+
+ self.receive(content_type='application/json',
+ sender=wrong_sender)
+
+
+class TestSendAndReceive(Base):
+
+ def test(self):
+ credentials = {
+ 'id': 'some-id',
+ 'key': 'some secret',
+ 'algorithm': 'sha256'
+ }
+
+ url = 'https://my-site.com/'
+ method = 'POST'
+
+ # The client sends a request with a Hawk header.
+ content = 'foo=bar&baz=nooz'
+ content_type = 'application/x-www-form-urlencoded'
+
+ sender = Sender(credentials,
+ url, method,
+ content=content,
+ content_type=content_type)
+
+ # The server receives a request and authorizes access.
+ receiver = Receiver(lambda id: credentials,
+ sender.request_header,
+ url, method,
+ content=content,
+ content_type=content_type)
+
+ # The server responds with a similar Hawk header.
+ content = 'we are friends'
+ content_type = 'text/plain'
+ receiver.respond(content=content,
+ content_type=content_type)
+
+ # The client receives a response and authorizes access.
+ sender.accept_response(receiver.response_header,
+ content=content,
+ content_type=content_type)
+
+
+class TestBewit(Base):
+
+ # Test cases copied from
+ # https://github.com/hueniverse/hawk/blob/492632da51ecedd5f59ce96f081860ad24ce6532/test/uri.js
+
+ def setUp(self):
+ self.credentials = {
+ 'id': '123456',
+ 'key': '2983d45yun89q',
+ 'algorithm': 'sha256',
+ }
+
+ def make_credential_lookup(self, credentials_map):
+ # Helper function to make a lookup function given a dictionary of
+ # credentials
+ def lookup(client_id):
+            # Will raise a KeyError (a subclass of LookupError) if the
+            # client id is missing.
+ return credentials_map[client_id]
+ return lookup
+
+ def test_bewit(self):
+ res = Resource(url='https://example.com/somewhere/over/the/rainbow',
+ method='GET', credentials=self.credentials,
+ timestamp=1356420407 + 300,
+ nonce='',
+ )
+ bewit = get_bewit(res)
+
+ expected = '123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
+ eq_(b64decode(bewit).decode('ascii'), expected)
+
+ def test_bewit_with_binary_id(self):
+ # Check for exceptions in get_bewit call with binary id
+ binary_credentials = self.credentials.copy()
+ binary_credentials['id'] = binary_credentials['id'].encode('ascii')
+ res = Resource(url='https://example.com/somewhere/over/the/rainbow',
+ method='GET', credentials=binary_credentials,
+ timestamp=1356420407 + 300,
+ nonce='',
+ )
+ get_bewit(res)
+
+ def test_bewit_with_ext(self):
+ res = Resource(url='https://example.com/somewhere/over/the/rainbow',
+ method='GET', credentials=self.credentials,
+ timestamp=1356420407 + 300,
+ nonce='',
+ ext='xandyandz'
+ )
+ bewit = get_bewit(res)
+
+ expected = '123456\\1356420707\\kscxwNR2tJpP1T1zDLNPbB5UiKIU9tOSJXTUdG7X9h8=\\xandyandz'
+ eq_(b64decode(bewit).decode('ascii'), expected)
+
+ def test_bewit_with_ext_and_backslashes(self):
+ credentials = self.credentials
+ credentials['id'] = '123\\456'
+ res = Resource(url='https://example.com/somewhere/over/the/rainbow',
+ method='GET', credentials=self.credentials,
+ timestamp=1356420407 + 300,
+ nonce='',
+ ext='xand\\yandz'
+ )
+ bewit = get_bewit(res)
+
+ expected = '123456\\1356420707\\b82LLIxG5UDkaChLU953mC+SMrbniV1sb8KiZi9cSsc=\\xand\\yandz'
+ eq_(b64decode(bewit).decode('ascii'), expected)
+
+ def test_bewit_with_port(self):
+ res = Resource(url='https://example.com:8080/somewhere/over/the/rainbow',
+ method='GET', credentials=self.credentials,
+ timestamp=1356420407 + 300, nonce='', ext='xandyandz')
+ bewit = get_bewit(res)
+
+ expected = '123456\\1356420707\\hZbJ3P2cKEo4ky0C8jkZAkRyCZueg4WSNbxV7vq3xHU=\\xandyandz'
+ eq_(b64decode(bewit).decode('ascii'), expected)
+
+ @raises(ValueError)
+ def test_bewit_with_nonce(self):
+ res = Resource(url='https://example.com/somewhere/over/the/rainbow',
+ method='GET', credentials=self.credentials,
+ timestamp=1356420407 + 300,
+ nonce='n1')
+ get_bewit(res)
+
+ @raises(ValueError)
+ def test_bewit_invalid_method(self):
+ res = Resource(url='https://example.com:8080/somewhere/over/the/rainbow',
+ method='POST', credentials=self.credentials,
+ timestamp=1356420407 + 300, nonce='')
+ get_bewit(res)
+
+ def test_strip_bewit(self):
+ bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
+
+ raw_bewit, stripped_url = strip_bewit(url)
+ self.assertEquals(raw_bewit, bewit)
+ self.assertEquals(stripped_url, "https://example.com/somewhere/over/the/rainbow")
+
+ @raises(InvalidBewit)
+ def test_strip_url_without_bewit(self):
+ url = "https://example.com/somewhere/over/the/rainbow"
+ strip_bewit(url)
+
+ def test_parse_bewit(self):
+ bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ bewit = parse_bewit(bewit)
+ self.assertEquals(bewit.id, '123456')
+ self.assertEquals(bewit.expiration, '1356420707')
+ self.assertEquals(bewit.mac, 'IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=')
+ self.assertEquals(bewit.ext, '')
+
+ def test_parse_bewit_with_ext(self):
+ bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\xandyandz'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ bewit = parse_bewit(bewit)
+ self.assertEquals(bewit.id, '123456')
+ self.assertEquals(bewit.expiration, '1356420707')
+ self.assertEquals(bewit.mac, 'IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=')
+ self.assertEquals(bewit.ext, 'xandyandz')
+
+ def test_parse_bewit_with_ext_and_backslashes(self):
+ bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\xand\\yandz'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ bewit = parse_bewit(bewit)
+ self.assertEquals(bewit.id, '123456')
+ self.assertEquals(bewit.expiration, '1356420707')
+ self.assertEquals(bewit.mac, 'IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=')
+ self.assertEquals(bewit.ext, 'xand\\yandz')
+
+ @raises(InvalidBewit)
+ def test_parse_invalid_bewit_with_only_one_part(self):
+ bewit = b'12345'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ bewit = parse_bewit(bewit)
+
+ @raises(InvalidBewit)
+ def test_parse_invalid_bewit_with_only_two_parts(self):
+ bewit = b'1\\2'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ bewit = parse_bewit(bewit)
+
+ def test_validate_bewit(self):
+ bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
+ credential_lookup = self.make_credential_lookup({
+ self.credentials['id']: self.credentials,
+ })
+ self.assertTrue(check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 10))
+
+ def test_validate_bewit_with_ext(self):
+ bewit = b'123456\\1356420707\\kscxwNR2tJpP1T1zDLNPbB5UiKIU9tOSJXTUdG7X9h8=\\xandyandz'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
+ credential_lookup = self.make_credential_lookup({
+ self.credentials['id']: self.credentials,
+ })
+ self.assertTrue(check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 10))
+
+ def test_validate_bewit_with_ext_and_backslashes(self):
+ bewit = b'123456\\1356420707\\b82LLIxG5UDkaChLU953mC+SMrbniV1sb8KiZi9cSsc=\\xand\\yandz'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
+ credential_lookup = self.make_credential_lookup({
+ self.credentials['id']: self.credentials,
+ })
+ self.assertTrue(check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 10))
+
+ @raises(TokenExpired)
+ def test_validate_expired_bewit(self):
+ bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
+ credential_lookup = self.make_credential_lookup({
+ self.credentials['id']: self.credentials,
+ })
+ check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 1000)
+
+ @raises(CredentialsLookupError)
+ def test_validate_bewit_with_unknown_credentials(self):
+ bewit = b'123456\\1356420707\\IGYmLgIqLrCe8CxvKPs4JlWIA+UjWJJouwgARiVhCAg=\\'
+ bewit = urlsafe_b64encode(bewit).decode('ascii')
+ url = "https://example.com/somewhere/over/the/rainbow?bewit={bewit}".format(bewit=bewit)
+ credential_lookup = self.make_credential_lookup({
+ 'other_id': self.credentials,
+ })
+ check_bewit(url, credential_lookup=credential_lookup, now=1356420407 + 10)
diff --git a/third_party/python/mohawk/mohawk/util.py b/third_party/python/mohawk/mohawk/util.py
new file mode 100644
index 0000000000..46a28e94ce
--- /dev/null
+++ b/third_party/python/mohawk/mohawk/util.py
@@ -0,0 +1,267 @@
+from base64 import b64encode, urlsafe_b64encode
+import calendar
+import hashlib
+import hmac
+import logging
+import math
+import os
+import pprint
+import re
+import sys
+import time
+
+import six
+
+from .exc import (
+ BadHeaderValue,
+ HawkFail,
+ InvalidCredentials)
+
+
+HAWK_VER = 1
+log = logging.getLogger(__name__)
+allowable_header_keys = set(['id', 'ts', 'tsm', 'nonce', 'hash',
+ 'error', 'ext', 'mac', 'app', 'dlg'])
+
+
+def validate_credentials(creds):
+ if not hasattr(creds, '__getitem__'):
+ raise InvalidCredentials('credentials must be a dict-like object')
+ try:
+ creds['id']
+ creds['key']
+ creds['algorithm']
+ except KeyError:
+ etype, val, tb = sys.exc_info()
+ raise InvalidCredentials('{etype}: {val}'
+ .format(etype=etype, val=val))
+
+
+def random_string(length):
+ """Generates a random string for a given length."""
+ # this conservatively gets 8*length bits and then returns 6*length of
+ # them. Grabbing (6/8)*length bits could lose some entropy off the ends.
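+    # e.g. random_string(8) might return b'mdrPTY3H' (illustrative only).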
+ return urlsafe_b64encode(os.urandom(length))[:length]
+
+
+def calculate_payload_hash(payload, algorithm, content_type):
+ """Calculates a hash for a given payload."""
+ p_hash = hashlib.new(algorithm)
+
+ parts = []
+ parts.append('hawk.' + str(HAWK_VER) + '.payload\n')
+ parts.append(parse_content_type(content_type) + '\n')
+ parts.append(payload or '')
+ parts.append('\n')
+
+ for i, p in enumerate(parts):
+ # Make sure we are about to hash binary strings.
+ if not isinstance(p, six.binary_type):
+ p = p.encode('utf8')
+ p_hash.update(p)
+ parts[i] = p
+
+ log.debug('calculating payload hash from:\n{parts}'
+ .format(parts=pprint.pformat(parts)))
+
+ return b64encode(p_hash.digest())
+
+
+def calculate_mac(mac_type, resource, content_hash):
+ """Calculates a message authorization code (MAC)."""
+ normalized = normalize_string(mac_type, resource, content_hash)
+ log.debug(u'normalized resource for mac calc: {norm}'
+ .format(norm=normalized))
+ digestmod = getattr(hashlib, resource.credentials['algorithm'])
+
+ # Make sure we are about to hash binary strings.
+
+ if not isinstance(normalized, six.binary_type):
+ normalized = normalized.encode('utf8')
+ key = resource.credentials['key']
+ if not isinstance(key, six.binary_type):
+ key = key.encode('ascii')
+
+ result = hmac.new(key, normalized, digestmod)
+ return b64encode(result.digest())
+
+
+def calculate_ts_mac(ts, credentials):
+ """Calculates a message authorization code (MAC) for a timestamp."""
+ normalized = ('hawk.{hawk_ver}.ts\n{ts}\n'
+ .format(hawk_ver=HAWK_VER, ts=ts))
+ log.debug(u'normalized resource for ts mac calc: {norm}'
+ .format(norm=normalized))
+ digestmod = getattr(hashlib, credentials['algorithm'])
+
+ if not isinstance(normalized, six.binary_type):
+ normalized = normalized.encode('utf8')
+ key = credentials['key']
+ if not isinstance(key, six.binary_type):
+ key = key.encode('ascii')
+
+ result = hmac.new(key, normalized, digestmod)
+ return b64encode(result.digest())
+
+
+def normalize_string(mac_type, resource, content_hash):
+ """Serializes mac_type and resource into a HAWK string."""
+
+ normalized = [
+ 'hawk.' + str(HAWK_VER) + '.' + mac_type,
+ normalize_header_attr(resource.timestamp),
+ normalize_header_attr(resource.nonce),
+ normalize_header_attr(resource.method or ''),
+ normalize_header_attr(resource.name or ''),
+ normalize_header_attr(resource.host),
+ normalize_header_attr(resource.port),
+ normalize_header_attr(content_hash or '')
+ ]
+
+ # The blank lines are important. They follow what the Node Hawk lib does.
+
+ normalized.append(normalize_header_attr(resource.ext or ''))
+
+ if resource.app:
+ normalized.append(normalize_header_attr(resource.app))
+ normalized.append(normalize_header_attr(resource.dlg or ''))
+
+    # Add a trailing newline.
+ normalized.append('')
+
+ normalized = '\n'.join(normalized)
+
+ return normalized
+
+
+def parse_content_type(content_type):
+ """Cleans up content_type."""
+ if content_type:
+ return content_type.split(';')[0].strip().lower()
+ else:
+ return ''
+
+
+def parse_authorization_header(auth_header):
+ """
+ Example Authorization header:
+
+ 'Hawk id="dh37fgj492je", ts="1367076201", nonce="NPHgnG", ext="and
+ welcome!", mac="CeWHy4d9kbLGhDlkyw2Nh3PJ7SDOdZDa267KH4ZaNMY="'
+ """
+ attributes = {}
+
+ # Make sure we have a unicode object for consistency.
+ if isinstance(auth_header, six.binary_type):
+ auth_header = auth_header.decode('utf8')
+
+ parts = auth_header.split(',')
+ auth_scheme_parts = parts[0].split(' ')
+ if 'hawk' != auth_scheme_parts[0].lower():
+ raise HawkFail("Unknown scheme '{scheme}' when parsing header"
+ .format(scheme=auth_scheme_parts[0].lower()))
+
+    # Replace 'Hawk key="value"' with just 'key="value"',
+    # which matches the format of the remaining parts.
+ parts[0] = auth_scheme_parts[1]
+
+ for part in parts:
+ attr_parts = part.split('=')
+ key = attr_parts[0].strip()
+ if key not in allowable_header_keys:
+ raise HawkFail("Unknown Hawk key '{key}' when parsing header"
+ .format(key=key))
+
+ if len(attr_parts) > 2:
+ attr_parts[1] = '='.join(attr_parts[1:])
+
+        # Chop off the quotation marks.
+ value = attr_parts[1]
+
+ if attr_parts[1].find('"') == 0:
+ value = attr_parts[1][1:]
+
+ if value.find('"') > -1:
+ value = value[0:-1]
+
+ validate_header_attr(value, name=key)
+ value = unescape_header_attr(value)
+ attributes[key] = value
+
+ log.debug('parsed Hawk header: {header} into: \n{parsed}'
+ .format(header=auth_header, parsed=pprint.pformat(attributes)))
+ return attributes
+
+
+def strings_match(a, b):
+    # Constant-time string comparison; mitigates side-channel attacks.
+ if len(a) != len(b):
+ return False
+ result = 0
+
+ def byte_ints(buf):
+ for ch in buf:
+            # In Python 3, iterating a bytes object already yields
+            # integer values; on older Pythons we need ord().
+ if not isinstance(ch, int):
+ ch = ord(ch)
+ yield ch
+
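+    # XOR of equal bytes is zero, so OR-accumulating the XORs yields zero
+    # only when every byte matches; the loop always runs to the end, so
+    # timing does not leak the position of the first mismatch.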
+ for x, y in zip(byte_ints(a), byte_ints(b)):
+ result |= x ^ y
+ return result == 0
+
+
+def utc_now(offset_in_seconds=0.0):
+ # TODO: add support for SNTP server? See ntplib module.
+ return int(math.floor(calendar.timegm(time.gmtime()) +
+ float(offset_in_seconds)))
+
+
+# Allowed value characters:
+# !#$%&'()*+,-./:;<=>?@[]^_`{|}~ and space, a-z, A-Z, 0-9, \, "
+_header_attribute_chars = re.compile(
+ r"^[ a-zA-Z0-9_\!#\$%&'\(\)\*\+,\-\./\:;<\=>\?@\[\]\^`\{\|\}~\"\\]*$")
+
+
+def validate_header_attr(val, name=None):
+ if not _header_attribute_chars.match(val):
+ raise BadHeaderValue('header value name={name} value={val} '
+ 'contained an illegal character'
+ .format(name=name or '?', val=repr(val)))
+
+
+def escape_header_attr(val):
+
+ # Ensure we are working with Unicode for consistency.
+ if isinstance(val, six.binary_type):
+ val = val.decode('utf8')
+
+    # Escape backslashes, quotes, and newlines like the hawk reference code.
+ val = val.replace('\\', '\\\\')
+ val = val.replace('"', '\\"')
+ val = val.replace('\n', '\\n')
+ return val
+
+
+def unescape_header_attr(val):
+    # Undo the hawk escaping.
+ val = val.replace('\\n', '\n')
+ val = val.replace('\\\\', '\\').replace('\\"', '"')
+ return val
+
+
+def prepare_header_val(val):
+ val = escape_header_attr(val)
+ validate_header_attr(val)
+ return val
+
+
+def normalize_header_attr(val):
+ if not val:
+ val = ''
+
+ # Normalize like the hawk reference code.
+ val = escape_header_attr(val)
+ return val
diff --git a/third_party/python/mohawk/setup.cfg b/third_party/python/mohawk/setup.cfg
new file mode 100644
index 0000000000..861a9f5542
--- /dev/null
+++ b/third_party/python/mohawk/setup.cfg
@@ -0,0 +1,5 @@
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/third_party/python/mohawk/setup.py b/third_party/python/mohawk/setup.py
new file mode 100644
index 0000000000..ddaf9026c2
--- /dev/null
+++ b/third_party/python/mohawk/setup.py
@@ -0,0 +1,25 @@
+from setuptools import setup, find_packages
+
+
+setup(name='mohawk',
+ version='0.3.4',
+ description="Library for Hawk HTTP authorization",
+ long_description='',
+ author='Kumar McMillan, Austin King',
+ author_email='kumar.mcmillan@gmail.com',
+ license='MPL 2.0 (Mozilla Public License)',
+ url='https://github.com/kumar303/mohawk',
+ include_package_data=True,
+ classifiers=[
+ 'Intended Audience :: Developers',
+ 'Natural Language :: English',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 2',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3.3',
+ 'Topic :: Internet :: WWW/HTTP',
+ ],
+ packages=find_packages(exclude=['tests']),
+ install_requires=['six'])
diff --git a/third_party/python/moz.build b/third_party/python/moz.build
new file mode 100644
index 0000000000..c67e814a1b
--- /dev/null
+++ b/third_party/python/moz.build
@@ -0,0 +1,58 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Default bug component; the packages below override it as needed.
+with Files('**'):
+ BUG_COMPONENT = ('Firefox Build System', 'General')
+
+with Files('attrs/**'):
+ BUG_COMPONENT = ('Firefox Build System', 'Task Configuration')
+
+with Files('compare_locales/**'):
+ BUG_COMPONENT = ('Localization Infrastructure and Tools', 'compare-locales')
+
+with Files('fluent.migrate/**'):
+ BUG_COMPONENT = ('Localization Infrastructure and Tools', 'Fluent Migration')
+
+# Issues are actually tracked at https://github.com/projectfluent/python-fluent/issues
+with Files('fluent.syntax/**'):
+ BUG_COMPONENT = ('Localization Infrastructure and Tools', 'General')
+
+with Files('jsmin/**'):
+ BUG_COMPONENT = ('GeckoView', 'General')
+
+with Files('mohawk/**'):
+ BUG_COMPONENT = ('Taskcluster', 'Platform Libraries')
+
+with Files('mozilla_version/**'):
+ BUG_COMPONENT = ('Release Engineering', 'General')
+
+with Files('pyasn1/**'):
+ BUG_COMPONENT = ('Release Engineering', 'General')
+
+with Files('pyasn1_modules/**'):
+ BUG_COMPONENT = ('Core', 'Security: PSM')
+
+with Files('pylru/**'):
+ BUG_COMPONENT = ('mozilla.org', 'MozillaBuild')
+
+with Files('pytest/**'):
+ BUG_COMPONENT = ('Testing', 'General')
+
+with Files('pyyaml/**'):
+ BUG_COMPONENT = ('Taskcluster', 'General')
+
+with Files('rsa/**'):
+ BUG_COMPONENT = ('Core', 'Security: PSM')
+
+with Files('slugid/**'):
+ BUG_COMPONENT = ('Taskcluster', 'Platform Libraries')
+
+with Files('taskcluster/**'):
+ BUG_COMPONENT = ('Taskcluster', 'Platform Libraries')
+
+with Files('voluptuous/**'):
+ BUG_COMPONENT = ('Firefox Build System', 'Task Configuration')
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/METADATA b/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/METADATA
new file mode 100644
index 0000000000..5e92ff315a
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/METADATA
@@ -0,0 +1,16 @@
+Metadata-Version: 2.1
+Name: mozilla-repo-urls
+Version: 0.1.1
+Summary: Process Mozilla's repository URLs. The intent is to centralize URL parsing.
+Home-page: https://github.com/mozilla-releng/mozilla-repo-urls
+Author: Mozilla Release Engineering
+Author-email: release+python@mozilla.com
+License: MPL2
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Requires-Dist: giturlparse
+
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/RECORD b/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/RECORD
new file mode 100644
index 0000000000..de2ca1f2f1
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/RECORD
@@ -0,0 +1,13 @@
+mozilla_repo_urls/__init__.py,sha256=seFB5ueyozmIXZxBWVATYPbQCzNln2SWSTirc0yk_A0,108
+mozilla_repo_urls/errors.py,sha256=1WsLXnfGj9qCLf8TeSj740zj6jbeDDIlCcW9FDgHwBo,488
+mozilla_repo_urls/parser.py,sha256=x32OMEOHbGmT5L-T5C2zWRtvWpgyrDWtQ_QAPPL6Dws,1234
+mozilla_repo_urls/result.py,sha256=sJEJDdfML72MR3oZtVdMRo18FbPl8SP9-pQUyF4RQBE,952
+mozilla_repo_urls/platforms/__init__.py,sha256=5gwGbeTZUI-0VR0HmC3913e6AUTylDkjmcXYkg8QwYc,89
+mozilla_repo_urls/platforms/hgmo.py,sha256=8vzw9GUaBylHEY1qWGvdIbkzYdUHeVoYYkwUQOjJktE,1893
+test/__init__.py,sha256=ui4glNH_cCoz4Ex7hcZhHTcstOPJb2wcojFiNvvIALI,88
+test/test_integration.py,sha256=KcRIoMywtngzpIrqAHn7hMYiDZR_v7kQeJX68ZFG9JM,19412
+test/test_parser.py,sha256=KZ0WXwF9ZGcyoHRadquvz22aHfIWpBRY63J_niSvrjE,187
+mozilla_repo_urls-0.1.1.dist-info/METADATA,sha256=eCjZU6fj80r07zJjN5AVWsqyfMUa7UQMsQ_-ixnz-4g,628
+mozilla_repo_urls-0.1.1.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+mozilla_repo_urls-0.1.1.dist-info/top_level.txt,sha256=0LuRstNeetmfWdKTPvknIx8aDVzsf1KSmASCgOvKvDM,23
+mozilla_repo_urls-0.1.1.dist-info/RECORD,,
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/WHEEL b/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/WHEEL
new file mode 100644
index 0000000000..becc9a66ea
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/top_level.txt b/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..f70acba909
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls-0.1.1.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+mozilla_repo_urls
+test
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls/__init__.py b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/__init__.py
new file mode 100644
index 0000000000..9daa2a961a
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/__init__.py
@@ -0,0 +1,2 @@
+from mozilla_repo_urls.errors import * # noqa F401
+from mozilla_repo_urls.parser import parse # noqa F401
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls/errors.py b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/errors.py
new file mode 100644
index 0000000000..49c7d13b08
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/errors.py
@@ -0,0 +1,15 @@
+class RepoUrlsBaseError(Exception):
+ pass
+
+
+class InvalidRepoUrlError(RepoUrlsBaseError):
+ def __init__(self, url_string) -> None:
+ super().__init__(f"Could not parse URL: {url_string}")
+
+
+class UnsupportedPlatformError(RepoUrlsBaseError):
+ def __init__(self, url_string, host, supported_hosts) -> None:
+ super().__init__(
+ f"Unsupported version control host. Got: {host}. "
+ f"Expected one of: {supported_hosts}. URL: {url_string}"
+ )
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls/parser.py b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/parser.py
new file mode 100644
index 0000000000..895c23e59b
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/parser.py
@@ -0,0 +1,43 @@
+import giturlparse
+
+from mozilla_repo_urls.platforms import ADDITIONAL_PLATFORMS
+
+from .errors import InvalidRepoUrlError, UnsupportedPlatformError
+from .result import RepoUrlParsed
+
+for i, platform in enumerate(ADDITIONAL_PLATFORMS):
+ giturlparse.platforms.PLATFORMS.insert(i, platform)
+
+
+_SUPPORTED_PLATFORMS = ("hgmo", "github")
+
+
+SUPPORTED_HOSTS = tuple(
+ sorted(
+ [
+ host
+ for domains in [
+ platform[1].DOMAINS
+ for platform in giturlparse.platforms.PLATFORMS
+ if platform[0] in _SUPPORTED_PLATFORMS
+ ]
+ for host in domains
+ ]
+ )
+)
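+# With the stock giturlparse platforms plus the hgmo platform registered
+# above, this resolves to ("gist.github.com", "github.com", "hg.mozilla.org");
+# see test/test_parser.py.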
+
+
+def parse(url_string):
+ # Workaround for https://github.com/nephila/giturlparse/issues/43
+ url_string = url_string.rstrip("/")
+ parsed_info = giturlparse.parser.parse(url_string)
+ parsed_url = RepoUrlParsed(parsed_info)
+
+ if not parsed_url.valid:
+ raise InvalidRepoUrlError(url_string)
+
+ if parsed_url.host not in SUPPORTED_HOSTS:
+        # For error reporting purposes, the exception message lists the
+        # host names of every supported platform.
+ raise UnsupportedPlatformError(url_string, parsed_url.host, SUPPORTED_HOSTS)
+ return parsed_url
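+
+
+# Minimal usage sketch (illustrative values, mirroring test_integration.py):
+#
+#     parsed = parse("https://hg.mozilla.org/mozilla-central")
+#     parsed.repo_name                # "mozilla-central"
+#     parsed.taskcluster_role_prefix  # "repo:hg.mozilla.org/mozilla-central"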
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls/platforms/__init__.py b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/platforms/__init__.py
new file mode 100644
index 0000000000..330fff5256
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/platforms/__init__.py
@@ -0,0 +1,5 @@
+from .hgmo import HgmoPlatform
+
+ADDITIONAL_PLATFORMS = [
+ ("hgmo", HgmoPlatform()),
+]
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls/platforms/hgmo.py b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/platforms/hgmo.py
new file mode 100644
index 0000000000..539d267eca
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/platforms/hgmo.py
@@ -0,0 +1,55 @@
+from giturlparse.platforms.base import BasePlatform
+
+
+class HgmoPlatform(BasePlatform):
+ PATTERNS = {
+ "hg": (
+ r"(?P<protocols>(?P<protocol>hg))://"
+ r"(?P<domain>[^/]+?)"
+ r"(?P<transport_protocol>(|:https|:ssh))"
+ r"(?P<pathname>/"
+ r"(?P<repo>(([^/]+?)(/)?){1,2}))/?$"
+ ),
+ "https": (
+ r"(?P<protocols>(hg::)?(?P<protocol>https))://"
+ r"(?P<domain>[^/]+?)"
+ r"(?P<pathname>/"
+ r"(?P<repo>(([^/]+?)(/)?){1,2}))"
+ r"(?P<path_raw>(/raw-file/|/file/).+)?$"
+ ),
+ "ssh": (
+ r"(?P<protocols>(hg::)?(?P<protocol>ssh))(://)?"
+ r"(?P<domain>.+?)"
+ r"(?P<pathname>(:|/)"
+ r"(?P<repo>(([^/]+?)(/)?){1,2}))/?$"
+ ),
+ }
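+    # Illustrative URLs each pattern accepts (drawn from the integration
+    # tests):
+    #   hg:    hg://hg.mozilla.org:https/mozilla-central
+    #   https: hg::https://hg.mozilla.org/mozilla-central
+    #   ssh:   hg::ssh://hg.mozilla.org/mozilla-unified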
+ FORMATS = {
+ "hg": r"hg://%(domain)s:%(transport_protocol)s/%(repo)s",
+ "https": r"https://%(domain)s/%(repo)s%(path_raw)s",
+ "ssh": r"ssh://%(domain)s/%(repo)s",
+ }
+ DOMAINS = ("hg.mozilla.org",)
+ DEFAULTS = {"_user": ""}
+
+ @staticmethod
+ def clean_data(data):
+ data = BasePlatform.clean_data(data)
+ if "path_raw" in data and data["path_raw"].startswith(("/raw-file/", "/file")):
+ data["path"] = (
+ data["path_raw"].replace("/raw-file/", "").replace("/file/", "")
+ )
+
+ # git-cinnabar fixtures
+ if "transport_protocol" in data:
+ if not data["transport_protocol"]:
+ data["transport_protocol"] = "https"
+ if data["transport_protocol"].startswith(":"):
+ data["transport_protocol"] = data["transport_protocol"][1:]
+ data["protocols"][
+ 0
+ ] = f"{data['protocols'][0]}::{data['transport_protocol']}"
+ else:
+ data["transport_protocol"] = data["protocol"]
+
+ return data
diff --git a/third_party/python/mozilla_repo_urls/mozilla_repo_urls/result.py b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/result.py
new file mode 100644
index 0000000000..e420ff89e3
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/mozilla_repo_urls/result.py
@@ -0,0 +1,35 @@
+import giturlparse
+
+_DOT_GIT_SUFFIX = ".git"
+
+
+class RepoUrlParsed(giturlparse.result.GitUrlParsed):
+ @property
+ def hgmo(self) -> bool:
+ return self.platform == "hgmo"
+
+ @property
+ def git_cinnabar(self) -> bool:
+ # https://github.com/glandium/git-cinnabar
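+        # e.g. protocols == ["hg::https"] for hg::https://hg.mozilla.org/...
+        # URLs (illustrative).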
+ return len(self.protocols) > 0 and self.protocols[0].startswith("hg::")
+
+ @property
+ def repo_name(self) -> str:
+ return self.repo_path.split("/")[-1]
+
+ @property
+ def repo_path(self) -> str:
+ repo_path = (
+ self.pathname[: -len(_DOT_GIT_SUFFIX)]
+ if self.pathname.endswith(_DOT_GIT_SUFFIX)
+ else self.pathname
+ )
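+        # e.g. "/releases/mozilla-beta" -> "releases/mozilla-beta" and
+        # "mozilla-mobile/firefox-android.git" -> "mozilla-mobile/firefox-android".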
+ return repo_path.strip("/")
+
+ @property
+ def repo_type(self) -> str:
+ return "hg" if self.platform == "hgmo" and not self.git_cinnabar else "git"
+
+ @property
+ def taskcluster_role_prefix(self) -> str:
+ return f"repo:{self.host}/{self.repo_path}"
diff --git a/third_party/python/mozilla_repo_urls/test/__init__.py b/third_party/python/mozilla_repo_urls/test/__init__.py
new file mode 100644
index 0000000000..8010adcb1c
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/test/__init__.py
@@ -0,0 +1,6 @@
+from contextlib import contextmanager
+
+
+@contextmanager
+def does_not_raise():
+ yield
diff --git a/third_party/python/mozilla_repo_urls/test/test_integration.py b/third_party/python/mozilla_repo_urls/test/test_integration.py
new file mode 100644
index 0000000000..200c492227
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/test/test_integration.py
@@ -0,0 +1,493 @@
+from test import does_not_raise
+
+import pytest
+
+import mozilla_repo_urls
+
+
+@pytest.mark.parametrize(
+ "url_string, expectation, expected",
+ (
+ (
+ "https://hg.mozilla.org/mozilla-central",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": False,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-central",
+ "normalized": "https://hg.mozilla.org/mozilla-central",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/mozilla-central",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "https",
+ "protocols": ["https"],
+ "repo": "mozilla-central",
+ "repo_name": "mozilla-central",
+ "repo_path": "mozilla-central",
+ "repo_type": "hg",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-central",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/mozilla-central",
+ "https": "https://hg.mozilla.org/mozilla-central",
+ "ssh": "ssh://hg.mozilla.org/mozilla-central",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "https://hg.mozilla.org/releases/mozilla-beta",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": False,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "releases/mozilla-beta",
+ "normalized": "https://hg.mozilla.org/releases/mozilla-beta",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/releases/mozilla-beta",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "https",
+ "protocols": ["https"],
+ "repo": "releases/mozilla-beta",
+ "repo_name": "mozilla-beta",
+ "repo_path": "releases/mozilla-beta",
+ "repo_type": "hg",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/releases/mozilla-beta",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/releases/mozilla-beta",
+ "https": "https://hg.mozilla.org/releases/mozilla-beta",
+ "ssh": "ssh://hg.mozilla.org/releases/mozilla-beta",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "https://hg.mozilla.org/releases/mozilla-release",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": False,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "releases/mozilla-release",
+ "normalized": "https://hg.mozilla.org/releases/mozilla-release",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/releases/mozilla-release",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "https",
+ "protocols": ["https"],
+ "repo": "releases/mozilla-release",
+ "repo_name": "mozilla-release",
+ "repo_path": "releases/mozilla-release",
+ "repo_type": "hg",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/releases/mozilla-release", # noqa: E501
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/releases/mozilla-release",
+ "https": "https://hg.mozilla.org/releases/mozilla-release",
+ "ssh": "ssh://hg.mozilla.org/releases/mozilla-release",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "https://hg.mozilla.org/try",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": False,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "try",
+ "normalized": "https://hg.mozilla.org/try",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/try",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "https",
+ "protocols": ["https"],
+ "repo": "try",
+ "repo_name": "try",
+ "repo_path": "try",
+ "repo_type": "hg",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/try",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/try",
+ "https": "https://hg.mozilla.org/try",
+ "ssh": "ssh://hg.mozilla.org/try",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/config.yml", # noqa: E501
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": False,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-central",
+ "normalized": "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/config.yml", # noqa: E501
+ "path_raw": "/raw-file/tip/taskcluster/ci/config.yml",
+ "path": "tip/taskcluster/ci/config.yml",
+ "pathname": "/mozilla-central",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "https",
+ "protocols": ["https"],
+ "repo": "mozilla-central",
+ "repo_name": "mozilla-central",
+ "repo_path": "mozilla-central",
+ "repo_type": "hg",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-central",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/mozilla-central",
+ "https": "https://hg.mozilla.org/mozilla-central/raw-file/tip/taskcluster/ci/config.yml", # noqa: E501
+ "ssh": "ssh://hg.mozilla.org/mozilla-central",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "https://hg.mozilla.org/mozilla-central/file/tip/taskcluster/ci/config.yml", # noqa: E501
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": False,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-central",
+ "normalized": "https://hg.mozilla.org/mozilla-central/file/tip/taskcluster/ci/config.yml", # noqa: E501
+ "path_raw": "/file/tip/taskcluster/ci/config.yml",
+ "path": "tip/taskcluster/ci/config.yml",
+ "pathname": "/mozilla-central",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "https",
+ "protocols": ["https"],
+ "repo": "mozilla-central",
+ "repo_name": "mozilla-central",
+ "repo_path": "mozilla-central",
+ "repo_type": "hg",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-central",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/mozilla-central",
+ "https": "https://hg.mozilla.org/mozilla-central/file/tip/taskcluster/ci/config.yml", # noqa: E501
+ "ssh": "ssh://hg.mozilla.org/mozilla-central",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "https://github.com/mozilla-mobile/fenix",
+ does_not_raise(),
+ {
+ "github": True,
+ "git_cinnabar": False,
+ "groups": [],
+ "hgmo": False,
+ "host": "github.com",
+ "name": "fenix",
+ "normalized": "https://github.com/mozilla-mobile/fenix.git",
+ "owner": "mozilla-mobile",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/mozilla-mobile/fenix",
+ "platform": "github",
+ "port": "",
+ "protocol": "https",
+ "protocols": ["https"],
+ "repo": "fenix",
+ "repo_name": "fenix",
+ "repo_path": "mozilla-mobile/fenix",
+ "repo_type": "git",
+ "resource": "github.com",
+ "taskcluster_role_prefix": "repo:github.com/mozilla-mobile/fenix",
+ "urls": {
+ "git": "git://github.com/mozilla-mobile/fenix.git",
+ "https": "https://github.com/mozilla-mobile/fenix.git",
+ "ssh": "git@github.com:mozilla-mobile/fenix.git",
+ },
+ "user": "git",
+ "valid": True,
+ },
+ ),
+ (
+ "git@github.com:mozilla-mobile/firefox-android.git",
+ does_not_raise(),
+ {
+ "github": True,
+ "git_cinnabar": False,
+ "groups": [],
+ "hgmo": False,
+ "host": "github.com",
+ "name": "firefox-android",
+ "normalized": "git@github.com:mozilla-mobile/firefox-android.git",
+ "owner": "mozilla-mobile",
+ "path_raw": "",
+ "path": "",
+ "pathname": "mozilla-mobile/firefox-android.git",
+ "platform": "github",
+ "port": "",
+ "protocol": "ssh",
+ "protocols": [],
+ "repo": "firefox-android",
+ "repo_name": "firefox-android",
+ "repo_path": "mozilla-mobile/firefox-android",
+ "repo_type": "git",
+ "resource": "github.com",
+ "taskcluster_role_prefix": "repo:github.com/mozilla-mobile/firefox-android", # noqa: E501
+ "urls": {
+ "git": "git://github.com/mozilla-mobile/firefox-android.git",
+ "https": "https://github.com/mozilla-mobile/firefox-android.git",
+ "ssh": "git@github.com:mozilla-mobile/firefox-android.git",
+ },
+ "user": "git",
+ "valid": True,
+ },
+ ),
+ (
+ "ssh://hg.mozilla.org/mozilla-unified",
+ does_not_raise(),
+ {
+ "github": False,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-unified",
+ "normalized": "ssh://hg.mozilla.org/mozilla-unified",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/mozilla-unified",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "ssh",
+ "protocols": ["ssh"],
+ "repo": "mozilla-unified",
+ "repo_name": "mozilla-unified",
+ "repo_path": "mozilla-unified",
+ "repo_type": "hg",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-unified",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:ssh/mozilla-unified",
+ "https": "https://hg.mozilla.org/mozilla-unified",
+ "ssh": "ssh://hg.mozilla.org/mozilla-unified",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "hg::https://hg.mozilla.org/mozilla-unified",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": True,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-unified",
+ "normalized": "https://hg.mozilla.org/mozilla-unified",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/mozilla-unified",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "https",
+ "protocols": ["hg::https"],
+ "repo": "mozilla-unified",
+ "repo_name": "mozilla-unified",
+ "repo_path": "mozilla-unified",
+ "repo_type": "git",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-unified",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/mozilla-unified",
+ "https": "https://hg.mozilla.org/mozilla-unified",
+ "ssh": "ssh://hg.mozilla.org/mozilla-unified",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "hg::ssh://hg.mozilla.org/mozilla-unified",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": True,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-unified",
+ "normalized": "ssh://hg.mozilla.org/mozilla-unified",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/mozilla-unified",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "ssh",
+ "protocols": ["hg::ssh"],
+ "repo": "mozilla-unified",
+ "repo_name": "mozilla-unified",
+ "repo_path": "mozilla-unified",
+ "repo_type": "git",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-unified",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:ssh/mozilla-unified",
+ "https": "https://hg.mozilla.org/mozilla-unified",
+ "ssh": "ssh://hg.mozilla.org/mozilla-unified",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "hg://hg.mozilla.org/mozilla-central",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": True,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-central",
+ "normalized": "hg://hg.mozilla.org:https/mozilla-central",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/mozilla-central",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "hg",
+ "protocols": ["hg::https"],
+ "repo": "mozilla-central",
+ "repo_name": "mozilla-central",
+ "repo_path": "mozilla-central",
+ "repo_type": "git",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-central",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/mozilla-central",
+ "https": "https://hg.mozilla.org/mozilla-central",
+ "ssh": "ssh://hg.mozilla.org/mozilla-central",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "hg://hg.mozilla.org:https/mozilla-central",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": True,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-central",
+ "normalized": "hg://hg.mozilla.org:https/mozilla-central",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/mozilla-central",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "hg",
+ "protocols": ["hg::https"],
+ "repo": "mozilla-central",
+ "repo_name": "mozilla-central",
+ "repo_path": "mozilla-central",
+ "repo_type": "git",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-central",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:https/mozilla-central",
+ "https": "https://hg.mozilla.org/mozilla-central",
+ "ssh": "ssh://hg.mozilla.org/mozilla-central",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "hg://hg.mozilla.org:ssh/mozilla-central",
+ does_not_raise(),
+ {
+ "github": False,
+ "git_cinnabar": True,
+ "groups": [],
+ "hgmo": True,
+ "host": "hg.mozilla.org",
+ "name": "mozilla-central",
+ "normalized": "hg://hg.mozilla.org:ssh/mozilla-central",
+ "path_raw": "",
+ "path": "",
+ "pathname": "/mozilla-central",
+ "platform": "hgmo",
+ "port": "",
+ "protocol": "hg",
+ "protocols": ["hg::ssh"],
+ "repo": "mozilla-central",
+ "repo_name": "mozilla-central",
+ "repo_path": "mozilla-central",
+ "repo_type": "git",
+ "resource": "hg.mozilla.org",
+ "taskcluster_role_prefix": "repo:hg.mozilla.org/mozilla-central",
+ "urls": {
+ "hg": "hg://hg.mozilla.org:ssh/mozilla-central",
+ "https": "https://hg.mozilla.org/mozilla-central",
+ "ssh": "ssh://hg.mozilla.org/mozilla-central",
+ },
+ "user": "",
+ "valid": True,
+ },
+ ),
+ (
+ "https://some.unknown/repo",
+ pytest.raises(mozilla_repo_urls.InvalidRepoUrlError),
+ None,
+ ),
+ (
+ "https://gitlab.com/some-owner/some-repo",
+ pytest.raises(mozilla_repo_urls.UnsupportedPlatformError),
+ None,
+ ),
+ ),
+)
+def test_parse(url_string, expectation, expected):
+ with expectation:
+ url_object = mozilla_repo_urls.parse(url_string)
+ actual = {
+ attribute_name: getattr(url_object, attribute_name)
+ for attribute_name in expected.keys()
+ }
+ assert actual == expected
diff --git a/third_party/python/mozilla_repo_urls/test/test_parser.py b/third_party/python/mozilla_repo_urls/test/test_parser.py
new file mode 100644
index 0000000000..2c517395f7
--- /dev/null
+++ b/third_party/python/mozilla_repo_urls/test/test_parser.py
@@ -0,0 +1,9 @@
+from mozilla_repo_urls import parser
+
+
+def test_supported_hosts():
+ assert parser.SUPPORTED_HOSTS == (
+ "gist.github.com",
+ "github.com",
+ "hg.mozilla.org",
+ )
diff --git a/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/LICENSE b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/LICENSE
new file mode 100644
index 0000000000..e87a115e46
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+ means each individual or legal entity that creates, contributes to the
+ creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+ means the combination of the Contributions of others (if any) used by a
+ Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+ means Source Code Form to which the initial Contributor has attached the
+ notice in Exhibit A, the Executable Form of such Source Code Form, and
+ Modifications of such Source Code Form, in each case including portions
+ thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ a. that the initial Contributor has attached the notice described in
+ Exhibit B to the Covered Software; or
+
+ b. that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the terms of
+ a Secondary License.
+
+1.6. "Executable Form"
+
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+ means a work that combines Covered Software with other material, in a
+ separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+ means this document.
+
+1.9. "Licensable"
+
+ means having the right to grant, to the maximum extent possible, whether
+ at the time of the initial grant or subsequently, any and all of the
+ rights conveyed by this License.
+
+1.10. "Modifications"
+
+ means any of the following:
+
+ a. any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered Software; or
+
+ b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the License,
+ by the making, using, selling, offering for sale, having made, import,
+ or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+ means either the GNU General Public License, Version 2.0, the GNU Lesser
+ General Public License, Version 2.1, the GNU Affero General Public
+ License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that controls, is
+ controlled by, or is under common control with You. For purposes of this
+ definition, "control" means (a) the power, direct or indirect, to cause
+ the direction or management of such entity, whether by contract or
+ otherwise, or (b) ownership of more than fifty percent (50%) of the
+ outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+ Each Contributor hereby grants You a world-wide, royalty-free,
+ non-exclusive license:
+
+ a. under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+ b. under Patent Claims of such Contributor to make, use, sell, offer for
+ sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+ The licenses granted in Section 2.1 with respect to any Contribution
+ become effective for each Contribution on the date the Contributor first
+ distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+ The licenses granted in this Section 2 are the only rights granted under
+ this License. No additional rights or licenses will be implied from the
+ distribution or licensing of Covered Software under this License.
+ Notwithstanding Section 2.1(b) above, no patent license is granted by a
+ Contributor:
+
+ a. for any code that a Contributor has removed from Covered Software; or
+
+ b. for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+ c. under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+ This License does not grant any rights in the trademarks, service marks,
+ or logos of any Contributor (except as may be necessary to comply with
+ the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+ No Contributor makes additional grants as a result of Your choice to
+ distribute the Covered Software under a subsequent version of this
+ License (see Section 10.2) or under the terms of a Secondary License (if
+ permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+ Each Contributor represents that the Contributor believes its
+ Contributions are its original creation(s) or it has sufficient rights to
+ grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+ This License is not intended to limit any rights You have under
+ applicable copyright doctrines of fair use, fair dealing, or other
+ equivalents.
+
+2.7. Conditions
+
+ Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+ Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+ All distribution of Covered Software in Source Code Form, including any
+ Modifications that You create or to which You contribute, must be under
+ the terms of this License. You must inform recipients that the Source
+ Code Form of the Covered Software is governed by the terms of this
+ License, and how they can obtain a copy of this License. You may not
+ attempt to alter or restrict the recipients' rights in the Source Code
+ Form.
+
+3.2. Distribution of Executable Form
+
+ If You distribute Covered Software in Executable Form then:
+
+ a. such Covered Software must also be made available in Source Code Form,
+ as described in Section 3.1, and You must inform recipients of the
+ Executable Form how they can obtain a copy of such Source Code Form by
+ reasonable means in a timely manner, at a charge no more than the cost
+ of distribution to the recipient; and
+
+ b. You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter the
+ recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+ You may create and distribute a Larger Work under terms of Your choice,
+ provided that You also comply with the requirements of this License for
+ the Covered Software. If the Larger Work is a combination of Covered
+ Software with a work governed by one or more Secondary Licenses, and the
+ Covered Software is not Incompatible With Secondary Licenses, this
+ License permits You to additionally distribute such Covered Software
+ under the terms of such Secondary License(s), so that the recipient of
+ the Larger Work may, at their option, further distribute the Covered
+ Software under the terms of either this License or such Secondary
+ License(s).
+
+3.4. Notices
+
+ You may not remove or alter the substance of any license notices
+ (including copyright notices, patent notices, disclaimers of warranty, or
+ limitations of liability) contained within the Source Code Form of the
+ Covered Software, except that You may alter any license notices to the
+ extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+ You may choose to offer, and to charge a fee for, warranty, support,
+ indemnity or liability obligations to one or more recipients of Covered
+ Software. However, You may do so only on Your own behalf, and not on
+ behalf of any Contributor. You must make it absolutely clear that any
+ such warranty, support, indemnity, or liability obligation is offered by
+ You alone, and You hereby agree to indemnify every Contributor for any
+ liability incurred by such Contributor as a result of warranty, support,
+ indemnity or liability terms You offer. You may include additional
+ disclaimers of warranty and limitations of liability specific to any
+ jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+ If it is impossible for You to comply with any of the terms of this License
+ with respect to some or all of the Covered Software due to statute,
+ judicial order, or regulation then You must: (a) comply with the terms of
+ this License to the maximum extent possible; and (b) describe the
+ limitations and the code they affect. Such description must be placed in a
+ text file included with all distributions of the Covered Software under
+ this License. Except to the extent prohibited by statute or regulation,
+ such description must be sufficiently detailed for a recipient of ordinary
+ skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+ fail to comply with any of its terms. However, if You become compliant,
+ then the rights granted under this License from a particular Contributor
+ are reinstated (a) provisionally, unless and until such Contributor
+ explicitly and finally terminates Your grants, and (b) on an ongoing
+ basis, if such Contributor fails to notify You of the non-compliance by
+ some reasonable means prior to 60 days after You have come back into
+ compliance. Moreover, Your grants from a particular Contributor are
+ reinstated on an ongoing basis if such Contributor notifies You of the
+ non-compliance by some reasonable means, this is the first time You have
+ received notice of non-compliance with this License from such
+ Contributor, and You become compliant prior to 30 days after Your receipt
+ of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+ infringement claim (excluding declaratory judgment actions,
+ counter-claims, and cross-claims) alleging that a Contributor Version
+ directly or indirectly infringes any patent, then the rights granted to
+ You by any and all Contributors for the Covered Software under Section
+ 2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+ license agreements (excluding distributors and resellers) which have been
+ validly granted by You or Your distributors under this License prior to
+ termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+ Covered Software is provided under this License on an "as is" basis,
+ without warranty of any kind, either expressed, implied, or statutory,
+ including, without limitation, warranties that the Covered Software is free
+ of defects, merchantable, fit for a particular purpose or non-infringing.
+ The entire risk as to the quality and performance of the Covered Software
+ is with You. Should any Covered Software prove defective in any respect,
+ You (not any Contributor) assume the cost of any necessary servicing,
+ repair, or correction. This disclaimer of warranty constitutes an essential
+ part of this License. No use of any Covered Software is authorized under
+ this License except under this disclaimer.
+
+7. Limitation of Liability
+
+ Under no circumstances and under no legal theory, whether tort (including
+ negligence), contract, or otherwise, shall any Contributor, or anyone who
+ distributes Covered Software as permitted above, be liable to You for any
+ direct, indirect, special, incidental, or consequential damages of any
+ character including, without limitation, damages for lost profits, loss of
+ goodwill, work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses, even if such party shall have been
+ informed of the possibility of such damages. This limitation of liability
+ shall not apply to liability for death or personal injury resulting from
+ such party's negligence to the extent applicable law prohibits such
+ limitation. Some jurisdictions do not allow the exclusion or limitation of
+ incidental or consequential damages, so this exclusion and limitation may
+ not apply to You.
+
+8. Litigation
+
+ Any litigation relating to this License may be brought only in the courts
+ of a jurisdiction where the defendant maintains its principal place of
+ business and such litigation shall be governed by laws of that
+ jurisdiction, without reference to its conflict-of-law provisions. Nothing
+ in this Section shall prevent a party's ability to bring cross-claims or
+ counter-claims.
+
+9. Miscellaneous
+
+ This License represents the complete agreement concerning the subject
+ matter hereof. If any provision of this License is held to be
+ unenforceable, such provision shall be reformed only to the extent
+ necessary to make it enforceable. Any law or regulation which provides that
+ the language of a contract shall be construed against the drafter shall not
+ be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+ Mozilla Foundation is the license steward. Except as provided in Section
+ 10.3, no one other than the license steward has the right to modify or
+ publish new versions of this License. Each version will be given a
+ distinguishing version number.
+
+10.2. Effect of New Versions
+
+ You may distribute the Covered Software under the terms of the version
+ of the License under which You originally received the Covered Software,
+ or under the terms of any subsequent version published by the license
+ steward.
+
+10.3. Modified Versions
+
+ If you create software not governed by this License, and you want to
+ create a new license for such software, you may create and use a
+ modified version of this License if you rename the license and remove
+ any references to the name of the license steward (except to note that
+ such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+ Licenses If You choose to distribute Source Code Form that is
+ Incompatible With Secondary Licenses under the terms of this version of
+ the License, the notice described in Exhibit B of this License must be
+ attached.
+
+Exhibit A - Source Code Form License Notice
+
+ This Source Code Form is subject to the
+ terms of the Mozilla Public License, v.
+ 2.0. If a copy of the MPL was not
+ distributed with this file, You can
+ obtain one at
+ http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+ This Source Code Form is "Incompatible
+ With Secondary Licenses", as defined by
+ the Mozilla Public License, v. 2.0.
+
diff --git a/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/METADATA b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/METADATA
new file mode 100644
index 0000000000..3edee7bd3d
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/METADATA
@@ -0,0 +1,12 @@
+Metadata-Version: 2.1
+Name: mozilla-version
+Version: 2.0.0
+Summary: Process Firefox version numbers. Tells whether they are valid, whether they are nightlies or regular releases, and whether one version precedes another.
+Home-page: https://github.com/mozilla-releng/mozilla-version
+Author: Mozilla Release Engineering
+Author-email: release+python@mozilla.com
+License: MPL2
+Classifier: Programming Language :: Python :: 3
+License-File: LICENSE
+Requires-Dist: attrs (>=19.2)
+
diff --git a/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/RECORD b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/RECORD
new file mode 100644
index 0000000000..8da74e9bde
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/RECORD
@@ -0,0 +1,22 @@
+mozilla_version/__init__.py,sha256=ro9IDUmjUco6GHJhqbgynResbswRnh6HL5Iv1ttDuWU,60
+mozilla_version/balrog.py,sha256=p75Ln9W5IiEzO8C-HIDmKsgdpN4hc9zbvTEOMZodNhQ,4961
+mozilla_version/errors.py,sha256=DvBsNaJdhpaT3wb4E3Rnl7KAuxnqXlElBllYOcijwbQ,2468
+mozilla_version/fenix.py,sha256=zruk3WsTMCeaRaaNi5ezxaSAb8t_8CATXpLhbVryOPM,199
+mozilla_version/gecko.py,sha256=t4JcuF7mehXqFhKIxvFM3hrEx-qZpCmF9YcwWTUuCHM,24783
+mozilla_version/maven.py,sha256=jH0F-Rq3tJJ_N3KbNE1KBi0i_BlXGZCYyjZ7K_CRxoM,1988
+mozilla_version/mobile.py,sha256=3VJgbC90NpQMUTfy75zWyK4kMKjb3E7MnE_cfdHZriM,9520
+mozilla_version/parser.py,sha256=kwaw3UeAbWgUFtCmCheY9grKwabmq9tc64JyTlPrHS8,1335
+mozilla_version/version.py,sha256=MNTbIWmRWlN4jofkt2wKmyq3z3MWGWFDqrJYN1nKxj0,7929
+mozilla_version/test/__init__.py,sha256=r9z_NrSZeN6vCBiocNFI00XPm2bveSogzO-jsLa7Q-I,87
+mozilla_version/test/test_balrog.py,sha256=olr3NBdF1wtsz2Rfnb1aT3-cD7YgWQlDMfszmgz-ZgM,7839
+mozilla_version/test/test_errors.py,sha256=oR6PZorSCYDWDRrye560gz6MCXD2E4J-eyfIVCVoenw,933
+mozilla_version/test/test_fenix.py,sha256=qs8sD39N_cM9rNEZxyCaLuxx53hIIeHZIrJe_EBpYoQ,193
+mozilla_version/test/test_gecko.py,sha256=TbIoRzfvCqtbrdIOw8aeNi-eieuZBSCE9c7nNghsOps,24494
+mozilla_version/test/test_maven.py,sha256=_KaMDq47nQNctmPfA8zbTSq35vUFtaHyLkjdP9HL0zk,3526
+mozilla_version/test/test_mobile.py,sha256=uMNZhPE1Go4vJ7hxzIs23T9qBVbNYQVs6gjN32NTP4U,11948
+mozilla_version/test/test_version.py,sha256=AeWRvkgW739mEbq3JBd1hlY9hQqHro4h9gaUuLAChqU,7441
+mozilla_version-2.0.0.dist-info/LICENSE,sha256=YCIsKMGn9qksffmOXF9EWeYk5uKF4Lm5RGevX2qzND0,15922
+mozilla_version-2.0.0.dist-info/METADATA,sha256=3ZeZKRMprBj6yz8xBbHQTvK5h3vk18joltdq29yj2gY,482
+mozilla_version-2.0.0.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+mozilla_version-2.0.0.dist-info/top_level.txt,sha256=K1r8SXa4ny0i7OTfimG0Ct33oHkXtLjuU1E5_aHBe94,16
+mozilla_version-2.0.0.dist-info/RECORD,,
diff --git a/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/WHEEL b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/WHEEL
new file mode 100644
index 0000000000..57e3d840d5
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/top_level.txt b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..f5c7efa40b
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version-2.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+mozilla_version
diff --git a/third_party/python/mozilla_version/mozilla_version/__init__.py b/third_party/python/mozilla_version/mozilla_version/__init__.py
new file mode 100644
index 0000000000..ba46ee264d
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/__init__.py
@@ -0,0 +1 @@
+"""Defines characteristics of Mozilla's version numbers."""
diff --git a/third_party/python/mozilla_version/mozilla_version/balrog.py b/third_party/python/mozilla_version/mozilla_version/balrog.py
new file mode 100644
index 0000000000..ed2808314c
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/balrog.py
@@ -0,0 +1,142 @@
+"""Defines characteristics of a Balrog release name.
+
+Balrog is the server that delivers Firefox and Thunderbird updates. Release names follow
+the pattern "{product}-{version}-build{build_number}"
+
+Examples:
+ .. code-block:: python
+
+ from mozilla_version.balrog import BalrogReleaseName
+
+ balrog_release = BalrogReleaseName.parse('firefox-60.0.1-build1')
+
+ balrog_release.product # firefox
+ balrog_release.version.major_number # 60
+ str(balrog_release) # 'firefox-60.0.1-build1'
+
+ previous_release = BalrogReleaseName.parse('firefox-60.0-build2')
+ previous_release < balrog_release # True
+
+ invalid = BalrogReleaseName.parse('60.0.1') # raises PatternNotMatchedError
+ invalid = BalrogReleaseName.parse('firefox-60.0.1') # raises PatternNotMatchedError
+
+        # Releases can be built from version classes like FirefoxVersion
+ BalrogReleaseName('firefox', FirefoxVersion(60, 0, 1, 1)) # 'firefox-60.0.1-build1'
+
+"""
+
+import attr
+import re
+
+from mozilla_version.errors import PatternNotMatchedError
+from mozilla_version.parser import get_value_matched_by_regex
+from mozilla_version.gecko import (
+ GeckoVersion, FirefoxVersion, DeveditionVersion, FennecVersion, ThunderbirdVersion
+)
+
+
+_VALID_ENOUGH_BALROG_RELEASE_PATTERN = re.compile(
+ r"^(?P<product>[a-z]+)-(?P<version>.+)$", re.IGNORECASE
+)
+
+
+_SUPPORTED_PRODUCTS = {
+ 'firefox': FirefoxVersion,
+ 'devedition': DeveditionVersion,
+ 'fennec': FennecVersion,
+ 'thunderbird': ThunderbirdVersion,
+}
+
+
+def _supported_product(string):
+ product = string.lower()
+ if product not in _SUPPORTED_PRODUCTS:
+ raise PatternNotMatchedError(string, patterns=('unknown product',))
+ return product
+
+
+def _products_must_be_identical(method):
+ def checker(this, other):
+ if this.product != other.product:
+ raise ValueError(f'Cannot compare "{this.product}" and "{other.product}"')
+ return method(this, other)
+ return checker
+
+
+@attr.s(frozen=True, eq=False, hash=True)
+class BalrogReleaseName:
+ """Class that validates and handles Balrog release names.
+
+ Raises:
+ PatternNotMatchedError: if a parsed string doesn't match the pattern of a valid release
+ MissingFieldError: if a mandatory field is missing in the string. Mandatory fields are
+ `product`, `major_number`, `minor_number`, and `build_number`
+ ValueError: if an integer can't be cast or is not (strictly) positive
+ TooManyTypesError: if the string matches more than 1 `VersionType`
+ NoVersionTypeError: if the string matches none.
+
+ """
+
+ product = attr.ib(type=str, converter=_supported_product)
+ version = attr.ib(type=GeckoVersion)
+
+ def __attrs_post_init__(self):
+ """Ensure attributes are sane all together."""
+ if self.version.build_number is None:
+ raise PatternNotMatchedError(self, patterns=('build_number must exist',))
+
+ @classmethod
+ def parse(cls, release_string):
+        """Construct an object representing a valid Balrog release name."""
+ regex_matches = _VALID_ENOUGH_BALROG_RELEASE_PATTERN.match(release_string)
+ if regex_matches is None:
+ raise PatternNotMatchedError(release_string, (_VALID_ENOUGH_BALROG_RELEASE_PATTERN,))
+
+ product = get_value_matched_by_regex('product', regex_matches, release_string)
+ try:
+ VersionClass = _SUPPORTED_PRODUCTS[product.lower()]
+ except KeyError:
+ raise PatternNotMatchedError(release_string, patterns=('unknown product',))
+
+ version_string = get_value_matched_by_regex('version', regex_matches, release_string)
+ version = VersionClass.parse(version_string)
+
+ return cls(product, version)
+
+ def __str__(self):
+ """Implement string representation.
+
+ Computes a new string based on the given attributes.
+ """
+ version_string = str(self.version).replace('build', '-build')
+ return f'{self.product}-{version_string}'
+
+ @_products_must_be_identical
+ def __eq__(self, other):
+ """Implement `==` operator."""
+ return self.version == other.version
+
+ @_products_must_be_identical
+ def __ne__(self, other):
+ """Implement `!=` operator."""
+ return self.version != other.version
+
+ @_products_must_be_identical
+ def __lt__(self, other):
+ """Implement `<` operator."""
+ return self.version < other.version
+
+ @_products_must_be_identical
+ def __le__(self, other):
+ """Implement `<=` operator."""
+ return self.version <= other.version
+
+ @_products_must_be_identical
+ def __gt__(self, other):
+ """Implement `>` operator."""
+ return self.version > other.version
+
+ @_products_must_be_identical
+ def __ge__(self, other):
+ """Implement `>=` operator."""
+ return self.version >= other.version
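
For illustration, here is a minimal sketch of how the comparison guard above behaves (it assumes
the vendored ``mozilla_version`` package is importable; the release strings are made up):

.. code-block:: python

    from mozilla_version.balrog import BalrogReleaseName

    firefox = BalrogReleaseName.parse('firefox-60.0.1-build1')
    thunderbird = BalrogReleaseName.parse('thunderbird-60.0.1-build1')

    # Same product: comparison operators delegate to the underlying GeckoVersion.
    assert firefox == BalrogReleaseName.parse('firefox-60.0.1-build1')

    # Different products: _products_must_be_identical raises instead of silently
    # comparing the version numbers.
    try:
        _ = firefox < thunderbird
    except ValueError as error:
        print(error)  # Cannot compare "firefox" and "thunderbird"
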
diff --git a/third_party/python/mozilla_version/mozilla_version/errors.py b/third_party/python/mozilla_version/mozilla_version/errors.py
new file mode 100644
index 0000000000..356fe16cc3
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/errors.py
@@ -0,0 +1,75 @@
+"""Defines all errors reported by mozilla-version."""
+
+
+class PatternNotMatchedError(ValueError):
+ """Error when a string doesn't match an expected pattern.
+
+ Args:
+ string (str): The string it was unable to match.
+ patterns (sequence): The patterns it tried to match.
+ """
+
+ def __init__(self, string, patterns):
+ """Initialize error."""
+ number_of_patterns = len(patterns)
+ if number_of_patterns == 0:
+ raise ValueError('At least one pattern must be provided')
+ elif number_of_patterns == 1:
+ message = f'"{string}" does not match the pattern: {patterns[0]}'
+ else:
+ message = '"{}" does not match the patterns:\n - {}'.format(
+ string,
+ '\n - '.join(patterns)
+ )
+
+ super().__init__(message)
+
+
+class NoVersionTypeError(ValueError):
+    """Error when `version_string` matched the pattern, but its type could not be determined.
+
+ Args:
+        version_string (str): The string whose type could not be guessed.
+ """
+
+ def __init__(self, version_string):
+ """Initialize error."""
+ super().__init__(
+ 'Version "{}" matched the pattern of a valid version, but it is unable to '
+ 'find what type it is. This is likely a bug in mozilla-version'.format(
+ version_string
+ )
+ )
+
+
+class MissingFieldError(ValueError):
+ """Error when `version_string` lacks an expected field.
+
+ Args:
+        version_string (str): The string from which a given field could not be extracted.
+ field_name (str): The name of the missing field.
+ """
+
+ def __init__(self, version_string, field_name):
+ """Initialize error."""
+ super().__init__(
+ f'Release "{version_string}" does not contain a valid {field_name}'
+ )
+
+
+class TooManyTypesError(ValueError):
+ """Error when `version_string` has too many types.
+
+ Args:
+ version_string (str): The string that gave too many types.
+ first_matched_type (str): The name of the first detected type.
+        second_matched_type (str): The name of the second detected type.
+ """
+
+ def __init__(self, version_string, first_matched_type, second_matched_type):
+ """Initialize error."""
+ super().__init__(
+ 'Release "{}" cannot match types "{}" and "{}"'.format(
+ version_string, first_matched_type, second_matched_type
+ )
+ )
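
A quick sketch of the message formatting implemented above (the pattern strings are placeholders
chosen for the example):

.. code-block:: python

    from mozilla_version.errors import PatternNotMatchedError

    # A single pattern yields a one-line message.
    print(PatternNotMatchedError('60.x', patterns=('some pattern',)))
    # "60.x" does not match the pattern: some pattern

    # Several patterns are joined into a bulleted list.
    print(PatternNotMatchedError('60.x', patterns=('first pattern', 'second pattern')))
    # "60.x" does not match the patterns:
    #  - first pattern
    #  - second pattern
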
diff --git a/third_party/python/mozilla_version/mozilla_version/fenix.py b/third_party/python/mozilla_version/mozilla_version/fenix.py
new file mode 100644
index 0000000000..038745aeff
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/fenix.py
@@ -0,0 +1,3 @@
+"""Deprecated module for backwards compatibility."""
+# TODO remove in a future release - deprecated in favor of MobileVersion
+from mozilla_version.mobile import MobileVersion as FenixVersion # noqa
diff --git a/third_party/python/mozilla_version/mozilla_version/gecko.py b/third_party/python/mozilla_version/mozilla_version/gecko.py
new file mode 100644
index 0000000000..ab63b2c780
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/gecko.py
@@ -0,0 +1,672 @@
+"""Defines characteristics of a Gecko version number, including Firefox.
+
+Examples:
+ .. code-block:: python
+
+ from mozilla_version.gecko import FirefoxVersion
+
+ version = FirefoxVersion.parse('60.0.1')
+
+ version.major_number # 60
+ version.minor_number # 0
+ version.patch_number # 1
+
+ version.is_release # True
+ version.is_beta # False
+ version.is_nightly # False
+
+ str(version) # '60.0.1'
+
+ previous_version = FirefoxVersion.parse('60.0b14')
+ previous_version < version # True
+
+ previous_version.beta_number # 14
+ previous_version.major_number # 60
+ previous_version.minor_number # 0
+        previous_version.patch_number  # None
+
+ previous_version.is_beta # True
+ previous_version.is_release # False
+ previous_version.is_nightly # False
+
+ invalid_version = FirefoxVersion.parse('60.1') # raises PatternNotMatchedError
+ invalid_version = FirefoxVersion.parse('60.0.0') # raises PatternNotMatchedError
+ version = FirefoxVersion.parse('60.0') # valid
+
+        # Versions can be built from raw values
+        FirefoxVersion(60, 0)  # '60.0'
+        FirefoxVersion(60, 0, 1)  # '60.0.1'
+        FirefoxVersion(60, 1, 0)  # '60.1.0'
+        FirefoxVersion(60, 0, 1, 1)  # '60.0.1build1'
+        FirefoxVersion(60, 0, beta_number=1)  # '60.0b1'
+        FirefoxVersion(60, 0, is_nightly=True)  # '60.0a1'
+        FirefoxVersion(54, 0, is_aurora_or_devedition=True)  # '54.0a2'
+        FirefoxVersion(60, 0, is_esr=True)  # '60.0esr'
+        FirefoxVersion(60, 0, 1, is_esr=True)  # '60.0.1esr'
+
+"""
+
+import attr
+import re
+
+from mozilla_version.errors import (
+ PatternNotMatchedError, TooManyTypesError, NoVersionTypeError
+)
+from mozilla_version.parser import strictly_positive_int_or_none
+from mozilla_version.version import BaseVersion, VersionType
+
+
+def _find_type(version):
+ version_type = None
+
+ def ensure_version_type_is_not_already_defined(previous_type, candidate_type):
+ if previous_type is not None:
+ raise TooManyTypesError(
+ str(version), previous_type, candidate_type
+ )
+
+ if version.is_nightly:
+ version_type = VersionType.NIGHTLY
+ if version.is_aurora_or_devedition:
+ ensure_version_type_is_not_already_defined(
+ version_type, VersionType.AURORA_OR_DEVEDITION
+ )
+ version_type = VersionType.AURORA_OR_DEVEDITION
+ if version.is_beta:
+ ensure_version_type_is_not_already_defined(version_type, VersionType.BETA)
+ version_type = VersionType.BETA
+ if version.is_esr:
+ ensure_version_type_is_not_already_defined(version_type, VersionType.ESR)
+ version_type = VersionType.ESR
+ if version.is_release_candidate:
+ ensure_version_type_is_not_already_defined(version_type, VersionType.RELEASE_CANDIDATE)
+ version_type = VersionType.RELEASE_CANDIDATE
+ if version.is_release:
+ ensure_version_type_is_not_already_defined(version_type, VersionType.RELEASE)
+ version_type = VersionType.RELEASE
+
+ if version_type is None:
+ raise NoVersionTypeError(str(version))
+
+ return version_type
+
+
+@attr.s(frozen=True, eq=False, hash=True)
+class GeckoVersion(BaseVersion):
+ """Class that validates and handles version numbers for Gecko-based products.
+
+ You may want to use specific classes like FirefoxVersion. These classes define edge cases
+ that were shipped.
+
+ Raises:
+ PatternNotMatchedError: if the string doesn't match the pattern of a valid version number
+ MissingFieldError: if a mandatory field is missing in the string. Mandatory fields are
+ `major_number` and `minor_number`
+ ValueError: if an integer can't be cast or is not (strictly) positive
+ TooManyTypesError: if the string matches more than 1 `VersionType`
+ NoVersionTypeError: if the string matches none.
+
+ """
+
+    # XXX This pattern doesn't catch all subtleties of a Firefox version (e.g. 32.5 isn't valid).
+    # This regex is only meant to extract the numbers; validity checks are then performed by attrs
+    # and __attrs_post_init__()
+ _VALID_ENOUGH_VERSION_PATTERN = re.compile(r"""
+ ^(?P<major_number>\d+)
+ \.(?P<minor_number>\d+)
+ (\.(?P<patch_number>\d+))?
+ (\.(?P<old_fourth_number>\d+))?
+ (
+ (?P<is_nightly>a1)
+ |(?P<is_aurora_or_devedition>a2)
+ |rc(?P<release_candidate_number>\d+)
+ |b(?P<beta_number>\d+)
+ |(?P<is_esr>esr)
+ )?
+ -?(build(?P<build_number>\d+))?$""", re.VERBOSE)
+
+ _OPTIONAL_NUMBERS = BaseVersion._OPTIONAL_NUMBERS + (
+ 'old_fourth_number', 'release_candidate_number', 'beta_number', 'build_number'
+ )
+
+ _ALL_NUMBERS = BaseVersion._ALL_NUMBERS + _OPTIONAL_NUMBERS
+
+ _KNOWN_ESR_MAJOR_NUMBERS = (10, 17, 24, 31, 38, 45, 52, 60, 68, 78, 91, 102, 115)
+
+ _LAST_AURORA_DEVEDITION_AS_VERSION_TYPE = 54
+
+ build_number = attr.ib(type=int, converter=strictly_positive_int_or_none, default=None)
+ beta_number = attr.ib(type=int, converter=strictly_positive_int_or_none, default=None)
+ is_nightly = attr.ib(type=bool, default=False)
+ is_aurora_or_devedition = attr.ib(type=bool, default=False)
+ is_esr = attr.ib(type=bool, default=False)
+ old_fourth_number = attr.ib(type=int, converter=strictly_positive_int_or_none, default=None)
+ release_candidate_number = attr.ib(
+ type=int, converter=strictly_positive_int_or_none, default=None
+ )
+ version_type = attr.ib(init=False, default=attr.Factory(_find_type, takes_self=True))
+
+ def __attrs_post_init__(self):
+ """Ensure attributes are sane all together."""
+ # General checks
+ error_messages = [
+ pattern_message
+ for condition, pattern_message in ((
+ not self.is_four_digit_scheme and self.old_fourth_number is not None,
+ 'The old fourth number can only be defined on Gecko 1.5.x.y or 2.0.x.y',
+ ), (
+ self.beta_number is not None and self.patch_number is not None,
+ 'Beta number and patch number cannot be both defined',
+ ))
+ if condition
+ ]
+
+ # Firefox 5 is the first version to implement the rapid release model, which defines
+ # the scheme used so far.
+ if self.is_rapid_release_scheme:
+ error_messages.extend([
+ pattern_message
+ for condition, pattern_message in ((
+ self.release_candidate_number is not None,
+ 'Release candidate number cannot be defined starting Gecko 5',
+ ), (
+ self.minor_number == 0 and self.patch_number == 0,
+ 'Minor number and patch number cannot be both equal to 0',
+ ), (
+ self.minor_number != 0 and self.patch_number is None,
+ 'Patch number cannot be undefined if minor number is greater than 0',
+ ), (
+ self.patch_number is not None and self.is_nightly,
+ 'Patch number cannot be defined on a nightly version',
+ ), (
+ self.patch_number is not None and self.is_aurora_or_devedition,
+ 'Patch number cannot be defined on an aurora version',
+ ), (
+ self.major_number > self._LAST_AURORA_DEVEDITION_AS_VERSION_TYPE and
+ self.is_aurora_or_devedition,
+ 'Last aurora/devedition version was 54.0a2. Please use the DeveditionVersion '
+ 'class, past this version.',
+ ), (
+ self.major_number not in self._KNOWN_ESR_MAJOR_NUMBERS and self.is_esr,
+ '"{}" is not a valid ESR major number. Valid ones are: {}'.format(
+ self.major_number, self._KNOWN_ESR_MAJOR_NUMBERS
+ )
+ ))
+ if condition
+ ])
+ else:
+ if self.release_candidate_number is not None:
+ error_messages.extend([
+ pattern_message
+ for condition, pattern_message in ((
+ self.patch_number is not None,
+ 'Release candidate and patch number cannot be both defined',
+ ), (
+ self.old_fourth_number is not None,
+ 'Release candidate and the old fourth number cannot be both defined',
+ ), (
+ self.beta_number is not None,
+ 'Release candidate and beta number cannot be both defined',
+ ))
+ if condition
+ ])
+
+ if self.old_fourth_number is not None and self.patch_number != 0:
+ error_messages.append(
+ 'The old fourth number cannot be defined if the patch number is not 0 '
+ '(we have never shipped a release that did so)'
+ )
+
+ if error_messages:
+ raise PatternNotMatchedError(self, patterns=error_messages)
+
+ @classmethod
+ def parse(cls, version_string):
+        """Construct an object representing a valid Gecko version number."""
+ return super().parse(
+ version_string, regex_groups=('is_nightly', 'is_aurora_or_devedition', 'is_esr')
+ )
+
+ @property
+ def is_beta(self):
+ """Return `True` if `GeckoVersion` was built with a string matching a beta version."""
+ return self.beta_number is not None
+
+ @property
+ def is_release_candidate(self):
+ """Return `True` if `GeckoVersion` was built with a string matching an RC version."""
+ return self.release_candidate_number is not None
+
+ @property
+ def is_rapid_release_scheme(self):
+        """Return `True` if `GeckoVersion` was built against the rapid release scheme."""
+ return self.major_number >= 5
+
+ @property
+ def is_four_digit_scheme(self):
+ """Return `True` if `GeckoVersion` was built with the 4 digits schemes.
+
+ Only Firefox 1.5.x.y and 2.0.x.y were.
+ """
+ return (
+ all((self.major_number == 1, self.minor_number == 5)) or
+ all((self.major_number == 2, self.minor_number == 0))
+ )
+
+ @property
+ def is_release(self):
+ """Return `True` if `GeckoVersion` was built with a string matching a release version."""
+ return not any((
+ self.is_nightly, self.is_aurora_or_devedition, self.is_beta,
+ self.is_release_candidate, self.is_esr
+ ))
+
+ def __str__(self):
+ """Implement string representation.
+
+ Computes a new string based on the given attributes.
+ """
+ string = super().__str__()
+
+ if self.old_fourth_number is not None:
+ string = f'{string}.{self.old_fourth_number}'
+
+ if self.is_nightly:
+ string = f'{string}a1'
+ elif self.is_aurora_or_devedition:
+ string = f'{string}a2'
+ elif self.is_beta:
+ string = f'{string}b{self.beta_number}'
+ elif self.is_release_candidate:
+ string = f'{string}rc{self.release_candidate_number}'
+ elif self.is_esr:
+ string = f'{string}esr'
+
+ if self.build_number is not None:
+ string = f'{string}build{self.build_number}'
+
+ return string
+
+ def __eq__(self, other):
+ """Implement `==` operator.
+
+        A version is considered equal to another if all numbers match and if they are of the same
+        `VersionType`. As noted in `VersionType`, release and ESR are considered equal (if they
+        share the same numbers). If one version defines a build number but the other does not, the
+        build numbers are not considered in the comparison.
+
+ Examples:
+ .. code-block:: python
+
+ assert GeckoVersion.parse('60.0') == GeckoVersion.parse('60.0')
+ assert GeckoVersion.parse('60.0') == GeckoVersion.parse('60.0esr')
+ assert GeckoVersion.parse('60.0') == GeckoVersion.parse('60.0build1')
+ assert GeckoVersion.parse('60.0build1') == GeckoVersion.parse('60.0build1')
+
+ assert GeckoVersion.parse('60.0') != GeckoVersion.parse('61.0')
+ assert GeckoVersion.parse('60.0') != GeckoVersion.parse('60.1.0')
+ assert GeckoVersion.parse('60.0') != GeckoVersion.parse('60.0.1')
+ assert GeckoVersion.parse('60.0') != GeckoVersion.parse('60.0a1')
+ assert GeckoVersion.parse('60.0') != GeckoVersion.parse('60.0a2')
+ assert GeckoVersion.parse('60.0') != GeckoVersion.parse('60.0b1')
+ assert GeckoVersion.parse('60.0build1') != GeckoVersion.parse('60.0build2')
+
+ """
+ return super().__eq__(other)
+
+ def _compare(self, other):
+ """Compare this release with another.
+
+ Returns:
+ 0 if equal
+            < 0 if this precedes the other
+ > 0 if the other precedes this
+
+ """
+ if isinstance(other, str):
+ other = GeckoVersion.parse(other)
+ elif not isinstance(other, GeckoVersion):
+ raise ValueError(f'Cannot compare "{other}", type not supported!')
+
+ difference = super()._compare(other)
+ if difference != 0:
+ return difference
+
+ difference = self._substract_other_number_from_this_number(other, 'old_fourth_number')
+ if difference != 0:
+ return difference
+
+ channel_difference = self._compare_version_type(other)
+ if channel_difference != 0:
+ return channel_difference
+
+ if self.is_beta and other.is_beta:
+ beta_difference = self.beta_number - other.beta_number
+ if beta_difference != 0:
+ return beta_difference
+
+ if self.is_release_candidate and other.is_release_candidate:
+ rc_difference = self.release_candidate_number - other.release_candidate_number
+ if rc_difference != 0:
+ return rc_difference
+
+ # Build numbers are a special case. We might compare a regular version number
+ # (like "32.0b8") versus a release build (as in "32.0b8build1"). As a consequence,
+        # we only compare build_numbers when both versions have them.
+ try:
+ return self.build_number - other.build_number
+ except TypeError:
+ pass
+
+ return 0
+
+ def _compare_version_type(self, other):
+ return self.version_type.compare(other.version_type)
+
+ def _create_bump_kwargs(self, field):
+ if field == 'build_number' and self.build_number is None:
+ raise ValueError('Cannot bump the build number if it is not already set')
+
+ bump_kwargs = super()._create_bump_kwargs(field)
+
+ if field == 'major_number' and self.is_esr:
+ current_esr_index = self._KNOWN_ESR_MAJOR_NUMBERS.index(self.major_number)
+ try:
+ next_major_esr_number = self._KNOWN_ESR_MAJOR_NUMBERS[current_esr_index + 1]
+ except IndexError:
+ raise ValueError(
+ "Cannot bump the major number past last known major ESR. We don't know it yet."
+ )
+ bump_kwargs['major_number'] = next_major_esr_number
+
+ if field != 'build_number' and bump_kwargs.get('build_number') == 0:
+ del bump_kwargs['build_number']
+ if bump_kwargs.get('beta_number') == 0:
+ if self.is_beta:
+ bump_kwargs['beta_number'] = 1
+ else:
+ del bump_kwargs['beta_number']
+
+ if field != 'old_fourth_number' and not self.is_four_digit_scheme:
+ del bump_kwargs['old_fourth_number']
+ if bump_kwargs.get('minor_number') == 0 and bump_kwargs.get('patch_number') == 0:
+ del bump_kwargs['patch_number']
+
+ if self.is_four_digit_scheme:
+ if (
+ bump_kwargs.get('patch_number') == 0 and
+ bump_kwargs.get('old_fourth_number') in (0, None)
+ ):
+ del bump_kwargs['patch_number']
+ del bump_kwargs['old_fourth_number']
+ elif (
+ bump_kwargs.get('patch_number') is None and
+ bump_kwargs.get('old_fourth_number') is not None and
+ bump_kwargs.get('old_fourth_number') > 0
+ ):
+ bump_kwargs['patch_number'] = 0
+
+ if field != 'release_candidate_number' and self.is_rapid_release_scheme:
+ del bump_kwargs['release_candidate_number']
+
+ bump_kwargs['is_nightly'] = self.is_nightly
+ bump_kwargs['is_aurora_or_devedition'] = self.is_aurora_or_devedition
+ bump_kwargs['is_esr'] = self.is_esr
+
+ return bump_kwargs
+
+ def bump_version_type(self):
+ """Bump version type to the next one.
+
+ Returns:
+            A new GeckoVersion with the version type set to the next one. Build numbers are reset
+            to 1, if originally set.
+
+ For instance:
+ * 32.0a1 is bumped to 32.0b1
+ * 32.0bX is bumped to 32.0
+ * 32.0 is bumped to 32.0esr
+ * 31.0build1 is bumped to 31.0esrbuild1
+ * 31.0build2 is bumped to 31.0esrbuild1
+
+ """
+ try:
+ return self.__class__(**self._create_bump_version_type_kwargs())
+ except (ValueError, PatternNotMatchedError) as e:
+ raise ValueError(
+ 'Cannot bump version type for version "{}". New version number is not valid. '
+ 'Cause: {}'.format(self, e)
+ ) from e
+
+ def _create_bump_version_type_kwargs(self):
+ bump_version_type_kwargs = {
+ 'major_number': self.major_number,
+ 'minor_number': self.minor_number,
+ 'patch_number': self.patch_number,
+ }
+
+ if self.is_nightly and self.major_number <= self._LAST_AURORA_DEVEDITION_AS_VERSION_TYPE:
+ bump_version_type_kwargs['is_aurora_or_devedition'] = True
+ elif (
+ self.is_nightly and self.major_number > self._LAST_AURORA_DEVEDITION_AS_VERSION_TYPE or
+ self.is_aurora_or_devedition
+ ):
+ bump_version_type_kwargs['beta_number'] = 1
+ elif self.is_beta and not self.is_rapid_release_scheme:
+ bump_version_type_kwargs['release_candidate_number'] = 1
+ elif self.is_release:
+ bump_version_type_kwargs['is_esr'] = True
+ elif self.is_esr:
+ raise ValueError('There is no higher version type than ESR.')
+
+ if self.build_number is not None:
+ bump_version_type_kwargs['build_number'] = 1
+
+ return bump_version_type_kwargs
+
+
+class _VersionWithEdgeCases(GeckoVersion):
+ def __attrs_post_init__(self):
+ for edge_case in self._RELEASED_EDGE_CASES:
+ if all(
+ getattr(self, number_type) == edge_case.get(number_type, None)
+ for number_type in self._ALL_NUMBERS
+ if number_type != 'build_number'
+ ):
+ if self.build_number is None:
+ return
+ elif self.build_number == edge_case.get('build_number', None):
+ return
+
+ super().__attrs_post_init__()
+
+
+class FirefoxVersion(_VersionWithEdgeCases):
+ """Class that validates and handles Firefox version numbers."""
+
+ _RELEASED_EDGE_CASES = ({
+ 'major_number': 1,
+ 'minor_number': 5,
+ 'patch_number': 0,
+ 'old_fourth_number': 1,
+ 'release_candidate_number': 1,
+ }, {
+ 'major_number': 33,
+ 'minor_number': 1,
+ 'build_number': 1,
+ }, {
+ 'major_number': 33,
+ 'minor_number': 1,
+ 'build_number': 2,
+ }, {
+ 'major_number': 33,
+ 'minor_number': 1,
+ 'build_number': 3,
+ }, {
+ 'major_number': 38,
+ 'minor_number': 0,
+ 'patch_number': 5,
+ 'beta_number': 1,
+ 'build_number': 1,
+ }, {
+ 'major_number': 38,
+ 'minor_number': 0,
+ 'patch_number': 5,
+ 'beta_number': 1,
+ 'build_number': 2,
+ }, {
+ 'major_number': 38,
+ 'minor_number': 0,
+ 'patch_number': 5,
+ 'beta_number': 2,
+ 'build_number': 1,
+ }, {
+ 'major_number': 38,
+ 'minor_number': 0,
+ 'patch_number': 5,
+ 'beta_number': 3,
+ 'build_number': 1,
+ })
+
+
+class DeveditionVersion(GeckoVersion):
+    """Class that validates and handles Devedition after it became the equivalent of beta."""
+
+    # No edge cases were shipped
+
+ def __attrs_post_init__(self):
+ """Ensure attributes are sane all together."""
+ if (
+ (not self.is_beta) or
+ (self.major_number < 54) or
+ (self.major_number == 54 and self.beta_number < 11)
+ ):
+ raise PatternNotMatchedError(
+ self, patterns=('Devedition as a product must be a beta >= 54.0b11',)
+ )
+
+
+class FennecVersion(_VersionWithEdgeCases):
+ """Class that validates and handles Fennec (Firefox for Android) version numbers."""
+
+ _RELEASED_EDGE_CASES = ({
+ 'major_number': 33,
+ 'minor_number': 1,
+ 'build_number': 1,
+ }, {
+ 'major_number': 33,
+ 'minor_number': 1,
+ 'build_number': 2,
+ }, {
+ 'major_number': 38,
+ 'minor_number': 0,
+ 'patch_number': 5,
+ 'beta_number': 4,
+ 'build_number': 1,
+ })
+
+ _LAST_FENNEC_VERSION = 68
+
+ def __attrs_post_init__(self):
+ """Ensure attributes are sane all together."""
+        # Versions matching 68.Xa1, 68.XbN, or simply 68.X are expected since bug 1523402. The
+        # last form is needed because of the version.txt of beta.
+ if (
+ self.major_number == self._LAST_FENNEC_VERSION and
+ self.minor_number > 0 and
+ self.patch_number is None
+ ):
+ return
+
+ if self.major_number > self._LAST_FENNEC_VERSION:
+ raise PatternNotMatchedError(
+ self, patterns=(f'Last Fennec version is {self._LAST_FENNEC_VERSION}',)
+ )
+
+ super().__attrs_post_init__()
+
+ def _create_bump_kwargs(self, field):
+ kwargs = super()._create_bump_kwargs(field)
+
+ if (
+ field != 'patch_number' and
+ kwargs['major_number'] == self._LAST_FENNEC_VERSION and
+ (kwargs['is_nightly'] or kwargs.get('beta_number'))
+ ):
+ del kwargs['patch_number']
+
+ return kwargs
+
+
+class ThunderbirdVersion(_VersionWithEdgeCases):
+ """Class that validates and handles Thunderbird version numbers."""
+
+ _RELEASED_EDGE_CASES = ({
+ 'major_number': 1,
+ 'minor_number': 5,
+ 'beta_number': 1,
+ }, {
+ 'major_number': 1,
+ 'minor_number': 5,
+ 'beta_number': 2,
+ }, {
+ 'major_number': 3,
+ 'minor_number': 1,
+ 'beta_number': 1,
+ }, {
+ 'major_number': 3,
+ 'minor_number': 1,
+ }, {
+ 'major_number': 45,
+ 'minor_number': 1,
+ 'beta_number': 1,
+ 'build_number': 1,
+ }, {
+ 'major_number': 45,
+ 'minor_number': 2,
+ 'build_number': 1,
+ }, {
+ 'major_number': 45,
+ 'minor_number': 2,
+ 'build_number': 2,
+ }, {
+ 'major_number': 45,
+ 'minor_number': 2,
+ 'beta_number': 1,
+ 'build_number': 2,
+ })
+
+
+class GeckoSnapVersion(GeckoVersion):
+ """Class that validates and handles Gecko's Snap version numbers.
+
+    Snap is a Linux packaging format developed by Canonical. Valid numbers look like "63.0b7-1",
+    where "1" stands for "build1". Release Engineering set this scheme at the beginning of Snap,
+    and published snaps can't now be renamed to the regular pattern like "63.0b7-build1".
+ """
+
+ # Our Snaps are recent enough to not list any edge case, yet.
+
+ # Differences between this regex and the one in GeckoVersion:
+ # * no a2
+ # * no "build"
+ # * but mandatory dash and build number.
+ # Example: 63.0b7-1
+ _VALID_ENOUGH_VERSION_PATTERN = re.compile(r"""
+ ^(?P<major_number>\d+)
+ \.(?P<minor_number>\d+)
+ (\.(?P<patch_number>\d+))?
+ (
+ (?P<is_nightly>a1)
+ |b(?P<beta_number>\d+)
+ |(?P<is_esr>esr)
+ )?
+ -(?P<build_number>\d+)$""", re.VERBOSE)
+
+ def __str__(self):
+ """Implement string representation.
+
+        Returns a string like "63.0b7-1".
+ """
+ string = super().__str__()
+ return string.replace('build', '-')
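
A minimal sketch of the Snap round trip described above (illustrative only, assuming the vendored
package is importable):

.. code-block:: python

    from mozilla_version.gecko import GeckoSnapVersion

    snap = GeckoSnapVersion.parse('63.0b7-1')
    assert snap.beta_number == 7
    assert snap.build_number == 1
    assert str(snap) == '63.0b7-1'  # "build1" is rendered as "-1"

    # Snap versions still order like regular Gecko versions.
    assert snap < GeckoSnapVersion.parse('63.0b8-1')
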
diff --git a/third_party/python/mozilla_version/mozilla_version/maven.py b/third_party/python/mozilla_version/mozilla_version/maven.py
new file mode 100644
index 0000000000..19bc4f74c6
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/maven.py
@@ -0,0 +1,65 @@
+"""Defines characteristics of a Maven version at Mozilla."""
+
+import attr
+import re
+
+from mozilla_version.version import BaseVersion
+
+
+@attr.s(frozen=True, eq=False, hash=True)
+class MavenVersion(BaseVersion):
+ """Class that validates and handles Maven version numbers.
+
+ At Mozilla, Maven packages are used in projects like "GeckoView" or "Android-Components".
+ """
+
+ is_snapshot = attr.ib(type=bool, default=False)
+ is_beta = attr.ib(type=bool, default=False, init=False)
+ is_release_candidate = attr.ib(type=bool, default=False, init=False)
+
+ _VALID_ENOUGH_VERSION_PATTERN = re.compile(r"""
+ ^(?P<major_number>\d+)
+ \.(?P<minor_number>\d+)
+ (\.(?P<patch_number>\d+))?
+ (?P<is_snapshot>-SNAPSHOT)?$""", re.VERBOSE)
+
+ @classmethod
+ def parse(cls, version_string):
+ """Construct an object representing a valid Maven version number."""
+ return super().parse(version_string, regex_groups=('is_snapshot', ))
+
+ def __str__(self):
+ """Implement string representation.
+
+ Computes a new string based on the given attributes.
+ """
+ string = super().__str__()
+
+ if self.is_snapshot:
+ string = f'{string}-SNAPSHOT'
+
+ return string
+
+ def _compare(self, other):
+ if isinstance(other, str):
+ other = MavenVersion.parse(other)
+ elif not isinstance(other, MavenVersion):
+ raise ValueError(f'Cannot compare "{other}", type not supported!')
+
+ difference = super()._compare(other)
+ if difference != 0:
+ return difference
+
+ if not self.is_snapshot and other.is_snapshot:
+ return 1
+ elif self.is_snapshot and not other.is_snapshot:
+ return -1
+ else:
+ return 0
+
+ @property
+ def is_release(self):
+        """Return `True` if none of `is_beta`, `is_release_candidate`, or `is_snapshot` is set."""
+ return not any((
+ self.is_beta, self.is_release_candidate, self.is_snapshot
+ ))
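
For illustration, the snapshot ordering defined in ``_compare`` behaves as follows (a sketch with
made-up version numbers):

.. code-block:: python

    from mozilla_version.maven import MavenVersion

    snapshot = MavenVersion.parse('68.0.1-SNAPSHOT')
    release = MavenVersion.parse('68.0.1')

    assert snapshot.is_snapshot and not snapshot.is_release
    assert release.is_release

    # A -SNAPSHOT build precedes the release sharing the same numbers.
    assert snapshot < release
    assert str(snapshot) == '68.0.1-SNAPSHOT'
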
diff --git a/third_party/python/mozilla_version/mozilla_version/mobile.py b/third_party/python/mozilla_version/mozilla_version/mobile.py
new file mode 100644
index 0000000000..97e0f5b6aa
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/mobile.py
@@ -0,0 +1,250 @@
+"""Defines characteristics of a Mobile version at Mozilla."""
+
+import attr
+import re
+
+from mozilla_version.errors import PatternNotMatchedError, TooManyTypesError, NoVersionTypeError
+from mozilla_version.gecko import GeckoVersion
+from mozilla_version.version import BaseVersion, VersionType
+from mozilla_version.parser import strictly_positive_int_or_none
+
+
+def _find_type(version):
+ version_type = None
+
+ def ensure_version_type_is_not_already_defined(previous_type, candidate_type):
+ if previous_type is not None:
+ raise TooManyTypesError(
+ str(version), previous_type, candidate_type
+ )
+
+ if version.is_nightly:
+ version_type = VersionType.NIGHTLY
+ if version.is_beta:
+ ensure_version_type_is_not_already_defined(version_type, VersionType.BETA)
+ version_type = VersionType.BETA
+ if version.is_release_candidate:
+ ensure_version_type_is_not_already_defined(version_type, VersionType.RELEASE_CANDIDATE)
+ version_type = VersionType.RELEASE_CANDIDATE
+ if version.is_release:
+ ensure_version_type_is_not_already_defined(version_type, VersionType.RELEASE)
+ version_type = VersionType.RELEASE
+
+ if version_type is None:
+ raise NoVersionTypeError(str(version))
+
+ return version_type
+
+
+@attr.s(frozen=True, eq=False, hash=True)
+class MobileVersion(BaseVersion):
+ """Validate and handle version numbers for mobile products.
+
+ This covers applications such as Fenix and Focus for Android.
+ """
+
+ _VALID_ENOUGH_VERSION_PATTERN = re.compile(r"""
+ ^(?P<major_number>\d+)
+ \.(?P<minor_number>\d+)
+ (\.(?P<patch_number>\d+))?
+ (
+ (?P<is_nightly>a1)
+ |(-beta\.|b)(?P<beta_number>\d+)
+ |-rc\.(?P<release_candidate_number>\d+)
+ )?
+ -?(build(?P<build_number>\d+))?$""", re.VERBOSE)
+
+ _OPTIONAL_NUMBERS = (
+ 'patch_number', 'beta_number', 'release_candidate_number', 'build_number'
+ )
+
+ _ALL_NUMBERS = BaseVersion._MANDATORY_NUMBERS + _OPTIONAL_NUMBERS
+
+ # Focus-Android and Fenix were the first ones to be converted to the Gecko
+ # pattern (bug 1777255)
+ _FIRST_VERSION_TO_FOLLOW_GECKO_PATTERN = 104
+    # Android-Components was converted later (bug 1800611)
+ _LAST_VERSION_TO_FOLLOW_MAVEN_PATTERN = 108
+
+ build_number = attr.ib(type=int, converter=strictly_positive_int_or_none, default=None)
+ beta_number = attr.ib(type=int, converter=strictly_positive_int_or_none, default=None)
+ is_nightly = attr.ib(type=bool, default=False)
+ release_candidate_number = attr.ib(
+ type=int, converter=strictly_positive_int_or_none, default=None
+ )
+ version_type = attr.ib(init=False, default=attr.Factory(_find_type, takes_self=True))
+
+ def __attrs_post_init__(self):
+ """Ensure attributes are sane all together."""
+ error_messages = []
+
+ if self.is_gecko_pattern:
+ error_messages.extend([
+ pattern_message
+ for condition, pattern_message in ((
+ self.beta_number is not None and self.patch_number is not None,
+ 'Beta number and patch number cannot be both defined',
+ ), (
+ self.release_candidate_number is not None,
+ 'Release candidate number cannot be defined after Mobile v{}'.format(
+ self._FIRST_VERSION_TO_FOLLOW_GECKO_PATTERN
+ ),
+ ), (
+ self.major_number > self._LAST_VERSION_TO_FOLLOW_MAVEN_PATTERN and
+ self.minor_number == 0 and
+ self.patch_number == 0,
+ 'Minor number and patch number cannot be both equal to 0 past '
+ 'Mobile v{}'.format(
+ self._LAST_VERSION_TO_FOLLOW_MAVEN_PATTERN
+ ),
+ ), (
+ self.minor_number != 0 and self.patch_number is None,
+ 'Patch number cannot be undefined if minor number is greater than 0',
+ ))
+ if condition
+ ])
+ else:
+ error_messages.extend([
+ pattern_message
+ for condition, pattern_message in ((
+ self.patch_number is None,
+ 'Patch number must be defined before Mobile v{}'.format(
+ self._FIRST_VERSION_TO_FOLLOW_GECKO_PATTERN
+ ),
+ ), (
+ self.is_nightly,
+ 'Nightlies are not supported until Mobile v{}'.format(
+ self._FIRST_VERSION_TO_FOLLOW_GECKO_PATTERN
+ ),
+ ))
+ if condition
+ ])
+
+ if error_messages:
+ raise PatternNotMatchedError(self, patterns=error_messages)
+
+ @classmethod
+ def parse(cls, version_string):
+        """Construct an object representing a valid mobile version number."""
+ mobile_version = super().parse(
+ version_string, regex_groups=('is_nightly',)
+ )
+
+ # Betas are supported in both the old and the gecko pattern. Let's make sure
+ # the string we got follows the right rules
+ if mobile_version.is_beta:
+ if mobile_version.is_gecko_pattern and '-beta.' in version_string:
+ raise PatternNotMatchedError(
+ mobile_version, ['"-beta." can only be used before Mobile v{}'.format(
+ cls._FIRST_VERSION_TO_FOLLOW_GECKO_PATTERN
+ )]
+ )
+ if not mobile_version.is_gecko_pattern and re.search(r"\db\d", version_string):
+ raise PatternNotMatchedError(
+ mobile_version, [
+ '"b" cannot be used before Mobile v{} to define a '
+ 'beta version'.format(
+ cls._FIRST_VERSION_TO_FOLLOW_GECKO_PATTERN
+ )
+ ]
+ )
+
+ return mobile_version
+
+ @property
+ def is_gecko_pattern(self):
+        """Return `True` if `MobileVersion` was built against the Gecko scheme."""
+ return self.major_number >= self._FIRST_VERSION_TO_FOLLOW_GECKO_PATTERN
+
+ @property
+ def is_beta(self):
+ """Return `True` if `MobileVersion` was built with a string matching a beta version."""
+ return self.beta_number is not None
+
+ @property
+ def is_release_candidate(self):
+ """Return `True` if `MobileVersion` was built with a string matching an RC version."""
+ return self.release_candidate_number is not None
+
+ @property
+ def is_release(self):
+ """Return `True` if `MobileVersion` was built with a string matching a release version."""
+ return not any((
+ self.is_nightly, self.is_beta, self.is_release_candidate,
+ ))
+
+ def __str__(self):
+ """Implement string representation.
+
+ Computes a new string based on the given attributes.
+ """
+ if self.is_gecko_pattern:
+ string = str(GeckoVersion(
+ major_number=self.major_number,
+ minor_number=self.minor_number,
+ patch_number=self.patch_number,
+ build_number=self.build_number,
+ beta_number=self.beta_number,
+ is_nightly=self.is_nightly,
+ ))
+ else:
+ string = super().__str__()
+ if self.is_beta:
+ string = f'{string}-beta.{self.beta_number}'
+ elif self.is_release_candidate:
+ string = f'{string}-rc.{self.release_candidate_number}'
+
+ return string
+
+ def _compare(self, other):
+ if isinstance(other, str):
+ other = MobileVersion.parse(other)
+ elif not isinstance(other, MobileVersion):
+ raise ValueError(f'Cannot compare "{other}", type not supported!')
+
+ difference = super()._compare(other)
+ if difference != 0:
+ return difference
+
+ channel_difference = self._compare_version_type(other)
+ if channel_difference != 0:
+ return channel_difference
+
+ if self.is_beta and other.is_beta:
+ beta_difference = self.beta_number - other.beta_number
+ if beta_difference != 0:
+ return beta_difference
+
+ if self.is_release_candidate and other.is_release_candidate:
+ rc_difference = self.release_candidate_number - other.release_candidate_number
+ if rc_difference != 0:
+ return rc_difference
+
+ return 0
+
+ def _compare_version_type(self, other):
+ return self.version_type.compare(other.version_type)
+
+ def _create_bump_kwargs(self, field):
+ bump_kwargs = super()._create_bump_kwargs(field)
+
+ if field != 'build_number' and bump_kwargs.get('build_number') == 0:
+ del bump_kwargs['build_number']
+ if bump_kwargs.get('beta_number') == 0:
+ if self.is_beta:
+ bump_kwargs['beta_number'] = 1
+ else:
+ del bump_kwargs['beta_number']
+
+ if field != 'release_candidate_number':
+ del bump_kwargs['release_candidate_number']
+
+ if (
+ field == 'major_number'
+ and bump_kwargs.get('major_number') == self._FIRST_VERSION_TO_FOLLOW_GECKO_PATTERN
+ ):
+ del bump_kwargs['patch_number']
+
+ bump_kwargs['is_nightly'] = self.is_nightly
+
+ return bump_kwargs
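
A brief sketch of the scheme switch handled above (illustrative; the version strings are made up,
but follow the patterns defined in this module):

.. code-block:: python

    from mozilla_version.errors import PatternNotMatchedError
    from mozilla_version.mobile import MobileVersion

    old_beta = MobileVersion.parse('103.0.0-beta.2')  # Maven-flavored, before v104
    new_beta = MobileVersion.parse('104.0b2')         # Gecko-flavored, from v104 on

    assert old_beta.is_beta and new_beta.is_beta
    assert old_beta < new_beta

    # Mixing the schemes is rejected at parse time.
    try:
        MobileVersion.parse('104.0-beta.2')
    except PatternNotMatchedError:
        pass  # "-beta." can only be used before v104
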
diff --git a/third_party/python/mozilla_version/mozilla_version/parser.py b/third_party/python/mozilla_version/mozilla_version/parser.py
new file mode 100644
index 0000000000..1b96090c5a
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/parser.py
@@ -0,0 +1,48 @@
+"""Defines parser helpers."""
+
+from mozilla_version.errors import MissingFieldError
+
+
+def get_value_matched_by_regex(field_name, regex_matches, string):
+ """Ensure value stored in regex group exists."""
+ try:
+ value = regex_matches.group(field_name)
+ if value is not None:
+ return value
+ except IndexError:
+ pass
+
+ raise MissingFieldError(string, field_name)
+
+
+def does_regex_have_group(regex_matches, group_name):
+ """Return a boolean depending on whether a regex group is matched."""
+ try:
+ return regex_matches.group(group_name) is not None
+ except IndexError:
+ return False
+
+
+def positive_int(val):
+ """Parse `val` into a positive integer."""
+ if isinstance(val, float):
+ raise ValueError(f'"{val}" must not be a float')
+ val = int(val)
+ if val >= 0:
+ return val
+ raise ValueError(f'"{val}" must be positive')
+
+
+def positive_int_or_none(val):
+ """Parse `val` into either `None` or a positive integer."""
+ if val is None:
+ return val
+ return positive_int(val)
+
+
+def strictly_positive_int_or_none(val):
+ """Parse `val` into either `None` or a strictly positive integer."""
+ val = positive_int_or_none(val)
+ if val is None or val > 0:
+ return val
+ raise ValueError(f'"{val}" must be strictly positive')
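
These converters are small but strict; a sketch of their behavior (values chosen for
illustration):

.. code-block:: python

    from mozilla_version.parser import positive_int, strictly_positive_int_or_none

    assert positive_int('0') == 0                        # strings are cast with int()
    assert strictly_positive_int_or_none(None) is None   # None passes through

    for bad_value in (3.0, -1):  # floats and negative numbers are rejected
        try:
            positive_int(bad_value)
        except ValueError:
            pass

    try:
        strictly_positive_int_or_none(0)  # zero is positive, but not strictly
    except ValueError:
        pass
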
diff --git a/third_party/python/mozilla_version/mozilla_version/version.py b/third_party/python/mozilla_version/mozilla_version/version.py
new file mode 100644
index 0000000000..6f7a603a5c
--- /dev/null
+++ b/third_party/python/mozilla_version/mozilla_version/version.py
@@ -0,0 +1,236 @@
+"""Defines common characteristics of a version at Mozilla."""
+
+import attr
+import re
+
+from enum import Enum
+
+from mozilla_version.errors import MissingFieldError, PatternNotMatchedError
+from mozilla_version.parser import (
+ get_value_matched_by_regex,
+ does_regex_have_group,
+ positive_int,
+ positive_int_or_none
+)
+
+
+@attr.s(frozen=True, eq=False, hash=True)
+class BaseVersion:
+ """Class that validates and handles general version numbers."""
+
+ major_number = attr.ib(type=int, converter=positive_int)
+ minor_number = attr.ib(type=int, converter=positive_int)
+ patch_number = attr.ib(type=int, converter=positive_int_or_none, default=None)
+
+ _MANDATORY_NUMBERS = ('major_number', 'minor_number')
+ _OPTIONAL_NUMBERS = ('patch_number', )
+ _ALL_NUMBERS = _MANDATORY_NUMBERS + _OPTIONAL_NUMBERS
+
+ _VALID_ENOUGH_VERSION_PATTERN = re.compile(r"""
+ ^(?P<major_number>\d+)
+ \.(?P<minor_number>\d+)
+ (\.(?P<patch_number>\d+))?$""", re.VERBOSE)
+
+ @classmethod
+ def parse(cls, version_string, regex_groups=()):
+ """Construct an object representing a valid version number."""
+ regex_matches = cls._VALID_ENOUGH_VERSION_PATTERN.match(version_string)
+
+ if regex_matches is None:
+ raise PatternNotMatchedError(version_string, (cls._VALID_ENOUGH_VERSION_PATTERN,))
+
+ kwargs = {}
+
+ for field in cls._MANDATORY_NUMBERS:
+ kwargs[field] = get_value_matched_by_regex(field, regex_matches, version_string)
+ for field in cls._OPTIONAL_NUMBERS:
+ try:
+ kwargs[field] = get_value_matched_by_regex(field, regex_matches, version_string)
+ except MissingFieldError:
+ pass
+
+ for regex_group in regex_groups:
+ kwargs[regex_group] = does_regex_have_group(regex_matches, regex_group)
+
+ return cls(**kwargs)
+
+ def __str__(self):
+ """Implement string representation.
+
+ Computes a new string based on the given attributes.
+ """
+ semvers = [str(self.major_number), str(self.minor_number)]
+ if self.patch_number is not None:
+ semvers.append(str(self.patch_number))
+
+ return '.'.join(semvers)
+
+ def __eq__(self, other):
+ """Implement `==` operator."""
+ return self._compare(other) == 0
+
+ def __ne__(self, other):
+ """Implement `!=` operator."""
+ return self._compare(other) != 0
+
+ def __lt__(self, other):
+ """Implement `<` operator."""
+ return self._compare(other) < 0
+
+ def __le__(self, other):
+ """Implement `<=` operator."""
+ return self._compare(other) <= 0
+
+ def __gt__(self, other):
+ """Implement `>` operator."""
+ return self._compare(other) > 0
+
+ def __ge__(self, other):
+ """Implement `>=` operator."""
+ return self._compare(other) >= 0
+
+ def _compare(self, other):
+ """Compare this release with another.
+
+ Returns:
+ 0 if equal
+            < 0 if this precedes the other
+ > 0 if the other precedes this
+
+ """
+ if isinstance(other, str):
+ other = BaseVersion.parse(other)
+ elif not isinstance(other, BaseVersion):
+ raise ValueError(f'Cannot compare "{other}", type not supported!')
+
+ for field in ('major_number', 'minor_number', 'patch_number'):
+ difference = self._substract_other_number_from_this_number(other, field)
+ if difference != 0:
+ return difference
+
+ return 0
+
+ def _substract_other_number_from_this_number(self, other, field):
+ # BaseVersion sets unmatched numbers to None. E.g.: "32.0" sets the patch_number to None.
+ # Because of this behavior, `getattr(self, 'patch_number')` returns None too. That's why
+ # we can't call `getattr(self, field, 0)` directly, it will return None for all unmatched
+ # numbers
+ this_number = getattr(self, field, None)
+ this_number = 0 if this_number is None else this_number
+ other_number = getattr(other, field, None)
+ other_number = 0 if other_number is None else other_number
+
+ return this_number - other_number
+
+ def bump(self, field):
+        """Bump the number defined by `field`.
+
+ Returns:
+ A new BaseVersion with the right field bumped and the following ones set to 0,
+ if they exist or if they need to be set.
+
+ For instance:
+ * 32.0 is bumped to 33.0, because the patch number does not exist
+ * 32.0.1 is bumped to 33.0.0, because the patch number exists
+ * 32.0 is bumped to 32.1.0, because patch number must be defined if the minor number
+ is not 0.
+
+ """
+ try:
+ return self.__class__(**self._create_bump_kwargs(field))
+ except (ValueError, PatternNotMatchedError) as e:
+ raise ValueError(
+ f'Cannot bump "{field}". New version number is not valid. Cause: {e}'
+ ) from e
+
+ def _create_bump_kwargs(self, field):
+ if field not in self._ALL_NUMBERS:
+ raise ValueError(f'Unknown field "{field}"')
+
+ kwargs = {}
+ has_requested_field_been_met = False
+ should_set_optional_numbers = False
+ for current_field in self._ALL_NUMBERS:
+ current_number = getattr(self, current_field, None)
+ if current_field == field:
+ has_requested_field_been_met = True
+ new_number = 1 if current_number is None else current_number + 1
+ if new_number == 1 and current_field == 'minor_number':
+ should_set_optional_numbers = True
+ kwargs[current_field] = new_number
+ else:
+ if (
+ has_requested_field_been_met and
+ (
+ current_field not in self._OPTIONAL_NUMBERS or
+ should_set_optional_numbers or
+ current_number is not None
+ )
+ ):
+ new_number = 0
+ else:
+ new_number = current_number
+ kwargs[current_field] = new_number
+
+ return kwargs
+
+
+class VersionType(Enum):
+ """Enum that sorts types of versions (e.g.: nightly, beta, release, esr).
+
+ Supports comparison. `ESR` is considered higher than `RELEASE` (even if they technically have
+ the same codebase). For instance: 60.0.1 < 60.0.1esr but 61.0 > 60.0.1esr.
+ This choice has a practical use case: if you have a list of Release and ESR version, you can
+ easily extract one kind or the other thanks to the VersionType.
+
+ Examples:
+ .. code-block:: python
+
+ assert VersionType.NIGHTLY == VersionType.NIGHTLY
+ assert VersionType.ESR > VersionType.RELEASE
+
+ """
+
+ NIGHTLY = 1
+ AURORA_OR_DEVEDITION = 2
+ BETA = 3
+ RELEASE_CANDIDATE = 4
+ RELEASE = 5
+ ESR = 6
+
+ def __eq__(self, other):
+ """Implement `==` operator."""
+ return self.compare(other) == 0
+
+ def __ne__(self, other):
+ """Implement `!=` operator."""
+ return self.compare(other) != 0
+
+ def __lt__(self, other):
+ """Implement `<` operator."""
+ return self.compare(other) < 0
+
+ def __le__(self, other):
+ """Implement `<=` operator."""
+ return self.compare(other) <= 0
+
+ def __gt__(self, other):
+ """Implement `>` operator."""
+ return self.compare(other) > 0
+
+ def __ge__(self, other):
+ """Implement `>=` operator."""
+ return self.compare(other) >= 0
+
+ def compare(self, other):
+        """Compare this `VersionType` with another.
+
+ Returns:
+ 0 if equal
+            < 0 if this precedes the other
+ > 0 if the other precedes this
+
+ """
+ return self.value - other.value
+
+ __hash__ = Enum.__hash__
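
To make the bump semantics concrete, a brief sketch (the expected strings come from the ``bump``
docstring above):

.. code-block:: python

    from mozilla_version.version import BaseVersion

    # The bumped field resets the ones following it, when they exist.
    assert str(BaseVersion.parse('32.0.1').bump('major_number')) == '33.0.0'
    assert str(BaseVersion.parse('32.0').bump('major_number')) == '33.0'

    # Bumping the minor number forces the patch number into existence.
    assert str(BaseVersion.parse('32.0').bump('minor_number')) == '32.1.0'
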
diff --git a/third_party/python/multidict/CHANGES.rst b/third_party/python/multidict/CHANGES.rst
new file mode 100644
index 0000000000..c10b8c297a
--- /dev/null
+++ b/third_party/python/multidict/CHANGES.rst
@@ -0,0 +1,255 @@
+=========
+Changelog
+=========
+
+..
+ You should *NOT* be adding new change log entries to this file, this
+ file is managed by towncrier. You *may* edit previous change logs to
+ fix problems like typo corrections or such.
+ To add a new change log entry, please see
+ https://pip.pypa.io/en/latest/development/#adding-a-news-entry
+ we named the news folder "changes".
+
+ WARNING: Don't drop the next directive!
+
+.. towncrier release notes start
+
+5.1.0 (2020-12-03)
+==================
+
+Features
+--------
+
+- Support ``GenericAliases`` (``MultiDict[str]``) for Python 3.9+
+ `#553 <https://github.com/aio-libs/multidict/issues/553>`_
+
+
+Bugfixes
+--------
+
+- Synchronize the declared supported Python versions in ``setup.py`` with actually supported and tested ones.
+ `#552 <https://github.com/aio-libs/multidict/issues/552>`_
+
+
+----
+
+
+5.0.1 (2020-11-14)
+==================
+
+Bugfixes
+--------
+
+- Provide x86 Windows wheels
+ `#550 <https://github.com/aio-libs/multidict/issues/550>`_
+
+
+----
+
+
+5.0.0 (2020-10-12)
+==================
+
+Features
+--------
+
+- Provide wheels for ``aarch64``, ``i686``, ``ppc64le``, ``s390x`` architectures on Linux
+ as well as ``x86_64``.
+ `#500 <https://github.com/aio-libs/multidict/issues/500>`_
+- Provide wheels for Python 3.9.
+ `#534 <https://github.com/aio-libs/multidict/issues/534>`_
+
+Removal
+-------
+
+- Drop Python 3.5 support; Python 3.6 is the minimal supported Python version.
+
+Misc
+----
+
+- `#503 <https://github.com/aio-libs/multidict/issues/503>`_
+
+
+----
+
+
+4.7.6 (2020-05-15)
+==================
+
+Bugfixes
+--------
+
+- Fixed an issue with some versions of the ``wheel`` dist
+ failing because of being unable to detect the license file.
+ `#481 <https://github.com/aio-libs/multidict/issues/481>`_
+
+
+----
+
+
+4.7.5 (2020-02-21)
+==================
+
+Bugfixes
+--------
+
+- Fixed creating and updating of MultiDict from a sequence of pairs and keyword
+  arguments. Previously, passing a list argument modified it in place, and other sequences
+ caused an error.
+ `#457 <https://github.com/aio-libs/multidict/issues/457>`_
+- Fixed comparing with mapping: an exception raised in the
+  :py:func:`~object.__len__` method caused a SyntaxError to be raised.
+ `#459 <https://github.com/aio-libs/multidict/issues/459>`_
+- Fixed comparing with mapping: all exceptions raised in the
+ :py:func:`~object.__getitem__` method were silenced.
+ `#460 <https://github.com/aio-libs/multidict/issues/460>`_
+
+
+----
+
+
+4.7.4 (2020-01-11)
+==================
+
+Bugfixes
+--------
+
+- Fixed a memory leak in ``MultiDict.iter`` when iterating over a
+  :py:mod:`multidict` instance.
+ `#452 <https://github.com/aio-libs/multidict/issues/452>`_
+
+
+----
+
+
+4.7.3 (2019-12-30)
+==================
+
+Features
+--------
+
+- Implement the ``__sizeof__`` function to correctly calculate the size of all internal structures.
+ `#444 <https://github.com/aio-libs/multidict/issues/444>`_
+- Expose ``getversion()`` function.
+ `#451 <https://github.com/aio-libs/multidict/issues/451>`_
+
+
+Bugfixes
+--------
+
+- Fix crashes in ``popone``/``popall`` when default is returned.
+ `#450 <https://github.com/aio-libs/multidict/issues/450>`_
+
+
+Improved Documentation
+----------------------
+
+- Corrected the documentation for ``MultiDict.extend()``
+ `#446 <https://github.com/aio-libs/multidict/issues/446>`_
+
+
+----
+
+
+4.7.2 (2019-12-20)
+==================
+
+Bugfixes
+--------
+
+- Fix crashing when multidict is used with PyInstaller
+ `#432 <https://github.com/aio-libs/multidict/issues/432>`_
+- Fix typing for :py:meth:`CIMultiDict.copy`
+ `#434 <https://github.com/aio-libs/multidict/issues/434>`_
+- Fix memory leak in ``MultiDict.copy()``
+ `#443 <https://github.com/aio-libs/multidict/issues/443>`_
+
+
+----
+
+
+4.7.1 (2019-12-12)
+==================
+
+Bugfixes
+--------
+
+- :py:meth:`CIMultiDictProxy.copy` now returns an object of type
+ :py:class:`multidict._multidict.CIMultiDict`
+ `#427 <https://github.com/aio-libs/multidict/issues/427>`_
+- Make :py:class:`CIMultiDict` subclassable again
+ `#416 <https://github.com/aio-libs/multidict/issues/416>`_
+- Fix regression, multidict can be constructed from arbitrary iterable of pairs again.
+ `#418 <https://github.com/aio-libs/multidict/issues/418>`_
+- :py:meth:`CIMultiDict.add` may be called with keyword arguments
+ `#421 <https://github.com/aio-libs/multidict/issues/421>`_
+
+
+Improved Documentation
+----------------------
+
+- Mention ``MULTIDICT_NO_EXTENSIONS`` environment variable in docs.
+ `#393 <https://github.com/aio-libs/multidict/issues/393>`_
+- Document the fact that ``istr`` preserves the casing of its argument but uses an internal lower-cased copy for key comparison.
+ `#419 <https://github.com/aio-libs/multidict/issues/419>`_
+
+
+----
+
+
+4.7.0 (2019-12-10)
+==================
+
+Features
+--------
+
+- Replace Cython optimization with pure C
+ `#249 <https://github.com/aio-libs/multidict/issues/249>`_
+- Implement ``__length_hint__()`` for iterators
+ `#310 <https://github.com/aio-libs/multidict/issues/310>`_
+- Support the ``MultiDict[str]`` generic specialization at runtime.
+ `#392 <https://github.com/aio-libs/multidict/issues/392>`_
+- Embed pair_list_t structure into MultiDict Python object
+ `#395 <https://github.com/aio-libs/multidict/issues/395>`_
+- Embed multidict pairs for small dictionaries to amortize the memory usage.
+ `#396 <https://github.com/aio-libs/multidict/issues/396>`_
+- Support weak references to C Extension classes.
+ `#399 <https://github.com/aio-libs/multidict/issues/399>`_
+- Add docstrings to provided classes.
+ `#400 <https://github.com/aio-libs/multidict/issues/400>`_
+- Merge ``multidict._istr`` back with ``multidict._multidict``.
+ `#409 <https://github.com/aio-libs/multidict/issues/409>`_
+
+
+Bugfixes
+--------
+
+- Explicitly call ``tp_free`` slot on deallocation.
+ `#407 <https://github.com/aio-libs/multidict/issues/407>`_
+- Return class from __class_getitem__ to simplify subclassing
+ `#413 <https://github.com/aio-libs/multidict/issues/413>`_
+
+
+----
+
+
+4.6.1 (2019-11-21)
+==================
+
+Bugfixes
+--------
+
+- Fix PyPI link for GitHub Issues badge.
+ `#391 <https://github.com/aio-libs/aiohttp/issues/391>`_
+
+
+----
+
+
+4.6.0 (2019-11-20)
+==================
+
+Bugfixes
+--------
+
+- Fix GC object tracking.
+ `#314 <https://github.com/aio-libs/aiohttp/issues/314>`_
+- Preserve the case of ``istr`` strings.
+ `#374 <https://github.com/aio-libs/aiohttp/issues/374>`_
+- Generate binary wheels for Python 3.8.
diff --git a/third_party/python/multidict/LICENSE b/third_party/python/multidict/LICENSE
new file mode 100644
index 0000000000..99a9e21af0
--- /dev/null
+++ b/third_party/python/multidict/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-2017 Andrew Svetlov
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/python/multidict/MANIFEST.in b/third_party/python/multidict/MANIFEST.in
new file mode 100644
index 0000000000..c39a12f0b6
--- /dev/null
+++ b/third_party/python/multidict/MANIFEST.in
@@ -0,0 +1,14 @@
+include LICENSE
+include CHANGES.rst
+include README.rst
+include Makefile
+graft multidict
+graft docs
+graft tests
+global-exclude *.pyc
+include multidict/*.c
+exclude multidict/_multidict.html
+exclude multidict/*.so
+exclude multidict/*.pyd
+prune docs/_build
diff --git a/third_party/python/multidict/Makefile b/third_party/python/multidict/Makefile
new file mode 100644
index 0000000000..ca0562b043
--- /dev/null
+++ b/third_party/python/multidict/Makefile
@@ -0,0 +1,108 @@
+# Some simple testing tasks (sorry, UNIX only).
+.PHONY: all build flake test vtest cov clean doc mypy
+
+
+PYXS = $(wildcard multidict/*.pyx)
+SRC = multidict tests setup.py
+
+all: test
+
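+# Stamp files (.install-deps, .flake, .develop) record completed steps so
+# repeated `make` runs skip work whose inputs have not changed.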
+.install-deps: $(shell find requirements -type f)
+ pip install -r requirements/dev.txt
+ @touch .install-deps
+
+.flake: .install-deps $(shell find multidict -type f) \
+ $(shell find tests -type f)
+ flake8 multidict tests
+ @if ! isort --check multidict tests; then \
+ echo "Import sort errors, run 'make fmt' to fix them!!!"; \
+ isort --diff --check multidict tests; \
+ false; \
+ fi
+ @touch .flake
+
+
+isort-check:
+ @if ! isort --check $(SRC); then \
+ echo "Import sort errors, run 'make fmt' to fix them!!!"; \
+ isort --diff --check $(SRC); \
+ false; \
+ fi
+
+flake8:
+ flake8 $(SRC)
+
+black-check:
+ @if ! black -t py35 --check $(SRC); then \
+ echo "black errors, run 'make fmt' to fix them!!!"; \
+ black -t py35 --diff --check $(SRC); \
+ false; \
+ fi
+
+mypy:
+ mypy --show-error-codes multidict tests
+
+lint: flake8 black-check mypy isort-check check_changes
+
+fmt:
+ black -t py35 $(SRC)
+ isort $(SRC)
+
+check_changes:
+ ./tools/check_changes.py
+
+.develop: .install-deps $(shell find multidict -type f) .flake check_changes mypy
+ pip install -e .
+ @touch .develop
+
+test: .develop
+ @pytest -q
+
+vtest: .develop
+ @pytest -s -v
+
+cov-dev: .develop
+ @pytest --cov-report=html
+ @echo "open file://`pwd`/htmlcov/index.html"
+
+cov-ci-run: .develop
+ @echo "Regular run"
+ @pytest --cov-report=html
+
+cov-dev-full: cov-ci-run
+ @echo "open file://`pwd`/htmlcov/index.html"
+
+doc:
+ @make -C docs html SPHINXOPTS="-W -E"
+ @echo "open file://`pwd`/docs/_build/html/index.html"
+
+doc-spelling:
+ @make -C docs spelling SPHINXOPTS="-W -E"
+
+install:
+ @pip install -U 'pip'
+ @pip install -Ur requirements/dev.txt
+
+install-dev: .develop
+
+
+clean:
+ rm -rf `find . -name __pycache__`
+ rm -f `find . -type f -name '*.py[co]' `
+ rm -f `find . -type f -name '*~' `
+ rm -f `find . -type f -name '.*~' `
+ rm -f `find . -type f -name '@*' `
+ rm -f `find . -type f -name '#*#' `
+ rm -f `find . -type f -name '*.orig' `
+ rm -f `find . -type f -name '*.rej' `
+ rm -f .coverage
+ rm -rf coverage
+ rm -rf build
+ rm -rf cover
+ rm -rf htmlcov
+ make -C docs clean SPHINXBUILD=false
+ python3 setup.py clean
+ rm -f multidict/*.html
+ rm -f multidict/*.so
+ rm -f multidict/*.pyd
+ rm -rf .tox
diff --git a/third_party/python/multidict/PKG-INFO b/third_party/python/multidict/PKG-INFO
new file mode 100644
index 0000000000..bbd4864947
--- /dev/null
+++ b/third_party/python/multidict/PKG-INFO
@@ -0,0 +1,128 @@
+Metadata-Version: 1.2
+Name: multidict
+Version: 5.1.0
+Summary: multidict implementation
+Home-page: https://github.com/aio-libs/multidict
+Author: Andrew Svetlov
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+Project-URL: CI: Azure Pipelines, https://dev.azure.com/aio-libs/multidict/_build
+Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/multidict
+Project-URL: Docs: RTD, https://multidict.readthedocs.io
+Project-URL: GitHub: issues, https://github.com/aio-libs/multidict/issues
+Project-URL: GitHub: repo, https://github.com/aio-libs/multidict
+Description: =========
+ multidict
+ =========
+
+ .. image:: https://github.com/aio-libs/multidict/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/multidict/actions?query=workflow%3ACI
+ :alt: GitHub status for master branch
+
+ .. image:: https://codecov.io/gh/aio-libs/multidict/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/multidict
+ :alt: Coverage metrics
+
+ .. image:: https://img.shields.io/pypi/v/multidict.svg
+ :target: https://pypi.org/project/multidict
+ :alt: PyPI
+
+ .. image:: https://readthedocs.org/projects/multidict/badge/?version=latest
+ :target: http://multidict.readthedocs.org/en/latest/?badge=latest
+ :alt: Documentation
+
+ .. image:: https://img.shields.io/pypi/pyversions/multidict.svg
+ :target: https://pypi.org/project/multidict
+ :alt: Python versions
+
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+ Multidict is a dict-like collection of *key-value pairs* where a key
+ may occur more than once in the container.
+
+ Introduction
+ ------------
+
+ *HTTP headers* and *URL query strings* require a specific data structure:
+ a *multidict*. It behaves mostly like a regular ``dict`` but may hold
+ several *values* for the same *key* and *preserves insertion ordering*.
+
+ The *key* is ``str`` (or ``istr`` for case-insensitive dictionaries).
+
+ ``multidict`` has four multidict classes:
+ ``MultiDict``, ``MultiDictProxy``, ``CIMultiDict``
+ and ``CIMultiDictProxy``.
+
+ Immutable proxies (``MultiDictProxy`` and
+ ``CIMultiDictProxy``) provide a dynamic view of the
+ proxied multidict; the view reflects changes in the underlying collection. They
+ implement the ``collections.abc.Mapping`` interface.
+
+ Regular mutable classes (``MultiDict`` and ``CIMultiDict``)
+ implement ``collections.abc.MutableMapping`` and allow changing
+ their own content.
+
+
+ *Case insensitive* (``CIMultiDict`` and
+ ``CIMultiDictProxy``) ones assume the *keys* are case
+ insensitive, e.g.::
+
+ >>> dct = CIMultiDict(key='val')
+ >>> 'Key' in dct
+ True
+ >>> dct['Key']
+ 'val'
+
+ *Keys* should be ``str`` or ``istr`` instances.
+
+ The library has optional C extensions for the sake of speed.
+
+
+ License
+ -------
+
+ Apache 2
+
+ Library Installation
+ --------------------
+
+ .. code-block:: bash
+
+ $ pip install multidict
+
+ The library is Python 3 only!
+
+ PyPI contains binary wheels for Linux, Windows and macOS. If you want to install
+ ``multidict`` on another operating system (or *Alpine Linux* inside a Docker
+ container), the tarball will be used to compile the library from source. This
+ requires a C compiler and the Python headers to be installed.
+
+ To skip the compilation, set the ``MULTIDICT_NO_EXTENSIONS`` environment
+ variable, e.g.:
+
+ .. code-block:: bash
+
+ $ MULTIDICT_NO_EXTENSIONS=1 pip install multidict
+
+ Please note that the pure-Python (uncompiled) version is about 20-50 times
+ slower, depending on the usage scenario.
+
+
+
+ Changelog
+ ---------
+ See `RTD page <http://multidict.readthedocs.org/en/latest/changes.html>`_.
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Development Status :: 5 - Production/Stable
+Requires-Python: >=3.6
diff --git a/third_party/python/multidict/README.rst b/third_party/python/multidict/README.rst
new file mode 100644
index 0000000000..e78e5065c2
--- /dev/null
+++ b/third_party/python/multidict/README.rst
@@ -0,0 +1,103 @@
+=========
+multidict
+=========
+
+.. image:: https://github.com/aio-libs/multidict/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/multidict/actions?query=workflow%3ACI
+ :alt: GitHub status for master branch
+
+.. image:: https://codecov.io/gh/aio-libs/multidict/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/multidict
+ :alt: Coverage metrics
+
+.. image:: https://img.shields.io/pypi/v/multidict.svg
+ :target: https://pypi.org/project/multidict
+ :alt: PyPI
+
+.. image:: https://readthedocs.org/projects/multidict/badge/?version=latest
+ :target: http://multidict.readthedocs.org/en/latest/?badge=latest
+ :alt: Documentation
+
+.. image:: https://img.shields.io/pypi/pyversions/multidict.svg
+ :target: https://pypi.org/project/multidict
+ :alt: Python versions
+
+.. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+Multidict is a dict-like collection of *key-value pairs* where a key
+may occur more than once in the container.
+
+Introduction
+------------
+
+*HTTP headers* and *URL query strings* require a specific data structure:
+a *multidict*. It behaves mostly like a regular ``dict`` but may hold
+several *values* for the same *key* and *preserves insertion ordering*.
+
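+For example, a ``MultiDict`` keeps every value added for a key (a minimal
+illustrative sketch)::
+
+ >>> from multidict import MultiDict
+ >>> md = MultiDict()
+ >>> md.add('key', 'one')
+ >>> md.add('key', 'two')
+ >>> md['key']  # plain indexing returns the first value
+ 'one'
+ >>> md.getall('key')
+ ['one', 'two']
+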
+The *key* is ``str`` (or ``istr`` for case-insensitive dictionaries).
+
+``multidict`` has four multidict classes:
+``MultiDict``, ``MultiDictProxy``, ``CIMultiDict``
+and ``CIMultiDictProxy``.
+
+Immutable proxies (``MultiDictProxy`` and
+``CIMultiDictProxy``) provide a dynamic view of the
+proxied multidict; the view reflects changes in the underlying collection. They
+implement the ``collections.abc.Mapping`` interface.
+
+Regular mutable classes (``MultiDict`` and ``CIMultiDict``)
+implement ``collections.abc.MutableMapping`` and allow changing
+their own content.
+
+
+*Case insensitive* (``CIMultiDict`` and
+``CIMultiDictProxy``) ones assume the *keys* are case
+insensitive, e.g.::
+
+ >>> dct = CIMultiDict(key='val')
+ >>> 'Key' in dct
+ True
+ >>> dct['Key']
+ 'val'
+
+*Keys* should be ``str`` or ``istr`` instances.
+
+The library has optional C extensions for the sake of speed.
+
+
+License
+-------
+
+Apache 2
+
+Library Installation
+--------------------
+
+.. code-block:: bash
+
+ $ pip install multidict
+
+The library is Python 3 only!
+
+PyPI contains binary wheels for Linux, Windows and macOS. If you want to install
+``multidict`` on another operating system (or *Alpine Linux* inside a Docker
+container), the tarball will be used to compile the library from source. This
+requires a C compiler and the Python headers to be installed.
+
+To skip the compilation, set the ``MULTIDICT_NO_EXTENSIONS`` environment
+variable, e.g.:
+
+.. code-block:: bash
+
+ $ MULTIDICT_NO_EXTENSIONS=1 pip install multidict
+
+Please note that the pure-Python (uncompiled) version is about 20-50 times
+slower, depending on the usage scenario.
+
+
+
+Changelog
+---------
+See `RTD page <http://multidict.readthedocs.org/en/latest/changes.html>`_.
diff --git a/third_party/python/multidict/multidict.egg-info/PKG-INFO b/third_party/python/multidict/multidict.egg-info/PKG-INFO
new file mode 100644
index 0000000000..bbd4864947
--- /dev/null
+++ b/third_party/python/multidict/multidict.egg-info/PKG-INFO
@@ -0,0 +1,128 @@
+Metadata-Version: 1.2
+Name: multidict
+Version: 5.1.0
+Summary: multidict implementation
+Home-page: https://github.com/aio-libs/multidict
+Author: Andrew Svetlov
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Project-URL: Chat: Gitter, https://gitter.im/aio-libs/Lobby
+Project-URL: CI: Azure Pipelines, https://dev.azure.com/aio-libs/multidict/_build
+Project-URL: Coverage: codecov, https://codecov.io/github/aio-libs/multidict
+Project-URL: Docs: RTD, https://multidict.readthedocs.io
+Project-URL: GitHub: issues, https://github.com/aio-libs/multidict/issues
+Project-URL: GitHub: repo, https://github.com/aio-libs/multidict
+Description: =========
+ multidict
+ =========
+
+ .. image:: https://github.com/aio-libs/multidict/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/multidict/actions?query=workflow%3ACI
+ :alt: GitHub status for master branch
+
+ .. image:: https://codecov.io/gh/aio-libs/multidict/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/multidict
+ :alt: Coverage metrics
+
+ .. image:: https://img.shields.io/pypi/v/multidict.svg
+ :target: https://pypi.org/project/multidict
+ :alt: PyPI
+
+ .. image:: https://readthedocs.org/projects/multidict/badge/?version=latest
+ :target: http://multidict.readthedocs.org/en/latest/?badge=latest
+ :alt: Documentation
+
+ .. image:: https://img.shields.io/pypi/pyversions/multidict.svg
+ :target: https://pypi.org/project/multidict
+ :alt: Python versions
+
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+ Multidict is a dict-like collection of *key-value pairs* where a key
+ may occur more than once in the container.
+
+ Introduction
+ ------------
+
+ *HTTP headers* and *URL query strings* require a specific data structure:
+ a *multidict*. It behaves mostly like a regular ``dict`` but may hold
+ several *values* for the same *key* and *preserves insertion ordering*.
+
+ The *key* is ``str`` (or ``istr`` for case-insensitive dictionaries).
+
+ ``multidict`` has four multidict classes:
+ ``MultiDict``, ``MultiDictProxy``, ``CIMultiDict``
+ and ``CIMultiDictProxy``.
+
+ Immutable proxies (``MultiDictProxy`` and
+ ``CIMultiDictProxy``) provide a dynamic view of the
+ proxied multidict; the view reflects changes in the underlying collection. They
+ implement the ``collections.abc.Mapping`` interface.
+
+ Regular mutable classes (``MultiDict`` and ``CIMultiDict``)
+ implement ``collections.abc.MutableMapping`` and allow changing
+ their own content.
+
+
+ *Case insensitive* (``CIMultiDict`` and
+ ``CIMultiDictProxy``) ones assume the *keys* are case
+ insensitive, e.g.::
+
+ >>> dct = CIMultiDict(key='val')
+ >>> 'Key' in dct
+ True
+ >>> dct['Key']
+ 'val'
+
+ *Keys* should be ``str`` or ``istr`` instances.
+
+ The library has optional C extensions for the sake of speed.
+
+
+ License
+ -------
+
+ Apache 2
+
+ Library Installation
+ --------------------
+
+ .. code-block:: bash
+
+ $ pip install multidict
+
+ The library is Python 3 only!
+
+ PyPI contains binary wheels for Linux, Windows and macOS. If you want to install
+ ``multidict`` on another operating system (or *Alpine Linux* inside a Docker
+ container), the tarball will be used to compile the library from source. This
+ requires a C compiler and the Python headers to be installed.
+
+ To skip the compilation, set the ``MULTIDICT_NO_EXTENSIONS`` environment
+ variable, e.g.:
+
+ .. code-block:: bash
+
+ $ MULTIDICT_NO_EXTENSIONS=1 pip install multidict
+
+ Please note that the pure-Python (uncompiled) version is about 20-50 times
+ slower, depending on the usage scenario.
+
+
+
+ Changelog
+ ---------
+ See `RTD page <http://multidict.readthedocs.org/en/latest/changes.html>`_.
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Development Status :: 5 - Production/Stable
+Requires-Python: >=3.6
diff --git a/third_party/python/multidict/multidict.egg-info/SOURCES.txt b/third_party/python/multidict/multidict.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..6c6257ea9b
--- /dev/null
+++ b/third_party/python/multidict/multidict.egg-info/SOURCES.txt
@@ -0,0 +1,71 @@
+CHANGES.rst
+LICENSE
+MANIFEST.in
+Makefile
+README.rst
+pyproject.toml
+setup.cfg
+setup.py
+docs/Makefile
+docs/benchmark.rst
+docs/changes.rst
+docs/conf.py
+docs/index.rst
+docs/make.bat
+docs/multidict.rst
+docs/spelling_wordlist.txt
+multidict/__init__.py
+multidict/__init__.pyi
+multidict/_abc.py
+multidict/_compat.py
+multidict/_multidict.c
+multidict/_multidict_base.py
+multidict/_multidict_py.py
+multidict/py.typed
+multidict.egg-info/PKG-INFO
+multidict.egg-info/SOURCES.txt
+multidict.egg-info/dependency_links.txt
+multidict.egg-info/top_level.txt
+multidict/_multilib/defs.h
+multidict/_multilib/dict.h
+multidict/_multilib/istr.h
+multidict/_multilib/iter.h
+multidict/_multilib/pair_list.h
+multidict/_multilib/views.h
+tests/cimultidict.pickle.0
+tests/cimultidict.pickle.1
+tests/cimultidict.pickle.2
+tests/cimultidict.pickle.3
+tests/cimultidict.pickle.4
+tests/cimultidict.pickle.5
+tests/conftest.py
+tests/gen_pickles.py
+tests/multidict.pickle.0
+tests/multidict.pickle.1
+tests/multidict.pickle.2
+tests/multidict.pickle.3
+tests/multidict.pickle.4
+tests/multidict.pickle.5
+tests/pycimultidict.pickle.0
+tests/pycimultidict.pickle.1
+tests/pycimultidict.pickle.2
+tests/pycimultidict.pickle.3
+tests/pycimultidict.pickle.4
+tests/pycimultidict.pickle.5
+tests/pymultidict.pickle.0
+tests/pymultidict.pickle.1
+tests/pymultidict.pickle.2
+tests/pymultidict.pickle.3
+tests/pymultidict.pickle.4
+tests/pymultidict.pickle.5
+tests/test_abc.py
+tests/test_copy.py
+tests/test_guard.py
+tests/test_istr.py
+tests/test_multidict.py
+tests/test_mutable_multidict.py
+tests/test_mypy.py
+tests/test_pickle.py
+tests/test_types.py
+tests/test_update.py
+tests/test_version.py \ No newline at end of file
diff --git a/third_party/python/multidict/multidict.egg-info/dependency_links.txt b/third_party/python/multidict/multidict.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/multidict/multidict.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/multidict/multidict.egg-info/top_level.txt b/third_party/python/multidict/multidict.egg-info/top_level.txt
new file mode 100644
index 0000000000..afcecdff08
--- /dev/null
+++ b/third_party/python/multidict/multidict.egg-info/top_level.txt
@@ -0,0 +1 @@
+multidict
diff --git a/third_party/python/multidict/multidict/__init__.py b/third_party/python/multidict/multidict/__init__.py
new file mode 100644
index 0000000000..6b091d1431
--- /dev/null
+++ b/third_party/python/multidict/multidict/__init__.py
@@ -0,0 +1,48 @@
+"""Multidict implementation.
+
+HTTP Headers and URL query string require specific data structure:
+multidict. It behaves mostly like a dict but it can have
+several values for the same key.
+"""
+
+from ._abc import MultiMapping, MutableMultiMapping
+from ._compat import USE_CYTHON_EXTENSIONS
+
+__all__ = (
+ "MultiMapping",
+ "MutableMultiMapping",
+ "MultiDictProxy",
+ "CIMultiDictProxy",
+ "MultiDict",
+ "CIMultiDict",
+ "upstr",
+ "istr",
+ "getversion",
+)
+
+__version__ = "5.1.0"
+
+
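+# Prefer the C-extension implementation when available; fall back to the
+# pure-Python implementation when extensions are disabled (via the
+# MULTIDICT_NO_EXTENSIONS environment variable), on PyPy, or when the
+# compiled module failed to import (see _compat.py).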
+try:
+ if not USE_CYTHON_EXTENSIONS:
+ raise ImportError
+ from ._multidict import (
+ CIMultiDict,
+ CIMultiDictProxy,
+ MultiDict,
+ MultiDictProxy,
+ getversion,
+ istr,
+ )
+except ImportError: # pragma: no cover
+ from ._multidict_py import (
+ CIMultiDict,
+ CIMultiDictProxy,
+ MultiDict,
+ MultiDictProxy,
+ getversion,
+ istr,
+ )
+
+
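+# ``upstr`` is kept as an alias of ``istr`` for backward compatibility.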
+upstr = istr
diff --git a/third_party/python/multidict/multidict/__init__.pyi b/third_party/python/multidict/multidict/__init__.pyi
new file mode 100644
index 0000000000..24ba63054b
--- /dev/null
+++ b/third_party/python/multidict/multidict/__init__.pyi
@@ -0,0 +1,152 @@
+import abc
+from typing import (
+ Dict,
+ Generic,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ MutableMapping,
+ Tuple,
+ TypeVar,
+ Union,
+ overload,
+)
+
+class istr(str): ...
+
+upstr = istr
+
+_S = Union[str, istr]
+
+_T = TypeVar("_T")
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+_D = TypeVar("_D")
+
+class MultiMapping(Mapping[_S, _T_co]):
+ @overload
+ @abc.abstractmethod
+ def getall(self, key: _S) -> List[_T_co]: ...
+ @overload
+ @abc.abstractmethod
+ def getall(self, key: _S, default: _D) -> Union[List[_T_co], _D]: ...
+ @overload
+ @abc.abstractmethod
+ def getone(self, key: _S) -> _T_co: ...
+ @overload
+ @abc.abstractmethod
+ def getone(self, key: _S, default: _D) -> Union[_T_co, _D]: ...
+
+_Arg = Union[Mapping[_S, _T], Dict[_S, _T], MultiMapping[_T], Iterable[Tuple[_S, _T]]]
+
+class MutableMultiMapping(MultiMapping[_T], MutableMapping[_S, _T], Generic[_T]):
+ @abc.abstractmethod
+ def add(self, key: _S, value: _T) -> None: ...
+ @abc.abstractmethod
+ def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
+ @overload
+ @abc.abstractmethod
+ def popone(self, key: _S) -> _T: ...
+ @overload
+ @abc.abstractmethod
+ def popone(self, key: _S, default: _D) -> Union[_T, _D]: ...
+ @overload
+ @abc.abstractmethod
+ def popall(self, key: _S) -> List[_T]: ...
+ @overload
+ @abc.abstractmethod
+ def popall(self, key: _S, default: _D) -> Union[List[_T], _D]: ...
+
+class MultiDict(MutableMultiMapping[_T], Generic[_T]):
+ def __init__(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
+ def copy(self) -> MultiDict[_T]: ...
+ def __getitem__(self, k: _S) -> _T: ...
+ def __setitem__(self, k: _S, v: _T) -> None: ...
+ def __delitem__(self, v: _S) -> None: ...
+ def __iter__(self) -> Iterator[_S]: ...
+ def __len__(self) -> int: ...
+ @overload
+ def getall(self, key: _S) -> List[_T]: ...
+ @overload
+ def getall(self, key: _S, default: _D) -> Union[List[_T], _D]: ...
+ @overload
+ def getone(self, key: _S) -> _T: ...
+ @overload
+ def getone(self, key: _S, default: _D) -> Union[_T, _D]: ...
+ def add(self, key: _S, value: _T) -> None: ...
+ def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
+ @overload
+ def popone(self, key: _S) -> _T: ...
+ @overload
+ def popone(self, key: _S, default: _D) -> Union[_T, _D]: ...
+ @overload
+ def popall(self, key: _S) -> List[_T]: ...
+ @overload
+ def popall(self, key: _S, default: _D) -> Union[List[_T], _D]: ...
+
+class CIMultiDict(MutableMultiMapping[_T], Generic[_T]):
+ def __init__(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
+ def copy(self) -> CIMultiDict[_T]: ...
+ def __getitem__(self, k: _S) -> _T: ...
+ def __setitem__(self, k: _S, v: _T) -> None: ...
+ def __delitem__(self, v: _S) -> None: ...
+ def __iter__(self) -> Iterator[_S]: ...
+ def __len__(self) -> int: ...
+ @overload
+ def getall(self, key: _S) -> List[_T]: ...
+ @overload
+ def getall(self, key: _S, default: _D) -> Union[List[_T], _D]: ...
+ @overload
+ def getone(self, key: _S) -> _T: ...
+ @overload
+ def getone(self, key: _S, default: _D) -> Union[_T, _D]: ...
+ def add(self, key: _S, value: _T) -> None: ...
+ def extend(self, arg: _Arg[_T] = ..., **kwargs: _T) -> None: ...
+ @overload
+ def popone(self, key: _S) -> _T: ...
+ @overload
+ def popone(self, key: _S, default: _D) -> Union[_T, _D]: ...
+ @overload
+ def popall(self, key: _S) -> List[_T]: ...
+ @overload
+ def popall(self, key: _S, default: _D) -> Union[List[_T], _D]: ...
+
+class MultiDictProxy(MultiMapping[_T], Generic[_T]):
+ def __init__(
+ self, arg: Union[MultiMapping[_T], MutableMultiMapping[_T]]
+ ) -> None: ...
+ def copy(self) -> MultiDict[_T]: ...
+ def __getitem__(self, k: _S) -> _T: ...
+ def __iter__(self) -> Iterator[_S]: ...
+ def __len__(self) -> int: ...
+ @overload
+ def getall(self, key: _S) -> List[_T]: ...
+ @overload
+ def getall(self, key: _S, default: _D) -> Union[List[_T], _D]: ...
+ @overload
+ def getone(self, key: _S) -> _T: ...
+ @overload
+ def getone(self, key: _S, default: _D) -> Union[_T, _D]: ...
+
+class CIMultiDictProxy(MultiMapping[_T], Generic[_T]):
+ def __init__(
+ self, arg: Union[MultiMapping[_T], MutableMultiMapping[_T]]
+ ) -> None: ...
+ def __getitem__(self, k: _S) -> _T: ...
+ def __iter__(self) -> Iterator[_S]: ...
+ def __len__(self) -> int: ...
+ @overload
+ def getall(self, key: _S) -> List[_T]: ...
+ @overload
+ def getall(self, key: _S, default: _D) -> Union[List[_T], _D]: ...
+ @overload
+ def getone(self, key: _S) -> _T: ...
+ @overload
+ def getone(self, key: _S, default: _D) -> Union[_T, _D]: ...
+ def copy(self) -> CIMultiDict[_T]: ...
+
+def getversion(
+ md: Union[MultiDict[_T], CIMultiDict[_T], MultiDictProxy[_T], CIMultiDictProxy[_T]]
+) -> int: ...
diff --git a/third_party/python/multidict/multidict/_abc.py b/third_party/python/multidict/multidict/_abc.py
new file mode 100644
index 0000000000..0603cdd244
--- /dev/null
+++ b/third_party/python/multidict/multidict/_abc.py
@@ -0,0 +1,48 @@
+import abc
+import sys
+import types
+from collections.abc import Mapping, MutableMapping
+
+
+class _TypingMeta(abc.ABCMeta):
+ # A fake metaclass to satisfy typing deps in runtime
+ # basically MultiMapping[str] and other generic-like type instantiations
+ # are emulated.
+ # Note: real type hints are provided by __init__.pyi stub file
+ if sys.version_info >= (3, 9):
+
+ def __getitem__(self, key):
+ return types.GenericAlias(self, key)
+
+ else:
+
+ def __getitem__(self, key):
+ return self
+
+
+class MultiMapping(Mapping, metaclass=_TypingMeta):
+ @abc.abstractmethod
+ def getall(self, key, default=None):
+ raise KeyError
+
+ @abc.abstractmethod
+ def getone(self, key, default=None):
+ raise KeyError
+
+
+class MutableMultiMapping(MultiMapping, MutableMapping):
+ @abc.abstractmethod
+ def add(self, key, value):
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def extend(self, *args, **kwargs):
+ raise NotImplementedError
+
+ @abc.abstractmethod
+ def popone(self, key, default=None):
+ raise KeyError
+
+ @abc.abstractmethod
+ def popall(self, key, default=None):
+ raise KeyError
diff --git a/third_party/python/multidict/multidict/_compat.py b/third_party/python/multidict/multidict/_compat.py
new file mode 100644
index 0000000000..e659124558
--- /dev/null
+++ b/third_party/python/multidict/multidict/_compat.py
@@ -0,0 +1,14 @@
+import os
+import platform
+
+NO_EXTENSIONS = bool(os.environ.get("MULTIDICT_NO_EXTENSIONS"))
+
+PYPY = platform.python_implementation() == "PyPy"
+
+USE_CYTHON_EXTENSIONS = USE_CYTHON = not NO_EXTENSIONS and not PYPY
+
+if USE_CYTHON_EXTENSIONS:
+ try:
+ from . import _multidict # noqa
+ except ImportError:
+ USE_CYTHON_EXTENSIONS = USE_CYTHON = False
diff --git a/third_party/python/multidict/multidict/_multidict.c b/third_party/python/multidict/multidict/_multidict.c
new file mode 100644
index 0000000000..5bdcc898de
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multidict.c
@@ -0,0 +1,1646 @@
+#include "Python.h"
+#include "structmember.h"
+
+// Include order important
+#include "_multilib/defs.h"
+#include "_multilib/istr.h"
+#include "_multilib/pair_list.h"
+#include "_multilib/dict.h"
+#include "_multilib/iter.h"
+#include "_multilib/views.h"
+
+static PyObject *collections_abc_mapping;
+static PyObject *collections_abc_mut_mapping;
+static PyObject *collections_abc_mut_multi_mapping;
+
+static PyTypeObject multidict_type;
+static PyTypeObject cimultidict_type;
+static PyTypeObject multidict_proxy_type;
+static PyTypeObject cimultidict_proxy_type;
+
+static PyObject *repr_func;
+
+#define MultiDict_CheckExact(o) (Py_TYPE(o) == &multidict_type)
+#define CIMultiDict_CheckExact(o) (Py_TYPE(o) == &cimultidict_type)
+#define MultiDictProxy_CheckExact(o) (Py_TYPE(o) == &multidict_proxy_type)
+#define CIMultiDictProxy_CheckExact(o) (Py_TYPE(o) == &cimultidict_proxy_type)
+
+/* Helper macro for something like isinstance(obj, Base) */
+#define _MultiDict_Check(o) \
+ ((MultiDict_CheckExact(o)) || \
+ (CIMultiDict_CheckExact(o)) || \
+ (MultiDictProxy_CheckExact(o)) || \
+ (CIMultiDictProxy_CheckExact(o)))
+
+/******************** Internal Methods ********************/
+
+/* Forward declaration */
+static PyObject *multidict_items(MultiDictObject *self);
+
+static inline PyObject *
+_multidict_getone(MultiDictObject *self, PyObject *key, PyObject *_default)
+{
+ PyObject *val = pair_list_get_one(&self->pairs, key);
+
+ if (val == NULL &&
+ PyErr_ExceptionMatches(PyExc_KeyError) &&
+ _default != NULL)
+ {
+ PyErr_Clear();
+ Py_INCREF(_default);
+ return _default;
+ }
+
+ return val;
+}
+
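+/* Pairwise equality check: two multidicts are equal iff they have the same
+   length and the same (identity, value) pairs in the same order; pair hashes
+   are compared first as a cheap mismatch test. */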
+static inline int
+_multidict_eq(MultiDictObject *self, MultiDictObject *other)
+{
+ Py_ssize_t pos1 = 0,
+ pos2 = 0;
+
+ Py_hash_t h1 = 0,
+ h2 = 0;
+
+ PyObject *identity1 = NULL,
+ *identity2 = NULL,
+ *value1 = NULL,
+ *value2 = NULL;
+
+ int cmp_identity = 0,
+ cmp_value = 0;
+
+ if (self == other) {
+ return 1;
+ }
+
+ if (pair_list_len(&self->pairs) != pair_list_len(&other->pairs)) {
+ return 0;
+ }
+
+ while (_pair_list_next(&self->pairs, &pos1, &identity1, NULL, &value1, &h1) &&
+ _pair_list_next(&other->pairs, &pos2, &identity2, NULL, &value2, &h2))
+ {
+ if (h1 != h2) {
+ return 0;
+ }
+ cmp_identity = PyObject_RichCompareBool(identity1, identity2, Py_NE);
+ if (cmp_identity < 0) {
+ return -1;
+ }
+ cmp_value = PyObject_RichCompareBool(value1, value2, Py_NE);
+ if (cmp_value < 0) {
+ return -1;
+ }
+ if (cmp_identity || cmp_value) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static inline int
+_multidict_update_items(MultiDictObject *self, pair_list_t *pairs)
+{
+ return pair_list_update(&self->pairs, pairs);
+}
+
+static inline int
+_multidict_append_items(MultiDictObject *self, pair_list_t *pairs)
+{
+ PyObject *key = NULL,
+ *value = NULL;
+
+ Py_ssize_t pos = 0;
+
+ while (_pair_list_next(pairs, &pos, NULL, &key, &value, NULL)) {
+ if (pair_list_add(&self->pairs, key, value) < 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
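+/* Append (key, value) pairs from an arbitrary iterable of 2-item sequences,
+   raising TypeError for any item that is not a pair. */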
+static inline int
+_multidict_append_items_seq(MultiDictObject *self, PyObject *arg,
+ const char *name)
+{
+ PyObject *key = NULL,
+ *value = NULL,
+ *item = NULL,
+ *iter = PyObject_GetIter(arg);
+
+ if (iter == NULL) {
+ return -1;
+ }
+
+ while ((item = PyIter_Next(iter)) != NULL) {
+ if (PyTuple_CheckExact(item)) {
+ if (PyTuple_GET_SIZE(item) != 2) {
+ goto invalid_type;
+ }
+ key = PyTuple_GET_ITEM(item, 0);
+ Py_INCREF(key);
+ value = PyTuple_GET_ITEM(item, 1);
+ Py_INCREF(value);
+ }
+ else if (PyList_CheckExact(item)) {
+ if (PyList_GET_SIZE(item) != 2) {
+ goto invalid_type;
+ }
+ key = PyList_GET_ITEM(item, 0);
+ Py_INCREF(key);
+ value = PyList_GET_ITEM(item, 1);
+ Py_INCREF(value);
+ }
+ else if (PySequence_Check(item)) {
+ if (PySequence_Size(item) != 2) {
+ goto invalid_type;
+ }
+ key = PySequence_GetItem(item, 0);
+ value = PySequence_GetItem(item, 1);
+ } else {
+ goto invalid_type;
+ }
+
+ if (pair_list_add(&self->pairs, key, value) < 0) {
+ goto fail;
+ }
+ Py_CLEAR(key);
+ Py_CLEAR(value);
+ Py_CLEAR(item);
+ }
+
+ Py_DECREF(iter);
+
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+
+ return 0;
+invalid_type:
+ PyErr_Format(
+ PyExc_TypeError,
+ "%s takes either dict or list of (key, value) pairs",
+ name,
+ NULL
+ );
+ goto fail;
+fail:
+ Py_XDECREF(key);
+ Py_XDECREF(value);
+ Py_XDECREF(item);
+ Py_DECREF(iter);
+ return -1;
+}
+
+static inline int
+_multidict_list_extend(PyObject *list, PyObject *target_list)
+{
+ PyObject *item = NULL,
+ *iter = PyObject_GetIter(target_list);
+
+ if (iter == NULL) {
+ return -1;
+ }
+
+ while ((item = PyIter_Next(iter)) != NULL) {
+ if (PyList_Append(list, item) < 0) {
+ Py_DECREF(item);
+ Py_DECREF(iter);
+ return -1;
+ }
+ Py_DECREF(item);
+ }
+
+ Py_DECREF(iter);
+
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static inline int
+_multidict_extend_with_args(MultiDictObject *self, PyObject *arg,
+ PyObject *kwds, const char *name, int do_add)
+{
+ PyObject *arg_items = NULL, /* tracked by GC */
+ *kwds_items = NULL; /* new reference */
+ pair_list_t *pairs = NULL;
+
+ int err = 0;
+
+ if (kwds && !PyArg_ValidateKeywordArguments(kwds)) {
+ return -1;
+ }
+
+ // TODO: maybe this can be refactored to be clearer
+ if (_MultiDict_Check(arg) && kwds == NULL) {
+ if (MultiDict_CheckExact(arg) || CIMultiDict_CheckExact(arg)) {
+ pairs = &((MultiDictObject*)arg)->pairs;
+ } else if (MultiDictProxy_CheckExact(arg) || CIMultiDictProxy_CheckExact(arg)) {
+ pairs = &((MultiDictProxyObject*)arg)->md->pairs;
+ }
+
+ if (do_add) {
+ return _multidict_append_items(self, pairs);
+ }
+
+ return _multidict_update_items(self, pairs);
+ }
+
+ if (PyObject_HasAttrString(arg, "items")) {
+ if (_MultiDict_Check(arg)) {
+ arg_items = multidict_items((MultiDictObject*)arg);
+ } else {
+ arg_items = PyMapping_Items(arg);
+ }
+ if (arg_items == NULL) {
+ return -1;
+ }
+ } else {
+ arg_items = arg;
+ Py_INCREF(arg_items);
+ }
+
+ if (kwds) {
+ PyObject *tmp = PySequence_List(arg_items);
+ Py_DECREF(arg_items);
+ arg_items = tmp;
+ if (arg_items == NULL) {
+ return -1;
+ }
+
+ kwds_items = PyDict_Items(kwds);
+ if (kwds_items == NULL) {
+ Py_DECREF(arg_items);
+ return -1;
+ }
+ err = _multidict_list_extend(arg_items, kwds_items);
+ Py_DECREF(kwds_items);
+ if (err < 0) {
+ Py_DECREF(arg_items);
+ return -1;
+ }
+ }
+
+ if (do_add) {
+ err = _multidict_append_items_seq(self, arg_items, name);
+ } else {
+ err = pair_list_update_from_seq(&self->pairs, arg_items);
+ }
+
+ Py_DECREF(arg_items);
+
+ return err;
+}
+
+static inline int
+_multidict_extend_with_kwds(MultiDictObject *self, PyObject *kwds,
+ const char *name, int do_add)
+{
+ PyObject *arg = NULL;
+
+ int err = 0;
+
+ if (!PyArg_ValidateKeywordArguments(kwds)) {
+ return -1;
+ }
+
+ arg = PyDict_Items(kwds);
+ if (do_add) {
+ err = _multidict_append_items_seq(self, arg, name);
+ } else {
+ err = pair_list_update_from_seq(&self->pairs, arg);
+ }
+
+ Py_DECREF(arg);
+ return err;
+}
+
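+/* Shared driver for __init__/extend/update: accepts at most one positional
+   argument plus keyword arguments; do_add selects append (add) versus
+   replace (update) semantics. */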
+static inline int
+_multidict_extend(MultiDictObject *self, PyObject *args, PyObject *kwds,
+ const char *name, int do_add)
+{
+ PyObject *arg = NULL;
+
+ if (args && PyObject_Length(args) > 1) {
+ PyErr_Format(
+ PyExc_TypeError,
+ "%s takes at most 1 positional argument (%zd given)",
+ name, PyObject_Length(args), NULL
+ );
+ return -1;
+ }
+
+ if (args && PyObject_Length(args) > 0) {
+ if (!PyArg_UnpackTuple(args, name, 0, 1, &arg)) {
+ return -1;
+ }
+ if (_multidict_extend_with_args(self, arg, kwds, name, do_add) < 0) {
+ return -1;
+ }
+ } else if (kwds && PyObject_Length(kwds) > 0) {
+ if (_multidict_extend_with_kwds(self, kwds, name, do_add) < 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+static inline PyObject *
+_multidict_copy(MultiDictObject *self, PyTypeObject *multidict_tp_object)
+{
+ MultiDictObject *new_multidict = NULL;
+
+ PyObject *arg_items = NULL,
+ *items = NULL;
+
+ new_multidict = (MultiDictObject*)PyType_GenericNew(
+ multidict_tp_object, NULL, NULL);
+ if (new_multidict == NULL) {
+ return NULL;
+ }
+
+ if (multidict_tp_object->tp_init(
+ (PyObject*)new_multidict, NULL, NULL) < 0)
+ {
+ return NULL;
+ }
+
+ items = multidict_items(self);
+ if (items == NULL) {
+ goto fail;
+ }
+
+ // TODO: "Implementation looks as slow as possible ..."
+ arg_items = PyTuple_New(1);
+ if (arg_items == NULL) {
+ goto fail;
+ }
+
+ Py_INCREF(items);
+ PyTuple_SET_ITEM(arg_items, 0, items);
+
+ if (_multidict_extend(
+ new_multidict, arg_items, NULL, "copy", 1) < 0)
+ {
+ goto fail;
+ }
+
+ Py_DECREF(items);
+ Py_DECREF(arg_items);
+
+ return (PyObject*)new_multidict;
+
+fail:
+ Py_XDECREF(items);
+ Py_XDECREF(arg_items);
+
+ Py_DECREF(new_multidict);
+
+ return NULL;
+}
+
+static inline PyObject *
+_multidict_proxy_copy(MultiDictProxyObject *self, PyTypeObject *type)
+{
+ PyObject *new_multidict = PyType_GenericNew(type, NULL, NULL);
+ if (new_multidict == NULL) {
+ goto fail;
+ }
+ if (type->tp_init(new_multidict, NULL, NULL) < 0) {
+ goto fail;
+ }
+ if (_multidict_extend_with_args(
+ (MultiDictObject*)new_multidict, (PyObject*)self, NULL, "copy", 1) < 0)
+ {
+ goto fail;
+ }
+
+ return new_multidict;
+
+fail:
+ Py_XDECREF(new_multidict);
+ return NULL;
+}
+
+
+/******************** Base Methods ********************/
+
+static inline PyObject *
+multidict_getall(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *list = NULL,
+ *key = NULL,
+ *_default = NULL;
+
+ static char *getall_keywords[] = {"key", "default", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:getall",
+ getall_keywords, &key, &_default))
+ {
+ return NULL;
+ }
+
+ list = pair_list_get_all(&self->pairs, key);
+
+ if (list == NULL &&
+ PyErr_ExceptionMatches(PyExc_KeyError) &&
+ _default != NULL)
+ {
+ PyErr_Clear();
+ Py_INCREF(_default);
+ return _default;
+ }
+
+ return list;
+}
+
+static inline PyObject *
+multidict_getone(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *key = NULL,
+ *_default = NULL;
+
+ static char *getone_keywords[] = {"key", "default", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:getone",
+ getone_keywords, &key, &_default))
+ {
+ return NULL;
+ }
+
+ return _multidict_getone(self, key, _default);
+}
+
+static inline PyObject *
+multidict_get(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *key = NULL,
+ *_default = Py_None,
+ *ret;
+
+ static char *getone_keywords[] = {"key", "default", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:getone",
+ getone_keywords, &key, &_default))
+ {
+ return NULL;
+ }
+ ret = _multidict_getone(self, key, _default);
+ return ret;
+}
+
+static inline PyObject *
+multidict_keys(MultiDictObject *self)
+{
+ return multidict_keysview_new((PyObject*)self);
+}
+
+static inline PyObject *
+multidict_items(MultiDictObject *self)
+{
+ return multidict_itemsview_new((PyObject*)self);
+}
+
+static inline PyObject *
+multidict_values(MultiDictObject *self)
+{
+ return multidict_valuesview_new((PyObject*)self);
+}
+
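+/* Pickle support: __reduce__ returns (type, (list(items),)) so that an
+   equivalent instance can be rebuilt from its item list. */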
+static inline PyObject *
+multidict_reduce(MultiDictObject *self)
+{
+ PyObject *items = NULL,
+ *items_list = NULL,
+ *args = NULL,
+ *result = NULL;
+
+ items = multidict_items(self);
+ if (items == NULL) {
+ goto ret;
+ }
+
+ items_list = PySequence_List(items);
+ if (items_list == NULL) {
+ goto ret;
+ }
+
+ args = PyTuple_Pack(1, items_list);
+ if (args == NULL) {
+ goto ret;
+ }
+
+ result = PyTuple_Pack(2, Py_TYPE(self), args);
+
+ret:
+ Py_XDECREF(args);
+ Py_XDECREF(items_list);
+ Py_XDECREF(items);
+
+ return result;
+}
+
+static inline PyObject *
+multidict_repr(PyObject *self)
+{
+ return PyObject_CallFunctionObjArgs(
+ repr_func, self, NULL);
+}
+
+static inline Py_ssize_t
+multidict_mp_len(MultiDictObject *self)
+{
+ return pair_list_len(&self->pairs);
+}
+
+static inline PyObject *
+multidict_mp_subscript(MultiDictObject *self, PyObject *key)
+{
+ return _multidict_getone(self, key, NULL);
+}
+
+static inline int
+multidict_mp_as_subscript(MultiDictObject *self, PyObject *key, PyObject *val)
+{
+ if (val == NULL) {
+ return pair_list_del(&self->pairs, key);
+ } else {
+ return pair_list_replace(&self->pairs, key, val);
+ }
+}
+
+static inline int
+multidict_sq_contains(MultiDictObject *self, PyObject *key)
+{
+ return pair_list_contains(&self->pairs, key);
+}
+
+static inline PyObject *
+multidict_tp_iter(MultiDictObject *self)
+{
+ return multidict_keys_iter_new(self);
+}
+
+static inline PyObject *
+multidict_tp_richcompare(PyObject *self, PyObject *other, int op)
+{
+ // TODO: refactoring me with love
+
+ int cmp = 0;
+
+ if (op != Py_EQ && op != Py_NE) {
+ Py_RETURN_NOTIMPLEMENTED;
+ }
+
+ if (MultiDict_CheckExact(other) || CIMultiDict_CheckExact(other)) {
+ cmp = _multidict_eq(
+ (MultiDictObject*)self,
+ (MultiDictObject*)other
+ );
+ if (cmp < 0) {
+ return NULL;
+ }
+ if (op == Py_NE) {
+ cmp = !cmp;
+ }
+ return PyBool_FromLong(cmp);
+ }
+
+ if (MultiDictProxy_CheckExact(other) || CIMultiDictProxy_CheckExact(other)) {
+ cmp = _multidict_eq(
+ (MultiDictObject*)self,
+ ((MultiDictProxyObject*)other)->md
+ );
+ if (cmp < 0) {
+ return NULL;
+ }
+ if (op == Py_NE) {
+ cmp = !cmp;
+ }
+ return PyBool_FromLong(cmp);
+ }
+
+ cmp = PyObject_IsInstance(other, (PyObject*)collections_abc_mapping);
+ if (cmp < 0) {
+ return NULL;
+ }
+
+ if (cmp) {
+ cmp = pair_list_eq_to_mapping(&((MultiDictObject*)self)->pairs, other);
+ if (cmp < 0) {
+ return NULL;
+ }
+ if (op == Py_NE) {
+ cmp = !cmp;
+ }
+ return PyBool_FromLong(cmp);
+ }
+
+ Py_RETURN_NOTIMPLEMENTED;
+}
+
+static inline void
+multidict_tp_dealloc(MultiDictObject *self)
+{
+ PyObject_GC_UnTrack(self);
+ Py_TRASHCAN_SAFE_BEGIN(self);
+ if (self->weaklist != NULL) {
+ PyObject_ClearWeakRefs((PyObject *)self);
+ };
+ pair_list_dealloc(&self->pairs);
+ Py_TYPE(self)->tp_free((PyObject *)self);
+ Py_TRASHCAN_SAFE_END(self);
+}
+
+static inline int
+multidict_tp_traverse(MultiDictObject *self, visitproc visit, void *arg)
+{
+ return pair_list_traverse(&self->pairs, visit, arg);
+}
+
+static inline int
+multidict_tp_clear(MultiDictObject *self)
+{
+ return pair_list_clear(&self->pairs);
+}
+
+PyDoc_STRVAR(multidict_getall_doc,
+"Return a list of all values matching the key.");
+
+PyDoc_STRVAR(multidict_getone_doc,
+"Get first value matching the key.");
+
+PyDoc_STRVAR(multidict_get_doc,
+"Get first value matching the key.\n\nThe method is alias for .getone().");
+
+PyDoc_STRVAR(multidict_keys_doc,
+"Return a new view of the dictionary's keys.");
+
+PyDoc_STRVAR(multidict_items_doc,
+"Return a new view of the dictionary's items *(key, value) pairs).");
+
+PyDoc_STRVAR(multidict_values_doc,
+"Return a new view of the dictionary's values.");
+
+/******************** MultiDict ********************/
+
+static inline int
+multidict_tp_init(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ if (pair_list_init(&self->pairs) < 0) {
+ return -1;
+ }
+ if (_multidict_extend(self, args, kwds, "MultiDict", 1) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+static inline PyObject *
+multidict_add(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *key = NULL,
+ *val = NULL;
+
+ static char *kwlist[] = {"key", "value", NULL};
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO:add",
+ kwlist, &key, &val))
+ {
+ return NULL;
+ }
+
+ if (pair_list_add(&self->pairs, key, val) < 0) {
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static inline PyObject *
+multidict_copy(MultiDictObject *self)
+{
+ return _multidict_copy(self, &multidict_type);
+}
+
+static inline PyObject *
+multidict_extend(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ if (_multidict_extend(self, args, kwds, "extend", 1) < 0) {
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static inline PyObject *
+multidict_clear(MultiDictObject *self)
+{
+ if (pair_list_clear(&self->pairs) < 0) {
+ return NULL;
+ }
+
+ Py_RETURN_NONE;
+}
+
+static inline PyObject *
+multidict_setdefault(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *key = NULL,
+ *_default = NULL;
+
+ static char *setdefault_keywords[] = {"key", "default", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:setdefault",
+ setdefault_keywords, &key, &_default))
+ {
+ return NULL;
+ }
+ return pair_list_set_default(&self->pairs, key, _default);
+}
+
+static inline PyObject *
+multidict_popone(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *key = NULL,
+ *_default = NULL,
+ *ret_val = NULL;
+
+ static char *popone_keywords[] = {"key", "default", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:popone",
+ popone_keywords, &key, &_default))
+ {
+ return NULL;
+ }
+
+ ret_val = pair_list_pop_one(&self->pairs, key);
+
+ if (ret_val == NULL &&
+ PyErr_ExceptionMatches(PyExc_KeyError) &&
+ _default != NULL)
+ {
+ PyErr_Clear();
+ Py_INCREF(_default);
+ return _default;
+ }
+
+ return ret_val;
+}
+
+static inline PyObject *
+multidict_popall(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ PyObject *key = NULL,
+ *_default = NULL,
+ *ret_val = NULL;
+
+ static char *popall_keywords[] = {"key", "default", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|O:popall",
+ popall_keywords, &key, &_default))
+ {
+ return NULL;
+ }
+
+ ret_val = pair_list_pop_all(&self->pairs, key);
+
+ if (ret_val == NULL &&
+ PyErr_ExceptionMatches(PyExc_KeyError) &&
+ _default != NULL)
+ {
+ PyErr_Clear();
+ Py_INCREF(_default);
+ return _default;
+ }
+
+ return ret_val;
+}
+
+static inline PyObject *
+multidict_popitem(MultiDictObject *self)
+{
+ return pair_list_pop_item(&self->pairs);
+}
+
+static inline PyObject *
+multidict_update(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ if (_multidict_extend(self, args, kwds, "update", 0) < 0) {
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+PyDoc_STRVAR(multidict_add_doc,
+"Add the key and value, not overwriting any previous value.");
+
+PyDoc_STRVAR(multidict_copy_doc,
+"Return a copy of itself.");
+
+PyDoc_STRVAR(multidict_method_extend_doc,
+"Extend current MultiDict with more values.\n\
+This method must be used instead of update.");
+
+PyDoc_STRVAR(multidict_clear_doc,
+"Remove all items from MultiDict");
+
+PyDoc_STRVAR(multidict_setdefault_doc,
+"Return value for key, set value to default if key is not present.");
+
+PyDoc_STRVAR(multidict_popone_doc,
+"Remove the last occurrence of key and return the corresponding value.\n\n\
+If key is not found, default is returned if given, otherwise KeyError is \
+raised.\n");
+
+PyDoc_STRVAR(multidict_popall_doc,
+"Remove all occurrences of key and return the list of corresponding values.\n\n\
+If key is not found, default is returned if given, otherwise KeyError is \
+raised.\n");
+
+PyDoc_STRVAR(multidict_popitem_doc,
+"Remove and return an arbitrary (key, value) pair.");
+
+PyDoc_STRVAR(multidict_update_doc,
+"Update the dictionary from *other*, overwriting existing keys.");
+
+
+#if PY_VERSION_HEX >= 0x03090000
+#define multidict_class_getitem Py_GenericAlias
+#else
+static inline PyObject *
+multidict_class_getitem(PyObject *self, PyObject *arg)
+{
+ Py_INCREF(self);
+ return self;
+}
+#endif
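+/* On Python >= 3.9, __class_getitem__ can reuse Py_GenericAlias directly,
+   so MultiDict[str] yields a proper types.GenericAlias; on older
+   interpreters the fallback above simply returns the class, keeping the
+   subscription syntax working as a no-op. */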
+
+
+PyDoc_STRVAR(sizeof__doc__,
+"D.__sizeof__() -> size of D in memory, in bytes");
+
+static inline PyObject *
+_multidict_sizeof(MultiDictObject *self)
+{
+ Py_ssize_t size = sizeof(MultiDictObject);
+ if (self->pairs.pairs != self->pairs.buffer) {
+ size += (Py_ssize_t)sizeof(pair_t) * self->pairs.capacity;
+ }
+ return PyLong_FromSsize_t(size);
+}
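+/* A sketch of the accounting above: while the pairs still live in the
+   embedded buffer, only sizeof(MultiDictObject) is reported; once the
+   buffer has been reallocated on the heap (see pair_list_grow), another
+   capacity * sizeof(pair_t) bytes are counted on top. */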
+
+
+static PySequenceMethods multidict_sequence = {
+ .sq_contains = (objobjproc)multidict_sq_contains,
+};
+
+static PyMappingMethods multidict_mapping = {
+ .mp_length = (lenfunc)multidict_mp_len,
+ .mp_subscript = (binaryfunc)multidict_mp_subscript,
+ .mp_ass_subscript = (objobjargproc)multidict_mp_as_subscript,
+};
+
+static PyMethodDef multidict_methods[] = {
+ {
+ "getall",
+ (PyCFunction)multidict_getall,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_getall_doc
+ },
+ {
+ "getone",
+ (PyCFunction)multidict_getone,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_getone_doc
+ },
+ {
+ "get",
+ (PyCFunction)multidict_get,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_get_doc
+ },
+ {
+ "keys",
+ (PyCFunction)multidict_keys,
+ METH_NOARGS,
+ multidict_keys_doc
+ },
+ {
+ "items",
+ (PyCFunction)multidict_items,
+ METH_NOARGS,
+ multidict_items_doc
+ },
+ {
+ "values",
+ (PyCFunction)multidict_values,
+ METH_NOARGS,
+ multidict_values_doc
+ },
+ {
+ "add",
+ (PyCFunction)multidict_add,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_add_doc
+ },
+ {
+ "copy",
+ (PyCFunction)multidict_copy,
+ METH_NOARGS,
+ multidict_copy_doc
+ },
+ {
+ "extend",
+ (PyCFunction)multidict_extend,
+ METH_VARARGS | METH_KEYWORDS,
+        multidict_method_extend_doc
+ },
+ {
+ "clear",
+ (PyCFunction)multidict_clear,
+ METH_NOARGS,
+ multidict_clear_doc
+ },
+ {
+ "setdefault",
+ (PyCFunction)multidict_setdefault,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_setdefault_doc
+ },
+ {
+ "popone",
+ (PyCFunction)multidict_popone,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_popone_doc
+ },
+ {
+ "pop",
+ (PyCFunction)multidict_popone,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_popone_doc
+ },
+ {
+ "popall",
+ (PyCFunction)multidict_popall,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_popall_doc
+ },
+ {
+ "popitem",
+ (PyCFunction)multidict_popitem,
+ METH_NOARGS,
+ multidict_popitem_doc
+ },
+ {
+ "update",
+ (PyCFunction)multidict_update,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_update_doc
+ },
+ {
+ "__reduce__",
+ (PyCFunction)multidict_reduce,
+ METH_NOARGS,
+ NULL,
+ },
+ {
+ "__class_getitem__",
+ (PyCFunction)multidict_class_getitem,
+ METH_O | METH_CLASS,
+ NULL
+ },
+ {
+ "__sizeof__",
+ (PyCFunction)_multidict_sizeof,
+ METH_NOARGS,
+ sizeof__doc__,
+ },
+ {
+ NULL,
+ NULL
+ } /* sentinel */
+};
+
+
+PyDoc_STRVAR(MultiDict_doc,
+"Dictionary with support for duplicate keys.");
+
+
+static PyTypeObject multidict_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "multidict._multidict.MultiDict", /* tp_name */
+ sizeof(MultiDictObject), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_tp_dealloc,
+ .tp_repr = (reprfunc)multidict_repr,
+ .tp_as_sequence = &multidict_sequence,
+ .tp_as_mapping = &multidict_mapping,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
+    .tp_doc = MultiDict_doc,
+ .tp_traverse = (traverseproc)multidict_tp_traverse,
+ .tp_clear = (inquiry)multidict_tp_clear,
+ .tp_richcompare = (richcmpfunc)multidict_tp_richcompare,
+ .tp_weaklistoffset = offsetof(MultiDictObject, weaklist),
+ .tp_iter = (getiterfunc)multidict_tp_iter,
+ .tp_methods = multidict_methods,
+ .tp_init = (initproc)multidict_tp_init,
+ .tp_alloc = PyType_GenericAlloc,
+ .tp_new = PyType_GenericNew,
+ .tp_free = PyObject_GC_Del,
+};
+
+/******************** CIMultiDict ********************/
+
+static inline int
+cimultidict_tp_init(MultiDictObject *self, PyObject *args, PyObject *kwds)
+{
+ if (ci_pair_list_init(&self->pairs) < 0) {
+ return -1;
+ }
+ if (_multidict_extend(self, args, kwds, "CIMultiDict", 1) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+static inline PyObject *
+cimultidict_copy(MultiDictObject *self)
+{
+ return _multidict_copy(self, &cimultidict_type);
+}
+
+PyDoc_STRVAR(cimultidict_copy_doc,
+"Return a copy of itself.");
+
+static PyMethodDef cimultidict_methods[] = {
+ {
+ "copy",
+ (PyCFunction)cimultidict_copy,
+ METH_NOARGS,
+ cimultidict_copy_doc
+ },
+ {
+ NULL,
+ NULL
+ } /* sentinel */
+};
+
+PyDoc_STRVAR(CIMultiDict_doc,
+"Dictionary with support for duplicate case-insensitive keys.");
+
+
+static PyTypeObject cimultidict_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "multidict._multidict.CIMultiDict", /* tp_name */
+ sizeof(MultiDictObject), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_tp_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
+    .tp_doc = CIMultiDict_doc,
+ .tp_traverse = (traverseproc)multidict_tp_traverse,
+ .tp_clear = (inquiry)multidict_tp_clear,
+ .tp_weaklistoffset = offsetof(MultiDictObject, weaklist),
+ .tp_methods = cimultidict_methods,
+ .tp_base = &multidict_type,
+ .tp_init = (initproc)cimultidict_tp_init,
+ .tp_alloc = PyType_GenericAlloc,
+ .tp_new = PyType_GenericNew,
+ .tp_free = PyObject_GC_Del,
+};
+
+/******************** MultiDictProxy ********************/
+
+static inline int
+multidict_proxy_tp_init(MultiDictProxyObject *self, PyObject *args,
+ PyObject *kwds)
+{
+ PyObject *arg = NULL;
+ MultiDictObject *md = NULL;
+
+ if (!PyArg_UnpackTuple(args, "multidict._multidict.MultiDictProxy",
+ 0, 1, &arg))
+ {
+ return -1;
+ }
+ if (arg == NULL) {
+ PyErr_Format(
+ PyExc_TypeError,
+ "__init__() missing 1 required positional argument: 'arg'"
+ );
+ return -1;
+ }
+ if (!MultiDictProxy_CheckExact(arg) &&
+ !CIMultiDict_CheckExact(arg) &&
+ !MultiDict_CheckExact(arg))
+ {
+ PyErr_Format(
+ PyExc_TypeError,
+ "ctor requires MultiDict or MultiDictProxy instance, "
+ "not <classs '%s'>",
+ Py_TYPE(arg)->tp_name
+ );
+ return -1;
+ }
+
+ md = (MultiDictObject*)arg;
+ if (MultiDictProxy_CheckExact(arg)) {
+ md = ((MultiDictProxyObject*)arg)->md;
+ }
+ Py_INCREF(md);
+ self->md = md;
+
+ return 0;
+}
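+/* The proxy never copies data: it holds a strong reference to the
+   underlying MultiDictObject (unwrapping another proxy first), so every
+   mutation of the original dict is immediately visible through the proxy.
+   Illustrative Python-level behavior (a sketch):
+
+       md = MultiDict(a=1)
+       p = MultiDictProxy(md)
+       md.add("b", 2)   # "b" in p -> True
+*/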
+
+static inline PyObject *
+multidict_proxy_getall(MultiDictProxyObject *self, PyObject *args,
+ PyObject *kwds)
+{
+ return multidict_getall(self->md, args, kwds);
+}
+
+static inline PyObject *
+multidict_proxy_getone(MultiDictProxyObject *self, PyObject *args,
+ PyObject *kwds)
+{
+ return multidict_getone(self->md, args, kwds);
+}
+
+static inline PyObject *
+multidict_proxy_get(MultiDictProxyObject *self, PyObject *args,
+ PyObject *kwds)
+{
+ return multidict_get(self->md, args, kwds);
+}
+
+static inline PyObject *
+multidict_proxy_keys(MultiDictProxyObject *self)
+{
+ return multidict_keys(self->md);
+}
+
+static inline PyObject *
+multidict_proxy_items(MultiDictProxyObject *self)
+{
+ return multidict_items(self->md);
+}
+
+static inline PyObject *
+multidict_proxy_values(MultiDictProxyObject *self)
+{
+ return multidict_values(self->md);
+}
+
+static inline PyObject *
+multidict_proxy_copy(MultiDictProxyObject *self)
+{
+ return _multidict_proxy_copy(self, &multidict_type);
+}
+
+static inline PyObject *
+multidict_proxy_reduce(MultiDictProxyObject *self)
+{
+ PyErr_Format(
+ PyExc_TypeError,
+ "can't pickle %s objects", Py_TYPE(self)->tp_name
+ );
+
+ return NULL;
+}
+
+static inline Py_ssize_t
+multidict_proxy_mp_len(MultiDictProxyObject *self)
+{
+ return multidict_mp_len(self->md);
+}
+
+static inline PyObject *
+multidict_proxy_mp_subscript(MultiDictProxyObject *self, PyObject *key)
+{
+ return multidict_mp_subscript(self->md, key);
+}
+
+static inline int
+multidict_proxy_sq_contains(MultiDictProxyObject *self, PyObject *key)
+{
+ return multidict_sq_contains(self->md, key);
+}
+
+static inline PyObject *
+multidict_proxy_tp_iter(MultiDictProxyObject *self)
+{
+ return multidict_tp_iter(self->md);
+}
+
+static inline PyObject *
+multidict_proxy_tp_richcompare(MultiDictProxyObject *self, PyObject *other,
+ int op)
+{
+ return multidict_tp_richcompare((PyObject*)self->md, other, op);
+}
+
+static inline void
+multidict_proxy_tp_dealloc(MultiDictProxyObject *self)
+{
+ PyObject_GC_UnTrack(self);
+ if (self->weaklist != NULL) {
+ PyObject_ClearWeakRefs((PyObject *)self);
+    }
+ Py_XDECREF(self->md);
+ Py_TYPE(self)->tp_free((PyObject *)self);
+}
+
+static inline int
+multidict_proxy_tp_traverse(MultiDictProxyObject *self, visitproc visit,
+ void *arg)
+{
+ Py_VISIT(self->md);
+ return 0;
+}
+
+static inline int
+multidict_proxy_tp_clear(MultiDictProxyObject *self)
+{
+ Py_CLEAR(self->md);
+ return 0;
+}
+
+static PySequenceMethods multidict_proxy_sequence = {
+ .sq_contains = (objobjproc)multidict_proxy_sq_contains,
+};
+
+static PyMappingMethods multidict_proxy_mapping = {
+ .mp_length = (lenfunc)multidict_proxy_mp_len,
+ .mp_subscript = (binaryfunc)multidict_proxy_mp_subscript,
+};
+
+static PyMethodDef multidict_proxy_methods[] = {
+ {
+ "getall",
+ (PyCFunction)multidict_proxy_getall,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_getall_doc
+ },
+ {
+ "getone",
+ (PyCFunction)multidict_proxy_getone,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_getone_doc
+ },
+ {
+ "get",
+ (PyCFunction)multidict_proxy_get,
+ METH_VARARGS | METH_KEYWORDS,
+ multidict_get_doc
+ },
+ {
+ "keys",
+ (PyCFunction)multidict_proxy_keys,
+ METH_NOARGS,
+ multidict_keys_doc
+ },
+ {
+ "items",
+ (PyCFunction)multidict_proxy_items,
+ METH_NOARGS,
+ multidict_items_doc
+ },
+ {
+ "values",
+ (PyCFunction)multidict_proxy_values,
+ METH_NOARGS,
+ multidict_values_doc
+ },
+ {
+ "copy",
+ (PyCFunction)multidict_proxy_copy,
+ METH_NOARGS,
+ multidict_copy_doc
+ },
+ {
+ "__reduce__",
+ (PyCFunction)multidict_proxy_reduce,
+ METH_NOARGS,
+ NULL
+ },
+ {
+ "__class_getitem__",
+ (PyCFunction)multidict_class_getitem,
+ METH_O | METH_CLASS,
+ NULL
+ },
+ {
+ NULL,
+ NULL
+ } /* sentinel */
+};
+
+
+PyDoc_STRVAR(MultiDictProxy_doc,
+"Read-only proxy for MultiDict instance.");
+
+
+static PyTypeObject multidict_proxy_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "multidict._multidict.MultiDictProxy", /* tp_name */
+ sizeof(MultiDictProxyObject), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_proxy_tp_dealloc,
+ .tp_repr = (reprfunc)multidict_repr,
+ .tp_as_sequence = &multidict_proxy_sequence,
+ .tp_as_mapping = &multidict_proxy_mapping,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
+    .tp_doc = MultiDictProxy_doc,
+ .tp_traverse = (traverseproc)multidict_proxy_tp_traverse,
+ .tp_clear = (inquiry)multidict_proxy_tp_clear,
+ .tp_richcompare = (richcmpfunc)multidict_proxy_tp_richcompare,
+ .tp_weaklistoffset = offsetof(MultiDictProxyObject, weaklist),
+ .tp_iter = (getiterfunc)multidict_proxy_tp_iter,
+ .tp_methods = multidict_proxy_methods,
+ .tp_init = (initproc)multidict_proxy_tp_init,
+ .tp_alloc = PyType_GenericAlloc,
+ .tp_new = PyType_GenericNew,
+ .tp_free = PyObject_GC_Del,
+};
+
+/******************** CIMultiDictProxy ********************/
+
+static inline int
+cimultidict_proxy_tp_init(MultiDictProxyObject *self, PyObject *args,
+ PyObject *kwds)
+{
+ PyObject *arg = NULL;
+ MultiDictObject *md = NULL;
+
+ if (!PyArg_UnpackTuple(args, "multidict._multidict.CIMultiDictProxy",
+ 1, 1, &arg))
+ {
+ return -1;
+ }
+ if (arg == NULL) {
+ PyErr_Format(
+ PyExc_TypeError,
+ "__init__() missing 1 required positional argument: 'arg'"
+ );
+ return -1;
+ }
+ if (!CIMultiDictProxy_CheckExact(arg) && !CIMultiDict_CheckExact(arg)) {
+ PyErr_Format(
+ PyExc_TypeError,
+ "ctor requires CIMultiDict or CIMultiDictProxy instance, "
+ "not <class '%s'>",
+ Py_TYPE(arg)->tp_name
+ );
+ return -1;
+ }
+
+ md = (MultiDictObject*)arg;
+ if (CIMultiDictProxy_CheckExact(arg)) {
+ md = ((MultiDictProxyObject*)arg)->md;
+ }
+ Py_INCREF(md);
+ self->md = md;
+
+ return 0;
+}
+
+static inline PyObject *
+cimultidict_proxy_copy(MultiDictProxyObject *self)
+{
+ return _multidict_proxy_copy(self, &cimultidict_type);
+}
+
+
+PyDoc_STRVAR(CIMultiDictProxy_doc,
+"Read-only proxy for CIMultiDict instance.");
+
+PyDoc_STRVAR(cimultidict_proxy_copy_doc,
+"Return copy of itself");
+
+static PyMethodDef cimultidict_proxy_methods[] = {
+ {
+ "copy",
+ (PyCFunction)cimultidict_proxy_copy,
+ METH_NOARGS,
+ cimultidict_proxy_copy_doc
+ },
+ {
+ NULL,
+ NULL
+ } /* sentinel */
+};
+
+static PyTypeObject cimultidict_proxy_type = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "multidict._multidict.CIMultiDictProxy", /* tp_name */
+ sizeof(MultiDictProxyObject), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_proxy_tp_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HAVE_GC,
+    .tp_doc = CIMultiDictProxy_doc,
+ .tp_traverse = (traverseproc)multidict_proxy_tp_traverse,
+ .tp_clear = (inquiry)multidict_proxy_tp_clear,
+ .tp_richcompare = (richcmpfunc)multidict_proxy_tp_richcompare,
+ .tp_weaklistoffset = offsetof(MultiDictProxyObject, weaklist),
+ .tp_methods = cimultidict_proxy_methods,
+ .tp_base = &multidict_proxy_type,
+ .tp_init = (initproc)cimultidict_proxy_tp_init,
+ .tp_alloc = PyType_GenericAlloc,
+ .tp_new = PyType_GenericNew,
+ .tp_free = PyObject_GC_Del,
+};
+
+/******************** Other functions ********************/
+
+static inline PyObject *
+getversion(PyObject *self, PyObject *md)
+{
+ pair_list_t *pairs = NULL;
+ if (MultiDict_CheckExact(md) || CIMultiDict_CheckExact(md)) {
+ pairs = &((MultiDictObject*)md)->pairs;
+ } else if (MultiDictProxy_CheckExact(md) || CIMultiDictProxy_CheckExact(md)) {
+ pairs = &((MultiDictProxyObject*)md)->md->pairs;
+ } else {
+ PyErr_Format(PyExc_TypeError, "unexpected type");
+ return NULL;
+ }
+ return PyLong_FromUnsignedLong(pair_list_version(pairs));
+}
+
+/******************** Module ********************/
+
+static inline void
+module_free(void *m)
+{
+ Py_CLEAR(collections_abc_mapping);
+ Py_CLEAR(collections_abc_mut_mapping);
+ Py_CLEAR(collections_abc_mut_multi_mapping);
+}
+
+static PyMethodDef multidict_module_methods[] = {
+ {
+ "getversion",
+ (PyCFunction)getversion,
+ METH_O
+ },
+ {
+ NULL,
+ NULL
+ } /* sentinel */
+};
+
+static PyModuleDef multidict_module = {
+ PyModuleDef_HEAD_INIT, /* m_base */
+ "_multidict", /* m_name */
+ .m_size = -1,
+ .m_methods = multidict_module_methods,
+ .m_free = (freefunc)module_free,
+};
+
+PyMODINIT_FUNC
+PyInit__multidict(void)
+{
+ PyObject *module = NULL,
+ *reg_func_call_result = NULL;
+
+#define WITH_MOD(NAME) \
+ Py_CLEAR(module); \
+ module = PyImport_ImportModule(NAME); \
+ if (module == NULL) { \
+ goto fail; \
+ }
+
+#define GET_MOD_ATTR(VAR, NAME) \
+ VAR = PyObject_GetAttrString(module, NAME); \
+ if (VAR == NULL) { \
+ goto fail; \
+ }
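+    /* The two macros above centralize the import-then-getattr pattern used
+       below: any failure jumps to the shared `fail` label, which releases
+       whatever globals have already been populated. */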
+
+ if (multidict_views_init() < 0) {
+ goto fail;
+ }
+
+ if (multidict_iter_init() < 0) {
+ goto fail;
+ }
+
+ if (istr_init() < 0) {
+ goto fail;
+ }
+
+ if (PyType_Ready(&multidict_type) < 0 ||
+ PyType_Ready(&cimultidict_type) < 0 ||
+ PyType_Ready(&multidict_proxy_type) < 0 ||
+ PyType_Ready(&cimultidict_proxy_type) < 0)
+ {
+ goto fail;
+ }
+
+ WITH_MOD("collections.abc");
+ GET_MOD_ATTR(collections_abc_mapping, "Mapping");
+
+ WITH_MOD("multidict._abc");
+ GET_MOD_ATTR(collections_abc_mut_mapping, "MultiMapping");
+
+ WITH_MOD("multidict._abc");
+ GET_MOD_ATTR(collections_abc_mut_multi_mapping, "MutableMultiMapping");
+
+ WITH_MOD("multidict._multidict_base");
+ GET_MOD_ATTR(repr_func, "_mdrepr");
+
+ /* Register in _abc mappings (CI)MultiDict and (CI)MultiDictProxy */
+ reg_func_call_result = PyObject_CallMethod(
+ collections_abc_mut_mapping,
+ "register", "O",
+ (PyObject*)&multidict_proxy_type
+ );
+ if (reg_func_call_result == NULL) {
+ goto fail;
+ }
+ Py_DECREF(reg_func_call_result);
+
+ reg_func_call_result = PyObject_CallMethod(
+ collections_abc_mut_mapping,
+ "register", "O",
+ (PyObject*)&cimultidict_proxy_type
+ );
+ if (reg_func_call_result == NULL) {
+ goto fail;
+ }
+ Py_DECREF(reg_func_call_result);
+
+ reg_func_call_result = PyObject_CallMethod(
+ collections_abc_mut_multi_mapping,
+ "register", "O",
+ (PyObject*)&multidict_type
+ );
+ if (reg_func_call_result == NULL) {
+ goto fail;
+ }
+ Py_DECREF(reg_func_call_result);
+
+ reg_func_call_result = PyObject_CallMethod(
+ collections_abc_mut_multi_mapping,
+ "register", "O",
+ (PyObject*)&cimultidict_type
+ );
+ if (reg_func_call_result == NULL) {
+ goto fail;
+ }
+ Py_DECREF(reg_func_call_result);
+
+ /* Instantiate this module */
+    module = PyModule_Create(&multidict_module);
+    if (module == NULL) {
+        goto fail;
+    }
+
+ Py_INCREF(&istr_type);
+ if (PyModule_AddObject(
+ module, "istr", (PyObject*)&istr_type) < 0)
+ {
+ goto fail;
+ }
+
+ Py_INCREF(&multidict_type);
+ if (PyModule_AddObject(
+ module, "MultiDict", (PyObject*)&multidict_type) < 0)
+ {
+ goto fail;
+ }
+
+ Py_INCREF(&cimultidict_type);
+ if (PyModule_AddObject(
+ module, "CIMultiDict", (PyObject*)&cimultidict_type) < 0)
+ {
+ goto fail;
+ }
+
+ Py_INCREF(&multidict_proxy_type);
+ if (PyModule_AddObject(
+ module, "MultiDictProxy", (PyObject*)&multidict_proxy_type) < 0)
+ {
+ goto fail;
+ }
+
+ Py_INCREF(&cimultidict_proxy_type);
+ if (PyModule_AddObject(
+ module, "CIMultiDictProxy", (PyObject*)&cimultidict_proxy_type) < 0)
+ {
+ goto fail;
+ }
+
+ return module;
+
+fail:
+ Py_XDECREF(collections_abc_mapping);
+ Py_XDECREF(collections_abc_mut_mapping);
+ Py_XDECREF(collections_abc_mut_multi_mapping);
+
+ return NULL;
+
+#undef WITH_MOD
+#undef GET_MOD_ATTR
+}
diff --git a/third_party/python/multidict/multidict/_multidict_base.py b/third_party/python/multidict/multidict/_multidict_base.py
new file mode 100644
index 0000000000..394466548c
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multidict_base.py
@@ -0,0 +1,144 @@
+from collections.abc import ItemsView, Iterable, KeysView, Set, ValuesView
+
+
+def _abc_itemsview_register(view_cls):
+ ItemsView.register(view_cls)
+
+
+def _abc_keysview_register(view_cls):
+ KeysView.register(view_cls)
+
+
+def _abc_valuesview_register(view_cls):
+ ValuesView.register(view_cls)
+
+
+def _viewbaseset_richcmp(view, other, op):
+ if op == 0: # <
+ if not isinstance(other, Set):
+ return NotImplemented
+ return len(view) < len(other) and view <= other
+ elif op == 1: # <=
+ if not isinstance(other, Set):
+ return NotImplemented
+ if len(view) > len(other):
+ return False
+ for elem in view:
+ if elem not in other:
+ return False
+ return True
+ elif op == 2: # ==
+ if not isinstance(other, Set):
+ return NotImplemented
+ return len(view) == len(other) and view <= other
+ elif op == 3: # !=
+ return not view == other
+ elif op == 4: # >
+ if not isinstance(other, Set):
+ return NotImplemented
+ return len(view) > len(other) and view >= other
+ elif op == 5: # >=
+ if not isinstance(other, Set):
+ return NotImplemented
+ if len(view) < len(other):
+ return False
+ for elem in other:
+ if elem not in view:
+ return False
+ return True
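+# The integer op codes above follow CPython's rich-comparison constants
+# (Py_LT=0, Py_LE=1, Py_EQ=2, Py_NE=3, Py_GT=4, Py_GE=5), matching the raw
+# `op` value handed over by the C extension's rich-comparison slot.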
+
+
+def _viewbaseset_and(view, other):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ if isinstance(view, Set):
+ view = set(iter(view))
+    # materialize any iterable, Set or not, as a concrete set
+    other = set(iter(other))
+ return view & other
+
+
+def _viewbaseset_or(view, other):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ if isinstance(view, Set):
+ view = set(iter(view))
+    # materialize any iterable, Set or not, as a concrete set
+    other = set(iter(other))
+ return view | other
+
+
+def _viewbaseset_sub(view, other):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ if isinstance(view, Set):
+ view = set(iter(view))
+    # materialize any iterable, Set or not, as a concrete set
+    other = set(iter(other))
+ return view - other
+
+
+def _viewbaseset_xor(view, other):
+ if not isinstance(other, Iterable):
+ return NotImplemented
+ if isinstance(view, Set):
+ view = set(iter(view))
+    # materialize any iterable, Set or not, as a concrete set
+    other = set(iter(other))
+ return view ^ other
+
+
+def _itemsview_isdisjoint(view, other):
+ "Return True if two sets have a null intersection."
+ for v in other:
+ if v in view:
+ return False
+ return True
+
+
+def _itemsview_repr(view):
+ lst = []
+ for k, v in view:
+ lst.append("{!r}: {!r}".format(k, v))
+ body = ", ".join(lst)
+ return "{}({})".format(view.__class__.__name__, body)
+
+
+def _keysview_isdisjoint(view, other):
+ "Return True if two sets have a null intersection."
+ for k in other:
+ if k in view:
+ return False
+ return True
+
+
+def _keysview_repr(view):
+ lst = []
+ for k in view:
+ lst.append("{!r}".format(k))
+ body = ", ".join(lst)
+ return "{}({})".format(view.__class__.__name__, body)
+
+
+def _valuesview_repr(view):
+ lst = []
+ for v in view:
+ lst.append("{!r}".format(v))
+ body = ", ".join(lst)
+ return "{}({})".format(view.__class__.__name__, body)
+
+
+def _mdrepr(md):
+ lst = []
+ for k, v in md.items():
+ lst.append("'{}': {!r}".format(k, v))
+ body = ", ".join(lst)
+ return "<{}({})>".format(md.__class__.__name__, body)
diff --git a/third_party/python/multidict/multidict/_multidict_py.py b/third_party/python/multidict/multidict/_multidict_py.py
new file mode 100644
index 0000000000..1ec63da0d5
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multidict_py.py
@@ -0,0 +1,515 @@
+import sys
+from array import array
+from collections import abc
+
+from ._abc import MultiMapping, MutableMultiMapping
+
+_marker = object()
+
+
+class istr(str):
+
+ """Case insensitive str."""
+
+ __is_istr__ = True
+
+
+upstr = istr  # alias kept for backward compatibility
+
+
+def getversion(md):
+ if not isinstance(md, _Base):
+ raise TypeError("Parameter should be multidict or proxy")
+ return md._impl._version
+
+
+_version = array("Q", [0])
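+# NOTE: array("Q") provides a shared mutable unsigned 64-bit counter that
+# wraps around like the C implementation's uint64_t; every mutation bumps
+# it, so each multidict state gets a distinct version number.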
+
+
+class _Impl:
+ __slots__ = ("_items", "_version")
+
+ def __init__(self):
+ self._items = []
+ self.incr_version()
+
+ def incr_version(self):
+ global _version
+ v = _version
+ v[0] += 1
+ self._version = v[0]
+
+ if sys.implementation.name != "pypy":
+
+ def __sizeof__(self):
+ return object.__sizeof__(self) + sys.getsizeof(self._items)
+
+
+class _Base:
+ def _title(self, key):
+ return key
+
+ def getall(self, key, default=_marker):
+ """Return a list of all values matching the key."""
+ identity = self._title(key)
+ res = [v for i, k, v in self._impl._items if i == identity]
+ if res:
+ return res
+        if default is not _marker:
+ return default
+ raise KeyError("Key not found: %r" % key)
+
+ def getone(self, key, default=_marker):
+ """Get first value matching the key."""
+ identity = self._title(key)
+ for i, k, v in self._impl._items:
+ if i == identity:
+ return v
+ if default is not _marker:
+ return default
+ raise KeyError("Key not found: %r" % key)
+
+ # Mapping interface #
+
+ def __getitem__(self, key):
+ return self.getone(key)
+
+ def get(self, key, default=None):
+ """Get first value matching the key.
+
+        The method is an alias for .getone().
+ """
+ return self.getone(key, default)
+
+ def __iter__(self):
+ return iter(self.keys())
+
+ def __len__(self):
+ return len(self._impl._items)
+
+ def keys(self):
+ """Return a new view of the dictionary's keys."""
+ return _KeysView(self._impl)
+
+ def items(self):
+ """Return a new view of the dictionary's items *(key, value) pairs)."""
+ return _ItemsView(self._impl)
+
+ def values(self):
+ """Return a new view of the dictionary's values."""
+ return _ValuesView(self._impl)
+
+ def __eq__(self, other):
+ if not isinstance(other, abc.Mapping):
+ return NotImplemented
+ if isinstance(other, _Base):
+ lft = self._impl._items
+ rht = other._impl._items
+ if len(lft) != len(rht):
+ return False
+            for (i1, k1, v1), (i2, k2, v2) in zip(lft, rht):
+ if i1 != i2 or v1 != v2:
+ return False
+ return True
+ if len(self._impl._items) != len(other):
+ return False
+ for k, v in self.items():
+ nv = other.get(k, _marker)
+ if v != nv:
+ return False
+ return True
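+    # Note on the fast path above: two _Base instances are compared
+    # pairwise and in order, so multidicts holding the same items in a
+    # different order compare unequal; against a plain Mapping only one
+    # value per key can be checked, mirroring dict semantics.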
+
+ def __contains__(self, key):
+ identity = self._title(key)
+ for i, k, v in self._impl._items:
+ if i == identity:
+ return True
+ return False
+
+ def __repr__(self):
+ body = ", ".join("'{}': {!r}".format(k, v) for k, v in self.items())
+ return "<{}({})>".format(self.__class__.__name__, body)
+
+
+class MultiDictProxy(_Base, MultiMapping):
+ """Read-only proxy for MultiDict instance."""
+
+ def __init__(self, arg):
+ if not isinstance(arg, (MultiDict, MultiDictProxy)):
+ raise TypeError(
+ "ctor requires MultiDict or MultiDictProxy instance"
+ ", not {}".format(type(arg))
+ )
+
+ self._impl = arg._impl
+
+ def __reduce__(self):
+ raise TypeError("can't pickle {} objects".format(self.__class__.__name__))
+
+ def copy(self):
+ """Return a copy of itself."""
+ return MultiDict(self.items())
+
+
+class CIMultiDictProxy(MultiDictProxy):
+ """Read-only proxy for CIMultiDict instance."""
+
+ def __init__(self, arg):
+ if not isinstance(arg, (CIMultiDict, CIMultiDictProxy)):
+ raise TypeError(
+ "ctor requires CIMultiDict or CIMultiDictProxy instance"
+ ", not {}".format(type(arg))
+ )
+
+ self._impl = arg._impl
+
+ def _title(self, key):
+ return key.title()
+
+ def copy(self):
+ """Return a copy of itself."""
+ return CIMultiDict(self.items())
+
+
+class MultiDict(_Base, MutableMultiMapping):
+ """Dictionary with the support for duplicate keys."""
+
+ def __init__(self, *args, **kwargs):
+ self._impl = _Impl()
+
+ self._extend(args, kwargs, self.__class__.__name__, self._extend_items)
+
+ if sys.implementation.name != "pypy":
+
+ def __sizeof__(self):
+ return object.__sizeof__(self) + sys.getsizeof(self._impl)
+
+ def __reduce__(self):
+ return (self.__class__, (list(self.items()),))
+
+ def _title(self, key):
+ return key
+
+ def _key(self, key):
+ if isinstance(key, str):
+ return key
+ else:
+ raise TypeError(
+ "MultiDict keys should be either str " "or subclasses of str"
+ )
+
+ def add(self, key, value):
+ identity = self._title(key)
+ self._impl._items.append((identity, self._key(key), value))
+ self._impl.incr_version()
+
+ def copy(self):
+ """Return a copy of itself."""
+ cls = self.__class__
+ return cls(self.items())
+
+ __copy__ = copy
+
+ def extend(self, *args, **kwargs):
+ """Extend current MultiDict with more values.
+
+ This method must be used instead of update.
+ """
+ self._extend(args, kwargs, "extend", self._extend_items)
+
+ def _extend(self, args, kwargs, name, method):
+ if len(args) > 1:
+ raise TypeError(
+ "{} takes at most 1 positional argument"
+ " ({} given)".format(name, len(args))
+ )
+ if args:
+ arg = args[0]
+ if isinstance(args[0], (MultiDict, MultiDictProxy)) and not kwargs:
+ items = arg._impl._items
+ else:
+ if hasattr(arg, "items"):
+ arg = arg.items()
+ if kwargs:
+ arg = list(arg)
+ arg.extend(list(kwargs.items()))
+ items = []
+ for item in arg:
+                    if len(item) != 2:
+ raise TypeError(
+ "{} takes either dict or list of (key, value) "
+ "tuples".format(name)
+ )
+ items.append((self._title(item[0]), self._key(item[0]), item[1]))
+
+ method(items)
+ else:
+ method(
+ [
+ (self._title(key), self._key(key), value)
+ for key, value in kwargs.items()
+ ]
+ )
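+    # _extend normalizes every accepted input shape (another multidict,
+    # whose prebuilt (identity, key, value) triples are reused directly;
+    # a dict; an iterable of pairs; **kwargs) into the same triple format
+    # before handing it to `method`.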
+
+ def _extend_items(self, items):
+ for identity, key, value in items:
+ self.add(key, value)
+
+ def clear(self):
+ """Remove all items from MultiDict."""
+ self._impl._items.clear()
+ self._impl.incr_version()
+
+ # Mapping interface #
+
+ def __setitem__(self, key, value):
+ self._replace(key, value)
+
+ def __delitem__(self, key):
+ identity = self._title(key)
+ items = self._impl._items
+ found = False
+ for i in range(len(items) - 1, -1, -1):
+ if items[i][0] == identity:
+ del items[i]
+ found = True
+ if not found:
+ raise KeyError(key)
+ else:
+ self._impl.incr_version()
+
+ def setdefault(self, key, default=None):
+ """Return value for key, set value to default if key is not present."""
+ identity = self._title(key)
+ for i, k, v in self._impl._items:
+ if i == identity:
+ return v
+ self.add(key, default)
+ return default
+
+ def popone(self, key, default=_marker):
+ """Remove specified key and return the corresponding value.
+
+        If key is not found, default is returned if given, otherwise
+ KeyError is raised.
+
+ """
+ identity = self._title(key)
+ for i in range(len(self._impl._items)):
+ if self._impl._items[i][0] == identity:
+ value = self._impl._items[i][2]
+ del self._impl._items[i]
+ self._impl.incr_version()
+ return value
+ if default is _marker:
+ raise KeyError(key)
+ else:
+ return default
+
+ pop = popone # type: ignore
+
+ def popall(self, key, default=_marker):
+ """Remove all occurrences of key and return the list of corresponding
+ values.
+
+ If key is not found, default is returned if given, otherwise
+ KeyError is raised.
+
+ """
+ found = False
+ identity = self._title(key)
+ ret = []
+ for i in range(len(self._impl._items) - 1, -1, -1):
+ item = self._impl._items[i]
+ if item[0] == identity:
+ ret.append(item[2])
+ del self._impl._items[i]
+ self._impl.incr_version()
+ found = True
+ if not found:
+ if default is _marker:
+ raise KeyError(key)
+ else:
+ return default
+ else:
+ ret.reverse()
+ return ret
+
+ def popitem(self):
+ """Remove and return an arbitrary (key, value) pair."""
+ if self._impl._items:
+ i = self._impl._items.pop(0)
+ self._impl.incr_version()
+ return i[1], i[2]
+ else:
+ raise KeyError("empty multidict")
+
+ def update(self, *args, **kwargs):
+ """Update the dictionary from *other*, overwriting existing keys."""
+ self._extend(args, kwargs, "update", self._update_items)
+
+ def _update_items(self, items):
+ if not items:
+ return
+ used_keys = {}
+ for identity, key, value in items:
+ start = used_keys.get(identity, 0)
+ for i in range(start, len(self._impl._items)):
+ item = self._impl._items[i]
+ if item[0] == identity:
+ used_keys[identity] = i + 1
+ self._impl._items[i] = (identity, key, value)
+ break
+ else:
+ self._impl._items.append((identity, key, value))
+ used_keys[identity] = len(self._impl._items)
+
+ # drop tails
+ i = 0
+ while i < len(self._impl._items):
+ item = self._impl._items[i]
+ identity = item[0]
+ pos = used_keys.get(identity)
+ if pos is None:
+ i += 1
+ continue
+ if i >= pos:
+ del self._impl._items[i]
+ else:
+ i += 1
+
+ self._impl.incr_version()
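+    # _update_items runs in two phases: each incoming triple overwrites the
+    # next not-yet-consumed slot carrying the same identity (or is appended
+    # when none remains), then the "drop tails" pass deletes the surviving
+    # occurrences past the last consumed position, so duplicates collapse
+    # the way dict.update() would.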
+
+ def _replace(self, key, value):
+ key = self._key(key)
+ identity = self._title(key)
+ items = self._impl._items
+
+ for i in range(len(items)):
+ item = items[i]
+ if item[0] == identity:
+ items[i] = (identity, key, value)
+                # i points to the first matching item
+ rgt = i
+ self._impl.incr_version()
+ break
+ else:
+ self._impl._items.append((identity, key, value))
+ self._impl.incr_version()
+ return
+
+ # remove all tail items
+ i = rgt + 1
+ while i < len(items):
+ item = items[i]
+ if item[0] == identity:
+ del items[i]
+ else:
+ i += 1
+
+
+class CIMultiDict(MultiDict):
+ """Dictionary with the support for duplicate case-insensitive keys."""
+
+ def _title(self, key):
+ return key.title()
+
+
+class _Iter:
+ __slots__ = ("_size", "_iter")
+
+ def __init__(self, size, iterator):
+ self._size = size
+ self._iter = iterator
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ return next(self._iter)
+
+ def __length_hint__(self):
+ return self._size
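+# _Iter wraps a plain generator solely to expose __length_hint__, letting
+# list(view) and similar consumers preallocate; the hint is the view's
+# length captured when iteration starts.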
+
+
+class _ViewBase:
+ def __init__(self, impl):
+ self._impl = impl
+ self._version = impl._version
+
+ def __len__(self):
+ return len(self._impl._items)
+
+
+class _ItemsView(_ViewBase, abc.ItemsView):
+ def __contains__(self, item):
+        assert isinstance(item, (tuple, list))
+ assert len(item) == 2
+ for i, k, v in self._impl._items:
+ if item[0] == k and item[1] == v:
+ return True
+ return False
+
+ def __iter__(self):
+ return _Iter(len(self), self._iter())
+
+ def _iter(self):
+ for i, k, v in self._impl._items:
+ if self._version != self._impl._version:
+ raise RuntimeError("Dictionary changed during iteration")
+ yield k, v
+
+ def __repr__(self):
+ lst = []
+ for item in self._impl._items:
+ lst.append("{!r}: {!r}".format(item[1], item[2]))
+ body = ", ".join(lst)
+ return "{}({})".format(self.__class__.__name__, body)
+
+
+class _ValuesView(_ViewBase, abc.ValuesView):
+ def __contains__(self, value):
+ for item in self._impl._items:
+ if item[2] == value:
+ return True
+ return False
+
+ def __iter__(self):
+ return _Iter(len(self), self._iter())
+
+ def _iter(self):
+ for item in self._impl._items:
+ if self._version != self._impl._version:
+ raise RuntimeError("Dictionary changed during iteration")
+ yield item[2]
+
+ def __repr__(self):
+ lst = []
+ for item in self._impl._items:
+ lst.append("{!r}".format(item[2]))
+ body = ", ".join(lst)
+ return "{}({})".format(self.__class__.__name__, body)
+
+
+class _KeysView(_ViewBase, abc.KeysView):
+ def __contains__(self, key):
+ for item in self._impl._items:
+ if item[1] == key:
+ return True
+ return False
+
+ def __iter__(self):
+ return _Iter(len(self), self._iter())
+
+ def _iter(self):
+ for item in self._impl._items:
+ if self._version != self._impl._version:
+ raise RuntimeError("Dictionary changed during iteration")
+ yield item[1]
+
+ def __repr__(self):
+ lst = []
+ for item in self._impl._items:
+ lst.append("{!r}".format(item[1]))
+ body = ", ".join(lst)
+ return "{}({})".format(self.__class__.__name__, body)
diff --git a/third_party/python/multidict/multidict/_multilib/defs.h b/third_party/python/multidict/multidict/_multilib/defs.h
new file mode 100644
index 0000000000..c7027c817e
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multilib/defs.h
@@ -0,0 +1,22 @@
+#ifndef _MULTIDICT_DEFS_H
+#define _MULTIDICT_DEFS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_Py_IDENTIFIER(lower);
+
+/* We link this module statically for convenience. If compiled as a shared
+ library instead, some compilers don't allow addresses of Python objects
+ defined in other libraries to be used in static initializers here. The
+ DEFERRED_ADDRESS macro is used to tag the slots where such addresses
+ appear; the module init function must fill in the tagged slots at runtime.
+ The argument is for documentation -- the macro ignores it.
+*/
+#define DEFERRED_ADDRESS(ADDR) 0
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/third_party/python/multidict/multidict/_multilib/dict.h b/third_party/python/multidict/multidict/_multilib/dict.h
new file mode 100644
index 0000000000..3caf83e5b4
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multilib/dict.h
@@ -0,0 +1,24 @@
+#ifndef _MULTIDICT_C_H
+#define _MULTIDICT_C_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct { // 16 or 24 for GC prefix
+ PyObject_HEAD // 16
+ PyObject *weaklist;
+ pair_list_t pairs;
+} MultiDictObject;
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *weaklist;
+ MultiDictObject *md;
+} MultiDictProxyObject;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/third_party/python/multidict/multidict/_multilib/istr.h b/third_party/python/multidict/multidict/_multilib/istr.h
new file mode 100644
index 0000000000..2688f48914
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multilib/istr.h
@@ -0,0 +1,85 @@
+#ifndef _MULTIDICT_ISTR_H
+#define _MULTIDICT_ISTR_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct {
+ PyUnicodeObject str;
+ PyObject * canonical;
+} istrobject;
+
+PyDoc_STRVAR(istr__doc__, "istr class implementation");
+
+static PyTypeObject istr_type;
+
+static inline void
+istr_dealloc(istrobject *self)
+{
+ Py_XDECREF(self->canonical);
+ PyUnicode_Type.tp_dealloc((PyObject*)self);
+}
+
+static inline PyObject *
+istr_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
+{
+ PyObject *x = NULL;
+ static char *kwlist[] = {"object", "encoding", "errors", 0};
+ PyObject *encoding = NULL;
+ PyObject *errors = NULL;
+ PyObject *s = NULL;
+ PyObject * ret = NULL;
+
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO:str",
+ kwlist, &x, &encoding, &errors)) {
+ return NULL;
+ }
+ if (x != NULL && Py_TYPE(x) == &istr_type) {
+ Py_INCREF(x);
+ return x;
+ }
+ ret = PyUnicode_Type.tp_new(type, args, kwds);
+ if (!ret) {
+ goto fail;
+ }
+    s = _PyObject_CallMethodId(ret, &PyId_lower, NULL);
+ if (!s) {
+ goto fail;
+ }
+ ((istrobject*)ret)->canonical = s;
+    s = NULL; /* the reference is stolen by .canonical */
+ return ret;
+fail:
+ Py_XDECREF(ret);
+ return NULL;
+}
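+/* istr caches its lower-cased ("canonical") form once at construction, so
+   case-insensitive lookups with istr keys can reuse it instead of calling
+   str.lower() on every access (see ci_key_to_str in pair_list.h). */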
+
+static PyTypeObject istr_type = {
+ PyVarObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type), 0)
+ "multidict._multidict.istr",
+ sizeof(istrobject),
+ .tp_dealloc = (destructor)istr_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT
+ | Py_TPFLAGS_BASETYPE
+ | Py_TPFLAGS_UNICODE_SUBCLASS,
+ .tp_doc = istr__doc__,
+ .tp_base = DEFERRED_ADDRESS(&PyUnicode_Type),
+ .tp_new = (newfunc)istr_new,
+};
+
+
+static inline int
+istr_init(void)
+{
+ istr_type.tp_base = &PyUnicode_Type;
+ if (PyType_Ready(&istr_type) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/third_party/python/multidict/multidict/_multilib/iter.h b/third_party/python/multidict/multidict/_multilib/iter.h
new file mode 100644
index 0000000000..4e2e32b387
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multilib/iter.h
@@ -0,0 +1,238 @@
+#ifndef _MULTIDICT_ITER_H
+#define _MULTIDICT_ITER_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static PyTypeObject multidict_items_iter_type;
+static PyTypeObject multidict_values_iter_type;
+static PyTypeObject multidict_keys_iter_type;
+
+typedef struct multidict_iter {
+ PyObject_HEAD
+ MultiDictObject *md; // MultiDict or CIMultiDict
+ Py_ssize_t current;
+ uint64_t version;
+} MultidictIter;
+
+static inline void
+_init_iter(MultidictIter *it, MultiDictObject *md)
+{
+ Py_INCREF(md);
+
+ it->md = md;
+ it->current = 0;
+ it->version = pair_list_version(&md->pairs);
+}
+
+static inline PyObject *
+multidict_items_iter_new(MultiDictObject *md)
+{
+ MultidictIter *it = PyObject_GC_New(
+ MultidictIter, &multidict_items_iter_type);
+ if (it == NULL) {
+ return NULL;
+ }
+
+ _init_iter(it, md);
+
+ PyObject_GC_Track(it);
+ return (PyObject *)it;
+}
+
+static inline PyObject *
+multidict_keys_iter_new(MultiDictObject *md)
+{
+ MultidictIter *it = PyObject_GC_New(
+ MultidictIter, &multidict_keys_iter_type);
+ if (it == NULL) {
+ return NULL;
+ }
+
+ _init_iter(it, md);
+
+ PyObject_GC_Track(it);
+ return (PyObject *)it;
+}
+
+static inline PyObject *
+multidict_values_iter_new(MultiDictObject *md)
+{
+ MultidictIter *it = PyObject_GC_New(
+ MultidictIter, &multidict_values_iter_type);
+ if (it == NULL) {
+ return NULL;
+ }
+
+ _init_iter(it, md);
+
+ PyObject_GC_Track(it);
+ return (PyObject *)it;
+}
+
+static inline PyObject *
+multidict_items_iter_iternext(MultidictIter *self)
+{
+ PyObject *key = NULL;
+ PyObject *value = NULL;
+ PyObject *ret = NULL;
+
+ if (self->version != pair_list_version(&self->md->pairs)) {
+ PyErr_SetString(PyExc_RuntimeError, "Dictionary changed during iteration");
+ return NULL;
+ }
+
+ if (!_pair_list_next(&self->md->pairs, &self->current, NULL, &key, &value, NULL)) {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+
+ ret = PyTuple_Pack(2, key, value);
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ return ret;
+}
+
+static inline PyObject *
+multidict_values_iter_iternext(MultidictIter *self)
+{
+ PyObject *value = NULL;
+
+ if (self->version != pair_list_version(&self->md->pairs)) {
+ PyErr_SetString(PyExc_RuntimeError, "Dictionary changed during iteration");
+ return NULL;
+ }
+
+ if (!pair_list_next(&self->md->pairs, &self->current, NULL, NULL, &value)) {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+
+ Py_INCREF(value);
+
+ return value;
+}
+
+static inline PyObject *
+multidict_keys_iter_iternext(MultidictIter *self)
+{
+ PyObject *key = NULL;
+
+ if (self->version != pair_list_version(&self->md->pairs)) {
+ PyErr_SetString(PyExc_RuntimeError, "Dictionary changed during iteration");
+ return NULL;
+ }
+
+ if (!pair_list_next(&self->md->pairs, &self->current, NULL, &key, NULL)) {
+ PyErr_SetNone(PyExc_StopIteration);
+ return NULL;
+ }
+
+ Py_INCREF(key);
+
+ return key;
+}
+
+static inline void
+multidict_iter_dealloc(MultidictIter *self)
+{
+ PyObject_GC_UnTrack(self);
+ Py_XDECREF(self->md);
+ PyObject_GC_Del(self);
+}
+
+static inline int
+multidict_iter_traverse(MultidictIter *self, visitproc visit, void *arg)
+{
+ Py_VISIT(self->md);
+ return 0;
+}
+
+static inline int
+multidict_iter_clear(MultidictIter *self)
+{
+ Py_CLEAR(self->md);
+ return 0;
+}
+
+static inline PyObject *
+multidict_iter_len(MultidictIter *self)
+{
+    return PyLong_FromSsize_t(pair_list_len(&self->md->pairs));
+}
+
+PyDoc_STRVAR(length_hint_doc,
+ "Private method returning an estimate of len(list(it)).");
+
+static PyMethodDef multidict_iter_methods[] = {
+ {
+ "__length_hint__",
+ (PyCFunction)(void(*)(void))multidict_iter_len,
+ METH_NOARGS,
+ length_hint_doc
+ },
+ {
+ NULL,
+ NULL
+ } /* sentinel */
+};
+
+/***********************************************************************/
+
+static PyTypeObject multidict_items_iter_type = {
+ PyVarObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type), 0)
+ "multidict._multidict._itemsiter", /* tp_name */
+ sizeof(MultidictIter), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_iter_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
+ .tp_traverse = (traverseproc)multidict_iter_traverse,
+ .tp_clear = (inquiry)multidict_iter_clear,
+ .tp_iter = PyObject_SelfIter,
+ .tp_iternext = (iternextfunc)multidict_items_iter_iternext,
+ .tp_methods = multidict_iter_methods,
+};
+
+static PyTypeObject multidict_values_iter_type = {
+ PyVarObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type), 0)
+ "multidict._multidict._valuesiter", /* tp_name */
+ sizeof(MultidictIter), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_iter_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
+ .tp_traverse = (traverseproc)multidict_iter_traverse,
+ .tp_clear = (inquiry)multidict_iter_clear,
+ .tp_iter = PyObject_SelfIter,
+ .tp_iternext = (iternextfunc)multidict_values_iter_iternext,
+ .tp_methods = multidict_iter_methods,
+};
+
+static PyTypeObject multidict_keys_iter_type = {
+ PyVarObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type), 0)
+ "multidict._multidict._keysiter", /* tp_name */
+ sizeof(MultidictIter), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_iter_dealloc,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
+ .tp_traverse = (traverseproc)multidict_iter_traverse,
+ .tp_clear = (inquiry)multidict_iter_clear,
+ .tp_iter = PyObject_SelfIter,
+ .tp_iternext = (iternextfunc)multidict_keys_iter_iternext,
+ .tp_methods = multidict_iter_methods,
+};
+
+static inline int
+multidict_iter_init()
+{
+ if (PyType_Ready(&multidict_items_iter_type) < 0 ||
+ PyType_Ready(&multidict_values_iter_type) < 0 ||
+ PyType_Ready(&multidict_keys_iter_type) < 0) {
+ return -1;
+ }
+ return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/third_party/python/multidict/multidict/_multilib/pair_list.h b/third_party/python/multidict/multidict/_multilib/pair_list.h
new file mode 100644
index 0000000000..7eafd215b5
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multilib/pair_list.h
@@ -0,0 +1,1244 @@
+#ifndef _MULTIDICT_PAIR_LIST_H
+#define _MULTIDICT_PAIR_LIST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <string.h>
+#include <stddef.h>
+#include <stdint.h>
+
+typedef PyObject * (*calc_identity_func)(PyObject *key);
+
+typedef struct pair {
+ PyObject *identity; // 8
+ PyObject *key; // 8
+ PyObject *value; // 8
+ Py_hash_t hash; // 8
+} pair_t;
+
+/* Note about the structure size
+With 29 pairs the MultiDict object size is slightly less than 1KiB
+(1000-1008 bytes depending on Python version,
+plus an extra 12 bytes for memory-allocator internal structures).
+As a result, the reserved size is at most 1020 bytes.
+
+To fit into 512 bytes, the structure could hold only 13 pairs,
+which is too small; e.g. https://www.python.org returns 16 headers
+(9 of them are caching proxy information though).
+
+The embedded buffer intention is to fit the vast majority of possible
+HTTP headers into the buffer without allocating an extra memory block.
+*/
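+/* Rough arithmetic behind the note above (a sketch; exact sizes are
+   build-dependent): on a 64-bit build sizeof(pair_t) is 4 * 8 = 32 bytes,
+   so 29 embedded pairs occupy 928 bytes, plus the pair_list bookkeeping
+   fields and the object header for roughly 1000 bytes in total. */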
+
+#if (PY_VERSION_HEX < 0x03080000)
+#define EMBEDDED_CAPACITY 28
+#else
+#define EMBEDDED_CAPACITY 29
+#endif
+
+typedef struct pair_list { // 40
+ Py_ssize_t capacity; // 8
+ Py_ssize_t size; // 8
+ uint64_t version; // 8
+ calc_identity_func calc_identity; // 8
+ pair_t *pairs; // 8
+ pair_t buffer[EMBEDDED_CAPACITY];
+} pair_list_t;
+
+#define MIN_CAPACITY 63
+#define CAPACITY_STEP 64
+
+/* Global counter used to set ma_version_tag field of dictionary.
+ * It is incremented each time that a dictionary is created and each
+ * time that a dictionary is modified. */
+static uint64_t pair_list_global_version = 0;
+
+#define NEXT_VERSION() (++pair_list_global_version)
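+/* Every mutating pair_list operation stores NEXT_VERSION() into
+   list->version; iterators and views snapshot that value and raise
+   RuntimeError when it changes mid-iteration (see iter.h). */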
+
+
+static inline int
+str_cmp(PyObject *s1, PyObject *s2)
+{
+ PyObject *ret = PyUnicode_RichCompare(s1, s2, Py_EQ);
+ if (ret == Py_True) {
+ Py_DECREF(ret);
+ return 1;
+ }
+ else if (ret == NULL) {
+ return -1;
+ }
+ else {
+ Py_DECREF(ret);
+ return 0;
+ }
+}
+
+
+static inline PyObject *
+key_to_str(PyObject *key)
+{
+ PyObject *ret;
+ PyTypeObject *type = Py_TYPE(key);
+ if (type == &istr_type) {
+ ret = ((istrobject*)key)->canonical;
+ Py_INCREF(ret);
+ return ret;
+ }
+ if (PyUnicode_CheckExact(key)) {
+ Py_INCREF(key);
+ return key;
+ }
+ if (PyUnicode_Check(key)) {
+ return PyObject_Str(key);
+ }
+ PyErr_SetString(PyExc_TypeError,
+ "MultiDict keys should be either str "
+ "or subclasses of str");
+ return NULL;
+}
+
+
+static inline PyObject *
+ci_key_to_str(PyObject *key)
+{
+ PyObject *ret;
+ PyTypeObject *type = Py_TYPE(key);
+ if (type == &istr_type) {
+ ret = ((istrobject*)key)->canonical;
+ Py_INCREF(ret);
+ return ret;
+ }
+ if (PyUnicode_Check(key)) {
+ return _PyObject_CallMethodId(key, &PyId_lower, NULL);
+ }
+ PyErr_SetString(PyExc_TypeError,
+ "CIMultiDict keys should be either str "
+ "or subclasses of str");
+ return NULL;
+}
+
+static inline pair_t *
+pair_list_get(pair_list_t *list, Py_ssize_t i)
+{
+ pair_t *item = list->pairs + i;
+ return item;
+}
+
+
+static inline int
+pair_list_grow(pair_list_t *list)
+{
+ // Grow by one element if needed
+ Py_ssize_t new_capacity;
+ pair_t *new_pairs;
+
+ if (list->size < list->capacity) {
+ return 0;
+ }
+
+ if (list->pairs == list->buffer) {
+        new_pairs = PyMem_New(pair_t, MIN_CAPACITY);
+        if (NULL == new_pairs) {
+            // Allocation error
+            return -1;
+        }
+        memcpy(new_pairs, list->buffer, (size_t)list->capacity * sizeof(pair_t));
+
+ list->pairs = new_pairs;
+ list->capacity = MIN_CAPACITY;
+ return 0;
+ } else {
+ new_capacity = list->capacity + CAPACITY_STEP;
+ new_pairs = PyMem_Resize(list->pairs, pair_t, (size_t)new_capacity);
+
+ if (NULL == new_pairs) {
+ // Resizing error
+ return -1;
+ }
+
+ list->pairs = new_pairs;
+ list->capacity = new_capacity;
+ return 0;
+ }
+}
+
+
+static inline int
+pair_list_shrink(pair_list_t *list)
+{
+ // Shrink by one element if needed.
+ // Optimization is applied to prevent jitter
+ // (grow-shrink-grow-shrink on adding-removing the single element
+ // when the buffer is full).
+ // To prevent this, the buffer is resized if the size is less than the capacity
+ // by 2*CAPACITY_STEP factor.
+ // The switch back to embedded buffer is never performed for both reasons:
+ // the code simplicity and the jitter prevention.
+
+ pair_t *new_pairs;
+ Py_ssize_t new_capacity;
+
+ if (list->capacity - list->size < 2 * CAPACITY_STEP) {
+ return 0;
+ }
+ new_capacity = list->capacity - CAPACITY_STEP;
+ if (new_capacity < MIN_CAPACITY) {
+ return 0;
+ }
+
+ new_pairs = PyMem_Resize(list->pairs, pair_t, (size_t)new_capacity);
+
+ if (NULL == new_pairs) {
+ // Resizing error
+ return -1;
+ }
+
+ list->pairs = new_pairs;
+ list->capacity = new_capacity;
+
+ return 0;
+}
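+/* Worked example of the hysteresis above: with capacity 191 the buffer
+   shrinks to 127 only once the size drops to 63, i.e. a full
+   2*CAPACITY_STEP of slack is required, so adding and removing a single
+   element near a boundary never oscillates between reallocations. */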
+
+
+static inline int
+_pair_list_init(pair_list_t *list, calc_identity_func calc_identity)
+{
+ list->pairs = list->buffer;
+ list->capacity = EMBEDDED_CAPACITY;
+ list->size = 0;
+ list->version = NEXT_VERSION();
+ list->calc_identity = calc_identity;
+ return 0;
+}
+
+static inline int
+pair_list_init(pair_list_t *list)
+{
+ return _pair_list_init(list, key_to_str);
+}
+
+
+static inline int
+ci_pair_list_init(pair_list_t *list)
+{
+ return _pair_list_init(list, ci_key_to_str);
+}
+
+
+static inline void
+pair_list_dealloc(pair_list_t *list)
+{
+ pair_t *pair;
+ Py_ssize_t pos;
+
+ for (pos = 0; pos < list->size; pos++) {
+ pair = pair_list_get(list, pos);
+
+ Py_XDECREF(pair->identity);
+ Py_XDECREF(pair->key);
+ Py_XDECREF(pair->value);
+ }
+
+ /*
+ Strictly speaking, resetting size and capacity and
+ assigning pairs to buffer is not necessary.
+    Do it for consistency and idempotency.
+ The cleanup doesn't hurt performance.
+ !!!
+ !!! The buffer deletion is crucial though.
+ !!!
+ */
+ list->size = 0;
+ if (list->pairs != list->buffer) {
+ PyMem_Del(list->pairs);
+ list->pairs = list->buffer;
+ list->capacity = EMBEDDED_CAPACITY;
+ }
+}
+
+
+static inline Py_ssize_t
+pair_list_len(pair_list_t *list)
+{
+ return list->size;
+}
+
+
+static inline int
+_pair_list_add_with_hash(pair_list_t *list,
+ PyObject *identity,
+ PyObject *key,
+ PyObject *value,
+ Py_hash_t hash)
+{
+ pair_t *pair;
+
+ if (pair_list_grow(list) < 0) {
+ return -1;
+ }
+
+ pair = pair_list_get(list, list->size);
+
+ Py_INCREF(identity);
+ pair->identity = identity;
+
+ Py_INCREF(key);
+ pair->key = key;
+
+ Py_INCREF(value);
+ pair->value = value;
+
+ pair->hash = hash;
+
+ list->version = NEXT_VERSION();
+ list->size += 1;
+
+ return 0;
+}
+
+
+static inline int
+pair_list_add(pair_list_t *list,
+ PyObject *key,
+ PyObject *value)
+{
+ Py_hash_t hash;
+ PyObject *identity = NULL;
+ int ret;
+
+ identity = list->calc_identity(key);
+ if (identity == NULL) {
+ goto fail;
+ }
+ hash = PyObject_Hash(identity);
+ if (hash == -1) {
+ goto fail;
+ }
+ ret = _pair_list_add_with_hash(list, identity, key, value, hash);
+ Py_DECREF(identity);
+ return ret;
+fail:
+ Py_XDECREF(identity);
+ return -1;
+}
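+/* The identity (lower-cased for CIMultiDict) and its hash are computed
+   once per insertion; lookups compare the cached hashes first and fall
+   back to a full string comparison only on a hash match. */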
+
+
+static inline int
+pair_list_del_at(pair_list_t *list, Py_ssize_t pos)
+{
+    // return 0 on success, -1 on failure
+ Py_ssize_t tail;
+ pair_t *pair;
+
+ pair = pair_list_get(list, pos);
+ Py_DECREF(pair->identity);
+ Py_DECREF(pair->key);
+ Py_DECREF(pair->value);
+
+ list->size -= 1;
+ list->version = NEXT_VERSION();
+
+ if (list->size == pos) {
+ // remove from tail, no need to shift body
+ return 0;
+ }
+
+ tail = list->size - pos;
+ // TODO: raise an error if tail < 0
+ memmove((void *)pair_list_get(list, pos),
+ (void *)pair_list_get(list, pos + 1),
+ sizeof(pair_t) * (size_t)tail);
+
+ return pair_list_shrink(list);
+}
+
+
+static inline int
+_pair_list_drop_tail(pair_list_t *list, PyObject *identity, Py_hash_t hash,
+ Py_ssize_t pos)
+{
+    // return 1 if deleted, 0 if not found, -1 on error
+ pair_t *pair;
+ int ret;
+ int found = 0;
+
+ if (pos >= list->size) {
+ return 0;
+ }
+
+ for (; pos < list->size; pos++) {
+ pair = pair_list_get(list, pos);
+ if (pair->hash != hash) {
+ continue;
+ }
+ ret = str_cmp(pair->identity, identity);
+ if (ret > 0) {
+ if (pair_list_del_at(list, pos) < 0) {
+ return -1;
+ }
+ found = 1;
+ pos--;
+ }
+ else if (ret == -1) {
+ return -1;
+ }
+ }
+
+ return found;
+}
+
+static inline int
+_pair_list_del_hash(pair_list_t *list, PyObject *identity,
+ PyObject *key, Py_hash_t hash)
+{
+ int ret = _pair_list_drop_tail(list, identity, hash, 0);
+
+ if (ret < 0) {
+ return -1;
+ }
+ else if (ret == 0) {
+ PyErr_SetObject(PyExc_KeyError, key);
+ return -1;
+ }
+ else {
+ list->version = NEXT_VERSION();
+ return 0;
+ }
+}
+
+
+static inline int
+pair_list_del(pair_list_t *list, PyObject *key)
+{
+ PyObject *identity = NULL;
+ Py_hash_t hash;
+ int ret;
+
+ identity = list->calc_identity(key);
+ if (identity == NULL) {
+ goto fail;
+ }
+
+ hash = PyObject_Hash(identity);
+ if (hash == -1) {
+ goto fail;
+ }
+
+ ret = _pair_list_del_hash(list, identity, key, hash);
+ Py_DECREF(identity);
+ return ret;
+fail:
+ Py_XDECREF(identity);
+ return -1;
+}
+
+
+static inline uint64_t
+pair_list_version(pair_list_t *list)
+{
+ return list->version;
+}
+
+
+static inline int
+_pair_list_next(pair_list_t *list, Py_ssize_t *ppos, PyObject **pidentity,
+ PyObject **pkey, PyObject **pvalue, Py_hash_t *phash)
+{
+ pair_t *pair;
+
+ if (*ppos >= list->size) {
+ return 0;
+ }
+
+ pair = pair_list_get(list, *ppos);
+
+ if (pidentity) {
+ *pidentity = pair->identity;
+ }
+ if (pkey) {
+ *pkey = pair->key;
+ }
+ if (pvalue) {
+ *pvalue = pair->value;
+ }
+ if (phash) {
+ *phash = pair->hash;
+ }
+
+ *ppos += 1;
+ return 1;
+}
+
+
+static inline int
+pair_list_next(pair_list_t *list, Py_ssize_t *ppos, PyObject **pidentity,
+ PyObject **pkey, PyObject **pvalue)
+{
+ Py_hash_t hash;
+ return _pair_list_next(list, ppos, pidentity, pkey, pvalue, &hash);
+}
+
+
+static inline int
+pair_list_contains(pair_list_t *list, PyObject *key)
+{
+ Py_hash_t hash1, hash2;
+ Py_ssize_t pos = 0;
+ PyObject *ident = NULL;
+ PyObject *identity = NULL;
+ int tmp;
+
+ ident = list->calc_identity(key);
+ if (ident == NULL) {
+ goto fail;
+ }
+
+ hash1 = PyObject_Hash(ident);
+ if (hash1 == -1) {
+ goto fail;
+ }
+
+ while (_pair_list_next(list, &pos, &identity, NULL, NULL, &hash2)) {
+ if (hash1 != hash2) {
+ continue;
+ }
+ tmp = str_cmp(ident, identity);
+ if (tmp > 0) {
+ Py_DECREF(ident);
+ return 1;
+ }
+ else if (tmp < 0) {
+ goto fail;
+ }
+ }
+
+ Py_DECREF(ident);
+ return 0;
+fail:
+ Py_XDECREF(ident);
+ return -1;
+}
+
+
+static inline PyObject *
+pair_list_get_one(pair_list_t *list, PyObject *key)
+{
+ Py_hash_t hash1, hash2;
+ Py_ssize_t pos = 0;
+ PyObject *ident = NULL;
+ PyObject *identity = NULL;
+ PyObject *value = NULL;
+ int tmp;
+
+ ident = list->calc_identity(key);
+ if (ident == NULL) {
+ goto fail;
+ }
+
+ hash1 = PyObject_Hash(ident);
+ if (hash1 == -1) {
+ goto fail;
+ }
+
+ while (_pair_list_next(list, &pos, &identity, NULL, &value, &hash2)) {
+ if (hash1 != hash2) {
+ continue;
+ }
+ tmp = str_cmp(ident, identity);
+ if (tmp > 0) {
+ Py_INCREF(value);
+ Py_DECREF(ident);
+ return value;
+ }
+ else if (tmp < 0) {
+ goto fail;
+ }
+ }
+
+ Py_DECREF(ident);
+ PyErr_SetObject(PyExc_KeyError, key);
+ return NULL;
+fail:
+ Py_XDECREF(ident);
+ return NULL;
+}
+
+
+static inline PyObject *
+pair_list_get_all(pair_list_t *list, PyObject *key)
+{
+ Py_hash_t hash1, hash2;
+ Py_ssize_t pos = 0;
+ PyObject *ident = NULL;
+ PyObject *identity = NULL;
+ PyObject *value = NULL;
+ PyObject *res = NULL;
+ int tmp;
+
+ ident = list->calc_identity(key);
+ if (ident == NULL) {
+ goto fail;
+ }
+
+ hash1 = PyObject_Hash(ident);
+ if (hash1 == -1) {
+ goto fail;
+ }
+
+ while (_pair_list_next(list, &pos, &identity, NULL, &value, &hash2)) {
+ if (hash1 != hash2) {
+ continue;
+ }
+ tmp = str_cmp(ident, identity);
+ if (tmp > 0) {
+ if (res == NULL) {
+ res = PyList_New(1);
+ if (res == NULL) {
+ goto fail;
+ }
+ if (PyList_SetItem(res, 0, value) < 0) {
+ goto fail;
+ }
+ Py_INCREF(value);
+ }
+ else if (PyList_Append(res, value) < 0) {
+ goto fail;
+ }
+ }
+ else if (tmp < 0) {
+ goto fail;
+ }
+ }
+
+ if (res == NULL) {
+ PyErr_SetObject(PyExc_KeyError, key);
+ }
+ Py_DECREF(ident);
+ return res;
+
+fail:
+ Py_XDECREF(ident);
+ Py_XDECREF(res);
+ return NULL;
+}
+
+
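+/* dict.setdefault() semantics: return the first value stored under `key`
+   if one exists, otherwise append (key, value) and return `value`.
+   Either way the caller receives a new reference. */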
+static inline PyObject *
+pair_list_set_default(pair_list_t *list, PyObject *key, PyObject *value)
+{
+ Py_hash_t hash1, hash2;
+ Py_ssize_t pos = 0;
+ PyObject *ident = NULL;
+ PyObject *identity = NULL;
+ PyObject *value2 = NULL;
+ int tmp;
+
+ ident = list->calc_identity(key);
+ if (ident == NULL) {
+ goto fail;
+ }
+
+ hash1 = PyObject_Hash(ident);
+ if (hash1 == -1) {
+ goto fail;
+ }
+
+ while (_pair_list_next(list, &pos, &identity, NULL, &value2, &hash2)) {
+ if (hash1 != hash2) {
+ continue;
+ }
+ tmp = str_cmp(ident, identity);
+ if (tmp > 0) {
+ Py_INCREF(value2);
+ Py_DECREF(ident);
+ return value2;
+ }
+ else if (tmp < 0) {
+ goto fail;
+ }
+ }
+
+ if (_pair_list_add_with_hash(list, ident, key, value, hash1) < 0) {
+ goto fail;
+ }
+
+ Py_INCREF(value);
+ Py_DECREF(ident);
+ return value;
+fail:
+ Py_XDECREF(ident);
+ return NULL;
+}
+
+
+static inline PyObject *
+pair_list_pop_one(pair_list_t *list, PyObject *key)
+{
+ pair_t *pair;
+
+ Py_hash_t hash;
+ Py_ssize_t pos;
+ PyObject *value = NULL;
+ int tmp;
+ PyObject *ident = NULL;
+
+ ident = list->calc_identity(key);
+ if (ident == NULL) {
+ goto fail;
+ }
+
+ hash = PyObject_Hash(ident);
+ if (hash == -1) {
+ goto fail;
+ }
+
+ for (pos=0; pos < list->size; pos++) {
+ pair = pair_list_get(list, pos);
+ if (pair->hash != hash) {
+ continue;
+ }
+ tmp = str_cmp(ident, pair->identity);
+ if (tmp > 0) {
+ value = pair->value;
+ Py_INCREF(value);
+ if (pair_list_del_at(list, pos) < 0) {
+ goto fail;
+ }
+ Py_DECREF(ident);
+ return value;
+ }
+ else if (tmp < 0) {
+ goto fail;
+ }
+ }
+
+ PyErr_SetObject(PyExc_KeyError, key);
+ goto fail;
+
+fail:
+ Py_XDECREF(value);
+ Py_XDECREF(ident);
+ return NULL;
+}
+
+
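+/* Remove and return all values stored under `key` as a new list.  The
+   scan runs backwards so pair_list_del_at() never shifts an entry that
+   has not been visited yet; PyList_Reverse() at the end restores
+   insertion order. */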
+static inline PyObject *
+pair_list_pop_all(pair_list_t *list, PyObject *key)
+{
+ Py_hash_t hash;
+ Py_ssize_t pos;
+ pair_t *pair;
+ int tmp;
+ PyObject *res = NULL;
+ PyObject *ident = NULL;
+
+ ident = list->calc_identity(key);
+ if (ident == NULL) {
+ goto fail;
+ }
+
+ hash = PyObject_Hash(ident);
+ if (hash == -1) {
+ goto fail;
+ }
+
+ if (list->size == 0) {
+ PyErr_SetObject(PyExc_KeyError, ident);
+ goto fail;
+ }
+
+ for (pos = list->size - 1; pos >= 0; pos--) {
+ pair = pair_list_get(list, pos);
+ if (hash != pair->hash) {
+ continue;
+ }
+ tmp = str_cmp(ident, pair->identity);
+ if (tmp > 0) {
+ if (res == NULL) {
+ res = PyList_New(1);
+ if (res == NULL) {
+ goto fail;
+ }
+ if (PyList_SetItem(res, 0, pair->value) < 0) {
+ goto fail;
+ }
+ Py_INCREF(pair->value);
+ } else if (PyList_Append(res, pair->value) < 0) {
+ goto fail;
+ }
+ if (pair_list_del_at(list, pos) < 0) {
+ goto fail;
+ }
+ }
+ else if (tmp < 0) {
+ goto fail;
+ }
+ }
+
+ if (res == NULL) {
+ PyErr_SetObject(PyExc_KeyError, key);
+ } else if (PyList_Reverse(res) < 0) {
+ goto fail;
+ }
+ Py_DECREF(ident);
+ return res;
+
+fail:
+ Py_XDECREF(ident);
+ Py_XDECREF(res);
+ return NULL;
+}
+
+
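+/* Remove and return the first (key, value) pair as a 2-tuple.  Note this
+   pops from the front of the list, unlike dict.popitem(), which removes
+   the most recently inserted item. */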
+static inline PyObject *
+pair_list_pop_item(pair_list_t *list)
+{
+ PyObject *ret;
+ pair_t *pair;
+
+ if (list->size == 0) {
+ PyErr_SetString(PyExc_KeyError, "empty multidict");
+ return NULL;
+ }
+
+ pair = pair_list_get(list, 0);
+ ret = PyTuple_Pack(2, pair->key, pair->value);
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ if (pair_list_del_at(list, 0) < 0) {
+ Py_DECREF(ret);
+ return NULL;
+ }
+
+ return ret;
+}
+
+
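+/* dict-style assignment: overwrite the first pair whose identity matches
+   `key` in place and drop any later duplicates via _pair_list_drop_tail();
+   if nothing matches, append a new pair instead. */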
+static inline int
+pair_list_replace(pair_list_t *list, PyObject * key, PyObject *value)
+{
+ pair_t *pair;
+
+ Py_ssize_t pos;
+ int tmp;
+ int found = 0;
+
+ PyObject *identity = NULL;
+ Py_hash_t hash;
+
+ identity = list->calc_identity(key);
+ if (identity == NULL) {
+ goto fail;
+ }
+
+ hash = PyObject_Hash(identity);
+ if (hash == -1) {
+ goto fail;
+ }
+
+ for (pos = 0; pos < list->size; pos++) {
+ pair = pair_list_get(list, pos);
+ if (hash != pair->hash) {
+ continue;
+ }
+ tmp = str_cmp(identity, pair->identity);
+ if (tmp > 0) {
+ found = 1;
+ Py_INCREF(key);
+ Py_DECREF(pair->key);
+ pair->key = key;
+ Py_INCREF(value);
+ Py_DECREF(pair->value);
+ pair->value = value;
+ break;
+ }
+ else if (tmp < 0) {
+ goto fail;
+ }
+ }
+
+ if (!found) {
+ if (_pair_list_add_with_hash(list, identity, key, value, hash) < 0) {
+ goto fail;
+ }
+ Py_DECREF(identity);
+ return 0;
+ }
+ else {
+ list->version = NEXT_VERSION();
+ if (_pair_list_drop_tail(list, identity, hash, pos+1) < 0) {
+ goto fail;
+ }
+ Py_DECREF(identity);
+ return 0;
+ }
+fail:
+ Py_XDECREF(identity);
+ return -1;
+}
+
+
+static inline int
+_dict_set_number(PyObject *dict, PyObject *key, Py_ssize_t num)
+{
+ PyObject *tmp = PyLong_FromSsize_t(num);
+ if (tmp == NULL) {
+ return -1;
+ }
+
+ if (PyDict_SetItem(dict, key, tmp) < 0) {
+ Py_DECREF(tmp);
+ return -1;
+ }
+
+ return 0;
+}
+
+
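+/* Second pass of the update algorithm: `used_keys` maps every identity
+   touched by the first pass to one past the index of its last updated
+   pair.  Any later pair with the same identity is stale and is deleted;
+   the pos-- compensates for the left shift caused by pair_list_del_at(). */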
+static inline int
+_pair_list_post_update(pair_list_t *list, PyObject* used_keys, Py_ssize_t pos)
+{
+ pair_t *pair;
+ PyObject *tmp;
+ Py_ssize_t num;
+
+ for (; pos < list->size; pos++) {
+ pair = pair_list_get(list, pos);
+ tmp = PyDict_GetItem(used_keys, pair->identity);
+ if (tmp == NULL) {
+ // not found
+ continue;
+ }
+
+ num = PyLong_AsSsize_t(tmp);
+ if (num == -1) {
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_RuntimeError, "invalid internal state");
+ }
+ return -1;
+ }
+
+ if (pos >= num) {
+ // del self[pos]
+ if (pair_list_del_at(list, pos) < 0) {
+ return -1;
+ }
+ pos--;
+ }
+ }
+
+ list->version = NEXT_VERSION();
+ return 0;
+}
+
+// TODO: this function needs a better name
+static inline int
+_pair_list_update(pair_list_t *list, PyObject *key,
+ PyObject *value, PyObject *used_keys,
+ PyObject *identity, Py_hash_t hash)
+{
+ PyObject *item = NULL;
+ pair_t *pair = NULL;
+ Py_ssize_t pos;
+ int found;
+ int ident_cmp_res;
+
+ item = PyDict_GetItem(used_keys, identity);
+ if (item == NULL) {
+ pos = 0;
+ }
+ else {
+ pos = PyLong_AsSsize_t(item);
+ if (pos == -1) {
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_RuntimeError, "invalid internal state");
+ }
+ return -1;
+ }
+ }
+
+ found = 0;
+ for (; pos < list->size; pos++) {
+ pair = pair_list_get(list, pos);
+ if (pair->hash != hash) {
+ continue;
+ }
+
+ ident_cmp_res = str_cmp(pair->identity, identity);
+ if (ident_cmp_res > 0) {
+ Py_INCREF(key);
+ Py_DECREF(pair->key);
+ pair->key = key;
+
+ Py_INCREF(value);
+ Py_DECREF(pair->value);
+ pair->value = value;
+
+ if (_dict_set_number(used_keys, pair->identity, pos + 1) < 0) {
+ return -1;
+ }
+
+ found = 1;
+ break;
+ }
+ else if (ident_cmp_res < 0) {
+ return -1;
+ }
+ }
+
+ if (!found) {
+ if (_pair_list_add_with_hash(list, identity, key, value, hash) < 0) {
+ return -1;
+ }
+ if (_dict_set_number(used_keys, identity, list->size) < 0) {
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+static inline int
+pair_list_update(pair_list_t *list, pair_list_t *other)
+{
+ PyObject *used_keys = NULL;
+ pair_t *pair = NULL;
+
+ Py_ssize_t pos;
+
+ if (other->size == 0) {
+ return 0;
+ }
+
+ used_keys = PyDict_New();
+ if (used_keys == NULL) {
+ return -1;
+ }
+
+ for (pos = 0; pos < other->size; pos++) {
+ pair = pair_list_get(other, pos);
+ if (_pair_list_update(list, pair->key, pair->value, used_keys,
+ pair->identity, pair->hash) < 0) {
+ goto fail;
+ }
+ }
+
+ if (_pair_list_post_update(list, used_keys, 0) < 0) {
+ goto fail;
+ }
+
+ Py_DECREF(used_keys);
+ return 0;
+
+fail:
+ Py_XDECREF(used_keys);
+ return -1;
+}
+
+
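+/* Like pair_list_update(), but the source is an arbitrary iterable of
+   (key, value) pairs: each element must be a sequence of length 2, the
+   same contract dict() accepts.  On error, fail_1 unwinds the per-item
+   references and falls through to fail_2 for the loop invariants. */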
+static inline int
+pair_list_update_from_seq(pair_list_t *list, PyObject *seq)
+{
+ PyObject *it = NULL; // iter(seq)
+ PyObject *fast = NULL; // item as a 2-tuple or 2-list
+ PyObject *item = NULL; // seq[i]
+    PyObject *used_keys = NULL; // dict mapping Identity -> Pos
+
+ PyObject *key = NULL;
+ PyObject *value = NULL;
+ PyObject *identity = NULL;
+
+ Py_hash_t hash;
+
+ Py_ssize_t i;
+ Py_ssize_t n;
+
+ it = PyObject_GetIter(seq);
+ if (it == NULL) {
+ return -1;
+ }
+
+ used_keys = PyDict_New();
+ if (used_keys == NULL) {
+ goto fail_1;
+ }
+
+    for (i = 0; ; ++i) { // i: index of the current element in seq
+ fast = NULL;
+ item = PyIter_Next(it);
+ if (item == NULL) {
+ if (PyErr_Occurred()) {
+ goto fail_1;
+ }
+ break;
+ }
+
+ // Convert item to sequence, and verify length 2.
+ fast = PySequence_Fast(item, "");
+ if (fast == NULL) {
+ if (PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Format(PyExc_TypeError,
+ "multidict cannot convert sequence element #%zd"
+ " to a sequence",
+ i);
+ }
+ goto fail_1;
+ }
+
+ n = PySequence_Fast_GET_SIZE(fast);
+ if (n != 2) {
+ PyErr_Format(PyExc_ValueError,
+ "multidict update sequence element #%zd "
+ "has length %zd; 2 is required",
+ i, n);
+ goto fail_1;
+ }
+
+ key = PySequence_Fast_GET_ITEM(fast, 0);
+ value = PySequence_Fast_GET_ITEM(fast, 1);
+ Py_INCREF(key);
+ Py_INCREF(value);
+
+ identity = list->calc_identity(key);
+ if (identity == NULL) {
+ goto fail_1;
+ }
+
+ hash = PyObject_Hash(identity);
+ if (hash == -1) {
+ goto fail_1;
+ }
+
+ if (_pair_list_update(list, key, value, used_keys, identity, hash) < 0) {
+ goto fail_1;
+ }
+
+ Py_DECREF(key);
+ Py_DECREF(value);
+ Py_DECREF(fast);
+ Py_DECREF(item);
+ Py_DECREF(identity);
+ }
+
+ if (_pair_list_post_update(list, used_keys, 0) < 0) {
+ goto fail_2;
+ }
+
+ Py_DECREF(it);
+ Py_DECREF(used_keys);
+ return 0;
+
+fail_1:
+ Py_XDECREF(key);
+ Py_XDECREF(value);
+ Py_XDECREF(fast);
+ Py_XDECREF(item);
+ Py_XDECREF(identity);
+
+fail_2:
+ Py_XDECREF(it);
+ Py_XDECREF(used_keys);
+ return -1;
+}
+
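+/* Equality against an arbitrary mapping: the sizes must match and every
+   stored (key, value) pair must compare equal to other[key].  Returns
+   1 or 0 for the comparison result and -1 on error, so callers must not
+   treat the result as a plain boolean. */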
+static inline int
+pair_list_eq_to_mapping(pair_list_t *list, PyObject *other)
+{
+ PyObject *key = NULL;
+ PyObject *avalue = NULL;
+ PyObject *bvalue;
+
+ Py_ssize_t pos, other_len;
+
+ int eq;
+
+ if (!PyMapping_Check(other)) {
+ PyErr_Format(PyExc_TypeError,
+ "other argument must be a mapping, not %s",
+ Py_TYPE(other)->tp_name);
+ return -1;
+ }
+
+ other_len = PyMapping_Size(other);
+ if (other_len < 0) {
+ return -1;
+ }
+ if (pair_list_len(list) != other_len) {
+ return 0;
+ }
+
+ pos = 0;
+ while (pair_list_next(list, &pos, NULL, &key, &avalue)) {
+ bvalue = PyObject_GetItem(other, key);
+ if (bvalue == NULL) {
+ if (PyErr_ExceptionMatches(PyExc_KeyError)) {
+ PyErr_Clear();
+ return 0;
+ }
+ return -1;
+ }
+
+ eq = PyObject_RichCompareBool(avalue, bvalue, Py_EQ);
+ Py_DECREF(bvalue);
+
+ if (eq <= 0) {
+ return eq;
+ }
+ }
+
+ return 1;
+}
+
+
+/***********************************************************************/
+
+static inline int
+pair_list_traverse(pair_list_t *list, visitproc visit, void *arg)
+{
+ pair_t *pair = NULL;
+ Py_ssize_t pos;
+
+ for (pos = 0; pos < list->size; pos++) {
+ pair = pair_list_get(list, pos);
+        // No need to traverse the identity: it is a terminal (leaf) object
+ Py_VISIT(pair->key);
+ Py_VISIT(pair->value);
+ }
+
+ return 0;
+}
+
+
+static inline int
+pair_list_clear(pair_list_t *list)
+{
+ pair_t *pair = NULL;
+ Py_ssize_t pos;
+
+ if (list->size == 0) {
+ return 0;
+ }
+
+ list->version = NEXT_VERSION();
+ for (pos = 0; pos < list->size; pos++) {
+ pair = pair_list_get(list, pos);
+ Py_CLEAR(pair->key);
+ Py_CLEAR(pair->identity);
+ Py_CLEAR(pair->value);
+ }
+ list->size = 0;
+ if (list->pairs != list->buffer) {
+ PyMem_Del(list->pairs);
+ list->pairs = list->buffer;
+ }
+
+ return 0;
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/third_party/python/multidict/multidict/_multilib/views.h b/third_party/python/multidict/multidict/_multilib/views.h
new file mode 100644
index 0000000000..5b1ebfe77c
--- /dev/null
+++ b/third_party/python/multidict/multidict/_multilib/views.h
@@ -0,0 +1,464 @@
+#ifndef _MULTIDICT_VIEWS_H
+#define _MULTIDICT_VIEWS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static PyTypeObject multidict_itemsview_type;
+static PyTypeObject multidict_valuesview_type;
+static PyTypeObject multidict_keysview_type;
+
+static PyObject *viewbaseset_richcmp_func;
+static PyObject *viewbaseset_and_func;
+static PyObject *viewbaseset_or_func;
+static PyObject *viewbaseset_sub_func;
+static PyObject *viewbaseset_xor_func;
+
+static PyObject *abc_itemsview_register_func;
+static PyObject *abc_keysview_register_func;
+static PyObject *abc_valuesview_register_func;
+
+static PyObject *itemsview_isdisjoint_func;
+static PyObject *itemsview_repr_func;
+
+static PyObject *keysview_repr_func;
+static PyObject *keysview_isdisjoint_func;
+
+static PyObject *valuesview_repr_func;
+
+typedef struct {
+ PyObject_HEAD
+ PyObject *md;
+} _Multidict_ViewObject;
+
+
+/********** Base **********/
+
+static inline void
+_init_view(_Multidict_ViewObject *self, PyObject *md)
+{
+ Py_INCREF(md);
+ self->md = md;
+}
+
+static inline void
+multidict_view_dealloc(_Multidict_ViewObject *self)
+{
+ PyObject_GC_UnTrack(self);
+ Py_XDECREF(self->md);
+ PyObject_GC_Del(self);
+}
+
+static inline int
+multidict_view_traverse(_Multidict_ViewObject *self, visitproc visit, void *arg)
+{
+ Py_VISIT(self->md);
+ return 0;
+}
+
+static inline int
+multidict_view_clear(_Multidict_ViewObject *self)
+{
+ Py_CLEAR(self->md);
+ return 0;
+}
+
+static inline Py_ssize_t
+multidict_view_len(_Multidict_ViewObject *self)
+{
+ return pair_list_len(&((MultiDictObject*)self->md)->pairs);
+}
+
+static inline PyObject *
+multidict_view_richcompare(PyObject *self, PyObject *other, int op)
+{
+ PyObject *ret;
+ PyObject *op_obj = PyLong_FromLong(op);
+ if (op_obj == NULL) {
+ return NULL;
+ }
+ ret = PyObject_CallFunctionObjArgs(
+ viewbaseset_richcmp_func, self, other, op_obj, NULL);
+ Py_DECREF(op_obj);
+ return ret;
+}
+
+static inline PyObject *
+multidict_view_and(PyObject *self, PyObject *other)
+{
+ return PyObject_CallFunctionObjArgs(
+ viewbaseset_and_func, self, other, NULL);
+}
+
+static inline PyObject *
+multidict_view_or(PyObject *self, PyObject *other)
+{
+ return PyObject_CallFunctionObjArgs(
+ viewbaseset_or_func, self, other, NULL);
+}
+
+static inline PyObject *
+multidict_view_sub(PyObject *self, PyObject *other)
+{
+ return PyObject_CallFunctionObjArgs(
+ viewbaseset_sub_func, self, other, NULL);
+}
+
+static inline PyObject *
+multidict_view_xor(PyObject *self, PyObject *other)
+{
+ return PyObject_CallFunctionObjArgs(
+ viewbaseset_xor_func, self, other, NULL);
+}
+
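+/* The set-algebra operators (&, |, -, ^) and rich comparison are not
+   reimplemented in C; they delegate to the pure-Python helpers loaded
+   from multidict._multidict_base by multidict_views_init() below. */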
+static PyNumberMethods multidict_view_as_number = {
+ .nb_subtract = (binaryfunc)multidict_view_sub,
+ .nb_and = (binaryfunc)multidict_view_and,
+ .nb_xor = (binaryfunc)multidict_view_xor,
+ .nb_or = (binaryfunc)multidict_view_or,
+};
+
+/********** Items **********/
+
+static inline PyObject *
+multidict_itemsview_new(PyObject *md)
+{
+ _Multidict_ViewObject *mv = PyObject_GC_New(
+ _Multidict_ViewObject, &multidict_itemsview_type);
+ if (mv == NULL) {
+ return NULL;
+ }
+
+ _init_view(mv, md);
+
+ PyObject_GC_Track(mv);
+ return (PyObject *)mv;
+}
+
+static inline PyObject *
+multidict_itemsview_iter(_Multidict_ViewObject *self)
+{
+ return multidict_items_iter_new((MultiDictObject*)self->md);
+}
+
+static inline PyObject *
+multidict_itemsview_repr(_Multidict_ViewObject *self)
+{
+ return PyObject_CallFunctionObjArgs(
+ itemsview_repr_func, self, NULL);
+}
+
+static inline PyObject *
+multidict_itemsview_isdisjoint(_Multidict_ViewObject *self, PyObject *other)
+{
+ return PyObject_CallFunctionObjArgs(
+ itemsview_isdisjoint_func, self, other, NULL);
+}
+
+PyDoc_STRVAR(itemsview_isdisjoint_doc,
+ "Return True if two sets have a null intersection.");
+
+static PyMethodDef multidict_itemsview_methods[] = {
+ {
+ "isdisjoint",
+ (PyCFunction)multidict_itemsview_isdisjoint,
+ METH_O,
+ itemsview_isdisjoint_doc
+ },
+ {
+ NULL,
+ NULL
+ } /* sentinel */
+};
+
+static inline int
+multidict_itemsview_contains(_Multidict_ViewObject *self, PyObject *obj)
+{
+ PyObject *akey = NULL,
+ *aval = NULL,
+ *bkey = NULL,
+ *bval = NULL,
+ *iter = NULL,
+ *item = NULL;
+ int ret1, ret2;
+
+ if (!PyTuple_Check(obj) || PyTuple_GET_SIZE(obj) != 2) {
+ return 0;
+ }
+
+ bkey = PyTuple_GET_ITEM(obj, 0);
+ bval = PyTuple_GET_ITEM(obj, 1);
+
+ iter = multidict_itemsview_iter(self);
+ if (iter == NULL) {
+ return 0;
+ }
+
+ while ((item = PyIter_Next(iter)) != NULL) {
+ akey = PyTuple_GET_ITEM(item, 0);
+ aval = PyTuple_GET_ITEM(item, 1);
+
+ ret1 = PyObject_RichCompareBool(akey, bkey, Py_EQ);
+ if (ret1 < 0) {
+ Py_DECREF(iter);
+ Py_DECREF(item);
+ return -1;
+ }
+ ret2 = PyObject_RichCompareBool(aval, bval, Py_EQ);
+ if (ret2 < 0) {
+ Py_DECREF(iter);
+ Py_DECREF(item);
+ return -1;
+ }
+        if (ret1 > 0 && ret2 > 0) {
+ Py_DECREF(iter);
+ Py_DECREF(item);
+ return 1;
+ }
+
+ Py_DECREF(item);
+ }
+
+ Py_DECREF(iter);
+
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static PySequenceMethods multidict_itemsview_as_sequence = {
+ .sq_length = (lenfunc)multidict_view_len,
+ .sq_contains = (objobjproc)multidict_itemsview_contains,
+};
+
+static PyTypeObject multidict_itemsview_type = {
+ PyVarObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type), 0)
+ "multidict._multidict._ItemsView", /* tp_name */
+ sizeof(_Multidict_ViewObject), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_view_dealloc,
+ .tp_repr = (reprfunc)multidict_itemsview_repr,
+ .tp_as_number = &multidict_view_as_number,
+ .tp_as_sequence = &multidict_itemsview_as_sequence,
+ .tp_getattro = PyObject_GenericGetAttr,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
+ .tp_traverse = (traverseproc)multidict_view_traverse,
+ .tp_clear = (inquiry)multidict_view_clear,
+ .tp_richcompare = multidict_view_richcompare,
+ .tp_iter = (getiterfunc)multidict_itemsview_iter,
+ .tp_methods = multidict_itemsview_methods,
+};
+
+
+/********** Keys **********/
+
+static inline PyObject *
+multidict_keysview_new(PyObject *md)
+{
+ _Multidict_ViewObject *mv = PyObject_GC_New(
+ _Multidict_ViewObject, &multidict_keysview_type);
+ if (mv == NULL) {
+ return NULL;
+ }
+
+ _init_view(mv, md);
+
+ PyObject_GC_Track(mv);
+ return (PyObject *)mv;
+}
+
+static inline PyObject *
+multidict_keysview_iter(_Multidict_ViewObject *self)
+{
+ return multidict_keys_iter_new(((MultiDictObject*)self->md));
+}
+
+static inline PyObject *
+multidict_keysview_repr(_Multidict_ViewObject *self)
+{
+ return PyObject_CallFunctionObjArgs(
+ keysview_repr_func, self, NULL);
+}
+
+static inline PyObject *
+multidict_keysview_isdisjoint(_Multidict_ViewObject *self, PyObject *other)
+{
+ return PyObject_CallFunctionObjArgs(
+ keysview_isdisjoint_func, self, other, NULL);
+}
+
+PyDoc_STRVAR(keysview_isdisjoint_doc,
+ "Return True if two sets have a null intersection.");
+
+static PyMethodDef multidict_keysview_methods[] = {
+ {
+ "isdisjoint",
+ (PyCFunction)multidict_keysview_isdisjoint,
+ METH_O,
+ keysview_isdisjoint_doc
+ },
+ {
+ NULL,
+ NULL
+ } /* sentinel */
+};
+
+static inline int
+multidict_keysview_contains(_Multidict_ViewObject *self, PyObject *key)
+{
+ return pair_list_contains(&((MultiDictObject*)self->md)->pairs, key);
+}
+
+static PySequenceMethods multidict_keysview_as_sequence = {
+ .sq_length = (lenfunc)multidict_view_len,
+ .sq_contains = (objobjproc)multidict_keysview_contains,
+};
+
+static PyTypeObject multidict_keysview_type = {
+ PyVarObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type), 0)
+ "multidict._multidict._KeysView", /* tp_name */
+ sizeof(_Multidict_ViewObject), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_view_dealloc,
+ .tp_repr = (reprfunc)multidict_keysview_repr,
+ .tp_as_number = &multidict_view_as_number,
+ .tp_as_sequence = &multidict_keysview_as_sequence,
+ .tp_getattro = PyObject_GenericGetAttr,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
+ .tp_traverse = (traverseproc)multidict_view_traverse,
+ .tp_clear = (inquiry)multidict_view_clear,
+ .tp_richcompare = multidict_view_richcompare,
+ .tp_iter = (getiterfunc)multidict_keysview_iter,
+ .tp_methods = multidict_keysview_methods,
+};
+
+
+/********** Values **********/
+
+static inline PyObject *
+multidict_valuesview_new(PyObject *md)
+{
+ _Multidict_ViewObject *mv = PyObject_GC_New(
+ _Multidict_ViewObject, &multidict_valuesview_type);
+ if (mv == NULL) {
+ return NULL;
+ }
+
+ _init_view(mv, md);
+
+ PyObject_GC_Track(mv);
+ return (PyObject *)mv;
+}
+
+static inline PyObject *
+multidict_valuesview_iter(_Multidict_ViewObject *self)
+{
+ return multidict_values_iter_new(((MultiDictObject*)self->md));
+}
+
+static inline PyObject *
+multidict_valuesview_repr(_Multidict_ViewObject *self)
+{
+ return PyObject_CallFunctionObjArgs(
+ valuesview_repr_func, self, NULL);
+}
+
+static PySequenceMethods multidict_valuesview_as_sequence = {
+ .sq_length = (lenfunc)multidict_view_len,
+};
+
+static PyTypeObject multidict_valuesview_type = {
+ PyVarObject_HEAD_INIT(DEFERRED_ADDRESS(&PyType_Type), 0)
+ "multidict._multidict._ValuesView", /* tp_name */
+ sizeof(_Multidict_ViewObject), /* tp_basicsize */
+ .tp_dealloc = (destructor)multidict_view_dealloc,
+ .tp_repr = (reprfunc)multidict_valuesview_repr,
+ .tp_as_sequence = &multidict_valuesview_as_sequence,
+ .tp_getattro = PyObject_GenericGetAttr,
+ .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC,
+ .tp_traverse = (traverseproc)multidict_view_traverse,
+ .tp_clear = (inquiry)multidict_view_clear,
+ .tp_iter = (getiterfunc)multidict_valuesview_iter,
+};
+
+
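+/* One-time initialisation: resolve the helper callables from
+   multidict._multidict_base, ready the three view types, and register
+   them with the collections.abc view ABCs so isinstance() checks against
+   ItemsView/KeysView/ValuesView succeed. */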
+static inline int
+multidict_views_init(void)
+{
+ PyObject *reg_func_call_result = NULL;
+ PyObject *module = PyImport_ImportModule("multidict._multidict_base");
+ if (module == NULL) {
+ goto fail;
+ }
+
+#define GET_MOD_ATTR(VAR, NAME) \
+ VAR = PyObject_GetAttrString(module, NAME); \
+ if (VAR == NULL) { \
+ goto fail; \
+ }
+
+ GET_MOD_ATTR(viewbaseset_richcmp_func, "_viewbaseset_richcmp");
+ GET_MOD_ATTR(viewbaseset_and_func, "_viewbaseset_and");
+ GET_MOD_ATTR(viewbaseset_or_func, "_viewbaseset_or");
+ GET_MOD_ATTR(viewbaseset_sub_func, "_viewbaseset_sub");
+ GET_MOD_ATTR(viewbaseset_xor_func, "_viewbaseset_xor");
+
+ GET_MOD_ATTR(abc_itemsview_register_func, "_abc_itemsview_register");
+ GET_MOD_ATTR(abc_keysview_register_func, "_abc_keysview_register");
+ GET_MOD_ATTR(abc_valuesview_register_func, "_abc_valuesview_register");
+
+ GET_MOD_ATTR(itemsview_repr_func, "_itemsview_isdisjoint");
+ GET_MOD_ATTR(itemsview_repr_func, "_itemsview_repr");
+
+ GET_MOD_ATTR(keysview_repr_func, "_keysview_repr");
+ GET_MOD_ATTR(keysview_isdisjoint_func, "_keysview_isdisjoint");
+
+ GET_MOD_ATTR(valuesview_repr_func, "_valuesview_repr");
+
+ if (PyType_Ready(&multidict_itemsview_type) < 0 ||
+ PyType_Ready(&multidict_valuesview_type) < 0 ||
+ PyType_Ready(&multidict_keysview_type) < 0)
+ {
+ goto fail;
+ }
+
+ // abc.ItemsView.register(_ItemsView)
+ reg_func_call_result = PyObject_CallFunctionObjArgs(
+ abc_itemsview_register_func, (PyObject*)&multidict_itemsview_type, NULL);
+ if (reg_func_call_result == NULL) {
+ goto fail;
+ }
+ Py_DECREF(reg_func_call_result);
+
+ // abc.KeysView.register(_KeysView)
+ reg_func_call_result = PyObject_CallFunctionObjArgs(
+ abc_keysview_register_func, (PyObject*)&multidict_keysview_type, NULL);
+ if (reg_func_call_result == NULL) {
+ goto fail;
+ }
+ Py_DECREF(reg_func_call_result);
+
+    // abc.ValuesView.register(_ValuesView)
+ reg_func_call_result = PyObject_CallFunctionObjArgs(
+ abc_valuesview_register_func, (PyObject*)&multidict_valuesview_type, NULL);
+ if (reg_func_call_result == NULL) {
+ goto fail;
+ }
+ Py_DECREF(reg_func_call_result);
+
+ Py_DECREF(module);
+ return 0;
+
+fail:
+ Py_CLEAR(module);
+ return -1;
+
+#undef GET_MOD_ATTR
+}
+
+#ifdef __cplusplus
+}
+#endif
+#endif
diff --git a/third_party/python/multidict/multidict/py.typed b/third_party/python/multidict/multidict/py.typed
new file mode 100644
index 0000000000..dfe8cc048e
--- /dev/null
+++ b/third_party/python/multidict/multidict/py.typed
@@ -0,0 +1 @@
+PEP-561 marker. \ No newline at end of file
diff --git a/third_party/python/multidict/pyproject.toml b/third_party/python/multidict/pyproject.toml
new file mode 100644
index 0000000000..f1b83b8f62
--- /dev/null
+++ b/third_party/python/multidict/pyproject.toml
@@ -0,0 +1,11 @@
+[build-system]
+requires = ["setuptools>=40", "wheel"]
+
+
+[tool.towncrier]
+package = "multidict"
+filename = "CHANGES.rst"
+directory = "CHANGES/"
+title_format = "{version} ({project_date})"
+template = "CHANGES/.TEMPLATE.rst"
+issue_format = "`#{issue} <https://github.com/aio-libs/multidict/issues/{issue}>`_"
diff --git a/third_party/python/multidict/setup.cfg b/third_party/python/multidict/setup.cfg
new file mode 100644
index 0000000000..2c11fd4aed
--- /dev/null
+++ b/third_party/python/multidict/setup.cfg
@@ -0,0 +1,37 @@
+[aliases]
+test = pytest
+
+[metadata]
+license_files =
+ LICENSE
+long_description = file: README.rst
+
+[flake8]
+ignore = E302,E701,E305,E704,F811,N811,W503
+max-line-length = 88
+
+[isort]
+multi_line_output = 3
+include_trailing_comma = True
+force_grid_wrap = 0
+use_parentheses = True
+known_first_party = multidict
+known_third_party = pytest
+
+[tool:pytest]
+testpaths = tests
+norecursedirs = dist build .tox docs requirements tools
+addopts = --doctest-modules --cov=multidict --cov-report term-missing:skip-covered --cov-report xml --junitxml=junit-test-results.xml -v
+doctest_optionflags = ALLOW_UNICODE ELLIPSIS
+junit_family = xunit2
+
+[mypy-pytest]
+ignore_missing_imports = true
+
+[mypy-multidict._multidict]
+ignore_missing_imports = true
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/third_party/python/multidict/setup.py b/third_party/python/multidict/setup.py
new file mode 100644
index 0000000000..044f1d72ed
--- /dev/null
+++ b/third_party/python/multidict/setup.py
@@ -0,0 +1,96 @@
+import codecs
+import os
+import platform
+import re
+import sys
+
+from setuptools import Extension, setup
+
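+# The C extension targets the CPython C API, so other implementations
+# (e.g. PyPy) always fall back to the pure-Python package; exporting
+# MULTIDICT_NO_EXTENSIONS forces the same fallback on CPython.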
+NO_EXTENSIONS = bool(os.environ.get("MULTIDICT_NO_EXTENSIONS"))
+
+if sys.implementation.name != "cpython":
+ NO_EXTENSIONS = True
+
+CFLAGS = ["-O2"]
+# CFLAGS = ['-g']
+if platform.system() != "Windows":
+ CFLAGS.extend(
+ [
+ "-std=c99",
+ "-Wall",
+ "-Wsign-compare",
+ "-Wconversion",
+ "-fno-strict-aliasing",
+ "-pedantic",
+ ]
+ )
+
+extensions = [
+ Extension(
+ "multidict._multidict",
+ ["multidict/_multidict.c"],
+ extra_compile_args=CFLAGS,
+ ),
+]
+
+
+with codecs.open(
+ os.path.join(
+ os.path.abspath(os.path.dirname(__file__)), "multidict", "__init__.py"
+ ),
+ "r",
+ "latin1",
+) as fp:
+ try:
+ version = re.findall(r'^__version__ = "([^"]+)"\r?$', fp.read(), re.M)[0]
+ except IndexError:
+ raise RuntimeError("Unable to determine version.")
+
+
+def read(f):
+ return open(os.path.join(os.path.dirname(__file__), f)).read().strip()
+
+
+args = dict(
+ name="multidict",
+ version=version,
+    description="multidict implementation",
+ long_description=read("README.rst"),
+ classifiers=[
+ "License :: OSI Approved :: Apache Software License",
+ "Intended Audience :: Developers",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Development Status :: 5 - Production/Stable",
+ ],
+ author="Andrew Svetlov",
+ author_email="andrew.svetlov@gmail.com",
+ url="https://github.com/aio-libs/multidict",
+ project_urls={
+ "Chat: Gitter": "https://gitter.im/aio-libs/Lobby",
+ "CI: Azure Pipelines": "https://dev.azure.com/aio-libs/multidict/_build",
+ "Coverage: codecov": "https://codecov.io/github/aio-libs/multidict",
+ "Docs: RTD": "https://multidict.readthedocs.io",
+ "GitHub: issues": "https://github.com/aio-libs/multidict/issues",
+ "GitHub: repo": "https://github.com/aio-libs/multidict",
+ },
+ license="Apache 2",
+ packages=["multidict"],
+ python_requires=">=3.6",
+ include_package_data=True,
+)
+
+if not NO_EXTENSIONS:
+ print("*********************")
+ print("* Accelerated build *")
+ print("*********************")
+ setup(ext_modules=extensions, **args)
+else:
+ print("*********************")
+ print("* Pure Python build *")
+ print("*********************")
+ setup(**args)
diff --git a/third_party/python/packaging/packaging-21.3.dist-info/LICENSE b/third_party/python/packaging/packaging-21.3.dist-info/LICENSE
new file mode 100644
index 0000000000..6f62d44e4e
--- /dev/null
+++ b/third_party/python/packaging/packaging-21.3.dist-info/LICENSE
@@ -0,0 +1,3 @@
+This software is made available under the terms of *either* of the licenses
+found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made
+under the terms of *both* these licenses.
diff --git a/third_party/python/packaging/packaging-21.3.dist-info/LICENSE.APACHE b/third_party/python/packaging/packaging-21.3.dist-info/LICENSE.APACHE
new file mode 100644
index 0000000000..f433b1a53f
--- /dev/null
+++ b/third_party/python/packaging/packaging-21.3.dist-info/LICENSE.APACHE
@@ -0,0 +1,177 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/third_party/python/packaging/packaging-21.3.dist-info/LICENSE.BSD b/third_party/python/packaging/packaging-21.3.dist-info/LICENSE.BSD
new file mode 100644
index 0000000000..42ce7b75c9
--- /dev/null
+++ b/third_party/python/packaging/packaging-21.3.dist-info/LICENSE.BSD
@@ -0,0 +1,23 @@
+Copyright (c) Donald Stufft and individual contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/packaging/packaging-21.3.dist-info/METADATA b/third_party/python/packaging/packaging-21.3.dist-info/METADATA
new file mode 100644
index 0000000000..358ace5362
--- /dev/null
+++ b/third_party/python/packaging/packaging-21.3.dist-info/METADATA
@@ -0,0 +1,453 @@
+Metadata-Version: 2.1
+Name: packaging
+Version: 21.3
+Summary: Core utilities for Python packages
+Home-page: https://github.com/pypa/packaging
+Author: Donald Stufft and individual contributors
+Author-email: donald@stufft.io
+License: BSD-2-Clause or Apache-2.0
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+License-File: LICENSE
+License-File: LICENSE.APACHE
+License-File: LICENSE.BSD
+Requires-Dist: pyparsing (!=3.0.5,>=2.0.2)
+
+packaging
+=========
+
+.. start-intro
+
+Reusable core utilities for various Python Packaging
+`interoperability specifications <https://packaging.python.org/specifications/>`_.
+
+This library provides utilities that implement the interoperability
+specifications which have one clearly correct behaviour (e.g. :pep:`440`)
+or benefit greatly from having a single shared implementation (e.g. :pep:`425`).
+
+.. end-intro
+
+The ``packaging`` project includes the following: version handling, specifiers,
+markers, requirements, tags, utilities.
+
+Documentation
+-------------
+
+The `documentation`_ provides information and the API for the following:
+
+- Version Handling
+- Specifiers
+- Markers
+- Requirements
+- Tags
+- Utilities
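+
+A quick sketch of the two most common entry points, version parsing and
+specifier matching (see the `documentation`_ for the full API)::
+
+    from packaging.specifiers import SpecifierSet
+    from packaging.version import Version
+
+    v = Version("1.4.2")
+    spec = SpecifierSet(">=1.0,!=1.3.*")
+    assert v in spec
+    assert list(spec.filter(["0.9", "1.3.1", "1.4.2"])) == ["1.4.2"]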
+
+Installation
+------------
+
+Use ``pip`` to install these utilities::
+
+ pip install packaging
+
+Discussion
+----------
+
+If you run into bugs, you can file them in our `issue tracker`_.
+
+You can also join ``#pypa`` on Freenode to ask questions or get involved.
+
+
+.. _`documentation`: https://packaging.pypa.io/
+.. _`issue tracker`: https://github.com/pypa/packaging/issues
+
+
+Code of Conduct
+---------------
+
+Everyone interacting in the packaging project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
+
+Contributing
+------------
+
+The ``CONTRIBUTING.rst`` file outlines how to contribute to this project as
+well as how to report a potential security issue. The documentation for this
+project also covers information about `project development`_ and `security`_.
+
+.. _`project development`: https://packaging.pypa.io/en/latest/development/
+.. _`security`: https://packaging.pypa.io/en/latest/security/
+
+Project History
+---------------
+
+Please review the ``CHANGELOG.rst`` file or the `Changelog documentation`_ for
+recent changes and project history.
+
+.. _`Changelog documentation`: https://packaging.pypa.io/en/latest/changelog/
+
+Changelog
+---------
+
+21.3 - 2021-11-17
+~~~~~~~~~~~~~~~~~
+
+* Add a ``pp3-none-any`` tag (`#311 <https://github.com/pypa/packaging/issues/311>`__)
+* Replace the blank pyparsing 3 exclusion with a 3.0.5 exclusion (`#481 <https://github.com/pypa/packaging/issues/481>`__, `#486 <https://github.com/pypa/packaging/issues/486>`__)
+* Fix a spelling mistake (`#479 <https://github.com/pypa/packaging/issues/479>`__)
+
+21.2 - 2021-10-29
+~~~~~~~~~~~~~~~~~
+
+* Update documentation entry for 21.1.
+
+21.1 - 2021-10-29
+~~~~~~~~~~~~~~~~~
+
+* Update pin to pyparsing to exclude 3.0.0.
+
+21.0 - 2021-07-03
+~~~~~~~~~~~~~~~~~
+
+* PEP 656: musllinux support (`#411 <https://github.com/pypa/packaging/issues/411>`__)
+* Drop support for Python 2.7, Python 3.4 and Python 3.5.
+* Replace distutils usage with sysconfig (`#396 <https://github.com/pypa/packaging/issues/396>`__)
+* Add support for zip files in ``parse_sdist_filename`` (`#429 <https://github.com/pypa/packaging/issues/429>`__)
+* Use cached ``_hash`` attribute to short-circuit tag equality comparisons (`#417 <https://github.com/pypa/packaging/issues/417>`__)
+* Specify the default value for the ``specifier`` argument to ``SpecifierSet`` (`#437 <https://github.com/pypa/packaging/issues/437>`__)
+* Proper keyword-only "warn" argument in packaging.tags (`#403 <https://github.com/pypa/packaging/issues/403>`__)
+* Correctly remove prerelease suffixes from ~= check (`#366 <https://github.com/pypa/packaging/issues/366>`__)
+* Fix type hints for ``Version.post`` and ``Version.dev`` (`#393 <https://github.com/pypa/packaging/issues/393>`__)
+* Use typing alias ``UnparsedVersion`` (`#398 <https://github.com/pypa/packaging/issues/398>`__)
+* Improve type inference for ``packaging.specifiers.filter()`` (`#430 <https://github.com/pypa/packaging/issues/430>`__)
+* Tighten the return type of ``canonicalize_version()`` (`#402 <https://github.com/pypa/packaging/issues/402>`__)
+
+20.9 - 2021-01-29
+~~~~~~~~~~~~~~~~~
+
+* Run `isort <https://pypi.org/project/isort/>`_ over the code base (`#377 <https://github.com/pypa/packaging/issues/377>`__)
+* Add support for the ``macosx_10_*_universal2`` platform tags (`#379 <https://github.com/pypa/packaging/issues/379>`__)
+* Introduce ``packaging.utils.parse_wheel_filename()`` and ``parse_sdist_filename()``
+ (`#387 <https://github.com/pypa/packaging/issues/387>`__ and `#389 <https://github.com/pypa/packaging/issues/389>`__)
+
+20.8 - 2020-12-11
+~~~~~~~~~~~~~~~~~
+
+* Revert back to setuptools for compatibility purposes for some Linux distros (`#363 <https://github.com/pypa/packaging/issues/363>`__)
+* Do not insert an underscore in wheel tags when the interpreter version number
+ is more than 2 digits (`#372 <https://github.com/pypa/packaging/issues/372>`__)
+
+20.7 - 2020-11-28
+~~~~~~~~~~~~~~~~~
+
+No unreleased changes.
+
+20.6 - 2020-11-28
+~~~~~~~~~~~~~~~~~
+
+.. note:: This release was subsequently yanked, and these changes were included in 20.7.
+
+* Fix flit configuration, to include LICENSE files (`#357 <https://github.com/pypa/packaging/issues/357>`__)
+* Make ``intel`` a recognized CPU architecture for the ``universal`` macOS platform tag (`#361 <https://github.com/pypa/packaging/issues/361>`__)
+* Add some missing type hints to ``packaging.requirements`` (`#350 <https://github.com/pypa/packaging/issues/350>`__)
+
+20.5 - 2020-11-27
+~~~~~~~~~~~~~~~~~
+
+* Officially support Python 3.9 (`#343 <https://github.com/pypa/packaging/issues/343>`__)
+* Deprecate the ``LegacyVersion`` and ``LegacySpecifier`` classes (`#321 <https://github.com/pypa/packaging/issues/321>`__)
+* Handle ``OSError`` on non-dynamic executables when attempting to resolve
+ the glibc version string.
+
+20.4 - 2020-05-19
+~~~~~~~~~~~~~~~~~
+
+* Canonicalize version before comparing specifiers. (`#282 <https://github.com/pypa/packaging/issues/282>`__)
+* Change type hint for ``canonicalize_name`` to return
+ ``packaging.utils.NormalizedName``.
+ This enables the use of static typing tools (like mypy) to detect mixing of
+ normalized and un-normalized names.
+
+20.3 - 2020-03-05
+~~~~~~~~~~~~~~~~~
+
+* Fix changelog for 20.2.
+
+20.2 - 2020-03-05
+~~~~~~~~~~~~~~~~~
+
+* Fix a bug that caused a 32-bit OS that runs on a 64-bit ARM CPU (e.g. ARM-v8,
+ aarch64), to report the wrong bitness.
+
+20.1 - 2020-01-24
+~~~~~~~~~~~~~~~~~~~
+
+* Fix a bug caused by reuse of an exhausted iterator. (`#257 <https://github.com/pypa/packaging/issues/257>`__)
+
+20.0 - 2020-01-06
+~~~~~~~~~~~~~~~~~
+
+* Add type hints (`#191 <https://github.com/pypa/packaging/issues/191>`__)
+
+* Add proper trove classifiers for PyPy support (`#198 <https://github.com/pypa/packaging/issues/198>`__)
+
+* Scale back depending on ``ctypes`` for manylinux support detection (`#171 <https://github.com/pypa/packaging/issues/171>`__)
+
+* Use ``sys.implementation.name`` where appropriate for ``packaging.tags`` (`#193 <https://github.com/pypa/packaging/issues/193>`__)
+
+* Expand upon the API provided by ``packaging.tags``: ``interpreter_name()``, ``mac_platforms()``, ``compatible_tags()``, ``cpython_tags()``, ``generic_tags()`` (`#187 <https://github.com/pypa/packaging/issues/187>`__)
+
+* Officially support Python 3.8 (`#232 <https://github.com/pypa/packaging/issues/232>`__)
+
+* Add ``major``, ``minor``, and ``micro`` aliases to ``packaging.version.Version`` (`#226 <https://github.com/pypa/packaging/issues/226>`__)
+
+* Properly mark ``packaging`` has being fully typed by adding a `py.typed` file (`#226 <https://github.com/pypa/packaging/issues/226>`__)
+
+19.2 - 2019-09-18
+~~~~~~~~~~~~~~~~~
+
+* Remove dependency on ``attrs`` (`#178 <https://github.com/pypa/packaging/issues/178>`__, `#179 <https://github.com/pypa/packaging/issues/179>`__)
+
+* Use appropriate fallbacks for CPython ABI tag (`#181 <https://github.com/pypa/packaging/issues/181>`__, `#185 <https://github.com/pypa/packaging/issues/185>`__)
+
+* Add manylinux2014 support (`#186 <https://github.com/pypa/packaging/issues/186>`__)
+
+* Improve ABI detection (`#181 <https://github.com/pypa/packaging/issues/181>`__)
+
+* Properly handle debug wheels for Python 3.8 (`#172 <https://github.com/pypa/packaging/issues/172>`__)
+
+* Improve detection of debug builds on Windows (`#194 <https://github.com/pypa/packaging/issues/194>`__)
+
+19.1 - 2019-07-30
+~~~~~~~~~~~~~~~~~
+
+* Add the ``packaging.tags`` module. (`#156 <https://github.com/pypa/packaging/issues/156>`__)
+
+* Correctly handle two-digit versions in ``python_version`` (`#119 <https://github.com/pypa/packaging/issues/119>`__)
+
+
+19.0 - 2019-01-20
+~~~~~~~~~~~~~~~~~
+
+* Fix string representation of PEP 508 direct URL requirements with markers.
+
+* Better handling of file URLs
+
+ This allows for using ``file:///absolute/path``, which was previously
+ prevented due to the missing ``netloc``.
+
+ This allows for all file URLs that ``urlunparse`` turns back into the
+ original URL to be valid.
+
+
+18.0 - 2018-09-26
+~~~~~~~~~~~~~~~~~
+
+* Improve error messages when invalid requirements are given. (`#129 <https://github.com/pypa/packaging/issues/129>`__)
+
+
+17.1 - 2017-02-28
+~~~~~~~~~~~~~~~~~
+
+* Fix ``utils.canonicalize_version`` when supplying non PEP 440 versions.
+
+
+17.0 - 2017-02-28
+~~~~~~~~~~~~~~~~~
+
+* Drop support for python 2.6, 3.2, and 3.3.
+
+* Define minimal pyparsing version to 2.0.2 (`#91 <https://github.com/pypa/packaging/issues/91>`__).
+
+* Add ``epoch``, ``release``, ``pre``, ``dev``, and ``post`` attributes to
+ ``Version`` and ``LegacyVersion`` (`#34 <https://github.com/pypa/packaging/issues/34>`__).
+
+* Add ``Version().is_devrelease`` and ``LegacyVersion().is_devrelease`` to
+ make it easy to determine if a release is a development release.
+
+* Add ``utils.canonicalize_version`` to canonicalize version strings or
+ ``Version`` instances (`#121 <https://github.com/pypa/packaging/issues/121>`__).
+
+
+16.8 - 2016-10-29
+~~~~~~~~~~~~~~~~~
+
+* Fix markers that utilize ``in`` so that they render correctly.
+
+* Fix an erroneous test on Python RC releases.
+
+
+16.7 - 2016-04-23
+~~~~~~~~~~~~~~~~~
+
+* Add support for the deprecated ``python_implementation`` marker which was
+ an undocumented setuptools marker in addition to the newer markers.
+
+
+16.6 - 2016-03-29
+~~~~~~~~~~~~~~~~~
+
+* Add support for the deprecated, PEP 345 environment markers in addition to
+ the newer markers.
+
+
+16.5 - 2016-02-26
+~~~~~~~~~~~~~~~~~
+
+* Fix a regression in parsing requirements with whitespaces between the comma
+ separators.
+
+
+16.4 - 2016-02-22
+~~~~~~~~~~~~~~~~~
+
+* Fix a regression in parsing requirements like ``foo (==4)``.
+
+
+16.3 - 2016-02-21
+~~~~~~~~~~~~~~~~~
+
+* Fix a bug where ``packaging.requirements:Requirement`` was overly strict when
+ matching legacy requirements.
+
+
+16.2 - 2016-02-09
+~~~~~~~~~~~~~~~~~
+
+* Add a function that implements the name canonicalization from PEP 503.
+
+
+16.1 - 2016-02-07
+~~~~~~~~~~~~~~~~~
+
+* Implement requirement specifiers from PEP 508.
+
+
+16.0 - 2016-01-19
+~~~~~~~~~~~~~~~~~
+
+* Relicense so that packaging is available under *either* the Apache License,
+ Version 2.0 or a 2 Clause BSD license.
+
+* Support installation of packaging when only distutils is available.
+
+* Fix ``==`` comparison when there is a prefix and a local version in play.
+ (`#41 <https://github.com/pypa/packaging/issues/41>`__).
+
+* Implement environment markers from PEP 508.
+
+
+15.3 - 2015-08-01
+~~~~~~~~~~~~~~~~~
+
+* Normalize post-release spellings for rev/r prefixes. `#35 <https://github.com/pypa/packaging/issues/35>`__
+
+
+15.2 - 2015-05-13
+~~~~~~~~~~~~~~~~~
+
+* Fix an error where the arbitrary specifier (``===``) was not correctly
+ allowing pre-releases when it was being used.
+
+* Expose the specifier and version parts through properties on the
+ ``Specifier`` classes.
+
+* Allow iterating over the ``SpecifierSet`` to get access to all of the
+ ``Specifier`` instances.
+
+* Allow testing if a version is contained within a specifier via the ``in``
+ operator.
+
+
+15.1 - 2015-04-13
+~~~~~~~~~~~~~~~~~
+
+* Fix a logic error that was causing inconsistent answers about whether or not
+ a pre-release was contained within a ``SpecifierSet`` or not.
+
+
+15.0 - 2015-01-02
+~~~~~~~~~~~~~~~~~
+
+* Add ``Version().is_postrelease`` and ``LegacyVersion().is_postrelease`` to
+ make it easy to determine if a release is a post release.
+
+* Add ``Version().base_version`` and ``LegacyVersion().base_version`` to make
+ it easy to get the public version without any pre or post release markers.
+
+* Support the update to PEP 440 which removed the implied ``!=V.*`` when using
+ either ``>V`` or ``<V`` and which instead special cased the handling of
+ pre-releases, post-releases, and local versions when using ``>V`` or ``<V``.
+
+
+14.5 - 2014-12-17
+~~~~~~~~~~~~~~~~~
+
+* Normalize release candidates as ``rc`` instead of ``c``.
+
+* Expose the ``VERSION_PATTERN`` constant, a regular expression matching
+ a valid version.
+
+
+14.4 - 2014-12-15
+~~~~~~~~~~~~~~~~~
+
+* Ensure that versions are normalized before comparison when used in a
+ specifier with a less than (``<``) or greater than (``>``) operator.
+
+
+14.3 - 2014-11-19
+~~~~~~~~~~~~~~~~~
+
+* **BACKWARDS INCOMPATIBLE** Refactor specifier support so that it can sanely
+ handle legacy specifiers as well as PEP 440 specifiers.
+
+* **BACKWARDS INCOMPATIBLE** Move the specifier support out of
+ ``packaging.version`` into ``packaging.specifiers``.
+
+
+14.2 - 2014-09-10
+~~~~~~~~~~~~~~~~~
+
+* Add prerelease support to ``Specifier``.
+* Remove the ability to do ``item in Specifier()`` and replace it with
+ ``Specifier().contains(item)`` in order to allow flags that signal if a
+ prerelease should be accepted or not.
+* Add a method ``Specifier().filter()`` which will take an iterable and returns
+ an iterable with items that do not match the specifier filtered out.
+
+
+14.1 - 2014-09-08
+~~~~~~~~~~~~~~~~~
+
+* Allow ``LegacyVersion`` and ``Version`` to be sorted together.
+* Add ``packaging.version.parse()`` to enable easily parsing a version string
+  as either a ``Version`` or a ``LegacyVersion`` depending on its PEP 440
+ validity.
+
+
+14.0 - 2014-09-05
+~~~~~~~~~~~~~~~~~
+
+* Initial release.
+
diff --git a/third_party/python/packaging/packaging-21.3.dist-info/RECORD b/third_party/python/packaging/packaging-21.3.dist-info/RECORD
new file mode 100644
index 0000000000..870a8eb175
--- /dev/null
+++ b/third_party/python/packaging/packaging-21.3.dist-info/RECORD
@@ -0,0 +1,19 @@
+packaging/__about__.py,sha256=ugASIO2w1oUyH8_COqQ2X_s0rDhjbhQC3yJocD03h2c,661
+packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497
+packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488
+packaging/_musllinux.py,sha256=_KGgY_qc7vhMGpoqss25n2hiLCNKRtvz9mCrS7gkqyc,4378
+packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
+packaging/markers.py,sha256=Fygi3_eZnjQ-3VJizW5AhI5wvo0Hb6RMk4DidsKpOC0,8475
+packaging/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+packaging/requirements.py,sha256=rjaGRCMepZS1mlYMjJ5Qh6rfq3gtsCRQUQmftGZ_bu8,4664
+packaging/specifiers.py,sha256=LRQ0kFsHrl5qfcFNEEJrIFYsnIHQUJXY9fIsakTrrqE,30110
+packaging/tags.py,sha256=lmsnGNiJ8C4D_Pf9PbM0qgbZvD9kmB9lpZBQUZa3R_Y,15699
+packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200
+packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665
+packaging-21.3.dist-info/LICENSE,sha256=ytHvW9NA1z4HS6YU0m996spceUDD2MNIUuZcSQlobEg,197
+packaging-21.3.dist-info/LICENSE.APACHE,sha256=DVQuDIgE45qn836wDaWnYhSdxoLXgpRRKH4RuTjpRZQ,10174
+packaging-21.3.dist-info/LICENSE.BSD,sha256=tw5-m3QvHMb5SLNMFqo5_-zpQZY2S8iP8NIYDwAo-sU,1344
+packaging-21.3.dist-info/METADATA,sha256=KuKIy6qDLP3svIt6ejCbxBDhvq11ebkgUN55MeyKFyc,15147
+packaging-21.3.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+packaging-21.3.dist-info/top_level.txt,sha256=zFdHrhWnPslzsiP455HutQsqPB6v0KCtNUMtUtrefDw,10
+packaging-21.3.dist-info/RECORD,,
diff --git a/third_party/python/packaging/packaging-21.3.dist-info/WHEEL b/third_party/python/packaging/packaging-21.3.dist-info/WHEEL
new file mode 100644
index 0000000000..5bad85fdc1
--- /dev/null
+++ b/third_party/python/packaging/packaging-21.3.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/packaging/packaging-21.3.dist-info/top_level.txt b/third_party/python/packaging/packaging-21.3.dist-info/top_level.txt
new file mode 100644
index 0000000000..748809f75c
--- /dev/null
+++ b/third_party/python/packaging/packaging-21.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+packaging
diff --git a/third_party/python/packaging/packaging/__about__.py b/third_party/python/packaging/packaging/__about__.py
new file mode 100644
index 0000000000..3551bc2d29
--- /dev/null
+++ b/third_party/python/packaging/packaging/__about__.py
@@ -0,0 +1,26 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "21.3"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "2014-2019 %s" % __author__
diff --git a/third_party/python/packaging/packaging/__init__.py b/third_party/python/packaging/packaging/__init__.py
new file mode 100644
index 0000000000..3c50c5dcfe
--- /dev/null
+++ b/third_party/python/packaging/packaging/__init__.py
@@ -0,0 +1,25 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from .__about__ import (
+ __author__,
+ __copyright__,
+ __email__,
+ __license__,
+ __summary__,
+ __title__,
+ __uri__,
+ __version__,
+)
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
diff --git a/third_party/python/packaging/packaging/_manylinux.py b/third_party/python/packaging/packaging/_manylinux.py
new file mode 100644
index 0000000000..4c379aa6f6
--- /dev/null
+++ b/third_party/python/packaging/packaging/_manylinux.py
@@ -0,0 +1,301 @@
+import collections
+import functools
+import os
+import re
+import struct
+import sys
+import warnings
+from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
+
+
+# Python does not provide platform information at sufficient granularity to
+# identify the architecture of the running executable in some cases, so we
+# determine it dynamically by reading the information from the running
+# process. This only applies on Linux, which uses the ELF format.
+class _ELFFileHeader:
+ # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+ class _InvalidELFFileHeader(ValueError):
+ """
+ An invalid ELF file header was found.
+ """
+
+ ELF_MAGIC_NUMBER = 0x7F454C46
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ EM_386 = 3
+ EM_S390 = 22
+ EM_ARM = 40
+ EM_X86_64 = 62
+ EF_ARM_ABIMASK = 0xFF000000
+ EF_ARM_ABI_VER5 = 0x05000000
+ EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+ def __init__(self, file: IO[bytes]) -> None:
+ def unpack(fmt: str) -> int:
+ try:
+ data = file.read(struct.calcsize(fmt))
+ result: Tuple[int, ...] = struct.unpack(fmt, data)
+ except struct.error:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ return result[0]
+
+ self.e_ident_magic = unpack(">I")
+ if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_class = unpack("B")
+ if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_data = unpack("B")
+ if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_version = unpack("B")
+ self.e_ident_osabi = unpack("B")
+ self.e_ident_abiversion = unpack("B")
+ self.e_ident_pad = file.read(7)
+ format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+ format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+ format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+ format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+ self.e_type = unpack(format_h)
+ self.e_machine = unpack(format_h)
+ self.e_version = unpack(format_i)
+ self.e_entry = unpack(format_p)
+ self.e_phoff = unpack(format_p)
+ self.e_shoff = unpack(format_p)
+ self.e_flags = unpack(format_i)
+ self.e_ehsize = unpack(format_h)
+ self.e_phentsize = unpack(format_h)
+ self.e_phnum = unpack(format_h)
+ self.e_shentsize = unpack(format_h)
+ self.e_shnum = unpack(format_h)
+ self.e_shstrndx = unpack(format_h)
+
+
+def _get_elf_header() -> Optional[_ELFFileHeader]:
+ try:
+ with open(sys.executable, "rb") as f:
+ elf_header = _ELFFileHeader(f)
+ except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+ return None
+ return elf_header
+
+
+def _is_linux_armhf() -> bool:
+ # hard-float ABI can be detected from the ELF header of the running
+ # process
+ # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_ARM
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+ ) == elf_header.EF_ARM_ABI_VER5
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+ ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+ return result
+
+
+def _is_linux_i686() -> bool:
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_386
+ return result
+
+
+def _have_compatible_abi(arch: str) -> bool:
+ if arch == "armv7l":
+ return _is_linux_armhf()
+ if arch == "i686":
+ return _is_linux_i686()
+ return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
+
+
+# If glibc ever changes its major version, we need to know what the last
+# minor version was, so we can build the complete list of all versions.
+# For now, guess what the highest minor version might be, and assume it will
+# be 50 for testing. Once this actually happens, update the dictionary
+# with the actual value.
+_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
+
+
+class _GLibCVersion(NamedTuple):
+ major: int
+ minor: int
+
+
+def _glibc_version_string_confstr() -> Optional[str]:
+ """
+ Primary implementation of glibc_version_string using os.confstr.
+ """
+    # os.confstr is quite a bit faster than ctypes.CDLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module.
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+ version_string = os.confstr("CS_GNU_LIBC_VERSION")
+ assert version_string is not None
+ _, version = version_string.split()
+ except (AssertionError, AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def _glibc_version_string_ctypes() -> Optional[str]:
+ """
+ Fallback implementation of glibc_version_string using ctypes.
+ """
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ #
+ # We must also handle the special case where the executable is not a
+ # dynamically linked executable. This can occur when using musl libc,
+ # for example. In this situation, dlopen() will error, leading to an
+ # OSError. Interestingly, at least in the case of musl, there is no
+ # errno set on the OSError. The single string argument used to construct
+ # OSError comes from libc itself and is therefore not portable to
+ # hard code here. In any case, failure to call dlopen() means we
+    # can't proceed, so we bail on our attempt.
+ try:
+ process_namespace = ctypes.CDLL(None)
+ except OSError:
+ return None
+
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str: str = gnu_get_libc_version()
+ # py2 / py3 compatibility:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+def _glibc_version_string() -> Optional[str]:
+ """Returns glibc version string, or None if not using glibc."""
+ return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
+ """Parse glibc version.
+
+ We use a regexp instead of str.split because we want to discard any
+ random junk that might come after the minor version -- this might happen
+ in patched/forked versions of glibc (e.g. Linaro's version of glibc
+ uses version strings like "2.20-2014.11"). See gh-3588.
+ """
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+ if not m:
+ warnings.warn(
+ "Expected glibc version with 2 components major.minor,"
+ " got: %s" % version_str,
+ RuntimeWarning,
+ )
+ return -1, -1
+ return int(m.group("major")), int(m.group("minor"))
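+
+# Illustrative example: _parse_glibc_version("2.20-2014.11") returns (2, 20);
+# the regex anchors at the start of the string and discards the Linaro-style
+# suffix mentioned in the docstring above.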
+
+
+@functools.lru_cache()
+def _get_glibc_version() -> Tuple[int, int]:
+ version_str = _glibc_version_string()
+ if version_str is None:
+ return (-1, -1)
+ return _parse_glibc_version(version_str)
+
+
+# From PEP 513, PEP 600
+def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
+ sys_glibc = _get_glibc_version()
+ if sys_glibc < version:
+ return False
+ # Check for presence of _manylinux module.
+ try:
+ import _manylinux # noqa
+ except ImportError:
+ return True
+ if hasattr(_manylinux, "manylinux_compatible"):
+ result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+ if result is not None:
+ return bool(result)
+ return True
+ if version == _GLibCVersion(2, 5):
+ if hasattr(_manylinux, "manylinux1_compatible"):
+ return bool(_manylinux.manylinux1_compatible)
+ if version == _GLibCVersion(2, 12):
+ if hasattr(_manylinux, "manylinux2010_compatible"):
+ return bool(_manylinux.manylinux2010_compatible)
+ if version == _GLibCVersion(2, 17):
+ if hasattr(_manylinux, "manylinux2014_compatible"):
+ return bool(_manylinux.manylinux2014_compatible)
+ return True
+
+
+_LEGACY_MANYLINUX_MAP = {
+ # CentOS 7 w/ glibc 2.17 (PEP 599)
+ (2, 17): "manylinux2014",
+ # CentOS 6 w/ glibc 2.12 (PEP 571)
+ (2, 12): "manylinux2010",
+ # CentOS 5 w/ glibc 2.5 (PEP 513)
+ (2, 5): "manylinux1",
+}
+
+
+def platform_tags(linux: str, arch: str) -> Iterator[str]:
+ if not _have_compatible_abi(arch):
+ return
+ # Oldest glibc to be supported regardless of architecture is (2, 17).
+ too_old_glibc2 = _GLibCVersion(2, 16)
+ if arch in {"x86_64", "i686"}:
+        # On x86 and i686, the oldest glibc to be supported is (2, 5).
+ too_old_glibc2 = _GLibCVersion(2, 4)
+ current_glibc = _GLibCVersion(*_get_glibc_version())
+ glibc_max_list = [current_glibc]
+ # We can assume compatibility across glibc major versions.
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+ #
+ # Build a list of maximum glibc versions so that we can
+ # output the canonical list of all glibc from current_glibc
+ # down to too_old_glibc2, including all intermediary versions.
+ for glibc_major in range(current_glibc.major - 1, 1, -1):
+ glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
+ glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
+ for glibc_max in glibc_max_list:
+ if glibc_max.major == too_old_glibc2.major:
+ min_minor = too_old_glibc2.minor
+ else:
+ # For other glibc major versions oldest supported is (x, 0).
+ min_minor = -1
+ for glibc_minor in range(glibc_max.minor, min_minor, -1):
+ glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+ tag = "manylinux_{}_{}".format(*glibc_version)
+ if _is_compatible(tag, arch, glibc_version):
+ yield linux.replace("linux", tag)
+ # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
+ if glibc_version in _LEGACY_MANYLINUX_MAP:
+ legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
+ if _is_compatible(legacy_tag, arch, glibc_version):
+ yield linux.replace("linux", legacy_tag)
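+
+
+# Illustrative usage (the values are hypothetical; real output depends on the
+# interpreter's glibc): on a glibc 2.31 x86-64 system,
+#
+#     list(platform_tags("linux_x86_64", "x86_64"))
+#
+# would yield "manylinux_2_31_x86_64" down through "manylinux_2_5_x86_64",
+# with the legacy aliases "manylinux2014_x86_64", "manylinux2010_x86_64" and
+# "manylinux1_x86_64" emitted alongside glibc 2.17, 2.12 and 2.5.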
diff --git a/third_party/python/packaging/packaging/_musllinux.py b/third_party/python/packaging/packaging/_musllinux.py
new file mode 100644
index 0000000000..8ac3059ba3
--- /dev/null
+++ b/third_party/python/packaging/packaging/_musllinux.py
@@ -0,0 +1,136 @@
+"""PEP 656 support.
+
+This module implements logic to detect if the currently running Python is
+linked against musl, and what musl version is used.
+"""
+
+import contextlib
+import functools
+import operator
+import os
+import re
+import struct
+import subprocess
+import sys
+from typing import IO, Iterator, NamedTuple, Optional, Tuple
+
+
+def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
+ return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
+
+
+def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
+ """Detect musl libc location by parsing the Python executable.
+
+ Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+ ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+ """
+ f.seek(0)
+ try:
+ ident = _read_unpacked(f, "16B")
+ except struct.error:
+ return None
+ if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
+ return None
+ f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
+
+ try:
+ # e_fmt: Format for program header.
+ # p_fmt: Format for section header.
+ # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+ e_fmt, p_fmt, p_idx = {
+ 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
+ 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
+ }[ident[4]]
+ except KeyError:
+ return None
+ else:
+ p_get = operator.itemgetter(*p_idx)
+
+ # Find the interpreter section and return its content.
+ try:
+ _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
+ except struct.error:
+ return None
+ for i in range(e_phnum + 1):
+ f.seek(e_phoff + e_phentsize * i)
+ try:
+ p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
+ except struct.error:
+ return None
+ if p_type != 3: # Not PT_INTERP.
+ continue
+ f.seek(p_offset)
+ interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
+ if "musl" not in interpreter:
+ return None
+ return interpreter
+ return None
+
+
+class _MuslVersion(NamedTuple):
+ major: int
+ minor: int
+
+
+def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
+ lines = [n for n in (n.strip() for n in output.splitlines()) if n]
+ if len(lines) < 2 or lines[0][:4] != "musl":
+ return None
+ m = re.match(r"Version (\d+)\.(\d+)", lines[1])
+ if not m:
+ return None
+ return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
+
+
+@functools.lru_cache()
+def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
+ """Detect currently-running musl runtime version.
+
+ This is done by checking the specified executable's dynamic linking
+ information, and invoking the loader to parse its output for a version
+ string. If the loader is musl, the output would be something like::
+
+ musl libc (x86_64)
+ Version 1.2.2
+ Dynamic Program Loader
+ """
+ with contextlib.ExitStack() as stack:
+ try:
+ f = stack.enter_context(open(executable, "rb"))
+ except OSError:
+ return None
+ ld = _parse_ld_musl_from_elf(f)
+ if not ld:
+ return None
+ proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
+ return _parse_musl_version(proc.stderr)
+
+
+def platform_tags(arch: str) -> Iterator[str]:
+ """Generate musllinux tags compatible to the current platform.
+
+ :param arch: Should be the part of platform tag after the ``linux_``
+ prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
+ prerequisite for the current platform to be musllinux-compatible.
+
+ :returns: An iterator of compatible musllinux tags.
+ """
+ sys_musl = _get_musl_version(sys.executable)
+ if sys_musl is None: # Python not dynamically linked against musl.
+ return
+ for minor in range(sys_musl.minor, -1, -1):
+ yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
+
+
+if __name__ == "__main__": # pragma: no cover
+ import sysconfig
+
+ plat = sysconfig.get_platform()
+ assert plat.startswith("linux-"), "not linux"
+
+ print("plat:", plat)
+ print("musl:", _get_musl_version(sys.executable))
+ print("tags:", end=" ")
+ for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
+ print(t, end="\n ")
diff --git a/third_party/python/packaging/packaging/_structures.py b/third_party/python/packaging/packaging/_structures.py
new file mode 100644
index 0000000000..90a6465f96
--- /dev/null
+++ b/third_party/python/packaging/packaging/_structures.py
@@ -0,0 +1,61 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+
+class InfinityType:
+ def __repr__(self) -> str:
+ return "Infinity"
+
+ def __hash__(self) -> int:
+ return hash(repr(self))
+
+ def __lt__(self, other: object) -> bool:
+ return False
+
+ def __le__(self, other: object) -> bool:
+ return False
+
+ def __eq__(self, other: object) -> bool:
+ return isinstance(other, self.__class__)
+
+ def __gt__(self, other: object) -> bool:
+ return True
+
+ def __ge__(self, other: object) -> bool:
+ return True
+
+ def __neg__(self: object) -> "NegativeInfinityType":
+ return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType:
+ def __repr__(self) -> str:
+ return "-Infinity"
+
+ def __hash__(self) -> int:
+ return hash(repr(self))
+
+ def __lt__(self, other: object) -> bool:
+ return True
+
+ def __le__(self, other: object) -> bool:
+ return True
+
+ def __eq__(self, other: object) -> bool:
+ return isinstance(other, self.__class__)
+
+ def __gt__(self, other: object) -> bool:
+ return False
+
+ def __ge__(self, other: object) -> bool:
+ return False
+
+ def __neg__(self: object) -> InfinityType:
+ return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
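+
+# Illustrative note: these singletons act as comparison sentinels when
+# building version sort keys (packaging.version, for example, can key a
+# missing pre-release segment as Infinity so that a final release sorts
+# after its own pre-releases). Mixed comparisons with plain numbers work
+# through the reflected operators:
+#
+#     >>> NegativeInfinity < 0 < Infinity
+#     True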
diff --git a/third_party/python/packaging/packaging/markers.py b/third_party/python/packaging/packaging/markers.py
new file mode 100644
index 0000000000..cb640e8f9b
--- /dev/null
+++ b/third_party/python/packaging/packaging/markers.py
@@ -0,0 +1,304 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import operator
+import os
+import platform
+import sys
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from pyparsing import ( # noqa: N817
+ Forward,
+ Group,
+ Literal as L,
+ ParseException,
+ ParseResults,
+ QuotedString,
+ ZeroOrMore,
+ stringEnd,
+ stringStart,
+)
+
+from .specifiers import InvalidSpecifier, Specifier
+
+__all__ = [
+ "InvalidMarker",
+ "UndefinedComparison",
+ "UndefinedEnvironmentName",
+ "Marker",
+ "default_environment",
+]
+
+Operator = Callable[[str, str], bool]
+
+
+class InvalidMarker(ValueError):
+ """
+    An invalid marker was found; users should refer to PEP 508.
+ """
+
+
+class UndefinedComparison(ValueError):
+ """
+ An invalid operation was attempted on a value that doesn't support it.
+ """
+
+
+class UndefinedEnvironmentName(ValueError):
+ """
+    An attempt was made to use a name that does not exist inside of the
+    environment.
+ """
+
+
+class Node:
+ def __init__(self, value: Any) -> None:
+ self.value = value
+
+ def __str__(self) -> str:
+ return str(self.value)
+
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__}('{self}')>"
+
+ def serialize(self) -> str:
+ raise NotImplementedError
+
+
+class Variable(Node):
+ def serialize(self) -> str:
+ return str(self)
+
+
+class Value(Node):
+ def serialize(self) -> str:
+ return f'"{self}"'
+
+
+class Op(Node):
+ def serialize(self) -> str:
+ return str(self)
+
+
+VARIABLE = (
+ L("implementation_version")
+ | L("platform_python_implementation")
+ | L("implementation_name")
+ | L("python_full_version")
+ | L("platform_release")
+ | L("platform_version")
+ | L("platform_machine")
+ | L("platform_system")
+ | L("python_version")
+ | L("sys_platform")
+ | L("os_name")
+ | L("os.name") # PEP-345
+ | L("sys.platform") # PEP-345
+ | L("platform.version") # PEP-345
+ | L("platform.machine") # PEP-345
+ | L("platform.python_implementation") # PEP-345
+ | L("python_implementation") # undocumented setuptools legacy
+ | L("extra") # PEP-508
+)
+ALIASES = {
+ "os.name": "os_name",
+ "sys.platform": "sys_platform",
+ "platform.version": "platform_version",
+ "platform.machine": "platform_machine",
+ "platform.python_implementation": "platform_python_implementation",
+ "python_implementation": "platform_python_implementation",
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+ L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
+ if isinstance(results, ParseResults):
+ return [_coerce_parse_result(i) for i in results]
+ else:
+ return results
+
+
+def _format_marker(
+ marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
+) -> str:
+
+ assert isinstance(marker, (list, tuple, str))
+
+ # Sometimes we have a structure like [[...]] which is a single item list
+    # where the single item is itself its own list. In that case we want to
+    # skip the rest of this function so that we don't get extraneous () on
+    # the outside.
+ if (
+ isinstance(marker, list)
+ and len(marker) == 1
+ and isinstance(marker[0], (list, tuple))
+ ):
+ return _format_marker(marker[0])
+
+ if isinstance(marker, list):
+ inner = (_format_marker(m, first=False) for m in marker)
+ if first:
+ return " ".join(inner)
+ else:
+ return "(" + " ".join(inner) + ")"
+ elif isinstance(marker, tuple):
+ return " ".join([m.serialize() for m in marker])
+ else:
+ return marker
+
+
+_operators: Dict[str, Operator] = {
+ "in": lambda lhs, rhs: lhs in rhs,
+ "not in": lambda lhs, rhs: lhs not in rhs,
+ "<": operator.lt,
+ "<=": operator.le,
+ "==": operator.eq,
+ "!=": operator.ne,
+ ">=": operator.ge,
+ ">": operator.gt,
+}
+
+
+def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
+ try:
+ spec = Specifier("".join([op.serialize(), rhs]))
+ except InvalidSpecifier:
+ pass
+ else:
+ return spec.contains(lhs)
+
+ oper: Optional[Operator] = _operators.get(op.serialize())
+ if oper is None:
+ raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
+
+ return oper(lhs, rhs)
+
+
+class Undefined:
+ pass
+
+
+_undefined = Undefined()
+
+
+def _get_env(environment: Dict[str, str], name: str) -> str:
+ value: Union[str, Undefined] = environment.get(name, _undefined)
+
+ if isinstance(value, Undefined):
+ raise UndefinedEnvironmentName(
+ f"{name!r} does not exist in evaluation environment."
+ )
+
+ return value
+
+
+def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
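+    # The parsed marker is evaluated in disjunctive normal form: "and"
+    # appends to the current group, "or" starts a new group, and the final
+    # result is any(all(group) for group in groups), so 'a and b or c'
+    # evaluates as (a and b) or (c).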
+ groups: List[List[bool]] = [[]]
+
+ for marker in markers:
+ assert isinstance(marker, (list, tuple, str))
+
+ if isinstance(marker, list):
+ groups[-1].append(_evaluate_markers(marker, environment))
+ elif isinstance(marker, tuple):
+ lhs, op, rhs = marker
+
+ if isinstance(lhs, Variable):
+ lhs_value = _get_env(environment, lhs.value)
+ rhs_value = rhs.value
+ else:
+ lhs_value = lhs.value
+ rhs_value = _get_env(environment, rhs.value)
+
+ groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+ else:
+ assert marker in ["and", "or"]
+ if marker == "or":
+ groups.append([])
+
+ return any(all(item) for item in groups)
+
+
+def format_full_version(info: "sys._version_info") -> str:
+ version = "{0.major}.{0.minor}.{0.micro}".format(info)
+ kind = info.releaselevel
+ if kind != "final":
+ version += kind[0] + str(info.serial)
+ return version
+
+
+def default_environment() -> Dict[str, str]:
+ iver = format_full_version(sys.implementation.version)
+ implementation_name = sys.implementation.name
+ return {
+ "implementation_name": implementation_name,
+ "implementation_version": iver,
+ "os_name": os.name,
+ "platform_machine": platform.machine(),
+ "platform_release": platform.release(),
+ "platform_system": platform.system(),
+ "platform_version": platform.version(),
+ "python_full_version": platform.python_version(),
+ "platform_python_implementation": platform.python_implementation(),
+ "python_version": ".".join(platform.python_version_tuple()[:2]),
+ "sys_platform": sys.platform,
+ }
+
+
+class Marker:
+ def __init__(self, marker: str) -> None:
+ try:
+ self._markers = _coerce_parse_result(MARKER.parseString(marker))
+ except ParseException as e:
+ raise InvalidMarker(
+ f"Invalid marker: {marker!r}, parse error at "
+ f"{marker[e.loc : e.loc + 8]!r}"
+ )
+
+ def __str__(self) -> str:
+ return _format_marker(self._markers)
+
+ def __repr__(self) -> str:
+ return f"<Marker('{self}')>"
+
+ def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
+ """Evaluate a marker.
+
+ Return the boolean from evaluating the given marker against the
+ environment. environment is an optional argument to override all or
+ part of the determined environment.
+
+ The environment is determined from the current Python process.
+ """
+ current_environment = default_environment()
+ if environment is not None:
+ current_environment.update(environment)
+
+ return _evaluate_markers(self._markers, current_environment)
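+
+
+# Illustrative usage of Marker (the marker string is hypothetical):
+#
+#     >>> m = Marker('python_version >= "3.6" and os_name == "posix"')
+#     >>> m.evaluate({"os_name": "nt"})  # override part of the environment
+#     False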
diff --git a/third_party/python/packaging/packaging/py.typed b/third_party/python/packaging/packaging/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/packaging/packaging/py.typed
diff --git a/third_party/python/packaging/packaging/requirements.py b/third_party/python/packaging/packaging/requirements.py
new file mode 100644
index 0000000000..53f9a3aa42
--- /dev/null
+++ b/third_party/python/packaging/packaging/requirements.py
@@ -0,0 +1,146 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import re
+import string
+import urllib.parse
+from typing import List, Optional as TOptional, Set
+
+from pyparsing import ( # noqa
+ Combine,
+ Literal as L,
+ Optional,
+ ParseException,
+ Regex,
+ Word,
+ ZeroOrMore,
+ originalTextFor,
+ stringEnd,
+ stringStart,
+)
+
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
+class InvalidRequirement(ValueError):
+ """
+    An invalid requirement was found; users should refer to PEP 508.
+ """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r"[^ ]+")("url")
+URL = AT + URI
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(
+ VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+)("_raw_spec")
+_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+ lambda s, l, t: Marker(s[t._original_start : t._original_end])
+)
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+# pyparsing isn't thread safe during initialization, so we do it eagerly, see
+# issue #104
+REQUIREMENT.parseString("x[]")
+
+
+class Requirement:
+ """Parse a requirement.
+
+ Parse a given requirement string into its parts, such as name, specifier,
+ URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+ string.
+ """
+
+ # TODO: Can we test whether something is contained within a requirement?
+ # If so how do we do that? Do we need to test against the _name_ of
+ # the thing as well as the version? What about the markers?
+ # TODO: Can we normalize the name and extra name?
+
+ def __init__(self, requirement_string: str) -> None:
+ try:
+ req = REQUIREMENT.parseString(requirement_string)
+ except ParseException as e:
+ raise InvalidRequirement(
+                f'Parse error at "{requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
+ )
+
+ self.name: str = req.name
+ if req.url:
+ parsed_url = urllib.parse.urlparse(req.url)
+ if parsed_url.scheme == "file":
+ if urllib.parse.urlunparse(parsed_url) != req.url:
+ raise InvalidRequirement("Invalid URL given")
+ elif not (parsed_url.scheme and parsed_url.netloc) or (
+ not parsed_url.scheme and not parsed_url.netloc
+ ):
+ raise InvalidRequirement(f"Invalid URL: {req.url}")
+ self.url: TOptional[str] = req.url
+ else:
+ self.url = None
+ self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
+ self.specifier: SpecifierSet = SpecifierSet(req.specifier)
+ self.marker: TOptional[Marker] = req.marker if req.marker else None
+
+ def __str__(self) -> str:
+ parts: List[str] = [self.name]
+
+ if self.extras:
+ formatted_extras = ",".join(sorted(self.extras))
+ parts.append(f"[{formatted_extras}]")
+
+ if self.specifier:
+ parts.append(str(self.specifier))
+
+ if self.url:
+ parts.append(f"@ {self.url}")
+ if self.marker:
+ parts.append(" ")
+
+ if self.marker:
+ parts.append(f"; {self.marker}")
+
+ return "".join(parts)
+
+ def __repr__(self) -> str:
+ return f"<Requirement('{self}')>"
diff --git a/third_party/python/packaging/packaging/specifiers.py b/third_party/python/packaging/packaging/specifiers.py
new file mode 100644
index 0000000000..0e218a6f9f
--- /dev/null
+++ b/third_party/python/packaging/packaging/specifiers.py
@@ -0,0 +1,802 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import abc
+import functools
+import itertools
+import re
+import warnings
+from typing import (
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Pattern,
+ Set,
+ Tuple,
+ TypeVar,
+ Union,
+)
+
+from .utils import canonicalize_version
+from .version import LegacyVersion, Version, parse
+
+ParsedVersion = Union[Version, LegacyVersion]
+UnparsedVersion = Union[Version, LegacyVersion, str]
+VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
+CallableOperator = Callable[[ParsedVersion, str], bool]
+
+
+class InvalidSpecifier(ValueError):
+ """
+    An invalid specifier was found; users should refer to PEP 440.
+ """
+
+
+class BaseSpecifier(metaclass=abc.ABCMeta):
+ @abc.abstractmethod
+ def __str__(self) -> str:
+ """
+ Returns the str representation of this Specifier like object. This
+ should be representative of the Specifier itself.
+ """
+
+ @abc.abstractmethod
+ def __hash__(self) -> int:
+ """
+ Returns a hash value for this Specifier like object.
+ """
+
+ @abc.abstractmethod
+ def __eq__(self, other: object) -> bool:
+ """
+ Returns a boolean representing whether or not the two Specifier like
+ objects are equal.
+ """
+
+ @abc.abstractproperty
+ def prereleases(self) -> Optional[bool]:
+ """
+ Returns whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @prereleases.setter
+ def prereleases(self, value: bool) -> None:
+ """
+ Sets whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @abc.abstractmethod
+ def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
+ """
+ Determines if the given item is contained within this specifier.
+ """
+
+ @abc.abstractmethod
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
+ """
+ Takes an iterable of items and filters them so that only items which
+ are contained within this specifier are allowed in it.
+ """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+ _operators: Dict[str, str] = {}
+ _regex: Pattern[str]
+
+ def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+ match = self._regex.search(spec)
+ if not match:
+ raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
+
+ self._spec: Tuple[str, str] = (
+ match.group("operator").strip(),
+ match.group("version").strip(),
+ )
+
+ # Store whether or not this Specifier should accept prereleases
+ self._prereleases = prereleases
+
+ def __repr__(self) -> str:
+ pre = (
+ f", prereleases={self.prereleases!r}"
+ if self._prereleases is not None
+ else ""
+ )
+
+ return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
+
+ def __str__(self) -> str:
+ return "{}{}".format(*self._spec)
+
+ @property
+ def _canonical_spec(self) -> Tuple[str, str]:
+ return self._spec[0], canonicalize_version(self._spec[1])
+
+ def __hash__(self) -> int:
+ return hash(self._canonical_spec)
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, str):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._canonical_spec == other._canonical_spec
+
+ def _get_operator(self, op: str) -> CallableOperator:
+ operator_callable: CallableOperator = getattr(
+ self, f"_compare_{self._operators[op]}"
+ )
+ return operator_callable
+
+ def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
+ if not isinstance(version, (LegacyVersion, Version)):
+ version = parse(version)
+ return version
+
+ @property
+ def operator(self) -> str:
+ return self._spec[0]
+
+ @property
+ def version(self) -> str:
+ return self._spec[1]
+
+ @property
+ def prereleases(self) -> Optional[bool]:
+ return self._prereleases
+
+ @prereleases.setter
+ def prereleases(self, value: bool) -> None:
+ self._prereleases = value
+
+ def __contains__(self, item: str) -> bool:
+ return self.contains(item)
+
+ def contains(
+ self, item: UnparsedVersion, prereleases: Optional[bool] = None
+ ) -> bool:
+
+ # Determine if prereleases are to be allowed or not.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+        # Normalize item to a Version or LegacyVersion; this allows us to have
+        # a shortcut for ``"2.0" in Specifier(">=2")``.
+ normalized_item = self._coerce_version(item)
+
+ # Determine if we should be supporting prereleases in this specifier
+        # or not; if we do not support prereleases then we can short circuit
+        # logic if this version is a prerelease.
+ if normalized_item.is_prerelease and not prereleases:
+ return False
+
+ # Actually do the comparison to determine if this item is contained
+ # within this Specifier or not.
+ operator_callable: CallableOperator = self._get_operator(self.operator)
+ return operator_callable(normalized_item, self.version)
+
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
+
+ yielded = False
+ found_prereleases = []
+
+ kw = {"prereleases": prereleases if prereleases is not None else True}
+
+ # Attempt to iterate over all the values in the iterable and if any of
+ # them match, yield them.
+ for version in iterable:
+ parsed_version = self._coerce_version(version)
+
+ if self.contains(parsed_version, **kw):
+ # If our version is a prerelease, and we were not set to allow
+ # prereleases, then we'll store it for later in case nothing
+ # else matches this specifier.
+ if parsed_version.is_prerelease and not (
+ prereleases or self.prereleases
+ ):
+ found_prereleases.append(version)
+ # Either this is not a prerelease, or we should have been
+ # accepting prereleases from the beginning.
+ else:
+ yielded = True
+ yield version
+
+ # Now that we've iterated over everything, determine if we've yielded
+ # any values, and if we have not and we have any prereleases stored up
+ # then we will go ahead and yield the prereleases.
+ if not yielded and found_prereleases:
+ for version in found_prereleases:
+ yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(==|!=|<=|>=|<|>))
+ \s*
+ (?P<version>
+ [^,;\s)]* # Since this is a "legacy" specifier, and the version
+ # string can be just about anything, we match everything
+ # except for whitespace, a semi-colon for marker support,
+ # a closing paren since versions can be enclosed in
+ # them, and a comma since it's a version separator.
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ }
+
+ def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+ super().__init__(spec, prereleases)
+
+ warnings.warn(
+ "Creating a LegacyVersion has been deprecated and will be "
+ "removed in the next major release",
+ DeprecationWarning,
+ )
+
+ def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
+ if not isinstance(version, LegacyVersion):
+ version = LegacyVersion(str(version))
+ return version
+
+ def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective == self._coerce_version(spec)
+
+ def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective != self._coerce_version(spec)
+
+ def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective <= self._coerce_version(spec)
+
+ def _compare_greater_than_equal(
+ self, prospective: LegacyVersion, spec: str
+ ) -> bool:
+ return prospective >= self._coerce_version(spec)
+
+ def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective < self._coerce_version(spec)
+
+ def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(
+ fn: Callable[["Specifier", ParsedVersion, str], bool]
+) -> Callable[["Specifier", ParsedVersion, str], bool]:
+ @functools.wraps(fn)
+ def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
+ if not isinstance(prospective, Version):
+ return False
+ return fn(self, prospective, spec)
+
+ return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+ (?P<version>
+ (?:
+ # The identity operators allow for an escape hatch that will
+ # do an exact string match of the version you wish to install.
+ # This will not be parsed by PEP 440 and we cannot determine
+ # any semantic meaning from it. This operator is discouraged
+ # but included entirely as an escape hatch.
+ (?<====) # Only match for the identity operator
+ \s*
+ [^\s]* # We just match everything, except for whitespace
+ # since we are only testing for strict identity.
+ )
+ |
+ (?:
+ # The (non)equality operators allow for wild card and local
+ # versions to be specified so we have to define these two
+ # operators separately to enable that.
+ (?<===|!=) # Only match for equals and not equals
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+
+ # You cannot use a wild card and a dev or local version
+ # together so group them with a | and make them optional.
+ (?:
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+ |
+ \.\* # Wild card syntax of .*
+ )?
+ )
+ |
+ (?:
+ # The compatible operator requires at least two digits in the
+ # release segment.
+ (?<=~=) # Only match for the compatible operator
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ |
+ (?:
+            # All other operators only allow a subset of what the
+ # (non)equality operators do. Specifically they do not allow
+ # local versions to be specified nor do they allow the prefix
+ # matching wild cards.
+ (?<!==|!=|~=) # We have special cases for these
+ # operators so we want to make sure they
+ # don't match here.
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "~=": "compatible",
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ "===": "arbitrary",
+ }
+
+ @_require_version_compare
+ def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
+
+        # Compatible releases have an equivalent combination of >= and ==.
+        # That is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+ # implement this in terms of the other specifiers instead of
+ # implementing it ourselves. The only thing we need to do is construct
+ # the other specifiers.
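+        #
+        # Worked example: for the spec "2.2.post3" the suffix segment
+        # "post3" is ignored, leaving ["2", "2"]; dropping the last item
+        # gives the prefix "2", so ~=2.2.post3 acts as >=2.2.post3,==2.*.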
+
+ # We want everything but the last item in the version, but we want to
+ # ignore suffix segments.
+ prefix = ".".join(
+ list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
+ )
+
+ # Add the prefix notation to the end of our string
+ prefix += ".*"
+
+ return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+ prospective, prefix
+ )
+
+ @_require_version_compare
+ def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+ # We need special logic to handle prefix matching
+ if spec.endswith(".*"):
+ # In the case of prefix matching we want to ignore local segment.
+ prospective = Version(prospective.public)
+ # Split the spec out by dots, and pretend that there is an implicit
+ # dot in between a release segment and a pre-release segment.
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .*
+
+ # Split the prospective version out by dots, and pretend that there
+ # is an implicit dot in between a release segment and a pre-release
+ # segment.
+ split_prospective = _version_split(str(prospective))
+
+ # Shorten the prospective version to be the same length as the spec
+ # so that we can determine if the specifier is a prefix of the
+ # prospective version or not.
+ shortened_prospective = split_prospective[: len(split_spec)]
+
+ # Pad out our two sides with zeros so that they both equal the same
+ # length.
+ padded_spec, padded_prospective = _pad_version(
+ split_spec, shortened_prospective
+ )
+
+ return padded_prospective == padded_spec
+ else:
+ # Convert our spec string into a Version
+ spec_version = Version(spec)
+
+ # If the specifier does not have a local segment, then we want to
+ # act as if the prospective version also does not have a local
+ # segment.
+ if not spec_version.local:
+ prospective = Version(prospective.public)
+
+ return prospective == spec_version
+
+ @_require_version_compare
+ def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+ return not self._compare_equal(prospective, spec)
+
+ @_require_version_compare
+ def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) <= Version(spec)
+
+ @_require_version_compare
+ def _compare_greater_than_equal(
+ self, prospective: ParsedVersion, spec: str
+ ) -> bool:
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) >= Version(spec)
+
+ @_require_version_compare
+ def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is less than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective < spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+        # includes a pre-release version, we do not accept pre-release
+ # versions for the version mentioned in the specifier (e.g. <3.1 should
+ # not match 3.1.dev0, but should match 3.0.dev0).
+ if not spec.is_prerelease and prospective.is_prerelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+        # If we've gotten to here, it means that the prospective version is
+        # both less than the spec version *and* it's not a pre-release of the
+        # same version in the spec.
+ return True
+
+ @_require_version_compare
+ def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is greater than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective > spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+        # includes a post-release version, we do not accept
+        # post-release versions for the version mentioned in the specifier
+        # (e.g. >3.1 should not match 3.1.post0, but should match 3.2.post0).
+ if not spec.is_postrelease and prospective.is_postrelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # Ensure that we do not allow a local version of the version mentioned
+ # in the specifier, which is technically greater than, to match.
+ if prospective.local is not None:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+        # If we've gotten to here, it means that the prospective version is
+        # both greater than the spec version *and* it's not a post-release or
+        # local version of the same version in the spec.
+ return True
+
+ def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
+ return str(prospective).lower() == str(spec).lower()
+
+ @property
+ def prereleases(self) -> bool:
+
+ # If there is an explicit prereleases set for this, then we'll just
+ # blindly use that.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # Look at all of our specifiers and determine if they are inclusive
+ # operators, and if they are if they are including an explicit
+ # prerelease.
+ operator, version = self._spec
+ if operator in ["==", ">=", "<=", "~=", "==="]:
+            # The == specifier can include a trailing .*; if it does, we
+            # want to remove it before parsing.
+ if operator == "==" and version.endswith(".*"):
+ version = version[:-2]
+
+            # Parse the version, and if it is a pre-release then this
+ # specifier allows pre-releases.
+ if parse(version).is_prerelease:
+ return True
+
+ return False
+
+ @prereleases.setter
+ def prereleases(self, value: bool) -> None:
+ self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version: str) -> List[str]:
+ result: List[str] = []
+ for item in version.split("."):
+ match = _prefix_regex.search(item)
+ if match:
+ result.extend(match.groups())
+ else:
+ result.append(item)
+ return result
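+
+# Illustrative example: _version_split("1.0rc1") returns ["1", "0", "rc1"];
+# the regex above treats the boundary between the final release digit and a
+# pre-release suffix as an implicit dot.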
+
+
+def _is_not_suffix(segment: str) -> bool:
+ return not any(
+ segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+ )
+
+
+def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
+ left_split, right_split = [], []
+
+ # Get the release segment of our versions
+ left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+ right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+ # Get the rest of our versions
+ left_split.append(left[len(left_split[0]) :])
+ right_split.append(right[len(right_split[0]) :])
+
+ # Insert our padding
+ left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+ right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+ return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
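+
+# Illustrative example: _pad_version(["1", "0"], ["1", "0", "0"]) pads the
+# shorter release segment with zeros and returns
+# (["1", "0", "0"], ["1", "0", "0"]), so "1.0" and "1.0.0" compare equal
+# under prefix matching.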
+
+
+class SpecifierSet(BaseSpecifier):
+ def __init__(
+ self, specifiers: str = "", prereleases: Optional[bool] = None
+ ) -> None:
+
+        # Split on , to break each individual specifier into its own item, and
+ # strip each item to remove leading/trailing whitespace.
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+        # Parse each individual specifier, attempting first to make it a
+ # Specifier and falling back to a LegacySpecifier.
+ parsed: Set[_IndividualSpecifier] = set()
+ for specifier in split_specifiers:
+ try:
+ parsed.add(Specifier(specifier))
+ except InvalidSpecifier:
+ parsed.add(LegacySpecifier(specifier))
+
+ # Turn our parsed specifiers into a frozen set and save them for later.
+ self._specs = frozenset(parsed)
+
+ # Store our prereleases value so we can use it later to determine if
+ # we accept prereleases or not.
+ self._prereleases = prereleases
+
+ def __repr__(self) -> str:
+ pre = (
+ f", prereleases={self.prereleases!r}"
+ if self._prereleases is not None
+ else ""
+ )
+
+ return f"<SpecifierSet({str(self)!r}{pre})>"
+
+ def __str__(self) -> str:
+ return ",".join(sorted(str(s) for s in self._specs))
+
+ def __hash__(self) -> int:
+ return hash(self._specs)
+
+ def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+ if isinstance(other, str):
+ other = SpecifierSet(other)
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ specifier = SpecifierSet()
+ specifier._specs = frozenset(self._specs | other._specs)
+
+ if self._prereleases is None and other._prereleases is not None:
+ specifier._prereleases = other._prereleases
+ elif self._prereleases is not None and other._prereleases is None:
+ specifier._prereleases = self._prereleases
+ elif self._prereleases == other._prereleases:
+ specifier._prereleases = self._prereleases
+ else:
+ raise ValueError(
+ "Cannot combine SpecifierSets with True and False prerelease "
+ "overrides."
+ )
+
+ return specifier
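+
+    # Illustrative note: the ``&`` intersection above simply unions the
+    # underlying specifier sets, e.g. str(SpecifierSet(">=1.0") & "<2.0")
+    # would give "<2.0,>=1.0".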
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, (str, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs == other._specs
+
+ def __len__(self) -> int:
+ return len(self._specs)
+
+ def __iter__(self) -> Iterator[_IndividualSpecifier]:
+ return iter(self._specs)
+
+ @property
+ def prereleases(self) -> Optional[bool]:
+
+ # If we have been given an explicit prerelease modifier, then we'll
+ # pass that through here.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # If we don't have any specifiers, and we don't have a forced value,
+ # then we'll just return None since we don't know if this should have
+ # pre-releases or not.
+ if not self._specs:
+ return None
+
+ # Otherwise we'll see if any of the given specifiers accept
+ # prereleases, if any of them do we'll return True, otherwise False.
+ return any(s.prereleases for s in self._specs)
+
+ @prereleases.setter
+ def prereleases(self, value: bool) -> None:
+ self._prereleases = value
+
+ def __contains__(self, item: UnparsedVersion) -> bool:
+ return self.contains(item)
+
+ def contains(
+ self, item: UnparsedVersion, prereleases: Optional[bool] = None
+ ) -> bool:
+
+ # Ensure that our item is a Version or LegacyVersion instance.
+ if not isinstance(item, (LegacyVersion, Version)):
+ item = parse(item)
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # We can determine if we're going to allow pre-releases by looking to
+ # see if any of the underlying items supports them. If none of them do
+ # and this item is a pre-release then we do not allow it and we can
+ # short circuit that here.
+ # Note: This means that 1.0.dev1 would not be contained in something
+        # like >=1.0.devabc however it would be in >=1.0.devabc,>0.0.dev0
+ if not prereleases and item.is_prerelease:
+ return False
+
+ # We simply dispatch to the underlying specs here to make sure that the
+ # given version is contained within all of them.
+ # Note: This use of all() here means that an empty set of specifiers
+        # will always return True; this is an explicit design decision.
+ return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # If we have any specifiers, then we want to wrap our iterable in the
+        # filter method for each one; this will act as a logical AND across
+        # the specifiers.
+ if self._specs:
+ for spec in self._specs:
+ iterable = spec.filter(iterable, prereleases=bool(prereleases))
+ return iterable
+ # If we do not have any specifiers, then we need to have a rough filter
+ # which will filter out any pre-releases, unless there are no final
+ # releases, and which will filter out LegacyVersion in general.
+ else:
+ filtered: List[VersionTypeVar] = []
+ found_prereleases: List[VersionTypeVar] = []
+
+ item: UnparsedVersion
+ parsed_version: Union[Version, LegacyVersion]
+
+ for item in iterable:
+                # Ensure that we have some kind of Version class for this item.
+ if not isinstance(item, (LegacyVersion, Version)):
+ parsed_version = parse(item)
+ else:
+ parsed_version = item
+
+ # Filter out any item which is parsed as a LegacyVersion
+ if isinstance(parsed_version, LegacyVersion):
+ continue
+
+ # Store any item which is a pre-release for later unless we've
+ # already found a final version or we are accepting prereleases
+ if parsed_version.is_prerelease and not prereleases:
+ if not filtered:
+ found_prereleases.append(item)
+ else:
+ filtered.append(item)
+
+ # If we've found no items except for pre-releases, then we'll go
+ # ahead and use the pre-releases
+ if not filtered and found_prereleases and prereleases is None:
+ return found_prereleases
+
+ return filtered
diff --git a/third_party/python/packaging/packaging/tags.py b/third_party/python/packaging/packaging/tags.py
new file mode 100644
index 0000000000..9a3d25a71c
--- /dev/null
+++ b/third_party/python/packaging/packaging/tags.py
@@ -0,0 +1,487 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import logging
+import platform
+import sys
+import sysconfig
+from importlib.machinery import EXTENSION_SUFFIXES
+from typing import (
+ Dict,
+ FrozenSet,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ cast,
+)
+
+from . import _manylinux, _musllinux
+
+logger = logging.getLogger(__name__)
+
+PythonVersion = Sequence[int]
+MacVersion = Tuple[int, int]
+
+INTERPRETER_SHORT_NAMES: Dict[str, str] = {
+ "python": "py", # Generic.
+ "cpython": "cp",
+ "pypy": "pp",
+ "ironpython": "ip",
+ "jython": "jy",
+}
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+class Tag:
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
+
+ __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
+
+ def __init__(self, interpreter: str, abi: str, platform: str) -> None:
+ self._interpreter = interpreter.lower()
+ self._abi = abi.lower()
+ self._platform = platform.lower()
+ # The __hash__ of every single element in a Set[Tag] will be evaluated each time
+    # that a set calls its `.isdisjoint()` method, which may be called hundreds of
+ # times when scanning a page of links for packages with tags matching that
+ # Set[Tag]. Pre-computing the value here produces significant speedups for
+ # downstream consumers.
+ self._hash = hash((self._interpreter, self._abi, self._platform))
+
+ @property
+ def interpreter(self) -> str:
+ return self._interpreter
+
+ @property
+ def abi(self) -> str:
+ return self._abi
+
+ @property
+ def platform(self) -> str:
+ return self._platform
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Tag):
+ return NotImplemented
+
+ return (
+ (self._hash == other._hash) # Short-circuit ASAP for perf reasons.
+ and (self._platform == other._platform)
+ and (self._abi == other._abi)
+ and (self._interpreter == other._interpreter)
+ )
+
+ def __hash__(self) -> int:
+ return self._hash
+
+ def __str__(self) -> str:
+ return f"{self._interpreter}-{self._abi}-{self._platform}"
+
+ def __repr__(self) -> str:
+ return f"<{self} @ {id(self)}>"
+
+
+def parse_tag(tag: str) -> FrozenSet[Tag]:
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
+ tags = set()
+ interpreters, abis, platforms = tag.split("-")
+ for interpreter in interpreters.split("."):
+ for abi in abis.split("."):
+ for platform_ in platforms.split("."):
+ tags.add(Tag(interpreter, abi, platform_))
+ return frozenset(tags)
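+
+# Illustrative expansion of a compressed tag set (a sketch of the expected
+# behavior, not part of the upstream module):
+#   sorted(str(t) for t in parse_tag("py2.py3-none-any"))
+#   -> ['py2-none-any', 'py3-none-any']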
+
+
+def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
+ value = sysconfig.get_config_var(name)
+ if value is None and warn:
+ logger.debug(
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+ )
+ return value
+
+
+def _normalize_string(string: str) -> str:
+ return string.replace(".", "_").replace("-", "_")
+
+
+def _abi3_applies(python_version: PythonVersion) -> bool:
+ """
+ Determine if the Python version supports abi3.
+
+ PEP 384 was first implemented in Python 3.2.
+ """
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
+ py_version = tuple(py_version) # To allow for version comparison.
+ abis = []
+ version = _version_nodot(py_version[:2])
+ debug = pymalloc = ucs4 = ""
+ with_debug = _get_config_var("Py_DEBUG", warn)
+ has_refcount = hasattr(sys, "gettotalrefcount")
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+ # extension modules is the best option.
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
+ debug = "d"
+ if py_version < (3, 8):
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+ if with_pymalloc or with_pymalloc is None:
+ pymalloc = "m"
+ if py_version < (3, 3):
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+ if unicode_size == 4 or (
+ unicode_size is None and sys.maxunicode == 0x10FFFF
+ ):
+ ucs4 = "u"
+ elif debug:
+ # Debug builds can also load "normal" extension modules.
+ # We can also assume no UCS-4 or pymalloc requirement.
+ abis.append(f"cp{version}")
+ abis.insert(
+ 0,
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+ ),
+ )
+ return abis
+
+
+def cpython_tags(
+ python_version: Optional[PythonVersion] = None,
+ abis: Optional[Iterable[str]] = None,
+ platforms: Optional[Iterable[str]] = None,
+ *,
+ warn: bool = False,
+) -> Iterator[Tag]:
+ """
+ Yields the tags for a CPython interpreter.
+
+ The tags consist of:
+ - cp<python_version>-<abi>-<platform>
+ - cp<python_version>-abi3-<platform>
+ - cp<python_version>-none-<platform>
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
+
+    If python_version only specifies a major version, then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+    If 'abi3' or 'none' are specified in 'abis', then they will be yielded at
+    their normal position and not at the beginning.
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+
+ interpreter = f"cp{_version_nodot(python_version[:2])}"
+
+ if abis is None:
+ if len(python_version) > 1:
+ abis = _cpython_abis(python_version, warn)
+ else:
+ abis = []
+ abis = list(abis)
+ # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"):
+ try:
+ abis.remove(explicit_abi)
+ except ValueError:
+ pass
+
+ platforms = list(platforms or platform_tags())
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+ if _abi3_applies(python_version):
+ yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
+ yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
+
+ if _abi3_applies(python_version):
+ for minor_version in range(python_version[1] - 1, 1, -1):
+ for platform_ in platforms:
+ interpreter = "cp{version}".format(
+ version=_version_nodot((python_version[0], minor_version))
+ )
+ yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi() -> Iterator[str]:
+ abi = sysconfig.get_config_var("SOABI")
+ if abi:
+ yield _normalize_string(abi)
+
+
+def generic_tags(
+ interpreter: Optional[str] = None,
+ abis: Optional[Iterable[str]] = None,
+ platforms: Optional[Iterable[str]] = None,
+ *,
+ warn: bool = False,
+) -> Iterator[Tag]:
+ """
+ Yields the tags for a generic interpreter.
+
+ The tags consist of:
+ - <interpreter>-<abi>-<platform>
+
+ The "none" ABI will be added if it was not explicitly provided.
+ """
+ if not interpreter:
+ interp_name = interpreter_name()
+ interp_version = interpreter_version(warn=warn)
+ interpreter = "".join([interp_name, interp_version])
+ if abis is None:
+ abis = _generic_abi()
+ platforms = list(platforms or platform_tags())
+ abis = list(abis)
+ if "none" not in abis:
+ abis.append("none")
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+ """
+ Yields Python versions in descending order.
+
+ After the latest version, the major-only version will be yielded, and then
+ all previous versions of that major version.
+ """
+ if len(py_version) > 1:
+ yield f"py{_version_nodot(py_version[:2])}"
+ yield f"py{py_version[0]}"
+ if len(py_version) > 1:
+ for minor in range(py_version[1] - 1, -1, -1):
+ yield f"py{_version_nodot((py_version[0], minor))}"
+
+
+def compatible_tags(
+ python_version: Optional[PythonVersion] = None,
+ interpreter: Optional[str] = None,
+ platforms: Optional[Iterable[str]] = None,
+) -> Iterator[Tag]:
+ """
+ Yields the sequence of tags that are compatible with a specific version of Python.
+
+ The tags consist of:
+ - py*-none-<platform>
+ - <interpreter>-none-any # ... if `interpreter` is provided.
+ - py*-none-any
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+ platforms = list(platforms or platform_tags())
+ for version in _py_interpreter_range(python_version):
+ for platform_ in platforms:
+ yield Tag(version, "none", platform_)
+ if interpreter:
+ yield Tag(interpreter, "none", "any")
+ for version in _py_interpreter_range(python_version):
+ yield Tag(version, "none", "any")
+
+
+def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
+ if not is_32bit:
+ return arch
+
+ if arch.startswith("ppc"):
+ return "ppc"
+
+ return "i386"
+
+
+def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
+ formats = [cpu_arch]
+ if cpu_arch == "x86_64":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat64", "fat32"])
+
+ elif cpu_arch == "i386":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat32", "fat"])
+
+ elif cpu_arch == "ppc64":
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4):
+ return []
+ formats.append("fat64")
+
+ elif cpu_arch == "ppc":
+ if version > (10, 6):
+ return []
+ formats.extend(["fat32", "fat"])
+
+ if cpu_arch in {"arm64", "x86_64"}:
+ formats.append("universal2")
+
+ if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
+ formats.append("universal")
+
+ return formats
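+
+# Illustrative result (a sketch; the version/arch pair is an arbitrary example):
+#   _mac_binary_formats((10, 15), "x86_64")
+#   -> ['x86_64', 'intel', 'fat64', 'fat32', 'universal2', 'universal']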
+
+
+def mac_platforms(
+ version: Optional[MacVersion] = None, arch: Optional[str] = None
+) -> Iterator[str]:
+ """
+ Yields the platform tags for a macOS system.
+
+ The `version` parameter is a two-item tuple specifying the macOS version to
+ generate platform tags for. The `arch` parameter is the CPU architecture to
+ generate platform tags for. Both parameters default to the appropriate value
+ for the current system.
+ """
+ version_str, _, cpu_arch = platform.mac_ver()
+ if version is None:
+ version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+    if arch is None:
+        arch = _mac_arch(cpu_arch)
+
+    if (10, 0) <= version < (11, 0):
+ # Prior to Mac OS 11, each yearly release of Mac OS bumped the
+ # "minor" version number. The major version was always 10.
+ for minor_version in range(version[1], -1, -1):
+ compat_version = 10, minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=10, minor=minor_version, binary_format=binary_format
+ )
+
+ if version >= (11, 0):
+ # Starting with Mac OS 11, each yearly release bumps the major version
+ # number. The minor versions are now the midyear updates.
+ for major_version in range(version[0], 10, -1):
+ compat_version = major_version, 0
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=major_version, minor=0, binary_format=binary_format
+ )
+
+ if version >= (11, 0):
+ # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
+ # Arm64 support was introduced in 11.0, so no Arm binaries from previous
+ # releases exist.
+ #
+ # However, the "universal2" binary format can have a
+ # macOS version earlier than 11.0 when the x86_64 part of the binary supports
+ # that version of macOS.
+ if arch == "x86_64":
+ for minor_version in range(16, 3, -1):
+ compat_version = 10, minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
+ else:
+ for minor_version in range(16, 3, -1):
+ compat_version = 10, minor_version
+ binary_format = "universal2"
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
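+
+# Illustrative output for macOS 11.0 on arm64 (assumed values; a sketch):
+#   macosx_11_0_arm64, macosx_11_0_universal2,
+#   macosx_10_16_universal2, ..., macosx_10_4_universal2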
+
+
+def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
+ linux = _normalize_string(sysconfig.get_platform())
+ if is_32bit:
+ if linux == "linux_x86_64":
+ linux = "linux_i686"
+ elif linux == "linux_aarch64":
+ linux = "linux_armv7l"
+ _, arch = linux.split("_", 1)
+ yield from _manylinux.platform_tags(linux, arch)
+ yield from _musllinux.platform_tags(arch)
+ yield linux
+
+
+def _generic_platforms() -> Iterator[str]:
+ yield _normalize_string(sysconfig.get_platform())
+
+
+def platform_tags() -> Iterator[str]:
+ """
+ Provides the platform tags for this installation.
+ """
+ if platform.system() == "Darwin":
+ return mac_platforms()
+ elif platform.system() == "Linux":
+ return _linux_platforms()
+ else:
+ return _generic_platforms()
+
+
+def interpreter_name() -> str:
+ """
+ Returns the name of the running interpreter.
+ """
+ name = sys.implementation.name
+ return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(*, warn: bool = False) -> str:
+ """
+ Returns the version of the running interpreter.
+ """
+ version = _get_config_var("py_version_nodot", warn=warn)
+ if version:
+ version = str(version)
+ else:
+ version = _version_nodot(sys.version_info[:2])
+ return version
+
+
+def _version_nodot(version: PythonVersion) -> str:
+ return "".join(map(str, version))
+
+
+def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
+ """
+ Returns the sequence of tag triples for the running interpreter.
+
+ The order of the sequence corresponds to priority order for the
+ interpreter, from most to least important.
+ """
+
+ interp_name = interpreter_name()
+ if interp_name == "cp":
+ yield from cpython_tags(warn=warn)
+ else:
+ yield from generic_tags()
+
+ if interp_name == "pp":
+ yield from compatible_tags(interpreter="pp3")
+ else:
+ yield from compatible_tags()
diff --git a/third_party/python/packaging/packaging/utils.py b/third_party/python/packaging/packaging/utils.py
new file mode 100644
index 0000000000..bab11b80c6
--- /dev/null
+++ b/third_party/python/packaging/packaging/utils.py
@@ -0,0 +1,136 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import re
+from typing import FrozenSet, NewType, Tuple, Union, cast
+
+from .tags import Tag, parse_tag
+from .version import InvalidVersion, Version
+
+BuildTag = Union[Tuple[()], Tuple[int, str]]
+NormalizedName = NewType("NormalizedName", str)
+
+
+class InvalidWheelFilename(ValueError):
+ """
+    An invalid wheel filename was found; users should refer to PEP 427.
+ """
+
+
+class InvalidSdistFilename(ValueError):
+ """
+    An invalid sdist filename was found; users should refer to the packaging user guide.
+ """
+
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+# PEP 427: The build number must start with a digit.
+_build_tag_regex = re.compile(r"(\d+)(.*)")
+
+
+def canonicalize_name(name: str) -> NormalizedName:
+ # This is taken from PEP 503.
+ value = _canonicalize_regex.sub("-", name).lower()
+ return cast(NormalizedName, value)
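+
+# Illustrative normalization per PEP 503 (a sketch):
+#   canonicalize_name("Foo.Bar_baz") -> "foo-bar-baz"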
+
+
+def canonicalize_version(version: Union[Version, str]) -> str:
+ """
+ This is very similar to Version.__str__, but has one subtle difference
+ with the way it handles the release segment.
+ """
+ if isinstance(version, str):
+ try:
+ parsed = Version(version)
+ except InvalidVersion:
+ # Legacy versions cannot be normalized
+ return version
+ else:
+ parsed = version
+
+ parts = []
+
+ # Epoch
+ if parsed.epoch != 0:
+ parts.append(f"{parsed.epoch}!")
+
+ # Release segment
+ # NB: This strips trailing '.0's to normalize
+ parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
+
+ # Pre-release
+ if parsed.pre is not None:
+ parts.append("".join(str(x) for x in parsed.pre))
+
+ # Post-release
+ if parsed.post is not None:
+ parts.append(f".post{parsed.post}")
+
+ # Development release
+ if parsed.dev is not None:
+ parts.append(f".dev{parsed.dev}")
+
+ # Local version segment
+ if parsed.local is not None:
+ parts.append(f"+{parsed.local}")
+
+ return "".join(parts)
+
+
+def parse_wheel_filename(
+ filename: str,
+) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
+ if not filename.endswith(".whl"):
+ raise InvalidWheelFilename(
+ f"Invalid wheel filename (extension must be '.whl'): {filename}"
+ )
+
+ filename = filename[:-4]
+ dashes = filename.count("-")
+ if dashes not in (4, 5):
+ raise InvalidWheelFilename(
+ f"Invalid wheel filename (wrong number of parts): {filename}"
+ )
+
+ parts = filename.split("-", dashes - 2)
+ name_part = parts[0]
+ # See PEP 427 for the rules on escaping the project name
+ if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
+ raise InvalidWheelFilename(f"Invalid project name: {filename}")
+ name = canonicalize_name(name_part)
+ version = Version(parts[1])
+ if dashes == 5:
+ build_part = parts[2]
+ build_match = _build_tag_regex.match(build_part)
+ if build_match is None:
+ raise InvalidWheelFilename(
+ f"Invalid build number: {build_part} in '{filename}'"
+ )
+ build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
+ else:
+ build = ()
+ tags = parse_tag(parts[-1])
+ return (name, version, build, tags)
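+
+# Illustrative parse (a sketch; the filename is an arbitrary example):
+#   parse_wheel_filename("pip-21.0-py3-none-any.whl")
+#   -> ("pip", Version("21.0"), (), frozenset({Tag("py3", "none", "any")}))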
+
+
+def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
+ if filename.endswith(".tar.gz"):
+ file_stem = filename[: -len(".tar.gz")]
+ elif filename.endswith(".zip"):
+ file_stem = filename[: -len(".zip")]
+ else:
+ raise InvalidSdistFilename(
+ f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
+ f" {filename}"
+ )
+
+ # We are requiring a PEP 440 version, which cannot contain dashes,
+ # so we split on the last dash.
+ name_part, sep, version_part = file_stem.rpartition("-")
+ if not sep:
+ raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
+
+ name = canonicalize_name(name_part)
+ version = Version(version_part)
+ return (name, version)
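+
+# Illustrative parse (a sketch):
+#   parse_sdist_filename("pip-21.0.tar.gz") -> ("pip", Version("21.0"))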
diff --git a/third_party/python/packaging/packaging/version.py b/third_party/python/packaging/packaging/version.py
new file mode 100644
index 0000000000..de9a09a4ed
--- /dev/null
+++ b/third_party/python/packaging/packaging/version.py
@@ -0,0 +1,504 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import collections
+import itertools
+import re
+import warnings
+from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+SubLocalType = Union[InfiniteTypes, int, str]
+LocalType = Union[
+ NegativeInfinityType,
+ Tuple[
+ Union[
+ SubLocalType,
+ Tuple[SubLocalType, str],
+ Tuple[NegativeInfinityType, SubLocalType],
+ ],
+ ...,
+ ],
+]
+CmpKey = Tuple[
+ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+]
+LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+VersionComparisonMethod = Callable[
+ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+]
+
+_Version = collections.namedtuple(
+ "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version: str) -> Union["LegacyVersion", "Version"]:
+ """
+ Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on whether the given version is
+ a valid PEP 440 version or a legacy version.
+ """
+ try:
+ return Version(version)
+ except InvalidVersion:
+ return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+ """
+    An invalid version was found; users should refer to PEP 440.
+ """
+
+
+class _BaseVersion:
+ _key: Union[CmpKey, LegacyCmpKey]
+
+ def __hash__(self) -> int:
+ return hash(self._key)
+
+ # Please keep the duplicated `isinstance` check
+ # in the six comparisons hereunder
+ # unless you find a way to avoid adding overhead function calls.
+ def __lt__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key < other._key
+
+ def __le__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key <= other._key
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key == other._key
+
+ def __ge__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key >= other._key
+
+ def __gt__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key > other._key
+
+ def __ne__(self, other: object) -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key != other._key
+
+
+class LegacyVersion(_BaseVersion):
+ def __init__(self, version: str) -> None:
+ self._version = str(version)
+ self._key = _legacy_cmpkey(self._version)
+
+ warnings.warn(
+ "Creating a LegacyVersion has been deprecated and will be "
+ "removed in the next major release",
+ DeprecationWarning,
+ )
+
+ def __str__(self) -> str:
+ return self._version
+
+ def __repr__(self) -> str:
+ return f"<LegacyVersion('{self}')>"
+
+ @property
+ def public(self) -> str:
+ return self._version
+
+ @property
+ def base_version(self) -> str:
+ return self._version
+
+ @property
+ def epoch(self) -> int:
+ return -1
+
+ @property
+ def release(self) -> None:
+ return None
+
+ @property
+ def pre(self) -> None:
+ return None
+
+ @property
+ def post(self) -> None:
+ return None
+
+ @property
+ def dev(self) -> None:
+ return None
+
+ @property
+ def local(self) -> None:
+ return None
+
+ @property
+ def is_prerelease(self) -> bool:
+ return False
+
+ @property
+ def is_postrelease(self) -> bool:
+ return False
+
+ @property
+ def is_devrelease(self) -> bool:
+ return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+ "pre": "c",
+ "preview": "c",
+ "-": "final-",
+ "rc": "c",
+ "dev": "@",
+}
+
+
+def _parse_version_parts(s: str) -> Iterator[str]:
+ for part in _legacy_version_component_re.split(s):
+ part = _legacy_version_replacement_map.get(part, part)
+
+ if not part or part == ".":
+ continue
+
+ if part[:1] in "0123456789":
+ # pad for numeric comparison
+ yield part.zfill(8)
+ else:
+ yield "*" + part
+
+ # ensure that alpha/beta/candidate are before final
+ yield "*final"
+
+
+def _legacy_cmpkey(version: str) -> LegacyCmpKey:
+
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively sort the LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+ epoch = -1
+
+    # This scheme is taken from setuptools' pkg_resources.parse_version, prior
+    # to its adoption of the packaging library.
+ parts: List[str] = []
+ for part in _parse_version_parts(version.lower()):
+ if part.startswith("*"):
+ # remove "-" before a prerelease tag
+ if part < "*final":
+ while parts and parts[-1] == "*final-":
+ parts.pop()
+
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1] == "00000000":
+ parts.pop()
+
+ parts.append(part)
+
+ return epoch, tuple(parts)
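+
+# Illustrative keys (a sketch): numeric parts are zero-padded for string
+# comparison and a trailing "*final" marker is appended, so:
+#   _legacy_cmpkey("1.0") -> (-1, ('00000001', '*final'))
+#   _legacy_cmpkey("1.0a1") -> (-1, ('00000001', '*a', '00000001', '*final'))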
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+ v?
+ (?:
+ (?:(?P<epoch>[0-9]+)!)? # epoch
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ def __init__(self, version: str) -> None:
+
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion(f"Invalid version: '{version}'")
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+ post=_parse_letter_version(
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+ ),
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self) -> str:
+ return f"<Version('{self}')>"
+
+ def __str__(self) -> str:
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append(f"{self.epoch}!")
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ # Pre-release
+ if self.pre is not None:
+ parts.append("".join(str(x) for x in self.pre))
+
+ # Post-release
+ if self.post is not None:
+ parts.append(f".post{self.post}")
+
+ # Development release
+ if self.dev is not None:
+ parts.append(f".dev{self.dev}")
+
+ # Local version segment
+ if self.local is not None:
+ parts.append(f"+{self.local}")
+
+ return "".join(parts)
+
+ @property
+ def epoch(self) -> int:
+ _epoch: int = self._version.epoch
+ return _epoch
+
+ @property
+ def release(self) -> Tuple[int, ...]:
+ _release: Tuple[int, ...] = self._version.release
+ return _release
+
+ @property
+ def pre(self) -> Optional[Tuple[str, int]]:
+ _pre: Optional[Tuple[str, int]] = self._version.pre
+ return _pre
+
+ @property
+ def post(self) -> Optional[int]:
+ return self._version.post[1] if self._version.post else None
+
+ @property
+ def dev(self) -> Optional[int]:
+ return self._version.dev[1] if self._version.dev else None
+
+ @property
+ def local(self) -> Optional[str]:
+ if self._version.local:
+ return ".".join(str(x) for x in self._version.local)
+ else:
+ return None
+
+ @property
+ def public(self) -> str:
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self) -> str:
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append(f"{self.epoch}!")
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ return "".join(parts)
+
+ @property
+ def is_prerelease(self) -> bool:
+ return self.dev is not None or self.pre is not None
+
+ @property
+ def is_postrelease(self) -> bool:
+ return self.post is not None
+
+ @property
+ def is_devrelease(self) -> bool:
+ return self.dev is not None
+
+ @property
+ def major(self) -> int:
+ return self.release[0] if len(self.release) >= 1 else 0
+
+ @property
+ def minor(self) -> int:
+ return self.release[1] if len(self.release) >= 2 else 0
+
+ @property
+ def micro(self) -> int:
+ return self.release[2] if len(self.release) >= 3 else 0
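+
+    # Illustrative property values (a sketch; the version string below is an
+    # arbitrary example):
+    #   v = Version("1!2.3.4rc1.post2.dev3+ubuntu.1")
+    #   v.epoch -> 1; v.release -> (2, 3, 4); v.pre -> ("rc", 1)
+    #   v.post -> 2; v.dev -> 3; v.local -> "ubuntu.1"
+    #   v.public -> "1!2.3.4rc1.post2.dev3"; v.base_version -> "1!2.3.4"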
+
+
+def _parse_letter_version(
+ letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+ if letter:
+ # We consider there to be an implicit 0 in a pre-release if there is
+ # not a numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume if we are given a number, but we are not given a letter
+ # then this is using the implicit post release syntax (e.g. 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+ return None
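+
+# Illustrative normalizations (a sketch):
+#   _parse_letter_version("alpha", None) -> ("a", 0)
+#   _parse_letter_version("rev", "2") -> ("post", 2)
+#   _parse_letter_version(None, "1") -> ("post", 1)  # implicit post: "1.0-1"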
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_separators.split(local)
+ )
+ return None
+
+
+def _cmpkey(
+ epoch: int,
+ release: Tuple[int, ...],
+ pre: Optional[Tuple[str, int]],
+ post: Optional[Tuple[str, int]],
+ dev: Optional[Tuple[str, int]],
+ local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then re-reverse the rest back
+    # into the correct order, and use that tuple as our sorting key.
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
+ # if there is not a pre or a post segment. If we have one of those then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ _pre: PrePostDevType = NegativeInfinity
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ _pre = Infinity
+ else:
+ _pre = pre
+
+ # Versions without a post segment should sort before those with one.
+ if post is None:
+ _post: PrePostDevType = NegativeInfinity
+
+ else:
+ _post = post
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ _dev: PrePostDevType = Infinity
+
+ else:
+ _dev = dev
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ _local: LocalType = NegativeInfinity
+ else:
+ # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP 440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ _local = tuple(
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+ )
+
+ return epoch, _release, _pre, _post, _dev, _local
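+
+# Consequences of this key construction (illustrative):
+#   Version("1.0") == Version("1.0.0")       # trailing zeros are trimmed
+#   Version("1.0.dev0") < Version("1.0a1")   # dev-only releases sort first
+#   Version("1.0") < Version("1.0+local")    # local segments sort last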
diff --git a/third_party/python/pathspec/pathspec-0.9.0.dist-info/LICENSE b/third_party/python/pathspec/pathspec-0.9.0.dist-info/LICENSE
new file mode 100644
index 0000000000..14e2f777f6
--- /dev/null
+++ b/third_party/python/pathspec/pathspec-0.9.0.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/pathspec/pathspec-0.9.0.dist-info/METADATA b/third_party/python/pathspec/pathspec-0.9.0.dist-info/METADATA
new file mode 100644
index 0000000000..2d38736204
--- /dev/null
+++ b/third_party/python/pathspec/pathspec-0.9.0.dist-info/METADATA
@@ -0,0 +1,411 @@
+Metadata-Version: 2.1
+Name: pathspec
+Version: 0.9.0
+Summary: Utility library for gitignore style pattern matching of file paths.
+Home-page: https://github.com/cpburnz/python-path-specification
+Author: Caleb P. Burns
+Author-email: cpburnz@gmail.com
+License: MPL 2.0
+Platform: UNKNOWN
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Utilities
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7
+Description-Content-Type: text/x-rst
+
+
+*pathspec*: Path Specification
+==============================
+
+*pathspec* is a utility library for pattern matching of file paths. So
+far this only includes Git's wildmatch pattern matching which itself is
+derived from Rsync's wildmatch. Git uses wildmatch for its `gitignore`_
+files.
+
+.. _`gitignore`: http://git-scm.com/docs/gitignore
+
+
+Tutorial
+--------
+
+Say you have a "Projects" directory and you want to back it up, but only
+certain files, and ignore others depending on certain conditions::
+
+ >>> import pathspec
+ >>> # The gitignore-style patterns for files to select, but we're including
+ >>> # instead of ignoring.
+ >>> spec = """
+ ...
+ ... # This is a comment because the line begins with a hash: "#"
+ ...
+ ... # Include several project directories (and all descendants) relative to
+ ... # the current directory. To reference a directory you must end with a
+ ... # slash: "/"
+ ... /project-a/
+ ... /project-b/
+ ... /project-c/
+ ...
+ ... # Patterns can be negated by prefixing with an exclamation mark: "!"
+ ...
+ ... # Ignore temporary files beginning or ending with "~" and ending with
+ ... # ".swp".
+ ... !~*
+ ... !*~
+ ... !*.swp
+ ...
+ ... # These are python projects so ignore compiled python files from
+ ... # testing.
+ ... !*.pyc
+ ...
+ ... # Ignore the build directories but only directly under the project
+ ... # directories.
+ ... !/*/build/
+ ...
+ ... """
+
+We want to use the ``GitWildMatchPattern`` class to compile our patterns. The
+``PathSpec`` class provides an interface around pattern implementations::
+
+ >>> spec = pathspec.PathSpec.from_lines(pathspec.patterns.GitWildMatchPattern, spec.splitlines())
+
+That may be a mouthful, but it allows additional pattern types to be implemented
+in the future without their having to deal with anything but matching the paths
+sent to them. ``GitWildMatchPattern`` is the implementation of the actual
+pattern which internally gets converted into a regular expression.
+``PathSpec`` is a simple wrapper around a list of compiled patterns.
+
+To make things simpler, we can use the registered name for a pattern class
+instead of always having to provide a reference to the class itself. The
+``GitWildMatchPattern`` class is registered as **gitwildmatch**::
+
+ >>> spec = pathspec.PathSpec.from_lines('gitwildmatch', spec.splitlines())
+
+If we want to compile the patterns manually, we can do the following::
+
+ >>> patterns = map(pathspec.patterns.GitWildMatchPattern, spec.splitlines())
+ >>> spec = pathspec.PathSpec(patterns)
+
+``PathSpec.from_lines()`` is simply a class method which does just that.
+
+If you want to load the patterns from a file, you can pass the file instance
+directly as well::
+
+ >>> with open('patterns.list', 'r') as fh:
+ ...     spec = pathspec.PathSpec.from_lines('gitwildmatch', fh)
+
+You can perform matching on a whole directory tree with::
+
+ >>> matches = spec.match_tree('path/to/directory')
+
+Or you can perform matching on a specific set of file paths with::
+
+ >>> matches = spec.match_files(file_paths)
+
+Or check to see if an individual file matches::
+
+ >>> is_matched = spec.match_file(file_path)
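+
+Putting it together, a minimal end-to-end sketch (the patterns and paths here
+are arbitrary examples)::
+
+ >>> import pathspec
+ >>> spec = pathspec.PathSpec.from_lines('gitwildmatch', ['*.py', '!test_*.py'])
+ >>> spec.match_file('app/main.py')
+ True
+ >>> spec.match_file('app/test_main.py')
+ False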
+
+
+License
+-------
+
+*pathspec* is licensed under the `Mozilla Public License Version 2.0`_. See
+`LICENSE`_ or the `FAQ`_ for more information.
+
+In summary, you may use *pathspec* with any closed or open source project
+without affecting the license of the larger work so long as you:
+
+- give credit where credit is due,
+
+- and release any custom changes made to *pathspec*.
+
+.. _`Mozilla Public License Version 2.0`: http://www.mozilla.org/MPL/2.0
+.. _`LICENSE`: LICENSE
+.. _`FAQ`: http://www.mozilla.org/MPL/2.0/FAQ.html
+
+
+Source
+------
+
+The source code for *pathspec* is available from the GitHub repo
+`cpburnz/python-path-specification`_.
+
+.. _`cpburnz/python-path-specification`: https://github.com/cpburnz/python-path-specification
+
+
+Installation
+------------
+
+*pathspec* requires the following packages:
+
+- `setuptools`_
+
+*pathspec* can be installed from source with::
+
+ python setup.py install
+
+*pathspec* is also available for install through `PyPI`_::
+
+ pip install pathspec
+
+.. _`setuptools`: https://pypi.python.org/pypi/setuptools
+.. _`PyPI`: http://pypi.python.org/pypi/pathspec
+
+
+Documentation
+-------------
+
+Documentation for *pathspec* is available on `Read the Docs`_.
+
+.. _`Read the Docs`: http://python-path-specification.readthedocs.io
+
+
+Other Languages
+---------------
+
+*pathspec* is also available as a `Ruby gem`_.
+
+.. _`Ruby gem`: https://github.com/highb/pathspec-ruby
+
+
+Change History
+==============
+
+0.9.0 (2021-07-17)
+------------------
+
+- `Issue #44`_/`Issue #50`_: Raise `GitWildMatchPatternError` for invalid git patterns.
+- `Issue #45`_: Fix for duplicate leading double-asterisk, and edge cases.
+- `Issue #46`_: Fix matching absolute paths.
+- API change: `util.normalize_files()` now returns a `Dict[str, List[pathlike]]` instead of a `Dict[str, pathlike]`.
+- Added type hinting.
+
+.. _`Issue #44`: https://github.com/cpburnz/python-path-specification/issues/44
+.. _`Issue #45`: https://github.com/cpburnz/python-path-specification/pull/45
+.. _`Issue #46`: https://github.com/cpburnz/python-path-specification/issues/46
+.. _`Issue #50`: https://github.com/cpburnz/python-path-specification/pull/50
+
+
+0.8.1 (2020-11-07)
+------------------
+
+- `Issue #43`_: Add support for addition operator.
+
+.. _`Issue #43`: https://github.com/cpburnz/python-path-specification/pull/43
+
+
+0.8.0 (2020-04-09)
+------------------
+
+- `Issue #30`_: Expose what patterns matched paths. Added `util.detailed_match_files()`.
+- `Issue #31`_: `match_tree()` doesn't return symlinks.
+- `Issue #34`_: Support `pathlib.Path`\ s.
+- Add `PathSpec.match_tree_entries` and `util.iter_tree_entries()` to support directories and symlinks.
+- API change: `match_tree()` has been renamed to `match_tree_files()`. The old name `match_tree()` is still available as an alias.
+- API change: `match_tree_files()` now returns symlinks. This is a bug fix but it will change the returned results.
+
+.. _`Issue #30`: https://github.com/cpburnz/python-path-specification/issues/30
+.. _`Issue #31`: https://github.com/cpburnz/python-path-specification/issues/31
+.. _`Issue #34`: https://github.com/cpburnz/python-path-specification/issues/34
+
+
+0.7.0 (2019-12-27)
+------------------
+
+- `Issue #28`_: Add support for Python 3.8, and drop Python 3.4.
+- `Issue #29`_: Publish bdist wheel.
+
+.. _`Issue #28`: https://github.com/cpburnz/python-path-specification/pull/28
+.. _`Issue #29`: https://github.com/cpburnz/python-path-specification/pull/29
+
+
+0.6.0 (2019-10-03)
+------------------
+
+- `Issue #24`_: Drop support for Python 2.6, 3.2, and 3.3.
+- `Issue #25`_: Update README.rst.
+- `Issue #26`_: Method to escape gitwildmatch.
+
+.. _`Issue #24`: https://github.com/cpburnz/python-path-specification/pull/24
+.. _`Issue #25`: https://github.com/cpburnz/python-path-specification/pull/25
+.. _`Issue #26`: https://github.com/cpburnz/python-path-specification/pull/26
+
+
+0.5.9 (2018-09-15)
+------------------
+
+- Fixed file system error handling.
+
+
+0.5.8 (2018-09-15)
+------------------
+
+- Improved type checking.
+- Created scripts to test Python 2.6 because Tox removed support for it.
+- Improved byte string handling in Python 3.
+- `Issue #22`_: Handle dangling symlinks.
+
+.. _`Issue #22`: https://github.com/cpburnz/python-path-specification/issues/22
+
+
+0.5.7 (2018-08-14)
+------------------
+
+- `Issue #21`_: Fix collections deprecation warning.
+
+.. _`Issue #21`: https://github.com/cpburnz/python-path-specification/issues/21
+
+
+0.5.6 (2018-04-06)
+------------------
+
+- Improved unit tests.
+- Improved type checking.
+- `Issue #20`_: Support current directory prefix.
+
+.. _`Issue #20`: https://github.com/cpburnz/python-path-specification/issues/20
+
+
+0.5.5 (2017-09-09)
+------------------
+
+- Add documentation link to README.
+
+
+0.5.4 (2017-09-09)
+------------------
+
+- `Issue #17`_: Add link to Ruby implementation of *pathspec*.
+- Add sphinx documentation.
+
+.. _`Issue #17`: https://github.com/cpburnz/python-path-specification/pull/17
+
+
+0.5.3 (2017-07-01)
+------------------
+
+- `Issue #14`_: Fix byte strings for Python 3.
+- `Issue #15`_: Include "LICENSE" in source package.
+- `Issue #16`_: Support Python 2.6.
+
+.. _`Issue #14`: https://github.com/cpburnz/python-path-specification/issues/14
+.. _`Issue #15`: https://github.com/cpburnz/python-path-specification/pull/15
+.. _`Issue #16`: https://github.com/cpburnz/python-path-specification/issues/16
+
+
+0.5.2 (2017-04-04)
+------------------
+
+- Fixed change log.
+
+
+0.5.1 (2017-04-04)
+------------------
+
+- `Issue #13`_: Add equality methods to `PathSpec` and `RegexPattern`.
+
+.. _`Issue #13`: https://github.com/cpburnz/python-path-specification/pull/13
+
+
+0.5.0 (2016-08-22)
+------------------
+
+- `Issue #12`_: Add `PathSpec.match_file()`.
+- Renamed `gitignore.GitIgnorePattern` to `patterns.gitwildmatch.GitWildMatchPattern`.
+- Deprecated `gitignore.GitIgnorePattern`.
+
+.. _`Issue #12`: https://github.com/cpburnz/python-path-specification/issues/12
+
+
+0.4.0 (2016-07-15)
+------------------
+
+- `Issue #11`_: Support converting patterns into regular expressions without compiling them.
+- API change: Subclasses of `RegexPattern` should implement `pattern_to_regex()`.
+
+.. _`Issue #11`: https://github.com/cpburnz/python-path-specification/issues/11
+
+
+0.3.4 (2015-08-24)
+------------------
+
+- `Issue #7`_: Fixed non-recursive links.
+- `Issue #8`_: Fixed edge cases in gitignore patterns.
+- `Issue #9`_: Fixed minor usage documentation.
+- Fixed recursion detection.
+- Fixed trivial incompatibility with Python 3.2.
+
+.. _`Issue #7`: https://github.com/cpburnz/python-path-specification/pull/7
+.. _`Issue #8`: https://github.com/cpburnz/python-path-specification/pull/8
+.. _`Issue #9`: https://github.com/cpburnz/python-path-specification/pull/9
+
+
+0.3.3 (2014-11-21)
+------------------
+
+- Improved documentation.
+
+
+0.3.2 (2014-11-08)
+------------------
+
+- `Issue #5`_: Use tox for testing.
+- `Issue #6`_: Fixed matching Windows paths.
+- Improved documentation.
+- API change: `spec.match_tree()` and `spec.match_files()` now return iterators instead of sets.
+
+.. _`Issue #5`: https://github.com/cpburnz/python-path-specification/pull/5
+.. _`Issue #6`: https://github.com/cpburnz/python-path-specification/issues/6
+
+
+0.3.1 (2014-09-17)
+------------------
+
+- Updated README.
+
+
+0.3.0 (2014-09-17)
+------------------
+
+- `Issue #3`_: Fixed trailing slash in gitignore patterns.
+- `Issue #4`_: Fixed test for trailing slash in gitignore patterns.
+- Added registered patterns.
+
+.. _`Issue #3`: https://github.com/cpburnz/python-path-specification/pull/3
+.. _`Issue #4`: https://github.com/cpburnz/python-path-specification/pull/4
+
+
+0.2.2 (2013-12-17)
+------------------
+
+- Fixed setup.py.
+
+
+0.2.1 (2013-12-17)
+------------------
+
+- Added tests.
+- Fixed comment gitignore patterns.
+- Fixed relative path gitignore patterns.
+
+
+0.2.0 (2013-12-07)
+------------------
+
+- Initial release.
+
+
diff --git a/third_party/python/pathspec/pathspec-0.9.0.dist-info/RECORD b/third_party/python/pathspec/pathspec-0.9.0.dist-info/RECORD
new file mode 100644
index 0000000000..328aa58483
--- /dev/null
+++ b/third_party/python/pathspec/pathspec-0.9.0.dist-info/RECORD
@@ -0,0 +1,17 @@
+pathspec/__init__.py,sha256=72Wc9H_-xRaisgbnz3pNf8bJJlFu6ZP3MMEYVvlIowk,1085
+pathspec/_meta.py,sha256=MyOTCY28N1FOIxhPQBNxXei9oiQEvzVISrwnyCIAb-0,1617
+pathspec/compat.py,sha256=G0_QT3NRlFTcrV1bhx9G1qIvBdmnaWh-f4VrcpXzfhc,830
+pathspec/pathspec.py,sha256=J2lrhvT1iqkZ5rSrTvMEOjuLBkdzl2QVZkJYWIoMVmw,8048
+pathspec/pattern.py,sha256=sWBuHJNnDKa6tqpKU50ZeCUYXSkKz2dWPLtSHpIyP5U,4811
+pathspec/util.py,sha256=XayuxOvE0LaTZDyffhpel-1ew1_MP_FTCeqXj0M3C8I,19889
+pathspec/patterns/__init__.py,sha256=Falv9rzI0S-Sjc-t-vCS9nUPcKwBptmdNderY9Kok50,184
+pathspec/patterns/gitwildmatch.py,sha256=NzrN7IqFvJJT3x7jcXIMZBmeWF6n0nDjUMoPWpGmvZs,11899
+pathspec/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pathspec/tests/test_gitwildmatch.py,sha256=m9a5-dJ1-MXZ-SVzci3JqtHShfEP3ty-kaYMkVGWS9U,15160
+pathspec/tests/test_pathspec.py,sha256=UB_aETIpTxpuei79zI2_kbjI2XnM2tF_xTQOFD4IYL8,4845
+pathspec/tests/test_util.py,sha256=IjvDbKYyEnQOaa07ebx-qD4bb-YFSm3qm0PE-VhKUa0,7906
+pathspec-0.9.0.dist-info/LICENSE,sha256=-rPda9qyJvHAhjCx3ZF-Efy07F4eAg4sFvg6ChOGPoU,16726
+pathspec-0.9.0.dist-info/METADATA,sha256=nmGOFs7gDO9imfQOjDP2Mhu-NX1xK2DoDv4B7D37D80,12141
+pathspec-0.9.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+pathspec-0.9.0.dist-info/top_level.txt,sha256=0NA6IbW6iCUZ4p402IRSyZpvx4yqoltHzMoAxVQHI1M,9
+pathspec-0.9.0.dist-info/RECORD,,
diff --git a/third_party/python/pathspec/pathspec-0.9.0.dist-info/WHEEL b/third_party/python/pathspec/pathspec-0.9.0.dist-info/WHEEL
new file mode 100644
index 0000000000..8b701e93c2
--- /dev/null
+++ b/third_party/python/pathspec/pathspec-0.9.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/pathspec/pathspec-0.9.0.dist-info/top_level.txt b/third_party/python/pathspec/pathspec-0.9.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..6486958df0
--- /dev/null
+++ b/third_party/python/pathspec/pathspec-0.9.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pathspec
diff --git a/third_party/python/pathspec/pathspec/__init__.py b/third_party/python/pathspec/pathspec/__init__.py
new file mode 100644
index 0000000000..790d0a33bb
--- /dev/null
+++ b/third_party/python/pathspec/pathspec/__init__.py
@@ -0,0 +1,43 @@
+# encoding: utf-8
+"""
+The *pathspec* package provides pattern matching for file paths. So far
+this only includes Git's wildmatch pattern matching (the style used for
+".gitignore" files).
+
+The following classes are imported and made available from the root of
+the `pathspec` package:
+
+- :class:`pathspec.pathspec.PathSpec`
+
+- :class:`pathspec.pattern.Pattern`
+
+- :class:`pathspec.pattern.RegexPattern`
+
+- :class:`pathspec.util.RecursionError`
+
+The following functions are also imported:
+
+- :func:`pathspec.util.iter_tree`
+- :func:`pathspec.util.lookup_pattern`
+- :func:`pathspec.util.match_files`
+"""
+from __future__ import unicode_literals
+
+from .pathspec import PathSpec
+from .pattern import Pattern, RegexPattern
+from .util import iter_tree, lookup_pattern, match_files, RecursionError
+
+from ._meta import (
+ __author__,
+ __copyright__,
+ __credits__,
+ __license__,
+ __version__,
+)
+
+# Load pattern implementations.
+from . import patterns
+
+# Expose `GitIgnorePattern` class in the root module for backward
+# compatibility with v0.4.
+from .patterns.gitwildmatch import GitIgnorePattern
diff --git a/third_party/python/pathspec/pathspec/_meta.py b/third_party/python/pathspec/pathspec/_meta.py
new file mode 100644
index 0000000000..ff4c3239a9
--- /dev/null
+++ b/third_party/python/pathspec/pathspec/_meta.py
@@ -0,0 +1,43 @@
+# encoding: utf-8
+"""
+This module contains the project meta-data.
+"""
+
+__author__ = "Caleb P. Burns"
+__copyright__ = "Copyright © 2013-2021 Caleb P. Burns"
+__credits__ = [
+ "dahlia <https://github.com/dahlia>",
+ "highb <https://github.com/highb>",
+ "029xue <https://github.com/029xue>",
+ "mikexstudios <https://github.com/mikexstudios>",
+ "nhumrich <https://github.com/nhumrich>",
+ "davidfraser <https://github.com/davidfraser>",
+ "demurgos <https://github.com/demurgos>",
+ "ghickman <https://github.com/ghickman>",
+ "nvie <https://github.com/nvie>",
+ "adrienverge <https://github.com/adrienverge>",
+ "AndersBlomdell <https://github.com/AndersBlomdell>",
+ "highb <https://github.com/highb>",
+ "thmxv <https://github.com/thmxv>",
+ "wimglenn <https://github.com/wimglenn>",
+ "hugovk <https://github.com/hugovk>",
+ "dcecile <https://github.com/dcecile>",
+ "mroutis <https://github.com/mroutis>",
+ "jdufresne <https://github.com/jdufresne>",
+ "groodt <https://github.com/groodt>",
+ "ftrofin <https://github.com/ftrofin>",
+ "pykong <https://github.com/pykong>",
+ "nhhollander <https://github.com/nhhollander>",
+ "KOLANICH <https://github.com/KOLANICH>",
+ "JonjonHays <https://github.com/JonjonHays>",
+ "Isaac0616 <https://github.com/Isaac0616>",
+ "SebastiaanZ <https://github.com/SebastiaanZ>",
+ "RoelAdriaans <https://github.com/RoelAdriaans>",
+ "raviselker <https://github.com/raviselker>",
+ "johanvergeer <https://github.com/johanvergeer>",
+ "danjer <https://github.com/danjer>",
+ "jhbuhrman <https://github.com/jhbuhrman>",
+ "WPDOrdina <https://github.com/WPDOrdina>",
+]
+__license__ = "MPL 2.0"
+__version__ = "0.9.0"
diff --git a/third_party/python/pathspec/pathspec/compat.py b/third_party/python/pathspec/pathspec/compat.py
new file mode 100644
index 0000000000..f5d17bf3ce
--- /dev/null
+++ b/third_party/python/pathspec/pathspec/compat.py
@@ -0,0 +1,41 @@
+# encoding: utf-8
+"""
+This module provides compatibility between Python 2 and 3. This project
+uses too little of `six`_ to justify including it as a dependency.
+
+.. _`six`: http://pythonhosted.org/six
+"""
+
+import sys
+
+if sys.version_info[0] < 3:
+ # Python 2.
+ unicode = unicode
+ string_types = (basestring,)
+
+ from collections import Iterable
+ from itertools import izip_longest
+
+ def iterkeys(mapping):
+ return mapping.iterkeys()
+
+else:
+ # Python 3.
+ unicode = str
+ string_types = (unicode,)
+
+ from collections.abc import Iterable
+ from itertools import zip_longest as izip_longest
+
+ def iterkeys(mapping):
+ return mapping.keys()
+
+try:
+ # Python 3.6+.
+ from collections.abc import Collection
+except ImportError:
+ # Python 2.7 - 3.5.
+ from collections import Container as Collection
+
+CollectionType = Collection
+IterableType = Iterable
diff --git a/third_party/python/pathspec/pathspec/pathspec.py b/third_party/python/pathspec/pathspec/pathspec.py
new file mode 100644
index 0000000000..ff40089d2c
--- /dev/null
+++ b/third_party/python/pathspec/pathspec/pathspec.py
@@ -0,0 +1,243 @@
+# encoding: utf-8
+"""
+This module provides an object oriented interface for pattern matching
+of files.
+"""
+
+try:
+ from typing import (
+ Any,
+ AnyStr,
+ Callable,
+ Iterable,
+ Iterator,
+ Optional,
+ Text,
+ Union)
+except ImportError:
+ pass
+
+try:
+ # Python 3.6+ type hints.
+ from os import PathLike
+ from typing import Collection
+except ImportError:
+ pass
+
+from . import util
+from .compat import (
+ CollectionType,
+ iterkeys,
+ izip_longest,
+ string_types)
+from .pattern import Pattern
+from .util import TreeEntry
+
+
+class PathSpec(object):
+ """
+ The :class:`PathSpec` class is a wrapper around a list of compiled
+ :class:`.Pattern` instances.
+ """
+
+ def __init__(self, patterns):
+ # type: (Iterable[Pattern]) -> None
+ """
+ Initializes the :class:`PathSpec` instance.
+
+ *patterns* (:class:`~collections.abc.Collection` or :class:`~collections.abc.Iterable`)
+ yields each compiled pattern (:class:`.Pattern`).
+ """
+
+ self.patterns = patterns if isinstance(patterns, CollectionType) else list(patterns)
+ """
+ *patterns* (:class:`~collections.abc.Collection` of :class:`.Pattern`)
+ contains the compiled patterns.
+ """
+
+ def __eq__(self, other):
+ # type: (PathSpec) -> bool
+ """
+ Tests the equality of this path-spec with *other* (:class:`PathSpec`)
+ by comparing their :attr:`~PathSpec.patterns` attributes.
+ """
+ if isinstance(other, PathSpec):
+ paired_patterns = izip_longest(self.patterns, other.patterns)
+ return all(a == b for a, b in paired_patterns)
+ else:
+ return NotImplemented
+
+ def __len__(self):
+ """
+ Returns the number of compiled patterns this path-spec contains
+ (:class:`int`).
+ """
+ return len(self.patterns)
+
+ def __add__(self, other):
+ # type: (PathSpec) -> PathSpec
+ """
+ Combines the :attr:`PathSpec.patterns` patterns from two
+ :class:`PathSpec` instances.
+ """
+ if isinstance(other, PathSpec):
+ return PathSpec(self.patterns + other.patterns)
+ else:
+ return NotImplemented
+
+ def __iadd__(self, other):
+ # type: (PathSpec) -> PathSpec
+ """
+ Adds the :attr:`PathSpec.patterns` patterns from one :class:`PathSpec`
+ instance to this instance.
+ """
+ if isinstance(other, PathSpec):
+ self.patterns += other.patterns
+ return self
+ else:
+ return NotImplemented
+
+ @classmethod
+ def from_lines(cls, pattern_factory, lines):
+ # type: (Union[Text, Callable[[AnyStr], Pattern]], Iterable[AnyStr]) -> PathSpec
+ """
+ Compiles the pattern lines.
+
+ *pattern_factory* can be either the name of a registered pattern
+ factory (:class:`str`), or a :class:`~collections.abc.Callable` used
+ to compile patterns. It must accept an uncompiled pattern (:class:`str`)
+ and return the compiled pattern (:class:`.Pattern`).
+
+ *lines* (:class:`~collections.abc.Iterable`) yields each uncompiled
+ pattern (:class:`str`). This simply has to yield each line so it can
+ be a :class:`file` (e.g., from :func:`open` or :class:`io.StringIO`)
+ or the result from :meth:`str.splitlines`.
+
+ Returns the :class:`PathSpec` instance.
+ """
+ if isinstance(pattern_factory, string_types):
+ pattern_factory = util.lookup_pattern(pattern_factory)
+ if not callable(pattern_factory):
+ raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
+
+ if not util._is_iterable(lines):
+ raise TypeError("lines:{!r} is not an iterable.".format(lines))
+
+ patterns = [pattern_factory(line) for line in lines if line]
+ return cls(patterns)
+
+ def match_file(self, file, separators=None):
+ # type: (Union[Text, PathLike], Optional[Collection[Text]]) -> bool
+ """
+ Matches the file to this path-spec.
+
+ *file* (:class:`str` or :class:`~pathlib.PurePath`) is the file path
+ to be matched against :attr:`self.patterns <PathSpec.patterns>`.
+
+ *separators* (:class:`~collections.abc.Collection` of :class:`str`)
+ optionally contains the path separators to normalize. See
+ :func:`~pathspec.util.normalize_file` for more information.
+
+ Returns :data:`True` if *file* matched; otherwise, :data:`False`.
+ """
+ norm_file = util.normalize_file(file, separators=separators)
+ return util.match_file(self.patterns, norm_file)
+
+ def match_entries(self, entries, separators=None):
+ # type: (Iterable[TreeEntry], Optional[Collection[Text]]) -> Iterator[TreeEntry]
+ """
+ Matches the entries to this path-spec.
+
+ *entries* (:class:`~collections.abc.Iterable` of :class:`~util.TreeEntry`)
+ contains the entries to be matched against :attr:`self.patterns <PathSpec.patterns>`.
+
+ *separators* (:class:`~collections.abc.Collection` of :class:`str`;
+ or :data:`None`) optionally contains the path separators to
+ normalize. See :func:`~pathspec.util.normalize_file` for more
+ information.
+
+ Returns the matched entries (:class:`~collections.abc.Iterator` of
+ :class:`~util.TreeEntry`).
+ """
+ if not util._is_iterable(entries):
+ raise TypeError("entries:{!r} is not an iterable.".format(entries))
+
+ entry_map = util._normalize_entries(entries, separators=separators)
+ match_paths = util.match_files(self.patterns, iterkeys(entry_map))
+ for path in match_paths:
+ yield entry_map[path]
+
+ def match_files(self, files, separators=None):
+ # type: (Iterable[Union[Text, PathLike]], Optional[Collection[Text]]) -> Iterator[Union[Text, PathLike]]
+ """
+ Matches the files to this path-spec.
+
+ *files* (:class:`~collections.abc.Iterable` of :class:`str`; or
+ :class:`pathlib.PurePath`) contains the file paths to be matched
+ against :attr:`self.patterns <PathSpec.patterns>`.
+
+ *separators* (:class:`~collections.abc.Collection` of :class:`str`;
+ or :data:`None`) optionally contains the path separators to
+ normalize. See :func:`~pathspec.util.normalize_file` for more
+ information.
+
+ Returns the matched files (:class:`~collections.abc.Iterator` of
+ :class:`str` or :class:`pathlib.PurePath`).
+ """
+ if not util._is_iterable(files):
+ raise TypeError("files:{!r} is not an iterable.".format(files))
+
+ file_map = util.normalize_files(files, separators=separators)
+ matched_files = util.match_files(self.patterns, iterkeys(file_map))
+ for norm_file in matched_files:
+ for orig_file in file_map[norm_file]:
+ yield orig_file
+
+ def match_tree_entries(self, root, on_error=None, follow_links=None):
+ # type: (Text, Optional[Callable], Optional[bool]) -> Iterator[TreeEntry]
+ """
+ Walks the specified root path for all files and directories and
+ matches them to this path-spec.
+
+ *root* (:class:`str`; or :class:`pathlib.PurePath`) is the root
+ directory to search.
+
+ *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
+ optionally is the error handler for file-system exceptions. See
+ :func:`~pathspec.util.iter_tree_entries` for more information.
+
+ *follow_links* (:class:`bool` or :data:`None`) optionally is whether
+ to walk symbolic links that resolve to directories. See
+ :func:`~pathspec.util.iter_tree_files` for more information.
+
+ Returns the matched entries (:class:`~collections.abc.Iterator` of
+ :class:`.TreeEntry`).
+ """
+ entries = util.iter_tree_entries(root, on_error=on_error, follow_links=follow_links)
+ return self.match_entries(entries)
+
+ def match_tree_files(self, root, on_error=None, follow_links=None):
+ # type: (Text, Optional[Callable], Optional[bool]) -> Iterator[Text]
+ """
+ Walks the specified root path for all files and matches them to this
+ path-spec.
+
+ *root* (:class:`str`; or :class:`pathlib.PurePath`) is the root
+ directory to search for files.
+
+ *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
+ optionally is the error handler for file-system exceptions. See
+ :func:`~pathspec.util.iter_tree_files` for more information.
+
+ *follow_links* (:class:`bool` or :data:`None`) optionally is whether
+ to walk symbolic links that resolve to directories. See
+ :func:`~pathspec.util.iter_tree_files` for more information.
+
+ Returns the matched files (:class:`~collections.abc.Iterator` of
+ :class:`str`).
+ """
+ files = util.iter_tree_files(root, on_error=on_error, follow_links=follow_links)
+ return self.match_files(files)
+
+ # Alias `match_tree_files()` as `match_tree()`.
+ match_tree = match_tree_files
diff --git a/third_party/python/pathspec/pathspec/pattern.py b/third_party/python/pathspec/pathspec/pattern.py
new file mode 100644
index 0000000000..c354c2632a
--- /dev/null
+++ b/third_party/python/pathspec/pathspec/pattern.py
@@ -0,0 +1,164 @@
+# encoding: utf-8
+"""
+This module provides the base definition for patterns.
+"""
+
+import re
+try:
+ from typing import (
+ AnyStr,
+ Iterable,
+ Iterator,
+ Optional,
+ Pattern as RegexHint,
+ Text,
+ Tuple,
+ Union)
+except ImportError:
+ pass
+
+from .compat import unicode
+
+
+class Pattern(object):
+ """
+ The :class:`Pattern` class is the abstract definition of a pattern.
+ """
+
+ # Make the class dict-less.
+ __slots__ = ('include',)
+
+ def __init__(self, include):
+ # type: (Optional[bool]) -> None
+ """
+ Initializes the :class:`Pattern` instance.
+
+ *include* (:class:`bool` or :data:`None`) is whether the matched
+ files should be included (:data:`True`), excluded (:data:`False`),
+ or is a null-operation (:data:`None`).
+ """
+
+ self.include = include
+ """
+ *include* (:class:`bool` or :data:`None`) is whether the matched
+ files should be included (:data:`True`), excluded (:data:`False`),
+ or is a null-operation (:data:`None`).
+ """
+
+ def match(self, files):
+ # type: (Iterable[Text]) -> Iterator[Text]
+ """
+ Matches this pattern against the specified files.
+
+ *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
+ each file relative to the root directory (e.g., ``"relative/path/to/file"``).
+
+ Returns an :class:`~collections.abc.Iterable` yielding each matched
+ file path (:class:`str`).
+ """
+ raise NotImplementedError("{}.{} must override match().".format(self.__class__.__module__, self.__class__.__name__))
+
+
+class RegexPattern(Pattern):
+ """
+ The :class:`RegexPattern` class is an implementation of a pattern
+ using regular expressions.
+ """
+
+ # Make the class dict-less.
+ __slots__ = ('regex',)
+
+ def __init__(self, pattern, include=None):
+ # type: (Union[AnyStr, RegexHint], Optional[bool]) -> None
+ """
+ Initializes the :class:`RegexPattern` instance.
+
+ *pattern* (:class:`unicode`, :class:`bytes`, :class:`re.RegexObject`,
+ or :data:`None`) is the pattern to compile into a regular
+ expression.
+
+ *include* (:class:`bool` or :data:`None`) must be :data:`None`
+ unless *pattern* is a precompiled regular expression (:class:`re.RegexObject`)
+ in which case it is whether matched files should be included
+ (:data:`True`), excluded (:data:`False`), or is a null operation
+ (:data:`None`).
+
+ .. NOTE:: Subclasses do not need to support the *include*
+ parameter.
+ """
+
+ self.regex = None
+ """
+ *regex* (:class:`re.RegexObject`) is the regular expression for the
+ pattern.
+ """
+
+ if isinstance(pattern, (unicode, bytes)):
+ assert include is None, "include:{!r} must be null when pattern:{!r} is a string.".format(include, pattern)
+ regex, include = self.pattern_to_regex(pattern)
+ # NOTE: Make sure to allow a null regular expression to be
+ # returned for a null-operation.
+ if include is not None:
+ regex = re.compile(regex)
+
+ elif pattern is not None and hasattr(pattern, 'match'):
+ # Assume pattern is a precompiled regular expression.
+ # - NOTE: Use the specified *include*.
+ regex = pattern
+
+ elif pattern is None:
+ # NOTE: Make sure to allow a null pattern to be passed for a
+ # null-operation.
+ assert include is None, "include:{!r} must be null when pattern:{!r} is null.".format(include, pattern)
+
+ else:
+ raise TypeError("pattern:{!r} is not a string, RegexObject, or None.".format(pattern))
+
+ super(RegexPattern, self).__init__(include)
+ self.regex = regex
+
+ def __eq__(self, other):
+ # type: (RegexPattern) -> bool
+ """
+ Tests the equality of this regex pattern with *other* (:class:`RegexPattern`)
+ by comparing their :attr:`~Pattern.include` and :attr:`~RegexPattern.regex`
+ attributes.
+ """
+ if isinstance(other, RegexPattern):
+ return self.include == other.include and self.regex == other.regex
+ else:
+ return NotImplemented
+
+ def match(self, files):
+ # type: (Iterable[Text]) -> Iterable[Text]
+ """
+ Matches this pattern against the specified files.
+
+ *files* (:class:`~collections.abc.Iterable` of :class:`str`)
+ contains each file relative to the root directory (e.g., "relative/path/to/file").
+
+ Returns an :class:`~collections.abc.Iterable` yielding each matched
+ file path (:class:`str`).
+ """
+ if self.include is not None:
+ for path in files:
+ if self.regex.match(path) is not None:
+ yield path
+
+ @classmethod
+ def pattern_to_regex(cls, pattern):
+ # type: (Text) -> Tuple[Text, bool]
+ """
+ Convert the pattern into an uncompiled regular expression.
+
+ *pattern* (:class:`str`) is the pattern to convert into a regular
+ expression.
+
+ Returns the uncompiled regular expression (:class:`str` or :data:`None`),
+ and whether matched files should be included (:data:`True`),
+ excluded (:data:`False`), or is a null-operation (:data:`None`).
+
+ .. NOTE:: The default implementation simply returns *pattern* and
+ :data:`True`.
+ """
+ return pattern, True
diff --git a/third_party/python/pathspec/pathspec/patterns/__init__.py b/third_party/python/pathspec/pathspec/patterns/__init__.py
new file mode 100644
index 0000000000..1a0d55ec74
--- /dev/null
+++ b/third_party/python/pathspec/pathspec/patterns/__init__.py
@@ -0,0 +1,8 @@
+# encoding: utf-8
+"""
+The *pathspec.patterns* package contains the pattern matching
+implementations.
+"""
+
+# Load pattern implementations.
+from .gitwildmatch import GitWildMatchPattern
diff --git a/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py b/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py
new file mode 100644
index 0000000000..afd8f6b4cf
--- /dev/null
+++ b/third_party/python/pathspec/pathspec/patterns/gitwildmatch.py
@@ -0,0 +1,400 @@
+# encoding: utf-8
+"""
+This module implements Git's wildmatch pattern matching which itself is
+derived from Rsync's wildmatch. Git uses wildmatch for its ".gitignore"
+files.
+"""
+from __future__ import unicode_literals
+
+import re
+import warnings
+try:
+ from typing import (
+ AnyStr,
+ Optional,
+ Text,
+ Tuple)
+except ImportError:
+ pass
+
+from .. import util
+from ..compat import unicode
+from ..pattern import RegexPattern
+
+#: The encoding to use when parsing a byte string pattern.
+_BYTES_ENCODING = 'latin1'
+
+
+class GitWildMatchPatternError(ValueError):
+ """
+ The :class:`GitWildMatchPatternError` indicates an invalid git wild match
+ pattern.
+ """
+ pass
+
+
+class GitWildMatchPattern(RegexPattern):
+ """
+ The :class:`GitWildMatchPattern` class represents a compiled Git
+ wildmatch pattern.
+ """
+
+ # Keep the dict-less class hierarchy.
+ __slots__ = ()
+
+ @classmethod
+ def pattern_to_regex(cls, pattern):
+ # type: (AnyStr) -> Tuple[Optional[AnyStr], Optional[bool]]
+ """
+ Convert the pattern into a regular expression.
+
+ *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
+ convert into a regular expression.
+
+ Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
+ or :data:`None`), and whether matched files should be included
+ (:data:`True`), excluded (:data:`False`), or is a
+ null-operation (:data:`None`).
+ """
+ if isinstance(pattern, unicode):
+ return_type = unicode
+ elif isinstance(pattern, bytes):
+ return_type = bytes
+ pattern = pattern.decode(_BYTES_ENCODING)
+ else:
+ raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))
+
+ original_pattern = pattern
+ pattern = pattern.strip()
+
+ if pattern.startswith('#'):
+ # A pattern starting with a hash ('#') serves as a comment
+ # (neither includes nor excludes files). Escape the hash with a
+ # back-slash to match a literal hash (i.e., '\#').
+ regex = None
+ include = None
+
+ elif pattern == '/':
+ # EDGE CASE: According to `git check-ignore` (v2.4.1), a single
+ # '/' does not match any file.
+ regex = None
+ include = None
+
+ elif pattern:
+ if pattern.startswith('!'):
+ # A pattern starting with an exclamation mark ('!') negates the
+ # pattern (exclude instead of include). Escape the exclamation
+ # mark with a back-slash to match a literal exclamation mark
+ # (i.e., '\!').
+ include = False
+ # Remove leading exclamation mark.
+ pattern = pattern[1:]
+ else:
+ include = True
+
+ if pattern.startswith('\\'):
+ # Remove leading back-slash escape for escaped hash ('#') or
+ # exclamation mark ('!').
+ pattern = pattern[1:]
+
+ # Allow a regex override for edge cases that cannot be handled
+ # through normalization.
+ override_regex = None
+
+ # Split pattern into segments.
+ pattern_segs = pattern.split('/')
+
+ # Normalize pattern to make processing easier.
+
+ # EDGE CASE: Deal with duplicate double-asterisk sequences.
+ # Collapse each sequence down to one double-asterisk. Iterate over
+ # the segments in reverse and remove the duplicate double
+ # asterisks as we go.
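+ # For example, "docs/**/**/api" collapses to "docs/**/api".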
+ for i in range(len(pattern_segs) - 1, 0, -1):
+ prev = pattern_segs[i-1]
+ seg = pattern_segs[i]
+ if prev == '**' and seg == '**':
+ del pattern_segs[i]
+
+ if len(pattern_segs) == 2 and pattern_segs[0] == '**' and not pattern_segs[1]:
+ # EDGE CASE: The '**/' pattern should match everything except
+ # individual files in the root directory. This case cannot be
+ # adequately handled through normalization. Use the override.
+ override_regex = '^.+/.*$'
+
+ if not pattern_segs[0]:
+ # A pattern beginning with a slash ('/') will only match paths
+ # directly on the root directory instead of any descendant
+ # paths. So, remove empty first segment to make pattern relative
+ # to root.
+ del pattern_segs[0]
+
+ elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
+ # A single pattern without a beginning slash ('/') will match
+ # any descendant path. This is equivalent to "**/{pattern}". So,
+ # prepend with double-asterisks to make pattern relative to
+ # root.
+ # EDGE CASE: This also holds for a single pattern with a
+ # trailing slash (e.g. dir/).
+ if pattern_segs[0] != '**':
+ pattern_segs.insert(0, '**')
+
+ else:
+ # EDGE CASE: A pattern without a beginning slash ('/') but
+ # contains at least one prepended directory (e.g.
+ # "dir/{pattern}") should not match "**/dir/{pattern}",
+ # according to `git check-ignore` (v2.4.1).
+ pass
+
+ if not pattern_segs:
+ # After resolving the edge cases, we end up with no
+ # pattern at all. This must be because the pattern is
+ # invalid.
+ raise GitWildMatchPatternError("Invalid git pattern: %r" % (original_pattern,))
+
+ if not pattern_segs[-1] and len(pattern_segs) > 1:
+ # A pattern ending with a slash ('/') will match all
+ # descendant paths if it is a directory but not if it is a
+ # regular file. This is equivalent to "{pattern}/**". So, set
+ # last segment to a double-asterisk to include all
+ # descendants.
+ pattern_segs[-1] = '**'
+
+ if override_regex is None:
+ # Build regular expression from pattern.
+ output = ['^']
+ need_slash = False
+ end = len(pattern_segs) - 1
+ for i, seg in enumerate(pattern_segs):
+ if seg == '**':
+ if i == 0 and i == end:
+ # A pattern consisting solely of double-asterisks ('**')
+ # will match every path.
+ output.append('.+')
+ elif i == 0:
+ # A normalized pattern beginning with double-asterisks
+ # ('**') will match any leading path segments.
+ output.append('(?:.+/)?')
+ need_slash = False
+ elif i == end:
+ # A normalized pattern ending with double-asterisks ('**')
+ # will match any trailing path segments.
+ output.append('/.*')
+ else:
+ # A pattern with inner double-asterisks ('**') will match
+ # multiple (or zero) inner path segments.
+ output.append('(?:/.+)?')
+ need_slash = True
+
+ elif seg == '*':
+ # Match single path segment.
+ if need_slash:
+ output.append('/')
+ output.append('[^/]+')
+ need_slash = True
+
+ else:
+ # Match segment glob pattern.
+ if need_slash:
+ output.append('/')
+
+ output.append(cls._translate_segment_glob(seg))
+ if i == end and include is True:
+ # A pattern ending without a slash ('/') will match a file
+ # or a directory (with paths underneath it). E.g., "foo"
+ # matches "foo", "foo/bar", "foo/bar/baz", etc.
+ # EDGE CASE: However, this does not hold for exclusion cases
+ # according to `git check-ignore` (v2.4.1).
+ output.append('(?:/.*)?')
+
+ need_slash = True
+
+ output.append('$')
+ regex = ''.join(output)
+
+ else:
+ # Use regex override.
+ regex = override_regex
+
+ else:
+ # A blank pattern is a null-operation (neither includes nor
+ # excludes files).
+ regex = None
+ include = None
+
+ if regex is not None and return_type is bytes:
+ regex = regex.encode(_BYTES_ENCODING)
+
+ return regex, include
+
+ @staticmethod
+ def _translate_segment_glob(pattern):
+ # type: (Text) -> Text
+ """
+ Translates the glob pattern to a regular expression. This is used in
+ the constructor to translate a path segment glob pattern to its
+ corresponding regular expression.
+
+ *pattern* (:class:`str`) is the glob pattern.
+
+ Returns the regular expression (:class:`str`).
+ """
+ # NOTE: This is derived from `fnmatch.translate()` and is similar to
+ # the POSIX function `fnmatch()` with the `FNM_PATHNAME` flag set.
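+ # For example (illustrative): "*.py[co]" translates to "[^/]*\.py[co]".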
+
+ escape = False
+ regex = ''
+ i, end = 0, len(pattern)
+ while i < end:
+ # Get next character.
+ char = pattern[i]
+ i += 1
+
+ if escape:
+ # Escape the character.
+ escape = False
+ regex += re.escape(char)
+
+ elif char == '\\':
+ # Escape character, escape next character.
+ escape = True
+
+ elif char == '*':
+ # Multi-character wildcard. Match any string (except slashes),
+ # including an empty string.
+ regex += '[^/]*'
+
+ elif char == '?':
+ # Single-character wildcard. Match any single character (except
+ # a slash).
+ regex += '[^/]'
+
+ elif char == '[':
+ # Bracket expression wildcard. Except for the beginning
+ # exclamation mark, the whole bracket expression can be used
+ # directly as regex but we have to find where the expression
+ # ends.
+ # - "[][!]" matches ']', '[' and '!'.
+ # - "[]-]" matches ']' and '-'.
+ # - "[!]a-]" matches any character except ']', 'a' and '-'.
+ j = i
+ # Pass bracket expression negation.
+ if j < end and pattern[j] == '!':
+ j += 1
+ # Pass first closing bracket if it is at the beginning of the
+ # expression.
+ if j < end and pattern[j] == ']':
+ j += 1
+ # Find closing bracket. Stop once we reach the end or find it.
+ while j < end and pattern[j] != ']':
+ j += 1
+
+ if j < end:
+ # Found end of bracket expression. Increment j to be one past
+ # the closing bracket:
+ #
+ # [...]
+ # ^ ^
+ # i j
+ #
+ j += 1
+ expr = '['
+
+ if pattern[i] == '!':
+ # Bracket expression needs to be negated.
+ expr += '^'
+ i += 1
+ elif pattern[i] == '^':
+ # POSIX declares that the regex bracket expression negation
+ # "[^...]" is undefined in a glob pattern. Python's
+ # `fnmatch.translate()` escapes the caret ('^') as a
+ # literal. To maintain consistency with undefined behavior,
+ # I am escaping the '^' as well.
+ expr += '\\^'
+ i += 1
+
+ # Build regex bracket expression. Escape slashes so they are
+ # treated as literal slashes by regex as defined by POSIX.
+ expr += pattern[i:j].replace('\\', '\\\\')
+
+ # Add regex bracket expression to regex result.
+ regex += expr
+
+ # Set i to one past the closing bracket.
+ i = j
+
+ else:
+ # Failed to find closing bracket, treat opening bracket as a
+ # bracket literal instead of as an expression.
+ regex += '\\['
+
+ else:
+ # Regular character, escape it for regex.
+ regex += re.escape(char)
+
+ return regex
+
+ @staticmethod
+ def escape(s):
+ # type: (AnyStr) -> AnyStr
+ """
+ Escape special characters in the given string.
+
+ *s* (:class:`unicode` or :class:`bytes`) is a filename or a string
+ to escape, usually before adding it to a ".gitignore" file.
+
+ Returns the escaped string (:class:`unicode` or :class:`bytes`).
+ """
+ if isinstance(s, unicode):
+ return_type = unicode
+ string = s
+ elif isinstance(s, bytes):
+ return_type = bytes
+ string = s.decode(_BYTES_ENCODING)
+ else:
+ raise TypeError("s:{!r} is not a unicode or byte string.".format(s))
+
+ # Reference: https://git-scm.com/docs/gitignore#_pattern_format
+ meta_characters = r"[]!*#?"
+
+ out_string = "".join("\\" + x if x in meta_characters else x for x in string)
+
+ if return_type is bytes:
+ return out_string.encode(_BYTES_ENCODING)
+ else:
+ return out_string
+
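+# For example (illustrative): GitWildMatchPattern.escape("foo[bar]!") returns
+# "foo\[bar\]\!", which then matches the literal name "foo[bar]!".
+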
+util.register_pattern('gitwildmatch', GitWildMatchPattern)
+
+
+class GitIgnorePattern(GitWildMatchPattern):
+ """
+ The :class:`GitIgnorePattern` class is deprecated by :class:`GitWildMatchPattern`.
+ This class only exists to maintain compatibility with v0.4.
+ """
+
+ def __init__(self, *args, **kw):
+ """
+ Warn about deprecation.
+ """
+ self._deprecated()
+ super(GitIgnorePattern, self).__init__(*args, **kw)
+
+ @staticmethod
+ def _deprecated():
+ """
+ Warn about deprecation.
+ """
+ warnings.warn("GitIgnorePattern ('gitignore') is deprecated. Use GitWildMatchPattern ('gitwildmatch') instead.", DeprecationWarning, stacklevel=3)
+
+ @classmethod
+ def pattern_to_regex(cls, *args, **kw):
+ """
+ Warn about deprecation.
+ """
+ cls._deprecated()
+ return super(GitIgnorePattern, cls).pattern_to_regex(*args, **kw)
+
+# Register `GitIgnorePattern` as "gitignore" for backward compatibility
+# with v0.4.
+util.register_pattern('gitignore', GitIgnorePattern)
diff --git a/third_party/python/pathspec/pathspec/util.py b/third_party/python/pathspec/pathspec/util.py
new file mode 100644
index 0000000000..64a5dea9db
--- /dev/null
+++ b/third_party/python/pathspec/pathspec/util.py
@@ -0,0 +1,665 @@
+# encoding: utf-8
+"""
+This module provides utility methods for dealing with path-specs.
+"""
+
+import os
+import os.path
+import posixpath
+import stat
+try:
+ from typing import (
+ Any,
+ AnyStr,
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Text,
+ Union)
+except ImportError:
+ pass
+try:
+ # Python 3.6+ type hints.
+ from os import PathLike
+ from typing import Collection
+except ImportError:
+ pass
+
+from .compat import (
+ CollectionType,
+ IterableType,
+ string_types,
+ unicode)
+from .pattern import Pattern
+
+NORMALIZE_PATH_SEPS = [sep for sep in [os.sep, os.altsep] if sep and sep != posixpath.sep]
+"""
+*NORMALIZE_PATH_SEPS* (:class:`list` of :class:`str`) contains the path
+separators that need to be normalized to the POSIX separator for the
+current operating system. The separators are determined by examining
+:data:`os.sep` and :data:`os.altsep`.
+"""
+
+_registered_patterns = {}
+"""
+*_registered_patterns* (:class:`dict`) maps a name (:class:`str`) to the
+registered pattern factory (:class:`~collections.abc.Callable`).
+"""
+
+
+def detailed_match_files(patterns, files, all_matches=None):
+ # type: (Iterable[Pattern], Iterable[Text], Optional[bool]) -> Dict[Text, 'MatchDetail']
+ """
+ Matches the files to the patterns, and returns which patterns matched
+ the files.
+
+ *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
+ contains the patterns to use.
+
+ *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
+ the normalized file paths to be matched against *patterns*.
+
+ *all_matches* (:class:`bool` or :data:`None`) is whether to return all
+ matched patterns (:data:`True`), or only the last matched pattern
+ (:data:`False`). Default is :data:`None` for :data:`False`.
+
+ Returns the matched files (:class:`dict`) which maps each matched file
+ (:class:`str`) to the patterns that matched in order (:class:`.MatchDetail`).
+ """
+ all_files = files if isinstance(files, CollectionType) else list(files)
+ return_files = {}
+ for pattern in patterns:
+ if pattern.include is not None:
+ result_files = pattern.match(all_files)
+ if pattern.include:
+ # Add files and record pattern.
+ for result_file in result_files:
+ if result_file in return_files:
+ if all_matches:
+ return_files[result_file].patterns.append(pattern)
+ else:
+ return_files[result_file].patterns[0] = pattern
+ else:
+ return_files[result_file] = MatchDetail([pattern])
+
+ else:
+ # Remove files.
+ for file in result_files:
+ del return_files[file]
+
+ return return_files
+
+
+def _is_iterable(value):
+ # type: (Any) -> bool
+ """
+ Check whether the value is an iterable (excludes strings).
+
+ *value* is the value to check.
+
+ Returns whether *value* is an iterable (:class:`bool`).
+ """
+ return isinstance(value, IterableType) and not isinstance(value, (unicode, bytes))
+
+
+def iter_tree_entries(root, on_error=None, follow_links=None):
+ # type: (Text, Optional[Callable], Optional[bool]) -> Iterator['TreeEntry']
+ """
+ Walks the specified directory for all files and directories.
+
+ *root* (:class:`str`) is the root directory to search.
+
+ *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
+ optionally is the error handler for file-system exceptions. It will be
+ called with the exception (:exc:`OSError`). Reraise the exception to
+ abort the walk. Default is :data:`None` to ignore file-system
+ exceptions.
+
+ *follow_links* (:class:`bool` or :data:`None`) optionally is whether
+ to walk symbolic links that resolve to directories. Default is
+ :data:`None` for :data:`True`.
+
+ Raises :exc:`RecursionError` if recursion is detected.
+
+ Returns an :class:`~collections.abc.Iterator` yielding each file or
+ directory entry (:class:`.TreeEntry`) relative to *root*.
+ """
+ if on_error is not None and not callable(on_error):
+ raise TypeError("on_error:{!r} is not callable.".format(on_error))
+
+ if follow_links is None:
+ follow_links = True
+
+ for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links):
+ yield entry
+
+
+def iter_tree_files(root, on_error=None, follow_links=None):
+ # type: (Text, Optional[Callable], Optional[bool]) -> Iterator[Text]
+ """
+ Walks the specified directory for all files.
+
+ *root* (:class:`str`) is the root directory to search for files.
+
+ *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
+ optionally is the error handler for file-system exceptions. It will be
+ called with the exception (:exc:`OSError`). Reraise the exception to
+ abort the walk. Default is :data:`None` to ignore file-system
+ exceptions.
+
+ *follow_links* (:class:`bool` or :data:`None`) optionally is whether
+ to walk symbolic links that resolve to directories. Default is
+ :data:`None` for :data:`True`.
+
+ Raises :exc:`RecursionError` if recursion is detected.
+
+ Returns an :class:`~collections.abc.Iterator` yielding the path to
+ each file (:class:`str`) relative to *root*.
+ """
+ if on_error is not None and not callable(on_error):
+ raise TypeError("on_error:{!r} is not callable.".format(on_error))
+
+ if follow_links is None:
+ follow_links = True
+
+ for entry in _iter_tree_entries_next(os.path.abspath(root), '', {}, on_error, follow_links):
+ if not entry.is_dir(follow_links):
+ yield entry.path
+
+
+# Alias `iter_tree_files()` as `iter_tree()`.
+iter_tree = iter_tree_files
+
+
+def _iter_tree_entries_next(root_full, dir_rel, memo, on_error, follow_links):
+ # type: (Text, Text, Dict[Text, Text], Callable, bool) -> Iterator['TreeEntry']
+ """
+ Scan the directory for all descendant files and directories.
+
+ *root_full* (:class:`str`) the absolute path to the root directory.
+
+ *dir_rel* (:class:`str`) the path to the directory to scan relative to
+ *root_full*.
+
+ *memo* (:class:`dict`) keeps track of ancestor directories
+ encountered. Maps each ancestor real path (:class:`str`) to relative
+ path (:class:`str`).
+
+ *on_error* (:class:`~collections.abc.Callable` or :data:`None`)
+ optionally is the error handler for file-system exceptions.
+
+ *follow_links* (:class:`bool`) is whether to walk symbolic links that
+ resolve to directories.
+
+ Yields each entry (:class:`.TreeEntry`).
+ """
+ dir_full = os.path.join(root_full, dir_rel)
+ dir_real = os.path.realpath(dir_full)
+
+ # Remember each encountered ancestor directory and its canonical
+ # (real) path. If a canonical path is encountered more than once,
+ # recursion has occurred.
+ if dir_real not in memo:
+ memo[dir_real] = dir_rel
+ else:
+ raise RecursionError(real_path=dir_real, first_path=memo[dir_real], second_path=dir_rel)
+
+ for node_name in os.listdir(dir_full):
+ node_rel = os.path.join(dir_rel, node_name)
+ node_full = os.path.join(root_full, node_rel)
+
+ # Inspect child node.
+ try:
+ node_lstat = os.lstat(node_full)
+ except OSError as e:
+ if on_error is not None:
+ on_error(e)
+ continue
+
+ if stat.S_ISLNK(node_lstat.st_mode):
+ # Child node is a link, inspect the target node.
+ is_link = True
+ try:
+ node_stat = os.stat(node_full)
+ except OSError as e:
+ if on_error is not None:
+ on_error(e)
+ continue
+ else:
+ is_link = False
+ node_stat = node_lstat
+
+ if stat.S_ISDIR(node_stat.st_mode) and (follow_links or not is_link):
+ # Child node is a directory, recurse into it and yield its
+ # descendant files.
+ yield TreeEntry(node_name, node_rel, node_lstat, node_stat)
+
+ for entry in _iter_tree_entries_next(root_full, node_rel, memo, on_error, follow_links):
+ yield entry
+
+ elif stat.S_ISREG(node_stat.st_mode) or is_link:
+ # Child node is either a file or an unfollowed link, yield it.
+ yield TreeEntry(node_name, node_rel, node_lstat, node_stat)
+
+ # NOTE: Make sure to remove the canonical (real) path of the directory
+ # from the ancestors memo once we are done with it. This allows the
+ # same directory to appear multiple times. If this is not done, the
+ # second occurrence of the directory will be incorrectly interpreted
+ # as a recursion. See <https://github.com/cpburnz/python-path-specification/pull/7>.
+ del memo[dir_real]
+
+
+def lookup_pattern(name):
+ # type: (Text) -> Callable[[AnyStr], Pattern]
+ """
+ Looks up a registered pattern factory by name.
+
+ *name* (:class:`str`) is the name of the pattern factory.
+
+ Returns the registered pattern factory (:class:`~collections.abc.Callable`).
+ If no pattern factory is registered, raises :exc:`KeyError`.
+ """
+ return _registered_patterns[name]
+
+
+def match_file(patterns, file):
+ # type: (Iterable[Pattern], Text) -> bool
+ """
+ Matches the file to the patterns.
+
+ *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
+ contains the patterns to use.
+
+ *file* (:class:`str`) is the normalized file path to be matched
+ against *patterns*.
+
+ Returns :data:`True` if *file* matched; otherwise, :data:`False`.
+ """
+ matched = False
+ for pattern in patterns:
+ if pattern.include is not None:
+ if file in pattern.match((file,)):
+ matched = pattern.include
+ return matched
+
+
+def match_files(patterns, files):
+ # type: (Iterable[Pattern], Iterable[Text]) -> Set[Text]
+ """
+ Matches the files to the patterns.
+
+ *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
+ contains the patterns to use.
+
+ *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains
+ the normalized file paths to be matched against *patterns*.
+
+ Returns the matched files (:class:`set` of :class:`str`).
+ """
+ all_files = files if isinstance(files, CollectionType) else list(files)
+ return_files = set()
+ for pattern in patterns:
+ if pattern.include is not None:
+ result_files = pattern.match(all_files)
+ if pattern.include:
+ return_files.update(result_files)
+ else:
+ return_files.difference_update(result_files)
+ return return_files
+
+
+def _normalize_entries(entries, separators=None):
+ # type: (Iterable['TreeEntry'], Optional[Collection[Text]]) -> Dict[Text, 'TreeEntry']
+ """
+ Normalizes the entry paths to use the POSIX path separator.
+
+ *entries* (:class:`~collections.abc.Iterable` of :class:`.TreeEntry`)
+ contains the entries to be normalized.
+
+ *separators* (:class:`~collections.abc.Collection` of :class:`str`; or
+ :data:`None`) optionally contains the path separators to normalize.
+ See :func:`normalize_file` for more information.
+
+ Returns a :class:`dict` mapping each normalized file path (:class:`str`)
+ to its entry (:class:`.TreeEntry`).
+ """
+ norm_files = {}
+ for entry in entries:
+ norm_files[normalize_file(entry.path, separators=separators)] = entry
+ return norm_files
+
+
+def normalize_file(file, separators=None):
+ # type: (Union[Text, PathLike], Optional[Collection[Text]]) -> Text
+ """
+ Normalizes the file path to use the POSIX path separator (i.e.,
+ ``'/'``), and make the paths relative (remove leading ``'/'``).
+
+ *file* (:class:`str` or :class:`pathlib.PurePath`) is the file path.
+
+ *separators* (:class:`~collections.abc.Collection` of :class:`str`; or
+ :data:`None`) optionally contains the path separators to normalize.
+ This does not need to include the POSIX path separator (``'/'``), but
+ including it will not affect the results. Default is :data:`None` for
+ :data:`NORMALIZE_PATH_SEPS`. To prevent normalization, pass an empty
+ container (e.g., an empty tuple ``()``).
+
+ Returns the normalized file path (:class:`str`).
+ """
+ # Normalize path separators.
+ if separators is None:
+ separators = NORMALIZE_PATH_SEPS
+
+ # Convert path object to string.
+ norm_file = str(file)
+
+ for sep in separators:
+ norm_file = norm_file.replace(sep, posixpath.sep)
+
+ if norm_file.startswith('/'):
+ # Make path relative.
+ norm_file = norm_file[1:]
+
+ elif norm_file.startswith('./'):
+ # Remove current directory prefix.
+ norm_file = norm_file[2:]
+
+ return norm_file
+
+
+def normalize_files(files, separators=None):
+ # type: (Iterable[Union[str, PathLike]], Optional[Collection[Text]]) -> Dict[Text, List[Union[str, PathLike]]]
+ """
+ Normalizes the file paths to use the POSIX path separator.
+
+ *files* (:class:`~collections.abc.Iterable` of :class:`str` or
+ :class:`pathlib.PurePath`) contains the file paths to be normalized.
+
+ *separators* (:class:`~collections.abc.Collection` of :class:`str`; or
+ :data:`None`) optionally contains the path separators to normalize.
+ See :func:`normalize_file` for more information.
+
+ Returns a :class:`dict` mapping each normalized file path
+ (:class:`str`) to the original file paths (:class:`list` of
+ :class:`str` or :class:`pathlib.PurePath`).
+ """
+ norm_files = {}
+ for path in files:
+ norm_file = normalize_file(path, separators=separators)
+ if norm_file in norm_files:
+ norm_files[norm_file].append(path)
+ else:
+ norm_files[norm_file] = [path]
+
+ return norm_files
+
+
+def register_pattern(name, pattern_factory, override=None):
+ # type: (Text, Callable[[AnyStr], Pattern], Optional[bool]) -> None
+ """
+ Registers the specified pattern factory.
+
+ *name* (:class:`str`) is the name to register the pattern factory
+ under.
+
+ *pattern_factory* (:class:`~collections.abc.Callable`) is used to
+ compile patterns. It must accept an uncompiled pattern (:class:`str`)
+ and return the compiled pattern (:class:`.Pattern`).
+
+ *override* (:class:`bool` or :data:`None`) optionally is whether to
+ allow overriding an already registered pattern under the same name
+ (:data:`True`), instead of raising an :exc:`AlreadyRegisteredError`
+ (:data:`False`). Default is :data:`None` for :data:`False`.
+ """
+ if not isinstance(name, string_types):
+ raise TypeError("name:{!r} is not a string.".format(name))
+ if not callable(pattern_factory):
+ raise TypeError("pattern_factory:{!r} is not callable.".format(pattern_factory))
+ if name in _registered_patterns and not override:
+ raise AlreadyRegisteredError(name, _registered_patterns[name])
+ _registered_patterns[name] = pattern_factory
+
+
+class AlreadyRegisteredError(Exception):
+ """
+ The :exc:`AlreadyRegisteredError` exception is raised when a pattern
+ factory is registered under a name already in use.
+ """
+
+ def __init__(self, name, pattern_factory):
+ # type: (Text, Callable[[AnyStr], Pattern]) -> None
+ """
+ Initializes the :exc:`AlreadyRegisteredError` instance.
+
+ *name* (:class:`str`) is the name of the registered pattern.
+
+ *pattern_factory* (:class:`~collections.abc.Callable`) is the
+ registered pattern factory.
+ """
+ super(AlreadyRegisteredError, self).__init__(name, pattern_factory)
+
+ @property
+ def message(self):
+ # type: () -> Text
+ """
+ *message* (:class:`str`) is the error message.
+ """
+ return "{name!r} is already registered for pattern factory:{pattern_factory!r}.".format(
+ name=self.name,
+ pattern_factory=self.pattern_factory,
+ )
+
+ @property
+ def name(self):
+ # type: () -> Text
+ """
+ *name* (:class:`str`) is the name of the registered pattern.
+ """
+ return self.args[0]
+
+ @property
+ def pattern_factory(self):
+ # type: () -> Callable[[AnyStr], Pattern]
+ """
+ *pattern_factory* (:class:`~collections.abc.Callable`) is the
+ registered pattern factory.
+ """
+ return self.args[1]
+
+
+class RecursionError(Exception):
+ """
+ The :exc:`RecursionError` exception is raised when recursion is
+ detected.
+ """
+
+ def __init__(self, real_path, first_path, second_path):
+ # type: (Text, Text, Text) -> None
+ """
+ Initializes the :exc:`RecursionError` instance.
+
+ *real_path* (:class:`str`) is the real path that recursion was
+ encountered on.
+
+ *first_path* (:class:`str`) is the first path encountered for
+ *real_path*.
+
+ *second_path* (:class:`str`) is the second path encountered for
+ *real_path*.
+ """
+ super(RecursionError, self).__init__(real_path, first_path, second_path)
+
+ @property
+ def first_path(self):
+ # type: () -> Text
+ """
+ *first_path* (:class:`str`) is the first path encountered for
+ :attr:`self.real_path <RecursionError.real_path>`.
+ """
+ return self.args[1]
+
+ @property
+ def message(self):
+ # type: () -> Text
+ """
+ *message* (:class:`str`) is the error message.
+ """
+ return "Real path {real!r} was encountered at {first!r} and then {second!r}.".format(
+ real=self.real_path,
+ first=self.first_path,
+ second=self.second_path,
+ )
+
+ @property
+ def real_path(self):
+ # type: () -> Text
+ """
+ *real_path* (:class:`str`) is the real path that recursion was
+ encountered on.
+ """
+ return self.args[0]
+
+ @property
+ def second_path(self):
+ # type: () -> Text
+ """
+ *second_path* (:class:`str`) is the second path encountered for
+ :attr:`self.real_path <RecursionError.real_path>`.
+ """
+ return self.args[2]
+
+
+class MatchDetail(object):
+ """
+ The :class:`.MatchDetail` class contains information about the
+ patterns that matched a file.
+ """
+
+ #: Make the class dict-less.
+ __slots__ = ('patterns',)
+
+ def __init__(self, patterns):
+ # type: (Sequence[Pattern]) -> None
+ """
+ Initialize the :class:`.MatchDetail` instance.
+
+ *patterns* (:class:`~collections.abc.Sequence` of :class:`~pathspec.pattern.Pattern`)
+ contains the patterns that matched the file in the order they were
+ encountered.
+ """
+
+ self.patterns = patterns
+ """
+ *patterns* (:class:`~collections.abc.Sequence` of :class:`~pathspec.pattern.Pattern`)
+ contains the patterns that matched the file in the order they were
+ encountered.
+ """
+
+
+class TreeEntry(object):
+ """
+ The :class:`.TreeEntry` class contains information about a file-system
+ entry.
+ """
+
+ #: Make the class dict-less.
+ __slots__ = ('_lstat', 'name', 'path', '_stat')
+
+ def __init__(self, name, path, lstat, stat):
+ # type: (Text, Text, os.stat_result, os.stat_result) -> None
+ """
+ Initialize the :class:`.TreeEntry` instance.
+
+ *name* (:class:`str`) is the base name of the entry.
+
+ *path* (:class:`str`) is the relative path of the entry.
+
+ *lstat* (:class:`~os.stat_result`) is the stat result of the direct
+ entry.
+
+ *stat* (:class:`~os.stat_result`) is the stat result of the entry,
+ potentially linked.
+ """
+
+ self._lstat = lstat
+ """
+ *_lstat* (:class:`~os.stat_result`) is the stat result of the direct
+ entry.
+ """
+
+ self.name = name
+ """
+ *name* (:class:`str`) is the base name of the entry.
+ """
+
+ self.path = path
+ """
+ *path* (:class:`str`) is the relative path of the entry.
+ """
+
+ self._stat = stat
+ """
+ *_stat* (:class:`~os.stat_result`) is the stat result of the linked
+ entry.
+ """
+
+ def is_dir(self, follow_links=None):
+ # type: (Optional[bool]) -> bool
+ """
+ Get whether the entry is a directory.
+
+ *follow_links* (:class:`bool` or :data:`None`) is whether to follow
+ symbolic links. If this is :data:`True`, a symlink to a directory
+ will result in :data:`True`. Default is :data:`None` for :data:`True`.
+
+ Returns whether the entry is a directory (:class:`bool`).
+ """
+ if follow_links is None:
+ follow_links = True
+
+ node_stat = self._stat if follow_links else self._lstat
+ return stat.S_ISDIR(node_stat.st_mode)
+
+ def is_file(self, follow_links=None):
+ # type: (Optional[bool]) -> bool
+ """
+ Get whether the entry is a regular file.
+
+ *follow_links* (:class:`bool` or :data:`None`) is whether to follow
+ symbolic links. If this is :data:`True`, a symlink to a regular file
+ will result in :data:`True`. Default is :data:`None` for :data:`True`.
+
+ Returns whether the entry is a regular file (:class:`bool`).
+ """
+ if follow_links is None:
+ follow_links = True
+
+ node_stat = self._stat if follow_links else self._lstat
+ return stat.S_ISREG(node_stat.st_mode)
+
+ def is_symlink(self):
+ # type: () -> bool
+ """
+ Returns whether the entry is a symbolic link (:class:`bool`).
+ """
+ return stat.S_ISLNK(self._lstat.st_mode)
+
+ def stat(self, follow_links=None):
+ # type: (Optional[bool]) -> os.stat_result
+ """
+ Get the cached stat result for the entry.
+
+ *follow_links* (:class:`bool` or :data:`None`) is whether to follow
+ symbolic links. If this is :data:`True`, the stat result of the
+ linked file will be returned. Default is :data:`None` for :data:`True`.
+
+	Returns the stat result (:class:`~os.stat_result`).
+ """
+ if follow_links is None:
+ follow_links = True
+
+ return self._stat if follow_links else self._lstat
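A minimal usage sketch for the class above. The construction is hypothetical: the paths shown are illustrative, and in pathspec, TreeEntry instances are normally produced by the library's tree-walking helpers rather than built by hand::

    import os

    from pathspec.util import TreeEntry  # import location assumed from the file above

    # Hypothetical entry for a path that may be a symlink to a directory.
    st_link = os.lstat("project/docs")   # stat of the entry itself
    st_target = os.stat("project/docs")  # stat following any symlink
    entry = TreeEntry(name="docs", path="project/docs", lstat=st_link, stat=st_target)

    entry.is_symlink()                # True only if "project/docs" is itself a symlink.
    entry.is_dir()                    # Follows links by default: True for a symlink to a directory.
    entry.is_dir(follow_links=False)  # False for that same symlink.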
diff --git a/third_party/python/pip/pip-23.0.1.dist-info/LICENSE.txt b/third_party/python/pip/pip-23.0.1.dist-info/LICENSE.txt
new file mode 100644
index 0000000000..8e7b65eaf6
--- /dev/null
+++ b/third_party/python/pip/pip-23.0.1.dist-info/LICENSE.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2008-present The pip developers (see AUTHORS.txt file)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/python/pip/pip-23.0.1.dist-info/METADATA b/third_party/python/pip/pip-23.0.1.dist-info/METADATA
new file mode 100644
index 0000000000..984f9ad3f9
--- /dev/null
+++ b/third_party/python/pip/pip-23.0.1.dist-info/METADATA
@@ -0,0 +1,88 @@
+Metadata-Version: 2.1
+Name: pip
+Version: 23.0.1
+Summary: The PyPA recommended tool for installing Python packages.
+Home-page: https://pip.pypa.io/
+Author: The pip developers
+Author-email: distutils-sig@python.org
+License: MIT
+Project-URL: Documentation, https://pip.pypa.io
+Project-URL: Source, https://github.com/pypa/pip
+Project-URL: Changelog, https://pip.pypa.io/en/stable/news/
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.7
+License-File: LICENSE.txt
+
+pip - The Python Package Installer
+==================================
+
+.. image:: https://img.shields.io/pypi/v/pip.svg
+ :target: https://pypi.org/project/pip/
+
+.. image:: https://readthedocs.org/projects/pip/badge/?version=latest
+ :target: https://pip.pypa.io/en/latest
+
+pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
+
+Please take a look at our documentation for how to install and use pip:
+
+* `Installation`_
+* `Usage`_
+
+We release updates regularly, with a new version every 3 months. Find more details in our documentation:
+
+* `Release notes`_
+* `Release process`_
+
+In pip 20.3, we've `made a big improvement to the heart of pip`_; `learn more`_. We want your input, so `sign up for our user experience research studies`_ to help us do it right.
+
+**Note**: pip 21.0, in January 2021, removed Python 2 support, per pip's `Python 2 support policy`_. Please migrate to Python 3.
+
+If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms:
+
+* `Issue tracking`_
+* `Discourse channel`_
+* `User IRC`_
+
+If you want to get involved, head over to GitHub to get the source code, look at our development documentation, and feel free to jump on the developer mailing lists and chat rooms:
+
+* `GitHub page`_
+* `Development documentation`_
+* `Development IRC`_
+
+Code of Conduct
+---------------
+
+Everyone interacting in the pip project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+.. _package installer: https://packaging.python.org/guides/tool-recommendations/
+.. _Python Package Index: https://pypi.org
+.. _Installation: https://pip.pypa.io/en/stable/installation/
+.. _Usage: https://pip.pypa.io/en/stable/
+.. _Release notes: https://pip.pypa.io/en/stable/news.html
+.. _Release process: https://pip.pypa.io/en/latest/development/release-process/
+.. _GitHub page: https://github.com/pypa/pip
+.. _Development documentation: https://pip.pypa.io/en/latest/development
+.. _made a big improvement to the heart of pip: https://pyfound.blogspot.com/2020/11/pip-20-3-new-resolver.html
+.. _learn more: https://pip.pypa.io/en/latest/user_guide/#changes-to-the-pip-dependency-resolver-in-20-3-2020
+.. _sign up for our user experience research studies: https://pyfound.blogspot.com/2020/03/new-pip-resolver-to-roll-out-this-year.html
+.. _Python 2 support policy: https://pip.pypa.io/en/latest/development/release-process/#python-2-support
+.. _Issue tracking: https://github.com/pypa/pip/issues
+.. _Discourse channel: https://discuss.python.org/c/packaging
+.. _User IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa
+.. _Development IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa-dev
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
diff --git a/third_party/python/pip/pip-23.0.1.dist-info/RECORD b/third_party/python/pip/pip-23.0.1.dist-info/RECORD
new file mode 100644
index 0000000000..6c0f62fa4c
--- /dev/null
+++ b/third_party/python/pip/pip-23.0.1.dist-info/RECORD
@@ -0,0 +1,506 @@
+pip/__init__.py,sha256=5yroedzc2dKKbcynDrHX8vBoLxqU27KmFvvHmdqQN9w,357
+pip/__main__.py,sha256=mXwWDftNLMKfwVqKFWGE_uuBZvGSIiUELhLkeysIuZc,1198
+pip/__pip-runner__.py,sha256=EnrfKmKMzWAdqg_JicLCOP9Y95Ux7zHh4ObvqLtQcjo,1444
+pip/py.typed,sha256=EBVvvPRTn_eIpz5e5QztSCdrMX7Qwd7VP93RSoIlZ2I,286
+pip/_internal/__init__.py,sha256=nnFCuxrPMgALrIDxSoy-H6Zj4W4UY60D-uL1aJyq0pc,573
+pip/_internal/build_env.py,sha256=1ESpqw0iupS_K7phZK5zshVE5Czy9BtGLFU4W6Enva8,10243
+pip/_internal/cache.py,sha256=C3n78VnBga9rjPXZqht_4A4d-T25poC7K0qBM7FHDhU,10734
+pip/_internal/configuration.py,sha256=uBKTus43pDIO6IzT2mLWQeROmHhtnoabhniKNjPYvD0,13529
+pip/_internal/exceptions.py,sha256=cU4dz7x-1uFGrf2A1_Np9tKcy599bRJKRJkikgARxW4,24244
+pip/_internal/main.py,sha256=r-UnUe8HLo5XFJz8inTcOOTiu_sxNhgHb6VwlGUllOI,340
+pip/_internal/pyproject.py,sha256=QqSZR5AGwtf3HTa8NdbDq2yj9T2r9S2h9gnU4aX2Kvg,6987
+pip/_internal/self_outdated_check.py,sha256=pnqBuKKZQ8OxKP0MaUUiDHl3AtyoMJHHG4rMQ7YcYXY,8167
+pip/_internal/wheel_builder.py,sha256=8cObBCu4mIsMJqZM7xXI9DO3vldiAnRNa1Gt6izPPTs,13079
+pip/_internal/cli/__init__.py,sha256=FkHBgpxxb-_gd6r1FjnNhfMOzAUYyXoXKJ6abijfcFU,132
+pip/_internal/cli/autocompletion.py,sha256=wY2JPZY2Eji1vhR7bVo-yCBPJ9LCy6P80iOAhZD1Vi8,6676
+pip/_internal/cli/base_command.py,sha256=t1D5x40Hfn9HnPnMt-iSxvqL14nht2olBCacW74pc-k,7842
+pip/_internal/cli/cmdoptions.py,sha256=0OHXkgnppCtC4QyF28ZL8FBosVUXG5pWj2uzO1CgWhM,29497
+pip/_internal/cli/command_context.py,sha256=RHgIPwtObh5KhMrd3YZTkl8zbVG-6Okml7YbFX4Ehg0,774
+pip/_internal/cli/main.py,sha256=ioJ8IVlb2K1qLOxR-tXkee9lURhYV89CDM71MKag7YY,2472
+pip/_internal/cli/main_parser.py,sha256=laDpsuBDl6kyfywp9eMMA9s84jfH2TJJn-vmL0GG90w,4338
+pip/_internal/cli/parser.py,sha256=tWP-K1uSxnJyXu3WE0kkH3niAYRBeuUaxeydhzOdhL4,10817
+pip/_internal/cli/progress_bars.py,sha256=So4mPoSjXkXiSHiTzzquH3VVyVD_njXlHJSExYPXAow,1968
+pip/_internal/cli/req_command.py,sha256=ypTutLv4j_efxC2f6C6aCQufxre-zaJdi5m_tWlLeBk,18172
+pip/_internal/cli/spinners.py,sha256=hIJ83GerdFgFCdobIA23Jggetegl_uC4Sp586nzFbPE,5118
+pip/_internal/cli/status_codes.py,sha256=sEFHUaUJbqv8iArL3HAtcztWZmGOFX01hTesSytDEh0,116
+pip/_internal/commands/__init__.py,sha256=5oRO9O3dM2vGuh0bFw4HOVletryrz5HHMmmPWwJrH9U,3882
+pip/_internal/commands/cache.py,sha256=muaT0mbL-ZUpn6AaushVAipzTiMwE4nV2BLbJBwt_KQ,7582
+pip/_internal/commands/check.py,sha256=0gjXR7j36xJT5cs2heYU_dfOfpnFfzX8OoPNNoKhqdM,1685
+pip/_internal/commands/completion.py,sha256=H0TJvGrdsoleuIyQKzJbicLFppYx2OZA0BLNpQDeFjI,4129
+pip/_internal/commands/configuration.py,sha256=NB5uf8HIX8-li95YLoZO09nALIWlLCHDF5aifSKcBn8,9815
+pip/_internal/commands/debug.py,sha256=AesEID-4gPFDWTwPiPaGZuD4twdT-imaGuMR5ZfSn8s,6591
+pip/_internal/commands/download.py,sha256=LwKEyYMG2L67nQRyGo8hQdNEeMU2bmGWqJfcB8JDXas,5289
+pip/_internal/commands/freeze.py,sha256=gCjoD6foBZPBAAYx5t8zZLkJhsF_ZRtnb3dPuD7beO8,2951
+pip/_internal/commands/hash.py,sha256=EVVOuvGtoPEdFi8SNnmdqlCQrhCxV-kJsdwtdcCnXGQ,1703
+pip/_internal/commands/help.py,sha256=gcc6QDkcgHMOuAn5UxaZwAStsRBrnGSn_yxjS57JIoM,1132
+pip/_internal/commands/index.py,sha256=cGQVSA5dAs7caQ9sz4kllYvaI4ZpGiq1WhCgaImXNSA,4793
+pip/_internal/commands/inspect.py,sha256=2wSPt9yfr3r6g-s2S5L6PvRtaHNVyb4TuodMStJ39cw,3188
+pip/_internal/commands/install.py,sha256=3vT9tnHOV-p6dPMaKDqzivqmcq_kPAI-jVkxOEwN5C4,32389
+pip/_internal/commands/list.py,sha256=Fk1TSxB33NlRS4qlLQ0xwnytnF9-zkQJbKQYv2xc4Q4,12343
+pip/_internal/commands/search.py,sha256=sbBZiARRc050QquOKcCvOr2K3XLsoYebLKZGRi__iUI,5697
+pip/_internal/commands/show.py,sha256=t5jia4zcYJRJZy4U_Von7zMl03hJmmcofj6oDNTnj7Y,6419
+pip/_internal/commands/uninstall.py,sha256=OIqO9tqadY8kM4HwhFf1Q62fUIp7v8KDrTRo8yWMz7Y,3886
+pip/_internal/commands/wheel.py,sha256=mbFJd4dmUfrVFJkQbK8n2zHyRcD3AI91f7EUo9l3KYg,7396
+pip/_internal/distributions/__init__.py,sha256=Hq6kt6gXBgjNit5hTTWLAzeCNOKoB-N0pGYSqehrli8,858
+pip/_internal/distributions/base.py,sha256=jrF1Vi7eGyqFqMHrieh1PIOrGU7KeCxhYPZnbvtmvGY,1221
+pip/_internal/distributions/installed.py,sha256=NI2OgsgH9iBq9l5vB-56vOg5YsybOy-AU4VE5CSCO2I,729
+pip/_internal/distributions/sdist.py,sha256=SQBdkatXSigKGG_SaD0U0p1Jwdfrg26UCNcHgkXZfdA,6494
+pip/_internal/distributions/wheel.py,sha256=m-J4XO-gvFerlYsFzzSXYDvrx8tLZlJFTCgDxctn8ig,1164
+pip/_internal/index/__init__.py,sha256=vpt-JeTZefh8a-FC22ZeBSXFVbuBcXSGiILhQZJaNpQ,30
+pip/_internal/index/collector.py,sha256=3OmYZ3tCoRPGOrELSgQWG-03M-bQHa2-VCA3R_nJAaU,16504
+pip/_internal/index/package_finder.py,sha256=rrUw4vj7QE_eMt022jw--wQiKznMaUgVBkJ1UCrVUxo,37873
+pip/_internal/index/sources.py,sha256=SVyPitv08-Qalh2_Bk5diAJ9GAA_d-a93koouQodAG0,6557
+pip/_internal/locations/__init__.py,sha256=Dh8LJWG8LRlDK4JIj9sfRF96TREzE--N_AIlx7Tqoe4,15365
+pip/_internal/locations/_distutils.py,sha256=cmi6h63xYNXhQe7KEWEMaANjHFy5yQOPt_1_RCWyXMY,6100
+pip/_internal/locations/_sysconfig.py,sha256=jyNVtUfMIf0mtyY-Xp1m9yQ8iwECozSVVFmjkN9a2yw,7680
+pip/_internal/locations/base.py,sha256=RQiPi1d4FVM2Bxk04dQhXZ2PqkeljEL2fZZ9SYqIQ78,2556
+pip/_internal/metadata/__init__.py,sha256=84j1dPJaIoz5Q2ZTPi0uB1iaDAHiUNfKtYSGQCfFKpo,4280
+pip/_internal/metadata/_json.py,sha256=BTkWfFDrWFwuSodImjtbAh8wCL3isecbnjTb5E6UUDI,2595
+pip/_internal/metadata/base.py,sha256=vIwIo1BtoqegehWMAXhNrpLGYBq245rcaCNkBMPnTU8,25277
+pip/_internal/metadata/pkg_resources.py,sha256=WjwiNdRsvxqxL4MA5Tb5a_q3Q3sUhdpbZF8wGLtPMI0,9773
+pip/_internal/metadata/importlib/__init__.py,sha256=9ZVO8BoE7NEZPmoHp5Ap_NJo0HgNIezXXg-TFTtt3Z4,107
+pip/_internal/metadata/importlib/_compat.py,sha256=GAe_prIfCE4iUylrnr_2dJRlkkBVRUbOidEoID7LPoE,1882
+pip/_internal/metadata/importlib/_dists.py,sha256=BUV8y6D0PePZrEN3vfJL-m1FDqZ6YPRgAiBeBinHhNg,8181
+pip/_internal/metadata/importlib/_envs.py,sha256=7BxanCh3T7arusys__O2ZHJdnmDhQXFmfU7x1-jB5xI,7457
+pip/_internal/models/__init__.py,sha256=3DHUd_qxpPozfzouoqa9g9ts1Czr5qaHfFxbnxriepM,63
+pip/_internal/models/candidate.py,sha256=6pcABsaR7CfIHlbJbr2_kMkVJFL_yrYjTx6SVWUnCPQ,990
+pip/_internal/models/direct_url.py,sha256=f3WiKUwWPdBkT1xm7DlolS32ZAMYh3jbkkVH-BUON5A,6626
+pip/_internal/models/format_control.py,sha256=DJpMYjxeYKKQdwNcML2_F0vtAh-qnKTYe-CpTxQe-4g,2520
+pip/_internal/models/index.py,sha256=tYnL8oxGi4aSNWur0mG8DAP7rC6yuha_MwJO8xw0crI,1030
+pip/_internal/models/installation_report.py,sha256=Hymmzv9-e3WhtewYm2NIOeMyAB6lXp736mpYqb9scZ0,2617
+pip/_internal/models/link.py,sha256=nfybVSpXgVHeU0MkC8hMkN2IgMup8Pdaudg74_sQEC8,18602
+pip/_internal/models/scheme.py,sha256=3EFQp_ICu_shH1-TBqhl0QAusKCPDFOlgHFeN4XowWs,738
+pip/_internal/models/search_scope.py,sha256=iGPQQ6a4Lau8oGQ_FWj8aRLik8A21o03SMO5KnSt-Cg,4644
+pip/_internal/models/selection_prefs.py,sha256=KZdi66gsR-_RUXUr9uejssk3rmTHrQVJWeNA2sV-VSY,1907
+pip/_internal/models/target_python.py,sha256=qKpZox7J8NAaPmDs5C_aniwfPDxzvpkrCKqfwndG87k,3858
+pip/_internal/models/wheel.py,sha256=YqazoIZyma_Q1ejFa1C7NHKQRRWlvWkdK96VRKmDBeI,3600
+pip/_internal/network/__init__.py,sha256=jf6Tt5nV_7zkARBrKojIXItgejvoegVJVKUbhAa5Ioc,50
+pip/_internal/network/auth.py,sha256=MQVP0k4hUXk8ReYEfsGQ5t7_TS7cNHQuaHJuBlJLHxU,16507
+pip/_internal/network/cache.py,sha256=hgXftU-eau4MWxHSLquTMzepYq5BPC2zhCkhN3glBy8,2145
+pip/_internal/network/download.py,sha256=HvDDq9bVqaN3jcS3DyVJHP7uTqFzbShdkf7NFSoHfkw,6096
+pip/_internal/network/lazy_wheel.py,sha256=PbPyuleNhtEq6b2S7rufoGXZWMD15FAGL4XeiAQ8FxA,7638
+pip/_internal/network/session.py,sha256=BpDOJ7_Xw5VkgPYWsePzcaqOfcyRZcB2AW7W0HGBST0,18443
+pip/_internal/network/utils.py,sha256=6A5SrUJEEUHxbGtbscwU2NpCyz-3ztiDlGWHpRRhsJ8,4073
+pip/_internal/network/xmlrpc.py,sha256=AzQgG4GgS152_cqmGr_Oz2MIXsCal-xfsis7fA7nmU0,1791
+pip/_internal/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/check.py,sha256=WsN7z0_QSgJjw0JsWWcqOHj4wWTaFv0J7mxgUByDCOg,5122
+pip/_internal/operations/freeze.py,sha256=mwTZ2uML8aQgo3k8MR79a7SZmmmvdAJqdyaknKbavmg,9784
+pip/_internal/operations/prepare.py,sha256=BeYXrLFpRoV5XBnRXQHxRA2plyC36kK9Pms5D9wjCo4,25091
+pip/_internal/operations/build/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/operations/build/build_tracker.py,sha256=vf81EwomN3xe9G8qRJED0VGqNikmRQRQoobNsxi5Xrs,4133
+pip/_internal/operations/build/metadata.py,sha256=9S0CUD8U3QqZeXp-Zyt8HxwU90lE4QrnYDgrqZDzBnc,1422
+pip/_internal/operations/build/metadata_editable.py,sha256=VLL7LvntKE8qxdhUdEJhcotFzUsOSI8NNS043xULKew,1474
+pip/_internal/operations/build/metadata_legacy.py,sha256=o-eU21As175hDC7dluM1fJJ_FqokTIShyWpjKaIpHZw,2198
+pip/_internal/operations/build/wheel.py,sha256=sT12FBLAxDC6wyrDorh8kvcZ1jG5qInCRWzzP-UkJiQ,1075
+pip/_internal/operations/build/wheel_editable.py,sha256=yOtoH6zpAkoKYEUtr8FhzrYnkNHQaQBjWQ2HYae1MQg,1417
+pip/_internal/operations/build/wheel_legacy.py,sha256=C9j6rukgQI1n_JeQLoZGuDdfUwzCXShyIdPTp6edbMQ,3064
+pip/_internal/operations/install/__init__.py,sha256=mX7hyD2GNBO2mFGokDQ30r_GXv7Y_PLdtxcUv144e-s,51
+pip/_internal/operations/install/editable_legacy.py,sha256=ee4kfJHNuzTdKItbfAsNOSEwq_vD7DRPGkBdK48yBhU,1354
+pip/_internal/operations/install/legacy.py,sha256=cHdcHebyzf8w7OaOLwcsTNSMSSV8WBoAPFLay_9CjE8,4105
+pip/_internal/operations/install/wheel.py,sha256=CxzEg2wTPX4SxNTPIx0ozTqF1X7LhpCyP3iM2FjcKUE,27407
+pip/_internal/req/__init__.py,sha256=rUQ9d_Sh3E5kNYqX9pkN0D06YL-LrtcbJQ-LiIonq08,2807
+pip/_internal/req/constructors.py,sha256=ypjtq1mOQ3d2mFkFPMf_6Mr8SLKeHQk3tUKHA1ddG0U,16611
+pip/_internal/req/req_file.py,sha256=N6lPO3c0to_G73YyGAnk7VUYmed5jV4Qxgmt1xtlXVg,17646
+pip/_internal/req/req_install.py,sha256=X4WNQlTtvkeATwWdSiJcNLihwbYI_EnGDgE99p-Aa00,35763
+pip/_internal/req/req_set.py,sha256=j3esG0s6SzoVReX9rWn4rpYNtyET_fwxbwJPRimvRxo,2858
+pip/_internal/req/req_uninstall.py,sha256=ZFQfgSNz6H1BMsgl87nQNr2iaQCcbFcmXpW8rKVQcic,24045
+pip/_internal/resolution/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/base.py,sha256=qlmh325SBVfvG6Me9gc5Nsh5sdwHBwzHBq6aEXtKsLA,583
+pip/_internal/resolution/legacy/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/legacy/resolver.py,sha256=9em8D5TcSsEN4xZM1WreaRShOnyM4LlvhMSHpUPsocE,24129
+pip/_internal/resolution/resolvelib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/resolution/resolvelib/base.py,sha256=u1O4fkvCO4mhmu5i32xrDv9AX5NgUci_eYVyBDQhTIM,5220
+pip/_internal/resolution/resolvelib/candidates.py,sha256=6kQZeMzwibnL4lO6bW0hUQQjNEvXfADdFphRRkRvOtc,18963
+pip/_internal/resolution/resolvelib/factory.py,sha256=OnjkLIgyk5Tol7uOOqapA1D4qiRHWmPU18DF1yN5N8o,27878
+pip/_internal/resolution/resolvelib/found_candidates.py,sha256=hvL3Hoa9VaYo-qEOZkBi2Iqw251UDxPz-uMHVaWmLpE,5705
+pip/_internal/resolution/resolvelib/provider.py,sha256=Vd4jW_NnyifB-HMkPYtZIO70M3_RM0MbL5YV6XyBM-w,9914
+pip/_internal/resolution/resolvelib/reporter.py,sha256=3ZVVYrs5PqvLFJkGLcuXoMK5mTInFzl31xjUpDBpZZk,2526
+pip/_internal/resolution/resolvelib/requirements.py,sha256=B1ndvKPSuyyyTEXt9sKhbwminViSWnBrJa7qO2ln4Z0,5455
+pip/_internal/resolution/resolvelib/resolver.py,sha256=nYZ9bTFXj5c1ILKnkSgU7tUCTYyo5V5J-J0sKoA7Wzg,11533
+pip/_internal/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_internal/utils/_log.py,sha256=-jHLOE_THaZz5BFcCnoSL9EYAtJ0nXem49s9of4jvKw,1015
+pip/_internal/utils/appdirs.py,sha256=swgcTKOm3daLeXTW6v5BUS2Ti2RvEnGRQYH_yDXklAo,1665
+pip/_internal/utils/compat.py,sha256=ACyBfLgj3_XG-iA5omEDrXqDM0cQKzi8h8HRBInzG6Q,1884
+pip/_internal/utils/compatibility_tags.py,sha256=ydin8QG8BHqYRsPY4OL6cmb44CbqXl1T0xxS97VhHkk,5377
+pip/_internal/utils/datetime.py,sha256=m21Y3wAtQc-ji6Veb6k_M5g6A0ZyFI4egchTdnwh-pQ,242
+pip/_internal/utils/deprecation.py,sha256=OLc7GzDwPob9y8jscDYCKUNBV-9CWwqFplBOJPLOpBM,5764
+pip/_internal/utils/direct_url_helpers.py,sha256=6F1tc2rcKaCZmgfVwsE6ObIe_Pux23mUVYA-2D9wCFc,3206
+pip/_internal/utils/distutils_args.py,sha256=bYUt4wfFJRaeGO4VHia6FNaA8HlYXMcKuEq1zYijY5g,1115
+pip/_internal/utils/egg_link.py,sha256=ZryCchR_yQSCsdsMkCpxQjjLbQxObA5GDtLG0RR5mGc,2118
+pip/_internal/utils/encoding.py,sha256=qqsXDtiwMIjXMEiIVSaOjwH5YmirCaK-dIzb6-XJsL0,1169
+pip/_internal/utils/entrypoints.py,sha256=YlhLTRl2oHBAuqhc-zmL7USS67TPWVHImjeAQHreZTQ,3064
+pip/_internal/utils/filesystem.py,sha256=RhMIXUaNVMGjc3rhsDahWQ4MavvEQDdqXqgq-F6fpw8,5122
+pip/_internal/utils/filetypes.py,sha256=i8XAQ0eFCog26Fw9yV0Yb1ygAqKYB1w9Cz9n0fj8gZU,716
+pip/_internal/utils/glibc.py,sha256=tDfwVYnJCOC0BNVpItpy8CGLP9BjkxFHdl0mTS0J7fc,3110
+pip/_internal/utils/hashes.py,sha256=1WhkVNIHNfuYLafBHThIjVKGplxFJXSlQtuG2mXNlJI,4831
+pip/_internal/utils/inject_securetransport.py,sha256=o-QRVMGiENrTJxw3fAhA7uxpdEdw6M41TjHYtSVRrcg,795
+pip/_internal/utils/logging.py,sha256=U2q0i1n8hPS2gQh8qcocAg5dovGAa_bR24akmXMzrk4,11632
+pip/_internal/utils/misc.py,sha256=XLtMDOmy8mWiNLuPIhxPdO1bWIleLdN6JnWDZsXfTgE,22253
+pip/_internal/utils/models.py,sha256=5GoYU586SrxURMvDn_jBMJInitviJg4O5-iOU-6I0WY,1193
+pip/_internal/utils/packaging.py,sha256=5Wm6_x7lKrlqVjPI5MBN_RurcRHwVYoQ7Ksrs84de7s,2108
+pip/_internal/utils/setuptools_build.py,sha256=4i3CuS34yNrkePnZ73rR47pyDzpZBo-SX9V5PNDSSHY,5662
+pip/_internal/utils/subprocess.py,sha256=0EMhgfPGFk8FZn6Qq7Hp9PN6YHuQNWiVby4DXcTCON4,9200
+pip/_internal/utils/temp_dir.py,sha256=aCX489gRa4Nu0dMKRFyGhV6maJr60uEynu5uCbKR4Qg,7702
+pip/_internal/utils/unpacking.py,sha256=SBb2iV1crb89MDRTEKY86R4A_UOWApTQn9VQVcMDOlE,8821
+pip/_internal/utils/urls.py,sha256=AhaesUGl-9it6uvG6fsFPOr9ynFpGaTMk4t5XTX7Z_Q,1759
+pip/_internal/utils/virtualenv.py,sha256=S6f7csYorRpiD6cvn3jISZYc3I8PJC43H5iMFpRAEDU,3456
+pip/_internal/utils/wheel.py,sha256=lXOgZyTlOm5HmK8tw5iw0A3_5A6wRzsXHOaQkIvvloU,4549
+pip/_internal/vcs/__init__.py,sha256=UAqvzpbi0VbZo3Ub6skEeZAw-ooIZR-zX_WpCbxyCoU,596
+pip/_internal/vcs/bazaar.py,sha256=j0oin0fpGRHcCFCxEcpPCQoFEvA-DMLULKdGP8Nv76o,3519
+pip/_internal/vcs/git.py,sha256=mjhwudCx9WlLNkxZ6_kOKmueF0rLoU2i1xeASKF6yiQ,18116
+pip/_internal/vcs/mercurial.py,sha256=Bzbd518Jsx-EJI0IhIobiQqiRsUv5TWYnrmRIFWE0Gw,5238
+pip/_internal/vcs/subversion.py,sha256=vhZs8L-TNggXqM1bbhl-FpbxE3TrIB6Tgnx8fh3S2HE,11729
+pip/_internal/vcs/versioncontrol.py,sha256=KUOc-hN51em9jrqxKwUR3JnkgSE-xSOqMiiJcSaL6B8,22811
+pip/_vendor/__init__.py,sha256=fNxOSVD0auElsD8fN9tuq5psfgMQ-RFBtD4X5gjlRkg,4966
+pip/_vendor/six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549
+pip/_vendor/typing_extensions.py,sha256=VKZ_nHsuzDbKOVUY2CTdavwBgfZ2EXRyluZHRzUYAbg,80114
+pip/_vendor/vendor.txt,sha256=3i3Zr7_kRDD9UEva0I8YOMroCZ8xuZ9OWd_Q4jmazqE,476
+pip/_vendor/cachecontrol/__init__.py,sha256=hrxlv3q7upsfyMw8k3gQ9vagBax1pYHSGGqYlZ0Zk0M,465
+pip/_vendor/cachecontrol/_cmd.py,sha256=lxUXqfNTVx84zf6tcWbkLZHA6WVBRtJRpfeA9ZqhaAY,1379
+pip/_vendor/cachecontrol/adapter.py,sha256=ew9OYEQHEOjvGl06ZsuX8W3DAvHWsQKHwWAxISyGug8,5033
+pip/_vendor/cachecontrol/cache.py,sha256=Tty45fOjH40fColTGkqKQvQQmbYsMpk-nCyfLcv2vG4,1535
+pip/_vendor/cachecontrol/compat.py,sha256=LNx7vqBndYdHU8YuJt53ab_8rzMGTXVrvMb7CZJkxG0,778
+pip/_vendor/cachecontrol/controller.py,sha256=bAYrt7x_VH4toNpI066LQxbHpYGpY1MxxmZAhspplvw,16416
+pip/_vendor/cachecontrol/filewrapper.py,sha256=X4BAQOO26GNOR7nH_fhTzAfeuct2rBQcx_15MyFBpcs,3946
+pip/_vendor/cachecontrol/heuristics.py,sha256=8kAyuZLSCyEIgQr6vbUwfhpqg9ows4mM0IV6DWazevI,4154
+pip/_vendor/cachecontrol/serialize.py,sha256=_U1NU_C-SDgFzkbAxAsPDgMTHeTWZZaHCQnZN_jh0U8,7105
+pip/_vendor/cachecontrol/wrapper.py,sha256=X3-KMZ20Ho3VtqyVaXclpeQpFzokR5NE8tZSfvKVaB8,774
+pip/_vendor/cachecontrol/caches/__init__.py,sha256=h-1cUmOz6mhLsjTjOrJ8iPejpGdLCyG4lzTftfGZvLg,242
+pip/_vendor/cachecontrol/caches/file_cache.py,sha256=GpexcE29LoY4MaZwPUTcUBZaDdcsjqyLxZFznk8Hbr4,5271
+pip/_vendor/cachecontrol/caches/redis_cache.py,sha256=mp-QWonP40I3xJGK3XVO-Gs9a3UjzlqqEmp9iLJH9F4,1033
+pip/_vendor/certifi/__init__.py,sha256=bK_nm9bLJzNvWZc2oZdiTwg2KWD4HSPBWGaM0zUDvMw,94
+pip/_vendor/certifi/__main__.py,sha256=1k3Cr95vCxxGRGDljrW3wMdpZdL3Nhf0u1n-k2qdsCY,255
+pip/_vendor/certifi/cacert.pem,sha256=LBHDzgj_xA05AxnHK8ENT5COnGNElNZe0svFUHMf1SQ,275233
+pip/_vendor/certifi/core.py,sha256=ZwiOsv-sD_ouU1ft8wy_xZ3LQ7UbcVzyqj2XNyrsZis,4279
+pip/_vendor/chardet/__init__.py,sha256=57R-HSxj0PWmILMN0GFmUNqEMfrEVSamXyjD-W6_fbs,4797
+pip/_vendor/chardet/big5freq.py,sha256=ltcfP-3PjlNHCoo5e4a7C4z-2DhBTXRfY6jbMbB7P30,31274
+pip/_vendor/chardet/big5prober.py,sha256=lPMfwCX6v2AaPgvFh_cSWZcgLDbWiFCHLZ_p9RQ9uxE,1763
+pip/_vendor/chardet/chardistribution.py,sha256=13B8XUG4oXDuLdXvfbIWwLFeR-ZU21AqTS1zcdON8bU,10032
+pip/_vendor/chardet/charsetgroupprober.py,sha256=UKK3SaIZB2PCdKSIS0gnvMtLR9JJX62M-fZJu3OlWyg,3915
+pip/_vendor/chardet/charsetprober.py,sha256=L3t8_wIOov8em-vZWOcbkdsrwe43N6_gqNh5pH7WPd4,5420
+pip/_vendor/chardet/codingstatemachine.py,sha256=K7k69sw3jY5DmTXoSJQVsUtFIQKYPQVOSJJhBuGv_yE,3732
+pip/_vendor/chardet/codingstatemachinedict.py,sha256=0GY3Hi2qIZvDrOOJ3AtqppM1RsYxr_66ER4EHjuMiMc,542
+pip/_vendor/chardet/cp949prober.py,sha256=0jKRV7fECuWI16rNnks0ZECKA1iZYCIEaP8A1ZvjUSI,1860
+pip/_vendor/chardet/enums.py,sha256=TzECiZoCKNMqgwU76cPCeKWFBqaWvAdLMev5_bCkhY8,1683
+pip/_vendor/chardet/escprober.py,sha256=Kho48X65xE0scFylIdeJjM2bcbvRvv0h0WUbMWrJD3A,4006
+pip/_vendor/chardet/escsm.py,sha256=AqyXpA2FQFD7k-buBty_7itGEYkhmVa8X09NLRul3QM,12176
+pip/_vendor/chardet/eucjpprober.py,sha256=5KYaM9fsxkRYzw1b5k0fL-j_-ezIw-ij9r97a9MHxLY,3934
+pip/_vendor/chardet/euckrfreq.py,sha256=3mHuRvXfsq_QcQysDQFb8qSudvTiol71C6Ic2w57tKM,13566
+pip/_vendor/chardet/euckrprober.py,sha256=hiFT6wM174GIwRvqDsIcuOc-dDsq2uPKMKbyV8-1Xnc,1753
+pip/_vendor/chardet/euctwfreq.py,sha256=2alILE1Lh5eqiFJZjzRkMQXolNJRHY5oBQd-vmZYFFM,36913
+pip/_vendor/chardet/euctwprober.py,sha256=NxbpNdBtU0VFI0bKfGfDkpP7S2_8_6FlO87dVH0ogws,1753
+pip/_vendor/chardet/gb2312freq.py,sha256=49OrdXzD-HXqwavkqjo8Z7gvs58hONNzDhAyMENNkvY,20735
+pip/_vendor/chardet/gb2312prober.py,sha256=KPEBueaSLSvBpFeINMu0D6TgHcR90e5PaQawifzF4o0,1759
+pip/_vendor/chardet/hebrewprober.py,sha256=96T_Lj_OmW-fK7JrSHojYjyG3fsGgbzkoTNleZ3kfYE,14537
+pip/_vendor/chardet/jisfreq.py,sha256=mm8tfrwqhpOd3wzZKS4NJqkYBQVcDfTM2JiQ5aW932E,25796
+pip/_vendor/chardet/johabfreq.py,sha256=dBpOYG34GRX6SL8k_LbS9rxZPMjLjoMlgZ03Pz5Hmqc,42498
+pip/_vendor/chardet/johabprober.py,sha256=O1Qw9nVzRnun7vZp4UZM7wvJSv9W941mEU9uDMnY3DU,1752
+pip/_vendor/chardet/jpcntx.py,sha256=uhHrYWkLxE_rF5OkHKInm0HUsrjgKHHVQvtt3UcvotA,27055
+pip/_vendor/chardet/langbulgarianmodel.py,sha256=vmbvYFP8SZkSxoBvLkFqKiH1sjma5ihk3PTpdy71Rr4,104562
+pip/_vendor/chardet/langgreekmodel.py,sha256=JfB7bupjjJH2w3X_mYnQr9cJA_7EuITC2cRW13fUjeI,98484
+pip/_vendor/chardet/langhebrewmodel.py,sha256=3HXHaLQPNAGcXnJjkIJfozNZLTvTJmf4W5Awi6zRRKc,98196
+pip/_vendor/chardet/langhungarianmodel.py,sha256=WxbeQIxkv8YtApiNqxQcvj-tMycsoI4Xy-fwkDHpP_Y,101363
+pip/_vendor/chardet/langrussianmodel.py,sha256=s395bTZ87ESTrZCOdgXbEjZ9P1iGPwCl_8xSsac_DLY,128035
+pip/_vendor/chardet/langthaimodel.py,sha256=7bJlQitRpTnVGABmbSznHnJwOHDy3InkTvtFUx13WQI,102774
+pip/_vendor/chardet/langturkishmodel.py,sha256=XY0eGdTIy4eQ9Xg1LVPZacb-UBhHBR-cq0IpPVHowKc,95372
+pip/_vendor/chardet/latin1prober.py,sha256=p15EEmFbmQUwbKLC7lOJVGHEZwcG45ubEZYTGu01J5g,5380
+pip/_vendor/chardet/macromanprober.py,sha256=9anfzmY6TBfUPDyBDOdY07kqmTHpZ1tK0jL-p1JWcOY,6077
+pip/_vendor/chardet/mbcharsetprober.py,sha256=Wr04WNI4F3X_VxEverNG-H25g7u-MDDKlNt-JGj-_uU,3715
+pip/_vendor/chardet/mbcsgroupprober.py,sha256=iRpaNBjV0DNwYPu_z6TiHgRpwYahiM7ztI_4kZ4Uz9A,2131
+pip/_vendor/chardet/mbcssm.py,sha256=hUtPvDYgWDaA2dWdgLsshbwRfm3Q5YRlRogdmeRUNQw,30391
+pip/_vendor/chardet/resultdict.py,sha256=ez4FRvN5KaSosJeJ2WzUyKdDdg35HDy_SSLPXKCdt5M,402
+pip/_vendor/chardet/sbcharsetprober.py,sha256=-nd3F90i7GpXLjehLVHqVBE0KlWzGvQUPETLBNn4o6U,6400
+pip/_vendor/chardet/sbcsgroupprober.py,sha256=gcgI0fOfgw_3YTClpbra_MNxwyEyJ3eUXraoLHYb59E,4137
+pip/_vendor/chardet/sjisprober.py,sha256=aqQufMzRw46ZpFlzmYaYeT2-nzmKb-hmcrApppJ862k,4007
+pip/_vendor/chardet/universaldetector.py,sha256=xYBrg4x0dd9WnT8qclfADVD9ondrUNkqPmvte1pa520,14848
+pip/_vendor/chardet/utf1632prober.py,sha256=pw1epGdMj1hDGiCu1AHqqzOEfjX8MVdiW7O1BlT8-eQ,8505
+pip/_vendor/chardet/utf8prober.py,sha256=8m08Ub5490H4jQ6LYXvFysGtgKoKsHUd2zH_i8_TnVw,2812
+pip/_vendor/chardet/version.py,sha256=lGtJcxGM44Qz4Cbk4rbbmrKxnNr1-97U25TameLehZw,244
+pip/_vendor/chardet/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/chardet/cli/chardetect.py,sha256=zibMVg5RpKb-ME9_7EYG4ZM2Sf07NHcQzZ12U-rYJho,3242
+pip/_vendor/chardet/metadata/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/chardet/metadata/languages.py,sha256=FhvBIdZFxRQ-dTwkb_0madRKgVBCaUMQz9I5xqjE5iQ,13560
+pip/_vendor/colorama/__init__.py,sha256=wePQA4U20tKgYARySLEC047ucNX-g8pRLpYBuiHlLb8,266
+pip/_vendor/colorama/ansi.py,sha256=Top4EeEuaQdBWdteKMEcGOTeKeF19Q-Wo_6_Cj5kOzQ,2522
+pip/_vendor/colorama/ansitowin32.py,sha256=vPNYa3OZbxjbuFyaVo0Tmhmy1FZ1lKMWCnT7odXpItk,11128
+pip/_vendor/colorama/initialise.py,sha256=-hIny86ClXo39ixh5iSCfUIa2f_h_bgKRDW7gqs-KLU,3325
+pip/_vendor/colorama/win32.py,sha256=YQOKwMTwtGBbsY4dL5HYTvwTeP9wIQra5MvPNddpxZs,6181
+pip/_vendor/colorama/winterm.py,sha256=XCQFDHjPi6AHYNdZwy0tA02H-Jh48Jp-HvCjeLeLp3U,7134
+pip/_vendor/colorama/tests/__init__.py,sha256=MkgPAEzGQd-Rq0w0PZXSX2LadRWhUECcisJY8lSrm4Q,75
+pip/_vendor/colorama/tests/ansi_test.py,sha256=FeViDrUINIZcr505PAxvU4AjXz1asEiALs9GXMhwRaE,2839
+pip/_vendor/colorama/tests/ansitowin32_test.py,sha256=RN7AIhMJ5EqDsYaCjVo-o4u8JzDD4ukJbmevWKS70rY,10678
+pip/_vendor/colorama/tests/initialise_test.py,sha256=BbPy-XfyHwJ6zKozuQOvNvQZzsx9vdb_0bYXn7hsBTc,6741
+pip/_vendor/colorama/tests/isatty_test.py,sha256=Pg26LRpv0yQDB5Ac-sxgVXG7hsA1NYvapFgApZfYzZg,1866
+pip/_vendor/colorama/tests/utils.py,sha256=1IIRylG39z5-dzq09R_ngufxyPZxgldNbrxKxUGwGKE,1079
+pip/_vendor/colorama/tests/winterm_test.py,sha256=qoWFPEjym5gm2RuMwpf3pOis3a5r_PJZFCzK254JL8A,3709
+pip/_vendor/distlib/__init__.py,sha256=acgfseOC55dNrVAzaBKpUiH3Z6V7Q1CaxsiQ3K7pC-E,581
+pip/_vendor/distlib/compat.py,sha256=tfoMrj6tujk7G4UC2owL6ArgDuCKabgBxuJRGZSmpko,41259
+pip/_vendor/distlib/database.py,sha256=o_mw0fAr93NDAHHHfqG54Y1Hi9Rkfrp2BX15XWZYK50,51697
+pip/_vendor/distlib/index.py,sha256=HFiDG7LMoaBs829WuotrfIwcErOOExUOR_AeBtw_TCU,20834
+pip/_vendor/distlib/locators.py,sha256=wNzG-zERzS_XGls-nBPVVyLRHa2skUlkn0-5n0trMWA,51991
+pip/_vendor/distlib/manifest.py,sha256=nQEhYmgoreaBZzyFzwYsXxJARu3fo4EkunU163U16iE,14811
+pip/_vendor/distlib/markers.py,sha256=TpHHHLgkzyT7YHbwj-2i6weRaq-Ivy2-MUnrDkjau-U,5058
+pip/_vendor/distlib/metadata.py,sha256=g_DIiu8nBXRzA-mWPRpatHGbmFZqaFoss7z9TG7QSUU,39801
+pip/_vendor/distlib/resources.py,sha256=LwbPksc0A1JMbi6XnuPdMBUn83X7BPuFNWqPGEKI698,10820
+pip/_vendor/distlib/scripts.py,sha256=BmkTKmiTk4m2cj-iueliatwz3ut_9SsABBW51vnQnZU,18102
+pip/_vendor/distlib/t32.exe,sha256=a0GV5kCoWsMutvliiCKmIgV98eRZ33wXoS-XrqvJQVs,97792
+pip/_vendor/distlib/t64-arm.exe,sha256=68TAa32V504xVBnufojh0PcenpR3U4wAqTqf-MZqbPw,182784
+pip/_vendor/distlib/t64.exe,sha256=gaYY8hy4fbkHYTTnA4i26ct8IQZzkBG2pRdy0iyuBrc,108032
+pip/_vendor/distlib/util.py,sha256=31dPXn3Rfat0xZLeVoFpuniyhe6vsbl9_QN-qd9Lhlk,66262
+pip/_vendor/distlib/version.py,sha256=WG__LyAa2GwmA6qSoEJtvJE8REA1LZpbSizy8WvhJLk,23513
+pip/_vendor/distlib/w32.exe,sha256=R4csx3-OGM9kL4aPIzQKRo5TfmRSHZo6QWyLhDhNBks,91648
+pip/_vendor/distlib/w64-arm.exe,sha256=xdyYhKj0WDcVUOCb05blQYvzdYIKMbmJn2SZvzkcey4,168448
+pip/_vendor/distlib/w64.exe,sha256=ejGf-rojoBfXseGLpya6bFTFPWRG21X5KvU8J5iU-K0,101888
+pip/_vendor/distlib/wheel.py,sha256=Rgqs658VsJ3R2845qwnZD8XQryV2CzWw2mghwLvxxsI,43898
+pip/_vendor/distro/__init__.py,sha256=2fHjF-SfgPvjyNZ1iHh_wjqWdR_Yo5ODHwZC0jLBPhc,981
+pip/_vendor/distro/__main__.py,sha256=bu9d3TifoKciZFcqRBuygV3GSuThnVD_m2IK4cz96Vs,64
+pip/_vendor/distro/distro.py,sha256=UZO1LjIhtFCMdlbiz39gj3raV-Amf3SBwzGzfApiMHw,49330
+pip/_vendor/idna/__init__.py,sha256=KJQN1eQBr8iIK5SKrJ47lXvxG0BJ7Lm38W4zT0v_8lk,849
+pip/_vendor/idna/codec.py,sha256=6ly5odKfqrytKT9_7UrlGklHnf1DSK2r9C6cSM4sa28,3374
+pip/_vendor/idna/compat.py,sha256=0_sOEUMT4CVw9doD3vyRhX80X19PwqFoUBs7gWsFME4,321
+pip/_vendor/idna/core.py,sha256=1JxchwKzkxBSn7R_oCE12oBu3eVux0VzdxolmIad24M,12950
+pip/_vendor/idna/idnadata.py,sha256=xUjqKqiJV8Ho_XzBpAtv5JFoVPSupK-SUXvtjygUHqw,44375
+pip/_vendor/idna/intranges.py,sha256=YBr4fRYuWH7kTKS2tXlFjM24ZF1Pdvcir-aywniInqg,1881
+pip/_vendor/idna/package_data.py,sha256=C_jHJzmX8PI4xq0jpzmcTMxpb5lDsq4o5VyxQzlVrZE,21
+pip/_vendor/idna/uts46data.py,sha256=zvjZU24s58_uAS850Mcd0NnD0X7_gCMAMjzWNIeUJdc,206539
+pip/_vendor/msgpack/__init__.py,sha256=NryGaKLDk_Egd58ZxXpnuI7OWO27AXz7S6CBFRM3sAY,1132
+pip/_vendor/msgpack/exceptions.py,sha256=dCTWei8dpkrMsQDcjQk74ATl9HsIBH0ybt8zOPNqMYc,1081
+pip/_vendor/msgpack/ext.py,sha256=TuldJPkYu8Wo_Xh0tFGL2l06-gY88NSR8tOje9fo2Wg,6080
+pip/_vendor/msgpack/fallback.py,sha256=OORDn86-fHBPlu-rPlMdM10KzkH6S_Rx9CHN1b7o4cg,34557
+pip/_vendor/packaging/__about__.py,sha256=ugASIO2w1oUyH8_COqQ2X_s0rDhjbhQC3yJocD03h2c,661
+pip/_vendor/packaging/__init__.py,sha256=b9Kk5MF7KxhhLgcDmiUWukN-LatWFxPdNug0joPhHSk,497
+pip/_vendor/packaging/_manylinux.py,sha256=XcbiXB-qcjv3bcohp6N98TMpOP4_j3m-iOA8ptK2GWY,11488
+pip/_vendor/packaging/_musllinux.py,sha256=_KGgY_qc7vhMGpoqss25n2hiLCNKRtvz9mCrS7gkqyc,4378
+pip/_vendor/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
+pip/_vendor/packaging/markers.py,sha256=AJBOcY8Oq0kYc570KuuPTkvuqjAlhufaE2c9sCUbm64,8487
+pip/_vendor/packaging/requirements.py,sha256=NtDlPBtojpn1IUC85iMjPNsUmufjpSlwnNA-Xb4m5NA,4676
+pip/_vendor/packaging/specifiers.py,sha256=LRQ0kFsHrl5qfcFNEEJrIFYsnIHQUJXY9fIsakTrrqE,30110
+pip/_vendor/packaging/tags.py,sha256=lmsnGNiJ8C4D_Pf9PbM0qgbZvD9kmB9lpZBQUZa3R_Y,15699
+pip/_vendor/packaging/utils.py,sha256=dJjeat3BS-TYn1RrUFVwufUMasbtzLfYRoy_HXENeFQ,4200
+pip/_vendor/packaging/version.py,sha256=_fLRNrFrxYcHVfyo8vk9j8s6JM8N_xsSxVFr6RJyco8,14665
+pip/_vendor/pkg_resources/__init__.py,sha256=NnpQ3g6BCHzpMgOR_OLBmYtniY4oOzdKpwqghfq_6ug,108287
+pip/_vendor/pkg_resources/py31compat.py,sha256=CRk8fkiPRDLsbi5pZcKsHI__Pbmh_94L8mr9Qy9Ab2U,562
+pip/_vendor/platformdirs/__init__.py,sha256=9iY4Z8iJDZB0djln6zHHwrPVWpB54TCygcnh--MujU0,12936
+pip/_vendor/platformdirs/__main__.py,sha256=ZmsnTxEOxtTvwa-Y_Vfab_JN3X4XCVeN8X0yyy9-qnc,1176
+pip/_vendor/platformdirs/android.py,sha256=GKizhyS7ESRiU67u8UnBJLm46goau9937EchXWbPBlk,4068
+pip/_vendor/platformdirs/api.py,sha256=MXKHXOL3eh_-trSok-JUTjAR_zjmmKF3rjREVABjP8s,4910
+pip/_vendor/platformdirs/macos.py,sha256=-3UXQewbT0yMhMdkzRXfXGAntmLIH7Qt4a9Hlf8I5_Y,2655
+pip/_vendor/platformdirs/unix.py,sha256=P-WQjSSieE38DXjMDa1t4XHnKJQ5idEaKT0PyXwm8KQ,6911
+pip/_vendor/platformdirs/version.py,sha256=qaN-fw_htIgKUVXoAuAEVgKxQu3tZ9qE2eiKkWIS7LA,160
+pip/_vendor/platformdirs/windows.py,sha256=LOrXLgI0CjQldDo2zhOZYGYZ6g4e_cJOCB_pF9aMRWQ,6596
+pip/_vendor/pygments/__init__.py,sha256=5oLcMLXD0cTG8YcHBPITtK1fS0JBASILEvEnWkTezgE,2999
+pip/_vendor/pygments/__main__.py,sha256=p0_rz3JZmNZMNZBOqDojaEx1cr9wmA9FQZX_TYl74lQ,353
+pip/_vendor/pygments/cmdline.py,sha256=rc0fah4eknRqFgn1wKNEwkq0yWnSqYOGaA4PaIeOxVY,23685
+pip/_vendor/pygments/console.py,sha256=hQfqCFuOlGk7DW2lPQYepsw-wkOH1iNt9ylNA1eRymM,1697
+pip/_vendor/pygments/filter.py,sha256=NglMmMPTRRv-zuRSE_QbWid7JXd2J4AvwjCW2yWALXU,1938
+pip/_vendor/pygments/formatter.py,sha256=6-TS2Y8pUMeWIUolWwr1O8ruC-U6HydWDwOdbAiJgJQ,2917
+pip/_vendor/pygments/lexer.py,sha256=ZPB_TGn_qzrXodRFwEdPzzJk6LZBo9BlfSy3lacc6zg,32005
+pip/_vendor/pygments/modeline.py,sha256=gIbMSYrjSWPk0oATz7W9vMBYkUyTK2OcdVyKjioDRvA,986
+pip/_vendor/pygments/plugin.py,sha256=5rPxEoB_89qQMpOs0nI4KyLOzAHNlbQiwEMOKxqNmv8,2591
+pip/_vendor/pygments/regexopt.py,sha256=c6xcXGpGgvCET_3VWawJJqAnOp0QttFpQEdOPNY2Py0,3072
+pip/_vendor/pygments/scanner.py,sha256=F2T2G6cpkj-yZtzGQr-sOBw5w5-96UrJWveZN6va2aM,3092
+pip/_vendor/pygments/sphinxext.py,sha256=F8L0211sPnXaiWutN0lkSUajWBwlgDMIEFFAbMWOvZY,4630
+pip/_vendor/pygments/style.py,sha256=RRnussX1YiK9Z7HipIvKorImxu3-HnkdpPCO4u925T0,6257
+pip/_vendor/pygments/token.py,sha256=vA2yNHGJBHfq4jNQSah7C9DmIOp34MmYHPA8P-cYAHI,6184
+pip/_vendor/pygments/unistring.py,sha256=gP3gK-6C4oAFjjo9HvoahsqzuV4Qz0jl0E0OxfDerHI,63187
+pip/_vendor/pygments/util.py,sha256=KgwpWWC3By5AiNwxGTI7oI9aXupH2TyZWukafBJe0Mg,9110
+pip/_vendor/pygments/filters/__init__.py,sha256=b5YuXB9rampSy2-cMtKxGQoMDfrG4_DcvVwZrzTlB6w,40386
+pip/_vendor/pygments/formatters/__init__.py,sha256=YTqGeHS17fNXCLMZpf7oCxBCKLB9YLsZ8IAsjGhawyg,4810
+pip/_vendor/pygments/formatters/_mapping.py,sha256=fCZgvsM6UEuZUG7J6lr47eVss5owKd_JyaNbDfxeqmQ,4104
+pip/_vendor/pygments/formatters/bbcode.py,sha256=JrL4ITjN-KzPcuQpPMBf1pm33eW2sDUNr8WzSoAJsJA,3314
+pip/_vendor/pygments/formatters/groff.py,sha256=xrOFoLbafSA9uHsSLRogy79_Zc4GWJ8tMK2hCdTJRsw,5086
+pip/_vendor/pygments/formatters/html.py,sha256=QNt9prPgxmbKx2M-nfDwoR1bIg06-sNouQuWnE434Wc,35441
+pip/_vendor/pygments/formatters/img.py,sha256=h75Y7IRZLZxDEIwyoOsdRLTwm7kLVPbODKkgEiJ0iKI,21938
+pip/_vendor/pygments/formatters/irc.py,sha256=iwk5tDJOxbCV64SCmOFyvk__x6RD60ay0nUn7ko9n7U,5871
+pip/_vendor/pygments/formatters/latex.py,sha256=thPbytJCIs2AUXsO3NZwqKtXJ-upOlcXP4CXsx94G4w,19351
+pip/_vendor/pygments/formatters/other.py,sha256=PczqK1Rms43lz6iucOLPeBMxIncPKOGBt-195w1ynII,5073
+pip/_vendor/pygments/formatters/pangomarkup.py,sha256=ZZzMsKJKXrsDniFeMTkIpe7aQ4VZYRHu0idWmSiUJ2U,2212
+pip/_vendor/pygments/formatters/rtf.py,sha256=abrKlWjipBkQvhIICxtjYTUNv6WME0iJJObFvqVuudE,5014
+pip/_vendor/pygments/formatters/svg.py,sha256=6MM9YyO8NhU42RTQfTWBiagWMnsf9iG5gwhqSriHORE,7335
+pip/_vendor/pygments/formatters/terminal.py,sha256=NpEGvwkC6LgMLQTjVzGrJXji3XcET1sb5JCunSCzoRo,4674
+pip/_vendor/pygments/formatters/terminal256.py,sha256=4v4OVizvsxtwWBpIy_Po30zeOzE5oJg_mOc1-rCjMDk,11753
+pip/_vendor/pygments/lexers/__init__.py,sha256=8d80-XfL5UKDCC1wRD1a_ZBZDkZ2HOe7Zul8SsnNYFE,11174
+pip/_vendor/pygments/lexers/_mapping.py,sha256=zEiCV5FPiBioMJQJjw9kk7IJ5Y9GwknS4VJPYlcNchs,70232
+pip/_vendor/pygments/lexers/python.py,sha256=gZROs9iNSOA18YyVghP1cUCD0OwYZ04a6PCwgSOCeSA,53376
+pip/_vendor/pygments/styles/__init__.py,sha256=iZDZ7PBKb55SpGlE1--cx9cbmWx5lVTH4bXO87t2Vok,3419
+pip/_vendor/pyparsing/__init__.py,sha256=ZPdI7pPo4IYXcABw-51AcqOzsxVvDtqnQbyn_qYWZvo,9171
+pip/_vendor/pyparsing/actions.py,sha256=wU9i32e0y1ymxKE3OUwSHO-SFIrt1h_wv6Ws0GQjpNU,6426
+pip/_vendor/pyparsing/common.py,sha256=lFL97ooIeR75CmW5hjURZqwDCTgruqltcTCZ-ulLO2Q,12936
+pip/_vendor/pyparsing/core.py,sha256=AzTm1KFT1FIhiw2zvXZJmrpQoAwB0wOmeDCiR6SYytw,213344
+pip/_vendor/pyparsing/exceptions.py,sha256=3LbSafD32NYb1Tzt85GHNkhEAU1eZkTtNSk24cPMemo,9023
+pip/_vendor/pyparsing/helpers.py,sha256=QpUOjW0-psvueMwWb9bQpU2noqKCv98_wnw1VSzSdVo,39129
+pip/_vendor/pyparsing/results.py,sha256=HgNvWVXBdQP-Q6PtJfoCEeOJk2nwEvG-2KVKC5sGA30,25341
+pip/_vendor/pyparsing/testing.py,sha256=7tu4Abp4uSeJV0N_yEPRmmNUhpd18ZQP3CrX41DM814,13402
+pip/_vendor/pyparsing/unicode.py,sha256=fwuhMj30SQ165Cv7HJpu-rSxGbRm93kN9L4Ei7VGc1Y,10787
+pip/_vendor/pyparsing/util.py,sha256=kq772O5YSeXOSdP-M31EWpbH_ayj7BMHImBYo9xPD5M,6805
+pip/_vendor/pyparsing/diagram/__init__.py,sha256=KW0PV_TvWKnL7jysz0pQbZ24nzWWu2ZfNaeyUIIywIg,23685
+pip/_vendor/pyproject_hooks/__init__.py,sha256=kCehmy0UaBa9oVMD7ZIZrnswfnP3LXZ5lvnNJAL5JBM,491
+pip/_vendor/pyproject_hooks/_compat.py,sha256=by6evrYnqkisiM-MQcvOKs5bgDMzlOSgZqRHNqf04zE,138
+pip/_vendor/pyproject_hooks/_impl.py,sha256=61GJxzQip0IInhuO69ZI5GbNQ82XEDUB_1Gg5_KtUoc,11920
+pip/_vendor/pyproject_hooks/_in_process/__init__.py,sha256=9gQATptbFkelkIy0OfWFEACzqxXJMQDWCH9rBOAZVwQ,546
+pip/_vendor/pyproject_hooks/_in_process/_in_process.py,sha256=m2b34c917IW5o-Q_6TYIHlsK9lSUlNiyrITTUH_zwew,10927
+pip/_vendor/requests/__init__.py,sha256=64HgJ8cke-XyNrj1ErwNq0F9SqyAThUTh5lV6m7-YkI,5178
+pip/_vendor/requests/__version__.py,sha256=h48zn-oFukaXrYHocdadp_hIszWyd_PGrS8Eiii6aoc,435
+pip/_vendor/requests/_internal_utils.py,sha256=aSPlF4uDhtfKxEayZJJ7KkAxtormeTfpwKSBSwtmAUw,1397
+pip/_vendor/requests/adapters.py,sha256=GFEz5koZaMZD86v0SHXKVB5SE9MgslEjkCQzldkNwVM,21443
+pip/_vendor/requests/api.py,sha256=dyvkDd5itC9z2g0wHl_YfD1yf6YwpGWLO7__8e21nks,6377
+pip/_vendor/requests/auth.py,sha256=h-HLlVx9j8rKV5hfSAycP2ApOSglTz77R0tz7qCbbEE,10187
+pip/_vendor/requests/certs.py,sha256=PVPooB0jP5hkZEULSCwC074532UFbR2Ptgu0I5zwmCs,575
+pip/_vendor/requests/compat.py,sha256=IhK9quyX0RRuWTNcg6d2JGSAOUbM6mym2p_2XjLTwf4,1286
+pip/_vendor/requests/cookies.py,sha256=kD3kNEcCj-mxbtf5fJsSaT86eGoEYpD3X0CSgpzl7BM,18560
+pip/_vendor/requests/exceptions.py,sha256=FA-_kVwBZ2jhXauRctN_ewHVK25b-fj0Azyz1THQ0Kk,3823
+pip/_vendor/requests/help.py,sha256=FnAAklv8MGm_qb2UilDQgS6l0cUttiCFKUjx0zn2XNA,3879
+pip/_vendor/requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733
+pip/_vendor/requests/models.py,sha256=dDZ-iThotky-Noq9yy97cUEJhr3wnY6mv-xR_ePg_lk,35288
+pip/_vendor/requests/packages.py,sha256=njJmVifY4aSctuW3PP5EFRCxjEwMRDO6J_feG2dKWsI,695
+pip/_vendor/requests/sessions.py,sha256=KUqJcRRLovNefUs7ScOXSUVCcfSayTFWtbiJ7gOSlTI,30180
+pip/_vendor/requests/status_codes.py,sha256=FvHmT5uH-_uimtRz5hH9VCbt7VV-Nei2J9upbej6j8g,4235
+pip/_vendor/requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912
+pip/_vendor/requests/utils.py,sha256=0gzSOcx9Ya4liAbHnHuwt4jM78lzCZZoDFgkmsInNUg,33240
+pip/_vendor/resolvelib/__init__.py,sha256=UL-B2BDI0_TRIqkfGwLHKLxY-LjBlomz7941wDqzB1I,537
+pip/_vendor/resolvelib/providers.py,sha256=roVmFBItQJ0TkhNua65h8LdNny7rmeqVEXZu90QiP4o,5872
+pip/_vendor/resolvelib/reporters.py,sha256=fW91NKf-lK8XN7i6Yd_rczL5QeOT3sc6AKhpaTEnP3E,1583
+pip/_vendor/resolvelib/resolvers.py,sha256=2wYzVGBGerbmcIpH8cFmgSKgLSETz8jmwBMGjCBMHG4,17592
+pip/_vendor/resolvelib/structs.py,sha256=IVIYof6sA_N4ZEiE1C1UhzTX495brCNnyCdgq6CYq28,4794
+pip/_vendor/resolvelib/compat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/resolvelib/compat/collections_abc.py,sha256=uy8xUZ-NDEw916tugUXm8HgwCGiMO0f-RcdnpkfXfOs,156
+pip/_vendor/rich/__init__.py,sha256=dRxjIL-SbFVY0q3IjSMrfgBTHrm1LZDgLOygVBwiYZc,6090
+pip/_vendor/rich/__main__.py,sha256=TT8sb9PTnsnKhhrGuHkLN0jdN0dtKhtPkEr9CidDbPM,8478
+pip/_vendor/rich/_cell_widths.py,sha256=2n4EiJi3X9sqIq0O16kUZ_zy6UYMd3xFfChlKfnW1Hc,10096
+pip/_vendor/rich/_emoji_codes.py,sha256=hu1VL9nbVdppJrVoijVshRlcRRe_v3dju3Mmd2sKZdY,140235
+pip/_vendor/rich/_emoji_replace.py,sha256=n-kcetsEUx2ZUmhQrfeMNc-teeGhpuSQ5F8VPBsyvDo,1064
+pip/_vendor/rich/_export_format.py,sha256=nHArqOljIlYn6NruhWsAsh-fHo7oJC3y9BDJyAa-QYQ,2114
+pip/_vendor/rich/_extension.py,sha256=Xt47QacCKwYruzjDi-gOBq724JReDj9Cm9xUi5fr-34,265
+pip/_vendor/rich/_inspect.py,sha256=oZJGw31e64dwXSCmrDnvZbwVb1ZKhWfU8wI3VWohjJk,9695
+pip/_vendor/rich/_log_render.py,sha256=1ByI0PA1ZpxZY3CGJOK54hjlq4X-Bz_boIjIqCd8Kns,3225
+pip/_vendor/rich/_loop.py,sha256=hV_6CLdoPm0va22Wpw4zKqM0RYsz3TZxXj0PoS-9eDQ,1236
+pip/_vendor/rich/_null_file.py,sha256=cTaTCU_xuDXGGa9iqK-kZ0uddZCSvM-RgM2aGMuMiHs,1643
+pip/_vendor/rich/_palettes.py,sha256=cdev1JQKZ0JvlguV9ipHgznTdnvlIzUFDBb0It2PzjI,7063
+pip/_vendor/rich/_pick.py,sha256=evDt8QN4lF5CiwrUIXlOJCntitBCOsI3ZLPEIAVRLJU,423
+pip/_vendor/rich/_ratio.py,sha256=2lLSliL025Y-YMfdfGbutkQDevhcyDqc-DtUYW9mU70,5472
+pip/_vendor/rich/_spinners.py,sha256=U2r1_g_1zSjsjiUdAESc2iAMc3i4ri_S8PYP6kQ5z1I,19919
+pip/_vendor/rich/_stack.py,sha256=-C8OK7rxn3sIUdVwxZBBpeHhIzX0eI-VM3MemYfaXm0,351
+pip/_vendor/rich/_timer.py,sha256=zelxbT6oPFZnNrwWPpc1ktUeAT-Vc4fuFcRZLQGLtMI,417
+pip/_vendor/rich/_win32_console.py,sha256=P0vxI2fcndym1UU1S37XAzQzQnkyY7YqAKmxm24_gug,22820
+pip/_vendor/rich/_windows.py,sha256=dvNl9TmfPzNVxiKk5WDFihErZ5796g2UC9-KGGyfXmk,1926
+pip/_vendor/rich/_windows_renderer.py,sha256=t74ZL3xuDCP3nmTp9pH1L5LiI2cakJuQRQleHCJerlk,2783
+pip/_vendor/rich/_wrap.py,sha256=xfV_9t0Sg6rzimmrDru8fCVmUlalYAcHLDfrJZnbbwQ,1840
+pip/_vendor/rich/abc.py,sha256=ON-E-ZqSSheZ88VrKX2M3PXpFbGEUUZPMa_Af0l-4f0,890
+pip/_vendor/rich/align.py,sha256=FV6_GS-8uhIyViMng3hkIWSFaTgMohK1Oqyjl8I8mGE,10368
+pip/_vendor/rich/ansi.py,sha256=THex7-qjc82-ZRtmDPAYlVEObYOEE_ARB1692Fk-JHs,6819
+pip/_vendor/rich/bar.py,sha256=a7UD303BccRCrEhGjfMElpv5RFYIinaAhAuqYqhUvmw,3264
+pip/_vendor/rich/box.py,sha256=FJ6nI3jD7h2XNFU138bJUt2HYmWOlRbltoCEuIAZhew,9842
+pip/_vendor/rich/cells.py,sha256=zMjFI15wCpgjLR14lHdfFMVC6qMDi5OsKIB0PYZBBMk,4503
+pip/_vendor/rich/color.py,sha256=GTITgffj47On3YK1v_I5T2CPZJGSnyWipPID_YkYXqw,18015
+pip/_vendor/rich/color_triplet.py,sha256=3lhQkdJbvWPoLDO-AnYImAWmJvV5dlgYNCVZ97ORaN4,1054
+pip/_vendor/rich/columns.py,sha256=HUX0KcMm9dsKNi11fTbiM_h2iDtl8ySCaVcxlalEzq8,7131
+pip/_vendor/rich/console.py,sha256=w3tJfrILZpS359wrNqaldGmyk3PEhEmV8Pg2g2GjXWI,97992
+pip/_vendor/rich/constrain.py,sha256=1VIPuC8AgtKWrcncQrjBdYqA3JVWysu6jZo1rrh7c7Q,1288
+pip/_vendor/rich/containers.py,sha256=aKgm5UDHn5Nmui6IJaKdsZhbHClh_X7D-_Wg8Ehrr7s,5497
+pip/_vendor/rich/control.py,sha256=DSkHTUQLorfSERAKE_oTAEUFefZnZp4bQb4q8rHbKws,6630
+pip/_vendor/rich/default_styles.py,sha256=WqVh-RPNEsx0Wxf3fhS_fCn-wVqgJ6Qfo-Zg7CoCsLE,7954
+pip/_vendor/rich/diagnose.py,sha256=an6uouwhKPAlvQhYpNNpGq9EJysfMIOvvCbO3oSoR24,972
+pip/_vendor/rich/emoji.py,sha256=omTF9asaAnsM4yLY94eR_9dgRRSm1lHUszX20D1yYCQ,2501
+pip/_vendor/rich/errors.py,sha256=5pP3Kc5d4QJ_c0KFsxrfyhjiPVe7J1zOqSFbFAzcV-Y,642
+pip/_vendor/rich/file_proxy.py,sha256=4gCbGRXg0rW35Plaf0UVvj3dfENHuzc_n8I_dBqxI7o,1616
+pip/_vendor/rich/filesize.py,sha256=9fTLAPCAwHmBXdRv7KZU194jSgNrRb6Wx7RIoBgqeKY,2508
+pip/_vendor/rich/highlighter.py,sha256=3WW6PACGlq0e3YDjfqiMBQ0dYZwu7pcoFYUgJy01nb0,9585
+pip/_vendor/rich/json.py,sha256=TmeFm96Utaov-Ff5miavBPNo51HRooM8S78HEwrYEjA,5053
+pip/_vendor/rich/jupyter.py,sha256=QyoKoE_8IdCbrtiSHp9TsTSNyTHY0FO5whE7jOTd9UE,3252
+pip/_vendor/rich/layout.py,sha256=RFYL6HdCFsHf9WRpcvi3w-fpj-8O5dMZ8W96VdKNdbI,14007
+pip/_vendor/rich/live.py,sha256=emVaLUua-FKSYqZXmtJJjBIstO99CqMOuA6vMAKVkO0,14172
+pip/_vendor/rich/live_render.py,sha256=zElm3PrfSIvjOce28zETHMIUf9pFYSUA5o0AflgUP64,3667
+pip/_vendor/rich/logging.py,sha256=uB-cB-3Q4bmXDLLpbOWkmFviw-Fde39zyMV6tKJ2WHQ,11903
+pip/_vendor/rich/markup.py,sha256=xzF4uAafiEeEYDJYt_vUnJOGoTU8RrH-PH7WcWYXjCg,8198
+pip/_vendor/rich/measure.py,sha256=HmrIJX8sWRTHbgh8MxEay_83VkqNW_70s8aKP5ZcYI8,5305
+pip/_vendor/rich/padding.py,sha256=kTFGsdGe0os7tXLnHKpwTI90CXEvrceeZGCshmJy5zw,4970
+pip/_vendor/rich/pager.py,sha256=SO_ETBFKbg3n_AgOzXm41Sv36YxXAyI3_R-KOY2_uSc,828
+pip/_vendor/rich/palette.py,sha256=lInvR1ODDT2f3UZMfL1grq7dY_pDdKHw4bdUgOGaM4Y,3396
+pip/_vendor/rich/panel.py,sha256=wGMe40J8KCGgQoM0LyjRErmGIkv2bsYA71RCXThD0xE,10574
+pip/_vendor/rich/pretty.py,sha256=dAbLqSF3jJnyfBLJ7QjQ3B2J-WGyBnAdGXeuBVIyMyA,37414
+pip/_vendor/rich/progress.py,sha256=eg-OURdfZW3n3bib1-zP3SZl6cIm2VZup1pr_96CyLk,59836
+pip/_vendor/rich/progress_bar.py,sha256=cEoBfkc3lLwqba4XKsUpy4vSQKDh2QQ5J2J94-ACFoo,8165
+pip/_vendor/rich/prompt.py,sha256=x0mW-pIPodJM4ry6grgmmLrl8VZp99kqcmdnBe70YYA,11303
+pip/_vendor/rich/protocol.py,sha256=5hHHDDNHckdk8iWH5zEbi-zuIVSF5hbU2jIo47R7lTE,1391
+pip/_vendor/rich/region.py,sha256=rNT9xZrVZTYIXZC0NYn41CJQwYNbR-KecPOxTgQvB8Y,166
+pip/_vendor/rich/repr.py,sha256=eJObQe6_c5pUjRM85sZ2rrW47_iF9HT3Z8DrgVjvOl8,4436
+pip/_vendor/rich/rule.py,sha256=V6AWI0wCb6DB0rvN967FRMlQrdlG7HoZdfEAHyeG8CM,4773
+pip/_vendor/rich/scope.py,sha256=TMUU8qo17thyqQCPqjDLYpg_UU1k5qVd-WwiJvnJVas,2843
+pip/_vendor/rich/screen.py,sha256=YoeReESUhx74grqb0mSSb9lghhysWmFHYhsbMVQjXO8,1591
+pip/_vendor/rich/segment.py,sha256=6XdX0MfL18tUCaUWDWncIqx0wpq3GiaqzhYP779JvRA,24224
+pip/_vendor/rich/spinner.py,sha256=7b8MCleS4fa46HX0AzF98zfu6ZM6fAL0UgYzPOoakF4,4374
+pip/_vendor/rich/status.py,sha256=gJsIXIZeSo3urOyxRUjs6VrhX5CZrA0NxIQ-dxhCnwo,4425
+pip/_vendor/rich/style.py,sha256=odBbAlrgdEbAj7pmtPbQtWJNS8upyNhhy--Ks6KwAKk,26332
+pip/_vendor/rich/styled.py,sha256=eZNnzGrI4ki_54pgY3Oj0T-x3lxdXTYh4_ryDB24wBU,1258
+pip/_vendor/rich/syntax.py,sha256=W1xtdBA1-EVP-weYofKXusUlV5zghCOv1nWMHHfNmiY,34995
+pip/_vendor/rich/table.py,sha256=-WzesL-VJKsaiDU3uyczpJMHy6VCaSewBYJwx8RudI8,39684
+pip/_vendor/rich/terminal_theme.py,sha256=1j5-ufJfnvlAo5Qsi_ACZiXDmwMXzqgmFByObT9-yJY,3370
+pip/_vendor/rich/text.py,sha256=andXaxWW_wBveMiZZpd5viQwucWo7SPopcM3ZCQeO0c,45686
+pip/_vendor/rich/theme.py,sha256=GKNtQhDBZKAzDaY0vQVQQFzbc0uWfFe6CJXA-syT7zQ,3627
+pip/_vendor/rich/themes.py,sha256=0xgTLozfabebYtcJtDdC5QkX5IVUEaviqDUJJh4YVFk,102
+pip/_vendor/rich/traceback.py,sha256=6LkGguCEAxKv8v8xmKfMeYPPJ1UXUEHDv4726To6FiQ,26070
+pip/_vendor/rich/tree.py,sha256=BMbUYNjS9uodNPfvtY_odmU09GA5QzcMbQ5cJZhllQI,9169
+pip/_vendor/tenacity/__init__.py,sha256=rjcWJVq5PcNJNC42rt-TAGGskM-RUEkZbDKu1ra7IPo,18364
+pip/_vendor/tenacity/_asyncio.py,sha256=HEb0BVJEeBJE9P-m9XBxh1KcaF96BwoeqkJCL5sbVcQ,3314
+pip/_vendor/tenacity/_utils.py,sha256=-y68scDcyoqvTJuJJ0GTfjdSCljEYlbCYvgk7nM4NdM,1944
+pip/_vendor/tenacity/after.py,sha256=dlmyxxFy2uqpLXDr838DiEd7jgv2AGthsWHGYcGYsaI,1496
+pip/_vendor/tenacity/before.py,sha256=7XtvRmO0dRWUp8SVn24OvIiGFj8-4OP5muQRUiWgLh0,1376
+pip/_vendor/tenacity/before_sleep.py,sha256=ThyDvqKU5yle_IvYQz_b6Tp6UjUS0PhVp6zgqYl9U6Y,1908
+pip/_vendor/tenacity/nap.py,sha256=fRWvnz1aIzbIq9Ap3gAkAZgDH6oo5zxMrU6ZOVByq0I,1383
+pip/_vendor/tenacity/retry.py,sha256=Cy504Ss3UrRV7lnYgvymF66WD1wJ2dbM869kDcjuDes,7550
+pip/_vendor/tenacity/stop.py,sha256=sKHmHaoSaW6sKu3dTxUVKr1-stVkY7lw4Y9yjZU30zQ,2790
+pip/_vendor/tenacity/tornadoweb.py,sha256=E8lWO2nwe6dJgoB-N2HhQprYLDLB_UdSgFnv-EN6wKE,2145
+pip/_vendor/tenacity/wait.py,sha256=tdLTESRm5E237VHG0SxCDXRa0DHKPKVq285kslHVURc,8011
+pip/_vendor/tomli/__init__.py,sha256=JhUwV66DB1g4Hvt1UQCVMdfCu-IgAV8FXmvDU9onxd4,396
+pip/_vendor/tomli/_parser.py,sha256=g9-ENaALS-B8dokYpCuzUFalWlog7T-SIYMjLZSWrtM,22633
+pip/_vendor/tomli/_re.py,sha256=dbjg5ChZT23Ka9z9DHOXfdtSpPwUfdgMXnj8NOoly-w,2943
+pip/_vendor/tomli/_types.py,sha256=-GTG2VUqkpxwMqzmVO4F7ybKddIbAnuAHXfmWQcTi3Q,254
+pip/_vendor/urllib3/__init__.py,sha256=iXLcYiJySn0GNbWOOZDDApgBL1JgP44EZ8i1760S8Mc,3333
+pip/_vendor/urllib3/_collections.py,sha256=Rp1mVyBgc_UlAcp6M3at1skJBXR5J43NawRTvW2g_XY,10811
+pip/_vendor/urllib3/_version.py,sha256=JWE--BUVy7--9FsXILONIpQ43irftKGjT9j2H_fdF2M,64
+pip/_vendor/urllib3/connection.py,sha256=8976wL6sGeVMW0JnXvx5mD00yXu87uQjxtB9_VL8dx8,20070
+pip/_vendor/urllib3/connectionpool.py,sha256=vS4UaHLoR9_5aGLXSQ776y_jTxgqqjx0YsjkYksWGOo,39095
+pip/_vendor/urllib3/exceptions.py,sha256=0Mnno3KHTNfXRfY7638NufOPkUb6mXOm-Lqj-4x2w8A,8217
+pip/_vendor/urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579
+pip/_vendor/urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440
+pip/_vendor/urllib3/poolmanager.py,sha256=0KOOJECoeLYVjUHvv-0h4Oq3FFQQ2yb-Fnjkbj8gJO0,19786
+pip/_vendor/urllib3/request.py,sha256=ZFSIqX0C6WizixecChZ3_okyu7BEv0lZu1VT0s6h4SM,5985
+pip/_vendor/urllib3/response.py,sha256=fmDJAFkG71uFTn-sVSTh2Iw0WmcXQYqkbRjihvwBjU8,30641
+pip/_vendor/urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957
+pip/_vendor/urllib3/contrib/appengine.py,sha256=VR68eAVE137lxTgjBDwCna5UiBZTOKa01Aj_-5BaCz4,11036
+pip/_vendor/urllib3/contrib/ntlmpool.py,sha256=NlfkW7WMdW8ziqudopjHoW299og1BTWi0IeIibquFwk,4528
+pip/_vendor/urllib3/contrib/pyopenssl.py,sha256=hDJh4MhyY_p-oKlFcYcQaVQRDv6GMmBGuW9yjxyeejM,17081
+pip/_vendor/urllib3/contrib/securetransport.py,sha256=yhZdmVjY6PI6EeFbp7qYOp6-vp1Rkv2NMuOGaEj7pmc,34448
+pip/_vendor/urllib3/contrib/socks.py,sha256=aRi9eWXo9ZEb95XUxef4Z21CFlnnjbEiAo9HOseoMt4,7097
+pip/_vendor/urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/contrib/_securetransport/bindings.py,sha256=4Xk64qIkPBt09A5q-RIFUuDhNc9mXilVapm7WnYnzRw,17632
+pip/_vendor/urllib3/contrib/_securetransport/low_level.py,sha256=B2JBB2_NRP02xK6DCa1Pa9IuxrPwxzDzZbixQkb7U9M,13922
+pip/_vendor/urllib3/packages/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/packages/six.py,sha256=b9LM0wBXv7E7SrbCjAm4wwN-hrH-iNxv18LgWNMMKPo,34665
+pip/_vendor/urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pip/_vendor/urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417
+pip/_vendor/urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155
+pip/_vendor/urllib3/util/connection.py,sha256=5Lx2B1PW29KxBn2T0xkN1CBgRBa3gGVJBKoQoRogEVk,4901
+pip/_vendor/urllib3/util/proxy.py,sha256=zUvPPCJrp6dOF0N4GAVbOcl6o-4uXKSrGiTkkr5vUS4,1605
+pip/_vendor/urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498
+pip/_vendor/urllib3/util/request.py,sha256=C0OUt2tcU6LRiQJ7YYNP9GvPrSvl7ziIBekQ-5nlBZk,3997
+pip/_vendor/urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510
+pip/_vendor/urllib3/util/retry.py,sha256=4laWh0HpwGijLiBmdBIYtbhYekQnNzzhx2W9uys0RHA,22003
+pip/_vendor/urllib3/util/ssl_.py,sha256=X4-AqW91aYPhPx6-xbf66yHFQKbqqfC_5Zt4WkLX1Hc,17177
+pip/_vendor/urllib3/util/ssl_match_hostname.py,sha256=Ir4cZVEjmAk8gUAIHWSi7wtOO83UCYABY2xFD1Ql_WA,5758
+pip/_vendor/urllib3/util/ssltransport.py,sha256=NA-u5rMTrDFDFC8QzRKUEKMG0561hOD4qBTr3Z4pv6E,6895
+pip/_vendor/urllib3/util/timeout.py,sha256=QSbBUNOB9yh6AnDn61SrLQ0hg5oz0I9-uXEG91AJuIg,10003
+pip/_vendor/urllib3/util/url.py,sha256=HLCLEKt8D-QMioTNbneZSzGTGyUkns4w_lSJP1UzE2E,14298
+pip/_vendor/urllib3/util/wait.py,sha256=fOX0_faozG2P7iVojQoE1mbydweNyTcm-hXEfFrTtLI,5403
+pip/_vendor/webencodings/__init__.py,sha256=qOBJIuPy_4ByYH6W_bNgJF-qYQ2DoU-dKsDu5yRWCXg,10579
+pip/_vendor/webencodings/labels.py,sha256=4AO_KxTddqGtrL9ns7kAPjb0CcN6xsCIxbK37HY9r3E,8979
+pip/_vendor/webencodings/mklabels.py,sha256=GYIeywnpaLnP0GSic8LFWgd0UVvO_l1Nc6YoF-87R_4,1305
+pip/_vendor/webencodings/tests.py,sha256=OtGLyjhNY1fvkW1GvLJ_FV9ZoqC9Anyjr7q3kxTbzNs,6563
+pip/_vendor/webencodings/x_user_defined.py,sha256=yOqWSdmpytGfUgh_Z6JYgDNhoc-BAHyyeeT15Fr42tM,4307
+pip-23.0.1.dist-info/LICENSE.txt,sha256=Y0MApmnUmurmWxLGxIySTFGkzfPR_whtw0VtyLyqIQQ,1093
+pip-23.0.1.dist-info/METADATA,sha256=POh89utz-H1e0K-xDY9CL9gs-x0MjH-AWxbhJG3aaVE,4072
+pip-23.0.1.dist-info/WHEEL,sha256=2wepM1nk4DS4eFpYrW1TTqPcoGNfHhhO_i5m4cOimbo,92
+pip-23.0.1.dist-info/entry_points.txt,sha256=w694mjHYSfmSoUVVSaHoQ9UkOBBdtKKIJbyDRLdKju8,124
+pip-23.0.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+pip-23.0.1.dist-info/RECORD,,
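Each populated RECORD row above has the form ``path,sha256=<digest>,<size>``, where the digest is the urlsafe-base64 SHA-256 of the file with trailing ``=`` padding stripped; the RECORD file itself is listed last with empty hash and size fields. A minimal verification sketch, assuming the row's path is readable relative to the current directory and contains no commas (real RECORD files are CSV and may quote paths)::

    import base64
    import hashlib
    import os

    def verify_record_row(row: str) -> bool:
        # e.g. row = "pip/py.typed,sha256=EBVvvPRTn_...,286"
        path, hash_field, size = row.rsplit(",", 2)
        digest = hashlib.sha256(open(path, "rb").read()).digest()
        # RECORD uses urlsafe base64 with the trailing "=" padding stripped.
        encoded = base64.urlsafe_b64encode(digest).rstrip(b"=").decode("ascii")
        return hash_field == "sha256=" + encoded and os.path.getsize(path) == int(size)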
diff --git a/third_party/python/pip/pip-23.0.1.dist-info/WHEEL b/third_party/python/pip/pip-23.0.1.dist-info/WHEEL
new file mode 100644
index 0000000000..57e3d840d5
--- /dev/null
+++ b/third_party/python/pip/pip-23.0.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.38.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/pip/pip-23.0.1.dist-info/entry_points.txt b/third_party/python/pip/pip-23.0.1.dist-info/entry_points.txt
new file mode 100644
index 0000000000..ab909c9b34
--- /dev/null
+++ b/third_party/python/pip/pip-23.0.1.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+[console_scripts]
+pip = pip._internal.cli.main:main
+pip3 = pip._internal.cli.main:main
+pip3.9 = pip._internal.cli.main:main
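Each line under ``[console_scripts]`` maps a command name to a ``module:function`` target, and an installer generates a small launcher executable per line. A simplified sketch of what such a launcher does (not the exact script an installer emits)::

    # Approximate body of the generated "pip" launcher:
    import sys

    from pip._internal.cli.main import main

    if __name__ == "__main__":
        sys.exit(main())

The ``pip3.9`` entry presumably reflects the Python minor version used when this wheel was built; the versioned name is fixed at build time in the wheel's metadata.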
diff --git a/third_party/python/pip/pip-23.0.1.dist-info/top_level.txt b/third_party/python/pip/pip-23.0.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..a1b589e38a
--- /dev/null
+++ b/third_party/python/pip/pip-23.0.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+pip
diff --git a/third_party/python/pip/pip/__init__.py b/third_party/python/pip/pip/__init__.py
new file mode 100644
index 0000000000..42f6c455c6
--- /dev/null
+++ b/third_party/python/pip/pip/__init__.py
@@ -0,0 +1,13 @@
+from typing import List, Optional
+
+__version__ = "23.0.1"
+
+
+def main(args: Optional[List[str]] = None) -> int:
+ """This is an internal API only meant for use by pip's own console scripts.
+
+ For additional details, see https://github.com/pypa/pip/issues/7498.
+ """
+ from pip._internal.utils.entrypoints import _wrapper
+
+ return _wrapper(args)
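Illustrative only, given the warning in the docstring above: the in-process call is kept for backward compatibility, and pip's documentation recommends running pip in a subprocess instead::

    import subprocess
    import sys

    import pip

    # Legacy in-process call preserved by the shim above; returns an exit code.
    status = pip.main(["--version"])

    # Recommended alternative: invoke pip in its own process.
    subprocess.run([sys.executable, "-m", "pip", "--version"], check=True)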
diff --git a/third_party/python/pip/pip/__main__.py b/third_party/python/pip/pip/__main__.py
new file mode 100644
index 0000000000..fe34a7b777
--- /dev/null
+++ b/third_party/python/pip/pip/__main__.py
@@ -0,0 +1,31 @@
+import os
+import sys
+import warnings
+
+# Remove the first entry of sys.path if it is '' or the current working
+# directory, to avoid using the current directory in the pip commands
+# check, freeze, install, list and show
+# when invoked as python -m pip <command>
+if sys.path[0] in ("", os.getcwd()):
+ sys.path.pop(0)
+
+# If we are running from a wheel, add the wheel to sys.path.
+# This allows usage such as: python pip-*.whl/pip install pip-*.whl
+if __package__ == "":
+    # __file__ is pip-*.whl/pip/__main__.py
+    # The first dirname call strips off '/__main__.py'; the second strips off '/pip'.
+    # The resulting path is the wheel file itself.
+    # Add that to sys.path so we can import pip.
+ path = os.path.dirname(os.path.dirname(__file__))
+ sys.path.insert(0, path)
+
+if __name__ == "__main__":
+ # Work around the error reported in #9540, pending a proper fix.
+ # Note: It is essential the warning filter is set *before* importing
+ # pip, as the deprecation happens at import time, not runtime.
+ warnings.filterwarnings(
+ "ignore", category=DeprecationWarning, module=".*packaging\\.version"
+ )
+ from pip._internal.cli.main import main as _main
+
+ sys.exit(_main())
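To make the double ``dirname`` in the comment above concrete, with a hypothetical wheel location::

    import os

    # Hypothetical __file__ when pip is executed directly from its wheel:
    file = "/tmp/pip-23.0.1-py3-none-any.whl/pip/__main__.py"
    inner = os.path.dirname(file)   # "/tmp/pip-23.0.1-py3-none-any.whl/pip"
    wheel = os.path.dirname(inner)  # "/tmp/pip-23.0.1-py3-none-any.whl"
    # Prepending the wheel path to sys.path lets "import pip" resolve
    # from inside the zip archive via Python's zipimport machinery.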
diff --git a/third_party/python/pip/pip/__pip-runner__.py b/third_party/python/pip/pip/__pip-runner__.py
new file mode 100644
index 0000000000..49a148a097
--- /dev/null
+++ b/third_party/python/pip/pip/__pip-runner__.py
@@ -0,0 +1,50 @@
+"""Execute exactly this copy of pip, within a different environment.
+
+This file is named as it is, to ensure that this module can't be imported via
+an import statement.
+"""
+
+# /!\ This version compatibility check section must be Python 2 compatible. /!\
+
+import sys
+
+# Copied from setup.py
+PYTHON_REQUIRES = (3, 7)
+
+
+def version_str(version): # type: ignore
+ return ".".join(str(v) for v in version)
+
+
+if sys.version_info[:2] < PYTHON_REQUIRES:
+ raise SystemExit(
+ "This version of pip does not support python {} (requires >={}).".format(
+ version_str(sys.version_info[:2]), version_str(PYTHON_REQUIRES)
+ )
+ )
+
+# From here on, we can use Python 3 features, but the syntax must remain
+# Python 2 compatible.
+
+import runpy # noqa: E402
+from importlib.machinery import PathFinder # noqa: E402
+from os.path import dirname # noqa: E402
+
+PIP_SOURCES_ROOT = dirname(dirname(__file__))
+
+
+class PipImportRedirectingFinder:
+ @classmethod
+    def find_spec(cls, fullname, path=None, target=None):  # type: ignore
+ if fullname != "pip":
+ return None
+
+ spec = PathFinder.find_spec(fullname, [PIP_SOURCES_ROOT], target)
+ assert spec, (PIP_SOURCES_ROOT, fullname)
+ return spec
+
+
+sys.meta_path.insert(0, PipImportRedirectingFinder())
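+# From this point on, any "import pip" (including the runpy call below)
+# resolves to the copy of pip that sits next to this file, regardless of
+# what is installed in the environment running this script.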
+
+assert __name__ == "__main__", "Cannot run __pip-runner__.py as a non-main module"
+runpy.run_module("pip", run_name="__main__", alter_sys=True)
diff --git a/third_party/python/pip/pip/_internal/__init__.py b/third_party/python/pip/pip/_internal/__init__.py
new file mode 100644
index 0000000000..6afb5c627c
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/__init__.py
@@ -0,0 +1,19 @@
+from typing import List, Optional
+
+import pip._internal.utils.inject_securetransport # noqa
+from pip._internal.utils import _log
+
+# init_logging() must be called before any call to logging.getLogger()
+# which happens at import of most modules.
+_log.init_logging()
+
+
+def main(args: Optional[List[str]] = None) -> int:
+ """This is preserved for old console scripts that may still be referencing
+ it.
+
+ For additional details, see https://github.com/pypa/pip/issues/7498.
+ """
+ from pip._internal.utils.entrypoints import _wrapper
+
+ return _wrapper(args)
diff --git a/third_party/python/pip/pip/_internal/build_env.py b/third_party/python/pip/pip/_internal/build_env.py
new file mode 100644
index 0000000000..4f704a3547
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/build_env.py
@@ -0,0 +1,311 @@
+"""Build Environment used for isolation during sdist building
+"""
+
+import logging
+import os
+import pathlib
+import site
+import sys
+import textwrap
+from collections import OrderedDict
+from types import TracebackType
+from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple, Type, Union
+
+from pip._vendor.certifi import where
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.version import Version
+
+from pip import __file__ as pip_location
+from pip._internal.cli.spinners import open_spinner
+from pip._internal.locations import get_platlib, get_purelib, get_scheme
+from pip._internal.metadata import get_default_environment, get_environment
+from pip._internal.utils.subprocess import call_subprocess
+from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
+
+if TYPE_CHECKING:
+ from pip._internal.index.package_finder import PackageFinder
+
+logger = logging.getLogger(__name__)
+
+
+def _dedup(a: str, b: str) -> Union[Tuple[str], Tuple[str, str]]:
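+    # Collapse the purelib/platlib pair into a single entry when they point
+    # at the same directory, e.g. _dedup("/lib", "/lib") -> ("/lib",).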
+ return (a, b) if a != b else (a,)
+
+
+class _Prefix:
+ def __init__(self, path: str) -> None:
+ self.path = path
+ self.setup = False
+ scheme = get_scheme("", prefix=path)
+ self.bin_dir = scheme.scripts
+ self.lib_dirs = _dedup(scheme.purelib, scheme.platlib)
+
+
+def get_runnable_pip() -> str:
+ """Get a file to pass to a Python executable, to run the currently-running pip.
+
+ This is used to run a pip subprocess, for installing requirements into the build
+ environment.
+ """
+ source = pathlib.Path(pip_location).resolve().parent
+
+ if not source.is_dir():
+        # This would happen if someone is using pip from inside a zip file. In that
+        # case, we can use that path directly.
+ return str(source)
+
+ return os.fsdecode(source / "__pip-runner__.py")
+
+
+def _get_system_sitepackages() -> Set[str]:
+ """Get system site packages
+
+ Usually from site.getsitepackages,
+ but fallback on `get_purelib()/get_platlib()` if unavailable
+ (e.g. in a virtualenv created by virtualenv<20)
+
+ Returns normalized set of strings.
+ """
+ if hasattr(site, "getsitepackages"):
+ system_sites = site.getsitepackages()
+ else:
+        # virtualenv < 20 overwrites site.py without getsitepackages;
+        # fall back on get_purelib/get_platlib.
+        # This is known to miss things, but shouldn't in the cases
+        # where getsitepackages() has been removed (inside a virtualenv).
+ system_sites = [get_purelib(), get_platlib()]
+ return {os.path.normcase(path) for path in system_sites}
+
+
+class BuildEnvironment:
+ """Creates and manages an isolated environment to install build deps"""
+
+ def __init__(self) -> None:
+ temp_dir = TempDirectory(kind=tempdir_kinds.BUILD_ENV, globally_managed=True)
+
+ self._prefixes = OrderedDict(
+ (name, _Prefix(os.path.join(temp_dir.path, name)))
+ for name in ("normal", "overlay")
+ )
+
+ self._bin_dirs: List[str] = []
+ self._lib_dirs: List[str] = []
+ for prefix in reversed(list(self._prefixes.values())):
+ self._bin_dirs.append(prefix.bin_dir)
+ self._lib_dirs.extend(prefix.lib_dirs)
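+        # Note: reversed() above puts the "overlay" prefix first, so it
+        # takes precedence over "normal" on PATH and in sitecustomize.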
+
+ # Customize site to:
+ # - ensure .pth files are honored
+ # - prevent access to system site packages
+ system_sites = _get_system_sitepackages()
+
+ self._site_dir = os.path.join(temp_dir.path, "site")
+ if not os.path.exists(self._site_dir):
+ os.mkdir(self._site_dir)
+ with open(
+ os.path.join(self._site_dir, "sitecustomize.py"), "w", encoding="utf-8"
+ ) as fp:
+ fp.write(
+ textwrap.dedent(
+ """
+ import os, site, sys
+
+ # First, drop system-sites related paths.
+ original_sys_path = sys.path[:]
+ known_paths = set()
+ for path in {system_sites!r}:
+ site.addsitedir(path, known_paths=known_paths)
+ system_paths = set(
+ os.path.normcase(path)
+ for path in sys.path[len(original_sys_path):]
+ )
+ original_sys_path = [
+ path for path in original_sys_path
+ if os.path.normcase(path) not in system_paths
+ ]
+ sys.path = original_sys_path
+
+                # Second, add lib directories,
+                # ensuring .pth files are processed.
+                for path in {lib_dirs!r}:
+                    assert path not in sys.path
+ site.addsitedir(path)
+ """
+ ).format(system_sites=system_sites, lib_dirs=self._lib_dirs)
+ )
+
+ def __enter__(self) -> None:
+ self._save_env = {
+ name: os.environ.get(name, None)
+ for name in ("PATH", "PYTHONNOUSERSITE", "PYTHONPATH")
+ }
+
+ path = self._bin_dirs[:]
+ old_path = self._save_env["PATH"]
+ if old_path:
+ path.extend(old_path.split(os.pathsep))
+
+ pythonpath = [self._site_dir]
+
+ os.environ.update(
+ {
+ "PATH": os.pathsep.join(path),
+ "PYTHONNOUSERSITE": "1",
+ "PYTHONPATH": os.pathsep.join(pythonpath),
+ }
+ )
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ for varname, old_value in self._save_env.items():
+ if old_value is None:
+ os.environ.pop(varname, None)
+ else:
+ os.environ[varname] = old_value
+
+ def check_requirements(
+ self, reqs: Iterable[str]
+ ) -> Tuple[Set[Tuple[str, str]], Set[str]]:
+ """Return 2 sets:
+ - conflicting requirements: set of (installed, wanted) reqs tuples
+ - missing requirements: set of reqs
+ """
+ missing = set()
+ conflicting = set()
+ if reqs:
+ env = (
+ get_environment(self._lib_dirs)
+ if hasattr(self, "_lib_dirs")
+ else get_default_environment()
+ )
+ for req_str in reqs:
+ req = Requirement(req_str)
+ # We're explicitly evaluating with an empty extra value, since build
+ # environments are not provided any mechanism to select specific extras.
+ if req.marker is not None and not req.marker.evaluate({"extra": ""}):
+ continue
+ dist = env.get_distribution(req.name)
+ if not dist:
+ missing.add(req_str)
+ continue
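+                # Legacy (non-PEP 440) versions can only be pinned with the
+                # arbitrary-equality operator "===", hence the distinction.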
+ if isinstance(dist.version, Version):
+ installed_req_str = f"{req.name}=={dist.version}"
+ else:
+ installed_req_str = f"{req.name}==={dist.version}"
+ if not req.specifier.contains(dist.version, prereleases=True):
+ conflicting.add((installed_req_str, req_str))
+ # FIXME: Consider direct URL?
+ return conflicting, missing
+
+ def install_requirements(
+ self,
+ finder: "PackageFinder",
+ requirements: Iterable[str],
+ prefix_as_string: str,
+ *,
+ kind: str,
+ ) -> None:
+ prefix = self._prefixes[prefix_as_string]
+ assert not prefix.setup
+ prefix.setup = True
+ if not requirements:
+ return
+ self._install_requirements(
+ get_runnable_pip(),
+ finder,
+ requirements,
+ prefix,
+ kind=kind,
+ )
+
+ @staticmethod
+ def _install_requirements(
+ pip_runnable: str,
+ finder: "PackageFinder",
+ requirements: Iterable[str],
+ prefix: _Prefix,
+ *,
+ kind: str,
+ ) -> None:
+ args: List[str] = [
+ sys.executable,
+ pip_runnable,
+ "install",
+ "--ignore-installed",
+ "--no-user",
+ "--prefix",
+ prefix.path,
+ "--no-warn-script-location",
+ ]
+ if logger.getEffectiveLevel() <= logging.DEBUG:
+ args.append("-v")
+ for format_control in ("no_binary", "only_binary"):
+ formats = getattr(finder.format_control, format_control)
+ args.extend(
+ (
+ "--" + format_control.replace("_", "-"),
+ ",".join(sorted(formats or {":none:"})),
+ )
+ )
+
+ index_urls = finder.index_urls
+ if index_urls:
+ args.extend(["-i", index_urls[0]])
+ for extra_index in index_urls[1:]:
+ args.extend(["--extra-index-url", extra_index])
+ else:
+ args.append("--no-index")
+ for link in finder.find_links:
+ args.extend(["--find-links", link])
+
+ for host in finder.trusted_hosts:
+ args.extend(["--trusted-host", host])
+ if finder.allow_all_prereleases:
+ args.append("--pre")
+ if finder.prefer_binary:
+ args.append("--prefer-binary")
+ args.append("--")
+ args.extend(requirements)
+ extra_environ = {"_PIP_STANDALONE_CERT": where()}
+ with open_spinner(f"Installing {kind}") as spinner:
+ call_subprocess(
+ args,
+ command_desc=f"pip subprocess to install {kind}",
+ spinner=spinner,
+ extra_environ=extra_environ,
+ )
+
+
+class NoOpBuildEnvironment(BuildEnvironment):
+ """A no-op drop-in replacement for BuildEnvironment"""
+
+ def __init__(self) -> None:
+ pass
+
+ def __enter__(self) -> None:
+ pass
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ pass
+
+ def cleanup(self) -> None:
+ pass
+
+ def install_requirements(
+ self,
+ finder: "PackageFinder",
+ requirements: Iterable[str],
+ prefix_as_string: str,
+ *,
+ kind: str,
+ ) -> None:
+ raise NotImplementedError()
diff --git a/third_party/python/pip/pip/_internal/cache.py b/third_party/python/pip/pip/_internal/cache.py
new file mode 100644
index 0000000000..c53b7f023a
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cache.py
@@ -0,0 +1,293 @@
+"""Cache Management
+"""
+
+import hashlib
+import json
+import logging
+import os
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Set
+
+from pip._vendor.packaging.tags import Tag, interpreter_name, interpreter_version
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.exceptions import InvalidWheelFilename
+from pip._internal.models.direct_url import DirectUrl
+from pip._internal.models.format_control import FormatControl
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
+from pip._internal.utils.urls import path_to_url
+
+logger = logging.getLogger(__name__)
+
+ORIGIN_JSON_NAME = "origin.json"
+
+
+def _hash_dict(d: Dict[str, str]) -> str:
+ """Return a stable sha224 of a dictionary."""
+ s = json.dumps(d, sort_keys=True, separators=(",", ":"), ensure_ascii=True)
+ return hashlib.sha224(s.encode("ascii")).hexdigest()
+
+
+class Cache:
+ """An abstract class - provides cache directories for data from links
+
+
+ :param cache_dir: The root of the cache.
+ :param format_control: An object of FormatControl class to limit
+ binaries being read from the cache.
+ :param allowed_formats: which formats of files the cache should store.
+ ('binary' and 'source' are the only allowed values)
+ """
+
+ def __init__(
+ self, cache_dir: str, format_control: FormatControl, allowed_formats: Set[str]
+ ) -> None:
+ super().__init__()
+ assert not cache_dir or os.path.isabs(cache_dir)
+ self.cache_dir = cache_dir or None
+ self.format_control = format_control
+ self.allowed_formats = allowed_formats
+
+ _valid_formats = {"source", "binary"}
+ assert self.allowed_formats.union(_valid_formats) == _valid_formats
+
+ def _get_cache_path_parts(self, link: Link) -> List[str]:
+ """Get parts of part that must be os.path.joined with cache_dir"""
+
+        # We want to generate a URL to use as our cache key. We don't want
+        # to just re-use the link's URL, because it might have other items
+        # in the fragment that we don't care about.
+ key_parts = {"url": link.url_without_fragment}
+ if link.hash_name is not None and link.hash is not None:
+ key_parts[link.hash_name] = link.hash
+ if link.subdirectory_fragment:
+ key_parts["subdirectory"] = link.subdirectory_fragment
+
+ # Include interpreter name, major and minor version in cache key
+ # to cope with ill-behaved sdists that build a different wheel
+ # depending on the python version their setup.py is being run on,
+ # and don't encode the difference in compatibility tags.
+ # https://github.com/pypa/pip/issues/7296
+ key_parts["interpreter_name"] = interpreter_name()
+ key_parts["interpreter_version"] = interpreter_version()
+
+        # Encode our key URL with sha224; it has similar security properties
+        # to sha256, but a shorter total output (and is thus marginally less
+        # secure). The difference doesn't matter for our use case here.
+ hashed = _hash_dict(key_parts)
+
+        # We want to nest the directories somewhat, to prevent having a ton
+        # of top-level directories, where we might run out of subdirectories
+        # on some filesystems.
+ parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
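+        # e.g. "0a1b2c3d..." -> ["0a", "1b", "2c", "3d..."]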
+
+ return parts
+
+ def _get_candidates(self, link: Link, canonical_package_name: str) -> List[Any]:
+ can_not_cache = not self.cache_dir or not canonical_package_name or not link
+ if can_not_cache:
+ return []
+
+ formats = self.format_control.get_allowed_formats(canonical_package_name)
+ if not self.allowed_formats.intersection(formats):
+ return []
+
+ candidates = []
+ path = self.get_path_for_link(link)
+ if os.path.isdir(path):
+ for candidate in os.listdir(path):
+ candidates.append((candidate, path))
+ return candidates
+
+ def get_path_for_link(self, link: Link) -> str:
+ """Return a directory to store cached items in for link."""
+ raise NotImplementedError()
+
+ def get(
+ self,
+ link: Link,
+ package_name: Optional[str],
+ supported_tags: List[Tag],
+ ) -> Link:
+ """Returns a link to a cached item if it exists, otherwise returns the
+ passed link.
+ """
+ raise NotImplementedError()
+
+
+class SimpleWheelCache(Cache):
+ """A cache of wheels for future installs."""
+
+ def __init__(self, cache_dir: str, format_control: FormatControl) -> None:
+ super().__init__(cache_dir, format_control, {"binary"})
+
+ def get_path_for_link(self, link: Link) -> str:
+ """Return a directory to store cached wheels for link
+
+        Because there can be many wheels for any one sdist, we provide a
+        directory to cache them in, and then consult that directory when
+        looking up cache hits.
+
+        We only insert things into the cache if they have plausible version
+        numbers, so that we don't contaminate the cache with things that were
+        not unique. E.g. ./package might have dozens of installs done for it
+        and build a version of 0.0... and if we built and cached a wheel, we'd
+        end up using the same wheel even if the source had since been edited.
+
+ :param link: The link of the sdist for which this will cache wheels.
+ """
+ parts = self._get_cache_path_parts(link)
+ assert self.cache_dir
+ # Store wheels within the root cache_dir
+ return os.path.join(self.cache_dir, "wheels", *parts)
+
+ def get(
+ self,
+ link: Link,
+ package_name: Optional[str],
+ supported_tags: List[Tag],
+ ) -> Link:
+ candidates = []
+
+ if not package_name:
+ return link
+
+ canonical_package_name = canonicalize_name(package_name)
+ for wheel_name, wheel_dir in self._get_candidates(link, canonical_package_name):
+ try:
+ wheel = Wheel(wheel_name)
+ except InvalidWheelFilename:
+ continue
+ if canonicalize_name(wheel.name) != canonical_package_name:
+ logger.debug(
+ "Ignoring cached wheel %s for %s as it "
+ "does not match the expected distribution name %s.",
+ wheel_name,
+ link,
+ package_name,
+ )
+ continue
+ if not wheel.supported(supported_tags):
+ # Built for a different python/arch/etc
+ continue
+ candidates.append(
+ (
+ wheel.support_index_min(supported_tags),
+ wheel_name,
+ wheel_dir,
+ )
+ )
+
+ if not candidates:
+ return link
+
+ _, wheel_name, wheel_dir = min(candidates)
+ return Link(path_to_url(os.path.join(wheel_dir, wheel_name)))
+
+
+class EphemWheelCache(SimpleWheelCache):
+ """A SimpleWheelCache that creates it's own temporary cache directory"""
+
+ def __init__(self, format_control: FormatControl) -> None:
+ self._temp_dir = TempDirectory(
+ kind=tempdir_kinds.EPHEM_WHEEL_CACHE,
+ globally_managed=True,
+ )
+
+ super().__init__(self._temp_dir.path, format_control)
+
+
+class CacheEntry:
+ def __init__(
+ self,
+ link: Link,
+ persistent: bool,
+ ):
+ self.link = link
+ self.persistent = persistent
+ self.origin: Optional[DirectUrl] = None
+ origin_direct_url_path = Path(self.link.file_path).parent / ORIGIN_JSON_NAME
+ if origin_direct_url_path.exists():
+ self.origin = DirectUrl.from_json(origin_direct_url_path.read_text())
+
+
+class WheelCache(Cache):
+ """Wraps EphemWheelCache and SimpleWheelCache into a single Cache
+
+    This Cache allows for graceful degradation, falling back to the ephem
+    wheel cache when a link is not found in the simple wheel cache first.
+ """
+
+ def __init__(
+ self, cache_dir: str, format_control: Optional[FormatControl] = None
+ ) -> None:
+ if format_control is None:
+ format_control = FormatControl()
+ super().__init__(cache_dir, format_control, {"binary"})
+ self._wheel_cache = SimpleWheelCache(cache_dir, format_control)
+ self._ephem_cache = EphemWheelCache(format_control)
+
+ def get_path_for_link(self, link: Link) -> str:
+ return self._wheel_cache.get_path_for_link(link)
+
+ def get_ephem_path_for_link(self, link: Link) -> str:
+ return self._ephem_cache.get_path_for_link(link)
+
+ def get(
+ self,
+ link: Link,
+ package_name: Optional[str],
+ supported_tags: List[Tag],
+ ) -> Link:
+ cache_entry = self.get_cache_entry(link, package_name, supported_tags)
+ if cache_entry is None:
+ return link
+ return cache_entry.link
+
+ def get_cache_entry(
+ self,
+ link: Link,
+ package_name: Optional[str],
+ supported_tags: List[Tag],
+ ) -> Optional[CacheEntry]:
+ """Returns a CacheEntry with a link to a cached item if it exists or
+ None. The cache entry indicates if the item was found in the persistent
+ or ephemeral cache.
+ """
+ retval = self._wheel_cache.get(
+ link=link,
+ package_name=package_name,
+ supported_tags=supported_tags,
+ )
+ if retval is not link:
+ return CacheEntry(retval, persistent=True)
+
+ retval = self._ephem_cache.get(
+ link=link,
+ package_name=package_name,
+ supported_tags=supported_tags,
+ )
+ if retval is not link:
+ return CacheEntry(retval, persistent=False)
+
+ return None
+
+ @staticmethod
+ def record_download_origin(cache_dir: str, download_info: DirectUrl) -> None:
+ origin_path = Path(cache_dir) / ORIGIN_JSON_NAME
+ if origin_path.is_file():
+ origin = DirectUrl.from_json(origin_path.read_text())
+ # TODO: use DirectUrl.equivalent when https://github.com/pypa/pip/pull/10564
+ # is merged.
+ if origin.url != download_info.url:
+ logger.warning(
+ "Origin URL %s in cache entry %s does not match download URL %s. "
+ "This is likely a pip bug or a cache corruption issue.",
+ origin.url,
+ cache_dir,
+ download_info.url,
+ )
+ origin_path.write_text(download_info.to_json(), encoding="utf-8")
diff --git a/third_party/python/pip/pip/_internal/cli/__init__.py b/third_party/python/pip/pip/_internal/cli/__init__.py
new file mode 100644
index 0000000000..e589bb917e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/__init__.py
@@ -0,0 +1,4 @@
+"""Subpackage containing all of pip's command line interface related code
+"""
+
+# This file intentionally does not import submodules
diff --git a/third_party/python/pip/pip/_internal/cli/autocompletion.py b/third_party/python/pip/pip/_internal/cli/autocompletion.py
new file mode 100644
index 0000000000..226fe84dc0
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/autocompletion.py
@@ -0,0 +1,171 @@
+"""Logic that powers autocompletion installed by ``pip completion``.
+"""
+
+import optparse
+import os
+import sys
+from itertools import chain
+from typing import Any, Iterable, List, Optional
+
+from pip._internal.cli.main_parser import create_main_parser
+from pip._internal.commands import commands_dict, create_command
+from pip._internal.metadata import get_default_environment
+
+
+def autocomplete() -> None:
+ """Entry Point for completion of main and subcommand options."""
+ # Don't complete if user hasn't sourced bash_completion file.
+ if "PIP_AUTO_COMPLETE" not in os.environ:
+ return
+ cwords = os.environ["COMP_WORDS"].split()[1:]
+ cword = int(os.environ["COMP_CWORD"])
+ try:
+ current = cwords[cword - 1]
+ except IndexError:
+ current = ""
+
+ parser = create_main_parser()
+ subcommands = list(commands_dict)
+ options = []
+
+ # subcommand
+ subcommand_name: Optional[str] = None
+ for word in cwords:
+ if word in subcommands:
+ subcommand_name = word
+ break
+ # subcommand options
+ if subcommand_name is not None:
+ # special case: 'help' subcommand has no options
+ if subcommand_name == "help":
+ sys.exit(1)
+ # special case: list locally installed dists for show and uninstall
+ should_list_installed = not current.startswith("-") and subcommand_name in [
+ "show",
+ "uninstall",
+ ]
+ if should_list_installed:
+ env = get_default_environment()
+ lc = current.lower()
+ installed = [
+ dist.canonical_name
+ for dist in env.iter_installed_distributions(local_only=True)
+ if dist.canonical_name.startswith(lc)
+ and dist.canonical_name not in cwords[1:]
+ ]
+ # if there are no dists installed, fall back to option completion
+ if installed:
+ for dist in installed:
+ print(dist)
+ sys.exit(1)
+
+ should_list_installables = (
+ not current.startswith("-") and subcommand_name == "install"
+ )
+ if should_list_installables:
+ for path in auto_complete_paths(current, "path"):
+ print(path)
+ sys.exit(1)
+
+ subcommand = create_command(subcommand_name)
+
+ for opt in subcommand.parser.option_list_all:
+ if opt.help != optparse.SUPPRESS_HELP:
+ for opt_str in opt._long_opts + opt._short_opts:
+ options.append((opt_str, opt.nargs))
+
+ # filter out previously specified options from available options
+ prev_opts = [x.split("=")[0] for x in cwords[1 : cword - 1]]
+ options = [(x, v) for (x, v) in options if x not in prev_opts]
+ # filter options by current input
+ options = [(k, v) for k, v in options if k.startswith(current)]
+ # get completion type given cwords and available subcommand options
+ completion_type = get_path_completion_type(
+ cwords,
+ cword,
+ subcommand.parser.option_list_all,
+ )
+ # get completion files and directories if ``completion_type`` is
+ # ``<file>``, ``<dir>`` or ``<path>``
+ if completion_type:
+ paths = auto_complete_paths(current, completion_type)
+ options = [(path, 0) for path in paths]
+ for option in options:
+ opt_label = option[0]
+ # append '=' to options which require args
+ if option[1] and option[0][:2] == "--":
+ opt_label += "="
+ print(opt_label)
+ else:
+ # show main parser options only when necessary
+
+ opts = [i.option_list for i in parser.option_groups]
+ opts.append(parser.option_list)
+ flattened_opts = chain.from_iterable(opts)
+ if current.startswith("-"):
+ for opt in flattened_opts:
+ if opt.help != optparse.SUPPRESS_HELP:
+ subcommands += opt._long_opts + opt._short_opts
+ else:
+ # get completion type given cwords and all available options
+ completion_type = get_path_completion_type(cwords, cword, flattened_opts)
+ if completion_type:
+ subcommands = list(auto_complete_paths(current, completion_type))
+
+ print(" ".join([x for x in subcommands if x.startswith(current)]))
+ sys.exit(1)
+
+
+def get_path_completion_type(
+ cwords: List[str], cword: int, opts: Iterable[Any]
+) -> Optional[str]:
+ """Get the type of path completion (``file``, ``dir``, ``path`` or None)
+
+    :param cwords: same as the environment variable ``COMP_WORDS``
+    :param cword: same as the environment variable ``COMP_CWORD``
+ :param opts: The available options to check
+ :return: path completion type (``file``, ``dir``, ``path`` or None)
+ """
+ if cword < 2 or not cwords[cword - 2].startswith("-"):
+ return None
+ for opt in opts:
+ if opt.help == optparse.SUPPRESS_HELP:
+ continue
+ for o in str(opt).split("/"):
+ if cwords[cword - 2].split("=")[0] == o:
+ if not opt.metavar or any(
+ x in ("path", "file", "dir") for x in opt.metavar.split("/")
+ ):
+ return opt.metavar
+ return None
+
+
+def auto_complete_paths(current: str, completion_type: str) -> Iterable[str]:
+ """If ``completion_type`` is ``file`` or ``path``, list all regular files
+ and directories starting with ``current``; otherwise only list directories
+ starting with ``current``.
+
+ :param current: The word to be completed
+    :param completion_type: path completion type (``file``, ``path`` or ``dir``)
+ :return: A generator of regular files and/or directories
+ """
+ directory, filename = os.path.split(current)
+ current_path = os.path.abspath(directory)
+ # Don't complete paths if they can't be accessed
+ if not os.access(current_path, os.R_OK):
+ return
+ filename = os.path.normcase(filename)
+ # list all files that start with ``filename``
+ file_list = (
+ x for x in os.listdir(current_path) if os.path.normcase(x).startswith(filename)
+ )
+ for f in file_list:
+ opt = os.path.join(current_path, f)
+ comp_file = os.path.normcase(os.path.join(directory, f))
+        # complete regular files when there is no ``<dir>`` after the option;
+        # complete directories when there is ``<file>``, ``<path>`` or
+        # ``<dir>`` after the option
+ if completion_type != "dir" and os.path.isfile(opt):
+ yield comp_file
+ elif os.path.isdir(opt):
+ yield os.path.join(comp_file, "")
diff --git a/third_party/python/pip/pip/_internal/cli/base_command.py b/third_party/python/pip/pip/_internal/cli/base_command.py
new file mode 100644
index 0000000000..5bd7e67e64
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/base_command.py
@@ -0,0 +1,216 @@
+"""Base Command class, and related routines"""
+
+import functools
+import logging
+import logging.config
+import optparse
+import os
+import sys
+import traceback
+from optparse import Values
+from typing import Any, Callable, List, Optional, Tuple
+
+from pip._vendor.rich import traceback as rich_traceback
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.command_context import CommandContextMixIn
+from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
+from pip._internal.cli.status_codes import (
+ ERROR,
+ PREVIOUS_BUILD_DIR_ERROR,
+ UNKNOWN_ERROR,
+ VIRTUALENV_NOT_FOUND,
+)
+from pip._internal.exceptions import (
+ BadCommand,
+ CommandError,
+ DiagnosticPipError,
+ InstallationError,
+ NetworkConnectionError,
+ PreviousBuildDirError,
+ UninstallationError,
+)
+from pip._internal.utils.filesystem import check_path_owner
+from pip._internal.utils.logging import BrokenStdoutLoggingError, setup_logging
+from pip._internal.utils.misc import get_prog, normalize_path
+from pip._internal.utils.temp_dir import TempDirectoryTypeRegistry as TempDirRegistry
+from pip._internal.utils.temp_dir import global_tempdir_manager, tempdir_registry
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+__all__ = ["Command"]
+
+logger = logging.getLogger(__name__)
+
+
+class Command(CommandContextMixIn):
+ usage: str = ""
+ ignore_require_venv: bool = False
+
+ def __init__(self, name: str, summary: str, isolated: bool = False) -> None:
+ super().__init__()
+
+ self.name = name
+ self.summary = summary
+ self.parser = ConfigOptionParser(
+ usage=self.usage,
+ prog=f"{get_prog()} {name}",
+ formatter=UpdatingDefaultsHelpFormatter(),
+ add_help_option=False,
+ name=name,
+ description=self.__doc__,
+ isolated=isolated,
+ )
+
+ self.tempdir_registry: Optional[TempDirRegistry] = None
+
+ # Commands should add options to this option group
+ optgroup_name = f"{self.name.capitalize()} Options"
+ self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
+
+ # Add the general options
+ gen_opts = cmdoptions.make_option_group(
+ cmdoptions.general_group,
+ self.parser,
+ )
+ self.parser.add_option_group(gen_opts)
+
+ self.add_options()
+
+ def add_options(self) -> None:
+ pass
+
+ def handle_pip_version_check(self, options: Values) -> None:
+ """
+ This is a no-op so that commands by default do not do the pip version
+ check.
+ """
+ # Make sure we do the pip version check if the index_group options
+ # are present.
+ assert not hasattr(options, "no_index")
+
+ def run(self, options: Values, args: List[str]) -> int:
+ raise NotImplementedError
+
+ def parse_args(self, args: List[str]) -> Tuple[Values, List[str]]:
+ # factored out for testability
+ return self.parser.parse_args(args)
+
+ def main(self, args: List[str]) -> int:
+ try:
+ with self.main_context():
+ return self._main(args)
+ finally:
+ logging.shutdown()
+
+ def _main(self, args: List[str]) -> int:
+ # We must initialize this before the tempdir manager, otherwise the
+ # configuration would not be accessible by the time we clean up the
+ # tempdir manager.
+ self.tempdir_registry = self.enter_context(tempdir_registry())
+ # Intentionally set as early as possible so globally-managed temporary
+ # directories are available to the rest of the code.
+ self.enter_context(global_tempdir_manager())
+
+ options, args = self.parse_args(args)
+
+ # Set verbosity so that it can be used elsewhere.
+ self.verbosity = options.verbose - options.quiet
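+        # e.g. "pip -vv -q <command>" yields verbosity == 1.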
+
+ level_number = setup_logging(
+ verbosity=self.verbosity,
+ no_color=options.no_color,
+ user_log_file=options.log,
+ )
+
+        # TODO: Try to get these passed down from the command,
+        # without resorting to os.environ to hold these.
+        # This also affects isolated builds, and it should.
+
+ if options.no_input:
+ os.environ["PIP_NO_INPUT"] = "1"
+
+ if options.exists_action:
+ os.environ["PIP_EXISTS_ACTION"] = " ".join(options.exists_action)
+
+ if options.require_venv and not self.ignore_require_venv:
+ # If a venv is required check if it can really be found
+ if not running_under_virtualenv():
+ logger.critical("Could not find an activated virtualenv (required).")
+ sys.exit(VIRTUALENV_NOT_FOUND)
+
+ if options.cache_dir:
+ options.cache_dir = normalize_path(options.cache_dir)
+ if not check_path_owner(options.cache_dir):
+ logger.warning(
+ "The directory '%s' or its parent directory is not owned "
+ "or is not writable by the current user. The cache "
+ "has been disabled. Check the permissions and owner of "
+ "that directory. If executing pip with sudo, you should "
+ "use sudo's -H flag.",
+ options.cache_dir,
+ )
+ options.cache_dir = None
+
+ def intercepts_unhandled_exc(
+ run_func: Callable[..., int]
+ ) -> Callable[..., int]:
+ @functools.wraps(run_func)
+ def exc_logging_wrapper(*args: Any) -> int:
+ try:
+ status = run_func(*args)
+ assert isinstance(status, int)
+ return status
+ except DiagnosticPipError as exc:
+ logger.error("[present-rich] %s", exc)
+ logger.debug("Exception information:", exc_info=True)
+
+ return ERROR
+ except PreviousBuildDirError as exc:
+ logger.critical(str(exc))
+ logger.debug("Exception information:", exc_info=True)
+
+ return PREVIOUS_BUILD_DIR_ERROR
+ except (
+ InstallationError,
+ UninstallationError,
+ BadCommand,
+ NetworkConnectionError,
+ ) as exc:
+ logger.critical(str(exc))
+ logger.debug("Exception information:", exc_info=True)
+
+ return ERROR
+ except CommandError as exc:
+ logger.critical("%s", exc)
+ logger.debug("Exception information:", exc_info=True)
+
+ return ERROR
+ except BrokenStdoutLoggingError:
+ # Bypass our logger and write any remaining messages to
+ # stderr because stdout no longer works.
+ print("ERROR: Pipe to stdout was broken", file=sys.stderr)
+ if level_number <= logging.DEBUG:
+ traceback.print_exc(file=sys.stderr)
+
+ return ERROR
+ except KeyboardInterrupt:
+ logger.critical("Operation cancelled by user")
+ logger.debug("Exception information:", exc_info=True)
+
+ return ERROR
+ except BaseException:
+ logger.critical("Exception:", exc_info=True)
+
+ return UNKNOWN_ERROR
+
+ return exc_logging_wrapper
+
+ try:
+ if not options.debug_mode:
+ run = intercepts_unhandled_exc(self.run)
+ else:
+ run = self.run
+ rich_traceback.install(show_locals=True)
+ return run(options, args)
+ finally:
+ self.handle_pip_version_check(options)
diff --git a/third_party/python/pip/pip/_internal/cli/cmdoptions.py b/third_party/python/pip/pip/_internal/cli/cmdoptions.py
new file mode 100644
index 0000000000..1f804097e8
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/cmdoptions.py
@@ -0,0 +1,1055 @@
+"""
+shared options and groups
+
+The principle here is to define options once, but *not* instantiate them
+globally. One reason is that options with action='append' can carry state
+between parses. pip parses general options twice internally, and shouldn't
+pass on state. To be consistent, all options follow this design.
+"""
+
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
+import importlib.util
+import logging
+import os
+import textwrap
+from functools import partial
+from optparse import SUPPRESS_HELP, Option, OptionGroup, OptionParser, Values
+from textwrap import dedent
+from typing import Any, Callable, Dict, Optional, Tuple
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli.parser import ConfigOptionParser
+from pip._internal.exceptions import CommandError
+from pip._internal.locations import USER_CACHE_DIR, get_src_prefix
+from pip._internal.models.format_control import FormatControl
+from pip._internal.models.index import PyPI
+from pip._internal.models.target_python import TargetPython
+from pip._internal.utils.hashes import STRONG_HASHES
+from pip._internal.utils.misc import strtobool
+
+logger = logging.getLogger(__name__)
+
+
+def raise_option_error(parser: OptionParser, option: Option, msg: str) -> None:
+ """
+ Raise an option parsing error using parser.error().
+
+ Args:
+ parser: an OptionParser instance.
+ option: an Option instance.
+ msg: the error text.
+ """
+ msg = f"{option} error: {msg}"
+ msg = textwrap.fill(" ".join(msg.split()))
+ parser.error(msg)
+
+
+def make_option_group(group: Dict[str, Any], parser: ConfigOptionParser) -> OptionGroup:
+ """
+ Return an OptionGroup object
+ group -- assumed to be dict with 'name' and 'options' keys
+ parser -- an optparse Parser
+ """
+ option_group = OptionGroup(parser, group["name"])
+ for option in group["options"]:
+ option_group.add_option(option())
+ return option_group
+
+
+def check_dist_restriction(options: Values, check_target: bool = False) -> None:
+ """Function for determining if custom platform options are allowed.
+
+ :param options: The OptionParser options.
+ :param check_target: Whether or not to check if --target is being used.
+ """
+ dist_restriction_set = any(
+ [
+ options.python_version,
+ options.platforms,
+ options.abis,
+ options.implementation,
+ ]
+ )
+
+ binary_only = FormatControl(set(), {":all:"})
+ sdist_dependencies_allowed = (
+ options.format_control != binary_only and not options.ignore_dependencies
+ )
+
+ # Installations or downloads using dist restrictions must not combine
+ # source distributions and dist-specific wheels, as they are not
+ # guaranteed to be locally compatible.
+ if dist_restriction_set and sdist_dependencies_allowed:
+ raise CommandError(
+ "When restricting platform and interpreter constraints using "
+ "--python-version, --platform, --abi, or --implementation, "
+ "either --no-deps must be set, or --only-binary=:all: must be "
+ "set and --no-binary must not be set (or must be set to "
+ ":none:)."
+ )
+
+ if check_target:
+ if dist_restriction_set and not options.target_dir:
+ raise CommandError(
+ "Can not use any platform or abi specific options unless "
+ "installing via '--target'"
+ )
+
+
+def _path_option_check(option: Option, opt: str, value: str) -> str:
+ return os.path.expanduser(value)
+
+
+def _package_name_option_check(option: Option, opt: str, value: str) -> str:
+ return canonicalize_name(value)
+
+
+class PipOption(Option):
+ TYPES = Option.TYPES + ("path", "package_name")
+ TYPE_CHECKER = Option.TYPE_CHECKER.copy()
+ TYPE_CHECKER["package_name"] = _package_name_option_check
+ TYPE_CHECKER["path"] = _path_option_check
+
+
+###########
+# options #
+###########
+
+help_: Callable[..., Option] = partial(
+ Option,
+ "-h",
+ "--help",
+ dest="help",
+ action="help",
+ help="Show help.",
+)
+
+debug_mode: Callable[..., Option] = partial(
+ Option,
+ "--debug",
+ dest="debug_mode",
+ action="store_true",
+ default=False,
+ help=(
+ "Let unhandled exceptions propagate outside the main subroutine, "
+ "instead of logging them to stderr."
+ ),
+)
+
+isolated_mode: Callable[..., Option] = partial(
+ Option,
+ "--isolated",
+ dest="isolated_mode",
+ action="store_true",
+ default=False,
+ help=(
+ "Run pip in an isolated mode, ignoring environment variables and user "
+ "configuration."
+ ),
+)
+
+require_virtualenv: Callable[..., Option] = partial(
+ Option,
+ "--require-virtualenv",
+ "--require-venv",
+ dest="require_venv",
+ action="store_true",
+ default=False,
+ help=(
+ "Allow pip to only run in a virtual environment; "
+ "exit with an error otherwise."
+ ),
+)
+
+override_externally_managed: Callable[..., Option] = partial(
+ Option,
+ "--break-system-packages",
+ dest="override_externally_managed",
+ action="store_true",
+ help="Allow pip to modify an EXTERNALLY-MANAGED Python installation",
+)
+
+python: Callable[..., Option] = partial(
+ Option,
+ "--python",
+ dest="python",
+ help="Run pip with the specified Python interpreter.",
+)
+
+verbose: Callable[..., Option] = partial(
+ Option,
+ "-v",
+ "--verbose",
+ dest="verbose",
+ action="count",
+ default=0,
+ help="Give more output. Option is additive, and can be used up to 3 times.",
+)
+
+no_color: Callable[..., Option] = partial(
+ Option,
+ "--no-color",
+ dest="no_color",
+ action="store_true",
+ default=False,
+ help="Suppress colored output.",
+)
+
+version: Callable[..., Option] = partial(
+ Option,
+ "-V",
+ "--version",
+ dest="version",
+ action="store_true",
+ help="Show version and exit.",
+)
+
+quiet: Callable[..., Option] = partial(
+ Option,
+ "-q",
+ "--quiet",
+ dest="quiet",
+ action="count",
+ default=0,
+ help=(
+ "Give less output. Option is additive, and can be used up to 3"
+ " times (corresponding to WARNING, ERROR, and CRITICAL logging"
+ " levels)."
+ ),
+)
+
+progress_bar: Callable[..., Option] = partial(
+ Option,
+ "--progress-bar",
+ dest="progress_bar",
+ type="choice",
+ choices=["on", "off"],
+ default="on",
+ help="Specify whether the progress bar should be used [on, off] (default: on)",
+)
+
+log: Callable[..., Option] = partial(
+ PipOption,
+ "--log",
+ "--log-file",
+ "--local-log",
+ dest="log",
+ metavar="path",
+ type="path",
+ help="Path to a verbose appending log.",
+)
+
+no_input: Callable[..., Option] = partial(
+ Option,
+ # Don't ask for input
+ "--no-input",
+ dest="no_input",
+ action="store_true",
+ default=False,
+ help="Disable prompting for input.",
+)
+
+proxy: Callable[..., Option] = partial(
+ Option,
+ "--proxy",
+ dest="proxy",
+ type="str",
+ default="",
+ help="Specify a proxy in the form scheme://[user:passwd@]proxy.server:port.",
+)
+
+retries: Callable[..., Option] = partial(
+ Option,
+ "--retries",
+ dest="retries",
+ type="int",
+ default=5,
+ help="Maximum number of retries each connection should attempt "
+ "(default %default times).",
+)
+
+timeout: Callable[..., Option] = partial(
+ Option,
+ "--timeout",
+ "--default-timeout",
+ metavar="sec",
+ dest="timeout",
+ type="float",
+ default=15,
+ help="Set the socket timeout (default %default seconds).",
+)
+
+
+def exists_action() -> Option:
+ return Option(
+ # Option when path already exist
+ "--exists-action",
+ dest="exists_action",
+ type="choice",
+ choices=["s", "i", "w", "b", "a"],
+ default=[],
+ action="append",
+ metavar="action",
+ help="Default action when a path already exists: "
+ "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.",
+ )
+
+
+cert: Callable[..., Option] = partial(
+ PipOption,
+ "--cert",
+ dest="cert",
+ type="path",
+ metavar="path",
+ help=(
+ "Path to PEM-encoded CA certificate bundle. "
+ "If provided, overrides the default. "
+ "See 'SSL Certificate Verification' in pip documentation "
+ "for more information."
+ ),
+)
+
+client_cert: Callable[..., Option] = partial(
+ PipOption,
+ "--client-cert",
+ dest="client_cert",
+ type="path",
+ default=None,
+ metavar="path",
+ help="Path to SSL client certificate, a single file containing the "
+ "private key and the certificate in PEM format.",
+)
+
+index_url: Callable[..., Option] = partial(
+ Option,
+ "-i",
+ "--index-url",
+ "--pypi-url",
+ dest="index_url",
+ metavar="URL",
+ default=PyPI.simple_url,
+ help="Base URL of the Python Package Index (default %default). "
+ "This should point to a repository compliant with PEP 503 "
+ "(the simple repository API) or a local directory laid out "
+ "in the same format.",
+)
+
+
+def extra_index_url() -> Option:
+ return Option(
+ "--extra-index-url",
+ dest="extra_index_urls",
+ metavar="URL",
+ action="append",
+ default=[],
+ help="Extra URLs of package indexes to use in addition to "
+ "--index-url. Should follow the same rules as "
+ "--index-url.",
+ )
+
+
+no_index: Callable[..., Option] = partial(
+ Option,
+ "--no-index",
+ dest="no_index",
+ action="store_true",
+ default=False,
+ help="Ignore package index (only looking at --find-links URLs instead).",
+)
+
+
+def find_links() -> Option:
+ return Option(
+ "-f",
+ "--find-links",
+ dest="find_links",
+ action="append",
+ default=[],
+ metavar="url",
+ help="If a URL or path to an html file, then parse for links to "
+ "archives such as sdist (.tar.gz) or wheel (.whl) files. "
+ "If a local path or file:// URL that's a directory, "
+ "then look for archives in the directory listing. "
+ "Links to VCS project URLs are not supported.",
+ )
+
+
+def trusted_host() -> Option:
+ return Option(
+ "--trusted-host",
+ dest="trusted_hosts",
+ action="append",
+ metavar="HOSTNAME",
+ default=[],
+ help="Mark this host or host:port pair as trusted, even though it "
+ "does not have valid or any HTTPS.",
+ )
+
+
+def constraints() -> Option:
+ return Option(
+ "-c",
+ "--constraint",
+ dest="constraints",
+ action="append",
+ default=[],
+ metavar="file",
+ help="Constrain versions using the given constraints file. "
+ "This option can be used multiple times.",
+ )
+
+
+def requirements() -> Option:
+ return Option(
+ "-r",
+ "--requirement",
+ dest="requirements",
+ action="append",
+ default=[],
+ metavar="file",
+ help="Install from the given requirements file. "
+ "This option can be used multiple times.",
+ )
+
+
+def editable() -> Option:
+ return Option(
+ "-e",
+ "--editable",
+ dest="editables",
+ action="append",
+ default=[],
+ metavar="path/url",
+ help=(
+ "Install a project in editable mode (i.e. setuptools "
+ '"develop mode") from a local project path or a VCS url.'
+ ),
+ )
+
+
+def _handle_src(option: Option, opt_str: str, value: str, parser: OptionParser) -> None:
+ value = os.path.abspath(value)
+ setattr(parser.values, option.dest, value)
+
+
+src: Callable[..., Option] = partial(
+ PipOption,
+ "--src",
+ "--source",
+ "--source-dir",
+ "--source-directory",
+ dest="src_dir",
+ type="path",
+ metavar="dir",
+ default=get_src_prefix(),
+ action="callback",
+ callback=_handle_src,
+ help="Directory to check out editable projects into. "
+ 'The default in a virtualenv is "<venv path>/src". '
+ 'The default for global installs is "<current dir>/src".',
+)
+
+
+def _get_format_control(values: Values, option: Option) -> Any:
+ """Get a format_control object."""
+ return getattr(values, option.dest)
+
+
+def _handle_no_binary(
+ option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+ existing = _get_format_control(parser.values, option)
+ FormatControl.handle_mutual_excludes(
+ value,
+ existing.no_binary,
+ existing.only_binary,
+ )
+
+
+def _handle_only_binary(
+ option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+ existing = _get_format_control(parser.values, option)
+ FormatControl.handle_mutual_excludes(
+ value,
+ existing.only_binary,
+ existing.no_binary,
+ )
+
+
+def no_binary() -> Option:
+ format_control = FormatControl(set(), set())
+ return Option(
+ "--no-binary",
+ dest="format_control",
+ action="callback",
+ callback=_handle_no_binary,
+ type="str",
+ default=format_control,
+ help="Do not use binary packages. Can be supplied multiple times, and "
+ 'each time adds to the existing value. Accepts either ":all:" to '
+ 'disable all binary packages, ":none:" to empty the set (notice '
+ "the colons), or one or more package names with commas between "
+ "them (no colons). Note that some packages are tricky to compile "
+ "and may fail to install when this option is used on them.",
+ )
+
+
+def only_binary() -> Option:
+ format_control = FormatControl(set(), set())
+ return Option(
+ "--only-binary",
+ dest="format_control",
+ action="callback",
+ callback=_handle_only_binary,
+ type="str",
+ default=format_control,
+ help="Do not use source packages. Can be supplied multiple times, and "
+ 'each time adds to the existing value. Accepts either ":all:" to '
+ 'disable all source packages, ":none:" to empty the set, or one '
+ "or more package names with commas between them. Packages "
+ "without binary distributions will fail to install when this "
+ "option is used on them.",
+ )
+
+
+platforms: Callable[..., Option] = partial(
+ Option,
+ "--platform",
+ dest="platforms",
+ metavar="platform",
+ action="append",
+ default=None,
+ help=(
+ "Only use wheels compatible with <platform>. Defaults to the "
+ "platform of the running system. Use this option multiple times to "
+ "specify multiple platforms supported by the target interpreter."
+ ),
+)
+
+
+# This was made a separate function for unit-testing purposes.
+def _convert_python_version(value: str) -> Tuple[Tuple[int, ...], Optional[str]]:
+ """
+ Convert a version string like "3", "37", or "3.7.3" into a tuple of ints.
+
+ :return: A 2-tuple (version_info, error_msg), where `error_msg` is
+ non-None if and only if there was a parsing error.
+ """
+ if not value:
+ # The empty string is the same as not providing a value.
+ return (None, None)
+
+ parts = value.split(".")
+ if len(parts) > 3:
+ return ((), "at most three version parts are allowed")
+
+ if len(parts) == 1:
+ # Then we are in the case of "3" or "37".
+ value = parts[0]
+ if len(value) > 1:
+ parts = [value[0], value[1:]]
+
+ try:
+ version_info = tuple(int(part) for part in parts)
+ except ValueError:
+ return ((), "each version part must be an integer")
+
+ return (version_info, None)
+
+
+def _handle_python_version(
+ option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+ """
+ Handle a provided --python-version value.
+ """
+ version_info, error_msg = _convert_python_version(value)
+ if error_msg is not None:
+ msg = "invalid --python-version value: {!r}: {}".format(
+ value,
+ error_msg,
+ )
+ raise_option_error(parser, option=option, msg=msg)
+
+ parser.values.python_version = version_info
+
+
+python_version: Callable[..., Option] = partial(
+ Option,
+ "--python-version",
+ dest="python_version",
+ metavar="python_version",
+ action="callback",
+ callback=_handle_python_version,
+ type="str",
+ default=None,
+ help=dedent(
+ """\
+ The Python interpreter version to use for wheel and "Requires-Python"
+ compatibility checks. Defaults to a version derived from the running
+ interpreter. The version can be specified using up to three dot-separated
+ integers (e.g. "3" for 3.0.0, "3.7" for 3.7.0, or "3.7.3"). A major-minor
+ version can also be given as a string without dots (e.g. "37" for 3.7.0).
+ """
+ ),
+)
+
+
+implementation: Callable[..., Option] = partial(
+ Option,
+ "--implementation",
+ dest="implementation",
+ metavar="implementation",
+ default=None,
+ help=(
+ "Only use wheels compatible with Python "
+ "implementation <implementation>, e.g. 'pp', 'jy', 'cp', "
+ " or 'ip'. If not specified, then the current "
+ "interpreter implementation is used. Use 'py' to force "
+ "implementation-agnostic wheels."
+ ),
+)
+
+
+abis: Callable[..., Option] = partial(
+ Option,
+ "--abi",
+ dest="abis",
+ metavar="abi",
+ action="append",
+ default=None,
+ help=(
+ "Only use wheels compatible with Python abi <abi>, e.g. 'pypy_41'. "
+ "If not specified, then the current interpreter abi tag is used. "
+ "Use this option multiple times to specify multiple abis supported "
+ "by the target interpreter. Generally you will need to specify "
+ "--implementation, --platform, and --python-version when using this "
+ "option."
+ ),
+)
+
+
+def add_target_python_options(cmd_opts: OptionGroup) -> None:
+ cmd_opts.add_option(platforms())
+ cmd_opts.add_option(python_version())
+ cmd_opts.add_option(implementation())
+ cmd_opts.add_option(abis())
+
+
+def make_target_python(options: Values) -> TargetPython:
+ target_python = TargetPython(
+ platforms=options.platforms,
+ py_version_info=options.python_version,
+ abis=options.abis,
+ implementation=options.implementation,
+ )
+
+ return target_python
+
+
+def prefer_binary() -> Option:
+ return Option(
+ "--prefer-binary",
+ dest="prefer_binary",
+ action="store_true",
+ default=False,
+ help="Prefer older binary packages over newer source packages.",
+ )
+
+
+cache_dir: Callable[..., Option] = partial(
+ PipOption,
+ "--cache-dir",
+ dest="cache_dir",
+ default=USER_CACHE_DIR,
+ metavar="dir",
+ type="path",
+ help="Store the cache data in <dir>.",
+)
+
+
+def _handle_no_cache_dir(
+ option: Option, opt: str, value: str, parser: OptionParser
+) -> None:
+ """
+ Process a value provided for the --no-cache-dir option.
+
+ This is an optparse.Option callback for the --no-cache-dir option.
+ """
+ # The value argument will be None if --no-cache-dir is passed via the
+ # command-line, since the option doesn't accept arguments. However,
+ # the value can be non-None if the option is triggered e.g. by an
+ # environment variable, like PIP_NO_CACHE_DIR=true.
+ if value is not None:
+ # Then parse the string value to get argument error-checking.
+ try:
+ strtobool(value)
+ except ValueError as exc:
+ raise_option_error(parser, option=option, msg=str(exc))
+
+    # Originally, setting PIP_NO_CACHE_DIR to a value that strtobool()
+    # converted to 0 (like "false" or "no") caused cache_dir to be disabled
+    # rather than enabled (though logic would suggest the latter). Thus, we
+    # disable the cache directory not just on values that parse to True,
+    # but (for backwards compatibility reasons) also on values that parse
+    # to False.
+ # In other words, always set it to False if the option is provided in
+ # some (valid) form.
+ parser.values.cache_dir = False
+
+
+no_cache: Callable[..., Option] = partial(
+ Option,
+ "--no-cache-dir",
+ dest="cache_dir",
+ action="callback",
+ callback=_handle_no_cache_dir,
+ help="Disable the cache.",
+)
+
+no_deps: Callable[..., Option] = partial(
+ Option,
+ "--no-deps",
+ "--no-dependencies",
+ dest="ignore_dependencies",
+ action="store_true",
+ default=False,
+ help="Don't install package dependencies.",
+)
+
+ignore_requires_python: Callable[..., Option] = partial(
+ Option,
+ "--ignore-requires-python",
+ dest="ignore_requires_python",
+ action="store_true",
+ help="Ignore the Requires-Python information.",
+)
+
+no_build_isolation: Callable[..., Option] = partial(
+ Option,
+ "--no-build-isolation",
+ dest="build_isolation",
+ action="store_false",
+ default=True,
+ help="Disable isolation when building a modern source distribution. "
+ "Build dependencies specified by PEP 518 must be already installed "
+ "if this option is used.",
+)
+
+check_build_deps: Callable[..., Option] = partial(
+ Option,
+ "--check-build-dependencies",
+ dest="check_build_deps",
+ action="store_true",
+ default=False,
+ help="Check the build dependencies when PEP517 is used.",
+)
+
+
+def _handle_no_use_pep517(
+ option: Option, opt: str, value: str, parser: OptionParser
+) -> None:
+ """
+ Process a value provided for the --no-use-pep517 option.
+
+ This is an optparse.Option callback for the no_use_pep517 option.
+ """
+ # Since --no-use-pep517 doesn't accept arguments, the value argument
+ # will be None if --no-use-pep517 is passed via the command-line.
+ # However, the value can be non-None if the option is triggered e.g.
+ # by an environment variable, for example "PIP_NO_USE_PEP517=true".
+ if value is not None:
+ msg = """A value was passed for --no-use-pep517,
+ probably using either the PIP_NO_USE_PEP517 environment variable
+ or the "no-use-pep517" config file option. Use an appropriate value
+ of the PIP_USE_PEP517 environment variable or the "use-pep517"
+ config file option instead.
+ """
+ raise_option_error(parser, option=option, msg=msg)
+
+    # If the user doesn't wish to use pep517, we check if setuptools is
+    # installed and raise an error if it is not.
+ if not importlib.util.find_spec("setuptools"):
+ msg = "It is not possible to use --no-use-pep517 without setuptools installed."
+ raise_option_error(parser, option=option, msg=msg)
+
+ # Otherwise, --no-use-pep517 was passed via the command-line.
+ parser.values.use_pep517 = False
+
+
+use_pep517: Any = partial(
+ Option,
+ "--use-pep517",
+ dest="use_pep517",
+ action="store_true",
+ default=None,
+ help="Use PEP 517 for building source distributions "
+ "(use --no-use-pep517 to force legacy behaviour).",
+)
+
+no_use_pep517: Any = partial(
+ Option,
+ "--no-use-pep517",
+ dest="use_pep517",
+ action="callback",
+ callback=_handle_no_use_pep517,
+ default=None,
+ help=SUPPRESS_HELP,
+)
+
+
+def _handle_config_settings(
+ option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
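+    # e.g. --config-settings KEY=VAL --config-settings K2=V2
+    #   -> parser.values.config_settings == {"KEY": "VAL", "K2": "V2"}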
+ key, sep, val = value.partition("=")
+ if sep != "=":
+ parser.error(f"Arguments to {opt_str} must be of the form KEY=VAL") # noqa
+ dest = getattr(parser.values, option.dest)
+ if dest is None:
+ dest = {}
+ setattr(parser.values, option.dest, dest)
+ dest[key] = val
+
+
+config_settings: Callable[..., Option] = partial(
+ Option,
+ "--config-settings",
+ dest="config_settings",
+ type=str,
+ action="callback",
+ callback=_handle_config_settings,
+ metavar="settings",
+ help="Configuration settings to be passed to the PEP 517 build backend. "
+ "Settings take the form KEY=VALUE. Use multiple --config-settings options "
+ "to pass multiple keys to the backend.",
+)
+
+install_options: Callable[..., Option] = partial(
+ Option,
+ "--install-option",
+ dest="install_options",
+ action="append",
+ metavar="options",
+ help="This option is deprecated. Using this option with location-changing "
+ "options may cause unexpected behavior. "
+ "Use pip-level options like --user, --prefix, --root, and --target.",
+)
+
+build_options: Callable[..., Option] = partial(
+ Option,
+ "--build-option",
+ dest="build_options",
+ metavar="options",
+ action="append",
+ help="Extra arguments to be supplied to 'setup.py bdist_wheel'.",
+)
+
+global_options: Callable[..., Option] = partial(
+ Option,
+ "--global-option",
+ dest="global_options",
+ action="append",
+ metavar="options",
+ help="Extra global options to be supplied to the setup.py "
+ "call before the install or bdist_wheel command.",
+)
+
+no_clean: Callable[..., Option] = partial(
+ Option,
+ "--no-clean",
+ action="store_true",
+ default=False,
+ help="Don't clean up build directories.",
+)
+
+pre: Callable[..., Option] = partial(
+ Option,
+ "--pre",
+ action="store_true",
+ default=False,
+ help="Include pre-release and development versions. By default, "
+ "pip only finds stable versions.",
+)
+
+disable_pip_version_check: Callable[..., Option] = partial(
+ Option,
+ "--disable-pip-version-check",
+ dest="disable_pip_version_check",
+ action="store_true",
+ default=False,
+ help="Don't periodically check PyPI to determine whether a new version "
+ "of pip is available for download. Implied with --no-index.",
+)
+
+root_user_action: Callable[..., Option] = partial(
+ Option,
+ "--root-user-action",
+ dest="root_user_action",
+ default="warn",
+ choices=["warn", "ignore"],
+ help="Action if pip is run as a root user. By default, a warning message is shown.",
+)
+
+
+def _handle_merge_hash(
+ option: Option, opt_str: str, value: str, parser: OptionParser
+) -> None:
+ """Given a value spelled "algo:digest", append the digest to a list
+ pointed to in a dict by the algo name."""
+ if not parser.values.hashes:
+ parser.values.hashes = {}
+ try:
+ algo, digest = value.split(":", 1)
+ except ValueError:
+ parser.error(
+ "Arguments to {} must be a hash name " # noqa
+ "followed by a value, like --hash=sha256:"
+ "abcde...".format(opt_str)
+ )
+ if algo not in STRONG_HASHES:
+ parser.error(
+ "Allowed hash algorithms for {} are {}.".format( # noqa
+ opt_str, ", ".join(STRONG_HASHES)
+ )
+ )
+ parser.values.hashes.setdefault(algo, []).append(digest)
+
+
+hash: Callable[..., Option] = partial(
+ Option,
+ "--hash",
+ # Hash values eventually end up in InstallRequirement.hashes due to
+ # __dict__ copying in process_line().
+ dest="hashes",
+ action="callback",
+ callback=_handle_merge_hash,
+ type="string",
+ help="Verify that the package's archive matches this "
+ "hash before installing. Example: --hash=sha256:abcdef...",
+)
+
+
+require_hashes: Callable[..., Option] = partial(
+ Option,
+ "--require-hashes",
+ dest="require_hashes",
+ action="store_true",
+ default=False,
+ help="Require a hash to check each requirement against, for "
+ "repeatable installs. This option is implied when any package in a "
+ "requirements file has a --hash option.",
+)
+
+
+list_path: Callable[..., Option] = partial(
+ PipOption,
+ "--path",
+ dest="path",
+ type="path",
+ action="append",
+ help="Restrict to the specified installation path for listing "
+ "packages (can be used multiple times).",
+)
+
+
+def check_list_path_option(options: Values) -> None:
+ if options.path and (options.user or options.local):
+ raise CommandError("Cannot combine '--path' with '--user' or '--local'")
+
+
+list_exclude: Callable[..., Option] = partial(
+ PipOption,
+ "--exclude",
+ dest="excludes",
+ action="append",
+ metavar="package",
+ type="package_name",
+ help="Exclude specified package from the output",
+)
+
+
+no_python_version_warning: Callable[..., Option] = partial(
+ Option,
+ "--no-python-version-warning",
+ dest="no_python_version_warning",
+ action="store_true",
+ default=False,
+ help="Silence deprecation warnings for upcoming unsupported Pythons.",
+)
+
+
+use_new_feature: Callable[..., Option] = partial(
+ Option,
+ "--use-feature",
+ dest="features_enabled",
+ metavar="feature",
+ action="append",
+ default=[],
+ choices=[
+ "fast-deps",
+ "truststore",
+ "no-binary-enable-wheel-cache",
+ ],
+ help="Enable new functionality, that may be backward incompatible.",
+)
+
+use_deprecated_feature: Callable[..., Option] = partial(
+ Option,
+ "--use-deprecated",
+ dest="deprecated_features_enabled",
+ metavar="feature",
+ action="append",
+ default=[],
+ choices=[
+ "legacy-resolver",
+ ],
+ help=("Enable deprecated functionality, that will be removed in the future."),
+)
+
+
+##########
+# groups #
+##########
+
+general_group: Dict[str, Any] = {
+ "name": "General Options",
+ "options": [
+ help_,
+ debug_mode,
+ isolated_mode,
+ require_virtualenv,
+ python,
+ verbose,
+ version,
+ quiet,
+ log,
+ no_input,
+ proxy,
+ retries,
+ timeout,
+ exists_action,
+ trusted_host,
+ cert,
+ client_cert,
+ cache_dir,
+ no_cache,
+ disable_pip_version_check,
+ no_color,
+ no_python_version_warning,
+ use_new_feature,
+ use_deprecated_feature,
+ ],
+}
+
+index_group: Dict[str, Any] = {
+ "name": "Package Index Options",
+ "options": [
+ index_url,
+ extra_index_url,
+ no_index,
+ find_links,
+ ],
+}
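
These option definitions are plain functools.partial wrappers around
optparse.Option: each call to a factory such as no_clean() mints a fresh
Option that a command adds to its own parser, and callback options such as
--hash accumulate state on parser.values. A minimal, self-contained sketch
of both patterns (the merge_hash helper below is a distilled stand-in for
_handle_merge_hash, not pip's API):

    import optparse
    from functools import partial

    def merge_hash(option, opt_str, value, parser):
        # Group repeated "algo:digest" values into a dict of lists,
        # mirroring what _handle_merge_hash does for --hash.
        if not parser.values.hashes:
            parser.values.hashes = {}
        algo, digest = value.split(":", 1)
        parser.values.hashes.setdefault(algo, []).append(digest)

    # A factory in the same style as the module above.
    no_clean = partial(
        optparse.Option,
        "--no-clean",
        action="store_true",
        default=False,
        help="Don't clean up build directories.",
    )

    parser = optparse.OptionParser()
    parser.add_option(no_clean())
    parser.add_option(
        "--hash", dest="hashes", action="callback",
        callback=merge_hash, type="string",
    )

    opts, _ = parser.parse_args(["--no-clean", "--hash=sha256:abc", "--hash=sha256:def"])
    print(opts.no_clean)  # True
    print(opts.hashes)    # {'sha256': ['abc', 'def']}
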
diff --git a/third_party/python/pip/pip/_internal/cli/command_context.py b/third_party/python/pip/pip/_internal/cli/command_context.py
new file mode 100644
index 0000000000..139995ac3f
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/command_context.py
@@ -0,0 +1,27 @@
+from contextlib import ExitStack, contextmanager
+from typing import ContextManager, Generator, TypeVar
+
+_T = TypeVar("_T", covariant=True)
+
+
+class CommandContextMixIn:
+ def __init__(self) -> None:
+ super().__init__()
+ self._in_main_context = False
+ self._main_context = ExitStack()
+
+ @contextmanager
+ def main_context(self) -> Generator[None, None, None]:
+ assert not self._in_main_context
+
+ self._in_main_context = True
+ try:
+ with self._main_context:
+ yield
+ finally:
+ self._in_main_context = False
+
+ def enter_context(self, context_provider: ContextManager[_T]) -> _T:
+ assert self._in_main_context
+
+ return self._main_context.enter_context(context_provider)
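
CommandContextMixIn gives a command a single ExitStack-backed lifetime:
anything registered through enter_context() is torn down exactly when
main_context() exits. A hypothetical usage sketch (DemoCommand and
demo_resource are inventions for illustration, assuming the mixin above is
importable):

    from contextlib import contextmanager

    @contextmanager
    def demo_resource():
        print("opened")
        try:
            yield "handle"
        finally:
            print("closed")

    class DemoCommand(CommandContextMixIn):
        def run(self) -> None:
            handle = self.enter_context(demo_resource())
            print("using", handle)

    cmd = DemoCommand()
    with cmd.main_context():
        cmd.run()
    # prints: opened / using handle / closed
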
diff --git a/third_party/python/pip/pip/_internal/cli/main.py b/third_party/python/pip/pip/_internal/cli/main.py
new file mode 100644
index 0000000000..0e31221543
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/main.py
@@ -0,0 +1,70 @@
+"""Primary application entrypoint.
+"""
+import locale
+import logging
+import os
+import sys
+from typing import List, Optional
+
+from pip._internal.cli.autocompletion import autocomplete
+from pip._internal.cli.main_parser import parse_command
+from pip._internal.commands import create_command
+from pip._internal.exceptions import PipError
+from pip._internal.utils import deprecation
+
+logger = logging.getLogger(__name__)
+
+
+# Do not import and use main() directly! Using it directly is actively
+# discouraged by pip's maintainers. The name, location and behavior of
+# this function are subject to change, so calling it directly is not
+# portable across different pip versions.
+
+# In addition, running pip in-process is unsupported and unsafe. This is
+# elaborated in detail at
+# https://pip.pypa.io/en/stable/user_guide/#using-pip-from-your-program.
+# That document also provides suggestions that should work for nearly
+# all users that are considering importing and using main() directly.
+
+# However, we know that certain users will still want to invoke pip
+# in-process. If you understand and accept the implications of using pip
+# in an unsupported manner, the best approach is to use runpy to avoid
+# depending on the exact location of this entry point.
+
+# The following example shows how to use runpy to invoke pip in that
+# case:
+#
+# sys.argv = ["pip", your, args, here]
+# runpy.run_module("pip", run_name="__main__")
+#
+# Note that this will exit the process after running, unlike a direct
+# call to main. As it is not safe to do any processing after calling
+# main, this should not be an issue in practice.
+
+
+def main(args: Optional[List[str]] = None) -> int:
+ if args is None:
+ args = sys.argv[1:]
+
+ # Configure our deprecation warnings to be sent through loggers
+ deprecation.install_warning_logger()
+
+ autocomplete()
+
+ try:
+ cmd_name, cmd_args = parse_command(args)
+ except PipError as exc:
+ sys.stderr.write(f"ERROR: {exc}")
+ sys.stderr.write(os.linesep)
+ sys.exit(1)
+
+ # Needed for locale.getpreferredencoding(False) to work
+ # in pip._internal.utils.encoding.auto_decode
+ try:
+ locale.setlocale(locale.LC_ALL, "")
+ except locale.Error as e:
+ # setlocale can apparently crash if locales are uninitialized
+ logger.debug("Ignoring error %s when setting locale", e)
+ command = create_command(cmd_name, isolated=("--isolated" in cmd_args))
+
+ return command.main(cmd_args)
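
The comment block above shows the runpy escape hatch; the fully supported
route is to run pip in a child process, which is also what pip's user guide
recommends. For example:

    import subprocess
    import sys

    # Invoke pip against the current interpreter in a subprocess; this is
    # the supported alternative to importing main() directly.
    subprocess.run([sys.executable, "-m", "pip", "--version"], check=True)
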
diff --git a/third_party/python/pip/pip/_internal/cli/main_parser.py b/third_party/python/pip/pip/_internal/cli/main_parser.py
new file mode 100644
index 0000000000..5ade356b9c
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/main_parser.py
@@ -0,0 +1,134 @@
+"""A single place for constructing and exposing the main parser
+"""
+
+import os
+import subprocess
+import sys
+from typing import List, Optional, Tuple
+
+from pip._internal.build_env import get_runnable_pip
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.parser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
+from pip._internal.commands import commands_dict, get_similar_commands
+from pip._internal.exceptions import CommandError
+from pip._internal.utils.misc import get_pip_version, get_prog
+
+__all__ = ["create_main_parser", "parse_command"]
+
+
+def create_main_parser() -> ConfigOptionParser:
+ """Creates and returns the main parser for pip's CLI"""
+
+ parser = ConfigOptionParser(
+ usage="\n%prog <command> [options]",
+ add_help_option=False,
+ formatter=UpdatingDefaultsHelpFormatter(),
+ name="global",
+ prog=get_prog(),
+ )
+ parser.disable_interspersed_args()
+
+ parser.version = get_pip_version()
+
+ # add the general options
+ gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
+ parser.add_option_group(gen_opts)
+
+ # so the help formatter knows
+ parser.main = True # type: ignore
+
+ # create command listing for description
+ description = [""] + [
+ f"{name:27} {command_info.summary}"
+ for name, command_info in commands_dict.items()
+ ]
+ parser.description = "\n".join(description)
+
+ return parser
+
+
+def identify_python_interpreter(python: str) -> Optional[str]:
+ # If the named file exists, use it.
+ # If it's a directory, assume it's a virtual environment and
+ # look for the environment's Python executable.
+ if os.path.exists(python):
+ if os.path.isdir(python):
+ # bin/python for Unix, Scripts/python.exe for Windows.
+ # Try both to handle odd setups like Cygwin.
+ for exe in ("bin/python", "Scripts/python.exe"):
+ py = os.path.join(python, exe)
+ if os.path.exists(py):
+ return py
+ else:
+ return python
+
+ # Could not find the interpreter specified
+ return None
+
+
+def parse_command(args: List[str]) -> Tuple[str, List[str]]:
+ parser = create_main_parser()
+
+ # Note: parser calls disable_interspersed_args(), so the result of this
+ # call is to split the initial args into the general options before the
+ # subcommand and everything else.
+ # For example:
+ # args: ['--timeout=5', 'install', '--user', 'INITools']
+ # general_options: ['--timeout=5']
+ # args_else: ['install', '--user', 'INITools']
+ general_options, args_else = parser.parse_args(args)
+
+ # --python
+ if general_options.python and "_PIP_RUNNING_IN_SUBPROCESS" not in os.environ:
+ # Re-invoke pip using the specified Python interpreter
+ interpreter = identify_python_interpreter(general_options.python)
+ if interpreter is None:
+ raise CommandError(
+ f"Could not locate Python interpreter {general_options.python}"
+ )
+
+ pip_cmd = [
+ interpreter,
+ get_runnable_pip(),
+ ]
+ pip_cmd.extend(args)
+
+ # Set a flag so the child doesn't re-invoke itself, causing
+ # an infinite loop.
+ os.environ["_PIP_RUNNING_IN_SUBPROCESS"] = "1"
+ returncode = 0
+ try:
+ proc = subprocess.run(pip_cmd)
+ returncode = proc.returncode
+ except (subprocess.SubprocessError, OSError) as exc:
+ raise CommandError(f"Failed to run pip under {interpreter}: {exc}")
+ sys.exit(returncode)
+
+ # --version
+ if general_options.version:
+ sys.stdout.write(parser.version)
+ sys.stdout.write(os.linesep)
+ sys.exit()
+
+ # pip || pip help -> print_help()
+ if not args_else or (args_else[0] == "help" and len(args_else) == 1):
+ parser.print_help()
+ sys.exit()
+
+ # the subcommand name
+ cmd_name = args_else[0]
+
+ if cmd_name not in commands_dict:
+ guess = get_similar_commands(cmd_name)
+
+ msg = [f'unknown command "{cmd_name}"']
+ if guess:
+ msg.append(f'maybe you meant "{guess}"')
+
+ raise CommandError(" - ".join(msg))
+
+ # all the args without the subcommand
+ cmd_args = args[:]
+ cmd_args.remove(cmd_name)
+
+ return cmd_name, cmd_args
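
identify_python_interpreter() accepts either a concrete executable path or a
directory that looks like a virtual environment, probing both the POSIX and
Windows layouts. A quick sketch against a fake venv layout (temporary paths
only, so no real interpreter is needed; assumes the function above is
importable):

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as venv:
        os.makedirs(os.path.join(venv, "bin"))
        open(os.path.join(venv, "bin", "python"), "w").close()  # fake executable
        print(identify_python_interpreter(venv))        # .../bin/python
        print(identify_python_interpreter("/no/such"))  # None
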
diff --git a/third_party/python/pip/pip/_internal/cli/parser.py b/third_party/python/pip/pip/_internal/cli/parser.py
new file mode 100644
index 0000000000..c762cf2781
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/parser.py
@@ -0,0 +1,294 @@
+"""Base option parser setup"""
+
+import logging
+import optparse
+import shutil
+import sys
+import textwrap
+from contextlib import suppress
+from typing import Any, Dict, Generator, List, Tuple
+
+from pip._internal.cli.status_codes import UNKNOWN_ERROR
+from pip._internal.configuration import Configuration, ConfigurationError
+from pip._internal.utils.misc import redact_auth_from_url, strtobool
+
+logger = logging.getLogger(__name__)
+
+
+class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
+ """A prettier/less verbose help formatter for optparse."""
+
+ def __init__(self, *args: Any, **kwargs: Any) -> None:
+ # help position must be aligned with __init__.parseopts.description
+ kwargs["max_help_position"] = 30
+ kwargs["indent_increment"] = 1
+ kwargs["width"] = shutil.get_terminal_size()[0] - 2
+ super().__init__(*args, **kwargs)
+
+ def format_option_strings(self, option: optparse.Option) -> str:
+ return self._format_option_strings(option)
+
+ def _format_option_strings(
+ self, option: optparse.Option, mvarfmt: str = " <{}>", optsep: str = ", "
+ ) -> str:
+ """
+ Return a comma-separated list of option strings and metavars.
+
+ :param option: the Option to format, e.g. one with opts ('-f', '--format')
+ :param mvarfmt: metavar format string
+ :param optsep: separator
+ """
+ opts = []
+
+ if option._short_opts:
+ opts.append(option._short_opts[0])
+ if option._long_opts:
+ opts.append(option._long_opts[0])
+ if len(opts) > 1:
+ opts.insert(1, optsep)
+
+ if option.takes_value():
+ assert option.dest is not None
+ metavar = option.metavar or option.dest.lower()
+ opts.append(mvarfmt.format(metavar.lower()))
+
+ return "".join(opts)
+
+ def format_heading(self, heading: str) -> str:
+ if heading == "Options":
+ return ""
+ return heading + ":\n"
+
+ def format_usage(self, usage: str) -> str:
+ """
+ Ensure there is only one newline between usage and the first heading
+ if there is no description.
+ """
+ msg = "\nUsage: {}\n".format(self.indent_lines(textwrap.dedent(usage), " "))
+ return msg
+
+ def format_description(self, description: str) -> str:
+ # leave full control over description to us
+ if description:
+ if hasattr(self.parser, "main"):
+ label = "Commands"
+ else:
+ label = "Description"
+ # some doc strings have initial newlines, some don't
+ description = description.lstrip("\n")
+ # some doc strings have final newlines and spaces, some don't
+ description = description.rstrip()
+ # dedent, then reindent
+ description = self.indent_lines(textwrap.dedent(description), " ")
+ description = f"{label}:\n{description}\n"
+ return description
+ else:
+ return ""
+
+ def format_epilog(self, epilog: str) -> str:
+ # leave full control over epilog to us
+ if epilog:
+ return epilog
+ else:
+ return ""
+
+ def indent_lines(self, text: str, indent: str) -> str:
+ new_lines = [indent + line for line in text.split("\n")]
+ return "\n".join(new_lines)
+
+
+class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
+ """Custom help formatter for use in ConfigOptionParser.
+
+ This updates the defaults before expanding them, allowing
+ them to show up correctly in the help listing.
+
+ It also redacts auth credentials from URL-type options.
+ """
+
+ def expand_default(self, option: optparse.Option) -> str:
+ default_values = None
+ if self.parser is not None:
+ assert isinstance(self.parser, ConfigOptionParser)
+ self.parser._update_defaults(self.parser.defaults)
+ assert option.dest is not None
+ default_values = self.parser.defaults.get(option.dest)
+ help_text = super().expand_default(option)
+
+ if default_values and option.metavar == "URL":
+ if isinstance(default_values, str):
+ default_values = [default_values]
+
+ # If it's still not a list, skip redaction and return the help text as-is
+ if not isinstance(default_values, list):
+ default_values = []
+
+ for val in default_values:
+ help_text = help_text.replace(val, redact_auth_from_url(val))
+
+ return help_text
+
+
+class CustomOptionParser(optparse.OptionParser):
+ def insert_option_group(
+ self, idx: int, *args: Any, **kwargs: Any
+ ) -> optparse.OptionGroup:
+ """Insert an OptionGroup at a given position."""
+ group = self.add_option_group(*args, **kwargs)
+
+ self.option_groups.pop()
+ self.option_groups.insert(idx, group)
+
+ return group
+
+ @property
+ def option_list_all(self) -> List[optparse.Option]:
+ """Get a list of all options, including those in option groups."""
+ res = self.option_list[:]
+ for i in self.option_groups:
+ res.extend(i.option_list)
+
+ return res
+
+
+class ConfigOptionParser(CustomOptionParser):
+ """Custom option parser which updates its defaults by checking the
+ configuration files and environmental variables"""
+
+ def __init__(
+ self,
+ *args: Any,
+ name: str,
+ isolated: bool = False,
+ **kwargs: Any,
+ ) -> None:
+ self.name = name
+ self.config = Configuration(isolated)
+
+ assert self.name
+ super().__init__(*args, **kwargs)
+
+ def check_default(self, option: optparse.Option, key: str, val: Any) -> Any:
+ try:
+ return option.check_value(key, val)
+ except optparse.OptionValueError as exc:
+ print(f"An error occurred during configuration: {exc}")
+ sys.exit(3)
+
+ def _get_ordered_configuration_items(
+ self,
+ ) -> Generator[Tuple[str, Any], None, None]:
+ # Configuration gives keys in an unordered manner. Order them.
+ override_order = ["global", self.name, ":env:"]
+
+ # Pool the options into different groups
+ section_items: Dict[str, List[Tuple[str, Any]]] = {
+ name: [] for name in override_order
+ }
+ for section_key, val in self.config.items():
+ # ignore empty values
+ if not val:
+ logger.debug(
+ "Ignoring configuration key '%s' as it's value is empty.",
+ section_key,
+ )
+ continue
+
+ section, key = section_key.split(".", 1)
+ if section in override_order:
+ section_items[section].append((key, val))
+
+ # Yield each group in their override order
+ for section in override_order:
+ for key, val in section_items[section]:
+ yield key, val
+
+ def _update_defaults(self, defaults: Dict[str, Any]) -> Dict[str, Any]:
+ """Updates the given defaults with values from the config files and
+ the environ. Does a little special handling for certain types of
+ options (lists)."""
+
+ # Accumulate complex default state.
+ self.values = optparse.Values(self.defaults)
+ late_eval = set()
+ # Then set the options with those values
+ for key, val in self._get_ordered_configuration_items():
+ # '--' because configuration supports only long names
+ option = self.get_option("--" + key)
+
+ # Ignore options not present in this parser. E.g. non-globals put
+ # in [global] by users that want them to apply to all applicable
+ # commands.
+ if option is None:
+ continue
+
+ assert option.dest is not None
+
+ if option.action in ("store_true", "store_false"):
+ try:
+ val = strtobool(val)
+ except ValueError:
+ self.error(
+ "{} is not a valid value for {} option, " # noqa
+ "please specify a boolean value like yes/no, "
+ "true/false or 1/0 instead.".format(val, key)
+ )
+ elif option.action == "count":
+ with suppress(ValueError):
+ val = strtobool(val)
+ with suppress(ValueError):
+ val = int(val)
+ if not isinstance(val, int) or val < 0:
+ self.error(
+ "{} is not a valid value for {} option, " # noqa
+ "please instead specify either a non-negative integer "
+ "or a boolean value like yes/no or false/true "
+ "which is equivalent to 1/0.".format(val, key)
+ )
+ elif option.action == "append":
+ val = val.split()
+ val = [self.check_default(option, key, v) for v in val]
+ elif option.action == "callback":
+ assert option.callback is not None
+ late_eval.add(option.dest)
+ opt_str = option.get_opt_string()
+ val = option.convert_value(opt_str, val)
+ # From take_action
+ args = option.callback_args or ()
+ kwargs = option.callback_kwargs or {}
+ option.callback(option, opt_str, val, self, *args, **kwargs)
+ else:
+ val = self.check_default(option, key, val)
+
+ defaults[option.dest] = val
+
+ for key in late_eval:
+ defaults[key] = getattr(self.values, key)
+ self.values = None
+ return defaults
+
+ def get_default_values(self) -> optparse.Values:
+ """Overriding to make updating the defaults after instantiation of
+ the option parser possible, _update_defaults() does the dirty work."""
+ if not self.process_default_values:
+ # Old, pre-Optik 1.5 behaviour.
+ return optparse.Values(self.defaults)
+
+ # Load the configuration, or exit with an error if it fails
+ try:
+ self.config.load()
+ except ConfigurationError as err:
+ self.exit(UNKNOWN_ERROR, str(err))
+
+ defaults = self._update_defaults(self.defaults.copy()) # ours
+ for option in self._get_all_options():
+ assert option.dest is not None
+ default = defaults.get(option.dest)
+ if isinstance(default, str):
+ opt_str = option.get_opt_string()
+ defaults[option.dest] = option.check_value(opt_str, default)
+ return optparse.Values(defaults)
+
+ def error(self, msg: str) -> None:
+ self.print_usage(sys.stderr)
+ self.exit(UNKNOWN_ERROR, f"{msg}\n")
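
The precedence implemented by _get_ordered_configuration_items() is fixed:
values from [global] are yielded first, command-specific sections next, and
environment-derived keys (":env:") last, so later sources overwrite earlier
ones when _update_defaults() writes them into the defaults dict. A distilled
illustration of that merge order, with plain dicts standing in for pip's
Configuration object:

    override_order = ["global", "install", ":env:"]
    config = {
        "global.timeout": "15",   # pip.conf [global]
        "install.timeout": "30",  # pip.conf [install]
        ":env:.timeout": "60",    # PIP_TIMEOUT environment variable
    }

    defaults = {}
    for section in override_order:
        for section_key, val in config.items():
            if section_key.split(".", 1)[0] == section:
                defaults[section_key.split(".", 1)[1]] = val

    print(defaults)  # {'timeout': '60'} -- the environment wins
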
diff --git a/third_party/python/pip/pip/_internal/cli/progress_bars.py b/third_party/python/pip/pip/_internal/cli/progress_bars.py
new file mode 100644
index 0000000000..0ad14031ca
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/progress_bars.py
@@ -0,0 +1,68 @@
+import functools
+from typing import Callable, Generator, Iterable, Iterator, Optional, Tuple
+
+from pip._vendor.rich.progress import (
+ BarColumn,
+ DownloadColumn,
+ FileSizeColumn,
+ Progress,
+ ProgressColumn,
+ SpinnerColumn,
+ TextColumn,
+ TimeElapsedColumn,
+ TimeRemainingColumn,
+ TransferSpeedColumn,
+)
+
+from pip._internal.utils.logging import get_indentation
+
+DownloadProgressRenderer = Callable[[Iterable[bytes]], Iterator[bytes]]
+
+
+def _rich_progress_bar(
+ iterable: Iterable[bytes],
+ *,
+ bar_type: str,
+ size: int,
+) -> Generator[bytes, None, None]:
+ assert bar_type == "on", "This should only be used in the default mode."
+
+ if not size:
+ total = float("inf")
+ columns: Tuple[ProgressColumn, ...] = (
+ TextColumn("[progress.description]{task.description}"),
+ SpinnerColumn("line", speed=1.5),
+ FileSizeColumn(),
+ TransferSpeedColumn(),
+ TimeElapsedColumn(),
+ )
+ else:
+ total = size
+ columns = (
+ TextColumn("[progress.description]{task.description}"),
+ BarColumn(),
+ DownloadColumn(),
+ TransferSpeedColumn(),
+ TextColumn("eta"),
+ TimeRemainingColumn(),
+ )
+
+ progress = Progress(*columns, refresh_per_second=30)
+ task_id = progress.add_task(" " * (get_indentation() + 2), total=total)
+ with progress:
+ for chunk in iterable:
+ yield chunk
+ progress.update(task_id, advance=len(chunk))
+
+
+def get_download_progress_renderer(
+ *, bar_type: str, size: Optional[int] = None
+) -> DownloadProgressRenderer:
+ """Get an object that can be used to render the download progress.
+
+ Returns a callable that takes an iterable to "wrap".
+ """
+ if bar_type == "on":
+ return functools.partial(_rich_progress_bar, bar_type=bar_type, size=size)
+ else:
+ return iter # no-op when passed an iterator
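
The renderer is just a generator wrapper: it yields the same byte chunks it
receives while advancing a rich progress bar as a side effect. A sketch of
the intended use (requires pip's vendored rich; the chunk sizes are made up):

    renderer = get_download_progress_renderer(bar_type="on", size=4 * 256)
    chunks = iter([b"x" * 256] * 4)
    for chunk in renderer(chunks):
        pass  # write chunk to disk; the bar advances by len(chunk)
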
diff --git a/third_party/python/pip/pip/_internal/cli/req_command.py b/third_party/python/pip/pip/_internal/cli/req_command.py
new file mode 100644
index 0000000000..1044809f04
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/req_command.py
@@ -0,0 +1,502 @@
+"""Contains the Command base classes that depend on PipSession.
+
+The classes in this module are in a separate module so the commands not
+needing download / PackageFinder capability don't unnecessarily import the
+PackageFinder machinery and all its vendored dependencies, etc.
+"""
+
+import logging
+import os
+import sys
+from functools import partial
+from optparse import Values
+from typing import TYPE_CHECKING, Any, List, Optional, Tuple
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.command_context import CommandContextMixIn
+from pip._internal.exceptions import CommandError, PreviousBuildDirError
+from pip._internal.index.collector import LinkCollector
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.models.selection_prefs import SelectionPreferences
+from pip._internal.models.target_python import TargetPython
+from pip._internal.network.session import PipSession
+from pip._internal.operations.build.build_tracker import BuildTracker
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req.constructors import (
+ install_req_from_editable,
+ install_req_from_line,
+ install_req_from_parsed_requirement,
+ install_req_from_req_string,
+)
+from pip._internal.req.req_file import parse_requirements
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.resolution.base import BaseResolver
+from pip._internal.self_outdated_check import pip_self_version_check
+from pip._internal.utils.temp_dir import (
+ TempDirectory,
+ TempDirectoryTypeRegistry,
+ tempdir_kinds,
+)
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+if TYPE_CHECKING:
+ from ssl import SSLContext
+
+logger = logging.getLogger(__name__)
+
+
+def _create_truststore_ssl_context() -> Optional["SSLContext"]:
+ if sys.version_info < (3, 10):
+ raise CommandError("The truststore feature is only available for Python 3.10+")
+
+ try:
+ import ssl
+ except ImportError:
+ logger.warning("Disabling truststore since ssl support is missing")
+ return None
+
+ try:
+ import truststore
+ except ImportError:
+ raise CommandError(
+ "To use the truststore feature, 'truststore' must be installed into "
+ "pip's current environment."
+ )
+
+ return truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
+
+
+class SessionCommandMixin(CommandContextMixIn):
+
+ """
+ A class mixin for command classes needing _build_session().
+ """
+
+ def __init__(self) -> None:
+ super().__init__()
+ self._session: Optional[PipSession] = None
+
+ @classmethod
+ def _get_index_urls(cls, options: Values) -> Optional[List[str]]:
+ """Return a list of index urls from user-provided options."""
+ index_urls = []
+ if not getattr(options, "no_index", False):
+ url = getattr(options, "index_url", None)
+ if url:
+ index_urls.append(url)
+ urls = getattr(options, "extra_index_urls", None)
+ if urls:
+ index_urls.extend(urls)
+ # Return None rather than an empty list
+ return index_urls or None
+
+ def get_default_session(self, options: Values) -> PipSession:
+ """Get a default-managed session."""
+ if self._session is None:
+ self._session = self.enter_context(self._build_session(options))
+ # there's no type annotation on requests.Session, so it's
+ # automatically ContextManager[Any] and self._session becomes Any,
+ # then https://github.com/python/mypy/issues/7696 kicks in
+ assert self._session is not None
+ return self._session
+
+ def _build_session(
+ self,
+ options: Values,
+ retries: Optional[int] = None,
+ timeout: Optional[int] = None,
+ fallback_to_certifi: bool = False,
+ ) -> PipSession:
+ cache_dir = options.cache_dir
+ assert not cache_dir or os.path.isabs(cache_dir)
+
+ if "truststore" in options.features_enabled:
+ try:
+ ssl_context = _create_truststore_ssl_context()
+ except Exception:
+ if not fallback_to_certifi:
+ raise
+ ssl_context = None
+ else:
+ ssl_context = None
+
+ session = PipSession(
+ cache=os.path.join(cache_dir, "http") if cache_dir else None,
+ retries=retries if retries is not None else options.retries,
+ trusted_hosts=options.trusted_hosts,
+ index_urls=self._get_index_urls(options),
+ ssl_context=ssl_context,
+ )
+
+ # Handle custom ca-bundles from the user
+ if options.cert:
+ session.verify = options.cert
+
+ # Handle SSL client certificate
+ if options.client_cert:
+ session.cert = options.client_cert
+
+ # Handle timeouts
+ if options.timeout or timeout:
+ session.timeout = timeout if timeout is not None else options.timeout
+
+ # Handle configured proxies
+ if options.proxy:
+ session.proxies = {
+ "http": options.proxy,
+ "https": options.proxy,
+ }
+
+ # Determine if we can prompt the user for authentication or not
+ session.auth.prompting = not options.no_input
+
+ return session
+
+
+class IndexGroupCommand(Command, SessionCommandMixin):
+
+ """
+ Abstract base class for commands with the index_group options.
+
+ This also corresponds to the commands that permit the pip version check.
+ """
+
+ def handle_pip_version_check(self, options: Values) -> None:
+ """
+ Do the pip version check if not disabled.
+
+ This overrides the default behavior of not doing the check.
+ """
+ # Make sure the index_group options are present.
+ assert hasattr(options, "no_index")
+
+ if options.disable_pip_version_check or options.no_index:
+ return
+
+ # Otherwise, check if we're using the latest version of pip available.
+ session = self._build_session(
+ options,
+ retries=0,
+ timeout=min(5, options.timeout),
+ # This is set to ensure the function does not fail when truststore is
+ # specified in use-feature but cannot be loaded. This usually raises a
+ # CommandError and shows a nice user-facing error, but this function is not
+ # called in that try-except block.
+ fallback_to_certifi=True,
+ )
+ with session:
+ pip_self_version_check(session, options)
+
+
+KEEPABLE_TEMPDIR_TYPES = [
+ tempdir_kinds.BUILD_ENV,
+ tempdir_kinds.EPHEM_WHEEL_CACHE,
+ tempdir_kinds.REQ_BUILD,
+]
+
+
+def warn_if_run_as_root() -> None:
+ """Output a warning for sudo users on Unix.
+
+ In a virtual environment, sudo pip still writes to the virtualenv.
+ On Windows, users may run pip as Administrator without issues.
+ This warning only applies to Unix root users outside of a virtualenv.
+ """
+ if running_under_virtualenv():
+ return
+ if not hasattr(os, "getuid"):
+ return
+ # On Windows, there are no "system managed" Python packages. Installing as
+ # Administrator via pip is the correct way of updating system environments.
+ #
+ # We choose sys.platform over utils.compat.WINDOWS here to enable Mypy platform
+ # checks: https://mypy.readthedocs.io/en/stable/common_issues.html
+ if sys.platform == "win32" or sys.platform == "cygwin":
+ return
+
+ if os.getuid() != 0:
+ return
+
+ logger.warning(
+ "Running pip as the 'root' user can result in broken permissions and "
+ "conflicting behaviour with the system package manager. "
+ "It is recommended to use a virtual environment instead: "
+ "https://pip.pypa.io/warnings/venv"
+ )
+
+
+def with_cleanup(func: Any) -> Any:
+ """Decorator for common logic related to managing temporary
+ directories.
+ """
+
+ def configure_tempdir_registry(registry: TempDirectoryTypeRegistry) -> None:
+ for t in KEEPABLE_TEMPDIR_TYPES:
+ registry.set_delete(t, False)
+
+ def wrapper(
+ self: RequirementCommand, options: Values, args: List[Any]
+ ) -> Optional[int]:
+ assert self.tempdir_registry is not None
+ if options.no_clean:
+ configure_tempdir_registry(self.tempdir_registry)
+
+ try:
+ return func(self, options, args)
+ except PreviousBuildDirError:
+ # This kind of conflict can occur when the user passes an explicit
+ # build directory with a pre-existing folder. In that case we do
+ # not want to accidentally remove it.
+ configure_tempdir_registry(self.tempdir_registry)
+ raise
+
+ return wrapper
+
+
+class RequirementCommand(IndexGroupCommand):
+ def __init__(self, *args: Any, **kw: Any) -> None:
+ super().__init__(*args, **kw)
+
+ self.cmd_opts.add_option(cmdoptions.no_clean())
+
+ @staticmethod
+ def determine_resolver_variant(options: Values) -> str:
+ """Determines which resolver should be used, based on the given options."""
+ if "legacy-resolver" in options.deprecated_features_enabled:
+ return "legacy"
+
+ return "2020-resolver"
+
+ @classmethod
+ def make_requirement_preparer(
+ cls,
+ temp_build_dir: TempDirectory,
+ options: Values,
+ build_tracker: BuildTracker,
+ session: PipSession,
+ finder: PackageFinder,
+ use_user_site: bool,
+ download_dir: Optional[str] = None,
+ verbosity: int = 0,
+ ) -> RequirementPreparer:
+ """
+ Create a RequirementPreparer instance for the given parameters.
+ """
+ temp_build_dir_path = temp_build_dir.path
+ assert temp_build_dir_path is not None
+
+ resolver_variant = cls.determine_resolver_variant(options)
+ if resolver_variant == "2020-resolver":
+ lazy_wheel = "fast-deps" in options.features_enabled
+ if lazy_wheel:
+ logger.warning(
+ "pip is using lazily downloaded wheels using HTTP "
+ "range requests to obtain dependency information. "
+ "This experimental feature is enabled through "
+ "--use-feature=fast-deps and it is not ready for "
+ "production."
+ )
+ else:
+ lazy_wheel = False
+ if "fast-deps" in options.features_enabled:
+ logger.warning(
+ "fast-deps has no effect when used with the legacy resolver."
+ )
+
+ return RequirementPreparer(
+ build_dir=temp_build_dir_path,
+ src_dir=options.src_dir,
+ download_dir=download_dir,
+ build_isolation=options.build_isolation,
+ check_build_deps=options.check_build_deps,
+ build_tracker=build_tracker,
+ session=session,
+ progress_bar=options.progress_bar,
+ finder=finder,
+ require_hashes=options.require_hashes,
+ use_user_site=use_user_site,
+ lazy_wheel=lazy_wheel,
+ verbosity=verbosity,
+ )
+
+ @classmethod
+ def make_resolver(
+ cls,
+ preparer: RequirementPreparer,
+ finder: PackageFinder,
+ options: Values,
+ wheel_cache: Optional[WheelCache] = None,
+ use_user_site: bool = False,
+ ignore_installed: bool = True,
+ ignore_requires_python: bool = False,
+ force_reinstall: bool = False,
+ upgrade_strategy: str = "to-satisfy-only",
+ use_pep517: Optional[bool] = None,
+ py_version_info: Optional[Tuple[int, ...]] = None,
+ ) -> BaseResolver:
+ """
+ Create a Resolver instance for the given parameters.
+ """
+ make_install_req = partial(
+ install_req_from_req_string,
+ isolated=options.isolated_mode,
+ use_pep517=use_pep517,
+ config_settings=getattr(options, "config_settings", None),
+ )
+ resolver_variant = cls.determine_resolver_variant(options)
+ # The long import name and duplicated invocation are needed to convince
+ # Mypy to typecheck this correctly. Otherwise it would complain about
+ # the "Resolver" class being redefined.
+ if resolver_variant == "2020-resolver":
+ import pip._internal.resolution.resolvelib.resolver
+
+ return pip._internal.resolution.resolvelib.resolver.Resolver(
+ preparer=preparer,
+ finder=finder,
+ wheel_cache=wheel_cache,
+ make_install_req=make_install_req,
+ use_user_site=use_user_site,
+ ignore_dependencies=options.ignore_dependencies,
+ ignore_installed=ignore_installed,
+ ignore_requires_python=ignore_requires_python,
+ force_reinstall=force_reinstall,
+ upgrade_strategy=upgrade_strategy,
+ py_version_info=py_version_info,
+ )
+ import pip._internal.resolution.legacy.resolver
+
+ return pip._internal.resolution.legacy.resolver.Resolver(
+ preparer=preparer,
+ finder=finder,
+ wheel_cache=wheel_cache,
+ make_install_req=make_install_req,
+ use_user_site=use_user_site,
+ ignore_dependencies=options.ignore_dependencies,
+ ignore_installed=ignore_installed,
+ ignore_requires_python=ignore_requires_python,
+ force_reinstall=force_reinstall,
+ upgrade_strategy=upgrade_strategy,
+ py_version_info=py_version_info,
+ )
+
+ def get_requirements(
+ self,
+ args: List[str],
+ options: Values,
+ finder: PackageFinder,
+ session: PipSession,
+ ) -> List[InstallRequirement]:
+ """
+ Parse command-line arguments into the corresponding requirements.
+ """
+ requirements: List[InstallRequirement] = []
+ for filename in options.constraints:
+ for parsed_req in parse_requirements(
+ filename,
+ constraint=True,
+ finder=finder,
+ options=options,
+ session=session,
+ ):
+ req_to_add = install_req_from_parsed_requirement(
+ parsed_req,
+ isolated=options.isolated_mode,
+ user_supplied=False,
+ )
+ requirements.append(req_to_add)
+
+ for req in args:
+ req_to_add = install_req_from_line(
+ req,
+ None,
+ isolated=options.isolated_mode,
+ use_pep517=options.use_pep517,
+ user_supplied=True,
+ config_settings=getattr(options, "config_settings", None),
+ )
+ requirements.append(req_to_add)
+
+ for req in options.editables:
+ req_to_add = install_req_from_editable(
+ req,
+ user_supplied=True,
+ isolated=options.isolated_mode,
+ use_pep517=options.use_pep517,
+ config_settings=getattr(options, "config_settings", None),
+ )
+ requirements.append(req_to_add)
+
+ # NOTE: options.require_hashes may be set if --require-hashes is True
+ for filename in options.requirements:
+ for parsed_req in parse_requirements(
+ filename, finder=finder, options=options, session=session
+ ):
+ req_to_add = install_req_from_parsed_requirement(
+ parsed_req,
+ isolated=options.isolated_mode,
+ use_pep517=options.use_pep517,
+ user_supplied=True,
+ )
+ requirements.append(req_to_add)
+
+ # If any requirement has hash options, enable hash checking.
+ if any(req.has_hash_options for req in requirements):
+ options.require_hashes = True
+
+ if not (args or options.editables or options.requirements):
+ opts = {"name": self.name}
+ if options.find_links:
+ raise CommandError(
+ "You must give at least one requirement to {name} "
+ '(maybe you meant "pip {name} {links}"?)'.format(
+ **dict(opts, links=" ".join(options.find_links))
+ )
+ )
+ else:
+ raise CommandError(
+ "You must give at least one requirement to {name} "
+ '(see "pip help {name}")'.format(**opts)
+ )
+
+ return requirements
+
+ @staticmethod
+ def trace_basic_info(finder: PackageFinder) -> None:
+ """
+ Trace basic information about the provided objects.
+ """
+ # Display where finder is looking for packages
+ search_scope = finder.search_scope
+ locations = search_scope.get_formatted_locations()
+ if locations:
+ logger.info(locations)
+
+ def _build_package_finder(
+ self,
+ options: Values,
+ session: PipSession,
+ target_python: Optional[TargetPython] = None,
+ ignore_requires_python: Optional[bool] = None,
+ ) -> PackageFinder:
+ """
+ Create a package finder appropriate to this requirement command.
+
+ :param ignore_requires_python: Whether to ignore incompatible
+ "Requires-Python" values in links. Defaults to False.
+ """
+ link_collector = LinkCollector.create(session, options=options)
+ selection_prefs = SelectionPreferences(
+ allow_yanked=True,
+ format_control=options.format_control,
+ allow_all_prereleases=options.pre,
+ prefer_binary=options.prefer_binary,
+ ignore_requires_python=ignore_requires_python,
+ )
+
+ return PackageFinder.create(
+ link_collector=link_collector,
+ selection_prefs=selection_prefs,
+ target_python=target_python,
+ )
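
The truststore path in _build_session() boils down to one call: wrapping the
operating-system certificate store in an ssl.SSLContext subclass. A
standalone sketch of that wiring (requires Python 3.10+ and the third-party
truststore package):

    import ssl

    import truststore  # third-party package, not the standard library

    # Verify TLS connections against the OS trust store instead of certifi,
    # mirroring what _create_truststore_ssl_context() builds for PipSession.
    ctx = truststore.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
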
diff --git a/third_party/python/pip/pip/_internal/cli/spinners.py b/third_party/python/pip/pip/_internal/cli/spinners.py
new file mode 100644
index 0000000000..cf2b976f37
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/spinners.py
@@ -0,0 +1,159 @@
+import contextlib
+import itertools
+import logging
+import sys
+import time
+from typing import IO, Generator, Optional
+
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.logging import get_indentation
+
+logger = logging.getLogger(__name__)
+
+
+class SpinnerInterface:
+ def spin(self) -> None:
+ raise NotImplementedError()
+
+ def finish(self, final_status: str) -> None:
+ raise NotImplementedError()
+
+
+class InteractiveSpinner(SpinnerInterface):
+ def __init__(
+ self,
+ message: str,
+ file: Optional[IO[str]] = None,
+ spin_chars: str = "-\\|/",
+ # Empirically, 8 updates/second looks nice
+ min_update_interval_seconds: float = 0.125,
+ ):
+ self._message = message
+ if file is None:
+ file = sys.stdout
+ self._file = file
+ self._rate_limiter = RateLimiter(min_update_interval_seconds)
+ self._finished = False
+
+ self._spin_cycle = itertools.cycle(spin_chars)
+
+ self._file.write(" " * get_indentation() + self._message + " ... ")
+ self._width = 0
+
+ def _write(self, status: str) -> None:
+ assert not self._finished
+ # Erase what we wrote before by backspacing to the beginning, writing
+ # spaces to overwrite the old text, and then backspacing again
+ backup = "\b" * self._width
+ self._file.write(backup + " " * self._width + backup)
+ # Now we have a blank slate to add our status
+ self._file.write(status)
+ self._width = len(status)
+ self._file.flush()
+ self._rate_limiter.reset()
+
+ def spin(self) -> None:
+ if self._finished:
+ return
+ if not self._rate_limiter.ready():
+ return
+ self._write(next(self._spin_cycle))
+
+ def finish(self, final_status: str) -> None:
+ if self._finished:
+ return
+ self._write(final_status)
+ self._file.write("\n")
+ self._file.flush()
+ self._finished = True
+
+
+# Used for dumb terminals, non-interactive installs (no tty), etc.
+# We still print updates occasionally (once every 60 seconds by default) to
+# act as a keep-alive for systems like Travis-CI that take lack-of-output as
+# an indication that a task has frozen.
+class NonInteractiveSpinner(SpinnerInterface):
+ def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None:
+ self._message = message
+ self._finished = False
+ self._rate_limiter = RateLimiter(min_update_interval_seconds)
+ self._update("started")
+
+ def _update(self, status: str) -> None:
+ assert not self._finished
+ self._rate_limiter.reset()
+ logger.info("%s: %s", self._message, status)
+
+ def spin(self) -> None:
+ if self._finished:
+ return
+ if not self._rate_limiter.ready():
+ return
+ self._update("still running...")
+
+ def finish(self, final_status: str) -> None:
+ if self._finished:
+ return
+ self._update(f"finished with status '{final_status}'")
+ self._finished = True
+
+
+class RateLimiter:
+ def __init__(self, min_update_interval_seconds: float) -> None:
+ self._min_update_interval_seconds = min_update_interval_seconds
+ self._last_update: float = 0
+
+ def ready(self) -> bool:
+ now = time.time()
+ delta = now - self._last_update
+ return delta >= self._min_update_interval_seconds
+
+ def reset(self) -> None:
+ self._last_update = time.time()
+
+
+@contextlib.contextmanager
+def open_spinner(message: str) -> Generator[SpinnerInterface, None, None]:
+ # Interactive spinner goes directly to sys.stdout rather than being routed
+ # through the logging system, but it acts like it has level INFO,
+ # i.e. it's only displayed if we're at level INFO or better.
+ # Non-interactive spinner goes through the logging system, so it is always
+ # in sync with logging configuration.
+ if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
+ spinner: SpinnerInterface = InteractiveSpinner(message)
+ else:
+ spinner = NonInteractiveSpinner(message)
+ try:
+ with hidden_cursor(sys.stdout):
+ yield spinner
+ except KeyboardInterrupt:
+ spinner.finish("canceled")
+ raise
+ except Exception:
+ spinner.finish("error")
+ raise
+ else:
+ spinner.finish("done")
+
+
+HIDE_CURSOR = "\x1b[?25l"
+SHOW_CURSOR = "\x1b[?25h"
+
+
+@contextlib.contextmanager
+def hidden_cursor(file: IO[str]) -> Generator[None, None, None]:
+ # The Windows terminal does not support the hide/show cursor ANSI codes,
+ # even via colorama. So don't even try.
+ if WINDOWS:
+ yield
+ # We don't want to clutter the output with control characters if we're
+ # writing to a file, or if the user is running with --quiet.
+ # See https://github.com/pypa/pip/issues/3418
+ elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO:
+ yield
+ else:
+ file.write(HIDE_CURSOR)
+ try:
+ yield
+ finally:
+ file.write(SHOW_CURSOR)
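
open_spinner() picks the interactive or logging-based implementation
automatically; callers just invoke spin() inside their work loop and let the
context manager print the final status. Illustrative usage, assuming the
module above is importable:

    import time

    with open_spinner("Building wheel") as spinner:
        for _ in range(20):
            time.sleep(0.1)  # stand-in for real work
            spinner.spin()
    # On a tty this ends with "Building wheel ... done"; otherwise the
    # NonInteractiveSpinner logs "finished with status 'done'".
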
diff --git a/third_party/python/pip/pip/_internal/cli/status_codes.py b/third_party/python/pip/pip/_internal/cli/status_codes.py
new file mode 100644
index 0000000000..5e29502cdd
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/cli/status_codes.py
@@ -0,0 +1,6 @@
+SUCCESS = 0
+ERROR = 1
+UNKNOWN_ERROR = 2
+VIRTUALENV_NOT_FOUND = 3
+PREVIOUS_BUILD_DIR_ERROR = 4
+NO_MATCHES_FOUND = 23
diff --git a/third_party/python/pip/pip/_internal/commands/__init__.py b/third_party/python/pip/pip/_internal/commands/__init__.py
new file mode 100644
index 0000000000..858a410141
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/__init__.py
@@ -0,0 +1,132 @@
+"""
+Package containing all pip commands
+"""
+
+import importlib
+from collections import namedtuple
+from typing import Any, Dict, Optional
+
+from pip._internal.cli.base_command import Command
+
+CommandInfo = namedtuple("CommandInfo", "module_path, class_name, summary")
+
+# This dictionary does a bunch of heavy lifting for help output:
+# - It avoids additional (costly) imports when presenting `--help`.
+# - The ordering matters for help display.
+#
+# Even though the module path starts with the same "pip._internal.commands"
+# prefix, the full path makes testing easier (specifically when modifying
+# `commands_dict` in test setup / teardown).
+commands_dict: Dict[str, CommandInfo] = {
+ "install": CommandInfo(
+ "pip._internal.commands.install",
+ "InstallCommand",
+ "Install packages.",
+ ),
+ "download": CommandInfo(
+ "pip._internal.commands.download",
+ "DownloadCommand",
+ "Download packages.",
+ ),
+ "uninstall": CommandInfo(
+ "pip._internal.commands.uninstall",
+ "UninstallCommand",
+ "Uninstall packages.",
+ ),
+ "freeze": CommandInfo(
+ "pip._internal.commands.freeze",
+ "FreezeCommand",
+ "Output installed packages in requirements format.",
+ ),
+ "inspect": CommandInfo(
+ "pip._internal.commands.inspect",
+ "InspectCommand",
+ "Inspect the python environment.",
+ ),
+ "list": CommandInfo(
+ "pip._internal.commands.list",
+ "ListCommand",
+ "List installed packages.",
+ ),
+ "show": CommandInfo(
+ "pip._internal.commands.show",
+ "ShowCommand",
+ "Show information about installed packages.",
+ ),
+ "check": CommandInfo(
+ "pip._internal.commands.check",
+ "CheckCommand",
+ "Verify installed packages have compatible dependencies.",
+ ),
+ "config": CommandInfo(
+ "pip._internal.commands.configuration",
+ "ConfigurationCommand",
+ "Manage local and global configuration.",
+ ),
+ "search": CommandInfo(
+ "pip._internal.commands.search",
+ "SearchCommand",
+ "Search PyPI for packages.",
+ ),
+ "cache": CommandInfo(
+ "pip._internal.commands.cache",
+ "CacheCommand",
+ "Inspect and manage pip's wheel cache.",
+ ),
+ "index": CommandInfo(
+ "pip._internal.commands.index",
+ "IndexCommand",
+ "Inspect information available from package indexes.",
+ ),
+ "wheel": CommandInfo(
+ "pip._internal.commands.wheel",
+ "WheelCommand",
+ "Build wheels from your requirements.",
+ ),
+ "hash": CommandInfo(
+ "pip._internal.commands.hash",
+ "HashCommand",
+ "Compute hashes of package archives.",
+ ),
+ "completion": CommandInfo(
+ "pip._internal.commands.completion",
+ "CompletionCommand",
+ "A helper command used for command completion.",
+ ),
+ "debug": CommandInfo(
+ "pip._internal.commands.debug",
+ "DebugCommand",
+ "Show information useful for debugging.",
+ ),
+ "help": CommandInfo(
+ "pip._internal.commands.help",
+ "HelpCommand",
+ "Show help for commands.",
+ ),
+}
+
+
+def create_command(name: str, **kwargs: Any) -> Command:
+ """
+ Create an instance of the Command class with the given name.
+ """
+ module_path, class_name, summary = commands_dict[name]
+ module = importlib.import_module(module_path)
+ command_class = getattr(module, class_name)
+ command = command_class(name=name, summary=summary, **kwargs)
+
+ return command
+
+
+def get_similar_commands(name: str) -> Optional[str]:
+ """Command name auto-correct."""
+ from difflib import get_close_matches
+
+ name = name.lower()
+
+ close_commands = get_close_matches(name, commands_dict.keys())
+
+ if close_commands:
+ return close_commands[0]
+ else:
+ return None
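
get_similar_commands() is a thin wrapper over difflib.get_close_matches,
which is why a near-miss like "instal" resolves to "install". The suggestion
logic in isolation:

    from difflib import get_close_matches

    commands = ["install", "download", "uninstall", "freeze", "list"]
    print(get_close_matches("instal", commands))  # ['install', 'uninstall']
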
diff --git a/third_party/python/pip/pip/_internal/commands/cache.py b/third_party/python/pip/pip/_internal/commands/cache.py
new file mode 100644
index 0000000000..c5f03302d6
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/cache.py
@@ -0,0 +1,223 @@
+import os
+import textwrap
+from optparse import Values
+from typing import Any, List
+
+import pip._internal.utils.filesystem as filesystem
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.exceptions import CommandError, PipError
+from pip._internal.utils.logging import getLogger
+
+logger = getLogger(__name__)
+
+
+class CacheCommand(Command):
+ """
+ Inspect and manage pip's wheel cache.
+
+ Subcommands:
+
+ - dir: Show the cache directory.
+ - info: Show information about the cache.
+ - list: List filenames of packages stored in the cache.
+ - remove: Remove one or more packages from the cache.
+ - purge: Remove all items from the cache.
+
+ ``<pattern>`` can be a glob expression or a package name.
+ """
+
+ ignore_require_venv = True
+ usage = """
+ %prog dir
+ %prog info
+ %prog list [<pattern>] [--format=[human, abspath]]
+ %prog remove <pattern>
+ %prog purge
+ """
+
+ def add_options(self) -> None:
+
+ self.cmd_opts.add_option(
+ "--format",
+ action="store",
+ dest="list_format",
+ default="human",
+ choices=("human", "abspath"),
+ help="Select the output format among: human (default) or abspath",
+ )
+
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ handlers = {
+ "dir": self.get_cache_dir,
+ "info": self.get_cache_info,
+ "list": self.list_cache_items,
+ "remove": self.remove_cache_items,
+ "purge": self.purge_cache,
+ }
+
+ if not options.cache_dir:
+ logger.error("pip cache commands can not function since cache is disabled.")
+ return ERROR
+
+ # Determine action
+ if not args or args[0] not in handlers:
+ logger.error(
+ "Need an action (%s) to perform.",
+ ", ".join(sorted(handlers)),
+ )
+ return ERROR
+
+ action = args[0]
+
+ # Error handling happens here, not in the action-handlers.
+ try:
+ handlers[action](options, args[1:])
+ except PipError as e:
+ logger.error(e.args[0])
+ return ERROR
+
+ return SUCCESS
+
+ def get_cache_dir(self, options: Values, args: List[Any]) -> None:
+ if args:
+ raise CommandError("Too many arguments")
+
+ logger.info(options.cache_dir)
+
+ def get_cache_info(self, options: Values, args: List[Any]) -> None:
+ if args:
+ raise CommandError("Too many arguments")
+
+ num_http_files = len(self._find_http_files(options))
+ num_packages = len(self._find_wheels(options, "*"))
+
+ http_cache_location = self._cache_dir(options, "http")
+ wheels_cache_location = self._cache_dir(options, "wheels")
+ http_cache_size = filesystem.format_directory_size(http_cache_location)
+ wheels_cache_size = filesystem.format_directory_size(wheels_cache_location)
+
+ message = (
+ textwrap.dedent(
+ """
+ Package index page cache location: {http_cache_location}
+ Package index page cache size: {http_cache_size}
+ Number of HTTP files: {num_http_files}
+ Locally built wheels location: {wheels_cache_location}
+ Locally built wheels size: {wheels_cache_size}
+ Number of locally built wheels: {package_count}
+ """
+ )
+ .format(
+ http_cache_location=http_cache_location,
+ http_cache_size=http_cache_size,
+ num_http_files=num_http_files,
+ wheels_cache_location=wheels_cache_location,
+ package_count=num_packages,
+ wheels_cache_size=wheels_cache_size,
+ )
+ .strip()
+ )
+
+ logger.info(message)
+
+ def list_cache_items(self, options: Values, args: List[Any]) -> None:
+ if len(args) > 1:
+ raise CommandError("Too many arguments")
+
+ if args:
+ pattern = args[0]
+ else:
+ pattern = "*"
+
+ files = self._find_wheels(options, pattern)
+ if options.list_format == "human":
+ self.format_for_human(files)
+ else:
+ self.format_for_abspath(files)
+
+ def format_for_human(self, files: List[str]) -> None:
+ if not files:
+ logger.info("No locally built wheels cached.")
+ return
+
+ results = []
+ for filename in files:
+ wheel = os.path.basename(filename)
+ size = filesystem.format_file_size(filename)
+ results.append(f" - {wheel} ({size})")
+ logger.info("Cache contents:\n")
+ logger.info("\n".join(sorted(results)))
+
+ def format_for_abspath(self, files: List[str]) -> None:
+ if not files:
+ return
+
+ results = []
+ for filename in files:
+ results.append(filename)
+
+ logger.info("\n".join(sorted(results)))
+
+ def remove_cache_items(self, options: Values, args: List[Any]) -> None:
+ if len(args) > 1:
+ raise CommandError("Too many arguments")
+
+ if not args:
+ raise CommandError("Please provide a pattern")
+
+ files = self._find_wheels(options, args[0])
+
+ no_matching_msg = "No matching packages"
+ if args[0] == "*":
+ # Only fetch http files if no specific pattern given
+ files += self._find_http_files(options)
+ else:
+ # Add the pattern to the log message
+ no_matching_msg += ' for pattern "{}"'.format(args[0])
+
+ if not files:
+ logger.warning(no_matching_msg)
+
+ for filename in files:
+ os.unlink(filename)
+ logger.verbose("Removed %s", filename)
+ logger.info("Files removed: %s", len(files))
+
+ def purge_cache(self, options: Values, args: List[Any]) -> None:
+ if args:
+ raise CommandError("Too many arguments")
+
+ return self.remove_cache_items(options, ["*"])
+
+ def _cache_dir(self, options: Values, subdir: str) -> str:
+ return os.path.join(options.cache_dir, subdir)
+
+ def _find_http_files(self, options: Values) -> List[str]:
+ http_dir = self._cache_dir(options, "http")
+ return filesystem.find_files(http_dir, "*")
+
+ def _find_wheels(self, options: Values, pattern: str) -> List[str]:
+ wheel_dir = self._cache_dir(options, "wheels")
+
+ # The wheel filename format, as specified in PEP 427, is:
+ # {distribution}-{version}(-{build})?-{python}-{abi}-{platform}.whl
+ #
+ # Additionally, non-alphanumeric values in the distribution are
+ # normalized to underscores (_), meaning hyphens can never occur
+ # before `-{version}`.
+ #
+ # Given that information:
+ # - If the pattern we're given contains a hyphen (-), the user is
+ # providing at least the version. Thus, we can just append `*.whl`
+ # to match the rest of it.
+ # - If the pattern we're given doesn't contain a hyphen (-), the
+ # user is only providing the name. Thus, we append `-*.whl` to
+ # match the hyphen before the version, followed by anything else.
+ #
+ # PEP 427: https://www.python.org/dev/peps/pep-0427/
+ pattern = pattern + ("*.whl" if "-" in pattern else "-*.whl")
+
+ return filesystem.find_files(wheel_dir, pattern)
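
The pattern expansion at the bottom of _find_wheels() is the subtle part: a
hyphen in the user's pattern means a version was supplied, so only a trailing
*.whl is needed; otherwise the -*.whl suffix matches the mandatory hyphen
before the version and keeps "requests" from matching wheels of a
differently-named project. The expansion in isolation:

    for pattern in ("requests", "requests-2.28.1"):
        suffix = "*.whl" if "-" in pattern else "-*.whl"
        print(pattern + suffix)
    # requests-*.whl        (name only: any version of "requests")
    # requests-2.28.1*.whl  (name and version supplied)
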
diff --git a/third_party/python/pip/pip/_internal/commands/check.py b/third_party/python/pip/pip/_internal/commands/check.py
new file mode 100644
index 0000000000..3864220b2b
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/check.py
@@ -0,0 +1,53 @@
+import logging
+from optparse import Values
+from typing import List
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.operations.check import (
+ check_package_set,
+ create_package_set_from_installed,
+)
+from pip._internal.utils.misc import write_output
+
+logger = logging.getLogger(__name__)
+
+
+class CheckCommand(Command):
+ """Verify installed packages have compatible dependencies."""
+
+ usage = """
+ %prog [options]"""
+
+ def run(self, options: Values, args: List[str]) -> int:
+
+ package_set, parsing_probs = create_package_set_from_installed()
+ missing, conflicting = check_package_set(package_set)
+
+ for project_name in missing:
+ version = package_set[project_name].version
+ for dependency in missing[project_name]:
+ write_output(
+ "%s %s requires %s, which is not installed.",
+ project_name,
+ version,
+ dependency[0],
+ )
+
+ for project_name in conflicting:
+ version = package_set[project_name].version
+ for dep_name, dep_version, req in conflicting[project_name]:
+ write_output(
+ "%s %s has requirement %s, but you have %s %s.",
+ project_name,
+ version,
+ req,
+ dep_name,
+ dep_version,
+ )
+
+ if missing or conflicting or parsing_probs:
+ return ERROR
+ else:
+ write_output("No broken requirements found.")
+ return SUCCESS
diff --git a/third_party/python/pip/pip/_internal/commands/completion.py b/third_party/python/pip/pip/_internal/commands/completion.py
new file mode 100644
index 0000000000..deaa30899e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/completion.py
@@ -0,0 +1,126 @@
+import sys
+import textwrap
+from optparse import Values
+from typing import List
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.utils.misc import get_prog
+
+BASE_COMPLETION = """
+# pip {shell} completion start{script}# pip {shell} completion end
+"""
+
+COMPLETION_SCRIPTS = {
+ "bash": """
+ _pip_completion()
+ {{
+ COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\
+ COMP_CWORD=$COMP_CWORD \\
+ PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )
+ }}
+ complete -o default -F _pip_completion {prog}
+ """,
+ "zsh": """
+ function _pip_completion {{
+ local words cword
+ read -Ac words
+ read -cn cword
+ reply=( $( COMP_WORDS="$words[*]" \\
+ COMP_CWORD=$(( cword-1 )) \\
+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
+ }}
+ compctl -K _pip_completion {prog}
+ """,
+ "fish": """
+ function __fish_complete_pip
+ set -lx COMP_WORDS (commandline -o) ""
+ set -lx COMP_CWORD ( \\
+ math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
+ )
+ set -lx PIP_AUTO_COMPLETE 1
+ string split \\ -- (eval $COMP_WORDS[1])
+ end
+ complete -fa "(__fish_complete_pip)" -c {prog}
+ """,
+ "powershell": """
+ if ((Test-Path Function:\\TabExpansion) -and -not `
+ (Test-Path Function:\\_pip_completeBackup)) {{
+ Rename-Item Function:\\TabExpansion _pip_completeBackup
+ }}
+ function TabExpansion($line, $lastWord) {{
+ $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()
+ if ($lastBlock.StartsWith("{prog} ")) {{
+ $Env:COMP_WORDS=$lastBlock
+ $Env:COMP_CWORD=$lastBlock.Split().Length - 1
+ $Env:PIP_AUTO_COMPLETE=1
+ (& {prog}).Split()
+ Remove-Item Env:COMP_WORDS
+ Remove-Item Env:COMP_CWORD
+ Remove-Item Env:PIP_AUTO_COMPLETE
+ }}
+ elseif (Test-Path Function:\\_pip_completeBackup) {{
+ # Fall back on existing tab expansion
+ _pip_completeBackup $line $lastWord
+ }}
+ }}
+ """,
+}
+
+
+class CompletionCommand(Command):
+ """A helper command to be used for command completion."""
+
+ ignore_require_venv = True
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "--bash",
+ "-b",
+ action="store_const",
+ const="bash",
+ dest="shell",
+ help="Emit completion code for bash",
+ )
+ self.cmd_opts.add_option(
+ "--zsh",
+ "-z",
+ action="store_const",
+ const="zsh",
+ dest="shell",
+ help="Emit completion code for zsh",
+ )
+ self.cmd_opts.add_option(
+ "--fish",
+ "-f",
+ action="store_const",
+ const="fish",
+ dest="shell",
+ help="Emit completion code for fish",
+ )
+ self.cmd_opts.add_option(
+ "--powershell",
+ "-p",
+ action="store_const",
+ const="powershell",
+ dest="shell",
+ help="Emit completion code for powershell",
+ )
+
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ """Prints the completion code of the given shell"""
+ shells = COMPLETION_SCRIPTS.keys()
+ shell_options = ["--" + shell for shell in sorted(shells)]
+ if options.shell in shells:
+ script = textwrap.dedent(
+ COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog())
+ )
+ print(BASE_COMPLETION.format(script=script, shell=options.shell))
+ return SUCCESS
+ else:
+ sys.stderr.write(
+ "ERROR: You must pass {}\n".format(" or ".join(shell_options))
+ )
+ return SUCCESS
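
All four scripts above speak the same environment-variable protocol: they re-invoke pip with PIP_AUTO_COMPLETE=1 and the current command line encoded in COMP_WORDS/COMP_CWORD, and pip prints the candidate completions itself. A quick way to observe this from Python (assumes a pip executable on PATH; the exact candidates depend on the pip version):

    import os
    import subprocess

    env = dict(os.environ, PIP_AUTO_COMPLETE="1", COMP_WORDS="pip ins", COMP_CWORD="1")
    out = subprocess.run(["pip"], env=env, capture_output=True, text=True).stdout
    print(out)  # subcommands matching "ins", e.g. "install inspect"
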
diff --git a/third_party/python/pip/pip/_internal/commands/configuration.py b/third_party/python/pip/pip/_internal/commands/configuration.py
new file mode 100644
index 0000000000..84b134e490
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/configuration.py
@@ -0,0 +1,282 @@
+import logging
+import os
+import subprocess
+from optparse import Values
+from typing import Any, List, Optional
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.configuration import (
+ Configuration,
+ Kind,
+ get_configuration_files,
+ kinds,
+)
+from pip._internal.exceptions import PipError
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import get_prog, write_output
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigurationCommand(Command):
+ """
+ Manage local and global configuration.
+
+ Subcommands:
+
+ - list: List the active configuration (or from the file specified)
+ - edit: Edit the configuration file in an editor
+ - get: Get the value associated with command.option
+ - set: Set the command.option=value
+ - unset: Unset the value associated with command.option
+ - debug: List the configuration files and values defined under them
+
+ Configuration keys should be dot separated command and option name,
+ with the special prefix "global" affecting any command. For example,
+ "pip config set global.index-url https://example.org/" would configure
+ the index url for all commands, but "pip config set download.timeout 10"
+ would configure a 10 second timeout only for "pip download" commands.
+
+    If none of --user, --global, or --site is passed, a virtual
+ environment configuration file is used if one is active and the file
+ exists. Otherwise, all modifications happen to the user file by
+ default.
+ """
+
+ ignore_require_venv = True
+ usage = """
+ %prog [<file-option>] list
+ %prog [<file-option>] [--editor <editor-path>] edit
+
+ %prog [<file-option>] get command.option
+ %prog [<file-option>] set command.option value
+ %prog [<file-option>] unset command.option
+ %prog [<file-option>] debug
+ """
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "--editor",
+ dest="editor",
+ action="store",
+ default=None,
+ help=(
+ "Editor to use to edit the file. Uses VISUAL or EDITOR "
+ "environment variables if not provided."
+ ),
+ )
+
+ self.cmd_opts.add_option(
+ "--global",
+ dest="global_file",
+ action="store_true",
+ default=False,
+ help="Use the system-wide configuration file only",
+ )
+
+ self.cmd_opts.add_option(
+ "--user",
+ dest="user_file",
+ action="store_true",
+ default=False,
+ help="Use the user configuration file only",
+ )
+
+ self.cmd_opts.add_option(
+ "--site",
+ dest="site_file",
+ action="store_true",
+ default=False,
+ help="Use the current environment configuration file only",
+ )
+
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ handlers = {
+ "list": self.list_values,
+ "edit": self.open_in_editor,
+ "get": self.get_name,
+ "set": self.set_name_value,
+ "unset": self.unset_name,
+ "debug": self.list_config_values,
+ }
+
+ # Determine action
+ if not args or args[0] not in handlers:
+ logger.error(
+ "Need an action (%s) to perform.",
+ ", ".join(sorted(handlers)),
+ )
+ return ERROR
+
+ action = args[0]
+
+ # Determine which configuration files are to be loaded
+ # Depends on whether the command is modifying.
+ try:
+ load_only = self._determine_file(
+ options, need_value=(action in ["get", "set", "unset", "edit"])
+ )
+ except PipError as e:
+ logger.error(e.args[0])
+ return ERROR
+
+ # Load a new configuration
+ self.configuration = Configuration(
+ isolated=options.isolated_mode, load_only=load_only
+ )
+ self.configuration.load()
+
+ # Error handling happens here, not in the action-handlers.
+ try:
+ handlers[action](options, args[1:])
+ except PipError as e:
+ logger.error(e.args[0])
+ return ERROR
+
+ return SUCCESS
+
+ def _determine_file(self, options: Values, need_value: bool) -> Optional[Kind]:
+ file_options = [
+ key
+ for key, value in (
+ (kinds.USER, options.user_file),
+ (kinds.GLOBAL, options.global_file),
+ (kinds.SITE, options.site_file),
+ )
+ if value
+ ]
+
+ if not file_options:
+ if not need_value:
+ return None
+ # Default to user, unless there's a site file.
+ elif any(
+ os.path.exists(site_config_file)
+ for site_config_file in get_configuration_files()[kinds.SITE]
+ ):
+ return kinds.SITE
+ else:
+ return kinds.USER
+ elif len(file_options) == 1:
+ return file_options[0]
+
+ raise PipError(
+ "Need exactly one file to operate upon "
+ "(--user, --site, --global) to perform."
+ )
+
+ def list_values(self, options: Values, args: List[str]) -> None:
+ self._get_n_args(args, "list", n=0)
+
+ for key, value in sorted(self.configuration.items()):
+ write_output("%s=%r", key, value)
+
+ def get_name(self, options: Values, args: List[str]) -> None:
+ key = self._get_n_args(args, "get [name]", n=1)
+ value = self.configuration.get_value(key)
+
+ write_output("%s", value)
+
+ def set_name_value(self, options: Values, args: List[str]) -> None:
+ key, value = self._get_n_args(args, "set [name] [value]", n=2)
+ self.configuration.set_value(key, value)
+
+ self._save_configuration()
+
+ def unset_name(self, options: Values, args: List[str]) -> None:
+ key = self._get_n_args(args, "unset [name]", n=1)
+ self.configuration.unset_value(key)
+
+ self._save_configuration()
+
+ def list_config_values(self, options: Values, args: List[str]) -> None:
+ """List config key-value pairs across different config files"""
+ self._get_n_args(args, "debug", n=0)
+
+ self.print_env_var_values()
+ # Iterate over config files and print if they exist, and the
+ # key-value pairs present in them if they do
+ for variant, files in sorted(self.configuration.iter_config_files()):
+ write_output("%s:", variant)
+ for fname in files:
+ with indent_log():
+ file_exists = os.path.exists(fname)
+ write_output("%s, exists: %r", fname, file_exists)
+ if file_exists:
+ self.print_config_file_values(variant)
+
+ def print_config_file_values(self, variant: Kind) -> None:
+ """Get key-value pairs from the file of a variant"""
+ for name, value in self.configuration.get_values_in_config(variant).items():
+ with indent_log():
+ write_output("%s: %s", name, value)
+
+ def print_env_var_values(self) -> None:
+ """Get key-values pairs present as environment variables"""
+ write_output("%s:", "env_var")
+ with indent_log():
+ for key, value in sorted(self.configuration.get_environ_vars()):
+ env_var = f"PIP_{key.upper()}"
+ write_output("%s=%r", env_var, value)
+
+ def open_in_editor(self, options: Values, args: List[str]) -> None:
+ editor = self._determine_editor(options)
+
+ fname = self.configuration.get_file_to_edit()
+ if fname is None:
+ raise PipError("Could not determine appropriate file.")
+ elif '"' in fname:
+ # This shouldn't happen, unless we see a username like that.
+ # If that happens, we'd appreciate a pull request fixing this.
+ raise PipError(
+ f'Can not open an editor for a file name containing "\n{fname}'
+ )
+
+ try:
+ subprocess.check_call(f'{editor} "{fname}"', shell=True)
+ except FileNotFoundError as e:
+ if not e.filename:
+ e.filename = editor
+ raise
+ except subprocess.CalledProcessError as e:
+ raise PipError(
+ "Editor Subprocess exited with exit code {}".format(e.returncode)
+ )
+
+ def _get_n_args(self, args: List[str], example: str, n: int) -> Any:
+ """Helper to make sure the command got the right number of arguments"""
+ if len(args) != n:
+ msg = (
+ "Got unexpected number of arguments, expected {}. "
+ '(example: "{} config {}")'
+ ).format(n, get_prog(), example)
+ raise PipError(msg)
+
+ if n == 1:
+ return args[0]
+ else:
+ return args
+
+ def _save_configuration(self) -> None:
+ # We successfully ran a modifying command. Need to save the
+ # configuration.
+ try:
+ self.configuration.save()
+ except Exception:
+ logger.exception(
+ "Unable to save configuration. Please report this as a bug."
+ )
+ raise PipError("Internal Error.")
+
+ def _determine_editor(self, options: Values) -> str:
+ if options.editor is not None:
+ return options.editor
+ elif "VISUAL" in os.environ:
+ return os.environ["VISUAL"]
+ elif "EDITOR" in os.environ:
+ return os.environ["EDITOR"]
+ else:
+ raise PipError("Could not determine editor to use.")
diff --git a/third_party/python/pip/pip/_internal/commands/debug.py b/third_party/python/pip/pip/_internal/commands/debug.py
new file mode 100644
index 0000000000..2a3e7d298f
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/debug.py
@@ -0,0 +1,199 @@
+import importlib.resources
+import locale
+import logging
+import os
+import sys
+from optparse import Values
+from types import ModuleType
+from typing import Any, Dict, List, Optional
+
+import pip._vendor
+from pip._vendor.certifi import where
+from pip._vendor.packaging.version import parse as parse_version
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.cmdoptions import make_target_python
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.configuration import Configuration
+from pip._internal.metadata import get_environment
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import get_pip_version
+
+logger = logging.getLogger(__name__)
+
+
+def show_value(name: str, value: Any) -> None:
+ logger.info("%s: %s", name, value)
+
+
+def show_sys_implementation() -> None:
+ logger.info("sys.implementation:")
+ implementation_name = sys.implementation.name
+ with indent_log():
+ show_value("name", implementation_name)
+
+
+def create_vendor_txt_map() -> Dict[str, str]:
+ with importlib.resources.open_text("pip._vendor", "vendor.txt") as f:
+        # Keep only version-pinning lines, and take the text before the
+        # first space (dropping any trailing comments).
+ lines = [
+ line.strip().split(" ", 1)[0] for line in f.readlines() if "==" in line
+ ]
+
+ # Transform into "module" -> version dict.
+ return dict(line.split("==", 1) for line in lines)
+
+
+def get_module_from_module_name(module_name: str) -> ModuleType:
+ # Module name can be uppercase in vendor.txt for some reason...
+ module_name = module_name.lower().replace("-", "_")
+ # PATCH: setuptools is actually only pkg_resources.
+ if module_name == "setuptools":
+ module_name = "pkg_resources"
+
+ __import__(f"pip._vendor.{module_name}", globals(), locals(), level=0)
+ return getattr(pip._vendor, module_name)
+
+
+def get_vendor_version_from_module(module_name: str) -> Optional[str]:
+ module = get_module_from_module_name(module_name)
+ version = getattr(module, "__version__", None)
+
+ if not version:
+ # Try to find version in debundled module info.
+ assert module.__file__ is not None
+ env = get_environment([os.path.dirname(module.__file__)])
+ dist = env.get_distribution(module_name)
+ if dist:
+ version = str(dist.version)
+
+ return version
+
+
+def show_actual_vendor_versions(vendor_txt_versions: Dict[str, str]) -> None:
+ """Log the actual version and print extra info if there is
+ a conflict or if the actual version could not be imported.
+ """
+ for module_name, expected_version in vendor_txt_versions.items():
+ extra_message = ""
+ actual_version = get_vendor_version_from_module(module_name)
+ if not actual_version:
+ extra_message = (
+ " (Unable to locate actual module version, using"
+ " vendor.txt specified version)"
+ )
+ actual_version = expected_version
+ elif parse_version(actual_version) != parse_version(expected_version):
+ extra_message = (
+ " (CONFLICT: vendor.txt suggests version should"
+ " be {})".format(expected_version)
+ )
+ logger.info("%s==%s%s", module_name, actual_version, extra_message)
+
+
+def show_vendor_versions() -> None:
+ logger.info("vendored library versions:")
+
+ vendor_txt_versions = create_vendor_txt_map()
+ with indent_log():
+ show_actual_vendor_versions(vendor_txt_versions)
+
+
+def show_tags(options: Values) -> None:
+ tag_limit = 10
+
+ target_python = make_target_python(options)
+ tags = target_python.get_tags()
+
+ # Display the target options that were explicitly provided.
+ formatted_target = target_python.format_given()
+ suffix = ""
+ if formatted_target:
+ suffix = f" (target: {formatted_target})"
+
+ msg = "Compatible tags: {}{}".format(len(tags), suffix)
+ logger.info(msg)
+
+ if options.verbose < 1 and len(tags) > tag_limit:
+ tags_limited = True
+ tags = tags[:tag_limit]
+ else:
+ tags_limited = False
+
+ with indent_log():
+ for tag in tags:
+ logger.info(str(tag))
+
+ if tags_limited:
+ msg = (
+ "...\n[First {tag_limit} tags shown. Pass --verbose to show all.]"
+ ).format(tag_limit=tag_limit)
+ logger.info(msg)
+
+
+def ca_bundle_info(config: Configuration) -> str:
+ levels = set()
+ for key, _ in config.items():
+ levels.add(key.split(".")[0])
+
+ if not levels:
+ return "Not specified"
+
+ levels_that_override_global = ["install", "wheel", "download"]
+ global_overriding_level = [
+ level for level in levels if level in levels_that_override_global
+ ]
+ if not global_overriding_level:
+ return "global"
+
+ if "global" in levels:
+ levels.remove("global")
+ return ", ".join(levels)
+
+
+class DebugCommand(Command):
+ """
+ Display debug information.
+ """
+
+ usage = """
+ %prog <options>"""
+ ignore_require_venv = True
+
+ def add_options(self) -> None:
+ cmdoptions.add_target_python_options(self.cmd_opts)
+ self.parser.insert_option_group(0, self.cmd_opts)
+ self.parser.config.load()
+
+ def run(self, options: Values, args: List[str]) -> int:
+ logger.warning(
+ "This command is only meant for debugging. "
+ "Do not use this with automation for parsing and getting these "
+ "details, since the output and options of this command may "
+ "change without notice."
+ )
+ show_value("pip version", get_pip_version())
+ show_value("sys.version", sys.version)
+ show_value("sys.executable", sys.executable)
+ show_value("sys.getdefaultencoding", sys.getdefaultencoding())
+ show_value("sys.getfilesystemencoding", sys.getfilesystemencoding())
+ show_value(
+ "locale.getpreferredencoding",
+ locale.getpreferredencoding(),
+ )
+ show_value("sys.platform", sys.platform)
+ show_sys_implementation()
+
+ show_value("'cert' config value", ca_bundle_info(self.parser.config))
+ show_value("REQUESTS_CA_BUNDLE", os.environ.get("REQUESTS_CA_BUNDLE"))
+ show_value("CURL_CA_BUNDLE", os.environ.get("CURL_CA_BUNDLE"))
+ show_value("pip._vendor.certifi.where()", where())
+ show_value("pip._vendor.DEBUNDLED", pip._vendor.DEBUNDLED)
+
+ show_vendor_versions()
+
+ show_tags(options)
+
+ return SUCCESS
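
Applied to a made-up sample, the create_vendor_txt_map() transformation above behaves like this:

    sample = """\
    CacheControl==0.12.11
    certifi==2022.12.7  # via requests
    # a comment line with no version pin
    """
    lines = [ln.strip().split(" ", 1)[0] for ln in sample.splitlines() if "==" in ln]
    assert dict(line.split("==", 1) for line in lines) == {
        "CacheControl": "0.12.11",
        "certifi": "2022.12.7",
    }
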
diff --git a/third_party/python/pip/pip/_internal/commands/download.py b/third_party/python/pip/pip/_internal/commands/download.py
new file mode 100644
index 0000000000..4132e08988
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/download.py
@@ -0,0 +1,149 @@
+import logging
+import os
+from optparse import Values
+from typing import List
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.cmdoptions import make_target_python
+from pip._internal.cli.req_command import RequirementCommand, with_cleanup
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.operations.build.build_tracker import get_build_tracker
+from pip._internal.req.req_install import (
+ LegacySetupPyOptionsCheckMode,
+ check_legacy_setup_py_options,
+)
+from pip._internal.utils.misc import ensure_dir, normalize_path, write_output
+from pip._internal.utils.temp_dir import TempDirectory
+
+logger = logging.getLogger(__name__)
+
+
+class DownloadCommand(RequirementCommand):
+ """
+ Download packages from:
+
+ - PyPI (and other indexes) using requirement specifiers.
+ - VCS project urls.
+ - Local project directories.
+ - Local or remote source archives.
+
+ pip also supports downloading from "requirements files", which provide
+ an easy way to specify a whole environment to be downloaded.
+ """
+
+ usage = """
+ %prog [options] <requirement specifier> [package-index-options] ...
+ %prog [options] -r <requirements file> [package-index-options] ...
+ %prog [options] <vcs project url> ...
+ %prog [options] <local project path> ...
+ %prog [options] <archive url/path> ..."""
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(cmdoptions.constraints())
+ self.cmd_opts.add_option(cmdoptions.requirements())
+ self.cmd_opts.add_option(cmdoptions.no_deps())
+ self.cmd_opts.add_option(cmdoptions.global_options())
+ self.cmd_opts.add_option(cmdoptions.no_binary())
+ self.cmd_opts.add_option(cmdoptions.only_binary())
+ self.cmd_opts.add_option(cmdoptions.prefer_binary())
+ self.cmd_opts.add_option(cmdoptions.src())
+ self.cmd_opts.add_option(cmdoptions.pre())
+ self.cmd_opts.add_option(cmdoptions.require_hashes())
+ self.cmd_opts.add_option(cmdoptions.progress_bar())
+ self.cmd_opts.add_option(cmdoptions.no_build_isolation())
+ self.cmd_opts.add_option(cmdoptions.use_pep517())
+ self.cmd_opts.add_option(cmdoptions.no_use_pep517())
+ self.cmd_opts.add_option(cmdoptions.check_build_deps())
+ self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
+
+ self.cmd_opts.add_option(
+ "-d",
+ "--dest",
+ "--destination-dir",
+ "--destination-directory",
+ dest="download_dir",
+ metavar="dir",
+ default=os.curdir,
+ help="Download packages into <dir>.",
+ )
+
+ cmdoptions.add_target_python_options(self.cmd_opts)
+
+ index_opts = cmdoptions.make_option_group(
+ cmdoptions.index_group,
+ self.parser,
+ )
+
+ self.parser.insert_option_group(0, index_opts)
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ @with_cleanup
+ def run(self, options: Values, args: List[str]) -> int:
+
+ options.ignore_installed = True
+ # editable doesn't really make sense for `pip download`, but the bowels
+ # of the RequirementSet code require that property.
+ options.editables = []
+
+ cmdoptions.check_dist_restriction(options)
+
+ options.download_dir = normalize_path(options.download_dir)
+ ensure_dir(options.download_dir)
+
+ session = self.get_default_session(options)
+
+ target_python = make_target_python(options)
+ finder = self._build_package_finder(
+ options=options,
+ session=session,
+ target_python=target_python,
+ ignore_requires_python=options.ignore_requires_python,
+ )
+
+ build_tracker = self.enter_context(get_build_tracker())
+
+ directory = TempDirectory(
+ delete=not options.no_clean,
+ kind="download",
+ globally_managed=True,
+ )
+
+ reqs = self.get_requirements(args, options, finder, session)
+ check_legacy_setup_py_options(
+ options, reqs, LegacySetupPyOptionsCheckMode.DOWNLOAD
+ )
+
+ preparer = self.make_requirement_preparer(
+ temp_build_dir=directory,
+ options=options,
+ build_tracker=build_tracker,
+ session=session,
+ finder=finder,
+ download_dir=options.download_dir,
+ use_user_site=False,
+ verbosity=self.verbosity,
+ )
+
+ resolver = self.make_resolver(
+ preparer=preparer,
+ finder=finder,
+ options=options,
+ ignore_requires_python=options.ignore_requires_python,
+ use_pep517=options.use_pep517,
+ py_version_info=options.python_version,
+ )
+
+ self.trace_basic_info(finder)
+
+ requirement_set = resolver.resolve(reqs, check_supported_wheels=True)
+
+ downloaded: List[str] = []
+ for req in requirement_set.requirements.values():
+ if req.satisfied_by is None:
+ assert req.name is not None
+ preparer.save_linked_requirement(req)
+ downloaded.append(req.name)
+ if downloaded:
+ write_output("Successfully downloaded %s", " ".join(downloaded))
+
+ return SUCCESS
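
Running pip in-process is unsupported, so the documented way to drive this command from Python is a subprocess; the package name and destination directory below are illustrative:

    import subprocess
    import sys

    subprocess.run(
        [sys.executable, "-m", "pip", "download", "requests", "-d", "./wheels"],
        check=True,
    )
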
diff --git a/third_party/python/pip/pip/_internal/commands/freeze.py b/third_party/python/pip/pip/_internal/commands/freeze.py
new file mode 100644
index 0000000000..5fa6d39b2c
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/freeze.py
@@ -0,0 +1,97 @@
+import sys
+from optparse import Values
+from typing import List
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.operations.freeze import freeze
+from pip._internal.utils.compat import stdlib_pkgs
+
+DEV_PKGS = {"pip", "setuptools", "distribute", "wheel"}
+
+
+class FreezeCommand(Command):
+ """
+ Output installed packages in requirements format.
+
+    Packages are listed in a case-insensitive sorted order.
+ """
+
+ usage = """
+ %prog [options]"""
+ log_streams = ("ext://sys.stderr", "ext://sys.stderr")
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "-r",
+ "--requirement",
+ dest="requirements",
+ action="append",
+ default=[],
+ metavar="file",
+ help=(
+ "Use the order in the given requirements file and its "
+ "comments when generating output. This option can be "
+ "used multiple times."
+ ),
+ )
+ self.cmd_opts.add_option(
+ "-l",
+ "--local",
+ dest="local",
+ action="store_true",
+ default=False,
+ help=(
+ "If in a virtualenv that has global access, do not output "
+ "globally-installed packages."
+ ),
+ )
+ self.cmd_opts.add_option(
+ "--user",
+ dest="user",
+ action="store_true",
+ default=False,
+ help="Only output packages installed in user-site.",
+ )
+ self.cmd_opts.add_option(cmdoptions.list_path())
+ self.cmd_opts.add_option(
+ "--all",
+ dest="freeze_all",
+ action="store_true",
+ help=(
+ "Do not skip these packages in the output:"
+ " {}".format(", ".join(DEV_PKGS))
+ ),
+ )
+ self.cmd_opts.add_option(
+ "--exclude-editable",
+ dest="exclude_editable",
+ action="store_true",
+ help="Exclude editable package from output.",
+ )
+ self.cmd_opts.add_option(cmdoptions.list_exclude())
+
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ skip = set(stdlib_pkgs)
+ if not options.freeze_all:
+ skip.update(DEV_PKGS)
+
+ if options.excludes:
+ skip.update(options.excludes)
+
+ cmdoptions.check_list_path_option(options)
+
+ for line in freeze(
+ requirement=options.requirements,
+ local_only=options.local,
+ user_only=options.user,
+ paths=options.path,
+ isolated=options.isolated_mode,
+ skip=skip,
+ exclude_editable=options.exclude_editable,
+ ):
+ sys.stdout.write(line + "\n")
+ return SUCCESS
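
A rough approximation of the default output, using only importlib.metadata (no requirements-file ordering, no editable or path handling), skipping the same DEV_PKGS set:

    from importlib.metadata import distributions

    DEV_PKGS = {"pip", "setuptools", "distribute", "wheel"}
    lines = (
        f"{d.metadata['Name']}=={d.version}"
        for d in distributions()
        if d.metadata["Name"].lower() not in DEV_PKGS
    )
    print("\n".join(sorted(lines, key=str.lower)))  # case-insensitive order
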
diff --git a/third_party/python/pip/pip/_internal/commands/hash.py b/third_party/python/pip/pip/_internal/commands/hash.py
new file mode 100644
index 0000000000..042dac813e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/hash.py
@@ -0,0 +1,59 @@
+import hashlib
+import logging
+import sys
+from optparse import Values
+from typing import List
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.utils.hashes import FAVORITE_HASH, STRONG_HASHES
+from pip._internal.utils.misc import read_chunks, write_output
+
+logger = logging.getLogger(__name__)
+
+
+class HashCommand(Command):
+ """
+ Compute a hash of a local package archive.
+
+ These can be used with --hash in a requirements file to do repeatable
+ installs.
+ """
+
+ usage = "%prog [options] <file> ..."
+ ignore_require_venv = True
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "-a",
+ "--algorithm",
+ dest="algorithm",
+ choices=STRONG_HASHES,
+ action="store",
+ default=FAVORITE_HASH,
+ help="The hash algorithm to use: one of {}".format(
+ ", ".join(STRONG_HASHES)
+ ),
+ )
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ if not args:
+ self.parser.print_usage(sys.stderr)
+ return ERROR
+
+ algorithm = options.algorithm
+ for path in args:
+ write_output(
+ "%s:\n--hash=%s:%s", path, algorithm, _hash_of_file(path, algorithm)
+ )
+ return SUCCESS
+
+
+def _hash_of_file(path: str, algorithm: str) -> str:
+ """Return the hash digest of a file."""
+ with open(path, "rb") as archive:
+ hash = hashlib.new(algorithm)
+ for chunk in read_chunks(archive):
+ hash.update(chunk)
+ return hash.hexdigest()
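
_hash_of_file() has a direct standard-library equivalent: read_chunks() is essentially the buffered-read loop below, and FAVORITE_HASH resolves to sha256. A self-contained sketch:

    import hashlib

    def hash_of_file(path: str, algorithm: str = "sha256") -> str:
        digest = hashlib.new(algorithm)
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(8192), b""):  # read until EOF
                digest.update(chunk)
        return digest.hexdigest()
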
diff --git a/third_party/python/pip/pip/_internal/commands/help.py b/third_party/python/pip/pip/_internal/commands/help.py
new file mode 100644
index 0000000000..62066318b7
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/help.py
@@ -0,0 +1,41 @@
+from optparse import Values
+from typing import List
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.exceptions import CommandError
+
+
+class HelpCommand(Command):
+ """Show help for commands"""
+
+ usage = """
+ %prog <command>"""
+ ignore_require_venv = True
+
+ def run(self, options: Values, args: List[str]) -> int:
+ from pip._internal.commands import (
+ commands_dict,
+ create_command,
+ get_similar_commands,
+ )
+
+ try:
+ # 'pip help' with no args is handled by pip.__init__.parseopt()
+ cmd_name = args[0] # the command we need help for
+ except IndexError:
+ return SUCCESS
+
+ if cmd_name not in commands_dict:
+ guess = get_similar_commands(cmd_name)
+
+ msg = [f'unknown command "{cmd_name}"']
+ if guess:
+ msg.append(f'maybe you meant "{guess}"')
+
+ raise CommandError(" - ".join(msg))
+
+ command = create_command(cmd_name)
+ command.parser.print_help()
+
+ return SUCCESS
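
get_similar_commands() (imported above) is essentially a difflib lookup over the known command names; a minimal approximation with a hand-written command list:

    import difflib

    commands = ["install", "download", "uninstall", "freeze", "list", "help"]
    matches = difflib.get_close_matches("instal", commands)
    print(matches[0] if matches else None)  # -> "install"
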
diff --git a/third_party/python/pip/pip/_internal/commands/index.py b/third_party/python/pip/pip/_internal/commands/index.py
new file mode 100644
index 0000000000..7267effed2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/index.py
@@ -0,0 +1,139 @@
+import logging
+from optparse import Values
+from typing import Any, Iterable, List, Optional, Union
+
+from pip._vendor.packaging.version import LegacyVersion, Version
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.req_command import IndexGroupCommand
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.commands.search import print_dist_installation_info
+from pip._internal.exceptions import CommandError, DistributionNotFound, PipError
+from pip._internal.index.collector import LinkCollector
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.models.selection_prefs import SelectionPreferences
+from pip._internal.models.target_python import TargetPython
+from pip._internal.network.session import PipSession
+from pip._internal.utils.misc import write_output
+
+logger = logging.getLogger(__name__)
+
+
+class IndexCommand(IndexGroupCommand):
+ """
+ Inspect information available from package indexes.
+ """
+
+ ignore_require_venv = True
+ usage = """
+ %prog versions <package>
+ """
+
+ def add_options(self) -> None:
+ cmdoptions.add_target_python_options(self.cmd_opts)
+
+ self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
+ self.cmd_opts.add_option(cmdoptions.pre())
+ self.cmd_opts.add_option(cmdoptions.no_binary())
+ self.cmd_opts.add_option(cmdoptions.only_binary())
+
+ index_opts = cmdoptions.make_option_group(
+ cmdoptions.index_group,
+ self.parser,
+ )
+
+ self.parser.insert_option_group(0, index_opts)
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ handlers = {
+ "versions": self.get_available_package_versions,
+ }
+
+ logger.warning(
+ "pip index is currently an experimental command. "
+ "It may be removed/changed in a future release "
+ "without prior warning."
+ )
+
+ # Determine action
+ if not args or args[0] not in handlers:
+ logger.error(
+ "Need an action (%s) to perform.",
+ ", ".join(sorted(handlers)),
+ )
+ return ERROR
+
+ action = args[0]
+
+ # Error handling happens here, not in the action-handlers.
+ try:
+ handlers[action](options, args[1:])
+ except PipError as e:
+ logger.error(e.args[0])
+ return ERROR
+
+ return SUCCESS
+
+ def _build_package_finder(
+ self,
+ options: Values,
+ session: PipSession,
+ target_python: Optional[TargetPython] = None,
+ ignore_requires_python: Optional[bool] = None,
+ ) -> PackageFinder:
+ """
+ Create a package finder appropriate to the index command.
+ """
+ link_collector = LinkCollector.create(session, options=options)
+
+ # Pass allow_yanked=False to ignore yanked versions.
+ selection_prefs = SelectionPreferences(
+ allow_yanked=False,
+ allow_all_prereleases=options.pre,
+ ignore_requires_python=ignore_requires_python,
+ )
+
+ return PackageFinder.create(
+ link_collector=link_collector,
+ selection_prefs=selection_prefs,
+ target_python=target_python,
+ )
+
+ def get_available_package_versions(self, options: Values, args: List[Any]) -> None:
+ if len(args) != 1:
+ raise CommandError("You need to specify exactly one argument")
+
+ target_python = cmdoptions.make_target_python(options)
+ query = args[0]
+
+ with self._build_session(options) as session:
+ finder = self._build_package_finder(
+ options=options,
+ session=session,
+ target_python=target_python,
+ ignore_requires_python=options.ignore_requires_python,
+ )
+
+ versions: Iterable[Union[LegacyVersion, Version]] = (
+ candidate.version for candidate in finder.find_all_candidates(query)
+ )
+
+ if not options.pre:
+ # Remove prereleases
+ versions = (
+ version for version in versions if not version.is_prerelease
+ )
+ versions = set(versions)
+
+ if not versions:
+ raise DistributionNotFound(
+ "No matching distribution found for {}".format(query)
+ )
+
+ formatted_versions = [str(ver) for ver in sorted(versions, reverse=True)]
+ latest = formatted_versions[0]
+
+ write_output("{} ({})".format(query, latest))
+ write_output("Available versions: {}".format(", ".join(formatted_versions)))
+ print_dist_installation_info(query, latest)
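
The version post-processing in get_available_package_versions(), distilled over sample data with packaging.version:

    from packaging.version import Version

    versions = {Version(v) for v in ["1.0", "1.1", "2.0rc1", "2.0rc1"]}
    stable = {v for v in versions if not v.is_prerelease}  # unless --pre is given
    formatted = [str(v) for v in sorted(stable, reverse=True)]
    print(f"example-pkg ({formatted[0]})")              # example-pkg (1.1)
    print("Available versions:", ", ".join(formatted))  # 1.1, 1.0
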
diff --git a/third_party/python/pip/pip/_internal/commands/inspect.py b/third_party/python/pip/pip/_internal/commands/inspect.py
new file mode 100644
index 0000000000..27c8fa3d5b
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/inspect.py
@@ -0,0 +1,92 @@
+import logging
+from optparse import Values
+from typing import Any, Dict, List
+
+from pip._vendor.packaging.markers import default_environment
+from pip._vendor.rich import print_json
+
+from pip import __version__
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.req_command import Command
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.metadata import BaseDistribution, get_environment
+from pip._internal.utils.compat import stdlib_pkgs
+from pip._internal.utils.urls import path_to_url
+
+logger = logging.getLogger(__name__)
+
+
+class InspectCommand(Command):
+ """
+ Inspect the content of a Python environment and produce a report in JSON format.
+ """
+
+ ignore_require_venv = True
+ usage = """
+ %prog [options]"""
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "--local",
+ action="store_true",
+ default=False,
+ help=(
+ "If in a virtualenv that has global access, do not list "
+ "globally-installed packages."
+ ),
+ )
+ self.cmd_opts.add_option(
+ "--user",
+ dest="user",
+ action="store_true",
+ default=False,
+ help="Only output packages installed in user-site.",
+ )
+ self.cmd_opts.add_option(cmdoptions.list_path())
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ cmdoptions.check_list_path_option(options)
+ dists = get_environment(options.path).iter_installed_distributions(
+ local_only=options.local,
+ user_only=options.user,
+ skip=set(stdlib_pkgs),
+ )
+ output = {
+ "version": "1",
+ "pip_version": __version__,
+ "installed": [self._dist_to_dict(dist) for dist in dists],
+ "environment": default_environment(),
+ # TODO tags? scheme?
+ }
+ print_json(data=output)
+ return SUCCESS
+
+ def _dist_to_dict(self, dist: BaseDistribution) -> Dict[str, Any]:
+ res: Dict[str, Any] = {
+ "metadata": dist.metadata_dict,
+ "metadata_location": dist.info_location,
+ }
+ # direct_url. Note that we don't have download_info (as in the installation
+ # report) since it is not recorded in installed metadata.
+ direct_url = dist.direct_url
+ if direct_url is not None:
+ res["direct_url"] = direct_url.to_dict()
+ else:
+ # Emulate direct_url for legacy editable installs.
+ editable_project_location = dist.editable_project_location
+ if editable_project_location is not None:
+ res["direct_url"] = {
+ "url": path_to_url(editable_project_location),
+ "dir_info": {
+ "editable": True,
+ },
+ }
+ # installer
+        installer = dist.installer
+        if installer:
+            res["installer"] = installer
+ # requested
+ if dist.installed_with_dist_info:
+ res["requested"] = dist.requested
+ return res
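
For reference, the JSON document assembled above has this overall shape; all values here are abridged and invented for illustration:

    report = {
        "version": "1",
        "pip_version": "23.0.1",
        "installed": [
            {
                "metadata": {"name": "example-pkg", "version": "1.0"},
                "metadata_location": "/site-packages/example_pkg-1.0.dist-info",
                "installer": "pip",
                "requested": True,
            }
        ],
        "environment": {"python_version": "3.11", "sys_platform": "linux"},
    }
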
diff --git a/third_party/python/pip/pip/_internal/commands/install.py b/third_party/python/pip/pip/_internal/commands/install.py
new file mode 100644
index 0000000000..b20aeddf83
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/install.py
@@ -0,0 +1,873 @@
+import errno
+import json
+import operator
+import os
+import shutil
+import site
+from optparse import SUPPRESS_HELP, Values
+from typing import Iterable, List, Optional
+
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.rich import print_json
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.cmdoptions import make_target_python
+from pip._internal.cli.req_command import (
+ RequirementCommand,
+ warn_if_run_as_root,
+ with_cleanup,
+)
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.exceptions import CommandError, InstallationError
+from pip._internal.locations import get_scheme
+from pip._internal.metadata import get_environment
+from pip._internal.models.format_control import FormatControl
+from pip._internal.models.installation_report import InstallationReport
+from pip._internal.operations.build.build_tracker import get_build_tracker
+from pip._internal.operations.check import ConflictDetails, check_install_conflicts
+from pip._internal.req import install_given_reqs
+from pip._internal.req.req_install import (
+ InstallRequirement,
+ LegacySetupPyOptionsCheckMode,
+ check_legacy_setup_py_options,
+)
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.deprecation import (
+ LegacyInstallReasonFailedBdistWheel,
+ deprecated,
+)
+from pip._internal.utils.distutils_args import parse_distutils_args
+from pip._internal.utils.filesystem import test_writable_dir
+from pip._internal.utils.logging import getLogger
+from pip._internal.utils.misc import (
+ check_externally_managed,
+ ensure_dir,
+ get_pip_version,
+ protect_pip_from_modification_on_windows,
+ write_output,
+)
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.virtualenv import (
+ running_under_virtualenv,
+ virtualenv_no_global,
+)
+from pip._internal.wheel_builder import (
+ BdistWheelAllowedPredicate,
+ build,
+ should_build_for_install_command,
+)
+
+logger = getLogger(__name__)
+
+
+def get_check_bdist_wheel_allowed(
+ format_control: FormatControl,
+) -> BdistWheelAllowedPredicate:
+ def check_binary_allowed(req: InstallRequirement) -> bool:
+ canonical_name = canonicalize_name(req.name or "")
+ allowed_formats = format_control.get_allowed_formats(canonical_name)
+ return "binary" in allowed_formats
+
+ return check_binary_allowed
+
+
+class InstallCommand(RequirementCommand):
+ """
+ Install packages from:
+
+ - PyPI (and other indexes) using requirement specifiers.
+ - VCS project urls.
+ - Local project directories.
+ - Local or remote source archives.
+
+ pip also supports installing from "requirements files", which provide
+ an easy way to specify a whole environment to be installed.
+ """
+
+ usage = """
+ %prog [options] <requirement specifier> [package-index-options] ...
+ %prog [options] -r <requirements file> [package-index-options] ...
+ %prog [options] [-e] <vcs project url> ...
+ %prog [options] [-e] <local project path> ...
+ %prog [options] <archive url/path> ..."""
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(cmdoptions.requirements())
+ self.cmd_opts.add_option(cmdoptions.constraints())
+ self.cmd_opts.add_option(cmdoptions.no_deps())
+ self.cmd_opts.add_option(cmdoptions.pre())
+
+ self.cmd_opts.add_option(cmdoptions.editable())
+ self.cmd_opts.add_option(
+ "--dry-run",
+ action="store_true",
+ dest="dry_run",
+ default=False,
+ help=(
+ "Don't actually install anything, just print what would be. "
+ "Can be used in combination with --ignore-installed "
+ "to 'resolve' the requirements."
+ ),
+ )
+ self.cmd_opts.add_option(
+ "-t",
+ "--target",
+ dest="target_dir",
+ metavar="dir",
+ default=None,
+ help=(
+ "Install packages into <dir>. "
+ "By default this will not replace existing files/folders in "
+ "<dir>. Use --upgrade to replace existing packages in <dir> "
+ "with new versions."
+ ),
+ )
+ cmdoptions.add_target_python_options(self.cmd_opts)
+
+ self.cmd_opts.add_option(
+ "--user",
+ dest="use_user_site",
+ action="store_true",
+ help=(
+ "Install to the Python user install directory for your "
+ "platform. Typically ~/.local/, or %APPDATA%\\Python on "
+ "Windows. (See the Python documentation for site.USER_BASE "
+ "for full details.)"
+ ),
+ )
+ self.cmd_opts.add_option(
+ "--no-user",
+ dest="use_user_site",
+ action="store_false",
+ help=SUPPRESS_HELP,
+ )
+ self.cmd_opts.add_option(
+ "--root",
+ dest="root_path",
+ metavar="dir",
+ default=None,
+ help="Install everything relative to this alternate root directory.",
+ )
+ self.cmd_opts.add_option(
+ "--prefix",
+ dest="prefix_path",
+ metavar="dir",
+ default=None,
+ help=(
+ "Installation prefix where lib, bin and other top-level "
+ "folders are placed"
+ ),
+ )
+
+ self.cmd_opts.add_option(cmdoptions.src())
+
+ self.cmd_opts.add_option(
+ "-U",
+ "--upgrade",
+ dest="upgrade",
+ action="store_true",
+ help=(
+ "Upgrade all specified packages to the newest available "
+ "version. The handling of dependencies depends on the "
+ "upgrade-strategy used."
+ ),
+ )
+
+ self.cmd_opts.add_option(
+ "--upgrade-strategy",
+ dest="upgrade_strategy",
+ default="only-if-needed",
+ choices=["only-if-needed", "eager"],
+ help=(
+ "Determines how dependency upgrading should be handled "
+ "[default: %default]. "
+ '"eager" - dependencies are upgraded regardless of '
+ "whether the currently installed version satisfies the "
+ "requirements of the upgraded package(s). "
+ '"only-if-needed" - are upgraded only when they do not '
+ "satisfy the requirements of the upgraded package(s)."
+ ),
+ )
+
+ self.cmd_opts.add_option(
+ "--force-reinstall",
+ dest="force_reinstall",
+ action="store_true",
+ help="Reinstall all packages even if they are already up-to-date.",
+ )
+
+ self.cmd_opts.add_option(
+ "-I",
+ "--ignore-installed",
+ dest="ignore_installed",
+ action="store_true",
+ help=(
+ "Ignore the installed packages, overwriting them. "
+ "This can break your system if the existing package "
+ "is of a different version or was installed "
+ "with a different package manager!"
+ ),
+ )
+
+ self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
+ self.cmd_opts.add_option(cmdoptions.no_build_isolation())
+ self.cmd_opts.add_option(cmdoptions.use_pep517())
+ self.cmd_opts.add_option(cmdoptions.no_use_pep517())
+ self.cmd_opts.add_option(cmdoptions.check_build_deps())
+ self.cmd_opts.add_option(cmdoptions.override_externally_managed())
+
+ self.cmd_opts.add_option(cmdoptions.config_settings())
+ self.cmd_opts.add_option(cmdoptions.install_options())
+ self.cmd_opts.add_option(cmdoptions.global_options())
+
+ self.cmd_opts.add_option(
+ "--compile",
+ action="store_true",
+ dest="compile",
+ default=True,
+ help="Compile Python source files to bytecode",
+ )
+
+ self.cmd_opts.add_option(
+ "--no-compile",
+ action="store_false",
+ dest="compile",
+ help="Do not compile Python source files to bytecode",
+ )
+
+ self.cmd_opts.add_option(
+ "--no-warn-script-location",
+ action="store_false",
+ dest="warn_script_location",
+ default=True,
+ help="Do not warn when installing scripts outside PATH",
+ )
+ self.cmd_opts.add_option(
+ "--no-warn-conflicts",
+ action="store_false",
+ dest="warn_about_conflicts",
+ default=True,
+ help="Do not warn about broken dependencies",
+ )
+ self.cmd_opts.add_option(cmdoptions.no_binary())
+ self.cmd_opts.add_option(cmdoptions.only_binary())
+ self.cmd_opts.add_option(cmdoptions.prefer_binary())
+ self.cmd_opts.add_option(cmdoptions.require_hashes())
+ self.cmd_opts.add_option(cmdoptions.progress_bar())
+ self.cmd_opts.add_option(cmdoptions.root_user_action())
+
+ index_opts = cmdoptions.make_option_group(
+ cmdoptions.index_group,
+ self.parser,
+ )
+
+ self.parser.insert_option_group(0, index_opts)
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ self.cmd_opts.add_option(
+ "--report",
+ dest="json_report_file",
+ metavar="file",
+ default=None,
+ help=(
+ "Generate a JSON file describing what pip did to install "
+ "the provided requirements. "
+ "Can be used in combination with --dry-run and --ignore-installed "
+ "to 'resolve' the requirements. "
+ "When - is used as file name it writes to stdout. "
+ "When writing to stdout, please combine with the --quiet option "
+ "to avoid mixing pip logging output with JSON output."
+ ),
+ )
+
+ @with_cleanup
+ def run(self, options: Values, args: List[str]) -> int:
+ if options.use_user_site and options.target_dir is not None:
+ raise CommandError("Can not combine '--user' and '--target'")
+
+ # Check whether the environment we're installing into is externally
+ # managed, as specified in PEP 668. Specifying --root, --target, or
+ # --prefix disables the check, since there's no reliable way to locate
+ # the EXTERNALLY-MANAGED file for those cases. An exception is also
+ # made specifically for "--dry-run --report" for convenience.
+ installing_into_current_environment = (
+ not (options.dry_run and options.json_report_file)
+ and options.root_path is None
+ and options.target_dir is None
+ and options.prefix_path is None
+ )
+ if (
+ installing_into_current_environment
+ and not options.override_externally_managed
+ ):
+ check_externally_managed()
+
+ upgrade_strategy = "to-satisfy-only"
+ if options.upgrade:
+ upgrade_strategy = options.upgrade_strategy
+
+ cmdoptions.check_dist_restriction(options, check_target=True)
+
+ install_options = options.install_options or []
+
+ logger.verbose("Using %s", get_pip_version())
+ options.use_user_site = decide_user_install(
+ options.use_user_site,
+ prefix_path=options.prefix_path,
+ target_dir=options.target_dir,
+ root_path=options.root_path,
+ isolated_mode=options.isolated_mode,
+ )
+
+ target_temp_dir: Optional[TempDirectory] = None
+ target_temp_dir_path: Optional[str] = None
+ if options.target_dir:
+ options.ignore_installed = True
+ options.target_dir = os.path.abspath(options.target_dir)
+ if (
+ # fmt: off
+ os.path.exists(options.target_dir) and
+ not os.path.isdir(options.target_dir)
+ # fmt: on
+ ):
+ raise CommandError(
+ "Target path exists but is not a directory, will not continue."
+ )
+
+ # Create a target directory for using with the target option
+ target_temp_dir = TempDirectory(kind="target")
+ target_temp_dir_path = target_temp_dir.path
+ self.enter_context(target_temp_dir)
+
+ global_options = options.global_options or []
+
+ session = self.get_default_session(options)
+
+ target_python = make_target_python(options)
+ finder = self._build_package_finder(
+ options=options,
+ session=session,
+ target_python=target_python,
+ ignore_requires_python=options.ignore_requires_python,
+ )
+ build_tracker = self.enter_context(get_build_tracker())
+
+ directory = TempDirectory(
+ delete=not options.no_clean,
+ kind="install",
+ globally_managed=True,
+ )
+
+ try:
+ reqs = self.get_requirements(args, options, finder, session)
+ check_legacy_setup_py_options(
+ options, reqs, LegacySetupPyOptionsCheckMode.INSTALL
+ )
+
+ if "no-binary-enable-wheel-cache" in options.features_enabled:
+ # TODO: remove format_control from WheelCache when the deprecation cycle
+ # is over
+ wheel_cache = WheelCache(options.cache_dir)
+ else:
+ if options.format_control.no_binary:
+ deprecated(
+ reason=(
+ "--no-binary currently disables reading from "
+ "the cache of locally built wheels. In the future "
+ "--no-binary will not influence the wheel cache."
+ ),
+ replacement="to use the --no-cache-dir option",
+ feature_flag="no-binary-enable-wheel-cache",
+ issue=11453,
+ gone_in="23.1",
+ )
+ wheel_cache = WheelCache(options.cache_dir, options.format_control)
+
+ # Only when installing is it permitted to use PEP 660.
+ # In other circumstances (pip wheel, pip download) we generate
+            # regular (i.e. non-editable) metadata and wheels.
+ for req in reqs:
+ req.permit_editable_wheels = True
+
+ reject_location_related_install_options(reqs, options.install_options)
+
+ preparer = self.make_requirement_preparer(
+ temp_build_dir=directory,
+ options=options,
+ build_tracker=build_tracker,
+ session=session,
+ finder=finder,
+ use_user_site=options.use_user_site,
+ verbosity=self.verbosity,
+ )
+ resolver = self.make_resolver(
+ preparer=preparer,
+ finder=finder,
+ options=options,
+ wheel_cache=wheel_cache,
+ use_user_site=options.use_user_site,
+ ignore_installed=options.ignore_installed,
+ ignore_requires_python=options.ignore_requires_python,
+ force_reinstall=options.force_reinstall,
+ upgrade_strategy=upgrade_strategy,
+ use_pep517=options.use_pep517,
+ )
+
+ self.trace_basic_info(finder)
+
+ requirement_set = resolver.resolve(
+ reqs, check_supported_wheels=not options.target_dir
+ )
+
+ if options.json_report_file:
+ report = InstallationReport(requirement_set.requirements_to_install)
+ if options.json_report_file == "-":
+ print_json(data=report.to_dict())
+ else:
+ with open(options.json_report_file, "w", encoding="utf-8") as f:
+ json.dump(report.to_dict(), f, indent=2, ensure_ascii=False)
+
+ if options.dry_run:
+ would_install_items = sorted(
+ (r.metadata["name"], r.metadata["version"])
+ for r in requirement_set.requirements_to_install
+ )
+ if would_install_items:
+ write_output(
+ "Would install %s",
+ " ".join("-".join(item) for item in would_install_items),
+ )
+ return SUCCESS
+
+ try:
+ pip_req = requirement_set.get_requirement("pip")
+ except KeyError:
+ modifying_pip = False
+ else:
+ # If we're not replacing an already installed pip,
+ # we're not modifying it.
+ modifying_pip = pip_req.satisfied_by is None
+ protect_pip_from_modification_on_windows(modifying_pip=modifying_pip)
+
+ check_bdist_wheel_allowed = get_check_bdist_wheel_allowed(
+ finder.format_control
+ )
+
+ reqs_to_build = [
+ r
+ for r in requirement_set.requirements.values()
+ if should_build_for_install_command(r, check_bdist_wheel_allowed)
+ ]
+
+ _, build_failures = build(
+ reqs_to_build,
+ wheel_cache=wheel_cache,
+ verify=True,
+ build_options=[],
+ global_options=global_options,
+ )
+
+ # If we're using PEP 517, we cannot do a legacy setup.py install
+ # so we fail here.
+ pep517_build_failure_names: List[str] = [
+ r.name for r in build_failures if r.use_pep517 # type: ignore
+ ]
+ if pep517_build_failure_names:
+ raise InstallationError(
+ "Could not build wheels for {}, which is required to "
+ "install pyproject.toml-based projects".format(
+ ", ".join(pep517_build_failure_names)
+ )
+ )
+
+ # For now, we just warn about failures building legacy
+ # requirements, as we'll fall through to a setup.py install for
+ # those.
+ for r in build_failures:
+ if not r.use_pep517:
+ r.legacy_install_reason = LegacyInstallReasonFailedBdistWheel
+
+ to_install = resolver.get_installation_order(requirement_set)
+
+ # Check for conflicts in the package set we're installing.
+ conflicts: Optional[ConflictDetails] = None
+ should_warn_about_conflicts = (
+ not options.ignore_dependencies and options.warn_about_conflicts
+ )
+ if should_warn_about_conflicts:
+ conflicts = self._determine_conflicts(to_install)
+
+ # Don't warn about script install locations if
+ # --target or --prefix has been specified
+ warn_script_location = options.warn_script_location
+ if options.target_dir or options.prefix_path:
+ warn_script_location = False
+
+ installed = install_given_reqs(
+ to_install,
+ install_options,
+ global_options,
+ root=options.root_path,
+ home=target_temp_dir_path,
+ prefix=options.prefix_path,
+ warn_script_location=warn_script_location,
+ use_user_site=options.use_user_site,
+ pycompile=options.compile,
+ )
+
+ lib_locations = get_lib_location_guesses(
+ user=options.use_user_site,
+ home=target_temp_dir_path,
+ root=options.root_path,
+ prefix=options.prefix_path,
+ isolated=options.isolated_mode,
+ )
+ env = get_environment(lib_locations)
+
+ installed.sort(key=operator.attrgetter("name"))
+ items = []
+ for result in installed:
+ item = result.name
+ try:
+ installed_dist = env.get_distribution(item)
+ if installed_dist is not None:
+ item = f"{item}-{installed_dist.version}"
+ except Exception:
+ pass
+ items.append(item)
+
+ if conflicts is not None:
+ self._warn_about_conflicts(
+ conflicts,
+ resolver_variant=self.determine_resolver_variant(options),
+ )
+
+ installed_desc = " ".join(items)
+ if installed_desc:
+ write_output(
+ "Successfully installed %s",
+ installed_desc,
+ )
+ except OSError as error:
+ show_traceback = self.verbosity >= 1
+
+ message = create_os_error_message(
+ error,
+ show_traceback,
+ options.use_user_site,
+ )
+ logger.error(message, exc_info=show_traceback) # noqa
+
+ return ERROR
+
+ if options.target_dir:
+ assert target_temp_dir
+ self._handle_target_dir(
+ options.target_dir, target_temp_dir, options.upgrade
+ )
+ if options.root_user_action == "warn":
+ warn_if_run_as_root()
+ return SUCCESS
+
+ def _handle_target_dir(
+ self, target_dir: str, target_temp_dir: TempDirectory, upgrade: bool
+ ) -> None:
+ ensure_dir(target_dir)
+
+ # Checking both purelib and platlib directories for installed
+ # packages to be moved to target directory
+ lib_dir_list = []
+
+        # Resolve the purelib/platlib/data directories of the temporary
+        # target scheme.
+ scheme = get_scheme("", home=target_temp_dir.path)
+ purelib_dir = scheme.purelib
+ platlib_dir = scheme.platlib
+ data_dir = scheme.data
+
+ if os.path.exists(purelib_dir):
+ lib_dir_list.append(purelib_dir)
+ if os.path.exists(platlib_dir) and platlib_dir != purelib_dir:
+ lib_dir_list.append(platlib_dir)
+ if os.path.exists(data_dir):
+ lib_dir_list.append(data_dir)
+
+ for lib_dir in lib_dir_list:
+ for item in os.listdir(lib_dir):
+ if lib_dir == data_dir:
+ ddir = os.path.join(data_dir, item)
+ if any(s.startswith(ddir) for s in lib_dir_list[:-1]):
+ continue
+ target_item_dir = os.path.join(target_dir, item)
+ if os.path.exists(target_item_dir):
+ if not upgrade:
+ logger.warning(
+ "Target directory %s already exists. Specify "
+ "--upgrade to force replacement.",
+ target_item_dir,
+ )
+ continue
+ if os.path.islink(target_item_dir):
+ logger.warning(
+ "Target directory %s already exists and is "
+ "a link. pip will not automatically replace "
+ "links, please remove if replacement is "
+ "desired.",
+ target_item_dir,
+ )
+ continue
+ if os.path.isdir(target_item_dir):
+ shutil.rmtree(target_item_dir)
+ else:
+ os.remove(target_item_dir)
+
+ shutil.move(os.path.join(lib_dir, item), target_item_dir)
+
+ def _determine_conflicts(
+ self, to_install: List[InstallRequirement]
+ ) -> Optional[ConflictDetails]:
+ try:
+ return check_install_conflicts(to_install)
+ except Exception:
+ logger.exception(
+ "Error while checking for conflicts. Please file an issue on "
+ "pip's issue tracker: https://github.com/pypa/pip/issues/new"
+ )
+ return None
+
+ def _warn_about_conflicts(
+ self, conflict_details: ConflictDetails, resolver_variant: str
+ ) -> None:
+ package_set, (missing, conflicting) = conflict_details
+ if not missing and not conflicting:
+ return
+
+ parts: List[str] = []
+ if resolver_variant == "legacy":
+ parts.append(
+ "pip's legacy dependency resolver does not consider dependency "
+ "conflicts when selecting packages. This behaviour is the "
+ "source of the following dependency conflicts."
+ )
+ else:
+ assert resolver_variant == "2020-resolver"
+ parts.append(
+ "pip's dependency resolver does not currently take into account "
+ "all the packages that are installed. This behaviour is the "
+ "source of the following dependency conflicts."
+ )
+
+ # NOTE: There is some duplication here, with commands/check.py
+ for project_name in missing:
+ version = package_set[project_name][0]
+ for dependency in missing[project_name]:
+ message = (
+ "{name} {version} requires {requirement}, "
+ "which is not installed."
+ ).format(
+ name=project_name,
+ version=version,
+ requirement=dependency[1],
+ )
+ parts.append(message)
+
+ for project_name in conflicting:
+ version = package_set[project_name][0]
+ for dep_name, dep_version, req in conflicting[project_name]:
+ message = (
+ "{name} {version} requires {requirement}, but {you} have "
+ "{dep_name} {dep_version} which is incompatible."
+ ).format(
+ name=project_name,
+ version=version,
+ requirement=req,
+ dep_name=dep_name,
+ dep_version=dep_version,
+ you=("you" if resolver_variant == "2020-resolver" else "you'll"),
+ )
+ parts.append(message)
+
+ logger.critical("\n".join(parts))
+
+
+def get_lib_location_guesses(
+ user: bool = False,
+ home: Optional[str] = None,
+ root: Optional[str] = None,
+ isolated: bool = False,
+ prefix: Optional[str] = None,
+) -> List[str]:
+ scheme = get_scheme(
+ "",
+ user=user,
+ home=home,
+ root=root,
+ isolated=isolated,
+ prefix=prefix,
+ )
+ return [scheme.purelib, scheme.platlib]
+
+
+def site_packages_writable(root: Optional[str], isolated: bool) -> bool:
+ return all(
+ test_writable_dir(d)
+ for d in set(get_lib_location_guesses(root=root, isolated=isolated))
+ )
+
+
+def decide_user_install(
+ use_user_site: Optional[bool],
+ prefix_path: Optional[str] = None,
+ target_dir: Optional[str] = None,
+ root_path: Optional[str] = None,
+ isolated_mode: bool = False,
+) -> bool:
+ """Determine whether to do a user install based on the input options.
+
+ If use_user_site is False, no additional checks are done.
+ If use_user_site is True, it is checked for compatibility with other
+ options.
+ If use_user_site is None, the default behaviour depends on the environment,
+ which is provided by the other arguments.
+ """
+ # In some cases (config from tox), use_user_site can be set to an integer
+ # rather than a bool, which 'use_user_site is False' wouldn't catch.
+ if (use_user_site is not None) and (not use_user_site):
+ logger.debug("Non-user install by explicit request")
+ return False
+
+ if use_user_site:
+ if prefix_path:
+ raise CommandError(
+ "Can not combine '--user' and '--prefix' as they imply "
+ "different installation locations"
+ )
+ if virtualenv_no_global():
+ raise InstallationError(
+ "Can not perform a '--user' install. User site-packages "
+ "are not visible in this virtualenv."
+ )
+ logger.debug("User install by explicit request")
+ return True
+
+ # If we are here, user installs have not been explicitly requested/avoided
+ assert use_user_site is None
+
+ # user install incompatible with --prefix/--target
+ if prefix_path or target_dir:
+ logger.debug("Non-user install due to --prefix or --target option")
+ return False
+
+ # If user installs are not enabled, choose a non-user install
+ if not site.ENABLE_USER_SITE:
+ logger.debug("Non-user install because user site-packages disabled")
+ return False
+
+ # If we have permission for a non-user install, do that,
+ # otherwise do a user install.
+ if site_packages_writable(root=root_path, isolated=isolated_mode):
+ logger.debug("Non-user install because site-packages writeable")
+ return False
+
+ logger.info(
+ "Defaulting to user installation because normal site-packages "
+ "is not writeable"
+ )
+ return True
+
+
+def reject_location_related_install_options(
+ requirements: List[InstallRequirement], options: Optional[List[str]]
+) -> None:
+ """If any location-changing --install-option arguments were passed for
+ requirements or on the command-line, then show a deprecation warning.
+ """
+
+ def format_options(option_names: Iterable[str]) -> List[str]:
+ return ["--{}".format(name.replace("_", "-")) for name in option_names]
+
+ offenders = []
+
+ for requirement in requirements:
+ install_options = requirement.install_options
+ location_options = parse_distutils_args(install_options)
+ if location_options:
+ offenders.append(
+ "{!r} from {}".format(
+ format_options(location_options.keys()), requirement
+ )
+ )
+
+ if options:
+ location_options = parse_distutils_args(options)
+ if location_options:
+ offenders.append(
+ "{!r} from command line".format(format_options(location_options.keys()))
+ )
+
+ if not offenders:
+ return
+
+ raise CommandError(
+ "Location-changing options found in --install-option: {}."
+ " This is unsupported, use pip-level options like --user,"
+ " --prefix, --root, and --target instead.".format("; ".join(offenders))
+ )
+
+
+def create_os_error_message(
+ error: OSError, show_traceback: bool, using_user_site: bool
+) -> str:
+ """Format an error message for an OSError
+
+ It may occur anytime during the execution of the install command.
+ """
+ parts = []
+
+ # Mention the error if we are not going to show a traceback
+ parts.append("Could not install packages due to an OSError")
+ if not show_traceback:
+ parts.append(": ")
+ parts.append(str(error))
+ else:
+ parts.append(".")
+
+ # Split the error indication from the helper message (if any)
+ parts[-1] += "\n"
+
+ # Suggest useful actions to the user:
+ # (1) using user site-packages or (2) verifying the permissions
+ if error.errno == errno.EACCES:
+ user_option_part = "Consider using the `--user` option"
+ permissions_part = "Check the permissions"
+
+ if not running_under_virtualenv() and not using_user_site:
+ parts.extend(
+ [
+ user_option_part,
+ " or ",
+ permissions_part.lower(),
+ ]
+ )
+ else:
+ parts.append(permissions_part)
+ parts.append(".\n")
+
+ # Suggest enabling Long Paths if the path length exceeds
+ # 260 characters
+ if (
+ WINDOWS
+ and error.errno == errno.ENOENT
+ and error.filename
+ and len(error.filename) > 260
+ ):
+ parts.append(
+ "HINT: This error might have occurred since "
+ "this system does not have Windows Long Path "
+ "support enabled. You can find information on "
+ "how to enable this at "
+ "https://pip.pypa.io/warnings/enable-long-paths\n"
+ )
+
+ return "".join(parts).strip() + "\n"
diff --git a/third_party/python/pip/pip/_internal/commands/list.py b/third_party/python/pip/pip/_internal/commands/list.py
new file mode 100644
index 0000000000..8e1426dbb6
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/list.py
@@ -0,0 +1,365 @@
+import json
+import logging
+from optparse import Values
+from typing import TYPE_CHECKING, Generator, List, Optional, Sequence, Tuple, cast
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.req_command import IndexGroupCommand
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.exceptions import CommandError
+from pip._internal.index.collector import LinkCollector
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import BaseDistribution, get_environment
+from pip._internal.models.selection_prefs import SelectionPreferences
+from pip._internal.network.session import PipSession
+from pip._internal.utils.compat import stdlib_pkgs
+from pip._internal.utils.misc import tabulate, write_output
+
+if TYPE_CHECKING:
+ from pip._internal.metadata.base import DistributionVersion
+
+ class _DistWithLatestInfo(BaseDistribution):
+ """Give the distribution object a couple of extra fields.
+
+ These will be populated during ``get_outdated()``. This is dirty but
+ makes the rest of the code much cleaner.
+ """
+
+ latest_version: DistributionVersion
+ latest_filetype: str
+
+ _ProcessedDists = Sequence[_DistWithLatestInfo]
+
+
+logger = logging.getLogger(__name__)
+
+
+class ListCommand(IndexGroupCommand):
+ """
+ List installed packages, including editables.
+
+ Packages are listed in a case-insensitive sorted order.
+ """
+
+ ignore_require_venv = True
+ usage = """
+ %prog [options]"""
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "-o",
+ "--outdated",
+ action="store_true",
+ default=False,
+ help="List outdated packages",
+ )
+ self.cmd_opts.add_option(
+ "-u",
+ "--uptodate",
+ action="store_true",
+ default=False,
+ help="List uptodate packages",
+ )
+ self.cmd_opts.add_option(
+ "-e",
+ "--editable",
+ action="store_true",
+ default=False,
+ help="List editable projects.",
+ )
+ self.cmd_opts.add_option(
+ "-l",
+ "--local",
+ action="store_true",
+ default=False,
+ help=(
+ "If in a virtualenv that has global access, do not list "
+ "globally-installed packages."
+ ),
+ )
+ self.cmd_opts.add_option(
+ "--user",
+ dest="user",
+ action="store_true",
+ default=False,
+ help="Only output packages installed in user-site.",
+ )
+ self.cmd_opts.add_option(cmdoptions.list_path())
+ self.cmd_opts.add_option(
+ "--pre",
+ action="store_true",
+ default=False,
+ help=(
+ "Include pre-release and development versions. By default, "
+ "pip only finds stable versions."
+ ),
+ )
+
+ self.cmd_opts.add_option(
+ "--format",
+ action="store",
+ dest="list_format",
+ default="columns",
+ choices=("columns", "freeze", "json"),
+ help="Select the output format among: columns (default), freeze, or json",
+ )
+
+ self.cmd_opts.add_option(
+ "--not-required",
+ action="store_true",
+ dest="not_required",
+ help="List packages that are not dependencies of installed packages.",
+ )
+
+ self.cmd_opts.add_option(
+ "--exclude-editable",
+ action="store_false",
+ dest="include_editable",
+ help="Exclude editable package from output.",
+ )
+ self.cmd_opts.add_option(
+ "--include-editable",
+ action="store_true",
+ dest="include_editable",
+ help="Include editable package from output.",
+ default=True,
+ )
+ self.cmd_opts.add_option(cmdoptions.list_exclude())
+ index_opts = cmdoptions.make_option_group(cmdoptions.index_group, self.parser)
+
+ self.parser.insert_option_group(0, index_opts)
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def _build_package_finder(
+ self, options: Values, session: PipSession
+ ) -> PackageFinder:
+ """
+ Create a package finder appropriate to this list command.
+ """
+ link_collector = LinkCollector.create(session, options=options)
+
+ # Pass allow_yanked=False to ignore yanked versions.
+ selection_prefs = SelectionPreferences(
+ allow_yanked=False,
+ allow_all_prereleases=options.pre,
+ )
+
+ return PackageFinder.create(
+ link_collector=link_collector,
+ selection_prefs=selection_prefs,
+ )
+
+ def run(self, options: Values, args: List[str]) -> int:
+ if options.outdated and options.uptodate:
+ raise CommandError("Options --outdated and --uptodate cannot be combined.")
+
+ if options.outdated and options.list_format == "freeze":
+ raise CommandError(
+ "List format 'freeze' can not be used with the --outdated option."
+ )
+
+ cmdoptions.check_list_path_option(options)
+
+ skip = set(stdlib_pkgs)
+ if options.excludes:
+ skip.update(canonicalize_name(n) for n in options.excludes)
+
+ packages: "_ProcessedDists" = [
+ cast("_DistWithLatestInfo", d)
+ for d in get_environment(options.path).iter_installed_distributions(
+ local_only=options.local,
+ user_only=options.user,
+ editables_only=options.editable,
+ include_editables=options.include_editable,
+ skip=skip,
+ )
+ ]
+
+ # get_not_required must be called first so that all dependencies
+ # are found and filtered out correctly. Otherwise a package may
+ # not be identified as a requirement because some parent packages
+ # could already have been filtered out.
+ if options.not_required:
+ packages = self.get_not_required(packages, options)
+
+ if options.outdated:
+ packages = self.get_outdated(packages, options)
+ elif options.uptodate:
+ packages = self.get_uptodate(packages, options)
+
+ self.output_package_listing(packages, options)
+ return SUCCESS
+
+ def get_outdated(
+ self, packages: "_ProcessedDists", options: Values
+ ) -> "_ProcessedDists":
+ return [
+ dist
+ for dist in self.iter_packages_latest_infos(packages, options)
+ if dist.latest_version > dist.version
+ ]
+
+ def get_uptodate(
+ self, packages: "_ProcessedDists", options: Values
+ ) -> "_ProcessedDists":
+ return [
+ dist
+ for dist in self.iter_packages_latest_infos(packages, options)
+ if dist.latest_version == dist.version
+ ]
+
+ def get_not_required(
+ self, packages: "_ProcessedDists", options: Values
+ ) -> "_ProcessedDists":
+ dep_keys = {
+ canonicalize_name(dep.name)
+ for dist in packages
+ for dep in (dist.iter_dependencies() or ())
+ }
+
+ # Create a set to remove duplicate packages, and cast it to a list
+ # to keep the return type consistent with get_outdated and
+ # get_uptodate
+ return list({pkg for pkg in packages if pkg.canonical_name not in dep_keys})
+
+ def iter_packages_latest_infos(
+ self, packages: "_ProcessedDists", options: Values
+ ) -> Generator["_DistWithLatestInfo", None, None]:
+ with self._build_session(options) as session:
+ finder = self._build_package_finder(options, session)
+
+ def latest_info(
+ dist: "_DistWithLatestInfo",
+ ) -> Optional["_DistWithLatestInfo"]:
+ all_candidates = finder.find_all_candidates(dist.canonical_name)
+ if not options.pre:
+ # Remove prereleases
+ all_candidates = [
+ candidate
+ for candidate in all_candidates
+ if not candidate.version.is_prerelease
+ ]
+
+ evaluator = finder.make_candidate_evaluator(
+ project_name=dist.canonical_name,
+ )
+ best_candidate = evaluator.sort_best_candidate(all_candidates)
+ if best_candidate is None:
+ return None
+
+ remote_version = best_candidate.version
+ if best_candidate.link.is_wheel:
+ typ = "wheel"
+ else:
+ typ = "sdist"
+ dist.latest_version = remote_version
+ dist.latest_filetype = typ
+ return dist
+
+ for dist in map(latest_info, packages):
+ if dist is not None:
+ yield dist
+
+ def output_package_listing(
+ self, packages: "_ProcessedDists", options: Values
+ ) -> None:
+ packages = sorted(
+ packages,
+ key=lambda dist: dist.canonical_name,
+ )
+ if options.list_format == "columns" and packages:
+ data, header = format_for_columns(packages, options)
+ self.output_package_listing_columns(data, header)
+ elif options.list_format == "freeze":
+ for dist in packages:
+ if options.verbose >= 1:
+ write_output(
+ "%s==%s (%s)", dist.raw_name, dist.version, dist.location
+ )
+ else:
+ write_output("%s==%s", dist.raw_name, dist.version)
+ elif options.list_format == "json":
+ write_output(format_for_json(packages, options))
+
+ def output_package_listing_columns(
+ self, data: List[List[str]], header: List[str]
+ ) -> None:
+ # insert the header first: we need to know the size of column names
+ if len(data) > 0:
+ data.insert(0, header)
+
+ pkg_strings, sizes = tabulate(data)
+
+ # Create and add a separator.
+ if len(data) > 0:
+ pkg_strings.insert(1, " ".join(map(lambda x: "-" * x, sizes)))
+
+ for val in pkg_strings:
+ write_output(val)
+
+
+def format_for_columns(
+ pkgs: "_ProcessedDists", options: Values
+) -> Tuple[List[List[str]], List[str]]:
+ """
+ Convert the package data into something usable
+ by output_package_listing_columns.
+ """
+ header = ["Package", "Version"]
+
+ running_outdated = options.outdated
+ if running_outdated:
+ header.extend(["Latest", "Type"])
+
+ has_editables = any(x.editable for x in pkgs)
+ if has_editables:
+ header.append("Editable project location")
+
+ if options.verbose >= 1:
+ header.append("Location")
+ if options.verbose >= 1:
+ header.append("Installer")
+
+ data = []
+ for proj in pkgs:
+ # if we're working on the 'outdated' list, separate out the
+ # latest_version and type
+ row = [proj.raw_name, str(proj.version)]
+
+ if running_outdated:
+ row.append(str(proj.latest_version))
+ row.append(proj.latest_filetype)
+
+ if has_editables:
+ row.append(proj.editable_project_location or "")
+
+ if options.verbose >= 1:
+ row.append(proj.location or "")
+ if options.verbose >= 1:
+ row.append(proj.installer)
+
+ data.append(row)
+
+ return data, header
+
+
+def format_for_json(packages: "_ProcessedDists", options: Values) -> str:
+ data = []
+ for dist in packages:
+ info = {
+ "name": dist.raw_name,
+ "version": str(dist.version),
+ }
+ if options.verbose >= 1:
+ info["location"] = dist.location or ""
+ info["installer"] = dist.installer
+ if options.outdated:
+ info["latest_version"] = str(dist.latest_version)
+ info["latest_filetype"] = dist.latest_filetype
+ editable_project_location = dist.editable_project_location
+ if editable_project_location:
+ info["editable_project_location"] = editable_project_location
+ data.append(info)
+ return json.dumps(data)
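Since format_for_json() defines the machine-readable output of `pip list`, downstream tooling can consume it directly; a small sketch, assuming pip is available in the running interpreter:

    import json
    import subprocess
    import sys

    # --format=json selects the output produced by format_for_json() above.
    out = subprocess.run(
        [sys.executable, "-m", "pip", "list", "--format=json"],
        capture_output=True, text=True, check=True,
    ).stdout
    for pkg in json.loads(out):
        print(pkg["name"], pkg["version"])  # "location"/"installer" appear with -v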
diff --git a/third_party/python/pip/pip/_internal/commands/search.py b/third_party/python/pip/pip/_internal/commands/search.py
new file mode 100644
index 0000000000..03ed925b24
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/search.py
@@ -0,0 +1,174 @@
+import logging
+import shutil
+import sys
+import textwrap
+import xmlrpc.client
+from collections import OrderedDict
+from optparse import Values
+from typing import TYPE_CHECKING, Dict, List, Optional
+
+from pip._vendor.packaging.version import parse as parse_version
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.req_command import SessionCommandMixin
+from pip._internal.cli.status_codes import NO_MATCHES_FOUND, SUCCESS
+from pip._internal.exceptions import CommandError
+from pip._internal.metadata import get_default_environment
+from pip._internal.models.index import PyPI
+from pip._internal.network.xmlrpc import PipXmlrpcTransport
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import write_output
+
+if TYPE_CHECKING:
+ from typing import TypedDict
+
+ class TransformedHit(TypedDict):
+ name: str
+ summary: str
+ versions: List[str]
+
+
+logger = logging.getLogger(__name__)
+
+
+class SearchCommand(Command, SessionCommandMixin):
+ """Search for PyPI packages whose name or summary contains <query>."""
+
+ usage = """
+ %prog [options] <query>"""
+ ignore_require_venv = True
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "-i",
+ "--index",
+ dest="index",
+ metavar="URL",
+ default=PyPI.pypi_url,
+ help="Base URL of Python Package Index (default %default)",
+ )
+
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ if not args:
+ raise CommandError("Missing required argument (search query).")
+ query = args
+ pypi_hits = self.search(query, options)
+ hits = transform_hits(pypi_hits)
+
+ terminal_width = None
+ if sys.stdout.isatty():
+ terminal_width = shutil.get_terminal_size()[0]
+
+ print_results(hits, terminal_width=terminal_width)
+ if pypi_hits:
+ return SUCCESS
+ return NO_MATCHES_FOUND
+
+ def search(self, query: List[str], options: Values) -> List[Dict[str, str]]:
+ index_url = options.index
+
+ session = self.get_default_session(options)
+
+ transport = PipXmlrpcTransport(index_url, session)
+ pypi = xmlrpc.client.ServerProxy(index_url, transport)
+ try:
+ hits = pypi.search({"name": query, "summary": query}, "or")
+ except xmlrpc.client.Fault as fault:
+ message = "XMLRPC request failed [code: {code}]\n{string}".format(
+ code=fault.faultCode,
+ string=fault.faultString,
+ )
+ raise CommandError(message)
+ assert isinstance(hits, list)
+ return hits
+
+
+def transform_hits(hits: List[Dict[str, str]]) -> List["TransformedHit"]:
+ """
+ The list from pypi is really a list of versions. We want a list of
+ packages with the list of versions stored inline. This converts the
+ list from pypi into one we can use.
+ """
+ packages: Dict[str, "TransformedHit"] = OrderedDict()
+ for hit in hits:
+ name = hit["name"]
+ summary = hit["summary"]
+ version = hit["version"]
+
+ if name not in packages.keys():
+ packages[name] = {
+ "name": name,
+ "summary": summary,
+ "versions": [version],
+ }
+ else:
+ packages[name]["versions"].append(version)
+
+ # if this is the highest version, replace summary and score
+ if version == highest_version(packages[name]["versions"]):
+ packages[name]["summary"] = summary
+
+ return list(packages.values())
+
+
+def print_dist_installation_info(name: str, latest: str) -> None:
+ env = get_default_environment()
+ dist = env.get_distribution(name)
+ if dist is not None:
+ with indent_log():
+ if dist.version == latest:
+ write_output("INSTALLED: %s (latest)", dist.version)
+ else:
+ write_output("INSTALLED: %s", dist.version)
+ if parse_version(latest).pre:
+ write_output(
+ "LATEST: %s (pre-release; install"
+ " with `pip install --pre`)",
+ latest,
+ )
+ else:
+ write_output("LATEST: %s", latest)
+
+
+def print_results(
+ hits: List["TransformedHit"],
+ name_column_width: Optional[int] = None,
+ terminal_width: Optional[int] = None,
+) -> None:
+ if not hits:
+ return
+ if name_column_width is None:
+ name_column_width = (
+ max(
+ [
+ len(hit["name"]) + len(highest_version(hit.get("versions", ["-"])))
+ for hit in hits
+ ]
+ )
+ + 4
+ )
+
+ for hit in hits:
+ name = hit["name"]
+ summary = hit["summary"] or ""
+ latest = highest_version(hit.get("versions", ["-"]))
+ if terminal_width is not None:
+ target_width = terminal_width - name_column_width - 5
+ if target_width > 10:
+ # wrap and indent summary to fit terminal
+ summary_lines = textwrap.wrap(summary, target_width)
+ summary = ("\n" + " " * (name_column_width + 3)).join(summary_lines)
+
+ name_latest = f"{name} ({latest})"
+ line = f"{name_latest:{name_column_width}} - {summary}"
+ try:
+ write_output(line)
+ print_dist_installation_info(name, latest)
+ except UnicodeEncodeError:
+ pass
+
+
+def highest_version(versions: List[str]) -> str:
+ return max(versions, key=parse_version)
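transform_hits() above folds the per-version hit list into per-package records; here is a self-contained sketch of the same reshaping over hypothetical search results (the data is made up, and the standalone `packaging` library stands in for the vendored copy):

    from collections import OrderedDict
    from packaging.version import parse as parse_version

    hits = [  # hypothetical XML-RPC results: one entry per release
        {"name": "sampleproj", "summary": "old summary", "version": "1.0"},
        {"name": "sampleproj", "summary": "new summary", "version": "2.0"},
    ]
    packages = OrderedDict()
    for hit in hits:
        entry = packages.setdefault(
            hit["name"],
            {"name": hit["name"], "summary": hit["summary"], "versions": []},
        )
        entry["versions"].append(hit["version"])
        # keep the summary belonging to the highest version seen so far
        if hit["version"] == max(entry["versions"], key=parse_version):
            entry["summary"] = hit["summary"]
    print(list(packages.values()))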
diff --git a/third_party/python/pip/pip/_internal/commands/show.py b/third_party/python/pip/pip/_internal/commands/show.py
new file mode 100644
index 0000000000..3f10701f6b
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/show.py
@@ -0,0 +1,189 @@
+import logging
+from optparse import Values
+from typing import Generator, Iterable, Iterator, List, NamedTuple, Optional
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.status_codes import ERROR, SUCCESS
+from pip._internal.metadata import BaseDistribution, get_default_environment
+from pip._internal.utils.misc import write_output
+
+logger = logging.getLogger(__name__)
+
+
+class ShowCommand(Command):
+ """
+ Show information about one or more installed packages.
+
+ The output is in RFC-compliant mail header format.
+ """
+
+ usage = """
+ %prog [options] <package> ..."""
+ ignore_require_venv = True
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "-f",
+ "--files",
+ dest="files",
+ action="store_true",
+ default=False,
+ help="Show the full list of installed files for each package.",
+ )
+
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ if not args:
+ logger.warning("ERROR: Please provide a package name or names.")
+ return ERROR
+ query = args
+
+ results = search_packages_info(query)
+ if not print_results(
+ results, list_files=options.files, verbose=options.verbose
+ ):
+ return ERROR
+ return SUCCESS
+
+
+class _PackageInfo(NamedTuple):
+ name: str
+ version: str
+ location: str
+ editable_project_location: Optional[str]
+ requires: List[str]
+ required_by: List[str]
+ installer: str
+ metadata_version: str
+ classifiers: List[str]
+ summary: str
+ homepage: str
+ project_urls: List[str]
+ author: str
+ author_email: str
+ license: str
+ entry_points: List[str]
+ files: Optional[List[str]]
+
+
+def search_packages_info(query: List[str]) -> Generator[_PackageInfo, None, None]:
+ """
+ Gather details from installed distributions: name, version, location,
+ and installed files. Listing installed files requires a pip-generated
+ 'installed-files.txt' in the distribution's '.egg-info' directory.
+ """
+ env = get_default_environment()
+
+ installed = {dist.canonical_name: dist for dist in env.iter_all_distributions()}
+ query_names = [canonicalize_name(name) for name in query]
+ missing = sorted(
+ [name for name, pkg in zip(query, query_names) if pkg not in installed]
+ )
+ if missing:
+ logger.warning("Package(s) not found: %s", ", ".join(missing))
+
+ def _get_requiring_packages(current_dist: BaseDistribution) -> Iterator[str]:
+ return (
+ dist.metadata["Name"] or "UNKNOWN"
+ for dist in installed.values()
+ if current_dist.canonical_name
+ in {canonicalize_name(d.name) for d in dist.iter_dependencies()}
+ )
+
+ for query_name in query_names:
+ try:
+ dist = installed[query_name]
+ except KeyError:
+ continue
+
+ requires = sorted((req.name for req in dist.iter_dependencies()), key=str.lower)
+ required_by = sorted(_get_requiring_packages(dist), key=str.lower)
+
+ try:
+ entry_points_text = dist.read_text("entry_points.txt")
+ entry_points = entry_points_text.splitlines(keepends=False)
+ except FileNotFoundError:
+ entry_points = []
+
+ files_iter = dist.iter_declared_entries()
+ if files_iter is None:
+ files: Optional[List[str]] = None
+ else:
+ files = sorted(files_iter)
+
+ metadata = dist.metadata
+
+ yield _PackageInfo(
+ name=dist.raw_name,
+ version=str(dist.version),
+ location=dist.location or "",
+ editable_project_location=dist.editable_project_location,
+ requires=requires,
+ required_by=required_by,
+ installer=dist.installer,
+ metadata_version=dist.metadata_version or "",
+ classifiers=metadata.get_all("Classifier", []),
+ summary=metadata.get("Summary", ""),
+ homepage=metadata.get("Home-page", ""),
+ project_urls=metadata.get_all("Project-URL", []),
+ author=metadata.get("Author", ""),
+ author_email=metadata.get("Author-email", ""),
+ license=metadata.get("License", ""),
+ entry_points=entry_points,
+ files=files,
+ )
+
+
+def print_results(
+ distributions: Iterable[_PackageInfo],
+ list_files: bool,
+ verbose: bool,
+) -> bool:
+ """
+ Print the information from installed distributions found.
+ """
+ results_printed = False
+ for i, dist in enumerate(distributions):
+ results_printed = True
+ if i > 0:
+ write_output("---")
+
+ write_output("Name: %s", dist.name)
+ write_output("Version: %s", dist.version)
+ write_output("Summary: %s", dist.summary)
+ write_output("Home-page: %s", dist.homepage)
+ write_output("Author: %s", dist.author)
+ write_output("Author-email: %s", dist.author_email)
+ write_output("License: %s", dist.license)
+ write_output("Location: %s", dist.location)
+ if dist.editable_project_location is not None:
+ write_output(
+ "Editable project location: %s", dist.editable_project_location
+ )
+ write_output("Requires: %s", ", ".join(dist.requires))
+ write_output("Required-by: %s", ", ".join(dist.required_by))
+
+ if verbose:
+ write_output("Metadata-Version: %s", dist.metadata_version)
+ write_output("Installer: %s", dist.installer)
+ write_output("Classifiers:")
+ for classifier in dist.classifiers:
+ write_output(" %s", classifier)
+ write_output("Entry-points:")
+ for entry in dist.entry_points:
+ write_output(" %s", entry.strip())
+ write_output("Project-URLs:")
+ for project_url in dist.project_urls:
+ write_output(" %s", project_url)
+ if list_files:
+ write_output("Files:")
+ if dist.files is None:
+ write_output("Cannot locate RECORD or installed-files.txt")
+ else:
+ for line in dist.files:
+ write_output(" %s", line.strip())
+ return results_printed
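Because ShowCommand writes RFC 822-style `Name: value` headers, its output parses with the standard library; a sketch, assuming pip itself is installed in the running interpreter:

    import subprocess
    import sys
    from email.parser import HeaderParser

    out = subprocess.run(
        [sys.executable, "-m", "pip", "show", "pip"],
        capture_output=True, text=True, check=True,
    ).stdout
    meta = HeaderParser().parsestr(out)  # one package at a time; "---" separates several
    print(meta["Name"], meta["Version"])
    print("Requires:", meta["Requires"])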
diff --git a/third_party/python/pip/pip/_internal/commands/uninstall.py b/third_party/python/pip/pip/_internal/commands/uninstall.py
new file mode 100644
index 0000000000..f198fc313f
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/uninstall.py
@@ -0,0 +1,113 @@
+import logging
+from optparse import Values
+from typing import List
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.base_command import Command
+from pip._internal.cli.req_command import SessionCommandMixin, warn_if_run_as_root
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.exceptions import InstallationError
+from pip._internal.req import parse_requirements
+from pip._internal.req.constructors import (
+ install_req_from_line,
+ install_req_from_parsed_requirement,
+)
+from pip._internal.utils.misc import (
+ check_externally_managed,
+ protect_pip_from_modification_on_windows,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class UninstallCommand(Command, SessionCommandMixin):
+ """
+ Uninstall packages.
+
+ pip is able to uninstall most installed packages. Known exceptions are:
+
+ - Pure distutils packages installed with ``python setup.py install``, which
+ leave behind no metadata to determine what files were installed.
+ - Script wrappers installed by ``python setup.py develop``.
+ """
+
+ usage = """
+ %prog [options] <package> ...
+ %prog [options] -r <requirements file> ..."""
+
+ def add_options(self) -> None:
+ self.cmd_opts.add_option(
+ "-r",
+ "--requirement",
+ dest="requirements",
+ action="append",
+ default=[],
+ metavar="file",
+ help=(
+ "Uninstall all the packages listed in the given requirements "
+ "file. This option can be used multiple times."
+ ),
+ )
+ self.cmd_opts.add_option(
+ "-y",
+ "--yes",
+ dest="yes",
+ action="store_true",
+ help="Don't ask for confirmation of uninstall deletions.",
+ )
+ self.cmd_opts.add_option(cmdoptions.root_user_action())
+ self.cmd_opts.add_option(cmdoptions.override_externally_managed())
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ def run(self, options: Values, args: List[str]) -> int:
+ session = self.get_default_session(options)
+
+ reqs_to_uninstall = {}
+ for name in args:
+ req = install_req_from_line(
+ name,
+ isolated=options.isolated_mode,
+ )
+ if req.name:
+ reqs_to_uninstall[canonicalize_name(req.name)] = req
+ else:
+ logger.warning(
+ "Invalid requirement: %r ignored -"
+ " the uninstall command expects named"
+ " requirements.",
+ name,
+ )
+ for filename in options.requirements:
+ for parsed_req in parse_requirements(
+ filename, options=options, session=session
+ ):
+ req = install_req_from_parsed_requirement(
+ parsed_req, isolated=options.isolated_mode
+ )
+ if req.name:
+ reqs_to_uninstall[canonicalize_name(req.name)] = req
+ if not reqs_to_uninstall:
+ raise InstallationError(
+ f"You must give at least one requirement to {self.name} (see "
+ f'"pip help {self.name}")'
+ )
+
+ if not options.override_externally_managed:
+ check_externally_managed()
+
+ protect_pip_from_modification_on_windows(
+ modifying_pip="pip" in reqs_to_uninstall
+ )
+
+ for req in reqs_to_uninstall.values():
+ uninstall_pathset = req.uninstall(
+ auto_confirm=options.yes,
+ verbose=self.verbosity > 0,
+ )
+ if uninstall_pathset:
+ uninstall_pathset.commit()
+ if options.root_user_action == "warn":
+ warn_if_run_as_root()
+ return SUCCESS
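run() above keys reqs_to_uninstall by canonicalized name, so spelling variants of the same project collapse into a single uninstall; a sketch using the standalone `packaging` library (the same PEP 503 normalization as the vendored copy, with hypothetical inputs):

    from packaging.utils import canonicalize_name

    reqs = {}
    for spelled in ("Sample_Proj", "sample-proj", "SAMPLE.PROJ"):
        reqs[canonicalize_name(spelled)] = spelled
    print(reqs)  # {'sample-proj': 'SAMPLE.PROJ'} -- one entry; the last spelling wins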
diff --git a/third_party/python/pip/pip/_internal/commands/wheel.py b/third_party/python/pip/pip/_internal/commands/wheel.py
new file mode 100644
index 0000000000..1afbd562c6
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/commands/wheel.py
@@ -0,0 +1,203 @@
+import logging
+import os
+import shutil
+from optparse import Values
+from typing import List
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli import cmdoptions
+from pip._internal.cli.req_command import RequirementCommand, with_cleanup
+from pip._internal.cli.status_codes import SUCCESS
+from pip._internal.exceptions import CommandError
+from pip._internal.operations.build.build_tracker import get_build_tracker
+from pip._internal.req.req_install import (
+ InstallRequirement,
+ LegacySetupPyOptionsCheckMode,
+ check_legacy_setup_py_options,
+)
+from pip._internal.utils.deprecation import deprecated
+from pip._internal.utils.misc import ensure_dir, normalize_path
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.wheel_builder import build, should_build_for_wheel_command
+
+logger = logging.getLogger(__name__)
+
+
+class WheelCommand(RequirementCommand):
+ """
+ Build Wheel archives for your requirements and dependencies.
+
+ Wheel is a built-package format that offers the advantage of not
+ recompiling your software on every install. For more details, see the
+ wheel docs: https://wheel.readthedocs.io/en/latest/
+
+ 'pip wheel' uses the build system interface as described here:
+ https://pip.pypa.io/en/stable/reference/build-system/
+
+ """
+
+ usage = """
+ %prog [options] <requirement specifier> ...
+ %prog [options] -r <requirements file> ...
+ %prog [options] [-e] <vcs project url> ...
+ %prog [options] [-e] <local project path> ...
+ %prog [options] <archive url/path> ..."""
+
+ def add_options(self) -> None:
+
+ self.cmd_opts.add_option(
+ "-w",
+ "--wheel-dir",
+ dest="wheel_dir",
+ metavar="dir",
+ default=os.curdir,
+ help=(
+ "Build wheels into <dir>, where the default is the "
+ "current working directory."
+ ),
+ )
+ self.cmd_opts.add_option(cmdoptions.no_binary())
+ self.cmd_opts.add_option(cmdoptions.only_binary())
+ self.cmd_opts.add_option(cmdoptions.prefer_binary())
+ self.cmd_opts.add_option(cmdoptions.no_build_isolation())
+ self.cmd_opts.add_option(cmdoptions.use_pep517())
+ self.cmd_opts.add_option(cmdoptions.no_use_pep517())
+ self.cmd_opts.add_option(cmdoptions.check_build_deps())
+ self.cmd_opts.add_option(cmdoptions.constraints())
+ self.cmd_opts.add_option(cmdoptions.editable())
+ self.cmd_opts.add_option(cmdoptions.requirements())
+ self.cmd_opts.add_option(cmdoptions.src())
+ self.cmd_opts.add_option(cmdoptions.ignore_requires_python())
+ self.cmd_opts.add_option(cmdoptions.no_deps())
+ self.cmd_opts.add_option(cmdoptions.progress_bar())
+
+ self.cmd_opts.add_option(
+ "--no-verify",
+ dest="no_verify",
+ action="store_true",
+ default=False,
+ help="Don't verify if built wheel is valid.",
+ )
+
+ self.cmd_opts.add_option(cmdoptions.config_settings())
+ self.cmd_opts.add_option(cmdoptions.build_options())
+ self.cmd_opts.add_option(cmdoptions.global_options())
+
+ self.cmd_opts.add_option(
+ "--pre",
+ action="store_true",
+ default=False,
+ help=(
+ "Include pre-release and development versions. By default, "
+ "pip only finds stable versions."
+ ),
+ )
+
+ self.cmd_opts.add_option(cmdoptions.require_hashes())
+
+ index_opts = cmdoptions.make_option_group(
+ cmdoptions.index_group,
+ self.parser,
+ )
+
+ self.parser.insert_option_group(0, index_opts)
+ self.parser.insert_option_group(0, self.cmd_opts)
+
+ @with_cleanup
+ def run(self, options: Values, args: List[str]) -> int:
+ session = self.get_default_session(options)
+
+ finder = self._build_package_finder(options, session)
+ wheel_cache = WheelCache(options.cache_dir, options.format_control)
+
+ options.wheel_dir = normalize_path(options.wheel_dir)
+ ensure_dir(options.wheel_dir)
+
+ build_tracker = self.enter_context(get_build_tracker())
+
+ directory = TempDirectory(
+ delete=not options.no_clean,
+ kind="wheel",
+ globally_managed=True,
+ )
+
+ reqs = self.get_requirements(args, options, finder, session)
+ check_legacy_setup_py_options(
+ options, reqs, LegacySetupPyOptionsCheckMode.WHEEL
+ )
+
+ if "no-binary-enable-wheel-cache" in options.features_enabled:
+ # TODO: remove format_control from WheelCache when the deprecation cycle
+ # is over
+ wheel_cache = WheelCache(options.cache_dir)
+ else:
+ if options.format_control.no_binary:
+ deprecated(
+ reason=(
+ "--no-binary currently disables reading from "
+ "the cache of locally built wheels. In the future "
+ "--no-binary will not influence the wheel cache."
+ ),
+ replacement="to use the --no-cache-dir option",
+ feature_flag="no-binary-enable-wheel-cache",
+ issue=11453,
+ gone_in="23.1",
+ )
+ wheel_cache = WheelCache(options.cache_dir, options.format_control)
+
+ preparer = self.make_requirement_preparer(
+ temp_build_dir=directory,
+ options=options,
+ build_tracker=build_tracker,
+ session=session,
+ finder=finder,
+ download_dir=options.wheel_dir,
+ use_user_site=False,
+ verbosity=self.verbosity,
+ )
+
+ resolver = self.make_resolver(
+ preparer=preparer,
+ finder=finder,
+ options=options,
+ wheel_cache=wheel_cache,
+ ignore_requires_python=options.ignore_requires_python,
+ use_pep517=options.use_pep517,
+ )
+
+ self.trace_basic_info(finder)
+
+ requirement_set = resolver.resolve(reqs, check_supported_wheels=True)
+
+ reqs_to_build: List[InstallRequirement] = []
+ for req in requirement_set.requirements.values():
+ if req.is_wheel:
+ preparer.save_linked_requirement(req)
+ elif should_build_for_wheel_command(req):
+ reqs_to_build.append(req)
+
+ # build wheels
+ build_successes, build_failures = build(
+ reqs_to_build,
+ wheel_cache=wheel_cache,
+ verify=(not options.no_verify),
+ build_options=options.build_options or [],
+ global_options=options.global_options or [],
+ )
+ for req in build_successes:
+ assert req.link and req.link.is_wheel
+ assert req.local_file_path
+ # copy from cache to target directory
+ try:
+ shutil.copy(req.local_file_path, options.wheel_dir)
+ except OSError as e:
+ logger.warning(
+ "Building wheel for %s failed: %s",
+ req.name,
+ e,
+ )
+ build_failures.append(req)
+ if len(build_failures) != 0:
+ raise CommandError("Failed to build one or more wheels")
+
+ return SUCCESS
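End to end, the command above corresponds to the usual CLI invocation; a sketch that builds (or fetches) a single wheel into a temporary directory, assuming network access to PyPI:

    import subprocess
    import sys
    import tempfile

    wheel_dir = tempfile.mkdtemp()
    # -w/--wheel-dir and --no-deps are options registered in add_options() above;
    # "packaging" is just an example requirement.
    subprocess.run(
        [sys.executable, "-m", "pip", "wheel", "--no-deps", "-w", wheel_dir, "packaging"],
        check=True,
    )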
diff --git a/third_party/python/pip/pip/_internal/configuration.py b/third_party/python/pip/pip/_internal/configuration.py
new file mode 100644
index 0000000000..8fd46c9b8e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/configuration.py
@@ -0,0 +1,374 @@
+"""Configuration management setup
+
+Some terminology:
+- name
+ As written in config files.
+- value
+ Value associated with a name
+- key
+ Name combined with its section (section.name)
+- variant
+ A single word describing where the configuration key-value pair came from
+"""
+
+import configparser
+import locale
+import os
+import sys
+from typing import Any, Dict, Iterable, List, NewType, Optional, Tuple
+
+from pip._internal.exceptions import (
+ ConfigurationError,
+ ConfigurationFileCouldNotBeLoaded,
+)
+from pip._internal.utils import appdirs
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.logging import getLogger
+from pip._internal.utils.misc import ensure_dir, enum
+
+RawConfigParser = configparser.RawConfigParser # Shorthand
+Kind = NewType("Kind", str)
+
+CONFIG_BASENAME = "pip.ini" if WINDOWS else "pip.conf"
+ENV_NAMES_IGNORED = "version", "help"
+
+# The kinds of configurations there are.
+kinds = enum(
+ USER="user", # User Specific
+ GLOBAL="global", # System Wide
+ SITE="site", # [Virtual] Environment Specific
+ ENV="env", # from PIP_CONFIG_FILE
+ ENV_VAR="env-var", # from Environment Variables
+)
+OVERRIDE_ORDER = kinds.GLOBAL, kinds.USER, kinds.SITE, kinds.ENV, kinds.ENV_VAR
+VALID_LOAD_ONLY = kinds.USER, kinds.GLOBAL, kinds.SITE
+
+logger = getLogger(__name__)
+
+
+# NOTE: Maybe use the optionxform attribute to normalize key names.
+def _normalize_name(name: str) -> str:
+ """Make a name consistent regardless of source (environment or file)"""
+ name = name.lower().replace("_", "-")
+ if name.startswith("--"):
+ name = name[2:] # only prefer long opts
+ return name
+
+
+def _disassemble_key(name: str) -> List[str]:
+ if "." not in name:
+ error_message = (
+ "Key does not contain dot separated section and key. "
+ "Perhaps you wanted to use 'global.{}' instead?"
+ ).format(name)
+ raise ConfigurationError(error_message)
+ return name.split(".", 1)
+
+
+def get_configuration_files() -> Dict[Kind, List[str]]:
+ global_config_files = [
+ os.path.join(path, CONFIG_BASENAME) for path in appdirs.site_config_dirs("pip")
+ ]
+
+ site_config_file = os.path.join(sys.prefix, CONFIG_BASENAME)
+ legacy_config_file = os.path.join(
+ os.path.expanduser("~"),
+ "pip" if WINDOWS else ".pip",
+ CONFIG_BASENAME,
+ )
+ new_config_file = os.path.join(appdirs.user_config_dir("pip"), CONFIG_BASENAME)
+ return {
+ kinds.GLOBAL: global_config_files,
+ kinds.SITE: [site_config_file],
+ kinds.USER: [legacy_config_file, new_config_file],
+ }
+
+
+class Configuration:
+ """Handles management of configuration.
+
+ Provides an interface to accessing and managing configuration files.
+
+ This class provides an API that takes "section.key-name" style
+ keys and stores the value associated with each as "key-name" under the
+ section "section".
+
+ This allows for a clean interface wherein both the section and the
+ key-name are preserved in an easy-to-manage form in the configuration
+ files, and the stored data stays tidy.
+ """
+
+ def __init__(self, isolated: bool, load_only: Optional[Kind] = None) -> None:
+ super().__init__()
+
+ if load_only is not None and load_only not in VALID_LOAD_ONLY:
+ raise ConfigurationError(
+ "Got invalid value for load_only - should be one of {}".format(
+ ", ".join(map(repr, VALID_LOAD_ONLY))
+ )
+ )
+ self.isolated = isolated
+ self.load_only = load_only
+
+ # Because we keep track of where we got the data from
+ self._parsers: Dict[Kind, List[Tuple[str, RawConfigParser]]] = {
+ variant: [] for variant in OVERRIDE_ORDER
+ }
+ self._config: Dict[Kind, Dict[str, Any]] = {
+ variant: {} for variant in OVERRIDE_ORDER
+ }
+ self._modified_parsers: List[Tuple[str, RawConfigParser]] = []
+
+ def load(self) -> None:
+ """Loads configuration from configuration files and environment"""
+ self._load_config_files()
+ if not self.isolated:
+ self._load_environment_vars()
+
+ def get_file_to_edit(self) -> Optional[str]:
+ """Returns the file with highest priority in configuration"""
+ assert self.load_only is not None, "Need to be specified a file to be editing"
+
+ try:
+ return self._get_parser_to_modify()[0]
+ except IndexError:
+ return None
+
+ def items(self) -> Iterable[Tuple[str, Any]]:
+ """Returns key-value pairs like dict.items() representing the loaded
+ configuration
+ """
+ return self._dictionary.items()
+
+ def get_value(self, key: str) -> Any:
+ """Get a value from the configuration."""
+ orig_key = key
+ key = _normalize_name(key)
+ try:
+ return self._dictionary[key]
+ except KeyError:
+ # disassembling triggers a more useful error message than simply
+ # "No such key" in the case that the key isn't in the form command.option
+ _disassemble_key(key)
+ raise ConfigurationError(f"No such key - {orig_key}")
+
+ def set_value(self, key: str, value: Any) -> None:
+ """Modify a value in the configuration."""
+ key = _normalize_name(key)
+ self._ensure_have_load_only()
+
+ assert self.load_only
+ fname, parser = self._get_parser_to_modify()
+
+ if parser is not None:
+ section, name = _disassemble_key(key)
+
+ # Modify the parser and the configuration
+ if not parser.has_section(section):
+ parser.add_section(section)
+ parser.set(section, name, value)
+
+ self._config[self.load_only][key] = value
+ self._mark_as_modified(fname, parser)
+
+ def unset_value(self, key: str) -> None:
+ """Unset a value in the configuration."""
+ orig_key = key
+ key = _normalize_name(key)
+ self._ensure_have_load_only()
+
+ assert self.load_only
+ if key not in self._config[self.load_only]:
+ raise ConfigurationError(f"No such key - {orig_key}")
+
+ fname, parser = self._get_parser_to_modify()
+
+ if parser is not None:
+ section, name = _disassemble_key(key)
+ if not (
+ parser.has_section(section) and parser.remove_option(section, name)
+ ):
+ # The option was not removed.
+ raise ConfigurationError(
+ "Fatal Internal error [id=1]. Please report as a bug."
+ )
+
+ # The section may be empty after the option was removed.
+ if not parser.items(section):
+ parser.remove_section(section)
+ self._mark_as_modified(fname, parser)
+
+ del self._config[self.load_only][key]
+
+ def save(self) -> None:
+ """Save the current in-memory state."""
+ self._ensure_have_load_only()
+
+ for fname, parser in self._modified_parsers:
+ logger.info("Writing to %s", fname)
+
+ # Ensure directory exists.
+ ensure_dir(os.path.dirname(fname))
+
+ with open(fname, "w") as f:
+ parser.write(f)
+
+ #
+ # Private routines
+ #
+
+ def _ensure_have_load_only(self) -> None:
+ if self.load_only is None:
+ raise ConfigurationError("Needed a specific file to be modifying.")
+ logger.debug("Will be working with %s variant only", self.load_only)
+
+ @property
+ def _dictionary(self) -> Dict[str, Any]:
+ """A dictionary representing the loaded configuration."""
+ # NOTE: Dictionaries are not populated if not loaded. So, conditionals
+ # are not needed here.
+ retval = {}
+
+ for variant in OVERRIDE_ORDER:
+ retval.update(self._config[variant])
+
+ return retval
+
+ def _load_config_files(self) -> None:
+ """Loads configuration from configuration files"""
+ config_files = dict(self.iter_config_files())
+ if config_files[kinds.ENV][0:1] == [os.devnull]:
+ logger.debug(
+ "Skipping loading configuration files due to "
+ "environment's PIP_CONFIG_FILE being os.devnull"
+ )
+ return
+
+ for variant, files in config_files.items():
+ for fname in files:
+ # If there's specific variant set in `load_only`, load only
+ # that variant, not the others.
+ if self.load_only is not None and variant != self.load_only:
+ logger.debug("Skipping file '%s' (variant: %s)", fname, variant)
+ continue
+
+ parser = self._load_file(variant, fname)
+
+ # Keeping track of the parsers used
+ self._parsers[variant].append((fname, parser))
+
+ def _load_file(self, variant: Kind, fname: str) -> RawConfigParser:
+ logger.verbose("For variant '%s', will try loading '%s'", variant, fname)
+ parser = self._construct_parser(fname)
+
+ for section in parser.sections():
+ items = parser.items(section)
+ self._config[variant].update(self._normalized_keys(section, items))
+
+ return parser
+
+ def _construct_parser(self, fname: str) -> RawConfigParser:
+ parser = configparser.RawConfigParser()
+ # If there is no such file, don't bother reading it, but create the
+ # parser anyway to hold the data.
+ # Doing this is useful when modifying and saving files, since the
+ # file being edited may not exist yet.
+ if os.path.exists(fname):
+ locale_encoding = locale.getpreferredencoding(False)
+ try:
+ parser.read(fname, encoding=locale_encoding)
+ except UnicodeDecodeError:
+ # See https://github.com/pypa/pip/issues/4963
+ raise ConfigurationFileCouldNotBeLoaded(
+ reason=f"contains invalid {locale_encoding} characters",
+ fname=fname,
+ )
+ except configparser.Error as error:
+ # See https://github.com/pypa/pip/issues/4893
+ raise ConfigurationFileCouldNotBeLoaded(error=error)
+ return parser
+
+ def _load_environment_vars(self) -> None:
+ """Loads configuration from environment variables"""
+ self._config[kinds.ENV_VAR].update(
+ self._normalized_keys(":env:", self.get_environ_vars())
+ )
+
+ def _normalized_keys(
+ self, section: str, items: Iterable[Tuple[str, Any]]
+ ) -> Dict[str, Any]:
+ """Normalizes items to construct a dictionary with normalized keys.
+
+ This routine is where the names become keys and are made the same
+ regardless of source - configuration files or environment.
+ """
+ normalized = {}
+ for name, val in items:
+ key = section + "." + _normalize_name(name)
+ normalized[key] = val
+ return normalized
+
+ def get_environ_vars(self) -> Iterable[Tuple[str, str]]:
+ """Returns a generator with all environmental vars with prefix PIP_"""
+ for key, val in os.environ.items():
+ if key.startswith("PIP_"):
+ name = key[4:].lower()
+ if name not in ENV_NAMES_IGNORED:
+ yield name, val
+
+ # XXX: This is patched in the tests.
+ def iter_config_files(self) -> Iterable[Tuple[Kind, List[str]]]:
+ """Yields variant and configuration files associated with it.
+
+ This should be treated like items of a dictionary.
+ """
+ # SMELL: Move the conditions out of this function
+
+ # environment variables have the lowest priority
+ config_file = os.environ.get("PIP_CONFIG_FILE", None)
+ if config_file is not None:
+ yield kinds.ENV, [config_file]
+ else:
+ yield kinds.ENV, []
+
+ config_files = get_configuration_files()
+
+ # at the base we have any global configuration
+ yield kinds.GLOBAL, config_files[kinds.GLOBAL]
+
+ # per-user configuration next
+ should_load_user_config = not self.isolated and not (
+ config_file and os.path.exists(config_file)
+ )
+ if should_load_user_config:
+ # The legacy config file is overridden by the new config file
+ yield kinds.USER, config_files[kinds.USER]
+
+ # finally, the virtualenv configuration, which trumps the others
+ yield kinds.SITE, config_files[kinds.SITE]
+
+ def get_values_in_config(self, variant: Kind) -> Dict[str, Any]:
+ """Get values present in a config file"""
+ return self._config[variant]
+
+ def _get_parser_to_modify(self) -> Tuple[str, RawConfigParser]:
+ # Determine which parser to modify
+ assert self.load_only
+ parsers = self._parsers[self.load_only]
+ if not parsers:
+ # This should not happen if everything works correctly.
+ raise ConfigurationError(
+ "Fatal Internal error [id=2]. Please report as a bug."
+ )
+
+ # Use the highest priority parser.
+ return parsers[-1]
+
+ # XXX: This is patched in the tests.
+ def _mark_as_modified(self, fname: str, parser: RawConfigParser) -> None:
+ file_parser_tuple = (fname, parser)
+ if file_parser_tuple not in self._modified_parsers:
+ self._modified_parsers.append(file_parser_tuple)
+
+ def __repr__(self) -> str:
+ return f"{self.__class__.__name__}({self._dictionary!r})"
diff --git a/third_party/python/pip/pip/_internal/distributions/__init__.py b/third_party/python/pip/pip/_internal/distributions/__init__.py
new file mode 100644
index 0000000000..9a89a838b9
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/distributions/__init__.py
@@ -0,0 +1,21 @@
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.distributions.sdist import SourceDistribution
+from pip._internal.distributions.wheel import WheelDistribution
+from pip._internal.req.req_install import InstallRequirement
+
+
+def make_distribution_for_install_requirement(
+ install_req: InstallRequirement,
+) -> AbstractDistribution:
+ """Returns a Distribution for the given InstallRequirement"""
+ # Editable requirements will always be source distributions. They use the
+ # legacy logic until we create a modern standard for them.
+ if install_req.editable:
+ return SourceDistribution(install_req)
+
+ # If it's a wheel, it's a WheelDistribution
+ if install_req.is_wheel:
+ return WheelDistribution(install_req)
+
+ # Otherwise, a SourceDistribution
+ return SourceDistribution(install_req)
diff --git a/third_party/python/pip/pip/_internal/distributions/base.py b/third_party/python/pip/pip/_internal/distributions/base.py
new file mode 100644
index 0000000000..75ce2dc905
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/distributions/base.py
@@ -0,0 +1,39 @@
+import abc
+
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata.base import BaseDistribution
+from pip._internal.req import InstallRequirement
+
+
+class AbstractDistribution(metaclass=abc.ABCMeta):
+ """A base class for handling installable artifacts.
+
+ The requirements for anything installable are as follows:
+
+ - we must be able to determine the requirement name
+ (or we can't correctly handle the non-upgrade case).
+
+ - for packages with setup requirements, we must also be able
+ to determine their requirements without installing additional
+ packages (for the same reason as run-time dependencies)
+
+ - we must be able to create a Distribution object exposing the
+ above metadata.
+ """
+
+ def __init__(self, req: InstallRequirement) -> None:
+ super().__init__()
+ self.req = req
+
+ @abc.abstractmethod
+ def get_metadata_distribution(self) -> BaseDistribution:
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def prepare_distribution_metadata(
+ self,
+ finder: PackageFinder,
+ build_isolation: bool,
+ check_build_deps: bool,
+ ) -> None:
+ raise NotImplementedError()
diff --git a/third_party/python/pip/pip/_internal/distributions/installed.py b/third_party/python/pip/pip/_internal/distributions/installed.py
new file mode 100644
index 0000000000..edb38aa1a6
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/distributions/installed.py
@@ -0,0 +1,23 @@
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import BaseDistribution
+
+
+class InstalledDistribution(AbstractDistribution):
+ """Represents an installed package.
+
+ This does not need any preparation as the required information has already
+ been computed.
+ """
+
+ def get_metadata_distribution(self) -> BaseDistribution:
+ assert self.req.satisfied_by is not None, "not actually installed"
+ return self.req.satisfied_by
+
+ def prepare_distribution_metadata(
+ self,
+ finder: PackageFinder,
+ build_isolation: bool,
+ check_build_deps: bool,
+ ) -> None:
+ pass
diff --git a/third_party/python/pip/pip/_internal/distributions/sdist.py b/third_party/python/pip/pip/_internal/distributions/sdist.py
new file mode 100644
index 0000000000..4c25647930
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/distributions/sdist.py
@@ -0,0 +1,150 @@
+import logging
+from typing import Iterable, Set, Tuple
+
+from pip._internal.build_env import BuildEnvironment
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.exceptions import InstallationError
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import BaseDistribution
+from pip._internal.utils.subprocess import runner_with_spinner_message
+
+logger = logging.getLogger(__name__)
+
+
+class SourceDistribution(AbstractDistribution):
+ """Represents a source distribution.
+
+ The preparation step for these requires the package metadata to be
+ generated, either via PEP 517 or via the legacy `setup.py egg_info`.
+ """
+
+ def get_metadata_distribution(self) -> BaseDistribution:
+ return self.req.get_dist()
+
+ def prepare_distribution_metadata(
+ self,
+ finder: PackageFinder,
+ build_isolation: bool,
+ check_build_deps: bool,
+ ) -> None:
+ # Load pyproject.toml, to determine whether PEP 517 is to be used
+ self.req.load_pyproject_toml()
+
+ # Set up the build isolation, if this requirement should be isolated
+ should_isolate = self.req.use_pep517 and build_isolation
+ if should_isolate:
+ # Set up an isolated environment and install the build backend's
+ # static requirements in it.
+ self._prepare_build_backend(finder)
+ # Check that if the requirement is editable, it either supports PEP 660 or
+ # has a setup.py or a setup.cfg. This cannot be done earlier because we need
+ # to set up the build backend to verify that it supports build_editable, nor
+ # can it be done later, because we want to avoid installing build requirements
+ # needlessly. Doing it here also works around setuptools generating
+ # UNKNOWN.egg-info when running get_requires_for_build_wheel on a directory
+ # without a setup.py or setup.cfg.
+ self.req.isolated_editable_sanity_check()
+ # Install the dynamic build requirements.
+ self._install_build_reqs(finder)
+ # Check if the current environment provides build dependencies
+ should_check_deps = self.req.use_pep517 and check_build_deps
+ if should_check_deps:
+ pyproject_requires = self.req.pyproject_requires
+ assert pyproject_requires is not None
+ conflicting, missing = self.req.build_env.check_requirements(
+ pyproject_requires
+ )
+ if conflicting:
+ self._raise_conflicts("the backend dependencies", conflicting)
+ if missing:
+ self._raise_missing_reqs(missing)
+ self.req.prepare_metadata()
+
+ def _prepare_build_backend(self, finder: PackageFinder) -> None:
+ # Isolate in a BuildEnvironment and install the build-time
+ # requirements.
+ pyproject_requires = self.req.pyproject_requires
+ assert pyproject_requires is not None
+
+ self.req.build_env = BuildEnvironment()
+ self.req.build_env.install_requirements(
+ finder, pyproject_requires, "overlay", kind="build dependencies"
+ )
+ conflicting, missing = self.req.build_env.check_requirements(
+ self.req.requirements_to_check
+ )
+ if conflicting:
+ self._raise_conflicts("PEP 517/518 supported requirements", conflicting)
+ if missing:
+ logger.warning(
+ "Missing build requirements in pyproject.toml for %s.",
+ self.req,
+ )
+ logger.warning(
+ "The project does not specify a build backend, and "
+ "pip cannot fall back to setuptools without %s.",
+ " and ".join(map(repr, sorted(missing))),
+ )
+
+ def _get_build_requires_wheel(self) -> Iterable[str]:
+ with self.req.build_env:
+ runner = runner_with_spinner_message("Getting requirements to build wheel")
+ backend = self.req.pep517_backend
+ assert backend is not None
+ with backend.subprocess_runner(runner):
+ return backend.get_requires_for_build_wheel()
+
+ def _get_build_requires_editable(self) -> Iterable[str]:
+ with self.req.build_env:
+ runner = runner_with_spinner_message(
+ "Getting requirements to build editable"
+ )
+ backend = self.req.pep517_backend
+ assert backend is not None
+ with backend.subprocess_runner(runner):
+ return backend.get_requires_for_build_editable()
+
+ def _install_build_reqs(self, finder: PackageFinder) -> None:
+ # Install any extra build dependencies that the backend requests.
+ # This must be done in a second pass, as the pyproject.toml
+ # dependencies must be installed before we can call the backend.
+ if (
+ self.req.editable
+ and self.req.permit_editable_wheels
+ and self.req.supports_pyproject_editable()
+ ):
+ build_reqs = self._get_build_requires_editable()
+ else:
+ build_reqs = self._get_build_requires_wheel()
+ conflicting, missing = self.req.build_env.check_requirements(build_reqs)
+ if conflicting:
+ self._raise_conflicts("the backend dependencies", conflicting)
+ self.req.build_env.install_requirements(
+ finder, missing, "normal", kind="backend dependencies"
+ )
+
+ def _raise_conflicts(
+ self, conflicting_with: str, conflicting_reqs: Set[Tuple[str, str]]
+ ) -> None:
+ format_string = (
+ "Some build dependencies for {requirement} "
+ "conflict with {conflicting_with}: {description}."
+ )
+ error_message = format_string.format(
+ requirement=self.req,
+ conflicting_with=conflicting_with,
+ description=", ".join(
+ f"{installed} is incompatible with {wanted}"
+ for installed, wanted in sorted(conflicting_reqs)
+ ),
+ )
+ raise InstallationError(error_message)
+
+ def _raise_missing_reqs(self, missing: Set[str]) -> None:
+ format_string = (
+ "Some build dependencies for {requirement} are missing: {missing}."
+ )
+ error_message = format_string.format(
+ requirement=self.req, missing=", ".join(map(repr, sorted(missing)))
+ )
+ raise InstallationError(error_message)
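The dynamic-requirement query in _get_build_requires_wheel() is, at bottom, a PEP 517 hook call; a hedged sketch using the standalone pyproject_hooks package (assumes it is installed, that ./proj is a source tree, and that its pyproject.toml declares the setuptools backend):

    from pyproject_hooks import BuildBackendHookCaller

    # Outside pip, the same hook can be invoked directly against a source tree.
    hooks = BuildBackendHookCaller("./proj", "setuptools.build_meta")
    print(hooks.get_requires_for_build_wheel())  # backend-declared extra build deps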
diff --git a/third_party/python/pip/pip/_internal/distributions/wheel.py b/third_party/python/pip/pip/_internal/distributions/wheel.py
new file mode 100644
index 0000000000..03aac775b5
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/distributions/wheel.py
@@ -0,0 +1,34 @@
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.distributions.base import AbstractDistribution
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import (
+ BaseDistribution,
+ FilesystemWheel,
+ get_wheel_distribution,
+)
+
+
+class WheelDistribution(AbstractDistribution):
+ """Represents a wheel distribution.
+
+ This does not need any preparation as wheels can be directly unpacked.
+ """
+
+ def get_metadata_distribution(self) -> BaseDistribution:
+ """Loads the metadata from the wheel file into memory and returns a
+ Distribution that uses it, not relying on the wheel file or
+ requirement.
+ """
+ assert self.req.local_file_path, "Set as part of preparation during download"
+ assert self.req.name, "Wheels are never unnamed"
+ wheel = FilesystemWheel(self.req.local_file_path)
+ return get_wheel_distribution(wheel, canonicalize_name(self.req.name))
+
+ def prepare_distribution_metadata(
+ self,
+ finder: PackageFinder,
+ build_isolation: bool,
+ check_build_deps: bool,
+ ) -> None:
+ pass
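`WheelDistribution` above can skip preparation because a wheel is a plain zip archive whose metadata can be read in place, without any build step. A self-contained sketch using only the standard library; the wheel filename is a placeholder:

    import zipfile

    def read_wheel_metadata(wheel_path: str) -> str:
        """Return the METADATA text from a wheel without installing it."""
        with zipfile.ZipFile(wheel_path) as zf:
            for name in zf.namelist():
                if name.endswith(".dist-info/METADATA"):
                    return zf.read(name).decode("utf-8")
        raise ValueError(f"no .dist-info/METADATA found in {wheel_path}")

    # Example (placeholder filename):
    # print(read_wheel_metadata("example-1.0-py3-none-any.whl").splitlines()[0])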
diff --git a/third_party/python/pip/pip/_internal/exceptions.py b/third_party/python/pip/pip/_internal/exceptions.py
new file mode 100644
index 0000000000..d4527295da
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/exceptions.py
@@ -0,0 +1,747 @@
+"""Exceptions used throughout package.
+
+This module MUST NOT try to import from anything within `pip._internal` to
+operate. This is expected to be importable from any/all files within the
+subpackage and, thus, should not depend on them.
+"""
+
+import configparser
+import contextlib
+import locale
+import logging
+import pathlib
+import re
+import sys
+from itertools import chain, groupby, repeat
+from typing import TYPE_CHECKING, Dict, Iterator, List, Optional, Union
+
+from pip._vendor.requests.models import Request, Response
+from pip._vendor.rich.console import Console, ConsoleOptions, RenderResult
+from pip._vendor.rich.markup import escape
+from pip._vendor.rich.text import Text
+
+if TYPE_CHECKING:
+ from hashlib import _Hash
+ from typing import Literal
+
+ from pip._internal.metadata import BaseDistribution
+ from pip._internal.req.req_install import InstallRequirement
+
+logger = logging.getLogger(__name__)
+
+
+#
+# Scaffolding
+#
+def _is_kebab_case(s: str) -> bool:
+ return re.match(r"^[a-z]+(-[a-z]+)*$", s) is not None
+
+
+def _prefix_with_indent(
+ s: Union[Text, str],
+ console: Console,
+ *,
+ prefix: str,
+ indent: str,
+) -> Text:
+ if isinstance(s, Text):
+ text = s
+ else:
+ text = console.render_str(s)
+
+ return console.render_str(prefix, overflow="ignore") + console.render_str(
+ f"\n{indent}", overflow="ignore"
+ ).join(text.split(allow_blank=True))
+
+
+class PipError(Exception):
+ """The base pip error."""
+
+
+class DiagnosticPipError(PipError):
+    """An error that presents diagnostic information to the user.
+
+    This contains a bunch of logic to enable pretty presentation of our error
+ messages. Each error gets a unique reference. Each error can also include
+ additional context, a hint and/or a note -- which are presented with the
+ main error message in a consistent style.
+
+ This is adapted from the error output styling in `sphinx-theme-builder`.
+ """
+
+ reference: str
+
+ def __init__(
+ self,
+ *,
+ kind: 'Literal["error", "warning"]' = "error",
+ reference: Optional[str] = None,
+ message: Union[str, Text],
+ context: Optional[Union[str, Text]],
+ hint_stmt: Optional[Union[str, Text]],
+ note_stmt: Optional[Union[str, Text]] = None,
+ link: Optional[str] = None,
+ ) -> None:
+ # Ensure a proper reference is provided.
+ if reference is None:
+ assert hasattr(self, "reference"), "error reference not provided!"
+ reference = self.reference
+ assert _is_kebab_case(reference), "error reference must be kebab-case!"
+
+ self.kind = kind
+ self.reference = reference
+
+ self.message = message
+ self.context = context
+
+ self.note_stmt = note_stmt
+ self.hint_stmt = hint_stmt
+
+ self.link = link
+
+ super().__init__(f"<{self.__class__.__name__}: {self.reference}>")
+
+ def __repr__(self) -> str:
+ return (
+ f"<{self.__class__.__name__}("
+ f"reference={self.reference!r}, "
+ f"message={self.message!r}, "
+ f"context={self.context!r}, "
+ f"note_stmt={self.note_stmt!r}, "
+ f"hint_stmt={self.hint_stmt!r}"
+ ")>"
+ )
+
+ def __rich_console__(
+ self,
+ console: Console,
+ options: ConsoleOptions,
+ ) -> RenderResult:
+ colour = "red" if self.kind == "error" else "yellow"
+
+ yield f"[{colour} bold]{self.kind}[/]: [bold]{self.reference}[/]"
+ yield ""
+
+ if not options.ascii_only:
+ # Present the main message, with relevant context indented.
+ if self.context is not None:
+ yield _prefix_with_indent(
+ self.message,
+ console,
+ prefix=f"[{colour}]×[/] ",
+ indent=f"[{colour}]│[/] ",
+ )
+ yield _prefix_with_indent(
+ self.context,
+ console,
+ prefix=f"[{colour}]╰─>[/] ",
+ indent=f"[{colour}] [/] ",
+ )
+ else:
+ yield _prefix_with_indent(
+ self.message,
+ console,
+ prefix="[red]×[/] ",
+ indent=" ",
+ )
+ else:
+ yield self.message
+ if self.context is not None:
+ yield ""
+ yield self.context
+
+ if self.note_stmt is not None or self.hint_stmt is not None:
+ yield ""
+
+ if self.note_stmt is not None:
+ yield _prefix_with_indent(
+ self.note_stmt,
+ console,
+ prefix="[magenta bold]note[/]: ",
+ indent=" ",
+ )
+ if self.hint_stmt is not None:
+ yield _prefix_with_indent(
+ self.hint_stmt,
+ console,
+ prefix="[cyan bold]hint[/]: ",
+ indent=" ",
+ )
+
+ if self.link is not None:
+ yield ""
+ yield f"Link: {self.link}"
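+
+    # Illustration (not part of pip): rendered through rich, an error with
+    # kind="error", reference="example-error", a context and a hint comes
+    # out roughly as:
+    #
+    #   error: example-error
+    #
+    #   × Something went wrong.
+    #   ╰─> More detail about the failure.
+    #
+    #   hint: Try the suggested workaround.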
+
+
+#
+# Actual Errors
+#
+class ConfigurationError(PipError):
+ """General exception in configuration"""
+
+
+class InstallationError(PipError):
+ """General exception during installation"""
+
+
+class UninstallationError(PipError):
+ """General exception during uninstallation"""
+
+
+class MissingPyProjectBuildRequires(DiagnosticPipError):
+ """Raised when pyproject.toml has `build-system`, but no `build-system.requires`."""
+
+ reference = "missing-pyproject-build-system-requires"
+
+ def __init__(self, *, package: str) -> None:
+ super().__init__(
+            message=f"Cannot process {escape(package)}",
+ context=Text(
+ "This package has an invalid pyproject.toml file.\n"
+ "The [build-system] table is missing the mandatory `requires` key."
+ ),
+ note_stmt="This is an issue with the package mentioned above, not pip.",
+ hint_stmt=Text("See PEP 518 for the detailed specification."),
+ )
+
+
+class InvalidPyProjectBuildRequires(DiagnosticPipError):
+    """Raised when pyproject.toml has an invalid `build-system.requires`."""
+
+ reference = "invalid-pyproject-build-system-requires"
+
+ def __init__(self, *, package: str, reason: str) -> None:
+ super().__init__(
+            message=f"Cannot process {escape(package)}",
+ context=Text(
+ "This package has an invalid `build-system.requires` key in "
+ f"pyproject.toml.\n{reason}"
+ ),
+ note_stmt="This is an issue with the package mentioned above, not pip.",
+ hint_stmt=Text("See PEP 518 for the detailed specification."),
+ )
+
+
+class NoneMetadataError(PipError):
+ """Raised when accessing a Distribution's "METADATA" or "PKG-INFO".
+
+    This signifies an inconsistency: the Distribution claims to have
+    the metadata file (if not, ``FileNotFoundError`` should be raised
+    instead), but is not actually able to produce its content. This may be
+    due to permission errors.
+ """
+
+ def __init__(
+ self,
+ dist: "BaseDistribution",
+ metadata_name: str,
+ ) -> None:
+ """
+ :param dist: A Distribution object.
+ :param metadata_name: The name of the metadata being accessed
+ (can be "METADATA" or "PKG-INFO").
+ """
+ self.dist = dist
+ self.metadata_name = metadata_name
+
+ def __str__(self) -> str:
+ # Use `dist` in the error message because its stringification
+ # includes more information, like the version and location.
+ return "None {} metadata found for distribution: {}".format(
+ self.metadata_name,
+ self.dist,
+ )
+
+
+class UserInstallationInvalid(InstallationError):
+ """A --user install is requested on an environment without user site."""
+
+ def __str__(self) -> str:
+ return "User base directory is not specified"
+
+
+class InvalidSchemeCombination(InstallationError):
+ def __str__(self) -> str:
+ before = ", ".join(str(a) for a in self.args[:-1])
+ return f"Cannot set {before} and {self.args[-1]} together"
+
+
+class DistributionNotFound(InstallationError):
+ """Raised when a distribution cannot be found to satisfy a requirement"""
+
+
+class RequirementsFileParseError(InstallationError):
+ """Raised when a general error occurs parsing a requirements file line."""
+
+
+class BestVersionAlreadyInstalled(PipError):
+ """Raised when the most up-to-date version of a package is already
+ installed."""
+
+
+class BadCommand(PipError):
+ """Raised when virtualenv or a command is not found"""
+
+
+class CommandError(PipError):
+ """Raised when there is an error in command-line arguments"""
+
+
+class PreviousBuildDirError(PipError):
+ """Raised when there's a previous conflicting build directory"""
+
+
+class NetworkConnectionError(PipError):
+ """HTTP connection error"""
+
+ def __init__(
+ self,
+ error_msg: str,
+ response: Optional[Response] = None,
+ request: Optional[Request] = None,
+ ) -> None:
+ """
+ Initialize NetworkConnectionError with `request` and `response`
+ objects.
+ """
+ self.response = response
+ self.request = request
+ self.error_msg = error_msg
+ if (
+ self.response is not None
+ and not self.request
+ and hasattr(response, "request")
+ ):
+ self.request = self.response.request
+ super().__init__(error_msg, response, request)
+
+ def __str__(self) -> str:
+ return str(self.error_msg)
+
+
+class InvalidWheelFilename(InstallationError):
+ """Invalid wheel filename."""
+
+
+class UnsupportedWheel(InstallationError):
+ """Unsupported wheel."""
+
+
+class InvalidWheel(InstallationError):
+ """Invalid (e.g. corrupt) wheel."""
+
+ def __init__(self, location: str, name: str):
+ self.location = location
+ self.name = name
+
+ def __str__(self) -> str:
+ return f"Wheel '{self.name}' located at {self.location} is invalid."
+
+
+class MetadataInconsistent(InstallationError):
+ """Built metadata contains inconsistent information.
+
+ This is raised when the metadata contains values (e.g. name and version)
+ that do not match the information previously obtained from sdist filename,
+ user-supplied ``#egg=`` value, or an install requirement name.
+ """
+
+ def __init__(
+ self, ireq: "InstallRequirement", field: str, f_val: str, m_val: str
+ ) -> None:
+ self.ireq = ireq
+ self.field = field
+ self.f_val = f_val
+ self.m_val = m_val
+
+ def __str__(self) -> str:
+ return (
+ f"Requested {self.ireq} has inconsistent {self.field}: "
+ f"expected {self.f_val!r}, but metadata has {self.m_val!r}"
+ )
+
+
+class LegacyInstallFailure(DiagnosticPipError):
+ """Error occurred while executing `setup.py install`"""
+
+ reference = "legacy-install-failure"
+
+ def __init__(self, package_details: str) -> None:
+ super().__init__(
+ message="Encountered error while trying to install package.",
+ context=package_details,
+ hint_stmt="See above for output from the failure.",
+ note_stmt="This is an issue with the package mentioned above, not pip.",
+ )
+
+
+class InstallationSubprocessError(DiagnosticPipError, InstallationError):
+ """A subprocess call failed."""
+
+ reference = "subprocess-exited-with-error"
+
+ def __init__(
+ self,
+ *,
+ command_description: str,
+ exit_code: int,
+ output_lines: Optional[List[str]],
+ ) -> None:
+ if output_lines is None:
+ output_prompt = Text("See above for output.")
+ else:
+ output_prompt = (
+ Text.from_markup(f"[red][{len(output_lines)} lines of output][/]\n")
+ + Text("".join(output_lines))
+ + Text.from_markup(R"[red]\[end of output][/]")
+ )
+
+ super().__init__(
+ message=(
+ f"[green]{escape(command_description)}[/] did not run successfully.\n"
+ f"exit code: {exit_code}"
+ ),
+ context=output_prompt,
+ hint_stmt=None,
+ note_stmt=(
+ "This error originates from a subprocess, and is likely not a "
+ "problem with pip."
+ ),
+ )
+
+ self.command_description = command_description
+ self.exit_code = exit_code
+
+ def __str__(self) -> str:
+ return f"{self.command_description} exited with {self.exit_code}"
+
+
+class MetadataGenerationFailed(InstallationSubprocessError, InstallationError):
+ reference = "metadata-generation-failed"
+
+ def __init__(
+ self,
+ *,
+ package_details: str,
+ ) -> None:
+ super(InstallationSubprocessError, self).__init__(
+ message="Encountered error while generating package metadata.",
+ context=escape(package_details),
+ hint_stmt="See above for details.",
+ note_stmt="This is an issue with the package mentioned above, not pip.",
+ )
+
+ def __str__(self) -> str:
+ return "metadata generation failed"
+
+
+class HashErrors(InstallationError):
+ """Multiple HashError instances rolled into one for reporting"""
+
+ def __init__(self) -> None:
+ self.errors: List["HashError"] = []
+
+ def append(self, error: "HashError") -> None:
+ self.errors.append(error)
+
+ def __str__(self) -> str:
+ lines = []
+ self.errors.sort(key=lambda e: e.order)
+ for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__):
+ lines.append(cls.head)
+ lines.extend(e.body() for e in errors_of_cls)
+ if lines:
+ return "\n".join(lines)
+ return ""
+
+ def __bool__(self) -> bool:
+ return bool(self.errors)
+
+
+class HashError(InstallationError):
+ """
+ A failure to verify a package against known-good hashes
+
+ :cvar order: An int sorting hash exception classes by difficulty of
+ recovery (lower being harder), so the user doesn't bother fretting
+        about unpinned packages when they have deeper issues, like VCS
+ dependencies, to deal with. Also keeps error reports in a
+ deterministic order.
+ :cvar head: A section heading for display above potentially many
+ exceptions of this kind
+ :ivar req: The InstallRequirement that triggered this error. This is
+ pasted on after the exception is instantiated, because it's not
+ typically available earlier.
+
+ """
+
+ req: Optional["InstallRequirement"] = None
+ head = ""
+ order: int = -1
+
+ def body(self) -> str:
+ """Return a summary of me for display under the heading.
+
+        This default implementation simply prints a description of the
+        triggering requirement, taken from ``self.req`` (the
+        InstallRequirement that provoked this error, with its link already
+        populated by the resolver's _populate_link()).
+
+        """
+ return f" {self._requirement_name()}"
+
+ def __str__(self) -> str:
+ return f"{self.head}\n{self.body()}"
+
+ def _requirement_name(self) -> str:
+ """Return a description of the requirement that triggered me.
+
+        This default implementation returns the long description of the req,
+        with line numbers.
+
+ """
+ return str(self.req) if self.req else "unknown package"
+
+
+class VcsHashUnsupported(HashError):
+ """A hash was provided for a version-control-system-based requirement, but
+ we don't have a method for hashing those."""
+
+ order = 0
+ head = (
+ "Can't verify hashes for these requirements because we don't "
+ "have a way to hash version control repositories:"
+ )
+
+
+class DirectoryUrlHashUnsupported(HashError):
+    """A hash was provided for a file:// requirement that points to a
+    directory, but we don't have a method for hashing those."""
+
+ order = 1
+ head = (
+ "Can't verify hashes for these file:// requirements because they "
+ "point to directories:"
+ )
+
+
+class HashMissing(HashError):
+ """A hash was needed for a requirement but is absent."""
+
+ order = 2
+ head = (
+ "Hashes are required in --require-hashes mode, but they are "
+ "missing from some requirements. Here is a list of those "
+ "requirements along with the hashes their downloaded archives "
+ "actually had. Add lines like these to your requirements files to "
+ "prevent tampering. (If you did not enable --require-hashes "
+ "manually, note that it turns on automatically when any package "
+ "has a hash.)"
+ )
+
+ def __init__(self, gotten_hash: str) -> None:
+ """
+ :param gotten_hash: The hash of the (possibly malicious) archive we
+ just downloaded
+ """
+ self.gotten_hash = gotten_hash
+
+ def body(self) -> str:
+ # Dodge circular import.
+ from pip._internal.utils.hashes import FAVORITE_HASH
+
+ package = None
+ if self.req:
+ # In the case of URL-based requirements, display the original URL
+ # seen in the requirements file rather than the package name,
+ # so the output can be directly copied into the requirements file.
+ package = (
+ self.req.original_link
+ if self.req.original_link
+ # In case someone feeds something downright stupid
+ # to InstallRequirement's constructor.
+ else getattr(self.req, "req", None)
+ )
+ return " {} --hash={}:{}".format(
+ package or "unknown package", FAVORITE_HASH, self.gotten_hash
+ )
+
+
+class HashUnpinned(HashError):
+ """A requirement had a hash specified but was not pinned to a specific
+ version."""
+
+ order = 3
+ head = (
+ "In --require-hashes mode, all requirements must have their "
+ "versions pinned with ==. These do not:"
+ )
+
+
+class HashMismatch(HashError):
+ """
+ Distribution file hash values don't match.
+
+ :ivar package_name: The name of the package that triggered the hash
+    mismatch. Feel free to write to this after the exception is raised to
+ improve its error message.
+
+ """
+
+ order = 4
+ head = (
+ "THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS "
+ "FILE. If you have updated the package versions, please update "
+ "the hashes. Otherwise, examine the package contents carefully; "
+ "someone may have tampered with them."
+ )
+
+ def __init__(self, allowed: Dict[str, List[str]], gots: Dict[str, "_Hash"]) -> None:
+ """
+ :param allowed: A dict of algorithm names pointing to lists of allowed
+ hex digests
+ :param gots: A dict of algorithm names pointing to hashes we
+ actually got from the files under suspicion
+ """
+ self.allowed = allowed
+ self.gots = gots
+
+ def body(self) -> str:
+ return " {}:\n{}".format(self._requirement_name(), self._hash_comparison())
+
+ def _hash_comparison(self) -> str:
+ """
+ Return a comparison of actual and expected hash values.
+
+ Example::
+
+ Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde
+ or 123451234512345123451234512345123451234512345
+ Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
+
+ """
+
+ def hash_then_or(hash_name: str) -> "chain[str]":
+ # For now, all the decent hashes have 6-char names, so we can get
+ # away with hard-coding space literals.
+ return chain([hash_name], repeat(" or"))
+
+ lines: List[str] = []
+ for hash_name, expecteds in self.allowed.items():
+ prefix = hash_then_or(hash_name)
+ lines.extend(
+ (" Expected {} {}".format(next(prefix), e)) for e in expecteds
+ )
+ lines.append(
+ " Got {}\n".format(self.gots[hash_name].hexdigest())
+ )
+ return "\n".join(lines)
+
+
+class UnsupportedPythonVersion(InstallationError):
+ """Unsupported python version according to Requires-Python package
+ metadata."""
+
+
+class ConfigurationFileCouldNotBeLoaded(ConfigurationError):
+ """When there are errors while loading a configuration file"""
+
+ def __init__(
+ self,
+ reason: str = "could not be loaded",
+ fname: Optional[str] = None,
+ error: Optional[configparser.Error] = None,
+ ) -> None:
+ super().__init__(error)
+ self.reason = reason
+ self.fname = fname
+ self.error = error
+
+ def __str__(self) -> str:
+ if self.fname is not None:
+ message_part = f" in {self.fname}."
+ else:
+ assert self.error is not None
+ message_part = f".\n{self.error}\n"
+ return f"Configuration file {self.reason}{message_part}"
+
+
+_DEFAULT_EXTERNALLY_MANAGED_ERROR = f"""\
+The Python environment under {sys.prefix} is managed externally, and may not be
+manipulated by the user. Please use specific tooling from the distributor of
+the Python installation to interact with this environment instead.
+"""
+
+
+class ExternallyManagedEnvironment(DiagnosticPipError):
+ """The current environment is externally managed.
+
+ This is raised when the current environment is externally managed, as
+ defined by `PEP 668`_. The ``EXTERNALLY-MANAGED`` configuration is checked
+ and displayed when the error is bubbled up to the user.
+
+ :param error: The error message read from ``EXTERNALLY-MANAGED``.
+ """
+
+ reference = "externally-managed-environment"
+
+ def __init__(self, error: Optional[str]) -> None:
+ if error is None:
+ context = Text(_DEFAULT_EXTERNALLY_MANAGED_ERROR)
+ else:
+ context = Text(error)
+ super().__init__(
+ message="This environment is externally managed",
+ context=context,
+ note_stmt=(
+ "If you believe this is a mistake, please contact your "
+ "Python installation or OS distribution provider. "
+ "You can override this, at the risk of breaking your Python "
+ "installation or OS, by passing --break-system-packages."
+ ),
+ hint_stmt=Text("See PEP 668 for the detailed specification."),
+ )
+
+ @staticmethod
+ def _iter_externally_managed_error_keys() -> Iterator[str]:
+ # LC_MESSAGES is in POSIX, but not the C standard. The most common
+ # platform that does not implement this category is Windows, where
+ # using other categories for console message localization is equally
+ # unreliable, so we fall back to the locale-less vendor message. This
+ # can always be re-evaluated when a vendor proposes a new alternative.
+ try:
+ category = locale.LC_MESSAGES
+ except AttributeError:
+ lang: Optional[str] = None
+ else:
+ lang, _ = locale.getlocale(category)
+ if lang is not None:
+ yield f"Error-{lang}"
+ for sep in ("-", "_"):
+ before, found, _ = lang.partition(sep)
+ if not found:
+ continue
+ yield f"Error-{before}"
+ yield "Error"
+
+ @classmethod
+ def from_config(
+ cls,
+ config: Union[pathlib.Path, str],
+ ) -> "ExternallyManagedEnvironment":
+ parser = configparser.ConfigParser(interpolation=None)
+ try:
+ parser.read(config, encoding="utf-8")
+ section = parser["externally-managed"]
+ for key in cls._iter_externally_managed_error_keys():
+ with contextlib.suppress(KeyError):
+ return cls(section[key])
+ except KeyError:
+ pass
+ except (OSError, UnicodeDecodeError, configparser.ParsingError):
+ from pip._internal.utils._log import VERBOSE
+
+ exc_info = logger.isEnabledFor(VERBOSE)
+ logger.warning("Failed to read %s", config, exc_info=exc_info)
+ return cls(None)
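The `EXTERNALLY-MANAGED` file parsed by `from_config` above is INI-style; the first key found among the locale-derived candidates wins, falling back to the plain `Error` key. A self-contained sketch with made-up file contents, assuming a German (`de_DE`) locale:

    import configparser
    import textwrap

    sample = textwrap.dedent(
        """\
        [externally-managed]
        Error = Use your OS package manager instead of pip.
        Error-de = (a localized message for German locales)
        """
    )

    parser = configparser.ConfigParser(interpolation=None)
    parser.read_string(sample)
    section = parser["externally-managed"]
    # Candidate keys for a de_DE locale, most specific first:
    for key in ("Error-de_DE", "Error-de", "Error"):
        if key in section:
            print(f"{key}: {section[key]}")
            break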
diff --git a/third_party/python/pip/pip/_internal/index/__init__.py b/third_party/python/pip/pip/_internal/index/__init__.py
new file mode 100644
index 0000000000..7a17b7b3b6
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/index/__init__.py
@@ -0,0 +1,2 @@
+"""Index interaction code
+"""
diff --git a/third_party/python/pip/pip/_internal/index/collector.py b/third_party/python/pip/pip/_internal/index/collector.py
new file mode 100644
index 0000000000..b3e293ea3a
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/index/collector.py
@@ -0,0 +1,505 @@
+"""
+The main purpose of this module is to expose LinkCollector.collect_sources().
+"""
+
+import collections
+import email.message
+import functools
+import itertools
+import json
+import logging
+import os
+import urllib.parse
+import urllib.request
+from html.parser import HTMLParser
+from optparse import Values
+from typing import (
+ TYPE_CHECKING,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ MutableMapping,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
+
+from pip._vendor import requests
+from pip._vendor.requests import Response
+from pip._vendor.requests.exceptions import RetryError, SSLError
+
+from pip._internal.exceptions import NetworkConnectionError
+from pip._internal.models.link import Link
+from pip._internal.models.search_scope import SearchScope
+from pip._internal.network.session import PipSession
+from pip._internal.network.utils import raise_for_status
+from pip._internal.utils.filetypes import is_archive_file
+from pip._internal.utils.misc import redact_auth_from_url
+from pip._internal.vcs import vcs
+
+from .sources import CandidatesFromPage, LinkSource, build_source
+
+if TYPE_CHECKING:
+ from typing import Protocol
+else:
+ Protocol = object
+
+logger = logging.getLogger(__name__)
+
+ResponseHeaders = MutableMapping[str, str]
+
+
+def _match_vcs_scheme(url: str) -> Optional[str]:
+ """Look for VCS schemes in the URL.
+
+ Returns the matched VCS scheme, or None if there's no match.
+ """
+ for scheme in vcs.schemes:
+ if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
+ return scheme
+ return None
+
+
+class _NotAPIContent(Exception):
+ def __init__(self, content_type: str, request_desc: str) -> None:
+ super().__init__(content_type, request_desc)
+ self.content_type = content_type
+ self.request_desc = request_desc
+
+
+def _ensure_api_header(response: Response) -> None:
+ """
+ Check the Content-Type header to ensure the response contains a Simple
+ API Response.
+
+    Raises `_NotAPIContent` if the content type is not a supported Simple
+    API content type.
+ """
+ content_type = response.headers.get("Content-Type", "Unknown")
+
+ content_type_l = content_type.lower()
+ if content_type_l.startswith(
+ (
+ "text/html",
+ "application/vnd.pypi.simple.v1+html",
+ "application/vnd.pypi.simple.v1+json",
+ )
+ ):
+ return
+
+ raise _NotAPIContent(content_type, response.request.method)
+
+
+class _NotHTTP(Exception):
+ pass
+
+
+def _ensure_api_response(url: str, session: PipSession) -> None:
+ """
+    Send a HEAD request to the URL, and ensure the response contains a Simple
+    API response.
+
+ Raises `_NotHTTP` if the URL is not available for a HEAD request, or
+ `_NotAPIContent` if the content type is not a valid content type.
+ """
+ scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
+ if scheme not in {"http", "https"}:
+ raise _NotHTTP()
+
+ resp = session.head(url, allow_redirects=True)
+ raise_for_status(resp)
+
+ _ensure_api_header(resp)
+
+
+def _get_simple_response(url: str, session: PipSession) -> Response:
+    """Access a Simple API response with GET, and return the response.
+
+ This consists of three parts:
+
+ 1. If the URL looks suspiciously like an archive, send a HEAD first to
+ check the Content-Type is HTML or Simple API, to avoid downloading a
+ large file. Raise `_NotHTTP` if the content type cannot be determined, or
+ `_NotAPIContent` if it is not HTML or a Simple API.
+ 2. Actually perform the request. Raise HTTP exceptions on network failures.
+ 3. Check the Content-Type header to make sure we got a Simple API response,
+ and raise `_NotAPIContent` otherwise.
+ """
+ if is_archive_file(Link(url).filename):
+ _ensure_api_response(url, session=session)
+
+ logger.debug("Getting page %s", redact_auth_from_url(url))
+
+ resp = session.get(
+ url,
+ headers={
+ "Accept": ", ".join(
+ [
+ "application/vnd.pypi.simple.v1+json",
+ "application/vnd.pypi.simple.v1+html; q=0.1",
+ "text/html; q=0.01",
+ ]
+ ),
+            # We don't want to blindly return cached data for
+            # /simple/, because authors generally expect that
+            # twine upload && pip install will function, but if
+            # they've done a pip install in the last ~10 minutes
+            # it won't. Thus by setting this to zero we will not
+            # blindly use any cached data; however, the benefit of
+            # using max-age=0 instead of no-cache is that we will
+            # still support conditional requests, so we will still
+            # minimize traffic sent in cases where the page hasn't
+            # changed at all; we will just always incur the round
+            # trip for the conditional GET now instead of only
+            # once per 10 minutes.
+ # For more information, please see pypa/pip#5670.
+ "Cache-Control": "max-age=0",
+ },
+ )
+ raise_for_status(resp)
+
+    # The check for archives above only works if the URL ends with
+    # something that looks like an archive. However, that is not a
+    # requirement of a URL. Unless we issue a HEAD request on every
+    # URL, we cannot know ahead of time for sure whether something is
+    # a Simple API response or not. However, we can check after we've
+    # downloaded it.
+ _ensure_api_header(resp)
+
+ logger.debug(
+ "Fetched page %s as %s",
+ redact_auth_from_url(url),
+ resp.headers.get("Content-Type", "Unknown"),
+ )
+
+ return resp
+
+
+def _get_encoding_from_headers(headers: ResponseHeaders) -> Optional[str]:
+ """Determine if we have any encoding information in our headers."""
+ if headers and "Content-Type" in headers:
+ m = email.message.Message()
+ m["content-type"] = headers["Content-Type"]
+ charset = m.get_param("charset")
+ if charset:
+ return str(charset)
+ return None
+
+
+class CacheablePageContent:
+ def __init__(self, page: "IndexContent") -> None:
+ assert page.cache_link_parsing
+ self.page = page
+
+ def __eq__(self, other: object) -> bool:
+ return isinstance(other, type(self)) and self.page.url == other.page.url
+
+ def __hash__(self) -> int:
+ return hash(self.page.url)
+
+
+class ParseLinks(Protocol):
+ def __call__(self, page: "IndexContent") -> Iterable[Link]:
+ ...
+
+
+def with_cached_index_content(fn: ParseLinks) -> ParseLinks:
+ """
+ Given a function that parses an Iterable[Link] from an IndexContent, cache the
+ function's result (keyed by CacheablePageContent), unless the IndexContent
+ `page` has `page.cache_link_parsing == False`.
+ """
+
+ @functools.lru_cache(maxsize=None)
+ def wrapper(cacheable_page: CacheablePageContent) -> List[Link]:
+ return list(fn(cacheable_page.page))
+
+ @functools.wraps(fn)
+ def wrapper_wrapper(page: "IndexContent") -> List[Link]:
+ if page.cache_link_parsing:
+ return wrapper(CacheablePageContent(page))
+ return list(fn(page))
+
+ return wrapper_wrapper
+
+
+@with_cached_index_content
+def parse_links(page: "IndexContent") -> Iterable[Link]:
+ """
+ Parse a Simple API's Index Content, and yield its anchor elements as Link objects.
+ """
+
+ content_type_l = page.content_type.lower()
+ if content_type_l.startswith("application/vnd.pypi.simple.v1+json"):
+ data = json.loads(page.content)
+ for file in data.get("files", []):
+ link = Link.from_json(file, page.url)
+ if link is None:
+ continue
+ yield link
+ return
+
+ parser = HTMLLinkParser(page.url)
+ encoding = page.encoding or "utf-8"
+ parser.feed(page.content.decode(encoding))
+
+ url = page.url
+ base_url = parser.base_url or url
+ for anchor in parser.anchors:
+ link = Link.from_element(anchor, page_url=url, base_url=base_url)
+ if link is None:
+ continue
+ yield link
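+
+# Illustration (not part of pip): a PEP 691 JSON index response parsed above
+# has the shape
+#   {"files": [{"filename": "pkg-1.0-py3-none-any.whl",
+#               "url": "pkg-1.0-py3-none-any.whl",
+#               "hashes": {"sha256": "..."}}]}
+# while the legacy HTML form carries the same data as <a href="..."> anchors,
+# optionally with a <base href="..."> affecting URL resolution.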
+
+
+class IndexContent:
+ """Represents one response (or page), along with its URL"""
+
+ def __init__(
+ self,
+ content: bytes,
+ content_type: str,
+ encoding: Optional[str],
+ url: str,
+ cache_link_parsing: bool = True,
+ ) -> None:
+ """
+ :param encoding: the encoding to decode the given content.
+ :param url: the URL from which the HTML was downloaded.
+ :param cache_link_parsing: whether links parsed from this page's url
+ should be cached. PyPI index urls should
+ have this set to False, for example.
+ """
+ self.content = content
+ self.content_type = content_type
+ self.encoding = encoding
+ self.url = url
+ self.cache_link_parsing = cache_link_parsing
+
+ def __str__(self) -> str:
+ return redact_auth_from_url(self.url)
+
+
+class HTMLLinkParser(HTMLParser):
+ """
+ HTMLParser that keeps the first base HREF and a list of all anchor
+ elements' attributes.
+ """
+
+ def __init__(self, url: str) -> None:
+ super().__init__(convert_charrefs=True)
+
+ self.url: str = url
+ self.base_url: Optional[str] = None
+ self.anchors: List[Dict[str, Optional[str]]] = []
+
+ def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None:
+ if tag == "base" and self.base_url is None:
+ href = self.get_href(attrs)
+ if href is not None:
+ self.base_url = href
+ elif tag == "a":
+ self.anchors.append(dict(attrs))
+
+ def get_href(self, attrs: List[Tuple[str, Optional[str]]]) -> Optional[str]:
+ for name, value in attrs:
+ if name == "href":
+ return value
+ return None
+
+
+def _handle_get_simple_fail(
+ link: Link,
+ reason: Union[str, Exception],
+ meth: Optional[Callable[..., None]] = None,
+) -> None:
+ if meth is None:
+ meth = logger.debug
+ meth("Could not fetch URL %s: %s - skipping", link, reason)
+
+
+def _make_index_content(
+ response: Response, cache_link_parsing: bool = True
+) -> IndexContent:
+ encoding = _get_encoding_from_headers(response.headers)
+ return IndexContent(
+ response.content,
+ response.headers["Content-Type"],
+ encoding=encoding,
+ url=response.url,
+ cache_link_parsing=cache_link_parsing,
+ )
+
+
+def _get_index_content(link: Link, *, session: PipSession) -> Optional["IndexContent"]:
+ url = link.url.split("#", 1)[0]
+
+ # Check for VCS schemes that do not support lookup as web pages.
+ vcs_scheme = _match_vcs_scheme(url)
+ if vcs_scheme:
+ logger.warning(
+ "Cannot look at %s URL %s because it does not support lookup as web pages.",
+ vcs_scheme,
+ link,
+ )
+ return None
+
+ # Tack index.html onto file:// URLs that point to directories
+ scheme, _, path, _, _, _ = urllib.parse.urlparse(url)
+ if scheme == "file" and os.path.isdir(urllib.request.url2pathname(path)):
+ # add trailing slash if not present so urljoin doesn't trim
+ # final segment
+ if not url.endswith("/"):
+ url += "/"
+ # TODO: In the future, it would be nice if pip supported PEP 691
+ # style responses in the file:// URLs, however there's no
+ # standard file extension for application/vnd.pypi.simple.v1+json
+ # so we'll need to come up with something on our own.
+ url = urllib.parse.urljoin(url, "index.html")
+ logger.debug(" file: URL is directory, getting %s", url)
+
+ try:
+ resp = _get_simple_response(url, session=session)
+ except _NotHTTP:
+ logger.warning(
+ "Skipping page %s because it looks like an archive, and cannot "
+ "be checked by a HTTP HEAD request.",
+ link,
+ )
+ except _NotAPIContent as exc:
+ logger.warning(
+ "Skipping page %s because the %s request got Content-Type: %s. "
+ "The only supported Content-Types are application/vnd.pypi.simple.v1+json, "
+ "application/vnd.pypi.simple.v1+html, and text/html",
+ link,
+ exc.request_desc,
+ exc.content_type,
+ )
+ except NetworkConnectionError as exc:
+ _handle_get_simple_fail(link, exc)
+ except RetryError as exc:
+ _handle_get_simple_fail(link, exc)
+ except SSLError as exc:
+ reason = "There was a problem confirming the ssl certificate: "
+ reason += str(exc)
+ _handle_get_simple_fail(link, reason, meth=logger.info)
+ except requests.ConnectionError as exc:
+ _handle_get_simple_fail(link, f"connection error: {exc}")
+ except requests.Timeout:
+ _handle_get_simple_fail(link, "timed out")
+ else:
+ return _make_index_content(resp, cache_link_parsing=link.cache_link_parsing)
+ return None
+
+
+class CollectedSources(NamedTuple):
+ find_links: Sequence[Optional[LinkSource]]
+ index_urls: Sequence[Optional[LinkSource]]
+
+
+class LinkCollector:
+
+ """
+ Responsible for collecting Link objects from all configured locations,
+ making network requests as needed.
+
+ The class's main method is its collect_sources() method.
+ """
+
+ def __init__(
+ self,
+ session: PipSession,
+ search_scope: SearchScope,
+ ) -> None:
+ self.search_scope = search_scope
+ self.session = session
+
+ @classmethod
+ def create(
+ cls,
+ session: PipSession,
+ options: Values,
+ suppress_no_index: bool = False,
+ ) -> "LinkCollector":
+ """
+ :param session: The Session to use to make requests.
+ :param suppress_no_index: Whether to ignore the --no-index option
+ when constructing the SearchScope object.
+ """
+ index_urls = [options.index_url] + options.extra_index_urls
+ if options.no_index and not suppress_no_index:
+ logger.debug(
+ "Ignoring indexes: %s",
+ ",".join(redact_auth_from_url(url) for url in index_urls),
+ )
+ index_urls = []
+
+ # Make sure find_links is a list before passing to create().
+ find_links = options.find_links or []
+
+ search_scope = SearchScope.create(
+ find_links=find_links,
+ index_urls=index_urls,
+ no_index=options.no_index,
+ )
+ link_collector = LinkCollector(
+ session=session,
+ search_scope=search_scope,
+ )
+ return link_collector
+
+ @property
+ def find_links(self) -> List[str]:
+ return self.search_scope.find_links
+
+ def fetch_response(self, location: Link) -> Optional[IndexContent]:
+ """
+        Fetch a page (HTML or Simple API JSON) containing package links.
+ """
+ return _get_index_content(location, session=self.session)
+
+ def collect_sources(
+ self,
+ project_name: str,
+ candidates_from_page: CandidatesFromPage,
+ ) -> CollectedSources:
+ # The OrderedDict calls deduplicate sources by URL.
+ index_url_sources = collections.OrderedDict(
+ build_source(
+ loc,
+ candidates_from_page=candidates_from_page,
+ page_validator=self.session.is_secure_origin,
+ expand_dir=False,
+ cache_link_parsing=False,
+ )
+ for loc in self.search_scope.get_index_urls_locations(project_name)
+ ).values()
+ find_links_sources = collections.OrderedDict(
+ build_source(
+ loc,
+ candidates_from_page=candidates_from_page,
+ page_validator=self.session.is_secure_origin,
+ expand_dir=True,
+ cache_link_parsing=True,
+ )
+ for loc in self.find_links
+ ).values()
+
+ if logger.isEnabledFor(logging.DEBUG):
+ lines = [
+ f"* {s.link}"
+ for s in itertools.chain(find_links_sources, index_url_sources)
+ if s is not None and s.link is not None
+ ]
+ lines = [
+ f"{len(lines)} location(s) to search "
+ f"for versions of {project_name}:"
+ ] + lines
+ logger.debug("\n".join(lines))
+
+ return CollectedSources(
+ find_links=list(find_links_sources),
+ index_urls=list(index_url_sources),
+ )
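`with_cached_index_content` above works around `functools.lru_cache` requiring hashable arguments: the page is wrapped in a small key object that compares and hashes by URL, and pages that opt out of caching bypass the wrapper. A generic, self-contained sketch of that pattern (all names below are made up):

    import functools
    from dataclasses import dataclass

    @dataclass
    class Page:
        url: str
        content: str
        cacheable: bool = True

    class PageKey:
        def __init__(self, page: Page) -> None:
            self.page = page

        def __eq__(self, other: object) -> bool:
            return isinstance(other, PageKey) and self.page.url == other.page.url

        def __hash__(self) -> int:
            return hash(self.page.url)

    def cached_by_url(fn):
        @functools.lru_cache(maxsize=None)
        def cached(key: PageKey):
            return fn(key.page)

        @functools.wraps(fn)
        def wrapper(page: Page):
            if page.cacheable:
                return cached(PageKey(page))
            return fn(page)

        return wrapper

    @cached_by_url
    def parse(page: Page) -> list:
        print("parsing", page.url)  # runs once per cached URL
        return page.content.split()

    parse(Page("https://example.com/simple/", "a b"))
    parse(Page("https://example.com/simple/", "a b"))  # cache hit: no print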
diff --git a/third_party/python/pip/pip/_internal/index/package_finder.py b/third_party/python/pip/pip/_internal/index/package_finder.py
new file mode 100644
index 0000000000..b6f8d57e85
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/index/package_finder.py
@@ -0,0 +1,1029 @@
+"""Routines related to PyPI, indexes"""
+
+import enum
+import functools
+import itertools
+import logging
+import re
+from typing import TYPE_CHECKING, FrozenSet, Iterable, List, Optional, Set, Tuple, Union
+
+from pip._vendor.packaging import specifiers
+from pip._vendor.packaging.tags import Tag
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.packaging.version import _BaseVersion
+from pip._vendor.packaging.version import parse as parse_version
+
+from pip._internal.exceptions import (
+ BestVersionAlreadyInstalled,
+ DistributionNotFound,
+ InvalidWheelFilename,
+ UnsupportedWheel,
+)
+from pip._internal.index.collector import LinkCollector, parse_links
+from pip._internal.models.candidate import InstallationCandidate
+from pip._internal.models.format_control import FormatControl
+from pip._internal.models.link import Link
+from pip._internal.models.search_scope import SearchScope
+from pip._internal.models.selection_prefs import SelectionPreferences
+from pip._internal.models.target_python import TargetPython
+from pip._internal.models.wheel import Wheel
+from pip._internal.req import InstallRequirement
+from pip._internal.utils._log import getLogger
+from pip._internal.utils.filetypes import WHEEL_EXTENSION
+from pip._internal.utils.hashes import Hashes
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import build_netloc
+from pip._internal.utils.packaging import check_requires_python
+from pip._internal.utils.unpacking import SUPPORTED_EXTENSIONS
+
+if TYPE_CHECKING:
+ from pip._vendor.typing_extensions import TypeGuard
+
+__all__ = ["FormatControl", "BestCandidateResult", "PackageFinder"]
+
+
+logger = getLogger(__name__)
+
+BuildTag = Union[Tuple[()], Tuple[int, str]]
+CandidateSortingKey = Tuple[int, int, int, _BaseVersion, Optional[int], BuildTag]
+
+
+def _check_link_requires_python(
+ link: Link,
+ version_info: Tuple[int, int, int],
+ ignore_requires_python: bool = False,
+) -> bool:
+ """
+ Return whether the given Python version is compatible with a link's
+ "Requires-Python" value.
+
+ :param version_info: A 3-tuple of ints representing the Python
+ major-minor-micro version to check.
+ :param ignore_requires_python: Whether to ignore the "Requires-Python"
+ value if the given Python version isn't compatible.
+ """
+ try:
+ is_compatible = check_requires_python(
+ link.requires_python,
+ version_info=version_info,
+ )
+ except specifiers.InvalidSpecifier:
+ logger.debug(
+ "Ignoring invalid Requires-Python (%r) for link: %s",
+ link.requires_python,
+ link,
+ )
+ else:
+ if not is_compatible:
+ version = ".".join(map(str, version_info))
+ if not ignore_requires_python:
+ logger.verbose(
+ "Link requires a different Python (%s not in: %r): %s",
+ version,
+ link.requires_python,
+ link,
+ )
+ return False
+
+ logger.debug(
+ "Ignoring failed Requires-Python check (%s not in: %r) for link: %s",
+ version,
+ link.requires_python,
+ link,
+ )
+
+ return True
+
+
+class LinkType(enum.Enum):
+ candidate = enum.auto()
+ different_project = enum.auto()
+ yanked = enum.auto()
+ format_unsupported = enum.auto()
+ format_invalid = enum.auto()
+ platform_mismatch = enum.auto()
+ requires_python_mismatch = enum.auto()
+
+
+class LinkEvaluator:
+
+ """
+ Responsible for evaluating links for a particular project.
+ """
+
+ _py_version_re = re.compile(r"-py([123]\.?[0-9]?)$")
+
+ # Don't include an allow_yanked default value to make sure each call
+ # site considers whether yanked releases are allowed. This also causes
+ # that decision to be made explicit in the calling code, which helps
+ # people when reading the code.
+ def __init__(
+ self,
+ project_name: str,
+ canonical_name: str,
+ formats: FrozenSet[str],
+ target_python: TargetPython,
+ allow_yanked: bool,
+ ignore_requires_python: Optional[bool] = None,
+ ) -> None:
+ """
+ :param project_name: The user supplied package name.
+ :param canonical_name: The canonical package name.
+ :param formats: The formats allowed for this package. Should be a set
+ with 'binary' or 'source' or both in it.
+        :param target_python: The target Python interpreter to use when
+            evaluating link compatibility. This is used, for example, to
+            check wheel compatibility, to check the Python version embedded
+            in a link filename (or egg fragment), and to check an HTML
+            link's optional PEP 503 "data-requires-python" attribute.
+ :param allow_yanked: Whether files marked as yanked (in the sense
+ of PEP 592) are permitted to be candidates for install.
+ :param ignore_requires_python: Whether to ignore incompatible
+ PEP 503 "data-requires-python" values in HTML links. Defaults
+ to False.
+ """
+ if ignore_requires_python is None:
+ ignore_requires_python = False
+
+ self._allow_yanked = allow_yanked
+ self._canonical_name = canonical_name
+ self._ignore_requires_python = ignore_requires_python
+ self._formats = formats
+ self._target_python = target_python
+
+ self.project_name = project_name
+
+ def evaluate_link(self, link: Link) -> Tuple[LinkType, str]:
+ """
+ Determine whether a link is a candidate for installation.
+
+ :return: A tuple (result, detail), where *result* is an enum
+ representing whether the evaluation found a candidate, or the reason
+ why one is not found. If a candidate is found, *detail* will be the
+ candidate's version string; if one is not found, it contains the
+ reason the link fails to qualify.
+ """
+ version = None
+ if link.is_yanked and not self._allow_yanked:
+ reason = link.yanked_reason or "<none given>"
+ return (LinkType.yanked, f"yanked for reason: {reason}")
+
+ if link.egg_fragment:
+ egg_info = link.egg_fragment
+ ext = link.ext
+ else:
+ egg_info, ext = link.splitext()
+ if not ext:
+ return (LinkType.format_unsupported, "not a file")
+ if ext not in SUPPORTED_EXTENSIONS:
+ return (
+ LinkType.format_unsupported,
+ f"unsupported archive format: {ext}",
+ )
+ if "binary" not in self._formats and ext == WHEEL_EXTENSION:
+ reason = f"No binaries permitted for {self.project_name}"
+ return (LinkType.format_unsupported, reason)
+ if "macosx10" in link.path and ext == ".zip":
+ return (LinkType.format_unsupported, "macosx10 one")
+ if ext == WHEEL_EXTENSION:
+ try:
+ wheel = Wheel(link.filename)
+ except InvalidWheelFilename:
+ return (
+ LinkType.format_invalid,
+ "invalid wheel filename",
+ )
+ if canonicalize_name(wheel.name) != self._canonical_name:
+ reason = f"wrong project name (not {self.project_name})"
+ return (LinkType.different_project, reason)
+
+ supported_tags = self._target_python.get_tags()
+ if not wheel.supported(supported_tags):
+ # Include the wheel's tags in the reason string to
+ # simplify troubleshooting compatibility issues.
+ file_tags = ", ".join(wheel.get_formatted_file_tags())
+ reason = (
+ f"none of the wheel's tags ({file_tags}) are compatible "
+ f"(run pip debug --verbose to show compatible tags)"
+ )
+ return (LinkType.platform_mismatch, reason)
+
+ version = wheel.version
+
+ # This should be up by the self.ok_binary check, but see issue 2700.
+ if "source" not in self._formats and ext != WHEEL_EXTENSION:
+ reason = f"No sources permitted for {self.project_name}"
+ return (LinkType.format_unsupported, reason)
+
+ if not version:
+ version = _extract_version_from_fragment(
+ egg_info,
+ self._canonical_name,
+ )
+ if not version:
+ reason = f"Missing project version for {self.project_name}"
+ return (LinkType.format_invalid, reason)
+
+ match = self._py_version_re.search(version)
+ if match:
+ version = version[: match.start()]
+ py_version = match.group(1)
+ if py_version != self._target_python.py_version:
+ return (
+ LinkType.platform_mismatch,
+ "Python version is incorrect",
+ )
+
+ supports_python = _check_link_requires_python(
+ link,
+ version_info=self._target_python.py_version_info,
+ ignore_requires_python=self._ignore_requires_python,
+ )
+ if not supports_python:
+ reason = f"{version} Requires-Python {link.requires_python}"
+ return (LinkType.requires_python_mismatch, reason)
+
+ logger.debug("Found link %s, version: %s", link, version)
+
+ return (LinkType.candidate, version)
+
+
+def filter_unallowed_hashes(
+ candidates: List[InstallationCandidate],
+ hashes: Optional[Hashes],
+ project_name: str,
+) -> List[InstallationCandidate]:
+ """
+ Filter out candidates whose hashes aren't allowed, and return a new
+ list of candidates.
+
+ If at least one candidate has an allowed hash, then all candidates with
+ either an allowed hash or no hash specified are returned. Otherwise,
+ the given candidates are returned.
+
+ Including the candidates with no hash specified when there is a match
+ allows a warning to be logged if there is a more preferred candidate
+ with no hash specified. Returning all candidates in the case of no
+ matches lets pip report the hash of the candidate that would otherwise
+ have been installed (e.g. permitting the user to more easily update
+ their requirements file with the desired hash).
+ """
+ if not hashes:
+ logger.debug(
+ "Given no hashes to check %s links for project %r: "
+ "discarding no candidates",
+ len(candidates),
+ project_name,
+ )
+ # Make sure we're not returning back the given value.
+ return list(candidates)
+
+ matches_or_no_digest = []
+ # Collect the non-matches for logging purposes.
+ non_matches = []
+ match_count = 0
+ for candidate in candidates:
+ link = candidate.link
+ if not link.has_hash:
+ pass
+ elif link.is_hash_allowed(hashes=hashes):
+ match_count += 1
+ else:
+ non_matches.append(candidate)
+ continue
+
+ matches_or_no_digest.append(candidate)
+
+ if match_count:
+ filtered = matches_or_no_digest
+ else:
+ # Make sure we're not returning back the given value.
+ filtered = list(candidates)
+
+ if len(filtered) == len(candidates):
+ discard_message = "discarding no candidates"
+ else:
+ discard_message = "discarding {} non-matches:\n {}".format(
+ len(non_matches),
+ "\n ".join(str(candidate.link) for candidate in non_matches),
+ )
+
+ logger.debug(
+ "Checked %s links for project %r against %s hashes "
+ "(%s matches, %s no digest): %s",
+ len(candidates),
+ project_name,
+ hashes.digest_count,
+ match_count,
+ len(matches_or_no_digest) - match_count,
+ discard_message,
+ )
+
+ return filtered
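+
+# Illustration (not part of pip): with candidates A (hash allowed), B (no
+# hash), and C (hash not allowed), at least one match exists, so the filter
+# returns [A, B] and discards C. With only B and C, there is no match, so
+# both are returned unchanged and the later hash-mismatch error can report
+# the hash of the file pip would otherwise have picked.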
+
+
+class CandidatePreferences:
+
+ """
+ Encapsulates some of the preferences for filtering and sorting
+ InstallationCandidate objects.
+ """
+
+ def __init__(
+ self,
+ prefer_binary: bool = False,
+ allow_all_prereleases: bool = False,
+ ) -> None:
+ """
+ :param allow_all_prereleases: Whether to allow all pre-releases.
+ """
+ self.allow_all_prereleases = allow_all_prereleases
+ self.prefer_binary = prefer_binary
+
+
+class BestCandidateResult:
+ """A collection of candidates, returned by `PackageFinder.find_best_candidate`.
+
+ This class is only intended to be instantiated by CandidateEvaluator's
+ `compute_best_candidate()` method.
+ """
+
+ def __init__(
+ self,
+ candidates: List[InstallationCandidate],
+ applicable_candidates: List[InstallationCandidate],
+ best_candidate: Optional[InstallationCandidate],
+ ) -> None:
+ """
+ :param candidates: A sequence of all available candidates found.
+ :param applicable_candidates: The applicable candidates.
+ :param best_candidate: The most preferred candidate found, or None
+ if no applicable candidates were found.
+ """
+ assert set(applicable_candidates) <= set(candidates)
+
+ if best_candidate is None:
+ assert not applicable_candidates
+ else:
+ assert best_candidate in applicable_candidates
+
+ self._applicable_candidates = applicable_candidates
+ self._candidates = candidates
+
+ self.best_candidate = best_candidate
+
+ def iter_all(self) -> Iterable[InstallationCandidate]:
+ """Iterate through all candidates."""
+ return iter(self._candidates)
+
+ def iter_applicable(self) -> Iterable[InstallationCandidate]:
+ """Iterate through the applicable candidates."""
+ return iter(self._applicable_candidates)
+
+
+class CandidateEvaluator:
+
+ """
+ Responsible for filtering and sorting candidates for installation based
+ on what tags are valid.
+ """
+
+ @classmethod
+ def create(
+ cls,
+ project_name: str,
+ target_python: Optional[TargetPython] = None,
+ prefer_binary: bool = False,
+ allow_all_prereleases: bool = False,
+ specifier: Optional[specifiers.BaseSpecifier] = None,
+ hashes: Optional[Hashes] = None,
+ ) -> "CandidateEvaluator":
+ """Create a CandidateEvaluator object.
+
+ :param target_python: The target Python interpreter to use when
+ checking compatibility. If None (the default), a TargetPython
+ object will be constructed from the running Python.
+ :param specifier: An optional object implementing `filter`
+ (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
+ versions.
+ :param hashes: An optional collection of allowed hashes.
+ """
+ if target_python is None:
+ target_python = TargetPython()
+ if specifier is None:
+ specifier = specifiers.SpecifierSet()
+
+ supported_tags = target_python.get_tags()
+
+ return cls(
+ project_name=project_name,
+ supported_tags=supported_tags,
+ specifier=specifier,
+ prefer_binary=prefer_binary,
+ allow_all_prereleases=allow_all_prereleases,
+ hashes=hashes,
+ )
+
+ def __init__(
+ self,
+ project_name: str,
+ supported_tags: List[Tag],
+ specifier: specifiers.BaseSpecifier,
+ prefer_binary: bool = False,
+ allow_all_prereleases: bool = False,
+ hashes: Optional[Hashes] = None,
+ ) -> None:
+ """
+ :param supported_tags: The PEP 425 tags supported by the target
+ Python in order of preference (most preferred first).
+ """
+ self._allow_all_prereleases = allow_all_prereleases
+ self._hashes = hashes
+ self._prefer_binary = prefer_binary
+ self._project_name = project_name
+ self._specifier = specifier
+ self._supported_tags = supported_tags
+ # Since the index of the tag in the _supported_tags list is used
+ # as a priority, precompute a map from tag to index/priority to be
+ # used in wheel.find_most_preferred_tag.
+ self._wheel_tag_preferences = {
+ tag: idx for idx, tag in enumerate(supported_tags)
+ }
+
+ def get_applicable_candidates(
+ self,
+ candidates: List[InstallationCandidate],
+ ) -> List[InstallationCandidate]:
+ """
+ Return the applicable candidates from a list of candidates.
+ """
+ # Using None infers from the specifier instead.
+ allow_prereleases = self._allow_all_prereleases or None
+ specifier = self._specifier
+ versions = {
+ str(v)
+ for v in specifier.filter(
+ # We turn the version object into a str here because otherwise
+ # when we're debundled but setuptools isn't, Python will see
+ # packaging.version.Version and
+ # pkg_resources._vendor.packaging.version.Version as different
+ # types. This way we'll use a str as a common data interchange
+ # format. If we stop using the pkg_resources provided specifier
+ # and start using our own, we can drop the cast to str().
+ (str(c.version) for c in candidates),
+ prereleases=allow_prereleases,
+ )
+ }
+
+ # Again, converting version to str to deal with debundling.
+ applicable_candidates = [c for c in candidates if str(c.version) in versions]
+
+ filtered_applicable_candidates = filter_unallowed_hashes(
+ candidates=applicable_candidates,
+ hashes=self._hashes,
+ project_name=self._project_name,
+ )
+
+ return sorted(filtered_applicable_candidates, key=self._sort_key)
+
+ def _sort_key(self, candidate: InstallationCandidate) -> CandidateSortingKey:
+ """
+ Function to pass as the `key` argument to a call to sorted() to sort
+ InstallationCandidates by preference.
+
+ Returns a tuple such that tuples sorting as greater using Python's
+ default comparison operator are more preferred.
+
+ The preference is as follows:
+
+ First and foremost, candidates with allowed (matching) hashes are
+ always preferred over candidates without matching hashes. This is
+ because e.g. if the only candidate with an allowed hash is yanked,
+ we still want to use that candidate.
+
+ Second, excepting hash considerations, candidates that have been
+ yanked (in the sense of PEP 592) are always less preferred than
+ candidates that haven't been yanked. Then:
+
+ If not finding wheels, they are sorted by version only.
+ If finding wheels, then the sort order is by version, then:
+ 1. existing installs
+ 2. wheels ordered via Wheel.support_index_min(self._supported_tags)
+ 3. source archives
+ If prefer_binary was set, then all wheels are sorted above sources.
+
+ Note: it was considered to embed this logic into the Link
+ comparison operators, but then different sdist links
+        with the same version would have to be considered equal.
+ """
+ valid_tags = self._supported_tags
+ support_num = len(valid_tags)
+ build_tag: BuildTag = ()
+ binary_preference = 0
+ link = candidate.link
+ if link.is_wheel:
+ # can raise InvalidWheelFilename
+ wheel = Wheel(link.filename)
+ try:
+ pri = -(
+ wheel.find_most_preferred_tag(
+ valid_tags, self._wheel_tag_preferences
+ )
+ )
+ except ValueError:
+ raise UnsupportedWheel(
+ "{} is not a supported wheel for this platform. It "
+ "can't be sorted.".format(wheel.filename)
+ )
+ if self._prefer_binary:
+ binary_preference = 1
+ if wheel.build_tag is not None:
+ match = re.match(r"^(\d+)(.*)$", wheel.build_tag)
+ assert match is not None, "guaranteed by filename validation"
+ build_tag_groups = match.groups()
+ build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
+ else: # sdist
+ pri = -(support_num)
+ has_allowed_hash = int(link.is_hash_allowed(self._hashes))
+ yank_value = -1 * int(link.is_yanked) # -1 for yanked.
+ return (
+ has_allowed_hash,
+ yank_value,
+ binary_preference,
+ candidate.version,
+ pri,
+ build_tag,
+ )
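+
+    # Illustration (not part of pip): with prefer_binary set, tuple
+    # comparison decides on binary_preference before version, e.g.
+    #   pkg-1.1.tar.gz           -> (1, 0, 0, Version("1.1"), -support_num, ())
+    #   pkg-1.0-py3-none-any.whl -> (1, 0, 1, Version("1.0"), -3, ())
+    # so max() picks the 1.0 wheel even though the sdist's version is newer.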
+
+ def sort_best_candidate(
+ self,
+ candidates: List[InstallationCandidate],
+ ) -> Optional[InstallationCandidate]:
+ """
+ Return the best candidate per the instance's sort order, or None if
+ no candidate is acceptable.
+ """
+ if not candidates:
+ return None
+ best_candidate = max(candidates, key=self._sort_key)
+ return best_candidate
+
+ def compute_best_candidate(
+ self,
+ candidates: List[InstallationCandidate],
+ ) -> BestCandidateResult:
+ """
+ Compute and return a `BestCandidateResult` instance.
+ """
+ applicable_candidates = self.get_applicable_candidates(candidates)
+
+ best_candidate = self.sort_best_candidate(applicable_candidates)
+
+ return BestCandidateResult(
+ candidates,
+ applicable_candidates=applicable_candidates,
+ best_candidate=best_candidate,
+ )
+
+
+class PackageFinder:
+ """This finds packages.
+
+ This is meant to match easy_install's technique for looking for
+ packages, by reading pages and looking for appropriate links.
+ """
+
+ def __init__(
+ self,
+ link_collector: LinkCollector,
+ target_python: TargetPython,
+ allow_yanked: bool,
+ format_control: Optional[FormatControl] = None,
+ candidate_prefs: Optional[CandidatePreferences] = None,
+ ignore_requires_python: Optional[bool] = None,
+ ) -> None:
+ """
+ This constructor is primarily meant to be used by the create() class
+ method and from tests.
+
+ :param format_control: A FormatControl object, used to control
+ the selection of source packages / binary packages when consulting
+ the index and links.
+ :param candidate_prefs: Options to use when creating a
+ CandidateEvaluator object.
+ """
+ if candidate_prefs is None:
+ candidate_prefs = CandidatePreferences()
+
+ format_control = format_control or FormatControl(set(), set())
+
+ self._allow_yanked = allow_yanked
+ self._candidate_prefs = candidate_prefs
+ self._ignore_requires_python = ignore_requires_python
+ self._link_collector = link_collector
+ self._target_python = target_python
+
+ self.format_control = format_control
+
+ # These are boring links that have already been logged somehow.
+ self._logged_links: Set[Tuple[Link, LinkType, str]] = set()
+
+ # Don't include an allow_yanked default value to make sure each call
+ # site considers whether yanked releases are allowed. This also makes
+ # that decision explicit in the calling code, which helps readers.
+ @classmethod
+ def create(
+ cls,
+ link_collector: LinkCollector,
+ selection_prefs: SelectionPreferences,
+ target_python: Optional[TargetPython] = None,
+ ) -> "PackageFinder":
+ """Create a PackageFinder.
+
+ :param selection_prefs: The candidate selection preferences, as a
+ SelectionPreferences object.
+ :param target_python: The target Python interpreter to use when
+ checking compatibility. If None (the default), a TargetPython
+ object will be constructed from the running Python.
+ """
+ if target_python is None:
+ target_python = TargetPython()
+
+ candidate_prefs = CandidatePreferences(
+ prefer_binary=selection_prefs.prefer_binary,
+ allow_all_prereleases=selection_prefs.allow_all_prereleases,
+ )
+
+ return cls(
+ candidate_prefs=candidate_prefs,
+ link_collector=link_collector,
+ target_python=target_python,
+ allow_yanked=selection_prefs.allow_yanked,
+ format_control=selection_prefs.format_control,
+ ignore_requires_python=selection_prefs.ignore_requires_python,
+ )
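+
+ # Usage sketch (collaborator names are assumptions for illustration):
+ # finder = PackageFinder.create(
+ #     link_collector=collector,
+ #     selection_prefs=SelectionPreferences(allow_yanked=False),
+ # )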
+
+ @property
+ def target_python(self) -> TargetPython:
+ return self._target_python
+
+ @property
+ def search_scope(self) -> SearchScope:
+ return self._link_collector.search_scope
+
+ @search_scope.setter
+ def search_scope(self, search_scope: SearchScope) -> None:
+ self._link_collector.search_scope = search_scope
+
+ @property
+ def find_links(self) -> List[str]:
+ return self._link_collector.find_links
+
+ @property
+ def index_urls(self) -> List[str]:
+ return self.search_scope.index_urls
+
+ @property
+ def trusted_hosts(self) -> Iterable[str]:
+ for host_port in self._link_collector.session.pip_trusted_origins:
+ yield build_netloc(*host_port)
+
+ @property
+ def allow_all_prereleases(self) -> bool:
+ return self._candidate_prefs.allow_all_prereleases
+
+ def set_allow_all_prereleases(self) -> None:
+ self._candidate_prefs.allow_all_prereleases = True
+
+ @property
+ def prefer_binary(self) -> bool:
+ return self._candidate_prefs.prefer_binary
+
+ def set_prefer_binary(self) -> None:
+ self._candidate_prefs.prefer_binary = True
+
+ def requires_python_skipped_reasons(self) -> List[str]:
+ reasons = {
+ detail
+ for _, result, detail in self._logged_links
+ if result == LinkType.requires_python_mismatch
+ }
+ return sorted(reasons)
+
+ def make_link_evaluator(self, project_name: str) -> LinkEvaluator:
+ canonical_name = canonicalize_name(project_name)
+ formats = self.format_control.get_allowed_formats(canonical_name)
+
+ return LinkEvaluator(
+ project_name=project_name,
+ canonical_name=canonical_name,
+ formats=formats,
+ target_python=self._target_python,
+ allow_yanked=self._allow_yanked,
+ ignore_requires_python=self._ignore_requires_python,
+ )
+
+ def _sort_links(self, links: Iterable[Link]) -> List[Link]:
+ """
+ Returns elements of links in order, non-egg links first, egg links
+ second, while eliminating duplicates
+ """
+ eggs, no_eggs = [], []
+ seen: Set[Link] = set()
+ for link in links:
+ if link not in seen:
+ seen.add(link)
+ if link.egg_fragment:
+ eggs.append(link)
+ else:
+ no_eggs.append(link)
+ return no_eggs + eggs
+
+ def _log_skipped_link(self, link: Link, result: LinkType, detail: str) -> None:
+ entry = (link, result, detail)
+ if entry not in self._logged_links:
+ # Put the link at the end so the reason is more visible and because
+ # the link string is usually very long.
+ logger.debug("Skipping link: %s: %s", detail, link)
+ self._logged_links.add(entry)
+
+ def get_install_candidate(
+ self, link_evaluator: LinkEvaluator, link: Link
+ ) -> Optional[InstallationCandidate]:
+ """
+ If the link is a candidate for install, convert it to an
+ InstallationCandidate and return it. Otherwise, return None.
+ """
+ result, detail = link_evaluator.evaluate_link(link)
+ if result != LinkType.candidate:
+ self._log_skipped_link(link, result, detail)
+ return None
+
+ return InstallationCandidate(
+ name=link_evaluator.project_name,
+ link=link,
+ version=detail,
+ )
+
+ def evaluate_links(
+ self, link_evaluator: LinkEvaluator, links: Iterable[Link]
+ ) -> List[InstallationCandidate]:
+ """
+ Convert links that are candidates to InstallationCandidate objects.
+ """
+ candidates = []
+ for link in self._sort_links(links):
+ candidate = self.get_install_candidate(link_evaluator, link)
+ if candidate is not None:
+ candidates.append(candidate)
+
+ return candidates
+
+ def process_project_url(
+ self, project_url: Link, link_evaluator: LinkEvaluator
+ ) -> List[InstallationCandidate]:
+ logger.debug(
+ "Fetching project page and analyzing links: %s",
+ project_url,
+ )
+ index_response = self._link_collector.fetch_response(project_url)
+ if index_response is None:
+ return []
+
+ page_links = list(parse_links(index_response))
+
+ with indent_log():
+ package_links = self.evaluate_links(
+ link_evaluator,
+ links=page_links,
+ )
+
+ return package_links
+
+ @functools.lru_cache(maxsize=None)
+ def find_all_candidates(self, project_name: str) -> List[InstallationCandidate]:
+ """Find all available InstallationCandidate for project_name
+
+ This checks index_urls and find_links.
+ All versions found are returned as an InstallationCandidate list.
+
+ See LinkEvaluator.evaluate_link() for details on which files
+ are accepted.
+ """
+ link_evaluator = self.make_link_evaluator(project_name)
+
+ collected_sources = self._link_collector.collect_sources(
+ project_name=project_name,
+ candidates_from_page=functools.partial(
+ self.process_project_url,
+ link_evaluator=link_evaluator,
+ ),
+ )
+
+ page_candidates_it = itertools.chain.from_iterable(
+ source.page_candidates()
+ for sources in collected_sources
+ for source in sources
+ if source is not None
+ )
+ page_candidates = list(page_candidates_it)
+
+ file_links_it = itertools.chain.from_iterable(
+ source.file_links()
+ for sources in collected_sources
+ for source in sources
+ if source is not None
+ )
+ file_candidates = self.evaluate_links(
+ link_evaluator,
+ sorted(file_links_it, reverse=True),
+ )
+
+ if logger.isEnabledFor(logging.DEBUG) and file_candidates:
+ paths = []
+ for candidate in file_candidates:
+ assert candidate.link.url # we need to have a URL
+ try:
+ paths.append(candidate.link.file_path)
+ except Exception:
+ paths.append(candidate.link.url) # it's not a local file
+
+ logger.debug("Local files found: %s", ", ".join(paths))
+
+ # This is an intentional priority ordering
+ return file_candidates + page_candidates
+
+ def make_candidate_evaluator(
+ self,
+ project_name: str,
+ specifier: Optional[specifiers.BaseSpecifier] = None,
+ hashes: Optional[Hashes] = None,
+ ) -> CandidateEvaluator:
+ """Create a CandidateEvaluator object to use."""
+ candidate_prefs = self._candidate_prefs
+ return CandidateEvaluator.create(
+ project_name=project_name,
+ target_python=self._target_python,
+ prefer_binary=candidate_prefs.prefer_binary,
+ allow_all_prereleases=candidate_prefs.allow_all_prereleases,
+ specifier=specifier,
+ hashes=hashes,
+ )
+
+ @functools.lru_cache(maxsize=None)
+ def find_best_candidate(
+ self,
+ project_name: str,
+ specifier: Optional[specifiers.BaseSpecifier] = None,
+ hashes: Optional[Hashes] = None,
+ ) -> BestCandidateResult:
+ """Find matches for the given project and specifier.
+
+ :param specifier: An optional object implementing `filter`
+ (e.g. `packaging.specifiers.SpecifierSet`) to filter applicable
+ versions.
+
+ :return: A `BestCandidateResult` instance.
+ """
+ candidates = self.find_all_candidates(project_name)
+ candidate_evaluator = self.make_candidate_evaluator(
+ project_name=project_name,
+ specifier=specifier,
+ hashes=hashes,
+ )
+ return candidate_evaluator.compute_best_candidate(candidates)
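+
+ # Usage sketch (SpecifierSet implements `filter`, per the docstring):
+ # result = finder.find_best_candidate("pip", SpecifierSet(">=21.0"))
+ # result.best_candidate  # an InstallationCandidate, or None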
+
+ def find_requirement(
+ self, req: InstallRequirement, upgrade: bool
+ ) -> Optional[InstallationCandidate]:
+ """Try to find a Link matching req
+
+ Expects req (an InstallRequirement) and upgrade (a boolean).
+ Returns an InstallationCandidate if found; raises
+ DistributionNotFound or BestVersionAlreadyInstalled otherwise.
+ """
+ hashes = req.hashes(trust_internet=False)
+ best_candidate_result = self.find_best_candidate(
+ req.name,
+ specifier=req.specifier,
+ hashes=hashes,
+ )
+ best_candidate = best_candidate_result.best_candidate
+
+ installed_version: Optional[_BaseVersion] = None
+ if req.satisfied_by is not None:
+ installed_version = req.satisfied_by.version
+
+ def _format_versions(cand_iter: Iterable[InstallationCandidate]) -> str:
+ # This repeated parse_version and str() conversion is needed to
+ # handle different vendoring sources from pip and pkg_resources.
+ # If we stop using the pkg_resources provided specifier and start
+ # using our own, we can drop the cast to str().
+ return (
+ ", ".join(
+ sorted(
+ {str(c.version) for c in cand_iter},
+ key=parse_version,
+ )
+ )
+ or "none"
+ )
+
+ if installed_version is None and best_candidate is None:
+ logger.critical(
+ "Could not find a version that satisfies the requirement %s "
+ "(from versions: %s)",
+ req,
+ _format_versions(best_candidate_result.iter_all()),
+ )
+
+ raise DistributionNotFound(
+ "No matching distribution found for {}".format(req)
+ )
+
+ def _should_install_candidate(
+ candidate: Optional[InstallationCandidate],
+ ) -> "TypeGuard[InstallationCandidate]":
+ if installed_version is None:
+ return True
+ if best_candidate is None:
+ return False
+ return best_candidate.version > installed_version
+
+ if not upgrade and installed_version is not None:
+ if _should_install_candidate(best_candidate):
+ logger.debug(
+ "Existing installed version (%s) satisfies requirement "
+ "(most up-to-date version is %s)",
+ installed_version,
+ best_candidate.version,
+ )
+ else:
+ logger.debug(
+ "Existing installed version (%s) is most up-to-date and "
+ "satisfies requirement",
+ installed_version,
+ )
+ return None
+
+ if _should_install_candidate(best_candidate):
+ logger.debug(
+ "Using version %s (newest of versions: %s)",
+ best_candidate.version,
+ _format_versions(best_candidate_result.iter_applicable()),
+ )
+ return best_candidate
+
+ # We have an existing version, and it's the best version
+ logger.debug(
+ "Installed version (%s) is most up-to-date (past versions: %s)",
+ installed_version,
+ _format_versions(best_candidate_result.iter_applicable()),
+ )
+ raise BestVersionAlreadyInstalled
+
+
+def _find_name_version_sep(fragment: str, canonical_name: str) -> int:
+ """Find the separator's index based on the package's canonical name.
+
+ :param fragment: A <package>+<version> filename "fragment" (stem) or
+ egg fragment.
+ :param canonical_name: The package's canonical name.
+
+ This function is needed since the canonicalized name does not necessarily
+ have the same length as the egg info's name part. An example::
+
+ >>> fragment = 'foo__bar-1.0'
+ >>> canonical_name = 'foo-bar'
+ >>> _find_name_version_sep(fragment, canonical_name)
+ 8
+ """
+ # Project name and version must be separated by one single dash. Find all
+ # occurrences of dashes; if the string in front of it matches the canonical
+ # name, this is the one separating the name and version parts.
+ for i, c in enumerate(fragment):
+ if c != "-":
+ continue
+ if canonicalize_name(fragment[:i]) == canonical_name:
+ return i
+ raise ValueError(f"{fragment} does not match {canonical_name}")
+
+
+def _extract_version_from_fragment(fragment: str, canonical_name: str) -> Optional[str]:
+ """Parse the version string from a <package>+<version> filename
+ "fragment" (stem) or egg fragment.
+
+ :param fragment: The string to parse. E.g. foo-2.1
+ :param canonical_name: The canonicalized name of the package this
+ belongs to.
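+
+ An example, following ``_find_name_version_sep`` above::
+
+ >>> _extract_version_from_fragment('foo__bar-1.0', 'foo-bar')
+ '1.0'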
+ """
+ try:
+ version_start = _find_name_version_sep(fragment, canonical_name) + 1
+ except ValueError:
+ return None
+ version = fragment[version_start:]
+ if not version:
+ return None
+ return version
diff --git a/third_party/python/pip/pip/_internal/index/sources.py b/third_party/python/pip/pip/_internal/index/sources.py
new file mode 100644
index 0000000000..eec3f12f7e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/index/sources.py
@@ -0,0 +1,224 @@
+import logging
+import mimetypes
+import os
+import pathlib
+from typing import Callable, Iterable, Optional, Tuple
+
+from pip._internal.models.candidate import InstallationCandidate
+from pip._internal.models.link import Link
+from pip._internal.utils.urls import path_to_url, url_to_path
+from pip._internal.vcs import is_url
+
+logger = logging.getLogger(__name__)
+
+FoundCandidates = Iterable[InstallationCandidate]
+FoundLinks = Iterable[Link]
+CandidatesFromPage = Callable[[Link], Iterable[InstallationCandidate]]
+PageValidator = Callable[[Link], bool]
+
+
+class LinkSource:
+ @property
+ def link(self) -> Optional[Link]:
+ """Returns the underlying link, if there's one."""
+ raise NotImplementedError()
+
+ def page_candidates(self) -> FoundCandidates:
+ """Candidates found by parsing an archive listing HTML file."""
+ raise NotImplementedError()
+
+ def file_links(self) -> FoundLinks:
+ """Links found by specifying archives directly."""
+ raise NotImplementedError()
+
+
+def _is_html_file(file_url: str) -> bool:
+ return mimetypes.guess_type(file_url, strict=False)[0] == "text/html"
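+# For example, "index.html" guesses as "text/html", while an archive name
+# like "pkg-1.0.tar.gz" does not, so it is treated as a file link instead.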
+
+
+class _FlatDirectorySource(LinkSource):
+ """Link source specified by ``--find-links=<path-to-dir>``.
+
+ This looks at the content of the directory, and returns:
+
+ * ``page_candidates``: Links listed on each HTML file in the directory.
+ * ``file_links``: Archives in the directory.
+ """
+
+ def __init__(
+ self,
+ candidates_from_page: CandidatesFromPage,
+ path: str,
+ ) -> None:
+ self._candidates_from_page = candidates_from_page
+ self._path = pathlib.Path(os.path.realpath(path))
+
+ @property
+ def link(self) -> Optional[Link]:
+ return None
+
+ def page_candidates(self) -> FoundCandidates:
+ for path in self._path.iterdir():
+ url = path_to_url(str(path))
+ if not _is_html_file(url):
+ continue
+ yield from self._candidates_from_page(Link(url))
+
+ def file_links(self) -> FoundLinks:
+ for path in self._path.iterdir():
+ url = path_to_url(str(path))
+ if _is_html_file(url):
+ continue
+ yield Link(url)
+
+
+class _LocalFileSource(LinkSource):
+ """``--find-links=<path-or-url>`` or ``--[extra-]index-url=<path-or-url>``.
+
+ If a URL is supplied, it must be a ``file:`` URL. If a path is supplied to
+ the option, it is converted to a URL first. This returns:
+
+ * ``page_candidates``: Links listed on an HTML file.
+ * ``file_links``: The non-HTML file.
+ """
+
+ def __init__(
+ self,
+ candidates_from_page: CandidatesFromPage,
+ link: Link,
+ ) -> None:
+ self._candidates_from_page = candidates_from_page
+ self._link = link
+
+ @property
+ def link(self) -> Optional[Link]:
+ return self._link
+
+ def page_candidates(self) -> FoundCandidates:
+ if not _is_html_file(self._link.url):
+ return
+ yield from self._candidates_from_page(self._link)
+
+ def file_links(self) -> FoundLinks:
+ if _is_html_file(self._link.url):
+ return
+ yield self._link
+
+
+class _RemoteFileSource(LinkSource):
+ """``--find-links=<url>`` or ``--[extra-]index-url=<url>``.
+
+ This returns:
+
+ * ``page_candidates``: Links listed on an HTML file.
+ * ``file_links``: The non-HTML file.
+ """
+
+ def __init__(
+ self,
+ candidates_from_page: CandidatesFromPage,
+ page_validator: PageValidator,
+ link: Link,
+ ) -> None:
+ self._candidates_from_page = candidates_from_page
+ self._page_validator = page_validator
+ self._link = link
+
+ @property
+ def link(self) -> Optional[Link]:
+ return self._link
+
+ def page_candidates(self) -> FoundCandidates:
+ if not self._page_validator(self._link):
+ return
+ yield from self._candidates_from_page(self._link)
+
+ def file_links(self) -> FoundLinks:
+ yield self._link
+
+
+class _IndexDirectorySource(LinkSource):
+ """``--[extra-]index-url=<path-to-directory>``.
+
+ This is treated like a remote URL; ``candidates_from_page`` contains logic
+ for this by appending ``index.html`` to the link.
+ """
+
+ def __init__(
+ self,
+ candidates_from_page: CandidatesFromPage,
+ link: Link,
+ ) -> None:
+ self._candidates_from_page = candidates_from_page
+ self._link = link
+
+ @property
+ def link(self) -> Optional[Link]:
+ return self._link
+
+ def page_candidates(self) -> FoundCandidates:
+ yield from self._candidates_from_page(self._link)
+
+ def file_links(self) -> FoundLinks:
+ return ()
+
+
+def build_source(
+ location: str,
+ *,
+ candidates_from_page: CandidatesFromPage,
+ page_validator: PageValidator,
+ expand_dir: bool,
+ cache_link_parsing: bool,
+) -> Tuple[Optional[str], Optional[LinkSource]]:
+
+ path: Optional[str] = None
+ url: Optional[str] = None
+ if os.path.exists(location): # Is a local path.
+ url = path_to_url(location)
+ path = location
+ elif location.startswith("file:"): # A file: URL.
+ url = location
+ path = url_to_path(location)
+ elif is_url(location):
+ url = location
+
+ if url is None:
+ msg = (
+ "Location '%s' is ignored: "
+ "it is either a non-existing path or lacks a specific scheme."
+ )
+ logger.warning(msg, location)
+ return (None, None)
+
+ if path is None:
+ source: LinkSource = _RemoteFileSource(
+ candidates_from_page=candidates_from_page,
+ page_validator=page_validator,
+ link=Link(url, cache_link_parsing=cache_link_parsing),
+ )
+ return (url, source)
+
+ if os.path.isdir(path):
+ if expand_dir:
+ source = _FlatDirectorySource(
+ candidates_from_page=candidates_from_page,
+ path=path,
+ )
+ else:
+ source = _IndexDirectorySource(
+ candidates_from_page=candidates_from_page,
+ link=Link(url, cache_link_parsing=cache_link_parsing),
+ )
+ return (url, source)
+ elif os.path.isfile(path):
+ source = _LocalFileSource(
+ candidates_from_page=candidates_from_page,
+ link=Link(url, cache_link_parsing=cache_link_parsing),
+ )
+ return (url, source)
+ logger.warning(
+ "Location '%s' is ignored: it is neither a file nor a directory.",
+ location,
+ )
+ return (url, None)
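+
+
+# Usage sketch (the callables here are assumptions for illustration):
+# url, source = build_source(
+#     "/path/to/wheelhouse",
+#     candidates_from_page=candidates_from_page,
+#     page_validator=lambda link: True,
+#     expand_dir=True,  # --find-links semantics: expand directories
+#     cache_link_parsing=False,
+# )
+# An existing directory with expand_dir=True yields a _FlatDirectorySource.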
diff --git a/third_party/python/pip/pip/_internal/locations/__init__.py b/third_party/python/pip/pip/_internal/locations/__init__.py
new file mode 100644
index 0000000000..d54bc63eba
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/locations/__init__.py
@@ -0,0 +1,467 @@
+import functools
+import logging
+import os
+import pathlib
+import sys
+import sysconfig
+from typing import Any, Dict, Generator, Optional, Tuple
+
+from pip._internal.models.scheme import SCHEME_KEYS, Scheme
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.deprecation import deprecated
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+from . import _sysconfig
+from .base import (
+ USER_CACHE_DIR,
+ get_major_minor_version,
+ get_src_prefix,
+ is_osx_framework,
+ site_packages,
+ user_site,
+)
+
+__all__ = [
+ "USER_CACHE_DIR",
+ "get_bin_prefix",
+ "get_bin_user",
+ "get_major_minor_version",
+ "get_platlib",
+ "get_purelib",
+ "get_scheme",
+ "get_src_prefix",
+ "site_packages",
+ "user_site",
+]
+
+
+logger = logging.getLogger(__name__)
+
+
+_PLATLIBDIR: str = getattr(sys, "platlibdir", "lib")
+
+_USE_SYSCONFIG_DEFAULT = sys.version_info >= (3, 10)
+
+
+def _should_use_sysconfig() -> bool:
+ """This function determines the value of _USE_SYSCONFIG.
+
+ By default, pip uses sysconfig on Python 3.10+.
+ But Python distributors can override this decision by setting:
+ sysconfig._PIP_USE_SYSCONFIG = True / False
+ Rationale in https://github.com/pypa/pip/issues/10647
+
+ This is a function for testability, but should be constant during any one
+ run.
+ """
+ return bool(getattr(sysconfig, "_PIP_USE_SYSCONFIG", _USE_SYSCONFIG_DEFAULT))
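+# Distributors can pin the choice before pip runs (e.g. in sitecustomize):
+# import sysconfig; sysconfig._PIP_USE_SYSCONFIG = False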
+
+
+_USE_SYSCONFIG = _should_use_sysconfig()
+
+if not _USE_SYSCONFIG:
+ # Import distutils lazily to avoid deprecation warnings,
+ # but import it soon enough that it is in memory and available during
+ # a pip reinstall.
+ from . import _distutils
+
+# Be noisy about incompatibilities if this platform "should" be using
+# sysconfig, but is explicitly opting out and using distutils instead.
+if _USE_SYSCONFIG_DEFAULT and not _USE_SYSCONFIG:
+ _MISMATCH_LEVEL = logging.WARNING
+else:
+ _MISMATCH_LEVEL = logging.DEBUG
+
+
+def _looks_like_bpo_44860() -> bool:
+ """The resolution to bpo-44860 will change this incorrect platlib.
+
+ See <https://bugs.python.org/issue44860>.
+ """
+ from distutils.command.install import INSTALL_SCHEMES
+
+ try:
+ unix_user_platlib = INSTALL_SCHEMES["unix_user"]["platlib"]
+ except KeyError:
+ return False
+ return unix_user_platlib == "$usersite"
+
+
+def _looks_like_red_hat_patched_platlib_purelib(scheme: Dict[str, str]) -> bool:
+ platlib = scheme["platlib"]
+ if "/$platlibdir/" in platlib:
+ platlib = platlib.replace("/$platlibdir/", f"/{_PLATLIBDIR}/")
+ if "/lib64/" not in platlib:
+ return False
+ unpatched = platlib.replace("/lib64/", "/lib/")
+ return unpatched.replace("$platbase/", "$base/") == scheme["purelib"]
+
+
+@functools.lru_cache(maxsize=None)
+def _looks_like_red_hat_lib() -> bool:
+ """Red Hat patches platlib in unix_prefix and unix_home, but not purelib.
+
+ This is the only way I can see to tell a Red Hat-patched Python.
+ """
+ from distutils.command.install import INSTALL_SCHEMES
+
+ return all(
+ k in INSTALL_SCHEMES
+ and _looks_like_red_hat_patched_platlib_purelib(INSTALL_SCHEMES[k])
+ for k in ("unix_prefix", "unix_home")
+ )
+
+
+@functools.lru_cache(maxsize=None)
+def _looks_like_debian_scheme() -> bool:
+ """Debian adds two additional schemes."""
+ from distutils.command.install import INSTALL_SCHEMES
+
+ return "deb_system" in INSTALL_SCHEMES and "unix_local" in INSTALL_SCHEMES
+
+
+@functools.lru_cache(maxsize=None)
+def _looks_like_red_hat_scheme() -> bool:
+ """Red Hat patches ``sys.prefix`` and ``sys.exec_prefix``.
+
+ Red Hat's ``00251-change-user-install-location.patch`` changes the install
+ command's ``prefix`` and ``exec_prefix`` to append ``"/local"``. This is
+ (fortunately?) done quite unconditionally, so we create a default command
+ object without any configuration to detect this.
+ """
+ from distutils.command.install import install
+ from distutils.dist import Distribution
+
+ cmd: Any = install(Distribution())
+ cmd.finalize_options()
+ return (
+ cmd.exec_prefix == f"{os.path.normpath(sys.exec_prefix)}/local"
+ and cmd.prefix == f"{os.path.normpath(sys.prefix)}/local"
+ )
+
+
+@functools.lru_cache(maxsize=None)
+def _looks_like_slackware_scheme() -> bool:
+ """Slackware patches sysconfig but fails to patch distutils and site.
+
+ Slackware changes sysconfig's user scheme to use ``"lib64"`` for the lib
+ path, but does not do the same to the site module.
+ """
+ if user_site is None: # User-site not available.
+ return False
+ try:
+ paths = sysconfig.get_paths(scheme="posix_user", expand=False)
+ except KeyError: # User-site not available.
+ return False
+ return "/lib64/" in paths["purelib"] and "/lib64/" not in user_site
+
+
+@functools.lru_cache(maxsize=None)
+def _looks_like_msys2_mingw_scheme() -> bool:
+ """MSYS2 patches distutils and sysconfig to use a UNIX-like scheme.
+
+ However, MSYS2 incorrectly patches sysconfig ``nt`` scheme. The fix is
+ likely going to be included in their 3.10 release, so we ignore the warning.
+ See msys2/MINGW-packages#9319.
+
+ MSYS2 MINGW's patch uses lowercase ``"lib"`` instead of the usual uppercase,
+ and is missing the final ``"site-packages"``.
+ """
+ paths = sysconfig.get_paths("nt", expand=False)
+ return all(
+ "Lib" not in p and "lib" in p and not p.endswith("site-packages")
+ for p in (paths[key] for key in ("platlib", "purelib"))
+ )
+
+
+def _fix_abiflags(parts: Tuple[str]) -> Generator[str, None, None]:
+ ldversion = sysconfig.get_config_var("LDVERSION")
+ abiflags = getattr(sys, "abiflags", None)
+
+ # LDVERSION does not end with sys.abiflags. Just return the path unchanged.
+ if not ldversion or not abiflags or not ldversion.endswith(abiflags):
+ yield from parts
+ return
+
+ # Strip sys.abiflags from LDVERSION-based path components.
+ for part in parts:
+ if part.endswith(ldversion):
+ part = part[: (0 - len(abiflags))]
+ yield part
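+# Sketch: with LDVERSION "3.7m" and sys.abiflags "m", a part such as
+# "python3.7m" is trimmed to "python3.7" so both path flavors compare equal.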
+
+
+@functools.lru_cache(maxsize=None)
+def _warn_mismatched(old: pathlib.Path, new: pathlib.Path, *, key: str) -> None:
+ issue_url = "https://github.com/pypa/pip/issues/10151"
+ message = (
+ "Value for %s does not match. Please report this to <%s>"
+ "\ndistutils: %s"
+ "\nsysconfig: %s"
+ )
+ logger.log(_MISMATCH_LEVEL, message, key, issue_url, old, new)
+
+
+def _warn_if_mismatch(old: pathlib.Path, new: pathlib.Path, *, key: str) -> bool:
+ if old == new:
+ return False
+ _warn_mismatched(old, new, key=key)
+ return True
+
+
+@functools.lru_cache(maxsize=None)
+def _log_context(
+ *,
+ user: bool = False,
+ home: Optional[str] = None,
+ root: Optional[str] = None,
+ prefix: Optional[str] = None,
+) -> None:
+ parts = [
+ "Additional context:",
+ "user = %r",
+ "home = %r",
+ "root = %r",
+ "prefix = %r",
+ ]
+
+ logger.log(_MISMATCH_LEVEL, "\n".join(parts), user, home, root, prefix)
+
+
+def get_scheme(
+ dist_name: str,
+ user: bool = False,
+ home: Optional[str] = None,
+ root: Optional[str] = None,
+ isolated: bool = False,
+ prefix: Optional[str] = None,
+) -> Scheme:
+ new = _sysconfig.get_scheme(
+ dist_name,
+ user=user,
+ home=home,
+ root=root,
+ isolated=isolated,
+ prefix=prefix,
+ )
+ if _USE_SYSCONFIG:
+ return new
+
+ old = _distutils.get_scheme(
+ dist_name,
+ user=user,
+ home=home,
+ root=root,
+ isolated=isolated,
+ prefix=prefix,
+ )
+
+ warning_contexts = []
+ for k in SCHEME_KEYS:
+ old_v = pathlib.Path(getattr(old, k))
+ new_v = pathlib.Path(getattr(new, k))
+
+ if old_v == new_v:
+ continue
+
+ # distutils incorrectly put PyPy packages under ``site-packages/python``
+ # in the ``posix_home`` scheme, but PyPy devs said they expect the
+ # directory name to be ``pypy`` instead. So we treat this as a bug fix
+ # and not warn about it. See bpo-43307 and python/cpython#24628.
+ skip_pypy_special_case = (
+ sys.implementation.name == "pypy"
+ and home is not None
+ and k in ("platlib", "purelib")
+ and old_v.parent == new_v.parent
+ and old_v.name.startswith("python")
+ and new_v.name.startswith("pypy")
+ )
+ if skip_pypy_special_case:
+ continue
+
+ # sysconfig's ``osx_framework_user`` does not include ``pythonX.Y`` in
+ # the ``include`` value, but distutils's ``headers`` does. We'll let
+ # CPython decide whether this is a bug or feature. See bpo-43948.
+ skip_osx_framework_user_special_case = (
+ user
+ and is_osx_framework()
+ and k == "headers"
+ and old_v.parent.parent == new_v.parent
+ and old_v.parent.name.startswith("python")
+ )
+ if skip_osx_framework_user_special_case:
+ continue
+
+ # On Red Hat and derived Linux distributions, distutils is patched to
+ # use "lib64" instead of "lib" for platlib.
+ if k == "platlib" and _looks_like_red_hat_lib():
+ continue
+
+ # On Python 3.9+, sysconfig's posix_user scheme sets platlib against
+ # sys.platlibdir, but distutils's unix_user incorrectly continues
+ # using the same $usersite for both platlib and purelib. This creates a
+ # mismatch when sys.platlibdir is not "lib".
+ skip_bpo_44860 = (
+ user
+ and k == "platlib"
+ and not WINDOWS
+ and sys.version_info >= (3, 9)
+ and _PLATLIBDIR != "lib"
+ and _looks_like_bpo_44860()
+ )
+ if skip_bpo_44860:
+ continue
+
+ # Slackware incorrectly patches posix_user to use lib64 instead of lib,
+ # but not usersite to match the location.
+ skip_slackware_user_scheme = (
+ user
+ and k in ("platlib", "purelib")
+ and not WINDOWS
+ and _looks_like_slackware_scheme()
+ )
+ if skip_slackware_user_scheme:
+ continue
+
+ # Both Debian and Red Hat patch Python to place the system site under
+ # /usr/local instead of /usr. Debian also places lib in dist-packages
+ # instead of site-packages, but the /usr/local check should cover it.
+ skip_linux_system_special_case = (
+ not (user or home or prefix or running_under_virtualenv())
+ and old_v.parts[1:3] == ("usr", "local")
+ and len(new_v.parts) > 1
+ and new_v.parts[1] == "usr"
+ and (len(new_v.parts) < 3 or new_v.parts[2] != "local")
+ and (_looks_like_red_hat_scheme() or _looks_like_debian_scheme())
+ )
+ if skip_linux_system_special_case:
+ continue
+
+ # On Python 3.7 and earlier, sysconfig does not include sys.abiflags in
+ # the "pythonX.Y" part of the path, but distutils does.
+ skip_sysconfig_abiflag_bug = (
+ sys.version_info < (3, 8)
+ and not WINDOWS
+ and k in ("headers", "platlib", "purelib")
+ and tuple(_fix_abiflags(old_v.parts)) == new_v.parts
+ )
+ if skip_sysconfig_abiflag_bug:
+ continue
+
+ # MSYS2 MINGW's sysconfig patch does not include the "site-packages"
+ # part of the path. This is incorrect and will be fixed in MSYS.
+ skip_msys2_mingw_bug = (
+ WINDOWS and k in ("platlib", "purelib") and _looks_like_msys2_mingw_scheme()
+ )
+ if skip_msys2_mingw_bug:
+ continue
+
+ # CPython's POSIX install script invokes pip (via ensurepip) against the
+ # interpreter located in the source tree, not the install site. This
+ # triggers special logic in sysconfig that's not present in distutils.
+ # https://github.com/python/cpython/blob/8c21941ddaf/Lib/sysconfig.py#L178-L194
+ skip_cpython_build = (
+ sysconfig.is_python_build(check_home=True)
+ and not WINDOWS
+ and k in ("headers", "include", "platinclude")
+ )
+ if skip_cpython_build:
+ continue
+
+ warning_contexts.append((old_v, new_v, f"scheme.{k}"))
+
+ if not warning_contexts:
+ return old
+
+ # Check if this path mismatch is caused by distutils config files. Those
+ # files will no longer work once we switch to sysconfig, so this raises a
+ # deprecation message for them.
+ default_old = _distutils.distutils_scheme(
+ dist_name,
+ user,
+ home,
+ root,
+ isolated,
+ prefix,
+ ignore_config_files=True,
+ )
+ if any(default_old[k] != getattr(old, k) for k in SCHEME_KEYS):
+ deprecated(
+ reason=(
+ "Configuring installation scheme with distutils config files "
+ "is deprecated and will no longer work in the near future. If you "
+ "are using a Homebrew or Linuxbrew Python, please see discussion "
+ "at https://github.com/Homebrew/homebrew-core/issues/76621"
+ ),
+ replacement=None,
+ gone_in=None,
+ )
+ return old
+
+ # Post warnings about this mismatch so user can report them back.
+ for old_v, new_v, key in warning_contexts:
+ _warn_mismatched(old_v, new_v, key=key)
+ _log_context(user=user, home=home, root=root, prefix=prefix)
+
+ return old
+
+
+def get_bin_prefix() -> str:
+ new = _sysconfig.get_bin_prefix()
+ if _USE_SYSCONFIG:
+ return new
+
+ old = _distutils.get_bin_prefix()
+ if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="bin_prefix"):
+ _log_context()
+ return old
+
+
+def get_bin_user() -> str:
+ return _sysconfig.get_scheme("", user=True).scripts
+
+
+def _looks_like_deb_system_dist_packages(value: str) -> bool:
+ """Check if the value is Debian's APT-controlled dist-packages.
+
+ Debian's ``distutils.sysconfig.get_python_lib()`` implementation returns the
+ default package path controlled by APT, but does not patch ``sysconfig`` to
+ do the same. This is similar to the bug worked around in ``get_scheme()``,
+ but here the default is ``deb_system`` instead of ``unix_local``. Ultimately
+ we can't do anything about this Debian bug, and this detection allows us to
+ skip the warning when needed.
+ """
+ if not _looks_like_debian_scheme():
+ return False
+ if value == "/usr/lib/python3/dist-packages":
+ return True
+ return False
+
+
+def get_purelib() -> str:
+ """Return the default pure-Python lib location."""
+ new = _sysconfig.get_purelib()
+ if _USE_SYSCONFIG:
+ return new
+
+ old = _distutils.get_purelib()
+ if _looks_like_deb_system_dist_packages(old):
+ return old
+ if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="purelib"):
+ _log_context()
+ return old
+
+
+def get_platlib() -> str:
+ """Return the default platform-shared lib location."""
+ new = _sysconfig.get_platlib()
+ if _USE_SYSCONFIG:
+ return new
+
+ from . import _distutils
+
+ old = _distutils.get_platlib()
+ if _looks_like_deb_system_dist_packages(old):
+ return old
+ if _warn_if_mismatch(pathlib.Path(old), pathlib.Path(new), key="platlib"):
+ _log_context()
+ return old
diff --git a/third_party/python/pip/pip/_internal/locations/_distutils.py b/third_party/python/pip/pip/_internal/locations/_distutils.py
new file mode 100644
index 0000000000..92bd93179c
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/locations/_distutils.py
@@ -0,0 +1,173 @@
+"""Locations where we look for configs, install stuff, etc"""
+
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
+# If pip's going to use distutils, it should not be using the copy that setuptools
+# might have injected into the environment. This is done by removing the injected
+# shim, if it's injected.
+#
+# See https://github.com/pypa/pip/issues/8761 for the original discussion and
+# rationale for why this is done within pip.
+try:
+ __import__("_distutils_hack").remove_shim()
+except (ImportError, AttributeError):
+ pass
+
+import logging
+import os
+import sys
+from distutils.cmd import Command as DistutilsCommand
+from distutils.command.install import SCHEME_KEYS
+from distutils.command.install import install as distutils_install_command
+from distutils.sysconfig import get_python_lib
+from typing import Dict, List, Optional, Union, cast
+
+from pip._internal.models.scheme import Scheme
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+from .base import get_major_minor_version
+
+logger = logging.getLogger(__name__)
+
+
+def distutils_scheme(
+ dist_name: str,
+ user: bool = False,
+ home: Optional[str] = None,
+ root: Optional[str] = None,
+ isolated: bool = False,
+ prefix: Optional[str] = None,
+ *,
+ ignore_config_files: bool = False,
+) -> Dict[str, str]:
+ """
+ Return a distutils install scheme
+ """
+ from distutils.dist import Distribution
+
+ dist_args: Dict[str, Union[str, List[str]]] = {"name": dist_name}
+ if isolated:
+ dist_args["script_args"] = ["--no-user-cfg"]
+
+ d = Distribution(dist_args)
+ if not ignore_config_files:
+ try:
+ d.parse_config_files()
+ except UnicodeDecodeError:
+ # Typeshed does not include find_config_files() for some reason.
+ paths = d.find_config_files() # type: ignore
+ logger.warning(
+ "Ignore distutils configs in %s due to encoding errors.",
+ ", ".join(os.path.basename(p) for p in paths),
+ )
+ obj: Optional[DistutilsCommand] = None
+ obj = d.get_command_obj("install", create=True)
+ assert obj is not None
+ i = cast(distutils_install_command, obj)
+ # NOTE: setting user or home has the side-effect of creating the home dir
+ # or user base for installations during finalize_options().
+ # Ideally, we'd prefer a scheme class that has no side-effects.
+ assert not (user and prefix), f"user={user} prefix={prefix}"
+ assert not (home and prefix), f"home={home} prefix={prefix}"
+ i.user = user or i.user
+ if user or home:
+ i.prefix = ""
+ i.prefix = prefix or i.prefix
+ i.home = home or i.home
+ i.root = root or i.root
+ i.finalize_options()
+
+ scheme = {}
+ for key in SCHEME_KEYS:
+ scheme[key] = getattr(i, "install_" + key)
+
+ # install_lib specified in setup.cfg should install *everything*
+ # into there (i.e. it takes precedence over both purelib and
+ # platlib). Note, i.install_lib is *always* set after
+ # finalize_options(); we only want to override here if the user
+ # has explicitly requested it hence going back to the config
+ if "install_lib" in d.get_option_dict("install"):
+ scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
+
+ if running_under_virtualenv():
+ if home:
+ prefix = home
+ elif user:
+ prefix = i.install_userbase
+ else:
+ prefix = i.prefix
+ scheme["headers"] = os.path.join(
+ prefix,
+ "include",
+ "site",
+ f"python{get_major_minor_version()}",
+ dist_name,
+ )
+
+ if root is not None:
+ path_no_drive = os.path.splitdrive(os.path.abspath(scheme["headers"]))[1]
+ scheme["headers"] = os.path.join(root, path_no_drive[1:])
+
+ return scheme
+
+
+def get_scheme(
+ dist_name: str,
+ user: bool = False,
+ home: Optional[str] = None,
+ root: Optional[str] = None,
+ isolated: bool = False,
+ prefix: Optional[str] = None,
+) -> Scheme:
+ """
+ Get the "scheme" corresponding to the input parameters. The distutils
+ documentation provides the context for the available schemes:
+ https://docs.python.org/3/install/index.html#alternate-installation
+
+ :param dist_name: the name of the package to retrieve the scheme for, used
+ in the headers scheme path
+ :param user: indicates to use the "user" scheme
+ :param home: indicates to use the "home" scheme and provides the base
+ directory for the same
+ :param root: root under which other directories are re-based
+ :param isolated: equivalent to --no-user-cfg, i.e. do not consider
+ ~/.pydistutils.cfg (posix) or ~/pydistutils.cfg (non-posix) for
+ scheme paths
+ :param prefix: indicates to use the "prefix" scheme and provides the
+ base directory for the same
+ """
+ scheme = distutils_scheme(dist_name, user, home, root, isolated, prefix)
+ return Scheme(
+ platlib=scheme["platlib"],
+ purelib=scheme["purelib"],
+ headers=scheme["headers"],
+ scripts=scheme["scripts"],
+ data=scheme["data"],
+ )
+
+
+def get_bin_prefix() -> str:
+ # XXX: In old virtualenv versions, sys.prefix can contain '..' components,
+ # so we need to call normpath to eliminate them.
+ prefix = os.path.normpath(sys.prefix)
+ if WINDOWS:
+ bin_py = os.path.join(prefix, "Scripts")
+ # buildout uses 'bin' on Windows too?
+ if not os.path.exists(bin_py):
+ bin_py = os.path.join(prefix, "bin")
+ return bin_py
+ # Forcing to use /usr/local/bin for standard macOS framework installs
+ # Also log to ~/Library/Logs/ for use with the Console.app log viewer
+ if sys.platform[:6] == "darwin" and prefix[:16] == "/System/Library/":
+ return "/usr/local/bin"
+ return os.path.join(prefix, "bin")
+
+
+def get_purelib() -> str:
+ return get_python_lib(plat_specific=False)
+
+
+def get_platlib() -> str:
+ return get_python_lib(plat_specific=True)
diff --git a/third_party/python/pip/pip/_internal/locations/_sysconfig.py b/third_party/python/pip/pip/_internal/locations/_sysconfig.py
new file mode 100644
index 0000000000..97aef1f1ac
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/locations/_sysconfig.py
@@ -0,0 +1,213 @@
+import logging
+import os
+import sys
+import sysconfig
+import typing
+
+from pip._internal.exceptions import InvalidSchemeCombination, UserInstallationInvalid
+from pip._internal.models.scheme import SCHEME_KEYS, Scheme
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+from .base import change_root, get_major_minor_version, is_osx_framework
+
+logger = logging.getLogger(__name__)
+
+
+# Notes on _infer_* functions.
+# Unfortunately ``get_default_scheme()`` didn't exist before 3.10, so there's no
+# way to ask things like "what is the '_prefix' scheme on this platform". These
+# functions try to answer that with some heuristics while accounting for ad-hoc
+# platforms not covered by CPython's default sysconfig implementation. If the
+# ad-hoc implementation does not fully implement sysconfig, we'll fall back to
+# a POSIX scheme.
+
+_AVAILABLE_SCHEMES = set(sysconfig.get_scheme_names())
+
+_PREFERRED_SCHEME_API = getattr(sysconfig, "get_preferred_scheme", None)
+
+
+def _should_use_osx_framework_prefix() -> bool:
+ """Check for Apple's ``osx_framework_library`` scheme.
+
+ Python distributed by Apple's Command Line Tools has this special scheme
+ that's used when:
+
+ * This is a framework build.
+ * We are installing into the system prefix.
+
+ This does not account for ``pip install --prefix`` (also means we're not
+ installing to the system prefix), which should use ``posix_prefix``, but
+ logic here means ``_infer_prefix()`` outputs ``osx_framework_library``. But
+ since ``prefix`` is not available for ``sysconfig.get_default_scheme()``,
+ which is the stdlib replacement for ``_infer_prefix()``, presumably Apple
+ wouldn't be able to magically switch between ``osx_framework_library`` and
+ ``posix_prefix``. ``_infer_prefix()`` returning ``osx_framework_library``
+ means its behavior is consistent whether we use the stdlib implementation
+ or our own, and we deal with this special case in ``get_scheme()`` instead.
+ """
+ return (
+ "osx_framework_library" in _AVAILABLE_SCHEMES
+ and not running_under_virtualenv()
+ and is_osx_framework()
+ )
+
+
+def _infer_prefix() -> str:
+ """Try to find a prefix scheme for the current platform.
+
+ This tries:
+
+ * A special ``osx_framework_library`` for Python distributed by Apple's
+ Command Line Tools, when not running in a virtual environment.
+ * Implementation + OS, used by PyPy on Windows (``pypy_nt``).
+ * Implementation without OS, used by PyPy on POSIX (``pypy``).
+ * OS + "prefix", used by CPython on POSIX (``posix_prefix``).
+ * Just the OS name, used by CPython on Windows (``nt``).
+
+ If none of the above works, fall back to ``posix_prefix``.
+ """
+ if _PREFERRED_SCHEME_API:
+ return _PREFERRED_SCHEME_API("prefix")
+ if _should_use_osx_framework_prefix():
+ return "osx_framework_library"
+ implementation_suffixed = f"{sys.implementation.name}_{os.name}"
+ if implementation_suffixed in _AVAILABLE_SCHEMES:
+ return implementation_suffixed
+ if sys.implementation.name in _AVAILABLE_SCHEMES:
+ return sys.implementation.name
+ suffixed = f"{os.name}_prefix"
+ if suffixed in _AVAILABLE_SCHEMES:
+ return suffixed
+ if os.name in _AVAILABLE_SCHEMES: # On Windows, prefix is just called "nt".
+ return os.name
+ return "posix_prefix"
+
+
+def _infer_user() -> str:
+ """Try to find a user scheme for the current platform."""
+ if _PREFERRED_SCHEME_API:
+ return _PREFERRED_SCHEME_API("user")
+ if is_osx_framework() and not running_under_virtualenv():
+ suffixed = "osx_framework_user"
+ else:
+ suffixed = f"{os.name}_user"
+ if suffixed in _AVAILABLE_SCHEMES:
+ return suffixed
+ if "posix_user" not in _AVAILABLE_SCHEMES: # User scheme unavailable.
+ raise UserInstallationInvalid()
+ return "posix_user"
+
+
+def _infer_home() -> str:
+ """Try to find a home for the current platform."""
+ if _PREFERRED_SCHEME_API:
+ return _PREFERRED_SCHEME_API("home")
+ suffixed = f"{os.name}_home"
+ if suffixed in _AVAILABLE_SCHEMES:
+ return suffixed
+ return "posix_home"
+
+
+# Update these keys if the user sets a custom home.
+_HOME_KEYS = [
+ "installed_base",
+ "base",
+ "installed_platbase",
+ "platbase",
+ "prefix",
+ "exec_prefix",
+]
+if sysconfig.get_config_var("userbase") is not None:
+ _HOME_KEYS.append("userbase")
+
+
+def get_scheme(
+ dist_name: str,
+ user: bool = False,
+ home: typing.Optional[str] = None,
+ root: typing.Optional[str] = None,
+ isolated: bool = False,
+ prefix: typing.Optional[str] = None,
+) -> Scheme:
+ """
+ Get the "scheme" corresponding to the input parameters.
+
+ :param dist_name: the name of the package to retrieve the scheme for, used
+ in the headers scheme path
+ :param user: indicates to use the "user" scheme
+ :param home: indicates to use the "home" scheme
+ :param root: root under which other directories are re-based
+ :param isolated: ignored, but kept for distutils compatibility (where
+ this controls whether the user-site pydistutils.cfg is honored)
+ :param prefix: indicates to use the "prefix" scheme and provides the
+ base directory for the same
+ """
+ if user and prefix:
+ raise InvalidSchemeCombination("--user", "--prefix")
+ if home and prefix:
+ raise InvalidSchemeCombination("--home", "--prefix")
+
+ if home is not None:
+ scheme_name = _infer_home()
+ elif user:
+ scheme_name = _infer_user()
+ else:
+ scheme_name = _infer_prefix()
+
+ # Special case: When installing into a custom prefix, use posix_prefix
+ # instead of osx_framework_library. See _should_use_osx_framework_prefix()
+ # docstring for details.
+ if prefix is not None and scheme_name == "osx_framework_library":
+ scheme_name = "posix_prefix"
+
+ if home is not None:
+ variables = {k: home for k in _HOME_KEYS}
+ elif prefix is not None:
+ variables = {k: prefix for k in _HOME_KEYS}
+ else:
+ variables = {}
+
+ paths = sysconfig.get_paths(scheme=scheme_name, vars=variables)
+
+ # Logic here is very arbitrary; we're doing it for compatibility, don't ask.
+ # 1. Pip historically uses a special header path in virtual environments.
+ # 2. If the distribution name is not known, distutils uses 'UNKNOWN'. We
+ # only do the same when not running in a virtual environment because
+ # pip's historical header path logic (see point 1) did not do this.
+ if running_under_virtualenv():
+ if user:
+ base = variables.get("userbase", sys.prefix)
+ else:
+ base = variables.get("base", sys.prefix)
+ python_xy = f"python{get_major_minor_version()}"
+ paths["include"] = os.path.join(base, "include", "site", python_xy)
+ elif not dist_name:
+ dist_name = "UNKNOWN"
+
+ scheme = Scheme(
+ platlib=paths["platlib"],
+ purelib=paths["purelib"],
+ headers=os.path.join(paths["include"], dist_name),
+ scripts=paths["scripts"],
+ data=paths["data"],
+ )
+ if root is not None:
+ for key in SCHEME_KEYS:
+ value = change_root(root, getattr(scheme, key))
+ setattr(scheme, key, value)
+ return scheme
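+# Usage sketch: get_scheme("example", user=True) resolves the platform's
+# user scheme (e.g. "posix_user") and returns a populated Scheme.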
+
+
+def get_bin_prefix() -> str:
+ # Forcing to use /usr/local/bin for standard macOS framework installs.
+ if sys.platform[:6] == "darwin" and sys.prefix[:16] == "/System/Library/":
+ return "/usr/local/bin"
+ return sysconfig.get_paths()["scripts"]
+
+
+def get_purelib() -> str:
+ return sysconfig.get_paths()["purelib"]
+
+
+def get_platlib() -> str:
+ return sysconfig.get_paths()["platlib"]
diff --git a/third_party/python/pip/pip/_internal/locations/base.py b/third_party/python/pip/pip/_internal/locations/base.py
new file mode 100644
index 0000000000..3f9f896e63
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/locations/base.py
@@ -0,0 +1,81 @@
+import functools
+import os
+import site
+import sys
+import sysconfig
+import typing
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.utils import appdirs
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+# Application Directories
+USER_CACHE_DIR = appdirs.user_cache_dir("pip")
+
+# FIXME doesn't account for venv linked to global site-packages
+site_packages: str = sysconfig.get_path("purelib")
+
+
+def get_major_minor_version() -> str:
+ """
+ Return the major-minor version of the current Python as a string, e.g.
+ "3.7" or "3.10".
+ """
+ return "{}.{}".format(*sys.version_info)
+
+
+def change_root(new_root: str, pathname: str) -> str:
+ """Return 'pathname' with 'new_root' prepended.
+
+ If 'pathname' is relative, this is equivalent to os.path.join(new_root, pathname).
+ Otherwise, it requires making 'pathname' relative and then joining the
+ two, which is tricky on DOS/Windows and Mac OS.
+
+ This is borrowed from Python's standard library's distutils module.
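+
+ For example, on POSIX::
+
+ >>> change_root("/alt", "/usr/lib/python3")
+ '/alt/usr/lib/python3'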
+ """
+ if os.name == "posix":
+ if not os.path.isabs(pathname):
+ return os.path.join(new_root, pathname)
+ else:
+ return os.path.join(new_root, pathname[1:])
+
+ elif os.name == "nt":
+ (drive, path) = os.path.splitdrive(pathname)
+ if path[0] == "\\":
+ path = path[1:]
+ return os.path.join(new_root, path)
+
+ else:
+ raise InstallationError(
+ f"Unknown platform: {os.name}\n"
+ "Can not change root path prefix on unknown platform."
+ )
+
+
+def get_src_prefix() -> str:
+ if running_under_virtualenv():
+ src_prefix = os.path.join(sys.prefix, "src")
+ else:
+ # FIXME: keep src in cwd for now (it is not a temporary folder)
+ try:
+ src_prefix = os.path.join(os.getcwd(), "src")
+ except OSError:
+ # In case the current working directory has been renamed or deleted
+ sys.exit("The folder you are executing pip from can no longer be found.")
+
+ # under macOS + virtualenv sys.prefix is not properly resolved
+ # it is something like /path/to/python/bin/..
+ return os.path.abspath(src_prefix)
+
+
+try:
+ # Use getusersitepackages if this is present, as it ensures that the
+ # value is initialised properly.
+ user_site: typing.Optional[str] = site.getusersitepackages()
+except AttributeError:
+ user_site = site.USER_SITE
+
+
+@functools.lru_cache(maxsize=None)
+def is_osx_framework() -> bool:
+ return bool(sysconfig.get_config_var("PYTHONFRAMEWORK"))
diff --git a/third_party/python/pip/pip/_internal/main.py b/third_party/python/pip/pip/_internal/main.py
new file mode 100644
index 0000000000..33c6d24cd8
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/main.py
@@ -0,0 +1,12 @@
+from typing import List, Optional
+
+
+def main(args: Optional[List[str]] = None) -> int:
+ """This is preserved for old console scripts that may still be referencing
+ it.
+
+ For additional details, see https://github.com/pypa/pip/issues/7498.
+ """
+ from pip._internal.utils.entrypoints import _wrapper
+
+ return _wrapper(args)
diff --git a/third_party/python/pip/pip/_internal/metadata/__init__.py b/third_party/python/pip/pip/_internal/metadata/__init__.py
new file mode 100644
index 0000000000..9f73ca7105
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/metadata/__init__.py
@@ -0,0 +1,127 @@
+import contextlib
+import functools
+import os
+import sys
+from typing import TYPE_CHECKING, List, Optional, Type, cast
+
+from pip._internal.utils.misc import strtobool
+
+from .base import BaseDistribution, BaseEnvironment, FilesystemWheel, MemoryWheel, Wheel
+
+if TYPE_CHECKING:
+ from typing import Protocol
+else:
+ Protocol = object
+
+__all__ = [
+ "BaseDistribution",
+ "BaseEnvironment",
+ "FilesystemWheel",
+ "MemoryWheel",
+ "Wheel",
+ "get_default_environment",
+ "get_environment",
+ "get_wheel_distribution",
+ "select_backend",
+]
+
+
+def _should_use_importlib_metadata() -> bool:
+ """Whether to use the ``importlib.metadata`` or ``pkg_resources`` backend.
+
+ By default, pip uses ``importlib.metadata`` on Python 3.11+, and
+ ``pkg_resources`` otherwise. This can be overridden in a couple of ways:
+
+ * If environment variable ``_PIP_USE_IMPORTLIB_METADATA`` is set, it
+ dictates whether ``importlib.metadata`` is used, regardless of Python
+ version.
+ * On Python 3.11+, Python distributors can patch ``importlib.metadata``
+ to add a global constant ``_PIP_USE_IMPORTLIB_METADATA = False``. This
+ makes pip use ``pkg_resources`` (unless the user set the aforementioned
+ environment variable to *True*).
+ """
+ with contextlib.suppress(KeyError, ValueError):
+ return bool(strtobool(os.environ["_PIP_USE_IMPORTLIB_METADATA"]))
+ if sys.version_info < (3, 11):
+ return False
+ import importlib.metadata
+
+ return bool(getattr(importlib.metadata, "_PIP_USE_IMPORTLIB_METADATA", True))
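+# e.g. exporting _PIP_USE_IMPORTLIB_METADATA=1 in the environment forces the
+# importlib.metadata backend even on Python 3.10 and earlier.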
+
+
+class Backend(Protocol):
+ Distribution: Type[BaseDistribution]
+ Environment: Type[BaseEnvironment]
+
+
+@functools.lru_cache(maxsize=None)
+def select_backend() -> Backend:
+ if _should_use_importlib_metadata():
+ from . import importlib
+
+ return cast(Backend, importlib)
+ from . import pkg_resources
+
+ return cast(Backend, pkg_resources)
+
+
+def get_default_environment() -> BaseEnvironment:
+ """Get the default representation for the current environment.
+
+ This returns an Environment instance from the chosen backend. The default
+ Environment instance should be built from ``sys.path`` and may use caching
+ to share instance state across calls.
+ """
+ return select_backend().Environment.default()
+
+
+def get_environment(paths: Optional[List[str]]) -> BaseEnvironment:
+ """Get a representation of the environment specified by ``paths``.
+
+ This returns an Environment instance from the chosen backend based on the
+ given import paths. The backend must build a fresh instance representing
+ the state of installed distributions when this function is called.
+ """
+ return select_backend().Environment.from_paths(paths)
+
+
+def get_directory_distribution(directory: str) -> BaseDistribution:
+ """Get the distribution metadata representation in the specified directory.
+
+ This returns a Distribution instance from the chosen backend based on
+ the given on-disk ``.dist-info`` directory.
+ """
+ return select_backend().Distribution.from_directory(directory)
+
+
+def get_wheel_distribution(wheel: Wheel, canonical_name: str) -> BaseDistribution:
+ """Get the representation of the specified wheel's distribution metadata.
+
+ This returns a Distribution instance from the chosen backend based on
+ the given wheel's ``.dist-info`` directory.
+
+ :param canonical_name: Normalized project name of the given wheel.
+ """
+ return select_backend().Distribution.from_wheel(wheel, canonical_name)
+
+
+def get_metadata_distribution(
+ metadata_contents: bytes,
+ filename: str,
+ canonical_name: str,
+) -> BaseDistribution:
+ """Get the dist representation of the specified METADATA file contents.
+
+ This returns a Distribution instance from the chosen backend sourced from the data
+ in `metadata_contents`.
+
+ :param metadata_contents: Contents of a METADATA file within a dist, or one served
+ via PEP 658.
+ :param filename: Filename for the dist this metadata represents.
+ :param canonical_name: Normalized project name of the given dist.
+ """
+ return select_backend().Distribution.from_metadata_file_contents(
+ metadata_contents,
+ filename,
+ canonical_name,
+ )
diff --git a/third_party/python/pip/pip/_internal/metadata/_json.py b/third_party/python/pip/pip/_internal/metadata/_json.py
new file mode 100644
index 0000000000..336b52f1ef
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/metadata/_json.py
@@ -0,0 +1,84 @@
+# Extracted from https://github.com/pfmoore/pkg_metadata
+
+from email.header import Header, decode_header, make_header
+from email.message import Message
+from typing import Any, Dict, List, Union
+
+METADATA_FIELDS = [
+ # Name, Multiple-Use
+ ("Metadata-Version", False),
+ ("Name", False),
+ ("Version", False),
+ ("Dynamic", True),
+ ("Platform", True),
+ ("Supported-Platform", True),
+ ("Summary", False),
+ ("Description", False),
+ ("Description-Content-Type", False),
+ ("Keywords", False),
+ ("Home-page", False),
+ ("Download-URL", False),
+ ("Author", False),
+ ("Author-email", False),
+ ("Maintainer", False),
+ ("Maintainer-email", False),
+ ("License", False),
+ ("Classifier", True),
+ ("Requires-Dist", True),
+ ("Requires-Python", False),
+ ("Requires-External", True),
+ ("Project-URL", True),
+ ("Provides-Extra", True),
+ ("Provides-Dist", True),
+ ("Obsoletes-Dist", True),
+]
+
+
+def json_name(field: str) -> str:
+ return field.lower().replace("-", "_")
+
+
+def msg_to_json(msg: Message) -> Dict[str, Any]:
+ """Convert a Message object into a JSON-compatible dictionary."""
+
+ def sanitise_header(h: Union[Header, str]) -> str:
+ if isinstance(h, Header):
+ chunks = []
+ for bytes, encoding in decode_header(h):
+ if encoding == "unknown-8bit":
+ try:
+ # See if UTF-8 works
+ bytes.decode("utf-8")
+ encoding = "utf-8"
+ except UnicodeDecodeError:
+ # If not, latin1 at least won't fail
+ encoding = "latin1"
+ chunks.append((bytes, encoding))
+ return str(make_header(chunks))
+ return str(h)
+
+ result = {}
+ for field, multi in METADATA_FIELDS:
+ if field not in msg:
+ continue
+ key = json_name(field)
+ if multi:
+ value: Union[str, List[str]] = [
+ sanitise_header(v) for v in msg.get_all(field)
+ ]
+ else:
+ value = sanitise_header(msg.get(field))
+ if key == "keywords":
+ # Accept both comma-separated and space-separated
+ # forms, for better compatibility with old data.
+ if "," in value:
+ value = [v.strip() for v in value.split(",")]
+ else:
+ value = value.split()
+ result[key] = value
+
+ payload = msg.get_payload()
+ if payload:
+ result["description"] = payload
+
+ return result
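+
+
+# Usage sketch (a hand-built Message, for illustration only):
+# msg = Message()
+# msg["Metadata-Version"] = "2.1"
+# msg["Keywords"] = "a, b"
+# msg_to_json(msg)
+# -> {"metadata_version": "2.1", "keywords": ["a", "b"]}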
diff --git a/third_party/python/pip/pip/_internal/metadata/base.py b/third_party/python/pip/pip/_internal/metadata/base.py
new file mode 100644
index 0000000000..cafb79fb3d
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/metadata/base.py
@@ -0,0 +1,688 @@
+import csv
+import email.message
+import functools
+import json
+import logging
+import pathlib
+import re
+import zipfile
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Collection,
+ Container,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ NamedTuple,
+ Optional,
+ Tuple,
+ Union,
+)
+
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.specifiers import InvalidSpecifier, SpecifierSet
+from pip._vendor.packaging.utils import NormalizedName
+from pip._vendor.packaging.version import LegacyVersion, Version
+
+from pip._internal.exceptions import NoneMetadataError
+from pip._internal.locations import site_packages, user_site
+from pip._internal.models.direct_url import (
+ DIRECT_URL_METADATA_NAME,
+ DirectUrl,
+ DirectUrlValidationError,
+)
+from pip._internal.utils.compat import stdlib_pkgs # TODO: Move definition here.
+from pip._internal.utils.egg_link import egg_link_path_from_sys_path
+from pip._internal.utils.misc import is_local, normalize_path
+from pip._internal.utils.packaging import safe_extra
+from pip._internal.utils.urls import url_to_path
+
+from ._json import msg_to_json
+
+if TYPE_CHECKING:
+ from typing import Protocol
+else:
+ Protocol = object
+
+DistributionVersion = Union[LegacyVersion, Version]
+
+InfoPath = Union[str, pathlib.PurePath]
+
+logger = logging.getLogger(__name__)
+
+
+class BaseEntryPoint(Protocol):
+ @property
+ def name(self) -> str:
+ raise NotImplementedError()
+
+ @property
+ def value(self) -> str:
+ raise NotImplementedError()
+
+ @property
+ def group(self) -> str:
+ raise NotImplementedError()
+
+
+def _convert_installed_files_path(
+ entry: Tuple[str, ...],
+ info: Tuple[str, ...],
+) -> str:
+ """Convert a legacy installed-files.txt path into modern RECORD path.
+
+ The legacy format stores paths relative to the info directory, while the
+ modern format stores paths relative to the package root, e.g. the
+ site-packages directory.
+
+ :param entry: Path parts of the installed-files.txt entry.
+ :param info: Path parts of the egg-info directory relative to package root.
+ :returns: The converted entry.
+
+ For best compatibility with symlinks, this does not use ``abspath()`` or
+ ``Path.resolve()``, but tries to work with path parts:
+
+    1. While ``entry`` starts with ``..``, remove an equal number of parts
+ from ``info``; if ``info`` is empty, start appending ``..`` instead.
+ 2. Join the two directly.
+ """
+ while entry and entry[0] == "..":
+ if not info or info[-1] == "..":
+ info += ("..",)
+ else:
+ info = info[:-1]
+ entry = entry[1:]
+ return str(pathlib.Path(*info, *entry))
+
+
+class RequiresEntry(NamedTuple):
+ requirement: str
+ extra: str
+ marker: str
+
+
+class BaseDistribution(Protocol):
+ @classmethod
+ def from_directory(cls, directory: str) -> "BaseDistribution":
+ """Load the distribution from a metadata directory.
+
+ :param directory: Path to a metadata directory, e.g. ``.dist-info``.
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def from_metadata_file_contents(
+ cls,
+ metadata_contents: bytes,
+ filename: str,
+ project_name: str,
+ ) -> "BaseDistribution":
+ """Load the distribution from the contents of a METADATA file.
+
+ This is used to implement PEP 658 by generating a "shallow" dist object that can
+ be used for resolution without downloading or building the actual dist yet.
+
+ :param metadata_contents: The contents of a METADATA file.
+ :param filename: File name for the dist with this metadata.
+ :param project_name: Name of the project this dist represents.
+ """
+ raise NotImplementedError()
+
+ @classmethod
+ def from_wheel(cls, wheel: "Wheel", name: str) -> "BaseDistribution":
+ """Load the distribution from a given wheel.
+
+ :param wheel: A concrete wheel definition.
+ :param name: File name of the wheel.
+
+ :raises InvalidWheel: Whenever loading of the wheel causes a
+ :py:exc:`zipfile.BadZipFile` exception to be thrown.
+ :raises UnsupportedWheel: If the wheel is a valid zip, but malformed
+ internally.
+ """
+ raise NotImplementedError()
+
+ def __repr__(self) -> str:
+ return f"{self.raw_name} {self.version} ({self.location})"
+
+ def __str__(self) -> str:
+ return f"{self.raw_name} {self.version}"
+
+ @property
+ def location(self) -> Optional[str]:
+ """Where the distribution is loaded from.
+
+ A string value is not necessarily a filesystem path, since distributions
+ can be loaded from other sources, e.g. arbitrary zip archives. ``None``
+ means the distribution is created in-memory.
+
+ Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
+ this is a symbolic link, we want to preserve the relative path between
+ it and files in the distribution.
+ """
+ raise NotImplementedError()
+
+ @property
+ def editable_project_location(self) -> Optional[str]:
+ """The project location for editable distributions.
+
+ This is the directory where pyproject.toml or setup.py is located.
+ None if the distribution is not installed in editable mode.
+ """
+        # TODO: this property is relatively costly to compute, memoize it?
+ direct_url = self.direct_url
+ if direct_url:
+ if direct_url.is_local_editable():
+ return url_to_path(direct_url.url)
+ else:
+ # Search for an .egg-link file by walking sys.path, as it was
+ # done before by dist_is_editable().
+ egg_link_path = egg_link_path_from_sys_path(self.raw_name)
+ if egg_link_path:
+ # TODO: get project location from second line of egg_link file
+ # (https://github.com/pypa/pip/issues/10243)
+ return self.location
+ return None
+
+ @property
+ def installed_location(self) -> Optional[str]:
+ """The distribution's "installed" location.
+
+ This should generally be a ``site-packages`` directory. This is
+ usually ``dist.location``, except for legacy develop-installed packages,
+ where ``dist.location`` is the source code location, and this is where
+ the ``.egg-link`` file is.
+
+ The returned location is normalized (in particular, with symlinks removed).
+ """
+ raise NotImplementedError()
+
+ @property
+ def info_location(self) -> Optional[str]:
+ """Location of the .[egg|dist]-info directory or file.
+
+ Similarly to ``location``, a string value is not necessarily a
+ filesystem path. ``None`` means the distribution is created in-memory.
+
+ For a modern .dist-info installation on disk, this should be something
+ like ``{location}/{raw_name}-{version}.dist-info``.
+
+ Do not canonicalize this value with e.g. ``pathlib.Path.resolve()``. If
+ this is a symbolic link, we want to preserve the relative path between
+ it and other files in the distribution.
+ """
+ raise NotImplementedError()
+
+ @property
+ def installed_by_distutils(self) -> bool:
+ """Whether this distribution is installed with legacy distutils format.
+
+ A distribution installed with "raw" distutils not patched by setuptools
+        uses a single file at ``info_location`` to store metadata. We need to
+ treat this specially on uninstallation.
+ """
+ info_location = self.info_location
+ if not info_location:
+ return False
+ return pathlib.Path(info_location).is_file()
+
+ @property
+ def installed_as_egg(self) -> bool:
+ """Whether this distribution is installed as an egg.
+
+ This usually indicates the distribution was installed by (older versions
+ of) easy_install.
+ """
+ location = self.location
+ if not location:
+ return False
+ return location.endswith(".egg")
+
+ @property
+ def installed_with_setuptools_egg_info(self) -> bool:
+ """Whether this distribution is installed with the ``.egg-info`` format.
+
+ This usually indicates the distribution was installed with setuptools
+ with an old pip version or with ``single-version-externally-managed``.
+
+        Note that this ensures the metadata store is a directory. distutils can
+        also install an ``.egg-info``, but as a file, not a directory. This
+        property is *False* for that case. Also see ``installed_by_distutils``.
+ """
+ info_location = self.info_location
+ if not info_location:
+ return False
+ if not info_location.endswith(".egg-info"):
+ return False
+ return pathlib.Path(info_location).is_dir()
+
+ @property
+ def installed_with_dist_info(self) -> bool:
+ """Whether this distribution is installed with the "modern format".
+
+ This indicates a "modern" installation, e.g. storing metadata in the
+ ``.dist-info`` directory. This applies to installations made by
+ setuptools (but through pip, not directly), or anything using the
+ standardized build backend interface (PEP 517).
+ """
+ info_location = self.info_location
+ if not info_location:
+ return False
+ if not info_location.endswith(".dist-info"):
+ return False
+ return pathlib.Path(info_location).is_dir()
+
+ @property
+ def canonical_name(self) -> NormalizedName:
+ raise NotImplementedError()
+
+ @property
+ def version(self) -> DistributionVersion:
+ raise NotImplementedError()
+
+ @property
+ def setuptools_filename(self) -> str:
+ """Convert a project name to its setuptools-compatible filename.
+
+ This is a copy of ``pkg_resources.to_filename()`` for compatibility.
+ """
+ return self.raw_name.replace("-", "_")
+
+ @property
+ def direct_url(self) -> Optional[DirectUrl]:
+ """Obtain a DirectUrl from this distribution.
+
+ Returns None if the distribution has no `direct_url.json` metadata,
+ or if `direct_url.json` is invalid.
+ """
+ try:
+ content = self.read_text(DIRECT_URL_METADATA_NAME)
+ except FileNotFoundError:
+ return None
+ try:
+ return DirectUrl.from_json(content)
+ except (
+ UnicodeDecodeError,
+ json.JSONDecodeError,
+ DirectUrlValidationError,
+ ) as e:
+ logger.warning(
+ "Error parsing %s for %s: %s",
+ DIRECT_URL_METADATA_NAME,
+ self.canonical_name,
+ e,
+ )
+ return None
+
+ @property
+ def installer(self) -> str:
+ try:
+ installer_text = self.read_text("INSTALLER")
+ except (OSError, ValueError, NoneMetadataError):
+ return "" # Fail silently if the installer file cannot be read.
+ for line in installer_text.splitlines():
+ cleaned_line = line.strip()
+ if cleaned_line:
+ return cleaned_line
+ return ""
+
+ @property
+ def requested(self) -> bool:
+ return self.is_file("REQUESTED")
+
+ @property
+ def editable(self) -> bool:
+ return bool(self.editable_project_location)
+
+ @property
+ def local(self) -> bool:
+ """If distribution is installed in the current virtual environment.
+
+ Always True if we're not in a virtualenv.
+ """
+ if self.installed_location is None:
+ return False
+ return is_local(self.installed_location)
+
+ @property
+ def in_usersite(self) -> bool:
+ if self.installed_location is None or user_site is None:
+ return False
+ return self.installed_location.startswith(normalize_path(user_site))
+
+ @property
+ def in_site_packages(self) -> bool:
+ if self.installed_location is None or site_packages is None:
+ return False
+ return self.installed_location.startswith(normalize_path(site_packages))
+
+ def is_file(self, path: InfoPath) -> bool:
+ """Check whether an entry in the info directory is a file."""
+ raise NotImplementedError()
+
+ def iter_distutils_script_names(self) -> Iterator[str]:
+ """Find distutils 'scripts' entries metadata.
+
+ If 'scripts' is supplied in ``setup.py``, distutils records those in the
+ installed distribution's ``scripts`` directory, a file for each script.
+ """
+ raise NotImplementedError()
+
+ def read_text(self, path: InfoPath) -> str:
+ """Read a file in the info directory.
+
+ :raise FileNotFoundError: If ``path`` does not exist in the directory.
+ :raise NoneMetadataError: If ``path`` exists in the info directory, but
+ cannot be read.
+ """
+ raise NotImplementedError()
+
+ def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
+ raise NotImplementedError()
+
+ def _metadata_impl(self) -> email.message.Message:
+ raise NotImplementedError()
+
+ @functools.lru_cache(maxsize=1)
+ def _metadata_cached(self) -> email.message.Message:
+ # When we drop python 3.7 support, move this to the metadata property and use
+ # functools.cached_property instead of lru_cache.
+ metadata = self._metadata_impl()
+ self._add_egg_info_requires(metadata)
+ return metadata
+
+ @property
+ def metadata(self) -> email.message.Message:
+ """Metadata of distribution parsed from e.g. METADATA or PKG-INFO.
+
+ This should return an empty message if the metadata file is unavailable.
+
+ :raises NoneMetadataError: If the metadata file is available, but does
+ not contain valid metadata.
+ """
+ return self._metadata_cached()
+
+ @property
+ def metadata_dict(self) -> Dict[str, Any]:
+ """PEP 566 compliant JSON-serializable representation of METADATA or PKG-INFO.
+
+ This should return an empty dict if the metadata file is unavailable.
+
+ :raises NoneMetadataError: If the metadata file is available, but does
+ not contain valid metadata.
+ """
+ return msg_to_json(self.metadata)
+
+ @property
+ def metadata_version(self) -> Optional[str]:
+ """Value of "Metadata-Version:" in distribution metadata, if available."""
+ return self.metadata.get("Metadata-Version")
+
+ @property
+ def raw_name(self) -> str:
+ """Value of "Name:" in distribution metadata."""
+ # The metadata should NEVER be missing the Name: key, but if it somehow
+        # is, fall back to the known canonical name.
+ return self.metadata.get("Name", self.canonical_name)
+
+ @property
+ def requires_python(self) -> SpecifierSet:
+ """Value of "Requires-Python:" in distribution metadata.
+
+ If the key does not exist or contains an invalid value, an empty
+ SpecifierSet should be returned.
+ """
+ value = self.metadata.get("Requires-Python")
+ if value is None:
+ return SpecifierSet()
+ try:
+ # Convert to str to satisfy the type checker; this can be a Header object.
+ spec = SpecifierSet(str(value))
+ except InvalidSpecifier as e:
+ message = "Package %r has an invalid Requires-Python: %s"
+ logger.warning(message, self.raw_name, e)
+ return SpecifierSet()
+ return spec
+
+ def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
+ """Dependencies of this distribution.
+
+ For modern .dist-info distributions, this is the collection of
+ "Requires-Dist:" entries in distribution metadata.
+ """
+ raise NotImplementedError()
+
+ def iter_provided_extras(self) -> Iterable[str]:
+ """Extras provided by this distribution.
+
+ For modern .dist-info distributions, this is the collection of
+ "Provides-Extra:" entries in distribution metadata.
+ """
+ raise NotImplementedError()
+
+ def _iter_declared_entries_from_record(self) -> Optional[Iterator[str]]:
+ try:
+ text = self.read_text("RECORD")
+ except FileNotFoundError:
+ return None
+ # This extra Path-str cast normalizes entries.
+ return (str(pathlib.Path(row[0])) for row in csv.reader(text.splitlines()))
+
+ def _iter_declared_entries_from_legacy(self) -> Optional[Iterator[str]]:
+ try:
+ text = self.read_text("installed-files.txt")
+ except FileNotFoundError:
+ return None
+ paths = (p for p in text.splitlines(keepends=False) if p)
+ root = self.location
+ info = self.info_location
+ if root is None or info is None:
+ return paths
+ try:
+ info_rel = pathlib.Path(info).relative_to(root)
+ except ValueError: # info is not relative to root.
+ return paths
+ if not info_rel.parts: # info *is* root.
+ return paths
+ return (
+ _convert_installed_files_path(pathlib.Path(p).parts, info_rel.parts)
+ for p in paths
+ )
+
+ def iter_declared_entries(self) -> Optional[Iterator[str]]:
+ """Iterate through file entries declared in this distribution.
+
+ For modern .dist-info distributions, this is the files listed in the
+ ``RECORD`` metadata file. For legacy setuptools distributions, this
+ comes from ``installed-files.txt``, with entries normalized to be
+ compatible with the format used by ``RECORD``.
+
+ :return: An iterator for listed entries, or None if the distribution
+ contains neither ``RECORD`` nor ``installed-files.txt``.
+ """
+ return (
+ self._iter_declared_entries_from_record()
+ or self._iter_declared_entries_from_legacy()
+ )
+
+ def _iter_requires_txt_entries(self) -> Iterator[RequiresEntry]:
+ """Parse a ``requires.txt`` in an egg-info directory.
+
+ This is an INI-ish format where an egg-info stores dependencies. A
+        section name describes an extra and/or environment markers, while each entry
+ is an arbitrary string (not a key-value pair) representing a dependency
+ as a requirement string (no markers).
+
+ There is a construct in ``importlib.metadata`` called ``Sectioned`` that
+ does mostly the same, but the format is currently considered private.
+ """
+ try:
+ content = self.read_text("requires.txt")
+ except FileNotFoundError:
+ return
+ extra = marker = "" # Section-less entries don't have markers.
+ for line in content.splitlines():
+ line = line.strip()
+ if not line or line.startswith("#"): # Comment; ignored.
+ continue
+ if line.startswith("[") and line.endswith("]"): # A section header.
+ extra, _, marker = line.strip("[]").partition(":")
+ continue
+ yield RequiresEntry(requirement=line, extra=extra, marker=marker)
+
+ def _iter_egg_info_extras(self) -> Iterable[str]:
+ """Get extras from the egg-info directory."""
+ known_extras = {""}
+ for entry in self._iter_requires_txt_entries():
+ if entry.extra in known_extras:
+ continue
+ known_extras.add(entry.extra)
+ yield entry.extra
+
+ def _iter_egg_info_dependencies(self) -> Iterable[str]:
+ """Get distribution dependencies from the egg-info directory.
+
+ To ease parsing, this converts a legacy dependency entry into a PEP 508
+ requirement string. Like ``_iter_requires_txt_entries()``, there is code
+        in ``importlib.metadata`` that does mostly the same, but does not do
+        exactly what we need.
+
+ Namely, ``importlib.metadata`` does not normalize the extra name before
+ putting it into the requirement string, which causes marker comparison
+        to fail because the dist-info format does normalize. This is consistent in
+ all currently available PEP 517 backends, although not standardized.
+ """
+ for entry in self._iter_requires_txt_entries():
+ if entry.extra and entry.marker:
+ marker = f'({entry.marker}) and extra == "{safe_extra(entry.extra)}"'
+ elif entry.extra:
+ marker = f'extra == "{safe_extra(entry.extra)}"'
+ elif entry.marker:
+ marker = entry.marker
+ else:
+ marker = ""
+ if marker:
+ yield f"{entry.requirement} ; {marker}"
+ else:
+ yield entry.requirement
+
+ def _add_egg_info_requires(self, metadata: email.message.Message) -> None:
+ """Add egg-info requires.txt information to the metadata."""
+ if not metadata.get_all("Requires-Dist"):
+ for dep in self._iter_egg_info_dependencies():
+ metadata["Requires-Dist"] = dep
+ if not metadata.get_all("Provides-Extra"):
+ for extra in self._iter_egg_info_extras():
+ metadata["Provides-Extra"] = extra
+
+
+class BaseEnvironment:
+ """An environment containing distributions to introspect."""
+
+ @classmethod
+ def default(cls) -> "BaseEnvironment":
+ raise NotImplementedError()
+
+ @classmethod
+ def from_paths(cls, paths: Optional[List[str]]) -> "BaseEnvironment":
+ raise NotImplementedError()
+
+ def get_distribution(self, name: str) -> Optional["BaseDistribution"]:
+ """Given a requirement name, return the installed distributions.
+
+ The name may not be normalized. The implementation must canonicalize
+ it for lookup.
+ """
+ raise NotImplementedError()
+
+ def _iter_distributions(self) -> Iterator["BaseDistribution"]:
+ """Iterate through installed distributions.
+
+        This function should be implemented by subclasses, but never called
+        directly. Use the public ``iter_all_distributions()`` instead, which
+ implements additional logic to make sure the distributions are valid.
+ """
+ raise NotImplementedError()
+
+ def iter_all_distributions(self) -> Iterator[BaseDistribution]:
+ """Iterate through all installed distributions without any filtering."""
+ for dist in self._iter_distributions():
+ # Make sure the distribution actually comes from a valid Python
+ # packaging distribution. Pip's AdjacentTempDirectory leaves folders
+ # e.g. ``~atplotlib.dist-info`` if cleanup was interrupted. The
+ # valid project name pattern is taken from PEP 508.
+ project_name_valid = re.match(
+ r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$",
+ dist.canonical_name,
+ flags=re.IGNORECASE,
+ )
+ if not project_name_valid:
+ logger.warning(
+ "Ignoring invalid distribution %s (%s)",
+ dist.canonical_name,
+ dist.location,
+ )
+ continue
+ yield dist
+
+ def iter_installed_distributions(
+ self,
+ local_only: bool = True,
+ skip: Container[str] = stdlib_pkgs,
+ include_editables: bool = True,
+ editables_only: bool = False,
+ user_only: bool = False,
+ ) -> Iterator[BaseDistribution]:
+ """Return a list of installed distributions.
+
+ This is based on ``iter_all_distributions()`` with additional filtering
+ options. Note that ``iter_installed_distributions()`` without arguments
+ is *not* equal to ``iter_all_distributions()``, since some of the
+ configurations exclude packages by default.
+
+ :param local_only: If True (default), only return installations
+ local to the current virtualenv, if in a virtualenv.
+ :param skip: An iterable of canonicalized project names to ignore;
+ defaults to ``stdlib_pkgs``.
+ :param include_editables: If False, don't report editables.
+ :param editables_only: If True, only report editables.
+ :param user_only: If True, only report installations in the user
+ site directory.
+ """
+ it = self.iter_all_distributions()
+ if local_only:
+ it = (d for d in it if d.local)
+ if not include_editables:
+ it = (d for d in it if not d.editable)
+ if editables_only:
+ it = (d for d in it if d.editable)
+ if user_only:
+ it = (d for d in it if d.in_usersite)
+ return (d for d in it if d.canonical_name not in skip)
+
+
+class Wheel(Protocol):
+ location: str
+
+ def as_zipfile(self) -> zipfile.ZipFile:
+ raise NotImplementedError()
+
+
+class FilesystemWheel(Wheel):
+ def __init__(self, location: str) -> None:
+ self.location = location
+
+ def as_zipfile(self) -> zipfile.ZipFile:
+ return zipfile.ZipFile(self.location, allowZip64=True)
+
+
+class MemoryWheel(Wheel):
+ def __init__(self, location: str, stream: IO[bytes]) -> None:
+ self.location = location
+ self.stream = stream
+
+ def as_zipfile(self) -> zipfile.ZipFile:
+ return zipfile.ZipFile(self.stream, allowZip64=True)
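
To make the ``installed-files.txt`` conversion above concrete, a small sketch; the paths are hypothetical:

    # Entry from installed-files.txt, relative to the .egg-info directory:
    entry = ("..", "demo", "__init__.py")
    # The .egg-info directory, relative to the package root (e.g. site-packages):
    info = ("demo.egg-info",)
    print(_convert_installed_files_path(entry, info))
    # demo/__init__.py  (POSIX separators; backslashes on Windows)
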
diff --git a/third_party/python/pip/pip/_internal/metadata/importlib/__init__.py b/third_party/python/pip/pip/_internal/metadata/importlib/__init__.py
new file mode 100644
index 0000000000..5e7af9fe52
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/metadata/importlib/__init__.py
@@ -0,0 +1,4 @@
+from ._dists import Distribution
+from ._envs import Environment
+
+__all__ = ["Distribution", "Environment"]
diff --git a/third_party/python/pip/pip/_internal/metadata/importlib/_compat.py b/third_party/python/pip/pip/_internal/metadata/importlib/_compat.py
new file mode 100644
index 0000000000..593bff23ed
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/metadata/importlib/_compat.py
@@ -0,0 +1,55 @@
+import importlib.metadata
+from typing import Any, Optional, Protocol, cast
+
+
+class BadMetadata(ValueError):
+ def __init__(self, dist: importlib.metadata.Distribution, *, reason: str) -> None:
+ self.dist = dist
+ self.reason = reason
+
+ def __str__(self) -> str:
+ return f"Bad metadata in {self.dist} ({self.reason})"
+
+
+class BasePath(Protocol):
+ """A protocol that various path objects conform.
+
+ This exists because importlib.metadata uses both ``pathlib.Path`` and
+ ``zipfile.Path``, and we need a common base for type hints (Union does not
+ work well since ``zipfile.Path`` is too new for our linter setup).
+
+    This is not meant to be exhaustive, but only contains things that are
+    present in both classes *that we need*.
+ """
+
+ @property
+ def name(self) -> str:
+ raise NotImplementedError()
+
+ @property
+ def parent(self) -> "BasePath":
+ raise NotImplementedError()
+
+
+def get_info_location(d: importlib.metadata.Distribution) -> Optional[BasePath]:
+ """Find the path to the distribution's metadata directory.
+
+ HACK: This relies on importlib.metadata's private ``_path`` attribute. Not
+ all distributions exist on disk, so importlib.metadata is correct to not
+ expose the attribute as public. But pip's code base is old and not as clean,
+ so we do this to avoid having to rewrite too many things. Hopefully we can
+ eliminate this some day.
+ """
+ return getattr(d, "_path", None)
+
+
+def get_dist_name(dist: importlib.metadata.Distribution) -> str:
+ """Get the distribution's project name.
+
+ The ``name`` attribute is only available in Python 3.10 or later. We are
+ targeting exactly that, but Mypy does not know this.
+ """
+ name = cast(Any, dist).name
+ if not isinstance(name, str):
+ raise BadMetadata(dist, reason="invalid metadata entry 'name'")
+ return name
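
A minimal sketch of how these helpers combine. It assumes some project ("pip" here) is visible to importlib.metadata, and, per the HACK note above, ``get_info_location()`` may return None for distributions without a private ``_path``:

    import importlib.metadata

    dist = importlib.metadata.distribution("pip")  # any installed project works
    print(get_dist_name(dist))      # "pip"
    print(get_info_location(dist))  # e.g. a .dist-info path, or None
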
diff --git a/third_party/python/pip/pip/_internal/metadata/importlib/_dists.py b/third_party/python/pip/pip/_internal/metadata/importlib/_dists.py
new file mode 100644
index 0000000000..65c043c87e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/metadata/importlib/_dists.py
@@ -0,0 +1,224 @@
+import email.message
+import importlib.metadata
+import os
+import pathlib
+import zipfile
+from typing import (
+ Collection,
+ Dict,
+ Iterable,
+ Iterator,
+ Mapping,
+ Optional,
+ Sequence,
+ cast,
+)
+
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+from pip._vendor.packaging.version import parse as parse_version
+
+from pip._internal.exceptions import InvalidWheel, UnsupportedWheel
+from pip._internal.metadata.base import (
+ BaseDistribution,
+ BaseEntryPoint,
+ DistributionVersion,
+ InfoPath,
+ Wheel,
+)
+from pip._internal.utils.misc import normalize_path
+from pip._internal.utils.packaging import safe_extra
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file
+
+from ._compat import BasePath, get_dist_name
+
+
+class WheelDistribution(importlib.metadata.Distribution):
+ """An ``importlib.metadata.Distribution`` read from a wheel.
+
+ Although ``importlib.metadata.PathDistribution`` accepts ``zipfile.Path``,
+ its implementation is too "lazy" for pip's needs (we can't keep the ZipFile
+ handle open for the entire lifetime of the distribution object).
+
+    This implementation eagerly reads the entire metadata directory into
+    memory instead, and operates from that.
+ """
+
+ def __init__(
+ self,
+ files: Mapping[pathlib.PurePosixPath, bytes],
+ info_location: pathlib.PurePosixPath,
+ ) -> None:
+ self._files = files
+ self.info_location = info_location
+
+ @classmethod
+ def from_zipfile(
+ cls,
+ zf: zipfile.ZipFile,
+ name: str,
+ location: str,
+ ) -> "WheelDistribution":
+ info_dir, _ = parse_wheel(zf, name)
+ paths = (
+ (name, pathlib.PurePosixPath(name.split("/", 1)[-1]))
+ for name in zf.namelist()
+ if name.startswith(f"{info_dir}/")
+ )
+ files = {
+ relpath: read_wheel_metadata_file(zf, fullpath)
+ for fullpath, relpath in paths
+ }
+ info_location = pathlib.PurePosixPath(location, info_dir)
+ return cls(files, info_location)
+
+ def iterdir(self, path: InfoPath) -> Iterator[pathlib.PurePosixPath]:
+ # Only allow iterating through the metadata directory.
+ if pathlib.PurePosixPath(str(path)) in self._files:
+ return iter(self._files)
+ raise FileNotFoundError(path)
+
+ def read_text(self, filename: str) -> Optional[str]:
+ try:
+ data = self._files[pathlib.PurePosixPath(filename)]
+ except KeyError:
+ return None
+ try:
+ text = data.decode("utf-8")
+ except UnicodeDecodeError as e:
+ wheel = self.info_location.parent
+ error = f"Error decoding metadata for {wheel}: {e} in {filename} file"
+ raise UnsupportedWheel(error)
+ return text
+
+
+class Distribution(BaseDistribution):
+ def __init__(
+ self,
+ dist: importlib.metadata.Distribution,
+ info_location: Optional[BasePath],
+ installed_location: Optional[BasePath],
+ ) -> None:
+ self._dist = dist
+ self._info_location = info_location
+ self._installed_location = installed_location
+
+ @classmethod
+ def from_directory(cls, directory: str) -> BaseDistribution:
+ info_location = pathlib.Path(directory)
+ dist = importlib.metadata.Distribution.at(info_location)
+ return cls(dist, info_location, info_location.parent)
+
+ @classmethod
+ def from_metadata_file_contents(
+ cls,
+ metadata_contents: bytes,
+ filename: str,
+ project_name: str,
+ ) -> BaseDistribution:
+ # Generate temp dir to contain the metadata file, and write the file contents.
+ temp_dir = pathlib.Path(
+ TempDirectory(kind="metadata", globally_managed=True).path
+ )
+ metadata_path = temp_dir / "METADATA"
+ metadata_path.write_bytes(metadata_contents)
+ # Construct dist pointing to the newly created directory.
+ dist = importlib.metadata.Distribution.at(metadata_path.parent)
+ return cls(dist, metadata_path.parent, None)
+
+ @classmethod
+ def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
+ try:
+ with wheel.as_zipfile() as zf:
+ dist = WheelDistribution.from_zipfile(zf, name, wheel.location)
+ except zipfile.BadZipFile as e:
+ raise InvalidWheel(wheel.location, name) from e
+ except UnsupportedWheel as e:
+ raise UnsupportedWheel(f"{name} has an invalid wheel, {e}")
+ return cls(dist, dist.info_location, pathlib.PurePosixPath(wheel.location))
+
+ @property
+ def location(self) -> Optional[str]:
+ if self._info_location is None:
+ return None
+ return str(self._info_location.parent)
+
+ @property
+ def info_location(self) -> Optional[str]:
+ if self._info_location is None:
+ return None
+ return str(self._info_location)
+
+ @property
+ def installed_location(self) -> Optional[str]:
+ if self._installed_location is None:
+ return None
+ return normalize_path(str(self._installed_location))
+
+ def _get_dist_name_from_location(self) -> Optional[str]:
+ """Try to get the name from the metadata directory name.
+
+ This is much faster than reading metadata.
+ """
+ if self._info_location is None:
+ return None
+ stem, suffix = os.path.splitext(self._info_location.name)
+ if suffix not in (".dist-info", ".egg-info"):
+ return None
+ return stem.split("-", 1)[0]
+
+ @property
+ def canonical_name(self) -> NormalizedName:
+ name = self._get_dist_name_from_location() or get_dist_name(self._dist)
+ return canonicalize_name(name)
+
+ @property
+ def version(self) -> DistributionVersion:
+ return parse_version(self._dist.version)
+
+ def is_file(self, path: InfoPath) -> bool:
+ return self._dist.read_text(str(path)) is not None
+
+ def iter_distutils_script_names(self) -> Iterator[str]:
+ # A distutils installation is always "flat" (not in e.g. egg form), so
+ # if this distribution's info location is NOT a pathlib.Path (but e.g.
+ # zipfile.Path), it can never contain any distutils scripts.
+ if not isinstance(self._info_location, pathlib.Path):
+ return
+ for child in self._info_location.joinpath("scripts").iterdir():
+ yield child.name
+
+ def read_text(self, path: InfoPath) -> str:
+ content = self._dist.read_text(str(path))
+ if content is None:
+ raise FileNotFoundError(path)
+ return content
+
+ def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
+        # importlib.metadata's EntryPoint structure satisfies BaseEntryPoint.
+ return self._dist.entry_points
+
+ def _metadata_impl(self) -> email.message.Message:
+ # From Python 3.10+, importlib.metadata declares PackageMetadata as the
+ # return type. This protocol is unfortunately a disaster now and misses
+        # a ton of fields that we need, including get() and get_payload(). We
+        # rely on the implementation detail that the object is actually a
+        # Message, until upstream can improve the protocol. (python/cpython#94952)
+ return cast(email.message.Message, self._dist.metadata)
+
+ def iter_provided_extras(self) -> Iterable[str]:
+ return (
+ safe_extra(extra) for extra in self.metadata.get_all("Provides-Extra", [])
+ )
+
+ def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
+ contexts: Sequence[Dict[str, str]] = [{"extra": safe_extra(e)} for e in extras]
+ for req_string in self.metadata.get_all("Requires-Dist", []):
+ req = Requirement(req_string)
+ if not req.marker:
+ yield req
+ elif not extras and req.marker.evaluate({"extra": ""}):
+ yield req
+ elif any(req.marker.evaluate(context) for context in contexts):
+ yield req
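
The extra filtering in ``iter_dependencies()`` reduces to marker evaluation; a minimal sketch using the vendored ``packaging`` (outside pip, import from ``packaging`` directly):

    from pip._vendor.packaging.requirements import Requirement

    req = Requirement('colorama ; extra == "cli"')
    print(req.marker.evaluate({"extra": ""}))     # False: extra not requested
    print(req.marker.evaluate({"extra": "cli"}))  # True: the "cli" extra pulls it in
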
diff --git a/third_party/python/pip/pip/_internal/metadata/importlib/_envs.py b/third_party/python/pip/pip/_internal/metadata/importlib/_envs.py
new file mode 100644
index 0000000000..cbec59e2c6
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/metadata/importlib/_envs.py
@@ -0,0 +1,188 @@
+import functools
+import importlib.metadata
+import logging
+import os
+import pathlib
+import sys
+import zipfile
+import zipimport
+from typing import Iterator, List, Optional, Sequence, Set, Tuple
+
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+
+from pip._internal.metadata.base import BaseDistribution, BaseEnvironment
+from pip._internal.models.wheel import Wheel
+from pip._internal.utils.deprecation import deprecated
+from pip._internal.utils.filetypes import WHEEL_EXTENSION
+
+from ._compat import BadMetadata, BasePath, get_dist_name, get_info_location
+from ._dists import Distribution
+
+logger = logging.getLogger(__name__)
+
+
+def _looks_like_wheel(location: str) -> bool:
+ if not location.endswith(WHEEL_EXTENSION):
+ return False
+ if not os.path.isfile(location):
+ return False
+ if not Wheel.wheel_file_re.match(os.path.basename(location)):
+ return False
+ return zipfile.is_zipfile(location)
+
+
+class _DistributionFinder:
+ """Finder to locate distributions.
+
+ The main purpose of this class is to memoize found distributions' names, so
+    only one distribution is returned for each package name. A lot of pip code
+ assumes this (because it is setuptools's behavior), and not doing the same
+ can potentially cause a distribution in lower precedence path to override a
+ higher precedence one if the caller is not careful.
+
+ Eventually we probably want to make it possible to see lower precedence
+    installations as well. It's a useful feature, after all.
+ """
+
+ FoundResult = Tuple[importlib.metadata.Distribution, Optional[BasePath]]
+
+ def __init__(self) -> None:
+ self._found_names: Set[NormalizedName] = set()
+
+ def _find_impl(self, location: str) -> Iterator[FoundResult]:
+ """Find distributions in a location."""
+ # Skip looking inside a wheel. Since a package inside a wheel is not
+ # always valid (due to .data directories etc.), its .dist-info entry
+ # should not be considered an installed distribution.
+ if _looks_like_wheel(location):
+ return
+ # To know exactly where we find a distribution, we have to feed in the
+ # paths one by one, instead of dumping the list to importlib.metadata.
+ for dist in importlib.metadata.distributions(path=[location]):
+ info_location = get_info_location(dist)
+ try:
+ raw_name = get_dist_name(dist)
+ except BadMetadata as e:
+ logger.warning("Skipping %s due to %s", info_location, e.reason)
+ continue
+ normalized_name = canonicalize_name(raw_name)
+ if normalized_name in self._found_names:
+ continue
+ self._found_names.add(normalized_name)
+ yield dist, info_location
+
+ def find(self, location: str) -> Iterator[BaseDistribution]:
+ """Find distributions in a location.
+
+ The path can be either a directory, or a ZIP archive.
+ """
+ for dist, info_location in self._find_impl(location):
+ if info_location is None:
+ installed_location: Optional[BasePath] = None
+ else:
+ installed_location = info_location.parent
+ yield Distribution(dist, info_location, installed_location)
+
+ def find_linked(self, location: str) -> Iterator[BaseDistribution]:
+ """Read location in egg-link files and return distributions in there.
+
+ The path should be a directory; otherwise this returns nothing. This
+ follows how setuptools does this for compatibility. The first non-empty
+ line in the egg-link is read as a path (resolved against the egg-link's
+ containing directory if relative). Distributions found at that linked
+ location are returned.
+ """
+ path = pathlib.Path(location)
+ if not path.is_dir():
+ return
+ for child in path.iterdir():
+ if child.suffix != ".egg-link":
+ continue
+ with child.open() as f:
+ lines = (line.strip() for line in f)
+ target_rel = next((line for line in lines if line), "")
+ if not target_rel:
+ continue
+ target_location = str(path.joinpath(target_rel))
+ for dist, info_location in self._find_impl(target_location):
+ yield Distribution(dist, info_location, path)
+
+ def _find_eggs_in_dir(self, location: str) -> Iterator[BaseDistribution]:
+ from pip._vendor.pkg_resources import find_distributions
+
+ from pip._internal.metadata import pkg_resources as legacy
+
+ with os.scandir(location) as it:
+ for entry in it:
+ if not entry.name.endswith(".egg"):
+ continue
+ for dist in find_distributions(entry.path):
+ yield legacy.Distribution(dist)
+
+ def _find_eggs_in_zip(self, location: str) -> Iterator[BaseDistribution]:
+ from pip._vendor.pkg_resources import find_eggs_in_zip
+
+ from pip._internal.metadata import pkg_resources as legacy
+
+ try:
+ importer = zipimport.zipimporter(location)
+ except zipimport.ZipImportError:
+ return
+ for dist in find_eggs_in_zip(importer, location):
+ yield legacy.Distribution(dist)
+
+ def find_eggs(self, location: str) -> Iterator[BaseDistribution]:
+ """Find eggs in a location.
+
+ This actually uses the old *pkg_resources* backend. We likely want to
+ deprecate this so we can eventually remove the *pkg_resources*
+ dependency entirely. Before that, this should first emit a deprecation
+ warning for some versions when using the fallback since importing
+ *pkg_resources* is slow for those who don't need it.
+ """
+ if os.path.isdir(location):
+ yield from self._find_eggs_in_dir(location)
+ if zipfile.is_zipfile(location):
+ yield from self._find_eggs_in_zip(location)
+
+
+@functools.lru_cache(maxsize=None)  # Warn about each location exactly once.
+def _emit_egg_deprecation(location: Optional[str]) -> None:
+ deprecated(
+ reason=f"Loading egg at {location} is deprecated.",
+ replacement="to use pip for package installation.",
+ gone_in=None,
+ )
+
+
+class Environment(BaseEnvironment):
+ def __init__(self, paths: Sequence[str]) -> None:
+ self._paths = paths
+
+ @classmethod
+ def default(cls) -> BaseEnvironment:
+ return cls(sys.path)
+
+ @classmethod
+ def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
+ if paths is None:
+ return cls(sys.path)
+ return cls(paths)
+
+ def _iter_distributions(self) -> Iterator[BaseDistribution]:
+ finder = _DistributionFinder()
+ for location in self._paths:
+ yield from finder.find(location)
+ for dist in finder.find_eggs(location):
+ # _emit_egg_deprecation(dist.location) # TODO: Enable this.
+ yield dist
+ # This must go last because that's how pkg_resources tie-breaks.
+ yield from finder.find_linked(location)
+
+ def get_distribution(self, name: str) -> Optional[BaseDistribution]:
+ matches = (
+ distribution
+ for distribution in self.iter_all_distributions()
+ if distribution.canonical_name == canonicalize_name(name)
+ )
+ return next(matches, None)
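
A minimal usage sketch for this backend; the site-packages path is hypothetical, and the name lookup is canonicalized as described in ``get_distribution()``:

    env = Environment.from_paths(["/tmp/venv/lib/python3.11/site-packages"])
    dist = env.get_distribution("Sample_Project")  # also matches "sample-project"
    if dist is not None:
        print(dist.canonical_name, dist.version)
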
diff --git a/third_party/python/pip/pip/_internal/metadata/pkg_resources.py b/third_party/python/pip/pip/_internal/metadata/pkg_resources.py
new file mode 100644
index 0000000000..f330ef12a2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/metadata/pkg_resources.py
@@ -0,0 +1,270 @@
+import email.message
+import email.parser
+import logging
+import os
+import zipfile
+from typing import Collection, Iterable, Iterator, List, Mapping, NamedTuple, Optional
+
+from pip._vendor import pkg_resources
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+from pip._vendor.packaging.version import parse as parse_version
+
+from pip._internal.exceptions import InvalidWheel, NoneMetadataError, UnsupportedWheel
+from pip._internal.utils.egg_link import egg_link_path_from_location
+from pip._internal.utils.misc import display_path, normalize_path
+from pip._internal.utils.wheel import parse_wheel, read_wheel_metadata_file
+
+from .base import (
+ BaseDistribution,
+ BaseEntryPoint,
+ BaseEnvironment,
+ DistributionVersion,
+ InfoPath,
+ Wheel,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class EntryPoint(NamedTuple):
+ name: str
+ value: str
+ group: str
+
+
+class InMemoryMetadata:
+ """IMetadataProvider that reads metadata files from a dictionary.
+
+ This also maps metadata decoding exceptions to our internal exception type.
+ """
+
+ def __init__(self, metadata: Mapping[str, bytes], wheel_name: str) -> None:
+ self._metadata = metadata
+ self._wheel_name = wheel_name
+
+ def has_metadata(self, name: str) -> bool:
+ return name in self._metadata
+
+ def get_metadata(self, name: str) -> str:
+ try:
+ return self._metadata[name].decode()
+ except UnicodeDecodeError as e:
+ # Augment the default error with the origin of the file.
+ raise UnsupportedWheel(
+ f"Error decoding metadata for {self._wheel_name}: {e} in {name} file"
+ )
+
+ def get_metadata_lines(self, name: str) -> Iterable[str]:
+ return pkg_resources.yield_lines(self.get_metadata(name))
+
+ def metadata_isdir(self, name: str) -> bool:
+ return False
+
+ def metadata_listdir(self, name: str) -> List[str]:
+ return []
+
+ def run_script(self, script_name: str, namespace: str) -> None:
+ pass
+
+
+class Distribution(BaseDistribution):
+ def __init__(self, dist: pkg_resources.Distribution) -> None:
+ self._dist = dist
+
+ @classmethod
+ def from_directory(cls, directory: str) -> BaseDistribution:
+ dist_dir = directory.rstrip(os.sep)
+
+ # Build a PathMetadata object, from path to metadata. :wink:
+ base_dir, dist_dir_name = os.path.split(dist_dir)
+ metadata = pkg_resources.PathMetadata(base_dir, dist_dir)
+
+ # Determine the correct Distribution object type.
+ if dist_dir.endswith(".egg-info"):
+ dist_cls = pkg_resources.Distribution
+ dist_name = os.path.splitext(dist_dir_name)[0]
+ else:
+ assert dist_dir.endswith(".dist-info")
+ dist_cls = pkg_resources.DistInfoDistribution
+ dist_name = os.path.splitext(dist_dir_name)[0].split("-")[0]
+
+ dist = dist_cls(base_dir, project_name=dist_name, metadata=metadata)
+ return cls(dist)
+
+ @classmethod
+ def from_metadata_file_contents(
+ cls,
+ metadata_contents: bytes,
+ filename: str,
+ project_name: str,
+ ) -> BaseDistribution:
+ metadata_dict = {
+ "METADATA": metadata_contents,
+ }
+ dist = pkg_resources.DistInfoDistribution(
+ location=filename,
+ metadata=InMemoryMetadata(metadata_dict, filename),
+ project_name=project_name,
+ )
+ return cls(dist)
+
+ @classmethod
+ def from_wheel(cls, wheel: Wheel, name: str) -> BaseDistribution:
+ try:
+ with wheel.as_zipfile() as zf:
+ info_dir, _ = parse_wheel(zf, name)
+ metadata_dict = {
+ path.split("/", 1)[-1]: read_wheel_metadata_file(zf, path)
+ for path in zf.namelist()
+ if path.startswith(f"{info_dir}/")
+ }
+ except zipfile.BadZipFile as e:
+ raise InvalidWheel(wheel.location, name) from e
+ except UnsupportedWheel as e:
+ raise UnsupportedWheel(f"{name} has an invalid wheel, {e}")
+ dist = pkg_resources.DistInfoDistribution(
+ location=wheel.location,
+ metadata=InMemoryMetadata(metadata_dict, wheel.location),
+ project_name=name,
+ )
+ return cls(dist)
+
+ @property
+ def location(self) -> Optional[str]:
+ return self._dist.location
+
+ @property
+ def installed_location(self) -> Optional[str]:
+ egg_link = egg_link_path_from_location(self.raw_name)
+ if egg_link:
+ location = egg_link
+ elif self.location:
+ location = self.location
+ else:
+ return None
+ return normalize_path(location)
+
+ @property
+ def info_location(self) -> Optional[str]:
+ return self._dist.egg_info
+
+ @property
+ def installed_by_distutils(self) -> bool:
+ # A distutils-installed distribution is provided by FileMetadata. This
+ # provider has a "path" attribute not present anywhere else. Not the
+ # best introspection logic, but pip has been doing this for a long time.
+ try:
+ return bool(self._dist._provider.path)
+ except AttributeError:
+ return False
+
+ @property
+ def canonical_name(self) -> NormalizedName:
+ return canonicalize_name(self._dist.project_name)
+
+ @property
+ def version(self) -> DistributionVersion:
+ return parse_version(self._dist.version)
+
+ def is_file(self, path: InfoPath) -> bool:
+ return self._dist.has_metadata(str(path))
+
+ def iter_distutils_script_names(self) -> Iterator[str]:
+ yield from self._dist.metadata_listdir("scripts")
+
+ def read_text(self, path: InfoPath) -> str:
+ name = str(path)
+ if not self._dist.has_metadata(name):
+ raise FileNotFoundError(name)
+ content = self._dist.get_metadata(name)
+ if content is None:
+ raise NoneMetadataError(self, name)
+ return content
+
+ def iter_entry_points(self) -> Iterable[BaseEntryPoint]:
+ for group, entries in self._dist.get_entry_map().items():
+ for name, entry_point in entries.items():
+ name, _, value = str(entry_point).partition("=")
+ yield EntryPoint(name=name.strip(), value=value.strip(), group=group)
+
+ def _metadata_impl(self) -> email.message.Message:
+ """
+ :raises NoneMetadataError: if the distribution reports `has_metadata()`
+ True but `get_metadata()` returns None.
+ """
+ if isinstance(self._dist, pkg_resources.DistInfoDistribution):
+ metadata_name = "METADATA"
+ else:
+ metadata_name = "PKG-INFO"
+ try:
+ metadata = self.read_text(metadata_name)
+ except FileNotFoundError:
+ if self.location:
+ displaying_path = display_path(self.location)
+ else:
+ displaying_path = repr(self.location)
+ logger.warning("No metadata found in %s", displaying_path)
+ metadata = ""
+ feed_parser = email.parser.FeedParser()
+ feed_parser.feed(metadata)
+ return feed_parser.close()
+
+ def iter_dependencies(self, extras: Collection[str] = ()) -> Iterable[Requirement]:
+ if extras: # pkg_resources raises on invalid extras, so we sanitize.
+ extras = frozenset(extras).intersection(self._dist.extras)
+ return self._dist.requires(extras)
+
+ def iter_provided_extras(self) -> Iterable[str]:
+ return self._dist.extras
+
+
+class Environment(BaseEnvironment):
+ def __init__(self, ws: pkg_resources.WorkingSet) -> None:
+ self._ws = ws
+
+ @classmethod
+ def default(cls) -> BaseEnvironment:
+ return cls(pkg_resources.working_set)
+
+ @classmethod
+ def from_paths(cls, paths: Optional[List[str]]) -> BaseEnvironment:
+ return cls(pkg_resources.WorkingSet(paths))
+
+ def _iter_distributions(self) -> Iterator[BaseDistribution]:
+ for dist in self._ws:
+ yield Distribution(dist)
+
+ def _search_distribution(self, name: str) -> Optional[BaseDistribution]:
+ """Find a distribution matching the ``name`` in the environment.
+
+ This searches from *all* distributions available in the environment, to
+ match the behavior of ``pkg_resources.get_distribution()``.
+ """
+ canonical_name = canonicalize_name(name)
+ for dist in self.iter_all_distributions():
+ if dist.canonical_name == canonical_name:
+ return dist
+ return None
+
+ def get_distribution(self, name: str) -> Optional[BaseDistribution]:
+ # Search the distribution by looking through the working set.
+ dist = self._search_distribution(name)
+ if dist:
+ return dist
+
+ # If distribution could not be found, call working_set.require to
+ # update the working set, and try to find the distribution again.
+        # This might happen e.g. when you install a package twice, once
+        # using setup.py develop and again using setup.py install. Now when
+        # running pip uninstall twice, the package gets removed from the
+        # working set in the first uninstall, so we have to populate the
+        # working set again so that pip knows about it and the package gets
+        # picked up and is successfully uninstalled the second time too.
+ try:
+ # We didn't pass in any version specifiers, so this can never
+ # raise pkg_resources.VersionConflict.
+ self._ws.require(name)
+ except pkg_resources.DistributionNotFound:
+ return None
+ return self._search_distribution(name)
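
The name matching in ``_search_distribution()`` relies on PEP 503 canonicalization; a minimal sketch with hypothetical project names:

    from pip._vendor.packaging.utils import canonicalize_name

    # All of these spellings resolve to the same lookup key.
    for name in ("Sample_Project", "sample.project", "SAMPLE-PROJECT"):
        print(canonicalize_name(name))  # sample-project
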
diff --git a/third_party/python/pip/pip/_internal/models/__init__.py b/third_party/python/pip/pip/_internal/models/__init__.py
new file mode 100644
index 0000000000..7855226e4b
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/__init__.py
@@ -0,0 +1,2 @@
+"""A package that contains models that represent entities.
+"""
diff --git a/third_party/python/pip/pip/_internal/models/candidate.py b/third_party/python/pip/pip/_internal/models/candidate.py
new file mode 100644
index 0000000000..a4963aec63
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/candidate.py
@@ -0,0 +1,34 @@
+from pip._vendor.packaging.version import parse as parse_version
+
+from pip._internal.models.link import Link
+from pip._internal.utils.models import KeyBasedCompareMixin
+
+
+class InstallationCandidate(KeyBasedCompareMixin):
+ """Represents a potential "candidate" for installation."""
+
+ __slots__ = ["name", "version", "link"]
+
+ def __init__(self, name: str, version: str, link: Link) -> None:
+ self.name = name
+ self.version = parse_version(version)
+ self.link = link
+
+ super().__init__(
+ key=(self.name, self.version, self.link),
+ defining_class=InstallationCandidate,
+ )
+
+ def __repr__(self) -> str:
+ return "<InstallationCandidate({!r}, {!r}, {!r})>".format(
+ self.name,
+ self.version,
+ self.link,
+ )
+
+ def __str__(self) -> str:
+ return "{!r} candidate (version {} at {})".format(
+ self.name,
+ self.version,
+ self.link,
+ )
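
Since ``KeyBasedCompareMixin`` compares candidates by the ``(name, version, link)`` key, ordering works out of the box; a minimal sketch with hypothetical URLs:

    from pip._internal.models.link import Link

    c1 = InstallationCandidate("demo", "1.0", Link("https://example.com/demo-1.0.tar.gz"))
    c2 = InstallationCandidate("demo", "1.1", Link("https://example.com/demo-1.1.tar.gz"))
    print(c1 < c2)  # True: names tie, so the parsed versions decide
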
diff --git a/third_party/python/pip/pip/_internal/models/direct_url.py b/third_party/python/pip/pip/_internal/models/direct_url.py
new file mode 100644
index 0000000000..c3de70a749
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/direct_url.py
@@ -0,0 +1,228 @@
+""" PEP 610 """
+import json
+import re
+import urllib.parse
+from typing import Any, Dict, Iterable, Optional, Type, TypeVar, Union
+
+__all__ = [
+ "DirectUrl",
+ "DirectUrlValidationError",
+ "DirInfo",
+ "ArchiveInfo",
+ "VcsInfo",
+]
+
+T = TypeVar("T")
+
+DIRECT_URL_METADATA_NAME = "direct_url.json"
+ENV_VAR_RE = re.compile(r"^\$\{[A-Za-z0-9-_]+\}(:\$\{[A-Za-z0-9-_]+\})?$")
+
+
+class DirectUrlValidationError(Exception):
+ pass
+
+
+def _get(
+ d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
+) -> Optional[T]:
+ """Get value from dictionary and verify expected type."""
+ if key not in d:
+ return default
+ value = d[key]
+ if not isinstance(value, expected_type):
+ raise DirectUrlValidationError(
+ "{!r} has unexpected type for {} (expected {})".format(
+ value, key, expected_type
+ )
+ )
+ return value
+
+
+def _get_required(
+ d: Dict[str, Any], expected_type: Type[T], key: str, default: Optional[T] = None
+) -> T:
+ value = _get(d, expected_type, key, default)
+ if value is None:
+ raise DirectUrlValidationError(f"{key} must have a value")
+ return value
+
+
+def _exactly_one_of(infos: Iterable[Optional["InfoType"]]) -> "InfoType":
+ infos = [info for info in infos if info is not None]
+ if not infos:
+ raise DirectUrlValidationError(
+ "missing one of archive_info, dir_info, vcs_info"
+ )
+ if len(infos) > 1:
+ raise DirectUrlValidationError(
+ "more than one of archive_info, dir_info, vcs_info"
+ )
+ assert infos[0] is not None
+ return infos[0]
+
+
+def _filter_none(**kwargs: Any) -> Dict[str, Any]:
+ """Make dict excluding None values."""
+ return {k: v for k, v in kwargs.items() if v is not None}
+
+
+class VcsInfo:
+ name = "vcs_info"
+
+ def __init__(
+ self,
+ vcs: str,
+ commit_id: str,
+ requested_revision: Optional[str] = None,
+ ) -> None:
+ self.vcs = vcs
+ self.requested_revision = requested_revision
+ self.commit_id = commit_id
+
+ @classmethod
+ def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["VcsInfo"]:
+ if d is None:
+ return None
+ return cls(
+ vcs=_get_required(d, str, "vcs"),
+ commit_id=_get_required(d, str, "commit_id"),
+ requested_revision=_get(d, str, "requested_revision"),
+ )
+
+ def _to_dict(self) -> Dict[str, Any]:
+ return _filter_none(
+ vcs=self.vcs,
+ requested_revision=self.requested_revision,
+ commit_id=self.commit_id,
+ )
+
+
+class ArchiveInfo:
+ name = "archive_info"
+
+ def __init__(
+ self,
+ hash: Optional[str] = None,
+ hashes: Optional[Dict[str, str]] = None,
+ ) -> None:
+ if hash is not None:
+ # Auto-populate the hashes key to upgrade to the new format automatically.
+ # We don't back-populate the legacy hash key.
+ try:
+ hash_name, hash_value = hash.split("=", 1)
+ except ValueError:
+ raise DirectUrlValidationError(
+ f"invalid archive_info.hash format: {hash!r}"
+ )
+ if hashes is None:
+ hashes = {hash_name: hash_value}
+            elif hash_name not in hashes:
+ hashes = hashes.copy()
+ hashes[hash_name] = hash_value
+ self.hash = hash
+ self.hashes = hashes
+
+ @classmethod
+ def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["ArchiveInfo"]:
+ if d is None:
+ return None
+ return cls(hash=_get(d, str, "hash"), hashes=_get(d, dict, "hashes"))
+
+ def _to_dict(self) -> Dict[str, Any]:
+ return _filter_none(hash=self.hash, hashes=self.hashes)
+
+
+class DirInfo:
+ name = "dir_info"
+
+ def __init__(
+ self,
+ editable: bool = False,
+ ) -> None:
+ self.editable = editable
+
+ @classmethod
+ def _from_dict(cls, d: Optional[Dict[str, Any]]) -> Optional["DirInfo"]:
+ if d is None:
+ return None
+ return cls(editable=_get_required(d, bool, "editable", default=False))
+
+ def _to_dict(self) -> Dict[str, Any]:
+ return _filter_none(editable=self.editable or None)
+
+
+InfoType = Union[ArchiveInfo, DirInfo, VcsInfo]
+
+
+class DirectUrl:
+ def __init__(
+ self,
+ url: str,
+ info: InfoType,
+ subdirectory: Optional[str] = None,
+ ) -> None:
+ self.url = url
+ self.info = info
+ self.subdirectory = subdirectory
+
+ def _remove_auth_from_netloc(self, netloc: str) -> str:
+ if "@" not in netloc:
+ return netloc
+ user_pass, netloc_no_user_pass = netloc.split("@", 1)
+ if (
+ isinstance(self.info, VcsInfo)
+ and self.info.vcs == "git"
+ and user_pass == "git"
+ ):
+ return netloc
+ if ENV_VAR_RE.match(user_pass):
+ return netloc
+ return netloc_no_user_pass
+
+ @property
+ def redacted_url(self) -> str:
+ """url with user:password part removed unless it is formed with
+ environment variables as specified in PEP 610, or it is ``git``
+ in the case of a git URL.
+ """
+ purl = urllib.parse.urlsplit(self.url)
+ netloc = self._remove_auth_from_netloc(purl.netloc)
+ surl = urllib.parse.urlunsplit(
+ (purl.scheme, netloc, purl.path, purl.query, purl.fragment)
+ )
+ return surl
+
+ def validate(self) -> None:
+ self.from_dict(self.to_dict())
+
+ @classmethod
+ def from_dict(cls, d: Dict[str, Any]) -> "DirectUrl":
+ return DirectUrl(
+ url=_get_required(d, str, "url"),
+ subdirectory=_get(d, str, "subdirectory"),
+ info=_exactly_one_of(
+ [
+ ArchiveInfo._from_dict(_get(d, dict, "archive_info")),
+ DirInfo._from_dict(_get(d, dict, "dir_info")),
+ VcsInfo._from_dict(_get(d, dict, "vcs_info")),
+ ]
+ ),
+ )
+
+ def to_dict(self) -> Dict[str, Any]:
+ res = _filter_none(
+ url=self.redacted_url,
+ subdirectory=self.subdirectory,
+ )
+ res[self.info.name] = self.info._to_dict()
+ return res
+
+ @classmethod
+ def from_json(cls, s: str) -> "DirectUrl":
+ return cls.from_dict(json.loads(s))
+
+ def to_json(self) -> str:
+ return json.dumps(self.to_dict(), sort_keys=True)
+
+ def is_local_editable(self) -> bool:
+ return isinstance(self.info, DirInfo) and self.info.editable
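
A minimal round-trip sketch for ``DirectUrl``; the URL and hash value are hypothetical. Note how the legacy ``hash`` key auto-populates ``hashes``, and how credentials are redacted on output:

    doc = (
        '{"url": "https://user:secret@example.com/app-1.0.tar.gz",'
        ' "archive_info": {"hash": "sha256=deadbeef"}}'
    )
    direct_url = DirectUrl.from_json(doc)
    print(direct_url.redacted_url)  # https://example.com/app-1.0.tar.gz
    print(direct_url.to_json())
    # {"archive_info": {"hash": "sha256=deadbeef", "hashes": {"sha256": "deadbeef"}},
    #  "url": "https://example.com/app-1.0.tar.gz"}
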
diff --git a/third_party/python/pip/pip/_internal/models/format_control.py b/third_party/python/pip/pip/_internal/models/format_control.py
new file mode 100644
index 0000000000..db3995eac9
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/format_control.py
@@ -0,0 +1,80 @@
+from typing import FrozenSet, Optional, Set
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.exceptions import CommandError
+
+
+class FormatControl:
+ """Helper for managing formats from which a package can be installed."""
+
+ __slots__ = ["no_binary", "only_binary"]
+
+ def __init__(
+ self,
+ no_binary: Optional[Set[str]] = None,
+ only_binary: Optional[Set[str]] = None,
+ ) -> None:
+ if no_binary is None:
+ no_binary = set()
+ if only_binary is None:
+ only_binary = set()
+
+ self.no_binary = no_binary
+ self.only_binary = only_binary
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, self.__class__):
+ return NotImplemented
+
+ if self.__slots__ != other.__slots__:
+ return False
+
+ return all(getattr(self, k) == getattr(other, k) for k in self.__slots__)
+
+ def __repr__(self) -> str:
+ return "{}({}, {})".format(
+ self.__class__.__name__, self.no_binary, self.only_binary
+ )
+
+ @staticmethod
+ def handle_mutual_excludes(value: str, target: Set[str], other: Set[str]) -> None:
+ if value.startswith("-"):
+ raise CommandError(
+ "--no-binary / --only-binary option requires 1 argument."
+ )
+ new = value.split(",")
+ while ":all:" in new:
+ other.clear()
+ target.clear()
+ target.add(":all:")
+ del new[: new.index(":all:") + 1]
+ # Without a none, we want to discard everything as :all: covers it
+ if ":none:" not in new:
+ return
+ for name in new:
+ if name == ":none:":
+ target.clear()
+ continue
+ name = canonicalize_name(name)
+ other.discard(name)
+ target.add(name)
+
+ def get_allowed_formats(self, canonical_name: str) -> FrozenSet[str]:
+ result = {"binary", "source"}
+ if canonical_name in self.only_binary:
+ result.discard("source")
+ elif canonical_name in self.no_binary:
+ result.discard("binary")
+ elif ":all:" in self.only_binary:
+ result.discard("source")
+ elif ":all:" in self.no_binary:
+ result.discard("binary")
+ return frozenset(result)
+
+ def disallow_binaries(self) -> None:
+ self.handle_mutual_excludes(
+ ":all:",
+ self.no_binary,
+ self.only_binary,
+ )
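
A minimal sketch of the ``:all:`` / ``:none:`` semantics handled above; "demo" is a hypothetical project name:

    fc = FormatControl()
    # ":all:" clears both sets; a later ":none:" resets, then names re-populate.
    FormatControl.handle_mutual_excludes(":all:,:none:,demo", fc.no_binary, fc.only_binary)
    print(fc.no_binary)                     # {'demo'}
    print(fc.get_allowed_formats("demo"))   # frozenset({'source'})
    print(fc.get_allowed_formats("other"))  # frozenset({'binary', 'source'})
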
diff --git a/third_party/python/pip/pip/_internal/models/index.py b/third_party/python/pip/pip/_internal/models/index.py
new file mode 100644
index 0000000000..b94c32511f
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/index.py
@@ -0,0 +1,28 @@
+import urllib.parse
+
+
+class PackageIndex:
+ """Represents a Package Index and provides easier access to endpoints"""
+
+ __slots__ = ["url", "netloc", "simple_url", "pypi_url", "file_storage_domain"]
+
+ def __init__(self, url: str, file_storage_domain: str) -> None:
+ super().__init__()
+ self.url = url
+ self.netloc = urllib.parse.urlsplit(url).netloc
+ self.simple_url = self._url_for_path("simple")
+ self.pypi_url = self._url_for_path("pypi")
+
+ # This is part of a temporary hack used to block installs of PyPI
+ # packages which depend on external urls; it is only necessary until
+ # PyPI can block such packages itself.
+ self.file_storage_domain = file_storage_domain
+
+ def _url_for_path(self, path: str) -> str:
+ return urllib.parse.urljoin(self.url, path)
+
+
+PyPI = PackageIndex("https://pypi.org/", file_storage_domain="files.pythonhosted.org")
+TestPyPI = PackageIndex(
+ "https://test.pypi.org/", file_storage_domain="test-files.pythonhosted.org"
+)
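[Editor's sketch] For reference, the derived attributes on the PyPI constant work out as follows; urljoin() keeps the full base here because the base URL ends in a slash:

    assert PyPI.netloc == "pypi.org"
    assert PyPI.simple_url == "https://pypi.org/simple"
    assert PyPI.pypi_url == "https://pypi.org/pypi"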
diff --git a/third_party/python/pip/pip/_internal/models/installation_report.py b/third_party/python/pip/pip/_internal/models/installation_report.py
new file mode 100644
index 0000000000..b54afb109b
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/installation_report.py
@@ -0,0 +1,53 @@
+from typing import Any, Dict, Sequence
+
+from pip._vendor.packaging.markers import default_environment
+
+from pip import __version__
+from pip._internal.req.req_install import InstallRequirement
+
+
+class InstallationReport:
+ def __init__(self, install_requirements: Sequence[InstallRequirement]):
+ self._install_requirements = install_requirements
+
+ @classmethod
+ def _install_req_to_dict(cls, ireq: InstallRequirement) -> Dict[str, Any]:
+ assert ireq.download_info, f"No download_info for {ireq}"
+ res = {
+ # PEP 610 json for the download URL. download_info.archive_info.hash may
+ # be absent when the requirement was installed from the wheel cache
+ # and the cache entry was populated by an older pip version that did not
+ # record origin.json.
+ "download_info": ireq.download_info.to_dict(),
+ # is_direct is true if the requirement was a direct URL reference (which
+ # includes editable requirements), and false if the requirement was
+ # downloaded from a PEP 503 index or --find-links.
+ "is_direct": bool(ireq.original_link),
+ # requested is true if the requirement was specified by the user (aka
+ # top level requirement), and false if it was installed as a dependency of a
+ # requirement. https://peps.python.org/pep-0376/#requested
+ "requested": ireq.user_supplied,
+ # PEP 566 json encoding for metadata
+ # https://www.python.org/dev/peps/pep-0566/#json-compatible-metadata
+ "metadata": ireq.get_dist().metadata_dict,
+ }
+ if ireq.user_supplied and ireq.extras:
+ # For top level requirements, the list of requested extras, if any.
+ res["requested_extras"] = list(sorted(ireq.extras))
+ return res
+
+ def to_dict(self) -> Dict[str, Any]:
+ return {
+ "version": "1",
+ "pip_version": __version__,
+ "install": [
+ self._install_req_to_dict(ireq) for ireq in self._install_requirements
+ ],
+ # https://peps.python.org/pep-0508/#environment-markers
+ # TODO: currently, the resolver uses the default environment to evaluate
+ # environment markers, so that is what we report here. In the future, it
+ # should also take into account options such as --python-version or
+ # --platform, perhaps under the form of an environment_override field?
+ # https://github.com/pypa/pip/issues/11198
+ "environment": default_environment(),
+ }
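[Editor's sketch] The resulting JSON can be consumed outside pip. A minimal sketch, assuming a report.json produced by `pip install --report report.json <requirement>`:

    import json

    with open("report.json") as f:
        report = json.load(f)

    assert report["version"] == "1"
    for item in report["install"]:
        # "name" comes from the PEP 566 JSON-encoded metadata
        print(item["metadata"]["name"], item["requested"], item["is_direct"])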
diff --git a/third_party/python/pip/pip/_internal/models/link.py b/third_party/python/pip/pip/_internal/models/link.py
new file mode 100644
index 0000000000..a1e4d5a08d
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/link.py
@@ -0,0 +1,524 @@
+import functools
+import itertools
+import logging
+import os
+import posixpath
+import re
+import urllib.parse
+from dataclasses import dataclass
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ List,
+ Mapping,
+ NamedTuple,
+ Optional,
+ Tuple,
+ Union,
+)
+
+from pip._internal.utils.deprecation import deprecated
+from pip._internal.utils.filetypes import WHEEL_EXTENSION
+from pip._internal.utils.hashes import Hashes
+from pip._internal.utils.misc import (
+ pairwise,
+ redact_auth_from_url,
+ split_auth_from_netloc,
+ splitext,
+)
+from pip._internal.utils.models import KeyBasedCompareMixin
+from pip._internal.utils.urls import path_to_url, url_to_path
+
+if TYPE_CHECKING:
+ from pip._internal.index.collector import IndexContent
+
+logger = logging.getLogger(__name__)
+
+
+# Order matters: earlier hashes take precedence over later hashes when
+# choosing which one to use.
+_SUPPORTED_HASHES = ("sha512", "sha384", "sha256", "sha224", "sha1", "md5")
+
+
+@dataclass(frozen=True)
+class LinkHash:
+ """Links to content may have embedded hash values. This class parses those.
+
+ `name` must be a member of `_SUPPORTED_HASHES`.
+
+ This class can be converted to and from `ArchiveInfo`. While ArchiveInfo is
+ intended to be JSON-serializable to conform to PEP 610, this class contains
+ the logic for parsing a hash name and value for correctness, and for checking
+ whether that hash conforms to a schema with `.is_hash_allowed()`."""
+
+ name: str
+ value: str
+
+ _hash_re = re.compile(
+ # NB: we do not validate that the second group (.*) is a valid hex
+ # digest. Instead, we simply keep that string in this class, and then check it
+ # against Hashes when hash-checking is needed. This is easier to debug than
+ # proactively discarding an invalid hex digest, as we handle incorrect hashes
+ # and malformed hashes in the same place.
+ r"({choices})=(.*)".format(
+ choices="|".join(re.escape(hash_name) for hash_name in _SUPPORTED_HASHES)
+ ),
+ )
+
+ def __post_init__(self) -> None:
+ assert self._hash_re.match(f"{self.name}={self.value}")
+
+ @classmethod
+ @functools.lru_cache(maxsize=None)
+ def split_hash_name_and_value(cls, url: str) -> Optional["LinkHash"]:
+ """Search a string for a checksum algorithm name and encoded output value."""
+ match = cls._hash_re.search(url)
+ if match is None:
+ return None
+ name, value = match.groups()
+ return cls(name=name, value=value)
+
+ def as_dict(self) -> Dict[str, str]:
+ return {self.name: self.value}
+
+ def as_hashes(self) -> Hashes:
+ """Return a Hashes instance which checks only for the current hash."""
+ return Hashes({self.name: [self.value]})
+
+ def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
+ """
+ Return True if the current hash is allowed by `hashes`.
+ """
+ if hashes is None:
+ return False
+ return hashes.is_hash_allowed(self.name, hex_digest=self.value)
+
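[Editor's sketch] A quick illustration of the fragment parsing, using a dummy all-zero digest and a hypothetical URL:

    url = "https://example.com/pkg-1.0.tar.gz#sha256=" + "0" * 64
    link_hash = LinkHash.split_hash_name_and_value(url)
    assert link_hash is not None
    assert link_hash.name == "sha256"
    assert link_hash.as_dict() == {"sha256": "0" * 64}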
+
+def _clean_url_path_part(part: str) -> str:
+ """
+ Clean a "part" of a URL path (i.e. after splitting on "@" characters).
+ """
+ # We unquote prior to quoting to make sure nothing is double quoted.
+ return urllib.parse.quote(urllib.parse.unquote(part))
+
+
+def _clean_file_url_path(part: str) -> str:
+ """
+ Clean the first part of a URL path that corresponds to a local
+ filesystem path (i.e. the first part after splitting on "@" characters).
+ """
+ # We unquote prior to quoting to make sure nothing is double quoted.
+ # Also, on Windows the path part might contain a drive letter which
+ # should not be quoted. On Linux where drive letters do not
+ # exist, the colon should be quoted. We rely on urllib.request
+ # to do the right thing here.
+ return urllib.request.pathname2url(urllib.request.url2pathname(part))
+
+
+# "%2F" is the percent-encoded form of "/".
+_reserved_chars_re = re.compile("(@|%2F)", re.IGNORECASE)
+
+
+def _clean_url_path(path: str, is_local_path: bool) -> str:
+ """
+ Clean the path portion of a URL.
+ """
+ if is_local_path:
+ clean_func = _clean_file_url_path
+ else:
+ clean_func = _clean_url_path_part
+
+ # Split on the reserved characters prior to cleaning so that
+ # revision strings in VCS URLs are properly preserved.
+ parts = _reserved_chars_re.split(path)
+
+ cleaned_parts = []
+ for to_clean, reserved in pairwise(itertools.chain(parts, [""])):
+ cleaned_parts.append(clean_func(to_clean))
+ # Normalize %xx escapes (e.g. %2f -> %2F)
+ cleaned_parts.append(reserved.upper())
+
+ return "".join(cleaned_parts)
+
+
+def _ensure_quoted_url(url: str) -> str:
+ """
+ Make sure a link is fully quoted.
+ For example, if ' ' occurs in the URL, it will be replaced with "%20",
+ without double-quoting characters that are already percent-encoded.
+ """
+ # Split the URL into parts according to the general structure
+ # `scheme://netloc/path;parameters?query#fragment`.
+ result = urllib.parse.urlparse(url)
+ # If the netloc is empty, then the URL refers to a local filesystem path.
+ is_local_path = not result.netloc
+ path = _clean_url_path(result.path, is_local_path=is_local_path)
+ return urllib.parse.urlunparse(result._replace(path=path))
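[Editor's sketch] For example, with a hypothetical URL; the unquote-then-quote dance in the helpers above is what makes this idempotent:

    assert _ensure_quoted_url(
        "https://example.com/some wheel.whl"
    ) == "https://example.com/some%20wheel.whl"
    # Already-quoted input passes through unchanged instead of being
    # double-quoted to "%2520":
    assert _ensure_quoted_url(
        "https://example.com/some%20wheel.whl"
    ) == "https://example.com/some%20wheel.whl"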
+
+
+class Link(KeyBasedCompareMixin):
+ """Represents a parsed link from a Package Index's simple URL"""
+
+ __slots__ = [
+ "_parsed_url",
+ "_url",
+ "_hashes",
+ "comes_from",
+ "requires_python",
+ "yanked_reason",
+ "dist_info_metadata",
+ "cache_link_parsing",
+ "egg_fragment",
+ ]
+
+ def __init__(
+ self,
+ url: str,
+ comes_from: Optional[Union[str, "IndexContent"]] = None,
+ requires_python: Optional[str] = None,
+ yanked_reason: Optional[str] = None,
+ dist_info_metadata: Optional[str] = None,
+ cache_link_parsing: bool = True,
+ hashes: Optional[Mapping[str, str]] = None,
+ ) -> None:
+ """
+ :param url: url of the resource pointed to (href of the link)
+ :param comes_from: instance of IndexContent where the link was found,
+ or string.
+ :param requires_python: String containing the `Requires-Python`
+ metadata field, specified in PEP 345. This may be specified by
+ a data-requires-python attribute in the HTML link tag, as
+ described in PEP 503.
+ :param yanked_reason: the reason the file has been yanked, if the
+ file has been yanked, or None if the file hasn't been yanked.
+ This is the value of the "data-yanked" attribute, if present, in
+ a simple repository HTML link. If the file has been yanked but
+ no reason was provided, this should be the empty string. See
+ PEP 592 for more information and the specification.
+ :param dist_info_metadata: the metadata attached to the file, or None if no such
+ metadata is provided. This is the value of the "data-dist-info-metadata"
+ attribute, if present, in a simple repository HTML link. This may be parsed
+ into its own `Link` by `self.metadata_link()`. See PEP 658 for more
+ information and the specification.
+ :param cache_link_parsing: A flag that is used elsewhere to determine
+ whether resources retrieved from this link should be cached. PyPI
+ URLs should generally have this set to False, for example.
+ :param hashes: A mapping of hash names to digests to allow us to
+ determine the validity of a download.
+ """
+
+ # url can be a UNC windows share
+ if url.startswith("\\\\"):
+ url = path_to_url(url)
+
+ self._parsed_url = urllib.parse.urlsplit(url)
+ # Store the url as a private attribute to prevent accidentally
+ # trying to set a new value.
+ self._url = url
+
+ link_hash = LinkHash.split_hash_name_and_value(url)
+ hashes_from_link = {} if link_hash is None else link_hash.as_dict()
+ if hashes is None:
+ self._hashes = hashes_from_link
+ else:
+ self._hashes = {**hashes, **hashes_from_link}
+
+ self.comes_from = comes_from
+ self.requires_python = requires_python if requires_python else None
+ self.yanked_reason = yanked_reason
+ self.dist_info_metadata = dist_info_metadata
+
+ super().__init__(key=url, defining_class=Link)
+
+ self.cache_link_parsing = cache_link_parsing
+ self.egg_fragment = self._egg_fragment()
+
+ @classmethod
+ def from_json(
+ cls,
+ file_data: Dict[str, Any],
+ page_url: str,
+ ) -> Optional["Link"]:
+ """
+ Convert a PyPI JSON document from a simple repository page into a Link.
+ """
+ file_url = file_data.get("url")
+ if file_url is None:
+ return None
+
+ url = _ensure_quoted_url(urllib.parse.urljoin(page_url, file_url))
+ pyrequire = file_data.get("requires-python")
+ yanked_reason = file_data.get("yanked")
+ dist_info_metadata = file_data.get("dist-info-metadata")
+ hashes = file_data.get("hashes", {})
+
+ # Link.yanked_reason expects an empty string instead of a boolean.
+ if yanked_reason and not isinstance(yanked_reason, str):
+ yanked_reason = ""
+ # Link.yanked_reason expects None instead of False.
+ elif not yanked_reason:
+ yanked_reason = None
+
+ return cls(
+ url,
+ comes_from=page_url,
+ requires_python=pyrequire,
+ yanked_reason=yanked_reason,
+ hashes=hashes,
+ dist_info_metadata=dist_info_metadata,
+ )
+
+ @classmethod
+ def from_element(
+ cls,
+ anchor_attribs: Dict[str, Optional[str]],
+ page_url: str,
+ base_url: str,
+ ) -> Optional["Link"]:
+ """
+ Convert an anchor element's attributes in a simple repository page to a Link.
+ """
+ href = anchor_attribs.get("href")
+ if not href:
+ return None
+
+ url = _ensure_quoted_url(urllib.parse.urljoin(base_url, href))
+ pyrequire = anchor_attribs.get("data-requires-python")
+ yanked_reason = anchor_attribs.get("data-yanked")
+ dist_info_metadata = anchor_attribs.get("data-dist-info-metadata")
+
+ return cls(
+ url,
+ comes_from=page_url,
+ requires_python=pyrequire,
+ yanked_reason=yanked_reason,
+ dist_info_metadata=dist_info_metadata,
+ )
+
+ def __str__(self) -> str:
+ if self.requires_python:
+ rp = f" (requires-python:{self.requires_python})"
+ else:
+ rp = ""
+ if self.comes_from:
+ return "{} (from {}){}".format(
+ redact_auth_from_url(self._url), self.comes_from, rp
+ )
+ else:
+ return redact_auth_from_url(str(self._url))
+
+ def __repr__(self) -> str:
+ return f"<Link {self}>"
+
+ @property
+ def url(self) -> str:
+ return self._url
+
+ @property
+ def filename(self) -> str:
+ path = self.path.rstrip("/")
+ name = posixpath.basename(path)
+ if not name:
+ # Make sure we don't leak auth information if the netloc
+ # includes a username and password.
+ netloc, user_pass = split_auth_from_netloc(self.netloc)
+ return netloc
+
+ name = urllib.parse.unquote(name)
+ assert name, f"URL {self._url!r} produced no filename"
+ return name
+
+ @property
+ def file_path(self) -> str:
+ return url_to_path(self.url)
+
+ @property
+ def scheme(self) -> str:
+ return self._parsed_url.scheme
+
+ @property
+ def netloc(self) -> str:
+ """
+ This can contain auth information.
+ """
+ return self._parsed_url.netloc
+
+ @property
+ def path(self) -> str:
+ return urllib.parse.unquote(self._parsed_url.path)
+
+ def splitext(self) -> Tuple[str, str]:
+ return splitext(posixpath.basename(self.path.rstrip("/")))
+
+ @property
+ def ext(self) -> str:
+ return self.splitext()[1]
+
+ @property
+ def url_without_fragment(self) -> str:
+ scheme, netloc, path, query, fragment = self._parsed_url
+ return urllib.parse.urlunsplit((scheme, netloc, path, query, ""))
+
+ _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)")
+
+ # Per PEP 508.
+ _project_name_re = re.compile(
+ r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.IGNORECASE
+ )
+
+ def _egg_fragment(self) -> Optional[str]:
+ match = self._egg_fragment_re.search(self._url)
+ if not match:
+ return None
+
+ # An egg fragment looks like a PEP 508 project name, along with
+ # an optional extras specifier. Anything else is invalid.
+ project_name = match.group(1)
+ if not self._project_name_re.match(project_name):
+ deprecated(
+ reason=f"{self} contains an egg fragment with a non-PEP 508 name",
+ replacement="to use the req @ url syntax, and remove the egg fragment",
+ gone_in="25.0",
+ issue=11617,
+ )
+
+ return project_name
+
+ _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)")
+
+ @property
+ def subdirectory_fragment(self) -> Optional[str]:
+ match = self._subdirectory_fragment_re.search(self._url)
+ if not match:
+ return None
+ return match.group(1)
+
+ def metadata_link(self) -> Optional["Link"]:
+ """Implementation of PEP 658 parsing."""
+ # Note that Link.from_element() parsing the "data-dist-info-metadata" attribute
+ # from an HTML anchor tag is typically how the Link.dist_info_metadata attribute
+ # gets set.
+ if self.dist_info_metadata is None:
+ return None
+ metadata_url = f"{self.url_without_fragment}.metadata"
+ # If data-dist-info-metadata="true" is set, then the metadata file exists,
+ # but there is no information about its checksum or anything else.
+ if self.dist_info_metadata != "true":
+ link_hash = LinkHash.split_hash_name_and_value(self.dist_info_metadata)
+ else:
+ link_hash = None
+ if link_hash is None:
+ return Link(metadata_url)
+ return Link(metadata_url, hashes=link_hash.as_dict())
+
+ def as_hashes(self) -> Hashes:
+ return Hashes({k: [v] for k, v in self._hashes.items()})
+
+ @property
+ def hash(self) -> Optional[str]:
+ return next(iter(self._hashes.values()), None)
+
+ @property
+ def hash_name(self) -> Optional[str]:
+ return next(iter(self._hashes), None)
+
+ @property
+ def show_url(self) -> str:
+ return posixpath.basename(self._url.split("#", 1)[0].split("?", 1)[0])
+
+ @property
+ def is_file(self) -> bool:
+ return self.scheme == "file"
+
+ def is_existing_dir(self) -> bool:
+ return self.is_file and os.path.isdir(self.file_path)
+
+ @property
+ def is_wheel(self) -> bool:
+ return self.ext == WHEEL_EXTENSION
+
+ @property
+ def is_vcs(self) -> bool:
+ from pip._internal.vcs import vcs
+
+ return self.scheme in vcs.all_schemes
+
+ @property
+ def is_yanked(self) -> bool:
+ return self.yanked_reason is not None
+
+ @property
+ def has_hash(self) -> bool:
+ return bool(self._hashes)
+
+ def is_hash_allowed(self, hashes: Optional[Hashes]) -> bool:
+ """
+ Return True if the link has a hash and it is allowed by `hashes`.
+ """
+ if hashes is None:
+ return False
+ return any(hashes.is_hash_allowed(k, v) for k, v in self._hashes.items())
+
+
+class _CleanResult(NamedTuple):
+ """Convert link for equivalency check.
+
+ This is used in the resolver to check whether two URL-specified requirements
+ likely point to the same distribution and can be considered equivalent. This
+ equivalency logic avoids comparing URLs literally, which can be too strict
+ (e.g. "a=1&b=2" vs "b=2&a=1") and produce conflicts unexpecting to users.
+
+ Currently this does three things:
+
+ 1. Drop the basic auth part. This is technically wrong since a server can
+ serve different content based on auth, but if it does that, it is even
+ impossible to guarantee two URLs without auth are equivalent, since
+ the user can input different auth information when prompted. So the
+ practical solution is to assume the auth doesn't affect the response.
+ 2. Parse the query to avoid the ordering issue. Note that the ordering of
+ values under the same key is NOT normalized; i.e. "a=1&a=2" and "a=2&a=1"
+ are still considered different.
+ 3. Explicitly drop most of the fragment part, except ``subdirectory=`` and
+ hash values, since it should have no impact on the downloaded content. Note
+ that this drops the "egg=" part historically used to denote the requested
+ project (and extras), which is wrong in the strictest sense, but enough
+ people supply it inconsistently to cause superfluous resolution conflicts,
+ so we choose to also ignore it.
+ """
+
+ parsed: urllib.parse.SplitResult
+ query: Dict[str, List[str]]
+ subdirectory: str
+ hashes: Dict[str, str]
+
+
+def _clean_link(link: Link) -> _CleanResult:
+ parsed = link._parsed_url
+ netloc = parsed.netloc.rsplit("@", 1)[-1]
+ # According to RFC 8089, an empty host in file: means localhost.
+ if parsed.scheme == "file" and not netloc:
+ netloc = "localhost"
+ fragment = urllib.parse.parse_qs(parsed.fragment)
+ if "egg" in fragment:
+ logger.debug("Ignoring egg= fragment in %s", link)
+ try:
+ # If there are multiple subdirectory values, use the first one.
+ # This matches the behavior of Link.subdirectory_fragment.
+ subdirectory = fragment["subdirectory"][0]
+ except (IndexError, KeyError):
+ subdirectory = ""
+ # If there are multiple hash values under the same algorithm, use the
+ # first one. This matches the behavior of Link.hash.
+ hashes = {k: fragment[k][0] for k in _SUPPORTED_HASHES if k in fragment}
+ return _CleanResult(
+ parsed=parsed._replace(netloc=netloc, query="", fragment=""),
+ query=urllib.parse.parse_qs(parsed.query),
+ subdirectory=subdirectory,
+ hashes=hashes,
+ )
+
+
+@functools.lru_cache(maxsize=None)
+def links_equivalent(link1: Link, link2: Link) -> bool:
+ return _clean_link(link1) == _clean_link(link2)
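[Editor's sketch] An illustration of the equivalency semantics described in _CleanResult, with hypothetical URLs:

    link1 = Link("https://example.com/pkg-1.0.tar.gz?b=2&a=1#egg=pkg")
    link2 = Link("https://example.com/pkg-1.0.tar.gz?a=1&b=2")
    assert links_equivalent(link1, link2)  # query order and egg= are ignored

    link3 = Link("https://example.com/pkg-1.0.tar.gz#subdirectory=src")
    assert not links_equivalent(link1, link3)  # subdirectory= is significant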
diff --git a/third_party/python/pip/pip/_internal/models/scheme.py b/third_party/python/pip/pip/_internal/models/scheme.py
new file mode 100644
index 0000000000..f51190ac60
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/scheme.py
@@ -0,0 +1,31 @@
+"""
+For types associated with installation schemes.
+
+For a general overview of available schemes and their context, see
+https://docs.python.org/3/install/index.html#alternate-installation.
+"""
+
+
+SCHEME_KEYS = ["platlib", "purelib", "headers", "scripts", "data"]
+
+
+class Scheme:
+ """A Scheme holds paths which are used as the base directories for
+ artifacts associated with a Python package.
+ """
+
+ __slots__ = SCHEME_KEYS
+
+ def __init__(
+ self,
+ platlib: str,
+ purelib: str,
+ headers: str,
+ scripts: str,
+ data: str,
+ ) -> None:
+ self.platlib = platlib
+ self.purelib = purelib
+ self.headers = headers
+ self.scripts = scripts
+ self.data = data
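[Editor's sketch] As a loose illustration of what these keys correspond to on a live interpreter; pip computes its real schemes in pip._internal.locations, so this only borrows the stdlib's view:

    import sysconfig

    paths = sysconfig.get_paths()
    scheme = Scheme(
        platlib=paths["platlib"],
        purelib=paths["purelib"],
        headers=paths["include"],
        scripts=paths["scripts"],
        data=paths["data"],
    )
    print(scheme.purelib)  # e.g. .../lib/python3.x/site-packages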
diff --git a/third_party/python/pip/pip/_internal/models/search_scope.py b/third_party/python/pip/pip/_internal/models/search_scope.py
new file mode 100644
index 0000000000..a64af73899
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/search_scope.py
@@ -0,0 +1,133 @@
+import itertools
+import logging
+import os
+import posixpath
+import urllib.parse
+from typing import List
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.models.index import PyPI
+from pip._internal.utils.compat import has_tls
+from pip._internal.utils.misc import normalize_path, redact_auth_from_url
+
+logger = logging.getLogger(__name__)
+
+
+class SearchScope:
+
+ """
+ Encapsulates the locations that pip is configured to search.
+ """
+
+ __slots__ = ["find_links", "index_urls", "no_index"]
+
+ @classmethod
+ def create(
+ cls,
+ find_links: List[str],
+ index_urls: List[str],
+ no_index: bool,
+ ) -> "SearchScope":
+ """
+ Create a SearchScope object after normalizing the `find_links`.
+ """
+ # Build find_links. If an argument starts with ~, it may be
+ # a local file relative to a home directory. So try normalizing
+ # it and if it exists, use the normalized version.
+ # This is deliberately conservative - it might be fine just to
+ # blindly normalize anything starting with a ~...
+ built_find_links: List[str] = []
+ for link in find_links:
+ if link.startswith("~"):
+ new_link = normalize_path(link)
+ if os.path.exists(new_link):
+ link = new_link
+ built_find_links.append(link)
+
+ # If we don't have TLS enabled, then WARN if anyplace we're looking
+ # relies on TLS.
+ if not has_tls():
+ for link in itertools.chain(index_urls, built_find_links):
+ parsed = urllib.parse.urlparse(link)
+ if parsed.scheme == "https":
+ logger.warning(
+ "pip is configured with locations that require "
+ "TLS/SSL, however the ssl module in Python is not "
+ "available."
+ )
+ break
+
+ return cls(
+ find_links=built_find_links,
+ index_urls=index_urls,
+ no_index=no_index,
+ )
+
+ def __init__(
+ self,
+ find_links: List[str],
+ index_urls: List[str],
+ no_index: bool,
+ ) -> None:
+ self.find_links = find_links
+ self.index_urls = index_urls
+ self.no_index = no_index
+
+ def get_formatted_locations(self) -> str:
+ lines = []
+ redacted_index_urls = []
+ if self.index_urls and self.index_urls != [PyPI.simple_url]:
+ for url in self.index_urls:
+
+ redacted_index_url = redact_auth_from_url(url)
+
+ # Parse the URL
+ purl = urllib.parse.urlsplit(redacted_index_url)
+
+ # A URL is generally invalid if both scheme and netloc are
+ # missing. There are issues with Python's URL parsing, so this
+ # test is a bit crude (see bpo-20271, bpo-23505): Python does
+ # not always parse invalid URLs correctly, and ideally it would
+ # raise exceptions for malformed URLs.
+ if not purl.scheme and not purl.netloc:
+ logger.warning(
+ 'The index url "%s" seems invalid, please provide a scheme.',
+ redacted_index_url,
+ )
+
+ redacted_index_urls.append(redacted_index_url)
+
+ lines.append(
+ "Looking in indexes: {}".format(", ".join(redacted_index_urls))
+ )
+
+ if self.find_links:
+ lines.append(
+ "Looking in links: {}".format(
+ ", ".join(redact_auth_from_url(url) for url in self.find_links)
+ )
+ )
+ return "\n".join(lines)
+
+ def get_index_urls_locations(self, project_name: str) -> List[str]:
+ """Returns the locations found via self.index_urls
+
+ Checks the url_name on the main (first in the list) index and
+ uses this url_name to produce all locations.
+ """
+
+ def mkurl_pypi_url(url: str) -> str:
+ loc = posixpath.join(
+ url, urllib.parse.quote(canonicalize_name(project_name))
+ )
+ # For maximum compatibility with easy_install, ensure the path
+ # ends in a trailing slash. Although this isn't in the spec
+ # (and PyPI can handle it without the slash) some other index
+ # implementations might break if they relied on easy_install's
+ # behavior.
+ if not loc.endswith("/"):
+ loc = loc + "/"
+ return loc
+
+ return [mkurl_pypi_url(url) for url in self.index_urls]
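[Editor's sketch] A short illustration of the normalization this performs; the index URL is PyPI's real simple endpoint, the project name is arbitrary:

    scope = SearchScope.create(
        find_links=[],
        index_urls=["https://pypi.org/simple/"],
        no_index=False,
    )
    # canonicalize_name() lowercases and collapses "_" to "-"
    assert scope.get_index_urls_locations("My_Package") == [
        "https://pypi.org/simple/my-package/"
    ]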
diff --git a/third_party/python/pip/pip/_internal/models/selection_prefs.py b/third_party/python/pip/pip/_internal/models/selection_prefs.py
new file mode 100644
index 0000000000..977bc4caa7
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/selection_prefs.py
@@ -0,0 +1,51 @@
+from typing import Optional
+
+from pip._internal.models.format_control import FormatControl
+
+
+class SelectionPreferences:
+ """
+ Encapsulates the candidate selection preferences for downloading
+ and installing files.
+ """
+
+ __slots__ = [
+ "allow_yanked",
+ "allow_all_prereleases",
+ "format_control",
+ "prefer_binary",
+ "ignore_requires_python",
+ ]
+
+ # Don't include an allow_yanked default value, to make sure each call
+ # site considers whether yanked releases are allowed. This also makes
+ # that decision explicit in the calling code, which helps people
+ # reading the code.
+ def __init__(
+ self,
+ allow_yanked: bool,
+ allow_all_prereleases: bool = False,
+ format_control: Optional[FormatControl] = None,
+ prefer_binary: bool = False,
+ ignore_requires_python: Optional[bool] = None,
+ ) -> None:
+ """Create a SelectionPreferences object.
+
+ :param allow_yanked: Whether files marked as yanked (in the sense
+ of PEP 592) are permitted to be candidates for install.
+ :param format_control: A FormatControl object or None. Used to control
+ the selection of source packages / binary packages when consulting
+ the index and links.
+ :param prefer_binary: Whether to prefer an old, but valid, binary
+ dist over a new source dist.
+ :param ignore_requires_python: Whether to ignore incompatible
+ "Requires-Python" values in links. Defaults to False.
+ """
+ if ignore_requires_python is None:
+ ignore_requires_python = False
+
+ self.allow_yanked = allow_yanked
+ self.allow_all_prereleases = allow_all_prereleases
+ self.format_control = format_control
+ self.prefer_binary = prefer_binary
+ self.ignore_requires_python = ignore_requires_python
diff --git a/third_party/python/pip/pip/_internal/models/target_python.py b/third_party/python/pip/pip/_internal/models/target_python.py
new file mode 100644
index 0000000000..744bd7ef58
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/target_python.py
@@ -0,0 +1,110 @@
+import sys
+from typing import List, Optional, Tuple
+
+from pip._vendor.packaging.tags import Tag
+
+from pip._internal.utils.compatibility_tags import get_supported, version_info_to_nodot
+from pip._internal.utils.misc import normalize_version_info
+
+
+class TargetPython:
+
+ """
+ Encapsulates the properties of a Python interpreter one is targeting
+ for a package install, download, etc.
+ """
+
+ __slots__ = [
+ "_given_py_version_info",
+ "abis",
+ "implementation",
+ "platforms",
+ "py_version",
+ "py_version_info",
+ "_valid_tags",
+ ]
+
+ def __init__(
+ self,
+ platforms: Optional[List[str]] = None,
+ py_version_info: Optional[Tuple[int, ...]] = None,
+ abis: Optional[List[str]] = None,
+ implementation: Optional[str] = None,
+ ) -> None:
+ """
+ :param platforms: A list of strings or None. If None, searches for
+ packages that are supported by the current system. Otherwise, will
+ find packages that can be built on the platforms passed in. These
+ packages will only be downloaded for distribution: they will
+ not be built locally.
+ :param py_version_info: An optional tuple of ints representing the
+ Python version information to use (e.g. `sys.version_info[:3]`).
+ This can have length 1, 2, or 3 when provided.
+ :param abis: A list of strings or None. This is passed to
+ compatibility_tags.py's get_supported() function as is.
+ :param implementation: A string or None. This is passed to
+ compatibility_tags.py's get_supported() function as is.
+ """
+ # Store the given py_version_info for when we call get_supported().
+ self._given_py_version_info = py_version_info
+
+ if py_version_info is None:
+ py_version_info = sys.version_info[:3]
+ else:
+ py_version_info = normalize_version_info(py_version_info)
+
+ py_version = ".".join(map(str, py_version_info[:2]))
+
+ self.abis = abis
+ self.implementation = implementation
+ self.platforms = platforms
+ self.py_version = py_version
+ self.py_version_info = py_version_info
+
+ # This is used to cache the return value of get_tags().
+ self._valid_tags: Optional[List[Tag]] = None
+
+ def format_given(self) -> str:
+ """
+ Format the given, non-None attributes for display.
+ """
+ display_version = None
+ if self._given_py_version_info is not None:
+ display_version = ".".join(
+ str(part) for part in self._given_py_version_info
+ )
+
+ key_values = [
+ ("platforms", self.platforms),
+ ("version_info", display_version),
+ ("abis", self.abis),
+ ("implementation", self.implementation),
+ ]
+ return " ".join(
+ f"{key}={value!r}" for key, value in key_values if value is not None
+ )
+
+ def get_tags(self) -> List[Tag]:
+ """
+ Return the supported PEP 425 tags to check wheel candidates against.
+
+ The tags are returned in order of preference (most preferred first).
+ """
+ if self._valid_tags is None:
+ # Pass versions=None if no py_version_info was given since
+ # versions=None uses special default logic.
+ py_version_info = self._given_py_version_info
+ if py_version_info is None:
+ version = None
+ else:
+ version = version_info_to_nodot(py_version_info)
+
+ tags = get_supported(
+ version=version,
+ platforms=self.platforms,
+ abis=self.abis,
+ impl=self.implementation,
+ )
+ self._valid_tags = tags
+
+ return self._valid_tags
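[Editor's sketch] A brief illustration of how a target version flows through to tags:

    tp = TargetPython(py_version_info=(3, 10))
    assert tp.py_version == "3.10"
    assert tp.format_given() == "version_info='3.10'"
    tags = tp.get_tags()  # PEP 425 tags, most preferred first
    print(tags[0])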
diff --git a/third_party/python/pip/pip/_internal/models/wheel.py b/third_party/python/pip/pip/_internal/models/wheel.py
new file mode 100644
index 0000000000..a5dc12bdd6
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/models/wheel.py
@@ -0,0 +1,92 @@
+"""Represents a wheel file and provides access to the various parts of the
+name that have meaning.
+"""
+import re
+from typing import Dict, Iterable, List
+
+from pip._vendor.packaging.tags import Tag
+
+from pip._internal.exceptions import InvalidWheelFilename
+
+
+class Wheel:
+ """A wheel file"""
+
+ wheel_file_re = re.compile(
+ r"""^(?P<namever>(?P<name>[^\s-]+?)-(?P<ver>[^\s-]*?))
+ ((-(?P<build>\d[^-]*?))?-(?P<pyver>[^\s-]+?)-(?P<abi>[^\s-]+?)-(?P<plat>[^\s-]+?)
+ \.whl|\.dist-info)$""",
+ re.VERBOSE,
+ )
+
+ def __init__(self, filename: str) -> None:
+ """
+ :raises InvalidWheelFilename: when the filename is invalid for a wheel
+ """
+ wheel_info = self.wheel_file_re.match(filename)
+ if not wheel_info:
+ raise InvalidWheelFilename(f"{filename} is not a valid wheel filename.")
+ self.filename = filename
+ self.name = wheel_info.group("name").replace("_", "-")
+ # we'll assume "_" means "-" due to wheel naming scheme
+ # (https://github.com/pypa/pip/issues/1150)
+ self.version = wheel_info.group("ver").replace("_", "-")
+ self.build_tag = wheel_info.group("build")
+ self.pyversions = wheel_info.group("pyver").split(".")
+ self.abis = wheel_info.group("abi").split(".")
+ self.plats = wheel_info.group("plat").split(".")
+
+ # All the tag combinations from this file
+ self.file_tags = {
+ Tag(x, y, z) for x in self.pyversions for y in self.abis for z in self.plats
+ }
+
+ def get_formatted_file_tags(self) -> List[str]:
+ """Return the wheel's tags as a sorted list of strings."""
+ return sorted(str(tag) for tag in self.file_tags)
+
+ def support_index_min(self, tags: List[Tag]) -> int:
+ """Return the lowest index that one of the wheel's file_tag combinations
+ achieves in the given list of supported tags.
+
+ For example, if there are 8 supported tags and one of the file tags
+ is first in the list, then return 0.
+
+ :param tags: the PEP 425 tags to check the wheel against, in order
+ with most preferred first.
+
+ :raises ValueError: If none of the wheel's file tags match one of
+ the supported tags.
+ """
+ try:
+ return next(i for i, t in enumerate(tags) if t in self.file_tags)
+ except StopIteration:
+ raise ValueError()
+
+ def find_most_preferred_tag(
+ self, tags: List[Tag], tag_to_priority: Dict[Tag, int]
+ ) -> int:
+ """Return the priority of the most preferred tag that one of the wheel's file
+ tag combinations achieves in the given list of supported tags using the given
+ tag_to_priority mapping, where lower priorities are more-preferred.
+
+ This is used in place of support_index_min in some cases in order to avoid
+ an expensive linear scan of a large list of tags.
+
+ :param tags: the PEP 425 tags to check the wheel against.
+ :param tag_to_priority: a mapping from tag to priority of that tag, where
+ lower is more preferred.
+
+ :raises ValueError: If none of the wheel's file tags match one of
+ the supported tags.
+ """
+ return min(
+ tag_to_priority[tag] for tag in self.file_tags if tag in tag_to_priority
+ )
+
+ def supported(self, tags: Iterable[Tag]) -> bool:
+ """Return whether the wheel is compatible with one of the given tags.
+
+ :param tags: the PEP 425 tags to check the wheel against.
+ """
+ return not self.file_tags.isdisjoint(tags)
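[Editor's sketch] An illustration of the filename parsing, using pip's own wheel name as the example; Tag is the packaging class imported at the top of this module:

    w = Wheel("pip-23.0.1-py3-none-any.whl")
    assert w.name == "pip" and w.version == "23.0.1"
    assert w.get_formatted_file_tags() == ["py3-none-any"]

    supported = [Tag("py3", "none", "any")]
    assert w.supported(supported)
    assert w.support_index_min(supported) == 0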
diff --git a/third_party/python/pip/pip/_internal/network/__init__.py b/third_party/python/pip/pip/_internal/network/__init__.py
new file mode 100644
index 0000000000..b51bde91b2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/network/__init__.py
@@ -0,0 +1,2 @@
+"""Contains purely network-related utilities.
+"""
diff --git a/third_party/python/pip/pip/_internal/network/auth.py b/third_party/python/pip/pip/_internal/network/auth.py
new file mode 100644
index 0000000000..c162132682
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/network/auth.py
@@ -0,0 +1,446 @@
+"""Network Authentication Helpers
+
+Contains interface (MultiDomainBasicAuth) and associated glue code for
+providing credentials in the context of network requests.
+"""
+
+import os
+import shutil
+import subprocess
+import urllib.parse
+from abc import ABC, abstractmethod
+from typing import Any, Dict, List, NamedTuple, Optional, Tuple
+
+from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
+from pip._vendor.requests.models import Request, Response
+from pip._vendor.requests.utils import get_netrc_auth
+
+from pip._internal.utils.logging import getLogger
+from pip._internal.utils.misc import (
+ ask,
+ ask_input,
+ ask_password,
+ remove_auth_from_url,
+ split_auth_netloc_from_url,
+)
+from pip._internal.vcs.versioncontrol import AuthInfo
+
+logger = getLogger(__name__)
+
+KEYRING_DISABLED = False
+
+
+class Credentials(NamedTuple):
+ url: str
+ username: str
+ password: str
+
+
+class KeyRingBaseProvider(ABC):
+ """Keyring base provider interface"""
+
+ @abstractmethod
+ def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
+ ...
+
+ @abstractmethod
+ def save_auth_info(self, url: str, username: str, password: str) -> None:
+ ...
+
+
+class KeyRingNullProvider(KeyRingBaseProvider):
+ """Keyring null provider"""
+
+ def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
+ return None
+
+ def save_auth_info(self, url: str, username: str, password: str) -> None:
+ return None
+
+
+class KeyRingPythonProvider(KeyRingBaseProvider):
+ """Keyring interface which uses locally imported `keyring`"""
+
+ def __init__(self) -> None:
+ import keyring
+
+ self.keyring = keyring
+
+ def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
+ # Support keyring's get_credential interface which supports getting
+ # credentials without a username. This is only available for
+ # keyring>=15.2.0.
+ if hasattr(self.keyring, "get_credential"):
+ logger.debug("Getting credentials from keyring for %s", url)
+ cred = self.keyring.get_credential(url, username)
+ if cred is not None:
+ return cred.username, cred.password
+ return None
+
+ if username is not None:
+ logger.debug("Getting password from keyring for %s", url)
+ password = self.keyring.get_password(url, username)
+ if password:
+ return username, password
+ return None
+
+ def save_auth_info(self, url: str, username: str, password: str) -> None:
+ self.keyring.set_password(url, username, password)
+
+
+class KeyRingCliProvider(KeyRingBaseProvider):
+ """Provider which uses `keyring` cli
+
+ Instead of calling the keyring package installed alongside pip,
+ we call the keyring executable on the command line, which enables
+ pip to use whichever installation of keyring is first on PATH.
+ """
+
+ def __init__(self, cmd: str) -> None:
+ self.keyring = cmd
+
+ def get_auth_info(self, url: str, username: Optional[str]) -> Optional[AuthInfo]:
+ # This is the default implementation of keyring.get_credential
+ # https://github.com/jaraco/keyring/blob/97689324abcf01bd1793d49063e7ca01e03d7d07/keyring/backend.py#L134-L139
+ if username is not None:
+ password = self._get_password(url, username)
+ if password is not None:
+ return username, password
+ return None
+
+ def save_auth_info(self, url: str, username: str, password: str) -> None:
+ return self._set_password(url, username, password)
+
+ def _get_password(self, service_name: str, username: str) -> Optional[str]:
+ """Mirror the implementation of keyring.get_password using cli"""
+ if self.keyring is None:
+ return None
+
+ cmd = [self.keyring, "get", service_name, username]
+ env = os.environ.copy()
+ env["PYTHONIOENCODING"] = "utf-8"
+ res = subprocess.run(
+ cmd,
+ stdin=subprocess.DEVNULL,
+ capture_output=True,
+ env=env,
+ )
+ if res.returncode:
+ return None
+ return res.stdout.decode("utf-8").strip(os.linesep)
+
+ def _set_password(self, service_name: str, username: str, password: str) -> None:
+ """Mirror the implementation of keyring.set_password using cli"""
+ if self.keyring is None:
+ return None
+
+ cmd = [self.keyring, "set", service_name, username]
+ input_ = (password + os.linesep).encode("utf-8")
+ env = os.environ.copy()
+ env["PYTHONIOENCODING"] = "utf-8"
+ res = subprocess.run(cmd, input=input_, env=env)
+ res.check_returncode()
+ return None
+
+
+def get_keyring_provider() -> KeyRingBaseProvider:
+ # keyring has previously failed and been disabled
+ if not KEYRING_DISABLED:
+ # Default to trying to use Python provider
+ try:
+ return KeyRingPythonProvider()
+ except ImportError:
+ pass
+ except Exception as exc:
+ # In the event of an unexpected exception
+ # we should warn the user
+ logger.warning(
+ "Installed copy of keyring fails with exception %s, "
+ "trying to find a keyring executable as a fallback",
+ str(exc),
+ )
+
+ # Fall back to the CLI provider if the `keyring` package isn't importable
+ cli = shutil.which("keyring")
+ if cli:
+ return KeyRingCliProvider(cli)
+
+ return KeyRingNullProvider()
+
+
+def get_keyring_auth(url: Optional[str], username: Optional[str]) -> Optional[AuthInfo]:
+ """Return the tuple auth for a given url from keyring."""
+ # Do nothing if no url was provided
+ if not url:
+ return None
+
+ keyring = get_keyring_provider()
+ try:
+ return keyring.get_auth_info(url, username)
+ except Exception as exc:
+ logger.warning(
+ "Keyring is skipped due to an exception: %s",
+ str(exc),
+ )
+ global KEYRING_DISABLED
+ KEYRING_DISABLED = True
+ return None
+
+
+class MultiDomainBasicAuth(AuthBase):
+ def __init__(
+ self, prompting: bool = True, index_urls: Optional[List[str]] = None
+ ) -> None:
+ self.prompting = prompting
+ self.index_urls = index_urls
+ self.passwords: Dict[str, AuthInfo] = {}
+ # When the user is prompted to enter credentials and keyring is
+ # available, we will offer to save them. If the user accepts,
+ # this value is set to the credentials they entered. After the
+ # request authenticates, the caller should call
+ # ``save_credentials`` to save these.
+ self._credentials_to_save: Optional[Credentials] = None
+
+ def _get_index_url(self, url: str) -> Optional[str]:
+ """Return the original index URL matching the requested URL.
+
+ Cached or dynamically generated credentials may work against
+ the original index URL rather than just the netloc.
+
+ The provided url should have had its username and password
+ removed already. If the original index url had credentials then
+ they will be included in the return value.
+
+ Returns None if no matching index was found, or if --no-index
+ was specified by the user.
+ """
+ if not url or not self.index_urls:
+ return None
+
+ for u in self.index_urls:
+ prefix = remove_auth_from_url(u).rstrip("/") + "/"
+ if url.startswith(prefix):
+ return u
+ return None
+
+ def _get_new_credentials(
+ self,
+ original_url: str,
+ allow_netrc: bool = True,
+ allow_keyring: bool = False,
+ ) -> AuthInfo:
+ """Find and return credentials for the specified URL."""
+ # Split the credentials and netloc from the url.
+ url, netloc, url_user_password = split_auth_netloc_from_url(
+ original_url,
+ )
+
+ # Start with the credentials embedded in the url
+ username, password = url_user_password
+ if username is not None and password is not None:
+ logger.debug("Found credentials in url for %s", netloc)
+ return url_user_password
+
+ # Find a matching index url for this request
+ index_url = self._get_index_url(url)
+ if index_url:
+ # Split the credentials from the url.
+ index_info = split_auth_netloc_from_url(index_url)
+ if index_info:
+ index_url, _, index_url_user_password = index_info
+ logger.debug("Found index url %s", index_url)
+
+ # If an index URL was found, try its embedded credentials
+ if index_url and index_url_user_password[0] is not None:
+ username, password = index_url_user_password
+ if username is not None and password is not None:
+ logger.debug("Found credentials in index url for %s", netloc)
+ return index_url_user_password
+
+ # Get creds from netrc if we still don't have them
+ if allow_netrc:
+ netrc_auth = get_netrc_auth(original_url)
+ if netrc_auth:
+ logger.debug("Found credentials in netrc for %s", netloc)
+ return netrc_auth
+
+ # If we don't have a password and keyring is available, use it.
+ if allow_keyring:
+ # The index url is more specific than the netloc, so try it first
+ # fmt: off
+ kr_auth = (
+ get_keyring_auth(index_url, username) or
+ get_keyring_auth(netloc, username)
+ )
+ # fmt: on
+ if kr_auth:
+ logger.debug("Found credentials in keyring for %s", netloc)
+ return kr_auth
+
+ return username, password
+
+ def _get_url_and_credentials(
+ self, original_url: str
+ ) -> Tuple[str, Optional[str], Optional[str]]:
+ """Return the credentials to use for the provided URL.
+
+ If allowed, netrc and keyring may be used to obtain the
+ correct credentials.
+
+ Returns (url_without_credentials, username, password). Note
+ that even if the original URL contains credentials, this
+ function may return a different username and password.
+ """
+ url, netloc, _ = split_auth_netloc_from_url(original_url)
+
+ # Try to get credentials from original url
+ username, password = self._get_new_credentials(original_url)
+
+ # If credentials not found, use any stored credentials for this netloc.
+ # Do this if either the username or the password is missing.
+ # This accounts for the situation in which the user has specified
+ # the username in the index url, but the password comes from keyring.
+ if (username is None or password is None) and netloc in self.passwords:
+ un, pw = self.passwords[netloc]
+ # It is possible that the cached credentials are for a different username,
+ # in which case the cache should be ignored.
+ if username is None or username == un:
+ username, password = un, pw
+
+ if username is not None or password is not None:
+ # Convert the username and password if they're None, so that
+ # this netloc will show up as "cached" in the conditional above.
+ # Further, HTTPBasicAuth doesn't accept None, so it makes sense to
+ # cache the value that is going to be used.
+ username = username or ""
+ password = password or ""
+
+ # Store any acquired credentials.
+ self.passwords[netloc] = (username, password)
+
+ assert (
+ # Credentials were found
+ (username is not None and password is not None)
+ # Credentials were not found
+ or (username is None and password is None)
+ ), f"Could not load credentials from url: {original_url}"
+
+ return url, username, password
+
+ def __call__(self, req: Request) -> Request:
+ # Get credentials for this request
+ url, username, password = self._get_url_and_credentials(req.url)
+
+ # Set the url of the request to the url without any credentials
+ req.url = url
+
+ if username is not None and password is not None:
+ # Send the basic auth with this request
+ req = HTTPBasicAuth(username, password)(req)
+
+ # Attach a hook to handle 401 responses
+ req.register_hook("response", self.handle_401)
+
+ return req
+
+ # Factored out to allow for easy patching in tests
+ def _prompt_for_password(
+ self, netloc: str
+ ) -> Tuple[Optional[str], Optional[str], bool]:
+ username = ask_input(f"User for {netloc}: ")
+ if not username:
+ return None, None, False
+ auth = get_keyring_auth(netloc, username)
+ if auth and auth[0] is not None and auth[1] is not None:
+ return auth[0], auth[1], False
+ password = ask_password("Password: ")
+ return username, password, True
+
+ # Factored out to allow for easy patching in tests
+ def _should_save_password_to_keyring(self) -> bool:
+ if isinstance(get_keyring_provider(), KeyRingNullProvider):
+ return False
+ return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"
+
+ def handle_401(self, resp: Response, **kwargs: Any) -> Response:
+ # We only care about 401 responses, anything else we want to just
+ # pass through the actual response
+ if resp.status_code != 401:
+ return resp
+
+ # We are not able to prompt the user so simply return the response
+ if not self.prompting:
+ return resp
+
+ parsed = urllib.parse.urlparse(resp.url)
+
+ # Query the keyring for credentials:
+ username, password = self._get_new_credentials(
+ resp.url,
+ allow_netrc=False,
+ allow_keyring=True,
+ )
+
+ # Prompt the user for a new username and password
+ save = False
+ if not username and not password:
+ username, password, save = self._prompt_for_password(parsed.netloc)
+
+ # Store the new username and password to use for future requests
+ self._credentials_to_save = None
+ if username is not None and password is not None:
+ self.passwords[parsed.netloc] = (username, password)
+
+ # Prompt to save the password to keyring
+ if save and self._should_save_password_to_keyring():
+ self._credentials_to_save = Credentials(
+ url=parsed.netloc,
+ username=username,
+ password=password,
+ )
+
+ # Consume content and release the original connection to allow our new
+ # request to reuse the same one.
+ resp.content
+ resp.raw.release_conn()
+
+ # Add our new username and password to the request
+ req = HTTPBasicAuth(username or "", password or "")(resp.request)
+ req.register_hook("response", self.warn_on_401)
+
+ # On successful request, save the credentials that were used to
+ # keyring. (Note that if the user responded "no" above, this member
+ # is not set and nothing will be saved.)
+ if self._credentials_to_save:
+ req.register_hook("response", self.save_credentials)
+
+ # Send our new request
+ new_resp = resp.connection.send(req, **kwargs)
+ new_resp.history.append(resp)
+
+ return new_resp
+
+ def warn_on_401(self, resp: Response, **kwargs: Any) -> None:
+ """Response callback to warn about incorrect credentials."""
+ if resp.status_code == 401:
+ logger.warning(
+ "401 Error, Credentials not correct for %s",
+ resp.request.url,
+ )
+
+ def save_credentials(self, resp: Response, **kwargs: Any) -> None:
+ """Response callback to save credentials on success."""
+ keyring = get_keyring_provider()
+ assert not isinstance(
+ keyring, KeyRingNullProvider
+ ), "should never reach here without keyring"
+
+ creds = self._credentials_to_save
+ self._credentials_to_save = None
+ if creds and resp.status_code < 400:
+ try:
+ logger.info("Saving credentials to keyring")
+ keyring.save_auth_info(creds.url, creds.username, creds.password)
+ except Exception:
+ logger.exception("Failed to save credentials")
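[Editor's sketch] How this class gets wired up; pip itself attaches an instance inside PipSession, and the index URL below is a hypothetical private index:

    from pip._internal.network.session import PipSession

    session = PipSession()
    session.auth = MultiDomainBasicAuth(
        prompting=False,
        index_urls=["https://user:secret@pypi.example.com/simple/"],
    )
    # Requests under that index have the credentials stripped from the URL
    # and re-sent as HTTP Basic auth; 401 responses trigger handle_401().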
diff --git a/third_party/python/pip/pip/_internal/network/cache.py b/third_party/python/pip/pip/_internal/network/cache.py
new file mode 100644
index 0000000000..a81a239851
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/network/cache.py
@@ -0,0 +1,69 @@
+"""HTTP cache implementation.
+"""
+
+import os
+from contextlib import contextmanager
+from typing import Generator, Optional
+
+from pip._vendor.cachecontrol.cache import BaseCache
+from pip._vendor.cachecontrol.caches import FileCache
+from pip._vendor.requests.models import Response
+
+from pip._internal.utils.filesystem import adjacent_tmp_file, replace
+from pip._internal.utils.misc import ensure_dir
+
+
+def is_from_cache(response: Response) -> bool:
+ return getattr(response, "from_cache", False)
+
+
+@contextmanager
+def suppressed_cache_errors() -> Generator[None, None, None]:
+ """If we can't access the cache then we can just skip caching and process
+ requests as if caching wasn't enabled.
+ """
+ try:
+ yield
+ except OSError:
+ pass
+
+
+class SafeFileCache(BaseCache):
+ """
+ A file based cache which is safe to use even when the target directory may
+ not be accessible or writable.
+ """
+
+ def __init__(self, directory: str) -> None:
+ assert directory is not None, "Cache directory must not be None."
+ super().__init__()
+ self.directory = directory
+
+ def _get_cache_path(self, name: str) -> str:
+ # From cachecontrol.caches.file_cache.FileCache._fn, brought into our
+ # class for backwards-compatibility and to avoid using a non-public
+ # method.
+ hashed = FileCache.encode(name)
+ parts = list(hashed[:5]) + [hashed]
+ return os.path.join(self.directory, *parts)
+
+ def get(self, key: str) -> Optional[bytes]:
+ path = self._get_cache_path(key)
+ with suppressed_cache_errors():
+ with open(path, "rb") as f:
+ return f.read()
+
+ def set(self, key: str, value: bytes, expires: Optional[int] = None) -> None:
+ path = self._get_cache_path(key)
+ with suppressed_cache_errors():
+ ensure_dir(os.path.dirname(path))
+
+ with adjacent_tmp_file(path) as f:
+ f.write(value)
+
+ replace(f.name, path)
+
+ def delete(self, key: str) -> None:
+ path = self._get_cache_path(key)
+ with suppressed_cache_errors():
+ os.remove(path)
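[Editor's sketch] A minimal illustration of the cache in isolation; the directory is arbitrary, and keys are the URLs that cachecontrol passes in:

    import tempfile

    cache = SafeFileCache(tempfile.mkdtemp())
    cache.set("https://example.com/simple/", b"cached response bytes")
    assert cache.get("https://example.com/simple/") == b"cached response bytes"
    cache.delete("https://example.com/simple/")
    assert cache.get("https://example.com/simple/") is None  # errors are swallowed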
diff --git a/third_party/python/pip/pip/_internal/network/download.py b/third_party/python/pip/pip/_internal/network/download.py
new file mode 100644
index 0000000000..79b82a570e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/network/download.py
@@ -0,0 +1,186 @@
+"""Download files with progress indicators.
+"""
+import email.message
+import logging
+import mimetypes
+import os
+from typing import Iterable, Optional, Tuple
+
+from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
+
+from pip._internal.cli.progress_bars import get_download_progress_renderer
+from pip._internal.exceptions import NetworkConnectionError
+from pip._internal.models.index import PyPI
+from pip._internal.models.link import Link
+from pip._internal.network.cache import is_from_cache
+from pip._internal.network.session import PipSession
+from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks
+from pip._internal.utils.misc import format_size, redact_auth_from_url, splitext
+
+logger = logging.getLogger(__name__)
+
+
+def _get_http_response_size(resp: Response) -> Optional[int]:
+ try:
+ return int(resp.headers["content-length"])
+ except (ValueError, KeyError, TypeError):
+ return None
+
+
+def _prepare_download(
+ resp: Response,
+ link: Link,
+ progress_bar: str,
+) -> Iterable[bytes]:
+ total_length = _get_http_response_size(resp)
+
+ if link.netloc == PyPI.file_storage_domain:
+ url = link.show_url
+ else:
+ url = link.url_without_fragment
+
+ logged_url = redact_auth_from_url(url)
+
+ if total_length:
+ logged_url = "{} ({})".format(logged_url, format_size(total_length))
+
+ if is_from_cache(resp):
+ logger.info("Using cached %s", logged_url)
+ else:
+ logger.info("Downloading %s", logged_url)
+
+ if logger.getEffectiveLevel() > logging.INFO:
+ show_progress = False
+ elif is_from_cache(resp):
+ show_progress = False
+ elif not total_length:
+ show_progress = True
+ elif total_length > (40 * 1000):
+ show_progress = True
+ else:
+ show_progress = False
+
+ chunks = response_chunks(resp, CONTENT_CHUNK_SIZE)
+
+ if not show_progress:
+ return chunks
+
+ renderer = get_download_progress_renderer(bar_type=progress_bar, size=total_length)
+ return renderer(chunks)
+
+
+def sanitize_content_filename(filename: str) -> str:
+ """
+ Sanitize the "filename" value from a Content-Disposition header.
+ """
+ return os.path.basename(filename)
+
+
+def parse_content_disposition(content_disposition: str, default_filename: str) -> str:
+ """
+ Parse the "filename" value from a Content-Disposition header, and
+ return the default filename if the result is empty.
+ """
+ m = email.message.Message()
+ m["content-type"] = content_disposition
+ filename = m.get_param("filename")
+ if filename:
+ # We need to sanitize the filename to prevent directory traversal
+ # in case the filename contains ".." path parts.
+ filename = sanitize_content_filename(str(filename))
+ return filename or default_filename
+
+
+def _get_http_response_filename(resp: Response, link: Link) -> str:
+ """Get an ideal filename from the given HTTP response, falling back to
+ the link filename if not provided.
+ """
+ filename = link.filename # fallback
+ # Have a look at the Content-Disposition header for a better guess
+ content_disposition = resp.headers.get("content-disposition")
+ if content_disposition:
+ filename = parse_content_disposition(content_disposition, filename)
+ ext: Optional[str] = splitext(filename)[1]
+ if not ext:
+ ext = mimetypes.guess_extension(resp.headers.get("content-type", ""))
+ if ext:
+ filename += ext
+ if not ext and link.url != resp.url:
+ ext = os.path.splitext(resp.url)[1]
+ if ext:
+ filename += ext
+ return filename
+
+
+def _http_get_download(session: PipSession, link: Link) -> Response:
+ target_url = link.url.split("#", 1)[0]
+ resp = session.get(target_url, headers=HEADERS, stream=True)
+ raise_for_status(resp)
+ return resp
+
+
+class Downloader:
+ def __init__(
+ self,
+ session: PipSession,
+ progress_bar: str,
+ ) -> None:
+ self._session = session
+ self._progress_bar = progress_bar
+
+ def __call__(self, link: Link, location: str) -> Tuple[str, str]:
+ """Download the file given by link into location."""
+ try:
+ resp = _http_get_download(self._session, link)
+ except NetworkConnectionError as e:
+ assert e.response is not None
+ logger.critical(
+ "HTTP error %s while getting %s", e.response.status_code, link
+ )
+ raise
+
+ filename = _get_http_response_filename(resp, link)
+ filepath = os.path.join(location, filename)
+
+ chunks = _prepare_download(resp, link, self._progress_bar)
+ with open(filepath, "wb") as content_file:
+ for chunk in chunks:
+ content_file.write(chunk)
+ content_type = resp.headers.get("Content-Type", "")
+ return filepath, content_type
+
+
+class BatchDownloader:
+ def __init__(
+ self,
+ session: PipSession,
+ progress_bar: str,
+ ) -> None:
+ self._session = session
+ self._progress_bar = progress_bar
+
+ def __call__(
+ self, links: Iterable[Link], location: str
+ ) -> Iterable[Tuple[Link, Tuple[str, str]]]:
+ """Download the files given by links into location."""
+ for link in links:
+ try:
+ resp = _http_get_download(self._session, link)
+ except NetworkConnectionError as e:
+ assert e.response is not None
+ logger.critical(
+ "HTTP error %s while getting %s",
+ e.response.status_code,
+ link,
+ )
+ raise
+
+ filename = _get_http_response_filename(resp, link)
+ filepath = os.path.join(location, filename)
+
+ chunks = _prepare_download(resp, link, self._progress_bar)
+ with open(filepath, "wb") as content_file:
+ for chunk in chunks:
+ content_file.write(chunk)
+ content_type = resp.headers.get("Content-Type", "")
+ yield link, (filepath, content_type)
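
The Content-Disposition handling above is small enough to exercise on its own. A minimal standalone sketch (the header value is invented for illustration) of how `sanitize_content_filename` defuses a directory-traversal attempt:

    import email.message
    import os

    def parse_filename(content_disposition: str, default: str) -> str:
        # Same approach as parse_content_disposition above: feed the header
        # to the email parser to extract its "filename" parameter.
        m = email.message.Message()
        m["content-type"] = content_disposition
        filename = m.get_param("filename")
        if filename:
            # Keep only the basename so ".." components cannot escape
            # the download directory.
            filename = os.path.basename(str(filename))
        return filename or default

    print(parse_filename('attachment; filename="../../evil.whl"', "pkg.whl"))
    # -> evil.whl (the traversal components are stripped)
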
diff --git a/third_party/python/pip/pip/_internal/network/lazy_wheel.py b/third_party/python/pip/pip/_internal/network/lazy_wheel.py
new file mode 100644
index 0000000000..854a6fa1fd
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/network/lazy_wheel.py
@@ -0,0 +1,210 @@
+"""Lazy ZIP over HTTP"""
+
+__all__ = ["HTTPRangeRequestUnsupported", "dist_from_wheel_url"]
+
+from bisect import bisect_left, bisect_right
+from contextlib import contextmanager
+from tempfile import NamedTemporaryFile
+from typing import Any, Dict, Generator, List, Optional, Tuple
+from zipfile import BadZipfile, ZipFile
+
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
+
+from pip._internal.metadata import BaseDistribution, MemoryWheel, get_wheel_distribution
+from pip._internal.network.session import PipSession
+from pip._internal.network.utils import HEADERS, raise_for_status, response_chunks
+
+
+class HTTPRangeRequestUnsupported(Exception):
+ pass
+
+
+def dist_from_wheel_url(name: str, url: str, session: PipSession) -> BaseDistribution:
+ """Return a distribution object from the given wheel URL.
+
+ This uses HTTP range requests to only fetch the portion of the wheel
+ containing metadata, just enough for the object to be constructed.
+ If such requests are not supported, HTTPRangeRequestUnsupported
+ is raised.
+ """
+ with LazyZipOverHTTP(url, session) as zf:
+ # For read-only ZIP files, ZipFile only needs methods read,
+ # seek, seekable and tell, not the whole IO protocol.
+ wheel = MemoryWheel(zf.name, zf) # type: ignore
+        # After the context manager exits, wheel.name
+        # is intentionally an invalid file.
+ return get_wheel_distribution(wheel, canonicalize_name(name))
+
+
+class LazyZipOverHTTP:
+ """File-like object mapped to a ZIP file over HTTP.
+
+ This uses HTTP range requests to lazily fetch the file's content,
+ which is supposed to be fed to ZipFile. If such requests are not
+    supported by the server, HTTPRangeRequestUnsupported is raised
+    during initialization.
+ """
+
+ def __init__(
+ self, url: str, session: PipSession, chunk_size: int = CONTENT_CHUNK_SIZE
+ ) -> None:
+ head = session.head(url, headers=HEADERS)
+ raise_for_status(head)
+ assert head.status_code == 200
+ self._session, self._url, self._chunk_size = session, url, chunk_size
+ self._length = int(head.headers["Content-Length"])
+ self._file = NamedTemporaryFile()
+ self.truncate(self._length)
+ self._left: List[int] = []
+ self._right: List[int] = []
+ if "bytes" not in head.headers.get("Accept-Ranges", "none"):
+ raise HTTPRangeRequestUnsupported("range request is not supported")
+ self._check_zip()
+
+ @property
+ def mode(self) -> str:
+ """Opening mode, which is always rb."""
+ return "rb"
+
+ @property
+ def name(self) -> str:
+ """Path to the underlying file."""
+ return self._file.name
+
+ def seekable(self) -> bool:
+ """Return whether random access is supported, which is True."""
+ return True
+
+ def close(self) -> None:
+ """Close the file."""
+ self._file.close()
+
+ @property
+ def closed(self) -> bool:
+ """Whether the file is closed."""
+ return self._file.closed
+
+ def read(self, size: int = -1) -> bytes:
+ """Read up to size bytes from the object and return them.
+
+ As a convenience, if size is unspecified or -1,
+ all bytes until EOF are returned. Fewer than
+ size bytes may be returned if EOF is reached.
+ """
+ download_size = max(size, self._chunk_size)
+ start, length = self.tell(), self._length
+ stop = length if size < 0 else min(start + download_size, length)
+ start = max(0, stop - download_size)
+ self._download(start, stop - 1)
+ return self._file.read(size)
+
+ def readable(self) -> bool:
+ """Return whether the file is readable, which is True."""
+ return True
+
+ def seek(self, offset: int, whence: int = 0) -> int:
+ """Change stream position and return the new absolute position.
+
+        Seek to offset, relative to the position indicated by whence:
+        * 0: Start of stream (the default); offset should be >= 0.
+        * 1: Current position; offset may be negative.
+        * 2: End of stream; offset is usually negative.
+ """
+ return self._file.seek(offset, whence)
+
+ def tell(self) -> int:
+ """Return the current position."""
+ return self._file.tell()
+
+ def truncate(self, size: Optional[int] = None) -> int:
+ """Resize the stream to the given size in bytes.
+
+        If size is unspecified, resize to the current position.
+ The current stream position isn't changed.
+
+ Return the new file size.
+ """
+ return self._file.truncate(size)
+
+ def writable(self) -> bool:
+ """Return False."""
+ return False
+
+ def __enter__(self) -> "LazyZipOverHTTP":
+ self._file.__enter__()
+ return self
+
+ def __exit__(self, *exc: Any) -> None:
+ self._file.__exit__(*exc)
+
+ @contextmanager
+ def _stay(self) -> Generator[None, None, None]:
+ """Return a context manager keeping the position.
+
+        At the end of the block, seek back to the original position.
+ """
+ pos = self.tell()
+ try:
+ yield
+ finally:
+ self.seek(pos)
+
+ def _check_zip(self) -> None:
+ """Check and download until the file is a valid ZIP."""
+ end = self._length - 1
+ for start in reversed(range(0, end, self._chunk_size)):
+ self._download(start, end)
+ with self._stay():
+ try:
+ # For read-only ZIP files, ZipFile only needs
+ # methods read, seek, seekable and tell.
+ ZipFile(self) # type: ignore
+ except BadZipfile:
+ pass
+ else:
+ break
+
+ def _stream_response(
+ self, start: int, end: int, base_headers: Dict[str, str] = HEADERS
+ ) -> Response:
+ """Return HTTP response to a range request from start to end."""
+ headers = base_headers.copy()
+ headers["Range"] = f"bytes={start}-{end}"
+ # TODO: Get range requests to be correctly cached
+ headers["Cache-Control"] = "no-cache"
+ return self._session.get(self._url, headers=headers, stream=True)
+
+ def _merge(
+ self, start: int, end: int, left: int, right: int
+ ) -> Generator[Tuple[int, int], None, None]:
+ """Return a generator of intervals to be fetched.
+
+ Args:
+ start (int): Start of needed interval
+ end (int): End of needed interval
+ left (int): Index of first overlapping downloaded data
+ right (int): Index after last overlapping downloaded data
+ """
+ lslice, rslice = self._left[left:right], self._right[left:right]
+ i = start = min([start] + lslice[:1])
+ end = max([end] + rslice[-1:])
+ for j, k in zip(lslice, rslice):
+ if j > i:
+ yield i, j - 1
+ i = k + 1
+ if i <= end:
+ yield i, end
+ self._left[left:right], self._right[left:right] = [start], [end]
+
+ def _download(self, start: int, end: int) -> None:
+ """Download bytes from start to end inclusively."""
+ with self._stay():
+ left = bisect_left(self._right, start)
+ right = bisect_right(self._left, end)
+ for start, end in self._merge(start, end, left, right):
+ response = self._stream_response(start, end)
+ response.raise_for_status()
+ self.seek(start)
+ for chunk in response_chunks(response, self._chunk_size):
+ self._file.write(chunk)
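
A usage sketch for the module above. The wheel URL is hypothetical; a real call needs a server that honors range requests, and only the tail of the ZIP (the central directory plus the *.dist-info entries) is actually transferred:

    from pip._internal.network.lazy_wheel import (
        HTTPRangeRequestUnsupported,
        dist_from_wheel_url,
    )
    from pip._internal.network.session import PipSession

    session = PipSession()
    url = "https://files.example.org/demo-1.0-py3-none-any.whl"  # hypothetical
    try:
        dist = dist_from_wheel_url("demo", url, session)
        print(dist.canonical_name, dist.version)
    except HTTPRangeRequestUnsupported:
        # Fall back to downloading the whole wheel.
        pass
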
diff --git a/third_party/python/pip/pip/_internal/network/session.py b/third_party/python/pip/pip/_internal/network/session.py
new file mode 100644
index 0000000000..e512ac7846
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/network/session.py
@@ -0,0 +1,518 @@
+"""PipSession and supporting code, containing all pip-specific
+network request configuration and behavior.
+"""
+
+import email.utils
+import io
+import ipaddress
+import json
+import logging
+import mimetypes
+import os
+import platform
+import shutil
+import subprocess
+import sys
+import urllib.parse
+import warnings
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Generator,
+ List,
+ Mapping,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
+
+from pip._vendor import requests, urllib3
+from pip._vendor.cachecontrol import CacheControlAdapter as _BaseCacheControlAdapter
+from pip._vendor.requests.adapters import DEFAULT_POOLBLOCK, BaseAdapter
+from pip._vendor.requests.adapters import HTTPAdapter as _BaseHTTPAdapter
+from pip._vendor.requests.models import PreparedRequest, Response
+from pip._vendor.requests.structures import CaseInsensitiveDict
+from pip._vendor.urllib3.connectionpool import ConnectionPool
+from pip._vendor.urllib3.exceptions import InsecureRequestWarning
+
+from pip import __version__
+from pip._internal.metadata import get_default_environment
+from pip._internal.models.link import Link
+from pip._internal.network.auth import MultiDomainBasicAuth
+from pip._internal.network.cache import SafeFileCache
+
+# Import ssl from compat so the initial import occurs in only one place.
+from pip._internal.utils.compat import has_tls
+from pip._internal.utils.glibc import libc_ver
+from pip._internal.utils.misc import build_url_from_netloc, parse_netloc
+from pip._internal.utils.urls import url_to_path
+
+if TYPE_CHECKING:
+ from ssl import SSLContext
+
+ from pip._vendor.urllib3.poolmanager import PoolManager
+
+
+logger = logging.getLogger(__name__)
+
+SecureOrigin = Tuple[str, str, Optional[Union[int, str]]]
+
+
+# Ignore warning raised when using --trusted-host.
+warnings.filterwarnings("ignore", category=InsecureRequestWarning)
+
+
+SECURE_ORIGINS: List[SecureOrigin] = [
+ # protocol, hostname, port
+ # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
+ ("https", "*", "*"),
+ ("*", "localhost", "*"),
+ ("*", "127.0.0.0/8", "*"),
+ ("*", "::1/128", "*"),
+ ("file", "*", None),
+ # ssh is always secure.
+ ("ssh", "*", "*"),
+]
+
+
+# These are environment variables present when running under various
+# CI systems. For each variable, some CI systems that use the variable
+# are indicated. The collection was chosen so that for each of a number
+# of popular systems, at least one of the environment variables is used.
+# This list is used to provide some indication of and lower bound for
+# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive.
+# For more background, see: https://github.com/pypa/pip/issues/5499
+CI_ENVIRONMENT_VARIABLES = (
+ # Azure Pipelines
+ "BUILD_BUILDID",
+ # Jenkins
+ "BUILD_ID",
+ # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI
+ "CI",
+ # Explicit environment variable.
+ "PIP_IS_CI",
+)
+
+
+def looks_like_ci() -> bool:
+ """
+ Return whether it looks like pip is running under CI.
+ """
+ # We don't use the method of checking for a tty (e.g. using isatty())
+ # because some CI systems mimic a tty (e.g. Travis CI). Thus that
+ # method doesn't provide definitive information in either direction.
+ return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES)
+
+
+def user_agent() -> str:
+ """
+ Return a string representing the user agent.
+ """
+ data: Dict[str, Any] = {
+ "installer": {"name": "pip", "version": __version__},
+ "python": platform.python_version(),
+ "implementation": {
+ "name": platform.python_implementation(),
+ },
+ }
+
+ if data["implementation"]["name"] == "CPython":
+ data["implementation"]["version"] = platform.python_version()
+ elif data["implementation"]["name"] == "PyPy":
+ pypy_version_info = sys.pypy_version_info # type: ignore
+ if pypy_version_info.releaselevel == "final":
+ pypy_version_info = pypy_version_info[:3]
+ data["implementation"]["version"] = ".".join(
+ [str(x) for x in pypy_version_info]
+ )
+ elif data["implementation"]["name"] == "Jython":
+ # Complete Guess
+ data["implementation"]["version"] = platform.python_version()
+ elif data["implementation"]["name"] == "IronPython":
+ # Complete Guess
+ data["implementation"]["version"] = platform.python_version()
+
+ if sys.platform.startswith("linux"):
+ from pip._vendor import distro
+
+ linux_distribution = distro.name(), distro.version(), distro.codename()
+ distro_infos: Dict[str, Any] = dict(
+ filter(
+ lambda x: x[1],
+ zip(["name", "version", "id"], linux_distribution),
+ )
+ )
+ libc = dict(
+ filter(
+ lambda x: x[1],
+ zip(["lib", "version"], libc_ver()),
+ )
+ )
+ if libc:
+ distro_infos["libc"] = libc
+ if distro_infos:
+ data["distro"] = distro_infos
+
+ if sys.platform.startswith("darwin") and platform.mac_ver()[0]:
+ data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]}
+
+ if platform.system():
+ data.setdefault("system", {})["name"] = platform.system()
+
+ if platform.release():
+ data.setdefault("system", {})["release"] = platform.release()
+
+ if platform.machine():
+ data["cpu"] = platform.machine()
+
+ if has_tls():
+ import _ssl as ssl
+
+ data["openssl_version"] = ssl.OPENSSL_VERSION
+
+ setuptools_dist = get_default_environment().get_distribution("setuptools")
+ if setuptools_dist is not None:
+ data["setuptools_version"] = str(setuptools_dist.version)
+
+ if shutil.which("rustc") is not None:
+ # If for any reason `rustc --version` fails, silently ignore it
+ try:
+ rustc_output = subprocess.check_output(
+ ["rustc", "--version"], stderr=subprocess.STDOUT, timeout=0.5
+ )
+ except Exception:
+ pass
+ else:
+ if rustc_output.startswith(b"rustc "):
+ # The format of `rustc --version` is:
+ # `b'rustc 1.52.1 (9bc8c42bb 2021-05-09)\n'`
+ # We extract just the middle (1.52.1) part
+ data["rustc_version"] = rustc_output.split(b" ")[1].decode()
+
+ # Use None rather than False so as not to give the impression that
+ # pip knows it is not being run under CI. Rather, it is a null or
+ # inconclusive result. Also, we include some value rather than no
+ # value to make it easier to know that the check has been run.
+ data["ci"] = True if looks_like_ci() else None
+
+ user_data = os.environ.get("PIP_USER_AGENT_USER_DATA")
+ if user_data is not None:
+ data["user_data"] = user_data
+
+ return "{data[installer][name]}/{data[installer][version]} {json}".format(
+ data=data,
+ json=json.dumps(data, separators=(",", ":"), sort_keys=True),
+ )
+
+
+class LocalFSAdapter(BaseAdapter):
+ def send(
+ self,
+ request: PreparedRequest,
+ stream: bool = False,
+ timeout: Optional[Union[float, Tuple[float, float]]] = None,
+ verify: Union[bool, str] = True,
+ cert: Optional[Union[str, Tuple[str, str]]] = None,
+ proxies: Optional[Mapping[str, str]] = None,
+ ) -> Response:
+ pathname = url_to_path(request.url)
+
+ resp = Response()
+ resp.status_code = 200
+ resp.url = request.url
+
+ try:
+ stats = os.stat(pathname)
+ except OSError as exc:
+            # Format the raised exception as an io.BytesIO object,
+            # to return a better error message:
+ resp.status_code = 404
+ resp.reason = type(exc).__name__
+ resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode("utf8"))
+ else:
+ modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
+ content_type = mimetypes.guess_type(pathname)[0] or "text/plain"
+ resp.headers = CaseInsensitiveDict(
+ {
+ "Content-Type": content_type,
+ "Content-Length": stats.st_size,
+ "Last-Modified": modified,
+ }
+ )
+
+ resp.raw = open(pathname, "rb")
+ resp.close = resp.raw.close
+
+ return resp
+
+ def close(self) -> None:
+ pass
+
+
+class _SSLContextAdapterMixin:
+ """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters.
+
+ The additional argument is forwarded directly to the pool manager. This allows us
+ to dynamically decide what SSL store to use at runtime, which is used to implement
+ the optional ``truststore`` backend.
+ """
+
+ def __init__(
+ self,
+ *,
+ ssl_context: Optional["SSLContext"] = None,
+ **kwargs: Any,
+ ) -> None:
+ self._ssl_context = ssl_context
+ super().__init__(**kwargs)
+
+ def init_poolmanager(
+ self,
+ connections: int,
+ maxsize: int,
+ block: bool = DEFAULT_POOLBLOCK,
+ **pool_kwargs: Any,
+ ) -> "PoolManager":
+ if self._ssl_context is not None:
+ pool_kwargs.setdefault("ssl_context", self._ssl_context)
+ return super().init_poolmanager( # type: ignore[misc]
+ connections=connections,
+ maxsize=maxsize,
+ block=block,
+ **pool_kwargs,
+ )
+
+
+class HTTPAdapter(_SSLContextAdapterMixin, _BaseHTTPAdapter):
+ pass
+
+
+class CacheControlAdapter(_SSLContextAdapterMixin, _BaseCacheControlAdapter):
+ pass
+
+
+class InsecureHTTPAdapter(HTTPAdapter):
+ def cert_verify(
+ self,
+ conn: ConnectionPool,
+ url: str,
+ verify: Union[bool, str],
+ cert: Optional[Union[str, Tuple[str, str]]],
+ ) -> None:
+ super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
+
+
+class InsecureCacheControlAdapter(CacheControlAdapter):
+ def cert_verify(
+ self,
+ conn: ConnectionPool,
+ url: str,
+ verify: Union[bool, str],
+ cert: Optional[Union[str, Tuple[str, str]]],
+ ) -> None:
+ super().cert_verify(conn=conn, url=url, verify=False, cert=cert)
+
+
+class PipSession(requests.Session):
+
+ timeout: Optional[int] = None
+
+ def __init__(
+ self,
+ *args: Any,
+ retries: int = 0,
+ cache: Optional[str] = None,
+ trusted_hosts: Sequence[str] = (),
+ index_urls: Optional[List[str]] = None,
+ ssl_context: Optional["SSLContext"] = None,
+ **kwargs: Any,
+ ) -> None:
+ """
+ :param trusted_hosts: Domains not to emit warnings for when not using
+ HTTPS.
+ """
+ super().__init__(*args, **kwargs)
+
+ # Namespace the attribute with "pip_" just in case to prevent
+ # possible conflicts with the base class.
+ self.pip_trusted_origins: List[Tuple[str, Optional[int]]] = []
+
+ # Attach our User Agent to the request
+ self.headers["User-Agent"] = user_agent()
+
+ # Attach our Authentication handler to the session
+ self.auth = MultiDomainBasicAuth(index_urls=index_urls)
+
+ # Create our urllib3.Retry instance which will allow us to customize
+ # how we handle retries.
+ retries = urllib3.Retry(
+ # Set the total number of retries that a particular request can
+ # have.
+ total=retries,
+ # A 503 error from PyPI typically means that the Fastly -> Origin
+ # connection got interrupted in some way. A 503 error in general
+ # is typically considered a transient error so we'll go ahead and
+ # retry it.
+ # A 500 may indicate transient error in Amazon S3
+ # A 520 or 527 - may indicate transient error in CloudFlare
+ status_forcelist=[500, 503, 520, 527],
+ # Add a small amount of back off between failed requests in
+ # order to prevent hammering the service.
+ backoff_factor=0.25,
+ ) # type: ignore
+
+        # Our InsecureHTTPAdapter disables HTTPS validation. It does not
+        # support caching, so we'll use it for all http:// URLs.
+        # If caching is disabled, we will also use it for
+        # https:// hosts for which we've chosen to ignore
+        # TLS errors (trusted-hosts).
+ insecure_adapter = InsecureHTTPAdapter(max_retries=retries)
+
+ # We want to _only_ cache responses on securely fetched origins or when
+ # the host is specified as trusted. We do this because
+        # we can't validate the response of an insecurely fetched or untrusted
+ # origin, and we don't want someone to be able to poison the cache and
+ # require manual eviction from the cache to fix it.
+ if cache:
+ secure_adapter = CacheControlAdapter(
+ cache=SafeFileCache(cache),
+ max_retries=retries,
+ ssl_context=ssl_context,
+ )
+ self._trusted_host_adapter = InsecureCacheControlAdapter(
+ cache=SafeFileCache(cache),
+ max_retries=retries,
+ )
+ else:
+ secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context)
+ self._trusted_host_adapter = insecure_adapter
+
+ self.mount("https://", secure_adapter)
+ self.mount("http://", insecure_adapter)
+
+ # Enable file:// urls
+ self.mount("file://", LocalFSAdapter())
+
+ for host in trusted_hosts:
+ self.add_trusted_host(host, suppress_logging=True)
+
+ def update_index_urls(self, new_index_urls: List[str]) -> None:
+ """
+ :param new_index_urls: New index urls to update the authentication
+ handler with.
+ """
+ self.auth.index_urls = new_index_urls
+
+ def add_trusted_host(
+ self, host: str, source: Optional[str] = None, suppress_logging: bool = False
+ ) -> None:
+ """
+ :param host: It is okay to provide a host that has previously been
+ added.
+ :param source: An optional source string, for logging where the host
+ string came from.
+ """
+ if not suppress_logging:
+ msg = f"adding trusted host: {host!r}"
+ if source is not None:
+ msg += f" (from {source})"
+ logger.info(msg)
+
+ host_port = parse_netloc(host)
+ if host_port not in self.pip_trusted_origins:
+ self.pip_trusted_origins.append(host_port)
+
+ self.mount(
+ build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter
+ )
+ self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter)
+ if not host_port[1]:
+ self.mount(
+ build_url_from_netloc(host, scheme="http") + ":",
+ self._trusted_host_adapter,
+ )
+ # Mount wildcard ports for the same host.
+ self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter)
+
+ def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]:
+ yield from SECURE_ORIGINS
+ for host, port in self.pip_trusted_origins:
+ yield ("*", host, "*" if port is None else port)
+
+ def is_secure_origin(self, location: Link) -> bool:
+ # Determine if this url used a secure transport mechanism
+ parsed = urllib.parse.urlparse(str(location))
+ origin_protocol, origin_host, origin_port = (
+ parsed.scheme,
+ parsed.hostname,
+ parsed.port,
+ )
+
+        # Determine the protocol to match against.
+        # Don't count the repository type as part of the protocol: in
+        # cases such as "git+ssh", only use "ssh". (I.e., only verify against
+        # the last scheme.)
+ origin_protocol = origin_protocol.rsplit("+", 1)[-1]
+
+ # Determine if our origin is a secure origin by looking through our
+ # hardcoded list of secure origins, as well as any additional ones
+ # configured on this PackageFinder instance.
+ for secure_origin in self.iter_secure_origins():
+ secure_protocol, secure_host, secure_port = secure_origin
+ if origin_protocol != secure_protocol and secure_protocol != "*":
+ continue
+
+ try:
+ addr = ipaddress.ip_address(origin_host or "")
+ network = ipaddress.ip_network(secure_host)
+ except ValueError:
+                # We don't have both a valid address and a valid network, so
+ # we'll check this origin against hostnames.
+ if (
+ origin_host
+ and origin_host.lower() != secure_host.lower()
+ and secure_host != "*"
+ ):
+ continue
+ else:
+ # We have a valid address and network, so see if the address
+ # is contained within the network.
+ if addr not in network:
+ continue
+
+ # Check to see if the port matches.
+ if (
+ origin_port != secure_port
+ and secure_port != "*"
+ and secure_port is not None
+ ):
+ continue
+
+ # If we've gotten here, then this origin matches the current
+ # secure origin and we should return True
+ return True
+
+ # If we've gotten to this point, then the origin isn't secure and we
+ # will not accept it as a valid location to search. We will however
+ # log a warning that we are ignoring it.
+ logger.warning(
+ "The repository located at %s is not a trusted or secure host and "
+ "is being ignored. If this repository is available via HTTPS we "
+ "recommend you use HTTPS instead, otherwise you may silence "
+ "this warning and allow it anyway with '--trusted-host %s'.",
+ origin_host,
+ origin_host,
+ )
+
+ return False
+
+ def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response:
+ # Allow setting a default timeout on a session
+ kwargs.setdefault("timeout", self.timeout)
+ # Allow setting a default proxies on a session
+ kwargs.setdefault("proxies", self.proxies)
+
+ # Dispatch the actual request
+ return super().request(method, url, *args, **kwargs)
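
A sketch of how the secure-origin rules above play out; the host names are invented:

    from pip._internal.models.link import Link
    from pip._internal.network.session import PipSession

    session = PipSession(retries=3, trusted_hosts=["internal.example.com"])

    # https is always a secure origin.
    print(session.is_secure_origin(Link("https://pypi.org/simple/")))  # True
    # Plain http is accepted for hosts registered as trusted.
    print(session.is_secure_origin(Link("http://internal.example.com/simple/")))  # True
    # Everything else is rejected, with the warning logged above.
    print(session.is_secure_origin(Link("http://mirror.example.net/simple/")))  # False
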
diff --git a/third_party/python/pip/pip/_internal/network/utils.py b/third_party/python/pip/pip/_internal/network/utils.py
new file mode 100644
index 0000000000..134848ae52
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/network/utils.py
@@ -0,0 +1,96 @@
+from typing import Dict, Generator
+
+from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response
+
+from pip._internal.exceptions import NetworkConnectionError
+
+# The following comments and HTTP headers were originally added by
+# Donald Stufft in git commit 22c562429a61bb77172039e480873fb239dd8c03.
+#
+# We use Accept-Encoding: identity here because requests defaults to
+# accepting compressed responses. This breaks in a variety of ways
+# depending on how the server is configured.
+# - Some servers will notice that the file isn't a compressible file
+# and will leave the file alone and with an empty Content-Encoding
+# - Some servers will notice that the file is already compressed and
+# will leave the file alone, adding a Content-Encoding: gzip header
+# - Some servers won't notice anything at all and will take a file
+# that's already been compressed and compress it again, and set
+# the Content-Encoding: gzip header
+# By setting this to request only the identity encoding, we're hoping
+# to eliminate the third case. Hopefully there does not exist a server
+# which, when given a file, notices that it is already compressed, sees
+# that the client is not asking for a compressed file, and decompresses
+# it before sending; if such a server exists, it will never be
+# possible to make this work.
+HEADERS: Dict[str, str] = {"Accept-Encoding": "identity"}
+
+
+def raise_for_status(resp: Response) -> None:
+ http_error_msg = ""
+ if isinstance(resp.reason, bytes):
+ # We attempt to decode utf-8 first because some servers
+ # choose to localize their reason strings. If the string
+ # isn't utf-8, we fall back to iso-8859-1 for all other
+ # encodings.
+ try:
+ reason = resp.reason.decode("utf-8")
+ except UnicodeDecodeError:
+ reason = resp.reason.decode("iso-8859-1")
+ else:
+ reason = resp.reason
+
+ if 400 <= resp.status_code < 500:
+ http_error_msg = (
+ f"{resp.status_code} Client Error: {reason} for url: {resp.url}"
+ )
+
+ elif 500 <= resp.status_code < 600:
+ http_error_msg = (
+ f"{resp.status_code} Server Error: {reason} for url: {resp.url}"
+ )
+
+ if http_error_msg:
+ raise NetworkConnectionError(http_error_msg, response=resp)
+
+
+def response_chunks(
+ response: Response, chunk_size: int = CONTENT_CHUNK_SIZE
+) -> Generator[bytes, None, None]:
+ """Given a requests Response, provide the data chunks."""
+ try:
+ # Special case for urllib3.
+ for chunk in response.raw.stream(
+ chunk_size,
+ # We use decode_content=False here because we don't
+ # want urllib3 to mess with the raw bytes we get
+ # from the server. If we decompress inside of
+ # urllib3 then we cannot verify the checksum
+ # because the checksum will be of the compressed
+ # file. This breakage will only occur if the
+ # server adds a Content-Encoding header, which
+ # depends on how the server was configured:
+ # - Some servers will notice that the file isn't a
+ # compressible file and will leave the file alone
+ # and with an empty Content-Encoding
+ # - Some servers will notice that the file is
+ # already compressed and will leave the file
+ # alone and will add a Content-Encoding: gzip
+ # header
+ # - Some servers won't notice anything at all and
+ # will take a file that's already been compressed
+ # and compress it again and set the
+ # Content-Encoding: gzip header
+ #
+ # By setting this not to decode automatically we
+ # hope to eliminate problems with the second case.
+ decode_content=False,
+ ):
+ yield chunk
+ except AttributeError:
+ # Standard file-like object.
+ while True:
+ chunk = response.raw.read(chunk_size)
+ if not chunk:
+ break
+ yield chunk
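
A download loop stitched together from the helpers above (the URL is hypothetical). Requesting the identity encoding and passing decode_content=False are what keep the received bytes byte-identical to the published file, so checksums can be verified:

    from pip._internal.network.session import PipSession
    from pip._internal.network.utils import (
        HEADERS,
        raise_for_status,
        response_chunks,
    )

    session = PipSession()
    resp = session.get(
        "https://files.example.org/demo-1.0.tar.gz",  # hypothetical
        headers=HEADERS,  # Accept-Encoding: identity, per the comment above
        stream=True,
    )
    raise_for_status(resp)  # NetworkConnectionError on 4xx/5xx

    with open("demo-1.0.tar.gz", "wb") as f:
        for chunk in response_chunks(resp):
            f.write(chunk)  # raw, undecoded bytes
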
diff --git a/third_party/python/pip/pip/_internal/network/xmlrpc.py b/third_party/python/pip/pip/_internal/network/xmlrpc.py
new file mode 100644
index 0000000000..4a7d55d0e5
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/network/xmlrpc.py
@@ -0,0 +1,60 @@
+"""xmlrpclib.Transport implementation
+"""
+
+import logging
+import urllib.parse
+import xmlrpc.client
+from typing import TYPE_CHECKING, Tuple
+
+from pip._internal.exceptions import NetworkConnectionError
+from pip._internal.network.session import PipSession
+from pip._internal.network.utils import raise_for_status
+
+if TYPE_CHECKING:
+ from xmlrpc.client import _HostType, _Marshallable
+
+logger = logging.getLogger(__name__)
+
+
+class PipXmlrpcTransport(xmlrpc.client.Transport):
+    """Provide an `xmlrpclib.Transport` implementation via a `PipSession`
+ object.
+ """
+
+ def __init__(
+ self, index_url: str, session: PipSession, use_datetime: bool = False
+ ) -> None:
+ super().__init__(use_datetime)
+ index_parts = urllib.parse.urlparse(index_url)
+ self._scheme = index_parts.scheme
+ self._session = session
+
+ def request(
+ self,
+ host: "_HostType",
+ handler: str,
+ request_body: bytes,
+ verbose: bool = False,
+ ) -> Tuple["_Marshallable", ...]:
+ assert isinstance(host, str)
+ parts = (self._scheme, host, handler, None, None, None)
+ url = urllib.parse.urlunparse(parts)
+ try:
+ headers = {"Content-Type": "text/xml"}
+ response = self._session.post(
+ url,
+ data=request_body,
+ headers=headers,
+ stream=True,
+ )
+ raise_for_status(response)
+ self.verbose = verbose
+ return self.parse_response(response.raw)
+ except NetworkConnectionError as exc:
+ assert exc.response
+ logger.critical(
+ "HTTP error %s while getting %s",
+ exc.response.status_code,
+ url,
+ )
+ raise
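
A sketch wiring the transport above into the standard-library client. It assumes PyPI's XML-RPC endpoint still serves `changelog_last_serial`; the point is only that every HTTP call goes through the PipSession (retries, proxies, auth, user agent):

    import xmlrpc.client

    from pip._internal.network.session import PipSession
    from pip._internal.network.xmlrpc import PipXmlrpcTransport

    index_url = "https://pypi.org/pypi"
    session = PipSession()
    transport = PipXmlrpcTransport(index_url, session)
    proxy = xmlrpc.client.ServerProxy(index_url, transport=transport)
    print(proxy.changelog_last_serial())
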
diff --git a/third_party/python/pip/pip/_internal/operations/__init__.py b/third_party/python/pip/pip/_internal/operations/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/__init__.py
diff --git a/third_party/python/pip/pip/_internal/operations/build/__init__.py b/third_party/python/pip/pip/_internal/operations/build/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/build/__init__.py
diff --git a/third_party/python/pip/pip/_internal/operations/build/build_tracker.py b/third_party/python/pip/pip/_internal/operations/build/build_tracker.py
new file mode 100644
index 0000000000..6621549b84
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/build/build_tracker.py
@@ -0,0 +1,124 @@
+import contextlib
+import hashlib
+import logging
+import os
+from types import TracebackType
+from typing import Dict, Generator, Optional, Set, Type, Union
+
+from pip._internal.models.link import Link
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.temp_dir import TempDirectory
+
+logger = logging.getLogger(__name__)
+
+
+@contextlib.contextmanager
+def update_env_context_manager(**changes: str) -> Generator[None, None, None]:
+ target = os.environ
+
+ # Save values from the target and change them.
+ non_existent_marker = object()
+ saved_values: Dict[str, Union[object, str]] = {}
+ for name, new_value in changes.items():
+ try:
+ saved_values[name] = target[name]
+ except KeyError:
+ saved_values[name] = non_existent_marker
+ target[name] = new_value
+
+ try:
+ yield
+ finally:
+ # Restore original values in the target.
+ for name, original_value in saved_values.items():
+ if original_value is non_existent_marker:
+ del target[name]
+ else:
+ assert isinstance(original_value, str) # for mypy
+ target[name] = original_value
+
+
+@contextlib.contextmanager
+def get_build_tracker() -> Generator["BuildTracker", None, None]:
+ root = os.environ.get("PIP_BUILD_TRACKER")
+ with contextlib.ExitStack() as ctx:
+ if root is None:
+ root = ctx.enter_context(TempDirectory(kind="build-tracker")).path
+ ctx.enter_context(update_env_context_manager(PIP_BUILD_TRACKER=root))
+ logger.debug("Initialized build tracking at %s", root)
+
+ with BuildTracker(root) as tracker:
+ yield tracker
+
+
+class BuildTracker:
+ def __init__(self, root: str) -> None:
+ self._root = root
+ self._entries: Set[InstallRequirement] = set()
+ logger.debug("Created build tracker: %s", self._root)
+
+ def __enter__(self) -> "BuildTracker":
+ logger.debug("Entered build tracker: %s", self._root)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.cleanup()
+
+ def _entry_path(self, link: Link) -> str:
+ hashed = hashlib.sha224(link.url_without_fragment.encode()).hexdigest()
+ return os.path.join(self._root, hashed)
+
+ def add(self, req: InstallRequirement) -> None:
+ """Add an InstallRequirement to build tracking."""
+
+ assert req.link
+ # Get the file to write information about this requirement.
+ entry_path = self._entry_path(req.link)
+
+ # Try reading from the file. If it exists and can be read from, a build
+ # is already in progress, so a LookupError is raised.
+ try:
+ with open(entry_path) as fp:
+ contents = fp.read()
+ except FileNotFoundError:
+ pass
+ else:
+ message = "{} is already being built: {}".format(req.link, contents)
+ raise LookupError(message)
+
+ # If we're here, req should really not be building already.
+ assert req not in self._entries
+
+ # Start tracking this requirement.
+ with open(entry_path, "w", encoding="utf-8") as fp:
+ fp.write(str(req))
+ self._entries.add(req)
+
+ logger.debug("Added %s to build tracker %r", req, self._root)
+
+ def remove(self, req: InstallRequirement) -> None:
+ """Remove an InstallRequirement from build tracking."""
+
+ assert req.link
+ # Delete the created file and the corresponding entries.
+ os.unlink(self._entry_path(req.link))
+ self._entries.remove(req)
+
+ logger.debug("Removed %s from build tracker %r", req, self._root)
+
+ def cleanup(self) -> None:
+ for req in set(self._entries):
+ self.remove(req)
+
+ logger.debug("Removed build tracker: %r", self._root)
+
+ @contextlib.contextmanager
+ def track(self, req: InstallRequirement) -> Generator[None, None, None]:
+ self.add(req)
+ yield
+ self.remove(req)
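
The environment round-tripping done by `update_env_context_manager` above is easy to see in a standalone run (assuming PIP_BUILD_TRACKER is unset to begin with). This is also how `get_build_tracker` shares one tracker root with the child pip processes spawned for builds:

    import os

    from pip._internal.operations.build.build_tracker import (
        update_env_context_manager,
    )

    with update_env_context_manager(PIP_BUILD_TRACKER="/tmp/tracker-demo"):
        print(os.environ["PIP_BUILD_TRACKER"])  # /tmp/tracker-demo
    print("PIP_BUILD_TRACKER" in os.environ)    # False: prior state restored
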
diff --git a/third_party/python/pip/pip/_internal/operations/build/metadata.py b/third_party/python/pip/pip/_internal/operations/build/metadata.py
new file mode 100644
index 0000000000..c66ac354de
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/build/metadata.py
@@ -0,0 +1,39 @@
+"""Metadata generation logic for source distributions.
+"""
+
+import os
+
+from pip._vendor.pyproject_hooks import BuildBackendHookCaller
+
+from pip._internal.build_env import BuildEnvironment
+from pip._internal.exceptions import (
+ InstallationSubprocessError,
+ MetadataGenerationFailed,
+)
+from pip._internal.utils.subprocess import runner_with_spinner_message
+from pip._internal.utils.temp_dir import TempDirectory
+
+
+def generate_metadata(
+ build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str
+) -> str:
+ """Generate metadata using mechanisms described in PEP 517.
+
+ Returns the generated metadata directory.
+ """
+ metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True)
+
+ metadata_dir = metadata_tmpdir.path
+
+ with build_env:
+ # Note that BuildBackendHookCaller implements a fallback for
+ # prepare_metadata_for_build_wheel, so we don't have to
+ # consider the possibility that this hook doesn't exist.
+ runner = runner_with_spinner_message("Preparing metadata (pyproject.toml)")
+ with backend.subprocess_runner(runner):
+ try:
+ distinfo_dir = backend.prepare_metadata_for_build_wheel(metadata_dir)
+ except InstallationSubprocessError as error:
+ raise MetadataGenerationFailed(package_details=details) from error
+
+ return os.path.join(metadata_dir, distinfo_dir)
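
A stripped-down sketch of the hook call that `generate_metadata` wraps, minus the build environment, spinner runner, and error translation. The `./pkg` source tree is hypothetical and assumed to declare the setuptools backend in its pyproject.toml:

    from pip._vendor.pyproject_hooks import BuildBackendHookCaller

    backend = BuildBackendHookCaller("./pkg", build_backend="setuptools.build_meta")
    # PEP 517: the backend writes a *.dist-info directory into the given
    # directory and returns that directory's name.
    distinfo_dir = backend.prepare_metadata_for_build_wheel("/tmp/meta")
    print(distinfo_dir)  # e.g. pkg-1.0.dist-info
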
diff --git a/third_party/python/pip/pip/_internal/operations/build/metadata_editable.py b/third_party/python/pip/pip/_internal/operations/build/metadata_editable.py
new file mode 100644
index 0000000000..27c69f0d1e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/build/metadata_editable.py
@@ -0,0 +1,41 @@
+"""Metadata generation logic for source distributions.
+"""
+
+import os
+
+from pip._vendor.pyproject_hooks import BuildBackendHookCaller
+
+from pip._internal.build_env import BuildEnvironment
+from pip._internal.exceptions import (
+ InstallationSubprocessError,
+ MetadataGenerationFailed,
+)
+from pip._internal.utils.subprocess import runner_with_spinner_message
+from pip._internal.utils.temp_dir import TempDirectory
+
+
+def generate_editable_metadata(
+ build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str
+) -> str:
+ """Generate metadata using mechanisms described in PEP 660.
+
+ Returns the generated metadata directory.
+ """
+ metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True)
+
+ metadata_dir = metadata_tmpdir.path
+
+ with build_env:
+ # Note that BuildBackendHookCaller implements a fallback for
+ # prepare_metadata_for_build_wheel/editable, so we don't have to
+ # consider the possibility that this hook doesn't exist.
+ runner = runner_with_spinner_message(
+ "Preparing editable metadata (pyproject.toml)"
+ )
+ with backend.subprocess_runner(runner):
+ try:
+ distinfo_dir = backend.prepare_metadata_for_build_editable(metadata_dir)
+ except InstallationSubprocessError as error:
+ raise MetadataGenerationFailed(package_details=details) from error
+
+ return os.path.join(metadata_dir, distinfo_dir)
diff --git a/third_party/python/pip/pip/_internal/operations/build/metadata_legacy.py b/third_party/python/pip/pip/_internal/operations/build/metadata_legacy.py
new file mode 100644
index 0000000000..e60988d643
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/build/metadata_legacy.py
@@ -0,0 +1,74 @@
+"""Metadata generation logic for legacy source distributions.
+"""
+
+import logging
+import os
+
+from pip._internal.build_env import BuildEnvironment
+from pip._internal.cli.spinners import open_spinner
+from pip._internal.exceptions import (
+ InstallationError,
+ InstallationSubprocessError,
+ MetadataGenerationFailed,
+)
+from pip._internal.utils.setuptools_build import make_setuptools_egg_info_args
+from pip._internal.utils.subprocess import call_subprocess
+from pip._internal.utils.temp_dir import TempDirectory
+
+logger = logging.getLogger(__name__)
+
+
+def _find_egg_info(directory: str) -> str:
+ """Find an .egg-info subdirectory in `directory`."""
+ filenames = [f for f in os.listdir(directory) if f.endswith(".egg-info")]
+
+ if not filenames:
+ raise InstallationError(f"No .egg-info directory found in {directory}")
+
+ if len(filenames) > 1:
+ raise InstallationError(
+ "More than one .egg-info directory found in {}".format(directory)
+ )
+
+ return os.path.join(directory, filenames[0])
+
+
+def generate_metadata(
+ build_env: BuildEnvironment,
+ setup_py_path: str,
+ source_dir: str,
+ isolated: bool,
+ details: str,
+) -> str:
+    """Generate metadata using setup.py-based de facto mechanisms.
+
+ Returns the generated metadata directory.
+ """
+ logger.debug(
+ "Running setup.py (path:%s) egg_info for package %s",
+ setup_py_path,
+ details,
+ )
+
+ egg_info_dir = TempDirectory(kind="pip-egg-info", globally_managed=True).path
+
+ args = make_setuptools_egg_info_args(
+ setup_py_path,
+ egg_info_dir=egg_info_dir,
+ no_user_config=isolated,
+ )
+
+ with build_env:
+ with open_spinner("Preparing metadata (setup.py)") as spinner:
+ try:
+ call_subprocess(
+ args,
+ cwd=source_dir,
+ command_desc="python setup.py egg_info",
+ spinner=spinner,
+ )
+ except InstallationSubprocessError as error:
+ raise MetadataGenerationFailed(package_details=details) from error
+
+ # Return the .egg-info directory.
+ return _find_egg_info(egg_info_dir)
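
The `_find_egg_info` contract above (exactly one *.egg-info directory, anything else is an error) in a standalone check:

    import os
    import tempfile

    from pip._internal.exceptions import InstallationError
    from pip._internal.operations.build.metadata_legacy import _find_egg_info

    with tempfile.TemporaryDirectory() as d:
        os.mkdir(os.path.join(d, "demo.egg-info"))
        print(_find_egg_info(d))  # <tmpdir>/demo.egg-info

        os.mkdir(os.path.join(d, "other.egg-info"))  # now ambiguous
        try:
            _find_egg_info(d)
        except InstallationError as exc:
            print(exc)  # More than one .egg-info directory found in ...
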
diff --git a/third_party/python/pip/pip/_internal/operations/build/wheel.py b/third_party/python/pip/pip/_internal/operations/build/wheel.py
new file mode 100644
index 0000000000..064811ad11
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/build/wheel.py
@@ -0,0 +1,37 @@
+import logging
+import os
+from typing import Optional
+
+from pip._vendor.pyproject_hooks import BuildBackendHookCaller
+
+from pip._internal.utils.subprocess import runner_with_spinner_message
+
+logger = logging.getLogger(__name__)
+
+
+def build_wheel_pep517(
+ name: str,
+ backend: BuildBackendHookCaller,
+ metadata_directory: str,
+ tempd: str,
+) -> Optional[str]:
+ """Build one InstallRequirement using the PEP 517 build process.
+
+ Returns path to wheel if successfully built. Otherwise, returns None.
+ """
+ assert metadata_directory is not None
+ try:
+ logger.debug("Destination directory: %s", tempd)
+
+ runner = runner_with_spinner_message(
+ f"Building wheel for {name} (pyproject.toml)"
+ )
+ with backend.subprocess_runner(runner):
+ wheel_name = backend.build_wheel(
+ tempd,
+ metadata_directory=metadata_directory,
+ )
+ except Exception:
+ logger.error("Failed building wheel for %s", name)
+ return None
+ return os.path.join(tempd, wheel_name)
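
Continuing the hypothetical `./pkg` tree from the metadata sketch earlier, `build_wheel_pep517` can be driven directly; the metadata directory is the one produced by the PEP 517 metadata step:

    from pip._vendor.pyproject_hooks import BuildBackendHookCaller

    from pip._internal.operations.build.wheel import build_wheel_pep517

    backend = BuildBackendHookCaller("./pkg", build_backend="setuptools.build_meta")
    wheel_path = build_wheel_pep517(
        name="pkg",
        backend=backend,
        metadata_directory="/tmp/meta/pkg-1.0.dist-info",  # from the metadata step
        tempd="/tmp/wheels",
    )
    print(wheel_path)  # /tmp/wheels/pkg-1.0-py3-none-any.whl, or None on failure
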
diff --git a/third_party/python/pip/pip/_internal/operations/build/wheel_editable.py b/third_party/python/pip/pip/_internal/operations/build/wheel_editable.py
new file mode 100644
index 0000000000..719d69dd80
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/build/wheel_editable.py
@@ -0,0 +1,46 @@
+import logging
+import os
+from typing import Optional
+
+from pip._vendor.pyproject_hooks import BuildBackendHookCaller, HookMissing
+
+from pip._internal.utils.subprocess import runner_with_spinner_message
+
+logger = logging.getLogger(__name__)
+
+
+def build_wheel_editable(
+ name: str,
+ backend: BuildBackendHookCaller,
+ metadata_directory: str,
+ tempd: str,
+) -> Optional[str]:
+ """Build one InstallRequirement using the PEP 660 build process.
+
+ Returns path to wheel if successfully built. Otherwise, returns None.
+ """
+ assert metadata_directory is not None
+ try:
+ logger.debug("Destination directory: %s", tempd)
+
+ runner = runner_with_spinner_message(
+ f"Building editable for {name} (pyproject.toml)"
+ )
+ with backend.subprocess_runner(runner):
+ try:
+ wheel_name = backend.build_editable(
+ tempd,
+ metadata_directory=metadata_directory,
+ )
+ except HookMissing as e:
+ logger.error(
+ "Cannot build editable %s because the build "
+ "backend does not have the %s hook",
+ name,
+ e,
+ )
+ return None
+ except Exception:
+ logger.error("Failed building editable for %s", name)
+ return None
+ return os.path.join(tempd, wheel_name)
diff --git a/third_party/python/pip/pip/_internal/operations/build/wheel_legacy.py b/third_party/python/pip/pip/_internal/operations/build/wheel_legacy.py
new file mode 100644
index 0000000000..c5f0492ccb
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/build/wheel_legacy.py
@@ -0,0 +1,102 @@
+import logging
+import os.path
+from typing import List, Optional
+
+from pip._internal.cli.spinners import open_spinner
+from pip._internal.utils.setuptools_build import make_setuptools_bdist_wheel_args
+from pip._internal.utils.subprocess import call_subprocess, format_command_args
+
+logger = logging.getLogger(__name__)
+
+
+def format_command_result(
+ command_args: List[str],
+ command_output: str,
+) -> str:
+ """Format command information for logging."""
+ command_desc = format_command_args(command_args)
+ text = f"Command arguments: {command_desc}\n"
+
+ if not command_output:
+ text += "Command output: None"
+ elif logger.getEffectiveLevel() > logging.DEBUG:
+ text += "Command output: [use --verbose to show]"
+ else:
+ if not command_output.endswith("\n"):
+ command_output += "\n"
+ text += f"Command output:\n{command_output}"
+
+ return text
+
+
+def get_legacy_build_wheel_path(
+ names: List[str],
+ temp_dir: str,
+ name: str,
+ command_args: List[str],
+ command_output: str,
+) -> Optional[str]:
+ """Return the path to the wheel in the temporary build directory."""
+ # Sort for determinism.
+ names = sorted(names)
+ if not names:
+ msg = ("Legacy build of wheel for {!r} created no files.\n").format(name)
+ msg += format_command_result(command_args, command_output)
+ logger.warning(msg)
+ return None
+
+ if len(names) > 1:
+ msg = (
+ "Legacy build of wheel for {!r} created more than one file.\n"
+ "Filenames (choosing first): {}\n"
+ ).format(name, names)
+ msg += format_command_result(command_args, command_output)
+ logger.warning(msg)
+
+ return os.path.join(temp_dir, names[0])
+
+
+def build_wheel_legacy(
+ name: str,
+ setup_py_path: str,
+ source_dir: str,
+ global_options: List[str],
+ build_options: List[str],
+ tempd: str,
+) -> Optional[str]:
+ """Build one unpacked package using the "legacy" build process.
+
+ Returns path to wheel if successfully built. Otherwise, returns None.
+ """
+ wheel_args = make_setuptools_bdist_wheel_args(
+ setup_py_path,
+ global_options=global_options,
+ build_options=build_options,
+ destination_dir=tempd,
+ )
+
+ spin_message = f"Building wheel for {name} (setup.py)"
+ with open_spinner(spin_message) as spinner:
+ logger.debug("Destination directory: %s", tempd)
+
+ try:
+ output = call_subprocess(
+ wheel_args,
+ command_desc="python setup.py bdist_wheel",
+ cwd=source_dir,
+ spinner=spinner,
+ )
+ except Exception:
+ spinner.finish("error")
+ logger.error("Failed building wheel for %s", name)
+ return None
+
+ names = os.listdir(tempd)
+ wheel_path = get_legacy_build_wheel_path(
+ names=names,
+ temp_dir=tempd,
+ name=name,
+ command_args=wheel_args,
+ command_output=output,
+ )
+ return wheel_path
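
The selection logic in `get_legacy_build_wheel_path` can be exercised with synthetic inputs; with several candidate files it warns and keeps the name that sorts first, and with none it warns and returns None:

    from pip._internal.operations.build.wheel_legacy import (
        get_legacy_build_wheel_path,
    )

    path = get_legacy_build_wheel_path(
        names=["demo-1.0.tar.gz", "demo-1.0-py3-none-any.whl"],
        temp_dir="/tmp/build",
        name="demo",
        command_args=["python", "setup.py", "bdist_wheel"],
        command_output="",
    )
    print(path)  # /tmp/build/demo-1.0-py3-none-any.whl

    print(get_legacy_build_wheel_path([], "/tmp/build", "demo", [], ""))  # None
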
diff --git a/third_party/python/pip/pip/_internal/operations/check.py b/third_party/python/pip/pip/_internal/operations/check.py
new file mode 100644
index 0000000000..e3bce69b20
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/check.py
@@ -0,0 +1,149 @@
+"""Validation of dependencies of packages
+"""
+
+import logging
+from typing import Callable, Dict, List, NamedTuple, Optional, Set, Tuple
+
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+
+from pip._internal.distributions import make_distribution_for_install_requirement
+from pip._internal.metadata import get_default_environment
+from pip._internal.metadata.base import DistributionVersion
+from pip._internal.req.req_install import InstallRequirement
+
+logger = logging.getLogger(__name__)
+
+
+class PackageDetails(NamedTuple):
+ version: DistributionVersion
+ dependencies: List[Requirement]
+
+
+# Shorthands
+PackageSet = Dict[NormalizedName, PackageDetails]
+Missing = Tuple[NormalizedName, Requirement]
+Conflicting = Tuple[NormalizedName, DistributionVersion, Requirement]
+
+MissingDict = Dict[NormalizedName, List[Missing]]
+ConflictingDict = Dict[NormalizedName, List[Conflicting]]
+CheckResult = Tuple[MissingDict, ConflictingDict]
+ConflictDetails = Tuple[PackageSet, CheckResult]
+
+
+def create_package_set_from_installed() -> Tuple[PackageSet, bool]:
+ """Converts a list of distributions into a PackageSet."""
+ package_set = {}
+ problems = False
+ env = get_default_environment()
+ for dist in env.iter_installed_distributions(local_only=False, skip=()):
+ name = dist.canonical_name
+ try:
+ dependencies = list(dist.iter_dependencies())
+ package_set[name] = PackageDetails(dist.version, dependencies)
+ except (OSError, ValueError) as e:
+ # Don't crash on unreadable or broken metadata.
+ logger.warning("Error parsing requirements for %s: %s", name, e)
+ problems = True
+ return package_set, problems
+
+
+def check_package_set(
+ package_set: PackageSet, should_ignore: Optional[Callable[[str], bool]] = None
+) -> CheckResult:
+ """Check if a package set is consistent
+
+ If should_ignore is passed, it should be a callable that takes a
+ package name and returns a boolean.
+ """
+
+ missing = {}
+ conflicting = {}
+
+ for package_name, package_detail in package_set.items():
+ # Info about dependencies of package_name
+ missing_deps: Set[Missing] = set()
+ conflicting_deps: Set[Conflicting] = set()
+
+ if should_ignore and should_ignore(package_name):
+ continue
+
+ for req in package_detail.dependencies:
+ name = canonicalize_name(req.name)
+
+ # Check if it's missing
+ if name not in package_set:
+ missed = True
+ if req.marker is not None:
+ missed = req.marker.evaluate({"extra": ""})
+ if missed:
+ missing_deps.add((name, req))
+ continue
+
+ # Check if there's a conflict
+ version = package_set[name].version
+ if not req.specifier.contains(version, prereleases=True):
+ conflicting_deps.add((name, version, req))
+
+ if missing_deps:
+ missing[package_name] = sorted(missing_deps, key=str)
+ if conflicting_deps:
+ conflicting[package_name] = sorted(conflicting_deps, key=str)
+
+ return missing, conflicting
+
+
+def check_install_conflicts(to_install: List[InstallRequirement]) -> ConflictDetails:
+    """Check whether the dependency graph would be consistent after
+    installing the given requirements.
+    """
+ # Start from the current state
+ package_set, _ = create_package_set_from_installed()
+ # Install packages
+ would_be_installed = _simulate_installation_of(to_install, package_set)
+
+ # Only warn about directly-dependent packages; create a whitelist of them
+ whitelist = _create_whitelist(would_be_installed, package_set)
+
+ return (
+ package_set,
+ check_package_set(
+ package_set, should_ignore=lambda name: name not in whitelist
+ ),
+ )
+
+
+def _simulate_installation_of(
+ to_install: List[InstallRequirement], package_set: PackageSet
+) -> Set[NormalizedName]:
+ """Computes the version of packages after installing to_install."""
+ # Keep track of packages that were installed
+ installed = set()
+
+ # Modify it as installing requirement_set would (assuming no errors)
+ for inst_req in to_install:
+ abstract_dist = make_distribution_for_install_requirement(inst_req)
+ dist = abstract_dist.get_metadata_distribution()
+ name = dist.canonical_name
+ package_set[name] = PackageDetails(dist.version, list(dist.iter_dependencies()))
+
+ installed.add(name)
+
+ return installed
+
+
+def _create_whitelist(
+ would_be_installed: Set[NormalizedName], package_set: PackageSet
+) -> Set[NormalizedName]:
+ packages_affected = set(would_be_installed)
+
+ for package_name in package_set:
+ if package_name in packages_affected:
+ continue
+
+ for req in package_set[package_name].dependencies:
+ if canonicalize_name(req.name) in packages_affected:
+ packages_affected.add(package_name)
+ break
+
+ return packages_affected
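
A synthetic PackageSet makes the two failure modes of `check_package_set` concrete: a dependency missing from the set, and one present at a version outside the required specifier:

    from pip._vendor.packaging.requirements import Requirement
    from pip._vendor.packaging.utils import canonicalize_name
    from pip._vendor.packaging.version import Version

    from pip._internal.operations.check import PackageDetails, check_package_set

    package_set = {
        canonicalize_name("alpha"): PackageDetails(
            Version("1.0"), [Requirement("beta>=2"), Requirement("gamma")]
        ),
        canonicalize_name("beta"): PackageDetails(Version("1.5"), []),
    }
    missing, conflicting = check_package_set(package_set)
    print(missing)      # alpha -> [(gamma, <missing requirement>)]
    print(conflicting)  # alpha -> [(beta, 1.5, beta>=2)]
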
diff --git a/third_party/python/pip/pip/_internal/operations/freeze.py b/third_party/python/pip/pip/_internal/operations/freeze.py
new file mode 100644
index 0000000000..930d4c6005
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/freeze.py
@@ -0,0 +1,254 @@
+import collections
+import logging
+import os
+from typing import Container, Dict, Generator, Iterable, List, NamedTuple, Optional, Set
+
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.packaging.version import Version
+
+from pip._internal.exceptions import BadCommand, InstallationError
+from pip._internal.metadata import BaseDistribution, get_environment
+from pip._internal.req.constructors import (
+ install_req_from_editable,
+ install_req_from_line,
+)
+from pip._internal.req.req_file import COMMENT_RE
+from pip._internal.utils.direct_url_helpers import direct_url_as_pep440_direct_reference
+
+logger = logging.getLogger(__name__)
+
+
+class _EditableInfo(NamedTuple):
+ requirement: str
+ comments: List[str]
+
+
+def freeze(
+ requirement: Optional[List[str]] = None,
+ local_only: bool = False,
+ user_only: bool = False,
+ paths: Optional[List[str]] = None,
+ isolated: bool = False,
+ exclude_editable: bool = False,
+ skip: Container[str] = (),
+) -> Generator[str, None, None]:
+ installations: Dict[str, FrozenRequirement] = {}
+
+ dists = get_environment(paths).iter_installed_distributions(
+ local_only=local_only,
+ skip=(),
+ user_only=user_only,
+ )
+ for dist in dists:
+ req = FrozenRequirement.from_dist(dist)
+ if exclude_editable and req.editable:
+ continue
+ installations[req.canonical_name] = req
+
+ if requirement:
+ # the options that don't get turned into an InstallRequirement
+ # should only be emitted once, even if the same option is in multiple
+ # requirements files, so we need to keep track of what has been emitted
+ # so that we don't emit it again if it's seen again
+ emitted_options: Set[str] = set()
+ # keep track of which files a requirement is in so that we can
+ # give an accurate warning if a requirement appears multiple times.
+ req_files: Dict[str, List[str]] = collections.defaultdict(list)
+ for req_file_path in requirement:
+ with open(req_file_path) as req_file:
+ for line in req_file:
+ if (
+ not line.strip()
+ or line.strip().startswith("#")
+ or line.startswith(
+ (
+ "-r",
+ "--requirement",
+ "-f",
+ "--find-links",
+ "-i",
+ "--index-url",
+ "--pre",
+ "--trusted-host",
+ "--process-dependency-links",
+ "--extra-index-url",
+ "--use-feature",
+ )
+ )
+ ):
+ line = line.rstrip()
+ if line not in emitted_options:
+ emitted_options.add(line)
+ yield line
+ continue
+
+ if line.startswith("-e") or line.startswith("--editable"):
+ if line.startswith("-e"):
+ line = line[2:].strip()
+ else:
+ line = line[len("--editable") :].strip().lstrip("=")
+ line_req = install_req_from_editable(
+ line,
+ isolated=isolated,
+ )
+ else:
+ line_req = install_req_from_line(
+ COMMENT_RE.sub("", line).strip(),
+ isolated=isolated,
+ )
+
+ if not line_req.name:
+ logger.info(
+ "Skipping line in requirement file [%s] because "
+ "it's not clear what it would install: %s",
+ req_file_path,
+ line.strip(),
+ )
+ logger.info(
+ " (add #egg=PackageName to the URL to avoid"
+ " this warning)"
+ )
+ else:
+ line_req_canonical_name = canonicalize_name(line_req.name)
+ if line_req_canonical_name not in installations:
+ # either it's not installed, or it is installed
+ # but has been processed already
+ if not req_files[line_req.name]:
+ logger.warning(
+ "Requirement file [%s] contains %s, but "
+ "package %r is not installed",
+ req_file_path,
+ COMMENT_RE.sub("", line).strip(),
+ line_req.name,
+ )
+ else:
+ req_files[line_req.name].append(req_file_path)
+ else:
+ yield str(installations[line_req_canonical_name]).rstrip()
+ del installations[line_req_canonical_name]
+ req_files[line_req.name].append(req_file_path)
+
+ # Warn about requirements that were included multiple times (in a
+ # single requirements file or in different requirements files).
+ for name, files in req_files.items():
+ if len(files) > 1:
+ logger.warning(
+ "Requirement %s included multiple times [%s]",
+ name,
+ ", ".join(sorted(set(files))),
+ )
+
+ yield ("## The following requirements were added by pip freeze:")
+ for installation in sorted(installations.values(), key=lambda x: x.name.lower()):
+ if installation.canonical_name not in skip:
+ yield str(installation).rstrip()
+
+
+def _format_as_name_version(dist: BaseDistribution) -> str:
+ if isinstance(dist.version, Version):
+ return f"{dist.raw_name}=={dist.version}"
+ return f"{dist.raw_name}==={dist.version}"
+
+
+def _get_editable_info(dist: BaseDistribution) -> _EditableInfo:
+ """
+ Compute and return values (req, comments) for use in
+ FrozenRequirement.from_dist().
+ """
+ editable_project_location = dist.editable_project_location
+ assert editable_project_location
+ location = os.path.normcase(os.path.abspath(editable_project_location))
+
+ from pip._internal.vcs import RemoteNotFoundError, RemoteNotValidError, vcs
+
+ vcs_backend = vcs.get_backend_for_dir(location)
+
+ if vcs_backend is None:
+ display = _format_as_name_version(dist)
+ logger.debug(
+ 'No VCS found for editable requirement "%s" in: %r',
+ display,
+ location,
+ )
+ return _EditableInfo(
+ requirement=location,
+ comments=[f"# Editable install with no version control ({display})"],
+ )
+
+ vcs_name = type(vcs_backend).__name__
+
+ try:
+ req = vcs_backend.get_src_requirement(location, dist.raw_name)
+ except RemoteNotFoundError:
+ display = _format_as_name_version(dist)
+ return _EditableInfo(
+ requirement=location,
+ comments=[f"# Editable {vcs_name} install with no remote ({display})"],
+ )
+ except RemoteNotValidError as ex:
+ display = _format_as_name_version(dist)
+ return _EditableInfo(
+ requirement=location,
+ comments=[
+ f"# Editable {vcs_name} install ({display}) with either a deleted "
+ f"local remote or invalid URI:",
+ f"# '{ex.url}'",
+ ],
+ )
+ except BadCommand:
+ logger.warning(
+ "cannot determine version of editable source in %s "
+ "(%s command not found in path)",
+ location,
+ vcs_backend.name,
+ )
+ return _EditableInfo(requirement=location, comments=[])
+ except InstallationError as exc:
+ logger.warning("Error when trying to get requirement for VCS system %s", exc)
+ else:
+ return _EditableInfo(requirement=req, comments=[])
+
+ logger.warning("Could not determine repository location of %s", location)
+
+ return _EditableInfo(
+ requirement=location,
+ comments=["## !! Could not determine repository location"],
+ )
+
+
+class FrozenRequirement:
+ def __init__(
+ self,
+ name: str,
+ req: str,
+ editable: bool,
+ comments: Iterable[str] = (),
+ ) -> None:
+ self.name = name
+ self.canonical_name = canonicalize_name(name)
+ self.req = req
+ self.editable = editable
+ self.comments = comments
+
+ @classmethod
+ def from_dist(cls, dist: BaseDistribution) -> "FrozenRequirement":
+ editable = dist.editable
+ if editable:
+ req, comments = _get_editable_info(dist)
+ else:
+ comments = []
+ direct_url = dist.direct_url
+ if direct_url:
+ # if PEP 610 metadata is present, use it
+ req = direct_url_as_pep440_direct_reference(direct_url, dist.raw_name)
+ else:
+ # name==version requirement
+ req = _format_as_name_version(dist)
+
+ return cls(dist.raw_name, req, editable, comments=comments)
+
+ def __str__(self) -> str:
+ req = self.req
+ if self.editable:
+ req = f"-e {req}"
+ return "\n".join(list(self.comments) + [str(req)]) + "\n"
diff --git a/third_party/python/pip/pip/_internal/operations/install/__init__.py b/third_party/python/pip/pip/_internal/operations/install/__init__.py
new file mode 100644
index 0000000000..24d6a5dd31
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/install/__init__.py
@@ -0,0 +1,2 @@
+"""For modules related to installing packages.
+"""
diff --git a/third_party/python/pip/pip/_internal/operations/install/editable_legacy.py b/third_party/python/pip/pip/_internal/operations/install/editable_legacy.py
new file mode 100644
index 0000000000..bb548cdca7
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/install/editable_legacy.py
@@ -0,0 +1,47 @@
+"""Legacy editable installation process, i.e. `setup.py develop`.
+"""
+import logging
+from typing import List, Optional, Sequence
+
+from pip._internal.build_env import BuildEnvironment
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.setuptools_build import make_setuptools_develop_args
+from pip._internal.utils.subprocess import call_subprocess
+
+logger = logging.getLogger(__name__)
+
+
+def install_editable(
+ install_options: List[str],
+ global_options: Sequence[str],
+ prefix: Optional[str],
+ home: Optional[str],
+ use_user_site: bool,
+ name: str,
+ setup_py_path: str,
+ isolated: bool,
+ build_env: BuildEnvironment,
+ unpacked_source_directory: str,
+) -> None:
+    """Install a package in editable mode. Most arguments are passed through
+    to setuptools.
+ """
+ logger.info("Running setup.py develop for %s", name)
+
+ args = make_setuptools_develop_args(
+ setup_py_path,
+ global_options=global_options,
+ install_options=install_options,
+ no_user_config=isolated,
+ prefix=prefix,
+ home=home,
+ use_user_site=use_user_site,
+ )
+
+ with indent_log():
+ with build_env:
+ call_subprocess(
+ args,
+ command_desc="python setup.py develop",
+ cwd=unpacked_source_directory,
+ )
diff --git a/third_party/python/pip/pip/_internal/operations/install/legacy.py b/third_party/python/pip/pip/_internal/operations/install/legacy.py
new file mode 100644
index 0000000000..290967dd6d
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/install/legacy.py
@@ -0,0 +1,120 @@
+"""Legacy installation process, i.e. `setup.py install`.
+"""
+
+import logging
+import os
+from typing import List, Optional, Sequence
+
+from pip._internal.build_env import BuildEnvironment
+from pip._internal.exceptions import InstallationError, LegacyInstallFailure
+from pip._internal.locations.base import change_root
+from pip._internal.models.scheme import Scheme
+from pip._internal.utils.misc import ensure_dir
+from pip._internal.utils.setuptools_build import make_setuptools_install_args
+from pip._internal.utils.subprocess import runner_with_spinner_message
+from pip._internal.utils.temp_dir import TempDirectory
+
+logger = logging.getLogger(__name__)
+
+
+def write_installed_files_from_setuptools_record(
+ record_lines: List[str],
+ root: Optional[str],
+ req_description: str,
+) -> None:
+ def prepend_root(path: str) -> str:
+ if root is None or not os.path.isabs(path):
+ return path
+ else:
+ return change_root(root, path)
+
+ for line in record_lines:
+ directory = os.path.dirname(line)
+ if directory.endswith(".egg-info"):
+ egg_info_dir = prepend_root(directory)
+ break
+ else:
+ message = (
+ "{} did not indicate that it installed an "
+ ".egg-info directory. Only setup.py projects "
+ "generating .egg-info directories are supported."
+ ).format(req_description)
+ raise InstallationError(message)
+
+ new_lines = []
+ for line in record_lines:
+ filename = line.strip()
+ if os.path.isdir(filename):
+ filename += os.path.sep
+ new_lines.append(os.path.relpath(prepend_root(filename), egg_info_dir))
+ new_lines.sort()
+ ensure_dir(egg_info_dir)
+ inst_files_path = os.path.join(egg_info_dir, "installed-files.txt")
+ with open(inst_files_path, "w") as f:
+ f.write("\n".join(new_lines) + "\n")
+
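+# Editorial example with assumed inputs (paths are hypothetical): for a
+# setuptools record containing
+#     /usr/lib/python3/site-packages/foo/__init__.py
+#     /usr/lib/python3/site-packages/foo-1.0.egg-info/PKG-INFO
+# and root="/tmp/root", the helper prefixes each absolute path with root,
+# locates the .egg-info directory, and writes installed-files.txt entries
+# relative to it, roughly:
+#     ../foo/__init__.py
+#     PKG-INFO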
+
+def install(
+ install_options: List[str],
+ global_options: Sequence[str],
+ root: Optional[str],
+ home: Optional[str],
+ prefix: Optional[str],
+ use_user_site: bool,
+ pycompile: bool,
+ scheme: Scheme,
+ setup_py_path: str,
+ isolated: bool,
+ req_name: str,
+ build_env: BuildEnvironment,
+ unpacked_source_directory: str,
+ req_description: str,
+) -> bool:
+
+ header_dir = scheme.headers
+
+ with TempDirectory(kind="record") as temp_dir:
+ try:
+ record_filename = os.path.join(temp_dir.path, "install-record.txt")
+ install_args = make_setuptools_install_args(
+ setup_py_path,
+ global_options=global_options,
+ install_options=install_options,
+ record_filename=record_filename,
+ root=root,
+ prefix=prefix,
+ header_dir=header_dir,
+ home=home,
+ use_user_site=use_user_site,
+ no_user_config=isolated,
+ pycompile=pycompile,
+ )
+
+ runner = runner_with_spinner_message(
+ f"Running setup.py install for {req_name}"
+ )
+ with build_env:
+ runner(
+ cmd=install_args,
+ cwd=unpacked_source_directory,
+ )
+
+ if not os.path.exists(record_filename):
+ logger.debug("Record file %s not found", record_filename)
+ # Signal to the caller that we didn't install the new package
+ return False
+
+ except Exception as e:
+ # Signal to the caller that we didn't install the new package
+ raise LegacyInstallFailure(package_details=req_name) from e
+
+ # At this point, we have successfully installed the requirement.
+
+ # We intentionally do not use any encoding to read the file because
+ # setuptools writes the file using distutils.file_util.write_file,
+ # which does not specify an encoding.
+ with open(record_filename) as f:
+ record_lines = f.read().splitlines()
+
+ write_installed_files_from_setuptools_record(record_lines, root, req_description)
+ return True
diff --git a/third_party/python/pip/pip/_internal/operations/install/wheel.py b/third_party/python/pip/pip/_internal/operations/install/wheel.py
new file mode 100644
index 0000000000..c79941398a
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/install/wheel.py
@@ -0,0 +1,738 @@
+"""Support for installing and building the "wheel" binary package format.
+"""
+
+import collections
+import compileall
+import contextlib
+import csv
+import importlib.util
+import logging
+import os.path
+import re
+import shutil
+import sys
+import warnings
+from base64 import urlsafe_b64encode
+from email.message import Message
+from itertools import chain, filterfalse, starmap
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ BinaryIO,
+ Callable,
+ Dict,
+ Generator,
+ Iterable,
+ Iterator,
+ List,
+ NewType,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Union,
+ cast,
+)
+from zipfile import ZipFile, ZipInfo
+
+from pip._vendor.distlib.scripts import ScriptMaker
+from pip._vendor.distlib.util import get_export_entry
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.locations import get_major_minor_version
+from pip._internal.metadata import (
+ BaseDistribution,
+ FilesystemWheel,
+ get_wheel_distribution,
+)
+from pip._internal.models.direct_url import DIRECT_URL_METADATA_NAME, DirectUrl
+from pip._internal.models.scheme import SCHEME_KEYS, Scheme
+from pip._internal.utils.filesystem import adjacent_tmp_file, replace
+from pip._internal.utils.misc import captured_stdout, ensure_dir, hash_file, partition
+from pip._internal.utils.unpacking import (
+ current_umask,
+ is_within_directory,
+ set_extracted_file_to_default_mode_plus_executable,
+ zip_item_is_executable,
+)
+from pip._internal.utils.wheel import parse_wheel
+
+if TYPE_CHECKING:
+ from typing import Protocol
+
+ class File(Protocol):
+ src_record_path: "RecordPath"
+ dest_path: str
+ changed: bool
+
+ def save(self) -> None:
+ pass
+
+
+logger = logging.getLogger(__name__)
+
+RecordPath = NewType("RecordPath", str)
+InstalledCSVRow = Tuple[RecordPath, str, Union[int, str]]
+
+
+def rehash(path: str, blocksize: int = 1 << 20) -> Tuple[str, str]:
+ """Return (encoded_digest, length) for path using hashlib.sha256()"""
+ h, length = hash_file(path, blocksize)
+ digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=")
+ return (digest, str(length))
+
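+# Editorial note: this is the RECORD digest format from PEP 376/427 --
+# urlsafe base64 with the trailing "=" padding stripped. A self-contained
+# sketch of the same encoding (illustrative only):
+#
+#   import hashlib
+#   from base64 import urlsafe_b64encode
+#   digest = hashlib.sha256(b"file contents").digest()
+#   record_hash = "sha256=" + urlsafe_b64encode(digest).decode("latin1").rstrip("=")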
+
+def csv_io_kwargs(mode: str) -> Dict[str, Any]:
+ """Return keyword arguments to properly open a CSV file
+ in the given mode.
+ """
+ return {"mode": mode, "newline": "", "encoding": "utf-8"}
+
+
+def fix_script(path: str) -> bool:
+ """Replace #!python with #!/path/to/python
+ Return True if file was changed.
+ """
+ # XXX RECORD hashes will need to be updated
+ assert os.path.isfile(path)
+
+ with open(path, "rb") as script:
+ firstline = script.readline()
+ if not firstline.startswith(b"#!python"):
+ return False
+ exename = sys.executable.encode(sys.getfilesystemencoding())
+ firstline = b"#!" + exename + os.linesep.encode("ascii")
+ rest = script.read()
+ with open(path, "wb") as script:
+ script.write(firstline)
+ script.write(rest)
+ return True
+
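+# Editorial example (interpreter path assumed): a script whose first line is
+# the literal placeholder "#!python", as written into some wheels, gets its
+# shebang rewritten in place; any other first line is left untouched.
+#
+#   before: #!python          ->  after: #!/usr/bin/python3 (returns True)
+#   before: #!/usr/bin/env sh ->  unchanged                 (returns False)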
+
+def wheel_root_is_purelib(metadata: Message) -> bool:
+ return metadata.get("Root-Is-Purelib", "").lower() == "true"
+
+
+def get_entrypoints(dist: BaseDistribution) -> Tuple[Dict[str, str], Dict[str, str]]:
+ console_scripts = {}
+ gui_scripts = {}
+ for entry_point in dist.iter_entry_points():
+ if entry_point.group == "console_scripts":
+ console_scripts[entry_point.name] = entry_point.value
+ elif entry_point.group == "gui_scripts":
+ gui_scripts[entry_point.name] = entry_point.value
+ return console_scripts, gui_scripts
+
+
+def message_about_scripts_not_on_PATH(scripts: Sequence[str]) -> Optional[str]:
+ """Determine if any scripts are not on PATH and format a warning.
+ Returns a warning message if one or more scripts are not on PATH,
+ otherwise None.
+ """
+ if not scripts:
+ return None
+
+ # Group scripts by the path they were installed in
+ grouped_by_dir: Dict[str, Set[str]] = collections.defaultdict(set)
+ for destfile in scripts:
+ parent_dir = os.path.dirname(destfile)
+ script_name = os.path.basename(destfile)
+ grouped_by_dir[parent_dir].add(script_name)
+
+ # We don't want to warn for directories that are on PATH.
+ not_warn_dirs = [
+ os.path.normcase(i).rstrip(os.sep)
+ for i in os.environ.get("PATH", "").split(os.pathsep)
+ ]
+ # If an executable sits with sys.executable, we don't warn for it.
+ # This covers the case of venv invocations without activating the venv.
+ not_warn_dirs.append(os.path.normcase(os.path.dirname(sys.executable)))
+ warn_for: Dict[str, Set[str]] = {
+ parent_dir: scripts
+ for parent_dir, scripts in grouped_by_dir.items()
+ if os.path.normcase(parent_dir) not in not_warn_dirs
+ }
+ if not warn_for:
+ return None
+
+ # Format a message
+ msg_lines = []
+ for parent_dir, dir_scripts in warn_for.items():
+ sorted_scripts: List[str] = sorted(dir_scripts)
+ if len(sorted_scripts) == 1:
+ start_text = "script {} is".format(sorted_scripts[0])
+ else:
+ start_text = "scripts {} are".format(
+ ", ".join(sorted_scripts[:-1]) + " and " + sorted_scripts[-1]
+ )
+
+ msg_lines.append(
+ "The {} installed in '{}' which is not on PATH.".format(
+ start_text, parent_dir
+ )
+ )
+
+ last_line_fmt = (
+ "Consider adding {} to PATH or, if you prefer "
+ "to suppress this warning, use --no-warn-script-location."
+ )
+ if len(msg_lines) == 1:
+ msg_lines.append(last_line_fmt.format("this directory"))
+ else:
+ msg_lines.append(last_line_fmt.format("these directories"))
+
+ # Add a note if any directory starts with ~
+ warn_for_tilde = any(
+ i[0] == "~" for i in os.environ.get("PATH", "").split(os.pathsep) if i
+ )
+ if warn_for_tilde:
+ tilde_warning_msg = (
+ "NOTE: The current PATH contains path(s) starting with `~`, "
+ "which may not be expanded by all applications."
+ )
+ msg_lines.append(tilde_warning_msg)
+
+ # Returns the formatted multiline message
+ return "\n".join(msg_lines)
+
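+# Editorial example of the assembled warning, with hypothetical script names
+# and directory:
+#
+#   The scripts black and blackd are installed in '/home/user/.local/bin'
+#   which is not on PATH.
+#   Consider adding this directory to PATH or, if you prefer to suppress
+#   this warning, use --no-warn-script-location.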
+
+def _normalized_outrows(
+ outrows: Iterable[InstalledCSVRow],
+) -> List[Tuple[str, str, str]]:
+ """Normalize the given rows of a RECORD file.
+
+ Items in each row are converted into str. Rows are then sorted to make
+ the value more predictable for tests.
+
+ Each row is a 3-tuple (path, hash, size) and corresponds to a record of
+ a RECORD file (see PEP 376 and PEP 427 for details). For the rows
+ passed to this function, the size can be an integer as an int or string,
+ or the empty string.
+ """
+ # Normally, there should only be one row per path, in which case the
+ # second and third elements don't come into play when sorting.
+ # However, in cases in the wild where a path might happen to occur twice,
+ # we don't want the sort operation to trigger an error (but still want
+ # determinism). Since the third element can be an int or string, we
+ # coerce each element to a string to avoid a TypeError in this case.
+ # For additional background, see--
+ # https://github.com/pypa/pip/issues/5868
+ return sorted(
+ (record_path, hash_, str(size)) for record_path, hash_, size in outrows
+ )
+
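+# Editorial sketch with assumed rows: sizes are coerced to str so that
+# mixed int/str rows sort without a TypeError, and the result is ordered
+# deterministically:
+#
+#   _normalized_outrows([("b.py", "sha256=x", 10), ("a.py", "", "")])
+#   # -> [("a.py", "", ""), ("b.py", "sha256=x", "10")]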
+
+def _record_to_fs_path(record_path: RecordPath, lib_dir: str) -> str:
+ return os.path.join(lib_dir, record_path)
+
+
+def _fs_to_record_path(path: str, lib_dir: str) -> RecordPath:
+ # On Windows, do not handle relative paths if they belong to different
+ # logical disks
+ if os.path.splitdrive(path)[0].lower() == os.path.splitdrive(lib_dir)[0].lower():
+ path = os.path.relpath(path, lib_dir)
+
+ path = path.replace(os.path.sep, "/")
+ return cast("RecordPath", path)
+
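+# Editorial example of the drive check above, on Windows (assumed paths):
+#
+#   _fs_to_record_path(r"C:\py\Lib\site-packages\a.py", r"C:\py\Lib\site-packages")
+#   # -> "a.py"                (same drive: relativized, "/" separators)
+#   _fs_to_record_path(r"D:\Scripts\tool.exe", r"C:\py\Lib\site-packages")
+#   # -> "D:/Scripts/tool.exe" (different drive: left absolute)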
+
+def get_csv_rows_for_installed(
+ old_csv_rows: List[List[str]],
+ installed: Dict[RecordPath, RecordPath],
+ changed: Set[RecordPath],
+ generated: List[str],
+ lib_dir: str,
+) -> List[InstalledCSVRow]:
+ """
+ :param installed: A map from archive RECORD path to installation RECORD
+ path.
+ """
+ installed_rows: List[InstalledCSVRow] = []
+ for row in old_csv_rows:
+ if len(row) > 3:
+ logger.warning("RECORD line has more than three elements: %s", row)
+ old_record_path = cast("RecordPath", row[0])
+ new_record_path = installed.pop(old_record_path, old_record_path)
+ if new_record_path in changed:
+ digest, length = rehash(_record_to_fs_path(new_record_path, lib_dir))
+ else:
+ digest = row[1] if len(row) > 1 else ""
+ length = row[2] if len(row) > 2 else ""
+ installed_rows.append((new_record_path, digest, length))
+ for f in generated:
+ path = _fs_to_record_path(f, lib_dir)
+ digest, length = rehash(f)
+ installed_rows.append((path, digest, length))
+ for installed_record_path in installed.values():
+ installed_rows.append((installed_record_path, "", ""))
+ return installed_rows
+
+
+def get_console_script_specs(console: Dict[str, str]) -> List[str]:
+ """
+ Given the mapping from entrypoint name to callable, return the relevant
+ console script specs.
+ """
+ # Don't mutate caller's version
+ console = console.copy()
+
+ scripts_to_generate = []
+
+ # Special case pip and setuptools to generate versioned wrappers
+ #
+ # The issue is that some projects (specifically, pip and setuptools) use
+ # code in setup.py to create "versioned" entry points - pip2.7 on Python
+ # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
+ # the wheel metadata at build time, and so if the wheel is installed with
+ # a *different* version of Python the entry points will be wrong. The
+ # correct fix for this is to enhance the metadata to be able to describe
+ # such versioned entry points, but that won't happen till Metadata 2.0 is
+ # available.
+ # In the meantime, projects using versioned entry points will either have
+ # incorrect versioned entry points, or they will not be able to distribute
+ # "universal" wheels (i.e., they will need a wheel per Python version).
+ #
+ # Because setuptools and pip are bundled with _ensurepip and virtualenv,
+ # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
+ # override the versioned entry points in the wheel and generate the
+ # correct ones. This code is purely a short-term measure until Metadata 2.0
+ # is available.
+ #
+    # To add to the level of hack in this section of code: in order to support
+    # ensurepip, this code looks for an ``ENSUREPIP_OPTIONS`` environment
+    # variable, which controls which versioned scripts get installed.
+ #
+ # ENSUREPIP_OPTIONS=altinstall
+ # - Only pipX.Y and easy_install-X.Y will be generated and installed
+ # ENSUREPIP_OPTIONS=install
+    #     - pipX.Y, pipX, easy_install-X.Y will be generated and installed.
+    #       Note that this branch actually applies whenever ENSUREPIP_OPTIONS
+    #       is set to any value other than altinstall.
+ # DEFAULT
+ # - The default behavior is to install pip, pipX, pipX.Y, easy_install
+ # and easy_install-X.Y.
+ pip_script = console.pop("pip", None)
+ if pip_script:
+ if "ENSUREPIP_OPTIONS" not in os.environ:
+ scripts_to_generate.append("pip = " + pip_script)
+
+ if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
+ scripts_to_generate.append(
+ "pip{} = {}".format(sys.version_info[0], pip_script)
+ )
+
+ scripts_to_generate.append(f"pip{get_major_minor_version()} = {pip_script}")
+ # Delete any other versioned pip entry points
+ pip_ep = [k for k in console if re.match(r"pip(\d+(\.\d+)?)?$", k)]
+ for k in pip_ep:
+ del console[k]
+ easy_install_script = console.pop("easy_install", None)
+ if easy_install_script:
+ if "ENSUREPIP_OPTIONS" not in os.environ:
+ scripts_to_generate.append("easy_install = " + easy_install_script)
+
+ scripts_to_generate.append(
+ "easy_install-{} = {}".format(
+ get_major_minor_version(), easy_install_script
+ )
+ )
+ # Delete any other versioned easy_install entry points
+ easy_install_ep = [
+ k for k in console if re.match(r"easy_install(-\d+\.\d+)?$", k)
+ ]
+ for k in easy_install_ep:
+ del console[k]
+
+ # Generate the console entry points specified in the wheel
+ scripts_to_generate.extend(starmap("{} = {}".format, console.items()))
+
+ return scripts_to_generate
+
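+# Editorial summary of the branches above for the "pip" entry point, assuming
+# CPython 3.9 (so sys.version_info[0] == 3 and get_major_minor_version() is
+# "3.9"):
+#
+#   ENSUREPIP_OPTIONS unset       -> pip, pip3, pip3.9
+#   ENSUREPIP_OPTIONS=install     -> pip3, pip3.9
+#   ENSUREPIP_OPTIONS=altinstall  -> pip3.9 only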
+
+class ZipBackedFile:
+ def __init__(
+ self, src_record_path: RecordPath, dest_path: str, zip_file: ZipFile
+ ) -> None:
+ self.src_record_path = src_record_path
+ self.dest_path = dest_path
+ self._zip_file = zip_file
+ self.changed = False
+
+ def _getinfo(self) -> ZipInfo:
+ return self._zip_file.getinfo(self.src_record_path)
+
+ def save(self) -> None:
+ # directory creation is lazy and after file filtering
+ # to ensure we don't install empty dirs; empty dirs can't be
+ # uninstalled.
+ parent_dir = os.path.dirname(self.dest_path)
+ ensure_dir(parent_dir)
+
+ # When we open the output file below, any existing file is truncated
+ # before we start writing the new contents. This is fine in most
+ # cases, but can cause a segfault if pip has loaded a shared
+ # object (e.g. from pyopenssl through its vendored urllib3)
+ # Since the shared object is mmap'd an attempt to call a
+ # symbol in it will then cause a segfault. Unlinking the file
+ # allows writing of new contents while allowing the process to
+ # continue to use the old copy.
+ if os.path.exists(self.dest_path):
+ os.unlink(self.dest_path)
+
+ zipinfo = self._getinfo()
+
+ with self._zip_file.open(zipinfo) as f:
+ with open(self.dest_path, "wb") as dest:
+ shutil.copyfileobj(f, dest)
+
+ if zip_item_is_executable(zipinfo):
+ set_extracted_file_to_default_mode_plus_executable(self.dest_path)
+
+
+class ScriptFile:
+ def __init__(self, file: "File") -> None:
+ self._file = file
+ self.src_record_path = self._file.src_record_path
+ self.dest_path = self._file.dest_path
+ self.changed = False
+
+ def save(self) -> None:
+ self._file.save()
+ self.changed = fix_script(self.dest_path)
+
+
+class MissingCallableSuffix(InstallationError):
+ def __init__(self, entry_point: str) -> None:
+ super().__init__(
+ "Invalid script entry point: {} - A callable "
+ "suffix is required. Cf https://packaging.python.org/"
+ "specifications/entry-points/#use-for-scripts for more "
+ "information.".format(entry_point)
+ )
+
+
+def _raise_for_invalid_entrypoint(specification: str) -> None:
+ entry = get_export_entry(specification)
+ if entry is not None and entry.suffix is None:
+ raise MissingCallableSuffix(str(entry))
+
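+# Editorial example: a console-script specification must name a callable
+# after the ":" separator.
+#
+#   _raise_for_invalid_entrypoint("pip = pip._internal.cli.main:main")  # ok
+#   _raise_for_invalid_entrypoint("pip = pip._internal.cli.main")
+#   # raises MissingCallableSuffix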
+
+class PipScriptMaker(ScriptMaker):
+ def make(
+ self, specification: str, options: Optional[Dict[str, Any]] = None
+ ) -> List[str]:
+ _raise_for_invalid_entrypoint(specification)
+ return super().make(specification, options)
+
+
+def _install_wheel(
+ name: str,
+ wheel_zip: ZipFile,
+ wheel_path: str,
+ scheme: Scheme,
+ pycompile: bool = True,
+ warn_script_location: bool = True,
+ direct_url: Optional[DirectUrl] = None,
+ requested: bool = False,
+) -> None:
+ """Install a wheel.
+
+ :param name: Name of the project to install
+ :param wheel_zip: open ZipFile for wheel being installed
+ :param scheme: Distutils scheme dictating the install directories
+    :param wheel_path: Path to the wheel archive on disk, used in error messages
+ :param pycompile: Whether to byte-compile installed Python files
+ :param warn_script_location: Whether to check that scripts are installed
+ into a directory on PATH
+ :raises UnsupportedWheel:
+ * when the directory holds an unpacked wheel with incompatible
+ Wheel-Version
+ * when the .dist-info dir does not match the wheel
+ """
+ info_dir, metadata = parse_wheel(wheel_zip, name)
+
+ if wheel_root_is_purelib(metadata):
+ lib_dir = scheme.purelib
+ else:
+ lib_dir = scheme.platlib
+
+ # Record details of the files moved
+ # installed = files copied from the wheel to the destination
+ # changed = files changed while installing (scripts #! line typically)
+ # generated = files newly generated during the install (script wrappers)
+ installed: Dict[RecordPath, RecordPath] = {}
+ changed: Set[RecordPath] = set()
+ generated: List[str] = []
+
+ def record_installed(
+ srcfile: RecordPath, destfile: str, modified: bool = False
+ ) -> None:
+ """Map archive RECORD paths to installation RECORD paths."""
+ newpath = _fs_to_record_path(destfile, lib_dir)
+ installed[srcfile] = newpath
+ if modified:
+ changed.add(newpath)
+
+ def is_dir_path(path: RecordPath) -> bool:
+ return path.endswith("/")
+
+ def assert_no_path_traversal(dest_dir_path: str, target_path: str) -> None:
+ if not is_within_directory(dest_dir_path, target_path):
+ message = (
+ "The wheel {!r} has a file {!r} trying to install"
+ " outside the target directory {!r}"
+ )
+ raise InstallationError(
+ message.format(wheel_path, target_path, dest_dir_path)
+ )
+
+ def root_scheme_file_maker(
+ zip_file: ZipFile, dest: str
+ ) -> Callable[[RecordPath], "File"]:
+ def make_root_scheme_file(record_path: RecordPath) -> "File":
+ normed_path = os.path.normpath(record_path)
+ dest_path = os.path.join(dest, normed_path)
+ assert_no_path_traversal(dest, dest_path)
+ return ZipBackedFile(record_path, dest_path, zip_file)
+
+ return make_root_scheme_file
+
+ def data_scheme_file_maker(
+ zip_file: ZipFile, scheme: Scheme
+ ) -> Callable[[RecordPath], "File"]:
+ scheme_paths = {key: getattr(scheme, key) for key in SCHEME_KEYS}
+
+ def make_data_scheme_file(record_path: RecordPath) -> "File":
+ normed_path = os.path.normpath(record_path)
+ try:
+ _, scheme_key, dest_subpath = normed_path.split(os.path.sep, 2)
+ except ValueError:
+ message = (
+ "Unexpected file in {}: {!r}. .data directory contents"
+ " should be named like: '<scheme key>/<path>'."
+ ).format(wheel_path, record_path)
+ raise InstallationError(message)
+
+ try:
+ scheme_path = scheme_paths[scheme_key]
+ except KeyError:
+ valid_scheme_keys = ", ".join(sorted(scheme_paths))
+ message = (
+ "Unknown scheme key used in {}: {} (for file {!r}). .data"
+ " directory contents should be in subdirectories named"
+ " with a valid scheme key ({})"
+ ).format(wheel_path, scheme_key, record_path, valid_scheme_keys)
+ raise InstallationError(message)
+
+ dest_path = os.path.join(scheme_path, dest_subpath)
+ assert_no_path_traversal(scheme_path, dest_path)
+ return ZipBackedFile(record_path, dest_path, zip_file)
+
+ return make_data_scheme_file
+
+ def is_data_scheme_path(path: RecordPath) -> bool:
+ return path.split("/", 1)[0].endswith(".data")
+
+ paths = cast(List[RecordPath], wheel_zip.namelist())
+ file_paths = filterfalse(is_dir_path, paths)
+ root_scheme_paths, data_scheme_paths = partition(is_data_scheme_path, file_paths)
+
+ make_root_scheme_file = root_scheme_file_maker(wheel_zip, lib_dir)
+ files: Iterator[File] = map(make_root_scheme_file, root_scheme_paths)
+
+ def is_script_scheme_path(path: RecordPath) -> bool:
+ parts = path.split("/", 2)
+ return len(parts) > 2 and parts[0].endswith(".data") and parts[1] == "scripts"
+
+ other_scheme_paths, script_scheme_paths = partition(
+ is_script_scheme_path, data_scheme_paths
+ )
+
+ make_data_scheme_file = data_scheme_file_maker(wheel_zip, scheme)
+ other_scheme_files = map(make_data_scheme_file, other_scheme_paths)
+ files = chain(files, other_scheme_files)
+
+ # Get the defined entry points
+ distribution = get_wheel_distribution(
+ FilesystemWheel(wheel_path),
+ canonicalize_name(name),
+ )
+ console, gui = get_entrypoints(distribution)
+
+ def is_entrypoint_wrapper(file: "File") -> bool:
+ # EP, EP.exe and EP-script.py are scripts generated for
+ # entry point EP by setuptools
+ path = file.dest_path
+ name = os.path.basename(path)
+ if name.lower().endswith(".exe"):
+ matchname = name[:-4]
+ elif name.lower().endswith("-script.py"):
+ matchname = name[:-10]
+ elif name.lower().endswith(".pya"):
+ matchname = name[:-4]
+ else:
+ matchname = name
+ # Ignore setuptools-generated scripts
+ return matchname in console or matchname in gui
+
+ script_scheme_files: Iterator[File] = map(
+ make_data_scheme_file, script_scheme_paths
+ )
+ script_scheme_files = filterfalse(is_entrypoint_wrapper, script_scheme_files)
+ script_scheme_files = map(ScriptFile, script_scheme_files)
+ files = chain(files, script_scheme_files)
+
+ for file in files:
+ file.save()
+ record_installed(file.src_record_path, file.dest_path, file.changed)
+
+ def pyc_source_file_paths() -> Generator[str, None, None]:
+ # We de-duplicate installation paths, since there can be overlap (e.g.
+ # file in .data maps to same location as file in wheel root).
+ # Sorting installation paths makes it easier to reproduce and debug
+ # issues related to permissions on existing files.
+ for installed_path in sorted(set(installed.values())):
+ full_installed_path = os.path.join(lib_dir, installed_path)
+ if not os.path.isfile(full_installed_path):
+ continue
+ if not full_installed_path.endswith(".py"):
+ continue
+ yield full_installed_path
+
+ def pyc_output_path(path: str) -> str:
+ """Return the path the pyc file would have been written to."""
+ return importlib.util.cache_from_source(path)
+
+ # Compile all of the pyc files for the installed files
+ if pycompile:
+ with captured_stdout() as stdout:
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+ for path in pyc_source_file_paths():
+ success = compileall.compile_file(path, force=True, quiet=True)
+ if success:
+ pyc_path = pyc_output_path(path)
+ assert os.path.exists(pyc_path)
+ pyc_record_path = cast(
+ "RecordPath", pyc_path.replace(os.path.sep, "/")
+ )
+ record_installed(pyc_record_path, pyc_path)
+ logger.debug(stdout.getvalue())
+
+ maker = PipScriptMaker(None, scheme.scripts)
+
+ # Ensure old scripts are overwritten.
+ # See https://github.com/pypa/pip/issues/1800
+ maker.clobber = True
+
+ # Ensure we don't generate any variants for scripts because this is almost
+ # never what somebody wants.
+ # See https://bitbucket.org/pypa/distlib/issue/35/
+ maker.variants = {""}
+
+ # This is required because otherwise distlib creates scripts that are not
+ # executable.
+ # See https://bitbucket.org/pypa/distlib/issue/32/
+ maker.set_mode = True
+
+ # Generate the console and GUI entry points specified in the wheel
+ scripts_to_generate = get_console_script_specs(console)
+
+ gui_scripts_to_generate = list(starmap("{} = {}".format, gui.items()))
+
+ generated_console_scripts = maker.make_multiple(scripts_to_generate)
+ generated.extend(generated_console_scripts)
+
+ generated.extend(maker.make_multiple(gui_scripts_to_generate, {"gui": True}))
+
+ if warn_script_location:
+ msg = message_about_scripts_not_on_PATH(generated_console_scripts)
+ if msg is not None:
+ logger.warning(msg)
+
+ generated_file_mode = 0o666 & ~current_umask()
+
+ @contextlib.contextmanager
+ def _generate_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
+ with adjacent_tmp_file(path, **kwargs) as f:
+ yield f
+ os.chmod(f.name, generated_file_mode)
+ replace(f.name, path)
+
+ dest_info_dir = os.path.join(lib_dir, info_dir)
+
+ # Record pip as the installer
+ installer_path = os.path.join(dest_info_dir, "INSTALLER")
+ with _generate_file(installer_path) as installer_file:
+ installer_file.write(b"pip\n")
+ generated.append(installer_path)
+
+ # Record the PEP 610 direct URL reference
+ if direct_url is not None:
+ direct_url_path = os.path.join(dest_info_dir, DIRECT_URL_METADATA_NAME)
+ with _generate_file(direct_url_path) as direct_url_file:
+ direct_url_file.write(direct_url.to_json().encode("utf-8"))
+ generated.append(direct_url_path)
+
+ # Record the REQUESTED file
+ if requested:
+ requested_path = os.path.join(dest_info_dir, "REQUESTED")
+ with open(requested_path, "wb"):
+ pass
+ generated.append(requested_path)
+
+ record_text = distribution.read_text("RECORD")
+ record_rows = list(csv.reader(record_text.splitlines()))
+
+ rows = get_csv_rows_for_installed(
+ record_rows,
+ installed=installed,
+ changed=changed,
+ generated=generated,
+ lib_dir=lib_dir,
+ )
+
+ # Record details of all files installed
+ record_path = os.path.join(dest_info_dir, "RECORD")
+
+ with _generate_file(record_path, **csv_io_kwargs("w")) as record_file:
+ # Explicitly cast to typing.IO[str] as a workaround for the mypy error:
+ # "writer" has incompatible type "BinaryIO"; expected "_Writer"
+ writer = csv.writer(cast("IO[str]", record_file))
+ writer.writerows(_normalized_outrows(rows))
+
+
+@contextlib.contextmanager
+def req_error_context(req_description: str) -> Generator[None, None, None]:
+ try:
+ yield
+ except InstallationError as e:
+ message = "For req: {}. {}".format(req_description, e.args[0])
+ raise InstallationError(message) from e
+
+
+def install_wheel(
+ name: str,
+ wheel_path: str,
+ scheme: Scheme,
+ req_description: str,
+ pycompile: bool = True,
+ warn_script_location: bool = True,
+ direct_url: Optional[DirectUrl] = None,
+ requested: bool = False,
+) -> None:
+ with ZipFile(wheel_path, allowZip64=True) as z:
+ with req_error_context(req_description):
+ _install_wheel(
+ name=name,
+ wheel_zip=z,
+ wheel_path=wheel_path,
+ scheme=scheme,
+ pycompile=pycompile,
+ warn_script_location=warn_script_location,
+ direct_url=direct_url,
+ requested=requested,
+ )
diff --git a/third_party/python/pip/pip/_internal/operations/prepare.py b/third_party/python/pip/pip/_internal/operations/prepare.py
new file mode 100644
index 0000000000..4bf414cb00
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/operations/prepare.py
@@ -0,0 +1,667 @@
+"""Prepares a distribution for installation
+"""
+
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
+import logging
+import mimetypes
+import os
+import shutil
+from typing import Dict, Iterable, List, Optional
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.distributions import make_distribution_for_install_requirement
+from pip._internal.distributions.installed import InstalledDistribution
+from pip._internal.exceptions import (
+ DirectoryUrlHashUnsupported,
+ HashMismatch,
+ HashUnpinned,
+ InstallationError,
+ MetadataInconsistent,
+ NetworkConnectionError,
+ PreviousBuildDirError,
+ VcsHashUnsupported,
+)
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import BaseDistribution, get_metadata_distribution
+from pip._internal.models.direct_url import ArchiveInfo
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.network.download import BatchDownloader, Downloader
+from pip._internal.network.lazy_wheel import (
+ HTTPRangeRequestUnsupported,
+ dist_from_wheel_url,
+)
+from pip._internal.network.session import PipSession
+from pip._internal.operations.build.build_tracker import BuildTracker
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.direct_url_helpers import (
+ direct_url_for_editable,
+ direct_url_from_link,
+)
+from pip._internal.utils.hashes import Hashes, MissingHashes
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import (
+ display_path,
+ hash_file,
+ hide_url,
+ is_installable_dir,
+)
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.unpacking import unpack_file
+from pip._internal.vcs import vcs
+
+logger = logging.getLogger(__name__)
+
+
+def _get_prepared_distribution(
+ req: InstallRequirement,
+ build_tracker: BuildTracker,
+ finder: PackageFinder,
+ build_isolation: bool,
+ check_build_deps: bool,
+) -> BaseDistribution:
+ """Prepare a distribution for installation."""
+ abstract_dist = make_distribution_for_install_requirement(req)
+ with build_tracker.track(req):
+ abstract_dist.prepare_distribution_metadata(
+ finder, build_isolation, check_build_deps
+ )
+ return abstract_dist.get_metadata_distribution()
+
+
+def unpack_vcs_link(link: Link, location: str, verbosity: int) -> None:
+ vcs_backend = vcs.get_backend_for_scheme(link.scheme)
+ assert vcs_backend is not None
+ vcs_backend.unpack(location, url=hide_url(link.url), verbosity=verbosity)
+
+
+class File:
+ def __init__(self, path: str, content_type: Optional[str]) -> None:
+ self.path = path
+ if content_type is None:
+ self.content_type = mimetypes.guess_type(path)[0]
+ else:
+ self.content_type = content_type
+
+
+def get_http_url(
+ link: Link,
+ download: Downloader,
+ download_dir: Optional[str] = None,
+ hashes: Optional[Hashes] = None,
+) -> File:
+ temp_dir = TempDirectory(kind="unpack", globally_managed=True)
+ # If a download dir is specified, is the file already downloaded there?
+ already_downloaded_path = None
+ if download_dir:
+ already_downloaded_path = _check_download_dir(link, download_dir, hashes)
+
+ if already_downloaded_path:
+ from_path = already_downloaded_path
+ content_type = None
+ else:
+ # let's download to a tmp dir
+ from_path, content_type = download(link, temp_dir.path)
+ if hashes:
+ hashes.check_against_path(from_path)
+
+ return File(from_path, content_type)
+
+
+def get_file_url(
+ link: Link, download_dir: Optional[str] = None, hashes: Optional[Hashes] = None
+) -> File:
+ """Get file and optionally check its hash."""
+ # If a download dir is specified, is the file already there and valid?
+ already_downloaded_path = None
+ if download_dir:
+ already_downloaded_path = _check_download_dir(link, download_dir, hashes)
+
+ if already_downloaded_path:
+ from_path = already_downloaded_path
+ else:
+ from_path = link.file_path
+
+ # If --require-hashes is off, `hashes` is either empty, the
+ # link's embedded hash, or MissingHashes; it is required to
+ # match. If --require-hashes is on, we are satisfied by any
+ # hash in `hashes` matching: a URL-based or an option-based
+ # one; no internet-sourced hash will be in `hashes`.
+ if hashes:
+ hashes.check_against_path(from_path)
+ return File(from_path, None)
+
+
+def unpack_url(
+ link: Link,
+ location: str,
+ download: Downloader,
+ verbosity: int,
+ download_dir: Optional[str] = None,
+ hashes: Optional[Hashes] = None,
+) -> Optional[File]:
+ """Unpack link into location, downloading if required.
+
+ :param hashes: A Hashes object, one of whose embedded hashes must match,
+ or HashMismatch will be raised. If the Hashes is empty, no matches are
+ required, and unhashable types of requirements (like VCS ones, which
+ would ordinarily raise HashUnsupported) are allowed.
+ """
+ # non-editable vcs urls
+ if link.is_vcs:
+ unpack_vcs_link(link, location, verbosity=verbosity)
+ return None
+
+ assert not link.is_existing_dir()
+
+ # file urls
+ if link.is_file:
+ file = get_file_url(link, download_dir, hashes=hashes)
+
+ # http urls
+ else:
+ file = get_http_url(
+ link,
+ download,
+ download_dir,
+ hashes=hashes,
+ )
+
+ # unpack the archive to the build dir location. even when only downloading
+ # archives, they have to be unpacked to parse dependencies, except wheels
+ if not link.is_wheel:
+ unpack_file(file.path, location, file.content_type)
+
+ return file
+
+
+def _check_download_dir(
+ link: Link, download_dir: str, hashes: Optional[Hashes]
+) -> Optional[str]:
+    """Check download_dir for a previously downloaded file with a correct hash.
+
+    If a matching file is found, return its path; otherwise return None.
+ """
+ download_path = os.path.join(download_dir, link.filename)
+
+ if not os.path.exists(download_path):
+ return None
+
+ # If already downloaded, does its hash match?
+ logger.info("File was already downloaded %s", download_path)
+ if hashes:
+ try:
+ hashes.check_against_path(download_path)
+ except HashMismatch:
+ logger.warning(
+ "Previously-downloaded file %s has bad hash. Re-downloading.",
+ download_path,
+ )
+ os.unlink(download_path)
+ return None
+ return download_path
+
+
+class RequirementPreparer:
+ """Prepares a Requirement"""
+
+ def __init__(
+ self,
+ build_dir: str,
+ download_dir: Optional[str],
+ src_dir: str,
+ build_isolation: bool,
+ check_build_deps: bool,
+ build_tracker: BuildTracker,
+ session: PipSession,
+ progress_bar: str,
+ finder: PackageFinder,
+ require_hashes: bool,
+ use_user_site: bool,
+ lazy_wheel: bool,
+ verbosity: int,
+ ) -> None:
+ super().__init__()
+
+ self.src_dir = src_dir
+ self.build_dir = build_dir
+ self.build_tracker = build_tracker
+ self._session = session
+ self._download = Downloader(session, progress_bar)
+ self._batch_download = BatchDownloader(session, progress_bar)
+ self.finder = finder
+
+ # Where still-packed archives should be written to. If None, they are
+ # not saved, and are deleted immediately after unpacking.
+ self.download_dir = download_dir
+
+ # Is build isolation allowed?
+ self.build_isolation = build_isolation
+
+ # Should check build dependencies?
+ self.check_build_deps = check_build_deps
+
+ # Should hash-checking be required?
+ self.require_hashes = require_hashes
+
+ # Should install in user site-packages?
+ self.use_user_site = use_user_site
+
+ # Should wheels be downloaded lazily?
+ self.use_lazy_wheel = lazy_wheel
+
+ # How verbose should underlying tooling be?
+ self.verbosity = verbosity
+
+ # Memoized downloaded files, as mapping of url: path.
+ self._downloaded: Dict[str, str] = {}
+
+ # Previous "header" printed for a link-based InstallRequirement
+ self._previous_requirement_header = ("", "")
+
+ def _log_preparing_link(self, req: InstallRequirement) -> None:
+ """Provide context for the requirement being prepared."""
+ if req.link.is_file and not req.original_link_is_in_wheel_cache:
+ message = "Processing %s"
+ information = str(display_path(req.link.file_path))
+ else:
+ message = "Collecting %s"
+ information = str(req.req or req)
+
+ if (message, information) != self._previous_requirement_header:
+ self._previous_requirement_header = (message, information)
+ logger.info(message, information)
+
+ if req.original_link_is_in_wheel_cache:
+ with indent_log():
+ logger.info("Using cached %s", req.link.filename)
+
+ def _ensure_link_req_src_dir(
+ self, req: InstallRequirement, parallel_builds: bool
+ ) -> None:
+ """Ensure source_dir of a linked InstallRequirement."""
+        # source_dir is only set for editable requirements.
+ if req.link.is_wheel:
+ # We don't need to unpack wheels, so no need for a source
+ # directory.
+ return
+ assert req.source_dir is None
+ if req.link.is_existing_dir():
+ # build local directories in-tree
+ req.source_dir = req.link.file_path
+ return
+
+ # We always delete unpacked sdists after pip runs.
+ req.ensure_has_source_dir(
+ self.build_dir,
+ autodelete=True,
+ parallel_builds=parallel_builds,
+ )
+
+        # If a checkout exists, it's unwise to keep going. Version
+        # inconsistencies are logged later, but do not fail the
+        # installation.
+        # FIXME: this won't upgrade when there's an existing
+        # package unpacked in `req.source_dir`
+        # TODO: this check is now probably dead code
+        if is_installable_dir(req.source_dir):
+            raise PreviousBuildDirError(
+                "pip can't proceed with requirements '{}' due to a "
+                "pre-existing build directory ({}). This is likely "
+                "due to a previous installation that failed. pip is "
+                "being responsible and not assuming it can delete this. "
+                "Please delete it and try again.".format(req, req.source_dir)
+ )
+
+ def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes:
+ # By the time this is called, the requirement's link should have
+ # been checked so we can tell what kind of requirements req is
+ # and raise some more informative errors than otherwise.
+ # (For example, we can raise VcsHashUnsupported for a VCS URL
+ # rather than HashMissing.)
+ if not self.require_hashes:
+ return req.hashes(trust_internet=True)
+
+ # We could check these first 2 conditions inside unpack_url
+ # and save repetition of conditions, but then we would
+ # report less-useful error messages for unhashable
+ # requirements, complaining that there's no hash provided.
+ if req.link.is_vcs:
+ raise VcsHashUnsupported()
+ if req.link.is_existing_dir():
+ raise DirectoryUrlHashUnsupported()
+
+ # Unpinned packages are asking for trouble when a new version
+ # is uploaded. This isn't a security check, but it saves users
+ # a surprising hash mismatch in the future.
+ # file:/// URLs aren't pinnable, so don't complain about them
+ # not being pinned.
+ if req.original_link is None and not req.is_pinned:
+ raise HashUnpinned()
+
+ # If known-good hashes are missing for this requirement,
+ # shim it with a facade object that will provoke hash
+ # computation and then raise a HashMissing exception
+ # showing the user what the hash should be.
+ return req.hashes(trust_internet=False) or MissingHashes()
+
+ def _fetch_metadata_only(
+ self,
+ req: InstallRequirement,
+ ) -> Optional[BaseDistribution]:
+ if self.require_hashes:
+ logger.debug(
+ "Metadata-only fetching is not used as hash checking is required",
+ )
+ return None
+ # Try PEP 658 metadata first, then fall back to lazy wheel if unavailable.
+ return self._fetch_metadata_using_link_data_attr(
+ req
+ ) or self._fetch_metadata_using_lazy_wheel(req.link)
+
+ def _fetch_metadata_using_link_data_attr(
+ self,
+ req: InstallRequirement,
+ ) -> Optional[BaseDistribution]:
+ """Fetch metadata from the data-dist-info-metadata attribute, if possible."""
+ # (1) Get the link to the metadata file, if provided by the backend.
+ metadata_link = req.link.metadata_link()
+ if metadata_link is None:
+ return None
+ assert req.req is not None
+ logger.info(
+ "Obtaining dependency information for %s from %s",
+ req.req,
+ metadata_link,
+ )
+ # (2) Download the contents of the METADATA file, separate from the dist itself.
+ metadata_file = get_http_url(
+ metadata_link,
+ self._download,
+ hashes=metadata_link.as_hashes(),
+ )
+ with open(metadata_file.path, "rb") as f:
+ metadata_contents = f.read()
+ # (3) Generate a dist just from those file contents.
+ metadata_dist = get_metadata_distribution(
+ metadata_contents,
+ req.link.filename,
+ req.req.name,
+ )
+ # (4) Ensure the Name: field from the METADATA file matches the name from the
+ # install requirement.
+ #
+ # NB: raw_name will fall back to the name from the install requirement if
+ # the Name: field is not present, but it's noted in the raw_name docstring
+ # that that should NEVER happen anyway.
+ if metadata_dist.raw_name != req.req.name:
+ raise MetadataInconsistent(
+ req, "Name", req.req.name, metadata_dist.raw_name
+ )
+ return metadata_dist
+
+ def _fetch_metadata_using_lazy_wheel(
+ self,
+ link: Link,
+ ) -> Optional[BaseDistribution]:
+ """Fetch metadata using lazy wheel, if possible."""
+ # --use-feature=fast-deps must be provided.
+ if not self.use_lazy_wheel:
+ return None
+ if link.is_file or not link.is_wheel:
+ logger.debug(
+ "Lazy wheel is not used as %r does not point to a remote wheel",
+ link,
+ )
+ return None
+
+ wheel = Wheel(link.filename)
+ name = canonicalize_name(wheel.name)
+ logger.info(
+ "Obtaining dependency information from %s %s",
+ name,
+ wheel.version,
+ )
+ url = link.url.split("#", 1)[0]
+ try:
+ return dist_from_wheel_url(name, url, self._session)
+ except HTTPRangeRequestUnsupported:
+ logger.debug("%s does not support range requests", url)
+ return None
+
+ def _complete_partial_requirements(
+ self,
+ partially_downloaded_reqs: Iterable[InstallRequirement],
+ parallel_builds: bool = False,
+ ) -> None:
+ """Download any requirements which were only fetched by metadata."""
+ # Download to a temporary directory. These will be copied over as
+ # needed for downstream 'download', 'wheel', and 'install' commands.
+ temp_dir = TempDirectory(kind="unpack", globally_managed=True).path
+
+ # Map each link to the requirement that owns it. This allows us to set
+ # `req.local_file_path` on the appropriate requirement after passing
+ # all the links at once into BatchDownloader.
+ links_to_fully_download: Dict[Link, InstallRequirement] = {}
+ for req in partially_downloaded_reqs:
+ assert req.link
+ links_to_fully_download[req.link] = req
+
+ batch_download = self._batch_download(
+ links_to_fully_download.keys(),
+ temp_dir,
+ )
+ for link, (filepath, _) in batch_download:
+ logger.debug("Downloading link %s to %s", link, filepath)
+ req = links_to_fully_download[link]
+ req.local_file_path = filepath
+
+ # This step is necessary to ensure all lazy wheels are processed
+ # successfully by the 'download', 'wheel', and 'install' commands.
+ for req in partially_downloaded_reqs:
+ self._prepare_linked_requirement(req, parallel_builds)
+
+ def prepare_linked_requirement(
+ self, req: InstallRequirement, parallel_builds: bool = False
+ ) -> BaseDistribution:
+ """Prepare a requirement to be obtained from req.link."""
+ assert req.link
+ self._log_preparing_link(req)
+ with indent_log():
+ # Check if the relevant file is already available
+ # in the download directory
+ file_path = None
+ if self.download_dir is not None and req.link.is_wheel:
+ hashes = self._get_linked_req_hashes(req)
+ file_path = _check_download_dir(req.link, self.download_dir, hashes)
+
+ if file_path is not None:
+ # The file is already available, so mark it as downloaded
+ self._downloaded[req.link.url] = file_path
+ else:
+ # The file is not available, attempt to fetch only metadata
+ metadata_dist = self._fetch_metadata_only(req)
+ if metadata_dist is not None:
+ req.needs_more_preparation = True
+ return metadata_dist
+
+ # None of the optimizations worked, fully prepare the requirement
+ return self._prepare_linked_requirement(req, parallel_builds)
+
+ def prepare_linked_requirements_more(
+ self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False
+ ) -> None:
+ """Prepare linked requirements more, if needed."""
+ reqs = [req for req in reqs if req.needs_more_preparation]
+ for req in reqs:
+ # Determine if any of these requirements were already downloaded.
+ if self.download_dir is not None and req.link.is_wheel:
+ hashes = self._get_linked_req_hashes(req)
+ file_path = _check_download_dir(req.link, self.download_dir, hashes)
+ if file_path is not None:
+ self._downloaded[req.link.url] = file_path
+ req.needs_more_preparation = False
+
+ # Prepare requirements we found were already downloaded for some
+ # reason. The other downloads will be completed separately.
+ partially_downloaded_reqs: List[InstallRequirement] = []
+ for req in reqs:
+ if req.needs_more_preparation:
+ partially_downloaded_reqs.append(req)
+ else:
+ self._prepare_linked_requirement(req, parallel_builds)
+
+ # TODO: separate this part out from RequirementPreparer when the v1
+ # resolver can be removed!
+ self._complete_partial_requirements(
+ partially_downloaded_reqs,
+ parallel_builds=parallel_builds,
+ )
+
+ def _prepare_linked_requirement(
+ self, req: InstallRequirement, parallel_builds: bool
+ ) -> BaseDistribution:
+ assert req.link
+ link = req.link
+
+ self._ensure_link_req_src_dir(req, parallel_builds)
+ hashes = self._get_linked_req_hashes(req)
+
+ if link.is_existing_dir():
+ local_file = None
+ elif link.url not in self._downloaded:
+ try:
+ local_file = unpack_url(
+ link,
+ req.source_dir,
+ self._download,
+ self.verbosity,
+ self.download_dir,
+ hashes,
+ )
+ except NetworkConnectionError as exc:
+ raise InstallationError(
+ "Could not install requirement {} because of HTTP "
+ "error {} for URL {}".format(req, exc, link)
+ )
+ else:
+ file_path = self._downloaded[link.url]
+ if hashes:
+ hashes.check_against_path(file_path)
+ local_file = File(file_path, content_type=None)
+
+ # If download_info is set, we got it from the wheel cache.
+ if req.download_info is None:
+ # Editables don't go through this function (see
+ # prepare_editable_requirement).
+ assert not req.editable
+ req.download_info = direct_url_from_link(link, req.source_dir)
+ # Make sure we have a hash in download_info. If we got it as part of the
+ # URL, it will have been verified and we can rely on it. Otherwise we
+ # compute it from the downloaded file.
+ if (
+ isinstance(req.download_info.info, ArchiveInfo)
+ and not req.download_info.info.hash
+ and local_file
+ ):
+ hash = hash_file(local_file.path)[0].hexdigest()
+ req.download_info.info.hash = f"sha256={hash}"
+
+ # For use in later processing,
+ # preserve the file path on the requirement.
+ if local_file:
+ req.local_file_path = local_file.path
+
+ dist = _get_prepared_distribution(
+ req,
+ self.build_tracker,
+ self.finder,
+ self.build_isolation,
+ self.check_build_deps,
+ )
+ return dist
+
+ def save_linked_requirement(self, req: InstallRequirement) -> None:
+ assert self.download_dir is not None
+ assert req.link is not None
+ link = req.link
+ if link.is_vcs or (link.is_existing_dir() and req.editable):
+ # Make a .zip of the source_dir we already created.
+ req.archive(self.download_dir)
+ return
+
+ if link.is_existing_dir():
+ logger.debug(
+ "Not copying link to destination directory "
+ "since it is a directory: %s",
+ link,
+ )
+ return
+ if req.local_file_path is None:
+ # No distribution was downloaded for this requirement.
+ return
+
+ download_location = os.path.join(self.download_dir, link.filename)
+ if not os.path.exists(download_location):
+ shutil.copy(req.local_file_path, download_location)
+ download_path = display_path(download_location)
+ logger.info("Saved %s", download_path)
+
+ def prepare_editable_requirement(
+ self,
+ req: InstallRequirement,
+ ) -> BaseDistribution:
+ """Prepare an editable requirement."""
+ assert req.editable, "cannot prepare a non-editable req as editable"
+
+ logger.info("Obtaining %s", req)
+
+ with indent_log():
+ if self.require_hashes:
+ raise InstallationError(
+ "The editable requirement {} cannot be installed when "
+ "requiring hashes, because there is no single file to "
+ "hash.".format(req)
+ )
+ req.ensure_has_source_dir(self.src_dir)
+ req.update_editable()
+ assert req.source_dir
+ req.download_info = direct_url_for_editable(req.unpacked_source_directory)
+
+ dist = _get_prepared_distribution(
+ req,
+ self.build_tracker,
+ self.finder,
+ self.build_isolation,
+ self.check_build_deps,
+ )
+
+ req.check_if_exists(self.use_user_site)
+
+ return dist
+
+ def prepare_installed_requirement(
+ self,
+ req: InstallRequirement,
+ skip_reason: str,
+ ) -> BaseDistribution:
+ """Prepare an already-installed requirement."""
+ assert req.satisfied_by, "req should have been satisfied but isn't"
+ assert skip_reason is not None, (
+            "did not get a skip reason even though req.satisfied_by "
+            "is set to {}".format(req.satisfied_by)
+ )
+ logger.info(
+ "Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version
+ )
+ with indent_log():
+ if self.require_hashes:
+ logger.debug(
+ "Since it is already installed, we are trusting this "
+ "package without checking its hash. To ensure a "
+ "completely repeatable environment, install into an "
+ "empty virtualenv."
+ )
+ return InstalledDistribution(req).get_metadata_distribution()
diff --git a/third_party/python/pip/pip/_internal/pyproject.py b/third_party/python/pip/pip/_internal/pyproject.py
new file mode 100644
index 0000000000..1de9f0fde5
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/pyproject.py
@@ -0,0 +1,174 @@
+import importlib.util
+import os
+from collections import namedtuple
+from typing import Any, List, Optional
+
+from pip._vendor import tomli
+from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
+
+from pip._internal.exceptions import (
+ InstallationError,
+ InvalidPyProjectBuildRequires,
+ MissingPyProjectBuildRequires,
+)
+
+
+def _is_list_of_str(obj: Any) -> bool:
+ return isinstance(obj, list) and all(isinstance(item, str) for item in obj)
+
+
+def make_pyproject_path(unpacked_source_directory: str) -> str:
+ return os.path.join(unpacked_source_directory, "pyproject.toml")
+
+
+BuildSystemDetails = namedtuple(
+ "BuildSystemDetails", ["requires", "backend", "check", "backend_path"]
+)
+
+
+def load_pyproject_toml(
+ use_pep517: Optional[bool], pyproject_toml: str, setup_py: str, req_name: str
+) -> Optional[BuildSystemDetails]:
+ """Load the pyproject.toml file.
+
+ Parameters:
+ use_pep517 - Has the user requested PEP 517 processing? None
+ means the user hasn't explicitly specified.
+ pyproject_toml - Location of the project's pyproject.toml file
+ setup_py - Location of the project's setup.py file
+ req_name - The name of the requirement we're processing (for
+ error reporting)
+
+ Returns:
+ None if we should use the legacy code path, otherwise a tuple
+ (
+ requirements from pyproject.toml,
+ name of PEP 517 backend,
+ requirements we should check are installed after setting
+ up the build environment
+ directory paths to import the backend from (backend-path),
+ relative to the project root.
+ )
+ """
+ has_pyproject = os.path.isfile(pyproject_toml)
+ has_setup = os.path.isfile(setup_py)
+
+ if not has_pyproject and not has_setup:
+ raise InstallationError(
+ f"{req_name} does not appear to be a Python project: "
+ f"neither 'setup.py' nor 'pyproject.toml' found."
+ )
+
+ if has_pyproject:
+ with open(pyproject_toml, encoding="utf-8") as f:
+ pp_toml = tomli.loads(f.read())
+ build_system = pp_toml.get("build-system")
+ else:
+ build_system = None
+
+ # The following cases must use PEP 517
+ # We check for use_pep517 being non-None and falsey because that means
+ # the user explicitly requested --no-use-pep517. The value 0 as
+ # opposed to False can occur when the value is provided via an
+ # environment variable or config file option (due to the quirk of
+ # strtobool() returning an integer in pip's configuration code).
+ if has_pyproject and not has_setup:
+ if use_pep517 is not None and not use_pep517:
+ raise InstallationError(
+ "Disabling PEP 517 processing is invalid: "
+ "project does not have a setup.py"
+ )
+ use_pep517 = True
+ elif build_system and "build-backend" in build_system:
+ if use_pep517 is not None and not use_pep517:
+ raise InstallationError(
+ "Disabling PEP 517 processing is invalid: "
+ "project specifies a build backend of {} "
+ "in pyproject.toml".format(build_system["build-backend"])
+ )
+ use_pep517 = True
+
+ # If we haven't worked out whether to use PEP 517 yet,
+ # and the user hasn't explicitly stated a preference,
+ # we do so if the project has a pyproject.toml file
+ # or if we cannot import setuptools.
+
+    # We fall back to PEP 517 when setuptools is unavailable,
+    # so that setuptools can be installed as the default build backend.
+ # For more info see:
+ # https://discuss.python.org/t/pip-without-setuptools-could-the-experience-be-improved/11810/9
+ elif use_pep517 is None:
+ use_pep517 = has_pyproject or not importlib.util.find_spec("setuptools")
+
+ # At this point, we know whether we're going to use PEP 517.
+ assert use_pep517 is not None
+
+ # If we're using the legacy code path, there is nothing further
+ # for us to do here.
+ if not use_pep517:
+ return None
+
+ if build_system is None:
+ # Either the user has a pyproject.toml with no build-system
+ # section, or the user has no pyproject.toml, but has opted in
+ # explicitly via --use-pep517.
+ # In the absence of any explicit backend specification, we
+ # assume the setuptools backend that most closely emulates the
+ # traditional direct setup.py execution, and require wheel and
+ # a version of setuptools that supports that backend.
+
+ build_system = {
+ "requires": ["setuptools>=40.8.0", "wheel"],
+ "build-backend": "setuptools.build_meta:__legacy__",
+ }
+
+ # If we're using PEP 517, we have build system information (either
+ # from pyproject.toml, or defaulted by the code above).
+ # Note that at this point, we do not know if the user has actually
+ # specified a backend, though.
+ assert build_system is not None
+
+ # Ensure that the build-system section in pyproject.toml conforms
+ # to PEP 518.
+
+ # Specifying the build-system table but not the requires key is invalid
+ if "requires" not in build_system:
+ raise MissingPyProjectBuildRequires(package=req_name)
+
+ # Error out if requires is not a list of strings
+ requires = build_system["requires"]
+ if not _is_list_of_str(requires):
+ raise InvalidPyProjectBuildRequires(
+ package=req_name,
+ reason="It is not a list of strings.",
+ )
+
+ # Each requirement must be valid as per PEP 508
+ for requirement in requires:
+ try:
+ Requirement(requirement)
+ except InvalidRequirement as error:
+ raise InvalidPyProjectBuildRequires(
+ package=req_name,
+ reason=f"It contains an invalid requirement: {requirement!r}",
+ ) from error
+
+ backend = build_system.get("build-backend")
+ backend_path = build_system.get("backend-path", [])
+ check: List[str] = []
+ if backend is None:
+ # If the user didn't specify a backend, we assume they want to use
+ # the setuptools backend. But we can't be sure they have included
+ # a version of setuptools which supplies the backend. So we
+ # make a note to check that this requirement is present once
+ # we have set up the environment.
+ # This is quite a lot of work to check for a very specific case. But
+ # the problem is, that case is potentially quite common - projects that
+ # adopted PEP 518 early for the ability to specify requirements to
+ # execute setup.py, but never considered needing to mention the build
+ # tools themselves. The original PEP 518 code had a similar check (but
+ # implemented in a different way).
+ backend = "setuptools.build_meta:__legacy__"
+ check = ["setuptools>=40.8.0"]
+
+ return BuildSystemDetails(requires, backend, check, backend_path)
diff --git a/third_party/python/pip/pip/_internal/req/__init__.py b/third_party/python/pip/pip/_internal/req/__init__.py
new file mode 100644
index 0000000000..8d56359666
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/req/__init__.py
@@ -0,0 +1,94 @@
+import collections
+import logging
+from typing import Generator, List, Optional, Sequence, Tuple
+
+from pip._internal.utils.logging import indent_log
+
+from .req_file import parse_requirements
+from .req_install import InstallRequirement
+from .req_set import RequirementSet
+
+__all__ = [
+ "RequirementSet",
+ "InstallRequirement",
+ "parse_requirements",
+ "install_given_reqs",
+]
+
+logger = logging.getLogger(__name__)
+
+
+class InstallationResult:
+ def __init__(self, name: str) -> None:
+ self.name = name
+
+ def __repr__(self) -> str:
+ return f"InstallationResult(name={self.name!r})"
+
+
+def _validate_requirements(
+ requirements: List[InstallRequirement],
+) -> Generator[Tuple[str, InstallRequirement], None, None]:
+ for req in requirements:
+ assert req.name, f"invalid to-be-installed requirement: {req}"
+ yield req.name, req
+
+
+def install_given_reqs(
+ requirements: List[InstallRequirement],
+ install_options: List[str],
+ global_options: Sequence[str],
+ root: Optional[str],
+ home: Optional[str],
+ prefix: Optional[str],
+ warn_script_location: bool,
+ use_user_site: bool,
+ pycompile: bool,
+) -> List[InstallationResult]:
+ """
+ Install everything in the given list.
+
+ (to be called after having downloaded and unpacked the packages)
+ """
+ to_install = collections.OrderedDict(_validate_requirements(requirements))
+
+ if to_install:
+ logger.info(
+ "Installing collected packages: %s",
+ ", ".join(to_install.keys()),
+ )
+
+ installed = []
+
+ with indent_log():
+ for req_name, requirement in to_install.items():
+ if requirement.should_reinstall:
+ logger.info("Attempting uninstall: %s", req_name)
+ with indent_log():
+ uninstalled_pathset = requirement.uninstall(auto_confirm=True)
+ else:
+ uninstalled_pathset = None
+
+ try:
+ requirement.install(
+ install_options,
+ global_options,
+ root=root,
+ home=home,
+ prefix=prefix,
+ warn_script_location=warn_script_location,
+ use_user_site=use_user_site,
+ pycompile=pycompile,
+ )
+ except Exception:
+ # if install did not succeed, rollback previous uninstall
+ if uninstalled_pathset and not requirement.install_succeeded:
+ uninstalled_pathset.rollback()
+ raise
+ else:
+ if uninstalled_pathset and requirement.install_succeeded:
+ uninstalled_pathset.commit()
+
+ installed.append(InstallationResult(req_name))
+
+ return installed
diff --git a/third_party/python/pip/pip/_internal/req/constructors.py b/third_party/python/pip/pip/_internal/req/constructors.py
new file mode 100644
index 0000000000..dea7c3b011
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/req/constructors.py
@@ -0,0 +1,501 @@
+"""Backing implementation for InstallRequirement's various constructors
+
+The idea here is that these formed a major chunk of InstallRequirement's size,
+so moving them and the support code dedicated to them outside of that class
+makes the rest of the code easier to understand.
+
+These are meant to be used elsewhere within pip to create instances of
+InstallRequirement.
+"""
+
+import logging
+import os
+import re
+from typing import Any, Dict, Optional, Set, Tuple, Union
+
+from pip._vendor.packaging.markers import Marker
+from pip._vendor.packaging.requirements import InvalidRequirement, Requirement
+from pip._vendor.packaging.specifiers import Specifier
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.models.index import PyPI, TestPyPI
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.req.req_file import ParsedRequirement
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.filetypes import is_archive_file
+from pip._internal.utils.misc import is_installable_dir
+from pip._internal.utils.packaging import get_requirement
+from pip._internal.utils.urls import path_to_url
+from pip._internal.vcs import is_url, vcs
+
+__all__ = [
+ "install_req_from_editable",
+ "install_req_from_line",
+ "parse_editable",
+]
+
+logger = logging.getLogger(__name__)
+operators = Specifier._operators.keys()
+
+
+def _strip_extras(path: str) -> Tuple[str, Optional[str]]:
+ m = re.match(r"^(.+)(\[[^\]]+\])$", path)
+ extras = None
+ if m:
+ path_no_extras = m.group(1)
+ extras = m.group(2)
+ else:
+ path_no_extras = path
+
+ return path_no_extras, extras
+
+
+def convert_extras(extras: Optional[str]) -> Set[str]:
+ if not extras:
+ return set()
+ return get_requirement("placeholder" + extras.lower()).extras
+
+
+def parse_editable(editable_req: str) -> Tuple[Optional[str], str, Set[str]]:
+ """Parses an editable requirement into:
+ - a requirement name
+        - a URL
+        - extras
+ Accepted requirements:
+ svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir
+ .[some_extra]
+ """
+
+ url = editable_req
+
+ # If a file path is specified with extras, strip off the extras.
+ url_no_extras, extras = _strip_extras(url)
+
+ if os.path.isdir(url_no_extras):
+ # Treating it as code that has already been checked out
+ url_no_extras = path_to_url(url_no_extras)
+
+ if url_no_extras.lower().startswith("file:"):
+ package_name = Link(url_no_extras).egg_fragment
+ if extras:
+ return (
+ package_name,
+ url_no_extras,
+ get_requirement("placeholder" + extras.lower()).extras,
+ )
+ else:
+ return package_name, url_no_extras, set()
+
+ for version_control in vcs:
+ if url.lower().startswith(f"{version_control}:"):
+ url = f"{version_control}+{url}"
+ break
+
+ link = Link(url)
+
+ if not link.is_vcs:
+ backends = ", ".join(vcs.all_schemes)
+ raise InstallationError(
+ f"{editable_req} is not a valid editable requirement. "
+ f"It should either be a path to a local project or a VCS URL "
+ f"(beginning with {backends})."
+ )
+
+ package_name = link.egg_fragment
+ if not package_name:
+ raise InstallationError(
+ "Could not detect requirement name for '{}', please specify one "
+ "with #egg=your_package_name".format(editable_req)
+ )
+ return package_name, url, set()
+
+
+def check_first_requirement_in_file(filename: str) -> None:
+ """Check if file is parsable as a requirements file.
+
+ This is heavily based on ``pkg_resources.parse_requirements``, but
+ simplified to just check the first meaningful line.
+
+ :raises InvalidRequirement: If the first meaningful line cannot be parsed
+        as a requirement.
+ """
+ with open(filename, encoding="utf-8", errors="ignore") as f:
+ # Create a steppable iterator, so we can handle \-continuations.
+ lines = (
+ line
+ for line in (line.strip() for line in f)
+ if line and not line.startswith("#") # Skip blank lines/comments.
+ )
+
+ for line in lines:
+ # Drop comments -- a hash without a space may be in a URL.
+ if " #" in line:
+ line = line[: line.find(" #")]
+ # If there is a line continuation, drop it, and append the next line.
+ if line.endswith("\\"):
+ line = line[:-2].strip() + next(lines, "")
+ Requirement(line)
+ return
+
+
+def deduce_helpful_msg(req: str) -> str:
+ """Returns helpful msg in case requirements file does not exist,
+ or cannot be parsed.
+
+ :params req: Requirements file path
+ """
+ if not os.path.exists(req):
+ return f" File '{req}' does not exist."
+ msg = " The path does exist. "
+ # Try to parse and check if it is a requirements file.
+ try:
+ check_first_requirement_in_file(req)
+ except InvalidRequirement:
+ logger.debug("Cannot parse '%s' as requirements file", req)
+ else:
+ msg += (
+ f"The argument you provided "
+ f"({req}) appears to be a"
+ f" requirements file. If that is the"
+ f" case, use the '-r' flag to install"
+ f" the packages specified within it."
+ )
+ return msg
+
+
+class RequirementParts:
+ def __init__(
+ self,
+ requirement: Optional[Requirement],
+ link: Optional[Link],
+ markers: Optional[Marker],
+ extras: Set[str],
+ ):
+ self.requirement = requirement
+ self.link = link
+ self.markers = markers
+ self.extras = extras
+
+
+def parse_req_from_editable(editable_req: str) -> RequirementParts:
+ name, url, extras_override = parse_editable(editable_req)
+
+ if name is not None:
+ try:
+ req: Optional[Requirement] = Requirement(name)
+ except InvalidRequirement:
+ raise InstallationError(f"Invalid requirement: '{name}'")
+ else:
+ req = None
+
+ link = Link(url)
+
+ return RequirementParts(req, link, None, extras_override)
+
+
+# ---- The actual constructors follow ----
+
+
+def install_req_from_editable(
+ editable_req: str,
+ comes_from: Optional[Union[InstallRequirement, str]] = None,
+ use_pep517: Optional[bool] = None,
+ isolated: bool = False,
+ options: Optional[Dict[str, Any]] = None,
+ constraint: bool = False,
+ user_supplied: bool = False,
+ permit_editable_wheels: bool = False,
+ config_settings: Optional[Dict[str, str]] = None,
+) -> InstallRequirement:
+
+ parts = parse_req_from_editable(editable_req)
+
+ return InstallRequirement(
+ parts.requirement,
+ comes_from=comes_from,
+ user_supplied=user_supplied,
+ editable=True,
+ permit_editable_wheels=permit_editable_wheels,
+ link=parts.link,
+ constraint=constraint,
+ use_pep517=use_pep517,
+ isolated=isolated,
+ install_options=options.get("install_options", []) if options else [],
+ global_options=options.get("global_options", []) if options else [],
+ hash_options=options.get("hashes", {}) if options else {},
+ config_settings=config_settings,
+ extras=parts.extras,
+ )
+
+
+def _looks_like_path(name: str) -> bool:
+ """Checks whether the string "looks like" a path on the filesystem.
+
+    This does not check whether the target actually exists; it only judges
+    from the appearance.
+
+ Returns true if any of the following conditions is true:
+ * a path separator is found (either os.path.sep or os.path.altsep);
+ * a dot is found (which represents the current directory).
+ """
+ if os.path.sep in name:
+ return True
+ if os.path.altsep is not None and os.path.altsep in name:
+ return True
+ if name.startswith("."):
+ return True
+ return False
+
+
+def _get_url_from_path(path: str, name: str) -> Optional[str]:
+ """
+ First, it checks whether a provided path is an installable directory. If it
+ is, returns the path.
+
+ If false, check if the path is an archive file (such as a .whl).
+ The function checks if the path is a file. If false, if the path has
+ an @, it will treat it as a PEP 440 URL requirement and return the path.
+ """
+ if _looks_like_path(name) and os.path.isdir(path):
+ if is_installable_dir(path):
+ return path_to_url(path)
+ # TODO: The is_installable_dir test here might not be necessary
+ # now that it is done in load_pyproject_toml too.
+ raise InstallationError(
+ f"Directory {name!r} is not installable. Neither 'setup.py' "
+ "nor 'pyproject.toml' found."
+ )
+ if not is_archive_file(path):
+ return None
+ if os.path.isfile(path):
+ return path_to_url(path)
+ urlreq_parts = name.split("@", 1)
+ if len(urlreq_parts) >= 2 and not _looks_like_path(urlreq_parts[0]):
+ # If the path contains '@' and the part before it does not look
+ # like a path, try to treat it as a PEP 440 URL req instead.
+ return None
+ logger.warning(
+ "Requirement %r looks like a filename, but the file does not exist",
+ name,
+ )
+ return path_to_url(path)
+
+
+def parse_req_from_line(name: str, line_source: Optional[str]) -> RequirementParts:
+ if is_url(name):
+ marker_sep = "; "
+ else:
+ marker_sep = ";"
+ if marker_sep in name:
+ name, markers_as_string = name.split(marker_sep, 1)
+ markers_as_string = markers_as_string.strip()
+ if not markers_as_string:
+ markers = None
+ else:
+ markers = Marker(markers_as_string)
+ else:
+ markers = None
+ name = name.strip()
+ req_as_string = None
+ path = os.path.normpath(os.path.abspath(name))
+ link = None
+ extras_as_string = None
+
+ if is_url(name):
+ link = Link(name)
+ else:
+ p, extras_as_string = _strip_extras(path)
+ url = _get_url_from_path(p, name)
+ if url is not None:
+ link = Link(url)
+
+ # it's a local file, dir, or url
+ if link:
+ # Handle relative file URLs
+ if link.scheme == "file" and re.search(r"\.\./", link.url):
+ link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path))))
+ # wheel file
+ if link.is_wheel:
+ wheel = Wheel(link.filename) # can raise InvalidWheelFilename
+ req_as_string = f"{wheel.name}=={wheel.version}"
+ else:
+            # Set the req to the egg fragment. When it's not there, this
+            # will become an 'unnamed' requirement.
+ req_as_string = link.egg_fragment
+
+ # a requirement specifier
+ else:
+ req_as_string = name
+
+ extras = convert_extras(extras_as_string)
+
+ def with_source(text: str) -> str:
+ if not line_source:
+ return text
+ return f"{text} (from {line_source})"
+
+ def _parse_req_string(req_as_string: str) -> Requirement:
+ try:
+ req = get_requirement(req_as_string)
+ except InvalidRequirement:
+ if os.path.sep in req_as_string:
+ add_msg = "It looks like a path."
+ add_msg += deduce_helpful_msg(req_as_string)
+ elif "=" in req_as_string and not any(
+ op in req_as_string for op in operators
+ ):
+ add_msg = "= is not a valid operator. Did you mean == ?"
+ else:
+ add_msg = ""
+ msg = with_source(f"Invalid requirement: {req_as_string!r}")
+ if add_msg:
+ msg += f"\nHint: {add_msg}"
+ raise InstallationError(msg)
+ else:
+ # Deprecate extras after specifiers: "name>=1.0[extras]"
+ # This currently works by accident because _strip_extras() parses
+        # any extras at the end of the string and those are saved in
+ # RequirementParts
+ for spec in req.specifier:
+ spec_str = str(spec)
+ if spec_str.endswith("]"):
+ msg = f"Extras after version '{spec_str}'."
+ raise InstallationError(msg)
+ return req
+
+ if req_as_string is not None:
+ req: Optional[Requirement] = _parse_req_string(req_as_string)
+ else:
+ req = None
+
+ return RequirementParts(req, link, markers, extras)
+
+
+def install_req_from_line(
+ name: str,
+ comes_from: Optional[Union[str, InstallRequirement]] = None,
+ use_pep517: Optional[bool] = None,
+ isolated: bool = False,
+ options: Optional[Dict[str, Any]] = None,
+ constraint: bool = False,
+ line_source: Optional[str] = None,
+ user_supplied: bool = False,
+ config_settings: Optional[Dict[str, str]] = None,
+) -> InstallRequirement:
+ """Creates an InstallRequirement from a name, which might be a
+ requirement, directory containing 'setup.py', filename, or URL.
+
+ :param line_source: An optional string describing where the line is from,
+ for logging purposes in case of an error.
+ """
+ parts = parse_req_from_line(name, line_source)
+
+ return InstallRequirement(
+ parts.requirement,
+ comes_from,
+ link=parts.link,
+ markers=parts.markers,
+ use_pep517=use_pep517,
+ isolated=isolated,
+ install_options=options.get("install_options", []) if options else [],
+ global_options=options.get("global_options", []) if options else [],
+ hash_options=options.get("hashes", {}) if options else {},
+ config_settings=config_settings,
+ constraint=constraint,
+ extras=parts.extras,
+ user_supplied=user_supplied,
+ )
+
+
+def install_req_from_req_string(
+ req_string: str,
+ comes_from: Optional[InstallRequirement] = None,
+ isolated: bool = False,
+ use_pep517: Optional[bool] = None,
+ user_supplied: bool = False,
+ config_settings: Optional[Dict[str, str]] = None,
+) -> InstallRequirement:
+ try:
+ req = get_requirement(req_string)
+ except InvalidRequirement:
+ raise InstallationError(f"Invalid requirement: '{req_string}'")
+
+ domains_not_allowed = [
+ PyPI.file_storage_domain,
+ TestPyPI.file_storage_domain,
+ ]
+ if (
+ req.url
+ and comes_from
+ and comes_from.link
+ and comes_from.link.netloc in domains_not_allowed
+ ):
+        # Explicitly disallow PyPI packages that depend on external URLs.
+ raise InstallationError(
+ "Packages installed from PyPI cannot depend on packages "
+ "which are not also hosted on PyPI.\n"
+ "{} depends on {} ".format(comes_from.name, req)
+ )
+
+ return InstallRequirement(
+ req,
+ comes_from,
+ isolated=isolated,
+ use_pep517=use_pep517,
+ user_supplied=user_supplied,
+ config_settings=config_settings,
+ )
+
+
+def install_req_from_parsed_requirement(
+ parsed_req: ParsedRequirement,
+ isolated: bool = False,
+ use_pep517: Optional[bool] = None,
+ user_supplied: bool = False,
+ config_settings: Optional[Dict[str, str]] = None,
+) -> InstallRequirement:
+ if parsed_req.is_editable:
+ req = install_req_from_editable(
+ parsed_req.requirement,
+ comes_from=parsed_req.comes_from,
+ use_pep517=use_pep517,
+ constraint=parsed_req.constraint,
+ isolated=isolated,
+ user_supplied=user_supplied,
+ config_settings=config_settings,
+ )
+
+ else:
+ req = install_req_from_line(
+ parsed_req.requirement,
+ comes_from=parsed_req.comes_from,
+ use_pep517=use_pep517,
+ isolated=isolated,
+ options=parsed_req.options,
+ constraint=parsed_req.constraint,
+ line_source=parsed_req.line_source,
+ user_supplied=user_supplied,
+ config_settings=config_settings,
+ )
+ return req
+
+
+def install_req_from_link_and_ireq(
+ link: Link, ireq: InstallRequirement
+) -> InstallRequirement:
+ return InstallRequirement(
+ req=ireq.req,
+ comes_from=ireq.comes_from,
+ editable=ireq.editable,
+ link=link,
+ markers=ireq.markers,
+ use_pep517=ireq.use_pep517,
+ isolated=ireq.isolated,
+ install_options=ireq.install_options,
+ global_options=ireq.global_options,
+ hash_options=ireq.hash_options,
+ config_settings=ireq.config_settings,
+ user_supplied=ireq.user_supplied,
+ )
diff --git a/third_party/python/pip/pip/_internal/req/req_file.py b/third_party/python/pip/pip/_internal/req/req_file.py
new file mode 100644
index 0000000000..11ec699acc
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/req/req_file.py
@@ -0,0 +1,544 @@
+"""
+Requirements file parsing
+"""
+
+import optparse
+import os
+import re
+import shlex
+import urllib.parse
+from optparse import Values
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ Generator,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+)
+
+from pip._internal.cli import cmdoptions
+from pip._internal.exceptions import InstallationError, RequirementsFileParseError
+from pip._internal.models.search_scope import SearchScope
+from pip._internal.network.session import PipSession
+from pip._internal.network.utils import raise_for_status
+from pip._internal.utils.encoding import auto_decode
+from pip._internal.utils.urls import get_url_scheme
+
+if TYPE_CHECKING:
+ # NoReturn introduced in 3.6.2; imported only for type checking to maintain
+ # pip compatibility with older patch versions of Python 3.6
+ from typing import NoReturn
+
+ from pip._internal.index.package_finder import PackageFinder
+
+__all__ = ["parse_requirements"]
+
+ReqFileLines = Iterable[Tuple[int, str]]
+
+LineParser = Callable[[str], Tuple[str, Values]]
+
+SCHEME_RE = re.compile(r"^(http|https|file):", re.I)
+COMMENT_RE = re.compile(r"(^|\s+)#.*$")
+
+# Matches environment variable-style values in '${MY_VARIABLE_1}' with the
+# variable name consisting of only uppercase letters, digits or the '_'
+# (underscore). This follows the POSIX standard defined in IEEE Std 1003.1,
+# 2013 Edition.
+ENV_VAR_RE = re.compile(r"(?P<var>\$\{(?P<name>[A-Z0-9_]+)\})")
+
+SUPPORTED_OPTIONS: List[Callable[..., optparse.Option]] = [
+ cmdoptions.index_url,
+ cmdoptions.extra_index_url,
+ cmdoptions.no_index,
+ cmdoptions.constraints,
+ cmdoptions.requirements,
+ cmdoptions.editable,
+ cmdoptions.find_links,
+ cmdoptions.no_binary,
+ cmdoptions.only_binary,
+ cmdoptions.prefer_binary,
+ cmdoptions.require_hashes,
+ cmdoptions.pre,
+ cmdoptions.trusted_host,
+ cmdoptions.use_new_feature,
+]
+
+# options to be passed to requirements
+SUPPORTED_OPTIONS_REQ: List[Callable[..., optparse.Option]] = [
+ cmdoptions.install_options,
+ cmdoptions.global_options,
+ cmdoptions.hash,
+]
+
+# the 'dest' string values
+SUPPORTED_OPTIONS_REQ_DEST = [str(o().dest) for o in SUPPORTED_OPTIONS_REQ]
+
+
+class ParsedRequirement:
+ def __init__(
+ self,
+ requirement: str,
+ is_editable: bool,
+ comes_from: str,
+ constraint: bool,
+ options: Optional[Dict[str, Any]] = None,
+ line_source: Optional[str] = None,
+ ) -> None:
+ self.requirement = requirement
+ self.is_editable = is_editable
+ self.comes_from = comes_from
+ self.options = options
+ self.constraint = constraint
+ self.line_source = line_source
+
+
+class ParsedLine:
+ def __init__(
+ self,
+ filename: str,
+ lineno: int,
+ args: str,
+ opts: Values,
+ constraint: bool,
+ ) -> None:
+ self.filename = filename
+ self.lineno = lineno
+ self.opts = opts
+ self.constraint = constraint
+
+ if args:
+ self.is_requirement = True
+ self.is_editable = False
+ self.requirement = args
+ elif opts.editables:
+ self.is_requirement = True
+ self.is_editable = True
+ # We don't support multiple -e on one line
+ self.requirement = opts.editables[0]
+ else:
+ self.is_requirement = False
+
+
+def parse_requirements(
+ filename: str,
+ session: PipSession,
+ finder: Optional["PackageFinder"] = None,
+ options: Optional[optparse.Values] = None,
+ constraint: bool = False,
+) -> Generator[ParsedRequirement, None, None]:
+ """Parse a requirements file and yield ParsedRequirement instances.
+
+ :param filename: Path or url of requirements file.
+ :param session: PipSession instance.
+ :param finder: Instance of pip.index.PackageFinder.
+ :param options: cli options.
+    :param constraint: If true, parsing a constraint file rather than
+        a requirements file.
+ """
+ line_parser = get_line_parser(finder)
+ parser = RequirementsFileParser(session, line_parser)
+
+ for parsed_line in parser.parse(filename, constraint):
+ parsed_req = handle_line(
+ parsed_line, options=options, finder=finder, session=session
+ )
+ if parsed_req is not None:
+ yield parsed_req
+
+
+def preprocess(content: str) -> ReqFileLines:
+ """Split, filter, and join lines, and return a line iterator
+
+ :param content: the content of the requirements file
+ """
+ lines_enum: ReqFileLines = enumerate(content.splitlines(), start=1)
+ lines_enum = join_lines(lines_enum)
+ lines_enum = ignore_comments(lines_enum)
+ lines_enum = expand_env_variables(lines_enum)
+ return lines_enum
+
+
+def handle_requirement_line(
+ line: ParsedLine,
+ options: Optional[optparse.Values] = None,
+) -> ParsedRequirement:
+
+ # preserve for the nested code path
+ line_comes_from = "{} {} (line {})".format(
+ "-c" if line.constraint else "-r",
+ line.filename,
+ line.lineno,
+ )
+
+ assert line.is_requirement
+
+ if line.is_editable:
+ # For editable requirements, we don't support per-requirement
+ # options, so just return the parsed requirement.
+ return ParsedRequirement(
+ requirement=line.requirement,
+ is_editable=line.is_editable,
+ comes_from=line_comes_from,
+ constraint=line.constraint,
+ )
+ else:
+ # get the options that apply to requirements
+ req_options = {}
+ for dest in SUPPORTED_OPTIONS_REQ_DEST:
+ if dest in line.opts.__dict__ and line.opts.__dict__[dest]:
+ req_options[dest] = line.opts.__dict__[dest]
+
+ line_source = f"line {line.lineno} of {line.filename}"
+ return ParsedRequirement(
+ requirement=line.requirement,
+ is_editable=line.is_editable,
+ comes_from=line_comes_from,
+ constraint=line.constraint,
+ options=req_options,
+ line_source=line_source,
+ )
+
+
+def handle_option_line(
+ opts: Values,
+ filename: str,
+ lineno: int,
+ finder: Optional["PackageFinder"] = None,
+ options: Optional[optparse.Values] = None,
+ session: Optional[PipSession] = None,
+) -> None:
+
+ if options:
+ # percolate options upward
+ if opts.require_hashes:
+ options.require_hashes = opts.require_hashes
+ if opts.features_enabled:
+ options.features_enabled.extend(
+ f for f in opts.features_enabled if f not in options.features_enabled
+ )
+
+ # set finder options
+ if finder:
+ find_links = finder.find_links
+ index_urls = finder.index_urls
+ no_index = finder.search_scope.no_index
+ if opts.no_index is True:
+ no_index = True
+ index_urls = []
+ if opts.index_url and not no_index:
+ index_urls = [opts.index_url]
+ if opts.extra_index_urls and not no_index:
+ index_urls.extend(opts.extra_index_urls)
+ if opts.find_links:
+ # FIXME: it would be nice to keep track of the source
+ # of the find_links: support a find-links local path
+ # relative to a requirements file.
+ value = opts.find_links[0]
+ req_dir = os.path.dirname(os.path.abspath(filename))
+ relative_to_reqs_file = os.path.join(req_dir, value)
+ if os.path.exists(relative_to_reqs_file):
+ value = relative_to_reqs_file
+ find_links.append(value)
+
+ if session:
+ # We need to update the auth urls in session
+ session.update_index_urls(index_urls)
+
+ search_scope = SearchScope(
+ find_links=find_links,
+ index_urls=index_urls,
+ no_index=no_index,
+ )
+ finder.search_scope = search_scope
+
+ if opts.pre:
+ finder.set_allow_all_prereleases()
+
+ if opts.prefer_binary:
+ finder.set_prefer_binary()
+
+ if session:
+ for host in opts.trusted_hosts or []:
+ source = f"line {lineno} of {filename}"
+ session.add_trusted_host(host, source=source)
+
+
+def handle_line(
+ line: ParsedLine,
+ options: Optional[optparse.Values] = None,
+ finder: Optional["PackageFinder"] = None,
+ session: Optional[PipSession] = None,
+) -> Optional[ParsedRequirement]:
+ """Handle a single parsed requirements line; This can result in
+ creating/yielding requirements, or updating the finder.
+
+ :param line: The parsed line to be processed.
+ :param options: CLI options.
+ :param finder: The finder - updated by non-requirement lines.
+ :param session: The session - updated by non-requirement lines.
+
+ Returns a ParsedRequirement object if the line is a requirement line,
+ otherwise returns None.
+
+ For lines that contain requirements, the only options that have an effect
+ are from SUPPORTED_OPTIONS_REQ, and they are scoped to the
+ requirement. Other options from SUPPORTED_OPTIONS may be present, but are
+ ignored.
+
+ For lines that do not contain requirements, the only options that have an
+ effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may
+ be present, but are ignored. These lines may contain multiple options
+    (although our docs imply only one is supported), and all are parsed and
+    affect the finder.
+ """
+
+ if line.is_requirement:
+ parsed_req = handle_requirement_line(line, options)
+ return parsed_req
+ else:
+ handle_option_line(
+ line.opts,
+ line.filename,
+ line.lineno,
+ finder,
+ options,
+ session,
+ )
+ return None
+
+
+class RequirementsFileParser:
+ def __init__(
+ self,
+ session: PipSession,
+ line_parser: LineParser,
+ ) -> None:
+ self._session = session
+ self._line_parser = line_parser
+
+ def parse(
+ self, filename: str, constraint: bool
+ ) -> Generator[ParsedLine, None, None]:
+ """Parse a given file, yielding parsed lines."""
+ yield from self._parse_and_recurse(filename, constraint)
+
+ def _parse_and_recurse(
+ self, filename: str, constraint: bool
+ ) -> Generator[ParsedLine, None, None]:
+ for line in self._parse_file(filename, constraint):
+ if not line.is_requirement and (
+ line.opts.requirements or line.opts.constraints
+ ):
+ # parse a nested requirements file
+ if line.opts.requirements:
+ req_path = line.opts.requirements[0]
+ nested_constraint = False
+ else:
+ req_path = line.opts.constraints[0]
+ nested_constraint = True
+
+ # original file is over http
+ if SCHEME_RE.search(filename):
+ # do a url join so relative paths work
+ req_path = urllib.parse.urljoin(filename, req_path)
+ # original file and nested file are paths
+ elif not SCHEME_RE.search(req_path):
+ # do a join so relative paths work
+ req_path = os.path.join(
+ os.path.dirname(filename),
+ req_path,
+ )
+
+ yield from self._parse_and_recurse(req_path, nested_constraint)
+ else:
+ yield line
+
+ def _parse_file(
+ self, filename: str, constraint: bool
+ ) -> Generator[ParsedLine, None, None]:
+ _, content = get_file_content(filename, self._session)
+
+ lines_enum = preprocess(content)
+
+ for line_number, line in lines_enum:
+ try:
+ args_str, opts = self._line_parser(line)
+ except OptionParsingError as e:
+ # add offending line
+ msg = f"Invalid requirement: {line}\n{e.msg}"
+ raise RequirementsFileParseError(msg)
+
+ yield ParsedLine(
+ filename,
+ line_number,
+ args_str,
+ opts,
+ constraint,
+ )
+
+
+def get_line_parser(finder: Optional["PackageFinder"]) -> LineParser:
+ def parse_line(line: str) -> Tuple[str, Values]:
+ # Build new parser for each line since it accumulates appendable
+ # options.
+ parser = build_parser()
+ defaults = parser.get_default_values()
+ defaults.index_url = None
+ if finder:
+ defaults.format_control = finder.format_control
+
+ args_str, options_str = break_args_options(line)
+
+ try:
+ options = shlex.split(options_str)
+ except ValueError as e:
+ raise OptionParsingError(f"Could not split options: {options_str}") from e
+
+ opts, _ = parser.parse_args(options, defaults)
+
+ return args_str, opts
+
+ return parse_line
+
+
+def break_args_options(line: str) -> Tuple[str, str]:
+ """Break up the line into an args and options string. We only want to shlex
+ (and then optparse) the options, not the args. args can contain markers
+ which are corrupted by shlex.
+ """
+ tokens = line.split(" ")
+ args = []
+ options = tokens[:]
+ for token in tokens:
+        if token.startswith("-"):
+ break
+ else:
+ args.append(token)
+ options.pop(0)
+ return " ".join(args), " ".join(options)
+
+
+class OptionParsingError(Exception):
+ def __init__(self, msg: str) -> None:
+ self.msg = msg
+
+
+def build_parser() -> optparse.OptionParser:
+ """
+ Return a parser for parsing requirement lines
+ """
+ parser = optparse.OptionParser(add_help_option=False)
+
+ option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ
+ for option_factory in option_factories:
+ option = option_factory()
+ parser.add_option(option)
+
+ # By default optparse sys.exits on parsing errors. We want to wrap
+ # that in our own exception.
+ def parser_exit(self: Any, msg: str) -> "NoReturn":
+ raise OptionParsingError(msg)
+
+ # NOTE: mypy disallows assigning to a method
+ # https://github.com/python/mypy/issues/2427
+ parser.exit = parser_exit # type: ignore
+
+ return parser
+
+
+def join_lines(lines_enum: ReqFileLines) -> ReqFileLines:
+ """Joins a line ending in '\' with the previous line (except when following
+ comments). The joined line takes on the index of the first line.
+ """
+ primary_line_number = None
+ new_line: List[str] = []
+ for line_number, line in lines_enum:
+ if not line.endswith("\\") or COMMENT_RE.match(line):
+ if COMMENT_RE.match(line):
+ # this ensures comments are always matched later
+ line = " " + line
+ if new_line:
+ new_line.append(line)
+ assert primary_line_number is not None
+ yield primary_line_number, "".join(new_line)
+ new_line = []
+ else:
+ yield line_number, line
+ else:
+ if not new_line:
+ primary_line_number = line_number
+ new_line.append(line.strip("\\"))
+
+ # last line contains \
+ if new_line:
+ assert primary_line_number is not None
+ yield primary_line_number, "".join(new_line)
+
+ # TODO: handle space after '\'.
+
+
+def ignore_comments(lines_enum: ReqFileLines) -> ReqFileLines:
+ """
+    Strips comments and filters out empty lines.
+ """
+ for line_number, line in lines_enum:
+ line = COMMENT_RE.sub("", line)
+ line = line.strip()
+ if line:
+ yield line_number, line
+
+
+def expand_env_variables(lines_enum: ReqFileLines) -> ReqFileLines:
+ """Replace all environment variables that can be retrieved via `os.getenv`.
+
+ The only allowed format for environment variables defined in the
+ requirement file is `${MY_VARIABLE_1}` to ensure two things:
+
+    1. Strings that contain a `$` aren't accidentally (partially) expanded.
+    2. Consistency across platforms is ensured for requirement files.
+
+ These points are the result of a discussion on the `github pull
+ request #3514 <https://github.com/pypa/pip/pull/3514>`_.
+
+ Valid characters in variable names follow the `POSIX standard
+ <http://pubs.opengroup.org/onlinepubs/9699919799/>`_ and are limited
+    to uppercase letters, digits and the `_` (underscore).
+ """
+ for line_number, line in lines_enum:
+ for env_var, var_name in ENV_VAR_RE.findall(line):
+ value = os.getenv(var_name)
+ if not value:
+ continue
+
+ line = line.replace(env_var, value)
+
+ yield line_number, line
+
+
+def get_file_content(url: str, session: PipSession) -> Tuple[str, str]:
+ """Gets the content of a file; it may be a filename, file: URL, or
+ http: URL. Returns (location, content). Content is unicode.
+ Respects # -*- coding: declarations on the retrieved files.
+
+ :param url: File path or url.
+ :param session: PipSession instance.
+ """
+ scheme = get_url_scheme(url)
+
+ # Pip has special support for file:// URLs (LocalFSAdapter).
+ if scheme in ["http", "https", "file"]:
+ resp = session.get(url)
+ raise_for_status(resp)
+ return resp.url, resp.text
+
+ # Assume this is a bare path.
+ try:
+ with open(url, "rb") as f:
+ content = auto_decode(f.read())
+ except OSError as exc:
+ raise InstallationError(f"Could not open requirements file: {exc}")
+ return url, content
diff --git a/third_party/python/pip/pip/_internal/req/req_install.py b/third_party/python/pip/pip/_internal/req/req_install.py
new file mode 100644
index 0000000000..bb38ec09da
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/req/req_install.py
@@ -0,0 +1,946 @@
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
+import functools
+import logging
+import os
+import shutil
+import sys
+import uuid
+import zipfile
+from enum import Enum
+from optparse import Values
+from typing import Any, Collection, Dict, Iterable, List, Optional, Sequence, Union
+
+from pip._vendor.packaging.markers import Marker
+from pip._vendor.packaging.requirements import Requirement
+from pip._vendor.packaging.specifiers import SpecifierSet
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.packaging.version import Version
+from pip._vendor.packaging.version import parse as parse_version
+from pip._vendor.pyproject_hooks import BuildBackendHookCaller
+
+from pip._internal.build_env import BuildEnvironment, NoOpBuildEnvironment
+from pip._internal.exceptions import InstallationError, LegacyInstallFailure
+from pip._internal.locations import get_scheme
+from pip._internal.metadata import (
+ BaseDistribution,
+ get_default_environment,
+ get_directory_distribution,
+ get_wheel_distribution,
+)
+from pip._internal.metadata.base import FilesystemWheel
+from pip._internal.models.direct_url import DirectUrl
+from pip._internal.models.link import Link
+from pip._internal.operations.build.metadata import generate_metadata
+from pip._internal.operations.build.metadata_editable import generate_editable_metadata
+from pip._internal.operations.build.metadata_legacy import (
+ generate_metadata as generate_metadata_legacy,
+)
+from pip._internal.operations.install.editable_legacy import (
+ install_editable as install_editable_legacy,
+)
+from pip._internal.operations.install.legacy import install as install_legacy
+from pip._internal.operations.install.wheel import install_wheel
+from pip._internal.pyproject import load_pyproject_toml, make_pyproject_path
+from pip._internal.req.req_uninstall import UninstallPathSet
+from pip._internal.utils.deprecation import LegacyInstallReason, deprecated
+from pip._internal.utils.direct_url_helpers import (
+ direct_url_for_editable,
+ direct_url_from_link,
+)
+from pip._internal.utils.hashes import Hashes
+from pip._internal.utils.misc import (
+ ConfiguredBuildBackendHookCaller,
+ ask_path_exists,
+ backup_dir,
+ display_path,
+ hide_url,
+ redact_auth_from_url,
+)
+from pip._internal.utils.packaging import safe_extra
+from pip._internal.utils.subprocess import runner_with_spinner_message
+from pip._internal.utils.temp_dir import TempDirectory, tempdir_kinds
+from pip._internal.utils.virtualenv import running_under_virtualenv
+from pip._internal.vcs import vcs
+
+logger = logging.getLogger(__name__)
+
+
+class InstallRequirement:
+ """
+ Represents something that may be installed later on, may have information
+    about where to fetch the relevant requirement, and also contains logic for
+    installing the requirement.
+ """
+
+ def __init__(
+ self,
+ req: Optional[Requirement],
+ comes_from: Optional[Union[str, "InstallRequirement"]],
+ editable: bool = False,
+ link: Optional[Link] = None,
+ markers: Optional[Marker] = None,
+ use_pep517: Optional[bool] = None,
+ isolated: bool = False,
+ install_options: Optional[List[str]] = None,
+ global_options: Optional[List[str]] = None,
+ hash_options: Optional[Dict[str, List[str]]] = None,
+ config_settings: Optional[Dict[str, str]] = None,
+ constraint: bool = False,
+ extras: Collection[str] = (),
+ user_supplied: bool = False,
+ permit_editable_wheels: bool = False,
+ ) -> None:
+ assert req is None or isinstance(req, Requirement), req
+ self.req = req
+ self.comes_from = comes_from
+ self.constraint = constraint
+ self.editable = editable
+ self.permit_editable_wheels = permit_editable_wheels
+ self.legacy_install_reason: Optional[LegacyInstallReason] = None
+
+ # source_dir is the local directory where the linked requirement is
+ # located, or unpacked. In case unpacking is needed, creating and
+ # populating source_dir is done by the RequirementPreparer. Note this
+ # is not necessarily the directory where pyproject.toml or setup.py is
+ # located - that one is obtained via unpacked_source_directory.
+ self.source_dir: Optional[str] = None
+ if self.editable:
+ assert link
+ if link.is_file:
+ self.source_dir = os.path.normpath(os.path.abspath(link.file_path))
+
+ if link is None and req and req.url:
+ # PEP 508 URL requirement
+ link = Link(req.url)
+ self.link = self.original_link = link
+ self.original_link_is_in_wheel_cache = False
+
+        # Information about the location of the artifact that was downloaded.
+        # This property is guaranteed to be set in resolver results.
+ self.download_info: Optional[DirectUrl] = None
+
+ # Path to any downloaded or already-existing package.
+ self.local_file_path: Optional[str] = None
+ if self.link and self.link.is_file:
+ self.local_file_path = self.link.file_path
+
+ if extras:
+ self.extras = extras
+ elif req:
+ self.extras = {safe_extra(extra) for extra in req.extras}
+ else:
+ self.extras = set()
+ if markers is None and req:
+ markers = req.marker
+ self.markers = markers
+
+ # This holds the Distribution object if this requirement is already installed.
+ self.satisfied_by: Optional[BaseDistribution] = None
+ # Whether the installation process should try to uninstall an existing
+ # distribution before installing this requirement.
+ self.should_reinstall = False
+ # Temporary build location
+ self._temp_build_dir: Optional[TempDirectory] = None
+ # Set to True after successful installation
+ self.install_succeeded: Optional[bool] = None
+ # Supplied options
+ self.install_options = install_options if install_options else []
+ self.global_options = global_options if global_options else []
+ self.hash_options = hash_options if hash_options else {}
+ self.config_settings = config_settings
+ # Set to True after successful preparation of this requirement
+ self.prepared = False
+        # User-supplied requirements are explicitly requested for installation
+ # by the user via CLI arguments or requirements files, as opposed to,
+ # e.g. dependencies, extras or constraints.
+ self.user_supplied = user_supplied
+
+ self.isolated = isolated
+ self.build_env: BuildEnvironment = NoOpBuildEnvironment()
+
+ # For PEP 517, the directory where we request the project metadata
+ # gets stored. We need this to pass to build_wheel, so the backend
+ # can ensure that the wheel matches the metadata (see the PEP for
+ # details).
+ self.metadata_directory: Optional[str] = None
+
+ # The static build requirements (from pyproject.toml)
+ self.pyproject_requires: Optional[List[str]] = None
+
+ # Build requirements that we will check are available
+ self.requirements_to_check: List[str] = []
+
+ # The PEP 517 backend we should use to build the project
+ self.pep517_backend: Optional[BuildBackendHookCaller] = None
+
+ # Are we using PEP 517 for this requirement?
+ # After pyproject.toml has been loaded, the only valid values are True
+ # and False. Before loading, None is valid (meaning "use the default").
+ # Setting an explicit value before loading pyproject.toml is supported,
+ # but after loading this flag should be treated as read only.
+ self.use_pep517 = use_pep517
+
+ # This requirement needs more preparation before it can be built
+ self.needs_more_preparation = False
+
+ def __str__(self) -> str:
+ if self.req:
+ s = str(self.req)
+ if self.link:
+ s += " from {}".format(redact_auth_from_url(self.link.url))
+ elif self.link:
+ s = redact_auth_from_url(self.link.url)
+ else:
+ s = "<InstallRequirement>"
+ if self.satisfied_by is not None:
+ if self.satisfied_by.location is not None:
+ location = display_path(self.satisfied_by.location)
+ else:
+ location = "<memory>"
+ s += f" in {location}"
+ if self.comes_from:
+ if isinstance(self.comes_from, str):
+ comes_from: Optional[str] = self.comes_from
+ else:
+ comes_from = self.comes_from.from_path()
+ if comes_from:
+ s += f" (from {comes_from})"
+ return s
+
+ def __repr__(self) -> str:
+ return "<{} object: {} editable={!r}>".format(
+ self.__class__.__name__, str(self), self.editable
+ )
+
+ def format_debug(self) -> str:
+ """An un-tested helper for getting state, for debugging."""
+ attributes = vars(self)
+ names = sorted(attributes)
+
+ state = ("{}={!r}".format(attr, attributes[attr]) for attr in sorted(names))
+ return "<{name} object: {{{state}}}>".format(
+ name=self.__class__.__name__,
+ state=", ".join(state),
+ )
+
+ # Things that are valid for all kinds of requirements?
+ @property
+ def name(self) -> Optional[str]:
+ if self.req is None:
+ return None
+ return self.req.name
+
+ @functools.lru_cache() # use cached_property in python 3.8+
+ def supports_pyproject_editable(self) -> bool:
+ if not self.use_pep517:
+ return False
+ assert self.pep517_backend
+ with self.build_env:
+ runner = runner_with_spinner_message(
+ "Checking if build backend supports build_editable"
+ )
+ with self.pep517_backend.subprocess_runner(runner):
+ return "build_editable" in self.pep517_backend._supported_features()
+
+ @property
+ def specifier(self) -> SpecifierSet:
+ return self.req.specifier
+
+ @property
+ def is_pinned(self) -> bool:
+ """Return whether I am pinned to an exact version.
+
+ For example, some-package==1.2 is pinned; some-package>1.2 is not.
+ """
+ specifiers = self.specifier
+ return len(specifiers) == 1 and next(iter(specifiers)).operator in {"==", "==="}
+
+ def match_markers(self, extras_requested: Optional[Iterable[str]] = None) -> bool:
+ if not extras_requested:
+ # Provide an extra to safely evaluate the markers
+ # without matching any extra
+ extras_requested = ("",)
+ if self.markers is not None:
+ return any(
+ self.markers.evaluate({"extra": extra}) for extra in extras_requested
+ )
+ else:
+ return True
+
+ @property
+ def has_hash_options(self) -> bool:
+ """Return whether any known-good hashes are specified as options.
+
+ These activate --require-hashes mode; hashes specified as part of a
+ URL do not.
+
+ """
+ return bool(self.hash_options)
+
+ def hashes(self, trust_internet: bool = True) -> Hashes:
+ """Return a hash-comparer that considers my option- and URL-based
+ hashes to be known-good.
+
+ Hashes in URLs--ones embedded in the requirements file, not ones
+ downloaded from an index server--are almost peers with ones from
+ flags. They satisfy --require-hashes (whether it was implicitly or
+ explicitly activated) but do not activate it. md5 and sha224 are not
+ allowed in flags, which should nudge people toward good algos. We
+ always OR all hashes together, even ones from URLs.
+
+ :param trust_internet: Whether to trust URL-based (#md5=...) hashes
+ downloaded from the internet, as by populate_link()
+
+ """
+ good_hashes = self.hash_options.copy()
+ link = self.link if trust_internet else self.original_link
+ if link and link.hash:
+ good_hashes.setdefault(link.hash_name, []).append(link.hash)
+ return Hashes(good_hashes)
+
+ def from_path(self) -> Optional[str]:
+ """Format a nice indicator to show where this "comes from" """
+ if self.req is None:
+ return None
+ s = str(self.req)
+ if self.comes_from:
+ if isinstance(self.comes_from, str):
+ comes_from = self.comes_from
+ else:
+ comes_from = self.comes_from.from_path()
+ if comes_from:
+ s += "->" + comes_from
+ return s
+
+ def ensure_build_location(
+ self, build_dir: str, autodelete: bool, parallel_builds: bool
+ ) -> str:
+ assert build_dir is not None
+ if self._temp_build_dir is not None:
+ assert self._temp_build_dir.path
+ return self._temp_build_dir.path
+ if self.req is None:
+ # Some systems have /tmp as a symlink which confuses custom
+ # builds (such as numpy). Thus, we ensure that the real path
+ # is returned.
+ self._temp_build_dir = TempDirectory(
+ kind=tempdir_kinds.REQ_BUILD, globally_managed=True
+ )
+
+ return self._temp_build_dir.path
+
+ # This is the only remaining place where we manually determine the path
+ # for the temporary directory. It is only needed for editables where
+ # it is the value of the --src option.
+
+ # When parallel builds are enabled, add a UUID to the build directory
+ # name so multiple builds do not interfere with each other.
+ dir_name: str = canonicalize_name(self.name)
+ if parallel_builds:
+ dir_name = f"{dir_name}_{uuid.uuid4().hex}"
+
+ # FIXME: Is there a better place to create the build_dir? (hg and bzr
+ # need this)
+ if not os.path.exists(build_dir):
+ logger.debug("Creating directory %s", build_dir)
+ os.makedirs(build_dir)
+ actual_build_dir = os.path.join(build_dir, dir_name)
+ # `None` indicates that we respect the globally-configured deletion
+ # settings, which is what we actually want when auto-deleting.
+ delete_arg = None if autodelete else False
+ return TempDirectory(
+ path=actual_build_dir,
+ delete=delete_arg,
+ kind=tempdir_kinds.REQ_BUILD,
+ globally_managed=True,
+ ).path
+
+ def _set_requirement(self) -> None:
+ """Set requirement after generating metadata."""
+ assert self.req is None
+ assert self.metadata is not None
+ assert self.source_dir is not None
+
+ # Construct a Requirement object from the generated metadata
+ if isinstance(parse_version(self.metadata["Version"]), Version):
+ op = "=="
+ else:
+ op = "==="
+
+ self.req = Requirement(
+ "".join(
+ [
+ self.metadata["Name"],
+ op,
+ self.metadata["Version"],
+ ]
+ )
+ )
+
+ def warn_on_mismatching_name(self) -> None:
+ metadata_name = canonicalize_name(self.metadata["Name"])
+ if canonicalize_name(self.req.name) == metadata_name:
+ # Everything is fine.
+ return
+
+ # If we're here, there's a mismatch. Log a warning about it.
+ logger.warning(
+ "Generating metadata for package %s "
+ "produced metadata for project name %s. Fix your "
+ "#egg=%s fragments.",
+ self.name,
+ metadata_name,
+ self.name,
+ )
+ self.req = Requirement(metadata_name)
+
+ def check_if_exists(self, use_user_site: bool) -> None:
+ """Find an installed distribution that satisfies or conflicts
+ with this requirement, and set self.satisfied_by or
+ self.should_reinstall appropriately.
+ """
+ if self.req is None:
+ return
+ existing_dist = get_default_environment().get_distribution(self.req.name)
+ if not existing_dist:
+ return
+
+ version_compatible = self.req.specifier.contains(
+ existing_dist.version,
+ prereleases=True,
+ )
+ if not version_compatible:
+ self.satisfied_by = None
+ if use_user_site:
+ if existing_dist.in_usersite:
+ self.should_reinstall = True
+ elif running_under_virtualenv() and existing_dist.in_site_packages:
+ raise InstallationError(
+ f"Will not install to the user site because it will "
+ f"lack sys.path precedence to {existing_dist.raw_name} "
+ f"in {existing_dist.location}"
+ )
+ else:
+ self.should_reinstall = True
+ else:
+ if self.editable:
+ self.should_reinstall = True
+ # when installing editables, nothing pre-existing should ever
+ # satisfy
+ self.satisfied_by = None
+ else:
+ self.satisfied_by = existing_dist
+
+ # Things valid for wheels
+ @property
+ def is_wheel(self) -> bool:
+ if not self.link:
+ return False
+ return self.link.is_wheel
+
+ # Things valid for sdists
+ @property
+ def unpacked_source_directory(self) -> str:
+ return os.path.join(
+ self.source_dir, self.link and self.link.subdirectory_fragment or ""
+ )
+
+ @property
+ def setup_py_path(self) -> str:
+ assert self.source_dir, f"No source dir for {self}"
+ setup_py = os.path.join(self.unpacked_source_directory, "setup.py")
+
+ return setup_py
+
+ @property
+ def setup_cfg_path(self) -> str:
+ assert self.source_dir, f"No source dir for {self}"
+ setup_cfg = os.path.join(self.unpacked_source_directory, "setup.cfg")
+
+ return setup_cfg
+
+ @property
+ def pyproject_toml_path(self) -> str:
+ assert self.source_dir, f"No source dir for {self}"
+ return make_pyproject_path(self.unpacked_source_directory)
+
+ def load_pyproject_toml(self) -> None:
+ """Load the pyproject.toml file.
+
+ After calling this routine, all of the attributes related to PEP 517
+ processing for this requirement have been set. In particular, the
+ use_pep517 attribute can be used to determine whether we should
+ follow the PEP 517 or legacy (setup.py) code path.
+ """
+ pyproject_toml_data = load_pyproject_toml(
+ self.use_pep517, self.pyproject_toml_path, self.setup_py_path, str(self)
+ )
+
+ if pyproject_toml_data is None:
+ self.use_pep517 = False
+ return
+
+ self.use_pep517 = True
+ requires, backend, check, backend_path = pyproject_toml_data
+ self.requirements_to_check = check
+ self.pyproject_requires = requires
+ self.pep517_backend = ConfiguredBuildBackendHookCaller(
+ self,
+ self.unpacked_source_directory,
+ backend,
+ backend_path=backend_path,
+ )
+
+ def isolated_editable_sanity_check(self) -> None:
+ """Check that an editable requirement if valid for use with PEP 517/518.
+
+ This verifies that an editable that has a pyproject.toml either supports PEP 660
+ or as a setup.py or a setup.cfg
+ """
+ if (
+ self.editable
+ and self.use_pep517
+ and not self.supports_pyproject_editable()
+ and not os.path.isfile(self.setup_py_path)
+ and not os.path.isfile(self.setup_cfg_path)
+ ):
+ raise InstallationError(
+ f"Project {self} has a 'pyproject.toml' and its build "
+ f"backend is missing the 'build_editable' hook. Since it does not "
+ f"have a 'setup.py' nor a 'setup.cfg', "
+ f"it cannot be installed in editable mode. "
+ f"Consider using a build backend that supports PEP 660."
+ )
+
+ def prepare_metadata(self) -> None:
+ """Ensure that project metadata is available.
+
+ Under PEP 517 and PEP 660, call the backend hook to prepare the metadata.
+ Under legacy processing, call setup.py egg-info.
+ """
+ assert self.source_dir
+ details = self.name or f"from {self.link}"
+
+ if self.use_pep517:
+ assert self.pep517_backend is not None
+ if (
+ self.editable
+ and self.permit_editable_wheels
+ and self.supports_pyproject_editable()
+ ):
+ self.metadata_directory = generate_editable_metadata(
+ build_env=self.build_env,
+ backend=self.pep517_backend,
+ details=details,
+ )
+ else:
+ self.metadata_directory = generate_metadata(
+ build_env=self.build_env,
+ backend=self.pep517_backend,
+ details=details,
+ )
+ else:
+ self.metadata_directory = generate_metadata_legacy(
+ build_env=self.build_env,
+ setup_py_path=self.setup_py_path,
+ source_dir=self.unpacked_source_directory,
+ isolated=self.isolated,
+ details=details,
+ )
+
+ # Act on the newly generated metadata, based on the name and version.
+ if not self.name:
+ self._set_requirement()
+ else:
+ self.warn_on_mismatching_name()
+
+ self.assert_source_matches_version()
+
+ @property
+ def metadata(self) -> Any:
+ if not hasattr(self, "_metadata"):
+ self._metadata = self.get_dist().metadata
+
+ return self._metadata
+
+ def get_dist(self) -> BaseDistribution:
+ if self.metadata_directory:
+ return get_directory_distribution(self.metadata_directory)
+ elif self.local_file_path and self.is_wheel:
+ return get_wheel_distribution(
+ FilesystemWheel(self.local_file_path), canonicalize_name(self.name)
+ )
+ raise AssertionError(
+ f"InstallRequirement {self} has no metadata directory and no wheel: "
+ f"can't make a distribution."
+ )
+
+ def assert_source_matches_version(self) -> None:
+ assert self.source_dir
+ version = self.metadata["version"]
+ if self.req.specifier and version not in self.req.specifier:
+ logger.warning(
+ "Requested %s, but installing version %s",
+ self,
+ version,
+ )
+ else:
+ logger.debug(
+ "Source in %s has version %s, which satisfies requirement %s",
+ display_path(self.source_dir),
+ version,
+ self,
+ )
+
+ # For both source distributions and editables
+ def ensure_has_source_dir(
+ self,
+ parent_dir: str,
+ autodelete: bool = False,
+ parallel_builds: bool = False,
+ ) -> None:
+ """Ensure that a source_dir is set.
+
+ This will create a temporary build dir if the name of the requirement
+ isn't known yet.
+
+ :param parent_dir: The ideal pip parent_dir for the source_dir.
+ Generally src_dir for editables and build_dir for sdists.
+ :return: self.source_dir
+ """
+ if self.source_dir is None:
+ self.source_dir = self.ensure_build_location(
+ parent_dir,
+ autodelete=autodelete,
+ parallel_builds=parallel_builds,
+ )
+
+ # For editable installations
+ def update_editable(self) -> None:
+ if not self.link:
+ logger.debug(
+ "Cannot update repository at %s; repository location is unknown",
+ self.source_dir,
+ )
+ return
+ assert self.editable
+ assert self.source_dir
+ if self.link.scheme == "file":
+ # Static paths don't get updated
+ return
+ vcs_backend = vcs.get_backend_for_scheme(self.link.scheme)
+ # Editable requirements are validated in Requirement constructors.
+ # So here, if it's neither a path nor a valid VCS URL, it's a bug.
+ assert vcs_backend, f"Unsupported VCS URL {self.link.url}"
+ hidden_url = hide_url(self.link.url)
+ vcs_backend.obtain(self.source_dir, url=hidden_url, verbosity=0)
+
+ # Top-level Actions
+ def uninstall(
+ self, auto_confirm: bool = False, verbose: bool = False
+ ) -> Optional[UninstallPathSet]:
+ """
+ Uninstall the distribution currently satisfying this requirement.
+
+ Prompts before removing or modifying files unless
+ ``auto_confirm`` is True.
+
+ Refuses to delete or modify files outside of ``sys.prefix`` -
+ thus uninstallation within a virtual environment can only
+ modify that virtual environment, even if the virtualenv is
+ linked to global site-packages.
+
+ """
+ assert self.req
+ dist = get_default_environment().get_distribution(self.req.name)
+ if not dist:
+ logger.warning("Skipping %s as it is not installed.", self.name)
+ return None
+ logger.info("Found existing installation: %s", dist)
+
+ uninstalled_pathset = UninstallPathSet.from_dist(dist)
+ uninstalled_pathset.remove(auto_confirm, verbose)
+ return uninstalled_pathset
+
+ def _get_archive_name(self, path: str, parentdir: str, rootdir: str) -> str:
+ def _clean_zip_name(name: str, prefix: str) -> str:
+ assert name.startswith(
+ prefix + os.path.sep
+ ), f"name {name!r} doesn't start with prefix {prefix!r}"
+ name = name[len(prefix) + 1 :]
+ name = name.replace(os.path.sep, "/")
+ return name
+
+ path = os.path.join(parentdir, path)
+ name = _clean_zip_name(path, rootdir)
+ return self.name + "/" + name
+
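+ # A sketch of the arcname mapping above (hypothetical values, not
+ # from upstream): with self.name == "demo", parentdir
+ # "/build/demo/pkg" and rootdir "/build/demo", the file "mod.py"
+ # becomes the archive member "demo/pkg/mod.py".
+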
+ def archive(self, build_dir: Optional[str]) -> None:
+ """Saves archive to provided build_dir.
+
+ Used for saving downloaded VCS requirements as part of `pip download`.
+ """
+ assert self.source_dir
+ if build_dir is None:
+ return
+
+ create_archive = True
+ archive_name = "{}-{}.zip".format(self.name, self.metadata["version"])
+ archive_path = os.path.join(build_dir, archive_name)
+
+ if os.path.exists(archive_path):
+ response = ask_path_exists(
+ "The file {} exists. (i)gnore, (w)ipe, "
+ "(b)ackup, (a)bort ".format(display_path(archive_path)),
+ ("i", "w", "b", "a"),
+ )
+ if response == "i":
+ create_archive = False
+ elif response == "w":
+ logger.warning("Deleting %s", display_path(archive_path))
+ os.remove(archive_path)
+ elif response == "b":
+ dest_file = backup_dir(archive_path)
+ logger.warning(
+ "Backing up %s to %s",
+ display_path(archive_path),
+ display_path(dest_file),
+ )
+ shutil.move(archive_path, dest_file)
+ elif response == "a":
+ sys.exit(-1)
+
+ if not create_archive:
+ return
+
+ zip_output = zipfile.ZipFile(
+ archive_path,
+ "w",
+ zipfile.ZIP_DEFLATED,
+ allowZip64=True,
+ )
+ with zip_output:
+ dir = os.path.normcase(os.path.abspath(self.unpacked_source_directory))
+ for dirpath, dirnames, filenames in os.walk(dir):
+ for dirname in dirnames:
+ dir_arcname = self._get_archive_name(
+ dirname,
+ parentdir=dirpath,
+ rootdir=dir,
+ )
+ zipdir = zipfile.ZipInfo(dir_arcname + "/")
+ zipdir.external_attr = 0x1ED << 16 # 0o755
+ zip_output.writestr(zipdir, "")
+ for filename in filenames:
+ file_arcname = self._get_archive_name(
+ filename,
+ parentdir=dirpath,
+ rootdir=dir,
+ )
+ filename = os.path.join(dirpath, filename)
+ zip_output.write(filename, file_arcname)
+
+ logger.info("Saved %s", display_path(archive_path))
+
+ def install(
+ self,
+ install_options: List[str],
+ global_options: Optional[Sequence[str]] = None,
+ root: Optional[str] = None,
+ home: Optional[str] = None,
+ prefix: Optional[str] = None,
+ warn_script_location: bool = True,
+ use_user_site: bool = False,
+ pycompile: bool = True,
+ ) -> None:
+ scheme = get_scheme(
+ self.name,
+ user=use_user_site,
+ home=home,
+ root=root,
+ isolated=self.isolated,
+ prefix=prefix,
+ )
+
+ global_options = global_options if global_options is not None else []
+ if self.editable and not self.is_wheel:
+ install_editable_legacy(
+ install_options,
+ global_options,
+ prefix=prefix,
+ home=home,
+ use_user_site=use_user_site,
+ name=self.name,
+ setup_py_path=self.setup_py_path,
+ isolated=self.isolated,
+ build_env=self.build_env,
+ unpacked_source_directory=self.unpacked_source_directory,
+ )
+ self.install_succeeded = True
+ return
+
+ if self.is_wheel:
+ assert self.local_file_path
+ direct_url = None
+ # TODO this can be refactored to direct_url = self.download_info
+ if self.editable:
+ direct_url = direct_url_for_editable(self.unpacked_source_directory)
+ elif self.original_link:
+ direct_url = direct_url_from_link(
+ self.original_link,
+ self.source_dir,
+ self.original_link_is_in_wheel_cache,
+ )
+ install_wheel(
+ self.name,
+ self.local_file_path,
+ scheme=scheme,
+ req_description=str(self.req),
+ pycompile=pycompile,
+ warn_script_location=warn_script_location,
+ direct_url=direct_url,
+ requested=self.user_supplied,
+ )
+ self.install_succeeded = True
+ return
+
+ # TODO: Why don't we do this for editable installs?
+
+ # Extend the list of global and install options passed on to
+ # the setup.py call with the ones from the requirements file.
+ # Options specified in requirements file override those
+ # specified on the command line, since the last option given
+ # to setup.py is the one that is used.
+ global_options = list(global_options) + self.global_options
+ install_options = list(install_options) + self.install_options
+
+ try:
+ if (
+ self.legacy_install_reason is not None
+ and self.legacy_install_reason.emit_before_install
+ ):
+ self.legacy_install_reason.emit_deprecation(self.name)
+ success = install_legacy(
+ install_options=install_options,
+ global_options=global_options,
+ root=root,
+ home=home,
+ prefix=prefix,
+ use_user_site=use_user_site,
+ pycompile=pycompile,
+ scheme=scheme,
+ setup_py_path=self.setup_py_path,
+ isolated=self.isolated,
+ req_name=self.name,
+ build_env=self.build_env,
+ unpacked_source_directory=self.unpacked_source_directory,
+ req_description=str(self.req),
+ )
+ except LegacyInstallFailure as exc:
+ self.install_succeeded = False
+ raise exc
+ except Exception:
+ self.install_succeeded = True
+ raise
+
+ self.install_succeeded = success
+
+ if (
+ success
+ and self.legacy_install_reason is not None
+ and self.legacy_install_reason.emit_after_success
+ ):
+ self.legacy_install_reason.emit_deprecation(self.name)
+
+
+def check_invalid_constraint_type(req: InstallRequirement) -> str:
+
+ # Check for unsupported forms
+ problem = ""
+ if not req.name:
+ problem = "Unnamed requirements are not allowed as constraints"
+ elif req.editable:
+ problem = "Editable requirements are not allowed as constraints"
+ elif req.extras:
+ problem = "Constraints cannot have extras"
+
+ if problem:
+ deprecated(
+ reason=(
+ "Constraints are only allowed to take the form of a package "
+ "name and a version specifier. Other forms were originally "
+ "permitted as an accident of the implementation, but were "
+ "undocumented. The new implementation of the resolver no "
+ "longer supports these forms."
+ ),
+ replacement="replacing the constraint with a requirement",
+ # No plan yet for when the new resolver becomes default
+ gone_in=None,
+ issue=8210,
+ )
+
+ return problem
+
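+# Illustrative outcomes of the check above (editor's sketch): a plain
+# "pkg >= 1.0" constraint passes with an empty problem string, while an
+# editable requirement, an extras requirement such as "pkg[extra]", or an
+# unnamed URL requirement each produce a non-empty problem message.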
+
+def _has_option(options: Values, reqs: List[InstallRequirement], option: str) -> bool:
+ if getattr(options, option, None):
+ return True
+ for req in reqs:
+ if getattr(req, option, None):
+ return True
+ return False
+
+
+def _install_option_ignored(
+ install_options: List[str], reqs: List[InstallRequirement]
+) -> bool:
+ for req in reqs:
+ if (install_options or req.install_options) and not req.use_pep517:
+ return False
+ return True
+
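+# In other words (editor's note): --install-option only reaches the
+# legacy 'setup.py install' code path, so it is reported as ignored when
+# every affected requirement builds via PEP 517.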
+
+class LegacySetupPyOptionsCheckMode(Enum):
+ INSTALL = 1
+ WHEEL = 2
+ DOWNLOAD = 3
+
+
+def check_legacy_setup_py_options(
+ options: Values,
+ reqs: List[InstallRequirement],
+ mode: LegacySetupPyOptionsCheckMode,
+) -> None:
+ has_install_options = _has_option(options, reqs, "install_options")
+ has_build_options = _has_option(options, reqs, "build_options")
+ has_global_options = _has_option(options, reqs, "global_options")
+ legacy_setup_py_options_present = (
+ has_install_options or has_build_options or has_global_options
+ )
+ if not legacy_setup_py_options_present:
+ return
+
+ options.format_control.disallow_binaries()
+ logger.warning(
+ "Implying --no-binary=:all: due to the presence of "
+ "--build-option / --global-option / --install-option. "
+ "Consider using --config-settings for more flexibility.",
+ )
+ if mode == LegacySetupPyOptionsCheckMode.INSTALL and has_install_options:
+ if _install_option_ignored(options.install_options, reqs):
+ logger.warning(
+ "Ignoring --install-option when building using PEP 517",
+ )
+ else:
+ deprecated(
+ reason=(
+ "--install-option is deprecated because "
+ "it forces pip to use the 'setup.py install' "
+ "command which is itself deprecated."
+ ),
+ issue=11358,
+ replacement="to use --config-settings",
+ gone_in="23.1",
+ )
diff --git a/third_party/python/pip/pip/_internal/req/req_set.py b/third_party/python/pip/pip/_internal/req/req_set.py
new file mode 100644
index 0000000000..ec7a6e07a2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/req/req_set.py
@@ -0,0 +1,82 @@
+import logging
+from collections import OrderedDict
+from typing import Dict, List
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.req.req_install import InstallRequirement
+
+logger = logging.getLogger(__name__)
+
+
+class RequirementSet:
+ def __init__(self, check_supported_wheels: bool = True) -> None:
+ """Create a RequirementSet."""
+
+ self.requirements: Dict[str, InstallRequirement] = OrderedDict()
+ self.check_supported_wheels = check_supported_wheels
+
+ self.unnamed_requirements: List[InstallRequirement] = []
+
+ def __str__(self) -> str:
+ requirements = sorted(
+ (req for req in self.requirements.values() if not req.comes_from),
+ key=lambda req: canonicalize_name(req.name or ""),
+ )
+ return " ".join(str(req.req) for req in requirements)
+
+ def __repr__(self) -> str:
+ requirements = sorted(
+ self.requirements.values(),
+ key=lambda req: canonicalize_name(req.name or ""),
+ )
+
+ format_string = "<{classname} object; {count} requirement(s): {reqs}>"
+ return format_string.format(
+ classname=self.__class__.__name__,
+ count=len(requirements),
+ reqs=", ".join(str(req.req) for req in requirements),
+ )
+
+ def add_unnamed_requirement(self, install_req: InstallRequirement) -> None:
+ assert not install_req.name
+ self.unnamed_requirements.append(install_req)
+
+ def add_named_requirement(self, install_req: InstallRequirement) -> None:
+ assert install_req.name
+
+ project_name = canonicalize_name(install_req.name)
+ self.requirements[project_name] = install_req
+
+ def has_requirement(self, name: str) -> bool:
+ project_name = canonicalize_name(name)
+
+ return (
+ project_name in self.requirements
+ and not self.requirements[project_name].constraint
+ )
+
+ def get_requirement(self, name: str) -> InstallRequirement:
+ project_name = canonicalize_name(name)
+
+ if project_name in self.requirements:
+ return self.requirements[project_name]
+
+ raise KeyError(f"No project with the name {name!r}")
+
+ @property
+ def all_requirements(self) -> List[InstallRequirement]:
+ return self.unnamed_requirements + list(self.requirements.values())
+
+ @property
+ def requirements_to_install(self) -> List[InstallRequirement]:
+ """Return the list of requirements that need to be installed.
+
+ TODO remove this property together with the legacy resolver, since the new
+ resolver only returns requirements that need to be installed.
+ """
+ return [
+ install_req
+ for install_req in self.all_requirements
+ if not install_req.constraint and not install_req.satisfied_by
+ ]
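+
+
+# A brief usage sketch (hypothetical requirements, editor's illustration):
+# names are canonicalized on the way in, so add_named_requirement() on a
+# requirement named "Foo_Bar" and a later get_requirement("foo-bar") refer
+# to the same entry, while has_requirement() deliberately reports False
+# for constraint-only entries.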
diff --git a/third_party/python/pip/pip/_internal/req/req_uninstall.py b/third_party/python/pip/pip/_internal/req/req_uninstall.py
new file mode 100644
index 0000000000..15b67385c8
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/req/req_uninstall.py
@@ -0,0 +1,640 @@
+import functools
+import os
+import sys
+import sysconfig
+from importlib.util import cache_from_source
+from typing import Any, Callable, Dict, Generator, Iterable, List, Optional, Set, Tuple
+
+from pip._internal.exceptions import UninstallationError
+from pip._internal.locations import get_bin_prefix, get_bin_user
+from pip._internal.metadata import BaseDistribution
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.egg_link import egg_link_path_from_location
+from pip._internal.utils.logging import getLogger, indent_log
+from pip._internal.utils.misc import ask, is_local, normalize_path, renames, rmtree
+from pip._internal.utils.temp_dir import AdjacentTempDirectory, TempDirectory
+
+logger = getLogger(__name__)
+
+
+def _script_names(
+ bin_dir: str, script_name: str, is_gui: bool
+) -> Generator[str, None, None]:
+ """Create the fully qualified name of the files created by
+ {console,gui}_scripts for the given ``dist``.
+ Returns the list of file names
+ """
+ exe_name = os.path.join(bin_dir, script_name)
+ yield exe_name
+ if not WINDOWS:
+ return
+ yield f"{exe_name}.exe"
+ yield f"{exe_name}.exe.manifest"
+ if is_gui:
+ yield f"{exe_name}-script.pyw"
+ else:
+ yield f"{exe_name}-script.py"
+
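+# For example (editor's sketch, Windows assumed): for bin_dir
+# "C:\\Scripts", script_name "foo" and is_gui False, the generator above
+# yields "C:\\Scripts\\foo", "C:\\Scripts\\foo.exe",
+# "C:\\Scripts\\foo.exe.manifest" and "C:\\Scripts\\foo-script.py"; on
+# other platforms only the first name is yielded.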
+
+def _unique(
+ fn: Callable[..., Generator[Any, None, None]]
+) -> Callable[..., Generator[Any, None, None]]:
+ @functools.wraps(fn)
+ def unique(*args: Any, **kw: Any) -> Generator[Any, None, None]:
+ seen: Set[Any] = set()
+ for item in fn(*args, **kw):
+ if item not in seen:
+ seen.add(item)
+ yield item
+
+ return unique
+
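+# The decorator above makes a generator skip duplicates while preserving
+# order; e.g. a wrapped generator producing "a", "b", "a" is seen by its
+# caller as yielding only "a", "b" (editor's sketch).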
+
+@_unique
+def uninstallation_paths(dist: BaseDistribution) -> Generator[str, None, None]:
+ """
+ Yield all the uninstallation paths for dist based on RECORD-without-.py[co]
+
+ Yield paths to all the files in RECORD. For each .py file in RECORD, add
+ the .pyc and .pyo in the same directory.
+
+ UninstallPathSet.add() takes care of the __pycache__ .py[co].
+
+ If RECORD is not found, raises UninstallationError,
+ with possible information from the INSTALLER file.
+
+ https://packaging.python.org/specifications/recording-installed-packages/
+ """
+ location = dist.location
+ assert location is not None, "not installed"
+
+ entries = dist.iter_declared_entries()
+ if entries is None:
+ msg = "Cannot uninstall {dist}, RECORD file not found.".format(dist=dist)
+ installer = dist.installer
+ if not installer or installer == "pip":
+ dep = "{}=={}".format(dist.raw_name, dist.version)
+ msg += (
+ " You might be able to recover from this via: "
+ "'pip install --force-reinstall --no-deps {}'.".format(dep)
+ )
+ else:
+ msg += " Hint: The package was installed by {}.".format(installer)
+ raise UninstallationError(msg)
+
+ for entry in entries:
+ path = os.path.join(location, entry)
+ yield path
+ if path.endswith(".py"):
+ dn, fn = os.path.split(path)
+ base = fn[:-3]
+ path = os.path.join(dn, base + ".pyc")
+ yield path
+ path = os.path.join(dn, base + ".pyo")
+ yield path
+
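+# Sketch of the expansion above (hypothetical RECORD entry): an entry
+# "pkg/mod.py" under location "/site" yields "/site/pkg/mod.py",
+# "/site/pkg/mod.pyc" and "/site/pkg/mod.pyo"; entries that are not .py
+# files are yielded as-is.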
+
+def compact(paths: Iterable[str]) -> Set[str]:
+ """Compact a path set to contain the minimal number of paths
+ necessary to contain all paths in the set. If /a/path/ and
+ /a/path/to/a/file.txt are both in the set, leave only the
+ shorter path."""
+
+ sep = os.path.sep
+ short_paths: Set[str] = set()
+ for path in sorted(paths, key=len):
+ should_skip = any(
+ path.startswith(shortpath.rstrip("*"))
+ and path[len(shortpath.rstrip("*").rstrip(sep))] == sep
+ for shortpath in short_paths
+ )
+ if not should_skip:
+ short_paths.add(path)
+ return short_paths
+
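+# For instance (editor's sketch): compact({"/a/b", "/a/b/c.txt", "/d"})
+# returns {"/a/b", "/d"}, because "/a/b/c.txt" lives under "/a/b".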
+
+def compress_for_rename(paths: Iterable[str]) -> Set[str]:
+ """Returns a set containing the paths that need to be renamed.
+
+ This set may include directories when the original sequence of paths
+ included every file on disk.
+ """
+ case_map = {os.path.normcase(p): p for p in paths}
+ remaining = set(case_map)
+ unchecked = sorted({os.path.split(p)[0] for p in case_map.values()}, key=len)
+ wildcards: Set[str] = set()
+
+ def norm_join(*a: str) -> str:
+ return os.path.normcase(os.path.join(*a))
+
+ for root in unchecked:
+ if any(os.path.normcase(root).startswith(w) for w in wildcards):
+ # This directory has already been handled.
+ continue
+
+ all_files: Set[str] = set()
+ all_subdirs: Set[str] = set()
+ for dirname, subdirs, files in os.walk(root):
+ all_subdirs.update(norm_join(root, dirname, d) for d in subdirs)
+ all_files.update(norm_join(root, dirname, f) for f in files)
+ # If all the files we found are in our remaining set of files to
+ # remove, then remove them from the latter set and add a wildcard
+ # for the directory.
+ if not (all_files - remaining):
+ remaining.difference_update(all_files)
+ wildcards.add(root + os.sep)
+
+ return set(map(case_map.__getitem__, remaining)) | wildcards
+
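+# In effect (editor's note): when every file beneath a directory is being
+# removed, the individual file paths are collapsed into a single
+# "<directory><sep>" entry so the whole tree can be stashed in one rename.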
+
+def compress_for_output_listing(paths: Iterable[str]) -> Tuple[Set[str], Set[str]]:
+ """Returns a tuple of 2 sets of which paths to display to user
+
+ The first set contains paths that would be deleted. Files of a package
+ are not added and the top-level directory of the package has a '*' added
+ at the end - to signify that all it's contents are removed.
+
+ The second set contains files that would have been skipped in the above
+ folders.
+ """
+
+ will_remove = set(paths)
+ will_skip = set()
+
+ # Determine folders and files
+ folders = set()
+ files = set()
+ for path in will_remove:
+ if path.endswith(".pyc"):
+ continue
+ if path.endswith("__init__.py") or ".dist-info" in path:
+ folders.add(os.path.dirname(path))
+ files.add(path)
+
+ # probably this one https://github.com/python/mypy/issues/390
+ _normcased_files = set(map(os.path.normcase, files)) # type: ignore
+
+ folders = compact(folders)
+
+ # This walks the tree using os.walk to not miss extra folders
+ # that might get added.
+ for folder in folders:
+ for dirpath, _, dirfiles in os.walk(folder):
+ for fname in dirfiles:
+ if fname.endswith(".pyc"):
+ continue
+
+ file_ = os.path.join(dirpath, fname)
+ if (
+ os.path.isfile(file_)
+ and os.path.normcase(file_) not in _normcased_files
+ ):
+ # We are skipping this file. Add it to the set.
+ will_skip.add(file_)
+
+ will_remove = files | {os.path.join(folder, "*") for folder in folders}
+
+ return will_remove, will_skip
+
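+# Rough shape of the result (editor's sketch): for a package rooted at
+# "/site/pkg", will_remove ends up containing "/site/pkg/*" plus any
+# stray top-level files, and will_skip lists files discovered inside that
+# folder which do not belong to the package.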
+
+class StashedUninstallPathSet:
+ """A set of file rename operations to stash files while
+ tentatively uninstalling them."""
+
+ def __init__(self) -> None:
+ # Mapping from source file root to [Adjacent]TempDirectory
+ # for files under that directory.
+ self._save_dirs: Dict[str, TempDirectory] = {}
+ # (old path, new path) tuples for each move that may need
+ # to be undone.
+ self._moves: List[Tuple[str, str]] = []
+
+ def _get_directory_stash(self, path: str) -> str:
+ """Stashes a directory.
+
+ Directories are stashed adjacent to their original location if
+ possible, or else moved/copied into the user's temp dir."""
+
+ try:
+ save_dir: TempDirectory = AdjacentTempDirectory(path)
+ except OSError:
+ save_dir = TempDirectory(kind="uninstall")
+ self._save_dirs[os.path.normcase(path)] = save_dir
+
+ return save_dir.path
+
+ def _get_file_stash(self, path: str) -> str:
+ """Stashes a file.
+
+ If no root has been provided, one will be created for the directory
+ in the user's temp directory."""
+ path = os.path.normcase(path)
+ head, old_head = os.path.dirname(path), None
+ save_dir = None
+
+ while head != old_head:
+ try:
+ save_dir = self._save_dirs[head]
+ break
+ except KeyError:
+ pass
+ head, old_head = os.path.dirname(head), head
+ else:
+ # Did not find any suitable root
+ head = os.path.dirname(path)
+ save_dir = TempDirectory(kind="uninstall")
+ self._save_dirs[head] = save_dir
+
+ relpath = os.path.relpath(path, head)
+ if relpath and relpath != os.path.curdir:
+ return os.path.join(save_dir.path, relpath)
+ return save_dir.path
+
+ def stash(self, path: str) -> str:
+ """Stashes the directory or file and returns its new location.
+ Handle symlinks as files to avoid modifying the symlink targets.
+ """
+ path_is_dir = os.path.isdir(path) and not os.path.islink(path)
+ if path_is_dir:
+ new_path = self._get_directory_stash(path)
+ else:
+ new_path = self._get_file_stash(path)
+
+ self._moves.append((path, new_path))
+ if path_is_dir and os.path.isdir(new_path):
+ # If we're moving a directory, we need to
+ # remove the destination first or else it will be
+ # moved to inside the existing directory.
+ # We just created new_path ourselves, so it will
+ # be removable.
+ os.rmdir(new_path)
+ renames(path, new_path)
+ return new_path
+
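+ # Typical lifecycle of this class (editor's sketch): stash() each
+ # path, then commit() once the uninstall is known good, or rollback()
+ # to restore every stashed path after a failure.
+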
+ def commit(self) -> None:
+ """Commits the uninstall by removing stashed files."""
+ for _, save_dir in self._save_dirs.items():
+ save_dir.cleanup()
+ self._moves = []
+ self._save_dirs = {}
+
+ def rollback(self) -> None:
+ """Undoes the uninstall by moving stashed files back."""
+ for p in self._moves:
+ logger.info("Moving to %s\n from %s", *p)
+
+ for new_path, path in self._moves:
+ try:
+ logger.debug("Replacing %s from %s", new_path, path)
+ if os.path.isfile(new_path) or os.path.islink(new_path):
+ os.unlink(new_path)
+ elif os.path.isdir(new_path):
+ rmtree(new_path)
+ renames(path, new_path)
+ except OSError as ex:
+ logger.error("Failed to restore %s", new_path)
+ logger.debug("Exception: %s", ex)
+
+ self.commit()
+
+ @property
+ def can_rollback(self) -> bool:
+ return bool(self._moves)
+
+
+class UninstallPathSet:
+ """A set of file paths to be removed in the uninstallation of a
+ requirement."""
+
+ def __init__(self, dist: BaseDistribution) -> None:
+ self._paths: Set[str] = set()
+ self._refuse: Set[str] = set()
+ self._pth: Dict[str, UninstallPthEntries] = {}
+ self._dist = dist
+ self._moved_paths = StashedUninstallPathSet()
+
+ def _permitted(self, path: str) -> bool:
+ """
+ Return True if the given path is one we are permitted to
+ remove/modify, False otherwise.
+
+ """
+ return is_local(path)
+
+ def add(self, path: str) -> None:
+ head, tail = os.path.split(path)
+
+ # we normalize the head to resolve parent directory symlinks, but not
+ # the tail, since we only want to uninstall symlinks, not their targets
+ path = os.path.join(normalize_path(head), os.path.normcase(tail))
+
+ if not os.path.exists(path):
+ return
+ if self._permitted(path):
+ self._paths.add(path)
+ else:
+ self._refuse.add(path)
+
+ # __pycache__ files can show up after 'installed-files.txt' is created,
+ # due to imports
+ if os.path.splitext(path)[1] == ".py":
+ self.add(cache_from_source(path))
+
+ def add_pth(self, pth_file: str, entry: str) -> None:
+ pth_file = normalize_path(pth_file)
+ if self._permitted(pth_file):
+ if pth_file not in self._pth:
+ self._pth[pth_file] = UninstallPthEntries(pth_file)
+ self._pth[pth_file].add(entry)
+ else:
+ self._refuse.add(pth_file)
+
+ def remove(self, auto_confirm: bool = False, verbose: bool = False) -> None:
+ """Remove paths in ``self._paths`` with confirmation (unless
+ ``auto_confirm`` is True)."""
+
+ if not self._paths:
+ logger.info(
+ "Can't uninstall '%s'. No files were found to uninstall.",
+ self._dist.raw_name,
+ )
+ return
+
+ dist_name_version = f"{self._dist.raw_name}-{self._dist.version}"
+ logger.info("Uninstalling %s:", dist_name_version)
+
+ with indent_log():
+ if auto_confirm or self._allowed_to_proceed(verbose):
+ moved = self._moved_paths
+
+ for_rename = compress_for_rename(self._paths)
+
+ for path in sorted(compact(for_rename)):
+ moved.stash(path)
+ logger.verbose("Removing file or directory %s", path)
+
+ for pth in self._pth.values():
+ pth.remove()
+
+ logger.info("Successfully uninstalled %s", dist_name_version)
+
+ def _allowed_to_proceed(self, verbose: bool) -> bool:
+ """Display which files would be deleted and prompt for confirmation"""
+
+ def _display(msg: str, paths: Iterable[str]) -> None:
+ if not paths:
+ return
+
+ logger.info(msg)
+ with indent_log():
+ for path in sorted(compact(paths)):
+ logger.info(path)
+
+ if not verbose:
+ will_remove, will_skip = compress_for_output_listing(self._paths)
+ else:
+ # In verbose mode, display all the files that are going to be
+ # deleted.
+ will_remove = set(self._paths)
+ will_skip = set()
+
+ _display("Would remove:", will_remove)
+ _display("Would not remove (might be manually added):", will_skip)
+ _display("Would not remove (outside of prefix):", self._refuse)
+ if verbose:
+ _display("Will actually move:", compress_for_rename(self._paths))
+
+ return ask("Proceed (Y/n)? ", ("y", "n", "")) != "n"
+
+ def rollback(self) -> None:
+ """Rollback the changes previously made by remove()."""
+ if not self._moved_paths.can_rollback:
+ logger.error(
+ "Can't roll back %s; was not uninstalled",
+ self._dist.raw_name,
+ )
+ return
+ logger.info("Rolling back uninstall of %s", self._dist.raw_name)
+ self._moved_paths.rollback()
+ for pth in self._pth.values():
+ pth.rollback()
+
+ def commit(self) -> None:
+ """Remove temporary save dir: rollback will no longer be possible."""
+ self._moved_paths.commit()
+
+ @classmethod
+ def from_dist(cls, dist: BaseDistribution) -> "UninstallPathSet":
+ dist_location = dist.location
+ info_location = dist.info_location
+ if dist_location is None:
+ logger.info(
+ "Not uninstalling %s since it is not installed",
+ dist.canonical_name,
+ )
+ return cls(dist)
+
+ normalized_dist_location = normalize_path(dist_location)
+ if not dist.local:
+ logger.info(
+ "Not uninstalling %s at %s, outside environment %s",
+ dist.canonical_name,
+ normalized_dist_location,
+ sys.prefix,
+ )
+ return cls(dist)
+
+ if normalized_dist_location in {
+ p
+ for p in {sysconfig.get_path("stdlib"), sysconfig.get_path("platstdlib")}
+ if p
+ }:
+ logger.info(
+ "Not uninstalling %s at %s, as it is in the standard library.",
+ dist.canonical_name,
+ normalized_dist_location,
+ )
+ return cls(dist)
+
+ paths_to_remove = cls(dist)
+ develop_egg_link = egg_link_path_from_location(dist.raw_name)
+
+ # Distribution is installed with metadata in a "flat" .egg-info
+ # directory. This means it is not a modern .dist-info installation, an
+ # egg, or legacy editable.
+ setuptools_flat_installation = (
+ dist.installed_with_setuptools_egg_info
+ and info_location is not None
+ and os.path.exists(info_location)
+ # If dist is editable and the location points to a ``.egg-info``,
+ # we are in fact in the legacy editable case.
+ and not info_location.endswith(f"{dist.setuptools_filename}.egg-info")
+ )
+
+ # The order of the uninstall cases matters: in the case of two
+ # installs of the same package, pip needs to uninstall the currently
+ # detected version.
+ if setuptools_flat_installation:
+ if info_location is not None:
+ paths_to_remove.add(info_location)
+ installed_files = dist.iter_declared_entries()
+ if installed_files is not None:
+ for installed_file in installed_files:
+ paths_to_remove.add(os.path.join(dist_location, installed_file))
+ # FIXME: need a test for this elif block
+ # occurs with --single-version-externally-managed/--record outside
+ # of pip
+ elif dist.is_file("top_level.txt"):
+ try:
+ namespace_packages = dist.read_text("namespace_packages.txt")
+ except FileNotFoundError:
+ namespaces = []
+ else:
+ namespaces = namespace_packages.splitlines(keepends=False)
+ for top_level_pkg in [
+ p
+ for p in dist.read_text("top_level.txt").splitlines()
+ if p and p not in namespaces
+ ]:
+ path = os.path.join(dist_location, top_level_pkg)
+ paths_to_remove.add(path)
+ paths_to_remove.add(f"{path}.py")
+ paths_to_remove.add(f"{path}.pyc")
+ paths_to_remove.add(f"{path}.pyo")
+
+ elif dist.installed_by_distutils:
+ raise UninstallationError(
+ "Cannot uninstall {!r}. It is a distutils installed project "
+ "and thus we cannot accurately determine which files belong "
+ "to it which would lead to only a partial uninstall.".format(
+ dist.raw_name,
+ )
+ )
+
+ elif dist.installed_as_egg:
+ # package installed by easy_install
+ # We cannot match on dist.egg_name because it can slightly vary
+ # i.e. setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg
+ paths_to_remove.add(dist_location)
+ easy_install_egg = os.path.split(dist_location)[1]
+ easy_install_pth = os.path.join(
+ os.path.dirname(dist_location),
+ "easy-install.pth",
+ )
+ paths_to_remove.add_pth(easy_install_pth, "./" + easy_install_egg)
+
+ elif dist.installed_with_dist_info:
+ for path in uninstallation_paths(dist):
+ paths_to_remove.add(path)
+
+ elif develop_egg_link:
+ # PEP 660 modern editable is handled in the ``.dist-info`` case
+ # above, so this only covers the setuptools-style editable.
+ with open(develop_egg_link) as fh:
+ link_pointer = os.path.normcase(fh.readline().strip())
+ normalized_link_pointer = normalize_path(link_pointer)
+ assert os.path.samefile(
+ normalized_link_pointer, normalized_dist_location
+ ), (
+ f"Egg-link {link_pointer} does not match installed location of "
+ f"{dist.raw_name} (at {dist_location})"
+ )
+ paths_to_remove.add(develop_egg_link)
+ easy_install_pth = os.path.join(
+ os.path.dirname(develop_egg_link), "easy-install.pth"
+ )
+ paths_to_remove.add_pth(easy_install_pth, dist_location)
+
+ else:
+ logger.debug(
+ "Not sure how to uninstall: %s - Check: %s",
+ dist,
+ dist_location,
+ )
+
+ if dist.in_usersite:
+ bin_dir = get_bin_user()
+ else:
+ bin_dir = get_bin_prefix()
+
+ # find distutils scripts= scripts
+ try:
+ for script in dist.iter_distutils_script_names():
+ paths_to_remove.add(os.path.join(bin_dir, script))
+ if WINDOWS:
+ paths_to_remove.add(os.path.join(bin_dir, f"{script}.bat"))
+ except (FileNotFoundError, NotADirectoryError):
+ pass
+
+ # find console_scripts and gui_scripts
+ def iter_scripts_to_remove(
+ dist: BaseDistribution,
+ bin_dir: str,
+ ) -> Generator[str, None, None]:
+ for entry_point in dist.iter_entry_points():
+ if entry_point.group == "console_scripts":
+ yield from _script_names(bin_dir, entry_point.name, False)
+ elif entry_point.group == "gui_scripts":
+ yield from _script_names(bin_dir, entry_point.name, True)
+
+ for s in iter_scripts_to_remove(dist, bin_dir):
+ paths_to_remove.add(s)
+
+ return paths_to_remove
+
+
+class UninstallPthEntries:
+ def __init__(self, pth_file: str) -> None:
+ self.file = pth_file
+ self.entries: Set[str] = set()
+ self._saved_lines: Optional[List[bytes]] = None
+
+ def add(self, entry: str) -> None:
+ entry = os.path.normcase(entry)
+ # On Windows, os.path.normcase converts the entry to use
+ # backslashes. This is correct for entries that describe absolute
+ # paths outside of site-packages, but all the others use forward
+ # slashes.
+ # os.path.splitdrive is used instead of os.path.isabs because isabs
+ # treats non-absolute paths with drive letter markings like c:foo\bar
+ # as absolute paths. It also does not recognize UNC paths if they don't
+ # have more than "\\server\share". Valid examples: "\\server\share\" or
+ # "\\server\share\folder".
+ if WINDOWS and not os.path.splitdrive(entry)[0]:
+ entry = entry.replace("\\", "/")
+ self.entries.add(entry)
+
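+ # For example (editor's sketch, Windows assumed): an absolute entry
+ # "c:\\pkgs\\demo" keeps its backslashes because it carries a drive
+ # prefix, while a relative entry ".\\demo.egg" is stored as
+ # "./demo.egg".
+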
+ def remove(self) -> None:
+ logger.verbose("Removing pth entries from %s:", self.file)
+
+ # If the file doesn't exist, log a warning and return
+ if not os.path.isfile(self.file):
+ logger.warning("Cannot remove entries from nonexistent file %s", self.file)
+ return
+ with open(self.file, "rb") as fh:
+ # Read in binary mode so the file's existing line endings are kept.
+ lines = fh.readlines()
+ self._saved_lines = lines
+ if any(b"\r\n" in line for line in lines):
+ endline = "\r\n"
+ else:
+ endline = "\n"
+ # handle missing trailing newline
+ if lines and not lines[-1].endswith(endline.encode("utf-8")):
+ lines[-1] = lines[-1] + endline.encode("utf-8")
+ for entry in self.entries:
+ try:
+ logger.verbose("Removing entry: %s", entry)
+ lines.remove((entry + endline).encode("utf-8"))
+ except ValueError:
+ pass
+ with open(self.file, "wb") as fh:
+ fh.writelines(lines)
+
+ def rollback(self) -> bool:
+ if self._saved_lines is None:
+ logger.error("Cannot roll back changes to %s, none were made", self.file)
+ return False
+ logger.debug("Rolling %s back to previous state", self.file)
+ with open(self.file, "wb") as fh:
+ fh.writelines(self._saved_lines)
+ return True
diff --git a/third_party/python/pip/pip/_internal/resolution/__init__.py b/third_party/python/pip/pip/_internal/resolution/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/__init__.py
diff --git a/third_party/python/pip/pip/_internal/resolution/base.py b/third_party/python/pip/pip/_internal/resolution/base.py
new file mode 100644
index 0000000000..42dade18c1
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/base.py
@@ -0,0 +1,20 @@
+from typing import Callable, List, Optional
+
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.req.req_set import RequirementSet
+
+InstallRequirementProvider = Callable[
+ [str, Optional[InstallRequirement]], InstallRequirement
+]
+
+
+class BaseResolver:
+ def resolve(
+ self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
+ ) -> RequirementSet:
+ raise NotImplementedError()
+
+ def get_installation_order(
+ self, req_set: RequirementSet
+ ) -> List[InstallRequirement]:
+ raise NotImplementedError()
diff --git a/third_party/python/pip/pip/_internal/resolution/legacy/__init__.py b/third_party/python/pip/pip/_internal/resolution/legacy/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/legacy/__init__.py
diff --git a/third_party/python/pip/pip/_internal/resolution/legacy/resolver.py b/third_party/python/pip/pip/_internal/resolution/legacy/resolver.py
new file mode 100644
index 0000000000..fb49d41695
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/legacy/resolver.py
@@ -0,0 +1,600 @@
+"""Dependency Resolution
+
+The dependency resolution in pip is performed as follows:
+
+for top-level requirements:
+ a. only one spec is allowed per project, regardless of conflicts;
+ otherwise a "double requirement" exception is raised
+ b. they override sub-dependency requirements.
+for sub-dependencies:
+ a. "first found, wins" (where the order is breadth first)
+"""
+
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
+import logging
+import sys
+from collections import defaultdict
+from itertools import chain
+from typing import DefaultDict, Iterable, List, Optional, Set, Tuple
+
+from pip._vendor.packaging import specifiers
+from pip._vendor.packaging.requirements import Requirement
+
+from pip._internal.cache import WheelCache
+from pip._internal.exceptions import (
+ BestVersionAlreadyInstalled,
+ DistributionNotFound,
+ HashError,
+ HashErrors,
+ InstallationError,
+ NoneMetadataError,
+ UnsupportedPythonVersion,
+)
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import BaseDistribution
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req.req_install import (
+ InstallRequirement,
+ check_invalid_constraint_type,
+)
+from pip._internal.req.req_set import RequirementSet
+from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider
+from pip._internal.utils import compatibility_tags
+from pip._internal.utils.compatibility_tags import get_supported
+from pip._internal.utils.direct_url_helpers import direct_url_from_link
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import normalize_version_info
+from pip._internal.utils.packaging import check_requires_python
+
+logger = logging.getLogger(__name__)
+
+DiscoveredDependencies = DefaultDict[str, List[InstallRequirement]]
+
+
+def _check_dist_requires_python(
+ dist: BaseDistribution,
+ version_info: Tuple[int, int, int],
+ ignore_requires_python: bool = False,
+) -> None:
+ """
+ Check whether the given Python version is compatible with a distribution's
+ "Requires-Python" value.
+
+ :param version_info: A 3-tuple of ints representing the Python
+ major-minor-micro version to check.
+ :param ignore_requires_python: Whether to ignore the "Requires-Python"
+ value if the given Python version isn't compatible.
+
+ :raises UnsupportedPythonVersion: When the given Python version isn't
+ compatible.
+ """
+ # This idiosyncratically converts the SpecifierSet to str and lets
+ # check_requires_python parse it again into a SpecifierSet. But this
+ # is the legacy resolver so I'm just not going to bother refactoring.
+ try:
+ requires_python = str(dist.requires_python)
+ except FileNotFoundError as e:
+ raise NoneMetadataError(dist, str(e))
+ try:
+ is_compatible = check_requires_python(
+ requires_python,
+ version_info=version_info,
+ )
+ except specifiers.InvalidSpecifier as exc:
+ logger.warning(
+ "Package %r has an invalid Requires-Python: %s", dist.raw_name, exc
+ )
+ return
+
+ if is_compatible:
+ return
+
+ version = ".".join(map(str, version_info))
+ if ignore_requires_python:
+ logger.debug(
+ "Ignoring failed Requires-Python check for package %r: %s not in %r",
+ dist.raw_name,
+ version,
+ requires_python,
+ )
+ return
+
+ raise UnsupportedPythonVersion(
+ "Package {!r} requires a different Python: {} not in {!r}".format(
+ dist.raw_name, version, requires_python
+ )
+ )
+
+
+class Resolver(BaseResolver):
+ """Resolves which packages need to be installed/uninstalled to perform \
+ the requested operation without breaking the requirements of any package.
+ """
+
+ _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
+
+ def __init__(
+ self,
+ preparer: RequirementPreparer,
+ finder: PackageFinder,
+ wheel_cache: Optional[WheelCache],
+ make_install_req: InstallRequirementProvider,
+ use_user_site: bool,
+ ignore_dependencies: bool,
+ ignore_installed: bool,
+ ignore_requires_python: bool,
+ force_reinstall: bool,
+ upgrade_strategy: str,
+ py_version_info: Optional[Tuple[int, ...]] = None,
+ ) -> None:
+ super().__init__()
+ assert upgrade_strategy in self._allowed_strategies
+
+ if py_version_info is None:
+ py_version_info = sys.version_info[:3]
+ else:
+ py_version_info = normalize_version_info(py_version_info)
+
+ self._py_version_info = py_version_info
+
+ self.preparer = preparer
+ self.finder = finder
+ self.wheel_cache = wheel_cache
+
+ self.upgrade_strategy = upgrade_strategy
+ self.force_reinstall = force_reinstall
+ self.ignore_dependencies = ignore_dependencies
+ self.ignore_installed = ignore_installed
+ self.ignore_requires_python = ignore_requires_python
+ self.use_user_site = use_user_site
+ self._make_install_req = make_install_req
+
+ self._discovered_dependencies: DiscoveredDependencies = defaultdict(list)
+
+ def resolve(
+ self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
+ ) -> RequirementSet:
+ """Resolve what operations need to be done
+
+ As a side-effect of this method, the packages (and their dependencies)
+ are downloaded, unpacked and prepared for installation. This
+ preparation is done by ``pip.operations.prepare``.
+
+ Once PyPI has static dependency metadata available, it would be
+ possible to move the preparation to become a step separate from
+ dependency resolution.
+ """
+ requirement_set = RequirementSet(check_supported_wheels=check_supported_wheels)
+ for req in root_reqs:
+ if req.constraint:
+ check_invalid_constraint_type(req)
+ self._add_requirement_to_set(requirement_set, req)
+
+ # Actually prepare the files, and collect any exceptions. Most hash
+ # exceptions cannot be checked ahead of time, because
+ # _populate_link() needs to be called before we can make decisions
+ # based on link type.
+ discovered_reqs: List[InstallRequirement] = []
+ hash_errors = HashErrors()
+ for req in chain(requirement_set.all_requirements, discovered_reqs):
+ try:
+ discovered_reqs.extend(self._resolve_one(requirement_set, req))
+ except HashError as exc:
+ exc.req = req
+ hash_errors.append(exc)
+
+ if hash_errors:
+ raise hash_errors
+
+ return requirement_set
+
+ def _add_requirement_to_set(
+ self,
+ requirement_set: RequirementSet,
+ install_req: InstallRequirement,
+ parent_req_name: Optional[str] = None,
+ extras_requested: Optional[Iterable[str]] = None,
+ ) -> Tuple[List[InstallRequirement], Optional[InstallRequirement]]:
+ """Add install_req as a requirement to install.
+
+ :param parent_req_name: The name of the requirement that needed this
+ added. The name is used because when multiple unnamed requirements
+ resolve to the same name, we could otherwise end up with dependency
+ links that point outside the Requirements set. parent_req must
+ already be added. Note that None implies that this is a user
+ supplied requirement, vs an inferred one.
+ :param extras_requested: an iterable of extras used to evaluate the
+ environment markers.
+ :return: Additional requirements to scan. That is either [] if
+ the requirement is not applicable, or [install_req] if the
+ requirement is applicable and has just been added.
+ """
+ # If the markers do not match, ignore this requirement.
+ if not install_req.match_markers(extras_requested):
+ logger.info(
+ "Ignoring %s: markers '%s' don't match your environment",
+ install_req.name,
+ install_req.markers,
+ )
+ return [], None
+
+ # If the wheel is not supported, raise an error.
+ # Should check this after filtering out based on environment markers to
+ # allow specifying different wheels based on the environment/OS, in a
+ # single requirements file.
+ if install_req.link and install_req.link.is_wheel:
+ wheel = Wheel(install_req.link.filename)
+ tags = compatibility_tags.get_supported()
+ if requirement_set.check_supported_wheels and not wheel.supported(tags):
+ raise InstallationError(
+ "{} is not a supported wheel on this platform.".format(
+ wheel.filename
+ )
+ )
+
+ # This next bit is really a sanity check.
+ assert (
+ not install_req.user_supplied or parent_req_name is None
+ ), "a user supplied req shouldn't have a parent"
+
+ # Unnamed requirements are scanned again and the requirement won't be
+ # added as a dependency until after scanning.
+ if not install_req.name:
+ requirement_set.add_unnamed_requirement(install_req)
+ return [install_req], None
+
+ try:
+ existing_req: Optional[
+ InstallRequirement
+ ] = requirement_set.get_requirement(install_req.name)
+ except KeyError:
+ existing_req = None
+
+ has_conflicting_requirement = (
+ parent_req_name is None
+ and existing_req
+ and not existing_req.constraint
+ and existing_req.extras == install_req.extras
+ and existing_req.req
+ and install_req.req
+ and existing_req.req.specifier != install_req.req.specifier
+ )
+ if has_conflicting_requirement:
+ raise InstallationError(
+ "Double requirement given: {} (already in {}, name={!r})".format(
+ install_req, existing_req, install_req.name
+ )
+ )
+
+ # When no existing requirement exists, add the requirement as a
+ # dependency and it will be scanned again after.
+ if not existing_req:
+ requirement_set.add_named_requirement(install_req)
+ # We'd want to rescan this requirement later
+ return [install_req], install_req
+
+ # Assume there's no need to scan, and that we've already
+ # encountered this for scanning.
+ if install_req.constraint or not existing_req.constraint:
+ return [], existing_req
+
+ does_not_satisfy_constraint = install_req.link and not (
+ existing_req.link and install_req.link.path == existing_req.link.path
+ )
+ if does_not_satisfy_constraint:
+ raise InstallationError(
+ "Could not satisfy constraints for '{}': "
+ "installation from path or url cannot be "
+ "constrained to a version".format(install_req.name)
+ )
+ # If we're now installing a constraint, mark the existing
+ # object for real installation.
+ existing_req.constraint = False
+ # If we're now installing a user supplied requirement,
+ # mark the existing object as such.
+ if install_req.user_supplied:
+ existing_req.user_supplied = True
+ existing_req.extras = tuple(
+ sorted(set(existing_req.extras) | set(install_req.extras))
+ )
+ logger.debug(
+ "Setting %s extras to: %s",
+ existing_req,
+ existing_req.extras,
+ )
+ # Return the existing requirement for addition to the parent and
+ # scanning again.
+ return [existing_req], existing_req
+
+ def _is_upgrade_allowed(self, req: InstallRequirement) -> bool:
+ if self.upgrade_strategy == "to-satisfy-only":
+ return False
+ elif self.upgrade_strategy == "eager":
+ return True
+ else:
+ assert self.upgrade_strategy == "only-if-needed"
+ return req.user_supplied or req.constraint
+
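+ # Summarizing the strategies above (editor's note):
+ # "to-satisfy-only" never upgrades, "eager" always upgrades, and
+ # "only-if-needed" upgrades only requirements that the user asked for
+ # directly or that arrived as constraints.
+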
+ def _set_req_to_reinstall(self, req: InstallRequirement) -> None:
+ """
+ Set a requirement to be installed.
+ """
+ # Don't uninstall the conflict if doing a user install and the
+ # conflict is not a user install.
+ if not self.use_user_site or req.satisfied_by.in_usersite:
+ req.should_reinstall = True
+ req.satisfied_by = None
+
+ def _check_skip_installed(
+ self, req_to_install: InstallRequirement
+ ) -> Optional[str]:
+ """Check if req_to_install should be skipped.
+
+ This will check if the req is installed, and whether we should upgrade
+ or reinstall it, taking into account all the relevant user options.
+
+ After calling this, req_to_install will only have satisfied_by set
+ to None if it is to be upgraded/reinstalled etc. Any other value is
+ the currently installed distribution that satisfies the requirement.
+
+ Note that for vcs urls and the like we can't assess skipping in this
+ routine - we simply identify that we need to pull the thing down,
+ then later on it is pulled down and introspected to assess upgrade/
+ reinstalls etc.
+
+ :return: A text reason for why it was skipped, or None.
+ """
+ if self.ignore_installed:
+ return None
+
+ req_to_install.check_if_exists(self.use_user_site)
+ if not req_to_install.satisfied_by:
+ return None
+
+ if self.force_reinstall:
+ self._set_req_to_reinstall(req_to_install)
+ return None
+
+ if not self._is_upgrade_allowed(req_to_install):
+ if self.upgrade_strategy == "only-if-needed":
+ return "already satisfied, skipping upgrade"
+ return "already satisfied"
+
+ # Check for the possibility of an upgrade. For link-based
+ # requirements we have to pull the tree down and inspect to assess
+ # the version #, so it's handled way down.
+ if not req_to_install.link:
+ try:
+ self.finder.find_requirement(req_to_install, upgrade=True)
+ except BestVersionAlreadyInstalled:
+ # Then the best version is installed.
+ return "already up-to-date"
+ except DistributionNotFound:
+ # No distribution found, so we squash the error. It will
+ # be raised later when we re-try later to do the install.
+ # Why don't we just raise here?
+ pass
+
+ self._set_req_to_reinstall(req_to_install)
+ return None
+
+ def _find_requirement_link(self, req: InstallRequirement) -> Optional[Link]:
+ upgrade = self._is_upgrade_allowed(req)
+ best_candidate = self.finder.find_requirement(req, upgrade)
+ if not best_candidate:
+ return None
+
+ # Log a warning per PEP 592 if necessary before returning.
+ link = best_candidate.link
+ if link.is_yanked:
+ reason = link.yanked_reason or "<none given>"
+ msg = (
+ # Mark this as a unicode string to prevent
+ # "UnicodeEncodeError: 'ascii' codec can't encode character"
+ # in Python 2 when the reason contains non-ascii characters.
+ "The candidate selected for download or install is a "
+ "yanked version: {candidate}\n"
+ "Reason for being yanked: {reason}"
+ ).format(candidate=best_candidate, reason=reason)
+ logger.warning(msg)
+
+ return link
+
+ def _populate_link(self, req: InstallRequirement) -> None:
+ """Ensure that if a link can be found for this, that it is found.
+
+ Note that req.link may still be None - if the requirement is already
+ installed and not needed to be upgraded based on the return value of
+ _is_upgrade_allowed().
+
+ If preparer.require_hashes is True, don't use the wheel cache, because
+ cached wheels, always built locally, have different hashes than the
+ files downloaded from the index server and thus throw false hash
+ mismatches. Furthermore, cached wheels at present have nondeterministic
+ contents due to file modification times.
+ """
+ if req.link is None:
+ req.link = self._find_requirement_link(req)
+
+ if self.wheel_cache is None or self.preparer.require_hashes:
+ return
+ cache_entry = self.wheel_cache.get_cache_entry(
+ link=req.link,
+ package_name=req.name,
+ supported_tags=get_supported(),
+ )
+ if cache_entry is not None:
+ logger.debug("Using cached wheel link: %s", cache_entry.link)
+ if req.link is req.original_link and cache_entry.persistent:
+ req.original_link_is_in_wheel_cache = True
+ if cache_entry.origin is not None:
+ req.download_info = cache_entry.origin
+ else:
+ # Legacy cache entry that does not have origin.json.
+ # download_info may miss the archive_info.hash field.
+ req.download_info = direct_url_from_link(
+ req.link, link_is_in_wheel_cache=cache_entry.persistent
+ )
+ req.link = cache_entry.link
+
+ def _get_dist_for(self, req: InstallRequirement) -> BaseDistribution:
+ """Takes a InstallRequirement and returns a single AbstractDist \
+ representing a prepared variant of the same.
+ """
+ if req.editable:
+ return self.preparer.prepare_editable_requirement(req)
+
+ # satisfied_by is only evaluated by calling _check_skip_installed,
+ # so it must be None here.
+ assert req.satisfied_by is None
+ skip_reason = self._check_skip_installed(req)
+
+ if req.satisfied_by:
+ return self.preparer.prepare_installed_requirement(req, skip_reason)
+
+ # We eagerly populate the link, since that's our "legacy" behavior.
+ self._populate_link(req)
+ dist = self.preparer.prepare_linked_requirement(req)
+
+ # NOTE
+ # The following portion is for determining if a certain package is
+ # going to be re-installed/upgraded or not and reporting to the user.
+ # This should probably get cleaned up in a future refactor.
+
+ # req.req is only available after unpack for URL packages, so we
+ # repeat check_if_exists to support uninstall-on-upgrade (#14).
+ if not self.ignore_installed:
+ req.check_if_exists(self.use_user_site)
+
+ if req.satisfied_by:
+ should_modify = (
+ self.upgrade_strategy != "to-satisfy-only"
+ or self.force_reinstall
+ or self.ignore_installed
+ or req.link.scheme == "file"
+ )
+ if should_modify:
+ self._set_req_to_reinstall(req)
+ else:
+ logger.info(
+ "Requirement already satisfied (use --upgrade to upgrade): %s",
+ req,
+ )
+ return dist
+
+ def _resolve_one(
+ self,
+ requirement_set: RequirementSet,
+ req_to_install: InstallRequirement,
+ ) -> List[InstallRequirement]:
+ """Prepare a single requirements file.
+
+ :return: A list of additional InstallRequirements to also install.
+ """
+ # Tell user what we are doing for this requirement:
+ # obtain (editable), skipping, processing (local url), collecting
+ # (remote url or package name)
+ if req_to_install.constraint or req_to_install.prepared:
+ return []
+
+ req_to_install.prepared = True
+
+ # Parse and return dependencies
+ dist = self._get_dist_for(req_to_install)
+ # This will raise UnsupportedPythonVersion if the given Python
+ # version isn't compatible with the distribution's Requires-Python.
+ _check_dist_requires_python(
+ dist,
+ version_info=self._py_version_info,
+ ignore_requires_python=self.ignore_requires_python,
+ )
+
+ more_reqs: List[InstallRequirement] = []
+
+ def add_req(subreq: Requirement, extras_requested: Iterable[str]) -> None:
+ # This idiosyncratically converts the Requirement to str and lets
+ # make_install_req parse it again into a Requirement. But this is
+ # the legacy resolver so I'm just not going to bother refactoring.
+ sub_install_req = self._make_install_req(str(subreq), req_to_install)
+ parent_req_name = req_to_install.name
+ to_scan_again, add_to_parent = self._add_requirement_to_set(
+ requirement_set,
+ sub_install_req,
+ parent_req_name=parent_req_name,
+ extras_requested=extras_requested,
+ )
+ if parent_req_name and add_to_parent:
+ self._discovered_dependencies[parent_req_name].append(add_to_parent)
+ more_reqs.extend(to_scan_again)
+
+ with indent_log():
+ # We add req_to_install before its dependencies, so that we
+ # can refer to it when adding dependencies.
+ if not requirement_set.has_requirement(req_to_install.name):
+ # 'unnamed' requirements will get added here
+ # 'unnamed' requirements can only come from being directly
+ # provided by the user.
+ assert req_to_install.user_supplied
+ self._add_requirement_to_set(
+ requirement_set, req_to_install, parent_req_name=None
+ )
+
+ if not self.ignore_dependencies:
+ if req_to_install.extras:
+ logger.debug(
+ "Installing extra requirements: %r",
+ ",".join(req_to_install.extras),
+ )
+ missing_requested = sorted(
+ set(req_to_install.extras) - set(dist.iter_provided_extras())
+ )
+ for missing in missing_requested:
+ logger.warning(
+ "%s %s does not provide the extra '%s'",
+ dist.raw_name,
+ dist.version,
+ missing,
+ )
+
+ available_requested = sorted(
+ set(dist.iter_provided_extras()) & set(req_to_install.extras)
+ )
+ for subreq in dist.iter_dependencies(available_requested):
+ add_req(subreq, extras_requested=available_requested)
+
+ return more_reqs
+
+ def get_installation_order(
+ self, req_set: RequirementSet
+ ) -> List[InstallRequirement]:
+ """Create the installation order.
+
+ The installation order is topological - requirements are installed
+ before the requiring thing. We break cycles at an arbitrary point,
+ and make no other guarantees.
+ """
+ # The current implementation, which we may change at any point,
+ # installs the user-specified things in the order given, except when
+ # dependencies must come earlier to achieve topological order.
+ order = []
+ ordered_reqs: Set[InstallRequirement] = set()
+
+ def schedule(req: InstallRequirement) -> None:
+ if req.satisfied_by or req in ordered_reqs:
+ return
+ if req.constraint:
+ return
+ ordered_reqs.add(req)
+ for dep in self._discovered_dependencies[req.name]:
+ schedule(dep)
+ order.append(req)
+
+ for install_req in req_set.requirements.values():
+ schedule(install_req)
+ return order
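+
+
+# Note on the traversal above (editor's sketch): schedule() appends a
+# requirement only after recursing into its discovered dependencies, so
+# the resulting order is topological; because a requirement is added to
+# ordered_reqs before the recursion, a dependency cycle simply returns
+# early, which is where cycles get broken.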
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/__init__.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/__init__.py
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/base.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/base.py
new file mode 100644
index 0000000000..b206692a0a
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/base.py
@@ -0,0 +1,141 @@
+from typing import FrozenSet, Iterable, Optional, Tuple, Union
+
+from pip._vendor.packaging.specifiers import SpecifierSet
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+from pip._vendor.packaging.version import LegacyVersion, Version
+
+from pip._internal.models.link import Link, links_equivalent
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.hashes import Hashes
+
+CandidateLookup = Tuple[Optional["Candidate"], Optional[InstallRequirement]]
+CandidateVersion = Union[LegacyVersion, Version]
+
+
+def format_name(project: str, extras: FrozenSet[str]) -> str:
+ if not extras:
+ return project
+ canonical_extras = sorted(canonicalize_name(e) for e in extras)
+ return "{}[{}]".format(project, ",".join(canonical_extras))
+
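+# For example (editor's sketch): format_name("pip", frozenset({"SSL",
+# "socks"})) returns "pip[socks,ssl]" -- extras are canonicalized and
+# sorted so equivalent requirements map to a single resolver identifier.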
+
+class Constraint:
+ def __init__(
+ self, specifier: SpecifierSet, hashes: Hashes, links: FrozenSet[Link]
+ ) -> None:
+ self.specifier = specifier
+ self.hashes = hashes
+ self.links = links
+
+ @classmethod
+ def empty(cls) -> "Constraint":
+ return Constraint(SpecifierSet(), Hashes(), frozenset())
+
+ @classmethod
+ def from_ireq(cls, ireq: InstallRequirement) -> "Constraint":
+ links = frozenset([ireq.link]) if ireq.link else frozenset()
+ return Constraint(ireq.specifier, ireq.hashes(trust_internet=False), links)
+
+ def __bool__(self) -> bool:
+ return bool(self.specifier) or bool(self.hashes) or bool(self.links)
+
+ def __and__(self, other: InstallRequirement) -> "Constraint":
+ if not isinstance(other, InstallRequirement):
+ return NotImplemented
+ specifier = self.specifier & other.specifier
+ hashes = self.hashes & other.hashes(trust_internet=False)
+ links = self.links
+ if other.link:
+ links = links.union([other.link])
+ return Constraint(specifier, hashes, links)
+
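+ # Illustrative merge (editor's sketch): Constraint.empty() & an
+ # InstallRequirement for "pkg>=1.0" yields a constraint whose
+ # specifier is ">=1.0", whose hashes come from the requirement, and
+ # whose links include the requirement's link if it has one.
+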
+ def is_satisfied_by(self, candidate: "Candidate") -> bool:
+ # Reject if there are any mismatched URL constraints on this package.
+ if self.links and not all(_match_link(link, candidate) for link in self.links):
+ return False
+ # We can safely always allow prereleases here since PackageFinder
+ # already implements the prerelease logic, and would have filtered out
+ # prerelease candidates if the user does not expect them.
+ return self.specifier.contains(candidate.version, prereleases=True)
+
+
+class Requirement:
+ @property
+ def project_name(self) -> NormalizedName:
+ """The "project name" of a requirement.
+
+ This is different from ``name`` if this requirement contains extras,
+ in which case ``name`` would contain the ``[...]`` part, while this
+ refers to the name of the project.
+ """
+ raise NotImplementedError("Subclass should override")
+
+ @property
+ def name(self) -> str:
+ """The name identifying this requirement in the resolver.
+
+ This is different from ``project_name`` if this requirement contains
+ extras, where ``project_name`` would not contain the ``[...]`` part.
+ """
+ raise NotImplementedError("Subclass should override")
+
+ def is_satisfied_by(self, candidate: "Candidate") -> bool:
+ return False
+
+ def get_candidate_lookup(self) -> CandidateLookup:
+ raise NotImplementedError("Subclass should override")
+
+ def format_for_error(self) -> str:
+ raise NotImplementedError("Subclass should override")
+
+
+def _match_link(link: Link, candidate: "Candidate") -> bool:
+ if candidate.source_link:
+ return links_equivalent(link, candidate.source_link)
+ return False
+
+
+class Candidate:
+ @property
+ def project_name(self) -> NormalizedName:
+ """The "project name" of the candidate.
+
+ This is different from ``name`` if this candidate contains extras,
+ in which case ``name`` would contain the ``[...]`` part, while this
+ refers to the name of the project.
+ """
+ raise NotImplementedError("Override in subclass")
+
+ @property
+ def name(self) -> str:
+ """The name identifying this candidate in the resolver.
+
+ This is different from ``project_name`` if this candidate contains
+ extras, where ``project_name`` would not contain the ``[...]`` part.
+ """
+ raise NotImplementedError("Override in subclass")
+
+ @property
+ def version(self) -> CandidateVersion:
+ raise NotImplementedError("Override in subclass")
+
+ @property
+ def is_installed(self) -> bool:
+ raise NotImplementedError("Override in subclass")
+
+ @property
+ def is_editable(self) -> bool:
+ raise NotImplementedError("Override in subclass")
+
+ @property
+ def source_link(self) -> Optional[Link]:
+ raise NotImplementedError("Override in subclass")
+
+ def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
+ raise NotImplementedError("Override in subclass")
+
+ def get_install_requirement(self) -> Optional[InstallRequirement]:
+ raise NotImplementedError("Override in subclass")
+
+ def format_for_error(self) -> str:
+ raise NotImplementedError("Subclass should override")
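`Constraint` above merges per-project constraints by intersecting specifiers (`__and__`) and, in `is_satisfied_by`, checks containment with `prereleases=True`, trusting `PackageFinder` to have already filtered out unexpected prereleases. A small sketch of those specifier semantics using the standalone `packaging` distribution, which provides the same `SpecifierSet` code pip vendors (assuming it is installed):

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

# Merging two constraints on one project is specifier intersection,
# mirroring Constraint.__and__ above.
merged = SpecifierSet(">=1.0") & SpecifierSet("<2.0")

# is_satisfied_by() delegates to containment with prereleases enabled.
assert merged.contains(Version("1.5"), prereleases=True)
assert not merged.contains(Version("2.1"), prereleases=True)
```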
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/candidates.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/candidates.py
new file mode 100644
index 0000000000..f5bc343b91
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/candidates.py
@@ -0,0 +1,556 @@
+import logging
+import sys
+from typing import TYPE_CHECKING, Any, FrozenSet, Iterable, Optional, Tuple, Union, cast
+
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+from pip._vendor.packaging.version import Version
+
+from pip._internal.exceptions import (
+ HashError,
+ InstallationSubprocessError,
+ MetadataInconsistent,
+)
+from pip._internal.metadata import BaseDistribution
+from pip._internal.models.link import Link, links_equivalent
+from pip._internal.models.wheel import Wheel
+from pip._internal.req.constructors import (
+ install_req_from_editable,
+ install_req_from_line,
+)
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.direct_url_helpers import direct_url_from_link
+from pip._internal.utils.misc import normalize_version_info
+
+from .base import Candidate, CandidateVersion, Requirement, format_name
+
+if TYPE_CHECKING:
+ from .factory import Factory
+
+logger = logging.getLogger(__name__)
+
+BaseCandidate = Union[
+ "AlreadyInstalledCandidate",
+ "EditableCandidate",
+ "LinkCandidate",
+]
+
+# Avoid conflicting with the PyPI package "Python".
+REQUIRES_PYTHON_IDENTIFIER = cast(NormalizedName, "<Python from Requires-Python>")
+
+
+def as_base_candidate(candidate: Candidate) -> Optional[BaseCandidate]:
+ """The runtime version of BaseCandidate."""
+ base_candidate_classes = (
+ AlreadyInstalledCandidate,
+ EditableCandidate,
+ LinkCandidate,
+ )
+ if isinstance(candidate, base_candidate_classes):
+ return candidate
+ return None
+
+
+def make_install_req_from_link(
+ link: Link, template: InstallRequirement
+) -> InstallRequirement:
+ assert not template.editable, "template is editable"
+ if template.req:
+ line = str(template.req)
+ else:
+ line = link.url
+ ireq = install_req_from_line(
+ line,
+ user_supplied=template.user_supplied,
+ comes_from=template.comes_from,
+ use_pep517=template.use_pep517,
+ isolated=template.isolated,
+ constraint=template.constraint,
+ options=dict(
+ install_options=template.install_options,
+ global_options=template.global_options,
+ hashes=template.hash_options,
+ ),
+ config_settings=template.config_settings,
+ )
+ ireq.original_link = template.original_link
+ ireq.link = link
+ return ireq
+
+
+def make_install_req_from_editable(
+ link: Link, template: InstallRequirement
+) -> InstallRequirement:
+ assert template.editable, "template not editable"
+ return install_req_from_editable(
+ link.url,
+ user_supplied=template.user_supplied,
+ comes_from=template.comes_from,
+ use_pep517=template.use_pep517,
+ isolated=template.isolated,
+ constraint=template.constraint,
+ permit_editable_wheels=template.permit_editable_wheels,
+ options=dict(
+ install_options=template.install_options,
+ global_options=template.global_options,
+ hashes=template.hash_options,
+ ),
+ config_settings=template.config_settings,
+ )
+
+
+def _make_install_req_from_dist(
+ dist: BaseDistribution, template: InstallRequirement
+) -> InstallRequirement:
+ if template.req:
+ line = str(template.req)
+ elif template.link:
+ line = f"{dist.canonical_name} @ {template.link.url}"
+ else:
+ line = f"{dist.canonical_name}=={dist.version}"
+ ireq = install_req_from_line(
+ line,
+ user_supplied=template.user_supplied,
+ comes_from=template.comes_from,
+ use_pep517=template.use_pep517,
+ isolated=template.isolated,
+ constraint=template.constraint,
+ options=dict(
+ install_options=template.install_options,
+ global_options=template.global_options,
+ hashes=template.hash_options,
+ ),
+ config_settings=template.config_settings,
+ )
+ ireq.satisfied_by = dist
+ return ireq
+
+
+class _InstallRequirementBackedCandidate(Candidate):
+ """A candidate backed by an ``InstallRequirement``.
+
+ This represents a package request whose target is not already present in
+ the environment, and so needs to be fetched and installed. The backing
+ ``InstallRequirement`` is responsible for most of the leg work; this
+ class exposes appropriate information to the resolver.
+
+ :param link: The link passed to the ``InstallRequirement``. The backing
+ ``InstallRequirement`` will use this link to fetch the distribution.
+ :param source_link: The link this candidate "originates" from. This is
+ different from ``link`` when the link is found in the wheel cache.
+ ``link`` would point to the wheel cache, while this points to the
+ found remote link (e.g. from pypi.org).
+ """
+
+ dist: BaseDistribution
+ is_installed = False
+
+ def __init__(
+ self,
+ link: Link,
+ source_link: Link,
+ ireq: InstallRequirement,
+ factory: "Factory",
+ name: Optional[NormalizedName] = None,
+ version: Optional[CandidateVersion] = None,
+ ) -> None:
+ self._link = link
+ self._source_link = source_link
+ self._factory = factory
+ self._ireq = ireq
+ self._name = name
+ self._version = version
+ self.dist = self._prepare()
+
+ def __str__(self) -> str:
+ return f"{self.name} {self.version}"
+
+ def __repr__(self) -> str:
+ return "{class_name}({link!r})".format(
+ class_name=self.__class__.__name__,
+ link=str(self._link),
+ )
+
+ def __hash__(self) -> int:
+ return hash((self.__class__, self._link))
+
+ def __eq__(self, other: Any) -> bool:
+ if isinstance(other, self.__class__):
+ return links_equivalent(self._link, other._link)
+ return False
+
+ @property
+ def source_link(self) -> Optional[Link]:
+ return self._source_link
+
+ @property
+ def project_name(self) -> NormalizedName:
+ """The normalised name of the project the candidate refers to"""
+ if self._name is None:
+ self._name = self.dist.canonical_name
+ return self._name
+
+ @property
+ def name(self) -> str:
+ return self.project_name
+
+ @property
+ def version(self) -> CandidateVersion:
+ if self._version is None:
+ self._version = self.dist.version
+ return self._version
+
+ def format_for_error(self) -> str:
+ return "{} {} (from {})".format(
+ self.name,
+ self.version,
+ self._link.file_path if self._link.is_file else self._link,
+ )
+
+ def _prepare_distribution(self) -> BaseDistribution:
+ raise NotImplementedError("Override in subclass")
+
+ def _check_metadata_consistency(self, dist: BaseDistribution) -> None:
+ """Check for consistency of project name and version of dist."""
+ if self._name is not None and self._name != dist.canonical_name:
+ raise MetadataInconsistent(
+ self._ireq,
+ "name",
+ self._name,
+ dist.canonical_name,
+ )
+ if self._version is not None and self._version != dist.version:
+ raise MetadataInconsistent(
+ self._ireq,
+ "version",
+ str(self._version),
+ str(dist.version),
+ )
+
+ def _prepare(self) -> BaseDistribution:
+ try:
+ dist = self._prepare_distribution()
+ except HashError as e:
+ # Provide HashError the underlying ireq that caused it. This
+ # provides context for the resulting error message to show the
+ # offending line to the user.
+ e.req = self._ireq
+ raise
+ except InstallationSubprocessError as exc:
+ # The output has been presented already, so don't duplicate it.
+ exc.context = "See above for output."
+ raise
+
+ self._check_metadata_consistency(dist)
+ return dist
+
+ def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
+ requires = self.dist.iter_dependencies() if with_requires else ()
+ for r in requires:
+ yield self._factory.make_requirement_from_spec(str(r), self._ireq)
+ yield self._factory.make_requires_python_requirement(self.dist.requires_python)
+
+ def get_install_requirement(self) -> Optional[InstallRequirement]:
+ return self._ireq
+
+
+class LinkCandidate(_InstallRequirementBackedCandidate):
+ is_editable = False
+
+ def __init__(
+ self,
+ link: Link,
+ template: InstallRequirement,
+ factory: "Factory",
+ name: Optional[NormalizedName] = None,
+ version: Optional[CandidateVersion] = None,
+ ) -> None:
+ source_link = link
+ cache_entry = factory.get_wheel_cache_entry(link, name)
+ if cache_entry is not None:
+ logger.debug("Using cached wheel link: %s", cache_entry.link)
+ link = cache_entry.link
+ ireq = make_install_req_from_link(link, template)
+ assert ireq.link == link
+ if ireq.link.is_wheel and not ireq.link.is_file:
+ wheel = Wheel(ireq.link.filename)
+ wheel_name = canonicalize_name(wheel.name)
+ assert name == wheel_name, f"{name!r} != {wheel_name!r} for wheel"
+ # Version may not be present for PEP 508 direct URLs
+ if version is not None:
+ wheel_version = Version(wheel.version)
+ assert version == wheel_version, "{!r} != {!r} for wheel {}".format(
+ version, wheel_version, name
+ )
+
+ if cache_entry is not None:
+ if cache_entry.persistent and template.link is template.original_link:
+ ireq.original_link_is_in_wheel_cache = True
+ if cache_entry.origin is not None:
+ ireq.download_info = cache_entry.origin
+ else:
+ # Legacy cache entry that does not have origin.json.
+ # download_info may miss the archive_info.hash field.
+ ireq.download_info = direct_url_from_link(
+ source_link, link_is_in_wheel_cache=cache_entry.persistent
+ )
+
+ super().__init__(
+ link=link,
+ source_link=source_link,
+ ireq=ireq,
+ factory=factory,
+ name=name,
+ version=version,
+ )
+
+ def _prepare_distribution(self) -> BaseDistribution:
+ preparer = self._factory.preparer
+ return preparer.prepare_linked_requirement(self._ireq, parallel_builds=True)
+
+
+class EditableCandidate(_InstallRequirementBackedCandidate):
+ is_editable = True
+
+ def __init__(
+ self,
+ link: Link,
+ template: InstallRequirement,
+ factory: "Factory",
+ name: Optional[NormalizedName] = None,
+ version: Optional[CandidateVersion] = None,
+ ) -> None:
+ super().__init__(
+ link=link,
+ source_link=link,
+ ireq=make_install_req_from_editable(link, template),
+ factory=factory,
+ name=name,
+ version=version,
+ )
+
+ def _prepare_distribution(self) -> BaseDistribution:
+ return self._factory.preparer.prepare_editable_requirement(self._ireq)
+
+
+class AlreadyInstalledCandidate(Candidate):
+ is_installed = True
+ source_link = None
+
+ def __init__(
+ self,
+ dist: BaseDistribution,
+ template: InstallRequirement,
+ factory: "Factory",
+ ) -> None:
+ self.dist = dist
+ self._ireq = _make_install_req_from_dist(dist, template)
+ self._factory = factory
+
+ # This is just logging some messages, so we can do it eagerly.
+ # The returned dist would be exactly the same as self.dist because we
+ # set satisfied_by in _make_install_req_from_dist.
+ # TODO: Supply reason based on force_reinstall and upgrade_strategy.
+ skip_reason = "already satisfied"
+ factory.preparer.prepare_installed_requirement(self._ireq, skip_reason)
+
+ def __str__(self) -> str:
+ return str(self.dist)
+
+ def __repr__(self) -> str:
+ return "{class_name}({distribution!r})".format(
+ class_name=self.__class__.__name__,
+ distribution=self.dist,
+ )
+
+ def __hash__(self) -> int:
+ return hash((self.__class__, self.name, self.version))
+
+ def __eq__(self, other: Any) -> bool:
+ if isinstance(other, self.__class__):
+ return self.name == other.name and self.version == other.version
+ return False
+
+ @property
+ def project_name(self) -> NormalizedName:
+ return self.dist.canonical_name
+
+ @property
+ def name(self) -> str:
+ return self.project_name
+
+ @property
+ def version(self) -> CandidateVersion:
+ return self.dist.version
+
+ @property
+ def is_editable(self) -> bool:
+ return self.dist.editable
+
+ def format_for_error(self) -> str:
+ return f"{self.name} {self.version} (Installed)"
+
+ def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
+ if not with_requires:
+ return
+ for r in self.dist.iter_dependencies():
+ yield self._factory.make_requirement_from_spec(str(r), self._ireq)
+
+ def get_install_requirement(self) -> Optional[InstallRequirement]:
+ return None
+
+
+class ExtrasCandidate(Candidate):
+ """A candidate that has 'extras', indicating additional dependencies.
+
+ Requirements can be for a project with dependencies, something like
+ foo[extra]. The extras don't affect the project/version being installed
+ directly, but indicate that we need additional dependencies. We model that
+ by having an artificial ExtrasCandidate that wraps the "base" candidate.
+
+ The ExtrasCandidate differs from the base in the following ways:
+
+ 1. It has a unique name, of the form foo[extra]. This causes the resolver
+ to treat it as a separate node in the dependency graph.
+ 2. When we're getting the candidate's dependencies,
+ a) We specify that we want the extra dependencies as well.
+ b) We add a dependency on the base candidate.
+ See below for why this is needed.
+ 3. We return None for the underlying InstallRequirement, as the base
+ candidate will provide it, and we don't want to end up with duplicates.
+
+ The dependency on the base candidate is needed so that the resolver can't
+ decide that it should recommend foo[extra1] version 1.0 and foo[extra2]
+ version 2.0. Having those candidates depend on foo=1.0 and foo=2.0
+ respectively forces the resolver to recognise that this is a conflict.
+ """
+
+ def __init__(
+ self,
+ base: BaseCandidate,
+ extras: FrozenSet[str],
+ ) -> None:
+ self.base = base
+ self.extras = extras
+
+ def __str__(self) -> str:
+ name, rest = str(self.base).split(" ", 1)
+ return "{}[{}] {}".format(name, ",".join(self.extras), rest)
+
+ def __repr__(self) -> str:
+ return "{class_name}(base={base!r}, extras={extras!r})".format(
+ class_name=self.__class__.__name__,
+ base=self.base,
+ extras=self.extras,
+ )
+
+ def __hash__(self) -> int:
+ return hash((self.base, self.extras))
+
+ def __eq__(self, other: Any) -> bool:
+ if isinstance(other, self.__class__):
+ return self.base == other.base and self.extras == other.extras
+ return False
+
+ @property
+ def project_name(self) -> NormalizedName:
+ return self.base.project_name
+
+ @property
+ def name(self) -> str:
+ """The normalised name of the project the candidate refers to"""
+ return format_name(self.base.project_name, self.extras)
+
+ @property
+ def version(self) -> CandidateVersion:
+ return self.base.version
+
+ def format_for_error(self) -> str:
+ return "{} [{}]".format(
+ self.base.format_for_error(), ", ".join(sorted(self.extras))
+ )
+
+ @property
+ def is_installed(self) -> bool:
+ return self.base.is_installed
+
+ @property
+ def is_editable(self) -> bool:
+ return self.base.is_editable
+
+ @property
+ def source_link(self) -> Optional[Link]:
+ return self.base.source_link
+
+ def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
+ factory = self.base._factory
+
+ # Add a dependency on the exact base
+ # (See note 2b in the class docstring)
+ yield factory.make_requirement_from_candidate(self.base)
+ if not with_requires:
+ return
+
+ # The user may have specified extras that the candidate doesn't
+ # support. We ignore any unsupported extras here.
+ valid_extras = self.extras.intersection(self.base.dist.iter_provided_extras())
+ invalid_extras = self.extras.difference(self.base.dist.iter_provided_extras())
+ for extra in sorted(invalid_extras):
+ logger.warning(
+ "%s %s does not provide the extra '%s'",
+ self.base.name,
+ self.version,
+ extra,
+ )
+
+ for r in self.base.dist.iter_dependencies(valid_extras):
+ requirement = factory.make_requirement_from_spec(
+ str(r), self.base._ireq, valid_extras
+ )
+ if requirement:
+ yield requirement
+
+ def get_install_requirement(self) -> Optional[InstallRequirement]:
+ # We don't return anything here, because we always
+ # depend on the base candidate, and we'll get the
+ # install requirement from that.
+ return None
+
+
+class RequiresPythonCandidate(Candidate):
+ is_installed = False
+ source_link = None
+
+ def __init__(self, py_version_info: Optional[Tuple[int, ...]]) -> None:
+ if py_version_info is not None:
+ version_info = normalize_version_info(py_version_info)
+ else:
+ version_info = sys.version_info[:3]
+ self._version = Version(".".join(str(c) for c in version_info))
+
+ # We don't need to implement __eq__() and __ne__() since there is always
+ # only one RequiresPythonCandidate in a resolution, i.e. the host Python.
+ # The built-in object.__eq__() and object.__ne__() do exactly what we want.
+
+ def __str__(self) -> str:
+ return f"Python {self._version}"
+
+ @property
+ def project_name(self) -> NormalizedName:
+ return REQUIRES_PYTHON_IDENTIFIER
+
+ @property
+ def name(self) -> str:
+ return REQUIRES_PYTHON_IDENTIFIER
+
+ @property
+ def version(self) -> CandidateVersion:
+ return self._version
+
+ def format_for_error(self) -> str:
+ return f"Python {self.version}"
+
+ def iter_dependencies(self, with_requires: bool) -> Iterable[Optional[Requirement]]:
+ return ()
+
+ def get_install_requirement(self) -> Optional[InstallRequirement]:
+ return None
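`ExtrasCandidate` above relies on `format_name` from `base.py` for its distinct resolver identity: extras are canonicalized and sorted, so different spellings of the same extras collapse to one node name. A standalone re-implementation to show that behaviour, again assuming the `packaging` distribution for `canonicalize_name`:

```python
from typing import FrozenSet

from packaging.utils import canonicalize_name


def format_name(project: str, extras: FrozenSet[str]) -> str:
    # Same logic as base.format_name: no extras means the plain project name.
    if not extras:
        return project
    canonical_extras = sorted(canonicalize_name(e) for e in extras)
    return "{}[{}]".format(project, ",".join(canonical_extras))


assert format_name("foo", frozenset()) == "foo"
# Canonicalized and sorted, so the identifier is stable across spellings:
assert format_name("foo", frozenset({"Baz", "bar"})) == "foo[bar,baz]"
```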
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/factory.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/factory.py
new file mode 100644
index 0000000000..a4c24b52a1
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/factory.py
@@ -0,0 +1,731 @@
+import contextlib
+import functools
+import logging
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ FrozenSet,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ TypeVar,
+ cast,
+)
+
+from pip._vendor.packaging.requirements import InvalidRequirement
+from pip._vendor.packaging.specifiers import SpecifierSet
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+from pip._vendor.resolvelib import ResolutionImpossible
+
+from pip._internal.cache import CacheEntry, WheelCache
+from pip._internal.exceptions import (
+ DistributionNotFound,
+ InstallationError,
+ MetadataInconsistent,
+ UnsupportedPythonVersion,
+ UnsupportedWheel,
+)
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import BaseDistribution, get_default_environment
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req.constructors import install_req_from_link_and_ireq
+from pip._internal.req.req_install import (
+ InstallRequirement,
+ check_invalid_constraint_type,
+)
+from pip._internal.resolution.base import InstallRequirementProvider
+from pip._internal.utils.compatibility_tags import get_supported
+from pip._internal.utils.hashes import Hashes
+from pip._internal.utils.packaging import get_requirement
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+from .base import Candidate, CandidateVersion, Constraint, Requirement
+from .candidates import (
+ AlreadyInstalledCandidate,
+ BaseCandidate,
+ EditableCandidate,
+ ExtrasCandidate,
+ LinkCandidate,
+ RequiresPythonCandidate,
+ as_base_candidate,
+)
+from .found_candidates import FoundCandidates, IndexCandidateInfo
+from .requirements import (
+ ExplicitRequirement,
+ RequiresPythonRequirement,
+ SpecifierRequirement,
+ UnsatisfiableRequirement,
+)
+
+if TYPE_CHECKING:
+ from typing import Protocol
+
+ class ConflictCause(Protocol):
+ requirement: RequiresPythonRequirement
+ parent: Candidate
+
+
+logger = logging.getLogger(__name__)
+
+C = TypeVar("C")
+Cache = Dict[Link, C]
+
+
+class CollectedRootRequirements(NamedTuple):
+ requirements: List[Requirement]
+ constraints: Dict[str, Constraint]
+ user_requested: Dict[str, int]
+
+
+class Factory:
+ def __init__(
+ self,
+ finder: PackageFinder,
+ preparer: RequirementPreparer,
+ make_install_req: InstallRequirementProvider,
+ wheel_cache: Optional[WheelCache],
+ use_user_site: bool,
+ force_reinstall: bool,
+ ignore_installed: bool,
+ ignore_requires_python: bool,
+ py_version_info: Optional[Tuple[int, ...]] = None,
+ ) -> None:
+ self._finder = finder
+ self.preparer = preparer
+ self._wheel_cache = wheel_cache
+ self._python_candidate = RequiresPythonCandidate(py_version_info)
+ self._make_install_req_from_spec = make_install_req
+ self._use_user_site = use_user_site
+ self._force_reinstall = force_reinstall
+ self._ignore_requires_python = ignore_requires_python
+
+ self._build_failures: Cache[InstallationError] = {}
+ self._link_candidate_cache: Cache[LinkCandidate] = {}
+ self._editable_candidate_cache: Cache[EditableCandidate] = {}
+ self._installed_candidate_cache: Dict[str, AlreadyInstalledCandidate] = {}
+ self._extras_candidate_cache: Dict[
+ Tuple[int, FrozenSet[str]], ExtrasCandidate
+ ] = {}
+
+ if not ignore_installed:
+ env = get_default_environment()
+ self._installed_dists = {
+ dist.canonical_name: dist
+ for dist in env.iter_installed_distributions(local_only=False)
+ }
+ else:
+ self._installed_dists = {}
+
+ @property
+ def force_reinstall(self) -> bool:
+ return self._force_reinstall
+
+ def _fail_if_link_is_unsupported_wheel(self, link: Link) -> None:
+ if not link.is_wheel:
+ return
+ wheel = Wheel(link.filename)
+ if wheel.supported(self._finder.target_python.get_tags()):
+ return
+ msg = f"{link.filename} is not a supported wheel on this platform."
+ raise UnsupportedWheel(msg)
+
+ def _make_extras_candidate(
+ self, base: BaseCandidate, extras: FrozenSet[str]
+ ) -> ExtrasCandidate:
+ cache_key = (id(base), extras)
+ try:
+ candidate = self._extras_candidate_cache[cache_key]
+ except KeyError:
+ candidate = ExtrasCandidate(base, extras)
+ self._extras_candidate_cache[cache_key] = candidate
+ return candidate
+
+ def _make_candidate_from_dist(
+ self,
+ dist: BaseDistribution,
+ extras: FrozenSet[str],
+ template: InstallRequirement,
+ ) -> Candidate:
+ try:
+ base = self._installed_candidate_cache[dist.canonical_name]
+ except KeyError:
+ base = AlreadyInstalledCandidate(dist, template, factory=self)
+ self._installed_candidate_cache[dist.canonical_name] = base
+ if not extras:
+ return base
+ return self._make_extras_candidate(base, extras)
+
+ def _make_candidate_from_link(
+ self,
+ link: Link,
+ extras: FrozenSet[str],
+ template: InstallRequirement,
+ name: Optional[NormalizedName],
+ version: Optional[CandidateVersion],
+ ) -> Optional[Candidate]:
+ # TODO: Check already installed candidate, and use it if the link and
+ # editable flag match.
+
+ if link in self._build_failures:
+ # We already tried this candidate before, and it does not build.
+ # Don't bother trying again.
+ return None
+
+ if template.editable:
+ if link not in self._editable_candidate_cache:
+ try:
+ self._editable_candidate_cache[link] = EditableCandidate(
+ link,
+ template,
+ factory=self,
+ name=name,
+ version=version,
+ )
+ except MetadataInconsistent as e:
+ logger.info(
+ "Discarding [blue underline]%s[/]: [yellow]%s[reset]",
+ link,
+ e,
+ extra={"markup": True},
+ )
+ self._build_failures[link] = e
+ return None
+
+ base: BaseCandidate = self._editable_candidate_cache[link]
+ else:
+ if link not in self._link_candidate_cache:
+ try:
+ self._link_candidate_cache[link] = LinkCandidate(
+ link,
+ template,
+ factory=self,
+ name=name,
+ version=version,
+ )
+ except MetadataInconsistent as e:
+ logger.info(
+ "Discarding [blue underline]%s[/]: [yellow]%s[reset]",
+ link,
+ e,
+ extra={"markup": True},
+ )
+ self._build_failures[link] = e
+ return None
+ base = self._link_candidate_cache[link]
+
+ if not extras:
+ return base
+ return self._make_extras_candidate(base, extras)
+
+ def _iter_found_candidates(
+ self,
+ ireqs: Sequence[InstallRequirement],
+ specifier: SpecifierSet,
+ hashes: Hashes,
+ prefers_installed: bool,
+ incompatible_ids: Set[int],
+ ) -> Iterable[Candidate]:
+ if not ireqs:
+ return ()
+
+ # The InstallRequirement implementation requires us to give it a
+ # "template". Here we just choose the first requirement to represent
+ # all of them.
+ # Hopefully the Project model can correct this mismatch in the future.
+ template = ireqs[0]
+ assert template.req, "Candidates found on index must be PEP 508"
+ name = canonicalize_name(template.req.name)
+
+ extras: FrozenSet[str] = frozenset()
+ for ireq in ireqs:
+ assert ireq.req, "Candidates found on index must be PEP 508"
+ specifier &= ireq.req.specifier
+ hashes &= ireq.hashes(trust_internet=False)
+ extras |= frozenset(ireq.extras)
+
+ def _get_installed_candidate() -> Optional[Candidate]:
+ """Get the candidate for the currently-installed version."""
+ # If --force-reinstall is set, we want the version from the index
+ # instead, so we "pretend" there is nothing installed.
+ if self._force_reinstall:
+ return None
+ try:
+ installed_dist = self._installed_dists[name]
+ except KeyError:
+ return None
+ # Don't use the installed distribution if its version does not fit
+ # the current dependency graph.
+ if not specifier.contains(installed_dist.version, prereleases=True):
+ return None
+ candidate = self._make_candidate_from_dist(
+ dist=installed_dist,
+ extras=extras,
+ template=template,
+ )
+ # The candidate is a known incompatibility. Don't use it.
+ if id(candidate) in incompatible_ids:
+ return None
+ return candidate
+
+ def iter_index_candidate_infos() -> Iterator[IndexCandidateInfo]:
+ result = self._finder.find_best_candidate(
+ project_name=name,
+ specifier=specifier,
+ hashes=hashes,
+ )
+ icans = list(result.iter_applicable())
+
+ # PEP 592: Yanked releases are ignored unless the specifier
+ # explicitly pins a version (via '==' or '===') that can be
+ # solely satisfied by a yanked release.
+ all_yanked = all(ican.link.is_yanked for ican in icans)
+
+ def is_pinned(specifier: SpecifierSet) -> bool:
+ for sp in specifier:
+ if sp.operator == "===":
+ return True
+ if sp.operator != "==":
+ continue
+ if sp.version.endswith(".*"):
+ continue
+ return True
+ return False
+
+ pinned = is_pinned(specifier)
+
+ # PackageFinder returns earlier versions first, so we reverse.
+ for ican in reversed(icans):
+ if not (all_yanked and pinned) and ican.link.is_yanked:
+ continue
+ func = functools.partial(
+ self._make_candidate_from_link,
+ link=ican.link,
+ extras=extras,
+ template=template,
+ name=name,
+ version=ican.version,
+ )
+ yield ican.version, func
+
+ return FoundCandidates(
+ iter_index_candidate_infos,
+ _get_installed_candidate(),
+ prefers_installed,
+ incompatible_ids,
+ )
+
+ def _iter_explicit_candidates_from_base(
+ self,
+ base_requirements: Iterable[Requirement],
+ extras: FrozenSet[str],
+ ) -> Iterator[Candidate]:
+ """Produce explicit candidates from the base given an extra-ed package.
+
+ :param base_requirements: Requirements known to the resolver. The
+ requirements are guaranteed to not have extras.
+ :param extras: The extras to inject into the explicit requirements'
+ candidates.
+ """
+ for req in base_requirements:
+ lookup_cand, _ = req.get_candidate_lookup()
+ if lookup_cand is None: # Not explicit.
+ continue
+ # We've stripped extras from the identifier, and should always
+ # get a BaseCandidate here, unless there's a bug elsewhere.
+ base_cand = as_base_candidate(lookup_cand)
+ assert base_cand is not None, "no extras here"
+ yield self._make_extras_candidate(base_cand, extras)
+
+ def _iter_candidates_from_constraints(
+ self,
+ identifier: str,
+ constraint: Constraint,
+ template: InstallRequirement,
+ ) -> Iterator[Candidate]:
+ """Produce explicit candidates from constraints.
+
+ This creates "fake" InstallRequirement objects that are basically clones
+ of what "should" be the template, but with original_link set to link.
+ """
+ for link in constraint.links:
+ self._fail_if_link_is_unsupported_wheel(link)
+ candidate = self._make_candidate_from_link(
+ link,
+ extras=frozenset(),
+ template=install_req_from_link_and_ireq(link, template),
+ name=canonicalize_name(identifier),
+ version=None,
+ )
+ if candidate:
+ yield candidate
+
+ def find_candidates(
+ self,
+ identifier: str,
+ requirements: Mapping[str, Iterable[Requirement]],
+ incompatibilities: Mapping[str, Iterator[Candidate]],
+ constraint: Constraint,
+ prefers_installed: bool,
+ ) -> Iterable[Candidate]:
+ # Collect basic lookup information from the requirements.
+ explicit_candidates: Set[Candidate] = set()
+ ireqs: List[InstallRequirement] = []
+ for req in requirements[identifier]:
+ cand, ireq = req.get_candidate_lookup()
+ if cand is not None:
+ explicit_candidates.add(cand)
+ if ireq is not None:
+ ireqs.append(ireq)
+
+ # If the current identifier contains extras, add explicit candidates
+ # from entries from extra-less identifier.
+ with contextlib.suppress(InvalidRequirement):
+ parsed_requirement = get_requirement(identifier)
+ explicit_candidates.update(
+ self._iter_explicit_candidates_from_base(
+ requirements.get(parsed_requirement.name, ()),
+ frozenset(parsed_requirement.extras),
+ ),
+ )
+
+ # Add explicit candidates from constraints. We only do this if there are
+ # known ireqs, which represent requirements not already explicit. If
+ # there are no ireqs, we're constraining already-explicit requirements,
+ # which is handled later when we return the explicit candidates.
+ if ireqs:
+ try:
+ explicit_candidates.update(
+ self._iter_candidates_from_constraints(
+ identifier,
+ constraint,
+ template=ireqs[0],
+ ),
+ )
+ except UnsupportedWheel:
+ # If we're constrained to install a wheel incompatible with the
+ # target architecture, no candidates will ever be valid.
+ return ()
+
+ # Since we cache all the candidates, incompatibility identification
+ # can be made quicker by comparing only the id() values.
+ incompat_ids = {id(c) for c in incompatibilities.get(identifier, ())}
+
+ # If none of the requirements want an explicit candidate, we can ask
+ # the finder for candidates.
+ if not explicit_candidates:
+ return self._iter_found_candidates(
+ ireqs,
+ constraint.specifier,
+ constraint.hashes,
+ prefers_installed,
+ incompat_ids,
+ )
+
+ return (
+ c
+ for c in explicit_candidates
+ if id(c) not in incompat_ids
+ and constraint.is_satisfied_by(c)
+ and all(req.is_satisfied_by(c) for req in requirements[identifier])
+ )
+
+ def _make_requirement_from_install_req(
+ self, ireq: InstallRequirement, requested_extras: Iterable[str]
+ ) -> Optional[Requirement]:
+ if not ireq.match_markers(requested_extras):
+ logger.info(
+ "Ignoring %s: markers '%s' don't match your environment",
+ ireq.name,
+ ireq.markers,
+ )
+ return None
+ if not ireq.link:
+ return SpecifierRequirement(ireq)
+ self._fail_if_link_is_unsupported_wheel(ireq.link)
+ cand = self._make_candidate_from_link(
+ ireq.link,
+ extras=frozenset(ireq.extras),
+ template=ireq,
+ name=canonicalize_name(ireq.name) if ireq.name else None,
+ version=None,
+ )
+ if cand is None:
+ # There's no way we can satisfy a URL requirement if the underlying
+ # candidate fails to build. An unnamed URL must be user-supplied, so
+ # we fail eagerly. If the URL is named, an unsatisfiable requirement
+ # can make the resolver do the right thing, either backtrack (and
+ # maybe find some other requirement that's buildable) or raise a
+ # ResolutionImpossible eventually.
+ if not ireq.name:
+ raise self._build_failures[ireq.link]
+ return UnsatisfiableRequirement(canonicalize_name(ireq.name))
+ return self.make_requirement_from_candidate(cand)
+
+ def collect_root_requirements(
+ self, root_ireqs: List[InstallRequirement]
+ ) -> CollectedRootRequirements:
+ collected = CollectedRootRequirements([], {}, {})
+ for i, ireq in enumerate(root_ireqs):
+ if ireq.constraint:
+ # Ensure we only accept valid constraints
+ problem = check_invalid_constraint_type(ireq)
+ if problem:
+ raise InstallationError(problem)
+ if not ireq.match_markers():
+ continue
+ assert ireq.name, "Constraint must be named"
+ name = canonicalize_name(ireq.name)
+ if name in collected.constraints:
+ collected.constraints[name] &= ireq
+ else:
+ collected.constraints[name] = Constraint.from_ireq(ireq)
+ else:
+ req = self._make_requirement_from_install_req(
+ ireq,
+ requested_extras=(),
+ )
+ if req is None:
+ continue
+ if ireq.user_supplied and req.name not in collected.user_requested:
+ collected.user_requested[req.name] = i
+ collected.requirements.append(req)
+ return collected
+
+ def make_requirement_from_candidate(
+ self, candidate: Candidate
+ ) -> ExplicitRequirement:
+ return ExplicitRequirement(candidate)
+
+ def make_requirement_from_spec(
+ self,
+ specifier: str,
+ comes_from: Optional[InstallRequirement],
+ requested_extras: Iterable[str] = (),
+ ) -> Optional[Requirement]:
+ ireq = self._make_install_req_from_spec(specifier, comes_from)
+ return self._make_requirement_from_install_req(ireq, requested_extras)
+
+ def make_requires_python_requirement(
+ self,
+ specifier: SpecifierSet,
+ ) -> Optional[Requirement]:
+ if self._ignore_requires_python:
+ return None
+ # Don't bother creating a dependency for an empty Requires-Python.
+ if not str(specifier):
+ return None
+ return RequiresPythonRequirement(specifier, self._python_candidate)
+
+ def get_wheel_cache_entry(
+ self, link: Link, name: Optional[str]
+ ) -> Optional[CacheEntry]:
+ """Look up the link in the wheel cache.
+
+ If ``preparer.require_hashes`` is True, don't use the wheel cache,
+ because cached wheels, always built locally, have different hashes
+ than the files downloaded from the index server and thus throw false
+ hash mismatches. Furthermore, cached wheels at present have
+ nondeterministic contents due to file modification times.
+ """
+ if self._wheel_cache is None or self.preparer.require_hashes:
+ return None
+ return self._wheel_cache.get_cache_entry(
+ link=link,
+ package_name=name,
+ supported_tags=get_supported(),
+ )
+
+ def get_dist_to_uninstall(self, candidate: Candidate) -> Optional[BaseDistribution]:
+ # TODO: Are there more cases this needs to return True? Editable?
+ dist = self._installed_dists.get(candidate.project_name)
+ if dist is None: # Not installed, no uninstallation required.
+ return None
+
+ # We're installing into global site. The current installation must
+ # be uninstalled, no matter whether it's in the global or user site,
+ # because the user site installation takes precedence over the global one.
+ if not self._use_user_site:
+ return dist
+
+ # We're installing into user site. Remove the user site installation.
+ if dist.in_usersite:
+ return dist
+
+ # We're installing into user site, but the installed incompatible
+ # package is in global site. We can't uninstall that, and would let
+ # the new user installation "shadow" it. But shadowing won't work
+ # in virtual environments, so we error out.
+ if running_under_virtualenv() and dist.in_site_packages:
+ message = (
+ f"Will not install to the user site because it will lack "
+ f"sys.path precedence to {dist.raw_name} in {dist.location}"
+ )
+ raise InstallationError(message)
+ return None
+
+ def _report_requires_python_error(
+ self, causes: Sequence["ConflictCause"]
+ ) -> UnsupportedPythonVersion:
+ assert causes, "Requires-Python error reported with no cause"
+
+ version = self._python_candidate.version
+
+ if len(causes) == 1:
+ specifier = str(causes[0].requirement.specifier)
+ message = (
+ f"Package {causes[0].parent.name!r} requires a different "
+ f"Python: {version} not in {specifier!r}"
+ )
+ return UnsupportedPythonVersion(message)
+
+ message = f"Packages require a different Python. {version} not in:"
+ for cause in causes:
+ package = cause.parent.format_for_error()
+ specifier = str(cause.requirement.specifier)
+ message += f"\n{specifier!r} (required by {package})"
+ return UnsupportedPythonVersion(message)
+
+ def _report_single_requirement_conflict(
+ self, req: Requirement, parent: Optional[Candidate]
+ ) -> DistributionNotFound:
+ if parent is None:
+ req_disp = str(req)
+ else:
+ req_disp = f"{req} (from {parent.name})"
+
+ cands = self._finder.find_all_candidates(req.project_name)
+ skipped_by_requires_python = self._finder.requires_python_skipped_reasons()
+ versions = [str(v) for v in sorted({c.version for c in cands})]
+
+ if skipped_by_requires_python:
+ logger.critical(
+ "Ignored the following versions that require a different python "
+ "version: %s",
+ "; ".join(skipped_by_requires_python) or "none",
+ )
+ logger.critical(
+ "Could not find a version that satisfies the requirement %s "
+ "(from versions: %s)",
+ req_disp,
+ ", ".join(versions) or "none",
+ )
+ if str(req) == "requirements.txt":
+ logger.info(
+ "HINT: You are attempting to install a package literally "
+ 'named "requirements.txt" (which cannot exist). Consider '
+ "using the '-r' flag to install the packages listed in "
+ "requirements.txt"
+ )
+
+ return DistributionNotFound(f"No matching distribution found for {req}")
+
+ def get_installation_error(
+ self,
+ e: "ResolutionImpossible[Requirement, Candidate]",
+ constraints: Dict[str, Constraint],
+ ) -> InstallationError:
+
+ assert e.causes, "Installation error reported with no cause"
+
+ # If one of the things we can't solve is "we need Python X.Y",
+ # that is what we report.
+ requires_python_causes = [
+ cause
+ for cause in e.causes
+ if isinstance(cause.requirement, RequiresPythonRequirement)
+ and not cause.requirement.is_satisfied_by(self._python_candidate)
+ ]
+ if requires_python_causes:
+ # The comprehension above makes sure all Requirement instances are
+ # RequiresPythonRequirement, so let's cast for convenience.
+ return self._report_requires_python_error(
+ cast("Sequence[ConflictCause]", requires_python_causes),
+ )
+
+ # Otherwise, we have a set of causes which can't all be satisfied
+ # at once.
+
+ # The simplest case is when we have *one* cause that can't be
+ # satisfied. We just report that case.
+ if len(e.causes) == 1:
+ req, parent = e.causes[0]
+ if req.name not in constraints:
+ return self._report_single_requirement_conflict(req, parent)
+
+ # OK, we now have a list of requirements that can't all be
+ # satisfied at once.
+
+ # A couple of formatting helpers
+ def text_join(parts: List[str]) -> str:
+ if len(parts) == 1:
+ return parts[0]
+
+ return ", ".join(parts[:-1]) + " and " + parts[-1]
+
+ def describe_trigger(parent: Candidate) -> str:
+ ireq = parent.get_install_requirement()
+ if not ireq or not ireq.comes_from:
+ return f"{parent.name}=={parent.version}"
+ if isinstance(ireq.comes_from, InstallRequirement):
+ return str(ireq.comes_from.name)
+ return str(ireq.comes_from)
+
+ triggers = set()
+ for req, parent in e.causes:
+ if parent is None:
+ # This is a root requirement, so we can report it directly
+ trigger = req.format_for_error()
+ else:
+ trigger = describe_trigger(parent)
+ triggers.add(trigger)
+
+ if triggers:
+ info = text_join(sorted(triggers))
+ else:
+ info = "the requested packages"
+
+ msg = (
+ "Cannot install {} because these package versions "
+ "have conflicting dependencies.".format(info)
+ )
+ logger.critical(msg)
+ msg = "\nThe conflict is caused by:"
+
+ relevant_constraints = set()
+ for req, parent in e.causes:
+ if req.name in constraints:
+ relevant_constraints.add(req.name)
+ msg = msg + "\n "
+ if parent:
+ msg = msg + f"{parent.name} {parent.version} depends on "
+ else:
+ msg = msg + "The user requested "
+ msg = msg + req.format_for_error()
+ for key in relevant_constraints:
+ spec = constraints[key].specifier
+ msg += f"\n The user requested (constraint) {key}{spec}"
+
+ msg = (
+ msg
+ + "\n\n"
+ + "To fix this you could try to:\n"
+ + "1. loosen the range of package versions you've specified\n"
+ + "2. remove package versions to allow pip attempt to solve "
+ + "the dependency conflict\n"
+ )
+
+ logger.info(msg)
+
+ return DistributionNotFound(
+ "ResolutionImpossible: for help visit "
+ "https://pip.pypa.io/en/latest/topics/dependency-resolution/"
+ "#dealing-with-dependency-conflicts"
+ )
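The yanked-release handling in `_iter_found_candidates` above implements PEP 592: yanked files are skipped unless every applicable candidate is yanked and the specifier pins a version, where pinning means `===`, or `==` without a trailing `.*` wildcard. The pinning test, extracted as a standalone sketch over `packaging` specifiers:

```python
from packaging.specifiers import SpecifierSet


def is_pinned(specifier: SpecifierSet) -> bool:
    # A specifier pins iff it uses "===", or "==" without a ".*" wildcard.
    for sp in specifier:
        if sp.operator == "===":
            return True
        if sp.operator != "==":
            continue
        if sp.version.endswith(".*"):
            continue
        return True
    return False


assert is_pinned(SpecifierSet("==1.0"))
assert is_pinned(SpecifierSet("===1.0"))
assert not is_pinned(SpecifierSet("==1.*"))       # wildcard is not a pin
assert not is_pinned(SpecifierSet(">=1.0,<2.0"))  # ranges never pin
```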
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/found_candidates.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/found_candidates.py
new file mode 100644
index 0000000000..8663097b44
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/found_candidates.py
@@ -0,0 +1,155 @@
+"""Utilities to lazily create and visit candidates found.
+
+Creating and visiting a candidate is a *very* costly operation. It involves
+fetching, extracting, potentially building modules from source, and verifying
+distribution metadata. It is therefore crucial for performance to keep
+everything here lazy all the way down, so we only touch candidates that we
+absolutely need, and not "download the world" when we only need one version of
+something.
+"""
+
+import functools
+from collections.abc import Sequence
+from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Set, Tuple
+
+from pip._vendor.packaging.version import _BaseVersion
+
+from .base import Candidate
+
+IndexCandidateInfo = Tuple[_BaseVersion, Callable[[], Optional[Candidate]]]
+
+if TYPE_CHECKING:
+ SequenceCandidate = Sequence[Candidate]
+else:
+ # For compatibility: Python before 3.9 does not support using [] on the
+ # Sequence class.
+ #
+ # >>> from collections.abc import Sequence
+ # >>> Sequence[str]
+ # Traceback (most recent call last):
+ # File "<stdin>", line 1, in <module>
+ # TypeError: 'ABCMeta' object is not subscriptable
+ #
+ # TODO: Remove this block after dropping Python 3.8 support.
+ SequenceCandidate = Sequence
+
+
+def _iter_built(infos: Iterator[IndexCandidateInfo]) -> Iterator[Candidate]:
+ """Iterator for ``FoundCandidates``.
+
+ This iterator is used when the package is not already installed. Candidates
+ from the index come in their normal ordering.
+ """
+ versions_found: Set[_BaseVersion] = set()
+ for version, func in infos:
+ if version in versions_found:
+ continue
+ candidate = func()
+ if candidate is None:
+ continue
+ yield candidate
+ versions_found.add(version)
+
+
+def _iter_built_with_prepended(
+ installed: Candidate, infos: Iterator[IndexCandidateInfo]
+) -> Iterator[Candidate]:
+ """Iterator for ``FoundCandidates``.
+
+ This iterator is used when the resolver prefers the already-installed
+ candidate and NOT to upgrade. The installed candidate is therefore
+ always yielded first, and candidates from index come later in their
+ normal ordering, except skipped when the version is already installed.
+ """
+ yield installed
+ versions_found: Set[_BaseVersion] = {installed.version}
+ for version, func in infos:
+ if version in versions_found:
+ continue
+ candidate = func()
+ if candidate is None:
+ continue
+ yield candidate
+ versions_found.add(version)
+
+
+def _iter_built_with_inserted(
+ installed: Candidate, infos: Iterator[IndexCandidateInfo]
+) -> Iterator[Candidate]:
+ """Iterator for ``FoundCandidates``.
+
+ This iterator is used when the resolver prefers to upgrade an
+ already-installed package. Candidates from index are returned in their
+ normal ordering, except replaced when the version is already installed.
+
+ The implementation iterates through and yields other candidates, inserting
+ the installed candidate exactly once before we start yielding older or
+ equivalent candidates, or after all other candidates if they are all newer.
+ """
+ versions_found: Set[_BaseVersion] = set()
+ for version, func in infos:
+ if version in versions_found:
+ continue
+ # If the installed candidate is better, yield it first.
+ if installed.version >= version:
+ yield installed
+ versions_found.add(installed.version)
+ candidate = func()
+ if candidate is None:
+ continue
+ yield candidate
+ versions_found.add(version)
+
+ # The installed candidate is older than all index candidates; yield it last.
+ if installed.version not in versions_found:
+ yield installed
+
+
+class FoundCandidates(SequenceCandidate):
+ """A lazy sequence to provide candidates to the resolver.
+
+ The intended usage is to return this from `find_matches()` so the resolver
+ can iterate through the sequence multiple times, but only access the index
+ page when remote packages are actually needed. This improves performance
+ when suitable candidates are already installed on disk.
+ """
+
+ def __init__(
+ self,
+ get_infos: Callable[[], Iterator[IndexCandidateInfo]],
+ installed: Optional[Candidate],
+ prefers_installed: bool,
+ incompatible_ids: Set[int],
+ ):
+ self._get_infos = get_infos
+ self._installed = installed
+ self._prefers_installed = prefers_installed
+ self._incompatible_ids = incompatible_ids
+
+ def __getitem__(self, index: Any) -> Any:
+ # Implemented to satisfy the ABC check. This is not needed by the
+ # resolver, and should not be used by the provider either (for
+ # performance reasons).
+ raise NotImplementedError("don't do this")
+
+ def __iter__(self) -> Iterator[Candidate]:
+ infos = self._get_infos()
+ if not self._installed:
+ iterator = _iter_built(infos)
+ elif self._prefers_installed:
+ iterator = _iter_built_with_prepended(self._installed, infos)
+ else:
+ iterator = _iter_built_with_inserted(self._installed, infos)
+ return (c for c in iterator if id(c) not in self._incompatible_ids)
+
+ def __len__(self) -> int:
+ # Implemented to satisfy the ABC check. This is not needed by the
+ # resolver, and should not be used by the provider either (for
+ # performance reasons).
+ raise NotImplementedError("don't do this")
+
+ @functools.lru_cache(maxsize=1)
+ def __bool__(self) -> bool:
+ if self._prefers_installed and self._installed:
+ return True
+ return any(self)
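The module docstring's laziness promise is easiest to see in `_iter_built`: each index entry pairs a version with a zero-argument builder, the builder only runs when the iterator actually reaches that version, and duplicate versions are never built twice. A toy run, with strings standing in for built `Candidate` objects:

```python
from packaging.version import Version

built = []  # records which versions had their "expensive" build step run


def make_info(v):
    def build():
        built.append(v)
        return f"candidate {v}"
    return (Version(v), build)


# Newest-first, with a duplicate 2.0 (e.g. the same version on two indexes).
infos = [make_info(v) for v in ("3.0", "2.0", "2.0", "1.0")]


def iter_built(infos):
    versions_found = set()
    for version, func in infos:
        if version in versions_found:  # dedupe by version, never build twice
            continue
        candidate = func()
        if candidate is None:          # failed builds are simply skipped
            continue
        yield candidate
        versions_found.add(version)


# Taking only the first candidate builds only the first version:
assert next(iter_built(infos)) == "candidate 3.0" and built == ["3.0"]

# Consuming everything builds each distinct version exactly once:
built.clear()
assert list(iter_built(infos)) == [f"candidate {v}" for v in ("3.0", "2.0", "1.0")]
assert built == ["3.0", "2.0", "1.0"]
```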
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/provider.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/provider.py
new file mode 100644
index 0000000000..6300dfc57f
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/provider.py
@@ -0,0 +1,248 @@
+import collections
+import math
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ Iterator,
+ Mapping,
+ Sequence,
+ TypeVar,
+ Union,
+)
+
+from pip._vendor.resolvelib.providers import AbstractProvider
+
+from .base import Candidate, Constraint, Requirement
+from .candidates import REQUIRES_PYTHON_IDENTIFIER
+from .factory import Factory
+
+if TYPE_CHECKING:
+ from pip._vendor.resolvelib.providers import Preference
+ from pip._vendor.resolvelib.resolvers import RequirementInformation
+
+ PreferenceInformation = RequirementInformation[Requirement, Candidate]
+
+ _ProviderBase = AbstractProvider[Requirement, Candidate, str]
+else:
+ _ProviderBase = AbstractProvider
+
+# Notes on the relationship between the provider, the factory, and the
+# candidate and requirement classes.
+#
+# The provider is a direct implementation of the resolvelib class. Its role
+# is to deliver the API that resolvelib expects.
+#
+# Rather than work with completely abstract "requirement" and "candidate"
+# concepts as resolvelib does, pip has concrete classes implementing these two
+# ideas. The API of the Requirement and Candidate objects is defined in the
+# base classes, but essentially maps fairly directly to the equivalent provider
+# methods. In particular, `find_matches` and `is_satisfied_by` are
+# requirement methods, and `get_dependencies` is a candidate method.
+#
+# The factory is the interface to pip's internal mechanisms. It is stateless,
+# and is created by the resolver and held as a property of the provider. It is
+# responsible for creating Requirement and Candidate objects, and provides
+# services to those objects (access to pip's finder and preparer).
+
+
+D = TypeVar("D")
+V = TypeVar("V")
+
+
+def _get_with_identifier(
+ mapping: Mapping[str, V],
+ identifier: str,
+ default: D,
+) -> Union[D, V]:
+ """Get item from a package name lookup mapping with a resolver identifier.
+
+ This extra logic is needed when the target mapping is keyed by package
+ name, which cannot be directly looked up with an identifier (which may
+ contain requested extras). Additional logic is added to also look up a value
+ by "cleaning up" the extras from the identifier.
+ """
+ if identifier in mapping:
+ return mapping[identifier]
+ # HACK: Theoretically we should check whether this identifier is a valid
+ # "NAME[EXTRAS]" format, and parse out the name part with packaging or
+ # some regular expression. But since pip's resolver only spits out three
+ # kinds of identifiers: normalized PEP 503 names, normalized names plus
+ # extras, and Requires-Python, we can cheat a bit here.
+ name, open_bracket, _ = identifier.partition("[")
+ if open_bracket and name in mapping:
+ return mapping[name]
+ return default
+
+
+class PipProvider(_ProviderBase):
+ """Pip's provider implementation for resolvelib.
+
+ :param constraints: A mapping of constraints specified by the user. Keys
+ are canonicalized project names.
+ :param ignore_dependencies: Whether the user specified ``--no-deps``.
+ :param upgrade_strategy: The user-specified upgrade strategy.
+ :param user_requested: A mapping of canonicalized package names that the
+ user supplied for pip to install/upgrade, to their command-line order.
+ """
+
+ def __init__(
+ self,
+ factory: Factory,
+ constraints: Dict[str, Constraint],
+ ignore_dependencies: bool,
+ upgrade_strategy: str,
+ user_requested: Dict[str, int],
+ ) -> None:
+ self._factory = factory
+ self._constraints = constraints
+ self._ignore_dependencies = ignore_dependencies
+ self._upgrade_strategy = upgrade_strategy
+ self._user_requested = user_requested
+ self._known_depths: Dict[str, float] = collections.defaultdict(lambda: math.inf)
+
+ def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str:
+ return requirement_or_candidate.name
+
+ def get_preference( # type: ignore
+ self,
+ identifier: str,
+ resolutions: Mapping[str, Candidate],
+ candidates: Mapping[str, Iterator[Candidate]],
+ information: Mapping[str, Iterable["PreferenceInformation"]],
+ backtrack_causes: Sequence["PreferenceInformation"],
+ ) -> "Preference":
+ """Produce a sort key for given requirement based on preference.
+
+ The lower the return value is, the more preferred this group of
+ arguments is.
+
+ Currently pip considers the following in order:
+
+ * Prefer if any of the known requirements is "direct", e.g. points to an
+ explicit URL.
+ * If equal, prefer if any requirement is "pinned", i.e. contains
+ operator ``===`` or ``==``.
+ * If equal, calculate an approximate "depth" and resolve requirements
+ closer to the user-specified requirements first.
+ * Order user-specified requirements by the order they are specified.
+ * If equal, prefer "non-free" requirements, i.e. those containing at least one
+ operator, such as ``>=`` or ``<``.
+ * If equal, order alphabetically for consistency (helps debuggability).
+ """
+ lookups = (r.get_candidate_lookup() for r, _ in information[identifier])
+ candidate, ireqs = zip(*lookups)
+ operators = [
+ specifier.operator
+ for specifier_set in (ireq.specifier for ireq in ireqs if ireq)
+ for specifier in specifier_set
+ ]
+
+ direct = candidate is not None
+ pinned = any(op[:2] == "==" for op in operators)
+ unfree = bool(operators)
+
+ try:
+ requested_order: Union[int, float] = self._user_requested[identifier]
+ except KeyError:
+ requested_order = math.inf
+ parent_depths = (
+ self._known_depths[parent.name] if parent is not None else 0.0
+ for _, parent in information[identifier]
+ )
+ inferred_depth = min(d for d in parent_depths) + 1.0
+ else:
+ inferred_depth = 1.0
+ self._known_depths[identifier] = inferred_depth
+
+ requested_order = self._user_requested.get(identifier, math.inf)
+
+ # Requires-Python has only one candidate and the check is basically
+ # free, so we always do it first to avoid needless work if it fails.
+ requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER
+
+ # HACK: Setuptools has a very long and solid backward-compatibility
+ # track record, and extremely few projects would request a narrow,
+ # non-recent version range of it, since that would break a lot of things.
+ # (Most projects specify it only to request an installer feature,
+ # which does not work, but that's another topic.) Intentionally
+ # delaying Setuptools helps reduce the branches the resolver has to check.
+ # This serves as a temporary fix for issues like "apache-airflow[all]"
+ # while we work on "proper" branch pruning techniques.
+ delay_this = identifier == "setuptools"
+
+ # Prefer the causes of backtracking on the assumption that the problem
+ # resolving the dependency tree is related to the failures that caused
+ # the backtracking
+ backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes)
+
+ return (
+ not requires_python,
+ delay_this,
+ not direct,
+ not pinned,
+ not backtrack_cause,
+ inferred_depth,
+ requested_order,
+ not unfree,
+ identifier,
+ )
+
+ def find_matches(
+ self,
+ identifier: str,
+ requirements: Mapping[str, Iterator[Requirement]],
+ incompatibilities: Mapping[str, Iterator[Candidate]],
+ ) -> Iterable[Candidate]:
+ def _eligible_for_upgrade(identifier: str) -> bool:
+ """Are upgrades allowed for this project?
+
+ This checks the upgrade strategy, and whether the project was one
+ that the user specified in the command line, in order to decide
+ whether we should upgrade if there's a newer version available.
+
+ (Note that we don't need access to the `--upgrade` flag, because
+ an upgrade strategy of "to-satisfy-only" means that `--upgrade`
+ was not specified).
+ """
+ if self._upgrade_strategy == "eager":
+ return True
+ elif self._upgrade_strategy == "only-if-needed":
+ user_order = _get_with_identifier(
+ self._user_requested,
+ identifier,
+ default=None,
+ )
+ return user_order is not None
+ return False
+
+ constraint = _get_with_identifier(
+ self._constraints,
+ identifier,
+ default=Constraint.empty(),
+ )
+ return self._factory.find_candidates(
+ identifier=identifier,
+ requirements=requirements,
+ constraint=constraint,
+ prefers_installed=(not _eligible_for_upgrade(identifier)),
+ incompatibilities=incompatibilities,
+ )
+
+ def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
+ return requirement.is_satisfied_by(candidate)
+
+ def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]:
+ with_requires = not self._ignore_dependencies
+ return [r for r in candidate.iter_dependencies(with_requires) if r is not None]
+
+ @staticmethod
+ def is_backtrack_cause(
+ identifier: str, backtrack_causes: Sequence["PreferenceInformation"]
+ ) -> bool:
+ for backtrack_cause in backtrack_causes:
+ if identifier == backtrack_cause.requirement.name:
+ return True
+ if backtrack_cause.parent and identifier == backtrack_cause.parent.name:
+ return True
+ return False
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/reporter.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/reporter.py
new file mode 100644
index 0000000000..6ced5329b8
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/reporter.py
@@ -0,0 +1,68 @@
+from collections import defaultdict
+from logging import getLogger
+from typing import Any, DefaultDict
+
+from pip._vendor.resolvelib.reporters import BaseReporter
+
+from .base import Candidate, Requirement
+
+logger = getLogger(__name__)
+
+
+class PipReporter(BaseReporter):
+ def __init__(self) -> None:
+ self.backtracks_by_package: DefaultDict[str, int] = defaultdict(int)
+
+ self._messages_at_backtrack = {
+ 1: (
+ "pip is looking at multiple versions of {package_name} to "
+ "determine which version is compatible with other "
+ "requirements. This could take a while."
+ ),
+ 8: (
+ "pip is looking at multiple versions of {package_name} to "
+ "determine which version is compatible with other "
+ "requirements. This could take a while."
+ ),
+ 13: (
+ "This is taking longer than usual. You might need to provide "
+ "the dependency resolver with stricter constraints to reduce "
+ "runtime. See https://pip.pypa.io/warnings/backtracking for "
+ "guidance. If you want to abort this run, press Ctrl + C."
+ ),
+ }
+
+ def backtracking(self, candidate: Candidate) -> None:
+ self.backtracks_by_package[candidate.name] += 1
+
+ count = self.backtracks_by_package[candidate.name]
+ if count not in self._messages_at_backtrack:
+ return
+
+ message = self._messages_at_backtrack[count]
+ logger.info("INFO: %s", message.format(package_name=candidate.name))
+
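A toy illustration of the thresholds above (the package name is made up): only the 1st, 8th and 13th backtrack on a given package emit a hint, so a long resolution does not flood the log.

from collections import defaultdict

counts = defaultdict(int)
thresholds = {1, 8, 13}
for _ in range(15):
    counts["cryptography"] += 1
    if counts["cryptography"] in thresholds:
        print(f"hint logged at backtrack #{counts['cryptography']}")
# Prints exactly three times: at backtracks 1, 8 and 13.
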
+
+class PipDebuggingReporter(BaseReporter):
+ """A reporter that does an info log for every event it sees."""
+
+ def starting(self) -> None:
+ logger.info("Reporter.starting()")
+
+ def starting_round(self, index: int) -> None:
+ logger.info("Reporter.starting_round(%r)", index)
+
+ def ending_round(self, index: int, state: Any) -> None:
+ logger.info("Reporter.ending_round(%r, state)", index)
+
+ def ending(self, state: Any) -> None:
+ logger.info("Reporter.ending(%r)", state)
+
+ def adding_requirement(self, requirement: Requirement, parent: Candidate) -> None:
+ logger.info("Reporter.adding_requirement(%r, %r)", requirement, parent)
+
+ def backtracking(self, candidate: Candidate) -> None:
+ logger.info("Reporter.backtracking(%r)", candidate)
+
+ def pinning(self, candidate: Candidate) -> None:
+ logger.info("Reporter.pinning(%r)", candidate)
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/requirements.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/requirements.py
new file mode 100644
index 0000000000..f561f1f1e2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/requirements.py
@@ -0,0 +1,166 @@
+from pip._vendor.packaging.specifiers import SpecifierSet
+from pip._vendor.packaging.utils import NormalizedName, canonicalize_name
+
+from pip._internal.req.req_install import InstallRequirement
+
+from .base import Candidate, CandidateLookup, Requirement, format_name
+
+
+class ExplicitRequirement(Requirement):
+ def __init__(self, candidate: Candidate) -> None:
+ self.candidate = candidate
+
+ def __str__(self) -> str:
+ return str(self.candidate)
+
+ def __repr__(self) -> str:
+ return "{class_name}({candidate!r})".format(
+ class_name=self.__class__.__name__,
+ candidate=self.candidate,
+ )
+
+ @property
+ def project_name(self) -> NormalizedName:
+ # No need to canonicalize - the candidate did this
+ return self.candidate.project_name
+
+ @property
+ def name(self) -> str:
+ # No need to canonicalize - the candidate did this
+ return self.candidate.name
+
+ def format_for_error(self) -> str:
+ return self.candidate.format_for_error()
+
+ def get_candidate_lookup(self) -> CandidateLookup:
+ return self.candidate, None
+
+ def is_satisfied_by(self, candidate: Candidate) -> bool:
+ return candidate == self.candidate
+
+
+class SpecifierRequirement(Requirement):
+ def __init__(self, ireq: InstallRequirement) -> None:
+ assert ireq.link is None, "This is a link, not a specifier"
+ self._ireq = ireq
+ self._extras = frozenset(ireq.extras)
+
+ def __str__(self) -> str:
+ return str(self._ireq.req)
+
+ def __repr__(self) -> str:
+ return "{class_name}({requirement!r})".format(
+ class_name=self.__class__.__name__,
+ requirement=str(self._ireq.req),
+ )
+
+ @property
+ def project_name(self) -> NormalizedName:
+ assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
+ return canonicalize_name(self._ireq.req.name)
+
+ @property
+ def name(self) -> str:
+ return format_name(self.project_name, self._extras)
+
+ def format_for_error(self) -> str:
+
+ # Convert comma-separated specifiers into "A, B, ..., F and G"
+ # This makes the specifier a bit more "human readable", without
+ # risking a change in meaning. (Hopefully! Not all edge cases have
+ # been checked)
+ parts = [s.strip() for s in str(self).split(",")]
+ if len(parts) == 0:
+ return ""
+ elif len(parts) == 1:
+ return parts[0]
+
+ return ", ".join(parts[:-1]) + " and " + parts[-1]
+
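For example, with a made-up requirement string, the comma-to-"and" rewrite above behaves like this:

parts = [s.strip() for s in "pkg>=1.0,<2.0,!=1.5".split(",")]
print(", ".join(parts[:-1]) + " and " + parts[-1])
# -> pkg>=1.0, <2.0 and !=1.5
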
+ def get_candidate_lookup(self) -> CandidateLookup:
+ return None, self._ireq
+
+ def is_satisfied_by(self, candidate: Candidate) -> bool:
+ assert candidate.name == self.name, (
+ f"Internal issue: Candidate is not for this requirement "
+ f"{candidate.name} vs {self.name}"
+ )
+ # We can safely always allow prereleases here since PackageFinder
+ # already implements the prerelease logic, and would have filtered out
+ # prerelease candidates if the user does not expect them.
+ assert self._ireq.req, "Specifier-backed ireq is always PEP 508"
+ spec = self._ireq.req.specifier
+ return spec.contains(candidate.version, prereleases=True)
+
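The prerelease behaviour relied on above can be observed directly with the vendored packaging API (a sketch; the specifier and versions are made up):

from pip._vendor.packaging.specifiers import SpecifierSet

spec = SpecifierSet(">=3.8")
print(spec.contains("3.13.0rc1"))                    # False: prereleases excluded by default
print(spec.contains("3.13.0rc1", prereleases=True))  # True: the behaviour used above
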
+
+class RequiresPythonRequirement(Requirement):
+ """A requirement representing Requires-Python metadata."""
+
+ def __init__(self, specifier: SpecifierSet, match: Candidate) -> None:
+ self.specifier = specifier
+ self._candidate = match
+
+ def __str__(self) -> str:
+ return f"Python {self.specifier}"
+
+ def __repr__(self) -> str:
+ return "{class_name}({specifier!r})".format(
+ class_name=self.__class__.__name__,
+ specifier=str(self.specifier),
+ )
+
+ @property
+ def project_name(self) -> NormalizedName:
+ return self._candidate.project_name
+
+ @property
+ def name(self) -> str:
+ return self._candidate.name
+
+ def format_for_error(self) -> str:
+ return str(self)
+
+ def get_candidate_lookup(self) -> CandidateLookup:
+ if self.specifier.contains(self._candidate.version, prereleases=True):
+ return self._candidate, None
+ return None, None
+
+ def is_satisfied_by(self, candidate: Candidate) -> bool:
+ assert candidate.name == self._candidate.name, "Not Python candidate"
+ # We can safely always allow prereleases here since PackageFinder
+ # already implements the prerelease logic, and would have filtered out
+ # prerelease candidates if the user does not expect them.
+ return self.specifier.contains(candidate.version, prereleases=True)
+
+
+class UnsatisfiableRequirement(Requirement):
+ """A requirement that cannot be satisfied."""
+
+ def __init__(self, name: NormalizedName) -> None:
+ self._name = name
+
+ def __str__(self) -> str:
+ return f"{self._name} (unavailable)"
+
+ def __repr__(self) -> str:
+ return "{class_name}({name!r})".format(
+ class_name=self.__class__.__name__,
+ name=str(self._name),
+ )
+
+ @property
+ def project_name(self) -> NormalizedName:
+ return self._name
+
+ @property
+ def name(self) -> str:
+ return self._name
+
+ def format_for_error(self) -> str:
+ return str(self)
+
+ def get_candidate_lookup(self) -> CandidateLookup:
+ return None, None
+
+ def is_satisfied_by(self, candidate: Candidate) -> bool:
+ return False
diff --git a/third_party/python/pip/pip/_internal/resolution/resolvelib/resolver.py b/third_party/python/pip/pip/_internal/resolution/resolvelib/resolver.py
new file mode 100644
index 0000000000..a605d6c254
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/resolution/resolvelib/resolver.py
@@ -0,0 +1,296 @@
+import functools
+import logging
+import os
+from typing import TYPE_CHECKING, Dict, List, Optional, Set, Tuple, cast
+
+from pip._vendor.packaging.utils import canonicalize_name
+from pip._vendor.resolvelib import BaseReporter, ResolutionImpossible
+from pip._vendor.resolvelib import Resolver as RLResolver
+from pip._vendor.resolvelib.structs import DirectedGraph
+
+from pip._internal.cache import WheelCache
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.operations.prepare import RequirementPreparer
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.req.req_set import RequirementSet
+from pip._internal.resolution.base import BaseResolver, InstallRequirementProvider
+from pip._internal.resolution.resolvelib.provider import PipProvider
+from pip._internal.resolution.resolvelib.reporter import (
+ PipDebuggingReporter,
+ PipReporter,
+)
+
+from .base import Candidate, Requirement
+from .factory import Factory
+
+if TYPE_CHECKING:
+ from pip._vendor.resolvelib.resolvers import Result as RLResult
+
+ Result = RLResult[Requirement, Candidate, str]
+
+
+logger = logging.getLogger(__name__)
+
+
+class Resolver(BaseResolver):
+ _allowed_strategies = {"eager", "only-if-needed", "to-satisfy-only"}
+
+ def __init__(
+ self,
+ preparer: RequirementPreparer,
+ finder: PackageFinder,
+ wheel_cache: Optional[WheelCache],
+ make_install_req: InstallRequirementProvider,
+ use_user_site: bool,
+ ignore_dependencies: bool,
+ ignore_installed: bool,
+ ignore_requires_python: bool,
+ force_reinstall: bool,
+ upgrade_strategy: str,
+ py_version_info: Optional[Tuple[int, ...]] = None,
+ ):
+ super().__init__()
+ assert upgrade_strategy in self._allowed_strategies
+
+ self.factory = Factory(
+ finder=finder,
+ preparer=preparer,
+ make_install_req=make_install_req,
+ wheel_cache=wheel_cache,
+ use_user_site=use_user_site,
+ force_reinstall=force_reinstall,
+ ignore_installed=ignore_installed,
+ ignore_requires_python=ignore_requires_python,
+ py_version_info=py_version_info,
+ )
+ self.ignore_dependencies = ignore_dependencies
+ self.upgrade_strategy = upgrade_strategy
+ self._result: Optional[Result] = None
+
+ def resolve(
+ self, root_reqs: List[InstallRequirement], check_supported_wheels: bool
+ ) -> RequirementSet:
+ collected = self.factory.collect_root_requirements(root_reqs)
+ provider = PipProvider(
+ factory=self.factory,
+ constraints=collected.constraints,
+ ignore_dependencies=self.ignore_dependencies,
+ upgrade_strategy=self.upgrade_strategy,
+ user_requested=collected.user_requested,
+ )
+ if "PIP_RESOLVER_DEBUG" in os.environ:
+ reporter: BaseReporter = PipDebuggingReporter()
+ else:
+ reporter = PipReporter()
+ resolver: RLResolver[Requirement, Candidate, str] = RLResolver(
+ provider,
+ reporter,
+ )
+
+ try:
+ try_to_avoid_resolution_too_deep = 2000000
+ result = self._result = resolver.resolve(
+ collected.requirements, max_rounds=try_to_avoid_resolution_too_deep
+ )
+
+ except ResolutionImpossible as e:
+ error = self.factory.get_installation_error(
+ cast("ResolutionImpossible[Requirement, Candidate]", e),
+ collected.constraints,
+ )
+ raise error from e
+
+ req_set = RequirementSet(check_supported_wheels=check_supported_wheels)
+ for candidate in result.mapping.values():
+ ireq = candidate.get_install_requirement()
+ if ireq is None:
+ continue
+
+ # Check if there is already an installation under the same name,
+ # and set a flag for later stages to uninstall it, if needed.
+ installed_dist = self.factory.get_dist_to_uninstall(candidate)
+ if installed_dist is None:
+ # There is no existing installation -- nothing to uninstall.
+ ireq.should_reinstall = False
+ elif self.factory.force_reinstall:
+ # The --force-reinstall flag is set -- reinstall.
+ ireq.should_reinstall = True
+ elif installed_dist.version != candidate.version:
+ # The installation is different in version -- reinstall.
+ ireq.should_reinstall = True
+ elif candidate.is_editable or installed_dist.editable:
+ # The incoming distribution is editable, or different in
+ # editable-ness to installation -- reinstall.
+ ireq.should_reinstall = True
+ elif candidate.source_link and candidate.source_link.is_file:
+ # The incoming distribution is under file://
+ if candidate.source_link.is_wheel:
+ # is a local wheel -- do nothing.
+ logger.info(
+ "%s is already installed with the same version as the "
+ "provided wheel. Use --force-reinstall to force an "
+ "installation of the wheel.",
+ ireq.name,
+ )
+ continue
+
+ # is a local sdist or path -- reinstall
+ ireq.should_reinstall = True
+ else:
+ continue
+
+ link = candidate.source_link
+ if link and link.is_yanked:
+ # The reason can contain non-ASCII characters.
+ msg = (
+ "The candidate selected for download or install is a "
+ "yanked version: {name!r} candidate (version {version} "
+ "at {link})\nReason for being yanked: {reason}"
+ ).format(
+ name=candidate.name,
+ version=candidate.version,
+ link=link,
+ reason=link.yanked_reason or "<none given>",
+ )
+ logger.warning(msg)
+
+ req_set.add_named_requirement(ireq)
+
+ reqs = req_set.all_requirements
+ self.factory.preparer.prepare_linked_requirements_more(reqs)
+ return req_set
+
+ def get_installation_order(
+ self, req_set: RequirementSet
+ ) -> List[InstallRequirement]:
+ """Get order for installation of requirements in RequirementSet.
+
+ The returned list contains a requirement before another that depends on
+ it. This helps ensure that the environment is kept consistent as they
+ get installed one-by-one.
+
+ The current implementation creates a topological ordering of the
+ dependency graph, giving more weight to packages with less
+ or no dependencies, while breaking any cycles in the graph at
+ arbitrary points. We make no guarantees about where the cycle
+ would be broken, other than it *would* be broken.
+ """
+ assert self._result is not None, "must call resolve() first"
+
+ if not req_set.requirements:
+ # Nothing is left to install, so we do not need an order.
+ return []
+
+ graph = self._result.graph
+ weights = get_topological_weights(graph, set(req_set.requirements.keys()))
+
+ sorted_items = sorted(
+ req_set.requirements.items(),
+ key=functools.partial(_req_set_item_sorter, weights=weights),
+ reverse=True,
+ )
+ return [ireq for _, ireq in sorted_items]
+
+
+def get_topological_weights(
+ graph: "DirectedGraph[Optional[str]]", requirement_keys: Set[str]
+) -> Dict[Optional[str], int]:
+ """Assign weights to each node based on how "deep" they are.
+
+ This implementation may change at any point in the future without prior
+ notice.
+
+ We first simplify the dependency graph by pruning any leaves and giving them
+ the highest weight: a package without any dependencies should be installed
+ first. This is done again and again in the same way, giving ever less weight
+ to the newly found leaves. The loop stops when no leaves are left: all
+ remaining packages have at least one dependency left in the graph.
+
+ Then we continue with the remaining graph, by taking the length of the
+ longest path to any node from the root, ignoring any paths that contain
+ a single node twice (i.e. cycles). This is done through a depth-first
+ search through the graph, while keeping track of the path to the node.
+
+ Cycles in the graph would result in a node being revisited while also
+ being on its own path. In this case, we take no action. This helps
+ ensure we don't get stuck in a cycle.
+
+ When assigning weight, the longer path (i.e. larger length) is preferred.
+
+ We are only interested in the weights of packages that are in the
+ requirement_keys.
+ """
+ path: Set[Optional[str]] = set()
+ weights: Dict[Optional[str], int] = {}
+
+ def visit(node: Optional[str]) -> None:
+ if node in path:
+ # We hit a cycle, so we'll break it here.
+ return
+
+ # Time to visit the children!
+ path.add(node)
+ for child in graph.iter_children(node):
+ visit(child)
+ path.remove(node)
+
+ if node not in requirement_keys:
+ return
+
+ last_known_parent_count = weights.get(node, 0)
+ weights[node] = max(last_known_parent_count, len(path))
+
+ # Simplify the graph, pruning leaves that have no dependencies.
+ # This is needed for large graphs (say over 200 packages) because the
+ # `visit` function is exponentially slower in that case, taking minutes.
+ # See https://github.com/pypa/pip/issues/10557
+ # We will loop until we explicitly break the loop.
+ while True:
+ leaves = set()
+ for key in graph:
+ if key is None:
+ continue
+ for _child in graph.iter_children(key):
+ # This means we have at least one child
+ break
+ else:
+ # No child.
+ leaves.add(key)
+ if not leaves:
+ # We are done simplifying.
+ break
+ # Calculate the weight for the leaves.
+ weight = len(graph) - 1
+ for leaf in leaves:
+ if leaf not in requirement_keys:
+ continue
+ weights[leaf] = weight
+ # Remove the leaves from the graph, making it simpler.
+ for leaf in leaves:
+ graph.remove(leaf)
+
+ # Visit the remaining graph.
+ # `None` is guaranteed to be the root node by resolvelib.
+ visit(None)
+
+ # Sanity check: all requirement keys should be in the weights,
+ # and no other keys should be in the weights.
+ difference = set(weights.keys()).difference(requirement_keys)
+ assert not difference, difference
+
+ return weights
+
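A tiny worked example, assuming the vendored resolvelib graph API and made-up package names, with this module's get_topological_weights in scope. None is the root; "app" depends on "lib", which depends on "base".

from pip._vendor.resolvelib.structs import DirectedGraph

graph = DirectedGraph()
for node in (None, "app", "lib", "base"):
    graph.add(node)
graph.connect(None, "app")
graph.connect("app", "lib")
graph.connect("lib", "base")

print(get_topological_weights(graph, {"app", "lib", "base"}))
# -> {'base': 3, 'lib': 2, 'app': 1}; sorted by weight in reverse,
#    "base" installs first and "app" last.
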
+
+def _req_set_item_sorter(
+ item: Tuple[str, InstallRequirement],
+ weights: Dict[Optional[str], int],
+) -> Tuple[int, str]:
+ """Key function used to sort install requirements for installation.
+
+ Based on the "weight" mapping calculated in ``get_installation_order()``.
+ The canonical package name is returned as the second member as a tie-
+ breaker to ensure the result is predictable, which is useful in tests.
+ """
+ name = canonicalize_name(item[0])
+ return weights[name], name
diff --git a/third_party/python/pip/pip/_internal/self_outdated_check.py b/third_party/python/pip/pip/_internal/self_outdated_check.py
new file mode 100644
index 0000000000..41cc42c567
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/self_outdated_check.py
@@ -0,0 +1,242 @@
+import datetime
+import functools
+import hashlib
+import json
+import logging
+import optparse
+import os.path
+import sys
+from dataclasses import dataclass
+from typing import Any, Callable, Dict, Optional
+
+from pip._vendor.packaging.version import parse as parse_version
+from pip._vendor.rich.console import Group
+from pip._vendor.rich.markup import escape
+from pip._vendor.rich.text import Text
+
+from pip._internal.index.collector import LinkCollector
+from pip._internal.index.package_finder import PackageFinder
+from pip._internal.metadata import get_default_environment
+from pip._internal.metadata.base import DistributionVersion
+from pip._internal.models.selection_prefs import SelectionPreferences
+from pip._internal.network.session import PipSession
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.entrypoints import (
+ get_best_invocation_for_this_pip,
+ get_best_invocation_for_this_python,
+)
+from pip._internal.utils.filesystem import adjacent_tmp_file, check_path_owner, replace
+from pip._internal.utils.misc import ensure_dir
+
+_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
+
+
+logger = logging.getLogger(__name__)
+
+
+def _get_statefile_name(key: str) -> str:
+ key_bytes = key.encode()
+ name = hashlib.sha224(key_bytes).hexdigest()
+ return name
+
+
+class SelfCheckState:
+ def __init__(self, cache_dir: str) -> None:
+ self._state: Dict[str, Any] = {}
+ self._statefile_path = None
+
+ # Try to load the existing state
+ if cache_dir:
+ self._statefile_path = os.path.join(
+ cache_dir, "selfcheck", _get_statefile_name(self.key)
+ )
+ try:
+ with open(self._statefile_path, encoding="utf-8") as statefile:
+ self._state = json.load(statefile)
+ except (OSError, ValueError, KeyError):
+ # Explicitly suppressing exceptions, since we don't want to
+ # error out if the cache file is invalid.
+ pass
+
+ @property
+ def key(self) -> str:
+ return sys.prefix
+
+ def get(self, current_time: datetime.datetime) -> Optional[str]:
+ """Check if we have a not-outdated version loaded already."""
+ if not self._state:
+ return None
+
+ if "last_check" not in self._state:
+ return None
+
+ if "pypi_version" not in self._state:
+ return None
+
+ seven_days_in_seconds = 7 * 24 * 60 * 60
+
+ # Determine if we need to refresh the state
+ last_check = datetime.datetime.strptime(self._state["last_check"], _DATE_FMT)
+ seconds_since_last_check = (current_time - last_check).total_seconds()
+ if seconds_since_last_check > seven_days_in_seconds:
+ return None
+
+ return self._state["pypi_version"]
+
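The staleness rule above, in isolation (the dates are hypothetical):

import datetime

fmt = "%Y-%m-%dT%H:%M:%SZ"
last_check = datetime.datetime.strptime("2024-01-01T00:00:00Z", fmt)
now = datetime.datetime(2024, 1, 9)
print((now - last_check).total_seconds() > 7 * 24 * 60 * 60)  # True: cached value is stale
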
+ def set(self, pypi_version: str, current_time: datetime.datetime) -> None:
+ # If we do not have a path to cache in, don't bother saving.
+ if not self._statefile_path:
+ return
+
+ # Check to make sure that we own the directory
+ if not check_path_owner(os.path.dirname(self._statefile_path)):
+ return
+
+ # Now that we've ensured the directory is owned by this user, we'll go
+ # ahead and make sure that all our directories are created.
+ ensure_dir(os.path.dirname(self._statefile_path))
+
+ state = {
+ # Include the key so it's easy to tell which pip wrote the
+ # file.
+ "key": self.key,
+ "last_check": current_time.strftime(_DATE_FMT),
+ "pypi_version": pypi_version,
+ }
+
+ text = json.dumps(state, sort_keys=True, separators=(",", ":"))
+
+ with adjacent_tmp_file(self._statefile_path) as f:
+ f.write(text.encode())
+
+ try:
+ # Since we have a prefix-specific state file, we can just
+ # overwrite whatever is there, no need to check.
+ replace(f.name, self._statefile_path)
+ except OSError:
+ # Best effort.
+ pass
+
+
+@dataclass
+class UpgradePrompt:
+ old: str
+ new: str
+
+ def __rich__(self) -> Group:
+ if WINDOWS:
+ pip_cmd = f"{get_best_invocation_for_this_python()} -m pip"
+ else:
+ pip_cmd = get_best_invocation_for_this_pip()
+
+ notice = "[bold][[reset][blue]notice[reset][bold]][reset]"
+ return Group(
+ Text(),
+ Text.from_markup(
+ f"{notice} A new release of pip is available: "
+ f"[red]{self.old}[reset] -> [green]{self.new}[reset]"
+ ),
+ Text.from_markup(
+ f"{notice} To update, run: "
+ f"[green]{escape(pip_cmd)} install --upgrade pip"
+ ),
+ )
+
+
+def was_installed_by_pip(pkg: str) -> bool:
+ """Checks whether pkg was installed by pip
+
+ This is used not to display the upgrade message when pip is in fact
+ installed by system package manager, such as dnf on Fedora.
+ """
+ dist = get_default_environment().get_distribution(pkg)
+ return dist is not None and "pip" == dist.installer
+
+
+def _get_current_remote_pip_version(
+ session: PipSession, options: optparse.Values
+) -> Optional[str]:
+ # Let's use PackageFinder to see what the latest pip version is.
+ link_collector = LinkCollector.create(
+ session,
+ options=options,
+ suppress_no_index=True,
+ )
+
+ # Pass allow_yanked=False so we don't suggest upgrading to a
+ # yanked version.
+ selection_prefs = SelectionPreferences(
+ allow_yanked=False,
+ allow_all_prereleases=False, # Explicitly set to False
+ )
+
+ finder = PackageFinder.create(
+ link_collector=link_collector,
+ selection_prefs=selection_prefs,
+ )
+ best_candidate = finder.find_best_candidate("pip").best_candidate
+ if best_candidate is None:
+ return None
+
+ return str(best_candidate.version)
+
+
+def _self_version_check_logic(
+ *,
+ state: SelfCheckState,
+ current_time: datetime.datetime,
+ local_version: DistributionVersion,
+ get_remote_version: Callable[[], Optional[str]],
+) -> Optional[UpgradePrompt]:
+ remote_version_str = state.get(current_time)
+ if remote_version_str is None:
+ remote_version_str = get_remote_version()
+ if remote_version_str is None:
+ logger.debug("No remote pip version found")
+ return None
+ state.set(remote_version_str, current_time)
+
+ remote_version = parse_version(remote_version_str)
+ logger.debug("Remote version of pip: %s", remote_version)
+ logger.debug("Local version of pip: %s", local_version)
+
+ pip_installed_by_pip = was_installed_by_pip("pip")
+ logger.debug("Was pip installed by pip? %s", pip_installed_by_pip)
+ if not pip_installed_by_pip:
+ return None # Only suggest upgrade if pip is installed by pip.
+
+ local_version_is_older = (
+ local_version < remote_version
+ and local_version.base_version != remote_version.base_version
+ )
+ if local_version_is_older:
+ return UpgradePrompt(old=str(local_version), new=remote_version_str)
+
+ return None
+
+
+def pip_self_version_check(session: PipSession, options: optparse.Values) -> None:
+ """Check for an update for pip.
+
+ Limit the frequency of checks to once per week. State is stored either in
+ the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
+ of the pip script path.
+ """
+ installed_dist = get_default_environment().get_distribution("pip")
+ if not installed_dist:
+ return
+
+ try:
+ upgrade_prompt = _self_version_check_logic(
+ state=SelfCheckState(cache_dir=options.cache_dir),
+ current_time=datetime.datetime.utcnow(),
+ local_version=installed_dist.version,
+ get_remote_version=functools.partial(
+ _get_current_remote_pip_version, session, options
+ ),
+ )
+ if upgrade_prompt is not None:
+ logger.warning("[present-rich] %s", upgrade_prompt)
+ except Exception:
+ logger.warning("There was an error checking the latest version of pip.")
+ logger.debug("See below for error", exc_info=True)
diff --git a/third_party/python/pip/pip/_internal/utils/__init__.py b/third_party/python/pip/pip/_internal/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/__init__.py
diff --git a/third_party/python/pip/pip/_internal/utils/_log.py b/third_party/python/pip/pip/_internal/utils/_log.py
new file mode 100644
index 0000000000..92c4c6a193
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/_log.py
@@ -0,0 +1,38 @@
+"""Customize logging
+
+ Defines a custom logger class for the `logger.verbose(...)` method.
+
+ init_logging() must be called before any other module calls logging.getLogger.
+"""
+
+import logging
+from typing import Any, cast
+
+# custom log level for `--verbose` output
+# between DEBUG and INFO
+VERBOSE = 15
+
+
+class VerboseLogger(logging.Logger):
+ """Custom Logger, defining a verbose log-level
+
+ VERBOSE is between INFO and DEBUG.
+ """
+
+ def verbose(self, msg: str, *args: Any, **kwargs: Any) -> None:
+ return self.log(VERBOSE, msg, *args, **kwargs)
+
+
+def getLogger(name: str) -> VerboseLogger:
+ """logging.getLogger, but ensures our VerboseLogger class is returned"""
+ return cast(VerboseLogger, logging.getLogger(name))
+
+
+def init_logging() -> None:
+ """Register our VerboseLogger and VERBOSE log level.
+
+ Should be called before any calls to getLogger(),
+ i.e. in pip._internal.__init__
+ """
+ logging.setLoggerClass(VerboseLogger)
+ logging.addLevelName(VERBOSE, "VERBOSE")
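Hypothetical usage, mirroring the initialization order the docstrings above call for:

import logging

init_logging()  # register VerboseLogger before any logger is created
logging.basicConfig(level=VERBOSE)
log = getLogger("demo")
log.verbose("shown at --verbose")  # level 15 sits between DEBUG and INFO
log.debug("hidden at VERBOSE level")
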
diff --git a/third_party/python/pip/pip/_internal/utils/appdirs.py b/third_party/python/pip/pip/_internal/utils/appdirs.py
new file mode 100644
index 0000000000..16933bf8af
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/appdirs.py
@@ -0,0 +1,52 @@
+"""
+ This code wraps the vendored appdirs module so that the return values are
+ compatible with the current pip code base.
+
+ The intention is to rewrite current usages gradually, keeping the tests
+ passing, and eventually drop this module after all usages are changed.
+"""
+
+import os
+import sys
+from typing import List
+
+from pip._vendor import platformdirs as _appdirs
+
+
+def user_cache_dir(appname: str) -> str:
+ return _appdirs.user_cache_dir(appname, appauthor=False)
+
+
+def _macos_user_config_dir(appname: str, roaming: bool = True) -> str:
+ # Use ~/Application Support/pip, if the directory exists.
+ path = _appdirs.user_data_dir(appname, appauthor=False, roaming=roaming)
+ if os.path.isdir(path):
+ return path
+
+ # Use a Linux-like ~/.config/pip, by default.
+ linux_like_path = "~/.config/"
+ if appname:
+ linux_like_path = os.path.join(linux_like_path, appname)
+
+ return os.path.expanduser(linux_like_path)
+
+
+def user_config_dir(appname: str, roaming: bool = True) -> str:
+ if sys.platform == "darwin":
+ return _macos_user_config_dir(appname, roaming)
+
+ return _appdirs.user_config_dir(appname, appauthor=False, roaming=roaming)
+
+
+# for the discussion regarding site_config_dir locations
+# see <https://github.com/pypa/pip/issues/1733>
+def site_config_dirs(appname: str) -> List[str]:
+ if sys.platform == "darwin":
+ return [_appdirs.site_data_dir(appname, appauthor=False, multipath=True)]
+
+ dirval = _appdirs.site_config_dir(appname, appauthor=False, multipath=True)
+ if sys.platform == "win32":
+ return [dirval]
+
+ # Unix-y system. Look in /etc as well.
+ return dirval.split(os.pathsep) + ["/etc"]
diff --git a/third_party/python/pip/pip/_internal/utils/compat.py b/third_party/python/pip/pip/_internal/utils/compat.py
new file mode 100644
index 0000000000..3f4d300cef
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/compat.py
@@ -0,0 +1,63 @@
+"""Stuff that differs in different Python versions and platform
+distributions."""
+
+import logging
+import os
+import sys
+
+__all__ = ["get_path_uid", "stdlib_pkgs", "WINDOWS"]
+
+
+logger = logging.getLogger(__name__)
+
+
+def has_tls() -> bool:
+ try:
+ import _ssl # noqa: F401 # ignore unused
+
+ return True
+ except ImportError:
+ pass
+
+ from pip._vendor.urllib3.util import IS_PYOPENSSL
+
+ return IS_PYOPENSSL
+
+
+def get_path_uid(path: str) -> int:
+ """
+ Return path's uid.
+
+ Does not follow symlinks:
+ https://github.com/pypa/pip/pull/935#discussion_r5307003
+
+ Placed this function in compat due to differences on AIX and
+ Jython that should eventually go away.
+
+ :raises OSError: When path is a symlink or can't be read.
+ """
+ if hasattr(os, "O_NOFOLLOW"):
+ fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW)
+ file_uid = os.fstat(fd).st_uid
+ os.close(fd)
+ else: # AIX and Jython
+ # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW
+ if not os.path.islink(path):
+ # older versions of Jython don't have `os.fstat`
+ file_uid = os.stat(path).st_uid
+ else:
+ # raise OSError for parity with os.O_NOFOLLOW above
+ raise OSError(f"{path} is a symlink; Will not return uid for symlinks")
+ return file_uid
+
+
+# packages in the stdlib that may have installation metadata, but should not be
+# considered 'installed'. this theoretically could be determined based on
+# dist.location (py27:`sysconfig.get_paths()['stdlib']`,
+# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may
+# make this ineffective, so hard-coding
+stdlib_pkgs = {"python", "wsgiref", "argparse"}
+
+
+# windows detection, covers cpython and ironpython
+WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt")
diff --git a/third_party/python/pip/pip/_internal/utils/compatibility_tags.py b/third_party/python/pip/pip/_internal/utils/compatibility_tags.py
new file mode 100644
index 0000000000..b6ed9a78e5
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/compatibility_tags.py
@@ -0,0 +1,165 @@
+"""Generate and work with PEP 425 Compatibility Tags.
+"""
+
+import re
+from typing import List, Optional, Tuple
+
+from pip._vendor.packaging.tags import (
+ PythonVersion,
+ Tag,
+ compatible_tags,
+ cpython_tags,
+ generic_tags,
+ interpreter_name,
+ interpreter_version,
+ mac_platforms,
+)
+
+_osx_arch_pat = re.compile(r"(.+)_(\d+)_(\d+)_(.+)")
+
+
+def version_info_to_nodot(version_info: Tuple[int, ...]) -> str:
+ # Only use up to the first two numbers.
+ return "".join(map(str, version_info[:2]))
+
+
+def _mac_platforms(arch: str) -> List[str]:
+ match = _osx_arch_pat.match(arch)
+ if match:
+ name, major, minor, actual_arch = match.groups()
+ mac_version = (int(major), int(minor))
+ arches = [
+ # Since we have always only checked that the platform starts
+ # with "macosx", for backwards-compatibility we extract the
+ # actual prefix provided by the user in case they provided
+ # something like "macosxcustom_". It may be good to remove
+ # this as undocumented or deprecate it in the future.
+ "{}_{}".format(name, arch[len("macosx_") :])
+ for arch in mac_platforms(mac_version, actual_arch)
+ ]
+ else:
+ # arch pattern didn't match (?!)
+ arches = [arch]
+ return arches
+
+
+def _custom_manylinux_platforms(arch: str) -> List[str]:
+ arches = [arch]
+ arch_prefix, arch_sep, arch_suffix = arch.partition("_")
+ if arch_prefix == "manylinux2014":
+ # manylinux1/manylinux2010 wheels run on most manylinux2014 systems
+ # with the exception of wheels depending on ncurses. PEP 599 states
+ # manylinux1/manylinux2010 wheels should be considered
+ # manylinux2014 wheels:
+ # https://www.python.org/dev/peps/pep-0599/#backwards-compatibility-with-manylinux2010-wheels
+ if arch_suffix in {"i686", "x86_64"}:
+ arches.append("manylinux2010" + arch_sep + arch_suffix)
+ arches.append("manylinux1" + arch_sep + arch_suffix)
+ elif arch_prefix == "manylinux2010":
+ # manylinux1 wheels run on most manylinux2010 systems with the
+ # exception of wheels depending on ncurses. PEP 571 states
+ # manylinux1 wheels should be considered manylinux2010 wheels:
+ # https://www.python.org/dev/peps/pep-0571/#backwards-compatibility-with-manylinux1-wheels
+ arches.append("manylinux1" + arch_sep + arch_suffix)
+ return arches
+
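For example (pure string manipulation, no platform inspection):

print(_custom_manylinux_platforms("manylinux2014_x86_64"))
# -> ['manylinux2014_x86_64', 'manylinux2010_x86_64', 'manylinux1_x86_64']
print(_custom_manylinux_platforms("manylinux2014_aarch64"))
# -> ['manylinux2014_aarch64'] (no legacy equivalents for non-x86 arches)
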
+
+def _get_custom_platforms(arch: str) -> List[str]:
+ arch_prefix, arch_sep, arch_suffix = arch.partition("_")
+ if arch.startswith("macosx"):
+ arches = _mac_platforms(arch)
+ elif arch_prefix in ["manylinux2014", "manylinux2010"]:
+ arches = _custom_manylinux_platforms(arch)
+ else:
+ arches = [arch]
+ return arches
+
+
+def _expand_allowed_platforms(platforms: Optional[List[str]]) -> Optional[List[str]]:
+ if not platforms:
+ return None
+
+ seen = set()
+ result = []
+
+ for p in platforms:
+ if p in seen:
+ continue
+ additions = [c for c in _get_custom_platforms(p) if c not in seen]
+ seen.update(additions)
+ result.extend(additions)
+
+ return result
+
+
+def _get_python_version(version: str) -> PythonVersion:
+ if len(version) > 1:
+ return int(version[0]), int(version[1:])
+ else:
+ return (int(version[0]),)
+
+
+def _get_custom_interpreter(
+ implementation: Optional[str] = None, version: Optional[str] = None
+) -> str:
+ if implementation is None:
+ implementation = interpreter_name()
+ if version is None:
+ version = interpreter_version()
+ return f"{implementation}{version}"
+
+
+def get_supported(
+ version: Optional[str] = None,
+ platforms: Optional[List[str]] = None,
+ impl: Optional[str] = None,
+ abis: Optional[List[str]] = None,
+) -> List[Tag]:
+ """Return a list of supported tags for each version specified in
+ `versions`.
+
+ :param version: a string version, of the form "33" or "32",
+ or None. The version will be assumed to support our ABI.
+ :param platforms: specify a list of platforms you want valid
+ tags for, or None. If None, use the local system platform.
+ :param impl: specify the exact implementation you want valid
+ tags for, or None. If None, use the local interpreter impl.
+ :param abis: specify a list of abis you want valid
+ tags for, or None. If None, use the local interpreter abi.
+ """
+ supported: List[Tag] = []
+
+ python_version: Optional[PythonVersion] = None
+ if version is not None:
+ python_version = _get_python_version(version)
+
+ interpreter = _get_custom_interpreter(impl, version)
+
+ platforms = _expand_allowed_platforms(platforms)
+
+ is_cpython = (impl or interpreter_name()) == "cp"
+ if is_cpython:
+ supported.extend(
+ cpython_tags(
+ python_version=python_version,
+ abis=abis,
+ platforms=platforms,
+ )
+ )
+ else:
+ supported.extend(
+ generic_tags(
+ interpreter=interpreter,
+ abis=abis,
+ platforms=platforms,
+ )
+ )
+ supported.extend(
+ compatible_tags(
+ python_version=python_version,
+ interpreter=interpreter,
+ platforms=platforms,
+ )
+ )
+
+ return supported
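A hypothetical call with interpreter, ABI and platform pinned so the result is deterministic:

tags = get_supported(
    version="310",
    platforms=["manylinux2014_x86_64"],
    impl="cp",
    abis=["cp310"],
)
print(tags[0])  # cp310-cp310-manylinux2014_x86_64 -- most specific tag first
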
diff --git a/third_party/python/pip/pip/_internal/utils/datetime.py b/third_party/python/pip/pip/_internal/utils/datetime.py
new file mode 100644
index 0000000000..8668b3b0ec
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/datetime.py
@@ -0,0 +1,11 @@
+"""For when pip wants to check the date or time.
+"""
+
+import datetime
+
+
+def today_is_later_than(year: int, month: int, day: int) -> bool:
+ today = datetime.date.today()
+ given = datetime.date(year, month, day)
+
+ return today > given
diff --git a/third_party/python/pip/pip/_internal/utils/deprecation.py b/third_party/python/pip/pip/_internal/utils/deprecation.py
new file mode 100644
index 0000000000..18e9be9f36
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/deprecation.py
@@ -0,0 +1,188 @@
+"""
+A module that implements tooling to enable easy warnings about deprecations.
+"""
+
+import logging
+import warnings
+from typing import Any, Optional, TextIO, Type, Union
+
+from pip._vendor.packaging.version import parse
+
+from pip import __version__ as current_version # NOTE: tests patch this name.
+
+DEPRECATION_MSG_PREFIX = "DEPRECATION: "
+
+
+class PipDeprecationWarning(Warning):
+ pass
+
+
+_original_showwarning: Any = None
+
+
+# Warnings <-> Logging Integration
+def _showwarning(
+ message: Union[Warning, str],
+ category: Type[Warning],
+ filename: str,
+ lineno: int,
+ file: Optional[TextIO] = None,
+ line: Optional[str] = None,
+) -> None:
+ if file is not None:
+ if _original_showwarning is not None:
+ _original_showwarning(message, category, filename, lineno, file, line)
+ elif issubclass(category, PipDeprecationWarning):
+ # We use a specially named logger which will handle all of the
+ # deprecation messages for pip.
+ logger = logging.getLogger("pip._internal.deprecations")
+ logger.warning(message)
+ else:
+ _original_showwarning(message, category, filename, lineno, file, line)
+
+
+def install_warning_logger() -> None:
+ # Enable our Deprecation Warnings
+ warnings.simplefilter("default", PipDeprecationWarning, append=True)
+
+ global _original_showwarning
+
+ if _original_showwarning is None:
+ _original_showwarning = warnings.showwarning
+ warnings.showwarning = _showwarning
+
+
+def deprecated(
+ *,
+ reason: str,
+ replacement: Optional[str],
+ gone_in: Optional[str],
+ feature_flag: Optional[str] = None,
+ issue: Optional[int] = None,
+) -> None:
+ """Helper to deprecate existing functionality.
+
+ reason:
+ Textual reason shown to the user about why this functionality has
+ been deprecated. Should be a complete sentence.
+ replacement:
+ Textual suggestion shown to the user about what alternative
+ functionality they can use.
+ gone_in:
+ The version of pip in which this functionality should be removed.
+ Raises an error if pip's current version is greater than or equal to
+ this.
+ feature_flag:
+ Command-line flag of the form --use-feature={feature_flag} for testing
+ upcoming functionality.
+ issue:
+ Issue number on the tracker that would serve as a useful place for
+ users to find related discussion and provide feedback.
+ """
+
+ # Determine whether or not the feature is already gone in this version.
+ is_gone = gone_in is not None and parse(current_version) >= parse(gone_in)
+
+ message_parts = [
+ (reason, f"{DEPRECATION_MSG_PREFIX}{{}}"),
+ (
+ gone_in,
+ "pip {} will enforce this behaviour change."
+ if not is_gone
+ else "Since pip {}, this is no longer supported.",
+ ),
+ (
+ replacement,
+ "A possible replacement is {}.",
+ ),
+ (
+ feature_flag,
+ "You can use the flag --use-feature={} to test the upcoming behaviour."
+ if not is_gone
+ else None,
+ ),
+ (
+ issue,
+ "Discussion can be found at https://github.com/pypa/pip/issues/{}",
+ ),
+ ]
+
+ message = " ".join(
+ format_str.format(value)
+ for value, format_str in message_parts
+ if format_str is not None and value is not None
+ )
+
+ # Raise as an error if this behaviour is deprecated.
+ if is_gone:
+ raise PipDeprecationWarning(message)
+
+ warnings.warn(message, category=PipDeprecationWarning, stacklevel=2)
+
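To see how the message parts assemble, here is the same join in isolation (values are hypothetical, and gone_in is far in the future so is_gone is False):

DEPRECATION_MSG_PREFIX = "DEPRECATION: "
reason, gone_in, replacement = "Foo is deprecated.", "99.0", "using Bar instead"
message_parts = [
    (reason, f"{DEPRECATION_MSG_PREFIX}{{}}"),
    (gone_in, "pip {} will enforce this behaviour change."),
    (replacement, "A possible replacement is {}."),
]
print(" ".join(fmt.format(val) for val, fmt in message_parts if val is not None))
# DEPRECATION: Foo is deprecated. pip 99.0 will enforce this behaviour
# change. A possible replacement is using Bar instead.
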
+
+class LegacyInstallReason:
+ def __init__(
+ self,
+ reason: str,
+ replacement: Optional[str] = None,
+ gone_in: Optional[str] = None,
+ feature_flag: Optional[str] = None,
+ issue: Optional[int] = None,
+ emit_after_success: bool = False,
+ emit_before_install: bool = False,
+ ):
+ self._reason = reason
+ self._replacement = replacement
+ self._gone_in = gone_in
+ self._feature_flag = feature_flag
+ self._issue = issue
+ self.emit_after_success = emit_after_success
+ self.emit_before_install = emit_before_install
+
+ def emit_deprecation(self, name: str) -> None:
+ deprecated(
+ reason=self._reason.format(name=name),
+ replacement=self._replacement,
+ gone_in=self._gone_in,
+ feature_flag=self._feature_flag,
+ issue=self._issue,
+ )
+
+
+LegacyInstallReasonFailedBdistWheel = LegacyInstallReason(
+ reason=(
+ "{name} was installed using the legacy 'setup.py install' "
+ "method, because a wheel could not be built for it."
+ ),
+ replacement="to fix the wheel build issue reported above",
+ gone_in="23.1",
+ issue=8368,
+ emit_after_success=True,
+)
+
+
+LegacyInstallReasonMissingWheelPackage = LegacyInstallReason(
+ reason=(
+ "{name} is being installed using the legacy "
+ "'setup.py install' method, because it does not have a "
+ "'pyproject.toml' and the 'wheel' package "
+ "is not installed."
+ ),
+ replacement="to enable the '--use-pep517' option",
+ gone_in="23.1",
+ issue=8559,
+ emit_before_install=True,
+)
+
+LegacyInstallReasonNoBinaryForcesSetuptoolsInstall = LegacyInstallReason(
+ reason=(
+ "{name} is being installed using the legacy "
+ "'setup.py install' method, because the '--no-binary' option was enabled "
+ "for it and this currently disables local wheel building for projects that "
+ "don't have a 'pyproject.toml' file."
+ ),
+ replacement="to enable the '--use-pep517' option",
+ gone_in="23.1",
+ issue=11451,
+ emit_before_install=True,
+)
diff --git a/third_party/python/pip/pip/_internal/utils/direct_url_helpers.py b/third_party/python/pip/pip/_internal/utils/direct_url_helpers.py
new file mode 100644
index 0000000000..0e8e5e1608
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/direct_url_helpers.py
@@ -0,0 +1,87 @@
+from typing import Optional
+
+from pip._internal.models.direct_url import ArchiveInfo, DirectUrl, DirInfo, VcsInfo
+from pip._internal.models.link import Link
+from pip._internal.utils.urls import path_to_url
+from pip._internal.vcs import vcs
+
+
+def direct_url_as_pep440_direct_reference(direct_url: DirectUrl, name: str) -> str:
+ """Convert a DirectUrl to a pip requirement string."""
+ direct_url.validate() # if invalid, this is a pip bug
+ requirement = name + " @ "
+ fragments = []
+ if isinstance(direct_url.info, VcsInfo):
+ requirement += "{}+{}@{}".format(
+ direct_url.info.vcs, direct_url.url, direct_url.info.commit_id
+ )
+ elif isinstance(direct_url.info, ArchiveInfo):
+ requirement += direct_url.url
+ if direct_url.info.hash:
+ fragments.append(direct_url.info.hash)
+ else:
+ assert isinstance(direct_url.info, DirInfo)
+ requirement += direct_url.url
+ if direct_url.subdirectory:
+ fragments.append("subdirectory=" + direct_url.subdirectory)
+ if fragments:
+ requirement += "#" + "&".join(fragments)
+ return requirement
+
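A sketch using pip's own models (the URL and hash are made up):

from pip._internal.models.direct_url import ArchiveInfo, DirectUrl

direct_url = DirectUrl(
    url="https://example.com/pkg-1.0.tar.gz",
    info=ArchiveInfo(hash="sha256=abc123"),
)
print(direct_url_as_pep440_direct_reference(direct_url, "pkg"))
# -> pkg @ https://example.com/pkg-1.0.tar.gz#sha256=abc123
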
+
+def direct_url_for_editable(source_dir: str) -> DirectUrl:
+ return DirectUrl(
+ url=path_to_url(source_dir),
+ info=DirInfo(editable=True),
+ )
+
+
+def direct_url_from_link(
+ link: Link, source_dir: Optional[str] = None, link_is_in_wheel_cache: bool = False
+) -> DirectUrl:
+ if link.is_vcs:
+ vcs_backend = vcs.get_backend_for_scheme(link.scheme)
+ assert vcs_backend
+ url, requested_revision, _ = vcs_backend.get_url_rev_and_auth(
+ link.url_without_fragment
+ )
+ # For VCS links, we need to find out and add commit_id.
+ if link_is_in_wheel_cache:
+ # If the requested VCS link corresponds to a cached
+ # wheel, it means the requested revision was an
+ # immutable commit hash, otherwise it would not have
+ # been cached. In that case we don't have a source_dir
+ # with the VCS checkout.
+ assert requested_revision
+ commit_id = requested_revision
+ else:
+ # If the wheel was not in cache, it means we have
+ # had to checkout from VCS to build and we have a source_dir
+ # which we can inspect to find out the commit id.
+ assert source_dir
+ commit_id = vcs_backend.get_revision(source_dir)
+ return DirectUrl(
+ url=url,
+ info=VcsInfo(
+ vcs=vcs_backend.name,
+ commit_id=commit_id,
+ requested_revision=requested_revision,
+ ),
+ subdirectory=link.subdirectory_fragment,
+ )
+ elif link.is_existing_dir():
+ return DirectUrl(
+ url=link.url_without_fragment,
+ info=DirInfo(),
+ subdirectory=link.subdirectory_fragment,
+ )
+ else:
+ hash = None
+ hash_name = link.hash_name
+ if hash_name:
+ hash = f"{hash_name}={link.hash}"
+ return DirectUrl(
+ url=link.url_without_fragment,
+ info=ArchiveInfo(hash=hash),
+ subdirectory=link.subdirectory_fragment,
+ )
diff --git a/third_party/python/pip/pip/_internal/utils/distutils_args.py b/third_party/python/pip/pip/_internal/utils/distutils_args.py
new file mode 100644
index 0000000000..2fd1862073
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/distutils_args.py
@@ -0,0 +1,43 @@
+from getopt import GetoptError, getopt
+from typing import Dict, List
+
+_options = [
+ "exec-prefix=",
+ "home=",
+ "install-base=",
+ "install-data=",
+ "install-headers=",
+ "install-lib=",
+ "install-platlib=",
+ "install-purelib=",
+ "install-scripts=",
+ "prefix=",
+ "root=",
+ "user",
+]
+
+
+def parse_distutils_args(args: List[str]) -> Dict[str, str]:
+ """Parse provided arguments, returning an object that has the matched arguments.
+
+ Any unknown arguments are ignored.
+ """
+ result = {}
+ for arg in args:
+ try:
+ parsed_opt, _ = getopt(args=[arg], shortopts="", longopts=_options)
+ except GetoptError:
+ # We don't care about any other options, which here may be
+ # considered unrecognized since our option list is not
+ # exhaustive.
+ continue
+
+ if not parsed_opt:
+ continue
+
+ option = parsed_opt[0]
+ name_from_parsed = option[0][2:].replace("-", "_")
+ value_from_parsed = option[1] or "true"
+ result[name_from_parsed] = value_from_parsed
+
+ return result
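For instance (hypothetical argument strings):

print(parse_distutils_args(["--prefix=/opt/pkg", "--user", "--unknown"]))
# -> {'prefix': '/opt/pkg', 'user': 'true'}; unrecognized options are skipped
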
diff --git a/third_party/python/pip/pip/_internal/utils/egg_link.py b/third_party/python/pip/pip/_internal/utils/egg_link.py
new file mode 100644
index 0000000000..eb57ed1519
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/egg_link.py
@@ -0,0 +1,72 @@
+import os
+import re
+import sys
+from typing import List, Optional
+
+from pip._internal.locations import site_packages, user_site
+from pip._internal.utils.virtualenv import (
+ running_under_virtualenv,
+ virtualenv_no_global,
+)
+
+__all__ = [
+ "egg_link_path_from_sys_path",
+ "egg_link_path_from_location",
+]
+
+
+def _egg_link_name(raw_name: str) -> str:
+ """
+ Convert a Name metadata value to a .egg-link name, by applying
+ the same substitution as pkg_resources's safe_name function.
+ Note: we cannot use canonicalize_name because it has a different logic.
+ """
+ return re.sub("[^A-Za-z0-9.]+", "-", raw_name) + ".egg-link"
+
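For example:

print(_egg_link_name("zope.interface"))  # zope.interface.egg-link
print(_egg_link_name("my pkg"))          # my-pkg.egg-link
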
+
+def egg_link_path_from_sys_path(raw_name: str) -> Optional[str]:
+ """
+ Look for a .egg-link file for project name, by walking sys.path.
+ """
+ egg_link_name = _egg_link_name(raw_name)
+ for path_item in sys.path:
+ egg_link = os.path.join(path_item, egg_link_name)
+ if os.path.isfile(egg_link):
+ return egg_link
+ return None
+
+
+def egg_link_path_from_location(raw_name: str) -> Optional[str]:
+ """
+ Return the path for the .egg-link file if it exists, otherwise, None.
+
+ There are 3 scenarios:
+ 1) not in a virtualenv
+ try to find in site.USER_SITE, then site_packages
+ 2) in a no-global virtualenv
+ try to find in site_packages
+ 3) in a yes-global virtualenv
+ try to find in site_packages, then site.USER_SITE
+ (don't look in global location)
+
+ For #1 and #3, there could be odd cases where there's an egg-link in 2
+ locations.
+
+ This method will just return the first one found.
+ """
+ sites: List[str] = []
+ if running_under_virtualenv():
+ sites.append(site_packages)
+ if not virtualenv_no_global() and user_site:
+ sites.append(user_site)
+ else:
+ if user_site:
+ sites.append(user_site)
+ sites.append(site_packages)
+
+ egg_link_name = _egg_link_name(raw_name)
+ for site in sites:
+ egglink = os.path.join(site, egg_link_name)
+ if os.path.isfile(egglink):
+ return egglink
+ return None
diff --git a/third_party/python/pip/pip/_internal/utils/encoding.py b/third_party/python/pip/pip/_internal/utils/encoding.py
new file mode 100644
index 0000000000..008f06a79b
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/encoding.py
@@ -0,0 +1,36 @@
+import codecs
+import locale
+import re
+import sys
+from typing import List, Tuple
+
+BOMS: List[Tuple[bytes, str]] = [
+ (codecs.BOM_UTF8, "utf-8"),
+ (codecs.BOM_UTF16, "utf-16"),
+ (codecs.BOM_UTF16_BE, "utf-16-be"),
+ (codecs.BOM_UTF16_LE, "utf-16-le"),
+ (codecs.BOM_UTF32, "utf-32"),
+ (codecs.BOM_UTF32_BE, "utf-32-be"),
+ (codecs.BOM_UTF32_LE, "utf-32-le"),
+]
+
+ENCODING_RE = re.compile(rb"coding[:=]\s*([-\w.]+)")
+
+
+def auto_decode(data: bytes) -> str:
+ """Check a bytes string for a BOM to correctly detect the encoding
+
+ Fallback to locale.getpreferredencoding(False) like open() on Python3"""
+ for bom, encoding in BOMS:
+ if data.startswith(bom):
+ return data[len(bom) :].decode(encoding)
+ # Let's check the first two lines, as in PEP 263.
+ for line in data.split(b"\n")[:2]:
+ if line[0:1] == b"#" and ENCODING_RE.search(line):
+ result = ENCODING_RE.search(line)
+ assert result is not None
+ encoding = result.groups()[0].decode("ascii")
+ return data.decode(encoding)
+ return data.decode(
+ locale.getpreferredencoding(False) or sys.getdefaultencoding(),
+ )
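Both detection paths, sketched with made-up byte strings:

import codecs

print(auto_decode(codecs.BOM_UTF8 + "# café".encode("utf-8")))
# -> '# café' (BOM stripped, remainder decoded as UTF-8)
print(auto_decode(b"# -*- coding: latin-1 -*-\n# caf\xe9"))
# -> the PEP 263 cookie wins, so the bytes decode as latin-1
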
diff --git a/third_party/python/pip/pip/_internal/utils/entrypoints.py b/third_party/python/pip/pip/_internal/utils/entrypoints.py
new file mode 100644
index 0000000000..1501369385
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/entrypoints.py
@@ -0,0 +1,84 @@
+import itertools
+import os
+import shutil
+import sys
+from typing import List, Optional
+
+from pip._internal.cli.main import main
+from pip._internal.utils.compat import WINDOWS
+
+_EXECUTABLE_NAMES = [
+ "pip",
+ f"pip{sys.version_info.major}",
+ f"pip{sys.version_info.major}.{sys.version_info.minor}",
+]
+if WINDOWS:
+ _allowed_extensions = {"", ".exe"}
+ _EXECUTABLE_NAMES = [
+ "".join(parts)
+ for parts in itertools.product(_EXECUTABLE_NAMES, _allowed_extensions)
+ ]
+
+
+def _wrapper(args: Optional[List[str]] = None) -> int:
+ """Central wrapper for all old entrypoints.
+
+ Historically pip has had several entrypoints defined. Because of issues
+ arising from PATH, sys.path, multiple Pythons, their interactions, and most
+ of them having a pip installed, users suffer every time an entrypoint gets
+ moved.
+
+ To alleviate this pain, and provide a mechanism for warning users and
+ directing them to an appropriate place for help, we now define all of
+ our old entrypoints as wrappers for the current one.
+ """
+ sys.stderr.write(
+ "WARNING: pip is being invoked by an old script wrapper. This will "
+ "fail in a future version of pip.\n"
+ "Please see https://github.com/pypa/pip/issues/5599 for advice on "
+ "fixing the underlying issue.\n"
+ "To avoid this problem you can invoke Python with '-m pip' instead of "
+ "running pip directly.\n"
+ )
+ return main(args)
+
+
+def get_best_invocation_for_this_pip() -> str:
+ """Try to figure out the best way to invoke pip in the current environment."""
+ binary_directory = "Scripts" if WINDOWS else "bin"
+ binary_prefix = os.path.join(sys.prefix, binary_directory)
+
+ # Try to use pip[X[.Y]] names, if those executables for this environment are
+ # the first on PATH with that name.
+ path_parts = os.path.normcase(os.environ.get("PATH", "")).split(os.pathsep)
+ exe_are_in_PATH = os.path.normcase(binary_prefix) in path_parts
+ if exe_are_in_PATH:
+ for exe_name in _EXECUTABLE_NAMES:
+ found_executable = shutil.which(exe_name)
+ binary_executable = os.path.join(binary_prefix, exe_name)
+ if (
+ found_executable
+ and os.path.exists(binary_executable)
+ and os.path.samefile(
+ found_executable,
+ binary_executable,
+ )
+ ):
+ return exe_name
+
+ # Use the `-m` invocation, if there's no "nice" invocation.
+ return f"{get_best_invocation_for_this_python()} -m pip"
+
+
+def get_best_invocation_for_this_python() -> str:
+ """Try to figure out the best way to invoke the current Python."""
+ exe = sys.executable
+ exe_name = os.path.basename(exe)
+
+ # Try to use the basename, if it's the first executable.
+ found_executable = shutil.which(exe_name)
+ if found_executable and os.path.samefile(found_executable, exe):
+ return exe_name
+
+ # Use the full executable name, because we couldn't find something simpler.
+ return exe
diff --git a/third_party/python/pip/pip/_internal/utils/filesystem.py b/third_party/python/pip/pip/_internal/utils/filesystem.py
new file mode 100644
index 0000000000..83c2df75b9
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/filesystem.py
@@ -0,0 +1,153 @@
+import fnmatch
+import os
+import os.path
+import random
+import sys
+from contextlib import contextmanager
+from tempfile import NamedTemporaryFile
+from typing import Any, BinaryIO, Generator, List, Union, cast
+
+from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
+
+from pip._internal.utils.compat import get_path_uid
+from pip._internal.utils.misc import format_size
+
+
+def check_path_owner(path: str) -> bool:
+ # If we don't have a way to check the effective uid of this process, then
+ # we'll just assume that we own the directory.
+ if sys.platform == "win32" or not hasattr(os, "geteuid"):
+ return True
+
+ assert os.path.isabs(path)
+
+ previous = None
+ while path != previous:
+ if os.path.lexists(path):
+ # Check if path is writable by current user.
+ if os.geteuid() == 0:
+ # Special handling for root user in order to handle properly
+ # cases where users use sudo without -H flag.
+ try:
+ path_uid = get_path_uid(path)
+ except OSError:
+ return False
+ return path_uid == 0
+ else:
+ return os.access(path, os.W_OK)
+ else:
+ previous, path = path, os.path.dirname(path)
+ return False # assume we don't own the path
+
+
+@contextmanager
+def adjacent_tmp_file(path: str, **kwargs: Any) -> Generator[BinaryIO, None, None]:
+ """Return a file-like object pointing to a tmp file next to path.
+
+ The file is created securely and is ensured to be written to disk
+ after the context reaches its end.
+
+ kwargs will be passed to tempfile.NamedTemporaryFile to control
+ the way the temporary file will be opened.
+ """
+ with NamedTemporaryFile(
+ delete=False,
+ dir=os.path.dirname(path),
+ prefix=os.path.basename(path),
+ suffix=".tmp",
+ **kwargs,
+ ) as f:
+ result = cast(BinaryIO, f)
+ try:
+ yield result
+ finally:
+ result.flush()
+ os.fsync(result.fileno())
+
+
+# Tenacity raises RetryError by default, explicitly raise the original exception
+_replace_retry = retry(reraise=True, stop=stop_after_delay(1), wait=wait_fixed(0.25))
+
+replace = _replace_retry(os.replace)
+
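The intended write-then-replace pattern, sketched with a hypothetical path (SelfCheckState.set earlier in this diff uses the same shape):

import json
import os

path = os.path.join(os.getcwd(), "example-state.json")
with adjacent_tmp_file(path) as f:
    f.write(json.dumps({"ok": True}).encode())
replace(f.name, path)  # atomic rename, retried briefly if the OS objects
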
+
+# test_writable_dir and _test_writable_dir_win are copied from Flit,
+# with the author's agreement to also place them under pip's license.
+def test_writable_dir(path: str) -> bool:
+ """Check if a directory is writable.
+
+ Uses os.access() on POSIX, tries creating files on Windows.
+ """
+ # If the directory doesn't exist, find the closest parent that does.
+ while not os.path.isdir(path):
+ parent = os.path.dirname(path)
+ if parent == path:
+ break # Should never get here, but infinite loops are bad
+ path = parent
+
+ if os.name == "posix":
+ return os.access(path, os.W_OK)
+
+ return _test_writable_dir_win(path)
+
+
+def _test_writable_dir_win(path: str) -> bool:
+ # os.access doesn't work on Windows: http://bugs.python.org/issue2528
+ # and we can't use tempfile: http://bugs.python.org/issue22107
+ basename = "accesstest_deleteme_fishfingers_custard_"
+ alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
+ for _ in range(10):
+ name = basename + "".join(random.choice(alphabet) for _ in range(6))
+ file = os.path.join(path, name)
+ try:
+ fd = os.open(file, os.O_RDWR | os.O_CREAT | os.O_EXCL)
+ except FileExistsError:
+ pass
+ except PermissionError:
+ # This could be because there's a directory with the same name.
+ # But it's highly unlikely there's a directory called that,
+ # so we'll assume it's because the parent dir is not writable.
+ # This could as well be because the parent dir is not readable,
+ # due to non-privileged user access.
+ return False
+ else:
+ os.close(fd)
+ os.unlink(file)
+ return True
+
+ # This should never be reached
+ raise OSError("Unexpected condition testing for writable directory")
+
+
+def find_files(path: str, pattern: str) -> List[str]:
+ """Returns a list of absolute paths of files beneath path, recursively,
+ with filenames which match the UNIX-style shell glob pattern."""
+ result: List[str] = []
+ for root, _, files in os.walk(path):
+ matches = fnmatch.filter(files, pattern)
+ result.extend(os.path.join(root, f) for f in matches)
+ return result
+
+
+def file_size(path: str) -> Union[int, float]:
+ # If it's a symlink, return 0.
+ if os.path.islink(path):
+ return 0
+ return os.path.getsize(path)
+
+
+def format_file_size(path: str) -> str:
+ return format_size(file_size(path))
+
+
+def directory_size(path: str) -> Union[int, float]:
+ size = 0.0
+ for root, _dirs, files in os.walk(path):
+ for filename in files:
+ file_path = os.path.join(root, filename)
+ size += file_size(file_path)
+ return size
+
+
+def format_directory_size(path: str) -> str:
+ return format_size(directory_size(path))
diff --git a/third_party/python/pip/pip/_internal/utils/filetypes.py b/third_party/python/pip/pip/_internal/utils/filetypes.py
new file mode 100644
index 0000000000..5948570178
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/filetypes.py
@@ -0,0 +1,27 @@
+"""Filetype information.
+"""
+
+from typing import Tuple
+
+from pip._internal.utils.misc import splitext
+
+WHEEL_EXTENSION = ".whl"
+BZ2_EXTENSIONS: Tuple[str, ...] = (".tar.bz2", ".tbz")
+XZ_EXTENSIONS: Tuple[str, ...] = (
+ ".tar.xz",
+ ".txz",
+ ".tlz",
+ ".tar.lz",
+ ".tar.lzma",
+)
+ZIP_EXTENSIONS: Tuple[str, ...] = (".zip", WHEEL_EXTENSION)
+TAR_EXTENSIONS: Tuple[str, ...] = (".tar.gz", ".tgz", ".tar")
+ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS
+
+
+def is_archive_file(name: str) -> bool:
+ """Return True if `name` is a considered as an archive file."""
+ ext = splitext(name)[1].lower()
+ if ext in ARCHIVE_EXTENSIONS:
+ return True
+ return False
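+
+
+# Illustrative examples (not part of the original module):
+#
+#     is_archive_file("requests-2.28.0-py3-none-any.whl")  # True (wheel)
+#     is_archive_file("project-1.0.tar.gz")                # True
+#     is_archive_file("README.md")                         # False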
diff --git a/third_party/python/pip/pip/_internal/utils/glibc.py b/third_party/python/pip/pip/_internal/utils/glibc.py
new file mode 100644
index 0000000000..7bd3c20681
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/glibc.py
@@ -0,0 +1,88 @@
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
+import os
+import sys
+from typing import Optional, Tuple
+
+
+def glibc_version_string() -> Optional[str]:
+ "Returns glibc version string, or None if not using glibc."
+ return glibc_version_string_confstr() or glibc_version_string_ctypes()
+
+
+def glibc_version_string_confstr() -> Optional[str]:
+ "Primary implementation of glibc_version_string using os.confstr."
+ # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module:
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
+ if sys.platform == "win32":
+ return None
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17":
+ _, version = os.confstr("CS_GNU_LIBC_VERSION").split()
+ except (AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def glibc_version_string_ctypes() -> Optional[str]:
+ "Fallback implementation of glibc_version_string using ctypes."
+
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ process_namespace = ctypes.CDLL(None)
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str = gnu_get_libc_version()
+    # gnu_get_libc_version may return bytes; decode to str for consistency:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+# platform.libc_ver regularly returns completely nonsensical glibc
+# versions. E.g. on my computer, platform says:
+#
+# ~$ python2.7 -c 'import platform; print(platform.libc_ver())'
+# ('glibc', '2.7')
+# ~$ python3.5 -c 'import platform; print(platform.libc_ver())'
+# ('glibc', '2.9')
+#
+# But the truth is:
+#
+# ~$ ldd --version
+# ldd (Debian GLIBC 2.22-11) 2.22
+#
+# This is unfortunate, because it means that the linehaul data on libc
+# versions that was generated by pip 8.1.2 and earlier is useless and
+# misleading. Solution: instead of using platform, use our code that actually
+# works.
+def libc_ver() -> Tuple[str, str]:
+ """Try to determine the glibc version
+
+ Returns a tuple of strings (lib, version) which default to empty strings
+ in case the lookup fails.
+ """
+ glibc_version = glibc_version_string()
+ if glibc_version is None:
+ return ("", "")
+ else:
+ return ("glibc", glibc_version)
diff --git a/third_party/python/pip/pip/_internal/utils/hashes.py b/third_party/python/pip/pip/_internal/utils/hashes.py
new file mode 100644
index 0000000000..76727306a4
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/hashes.py
@@ -0,0 +1,144 @@
+import hashlib
+from typing import TYPE_CHECKING, BinaryIO, Dict, Iterable, List, Optional
+
+from pip._internal.exceptions import HashMismatch, HashMissing, InstallationError
+from pip._internal.utils.misc import read_chunks
+
+if TYPE_CHECKING:
+ from hashlib import _Hash
+
+ # NoReturn introduced in 3.6.2; imported only for type checking to maintain
+ # pip compatibility with older patch versions of Python 3.6
+ from typing import NoReturn
+
+
+# The recommended hash algo of the moment. Change this whenever the state of
+# the art changes; it won't hurt backward compatibility.
+FAVORITE_HASH = "sha256"
+
+
+# Names of hashlib algorithms allowed by the --hash option and ``pip hash``
+# Currently, those are the ones at least as collision-resistant as sha256.
+STRONG_HASHES = ["sha256", "sha384", "sha512"]
+
+
+class Hashes:
+ """A wrapper that builds multiple hashes at once and checks them against
+ known-good values
+
+ """
+
+ def __init__(self, hashes: Optional[Dict[str, List[str]]] = None) -> None:
+ """
+ :param hashes: A dict of algorithm names pointing to lists of allowed
+ hex digests
+ """
+ allowed = {}
+ if hashes is not None:
+ for alg, keys in hashes.items():
+ # Make sure values are always sorted (to ease equality checks)
+ allowed[alg] = sorted(keys)
+ self._allowed = allowed
+
+ def __and__(self, other: "Hashes") -> "Hashes":
+ if not isinstance(other, Hashes):
+ return NotImplemented
+
+ # If either of the Hashes object is entirely empty (i.e. no hash
+ # specified at all), all hashes from the other object are allowed.
+ if not other:
+ return self
+ if not self:
+ return other
+
+        # Otherwise only hashes that are present in both objects are allowed.
+ new = {}
+ for alg, values in other._allowed.items():
+ if alg not in self._allowed:
+ continue
+ new[alg] = [v for v in values if v in self._allowed[alg]]
+ return Hashes(new)
+
+ @property
+ def digest_count(self) -> int:
+ return sum(len(digests) for digests in self._allowed.values())
+
+ def is_hash_allowed(self, hash_name: str, hex_digest: str) -> bool:
+ """Return whether the given hex digest is allowed."""
+ return hex_digest in self._allowed.get(hash_name, [])
+
+ def check_against_chunks(self, chunks: Iterable[bytes]) -> None:
+ """Check good hashes against ones built from iterable of chunks of
+ data.
+
+ Raise HashMismatch if none match.
+
+ """
+ gots = {}
+ for hash_name in self._allowed.keys():
+ try:
+ gots[hash_name] = hashlib.new(hash_name)
+ except (ValueError, TypeError):
+ raise InstallationError(f"Unknown hash name: {hash_name}")
+
+ for chunk in chunks:
+ for hash in gots.values():
+ hash.update(chunk)
+
+ for hash_name, got in gots.items():
+ if got.hexdigest() in self._allowed[hash_name]:
+ return
+ self._raise(gots)
+
+ def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn":
+ raise HashMismatch(self._allowed, gots)
+
+ def check_against_file(self, file: BinaryIO) -> None:
+ """Check good hashes against a file-like object
+
+ Raise HashMismatch if none match.
+
+ """
+ return self.check_against_chunks(read_chunks(file))
+
+ def check_against_path(self, path: str) -> None:
+ with open(path, "rb") as file:
+ return self.check_against_file(file)
+
+ def __bool__(self) -> bool:
+ """Return whether I know any known-good hashes."""
+ return bool(self._allowed)
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Hashes):
+ return NotImplemented
+ return self._allowed == other._allowed
+
+ def __hash__(self) -> int:
+ return hash(
+ ",".join(
+ sorted(
+ ":".join((alg, digest))
+ for alg, digest_list in self._allowed.items()
+ for digest in digest_list
+ )
+ )
+ )
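+
+
+# A usage sketch (not part of the original module; the path and digest are
+# hypothetical): build a Hashes object from allowed sha256 digests and check
+# a downloaded file against it.
+#
+#     allowed = Hashes(
+#         {"sha256": ["9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08"]}
+#     )
+#     allowed.check_against_path("downloads/example-1.0.tar.gz")
+#     # raises HashMismatch unless one allowed digest matches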
+
+
+class MissingHashes(Hashes):
+ """A workalike for Hashes used when we're missing a hash for a requirement
+
+ It computes the actual hash of the requirement and raises a HashMissing
+ exception showing it to the user.
+
+ """
+
+ def __init__(self) -> None:
+ """Don't offer the ``hashes`` kwarg."""
+ # Pass our favorite hash in to generate a "gotten hash". With the
+        # empty list, it will never match, so an error will always be raised.
+ super().__init__(hashes={FAVORITE_HASH: []})
+
+ def _raise(self, gots: Dict[str, "_Hash"]) -> "NoReturn":
+ raise HashMissing(gots[FAVORITE_HASH].hexdigest())
diff --git a/third_party/python/pip/pip/_internal/utils/inject_securetransport.py b/third_party/python/pip/pip/_internal/utils/inject_securetransport.py
new file mode 100644
index 0000000000..276aa79bb8
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/inject_securetransport.py
@@ -0,0 +1,35 @@
+"""A helper module that injects SecureTransport, on import.
+
+The import should be done as early as possible, to ensure all requests and
+sessions (or whatever) are created after injecting SecureTransport.
+
+Note that we only do the injection on macOS, when the linked OpenSSL is too
+old to handle TLSv1.2.
+"""
+
+import sys
+
+
+def inject_securetransport() -> None:
+ # Only relevant on macOS
+ if sys.platform != "darwin":
+ return
+
+ try:
+ import ssl
+ except ImportError:
+ return
+
+    # OpenSSL 1.0.1 and newer can handle TLSv1.2, so no injection is needed.
+ if ssl.OPENSSL_VERSION_NUMBER >= 0x1000100F:
+ return
+
+ try:
+ from pip._vendor.urllib3.contrib import securetransport
+ except (ImportError, OSError):
+ return
+
+ securetransport.inject_into_urllib3()
+
+
+inject_securetransport()
diff --git a/third_party/python/pip/pip/_internal/utils/logging.py b/third_party/python/pip/pip/_internal/utils/logging.py
new file mode 100644
index 0000000000..c10e1f4ced
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/logging.py
@@ -0,0 +1,348 @@
+import contextlib
+import errno
+import logging
+import logging.config
+import logging.handlers
+import os
+import sys
+import threading
+from dataclasses import dataclass
+from io import TextIOWrapper
+from logging import Filter
+from typing import Any, ClassVar, Generator, List, Optional, TextIO, Type
+
+from pip._vendor.rich.console import (
+ Console,
+ ConsoleOptions,
+ ConsoleRenderable,
+ RenderableType,
+ RenderResult,
+ RichCast,
+)
+from pip._vendor.rich.highlighter import NullHighlighter
+from pip._vendor.rich.logging import RichHandler
+from pip._vendor.rich.segment import Segment
+from pip._vendor.rich.style import Style
+
+from pip._internal.utils._log import VERBOSE, getLogger
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.deprecation import DEPRECATION_MSG_PREFIX
+from pip._internal.utils.misc import ensure_dir
+
+_log_state = threading.local()
+subprocess_logger = getLogger("pip.subprocessor")
+
+
+class BrokenStdoutLoggingError(Exception):
+ """
+ Raised if BrokenPipeError occurs for the stdout stream while logging.
+ """
+
+
+def _is_broken_pipe_error(exc_class: Type[BaseException], exc: BaseException) -> bool:
+ if exc_class is BrokenPipeError:
+ return True
+
+ # On Windows, a broken pipe can show up as EINVAL rather than EPIPE:
+ # https://bugs.python.org/issue19612
+ # https://bugs.python.org/issue30418
+ if not WINDOWS:
+ return False
+
+ return isinstance(exc, OSError) and exc.errno in (errno.EINVAL, errno.EPIPE)
+
+
+@contextlib.contextmanager
+def indent_log(num: int = 2) -> Generator[None, None, None]:
+ """
+ A context manager which will cause the log output to be indented for any
+ log messages emitted inside it.
+ """
+ # For thread-safety
+ _log_state.indentation = get_indentation()
+ _log_state.indentation += num
+ try:
+ yield
+ finally:
+ _log_state.indentation -= num
+
+
+def get_indentation() -> int:
+ return getattr(_log_state, "indentation", 0)
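+
+
+# A usage sketch (illustrative): messages logged inside the context manager
+# are indented by two extra spaces once rendered by IndentingFormatter below.
+#
+#     logger.info("Installing collected packages: example")
+#     with indent_log():
+#         logger.info("Running setup.py install for example")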
+
+
+class IndentingFormatter(logging.Formatter):
+ default_time_format = "%Y-%m-%dT%H:%M:%S"
+
+ def __init__(
+ self,
+ *args: Any,
+ add_timestamp: bool = False,
+ **kwargs: Any,
+ ) -> None:
+ """
+ A logging.Formatter that obeys the indent_log() context manager.
+
+        :param add_timestamp: A bool indicating whether output lines should
+            be prefixed with their record's timestamp.
+ """
+ self.add_timestamp = add_timestamp
+ super().__init__(*args, **kwargs)
+
+ def get_message_start(self, formatted: str, levelno: int) -> str:
+ """
+ Return the start of the formatted log message (not counting the
+ prefix to add to each line).
+ """
+ if levelno < logging.WARNING:
+ return ""
+ if formatted.startswith(DEPRECATION_MSG_PREFIX):
+ # Then the message already has a prefix. We don't want it to
+ # look like "WARNING: DEPRECATION: ...."
+ return ""
+ if levelno < logging.ERROR:
+ return "WARNING: "
+
+ return "ERROR: "
+
+ def format(self, record: logging.LogRecord) -> str:
+ """
+ Calls the standard formatter, but will indent all of the log message
+ lines by our current indentation level.
+ """
+ formatted = super().format(record)
+ message_start = self.get_message_start(formatted, record.levelno)
+ formatted = message_start + formatted
+
+ prefix = ""
+ if self.add_timestamp:
+ prefix = f"{self.formatTime(record)} "
+ prefix += " " * get_indentation()
+ formatted = "".join([prefix + line for line in formatted.splitlines(True)])
+ return formatted
+
+
+@dataclass
+class IndentedRenderable:
+ renderable: RenderableType
+ indent: int
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ segments = console.render(self.renderable, options)
+ lines = Segment.split_lines(segments)
+ for line in lines:
+ yield Segment(" " * self.indent)
+ yield from line
+ yield Segment("\n")
+
+
+class RichPipStreamHandler(RichHandler):
+ KEYWORDS: ClassVar[Optional[List[str]]] = []
+
+ def __init__(self, stream: Optional[TextIO], no_color: bool) -> None:
+ super().__init__(
+ console=Console(file=stream, no_color=no_color, soft_wrap=True),
+ show_time=False,
+ show_level=False,
+ show_path=False,
+ highlighter=NullHighlighter(),
+ )
+
+ # Our custom override on Rich's logger, to make things work as we need them to.
+ def emit(self, record: logging.LogRecord) -> None:
+ style: Optional[Style] = None
+
+ # If we are given a diagnostic error to present, present it with indentation.
+ assert isinstance(record.args, tuple)
+ if record.msg == "[present-rich] %s" and len(record.args) == 1:
+ rich_renderable = record.args[0]
+ assert isinstance(
+ rich_renderable, (ConsoleRenderable, RichCast, str)
+ ), f"{rich_renderable} is not rich-console-renderable"
+
+ renderable: RenderableType = IndentedRenderable(
+ rich_renderable, indent=get_indentation()
+ )
+ else:
+ message = self.format(record)
+ renderable = self.render_message(record, message)
+ if record.levelno is not None:
+ if record.levelno >= logging.ERROR:
+ style = Style(color="red")
+ elif record.levelno >= logging.WARNING:
+ style = Style(color="yellow")
+
+ try:
+ self.console.print(renderable, overflow="ignore", crop=False, style=style)
+ except Exception:
+ self.handleError(record)
+
+ def handleError(self, record: logging.LogRecord) -> None:
+ """Called when logging is unable to log some output."""
+
+ exc_class, exc = sys.exc_info()[:2]
+ # If a broken pipe occurred while calling write() or flush() on the
+ # stdout stream in logging's Handler.emit(), then raise our special
+ # exception so we can handle it in main() instead of logging the
+ # broken pipe error and continuing.
+ if (
+ exc_class
+ and exc
+ and self.console.file is sys.stdout
+ and _is_broken_pipe_error(exc_class, exc)
+ ):
+ raise BrokenStdoutLoggingError()
+
+ return super().handleError(record)
+
+
+class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
+ def _open(self) -> TextIOWrapper:
+ ensure_dir(os.path.dirname(self.baseFilename))
+ return super()._open()
+
+
+class MaxLevelFilter(Filter):
+ def __init__(self, level: int) -> None:
+ self.level = level
+
+ def filter(self, record: logging.LogRecord) -> bool:
+ return record.levelno < self.level
+
+
+class ExcludeLoggerFilter(Filter):
+
+ """
+ A logging Filter that excludes records from a logger (or its children).
+ """
+
+ def filter(self, record: logging.LogRecord) -> bool:
+ # The base Filter class allows only records from a logger (or its
+ # children).
+ return not super().filter(record)
+
+
+def setup_logging(verbosity: int, no_color: bool, user_log_file: Optional[str]) -> int:
+ """Configures and sets up all of the logging
+
+ Returns the requested logging level, as its integer value.
+ """
+
+ # Determine the level to be logging at.
+ if verbosity >= 2:
+ level_number = logging.DEBUG
+ elif verbosity == 1:
+ level_number = VERBOSE
+ elif verbosity == -1:
+ level_number = logging.WARNING
+ elif verbosity == -2:
+ level_number = logging.ERROR
+ elif verbosity <= -3:
+ level_number = logging.CRITICAL
+ else:
+ level_number = logging.INFO
+
+ level = logging.getLevelName(level_number)
+
+ # The "root" logger should match the "console" level *unless* we also need
+ # to log to a user log file.
+ include_user_log = user_log_file is not None
+ if include_user_log:
+ additional_log_file = user_log_file
+ root_level = "DEBUG"
+ else:
+ additional_log_file = "/dev/null"
+ root_level = level
+
+ # Disable any logging besides WARNING unless we have DEBUG level logging
+ # enabled for vendored libraries.
+ vendored_log_level = "WARNING" if level in ["INFO", "ERROR"] else "DEBUG"
+
+ # Shorthands for clarity
+ log_streams = {
+ "stdout": "ext://sys.stdout",
+ "stderr": "ext://sys.stderr",
+ }
+ handler_classes = {
+ "stream": "pip._internal.utils.logging.RichPipStreamHandler",
+ "file": "pip._internal.utils.logging.BetterRotatingFileHandler",
+ }
+ handlers = ["console", "console_errors", "console_subprocess"] + (
+ ["user_log"] if include_user_log else []
+ )
+
+ logging.config.dictConfig(
+ {
+ "version": 1,
+ "disable_existing_loggers": False,
+ "filters": {
+ "exclude_warnings": {
+ "()": "pip._internal.utils.logging.MaxLevelFilter",
+ "level": logging.WARNING,
+ },
+ "restrict_to_subprocess": {
+ "()": "logging.Filter",
+ "name": subprocess_logger.name,
+ },
+ "exclude_subprocess": {
+ "()": "pip._internal.utils.logging.ExcludeLoggerFilter",
+ "name": subprocess_logger.name,
+ },
+ },
+ "formatters": {
+ "indent": {
+ "()": IndentingFormatter,
+ "format": "%(message)s",
+ },
+ "indent_with_timestamp": {
+ "()": IndentingFormatter,
+ "format": "%(message)s",
+ "add_timestamp": True,
+ },
+ },
+ "handlers": {
+ "console": {
+ "level": level,
+ "class": handler_classes["stream"],
+ "no_color": no_color,
+ "stream": log_streams["stdout"],
+ "filters": ["exclude_subprocess", "exclude_warnings"],
+ "formatter": "indent",
+ },
+ "console_errors": {
+ "level": "WARNING",
+ "class": handler_classes["stream"],
+ "no_color": no_color,
+ "stream": log_streams["stderr"],
+ "filters": ["exclude_subprocess"],
+ "formatter": "indent",
+ },
+ # A handler responsible for logging to the console messages
+ # from the "subprocessor" logger.
+ "console_subprocess": {
+ "level": level,
+ "class": handler_classes["stream"],
+ "stream": log_streams["stderr"],
+ "no_color": no_color,
+ "filters": ["restrict_to_subprocess"],
+ "formatter": "indent",
+ },
+ "user_log": {
+ "level": "DEBUG",
+ "class": handler_classes["file"],
+ "filename": additional_log_file,
+ "encoding": "utf-8",
+ "delay": True,
+ "formatter": "indent_with_timestamp",
+ },
+ },
+ "root": {
+ "level": root_level,
+ "handlers": handlers,
+ },
+ "loggers": {"pip._vendor": {"level": vendored_log_level}},
+ }
+ )
+
+ return level_number
diff --git a/third_party/python/pip/pip/_internal/utils/misc.py b/third_party/python/pip/pip/_internal/utils/misc.py
new file mode 100644
index 0000000000..baa1ba7eac
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/misc.py
@@ -0,0 +1,739 @@
+# The following comment should be removed at some point in the future.
+# mypy: strict-optional=False
+
+import contextlib
+import errno
+import getpass
+import hashlib
+import io
+import logging
+import os
+import posixpath
+import shutil
+import stat
+import sys
+import sysconfig
+import urllib.parse
+from io import StringIO
+from itertools import filterfalse, tee, zip_longest
+from types import TracebackType
+from typing import (
+ Any,
+ BinaryIO,
+ Callable,
+ ContextManager,
+ Dict,
+ Generator,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ TextIO,
+ Tuple,
+ Type,
+ TypeVar,
+ cast,
+)
+
+from pip._vendor.pyproject_hooks import BuildBackendHookCaller
+from pip._vendor.tenacity import retry, stop_after_delay, wait_fixed
+
+from pip import __version__
+from pip._internal.exceptions import CommandError, ExternallyManagedEnvironment
+from pip._internal.locations import get_major_minor_version
+from pip._internal.utils.compat import WINDOWS
+from pip._internal.utils.virtualenv import running_under_virtualenv
+
+__all__ = [
+ "rmtree",
+ "display_path",
+ "backup_dir",
+ "ask",
+ "splitext",
+ "format_size",
+ "is_installable_dir",
+ "normalize_path",
+ "renames",
+ "get_prog",
+ "captured_stdout",
+ "ensure_dir",
+ "remove_auth_from_url",
+ "check_externally_managed",
+ "ConfiguredBuildBackendHookCaller",
+]
+
+logger = logging.getLogger(__name__)
+
+T = TypeVar("T")
+ExcInfo = Tuple[Type[BaseException], BaseException, TracebackType]
+VersionInfo = Tuple[int, int, int]
+NetlocTuple = Tuple[str, Tuple[Optional[str], Optional[str]]]
+
+
+def get_pip_version() -> str:
+ pip_pkg_dir = os.path.join(os.path.dirname(__file__), "..", "..")
+ pip_pkg_dir = os.path.abspath(pip_pkg_dir)
+
+ return "pip {} from {} (python {})".format(
+ __version__,
+ pip_pkg_dir,
+ get_major_minor_version(),
+ )
+
+
+def normalize_version_info(py_version_info: Tuple[int, ...]) -> Tuple[int, int, int]:
+ """
+ Convert a tuple of ints representing a Python version to one of length
+ three.
+
+ :param py_version_info: a tuple of ints representing a Python version,
+ or None to specify no version. The tuple can have any length.
+
+ :return: a tuple of length three if `py_version_info` is non-None.
+ Otherwise, return `py_version_info` unchanged (i.e. None).
+ """
+ if len(py_version_info) < 3:
+ py_version_info += (3 - len(py_version_info)) * (0,)
+ elif len(py_version_info) > 3:
+ py_version_info = py_version_info[:3]
+
+ return cast("VersionInfo", py_version_info)
+
+
+def ensure_dir(path: str) -> None:
+ """os.path.makedirs without EEXIST."""
+ try:
+ os.makedirs(path)
+ except OSError as e:
+ # Windows can raise spurious ENOTEMPTY errors. See #6426.
+ if e.errno != errno.EEXIST and e.errno != errno.ENOTEMPTY:
+ raise
+
+
+def get_prog() -> str:
+ try:
+ prog = os.path.basename(sys.argv[0])
+ if prog in ("__main__.py", "-c"):
+ return f"{sys.executable} -m pip"
+ else:
+ return prog
+ except (AttributeError, TypeError, IndexError):
+ pass
+ return "pip"
+
+
+# Retry every half second for up to 3 seconds
+# Tenacity raises RetryError by default; reraise=True re-raises the original exception
+@retry(reraise=True, stop=stop_after_delay(3), wait=wait_fixed(0.5))
+def rmtree(dir: str, ignore_errors: bool = False) -> None:
+ shutil.rmtree(dir, ignore_errors=ignore_errors, onerror=rmtree_errorhandler)
+
+
+def rmtree_errorhandler(func: Callable[..., Any], path: str, exc_info: ExcInfo) -> None:
+ """On Windows, the files in .svn are read-only, so when rmtree() tries to
+ remove them, an exception is thrown. We catch that here, remove the
+ read-only attribute, and hopefully continue without problems."""
+ try:
+ has_attr_readonly = not (os.stat(path).st_mode & stat.S_IWRITE)
+ except OSError:
+        # os.stat failing means the path no longer exists (equivalent to a
+        # failed os.path.exists check), so there is nothing to do.
+ return
+
+ if has_attr_readonly:
+ # convert to read/write
+ os.chmod(path, stat.S_IWRITE)
+ # use the original function to repeat the operation
+ func(path)
+ return
+ else:
+ raise
+
+
+def display_path(path: str) -> str:
+ """Gives the display value for a given path, making it relative to cwd
+ if possible."""
+ path = os.path.normcase(os.path.abspath(path))
+ if path.startswith(os.getcwd() + os.path.sep):
+ path = "." + path[len(os.getcwd()) :]
+ return path
+
+
+def backup_dir(dir: str, ext: str = ".bak") -> str:
+ """Figure out the name of a directory to back up the given dir to
+ (adding .bak, .bak2, etc)"""
+ n = 1
+ extension = ext
+ while os.path.exists(dir + extension):
+ n += 1
+ extension = ext + str(n)
+ return dir + extension
+
+
+def ask_path_exists(message: str, options: Iterable[str]) -> str:
+ for action in os.environ.get("PIP_EXISTS_ACTION", "").split():
+ if action in options:
+ return action
+ return ask(message, options)
+
+
+def _check_no_input(message: str) -> None:
+ """Raise an error if no input is allowed."""
+ if os.environ.get("PIP_NO_INPUT"):
+ raise Exception(
+ f"No input was expected ($PIP_NO_INPUT set); question: {message}"
+ )
+
+
+def ask(message: str, options: Iterable[str]) -> str:
+ """Ask the message interactively, with the given possible responses"""
+    while True:
+ _check_no_input(message)
+ response = input(message)
+ response = response.strip().lower()
+ if response not in options:
+ print(
+ "Your response ({!r}) was not one of the expected responses: "
+ "{}".format(response, ", ".join(options))
+ )
+ else:
+ return response
+
+
+def ask_input(message: str) -> str:
+ """Ask for input interactively."""
+ _check_no_input(message)
+ return input(message)
+
+
+def ask_password(message: str) -> str:
+ """Ask for a password interactively."""
+ _check_no_input(message)
+ return getpass.getpass(message)
+
+
+def strtobool(val: str) -> int:
+ """Convert a string representation of truth to true (1) or false (0).
+
+ True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+ are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+ 'val' is anything else.
+ """
+ val = val.lower()
+ if val in ("y", "yes", "t", "true", "on", "1"):
+ return 1
+ elif val in ("n", "no", "f", "false", "off", "0"):
+ return 0
+ else:
+ raise ValueError(f"invalid truth value {val!r}")
+
+
+def format_size(bytes: float) -> str:
+ if bytes > 1000 * 1000:
+ return "{:.1f} MB".format(bytes / 1000.0 / 1000)
+ elif bytes > 10 * 1000:
+ return "{} kB".format(int(bytes / 1000))
+ elif bytes > 1000:
+ return "{:.1f} kB".format(bytes / 1000.0)
+ else:
+ return "{} bytes".format(int(bytes))
+
+
+def tabulate(rows: Iterable[Iterable[Any]]) -> Tuple[List[str], List[int]]:
+ """Return a list of formatted rows and a list of column sizes.
+
+ For example::
+
+ >>> tabulate([['foobar', 2000], [0xdeadbeef]])
+ (['foobar 2000', '3735928559'], [10, 4])
+ """
+ rows = [tuple(map(str, row)) for row in rows]
+ sizes = [max(map(len, col)) for col in zip_longest(*rows, fillvalue="")]
+ table = [" ".join(map(str.ljust, row, sizes)).rstrip() for row in rows]
+ return table, sizes
+
+
+def is_installable_dir(path: str) -> bool:
+ """Is path is a directory containing pyproject.toml or setup.py?
+
+ If pyproject.toml exists, this is a PEP 517 project. Otherwise we look for
+ a legacy setuptools layout by identifying setup.py. We don't check for the
+ setup.cfg because using it without setup.py is only available for PEP 517
+ projects, which are already covered by the pyproject.toml check.
+ """
+ if not os.path.isdir(path):
+ return False
+ if os.path.isfile(os.path.join(path, "pyproject.toml")):
+ return True
+ if os.path.isfile(os.path.join(path, "setup.py")):
+ return True
+ return False
+
+
+def read_chunks(
+ file: BinaryIO, size: int = io.DEFAULT_BUFFER_SIZE
+) -> Generator[bytes, None, None]:
+ """Yield pieces of data from a file-like object until EOF."""
+ while True:
+ chunk = file.read(size)
+ if not chunk:
+ break
+ yield chunk
+
+
+def normalize_path(path: str, resolve_symlinks: bool = True) -> str:
+ """
+ Convert a path to its canonical, case-normalized, absolute version.
+
+ """
+ path = os.path.expanduser(path)
+ if resolve_symlinks:
+ path = os.path.realpath(path)
+ else:
+ path = os.path.abspath(path)
+ return os.path.normcase(path)
+
+
+def splitext(path: str) -> Tuple[str, str]:
+ """Like os.path.splitext, but take off .tar too"""
+ base, ext = posixpath.splitext(path)
+ if base.lower().endswith(".tar"):
+ ext = base[-4:] + ext
+ base = base[:-4]
+ return base, ext
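+
+
+# For example (illustrative):
+#
+#     splitext("dist/pkg-1.0.tar.gz")  # ("dist/pkg-1.0", ".tar.gz")
+#     splitext("pkg-1.0.zip")          # ("pkg-1.0", ".zip")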
+
+
+def renames(old: str, new: str) -> None:
+ """Like os.renames(), but handles renaming across devices."""
+ # Implementation borrowed from os.renames().
+ head, tail = os.path.split(new)
+ if head and tail and not os.path.exists(head):
+ os.makedirs(head)
+
+ shutil.move(old, new)
+
+ head, tail = os.path.split(old)
+ if head and tail:
+ try:
+ os.removedirs(head)
+ except OSError:
+ pass
+
+
+def is_local(path: str) -> bool:
+ """
+ Return True if path is within sys.prefix, if we're running in a virtualenv.
+
+ If we're not in a virtualenv, all paths are considered "local."
+
+ Caution: this function assumes the head of path has been normalized
+ with normalize_path.
+ """
+ if not running_under_virtualenv():
+ return True
+ return path.startswith(normalize_path(sys.prefix))
+
+
+def write_output(msg: Any, *args: Any) -> None:
+ logger.info(msg, *args)
+
+
+class StreamWrapper(StringIO):
+ orig_stream: TextIO = None
+
+ @classmethod
+ def from_stream(cls, orig_stream: TextIO) -> "StreamWrapper":
+ cls.orig_stream = orig_stream
+ return cls()
+
+ # compileall.compile_dir() needs stdout.encoding to print to stdout
+ # https://github.com/python/mypy/issues/4125
+ @property
+ def encoding(self): # type: ignore
+ return self.orig_stream.encoding
+
+
+@contextlib.contextmanager
+def captured_output(stream_name: str) -> Generator[StreamWrapper, None, None]:
+ """Return a context manager used by captured_stdout/stdin/stderr
+ that temporarily replaces the sys stream *stream_name* with a StringIO.
+
+ Taken from Lib/support/__init__.py in the CPython repo.
+ """
+ orig_stdout = getattr(sys, stream_name)
+ setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
+ try:
+ yield getattr(sys, stream_name)
+ finally:
+ setattr(sys, stream_name, orig_stdout)
+
+
+def captured_stdout() -> ContextManager[StreamWrapper]:
+ """Capture the output of sys.stdout:
+
+ with captured_stdout() as stdout:
+ print('hello')
+ self.assertEqual(stdout.getvalue(), 'hello\n')
+
+ Taken from Lib/support/__init__.py in the CPython repo.
+ """
+ return captured_output("stdout")
+
+
+def captured_stderr() -> ContextManager[StreamWrapper]:
+ """
+ See captured_stdout().
+ """
+ return captured_output("stderr")
+
+
+# Simulates an enum
+def enum(*sequential: Any, **named: Any) -> Type[Any]:
+ enums = dict(zip(sequential, range(len(sequential))), **named)
+ reverse = {value: key for key, value in enums.items()}
+ enums["reverse_mapping"] = reverse
+ return type("Enum", (), enums)
+
+
+def build_netloc(host: str, port: Optional[int]) -> str:
+ """
+ Build a netloc from a host-port pair
+ """
+ if port is None:
+ return host
+ if ":" in host:
+ # Only wrap host with square brackets when it is IPv6
+ host = f"[{host}]"
+ return f"{host}:{port}"
+
+
+def build_url_from_netloc(netloc: str, scheme: str = "https") -> str:
+ """
+ Build a full URL from a netloc.
+ """
+ if netloc.count(":") >= 2 and "@" not in netloc and "[" not in netloc:
+ # It must be a bare IPv6 address, so wrap it with brackets.
+ netloc = f"[{netloc}]"
+ return f"{scheme}://{netloc}"
+
+
+def parse_netloc(netloc: str) -> Tuple[str, Optional[int]]:
+ """
+ Return the host-port pair from a netloc.
+ """
+ url = build_url_from_netloc(netloc)
+ parsed = urllib.parse.urlparse(url)
+ return parsed.hostname, parsed.port
+
+
+def split_auth_from_netloc(netloc: str) -> NetlocTuple:
+ """
+ Parse out and remove the auth information from a netloc.
+
+ Returns: (netloc, (username, password)).
+ """
+ if "@" not in netloc:
+ return netloc, (None, None)
+
+ # Split from the right because that's how urllib.parse.urlsplit()
+ # behaves if more than one @ is present (which can be checked using
+ # the password attribute of urlsplit()'s return value).
+ auth, netloc = netloc.rsplit("@", 1)
+ pw: Optional[str] = None
+ if ":" in auth:
+ # Split from the left because that's how urllib.parse.urlsplit()
+ # behaves if more than one : is present (which again can be checked
+ # using the password attribute of the return value)
+ user, pw = auth.split(":", 1)
+ else:
+ user, pw = auth, None
+
+ user = urllib.parse.unquote(user)
+ if pw is not None:
+ pw = urllib.parse.unquote(pw)
+
+ return netloc, (user, pw)
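+
+
+# For example (illustrative; the credentials are fake):
+#
+#     split_auth_from_netloc("user:s3cret@example.com")
+#     # -> ("example.com", ("user", "s3cret"))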
+
+
+def redact_netloc(netloc: str) -> str:
+ """
+ Replace the sensitive data in a netloc with "****", if it exists.
+
+ For example:
+ - "user:pass@example.com" returns "user:****@example.com"
+ - "accesstoken@example.com" returns "****@example.com"
+ """
+ netloc, (user, password) = split_auth_from_netloc(netloc)
+ if user is None:
+ return netloc
+ if password is None:
+ user = "****"
+ password = ""
+ else:
+ user = urllib.parse.quote(user)
+ password = ":****"
+ return "{user}{password}@{netloc}".format(
+ user=user, password=password, netloc=netloc
+ )
+
+
+def _transform_url(
+ url: str, transform_netloc: Callable[[str], Tuple[Any, ...]]
+) -> Tuple[str, NetlocTuple]:
+ """Transform and replace netloc in a url.
+
+ transform_netloc is a function taking the netloc and returning a
+ tuple. The first element of this tuple is the new netloc. The
+ entire tuple is returned.
+
+ Returns a tuple containing the transformed url as item 0 and the
+ original tuple returned by transform_netloc as item 1.
+ """
+ purl = urllib.parse.urlsplit(url)
+ netloc_tuple = transform_netloc(purl.netloc)
+ # stripped url
+ url_pieces = (purl.scheme, netloc_tuple[0], purl.path, purl.query, purl.fragment)
+ surl = urllib.parse.urlunsplit(url_pieces)
+ return surl, cast("NetlocTuple", netloc_tuple)
+
+
+def _get_netloc(netloc: str) -> NetlocTuple:
+ return split_auth_from_netloc(netloc)
+
+
+def _redact_netloc(netloc: str) -> Tuple[str]:
+ return (redact_netloc(netloc),)
+
+
+def split_auth_netloc_from_url(url: str) -> Tuple[str, str, Tuple[str, str]]:
+ """
+ Parse a url into separate netloc, auth, and url with no auth.
+
+ Returns: (url_without_auth, netloc, (username, password))
+ """
+ url_without_auth, (netloc, auth) = _transform_url(url, _get_netloc)
+ return url_without_auth, netloc, auth
+
+
+def remove_auth_from_url(url: str) -> str:
+ """Return a copy of url with 'username:password@' removed."""
+ # username/pass params are passed to subversion through flags
+ # and are not recognized in the url.
+ return _transform_url(url, _get_netloc)[0]
+
+
+def redact_auth_from_url(url: str) -> str:
+ """Replace the password in a given url with ****."""
+ return _transform_url(url, _redact_netloc)[0]
+
+
+class HiddenText:
+ def __init__(self, secret: str, redacted: str) -> None:
+ self.secret = secret
+ self.redacted = redacted
+
+ def __repr__(self) -> str:
+ return "<HiddenText {!r}>".format(str(self))
+
+ def __str__(self) -> str:
+ return self.redacted
+
+ # This is useful for testing.
+ def __eq__(self, other: Any) -> bool:
+        if type(self) is not type(other):
+ return False
+
+        # Only the raw, original string has to match, not the string
+        # being used for redaction.
+ return self.secret == other.secret
+
+
+def hide_value(value: str) -> HiddenText:
+ return HiddenText(value, redacted="****")
+
+
+def hide_url(url: str) -> HiddenText:
+ redacted = redact_auth_from_url(url)
+ return HiddenText(url, redacted=redacted)
+
+
+def protect_pip_from_modification_on_windows(modifying_pip: bool) -> None:
+ """Protection of pip.exe from modification on Windows
+
+ On Windows, any operation modifying pip should be run as:
+ python -m pip ...
+ """
+ pip_names = [
+ "pip",
+ f"pip{sys.version_info.major}",
+ f"pip{sys.version_info.major}.{sys.version_info.minor}",
+ ]
+
+ # See https://github.com/pypa/pip/issues/1299 for more discussion
+ should_show_use_python_msg = (
+ modifying_pip and WINDOWS and os.path.basename(sys.argv[0]) in pip_names
+ )
+
+ if should_show_use_python_msg:
+ new_command = [sys.executable, "-m", "pip"] + sys.argv[1:]
+ raise CommandError(
+ "To modify pip, please run the following command:\n{}".format(
+ " ".join(new_command)
+ )
+ )
+
+
+def check_externally_managed() -> None:
+ """Check whether the current environment is externally managed.
+
+ If the ``EXTERNALLY-MANAGED`` config file is found, the current environment
+ is considered externally managed, and an ExternallyManagedEnvironment is
+ raised.
+ """
+ if running_under_virtualenv():
+ return
+ marker = os.path.join(sysconfig.get_path("stdlib"), "EXTERNALLY-MANAGED")
+ if not os.path.isfile(marker):
+ return
+ raise ExternallyManagedEnvironment.from_config(marker)
+
+
+def is_console_interactive() -> bool:
+ """Is this console interactive?"""
+ return sys.stdin is not None and sys.stdin.isatty()
+
+
+def hash_file(path: str, blocksize: int = 1 << 20) -> Tuple[Any, int]:
+ """Return (hash, length) for path using hashlib.sha256()"""
+
+ h = hashlib.sha256()
+ length = 0
+ with open(path, "rb") as f:
+ for block in read_chunks(f, size=blocksize):
+ length += len(block)
+ h.update(block)
+ return h, length
+
+
+def is_wheel_installed() -> bool:
+ """
+ Return whether the wheel package is installed.
+ """
+ try:
+ import wheel # noqa: F401
+ except ImportError:
+ return False
+
+ return True
+
+
+def pairwise(iterable: Iterable[Any]) -> Iterator[Tuple[Any, Any]]:
+ """
+ Return paired elements.
+
+ For example:
+ s -> (s0, s1), (s2, s3), (s4, s5), ...
+ """
+ iterable = iter(iterable)
+ return zip_longest(iterable, iterable)
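+
+
+# For example (illustrative); note that zip_longest pads an odd-length
+# iterable with None:
+#
+#     list(pairwise([1, 2, 3, 4]))  # [(1, 2), (3, 4)]
+#     list(pairwise([1, 2, 3]))     # [(1, 2), (3, None)]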
+
+
+def partition(
+ pred: Callable[[T], bool],
+ iterable: Iterable[T],
+) -> Tuple[Iterable[T], Iterable[T]]:
+ """
+ Use a predicate to partition entries into false entries and true entries,
+ like
+
+ partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
+ """
+ t1, t2 = tee(iterable)
+ return filterfalse(pred, t1), filter(pred, t2)
+
+
+class ConfiguredBuildBackendHookCaller(BuildBackendHookCaller):
+ def __init__(
+ self,
+ config_holder: Any,
+ source_dir: str,
+ build_backend: str,
+ backend_path: Optional[str] = None,
+ runner: Optional[Callable[..., None]] = None,
+ python_executable: Optional[str] = None,
+ ):
+ super().__init__(
+ source_dir, build_backend, backend_path, runner, python_executable
+ )
+ self.config_holder = config_holder
+
+ def build_wheel(
+ self,
+ wheel_directory: str,
+ config_settings: Optional[Dict[str, str]] = None,
+ metadata_directory: Optional[str] = None,
+ ) -> str:
+ cs = self.config_holder.config_settings
+ return super().build_wheel(
+ wheel_directory, config_settings=cs, metadata_directory=metadata_directory
+ )
+
+ def build_sdist(
+ self, sdist_directory: str, config_settings: Optional[Dict[str, str]] = None
+ ) -> str:
+ cs = self.config_holder.config_settings
+ return super().build_sdist(sdist_directory, config_settings=cs)
+
+ def build_editable(
+ self,
+ wheel_directory: str,
+ config_settings: Optional[Dict[str, str]] = None,
+ metadata_directory: Optional[str] = None,
+ ) -> str:
+ cs = self.config_holder.config_settings
+ return super().build_editable(
+ wheel_directory, config_settings=cs, metadata_directory=metadata_directory
+ )
+
+ def get_requires_for_build_wheel(
+ self, config_settings: Optional[Dict[str, str]] = None
+ ) -> List[str]:
+ cs = self.config_holder.config_settings
+ return super().get_requires_for_build_wheel(config_settings=cs)
+
+ def get_requires_for_build_sdist(
+ self, config_settings: Optional[Dict[str, str]] = None
+ ) -> List[str]:
+ cs = self.config_holder.config_settings
+ return super().get_requires_for_build_sdist(config_settings=cs)
+
+ def get_requires_for_build_editable(
+ self, config_settings: Optional[Dict[str, str]] = None
+ ) -> List[str]:
+ cs = self.config_holder.config_settings
+ return super().get_requires_for_build_editable(config_settings=cs)
+
+ def prepare_metadata_for_build_wheel(
+ self,
+ metadata_directory: str,
+ config_settings: Optional[Dict[str, str]] = None,
+ _allow_fallback: bool = True,
+ ) -> str:
+ cs = self.config_holder.config_settings
+ return super().prepare_metadata_for_build_wheel(
+ metadata_directory=metadata_directory,
+ config_settings=cs,
+ _allow_fallback=_allow_fallback,
+ )
+
+ def prepare_metadata_for_build_editable(
+ self,
+ metadata_directory: str,
+ config_settings: Optional[Dict[str, str]] = None,
+ _allow_fallback: bool = True,
+ ) -> str:
+ cs = self.config_holder.config_settings
+ return super().prepare_metadata_for_build_editable(
+ metadata_directory=metadata_directory,
+ config_settings=cs,
+ _allow_fallback=_allow_fallback,
+ )
diff --git a/third_party/python/pip/pip/_internal/utils/models.py b/third_party/python/pip/pip/_internal/utils/models.py
new file mode 100644
index 0000000000..b6bb21a8b2
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/models.py
@@ -0,0 +1,39 @@
+"""Utilities for defining models
+"""
+
+import operator
+from typing import Any, Callable, Type
+
+
+class KeyBasedCompareMixin:
+ """Provides comparison capabilities that is based on a key"""
+
+ __slots__ = ["_compare_key", "_defining_class"]
+
+ def __init__(self, key: Any, defining_class: Type["KeyBasedCompareMixin"]) -> None:
+ self._compare_key = key
+ self._defining_class = defining_class
+
+ def __hash__(self) -> int:
+ return hash(self._compare_key)
+
+ def __lt__(self, other: Any) -> bool:
+ return self._compare(other, operator.__lt__)
+
+ def __le__(self, other: Any) -> bool:
+ return self._compare(other, operator.__le__)
+
+ def __gt__(self, other: Any) -> bool:
+ return self._compare(other, operator.__gt__)
+
+ def __ge__(self, other: Any) -> bool:
+ return self._compare(other, operator.__ge__)
+
+ def __eq__(self, other: Any) -> bool:
+ return self._compare(other, operator.__eq__)
+
+ def _compare(self, other: Any, method: Callable[[Any, Any], bool]) -> bool:
+ if not isinstance(other, self._defining_class):
+ return NotImplemented
+
+ return method(self._compare_key, other._compare_key)
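+
+
+# A minimal subclass sketch (not part of the original module): passing the
+# subclass itself as defining_class restricts comparisons to that type.
+#
+#     class Candidate(KeyBasedCompareMixin):
+#         def __init__(self, version: int) -> None:
+#             super().__init__(key=version, defining_class=Candidate)
+#
+#     Candidate(2) > Candidate(1)  # True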
diff --git a/third_party/python/pip/pip/_internal/utils/packaging.py b/third_party/python/pip/pip/_internal/utils/packaging.py
new file mode 100644
index 0000000000..b9f6af4d17
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/packaging.py
@@ -0,0 +1,57 @@
+import functools
+import logging
+import re
+from typing import NewType, Optional, Tuple, cast
+
+from pip._vendor.packaging import specifiers, version
+from pip._vendor.packaging.requirements import Requirement
+
+NormalizedExtra = NewType("NormalizedExtra", str)
+
+logger = logging.getLogger(__name__)
+
+
+def check_requires_python(
+ requires_python: Optional[str], version_info: Tuple[int, ...]
+) -> bool:
+ """
+ Check if the given Python version matches a "Requires-Python" specifier.
+
+ :param version_info: A 3-tuple of ints representing a Python
+ major-minor-micro version to check (e.g. `sys.version_info[:3]`).
+
+ :return: `True` if the given Python version satisfies the requirement.
+ Otherwise, return `False`.
+
+ :raises InvalidSpecifier: If `requires_python` has an invalid format.
+ """
+ if requires_python is None:
+ # The package provides no information
+ return True
+ requires_python_specifier = specifiers.SpecifierSet(requires_python)
+
+ python_version = version.parse(".".join(map(str, version_info)))
+ return python_version in requires_python_specifier
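+
+
+# For example (illustrative):
+#
+#     check_requires_python(">=3.7", (3, 6, 15))  # False
+#     check_requires_python(">=3.7", (3, 11, 0))  # True
+#     check_requires_python(None, (3, 11, 0))     # True (no constraint)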
+
+
+@functools.lru_cache(maxsize=512)
+def get_requirement(req_string: str) -> Requirement:
+ """Construct a packaging.Requirement object with caching"""
+ # Parsing requirement strings is expensive, and is also expected to happen
+ # with a low diversity of different arguments (at least relative the number
+ # constructed). This method adds a cache to requirement object creation to
+ # minimize repeated parsing of the same string to construct equivalent
+ # Requirement objects.
+ return Requirement(req_string)
+
+
+def safe_extra(extra: str) -> NormalizedExtra:
+ """Convert an arbitrary string to a standard 'extra' name
+
+    Any runs of characters other than ASCII letters, digits, '.' and '-'
+    are replaced with a single '_', and the result is always lowercased.
+
+    This function is duplicated from ``pkg_resources``. Note that this is not
+    the same as either ``canonicalize_name`` or ``_egg_link_name``.
+ """
+ return cast(NormalizedExtra, re.sub("[^A-Za-z0-9.-]+", "_", extra).lower())
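+
+
+# For example (illustrative):
+#
+#     safe_extra("Quoted Printable!")  # "quoted_printable_"
+#     safe_extra("PDF-Support")        # "pdf-support" ('.' and '-' survive)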
diff --git a/third_party/python/pip/pip/_internal/utils/setuptools_build.py b/third_party/python/pip/pip/_internal/utils/setuptools_build.py
new file mode 100644
index 0000000000..01ef4a4ca5
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/setuptools_build.py
@@ -0,0 +1,195 @@
+import sys
+import textwrap
+from typing import List, Optional, Sequence
+
+# Shim to wrap setup.py invocation with setuptools
+# Note that __file__ is handled via two {!r} *and* %r, to ensure that paths on
+# Windows are correctly handled (it should be "C:\\Users" not "C:\Users").
+_SETUPTOOLS_SHIM = textwrap.dedent(
+ """
+ exec(compile('''
+ # This is <pip-setuptools-caller> -- a caller that pip uses to run setup.py
+ #
+ # - It imports setuptools before invoking setup.py, to enable projects that directly
+ # import from `distutils.core` to work with newer packaging standards.
+ # - It provides a clear error message when setuptools is not installed.
+ # - It sets `sys.argv[0]` to the underlying `setup.py`, when invoking `setup.py` so
+ # setuptools doesn't think the script is `-c`. This avoids the following warning:
+    #   manifest_maker: standard file '-c' not found.
+ # - It generates a shim setup.py, for handling setup.cfg-only projects.
+ import os, sys, tokenize
+
+ try:
+ import setuptools
+ except ImportError as error:
+ print(
+ "ERROR: Can not execute `setup.py` since setuptools is not available in "
+ "the build environment.",
+ file=sys.stderr,
+ )
+ sys.exit(1)
+
+ __file__ = %r
+ sys.argv[0] = __file__
+
+ if os.path.exists(__file__):
+ filename = __file__
+ with tokenize.open(__file__) as f:
+ setup_py_code = f.read()
+ else:
+ filename = "<auto-generated setuptools caller>"
+ setup_py_code = "from setuptools import setup; setup()"
+
+ exec(compile(setup_py_code, filename, "exec"))
+ ''' % ({!r},), "<pip-setuptools-caller>", "exec"))
+ """
+).rstrip()
+
+
+def make_setuptools_shim_args(
+ setup_py_path: str,
+ global_options: Optional[Sequence[str]] = None,
+ no_user_config: bool = False,
+ unbuffered_output: bool = False,
+) -> List[str]:
+ """
+ Get setuptools command arguments with shim wrapped setup file invocation.
+
+ :param setup_py_path: The path to setup.py to be wrapped.
+ :param global_options: Additional global options.
+ :param no_user_config: If True, disables personal user configuration.
+ :param unbuffered_output: If True, adds the unbuffered switch to the
+ argument list.
+ """
+ args = [sys.executable]
+ if unbuffered_output:
+ args += ["-u"]
+ args += ["-c", _SETUPTOOLS_SHIM.format(setup_py_path)]
+ if global_options:
+ args += global_options
+ if no_user_config:
+ args += ["--no-user-cfg"]
+ return args
+
+
+def make_setuptools_bdist_wheel_args(
+ setup_py_path: str,
+ global_options: Sequence[str],
+ build_options: Sequence[str],
+ destination_dir: str,
+) -> List[str]:
+    # NOTE: Eventually, we'd want to also pass -S to the flags here, when we're
+ # isolating. Currently, it breaks Python in virtualenvs, because it
+ # relies on site.py to find parts of the standard library outside the
+ # virtualenv.
+ args = make_setuptools_shim_args(
+ setup_py_path, global_options=global_options, unbuffered_output=True
+ )
+ args += ["bdist_wheel", "-d", destination_dir]
+ args += build_options
+ return args
+
+
+def make_setuptools_clean_args(
+ setup_py_path: str,
+ global_options: Sequence[str],
+) -> List[str]:
+ args = make_setuptools_shim_args(
+ setup_py_path, global_options=global_options, unbuffered_output=True
+ )
+ args += ["clean", "--all"]
+ return args
+
+
+def make_setuptools_develop_args(
+ setup_py_path: str,
+ global_options: Sequence[str],
+ install_options: Sequence[str],
+ no_user_config: bool,
+ prefix: Optional[str],
+ home: Optional[str],
+ use_user_site: bool,
+) -> List[str]:
+ assert not (use_user_site and prefix)
+
+ args = make_setuptools_shim_args(
+ setup_py_path,
+ global_options=global_options,
+ no_user_config=no_user_config,
+ )
+
+ args += ["develop", "--no-deps"]
+
+ args += install_options
+
+ if prefix:
+ args += ["--prefix", prefix]
+ if home is not None:
+ args += ["--install-dir", home]
+
+ if use_user_site:
+ args += ["--user", "--prefix="]
+
+ return args
+
+
+def make_setuptools_egg_info_args(
+ setup_py_path: str,
+ egg_info_dir: Optional[str],
+ no_user_config: bool,
+) -> List[str]:
+ args = make_setuptools_shim_args(setup_py_path, no_user_config=no_user_config)
+
+ args += ["egg_info"]
+
+ if egg_info_dir:
+ args += ["--egg-base", egg_info_dir]
+
+ return args
+
+
+def make_setuptools_install_args(
+ setup_py_path: str,
+ global_options: Sequence[str],
+ install_options: Sequence[str],
+ record_filename: str,
+ root: Optional[str],
+ prefix: Optional[str],
+ header_dir: Optional[str],
+ home: Optional[str],
+ use_user_site: bool,
+ no_user_config: bool,
+ pycompile: bool,
+) -> List[str]:
+ assert not (use_user_site and prefix)
+ assert not (use_user_site and root)
+
+ args = make_setuptools_shim_args(
+ setup_py_path,
+ global_options=global_options,
+ no_user_config=no_user_config,
+ unbuffered_output=True,
+ )
+ args += ["install", "--record", record_filename]
+ args += ["--single-version-externally-managed"]
+
+ if root is not None:
+ args += ["--root", root]
+ if prefix is not None:
+ args += ["--prefix", prefix]
+ if home is not None:
+ args += ["--home", home]
+ if use_user_site:
+ args += ["--user", "--prefix="]
+
+ if pycompile:
+ args += ["--compile"]
+ else:
+ args += ["--no-compile"]
+
+ if header_dir:
+ args += ["--install-headers", header_dir]
+
+ args += install_options
+
+ return args
diff --git a/third_party/python/pip/pip/_internal/utils/subprocess.py b/third_party/python/pip/pip/_internal/utils/subprocess.py
new file mode 100644
index 0000000000..1e8ff50edf
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/subprocess.py
@@ -0,0 +1,260 @@
+import logging
+import os
+import shlex
+import subprocess
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Iterable,
+ List,
+ Mapping,
+ Optional,
+ Union,
+)
+
+from pip._vendor.rich.markup import escape
+
+from pip._internal.cli.spinners import SpinnerInterface, open_spinner
+from pip._internal.exceptions import InstallationSubprocessError
+from pip._internal.utils.logging import VERBOSE, subprocess_logger
+from pip._internal.utils.misc import HiddenText
+
+if TYPE_CHECKING:
+ # Literal was introduced in Python 3.8.
+ #
+ # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7.
+ from typing import Literal
+
+CommandArgs = List[Union[str, HiddenText]]
+
+
+def make_command(*args: Union[str, HiddenText, CommandArgs]) -> CommandArgs:
+ """
+ Create a CommandArgs object.
+ """
+ command_args: CommandArgs = []
+ for arg in args:
+ # Check for list instead of CommandArgs since CommandArgs is
+ # only known during type-checking.
+ if isinstance(arg, list):
+ command_args.extend(arg)
+ else:
+ # Otherwise, arg is str or HiddenText.
+ command_args.append(arg)
+
+ return command_args
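+
+
+# For example (illustrative; hidden_url stands in for a HiddenText instance):
+#
+#     make_command("git", ["clone", "-q"], hidden_url)
+#     # -> ["git", "clone", "-q", hidden_url]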
+
+
+def format_command_args(args: Union[List[str], CommandArgs]) -> str:
+ """
+ Format command arguments for display.
+ """
+ # For HiddenText arguments, display the redacted form by calling str().
+ # Also, we don't apply str() to arguments that aren't HiddenText since
+ # this can trigger a UnicodeDecodeError in Python 2 if the argument
+ # has type unicode and includes a non-ascii character. (The type
+ # checker doesn't ensure the annotations are correct in all cases.)
+ return " ".join(
+ shlex.quote(str(arg)) if isinstance(arg, HiddenText) else shlex.quote(arg)
+ for arg in args
+ )
+
+
+def reveal_command_args(args: Union[List[str], CommandArgs]) -> List[str]:
+ """
+ Return the arguments in their raw, unredacted form.
+ """
+ return [arg.secret if isinstance(arg, HiddenText) else arg for arg in args]
+
+
+def call_subprocess(
+ cmd: Union[List[str], CommandArgs],
+ show_stdout: bool = False,
+ cwd: Optional[str] = None,
+ on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise",
+ extra_ok_returncodes: Optional[Iterable[int]] = None,
+ extra_environ: Optional[Mapping[str, Any]] = None,
+ unset_environ: Optional[Iterable[str]] = None,
+ spinner: Optional[SpinnerInterface] = None,
+ log_failed_cmd: Optional[bool] = True,
+ stdout_only: Optional[bool] = False,
+ *,
+ command_desc: str,
+) -> str:
+ """
+ Args:
+ show_stdout: if true, use INFO to log the subprocess's stderr and
+ stdout streams. Otherwise, use DEBUG. Defaults to False.
+ extra_ok_returncodes: an iterable of integer return codes that are
+ acceptable, in addition to 0. Defaults to None, which means [].
+ unset_environ: an iterable of environment variable names to unset
+ prior to calling subprocess.Popen().
+ log_failed_cmd: if false, failed commands are not logged, only raised.
+ stdout_only: if true, return only stdout, else return both. When true,
+ logging of both stdout and stderr occurs when the subprocess has
+ terminated, else logging occurs as subprocess output is produced.
+ """
+ if extra_ok_returncodes is None:
+ extra_ok_returncodes = []
+ if unset_environ is None:
+ unset_environ = []
+ # Most places in pip use show_stdout=False. What this means is--
+ #
+ # - We connect the child's output (combined stderr and stdout) to a
+ # single pipe, which we read.
+ # - We log this output to stderr at DEBUG level as it is received.
+ # - If DEBUG logging isn't enabled (e.g. if --verbose logging wasn't
+ # requested), then we show a spinner so the user can still see the
+ # subprocess is in progress.
+ # - If the subprocess exits with an error, we log the output to stderr
+ # at ERROR level if it hasn't already been displayed to the console
+ # (e.g. if --verbose logging wasn't enabled). This way we don't log
+ # the output to the console twice.
+ #
+ # If show_stdout=True, then the above is still done, but with DEBUG
+ # replaced by INFO.
+ if show_stdout:
+ # Then log the subprocess output at INFO level.
+ log_subprocess: Callable[..., None] = subprocess_logger.info
+ used_level = logging.INFO
+ else:
+ # Then log the subprocess output using VERBOSE. This also ensures
+ # it will be logged to the log file (aka user_log), if enabled.
+ log_subprocess = subprocess_logger.verbose
+ used_level = VERBOSE
+
+ # Whether the subprocess will be visible in the console.
+ showing_subprocess = subprocess_logger.getEffectiveLevel() <= used_level
+
+ # Only use the spinner if we're not showing the subprocess output
+ # and we have a spinner.
+ use_spinner = not showing_subprocess and spinner is not None
+
+ log_subprocess("Running command %s", command_desc)
+ env = os.environ.copy()
+ if extra_environ:
+ env.update(extra_environ)
+ for name in unset_environ:
+ env.pop(name, None)
+ try:
+ proc = subprocess.Popen(
+ # Convert HiddenText objects to the underlying str.
+ reveal_command_args(cmd),
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT if not stdout_only else subprocess.PIPE,
+ cwd=cwd,
+ env=env,
+ errors="backslashreplace",
+ )
+ except Exception as exc:
+ if log_failed_cmd:
+ subprocess_logger.critical(
+ "Error %s while executing command %s",
+ exc,
+ command_desc,
+ )
+ raise
+ all_output = []
+ if not stdout_only:
+ assert proc.stdout
+ assert proc.stdin
+ proc.stdin.close()
+ # In this mode, stdout and stderr are in the same pipe.
+ while True:
+ line: str = proc.stdout.readline()
+ if not line:
+ break
+ line = line.rstrip()
+ all_output.append(line + "\n")
+
+ # Show the line immediately.
+ log_subprocess(line)
+ # Update the spinner.
+ if use_spinner:
+ assert spinner
+ spinner.spin()
+ try:
+ proc.wait()
+ finally:
+ if proc.stdout:
+ proc.stdout.close()
+ output = "".join(all_output)
+ else:
+ # In this mode, stdout and stderr are in different pipes.
+ # We must use communicate() which is the only safe way to read both.
+ out, err = proc.communicate()
+ # log line by line to preserve pip log indenting
+ for out_line in out.splitlines():
+ log_subprocess(out_line)
+ all_output.append(out)
+ for err_line in err.splitlines():
+ log_subprocess(err_line)
+ all_output.append(err)
+ output = out
+
+ proc_had_error = proc.returncode and proc.returncode not in extra_ok_returncodes
+ if use_spinner:
+ assert spinner
+ if proc_had_error:
+ spinner.finish("error")
+ else:
+ spinner.finish("done")
+ if proc_had_error:
+ if on_returncode == "raise":
+ error = InstallationSubprocessError(
+ command_description=command_desc,
+ exit_code=proc.returncode,
+ output_lines=all_output if not showing_subprocess else None,
+ )
+ if log_failed_cmd:
+ subprocess_logger.error("[present-rich] %s", error)
+ subprocess_logger.verbose(
+ "[bold magenta]full command[/]: [blue]%s[/]",
+ escape(format_command_args(cmd)),
+ extra={"markup": True},
+ )
+ subprocess_logger.verbose(
+ "[bold magenta]cwd[/]: %s",
+ escape(cwd or "[inherit]"),
+ extra={"markup": True},
+ )
+
+ raise error
+ elif on_returncode == "warn":
+ subprocess_logger.warning(
+ 'Command "%s" had error code %s in %s',
+ command_desc,
+ proc.returncode,
+ cwd,
+ )
+ elif on_returncode == "ignore":
+ pass
+ else:
+ raise ValueError(f"Invalid value: on_returncode={on_returncode!r}")
+ return output
+
+
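+# A minimal usage sketch (illustrative only; the helper name is hypothetical
+# and not part of pip). Runs `python --version` quietly and returns the
+# captured output; with on_returncode left at its default, a failure raises
+# InstallationSubprocessError.
+def _example_call_subprocess() -> str:
+    import sys
+
+    return call_subprocess(
+        [sys.executable, "--version"],
+        command_desc="python --version",
+        show_stdout=False,
+        stdout_only=True,
+    )
+
+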
+def runner_with_spinner_message(message: str) -> Callable[..., None]:
+ """Provide a subprocess_runner that shows a spinner message.
+
+    Intended for use with BuildBackendHookCaller. Thus, the runner has
+ an API that matches what's expected by BuildBackendHookCaller.subprocess_runner.
+ """
+
+ def runner(
+ cmd: List[str],
+ cwd: Optional[str] = None,
+ extra_environ: Optional[Mapping[str, Any]] = None,
+ ) -> None:
+ with open_spinner(message) as spinner:
+ call_subprocess(
+ cmd,
+ command_desc=message,
+ cwd=cwd,
+ extra_environ=extra_environ,
+ spinner=spinner,
+ )
+
+ return runner
diff --git a/third_party/python/pip/pip/_internal/utils/temp_dir.py b/third_party/python/pip/pip/_internal/utils/temp_dir.py
new file mode 100644
index 0000000000..8ee8a1cb18
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/temp_dir.py
@@ -0,0 +1,246 @@
+import errno
+import itertools
+import logging
+import os.path
+import tempfile
+from contextlib import ExitStack, contextmanager
+from typing import Any, Dict, Generator, Optional, TypeVar, Union
+
+from pip._internal.utils.misc import enum, rmtree
+
+logger = logging.getLogger(__name__)
+
+_T = TypeVar("_T", bound="TempDirectory")
+
+
+# Kinds of temporary directories. Only needed for ones that are
+# globally-managed.
+tempdir_kinds = enum(
+ BUILD_ENV="build-env",
+ EPHEM_WHEEL_CACHE="ephem-wheel-cache",
+ REQ_BUILD="req-build",
+)
+
+
+_tempdir_manager: Optional[ExitStack] = None
+
+
+@contextmanager
+def global_tempdir_manager() -> Generator[None, None, None]:
+ global _tempdir_manager
+ with ExitStack() as stack:
+ old_tempdir_manager, _tempdir_manager = _tempdir_manager, stack
+ try:
+ yield
+ finally:
+ _tempdir_manager = old_tempdir_manager
+
+
+class TempDirectoryTypeRegistry:
+ """Manages temp directory behavior"""
+
+ def __init__(self) -> None:
+ self._should_delete: Dict[str, bool] = {}
+
+ def set_delete(self, kind: str, value: bool) -> None:
+ """Indicate whether a TempDirectory of the given kind should be
+ auto-deleted.
+ """
+ self._should_delete[kind] = value
+
+ def get_delete(self, kind: str) -> bool:
+ """Get configured auto-delete flag for a given TempDirectory type,
+ default True.
+ """
+ return self._should_delete.get(kind, True)
+
+
+_tempdir_registry: Optional[TempDirectoryTypeRegistry] = None
+
+
+@contextmanager
+def tempdir_registry() -> Generator[TempDirectoryTypeRegistry, None, None]:
+ """Provides a scoped global tempdir registry that can be used to dictate
+ whether directories should be deleted.
+ """
+ global _tempdir_registry
+ old_tempdir_registry = _tempdir_registry
+ _tempdir_registry = TempDirectoryTypeRegistry()
+ try:
+ yield _tempdir_registry
+ finally:
+ _tempdir_registry = old_tempdir_registry
+
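+
+# An illustrative sketch (assumption, not pip code; the helper name is
+# hypothetical) showing how the scoped registry disables auto-deletion for
+# one kind of temp directory.
+def _example_keep_build_env_dirs() -> None:
+    with tempdir_registry() as registry:
+        # A TempDirectory(kind=tempdir_kinds.BUILD_ENV, delete=None) created
+        # in this scope consults the flag at exit and is left on disk.
+        registry.set_delete(tempdir_kinds.BUILD_ENV, False)
+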
+
+class _Default:
+ pass
+
+
+_default = _Default()
+
+
+class TempDirectory:
+ """Helper class that owns and cleans up a temporary directory.
+
+ This class can be used as a context manager or as an OO representation of a
+ temporary directory.
+
+ Attributes:
+ path
+ Location to the created temporary directory
+ delete
+ Whether the directory should be deleted when exiting
+ (when used as a contextmanager)
+
+ Methods:
+ cleanup()
+ Deletes the temporary directory
+
+ When used as a context manager, if the delete attribute is True, on
+ exiting the context the temporary directory is deleted.
+ """
+
+ def __init__(
+ self,
+ path: Optional[str] = None,
+ delete: Union[bool, None, _Default] = _default,
+ kind: str = "temp",
+ globally_managed: bool = False,
+ ):
+ super().__init__()
+
+ if delete is _default:
+ if path is not None:
+ # If we were given an explicit directory, resolve delete option
+ # now.
+ delete = False
+ else:
+ # Otherwise, we wait until cleanup and see what
+ # tempdir_registry says.
+ delete = None
+
+        # The only time we specify a path is for editables, where it
+        # is the value of the --src option.
+ if path is None:
+ path = self._create(kind)
+
+ self._path = path
+ self._deleted = False
+ self.delete = delete
+ self.kind = kind
+
+ if globally_managed:
+ assert _tempdir_manager is not None
+ _tempdir_manager.enter_context(self)
+
+ @property
+ def path(self) -> str:
+ assert not self._deleted, f"Attempted to access deleted path: {self._path}"
+ return self._path
+
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__} {self.path!r}>"
+
+ def __enter__(self: _T) -> _T:
+ return self
+
+ def __exit__(self, exc: Any, value: Any, tb: Any) -> None:
+ if self.delete is not None:
+ delete = self.delete
+ elif _tempdir_registry:
+ delete = _tempdir_registry.get_delete(self.kind)
+ else:
+ delete = True
+
+ if delete:
+ self.cleanup()
+
+ def _create(self, kind: str) -> str:
+ """Create a temporary directory and store its path in self.path"""
+ # We realpath here because some systems have their default tmpdir
+ # symlinked to another directory. This tends to confuse build
+ # scripts, so we canonicalize the path by traversing potential
+ # symlinks here.
+ path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
+ logger.debug("Created temporary directory: %s", path)
+ return path
+
+ def cleanup(self) -> None:
+ """Remove the temporary directory created and reset state"""
+ self._deleted = True
+ if not os.path.exists(self._path):
+ return
+ rmtree(self._path)
+
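+
+# A minimal usage sketch (not pip code; the helper name is hypothetical).
+# With no explicit path, delete resolves to None and the registry (or the
+# default of True) decides whether the directory is removed on exit.
+def _example_tempdirectory_context() -> None:
+    with TempDirectory(kind="temp") as tmp:
+        with open(os.path.join(tmp.path, "marker.txt"), "w") as f:
+            f.write("scratch data")
+    # The directory has been removed here; accessing tmp.path now raises
+    # an AssertionError.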
+
+class AdjacentTempDirectory(TempDirectory):
+ """Helper class that creates a temporary directory adjacent to a real one.
+
+ Attributes:
+ original
+ The original directory to create a temp directory for.
+ path
+ After calling create() or entering, contains the full
+ path to the temporary directory.
+ delete
+ Whether the directory should be deleted when exiting
+ (when used as a contextmanager)
+
+ """
+
+ # The characters that may be used to name the temp directory
+ # We always prepend a ~ and then rotate through these until
+ # a usable name is found.
+ # pkg_resources raises a different error for .dist-info folder
+ # with leading '-' and invalid metadata
+ LEADING_CHARS = "-~.=%0123456789"
+
+ def __init__(self, original: str, delete: Optional[bool] = None) -> None:
+ self.original = original.rstrip("/\\")
+ super().__init__(delete=delete)
+
+ @classmethod
+ def _generate_names(cls, name: str) -> Generator[str, None, None]:
+ """Generates a series of temporary names.
+
+ The algorithm replaces the leading characters in the name
+ with ones that are valid filesystem characters, but are not
+ valid package names (for both Python and pip definitions of
+ package).
+ """
+ for i in range(1, len(name)):
+ for candidate in itertools.combinations_with_replacement(
+ cls.LEADING_CHARS, i - 1
+ ):
+ new_name = "~" + "".join(candidate) + name[i:]
+ if new_name != name:
+ yield new_name
+
+ # If we make it this far, we will have to make a longer name
+ for i in range(len(cls.LEADING_CHARS)):
+ for candidate in itertools.combinations_with_replacement(
+ cls.LEADING_CHARS, i
+ ):
+ new_name = "~" + "".join(candidate) + name
+ if new_name != name:
+ yield new_name
+
+ def _create(self, kind: str) -> str:
+ root, name = os.path.split(self.original)
+ for candidate in self._generate_names(name):
+ path = os.path.join(root, candidate)
+ try:
+ os.mkdir(path)
+ except OSError as ex:
+ # Continue if the name exists already
+ if ex.errno != errno.EEXIST:
+ raise
+ else:
+ path = os.path.realpath(path)
+ break
+ else:
+ # Final fallback on the default behavior.
+ path = os.path.realpath(tempfile.mkdtemp(prefix=f"pip-{kind}-"))
+
+ logger.debug("Created temporary directory: %s", path)
+ return path
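+
+
+# An illustrative peek (not pip code; the helper name is hypothetical) at the
+# candidate names generated for an adjacent temp directory. For "pkg" the
+# first candidates are "~kg", "~-g", "~~g", ... none of which is a valid
+# Python/pip package name.
+def _example_adjacent_names():
+    return list(itertools.islice(AdjacentTempDirectory._generate_names("pkg"), 5))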
diff --git a/third_party/python/pip/pip/_internal/utils/unpacking.py b/third_party/python/pip/pip/_internal/utils/unpacking.py
new file mode 100644
index 0000000000..78b5c13ced
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/unpacking.py
@@ -0,0 +1,257 @@
+"""Utilities related archives.
+"""
+
+import logging
+import os
+import shutil
+import stat
+import tarfile
+import zipfile
+from typing import Iterable, List, Optional
+from zipfile import ZipInfo
+
+from pip._internal.exceptions import InstallationError
+from pip._internal.utils.filetypes import (
+ BZ2_EXTENSIONS,
+ TAR_EXTENSIONS,
+ XZ_EXTENSIONS,
+ ZIP_EXTENSIONS,
+)
+from pip._internal.utils.misc import ensure_dir
+
+logger = logging.getLogger(__name__)
+
+
+SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
+
+try:
+ import bz2 # noqa
+
+ SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
+except ImportError:
+ logger.debug("bz2 module is not available")
+
+try:
+    # May be unavailable if Python was built without lzma support.
+ import lzma # noqa
+
+ SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
+except ImportError:
+ logger.debug("lzma module is not available")
+
+
+def current_umask() -> int:
+ """Get the current umask which involves having to set it temporarily."""
+ mask = os.umask(0)
+ os.umask(mask)
+ return mask
+
+
+def split_leading_dir(path: str) -> List[str]:
+ path = path.lstrip("/").lstrip("\\")
+ if "/" in path and (
+ ("\\" in path and path.find("/") < path.find("\\")) or "\\" not in path
+ ):
+ return path.split("/", 1)
+ elif "\\" in path:
+ return path.split("\\", 1)
+ else:
+ return [path, ""]
+
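+
+# Illustrative expectations (not pip tests; the helper name is hypothetical)
+# for split_leading_dir() under the behavior defined above.
+def _example_split_leading_dir() -> None:
+    assert split_leading_dir("pkg/module.py") == ["pkg", "module.py"]
+    assert split_leading_dir("pkg\\module.py") == ["pkg", "module.py"]
+    assert split_leading_dir("module.py") == ["module.py", ""]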
+
+def has_leading_dir(paths: Iterable[str]) -> bool:
+ """Returns true if all the paths have the same leading path name
+ (i.e., everything is in one subdirectory in an archive)"""
+ common_prefix = None
+ for path in paths:
+ prefix, rest = split_leading_dir(path)
+ if not prefix:
+ return False
+ elif common_prefix is None:
+ common_prefix = prefix
+ elif prefix != common_prefix:
+ return False
+ return True
+
+
+def is_within_directory(directory: str, target: str) -> bool:
+ """
+ Return true if the absolute path of target is within the directory
+ """
+ abs_directory = os.path.abspath(directory)
+ abs_target = os.path.abspath(target)
+
+ prefix = os.path.commonprefix([abs_directory, abs_target])
+ return prefix == abs_directory
+
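+
+# A sketch (not pip tests; the helper name is hypothetical) of the traversal
+# guard above, assuming POSIX paths: a member named "../evil.sh" resolves
+# outside the extraction root and is rejected.
+def _example_traversal_check() -> None:
+    assert is_within_directory("/tmp/unpack", "/tmp/unpack/pkg/setup.py")
+    assert not is_within_directory("/tmp/unpack", "/tmp/unpack/../evil.sh")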
+
+def set_extracted_file_to_default_mode_plus_executable(path: str) -> None:
+ """
+    Make the file present at path executable for user/group/world
+    ("chmod +x"). This is a no-op on Windows, per the Python docs.
+ """
+ os.chmod(path, (0o777 & ~current_umask() | 0o111))
+
+
+def zip_item_is_executable(info: ZipInfo) -> bool:
+ mode = info.external_attr >> 16
+ # if mode and regular file and any execute permissions for
+ # user/group/world?
+ return bool(mode and stat.S_ISREG(mode) and mode & 0o111)
+
+
+def unzip_file(filename: str, location: str, flatten: bool = True) -> None:
+ """
+ Unzip the file (with path `filename`) to the destination `location`. All
+ files are written based on system defaults and umask (i.e. permissions are
+ not preserved), except that regular file members with any execute
+ permissions (user, group, or world) have "chmod +x" applied after being
+ written. Note that for windows, any execute changes using os.chmod are
+ no-ops per the python docs.
+ """
+ ensure_dir(location)
+ zipfp = open(filename, "rb")
+ try:
+ zip = zipfile.ZipFile(zipfp, allowZip64=True)
+ leading = has_leading_dir(zip.namelist()) and flatten
+ for info in zip.infolist():
+ name = info.filename
+ fn = name
+ if leading:
+ fn = split_leading_dir(name)[1]
+ fn = os.path.join(location, fn)
+ dir = os.path.dirname(fn)
+ if not is_within_directory(location, fn):
+ message = (
+ "The zip file ({}) has a file ({}) trying to install "
+ "outside target directory ({})"
+ )
+ raise InstallationError(message.format(filename, fn, location))
+ if fn.endswith("/") or fn.endswith("\\"):
+ # A directory
+ ensure_dir(fn)
+ else:
+ ensure_dir(dir)
+ # Don't use read() to avoid allocating an arbitrarily large
+ # chunk of memory for the file's content
+ fp = zip.open(name)
+ try:
+ with open(fn, "wb") as destfp:
+ shutil.copyfileobj(fp, destfp)
+ finally:
+ fp.close()
+ if zip_item_is_executable(info):
+ set_extracted_file_to_default_mode_plus_executable(fn)
+ finally:
+ zipfp.close()
+
+
+def untar_file(filename: str, location: str) -> None:
+ """
+ Untar the file (with path `filename`) to the destination `location`.
+ All files are written based on system defaults and umask (i.e. permissions
+ are not preserved), except that regular file members with any execute
+ permissions (user, group, or world) have "chmod +x" applied after being
+ written. Note that for windows, any execute changes using os.chmod are
+ no-ops per the python docs.
+ """
+ ensure_dir(location)
+ if filename.lower().endswith(".gz") or filename.lower().endswith(".tgz"):
+ mode = "r:gz"
+ elif filename.lower().endswith(BZ2_EXTENSIONS):
+ mode = "r:bz2"
+ elif filename.lower().endswith(XZ_EXTENSIONS):
+ mode = "r:xz"
+ elif filename.lower().endswith(".tar"):
+ mode = "r"
+ else:
+ logger.warning(
+ "Cannot determine compression type for file %s",
+ filename,
+ )
+ mode = "r:*"
+ tar = tarfile.open(filename, mode, encoding="utf-8")
+ try:
+ leading = has_leading_dir([member.name for member in tar.getmembers()])
+ for member in tar.getmembers():
+ fn = member.name
+ if leading:
+ fn = split_leading_dir(fn)[1]
+ path = os.path.join(location, fn)
+ if not is_within_directory(location, path):
+ message = (
+ "The tar file ({}) has a file ({}) trying to install "
+ "outside target directory ({})"
+ )
+ raise InstallationError(message.format(filename, path, location))
+ if member.isdir():
+ ensure_dir(path)
+ elif member.issym():
+ try:
+ tar._extract_member(member, path)
+ except Exception as exc:
+ # Some corrupt tar files seem to produce this
+ # (specifically bad symlinks)
+ logger.warning(
+ "In the tar file %s the member %s is invalid: %s",
+ filename,
+ member.name,
+ exc,
+ )
+ continue
+ else:
+ try:
+ fp = tar.extractfile(member)
+ except (KeyError, AttributeError) as exc:
+ # Some corrupt tar files seem to produce this
+ # (specifically bad symlinks)
+ logger.warning(
+ "In the tar file %s the member %s is invalid: %s",
+ filename,
+ member.name,
+ exc,
+ )
+ continue
+ ensure_dir(os.path.dirname(path))
+ assert fp is not None
+ with open(path, "wb") as destfp:
+ shutil.copyfileobj(fp, destfp)
+ fp.close()
+                # Update the timestamp (useful for Cython-compiled files)
+ tar.utime(member, path)
+                # Does the member have any execute permissions for user/group/world?
+ if member.mode & 0o111:
+ set_extracted_file_to_default_mode_plus_executable(path)
+ finally:
+ tar.close()
+
+
+def unpack_file(
+ filename: str,
+ location: str,
+ content_type: Optional[str] = None,
+) -> None:
+ filename = os.path.realpath(filename)
+ if (
+ content_type == "application/zip"
+ or filename.lower().endswith(ZIP_EXTENSIONS)
+ or zipfile.is_zipfile(filename)
+ ):
+ unzip_file(filename, location, flatten=not filename.endswith(".whl"))
+ elif (
+ content_type == "application/x-gzip"
+ or tarfile.is_tarfile(filename)
+ or filename.lower().endswith(TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)
+ ):
+ untar_file(filename, location)
+ else:
+ # FIXME: handle?
+ # FIXME: magic signatures?
+ logger.critical(
+ "Cannot unpack file %s (downloaded from %s, content-type: %s); "
+ "cannot detect archive format",
+ filename,
+ location,
+ content_type,
+ )
+ raise InstallationError(f"Cannot determine archive format of {location}")
diff --git a/third_party/python/pip/pip/_internal/utils/urls.py b/third_party/python/pip/pip/_internal/utils/urls.py
new file mode 100644
index 0000000000..6ba2e04f35
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/urls.py
@@ -0,0 +1,62 @@
+import os
+import string
+import urllib.parse
+import urllib.request
+from typing import Optional
+
+from .compat import WINDOWS
+
+
+def get_url_scheme(url: str) -> Optional[str]:
+ if ":" not in url:
+ return None
+ return url.split(":", 1)[0].lower()
+
+
+def path_to_url(path: str) -> str:
+ """
+ Convert a path to a file: URL. The path will be made absolute and have
+ quoted path parts.
+ """
+ path = os.path.normpath(os.path.abspath(path))
+ url = urllib.parse.urljoin("file:", urllib.request.pathname2url(path))
+ return url
+
+
+def url_to_path(url: str) -> str:
+ """
+ Convert a file: URL to a path.
+ """
+ assert url.startswith(
+ "file:"
+ ), f"You can only turn file: urls into filenames (not {url!r})"
+
+ _, netloc, path, _, _ = urllib.parse.urlsplit(url)
+
+ if not netloc or netloc == "localhost":
+ # According to RFC 8089, same as empty authority.
+ netloc = ""
+ elif WINDOWS:
+ # If we have a UNC path, prepend UNC share notation.
+ netloc = "\\\\" + netloc
+ else:
+ raise ValueError(
+ f"non-local file URIs are not supported on this platform: {url!r}"
+ )
+
+ path = urllib.request.url2pathname(netloc + path)
+
+ # On Windows, urlsplit parses the path as something like "/C:/Users/foo".
+ # This creates issues for path-related functions like io.open(), so we try
+ # to detect and strip the leading slash.
+ if (
+ WINDOWS
+ and not netloc # Not UNC.
+ and len(path) >= 3
+ and path[0] == "/" # Leading slash to strip.
+ and path[1] in string.ascii_letters # Drive letter.
+ and path[2:4] in (":", ":/") # Colon + end of string, or colon + absolute path.
+ ):
+ path = path[1:]
+
+ return path
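+
+
+# An illustrative round-trip (not pip code; the helper name is hypothetical)
+# under the helpers above, assuming a POSIX path; Windows adds the
+# drive-letter handling described in url_to_path().
+def _example_url_roundtrip() -> None:
+    url = path_to_url("/opt/project")  # "file:///opt/project"
+    assert url_to_path(url) == "/opt/project"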
diff --git a/third_party/python/pip/pip/_internal/utils/virtualenv.py b/third_party/python/pip/pip/_internal/utils/virtualenv.py
new file mode 100644
index 0000000000..882e36f5c1
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/virtualenv.py
@@ -0,0 +1,104 @@
+import logging
+import os
+import re
+import site
+import sys
+from typing import List, Optional
+
+logger = logging.getLogger(__name__)
+_INCLUDE_SYSTEM_SITE_PACKAGES_REGEX = re.compile(
+ r"include-system-site-packages\s*=\s*(?P<value>true|false)"
+)
+
+
+def _running_under_venv() -> bool:
+ """Checks if sys.base_prefix and sys.prefix match.
+
+ This handles PEP 405 compliant virtual environments.
+ """
+ return sys.prefix != getattr(sys, "base_prefix", sys.prefix)
+
+
+def _running_under_legacy_virtualenv() -> bool:
+ """Checks if sys.real_prefix is set.
+
+ This handles virtual environments created with pypa's virtualenv.
+ """
+ # pypa/virtualenv case
+ return hasattr(sys, "real_prefix")
+
+
+def running_under_virtualenv() -> bool:
+ """True if we're running inside a virtual environment, False otherwise."""
+ return _running_under_venv() or _running_under_legacy_virtualenv()
+
+
+def _get_pyvenv_cfg_lines() -> Optional[List[str]]:
+ """Reads {sys.prefix}/pyvenv.cfg and returns its contents as list of lines
+
+ Returns None, if it could not read/access the file.
+ """
+ pyvenv_cfg_file = os.path.join(sys.prefix, "pyvenv.cfg")
+ try:
+ # Although PEP 405 does not specify, the built-in venv module always
+ # writes with UTF-8. (pypa/pip#8717)
+ with open(pyvenv_cfg_file, encoding="utf-8") as f:
+ return f.read().splitlines() # avoids trailing newlines
+ except OSError:
+ return None
+
+
+def _no_global_under_venv() -> bool:
+ """Check `{sys.prefix}/pyvenv.cfg` for system site-packages inclusion
+
+ PEP 405 specifies that when system site-packages are not supposed to be
+ visible from a virtual environment, `pyvenv.cfg` must contain the following
+ line:
+
+ include-system-site-packages = false
+
+ Additionally, log a warning if accessing the file fails.
+ """
+ cfg_lines = _get_pyvenv_cfg_lines()
+ if cfg_lines is None:
+ # We're not in a "sane" venv, so assume there is no system
+ # site-packages access (since that's PEP 405's default state).
+ logger.warning(
+ "Could not access 'pyvenv.cfg' despite a virtual environment "
+ "being active. Assuming global site-packages is not accessible "
+ "in this environment."
+ )
+ return True
+
+ for line in cfg_lines:
+ match = _INCLUDE_SYSTEM_SITE_PACKAGES_REGEX.match(line)
+ if match is not None and match.group("value") == "false":
+ return True
+ return False
+
+
+def _no_global_under_legacy_virtualenv() -> bool:
+ """Check if "no-global-site-packages.txt" exists beside site.py
+
+ This mirrors logic in pypa/virtualenv for determining whether system
+ site-packages are visible in the virtual environment.
+ """
+ site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
+ no_global_site_packages_file = os.path.join(
+ site_mod_dir,
+ "no-global-site-packages.txt",
+ )
+ return os.path.exists(no_global_site_packages_file)
+
+
+def virtualenv_no_global() -> bool:
+ """Returns a boolean, whether running in venv with no system site-packages."""
+ # PEP 405 compliance needs to be checked first since virtualenv >=20 would
+ # return True for both checks, but is only able to use the PEP 405 config.
+ if _running_under_venv():
+ return _no_global_under_venv()
+
+ if _running_under_legacy_virtualenv():
+ return _no_global_under_legacy_virtualenv()
+
+ return False
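+
+
+# An illustrative match (not pip code; the helper name is hypothetical) of
+# the pyvenv.cfg line that _no_global_under_venv() looks for.
+def _example_cfg_match() -> None:
+    match = _INCLUDE_SYSTEM_SITE_PACKAGES_REGEX.match(
+        "include-system-site-packages = false"
+    )
+    assert match is not None and match.group("value") == "false"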
diff --git a/third_party/python/pip/pip/_internal/utils/wheel.py b/third_party/python/pip/pip/_internal/utils/wheel.py
new file mode 100644
index 0000000000..e5e3f34ed8
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/utils/wheel.py
@@ -0,0 +1,136 @@
+"""Support functions for working with wheel files.
+"""
+
+import logging
+from email.message import Message
+from email.parser import Parser
+from typing import Tuple
+from zipfile import BadZipFile, ZipFile
+
+from pip._vendor.packaging.utils import canonicalize_name
+
+from pip._internal.exceptions import UnsupportedWheel
+
+VERSION_COMPATIBLE = (1, 0)
+
+
+logger = logging.getLogger(__name__)
+
+
+def parse_wheel(wheel_zip: ZipFile, name: str) -> Tuple[str, Message]:
+ """Extract information from the provided wheel, ensuring it meets basic
+ standards.
+
+ Returns the name of the .dist-info directory and the parsed WHEEL metadata.
+ """
+ try:
+ info_dir = wheel_dist_info_dir(wheel_zip, name)
+ metadata = wheel_metadata(wheel_zip, info_dir)
+ version = wheel_version(metadata)
+ except UnsupportedWheel as e:
+ raise UnsupportedWheel("{} has an invalid wheel, {}".format(name, str(e)))
+
+ check_compatibility(version, name)
+
+ return info_dir, metadata
+
+
+def wheel_dist_info_dir(source: ZipFile, name: str) -> str:
+ """Returns the name of the contained .dist-info directory.
+
+ Raises AssertionError or UnsupportedWheel if not found, >1 found, or
+ it doesn't match the provided name.
+ """
+ # Zip file path separators must be /
+ subdirs = {p.split("/", 1)[0] for p in source.namelist()}
+
+ info_dirs = [s for s in subdirs if s.endswith(".dist-info")]
+
+ if not info_dirs:
+ raise UnsupportedWheel(".dist-info directory not found")
+
+ if len(info_dirs) > 1:
+ raise UnsupportedWheel(
+ "multiple .dist-info directories found: {}".format(", ".join(info_dirs))
+ )
+
+ info_dir = info_dirs[0]
+
+ info_dir_name = canonicalize_name(info_dir)
+ canonical_name = canonicalize_name(name)
+ if not info_dir_name.startswith(canonical_name):
+ raise UnsupportedWheel(
+ ".dist-info directory {!r} does not start with {!r}".format(
+ info_dir, canonical_name
+ )
+ )
+
+ return info_dir
+
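+
+# A hypothetical sketch (not pip code) locating the .dist-info directory in a
+# small in-memory wheel; the package name "demo" is an assumption.
+def _example_dist_info_dir() -> str:
+    import io
+
+    buf = io.BytesIO()
+    with ZipFile(buf, "w") as zf:
+        zf.writestr("demo-1.0.dist-info/WHEEL", "Wheel-Version: 1.0\n")
+        zf.writestr("demo/__init__.py", "")
+    return wheel_dist_info_dir(ZipFile(buf), "demo")  # "demo-1.0.dist-info"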
+
+def read_wheel_metadata_file(source: ZipFile, path: str) -> bytes:
+ try:
+ return source.read(path)
+ # BadZipFile for general corruption, KeyError for missing entry,
+ # and RuntimeError for password-protected files
+ except (BadZipFile, KeyError, RuntimeError) as e:
+ raise UnsupportedWheel(f"could not read {path!r} file: {e!r}")
+
+
+def wheel_metadata(source: ZipFile, dist_info_dir: str) -> Message:
+ """Return the WHEEL metadata of an extracted wheel, if possible.
+ Otherwise, raise UnsupportedWheel.
+ """
+ path = f"{dist_info_dir}/WHEEL"
+ # Zip file path separators must be /
+ wheel_contents = read_wheel_metadata_file(source, path)
+
+ try:
+ wheel_text = wheel_contents.decode()
+ except UnicodeDecodeError as e:
+ raise UnsupportedWheel(f"error decoding {path!r}: {e!r}")
+
+ # FeedParser (used by Parser) does not raise any exceptions. The returned
+ # message may have .defects populated, but for backwards-compatibility we
+ # currently ignore them.
+ return Parser().parsestr(wheel_text)
+
+
+def wheel_version(wheel_data: Message) -> Tuple[int, ...]:
+ """Given WHEEL metadata, return the parsed Wheel-Version.
+ Otherwise, raise UnsupportedWheel.
+ """
+ version_text = wheel_data["Wheel-Version"]
+ if version_text is None:
+ raise UnsupportedWheel("WHEEL is missing Wheel-Version")
+
+ version = version_text.strip()
+
+ try:
+ return tuple(map(int, version.split(".")))
+ except ValueError:
+ raise UnsupportedWheel(f"invalid Wheel-Version: {version!r}")
+
+
+def check_compatibility(version: Tuple[int, ...], name: str) -> None:
+ """Raises errors or warns if called with an incompatible Wheel-Version.
+
+ pip should refuse to install a Wheel-Version that's a major series
+    ahead of what it's compatible with (e.g. 2.0 > 1.1); and warn when
+    installing a version that's only a minor version ahead (e.g. 1.2 > 1.1).
+
+ version: a 2-tuple representing a Wheel-Version (Major, Minor)
+ name: name of wheel or package to raise exception about
+
+ :raises UnsupportedWheel: when an incompatible Wheel-Version is given
+ """
+ if version[0] > VERSION_COMPATIBLE[0]:
+ raise UnsupportedWheel(
+ "{}'s Wheel-Version ({}) is not compatible with this version "
+ "of pip".format(name, ".".join(map(str, version)))
+ )
+ elif version > VERSION_COMPATIBLE:
+ logger.warning(
+ "Installing from a newer Wheel-Version (%s)",
+ ".".join(map(str, version)),
+ )
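+
+
+# Illustrative expectations (not pip tests; the helper name is hypothetical)
+# for the version handling above.
+def _example_wheel_version_checks() -> None:
+    msg = Parser().parsestr("Wheel-Version: 1.0\n")
+    assert wheel_version(msg) == (1, 0)
+    check_compatibility((1, 0), "demo")  # compatible: no warning, no error
+    check_compatibility((1, 9), "demo")  # newer minor version: warns
+    # check_compatibility((2, 0), "demo") would raise UnsupportedWheel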
diff --git a/third_party/python/pip/pip/_internal/vcs/__init__.py b/third_party/python/pip/pip/_internal/vcs/__init__.py
new file mode 100644
index 0000000000..b6beddbe6d
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/vcs/__init__.py
@@ -0,0 +1,15 @@
+# Expose a limited set of classes and functions so callers outside of
+# the vcs package don't need to import deeper than `pip._internal.vcs`.
+# (The test directory may still need to import from a vcs sub-package.)
+# Import all vcs modules to register each VCS in the VcsSupport object.
+import pip._internal.vcs.bazaar
+import pip._internal.vcs.git
+import pip._internal.vcs.mercurial
+import pip._internal.vcs.subversion # noqa: F401
+from pip._internal.vcs.versioncontrol import ( # noqa: F401
+ RemoteNotFoundError,
+ RemoteNotValidError,
+ is_url,
+ make_vcs_requirement_url,
+ vcs,
+)
diff --git a/third_party/python/pip/pip/_internal/vcs/bazaar.py b/third_party/python/pip/pip/_internal/vcs/bazaar.py
new file mode 100644
index 0000000000..20a17ed092
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/vcs/bazaar.py
@@ -0,0 +1,112 @@
+import logging
+from typing import List, Optional, Tuple
+
+from pip._internal.utils.misc import HiddenText, display_path
+from pip._internal.utils.subprocess import make_command
+from pip._internal.utils.urls import path_to_url
+from pip._internal.vcs.versioncontrol import (
+ AuthInfo,
+ RemoteNotFoundError,
+ RevOptions,
+ VersionControl,
+ vcs,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Bazaar(VersionControl):
+ name = "bzr"
+ dirname = ".bzr"
+ repo_name = "branch"
+ schemes = (
+ "bzr+http",
+ "bzr+https",
+ "bzr+ssh",
+ "bzr+sftp",
+ "bzr+ftp",
+ "bzr+lp",
+ "bzr+file",
+ )
+
+ @staticmethod
+ def get_base_rev_args(rev: str) -> List[str]:
+ return ["-r", rev]
+
+ def fetch_new(
+ self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
+ ) -> None:
+ rev_display = rev_options.to_display()
+ logger.info(
+ "Checking out %s%s to %s",
+ url,
+ rev_display,
+ display_path(dest),
+ )
+ if verbosity <= 0:
+ flag = "--quiet"
+ elif verbosity == 1:
+ flag = ""
+ else:
+ flag = f"-{'v'*verbosity}"
+ cmd_args = make_command(
+ "checkout", "--lightweight", flag, rev_options.to_args(), url, dest
+ )
+ self.run_command(cmd_args)
+
+ def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ self.run_command(make_command("switch", url), cwd=dest)
+
+ def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ output = self.run_command(
+ make_command("info"), show_stdout=False, stdout_only=True, cwd=dest
+ )
+ if output.startswith("Standalone "):
+ # Older versions of pip used to create standalone branches.
+ # Convert the standalone branch to a checkout by calling "bzr bind".
+ cmd_args = make_command("bind", "-q", url)
+ self.run_command(cmd_args, cwd=dest)
+
+ cmd_args = make_command("update", "-q", rev_options.to_args())
+ self.run_command(cmd_args, cwd=dest)
+
+ @classmethod
+ def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
+        # The superclass strips the "bzr+" prefix from "bzr+ssh://" URLs;
+        # re-add it here.
+ url, rev, user_pass = super().get_url_rev_and_auth(url)
+ if url.startswith("ssh://"):
+ url = "bzr+" + url
+ return url, rev, user_pass
+
+ @classmethod
+ def get_remote_url(cls, location: str) -> str:
+ urls = cls.run_command(
+ ["info"], show_stdout=False, stdout_only=True, cwd=location
+ )
+ for line in urls.splitlines():
+ line = line.strip()
+ for x in ("checkout of branch: ", "parent branch: "):
+ if line.startswith(x):
+ repo = line.split(x)[1]
+ if cls._is_local_repository(repo):
+ return path_to_url(repo)
+ return repo
+ raise RemoteNotFoundError
+
+ @classmethod
+ def get_revision(cls, location: str) -> str:
+ revision = cls.run_command(
+ ["revno"],
+ show_stdout=False,
+ stdout_only=True,
+ cwd=location,
+ )
+ return revision.splitlines()[-1]
+
+ @classmethod
+ def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
+ """Always assume the versions don't match"""
+ return False
+
+
+vcs.register(Bazaar)
diff --git a/third_party/python/pip/pip/_internal/vcs/git.py b/third_party/python/pip/pip/_internal/vcs/git.py
new file mode 100644
index 0000000000..8d1d499376
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/vcs/git.py
@@ -0,0 +1,526 @@
+import logging
+import os.path
+import pathlib
+import re
+import urllib.parse
+import urllib.request
+from typing import List, Optional, Tuple
+
+from pip._internal.exceptions import BadCommand, InstallationError
+from pip._internal.utils.misc import HiddenText, display_path, hide_url
+from pip._internal.utils.subprocess import make_command
+from pip._internal.vcs.versioncontrol import (
+ AuthInfo,
+ RemoteNotFoundError,
+ RemoteNotValidError,
+ RevOptions,
+ VersionControl,
+ find_path_to_project_root_from_repo_root,
+ vcs,
+)
+
+urlsplit = urllib.parse.urlsplit
+urlunsplit = urllib.parse.urlunsplit
+
+
+logger = logging.getLogger(__name__)
+
+
+GIT_VERSION_REGEX = re.compile(
+ r"^git version " # Prefix.
+ r"(\d+)" # Major.
+ r"\.(\d+)" # Dot, minor.
+ r"(?:\.(\d+))?" # Optional dot, patch.
+ r".*$" # Suffix, including any pre- and post-release segments we don't care about.
+)
+
+HASH_REGEX = re.compile("^[a-fA-F0-9]{40}$")
+
+# SCP (Secure copy protocol) shorthand. e.g. 'git@example.com:foo/bar.git'
+SCP_REGEX = re.compile(
+ r"""^
+ # Optional user, e.g. 'git@'
+ (\w+@)?
+ # Server, e.g. 'github.com'.
+ ([^/:]+):
+ # The server-side path. e.g. 'user/project.git'. Must start with an
+    # alphanumeric character so as not to be confusable with a Windows path
+ # like 'C:/foo/bar' or 'C:\foo\bar'.
+ (\w[^:]*)
+ $""",
+ re.VERBOSE,
+)
+
+
+def looks_like_hash(sha: str) -> bool:
+ return bool(HASH_REGEX.match(sha))
+
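+
+# Illustrative matches (not pip tests; the helper name is hypothetical) for
+# the patterns above.
+def _example_git_patterns() -> None:
+    assert looks_like_hash("a" * 40)
+    assert not looks_like_hash("HEAD")
+    assert SCP_REGEX.match("git@example.com:foo/bar.git")
+    assert not SCP_REGEX.match("C:/foo/bar")
+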
+
+class Git(VersionControl):
+ name = "git"
+ dirname = ".git"
+ repo_name = "clone"
+ schemes = (
+ "git+http",
+ "git+https",
+ "git+ssh",
+ "git+git",
+ "git+file",
+ )
+ # Prevent the user's environment variables from interfering with pip:
+ # https://github.com/pypa/pip/issues/1130
+ unset_environ = ("GIT_DIR", "GIT_WORK_TREE")
+ default_arg_rev = "HEAD"
+
+ @staticmethod
+ def get_base_rev_args(rev: str) -> List[str]:
+ return [rev]
+
+ def is_immutable_rev_checkout(self, url: str, dest: str) -> bool:
+ _, rev_options = self.get_url_rev_options(hide_url(url))
+ if not rev_options.rev:
+ return False
+ if not self.is_commit_id_equal(dest, rev_options.rev):
+ # the current commit is different from rev,
+ # which means rev was something else than a commit hash
+ return False
+ # return False in the rare case rev is both a commit hash
+ # and a tag or a branch; we don't want to cache in that case
+ # because that branch/tag could point to something else in the future
+ is_tag_or_branch = bool(self.get_revision_sha(dest, rev_options.rev)[0])
+ return not is_tag_or_branch
+
+ def get_git_version(self) -> Tuple[int, ...]:
+ version = self.run_command(
+ ["version"],
+ command_desc="git version",
+ show_stdout=False,
+ stdout_only=True,
+ )
+ match = GIT_VERSION_REGEX.match(version)
+ if not match:
+ logger.warning("Can't parse git version: %s", version)
+ return ()
+ return tuple(int(c) for c in match.groups())
+
+ @classmethod
+ def get_current_branch(cls, location: str) -> Optional[str]:
+ """
+ Return the current branch, or None if HEAD isn't at a branch
+ (e.g. detached HEAD).
+ """
+ # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
+ # HEAD rather than a symbolic ref. In addition, the -q causes the
+ # command to exit with status code 1 instead of 128 in this case
+ # and to suppress the message to stderr.
+ args = ["symbolic-ref", "-q", "HEAD"]
+ output = cls.run_command(
+ args,
+ extra_ok_returncodes=(1,),
+ show_stdout=False,
+ stdout_only=True,
+ cwd=location,
+ )
+ ref = output.strip()
+
+ if ref.startswith("refs/heads/"):
+ return ref[len("refs/heads/") :]
+
+ return None
+
+ @classmethod
+ def get_revision_sha(cls, dest: str, rev: str) -> Tuple[Optional[str], bool]:
+ """
+ Return (sha_or_none, is_branch), where sha_or_none is a commit hash
+ if the revision names a remote branch or tag, otherwise None.
+
+ Args:
+ dest: the repository directory.
+ rev: the revision name.
+ """
+ # Pass rev to pre-filter the list.
+ output = cls.run_command(
+ ["show-ref", rev],
+ cwd=dest,
+ show_stdout=False,
+ stdout_only=True,
+ on_returncode="ignore",
+ )
+ refs = {}
+ # NOTE: We do not use splitlines here since that would split on other
+ # unicode separators, which can be maliciously used to install a
+ # different revision.
+ for line in output.strip().split("\n"):
+ line = line.rstrip("\r")
+ if not line:
+ continue
+ try:
+ ref_sha, ref_name = line.split(" ", maxsplit=2)
+ except ValueError:
+ # Include the offending line to simplify troubleshooting if
+ # this error ever occurs.
+ raise ValueError(f"unexpected show-ref line: {line!r}")
+
+ refs[ref_name] = ref_sha
+
+ branch_ref = f"refs/remotes/origin/{rev}"
+ tag_ref = f"refs/tags/{rev}"
+
+ sha = refs.get(branch_ref)
+ if sha is not None:
+ return (sha, True)
+
+ sha = refs.get(tag_ref)
+
+ return (sha, False)
+
+ @classmethod
+ def _should_fetch(cls, dest: str, rev: str) -> bool:
+ """
+ Return true if rev is a ref or is a commit that we don't have locally.
+
+ Branches and tags are not considered in this method because they are
+ assumed to be always available locally (which is a normal outcome of
+ ``git clone`` and ``git fetch --tags``).
+ """
+ if rev.startswith("refs/"):
+ # Always fetch remote refs.
+ return True
+
+ if not looks_like_hash(rev):
+ # Git fetch would fail with abbreviated commits.
+ return False
+
+ if cls.has_commit(dest, rev):
+ # Don't fetch if we have the commit locally.
+ return False
+
+ return True
+
+ @classmethod
+ def resolve_revision(
+ cls, dest: str, url: HiddenText, rev_options: RevOptions
+ ) -> RevOptions:
+ """
+ Resolve a revision to a new RevOptions object with the SHA1 of the
+ branch, tag, or ref if found.
+
+ Args:
+ rev_options: a RevOptions object.
+ """
+ rev = rev_options.arg_rev
+ # The arg_rev property's implementation for Git ensures that the
+ # rev return value is always non-None.
+ assert rev is not None
+
+ sha, is_branch = cls.get_revision_sha(dest, rev)
+
+ if sha is not None:
+ rev_options = rev_options.make_new(sha)
+ rev_options.branch_name = rev if is_branch else None
+
+ return rev_options
+
+ # Do not show a warning for the common case of something that has
+ # the form of a Git commit hash.
+ if not looks_like_hash(rev):
+ logger.warning(
+ "Did not find branch or tag '%s', assuming revision or ref.",
+ rev,
+ )
+
+ if not cls._should_fetch(dest, rev):
+ return rev_options
+
+ # fetch the requested revision
+ cls.run_command(
+ make_command("fetch", "-q", url, rev_options.to_args()),
+ cwd=dest,
+ )
+ # Change the revision to the SHA of the ref we fetched
+ sha = cls.get_revision(dest, rev="FETCH_HEAD")
+ rev_options = rev_options.make_new(sha)
+
+ return rev_options
+
+ @classmethod
+ def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
+ """
+ Return whether the current commit hash equals the given name.
+
+ Args:
+ dest: the repository directory.
+ name: a string name.
+ """
+ if not name:
+ # Then avoid an unnecessary subprocess call.
+ return False
+
+ return cls.get_revision(dest) == name
+
+ def fetch_new(
+ self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
+ ) -> None:
+ rev_display = rev_options.to_display()
+ logger.info("Cloning %s%s to %s", url, rev_display, display_path(dest))
+ if verbosity <= 0:
+ flags: Tuple[str, ...] = ("--quiet",)
+ elif verbosity == 1:
+ flags = ()
+ else:
+ flags = ("--verbose", "--progress")
+ if self.get_git_version() >= (2, 17):
+ # Git added support for partial clone in 2.17
+ # https://git-scm.com/docs/partial-clone
+            # Speeds up cloning by functioning without a complete copy of the repository
+ self.run_command(
+ make_command(
+ "clone",
+ "--filter=blob:none",
+ *flags,
+ url,
+ dest,
+ )
+ )
+ else:
+ self.run_command(make_command("clone", *flags, url, dest))
+
+ if rev_options.rev:
+ # Then a specific revision was requested.
+ rev_options = self.resolve_revision(dest, url, rev_options)
+ branch_name = getattr(rev_options, "branch_name", None)
+ logger.debug("Rev options %s, branch_name %s", rev_options, branch_name)
+ if branch_name is None:
+ # Only do a checkout if the current commit id doesn't match
+ # the requested revision.
+ if not self.is_commit_id_equal(dest, rev_options.rev):
+ cmd_args = make_command(
+ "checkout",
+ "-q",
+ rev_options.to_args(),
+ )
+ self.run_command(cmd_args, cwd=dest)
+ elif self.get_current_branch(dest) != branch_name:
+ # Then a specific branch was requested, and that branch
+ # is not yet checked out.
+ track_branch = f"origin/{branch_name}"
+ cmd_args = [
+ "checkout",
+ "-b",
+ branch_name,
+ "--track",
+ track_branch,
+ ]
+ self.run_command(cmd_args, cwd=dest)
+ else:
+ sha = self.get_revision(dest)
+ rev_options = rev_options.make_new(sha)
+
+ logger.info("Resolved %s to commit %s", url, rev_options.rev)
+
+        # The repo may contain submodules.
+ self.update_submodules(dest)
+
+ def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ self.run_command(
+ make_command("config", "remote.origin.url", url),
+ cwd=dest,
+ )
+ cmd_args = make_command("checkout", "-q", rev_options.to_args())
+ self.run_command(cmd_args, cwd=dest)
+
+ self.update_submodules(dest)
+
+ def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ # First fetch changes from the default remote
+ if self.get_git_version() >= (1, 9):
+ # fetch tags in addition to everything else
+ self.run_command(["fetch", "-q", "--tags"], cwd=dest)
+ else:
+ self.run_command(["fetch", "-q"], cwd=dest)
+ # Then reset to wanted revision (maybe even origin/master)
+ rev_options = self.resolve_revision(dest, url, rev_options)
+ cmd_args = make_command("reset", "--hard", "-q", rev_options.to_args())
+ self.run_command(cmd_args, cwd=dest)
+        # Update submodules.
+ self.update_submodules(dest)
+
+ @classmethod
+ def get_remote_url(cls, location: str) -> str:
+ """
+ Return URL of the first remote encountered.
+
+ Raises RemoteNotFoundError if the repository does not have a remote
+ url configured.
+ """
+ # We need to pass 1 for extra_ok_returncodes since the command
+ # exits with return code 1 if there are no matching lines.
+ stdout = cls.run_command(
+ ["config", "--get-regexp", r"remote\..*\.url"],
+ extra_ok_returncodes=(1,),
+ show_stdout=False,
+ stdout_only=True,
+ cwd=location,
+ )
+ remotes = stdout.splitlines()
+ try:
+ found_remote = remotes[0]
+ except IndexError:
+ raise RemoteNotFoundError
+
+ for remote in remotes:
+ if remote.startswith("remote.origin.url "):
+ found_remote = remote
+ break
+ url = found_remote.split(" ")[1]
+ return cls._git_remote_to_pip_url(url.strip())
+
+ @staticmethod
+ def _git_remote_to_pip_url(url: str) -> str:
+ """
+ Convert a remote url from what git uses to what pip accepts.
+
+ There are 3 legal forms **url** may take:
+
+ 1. A fully qualified url: ssh://git@example.com/foo/bar.git
+ 2. A local project.git folder: /path/to/bare/repository.git
+ 3. SCP shorthand for form 1: git@example.com:foo/bar.git
+
+    Form 1 is output as-is. Form 2 must be converted to a URI and form 3 must
+ be converted to form 1.
+
+ See the corresponding test test_git_remote_url_to_pip() for examples of
+ sample inputs/outputs.
+ """
+ if re.match(r"\w+://", url):
+            # This is already valid. Pass it through as-is.
+ return url
+ if os.path.exists(url):
+ # A local bare remote (git clone --mirror).
+ # Needs a file:// prefix.
+ return pathlib.PurePath(url).as_uri()
+ scp_match = SCP_REGEX.match(url)
+ if scp_match:
+ # Add an ssh:// prefix and replace the ':' with a '/'.
+ return scp_match.expand(r"ssh://\1\2/\3")
+ # Otherwise, bail out.
+ raise RemoteNotValidError(url)
+
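+    # Illustrative conversions (not pip code; the method name is
+    # hypothetical) for remote forms 1 and 3 documented above; form 2
+    # depends on an on-disk path, so it is only described there.
+    @staticmethod
+    def _example_remote_to_pip_url() -> None:
+        assert (
+            Git._git_remote_to_pip_url("ssh://git@example.com/foo/bar.git")
+            == "ssh://git@example.com/foo/bar.git"
+        )
+        assert (
+            Git._git_remote_to_pip_url("git@example.com:foo/bar.git")
+            == "ssh://git@example.com/foo/bar.git"
+        )
+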
+ @classmethod
+ def has_commit(cls, location: str, rev: str) -> bool:
+ """
+ Check if rev is a commit that is available in the local repository.
+ """
+ try:
+ cls.run_command(
+ ["rev-parse", "-q", "--verify", "sha^" + rev],
+ cwd=location,
+ log_failed_cmd=False,
+ )
+ except InstallationError:
+ return False
+ else:
+ return True
+
+ @classmethod
+ def get_revision(cls, location: str, rev: Optional[str] = None) -> str:
+ if rev is None:
+ rev = "HEAD"
+ current_rev = cls.run_command(
+ ["rev-parse", rev],
+ show_stdout=False,
+ stdout_only=True,
+ cwd=location,
+ )
+ return current_rev.strip()
+
+ @classmethod
+ def get_subdirectory(cls, location: str) -> Optional[str]:
+ """
+ Return the path to Python project root, relative to the repo root.
+ Return None if the project root is in the repo root.
+ """
+ # find the repo root
+ git_dir = cls.run_command(
+ ["rev-parse", "--git-dir"],
+ show_stdout=False,
+ stdout_only=True,
+ cwd=location,
+ ).strip()
+ if not os.path.isabs(git_dir):
+ git_dir = os.path.join(location, git_dir)
+ repo_root = os.path.abspath(os.path.join(git_dir, ".."))
+ return find_path_to_project_root_from_repo_root(location, repo_root)
+
+ @classmethod
+ def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
+ """
+ Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
+ That's required because although they use SSH they sometimes don't
+        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
+ parsing. Hence we remove it again afterwards and return it as a stub.
+ """
+ # Works around an apparent Git bug
+ # (see https://article.gmane.org/gmane.comp.version-control.git/146500)
+ scheme, netloc, path, query, fragment = urlsplit(url)
+ if scheme.endswith("file"):
+ initial_slashes = path[: -len(path.lstrip("/"))]
+ newpath = initial_slashes + urllib.request.url2pathname(path).replace(
+ "\\", "/"
+ ).lstrip("/")
+ after_plus = scheme.find("+") + 1
+ url = scheme[:after_plus] + urlunsplit(
+ (scheme[after_plus:], netloc, newpath, query, fragment),
+ )
+
+ if "://" not in url:
+ assert "file:" not in url
+ url = url.replace("git+", "git+ssh://")
+ url, rev, user_pass = super().get_url_rev_and_auth(url)
+ url = url.replace("ssh://", "")
+ else:
+ url, rev, user_pass = super().get_url_rev_and_auth(url)
+
+ return url, rev, user_pass
+
+ @classmethod
+ def update_submodules(cls, location: str) -> None:
+ if not os.path.exists(os.path.join(location, ".gitmodules")):
+ return
+ cls.run_command(
+ ["submodule", "update", "--init", "--recursive", "-q"],
+ cwd=location,
+ )
+
+ @classmethod
+ def get_repository_root(cls, location: str) -> Optional[str]:
+ loc = super().get_repository_root(location)
+ if loc:
+ return loc
+ try:
+ r = cls.run_command(
+ ["rev-parse", "--show-toplevel"],
+ cwd=location,
+ show_stdout=False,
+ stdout_only=True,
+ on_returncode="raise",
+ log_failed_cmd=False,
+ )
+ except BadCommand:
+ logger.debug(
+ "could not determine if %s is under git control "
+ "because git is not available",
+ location,
+ )
+ return None
+ except InstallationError:
+ return None
+ return os.path.normpath(r.rstrip("\r\n"))
+
+ @staticmethod
+ def should_add_vcs_url_prefix(repo_url: str) -> bool:
+ """In either https or ssh form, requirements must be prefixed with git+."""
+ return True
+
+
+vcs.register(Git)
diff --git a/third_party/python/pip/pip/_internal/vcs/mercurial.py b/third_party/python/pip/pip/_internal/vcs/mercurial.py
new file mode 100644
index 0000000000..2a005e0aff
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/vcs/mercurial.py
@@ -0,0 +1,163 @@
+import configparser
+import logging
+import os
+from typing import List, Optional, Tuple
+
+from pip._internal.exceptions import BadCommand, InstallationError
+from pip._internal.utils.misc import HiddenText, display_path
+from pip._internal.utils.subprocess import make_command
+from pip._internal.utils.urls import path_to_url
+from pip._internal.vcs.versioncontrol import (
+ RevOptions,
+ VersionControl,
+ find_path_to_project_root_from_repo_root,
+ vcs,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class Mercurial(VersionControl):
+ name = "hg"
+ dirname = ".hg"
+ repo_name = "clone"
+ schemes = (
+ "hg+file",
+ "hg+http",
+ "hg+https",
+ "hg+ssh",
+ "hg+static-http",
+ )
+
+ @staticmethod
+ def get_base_rev_args(rev: str) -> List[str]:
+ return [rev]
+
+ def fetch_new(
+ self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
+ ) -> None:
+ rev_display = rev_options.to_display()
+ logger.info(
+ "Cloning hg %s%s to %s",
+ url,
+ rev_display,
+ display_path(dest),
+ )
+ if verbosity <= 0:
+ flags: Tuple[str, ...] = ("--quiet",)
+ elif verbosity == 1:
+ flags = ()
+ elif verbosity == 2:
+ flags = ("--verbose",)
+ else:
+ flags = ("--verbose", "--debug")
+ self.run_command(make_command("clone", "--noupdate", *flags, url, dest))
+ self.run_command(
+ make_command("update", *flags, rev_options.to_args()),
+ cwd=dest,
+ )
+
+ def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ repo_config = os.path.join(dest, self.dirname, "hgrc")
+ config = configparser.RawConfigParser()
+ try:
+ config.read(repo_config)
+ config.set("paths", "default", url.secret)
+ with open(repo_config, "w") as config_file:
+ config.write(config_file)
+ except (OSError, configparser.NoSectionError) as exc:
+ logger.warning("Could not switch Mercurial repository to %s: %s", url, exc)
+ else:
+ cmd_args = make_command("update", "-q", rev_options.to_args())
+ self.run_command(cmd_args, cwd=dest)
+
+ def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ self.run_command(["pull", "-q"], cwd=dest)
+ cmd_args = make_command("update", "-q", rev_options.to_args())
+ self.run_command(cmd_args, cwd=dest)
+
+ @classmethod
+ def get_remote_url(cls, location: str) -> str:
+ url = cls.run_command(
+ ["showconfig", "paths.default"],
+ show_stdout=False,
+ stdout_only=True,
+ cwd=location,
+ ).strip()
+ if cls._is_local_repository(url):
+ url = path_to_url(url)
+ return url.strip()
+
+ @classmethod
+ def get_revision(cls, location: str) -> str:
+ """
+        Return the repository-local changeset revision number, as a string.
+ """
+ current_revision = cls.run_command(
+ ["parents", "--template={rev}"],
+ show_stdout=False,
+ stdout_only=True,
+ cwd=location,
+ ).strip()
+ return current_revision
+
+ @classmethod
+ def get_requirement_revision(cls, location: str) -> str:
+ """
+ Return the changeset identification hash, as a 40-character
+        hexadecimal string.
+ """
+ current_rev_hash = cls.run_command(
+ ["parents", "--template={node}"],
+ show_stdout=False,
+ stdout_only=True,
+ cwd=location,
+ ).strip()
+ return current_rev_hash
+
+ @classmethod
+ def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
+ """Always assume the versions don't match"""
+ return False
+
+ @classmethod
+ def get_subdirectory(cls, location: str) -> Optional[str]:
+ """
+ Return the path to Python project root, relative to the repo root.
+ Return None if the project root is in the repo root.
+ """
+ # find the repo root
+ repo_root = cls.run_command(
+ ["root"], show_stdout=False, stdout_only=True, cwd=location
+ ).strip()
+ if not os.path.isabs(repo_root):
+ repo_root = os.path.abspath(os.path.join(location, repo_root))
+ return find_path_to_project_root_from_repo_root(location, repo_root)
+
+ @classmethod
+ def get_repository_root(cls, location: str) -> Optional[str]:
+ loc = super().get_repository_root(location)
+ if loc:
+ return loc
+ try:
+ r = cls.run_command(
+ ["root"],
+ cwd=location,
+ show_stdout=False,
+ stdout_only=True,
+ on_returncode="raise",
+ log_failed_cmd=False,
+ )
+ except BadCommand:
+ logger.debug(
+ "could not determine if %s is under hg control "
+ "because hg is not available",
+ location,
+ )
+ return None
+ except InstallationError:
+ return None
+ return os.path.normpath(r.rstrip("\r\n"))
+
+
+vcs.register(Mercurial)
diff --git a/third_party/python/pip/pip/_internal/vcs/subversion.py b/third_party/python/pip/pip/_internal/vcs/subversion.py
new file mode 100644
index 0000000000..16d93a67b7
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/vcs/subversion.py
@@ -0,0 +1,324 @@
+import logging
+import os
+import re
+from typing import List, Optional, Tuple
+
+from pip._internal.utils.misc import (
+ HiddenText,
+ display_path,
+ is_console_interactive,
+ is_installable_dir,
+ split_auth_from_netloc,
+)
+from pip._internal.utils.subprocess import CommandArgs, make_command
+from pip._internal.vcs.versioncontrol import (
+ AuthInfo,
+ RemoteNotFoundError,
+ RevOptions,
+ VersionControl,
+ vcs,
+)
+
+logger = logging.getLogger(__name__)
+
+_svn_xml_url_re = re.compile('url="([^"]+)"')
+_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
+_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
+_svn_info_xml_url_re = re.compile(r"<url>(.*)</url>")
+
+
+class Subversion(VersionControl):
+ name = "svn"
+ dirname = ".svn"
+ repo_name = "checkout"
+ schemes = ("svn+ssh", "svn+http", "svn+https", "svn+svn", "svn+file")
+
+ @classmethod
+ def should_add_vcs_url_prefix(cls, remote_url: str) -> bool:
+ return True
+
+ @staticmethod
+ def get_base_rev_args(rev: str) -> List[str]:
+ return ["-r", rev]
+
+ @classmethod
+ def get_revision(cls, location: str) -> str:
+ """
+ Return the maximum revision for all files under a given location
+ """
+ # Note: taken from setuptools.command.egg_info
+ revision = 0
+
+ for base, dirs, _ in os.walk(location):
+ if cls.dirname not in dirs:
+ dirs[:] = []
+ continue # no sense walking uncontrolled subdirs
+ dirs.remove(cls.dirname)
+ entries_fn = os.path.join(base, cls.dirname, "entries")
+ if not os.path.exists(entries_fn):
+ # FIXME: should we warn?
+ continue
+
+ dirurl, localrev = cls._get_svn_url_rev(base)
+
+ if base == location:
+ assert dirurl is not None
+ base = dirurl + "/" # save the root url
+ elif not dirurl or not dirurl.startswith(base):
+ dirs[:] = []
+ continue # not part of the same svn tree, skip it
+ revision = max(revision, localrev)
+ return str(revision)
+
+ @classmethod
+ def get_netloc_and_auth(
+ cls, netloc: str, scheme: str
+ ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]:
+ """
+ This override allows the auth information to be passed to svn via the
+ --username and --password options instead of via the URL.
+ """
+ if scheme == "ssh":
+ # The --username and --password options can't be used for
+ # svn+ssh URLs, so keep the auth information in the URL.
+ return super().get_netloc_and_auth(netloc, scheme)
+
+ return split_auth_from_netloc(netloc)
+
+ @classmethod
+ def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
+        # The superclass strips the "svn+" prefix from "svn+ssh://" URLs;
+        # re-add it here.
+ url, rev, user_pass = super().get_url_rev_and_auth(url)
+ if url.startswith("ssh://"):
+ url = "svn+" + url
+ return url, rev, user_pass
+
+ @staticmethod
+ def make_rev_args(
+ username: Optional[str], password: Optional[HiddenText]
+ ) -> CommandArgs:
+ extra_args: CommandArgs = []
+ if username:
+ extra_args += ["--username", username]
+ if password:
+ extra_args += ["--password", password]
+
+ return extra_args
+
+ @classmethod
+ def get_remote_url(cls, location: str) -> str:
+        # In cases where the source is in a subdirectory, we have to walk up
+        # from the location until we find a valid project root.
+ orig_location = location
+ while not is_installable_dir(location):
+ last_location = location
+ location = os.path.dirname(location)
+ if location == last_location:
+ # We've traversed up to the root of the filesystem without
+ # finding a Python project.
+ logger.warning(
+ "Could not find Python project for directory %s (tried all "
+ "parent directories)",
+ orig_location,
+ )
+ raise RemoteNotFoundError
+
+ url, _rev = cls._get_svn_url_rev(location)
+ if url is None:
+ raise RemoteNotFoundError
+
+ return url
+
+ @classmethod
+ def _get_svn_url_rev(cls, location: str) -> Tuple[Optional[str], int]:
+ from pip._internal.exceptions import InstallationError
+
+ entries_path = os.path.join(location, cls.dirname, "entries")
+ if os.path.exists(entries_path):
+ with open(entries_path) as f:
+ data = f.read()
+ else: # subversion >= 1.7 does not have the 'entries' file
+ data = ""
+
+ url = None
+ if data.startswith("8") or data.startswith("9") or data.startswith("10"):
+ entries = list(map(str.splitlines, data.split("\n\x0c\n")))
+ del entries[0][0] # get rid of the '8'
+ url = entries[0][3]
+ revs = [int(d[9]) for d in entries if len(d) > 9 and d[9]] + [0]
+ elif data.startswith("<?xml"):
+ match = _svn_xml_url_re.search(data)
+ if not match:
+ raise ValueError(f"Badly formatted data: {data!r}")
+ url = match.group(1) # get repository URL
+ revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
+ else:
+ try:
+ # subversion >= 1.7
+ # Note that using get_remote_call_options is not necessary here
+ # because `svn info` is being run against a local directory.
+ # We don't need to worry about making sure interactive mode
+ # is being used to prompt for passwords, because passwords
+ # are only potentially needed for remote server requests.
+ xml = cls.run_command(
+ ["info", "--xml", location],
+ show_stdout=False,
+ stdout_only=True,
+ )
+ match = _svn_info_xml_url_re.search(xml)
+ assert match is not None
+ url = match.group(1)
+ revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
+ except InstallationError:
+ url, revs = None, []
+
+ if revs:
+ rev = max(revs)
+ else:
+ rev = 0
+
+ return url, rev
+
+ @classmethod
+ def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
+ """Always assume the versions don't match"""
+ return False
+
+ def __init__(self, use_interactive: Optional[bool] = None) -> None:
+ if use_interactive is None:
+ use_interactive = is_console_interactive()
+ self.use_interactive = use_interactive
+
+ # This member is used to cache the fetched version of the current
+ # ``svn`` client.
+ # Special value definitions:
+ # None: Not evaluated yet.
+ # Empty tuple: Could not parse version.
+ self._vcs_version: Optional[Tuple[int, ...]] = None
+
+ super().__init__()
+
+ def call_vcs_version(self) -> Tuple[int, ...]:
+ """Query the version of the currently installed Subversion client.
+
+ :return: A tuple containing the parts of the version information or
+ ``()`` if the version returned from ``svn`` could not be parsed.
+ :raises: BadCommand: If ``svn`` is not installed.
+ """
+ # Example versions:
+ # svn, version 1.10.3 (r1842928)
+ # compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0
+ # svn, version 1.7.14 (r1542130)
+ # compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu
+ # svn, version 1.12.0-SlikSvn (SlikSvn/1.12.0)
+ # compiled May 28 2019, 13:44:56 on x86_64-microsoft-windows6.2
+ version_prefix = "svn, version "
+ version = self.run_command(["--version"], show_stdout=False, stdout_only=True)
+ if not version.startswith(version_prefix):
+ return ()
+
+ version = version[len(version_prefix) :].split()[0]
+ version_list = version.partition("-")[0].split(".")
+ try:
+ parsed_version = tuple(map(int, version_list))
+ except ValueError:
+ return ()
+
+ return parsed_version
+
+ def get_vcs_version(self) -> Tuple[int, ...]:
+ """Return the version of the currently installed Subversion client.
+
+ If the version of the Subversion client has already been queried,
+ a cached value will be used.
+
+ :return: A tuple containing the parts of the version information or
+ ``()`` if the version returned from ``svn`` could not be parsed.
+ :raises: BadCommand: If ``svn`` is not installed.
+ """
+ if self._vcs_version is not None:
+ # Use cached version, if available.
+ # If parsing the version failed previously (empty tuple),
+ # do not attempt to parse it again.
+ return self._vcs_version
+
+ vcs_version = self.call_vcs_version()
+ self._vcs_version = vcs_version
+ return vcs_version
+
+ def get_remote_call_options(self) -> CommandArgs:
+ """Return options to be used on calls to Subversion that contact the server.
+
+ These options are applicable for the following ``svn`` subcommands used
+ in this class.
+
+ - checkout
+ - switch
+ - update
+
+ :return: A list of command line arguments to pass to ``svn``.
+ """
+ if not self.use_interactive:
+ # --non-interactive switch is available since Subversion 0.14.4.
+ # Subversion < 1.8 runs in interactive mode by default.
+ return ["--non-interactive"]
+
+ svn_version = self.get_vcs_version()
+ # By default, Subversion >= 1.8 runs in non-interactive mode if
+ # stdin is not a TTY. Since that is how pip invokes SVN, in
+ # call_subprocess(), pip must pass --force-interactive to ensure
+ # the user can be prompted for a password, if required.
+ # SVN added the --force-interactive option in SVN 1.8. Since
+ # e.g. RHEL/CentOS 7, which is supported until 2024, ships with
+ # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip
+ # can't safely add the option if the SVN version is < 1.8 (or unknown).
+ if svn_version >= (1, 8):
+ return ["--force-interactive"]
+
+ return []
+
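+    # Behaviour sketch (hypothetical instance `svn`):
+    #   svn.use_interactive is False           -> ["--non-interactive"]
+    #   interactive and svn version >= 1.8     -> ["--force-interactive"]
+    #   interactive and older/unknown version  -> []
+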
+ def fetch_new(
+ self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
+ ) -> None:
+ rev_display = rev_options.to_display()
+ logger.info(
+ "Checking out %s%s to %s",
+ url,
+ rev_display,
+ display_path(dest),
+ )
+ if verbosity <= 0:
+ flag = "--quiet"
+ else:
+ flag = ""
+ cmd_args = make_command(
+ "checkout",
+ flag,
+ self.get_remote_call_options(),
+ rev_options.to_args(),
+ url,
+ dest,
+ )
+ self.run_command(cmd_args)
+
+ def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ cmd_args = make_command(
+ "switch",
+ self.get_remote_call_options(),
+ rev_options.to_args(),
+ url,
+ dest,
+ )
+ self.run_command(cmd_args)
+
+ def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ cmd_args = make_command(
+ "update",
+ self.get_remote_call_options(),
+ rev_options.to_args(),
+ dest,
+ )
+ self.run_command(cmd_args)
+
+
+vcs.register(Subversion)
diff --git a/third_party/python/pip/pip/_internal/vcs/versioncontrol.py b/third_party/python/pip/pip/_internal/vcs/versioncontrol.py
new file mode 100644
index 0000000000..02bbf68e7a
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/vcs/versioncontrol.py
@@ -0,0 +1,705 @@
+"""Handles all VCS (version control) support"""
+
+import logging
+import os
+import shutil
+import sys
+import urllib.parse
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Mapping,
+ Optional,
+ Tuple,
+ Type,
+ Union,
+)
+
+from pip._internal.cli.spinners import SpinnerInterface
+from pip._internal.exceptions import BadCommand, InstallationError
+from pip._internal.utils.misc import (
+ HiddenText,
+ ask_path_exists,
+ backup_dir,
+ display_path,
+ hide_url,
+ hide_value,
+ is_installable_dir,
+ rmtree,
+)
+from pip._internal.utils.subprocess import (
+ CommandArgs,
+ call_subprocess,
+ format_command_args,
+ make_command,
+)
+from pip._internal.utils.urls import get_url_scheme
+
+if TYPE_CHECKING:
+ # Literal was introduced in Python 3.8.
+ #
+ # TODO: Remove `if TYPE_CHECKING` when dropping support for Python 3.7.
+ from typing import Literal
+
+
+__all__ = ["vcs"]
+
+
+logger = logging.getLogger(__name__)
+
+AuthInfo = Tuple[Optional[str], Optional[str]]
+
+
+def is_url(name: str) -> bool:
+ """
+ Return true if the name looks like a URL.
+ """
+ scheme = get_url_scheme(name)
+ if scheme is None:
+ return False
+ return scheme in ["http", "https", "file", "ftp"] + vcs.all_schemes
+
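+# Illustrative results, assuming the usual backends are registered so that
+# vcs.all_schemes includes entries such as "git+https":
+#   is_url("https://example.com/pkg.tar.gz") -> True
+#   is_url("git+https://example.com/repo")   -> True
+#   is_url("relative/path")                  -> False (no recognized scheme)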
+
+def make_vcs_requirement_url(
+ repo_url: str, rev: str, project_name: str, subdir: Optional[str] = None
+) -> str:
+ """
+ Return the URL for a VCS requirement.
+
+ Args:
+ repo_url: the remote VCS url, with any needed VCS prefix (e.g. "git+").
+ project_name: the (unescaped) project name.
+ """
+ egg_project_name = project_name.replace("-", "_")
+ req = f"{repo_url}@{rev}#egg={egg_project_name}"
+ if subdir:
+ req += f"&subdirectory={subdir}"
+
+ return req
+
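+# For example (hypothetical values):
+#   make_vcs_requirement_url(
+#       "git+https://example.com/repo.git", "abc123", "my-project", "pkg"
+#   )
+#   -> "git+https://example.com/repo.git@abc123#egg=my_project&subdirectory=pkg"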
+
+def find_path_to_project_root_from_repo_root(
+ location: str, repo_root: str
+) -> Optional[str]:
+ """
+    Find the Python project's root by searching up the filesystem from
+ `location`. Return the path to project root relative to `repo_root`.
+ Return None if the project root is `repo_root`, or cannot be found.
+ """
+ # find project root.
+ orig_location = location
+ while not is_installable_dir(location):
+ last_location = location
+ location = os.path.dirname(location)
+ if location == last_location:
+ # We've traversed up to the root of the filesystem without
+ # finding a Python project.
+ logger.warning(
+ "Could not find a Python project for directory %s (tried all "
+ "parent directories)",
+ orig_location,
+ )
+ return None
+
+ if os.path.samefile(repo_root, location):
+ return None
+
+ return os.path.relpath(location, repo_root)
+
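+# For example, with a hypothetical layout where /repo is the repository root
+# and /repo/src/pkg is the first parent containing an installable project:
+#   find_path_to_project_root_from_repo_root("/repo/src/pkg/sub", "/repo")
+#   -> "src/pkg"
+# while it returns None when the project root is /repo itself.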
+
+class RemoteNotFoundError(Exception):
+ pass
+
+
+class RemoteNotValidError(Exception):
+ def __init__(self, url: str):
+ super().__init__(url)
+ self.url = url
+
+
+class RevOptions:
+
+ """
+ Encapsulates a VCS-specific revision to install, along with any VCS
+ install options.
+
+ Instances of this class should be treated as if immutable.
+ """
+
+ def __init__(
+ self,
+ vc_class: Type["VersionControl"],
+ rev: Optional[str] = None,
+ extra_args: Optional[CommandArgs] = None,
+ ) -> None:
+ """
+ Args:
+ vc_class: a VersionControl subclass.
+ rev: the name of the revision to install.
+ extra_args: a list of extra options.
+ """
+ if extra_args is None:
+ extra_args = []
+
+ self.extra_args = extra_args
+ self.rev = rev
+ self.vc_class = vc_class
+ self.branch_name: Optional[str] = None
+
+ def __repr__(self) -> str:
+ return f"<RevOptions {self.vc_class.name}: rev={self.rev!r}>"
+
+ @property
+ def arg_rev(self) -> Optional[str]:
+ if self.rev is None:
+ return self.vc_class.default_arg_rev
+
+ return self.rev
+
+ def to_args(self) -> CommandArgs:
+ """
+ Return the VCS-specific command arguments.
+ """
+ args: CommandArgs = []
+ rev = self.arg_rev
+ if rev is not None:
+ args += self.vc_class.get_base_rev_args(rev)
+ args += self.extra_args
+
+ return args
+
+ def to_display(self) -> str:
+ if not self.rev:
+ return ""
+
+ return f" (to revision {self.rev})"
+
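+    # Usage sketch (hypothetical backend): if Backend.get_base_rev_args("1.0")
+    # returns ["-r", "1.0"], then RevOptions(Backend, "1.0").to_args() yields
+    # ["-r", "1.0"] plus any extra_args, and to_display() yields
+    # " (to revision 1.0)".
+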
+ def make_new(self, rev: str) -> "RevOptions":
+ """
+ Make a copy of the current instance, but with a new rev.
+
+ Args:
+ rev: the name of the revision for the new object.
+ """
+ return self.vc_class.make_rev_options(rev, extra_args=self.extra_args)
+
+
+class VcsSupport:
+ _registry: Dict[str, "VersionControl"] = {}
+ schemes = ["ssh", "git", "hg", "bzr", "sftp", "svn"]
+
+ def __init__(self) -> None:
+ # Register more schemes with urlparse for various version control
+ # systems
+ urllib.parse.uses_netloc.extend(self.schemes)
+ super().__init__()
+
+ def __iter__(self) -> Iterator[str]:
+ return self._registry.__iter__()
+
+ @property
+ def backends(self) -> List["VersionControl"]:
+ return list(self._registry.values())
+
+ @property
+ def dirnames(self) -> List[str]:
+ return [backend.dirname for backend in self.backends]
+
+ @property
+ def all_schemes(self) -> List[str]:
+ schemes: List[str] = []
+ for backend in self.backends:
+ schemes.extend(backend.schemes)
+ return schemes
+
+ def register(self, cls: Type["VersionControl"]) -> None:
+ if not hasattr(cls, "name"):
+ logger.warning("Cannot register VCS %s", cls.__name__)
+ return
+ if cls.name not in self._registry:
+ self._registry[cls.name] = cls()
+ logger.debug("Registered VCS backend: %s", cls.name)
+
+ def unregister(self, name: str) -> None:
+ if name in self._registry:
+ del self._registry[name]
+
+ def get_backend_for_dir(self, location: str) -> Optional["VersionControl"]:
+ """
+ Return a VersionControl object if a repository of that type is found
+ at the given directory.
+ """
+ vcs_backends = {}
+ for vcs_backend in self._registry.values():
+ repo_path = vcs_backend.get_repository_root(location)
+ if not repo_path:
+ continue
+ logger.debug("Determine that %s uses VCS: %s", location, vcs_backend.name)
+ vcs_backends[repo_path] = vcs_backend
+
+ if not vcs_backends:
+ return None
+
+ # Choose the VCS in the inner-most directory. Since all repository
+ # roots found here would be either `location` or one of its
+ # parents, the longest path should have the most path components,
+ # i.e. the backend representing the inner-most repository.
+ inner_most_repo_path = max(vcs_backends, key=len)
+ return vcs_backends[inner_most_repo_path]
+
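+    # Example with hypothetical nested checkouts: if /src is a Git repository
+    # and /src/vendor a Mercurial one, then for location /src/vendor/pkg both
+    # roots are detected and the longer path /src/vendor wins, so the
+    # Mercurial backend is returned.
+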
+ def get_backend_for_scheme(self, scheme: str) -> Optional["VersionControl"]:
+ """
+ Return a VersionControl object or None.
+ """
+ for vcs_backend in self._registry.values():
+ if scheme in vcs_backend.schemes:
+ return vcs_backend
+ return None
+
+ def get_backend(self, name: str) -> Optional["VersionControl"]:
+ """
+ Return a VersionControl object or None.
+ """
+ name = name.lower()
+ return self._registry.get(name)
+
+
+vcs = VcsSupport()
+
+
+class VersionControl:
+ name = ""
+ dirname = ""
+ repo_name = ""
+ # List of supported schemes for this Version Control
+ schemes: Tuple[str, ...] = ()
+ # Iterable of environment variable names to pass to call_subprocess().
+ unset_environ: Tuple[str, ...] = ()
+ default_arg_rev: Optional[str] = None
+
+ @classmethod
+ def should_add_vcs_url_prefix(cls, remote_url: str) -> bool:
+ """
+ Return whether the vcs prefix (e.g. "git+") should be added to a
+ repository's remote url when used in a requirement.
+ """
+ return not remote_url.lower().startswith(f"{cls.name}:")
+
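+    # Sketch: for a backend named "git", "https://example.com/repo.git"
+    # gives True (the requirement URL becomes "git+https://..."), while
+    # "git://example.com/repo" already starts with "git:" and gives False.
+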
+ @classmethod
+ def get_subdirectory(cls, location: str) -> Optional[str]:
+ """
+ Return the path to Python project root, relative to the repo root.
+ Return None if the project root is in the repo root.
+ """
+ return None
+
+ @classmethod
+ def get_requirement_revision(cls, repo_dir: str) -> str:
+ """
+ Return the revision string that should be used in a requirement.
+ """
+ return cls.get_revision(repo_dir)
+
+ @classmethod
+ def get_src_requirement(cls, repo_dir: str, project_name: str) -> str:
+ """
+ Return the requirement string to use to redownload the files
+ currently at the given repository directory.
+
+ Args:
+ project_name: the (unescaped) project name.
+
+ The return value has a form similar to the following:
+
+ {repository_url}@{revision}#egg={project_name}
+ """
+ repo_url = cls.get_remote_url(repo_dir)
+
+ if cls.should_add_vcs_url_prefix(repo_url):
+ repo_url = f"{cls.name}+{repo_url}"
+
+ revision = cls.get_requirement_revision(repo_dir)
+ subdir = cls.get_subdirectory(repo_dir)
+ req = make_vcs_requirement_url(repo_url, revision, project_name, subdir=subdir)
+
+ return req
+
+ @staticmethod
+ def get_base_rev_args(rev: str) -> List[str]:
+ """
+ Return the base revision arguments for a vcs command.
+
+ Args:
+ rev: the name of a revision to install. Cannot be None.
+ """
+ raise NotImplementedError
+
+ def is_immutable_rev_checkout(self, url: str, dest: str) -> bool:
+ """
+ Return true if the commit hash checked out at dest matches
+ the revision in url.
+
+ Always return False, if the VCS does not support immutable commit
+ hashes.
+
+ This method does not check if there are local uncommitted changes
+ in dest after checkout, as pip currently has no use case for that.
+ """
+ return False
+
+ @classmethod
+ def make_rev_options(
+ cls, rev: Optional[str] = None, extra_args: Optional[CommandArgs] = None
+ ) -> RevOptions:
+ """
+ Return a RevOptions object.
+
+ Args:
+ rev: the name of a revision to install.
+ extra_args: a list of extra options.
+ """
+ return RevOptions(cls, rev, extra_args=extra_args)
+
+ @classmethod
+ def _is_local_repository(cls, repo: str) -> bool:
+ """
+        POSIX absolute paths start with os.path.sep;
+        win32 ones start with a drive (like c:\\folder).
+ """
+ drive, tail = os.path.splitdrive(repo)
+ return repo.startswith(os.path.sep) or bool(drive)
+
+ @classmethod
+ def get_netloc_and_auth(
+ cls, netloc: str, scheme: str
+ ) -> Tuple[str, Tuple[Optional[str], Optional[str]]]:
+ """
+ Parse the repository URL's netloc, and return the new netloc to use
+ along with auth information.
+
+ Args:
+ netloc: the original repository URL netloc.
+ scheme: the repository URL's scheme without the vcs prefix.
+
+ This is mainly for the Subversion class to override, so that auth
+ information can be provided via the --username and --password options
+ instead of through the URL. For other subclasses like Git without
+ such an option, auth information must stay in the URL.
+
+ Returns: (netloc, (username, password)).
+ """
+ return netloc, (None, None)
+
+ @classmethod
+ def get_url_rev_and_auth(cls, url: str) -> Tuple[str, Optional[str], AuthInfo]:
+ """
+ Parse the repository URL to use, and return the URL, revision,
+ and auth info to use.
+
+ Returns: (url, rev, (username, password)).
+ """
+ scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
+ if "+" not in scheme:
+ raise ValueError(
+ "Sorry, {!r} is a malformed VCS url. "
+ "The format is <vcs>+<protocol>://<url>, "
+ "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp".format(url)
+ )
+ # Remove the vcs prefix.
+ scheme = scheme.split("+", 1)[1]
+ netloc, user_pass = cls.get_netloc_and_auth(netloc, scheme)
+ rev = None
+ if "@" in path:
+ path, rev = path.rsplit("@", 1)
+ if not rev:
+ raise InstallationError(
+ "The URL {!r} has an empty revision (after @) "
+ "which is not supported. Include a revision after @ "
+ "or remove @ from the URL.".format(url)
+ )
+ url = urllib.parse.urlunsplit((scheme, netloc, path, query, ""))
+ return url, rev, user_pass
+
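+    # For example, for this base class (auth left in the netloc):
+    #   get_url_rev_and_auth("git+https://example.com/repo.git@v1.0")
+    #   -> ("https://example.com/repo.git", "v1.0", (None, None))
+    # Subclasses such as Subversion may instead extract the auth info via
+    # get_netloc_and_auth().
+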
+ @staticmethod
+ def make_rev_args(
+ username: Optional[str], password: Optional[HiddenText]
+ ) -> CommandArgs:
+ """
+ Return the RevOptions "extra arguments" to use in obtain().
+ """
+ return []
+
+ def get_url_rev_options(self, url: HiddenText) -> Tuple[HiddenText, RevOptions]:
+ """
+ Return the URL and RevOptions object to use in obtain(),
+ as a tuple (url, rev_options).
+ """
+ secret_url, rev, user_pass = self.get_url_rev_and_auth(url.secret)
+ username, secret_password = user_pass
+ password: Optional[HiddenText] = None
+ if secret_password is not None:
+ password = hide_value(secret_password)
+ extra_args = self.make_rev_args(username, password)
+ rev_options = self.make_rev_options(rev, extra_args=extra_args)
+
+ return hide_url(secret_url), rev_options
+
+ @staticmethod
+ def normalize_url(url: str) -> str:
+ """
+ Normalize a URL for comparison by unquoting it and removing any
+ trailing slash.
+ """
+ return urllib.parse.unquote(url).rstrip("/")
+
+ @classmethod
+ def compare_urls(cls, url1: str, url2: str) -> bool:
+ """
+ Compare two repo URLs for identity, ignoring incidental differences.
+ """
+ return cls.normalize_url(url1) == cls.normalize_url(url2)
+
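+    # For example:
+    #   compare_urls("https://example.com/repo/", "https://example.com/repo")
+    #   -> True  (trailing slash is ignored)
+    #   compare_urls("https://example.com/a%20b", "https://example.com/a b")
+    #   -> True  (percent-encoding is unquoted before comparing)
+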
+ def fetch_new(
+ self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
+ ) -> None:
+ """
+ Fetch a revision from a repository, in the case that this is the
+ first fetch from the repository.
+
+ Args:
+ dest: the directory to fetch the repository to.
+ rev_options: a RevOptions object.
+ verbosity: verbosity level.
+ """
+ raise NotImplementedError
+
+ def switch(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ """
+        Switch the repo at ``dest`` to point to ``url``.
+
+ Args:
+ rev_options: a RevOptions object.
+ """
+ raise NotImplementedError
+
+ def update(self, dest: str, url: HiddenText, rev_options: RevOptions) -> None:
+ """
+ Update an already-existing repo to the given ``rev_options``.
+
+ Args:
+ rev_options: a RevOptions object.
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def is_commit_id_equal(cls, dest: str, name: Optional[str]) -> bool:
+ """
+ Return whether the id of the current commit equals the given name.
+
+ Args:
+ dest: the repository directory.
+ name: a string name.
+ """
+ raise NotImplementedError
+
+ def obtain(self, dest: str, url: HiddenText, verbosity: int) -> None:
+ """
+        Install or update, in editable mode, the package represented by
+        this VersionControl object.
+
+ :param dest: the repository directory in which to install or update.
+ :param url: the repository URL starting with a vcs prefix.
+ :param verbosity: verbosity level.
+ """
+ url, rev_options = self.get_url_rev_options(url)
+
+ if not os.path.exists(dest):
+ self.fetch_new(dest, url, rev_options, verbosity=verbosity)
+ return
+
+ rev_display = rev_options.to_display()
+ if self.is_repository_directory(dest):
+ existing_url = self.get_remote_url(dest)
+ if self.compare_urls(existing_url, url.secret):
+ logger.debug(
+ "%s in %s exists, and has correct URL (%s)",
+ self.repo_name.title(),
+ display_path(dest),
+ url,
+ )
+ if not self.is_commit_id_equal(dest, rev_options.rev):
+ logger.info(
+ "Updating %s %s%s",
+ display_path(dest),
+ self.repo_name,
+ rev_display,
+ )
+ self.update(dest, url, rev_options)
+ else:
+ logger.info("Skipping because already up-to-date.")
+ return
+
+ logger.warning(
+ "%s %s in %s exists with URL %s",
+ self.name,
+ self.repo_name,
+ display_path(dest),
+ existing_url,
+ )
+ prompt = ("(s)witch, (i)gnore, (w)ipe, (b)ackup ", ("s", "i", "w", "b"))
+ else:
+ logger.warning(
+ "Directory %s already exists, and is not a %s %s.",
+ dest,
+ self.name,
+ self.repo_name,
+ )
+ # https://github.com/python/mypy/issues/1174
+ prompt = ("(i)gnore, (w)ipe, (b)ackup ", ("i", "w", "b")) # type: ignore
+
+ logger.warning(
+ "The plan is to install the %s repository %s",
+ self.name,
+ url,
+ )
+ response = ask_path_exists("What to do? {}".format(prompt[0]), prompt[1])
+
+ if response == "a":
+ sys.exit(-1)
+
+ if response == "w":
+ logger.warning("Deleting %s", display_path(dest))
+ rmtree(dest)
+ self.fetch_new(dest, url, rev_options, verbosity=verbosity)
+ return
+
+ if response == "b":
+ dest_dir = backup_dir(dest)
+ logger.warning("Backing up %s to %s", display_path(dest), dest_dir)
+ shutil.move(dest, dest_dir)
+ self.fetch_new(dest, url, rev_options, verbosity=verbosity)
+ return
+
+ # Do nothing if the response is "i".
+ if response == "s":
+ logger.info(
+ "Switching %s %s to %s%s",
+ self.repo_name,
+ display_path(dest),
+ url,
+ rev_display,
+ )
+ self.switch(dest, url, rev_options)
+
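+    # The prompt handling above, summarized:
+    #   (s)witch -> switch the existing checkout to the new URL
+    #   (i)gnore -> leave the existing checkout untouched
+    #   (w)ipe   -> rmtree(dest), then fetch_new()
+    #   (b)ackup -> move dest aside, then fetch_new()
+    # and a response of "a" makes pip exit.
+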
+ def unpack(self, location: str, url: HiddenText, verbosity: int) -> None:
+ """
+        Clean up the current location and download the repository at ``url``
+        (including VCS metadata) into ``location``.
+
+ :param url: the repository URL starting with a vcs prefix.
+ :param verbosity: verbosity level.
+ """
+ if os.path.exists(location):
+ rmtree(location)
+ self.obtain(location, url=url, verbosity=verbosity)
+
+ @classmethod
+ def get_remote_url(cls, location: str) -> str:
+ """
+        Return the URL used at the given location.
+
+ Raises RemoteNotFoundError if the repository does not have a remote
+ url configured.
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def get_revision(cls, location: str) -> str:
+ """
+ Return the current commit id of the files at the given location.
+ """
+ raise NotImplementedError
+
+ @classmethod
+ def run_command(
+ cls,
+ cmd: Union[List[str], CommandArgs],
+ show_stdout: bool = True,
+ cwd: Optional[str] = None,
+ on_returncode: 'Literal["raise", "warn", "ignore"]' = "raise",
+ extra_ok_returncodes: Optional[Iterable[int]] = None,
+ command_desc: Optional[str] = None,
+ extra_environ: Optional[Mapping[str, Any]] = None,
+ spinner: Optional[SpinnerInterface] = None,
+ log_failed_cmd: bool = True,
+ stdout_only: bool = False,
+ ) -> str:
+ """
+        Run a VCS subcommand.
+
+        This is simply a wrapper around call_subprocess that adds the VCS
+        command name and checks that the VCS is available.
+        """
+ cmd = make_command(cls.name, *cmd)
+ if command_desc is None:
+ command_desc = format_command_args(cmd)
+ try:
+ return call_subprocess(
+ cmd,
+ show_stdout,
+ cwd,
+ on_returncode=on_returncode,
+ extra_ok_returncodes=extra_ok_returncodes,
+ command_desc=command_desc,
+ extra_environ=extra_environ,
+ unset_environ=cls.unset_environ,
+ spinner=spinner,
+ log_failed_cmd=log_failed_cmd,
+ stdout_only=stdout_only,
+ )
+ except FileNotFoundError:
+ # errno.ENOENT = no such file or directory
+ # In other words, the VCS executable isn't available
+ raise BadCommand(
+ f"Cannot find command {cls.name!r} - do you have "
+ f"{cls.name!r} installed and in your PATH?"
+ )
+ except PermissionError:
+ # errno.EACCES = Permission denied
+            # This error occurs, for instance, when the command is installed
+            # only for another user, so the current user doesn't have
+            # permission to call the other user's command.
+ raise BadCommand(
+ f"No permission to execute {cls.name!r} - install it "
+ f"locally, globally (ask admin), or check your PATH. "
+ f"See possible solutions at "
+ f"https://pip.pypa.io/en/latest/reference/pip_freeze/"
+ f"#fixing-permission-denied."
+ )
+
+ @classmethod
+ def is_repository_directory(cls, path: str) -> bool:
+ """
+ Return whether a directory path is a repository directory.
+ """
+ logger.debug("Checking in %s for %s (%s)...", path, cls.dirname, cls.name)
+ return os.path.exists(os.path.join(path, cls.dirname))
+
+ @classmethod
+ def get_repository_root(cls, location: str) -> Optional[str]:
+ """
+ Return the "root" (top-level) directory controlled by the vcs,
+ or `None` if the directory is not in any.
+
+ It is meant to be overridden to implement smarter detection
+ mechanisms for specific vcs.
+
+ This can do more than is_repository_directory() alone. For
+ example, the Git override checks that Git is actually available.
+ """
+ if cls.is_repository_directory(location):
+ return location
+ return None
diff --git a/third_party/python/pip/pip/_internal/wheel_builder.py b/third_party/python/pip/pip/_internal/wheel_builder.py
new file mode 100644
index 0000000000..15b30af58e
--- /dev/null
+++ b/third_party/python/pip/pip/_internal/wheel_builder.py
@@ -0,0 +1,382 @@
+"""Orchestrator for building wheels from InstallRequirements.
+"""
+
+import logging
+import os.path
+import re
+import shutil
+from typing import Callable, Iterable, List, Optional, Tuple
+
+from pip._vendor.packaging.utils import canonicalize_name, canonicalize_version
+from pip._vendor.packaging.version import InvalidVersion, Version
+
+from pip._internal.cache import WheelCache
+from pip._internal.exceptions import InvalidWheelFilename, UnsupportedWheel
+from pip._internal.metadata import FilesystemWheel, get_wheel_distribution
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.operations.build.wheel import build_wheel_pep517
+from pip._internal.operations.build.wheel_editable import build_wheel_editable
+from pip._internal.operations.build.wheel_legacy import build_wheel_legacy
+from pip._internal.req.req_install import InstallRequirement
+from pip._internal.utils.deprecation import (
+ LegacyInstallReasonMissingWheelPackage,
+ LegacyInstallReasonNoBinaryForcesSetuptoolsInstall,
+)
+from pip._internal.utils.logging import indent_log
+from pip._internal.utils.misc import ensure_dir, hash_file, is_wheel_installed
+from pip._internal.utils.setuptools_build import make_setuptools_clean_args
+from pip._internal.utils.subprocess import call_subprocess
+from pip._internal.utils.temp_dir import TempDirectory
+from pip._internal.utils.urls import path_to_url
+from pip._internal.vcs import vcs
+
+logger = logging.getLogger(__name__)
+
+_egg_info_re = re.compile(r"([a-z0-9_.]+)-([a-z0-9_.!+-]+)", re.IGNORECASE)
+
+BdistWheelAllowedPredicate = Callable[[InstallRequirement], bool]
+BuildResult = Tuple[List[InstallRequirement], List[InstallRequirement]]
+
+
+def _contains_egg_info(s: str) -> bool:
+ """Determine whether the string looks like an egg_info.
+
+ :param s: The string to parse. E.g. foo-2.1
+ """
+ return bool(_egg_info_re.search(s))
+
+
+def _should_build(
+ req: InstallRequirement,
+ need_wheel: bool,
+ check_bdist_wheel: Optional[BdistWheelAllowedPredicate] = None,
+) -> bool:
+ """Return whether an InstallRequirement should be built into a wheel."""
+ if req.constraint:
+ # never build requirements that are merely constraints
+ return False
+ if req.is_wheel:
+ if need_wheel:
+ logger.info(
+ "Skipping %s, due to already being wheel.",
+ req.name,
+ )
+ return False
+
+ if need_wheel:
+ # i.e. pip wheel, not pip install
+ return True
+
+ # From this point, this concerns the pip install command only
+ # (need_wheel=False).
+
+ if not req.source_dir:
+ return False
+
+ if req.editable:
+ # we only build PEP 660 editable requirements
+ return req.supports_pyproject_editable()
+
+ if req.use_pep517:
+ return True
+
+ assert check_bdist_wheel is not None
+ if not check_bdist_wheel(req):
+ # /!\ When we change this to unconditionally return True, we must also remove
+ # support for `--install-option`. Indeed, `--install-option` implies
+ # `--no-binary` so we can return False here and run `setup.py install`.
+ # `--global-option` and `--build-option` can remain until we drop support for
+ # building with `setup.py bdist_wheel`.
+ req.legacy_install_reason = LegacyInstallReasonNoBinaryForcesSetuptoolsInstall
+ return False
+
+ if not is_wheel_installed():
+ # we don't build legacy requirements if wheel is not installed
+ req.legacy_install_reason = LegacyInstallReasonMissingWheelPackage
+ return False
+
+ return True
+
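+# Decision summary for _should_build above (illustrative; the install path
+# is need_wheel=False):
+#   req.constraint            -> False (constraints are never built)
+#   req.is_wheel              -> False (already a wheel)
+#   need_wheel ("pip wheel")  -> True
+#   no req.source_dir         -> False
+#   req.editable              -> True only for PEP 660-capable projects
+#   req.use_pep517            -> True
+#   otherwise                 -> depends on check_bdist_wheel(req) and on
+#                                whether the 'wheel' package is installed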
+
+def should_build_for_wheel_command(
+ req: InstallRequirement,
+) -> bool:
+ return _should_build(req, need_wheel=True)
+
+
+def should_build_for_install_command(
+ req: InstallRequirement,
+ check_bdist_wheel_allowed: BdistWheelAllowedPredicate,
+) -> bool:
+ return _should_build(
+ req, need_wheel=False, check_bdist_wheel=check_bdist_wheel_allowed
+ )
+
+
+def _should_cache(
+ req: InstallRequirement,
+) -> Optional[bool]:
+ """
+ Return whether a built InstallRequirement can be stored in the persistent
+ wheel cache, assuming the wheel cache is available, and _should_build()
+ has determined a wheel needs to be built.
+ """
+ if req.editable or not req.source_dir:
+        # never cache editable requirements or ones without a source directory
+ return False
+
+ if req.link and req.link.is_vcs:
+ # VCS checkout. Do not cache
+ # unless it points to an immutable commit hash.
+ assert not req.editable
+ assert req.source_dir
+ vcs_backend = vcs.get_backend_for_scheme(req.link.scheme)
+ assert vcs_backend
+ if vcs_backend.is_immutable_rev_checkout(req.link.url, req.source_dir):
+ return True
+ return False
+
+ assert req.link
+ base, ext = req.link.splitext()
+ if _contains_egg_info(base):
+ return True
+
+ # Otherwise, do not cache.
+ return False
+
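+# Illustrative outcomes for hypothetical requirements:
+#   editable requirement, or no source_dir      -> False
+#   VCS link pinned to an immutable commit hash -> True
+#   link whose basename looks like "foo-2.1"    -> True (egg-info heuristic)
+#   anything else                               -> False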
+
+def _get_cache_dir(
+ req: InstallRequirement,
+ wheel_cache: WheelCache,
+) -> str:
+ """Return the persistent or temporary cache directory where the built
+    wheel needs to be stored.
+ """
+ cache_available = bool(wheel_cache.cache_dir)
+ assert req.link
+ if cache_available and _should_cache(req):
+ cache_dir = wheel_cache.get_path_for_link(req.link)
+ else:
+ cache_dir = wheel_cache.get_ephem_path_for_link(req.link)
+ return cache_dir
+
+
+def _verify_one(req: InstallRequirement, wheel_path: str) -> None:
+ canonical_name = canonicalize_name(req.name or "")
+ w = Wheel(os.path.basename(wheel_path))
+ if canonicalize_name(w.name) != canonical_name:
+ raise InvalidWheelFilename(
+ "Wheel has unexpected file name: expected {!r}, "
+ "got {!r}".format(canonical_name, w.name),
+ )
+ dist = get_wheel_distribution(FilesystemWheel(wheel_path), canonical_name)
+ dist_verstr = str(dist.version)
+ if canonicalize_version(dist_verstr) != canonicalize_version(w.version):
+ raise InvalidWheelFilename(
+ "Wheel has unexpected file name: expected {!r}, "
+ "got {!r}".format(dist_verstr, w.version),
+ )
+ metadata_version_value = dist.metadata_version
+ if metadata_version_value is None:
+ raise UnsupportedWheel("Missing Metadata-Version")
+ try:
+ metadata_version = Version(metadata_version_value)
+ except InvalidVersion:
+ msg = f"Invalid Metadata-Version: {metadata_version_value}"
+ raise UnsupportedWheel(msg)
+ if metadata_version >= Version("1.2") and not isinstance(dist.version, Version):
+ raise UnsupportedWheel(
+ "Metadata 1.2 mandates PEP 440 version, "
+ "but {!r} is not".format(dist_verstr)
+ )
+
+
+def _build_one(
+ req: InstallRequirement,
+ output_dir: str,
+ verify: bool,
+ build_options: List[str],
+ global_options: List[str],
+ editable: bool,
+) -> Optional[str]:
+ """Build one wheel.
+
+ :return: The filename of the built wheel, or None if the build failed.
+ """
+ artifact = "editable" if editable else "wheel"
+ try:
+ ensure_dir(output_dir)
+ except OSError as e:
+ logger.warning(
+ "Building %s for %s failed: %s",
+ artifact,
+ req.name,
+ e,
+ )
+ return None
+
+ # Install build deps into temporary directory (PEP 518)
+ with req.build_env:
+ wheel_path = _build_one_inside_env(
+ req, output_dir, build_options, global_options, editable
+ )
+ if wheel_path and verify:
+ try:
+ _verify_one(req, wheel_path)
+ except (InvalidWheelFilename, UnsupportedWheel) as e:
+ logger.warning("Built %s for %s is invalid: %s", artifact, req.name, e)
+ return None
+ return wheel_path
+
+
+def _build_one_inside_env(
+ req: InstallRequirement,
+ output_dir: str,
+ build_options: List[str],
+ global_options: List[str],
+ editable: bool,
+) -> Optional[str]:
+ with TempDirectory(kind="wheel") as temp_dir:
+ assert req.name
+ if req.use_pep517:
+ assert req.metadata_directory
+ assert req.pep517_backend
+ if global_options:
+ logger.warning(
+ "Ignoring --global-option when building %s using PEP 517", req.name
+ )
+ if build_options:
+ logger.warning(
+ "Ignoring --build-option when building %s using PEP 517", req.name
+ )
+ if editable:
+ wheel_path = build_wheel_editable(
+ name=req.name,
+ backend=req.pep517_backend,
+ metadata_directory=req.metadata_directory,
+ tempd=temp_dir.path,
+ )
+ else:
+ wheel_path = build_wheel_pep517(
+ name=req.name,
+ backend=req.pep517_backend,
+ metadata_directory=req.metadata_directory,
+ tempd=temp_dir.path,
+ )
+ else:
+ wheel_path = build_wheel_legacy(
+ name=req.name,
+ setup_py_path=req.setup_py_path,
+ source_dir=req.unpacked_source_directory,
+ global_options=global_options,
+ build_options=build_options,
+ tempd=temp_dir.path,
+ )
+
+ if wheel_path is not None:
+ wheel_name = os.path.basename(wheel_path)
+ dest_path = os.path.join(output_dir, wheel_name)
+ try:
+ wheel_hash, length = hash_file(wheel_path)
+ shutil.move(wheel_path, dest_path)
+ logger.info(
+ "Created wheel for %s: filename=%s size=%d sha256=%s",
+ req.name,
+ wheel_name,
+ length,
+ wheel_hash.hexdigest(),
+ )
+ logger.info("Stored in directory: %s", output_dir)
+ return dest_path
+ except Exception as e:
+ logger.warning(
+ "Building wheel for %s failed: %s",
+ req.name,
+ e,
+ )
+ # Ignore return, we can't do anything else useful.
+ if not req.use_pep517:
+ _clean_one_legacy(req, global_options)
+ return None
+
+
+def _clean_one_legacy(req: InstallRequirement, global_options: List[str]) -> bool:
+ clean_args = make_setuptools_clean_args(
+ req.setup_py_path,
+ global_options=global_options,
+ )
+
+ logger.info("Running setup.py clean for %s", req.name)
+ try:
+ call_subprocess(
+ clean_args, command_desc="python setup.py clean", cwd=req.source_dir
+ )
+ return True
+ except Exception:
+ logger.error("Failed cleaning build dir for %s", req.name)
+ return False
+
+
+def build(
+ requirements: Iterable[InstallRequirement],
+ wheel_cache: WheelCache,
+ verify: bool,
+ build_options: List[str],
+ global_options: List[str],
+) -> BuildResult:
+ """Build wheels.
+
+ :return: The list of InstallRequirement that succeeded to build and
+ the list of InstallRequirement that failed to build.
+ """
+ if not requirements:
+ return [], []
+
+ # Build the wheels.
+ logger.info(
+ "Building wheels for collected packages: %s",
+ ", ".join(req.name for req in requirements), # type: ignore
+ )
+
+ with indent_log():
+ build_successes, build_failures = [], []
+ for req in requirements:
+ assert req.name
+ cache_dir = _get_cache_dir(req, wheel_cache)
+ wheel_file = _build_one(
+ req,
+ cache_dir,
+ verify,
+ build_options,
+ global_options,
+ req.editable and req.permit_editable_wheels,
+ )
+ if wheel_file:
+ # Record the download origin in the cache
+ if req.download_info is not None:
+ # download_info is guaranteed to be set because when we build an
+ # InstallRequirement it has been through the preparer before, but
+ # let's be cautious.
+ wheel_cache.record_download_origin(cache_dir, req.download_info)
+ # Update the link for this.
+ req.link = Link(path_to_url(wheel_file))
+ req.local_file_path = req.link.file_path
+ assert req.link.is_wheel
+ build_successes.append(req)
+ else:
+ build_failures.append(req)
+
+ # notify success/failure
+ if build_successes:
+ logger.info(
+ "Successfully built %s",
+ " ".join([req.name for req in build_successes]), # type: ignore
+ )
+ if build_failures:
+ logger.info(
+ "Failed to build %s",
+ " ".join([req.name for req in build_failures]), # type: ignore
+ )
+ # Return a list of requirements that failed to build
+ return build_successes, build_failures
diff --git a/third_party/python/pip/pip/_vendor/__init__.py b/third_party/python/pip/pip/_vendor/__init__.py
new file mode 100644
index 0000000000..b22f7abb93
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/__init__.py
@@ -0,0 +1,120 @@
+"""
+pip._vendor is for vendoring dependencies of pip so that pip does not need
+to depend on anything external.
+
+Files inside of pip._vendor should be considered immutable and should only be
+updated to versions from upstream.
+"""
+from __future__ import absolute_import
+
+import glob
+import os.path
+import sys
+
+# Downstream redistributors which have debundled our dependencies should also
+# patch this value to be true. This will trigger the additional patching
+# to cause things like "six" to be importable from pip._vendor.
+DEBUNDLED = False
+
+# By default, look in this directory for a bunch of .whl files which we will
+# add to the beginning of sys.path before attempting to import anything. This
+# is done to support downstream re-distributors like Debian and Fedora who
+# wish to create their own Wheels for our dependencies to aid in debundling.
+WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
+
+
+# Define a small helper function to alias our vendored modules to the real ones
+# if the vendored ones do not exist. The idea for this was taken from
+# https://github.com/kennethreitz/requests/pull/2567.
+def vendored(modulename):
+ vendored_name = "{0}.{1}".format(__name__, modulename)
+
+ try:
+ __import__(modulename, globals(), locals(), level=0)
+ except ImportError:
+ # We can just silently allow import failures to pass here. If we
+ # got to this point it means that ``import pip._vendor.whatever``
+ # failed and so did ``import whatever``. Since we're importing this
+ # upfront in an attempt to alias imports, not erroring here will
+ # just mean we get a regular import error whenever pip *actually*
+ # tries to import one of these modules to use it, which actually
+ # gives us a better error message than we would have otherwise
+ # gotten.
+ pass
+ else:
+ sys.modules[vendored_name] = sys.modules[modulename]
+ base, head = vendored_name.rsplit(".", 1)
+ setattr(sys.modules[base], head, sys.modules[modulename])
+
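+# For instance, in a debundled environment vendored("six") imports the
+# system-wide module (assuming six is installed) and aliases it so that
+# both names resolve to the same module object:
+#
+#     import pip._vendor
+#     pip._vendor.vendored("six")
+#     from pip._vendor import six   # now the real, system-wide six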
+
+# If we're operating in a debundled setup, then we want to go ahead and trigger
+# the aliasing of our vendored libraries as well as looking for wheels to add
+# to our sys.path. This will cause all of this code to be a no-op typically
+# however downstream redistributors can enable it in a consistent way across
+# all platforms.
+if DEBUNDLED:
+ # Actually look inside of WHEEL_DIR to find .whl files and add them to the
+ # front of our sys.path.
+ sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
+
+ # Actually alias all of our vendored dependencies.
+ vendored("cachecontrol")
+ vendored("certifi")
+ vendored("colorama")
+ vendored("distlib")
+ vendored("distro")
+ vendored("six")
+ vendored("six.moves")
+ vendored("six.moves.urllib")
+ vendored("six.moves.urllib.parse")
+ vendored("packaging")
+ vendored("packaging.version")
+ vendored("packaging.specifiers")
+ vendored("pep517")
+ vendored("pkg_resources")
+ vendored("platformdirs")
+ vendored("progress")
+ vendored("requests")
+ vendored("requests.exceptions")
+ vendored("requests.packages")
+ vendored("requests.packages.urllib3")
+ vendored("requests.packages.urllib3._collections")
+ vendored("requests.packages.urllib3.connection")
+ vendored("requests.packages.urllib3.connectionpool")
+ vendored("requests.packages.urllib3.contrib")
+ vendored("requests.packages.urllib3.contrib.ntlmpool")
+ vendored("requests.packages.urllib3.contrib.pyopenssl")
+ vendored("requests.packages.urllib3.exceptions")
+ vendored("requests.packages.urllib3.fields")
+ vendored("requests.packages.urllib3.filepost")
+ vendored("requests.packages.urllib3.packages")
+ vendored("requests.packages.urllib3.packages.ordered_dict")
+ vendored("requests.packages.urllib3.packages.six")
+ vendored("requests.packages.urllib3.packages.ssl_match_hostname")
+ vendored("requests.packages.urllib3.packages.ssl_match_hostname."
+ "_implementation")
+ vendored("requests.packages.urllib3.poolmanager")
+ vendored("requests.packages.urllib3.request")
+ vendored("requests.packages.urllib3.response")
+ vendored("requests.packages.urllib3.util")
+ vendored("requests.packages.urllib3.util.connection")
+ vendored("requests.packages.urllib3.util.request")
+ vendored("requests.packages.urllib3.util.response")
+ vendored("requests.packages.urllib3.util.retry")
+ vendored("requests.packages.urllib3.util.ssl_")
+ vendored("requests.packages.urllib3.util.timeout")
+ vendored("requests.packages.urllib3.util.url")
+ vendored("resolvelib")
+ vendored("rich")
+ vendored("rich.console")
+ vendored("rich.highlighter")
+ vendored("rich.logging")
+ vendored("rich.markup")
+ vendored("rich.progress")
+ vendored("rich.segment")
+ vendored("rich.style")
+ vendored("rich.text")
+ vendored("rich.traceback")
+ vendored("tenacity")
+ vendored("tomli")
+ vendored("urllib3")
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/__init__.py b/third_party/python/pip/pip/_vendor/cachecontrol/__init__.py
new file mode 100644
index 0000000000..f631ae6df4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/__init__.py
@@ -0,0 +1,18 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""CacheControl import Interface.
+
+Make it easy to import from cachecontrol without long namespaces.
+"""
+__author__ = "Eric Larson"
+__email__ = "eric@ionrock.org"
+__version__ = "0.12.11"
+
+from .wrapper import CacheControl
+from .adapter import CacheControlAdapter
+from .controller import CacheController
+
+import logging
+logging.getLogger(__name__).addHandler(logging.NullHandler())
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/_cmd.py b/third_party/python/pip/pip/_vendor/cachecontrol/_cmd.py
new file mode 100644
index 0000000000..4266b5ee92
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/_cmd.py
@@ -0,0 +1,61 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import logging
+
+from pip._vendor import requests
+
+from pip._vendor.cachecontrol.adapter import CacheControlAdapter
+from pip._vendor.cachecontrol.cache import DictCache
+from pip._vendor.cachecontrol.controller import logger
+
+from argparse import ArgumentParser
+
+
+def setup_logging():
+ logger.setLevel(logging.DEBUG)
+ handler = logging.StreamHandler()
+ logger.addHandler(handler)
+
+
+def get_session():
+ adapter = CacheControlAdapter(
+ DictCache(), cache_etags=True, serializer=None, heuristic=None
+ )
+ sess = requests.Session()
+ sess.mount("http://", adapter)
+ sess.mount("https://", adapter)
+
+ sess.cache_controller = adapter.controller
+ return sess
+
+
+def get_args():
+ parser = ArgumentParser()
+ parser.add_argument("url", help="The URL to try and cache")
+ return parser.parse_args()
+
+
+def main(args=None):
+ args = get_args()
+ sess = get_session()
+
+ # Make a request to get a response
+ resp = sess.get(args.url)
+
+ # Turn on logging
+ setup_logging()
+
+ # try setting the cache
+ sess.cache_controller.cache_response(resp.request, resp.raw)
+
+ # Now try to get it
+ if sess.cache_controller.cached_request(resp.request):
+ print("Cached!")
+ else:
+ print("Not cached :(")
+
+
+if __name__ == "__main__":
+ main()
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/adapter.py b/third_party/python/pip/pip/_vendor/cachecontrol/adapter.py
new file mode 100644
index 0000000000..94c75e1a05
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/adapter.py
@@ -0,0 +1,137 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import types
+import functools
+import zlib
+
+from pip._vendor.requests.adapters import HTTPAdapter
+
+from .controller import CacheController, PERMANENT_REDIRECT_STATUSES
+from .cache import DictCache
+from .filewrapper import CallbackFileWrapper
+
+
+class CacheControlAdapter(HTTPAdapter):
+ invalidating_methods = {"PUT", "PATCH", "DELETE"}
+
+ def __init__(
+ self,
+ cache=None,
+ cache_etags=True,
+ controller_class=None,
+ serializer=None,
+ heuristic=None,
+ cacheable_methods=None,
+ *args,
+ **kw
+ ):
+ super(CacheControlAdapter, self).__init__(*args, **kw)
+ self.cache = DictCache() if cache is None else cache
+ self.heuristic = heuristic
+ self.cacheable_methods = cacheable_methods or ("GET",)
+
+ controller_factory = controller_class or CacheController
+ self.controller = controller_factory(
+ self.cache, cache_etags=cache_etags, serializer=serializer
+ )
+
+ def send(self, request, cacheable_methods=None, **kw):
+ """
+ Send a request. Use the request information to see if it
+ exists in the cache and cache the response if we need to and can.
+ """
+ cacheable = cacheable_methods or self.cacheable_methods
+ if request.method in cacheable:
+ try:
+ cached_response = self.controller.cached_request(request)
+ except zlib.error:
+ cached_response = None
+ if cached_response:
+ return self.build_response(request, cached_response, from_cache=True)
+
+ # check for etags and add headers if appropriate
+ request.headers.update(self.controller.conditional_headers(request))
+
+ resp = super(CacheControlAdapter, self).send(request, **kw)
+
+ return resp
+
+ def build_response(
+ self, request, response, from_cache=False, cacheable_methods=None
+ ):
+ """
+ Build a response by making a request or using the cache.
+
+ This will end up calling send and returning a potentially
+        cached response.
+ """
+ cacheable = cacheable_methods or self.cacheable_methods
+ if not from_cache and request.method in cacheable:
+ # Check for any heuristics that might update headers
+ # before trying to cache.
+ if self.heuristic:
+ response = self.heuristic.apply(response)
+
+ # apply any expiration heuristics
+ if response.status == 304:
+ # We must have sent an ETag request. This could mean
+ # that we've been expired already or that we simply
+                # have an etag. In either case, we want to try to
+                # update the cache.
+ cached_response = self.controller.update_cached_response(
+ request, response
+ )
+
+ if cached_response is not response:
+ from_cache = True
+
+ # We are done with the server response, read a
+ # possible response body (compliant servers will
+ # not return one, but we cannot be 100% sure) and
+ # release the connection back to the pool.
+ response.read(decode_content=False)
+ response.release_conn()
+
+ response = cached_response
+
+ # We always cache the 301 responses
+ elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
+ self.controller.cache_response(request, response)
+ else:
+ # Wrap the response file with a wrapper that will cache the
+ # response when the stream has been consumed.
+ response._fp = CallbackFileWrapper(
+ response._fp,
+ functools.partial(
+ self.controller.cache_response, request, response
+ ),
+ )
+ if response.chunked:
+ super_update_chunk_length = response._update_chunk_length
+
+ def _update_chunk_length(self):
+ super_update_chunk_length()
+ if self.chunk_left == 0:
+ self._fp._close()
+
+ response._update_chunk_length = types.MethodType(
+ _update_chunk_length, response
+ )
+
+ resp = super(CacheControlAdapter, self).build_response(request, response)
+
+ # See if we should invalidate the cache.
+ if request.method in self.invalidating_methods and resp.ok:
+ cache_url = self.controller.cache_url(request.url)
+ self.cache.delete(cache_url)
+
+ # Give the request a from_cache attr to let people use it
+ resp.from_cache = from_cache
+
+ return resp
+
+ def close(self):
+ self.cache.close()
+ super(CacheControlAdapter, self).close()
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/cache.py b/third_party/python/pip/pip/_vendor/cachecontrol/cache.py
new file mode 100644
index 0000000000..2a965f595f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/cache.py
@@ -0,0 +1,65 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+The cache object API for implementing caches. The default is a thread-safe
+in-memory dictionary.
+"""
+from threading import Lock
+
+
+class BaseCache(object):
+
+ def get(self, key):
+ raise NotImplementedError()
+
+ def set(self, key, value, expires=None):
+ raise NotImplementedError()
+
+ def delete(self, key):
+ raise NotImplementedError()
+
+ def close(self):
+ pass
+
+
+class DictCache(BaseCache):
+
+ def __init__(self, init_dict=None):
+ self.lock = Lock()
+ self.data = init_dict or {}
+
+ def get(self, key):
+ return self.data.get(key, None)
+
+ def set(self, key, value, expires=None):
+ with self.lock:
+ self.data.update({key: value})
+
+ def delete(self, key):
+ with self.lock:
+ if key in self.data:
+ self.data.pop(key)
+
+
+class SeparateBodyBaseCache(BaseCache):
+ """
+ In this variant, the body is not stored mixed in with the metadata, but is
+ passed in (as a bytes-like object) in a separate call to ``set_body()``.
+
+ That is, the expected interaction pattern is::
+
+ cache.set(key, serialized_metadata)
+        cache.set_body(key, body)
+
+ Similarly, the body should be loaded separately via ``get_body()``.
+ """
+ def set_body(self, key, body):
+ raise NotImplementedError()
+
+ def get_body(self, key):
+ """
+ Return the body as file-like object.
+ """
+ raise NotImplementedError()
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/caches/__init__.py b/third_party/python/pip/pip/_vendor/cachecontrol/caches/__init__.py
new file mode 100644
index 0000000000..37827291fb
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/caches/__init__.py
@@ -0,0 +1,9 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from .file_cache import FileCache, SeparateBodyFileCache
+from .redis_cache import RedisCache
+
+
+__all__ = ["FileCache", "SeparateBodyFileCache", "RedisCache"]
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/caches/file_cache.py b/third_party/python/pip/pip/_vendor/cachecontrol/caches/file_cache.py
new file mode 100644
index 0000000000..f1ddb2ebdf
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/caches/file_cache.py
@@ -0,0 +1,188 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import hashlib
+import os
+from textwrap import dedent
+
+from ..cache import BaseCache, SeparateBodyBaseCache
+from ..controller import CacheController
+
+try:
+ FileNotFoundError
+except NameError:
+ # py2.X
+ FileNotFoundError = (IOError, OSError)
+
+
+def _secure_open_write(filename, fmode):
+ # We only want to write to this file, so open it in write only mode
+ flags = os.O_WRONLY
+
+    # os.O_CREAT | os.O_EXCL will fail if the file already exists, so we
+    # will only open *new* files.
+ # We specify this because we want to ensure that the mode we pass is the
+ # mode of the file.
+ flags |= os.O_CREAT | os.O_EXCL
+
+ # Do not follow symlinks to prevent someone from making a symlink that
+ # we follow and insecurely open a cache file.
+ if hasattr(os, "O_NOFOLLOW"):
+ flags |= os.O_NOFOLLOW
+
+ # On Windows we'll mark this file as binary
+ if hasattr(os, "O_BINARY"):
+ flags |= os.O_BINARY
+
+ # Before we open our file, we want to delete any existing file that is
+ # there
+ try:
+ os.remove(filename)
+ except (IOError, OSError):
+        # The file didn't exist, so we can just skip ahead to opening
+ pass
+
+    # Open our file; the use of os.O_CREAT | os.O_EXCL will ensure that if a
+    # race condition happens between the os.remove and this line, an
+    # error will be raised. Because we utilize a lockfile, this should only
+ # happen if someone is attempting to attack us.
+ fd = os.open(filename, flags, fmode)
+ try:
+ return os.fdopen(fd, "wb")
+
+ except:
+ # An error occurred wrapping our FD in a file object
+ os.close(fd)
+ raise
+
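+# Usage sketch (hypothetical path): the returned object is an ordinary
+# binary file object, so callers can write
+#
+#     with _secure_open_write("/tmp/cache-entry", 0o600) as fh:
+#         fh.write(b"data")
+#
+# and O_CREAT | O_EXCL guarantees the open fails if the file reappears
+# between the os.remove() above and the os.open() call.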
+
+class _FileCacheMixin:
+ """Shared implementation for both FileCache variants."""
+
+ def __init__(
+ self,
+ directory,
+ forever=False,
+ filemode=0o0600,
+ dirmode=0o0700,
+ use_dir_lock=None,
+ lock_class=None,
+ ):
+
+ if use_dir_lock is not None and lock_class is not None:
+ raise ValueError("Cannot use use_dir_lock and lock_class together")
+
+ try:
+ from lockfile import LockFile
+ from lockfile.mkdirlockfile import MkdirLockFile
+ except ImportError:
+ notice = dedent(
+ """
+ NOTE: In order to use the FileCache you must have
+ lockfile installed. You can install it via pip:
+ pip install lockfile
+ """
+ )
+ raise ImportError(notice)
+
+ else:
+ if use_dir_lock:
+ lock_class = MkdirLockFile
+
+ elif lock_class is None:
+ lock_class = LockFile
+
+ self.directory = directory
+ self.forever = forever
+ self.filemode = filemode
+ self.dirmode = dirmode
+ self.lock_class = lock_class
+
+ @staticmethod
+ def encode(x):
+ return hashlib.sha224(x.encode()).hexdigest()
+
+ def _fn(self, name):
+        # NOTE: This method should not change, as external code may depend on it.
+ # See: https://github.com/ionrock/cachecontrol/issues/63
+ hashed = self.encode(name)
+ parts = list(hashed[:5]) + [hashed]
+ return os.path.join(self.directory, *parts)
+
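+    # Path layout sketch: for a key whose sha224 digest starts with "abcde",
+    # _fn() returns <directory>/a/b/c/d/e/<full digest>, fanning cache
+    # entries out across five levels of single-character directories.
+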
+ def get(self, key):
+ name = self._fn(key)
+ try:
+ with open(name, "rb") as fh:
+ return fh.read()
+
+ except FileNotFoundError:
+ return None
+
+ def set(self, key, value, expires=None):
+ name = self._fn(key)
+ self._write(name, value)
+
+ def _write(self, path, data: bytes):
+ """
+ Safely write the data to the given path.
+ """
+ # Make sure the directory exists
+ try:
+ os.makedirs(os.path.dirname(path), self.dirmode)
+ except (IOError, OSError):
+ pass
+
+ with self.lock_class(path) as lock:
+ # Write our actual file
+ with _secure_open_write(lock.path, self.filemode) as fh:
+ fh.write(data)
+
+ def _delete(self, key, suffix):
+ name = self._fn(key) + suffix
+ if not self.forever:
+ try:
+ os.remove(name)
+ except FileNotFoundError:
+ pass
+
+
+class FileCache(_FileCacheMixin, BaseCache):
+ """
+ Traditional FileCache: body is stored in memory, so not suitable for large
+ downloads.
+ """
+
+ def delete(self, key):
+ self._delete(key, "")
+
+
+class SeparateBodyFileCache(_FileCacheMixin, SeparateBodyBaseCache):
+ """
+ Memory-efficient FileCache: body is stored in a separate file, reducing
+ peak memory usage.
+ """
+
+ def get_body(self, key):
+ name = self._fn(key) + ".body"
+ try:
+ return open(name, "rb")
+ except FileNotFoundError:
+ return None
+
+ def set_body(self, key, body):
+ name = self._fn(key) + ".body"
+ self._write(name, body)
+
+ def delete(self, key):
+ self._delete(key, "")
+ self._delete(key, ".body")
+
+
+def url_to_file_path(url, filecache):
+ """Return the file cache path based on the URL.
+
+ This does not ensure the file exists!
+ """
+ key = CacheController.cache_url(url)
+ return filecache._fn(key)
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/caches/redis_cache.py b/third_party/python/pip/pip/_vendor/cachecontrol/caches/redis_cache.py
new file mode 100644
index 0000000000..2cba4b0708
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/caches/redis_cache.py
@@ -0,0 +1,39 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import division
+
+from datetime import datetime
+from pip._vendor.cachecontrol.cache import BaseCache
+
+
+class RedisCache(BaseCache):
+
+ def __init__(self, conn):
+ self.conn = conn
+
+ def get(self, key):
+ return self.conn.get(key)
+
+ def set(self, key, value, expires=None):
+ if not expires:
+ self.conn.set(key, value)
+ elif isinstance(expires, datetime):
+ expires = expires - datetime.utcnow()
+ self.conn.setex(key, int(expires.total_seconds()), value)
+ else:
+ self.conn.setex(key, expires, value)
+
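+    # For example (hypothetical connection `conn`):
+    #   cache.set("key", b"value")                     -> conn.set, no TTL
+    #   cache.set("key", b"value", expires=300)        -> conn.setex, 300s TTL
+    #   cache.set("key", b"value", expires=<datetime>) -> TTL until that time
+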
+ def delete(self, key):
+ self.conn.delete(key)
+
+ def clear(self):
+ """Helper for clearing all the keys in a database. Use with
+ caution!"""
+ for key in self.conn.keys():
+ self.conn.delete(key)
+
+ def close(self):
+ """Redis uses connection pooling, no need to close the connection."""
+ pass
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/compat.py b/third_party/python/pip/pip/_vendor/cachecontrol/compat.py
new file mode 100644
index 0000000000..ccec9379db
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/compat.py
@@ -0,0 +1,32 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+try:
+ from urllib.parse import urljoin
+except ImportError:
+ from urlparse import urljoin
+
+
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
+
+# Handle the case where the requests module has been patched to not have
+# urllib3 bundled as part of its source.
+try:
+ from pip._vendor.requests.packages.urllib3.response import HTTPResponse
+except ImportError:
+ from pip._vendor.urllib3.response import HTTPResponse
+
+try:
+ from pip._vendor.requests.packages.urllib3.util import is_fp_closed
+except ImportError:
+ from pip._vendor.urllib3.util import is_fp_closed
+
+# Replicate some six behaviour
+try:
+ text_type = unicode
+except NameError:
+ text_type = str
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/controller.py b/third_party/python/pip/pip/_vendor/cachecontrol/controller.py
new file mode 100644
index 0000000000..7f23529f11
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/controller.py
@@ -0,0 +1,439 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+"""
+The httplib2 algorithms ported for use with requests.
+"""
+import logging
+import re
+import calendar
+import time
+from email.utils import parsedate_tz
+
+from pip._vendor.requests.structures import CaseInsensitiveDict
+
+from .cache import DictCache, SeparateBodyBaseCache
+from .serialize import Serializer
+
+
+logger = logging.getLogger(__name__)
+
+URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
+
+PERMANENT_REDIRECT_STATUSES = (301, 308)
+
+
+def parse_uri(uri):
+ """Parses a URI using the regex given in Appendix B of RFC 3986.
+
+ (scheme, authority, path, query, fragment) = parse_uri(uri)
+ """
+ groups = URI.match(uri).groups()
+ return (groups[1], groups[3], groups[4], groups[6], groups[8])
+
+
+class CacheController(object):
+ """An interface to see if request should cached or not."""
+
+ def __init__(
+ self, cache=None, cache_etags=True, serializer=None, status_codes=None
+ ):
+ self.cache = DictCache() if cache is None else cache
+ self.cache_etags = cache_etags
+ self.serializer = serializer or Serializer()
+ self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308)
+
+ @classmethod
+ def _urlnorm(cls, uri):
+ """Normalize the URL to create a safe key for the cache"""
+ (scheme, authority, path, query, fragment) = parse_uri(uri)
+ if not scheme or not authority:
+ raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
+
+ scheme = scheme.lower()
+ authority = authority.lower()
+
+ if not path:
+ path = "/"
+
+ # Could do syntax based normalization of the URI before
+ # computing the digest. See Section 6.2.2 of Std 66.
+ request_uri = query and "?".join([path, query]) or path
+ defrag_uri = scheme + "://" + authority + request_uri
+
+ return defrag_uri
+
+ @classmethod
+ def cache_url(cls, uri):
+ return cls._urlnorm(uri)
+
+ def parse_cache_control(self, headers):
+ known_directives = {
+ # https://tools.ietf.org/html/rfc7234#section-5.2
+ "max-age": (int, True),
+ "max-stale": (int, False),
+ "min-fresh": (int, True),
+ "no-cache": (None, False),
+ "no-store": (None, False),
+ "no-transform": (None, False),
+ "only-if-cached": (None, False),
+ "must-revalidate": (None, False),
+ "public": (None, False),
+ "private": (None, False),
+ "proxy-revalidate": (None, False),
+ "s-maxage": (int, True),
+ }
+
+ cc_headers = headers.get("cache-control", headers.get("Cache-Control", ""))
+
+ retval = {}
+
+ for cc_directive in cc_headers.split(","):
+ if not cc_directive.strip():
+ continue
+
+ parts = cc_directive.split("=", 1)
+ directive = parts[0].strip()
+
+ try:
+ typ, required = known_directives[directive]
+ except KeyError:
+ logger.debug("Ignoring unknown cache-control directive: %s", directive)
+ continue
+
+ if not typ or not required:
+ retval[directive] = None
+ if typ:
+ try:
+ retval[directive] = typ(parts[1].strip())
+ except IndexError:
+ if required:
+ logger.debug(
+ "Missing value for cache-control " "directive: %s",
+ directive,
+ )
+ except ValueError:
+ logger.debug(
+ "Invalid value for cache-control directive " "%s, must be %s",
+ directive,
+ typ.__name__,
+ )
+
+ return retval
+
+ def cached_request(self, request):
+ """
+ Return a cached response if it exists in the cache, otherwise
+ return False.
+ """
+ cache_url = self.cache_url(request.url)
+ logger.debug('Looking up "%s" in the cache', cache_url)
+ cc = self.parse_cache_control(request.headers)
+
+ # Bail out if the request insists on fresh data
+ if "no-cache" in cc:
+ logger.debug('Request header has "no-cache", cache bypassed')
+ return False
+
+ if "max-age" in cc and cc["max-age"] == 0:
+            logger.debug('Request header has "max-age" as 0, cache bypassed')
+ return False
+
+ # Request allows serving from the cache, let's see if we find something
+ cache_data = self.cache.get(cache_url)
+ if cache_data is None:
+ logger.debug("No cache entry available")
+ return False
+
+ if isinstance(self.cache, SeparateBodyBaseCache):
+ body_file = self.cache.get_body(cache_url)
+ else:
+ body_file = None
+
+ # Check whether it can be deserialized
+ resp = self.serializer.loads(request, cache_data, body_file)
+ if not resp:
+ logger.warning("Cache entry deserialization failed, entry ignored")
+ return False
+
+ # If we have a cached permanent redirect, return it immediately. We
+ # don't need to test our response for other headers b/c it is
+ # intrinsically "cacheable" as it is Permanent.
+ #
+ # See:
+ # https://tools.ietf.org/html/rfc7231#section-6.4.2
+ #
+ # Client can try to refresh the value by repeating the request
+ # with cache busting headers as usual (ie no-cache).
+ if int(resp.status) in PERMANENT_REDIRECT_STATUSES:
+ msg = (
+ "Returning cached permanent redirect response "
+ "(ignoring date and etag information)"
+ )
+ logger.debug(msg)
+ return resp
+
+ headers = CaseInsensitiveDict(resp.headers)
+ if not headers or "date" not in headers:
+ if "etag" not in headers:
+ # Without date or etag, the cached response can never be used
+ # and should be deleted.
+ logger.debug("Purging cached response: no date or etag")
+ self.cache.delete(cache_url)
+ logger.debug("Ignoring cached response: no date")
+ return False
+
+ now = time.time()
+ date = calendar.timegm(parsedate_tz(headers["date"]))
+ current_age = max(0, now - date)
+ logger.debug("Current age based on date: %i", current_age)
+
+ # TODO: There is an assumption that the result will be a
+ # urllib3 response object. This may not be best since we
+ # could probably avoid instantiating or constructing the
+ # response until we know we need it.
+ resp_cc = self.parse_cache_control(headers)
+
+ # determine freshness
+ freshness_lifetime = 0
+
+ # Check the max-age pragma in the cache control header
+ if "max-age" in resp_cc:
+ freshness_lifetime = resp_cc["max-age"]
+ logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
+
+ # If there isn't a max-age, check for an expires header
+ elif "expires" in headers:
+ expires = parsedate_tz(headers["expires"])
+ if expires is not None:
+ expire_time = calendar.timegm(expires) - date
+ freshness_lifetime = max(0, expire_time)
+ logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
+
+ # Determine if we are setting freshness limit in the
+ # request. Note, this overrides what was in the response.
+ if "max-age" in cc:
+ freshness_lifetime = cc["max-age"]
+ logger.debug(
+ "Freshness lifetime from request max-age: %i", freshness_lifetime
+ )
+
+ if "min-fresh" in cc:
+ min_fresh = cc["min-fresh"]
+ # adjust our current age by our min fresh
+ current_age += min_fresh
+ logger.debug("Adjusted current age from min-fresh: %i", current_age)
+
+ # Return entry if it is fresh enough
+ if freshness_lifetime > current_age:
+ logger.debug('The response is "fresh", returning cached response')
+ logger.debug("%i > %i", freshness_lifetime, current_age)
+ return resp
+
+ # we're not fresh. If we don't have an Etag, clear it out
+ if "etag" not in headers:
+ logger.debug('The cached response is "stale" with no etag, purging')
+ self.cache.delete(cache_url)
+
+ # return the original handler
+ return False
+
+ def conditional_headers(self, request):
+ cache_url = self.cache_url(request.url)
+ resp = self.serializer.loads(request, self.cache.get(cache_url))
+ new_headers = {}
+
+ if resp:
+ headers = CaseInsensitiveDict(resp.headers)
+
+ if "etag" in headers:
+ new_headers["If-None-Match"] = headers["ETag"]
+
+ if "last-modified" in headers:
+ new_headers["If-Modified-Since"] = headers["Last-Modified"]
+
+ return new_headers
+
+ def _cache_set(self, cache_url, request, response, body=None, expires_time=None):
+ """
+ Store the data in the cache.
+ """
+ if isinstance(self.cache, SeparateBodyBaseCache):
+ # We pass in the body separately; just put a placeholder empty
+ # string in the metadata.
+ self.cache.set(
+ cache_url,
+ self.serializer.dumps(request, response, b""),
+ expires=expires_time,
+ )
+ self.cache.set_body(cache_url, body)
+ else:
+ self.cache.set(
+ cache_url,
+ self.serializer.dumps(request, response, body),
+ expires=expires_time,
+ )
+
+ def cache_response(self, request, response, body=None, status_codes=None):
+ """
+ Algorithm for caching requests.
+
+ This assumes a requests Response object.
+ """
+ # From httplib2: Don't cache 206's since we aren't going to
+ # handle byte range requests
+ cacheable_status_codes = status_codes or self.cacheable_status_codes
+ if response.status not in cacheable_status_codes:
+ logger.debug(
+ "Status code %s not in %s", response.status, cacheable_status_codes
+ )
+ return
+
+ response_headers = CaseInsensitiveDict(response.headers)
+
+ if "date" in response_headers:
+ date = calendar.timegm(parsedate_tz(response_headers["date"]))
+ else:
+ date = 0
+
+        # If we've been given a body, our response has a Content-Length, and
+        # that Content-Length is valid, then we can check to see if the body
+        # we've been given matches the expected size; if it doesn't, we'll
+        # just skip trying to cache it.
+ if (
+ body is not None
+ and "content-length" in response_headers
+ and response_headers["content-length"].isdigit()
+ and int(response_headers["content-length"]) != len(body)
+ ):
+ return
+
+ cc_req = self.parse_cache_control(request.headers)
+ cc = self.parse_cache_control(response_headers)
+
+ cache_url = self.cache_url(request.url)
+ logger.debug('Updating cache with response from "%s"', cache_url)
+
+ # Delete it from the cache if we happen to have it stored there
+ no_store = False
+ if "no-store" in cc:
+ no_store = True
+ logger.debug('Response header has "no-store"')
+ if "no-store" in cc_req:
+ no_store = True
+ logger.debug('Request header has "no-store"')
+ if no_store and self.cache.get(cache_url):
+ logger.debug('Purging existing cache entry to honor "no-store"')
+ self.cache.delete(cache_url)
+ if no_store:
+ return
+
+ # https://tools.ietf.org/html/rfc7234#section-4.1:
+ # A Vary header field-value of "*" always fails to match.
+ # Storing such a response leads to a deserialization warning
+ # during cache lookup and is not allowed to ever be served,
+ # so storing it can be avoided.
+ if "*" in response_headers.get("vary", ""):
+ logger.debug('Response header has "Vary: *"')
+ return
+
+ # If we've been given an etag, then keep the response
+ if self.cache_etags and "etag" in response_headers:
+ expires_time = 0
+ if response_headers.get("expires"):
+ expires = parsedate_tz(response_headers["expires"])
+ if expires is not None:
+ expires_time = calendar.timegm(expires) - date
+
+ expires_time = max(expires_time, 14 * 86400)
+
+ logger.debug("etag object cached for {0} seconds".format(expires_time))
+ logger.debug("Caching due to etag")
+ self._cache_set(cache_url, request, response, body, expires_time)
+
+        # Add any permanent redirects to the cache. We do this before looking
+        # at the Date headers.
+ elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
+ logger.debug("Caching permanent redirect")
+ self._cache_set(cache_url, request, response, b"")
+
+ # Add to the cache if the response headers demand it. If there
+ # is no date header then we can't do anything about expiring
+ # the cache.
+ elif "date" in response_headers:
+ date = calendar.timegm(parsedate_tz(response_headers["date"]))
+ # cache when there is a max-age > 0
+ if "max-age" in cc and cc["max-age"] > 0:
+ logger.debug("Caching b/c date exists and max-age > 0")
+ expires_time = cc["max-age"]
+ self._cache_set(
+ cache_url,
+ request,
+ response,
+ body,
+ expires_time,
+ )
+
+        # If the response can expire, it means we should cache it
+ # in the meantime.
+ elif "expires" in response_headers:
+ if response_headers["expires"]:
+ expires = parsedate_tz(response_headers["expires"])
+ if expires is not None:
+ expires_time = calendar.timegm(expires) - date
+ else:
+ expires_time = None
+
+ logger.debug(
+ "Caching b/c of expires header. expires in {0} seconds".format(
+ expires_time
+ )
+ )
+ self._cache_set(
+ cache_url,
+ request,
+ response,
+ body,
+ expires_time,
+ )
+
+ def update_cached_response(self, request, response):
+ """On a 304 we will get a new set of headers that we want to
+ update our cached value with, assuming we have one.
+
+ This should only ever be called when we've sent an ETag and
+ gotten a 304 as the response.
+ """
+ cache_url = self.cache_url(request.url)
+
+ cached_response = self.serializer.loads(request, self.cache.get(cache_url))
+
+ if not cached_response:
+ # we didn't have a cached response
+ return response
+
+        # Let's update our headers with the headers from the new response:
+ # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
+ #
+ # The server isn't supposed to send headers that would make
+ # the cached body invalid. But... just in case, we'll be sure
+        # to strip out ones we know might be problematic due to
+ # typical assumptions.
+ excluded_headers = ["content-length"]
+
+ cached_response.headers.update(
+ dict(
+ (k, v)
+ for k, v in response.headers.items()
+ if k.lower() not in excluded_headers
+ )
+ )
+
+ # we want a 200 b/c we have content via the cache
+ cached_response.status = 200
+
+ # update our cache
+ self._cache_set(cache_url, request, cached_response)
+
+ return cached_response
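
A hedged sketch of the two pure helpers on CacheController; the URL and header values are invented for illustration:

    from pip._vendor.cachecontrol.controller import CacheController

    # _urlnorm lowercases the scheme and authority and drops the fragment.
    print(CacheController.cache_url("HTTP://Example.COM/a?b=c#frag"))
    # -> http://example.com/a?b=c

    cc = CacheController()
    directives = cc.parse_cache_control(
        {"cache-control": "max-age=3600, no-transform, bogus-directive"}
    )
    # Unknown directives are logged and dropped; valueless ones map to None.
    print(directives)  # {'max-age': 3600, 'no-transform': None}
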
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/filewrapper.py b/third_party/python/pip/pip/_vendor/cachecontrol/filewrapper.py
new file mode 100644
index 0000000000..f5ed5f6f6e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/filewrapper.py
@@ -0,0 +1,111 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from tempfile import NamedTemporaryFile
+import mmap
+
+
+class CallbackFileWrapper(object):
+ """
+ Small wrapper around a fp object which will tee everything read into a
+ buffer, and when that file is closed it will execute a callback with the
+ contents of that buffer.
+
+ All attributes are proxied to the underlying file object.
+
+ This class uses members with a double underscore (__) leading prefix so as
+ not to accidentally shadow an attribute.
+
+    The data is stored in a temporary file until it is all available. As long
+    as the temporary-files directory is disk-based (it is sometimes a
+    memory-backed ``tmpfs`` on Linux), data will be offloaded to disk when
+    memory pressure is high. For small files the disk usually won't be used
+    at all; it will all sit in the filesystem memory cache, so there should
+    be no performance impact.
+ """
+
+ def __init__(self, fp, callback):
+ self.__buf = NamedTemporaryFile("rb+", delete=True)
+ self.__fp = fp
+ self.__callback = callback
+
+ def __getattr__(self, name):
+        # The vagaries of garbage collection mean that self.__fp is
+        # not always set. Using __getattribute__ with the mangled
+        # private name [0] lets us look up the attribute value and
+        # raise an AttributeError when it doesn't exist. This stops
+        # things from infinitely recursing through getattr in the
+        # case where self.__fp hasn't been set.
+ #
+ # [0] https://docs.python.org/2/reference/expressions.html#atom-identifiers
+ fp = self.__getattribute__("_CallbackFileWrapper__fp")
+ return getattr(fp, name)
+
+ def __is_fp_closed(self):
+ try:
+ return self.__fp.fp is None
+
+ except AttributeError:
+ pass
+
+ try:
+ return self.__fp.closed
+
+ except AttributeError:
+ pass
+
+ # We just don't cache it then.
+ # TODO: Add some logging here...
+ return False
+
+ def _close(self):
+ if self.__callback:
+ if self.__buf.tell() == 0:
+ # Empty file:
+ result = b""
+ else:
+ # Return the data without actually loading it into memory,
+ # relying on Python's buffer API and mmap(). mmap() just gives
+ # a view directly into the filesystem's memory cache, so it
+ # doesn't result in duplicate memory use.
+ self.__buf.seek(0, 0)
+ result = memoryview(
+ mmap.mmap(self.__buf.fileno(), 0, access=mmap.ACCESS_READ)
+ )
+ self.__callback(result)
+
+        # We assign this to None here, because otherwise we can get into
+        # really tricky problems where the CPython interpreter deadlocks
+        # because the callback is holding a reference to something which
+        # has a __del__ method. Setting this to None breaks the cycle
+        # and allows the garbage collector to do its thing normally.
+ self.__callback = None
+
+ # Closing the temporary file releases memory and frees disk space.
+ # Important when caching big files.
+ self.__buf.close()
+
+ def read(self, amt=None):
+ data = self.__fp.read(amt)
+ if data:
+ # We may be dealing with b'', a sign that things are over:
+ # it's passed e.g. after we've already closed self.__buf.
+ self.__buf.write(data)
+ if self.__is_fp_closed():
+ self._close()
+
+ return data
+
+ def _safe_read(self, amt):
+ data = self.__fp._safe_read(amt)
+ if amt == 2 and data == b"\r\n":
+ # urllib executes this read to toss the CRLF at the end
+ # of the chunk.
+ return data
+
+ self.__buf.write(data)
+ if self.__is_fp_closed():
+ self._close()
+
+ return data
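
A small sketch of the tee-and-callback behavior; FakeFp is an invented stand-in for the urllib3 fp that reports itself closed once fully drained:

    import io

    from pip._vendor.cachecontrol.filewrapper import CallbackFileWrapper

    class FakeFp(io.BytesIO):
        # Invented for this sketch: "closed" flips to True once everything
        # has been read, which is what triggers CallbackFileWrapper._close().
        @property
        def closed(self):
            return self.tell() == len(self.getvalue())

    captured = []
    wrapped = CallbackFileWrapper(
        FakeFp(b"hello world"), lambda body: captured.append(bytes(body))
    )
    while wrapped.read(4):
        pass

    print(captured)  # [b'hello world'] -- the full teed body
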
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/heuristics.py b/third_party/python/pip/pip/_vendor/cachecontrol/heuristics.py
new file mode 100644
index 0000000000..ebe4a96f58
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/heuristics.py
@@ -0,0 +1,139 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import calendar
+import time
+
+from email.utils import formatdate, parsedate, parsedate_tz
+
+from datetime import datetime, timedelta
+
+TIME_FMT = "%a, %d %b %Y %H:%M:%S GMT"
+
+
+def expire_after(delta, date=None):
+ date = date or datetime.utcnow()
+ return date + delta
+
+
+def datetime_to_header(dt):
+ return formatdate(calendar.timegm(dt.timetuple()))
+
+
+class BaseHeuristic(object):
+
+ def warning(self, response):
+ """
+ Return a valid 1xx warning header value describing the cache
+ adjustments.
+
+        The response is provided to allow warnings like 113
+        http://tools.ietf.org/html/rfc7234#section-5.5.4 where we need
+        to explicitly say the response is over 24 hours old.
+ """
+ return '110 - "Response is Stale"'
+
+ def update_headers(self, response):
+ """Update the response headers with any new headers.
+
+ NOTE: This SHOULD always include some Warning header to
+ signify that the response was cached by the client, not
+ by way of the provided headers.
+ """
+ return {}
+
+ def apply(self, response):
+ updated_headers = self.update_headers(response)
+
+ if updated_headers:
+ response.headers.update(updated_headers)
+ warning_header_value = self.warning(response)
+ if warning_header_value is not None:
+ response.headers.update({"Warning": warning_header_value})
+
+ return response
+
+
+class OneDayCache(BaseHeuristic):
+ """
+    Cache the response by providing an Expires header 1 day in the
+    future.
+ """
+
+ def update_headers(self, response):
+ headers = {}
+
+ if "expires" not in response.headers:
+ date = parsedate(response.headers["date"])
+ expires = expire_after(timedelta(days=1), date=datetime(*date[:6]))
+ headers["expires"] = datetime_to_header(expires)
+ headers["cache-control"] = "public"
+ return headers
+
+
+class ExpiresAfter(BaseHeuristic):
+ """
+ Cache **all** requests for a defined time period.
+ """
+
+ def __init__(self, **kw):
+ self.delta = timedelta(**kw)
+
+ def update_headers(self, response):
+ expires = expire_after(self.delta)
+ return {"expires": datetime_to_header(expires), "cache-control": "public"}
+
+ def warning(self, response):
+ tmpl = "110 - Automatically cached for %s. Response might be stale"
+ return tmpl % self.delta
+
+
+class LastModified(BaseHeuristic):
+ """
+ If there is no Expires header already, fall back on Last-Modified
+ using the heuristic from
+ http://tools.ietf.org/html/rfc7234#section-4.2.2
+ to calculate a reasonable value.
+
+ Firefox also does something like this per
+ https://developer.mozilla.org/en-US/docs/Web/HTTP/Caching_FAQ
+ http://lxr.mozilla.org/mozilla-release/source/netwerk/protocol/http/nsHttpResponseHead.cpp#397
+    Unlike Mozilla, we limit this to 24 hours.
+ """
+ cacheable_by_default_statuses = {
+ 200, 203, 204, 206, 300, 301, 404, 405, 410, 414, 501
+ }
+
+ def update_headers(self, resp):
+ headers = resp.headers
+
+ if "expires" in headers:
+ return {}
+
+ if "cache-control" in headers and headers["cache-control"] != "public":
+ return {}
+
+ if resp.status not in self.cacheable_by_default_statuses:
+ return {}
+
+ if "date" not in headers or "last-modified" not in headers:
+ return {}
+
+ date = calendar.timegm(parsedate_tz(headers["date"]))
+ last_modified = parsedate(headers["last-modified"])
+ if date is None or last_modified is None:
+ return {}
+
+ now = time.time()
+ current_age = max(0, now - date)
+ delta = date - calendar.timegm(last_modified)
+ freshness_lifetime = max(0, min(delta / 10, 24 * 3600))
+ if freshness_lifetime <= current_age:
+ return {}
+
+ expires = date + freshness_lifetime
+ return {"expires": time.strftime(TIME_FMT, time.gmtime(expires))}
+
+ def warning(self, resp):
+ return None
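
A sketch of a heuristic in action; Resp is an invented stand-in carrying just the attributes apply() touches:

    from pip._vendor.cachecontrol.heuristics import ExpiresAfter

    class Resp:
        def __init__(self):
            self.headers = {}
            self.status = 200

    # apply() merges update_headers() into the response and, because the
    # warning() hook returns non-None, adds a Warning header as well.
    resp = ExpiresAfter(hours=1).apply(Resp())
    print(resp.headers["cache-control"])  # public
    print(resp.headers["Warning"])  # 110 - Automatically cached for 1:00:00. ...
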
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/serialize.py b/third_party/python/pip/pip/_vendor/cachecontrol/serialize.py
new file mode 100644
index 0000000000..7fe1a3e33a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/serialize.py
@@ -0,0 +1,190 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import base64
+import io
+import json
+import zlib
+
+from pip._vendor import msgpack
+from pip._vendor.requests.structures import CaseInsensitiveDict
+
+from .compat import HTTPResponse, pickle, text_type
+
+
+def _b64_decode_bytes(b):
+ return base64.b64decode(b.encode("ascii"))
+
+
+def _b64_decode_str(s):
+ return _b64_decode_bytes(s).decode("utf8")
+
+
+_default_body_read = object()
+
+
+class Serializer(object):
+ def dumps(self, request, response, body=None):
+ response_headers = CaseInsensitiveDict(response.headers)
+
+ if body is None:
+ # When a body isn't passed in, we'll read the response. We
+ # also update the response with a new file handler to be
+ # sure it acts as though it was never read.
+ body = response.read(decode_content=False)
+ response._fp = io.BytesIO(body)
+
+ # NOTE: This is all a bit weird, but it's really important that on
+ # Python 2.x these objects are unicode and not str, even when
+ # they contain only ascii. The problem here is that msgpack
+ # understands the difference between unicode and bytes and we
+        #       have it set to differentiate between them; however, Python 2
+ # doesn't know the difference. Forcing these to unicode will be
+ # enough to have msgpack know the difference.
+ data = {
+ u"response": {
+ u"body": body, # Empty bytestring if body is stored separately
+ u"headers": dict(
+ (text_type(k), text_type(v)) for k, v in response.headers.items()
+ ),
+ u"status": response.status,
+ u"version": response.version,
+ u"reason": text_type(response.reason),
+ u"strict": response.strict,
+ u"decode_content": response.decode_content,
+ }
+ }
+
+ # Construct our vary headers
+ data[u"vary"] = {}
+ if u"vary" in response_headers:
+ varied_headers = response_headers[u"vary"].split(",")
+ for header in varied_headers:
+ header = text_type(header).strip()
+ header_value = request.headers.get(header, None)
+ if header_value is not None:
+ header_value = text_type(header_value)
+ data[u"vary"][header] = header_value
+
+ return b",".join([b"cc=4", msgpack.dumps(data, use_bin_type=True)])
+
+ def loads(self, request, data, body_file=None):
+ # Short circuit if we've been given an empty set of data
+ if not data:
+ return
+
+ # Determine what version of the serializer the data was serialized
+ # with
+ try:
+ ver, data = data.split(b",", 1)
+ except ValueError:
+ ver = b"cc=0"
+
+ # Make sure that our "ver" is actually a version and isn't a false
+        # positive from a comma being in the data stream.
+ if ver[:3] != b"cc=":
+ data = ver + data
+ ver = b"cc=0"
+
+ # Get the version number out of the cc=N
+ ver = ver.split(b"=", 1)[-1].decode("ascii")
+
+ # Dispatch to the actual load method for the given version
+ try:
+ return getattr(self, "_loads_v{}".format(ver))(request, data, body_file)
+
+ except AttributeError:
+ # This is a version we don't have a loads function for, so we'll
+ # just treat it as a miss and return None
+ return
+
+ def prepare_response(self, request, cached, body_file=None):
+ """Verify our vary headers match and construct a real urllib3
+ HTTPResponse object.
+ """
+ # Special case the '*' Vary value as it means we cannot actually
+ # determine if the cached response is suitable for this request.
+ # This case is also handled in the controller code when creating
+ # a cache entry, but is left here for backwards compatibility.
+ if "*" in cached.get("vary", {}):
+ return
+
+ # Ensure that the Vary headers for the cached response match our
+ # request
+ for header, value in cached.get("vary", {}).items():
+ if request.headers.get(header, None) != value:
+ return
+
+ body_raw = cached["response"].pop("body")
+
+ headers = CaseInsensitiveDict(data=cached["response"]["headers"])
+ if headers.get("transfer-encoding", "") == "chunked":
+ headers.pop("transfer-encoding")
+
+ cached["response"]["headers"] = headers
+
+ try:
+ if body_file is None:
+ body = io.BytesIO(body_raw)
+ else:
+ body = body_file
+ except TypeError:
+ # This can happen if cachecontrol serialized to v1 format (pickle)
+ # using Python 2. A Python 2 str(byte string) will be unpickled as
+ # a Python 3 str (unicode string), which will cause the above to
+ # fail with:
+ #
+ # TypeError: 'str' does not support the buffer interface
+ body = io.BytesIO(body_raw.encode("utf8"))
+
+ return HTTPResponse(body=body, preload_content=False, **cached["response"])
+
+ def _loads_v0(self, request, data, body_file=None):
+ # The original legacy cache data. This doesn't contain enough
+ # information to construct everything we need, so we'll treat this as
+ # a miss.
+ return
+
+ def _loads_v1(self, request, data, body_file=None):
+ try:
+ cached = pickle.loads(data)
+ except ValueError:
+ return
+
+ return self.prepare_response(request, cached, body_file)
+
+ def _loads_v2(self, request, data, body_file=None):
+ assert body_file is None
+ try:
+ cached = json.loads(zlib.decompress(data).decode("utf8"))
+ except (ValueError, zlib.error):
+ return
+
+ # We need to decode the items that we've base64 encoded
+ cached["response"]["body"] = _b64_decode_bytes(cached["response"]["body"])
+ cached["response"]["headers"] = dict(
+ (_b64_decode_str(k), _b64_decode_str(v))
+ for k, v in cached["response"]["headers"].items()
+ )
+ cached["response"]["reason"] = _b64_decode_str(cached["response"]["reason"])
+ cached["vary"] = dict(
+ (_b64_decode_str(k), _b64_decode_str(v) if v is not None else v)
+ for k, v in cached["vary"].items()
+ )
+
+ return self.prepare_response(request, cached, body_file)
+
+ def _loads_v3(self, request, data, body_file):
+ # Due to Python 2 encoding issues, it's impossible to know for sure
+ # exactly how to load v3 entries, thus we'll treat these as a miss so
+ # that they get rewritten out as v4 entries.
+ return
+
+ def _loads_v4(self, request, data, body_file=None):
+ try:
+ cached = msgpack.loads(data, raw=False)
+ except ValueError:
+ return
+
+ return self.prepare_response(request, cached, body_file)
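
A minimal sketch of the "cc=N," framing that loads() strips before dispatching; every payload below is fabricated, and each case deliberately comes back as a miss (None):

    from pip._vendor.cachecontrol.serialize import Serializer

    request = type("Req", (), {"headers": {}})()  # throwaway stand-in
    s = Serializer()

    print(s.loads(request, b""))               # None: empty entry
    print(s.loads(request, b"raw,legacy"))     # None: no prefix -> treated as cc=0
    print(s.loads(request, b"cc=9,whatever"))  # None: no _loads_v9 -> miss
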
diff --git a/third_party/python/pip/pip/_vendor/cachecontrol/wrapper.py b/third_party/python/pip/pip/_vendor/cachecontrol/wrapper.py
new file mode 100644
index 0000000000..b6ee7f2039
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/cachecontrol/wrapper.py
@@ -0,0 +1,33 @@
+# SPDX-FileCopyrightText: 2015 Eric Larson
+#
+# SPDX-License-Identifier: Apache-2.0
+
+from .adapter import CacheControlAdapter
+from .cache import DictCache
+
+
+def CacheControl(
+ sess,
+ cache=None,
+ cache_etags=True,
+ serializer=None,
+ heuristic=None,
+ controller_class=None,
+ adapter_class=None,
+ cacheable_methods=None,
+):
+
+ cache = DictCache() if cache is None else cache
+ adapter_class = adapter_class or CacheControlAdapter
+ adapter = adapter_class(
+ cache,
+ cache_etags=cache_etags,
+ serializer=serializer,
+ heuristic=heuristic,
+ controller_class=controller_class,
+ cacheable_methods=cacheable_methods,
+ )
+ sess.mount("http://", adapter)
+ sess.mount("https://", adapter)
+
+ return sess
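
The canonical way to wire this up, sketched with the top-level requests package rather than pip's vendored copy, and assuming the cachecontrol package __init__ re-exports CacheControl as upstream does:

    import requests

    from pip._vendor.cachecontrol import CacheControl

    sess = CacheControl(requests.Session())
    resp = sess.get("https://example.com/")  # goes to the network
    resp = sess.get("https://example.com/")  # may now be served from DictCache
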
diff --git a/third_party/python/pip/pip/_vendor/certifi/__init__.py b/third_party/python/pip/pip/_vendor/certifi/__init__.py
new file mode 100644
index 0000000000..a3546f1255
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/certifi/__init__.py
@@ -0,0 +1,4 @@
+from .core import contents, where
+
+__all__ = ["contents", "where"]
+__version__ = "2022.12.07"
diff --git a/third_party/python/pip/pip/_vendor/certifi/__main__.py b/third_party/python/pip/pip/_vendor/certifi/__main__.py
new file mode 100644
index 0000000000..00376349e6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/certifi/__main__.py
@@ -0,0 +1,12 @@
+import argparse
+
+from pip._vendor.certifi import contents, where
+
+parser = argparse.ArgumentParser()
+parser.add_argument("-c", "--contents", action="store_true")
+args = parser.parse_args()
+
+if args.contents:
+ print(contents())
+else:
+ print(where())
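
The module doubles as a CLI (python -m pip._vendor.certifi prints the bundle path; -c/--contents dumps the PEM text), and the same two functions can be called directly:

    from pip._vendor.certifi import contents, where

    print(where())          # filesystem path to the vendored cacert.pem
    print(contents()[:64])  # beginning of the PEM bundle itself
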
diff --git a/third_party/python/pip/pip/_vendor/certifi/cacert.pem b/third_party/python/pip/pip/_vendor/certifi/cacert.pem
new file mode 100644
index 0000000000..df9e4e3c75
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/certifi/cacert.pem
@@ -0,0 +1,4527 @@
+
+# Issuer: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Subject: CN=GlobalSign Root CA O=GlobalSign nv-sa OU=Root CA
+# Label: "GlobalSign Root CA"
+# Serial: 4835703278459707669005204
+# MD5 Fingerprint: 3e:45:52:15:09:51:92:e1:b7:5d:37:9f:b1:87:29:8a
+# SHA1 Fingerprint: b1:bc:96:8b:d4:f4:9d:62:2a:a8:9a:81:f2:15:01:52:a4:1d:82:9c
+# SHA256 Fingerprint: eb:d4:10:40:e4:bb:3e:c7:42:c9:e3:81:d3:1e:f2:a4:1a:48:b6:68:5c:96:e7:ce:f3:c1:df:6c:d4:33:1c:99
+-----BEGIN CERTIFICATE-----
+MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv
+b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw
+MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i
+YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT
+aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ
+jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp
+xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp
+1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG
+snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ
+U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8
+9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B
+AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz
+yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE
+38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP
+AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad
+DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME
+HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Subject: CN=Entrust.net Certification Authority (2048) O=Entrust.net OU=www.entrust.net/CPS_2048 incorp. by ref. (limits liab.)/(c) 1999 Entrust.net Limited
+# Label: "Entrust.net Premium 2048 Secure Server CA"
+# Serial: 946069240
+# MD5 Fingerprint: ee:29:31:bc:32:7e:9a:e6:e8:b5:f7:51:b4:34:71:90
+# SHA1 Fingerprint: 50:30:06:09:1d:97:d4:f5:ae:39:f7:cb:e7:92:7d:7d:65:2d:34:31
+# SHA256 Fingerprint: 6d:c4:71:72:e0:1c:bc:b0:bf:62:58:0d:89:5f:e2:b8:ac:9a:d4:f8:73:80:1e:0c:10:b9:c8:37:d2:1e:b1:77
+-----BEGIN CERTIFICATE-----
+MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML
+RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp
+bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5
+IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3
+MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3
+LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp
+YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq
+K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe
+sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX
+MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT
+XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/
+HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH
+4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub
+j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo
+U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf
+zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b
+u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+
+bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er
+fF6adulZkMV8gzURZVE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Subject: CN=Baltimore CyberTrust Root O=Baltimore OU=CyberTrust
+# Label: "Baltimore CyberTrust Root"
+# Serial: 33554617
+# MD5 Fingerprint: ac:b6:94:a5:9c:17:e0:d7:91:52:9b:b1:97:06:a6:e4
+# SHA1 Fingerprint: d4:de:20:d0:5e:66:fc:53:fe:1a:50:88:2c:78:db:28:52:ca:e4:74
+# SHA256 Fingerprint: 16:af:57:a9:f6:76:b0:ab:12:60:95:aa:5e:ba:de:f2:2a:b3:11:19:d6:44:ac:95:cd:4b:93:db:f3:f2:6a:eb
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ
+RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD
+VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX
+DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y
+ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy
+VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr
+mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr
+IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK
+mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu
+XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy
+dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye
+jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1
+BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3
+DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92
+9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx
+jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0
+Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz
+ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS
+R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Subject: CN=Entrust Root Certification Authority O=Entrust, Inc. OU=www.entrust.net/CPS is incorporated by reference/(c) 2006 Entrust, Inc.
+# Label: "Entrust Root Certification Authority"
+# Serial: 1164660820
+# MD5 Fingerprint: d6:a5:c3:ed:5d:dd:3e:00:c1:3d:87:92:1f:1d:3f:e4
+# SHA1 Fingerprint: b3:1e:b1:b7:40:e3:6c:84:02:da:dc:37:d4:4d:f5:d4:67:49:52:f9
+# SHA256 Fingerprint: 73:c1:76:43:4f:1b:c6:d5:ad:f4:5b:0e:76:e7:27:28:7c:8d:e5:76:16:c1:e6:e6:14:1a:2b:2c:bc:7d:8e:4c
+-----BEGIN CERTIFICATE-----
+MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0
+Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW
+KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw
+NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw
+NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy
+ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV
+BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ
+KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo
+Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4
+4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9
+KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI
+rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi
+94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB
+sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi
+gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo
+kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE
+vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA
+A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t
+O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua
+AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP
+9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/
+eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m
+0vdXcDazv/wor3ElhVsT/h5/WrQ8
+-----END CERTIFICATE-----
+
+# Issuer: CN=AAA Certificate Services O=Comodo CA Limited
+# Subject: CN=AAA Certificate Services O=Comodo CA Limited
+# Label: "Comodo AAA Services root"
+# Serial: 1
+# MD5 Fingerprint: 49:79:04:b0:eb:87:19:ac:47:b0:bc:11:51:9b:74:d0
+# SHA1 Fingerprint: d1:eb:23:a4:6d:17:d6:8f:d9:25:64:c2:f1:f1:60:17:64:d8:e3:49
+# SHA256 Fingerprint: d7:a7:a0:fb:5d:7e:27:31:d7:71:e9:48:4e:bc:de:f7:1d:5f:0c:3e:0a:29:48:78:2b:c8:3e:e0:ea:69:9e:f4
+-----BEGIN CERTIFICATE-----
+MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb
+MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow
+GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj
+YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM
+GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP
+ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua
+BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe
+3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4
+YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR
+rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm
+ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU
+oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF
+MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v
+QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t
+b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF
+AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q
+GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz
+Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2
+G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi
+l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3
+smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2"
+# Serial: 1289
+# MD5 Fingerprint: 5e:39:7b:dd:f8:ba:ec:82:e9:ac:62:ba:0c:54:00:2b
+# SHA1 Fingerprint: ca:3a:fb:cf:12:40:36:4b:44:b2:16:20:88:80:48:39:19:93:7c:f7
+# SHA256 Fingerprint: 85:a0:dd:7d:d7:20:ad:b7:ff:05:f8:3d:54:2b:20:9d:c7:ff:45:28:f7:d6:77:b1:83:89:fe:a5:e5:c4:9e:86
+-----BEGIN CERTIFICATE-----
+MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa
+GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg
+Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J
+WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB
+rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp
++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1
+ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i
+Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz
+PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og
+/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH
+oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI
+yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud
+EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2
+A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL
+MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT
+ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f
+BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn
+g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl
+fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K
+WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha
+B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc
+hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR
+TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD
+mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z
+ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y
+4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza
+8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3"
+# Serial: 1478
+# MD5 Fingerprint: 31:85:3c:62:94:97:63:b9:aa:fd:89:4e:af:6f:e0:cf
+# SHA1 Fingerprint: 1f:49:14:f7:d8:74:95:1d:dd:ae:02:c0:be:fd:3a:2d:82:75:51:85
+# SHA256 Fingerprint: 18:f1:fc:7f:20:5d:f8:ad:dd:eb:7f:e0:07:dd:57:e3:af:37:5a:9c:4d:8d:73:54:6b:f4:f1:fe:d1:e1:8d:35
+-----BEGIN CERTIFICATE-----
+MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x
+GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv
+b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV
+BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W
+YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM
+V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB
+4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr
+H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd
+8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv
+vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT
+mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe
+btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc
+T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt
+WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ
+c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A
+4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD
+VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG
+CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0
+aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0
+aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu
+dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw
+czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G
+A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC
+TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg
+Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0
+7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem
+d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd
++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B
+4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN
+t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x
+DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57
+k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s
+zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j
+Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT
+mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK
+4SVhM7JZG+Ju1zdXtg2pEto=
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust.net OU=Security Communication RootCA1
+# Subject: O=SECOM Trust.net OU=Security Communication RootCA1
+# Label: "Security Communication Root CA"
+# Serial: 0
+# MD5 Fingerprint: f1:bc:63:6a:54:e0:b5:27:f5:cd:e7:1a:e3:4d:6e:4a
+# SHA1 Fingerprint: 36:b1:2b:49:f9:81:9e:d7:4c:9e:bc:38:0f:c6:56:8f:5d:ac:b2:f7
+# SHA256 Fingerprint: e7:5e:72:ed:9f:56:0e:ec:6e:b4:80:00:73:a4:3f:c3:ad:19:19:5a:39:22:82:01:78:95:97:4a:99:02:6b:6c
+-----BEGIN CERTIFICATE-----
+MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY
+MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t
+dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5
+WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD
+VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8
+9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ
+DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9
+Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N
+QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ
+xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G
+A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG
+kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr
+Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5
+Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU
+JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot
+RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Subject: CN=XRamp Global Certification Authority O=XRamp Security Services Inc OU=www.xrampsecurity.com
+# Label: "XRamp Global CA Root"
+# Serial: 107108908803651509692980124233745014957
+# MD5 Fingerprint: a1:0b:44:b3:ca:10:d8:00:6e:9d:0f:d8:0f:92:0a:d1
+# SHA1 Fingerprint: b8:01:86:d1:eb:9c:86:a5:41:04:cf:30:54:f3:4c:52:b7:e5:58:c6
+# SHA256 Fingerprint: ce:cd:dc:90:50:99:d8:da:df:c5:b1:d2:09:b7:37:cb:e2:c1:8c:fb:2c:10:c0:ff:0b:cf:0d:32:86:fc:1a:a2
+-----BEGIN CERTIFICATE-----
+MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB
+gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk
+MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY
+UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx
+NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3
+dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy
+dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6
+38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP
+KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q
+DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4
+qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa
+JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi
+PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P
+BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs
+jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0
+eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD
+ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR
+vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt
+qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa
+IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy
+i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ
+O+7ETPTsJ3xCwnR8gooJybQDJbw=
+-----END CERTIFICATE-----
+
+# Issuer: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Subject: O=The Go Daddy Group, Inc. OU=Go Daddy Class 2 Certification Authority
+# Label: "Go Daddy Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 91:de:06:25:ab:da:fd:32:17:0c:bb:25:17:2a:84:67
+# SHA1 Fingerprint: 27:96:ba:e6:3f:18:01:e2:77:26:1b:a0:d7:77:70:02:8f:20:ee:e4
+# SHA256 Fingerprint: c3:84:6b:f2:4b:9e:93:ca:64:27:4c:0e:c6:7c:1e:cc:5e:02:4f:fc:ac:d2:d7:40:19:35:0e:81:fe:54:6a:e4
+-----BEGIN CERTIFICATE-----
+MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh
+MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE
+YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3
+MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo
+ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg
+MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN
+ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA
+PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w
+wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi
+EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY
+avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+
+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE
+sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h
+/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5
+IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
+ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy
+OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P
+TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ
+HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER
+dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf
+ReYNnyicsbkqWletNw+vHX/bvZ8=
+-----END CERTIFICATE-----
+
+# Issuer: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Subject: O=Starfield Technologies, Inc. OU=Starfield Class 2 Certification Authority
+# Label: "Starfield Class 2 CA"
+# Serial: 0
+# MD5 Fingerprint: 32:4a:4b:bb:c8:63:69:9b:be:74:9a:c6:dd:1d:46:24
+# SHA1 Fingerprint: ad:7e:1c:28:b0:64:ef:8f:60:03:40:20:14:c3:d0:e3:37:0e:b5:8a
+# SHA256 Fingerprint: 14:65:fa:20:53:97:b8:76:fa:a6:f0:a9:95:8e:55:90:e4:0f:cc:7f:aa:4f:b7:c2:c8:67:75:21:fb:5f:b6:58
+-----BEGIN CERTIFICATE-----
+MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl
+MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp
+U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw
+NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE
+ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp
+ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3
+DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf
+8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN
++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0
+X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa
+K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA
+1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G
+A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR
+zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0
+YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD
+bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w
+DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3
+L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D
+eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl
+xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp
+VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY
+WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root CA"
+# Serial: 17154717934120587862167794914071425081
+# MD5 Fingerprint: 87:ce:0b:7b:2a:0e:49:00:e1:58:71:9b:37:a8:93:72
+# SHA1 Fingerprint: 05:63:b8:63:0d:62:d7:5a:bb:c8:ab:1e:4b:df:b5:a8:99:b2:4d:43
+# SHA256 Fingerprint: 3e:90:99:b5:01:5e:8f:48:6c:00:bc:ea:9d:11:1e:e7:21:fa:ba:35:5a:89:bc:f1:df:69:56:1e:3d:c6:32:5c
+-----BEGIN CERTIFICATE-----
+MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c
+JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP
+mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+
+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4
+VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/
+AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB
+AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun
+pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC
+dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf
+fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm
+NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx
+H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe
++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root CA"
+# Serial: 10944719598952040374951832963794454346
+# MD5 Fingerprint: 79:e4:a9:84:0d:7d:3a:96:d7:c0:4f:e2:43:4c:89:2e
+# SHA1 Fingerprint: a8:98:5d:3a:65:e5:e5:c4:b2:d7:d6:6d:40:c6:dd:2f:b1:9c:54:36
+# SHA256 Fingerprint: 43:48:a0:e9:44:4c:78:cb:26:5e:05:8d:5e:89:44:b4:d8:4f:96:62:bd:26:db:25:7f:89:34:a4:43:c7:01:61
+-----BEGIN CERTIFICATE-----
+MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD
+QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB
+CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97
+nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt
+43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P
+T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4
+gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR
+TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw
+DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr
+hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg
+06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF
+PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls
+YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk
+CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert High Assurance EV Root CA O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert High Assurance EV Root CA"
+# Serial: 3553400076410547919724730734378100087
+# MD5 Fingerprint: d4:74:de:57:5c:39:b2:d3:9c:85:83:c5:c0:65:49:8a
+# SHA1 Fingerprint: 5f:b7:ee:06:33:e2:59:db:ad:0c:4c:9a:e6:d3:8f:1a:61:c7:dc:25
+# SHA256 Fingerprint: 74:31:e5:f4:c3:c1:ce:46:90:77:4f:0b:61:e0:54:40:88:3b:a9:a0:1e:d0:0b:a6:ab:d7:80:6e:d3:b1:18:cf
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j
+ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL
+MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3
+LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug
+RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm
++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW
+PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM
+xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB
+Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3
+hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg
+EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA
+FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec
+nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z
+eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF
+hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2
+Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe
+vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep
++OkuE6N36B9K
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Gold CA - G2 O=SwissSign AG
+# Label: "SwissSign Gold CA - G2"
+# Serial: 13492815561806991280
+# MD5 Fingerprint: 24:77:d9:a8:91:d1:3b:fa:88:2d:c2:ff:f8:cd:33:93
+# SHA1 Fingerprint: d8:c5:38:8a:b7:30:1b:1b:6e:d4:7a:e6:45:25:3a:6f:9f:1a:27:61
+# SHA256 Fingerprint: 62:dd:0b:e9:b9:f5:0a:16:3e:a0:f8:e7:5c:05:3b:1e:ca:57:ea:55:c8:68:8f:64:7c:68:81:f2:c8:35:7b:95
+-----BEGIN CERTIFICATE-----
+MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV
+BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln
+biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF
+MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT
+d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8
+76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+
+bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c
+6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE
+emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd
+MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt
+MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y
+MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y
+FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi
+aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM
+gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB
+qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7
+lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn
+8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov
+L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6
+45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO
+UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5
+O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv
+GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a
+77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC
+hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3
+92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp
+Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w
+ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt
+Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ
+-----END CERTIFICATE-----
+
+# Issuer: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Subject: CN=SwissSign Silver CA - G2 O=SwissSign AG
+# Label: "SwissSign Silver CA - G2"
+# Serial: 5700383053117599563
+# MD5 Fingerprint: e0:06:a1:c9:7d:cf:c9:fc:0d:c0:56:75:96:d8:62:13
+# SHA1 Fingerprint: 9b:aa:e5:9f:56:ee:21:cb:43:5a:be:25:93:df:a7:f0:40:d1:1d:cb
+# SHA256 Fingerprint: be:6c:4d:a2:bb:b9:ba:59:b6:f3:93:97:68:37:42:46:c3:c0:05:99:3f:a9:8f:02:0d:1d:ed:be:d4:8a:81:d5
+-----BEGIN CERTIFICATE-----
+MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE
+BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu
+IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow
+RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY
+U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A
+MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br
+YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF
+nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH
+6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt
+eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/
+c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ
+MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH
+HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf
+jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6
+5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB
+rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU
+F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c
+wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0
+cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB
+AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp
+WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9
+xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ
+2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ
+IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8
+aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X
+em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR
+dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/
+OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+
+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy
+tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureTrust CA O=SecureTrust Corporation
+# Subject: CN=SecureTrust CA O=SecureTrust Corporation
+# Label: "SecureTrust CA"
+# Serial: 17199774589125277788362757014266862032
+# MD5 Fingerprint: dc:32:c3:a7:6d:25:57:c7:68:09:9d:ea:2d:a9:a2:d1
+# SHA1 Fingerprint: 87:82:c6:c3:04:35:3b:cf:d2:96:92:d2:59:3e:7d:44:d9:34:ff:11
+# SHA256 Fingerprint: f1:c1:b5:0a:e5:a2:0d:d8:03:0e:c9:f6:bc:24:82:3d:d3:67:b5:25:57:59:b4:e7:1b:61:fc:e9:f7:37:5d:73
+-----BEGIN CERTIFICATE-----
+MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz
+MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv
+cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz
+Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao
+wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj
+7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS
+8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT
+BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg
+JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3
+6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/
+3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm
+D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS
+CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR
+3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Secure Global CA O=SecureTrust Corporation
+# Subject: CN=Secure Global CA O=SecureTrust Corporation
+# Label: "Secure Global CA"
+# Serial: 9751836167731051554232119481456978597
+# MD5 Fingerprint: cf:f4:27:0d:d4:ed:dc:65:16:49:6d:3d:da:bf:6e:de
+# SHA1 Fingerprint: 3a:44:73:5a:e5:81:90:1f:24:86:61:46:1e:3b:9c:c4:5f:f5:3a:1b
+# SHA256 Fingerprint: 42:00:f5:04:3a:c8:59:0e:bb:52:7d:20:9e:d1:50:30:29:fb:cb:d4:1c:a1:b5:06:ec:27:f1:5a:de:7d:ac:69
+-----BEGIN CERTIFICATE-----
+MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK
+MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x
+GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx
+MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg
+Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG
+SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ
+iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa
+/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ
+jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI
+HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7
+sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w
+gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw
+KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG
+AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L
+URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO
+H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm
+I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY
+iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc
+f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO Certification Authority O=COMODO CA Limited
+# Label: "COMODO Certification Authority"
+# Serial: 104350513648249232941998508985834464573
+# MD5 Fingerprint: 5c:48:dc:f7:42:72:ec:56:94:6d:1c:cc:71:35:80:75
+# SHA1 Fingerprint: 66:31:bf:9e:f7:4f:9e:b6:c9:d5:a6:0c:ba:6a:be:d1:f7:bd:ef:7b
+# SHA256 Fingerprint: 0c:2c:d6:3d:f7:80:6f:a3:99:ed:e8:09:11:6b:57:5b:f8:79:89:f0:65:18:f9:80:8c:86:05:03:17:8b:af:66
+-----BEGIN CERTIFICATE-----
+MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB
+gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV
+BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw
+MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl
+YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P
+RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0
+aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3
+UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI
+2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8
+Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp
++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+
+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O
+nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW
+/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g
+PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u
+QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY
+SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv
+IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/
+RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4
+zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd
+BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB
+ZQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO ECC Certification Authority O=COMODO CA Limited
+# Label: "COMODO ECC Certification Authority"
+# Serial: 41578283867086692638256921589707938090
+# MD5 Fingerprint: 7c:62:ff:74:9d:31:53:5e:68:4a:d5:78:aa:1e:bf:23
+# SHA1 Fingerprint: 9f:74:4e:9f:2b:4d:ba:ec:0f:31:2c:50:b6:56:3b:8e:2d:93:c3:11
+# SHA256 Fingerprint: 17:93:92:7a:06:14:54:97:89:ad:ce:2f:8f:34:f7:f0:b6:6d:0f:3a:e3:a3:b8:4d:21:ec:15:db:ba:4f:ad:c7
+-----BEGIN CERTIFICATE-----
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL
+MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE
+BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT
+IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw
+MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy
+ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N
+T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv
+biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR
+FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J
+cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW
+BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm
+fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv
+GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna O=Dhimyotis
+# Subject: CN=Certigna O=Dhimyotis
+# Label: "Certigna"
+# Serial: 18364802974209362175
+# MD5 Fingerprint: ab:57:a6:5b:7d:42:82:19:b5:d8:58:26:28:5e:fd:ff
+# SHA1 Fingerprint: b1:2e:13:63:45:86:a4:6f:1a:b2:60:68:37:58:2d:c4:ac:fd:94:97
+# SHA256 Fingerprint: e3:b6:a2:db:2e:d7:ce:48:84:2f:7a:c5:32:41:c7:b7:1d:54:14:4b:fb:40:c1:1f:3f:1d:0b:42:f5:ee:a1:2d
+-----BEGIN CERTIFICATE-----
+MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV
+BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X
+DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ
+BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3
+DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4
+QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny
+gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw
+zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q
+130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2
+JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw
+DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw
+ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT
+AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj
+AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG
+9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h
+bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc
+fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu
+HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w
+t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw
+WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg==
+-----END CERTIFICATE-----
+
+# Issuer: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Subject: O=Chunghwa Telecom Co., Ltd. OU=ePKI Root Certification Authority
+# Label: "ePKI Root Certification Authority"
+# Serial: 28956088682735189655030529057352760477
+# MD5 Fingerprint: 1b:2e:00:ca:26:06:90:3d:ad:fe:6f:15:68:d3:6b:b3
+# SHA1 Fingerprint: 67:65:0d:f1:7e:8e:7e:5b:82:40:a4:f4:56:4b:cf:e2:3d:69:c6:f0
+# SHA256 Fingerprint: c0:a6:f4:dc:63:a2:4b:fd:cf:54:ef:2a:6a:08:2a:0a:72:de:35:80:3e:2f:f5:ff:52:7a:e5:d8:72:06:df:d5
+-----BEGIN CERTIFICATE-----
+MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe
+Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw
+IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL
+SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH
+SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh
+ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X
+DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1
+TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ
+fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA
+sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU
+WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS
+nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH
+dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip
+NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC
+AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF
+MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH
+ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB
+uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl
+PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP
+JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/
+gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2
+j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6
+5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB
+o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS
+/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z
+Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE
+W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D
+hNQ+IIX3Sj0rnP0qCglN6oH4EZw=
+-----END CERTIFICATE-----
+
+# Issuer: O=certSIGN OU=certSIGN ROOT CA
+# Subject: O=certSIGN OU=certSIGN ROOT CA
+# Label: "certSIGN ROOT CA"
+# Serial: 35210227249154
+# MD5 Fingerprint: 18:98:c0:d6:e9:3a:fc:f9:b0:f5:0c:f7:4b:01:44:17
+# SHA1 Fingerprint: fa:b7:ee:36:97:26:62:fb:2d:b0:2a:f6:bf:03:fd:e8:7c:4b:2f:9b
+# SHA256 Fingerprint: ea:a9:62:c4:fa:4a:6b:af:eb:e4:15:19:6d:35:1c:cd:88:8d:4f:53:f3:fa:8a:e6:d7:c4:66:a9:4e:60:42:bb
+-----BEGIN CERTIFICATE-----
+MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT
+AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD
+QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP
+MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC
+ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do
+0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ
+UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d
+RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ
+OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv
+JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C
+AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O
+BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ
+LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY
+MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ
+44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I
+Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw
+i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN
+9u6wWk5JRFRYX0KD
+-----END CERTIFICATE-----
+
+# Issuer: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
+# Subject: CN=NetLock Arany (Class Gold) Főtanúsítvány O=NetLock Kft. OU=Tanúsítványkiadók (Certification Services)
+# Label: "NetLock Arany (Class Gold) Főtanúsítvány"
+# Serial: 80544274841616
+# MD5 Fingerprint: c5:a1:b7:ff:73:dd:d6:d7:34:32:18:df:fc:3c:ad:88
+# SHA1 Fingerprint: 06:08:3f:59:3f:15:a1:04:a0:69:a4:6b:a9:03:d0:06:b7:97:09:91
+# SHA256 Fingerprint: 6c:61:da:c3:a2:de:f0:31:50:6b:e0:36:d2:a6:fe:40:19:94:fb:d1:3d:f9:c8:d4:66:59:92:74:c4:46:ec:98
+-----BEGIN CERTIFICATE-----
+MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG
+EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3
+MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl
+cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR
+dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB
+pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM
+b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm
+aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz
+IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT
+lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz
+AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5
+VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG
+ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2
+BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG
+AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M
+U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh
+bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C
++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC
+bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F
+uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2
+XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 1 O=Hongkong Post
+# Label: "Hongkong Post Root CA 1"
+# Serial: 1000
+# MD5 Fingerprint: a8:0d:6f:39:78:b9:43:6d:77:42:6d:98:5a:cc:23:ca
+# SHA1 Fingerprint: d6:da:a8:20:8d:09:d2:15:4d:24:b5:2f:cb:34:6e:b2:58:b2:8a:58
+# SHA256 Fingerprint: f9:e6:7d:33:6c:51:00:2a:c0:54:c6:32:02:2d:66:dd:a2:e7:e3:ff:f1:0a:d0:61:ed:31:d8:bb:b4:10:cf:b2
+-----BEGIN CERTIFICATE-----
+MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx
+FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg
+Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG
+A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr
+b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ
+jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn
+PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh
+ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9
+nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h
+q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED
+MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC
+mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3
+7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB
+oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs
+EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO
+fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi
+AmvZWg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Subject: CN=SecureSign RootCA11 O=Japan Certification Services, Inc.
+# Label: "SecureSign RootCA11"
+# Serial: 1
+# MD5 Fingerprint: b7:52:74:e2:92:b4:80:93:f2:75:e4:cc:d7:f2:ea:26
+# SHA1 Fingerprint: 3b:c4:9f:48:f8:f3:73:a0:9c:1e:bd:f8:5b:b1:c3:65:c7:d8:11:b3
+# SHA256 Fingerprint: bf:0f:ee:fb:9e:3a:58:1a:d5:f9:e9:db:75:89:98:57:43:d2:61:08:5c:4d:31:4f:6f:5d:72:59:aa:42:16:12
+-----BEGIN CERTIFICATE-----
+MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDEr
+MCkGA1UEChMiSmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoG
+A1UEAxMTU2VjdXJlU2lnbiBSb290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0
+MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSswKQYDVQQKEyJKYXBhbiBDZXJ0aWZp
+Y2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1cmVTaWduIFJvb3RD
+QTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvLTJsz
+i1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8
+h9uuywGOwvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOV
+MdrAG/LuYpmGYz+/3ZMqg6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9
+UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rPO7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni
+8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitAbpSACW22s293bzUIUPsC
+h8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZXt94wDgYD
+VR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AKChOBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xm
+KbabfSVSSUOrTC4rbnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQ
+X5Ucv+2rIrVls4W6ng+4reV6G4pQOh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWr
+QbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01y8hSyn+B/tlr0/cR7SXf+Of5
+pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061lgeLKBObjBmN
+QSdJQO7e5iNEOdyhIta6A/I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Subject: CN=Microsec e-Szigno Root CA 2009 O=Microsec Ltd.
+# Label: "Microsec e-Szigno Root CA 2009"
+# Serial: 14014712776195784473
+# MD5 Fingerprint: f8:49:f4:03:bc:44:2d:83:be:48:69:7d:29:64:fc:b1
+# SHA1 Fingerprint: 89:df:74:fe:5c:f4:0f:4a:80:f9:e3:37:7d:54:da:91:e1:01:31:8e
+# SHA256 Fingerprint: 3c:5f:81:fe:a5:fa:b8:2c:64:bf:a2:ea:ec:af:cd:e8:e0:77:fc:86:20:a7:ca:e5:37:16:3d:f3:6e:db:f3:78
+-----BEGIN CERTIFICATE-----
+MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD
+VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0
+ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G
+CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y
+OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp
+Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o
+dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP
+kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc
+cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U
+fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7
+N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC
+xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1
++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM
+Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG
+SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h
+mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk
+ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775
+tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c
+2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t
+HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R3
+# Label: "GlobalSign Root CA - R3"
+# Serial: 4835703278459759426209954
+# MD5 Fingerprint: c5:df:b8:49:ca:05:13:55:ee:2d:ba:1a:c3:3e:b0:28
+# SHA1 Fingerprint: d6:9b:56:11:48:f0:1c:77:c5:45:78:c1:09:26:df:5b:85:69:76:ad
+# SHA256 Fingerprint: cb:b5:22:d7:b7:f1:27:ad:6a:01:13:86:5b:df:1c:d4:10:2e:7d:07:59:af:63:5a:7c:f4:72:0d:c9:63:c5:3b
+-----BEGIN CERTIFICATE-----
+MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G
+A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp
+Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4
+MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG
+A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8
+RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT
+gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm
+KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd
+QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ
+XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw
+DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o
+LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU
+RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp
+jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK
+6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX
+mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs
+Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH
+WD9f
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 6047274297262753887
+# MD5 Fingerprint: 73:3a:74:7a:ec:bb:a3:96:a6:c2:e4:e2:c8:9b:c0:c3
+# SHA1 Fingerprint: ae:c5:fb:3f:c8:e1:bf:c4:e5:4f:03:07:5a:9a:e8:00:b7:f7:b6:fa
+# SHA256 Fingerprint: 04:04:80:28:bf:1f:28:64:d4:8f:9a:d4:d8:32:94:36:6a:82:88:56:55:3f:3b:14:30:3f:90:14:7f:5d:40:ef
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy
+MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD
+VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD
+VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp
+cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv
+ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl
+AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF
+661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9
+am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1
+ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481
+PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS
+3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k
+SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF
+3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM
+ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g
+StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz
+Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB
+jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V
+-----END CERTIFICATE-----
+
+# Issuer: CN=Izenpe.com O=IZENPE S.A.
+# Subject: CN=Izenpe.com O=IZENPE S.A.
+# Label: "Izenpe.com"
+# Serial: 917563065490389241595536686991402621
+# MD5 Fingerprint: a6:b0:cd:85:80:da:5c:50:34:a3:39:90:2f:55:67:73
+# SHA1 Fingerprint: 2f:78:3d:25:52:18:a7:4a:65:39:71:b5:2c:a2:9c:45:15:6f:e9:19
+# SHA256 Fingerprint: 25:30:cc:8e:98:32:15:02:ba:d9:6f:9b:1f:ba:1b:09:9e:2d:29:9e:0f:45:48:bb:91:4f:36:3b:c0:d4:53:1f
+-----BEGIN CERTIFICATE-----
+MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4
+MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6
+ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD
+VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq
+scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO
+xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H
+LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX
+uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD
+yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+
+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q
+rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN
+BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L
+hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB
+QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+
+HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu
+Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg
+QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB
+BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx
+MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA
+A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb
+laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56
+awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo
+JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw
+LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT
+VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk
+LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb
+UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/
+QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+
+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls
+QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Subject: CN=Go Daddy Root Certificate Authority - G2 O=GoDaddy.com, Inc.
+# Label: "Go Daddy Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 80:3a:bc:22:c1:e6:fb:8d:9b:3b:27:4a:32:1b:9a:01
+# SHA1 Fingerprint: 47:be:ab:c9:22:ea:e8:0e:78:78:34:62:a7:9f:45:c2:54:fd:e6:8b
+# SHA256 Fingerprint: 45:14:0b:32:47:eb:9c:c8:c5:b4:f0:d7:b5:30:91:f7:32:92:08:9e:6e:5a:63:e2:74:9d:d3:ac:a9:19:8e:da
+-----BEGIN CERTIFICATE-----
+MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT
+EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz
+NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH
+EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE
+AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw
+DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD
+E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH
+/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy
+DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh
+GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR
+tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA
+AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX
+WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu
+9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr
+gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo
+2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO
+LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI
+4uJEvlz36hz1
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: d6:39:81:c6:52:7e:96:69:fc:fc:ca:66:ed:05:f2:96
+# SHA1 Fingerprint: b5:1c:06:7c:ee:2b:0c:3d:f8:55:ab:2d:92:f4:fe:39:d4:e7:0f:0e
+# SHA256 Fingerprint: 2c:e1:cb:0b:f9:d2:f9:e1:02:99:3f:be:21:51:52:c3:b2:dd:0c:ab:de:1c:68:e5:31:9b:83:91:54:db:b7:f5
+-----BEGIN CERTIFICATE-----
+MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs
+ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw
+MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6
+b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj
+aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp
+Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC
+ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg
+nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N
+Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN
+dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0
+HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO
+BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G
+CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU
+sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3
+4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg
+8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K
+pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1
+mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Subject: CN=Starfield Services Root Certificate Authority - G2 O=Starfield Technologies, Inc.
+# Label: "Starfield Services Root Certificate Authority - G2"
+# Serial: 0
+# MD5 Fingerprint: 17:35:74:af:7b:61:1c:eb:f4:f9:3c:e2:ee:40:f9:a2
+# SHA1 Fingerprint: 92:5a:8f:8d:2c:6d:04:e0:66:5f:59:6a:ff:22:d8:63:e8:25:6f:3f
+# SHA256 Fingerprint: 56:8d:69:05:a2:c8:87:08:a4:b3:02:51:90:ed:cf:ed:b1:97:4a:60:6a:13:c6:e5:29:0f:cb:2a:e6:3e:da:b5
+-----BEGIN CERTIFICATE-----
+MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx
+EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT
+HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs
+ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5
+MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD
+VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy
+ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy
+dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p
+OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2
+8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K
+Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe
+hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk
+6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw
+DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q
+AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI
+bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB
+ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z
+qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd
+iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn
+0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN
+sSi6
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Commercial O=AffirmTrust
+# Subject: CN=AffirmTrust Commercial O=AffirmTrust
+# Label: "AffirmTrust Commercial"
+# Serial: 8608355977964138876
+# MD5 Fingerprint: 82:92:ba:5b:ef:cd:8a:6f:a6:3d:55:f9:84:f6:d6:b7
+# SHA1 Fingerprint: f9:b5:b6:32:45:5f:9c:be:ec:57:5f:80:dc:e9:6e:2c:c7:b2:78:b7
+# SHA256 Fingerprint: 03:76:ab:1d:54:c5:f9:80:3c:e4:b2:e2:01:a0:ee:7e:ef:7b:57:b6:36:e8:a9:3c:9b:8d:48:60:c9:6f:5f:a7
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP
+Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr
+ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL
+MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1
+yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr
+VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/
+nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG
+XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj
+vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt
+Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g
+N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC
+nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Networking O=AffirmTrust
+# Subject: CN=AffirmTrust Networking O=AffirmTrust
+# Label: "AffirmTrust Networking"
+# Serial: 8957382827206547757
+# MD5 Fingerprint: 42:65:ca:be:01:9a:9a:4c:a9:8c:41:49:cd:c0:d5:7f
+# SHA1 Fingerprint: 29:36:21:02:8b:20:ed:02:f5:66:c5:32:d1:d6:ed:90:9f:45:00:2f
+# SHA256 Fingerprint: 0a:81:ec:5a:92:97:77:f1:45:90:4a:f3:8d:5d:50:9f:66:b5:e2:c5:8f:cd:b5:31:05:8b:0e:17:f3:f0:b4:1b
+-----BEGIN CERTIFICATE-----
+MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz
+dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL
+MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp
+cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
+AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y
+YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua
+kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL
+QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp
+6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG
+yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i
+QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ
+KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO
+tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu
+QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ
+Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u
+olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48
+x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium O=AffirmTrust
+# Subject: CN=AffirmTrust Premium O=AffirmTrust
+# Label: "AffirmTrust Premium"
+# Serial: 7893706540734352110
+# MD5 Fingerprint: c4:5d:0e:48:b6:ac:28:30:4e:0a:bc:f9:38:16:87:57
+# SHA1 Fingerprint: d8:a6:33:2c:e0:03:6f:b1:85:f6:63:4f:7d:6a:06:65:26:32:28:27
+# SHA256 Fingerprint: 70:a7:3f:7f:37:6b:60:07:42:48:90:45:34:b1:14:82:d5:bf:0e:69:8e:cc:49:8d:f5:25:77:eb:f2:e9:3b:9a
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE
+BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz
+dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG
+A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U
+cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf
+qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ
+JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS
+s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5
+HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7
+70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG
+V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S
+qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S
+5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia
+C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX
+OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE
+FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2
+KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg
+Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B
+8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ
+MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc
+0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ
+u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF
+u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH
+YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8
+GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO
+RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e
+KeC2uAloGRwYQw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Subject: CN=AffirmTrust Premium ECC O=AffirmTrust
+# Label: "AffirmTrust Premium ECC"
+# Serial: 8401224907861490260
+# MD5 Fingerprint: 64:b0:09:55:cf:b1:d5:99:e2:be:13:ab:a6:5d:ea:4d
+# SHA1 Fingerprint: b8:23:6b:00:2f:1d:16:86:53:01:55:6c:11:a4:37:ca:eb:ff:c3:bb
+# SHA256 Fingerprint: bd:71:fd:f6:da:97:e4:cf:62:d1:64:7a:dd:25:81:b0:7d:79:ad:f8:39:7e:b4:ec:ba:9c:5e:84:88:82:14:23
+-----BEGIN CERTIFICATE-----
+MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC
+VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ
+cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ
+BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt
+VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D
+0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9
+ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G
+A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G
+A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs
+aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I
+flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA"
+# Serial: 279744
+# MD5 Fingerprint: d5:e9:81:40:c5:18:69:fc:46:2c:89:75:62:0f:aa:78
+# SHA1 Fingerprint: 07:e0:32:e0:20:b7:2c:3f:19:2f:06:28:a2:59:3a:19:a7:0f:06:9e
+# SHA256 Fingerprint: 5c:58:46:8d:55:f5:8e:49:7e:74:39:82:d2:b5:00:10:b6:d1:65:37:4a:cf:83:a7:d4:a3:2d:b7:68:c4:40:8e
+-----BEGIN CERTIFICATE-----
+MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM
+MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D
+ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU
+cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3
+WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg
+Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw
+IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH
+UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM
+TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU
+BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM
+kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x
+AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV
+HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y
+sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL
+I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8
+J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY
+VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI
+03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Root Certification Authority O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Root Certification Authority"
+# Serial: 1
+# MD5 Fingerprint: aa:08:8f:f6:f9:7b:b7:f2:b1:a7:1e:9b:ea:ea:bd:79
+# SHA1 Fingerprint: cf:9e:87:6d:d3:eb:fc:42:26:97:a3:b5:a3:7a:a0:76:a9:06:23:48
+# SHA256 Fingerprint: bf:d8:8f:e1:10:1c:41:ae:3e:80:1b:f8:be:56:35:0e:e9:ba:d1:a6:b9:bd:51:5e:dc:5c:6d:5b:87:11:ac:44
+-----BEGIN CERTIFICATE-----
+MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES
+MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU
+V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz
+WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO
+LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB
+AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE
+AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH
+K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX
+RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z
+rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx
+3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq
+hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC
+MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls
+XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn
+aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ
+YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw==
+-----END CERTIFICATE-----
+
+# Issuer: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Subject: O=SECOM Trust Systems CO.,LTD. OU=Security Communication RootCA2
+# Label: "Security Communication RootCA2"
+# Serial: 0
+# MD5 Fingerprint: 6c:39:7d:a4:0e:55:59:b2:3f:d6:41:b1:12:50:de:43
+# SHA1 Fingerprint: 5f:3b:8c:f2:f8:10:b3:7d:78:b4:ce:ec:19:19:c3:73:34:b9:c7:74
+# SHA256 Fingerprint: 51:3b:2c:ec:b8:10:d4:cd:e5:dd:85:39:1a:df:c6:c2:dd:60:d8:7b:b7:36:d2:b5:21:48:4a:a4:7a:0e:be:f6
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe
+U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX
+DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy
+dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj
+YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV
+OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr
+zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM
+VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ
+hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO
+ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw
+awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs
+OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3
+DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF
+coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc
+okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8
+t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy
+1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/
+SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03
+-----END CERTIFICATE-----
+
+# Issuer: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Subject: CN=Actalis Authentication Root CA O=Actalis S.p.A./03358520967
+# Label: "Actalis Authentication Root CA"
+# Serial: 6271844772424770508
+# MD5 Fingerprint: 69:c1:0d:4f:07:a3:1b:c3:fe:56:3d:04:bc:11:f6:a6
+# SHA1 Fingerprint: f3:73:b3:87:06:5a:28:84:8a:f2:f3:4a:ce:19:2b:dd:c7:8e:9c:ac
+# SHA256 Fingerprint: 55:92:60:84:ec:96:3a:64:b9:6e:2a:be:01:ce:0b:a8:6a:64:fb:fe:bc:c7:aa:b5:af:c1:55:b3:7f:d7:60:66
+-----BEGIN CERTIFICATE-----
+MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE
+BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290
+IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC
+SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1
+ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv
+UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX
+4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9
+KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/
+gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb
+rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ
+51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F
+be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe
+KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F
+v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn
+fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7
+jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz
+ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt
+ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL
+e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70
+jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz
+WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V
+SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j
+pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX
+X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok
+fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R
+K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU
+ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU
+LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT
+LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 2 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 2 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 46:a7:d2:fe:45:fb:64:5a:a8:59:90:9b:78:44:9b:29
+# SHA1 Fingerprint: 49:0a:75:74:de:87:0a:47:fe:58:ee:f6:c7:6b:eb:c6:0b:12:40:99
+# SHA256 Fingerprint: 9a:11:40:25:19:7c:5b:b9:5d:94:e6:3d:55:cd:43:79:08:47:b6:46:b2:3c:df:11:ad:a4:a0:0e:ff:15:fb:48
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr
+6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV
+L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91
+1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx
+MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ
+QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB
+arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr
+Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi
+FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS
+P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN
+9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz
+uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h
+9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s
+A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t
+OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo
++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7
+KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2
+DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us
+H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ
+I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7
+5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h
+3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz
+Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Subject: CN=Buypass Class 3 Root CA O=Buypass AS-983163327
+# Label: "Buypass Class 3 Root CA"
+# Serial: 2
+# MD5 Fingerprint: 3d:3b:18:9e:2c:64:5a:e8:d5:88:ce:0e:f9:37:c2:ec
+# SHA1 Fingerprint: da:fa:f7:fa:66:84:ec:06:8f:14:50:bd:c7:c2:81:a5:bc:a9:64:57
+# SHA256 Fingerprint: ed:f7:eb:bc:a2:7a:2a:38:4d:38:7b:7d:40:10:c6:66:e2:ed:b4:84:3e:4c:29:b4:ae:1d:5b:93:32:e6:b2:4d
+-----BEGIN CERTIFICATE-----
+MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd
+MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg
+Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow
+TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw
+HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB
+BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y
+ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E
+N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9
+tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX
+0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X
+KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY
+zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS
+O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D
+34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP
+K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3
+AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv
+Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj
+QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV
+cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS
+IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2
+HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa
+O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv
+033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u
+dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE
+kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41
+3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD
+u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq
+4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 3 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 3"
+# Serial: 1
+# MD5 Fingerprint: ca:fb:40:a8:4e:39:92:8a:1d:fe:8e:2f:c4:27:ea:ef
+# SHA1 Fingerprint: 55:a6:72:3e:cb:f2:ec:cd:c3:23:74:70:19:9d:2a:be:11:e3:81:d1
+# SHA256 Fingerprint: fd:73:da:d3:1c:64:4f:f1:b4:3b:ef:0c:cd:da:96:71:0b:9c:d9:87:5e:ca:7e:31:70:7a:f3:e9:6d:52:2b:bd
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN
+8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/
+RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4
+hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5
+ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM
+EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1
+A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy
+WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ
+1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30
+6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT
+91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml
+e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p
+TpPDpFQUWw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 2009"
+# Serial: 623603
+# MD5 Fingerprint: cd:e0:25:69:8d:47:ac:9c:89:35:90:f7:fd:51:3d:2f
+# SHA1 Fingerprint: 58:e8:ab:b0:36:15:33:fb:80:f7:9b:1b:6d:29:d3:ff:8d:5f:00:f0
+# SHA256 Fingerprint: 49:e7:a4:42:ac:f0:ea:62:87:05:00:54:b5:25:64:b6:50:e4:f4:9e:42:e3:48:d6:aa:38:e0:39:e9:57:b1:c1
+-----BEGIN CERTIFICATE-----
+MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha
+ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM
+HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03
+UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42
+tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R
+ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM
+lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp
+/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G
+A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G
+A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj
+dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy
+MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl
+cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js
+L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL
+BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni
+acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0
+o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K
+zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8
+PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y
+Johw1+qRzT65ysCQblrGXnRl11z+o+I=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Subject: CN=D-TRUST Root Class 3 CA 2 EV 2009 O=D-Trust GmbH
+# Label: "D-TRUST Root Class 3 CA 2 EV 2009"
+# Serial: 623604
+# MD5 Fingerprint: aa:c6:43:2c:5e:2d:cd:c4:34:c0:50:4f:11:02:4f:b6
+# SHA1 Fingerprint: 96:c9:1b:0b:95:b4:10:98:42:fa:d0:d8:22:79:fe:60:fa:b9:16:83
+# SHA256 Fingerprint: ee:c5:49:6b:98:8c:e9:86:25:b9:34:09:2e:ec:29:08:be:d0:b0:f3:16:c2:d4:73:0c:84:ea:f1:f3:d3:48:81
+-----BEGIN CERTIFICATE-----
+MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF
+MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD
+bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw
+NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV
+BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn
+ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0
+3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z
+qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR
+p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8
+HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw
+ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea
+HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw
+Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh
+c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E
+RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku
+Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp
+3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05
+nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF
+CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na
+xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX
+KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1
+-----END CERTIFICATE-----
+
+# Issuer: CN=CA Disig Root R2 O=Disig a.s.
+# Subject: CN=CA Disig Root R2 O=Disig a.s.
+# Label: "CA Disig Root R2"
+# Serial: 10572350602393338211
+# MD5 Fingerprint: 26:01:fb:d8:27:a7:17:9a:45:54:38:1a:43:01:3b:03
+# SHA1 Fingerprint: b5:61:eb:ea:a4:de:e4:25:4b:69:1a:98:a5:57:47:c2:34:c7:d9:71
+# SHA256 Fingerprint: e2:3d:4a:03:6d:7b:70:e9:f5:95:b1:42:20:79:d2:b9:1e:df:bb:1f:b6:51:a0:63:3e:aa:8a:9d:c5:f8:07:03
+-----BEGIN CERTIFICATE-----
+MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV
+BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu
+MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy
+MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw
+ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe
+NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH
+PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I
+x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe
+QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR
+yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO
+QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912
+H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ
+QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD
+i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs
+nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1
+rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud
+DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI
+hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM
+tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf
+GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb
+lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka
++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal
+TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i
+nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3
+gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr
+G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os
+zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x
+L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL
+-----END CERTIFICATE-----
+
+# Issuer: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Subject: CN=ACCVRAIZ1 O=ACCV OU=PKIACCV
+# Label: "ACCVRAIZ1"
+# Serial: 6828503384748696800
+# MD5 Fingerprint: d0:a0:5a:ee:05:b6:09:94:21:a1:7d:f1:b2:29:82:02
+# SHA1 Fingerprint: 93:05:7a:88:15:c6:4f:ce:88:2f:fa:91:16:52:28:78:bc:53:64:17
+# SHA256 Fingerprint: 9a:6e:c0:12:e1:a7:da:9d:be:34:19:4d:47:8a:d7:c0:db:18:22:fb:07:1d:f1:29:81:49:6e:d1:04:38:41:13
+-----BEGIN CERTIFICATE-----
+MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UE
+AwwJQUNDVlJBSVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQsw
+CQYDVQQGEwJFUzAeFw0xMTA1MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQ
+BgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwHUEtJQUNDVjENMAsGA1UECgwEQUND
+VjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCb
+qau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gMjmoY
+HtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWo
+G2ioPej0RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpA
+lHPrzg5XPAOBOp0KoVdDaaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhr
+IA8wKFSVf+DuzgpmndFALW4ir50awQUZ0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/
+0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDGWuzndN9wrqODJerWx5eH
+k6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs78yM2x/47
+4KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMO
+m3WR5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpa
+cXpkatcnYGMN285J9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPl
+uUsXQA+xtrn13k/c4LOsOxFwYIRKQ26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYI
+KwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRwOi8vd3d3LmFjY3YuZXMvZmls
+ZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEuY3J0MB8GCCsG
+AQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2
+VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeT
+VfZW6oHlNsyMHj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIG
+CCsGAQUFBwICMIIBFB6CARAAQQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUA
+cgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBhAO0AegAgAGQAZQAgAGwAYQAgAEEA
+QwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUAYwBuAG8AbABvAGcA
+7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBjAHQA
+cgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAA
+QwBQAFMAIABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUA
+czAwBggrBgEFBQcCARYkaHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2Mu
+aHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRt
+aW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2MV9kZXIuY3JsMA4GA1Ud
+DwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZIhvcNAQEF
+BQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdp
+D70ER9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gU
+JyCpZET/LtZ1qmxNYEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+m
+AM/EKXMRNt6GGT6d7hmKG9Ww7Y49nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepD
+vV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJTS+xJlsndQAJxGJ3KQhfnlms
+tn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3sCPdK6jT2iWH
+7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h
+I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szA
+h1xA2syVP1XgNce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xF
+d3+YJ5oyXSrjhO7FmGYvliAd3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2H
+pPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3pEfbRD0tVNEYqi4Y7
+-----END CERTIFICATE-----
+
+# Issuer: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Subject: CN=TWCA Global Root CA O=TAIWAN-CA OU=Root CA
+# Label: "TWCA Global Root CA"
+# Serial: 3262
+# MD5 Fingerprint: f9:03:7e:cf:e6:9e:3c:73:7a:2a:90:07:69:ff:2b:96
+# SHA1 Fingerprint: 9c:bb:48:53:f6:a4:f6:d3:52:a4:e8:32:52:55:60:13:f5:ad:af:65
+# SHA256 Fingerprint: 59:76:90:07:f7:68:5d:0f:cd:50:87:2f:9f:95:d5:75:5a:5b:2b:45:7d:81:f3:69:2b:61:0a:98:67:2f:0e:1b
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx
+EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT
+VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5
+NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT
+B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF
+10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz
+0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh
+MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH
+zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc
+46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2
+yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi
+laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP
+oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA
+BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE
+qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm
+4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL
+1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn
+LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF
+H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo
+RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+
+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh
+15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW
+6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW
+nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j
+wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz
+aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy
+KwbQBM0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Subject: CN=TeliaSonera Root CA v1 O=TeliaSonera
+# Label: "TeliaSonera Root CA v1"
+# Serial: 199041966741090107964904287217786801558
+# MD5 Fingerprint: 37:41:49:1b:18:56:9a:26:f5:ad:c2:66:fb:40:a5:4c
+# SHA1 Fingerprint: 43:13:bb:96:f1:d5:86:9b:c1:4e:6a:92:f6:cf:f6:34:69:87:82:37
+# SHA256 Fingerprint: dd:69:36:fe:21:f8:f0:77:c1:23:a1:a5:21:c1:22:24:f7:22:55:b7:3e:03:a7:26:06:93:e8:a2:4b:0f:a3:89
+-----BEGIN CERTIFICATE-----
+MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw
+NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv
+b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD
+VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F
+VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1
+7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X
+Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+
+/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs
+81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm
+dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe
+Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu
+sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4
+pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs
+slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ
+arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD
+VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG
+9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl
+dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx
+0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj
+TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed
+Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7
+Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI
+OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7
+vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW
+t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn
+HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Subject: CN=E-Tugra Certification Authority O=E-Tu\u011fra EBG Bili\u015fim Teknolojileri ve Hizmetleri A.\u015e. OU=E-Tugra Sertifikasyon Merkezi
+# Label: "E-Tugra Certification Authority"
+# Serial: 7667447206703254355
+# MD5 Fingerprint: b8:a1:03:63:b0:bd:21:71:70:8a:6f:13:3a:bb:79:49
+# SHA1 Fingerprint: 51:c6:e7:08:49:06:6e:f3:92:d4:5c:a0:0d:6d:a3:62:8f:c3:52:39
+# SHA256 Fingerprint: b0:bf:d5:2b:b0:d7:d9:bd:92:bf:5d:4d:c1:3d:a2:55:c0:2c:54:2f:37:83:65:ea:89:39:11:f5:5e:55:f2:3c
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV
+BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC
+aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV
+BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1
+Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz
+MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+
+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp
+em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN
+ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY
+B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH
+D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF
+Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo
+q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D
+k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH
+fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut
+dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM
+ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8
+zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn
+rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX
+U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6
+Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5
+XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF
+Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR
+HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY
+GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c
+77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3
++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK
+vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6
+FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl
+yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P
+AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD
+y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d
+NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Subject: CN=T-TeleSec GlobalRoot Class 2 O=T-Systems Enterprise Services GmbH OU=T-Systems Trust Center
+# Label: "T-TeleSec GlobalRoot Class 2"
+# Serial: 1
+# MD5 Fingerprint: 2b:9b:9e:e4:7b:6c:1f:00:72:1a:cc:c1:77:79:df:6a
+# SHA1 Fingerprint: 59:0d:2d:7d:88:4f:40:2e:61:7e:a5:62:32:17:65:cf:17:d8:94:e9
+# SHA256 Fingerprint: 91:e2:f5:78:8d:58:10:eb:a7:ba:58:73:7d:e1:54:8a:8e:ca:cd:01:45:98:bc:0b:14:3e:04:1b:17:05:25:52
+-----BEGIN CERTIFICATE-----
+MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx
+KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd
+BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl
+YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1
+OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy
+aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50
+ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G
+CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd
+AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC
+FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi
+1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq
+jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ
+wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/
+WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy
+NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw
+IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6
+g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN
+9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP
+BSeOE6Fuwg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Atos TrustedRoot 2011 O=Atos
+# Subject: CN=Atos TrustedRoot 2011 O=Atos
+# Label: "Atos TrustedRoot 2011"
+# Serial: 6643877497813316402
+# MD5 Fingerprint: ae:b9:c4:32:4b:ac:7f:5d:66:cc:77:94:bb:2a:77:56
+# SHA1 Fingerprint: 2b:b1:f5:3e:55:0c:1d:c5:f1:d4:e6:b7:6a:46:4b:55:06:02:ac:21
+# SHA256 Fingerprint: f3:56:be:a2:44:b7:a9:1e:b3:5d:53:ca:9a:d7:86:4a:ce:01:8e:2d:35:d5:f8:f9:6d:df:68:a6:f4:1a:a4:74
+-----BEGIN CERTIFICATE-----
+MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE
+AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG
+EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM
+FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC
+REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp
+Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM
+VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+
+SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ
+4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L
+cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi
+eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG
+A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3
+DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j
+vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP
+DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc
+maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D
+lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv
+KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 1 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 1 G3"
+# Serial: 687049649626669250736271037606554624078720034195
+# MD5 Fingerprint: a4:bc:5b:3f:fe:37:9a:fa:64:f0:e2:fa:05:3d:0b:ab
+# SHA1 Fingerprint: 1b:8e:ea:57:96:29:1a:c9:39:ea:b8:0a:81:1a:73:73:c0:93:79:67
+# SHA256 Fingerprint: 8a:86:6f:d1:b2:76:b5:7e:57:8e:92:1c:65:82:8a:2b:ed:58:e9:f2:f2:88:05:41:34:b7:f1:f4:bf:c9:cc:74
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00
+MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV
+wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe
+rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341
+68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh
+4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp
+UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o
+abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc
+3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G
+KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt
+hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO
+Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt
+zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD
+ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC
+MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2
+cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN
+qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5
+YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv
+b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2
+8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k
+NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj
+ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp
+q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt
+nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 2 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 2 G3"
+# Serial: 390156079458959257446133169266079962026824725800
+# MD5 Fingerprint: af:0c:86:6e:bf:40:2d:7f:0b:3e:12:50:ba:12:3d:06
+# SHA1 Fingerprint: 09:3c:61:f3:8b:8b:dc:7d:55:df:75:38:02:05:00:e1:25:f5:c8:36
+# SHA256 Fingerprint: 8f:e4:fb:0a:f9:3a:4d:0d:67:db:0b:eb:b2:3e:37:c7:1b:f3:25:dc:bc:dd:24:0e:a0:4d:af:58:b4:7e:18:40
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00
+MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf
+qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW
+n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym
+c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+
+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1
+o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j
+IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq
+IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz
+8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh
+vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l
+7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG
+cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD
+ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66
+AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC
+roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga
+W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n
+lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE
++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV
+csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd
+dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM
+HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4
+WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M
+-----END CERTIFICATE-----
+
+# Issuer: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Subject: CN=QuoVadis Root CA 3 G3 O=QuoVadis Limited
+# Label: "QuoVadis Root CA 3 G3"
+# Serial: 268090761170461462463995952157327242137089239581
+# MD5 Fingerprint: df:7d:b9:ad:54:6f:68:a1:df:89:57:03:97:43:b0:d7
+# SHA1 Fingerprint: 48:12:bd:92:3c:a8:c4:39:06:e7:30:6d:27:96:e6:a4:cf:22:2e:7d
+# SHA256 Fingerprint: 88:ef:81:de:20:2e:b0:18:45:2e:43:f8:64:72:5c:ea:5f:bd:1f:c2:d9:d2:05:73:07:09:c5:d8:b8:69:0f:46
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL
+BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00
+MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM
+aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR
+/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu
+FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR
+U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c
+ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR
+FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k
+A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw
+eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl
+sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp
+VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q
+A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+
+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px
+KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI
+FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv
+oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg
+u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP
+0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf
+3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl
+8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+
+DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN
+PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/
+ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G2"
+# Serial: 15385348160840213938643033620894905419
+# MD5 Fingerprint: 92:38:b9:f8:63:24:82:65:2c:57:33:e6:fe:81:8f:9d
+# SHA1 Fingerprint: a1:4b:48:d9:43:ee:0a:0e:40:90:4f:3c:e0:a4:c0:91:93:51:5d:3f
+# SHA256 Fingerprint: 7d:05:eb:b6:82:33:9f:8c:94:51:ee:09:4e:eb:fe:fa:79:53:a1:14:ed:b2:f4:49:49:45:2f:ab:7d:2f:c1:85
+-----BEGIN CERTIFICATE-----
+MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv
+b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG
+EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA
+n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc
+biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp
+EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA
+bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu
+YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB
+AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW
+BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI
+QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I
+0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni
+lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9
+B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv
+ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo
+IhNzbM8m9Yop5w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Assured ID Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Assured ID Root G3"
+# Serial: 15459312981008553731928384953135426796
+# MD5 Fingerprint: 7c:7f:65:31:0c:81:df:8d:ba:3e:99:e2:5c:ad:6e:fb
+# SHA1 Fingerprint: f5:17:a2:4f:9a:48:c6:c9:f8:a2:00:26:9f:dc:0f:48:2c:ab:30:89
+# SHA256 Fingerprint: 7e:37:cb:8b:4c:47:09:0c:ab:36:55:1b:a6:f4:5d:b8:40:68:0f:ba:16:6a:95:2d:b1:00:71:7f:43:05:3f:c2
+-----BEGIN CERTIFICATE-----
+MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg
+RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf
+Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q
+RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/
+BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD
+AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY
+JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv
+6pZjamVFkpUBtA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G2 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G2"
+# Serial: 4293743540046975378534879503202253541
+# MD5 Fingerprint: e4:a6:8a:c8:54:ac:52:42:46:0a:fd:72:48:1b:2a:44
+# SHA1 Fingerprint: df:3c:24:f9:bf:d6:66:76:1b:26:80:73:fe:06:d1:cc:8d:4f:82:a4
+# SHA256 Fingerprint: cb:3c:cb:b7:60:31:e5:e0:13:8f:8d:d3:9a:23:f9:de:47:ff:c3:5e:43:c1:14:4c:ea:27:d4:6a:5a:b1:cb:5f
+-----BEGIN CERTIFICATE-----
+MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH
+MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT
+MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG
+9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI
+2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx
+1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ
+q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz
+tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ
+vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV
+5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY
+1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4
+NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG
+Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91
+8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe
+pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl
+MrY=
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Global Root G3 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Global Root G3"
+# Serial: 7089244469030293291760083333884364146
+# MD5 Fingerprint: f5:5d:a4:50:a5:fb:28:7e:1e:0f:0d:cc:96:57:56:ca
+# SHA1 Fingerprint: 7e:04:de:89:6a:3e:66:6d:00:e6:87:d3:3f:fa:d9:3b:e8:3d:34:9e
+# SHA256 Fingerprint: 31:ad:66:48:f8:10:41:38:c7:38:f3:9e:a4:32:01:33:39:3e:3a:18:cc:02:29:6e:f9:7c:2a:c9:ef:67:31:d0
+-----BEGIN CERTIFICATE-----
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw
+CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu
+ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe
+Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw
+EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x
+IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG
+fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO
+Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd
+BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx
+AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/
+oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8
+sycX
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Subject: CN=DigiCert Trusted Root G4 O=DigiCert Inc OU=www.digicert.com
+# Label: "DigiCert Trusted Root G4"
+# Serial: 7451500558977370777930084869016614236
+# MD5 Fingerprint: 78:f2:fc:aa:60:1f:2f:b4:eb:c9:37:ba:53:2e:75:49
+# SHA1 Fingerprint: dd:fb:16:cd:49:31:c9:73:a2:03:7d:3f:c8:3a:4d:7d:77:5d:05:e4
+# SHA256 Fingerprint: 55:2f:7b:dc:f1:a7:af:9e:6c:e6:72:01:7f:4f:12:ab:f7:72:40:c7:8e:76:1a:c2:03:d1:d9:d2:0a:c8:99:88
+-----BEGIN CERTIFICATE-----
+MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi
+MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3
+d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg
+RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu
+Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y
+ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If
+xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV
+ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO
+DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ
+jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/
+CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi
+EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM
+fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY
+uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK
+chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t
+9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB
+hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD
+ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2
+SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd
++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc
+fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa
+sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N
+cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N
+0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie
+4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI
+r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1
+/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm
+gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+
+-----END CERTIFICATE-----
+
+# Issuer: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Subject: CN=COMODO RSA Certification Authority O=COMODO CA Limited
+# Label: "COMODO RSA Certification Authority"
+# Serial: 101909084537582093308941363524873193117
+# MD5 Fingerprint: 1b:31:b0:71:40:36:cc:14:36:91:ad:c4:3e:fd:ec:18
+# SHA1 Fingerprint: af:e5:d2:44:a8:d1:19:42:30:ff:47:9f:e2:f8:97:bb:cd:7a:8c:b4
+# SHA256 Fingerprint: 52:f0:e1:c4:e5:8e:c6:29:29:1b:60:31:7f:07:46:71:b8:5d:7e:a8:0d:5b:07:27:34:63:53:4b:32:b4:02:34
+-----BEGIN CERTIFICATE-----
+MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB
+hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G
+A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV
+BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT
+EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR
+Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh
+dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR
+6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X
+pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC
+9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV
+/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf
+Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z
++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w
+qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah
+SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC
+u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf
+Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq
+crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E
+FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB
+/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl
+wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM
+4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV
+2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna
+FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ
+CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK
+boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke
+jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL
+S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb
+QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl
+0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB
+NVOFBkpdn627G190
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust RSA Certification Authority O=The USERTRUST Network
+# Label: "USERTrust RSA Certification Authority"
+# Serial: 2645093764781058787591871645665788717
+# MD5 Fingerprint: 1b:fe:69:d1:91:b7:19:33:a3:72:a8:0f:e1:55:e5:b5
+# SHA1 Fingerprint: 2b:8f:1b:57:33:0d:bb:a2:d0:7a:6c:51:f7:0e:e9:0d:da:b9:ad:8e
+# SHA256 Fingerprint: e7:93:c9:b0:2f:d8:aa:13:e2:1c:31:22:8a:cc:b0:81:19:64:3b:74:9c:89:89:64:b1:74:6d:46:c3:d4:cb:d2
+-----BEGIN CERTIFICATE-----
+MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB
+iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl
+cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV
+BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw
+MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV
+BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU
+aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B
+3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY
+tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/
+Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2
+VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT
+79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6
+c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT
+Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l
+c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee
+UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE
+Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd
+BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G
+A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF
+Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO
+VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3
+ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs
+8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR
+iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze
+Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ
+XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/
+qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB
+VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB
+L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG
+jjxDah2nGN59PRbxYvnKkKj9
+-----END CERTIFICATE-----
+
+# Issuer: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Subject: CN=USERTrust ECC Certification Authority O=The USERTRUST Network
+# Label: "USERTrust ECC Certification Authority"
+# Serial: 123013823720199481456569720443997572134
+# MD5 Fingerprint: fa:68:bc:d9:b5:7f:ad:fd:c9:1d:06:83:28:cc:24:c1
+# SHA1 Fingerprint: d1:cb:ca:5d:b2:d5:2a:7f:69:3b:67:4d:e5:f0:5a:1d:0c:95:7d:f0
+# SHA256 Fingerprint: 4f:f4:60:d5:4b:9c:86:da:bf:bc:fc:57:12:e0:40:0d:2b:ed:3f:bc:4d:4f:bd:aa:86:e0:6a:dc:d2:a9:ad:7a
+-----BEGIN CERTIFICATE-----
+MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl
+eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT
+JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx
+MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT
+Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg
+VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm
+aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo
+I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng
+o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G
+A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB
+zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW
+RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R5
+# Label: "GlobalSign ECC Root CA - R5"
+# Serial: 32785792099990507226680698011560947931244
+# MD5 Fingerprint: 9f:ad:3b:1c:02:1e:8a:ba:17:74:38:81:0c:a2:bc:08
+# SHA1 Fingerprint: 1f:24:c6:30:cd:a4:18:ef:20:69:ff:ad:4f:dd:5f:46:3a:1b:69:aa
+# SHA256 Fingerprint: 17:9f:bc:14:8a:3d:d0:0f:d2:4e:a1:34:58:cc:43:bf:a7:f5:9c:81:82:d7:83:a5:13:f6:eb:ec:10:0c:89:24
+-----BEGIN CERTIFICATE-----
+MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk
+MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH
+bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX
+DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD
+QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu
+MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc
+8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke
+hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD
+VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI
+KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg
+515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO
+xwy8p2Fp8fc74SrL+SvzZpA3
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Commercial Root CA 1 O=IdenTrust
+# Label: "IdenTrust Commercial Root CA 1"
+# Serial: 13298821034946342390520003877796839426
+# MD5 Fingerprint: b3:3e:77:73:75:ee:a0:d3:e3:7e:49:63:49:59:bb:c7
+# SHA1 Fingerprint: df:71:7e:aa:4a:d9:4e:c9:55:84:99:60:2d:48:de:5f:bc:f0:3a:25
+# SHA256 Fingerprint: 5d:56:49:9b:e4:d2:e0:8b:cf:ca:d0:8a:3e:38:72:3d:50:50:3b:de:70:69:48:e4:2f:55:60:30:19:e5:28:ae
+-----BEGIN CERTIFICATE-----
+MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu
+VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw
+MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw
+JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT
+3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU
++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp
+S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1
+bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi
+T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL
+vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK
+Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK
+dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT
+c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv
+l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N
+iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB
+/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD
+ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH
+6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt
+LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93
+nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3
++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK
+W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT
+AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq
+l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG
+4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ
+mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A
+7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H
+-----END CERTIFICATE-----
+
+# Issuer: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Subject: CN=IdenTrust Public Sector Root CA 1 O=IdenTrust
+# Label: "IdenTrust Public Sector Root CA 1"
+# Serial: 13298821034946342390521976156843933698
+# MD5 Fingerprint: 37:06:a5:b0:fc:89:9d:ba:f4:6b:8c:1a:64:cd:d5:ba
+# SHA1 Fingerprint: ba:29:41:60:77:98:3f:f4:f3:ef:f2:31:05:3b:2e:ea:6d:4d:45:fd
+# SHA256 Fingerprint: 30:d0:89:5a:9a:44:8a:26:20:91:63:55:22:d1:f5:20:10:b5:86:7a:ca:e1:2c:78:ef:95:8f:d4:f4:38:9f:2f
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu
+VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN
+MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0
+MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7
+ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy
+RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS
+bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF
+/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R
+3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw
+EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy
+9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V
+GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV
+WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD
+W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN
+AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj
+t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV
+DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9
+TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G
+lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW
+mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df
+WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5
++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ
+tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA
+GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv
+8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G2 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2009 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G2"
+# Serial: 1246989352
+# MD5 Fingerprint: 4b:e2:c9:91:96:65:0c:f4:0e:5a:93:92:a0:0a:fe:b2
+# SHA1 Fingerprint: 8c:f4:27:fd:79:0c:3a:d1:66:06:8d:e8:1e:57:ef:bb:93:22:72:d4
+# SHA256 Fingerprint: 43:df:57:74:b0:3e:7f:ef:5f:e4:0d:93:1a:7b:ed:f1:bb:2e:6b:42:73:8c:4e:6d:38:41:10:3d:3a:a7:f3:39
+-----BEGIN CERTIFICATE-----
+MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC
+VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50
+cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs
+IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz
+dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy
+NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu
+dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt
+dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0
+aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj
+YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T
+RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN
+cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW
+wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1
+U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0
+jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN
+BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/
+jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ
+Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v
+1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R
+nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH
+VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - EC1 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2012 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - EC1"
+# Serial: 51543124481930649114116133369
+# MD5 Fingerprint: b6:7e:1d:f0:58:c5:49:6c:24:3b:3d:ed:98:18:ed:bc
+# SHA1 Fingerprint: 20:d8:06:40:df:9b:25:f5:12:25:3a:11:ea:f7:59:8a:eb:14:b5:47
+# SHA256 Fingerprint: 02:ed:0e:b2:8c:14:da:45:16:5c:56:67:91:70:0d:64:51:d7:fb:56:f0:b2:ab:1d:3b:8e:b0:70:e5:6e:df:f5
+-----BEGIN CERTIFICATE-----
+MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG
+A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3
+d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu
+dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq
+RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy
+MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD
+VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0
+L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g
+Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD
+ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi
+A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt
+ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH
+Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O
+BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC
+R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX
+hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G
+-----END CERTIFICATE-----
+
+# Issuer: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Subject: CN=CFCA EV ROOT O=China Financial Certification Authority
+# Label: "CFCA EV ROOT"
+# Serial: 407555286
+# MD5 Fingerprint: 74:e1:b6:ed:26:7a:7a:44:30:33:94:ab:7b:27:81:30
+# SHA1 Fingerprint: e2:b8:29:4b:55:84:ab:6b:58:c2:90:46:6c:ac:3f:b8:39:8f:84:83
+# SHA256 Fingerprint: 5c:c3:d7:8e:4e:1d:5e:45:54:7a:04:e6:87:3e:64:f9:0c:f9:53:6d:1c:cc:2e:f8:00:f3:55:c4:c5:fd:70:fd
+-----BEGIN CERTIFICATE-----
+MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD
+TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y
+aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx
+MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j
+aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP
+T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03
+sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL
+TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5
+/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp
+7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz
+EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt
+hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP
+a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot
+aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg
+TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV
+PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv
+cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL
+tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB
+ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT
+ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL
+jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS
+ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy
+P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19
+xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d
+Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN
+5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe
+/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z
+AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ
+5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GB CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GB CA"
+# Serial: 157768595616588414422159278966750757568
+# MD5 Fingerprint: a4:eb:b9:61:28:2e:b7:2f:98:b0:35:26:90:99:51:1d
+# SHA1 Fingerprint: 0f:f9:40:76:18:d3:d7:6a:4b:98:f0:a8:35:9e:0c:fd:27:ac:cc:ed
+# SHA256 Fingerprint: 6b:9c:08:e8:6e:b0:f7:67:cf:ad:65:cd:98:b6:21:49:e5:49:4a:67:f5:84:5e:7b:d1:ed:01:9f:27:b8:6b:d6
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt
+MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg
+Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i
+YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x
+CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG
+b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh
+bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3
+HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx
+WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX
+1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk
+u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P
+99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r
+M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB
+BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh
+cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5
+gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO
+ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf
+aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic
+Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Subject: CN=SZAFIR ROOT CA2 O=Krajowa Izba Rozliczeniowa S.A.
+# Label: "SZAFIR ROOT CA2"
+# Serial: 357043034767186914217277344587386743377558296292
+# MD5 Fingerprint: 11:64:c1:89:b0:24:b1:8c:b1:07:7e:89:9e:51:9e:99
+# SHA1 Fingerprint: e2:52:fa:95:3f:ed:db:24:60:bd:6e:28:f3:9c:cc:cf:5e:b3:3f:de
+# SHA256 Fingerprint: a1:33:9d:33:28:1a:0b:56:e5:57:d3:d3:2b:1c:e7:f9:36:7e:b0:94:bd:5f:a7:2a:7e:50:04:c8:de:d7:ca:fe
+-----BEGIN CERTIFICATE-----
+MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQEL
+BQAwUTELMAkGA1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6
+ZW5pb3dhIFMuQS4xGDAWBgNVBAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkw
+NzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L
+cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYDVQQDDA9TWkFGSVIg
+Uk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5QqEvN
+QLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT
+3PSQ1hNKDJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw
+3gAeqDRHu5rr/gsUvTaE2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr6
+3fE9biCloBK0TXC5ztdyO4mTp4CEHCdJckm1/zuVnsHMyAHs6A6KCpbns6aH5db5
+BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwiieDhZNRnvDF5YTy7ykHN
+XGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD
+AgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsF
+AAOCAQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw
+8PRBEew/R40/cof5O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOG
+nXkZ7/e7DDWQw4rtTw/1zBLZpD67oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCP
+oky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul4+vJhaAlIDf7js4MNIThPIGy
+d05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6+/NNIxuZMzSg
+LvWpCz/UXeHPhJ/iGcJfitYgHuNztw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Network CA 2 O=Unizeto Technologies S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Network CA 2"
+# Serial: 44979900017204383099463764357512596969
+# MD5 Fingerprint: 6d:46:9e:d9:25:6d:08:23:5b:5e:74:7d:1e:27:db:f2
+# SHA1 Fingerprint: d3:dd:48:3e:2b:bf:4c:05:e8:af:10:f5:fa:76:26:cf:d3:dc:30:92
+# SHA256 Fingerprint: b6:76:f2:ed:da:e8:77:5c:d3:6c:b0:f6:3c:d1:d4:60:39:61:f4:9e:62:65:ba:01:3a:2f:03:07:b6:d0:b8:04
+-----BEGIN CERTIFICATE-----
+MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB
+gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu
+QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG
+A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz
+OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ
+VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp
+ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3
+b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA
+DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn
+0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB
+OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE
+fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E
+Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m
+o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i
+sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW
+OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez
+Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n
+3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC
+AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ
+F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf
+CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29
+XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm
+djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/
+WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb
+AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq
+P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko
+b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj
+XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P
+5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi
+DrW5viSP
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: ca:ff:e2:db:03:d9:cb:4b:e9:0f:ad:84:fd:7b:18:ce
+# SHA1 Fingerprint: 01:0c:06:95:a6:98:19:14:ff:bf:5f:c6:b0:b6:95:ea:29:e9:12:a6
+# SHA256 Fingerprint: a0:40:92:9a:02:ce:53:b4:ac:f4:f2:ff:c6:98:1c:e4:49:6f:75:5e:6d:45:fe:0b:2a:69:2b:cd:52:52:3f:36
+-----BEGIN CERTIFICATE-----
+MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1Ix
+DzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5k
+IFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMT
+N0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9v
+dENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAxMTIxWjCBpjELMAkG
+A1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNh
+ZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkx
+QDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1
+dGlvbnMgUm9vdENBIDIwMTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC
+AQDC+Kk/G4n8PDwEXT2QNrCROnk8ZlrvbTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA
+4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+ehiGsxr/CL0BgzuNtFajT0
+AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+6PAQZe10
+4S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06C
+ojXdFPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV
+9Cz82XBST3i4vTwri5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrD
+gfgXy5I2XdGj2HUb4Ysn6npIQf1FGQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6
+Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2fu/Z8VFRfS0myGlZYeCsargq
+NhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9muiNX6hME6wGko
+LfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc
+Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNV
+HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVd
+ctA4GGqd83EkVAswDQYJKoZIhvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0I
+XtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+D1hYc2Ryx+hFjtyp8iY/xnmMsVMI
+M4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrMd/K4kPFox/la/vot
+9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+yd+2V
+Z5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/ea
+j8GsGsVn82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnh
+X9izjFk0WaSrT2y7HxjbdavYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQ
+l033DlZdwJVqwjbDG2jJ9SrcR5q+ss7FJej6A7na+RZukYT1HCjI/CbM1xyQVqdf
+bzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVtJ94Cj8rDtSvK6evIIVM4
+pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGaJI7ZjnHK
+e7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0
+vm9qp/UsQu0yrbYhnr68
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Subject: CN=Hellenic Academic and Research Institutions ECC RootCA 2015 O=Hellenic Academic and Research Institutions Cert. Authority
+# Label: "Hellenic Academic and Research Institutions ECC RootCA 2015"
+# Serial: 0
+# MD5 Fingerprint: 81:e5:b4:17:eb:c2:f5:e1:4b:0d:41:7b:49:92:fe:ef
+# SHA1 Fingerprint: 9f:f1:71:8d:92:d5:9a:f3:7d:74:97:b4:bc:6f:84:68:0b:ba:b6:66
+# SHA256 Fingerprint: 44:b5:45:aa:8a:25:e6:5a:73:ca:15:dc:27:fc:36:d2:4c:1c:b9:95:3a:06:65:39:b1:15:82:dc:48:7b:48:33
+-----BEGIN CERTIFICATE-----
+MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzAN
+BgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hl
+bGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgRUNDIFJv
+b3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEwMzcxMlowgaoxCzAJ
+BgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmljIEFj
+YWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5
+MUQwQgYDVQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0
+dXRpb25zIEVDQyBSb290Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKg
+QehLgoRc4vgxEZmGZE4JJS+dQS8KrjVPdJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJa
+jq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoKVlp8aQuqgAkkbH7BRqNC
+MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFLQi
+C4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaep
+lSTAGiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7Sof
+TUwJCA3sS61kFyjndc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X1 O=Internet Security Research Group
+# Subject: CN=ISRG Root X1 O=Internet Security Research Group
+# Label: "ISRG Root X1"
+# Serial: 172886928669790476064670243504169061120
+# MD5 Fingerprint: 0c:d2:f9:e0:da:17:73:e9:ed:86:4d:a5:e3:70:e7:4e
+# SHA1 Fingerprint: ca:bd:2a:79:a1:07:6a:31:f2:1d:25:36:35:cb:03:9d:43:29:a5:e8
+# SHA256 Fingerprint: 96:bc:ec:06:26:49:76:f3:74:60:77:9a:cf:28:c5:a7:cf:e8:a3:c0:aa:e1:1a:8f:fc:ee:05:c0:bd:df:08:c6
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
+
+# Issuer: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Subject: O=FNMT-RCM OU=AC RAIZ FNMT-RCM
+# Label: "AC RAIZ FNMT-RCM"
+# Serial: 485876308206448804701554682760554759
+# MD5 Fingerprint: e2:09:04:b4:d3:bd:d1:a0:14:fd:1a:d2:47:c4:57:1d
+# SHA1 Fingerprint: ec:50:35:07:b2:15:c4:95:62:19:e2:a8:9a:5b:42:99:2c:4c:2c:20
+# SHA256 Fingerprint: eb:c5:57:0c:29:01:8c:4d:67:b1:aa:12:7b:af:12:f7:03:b4:61:1e:bc:17:b7:da:b5:57:38:94:17:9b:93:fa
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsx
+CzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJ
+WiBGTk1ULVJDTTAeFw0wODEwMjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJ
+BgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBG
+Tk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALpxgHpMhm5/
+yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcfqQgf
+BBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAz
+WHFctPVrbtQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxF
+tBDXaEAUwED653cXeuYLj2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z
+374jNUUeAlz+taibmSXaXvMiwzn15Cou08YfxGyqxRxqAQVKL9LFwag0Jl1mpdIC
+IfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mwWsXmo8RZZUc1g16p6DUL
+mbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnTtOmlcYF7
+wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peS
+MKGJ47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2
+ZSysV4999AeU14ECll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMet
+UqIJ5G+GR4of6ygnXYMgrwTJbFaai0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUw
+AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFPd9xf3E6Jobd2Sn9R2gzL+H
+YJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1odHRwOi8vd3d3
+LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD
+nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1
+RXxlDPiyN8+sD8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYM
+LVN0V2Ue1bLdI4E7pWYjJ2cJj+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf
+77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrTQfv6MooqtyuGC2mDOL7Nii4LcK2N
+JpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW+YJF1DngoABd15jm
+fZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7Ixjp
+6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp
+1txyM/1d8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B
+9kiABdcPUXmsEKvU7ANm5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wok
+RqEIr9baRRmW1FMdW4R58MD3R++Lj8UGrp1MYp3/RgT408m2ECVAdf4WqslKYIYv
+uu8wd+RU4riEmViAqhOLUTpPSPaLtrM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 1 O=Amazon
+# Subject: CN=Amazon Root CA 1 O=Amazon
+# Label: "Amazon Root CA 1"
+# Serial: 143266978916655856878034712317230054538369994
+# MD5 Fingerprint: 43:c6:bf:ae:ec:fe:ad:2f:18:c6:88:68:30:fc:c8:e6
+# SHA1 Fingerprint: 8d:a7:f9:65:ec:5e:fc:37:91:0f:1c:6e:59:fd:c1:cc:6a:6e:de:16
+# SHA256 Fingerprint: 8e:cd:e6:88:4f:3d:87:b1:12:5b:a3:1a:c3:fc:b1:3d:70:16:de:7f:57:cc:90:4f:e1:cb:97:c6:ae:98:19:6e
+-----BEGIN CERTIFICATE-----
+MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAxMB4XDTE1MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALJ4gHHKeNXj
+ca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgHFzZM
+9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qw
+IFAGbHrQgLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6
+VOujw5H5SNz/0egwLX0tdHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L
+93FcXmn/6pUCyziKrlA4b9v7LWIbxcceVOF34GfID5yHI9Y/QCB/IIDEgEw+OyQm
+jgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3DQEBCwUA
+A4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDI
+U5PMCCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUs
+N+gDS63pYaACbvXy8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vv
+o/ufQJVtMVT8QtPHRh8jrdkPSHCa2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU
+5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2xJNDd2ZhwLnoQdeXeGADbkpy
+rqXRfboQnoZsG4q5WTP468SQvvG5
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 2 O=Amazon
+# Subject: CN=Amazon Root CA 2 O=Amazon
+# Label: "Amazon Root CA 2"
+# Serial: 143266982885963551818349160658925006970653239
+# MD5 Fingerprint: c8:e5:8d:ce:a8:42:e2:7a:c0:2a:5c:7c:9e:26:bf:66
+# SHA1 Fingerprint: 5a:8c:ef:45:d7:a6:98:59:76:7a:8c:8b:44:96:b5:78:cf:47:4b:1a
+# SHA256 Fingerprint: 1b:a5:b2:aa:8c:65:40:1a:82:96:01:18:f8:0b:ec:4f:62:30:4d:83:ce:c4:71:3a:19:c3:9c:01:1e:a4:6d:b4
+-----BEGIN CERTIFICATE-----
+MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwF
+ADA5MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6
+b24gUm9vdCBDQSAyMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTEL
+MAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJv
+b3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK2Wny2cSkxK
+gXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4kHbZ
+W0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg
+1dKmSYXpN+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K
+8nu+NQWpEjTj82R0Yiw9AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r
+2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvdfLC6HM783k81ds8P+HgfajZRRidhW+me
+z/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAExkv8LV/SasrlX6avvDXbR
+8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSSbtqDT6Zj
+mUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz
+7Mt0Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6
++XUyo05f7O0oYtlNc/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI
+0u1ufm8/0i2BWSlmy5A5lREedCf+3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMB
+Af8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSwDPBMMPQFWAJI/TPlUq9LhONm
+UjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oAA7CXDpO8Wqj2
+LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY
++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kS
+k5Nrp+gvU5LEYFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl
+7uxMMne0nxrpS10gxdr9HIcWxkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygm
+btmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQgj9sAq+uEjonljYE1x2igGOpm/Hl
+urR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbWaQbLU8uz/mtBzUF+
+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoVYh63
+n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE
+76KlXIx3KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H
+9jVlpNMKVv/1F2Rs76giJUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT
+4PsJYGw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 3 O=Amazon
+# Subject: CN=Amazon Root CA 3 O=Amazon
+# Label: "Amazon Root CA 3"
+# Serial: 143266986699090766294700635381230934788665930
+# MD5 Fingerprint: a0:d4:ef:0b:f7:b5:d8:49:95:2a:ec:f5:c4:fc:81:87
+# SHA1 Fingerprint: 0d:44:dd:8c:3c:8c:1a:1a:58:75:64:81:e9:0f:2e:2a:ff:b3:d2:6e
+# SHA256 Fingerprint: 18:ce:6c:fe:7b:f1:4e:60:b2:e3:47:b8:df:e8:68:cb:31:d0:2e:bb:3a:da:27:15:69:f5:03:43:b4:6d:b3:a4
+-----BEGIN CERTIFICATE-----
+MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSAzMB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZBf8ANm+gBG1bG8lKl
+ui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjrZt6j
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSr
+ttvXBp43rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkr
+BqWTrBqYaGFy+uGh0PsceGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteM
+YyRIHN8wfdVoOw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Amazon Root CA 4 O=Amazon
+# Subject: CN=Amazon Root CA 4 O=Amazon
+# Label: "Amazon Root CA 4"
+# Serial: 143266989758080763974105200630763877849284878
+# MD5 Fingerprint: 89:bc:27:d5:eb:17:8d:06:6a:69:d5:fd:89:47:b4:cd
+# SHA1 Fingerprint: f6:10:84:07:d6:f8:bb:67:98:0c:c2:e2:44:c2:eb:ae:1c:ef:63:be
+# SHA256 Fingerprint: e3:5d:28:41:9e:d0:20:25:cf:a6:90:38:cd:62:39:62:45:8d:a5:c6:95:fb:de:a3:c2:2b:0b:fb:25:89:70:92
+-----BEGIN CERTIFICATE-----
+MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5
+MQswCQYDVQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24g
+Um9vdCBDQSA0MB4XDTE1MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkG
+A1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZMBcGA1UEAxMQQW1hem9uIFJvb3Qg
+Q0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN/sGKe0uoe0ZLY7Bi
+9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri83Bk
+M6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB
+/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WB
+MAoGCCqGSM49BAMDA2gAMGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlw
+CkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1AE47xDqUEpHJWEadIRNyp4iciuRMStuW
+1KyLa2tJElMzrdfkviT8tQp21KW8EA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Subject: CN=TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 O=Turkiye Bilimsel ve Teknolojik Arastirma Kurumu - TUBITAK OU=Kamu Sertifikasyon Merkezi - Kamu SM
+# Label: "TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1"
+# Serial: 1
+# MD5 Fingerprint: dc:00:81:dc:69:2f:3e:2f:b0:3b:f6:3d:5a:91:8e:49
+# SHA1 Fingerprint: 31:43:64:9b:ec:ce:27:ec:ed:3a:3f:0b:8f:0d:e4:e8:91:dd:ee:ca
+# SHA256 Fingerprint: 46:ed:c3:68:90:46:d5:3a:45:3f:b3:10:4a:b8:0d:ca:ec:65:8b:26:60:ea:16:29:dd:7e:86:79:90:64:87:16
+-----BEGIN CERTIFICATE-----
+MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIx
+GDAWBgNVBAcTD0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxp
+bXNlbCB2ZSBUZWtub2xvamlrIEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0w
+KwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24gTWVya2V6aSAtIEthbXUgU00xNjA0
+BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRpZmlrYXNpIC0gU3Vy
+dW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYDVQQG
+EwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXll
+IEJpbGltc2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklU
+QUsxLTArBgNVBAsTJEthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBT
+TTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11IFNNIFNTTCBLb2sgU2VydGlmaWthc2kg
+LSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr3UwM6q7
+a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y86Ij5iySr
+LqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INr
+N3wcwv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2X
+YacQuFWQfw4tJzh03+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/
+iSIzL+aFCr2lqBs23tPcLG07xxO9WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4f
+AJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQUZT/HiobGPN08VFw1+DrtUgxH
+V8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh
+AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPf
+IPP54+M638yclNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4
+lzwDGrpDxpa5RXI4s6ehlj2Re37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c
+8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0jq5Rm+K37DwhuJi1/FwcJsoz7UMCf
+lo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Subject: CN=GDCA TrustAUTH R5 ROOT O=GUANG DONG CERTIFICATE AUTHORITY CO.,LTD.
+# Label: "GDCA TrustAUTH R5 ROOT"
+# Serial: 9009899650740120186
+# MD5 Fingerprint: 63:cc:d9:3d:34:35:5c:6f:53:a3:e2:08:70:48:1f:b4
+# SHA1 Fingerprint: 0f:36:38:5b:81:1a:25:c3:9b:31:4e:83:ca:e9:34:66:70:cc:74:b4
+# SHA256 Fingerprint: bf:ff:8f:d0:44:33:48:7d:6a:8a:a6:0c:1a:29:76:7a:9f:c2:bb:b0:5e:42:0f:71:3a:13:b9:92:89:1d:38:93
+-----BEGIN CERTIFICATE-----
+MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UE
+BhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ
+IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0
+MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVowYjELMAkGA1UEBhMCQ04xMjAwBgNV
+BAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8w
+HQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0BAQEF
+AAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJj
+Dp6L3TQsAlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBj
+TnnEt1u9ol2x8kECK62pOqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+u
+KU49tm7srsHwJ5uu4/Ts765/94Y9cnrrpftZTqfrlYwiOXnhLQiPzLyRuEH3FMEj
+qcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ9Cy5WmYqsBebnh52nUpm
+MUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQxXABZG12
+ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloP
+zgsMR6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3Gk
+L30SgLdTMEZeS1SZD2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeC
+jGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4oR24qoAATILnsn8JuLwwoC8N9VKejveSswoA
+HQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx9hoh49pwBiFYFIeFd3mqgnkC
+AwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlRMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg
+p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZm
+DRd9FBUb1Ov9H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5
+COmSdI31R9KrO9b7eGZONn356ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ry
+L3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd+PwyvzeG5LuOmCd+uh8W4XAR8gPf
+JWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQHtZa37dG/OaG+svg
+IHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBDF8Io
+2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV
+09tL7ECQ8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQ
+XR4EzzffHqhmsYzmIGrv/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrq
+T8p+ck0LcIymSLumoRT2+1hEmRSuqguTaaApJUqlyyvdimYHFngVV3Eb7PVHhPOe
+MTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority RSA O=SSL Corporation
+# Label: "SSL.com Root Certification Authority RSA"
+# Serial: 8875640296558310041
+# MD5 Fingerprint: 86:69:12:c0:70:f1:ec:ac:ac:c2:d5:bc:a5:5b:a1:29
+# SHA1 Fingerprint: b7:ab:33:08:d1:ea:44:77:ba:14:80:12:5a:6f:bd:a9:36:49:0c:bb
+# SHA256 Fingerprint: 85:66:6a:56:2e:e0:be:5c:e9:25:c1:d8:89:0a:6f:76:a8:7e:c1:6d:4d:7d:5f:29:ea:74:19:cf:20:12:3b:69
+-----BEGIN CERTIFICATE-----
+MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UE
+BhMCVVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQK
+DA9TU0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYwMjEyMTczOTM5WhcNNDEwMjEyMTcz
+OTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv
+bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcN
+AQEBBQADggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2R
+xFdHaxh3a3by/ZPkPQ/CFp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aX
+qhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcC
+C52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/geoeOy3ZExqysdBP+lSgQ3
+6YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkpk8zruFvh
+/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrF
+YD3ZfBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93E
+JNyAKoFBbZQ+yODJgUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVc
+US4cK38acijnALXRdMbX5J+tB5O2UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8
+ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi81xtZPCvM8hnIk2snYxnP/Okm
++Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4sbE6x/c+cCbqi
+M+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV
+HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4G
+A1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGV
+cpNxJK1ok1iOMq8bs3AD/CUrdIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBc
+Hadm47GUBwwyOabqG7B52B2ccETjit3E+ZUfijhDPwGFpUenPUayvOUiaPd7nNgs
+PgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAslu1OJD7OAUN5F7kR/
+q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjqerQ0
+cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jr
+a6x+3uxjMxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90I
+H37hVZkLId6Tngr75qNJvTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/Y
+K9f1JmzJBjSWFupwWRoyeXkLtoh/D1JIPb9s2KJELtFOt3JY04kTlf5Eq/jXixtu
+nLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406ywKBjYZC6VWg3dGq2ktuf
+oYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NIWuuA8ShY
+Ic2wBlX7Jz9TkHCpBB5XJ7k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com Root Certification Authority ECC"
+# Serial: 8495723813297216424
+# MD5 Fingerprint: 2e:da:e4:39:7f:9c:8f:37:d1:70:9f:26:17:51:3a:8e
+# SHA1 Fingerprint: c3:19:7c:39:24:e6:54:af:1b:c4:ab:20:95:7a:e2:c3:0e:13:02:6a
+# SHA256 Fingerprint: 34:17:bb:06:cc:60:07:da:1b:96:1c:92:0b:8a:b4:ce:3f:ad:82:0e:4a:a3:0b:9a:cb:c4:a7:4e:bd:ce:bc:65
+-----BEGIN CERTIFICATE-----
+MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xMTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNDAzWhcNNDEwMjEyMTgxNDAz
+WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0
+b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNvbSBS
+b290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI
+7Z4INcgn64mMU1jrYor+8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPg
+CemB+vNH06NjMGEwHQYDVR0OBBYEFILRhXMw5zUE044CkvvlpNHEIejNMA8GA1Ud
+EwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTTjgKS++Wk0cQh6M0wDgYD
+VR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCWe+0F+S8T
+kdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+
+gA0z5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority RSA R2 O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority RSA R2"
+# Serial: 6248227494352943350
+# MD5 Fingerprint: e1:1e:31:58:1a:ae:54:53:02:f6:17:6a:11:7b:4d:95
+# SHA1 Fingerprint: 74:3a:f0:52:9b:d0:32:a0:f4:4a:83:cd:d4:ba:a9:7b:7c:2e:c4:9a
+# SHA256 Fingerprint: 2e:7b:f1:6c:c2:24:85:a7:bb:e2:aa:86:96:75:07:61:b0:ae:39:be:3b:2f:e9:d0:cc:6d:4e:f7:34:91:42:5c
+-----BEGIN CERTIFICATE-----
+MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNV
+BAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UE
+CgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2Vy
+dGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMB4XDTE3MDUzMTE4MTQzN1oXDTQy
+MDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQIDAVUZXhhczEQMA4G
+A1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYDVQQD
+DC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvq
+M0fNTPl9fb69LT3w23jhhqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssuf
+OePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7wcXHswxzpY6IXFJ3vG2fThVUCAtZJycxa
+4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTOZw+oz12WGQvE43LrrdF9
+HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+B6KjBSYR
+aZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcA
+b9ZhCBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQ
+Gp8hLH94t2S42Oim9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQV
+PWKchjgGAGYS5Fl2WlPAApiiECtoRHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMO
+pgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+SlmJuwgUHfbSguPvuUCYHBBXtSu
+UDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48+qvWBkofZ6aY
+MBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV
+HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa4
+9QaAJadz20ZpqJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBW
+s47LCp1Jjr+kxJG7ZhcFUZh1++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5
+Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nxY/hoLVUE0fKNsKTPvDxeH3jnpaAg
+cLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2GguDKBAdRUNf/ktUM
+79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDzOFSz
+/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXt
+ll9ldDz7CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEm
+Kf7GUmG6sXP/wwyc5WxqlD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKK
+QbNmC1r7fSOl8hqw/96bg5Qu0T/fkreRrwU7ZcegbLHNYhLDkBvjJc40vG93drEQ
+w/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1hlMYegouCRw2n5H9gooi
+S9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX9hwJ1C07
+mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w==
+-----END CERTIFICATE-----
+
+# Issuer: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Subject: CN=SSL.com EV Root Certification Authority ECC O=SSL Corporation
+# Label: "SSL.com EV Root Certification Authority ECC"
+# Serial: 3182246526754555285
+# MD5 Fingerprint: 59:53:22:65:83:42:01:54:c0:ce:42:b9:5a:7c:f2:90
+# SHA1 Fingerprint: 4c:dd:51:a3:d1:f5:20:32:14:b0:c6:c5:32:23:03:91:c7:46:42:6d
+# SHA256 Fingerprint: 22:a2:c1:f7:bd:ed:70:4c:c1:e7:01:b5:f4:08:c3:10:88:0f:e9:56:b5:de:2a:4a:44:f9:9c:87:3a:25:a7:c8
+-----BEGIN CERTIFICATE-----
+MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMC
+VVMxDjAMBgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9T
+U0wgQ29ycG9yYXRpb24xNDAyBgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEyMTgxNTIzWhcNNDEwMjEyMTgx
+NTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hv
+dXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NMLmNv
+bSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49
+AgEGBSuBBAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMA
+VIbc/R/fALhBYlzccBYy3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1Kthku
+WnBaBu2+8KGwytAJKaNjMGEwHQYDVR0OBBYEFFvKXuXe0oGqzagtZFG22XKbl+ZP
+MA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe5d7SgarNqC1kUbbZcpuX
+5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJN+vp1RPZ
+ytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZg
+h5Mmm7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign Root CA - R6
+# Label: "GlobalSign Root CA - R6"
+# Serial: 1417766617973444989252670301619537
+# MD5 Fingerprint: 4f:dd:07:e4:d4:22:64:39:1e:0c:37:42:ea:d1:c6:ae
+# SHA1 Fingerprint: 80:94:64:0e:b5:a7:a1:ca:11:9c:1f:dd:d5:9f:81:02:63:a7:fb:d1
+# SHA256 Fingerprint: 2c:ab:ea:fe:37:d0:6c:a2:2a:ba:73:91:c0:03:3d:25:98:29:52:c4:53:64:73:49:76:3a:3a:b5:ad:6c:cf:69
+-----BEGIN CERTIFICATE-----
+MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEg
+MB4GA1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQx
+MjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSNjET
+MBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQssgrRI
+xutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1k
+ZguSgMpE3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxD
+aNc9PIrFsmbVkJq3MQbFvuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJw
+LnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqMPKq0pPbzlUoSB239jLKJz9CgYXfIWHSw
+1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+azayOeSsJDa38O+2HBNX
+k7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05OWgtH8wY2
+SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/h
+bguyCLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4n
+WUx2OVvq+aWh2IMP0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpY
+rZxCRXluDocZXFSxZba/jJvcE+kNb7gu3GduyYsRtYQUigAZcIN5kZeR1Bonvzce
+MgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTAD
+AQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNVHSMEGDAWgBSu
+bAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN
+nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGt
+Ixg93eFyRJa0lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr61
+55wsTLxDKZmOMNOsIeDjHfrYBzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLj
+vUYAGm0CuiVdjaExUd1URhxN25mW7xocBFymFe944Hn+Xds+qkxV/ZoVqW/hpvvf
+cDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr3TsTjxKM4kEaSHpz
+oHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB10jZp
+nOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfs
+pA9MRf/TuTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+v
+JJUEeKgDu+6B5dpffItKoZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R
+8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+tJDfLRVpOoERIyNiwmcUVhAn21klJwGW4
+5hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA=
+-----END CERTIFICATE-----
+
+# Issuer: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Subject: CN=OISTE WISeKey Global Root GC CA O=WISeKey OU=OISTE Foundation Endorsed
+# Label: "OISTE WISeKey Global Root GC CA"
+# Serial: 44084345621038548146064804565436152554
+# MD5 Fingerprint: a9:d6:b9:2d:2f:93:64:f8:a5:69:ca:91:e9:68:07:23
+# SHA1 Fingerprint: e0:11:84:5e:34:de:be:88:81:b9:9c:f6:16:26:d1:96:1f:c3:b9:31
+# SHA256 Fingerprint: 85:60:f9:1c:36:24:da:ba:95:70:b5:fe:a0:db:e3:6f:f1:1a:83:23:be:94:86:85:4f:b3:f3:4a:55:71:19:8d
+-----BEGIN CERTIFICATE-----
+MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQsw
+CQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91
+bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwg
+Um9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRaFw00MjA1MDkwOTU4MzNaMG0xCzAJ
+BgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBGb3Vu
+ZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2JhbCBS
+b290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4ni
+eUqjFqdrVCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4W
+p2OQ0jnUsYd4XxiWD1AbNTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7T
+rYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0EAwMDaAAwZQIwJsdpW9zV
+57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtkAjEA2zQg
+Mgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Global G2 Root O=UniTrust
+# Subject: CN=UCA Global G2 Root O=UniTrust
+# Label: "UCA Global G2 Root"
+# Serial: 124779693093741543919145257850076631279
+# MD5 Fingerprint: 80:fe:f0:c4:4a:f0:5c:62:32:9f:1c:ba:78:a9:50:f8
+# SHA1 Fingerprint: 28:f9:78:16:19:7a:ff:18:25:18:aa:44:fe:c1:a0:ce:5c:b6:4c:8a
+# SHA256 Fingerprint: 9b:ea:11:c9:76:fe:01:47:64:c1:be:56:a6:f9:14:b5:a5:60:31:7a:bd:99:88:39:33:82:e5:16:1a:a0:49:3c
+-----BEGIN CERTIFICATE-----
+MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBH
+bG9iYWwgRzIgUm9vdDAeFw0xNjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0x
+CzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlUcnVzdDEbMBkGA1UEAwwSVUNBIEds
+b2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxeYr
+b3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmToni9
+kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzm
+VHqUwCoV8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/R
+VogvGjqNO7uCEeBHANBSh6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDc
+C/Vkw85DvG1xudLeJ1uK6NjGruFZfc8oLTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIj
+tm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/R+zvWr9LesGtOxdQXGLY
+D0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBeKW4bHAyv
+j5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6Dl
+NaBa4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6
+iIis7nCs+dwp4wwcOxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznP
+O6Q0ibd5Ei9Hxeepl2n8pndntd978XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/
+BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFIHEjMz15DD/pQwIX4wV
+ZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo5sOASD0Ee/oj
+L3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5
+1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl
+1qnN3e92mI0ADs0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oU
+b3n09tDh05S60FdRvScFDcH9yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LV
+PtateJLbXDzz2K36uGt/xDYotgIVilQsnLAXc47QN6MUPJiVAAwpBVueSUmxX8fj
+y88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHojhJi6IjMtX9Gl8Cb
+EGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZkbxqg
+DMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI
++Vg7RE+xygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGy
+YiGqhkCyLmTTX8jjfhFnRR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bX
+UB+K+wb1whnw0A==
+-----END CERTIFICATE-----
+
+# Issuer: CN=UCA Extended Validation Root O=UniTrust
+# Subject: CN=UCA Extended Validation Root O=UniTrust
+# Label: "UCA Extended Validation Root"
+# Serial: 106100277556486529736699587978573607008
+# MD5 Fingerprint: a1:f3:5f:43:c6:34:9b:da:bf:8c:7e:05:53:ad:96:e2
+# SHA1 Fingerprint: a3:a1:b0:6f:24:61:23:4a:e3:36:a5:c2:37:fc:a6:ff:dd:f0:d7:3a
+# SHA256 Fingerprint: d4:3a:f9:b3:54:73:75:5c:96:84:fc:06:d7:d8:cb:70:ee:5c:28:e7:73:fb:29:4e:b4:1e:e7:17:22:92:4d:24
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBH
+MQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBF
+eHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMx
+MDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNV
+BAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrsiWog
+D4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvS
+sPGP2KxFRv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aop
+O2z6+I9tTcg1367r3CTueUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dk
+sHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR59mzLC52LqGj3n5qiAno8geK+LLNEOfi
+c0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH0mK1lTnj8/FtDw5lhIpj
+VMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KRel7sFsLz
+KuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/
+TuDvB0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41G
+sx2VYVdWf6/wFlthWG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs
+1+lvK9JKBZP8nm9rZ/+I8U6laUpSNwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQD
+fwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS3H5aBZ8eNJr34RQwDwYDVR0T
+AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBADaN
+l8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR
+ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQ
+VBcZEhrxH9cMaVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5
+c6sq1WnIeJEmMX3ixzDx/BR4dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp
+4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb+7lsq+KePRXBOy5nAliRn+/4Qh8s
+t2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOWF3sGPjLtx7dCvHaj
+2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwiGpWO
+vpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2C
+xR9GUeOcGMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmx
+cmtpzyKEC2IPrNkZAJSidjzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbM
+fjKaiJUINlK73nZfdklJrX+9ZSCyycErdhh2n1ax
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Subject: CN=Certigna Root CA O=Dhimyotis OU=0002 48146308100036
+# Label: "Certigna Root CA"
+# Serial: 269714418870597844693661054334862075617
+# MD5 Fingerprint: 0e:5c:30:62:27:eb:5b:bc:d7:ae:62:ba:e9:d5:df:77
+# SHA1 Fingerprint: 2d:0d:52:14:ff:9e:ad:99:24:01:74:20:47:6e:6c:85:27:27:f5:43
+# SHA256 Fingerprint: d4:8d:3d:23:ee:db:50:a4:59:e5:51:97:60:1c:27:77:4b:9d:7b:18:c9:4d:5a:05:95:11:a1:02:50:b9:31:68
+-----BEGIN CERTIFICATE-----
+MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAw
+WjELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAw
+MiA0ODE0NjMwODEwMDAzNjEZMBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0x
+MzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjdaMFoxCzAJBgNVBAYTAkZSMRIwEAYD
+VQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYzMDgxMDAwMzYxGTAX
+BgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
+ggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sO
+ty3tRQgXstmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9M
+CiBtnyN6tMbaLOQdLNyzKNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPu
+I9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8JXrJhFwLrN1CTivngqIkicuQstDuI7pm
+TLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16XdG+RCYyKfHx9WzMfgIh
+C59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq4NYKpkDf
+ePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3Yz
+IoejwpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWT
+Co/1VTp2lc5ZmIoJlXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1k
+JWumIWmbat10TWuXekG9qxf5kBdIjzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5
+hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp//TBt2dzhauH8XwIDAQABo4IB
+GjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE
+FBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of
+1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczov
+L3d3d3cuY2VydGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilo
+dHRwOi8vY3JsLmNlcnRpZ25hLmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYr
+aHR0cDovL2NybC5kaGlteW90aXMuY29tL2NlcnRpZ25hcm9vdGNhLmNybDANBgkq
+hkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOItOoldaDgvUSILSo3L
+6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxPTGRG
+HVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH6
+0BGM+RFq7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncB
+lA2c5uk5jR+mUYyZDDl34bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdi
+o2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1
+gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS6Cvu5zHbugRqh5jnxV/v
+faci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaYtlu3zM63
+Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayh
+jWZSaX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw
+3kAP+HwV96LOPNdeE4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign Root CA - G1 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign Root CA - G1"
+# Serial: 235931866688319308814040
+# MD5 Fingerprint: 9c:42:84:57:dd:cb:0b:a7:2e:95:ad:b6:f3:da:bc:ac
+# SHA1 Fingerprint: 8a:c7:ad:8f:73:ac:4e:c1:b5:75:4d:a5:40:f4:fc:cf:7c:b5:8e:8c
+# SHA256 Fingerprint: 40:f6:af:03:46:a9:9a:a1:cd:1d:55:5a:4e:9c:ce:62:c7:f9:63:46:03:ee:40:66:15:83:3d:c8:c8:d0:03:67
+-----BEGIN CERTIFICATE-----
+MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYD
+VQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBU
+ZWNobm9sb2dpZXMgTGltaXRlZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBH
+MTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgxODMwMDBaMGcxCzAJBgNVBAYTAklO
+MRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVkaHJhIFRlY2hub2xv
+Z2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIBIjAN
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQz
+f2N4aLTNLnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO
+8oG0x5ZOrRkVUkr+PHB1cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aq
+d7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHWDV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhM
+tTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ6DqS0hdW5TUaQBw+jSzt
+Od9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrHhQIDAQAB
+o0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQD
+AgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31x
+PaOfG1vR2vjTnGs2vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjM
+wiI/aTvFthUvozXGaCocV685743QNcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6d
+GNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q+Mri/Tm3R7nrft8EI6/6nAYH
+6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeihU80Bv2noWgby
+RQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx
+iN66zB+Afko=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - G3 O=eMudhra Technologies Limited OU=emSign PKI
+# Label: "emSign ECC Root CA - G3"
+# Serial: 287880440101571086945156
+# MD5 Fingerprint: ce:0b:72:d1:9f:88:8e:d0:50:03:e8:e3:b8:8b:67:40
+# SHA1 Fingerprint: 30:43:fa:4f:f2:57:dc:a0:c3:80:ee:2e:58:ea:78:b2:3f:e6:bb:c1
+# SHA256 Fingerprint: 86:a1:ec:ba:08:9c:4a:8d:3b:be:27:34:c6:12:ba:34:1d:81:3e:04:3c:f9:e8:a8:62:cd:5c:57:a3:6b:be:6b
+-----BEGIN CERTIFICATE-----
+MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQG
+EwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNo
+bm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g
+RzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4MTgzMDAwWjBrMQswCQYDVQQGEwJJ
+TjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9s
+b2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMw
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0
+WXTsuwYc58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xyS
+fvalY8L1X44uT6EYGQIrMgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuB
+zhccLikenEhjQjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggq
+hkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+DCBeQyh+KTOgNG3qxrdWB
+CUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7jHvrZQnD
++JbNR6iC8hZVdyR+EhCVBCyj
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign Root CA - C1 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign Root CA - C1"
+# Serial: 825510296613316004955058
+# MD5 Fingerprint: d8:e3:5d:01:21:fa:78:5a:b0:df:ba:d2:ee:2a:5f:68
+# SHA1 Fingerprint: e7:2e:f1:df:fc:b2:09:28:cf:5d:d4:d5:67:37:b1:51:cb:86:4f:01
+# SHA256 Fingerprint: 12:56:09:aa:30:1d:a0:a2:49:b9:7a:82:39:cb:6a:34:21:6f:44:dc:ac:9f:39:54:b1:42:92:f2:e8:c8:60:8f
+-----BEGIN CERTIFICATE-----
+MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkG
+A1UEBhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEg
+SW5jMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNpZ24gUm9v
+dCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+upufGZ
+BczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZ
+HdPIWoU/Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH
+3DspVpNqs8FqOp099cGXOFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvH
+GPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4VI5b2P/AgNBbeCsbEBEV5f6f9vtKppa+c
+xSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleoomslMuoaJuvimUnzYnu3Yy1
+aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+XJGFehiq
+TbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL
+BQADggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87
+/kOXSTKZEhVb3xEp/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4
+kqNPEjE2NuLe/gDEo2APJ62gsIq1NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrG
+YQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9wC68AivTxEDkigcxHpvOJpkT
++xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQBmIMMMAVSKeo
+WXzhriKi4gp6D/piq1JM4fHfyr6DDUI=
+-----END CERTIFICATE-----
+
+# Issuer: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Subject: CN=emSign ECC Root CA - C3 O=eMudhra Inc OU=emSign PKI
+# Label: "emSign ECC Root CA - C3"
+# Serial: 582948710642506000014504
+# MD5 Fingerprint: 3e:53:b3:a3:81:ee:d7:10:f8:d3:b0:1d:17:92:f5:d5
+# SHA1 Fingerprint: b6:af:43:c2:9b:81:53:7d:f6:ef:6b:c3:1f:1f:60:15:0c:ee:48:66
+# SHA256 Fingerprint: bc:4d:80:9b:15:18:9d:78:db:3e:1d:8c:f4:f9:72:6a:79:5d:a1:64:3c:a5:f1:35:8e:1d:db:0e:dc:0d:7e:b3
+-----BEGIN CERTIFICATE-----
+MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQG
+EwJVUzETMBEGA1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMx
+IDAeBgNVBAMTF2VtU2lnbiBFQ0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAw
+MFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UEBhMCVVMxEzARBgNVBAsTCmVtU2ln
+biBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQDExdlbVNpZ24gRUND
+IFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd6bci
+MK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4Ojavti
+sIGJAnB9SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0O
+BBYEFPtaSNCAIEDyqOkAB2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB
+Af8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQC02C8Cif22TGK6Q04ThHK1rt0c
+3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwUZOR8loMRnLDRWmFLpg9J
+0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Subject: CN=Hongkong Post Root CA 3 O=Hongkong Post
+# Label: "Hongkong Post Root CA 3"
+# Serial: 46170865288971385588281144162979347873371282084
+# MD5 Fingerprint: 11:fc:9f:bd:73:30:02:8a:fd:3f:f3:58:b9:cb:20:f0
+# SHA1 Fingerprint: 58:a2:d0:ec:20:52:81:5b:c1:f3:f8:64:02:24:4e:c2:8e:02:4b:02
+# SHA256 Fingerprint: 5a:2f:c0:3f:0c:83:b0:90:bb:fa:40:60:4b:09:88:44:6c:76:36:18:3d:f9:84:6e:17:10:1a:44:7f:b8:ef:d6
+-----BEGIN CERTIFICATE-----
+MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQEL
+BQAwbzELMAkGA1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJ
+SG9uZyBLb25nMRYwFAYDVQQKEw1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25n
+a29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2MDMwMjI5NDZaFw00MjA2MDMwMjI5
+NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtvbmcxEjAQBgNVBAcT
+CUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMXSG9u
+Z2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK
+AoICAQCziNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFO
+dem1p+/l6TWZ5Mwc50tfjTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mI
+VoBc+L0sPOFMV4i707mV78vH9toxdCim5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV
+9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOesL4jpNrcyCse2m5FHomY
+2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj0mRiikKY
+vLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+Tt
+bNe/JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZb
+x39ri1UbSsUgYT2uy1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+
+l2oBlKN8W4UdKjk60FSh0Tlxnf0h+bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YK
+TE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsGxVd7GYYKecsAyVKvQv83j+Gj
+Hno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwIDAQABo2MwYTAP
+BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e
+i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEw
+DQYJKoZIhvcNAQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG
+7BJ8dNVI0lkUmcDrudHr9EgwW62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCk
+MpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWldy8joRTnU+kLBEUx3XZL7av9YROXr
+gZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov+BS5gLNdTaqX4fnk
+GMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDceqFS
+3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJm
+Ozj/2ZQw9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+
+l6mc1X5VTMbeRRAc6uk7nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6c
+JfTzPV4e0hz5sy229zdcxsshTrD3mUcYhcErulWuBurQB7Lcq9CClnXO0lD+mefP
+L5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB60PZ2Pierc+xYw5F9KBa
+LJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fqdBb9HxEG
+mpv0
+-----END CERTIFICATE-----
+
+# Issuer: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only
+# Subject: CN=Entrust Root Certification Authority - G4 O=Entrust, Inc. OU=See www.entrust.net/legal-terms/(c) 2015 Entrust, Inc. - for authorized use only
+# Label: "Entrust Root Certification Authority - G4"
+# Serial: 289383649854506086828220374796556676440
+# MD5 Fingerprint: 89:53:f1:83:23:b7:7c:8e:05:f1:8c:71:38:4e:1f:88
+# SHA1 Fingerprint: 14:88:4e:86:26:37:b0:26:af:59:62:5c:40:77:ec:35:29:ba:96:01
+# SHA256 Fingerprint: db:35:17:d1:f6:73:2a:2d:5a:b9:7c:53:3e:c7:07:79:ee:32:70:a6:2f:b4:ac:42:38:37:24:60:e6:f0:1e:88
+-----BEGIN CERTIFICATE-----
+MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAw
+gb4xCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQL
+Ex9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykg
+MjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAw
+BgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0
+MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYTAlVT
+MRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1
+c3QubmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJ
+bmMuIC0gZm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3Qg
+Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEc0MIICIjANBgkqhkiG9w0B
+AQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3DumSXbcr3DbVZwbPLqGgZ
+2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV3imz/f3E
+T+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j
+5pds8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAM
+C1rlLAHGVK/XqsEQe9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73T
+DtTUXm6Hnmo9RR3RXRv06QqsYJn7ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNX
+wbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5XxNMhIWNlUpEbsZmOeX7m640A
+2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV7rtNOzK+mndm
+nqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8
+dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwl
+N4y6mACXi0mWHv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNj
+c0kCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9nMA0GCSqGSIb3DQEBCwUAA4ICAQAS
+5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4QjbRaZIxowLByQzTS
+Gwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht7LGr
+hFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/
+B7NTeLUKYvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uI
+AeV8KEsD+UmDfLJ/fOPtjqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbw
+H5Lk6rWS02FREAutp9lfx1/cH6NcjKF+m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+
+b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKWRGhXxNUzzxkvFMSUHHuk
+2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjAJOgc47Ol
+IQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk
+5F6G+TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuY
+n/PIjhs4ViFqUZPTkcpG2om3PVODLAgfi49T3f+sHw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft ECC Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft ECC Root Certificate Authority 2017"
+# Serial: 136839042543790627607696632466672567020
+# MD5 Fingerprint: dd:a1:03:e6:4a:93:10:d1:bf:f0:19:42:cb:fe:ed:67
+# SHA1 Fingerprint: 99:9a:64:c3:7f:f4:7d:9f:ab:95:f1:47:69:89:14:60:ee:c4:c3:c5
+# SHA256 Fingerprint: 35:8d:f3:9d:76:4a:f9:e1:b7:66:e9:c9:72:df:35:2e:e1:5c:fa:c2:27:af:6a:d1:d7:0e:8e:4a:6e:dc:ba:02
+-----BEGIN CERTIFICATE-----
+MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQsw
+CQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYD
+VQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIw
+MTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4MjMxNjA0WjBlMQswCQYDVQQGEwJV
+UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNy
+b3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQBgcq
+hkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZR
+ogPZnZH6thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYb
+hGBKia/teQ87zvH2RPUBeMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8E
+BTADAQH/MB0GA1UdDgQWBBTIy5lycFIM+Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3
+FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlfXu5gKcs68tvWMoQZP3zV
+L8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaReNtUjGUB
+iudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Subject: CN=Microsoft RSA Root Certificate Authority 2017 O=Microsoft Corporation
+# Label: "Microsoft RSA Root Certificate Authority 2017"
+# Serial: 40975477897264996090493496164228220339
+# MD5 Fingerprint: 10:ff:00:ff:cf:c9:f8:c7:7a:c0:ee:35:8e:c9:0f:47
+# SHA1 Fingerprint: 73:a5:e6:4a:3b:ff:83:16:ff:0e:dc:cc:61:8a:90:6e:4e:ae:4d:74
+# SHA256 Fingerprint: c7:41:f7:0f:4b:2a:8d:88:bf:2e:71:c1:41:22:ef:53:ef:10:eb:a0:cf:a5:e6:4c:fa:20:f4:18:85:30:73:e0
+-----BEGIN CERTIFICATE-----
+MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBl
+MQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw
+NAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5
+IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIwNzE4MjMwMDIzWjBlMQswCQYDVQQG
+EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1N
+aWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZ
+Nt9GkMml7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0
+ZdDMbRnMlfl7rEqUrQ7eS0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1
+HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw71VdyvD/IybLeS2v4I2wDwAW9lcfNcztm
+gGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+dkC0zVJhUXAoP8XFWvLJ
+jEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49FyGcohJUc
+aDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaG
+YaRSMLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6
+W6IYZVcSn2i51BVrlMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4K
+UGsTuqwPN1q3ErWQgR5WrlcihtnJ0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH
++FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJClTUFLkqqNfs+avNJVgyeY+Q
+W5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/
+BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC
+NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZC
+LgLNFgVZJ8og6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OC
+gMNPOsduET/m4xaRhPtthH80dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6
+tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk+ONVFT24bcMKpBLBaYVu32TxU5nh
+SnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex/2kskZGT4d9Mozd2
+TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDyAmH3
+pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGR
+xpl/j8nWZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiApp
+GWSZI1b7rCoucL5mxAyE7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9
+dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKTc0QWbej09+CVgI+WXTik9KveCjCHk9hN
+AHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D5KbvtwEwXlGjefVwaaZB
+RA+GsCyRxj3qrg+E
+-----END CERTIFICATE-----
+
+# Issuer: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Subject: CN=e-Szigno Root CA 2017 O=Microsec Ltd.
+# Label: "e-Szigno Root CA 2017"
+# Serial: 411379200276854331539784714
+# MD5 Fingerprint: de:1f:f6:9e:84:ae:a7:b4:21:ce:1e:58:7d:d1:84:98
+# SHA1 Fingerprint: 89:d4:83:03:4f:9e:9a:48:80:5f:72:37:d4:a9:a6:ef:cb:7c:1f:d1
+# SHA256 Fingerprint: be:b0:0b:30:83:9b:9b:c3:2c:32:e4:44:79:05:95:06:41:f2:64:21:b1:5e:d0:89:19:8b:51:8a:e2:ea:1b:99
+-----BEGIN CERTIFICATE-----
+MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNV
+BAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRk
+LjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJv
+b3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZaFw00MjA4MjIxMjA3MDZaMHExCzAJ
+BgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMg
+THRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25v
+IFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtv
+xie+RJCxs1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+H
+Wyx7xf58etqjYzBhMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G
+A1UdDgQWBBSHERUI0arBeAyxr87GyZDvvzAEwDAfBgNVHSMEGDAWgBSHERUI0arB
+eAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEAtVfd14pVCzbhhkT61Nlo
+jbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxOsvxyqltZ
++efcMQ==
+-----END CERTIFICATE-----
+
+# Issuer: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Subject: O=CERTSIGN SA OU=certSIGN ROOT CA G2
+# Label: "certSIGN Root CA G2"
+# Serial: 313609486401300475190
+# MD5 Fingerprint: 8c:f1:75:8a:c6:19:cf:94:b7:f7:65:20:87:c3:97:c7
+# SHA1 Fingerprint: 26:f9:93:b4:ed:3d:28:27:b0:b9:4b:a7:e9:15:1d:a3:8d:92:e5:32
+# SHA256 Fingerprint: 65:7c:fe:2f:a7:3f:aa:38:46:25:71:f3:32:a2:36:3a:46:fc:e7:02:09:51:71:07:02:cd:fb:b6:ee:da:33:05
+-----BEGIN CERTIFICATE-----
+MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNV
+BAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04g
+Uk9PVCBDQSBHMjAeFw0xNzAyMDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJ
+BgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJ
+R04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDF
+dRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05N0Iw
+vlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZ
+uIt4ImfkabBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhp
+n+Sc8CnTXPnGFiWeI8MgwT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKs
+cpc/I1mbySKEwQdPzH/iV8oScLumZfNpdWO9lfsbl83kqK/20U6o2YpxJM02PbyW
+xPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91QqhngLjYl/rNUssuHLoPj1P
+rCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732jcZZroiF
+DsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fx
+DTvf95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgy
+LcsUDFDYg2WD7rlcz8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6C
+eWRgKRM+o/1Pcmqr4tTluCRVLERLiohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB
+/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSCIS1mxteg4BXrzkwJ
+d8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOBywaK8SJJ6ejq
+kX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC
+b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQl
+qiCA2ClV9+BB/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0
+OJD7uNGzcgbJceaBxXntC6Z58hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+c
+NywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5BiKDUyUM/FHE5r7iOZULJK2v0ZXk
+ltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklWatKcsWMy5WHgUyIO
+pwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tUSxfj
+03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZk
+PuXaTH4MNMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE
+1LlSVHJ7liXMvGnjSG4N0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MX
+QRBdJ3NghVdJIgc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global Certification Authority"
+# Serial: 1846098327275375458322922162
+# MD5 Fingerprint: f8:1c:18:2d:2f:ba:5f:6d:a1:6c:bc:c7:ab:91:c7:0e
+# SHA1 Fingerprint: 2f:8f:36:4f:e1:58:97:44:21:59:87:a5:2a:9a:d0:69:95:26:7f:b5
+# SHA256 Fingerprint: 97:55:20:15:f5:dd:fc:3c:87:88:c0:06:94:45:55:40:88:94:45:00:84:f1:00:86:70:86:bc:1a:2b:b5:8d:c8
+-----BEGIN CERTIFICATE-----
+MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQsw
+CQYDVQQGEwJVUzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28x
+ITAfBgNVBAoMGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1
+c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMx
+OTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJVUzERMA8GA1UECAwI
+SWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2ZSBI
+b2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZp
+Y2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB
+ALldUShLPDeS0YLOvR29zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0Xzn
+swuvCAAJWX/NKSqIk4cXGIDtiLK0thAfLdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu
+7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4BqstTnoApTAbqOl5F2brz8
+1Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9oWN0EACyW
+80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotP
+JqX+OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1l
+RtzuzWniTY+HKE40Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfw
+hI0Vcnyh78zyiGG69Gm7DIwLdVcEuE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10
+coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm+9jaJXLE9gCxInm943xZYkqc
+BW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqjifLJS3tBEW1n
+twiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud
+EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1Ud
+DwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W
+0OhUKDtkLSGm+J1WE2pIPU/HPinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfe
+uyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0HZJDmHvUqoai7PF35owgLEQzxPy0Q
+lG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla4gt5kNdXElE1GYhB
+aCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5RvbbE
+sLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPT
+MaCm/zjdzyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qe
+qu5AvzSxnI9O4fKSTx+O856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxh
+VicGaeVyQYHTtgGJoC86cnn+OjC/QezHYj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8
+h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu3R3y4G5OBVixwJAWKqQ9
+EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP29FpHOTK
+yeC2nOnOcXHebD8WpHk=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global ECC P256 Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global ECC P256 Certification Authority"
+# Serial: 4151900041497450638097112925
+# MD5 Fingerprint: 5b:44:e3:8d:5d:36:86:26:e8:0d:05:d2:59:a7:83:54
+# SHA1 Fingerprint: b4:90:82:dd:45:0c:be:8b:5b:b1:66:d3:e2:a4:08:26:cd:ed:42:cf
+# SHA256 Fingerprint: 94:5b:bc:82:5e:a5:54:f4:89:d1:fd:51:a7:3d:df:2e:a6:24:ac:70:19:a0:52:05:22:5c:22:a7:8c:cf:a8:b4
+-----BEGIN CERTIFICATE-----
+MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
+BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
+YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
+NzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYDVQQGEwJVUzERMA8G
+A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
+d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
+Q0MgUDI1NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqG
+SM49AwEHA0IABH77bOYj43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoN
+FWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqmP62jQzBBMA8GA1UdEwEB/wQFMAMBAf8w
+DwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt0UrrdaVKEJmzsaGLSvcw
+CgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjzRM4q3wgh
+DDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7
+-----END CERTIFICATE-----
+
+# Issuer: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
+# Subject: CN=Trustwave Global ECC P384 Certification Authority O=Trustwave Holdings, Inc.
+# Label: "Trustwave Global ECC P384 Certification Authority"
+# Serial: 2704997926503831671788816187
+# MD5 Fingerprint: ea:cf:60:c4:3b:b9:15:29:40:a1:97:ed:78:27:93:d6
+# SHA1 Fingerprint: e7:f3:a3:c8:cf:6f:c3:04:2e:6d:0e:67:32:c5:9e:68:95:0d:5e:d2
+# SHA256 Fingerprint: 55:90:38:59:c8:c0:c3:eb:b8:75:9e:ce:4e:25:57:22:5f:f5:75:8b:bd:38:eb:d4:82:76:60:1e:1b:d5:80:97
+-----BEGIN CERTIFICATE-----
+MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYD
+VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAf
+BgNVBAoTGFRydXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3
+YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0x
+NzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYDVQQGEwJVUzERMA8G
+A1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0
+d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBF
+Q0MgUDM4NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABGvaDXU1CDFHBa5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJ
+j9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr/TklZvFe/oyujUF5nQlgziip04pt89ZF
+1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNVHQ8BAf8EBQMDBwYAMB0G
+A1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNnADBkAjA3
+AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsC
+MGclCrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVu
+Sw==
+-----END CERTIFICATE-----
+
+# Issuer: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
+# Subject: CN=NAVER Global Root Certification Authority O=NAVER BUSINESS PLATFORM Corp.
+# Label: "NAVER Global Root Certification Authority"
+# Serial: 9013692873798656336226253319739695165984492813
+# MD5 Fingerprint: c8:7e:41:f6:25:3b:f5:09:b3:17:e8:46:3d:bf:d0:9b
+# SHA1 Fingerprint: 8f:6b:f2:a9:27:4a:da:14:a0:c4:f4:8e:61:27:f9:c0:1e:78:5d:d1
+# SHA256 Fingerprint: 88:f4:38:dc:f8:ff:d1:fa:8f:42:91:15:ff:e5:f8:2a:e1:e0:6e:0c:70:c3:75:fa:ad:71:7b:34:a4:9e:72:65
+-----BEGIN CERTIFICATE-----
+MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEM
+BQAwaTELMAkGA1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRG
+T1JNIENvcnAuMTIwMAYDVQQDDClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0
+aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4NDJaFw0zNzA4MTgyMzU5NTlaMGkx
+CzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVTUyBQTEFURk9STSBD
+b3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlvbiBB
+dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVA
+iQqrDZBbUGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH
+38dq6SZeWYp34+hInDEW+j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lE
+HoSTGEq0n+USZGnQJoViAbbJAh2+g1G7XNr4rRVqmfeSVPc0W+m/6imBEtRTkZaz
+kVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2aacp+yPOiNgSnABIqKYP
+szuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4Yb8Obtoq
+vC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHf
+nZ3zVHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaG
+YQ5fG8Ir4ozVu53BA0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo
+0es+nPxdGoMuK8u180SdOqcXYZaicdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3a
+CJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejyYhbLgGvtPe31HzClrkvJE+2K
+AQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNVHQ4EFgQU0p+I
+36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB
+Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoN
+qo0hV4/GPnrK21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatj
+cu3cvuzHV+YwIHHW1xDBE1UBjCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm
++LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bxhYTeodoS76TiEJd6eN4MUZeoIUCL
+hr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTgE34h5prCy8VCZLQe
+lHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTHD8z7
+p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8
+piKCk5XQA76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLR
+LBT/DShycpWbXgnbiUSYqqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX
+5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oGI/hGoiLtk/bdmuYqh7GYVPEi92tF4+KO
+dh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmgkpzNNIaRkPpkUZ3+/uul
+9XXeifdy
+-----END CERTIFICATE-----
+
+# Issuer: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
+# Subject: CN=AC RAIZ FNMT-RCM SERVIDORES SEGUROS O=FNMT-RCM OU=Ceres
+# Label: "AC RAIZ FNMT-RCM SERVIDORES SEGUROS"
+# Serial: 131542671362353147877283741781055151509
+# MD5 Fingerprint: 19:36:9c:52:03:2f:d2:d1:bb:23:cc:dd:1e:12:55:bb
+# SHA1 Fingerprint: 62:ff:d9:9e:c0:65:0d:03:ce:75:93:d2:ed:3f:2d:32:c9:e3:e5:4a
+# SHA256 Fingerprint: 55:41:53:b1:3d:2c:f9:dd:b7:53:bf:be:1a:4e:0a:e0:8d:0a:a4:18:70:58:fe:60:a2:b8:62:b2:e4:b8:7b:cb
+-----BEGIN CERTIFICATE-----
+MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQsw
+CQYDVQQGEwJFUzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgw
+FgYDVQRhDA9WQVRFUy1RMjgyNjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1S
+Q00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4MTIyMDA5MzczM1oXDTQzMTIyMDA5
+MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQtUkNNMQ4wDAYDVQQL
+DAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNBQyBS
+QUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LH
+sbI6GA60XYyzZl2hNPk2LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oK
+Um8BA06Oi6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD
+VR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqGSM49BAMDA2kAMGYCMQCu
+SuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoDzBOQn5IC
+MQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJy
+v+c=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root R46 O=GlobalSign nv-sa
+# Subject: CN=GlobalSign Root R46 O=GlobalSign nv-sa
+# Label: "GlobalSign Root R46"
+# Serial: 1552617688466950547958867513931858518042577
+# MD5 Fingerprint: c4:14:30:e4:fa:66:43:94:2a:6a:1b:24:5f:19:d0:ef
+# SHA1 Fingerprint: 53:a2:b0:4b:ca:6b:d6:45:e6:39:8a:8e:c4:0d:d2:bf:77:c3:a2:90
+# SHA256 Fingerprint: 4f:a3:12:6d:8d:3a:11:d1:c4:85:5a:4f:80:7c:ba:d6:cf:91:9d:3a:5a:88:b0:3b:ea:2c:63:72:d9:3c:40:c9
+-----BEGIN CERTIFICATE-----
+MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUA
+MEYxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYD
+VQQDExNHbG9iYWxTaWduIFJvb3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMy
+MDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYt
+c2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIiMA0GCSqGSIb3DQEB
+AQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08EsCVeJ
+OaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQG
+vGIFAha/r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud
+316HCkD7rRlr+/fKYIje2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo
+0q3v84RLHIf8E6M6cqJaESvWJ3En7YEtbWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSE
+y132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvjK8Cd+RTyG/FWaha/LIWF
+zXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD412lPFzYE
++cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCN
+I/onccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzs
+x2sZy/N78CsHpdlseVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqa
+ByFrgY/bxFn63iLABJzjqls2k+g9vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC
+4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEMBQADggIBAHx4
+7PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg
+JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti
+2kM3S+LGteWygxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIk
+pnnpHs6i58FZFZ8d4kuaPp92CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRF
+FRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZmOUdkLG5NrmJ7v2B0GbhWrJKsFjLt
+rWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qqJZ4d16GLuc1CLgSk
+ZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwyeqiv5
+u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP
+4vkYxboznxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6
+N3ec592kD3ZDZopD8p/7DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3
+vouXsXgxT7PntgMTzlSdriVZzH81Xwj3QEUxeCp6
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign Root E46 O=GlobalSign nv-sa
+# Subject: CN=GlobalSign Root E46 O=GlobalSign nv-sa
+# Label: "GlobalSign Root E46"
+# Serial: 1552617690338932563915843282459653771421763
+# MD5 Fingerprint: b5:b8:66:ed:de:08:83:e3:c9:e2:01:34:06:ac:51:6f
+# SHA1 Fingerprint: 39:b4:6c:d5:fe:80:06:eb:e2:2f:4a:bb:08:33:a0:af:db:b9:dd:84
+# SHA256 Fingerprint: cb:b9:c4:4d:84:b8:04:3e:10:50:ea:31:a6:9f:51:49:55:d7:bf:d2:e2:c6:b4:93:01:01:9a:d6:1d:9f:50:58
+-----BEGIN CERTIFICATE-----
+MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYx
+CzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQD
+ExNHbG9iYWxTaWduIFJvb3QgRTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAw
+MDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2Ex
+HDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcqhkjOPQIBBgUrgQQA
+IgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkBjtjq
+R+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGdd
+yXqBPCCjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud
+DgQWBBQxCpCPtsad0kRLgLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ
+7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZkvLtoURMMA/cVi4RguYv/Uo7njLwcAjA8
++RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+CAezNIm8BZ/3Hobui3A=
+-----END CERTIFICATE-----
+
+# Issuer: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
+# Subject: CN=GLOBALTRUST 2020 O=e-commerce monitoring GmbH
+# Label: "GLOBALTRUST 2020"
+# Serial: 109160994242082918454945253
+# MD5 Fingerprint: 8a:c7:6f:cb:6d:e3:cc:a2:f1:7c:83:fa:0e:78:d7:e8
+# SHA1 Fingerprint: d0:67:c1:13:51:01:0c:aa:d0:c7:6a:65:37:31:16:26:4f:53:71:a2
+# SHA256 Fingerprint: 9a:29:6a:51:82:d1:d4:51:a2:e3:7f:43:9b:74:da:af:a2:67:52:33:29:f9:0f:9a:0d:20:07:c3:34:e2:3c:9a
+-----BEGIN CERTIFICATE-----
+MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkG
+A1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkw
+FwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYx
+MDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAhBgNVBAoTGmUtY29tbWVyY2UgbW9u
+aXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAyMDIwMIICIjANBgkq
+hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWiD59b
+RatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9Z
+YybNpyrOVPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3
+QWPKzv9pj2gOlTblzLmMCcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPw
+yJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCmfecqQjuCgGOlYx8ZzHyyZqjC0203b+J+
+BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKAA1GqtH6qRNdDYfOiaxaJ
+SaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9ORJitHHmkH
+r96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj0
+4KlGDfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9Me
+dKZssCz3AwyIDMvUclOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIw
+q7ejMZdnrY8XD2zHc+0klGvIg5rQmjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2
+nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC
+AQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1UdIwQYMBaAFNwu
+H9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA
+VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJC
+XtzoRlgHNQIw4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd
+6IwPS3BD0IL/qMy/pJTAvoe9iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf
++I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS8cE54+X1+NZK3TTN+2/BT+MAi1bi
+kvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2HcqtbepBEX4tdJP7
+wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxSvTOB
+TI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6C
+MUO+1918oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn
+4rnvyOL2NSl6dPrFf4IFYqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+I
+aFvowdlxfv1k7/9nR4hYJS8+hge9+6jlgqispdNpQ80xiEmEU5LAsTkbOYMBMMTy
+qfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
+# Subject: CN=ANF Secure Server Root CA O=ANF Autoridad de Certificacion OU=ANF CA Raiz
+# Label: "ANF Secure Server Root CA"
+# Serial: 996390341000653745
+# MD5 Fingerprint: 26:a6:44:5a:d9:af:4e:2f:b2:1d:b6:65:b0:4e:e8:96
+# SHA1 Fingerprint: 5b:6e:68:d0:cc:15:b6:a0:5f:1e:c1:5f:ae:02:fc:6b:2f:5d:6f:74
+# SHA256 Fingerprint: fb:8f:ec:75:91:69:b9:10:6b:1e:51:16:44:c6:18:c5:13:04:37:3f:6c:06:43:08:8d:8b:ef:fd:1b:99:75:99
+-----BEGIN CERTIFICATE-----
+MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNV
+BAUTCUc2MzI4NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlk
+YWQgZGUgQ2VydGlmaWNhY2lvbjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNV
+BAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3QgQ0EwHhcNMTkwOTA0MTAwMDM4WhcN
+MzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEwMQswCQYDVQQGEwJF
+UzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQwEgYD
+VQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9v
+dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCj
+cqQZAZ2cC4Ffc0m6p6zzBE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9q
+yGFOtibBTI3/TO80sh9l2Ll49a2pcbnvT1gdpd50IJeh7WhM3pIXS7yr/2WanvtH
+2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcvB2VSAKduyK9o7PQUlrZX
+H1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXsezx76W0OL
+zc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyR
+p1RMVwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQz
+W7i1o0TJrH93PB0j7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/
+SiOL9V8BY9KHcyi1Swr1+KuCLH5zJTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJn
+LNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe8TZBAQIvfXOn3kLMTOmJDVb3
+n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVOHj1tyRRM4y5B
+u8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj
+o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAO
+BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC
+AgEATh65isagmD9uw2nAalxJUqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L
+9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzxj6ptBZNscsdW699QIyjlRRA96Gej
+rw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDtdD+4E5UGUcjohybK
+pFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM5gf0
+vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjq
+OknkJjCb5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ
+/zo1PqVUSlJZS2Db7v54EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ9
+2zg/LFis6ELhDtjTO0wugumDLmsx2d1Hhk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI
++PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGyg77FGr8H6lnco4g175x2
+MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3r5+qPeoo
+tt7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum EC-384 CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum EC-384 CA"
+# Serial: 160250656287871593594747141429395092468
+# MD5 Fingerprint: b6:65:b3:96:60:97:12:a1:ec:4e:e1:3d:a3:c6:c9:f1
+# SHA1 Fingerprint: f3:3e:78:3c:ac:df:f4:a2:cc:ac:67:55:69:56:d7:e5:16:3c:e1:ed
+# SHA256 Fingerprint: 6b:32:80:85:62:53:18:aa:50:d1:73:c9:8d:8b:da:09:d5:7e:27:41:3d:11:4c:f7:87:a0:f5:d0:6c:03:0c:f6
+-----BEGIN CERTIFICATE-----
+MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQsw
+CQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScw
+JQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMT
+EENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2MDcyNDU0WhcNNDMwMzI2MDcyNDU0
+WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBT
+LkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxGTAX
+BgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATE
+KI6rGFtqvm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7Tm
+Fy8as10CW4kjPMIRBSqniBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68Kj
+QjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI0GZnQkdjrzife81r1HfS+8
+EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNoADBlAjADVS2m5hjEfO/J
+UG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0QoSZ/6vn
+nvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Subject: CN=Certum Trusted Root CA O=Asseco Data Systems S.A. OU=Certum Certification Authority
+# Label: "Certum Trusted Root CA"
+# Serial: 40870380103424195783807378461123655149
+# MD5 Fingerprint: 51:e1:c2:e7:fe:4c:84:af:59:0e:2f:f4:54:6f:ea:29
+# SHA1 Fingerprint: c8:83:44:c0:18:ae:9f:cc:f1:87:b7:8f:22:d1:c5:d7:45:84:ba:e5
+# SHA256 Fingerprint: fe:76:96:57:38:55:77:3e:37:a9:5e:7a:d4:d9:cc:96:c3:01:57:c1:5d:31:76:5b:a9:b1:57:04:e1:ae:78:fd
+-----BEGIN CERTIFICATE-----
+MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6
+MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEu
+MScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNV
+BAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwHhcNMTgwMzE2MTIxMDEzWhcNNDMw
+MzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEg
+U3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRo
+b3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqG
+SIb3DQEBAQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZ
+n0EGze2jusDbCSzBfN8pfktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/q
+p1x4EaTByIVcJdPTsuclzxFUl6s1wB52HO8AU5853BSlLCIls3Jy/I2z5T4IHhQq
+NwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2fJmItdUDmj0VDT06qKhF
+8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGtg/BKEiJ3
+HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGa
+mqi4NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi
+7VdNIuJGmj8PkTQkfVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSF
+ytKAQd8FqKPVhJBPC/PgP5sZ0jeJP/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0P
+qafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSYnjYJdmZm/Bo/6khUHL4wvYBQ
+v3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHKHRzQ+8S1h9E6
+Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1
+vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQAD
+ggIBAEii1QALLtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4
+WxmB82M+w85bj/UvXgF2Ez8sALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvo
+zMrnadyHncI013nR03e4qllY/p0m+jiGPp2Kh2RX5Rc64vmNueMzeMGQ2Ljdt4NR
+5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8CYyqOhNf6DR5UMEQ
+GfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA4kZf
+5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq
+0Uc9NneoWWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7D
+P78v3DSk+yshzWePS/Tj6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTM
+qJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmTOPQD8rv7gmsHINFSH5pkAnuYZttcTVoP
+0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZckbxJF0WddCajJFdr60qZf
+E2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb
+-----END CERTIFICATE-----
+
+# Issuer: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
+# Subject: CN=TunTrust Root CA O=Agence Nationale de Certification Electronique
+# Label: "TunTrust Root CA"
+# Serial: 108534058042236574382096126452369648152337120275
+# MD5 Fingerprint: 85:13:b9:90:5b:36:5c:b6:5e:b8:5a:f8:e0:31:57:b4
+# SHA1 Fingerprint: cf:e9:70:84:0f:e0:73:0f:9d:f6:0c:7f:2c:4b:ee:20:46:34:9c:bb
+# SHA256 Fingerprint: 2e:44:10:2a:b5:8c:b8:54:19:45:1c:8e:19:d9:ac:f3:66:2c:af:bc:61:4b:6a:53:96:0a:30:f7:d0:e2:eb:41
+-----BEGIN CERTIFICATE-----
+MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQEL
+BQAwYTELMAkGA1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUg
+Q2VydGlmaWNhdGlvbiBFbGVjdHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJv
+b3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQwNDI2MDg1NzU2WjBhMQswCQYDVQQG
+EwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBDZXJ0aWZpY2F0aW9u
+IEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIwDQYJ
+KoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZ
+n56eY+hz2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd
+2JQDoOw05TDENX37Jk0bbjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgF
+VwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZ
+GoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAdgjH8KcwAWJeRTIAAHDOF
+li/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViWVSHbhlnU
+r8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2
+eY8fTpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIb
+MlEsPvLfe/ZdeikZjuXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISg
+jwBUFfyRbVinljvrS5YnzWuioYasDXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB
+7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwSVXAkPcvCFDVDXSdOvsC9qnyW
+5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI04Y+oXNZtPdE
+ITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0
+90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+z
+xiD2BkewhpMl0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYu
+QEkHDVneixCwSQXi/5E/S7fdAo74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4
+FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRYYdZ2vyJ/0Adqp2RT8JeNnYA/u8EH
+22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJpadbGNjHh/PqAulxP
+xOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65xxBzn
+dFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5
+Xc0yGYuPjCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7b
+nV2UqL1g52KAdoGDDIzMMEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQ
+CvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9zZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZH
+u/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3rAZ3r2OvEhJn7wAzMMujj
+d9qDRIueVSjAi1jTkD5OGwDxFa2DK5o=
+-----END CERTIFICATE-----
+
+# Issuer: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Subject: CN=HARICA TLS RSA Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Label: "HARICA TLS RSA Root CA 2021"
+# Serial: 76817823531813593706434026085292783742
+# MD5 Fingerprint: 65:47:9b:58:86:dd:2c:f0:fc:a2:84:1f:1e:96:c4:91
+# SHA1 Fingerprint: 02:2d:05:82:fa:88:ce:14:0c:06:79:de:7f:14:10:e9:45:d7:a5:6d
+# SHA256 Fingerprint: d9:5d:0e:8e:da:79:52:5b:f9:be:b1:1b:14:d2:10:0d:32:94:98:5f:0c:62:d9:fa:bd:9c:d9:99:ec:cb:7b:1d
+-----BEGIN CERTIFICATE-----
+MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBs
+MQswCQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl
+c2VhcmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0Eg
+Um9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUzOFoXDTQ1MDIxMzEwNTUzN1owbDEL
+MAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl
+YXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNBIFJv
+b3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569l
+mwVnlskNJLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE
+4VGC/6zStGndLuwRo0Xua2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uv
+a9of08WRiFukiZLRgeaMOVig1mlDqa2YUlhu2wr7a89o+uOkXjpFc5gH6l8Cct4M
+pbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K5FrZx40d/JiZ+yykgmvw
+Kh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEvdmn8kN3b
+LW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcY
+AuUR0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqB
+AGMUuTNe3QvboEUHGjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYq
+E613TBoYm5EPWNgGVMWX+Ko/IIqmhaZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHr
+W2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQCPxrvrNQKlr9qEgYRtaQQJKQ
+CoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8GA1UdEwEB/wQF
+MAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE
+AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAU
+X15QvWiWkKQUEapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3
+f5Z2EMVGpdAgS1D0NTsY9FVqQRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxaja
+H6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxDQpSbIPDRzbLrLFPCU3hKTwSUQZqP
+JzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcRj88YxeMn/ibvBZ3P
+zzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5vZSt
+jBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0
+/L5H9MG0qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pT
+BGIBnfHAT+7hOtSLIBD6Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79
+aPib8qXPMThcFarmlwDB31qlpzmq6YR/PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YW
+xw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnnkf3/W9b3raYvAwtt41dU
+63ZTGI0RmLo=
+-----END CERTIFICATE-----
+
+# Issuer: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Subject: CN=HARICA TLS ECC Root CA 2021 O=Hellenic Academic and Research Institutions CA
+# Label: "HARICA TLS ECC Root CA 2021"
+# Serial: 137515985548005187474074462014555733966
+# MD5 Fingerprint: ae:f7:4c:e5:66:35:d1:b7:9b:8c:22:93:74:d3:4b:b0
+# SHA1 Fingerprint: bc:b0:c1:9d:e9:98:92:70:19:38:57:e9:8d:a7:b4:5d:6e:ee:01:48
+# SHA256 Fingerprint: 3f:99:cc:47:4a:cf:ce:4d:fe:d5:87:94:66:5e:47:8d:15:47:73:9f:2e:78:0f:1b:b4:ca:9b:13:30:97:d4:01
+-----BEGIN CERTIFICATE-----
+MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQsw
+CQYDVQQGEwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2Vh
+cmNoIEluc3RpdHV0aW9ucyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9v
+dCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoXDTQ1MDIxMzExMDEwOVowbDELMAkG
+A1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj
+aCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJvb3Qg
+Q0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7
+KKrxcm1lAEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9Y
+STHMmE5gEYd103KUkE+bECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUw
+AwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQD
+AgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAircJRQO9gcS3ujwLEXQNw
+SaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/QwCZ61IygN
+nxS2PFOiTAZpffpskcYqSUXm7LcT4Tps
+-----END CERTIFICATE-----
+
+# Issuer: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Subject: CN=Autoridad de Certificacion Firmaprofesional CIF A62634068
+# Label: "Autoridad de Certificacion Firmaprofesional CIF A62634068"
+# Serial: 1977337328857672817
+# MD5 Fingerprint: 4e:6e:9b:54:4c:ca:b7:fa:48:e4:90:b1:15:4b:1c:a3
+# SHA1 Fingerprint: 0b:be:c2:27:22:49:cb:39:aa:db:35:5c:53:e3:8c:ae:78:ff:b6:fe
+# SHA256 Fingerprint: 57:de:05:83:ef:d2:b2:6e:03:61:da:99:da:9d:f4:64:8d:ef:7e:e8:44:1c:3b:72:8a:fa:9b:cd:e0:f9:b2:6a
+-----BEGIN CERTIFICATE-----
+MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UE
+BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h
+cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1
+MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9
+thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM
+cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG
+L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i
+NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h
+X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b
+m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy
+Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja
+EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T
+KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF
+6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh
+OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1UdDgQWBBRlzeurNR4APn7VdMAc
+tHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4wgZswgZgGBFUd
+IAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j
+b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABC
+AG8AbgBhAG4AbwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAw
+ADEANzAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9m
+iWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL4QjbEwj4KKE1soCzC1HA01aajTNF
+Sa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDbLIpgD7dvlAceHabJ
+hfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1ilI45P
+Vf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZE
+EAEeiGaPcjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV
+1aUsIC+nmCjuRfzxuIgALI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2t
+CsvMo2ebKHTEm9caPARYpoKdrcd7b/+Alun4jWq9GJAd/0kakFI3ky88Al2CdgtR
+5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH9IBk9W6VULgRfhVwOEqw
+f9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpfNIbnYrX9
+ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNK
+GbqEZycPvEJdvSRUDewdcAZfpLz6IHxV
+-----END CERTIFICATE-----
+
+# Issuer: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
+# Subject: CN=vTrus ECC Root CA O=iTrusChina Co.,Ltd.
+# Label: "vTrus ECC Root CA"
+# Serial: 630369271402956006249506845124680065938238527194
+# MD5 Fingerprint: de:4b:c1:f5:52:8c:9b:43:e1:3e:8f:55:54:17:8d:85
+# SHA1 Fingerprint: f6:9c:db:b0:fc:f6:02:13:b6:52:32:a6:a3:91:3f:16:70:da:c3:e1
+# SHA256 Fingerprint: 30:fb:ba:2c:32:23:8e:2a:98:54:7a:f9:79:31:e5:50:42:8b:9b:3f:1c:8e:eb:66:33:dc:fa:86:c5:b2:7d:d3
+-----BEGIN CERTIFICATE-----
+MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMw
+RzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAY
+BgNVBAMTEXZUcnVzIEVDQyBSb290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDcz
+MTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28u
+LEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYwEAYHKoZIzj0CAQYF
+K4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+cToL0
+v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUd
+e4BdS49nTPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYD
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIw
+V53dVvHH4+m4SVBrm2nDb+zDfSXkV5UTQJtS0zvzQBm8JsctBp61ezaf9SXUY2sA
+AjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQLYgmRWAD5Tfs0aNoJrSEG
+GJTO
+-----END CERTIFICATE-----
+
+# Issuer: CN=vTrus Root CA O=iTrusChina Co.,Ltd.
+# Subject: CN=vTrus Root CA O=iTrusChina Co.,Ltd.
+# Label: "vTrus Root CA"
+# Serial: 387574501246983434957692974888460947164905180485
+# MD5 Fingerprint: b8:c9:37:df:fa:6b:31:84:64:c5:ea:11:6a:1b:75:fc
+# SHA1 Fingerprint: 84:1a:69:fb:f5:cd:1a:25:34:13:3d:e3:f8:fc:b8:99:d0:c9:14:b7
+# SHA256 Fingerprint: 8a:71:de:65:59:33:6f:42:6c:26:e5:38:80:d0:0d:88:a1:8d:a4:c6:a9:1f:0d:cb:61:94:e2:06:c5:c9:63:87
+-----BEGIN CERTIFICATE-----
+MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQEL
+BQAwQzELMAkGA1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4x
+FjAUBgNVBAMTDXZUcnVzIFJvb3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMx
+MDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoGA1UEChMTaVRydXNDaGluYSBDby4s
+THRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQAD
+ggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZotsSKYc
+IrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykU
+AyyNJJrIZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+
+GrPSbcKvdmaVayqwlHeFXgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z9
+8Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KAYPxMvDVTAWqXcoKv8R1w6Jz1717CbMdH
+flqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70kLJrxLT5ZOrpGgrIDajt
+J8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2AXPKBlim
+0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZN
+pGvu/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQ
+UqqzApVg+QxMaPnu1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHW
+OXSuTEGC2/KmSNGzm/MzqvOmwMVO9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMB
+AAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYgscasGrz2iTAPBgNVHRMBAf8E
+BTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAKbqSSaet
+8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd
+nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1j
+bhd47F18iMjrjld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvM
+Kar5CKXiNxTKsbhm7xqC5PD48acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIiv
+TDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJnxDHO2zTlJQNgJXtxmOTAGytfdELS
+S8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554WgicEFOwE30z9J4nfr
+I8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4sEb9
+b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNB
+UvupLnKWnyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1P
+Ti07NEPhmg4NpGaXutIcSkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929ven
+sBxXVsFy6K2ir40zSbofitzmdHxghm+Hl3s=
+-----END CERTIFICATE-----
+
+# Issuer: CN=ISRG Root X2 O=Internet Security Research Group
+# Subject: CN=ISRG Root X2 O=Internet Security Research Group
+# Label: "ISRG Root X2"
+# Serial: 87493402998870891108772069816698636114
+# MD5 Fingerprint: d3:9e:c4:1e:23:3c:a6:df:cf:a3:7e:6d:e0:14:e6:e5
+# SHA1 Fingerprint: bd:b1:b9:3c:d5:97:8d:45:c6:26:14:55:f8:db:95:c7:5a:d1:53:af
+# SHA256 Fingerprint: 69:72:9b:8e:15:a8:6e:fc:17:7a:57:af:b7:17:1d:fc:64:ad:d2:8c:2f:ca:8c:f1:50:7e:34:45:3c:cb:14:70
+-----BEGIN CERTIFICATE-----
+MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQsw
+CQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2gg
+R3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00
+MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVTMSkwJwYDVQQKEyBJbnRlcm5ldCBT
+ZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNSRyBSb290IFgyMHYw
+EAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0HttwW
++1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9
+ItgKbppbd9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0T
+AQH/BAUwAwEB/zAdBgNVHQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZI
+zj0EAwMDaAAwZQIwe3lORlCEwkSHRhtFcP9Ymd70/aTSVaYgLXTWNLxBo1BfASdW
+tL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5U6VR5CmD1/iQMVtCnwr1
+/q4AaOeMSQ+2b1tbFfLn
+-----END CERTIFICATE-----
+
+# Issuer: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd.
+# Subject: CN=HiPKI Root CA - G1 O=Chunghwa Telecom Co., Ltd.
+# Label: "HiPKI Root CA - G1"
+# Serial: 60966262342023497858655262305426234976
+# MD5 Fingerprint: 69:45:df:16:65:4b:e8:68:9a:8f:76:5f:ff:80:9e:d3
+# SHA1 Fingerprint: 6a:92:e4:a8:ee:1b:ec:96:45:37:e3:29:57:49:cd:96:e3:e5:d2:60
+# SHA256 Fingerprint: f0:15:ce:3c:c2:39:bf:ef:06:4b:e9:f1:d2:c4:17:e1:a0:26:4a:0a:94:be:1f:0c:8d:12:18:64:eb:69:49:cc
+-----BEGIN CERTIFICATE-----
+MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBP
+MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0
+ZC4xGzAZBgNVBAMMEkhpUEtJIFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRa
+Fw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3
+YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kgUm9vdCBDQSAtIEcx
+MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0o9Qw
+qNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twv
+Vcg3Px+kwJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6
+lZgRZq2XNdZ1AYDgr/SEYYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnz
+Qs7ZngyzsHeXZJzA9KMuH5UHsBffMNsAGJZMoYFL3QRtU6M9/Aes1MU3guvklQgZ
+KILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfdhSi8MEyr48KxRURHH+CK
+FgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj1jOXTyFj
+HluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDr
+y+K49a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ
+/W3c1pzAtH2lsN0/Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgM
+a/aOEmem8rJY5AIJEzypuxC00jBF8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6
+fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNV
+HQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQDAgGGMA0GCSqG
+SIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi
+7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqc
+SE5XCV0vrPSltJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6Fza
+ZsT0pPBWGTMpWmWSBUdGSquEwx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9Tc
+XzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07QJNBAsNB1CI69aO4I1258EHBGG3zg
+iLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv5wiZqAxeJoBF1Pho
+L5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+GpzjLrF
+Ne85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wr
+kkVbbiVghUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+
+vhV4nYWBSipX3tUZQ9rbyltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQU
+YDksswBVLuT1sw5XxJFBAJw/6KXf6vb/yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ==
+-----END CERTIFICATE-----
+
+# Issuer: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Subject: CN=GlobalSign O=GlobalSign OU=GlobalSign ECC Root CA - R4
+# Label: "GlobalSign ECC Root CA - R4"
+# Serial: 159662223612894884239637590694
+# MD5 Fingerprint: 26:29:f8:6d:e1:88:bf:a2:65:7f:aa:c4:cd:0f:7f:fc
+# SHA1 Fingerprint: 6b:a0:b0:98:e1:71:ef:5a:ad:fe:48:15:80:77:10:f4:bd:6f:0b:28
+# SHA256 Fingerprint: b0:85:d7:0b:96:4f:19:1a:73:e4:af:0d:54:ae:7a:0e:07:aa:fd:af:9b:71:dd:08:62:13:8a:b7:32:5a:24:a2
+-----BEGIN CERTIFICATE-----
+MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYD
+VQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2Jh
+bFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgw
+MTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9iYWxTaWduIEVDQyBSb290IENBIC0g
+UjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2JhbFNpZ24wWTAT
+BgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkWymOx
+uYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNV
+HQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/
++wpu+74zyTyjhNUwCgYIKoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147
+bmF0774BxL4YSFlhgjICICadVGNA3jdgUM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R1 O=Google Trust Services LLC
+# Subject: CN=GTS Root R1 O=Google Trust Services LLC
+# Label: "GTS Root R1"
+# Serial: 159662320309726417404178440727
+# MD5 Fingerprint: 05:fe:d0:bf:71:a8:a3:76:63:da:01:e0:d8:52:dc:40
+# SHA1 Fingerprint: e5:8c:1c:c4:91:3b:38:63:4b:e9:10:6e:e3:ad:8e:6b:9d:d9:81:4a
+# SHA256 Fingerprint: d9:47:43:2a:bd:e7:b7:fa:90:fc:2e:6b:59:10:1b:12:80:e0:e1:c7:e4:e4:0f:a3:c6:88:7f:ff:57:a7:f4:cf
+-----BEGIN CERTIFICATE-----
+MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaMf/vo
+27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7w
+Cl7raKb0xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjw
+TcLCeoiKu7rPWRnWr4+wB7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0Pfybl
+qAj+lug8aJRT7oM6iCsVlgmy4HqMLnXWnOunVmSPlk9orj2XwoSPwLxAwAtcvfaH
+szVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk9+aCEI3oncKKiPo4Zor8
+Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zqkUspzBmk
+MiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92
+wO1AK/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70p
+aDPvOmbsB4om3xPXV2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrN
+VjzRlwW5y0vtOUucxD/SVRNuJLDWcfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQID
+AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQADggIBAJ+qQibb
+C5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe
+QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuy
+h6f88/qBVRRiClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM4
+7HLwEXWdyzRSjeZ2axfG34arJ45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8J
+ZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYciNuaCp+0KueIHoI17eko8cdLiA6Ef
+MgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5meLMFrUKTX5hgUvYU/
+Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJFfbdT
+6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ
+0E6yove+7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm
+2tIMPNuzjsmhDYAPexZ3FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bb
+bP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3gm3c
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R2 O=Google Trust Services LLC
+# Subject: CN=GTS Root R2 O=Google Trust Services LLC
+# Label: "GTS Root R2"
+# Serial: 159662449406622349769042896298
+# MD5 Fingerprint: 1e:39:c0:53:e6:1e:29:82:0b:ca:52:55:36:5d:57:dc
+# SHA1 Fingerprint: 9a:44:49:76:32:db:de:fa:d0:bc:fb:5a:7b:17:bd:9e:56:09:24:94
+# SHA256 Fingerprint: 8d:25:cd:97:22:9d:bf:70:35:6b:da:4e:b3:cc:73:40:31:e2:4c:f0:0f:af:cf:d3:2d:c7:6e:b5:84:1c:7e:a8
+-----BEGIN CERTIFICATE-----
+MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQsw
+CQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEU
+MBIGA1UEAxMLR1RTIFJvb3QgUjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAw
+MDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZp
+Y2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUA
+A4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3LvCvpt
+nfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY
+6Dlo7JUle3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAu
+MC6C/Pq8tBcKSOWIm8Wba96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7k
+RXuJVfeKH2JShBKzwkCX44ofR5GmdFrS+LFjKBC4swm4VndAoiaYecb+3yXuPuWg
+f9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7MkogwTZq9TwtImoS1mKPV
++3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJGr61K8Yzo
+dDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RW
+Ir9qS34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKa
+G73VululycslaVNVJ1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCq
+gc7dGtxRcw1PcOnlthYhGXmy5okLdWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwID
+AQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
+FgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQADggIBAB/Kzt3H
+vqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8
+0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyC
+B19m3H0Q/gxhswWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2u
+NmSRXbBoGOqKYcl3qJfEycel/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMg
+yALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVnjWQye+mew4K6Ki3pHrTgSAai/Gev
+HyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y59PYjJbigapordwj6
+xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M7YNR
+TOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924Sg
+JPFI/2R80L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV
+7LXTWtiBmelDGDfrs7vRWGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl
+6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjWHYbL
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R3 O=Google Trust Services LLC
+# Subject: CN=GTS Root R3 O=Google Trust Services LLC
+# Label: "GTS Root R3"
+# Serial: 159662495401136852707857743206
+# MD5 Fingerprint: 3e:e7:9d:58:02:94:46:51:94:e5:e0:22:4a:8b:e7:73
+# SHA1 Fingerprint: ed:e5:71:80:2b:c8:92:b9:5b:83:3c:d2:32:68:3f:09:cd:a0:1e:46
+# SHA256 Fingerprint: 34:d8:a7:3e:e2:08:d9:bc:db:0d:95:65:20:93:4b:4e:40:e6:94:82:59:6e:8b:6f:73:c8:42:6b:01:0a:6f:48
+-----BEGIN CERTIFICATE-----
+MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYD
+VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG
+A1UEAxMLR1RTIFJvb3QgUjMwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw
+WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz
+IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout736G
+jOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL2
+4CejQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBTB8Sa6oC2uhYHP0/EqEr24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7
+VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azTL818+FsuVbu/3ZL3pAzcMeGiAjEA/Jdm
+ZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV11RZt+cRLInUue4X
+-----END CERTIFICATE-----
+
+# Issuer: CN=GTS Root R4 O=Google Trust Services LLC
+# Subject: CN=GTS Root R4 O=Google Trust Services LLC
+# Label: "GTS Root R4"
+# Serial: 159662532700760215368942768210
+# MD5 Fingerprint: 43:96:83:77:19:4d:76:b3:9d:65:52:e4:1d:22:a5:e8
+# SHA1 Fingerprint: 77:d3:03:67:b5:e0:0c:15:f6:0c:38:61:df:7c:e1:3b:92:46:4d:47
+# SHA256 Fingerprint: 34:9d:fa:40:58:c5:e2:63:12:3b:39:8a:e7:95:57:3c:4e:13:13:c8:3f:e6:8f:93:55:6c:d5:e8:03:1b:3c:7d
+-----BEGIN CERTIFICATE-----
+MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYD
+VQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIG
+A1UEAxMLR1RTIFJvb3QgUjQwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAw
+WjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2Vz
+IExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzuhXyi
+QHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvR
+HYqjQjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW
+BBSATNbrdP9JNqPV2Py1PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D
+9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/Cr8deVl5c1RxYIigL9zC2L7F8AjEA8GE8
+p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh4rsUecrNIdSUtUlD
+-----END CERTIFICATE-----
+
+# Issuer: CN=Telia Root CA v2 O=Telia Finland Oyj
+# Subject: CN=Telia Root CA v2 O=Telia Finland Oyj
+# Label: "Telia Root CA v2"
+# Serial: 7288924052977061235122729490515358
+# MD5 Fingerprint: 0e:8f:ac:aa:82:df:85:b1:f4:dc:10:1c:fc:99:d9:48
+# SHA1 Fingerprint: b9:99:cd:d1:73:50:8a:c4:47:05:08:9c:8c:88:fb:be:a0:2b:40:cd
+# SHA256 Fingerprint: 24:2b:69:74:2f:cb:1e:5b:2a:bf:98:89:8b:94:57:21:87:54:4e:5b:4d:99:11:78:65:73:62:1f:6a:74:b8:2c
+-----BEGIN CERTIFICATE-----
+MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQx
+CzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UE
+AwwQVGVsaWEgUm9vdCBDQSB2MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1
+NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZ
+MBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZIhvcNAQEBBQADggIP
+ADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ76zBq
+AMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9
+vVYiQJ3q9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9
+lRdU2HhE8Qx3FZLgmEKnpNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTOD
+n3WhUidhOPFZPY5Q4L15POdslv5e2QJltI5c0BE0312/UqeBAMN/mUWZFdUXyApT
+7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW5olWK8jjfN7j/4nlNW4o
+6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNrRBH0pUPC
+TEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6
+WT0EBXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63R
+DolUK5X6wK0dmBR4M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZI
+pEYslOqodmJHixBTB0hXbOKSTbauBcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGj
+YzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7Wxy+G2CQ5MB0GA1UdDgQWBBRy
+rOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw
+AwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ
+8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi
+0f6X+J8wfBj5tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMM
+A8iZGok1GTzTyVR8qPAs5m4HeW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBS
+SRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+Cy748fdHif64W1lZYudogsYMVoe+K
+TTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygCQMez2P2ccGrGKMOF
+6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15h2Er
+3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMt
+Ty3EHD70sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pT
+VmBds9hCG1xLEooc6+t9xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAW
+ysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQraVplI/owd8k+BsHMYeB2F326CjYSlKA
+rBPuUBQemMc=
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH
+# Subject: CN=D-TRUST BR Root CA 1 2020 O=D-Trust GmbH
+# Label: "D-TRUST BR Root CA 1 2020"
+# Serial: 165870826978392376648679885835942448534
+# MD5 Fingerprint: b5:aa:4b:d5:ed:f7:e3:55:2e:8f:72:0a:f3:75:b8:ed
+# SHA1 Fingerprint: 1f:5b:98:f0:e3:b5:f7:74:3c:ed:e6:b0:36:7d:32:cd:f4:09:41:67
+# SHA256 Fingerprint: e5:9a:aa:81:60:09:c2:2b:ff:5b:25:ba:d3:7d:f3:06:f0:49:79:7c:1f:81:d8:5a:b0:89:e6:57:bd:8f:00:44
+-----BEGIN CERTIFICATE-----
+MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQsw
+CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS
+VVNUIEJSIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5
+NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG
+A1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7dPYS
+zuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0
+QVK5buXuQqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/
+VbNafAkl1bK6CKBrqx9tMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g
+PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2JyX3Jvb3Rf
+Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l
+dC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1
+c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO
+PQQDAwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFW
+wKrY7RjEsK70PvomAjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHV
+dWNbFJWcHwHP2NVypw87
+-----END CERTIFICATE-----
+
+# Issuer: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH
+# Subject: CN=D-TRUST EV Root CA 1 2020 O=D-Trust GmbH
+# Label: "D-TRUST EV Root CA 1 2020"
+# Serial: 126288379621884218666039612629459926992
+# MD5 Fingerprint: 8c:2d:9d:70:9f:48:99:11:06:11:fb:e9:cb:30:c0:6e
+# SHA1 Fingerprint: 61:db:8c:21:59:69:03:90:d8:7c:9c:12:86:54:cf:9d:3d:f4:dd:07
+# SHA256 Fingerprint: 08:17:0d:1a:a3:64:53:90:1a:2f:95:92:45:e3:47:db:0c:8d:37:ab:aa:bc:56:b8:1a:a1:00:dc:95:89:70:db
+-----BEGIN CERTIFICATE-----
+MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQsw
+CQYDVQQGEwJERTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRS
+VVNUIEVWIFJvb3QgQ0EgMSAyMDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5
+NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNVBAoTDEQtVHJ1c3QgR21iSDEiMCAG
+A1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAGByqGSM49AgEGBSuB
+BAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8ZRCC
+/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rD
+wpdhQntJraOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3
+OqQo5FD4pPfsazK2/umLMA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6g
+PKA6hjhodHRwOi8vY3JsLmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X2V2X3Jvb3Rf
+Y2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVjdG9yeS5kLXRydXN0Lm5l
+dC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxPPUQtVHJ1
+c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjO
+PQQDAwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CA
+y/m0sRtW9XLS/BnRAjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJb
+gfM0agPnIjhQW+0ZT0MW
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc.
+# Subject: CN=DigiCert TLS ECC P384 Root G5 O=DigiCert, Inc.
+# Label: "DigiCert TLS ECC P384 Root G5"
+# Serial: 13129116028163249804115411775095713523
+# MD5 Fingerprint: d3:71:04:6a:43:1c:db:a6:59:e1:a8:a3:aa:c5:71:ed
+# SHA1 Fingerprint: 17:f3:de:5e:9f:0f:19:e9:8e:f6:1f:32:26:6e:20:c4:07:ae:30:ee
+# SHA256 Fingerprint: 01:8e:13:f0:77:25:32:cf:80:9b:d1:b1:72:81:86:72:83:fc:48:c6:e1:3b:e9:c6:98:12:85:4a:49:0c:1b:05
+-----BEGIN CERTIFICATE-----
+MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQsw
+CQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURp
+Z2lDZXJ0IFRMUyBFQ0MgUDM4NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2
+MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDkRpZ2lDZXJ0LCBJ
+bmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQgUm9vdCBHNTB2MBAG
+ByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1TzvdlHJS
+7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp
+0zVozptjn4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICIS
+B4CIfBFqMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49
+BAMDA2gAMGUCMQCJao1H5+z8blUD2WdsJk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQ
+LgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIxAJSdYsiJvRmEFOml+wG4
+DXZDjC5Ty3zfDBeWUA==
+-----END CERTIFICATE-----
+
+# Issuer: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc.
+# Subject: CN=DigiCert TLS RSA4096 Root G5 O=DigiCert, Inc.
+# Label: "DigiCert TLS RSA4096 Root G5"
+# Serial: 11930366277458970227240571539258396554
+# MD5 Fingerprint: ac:fe:f7:34:96:a9:f2:b3:b4:12:4b:e4:27:41:6f:e1
+# SHA1 Fingerprint: a7:88:49:dc:5d:7c:75:8c:8c:de:39:98:56:b3:aa:d0:b2:a5:71:35
+# SHA256 Fingerprint: 37:1a:00:dc:05:33:b3:72:1a:7e:eb:40:e8:41:9e:70:79:9d:2b:0a:0f:2c:1d:80:69:31:65:f7:ce:c4:ad:75
+-----BEGIN CERTIFICATE-----
+MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBN
+MQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMT
+HERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcN
+NDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJVUzEXMBUGA1UEChMORGlnaUNlcnQs
+IEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2IFJvb3QgRzUwggIi
+MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS87IE+
+ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG0
+2C+JFvuUAT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgp
+wgscONyfMXdcvyej/Cestyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZM
+pG2T6T867jp8nVid9E6P/DsjyG244gXazOvswzH016cpVIDPRFtMbzCe88zdH5RD
+nU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnVDdXifBBiqmvwPXbzP6Po
+sMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9qTXeXAaDx
+Zre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cd
+Lvvyz6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvX
+KyY//SovcfXWJL5/MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNe
+XoVPzthwiHvOAbWWl9fNff2C+MIkwcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPL
+tgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4EFgQUUTMc7TZArxfTJc1paPKv
+TiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN
+AQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw
+GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7H
+PNtQOa27PShNlnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLF
+O4uJ+DQtpBflF+aZfTCIITfNMBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQ
+REtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/u4cnYiWB39yhL/btp/96j1EuMPik
+AdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9GOUrYU9DzLjtxpdRv
+/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh47a+
+p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilw
+MUc/dNAUFvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WF
+qUITVuwhd4GTWgzqltlJyqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCK
+ovfepEWFJqgejF0pW8hL2JpqA15w8oVPbEtoL8pU9ozaMv7Da4M/OMZ+
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certainly Root R1 O=Certainly
+# Subject: CN=Certainly Root R1 O=Certainly
+# Label: "Certainly Root R1"
+# Serial: 188833316161142517227353805653483829216
+# MD5 Fingerprint: 07:70:d4:3e:82:87:a0:fa:33:36:13:f4:fa:33:e7:12
+# SHA1 Fingerprint: a0:50:ee:0f:28:71:f4:27:b2:12:6d:6f:50:96:25:ba:cc:86:42:af
+# SHA256 Fingerprint: 77:b8:2c:d8:64:4c:43:05:f7:ac:c5:cb:15:6b:45:67:50:04:03:3d:51:c6:0c:62:02:a8:e0:c3:34:67:d3:a0
+-----BEGIN CERTIFICATE-----
+MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAw
+PTELMAkGA1UEBhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2Vy
+dGFpbmx5IFJvb3QgUjEwHhcNMjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9
+MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0
+YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANA2
+1B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O5MQT
+vqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbed
+aFySpvXl8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b0
+1C7jcvk2xusVtyWMOvwlDbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5
+r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGIXsXwClTNSaa/ApzSRKft43jvRl5tcdF5
+cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkNKPl6I7ENPT2a/Z2B7yyQ
+wHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQAjeZjOVJ
+6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA
+2CnbrlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyH
+Wyf5QBGenDPBt+U1VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMR
+eiFPCyEQtkA6qyI6BJyLm4SGcprSp6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB
+/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTgqj8ljZ9EXME66C6u
+d0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAszHQNTVfSVcOQr
+PbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d
+8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi
+1wrykXprOQ4vMMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrd
+rRT90+7iIgXr0PK3aBLXWopBGsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9di
+taY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+gjwN/KUD+nsa2UUeYNrEjvn8K8l7
+lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgHJBu6haEaBQmAupVj
+yTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7fpYn
+Kx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLy
+yCwzk5Iwx06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5n
+wXARPbv0+Em34yaXOp/SX3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6
+OV+KmalBWQewLK8=
+-----END CERTIFICATE-----
+
+# Issuer: CN=Certainly Root E1 O=Certainly
+# Subject: CN=Certainly Root E1 O=Certainly
+# Label: "Certainly Root E1"
+# Serial: 8168531406727139161245376702891150584
+# MD5 Fingerprint: 0a:9e:ca:cd:3e:52:50:c6:36:f3:4b:a3:ed:a7:53:e9
+# SHA1 Fingerprint: f9:e1:6d:dc:01:89:cf:d5:82:45:63:3e:c5:37:7d:c2:eb:93:6f:2b
+# SHA256 Fingerprint: b4:58:5f:22:e4:ac:75:6a:4e:86:12:a1:36:1c:5d:9d:03:1a:93:fd:84:fe:bb:77:8f:a3:06:8b:0f:c4:2d:c2
+-----BEGIN CERTIFICATE-----
+MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQsw
+CQYDVQQGEwJVUzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlu
+bHkgUm9vdCBFMTAeFw0yMTA0MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJ
+BgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlubHkxGjAYBgNVBAMTEUNlcnRhaW5s
+eSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4fxzf7flHh4axpMCK
++IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9YBk2
+QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4
+hevIIgcwCgYIKoZIzj0EAwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozm
+ut6Dacpps6kFtZaSF4fC0urQe87YQVt8rgIwRt7qy12a7DLCZRawTDBcMPPaTnOG
+BtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
+# Subject: CN=E-Tugra Global Root CA RSA v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
+# Label: "E-Tugra Global Root CA RSA v3"
+# Serial: 75951268308633135324246244059508261641472512052
+# MD5 Fingerprint: 22:be:10:f6:c2:f8:03:88:73:5f:33:29:47:28:47:a4
+# SHA1 Fingerprint: e9:a8:5d:22:14:52:1c:5b:aa:0a:b4:be:24:6a:23:8a:c9:ba:e2:a9
+# SHA256 Fingerprint: ef:66:b0:b1:0a:3c:db:9f:2e:36:48:c7:6b:d2:af:18:ea:d2:bf:e6:f1:17:65:5e:28:c4:06:0d:a1:a3:f4:c2
+-----BEGIN CERTIFICATE-----
+MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQEL
+BQAwgYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUt
+VHVncmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYw
+JAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIFJTQSB2MzAeFw0yMDAzMTgw
+OTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMG
+QW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1
+Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBD
+QSBSU0EgdjMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J7
+7gnJY9LTQ91ew6aEOErxjYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscx
+uj7X/iWpKo429NEvx7epXTPcMHD4QGxLsqYxYdE0PD0xesevxKenhOGXpOhL9hd8
+7jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF/YP9f4RtNGx/ardLAQO/
+rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8qQedmCeFL
+l+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bG
+wzrwbMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4
+znKS4iicvObpCdg604nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBO
+M/J+JjKsBY04pOZ2PJ8QaQ5tndLBeSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK
+5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiMbIedBi3x7+PmBvrFZhNb/FAH
+nnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbgh3cXTJ2w2Amo
+DVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD
+AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSy
+tK7mLfcm1ap1LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEL
+BQADggIBAImocn+M684uGMQQgC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ
+6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN438o2Fi+CiJ+8EUdPdk3ILY7r3y18
+Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/qln0F7psTpURs+APQ
+3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3sSdPk
+vmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn9
+9t2HVhjYsCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQ
+mhty3QUBjYZgv6Rn7rWlDdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YA
+VSgU7NbHEqIbZULpkejLPoeJVF3Zr52XnGnnCv8PWniLYypMfUeUP95L6VPQMPHF
+9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFHIK+WEj5jlB0E5y67hscM
+moi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiXYY60MGo8
+bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ
+-----END CERTIFICATE-----
+
+# Issuer: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
+# Subject: CN=E-Tugra Global Root CA ECC v3 O=E-Tugra EBG A.S. OU=E-Tugra Trust Center
+# Label: "E-Tugra Global Root CA ECC v3"
+# Serial: 218504919822255052842371958738296604628416471745
+# MD5 Fingerprint: 46:bc:81:bb:f1:b5:1e:f7:4b:96:bc:14:e2:e7:27:64
+# SHA1 Fingerprint: 8a:2f:af:57:53:b1:b0:e6:a1:04:ec:5b:6a:69:71:6d:f6:1c:e2:84
+# SHA256 Fingerprint: 87:3f:46:85:fa:7f:56:36:25:25:2e:6d:36:bc:d7:f1:6f:c2:49:51:f2:64:e4:7e:1b:95:4f:49:08:cd:ca:13
+-----BEGIN CERTIFICATE-----
+MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMw
+gYAxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVn
+cmEgRUJHIEEuUy4xHTAbBgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYD
+VQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENBIEVDQyB2MzAeFw0yMDAzMTgwOTQ2
+NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEPMA0GA1UEBxMGQW5r
+YXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1Z3Jh
+IFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBF
+Q0MgdjMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQ
+KczLWYHMjLiSF4mDKpL2w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YK
+fWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMB
+Af8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQzPUwHQYDVR0OBBYEFP+C
+MXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNp
+ADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/6
+7W4WAie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFx
+vmjkI6TZraE3
+-----END CERTIFICATE-----
+
+# Issuer: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD.
+# Subject: CN=Security Communication RootCA3 O=SECOM Trust Systems CO.,LTD.
+# Label: "Security Communication RootCA3"
+# Serial: 16247922307909811815
+# MD5 Fingerprint: 1c:9a:16:ff:9e:5c:e0:4d:8a:14:01:f4:35:5d:29:26
+# SHA1 Fingerprint: c3:03:c8:22:74:92:e5:61:a2:9c:5f:79:91:2b:1e:44:13:91:30:3a
+# SHA256 Fingerprint: 24:a5:5c:2a:b0:51:44:2d:06:17:76:65:41:23:9a:4a:d0:32:d7:c5:51:75:aa:34:ff:de:2f:bc:4f:5c:52:94
+-----BEGIN CERTIFICATE-----
+MIIFfzCCA2egAwIBAgIJAOF8N0D9G/5nMA0GCSqGSIb3DQEBDAUAMF0xCzAJBgNV
+BAYTAkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMScw
+JQYDVQQDEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTMwHhcNMTYwNjE2
+MDYxNzE2WhcNMzgwMTE4MDYxNzE2WjBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc
+U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UEAxMeU2VjdXJpdHkg
+Q29tbXVuaWNhdGlvbiBSb290Q0EzMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC
+CgKCAgEA48lySfcw3gl8qUCBWNO0Ot26YQ+TUG5pPDXC7ltzkBtnTCHsXzW7OT4r
+CmDvu20rhvtxosis5FaU+cmvsXLUIKx00rgVrVH+hXShuRD+BYD5UpOzQD11EKzA
+lrenfna84xtSGc4RHwsENPXY9Wk8d/Nk9A2qhd7gCVAEF5aEt8iKvE1y/By7z/MG
+TfmfZPd+pmaGNXHIEYBMwXFAWB6+oHP2/D5Q4eAvJj1+XCO1eXDe+uDRpdYMQXF7
+9+qMHIjH7Iv10S9VlkZ8WjtYO/u62C21Jdp6Ts9EriGmnpjKIG58u4iFW/vAEGK7
+8vknR+/RiTlDxN/e4UG/VHMgly1s2vPUB6PmudhvrvyMGS7TZ2crldtYXLVqAvO4
+g160a75BflcJdURQVc1aEWEhCmHCqYj9E7wtiS/NYeCVvsq1e+F7NGcLH7YMx3we
+GVPKp7FKFSBWFHA9K4IsD50VHUeAR/94mQ4xr28+j+2GaR57GIgUssL8gjMunEst
++3A7caoreyYn8xrC3PsXuKHqy6C0rtOUfnrQq8PsOC0RLoi/1D+tEjtCrI8Cbn3M
+0V9hvqG8OmpI6iZVIhZdXw3/JzOfGAN0iltSIEdrRU0id4xVJ/CvHozJgyJUt5rQ
+T9nO/NkuHJYosQLTA70lUhw0Zk8jq/R3gpYd0VcwCBEF/VfR2ccCAwEAAaNCMEAw
+HQYDVR0OBBYEFGQUfPxYchamCik0FW8qy7z8r6irMA4GA1UdDwEB/wQEAwIBBjAP
+BgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBDAUAA4ICAQDcAiMI4u8hOscNtybS
+YpOnpSNyByCCYN8Y11StaSWSntkUz5m5UoHPrmyKO1o5yGwBQ8IibQLwYs1OY0PA
+FNr0Y/Dq9HHuTofjcan0yVflLl8cebsjqodEV+m9NU1Bu0soo5iyG9kLFwfl9+qd
+9XbXv8S2gVj/yP9kaWJ5rW4OH3/uHWnlt3Jxs/6lATWUVCvAUm2PVcTJ0rjLyjQI
+UYWg9by0F1jqClx6vWPGOi//lkkZhOpn2ASxYfQAW0q3nHE3GYV5v4GwxxMOdnE+
+OoAGrgYWp421wsTL/0ClXI2lyTrtcoHKXJg80jQDdwj98ClZXSEIx2C/pHF7uNke
+gr4Jr2VvKKu/S7XuPghHJ6APbw+LP6yVGPO5DtxnVW5inkYO0QR4ynKudtml+LLf
+iAlhi+8kTtFZP1rUPcmTPCtk9YENFpb3ksP+MW/oKjJ0DvRMmEoYDjBU1cXrvMUV
+nuiZIesnKwkK2/HmcBhWuwzkvvnoEKQTkrgc4NtnHVMDpCKn3F2SEDzq//wbEBrD
+2NCcnWXL0CsnMQMeNuE9dnUM/0Umud1RvCPHX9jYhxBAEg09ODfnRDwYwFMJZI//
+1ZqmfHAuc1Uh6N//g7kdPjIe1qZ9LPFm6Vwdp6POXiUyK+OVrCoHzrQoeIY8Laad
+TdJ0MN1kURXbg4NR16/9M51NZg==
+-----END CERTIFICATE-----
+
+# Issuer: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
+# Subject: CN=Security Communication ECC RootCA1 O=SECOM Trust Systems CO.,LTD.
+# Label: "Security Communication ECC RootCA1"
+# Serial: 15446673492073852651
+# MD5 Fingerprint: 7e:43:b0:92:68:ec:05:43:4c:98:ab:5d:35:2e:7e:86
+# SHA1 Fingerprint: b8:0e:26:a9:bf:d2:b2:3b:c0:ef:46:c9:ba:c7:bb:f6:1d:0d:41:41
+# SHA256 Fingerprint: e7:4f:bd:a5:5b:d5:64:c4:73:a3:6b:44:1a:a7:99:c8:a6:8e:07:74:40:e8:28:8b:9f:a1:e5:0e:4b:ba:ca:11
+-----BEGIN CERTIFICATE-----
+MIICODCCAb6gAwIBAgIJANZdm7N4gS7rMAoGCCqGSM49BAMDMGExCzAJBgNVBAYT
+AkpQMSUwIwYDVQQKExxTRUNPTSBUcnVzdCBTeXN0ZW1zIENPLixMVEQuMSswKQYD
+VQQDEyJTZWN1cml0eSBDb21tdW5pY2F0aW9uIEVDQyBSb290Q0ExMB4XDTE2MDYx
+NjA1MTUyOFoXDTM4MDExODA1MTUyOFowYTELMAkGA1UEBhMCSlAxJTAjBgNVBAoT
+HFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKzApBgNVBAMTIlNlY3VyaXR5
+IENvbW11bmljYXRpb24gRUNDIFJvb3RDQTEwdjAQBgcqhkjOPQIBBgUrgQQAIgNi
+AASkpW9gAwPDvTH00xecK4R1rOX9PVdu12O/5gSJko6BnOPpR27KkBLIE+Cnnfdl
+dB9sELLo5OnvbYUymUSxXv3MdhDYW72ixvnWQuRXdtyQwjWpS4g8EkdtXP9JTxpK
+ULGjQjBAMB0GA1UdDgQWBBSGHOf+LaVKiwj+KBH6vqNm+GBZLzAOBgNVHQ8BAf8E
+BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjAVXUI9/Lbu
+9zuxNuie9sRGKEkz0FhDKmMpzE2xtHqiuQ04pV1IKv3LsnNdo4gIxwwCMQDAqy0O
+be0YottT6SXbVQjgUMzfRGEWgqtJsLKB7HOHeLRMsmIbEvoWTSVLY70eN9k=
+-----END CERTIFICATE-----
diff --git a/third_party/python/pip/pip/_vendor/certifi/core.py b/third_party/python/pip/pip/_vendor/certifi/core.py
new file mode 100644
index 0000000000..c3e546604c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/certifi/core.py
@@ -0,0 +1,108 @@
+"""
+certifi.py
+~~~~~~~~~~
+
+This module returns the installation location of cacert.pem or its contents.
+"""
+import sys
+
+
+if sys.version_info >= (3, 11):
+
+ from importlib.resources import as_file, files
+
+ _CACERT_CTX = None
+ _CACERT_PATH = None
+
+ def where() -> str:
+        # This is slightly terrible, but we want to delay extracting the file
+        # in cases where we're inside of a zipimport situation until someone
+        # actually calls where(). We don't want to re-extract the file on
+        # every call of where(), so we'll do it once, then store it in a
+        # global variable.
+ global _CACERT_CTX
+ global _CACERT_PATH
+ if _CACERT_PATH is None:
+            # This is slightly janky: the importlib.resources API wants you to
+ # manage the cleanup of this file, so it doesn't actually return a
+ # path, it returns a context manager that will give you the path
+ # when you enter it and will do any cleanup when you leave it. In
+ # the common case of not needing a temporary file, it will just
+ # return the file system location and the __exit__() is a no-op.
+ #
+ # We also have to hold onto the actual context manager, because
+ # it will do the cleanup whenever it gets garbage collected, so
+ # we will also store that at the global level as well.
+ _CACERT_CTX = as_file(files("pip._vendor.certifi").joinpath("cacert.pem"))
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
+
+ return _CACERT_PATH
+
+ def contents() -> str:
+ return files("pip._vendor.certifi").joinpath("cacert.pem").read_text(encoding="ascii")
+
+elif sys.version_info >= (3, 7):
+
+ from importlib.resources import path as get_path, read_text
+
+ _CACERT_CTX = None
+ _CACERT_PATH = None
+
+ def where() -> str:
+        # This is slightly terrible, but we want to delay extracting the
+        # file in cases where we're inside of a zipimport situation until
+        # someone actually calls where(). We don't want to re-extract the
+        # file on every call of where(), so we'll do it once, then store
+        # it in a global variable.
+ global _CACERT_CTX
+ global _CACERT_PATH
+ if _CACERT_PATH is None:
+            # This is slightly janky: the importlib.resources API wants you
+ # to manage the cleanup of this file, so it doesn't actually
+ # return a path, it returns a context manager that will give
+ # you the path when you enter it and will do any cleanup when
+ # you leave it. In the common case of not needing a temporary
+ # file, it will just return the file system location and the
+ # __exit__() is a no-op.
+ #
+ # We also have to hold onto the actual context manager, because
+ # it will do the cleanup whenever it gets garbage collected, so
+ # we will also store that at the global level as well.
+ _CACERT_CTX = get_path("pip._vendor.certifi", "cacert.pem")
+ _CACERT_PATH = str(_CACERT_CTX.__enter__())
+
+ return _CACERT_PATH
+
+ def contents() -> str:
+ return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")
+
+else:
+ import os
+ import types
+ from typing import Union
+
+ Package = Union[types.ModuleType, str]
+ Resource = Union[str, "os.PathLike"]
+
+    # This fallback will work for Python versions prior to 3.7 that lack the
+    # importlib.resources module. It relies on the existing `where` function,
+    # so it won't address issues with environments like PyOxidizer that don't
+    # set __file__ on modules.
+ def read_text(
+ package: Package,
+ resource: Resource,
+ encoding: str = 'utf-8',
+ errors: str = 'strict'
+ ) -> str:
+ with open(where(), encoding=encoding) as data:
+ return data.read()
+
+ # If we don't have importlib.resources, then we will just do the old logic
+ # of assuming we're on the filesystem and munge the path directly.
+ def where() -> str:
+ f = os.path.dirname(__file__)
+
+ return os.path.join(f, "cacert.pem")
+
+ def contents() -> str:
+ return read_text("pip._vendor.certifi", "cacert.pem", encoding="ascii")
diff --git a/third_party/python/pip/pip/_vendor/chardet/__init__.py b/third_party/python/pip/pip/_vendor/chardet/__init__.py
new file mode 100644
index 0000000000..fe581623d8
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/__init__.py
@@ -0,0 +1,115 @@
+######################## BEGIN LICENSE BLOCK ########################
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import List, Union
+
+from .charsetgroupprober import CharSetGroupProber
+from .charsetprober import CharSetProber
+from .enums import InputState
+from .resultdict import ResultDict
+from .universaldetector import UniversalDetector
+from .version import VERSION, __version__
+
+__all__ = ["UniversalDetector", "detect", "detect_all", "__version__", "VERSION"]
+
+
+def detect(
+ byte_str: Union[bytes, bytearray], should_rename_legacy: bool = False
+) -> ResultDict:
+ """
+ Detect the encoding of the given byte string.
+
+ :param byte_str: The byte sequence to examine.
+ :type byte_str: ``bytes`` or ``bytearray``
+ :param should_rename_legacy: Should we rename legacy encodings
+ to their more modern equivalents?
+ :type should_rename_legacy: ``bool``
+ """
+ if not isinstance(byte_str, bytearray):
+ if not isinstance(byte_str, bytes):
+ raise TypeError(
+ f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
+ )
+ byte_str = bytearray(byte_str)
+ detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
+ detector.feed(byte_str)
+ return detector.close()
+
+
+def detect_all(
+ byte_str: Union[bytes, bytearray],
+ ignore_threshold: bool = False,
+ should_rename_legacy: bool = False,
+) -> List[ResultDict]:
+ """
+ Detect all the possible encodings of the given byte string.
+
+ :param byte_str: The byte sequence to examine.
+ :type byte_str: ``bytes`` or ``bytearray``
+ :param ignore_threshold: Include encodings that are below
+ ``UniversalDetector.MINIMUM_THRESHOLD``
+ in results.
+ :type ignore_threshold: ``bool``
+ :param should_rename_legacy: Should we rename legacy encodings
+ to their more modern equivalents?
+ :type should_rename_legacy: ``bool``
+ """
+ if not isinstance(byte_str, bytearray):
+ if not isinstance(byte_str, bytes):
+ raise TypeError(
+ f"Expected object of type bytes or bytearray, got: {type(byte_str)}"
+ )
+ byte_str = bytearray(byte_str)
+
+ detector = UniversalDetector(should_rename_legacy=should_rename_legacy)
+ detector.feed(byte_str)
+ detector.close()
+
+ if detector.input_state == InputState.HIGH_BYTE:
+ results: List[ResultDict] = []
+ probers: List[CharSetProber] = []
+ for prober in detector.charset_probers:
+ if isinstance(prober, CharSetGroupProber):
+                probers.extend(prober.probers)
+ else:
+ probers.append(prober)
+ for prober in probers:
+ if ignore_threshold or prober.get_confidence() > detector.MINIMUM_THRESHOLD:
+ charset_name = prober.charset_name or ""
+ lower_charset_name = charset_name.lower()
+ # Use Windows encoding name instead of ISO-8859 if we saw any
+ # extra Windows-specific bytes
+ if lower_charset_name.startswith("iso-8859") and detector.has_win_bytes:
+ charset_name = detector.ISO_WIN_MAP.get(
+ lower_charset_name, charset_name
+ )
+ # Rename legacy encodings with superset encodings if asked
+ if should_rename_legacy:
+ charset_name = detector.LEGACY_MAP.get(
+ charset_name.lower(), charset_name
+ )
+ results.append(
+ {
+ "encoding": charset_name,
+ "confidence": prober.get_confidence(),
+ "language": prober.language,
+ }
+ )
+ if len(results) > 0:
+ return sorted(results, key=lambda result: -result["confidence"])
+
+ return [detector.result]
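A minimal usage sketch of the detect() and detect_all() entry points defined
above, using the vendored import path from this diff. The sample text is an
illustrative assumption, and the printed confidence is only an example;
detection quality varies with input length.

    from pip._vendor.chardet import detect, detect_all

    # Bytes in a legacy encoding; longer inputs give the probers more evidence.
    sample = ("これは文字コード判定のテストです。" * 10).encode("shift_jis")

    best = detect(sample)
    print(best)  # e.g. {'encoding': 'SHIFT_JIS', 'confidence': 0.99, 'language': 'Japanese'}

    # detect_all() returns every candidate above MINIMUM_THRESHOLD (or all of
    # them with ignore_threshold=True), sorted by descending confidence.
    for result in detect_all(sample, ignore_threshold=True):
        print(result["encoding"], result["confidence"], result["language"])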
diff --git a/third_party/python/pip/pip/_vendor/chardet/big5freq.py b/third_party/python/pip/pip/_vendor/chardet/big5freq.py
new file mode 100644
index 0000000000..87d9f972ed
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/big5freq.py
@@ -0,0 +1,386 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Big5 frequency table
+# by Taiwan's Mandarin Promotion Council
+# <http://www.edu.tw:81/mandr/>
+#
+# 128 --> 0.42261
+# 256 --> 0.57851
+# 512 --> 0.74851
+# 1024 --> 0.89384
+# 2048 --> 0.97583
+#
+# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
+# Random Distribution Ratio (RDR) = 512/(5401-512) = 0.105
+#
+# The typical distribution ratio is about 25% of the ideal one, but still much
+# higher than the RDR
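+#
+# A quick check of the arithmetic above: 0.74851/(1-0.74851) = 0.74851/0.25149
+# ~= 2.976, and 512/(5401-512) = 512/4889 ~= 0.1047. The typical ratio defined
+# below, 0.75, gives 0.75/2.98 ~= 0.25, i.e. roughly 25% of the ideal ratio.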
+
+BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
+
+# Char to FreqOrder table
+BIG5_TABLE_SIZE = 5376
+# fmt: off
+BIG5_CHAR_TO_FREQ_ORDER = (
+ 1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
+3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
+1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
+ 63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
+3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
+4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
+5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
+ 630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
+ 179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
+ 995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
+2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
+1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
+3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
+ 706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
+1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
+3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
+2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
+ 437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
+3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
+1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
+5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
+ 266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
+5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
+1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
+ 32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
+ 188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
+3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
+3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
+ 324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
+2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
+2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
+ 314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
+ 287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
+3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
+1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
+1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
+1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
+2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
+ 265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
+4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
+1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
+5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
+2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
+ 383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
+ 98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
+ 523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
+ 710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
+5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
+ 379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
+1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
+ 585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
+ 690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
+5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
+1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
+ 544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
+3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
+4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
+3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
+ 279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
+ 610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
+1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
+4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
+3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
+3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
+2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
+5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
+3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
+5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
+1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
+2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
+1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
+ 78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
+1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
+4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
+3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
+ 534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
+ 165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
+ 626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
+2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
+5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
+1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
+2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
+1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
+1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
+5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
+5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
+5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
+3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
+4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
+4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
+2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
+5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
+3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
+ 598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
+5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
+5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
+1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
+2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
+3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
+4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
+5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
+3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
+4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
+1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
+1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
+4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
+1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
+ 240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
+1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
+1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
+3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
+ 619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
+5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
+2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
+1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
+1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
+5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
+ 829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
+4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
+ 375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
+2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
+ 444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
+1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
+1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
+ 730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
+4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
+4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
+1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
+3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
+5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
+5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
+1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
+2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
+1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
+3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
+2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
+3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
+2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
+4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
+4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
+3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
+ 97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
+3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
+ 424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
+3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
+4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
+3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
+1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
+5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
+ 199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
+5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
+1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
+ 391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
+4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
+4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
+ 397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
+2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
+2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
+3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
+1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
+4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
+2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
+1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
+1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
+2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
+3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
+1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
+5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
+1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
+4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
+1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
+ 135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
+1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
+4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
+4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
+2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
+1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
+4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
+ 660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
+5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
+2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
+3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
+4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
+ 790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
+5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
+5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
+1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
+4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
+4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
+2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
+3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
+3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
+2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
+1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
+4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
+3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
+3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
+2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
+4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
+5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
+3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
+2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
+3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
+1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
+2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
+3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
+4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
+2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
+2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
+5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
+1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
+2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
+1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
+3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
+4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
+2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
+3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
+3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
+2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
+4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
+2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
+3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
+4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
+5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
+3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
+ 194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
+1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
+4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
+1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
+4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
+5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
+ 510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
+5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
+5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
+2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
+3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
+2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
+2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
+ 681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
+1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
+4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
+3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
+3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
+ 838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
+2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
+ 625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
+2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
+4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
+1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
+4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
+1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
+3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
+ 574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
+3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
+5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
+5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
+3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
+3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
+1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
+2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
+5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
+1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
+1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
+3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
+ 919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
+1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
+4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
+5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
+2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
+3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
+ 516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
+1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
+2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
+2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
+5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
+5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
+5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
+2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
+2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
+1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
+4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
+3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
+3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
+4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
+4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
+2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
+2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
+5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
+4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
+5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
+4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
+ 502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
+ 121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
+1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
+3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
+4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
+1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
+5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
+2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
+2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
+3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
+5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
+1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
+3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
+5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
+1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
+5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
+2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
+3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
+2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
+3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
+3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
+3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
+4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
+ 803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
+2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
+4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
+3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
+5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
+1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
+5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
+ 425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
+1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
+ 479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
+4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
+1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
+4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
+1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
+ 433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
+3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
+4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
+5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
+ 938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
+3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
+ 890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
+2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376
+)
+# fmt: on
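
The table above maps a Big5 character's computed order (see Big5DistributionAnalysis.get_order in chardistribution.py below) to its frequency rank; the analyser counts a character as "frequent" when its rank is below 512. A minimal lookup sketch, assuming the vendored package imports as ``pip._vendor.chardet``::

    from pip._vendor.chardet.big5freq import BIG5_CHAR_TO_FREQ_ORDER, BIG5_TABLE_SIZE

    # order 0 corresponds to the first Big5 code point (lead 0xA4, trail 0x40)
    order = 0
    if order < BIG5_TABLE_SIZE:
        rank = BIG5_CHAR_TO_FREQ_ORDER[order]
        print(rank, rank < 512)  # characters with rank < 512 count as frequent
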
diff --git a/third_party/python/pip/pip/_vendor/chardet/big5prober.py b/third_party/python/pip/pip/_vendor/chardet/big5prober.py
new file mode 100644
index 0000000000..ef09c60e32
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/big5prober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import Big5DistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import BIG5_SM_MODEL
+
+
+class Big5Prober(MultiByteCharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
+ self.distribution_analyzer = Big5DistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "Big5"
+
+ @property
+ def language(self) -> str:
+ return "Chinese"
diff --git a/third_party/python/pip/pip/_vendor/chardet/chardistribution.py b/third_party/python/pip/pip/_vendor/chardet/chardistribution.py
new file mode 100644
index 0000000000..176cb99640
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/chardistribution.py
@@ -0,0 +1,261 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import Tuple, Union
+
+from .big5freq import (
+ BIG5_CHAR_TO_FREQ_ORDER,
+ BIG5_TABLE_SIZE,
+ BIG5_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .euckrfreq import (
+ EUCKR_CHAR_TO_FREQ_ORDER,
+ EUCKR_TABLE_SIZE,
+ EUCKR_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .euctwfreq import (
+ EUCTW_CHAR_TO_FREQ_ORDER,
+ EUCTW_TABLE_SIZE,
+ EUCTW_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .gb2312freq import (
+ GB2312_CHAR_TO_FREQ_ORDER,
+ GB2312_TABLE_SIZE,
+ GB2312_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .jisfreq import (
+ JIS_CHAR_TO_FREQ_ORDER,
+ JIS_TABLE_SIZE,
+ JIS_TYPICAL_DISTRIBUTION_RATIO,
+)
+from .johabfreq import JOHAB_TO_EUCKR_ORDER_TABLE
+
+
+class CharDistributionAnalysis:
+ ENOUGH_DATA_THRESHOLD = 1024
+ SURE_YES = 0.99
+ SURE_NO = 0.01
+ MINIMUM_DATA_THRESHOLD = 3
+
+ def __init__(self) -> None:
+ # Mapping table to get the frequency order from the char order (as
+ # returned by get_order())
+ self._char_to_freq_order: Tuple[int, ...] = tuple()
+ self._table_size = 0 # Size of above table
+ # This ratio is constant for a given language but varies between
+ # languages; it is used in calculating confidence. See
+ # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
+ # for further detail.
+ self.typical_distribution_ratio = 0.0
+ self._done = False
+ self._total_chars = 0
+ self._freq_chars = 0
+ self.reset()
+
+ def reset(self) -> None:
+ """reset analyser, clear any state"""
+ # If this flag is set to True, detection is done and conclusion has
+ # been made
+ self._done = False
+ self._total_chars = 0 # Total characters encountered
+ # The number of characters whose frequency order is less than 512
+ self._freq_chars = 0
+
+ def feed(self, char: Union[bytes, bytearray], char_len: int) -> None:
+ """feed a character with known length"""
+ if char_len == 2:
+ # we only care about 2-byte characters in our distribution analysis
+ order = self.get_order(char)
+ else:
+ order = -1
+ if order >= 0:
+ self._total_chars += 1
+ # order is valid
+ if order < self._table_size:
+ if 512 > self._char_to_freq_order[order]:
+ self._freq_chars += 1
+
+ def get_confidence(self) -> float:
+ """return confidence based on existing data"""
+ # if we didn't receive enough characters in our consideration range,
+ # return a negative answer
+ if self._total_chars <= 0 or self._freq_chars <= self.MINIMUM_DATA_THRESHOLD:
+ return self.SURE_NO
+
+ if self._total_chars != self._freq_chars:
+ r = self._freq_chars / (
+ (self._total_chars - self._freq_chars) * self.typical_distribution_ratio
+ )
+ if r < self.SURE_YES:
+ return r
+
+ # normalize confidence (we don't want to be 100% sure)
+ return self.SURE_YES
+
+ def got_enough_data(self) -> bool:
+ # It is not necessary to receive all of the data to draw a conclusion;
+ # for charset detection, a certain amount of data is enough
+ return self._total_chars > self.ENOUGH_DATA_THRESHOLD
+
+ def get_order(self, _: Union[bytes, bytearray]) -> int:
+ # We do not handle characters based on the original encoding string,
+ # but convert this encoding string to a number, here called order.
+ # This allows multiple encodings of a language to share one frequency
+ # table.
+ return -1
+
+
+class EUCTWDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self) -> None:
+ super().__init__()
+ self._char_to_freq_order = EUCTW_CHAR_TO_FREQ_ORDER
+ self._table_size = EUCTW_TABLE_SIZE
+ self.typical_distribution_ratio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+ # for euc-TW encoding, we are interested
+ # first byte range: 0xc4 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char = byte_str[0]
+ if first_char >= 0xC4:
+ return 94 * (first_char - 0xC4) + byte_str[1] - 0xA1
+ return -1
+
+
+class EUCKRDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self) -> None:
+ super().__init__()
+ self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
+ self._table_size = EUCKR_TABLE_SIZE
+ self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+ # for euc-KR encoding, we are interested
+ # first byte range: 0xb0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char = byte_str[0]
+ if first_char >= 0xB0:
+ return 94 * (first_char - 0xB0) + byte_str[1] - 0xA1
+ return -1
+
+
+class JOHABDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self) -> None:
+ super().__init__()
+ self._char_to_freq_order = EUCKR_CHAR_TO_FREQ_ORDER
+ self._table_size = EUCKR_TABLE_SIZE
+ self.typical_distribution_ratio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+ first_char = byte_str[0]
+ if 0x88 <= first_char < 0xD4:
+ code = first_char * 256 + byte_str[1]
+ return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1)
+ return -1
+
+
+class GB2312DistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self) -> None:
+ super().__init__()
+ self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
+ self._table_size = GB2312_TABLE_SIZE
+ self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+ # for GB2312 encoding, we are interested
+ # first byte range: 0xb0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char, second_char = byte_str[0], byte_str[1]
+ if (first_char >= 0xB0) and (second_char >= 0xA1):
+ return 94 * (first_char - 0xB0) + second_char - 0xA1
+ return -1
+
+
+class Big5DistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self) -> None:
+ super().__init__()
+ self._char_to_freq_order = BIG5_CHAR_TO_FREQ_ORDER
+ self._table_size = BIG5_TABLE_SIZE
+ self.typical_distribution_ratio = BIG5_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+ # for big5 encoding, we are interested
+ # first byte range: 0xa4 -- 0xfe
+ # second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char, second_char = byte_str[0], byte_str[1]
+ if first_char >= 0xA4:
+ if second_char >= 0xA1:
+ return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
+ return 157 * (first_char - 0xA4) + second_char - 0x40
+ return -1
+
+
+class SJISDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self) -> None:
+ super().__init__()
+ self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+ self._table_size = JIS_TABLE_SIZE
+ self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+ # for sjis encoding, we are interested
+ # first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
+ # second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
+ # no validation needed here. State machine has done that
+ first_char, second_char = byte_str[0], byte_str[1]
+ if 0x81 <= first_char <= 0x9F:
+ order = 188 * (first_char - 0x81)
+ elif 0xE0 <= first_char <= 0xEF:
+ order = 188 * (first_char - 0xE0 + 31)
+ else:
+ return -1
+ order = order + second_char - 0x40
+ if second_char > 0x7F:
+ order = -1
+ return order
+
+
+class EUCJPDistributionAnalysis(CharDistributionAnalysis):
+ def __init__(self) -> None:
+ super().__init__()
+ self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
+ self._table_size = JIS_TABLE_SIZE
+ self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
+
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> int:
+ # for euc-JP encoding, we are interested
+ # first byte range: 0xa0 -- 0xfe
+ # second byte range: 0xa1 -- 0xfe
+ # no validation needed here. State machine has done that
+ char = byte_str[0]
+ if char >= 0xA0:
+ return 94 * (char - 0xA1) + byte_str[1] - 0xA1
+ return -1
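
The get_order implementations above are plain arithmetic: each two-byte character is flattened into a row-major index, e.g. 94 * (first - 0xB0) + (second - 0xA1) for EUC-KR, and get_confidence computes freq_chars / ((total_chars - freq_chars) * typical_distribution_ratio), capped at SURE_YES. A small sketch of both, with the numbers following directly from the code above::

    from pip._vendor.chardet.chardistribution import EUCKRDistributionAnalysis

    analysis = EUCKRDistributionAnalysis()
    # 94 * (0xB0 - 0xB0) + (0xA1 - 0xA1) == 0: the very first EUC-KR code point
    print(analysis.get_order(b"\xb0\xa1"))  # -> 0
    analysis.feed(b"\xb0\xa1", 2)
    # one frequent character is still below MINIMUM_DATA_THRESHOLD, so the
    # analyser stays at SURE_NO (0.01) until it has seen more data
    print(analysis.get_confidence())
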
diff --git a/third_party/python/pip/pip/_vendor/chardet/charsetgroupprober.py b/third_party/python/pip/pip/_vendor/chardet/charsetgroupprober.py
new file mode 100644
index 0000000000..6def56b4a7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/charsetgroupprober.py
@@ -0,0 +1,106 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import List, Optional, Union
+
+from .charsetprober import CharSetProber
+from .enums import LanguageFilter, ProbingState
+
+
+class CharSetGroupProber(CharSetProber):
+ def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+ super().__init__(lang_filter=lang_filter)
+ self._active_num = 0
+ self.probers: List[CharSetProber] = []
+ self._best_guess_prober: Optional[CharSetProber] = None
+
+ def reset(self) -> None:
+ super().reset()
+ self._active_num = 0
+ for prober in self.probers:
+ prober.reset()
+ prober.active = True
+ self._active_num += 1
+ self._best_guess_prober = None
+
+ @property
+ def charset_name(self) -> Optional[str]:
+ if not self._best_guess_prober:
+ self.get_confidence()
+ if not self._best_guess_prober:
+ return None
+ return self._best_guess_prober.charset_name
+
+ @property
+ def language(self) -> Optional[str]:
+ if not self._best_guess_prober:
+ self.get_confidence()
+ if not self._best_guess_prober:
+ return None
+ return self._best_guess_prober.language
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ for prober in self.probers:
+ if not prober.active:
+ continue
+ state = prober.feed(byte_str)
+ if not state:
+ continue
+ if state == ProbingState.FOUND_IT:
+ self._best_guess_prober = prober
+ self._state = ProbingState.FOUND_IT
+ return self.state
+ if state == ProbingState.NOT_ME:
+ prober.active = False
+ self._active_num -= 1
+ if self._active_num <= 0:
+ self._state = ProbingState.NOT_ME
+ return self.state
+ return self.state
+
+ def get_confidence(self) -> float:
+ state = self.state
+ if state == ProbingState.FOUND_IT:
+ return 0.99
+ if state == ProbingState.NOT_ME:
+ return 0.01
+ best_conf = 0.0
+ self._best_guess_prober = None
+ for prober in self.probers:
+ if not prober.active:
+ self.logger.debug("%s not active", prober.charset_name)
+ continue
+ conf = prober.get_confidence()
+ self.logger.debug(
+ "%s %s confidence = %s", prober.charset_name, prober.language, conf
+ )
+ if best_conf < conf:
+ best_conf = conf
+ self._best_guess_prober = prober
+ if not self._best_guess_prober:
+ return 0.0
+ return best_conf
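
CharSetGroupProber fans each buffer out to its child probers, deactivates any child that answers NOT_ME, and lazily picks the highest-confidence survivor as its best guess. In the package this wiring is done by subclasses that populate ``probers`` in their constructors, but a hand-built group shows the flow; a sketch, not how the library is normally driven::

    from pip._vendor.chardet.big5prober import Big5Prober
    from pip._vendor.chardet.charsetgroupprober import CharSetGroupProber
    from pip._vendor.chardet.cp949prober import CP949Prober

    group = CharSetGroupProber()
    group.probers = [Big5Prober(), CP949Prober()]
    group.reset()  # activates every child and clears the best guess
    group.feed(b"\xb0\xa1\xb0\xa1")
    # reading charset_name triggers get_confidence(), which picks the best child
    print(group.charset_name, group.get_confidence())
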
diff --git a/third_party/python/pip/pip/_vendor/chardet/charsetprober.py b/third_party/python/pip/pip/_vendor/chardet/charsetprober.py
new file mode 100644
index 0000000000..a103ca1135
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/charsetprober.py
@@ -0,0 +1,147 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import logging
+import re
+from typing import Optional, Union
+
+from .enums import LanguageFilter, ProbingState
+
+INTERNATIONAL_WORDS_PATTERN = re.compile(
+ b"[a-zA-Z]*[\x80-\xFF]+[a-zA-Z]*[^a-zA-Z\x80-\xFF]?"
+)
+
+
+class CharSetProber:
+
+ SHORTCUT_THRESHOLD = 0.95
+
+ def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+ self._state = ProbingState.DETECTING
+ self.active = True
+ self.lang_filter = lang_filter
+ self.logger = logging.getLogger(__name__)
+
+ def reset(self) -> None:
+ self._state = ProbingState.DETECTING
+
+ @property
+ def charset_name(self) -> Optional[str]:
+ return None
+
+ @property
+ def language(self) -> Optional[str]:
+ raise NotImplementedError
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ raise NotImplementedError
+
+ @property
+ def state(self) -> ProbingState:
+ return self._state
+
+ def get_confidence(self) -> float:
+ return 0.0
+
+ @staticmethod
+ def filter_high_byte_only(buf: Union[bytes, bytearray]) -> bytes:
+ buf = re.sub(b"([\x00-\x7F])+", b" ", buf)
+ return buf
+
+ @staticmethod
+ def filter_international_words(buf: Union[bytes, bytearray]) -> bytearray:
+ """
+ We define three types of bytes:
+ alphabet: English letters [a-zA-Z]
+ international: international characters [\x80-\xFF]
+ marker: everything else [^a-zA-Z\x80-\xFF]
+ The input buffer can be thought of as a series of words delimited
+ by markers. This function keeps only the words that contain at
+ least one international character. All contiguous sequences of markers
+ are replaced by a single ASCII space character.
+ This filter applies to all scripts which do not use English characters.
+ """
+ filtered = bytearray()
+
+ # This regex matches only words that have at least one international
+ # character. A matched word may include one marker character at
+ # the end.
+ words = INTERNATIONAL_WORDS_PATTERN.findall(buf)
+
+ for word in words:
+ filtered.extend(word[:-1])
+
+ # If the last character in the word is a marker, replace it with a
+ # space as markers shouldn't affect our analysis (they are used
+ # similarly across all languages and may thus have similar
+ # frequencies).
+ last_char = word[-1:]
+ if not last_char.isalpha() and last_char < b"\x80":
+ last_char = b" "
+ filtered.extend(last_char)
+
+ return filtered
+
+ @staticmethod
+ def remove_xml_tags(buf: Union[bytes, bytearray]) -> bytes:
+ """
+ Returns a copy of ``buf`` that retains only the sequences of English
+ alphabet and high byte characters that are not between <> characters.
+ This filter can be applied to all scripts which contain both English
+ characters and extended ASCII characters, but is currently only used by
+ ``Latin1Prober``.
+ """
+ filtered = bytearray()
+ in_tag = False
+ prev = 0
+ buf = memoryview(buf).cast("c")
+
+ for curr, buf_char in enumerate(buf):
+ # Check if we're coming out of or entering an XML tag
+
+ # https://github.com/python/typeshed/issues/8182
+ if buf_char == b">": # type: ignore[comparison-overlap]
+ prev = curr + 1
+ in_tag = False
+ # https://github.com/python/typeshed/issues/8182
+ elif buf_char == b"<": # type: ignore[comparison-overlap]
+ if curr > prev and not in_tag:
+ # Keep the text seen since the previous tag closed
+ filtered.extend(buf[prev:curr])
+ # Output a space to delimit stretch we kept
+ filtered.extend(b" ")
+ in_tag = True
+
+ # If we're not in a tag...
+ if not in_tag:
+ # Keep everything after the final tag (or the whole buffer if
+ # no tag was ever opened)
+ filtered.extend(buf[prev:])
+
+ return filtered
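
filter_international_words keeps only words containing at least one byte in \x80-\xFF and normalises each word's trailing marker to a space, which is what lets single-byte probers score letter-pair frequencies without interference from embedded English. Tracing the code above on a tiny buffer::

    from pip._vendor.chardet.charsetprober import CharSetProber

    print(CharSetProber.filter_international_words(b"abc d\xe9f ghi"))
    # -> bytearray(b'd\xe9f '): only the word with a high byte survives,
    #    and its trailing marker byte is replaced by a space

remove_xml_tags works the same way on the tag level: it drops everything between < and >, delimiting the retained stretches with single spaces.
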
diff --git a/third_party/python/pip/pip/_vendor/chardet/cli/__init__.py b/third_party/python/pip/pip/_vendor/chardet/cli/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/cli/__init__.py
diff --git a/third_party/python/pip/pip/_vendor/chardet/cli/chardetect.py b/third_party/python/pip/pip/_vendor/chardet/cli/chardetect.py
new file mode 100644
index 0000000000..43f6e144f6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/cli/chardetect.py
@@ -0,0 +1,112 @@
+"""
+Script which takes one or more file paths and reports on their detected
+encodings
+
+Example::
+
+ % chardetect somefile someotherfile
+ somefile: windows-1252 with confidence 0.5
+ someotherfile: ascii with confidence 1.0
+
+If no paths are provided, it takes its input from stdin.
+
+"""
+
+
+import argparse
+import sys
+from typing import Iterable, List, Optional
+
+from .. import __version__
+from ..universaldetector import UniversalDetector
+
+
+def description_of(
+ lines: Iterable[bytes],
+ name: str = "stdin",
+ minimal: bool = False,
+ should_rename_legacy: bool = False,
+) -> Optional[str]:
+ """
+ Return a string describing the probable encoding of a file or
+ list of strings.
+
+ :param lines: The lines to get the encoding of.
+ :type lines: Iterable of bytes
+ :param name: Name of file or collection of lines
+ :type name: str
+ :param minimal: If True, return only the encoding name
+ :type minimal: bool
+ :param should_rename_legacy: Should we rename legacy encodings to
+ their more modern equivalents?
+ :type should_rename_legacy: ``bool``
+ """
+ u = UniversalDetector(should_rename_legacy=should_rename_legacy)
+ for line in lines:
+ line = bytearray(line)
+ u.feed(line)
+ # shortcut out of the loop to save reading further - particularly useful if we read a BOM.
+ if u.done:
+ break
+ u.close()
+ result = u.result
+ if minimal:
+ return result["encoding"]
+ if result["encoding"]:
+ return f'{name}: {result["encoding"]} with confidence {result["confidence"]}'
+ return f"{name}: no result"
+
+
+def main(argv: Optional[List[str]] = None) -> None:
+ """
+ Handles command line arguments and gets things started.
+
+ :param argv: List of arguments, as if specified on the command-line.
+ If None, ``sys.argv[1:]`` is used instead.
+ :type argv: list of str
+ """
+ # Get command line arguments
+ parser = argparse.ArgumentParser(
+ description=(
+ "Takes one or more file paths and reports their detected encodings"
+ )
+ )
+ parser.add_argument(
+ "input",
+ help="File whose encoding we would like to determine. (default: stdin)",
+ type=argparse.FileType("rb"),
+ nargs="*",
+ default=[sys.stdin.buffer],
+ )
+ parser.add_argument(
+ "--minimal",
+ help="Print only the encoding to standard output",
+ action="store_true",
+ )
+ parser.add_argument(
+ "-l",
+ "--legacy",
+ help="Rename legacy encodings to more modern ones.",
+ action="store_true",
+ )
+ parser.add_argument(
+ "--version", action="version", version=f"%(prog)s {__version__}"
+ )
+ args = parser.parse_args(argv)
+
+ for f in args.input:
+ if f.isatty():
+ print(
+ "You are running chardetect interactively. Press "
+ "CTRL-D twice at the start of a blank line to signal the "
+ "end of your input. If you want help, run chardetect "
+ "--help\n",
+ file=sys.stderr,
+ )
+ print(
+ description_of(
+ f, f.name, minimal=args.minimal, should_rename_legacy=args.legacy
+ )
+ )
+
+
+if __name__ == "__main__":
+ main()
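
description_of can be driven directly with any iterable of byte strings, which is handy for exercising the CLI logic without spawning a process. A sketch (for pure-ASCII input the detector conventionally reports ascii with confidence 1.0)::

    from pip._vendor.chardet.cli.chardetect import description_of

    print(description_of([b"hello world\n"], name="demo"))
    # e.g. "demo: ascii with confidence 1.0"
    print(description_of([b"caf\xe9\n"], name="demo", minimal=True))
    # with minimal=True only the encoding name (possibly None) is returned
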
diff --git a/third_party/python/pip/pip/_vendor/chardet/codingstatemachine.py b/third_party/python/pip/pip/_vendor/chardet/codingstatemachine.py
new file mode 100644
index 0000000000..8ed4a8773b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/codingstatemachine.py
@@ -0,0 +1,90 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+import logging
+
+from .codingstatemachinedict import CodingStateMachineDict
+from .enums import MachineState
+
+
+class CodingStateMachine:
+ """
+ A state machine to verify a byte sequence for a particular encoding. For
+ each byte the detector receives, it will feed that byte to every active
+ state machine available, one byte at a time. The state machine changes its
+ state based on its previous state and the byte it receives. There are 3
+ states in a state machine that are of interest to an auto-detector:
+
+ START state: This is the state to start with, or the state reached after
+ a legal byte sequence (i.e. a valid code point) for a
+ character has been identified.
+
+ ME state: This indicates that the state machine identified a byte sequence
+ that is specific to the charset it is designed for and that
+ there is no other possible encoding which can contain this byte
+ sequence. This will lead to an immediate positive answer for
+ the detector.
+
+ ERROR state: This indicates the state machine identified an illegal byte
+ sequence for that encoding. This will lead to an immediate
+ negative answer for this encoding. Detector will exclude this
+ encoding from consideration from here on.
+ """
+
+ def __init__(self, sm: CodingStateMachineDict) -> None:
+ self._model = sm
+ self._curr_byte_pos = 0
+ self._curr_char_len = 0
+ self._curr_state = MachineState.START
+ self.active = True
+ self.logger = logging.getLogger(__name__)
+ self.reset()
+
+ def reset(self) -> None:
+ self._curr_state = MachineState.START
+
+ def next_state(self, c: int) -> int:
+ # for each byte we get its class; if it is the first byte of a
+ # character, we also get the character's byte length
+ byte_class = self._model["class_table"][c]
+ if self._curr_state == MachineState.START:
+ self._curr_byte_pos = 0
+ self._curr_char_len = self._model["char_len_table"][byte_class]
+ # from byte's class and state_table, we get its next state
+ curr_state = self._curr_state * self._model["class_factor"] + byte_class
+ self._curr_state = self._model["state_table"][curr_state]
+ self._curr_byte_pos += 1
+ return self._curr_state
+
+ def get_current_charlen(self) -> int:
+ return self._curr_char_len
+
+ def get_coding_state_machine(self) -> str:
+ return self._model["name"]
+
+ @property
+ def language(self) -> str:
+ return self._model["language"]
diff --git a/third_party/python/pip/pip/_vendor/chardet/codingstatemachinedict.py b/third_party/python/pip/pip/_vendor/chardet/codingstatemachinedict.py
new file mode 100644
index 0000000000..7a3c4c7e3f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/codingstatemachinedict.py
@@ -0,0 +1,19 @@
+from typing import TYPE_CHECKING, Tuple
+
+if TYPE_CHECKING:
+ # TypedDict was introduced in Python 3.8.
+ #
+ # TODO: Remove the else block and TYPE_CHECKING check when dropping support
+ # for Python 3.7.
+ from typing import TypedDict
+
+ class CodingStateMachineDict(TypedDict, total=False):
+ class_table: Tuple[int, ...]
+ class_factor: int
+ state_table: Tuple[int, ...]
+ char_len_table: Tuple[int, ...]
+ name: str
+ language: str # Optional key
+
+else:
+ CodingStateMachineDict = dict
diff --git a/third_party/python/pip/pip/_vendor/chardet/cp949prober.py b/third_party/python/pip/pip/_vendor/chardet/cp949prober.py
new file mode 100644
index 0000000000..fa7307ed89
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/cp949prober.py
@@ -0,0 +1,49 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import EUCKRDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import CP949_SM_MODEL
+
+
+class CP949Prober(MultiByteCharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(CP949_SM_MODEL)
+ # NOTE: CP949 is a superset of EUC-KR, so the distribution should be
+ # not different.
+ self.distribution_analyzer = EUCKRDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "CP949"
+
+ @property
+ def language(self) -> str:
+ return "Korean"
diff --git a/third_party/python/pip/pip/_vendor/chardet/enums.py b/third_party/python/pip/pip/_vendor/chardet/enums.py
new file mode 100644
index 0000000000..5e3e198233
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/enums.py
@@ -0,0 +1,85 @@
+"""
+All of the Enums that are used throughout the chardet package.
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+from enum import Enum, Flag
+
+
+class InputState:
+ """
+ This enum represents the different states a universal detector can be in.
+ """
+
+ PURE_ASCII = 0
+ ESC_ASCII = 1
+ HIGH_BYTE = 2
+
+
+class LanguageFilter(Flag):
+ """
+ This enum represents the different language filters we can apply to a
+ ``UniversalDetector``.
+ """
+
+ NONE = 0x00
+ CHINESE_SIMPLIFIED = 0x01
+ CHINESE_TRADITIONAL = 0x02
+ JAPANESE = 0x04
+ KOREAN = 0x08
+ NON_CJK = 0x10
+ ALL = 0x1F
+ CHINESE = CHINESE_SIMPLIFIED | CHINESE_TRADITIONAL
+ CJK = CHINESE | JAPANESE | KOREAN
+
+
+class ProbingState(Enum):
+ """
+ This enum represents the different states a prober can be in.
+ """
+
+ DETECTING = 0
+ FOUND_IT = 1
+ NOT_ME = 2
+
+
+class MachineState:
+ """
+ This enum represents the different states a state machine can be in.
+ """
+
+ START = 0
+ ERROR = 1
+ ITS_ME = 2
+
+
+class SequenceLikelihood:
+ """
+ This enum represents the likelihood of a character following the previous one.
+ """
+
+ NEGATIVE = 0
+ UNLIKELY = 1
+ LIKELY = 2
+ POSITIVE = 3
+
+ @classmethod
+ def get_num_categories(cls) -> int:
+ """:returns: The number of likelihood categories in the enum."""
+ return 4
+
+
+class CharacterCategory:
+ """
+ This enum represents the different categories language models for
+ ``SingleByteCharsetProber`` put characters into.
+
+ Anything less than CONTROL is considered a letter.
+ """
+
+ UNDEFINED = 255
+ LINE_BREAK = 254
+ SYMBOL = 253
+ DIGIT = 252
+ CONTROL = 251
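
LanguageFilter is a Flag enum, so filters combine and test with bitwise operators; the composite members are simply unions of the single-language bits. For example::

    from pip._vendor.chardet.enums import LanguageFilter

    chinese = LanguageFilter.CHINESE_SIMPLIFIED | LanguageFilter.CHINESE_TRADITIONAL
    print(chinese == LanguageFilter.CHINESE)                   # True
    print(bool(LanguageFilter.ALL & LanguageFilter.JAPANESE))  # True
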
diff --git a/third_party/python/pip/pip/_vendor/chardet/escprober.py b/third_party/python/pip/pip/_vendor/chardet/escprober.py
new file mode 100644
index 0000000000..fd713830d3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/escprober.py
@@ -0,0 +1,102 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import Optional, Union
+
+from .charsetprober import CharSetProber
+from .codingstatemachine import CodingStateMachine
+from .enums import LanguageFilter, MachineState, ProbingState
+from .escsm import (
+ HZ_SM_MODEL,
+ ISO2022CN_SM_MODEL,
+ ISO2022JP_SM_MODEL,
+ ISO2022KR_SM_MODEL,
+)
+
+
+class EscCharSetProber(CharSetProber):
+ """
+ This CharSetProber uses a "code scheme" approach for detecting encodings,
+ whereby easily recognizable escape or shift sequences are relied on to
+ identify these encodings.
+ """
+
+ def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+ super().__init__(lang_filter=lang_filter)
+ self.coding_sm = []
+ if self.lang_filter & LanguageFilter.CHINESE_SIMPLIFIED:
+ self.coding_sm.append(CodingStateMachine(HZ_SM_MODEL))
+ self.coding_sm.append(CodingStateMachine(ISO2022CN_SM_MODEL))
+ if self.lang_filter & LanguageFilter.JAPANESE:
+ self.coding_sm.append(CodingStateMachine(ISO2022JP_SM_MODEL))
+ if self.lang_filter & LanguageFilter.KOREAN:
+ self.coding_sm.append(CodingStateMachine(ISO2022KR_SM_MODEL))
+ self.active_sm_count = 0
+ self._detected_charset: Optional[str] = None
+ self._detected_language: Optional[str] = None
+ self._state = ProbingState.DETECTING
+ self.reset()
+
+ def reset(self) -> None:
+ super().reset()
+ for coding_sm in self.coding_sm:
+ coding_sm.active = True
+ coding_sm.reset()
+ self.active_sm_count = len(self.coding_sm)
+ self._detected_charset = None
+ self._detected_language = None
+
+ @property
+ def charset_name(self) -> Optional[str]:
+ return self._detected_charset
+
+ @property
+ def language(self) -> Optional[str]:
+ return self._detected_language
+
+ def get_confidence(self) -> float:
+ return 0.99 if self._detected_charset else 0.00
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ for c in byte_str:
+ for coding_sm in self.coding_sm:
+ if not coding_sm.active:
+ continue
+ coding_state = coding_sm.next_state(c)
+ if coding_state == MachineState.ERROR:
+ coding_sm.active = False
+ self.active_sm_count -= 1
+ if self.active_sm_count <= 0:
+ self._state = ProbingState.NOT_ME
+ return self.state
+ elif coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ self._detected_charset = coding_sm.get_coding_state_machine()
+ self._detected_language = coding_sm.language
+ return self.state
+
+ return self.state
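
Because each escape sequence is unambiguous, a single ISO-2022 escape is enough to push one state machine into ITS_ME and settle the answer. Tracing the tables in escsm.py (below) by hand, the ISO-2022-JP announcement ESC $ B does exactly that::

    from pip._vendor.chardet.enums import LanguageFilter, ProbingState
    from pip._vendor.chardet.escprober import EscCharSetProber

    prober = EscCharSetProber(LanguageFilter.ALL)
    state = prober.feed(b"\x1b$B")  # ESC $ B switches ISO-2022-JP to JIS X 0208
    print(state == ProbingState.FOUND_IT)                # True
    print(prober.charset_name, prober.get_confidence())  # ISO-2022-JP 0.99
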
diff --git a/third_party/python/pip/pip/_vendor/chardet/escsm.py b/third_party/python/pip/pip/_vendor/chardet/escsm.py
new file mode 100644
index 0000000000..11d4adf771
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/escsm.py
@@ -0,0 +1,261 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .codingstatemachinedict import CodingStateMachineDict
+from .enums import MachineState
+
+# fmt: off
+HZ_CLS = (
+ 1, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
+ 0, 0, 0, 0, 0, 0, 0, 0, # 08 - 0f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17
+ 0, 0, 0, 1, 0, 0, 0, 0, # 18 - 1f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27
+ 0, 0, 0, 0, 0, 0, 0, 0, # 28 - 2f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37
+ 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47
+ 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57
+ 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67
+ 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77
+ 0, 0, 0, 4, 0, 5, 2, 0, # 78 - 7f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 80 - 87
+ 1, 1, 1, 1, 1, 1, 1, 1, # 88 - 8f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 90 - 97
+ 1, 1, 1, 1, 1, 1, 1, 1, # 98 - 9f
+ 1, 1, 1, 1, 1, 1, 1, 1, # a0 - a7
+ 1, 1, 1, 1, 1, 1, 1, 1, # a8 - af
+ 1, 1, 1, 1, 1, 1, 1, 1, # b0 - b7
+ 1, 1, 1, 1, 1, 1, 1, 1, # b8 - bf
+ 1, 1, 1, 1, 1, 1, 1, 1, # c0 - c7
+ 1, 1, 1, 1, 1, 1, 1, 1, # c8 - cf
+ 1, 1, 1, 1, 1, 1, 1, 1, # d0 - d7
+ 1, 1, 1, 1, 1, 1, 1, 1, # d8 - df
+ 1, 1, 1, 1, 1, 1, 1, 1, # e0 - e7
+ 1, 1, 1, 1, 1, 1, 1, 1, # e8 - ef
+ 1, 1, 1, 1, 1, 1, 1, 1, # f0 - f7
+ 1, 1, 1, 1, 1, 1, 1, 1, # f8 - ff
+)
+
+HZ_ST = (
+MachineState.START, MachineState.ERROR, 3, MachineState.START, MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, # 00-07
+MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 08-0f
+MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.START, MachineState.START, 4, MachineState.ERROR, # 10-17
+ 5, MachineState.ERROR, 6, MachineState.ERROR, 5, 5, 4, MachineState.ERROR, # 18-1f
+ 4, MachineState.ERROR, 4, 4, 4, MachineState.ERROR, 4, MachineState.ERROR, # 20-27
+ 4, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 28-2f
+)
+# fmt: on
+
+HZ_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
+
+HZ_SM_MODEL: CodingStateMachineDict = {
+ "class_table": HZ_CLS,
+ "class_factor": 6,
+ "state_table": HZ_ST,
+ "char_len_table": HZ_CHAR_LEN_TABLE,
+ "name": "HZ-GB-2312",
+ "language": "Chinese",
+}
+
+# fmt: off
+ISO2022CN_CLS = (
+ 2, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
+ 0, 0, 0, 0, 0, 0, 0, 0, # 08 - 0f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17
+ 0, 0, 0, 1, 0, 0, 0, 0, # 18 - 1f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27
+ 0, 3, 0, 0, 0, 0, 0, 0, # 28 - 2f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37
+ 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f
+ 0, 0, 0, 4, 0, 0, 0, 0, # 40 - 47
+ 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57
+ 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67
+ 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77
+ 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 80 - 87
+ 2, 2, 2, 2, 2, 2, 2, 2, # 88 - 8f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 90 - 97
+ 2, 2, 2, 2, 2, 2, 2, 2, # 98 - 9f
+ 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
+ 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af
+ 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
+ 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
+ 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
+ 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf
+ 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
+ 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
+ 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7
+ 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef
+ 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7
+ 2, 2, 2, 2, 2, 2, 2, 2, # f8 - ff
+)
+
+ISO2022CN_ST = (
+ MachineState.START, 3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 00-07
+ MachineState.START, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 08-0f
+ MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 10-17
+ MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 4, MachineState.ERROR, # 18-1f
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 20-27
+ 5, 6, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 28-2f
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 30-37
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START, # 38-3f
+)
+# fmt: on
+
+ISO2022CN_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+ISO2022CN_SM_MODEL: CodingStateMachineDict = {
+ "class_table": ISO2022CN_CLS,
+ "class_factor": 9,
+ "state_table": ISO2022CN_ST,
+ "char_len_table": ISO2022CN_CHAR_LEN_TABLE,
+ "name": "ISO-2022-CN",
+ "language": "Chinese",
+}
+
+# fmt: off
+ISO2022JP_CLS = (
+ 2, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
+ 0, 0, 0, 0, 0, 0, 2, 2, # 08 - 0f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17
+ 0, 0, 0, 1, 0, 0, 0, 0, # 18 - 1f
+ 0, 0, 0, 0, 7, 0, 0, 0, # 20 - 27
+ 3, 0, 0, 0, 0, 0, 0, 0, # 28 - 2f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37
+ 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f
+ 6, 0, 4, 0, 8, 0, 0, 0, # 40 - 47
+ 0, 9, 5, 0, 0, 0, 0, 0, # 48 - 4f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57
+ 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67
+ 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77
+ 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 80 - 87
+ 2, 2, 2, 2, 2, 2, 2, 2, # 88 - 8f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 90 - 97
+ 2, 2, 2, 2, 2, 2, 2, 2, # 98 - 9f
+ 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
+ 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af
+ 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
+ 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
+ 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
+ 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf
+ 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
+ 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
+ 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7
+ 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef
+ 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7
+ 2, 2, 2, 2, 2, 2, 2, 2, # f8 - ff
+)
+
+ISO2022JP_ST = (
+ MachineState.START, 3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 00-07
+ MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 08-0f
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 10-17
+ MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, # 18-1f
+ MachineState.ERROR, 5, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 4, MachineState.ERROR, MachineState.ERROR, # 20-27
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 6, MachineState.ITS_ME, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, # 28-2f
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, # 30-37
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 38-3f
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ERROR, MachineState.START, MachineState.START, # 40-47
+)
+# fmt: on
+
+ISO2022JP_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+ISO2022JP_SM_MODEL: CodingStateMachineDict = {
+ "class_table": ISO2022JP_CLS,
+ "class_factor": 10,
+ "state_table": ISO2022JP_ST,
+ "char_len_table": ISO2022JP_CHAR_LEN_TABLE,
+ "name": "ISO-2022-JP",
+ "language": "Japanese",
+}
+
+# fmt: off
+ISO2022KR_CLS = (
+ 2, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
+ 0, 0, 0, 0, 0, 0, 0, 0, # 08 - 0f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17
+ 0, 0, 0, 1, 0, 0, 0, 0, # 18 - 1f
+ 0, 0, 0, 0, 3, 0, 0, 0, # 20 - 27
+ 0, 4, 0, 0, 0, 0, 0, 0, # 28 - 2f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37
+ 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f
+ 0, 0, 0, 5, 0, 0, 0, 0, # 40 - 47
+ 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57
+ 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67
+ 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77
+ 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 80 - 87
+ 2, 2, 2, 2, 2, 2, 2, 2, # 88 - 8f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 90 - 97
+ 2, 2, 2, 2, 2, 2, 2, 2, # 98 - 9f
+ 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
+ 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af
+ 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
+ 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
+ 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
+ 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf
+ 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
+ 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
+ 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7
+ 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef
+ 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7
+ 2, 2, 2, 2, 2, 2, 2, 2, # f8 - ff
+)
+
+ISO2022KR_ST = (
+ MachineState.START, 3, MachineState.ERROR, MachineState.START, MachineState.START, MachineState.START, MachineState.ERROR, MachineState.ERROR, # 00-07
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ITS_ME, # 08-0f
+ MachineState.ITS_ME, MachineState.ITS_ME, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 4, MachineState.ERROR, MachineState.ERROR, # 10-17
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, 5, MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, # 18-1f
+ MachineState.ERROR, MachineState.ERROR, MachineState.ERROR, MachineState.ITS_ME, MachineState.START, MachineState.START, MachineState.START, MachineState.START, # 20-27
+)
+# fmt: on
+
+ISO2022KR_CHAR_LEN_TABLE = (0, 0, 0, 0, 0, 0)
+
+ISO2022KR_SM_MODEL: CodingStateMachineDict = {
+ "class_table": ISO2022KR_CLS,
+ "class_factor": 6,
+ "state_table": ISO2022KR_ST,
+ "char_len_table": ISO2022KR_CHAR_LEN_TABLE,
+ "name": "ISO-2022-KR",
+ "language": "Korean",
+}
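
As a worked example of these tables: in the HZ model, '~' (0x7E) is class 2 and '{' (0x7B) is class 4, so from START (0) the machine moves through HZ_ST[0*6+2] == 3 and then HZ_ST[3*6+4] == 4. States 3 and 4 are model-internal; only START, ERROR and ITS_ME carry global meaning. Stepping the machine confirms the arithmetic::

    from pip._vendor.chardet.codingstatemachine import CodingStateMachine
    from pip._vendor.chardet.escsm import HZ_SM_MODEL

    sm = CodingStateMachine(HZ_SM_MODEL)
    print(sm.next_state(ord("~")))  # -> 3 (intermediate, after the HZ lead-in)
    print(sm.next_state(ord("{")))  # -> 4 ('~{' has opened a GB segment)
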
diff --git a/third_party/python/pip/pip/_vendor/chardet/eucjpprober.py b/third_party/python/pip/pip/_vendor/chardet/eucjpprober.py
new file mode 100644
index 0000000000..39487f4098
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/eucjpprober.py
@@ -0,0 +1,102 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import Union
+
+from .chardistribution import EUCJPDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .enums import MachineState, ProbingState
+from .jpcntx import EUCJPContextAnalysis
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import EUCJP_SM_MODEL
+
+
+class EUCJPProber(MultiByteCharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
+ self.distribution_analyzer = EUCJPDistributionAnalysis()
+ self.context_analyzer = EUCJPContextAnalysis()
+ self.reset()
+
+ def reset(self) -> None:
+ super().reset()
+ self.context_analyzer.reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "EUC-JP"
+
+ @property
+ def language(self) -> str:
+ return "Japanese"
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ assert self.coding_sm is not None
+ assert self.distribution_analyzer is not None
+
+ for i, byte in enumerate(byte_str):
+ # PY3K: byte_str is a byte array, so byte is an int, not a byte
+ coding_state = self.coding_sm.next_state(byte)
+ if coding_state == MachineState.ERROR:
+ self.logger.debug(
+ "%s %s prober hit error at byte %s",
+ self.charset_name,
+ self.language,
+ i,
+ )
+ self._state = ProbingState.NOT_ME
+ break
+ if coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ if coding_state == MachineState.START:
+ char_len = self.coding_sm.get_current_charlen()
+ if i == 0:
+ self._last_char[1] = byte
+ self.context_analyzer.feed(self._last_char, char_len)
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.context_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+ self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if self.context_analyzer.got_enough_data() and (
+ self.get_confidence() > self.SHORTCUT_THRESHOLD
+ ):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self) -> float:
+ assert self.distribution_analyzer is not None
+
+ context_conf = self.context_analyzer.get_confidence()
+ distrib_conf = self.distribution_analyzer.get_confidence()
+ return max(context_conf, distrib_conf)
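The feed() loop above advances the state machine byte by byte and hands each complete character to both the context and distribution analyzers; the final confidence is simply the larger of the two scores. A short usage sketch, assuming an importable upstream chardet package rather than this vendored copy:

    from chardet.enums import ProbingState
    from chardet.eucjpprober import EUCJPProber

    prober = EUCJPProber()
    state = prober.feed("こんにちは、世界".encode("euc_jp"))  # raw EUC-JP bytes
    if state != ProbingState.NOT_ME:
        print(prober.charset_name, prober.language, prober.get_confidence())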
diff --git a/third_party/python/pip/pip/_vendor/chardet/euckrfreq.py b/third_party/python/pip/pip/_vendor/chardet/euckrfreq.py
new file mode 100644
index 0000000000..7dc3b10387
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/euckrfreq.py
@@ -0,0 +1,196 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Sampled from about 20M of text material, including literature and computer technology
+
+# 128 --> 0.79
+# 256 --> 0.92
+# 512 --> 0.986
+# 1024 --> 0.99944
+# 2048 --> 0.99999
+#
+# Ideal Distribution Ratio = 0.98653 / (1-0.98653) = 73.24
+# Random Distribution Ratio = 512 / (2350-512) = 0.279
+#
+# Typical Distribution Ratio
+
+EUCKR_TYPICAL_DISTRIBUTION_RATIO = 6.0
+
+EUCKR_TABLE_SIZE = 2352
+
+# Char to FreqOrder table
+# fmt: off
+EUCKR_CHAR_TO_FREQ_ORDER = (
+ 13, 130, 120,1396, 481,1719,1720, 328, 609, 212,1721, 707, 400, 299,1722, 87,
+1397,1723, 104, 536,1117,1203,1724,1267, 685,1268, 508,1725,1726,1727,1728,1398,
+1399,1729,1730,1731, 141, 621, 326,1057, 368,1732, 267, 488, 20,1733,1269,1734,
+ 945,1400,1735, 47, 904,1270,1736,1737, 773, 248,1738, 409, 313, 786, 429,1739,
+ 116, 987, 813,1401, 683, 75,1204, 145,1740,1741,1742,1743, 16, 847, 667, 622,
+ 708,1744,1745,1746, 966, 787, 304, 129,1747, 60, 820, 123, 676,1748,1749,1750,
+1751, 617,1752, 626,1753,1754,1755,1756, 653,1757,1758,1759,1760,1761,1762, 856,
+ 344,1763,1764,1765,1766, 89, 401, 418, 806, 905, 848,1767,1768,1769, 946,1205,
+ 709,1770,1118,1771, 241,1772,1773,1774,1271,1775, 569,1776, 999,1777,1778,1779,
+1780, 337, 751,1058, 28, 628, 254,1781, 177, 906, 270, 349, 891,1079,1782, 19,
+1783, 379,1784, 315,1785, 629, 754,1402, 559,1786, 636, 203,1206,1787, 710, 567,
+1788, 935, 814,1789,1790,1207, 766, 528,1791,1792,1208,1793,1794,1795,1796,1797,
+1403,1798,1799, 533,1059,1404,1405,1156,1406, 936, 884,1080,1800, 351,1801,1802,
+1803,1804,1805, 801,1806,1807,1808,1119,1809,1157, 714, 474,1407,1810, 298, 899,
+ 885,1811,1120, 802,1158,1812, 892,1813,1814,1408, 659,1815,1816,1121,1817,1818,
+1819,1820,1821,1822, 319,1823, 594, 545,1824, 815, 937,1209,1825,1826, 573,1409,
+1022,1827,1210,1828,1829,1830,1831,1832,1833, 556, 722, 807,1122,1060,1834, 697,
+1835, 900, 557, 715,1836,1410, 540,1411, 752,1159, 294, 597,1211, 976, 803, 770,
+1412,1837,1838, 39, 794,1413, 358,1839, 371, 925,1840, 453, 661, 788, 531, 723,
+ 544,1023,1081, 869, 91,1841, 392, 430, 790, 602,1414, 677,1082, 457,1415,1416,
+1842,1843, 475, 327,1024,1417, 795, 121,1844, 733, 403,1418,1845,1846,1847, 300,
+ 119, 711,1212, 627,1848,1272, 207,1849,1850, 796,1213, 382,1851, 519,1852,1083,
+ 893,1853,1854,1855, 367, 809, 487, 671,1856, 663,1857,1858, 956, 471, 306, 857,
+1859,1860,1160,1084,1861,1862,1863,1864,1865,1061,1866,1867,1868,1869,1870,1871,
+ 282, 96, 574,1872, 502,1085,1873,1214,1874, 907,1875,1876, 827, 977,1419,1420,
+1421, 268,1877,1422,1878,1879,1880, 308,1881, 2, 537,1882,1883,1215,1884,1885,
+ 127, 791,1886,1273,1423,1887, 34, 336, 404, 643,1888, 571, 654, 894, 840,1889,
+ 0, 886,1274, 122, 575, 260, 908, 938,1890,1275, 410, 316,1891,1892, 100,1893,
+1894,1123, 48,1161,1124,1025,1895, 633, 901,1276,1896,1897, 115, 816,1898, 317,
+1899, 694,1900, 909, 734,1424, 572, 866,1425, 691, 85, 524,1010, 543, 394, 841,
+1901,1902,1903,1026,1904,1905,1906,1907,1908,1909, 30, 451, 651, 988, 310,1910,
+1911,1426, 810,1216, 93,1912,1913,1277,1217,1914, 858, 759, 45, 58, 181, 610,
+ 269,1915,1916, 131,1062, 551, 443,1000, 821,1427, 957, 895,1086,1917,1918, 375,
+1919, 359,1920, 687,1921, 822,1922, 293,1923,1924, 40, 662, 118, 692, 29, 939,
+ 887, 640, 482, 174,1925, 69,1162, 728,1428, 910,1926,1278,1218,1279, 386, 870,
+ 217, 854,1163, 823,1927,1928,1929,1930, 834,1931, 78,1932, 859,1933,1063,1934,
+1935,1936,1937, 438,1164, 208, 595,1938,1939,1940,1941,1219,1125,1942, 280, 888,
+1429,1430,1220,1431,1943,1944,1945,1946,1947,1280, 150, 510,1432,1948,1949,1950,
+1951,1952,1953,1954,1011,1087,1955,1433,1043,1956, 881,1957, 614, 958,1064,1065,
+1221,1958, 638,1001, 860, 967, 896,1434, 989, 492, 553,1281,1165,1959,1282,1002,
+1283,1222,1960,1961,1962,1963, 36, 383, 228, 753, 247, 454,1964, 876, 678,1965,
+1966,1284, 126, 464, 490, 835, 136, 672, 529, 940,1088,1435, 473,1967,1968, 467,
+ 50, 390, 227, 587, 279, 378, 598, 792, 968, 240, 151, 160, 849, 882,1126,1285,
+ 639,1044, 133, 140, 288, 360, 811, 563,1027, 561, 142, 523,1969,1970,1971, 7,
+ 103, 296, 439, 407, 506, 634, 990,1972,1973,1974,1975, 645,1976,1977,1978,1979,
+1980,1981, 236,1982,1436,1983,1984,1089, 192, 828, 618, 518,1166, 333,1127,1985,
+ 818,1223,1986,1987,1988,1989,1990,1991,1992,1993, 342,1128,1286, 746, 842,1994,
+1995, 560, 223,1287, 98, 8, 189, 650, 978,1288,1996,1437,1997, 17, 345, 250,
+ 423, 277, 234, 512, 226, 97, 289, 42, 167,1998, 201,1999,2000, 843, 836, 824,
+ 532, 338, 783,1090, 182, 576, 436,1438,1439, 527, 500,2001, 947, 889,2002,2003,
+2004,2005, 262, 600, 314, 447,2006, 547,2007, 693, 738,1129,2008, 71,1440, 745,
+ 619, 688,2009, 829,2010,2011, 147,2012, 33, 948,2013,2014, 74, 224,2015, 61,
+ 191, 918, 399, 637,2016,1028,1130, 257, 902,2017,2018,2019,2020,2021,2022,2023,
+2024,2025,2026, 837,2027,2028,2029,2030, 179, 874, 591, 52, 724, 246,2031,2032,
+2033,2034,1167, 969,2035,1289, 630, 605, 911,1091,1168,2036,2037,2038,1441, 912,
+2039, 623,2040,2041, 253,1169,1290,2042,1442, 146, 620, 611, 577, 433,2043,1224,
+ 719,1170, 959, 440, 437, 534, 84, 388, 480,1131, 159, 220, 198, 679,2044,1012,
+ 819,1066,1443, 113,1225, 194, 318,1003,1029,2045,2046,2047,2048,1067,2049,2050,
+2051,2052,2053, 59, 913, 112,2054, 632,2055, 455, 144, 739,1291,2056, 273, 681,
+ 499,2057, 448,2058,2059, 760,2060,2061, 970, 384, 169, 245,1132,2062,2063, 414,
+1444,2064,2065, 41, 235,2066, 157, 252, 877, 568, 919, 789, 580,2067, 725,2068,
+2069,1292,2070,2071,1445,2072,1446,2073,2074, 55, 588, 66,1447, 271,1092,2075,
+1226,2076, 960,1013, 372,2077,2078,2079,2080,2081,1293,2082,2083,2084,2085, 850,
+2086,2087,2088,2089,2090, 186,2091,1068, 180,2092,2093,2094, 109,1227, 522, 606,
+2095, 867,1448,1093, 991,1171, 926, 353,1133,2096, 581,2097,2098,2099,1294,1449,
+1450,2100, 596,1172,1014,1228,2101,1451,1295,1173,1229,2102,2103,1296,1134,1452,
+ 949,1135,2104,2105,1094,1453,1454,1455,2106,1095,2107,2108,2109,2110,2111,2112,
+2113,2114,2115,2116,2117, 804,2118,2119,1230,1231, 805,1456, 405,1136,2120,2121,
+2122,2123,2124, 720, 701,1297, 992,1457, 927,1004,2125,2126,2127,2128,2129,2130,
+ 22, 417,2131, 303,2132, 385,2133, 971, 520, 513,2134,1174, 73,1096, 231, 274,
+ 962,1458, 673,2135,1459,2136, 152,1137,2137,2138,2139,2140,1005,1138,1460,1139,
+2141,2142,2143,2144, 11, 374, 844,2145, 154,1232, 46,1461,2146, 838, 830, 721,
+1233, 106,2147, 90, 428, 462, 578, 566,1175, 352,2148,2149, 538,1234, 124,1298,
+2150,1462, 761, 565,2151, 686,2152, 649,2153, 72, 173,2154, 460, 415,2155,1463,
+2156,1235, 305,2157,2158,2159,2160,2161,2162, 579,2163,2164,2165,2166,2167, 747,
+2168,2169,2170,2171,1464, 669,2172,2173,2174,2175,2176,1465,2177, 23, 530, 285,
+2178, 335, 729,2179, 397,2180,2181,2182,1030,2183,2184, 698,2185,2186, 325,2187,
+2188, 369,2189, 799,1097,1015, 348,2190,1069, 680,2191, 851,1466,2192,2193, 10,
+2194, 613, 424,2195, 979, 108, 449, 589, 27, 172, 81,1031, 80, 774, 281, 350,
+1032, 525, 301, 582,1176,2196, 674,1045,2197,2198,1467, 730, 762,2199,2200,2201,
+2202,1468,2203, 993,2204,2205, 266,1070, 963,1140,2206,2207,2208, 664,1098, 972,
+2209,2210,2211,1177,1469,1470, 871,2212,2213,2214,2215,2216,1471,2217,2218,2219,
+2220,2221,2222,2223,2224,2225,2226,2227,1472,1236,2228,2229,2230,2231,2232,2233,
+2234,2235,1299,2236,2237, 200,2238, 477, 373,2239,2240, 731, 825, 777,2241,2242,
+2243, 521, 486, 548,2244,2245,2246,1473,1300, 53, 549, 137, 875, 76, 158,2247,
+1301,1474, 469, 396,1016, 278, 712,2248, 321, 442, 503, 767, 744, 941,1237,1178,
+1475,2249, 82, 178,1141,1179, 973,2250,1302,2251, 297,2252,2253, 570,2254,2255,
+2256, 18, 450, 206,2257, 290, 292,1142,2258, 511, 162, 99, 346, 164, 735,2259,
+1476,1477, 4, 554, 343, 798,1099,2260,1100,2261, 43, 171,1303, 139, 215,2262,
+2263, 717, 775,2264,1033, 322, 216,2265, 831,2266, 149,2267,1304,2268,2269, 702,
+1238, 135, 845, 347, 309,2270, 484,2271, 878, 655, 238,1006,1478,2272, 67,2273,
+ 295,2274,2275, 461,2276, 478, 942, 412,2277,1034,2278,2279,2280, 265,2281, 541,
+2282,2283,2284,2285,2286, 70, 852,1071,2287,2288,2289,2290, 21, 56, 509, 117,
+ 432,2291,2292, 331, 980, 552,1101, 148, 284, 105, 393,1180,1239, 755,2293, 187,
+2294,1046,1479,2295, 340,2296, 63,1047, 230,2297,2298,1305, 763,1306, 101, 800,
+ 808, 494,2299,2300,2301, 903,2302, 37,1072, 14, 5,2303, 79, 675,2304, 312,
+2305,2306,2307,2308,2309,1480, 6,1307,2310,2311,2312, 1, 470, 35, 24, 229,
+2313, 695, 210, 86, 778, 15, 784, 592, 779, 32, 77, 855, 964,2314, 259,2315,
+ 501, 380,2316,2317, 83, 981, 153, 689,1308,1481,1482,1483,2318,2319, 716,1484,
+2320,2321,2322,2323,2324,2325,1485,2326,2327, 128, 57, 68, 261,1048, 211, 170,
+1240, 31,2328, 51, 435, 742,2329,2330,2331, 635,2332, 264, 456,2333,2334,2335,
+ 425,2336,1486, 143, 507, 263, 943,2337, 363, 920,1487, 256,1488,1102, 243, 601,
+1489,2338,2339,2340,2341,2342,2343,2344, 861,2345,2346,2347,2348,2349,2350, 395,
+2351,1490,1491, 62, 535, 166, 225,2352,2353, 668, 419,1241, 138, 604, 928,2354,
+1181,2355,1492,1493,2356,2357,2358,1143,2359, 696,2360, 387, 307,1309, 682, 476,
+2361,2362, 332, 12, 222, 156,2363, 232,2364, 641, 276, 656, 517,1494,1495,1035,
+ 416, 736,1496,2365,1017, 586,2366,2367,2368,1497,2369, 242,2370,2371,2372,1498,
+2373, 965, 713,2374,2375,2376,2377, 740, 982,1499, 944,1500,1007,2378,2379,1310,
+1501,2380,2381,2382, 785, 329,2383,2384,1502,2385,2386,2387, 932,2388,1503,2389,
+2390,2391,2392,1242,2393,2394,2395,2396,2397, 994, 950,2398,2399,2400,2401,1504,
+1311,2402,2403,2404,2405,1049, 749,2406,2407, 853, 718,1144,1312,2408,1182,1505,
+2409,2410, 255, 516, 479, 564, 550, 214,1506,1507,1313, 413, 239, 444, 339,1145,
+1036,1508,1509,1314,1037,1510,1315,2411,1511,2412,2413,2414, 176, 703, 497, 624,
+ 593, 921, 302,2415, 341, 165,1103,1512,2416,1513,2417,2418,2419, 376,2420, 700,
+2421,2422,2423, 258, 768,1316,2424,1183,2425, 995, 608,2426,2427,2428,2429, 221,
+2430,2431,2432,2433,2434,2435,2436,2437, 195, 323, 726, 188, 897, 983,1317, 377,
+ 644,1050, 879,2438, 452,2439,2440,2441,2442,2443,2444, 914,2445,2446,2447,2448,
+ 915, 489,2449,1514,1184,2450,2451, 515, 64, 427, 495,2452, 583,2453, 483, 485,
+1038, 562, 213,1515, 748, 666,2454,2455,2456,2457, 334,2458, 780, 996,1008, 705,
+1243,2459,2460,2461,2462,2463, 114,2464, 493,1146, 366, 163,1516, 961,1104,2465,
+ 291,2466,1318,1105,2467,1517, 365,2468, 355, 951,1244,2469,1319,2470, 631,2471,
+2472, 218,1320, 364, 320, 756,1518,1519,1321,1520,1322,2473,2474,2475,2476, 997,
+2477,2478,2479,2480, 665,1185,2481, 916,1521,2482,2483,2484, 584, 684,2485,2486,
+ 797,2487,1051,1186,2488,2489,2490,1522,2491,2492, 370,2493,1039,1187, 65,2494,
+ 434, 205, 463,1188,2495, 125, 812, 391, 402, 826, 699, 286, 398, 155, 781, 771,
+ 585,2496, 590, 505,1073,2497, 599, 244, 219, 917,1018, 952, 646,1523,2498,1323,
+2499,2500, 49, 984, 354, 741,2501, 625,2502,1324,2503,1019, 190, 357, 757, 491,
+ 95, 782, 868,2504,2505,2506,2507,2508,2509, 134,1524,1074, 422,1525, 898,2510,
+ 161,2511,2512,2513,2514, 769,2515,1526,2516,2517, 411,1325,2518, 472,1527,2519,
+2520,2521,2522,2523,2524, 985,2525,2526,2527,2528,2529,2530, 764,2531,1245,2532,
+2533, 25, 204, 311,2534, 496,2535,1052,2536,2537,2538,2539,2540,2541,2542, 199,
+ 704, 504, 468, 758, 657,1528, 196, 44, 839,1246, 272, 750,2543, 765, 862,2544,
+2545,1326,2546, 132, 615, 933,2547, 732,2548,2549,2550,1189,1529,2551, 283,1247,
+1053, 607, 929,2552,2553,2554, 930, 183, 872, 616,1040,1147,2555,1148,1020, 441,
+ 249,1075,2556,2557,2558, 466, 743,2559,2560,2561, 92, 514, 426, 420, 526,2562,
+2563,2564,2565,2566,2567,2568, 185,2569,2570,2571,2572, 776,1530, 658,2573, 362,
+2574, 361, 922,1076, 793,2575,2576,2577,2578,2579,2580,1531, 251,2581,2582,2583,
+2584,1532, 54, 612, 237,1327,2585,2586, 275, 408, 647, 111,2587,1533,1106, 465,
+ 3, 458, 9, 38,2588, 107, 110, 890, 209, 26, 737, 498,2589,1534,2590, 431,
+ 202, 88,1535, 356, 287,1107, 660,1149,2591, 381,1536, 986,1150, 445,1248,1151,
+ 974,2592,2593, 846,2594, 446, 953, 184,1249,1250, 727,2595, 923, 193, 883,2596,
+2597,2598, 102, 324, 539, 817,2599, 421,1041,2600, 832,2601, 94, 175, 197, 406,
+2602, 459,2603,2604,2605,2606,2607, 330, 555,2608,2609,2610, 706,1108, 389,2611,
+2612,2613,2614, 233,2615, 833, 558, 931, 954,1251,2616,2617,1537, 546,2618,2619,
+1009,2620,2621,2622,1538, 690,1328,2623, 955,2624,1539,2625,2626, 772,2627,2628,
+2629,2630,2631, 924, 648, 863, 603,2632,2633, 934,1540, 864, 865,2634, 642,1042,
+ 670,1190,2635,2636,2637,2638, 168,2639, 652, 873, 542,1054,1541,2640,2641,2642, # 512, 256
+)
+# fmt: on
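The table above maps each (reordered) EUC-KR code point to a frequency rank; detection then checks whether the characters actually seen cluster in the high-frequency band. Roughly what chardet's CharDistributionAnalysis computes from EUCKR_CHAR_TO_FREQ_ORDER and EUCKR_TYPICAL_DISTRIBUTION_RATIO (a simplified sketch, not the vendored class):

    def distribution_confidence(freq_orders, typical_ratio=6.0, freq_band=512):
        # `freq_orders` holds the frequency rank of every multi-byte
        # character observed so far (looked up in a table like the one above).
        total = len(freq_orders)
        if total == 0:
            return 0.0
        freq_chars = sum(1 for order in freq_orders if order < freq_band)
        rare_chars = total - freq_chars
        if rare_chars == 0:
            return 0.99  # everything landed in the high-frequency band
        return min(freq_chars / (rare_chars * typical_ratio), 0.99)

Real EUC-KR text keeps this ratio near the ideal value from the header comment; random bytes decoded as EUC-KR scatter across the ranks and score close to zero.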
diff --git a/third_party/python/pip/pip/_vendor/chardet/euckrprober.py b/third_party/python/pip/pip/_vendor/chardet/euckrprober.py
new file mode 100644
index 0000000000..1fc5de0462
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/euckrprober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import EUCKRDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import EUCKR_SM_MODEL
+
+
+class EUCKRProber(MultiByteCharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(EUCKR_SM_MODEL)
+ self.distribution_analyzer = EUCKRDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "EUC-KR"
+
+ @property
+ def language(self) -> str:
+ return "Korean"
diff --git a/third_party/python/pip/pip/_vendor/chardet/euctwfreq.py b/third_party/python/pip/pip/_vendor/chardet/euctwfreq.py
new file mode 100644
index 0000000000..4900ccc160
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/euctwfreq.py
@@ -0,0 +1,388 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# EUCTW frequency table
+# Converted from big5 work
+# by Taiwan's Mandarin Promotion Council
+# <http://www.edu.tw:81/mandr/>
+
+# 128 --> 0.42261
+# 256 --> 0.57851
+# 512 --> 0.74851
+# 1024 --> 0.89384
+# 2048 --> 0.97583
+#
+# Ideal Distribution Ratio = 0.74851 / (1-0.74851) = 2.98
+# Random Distribution Ratio = 512 / (5401-512) = 0.105
+#
+# Typical Distribution Ratio is about 25% of the ideal one, still much higher than the RDR
+
+EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
+
+# Char to FreqOrder table
+EUCTW_TABLE_SIZE = 5376
+
+# fmt: off
+EUCTW_CHAR_TO_FREQ_ORDER = (
+ 1, 1800, 1506, 255, 1431, 198, 9, 82, 6, 7310, 177, 202, 3615, 1256, 2808, 110, # 2742
+ 3735, 33, 3241, 261, 76, 44, 2113, 16, 2931, 2184, 1176, 659, 3868, 26, 3404, 2643, # 2758
+ 1198, 3869, 3313, 4060, 410, 2211, 302, 590, 361, 1963, 8, 204, 58, 4296, 7311, 1931, # 2774
+ 63, 7312, 7313, 317, 1614, 75, 222, 159, 4061, 2412, 1480, 7314, 3500, 3068, 224, 2809, # 2790
+ 3616, 3, 10, 3870, 1471, 29, 2774, 1135, 2852, 1939, 873, 130, 3242, 1123, 312, 7315, # 2806
+ 4297, 2051, 507, 252, 682, 7316, 142, 1914, 124, 206, 2932, 34, 3501, 3173, 64, 604, # 2822
+ 7317, 2494, 1976, 1977, 155, 1990, 645, 641, 1606, 7318, 3405, 337, 72, 406, 7319, 80, # 2838
+ 630, 238, 3174, 1509, 263, 939, 1092, 2644, 756, 1440, 1094, 3406, 449, 69, 2969, 591, # 2854
+ 179, 2095, 471, 115, 2034, 1843, 60, 50, 2970, 134, 806, 1868, 734, 2035, 3407, 180, # 2870
+ 995, 1607, 156, 537, 2893, 688, 7320, 319, 1305, 779, 2144, 514, 2374, 298, 4298, 359, # 2886
+ 2495, 90, 2707, 1338, 663, 11, 906, 1099, 2545, 20, 2436, 182, 532, 1716, 7321, 732, # 2902
+ 1376, 4062, 1311, 1420, 3175, 25, 2312, 1056, 113, 399, 382, 1949, 242, 3408, 2467, 529, # 2918
+ 3243, 475, 1447, 3617, 7322, 117, 21, 656, 810, 1297, 2295, 2329, 3502, 7323, 126, 4063, # 2934
+ 706, 456, 150, 613, 4299, 71, 1118, 2036, 4064, 145, 3069, 85, 835, 486, 2114, 1246, # 2950
+ 1426, 428, 727, 1285, 1015, 800, 106, 623, 303, 1281, 7324, 2127, 2354, 347, 3736, 221, # 2966
+ 3503, 3110, 7325, 1955, 1153, 4065, 83, 296, 1199, 3070, 192, 624, 93, 7326, 822, 1897, # 2982
+ 2810, 3111, 795, 2064, 991, 1554, 1542, 1592, 27, 43, 2853, 859, 139, 1456, 860, 4300, # 2998
+ 437, 712, 3871, 164, 2392, 3112, 695, 211, 3017, 2096, 195, 3872, 1608, 3504, 3505, 3618, # 3014
+ 3873, 234, 811, 2971, 2097, 3874, 2229, 1441, 3506, 1615, 2375, 668, 2076, 1638, 305, 228, # 3030
+ 1664, 4301, 467, 415, 7327, 262, 2098, 1593, 239, 108, 300, 200, 1033, 512, 1247, 2077, # 3046
+ 7328, 7329, 2173, 3176, 3619, 2673, 593, 845, 1062, 3244, 88, 1723, 2037, 3875, 1950, 212, # 3062
+ 266, 152, 149, 468, 1898, 4066, 4302, 77, 187, 7330, 3018, 37, 5, 2972, 7331, 3876, # 3078
+ 7332, 7333, 39, 2517, 4303, 2894, 3177, 2078, 55, 148, 74, 4304, 545, 483, 1474, 1029, # 3094
+ 1665, 217, 1869, 1531, 3113, 1104, 2645, 4067, 24, 172, 3507, 900, 3877, 3508, 3509, 4305, # 3110
+ 32, 1408, 2811, 1312, 329, 487, 2355, 2247, 2708, 784, 2674, 4, 3019, 3314, 1427, 1788, # 3126
+ 188, 109, 499, 7334, 3620, 1717, 1789, 888, 1217, 3020, 4306, 7335, 3510, 7336, 3315, 1520, # 3142
+ 3621, 3878, 196, 1034, 775, 7337, 7338, 929, 1815, 249, 439, 38, 7339, 1063, 7340, 794, # 3158
+ 3879, 1435, 2296, 46, 178, 3245, 2065, 7341, 2376, 7342, 214, 1709, 4307, 804, 35, 707, # 3174
+ 324, 3622, 1601, 2546, 140, 459, 4068, 7343, 7344, 1365, 839, 272, 978, 2257, 2572, 3409, # 3190
+ 2128, 1363, 3623, 1423, 697, 100, 3071, 48, 70, 1231, 495, 3114, 2193, 7345, 1294, 7346, # 3206
+ 2079, 462, 586, 1042, 3246, 853, 256, 988, 185, 2377, 3410, 1698, 434, 1084, 7347, 3411, # 3222
+ 314, 2615, 2775, 4308, 2330, 2331, 569, 2280, 637, 1816, 2518, 757, 1162, 1878, 1616, 3412, # 3238
+ 287, 1577, 2115, 768, 4309, 1671, 2854, 3511, 2519, 1321, 3737, 909, 2413, 7348, 4069, 933, # 3254
+ 3738, 7349, 2052, 2356, 1222, 4310, 765, 2414, 1322, 786, 4311, 7350, 1919, 1462, 1677, 2895, # 3270
+ 1699, 7351, 4312, 1424, 2437, 3115, 3624, 2590, 3316, 1774, 1940, 3413, 3880, 4070, 309, 1369, # 3286
+ 1130, 2812, 364, 2230, 1653, 1299, 3881, 3512, 3882, 3883, 2646, 525, 1085, 3021, 902, 2000, # 3302
+ 1475, 964, 4313, 421, 1844, 1415, 1057, 2281, 940, 1364, 3116, 376, 4314, 4315, 1381, 7, # 3318
+ 2520, 983, 2378, 336, 1710, 2675, 1845, 321, 3414, 559, 1131, 3022, 2742, 1808, 1132, 1313, # 3334
+ 265, 1481, 1857, 7352, 352, 1203, 2813, 3247, 167, 1089, 420, 2814, 776, 792, 1724, 3513, # 3350
+ 4071, 2438, 3248, 7353, 4072, 7354, 446, 229, 333, 2743, 901, 3739, 1200, 1557, 4316, 2647, # 3366
+ 1920, 395, 2744, 2676, 3740, 4073, 1835, 125, 916, 3178, 2616, 4317, 7355, 7356, 3741, 7357, # 3382
+ 7358, 7359, 4318, 3117, 3625, 1133, 2547, 1757, 3415, 1510, 2313, 1409, 3514, 7360, 2145, 438, # 3398
+ 2591, 2896, 2379, 3317, 1068, 958, 3023, 461, 311, 2855, 2677, 4074, 1915, 3179, 4075, 1978, # 3414
+ 383, 750, 2745, 2617, 4076, 274, 539, 385, 1278, 1442, 7361, 1154, 1964, 384, 561, 210, # 3430
+ 98, 1295, 2548, 3515, 7362, 1711, 2415, 1482, 3416, 3884, 2897, 1257, 129, 7363, 3742, 642, # 3446
+ 523, 2776, 2777, 2648, 7364, 141, 2231, 1333, 68, 176, 441, 876, 907, 4077, 603, 2592, # 3462
+ 710, 171, 3417, 404, 549, 18, 3118, 2393, 1410, 3626, 1666, 7365, 3516, 4319, 2898, 4320, # 3478
+ 7366, 2973, 368, 7367, 146, 366, 99, 871, 3627, 1543, 748, 807, 1586, 1185, 22, 2258, # 3494
+ 379, 3743, 3180, 7368, 3181, 505, 1941, 2618, 1991, 1382, 2314, 7369, 380, 2357, 218, 702, # 3510
+ 1817, 1248, 3418, 3024, 3517, 3318, 3249, 7370, 2974, 3628, 930, 3250, 3744, 7371, 59, 7372, # 3526
+ 585, 601, 4078, 497, 3419, 1112, 1314, 4321, 1801, 7373, 1223, 1472, 2174, 7374, 749, 1836, # 3542
+ 690, 1899, 3745, 1772, 3885, 1476, 429, 1043, 1790, 2232, 2116, 917, 4079, 447, 1086, 1629, # 3558
+ 7375, 556, 7376, 7377, 2020, 1654, 844, 1090, 105, 550, 966, 1758, 2815, 1008, 1782, 686, # 3574
+ 1095, 7378, 2282, 793, 1602, 7379, 3518, 2593, 4322, 4080, 2933, 2297, 4323, 3746, 980, 2496, # 3590
+ 544, 353, 527, 4324, 908, 2678, 2899, 7380, 381, 2619, 1942, 1348, 7381, 1341, 1252, 560, # 3606
+ 3072, 7382, 3420, 2856, 7383, 2053, 973, 886, 2080, 143, 4325, 7384, 7385, 157, 3886, 496, # 3622
+ 4081, 57, 840, 540, 2038, 4326, 4327, 3421, 2117, 1445, 970, 2259, 1748, 1965, 2081, 4082, # 3638
+ 3119, 1234, 1775, 3251, 2816, 3629, 773, 1206, 2129, 1066, 2039, 1326, 3887, 1738, 1725, 4083, # 3654
+ 279, 3120, 51, 1544, 2594, 423, 1578, 2130, 2066, 173, 4328, 1879, 7386, 7387, 1583, 264, # 3670
+ 610, 3630, 4329, 2439, 280, 154, 7388, 7389, 7390, 1739, 338, 1282, 3073, 693, 2857, 1411, # 3686
+ 1074, 3747, 2440, 7391, 4330, 7392, 7393, 1240, 952, 2394, 7394, 2900, 1538, 2679, 685, 1483, # 3702
+ 4084, 2468, 1436, 953, 4085, 2054, 4331, 671, 2395, 79, 4086, 2441, 3252, 608, 567, 2680, # 3718
+ 3422, 4087, 4088, 1691, 393, 1261, 1791, 2396, 7395, 4332, 7396, 7397, 7398, 7399, 1383, 1672, # 3734
+ 3748, 3182, 1464, 522, 1119, 661, 1150, 216, 675, 4333, 3888, 1432, 3519, 609, 4334, 2681, # 3750
+ 2397, 7400, 7401, 7402, 4089, 3025, 0, 7403, 2469, 315, 231, 2442, 301, 3319, 4335, 2380, # 3766
+ 7404, 233, 4090, 3631, 1818, 4336, 4337, 7405, 96, 1776, 1315, 2082, 7406, 257, 7407, 1809, # 3782
+ 3632, 2709, 1139, 1819, 4091, 2021, 1124, 2163, 2778, 1777, 2649, 7408, 3074, 363, 1655, 3183, # 3798
+ 7409, 2975, 7410, 7411, 7412, 3889, 1567, 3890, 718, 103, 3184, 849, 1443, 341, 3320, 2934, # 3814
+ 1484, 7413, 1712, 127, 67, 339, 4092, 2398, 679, 1412, 821, 7414, 7415, 834, 738, 351, # 3830
+ 2976, 2146, 846, 235, 1497, 1880, 418, 1992, 3749, 2710, 186, 1100, 2147, 2746, 3520, 1545, # 3846
+ 1355, 2935, 2858, 1377, 583, 3891, 4093, 2573, 2977, 7416, 1298, 3633, 1078, 2549, 3634, 2358, # 3862
+ 78, 3750, 3751, 267, 1289, 2099, 2001, 1594, 4094, 348, 369, 1274, 2194, 2175, 1837, 4338, # 3878
+ 1820, 2817, 3635, 2747, 2283, 2002, 4339, 2936, 2748, 144, 3321, 882, 4340, 3892, 2749, 3423, # 3894
+ 4341, 2901, 7417, 4095, 1726, 320, 7418, 3893, 3026, 788, 2978, 7419, 2818, 1773, 1327, 2859, # 3910
+ 3894, 2819, 7420, 1306, 4342, 2003, 1700, 3752, 3521, 2359, 2650, 787, 2022, 506, 824, 3636, # 3926
+ 534, 323, 4343, 1044, 3322, 2023, 1900, 946, 3424, 7421, 1778, 1500, 1678, 7422, 1881, 4344, # 3942
+ 165, 243, 4345, 3637, 2521, 123, 683, 4096, 764, 4346, 36, 3895, 1792, 589, 2902, 816, # 3958
+ 626, 1667, 3027, 2233, 1639, 1555, 1622, 3753, 3896, 7423, 3897, 2860, 1370, 1228, 1932, 891, # 3974
+ 2083, 2903, 304, 4097, 7424, 292, 2979, 2711, 3522, 691, 2100, 4098, 1115, 4347, 118, 662, # 3990
+ 7425, 611, 1156, 854, 2381, 1316, 2861, 2, 386, 515, 2904, 7426, 7427, 3253, 868, 2234, # 4006
+ 1486, 855, 2651, 785, 2212, 3028, 7428, 1040, 3185, 3523, 7429, 3121, 448, 7430, 1525, 7431, # 4022
+ 2164, 4348, 7432, 3754, 7433, 4099, 2820, 3524, 3122, 503, 818, 3898, 3123, 1568, 814, 676, # 4038
+ 1444, 306, 1749, 7434, 3755, 1416, 1030, 197, 1428, 805, 2821, 1501, 4349, 7435, 7436, 7437, # 4054
+ 1993, 7438, 4350, 7439, 7440, 2195, 13, 2779, 3638, 2980, 3124, 1229, 1916, 7441, 3756, 2131, # 4070
+ 7442, 4100, 4351, 2399, 3525, 7443, 2213, 1511, 1727, 1120, 7444, 7445, 646, 3757, 2443, 307, # 4086
+ 7446, 7447, 1595, 3186, 7448, 7449, 7450, 3639, 1113, 1356, 3899, 1465, 2522, 2523, 7451, 519, # 4102
+ 7452, 128, 2132, 92, 2284, 1979, 7453, 3900, 1512, 342, 3125, 2196, 7454, 2780, 2214, 1980, # 4118
+ 3323, 7455, 290, 1656, 1317, 789, 827, 2360, 7456, 3758, 4352, 562, 581, 3901, 7457, 401, # 4134
+ 4353, 2248, 94, 4354, 1399, 2781, 7458, 1463, 2024, 4355, 3187, 1943, 7459, 828, 1105, 4101, # 4150
+ 1262, 1394, 7460, 4102, 605, 4356, 7461, 1783, 2862, 7462, 2822, 819, 2101, 578, 2197, 2937, # 4166
+ 7463, 1502, 436, 3254, 4103, 3255, 2823, 3902, 2905, 3425, 3426, 7464, 2712, 2315, 7465, 7466, # 4182
+ 2332, 2067, 23, 4357, 193, 826, 3759, 2102, 699, 1630, 4104, 3075, 390, 1793, 1064, 3526, # 4198
+ 7467, 1579, 3076, 3077, 1400, 7468, 4105, 1838, 1640, 2863, 7469, 4358, 4359, 137, 4106, 598, # 4214
+ 3078, 1966, 780, 104, 974, 2938, 7470, 278, 899, 253, 402, 572, 504, 493, 1339, 7471, # 4230
+ 3903, 1275, 4360, 2574, 2550, 7472, 3640, 3029, 3079, 2249, 565, 1334, 2713, 863, 41, 7473, # 4246
+ 7474, 4361, 7475, 1657, 2333, 19, 463, 2750, 4107, 606, 7476, 2981, 3256, 1087, 2084, 1323, # 4262
+ 2652, 2982, 7477, 1631, 1623, 1750, 4108, 2682, 7478, 2864, 791, 2714, 2653, 2334, 232, 2416, # 4278
+ 7479, 2983, 1498, 7480, 2654, 2620, 755, 1366, 3641, 3257, 3126, 2025, 1609, 119, 1917, 3427, # 4294
+ 862, 1026, 4109, 7481, 3904, 3760, 4362, 3905, 4363, 2260, 1951, 2470, 7482, 1125, 817, 4110, # 4310
+ 4111, 3906, 1513, 1766, 2040, 1487, 4112, 3030, 3258, 2824, 3761, 3127, 7483, 7484, 1507, 7485, # 4326
+ 2683, 733, 40, 1632, 1106, 2865, 345, 4113, 841, 2524, 230, 4364, 2984, 1846, 3259, 3428, # 4342
+ 7486, 1263, 986, 3429, 7487, 735, 879, 254, 1137, 857, 622, 1300, 1180, 1388, 1562, 3907, # 4358
+ 3908, 2939, 967, 2751, 2655, 1349, 592, 2133, 1692, 3324, 2985, 1994, 4114, 1679, 3909, 1901, # 4374
+ 2185, 7488, 739, 3642, 2715, 1296, 1290, 7489, 4115, 2198, 2199, 1921, 1563, 2595, 2551, 1870, # 4390
+ 2752, 2986, 7490, 435, 7491, 343, 1108, 596, 17, 1751, 4365, 2235, 3430, 3643, 7492, 4366, # 4406
+ 294, 3527, 2940, 1693, 477, 979, 281, 2041, 3528, 643, 2042, 3644, 2621, 2782, 2261, 1031, # 4422
+ 2335, 2134, 2298, 3529, 4367, 367, 1249, 2552, 7493, 3530, 7494, 4368, 1283, 3325, 2004, 240, # 4438
+ 1762, 3326, 4369, 4370, 836, 1069, 3128, 474, 7495, 2148, 2525, 268, 3531, 7496, 3188, 1521, # 4454
+ 1284, 7497, 1658, 1546, 4116, 7498, 3532, 3533, 7499, 4117, 3327, 2684, 1685, 4118, 961, 1673, # 4470
+ 2622, 190, 2005, 2200, 3762, 4371, 4372, 7500, 570, 2497, 3645, 1490, 7501, 4373, 2623, 3260, # 4486
+ 1956, 4374, 584, 1514, 396, 1045, 1944, 7502, 4375, 1967, 2444, 7503, 7504, 4376, 3910, 619, # 4502
+ 7505, 3129, 3261, 215, 2006, 2783, 2553, 3189, 4377, 3190, 4378, 763, 4119, 3763, 4379, 7506, # 4518
+ 7507, 1957, 1767, 2941, 3328, 3646, 1174, 452, 1477, 4380, 3329, 3130, 7508, 2825, 1253, 2382, # 4534
+ 2186, 1091, 2285, 4120, 492, 7509, 638, 1169, 1824, 2135, 1752, 3911, 648, 926, 1021, 1324, # 4550
+ 4381, 520, 4382, 997, 847, 1007, 892, 4383, 3764, 2262, 1871, 3647, 7510, 2400, 1784, 4384, # 4566
+ 1952, 2942, 3080, 3191, 1728, 4121, 2043, 3648, 4385, 2007, 1701, 3131, 1551, 30, 2263, 4122, # 4582
+ 7511, 2026, 4386, 3534, 7512, 501, 7513, 4123, 594, 3431, 2165, 1821, 3535, 3432, 3536, 3192, # 4598
+ 829, 2826, 4124, 7514, 1680, 3132, 1225, 4125, 7515, 3262, 4387, 4126, 3133, 2336, 7516, 4388, # 4614
+ 4127, 7517, 3912, 3913, 7518, 1847, 2383, 2596, 3330, 7519, 4389, 374, 3914, 652, 4128, 4129, # 4630
+ 375, 1140, 798, 7520, 7521, 7522, 2361, 4390, 2264, 546, 1659, 138, 3031, 2445, 4391, 7523, # 4646
+ 2250, 612, 1848, 910, 796, 3765, 1740, 1371, 825, 3766, 3767, 7524, 2906, 2554, 7525, 692, # 4662
+ 444, 3032, 2624, 801, 4392, 4130, 7526, 1491, 244, 1053, 3033, 4131, 4132, 340, 7527, 3915, # 4678
+ 1041, 2987, 293, 1168, 87, 1357, 7528, 1539, 959, 7529, 2236, 721, 694, 4133, 3768, 219, # 4694
+ 1478, 644, 1417, 3331, 2656, 1413, 1401, 1335, 1389, 3916, 7530, 7531, 2988, 2362, 3134, 1825, # 4710
+ 730, 1515, 184, 2827, 66, 4393, 7532, 1660, 2943, 246, 3332, 378, 1457, 226, 3433, 975, # 4726
+ 3917, 2944, 1264, 3537, 674, 696, 7533, 163, 7534, 1141, 2417, 2166, 713, 3538, 3333, 4394, # 4742
+ 3918, 7535, 7536, 1186, 15, 7537, 1079, 1070, 7538, 1522, 3193, 3539, 276, 1050, 2716, 758, # 4758
+ 1126, 653, 2945, 3263, 7539, 2337, 889, 3540, 3919, 3081, 2989, 903, 1250, 4395, 3920, 3434, # 4774
+ 3541, 1342, 1681, 1718, 766, 3264, 286, 89, 2946, 3649, 7540, 1713, 7541, 2597, 3334, 2990, # 4790
+ 7542, 2947, 2215, 3194, 2866, 7543, 4396, 2498, 2526, 181, 387, 1075, 3921, 731, 2187, 3335, # 4806
+ 7544, 3265, 310, 313, 3435, 2299, 770, 4134, 54, 3034, 189, 4397, 3082, 3769, 3922, 7545, # 4822
+ 1230, 1617, 1849, 355, 3542, 4135, 4398, 3336, 111, 4136, 3650, 1350, 3135, 3436, 3035, 4137, # 4838
+ 2149, 3266, 3543, 7546, 2784, 3923, 3924, 2991, 722, 2008, 7547, 1071, 247, 1207, 2338, 2471, # 4854
+ 1378, 4399, 2009, 864, 1437, 1214, 4400, 373, 3770, 1142, 2216, 667, 4401, 442, 2753, 2555, # 4870
+ 3771, 3925, 1968, 4138, 3267, 1839, 837, 170, 1107, 934, 1336, 1882, 7548, 7549, 2118, 4139, # 4886
+ 2828, 743, 1569, 7550, 4402, 4140, 582, 2384, 1418, 3437, 7551, 1802, 7552, 357, 1395, 1729, # 4902
+ 3651, 3268, 2418, 1564, 2237, 7553, 3083, 3772, 1633, 4403, 1114, 2085, 4141, 1532, 7554, 482, # 4918
+ 2446, 4404, 7555, 7556, 1492, 833, 1466, 7557, 2717, 3544, 1641, 2829, 7558, 1526, 1272, 3652, # 4934
+ 4142, 1686, 1794, 416, 2556, 1902, 1953, 1803, 7559, 3773, 2785, 3774, 1159, 2316, 7560, 2867, # 4950
+ 4405, 1610, 1584, 3036, 2419, 2754, 443, 3269, 1163, 3136, 7561, 7562, 3926, 7563, 4143, 2499, # 4966
+ 3037, 4406, 3927, 3137, 2103, 1647, 3545, 2010, 1872, 4144, 7564, 4145, 431, 3438, 7565, 250, # 4982
+ 97, 81, 4146, 7566, 1648, 1850, 1558, 160, 848, 7567, 866, 740, 1694, 7568, 2201, 2830, # 4998
+ 3195, 4147, 4407, 3653, 1687, 950, 2472, 426, 469, 3196, 3654, 3655, 3928, 7569, 7570, 1188, # 5014
+ 424, 1995, 861, 3546, 4148, 3775, 2202, 2685, 168, 1235, 3547, 4149, 7571, 2086, 1674, 4408, # 5030
+ 3337, 3270, 220, 2557, 1009, 7572, 3776, 670, 2992, 332, 1208, 717, 7573, 7574, 3548, 2447, # 5046
+ 3929, 3338, 7575, 513, 7576, 1209, 2868, 3339, 3138, 4409, 1080, 7577, 7578, 7579, 7580, 2527, # 5062
+ 3656, 3549, 815, 1587, 3930, 3931, 7581, 3550, 3439, 3777, 1254, 4410, 1328, 3038, 1390, 3932, # 5078
+ 1741, 3933, 3778, 3934, 7582, 236, 3779, 2448, 3271, 7583, 7584, 3657, 3780, 1273, 3781, 4411, # 5094
+ 7585, 308, 7586, 4412, 245, 4413, 1851, 2473, 1307, 2575, 430, 715, 2136, 2449, 7587, 270, # 5110
+ 199, 2869, 3935, 7588, 3551, 2718, 1753, 761, 1754, 725, 1661, 1840, 4414, 3440, 3658, 7589, # 5126
+ 7590, 587, 14, 3272, 227, 2598, 326, 480, 2265, 943, 2755, 3552, 291, 650, 1883, 7591, # 5142
+ 1702, 1226, 102, 1547, 62, 3441, 904, 4415, 3442, 1164, 4150, 7592, 7593, 1224, 1548, 2756, # 5158
+ 391, 498, 1493, 7594, 1386, 1419, 7595, 2055, 1177, 4416, 813, 880, 1081, 2363, 566, 1145, # 5174
+ 4417, 2286, 1001, 1035, 2558, 2599, 2238, 394, 1286, 7596, 7597, 2068, 7598, 86, 1494, 1730, # 5190
+ 3936, 491, 1588, 745, 897, 2948, 843, 3340, 3937, 2757, 2870, 3273, 1768, 998, 2217, 2069, # 5206
+ 397, 1826, 1195, 1969, 3659, 2993, 3341, 284, 7599, 3782, 2500, 2137, 2119, 1903, 7600, 3938, # 5222
+ 2150, 3939, 4151, 1036, 3443, 1904, 114, 2559, 4152, 209, 1527, 7601, 7602, 2949, 2831, 2625, # 5238
+ 2385, 2719, 3139, 812, 2560, 7603, 3274, 7604, 1559, 737, 1884, 3660, 1210, 885, 28, 2686, # 5254
+ 3553, 3783, 7605, 4153, 1004, 1779, 4418, 7606, 346, 1981, 2218, 2687, 4419, 3784, 1742, 797, # 5270
+ 1642, 3940, 1933, 1072, 1384, 2151, 896, 3941, 3275, 3661, 3197, 2871, 3554, 7607, 2561, 1958, # 5286
+ 4420, 2450, 1785, 7608, 7609, 7610, 3942, 4154, 1005, 1308, 3662, 4155, 2720, 4421, 4422, 1528, # 5302
+ 2600, 161, 1178, 4156, 1982, 987, 4423, 1101, 4157, 631, 3943, 1157, 3198, 2420, 1343, 1241, # 5318
+ 1016, 2239, 2562, 372, 877, 2339, 2501, 1160, 555, 1934, 911, 3944, 7611, 466, 1170, 169, # 5334
+ 1051, 2907, 2688, 3663, 2474, 2994, 1182, 2011, 2563, 1251, 2626, 7612, 992, 2340, 3444, 1540, # 5350
+ 2721, 1201, 2070, 2401, 1996, 2475, 7613, 4424, 528, 1922, 2188, 1503, 1873, 1570, 2364, 3342, # 5366
+ 3276, 7614, 557, 1073, 7615, 1827, 3445, 2087, 2266, 3140, 3039, 3084, 767, 3085, 2786, 4425, # 5382
+ 1006, 4158, 4426, 2341, 1267, 2176, 3664, 3199, 778, 3945, 3200, 2722, 1597, 2657, 7616, 4427, # 5398
+ 7617, 3446, 7618, 7619, 7620, 3277, 2689, 1433, 3278, 131, 95, 1504, 3946, 723, 4159, 3141, # 5414
+ 1841, 3555, 2758, 2189, 3947, 2027, 2104, 3665, 7621, 2995, 3948, 1218, 7622, 3343, 3201, 3949, # 5430
+ 4160, 2576, 248, 1634, 3785, 912, 7623, 2832, 3666, 3040, 3786, 654, 53, 7624, 2996, 7625, # 5446
+ 1688, 4428, 777, 3447, 1032, 3950, 1425, 7626, 191, 820, 2120, 2833, 971, 4429, 931, 3202, # 5462
+ 135, 664, 783, 3787, 1997, 772, 2908, 1935, 3951, 3788, 4430, 2909, 3203, 282, 2723, 640, # 5478
+ 1372, 3448, 1127, 922, 325, 3344, 7627, 7628, 711, 2044, 7629, 7630, 3952, 2219, 2787, 1936, # 5494
+ 3953, 3345, 2220, 2251, 3789, 2300, 7631, 4431, 3790, 1258, 3279, 3954, 3204, 2138, 2950, 3955, # 5510
+ 3956, 7632, 2221, 258, 3205, 4432, 101, 1227, 7633, 3280, 1755, 7634, 1391, 3281, 7635, 2910, # 5526
+ 2056, 893, 7636, 7637, 7638, 1402, 4161, 2342, 7639, 7640, 3206, 3556, 7641, 7642, 878, 1325, # 5542
+ 1780, 2788, 4433, 259, 1385, 2577, 744, 1183, 2267, 4434, 7643, 3957, 2502, 7644, 684, 1024, # 5558
+ 4162, 7645, 472, 3557, 3449, 1165, 3282, 3958, 3959, 322, 2152, 881, 455, 1695, 1152, 1340, # 5574
+ 660, 554, 2153, 4435, 1058, 4436, 4163, 830, 1065, 3346, 3960, 4437, 1923, 7646, 1703, 1918, # 5590
+ 7647, 932, 2268, 122, 7648, 4438, 947, 677, 7649, 3791, 2627, 297, 1905, 1924, 2269, 4439, # 5606
+ 2317, 3283, 7650, 7651, 4164, 7652, 4165, 84, 4166, 112, 989, 7653, 547, 1059, 3961, 701, # 5622
+ 3558, 1019, 7654, 4167, 7655, 3450, 942, 639, 457, 2301, 2451, 993, 2951, 407, 851, 494, # 5638
+ 4440, 3347, 927, 7656, 1237, 7657, 2421, 3348, 573, 4168, 680, 921, 2911, 1279, 1874, 285, # 5654
+ 790, 1448, 1983, 719, 2167, 7658, 7659, 4441, 3962, 3963, 1649, 7660, 1541, 563, 7661, 1077, # 5670
+ 7662, 3349, 3041, 3451, 511, 2997, 3964, 3965, 3667, 3966, 1268, 2564, 3350, 3207, 4442, 4443, # 5686
+ 7663, 535, 1048, 1276, 1189, 2912, 2028, 3142, 1438, 1373, 2834, 2952, 1134, 2012, 7664, 4169, # 5702
+ 1238, 2578, 3086, 1259, 7665, 700, 7666, 2953, 3143, 3668, 4170, 7667, 4171, 1146, 1875, 1906, # 5718
+ 4444, 2601, 3967, 781, 2422, 132, 1589, 203, 147, 273, 2789, 2402, 898, 1786, 2154, 3968, # 5734
+ 3969, 7668, 3792, 2790, 7669, 7670, 4445, 4446, 7671, 3208, 7672, 1635, 3793, 965, 7673, 1804, # 5750
+ 2690, 1516, 3559, 1121, 1082, 1329, 3284, 3970, 1449, 3794, 65, 1128, 2835, 2913, 2759, 1590, # 5766
+ 3795, 7674, 7675, 12, 2658, 45, 976, 2579, 3144, 4447, 517, 2528, 1013, 1037, 3209, 7676, # 5782
+ 3796, 2836, 7677, 3797, 7678, 3452, 7679, 2602, 614, 1998, 2318, 3798, 3087, 2724, 2628, 7680, # 5798
+ 2580, 4172, 599, 1269, 7681, 1810, 3669, 7682, 2691, 3088, 759, 1060, 489, 1805, 3351, 3285, # 5814
+ 1358, 7683, 7684, 2386, 1387, 1215, 2629, 2252, 490, 7685, 7686, 4173, 1759, 2387, 2343, 7687, # 5830
+ 4448, 3799, 1907, 3971, 2630, 1806, 3210, 4449, 3453, 3286, 2760, 2344, 874, 7688, 7689, 3454, # 5846
+ 3670, 1858, 91, 2914, 3671, 3042, 3800, 4450, 7690, 3145, 3972, 2659, 7691, 3455, 1202, 1403, # 5862
+ 3801, 2954, 2529, 1517, 2503, 4451, 3456, 2504, 7692, 4452, 7693, 2692, 1885, 1495, 1731, 3973, # 5878
+ 2365, 4453, 7694, 2029, 7695, 7696, 3974, 2693, 1216, 237, 2581, 4174, 2319, 3975, 3802, 4454, # 5894
+ 4455, 2694, 3560, 3457, 445, 4456, 7697, 7698, 7699, 7700, 2761, 61, 3976, 3672, 1822, 3977, # 5910
+ 7701, 687, 2045, 935, 925, 405, 2660, 703, 1096, 1859, 2725, 4457, 3978, 1876, 1367, 2695, # 5926
+ 3352, 918, 2105, 1781, 2476, 334, 3287, 1611, 1093, 4458, 564, 3146, 3458, 3673, 3353, 945, # 5942
+ 2631, 2057, 4459, 7702, 1925, 872, 4175, 7703, 3459, 2696, 3089, 349, 4176, 3674, 3979, 4460, # 5958
+ 3803, 4177, 3675, 2155, 3980, 4461, 4462, 4178, 4463, 2403, 2046, 782, 3981, 400, 251, 4179, # 5974
+ 1624, 7704, 7705, 277, 3676, 299, 1265, 476, 1191, 3804, 2121, 4180, 4181, 1109, 205, 7706, # 5990
+ 2582, 1000, 2156, 3561, 1860, 7707, 7708, 7709, 4464, 7710, 4465, 2565, 107, 2477, 2157, 3982, # 6006
+ 3460, 3147, 7711, 1533, 541, 1301, 158, 753, 4182, 2872, 3562, 7712, 1696, 370, 1088, 4183, # 6022
+ 4466, 3563, 579, 327, 440, 162, 2240, 269, 1937, 1374, 3461, 968, 3043, 56, 1396, 3090, # 6038
+ 2106, 3288, 3354, 7713, 1926, 2158, 4467, 2998, 7714, 3564, 7715, 7716, 3677, 4468, 2478, 7717, # 6054
+ 2791, 7718, 1650, 4469, 7719, 2603, 7720, 7721, 3983, 2661, 3355, 1149, 3356, 3984, 3805, 3985, # 6070
+ 7722, 1076, 49, 7723, 951, 3211, 3289, 3290, 450, 2837, 920, 7724, 1811, 2792, 2366, 4184, # 6086
+ 1908, 1138, 2367, 3806, 3462, 7725, 3212, 4470, 1909, 1147, 1518, 2423, 4471, 3807, 7726, 4472, # 6102
+ 2388, 2604, 260, 1795, 3213, 7727, 7728, 3808, 3291, 708, 7729, 3565, 1704, 7730, 3566, 1351, # 6118
+ 1618, 3357, 2999, 1886, 944, 4185, 3358, 4186, 3044, 3359, 4187, 7731, 3678, 422, 413, 1714, # 6134
+ 3292, 500, 2058, 2345, 4188, 2479, 7732, 1344, 1910, 954, 7733, 1668, 7734, 7735, 3986, 2404, # 6150
+ 4189, 3567, 3809, 4190, 7736, 2302, 1318, 2505, 3091, 133, 3092, 2873, 4473, 629, 31, 2838, # 6166
+ 2697, 3810, 4474, 850, 949, 4475, 3987, 2955, 1732, 2088, 4191, 1496, 1852, 7737, 3988, 620, # 6182
+ 3214, 981, 1242, 3679, 3360, 1619, 3680, 1643, 3293, 2139, 2452, 1970, 1719, 3463, 2168, 7738, # 6198
+ 3215, 7739, 7740, 3361, 1828, 7741, 1277, 4476, 1565, 2047, 7742, 1636, 3568, 3093, 7743, 869, # 6214
+ 2839, 655, 3811, 3812, 3094, 3989, 3000, 3813, 1310, 3569, 4477, 7744, 7745, 7746, 1733, 558, # 6230
+ 4478, 3681, 335, 1549, 3045, 1756, 4192, 3682, 1945, 3464, 1829, 1291, 1192, 470, 2726, 2107, # 6246
+ 2793, 913, 1054, 3990, 7747, 1027, 7748, 3046, 3991, 4479, 982, 2662, 3362, 3148, 3465, 3216, # 6262
+ 3217, 1946, 2794, 7749, 571, 4480, 7750, 1830, 7751, 3570, 2583, 1523, 2424, 7752, 2089, 984, # 6278
+ 4481, 3683, 1959, 7753, 3684, 852, 923, 2795, 3466, 3685, 969, 1519, 999, 2048, 2320, 1705, # 6294
+ 7754, 3095, 615, 1662, 151, 597, 3992, 2405, 2321, 1049, 275, 4482, 3686, 4193, 568, 3687, # 6310
+ 3571, 2480, 4194, 3688, 7755, 2425, 2270, 409, 3218, 7756, 1566, 2874, 3467, 1002, 769, 2840, # 6326
+ 194, 2090, 3149, 3689, 2222, 3294, 4195, 628, 1505, 7757, 7758, 1763, 2177, 3001, 3993, 521, # 6342
+ 1161, 2584, 1787, 2203, 2406, 4483, 3994, 1625, 4196, 4197, 412, 42, 3096, 464, 7759, 2632, # 6358
+ 4484, 3363, 1760, 1571, 2875, 3468, 2530, 1219, 2204, 3814, 2633, 2140, 2368, 4485, 4486, 3295, # 6374
+ 1651, 3364, 3572, 7760, 7761, 3573, 2481, 3469, 7762, 3690, 7763, 7764, 2271, 2091, 460, 7765, # 6390
+ 4487, 7766, 3002, 962, 588, 3574, 289, 3219, 2634, 1116, 52, 7767, 3047, 1796, 7768, 7769, # 6406
+ 7770, 1467, 7771, 1598, 1143, 3691, 4198, 1984, 1734, 1067, 4488, 1280, 3365, 465, 4489, 1572, # 6422
+ 510, 7772, 1927, 2241, 1812, 1644, 3575, 7773, 4490, 3692, 7774, 7775, 2663, 1573, 1534, 7776, # 6438
+ 7777, 4199, 536, 1807, 1761, 3470, 3815, 3150, 2635, 7778, 7779, 7780, 4491, 3471, 2915, 1911, # 6454
+ 2796, 7781, 3296, 1122, 377, 3220, 7782, 360, 7783, 7784, 4200, 1529, 551, 7785, 2059, 3693, # 6470
+ 1769, 2426, 7786, 2916, 4201, 3297, 3097, 2322, 2108, 2030, 4492, 1404, 136, 1468, 1479, 672, # 6486
+ 1171, 3221, 2303, 271, 3151, 7787, 2762, 7788, 2049, 678, 2727, 865, 1947, 4493, 7789, 2013, # 6502
+ 3995, 2956, 7790, 2728, 2223, 1397, 3048, 3694, 4494, 4495, 1735, 2917, 3366, 3576, 7791, 3816, # 6518
+ 509, 2841, 2453, 2876, 3817, 7792, 7793, 3152, 3153, 4496, 4202, 2531, 4497, 2304, 1166, 1010, # 6534
+ 552, 681, 1887, 7794, 7795, 2957, 2958, 3996, 1287, 1596, 1861, 3154, 358, 453, 736, 175, # 6550
+ 478, 1117, 905, 1167, 1097, 7796, 1853, 1530, 7797, 1706, 7798, 2178, 3472, 2287, 3695, 3473, # 6566
+ 3577, 4203, 2092, 4204, 7799, 3367, 1193, 2482, 4205, 1458, 2190, 2205, 1862, 1888, 1421, 3298, # 6582
+ 2918, 3049, 2179, 3474, 595, 2122, 7800, 3997, 7801, 7802, 4206, 1707, 2636, 223, 3696, 1359, # 6598
+ 751, 3098, 183, 3475, 7803, 2797, 3003, 419, 2369, 633, 704, 3818, 2389, 241, 7804, 7805, # 6614
+ 7806, 838, 3004, 3697, 2272, 2763, 2454, 3819, 1938, 2050, 3998, 1309, 3099, 2242, 1181, 7807, # 6630
+ 1136, 2206, 3820, 2370, 1446, 4207, 2305, 4498, 7808, 7809, 4208, 1055, 2605, 484, 3698, 7810, # 6646
+ 3999, 625, 4209, 2273, 3368, 1499, 4210, 4000, 7811, 4001, 4211, 3222, 2274, 2275, 3476, 7812, # 6662
+ 7813, 2764, 808, 2606, 3699, 3369, 4002, 4212, 3100, 2532, 526, 3370, 3821, 4213, 955, 7814, # 6678
+ 1620, 4214, 2637, 2427, 7815, 1429, 3700, 1669, 1831, 994, 928, 7816, 3578, 1260, 7817, 7818, # 6694
+ 7819, 1948, 2288, 741, 2919, 1626, 4215, 2729, 2455, 867, 1184, 362, 3371, 1392, 7820, 7821, # 6710
+ 4003, 4216, 1770, 1736, 3223, 2920, 4499, 4500, 1928, 2698, 1459, 1158, 7822, 3050, 3372, 2877, # 6726
+ 1292, 1929, 2506, 2842, 3701, 1985, 1187, 2071, 2014, 2607, 4217, 7823, 2566, 2507, 2169, 3702, # 6742
+ 2483, 3299, 7824, 3703, 4501, 7825, 7826, 666, 1003, 3005, 1022, 3579, 4218, 7827, 4502, 1813, # 6758
+ 2253, 574, 3822, 1603, 295, 1535, 705, 3823, 4219, 283, 858, 417, 7828, 7829, 3224, 4503, # 6774
+ 4504, 3051, 1220, 1889, 1046, 2276, 2456, 4004, 1393, 1599, 689, 2567, 388, 4220, 7830, 2484, # 6790
+ 802, 7831, 2798, 3824, 2060, 1405, 2254, 7832, 4505, 3825, 2109, 1052, 1345, 3225, 1585, 7833, # 6806
+ 809, 7834, 7835, 7836, 575, 2730, 3477, 956, 1552, 1469, 1144, 2323, 7837, 2324, 1560, 2457, # 6822
+ 3580, 3226, 4005, 616, 2207, 3155, 2180, 2289, 7838, 1832, 7839, 3478, 4506, 7840, 1319, 3704, # 6838
+ 3705, 1211, 3581, 1023, 3227, 1293, 2799, 7841, 7842, 7843, 3826, 607, 2306, 3827, 762, 2878, # 6854
+ 1439, 4221, 1360, 7844, 1485, 3052, 7845, 4507, 1038, 4222, 1450, 2061, 2638, 4223, 1379, 4508, # 6870
+ 2585, 7846, 7847, 4224, 1352, 1414, 2325, 2921, 1172, 7848, 7849, 3828, 3829, 7850, 1797, 1451, # 6886
+ 7851, 7852, 7853, 7854, 2922, 4006, 4007, 2485, 2346, 411, 4008, 4009, 3582, 3300, 3101, 4509, # 6902
+ 1561, 2664, 1452, 4010, 1375, 7855, 7856, 47, 2959, 316, 7857, 1406, 1591, 2923, 3156, 7858, # 6918
+ 1025, 2141, 3102, 3157, 354, 2731, 884, 2224, 4225, 2407, 508, 3706, 726, 3583, 996, 2428, # 6934
+ 3584, 729, 7859, 392, 2191, 1453, 4011, 4510, 3707, 7860, 7861, 2458, 3585, 2608, 1675, 2800, # 6950
+ 919, 2347, 2960, 2348, 1270, 4511, 4012, 73, 7862, 7863, 647, 7864, 3228, 2843, 2255, 1550, # 6966
+ 1346, 3006, 7865, 1332, 883, 3479, 7866, 7867, 7868, 7869, 3301, 2765, 7870, 1212, 831, 1347, # 6982
+ 4226, 4512, 2326, 3830, 1863, 3053, 720, 3831, 4513, 4514, 3832, 7871, 4227, 7872, 7873, 4515, # 6998
+ 7874, 7875, 1798, 4516, 3708, 2609, 4517, 3586, 1645, 2371, 7876, 7877, 2924, 669, 2208, 2665, # 7014
+ 2429, 7878, 2879, 7879, 7880, 1028, 3229, 7881, 4228, 2408, 7882, 2256, 1353, 7883, 7884, 4518, # 7030
+ 3158, 518, 7885, 4013, 7886, 4229, 1960, 7887, 2142, 4230, 7888, 7889, 3007, 2349, 2350, 3833, # 7046
+ 516, 1833, 1454, 4014, 2699, 4231, 4519, 2225, 2610, 1971, 1129, 3587, 7890, 2766, 7891, 2961, # 7062
+ 1422, 577, 1470, 3008, 1524, 3373, 7892, 7893, 432, 4232, 3054, 3480, 7894, 2586, 1455, 2508, # 7078
+ 2226, 1972, 1175, 7895, 1020, 2732, 4015, 3481, 4520, 7896, 2733, 7897, 1743, 1361, 3055, 3482, # 7094
+ 2639, 4016, 4233, 4521, 2290, 895, 924, 4234, 2170, 331, 2243, 3056, 166, 1627, 3057, 1098, # 7110
+ 7898, 1232, 2880, 2227, 3374, 4522, 657, 403, 1196, 2372, 542, 3709, 3375, 1600, 4235, 3483, # 7126
+ 7899, 4523, 2767, 3230, 576, 530, 1362, 7900, 4524, 2533, 2666, 3710, 4017, 7901, 842, 3834, # 7142
+ 7902, 2801, 2031, 1014, 4018, 213, 2700, 3376, 665, 621, 4236, 7903, 3711, 2925, 2430, 7904, # 7158
+ 2431, 3302, 3588, 3377, 7905, 4237, 2534, 4238, 4525, 3589, 1682, 4239, 3484, 1380, 7906, 724, # 7174
+ 2277, 600, 1670, 7907, 1337, 1233, 4526, 3103, 2244, 7908, 1621, 4527, 7909, 651, 4240, 7910, # 7190
+ 1612, 4241, 2611, 7911, 2844, 7912, 2734, 2307, 3058, 7913, 716, 2459, 3059, 174, 1255, 2701, # 7206
+ 4019, 3590, 548, 1320, 1398, 728, 4020, 1574, 7914, 1890, 1197, 3060, 4021, 7915, 3061, 3062, # 7222
+ 3712, 3591, 3713, 747, 7916, 635, 4242, 4528, 7917, 7918, 7919, 4243, 7920, 7921, 4529, 7922, # 7238
+ 3378, 4530, 2432, 451, 7923, 3714, 2535, 2072, 4244, 2735, 4245, 4022, 7924, 1764, 4531, 7925, # 7254
+ 4246, 350, 7926, 2278, 2390, 2486, 7927, 4247, 4023, 2245, 1434, 4024, 488, 4532, 458, 4248, # 7270
+ 4025, 3715, 771, 1330, 2391, 3835, 2568, 3159, 2159, 2409, 1553, 2667, 3160, 4249, 7928, 2487, # 7286
+ 2881, 2612, 1720, 2702, 4250, 3379, 4533, 7929, 2536, 4251, 7930, 3231, 4252, 2768, 7931, 2015, # 7302
+ 2736, 7932, 1155, 1017, 3716, 3836, 7933, 3303, 2308, 201, 1864, 4253, 1430, 7934, 4026, 7935, # 7318
+ 7936, 7937, 7938, 7939, 4254, 1604, 7940, 414, 1865, 371, 2587, 4534, 4535, 3485, 2016, 3104, # 7334
+ 4536, 1708, 960, 4255, 887, 389, 2171, 1536, 1663, 1721, 7941, 2228, 4027, 2351, 2926, 1580, # 7350
+ 7942, 7943, 7944, 1744, 7945, 2537, 4537, 4538, 7946, 4539, 7947, 2073, 7948, 7949, 3592, 3380, # 7366
+ 2882, 4256, 7950, 4257, 2640, 3381, 2802, 673, 2703, 2460, 709, 3486, 4028, 3593, 4258, 7951, # 7382
+ 1148, 502, 634, 7952, 7953, 1204, 4540, 3594, 1575, 4541, 2613, 3717, 7954, 3718, 3105, 948, # 7398
+ 3232, 121, 1745, 3837, 1110, 7955, 4259, 3063, 2509, 3009, 4029, 3719, 1151, 1771, 3838, 1488, # 7414
+ 4030, 1986, 7956, 2433, 3487, 7957, 7958, 2093, 7959, 4260, 3839, 1213, 1407, 2803, 531, 2737, # 7430
+ 2538, 3233, 1011, 1537, 7960, 2769, 4261, 3106, 1061, 7961, 3720, 3721, 1866, 2883, 7962, 2017, # 7446
+ 120, 4262, 4263, 2062, 3595, 3234, 2309, 3840, 2668, 3382, 1954, 4542, 7963, 7964, 3488, 1047, # 7462
+ 2704, 1266, 7965, 1368, 4543, 2845, 649, 3383, 3841, 2539, 2738, 1102, 2846, 2669, 7966, 7967, # 7478
+ 1999, 7968, 1111, 3596, 2962, 7969, 2488, 3842, 3597, 2804, 1854, 3384, 3722, 7970, 7971, 3385, # 7494
+ 2410, 2884, 3304, 3235, 3598, 7972, 2569, 7973, 3599, 2805, 4031, 1460, 856, 7974, 3600, 7975, # 7510
+ 2885, 2963, 7976, 2886, 3843, 7977, 4264, 632, 2510, 875, 3844, 1697, 3845, 2291, 7978, 7979, # 7526
+ 4544, 3010, 1239, 580, 4545, 4265, 7980, 914, 936, 2074, 1190, 4032, 1039, 2123, 7981, 7982, # 7542
+ 7983, 3386, 1473, 7984, 1354, 4266, 3846, 7985, 2172, 3064, 4033, 915, 3305, 4267, 4268, 3306, # 7558
+ 1605, 1834, 7986, 2739, 398, 3601, 4269, 3847, 4034, 328, 1912, 2847, 4035, 3848, 1331, 4270, # 7574
+ 3011, 937, 4271, 7987, 3602, 4036, 4037, 3387, 2160, 4546, 3388, 524, 742, 538, 3065, 1012, # 7590
+ 7988, 7989, 3849, 2461, 7990, 658, 1103, 225, 3850, 7991, 7992, 4547, 7993, 4548, 7994, 3236, # 7606
+ 1243, 7995, 4038, 963, 2246, 4549, 7996, 2705, 3603, 3161, 7997, 7998, 2588, 2327, 7999, 4550, # 7622
+ 8000, 8001, 8002, 3489, 3307, 957, 3389, 2540, 2032, 1930, 2927, 2462, 870, 2018, 3604, 1746, # 7638
+ 2770, 2771, 2434, 2463, 8003, 3851, 8004, 3723, 3107, 3724, 3490, 3390, 3725, 8005, 1179, 3066, # 7654
+ 8006, 3162, 2373, 4272, 3726, 2541, 3163, 3108, 2740, 4039, 8007, 3391, 1556, 2542, 2292, 977, # 7670
+ 2887, 2033, 4040, 1205, 3392, 8008, 1765, 3393, 3164, 2124, 1271, 1689, 714, 4551, 3491, 8009, # 7686
+ 2328, 3852, 533, 4273, 3605, 2181, 617, 8010, 2464, 3308, 3492, 2310, 8011, 8012, 3165, 8013, # 7702
+ 8014, 3853, 1987, 618, 427, 2641, 3493, 3394, 8015, 8016, 1244, 1690, 8017, 2806, 4274, 4552, # 7718
+ 8018, 3494, 8019, 8020, 2279, 1576, 473, 3606, 4275, 3395, 972, 8021, 3607, 8022, 3067, 8023, # 7734
+ 8024, 4553, 4554, 8025, 3727, 4041, 4042, 8026, 153, 4555, 356, 8027, 1891, 2888, 4276, 2143, # 7750
+ 408, 803, 2352, 8028, 3854, 8029, 4277, 1646, 2570, 2511, 4556, 4557, 3855, 8030, 3856, 4278, # 7766
+ 8031, 2411, 3396, 752, 8032, 8033, 1961, 2964, 8034, 746, 3012, 2465, 8035, 4279, 3728, 698, # 7782
+ 4558, 1892, 4280, 3608, 2543, 4559, 3609, 3857, 8036, 3166, 3397, 8037, 1823, 1302, 4043, 2706, # 7798
+ 3858, 1973, 4281, 8038, 4282, 3167, 823, 1303, 1288, 1236, 2848, 3495, 4044, 3398, 774, 3859, # 7814
+ 8039, 1581, 4560, 1304, 2849, 3860, 4561, 8040, 2435, 2161, 1083, 3237, 4283, 4045, 4284, 344, # 7830
+ 1173, 288, 2311, 454, 1683, 8041, 8042, 1461, 4562, 4046, 2589, 8043, 8044, 4563, 985, 894, # 7846
+ 8045, 3399, 3168, 8046, 1913, 2928, 3729, 1988, 8047, 2110, 1974, 8048, 4047, 8049, 2571, 1194, # 7862
+ 425, 8050, 4564, 3169, 1245, 3730, 4285, 8051, 8052, 2850, 8053, 636, 4565, 1855, 3861, 760, # 7878
+ 1799, 8054, 4286, 2209, 1508, 4566, 4048, 1893, 1684, 2293, 8055, 8056, 8057, 4287, 4288, 2210, # 7894
+ 479, 8058, 8059, 832, 8060, 4049, 2489, 8061, 2965, 2490, 3731, 990, 3109, 627, 1814, 2642, # 7910
+ 4289, 1582, 4290, 2125, 2111, 3496, 4567, 8062, 799, 4291, 3170, 8063, 4568, 2112, 1737, 3013, # 7926
+ 1018, 543, 754, 4292, 3309, 1676, 4569, 4570, 4050, 8064, 1489, 8065, 3497, 8066, 2614, 2889, # 7942
+ 4051, 8067, 8068, 2966, 8069, 8070, 8071, 8072, 3171, 4571, 4572, 2182, 1722, 8073, 3238, 3239, # 7958
+ 1842, 3610, 1715, 481, 365, 1975, 1856, 8074, 8075, 1962, 2491, 4573, 8076, 2126, 3611, 3240, # 7974
+ 433, 1894, 2063, 2075, 8077, 602, 2741, 8078, 8079, 8080, 8081, 8082, 3014, 1628, 3400, 8083, # 7990
+ 3172, 4574, 4052, 2890, 4575, 2512, 8084, 2544, 2772, 8085, 8086, 8087, 3310, 4576, 2891, 8088, # 8006
+ 4577, 8089, 2851, 4578, 4579, 1221, 2967, 4053, 2513, 8090, 8091, 8092, 1867, 1989, 8093, 8094, # 8022
+ 8095, 1895, 8096, 8097, 4580, 1896, 4054, 318, 8098, 2094, 4055, 4293, 8099, 8100, 485, 8101, # 8038
+ 938, 3862, 553, 2670, 116, 8102, 3863, 3612, 8103, 3498, 2671, 2773, 3401, 3311, 2807, 8104, # 8054
+ 3613, 2929, 4056, 1747, 2930, 2968, 8105, 8106, 207, 8107, 8108, 2672, 4581, 2514, 8109, 3015, # 8070
+ 890, 3614, 3864, 8110, 1877, 3732, 3402, 8111, 2183, 2353, 3403, 1652, 8112, 8113, 8114, 941, # 8086
+ 2294, 208, 3499, 4057, 2019, 330, 4294, 3865, 2892, 2492, 3733, 4295, 8115, 8116, 8117, 8118, # 8102
+)
+# fmt: on
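The ratios in the header comment follow directly from the cumulative-coverage figures; reproducing the arithmetic makes the 0.75 constant less magic:

    coverage_512 = 0.74851                           # top 512 chars cover ~74.851% of text
    ideal_ratio = coverage_512 / (1 - coverage_512)  # ≈ 2.98, as quoted above
    random_ratio = 512 / (5401 - 512)                # ≈ 0.105 for a flat distribution

EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75 is about 25% of the ideal ratio, as the header says, yet still roughly seven times the random-distribution baseline, and that gap is what the detector exploits.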
diff --git a/third_party/python/pip/pip/_vendor/chardet/euctwprober.py b/third_party/python/pip/pip/_vendor/chardet/euctwprober.py
new file mode 100644
index 0000000000..a37ab18995
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/euctwprober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import EUCTWDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import EUCTW_SM_MODEL
+
+
+class EUCTWProber(MultiByteCharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(EUCTW_SM_MODEL)
+ self.distribution_analyzer = EUCTWDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "EUC-TW"
+
+ @property
+ def language(self) -> str:
+ return "Taiwan"
diff --git a/third_party/python/pip/pip/_vendor/chardet/gb2312freq.py b/third_party/python/pip/pip/_vendor/chardet/gb2312freq.py
new file mode 100644
index 0000000000..b32bfc7421
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/gb2312freq.py
@@ -0,0 +1,284 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# GB2312 most frequently used character table
+#
+# Char-to-FreqOrder table, from hz6763
+
+# 512 --> 0.79 -- 0.79
+# 1024 --> 0.92 -- 0.13
+# 2048 --> 0.98 -- 0.06
+# 6768 --> 1.00 -- 0.02
+#
+# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
+# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
+#
+# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than the RDR
+
+GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
+
+GB2312_TABLE_SIZE = 3760
+
+# fmt: off
+GB2312_CHAR_TO_FREQ_ORDER = (
+1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
+2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
+2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
+ 249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
+1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
+1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
+ 152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
+1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
+2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
+3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
+ 544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
+1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
+ 927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
+2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
+ 360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
+2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
+1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
+3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
+ 198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
+1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
+ 253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
+2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
+1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
+3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
+1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
+2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
+1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
+ 585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
+3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
+3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
+ 252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
+3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
+ 836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
+1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
+3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
+2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
+1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
+ 755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
+1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
+4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
+ 887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
+3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
+3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
+ 509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
+1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
+2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
+1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
+1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
+ 389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
+3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
+3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
+4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
+ 296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
+3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
+1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
+1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
+4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
+ 215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
+ 814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
+3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
+1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
+ 602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
+1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
+2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
+ 930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
+ 432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
+ 396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
+3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
+4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
+3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
+ 750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
+2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
+2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
+2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
+ 776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
+2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
+ 968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
+ 163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
+ 220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
+3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
+2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
+2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
+1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
+ 18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
+2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
+ 90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
+ 286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
+1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
+1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
+ 915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
+ 681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
+1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
+2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
+3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
+2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
+2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
+2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
+3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
+1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
+1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
+2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
+1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
+3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
+1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
+1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
+3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
+ 795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
+2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
+1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
+4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
+1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
+1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
+3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
+1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
+ 47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
+ 504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
+1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
+ 160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
+1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
+1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
+ 744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
+3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
+4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
+3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
+2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
+2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
+1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
+3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
+2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
+1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
+1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
+ 125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
+2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
+2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
+3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
+4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
+3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
+ 180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
+3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
+2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
+1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
+ 259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
+ 774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
+3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
+4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
+2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
+1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
+1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
+ 766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
+1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
+3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
+ 955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
+ 642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
+1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
+ 57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
+1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
+ 193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
+2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
+ 158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
+2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
+2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
+1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
+1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
+2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
+ 819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
+1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
+1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
+2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
+2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
+3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
+1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
+4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
+ 571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
+ 845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
+3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
+1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
+ 470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
+3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
+1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
+4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
+1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
+2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
+1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
+ 498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
+1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
+3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
+ 448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
+2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
+ 136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
+1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
+1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
+1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
+3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
+2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
+3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
+3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
+3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
+ 996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
+2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
+ 786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
+2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
+ 12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
+1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
+ 475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
+ 233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
+1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
+3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
+3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
+1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
+1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
+3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
+2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
+2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
+1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
+3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
+ 451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
+4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
+1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
+2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
+3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
+3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
+1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
+ 768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
+ 391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
+2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
+ 931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
+1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
+ 386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
+1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
+1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
+1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
+1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
+1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
+ 381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
+ 852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, #last 512
+)
+# fmt: on
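The distribution-ratio arithmetic in the header above is easy to verify: the
ideal ratio divides the coverage of the 512 most frequent characters by the
coverage of everything else, while the random ratio assumes every character
is equally likely. A short sketch of the same arithmetic (the jisfreq.py
header later in this patch follows the identical recipe with its own
coverage numbers):

    # Coverage figure copied from the header comment above.
    coverage_512 = 0.79135
    ideal = coverage_512 / (1 - coverage_512)
    print(f"{ideal:.2f}")              # 3.79

    # 3755 first-level hanzi out of the 6763 total (cf. "hz6763" above).
    random_ratio = 512 / (3755 - 512)
    print(f"{random_ratio:.3f}")       # 0.158, rounded down to 0.157 in the header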
diff --git a/third_party/python/pip/pip/_vendor/chardet/gb2312prober.py b/third_party/python/pip/pip/_vendor/chardet/gb2312prober.py
new file mode 100644
index 0000000000..d423e7311e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/gb2312prober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import GB2312DistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import GB2312_SM_MODEL
+
+
+class GB2312Prober(MultiByteCharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(GB2312_SM_MODEL)
+ self.distribution_analyzer = GB2312DistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "GB2312"
+
+ @property
+ def language(self) -> str:
+ return "Chinese"
diff --git a/third_party/python/pip/pip/_vendor/chardet/hebrewprober.py b/third_party/python/pip/pip/_vendor/chardet/hebrewprober.py
new file mode 100644
index 0000000000..785d0057bc
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/hebrewprober.py
@@ -0,0 +1,316 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Shy Shalom
+# Portions created by the Initial Developer are Copyright (C) 2005
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import Optional, Union
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+from .sbcharsetprober import SingleByteCharSetProber
+
+# This prober doesn't actually recognize a language or a charset.
+# It is a helper prober for use by the Hebrew model probers.
+
+### General ideas of the Hebrew charset recognition ###
+#
+# Four main charsets exist in Hebrew:
+# "ISO-8859-8" - Visual Hebrew
+# "windows-1255" - Logical Hebrew
+# "ISO-8859-8-I" - Logical Hebrew
+# "x-mac-hebrew" - ?? Logical Hebrew ??
+#
+# Both "ISO" charsets use a completely identical set of code points, whereas
+# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
+# these code points. windows-1255 defines additional characters in the range
+# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
+# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
+# x-mac-hebrew defines similar additional code points but with a different
+# mapping.
+#
+# As far as an average Hebrew text with no diacritics is concerned, all four
+# charsets are identical with respect to code points, meaning that for the
+# main Hebrew alphabet all four map the same values to all 27 Hebrew letters
+# (including final letters).
+#
+# The dominant difference between these charsets is their directionality.
+# "Visual" directionality means that the text is ordered as if the renderer is
+# not aware of a BIDI rendering algorithm. The renderer sees the text and
+# draws it from left to right. The text itself, when ordered naturally, reads
+# backwards. A buffer of Visual Hebrew generally looks like this:
+# "[last word of first line spelled backwards] [whole line ordered backwards
+# and spelled backwards] [first word of first line spelled backwards]
+# [end of line] [last word of second line] ... etc' "
+# Punctuation marks, numbers and English text added to visual text are
+# naturally also "visual", i.e. ordered from left to right.
+#
+# "Logical" directionality means the text is ordered "naturally" according to
+# the order it is read. It is the responsibility of the renderer to display
+# the text from right to left. A BIDI algorithm is used to place general
+# punctuation marks, numbers and English text in the text.
+#
+# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
+# what little evidence I could find, it seems that its general directionality
+# is Logical.
+#
+# To sum up all of the above, the Hebrew probing mechanism knows about two
+# charsets:
+# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
+# backwards while line order is natural. For charset recognition purposes
+# the line order is unimportant (In fact, for this implementation, even
+# word order is unimportant).
+# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
+#
+# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
+# specifically identified.
+# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
+# that contains special punctuation marks or diacritics is displayed with
+# some unconverted characters showing as question marks. This problem might
+# be corrected using another model prober for x-mac-hebrew. Due to the fact
+# that x-mac-hebrew texts are so rare, writing another model prober isn't
+# worth the effort and performance hit.
+#
+#### The Prober ####
+#
+# The prober is divided between two SBCharSetProbers and a HebrewProber,
+# all of which are managed, created, fed data, inquired and deleted by the
+# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
+# fact some kind of Hebrew, Logical or Visual. The final decision between the
+# two is made by the HebrewProber by combining final-letter scores
+# with the scores of the two SBCharSetProbers to produce a final answer.
+#
+# The SBCSGroupProber is responsible for stripping the original text of HTML
+# tags, English characters, numbers, low-ASCII punctuation characters, spaces
+# and new lines. It reduces any sequence of such characters to a single space.
+# The buffer fed to each prober in the SBCS group prober is pure text in
+# high-ASCII.
+# The two SBCharSetProbers (model probers) share the same language model:
+# Win1255Model.
+# The first SBCharSetProber uses the model normally as any other
+# SBCharSetProber does, to recognize windows-1255, upon which this model was
+# built. The second SBCharSetProber is told to make the pair-of-letter
+# lookup in the language model backwards. This in practice exactly simulates
+# a visual Hebrew model using the windows-1255 logical Hebrew model.
+#
+# The HebrewProber is not using any language model. All it does is look for
+# final-letter evidence suggesting the text is either logical Hebrew or visual
+# Hebrew. Disjointed from the model probers, the results of the HebrewProber
+# alone are meaningless. HebrewProber always returns 0.00 as confidence
+# since it never identifies a charset by itself. Instead, the pointer to the
+# HebrewProber is passed to the model probers as a helper "Name Prober".
+# When the Group prober receives a positive identification from any prober,
+# it asks for the name of the charset identified. If the prober queried is a
+# Hebrew model prober, the model prober forwards the call to the
+# HebrewProber to make the final decision. In the HebrewProber, the
+# decision is made according to the final-letter scores it maintains and both
+# model probers' scores. The answer is returned in the form of the name of the
+# charset identified, either "windows-1255" or "ISO-8859-8".
+
+
+class HebrewProber(CharSetProber):
+ SPACE = 0x20
+ # windows-1255 / ISO-8859-8 code points of interest
+ FINAL_KAF = 0xEA
+ NORMAL_KAF = 0xEB
+ FINAL_MEM = 0xED
+ NORMAL_MEM = 0xEE
+ FINAL_NUN = 0xEF
+ NORMAL_NUN = 0xF0
+ FINAL_PE = 0xF3
+ NORMAL_PE = 0xF4
+ FINAL_TSADI = 0xF5
+ NORMAL_TSADI = 0xF6
+
+ # Minimum Visual vs Logical final letter score difference.
+ # If the difference is below this, don't rely solely on the final letter score
+ # distance.
+ MIN_FINAL_CHAR_DISTANCE = 5
+
+ # Minimum Visual vs Logical model score difference.
+ # If the difference is below this, don't rely at all on the model score
+ # distance.
+ MIN_MODEL_DISTANCE = 0.01
+
+ VISUAL_HEBREW_NAME = "ISO-8859-8"
+ LOGICAL_HEBREW_NAME = "windows-1255"
+
+ def __init__(self) -> None:
+ super().__init__()
+ self._final_char_logical_score = 0
+ self._final_char_visual_score = 0
+ self._prev = self.SPACE
+ self._before_prev = self.SPACE
+ self._logical_prober: Optional[SingleByteCharSetProber] = None
+ self._visual_prober: Optional[SingleByteCharSetProber] = None
+ self.reset()
+
+ def reset(self) -> None:
+ self._final_char_logical_score = 0
+ self._final_char_visual_score = 0
+ # The last two characters seen in the previous buffer,
+ # self._prev and self._before_prev, are initialized to space in order
+ # to simulate a word delimiter at the beginning of the data.
+ self._prev = self.SPACE
+ self._before_prev = self.SPACE
+ # These probers are owned by the group prober.
+
+ def set_model_probers(
+ self,
+ logical_prober: SingleByteCharSetProber,
+ visual_prober: SingleByteCharSetProber,
+ ) -> None:
+ self._logical_prober = logical_prober
+ self._visual_prober = visual_prober
+
+ def is_final(self, c: int) -> bool:
+ return c in [
+ self.FINAL_KAF,
+ self.FINAL_MEM,
+ self.FINAL_NUN,
+ self.FINAL_PE,
+ self.FINAL_TSADI,
+ ]
+
+ def is_non_final(self, c: int) -> bool:
+ # The normal Tsadi is not a good Non-Final letter due to words like
+ # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
+ # apostrophe is converted to a space in FilterWithoutEnglishLetters
+ # causing the Non-Final tsadi to appear at an end of a word even
+ # though this is not the case in the original text.
+ # The letters Pe and Kaf occasionally show similar problematic behavior
+ # as Non-Final letters: words like 'Pop', 'Winamp' and 'Mubarak',
+ # for example, legally end with a Non-Final Pe or Kaf. However, the
+ # benefit of these letters as Non-Final letters outweighs the damage
+ # since these words are quite rare.
+ return c in [self.NORMAL_KAF, self.NORMAL_MEM, self.NORMAL_NUN, self.NORMAL_PE]
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ # Final letter analysis for logical-visual decision.
+ # Look for evidence that the received buffer is either logical Hebrew
+ # or visual Hebrew.
+ # The following cases are checked:
+ # 1) A word longer than 1 letter, ending with a final letter. This is
+ # an indication that the text is laid out "naturally" since the
+ # final letter really appears at the end. +1 for logical score.
+ # 2) A word longer than 1 letter, ending with a Non-Final letter. In
+ # normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
+ # should not end with the Non-Final form of that letter. Exceptions
+ # to this rule are mentioned above in is_non_final(). This is an
+ # indication that the text is laid out backwards. +1 for visual
+ # score
+ # 3) A word longer than 1 letter, starting with a final letter. Final
+ # letters should not appear at the beginning of a word. This is an
+ # indication that the text is laid out backwards. +1 for visual
+ # score.
+ #
+ # The visual score and logical score are accumulated throughout the
+ # text and are finally checked against each other in the charset_name
+ # property.
+ # No checking for final letters in the middle of words is done since
+ # that case is not an indication for either Logical or Visual text.
+ #
+ # We automatically filter out all 7-bit characters (replace them with
+ # spaces) so the word boundary detection works properly. [MAP]
+
+ if self.state == ProbingState.NOT_ME:
+ # Both model probers say it's not them. No reason to continue.
+ return ProbingState.NOT_ME
+
+ byte_str = self.filter_high_byte_only(byte_str)
+
+ for cur in byte_str:
+ if cur == self.SPACE:
+ # We stand on a space - a word just ended
+ if self._before_prev != self.SPACE:
+ # next-to-last char was not a space so self._prev is not a
+ # 1 letter word
+ if self.is_final(self._prev):
+ # case (1) [-2:not space][-1:final letter][cur:space]
+ self._final_char_logical_score += 1
+ elif self.is_non_final(self._prev):
+ # case (2) [-2:not space][-1:Non-Final letter][
+ # cur:space]
+ self._final_char_visual_score += 1
+ else:
+ # Not standing on a space
+ if (
+ (self._before_prev == self.SPACE)
+ and (self.is_final(self._prev))
+ and (cur != self.SPACE)
+ ):
+ # case (3) [-2:space][-1:final letter][cur:not space]
+ self._final_char_visual_score += 1
+ self._before_prev = self._prev
+ self._prev = cur
+
+ # Keep detecting until the end of the data, or until both model probers
+ # return ProbingState.NOT_ME (handled above).
+ return ProbingState.DETECTING
+
+ @property
+ def charset_name(self) -> str:
+ assert self._logical_prober is not None
+ assert self._visual_prober is not None
+
+ # Make the decision: is it Logical or Visual?
+ # If the final letter score distance is dominant enough, rely on it.
+ finalsub = self._final_char_logical_score - self._final_char_visual_score
+ if finalsub >= self.MIN_FINAL_CHAR_DISTANCE:
+ return self.LOGICAL_HEBREW_NAME
+ if finalsub <= -self.MIN_FINAL_CHAR_DISTANCE:
+ return self.VISUAL_HEBREW_NAME
+
+ # It's not dominant enough, try to rely on the model scores instead.
+ modelsub = (
+ self._logical_prober.get_confidence() - self._visual_prober.get_confidence()
+ )
+ if modelsub > self.MIN_MODEL_DISTANCE:
+ return self.LOGICAL_HEBREW_NAME
+ if modelsub < -self.MIN_MODEL_DISTANCE:
+ return self.VISUAL_HEBREW_NAME
+
+ # Still no good, back to final letter distance, maybe it'll save the
+ # day.
+ if finalsub < 0.0:
+ return self.VISUAL_HEBREW_NAME
+
+ # (finalsub > 0 - Logical) or (don't know what to do) default to
+ # Logical.
+ return self.LOGICAL_HEBREW_NAME
+
+ @property
+ def language(self) -> str:
+ return "Hebrew"
+
+ @property
+ def state(self) -> ProbingState:
+ assert self._logical_prober is not None
+ assert self._visual_prober is not None
+
+ # Remain active as long as any of the model probers are active.
+ if (self._logical_prober.state == ProbingState.NOT_ME) and (
+ self._visual_prober.state == ProbingState.NOT_ME
+ ):
+ return ProbingState.NOT_ME
+ return ProbingState.DETECTING
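The three scoring cases in feed() above can be exercised directly. A minimal
sketch, assuming the vendored import path and duck-typed stand-ins for the
two model probers that SBCSGroupProber normally supplies; it peeks at the
private score counters purely for illustration:

    from pip._vendor.chardet.enums import ProbingState
    from pip._vendor.chardet.hebrewprober import HebrewProber

    class DummyModelProber:  # hypothetical stand-in for SingleByteCharSetProber
        state = ProbingState.DETECTING

        def get_confidence(self) -> float:
            return 0.5

    prober = HebrewProber()
    prober.set_model_probers(DummyModelProber(), DummyModelProber())

    # [not space][FINAL KAF][space] is case (1): a word longer than one
    # letter ending in a final letter, so the logical score increments.
    # 0xE0 is Aleph in windows-1255 / ISO-8859-8.
    prober.feed(bytes([0xE0, HebrewProber.FINAL_KAF, 0x20]))
    print(prober._final_char_logical_score)  # 1
    print(prober._final_char_visual_score)   # 0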
diff --git a/third_party/python/pip/pip/_vendor/chardet/jisfreq.py b/third_party/python/pip/pip/_vendor/chardet/jisfreq.py
new file mode 100644
index 0000000000..3293576e01
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/jisfreq.py
@@ -0,0 +1,325 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# Sampled from about 20M of text material, including literature and computer technology
+#
+# Japanese frequency table, applied to both S-JIS and EUC-JP.
+# The entries are sorted in frequency order.
+
+# 128 --> 0.77094
+# 256 --> 0.85710
+# 512 --> 0.92635
+# 1024 --> 0.97130
+# 2048 --> 0.99431
+#
+# Ideal Distribution Ratio = 0.92635 / (1-0.92635) = 12.58
+# Random Distribution Ratio = 512 / (2965+62+83+86-512) = 0.191
+#
+# Typical Distribution Ratio, 25% of IDR
+
+JIS_TYPICAL_DISTRIBUTION_RATIO = 3.0
+
+# Char-to-FreqOrder table
+JIS_TABLE_SIZE = 4368
+
+# fmt: off
+JIS_CHAR_TO_FREQ_ORDER = (
+ 40, 1, 6, 182, 152, 180, 295,2127, 285, 381,3295,4304,3068,4606,3165,3510, # 16
+3511,1822,2785,4607,1193,2226,5070,4608, 171,2996,1247, 18, 179,5071, 856,1661, # 32
+1262,5072, 619, 127,3431,3512,3230,1899,1700, 232, 228,1294,1298, 284, 283,2041, # 48
+2042,1061,1062, 48, 49, 44, 45, 433, 434,1040,1041, 996, 787,2997,1255,4305, # 64
+2108,4609,1684,1648,5073,5074,5075,5076,5077,5078,3687,5079,4610,5080,3927,3928, # 80
+5081,3296,3432, 290,2285,1471,2187,5082,2580,2825,1303,2140,1739,1445,2691,3375, # 96
+1691,3297,4306,4307,4611, 452,3376,1182,2713,3688,3069,4308,5083,5084,5085,5086, # 112
+5087,5088,5089,5090,5091,5092,5093,5094,5095,5096,5097,5098,5099,5100,5101,5102, # 128
+5103,5104,5105,5106,5107,5108,5109,5110,5111,5112,4097,5113,5114,5115,5116,5117, # 144
+5118,5119,5120,5121,5122,5123,5124,5125,5126,5127,5128,5129,5130,5131,5132,5133, # 160
+5134,5135,5136,5137,5138,5139,5140,5141,5142,5143,5144,5145,5146,5147,5148,5149, # 176
+5150,5151,5152,4612,5153,5154,5155,5156,5157,5158,5159,5160,5161,5162,5163,5164, # 192
+5165,5166,5167,5168,5169,5170,5171,5172,5173,5174,5175,1472, 598, 618, 820,1205, # 208
+1309,1412,1858,1307,1692,5176,5177,5178,5179,5180,5181,5182,1142,1452,1234,1172, # 224
+1875,2043,2149,1793,1382,2973, 925,2404,1067,1241, 960,1377,2935,1491, 919,1217, # 240
+1865,2030,1406,1499,2749,4098,5183,5184,5185,5186,5187,5188,2561,4099,3117,1804, # 256
+2049,3689,4309,3513,1663,5189,3166,3118,3298,1587,1561,3433,5190,3119,1625,2998, # 272
+3299,4613,1766,3690,2786,4614,5191,5192,5193,5194,2161, 26,3377, 2,3929, 20, # 288
+3691, 47,4100, 50, 17, 16, 35, 268, 27, 243, 42, 155, 24, 154, 29, 184, # 304
+ 4, 91, 14, 92, 53, 396, 33, 289, 9, 37, 64, 620, 21, 39, 321, 5, # 320
+ 12, 11, 52, 13, 3, 208, 138, 0, 7, 60, 526, 141, 151,1069, 181, 275, # 336
+1591, 83, 132,1475, 126, 331, 829, 15, 69, 160, 59, 22, 157, 55,1079, 312, # 352
+ 109, 38, 23, 25, 10, 19, 79,5195, 61, 382,1124, 8, 30,5196,5197,5198, # 368
+5199,5200,5201,5202,5203,5204,5205,5206, 89, 62, 74, 34,2416, 112, 139, 196, # 384
+ 271, 149, 84, 607, 131, 765, 46, 88, 153, 683, 76, 874, 101, 258, 57, 80, # 400
+ 32, 364, 121,1508, 169,1547, 68, 235, 145,2999, 41, 360,3027, 70, 63, 31, # 416
+ 43, 259, 262,1383, 99, 533, 194, 66, 93, 846, 217, 192, 56, 106, 58, 565, # 432
+ 280, 272, 311, 256, 146, 82, 308, 71, 100, 128, 214, 655, 110, 261, 104,1140, # 448
+ 54, 51, 36, 87, 67,3070, 185,2618,2936,2020, 28,1066,2390,2059,5207,5208, # 464
+5209,5210,5211,5212,5213,5214,5215,5216,4615,5217,5218,5219,5220,5221,5222,5223, # 480
+5224,5225,5226,5227,5228,5229,5230,5231,5232,5233,5234,5235,5236,3514,5237,5238, # 496
+5239,5240,5241,5242,5243,5244,2297,2031,4616,4310,3692,5245,3071,5246,3598,5247, # 512
+4617,3231,3515,5248,4101,4311,4618,3808,4312,4102,5249,4103,4104,3599,5250,5251, # 528
+5252,5253,5254,5255,5256,5257,5258,5259,5260,5261,5262,5263,5264,5265,5266,5267, # 544
+5268,5269,5270,5271,5272,5273,5274,5275,5276,5277,5278,5279,5280,5281,5282,5283, # 560
+5284,5285,5286,5287,5288,5289,5290,5291,5292,5293,5294,5295,5296,5297,5298,5299, # 576
+5300,5301,5302,5303,5304,5305,5306,5307,5308,5309,5310,5311,5312,5313,5314,5315, # 592
+5316,5317,5318,5319,5320,5321,5322,5323,5324,5325,5326,5327,5328,5329,5330,5331, # 608
+5332,5333,5334,5335,5336,5337,5338,5339,5340,5341,5342,5343,5344,5345,5346,5347, # 624
+5348,5349,5350,5351,5352,5353,5354,5355,5356,5357,5358,5359,5360,5361,5362,5363, # 640
+5364,5365,5366,5367,5368,5369,5370,5371,5372,5373,5374,5375,5376,5377,5378,5379, # 656
+5380,5381, 363, 642,2787,2878,2788,2789,2316,3232,2317,3434,2011, 165,1942,3930, # 672
+3931,3932,3933,5382,4619,5383,4620,5384,5385,5386,5387,5388,5389,5390,5391,5392, # 688
+5393,5394,5395,5396,5397,5398,5399,5400,5401,5402,5403,5404,5405,5406,5407,5408, # 704
+5409,5410,5411,5412,5413,5414,5415,5416,5417,5418,5419,5420,5421,5422,5423,5424, # 720
+5425,5426,5427,5428,5429,5430,5431,5432,5433,5434,5435,5436,5437,5438,5439,5440, # 736
+5441,5442,5443,5444,5445,5446,5447,5448,5449,5450,5451,5452,5453,5454,5455,5456, # 752
+5457,5458,5459,5460,5461,5462,5463,5464,5465,5466,5467,5468,5469,5470,5471,5472, # 768
+5473,5474,5475,5476,5477,5478,5479,5480,5481,5482,5483,5484,5485,5486,5487,5488, # 784
+5489,5490,5491,5492,5493,5494,5495,5496,5497,5498,5499,5500,5501,5502,5503,5504, # 800
+5505,5506,5507,5508,5509,5510,5511,5512,5513,5514,5515,5516,5517,5518,5519,5520, # 816
+5521,5522,5523,5524,5525,5526,5527,5528,5529,5530,5531,5532,5533,5534,5535,5536, # 832
+5537,5538,5539,5540,5541,5542,5543,5544,5545,5546,5547,5548,5549,5550,5551,5552, # 848
+5553,5554,5555,5556,5557,5558,5559,5560,5561,5562,5563,5564,5565,5566,5567,5568, # 864
+5569,5570,5571,5572,5573,5574,5575,5576,5577,5578,5579,5580,5581,5582,5583,5584, # 880
+5585,5586,5587,5588,5589,5590,5591,5592,5593,5594,5595,5596,5597,5598,5599,5600, # 896
+5601,5602,5603,5604,5605,5606,5607,5608,5609,5610,5611,5612,5613,5614,5615,5616, # 912
+5617,5618,5619,5620,5621,5622,5623,5624,5625,5626,5627,5628,5629,5630,5631,5632, # 928
+5633,5634,5635,5636,5637,5638,5639,5640,5641,5642,5643,5644,5645,5646,5647,5648, # 944
+5649,5650,5651,5652,5653,5654,5655,5656,5657,5658,5659,5660,5661,5662,5663,5664, # 960
+5665,5666,5667,5668,5669,5670,5671,5672,5673,5674,5675,5676,5677,5678,5679,5680, # 976
+5681,5682,5683,5684,5685,5686,5687,5688,5689,5690,5691,5692,5693,5694,5695,5696, # 992
+5697,5698,5699,5700,5701,5702,5703,5704,5705,5706,5707,5708,5709,5710,5711,5712, # 1008
+5713,5714,5715,5716,5717,5718,5719,5720,5721,5722,5723,5724,5725,5726,5727,5728, # 1024
+5729,5730,5731,5732,5733,5734,5735,5736,5737,5738,5739,5740,5741,5742,5743,5744, # 1040
+5745,5746,5747,5748,5749,5750,5751,5752,5753,5754,5755,5756,5757,5758,5759,5760, # 1056
+5761,5762,5763,5764,5765,5766,5767,5768,5769,5770,5771,5772,5773,5774,5775,5776, # 1072
+5777,5778,5779,5780,5781,5782,5783,5784,5785,5786,5787,5788,5789,5790,5791,5792, # 1088
+5793,5794,5795,5796,5797,5798,5799,5800,5801,5802,5803,5804,5805,5806,5807,5808, # 1104
+5809,5810,5811,5812,5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824, # 1120
+5825,5826,5827,5828,5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840, # 1136
+5841,5842,5843,5844,5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856, # 1152
+5857,5858,5859,5860,5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872, # 1168
+5873,5874,5875,5876,5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888, # 1184
+5889,5890,5891,5892,5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904, # 1200
+5905,5906,5907,5908,5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920, # 1216
+5921,5922,5923,5924,5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936, # 1232
+5937,5938,5939,5940,5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952, # 1248
+5953,5954,5955,5956,5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968, # 1264
+5969,5970,5971,5972,5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984, # 1280
+5985,5986,5987,5988,5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000, # 1296
+6001,6002,6003,6004,6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016, # 1312
+6017,6018,6019,6020,6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032, # 1328
+6033,6034,6035,6036,6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048, # 1344
+6049,6050,6051,6052,6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064, # 1360
+6065,6066,6067,6068,6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080, # 1376
+6081,6082,6083,6084,6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096, # 1392
+6097,6098,6099,6100,6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112, # 1408
+6113,6114,2044,2060,4621, 997,1235, 473,1186,4622, 920,3378,6115,6116, 379,1108, # 1424
+4313,2657,2735,3934,6117,3809, 636,3233, 573,1026,3693,3435,2974,3300,2298,4105, # 1440
+ 854,2937,2463, 393,2581,2417, 539, 752,1280,2750,2480, 140,1161, 440, 708,1569, # 1456
+ 665,2497,1746,1291,1523,3000, 164,1603, 847,1331, 537,1997, 486, 508,1693,2418, # 1472
+1970,2227, 878,1220, 299,1030, 969, 652,2751, 624,1137,3301,2619, 65,3302,2045, # 1488
+1761,1859,3120,1930,3694,3516, 663,1767, 852, 835,3695, 269, 767,2826,2339,1305, # 1504
+ 896,1150, 770,1616,6118, 506,1502,2075,1012,2519, 775,2520,2975,2340,2938,4314, # 1520
+3028,2086,1224,1943,2286,6119,3072,4315,2240,1273,1987,3935,1557, 175, 597, 985, # 1536
+3517,2419,2521,1416,3029, 585, 938,1931,1007,1052,1932,1685,6120,3379,4316,4623, # 1552
+ 804, 599,3121,1333,2128,2539,1159,1554,2032,3810, 687,2033,2904, 952, 675,1467, # 1568
+3436,6121,2241,1096,1786,2440,1543,1924, 980,1813,2228, 781,2692,1879, 728,1918, # 1584
+3696,4624, 548,1950,4625,1809,1088,1356,3303,2522,1944, 502, 972, 373, 513,2827, # 1600
+ 586,2377,2391,1003,1976,1631,6122,2464,1084, 648,1776,4626,2141, 324, 962,2012, # 1616
+2177,2076,1384, 742,2178,1448,1173,1810, 222, 102, 301, 445, 125,2420, 662,2498, # 1632
+ 277, 200,1476,1165,1068, 224,2562,1378,1446, 450,1880, 659, 791, 582,4627,2939, # 1648
+3936,1516,1274, 555,2099,3697,1020,1389,1526,3380,1762,1723,1787,2229, 412,2114, # 1664
+1900,2392,3518, 512,2597, 427,1925,2341,3122,1653,1686,2465,2499, 697, 330, 273, # 1680
+ 380,2162, 951, 832, 780, 991,1301,3073, 965,2270,3519, 668,2523,2636,1286, 535, # 1696
+1407, 518, 671, 957,2658,2378, 267, 611,2197,3030,6123, 248,2299, 967,1799,2356, # 1712
+ 850,1418,3437,1876,1256,1480,2828,1718,6124,6125,1755,1664,2405,6126,4628,2879, # 1728
+2829, 499,2179, 676,4629, 557,2329,2214,2090, 325,3234, 464, 811,3001, 992,2342, # 1744
+2481,1232,1469, 303,2242, 466,1070,2163, 603,1777,2091,4630,2752,4631,2714, 322, # 1760
+2659,1964,1768, 481,2188,1463,2330,2857,3600,2092,3031,2421,4632,2318,2070,1849, # 1776
+2598,4633,1302,2254,1668,1701,2422,3811,2905,3032,3123,2046,4106,1763,1694,4634, # 1792
+1604, 943,1724,1454, 917, 868,2215,1169,2940, 552,1145,1800,1228,1823,1955, 316, # 1808
+1080,2510, 361,1807,2830,4107,2660,3381,1346,1423,1134,4108,6127, 541,1263,1229, # 1824
+1148,2540, 545, 465,1833,2880,3438,1901,3074,2482, 816,3937, 713,1788,2500, 122, # 1840
+1575, 195,1451,2501,1111,6128, 859, 374,1225,2243,2483,4317, 390,1033,3439,3075, # 1856
+2524,1687, 266, 793,1440,2599, 946, 779, 802, 507, 897,1081, 528,2189,1292, 711, # 1872
+1866,1725,1167,1640, 753, 398,2661,1053, 246, 348,4318, 137,1024,3440,1600,2077, # 1888
+2129, 825,4319, 698, 238, 521, 187,2300,1157,2423,1641,1605,1464,1610,1097,2541, # 1904
+1260,1436, 759,2255,1814,2150, 705,3235, 409,2563,3304, 561,3033,2005,2564, 726, # 1920
+1956,2343,3698,4109, 949,3812,3813,3520,1669, 653,1379,2525, 881,2198, 632,2256, # 1936
+1027, 778,1074, 733,1957, 514,1481,2466, 554,2180, 702,3938,1606,1017,1398,6129, # 1952
+1380,3521, 921, 993,1313, 594, 449,1489,1617,1166, 768,1426,1360, 495,1794,3601, # 1968
+1177,3602,1170,4320,2344, 476, 425,3167,4635,3168,1424, 401,2662,1171,3382,1998, # 1984
+1089,4110, 477,3169, 474,6130,1909, 596,2831,1842, 494, 693,1051,1028,1207,3076, # 2000
+ 606,2115, 727,2790,1473,1115, 743,3522, 630, 805,1532,4321,2021, 366,1057, 838, # 2016
+ 684,1114,2142,4322,2050,1492,1892,1808,2271,3814,2424,1971,1447,1373,3305,1090, # 2032
+1536,3939,3523,3306,1455,2199, 336, 369,2331,1035, 584,2393, 902, 718,2600,6131, # 2048
+2753, 463,2151,1149,1611,2467, 715,1308,3124,1268, 343,1413,3236,1517,1347,2663, # 2064
+2093,3940,2022,1131,1553,2100,2941,1427,3441,2942,1323,2484,6132,1980, 872,2368, # 2080
+2441,2943, 320,2369,2116,1082, 679,1933,3941,2791,3815, 625,1143,2023, 422,2200, # 2096
+3816,6133, 730,1695, 356,2257,1626,2301,2858,2637,1627,1778, 937, 883,2906,2693, # 2112
+3002,1769,1086, 400,1063,1325,3307,2792,4111,3077, 456,2345,1046, 747,6134,1524, # 2128
+ 884,1094,3383,1474,2164,1059, 974,1688,2181,2258,1047, 345,1665,1187, 358, 875, # 2144
+3170, 305, 660,3524,2190,1334,1135,3171,1540,1649,2542,1527, 927, 968,2793, 885, # 2160
+1972,1850, 482, 500,2638,1218,1109,1085,2543,1654,2034, 876, 78,2287,1482,1277, # 2176
+ 861,1675,1083,1779, 724,2754, 454, 397,1132,1612,2332, 893, 672,1237, 257,2259, # 2192
+2370, 135,3384, 337,2244, 547, 352, 340, 709,2485,1400, 788,1138,2511, 540, 772, # 2208
+1682,2260,2272,2544,2013,1843,1902,4636,1999,1562,2288,4637,2201,1403,1533, 407, # 2224
+ 576,3308,1254,2071, 978,3385, 170, 136,1201,3125,2664,3172,2394, 213, 912, 873, # 2240
+3603,1713,2202, 699,3604,3699, 813,3442, 493, 531,1054, 468,2907,1483, 304, 281, # 2256
+4112,1726,1252,2094, 339,2319,2130,2639, 756,1563,2944, 748, 571,2976,1588,2425, # 2272
+2715,1851,1460,2426,1528,1392,1973,3237, 288,3309, 685,3386, 296, 892,2716,2216, # 2288
+1570,2245, 722,1747,2217, 905,3238,1103,6135,1893,1441,1965, 251,1805,2371,3700, # 2304
+2601,1919,1078, 75,2182,1509,1592,1270,2640,4638,2152,6136,3310,3817, 524, 706, # 2320
+1075, 292,3818,1756,2602, 317, 98,3173,3605,3525,1844,2218,3819,2502, 814, 567, # 2336
+ 385,2908,1534,6137, 534,1642,3239, 797,6138,1670,1529, 953,4323, 188,1071, 538, # 2352
+ 178, 729,3240,2109,1226,1374,2000,2357,2977, 731,2468,1116,2014,2051,6139,1261, # 2368
+1593, 803,2859,2736,3443, 556, 682, 823,1541,6140,1369,2289,1706,2794, 845, 462, # 2384
+2603,2665,1361, 387, 162,2358,1740, 739,1770,1720,1304,1401,3241,1049, 627,1571, # 2400
+2427,3526,1877,3942,1852,1500, 431,1910,1503, 677, 297,2795, 286,1433,1038,1198, # 2416
+2290,1133,1596,4113,4639,2469,1510,1484,3943,6141,2442, 108, 712,4640,2372, 866, # 2432
+3701,2755,3242,1348, 834,1945,1408,3527,2395,3243,1811, 824, 994,1179,2110,1548, # 2448
+1453, 790,3003, 690,4324,4325,2832,2909,3820,1860,3821, 225,1748, 310, 346,1780, # 2464
+2470, 821,1993,2717,2796, 828, 877,3528,2860,2471,1702,2165,2910,2486,1789, 453, # 2480
+ 359,2291,1676, 73,1164,1461,1127,3311, 421, 604, 314,1037, 589, 116,2487, 737, # 2496
+ 837,1180, 111, 244, 735,6142,2261,1861,1362, 986, 523, 418, 581,2666,3822, 103, # 2512
+ 855, 503,1414,1867,2488,1091, 657,1597, 979, 605,1316,4641,1021,2443,2078,2001, # 2528
+1209, 96, 587,2166,1032, 260,1072,2153, 173, 94, 226,3244, 819,2006,4642,4114, # 2544
+2203, 231,1744, 782, 97,2667, 786,3387, 887, 391, 442,2219,4326,1425,6143,2694, # 2560
+ 633,1544,1202, 483,2015, 592,2052,1958,2472,1655, 419, 129,4327,3444,3312,1714, # 2576
+1257,3078,4328,1518,1098, 865,1310,1019,1885,1512,1734, 469,2444, 148, 773, 436, # 2592
+1815,1868,1128,1055,4329,1245,2756,3445,2154,1934,1039,4643, 579,1238, 932,2320, # 2608
+ 353, 205, 801, 115,2428, 944,2321,1881, 399,2565,1211, 678, 766,3944, 335,2101, # 2624
+1459,1781,1402,3945,2737,2131,1010, 844, 981,1326,1013, 550,1816,1545,2620,1335, # 2640
+1008, 371,2881, 936,1419,1613,3529,1456,1395,2273,1834,2604,1317,2738,2503, 416, # 2656
+1643,4330, 806,1126, 229, 591,3946,1314,1981,1576,1837,1666, 347,1790, 977,3313, # 2672
+ 764,2861,1853, 688,2429,1920,1462, 77, 595, 415,2002,3034, 798,1192,4115,6144, # 2688
+2978,4331,3035,2695,2582,2072,2566, 430,2430,1727, 842,1396,3947,3702, 613, 377, # 2704
+ 278, 236,1417,3388,3314,3174, 757,1869, 107,3530,6145,1194, 623,2262, 207,1253, # 2720
+2167,3446,3948, 492,1117,1935, 536,1838,2757,1246,4332, 696,2095,2406,1393,1572, # 2736
+3175,1782, 583, 190, 253,1390,2230, 830,3126,3389, 934,3245,1703,1749,2979,1870, # 2752
+2545,1656,2204, 869,2346,4116,3176,1817, 496,1764,4644, 942,1504, 404,1903,1122, # 2768
+1580,3606,2945,1022, 515, 372,1735, 955,2431,3036,6146,2797,1110,2302,2798, 617, # 2784
+6147, 441, 762,1771,3447,3607,3608,1904, 840,3037, 86, 939,1385, 572,1370,2445, # 2800
+1336, 114,3703, 898, 294, 203,3315, 703,1583,2274, 429, 961,4333,1854,1951,3390, # 2816
+2373,3704,4334,1318,1381, 966,1911,2322,1006,1155, 309, 989, 458,2718,1795,1372, # 2832
+1203, 252,1689,1363,3177, 517,1936, 168,1490, 562, 193,3823,1042,4117,1835, 551, # 2848
+ 470,4645, 395, 489,3448,1871,1465,2583,2641, 417,1493, 279,1295, 511,1236,1119, # 2864
+ 72,1231,1982,1812,3004, 871,1564, 984,3449,1667,2696,2096,4646,2347,2833,1673, # 2880
+3609, 695,3246,2668, 807,1183,4647, 890, 388,2333,1801,1457,2911,1765,1477,1031, # 2896
+3316,3317,1278,3391,2799,2292,2526, 163,3450,4335,2669,1404,1802,6148,2323,2407, # 2912
+1584,1728,1494,1824,1269, 298, 909,3318,1034,1632, 375, 776,1683,2061, 291, 210, # 2928
+1123, 809,1249,1002,2642,3038, 206,1011,2132, 144, 975, 882,1565, 342, 667, 754, # 2944
+1442,2143,1299,2303,2062, 447, 626,2205,1221,2739,2912,1144,1214,2206,2584, 760, # 2960
+1715, 614, 950,1281,2670,2621, 810, 577,1287,2546,4648, 242,2168, 250,2643, 691, # 2976
+ 123,2644, 647, 313,1029, 689,1357,2946,1650, 216, 771,1339,1306, 808,2063, 549, # 2992
+ 913,1371,2913,2914,6149,1466,1092,1174,1196,1311,2605,2396,1783,1796,3079, 406, # 3008
+2671,2117,3949,4649, 487,1825,2220,6150,2915, 448,2348,1073,6151,2397,1707, 130, # 3024
+ 900,1598, 329, 176,1959,2527,1620,6152,2275,4336,3319,1983,2191,3705,3610,2155, # 3040
+3706,1912,1513,1614,6153,1988, 646, 392,2304,1589,3320,3039,1826,1239,1352,1340, # 3056
+2916, 505,2567,1709,1437,2408,2547, 906,6154,2672, 384,1458,1594,1100,1329, 710, # 3072
+ 423,3531,2064,2231,2622,1989,2673,1087,1882, 333, 841,3005,1296,2882,2379, 580, # 3088
+1937,1827,1293,2585, 601, 574, 249,1772,4118,2079,1120, 645, 901,1176,1690, 795, # 3104
+2207, 478,1434, 516,1190,1530, 761,2080, 930,1264, 355, 435,1552, 644,1791, 987, # 3120
+ 220,1364,1163,1121,1538, 306,2169,1327,1222, 546,2645, 218, 241, 610,1704,3321, # 3136
+1984,1839,1966,2528, 451,6155,2586,3707,2568, 907,3178, 254,2947, 186,1845,4650, # 3152
+ 745, 432,1757, 428,1633, 888,2246,2221,2489,3611,2118,1258,1265, 956,3127,1784, # 3168
+4337,2490, 319, 510, 119, 457,3612, 274,2035,2007,4651,1409,3128, 970,2758, 590, # 3184
+2800, 661,2247,4652,2008,3950,1420,1549,3080,3322,3951,1651,1375,2111, 485,2491, # 3200
+1429,1156,6156,2548,2183,1495, 831,1840,2529,2446, 501,1657, 307,1894,3247,1341, # 3216
+ 666, 899,2156,1539,2549,1559, 886, 349,2208,3081,2305,1736,3824,2170,2759,1014, # 3232
+1913,1386, 542,1397,2948, 490, 368, 716, 362, 159, 282,2569,1129,1658,1288,1750, # 3248
+2674, 276, 649,2016, 751,1496, 658,1818,1284,1862,2209,2087,2512,3451, 622,2834, # 3264
+ 376, 117,1060,2053,1208,1721,1101,1443, 247,1250,3179,1792,3952,2760,2398,3953, # 3280
+6157,2144,3708, 446,2432,1151,2570,3452,2447,2761,2835,1210,2448,3082, 424,2222, # 3296
+1251,2449,2119,2836, 504,1581,4338, 602, 817, 857,3825,2349,2306, 357,3826,1470, # 3312
+1883,2883, 255, 958, 929,2917,3248, 302,4653,1050,1271,1751,2307,1952,1430,2697, # 3328
+2719,2359, 354,3180, 777, 158,2036,4339,1659,4340,4654,2308,2949,2248,1146,2232, # 3344
+3532,2720,1696,2623,3827,6158,3129,1550,2698,1485,1297,1428, 637, 931,2721,2145, # 3360
+ 914,2550,2587, 81,2450, 612, 827,2646,1242,4655,1118,2884, 472,1855,3181,3533, # 3376
+3534, 569,1353,2699,1244,1758,2588,4119,2009,2762,2171,3709,1312,1531,6159,1152, # 3392
+1938, 134,1830, 471,3710,2276,1112,1535,3323,3453,3535, 982,1337,2950, 488, 826, # 3408
+ 674,1058,1628,4120,2017, 522,2399, 211, 568,1367,3454, 350, 293,1872,1139,3249, # 3424
+1399,1946,3006,1300,2360,3324, 588, 736,6160,2606, 744, 669,3536,3828,6161,1358, # 3440
+ 199, 723, 848, 933, 851,1939,1505,1514,1338,1618,1831,4656,1634,3613, 443,2740, # 3456
+3829, 717,1947, 491,1914,6162,2551,1542,4121,1025,6163,1099,1223, 198,3040,2722, # 3472
+ 370, 410,1905,2589, 998,1248,3182,2380, 519,1449,4122,1710, 947, 928,1153,4341, # 3488
+2277, 344,2624,1511, 615, 105, 161,1212,1076,1960,3130,2054,1926,1175,1906,2473, # 3504
+ 414,1873,2801,6164,2309, 315,1319,3325, 318,2018,2146,2157, 963, 631, 223,4342, # 3520
+4343,2675, 479,3711,1197,2625,3712,2676,2361,6165,4344,4123,6166,2451,3183,1886, # 3536
+2184,1674,1330,1711,1635,1506, 799, 219,3250,3083,3954,1677,3713,3326,2081,3614, # 3552
+1652,2073,4657,1147,3041,1752, 643,1961, 147,1974,3955,6167,1716,2037, 918,3007, # 3568
+1994, 120,1537, 118, 609,3184,4345, 740,3455,1219, 332,1615,3830,6168,1621,2980, # 3584
+1582, 783, 212, 553,2350,3714,1349,2433,2082,4124, 889,6169,2310,1275,1410, 973, # 3600
+ 166,1320,3456,1797,1215,3185,2885,1846,2590,2763,4658, 629, 822,3008, 763, 940, # 3616
+1990,2862, 439,2409,1566,1240,1622, 926,1282,1907,2764, 654,2210,1607, 327,1130, # 3632
+3956,1678,1623,6170,2434,2192, 686, 608,3831,3715, 903,3957,3042,6171,2741,1522, # 3648
+1915,1105,1555,2552,1359, 323,3251,4346,3457, 738,1354,2553,2311,2334,1828,2003, # 3664
+3832,1753,2351,1227,6172,1887,4125,1478,6173,2410,1874,1712,1847, 520,1204,2607, # 3680
+ 264,4659, 836,2677,2102, 600,4660,3833,2278,3084,6174,4347,3615,1342, 640, 532, # 3696
+ 543,2608,1888,2400,2591,1009,4348,1497, 341,1737,3616,2723,1394, 529,3252,1321, # 3712
+ 983,4661,1515,2120, 971,2592, 924, 287,1662,3186,4349,2700,4350,1519, 908,1948, # 3728
+2452, 156, 796,1629,1486,2223,2055, 694,4126,1259,1036,3392,1213,2249,2742,1889, # 3744
+1230,3958,1015, 910, 408, 559,3617,4662, 746, 725, 935,4663,3959,3009,1289, 563, # 3760
+ 867,4664,3960,1567,2981,2038,2626, 988,2263,2381,4351, 143,2374, 704,1895,6175, # 3776
+1188,3716,2088, 673,3085,2362,4352, 484,1608,1921,2765,2918, 215, 904,3618,3537, # 3792
+ 894, 509, 976,3043,2701,3961,4353,2837,2982, 498,6176,6177,1102,3538,1332,3393, # 3808
+1487,1636,1637, 233, 245,3962, 383, 650, 995,3044, 460,1520,1206,2352, 749,3327, # 3824
+ 530, 700, 389,1438,1560,1773,3963,2264, 719,2951,2724,3834, 870,1832,1644,1000, # 3840
+ 839,2474,3717, 197,1630,3394, 365,2886,3964,1285,2133, 734, 922, 818,1106, 732, # 3856
+ 480,2083,1774,3458, 923,2279,1350, 221,3086, 85,2233,2234,3835,1585,3010,2147, # 3872
+1387,1705,2382,1619,2475, 133, 239,2802,1991,1016,2084,2383, 411,2838,1113, 651, # 3888
+1985,1160,3328, 990,1863,3087,1048,1276,2647, 265,2627,1599,3253,2056, 150, 638, # 3904
+2019, 656, 853, 326,1479, 680,1439,4354,1001,1759, 413,3459,3395,2492,1431, 459, # 3920
+4355,1125,3329,2265,1953,1450,2065,2863, 849, 351,2678,3131,3254,3255,1104,1577, # 3936
+ 227,1351,1645,2453,2193,1421,2887, 812,2121, 634, 95,2435, 201,2312,4665,1646, # 3952
+1671,2743,1601,2554,2702,2648,2280,1315,1366,2089,3132,1573,3718,3965,1729,1189, # 3968
+ 328,2679,1077,1940,1136, 558,1283, 964,1195, 621,2074,1199,1743,3460,3619,1896, # 3984
+1916,1890,3836,2952,1154,2112,1064, 862, 378,3011,2066,2113,2803,1568,2839,6178, # 4000
+3088,2919,1941,1660,2004,1992,2194, 142, 707,1590,1708,1624,1922,1023,1836,1233, # 4016
+1004,2313, 789, 741,3620,6179,1609,2411,1200,4127,3719,3720,4666,2057,3721, 593, # 4032
+2840, 367,2920,1878,6180,3461,1521, 628,1168, 692,2211,2649, 300, 720,2067,2571, # 4048
+2953,3396, 959,2504,3966,3539,3462,1977, 701,6181, 954,1043, 800, 681, 183,3722, # 4064
+1803,1730,3540,4128,2103, 815,2314, 174, 467, 230,2454,1093,2134, 755,3541,3397, # 4080
+1141,1162,6182,1738,2039, 270,3256,2513,1005,1647,2185,3837, 858,1679,1897,1719, # 4096
+2954,2324,1806, 402, 670, 167,4129,1498,2158,2104, 750,6183, 915, 189,1680,1551, # 4112
+ 455,4356,1501,2455, 405,1095,2955, 338,1586,1266,1819, 570, 641,1324, 237,1556, # 4128
+2650,1388,3723,6184,1368,2384,1343,1978,3089,2436, 879,3724, 792,1191, 758,3012, # 4144
+1411,2135,1322,4357, 240,4667,1848,3725,1574,6185, 420,3045,1546,1391, 714,4358, # 4160
+1967, 941,1864, 863, 664, 426, 560,1731,2680,1785,2864,1949,2363, 403,3330,1415, # 4176
+1279,2136,1697,2335, 204, 721,2097,3838, 90,6186,2085,2505, 191,3967, 124,2148, # 4192
+1376,1798,1178,1107,1898,1405, 860,4359,1243,1272,2375,2983,1558,2456,1638, 113, # 4208
+3621, 578,1923,2609, 880, 386,4130, 784,2186,2266,1422,2956,2172,1722, 497, 263, # 4224
+2514,1267,2412,2610, 177,2703,3542, 774,1927,1344, 616,1432,1595,1018, 172,4360, # 4240
+2325, 911,4361, 438,1468,3622, 794,3968,2024,2173,1681,1829,2957, 945, 895,3090, # 4256
+ 575,2212,2476, 475,2401,2681, 785,2744,1745,2293,2555,1975,3133,2865, 394,4668, # 4272
+3839, 635,4131, 639, 202,1507,2195,2766,1345,1435,2572,3726,1908,1184,1181,2457, # 4288
+3727,3134,4362, 843,2611, 437, 916,4669, 234, 769,1884,3046,3047,3623, 833,6187, # 4304
+1639,2250,2402,1355,1185,2010,2047, 999, 525,1732,1290,1488,2612, 948,1578,3728, # 4320
+2413,2477,1216,2725,2159, 334,3840,1328,3624,2921,1525,4132, 564,1056, 891,4363, # 4336
+1444,1698,2385,2251,3729,1365,2281,2235,1717,6188, 864,3841,2515, 444, 527,2767, # 4352
+2922,3625, 544, 461,6189, 566, 209,2437,3398,2098,1065,2068,3331,3626,3257,2137, # 4368 #last 512
+)
+# fmt: on
diff --git a/third_party/python/pip/pip/_vendor/chardet/johabfreq.py b/third_party/python/pip/pip/_vendor/chardet/johabfreq.py
new file mode 100644
index 0000000000..c12969990d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/johabfreq.py
@@ -0,0 +1,2382 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+# The frequency data itself is the same as for euc-kr.
+# This is just a table mapping Johab code points to the euc-kr order.
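+# As a hedged illustration (not part of the upstream module), a
+# distribution analyzer could turn a two-byte Johab character into its
+# euc-kr frequency order with a lookup like the hypothetical helper
+# below, falling back to -1 for characters outside the table:
+#
+#     def johab_order(first_byte: int, second_byte: int) -> int:
+#         code = (first_byte << 8) | second_byte
+#         return JOHAB_TO_EUCKR_ORDER_TABLE.get(code, -1)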
+
+JOHAB_TO_EUCKR_ORDER_TABLE = {
+ 0x8861: 0,
+ 0x8862: 1,
+ 0x8865: 2,
+ 0x8868: 3,
+ 0x8869: 4,
+ 0x886A: 5,
+ 0x886B: 6,
+ 0x8871: 7,
+ 0x8873: 8,
+ 0x8874: 9,
+ 0x8875: 10,
+ 0x8876: 11,
+ 0x8877: 12,
+ 0x8878: 13,
+ 0x8879: 14,
+ 0x887B: 15,
+ 0x887C: 16,
+ 0x887D: 17,
+ 0x8881: 18,
+ 0x8882: 19,
+ 0x8885: 20,
+ 0x8889: 21,
+ 0x8891: 22,
+ 0x8893: 23,
+ 0x8895: 24,
+ 0x8896: 25,
+ 0x8897: 26,
+ 0x88A1: 27,
+ 0x88A2: 28,
+ 0x88A5: 29,
+ 0x88A9: 30,
+ 0x88B5: 31,
+ 0x88B7: 32,
+ 0x88C1: 33,
+ 0x88C5: 34,
+ 0x88C9: 35,
+ 0x88E1: 36,
+ 0x88E2: 37,
+ 0x88E5: 38,
+ 0x88E8: 39,
+ 0x88E9: 40,
+ 0x88EB: 41,
+ 0x88F1: 42,
+ 0x88F3: 43,
+ 0x88F5: 44,
+ 0x88F6: 45,
+ 0x88F7: 46,
+ 0x88F8: 47,
+ 0x88FB: 48,
+ 0x88FC: 49,
+ 0x88FD: 50,
+ 0x8941: 51,
+ 0x8945: 52,
+ 0x8949: 53,
+ 0x8951: 54,
+ 0x8953: 55,
+ 0x8955: 56,
+ 0x8956: 57,
+ 0x8957: 58,
+ 0x8961: 59,
+ 0x8962: 60,
+ 0x8963: 61,
+ 0x8965: 62,
+ 0x8968: 63,
+ 0x8969: 64,
+ 0x8971: 65,
+ 0x8973: 66,
+ 0x8975: 67,
+ 0x8976: 68,
+ 0x8977: 69,
+ 0x897B: 70,
+ 0x8981: 71,
+ 0x8985: 72,
+ 0x8989: 73,
+ 0x8993: 74,
+ 0x8995: 75,
+ 0x89A1: 76,
+ 0x89A2: 77,
+ 0x89A5: 78,
+ 0x89A8: 79,
+ 0x89A9: 80,
+ 0x89AB: 81,
+ 0x89AD: 82,
+ 0x89B0: 83,
+ 0x89B1: 84,
+ 0x89B3: 85,
+ 0x89B5: 86,
+ 0x89B7: 87,
+ 0x89B8: 88,
+ 0x89C1: 89,
+ 0x89C2: 90,
+ 0x89C5: 91,
+ 0x89C9: 92,
+ 0x89CB: 93,
+ 0x89D1: 94,
+ 0x89D3: 95,
+ 0x89D5: 96,
+ 0x89D7: 97,
+ 0x89E1: 98,
+ 0x89E5: 99,
+ 0x89E9: 100,
+ 0x89F3: 101,
+ 0x89F6: 102,
+ 0x89F7: 103,
+ 0x8A41: 104,
+ 0x8A42: 105,
+ 0x8A45: 106,
+ 0x8A49: 107,
+ 0x8A51: 108,
+ 0x8A53: 109,
+ 0x8A55: 110,
+ 0x8A57: 111,
+ 0x8A61: 112,
+ 0x8A65: 113,
+ 0x8A69: 114,
+ 0x8A73: 115,
+ 0x8A75: 116,
+ 0x8A81: 117,
+ 0x8A82: 118,
+ 0x8A85: 119,
+ 0x8A88: 120,
+ 0x8A89: 121,
+ 0x8A8A: 122,
+ 0x8A8B: 123,
+ 0x8A90: 124,
+ 0x8A91: 125,
+ 0x8A93: 126,
+ 0x8A95: 127,
+ 0x8A97: 128,
+ 0x8A98: 129,
+ 0x8AA1: 130,
+ 0x8AA2: 131,
+ 0x8AA5: 132,
+ 0x8AA9: 133,
+ 0x8AB6: 134,
+ 0x8AB7: 135,
+ 0x8AC1: 136,
+ 0x8AD5: 137,
+ 0x8AE1: 138,
+ 0x8AE2: 139,
+ 0x8AE5: 140,
+ 0x8AE9: 141,
+ 0x8AF1: 142,
+ 0x8AF3: 143,
+ 0x8AF5: 144,
+ 0x8B41: 145,
+ 0x8B45: 146,
+ 0x8B49: 147,
+ 0x8B61: 148,
+ 0x8B62: 149,
+ 0x8B65: 150,
+ 0x8B68: 151,
+ 0x8B69: 152,
+ 0x8B6A: 153,
+ 0x8B71: 154,
+ 0x8B73: 155,
+ 0x8B75: 156,
+ 0x8B77: 157,
+ 0x8B81: 158,
+ 0x8BA1: 159,
+ 0x8BA2: 160,
+ 0x8BA5: 161,
+ 0x8BA8: 162,
+ 0x8BA9: 163,
+ 0x8BAB: 164,
+ 0x8BB1: 165,
+ 0x8BB3: 166,
+ 0x8BB5: 167,
+ 0x8BB7: 168,
+ 0x8BB8: 169,
+ 0x8BBC: 170,
+ 0x8C61: 171,
+ 0x8C62: 172,
+ 0x8C63: 173,
+ 0x8C65: 174,
+ 0x8C69: 175,
+ 0x8C6B: 176,
+ 0x8C71: 177,
+ 0x8C73: 178,
+ 0x8C75: 179,
+ 0x8C76: 180,
+ 0x8C77: 181,
+ 0x8C7B: 182,
+ 0x8C81: 183,
+ 0x8C82: 184,
+ 0x8C85: 185,
+ 0x8C89: 186,
+ 0x8C91: 187,
+ 0x8C93: 188,
+ 0x8C95: 189,
+ 0x8C96: 190,
+ 0x8C97: 191,
+ 0x8CA1: 192,
+ 0x8CA2: 193,
+ 0x8CA9: 194,
+ 0x8CE1: 195,
+ 0x8CE2: 196,
+ 0x8CE3: 197,
+ 0x8CE5: 198,
+ 0x8CE9: 199,
+ 0x8CF1: 200,
+ 0x8CF3: 201,
+ 0x8CF5: 202,
+ 0x8CF6: 203,
+ 0x8CF7: 204,
+ 0x8D41: 205,
+ 0x8D42: 206,
+ 0x8D45: 207,
+ 0x8D51: 208,
+ 0x8D55: 209,
+ 0x8D57: 210,
+ 0x8D61: 211,
+ 0x8D65: 212,
+ 0x8D69: 213,
+ 0x8D75: 214,
+ 0x8D76: 215,
+ 0x8D7B: 216,
+ 0x8D81: 217,
+ 0x8DA1: 218,
+ 0x8DA2: 219,
+ 0x8DA5: 220,
+ 0x8DA7: 221,
+ 0x8DA9: 222,
+ 0x8DB1: 223,
+ 0x8DB3: 224,
+ 0x8DB5: 225,
+ 0x8DB7: 226,
+ 0x8DB8: 227,
+ 0x8DB9: 228,
+ 0x8DC1: 229,
+ 0x8DC2: 230,
+ 0x8DC9: 231,
+ 0x8DD6: 232,
+ 0x8DD7: 233,
+ 0x8DE1: 234,
+ 0x8DE2: 235,
+ 0x8DF7: 236,
+ 0x8E41: 237,
+ 0x8E45: 238,
+ 0x8E49: 239,
+ 0x8E51: 240,
+ 0x8E53: 241,
+ 0x8E57: 242,
+ 0x8E61: 243,
+ 0x8E81: 244,
+ 0x8E82: 245,
+ 0x8E85: 246,
+ 0x8E89: 247,
+ 0x8E90: 248,
+ 0x8E91: 249,
+ 0x8E93: 250,
+ 0x8E95: 251,
+ 0x8E97: 252,
+ 0x8E98: 253,
+ 0x8EA1: 254,
+ 0x8EA9: 255,
+ 0x8EB6: 256,
+ 0x8EB7: 257,
+ 0x8EC1: 258,
+ 0x8EC2: 259,
+ 0x8EC5: 260,
+ 0x8EC9: 261,
+ 0x8ED1: 262,
+ 0x8ED3: 263,
+ 0x8ED6: 264,
+ 0x8EE1: 265,
+ 0x8EE5: 266,
+ 0x8EE9: 267,
+ 0x8EF1: 268,
+ 0x8EF3: 269,
+ 0x8F41: 270,
+ 0x8F61: 271,
+ 0x8F62: 272,
+ 0x8F65: 273,
+ 0x8F67: 274,
+ 0x8F69: 275,
+ 0x8F6B: 276,
+ 0x8F70: 277,
+ 0x8F71: 278,
+ 0x8F73: 279,
+ 0x8F75: 280,
+ 0x8F77: 281,
+ 0x8F7B: 282,
+ 0x8FA1: 283,
+ 0x8FA2: 284,
+ 0x8FA5: 285,
+ 0x8FA9: 286,
+ 0x8FB1: 287,
+ 0x8FB3: 288,
+ 0x8FB5: 289,
+ 0x8FB7: 290,
+ 0x9061: 291,
+ 0x9062: 292,
+ 0x9063: 293,
+ 0x9065: 294,
+ 0x9068: 295,
+ 0x9069: 296,
+ 0x906A: 297,
+ 0x906B: 298,
+ 0x9071: 299,
+ 0x9073: 300,
+ 0x9075: 301,
+ 0x9076: 302,
+ 0x9077: 303,
+ 0x9078: 304,
+ 0x9079: 305,
+ 0x907B: 306,
+ 0x907D: 307,
+ 0x9081: 308,
+ 0x9082: 309,
+ 0x9085: 310,
+ 0x9089: 311,
+ 0x9091: 312,
+ 0x9093: 313,
+ 0x9095: 314,
+ 0x9096: 315,
+ 0x9097: 316,
+ 0x90A1: 317,
+ 0x90A2: 318,
+ 0x90A5: 319,
+ 0x90A9: 320,
+ 0x90B1: 321,
+ 0x90B7: 322,
+ 0x90E1: 323,
+ 0x90E2: 324,
+ 0x90E4: 325,
+ 0x90E5: 326,
+ 0x90E9: 327,
+ 0x90EB: 328,
+ 0x90EC: 329,
+ 0x90F1: 330,
+ 0x90F3: 331,
+ 0x90F5: 332,
+ 0x90F6: 333,
+ 0x90F7: 334,
+ 0x90FD: 335,
+ 0x9141: 336,
+ 0x9142: 337,
+ 0x9145: 338,
+ 0x9149: 339,
+ 0x9151: 340,
+ 0x9153: 341,
+ 0x9155: 342,
+ 0x9156: 343,
+ 0x9157: 344,
+ 0x9161: 345,
+ 0x9162: 346,
+ 0x9165: 347,
+ 0x9169: 348,
+ 0x9171: 349,
+ 0x9173: 350,
+ 0x9176: 351,
+ 0x9177: 352,
+ 0x917A: 353,
+ 0x9181: 354,
+ 0x9185: 355,
+ 0x91A1: 356,
+ 0x91A2: 357,
+ 0x91A5: 358,
+ 0x91A9: 359,
+ 0x91AB: 360,
+ 0x91B1: 361,
+ 0x91B3: 362,
+ 0x91B5: 363,
+ 0x91B7: 364,
+ 0x91BC: 365,
+ 0x91BD: 366,
+ 0x91C1: 367,
+ 0x91C5: 368,
+ 0x91C9: 369,
+ 0x91D6: 370,
+ 0x9241: 371,
+ 0x9245: 372,
+ 0x9249: 373,
+ 0x9251: 374,
+ 0x9253: 375,
+ 0x9255: 376,
+ 0x9261: 377,
+ 0x9262: 378,
+ 0x9265: 379,
+ 0x9269: 380,
+ 0x9273: 381,
+ 0x9275: 382,
+ 0x9277: 383,
+ 0x9281: 384,
+ 0x9282: 385,
+ 0x9285: 386,
+ 0x9288: 387,
+ 0x9289: 388,
+ 0x9291: 389,
+ 0x9293: 390,
+ 0x9295: 391,
+ 0x9297: 392,
+ 0x92A1: 393,
+ 0x92B6: 394,
+ 0x92C1: 395,
+ 0x92E1: 396,
+ 0x92E5: 397,
+ 0x92E9: 398,
+ 0x92F1: 399,
+ 0x92F3: 400,
+ 0x9341: 401,
+ 0x9342: 402,
+ 0x9349: 403,
+ 0x9351: 404,
+ 0x9353: 405,
+ 0x9357: 406,
+ 0x9361: 407,
+ 0x9362: 408,
+ 0x9365: 409,
+ 0x9369: 410,
+ 0x936A: 411,
+ 0x936B: 412,
+ 0x9371: 413,
+ 0x9373: 414,
+ 0x9375: 415,
+ 0x9377: 416,
+ 0x9378: 417,
+ 0x937C: 418,
+ 0x9381: 419,
+ 0x9385: 420,
+ 0x9389: 421,
+ 0x93A1: 422,
+ 0x93A2: 423,
+ 0x93A5: 424,
+ 0x93A9: 425,
+ 0x93AB: 426,
+ 0x93B1: 427,
+ 0x93B3: 428,
+ 0x93B5: 429,
+ 0x93B7: 430,
+ 0x93BC: 431,
+ 0x9461: 432,
+ 0x9462: 433,
+ 0x9463: 434,
+ 0x9465: 435,
+ 0x9468: 436,
+ 0x9469: 437,
+ 0x946A: 438,
+ 0x946B: 439,
+ 0x946C: 440,
+ 0x9470: 441,
+ 0x9471: 442,
+ 0x9473: 443,
+ 0x9475: 444,
+ 0x9476: 445,
+ 0x9477: 446,
+ 0x9478: 447,
+ 0x9479: 448,
+ 0x947D: 449,
+ 0x9481: 450,
+ 0x9482: 451,
+ 0x9485: 452,
+ 0x9489: 453,
+ 0x9491: 454,
+ 0x9493: 455,
+ 0x9495: 456,
+ 0x9496: 457,
+ 0x9497: 458,
+ 0x94A1: 459,
+ 0x94E1: 460,
+ 0x94E2: 461,
+ 0x94E3: 462,
+ 0x94E5: 463,
+ 0x94E8: 464,
+ 0x94E9: 465,
+ 0x94EB: 466,
+ 0x94EC: 467,
+ 0x94F1: 468,
+ 0x94F3: 469,
+ 0x94F5: 470,
+ 0x94F7: 471,
+ 0x94F9: 472,
+ 0x94FC: 473,
+ 0x9541: 474,
+ 0x9542: 475,
+ 0x9545: 476,
+ 0x9549: 477,
+ 0x9551: 478,
+ 0x9553: 479,
+ 0x9555: 480,
+ 0x9556: 481,
+ 0x9557: 482,
+ 0x9561: 483,
+ 0x9565: 484,
+ 0x9569: 485,
+ 0x9576: 486,
+ 0x9577: 487,
+ 0x9581: 488,
+ 0x9585: 489,
+ 0x95A1: 490,
+ 0x95A2: 491,
+ 0x95A5: 492,
+ 0x95A8: 493,
+ 0x95A9: 494,
+ 0x95AB: 495,
+ 0x95AD: 496,
+ 0x95B1: 497,
+ 0x95B3: 498,
+ 0x95B5: 499,
+ 0x95B7: 500,
+ 0x95B9: 501,
+ 0x95BB: 502,
+ 0x95C1: 503,
+ 0x95C5: 504,
+ 0x95C9: 505,
+ 0x95E1: 506,
+ 0x95F6: 507,
+ 0x9641: 508,
+ 0x9645: 509,
+ 0x9649: 510,
+ 0x9651: 511,
+ 0x9653: 512,
+ 0x9655: 513,
+ 0x9661: 514,
+ 0x9681: 515,
+ 0x9682: 516,
+ 0x9685: 517,
+ 0x9689: 518,
+ 0x9691: 519,
+ 0x9693: 520,
+ 0x9695: 521,
+ 0x9697: 522,
+ 0x96A1: 523,
+ 0x96B6: 524,
+ 0x96C1: 525,
+ 0x96D7: 526,
+ 0x96E1: 527,
+ 0x96E5: 528,
+ 0x96E9: 529,
+ 0x96F3: 530,
+ 0x96F5: 531,
+ 0x96F7: 532,
+ 0x9741: 533,
+ 0x9745: 534,
+ 0x9749: 535,
+ 0x9751: 536,
+ 0x9757: 537,
+ 0x9761: 538,
+ 0x9762: 539,
+ 0x9765: 540,
+ 0x9768: 541,
+ 0x9769: 542,
+ 0x976B: 543,
+ 0x9771: 544,
+ 0x9773: 545,
+ 0x9775: 546,
+ 0x9777: 547,
+ 0x9781: 548,
+ 0x97A1: 549,
+ 0x97A2: 550,
+ 0x97A5: 551,
+ 0x97A8: 552,
+ 0x97A9: 553,
+ 0x97B1: 554,
+ 0x97B3: 555,
+ 0x97B5: 556,
+ 0x97B6: 557,
+ 0x97B7: 558,
+ 0x97B8: 559,
+ 0x9861: 560,
+ 0x9862: 561,
+ 0x9865: 562,
+ 0x9869: 563,
+ 0x9871: 564,
+ 0x9873: 565,
+ 0x9875: 566,
+ 0x9876: 567,
+ 0x9877: 568,
+ 0x987D: 569,
+ 0x9881: 570,
+ 0x9882: 571,
+ 0x9885: 572,
+ 0x9889: 573,
+ 0x9891: 574,
+ 0x9893: 575,
+ 0x9895: 576,
+ 0x9896: 577,
+ 0x9897: 578,
+ 0x98E1: 579,
+ 0x98E2: 580,
+ 0x98E5: 581,
+ 0x98E9: 582,
+ 0x98EB: 583,
+ 0x98EC: 584,
+ 0x98F1: 585,
+ 0x98F3: 586,
+ 0x98F5: 587,
+ 0x98F6: 588,
+ 0x98F7: 589,
+ 0x98FD: 590,
+ 0x9941: 591,
+ 0x9942: 592,
+ 0x9945: 593,
+ 0x9949: 594,
+ 0x9951: 595,
+ 0x9953: 596,
+ 0x9955: 597,
+ 0x9956: 598,
+ 0x9957: 599,
+ 0x9961: 600,
+ 0x9976: 601,
+ 0x99A1: 602,
+ 0x99A2: 603,
+ 0x99A5: 604,
+ 0x99A9: 605,
+ 0x99B7: 606,
+ 0x99C1: 607,
+ 0x99C9: 608,
+ 0x99E1: 609,
+ 0x9A41: 610,
+ 0x9A45: 611,
+ 0x9A81: 612,
+ 0x9A82: 613,
+ 0x9A85: 614,
+ 0x9A89: 615,
+ 0x9A90: 616,
+ 0x9A91: 617,
+ 0x9A97: 618,
+ 0x9AC1: 619,
+ 0x9AE1: 620,
+ 0x9AE5: 621,
+ 0x9AE9: 622,
+ 0x9AF1: 623,
+ 0x9AF3: 624,
+ 0x9AF7: 625,
+ 0x9B61: 626,
+ 0x9B62: 627,
+ 0x9B65: 628,
+ 0x9B68: 629,
+ 0x9B69: 630,
+ 0x9B71: 631,
+ 0x9B73: 632,
+ 0x9B75: 633,
+ 0x9B81: 634,
+ 0x9B85: 635,
+ 0x9B89: 636,
+ 0x9B91: 637,
+ 0x9B93: 638,
+ 0x9BA1: 639,
+ 0x9BA5: 640,
+ 0x9BA9: 641,
+ 0x9BB1: 642,
+ 0x9BB3: 643,
+ 0x9BB5: 644,
+ 0x9BB7: 645,
+ 0x9C61: 646,
+ 0x9C62: 647,
+ 0x9C65: 648,
+ 0x9C69: 649,
+ 0x9C71: 650,
+ 0x9C73: 651,
+ 0x9C75: 652,
+ 0x9C76: 653,
+ 0x9C77: 654,
+ 0x9C78: 655,
+ 0x9C7C: 656,
+ 0x9C7D: 657,
+ 0x9C81: 658,
+ 0x9C82: 659,
+ 0x9C85: 660,
+ 0x9C89: 661,
+ 0x9C91: 662,
+ 0x9C93: 663,
+ 0x9C95: 664,
+ 0x9C96: 665,
+ 0x9C97: 666,
+ 0x9CA1: 667,
+ 0x9CA2: 668,
+ 0x9CA5: 669,
+ 0x9CB5: 670,
+ 0x9CB7: 671,
+ 0x9CE1: 672,
+ 0x9CE2: 673,
+ 0x9CE5: 674,
+ 0x9CE9: 675,
+ 0x9CF1: 676,
+ 0x9CF3: 677,
+ 0x9CF5: 678,
+ 0x9CF6: 679,
+ 0x9CF7: 680,
+ 0x9CFD: 681,
+ 0x9D41: 682,
+ 0x9D42: 683,
+ 0x9D45: 684,
+ 0x9D49: 685,
+ 0x9D51: 686,
+ 0x9D53: 687,
+ 0x9D55: 688,
+ 0x9D57: 689,
+ 0x9D61: 690,
+ 0x9D62: 691,
+ 0x9D65: 692,
+ 0x9D69: 693,
+ 0x9D71: 694,
+ 0x9D73: 695,
+ 0x9D75: 696,
+ 0x9D76: 697,
+ 0x9D77: 698,
+ 0x9D81: 699,
+ 0x9D85: 700,
+ 0x9D93: 701,
+ 0x9D95: 702,
+ 0x9DA1: 703,
+ 0x9DA2: 704,
+ 0x9DA5: 705,
+ 0x9DA9: 706,
+ 0x9DB1: 707,
+ 0x9DB3: 708,
+ 0x9DB5: 709,
+ 0x9DB7: 710,
+ 0x9DC1: 711,
+ 0x9DC5: 712,
+ 0x9DD7: 713,
+ 0x9DF6: 714,
+ 0x9E41: 715,
+ 0x9E45: 716,
+ 0x9E49: 717,
+ 0x9E51: 718,
+ 0x9E53: 719,
+ 0x9E55: 720,
+ 0x9E57: 721,
+ 0x9E61: 722,
+ 0x9E65: 723,
+ 0x9E69: 724,
+ 0x9E73: 725,
+ 0x9E75: 726,
+ 0x9E77: 727,
+ 0x9E81: 728,
+ 0x9E82: 729,
+ 0x9E85: 730,
+ 0x9E89: 731,
+ 0x9E91: 732,
+ 0x9E93: 733,
+ 0x9E95: 734,
+ 0x9E97: 735,
+ 0x9EA1: 736,
+ 0x9EB6: 737,
+ 0x9EC1: 738,
+ 0x9EE1: 739,
+ 0x9EE2: 740,
+ 0x9EE5: 741,
+ 0x9EE9: 742,
+ 0x9EF1: 743,
+ 0x9EF5: 744,
+ 0x9EF7: 745,
+ 0x9F41: 746,
+ 0x9F42: 747,
+ 0x9F45: 748,
+ 0x9F49: 749,
+ 0x9F51: 750,
+ 0x9F53: 751,
+ 0x9F55: 752,
+ 0x9F57: 753,
+ 0x9F61: 754,
+ 0x9F62: 755,
+ 0x9F65: 756,
+ 0x9F69: 757,
+ 0x9F71: 758,
+ 0x9F73: 759,
+ 0x9F75: 760,
+ 0x9F77: 761,
+ 0x9F78: 762,
+ 0x9F7B: 763,
+ 0x9F7C: 764,
+ 0x9FA1: 765,
+ 0x9FA2: 766,
+ 0x9FA5: 767,
+ 0x9FA9: 768,
+ 0x9FB1: 769,
+ 0x9FB3: 770,
+ 0x9FB5: 771,
+ 0x9FB7: 772,
+ 0xA061: 773,
+ 0xA062: 774,
+ 0xA065: 775,
+ 0xA067: 776,
+ 0xA068: 777,
+ 0xA069: 778,
+ 0xA06A: 779,
+ 0xA06B: 780,
+ 0xA071: 781,
+ 0xA073: 782,
+ 0xA075: 783,
+ 0xA077: 784,
+ 0xA078: 785,
+ 0xA07B: 786,
+ 0xA07D: 787,
+ 0xA081: 788,
+ 0xA082: 789,
+ 0xA085: 790,
+ 0xA089: 791,
+ 0xA091: 792,
+ 0xA093: 793,
+ 0xA095: 794,
+ 0xA096: 795,
+ 0xA097: 796,
+ 0xA098: 797,
+ 0xA0A1: 798,
+ 0xA0A2: 799,
+ 0xA0A9: 800,
+ 0xA0B7: 801,
+ 0xA0E1: 802,
+ 0xA0E2: 803,
+ 0xA0E5: 804,
+ 0xA0E9: 805,
+ 0xA0EB: 806,
+ 0xA0F1: 807,
+ 0xA0F3: 808,
+ 0xA0F5: 809,
+ 0xA0F7: 810,
+ 0xA0F8: 811,
+ 0xA0FD: 812,
+ 0xA141: 813,
+ 0xA142: 814,
+ 0xA145: 815,
+ 0xA149: 816,
+ 0xA151: 817,
+ 0xA153: 818,
+ 0xA155: 819,
+ 0xA156: 820,
+ 0xA157: 821,
+ 0xA161: 822,
+ 0xA162: 823,
+ 0xA165: 824,
+ 0xA169: 825,
+ 0xA175: 826,
+ 0xA176: 827,
+ 0xA177: 828,
+ 0xA179: 829,
+ 0xA181: 830,
+ 0xA1A1: 831,
+ 0xA1A2: 832,
+ 0xA1A4: 833,
+ 0xA1A5: 834,
+ 0xA1A9: 835,
+ 0xA1AB: 836,
+ 0xA1B1: 837,
+ 0xA1B3: 838,
+ 0xA1B5: 839,
+ 0xA1B7: 840,
+ 0xA1C1: 841,
+ 0xA1C5: 842,
+ 0xA1D6: 843,
+ 0xA1D7: 844,
+ 0xA241: 845,
+ 0xA245: 846,
+ 0xA249: 847,
+ 0xA253: 848,
+ 0xA255: 849,
+ 0xA257: 850,
+ 0xA261: 851,
+ 0xA265: 852,
+ 0xA269: 853,
+ 0xA273: 854,
+ 0xA275: 855,
+ 0xA281: 856,
+ 0xA282: 857,
+ 0xA283: 858,
+ 0xA285: 859,
+ 0xA288: 860,
+ 0xA289: 861,
+ 0xA28A: 862,
+ 0xA28B: 863,
+ 0xA291: 864,
+ 0xA293: 865,
+ 0xA295: 866,
+ 0xA297: 867,
+ 0xA29B: 868,
+ 0xA29D: 869,
+ 0xA2A1: 870,
+ 0xA2A5: 871,
+ 0xA2A9: 872,
+ 0xA2B3: 873,
+ 0xA2B5: 874,
+ 0xA2C1: 875,
+ 0xA2E1: 876,
+ 0xA2E5: 877,
+ 0xA2E9: 878,
+ 0xA341: 879,
+ 0xA345: 880,
+ 0xA349: 881,
+ 0xA351: 882,
+ 0xA355: 883,
+ 0xA361: 884,
+ 0xA365: 885,
+ 0xA369: 886,
+ 0xA371: 887,
+ 0xA375: 888,
+ 0xA3A1: 889,
+ 0xA3A2: 890,
+ 0xA3A5: 891,
+ 0xA3A8: 892,
+ 0xA3A9: 893,
+ 0xA3AB: 894,
+ 0xA3B1: 895,
+ 0xA3B3: 896,
+ 0xA3B5: 897,
+ 0xA3B6: 898,
+ 0xA3B7: 899,
+ 0xA3B9: 900,
+ 0xA3BB: 901,
+ 0xA461: 902,
+ 0xA462: 903,
+ 0xA463: 904,
+ 0xA464: 905,
+ 0xA465: 906,
+ 0xA468: 907,
+ 0xA469: 908,
+ 0xA46A: 909,
+ 0xA46B: 910,
+ 0xA46C: 911,
+ 0xA471: 912,
+ 0xA473: 913,
+ 0xA475: 914,
+ 0xA477: 915,
+ 0xA47B: 916,
+ 0xA481: 917,
+ 0xA482: 918,
+ 0xA485: 919,
+ 0xA489: 920,
+ 0xA491: 921,
+ 0xA493: 922,
+ 0xA495: 923,
+ 0xA496: 924,
+ 0xA497: 925,
+ 0xA49B: 926,
+ 0xA4A1: 927,
+ 0xA4A2: 928,
+ 0xA4A5: 929,
+ 0xA4B3: 930,
+ 0xA4E1: 931,
+ 0xA4E2: 932,
+ 0xA4E5: 933,
+ 0xA4E8: 934,
+ 0xA4E9: 935,
+ 0xA4EB: 936,
+ 0xA4F1: 937,
+ 0xA4F3: 938,
+ 0xA4F5: 939,
+ 0xA4F7: 940,
+ 0xA4F8: 941,
+ 0xA541: 942,
+ 0xA542: 943,
+ 0xA545: 944,
+ 0xA548: 945,
+ 0xA549: 946,
+ 0xA551: 947,
+ 0xA553: 948,
+ 0xA555: 949,
+ 0xA556: 950,
+ 0xA557: 951,
+ 0xA561: 952,
+ 0xA562: 953,
+ 0xA565: 954,
+ 0xA569: 955,
+ 0xA573: 956,
+ 0xA575: 957,
+ 0xA576: 958,
+ 0xA577: 959,
+ 0xA57B: 960,
+ 0xA581: 961,
+ 0xA585: 962,
+ 0xA5A1: 963,
+ 0xA5A2: 964,
+ 0xA5A3: 965,
+ 0xA5A5: 966,
+ 0xA5A9: 967,
+ 0xA5B1: 968,
+ 0xA5B3: 969,
+ 0xA5B5: 970,
+ 0xA5B7: 971,
+ 0xA5C1: 972,
+ 0xA5C5: 973,
+ 0xA5D6: 974,
+ 0xA5E1: 975,
+ 0xA5F6: 976,
+ 0xA641: 977,
+ 0xA642: 978,
+ 0xA645: 979,
+ 0xA649: 980,
+ 0xA651: 981,
+ 0xA653: 982,
+ 0xA661: 983,
+ 0xA665: 984,
+ 0xA681: 985,
+ 0xA682: 986,
+ 0xA685: 987,
+ 0xA688: 988,
+ 0xA689: 989,
+ 0xA68A: 990,
+ 0xA68B: 991,
+ 0xA691: 992,
+ 0xA693: 993,
+ 0xA695: 994,
+ 0xA697: 995,
+ 0xA69B: 996,
+ 0xA69C: 997,
+ 0xA6A1: 998,
+ 0xA6A9: 999,
+ 0xA6B6: 1000,
+ 0xA6C1: 1001,
+ 0xA6E1: 1002,
+ 0xA6E2: 1003,
+ 0xA6E5: 1004,
+ 0xA6E9: 1005,
+ 0xA6F7: 1006,
+ 0xA741: 1007,
+ 0xA745: 1008,
+ 0xA749: 1009,
+ 0xA751: 1010,
+ 0xA755: 1011,
+ 0xA757: 1012,
+ 0xA761: 1013,
+ 0xA762: 1014,
+ 0xA765: 1015,
+ 0xA769: 1016,
+ 0xA771: 1017,
+ 0xA773: 1018,
+ 0xA775: 1019,
+ 0xA7A1: 1020,
+ 0xA7A2: 1021,
+ 0xA7A5: 1022,
+ 0xA7A9: 1023,
+ 0xA7AB: 1024,
+ 0xA7B1: 1025,
+ 0xA7B3: 1026,
+ 0xA7B5: 1027,
+ 0xA7B7: 1028,
+ 0xA7B8: 1029,
+ 0xA7B9: 1030,
+ 0xA861: 1031,
+ 0xA862: 1032,
+ 0xA865: 1033,
+ 0xA869: 1034,
+ 0xA86B: 1035,
+ 0xA871: 1036,
+ 0xA873: 1037,
+ 0xA875: 1038,
+ 0xA876: 1039,
+ 0xA877: 1040,
+ 0xA87D: 1041,
+ 0xA881: 1042,
+ 0xA882: 1043,
+ 0xA885: 1044,
+ 0xA889: 1045,
+ 0xA891: 1046,
+ 0xA893: 1047,
+ 0xA895: 1048,
+ 0xA896: 1049,
+ 0xA897: 1050,
+ 0xA8A1: 1051,
+ 0xA8A2: 1052,
+ 0xA8B1: 1053,
+ 0xA8E1: 1054,
+ 0xA8E2: 1055,
+ 0xA8E5: 1056,
+ 0xA8E8: 1057,
+ 0xA8E9: 1058,
+ 0xA8F1: 1059,
+ 0xA8F5: 1060,
+ 0xA8F6: 1061,
+ 0xA8F7: 1062,
+ 0xA941: 1063,
+ 0xA957: 1064,
+ 0xA961: 1065,
+ 0xA962: 1066,
+ 0xA971: 1067,
+ 0xA973: 1068,
+ 0xA975: 1069,
+ 0xA976: 1070,
+ 0xA977: 1071,
+ 0xA9A1: 1072,
+ 0xA9A2: 1073,
+ 0xA9A5: 1074,
+ 0xA9A9: 1075,
+ 0xA9B1: 1076,
+ 0xA9B3: 1077,
+ 0xA9B7: 1078,
+ 0xAA41: 1079,
+ 0xAA61: 1080,
+ 0xAA77: 1081,
+ 0xAA81: 1082,
+ 0xAA82: 1083,
+ 0xAA85: 1084,
+ 0xAA89: 1085,
+ 0xAA91: 1086,
+ 0xAA95: 1087,
+ 0xAA97: 1088,
+ 0xAB41: 1089,
+ 0xAB57: 1090,
+ 0xAB61: 1091,
+ 0xAB65: 1092,
+ 0xAB69: 1093,
+ 0xAB71: 1094,
+ 0xAB73: 1095,
+ 0xABA1: 1096,
+ 0xABA2: 1097,
+ 0xABA5: 1098,
+ 0xABA9: 1099,
+ 0xABB1: 1100,
+ 0xABB3: 1101,
+ 0xABB5: 1102,
+ 0xABB7: 1103,
+ 0xAC61: 1104,
+ 0xAC62: 1105,
+ 0xAC64: 1106,
+ 0xAC65: 1107,
+ 0xAC68: 1108,
+ 0xAC69: 1109,
+ 0xAC6A: 1110,
+ 0xAC6B: 1111,
+ 0xAC71: 1112,
+ 0xAC73: 1113,
+ 0xAC75: 1114,
+ 0xAC76: 1115,
+ 0xAC77: 1116,
+ 0xAC7B: 1117,
+ 0xAC81: 1118,
+ 0xAC82: 1119,
+ 0xAC85: 1120,
+ 0xAC89: 1121,
+ 0xAC91: 1122,
+ 0xAC93: 1123,
+ 0xAC95: 1124,
+ 0xAC96: 1125,
+ 0xAC97: 1126,
+ 0xACA1: 1127,
+ 0xACA2: 1128,
+ 0xACA5: 1129,
+ 0xACA9: 1130,
+ 0xACB1: 1131,
+ 0xACB3: 1132,
+ 0xACB5: 1133,
+ 0xACB7: 1134,
+ 0xACC1: 1135,
+ 0xACC5: 1136,
+ 0xACC9: 1137,
+ 0xACD1: 1138,
+ 0xACD7: 1139,
+ 0xACE1: 1140,
+ 0xACE2: 1141,
+ 0xACE3: 1142,
+ 0xACE4: 1143,
+ 0xACE5: 1144,
+ 0xACE8: 1145,
+ 0xACE9: 1146,
+ 0xACEB: 1147,
+ 0xACEC: 1148,
+ 0xACF1: 1149,
+ 0xACF3: 1150,
+ 0xACF5: 1151,
+ 0xACF6: 1152,
+ 0xACF7: 1153,
+ 0xACFC: 1154,
+ 0xAD41: 1155,
+ 0xAD42: 1156,
+ 0xAD45: 1157,
+ 0xAD49: 1158,
+ 0xAD51: 1159,
+ 0xAD53: 1160,
+ 0xAD55: 1161,
+ 0xAD56: 1162,
+ 0xAD57: 1163,
+ 0xAD61: 1164,
+ 0xAD62: 1165,
+ 0xAD65: 1166,
+ 0xAD69: 1167,
+ 0xAD71: 1168,
+ 0xAD73: 1169,
+ 0xAD75: 1170,
+ 0xAD76: 1171,
+ 0xAD77: 1172,
+ 0xAD81: 1173,
+ 0xAD85: 1174,
+ 0xAD89: 1175,
+ 0xAD97: 1176,
+ 0xADA1: 1177,
+ 0xADA2: 1178,
+ 0xADA3: 1179,
+ 0xADA5: 1180,
+ 0xADA9: 1181,
+ 0xADAB: 1182,
+ 0xADB1: 1183,
+ 0xADB3: 1184,
+ 0xADB5: 1185,
+ 0xADB7: 1186,
+ 0xADBB: 1187,
+ 0xADC1: 1188,
+ 0xADC2: 1189,
+ 0xADC5: 1190,
+ 0xADC9: 1191,
+ 0xADD7: 1192,
+ 0xADE1: 1193,
+ 0xADE5: 1194,
+ 0xADE9: 1195,
+ 0xADF1: 1196,
+ 0xADF5: 1197,
+ 0xADF6: 1198,
+ 0xAE41: 1199,
+ 0xAE45: 1200,
+ 0xAE49: 1201,
+ 0xAE51: 1202,
+ 0xAE53: 1203,
+ 0xAE55: 1204,
+ 0xAE61: 1205,
+ 0xAE62: 1206,
+ 0xAE65: 1207,
+ 0xAE69: 1208,
+ 0xAE71: 1209,
+ 0xAE73: 1210,
+ 0xAE75: 1211,
+ 0xAE77: 1212,
+ 0xAE81: 1213,
+ 0xAE82: 1214,
+ 0xAE85: 1215,
+ 0xAE88: 1216,
+ 0xAE89: 1217,
+ 0xAE91: 1218,
+ 0xAE93: 1219,
+ 0xAE95: 1220,
+ 0xAE97: 1221,
+ 0xAE99: 1222,
+ 0xAE9B: 1223,
+ 0xAE9C: 1224,
+ 0xAEA1: 1225,
+ 0xAEB6: 1226,
+ 0xAEC1: 1227,
+ 0xAEC2: 1228,
+ 0xAEC5: 1229,
+ 0xAEC9: 1230,
+ 0xAED1: 1231,
+ 0xAED7: 1232,
+ 0xAEE1: 1233,
+ 0xAEE2: 1234,
+ 0xAEE5: 1235,
+ 0xAEE9: 1236,
+ 0xAEF1: 1237,
+ 0xAEF3: 1238,
+ 0xAEF5: 1239,
+ 0xAEF7: 1240,
+ 0xAF41: 1241,
+ 0xAF42: 1242,
+ 0xAF49: 1243,
+ 0xAF51: 1244,
+ 0xAF55: 1245,
+ 0xAF57: 1246,
+ 0xAF61: 1247,
+ 0xAF62: 1248,
+ 0xAF65: 1249,
+ 0xAF69: 1250,
+ 0xAF6A: 1251,
+ 0xAF71: 1252,
+ 0xAF73: 1253,
+ 0xAF75: 1254,
+ 0xAF77: 1255,
+ 0xAFA1: 1256,
+ 0xAFA2: 1257,
+ 0xAFA5: 1258,
+ 0xAFA8: 1259,
+ 0xAFA9: 1260,
+ 0xAFB0: 1261,
+ 0xAFB1: 1262,
+ 0xAFB3: 1263,
+ 0xAFB5: 1264,
+ 0xAFB7: 1265,
+ 0xAFBC: 1266,
+ 0xB061: 1267,
+ 0xB062: 1268,
+ 0xB064: 1269,
+ 0xB065: 1270,
+ 0xB069: 1271,
+ 0xB071: 1272,
+ 0xB073: 1273,
+ 0xB076: 1274,
+ 0xB077: 1275,
+ 0xB07D: 1276,
+ 0xB081: 1277,
+ 0xB082: 1278,
+ 0xB085: 1279,
+ 0xB089: 1280,
+ 0xB091: 1281,
+ 0xB093: 1282,
+ 0xB096: 1283,
+ 0xB097: 1284,
+ 0xB0B7: 1285,
+ 0xB0E1: 1286,
+ 0xB0E2: 1287,
+ 0xB0E5: 1288,
+ 0xB0E9: 1289,
+ 0xB0EB: 1290,
+ 0xB0F1: 1291,
+ 0xB0F3: 1292,
+ 0xB0F6: 1293,
+ 0xB0F7: 1294,
+ 0xB141: 1295,
+ 0xB145: 1296,
+ 0xB149: 1297,
+ 0xB185: 1298,
+ 0xB1A1: 1299,
+ 0xB1A2: 1300,
+ 0xB1A5: 1301,
+ 0xB1A8: 1302,
+ 0xB1A9: 1303,
+ 0xB1AB: 1304,
+ 0xB1B1: 1305,
+ 0xB1B3: 1306,
+ 0xB1B7: 1307,
+ 0xB1C1: 1308,
+ 0xB1C2: 1309,
+ 0xB1C5: 1310,
+ 0xB1D6: 1311,
+ 0xB1E1: 1312,
+ 0xB1F6: 1313,
+ 0xB241: 1314,
+ 0xB245: 1315,
+ 0xB249: 1316,
+ 0xB251: 1317,
+ 0xB253: 1318,
+ 0xB261: 1319,
+ 0xB281: 1320,
+ 0xB282: 1321,
+ 0xB285: 1322,
+ 0xB289: 1323,
+ 0xB291: 1324,
+ 0xB293: 1325,
+ 0xB297: 1326,
+ 0xB2A1: 1327,
+ 0xB2B6: 1328,
+ 0xB2C1: 1329,
+ 0xB2E1: 1330,
+ 0xB2E5: 1331,
+ 0xB357: 1332,
+ 0xB361: 1333,
+ 0xB362: 1334,
+ 0xB365: 1335,
+ 0xB369: 1336,
+ 0xB36B: 1337,
+ 0xB370: 1338,
+ 0xB371: 1339,
+ 0xB373: 1340,
+ 0xB381: 1341,
+ 0xB385: 1342,
+ 0xB389: 1343,
+ 0xB391: 1344,
+ 0xB3A1: 1345,
+ 0xB3A2: 1346,
+ 0xB3A5: 1347,
+ 0xB3A9: 1348,
+ 0xB3B1: 1349,
+ 0xB3B3: 1350,
+ 0xB3B5: 1351,
+ 0xB3B7: 1352,
+ 0xB461: 1353,
+ 0xB462: 1354,
+ 0xB465: 1355,
+ 0xB466: 1356,
+ 0xB467: 1357,
+ 0xB469: 1358,
+ 0xB46A: 1359,
+ 0xB46B: 1360,
+ 0xB470: 1361,
+ 0xB471: 1362,
+ 0xB473: 1363,
+ 0xB475: 1364,
+ 0xB476: 1365,
+ 0xB477: 1366,
+ 0xB47B: 1367,
+ 0xB47C: 1368,
+ 0xB481: 1369,
+ 0xB482: 1370,
+ 0xB485: 1371,
+ 0xB489: 1372,
+ 0xB491: 1373,
+ 0xB493: 1374,
+ 0xB495: 1375,
+ 0xB496: 1376,
+ 0xB497: 1377,
+ 0xB4A1: 1378,
+ 0xB4A2: 1379,
+ 0xB4A5: 1380,
+ 0xB4A9: 1381,
+ 0xB4AC: 1382,
+ 0xB4B1: 1383,
+ 0xB4B3: 1384,
+ 0xB4B5: 1385,
+ 0xB4B7: 1386,
+ 0xB4BB: 1387,
+ 0xB4BD: 1388,
+ 0xB4C1: 1389,
+ 0xB4C5: 1390,
+ 0xB4C9: 1391,
+ 0xB4D3: 1392,
+ 0xB4E1: 1393,
+ 0xB4E2: 1394,
+ 0xB4E5: 1395,
+ 0xB4E6: 1396,
+ 0xB4E8: 1397,
+ 0xB4E9: 1398,
+ 0xB4EA: 1399,
+ 0xB4EB: 1400,
+ 0xB4F1: 1401,
+ 0xB4F3: 1402,
+ 0xB4F4: 1403,
+ 0xB4F5: 1404,
+ 0xB4F6: 1405,
+ 0xB4F7: 1406,
+ 0xB4F8: 1407,
+ 0xB4FA: 1408,
+ 0xB4FC: 1409,
+ 0xB541: 1410,
+ 0xB542: 1411,
+ 0xB545: 1412,
+ 0xB549: 1413,
+ 0xB551: 1414,
+ 0xB553: 1415,
+ 0xB555: 1416,
+ 0xB557: 1417,
+ 0xB561: 1418,
+ 0xB562: 1419,
+ 0xB563: 1420,
+ 0xB565: 1421,
+ 0xB569: 1422,
+ 0xB56B: 1423,
+ 0xB56C: 1424,
+ 0xB571: 1425,
+ 0xB573: 1426,
+ 0xB574: 1427,
+ 0xB575: 1428,
+ 0xB576: 1429,
+ 0xB577: 1430,
+ 0xB57B: 1431,
+ 0xB57C: 1432,
+ 0xB57D: 1433,
+ 0xB581: 1434,
+ 0xB585: 1435,
+ 0xB589: 1436,
+ 0xB591: 1437,
+ 0xB593: 1438,
+ 0xB595: 1439,
+ 0xB596: 1440,
+ 0xB5A1: 1441,
+ 0xB5A2: 1442,
+ 0xB5A5: 1443,
+ 0xB5A9: 1444,
+ 0xB5AA: 1445,
+ 0xB5AB: 1446,
+ 0xB5AD: 1447,
+ 0xB5B0: 1448,
+ 0xB5B1: 1449,
+ 0xB5B3: 1450,
+ 0xB5B5: 1451,
+ 0xB5B7: 1452,
+ 0xB5B9: 1453,
+ 0xB5C1: 1454,
+ 0xB5C2: 1455,
+ 0xB5C5: 1456,
+ 0xB5C9: 1457,
+ 0xB5D1: 1458,
+ 0xB5D3: 1459,
+ 0xB5D5: 1460,
+ 0xB5D6: 1461,
+ 0xB5D7: 1462,
+ 0xB5E1: 1463,
+ 0xB5E2: 1464,
+ 0xB5E5: 1465,
+ 0xB5F1: 1466,
+ 0xB5F5: 1467,
+ 0xB5F7: 1468,
+ 0xB641: 1469,
+ 0xB642: 1470,
+ 0xB645: 1471,
+ 0xB649: 1472,
+ 0xB651: 1473,
+ 0xB653: 1474,
+ 0xB655: 1475,
+ 0xB657: 1476,
+ 0xB661: 1477,
+ 0xB662: 1478,
+ 0xB665: 1479,
+ 0xB669: 1480,
+ 0xB671: 1481,
+ 0xB673: 1482,
+ 0xB675: 1483,
+ 0xB677: 1484,
+ 0xB681: 1485,
+ 0xB682: 1486,
+ 0xB685: 1487,
+ 0xB689: 1488,
+ 0xB68A: 1489,
+ 0xB68B: 1490,
+ 0xB691: 1491,
+ 0xB693: 1492,
+ 0xB695: 1493,
+ 0xB697: 1494,
+ 0xB6A1: 1495,
+ 0xB6A2: 1496,
+ 0xB6A5: 1497,
+ 0xB6A9: 1498,
+ 0xB6B1: 1499,
+ 0xB6B3: 1500,
+ 0xB6B6: 1501,
+ 0xB6B7: 1502,
+ 0xB6C1: 1503,
+ 0xB6C2: 1504,
+ 0xB6C5: 1505,
+ 0xB6C9: 1506,
+ 0xB6D1: 1507,
+ 0xB6D3: 1508,
+ 0xB6D7: 1509,
+ 0xB6E1: 1510,
+ 0xB6E2: 1511,
+ 0xB6E5: 1512,
+ 0xB6E9: 1513,
+ 0xB6F1: 1514,
+ 0xB6F3: 1515,
+ 0xB6F5: 1516,
+ 0xB6F7: 1517,
+ 0xB741: 1518,
+ 0xB742: 1519,
+ 0xB745: 1520,
+ 0xB749: 1521,
+ 0xB751: 1522,
+ 0xB753: 1523,
+ 0xB755: 1524,
+ 0xB757: 1525,
+ 0xB759: 1526,
+ 0xB761: 1527,
+ 0xB762: 1528,
+ 0xB765: 1529,
+ 0xB769: 1530,
+ 0xB76F: 1531,
+ 0xB771: 1532,
+ 0xB773: 1533,
+ 0xB775: 1534,
+ 0xB777: 1535,
+ 0xB778: 1536,
+ 0xB779: 1537,
+ 0xB77A: 1538,
+ 0xB77B: 1539,
+ 0xB77C: 1540,
+ 0xB77D: 1541,
+ 0xB781: 1542,
+ 0xB785: 1543,
+ 0xB789: 1544,
+ 0xB791: 1545,
+ 0xB795: 1546,
+ 0xB7A1: 1547,
+ 0xB7A2: 1548,
+ 0xB7A5: 1549,
+ 0xB7A9: 1550,
+ 0xB7AA: 1551,
+ 0xB7AB: 1552,
+ 0xB7B0: 1553,
+ 0xB7B1: 1554,
+ 0xB7B3: 1555,
+ 0xB7B5: 1556,
+ 0xB7B6: 1557,
+ 0xB7B7: 1558,
+ 0xB7B8: 1559,
+ 0xB7BC: 1560,
+ 0xB861: 1561,
+ 0xB862: 1562,
+ 0xB865: 1563,
+ 0xB867: 1564,
+ 0xB868: 1565,
+ 0xB869: 1566,
+ 0xB86B: 1567,
+ 0xB871: 1568,
+ 0xB873: 1569,
+ 0xB875: 1570,
+ 0xB876: 1571,
+ 0xB877: 1572,
+ 0xB878: 1573,
+ 0xB881: 1574,
+ 0xB882: 1575,
+ 0xB885: 1576,
+ 0xB889: 1577,
+ 0xB891: 1578,
+ 0xB893: 1579,
+ 0xB895: 1580,
+ 0xB896: 1581,
+ 0xB897: 1582,
+ 0xB8A1: 1583,
+ 0xB8A2: 1584,
+ 0xB8A5: 1585,
+ 0xB8A7: 1586,
+ 0xB8A9: 1587,
+ 0xB8B1: 1588,
+ 0xB8B7: 1589,
+ 0xB8C1: 1590,
+ 0xB8C5: 1591,
+ 0xB8C9: 1592,
+ 0xB8E1: 1593,
+ 0xB8E2: 1594,
+ 0xB8E5: 1595,
+ 0xB8E9: 1596,
+ 0xB8EB: 1597,
+ 0xB8F1: 1598,
+ 0xB8F3: 1599,
+ 0xB8F5: 1600,
+ 0xB8F7: 1601,
+ 0xB8F8: 1602,
+ 0xB941: 1603,
+ 0xB942: 1604,
+ 0xB945: 1605,
+ 0xB949: 1606,
+ 0xB951: 1607,
+ 0xB953: 1608,
+ 0xB955: 1609,
+ 0xB957: 1610,
+ 0xB961: 1611,
+ 0xB965: 1612,
+ 0xB969: 1613,
+ 0xB971: 1614,
+ 0xB973: 1615,
+ 0xB976: 1616,
+ 0xB977: 1617,
+ 0xB981: 1618,
+ 0xB9A1: 1619,
+ 0xB9A2: 1620,
+ 0xB9A5: 1621,
+ 0xB9A9: 1622,
+ 0xB9AB: 1623,
+ 0xB9B1: 1624,
+ 0xB9B3: 1625,
+ 0xB9B5: 1626,
+ 0xB9B7: 1627,
+ 0xB9B8: 1628,
+ 0xB9B9: 1629,
+ 0xB9BD: 1630,
+ 0xB9C1: 1631,
+ 0xB9C2: 1632,
+ 0xB9C9: 1633,
+ 0xB9D3: 1634,
+ 0xB9D5: 1635,
+ 0xB9D7: 1636,
+ 0xB9E1: 1637,
+ 0xB9F6: 1638,
+ 0xB9F7: 1639,
+ 0xBA41: 1640,
+ 0xBA45: 1641,
+ 0xBA49: 1642,
+ 0xBA51: 1643,
+ 0xBA53: 1644,
+ 0xBA55: 1645,
+ 0xBA57: 1646,
+ 0xBA61: 1647,
+ 0xBA62: 1648,
+ 0xBA65: 1649,
+ 0xBA77: 1650,
+ 0xBA81: 1651,
+ 0xBA82: 1652,
+ 0xBA85: 1653,
+ 0xBA89: 1654,
+ 0xBA8A: 1655,
+ 0xBA8B: 1656,
+ 0xBA91: 1657,
+ 0xBA93: 1658,
+ 0xBA95: 1659,
+ 0xBA97: 1660,
+ 0xBAA1: 1661,
+ 0xBAB6: 1662,
+ 0xBAC1: 1663,
+ 0xBAE1: 1664,
+ 0xBAE2: 1665,
+ 0xBAE5: 1666,
+ 0xBAE9: 1667,
+ 0xBAF1: 1668,
+ 0xBAF3: 1669,
+ 0xBAF5: 1670,
+ 0xBB41: 1671,
+ 0xBB45: 1672,
+ 0xBB49: 1673,
+ 0xBB51: 1674,
+ 0xBB61: 1675,
+ 0xBB62: 1676,
+ 0xBB65: 1677,
+ 0xBB69: 1678,
+ 0xBB71: 1679,
+ 0xBB73: 1680,
+ 0xBB75: 1681,
+ 0xBB77: 1682,
+ 0xBBA1: 1683,
+ 0xBBA2: 1684,
+ 0xBBA5: 1685,
+ 0xBBA8: 1686,
+ 0xBBA9: 1687,
+ 0xBBAB: 1688,
+ 0xBBB1: 1689,
+ 0xBBB3: 1690,
+ 0xBBB5: 1691,
+ 0xBBB7: 1692,
+ 0xBBB8: 1693,
+ 0xBBBB: 1694,
+ 0xBBBC: 1695,
+ 0xBC61: 1696,
+ 0xBC62: 1697,
+ 0xBC65: 1698,
+ 0xBC67: 1699,
+ 0xBC69: 1700,
+ 0xBC6C: 1701,
+ 0xBC71: 1702,
+ 0xBC73: 1703,
+ 0xBC75: 1704,
+ 0xBC76: 1705,
+ 0xBC77: 1706,
+ 0xBC81: 1707,
+ 0xBC82: 1708,
+ 0xBC85: 1709,
+ 0xBC89: 1710,
+ 0xBC91: 1711,
+ 0xBC93: 1712,
+ 0xBC95: 1713,
+ 0xBC96: 1714,
+ 0xBC97: 1715,
+ 0xBCA1: 1716,
+ 0xBCA5: 1717,
+ 0xBCB7: 1718,
+ 0xBCE1: 1719,
+ 0xBCE2: 1720,
+ 0xBCE5: 1721,
+ 0xBCE9: 1722,
+ 0xBCF1: 1723,
+ 0xBCF3: 1724,
+ 0xBCF5: 1725,
+ 0xBCF6: 1726,
+ 0xBCF7: 1727,
+ 0xBD41: 1728,
+ 0xBD57: 1729,
+ 0xBD61: 1730,
+ 0xBD76: 1731,
+ 0xBDA1: 1732,
+ 0xBDA2: 1733,
+ 0xBDA5: 1734,
+ 0xBDA9: 1735,
+ 0xBDB1: 1736,
+ 0xBDB3: 1737,
+ 0xBDB5: 1738,
+ 0xBDB7: 1739,
+ 0xBDB9: 1740,
+ 0xBDC1: 1741,
+ 0xBDC2: 1742,
+ 0xBDC9: 1743,
+ 0xBDD6: 1744,
+ 0xBDE1: 1745,
+ 0xBDF6: 1746,
+ 0xBE41: 1747,
+ 0xBE45: 1748,
+ 0xBE49: 1749,
+ 0xBE51: 1750,
+ 0xBE53: 1751,
+ 0xBE77: 1752,
+ 0xBE81: 1753,
+ 0xBE82: 1754,
+ 0xBE85: 1755,
+ 0xBE89: 1756,
+ 0xBE91: 1757,
+ 0xBE93: 1758,
+ 0xBE97: 1759,
+ 0xBEA1: 1760,
+ 0xBEB6: 1761,
+ 0xBEB7: 1762,
+ 0xBEE1: 1763,
+ 0xBF41: 1764,
+ 0xBF61: 1765,
+ 0xBF71: 1766,
+ 0xBF75: 1767,
+ 0xBF77: 1768,
+ 0xBFA1: 1769,
+ 0xBFA2: 1770,
+ 0xBFA5: 1771,
+ 0xBFA9: 1772,
+ 0xBFB1: 1773,
+ 0xBFB3: 1774,
+ 0xBFB7: 1775,
+ 0xBFB8: 1776,
+ 0xBFBD: 1777,
+ 0xC061: 1778,
+ 0xC062: 1779,
+ 0xC065: 1780,
+ 0xC067: 1781,
+ 0xC069: 1782,
+ 0xC071: 1783,
+ 0xC073: 1784,
+ 0xC075: 1785,
+ 0xC076: 1786,
+ 0xC077: 1787,
+ 0xC078: 1788,
+ 0xC081: 1789,
+ 0xC082: 1790,
+ 0xC085: 1791,
+ 0xC089: 1792,
+ 0xC091: 1793,
+ 0xC093: 1794,
+ 0xC095: 1795,
+ 0xC096: 1796,
+ 0xC097: 1797,
+ 0xC0A1: 1798,
+ 0xC0A5: 1799,
+ 0xC0A7: 1800,
+ 0xC0A9: 1801,
+ 0xC0B1: 1802,
+ 0xC0B7: 1803,
+ 0xC0E1: 1804,
+ 0xC0E2: 1805,
+ 0xC0E5: 1806,
+ 0xC0E9: 1807,
+ 0xC0F1: 1808,
+ 0xC0F3: 1809,
+ 0xC0F5: 1810,
+ 0xC0F6: 1811,
+ 0xC0F7: 1812,
+ 0xC141: 1813,
+ 0xC142: 1814,
+ 0xC145: 1815,
+ 0xC149: 1816,
+ 0xC151: 1817,
+ 0xC153: 1818,
+ 0xC155: 1819,
+ 0xC157: 1820,
+ 0xC161: 1821,
+ 0xC165: 1822,
+ 0xC176: 1823,
+ 0xC181: 1824,
+ 0xC185: 1825,
+ 0xC197: 1826,
+ 0xC1A1: 1827,
+ 0xC1A2: 1828,
+ 0xC1A5: 1829,
+ 0xC1A9: 1830,
+ 0xC1B1: 1831,
+ 0xC1B3: 1832,
+ 0xC1B5: 1833,
+ 0xC1B7: 1834,
+ 0xC1C1: 1835,
+ 0xC1C5: 1836,
+ 0xC1C9: 1837,
+ 0xC1D7: 1838,
+ 0xC241: 1839,
+ 0xC245: 1840,
+ 0xC249: 1841,
+ 0xC251: 1842,
+ 0xC253: 1843,
+ 0xC255: 1844,
+ 0xC257: 1845,
+ 0xC261: 1846,
+ 0xC271: 1847,
+ 0xC281: 1848,
+ 0xC282: 1849,
+ 0xC285: 1850,
+ 0xC289: 1851,
+ 0xC291: 1852,
+ 0xC293: 1853,
+ 0xC295: 1854,
+ 0xC297: 1855,
+ 0xC2A1: 1856,
+ 0xC2B6: 1857,
+ 0xC2C1: 1858,
+ 0xC2C5: 1859,
+ 0xC2E1: 1860,
+ 0xC2E5: 1861,
+ 0xC2E9: 1862,
+ 0xC2F1: 1863,
+ 0xC2F3: 1864,
+ 0xC2F5: 1865,
+ 0xC2F7: 1866,
+ 0xC341: 1867,
+ 0xC345: 1868,
+ 0xC349: 1869,
+ 0xC351: 1870,
+ 0xC357: 1871,
+ 0xC361: 1872,
+ 0xC362: 1873,
+ 0xC365: 1874,
+ 0xC369: 1875,
+ 0xC371: 1876,
+ 0xC373: 1877,
+ 0xC375: 1878,
+ 0xC377: 1879,
+ 0xC3A1: 1880,
+ 0xC3A2: 1881,
+ 0xC3A5: 1882,
+ 0xC3A8: 1883,
+ 0xC3A9: 1884,
+ 0xC3AA: 1885,
+ 0xC3B1: 1886,
+ 0xC3B3: 1887,
+ 0xC3B5: 1888,
+ 0xC3B7: 1889,
+ 0xC461: 1890,
+ 0xC462: 1891,
+ 0xC465: 1892,
+ 0xC469: 1893,
+ 0xC471: 1894,
+ 0xC473: 1895,
+ 0xC475: 1896,
+ 0xC477: 1897,
+ 0xC481: 1898,
+ 0xC482: 1899,
+ 0xC485: 1900,
+ 0xC489: 1901,
+ 0xC491: 1902,
+ 0xC493: 1903,
+ 0xC495: 1904,
+ 0xC496: 1905,
+ 0xC497: 1906,
+ 0xC4A1: 1907,
+ 0xC4A2: 1908,
+ 0xC4B7: 1909,
+ 0xC4E1: 1910,
+ 0xC4E2: 1911,
+ 0xC4E5: 1912,
+ 0xC4E8: 1913,
+ 0xC4E9: 1914,
+ 0xC4F1: 1915,
+ 0xC4F3: 1916,
+ 0xC4F5: 1917,
+ 0xC4F6: 1918,
+ 0xC4F7: 1919,
+ 0xC541: 1920,
+ 0xC542: 1921,
+ 0xC545: 1922,
+ 0xC549: 1923,
+ 0xC551: 1924,
+ 0xC553: 1925,
+ 0xC555: 1926,
+ 0xC557: 1927,
+ 0xC561: 1928,
+ 0xC565: 1929,
+ 0xC569: 1930,
+ 0xC571: 1931,
+ 0xC573: 1932,
+ 0xC575: 1933,
+ 0xC576: 1934,
+ 0xC577: 1935,
+ 0xC581: 1936,
+ 0xC5A1: 1937,
+ 0xC5A2: 1938,
+ 0xC5A5: 1939,
+ 0xC5A9: 1940,
+ 0xC5B1: 1941,
+ 0xC5B3: 1942,
+ 0xC5B5: 1943,
+ 0xC5B7: 1944,
+ 0xC5C1: 1945,
+ 0xC5C2: 1946,
+ 0xC5C5: 1947,
+ 0xC5C9: 1948,
+ 0xC5D1: 1949,
+ 0xC5D7: 1950,
+ 0xC5E1: 1951,
+ 0xC5F7: 1952,
+ 0xC641: 1953,
+ 0xC649: 1954,
+ 0xC661: 1955,
+ 0xC681: 1956,
+ 0xC682: 1957,
+ 0xC685: 1958,
+ 0xC689: 1959,
+ 0xC691: 1960,
+ 0xC693: 1961,
+ 0xC695: 1962,
+ 0xC697: 1963,
+ 0xC6A1: 1964,
+ 0xC6A5: 1965,
+ 0xC6A9: 1966,
+ 0xC6B7: 1967,
+ 0xC6C1: 1968,
+ 0xC6D7: 1969,
+ 0xC6E1: 1970,
+ 0xC6E2: 1971,
+ 0xC6E5: 1972,
+ 0xC6E9: 1973,
+ 0xC6F1: 1974,
+ 0xC6F3: 1975,
+ 0xC6F5: 1976,
+ 0xC6F7: 1977,
+ 0xC741: 1978,
+ 0xC745: 1979,
+ 0xC749: 1980,
+ 0xC751: 1981,
+ 0xC761: 1982,
+ 0xC762: 1983,
+ 0xC765: 1984,
+ 0xC769: 1985,
+ 0xC771: 1986,
+ 0xC773: 1987,
+ 0xC777: 1988,
+ 0xC7A1: 1989,
+ 0xC7A2: 1990,
+ 0xC7A5: 1991,
+ 0xC7A9: 1992,
+ 0xC7B1: 1993,
+ 0xC7B3: 1994,
+ 0xC7B5: 1995,
+ 0xC7B7: 1996,
+ 0xC861: 1997,
+ 0xC862: 1998,
+ 0xC865: 1999,
+ 0xC869: 2000,
+ 0xC86A: 2001,
+ 0xC871: 2002,
+ 0xC873: 2003,
+ 0xC875: 2004,
+ 0xC876: 2005,
+ 0xC877: 2006,
+ 0xC881: 2007,
+ 0xC882: 2008,
+ 0xC885: 2009,
+ 0xC889: 2010,
+ 0xC891: 2011,
+ 0xC893: 2012,
+ 0xC895: 2013,
+ 0xC896: 2014,
+ 0xC897: 2015,
+ 0xC8A1: 2016,
+ 0xC8B7: 2017,
+ 0xC8E1: 2018,
+ 0xC8E2: 2019,
+ 0xC8E5: 2020,
+ 0xC8E9: 2021,
+ 0xC8EB: 2022,
+ 0xC8F1: 2023,
+ 0xC8F3: 2024,
+ 0xC8F5: 2025,
+ 0xC8F6: 2026,
+ 0xC8F7: 2027,
+ 0xC941: 2028,
+ 0xC942: 2029,
+ 0xC945: 2030,
+ 0xC949: 2031,
+ 0xC951: 2032,
+ 0xC953: 2033,
+ 0xC955: 2034,
+ 0xC957: 2035,
+ 0xC961: 2036,
+ 0xC965: 2037,
+ 0xC976: 2038,
+ 0xC981: 2039,
+ 0xC985: 2040,
+ 0xC9A1: 2041,
+ 0xC9A2: 2042,
+ 0xC9A5: 2043,
+ 0xC9A9: 2044,
+ 0xC9B1: 2045,
+ 0xC9B3: 2046,
+ 0xC9B5: 2047,
+ 0xC9B7: 2048,
+ 0xC9BC: 2049,
+ 0xC9C1: 2050,
+ 0xC9C5: 2051,
+ 0xC9E1: 2052,
+ 0xCA41: 2053,
+ 0xCA45: 2054,
+ 0xCA55: 2055,
+ 0xCA57: 2056,
+ 0xCA61: 2057,
+ 0xCA81: 2058,
+ 0xCA82: 2059,
+ 0xCA85: 2060,
+ 0xCA89: 2061,
+ 0xCA91: 2062,
+ 0xCA93: 2063,
+ 0xCA95: 2064,
+ 0xCA97: 2065,
+ 0xCAA1: 2066,
+ 0xCAB6: 2067,
+ 0xCAC1: 2068,
+ 0xCAE1: 2069,
+ 0xCAE2: 2070,
+ 0xCAE5: 2071,
+ 0xCAE9: 2072,
+ 0xCAF1: 2073,
+ 0xCAF3: 2074,
+ 0xCAF7: 2075,
+ 0xCB41: 2076,
+ 0xCB45: 2077,
+ 0xCB49: 2078,
+ 0xCB51: 2079,
+ 0xCB57: 2080,
+ 0xCB61: 2081,
+ 0xCB62: 2082,
+ 0xCB65: 2083,
+ 0xCB68: 2084,
+ 0xCB69: 2085,
+ 0xCB6B: 2086,
+ 0xCB71: 2087,
+ 0xCB73: 2088,
+ 0xCB75: 2089,
+ 0xCB81: 2090,
+ 0xCB85: 2091,
+ 0xCB89: 2092,
+ 0xCB91: 2093,
+ 0xCB93: 2094,
+ 0xCBA1: 2095,
+ 0xCBA2: 2096,
+ 0xCBA5: 2097,
+ 0xCBA9: 2098,
+ 0xCBB1: 2099,
+ 0xCBB3: 2100,
+ 0xCBB5: 2101,
+ 0xCBB7: 2102,
+ 0xCC61: 2103,
+ 0xCC62: 2104,
+ 0xCC63: 2105,
+ 0xCC65: 2106,
+ 0xCC69: 2107,
+ 0xCC6B: 2108,
+ 0xCC71: 2109,
+ 0xCC73: 2110,
+ 0xCC75: 2111,
+ 0xCC76: 2112,
+ 0xCC77: 2113,
+ 0xCC7B: 2114,
+ 0xCC81: 2115,
+ 0xCC82: 2116,
+ 0xCC85: 2117,
+ 0xCC89: 2118,
+ 0xCC91: 2119,
+ 0xCC93: 2120,
+ 0xCC95: 2121,
+ 0xCC96: 2122,
+ 0xCC97: 2123,
+ 0xCCA1: 2124,
+ 0xCCA2: 2125,
+ 0xCCE1: 2126,
+ 0xCCE2: 2127,
+ 0xCCE5: 2128,
+ 0xCCE9: 2129,
+ 0xCCF1: 2130,
+ 0xCCF3: 2131,
+ 0xCCF5: 2132,
+ 0xCCF6: 2133,
+ 0xCCF7: 2134,
+ 0xCD41: 2135,
+ 0xCD42: 2136,
+ 0xCD45: 2137,
+ 0xCD49: 2138,
+ 0xCD51: 2139,
+ 0xCD53: 2140,
+ 0xCD55: 2141,
+ 0xCD57: 2142,
+ 0xCD61: 2143,
+ 0xCD65: 2144,
+ 0xCD69: 2145,
+ 0xCD71: 2146,
+ 0xCD73: 2147,
+ 0xCD76: 2148,
+ 0xCD77: 2149,
+ 0xCD81: 2150,
+ 0xCD89: 2151,
+ 0xCD93: 2152,
+ 0xCD95: 2153,
+ 0xCDA1: 2154,
+ 0xCDA2: 2155,
+ 0xCDA5: 2156,
+ 0xCDA9: 2157,
+ 0xCDB1: 2158,
+ 0xCDB3: 2159,
+ 0xCDB5: 2160,
+ 0xCDB7: 2161,
+ 0xCDC1: 2162,
+ 0xCDD7: 2163,
+ 0xCE41: 2164,
+ 0xCE45: 2165,
+ 0xCE61: 2166,
+ 0xCE65: 2167,
+ 0xCE69: 2168,
+ 0xCE73: 2169,
+ 0xCE75: 2170,
+ 0xCE81: 2171,
+ 0xCE82: 2172,
+ 0xCE85: 2173,
+ 0xCE88: 2174,
+ 0xCE89: 2175,
+ 0xCE8B: 2176,
+ 0xCE91: 2177,
+ 0xCE93: 2178,
+ 0xCE95: 2179,
+ 0xCE97: 2180,
+ 0xCEA1: 2181,
+ 0xCEB7: 2182,
+ 0xCEE1: 2183,
+ 0xCEE5: 2184,
+ 0xCEE9: 2185,
+ 0xCEF1: 2186,
+ 0xCEF5: 2187,
+ 0xCF41: 2188,
+ 0xCF45: 2189,
+ 0xCF49: 2190,
+ 0xCF51: 2191,
+ 0xCF55: 2192,
+ 0xCF57: 2193,
+ 0xCF61: 2194,
+ 0xCF65: 2195,
+ 0xCF69: 2196,
+ 0xCF71: 2197,
+ 0xCF73: 2198,
+ 0xCF75: 2199,
+ 0xCFA1: 2200,
+ 0xCFA2: 2201,
+ 0xCFA5: 2202,
+ 0xCFA9: 2203,
+ 0xCFB1: 2204,
+ 0xCFB3: 2205,
+ 0xCFB5: 2206,
+ 0xCFB7: 2207,
+ 0xD061: 2208,
+ 0xD062: 2209,
+ 0xD065: 2210,
+ 0xD069: 2211,
+ 0xD06E: 2212,
+ 0xD071: 2213,
+ 0xD073: 2214,
+ 0xD075: 2215,
+ 0xD077: 2216,
+ 0xD081: 2217,
+ 0xD082: 2218,
+ 0xD085: 2219,
+ 0xD089: 2220,
+ 0xD091: 2221,
+ 0xD093: 2222,
+ 0xD095: 2223,
+ 0xD096: 2224,
+ 0xD097: 2225,
+ 0xD0A1: 2226,
+ 0xD0B7: 2227,
+ 0xD0E1: 2228,
+ 0xD0E2: 2229,
+ 0xD0E5: 2230,
+ 0xD0E9: 2231,
+ 0xD0EB: 2232,
+ 0xD0F1: 2233,
+ 0xD0F3: 2234,
+ 0xD0F5: 2235,
+ 0xD0F7: 2236,
+ 0xD141: 2237,
+ 0xD142: 2238,
+ 0xD145: 2239,
+ 0xD149: 2240,
+ 0xD151: 2241,
+ 0xD153: 2242,
+ 0xD155: 2243,
+ 0xD157: 2244,
+ 0xD161: 2245,
+ 0xD162: 2246,
+ 0xD165: 2247,
+ 0xD169: 2248,
+ 0xD171: 2249,
+ 0xD173: 2250,
+ 0xD175: 2251,
+ 0xD176: 2252,
+ 0xD177: 2253,
+ 0xD181: 2254,
+ 0xD185: 2255,
+ 0xD189: 2256,
+ 0xD193: 2257,
+ 0xD1A1: 2258,
+ 0xD1A2: 2259,
+ 0xD1A5: 2260,
+ 0xD1A9: 2261,
+ 0xD1AE: 2262,
+ 0xD1B1: 2263,
+ 0xD1B3: 2264,
+ 0xD1B5: 2265,
+ 0xD1B7: 2266,
+ 0xD1BB: 2267,
+ 0xD1C1: 2268,
+ 0xD1C2: 2269,
+ 0xD1C5: 2270,
+ 0xD1C9: 2271,
+ 0xD1D5: 2272,
+ 0xD1D7: 2273,
+ 0xD1E1: 2274,
+ 0xD1E2: 2275,
+ 0xD1E5: 2276,
+ 0xD1F5: 2277,
+ 0xD1F7: 2278,
+ 0xD241: 2279,
+ 0xD242: 2280,
+ 0xD245: 2281,
+ 0xD249: 2282,
+ 0xD253: 2283,
+ 0xD255: 2284,
+ 0xD257: 2285,
+ 0xD261: 2286,
+ 0xD265: 2287,
+ 0xD269: 2288,
+ 0xD273: 2289,
+ 0xD275: 2290,
+ 0xD281: 2291,
+ 0xD282: 2292,
+ 0xD285: 2293,
+ 0xD289: 2294,
+ 0xD28E: 2295,
+ 0xD291: 2296,
+ 0xD295: 2297,
+ 0xD297: 2298,
+ 0xD2A1: 2299,
+ 0xD2A5: 2300,
+ 0xD2A9: 2301,
+ 0xD2B1: 2302,
+ 0xD2B7: 2303,
+ 0xD2C1: 2304,
+ 0xD2C2: 2305,
+ 0xD2C5: 2306,
+ 0xD2C9: 2307,
+ 0xD2D7: 2308,
+ 0xD2E1: 2309,
+ 0xD2E2: 2310,
+ 0xD2E5: 2311,
+ 0xD2E9: 2312,
+ 0xD2F1: 2313,
+ 0xD2F3: 2314,
+ 0xD2F5: 2315,
+ 0xD2F7: 2316,
+ 0xD341: 2317,
+ 0xD342: 2318,
+ 0xD345: 2319,
+ 0xD349: 2320,
+ 0xD351: 2321,
+ 0xD355: 2322,
+ 0xD357: 2323,
+ 0xD361: 2324,
+ 0xD362: 2325,
+ 0xD365: 2326,
+ 0xD367: 2327,
+ 0xD368: 2328,
+ 0xD369: 2329,
+ 0xD36A: 2330,
+ 0xD371: 2331,
+ 0xD373: 2332,
+ 0xD375: 2333,
+ 0xD377: 2334,
+ 0xD37B: 2335,
+ 0xD381: 2336,
+ 0xD385: 2337,
+ 0xD389: 2338,
+ 0xD391: 2339,
+ 0xD393: 2340,
+ 0xD397: 2341,
+ 0xD3A1: 2342,
+ 0xD3A2: 2343,
+ 0xD3A5: 2344,
+ 0xD3A9: 2345,
+ 0xD3B1: 2346,
+ 0xD3B3: 2347,
+ 0xD3B5: 2348,
+ 0xD3B7: 2349,
+}
diff --git a/third_party/python/pip/pip/_vendor/chardet/johabprober.py b/third_party/python/pip/pip/_vendor/chardet/johabprober.py
new file mode 100644
index 0000000000..d7364ba61e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/johabprober.py
@@ -0,0 +1,47 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .chardistribution import JOHABDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import JOHAB_SM_MODEL
+
+
+class JOHABProber(MultiByteCharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(JOHAB_SM_MODEL)
+ self.distribution_analyzer = JOHABDistributionAnalysis()
+ self.reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "Johab"
+
+ @property
+ def language(self) -> str:
+ return "Korean"
diff --git a/third_party/python/pip/pip/_vendor/chardet/jpcntx.py b/third_party/python/pip/pip/_vendor/chardet/jpcntx.py
new file mode 100644
index 0000000000..2f53bdda09
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/jpcntx.py
@@ -0,0 +1,238 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Communicator client code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import List, Tuple, Union
+
+# This is the hiragana 2-char sequence table; the number in each cell represents its frequency category.
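+# As a hedged sketch of how such a table is typically consulted (the
+# variable names are illustrative, not this module's API): with the
+# hiragana orders of the previous and current characters in hand, the
+# frequency category is a two-level index, e.g.
+#
+#     category = jp2_char_context[prev_order][cur_order]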
+# fmt: off
+jp2_char_context = (
+ (0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1),
+ (2, 4, 0, 4, 0, 3, 0, 4, 0, 3, 4, 4, 4, 2, 4, 3, 3, 4, 3, 2, 3, 3, 4, 2, 3, 3, 3, 2, 4, 1, 4, 3, 3, 1, 5, 4, 3, 4, 3, 4, 3, 5, 3, 0, 3, 5, 4, 2, 0, 3, 1, 0, 3, 3, 0, 3, 3, 0, 1, 1, 0, 4, 3, 0, 3, 3, 0, 4, 0, 2, 0, 3, 5, 5, 5, 5, 4, 0, 4, 1, 0, 3, 4),
+ (0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2),
+ (0, 4, 0, 5, 0, 5, 0, 4, 0, 4, 5, 4, 4, 3, 5, 3, 5, 1, 5, 3, 4, 3, 4, 4, 3, 4, 3, 3, 4, 3, 5, 4, 4, 3, 5, 5, 3, 5, 5, 5, 3, 5, 5, 3, 4, 5, 5, 3, 1, 3, 2, 0, 3, 4, 0, 4, 2, 0, 4, 2, 1, 5, 3, 2, 3, 5, 0, 4, 0, 2, 0, 5, 4, 4, 5, 4, 5, 0, 4, 0, 0, 4, 4),
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ (0, 3, 0, 4, 0, 3, 0, 3, 0, 4, 5, 4, 3, 3, 3, 3, 4, 3, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 4, 4, 4, 4, 5, 3, 4, 4, 3, 4, 5, 5, 4, 5, 5, 1, 4, 5, 4, 3, 0, 3, 3, 1, 3, 3, 0, 4, 4, 0, 3, 3, 1, 5, 3, 3, 3, 5, 0, 4, 0, 3, 0, 4, 4, 3, 4, 3, 3, 0, 4, 1, 1, 3, 4),
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ (0, 4, 0, 3, 0, 3, 0, 4, 0, 3, 4, 4, 3, 2, 2, 1, 2, 1, 3, 1, 3, 3, 3, 3, 3, 4, 3, 1, 3, 3, 5, 3, 3, 0, 4, 3, 0, 5, 4, 3, 3, 5, 4, 4, 3, 4, 4, 5, 0, 1, 2, 0, 1, 2, 0, 2, 2, 0, 1, 0, 0, 5, 2, 2, 1, 4, 0, 3, 0, 1, 0, 4, 4, 3, 5, 4, 3, 0, 2, 1, 0, 4, 3),
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ (0, 3, 0, 5, 0, 4, 0, 2, 1, 4, 4, 2, 4, 1, 4, 2, 4, 2, 4, 3, 3, 3, 4, 3, 3, 3, 3, 1, 4, 2, 3, 3, 3, 1, 4, 4, 1, 1, 1, 4, 3, 3, 2, 0, 2, 4, 3, 2, 0, 3, 3, 0, 3, 1, 1, 0, 0, 0, 3, 3, 0, 4, 2, 2, 3, 4, 0, 4, 0, 3, 0, 4, 4, 5, 3, 4, 4, 0, 3, 0, 0, 1, 4),
+ (1, 4, 0, 4, 0, 4, 0, 4, 0, 3, 5, 4, 4, 3, 4, 3, 5, 4, 3, 3, 4, 3, 5, 4, 4, 4, 4, 3, 4, 2, 4, 3, 3, 1, 5, 4, 3, 2, 4, 5, 4, 5, 5, 4, 4, 5, 4, 4, 0, 3, 2, 2, 3, 3, 0, 4, 3, 1, 3, 2, 1, 4, 3, 3, 4, 5, 0, 3, 0, 2, 0, 4, 5, 5, 4, 5, 4, 0, 4, 0, 0, 5, 4),
+ (0, 5, 0, 5, 0, 4, 0, 3, 0, 4, 4, 3, 4, 3, 3, 3, 4, 0, 4, 4, 4, 3, 4, 3, 4, 3, 3, 1, 4, 2, 4, 3, 4, 0, 5, 4, 1, 4, 5, 4, 4, 5, 3, 2, 4, 3, 4, 3, 2, 4, 1, 3, 3, 3, 2, 3, 2, 0, 4, 3, 3, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 5, 4, 4, 4, 3, 0, 4, 1, 0, 1, 3),
+ (0, 3, 1, 4, 0, 3, 0, 2, 0, 3, 4, 4, 3, 1, 4, 2, 3, 3, 4, 3, 4, 3, 4, 3, 4, 4, 3, 2, 3, 1, 5, 4, 4, 1, 4, 4, 3, 5, 4, 4, 3, 5, 5, 4, 3, 4, 4, 3, 1, 2, 3, 1, 2, 2, 0, 3, 2, 0, 3, 1, 0, 5, 3, 3, 3, 4, 3, 3, 3, 3, 4, 4, 4, 4, 5, 4, 2, 0, 3, 3, 2, 4, 3),
+ (0, 2, 0, 3, 0, 1, 0, 1, 0, 0, 3, 2, 0, 0, 2, 0, 1, 0, 2, 1, 3, 3, 3, 1, 2, 3, 1, 0, 1, 0, 4, 2, 1, 1, 3, 3, 0, 4, 3, 3, 1, 4, 3, 3, 0, 3, 3, 2, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 4, 1, 0, 2, 3, 2, 2, 2, 1, 3, 3, 3, 4, 4, 3, 2, 0, 3, 1, 0, 3, 3),
+ (0, 4, 0, 4, 0, 3, 0, 3, 0, 4, 4, 4, 3, 3, 3, 3, 3, 3, 4, 3, 4, 2, 4, 3, 4, 3, 3, 2, 4, 3, 4, 5, 4, 1, 4, 5, 3, 5, 4, 5, 3, 5, 4, 0, 3, 5, 5, 3, 1, 3, 3, 2, 2, 3, 0, 3, 4, 1, 3, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 5, 4, 4, 5, 3, 0, 4, 1, 0, 3, 4),
+ (0, 2, 0, 3, 0, 3, 0, 0, 0, 2, 2, 2, 1, 0, 1, 0, 0, 0, 3, 0, 3, 0, 3, 0, 1, 3, 1, 0, 3, 1, 3, 3, 3, 1, 3, 3, 3, 0, 1, 3, 1, 3, 4, 0, 0, 3, 1, 1, 0, 3, 2, 0, 0, 0, 0, 1, 3, 0, 1, 0, 0, 3, 3, 2, 0, 3, 0, 0, 0, 0, 0, 3, 4, 3, 4, 3, 3, 0, 3, 0, 0, 2, 3),
+ (2, 3, 0, 3, 0, 2, 0, 1, 0, 3, 3, 4, 3, 1, 3, 1, 1, 1, 3, 1, 4, 3, 4, 3, 3, 3, 0, 0, 3, 1, 5, 4, 3, 1, 4, 3, 2, 5, 5, 4, 4, 4, 4, 3, 3, 4, 4, 4, 0, 2, 1, 1, 3, 2, 0, 1, 2, 0, 0, 1, 0, 4, 1, 3, 3, 3, 0, 3, 0, 1, 0, 4, 4, 4, 5, 5, 3, 0, 2, 0, 0, 4, 4),
+ (0, 2, 0, 1, 0, 3, 1, 3, 0, 2, 3, 3, 3, 0, 3, 1, 0, 0, 3, 0, 3, 2, 3, 1, 3, 2, 1, 1, 0, 0, 4, 2, 1, 0, 2, 3, 1, 4, 3, 2, 0, 4, 4, 3, 1, 3, 1, 3, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 4, 1, 1, 1, 2, 0, 3, 0, 0, 0, 3, 4, 2, 4, 3, 2, 0, 1, 0, 0, 3, 3),
+ (0, 1, 0, 4, 0, 5, 0, 4, 0, 2, 4, 4, 2, 3, 3, 2, 3, 3, 5, 3, 3, 3, 4, 3, 4, 2, 3, 0, 4, 3, 3, 3, 4, 1, 4, 3, 2, 1, 5, 5, 3, 4, 5, 1, 3, 5, 4, 2, 0, 3, 3, 0, 1, 3, 0, 4, 2, 0, 1, 3, 1, 4, 3, 3, 3, 3, 0, 3, 0, 1, 0, 3, 4, 4, 4, 5, 5, 0, 3, 0, 1, 4, 5),
+ (0, 2, 0, 3, 0, 3, 0, 0, 0, 2, 3, 1, 3, 0, 4, 0, 1, 1, 3, 0, 3, 4, 3, 2, 3, 1, 0, 3, 3, 2, 3, 1, 3, 0, 2, 3, 0, 2, 1, 4, 1, 2, 2, 0, 0, 3, 3, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 0, 2, 2, 0, 3, 2, 1, 3, 3, 0, 2, 0, 2, 0, 0, 3, 3, 1, 2, 4, 0, 3, 0, 2, 2, 3),
+ (2, 4, 0, 5, 0, 4, 0, 4, 0, 2, 4, 4, 4, 3, 4, 3, 3, 3, 1, 2, 4, 3, 4, 3, 4, 4, 5, 0, 3, 3, 3, 3, 2, 0, 4, 3, 1, 4, 3, 4, 1, 4, 4, 3, 3, 4, 4, 3, 1, 2, 3, 0, 4, 2, 0, 4, 1, 0, 3, 3, 0, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 3, 5, 3, 4, 5, 2, 0, 3, 0, 0, 4, 5),
+ (0, 3, 0, 4, 0, 1, 0, 1, 0, 1, 3, 2, 2, 1, 3, 0, 3, 0, 2, 0, 2, 0, 3, 0, 2, 0, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1, 0, 0, 0, 4, 0, 3, 1, 0, 2, 1, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 4, 2, 2, 3, 1, 0, 3, 0, 0, 0, 1, 4, 4, 4, 3, 0, 0, 4, 0, 0, 1, 4),
+ (1, 4, 1, 5, 0, 3, 0, 3, 0, 4, 5, 4, 4, 3, 5, 3, 3, 4, 4, 3, 4, 1, 3, 3, 3, 3, 2, 1, 4, 1, 5, 4, 3, 1, 4, 4, 3, 5, 4, 4, 3, 5, 4, 3, 3, 4, 4, 4, 0, 3, 3, 1, 2, 3, 0, 3, 1, 0, 3, 3, 0, 5, 4, 4, 4, 4, 4, 4, 3, 3, 5, 4, 4, 3, 3, 5, 4, 0, 3, 2, 0, 4, 4),
+ (0, 2, 0, 3, 0, 1, 0, 0, 0, 1, 3, 3, 3, 2, 4, 1, 3, 0, 3, 1, 3, 0, 2, 2, 1, 1, 0, 0, 2, 0, 4, 3, 1, 0, 4, 3, 0, 4, 4, 4, 1, 4, 3, 1, 1, 3, 3, 1, 0, 2, 0, 0, 1, 3, 0, 0, 0, 0, 2, 0, 0, 4, 3, 2, 4, 3, 5, 4, 3, 3, 3, 4, 3, 3, 4, 3, 3, 0, 2, 1, 0, 3, 3),
+ (0, 2, 0, 4, 0, 3, 0, 2, 0, 2, 5, 5, 3, 4, 4, 4, 4, 1, 4, 3, 3, 0, 4, 3, 4, 3, 1, 3, 3, 2, 4, 3, 0, 3, 4, 3, 0, 3, 4, 4, 2, 4, 4, 0, 4, 5, 3, 3, 2, 2, 1, 1, 1, 2, 0, 1, 5, 0, 3, 3, 2, 4, 3, 3, 3, 4, 0, 3, 0, 2, 0, 4, 4, 3, 5, 5, 0, 0, 3, 0, 2, 3, 3),
+ (0, 3, 0, 4, 0, 3, 0, 1, 0, 3, 4, 3, 3, 1, 3, 3, 3, 0, 3, 1, 3, 0, 4, 3, 3, 1, 1, 0, 3, 0, 3, 3, 0, 0, 4, 4, 0, 1, 5, 4, 3, 3, 5, 0, 3, 3, 4, 3, 0, 2, 0, 1, 1, 1, 0, 1, 3, 0, 1, 2, 1, 3, 3, 2, 3, 3, 0, 3, 0, 1, 0, 1, 3, 3, 4, 4, 1, 0, 1, 2, 2, 1, 3),
+ (0, 1, 0, 4, 0, 4, 0, 3, 0, 1, 3, 3, 3, 2, 3, 1, 1, 0, 3, 0, 3, 3, 4, 3, 2, 4, 2, 0, 1, 0, 4, 3, 2, 0, 4, 3, 0, 5, 3, 3, 2, 4, 4, 4, 3, 3, 3, 4, 0, 1, 3, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 4, 2, 3, 3, 3, 0, 3, 0, 0, 0, 4, 4, 4, 5, 3, 2, 0, 3, 3, 0, 3, 5),
+ (0, 2, 0, 3, 0, 0, 0, 3, 0, 1, 3, 0, 2, 0, 0, 0, 1, 0, 3, 1, 1, 3, 3, 0, 0, 3, 0, 0, 3, 0, 2, 3, 1, 0, 3, 1, 0, 3, 3, 2, 0, 4, 2, 2, 0, 2, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 2, 0, 1, 0, 1, 0, 0, 0, 1, 3, 1, 2, 0, 0, 0, 1, 0, 0, 1, 4),
+ (0, 3, 0, 3, 0, 5, 0, 1, 0, 2, 4, 3, 1, 3, 3, 2, 1, 1, 5, 2, 1, 0, 5, 1, 2, 0, 0, 0, 3, 3, 2, 2, 3, 2, 4, 3, 0, 0, 3, 3, 1, 3, 3, 0, 2, 5, 3, 4, 0, 3, 3, 0, 1, 2, 0, 2, 2, 0, 3, 2, 0, 2, 2, 3, 3, 3, 0, 2, 0, 1, 0, 3, 4, 4, 2, 5, 4, 0, 3, 0, 0, 3, 5),
+ (0, 3, 0, 3, 0, 3, 0, 1, 0, 3, 3, 3, 3, 0, 3, 0, 2, 0, 2, 1, 1, 0, 2, 0, 1, 0, 0, 0, 2, 1, 0, 0, 1, 0, 3, 2, 0, 0, 3, 3, 1, 2, 3, 1, 0, 3, 3, 0, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 2, 3, 1, 2, 3, 0, 3, 0, 1, 0, 3, 2, 1, 0, 4, 3, 0, 1, 1, 0, 3, 3),
+ (0, 4, 0, 5, 0, 3, 0, 3, 0, 4, 5, 5, 4, 3, 5, 3, 4, 3, 5, 3, 3, 2, 5, 3, 4, 4, 4, 3, 4, 3, 4, 5, 5, 3, 4, 4, 3, 4, 4, 5, 4, 4, 4, 3, 4, 5, 5, 4, 2, 3, 4, 2, 3, 4, 0, 3, 3, 1, 4, 3, 2, 4, 3, 3, 5, 5, 0, 3, 0, 3, 0, 5, 5, 5, 5, 4, 4, 0, 4, 0, 1, 4, 4),
+ (0, 4, 0, 4, 0, 3, 0, 3, 0, 3, 5, 4, 4, 2, 3, 2, 5, 1, 3, 2, 5, 1, 4, 2, 3, 2, 3, 3, 4, 3, 3, 3, 3, 2, 5, 4, 1, 3, 3, 5, 3, 4, 4, 0, 4, 4, 3, 1, 1, 3, 1, 0, 2, 3, 0, 2, 3, 0, 3, 0, 0, 4, 3, 1, 3, 4, 0, 3, 0, 2, 0, 4, 4, 4, 3, 4, 5, 0, 4, 0, 0, 3, 4),
+ (0, 3, 0, 3, 0, 3, 1, 2, 0, 3, 4, 4, 3, 3, 3, 0, 2, 2, 4, 3, 3, 1, 3, 3, 3, 1, 1, 0, 3, 1, 4, 3, 2, 3, 4, 4, 2, 4, 4, 4, 3, 4, 4, 3, 2, 4, 4, 3, 1, 3, 3, 1, 3, 3, 0, 4, 1, 0, 2, 2, 1, 4, 3, 2, 3, 3, 5, 4, 3, 3, 5, 4, 4, 3, 3, 0, 4, 0, 3, 2, 2, 4, 4),
+ (0, 2, 0, 1, 0, 0, 0, 0, 0, 1, 2, 1, 3, 0, 0, 0, 0, 0, 2, 0, 1, 2, 1, 0, 0, 1, 0, 0, 0, 0, 3, 0, 0, 1, 0, 1, 1, 3, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 0, 3, 4, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1),
+ (0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 4, 1, 4, 0, 3, 0, 4, 0, 3, 0, 4, 0, 3, 0, 3, 0, 4, 1, 5, 1, 4, 0, 0, 3, 0, 5, 0, 5, 2, 0, 1, 0, 0, 0, 2, 1, 4, 0, 1, 3, 0, 0, 3, 0, 0, 3, 1, 1, 4, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0),
+ (1, 4, 0, 5, 0, 3, 0, 2, 0, 3, 5, 4, 4, 3, 4, 3, 5, 3, 4, 3, 3, 0, 4, 3, 3, 3, 3, 3, 3, 2, 4, 4, 3, 1, 3, 4, 4, 5, 4, 4, 3, 4, 4, 1, 3, 5, 4, 3, 3, 3, 1, 2, 2, 3, 3, 1, 3, 1, 3, 3, 3, 5, 3, 3, 4, 5, 0, 3, 0, 3, 0, 3, 4, 3, 4, 4, 3, 0, 3, 0, 2, 4, 3),
+ (0, 1, 0, 4, 0, 0, 0, 0, 0, 1, 4, 0, 4, 1, 4, 2, 4, 0, 3, 0, 1, 0, 1, 0, 0, 0, 0, 0, 2, 0, 3, 1, 1, 1, 0, 3, 0, 0, 0, 1, 2, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 3, 0, 0, 0, 0, 3, 2, 0, 2, 2, 0, 1, 0, 0, 0, 2, 3, 2, 3, 3, 0, 0, 0, 0, 2, 1, 0),
+ (0, 5, 1, 5, 0, 3, 0, 3, 0, 5, 4, 4, 5, 1, 5, 3, 3, 0, 4, 3, 4, 3, 5, 3, 4, 3, 3, 2, 4, 3, 4, 3, 3, 0, 3, 3, 1, 4, 4, 3, 4, 4, 4, 3, 4, 5, 5, 3, 2, 3, 1, 1, 3, 3, 1, 3, 1, 1, 3, 3, 2, 4, 5, 3, 3, 5, 0, 4, 0, 3, 0, 4, 4, 3, 5, 3, 3, 0, 3, 4, 0, 4, 3),
+ (0, 5, 0, 5, 0, 3, 0, 2, 0, 4, 4, 3, 5, 2, 4, 3, 3, 3, 4, 4, 4, 3, 5, 3, 5, 3, 3, 1, 4, 0, 4, 3, 3, 0, 3, 3, 0, 4, 4, 4, 4, 5, 4, 3, 3, 5, 5, 3, 2, 3, 1, 2, 3, 2, 0, 1, 0, 0, 3, 2, 2, 4, 4, 3, 1, 5, 0, 4, 0, 3, 0, 4, 3, 1, 3, 2, 1, 0, 3, 3, 0, 3, 3),
+ (0, 4, 0, 5, 0, 5, 0, 4, 0, 4, 5, 5, 5, 3, 4, 3, 3, 2, 5, 4, 4, 3, 5, 3, 5, 3, 4, 0, 4, 3, 4, 4, 3, 2, 4, 4, 3, 4, 5, 4, 4, 5, 5, 0, 3, 5, 5, 4, 1, 3, 3, 2, 3, 3, 1, 3, 1, 0, 4, 3, 1, 4, 4, 3, 4, 5, 0, 4, 0, 2, 0, 4, 3, 4, 4, 3, 3, 0, 4, 0, 0, 5, 5),
+ (0, 4, 0, 4, 0, 5, 0, 1, 1, 3, 3, 4, 4, 3, 4, 1, 3, 0, 5, 1, 3, 0, 3, 1, 3, 1, 1, 0, 3, 0, 3, 3, 4, 0, 4, 3, 0, 4, 4, 4, 3, 4, 4, 0, 3, 5, 4, 1, 0, 3, 0, 0, 2, 3, 0, 3, 1, 0, 3, 1, 0, 3, 2, 1, 3, 5, 0, 3, 0, 1, 0, 3, 2, 3, 3, 4, 4, 0, 2, 2, 0, 4, 4),
+ (2, 4, 0, 5, 0, 4, 0, 3, 0, 4, 5, 5, 4, 3, 5, 3, 5, 3, 5, 3, 5, 2, 5, 3, 4, 3, 3, 4, 3, 4, 5, 3, 2, 1, 5, 4, 3, 2, 3, 4, 5, 3, 4, 1, 2, 5, 4, 3, 0, 3, 3, 0, 3, 2, 0, 2, 3, 0, 4, 1, 0, 3, 4, 3, 3, 5, 0, 3, 0, 1, 0, 4, 5, 5, 5, 4, 3, 0, 4, 2, 0, 3, 5),
+ (0, 5, 0, 4, 0, 4, 0, 2, 0, 5, 4, 3, 4, 3, 4, 3, 3, 3, 4, 3, 4, 2, 5, 3, 5, 3, 4, 1, 4, 3, 4, 4, 4, 0, 3, 5, 0, 4, 4, 4, 4, 5, 3, 1, 3, 4, 5, 3, 3, 3, 3, 3, 3, 3, 0, 2, 2, 0, 3, 3, 2, 4, 3, 3, 3, 5, 3, 4, 1, 3, 3, 5, 3, 2, 0, 0, 0, 0, 4, 3, 1, 3, 3),
+ (0, 1, 0, 3, 0, 3, 0, 1, 0, 1, 3, 3, 3, 2, 3, 3, 3, 0, 3, 0, 0, 0, 3, 1, 3, 0, 0, 0, 2, 2, 2, 3, 0, 0, 3, 2, 0, 1, 2, 4, 1, 3, 3, 0, 0, 3, 3, 3, 0, 1, 0, 0, 2, 1, 0, 0, 3, 0, 3, 1, 0, 3, 0, 0, 1, 3, 0, 2, 0, 1, 0, 3, 3, 1, 3, 3, 0, 0, 1, 1, 0, 3, 3),
+ (0, 2, 0, 3, 0, 2, 1, 4, 0, 2, 2, 3, 1, 1, 3, 1, 1, 0, 2, 0, 3, 1, 2, 3, 1, 3, 0, 0, 1, 0, 4, 3, 2, 3, 3, 3, 1, 4, 2, 3, 3, 3, 3, 1, 0, 3, 1, 4, 0, 1, 1, 0, 1, 2, 0, 1, 1, 0, 1, 1, 0, 3, 1, 3, 2, 2, 0, 1, 0, 0, 0, 2, 3, 3, 3, 1, 0, 0, 0, 0, 0, 2, 3),
+ (0, 5, 0, 4, 0, 5, 0, 2, 0, 4, 5, 5, 3, 3, 4, 3, 3, 1, 5, 4, 4, 2, 4, 4, 4, 3, 4, 2, 4, 3, 5, 5, 4, 3, 3, 4, 3, 3, 5, 5, 4, 5, 5, 1, 3, 4, 5, 3, 1, 4, 3, 1, 3, 3, 0, 3, 3, 1, 4, 3, 1, 4, 5, 3, 3, 5, 0, 4, 0, 3, 0, 5, 3, 3, 1, 4, 3, 0, 4, 0, 1, 5, 3),
+ (0, 5, 0, 5, 0, 4, 0, 2, 0, 4, 4, 3, 4, 3, 3, 3, 3, 3, 5, 4, 4, 4, 4, 4, 4, 5, 3, 3, 5, 2, 4, 4, 4, 3, 4, 4, 3, 3, 4, 4, 5, 5, 3, 3, 4, 3, 4, 3, 3, 4, 3, 3, 3, 3, 1, 2, 2, 1, 4, 3, 3, 5, 4, 4, 3, 4, 0, 4, 0, 3, 0, 4, 4, 4, 4, 4, 1, 0, 4, 2, 0, 2, 4),
+ (0, 4, 0, 4, 0, 3, 0, 1, 0, 3, 5, 2, 3, 0, 3, 0, 2, 1, 4, 2, 3, 3, 4, 1, 4, 3, 3, 2, 4, 1, 3, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 5, 3, 3, 3, 3, 3, 2, 0, 2, 0, 0, 2, 0, 0, 2, 0, 0, 1, 0, 0, 3, 1, 2, 2, 3, 0, 3, 0, 2, 0, 4, 4, 3, 3, 4, 1, 0, 3, 0, 0, 2, 4),
+ (0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 2, 0, 0, 0, 0, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0, 0, 3, 1, 3, 0, 3, 2, 0, 0, 0, 1, 0, 3, 2, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 4, 0, 2, 0, 0, 0, 0, 0, 0, 2),
+ (0, 2, 1, 3, 0, 2, 0, 2, 0, 3, 3, 3, 3, 1, 3, 1, 3, 3, 3, 3, 3, 3, 4, 2, 2, 1, 2, 1, 4, 0, 4, 3, 1, 3, 3, 3, 2, 4, 3, 5, 4, 3, 3, 3, 3, 3, 3, 3, 0, 1, 3, 0, 2, 0, 0, 1, 0, 0, 1, 0, 0, 4, 2, 0, 2, 3, 0, 3, 3, 0, 3, 3, 4, 2, 3, 1, 4, 0, 1, 2, 0, 2, 3),
+ (0, 3, 0, 3, 0, 1, 0, 3, 0, 2, 3, 3, 3, 0, 3, 1, 2, 0, 3, 3, 2, 3, 3, 2, 3, 2, 3, 1, 3, 0, 4, 3, 2, 0, 3, 3, 1, 4, 3, 3, 2, 3, 4, 3, 1, 3, 3, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 4, 1, 1, 0, 3, 0, 3, 1, 0, 2, 3, 3, 3, 3, 3, 1, 0, 0, 2, 0, 3, 3),
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 3, 0, 3, 0, 3, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 2, 0, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3),
+ (0, 2, 0, 3, 1, 3, 0, 3, 0, 2, 3, 3, 3, 1, 3, 1, 3, 1, 3, 1, 3, 3, 3, 1, 3, 0, 2, 3, 1, 1, 4, 3, 3, 2, 3, 3, 1, 2, 2, 4, 1, 3, 3, 0, 1, 4, 2, 3, 0, 1, 3, 0, 3, 0, 0, 1, 3, 0, 2, 0, 0, 3, 3, 2, 1, 3, 0, 3, 0, 2, 0, 3, 4, 4, 4, 3, 1, 0, 3, 0, 0, 3, 3),
+ (0, 2, 0, 1, 0, 2, 0, 0, 0, 1, 3, 2, 2, 1, 3, 0, 1, 1, 3, 0, 3, 2, 3, 1, 2, 0, 2, 0, 1, 1, 3, 3, 3, 0, 3, 3, 1, 1, 2, 3, 2, 3, 3, 1, 2, 3, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 2, 1, 2, 1, 3, 0, 3, 0, 0, 0, 3, 4, 4, 4, 3, 2, 0, 2, 0, 0, 2, 4),
+ (0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 0, 0, 3),
+ (0, 3, 0, 3, 0, 2, 0, 3, 0, 3, 3, 3, 2, 3, 2, 2, 2, 0, 3, 1, 3, 3, 3, 2, 3, 3, 0, 0, 3, 0, 3, 2, 2, 0, 2, 3, 1, 4, 3, 4, 3, 3, 2, 3, 1, 5, 4, 4, 0, 3, 1, 2, 1, 3, 0, 3, 1, 1, 2, 0, 2, 3, 1, 3, 1, 3, 0, 3, 0, 1, 0, 3, 3, 4, 4, 2, 1, 0, 2, 1, 0, 2, 4),
+ (0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 4, 2, 5, 1, 4, 0, 2, 0, 2, 1, 3, 1, 4, 0, 2, 1, 0, 0, 2, 1, 4, 1, 1, 0, 3, 3, 0, 5, 1, 3, 2, 3, 3, 1, 0, 3, 2, 3, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 4, 0, 1, 0, 3, 0, 2, 0, 1, 0, 3, 3, 3, 4, 3, 3, 0, 0, 0, 0, 2, 3),
+ (0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 2, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 0, 3),
+ (0, 1, 0, 3, 0, 4, 0, 3, 0, 2, 4, 3, 1, 0, 3, 2, 2, 1, 3, 1, 2, 2, 3, 1, 1, 1, 2, 1, 3, 0, 1, 2, 0, 1, 3, 2, 1, 3, 0, 5, 5, 1, 0, 0, 1, 3, 2, 1, 0, 3, 0, 0, 1, 0, 0, 0, 0, 0, 3, 4, 0, 1, 1, 1, 3, 2, 0, 2, 0, 1, 0, 2, 3, 3, 1, 2, 3, 0, 1, 0, 1, 0, 4),
+ (0, 0, 0, 1, 0, 3, 0, 3, 0, 2, 2, 1, 0, 0, 4, 0, 3, 0, 3, 1, 3, 0, 3, 0, 3, 0, 1, 0, 3, 0, 3, 1, 3, 0, 3, 3, 0, 0, 1, 2, 1, 1, 1, 0, 1, 2, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1, 2, 0, 0, 2, 0, 0, 0, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, 1, 4),
+ (0, 0, 0, 3, 0, 3, 0, 0, 0, 0, 3, 1, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 0, 2, 0, 2, 3, 0, 0, 2, 2, 3, 1, 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 2, 0, 0, 0, 0, 2, 3),
+ (2, 4, 0, 5, 0, 5, 0, 4, 0, 3, 4, 3, 3, 3, 4, 3, 3, 3, 4, 3, 4, 4, 5, 4, 5, 5, 5, 2, 3, 0, 5, 5, 4, 1, 5, 4, 3, 1, 5, 4, 3, 4, 4, 3, 3, 4, 3, 3, 0, 3, 2, 0, 2, 3, 0, 3, 0, 0, 3, 3, 0, 5, 3, 2, 3, 3, 0, 3, 0, 3, 0, 3, 4, 5, 4, 5, 3, 0, 4, 3, 0, 3, 4),
+ (0, 3, 0, 3, 0, 3, 0, 3, 0, 3, 3, 4, 3, 2, 3, 2, 3, 0, 4, 3, 3, 3, 3, 3, 3, 3, 3, 0, 3, 2, 4, 3, 3, 1, 3, 4, 3, 4, 4, 4, 3, 4, 4, 3, 2, 4, 4, 1, 0, 2, 0, 0, 1, 1, 0, 2, 0, 0, 3, 1, 0, 5, 3, 2, 1, 3, 0, 3, 0, 1, 2, 4, 3, 2, 4, 3, 3, 0, 3, 2, 0, 4, 4),
+ (0, 3, 0, 3, 0, 1, 0, 0, 0, 1, 4, 3, 3, 2, 3, 1, 3, 1, 4, 2, 3, 2, 4, 2, 3, 4, 3, 0, 2, 2, 3, 3, 3, 0, 3, 3, 3, 0, 3, 4, 1, 3, 3, 0, 3, 4, 3, 3, 0, 1, 1, 0, 1, 0, 0, 0, 4, 0, 3, 0, 0, 3, 1, 2, 1, 3, 0, 4, 0, 1, 0, 4, 3, 3, 4, 3, 3, 0, 2, 0, 0, 3, 3),
+ (0, 3, 0, 4, 0, 1, 0, 3, 0, 3, 4, 3, 3, 0, 3, 3, 3, 1, 3, 1, 3, 3, 4, 3, 3, 3, 0, 0, 3, 1, 5, 3, 3, 1, 3, 3, 2, 5, 4, 3, 3, 4, 5, 3, 2, 5, 3, 4, 0, 1, 0, 0, 0, 0, 0, 2, 0, 0, 1, 1, 0, 4, 2, 2, 1, 3, 0, 3, 0, 2, 0, 4, 4, 3, 5, 3, 2, 0, 1, 1, 0, 3, 4),
+ (0, 5, 0, 4, 0, 5, 0, 2, 0, 4, 4, 3, 3, 2, 3, 3, 3, 1, 4, 3, 4, 1, 5, 3, 4, 3, 4, 0, 4, 2, 4, 3, 4, 1, 5, 4, 0, 4, 4, 4, 4, 5, 4, 1, 3, 5, 4, 2, 1, 4, 1, 1, 3, 2, 0, 3, 1, 0, 3, 2, 1, 4, 3, 3, 3, 4, 0, 4, 0, 3, 0, 4, 4, 4, 3, 3, 3, 0, 4, 2, 0, 3, 4),
+ (1, 4, 0, 4, 0, 3, 0, 1, 0, 3, 3, 3, 1, 1, 3, 3, 2, 2, 3, 3, 1, 0, 3, 2, 2, 1, 2, 0, 3, 1, 2, 1, 2, 0, 3, 2, 0, 2, 2, 3, 3, 4, 3, 0, 3, 3, 1, 2, 0, 1, 1, 3, 1, 2, 0, 0, 3, 0, 1, 1, 0, 3, 2, 2, 3, 3, 0, 3, 0, 0, 0, 2, 3, 3, 4, 3, 3, 0, 1, 0, 0, 1, 4),
+ (0, 4, 0, 4, 0, 4, 0, 0, 0, 3, 4, 4, 3, 1, 4, 2, 3, 2, 3, 3, 3, 1, 4, 3, 4, 0, 3, 0, 4, 2, 3, 3, 2, 2, 5, 4, 2, 1, 3, 4, 3, 4, 3, 1, 3, 3, 4, 2, 0, 2, 1, 0, 3, 3, 0, 0, 2, 0, 3, 1, 0, 4, 4, 3, 4, 3, 0, 4, 0, 1, 0, 2, 4, 4, 4, 4, 4, 0, 3, 2, 0, 3, 3),
+ (0, 0, 0, 1, 0, 4, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 3, 2, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 2),
+ (0, 2, 0, 3, 0, 4, 0, 4, 0, 1, 3, 3, 3, 0, 4, 0, 2, 1, 2, 1, 1, 1, 2, 0, 3, 1, 1, 0, 1, 0, 3, 1, 0, 0, 3, 3, 2, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 2, 0, 2, 2, 0, 3, 1, 0, 0, 1, 0, 1, 1, 0, 1, 2, 0, 3, 0, 0, 0, 0, 1, 0, 0, 3, 3, 4, 3, 1, 0, 1, 0, 3, 0, 2),
+ (0, 0, 0, 3, 0, 5, 0, 0, 0, 0, 1, 0, 2, 0, 3, 1, 0, 1, 3, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 4, 0, 0, 0, 2, 3, 0, 1, 4, 1, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3, 0, 0, 0, 0, 0, 3),
+ (0, 2, 0, 5, 0, 5, 0, 1, 0, 2, 4, 3, 3, 2, 5, 1, 3, 2, 3, 3, 3, 0, 4, 1, 2, 0, 3, 0, 4, 0, 2, 2, 1, 1, 5, 3, 0, 0, 1, 4, 2, 3, 2, 0, 3, 3, 3, 2, 0, 2, 4, 1, 1, 2, 0, 1, 1, 0, 3, 1, 0, 1, 3, 1, 2, 3, 0, 2, 0, 0, 0, 1, 3, 5, 4, 4, 4, 0, 3, 0, 0, 1, 3),
+ (0, 4, 0, 5, 0, 4, 0, 4, 0, 4, 5, 4, 3, 3, 4, 3, 3, 3, 4, 3, 4, 4, 5, 3, 4, 5, 4, 2, 4, 2, 3, 4, 3, 1, 4, 4, 1, 3, 5, 4, 4, 5, 5, 4, 4, 5, 5, 5, 2, 3, 3, 1, 4, 3, 1, 3, 3, 0, 3, 3, 1, 4, 3, 4, 4, 4, 0, 3, 0, 4, 0, 3, 3, 4, 4, 5, 0, 0, 4, 3, 0, 4, 5),
+ (0, 4, 0, 4, 0, 3, 0, 3, 0, 3, 4, 4, 4, 3, 3, 2, 4, 3, 4, 3, 4, 3, 5, 3, 4, 3, 2, 1, 4, 2, 4, 4, 3, 1, 3, 4, 2, 4, 5, 5, 3, 4, 5, 4, 1, 5, 4, 3, 0, 3, 2, 2, 3, 2, 1, 3, 1, 0, 3, 3, 3, 5, 3, 3, 3, 5, 4, 4, 2, 3, 3, 4, 3, 3, 3, 2, 1, 0, 3, 2, 1, 4, 3),
+ (0, 4, 0, 5, 0, 4, 0, 3, 0, 3, 5, 5, 3, 2, 4, 3, 4, 0, 5, 4, 4, 1, 4, 4, 4, 3, 3, 3, 4, 3, 5, 5, 2, 3, 3, 4, 1, 2, 5, 5, 3, 5, 5, 2, 3, 5, 5, 4, 0, 3, 2, 0, 3, 3, 1, 1, 5, 1, 4, 1, 0, 4, 3, 2, 3, 5, 0, 4, 0, 3, 0, 5, 4, 3, 4, 3, 0, 0, 4, 1, 0, 4, 4),
+ (1, 3, 0, 4, 0, 2, 0, 2, 0, 2, 5, 5, 3, 3, 3, 3, 3, 0, 4, 2, 3, 4, 4, 4, 3, 4, 0, 0, 3, 4, 5, 4, 3, 3, 3, 3, 2, 5, 5, 4, 5, 5, 5, 4, 3, 5, 5, 5, 1, 3, 1, 0, 1, 0, 0, 3, 2, 0, 4, 2, 0, 5, 2, 3, 2, 4, 1, 3, 0, 3, 0, 4, 5, 4, 5, 4, 3, 0, 4, 2, 0, 5, 4),
+ (0, 3, 0, 4, 0, 5, 0, 3, 0, 3, 4, 4, 3, 2, 3, 2, 3, 3, 3, 3, 3, 2, 4, 3, 3, 2, 2, 0, 3, 3, 3, 3, 3, 1, 3, 3, 3, 0, 4, 4, 3, 4, 4, 1, 1, 4, 4, 2, 0, 3, 1, 0, 1, 1, 0, 4, 1, 0, 2, 3, 1, 3, 3, 1, 3, 4, 0, 3, 0, 1, 0, 3, 1, 3, 0, 0, 1, 0, 2, 0, 0, 4, 4),
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
+ (0, 3, 0, 3, 0, 2, 0, 3, 0, 1, 5, 4, 3, 3, 3, 1, 4, 2, 1, 2, 3, 4, 4, 2, 4, 4, 5, 0, 3, 1, 4, 3, 4, 0, 4, 3, 3, 3, 2, 3, 2, 5, 3, 4, 3, 2, 2, 3, 0, 0, 3, 0, 2, 1, 0, 1, 2, 0, 0, 0, 0, 2, 1, 1, 3, 1, 0, 2, 0, 4, 0, 3, 4, 4, 4, 5, 2, 0, 2, 0, 0, 1, 3),
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 4, 2, 1, 1, 0, 1, 0, 3, 2, 0, 0, 3, 1, 1, 1, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 4, 0, 4, 2, 1, 0, 0, 0, 0, 0, 1),
+ (0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 3, 1, 0, 0, 0, 2, 0, 2, 1, 0, 0, 1, 2, 1, 0, 1, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3, 1, 0, 0, 0, 0, 0, 1, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 2),
+ (0, 4, 0, 4, 0, 4, 0, 3, 0, 4, 4, 3, 4, 2, 4, 3, 2, 0, 4, 4, 4, 3, 5, 3, 5, 3, 3, 2, 4, 2, 4, 3, 4, 3, 1, 4, 0, 2, 3, 4, 4, 4, 3, 3, 3, 4, 4, 4, 3, 4, 1, 3, 4, 3, 2, 1, 2, 1, 3, 3, 3, 4, 4, 3, 3, 5, 0, 4, 0, 3, 0, 4, 3, 3, 3, 2, 1, 0, 3, 0, 0, 3, 3),
+ (0, 4, 0, 3, 0, 3, 0, 3, 0, 3, 5, 5, 3, 3, 3, 3, 4, 3, 4, 3, 3, 3, 4, 4, 4, 3, 3, 3, 3, 4, 3, 5, 3, 3, 1, 3, 2, 4, 5, 5, 5, 5, 4, 3, 4, 5, 5, 3, 2, 2, 3, 3, 3, 3, 2, 3, 3, 1, 2, 3, 2, 4, 3, 3, 3, 4, 0, 4, 0, 2, 0, 4, 3, 2, 2, 1, 2, 0, 3, 0, 0, 4, 1),
+)
+# fmt: on
+
+
+class JapaneseContextAnalysis:
+ NUM_OF_CATEGORY = 6  # relevance categories in jp2_char_context (values 0-5)
+ DONT_KNOW = -1
+ ENOUGH_REL_THRESHOLD = 100  # enough relevant pairs seen to trust the result
+ MAX_REL_THRESHOLD = 1000  # stop analysing once this many pairs are seen
+ MINIMUM_DATA_THRESHOLD = 4  # below this, get_confidence() returns DONT_KNOW
+
+ def __init__(self) -> None:
+ self._total_rel = 0
+ self._rel_sample: List[int] = []
+ self._need_to_skip_char_num = 0
+ self._last_char_order = -1
+ self._done = False
+ self.reset()
+
+ def reset(self) -> None:
+ self._total_rel = 0  # total number of relevant character pairs received
+ # category counters; each integer counts the pairs in its category
+ self._rel_sample = [0] * self.NUM_OF_CATEGORY
+ # if the last byte in the current buffer is not the last byte of a
+ # character, we need to know how many bytes to skip in the next buffer
+ self._need_to_skip_char_num = 0
+ self._last_char_order = -1  # the order of the previous character
+ # once this flag is set to True, detection is done and a conclusion has
+ # been made
+ self._done = False
+
+ def feed(self, byte_str: Union[bytes, bytearray], num_bytes: int) -> None:
+ if self._done:
+ return
+
+ # The buffer we got is byte oriented, and a character may span more
+ # than one buffer. If the last one or two bytes of the previous buffer
+ # did not complete a character, we record how many bytes are needed to
+ # complete it and skip them here. We could keep those bytes and analyse
+ # the character once it is complete, but one character makes little
+ # difference, so simply skipping it keeps the logic simple and improves
+ # performance.
+ i = self._need_to_skip_char_num
+ while i < num_bytes:
+ order, char_len = self.get_order(byte_str[i : i + 2])
+ i += char_len
+ if i > num_bytes:
+ self._need_to_skip_char_num = i - num_bytes
+ self._last_char_order = -1
+ else:
+ if (order != -1) and (self._last_char_order != -1):
+ self._total_rel += 1
+ if self._total_rel > self.MAX_REL_THRESHOLD:
+ self._done = True
+ break
+ self._rel_sample[
+ jp2_char_context[self._last_char_order][order]
+ ] += 1
+ self._last_char_order = order
+
+ def got_enough_data(self) -> bool:
+ return self._total_rel > self.ENOUGH_REL_THRESHOLD
+
+ def get_confidence(self) -> float:
+ # This is just one way to calculate confidence. It works well for me.
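+ # For example, if 200 relevant pairs have been seen and 30 of them fell
+ # into category 0 (the least plausible pairings), the confidence is
+ # (200 - 30) / 200 = 0.85.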
+ if self._total_rel > self.MINIMUM_DATA_THRESHOLD:
+ return (self._total_rel - self._rel_sample[0]) / self._total_rel
+ return self.DONT_KNOW
+
+ def get_order(self, _: Union[bytes, bytearray]) -> Tuple[int, int]:
+ # Overridden by the encoding-specific subclasses below.
+ return -1, 1
+
+
+class SJISContextAnalysis(JapaneseContextAnalysis):
+ def __init__(self) -> None:
+ super().__init__()
+ self._charset_name = "SHIFT_JIS"
+
+ @property
+ def charset_name(self) -> str:
+ return self._charset_name
+
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]:
+ if not byte_str:
+ return -1, 1
+ # find out current char's byte length
+ first_char = byte_str[0]
+ if (0x81 <= first_char <= 0x9F) or (0xE0 <= first_char <= 0xFC):
+ char_len = 2
+ if (first_char == 0x87) or (0xFA <= first_char <= 0xFC):
+ self._charset_name = "CP932"
+ else:
+ char_len = 1
+
+ # return its order if it is hiragana
+ if len(byte_str) > 1:
+ second_char = byte_str[1]
+ if (first_char == 202) and (0x9F <= second_char <= 0xF1):
+ return second_char - 0x9F, char_len
+
+ return -1, char_len
+
+
+class EUCJPContextAnalysis(JapaneseContextAnalysis):
+ def get_order(self, byte_str: Union[bytes, bytearray]) -> Tuple[int, int]:
+ if not byte_str:
+ return -1, 1
+ # find out current char's byte length
+ first_char = byte_str[0]
+ if (first_char == 0x8E) or (0xA1 <= first_char <= 0xFE):
+ char_len = 2
+ elif first_char == 0x8F:
+ char_len = 3
+ else:
+ char_len = 1
+
+ # return its order if it is hiragana
+ if len(byte_str) > 1:
+ second_char = byte_str[1]
+ if (first_char == 0xA4) and (0xA1 <= second_char <= 0xF3):
+ return second_char - 0xA1, char_len
+
+ return -1, char_len
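
The two subclasses differ only in how get_order() maps raw bytes to a hiragana order and a character length; the pair counting, thresholds, and confidence computation all live in the base class. Below is a minimal usage sketch, assuming the vendored import path; the sample string is made up for illustration. Note that the hiragana test in SJISContextAnalysis compares the lead byte against decimal 202 (0xCA) even though Shift_JIS hiragana use lead byte 0x82, which looks like an octal literal ('\202' == 0x82) from the original C++ carried over as decimal; the sketch below only exercises behaviour that does not depend on it.

    from pip._vendor.chardet.jpcntx import EUCJPContextAnalysis, SJISContextAnalysis

    # EUC-JP: 0xA4 is the hiragana lead byte, so b"\xa4\xa2" ('あ') gets
    # order 0xA2 - 0xA1 = 1 and a two-byte character length.
    euc = EUCJPContextAnalysis()
    assert euc.get_order(b"\xa4\xa2") == (1, 2)

    # Feeding EUC-JP text accumulates adjacent-hiragana pair statistics.
    sample = "ひらがなはよくつかわれます。".encode("euc_jp")
    euc.feed(sample, len(sample))
    print(euc.got_enough_data(), euc.get_confidence())

    # Shift_JIS: lead bytes 0x87 and 0xFA-0xFC exist only in Microsoft's
    # CP932 extension, so seeing one narrows the reported charset name.
    sjis = SJISContextAnalysis()
    assert sjis.get_order(b"\x87\x40") == (-1, 2)
    assert sjis.charset_name == "CP932"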
diff --git a/third_party/python/pip/pip/_vendor/chardet/langbulgarianmodel.py b/third_party/python/pip/pip/_vendor/chardet/langbulgarianmodel.py
new file mode 100644
index 0000000000..994668219d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/langbulgarianmodel.py
@@ -0,0 +1,4649 @@
+from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
+
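+# The table below is a nested mapping: the outer key is the order
+# (frequency rank) of the previous character, the inner key the order of
+# the character that follows, and the value the 0-3 likelihood of that
+# bigram in Bulgarian text. For example (values from the table itself):
+# 'а' has order 1 and 'б' order 18, so BULGARIAN_LANG_MODEL[1][18] == 3,
+# i.e. 'б' after 'а' is a positive indicator. The SingleByteCharSetModel
+# imported above is used later in this file to bundle this table with a
+# per-charset char-to-order map.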
+BULGARIAN_LANG_MODEL = {
+ 63: { # 'e'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 1, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 0, # 'и'
+ 26: 1, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 1, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 0, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 45: { # '\xad'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 1, # 'М'
+ 36: 0, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 0, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 31: { # 'А'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 2, # 'З'
+ 40: 1, # 'И'
+ 59: 1, # 'Й'
+ 33: 1, # 'К'
+ 46: 2, # 'Л'
+ 38: 1, # 'М'
+ 36: 2, # 'Н'
+ 41: 1, # 'О'
+ 30: 2, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 2, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 2, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 1, # 'а'
+ 18: 2, # 'б'
+ 9: 2, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 1, # 'е'
+ 23: 1, # 'ж'
+ 15: 2, # 'з'
+ 2: 0, # 'и'
+ 26: 2, # 'й'
+ 12: 2, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 0, # 'о'
+ 13: 2, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 1, # 'у'
+ 29: 2, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 32: { # 'Б'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 2, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 1, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 2, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 2, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 2, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 2, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 1, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 35: { # 'В'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 2, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 2, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 2, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 43: { # 'Г'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 1, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 37: { # 'Д'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 2, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 2, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 44: { # 'Е'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 1, # 'Й'
+ 33: 2, # 'К'
+ 46: 2, # 'Л'
+ 38: 1, # 'М'
+ 36: 2, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 2, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 2, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 0, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 0, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 0, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 2, # 'н'
+ 4: 0, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 1, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 55: { # 'Ж'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 47: { # 'З'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 2, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 1, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 1, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 40: { # 'И'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 2, # 'З'
+ 40: 1, # 'И'
+ 59: 1, # 'Й'
+ 33: 2, # 'К'
+ 46: 2, # 'Л'
+ 38: 2, # 'М'
+ 36: 2, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 0, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 2, # 'Я'
+ 1: 1, # 'а'
+ 18: 1, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 1, # 'д'
+ 3: 1, # 'е'
+ 23: 0, # 'ж'
+ 15: 3, # 'з'
+ 2: 0, # 'и'
+ 26: 1, # 'й'
+ 12: 1, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 2, # 'н'
+ 4: 0, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 0, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 59: { # 'Й'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 1, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 33: { # 'К'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 2, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 1, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 3, # 'р'
+ 8: 1, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 46: { # 'Л'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 2, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 0, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 38: { # 'М'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 0, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 36: { # 'Н'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 2, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 1, # 'Й'
+ 33: 2, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 1, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 1, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 2, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 41: { # 'О'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 1, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 1, # 'Й'
+ 33: 2, # 'К'
+ 46: 2, # 'Л'
+ 38: 2, # 'М'
+ 36: 2, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 1, # 'а'
+ 18: 2, # 'б'
+ 9: 2, # 'в'
+ 20: 2, # 'г'
+ 11: 1, # 'д'
+ 3: 1, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 0, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 0, # 'о'
+ 13: 2, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 2, # 'ч'
+ 27: 0, # 'ш'
+ 24: 2, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 30: { # 'П'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 2, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 3, # 'л'
+ 14: 0, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 3, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 2, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 39: { # 'Р'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 2, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 1, # 'с'
+ 5: 0, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 28: { # 'С'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 3, # 'А'
+ 32: 2, # 'Б'
+ 35: 2, # 'В'
+ 43: 1, # 'Г'
+ 37: 2, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 2, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 2, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 1, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 2, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 34: { # 'Т'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 2, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 2, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 2, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 1, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 1, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 3, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 51: { # 'У'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 2, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 0, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 2, # 'Т'
+ 51: 0, # 'У'
+ 48: 1, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 2, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 2, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 2, # 'с'
+ 5: 1, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 48: { # 'Ф'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 2, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 1, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 2, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 49: { # 'Х'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 1, # 'П'
+ 39: 1, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 53: { # 'Ц'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 2, # 'И'
+ 59: 0, # 'Й'
+ 33: 2, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 1, # 'Р'
+ 28: 2, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 2, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 1, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 50: { # 'Ч'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 2, # 'А'
+ 32: 1, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 1, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 2, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 54: { # 'Ш'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 1, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 1, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 2, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 2, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 57: { # 'Щ'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 1, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 1, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 1, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 1, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 61: { # 'Ъ'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 1, # 'Д'
+ 44: 0, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 1, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 2, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 0, # 'О'
+ 30: 1, # 'П'
+ 39: 2, # 'Р'
+ 28: 1, # 'С'
+ 34: 1, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 1, # 'Х'
+ 53: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 54: 1, # 'Ш'
+ 57: 1, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 1, # 'л'
+ 14: 0, # 'м'
+ 6: 1, # 'н'
+ 4: 0, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 60: { # 'Ю'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 1, # 'Б'
+ 35: 0, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 0, # 'Е'
+ 55: 1, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 0, # 'М'
+ 36: 1, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 1, # 'Р'
+ 28: 1, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 2, # 'г'
+ 11: 1, # 'д'
+ 3: 0, # 'е'
+ 23: 2, # 'ж'
+ 15: 1, # 'з'
+ 2: 1, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 0, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 56: { # 'Я'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 1, # 'Б'
+ 35: 1, # 'В'
+ 43: 1, # 'Г'
+ 37: 1, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 1, # 'Л'
+ 38: 1, # 'М'
+ 36: 1, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 1, # 'С'
+ 34: 2, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 1, # 'и'
+ 26: 1, # 'й'
+ 12: 1, # 'к'
+ 10: 1, # 'л'
+ 14: 2, # 'м'
+ 6: 2, # 'н'
+ 4: 0, # 'о'
+ 13: 2, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 1: { # 'а'
+ 63: 1, # 'e'
+ 45: 1, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 1, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 3, # 'и'
+ 26: 3, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 3, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 3, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 18: { # 'б'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 3, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 0, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 2, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 3, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 9: { # 'в'
+ 63: 1, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 1, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 0, # 'в'
+ 20: 2, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 3, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 2, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 3, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 20: { # 'г'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 3, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 11: { # 'д'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 2, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 1, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 3: { # 'е'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 2, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 2, # 'и'
+ 26: 3, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 3, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 3, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 23: { # 'ж'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 15: { # 'з'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 2: { # 'и'
+ 63: 1, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 1, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 1, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 1, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 1, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 3, # 'и'
+ 26: 3, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 3, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 3, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 26: { # 'й'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 2, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 2, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 2, # 'з'
+ 2: 1, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 2, # 'ф'
+ 25: 1, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 12: { # 'к'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 1, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 1, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 3, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 10: { # 'л'
+ 63: 1, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 1, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 1, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 2, # 'п'
+ 7: 2, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 2, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 3, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 14: { # 'м'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 1, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 3, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 1, # 'т'
+ 19: 3, # 'у'
+ 29: 2, # 'ф'
+ 25: 1, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 6: { # 'н'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 1, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 2, # 'б'
+ 9: 2, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 2, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 3, # 'ф'
+ 25: 2, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 2, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 4: { # 'о'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 2, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 3, # 'и'
+ 26: 3, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 2, # 'у'
+ 29: 3, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 3, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 13: { # 'п'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 3, # 'л'
+ 14: 1, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 7: { # 'р'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 3, # 'е'
+ 23: 3, # 'ж'
+ 15: 2, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 2, # 'п'
+ 7: 1, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 2, # 'ф'
+ 25: 3, # 'х'
+ 22: 3, # 'ц'
+ 21: 2, # 'ч'
+ 27: 3, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 8: { # 'с'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 2, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 1, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 2, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 2, # 'ш'
+ 24: 0, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 5: { # 'т'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 2, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 3, # 'у'
+ 29: 1, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 2, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 3, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 2, # 'ю'
+ 16: 3, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 19: { # 'у'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 2, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 2, # 'и'
+ 26: 2, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 2, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 3, # 'ч'
+ 27: 3, # 'ш'
+ 24: 2, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 29: { # 'ф'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 1, # 'в'
+ 20: 1, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 2, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 2, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 25: { # 'х'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 3, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 2, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 1, # 'п'
+ 7: 3, # 'р'
+ 8: 1, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 1, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 22: { # 'ц'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 2, # 'в'
+ 20: 1, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 1, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 2, # 'к'
+ 10: 1, # 'л'
+ 14: 1, # 'м'
+ 6: 1, # 'н'
+ 4: 2, # 'о'
+ 13: 1, # 'п'
+ 7: 1, # 'р'
+ 8: 1, # 'с'
+ 5: 1, # 'т'
+ 19: 2, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 1, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 0, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 21: { # 'ч'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 1, # 'б'
+ 9: 3, # 'в'
+ 20: 1, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 1, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 2, # 'р'
+ 8: 0, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 1, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 27: { # 'ш'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 2, # 'в'
+ 20: 0, # 'г'
+ 11: 1, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 3, # 'к'
+ 10: 2, # 'л'
+ 14: 1, # 'м'
+ 6: 3, # 'н'
+ 4: 2, # 'о'
+ 13: 2, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 1, # 'т'
+ 19: 2, # 'у'
+ 29: 1, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 1, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 2, # 'ъ'
+ 52: 1, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 24: { # 'щ'
+ 63: 1, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 3, # 'а'
+ 18: 0, # 'б'
+ 9: 1, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 3, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 3, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 2, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 1, # 'р'
+ 8: 0, # 'с'
+ 5: 2, # 'т'
+ 19: 3, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 1, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 2, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 17: { # 'ъ'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 3, # 'г'
+ 11: 3, # 'д'
+ 3: 2, # 'е'
+ 23: 3, # 'ж'
+ 15: 3, # 'з'
+ 2: 1, # 'и'
+ 26: 2, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 3, # 'о'
+ 13: 3, # 'п'
+ 7: 3, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 2, # 'х'
+ 22: 2, # 'ц'
+ 21: 3, # 'ч'
+ 27: 2, # 'ш'
+ 24: 3, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 2, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 52: { # 'ь'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 1, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 1, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 1, # 'н'
+ 4: 3, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 1, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 1, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 1, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 42: { # 'ю'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 1, # 'а'
+ 18: 2, # 'б'
+ 9: 1, # 'в'
+ 20: 2, # 'г'
+ 11: 2, # 'д'
+ 3: 1, # 'е'
+ 23: 2, # 'ж'
+ 15: 2, # 'з'
+ 2: 1, # 'и'
+ 26: 1, # 'й'
+ 12: 2, # 'к'
+ 10: 2, # 'л'
+ 14: 2, # 'м'
+ 6: 2, # 'н'
+ 4: 1, # 'о'
+ 13: 1, # 'п'
+ 7: 2, # 'р'
+ 8: 2, # 'с'
+ 5: 2, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 1, # 'х'
+ 22: 2, # 'ц'
+ 21: 3, # 'ч'
+ 27: 1, # 'ш'
+ 24: 1, # 'щ'
+ 17: 1, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 16: { # 'я'
+ 63: 0, # 'e'
+ 45: 1, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 3, # 'б'
+ 9: 3, # 'в'
+ 20: 2, # 'г'
+ 11: 3, # 'д'
+ 3: 2, # 'е'
+ 23: 1, # 'ж'
+ 15: 2, # 'з'
+ 2: 1, # 'и'
+ 26: 2, # 'й'
+ 12: 3, # 'к'
+ 10: 3, # 'л'
+ 14: 3, # 'м'
+ 6: 3, # 'н'
+ 4: 1, # 'о'
+ 13: 2, # 'п'
+ 7: 2, # 'р'
+ 8: 3, # 'с'
+ 5: 3, # 'т'
+ 19: 1, # 'у'
+ 29: 1, # 'ф'
+ 25: 3, # 'х'
+ 22: 2, # 'ц'
+ 21: 1, # 'ч'
+ 27: 1, # 'ш'
+ 24: 2, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 1, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 58: { # 'є'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 0, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+ 62: { # '№'
+ 63: 0, # 'e'
+ 45: 0, # '\xad'
+ 31: 0, # 'А'
+ 32: 0, # 'Б'
+ 35: 0, # 'В'
+ 43: 0, # 'Г'
+ 37: 0, # 'Д'
+ 44: 0, # 'Е'
+ 55: 0, # 'Ж'
+ 47: 0, # 'З'
+ 40: 0, # 'И'
+ 59: 0, # 'Й'
+ 33: 0, # 'К'
+ 46: 0, # 'Л'
+ 38: 0, # 'М'
+ 36: 0, # 'Н'
+ 41: 0, # 'О'
+ 30: 0, # 'П'
+ 39: 0, # 'Р'
+ 28: 0, # 'С'
+ 34: 0, # 'Т'
+ 51: 0, # 'У'
+ 48: 0, # 'Ф'
+ 49: 0, # 'Х'
+ 53: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 54: 0, # 'Ш'
+ 57: 0, # 'Щ'
+ 61: 0, # 'Ъ'
+ 60: 0, # 'Ю'
+ 56: 0, # 'Я'
+ 1: 0, # 'а'
+ 18: 0, # 'б'
+ 9: 0, # 'в'
+ 20: 0, # 'г'
+ 11: 0, # 'д'
+ 3: 0, # 'е'
+ 23: 0, # 'ж'
+ 15: 0, # 'з'
+ 2: 0, # 'и'
+ 26: 0, # 'й'
+ 12: 0, # 'к'
+ 10: 0, # 'л'
+ 14: 0, # 'м'
+ 6: 0, # 'н'
+ 4: 0, # 'о'
+ 13: 0, # 'п'
+ 7: 0, # 'р'
+ 8: 0, # 'с'
+ 5: 0, # 'т'
+ 19: 0, # 'у'
+ 29: 0, # 'ф'
+ 25: 0, # 'х'
+ 22: 0, # 'ц'
+ 21: 0, # 'ч'
+ 27: 0, # 'ш'
+ 24: 0, # 'щ'
+ 17: 0, # 'ъ'
+ 52: 0, # 'ь'
+ 42: 0, # 'ю'
+ 16: 0, # 'я'
+ 58: 0, # 'є'
+ 62: 0, # '№'
+ },
+}
+
+# 255: Undefined characters that did not exist in the training text
+# 254: Carriage return / line feed ('\r', '\n')
+# 253: Symbols and punctuation that do not belong to words
+# 252: Digits '0'-'9'
+# 251: Control characters
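+
+# For example, in ISO_8859_5_BULGARIAN_CHAR_TO_ORDER below, byte 208 (0xD0,
+# Cyrillic 'а') maps to order 1, the most frequent Bulgarian letter, while
+# byte 48 ('0') maps to 252 and is ignored for sequence analysis. A minimal
+# lookup sketch (an illustration of the convention, not code from this
+# module; the 64-order cutoff is chardet's SingleByteCharSetProber.SAMPLE_SIZE):
+#
+#     order = ISO_8859_5_BULGARIAN_CHAR_TO_ORDER[byte_value]
+#     if order < 64:
+#         # one of the 64 most frequent letters; it participates in the
+#         # bigram statistics of BULGARIAN_LANG_MODEL above
+#         ...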
+
+# Character Mapping Table(s):
+ISO_8859_5_BULGARIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 77, # 'A'
+ 66: 90, # 'B'
+ 67: 99, # 'C'
+ 68: 100, # 'D'
+ 69: 72, # 'E'
+ 70: 109, # 'F'
+ 71: 107, # 'G'
+ 72: 101, # 'H'
+ 73: 79, # 'I'
+ 74: 185, # 'J'
+ 75: 81, # 'K'
+ 76: 102, # 'L'
+ 77: 76, # 'M'
+ 78: 94, # 'N'
+ 79: 82, # 'O'
+ 80: 110, # 'P'
+ 81: 186, # 'Q'
+ 82: 108, # 'R'
+ 83: 91, # 'S'
+ 84: 74, # 'T'
+ 85: 119, # 'U'
+ 86: 84, # 'V'
+ 87: 96, # 'W'
+ 88: 111, # 'X'
+ 89: 187, # 'Y'
+ 90: 115, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 65, # 'a'
+ 98: 69, # 'b'
+ 99: 70, # 'c'
+ 100: 66, # 'd'
+ 101: 63, # 'e'
+ 102: 68, # 'f'
+ 103: 112, # 'g'
+ 104: 103, # 'h'
+ 105: 92, # 'i'
+ 106: 194, # 'j'
+ 107: 104, # 'k'
+ 108: 95, # 'l'
+ 109: 86, # 'm'
+ 110: 87, # 'n'
+ 111: 71, # 'o'
+ 112: 116, # 'p'
+ 113: 195, # 'q'
+ 114: 85, # 'r'
+ 115: 93, # 's'
+ 116: 97, # 't'
+ 117: 113, # 'u'
+ 118: 196, # 'v'
+ 119: 197, # 'w'
+ 120: 198, # 'x'
+ 121: 199, # 'y'
+ 122: 200, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 194, # '\x80'
+ 129: 195, # '\x81'
+ 130: 196, # '\x82'
+ 131: 197, # '\x83'
+ 132: 198, # '\x84'
+ 133: 199, # '\x85'
+ 134: 200, # '\x86'
+ 135: 201, # '\x87'
+ 136: 202, # '\x88'
+ 137: 203, # '\x89'
+ 138: 204, # '\x8a'
+ 139: 205, # '\x8b'
+ 140: 206, # '\x8c'
+ 141: 207, # '\x8d'
+ 142: 208, # '\x8e'
+ 143: 209, # '\x8f'
+ 144: 210, # '\x90'
+ 145: 211, # '\x91'
+ 146: 212, # '\x92'
+ 147: 213, # '\x93'
+ 148: 214, # '\x94'
+ 149: 215, # '\x95'
+ 150: 216, # '\x96'
+ 151: 217, # '\x97'
+ 152: 218, # '\x98'
+ 153: 219, # '\x99'
+ 154: 220, # '\x9a'
+ 155: 221, # '\x9b'
+ 156: 222, # '\x9c'
+ 157: 223, # '\x9d'
+ 158: 224, # '\x9e'
+ 159: 225, # '\x9f'
+ 160: 81, # '\xa0'
+ 161: 226, # 'Ё'
+ 162: 227, # 'Ђ'
+ 163: 228, # 'Ѓ'
+ 164: 229, # 'Є'
+ 165: 230, # 'Ѕ'
+ 166: 105, # 'І'
+ 167: 231, # 'Ї'
+ 168: 232, # 'Ј'
+ 169: 233, # 'Љ'
+ 170: 234, # 'Њ'
+ 171: 235, # 'Ћ'
+ 172: 236, # 'Ќ'
+ 173: 45, # '\xad'
+ 174: 237, # 'Ў'
+ 175: 238, # 'Џ'
+ 176: 31, # 'А'
+ 177: 32, # 'Б'
+ 178: 35, # 'В'
+ 179: 43, # 'Г'
+ 180: 37, # 'Д'
+ 181: 44, # 'Е'
+ 182: 55, # 'Ж'
+ 183: 47, # 'З'
+ 184: 40, # 'И'
+ 185: 59, # 'Й'
+ 186: 33, # 'К'
+ 187: 46, # 'Л'
+ 188: 38, # 'М'
+ 189: 36, # 'Н'
+ 190: 41, # 'О'
+ 191: 30, # 'П'
+ 192: 39, # 'Р'
+ 193: 28, # 'С'
+ 194: 34, # 'Т'
+ 195: 51, # 'У'
+ 196: 48, # 'Ф'
+ 197: 49, # 'Х'
+ 198: 53, # 'Ц'
+ 199: 50, # 'Ч'
+ 200: 54, # 'Ш'
+ 201: 57, # 'Щ'
+ 202: 61, # 'Ъ'
+ 203: 239, # 'Ы'
+ 204: 67, # 'Ь'
+ 205: 240, # 'Э'
+ 206: 60, # 'Ю'
+ 207: 56, # 'Я'
+ 208: 1, # 'а'
+ 209: 18, # 'б'
+ 210: 9, # 'в'
+ 211: 20, # 'г'
+ 212: 11, # 'д'
+ 213: 3, # 'е'
+ 214: 23, # 'ж'
+ 215: 15, # 'з'
+ 216: 2, # 'и'
+ 217: 26, # 'й'
+ 218: 12, # 'к'
+ 219: 10, # 'л'
+ 220: 14, # 'м'
+ 221: 6, # 'н'
+ 222: 4, # 'о'
+ 223: 13, # 'п'
+ 224: 7, # 'р'
+ 225: 8, # 'с'
+ 226: 5, # 'т'
+ 227: 19, # 'у'
+ 228: 29, # 'ф'
+ 229: 25, # 'х'
+ 230: 22, # 'ц'
+ 231: 21, # 'ч'
+ 232: 27, # 'ш'
+ 233: 24, # 'щ'
+ 234: 17, # 'ъ'
+ 235: 75, # 'ы'
+ 236: 52, # 'ь'
+ 237: 241, # 'э'
+ 238: 42, # 'ю'
+ 239: 16, # 'я'
+ 240: 62, # '№'
+ 241: 242, # 'ё'
+ 242: 243, # 'ђ'
+ 243: 244, # 'ѓ'
+ 244: 58, # 'є'
+ 245: 245, # 'ѕ'
+ 246: 98, # 'і'
+ 247: 246, # 'ї'
+ 248: 247, # 'ј'
+ 249: 248, # 'љ'
+ 250: 249, # 'њ'
+ 251: 250, # 'ћ'
+ 252: 251, # 'ќ'
+ 253: 91, # '§'
+ 254: 252, # 'ў'
+ 255: 253, # 'џ'
+}
+
+ISO_8859_5_BULGARIAN_MODEL = SingleByteCharSetModel(
+ charset_name="ISO-8859-5",
+ language="Bulgarian",
+ char_to_order_map=ISO_8859_5_BULGARIAN_CHAR_TO_ORDER,
+ language_model=BULGARIAN_LANG_MODEL,
+ typical_positive_ratio=0.969392,
+ keep_ascii_letters=False,
+ alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
+)
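+
+# The windows-1251 tables below reuse the same BULGARIAN_LANG_MODEL: the
+# language model is indexed by frequency order rather than raw byte value,
+# so one bigram table serves several encodings of the same language. As an
+# illustration (values from the two maps in this module):
+#
+#     # 'а' is byte 0xD0 in ISO-8859-5 but 0xE0 in windows-1251; both map
+#     # to order 1 and therefore to the same row of the language model.
+#     assert ISO_8859_5_BULGARIAN_CHAR_TO_ORDER[0xD0] == 1
+#     assert WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER[0xE0] == 1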
+
+WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 77, # 'A'
+ 66: 90, # 'B'
+ 67: 99, # 'C'
+ 68: 100, # 'D'
+ 69: 72, # 'E'
+ 70: 109, # 'F'
+ 71: 107, # 'G'
+ 72: 101, # 'H'
+ 73: 79, # 'I'
+ 74: 185, # 'J'
+ 75: 81, # 'K'
+ 76: 102, # 'L'
+ 77: 76, # 'M'
+ 78: 94, # 'N'
+ 79: 82, # 'O'
+ 80: 110, # 'P'
+ 81: 186, # 'Q'
+ 82: 108, # 'R'
+ 83: 91, # 'S'
+ 84: 74, # 'T'
+ 85: 119, # 'U'
+ 86: 84, # 'V'
+ 87: 96, # 'W'
+ 88: 111, # 'X'
+ 89: 187, # 'Y'
+ 90: 115, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 65, # 'a'
+ 98: 69, # 'b'
+ 99: 70, # 'c'
+ 100: 66, # 'd'
+ 101: 63, # 'e'
+ 102: 68, # 'f'
+ 103: 112, # 'g'
+ 104: 103, # 'h'
+ 105: 92, # 'i'
+ 106: 194, # 'j'
+ 107: 104, # 'k'
+ 108: 95, # 'l'
+ 109: 86, # 'm'
+ 110: 87, # 'n'
+ 111: 71, # 'o'
+ 112: 116, # 'p'
+ 113: 195, # 'q'
+ 114: 85, # 'r'
+ 115: 93, # 's'
+ 116: 97, # 't'
+ 117: 113, # 'u'
+ 118: 196, # 'v'
+ 119: 197, # 'w'
+ 120: 198, # 'x'
+ 121: 199, # 'y'
+ 122: 200, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 206, # 'Ђ'
+ 129: 207, # 'Ѓ'
+ 130: 208, # '‚'
+ 131: 209, # 'ѓ'
+ 132: 210, # '„'
+ 133: 211, # '…'
+ 134: 212, # '†'
+ 135: 213, # '‡'
+ 136: 120, # '€'
+ 137: 214, # '‰'
+ 138: 215, # 'Љ'
+ 139: 216, # '‹'
+ 140: 217, # 'Њ'
+ 141: 218, # 'Ќ'
+ 142: 219, # 'Ћ'
+ 143: 220, # 'Џ'
+ 144: 221, # 'ђ'
+ 145: 78, # '‘'
+ 146: 64, # '’'
+ 147: 83, # '“'
+ 148: 121, # '”'
+ 149: 98, # '•'
+ 150: 117, # '–'
+ 151: 105, # '—'
+ 152: 222, # None
+ 153: 223, # '™'
+ 154: 224, # 'љ'
+ 155: 225, # '›'
+ 156: 226, # 'њ'
+ 157: 227, # 'ќ'
+ 158: 228, # 'ћ'
+ 159: 229, # 'џ'
+ 160: 88, # '\xa0'
+ 161: 230, # 'Ў'
+ 162: 231, # 'ў'
+ 163: 232, # 'Ј'
+ 164: 233, # '¤'
+ 165: 122, # 'Ґ'
+ 166: 89, # '¦'
+ 167: 106, # '§'
+ 168: 234, # 'Ё'
+ 169: 235, # '©'
+ 170: 236, # 'Є'
+ 171: 237, # '«'
+ 172: 238, # '¬'
+ 173: 45, # '\xad'
+ 174: 239, # '®'
+ 175: 240, # 'Ї'
+ 176: 73, # '°'
+ 177: 80, # '±'
+ 178: 118, # 'І'
+ 179: 114, # 'і'
+ 180: 241, # 'ґ'
+ 181: 242, # 'µ'
+ 182: 243, # '¶'
+ 183: 244, # '·'
+ 184: 245, # 'ё'
+ 185: 62, # '№'
+ 186: 58, # 'є'
+ 187: 246, # '»'
+ 188: 247, # 'ј'
+ 189: 248, # 'Ѕ'
+ 190: 249, # 'ѕ'
+ 191: 250, # 'ї'
+ 192: 31, # 'А'
+ 193: 32, # 'Б'
+ 194: 35, # 'В'
+ 195: 43, # 'Г'
+ 196: 37, # 'Д'
+ 197: 44, # 'Е'
+ 198: 55, # 'Ж'
+ 199: 47, # 'З'
+ 200: 40, # 'И'
+ 201: 59, # 'Й'
+ 202: 33, # 'К'
+ 203: 46, # 'Л'
+ 204: 38, # 'М'
+ 205: 36, # 'Н'
+ 206: 41, # 'О'
+ 207: 30, # 'П'
+ 208: 39, # 'Р'
+ 209: 28, # 'С'
+ 210: 34, # 'Т'
+ 211: 51, # 'У'
+ 212: 48, # 'Ф'
+ 213: 49, # 'Х'
+ 214: 53, # 'Ц'
+ 215: 50, # 'Ч'
+ 216: 54, # 'Ш'
+ 217: 57, # 'Щ'
+ 218: 61, # 'Ъ'
+ 219: 251, # 'Ы'
+ 220: 67, # 'Ь'
+ 221: 252, # 'Э'
+ 222: 60, # 'Ю'
+ 223: 56, # 'Я'
+ 224: 1, # 'а'
+ 225: 18, # 'б'
+ 226: 9, # 'в'
+ 227: 20, # 'г'
+ 228: 11, # 'д'
+ 229: 3, # 'е'
+ 230: 23, # 'ж'
+ 231: 15, # 'з'
+ 232: 2, # 'и'
+ 233: 26, # 'й'
+ 234: 12, # 'к'
+ 235: 10, # 'л'
+ 236: 14, # 'м'
+ 237: 6, # 'н'
+ 238: 4, # 'о'
+ 239: 13, # 'п'
+ 240: 7, # 'р'
+ 241: 8, # 'с'
+ 242: 5, # 'т'
+ 243: 19, # 'у'
+ 244: 29, # 'ф'
+ 245: 25, # 'х'
+ 246: 22, # 'ц'
+ 247: 21, # 'ч'
+ 248: 27, # 'ш'
+ 249: 24, # 'щ'
+ 250: 17, # 'ъ'
+ 251: 75, # 'ы'
+ 252: 52, # 'ь'
+ 253: 253, # 'э'
+ 254: 42, # 'ю'
+ 255: 16, # 'я'
+}
+
+WINDOWS_1251_BULGARIAN_MODEL = SingleByteCharSetModel(
+ charset_name="windows-1251",
+ language="Bulgarian",
+ char_to_order_map=WINDOWS_1251_BULGARIAN_CHAR_TO_ORDER,
+ language_model=BULGARIAN_LANG_MODEL,
+ typical_positive_ratio=0.969392,
+ keep_ascii_letters=False,
+ alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
+)
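+
+# A minimal usage sketch: these model objects are consumed by
+# SingleByteCharSetProber elsewhere in this package (the import path and
+# calls below assume chardet's prober API; the input bytes are a
+# placeholder):
+#
+#     from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
+#
+#     prober = SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL)
+#     prober.feed(raw_bytes)  # raw bytes in an unknown encoding
+#     print(prober.charset_name, prober.get_confidence())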
diff --git a/third_party/python/pip/pip/_vendor/chardet/langgreekmodel.py b/third_party/python/pip/pip/_vendor/chardet/langgreekmodel.py
new file mode 100644
index 0000000000..cfb8639e56
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/langgreekmodel.py
@@ -0,0 +1,4397 @@
+from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
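+
+# Each cell rates how likely the row character is to be immediately
+# followed by the column character in Greek text. Two worked readings of
+# the table below (values taken from this file):
+#
+#     GREEK_LANG_MODEL[1][2] == 3    # 'α' followed by 'τ': Positive
+#     GREEK_LANG_MODEL[36][36] == 0  # '·' followed by '·': Negative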
+
+GREEK_LANG_MODEL = {
+ 60: { # 'e'
+ 60: 2, # 'e'
+ 55: 1, # 'o'
+ 58: 2, # 't'
+ 36: 1, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 55: { # 'o'
+ 60: 0, # 'e'
+ 55: 2, # 'o'
+ 58: 2, # 't'
+ 36: 1, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 1, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 1, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 58: { # 't'
+ 60: 2, # 'e'
+ 55: 1, # 'o'
+ 58: 1, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 1, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 36: { # '·'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 61: { # 'Ά'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 1, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 1, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 46: { # 'Έ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 2, # 'β'
+ 20: 2, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 2, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 1, # 'σ'
+ 2: 2, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 54: { # 'Ό'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 2, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 31: { # 'Α'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 2, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 1, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 2, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 1, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 2, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 2, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 1, # 'θ'
+ 5: 0, # 'ι'
+ 11: 2, # 'κ'
+ 16: 3, # 'λ'
+ 10: 2, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 2, # 'ς'
+ 7: 2, # 'σ'
+ 2: 0, # 'τ'
+ 12: 3, # 'υ'
+ 28: 2, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 51: { # 'Β'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 1, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 1, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 2, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 43: { # 'Γ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 1, # 'Α'
+ 51: 0, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 1, # 'Κ'
+ 53: 1, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 1, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 41: { # 'Δ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 2, # 'ή'
+ 15: 2, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 1, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 34: { # 'Ε'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 2, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 1, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 2, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 3, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 1, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 1, # 'θ'
+ 5: 2, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 2, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 2, # 'τ'
+ 12: 2, # 'υ'
+ 28: 2, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 1, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 40: { # 'Η'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 1, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 2, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 1, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 1, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 1, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 52: { # 'Θ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 1, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 1, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 47: { # 'Ι'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 1, # 'Β'
+ 43: 1, # 'Γ'
+ 41: 2, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 2, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 1, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 1, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 1, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 44: { # 'Κ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 1, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 1, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 1, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 1, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 53: { # 'Λ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 1, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 38: { # 'Μ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 2, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 2, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 49: { # 'Ν'
+ 60: 2, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 1, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 1, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 59: { # 'Ξ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 1, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 39: { # 'Ο'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 1, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 2, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 2, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 2, # 'Φ'
+ 50: 2, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 2, # 'κ'
+ 16: 2, # 'λ'
+ 10: 2, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 2, # 'τ'
+ 12: 2, # 'υ'
+ 28: 1, # 'φ'
+ 23: 1, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 35: { # 'Π'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 2, # 'Λ'
+ 38: 1, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 1, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 1, # 'έ'
+ 22: 1, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 48: { # 'Ρ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 1, # 'Γ'
+ 41: 1, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 1, # 'Τ'
+ 45: 1, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 1, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 1, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 37: { # 'Σ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 1, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 2, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 2, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 2, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 2, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 2, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 2, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 33: { # 'Τ'
+ 60: 0, # 'e'
+ 55: 1, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 2, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 2, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 1, # 'Τ'
+ 45: 1, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 2, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 2, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 45: { # 'Υ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 2, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 2, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 1, # 'Λ'
+ 38: 2, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 2, # 'Π'
+ 48: 1, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 56: { # 'Φ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 1, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 1, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 2, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 2, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 1, # 'ύ'
+ 27: 1, # 'ώ'
+ },
+ 50: { # 'Χ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 1, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 2, # 'Ε'
+ 40: 2, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 2, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 1, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 1, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 1, # 'Χ'
+ 57: 1, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 2, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 2, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 57: { # 'Ω'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 1, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 1, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 2, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 2, # 'Ρ'
+ 37: 2, # 'Σ'
+ 33: 2, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 2, # 'ρ'
+ 14: 2, # 'ς'
+ 7: 2, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 1, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 17: { # 'ά'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 3, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 2, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 3, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 18: { # 'έ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 3, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 3, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 22: { # 'ή'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 1, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 15: { # 'ί'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 3, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 1, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 1: { # 'α'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 0, # 'ή'
+ 15: 3, # 'ί'
+ 1: 0, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 2, # 'ε'
+ 32: 3, # 'ζ'
+ 13: 1, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 29: { # 'β'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 2, # 'γ'
+ 21: 2, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 3, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 2, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 20: { # 'γ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 3, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 3, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 21: { # 'δ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 3: { # 'ε'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 3, # 'ί'
+ 1: 2, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 2, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 2, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 32: { # 'ζ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 2, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 1, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 13: { # 'η'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 25: { # 'θ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 1, # 'λ'
+ 10: 3, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 5: { # 'ι'
+ 60: 0, # 'e'
+ 55: 1, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 0, # 'ί'
+ 1: 3, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 11: { # 'κ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 2, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 2, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 2, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 16: { # 'λ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 1, # 'β'
+ 20: 2, # 'γ'
+ 21: 1, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 2, # 'θ'
+ 5: 3, # 'ι'
+ 11: 2, # 'κ'
+ 16: 3, # 'λ'
+ 10: 2, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 2, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 10: { # 'μ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 1, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 3, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 2, # 'υ'
+ 28: 3, # 'φ'
+ 23: 0, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 6: { # 'ν'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 1, # 'λ'
+ 10: 0, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 30: { # 'ξ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 2, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 2, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 1, # 'ώ'
+ },
+ 4: { # 'ο'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 2, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 1, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 9: { # 'π'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 3, # 'λ'
+ 10: 0, # 'μ'
+ 6: 2, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 2, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 8: { # 'ρ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 1, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 2, # 'π'
+ 8: 2, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 2, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 14: { # 'ς'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 2, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 0, # 'θ'
+ 5: 0, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 0, # 'τ'
+ 12: 0, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 7: { # 'σ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 3, # 'β'
+ 20: 0, # 'γ'
+ 21: 2, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 3, # 'θ'
+ 5: 3, # 'ι'
+ 11: 3, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 3, # 'φ'
+ 23: 3, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 2: { # 'τ'
+ 60: 0, # 'e'
+ 55: 2, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 3, # 'ι'
+ 11: 2, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 2, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 12: { # 'υ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 2, # 'ί'
+ 1: 3, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 2, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 2, # 'η'
+ 25: 3, # 'θ'
+ 5: 2, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 3, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 2, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 28: { # 'φ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 3, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 2, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 0, # 'μ'
+ 6: 1, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 1, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 2, # 'ύ'
+ 27: 2, # 'ώ'
+ },
+ 23: { # 'χ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 3, # 'ά'
+ 18: 2, # 'έ'
+ 22: 3, # 'ή'
+ 15: 3, # 'ί'
+ 1: 3, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 2, # 'θ'
+ 5: 3, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 2, # 'μ'
+ 6: 3, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 0, # 'π'
+ 8: 3, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 3, # 'τ'
+ 12: 3, # 'υ'
+ 28: 0, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 3, # 'ω'
+ 19: 3, # 'ό'
+ 26: 3, # 'ύ'
+ 27: 3, # 'ώ'
+ },
+ 42: { # 'ψ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 2, # 'ά'
+ 18: 2, # 'έ'
+ 22: 1, # 'ή'
+ 15: 2, # 'ί'
+ 1: 2, # 'α'
+ 29: 0, # 'β'
+ 20: 0, # 'γ'
+ 21: 0, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 3, # 'η'
+ 25: 0, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 0, # 'λ'
+ 10: 0, # 'μ'
+ 6: 0, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 0, # 'π'
+ 8: 0, # 'ρ'
+ 14: 0, # 'ς'
+ 7: 0, # 'σ'
+ 2: 2, # 'τ'
+ 12: 1, # 'υ'
+ 28: 0, # 'φ'
+ 23: 0, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 24: { # 'ω'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 1, # 'ά'
+ 18: 0, # 'έ'
+ 22: 2, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 2, # 'β'
+ 20: 3, # 'γ'
+ 21: 2, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 0, # 'η'
+ 25: 3, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 0, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 2, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 19: { # 'ό'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 3, # 'β'
+ 20: 3, # 'γ'
+ 21: 3, # 'δ'
+ 3: 1, # 'ε'
+ 32: 2, # 'ζ'
+ 13: 2, # 'η'
+ 25: 2, # 'θ'
+ 5: 2, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 1, # 'ξ'
+ 4: 2, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 3, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 26: { # 'ύ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 2, # 'α'
+ 29: 2, # 'β'
+ 20: 2, # 'γ'
+ 21: 1, # 'δ'
+ 3: 3, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 2, # 'η'
+ 25: 3, # 'θ'
+ 5: 0, # 'ι'
+ 11: 3, # 'κ'
+ 16: 3, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 2, # 'ξ'
+ 4: 3, # 'ο'
+ 9: 3, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 2, # 'φ'
+ 23: 2, # 'χ'
+ 42: 2, # 'ψ'
+ 24: 2, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+ 27: { # 'ώ'
+ 60: 0, # 'e'
+ 55: 0, # 'o'
+ 58: 0, # 't'
+ 36: 0, # '·'
+ 61: 0, # 'Ά'
+ 46: 0, # 'Έ'
+ 54: 0, # 'Ό'
+ 31: 0, # 'Α'
+ 51: 0, # 'Β'
+ 43: 0, # 'Γ'
+ 41: 0, # 'Δ'
+ 34: 0, # 'Ε'
+ 40: 0, # 'Η'
+ 52: 0, # 'Θ'
+ 47: 0, # 'Ι'
+ 44: 0, # 'Κ'
+ 53: 0, # 'Λ'
+ 38: 0, # 'Μ'
+ 49: 0, # 'Ν'
+ 59: 0, # 'Ξ'
+ 39: 0, # 'Ο'
+ 35: 0, # 'Π'
+ 48: 0, # 'Ρ'
+ 37: 0, # 'Σ'
+ 33: 0, # 'Τ'
+ 45: 0, # 'Υ'
+ 56: 0, # 'Φ'
+ 50: 0, # 'Χ'
+ 57: 0, # 'Ω'
+ 17: 0, # 'ά'
+ 18: 0, # 'έ'
+ 22: 0, # 'ή'
+ 15: 0, # 'ί'
+ 1: 0, # 'α'
+ 29: 1, # 'β'
+ 20: 0, # 'γ'
+ 21: 3, # 'δ'
+ 3: 0, # 'ε'
+ 32: 0, # 'ζ'
+ 13: 1, # 'η'
+ 25: 2, # 'θ'
+ 5: 2, # 'ι'
+ 11: 0, # 'κ'
+ 16: 2, # 'λ'
+ 10: 3, # 'μ'
+ 6: 3, # 'ν'
+ 30: 1, # 'ξ'
+ 4: 0, # 'ο'
+ 9: 2, # 'π'
+ 8: 3, # 'ρ'
+ 14: 3, # 'ς'
+ 7: 3, # 'σ'
+ 2: 3, # 'τ'
+ 12: 0, # 'υ'
+ 28: 1, # 'φ'
+ 23: 1, # 'χ'
+ 42: 0, # 'ψ'
+ 24: 0, # 'ω'
+ 19: 0, # 'ό'
+ 26: 0, # 'ύ'
+ 27: 0, # 'ώ'
+ },
+}
+
+# 255: Undefined characters that did not exist in training text
+# 254: Carriage Return/Line Feed
+# 253: symbol (punctuation) that does not belong to a word
+# 252: 0 - 9
+# 251: Control characters
+
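
These special order values are consumed before the bigram model ever sees the text: only low orders (the most frequent letters) appear as keys in GREEK_LANG_MODEL, so the 251-255 markers and rare-letter orders simply drop out of scoring. A minimal sketch of that filtering step, assuming chardet's usual sample size of 64; the helper name is illustrative, not upstream API:

    def frequent_letter_orders(data, char_to_order_map, sample_size=64):
        # sample_size mirrors chardet's SAMPLE_SIZE constant; orders at or
        # above it (rare letters, digits, symbols, control codes) are
        # skipped entirely, so only frequent letters reach the bigram table.
        orders = (char_to_order_map[byte] for byte in data)
        return [order for order in orders if order < sample_size]
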
+# Character Mapping Table(s):
+WINDOWS_1253_GREEK_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 82, # 'A'
+ 66: 100, # 'B'
+ 67: 104, # 'C'
+ 68: 94, # 'D'
+ 69: 98, # 'E'
+ 70: 101, # 'F'
+ 71: 116, # 'G'
+ 72: 102, # 'H'
+ 73: 111, # 'I'
+ 74: 187, # 'J'
+ 75: 117, # 'K'
+ 76: 92, # 'L'
+ 77: 88, # 'M'
+ 78: 113, # 'N'
+ 79: 85, # 'O'
+ 80: 79, # 'P'
+ 81: 118, # 'Q'
+ 82: 105, # 'R'
+ 83: 83, # 'S'
+ 84: 67, # 'T'
+ 85: 114, # 'U'
+ 86: 119, # 'V'
+ 87: 95, # 'W'
+ 88: 99, # 'X'
+ 89: 109, # 'Y'
+ 90: 188, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 72, # 'a'
+ 98: 70, # 'b'
+ 99: 80, # 'c'
+ 100: 81, # 'd'
+ 101: 60, # 'e'
+ 102: 96, # 'f'
+ 103: 93, # 'g'
+ 104: 89, # 'h'
+ 105: 68, # 'i'
+ 106: 120, # 'j'
+ 107: 97, # 'k'
+ 108: 77, # 'l'
+ 109: 86, # 'm'
+ 110: 69, # 'n'
+ 111: 55, # 'o'
+ 112: 78, # 'p'
+ 113: 115, # 'q'
+ 114: 65, # 'r'
+ 115: 66, # 's'
+ 116: 58, # 't'
+ 117: 76, # 'u'
+ 118: 106, # 'v'
+ 119: 103, # 'w'
+ 120: 87, # 'x'
+ 121: 107, # 'y'
+ 122: 112, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 255, # '€'
+ 129: 255, # None
+ 130: 255, # '‚'
+ 131: 255, # 'ƒ'
+ 132: 255, # '„'
+ 133: 255, # '…'
+ 134: 255, # '†'
+ 135: 255, # '‡'
+ 136: 255, # None
+ 137: 255, # '‰'
+ 138: 255, # None
+ 139: 255, # '‹'
+ 140: 255, # None
+ 141: 255, # None
+ 142: 255, # None
+ 143: 255, # None
+ 144: 255, # None
+ 145: 255, # '‘'
+ 146: 255, # '’'
+ 147: 255, # '“'
+ 148: 255, # '”'
+ 149: 255, # '•'
+ 150: 255, # '–'
+ 151: 255, # '—'
+ 152: 255, # None
+ 153: 255, # '™'
+ 154: 255, # None
+ 155: 255, # '›'
+ 156: 255, # None
+ 157: 255, # None
+ 158: 255, # None
+ 159: 255, # None
+ 160: 253, # '\xa0'
+ 161: 233, # '΅'
+ 162: 61, # 'Ά'
+ 163: 253, # '£'
+ 164: 253, # '¤'
+ 165: 253, # '¥'
+ 166: 253, # '¦'
+ 167: 253, # '§'
+ 168: 253, # '¨'
+ 169: 253, # '©'
+ 170: 253, # None
+ 171: 253, # '«'
+ 172: 253, # '¬'
+ 173: 74, # '\xad'
+ 174: 253, # '®'
+ 175: 253, # '―'
+ 176: 253, # '°'
+ 177: 253, # '±'
+ 178: 253, # '²'
+ 179: 253, # '³'
+ 180: 247, # '΄'
+ 181: 253, # 'µ'
+ 182: 253, # '¶'
+ 183: 36, # '·'
+ 184: 46, # 'Έ'
+ 185: 71, # 'Ή'
+ 186: 73, # 'Ί'
+ 187: 253, # '»'
+ 188: 54, # 'Ό'
+ 189: 253, # '½'
+ 190: 108, # 'Ύ'
+ 191: 123, # 'Ώ'
+ 192: 110, # 'ΐ'
+ 193: 31, # 'Α'
+ 194: 51, # 'Β'
+ 195: 43, # 'Γ'
+ 196: 41, # 'Δ'
+ 197: 34, # 'Ε'
+ 198: 91, # 'Ζ'
+ 199: 40, # 'Η'
+ 200: 52, # 'Θ'
+ 201: 47, # 'Ι'
+ 202: 44, # 'Κ'
+ 203: 53, # 'Λ'
+ 204: 38, # 'Μ'
+ 205: 49, # 'Ν'
+ 206: 59, # 'Ξ'
+ 207: 39, # 'Ο'
+ 208: 35, # 'Π'
+ 209: 48, # 'Ρ'
+ 210: 250, # None
+ 211: 37, # 'Σ'
+ 212: 33, # 'Τ'
+ 213: 45, # 'Υ'
+ 214: 56, # 'Φ'
+ 215: 50, # 'Χ'
+ 216: 84, # 'Ψ'
+ 217: 57, # 'Ω'
+ 218: 120, # 'Ϊ'
+ 219: 121, # 'Ϋ'
+ 220: 17, # 'ά'
+ 221: 18, # 'έ'
+ 222: 22, # 'ή'
+ 223: 15, # 'ί'
+ 224: 124, # 'ΰ'
+ 225: 1, # 'α'
+ 226: 29, # 'β'
+ 227: 20, # 'γ'
+ 228: 21, # 'δ'
+ 229: 3, # 'ε'
+ 230: 32, # 'ζ'
+ 231: 13, # 'η'
+ 232: 25, # 'θ'
+ 233: 5, # 'ι'
+ 234: 11, # 'κ'
+ 235: 16, # 'λ'
+ 236: 10, # 'μ'
+ 237: 6, # 'ν'
+ 238: 30, # 'ξ'
+ 239: 4, # 'ο'
+ 240: 9, # 'π'
+ 241: 8, # 'ρ'
+ 242: 14, # 'ς'
+ 243: 7, # 'σ'
+ 244: 2, # 'τ'
+ 245: 12, # 'υ'
+ 246: 28, # 'φ'
+ 247: 23, # 'χ'
+ 248: 42, # 'ψ'
+ 249: 24, # 'ω'
+ 250: 64, # 'ϊ'
+ 251: 75, # 'ϋ'
+ 252: 19, # 'ό'
+ 253: 26, # 'ύ'
+ 254: 27, # 'ώ'
+ 255: 253, # None
+}
+
+WINDOWS_1253_GREEK_MODEL = SingleByteCharSetModel(
+ charset_name="windows-1253",
+ language="Greek",
+ char_to_order_map=WINDOWS_1253_GREEK_CHAR_TO_ORDER,
+ language_model=GREEK_LANG_MODEL,
+ typical_positive_ratio=0.982851,
+ keep_ascii_letters=False,
+ alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ",
+)
+
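
With the order map and GREEK_LANG_MODEL both defined, a detector can score candidate text by rating consecutive letter pairs. The sketch below shows the idea only; the real scoring lives in SingleByteCharSetProber, which also tracks sequence-category counters and compares the positive-bigram ratio against typical_positive_ratio. The function name and the hard-coded 64 cutoff are assumptions for illustration:

    def naive_windows_1253_score(data: bytes) -> float:
        # Map each byte to its frequency order, keeping frequent letters only.
        letters = [
            WINDOWS_1253_GREEK_CHAR_TO_ORDER[byte]
            for byte in data
            if WINDOWS_1253_GREEK_CHAR_TO_ORDER[byte] < 64
        ]
        if len(letters) < 2:
            return 0.0
        # Count adjacent pairs the bigram table rates 3 ("positive").
        positive = sum(
            1
            for prev, cur in zip(letters, letters[1:])
            if GREEK_LANG_MODEL.get(prev, {}).get(cur, 0) == 3
        )
        return positive / (len(letters) - 1)

For windows-1253 Greek text such as 'και' (bytes 0xEA 0xE1 0xE9, orders 11, 1, 5), both pairs rate 3 in the table above, so the ratio approaches 1 for genuine Greek and falls off sharply for bytes decoded under the wrong charset.
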
+ISO_8859_7_GREEK_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 82, # 'A'
+ 66: 100, # 'B'
+ 67: 104, # 'C'
+ 68: 94, # 'D'
+ 69: 98, # 'E'
+ 70: 101, # 'F'
+ 71: 116, # 'G'
+ 72: 102, # 'H'
+ 73: 111, # 'I'
+ 74: 187, # 'J'
+ 75: 117, # 'K'
+ 76: 92, # 'L'
+ 77: 88, # 'M'
+ 78: 113, # 'N'
+ 79: 85, # 'O'
+ 80: 79, # 'P'
+ 81: 118, # 'Q'
+ 82: 105, # 'R'
+ 83: 83, # 'S'
+ 84: 67, # 'T'
+ 85: 114, # 'U'
+ 86: 119, # 'V'
+ 87: 95, # 'W'
+ 88: 99, # 'X'
+ 89: 109, # 'Y'
+ 90: 188, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 72, # 'a'
+ 98: 70, # 'b'
+ 99: 80, # 'c'
+ 100: 81, # 'd'
+ 101: 60, # 'e'
+ 102: 96, # 'f'
+ 103: 93, # 'g'
+ 104: 89, # 'h'
+ 105: 68, # 'i'
+ 106: 120, # 'j'
+ 107: 97, # 'k'
+ 108: 77, # 'l'
+ 109: 86, # 'm'
+ 110: 69, # 'n'
+ 111: 55, # 'o'
+ 112: 78, # 'p'
+ 113: 115, # 'q'
+ 114: 65, # 'r'
+ 115: 66, # 's'
+ 116: 58, # 't'
+ 117: 76, # 'u'
+ 118: 106, # 'v'
+ 119: 103, # 'w'
+ 120: 87, # 'x'
+ 121: 107, # 'y'
+ 122: 112, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 255, # '\x80'
+ 129: 255, # '\x81'
+ 130: 255, # '\x82'
+ 131: 255, # '\x83'
+ 132: 255, # '\x84'
+ 133: 255, # '\x85'
+ 134: 255, # '\x86'
+ 135: 255, # '\x87'
+ 136: 255, # '\x88'
+ 137: 255, # '\x89'
+ 138: 255, # '\x8a'
+ 139: 255, # '\x8b'
+ 140: 255, # '\x8c'
+ 141: 255, # '\x8d'
+ 142: 255, # '\x8e'
+ 143: 255, # '\x8f'
+ 144: 255, # '\x90'
+ 145: 255, # '\x91'
+ 146: 255, # '\x92'
+ 147: 255, # '\x93'
+ 148: 255, # '\x94'
+ 149: 255, # '\x95'
+ 150: 255, # '\x96'
+ 151: 255, # '\x97'
+ 152: 255, # '\x98'
+ 153: 255, # '\x99'
+ 154: 255, # '\x9a'
+ 155: 255, # '\x9b'
+ 156: 255, # '\x9c'
+ 157: 255, # '\x9d'
+ 158: 255, # '\x9e'
+ 159: 255, # '\x9f'
+ 160: 253, # '\xa0'
+ 161: 233, # '‘'
+ 162: 90, # '’'
+ 163: 253, # '£'
+ 164: 253, # '€'
+ 165: 253, # '₯'
+ 166: 253, # '¦'
+ 167: 253, # '§'
+ 168: 253, # '¨'
+ 169: 253, # '©'
+ 170: 253, # 'ͺ'
+ 171: 253, # '«'
+ 172: 253, # '¬'
+ 173: 74, # '\xad'
+ 174: 253, # None
+ 175: 253, # '―'
+ 176: 253, # '°'
+ 177: 253, # '±'
+ 178: 253, # '²'
+ 179: 253, # '³'
+ 180: 247, # '΄'
+ 181: 248, # '΅'
+ 182: 61, # 'Ά'
+ 183: 36, # '·'
+ 184: 46, # 'Έ'
+ 185: 71, # 'Ή'
+ 186: 73, # 'Ί'
+ 187: 253, # '»'
+ 188: 54, # 'Ό'
+ 189: 253, # '½'
+ 190: 108, # 'Ύ'
+ 191: 123, # 'Ώ'
+ 192: 110, # 'ΐ'
+ 193: 31, # 'Α'
+ 194: 51, # 'Β'
+ 195: 43, # 'Γ'
+ 196: 41, # 'Δ'
+ 197: 34, # 'Ε'
+ 198: 91, # 'Ζ'
+ 199: 40, # 'Η'
+ 200: 52, # 'Θ'
+ 201: 47, # 'Ι'
+ 202: 44, # 'Κ'
+ 203: 53, # 'Λ'
+ 204: 38, # 'Μ'
+ 205: 49, # 'Ν'
+ 206: 59, # 'Ξ'
+ 207: 39, # 'Ο'
+ 208: 35, # 'Π'
+ 209: 48, # 'Ρ'
+ 210: 250, # None
+ 211: 37, # 'Σ'
+ 212: 33, # 'Τ'
+ 213: 45, # 'Υ'
+ 214: 56, # 'Φ'
+ 215: 50, # 'Χ'
+ 216: 84, # 'Ψ'
+ 217: 57, # 'Ω'
+ 218: 120, # 'Ϊ'
+ 219: 121, # 'Ϋ'
+ 220: 17, # 'ά'
+ 221: 18, # 'έ'
+ 222: 22, # 'ή'
+ 223: 15, # 'ί'
+ 224: 124, # 'ΰ'
+ 225: 1, # 'α'
+ 226: 29, # 'β'
+ 227: 20, # 'γ'
+ 228: 21, # 'δ'
+ 229: 3, # 'ε'
+ 230: 32, # 'ζ'
+ 231: 13, # 'η'
+ 232: 25, # 'θ'
+ 233: 5, # 'ι'
+ 234: 11, # 'κ'
+ 235: 16, # 'λ'
+ 236: 10, # 'μ'
+ 237: 6, # 'ν'
+ 238: 30, # 'ξ'
+ 239: 4, # 'ο'
+ 240: 9, # 'π'
+ 241: 8, # 'ρ'
+ 242: 14, # 'ς'
+ 243: 7, # 'σ'
+ 244: 2, # 'τ'
+ 245: 12, # 'υ'
+ 246: 28, # 'φ'
+ 247: 23, # 'χ'
+ 248: 42, # 'ψ'
+ 249: 24, # 'ω'
+ 250: 64, # 'ϊ'
+ 251: 75, # 'ϋ'
+ 252: 19, # 'ό'
+ 253: 26, # 'ύ'
+ 254: 27, # 'ώ'
+ 255: 253, # None
+}
+
+ISO_8859_7_GREEK_MODEL = SingleByteCharSetModel(
+ charset_name="ISO-8859-7",
+ language="Greek",
+ char_to_order_map=ISO_8859_7_GREEK_CHAR_TO_ORDER,
+ language_model=GREEK_LANG_MODEL,
+ typical_positive_ratio=0.982851,
+ keep_ascii_letters=False,
+ alphabet="ΆΈΉΊΌΎΏΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΤΥΦΧΨΩάέήίαβγδεζηθικλμνξοπρςστυφχψωόύώ",
+)
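
Note that windows-1253 and ISO-8859-7 get separate byte-to-order maps but deliberately share GREEK_LANG_MODEL and typical_positive_ratio: the two encodings place the Greek alphabet at nearly identical code points, and the bigram statistics are keyed on order numbers rather than raw bytes, so one trained model serves both charsets.
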
diff --git a/third_party/python/pip/pip/_vendor/chardet/langhebrewmodel.py b/third_party/python/pip/pip/_vendor/chardet/langhebrewmodel.py
new file mode 100644
index 0000000000..56d2975877
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/langhebrewmodel.py
@@ -0,0 +1,4380 @@
+from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
+
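
The ratings carry the same meaning as in the Greek model above: each entry scores how plausible the row character is as the immediate predecessor of the column character in the Hebrew training corpus. Reading the first block as an example (hypothetical asserts, which only hold once the full table below has been defined):

    assert HEBREW_LANG_MODEL[50][44] == 2  # 'a' followed by 't': likely
    assert HEBREW_LANG_MODEL[50][49] == 0  # 'a' followed by 'o': negative
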
+HEBREW_LANG_MODEL = {
+ 50: { # 'a'
+ 50: 0, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 2, # 'l'
+ 54: 2, # 'n'
+ 49: 0, # 'o'
+ 51: 2, # 'r'
+ 43: 1, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 1, # 'ק'
+ 7: 0, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 60: { # 'c'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 0, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 0, # 'n'
+ 49: 1, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 61: { # 'd'
+ 50: 1, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 2, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 0, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 1, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 42: { # 'e'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 2, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 2, # 'l'
+ 54: 2, # 'n'
+ 49: 1, # 'o'
+ 51: 2, # 'r'
+ 43: 2, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 1, # '–'
+ 52: 2, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 53: { # 'i'
+ 50: 1, # 'a'
+ 60: 2, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 0, # 'i'
+ 56: 1, # 'l'
+ 54: 2, # 'n'
+ 49: 2, # 'o'
+ 51: 1, # 'r'
+ 43: 2, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 56: { # 'l'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 2, # 'e'
+ 53: 2, # 'i'
+ 56: 2, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 0, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 54: { # 'n'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 0, # 'r'
+ 43: 1, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 2, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 49: { # 'o'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 2, # 'n'
+ 49: 1, # 'o'
+ 51: 2, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 51: { # 'r'
+ 50: 2, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 2, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 2, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 2, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 43: { # 's'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 0, # 'd'
+ 42: 2, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 2, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 44: { # 't'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 0, # 'd'
+ 42: 2, # 'e'
+ 53: 2, # 'i'
+ 56: 1, # 'l'
+ 54: 0, # 'n'
+ 49: 1, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 2, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 63: { # 'u'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 0, # 'o'
+ 51: 1, # 'r'
+ 43: 2, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 34: { # '\xa0'
+ 50: 1, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 0, # 'e'
+ 53: 1, # 'i'
+ 56: 0, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 0, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 2, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 2, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 2, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 55: { # '´'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 1, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 2, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 1, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 48: { # '¼'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 39: { # '½'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 57: { # '¾'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 30: { # 'ְ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 2, # 'ו'
+ 24: 2, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 2, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 1, # 'ם'
+ 6: 2, # 'מ'
+ 23: 0, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 2, # 'ע'
+ 26: 0, # 'ף'
+ 18: 2, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 59: { # 'ֱ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 1, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 0, # 'ם'
+ 6: 2, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 41: { # 'ֲ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 1, # 'י'
+ 25: 1, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 0, # 'ם'
+ 6: 2, # 'מ'
+ 23: 0, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 1, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 33: { # 'ִ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 1, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 1, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 2, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 37: { # 'ֵ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 2, # 'ח'
+ 22: 1, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 1, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 36: { # 'ֶ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 2, # 'ח'
+ 22: 1, # 'ט'
+ 1: 2, # 'י'
+ 25: 2, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 1, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 31: { # 'ַ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 1, # 'ו'
+ 24: 2, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 2, # 'ע'
+ 26: 2, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 29: { # 'ָ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 1, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 3, # 'ה'
+ 2: 2, # 'ו'
+ 24: 2, # 'ז'
+ 14: 2, # 'ח'
+ 22: 1, # 'ט'
+ 1: 2, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 35: { # 'ֹ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 2, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 1, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 2, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 62: { # 'ֻ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 2, # 'ל'
+ 11: 1, # 'ם'
+ 6: 1, # 'מ'
+ 23: 1, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 28: { # 'ּ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 3, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 1, # 'ֲ'
+ 33: 3, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 3, # 'ַ'
+ 29: 3, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 2, # 'ׁ'
+ 45: 1, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 2, # 'ב'
+ 20: 1, # 'ג'
+ 16: 2, # 'ד'
+ 3: 1, # 'ה'
+ 2: 2, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 2, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 2, # 'ל'
+ 11: 1, # 'ם'
+ 6: 2, # 'מ'
+ 23: 1, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 2, # 'ר'
+ 10: 2, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 38: { # 'ׁ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 2, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 45: { # 'ׂ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 1, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 2, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 1, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 0, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 9: { # 'א'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 2, # 'ֱ'
+ 41: 2, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 2, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 8: { # 'ב'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 3, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 1, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 20: { # 'ג'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 2, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 1, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 1, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 2, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 16: { # 'ד'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 1, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 3: { # 'ה'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 1, # 'ֱ'
+ 41: 2, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 3, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 0, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 2: { # 'ו'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 3, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 3, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 3, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 24: { # 'ז'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 1, # 'ֲ'
+ 33: 1, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 2, # 'ב'
+ 20: 2, # 'ג'
+ 16: 2, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 2, # 'ח'
+ 22: 1, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 2, # 'נ'
+ 19: 1, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 1, # 'ש'
+ 5: 2, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 14: { # 'ח'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 1, # 'ֱ'
+ 41: 2, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 2, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 1, # 'ע'
+ 26: 2, # 'ף'
+ 18: 2, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 22: { # 'ט'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 1, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 1, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 2, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 3, # 'ר'
+ 10: 2, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 1: { # 'י'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 3, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 25: { # 'ך'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 1, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 15: { # 'כ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 3, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 2, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 2, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 4: { # 'ל'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 3, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 11: { # 'ם'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 1, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 6: { # 'מ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 0, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 23: { # 'ן'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 1, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 1, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 12: { # 'נ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 19: { # 'ס'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 2, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 1, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 3, # 'ף'
+ 18: 3, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 1, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 13: { # 'ע'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 1, # 'ֱ'
+ 41: 2, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 1, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 2, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 2, # 'ע'
+ 26: 1, # 'ף'
+ 18: 2, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 26: { # 'ף'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 1, # 'ס'
+ 13: 0, # 'ע'
+ 26: 1, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 18: { # 'פ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 2, # 'ב'
+ 20: 3, # 'ג'
+ 16: 2, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 2, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 2, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 27: { # 'ץ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 1, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 0, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 21: { # 'צ'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 1, # 'ז'
+ 14: 3, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 1, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 1, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 0, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 17: { # 'ק'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 1, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 2, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 1, # 'ך'
+ 15: 1, # 'כ'
+ 4: 3, # 'ל'
+ 11: 2, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 2, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 2, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 7: { # 'ר'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 2, # '´'
+ 48: 1, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 1, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 2, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 3, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 3, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 3, # 'ץ'
+ 21: 3, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 10: { # 'ש'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 1, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 1, # 'ִ'
+ 37: 1, # 'ֵ'
+ 36: 1, # 'ֶ'
+ 31: 1, # 'ַ'
+ 29: 1, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 3, # 'ׁ'
+ 45: 2, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 3, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 3, # 'ט'
+ 1: 3, # 'י'
+ 25: 3, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 2, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 1, # '…'
+ },
+ 5: { # 'ת'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 1, # '\xa0'
+ 55: 0, # '´'
+ 48: 1, # '¼'
+ 39: 1, # '½'
+ 57: 0, # '¾'
+ 30: 2, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 2, # 'ִ'
+ 37: 2, # 'ֵ'
+ 36: 2, # 'ֶ'
+ 31: 2, # 'ַ'
+ 29: 2, # 'ָ'
+ 35: 1, # 'ֹ'
+ 62: 1, # 'ֻ'
+ 28: 2, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 3, # 'א'
+ 8: 3, # 'ב'
+ 20: 3, # 'ג'
+ 16: 2, # 'ד'
+ 3: 3, # 'ה'
+ 2: 3, # 'ו'
+ 24: 2, # 'ז'
+ 14: 3, # 'ח'
+ 22: 2, # 'ט'
+ 1: 3, # 'י'
+ 25: 2, # 'ך'
+ 15: 3, # 'כ'
+ 4: 3, # 'ל'
+ 11: 3, # 'ם'
+ 6: 3, # 'מ'
+ 23: 3, # 'ן'
+ 12: 3, # 'נ'
+ 19: 2, # 'ס'
+ 13: 3, # 'ע'
+ 26: 2, # 'ף'
+ 18: 3, # 'פ'
+ 27: 1, # 'ץ'
+ 21: 2, # 'צ'
+ 17: 3, # 'ק'
+ 7: 3, # 'ר'
+ 10: 3, # 'ש'
+ 5: 3, # 'ת'
+ 32: 1, # '–'
+ 52: 1, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+ 32: { # '–'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 1, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 1, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 52: { # '’'
+ 50: 1, # 'a'
+ 60: 0, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 1, # 'r'
+ 43: 2, # 's'
+ 44: 2, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 1, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 47: { # '“'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 1, # 'l'
+ 54: 1, # 'n'
+ 49: 1, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 1, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 2, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 1, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 1, # 'ח'
+ 22: 1, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 1, # 'ס'
+ 13: 1, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 1, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 46: { # '”'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 1, # 'ב'
+ 20: 1, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 1, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 0, # '†'
+ 40: 0, # '…'
+ },
+ 58: { # '†'
+ 50: 0, # 'a'
+ 60: 0, # 'c'
+ 61: 0, # 'd'
+ 42: 0, # 'e'
+ 53: 0, # 'i'
+ 56: 0, # 'l'
+ 54: 0, # 'n'
+ 49: 0, # 'o'
+ 51: 0, # 'r'
+ 43: 0, # 's'
+ 44: 0, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 0, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 0, # 'ה'
+ 2: 0, # 'ו'
+ 24: 0, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 0, # 'י'
+ 25: 0, # 'ך'
+ 15: 0, # 'כ'
+ 4: 0, # 'ל'
+ 11: 0, # 'ם'
+ 6: 0, # 'מ'
+ 23: 0, # 'ן'
+ 12: 0, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 0, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 0, # 'ר'
+ 10: 0, # 'ש'
+ 5: 0, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 0, # '”'
+ 58: 2, # '†'
+ 40: 0, # '…'
+ },
+ 40: { # '…'
+ 50: 1, # 'a'
+ 60: 1, # 'c'
+ 61: 1, # 'd'
+ 42: 1, # 'e'
+ 53: 1, # 'i'
+ 56: 0, # 'l'
+ 54: 1, # 'n'
+ 49: 0, # 'o'
+ 51: 1, # 'r'
+ 43: 1, # 's'
+ 44: 1, # 't'
+ 63: 0, # 'u'
+ 34: 0, # '\xa0'
+ 55: 0, # '´'
+ 48: 0, # '¼'
+ 39: 0, # '½'
+ 57: 0, # '¾'
+ 30: 0, # 'ְ'
+ 59: 0, # 'ֱ'
+ 41: 0, # 'ֲ'
+ 33: 0, # 'ִ'
+ 37: 0, # 'ֵ'
+ 36: 0, # 'ֶ'
+ 31: 0, # 'ַ'
+ 29: 0, # 'ָ'
+ 35: 0, # 'ֹ'
+ 62: 0, # 'ֻ'
+ 28: 0, # 'ּ'
+ 38: 0, # 'ׁ'
+ 45: 0, # 'ׂ'
+ 9: 1, # 'א'
+ 8: 0, # 'ב'
+ 20: 0, # 'ג'
+ 16: 0, # 'ד'
+ 3: 1, # 'ה'
+ 2: 1, # 'ו'
+ 24: 1, # 'ז'
+ 14: 0, # 'ח'
+ 22: 0, # 'ט'
+ 1: 1, # 'י'
+ 25: 0, # 'ך'
+ 15: 1, # 'כ'
+ 4: 1, # 'ל'
+ 11: 0, # 'ם'
+ 6: 1, # 'מ'
+ 23: 0, # 'ן'
+ 12: 1, # 'נ'
+ 19: 0, # 'ס'
+ 13: 0, # 'ע'
+ 26: 0, # 'ף'
+ 18: 1, # 'פ'
+ 27: 0, # 'ץ'
+ 21: 0, # 'צ'
+ 17: 0, # 'ק'
+ 7: 1, # 'ר'
+ 10: 1, # 'ש'
+ 5: 1, # 'ת'
+ 32: 0, # '–'
+ 52: 0, # '’'
+ 47: 0, # '“'
+ 46: 1, # '”'
+ 58: 0, # '†'
+ 40: 2, # '…'
+ },
+}
+
+# 255: Undefined characters that did not exist in training text
+# 254: Carriage Return/Line Feed
+# 253: symbols (punctuation) that do not belong to a word
+# 252: digits 0-9
+# 251: Control characters
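+#
+# For example (values readable directly from the mapping table below):
+#   WINDOWS_1255_HEBREW_CHAR_TO_ORDER[13] == 254    # '\r' folds into the CR/LF bucket
+#   WINDOWS_1255_HEBREW_CHAR_TO_ORDER[48] == 252    # '0' folds into the digit bucket
+#   WINDOWS_1255_HEBREW_CHAR_TO_ORDER[224] == 9     # 'א' gets a low (frequent) order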
+
+# Character Mapping Table(s):
+WINDOWS_1255_HEBREW_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 69, # 'A'
+ 66: 91, # 'B'
+ 67: 79, # 'C'
+ 68: 80, # 'D'
+ 69: 92, # 'E'
+ 70: 89, # 'F'
+ 71: 97, # 'G'
+ 72: 90, # 'H'
+ 73: 68, # 'I'
+ 74: 111, # 'J'
+ 75: 112, # 'K'
+ 76: 82, # 'L'
+ 77: 73, # 'M'
+ 78: 95, # 'N'
+ 79: 85, # 'O'
+ 80: 78, # 'P'
+ 81: 121, # 'Q'
+ 82: 86, # 'R'
+ 83: 71, # 'S'
+ 84: 67, # 'T'
+ 85: 102, # 'U'
+ 86: 107, # 'V'
+ 87: 84, # 'W'
+ 88: 114, # 'X'
+ 89: 103, # 'Y'
+ 90: 115, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 50, # 'a'
+ 98: 74, # 'b'
+ 99: 60, # 'c'
+ 100: 61, # 'd'
+ 101: 42, # 'e'
+ 102: 76, # 'f'
+ 103: 70, # 'g'
+ 104: 64, # 'h'
+ 105: 53, # 'i'
+ 106: 105, # 'j'
+ 107: 93, # 'k'
+ 108: 56, # 'l'
+ 109: 65, # 'm'
+ 110: 54, # 'n'
+ 111: 49, # 'o'
+ 112: 66, # 'p'
+ 113: 110, # 'q'
+ 114: 51, # 'r'
+ 115: 43, # 's'
+ 116: 44, # 't'
+ 117: 63, # 'u'
+ 118: 81, # 'v'
+ 119: 77, # 'w'
+ 120: 98, # 'x'
+ 121: 75, # 'y'
+ 122: 108, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 124, # '€'
+ 129: 202, # None
+ 130: 203, # '‚'
+ 131: 204, # 'ƒ'
+ 132: 205, # '„'
+ 133: 40, # '…'
+ 134: 58, # '†'
+ 135: 206, # '‡'
+ 136: 207, # 'ˆ'
+ 137: 208, # '‰'
+ 138: 209, # None
+ 139: 210, # '‹'
+ 140: 211, # None
+ 141: 212, # None
+ 142: 213, # None
+ 143: 214, # None
+ 144: 215, # None
+ 145: 83, # '‘'
+ 146: 52, # '’'
+ 147: 47, # '“'
+ 148: 46, # '”'
+ 149: 72, # '•'
+ 150: 32, # '–'
+ 151: 94, # '—'
+ 152: 216, # '˜'
+ 153: 113, # '™'
+ 154: 217, # None
+ 155: 109, # '›'
+ 156: 218, # None
+ 157: 219, # None
+ 158: 220, # None
+ 159: 221, # None
+ 160: 34, # '\xa0'
+ 161: 116, # '¡'
+ 162: 222, # '¢'
+ 163: 118, # '£'
+ 164: 100, # '₪'
+ 165: 223, # '¥'
+ 166: 224, # '¦'
+ 167: 117, # '§'
+ 168: 119, # '¨'
+ 169: 104, # '©'
+ 170: 125, # '×'
+ 171: 225, # '«'
+ 172: 226, # '¬'
+ 173: 87, # '\xad'
+ 174: 99, # '®'
+ 175: 227, # '¯'
+ 176: 106, # '°'
+ 177: 122, # '±'
+ 178: 123, # '²'
+ 179: 228, # '³'
+ 180: 55, # '´'
+ 181: 229, # 'µ'
+ 182: 230, # '¶'
+ 183: 101, # '·'
+ 184: 231, # '¸'
+ 185: 232, # '¹'
+ 186: 120, # '÷'
+ 187: 233, # '»'
+ 188: 48, # '¼'
+ 189: 39, # '½'
+ 190: 57, # '¾'
+ 191: 234, # '¿'
+ 192: 30, # 'ְ'
+ 193: 59, # 'ֱ'
+ 194: 41, # 'ֲ'
+ 195: 88, # 'ֳ'
+ 196: 33, # 'ִ'
+ 197: 37, # 'ֵ'
+ 198: 36, # 'ֶ'
+ 199: 31, # 'ַ'
+ 200: 29, # 'ָ'
+ 201: 35, # 'ֹ'
+ 202: 235, # None
+ 203: 62, # 'ֻ'
+ 204: 28, # 'ּ'
+ 205: 236, # 'ֽ'
+ 206: 126, # '־'
+ 207: 237, # 'ֿ'
+ 208: 238, # '׀'
+ 209: 38, # 'ׁ'
+ 210: 45, # 'ׂ'
+ 211: 239, # '׃'
+ 212: 240, # 'װ'
+ 213: 241, # 'ױ'
+ 214: 242, # 'ײ'
+ 215: 243, # '׳'
+ 216: 127, # '״'
+ 217: 244, # None
+ 218: 245, # None
+ 219: 246, # None
+ 220: 247, # None
+ 221: 248, # None
+ 222: 249, # None
+ 223: 250, # None
+ 224: 9, # 'א'
+ 225: 8, # 'ב'
+ 226: 20, # 'ג'
+ 227: 16, # 'ד'
+ 228: 3, # 'ה'
+ 229: 2, # 'ו'
+ 230: 24, # 'ז'
+ 231: 14, # 'ח'
+ 232: 22, # 'ט'
+ 233: 1, # 'י'
+ 234: 25, # 'ך'
+ 235: 15, # 'כ'
+ 236: 4, # 'ל'
+ 237: 11, # 'ם'
+ 238: 6, # 'מ'
+ 239: 23, # 'ן'
+ 240: 12, # 'נ'
+ 241: 19, # 'ס'
+ 242: 13, # 'ע'
+ 243: 26, # 'ף'
+ 244: 18, # 'פ'
+ 245: 27, # 'ץ'
+ 246: 21, # 'צ'
+ 247: 17, # 'ק'
+ 248: 7, # 'ר'
+ 249: 10, # 'ש'
+ 250: 5, # 'ת'
+ 251: 251, # None
+ 252: 252, # None
+ 253: 128, # '\u200e'
+ 254: 96, # '\u200f'
+ 255: 253, # None
+}
+
+WINDOWS_1255_HEBREW_MODEL = SingleByteCharSetModel(
+ charset_name="windows-1255",
+ language="Hebrew",
+ char_to_order_map=WINDOWS_1255_HEBREW_CHAR_TO_ORDER,
+ language_model=HEBREW_LANG_MODEL,
+ typical_positive_ratio=0.984004,
+ keep_ascii_letters=False,
+ alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
+)
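+
+# A minimal sketch of how a model like this is consumed, assuming chardet's
+# single-byte prober API (SingleByteCharSetProber lives in the same module as
+# SingleByteCharSetModel; the byte string below is an arbitrary example):
+#
+#     from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
+#
+#     prober = SingleByteCharSetProber(WINDOWS_1255_HEBREW_MODEL)
+#     prober.feed(b"\xf9\xec\xe5\xed")  # "שלום" encoded as windows-1255
+#     print(prober.charset_name, prober.get_confidence())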
diff --git a/third_party/python/pip/pip/_vendor/chardet/langhungarianmodel.py b/third_party/python/pip/pip/_vendor/chardet/langhungarianmodel.py
new file mode 100644
index 0000000000..09a0d326b9
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/langhungarianmodel.py
@@ -0,0 +1,4649 @@
+from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
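+#
+# A rating is read as LANG_MODEL[order(prev_char)][order(next_char)]; e.g. in
+# the table below HUNGARIAN_LANG_MODEL[28][11] == 3 marks 'A' followed by 'z'
+# (as in "Az") as positive, while HUNGARIAN_LANG_MODEL[28][40] == 1 marks 'A'
+# followed by 'B' as unlikely. (`order(...)` is shorthand for a lookup in the
+# char-to-order map, not a function defined here.)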
+
+HUNGARIAN_LANG_MODEL = {
+ 28: { # 'A'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 2, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 2, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 2, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 2, # 'N'
+ 47: 1, # 'O'
+ 46: 2, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 2, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 2, # 'p'
+ 10: 2, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 1, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 1, # 'Á'
+ 44: 0, # 'É'
+ 61: 1, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 40: { # 'B'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 0, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 1, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 3, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 54: { # 'C'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 0, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 0, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 1, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 3, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 45: { # 'D'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 0, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 0, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 1, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 32: { # 'E'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 2, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 2, # 'K'
+ 41: 2, # 'L'
+ 34: 2, # 'M'
+ 35: 2, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 1, # 't'
+ 21: 2, # 'u'
+ 19: 1, # 'v'
+ 62: 1, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 50: { # 'F'
+ 28: 1, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 0, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 0, # 'V'
+ 55: 1, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 1, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 1, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 49: { # 'G'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 2, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 38: { # 'H'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 0, # 'D'
+ 32: 1, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 1, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 1, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 1, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 0, # 'V'
+ 55: 1, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 1, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 0, # 'n'
+ 8: 3, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 2, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 2, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 39: { # 'I'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 2, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 2, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 2, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 0, # 'e'
+ 27: 1, # 'f'
+ 12: 2, # 'g'
+ 20: 1, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 53: { # 'J'
+ 28: 2, # 'A'
+ 40: 0, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 1, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 0, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 36: { # 'K'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 1, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 3, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 2, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 41: { # 'L'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 1, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 34: { # 'M'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 0, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 3, # 'a'
+ 18: 0, # 'b'
+ 26: 1, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 3, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 35: { # 'N'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 2, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 2, # 'Y'
+ 52: 1, # 'Z'
+ 2: 3, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 47: { # 'O'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 2, # 'K'
+ 41: 2, # 'L'
+ 34: 2, # 'M'
+ 35: 2, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 1, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 1, # 's'
+ 3: 2, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 1, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 46: { # 'P'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 0, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 1, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 1, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 3, # 'á'
+ 15: 2, # 'é'
+ 30: 0, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 43: { # 'R'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 2, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 2, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 33: { # 'S'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 3, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 1, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 1, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 1, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 37: { # 'T'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 1, # 'S'
+ 37: 2, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 2, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 1, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 0, # 't'
+ 21: 2, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 2, # 'Á'
+ 44: 2, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 57: { # 'U'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 0, # 'f'
+ 12: 2, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 1, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 48: { # 'V'
+ 28: 2, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 0, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 2, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 2, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 2, # 'o'
+ 23: 0, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 2, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 55: { # 'Y'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 1, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 2, # 'Z'
+ 2: 1, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 8: 1, # 'o'
+ 23: 1, # 'p'
+ 10: 0, # 'r'
+ 5: 0, # 's'
+ 3: 0, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 1, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 52: { # 'Z'
+ 28: 2, # 'A'
+ 40: 1, # 'B'
+ 54: 0, # 'C'
+ 45: 1, # 'D'
+ 32: 2, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 2, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 2, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 1, # 'U'
+ 48: 1, # 'V'
+ 55: 1, # 'Y'
+ 52: 1, # 'Z'
+ 2: 1, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 1, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 1, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 2, # 's'
+ 3: 0, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 2, # 'Á'
+ 44: 1, # 'É'
+ 61: 1, # 'Í'
+ 58: 1, # 'Ó'
+ 59: 1, # 'Ö'
+ 60: 1, # 'Ú'
+ 63: 1, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 2: { # 'a'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 2, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 2, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 2, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 18: { # 'b'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 2, # 's'
+ 3: 1, # 't'
+ 21: 3, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 3, # 'ó'
+ 24: 2, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 26: { # 'c'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 1, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 1, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 1, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 2, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 2, # 't'
+ 21: 2, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 2, # 'á'
+ 15: 2, # 'é'
+ 30: 2, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 17: { # 'd'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 2, # 'k'
+ 6: 1, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 1: { # 'e'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 2, # 'e'
+ 27: 3, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 2, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 2, # 'u'
+ 19: 3, # 'v'
+ 62: 2, # 'x'
+ 16: 2, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 27: { # 'f'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 0, # 'p'
+ 10: 3, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 2, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 3, # 'ö'
+ 31: 1, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 12: { # 'g'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 2, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 2, # 'k'
+ 6: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 3, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 3, # 'ó'
+ 24: 2, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 20: { # 'h'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 0, # 'd'
+ 1: 3, # 'e'
+ 27: 0, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 2, # 's'
+ 3: 1, # 't'
+ 21: 3, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 2, # 'ú'
+ 29: 1, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 9: { # 'i'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 3, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 2, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 3, # 'ó'
+ 24: 1, # 'ö'
+ 31: 2, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 22: { # 'j'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 1, # 'i'
+ 22: 2, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 1, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 3, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 7: { # 'k'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 2, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 2, # 'ó'
+ 24: 3, # 'ö'
+ 31: 1, # 'ú'
+ 29: 3, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 6: { # 'l'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 1, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 3, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 3, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 3, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 13: { # 'm'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 1, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 8: 3, # 'o'
+ 23: 3, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 3, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 2, # 'ű'
+ },
+ 4: { # 'n'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 1, # 'x'
+ 16: 3, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 3, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 8: { # 'o'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 1, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 2, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 2, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 23: { # 'p'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 1, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 2, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 3, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 10: { # 'r'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 2, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 3, # 'ú'
+ 29: 3, # 'ü'
+ 42: 2, # 'ő'
+ 56: 2, # 'ű'
+ },
+ 5: { # 's'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 2, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 2, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 3, # 'k'
+ 6: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 3, # 'ú'
+ 29: 3, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 3: { # 't'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 1, # 'g'
+ 20: 3, # 'h'
+ 9: 3, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 3, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 3, # 'ú'
+ 29: 3, # 'ü'
+ 42: 3, # 'ő'
+ 56: 2, # 'ű'
+ },
+ 21: { # 'u'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 2, # 'b'
+ 26: 2, # 'c'
+ 17: 3, # 'd'
+ 1: 2, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 2, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 1, # 'u'
+ 19: 3, # 'v'
+ 62: 1, # 'x'
+ 16: 1, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 2, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 0, # 'ö'
+ 31: 1, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 19: { # 'v'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 2, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 2, # 'ö'
+ 31: 1, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 62: { # 'x'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 0, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 1, # 'i'
+ 22: 0, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 1, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 1, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 1, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 16: { # 'y'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 3, # 'e'
+ 27: 2, # 'f'
+ 12: 2, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 2, # 'j'
+ 7: 2, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 2, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 2, # 'í'
+ 25: 2, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 2, # 'ü'
+ 42: 1, # 'ő'
+ 56: 2, # 'ű'
+ },
+ 11: { # 'z'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 3, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 3, # 'd'
+ 1: 3, # 'e'
+ 27: 1, # 'f'
+ 12: 2, # 'g'
+ 20: 2, # 'h'
+ 9: 3, # 'i'
+ 22: 1, # 'j'
+ 7: 3, # 'k'
+ 6: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 3, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 3, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 3, # 'á'
+ 15: 3, # 'é'
+ 30: 3, # 'í'
+ 25: 3, # 'ó'
+ 24: 3, # 'ö'
+ 31: 2, # 'ú'
+ 29: 3, # 'ü'
+ 42: 2, # 'ő'
+ 56: 1, # 'ű'
+ },
+ 51: { # 'Á'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 1, # 'F'
+ 49: 2, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 2, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 1, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 44: { # 'É'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 1, # 'E'
+ 50: 0, # 'F'
+ 49: 2, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 2, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 2, # 'R'
+ 33: 2, # 'S'
+ 37: 2, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 3, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 61: { # 'Í'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 1, # 'J'
+ 36: 0, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 2, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 0, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 0, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 58: { # 'Ó'
+ 28: 1, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 1, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 2, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 2, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 0, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 1, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 59: { # 'Ö'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 0, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 1, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 0, # 'b'
+ 26: 1, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 0, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 2, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 60: { # 'Ú'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 1, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 1, # 'F'
+ 49: 1, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 0, # 'b'
+ 26: 0, # 'c'
+ 17: 0, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 2, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 2, # 'j'
+ 7: 0, # 'k'
+ 6: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 0, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 0, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 63: { # 'Ü'
+ 28: 0, # 'A'
+ 40: 1, # 'B'
+ 54: 0, # 'C'
+ 45: 1, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 1, # 'G'
+ 38: 1, # 'H'
+ 39: 0, # 'I'
+ 53: 1, # 'J'
+ 36: 1, # 'K'
+ 41: 1, # 'L'
+ 34: 1, # 'M'
+ 35: 1, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 1, # 'R'
+ 33: 1, # 'S'
+ 37: 1, # 'T'
+ 57: 0, # 'U'
+ 48: 1, # 'V'
+ 55: 0, # 'Y'
+ 52: 1, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 0, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 0, # 'f'
+ 12: 1, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 0, # 'j'
+ 7: 0, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 1, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 14: { # 'á'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 3, # 'b'
+ 26: 3, # 'c'
+ 17: 3, # 'd'
+ 1: 1, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 2, # 'h'
+ 9: 2, # 'i'
+ 22: 3, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 2, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 1, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 2, # 'é'
+ 30: 1, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 15: { # 'é'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 3, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 3, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 3, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 0, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 30: { # 'í'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 0, # 'a'
+ 18: 1, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 0, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 0, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 2, # 's'
+ 3: 3, # 't'
+ 21: 0, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 25: { # 'ó'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 2, # 'a'
+ 18: 3, # 'b'
+ 26: 2, # 'c'
+ 17: 3, # 'd'
+ 1: 1, # 'e'
+ 27: 2, # 'f'
+ 12: 2, # 'g'
+ 20: 2, # 'h'
+ 9: 2, # 'i'
+ 22: 2, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 8: 1, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 1, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 0, # 'ó'
+ 24: 1, # 'ö'
+ 31: 1, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 24: { # 'ö'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 0, # 'a'
+ 18: 3, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 0, # 'e'
+ 27: 1, # 'f'
+ 12: 2, # 'g'
+ 20: 1, # 'h'
+ 9: 0, # 'i'
+ 22: 1, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 8: 0, # 'o'
+ 23: 2, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 3, # 't'
+ 21: 0, # 'u'
+ 19: 3, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 3, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 31: { # 'ú'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 2, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 2, # 'f'
+ 12: 3, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 3, # 'j'
+ 7: 1, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 3, # 'r'
+ 5: 3, # 's'
+ 3: 2, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 1, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 29: { # 'ü'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 3, # 'g'
+ 20: 2, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 3, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 8: 0, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 0, # 'u'
+ 19: 2, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 1, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 42: { # 'ő'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 2, # 'b'
+ 26: 1, # 'c'
+ 17: 2, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 2, # 'k'
+ 6: 3, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 8: 1, # 'o'
+ 23: 1, # 'p'
+ 10: 2, # 'r'
+ 5: 2, # 's'
+ 3: 2, # 't'
+ 21: 1, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 1, # 'é'
+ 30: 1, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 1, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+ 56: { # 'ű'
+ 28: 0, # 'A'
+ 40: 0, # 'B'
+ 54: 0, # 'C'
+ 45: 0, # 'D'
+ 32: 0, # 'E'
+ 50: 0, # 'F'
+ 49: 0, # 'G'
+ 38: 0, # 'H'
+ 39: 0, # 'I'
+ 53: 0, # 'J'
+ 36: 0, # 'K'
+ 41: 0, # 'L'
+ 34: 0, # 'M'
+ 35: 0, # 'N'
+ 47: 0, # 'O'
+ 46: 0, # 'P'
+ 43: 0, # 'R'
+ 33: 0, # 'S'
+ 37: 0, # 'T'
+ 57: 0, # 'U'
+ 48: 0, # 'V'
+ 55: 0, # 'Y'
+ 52: 0, # 'Z'
+ 2: 1, # 'a'
+ 18: 1, # 'b'
+ 26: 0, # 'c'
+ 17: 1, # 'd'
+ 1: 1, # 'e'
+ 27: 1, # 'f'
+ 12: 1, # 'g'
+ 20: 1, # 'h'
+ 9: 1, # 'i'
+ 22: 1, # 'j'
+ 7: 1, # 'k'
+ 6: 1, # 'l'
+ 13: 0, # 'm'
+ 4: 2, # 'n'
+ 8: 0, # 'o'
+ 23: 0, # 'p'
+ 10: 1, # 'r'
+ 5: 1, # 's'
+ 3: 1, # 't'
+ 21: 0, # 'u'
+ 19: 1, # 'v'
+ 62: 0, # 'x'
+ 16: 0, # 'y'
+ 11: 2, # 'z'
+ 51: 0, # 'Á'
+ 44: 0, # 'É'
+ 61: 0, # 'Í'
+ 58: 0, # 'Ó'
+ 59: 0, # 'Ö'
+ 60: 0, # 'Ú'
+ 63: 0, # 'Ü'
+ 14: 0, # 'á'
+ 15: 0, # 'é'
+ 30: 0, # 'í'
+ 25: 0, # 'ó'
+ 24: 0, # 'ö'
+ 31: 0, # 'ú'
+ 29: 0, # 'ü'
+ 42: 0, # 'ő'
+ 56: 0, # 'ű'
+ },
+}
+
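+# Illustrative helper (editorial sketch, not part of the upstream module):
+# HUNGARIAN_LANG_MODEL is keyed by character "order" (frequency rank); each
+# inner dict rates how often the second character follows the first in the
+# training corpus, from 0 (never observed) to 3 (very frequent).
+def bigram_likelihood(prev_order, cur_order):
+    """Return the 0-3 frequency class for an order pair, 0 if unseen."""
+    return HUNGARIAN_LANG_MODEL.get(prev_order, {}).get(cur_order, 0)
+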
+# 255: Undefined characters that did not exist in the training text
+# 254: Carriage return ('\r') and line feed ('\n')
+# 253: Symbols and punctuation that do not belong to a word
+# 252: Digits 0 - 9
+# 251: Control characters
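+#
+# For example, in the tables below the digit byte 0x35 ('5') maps to order
+# 252 and the period byte 0x2E ('.') maps to 253, so neither participates
+# in the letter-bigram scoring above.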
+
+# Character Mapping Table(s):
+WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 28, # 'A'
+ 66: 40, # 'B'
+ 67: 54, # 'C'
+ 68: 45, # 'D'
+ 69: 32, # 'E'
+ 70: 50, # 'F'
+ 71: 49, # 'G'
+ 72: 38, # 'H'
+ 73: 39, # 'I'
+ 74: 53, # 'J'
+ 75: 36, # 'K'
+ 76: 41, # 'L'
+ 77: 34, # 'M'
+ 78: 35, # 'N'
+ 79: 47, # 'O'
+ 80: 46, # 'P'
+ 81: 72, # 'Q'
+ 82: 43, # 'R'
+ 83: 33, # 'S'
+ 84: 37, # 'T'
+ 85: 57, # 'U'
+ 86: 48, # 'V'
+ 87: 64, # 'W'
+ 88: 68, # 'X'
+ 89: 55, # 'Y'
+ 90: 52, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 2, # 'a'
+ 98: 18, # 'b'
+ 99: 26, # 'c'
+ 100: 17, # 'd'
+ 101: 1, # 'e'
+ 102: 27, # 'f'
+ 103: 12, # 'g'
+ 104: 20, # 'h'
+ 105: 9, # 'i'
+ 106: 22, # 'j'
+ 107: 7, # 'k'
+ 108: 6, # 'l'
+ 109: 13, # 'm'
+ 110: 4, # 'n'
+ 111: 8, # 'o'
+ 112: 23, # 'p'
+ 113: 67, # 'q'
+ 114: 10, # 'r'
+ 115: 5, # 's'
+ 116: 3, # 't'
+ 117: 21, # 'u'
+ 118: 19, # 'v'
+ 119: 65, # 'w'
+ 120: 62, # 'x'
+ 121: 16, # 'y'
+ 122: 11, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 161, # '€'
+ 129: 162, # None
+ 130: 163, # '‚'
+ 131: 164, # None
+ 132: 165, # '„'
+ 133: 166, # '…'
+ 134: 167, # '†'
+ 135: 168, # '‡'
+ 136: 169, # None
+ 137: 170, # '‰'
+ 138: 171, # 'Š'
+ 139: 172, # '‹'
+ 140: 173, # 'Ś'
+ 141: 174, # 'Ť'
+ 142: 175, # 'Ž'
+ 143: 176, # 'Ź'
+ 144: 177, # None
+ 145: 178, # '‘'
+ 146: 179, # '’'
+ 147: 180, # '“'
+ 148: 78, # '”'
+ 149: 181, # '•'
+ 150: 69, # '–'
+ 151: 182, # '—'
+ 152: 183, # None
+ 153: 184, # '™'
+ 154: 185, # 'š'
+ 155: 186, # '›'
+ 156: 187, # 'ś'
+ 157: 188, # 'ť'
+ 158: 189, # 'ž'
+ 159: 190, # 'ź'
+ 160: 191, # '\xa0'
+ 161: 192, # 'ˇ'
+ 162: 193, # '˘'
+ 163: 194, # 'Ł'
+ 164: 195, # '¤'
+ 165: 196, # 'Ą'
+ 166: 197, # '¦'
+ 167: 76, # '§'
+ 168: 198, # '¨'
+ 169: 199, # '©'
+ 170: 200, # 'Ş'
+ 171: 201, # '«'
+ 172: 202, # '¬'
+ 173: 203, # '\xad'
+ 174: 204, # '®'
+ 175: 205, # 'Ż'
+ 176: 81, # '°'
+ 177: 206, # '±'
+ 178: 207, # '˛'
+ 179: 208, # 'ł'
+ 180: 209, # '´'
+ 181: 210, # 'µ'
+ 182: 211, # '¶'
+ 183: 212, # '·'
+ 184: 213, # '¸'
+ 185: 214, # 'ą'
+ 186: 215, # 'ş'
+ 187: 216, # '»'
+ 188: 217, # 'Ľ'
+ 189: 218, # '˝'
+ 190: 219, # 'ľ'
+ 191: 220, # 'ż'
+ 192: 221, # 'Ŕ'
+ 193: 51, # 'Á'
+ 194: 83, # 'Â'
+ 195: 222, # 'Ă'
+ 196: 80, # 'Ä'
+ 197: 223, # 'Ĺ'
+ 198: 224, # 'Ć'
+ 199: 225, # 'Ç'
+ 200: 226, # 'Č'
+ 201: 44, # 'É'
+ 202: 227, # 'Ę'
+ 203: 228, # 'Ë'
+ 204: 229, # 'Ě'
+ 205: 61, # 'Í'
+ 206: 230, # 'Î'
+ 207: 231, # 'Ď'
+ 208: 232, # 'Đ'
+ 209: 233, # 'Ń'
+ 210: 234, # 'Ň'
+ 211: 58, # 'Ó'
+ 212: 235, # 'Ô'
+ 213: 66, # 'Ő'
+ 214: 59, # 'Ö'
+ 215: 236, # '×'
+ 216: 237, # 'Ř'
+ 217: 238, # 'Ů'
+ 218: 60, # 'Ú'
+ 219: 70, # 'Ű'
+ 220: 63, # 'Ü'
+ 221: 239, # 'Ý'
+ 222: 240, # 'Ţ'
+ 223: 241, # 'ß'
+ 224: 84, # 'ŕ'
+ 225: 14, # 'á'
+ 226: 75, # 'â'
+ 227: 242, # 'ă'
+ 228: 71, # 'ä'
+ 229: 82, # 'ĺ'
+ 230: 243, # 'ć'
+ 231: 73, # 'ç'
+ 232: 244, # 'č'
+ 233: 15, # 'é'
+ 234: 85, # 'ę'
+ 235: 79, # 'ë'
+ 236: 86, # 'ě'
+ 237: 30, # 'í'
+ 238: 77, # 'î'
+ 239: 87, # 'ď'
+ 240: 245, # 'đ'
+ 241: 246, # 'ń'
+ 242: 247, # 'ň'
+ 243: 25, # 'ó'
+ 244: 74, # 'ô'
+ 245: 42, # 'ő'
+ 246: 24, # 'ö'
+ 247: 248, # '÷'
+ 248: 249, # 'ř'
+ 249: 250, # 'ů'
+ 250: 31, # 'ú'
+ 251: 56, # 'ű'
+ 252: 29, # 'ü'
+ 253: 251, # 'ý'
+ 254: 252, # 'ţ'
+ 255: 253, # '˙'
+}
+
+WINDOWS_1250_HUNGARIAN_MODEL = SingleByteCharSetModel(
+ charset_name="windows-1250",
+ language="Hungarian",
+ char_to_order_map=WINDOWS_1250_HUNGARIAN_CHAR_TO_ORDER,
+ language_model=HUNGARIAN_LANG_MODEL,
+ typical_positive_ratio=0.947368,
+ keep_ascii_letters=True,
+ alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű",
+)
+
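+# Editorial sketch (assumption, not upstream chardet code): a
+# SingleByteCharSetModel is typically consumed by mapping each raw byte
+# through char_to_order_map and scoring consecutive letter orders against
+# language_model, roughly as follows.
+def hungarian_bigram_ratio(data, model=WINDOWS_1250_HUNGARIAN_MODEL):
+    """Fraction of letter bigrams rated likely (>= 2) by the model."""
+    likely = total = 0
+    prev_order = None
+    for byte in data:
+        order = model.char_to_order_map.get(byte, 255)
+        # Orders >= 251 are the control/digit/symbol classes noted above;
+        # they reset the bigram chain instead of being scored.
+        if order < 251 and prev_order is not None:
+            total += 1
+            if model.language_model.get(prev_order, {}).get(order, 0) >= 2:
+                likely += 1
+        prev_order = order if order < 251 else None
+    return likely / total if total else 0.0
+
+# Hypothetical usage: hungarian_bigram_ratio("szöveg".encode("windows-1250"))
+# scores the bigrams sz/zö/öv/ve/eg against the tables above.
+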
+ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 28, # 'A'
+ 66: 40, # 'B'
+ 67: 54, # 'C'
+ 68: 45, # 'D'
+ 69: 32, # 'E'
+ 70: 50, # 'F'
+ 71: 49, # 'G'
+ 72: 38, # 'H'
+ 73: 39, # 'I'
+ 74: 53, # 'J'
+ 75: 36, # 'K'
+ 76: 41, # 'L'
+ 77: 34, # 'M'
+ 78: 35, # 'N'
+ 79: 47, # 'O'
+ 80: 46, # 'P'
+ 81: 71, # 'Q'
+ 82: 43, # 'R'
+ 83: 33, # 'S'
+ 84: 37, # 'T'
+ 85: 57, # 'U'
+ 86: 48, # 'V'
+ 87: 64, # 'W'
+ 88: 68, # 'X'
+ 89: 55, # 'Y'
+ 90: 52, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 2, # 'a'
+ 98: 18, # 'b'
+ 99: 26, # 'c'
+ 100: 17, # 'd'
+ 101: 1, # 'e'
+ 102: 27, # 'f'
+ 103: 12, # 'g'
+ 104: 20, # 'h'
+ 105: 9, # 'i'
+ 106: 22, # 'j'
+ 107: 7, # 'k'
+ 108: 6, # 'l'
+ 109: 13, # 'm'
+ 110: 4, # 'n'
+ 111: 8, # 'o'
+ 112: 23, # 'p'
+ 113: 67, # 'q'
+ 114: 10, # 'r'
+ 115: 5, # 's'
+ 116: 3, # 't'
+ 117: 21, # 'u'
+ 118: 19, # 'v'
+ 119: 65, # 'w'
+ 120: 62, # 'x'
+ 121: 16, # 'y'
+ 122: 11, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 159, # '\x80'
+ 129: 160, # '\x81'
+ 130: 161, # '\x82'
+ 131: 162, # '\x83'
+ 132: 163, # '\x84'
+ 133: 164, # '\x85'
+ 134: 165, # '\x86'
+ 135: 166, # '\x87'
+ 136: 167, # '\x88'
+ 137: 168, # '\x89'
+ 138: 169, # '\x8a'
+ 139: 170, # '\x8b'
+ 140: 171, # '\x8c'
+ 141: 172, # '\x8d'
+ 142: 173, # '\x8e'
+ 143: 174, # '\x8f'
+ 144: 175, # '\x90'
+ 145: 176, # '\x91'
+ 146: 177, # '\x92'
+ 147: 178, # '\x93'
+ 148: 179, # '\x94'
+ 149: 180, # '\x95'
+ 150: 181, # '\x96'
+ 151: 182, # '\x97'
+ 152: 183, # '\x98'
+ 153: 184, # '\x99'
+ 154: 185, # '\x9a'
+ 155: 186, # '\x9b'
+ 156: 187, # '\x9c'
+ 157: 188, # '\x9d'
+ 158: 189, # '\x9e'
+ 159: 190, # '\x9f'
+ 160: 191, # '\xa0'
+ 161: 192, # 'Ą'
+ 162: 193, # '˘'
+ 163: 194, # 'Ł'
+ 164: 195, # '¤'
+ 165: 196, # 'Ľ'
+ 166: 197, # 'Ś'
+ 167: 75, # '§'
+ 168: 198, # '¨'
+ 169: 199, # 'Š'
+ 170: 200, # 'Ş'
+ 171: 201, # 'Ť'
+ 172: 202, # 'Ź'
+ 173: 203, # '\xad'
+ 174: 204, # 'Ž'
+ 175: 205, # 'Ż'
+ 176: 79, # '°'
+ 177: 206, # 'ą'
+ 178: 207, # '˛'
+ 179: 208, # 'ł'
+ 180: 209, # '´'
+ 181: 210, # 'ľ'
+ 182: 211, # 'ś'
+ 183: 212, # 'ˇ'
+ 184: 213, # '¸'
+ 185: 214, # 'š'
+ 186: 215, # 'ş'
+ 187: 216, # 'ť'
+ 188: 217, # 'ź'
+ 189: 218, # '˝'
+ 190: 219, # 'ž'
+ 191: 220, # 'ż'
+ 192: 221, # 'Ŕ'
+ 193: 51, # 'Á'
+ 194: 81, # 'Â'
+ 195: 222, # 'Ă'
+ 196: 78, # 'Ä'
+ 197: 223, # 'Ĺ'
+ 198: 224, # 'Ć'
+ 199: 225, # 'Ç'
+ 200: 226, # 'Č'
+ 201: 44, # 'É'
+ 202: 227, # 'Ę'
+ 203: 228, # 'Ë'
+ 204: 229, # 'Ě'
+ 205: 61, # 'Í'
+ 206: 230, # 'Î'
+ 207: 231, # 'Ď'
+ 208: 232, # 'Đ'
+ 209: 233, # 'Ń'
+ 210: 234, # 'Ň'
+ 211: 58, # 'Ó'
+ 212: 235, # 'Ô'
+ 213: 66, # 'Ő'
+ 214: 59, # 'Ö'
+ 215: 236, # '×'
+ 216: 237, # 'Ř'
+ 217: 238, # 'Ů'
+ 218: 60, # 'Ú'
+ 219: 69, # 'Ű'
+ 220: 63, # 'Ü'
+ 221: 239, # 'Ý'
+ 222: 240, # 'Ţ'
+ 223: 241, # 'ß'
+ 224: 82, # 'ŕ'
+ 225: 14, # 'á'
+ 226: 74, # 'â'
+ 227: 242, # 'ă'
+ 228: 70, # 'ä'
+ 229: 80, # 'ĺ'
+ 230: 243, # 'ć'
+ 231: 72, # 'ç'
+ 232: 244, # 'č'
+ 233: 15, # 'é'
+ 234: 83, # 'ę'
+ 235: 77, # 'ë'
+ 236: 84, # 'ě'
+ 237: 30, # 'í'
+ 238: 76, # 'î'
+ 239: 85, # 'ď'
+ 240: 245, # 'đ'
+ 241: 246, # 'ń'
+ 242: 247, # 'ň'
+ 243: 25, # 'ó'
+ 244: 73, # 'ô'
+ 245: 42, # 'ő'
+ 246: 24, # 'ö'
+ 247: 248, # '÷'
+ 248: 249, # 'ř'
+ 249: 250, # 'ů'
+ 250: 31, # 'ú'
+ 251: 56, # 'ű'
+ 252: 29, # 'ü'
+ 253: 251, # 'ý'
+ 254: 252, # 'ţ'
+ 255: 253, # '˙'
+}
+
+ISO_8859_2_HUNGARIAN_MODEL = SingleByteCharSetModel(
+ charset_name="ISO-8859-2",
+ language="Hungarian",
+ char_to_order_map=ISO_8859_2_HUNGARIAN_CHAR_TO_ORDER,
+ language_model=HUNGARIAN_LANG_MODEL,
+ typical_positive_ratio=0.947368,
+ keep_ascii_letters=True,
+ alphabet="ABCDEFGHIJKLMNOPRSTUVZabcdefghijklmnoprstuvzÁÉÍÓÖÚÜáéíóöúüŐőŰű",
+)
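+
+# Usage sketch (illustrative; assumes the vendored package exposes the
+# standard top-level chardet detect() helper): the two models above back the
+# single-byte charset probers, so Hungarian text in either encoding should be
+# recognizable:
+#
+#     from pip._vendor import chardet
+#     sample = "árvíztűrő tükörfúrógép".encode("iso-8859-2")
+#     print(chardet.detect(sample))  # dict with 'encoding', 'confidence', 'language'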
diff --git a/third_party/python/pip/pip/_vendor/chardet/langrussianmodel.py b/third_party/python/pip/pip/_vendor/chardet/langrussianmodel.py
new file mode 100644
index 0000000000..39a5388948
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/langrussianmodel.py
@@ -0,0 +1,5725 @@
+from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
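+#
+# Each inner value in the table below rates how likely the inner-key
+# character is to follow the outer-key character in Russian text; for
+# example, RUSSIAN_LANG_MODEL[3][21] == 3 ('б' after 'а' is Positive) while
+# RUSSIAN_LANG_MODEL[3][54] == 0 ('ъ' after 'а' is Negative).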
+
+RUSSIAN_LANG_MODEL = {
+ 37: { # 'А'
+ 37: 0, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 2, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 1, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 0, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 1, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 0, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 44: { # 'Б'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 1, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 2, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 33: { # 'В'
+ 37: 2, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 2, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 1, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 0, # 'ю'
+ 16: 1, # 'я'
+ },
+ 46: { # 'Г'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 2, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 41: { # 'Д'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 2, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 2, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 3, # 'ж'
+ 20: 1, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 48: { # 'Е'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 2, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 2, # 'Р'
+ 32: 2, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 2, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 2, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 1, # 'н'
+ 1: 0, # 'о'
+ 15: 1, # 'п'
+ 9: 1, # 'р'
+ 7: 3, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 2, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 56: { # 'Ж'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 1, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 2, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 1, # 'м'
+ 5: 0, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 2, # 'ю'
+ 16: 0, # 'я'
+ },
+ 51: { # 'З'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 0, # 'г'
+ 13: 2, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 1, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 1, # 'я'
+ },
+ 42: { # 'И'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 2, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 2, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 1, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 2, # 'з'
+ 4: 1, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 1, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 1, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 60: { # 'Й'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 1, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 36: { # 'К'
+ 37: 2, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 2, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 0, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 49: { # 'Л'
+ 37: 2, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 1, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 0, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 0, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 1, # 'л'
+ 12: 0, # 'м'
+ 5: 1, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 2, # 'ю'
+ 16: 1, # 'я'
+ },
+ 38: { # 'М'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 1, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 1, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 31: { # 'Н'
+ 37: 2, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 1, # 'З'
+ 42: 2, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 3, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 34: { # 'О'
+ 37: 0, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 2, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 1, # 'З'
+ 42: 1, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 2, # 'Л'
+ 38: 1, # 'М'
+ 31: 2, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 2, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 1, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 1, # 'а'
+ 21: 2, # 'б'
+ 10: 1, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 0, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 1, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 0, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 1, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 35: { # 'П'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 2, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 0, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 3, # 'р'
+ 7: 1, # 'с'
+ 6: 1, # 'т'
+ 14: 2, # 'у'
+ 39: 1, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 0, # 'ю'
+ 16: 2, # 'я'
+ },
+ 45: { # 'Р'
+ 37: 2, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 2, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 0, # 'З'
+ 42: 2, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 2, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 2, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 2, # 'я'
+ },
+ 32: { # 'С'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 2, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 2, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 2, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 1, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 1, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 40: { # 'Т'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 2, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 1, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 1, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 52: { # 'У'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 1, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 1, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 2, # 'и'
+ 23: 1, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 1, # 'н'
+ 1: 2, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 0, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 53: { # 'Ф'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 1, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 55: { # 'Х'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 2, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 0, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 58: { # 'Ц'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 1, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 0, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 50: { # 'Ч'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 1, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 1, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 1, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 1, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 57: { # 'Ш'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 1, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 1, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 2, # 'о'
+ 15: 2, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 63: { # 'Щ'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 1, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 1, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 1, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 1, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 62: { # 'Ы'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 1, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 0, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 0, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 61: { # 'Ь'
+ 37: 0, # 'А'
+ 44: 1, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 1, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 1, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 1, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 1, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 0, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 0, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 47: { # 'Э'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 1, # 'Й'
+ 36: 1, # 'К'
+ 49: 1, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 1, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 0, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 2, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 59: { # 'Ю'
+ 37: 1, # 'А'
+ 44: 1, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 1, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 0, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 1, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 0, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 1, # 'п'
+ 9: 1, # 'р'
+ 7: 1, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 43: { # 'Я'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 1, # 'В'
+ 46: 1, # 'Г'
+ 41: 0, # 'Д'
+ 48: 1, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 1, # 'С'
+ 40: 1, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 1, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 1, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 1, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 1, # 'Ю'
+ 43: 1, # 'Я'
+ 3: 0, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 0, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 1, # 'й'
+ 11: 1, # 'к'
+ 8: 1, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 1, # 'п'
+ 9: 1, # 'р'
+ 7: 1, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 3: { # 'а'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 1, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 3, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 3, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 21: { # 'б'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 1, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 1, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 0, # 'ф'
+ 26: 2, # 'х'
+ 28: 1, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 3, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 10: { # 'в'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 3, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 3, # 'ш'
+ 29: 2, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 3, # 'я'
+ },
+ 19: { # 'г'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 3, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 13: { # 'д'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 3, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 2: { # 'е'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 2, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 3, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 24: { # 'ж'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 1, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 1, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 0, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 20: { # 'з'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 3, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 1, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 3, # 'я'
+ },
+ 4: { # 'и'
+ 37: 1, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 3, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 3, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 23: { # 'й'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 1, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 2, # 'з'
+ 4: 1, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 2, # 'ф'
+ 26: 1, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 2, # 'я'
+ },
+ 11: { # 'к'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 3, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 1, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 8: { # 'л'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 3, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 1, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 1, # 'ц'
+ 22: 3, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 12: { # 'м'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 1, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 2, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 3, # 'я'
+ },
+ 5: { # 'н'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 3, # 'ц'
+ 22: 3, # 'ч'
+ 25: 2, # 'ш'
+ 29: 2, # 'щ'
+ 54: 1, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 1: { # 'о'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 3, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 15: { # 'п'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 3, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 3, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 0, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 1, # 'ш'
+ 29: 1, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 2, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 3, # 'я'
+ },
+ 9: { # 'р'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 2, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 3, # 'ш'
+ 29: 2, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 2, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 7: { # 'с'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 1, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 2, # 'ш'
+ 29: 1, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 6: { # 'т'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 2, # 'щ'
+ 54: 2, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 3, # 'ь'
+ 30: 2, # 'э'
+ 27: 2, # 'ю'
+ 16: 3, # 'я'
+ },
+ 14: { # 'у'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 3, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 2, # 'и'
+ 23: 2, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 2, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 2, # 'э'
+ 27: 3, # 'ю'
+ 16: 2, # 'я'
+ },
+ 39: { # 'ф'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 0, # 'в'
+ 19: 1, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 1, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 2, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 1, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 2, # 'ы'
+ 17: 1, # 'ь'
+ 30: 2, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 26: { # 'х'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 3, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 1, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 1, # 'п'
+ 9: 3, # 'р'
+ 7: 2, # 'с'
+ 6: 2, # 'т'
+ 14: 2, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 0, # 'щ'
+ 54: 1, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 1, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 28: { # 'ц'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 1, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 2, # 'к'
+ 8: 1, # 'л'
+ 12: 1, # 'м'
+ 5: 1, # 'н'
+ 1: 3, # 'о'
+ 15: 0, # 'п'
+ 9: 1, # 'р'
+ 7: 0, # 'с'
+ 6: 1, # 'т'
+ 14: 3, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 1, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 3, # 'ы'
+ 17: 1, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 22: { # 'ч'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 2, # 'л'
+ 12: 1, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 3, # 'т'
+ 14: 3, # 'у'
+ 39: 1, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 1, # 'ч'
+ 25: 2, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 3, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 25: { # 'ш'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 1, # 'б'
+ 10: 2, # 'в'
+ 19: 1, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 2, # 'м'
+ 5: 3, # 'н'
+ 1: 3, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 1, # 'с'
+ 6: 2, # 'т'
+ 14: 3, # 'у'
+ 39: 2, # 'ф'
+ 26: 1, # 'х'
+ 28: 1, # 'ц'
+ 22: 1, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 3, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 0, # 'я'
+ },
+ 29: { # 'щ'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 3, # 'а'
+ 21: 0, # 'б'
+ 10: 1, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 3, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 3, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 1, # 'м'
+ 5: 2, # 'н'
+ 1: 1, # 'о'
+ 15: 0, # 'п'
+ 9: 2, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 2, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 2, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 0, # 'я'
+ },
+ 54: { # 'ъ'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 0, # 'б'
+ 10: 0, # 'в'
+ 19: 0, # 'г'
+ 13: 0, # 'д'
+ 2: 2, # 'е'
+ 24: 0, # 'ж'
+ 20: 0, # 'з'
+ 4: 0, # 'и'
+ 23: 0, # 'й'
+ 11: 0, # 'к'
+ 8: 0, # 'л'
+ 12: 0, # 'м'
+ 5: 0, # 'н'
+ 1: 0, # 'о'
+ 15: 0, # 'п'
+ 9: 0, # 'р'
+ 7: 0, # 'с'
+ 6: 0, # 'т'
+ 14: 0, # 'у'
+ 39: 0, # 'ф'
+ 26: 0, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 0, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 1, # 'ю'
+ 16: 2, # 'я'
+ },
+ 18: { # 'ы'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 3, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 2, # 'и'
+ 23: 3, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 1, # 'о'
+ 15: 3, # 'п'
+ 9: 3, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 0, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 3, # 'ч'
+ 25: 3, # 'ш'
+ 29: 2, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 0, # 'ю'
+ 16: 2, # 'я'
+ },
+ 17: { # 'ь'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 2, # 'б'
+ 10: 2, # 'в'
+ 19: 2, # 'г'
+ 13: 2, # 'д'
+ 2: 3, # 'е'
+ 24: 1, # 'ж'
+ 20: 3, # 'з'
+ 4: 2, # 'и'
+ 23: 0, # 'й'
+ 11: 3, # 'к'
+ 8: 0, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 2, # 'о'
+ 15: 2, # 'п'
+ 9: 1, # 'р'
+ 7: 3, # 'с'
+ 6: 2, # 'т'
+ 14: 0, # 'у'
+ 39: 2, # 'ф'
+ 26: 1, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 3, # 'ш'
+ 29: 2, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 3, # 'ю'
+ 16: 3, # 'я'
+ },
+ 30: { # 'э'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 1, # 'М'
+ 31: 1, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 1, # 'Р'
+ 32: 1, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 1, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 1, # 'б'
+ 10: 1, # 'в'
+ 19: 1, # 'г'
+ 13: 2, # 'д'
+ 2: 1, # 'е'
+ 24: 0, # 'ж'
+ 20: 1, # 'з'
+ 4: 0, # 'и'
+ 23: 2, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 0, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 2, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 2, # 'ф'
+ 26: 1, # 'х'
+ 28: 0, # 'ц'
+ 22: 0, # 'ч'
+ 25: 1, # 'ш'
+ 29: 0, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 1, # 'ю'
+ 16: 1, # 'я'
+ },
+ 27: { # 'ю'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 2, # 'а'
+ 21: 3, # 'б'
+ 10: 1, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 1, # 'е'
+ 24: 2, # 'ж'
+ 20: 2, # 'з'
+ 4: 1, # 'и'
+ 23: 1, # 'й'
+ 11: 2, # 'к'
+ 8: 2, # 'л'
+ 12: 2, # 'м'
+ 5: 2, # 'н'
+ 1: 1, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 0, # 'у'
+ 39: 1, # 'ф'
+ 26: 2, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 1, # 'э'
+ 27: 2, # 'ю'
+ 16: 1, # 'я'
+ },
+ 16: { # 'я'
+ 37: 0, # 'А'
+ 44: 0, # 'Б'
+ 33: 0, # 'В'
+ 46: 0, # 'Г'
+ 41: 0, # 'Д'
+ 48: 0, # 'Е'
+ 56: 0, # 'Ж'
+ 51: 0, # 'З'
+ 42: 0, # 'И'
+ 60: 0, # 'Й'
+ 36: 0, # 'К'
+ 49: 0, # 'Л'
+ 38: 0, # 'М'
+ 31: 0, # 'Н'
+ 34: 0, # 'О'
+ 35: 0, # 'П'
+ 45: 0, # 'Р'
+ 32: 0, # 'С'
+ 40: 0, # 'Т'
+ 52: 0, # 'У'
+ 53: 0, # 'Ф'
+ 55: 0, # 'Х'
+ 58: 0, # 'Ц'
+ 50: 0, # 'Ч'
+ 57: 0, # 'Ш'
+ 63: 0, # 'Щ'
+ 62: 0, # 'Ы'
+ 61: 0, # 'Ь'
+ 47: 0, # 'Э'
+ 59: 0, # 'Ю'
+ 43: 0, # 'Я'
+ 3: 0, # 'а'
+ 21: 2, # 'б'
+ 10: 3, # 'в'
+ 19: 2, # 'г'
+ 13: 3, # 'д'
+ 2: 3, # 'е'
+ 24: 3, # 'ж'
+ 20: 3, # 'з'
+ 4: 2, # 'и'
+ 23: 2, # 'й'
+ 11: 3, # 'к'
+ 8: 3, # 'л'
+ 12: 3, # 'м'
+ 5: 3, # 'н'
+ 1: 0, # 'о'
+ 15: 2, # 'п'
+ 9: 2, # 'р'
+ 7: 3, # 'с'
+ 6: 3, # 'т'
+ 14: 1, # 'у'
+ 39: 1, # 'ф'
+ 26: 3, # 'х'
+ 28: 2, # 'ц'
+ 22: 2, # 'ч'
+ 25: 2, # 'ш'
+ 29: 3, # 'щ'
+ 54: 0, # 'ъ'
+ 18: 0, # 'ы'
+ 17: 0, # 'ь'
+ 30: 0, # 'э'
+ 27: 2, # 'ю'
+ 16: 2, # 'я'
+ },
+}
+
+# 255: Undefined characters that did not exist in training text
+# 254: Carriage Return and Line Feed ('\r', '\n')
+# 253: symbol (punctuation) that does not belong to word
+# 252: 0 - 9
+# 251: Control characters
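+# (Editor's note, not in upstream source: these sentinel orders are shared by
+# every CHAR_TO_ORDER table below; e.g. byte 0x30, the digit '0', maps to 252
+# in each of them, while the 63 most frequent Cyrillic letters receive the
+# orders 1-63 that index the RUSSIAN_LANG_MODEL rows and columns above.)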
+
+# Character Mapping Table(s):
+IBM866_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 37, # 'А'
+ 129: 44, # 'Б'
+ 130: 33, # 'В'
+ 131: 46, # 'Г'
+ 132: 41, # 'Д'
+ 133: 48, # 'Е'
+ 134: 56, # 'Ж'
+ 135: 51, # 'З'
+ 136: 42, # 'И'
+ 137: 60, # 'Й'
+ 138: 36, # 'К'
+ 139: 49, # 'Л'
+ 140: 38, # 'М'
+ 141: 31, # 'Н'
+ 142: 34, # 'О'
+ 143: 35, # 'П'
+ 144: 45, # 'Р'
+ 145: 32, # 'С'
+ 146: 40, # 'Т'
+ 147: 52, # 'У'
+ 148: 53, # 'Ф'
+ 149: 55, # 'Х'
+ 150: 58, # 'Ц'
+ 151: 50, # 'Ч'
+ 152: 57, # 'Ш'
+ 153: 63, # 'Щ'
+ 154: 70, # 'Ъ'
+ 155: 62, # 'Ы'
+ 156: 61, # 'Ь'
+ 157: 47, # 'Э'
+ 158: 59, # 'Ю'
+ 159: 43, # 'Я'
+ 160: 3, # 'а'
+ 161: 21, # 'б'
+ 162: 10, # 'в'
+ 163: 19, # 'г'
+ 164: 13, # 'д'
+ 165: 2, # 'е'
+ 166: 24, # 'ж'
+ 167: 20, # 'з'
+ 168: 4, # 'и'
+ 169: 23, # 'й'
+ 170: 11, # 'к'
+ 171: 8, # 'л'
+ 172: 12, # 'м'
+ 173: 5, # 'н'
+ 174: 1, # 'о'
+ 175: 15, # 'п'
+ 176: 191, # '░'
+ 177: 192, # '▒'
+ 178: 193, # '▓'
+ 179: 194, # '│'
+ 180: 195, # '┤'
+ 181: 196, # '╡'
+ 182: 197, # '╢'
+ 183: 198, # '╖'
+ 184: 199, # '╕'
+ 185: 200, # '╣'
+ 186: 201, # '║'
+ 187: 202, # '╗'
+ 188: 203, # '╝'
+ 189: 204, # '╜'
+ 190: 205, # '╛'
+ 191: 206, # '┐'
+ 192: 207, # '└'
+ 193: 208, # '┴'
+ 194: 209, # '┬'
+ 195: 210, # '├'
+ 196: 211, # '─'
+ 197: 212, # '┼'
+ 198: 213, # '╞'
+ 199: 214, # '╟'
+ 200: 215, # '╚'
+ 201: 216, # '╔'
+ 202: 217, # '╩'
+ 203: 218, # '╦'
+ 204: 219, # '╠'
+ 205: 220, # '═'
+ 206: 221, # '╬'
+ 207: 222, # '╧'
+ 208: 223, # '╨'
+ 209: 224, # '╤'
+ 210: 225, # '╥'
+ 211: 226, # '╙'
+ 212: 227, # '╘'
+ 213: 228, # '╒'
+ 214: 229, # '╓'
+ 215: 230, # '╫'
+ 216: 231, # '╪'
+ 217: 232, # '┘'
+ 218: 233, # '┌'
+ 219: 234, # '█'
+ 220: 235, # '▄'
+ 221: 236, # '▌'
+ 222: 237, # '▐'
+ 223: 238, # '▀'
+ 224: 9, # 'р'
+ 225: 7, # 'с'
+ 226: 6, # 'т'
+ 227: 14, # 'у'
+ 228: 39, # 'ф'
+ 229: 26, # 'х'
+ 230: 28, # 'ц'
+ 231: 22, # 'ч'
+ 232: 25, # 'ш'
+ 233: 29, # 'щ'
+ 234: 54, # 'ъ'
+ 235: 18, # 'ы'
+ 236: 17, # 'ь'
+ 237: 30, # 'э'
+ 238: 27, # 'ю'
+ 239: 16, # 'я'
+ 240: 239, # 'Ё'
+ 241: 68, # 'ё'
+ 242: 240, # 'Є'
+ 243: 241, # 'є'
+ 244: 242, # 'Ї'
+ 245: 243, # 'ї'
+ 246: 244, # 'Ў'
+ 247: 245, # 'ў'
+ 248: 246, # '°'
+ 249: 247, # '∙'
+ 250: 248, # '·'
+ 251: 249, # '√'
+ 252: 250, # '№'
+ 253: 251, # '¤'
+ 254: 252, # '■'
+ 255: 255, # '\xa0'
+}
+
+IBM866_RUSSIAN_MODEL = SingleByteCharSetModel(
+ charset_name="IBM866",
+ language="Russian",
+ char_to_order_map=IBM866_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
+
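+# --- Editorial sketch, not part of upstream chardet -------------------------
+# A minimal illustration of how a single-byte prober conceptually consumes the
+# two tables above: each byte is mapped to a frequency order, and consecutive
+# in-alphabet orders are rated via RUSSIAN_LANG_MODEL. The helper name and the
+# 64-order cutoff are assumptions for illustration, not upstream API.
+def _demo_count_positive_bigrams(data: bytes) -> int:
+    """Count bigrams rated 3 ('Positive') in IBM866-encoded text."""
+    positives = 0
+    prev_order = None
+    for byte in data:
+        order = IBM866_RUSSIAN_CHAR_TO_ORDER[byte]
+        if prev_order is not None and order < 64:
+            # Orders >= 64 (rare letters, digits, symbols, control bytes)
+            # have no row/column in the language model.
+            if RUSSIAN_LANG_MODEL.get(prev_order, {}).get(order, 0) == 3:
+                positives += 1
+        prev_order = order if order < 64 else None
+    return positives
+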
+WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 191, # 'Ђ'
+ 129: 192, # 'Ѓ'
+ 130: 193, # '‚'
+ 131: 194, # 'ѓ'
+ 132: 195, # '„'
+ 133: 196, # '…'
+ 134: 197, # '†'
+ 135: 198, # '‡'
+ 136: 199, # '€'
+ 137: 200, # '‰'
+ 138: 201, # 'Љ'
+ 139: 202, # '‹'
+ 140: 203, # 'Њ'
+ 141: 204, # 'Ќ'
+ 142: 205, # 'Ћ'
+ 143: 206, # 'Џ'
+ 144: 207, # 'ђ'
+ 145: 208, # '‘'
+ 146: 209, # '’'
+ 147: 210, # '“'
+ 148: 211, # '”'
+ 149: 212, # '•'
+ 150: 213, # '–'
+ 151: 214, # '—'
+ 152: 215, # None
+ 153: 216, # '™'
+ 154: 217, # 'љ'
+ 155: 218, # '›'
+ 156: 219, # 'њ'
+ 157: 220, # 'ќ'
+ 158: 221, # 'ћ'
+ 159: 222, # 'џ'
+ 160: 223, # '\xa0'
+ 161: 224, # 'Ў'
+ 162: 225, # 'ў'
+ 163: 226, # 'Ј'
+ 164: 227, # '¤'
+ 165: 228, # 'Ґ'
+ 166: 229, # '¦'
+ 167: 230, # '§'
+ 168: 231, # 'Ё'
+ 169: 232, # '©'
+ 170: 233, # 'Є'
+ 171: 234, # '«'
+ 172: 235, # '¬'
+ 173: 236, # '\xad'
+ 174: 237, # '®'
+ 175: 238, # 'Ї'
+ 176: 239, # '°'
+ 177: 240, # '±'
+ 178: 241, # 'І'
+ 179: 242, # 'і'
+ 180: 243, # 'ґ'
+ 181: 244, # 'µ'
+ 182: 245, # '¶'
+ 183: 246, # '·'
+ 184: 68, # 'ё'
+ 185: 247, # '№'
+ 186: 248, # 'є'
+ 187: 249, # '»'
+ 188: 250, # 'ј'
+ 189: 251, # 'Ѕ'
+ 190: 252, # 'ѕ'
+ 191: 253, # 'ї'
+ 192: 37, # 'А'
+ 193: 44, # 'Б'
+ 194: 33, # 'В'
+ 195: 46, # 'Г'
+ 196: 41, # 'Д'
+ 197: 48, # 'Е'
+ 198: 56, # 'Ж'
+ 199: 51, # 'З'
+ 200: 42, # 'И'
+ 201: 60, # 'Й'
+ 202: 36, # 'К'
+ 203: 49, # 'Л'
+ 204: 38, # 'М'
+ 205: 31, # 'Н'
+ 206: 34, # 'О'
+ 207: 35, # 'П'
+ 208: 45, # 'Р'
+ 209: 32, # 'С'
+ 210: 40, # 'Т'
+ 211: 52, # 'У'
+ 212: 53, # 'Ф'
+ 213: 55, # 'Х'
+ 214: 58, # 'Ц'
+ 215: 50, # 'Ч'
+ 216: 57, # 'Ш'
+ 217: 63, # 'Щ'
+ 218: 70, # 'Ъ'
+ 219: 62, # 'Ы'
+ 220: 61, # 'Ь'
+ 221: 47, # 'Э'
+ 222: 59, # 'Ю'
+ 223: 43, # 'Я'
+ 224: 3, # 'а'
+ 225: 21, # 'б'
+ 226: 10, # 'в'
+ 227: 19, # 'г'
+ 228: 13, # 'д'
+ 229: 2, # 'е'
+ 230: 24, # 'ж'
+ 231: 20, # 'з'
+ 232: 4, # 'и'
+ 233: 23, # 'й'
+ 234: 11, # 'к'
+ 235: 8, # 'л'
+ 236: 12, # 'м'
+ 237: 5, # 'н'
+ 238: 1, # 'о'
+ 239: 15, # 'п'
+ 240: 9, # 'р'
+ 241: 7, # 'с'
+ 242: 6, # 'т'
+ 243: 14, # 'у'
+ 244: 39, # 'ф'
+ 245: 26, # 'х'
+ 246: 28, # 'ц'
+ 247: 22, # 'ч'
+ 248: 25, # 'ш'
+ 249: 29, # 'щ'
+ 250: 54, # 'ъ'
+ 251: 18, # 'ы'
+ 252: 17, # 'ь'
+ 253: 30, # 'э'
+ 254: 27, # 'ю'
+ 255: 16, # 'я'
+}
+
+WINDOWS_1251_RUSSIAN_MODEL = SingleByteCharSetModel(
+ charset_name="windows-1251",
+ language="Russian",
+ char_to_order_map=WINDOWS_1251_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
+
+IBM855_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 191, # 'ђ'
+ 129: 192, # 'Ђ'
+ 130: 193, # 'ѓ'
+ 131: 194, # 'Ѓ'
+ 132: 68, # 'ё'
+ 133: 195, # 'Ё'
+ 134: 196, # 'є'
+ 135: 197, # 'Є'
+ 136: 198, # 'ѕ'
+ 137: 199, # 'Ѕ'
+ 138: 200, # 'і'
+ 139: 201, # 'І'
+ 140: 202, # 'ї'
+ 141: 203, # 'Ї'
+ 142: 204, # 'ј'
+ 143: 205, # 'Ј'
+ 144: 206, # 'љ'
+ 145: 207, # 'Љ'
+ 146: 208, # 'њ'
+ 147: 209, # 'Њ'
+ 148: 210, # 'ћ'
+ 149: 211, # 'Ћ'
+ 150: 212, # 'ќ'
+ 151: 213, # 'Ќ'
+ 152: 214, # 'ў'
+ 153: 215, # 'Ў'
+ 154: 216, # 'џ'
+ 155: 217, # 'Џ'
+ 156: 27, # 'ю'
+ 157: 59, # 'Ю'
+ 158: 54, # 'ъ'
+ 159: 70, # 'Ъ'
+ 160: 3, # 'а'
+ 161: 37, # 'А'
+ 162: 21, # 'б'
+ 163: 44, # 'Б'
+ 164: 28, # 'ц'
+ 165: 58, # 'Ц'
+ 166: 13, # 'д'
+ 167: 41, # 'Д'
+ 168: 2, # 'е'
+ 169: 48, # 'Е'
+ 170: 39, # 'ф'
+ 171: 53, # 'Ф'
+ 172: 19, # 'г'
+ 173: 46, # 'Г'
+ 174: 218, # '«'
+ 175: 219, # '»'
+ 176: 220, # '░'
+ 177: 221, # '▒'
+ 178: 222, # '▓'
+ 179: 223, # '│'
+ 180: 224, # '┤'
+ 181: 26, # 'х'
+ 182: 55, # 'Х'
+ 183: 4, # 'и'
+ 184: 42, # 'И'
+ 185: 225, # '╣'
+ 186: 226, # '║'
+ 187: 227, # '╗'
+ 188: 228, # '╝'
+ 189: 23, # 'й'
+ 190: 60, # 'Й'
+ 191: 229, # '┐'
+ 192: 230, # '└'
+ 193: 231, # '┴'
+ 194: 232, # '┬'
+ 195: 233, # '├'
+ 196: 234, # '─'
+ 197: 235, # '┼'
+ 198: 11, # 'к'
+ 199: 36, # 'К'
+ 200: 236, # '╚'
+ 201: 237, # '╔'
+ 202: 238, # '╩'
+ 203: 239, # '╦'
+ 204: 240, # '╠'
+ 205: 241, # '═'
+ 206: 242, # '╬'
+ 207: 243, # '¤'
+ 208: 8, # 'л'
+ 209: 49, # 'Л'
+ 210: 12, # 'м'
+ 211: 38, # 'М'
+ 212: 5, # 'н'
+ 213: 31, # 'Н'
+ 214: 1, # 'о'
+ 215: 34, # 'О'
+ 216: 15, # 'п'
+ 217: 244, # '┘'
+ 218: 245, # '┌'
+ 219: 246, # '█'
+ 220: 247, # '▄'
+ 221: 35, # 'П'
+ 222: 16, # 'я'
+ 223: 248, # '▀'
+ 224: 43, # 'Я'
+ 225: 9, # 'р'
+ 226: 45, # 'Р'
+ 227: 7, # 'с'
+ 228: 32, # 'С'
+ 229: 6, # 'т'
+ 230: 40, # 'Т'
+ 231: 14, # 'у'
+ 232: 52, # 'У'
+ 233: 24, # 'ж'
+ 234: 56, # 'Ж'
+ 235: 10, # 'в'
+ 236: 33, # 'В'
+ 237: 17, # 'ь'
+ 238: 61, # 'Ь'
+ 239: 249, # '№'
+ 240: 250, # '\xad'
+ 241: 18, # 'ы'
+ 242: 62, # 'Ы'
+ 243: 20, # 'з'
+ 244: 51, # 'З'
+ 245: 25, # 'ш'
+ 246: 57, # 'Ш'
+ 247: 30, # 'э'
+ 248: 47, # 'Э'
+ 249: 29, # 'щ'
+ 250: 63, # 'Щ'
+ 251: 22, # 'ч'
+ 252: 50, # 'Ч'
+ 253: 251, # '§'
+ 254: 252, # '■'
+ 255: 255, # '\xa0'
+}
+
+IBM855_RUSSIAN_MODEL = SingleByteCharSetModel(
+ charset_name="IBM855",
+ language="Russian",
+ char_to_order_map=IBM855_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
+
+KOI8_R_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 191, # '─'
+ 129: 192, # '│'
+ 130: 193, # '┌'
+ 131: 194, # '┐'
+ 132: 195, # '└'
+ 133: 196, # '┘'
+ 134: 197, # '├'
+ 135: 198, # '┤'
+ 136: 199, # '┬'
+ 137: 200, # '┴'
+ 138: 201, # '┼'
+ 139: 202, # '▀'
+ 140: 203, # '▄'
+ 141: 204, # '█'
+ 142: 205, # '▌'
+ 143: 206, # '▐'
+ 144: 207, # '░'
+ 145: 208, # '▒'
+ 146: 209, # '▓'
+ 147: 210, # '⌠'
+ 148: 211, # '■'
+ 149: 212, # '∙'
+ 150: 213, # '√'
+ 151: 214, # '≈'
+ 152: 215, # '≤'
+ 153: 216, # '≥'
+ 154: 217, # '\xa0'
+ 155: 218, # '⌡'
+ 156: 219, # '°'
+ 157: 220, # '²'
+ 158: 221, # '·'
+ 159: 222, # '÷'
+ 160: 223, # '═'
+ 161: 224, # '║'
+ 162: 225, # '╒'
+ 163: 68, # 'ё'
+ 164: 226, # '╓'
+ 165: 227, # '╔'
+ 166: 228, # '╕'
+ 167: 229, # '╖'
+ 168: 230, # '╗'
+ 169: 231, # '╘'
+ 170: 232, # '╙'
+ 171: 233, # '╚'
+ 172: 234, # '╛'
+ 173: 235, # '╜'
+ 174: 236, # '╝'
+ 175: 237, # '╞'
+ 176: 238, # '╟'
+ 177: 239, # '╠'
+ 178: 240, # '╡'
+ 179: 241, # 'Ё'
+ 180: 242, # '╢'
+ 181: 243, # '╣'
+ 182: 244, # '╤'
+ 183: 245, # '╥'
+ 184: 246, # '╦'
+ 185: 247, # '╧'
+ 186: 248, # '╨'
+ 187: 249, # '╩'
+ 188: 250, # '╪'
+ 189: 251, # '╫'
+ 190: 252, # '╬'
+ 191: 253, # '©'
+ 192: 27, # 'ю'
+ 193: 3, # 'а'
+ 194: 21, # 'б'
+ 195: 28, # 'ц'
+ 196: 13, # 'д'
+ 197: 2, # 'е'
+ 198: 39, # 'ф'
+ 199: 19, # 'г'
+ 200: 26, # 'х'
+ 201: 4, # 'и'
+ 202: 23, # 'й'
+ 203: 11, # 'к'
+ 204: 8, # 'л'
+ 205: 12, # 'м'
+ 206: 5, # 'н'
+ 207: 1, # 'о'
+ 208: 15, # 'п'
+ 209: 16, # 'я'
+ 210: 9, # 'р'
+ 211: 7, # 'с'
+ 212: 6, # 'т'
+ 213: 14, # 'у'
+ 214: 24, # 'ж'
+ 215: 10, # 'в'
+ 216: 17, # 'ь'
+ 217: 18, # 'ы'
+ 218: 20, # 'з'
+ 219: 25, # 'ш'
+ 220: 30, # 'э'
+ 221: 29, # 'щ'
+ 222: 22, # 'ч'
+ 223: 54, # 'ъ'
+ 224: 59, # 'Ю'
+ 225: 37, # 'А'
+ 226: 44, # 'Б'
+ 227: 58, # 'Ц'
+ 228: 41, # 'Д'
+ 229: 48, # 'Е'
+ 230: 53, # 'Ф'
+ 231: 46, # 'Г'
+ 232: 55, # 'Х'
+ 233: 42, # 'И'
+ 234: 60, # 'Й'
+ 235: 36, # 'К'
+ 236: 49, # 'Л'
+ 237: 38, # 'М'
+ 238: 31, # 'Н'
+ 239: 34, # 'О'
+ 240: 35, # 'П'
+ 241: 43, # 'Я'
+ 242: 45, # 'Р'
+ 243: 32, # 'С'
+ 244: 40, # 'Т'
+ 245: 52, # 'У'
+ 246: 56, # 'Ж'
+ 247: 33, # 'В'
+ 248: 61, # 'Ь'
+ 249: 62, # 'Ы'
+ 250: 51, # 'З'
+ 251: 57, # 'Ш'
+ 252: 47, # 'Э'
+ 253: 63, # 'Щ'
+ 254: 50, # 'Ч'
+ 255: 70, # 'Ъ'
+}
+
+KOI8_R_RUSSIAN_MODEL = SingleByteCharSetModel(
+ charset_name="KOI8-R",
+ language="Russian",
+ char_to_order_map=KOI8_R_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
+
+MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 37, # 'А'
+ 129: 44, # 'Б'
+ 130: 33, # 'В'
+ 131: 46, # 'Г'
+ 132: 41, # 'Д'
+ 133: 48, # 'Е'
+ 134: 56, # 'Ж'
+ 135: 51, # 'З'
+ 136: 42, # 'И'
+ 137: 60, # 'Й'
+ 138: 36, # 'К'
+ 139: 49, # 'Л'
+ 140: 38, # 'М'
+ 141: 31, # 'Н'
+ 142: 34, # 'О'
+ 143: 35, # 'П'
+ 144: 45, # 'Р'
+ 145: 32, # 'С'
+ 146: 40, # 'Т'
+ 147: 52, # 'У'
+ 148: 53, # 'Ф'
+ 149: 55, # 'Х'
+ 150: 58, # 'Ц'
+ 151: 50, # 'Ч'
+ 152: 57, # 'Ш'
+ 153: 63, # 'Щ'
+ 154: 70, # 'Ъ'
+ 155: 62, # 'Ы'
+ 156: 61, # 'Ь'
+ 157: 47, # 'Э'
+ 158: 59, # 'Ю'
+ 159: 43, # 'Я'
+ 160: 191, # '†'
+ 161: 192, # '°'
+ 162: 193, # 'Ґ'
+ 163: 194, # '£'
+ 164: 195, # '§'
+ 165: 196, # '•'
+ 166: 197, # '¶'
+ 167: 198, # 'І'
+ 168: 199, # '®'
+ 169: 200, # '©'
+ 170: 201, # '™'
+ 171: 202, # 'Ђ'
+ 172: 203, # 'ђ'
+ 173: 204, # '≠'
+ 174: 205, # 'Ѓ'
+ 175: 206, # 'ѓ'
+ 176: 207, # '∞'
+ 177: 208, # '±'
+ 178: 209, # '≤'
+ 179: 210, # '≥'
+ 180: 211, # 'і'
+ 181: 212, # 'µ'
+ 182: 213, # 'ґ'
+ 183: 214, # 'Ј'
+ 184: 215, # 'Є'
+ 185: 216, # 'є'
+ 186: 217, # 'Ї'
+ 187: 218, # 'ї'
+ 188: 219, # 'Љ'
+ 189: 220, # 'љ'
+ 190: 221, # 'Њ'
+ 191: 222, # 'њ'
+ 192: 223, # 'ј'
+ 193: 224, # 'Ѕ'
+ 194: 225, # '¬'
+ 195: 226, # '√'
+ 196: 227, # 'ƒ'
+ 197: 228, # '≈'
+ 198: 229, # '∆'
+ 199: 230, # '«'
+ 200: 231, # '»'
+ 201: 232, # '…'
+ 202: 233, # '\xa0'
+ 203: 234, # 'Ћ'
+ 204: 235, # 'ћ'
+ 205: 236, # 'Ќ'
+ 206: 237, # 'ќ'
+ 207: 238, # 'ѕ'
+ 208: 239, # '–'
+ 209: 240, # '—'
+ 210: 241, # '“'
+ 211: 242, # '”'
+ 212: 243, # '‘'
+ 213: 244, # '’'
+ 214: 245, # '÷'
+ 215: 246, # '„'
+ 216: 247, # 'Ў'
+ 217: 248, # 'ў'
+ 218: 249, # 'Џ'
+ 219: 250, # 'џ'
+ 220: 251, # '№'
+ 221: 252, # 'Ё'
+ 222: 68, # 'ё'
+ 223: 16, # 'я'
+ 224: 3, # 'а'
+ 225: 21, # 'б'
+ 226: 10, # 'в'
+ 227: 19, # 'г'
+ 228: 13, # 'д'
+ 229: 2, # 'е'
+ 230: 24, # 'ж'
+ 231: 20, # 'з'
+ 232: 4, # 'и'
+ 233: 23, # 'й'
+ 234: 11, # 'к'
+ 235: 8, # 'л'
+ 236: 12, # 'м'
+ 237: 5, # 'н'
+ 238: 1, # 'о'
+ 239: 15, # 'п'
+ 240: 9, # 'р'
+ 241: 7, # 'с'
+ 242: 6, # 'т'
+ 243: 14, # 'у'
+ 244: 39, # 'ф'
+ 245: 26, # 'х'
+ 246: 28, # 'ц'
+ 247: 22, # 'ч'
+ 248: 25, # 'ш'
+ 249: 29, # 'щ'
+ 250: 54, # 'ъ'
+ 251: 18, # 'ы'
+ 252: 17, # 'ь'
+ 253: 30, # 'э'
+ 254: 27, # 'ю'
+ 255: 255, # '€'
+}
+
+MACCYRILLIC_RUSSIAN_MODEL = SingleByteCharSetModel(
+ charset_name="MacCyrillic",
+ language="Russian",
+ char_to_order_map=MACCYRILLIC_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
+
+ISO_8859_5_RUSSIAN_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 142, # 'A'
+ 66: 143, # 'B'
+ 67: 144, # 'C'
+ 68: 145, # 'D'
+ 69: 146, # 'E'
+ 70: 147, # 'F'
+ 71: 148, # 'G'
+ 72: 149, # 'H'
+ 73: 150, # 'I'
+ 74: 151, # 'J'
+ 75: 152, # 'K'
+ 76: 74, # 'L'
+ 77: 153, # 'M'
+ 78: 75, # 'N'
+ 79: 154, # 'O'
+ 80: 155, # 'P'
+ 81: 156, # 'Q'
+ 82: 157, # 'R'
+ 83: 158, # 'S'
+ 84: 159, # 'T'
+ 85: 160, # 'U'
+ 86: 161, # 'V'
+ 87: 162, # 'W'
+ 88: 163, # 'X'
+ 89: 164, # 'Y'
+ 90: 165, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 71, # 'a'
+ 98: 172, # 'b'
+ 99: 66, # 'c'
+ 100: 173, # 'd'
+ 101: 65, # 'e'
+ 102: 174, # 'f'
+ 103: 76, # 'g'
+ 104: 175, # 'h'
+ 105: 64, # 'i'
+ 106: 176, # 'j'
+ 107: 177, # 'k'
+ 108: 77, # 'l'
+ 109: 72, # 'm'
+ 110: 178, # 'n'
+ 111: 69, # 'o'
+ 112: 67, # 'p'
+ 113: 179, # 'q'
+ 114: 78, # 'r'
+ 115: 73, # 's'
+ 116: 180, # 't'
+ 117: 181, # 'u'
+ 118: 79, # 'v'
+ 119: 182, # 'w'
+ 120: 183, # 'x'
+ 121: 184, # 'y'
+ 122: 185, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 191, # '\x80'
+ 129: 192, # '\x81'
+ 130: 193, # '\x82'
+ 131: 194, # '\x83'
+ 132: 195, # '\x84'
+ 133: 196, # '\x85'
+ 134: 197, # '\x86'
+ 135: 198, # '\x87'
+ 136: 199, # '\x88'
+ 137: 200, # '\x89'
+ 138: 201, # '\x8a'
+ 139: 202, # '\x8b'
+ 140: 203, # '\x8c'
+ 141: 204, # '\x8d'
+ 142: 205, # '\x8e'
+ 143: 206, # '\x8f'
+ 144: 207, # '\x90'
+ 145: 208, # '\x91'
+ 146: 209, # '\x92'
+ 147: 210, # '\x93'
+ 148: 211, # '\x94'
+ 149: 212, # '\x95'
+ 150: 213, # '\x96'
+ 151: 214, # '\x97'
+ 152: 215, # '\x98'
+ 153: 216, # '\x99'
+ 154: 217, # '\x9a'
+ 155: 218, # '\x9b'
+ 156: 219, # '\x9c'
+ 157: 220, # '\x9d'
+ 158: 221, # '\x9e'
+ 159: 222, # '\x9f'
+ 160: 223, # '\xa0'
+ 161: 224, # 'Ё'
+ 162: 225, # 'Ђ'
+ 163: 226, # 'Ѓ'
+ 164: 227, # 'Є'
+ 165: 228, # 'Ѕ'
+ 166: 229, # 'І'
+ 167: 230, # 'Ї'
+ 168: 231, # 'Ј'
+ 169: 232, # 'Љ'
+ 170: 233, # 'Њ'
+ 171: 234, # 'Ћ'
+ 172: 235, # 'Ќ'
+ 173: 236, # '\xad'
+ 174: 237, # 'Ў'
+ 175: 238, # 'Џ'
+ 176: 37, # 'А'
+ 177: 44, # 'Б'
+ 178: 33, # 'В'
+ 179: 46, # 'Г'
+ 180: 41, # 'Д'
+ 181: 48, # 'Е'
+ 182: 56, # 'Ж'
+ 183: 51, # 'З'
+ 184: 42, # 'И'
+ 185: 60, # 'Й'
+ 186: 36, # 'К'
+ 187: 49, # 'Л'
+ 188: 38, # 'М'
+ 189: 31, # 'Н'
+ 190: 34, # 'О'
+ 191: 35, # 'П'
+ 192: 45, # 'Р'
+ 193: 32, # 'С'
+ 194: 40, # 'Т'
+ 195: 52, # 'У'
+ 196: 53, # 'Ф'
+ 197: 55, # 'Х'
+ 198: 58, # 'Ц'
+ 199: 50, # 'Ч'
+ 200: 57, # 'Ш'
+ 201: 63, # 'Щ'
+ 202: 70, # 'Ъ'
+ 203: 62, # 'Ы'
+ 204: 61, # 'Ь'
+ 205: 47, # 'Э'
+ 206: 59, # 'Ю'
+ 207: 43, # 'Я'
+ 208: 3, # 'а'
+ 209: 21, # 'б'
+ 210: 10, # 'в'
+ 211: 19, # 'г'
+ 212: 13, # 'д'
+ 213: 2, # 'е'
+ 214: 24, # 'ж'
+ 215: 20, # 'з'
+ 216: 4, # 'и'
+ 217: 23, # 'й'
+ 218: 11, # 'к'
+ 219: 8, # 'л'
+ 220: 12, # 'м'
+ 221: 5, # 'н'
+ 222: 1, # 'о'
+ 223: 15, # 'п'
+ 224: 9, # 'р'
+ 225: 7, # 'с'
+ 226: 6, # 'т'
+ 227: 14, # 'у'
+ 228: 39, # 'ф'
+ 229: 26, # 'х'
+ 230: 28, # 'ц'
+ 231: 22, # 'ч'
+ 232: 25, # 'ш'
+ 233: 29, # 'щ'
+ 234: 54, # 'ъ'
+ 235: 18, # 'ы'
+ 236: 17, # 'ь'
+ 237: 30, # 'э'
+ 238: 27, # 'ю'
+ 239: 16, # 'я'
+ 240: 239, # '№'
+ 241: 68, # 'ё'
+ 242: 240, # 'ђ'
+ 243: 241, # 'ѓ'
+ 244: 242, # 'є'
+ 245: 243, # 'ѕ'
+ 246: 244, # 'і'
+ 247: 245, # 'ї'
+ 248: 246, # 'ј'
+ 249: 247, # 'љ'
+ 250: 248, # 'њ'
+ 251: 249, # 'ћ'
+ 252: 250, # 'ќ'
+ 253: 251, # '§'
+ 254: 252, # 'ў'
+ 255: 255, # 'џ'
+}
+
+ISO_8859_5_RUSSIAN_MODEL = SingleByteCharSetModel(
+ charset_name="ISO-8859-5",
+ language="Russian",
+ char_to_order_map=ISO_8859_5_RUSSIAN_CHAR_TO_ORDER,
+ language_model=RUSSIAN_LANG_MODEL,
+ typical_positive_ratio=0.976601,
+ keep_ascii_letters=False,
+ alphabet="ЁАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдежзийклмнопрстуфхцчшщъыьэюяё",
+)
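+
+# (Editor's note, not in upstream source: all six Russian models above --
+# IBM866, windows-1251, IBM855, KOI8-R, MacCyrillic, and ISO-8859-5 -- share
+# the same RUSSIAN_LANG_MODEL, alphabet, and typical_positive_ratio; only
+# char_to_order_map differs, because each legacy encoding places the same
+# Cyrillic letters at different byte values.)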
diff --git a/third_party/python/pip/pip/_vendor/chardet/langthaimodel.py b/third_party/python/pip/pip/_vendor/chardet/langthaimodel.py
new file mode 100644
index 0000000000..489cad930e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/langthaimodel.py
@@ -0,0 +1,4380 @@
+from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
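+#
+# (Editor's note, not in upstream source: for example, THAI_LANG_MODEL[5][1]
+# == 3 below means the bigram 'ก' followed by 'า' was rated Positive in the
+# training text, while THAI_LANG_MODEL[5][52] == 0 means 'ก' followed by 'ฉ'
+# was rated Negative.)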
+
+THAI_LANG_MODEL = {
+ 5: { # 'ก'
+ 5: 2, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 2, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 3, # 'ฎ'
+ 57: 2, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 2, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 3, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 1, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 3, # 'ร'
+ 61: 2, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 3, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 3, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 30: { # 'ข'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 1, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 2, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 2, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 2, # '่'
+ 7: 3, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 24: { # 'ค'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 2, # 'ค'
+ 8: 2, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 2, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 0, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 3, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 2, # 'า'
+ 36: 3, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 3, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 8: { # 'ง'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 3, # 'ค'
+ 8: 2, # 'ง'
+ 26: 2, # 'จ'
+ 52: 1, # 'ฉ'
+ 34: 2, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 1, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 3, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 26: { # 'จ'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 0, # 'ค'
+ 8: 2, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 1, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 1, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 2, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 52: { # 'ฉ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 3, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 1, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 1, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 34: { # 'ช'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 1, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 1, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 1, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 51: { # 'ซ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 1, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 1, # '่'
+ 7: 2, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 47: { # 'ญ'
+ 5: 1, # 'ก'
+ 30: 1, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 3, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 2, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 58: { # 'ฎ'
+ 5: 2, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 1, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 57: { # 'ฏ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 49: { # 'ฐ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 53: { # 'ฑ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 55: { # 'ฒ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 43: { # 'ณ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 3, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 3, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 20: { # 'ด'
+ 5: 2, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 3, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 2, # 'า'
+ 36: 2, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 1, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 2, # 'ๆ'
+ 37: 2, # '็'
+ 6: 1, # '่'
+ 7: 3, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 19: { # 'ต'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 1, # 'ต'
+ 44: 2, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 2, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 1, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 0, # 'ห'
+ 4: 3, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 1, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 2, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 44: { # 'ถ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 14: { # 'ท'
+ 5: 1, # 'ก'
+ 30: 1, # 'ข'
+ 24: 3, # 'ค'
+ 8: 1, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 3, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 3, # 'ย'
+ 2: 3, # 'ร'
+ 61: 1, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 2, # 'ว'
+ 42: 3, # 'ศ'
+ 46: 1, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 0, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 48: { # 'ธ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 1, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 2, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 3: { # 'น'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 3, # 'ค'
+ 8: 1, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 1, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 2, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 3, # 'ธ'
+ 3: 2, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 1, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 3, # 'โ'
+ 29: 3, # 'ใ'
+ 33: 3, # 'ไ'
+ 50: 2, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 17: { # 'บ'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 1, # 'ง'
+ 26: 1, # 'จ'
+ 52: 1, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 2, # '่'
+ 7: 2, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 25: { # 'ป'
+ 5: 2, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 1, # 'ฎ'
+ 57: 3, # 'ฏ'
+ 49: 1, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 0, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 1, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 1, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 2, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 3, # '็'
+ 6: 1, # '่'
+ 7: 2, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 39: { # 'ผ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 1, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 2, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 1, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 62: { # 'ฝ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 1, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 2, # '่'
+ 7: 1, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 31: { # 'พ'
+ 5: 1, # 'ก'
+ 30: 1, # 'ข'
+ 24: 1, # 'ค'
+ 8: 1, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 2, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 1, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 1, # '็'
+ 6: 0, # '่'
+ 7: 1, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 54: { # 'ฟ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 2, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 1, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 2, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 45: { # 'ภ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 9: { # 'ม'
+ 5: 2, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 2, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 2, # 'ร'
+ 61: 2, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 1, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 2, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 2, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 16: { # 'ย'
+ 5: 3, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 2, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 3, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 1, # 'ึ'
+ 27: 2, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 2, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 2, # 'ๆ'
+ 37: 1, # '็'
+ 6: 3, # '่'
+ 7: 2, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 2: { # 'ร'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 2, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 3, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 3, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 2, # 'น'
+ 17: 2, # 'บ'
+ 25: 3, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 3, # 'เ'
+ 28: 3, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 3, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 3, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 61: { # 'ฤ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 15: { # 'ล'
+ 5: 2, # 'ก'
+ 30: 3, # 'ข'
+ 24: 1, # 'ค'
+ 8: 3, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 3, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 3, # 'อ'
+ 63: 2, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 2, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 2, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 12: { # 'ว'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 1, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 2, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 2, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 42: { # 'ศ'
+ 5: 1, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 1, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 2, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 3, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 2, # 'ู'
+ 11: 0, # 'เ'
+ 28: 1, # 'แ'
+ 41: 0, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 46: { # 'ษ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 2, # 'ฎ'
+ 57: 1, # 'ฏ'
+ 49: 2, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 2, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 18: { # 'ส'
+ 5: 2, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 2, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 3, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 2, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 1, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 3, # 'ำ'
+ 23: 3, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 2, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 3, # 'ู'
+ 11: 2, # 'เ'
+ 28: 0, # 'แ'
+ 41: 1, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 1, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 21: { # 'ห'
+ 5: 3, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 1, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 3, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 0, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 0, # 'ำ'
+ 23: 1, # 'ิ'
+ 13: 1, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 1, # 'ุ'
+ 35: 1, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 3, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 4: { # 'อ'
+ 5: 3, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 2, # 'ะ'
+ 10: 3, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 2, # 'ิ'
+ 13: 3, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 3, # 'ื'
+ 32: 3, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 1, # '็'
+ 6: 2, # '่'
+ 7: 2, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 63: { # 'ฯ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 22: { # 'ะ'
+ 5: 3, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 1, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 2, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 10: { # 'ั'
+ 5: 3, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 3, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 3, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 2, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 3, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 1: { # 'า'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 3, # 'ค'
+ 8: 3, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 2, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 3, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 3, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 36: { # 'ำ'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 3, # 'ค'
+ 8: 2, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 1, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 3, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 23: { # 'ิ'
+ 5: 3, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 3, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 2, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 3, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 3, # 'ห'
+ 4: 1, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 2, # '้'
+ 38: 2, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 13: { # 'ี'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 1, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 3, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 2, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 40: { # 'ึ'
+ 5: 3, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 3, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 27: { # 'ื'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 3, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 32: { # 'ุ'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 3, # 'ค'
+ 8: 3, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 1, # 'ฒ'
+ 43: 3, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 2, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 1, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 1, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 2, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 1, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 2, # '้'
+ 38: 1, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 35: { # 'ู'
+ 5: 3, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 2, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 2, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 2, # 'น'
+ 17: 0, # 'บ'
+ 25: 3, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 1, # 'แ'
+ 41: 1, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 3, # '่'
+ 7: 3, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 11: { # 'เ'
+ 5: 3, # 'ก'
+ 30: 3, # 'ข'
+ 24: 3, # 'ค'
+ 8: 2, # 'ง'
+ 26: 3, # 'จ'
+ 52: 3, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 2, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 1, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 3, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 3, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 3, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 3, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 3, # 'ว'
+ 42: 2, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 28: { # 'แ'
+ 5: 3, # 'ก'
+ 30: 2, # 'ข'
+ 24: 2, # 'ค'
+ 8: 1, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 3, # 'ต'
+ 44: 2, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 2, # 'ป'
+ 39: 3, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 2, # 'พ'
+ 54: 2, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 41: { # 'โ'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 1, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 1, # 'บ'
+ 25: 3, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 1, # 'ภ'
+ 9: 1, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 3, # 'ล'
+ 12: 0, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 0, # 'ห'
+ 4: 2, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 29: { # 'ใ'
+ 5: 2, # 'ก'
+ 30: 0, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 3, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 1, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 3, # 'ส'
+ 21: 3, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 33: { # 'ไ'
+ 5: 1, # 'ก'
+ 30: 2, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 3, # 'ด'
+ 19: 1, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 3, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 1, # 'บ'
+ 25: 3, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 2, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 0, # 'ย'
+ 2: 3, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 3, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 2, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 50: { # 'ๆ'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 37: { # '็'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 2, # 'ง'
+ 26: 3, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 1, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 3, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 1, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 2, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 0, # 'ห'
+ 4: 1, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 1, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 6: { # '่'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 1, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 1, # 'ธ'
+ 3: 3, # 'น'
+ 17: 1, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 1, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 3, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 2, # 'ล'
+ 12: 3, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 1, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 1, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 3, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 1, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 7: { # '้'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 2, # 'ค'
+ 8: 3, # 'ง'
+ 26: 2, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 1, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 1, # 'ด'
+ 19: 2, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 2, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 3, # 'น'
+ 17: 2, # 'บ'
+ 25: 2, # 'ป'
+ 39: 2, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 3, # 'ม'
+ 16: 2, # 'ย'
+ 2: 2, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 3, # 'ว'
+ 42: 1, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 2, # 'ส'
+ 21: 2, # 'ห'
+ 4: 3, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 3, # 'า'
+ 36: 2, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 2, # 'ใ'
+ 33: 2, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 38: { # '์'
+ 5: 2, # 'ก'
+ 30: 1, # 'ข'
+ 24: 1, # 'ค'
+ 8: 0, # 'ง'
+ 26: 1, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 1, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 2, # 'ด'
+ 19: 1, # 'ต'
+ 44: 1, # 'ถ'
+ 14: 1, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 1, # 'น'
+ 17: 1, # 'บ'
+ 25: 1, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 1, # 'พ'
+ 54: 1, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 2, # 'ม'
+ 16: 0, # 'ย'
+ 2: 1, # 'ร'
+ 61: 1, # 'ฤ'
+ 15: 1, # 'ล'
+ 12: 1, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 1, # 'ส'
+ 21: 1, # 'ห'
+ 4: 2, # 'อ'
+ 63: 1, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 2, # 'เ'
+ 28: 2, # 'แ'
+ 41: 1, # 'โ'
+ 29: 1, # 'ใ'
+ 33: 1, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 0, # '๑'
+ 59: 0, # '๒'
+ 60: 0, # '๕'
+ },
+ 56: { # '๑'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 2, # '๑'
+ 59: 1, # '๒'
+ 60: 1, # '๕'
+ },
+ 59: { # '๒'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 1, # '๑'
+ 59: 1, # '๒'
+ 60: 3, # '๕'
+ },
+ 60: { # '๕'
+ 5: 0, # 'ก'
+ 30: 0, # 'ข'
+ 24: 0, # 'ค'
+ 8: 0, # 'ง'
+ 26: 0, # 'จ'
+ 52: 0, # 'ฉ'
+ 34: 0, # 'ช'
+ 51: 0, # 'ซ'
+ 47: 0, # 'ญ'
+ 58: 0, # 'ฎ'
+ 57: 0, # 'ฏ'
+ 49: 0, # 'ฐ'
+ 53: 0, # 'ฑ'
+ 55: 0, # 'ฒ'
+ 43: 0, # 'ณ'
+ 20: 0, # 'ด'
+ 19: 0, # 'ต'
+ 44: 0, # 'ถ'
+ 14: 0, # 'ท'
+ 48: 0, # 'ธ'
+ 3: 0, # 'น'
+ 17: 0, # 'บ'
+ 25: 0, # 'ป'
+ 39: 0, # 'ผ'
+ 62: 0, # 'ฝ'
+ 31: 0, # 'พ'
+ 54: 0, # 'ฟ'
+ 45: 0, # 'ภ'
+ 9: 0, # 'ม'
+ 16: 0, # 'ย'
+ 2: 0, # 'ร'
+ 61: 0, # 'ฤ'
+ 15: 0, # 'ล'
+ 12: 0, # 'ว'
+ 42: 0, # 'ศ'
+ 46: 0, # 'ษ'
+ 18: 0, # 'ส'
+ 21: 0, # 'ห'
+ 4: 0, # 'อ'
+ 63: 0, # 'ฯ'
+ 22: 0, # 'ะ'
+ 10: 0, # 'ั'
+ 1: 0, # 'า'
+ 36: 0, # 'ำ'
+ 23: 0, # 'ิ'
+ 13: 0, # 'ี'
+ 40: 0, # 'ึ'
+ 27: 0, # 'ื'
+ 32: 0, # 'ุ'
+ 35: 0, # 'ู'
+ 11: 0, # 'เ'
+ 28: 0, # 'แ'
+ 41: 0, # 'โ'
+ 29: 0, # 'ใ'
+ 33: 0, # 'ไ'
+ 50: 0, # 'ๆ'
+ 37: 0, # '็'
+ 6: 0, # '่'
+ 7: 0, # '้'
+ 38: 0, # '์'
+ 56: 2, # '๑'
+ 59: 1, # '๒'
+ 60: 0, # '๕'
+ },
+}
+
+# 255: Undefined characters that did not exist in training text
+# 254: Carriage Return/Line Feed ('\r' and '\n' both map here)
+# 253: symbols (punctuation) that do not belong to words
+# 252: digits 0 - 9
+# 251: Control characters
+
+# Character Mapping Table(s):
+TIS_620_THAI_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 254, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 254, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 253, # ' '
+ 33: 253, # '!'
+ 34: 253, # '"'
+ 35: 253, # '#'
+ 36: 253, # '$'
+ 37: 253, # '%'
+ 38: 253, # '&'
+ 39: 253, # "'"
+ 40: 253, # '('
+ 41: 253, # ')'
+ 42: 253, # '*'
+ 43: 253, # '+'
+ 44: 253, # ','
+ 45: 253, # '-'
+ 46: 253, # '.'
+ 47: 253, # '/'
+ 48: 252, # '0'
+ 49: 252, # '1'
+ 50: 252, # '2'
+ 51: 252, # '3'
+ 52: 252, # '4'
+ 53: 252, # '5'
+ 54: 252, # '6'
+ 55: 252, # '7'
+ 56: 252, # '8'
+ 57: 252, # '9'
+ 58: 253, # ':'
+ 59: 253, # ';'
+ 60: 253, # '<'
+ 61: 253, # '='
+ 62: 253, # '>'
+ 63: 253, # '?'
+ 64: 253, # '@'
+ 65: 182, # 'A'
+ 66: 106, # 'B'
+ 67: 107, # 'C'
+ 68: 100, # 'D'
+ 69: 183, # 'E'
+ 70: 184, # 'F'
+ 71: 185, # 'G'
+ 72: 101, # 'H'
+ 73: 94, # 'I'
+ 74: 186, # 'J'
+ 75: 187, # 'K'
+ 76: 108, # 'L'
+ 77: 109, # 'M'
+ 78: 110, # 'N'
+ 79: 111, # 'O'
+ 80: 188, # 'P'
+ 81: 189, # 'Q'
+ 82: 190, # 'R'
+ 83: 89, # 'S'
+ 84: 95, # 'T'
+ 85: 112, # 'U'
+ 86: 113, # 'V'
+ 87: 191, # 'W'
+ 88: 192, # 'X'
+ 89: 193, # 'Y'
+ 90: 194, # 'Z'
+ 91: 253, # '['
+ 92: 253, # '\\'
+ 93: 253, # ']'
+ 94: 253, # '^'
+ 95: 253, # '_'
+ 96: 253, # '`'
+ 97: 64, # 'a'
+ 98: 72, # 'b'
+ 99: 73, # 'c'
+ 100: 114, # 'd'
+ 101: 74, # 'e'
+ 102: 115, # 'f'
+ 103: 116, # 'g'
+ 104: 102, # 'h'
+ 105: 81, # 'i'
+ 106: 201, # 'j'
+ 107: 117, # 'k'
+ 108: 90, # 'l'
+ 109: 103, # 'm'
+ 110: 78, # 'n'
+ 111: 82, # 'o'
+ 112: 96, # 'p'
+ 113: 202, # 'q'
+ 114: 91, # 'r'
+ 115: 79, # 's'
+ 116: 84, # 't'
+ 117: 104, # 'u'
+ 118: 105, # 'v'
+ 119: 97, # 'w'
+ 120: 98, # 'x'
+ 121: 92, # 'y'
+ 122: 203, # 'z'
+ 123: 253, # '{'
+ 124: 253, # '|'
+ 125: 253, # '}'
+ 126: 253, # '~'
+ 127: 253, # '\x7f'
+ 128: 209, # '\x80'
+ 129: 210, # '\x81'
+ 130: 211, # '\x82'
+ 131: 212, # '\x83'
+ 132: 213, # '\x84'
+ 133: 88, # '\x85'
+ 134: 214, # '\x86'
+ 135: 215, # '\x87'
+ 136: 216, # '\x88'
+ 137: 217, # '\x89'
+ 138: 218, # '\x8a'
+ 139: 219, # '\x8b'
+ 140: 220, # '\x8c'
+ 141: 118, # '\x8d'
+ 142: 221, # '\x8e'
+ 143: 222, # '\x8f'
+ 144: 223, # '\x90'
+ 145: 224, # '\x91'
+ 146: 99, # '\x92'
+ 147: 85, # '\x93'
+ 148: 83, # '\x94'
+ 149: 225, # '\x95'
+ 150: 226, # '\x96'
+ 151: 227, # '\x97'
+ 152: 228, # '\x98'
+ 153: 229, # '\x99'
+ 154: 230, # '\x9a'
+ 155: 231, # '\x9b'
+ 156: 232, # '\x9c'
+ 157: 233, # '\x9d'
+ 158: 234, # '\x9e'
+ 159: 235, # '\x9f'
+ 160: 236, # None
+ 161: 5, # 'ก'
+ 162: 30, # 'ข'
+ 163: 237, # 'ฃ'
+ 164: 24, # 'ค'
+ 165: 238, # 'ฅ'
+ 166: 75, # 'ฆ'
+ 167: 8, # 'ง'
+ 168: 26, # 'จ'
+ 169: 52, # 'ฉ'
+ 170: 34, # 'ช'
+ 171: 51, # 'ซ'
+ 172: 119, # 'ฌ'
+ 173: 47, # 'ญ'
+ 174: 58, # 'ฎ'
+ 175: 57, # 'ฏ'
+ 176: 49, # 'ฐ'
+ 177: 53, # 'ฑ'
+ 178: 55, # 'ฒ'
+ 179: 43, # 'ณ'
+ 180: 20, # 'ด'
+ 181: 19, # 'ต'
+ 182: 44, # 'ถ'
+ 183: 14, # 'ท'
+ 184: 48, # 'ธ'
+ 185: 3, # 'น'
+ 186: 17, # 'บ'
+ 187: 25, # 'ป'
+ 188: 39, # 'ผ'
+ 189: 62, # 'ฝ'
+ 190: 31, # 'พ'
+ 191: 54, # 'ฟ'
+ 192: 45, # 'ภ'
+ 193: 9, # 'ม'
+ 194: 16, # 'ย'
+ 195: 2, # 'ร'
+ 196: 61, # 'ฤ'
+ 197: 15, # 'ล'
+ 198: 239, # 'ฦ'
+ 199: 12, # 'ว'
+ 200: 42, # 'ศ'
+ 201: 46, # 'ษ'
+ 202: 18, # 'ส'
+ 203: 21, # 'ห'
+ 204: 76, # 'ฬ'
+ 205: 4, # 'อ'
+ 206: 66, # 'ฮ'
+ 207: 63, # 'ฯ'
+ 208: 22, # 'ะ'
+ 209: 10, # 'ั'
+ 210: 1, # 'า'
+ 211: 36, # 'ำ'
+ 212: 23, # 'ิ'
+ 213: 13, # 'ี'
+ 214: 40, # 'ึ'
+ 215: 27, # 'ื'
+ 216: 32, # 'ุ'
+ 217: 35, # 'ู'
+ 218: 86, # 'ฺ'
+ 219: 240, # None
+ 220: 241, # None
+ 221: 242, # None
+ 222: 243, # None
+ 223: 244, # '฿'
+ 224: 11, # 'เ'
+ 225: 28, # 'แ'
+ 226: 41, # 'โ'
+ 227: 29, # 'ใ'
+ 228: 33, # 'ไ'
+ 229: 245, # 'ๅ'
+ 230: 50, # 'ๆ'
+ 231: 37, # '็'
+ 232: 6, # '่'
+ 233: 7, # '้'
+ 234: 67, # '๊'
+ 235: 77, # '๋'
+ 236: 38, # '์'
+ 237: 93, # 'ํ'
+ 238: 246, # '๎'
+ 239: 247, # '๏'
+ 240: 68, # '๐'
+ 241: 56, # '๑'
+ 242: 59, # '๒'
+ 243: 65, # '๓'
+ 244: 69, # '๔'
+ 245: 60, # '๕'
+ 246: 70, # '๖'
+ 247: 80, # '๗'
+ 248: 71, # '๘'
+ 249: 87, # '๙'
+ 250: 248, # '๚'
+ 251: 249, # '๛'
+ 252: 250, # None
+ 253: 251, # None
+ 254: 252, # None
+ 255: 253, # None
+}
+
+TIS_620_THAI_MODEL = SingleByteCharSetModel(
+ charset_name="TIS-620",
+ language="Thai",
+ char_to_order_map=TIS_620_THAI_CHAR_TO_ORDER,
+ language_model=THAI_LANG_MODEL,
+ typical_positive_ratio=0.926386,
+ keep_ascii_letters=False,
+ alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
+)
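
The two tables above are consumed together: input bytes are first mapped to
frequency-order indices through TIS_620_THAI_CHAR_TO_ORDER, and each pair of
consecutive frequent characters (order below 64) is then scored against
THAI_LANG_MODEL. The following is a minimal illustrative sketch of that flow,
not the vendored SingleByteCharSetProber itself, which additionally tracks
character totals and derives a confidence ratio from the counters:

    def score_sequences(data: bytes) -> list:
        """Tally bigram likelihood categories (0-3) for TIS-620 input."""
        SAMPLE_SIZE = 64         # only the 64 most frequent characters are modeled
        counters = [0, 0, 0, 0]  # Negative / Unlikely / Likely / Positive
        last_order = 255
        for byte in data:
            order = TIS_620_THAI_CHAR_TO_ORDER.get(byte, 255)
            if last_order < SAMPLE_SIZE and order < SAMPLE_SIZE:
                # Pairs absent from the model are treated as Negative (0).
                category = THAI_LANG_MODEL.get(last_order, {}).get(order, 0)
                counters[category] += 1
            last_order = order
        return counters

Roughly speaking, input whose counters are dominated by the "Likely" and
"Positive" buckets supports the TIS-620/Thai hypothesis; the prober turns
these counts into a confidence value that is compared across candidate
charsets.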
diff --git a/third_party/python/pip/pip/_vendor/chardet/langturkishmodel.py b/third_party/python/pip/pip/_vendor/chardet/langturkishmodel.py
new file mode 100644
index 0000000000..291857c25c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/langturkishmodel.py
@@ -0,0 +1,4380 @@
+from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetModel
+
+# 3: Positive
+# 2: Likely
+# 1: Unlikely
+# 0: Negative
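+#
+# Each outer key below is a character's frequency-order index; the nested
+# dict scores the character that follows it.  In the entry for 'A', for
+# instance, "1: 3, # 'a'" records that the pair 'A' -> 'a' was a frequent
+# ("Positive") sequence in the Turkish training text.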
+
+TURKISH_LANG_MODEL = {
+ 23: { # 'A'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 1, # 'i'
+ 24: 0, # 'j'
+ 10: 2, # 'k'
+ 5: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 37: { # 'B'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 2, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 47: { # 'C'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 1, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 2, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 39: { # 'D'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 1, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 29: { # 'E'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 1, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 1, # 'j'
+ 10: 0, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 1, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 52: { # 'F'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 1, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 2, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 1, # 'b'
+ 28: 1, # 'c'
+ 12: 1, # 'd'
+ 2: 0, # 'e'
+ 18: 1, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 1, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 2, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 2, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 36: { # 'G'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 2, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 2, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 1, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 1, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 0, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 1, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 45: { # 'H'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 2, # 'G'
+ 45: 1, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 1, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 2, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 1, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 0, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 53: { # 'I'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 60: { # 'J'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 0, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 1, # 's'
+ 9: 0, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 16: { # 'K'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 1, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 0, # 'u'
+ 32: 3, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 49: { # 'L'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 2, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 2, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 0, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 1, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 2, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 1, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 20: { # 'M'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 2, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 0, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 46: { # 'N'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 1, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 1, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 1, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 42: { # 'O'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 1, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 2, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 2, # 'İ'
+ 6: 1, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 48: { # 'P'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 2, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 0, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 44: { # 'R'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 1, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 1, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 35: { # 'S'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 1, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 1, # 'l'
+ 13: 2, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 1, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 2, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 31: { # 'T'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 2, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 1, # 'j'
+ 10: 2, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 2, # 'r'
+ 8: 0, # 's'
+ 9: 2, # 't'
+ 14: 2, # 'u'
+ 32: 1, # 'v'
+ 57: 1, # 'w'
+ 58: 1, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 51: { # 'U'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 1, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 1, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 38: { # 'V'
+ 23: 1, # 'A'
+ 37: 1, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 2, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 1, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 1, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 62: { # 'W'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 0, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 0, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 43: { # 'Y'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 1, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 2, # 'N'
+ 42: 0, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 1, # 'j'
+ 10: 1, # 'k'
+ 5: 1, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 1, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 0, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 56: { # 'Z'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 2, # 'Z'
+ 1: 2, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 1, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 1, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 1: { # 'a'
+ 23: 3, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 2, # 'Z'
+ 1: 2, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 2, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 3, # 'v'
+ 57: 2, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 1, # 'î'
+ 34: 1, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 21: { # 'b'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 3, # 'g'
+ 25: 1, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 1, # 'r'
+ 8: 2, # 's'
+ 9: 2, # 't'
+ 14: 2, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 28: { # 'c'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 2, # 'E'
+ 52: 0, # 'F'
+ 36: 2, # 'G'
+ 45: 2, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 2, # 'T'
+ 51: 2, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 3, # 'Y'
+ 56: 0, # 'Z'
+ 1: 1, # 'a'
+ 21: 1, # 'b'
+ 28: 2, # 'c'
+ 12: 2, # 'd'
+ 2: 1, # 'e'
+ 18: 1, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 1, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 2, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 1, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 1, # 'î'
+ 34: 2, # 'ö'
+ 17: 2, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 12: { # 'd'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 2, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 1, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 2, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 1, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 2: { # 'e'
+ 23: 2, # 'A'
+ 37: 0, # 'B'
+ 47: 2, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 2, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 3, # 'v'
+ 57: 2, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 1, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 18: { # 'f'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 1, # 'i'
+ 24: 1, # 'j'
+ 10: 1, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 1, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 1, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 27: { # 'g'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 2, # 'r'
+ 8: 2, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 25: { # 'h'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 3: { # 'i'
+ 23: 2, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 1, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 3, # 'g'
+ 25: 1, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 1, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 1, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 24: { # 'j'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 2, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 1, # 'j'
+ 10: 2, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 2, # 'r'
+ 8: 3, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 10: { # 'k'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 1, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 2, # 'r'
+ 8: 2, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 3, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 5: { # 'l'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 1, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 2, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 13: { # 'm'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 2, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 2, # 'u'
+ 32: 2, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 4: { # 'n'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 2, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 1, # 'f'
+ 27: 2, # 'g'
+ 25: 3, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 3, # 'p'
+ 7: 2, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 15: { # 'o'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 2, # 'L'
+ 20: 0, # 'M'
+ 46: 2, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 2, # 'İ'
+ 6: 3, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 26: { # 'p'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 1, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 2, # 'r'
+ 8: 1, # 's'
+ 9: 1, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 1, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 7: { # 'r'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 1, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 2, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 1, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 3, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 8: { # 's'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 2, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 9: { # 't'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 2, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 3, # 'v'
+ 57: 0, # 'w'
+ 58: 2, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 14: { # 'u'
+ 23: 3, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 2, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 3, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 2, # 'Z'
+ 1: 2, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 2, # 'e'
+ 18: 2, # 'f'
+ 27: 3, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 2, # 'v'
+ 57: 2, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 32: { # 'v'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 1, # 'j'
+ 10: 1, # 'k'
+ 5: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 1, # 'r'
+ 8: 2, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 57: { # 'w'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 1, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 1, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 1, # 's'
+ 9: 0, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 2, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 58: { # 'x'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 1, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 1, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 2, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 2, # 's'
+ 9: 1, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 11: { # 'y'
+ 23: 1, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 1, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 2, # 'r'
+ 8: 1, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 3, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 22: { # 'z'
+ 23: 2, # 'A'
+ 37: 2, # 'B'
+ 47: 1, # 'C'
+ 39: 2, # 'D'
+ 29: 3, # 'E'
+ 52: 1, # 'F'
+ 36: 2, # 'G'
+ 45: 2, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 2, # 'N'
+ 42: 2, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 3, # 'T'
+ 51: 2, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 1, # 'Z'
+ 1: 1, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 2, # 'd'
+ 2: 2, # 'e'
+ 18: 3, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 2, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 0, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 3, # 'y'
+ 22: 2, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 2, # 'Ü'
+ 59: 1, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 2, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 3, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 63: { # '·'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 1, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 54: { # 'Ç'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 1, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 0, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 0, # 'h'
+ 3: 3, # 'i'
+ 24: 0, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 2, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 2, # 'r'
+ 8: 0, # 's'
+ 9: 1, # 't'
+ 14: 0, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 2, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 50: { # 'Ö'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 2, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 2, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 1, # 'N'
+ 42: 2, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 2, # 'd'
+ 2: 0, # 'e'
+ 18: 1, # 'f'
+ 27: 1, # 'g'
+ 25: 1, # 'h'
+ 3: 2, # 'i'
+ 24: 0, # 'j'
+ 10: 2, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 3, # 'n'
+ 15: 2, # 'o'
+ 26: 2, # 'p'
+ 7: 3, # 'r'
+ 8: 1, # 's'
+ 9: 2, # 't'
+ 14: 0, # 'u'
+ 32: 1, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 2, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 55: { # 'Ü'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 1, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 1, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 1, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 1, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 1, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 0, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 59: { # 'â'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 0, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 2, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 2, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 33: { # 'ç'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 3, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 0, # 'Z'
+ 1: 0, # 'a'
+ 21: 3, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 0, # 'e'
+ 18: 2, # 'f'
+ 27: 1, # 'g'
+ 25: 3, # 'h'
+ 3: 3, # 'i'
+ 24: 0, # 'j'
+ 10: 3, # 'k'
+ 5: 0, # 'l'
+ 13: 0, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 2, # 's'
+ 9: 3, # 't'
+ 14: 0, # 'u'
+ 32: 2, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 1, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 61: { # 'î'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 0, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 0, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 2, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 1, # 'j'
+ 10: 0, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 1, # 'n'
+ 15: 0, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 1, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 1, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 1, # 'î'
+ 34: 0, # 'ö'
+ 17: 0, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 1, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 34: { # 'ö'
+ 23: 0, # 'A'
+ 37: 1, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 1, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 1, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 2, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 2, # 'h'
+ 3: 1, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 2, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 0, # 'r'
+ 8: 3, # 's'
+ 9: 1, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 1, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 1, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 17: { # 'ü'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 0, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 1, # 'J'
+ 16: 1, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 0, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 0, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 0, # 'c'
+ 12: 1, # 'd'
+ 2: 3, # 'e'
+ 18: 1, # 'f'
+ 27: 2, # 'g'
+ 25: 0, # 'h'
+ 3: 1, # 'i'
+ 24: 1, # 'j'
+ 10: 2, # 'k'
+ 5: 3, # 'l'
+ 13: 2, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 2, # 'p'
+ 7: 2, # 'r'
+ 8: 3, # 's'
+ 9: 2, # 't'
+ 14: 3, # 'u'
+ 32: 1, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 2, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 30: { # 'ğ'
+ 23: 0, # 'A'
+ 37: 2, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 1, # 'M'
+ 46: 2, # 'N'
+ 42: 2, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 0, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 2, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 0, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 2, # 'e'
+ 18: 0, # 'f'
+ 27: 0, # 'g'
+ 25: 0, # 'h'
+ 3: 0, # 'i'
+ 24: 3, # 'j'
+ 10: 1, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 1, # 'o'
+ 26: 0, # 'p'
+ 7: 1, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 2, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 2, # 'İ'
+ 6: 2, # 'ı'
+ 40: 2, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 41: { # 'İ'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 1, # 'E'
+ 52: 0, # 'F'
+ 36: 2, # 'G'
+ 45: 2, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 0, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 0, # 'Z'
+ 1: 1, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 2, # 'd'
+ 2: 1, # 'e'
+ 18: 0, # 'f'
+ 27: 3, # 'g'
+ 25: 2, # 'h'
+ 3: 2, # 'i'
+ 24: 2, # 'j'
+ 10: 2, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 15: 1, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 2, # 't'
+ 14: 0, # 'u'
+ 32: 0, # 'v'
+ 57: 1, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 1, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 1, # 'ö'
+ 17: 1, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 1, # 'ş'
+ },
+ 6: { # 'ı'
+ 23: 2, # 'A'
+ 37: 0, # 'B'
+ 47: 0, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 2, # 'J'
+ 16: 3, # 'K'
+ 49: 0, # 'L'
+ 20: 3, # 'M'
+ 46: 1, # 'N'
+ 42: 0, # 'O'
+ 48: 0, # 'P'
+ 44: 0, # 'R'
+ 35: 0, # 'S'
+ 31: 2, # 'T'
+ 51: 0, # 'U'
+ 38: 0, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 1, # 'Z'
+ 1: 3, # 'a'
+ 21: 2, # 'b'
+ 28: 1, # 'c'
+ 12: 3, # 'd'
+ 2: 3, # 'e'
+ 18: 3, # 'f'
+ 27: 3, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 3, # 'j'
+ 10: 3, # 'k'
+ 5: 3, # 'l'
+ 13: 3, # 'm'
+ 4: 3, # 'n'
+ 15: 0, # 'o'
+ 26: 3, # 'p'
+ 7: 3, # 'r'
+ 8: 3, # 's'
+ 9: 3, # 't'
+ 14: 3, # 'u'
+ 32: 3, # 'v'
+ 57: 1, # 'w'
+ 58: 1, # 'x'
+ 11: 3, # 'y'
+ 22: 0, # 'z'
+ 63: 1, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 2, # 'ç'
+ 61: 0, # 'î'
+ 34: 0, # 'ö'
+ 17: 3, # 'ü'
+ 30: 0, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 3, # 'ı'
+ 40: 0, # 'Ş'
+ 19: 0, # 'ş'
+ },
+ 40: { # 'Ş'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 1, # 'D'
+ 29: 1, # 'E'
+ 52: 0, # 'F'
+ 36: 1, # 'G'
+ 45: 2, # 'H'
+ 53: 1, # 'I'
+ 60: 0, # 'J'
+ 16: 0, # 'K'
+ 49: 0, # 'L'
+ 20: 2, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 2, # 'P'
+ 44: 2, # 'R'
+ 35: 1, # 'S'
+ 31: 1, # 'T'
+ 51: 0, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 2, # 'Y'
+ 56: 1, # 'Z'
+ 1: 0, # 'a'
+ 21: 2, # 'b'
+ 28: 0, # 'c'
+ 12: 2, # 'd'
+ 2: 0, # 'e'
+ 18: 3, # 'f'
+ 27: 0, # 'g'
+ 25: 2, # 'h'
+ 3: 3, # 'i'
+ 24: 2, # 'j'
+ 10: 1, # 'k'
+ 5: 0, # 'l'
+ 13: 1, # 'm'
+ 4: 3, # 'n'
+ 15: 2, # 'o'
+ 26: 0, # 'p'
+ 7: 3, # 'r'
+ 8: 2, # 's'
+ 9: 2, # 't'
+ 14: 1, # 'u'
+ 32: 3, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 2, # 'y'
+ 22: 0, # 'z'
+ 63: 0, # '·'
+ 54: 0, # 'Ç'
+ 50: 0, # 'Ö'
+ 55: 1, # 'Ü'
+ 59: 0, # 'â'
+ 33: 0, # 'ç'
+ 61: 0, # 'î'
+ 34: 2, # 'ö'
+ 17: 1, # 'ü'
+ 30: 2, # 'ğ'
+ 41: 0, # 'İ'
+ 6: 2, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 2, # 'ş'
+ },
+ 19: { # 'ş'
+ 23: 0, # 'A'
+ 37: 0, # 'B'
+ 47: 1, # 'C'
+ 39: 0, # 'D'
+ 29: 0, # 'E'
+ 52: 2, # 'F'
+ 36: 1, # 'G'
+ 45: 0, # 'H'
+ 53: 0, # 'I'
+ 60: 0, # 'J'
+ 16: 3, # 'K'
+ 49: 2, # 'L'
+ 20: 0, # 'M'
+ 46: 1, # 'N'
+ 42: 1, # 'O'
+ 48: 1, # 'P'
+ 44: 1, # 'R'
+ 35: 1, # 'S'
+ 31: 0, # 'T'
+ 51: 1, # 'U'
+ 38: 1, # 'V'
+ 62: 0, # 'W'
+ 43: 1, # 'Y'
+ 56: 0, # 'Z'
+ 1: 3, # 'a'
+ 21: 1, # 'b'
+ 28: 2, # 'c'
+ 12: 0, # 'd'
+ 2: 3, # 'e'
+ 18: 0, # 'f'
+ 27: 2, # 'g'
+ 25: 1, # 'h'
+ 3: 1, # 'i'
+ 24: 0, # 'j'
+ 10: 2, # 'k'
+ 5: 2, # 'l'
+ 13: 3, # 'm'
+ 4: 0, # 'n'
+ 15: 0, # 'o'
+ 26: 1, # 'p'
+ 7: 3, # 'r'
+ 8: 0, # 's'
+ 9: 0, # 't'
+ 14: 3, # 'u'
+ 32: 0, # 'v'
+ 57: 0, # 'w'
+ 58: 0, # 'x'
+ 11: 0, # 'y'
+ 22: 2, # 'z'
+ 63: 0, # '·'
+ 54: 1, # 'Ç'
+ 50: 2, # 'Ö'
+ 55: 0, # 'Ü'
+ 59: 0, # 'â'
+ 33: 1, # 'ç'
+ 61: 1, # 'î'
+ 34: 2, # 'ö'
+ 17: 0, # 'ü'
+ 30: 1, # 'ğ'
+ 41: 1, # 'İ'
+ 6: 1, # 'ı'
+ 40: 1, # 'Ş'
+ 19: 1, # 'ş'
+ },
+}
+
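The nested mapping above is read as TURKISH_LANG_MODEL[prev_order][cur_order] and yields a coarse bigram-likelihood level from 0 (unseen in the training text) to 3 (very frequent). As a minimal sketch of how such a table can be consulted — the helper name and sample below are illustrative, not part of chardet:

def score_orders(orders, model=TURKISH_LANG_MODEL):
    # Sum the bigram-likelihood levels over consecutive order pairs.
    total = 0
    for prev, cur in zip(orders, orders[1:]):
        total += model.get(prev, {}).get(cur, 0)
    return total

# 'e' (order 2) followed by 'n' (order 4) is a very frequent Turkish bigram:
assert TURKISH_LANG_MODEL[2][4] == 3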
+# 255: Undefined characters that did not exist in the training text
+# 254: Carriage Return
+# 253: symbol (punctuation) that does not belong to a word
+# 252: 0 - 9
+# 251: Control characters
+
+# Character Mapping Table(s):
+ISO_8859_9_TURKISH_CHAR_TO_ORDER = {
+ 0: 255, # '\x00'
+ 1: 255, # '\x01'
+ 2: 255, # '\x02'
+ 3: 255, # '\x03'
+ 4: 255, # '\x04'
+ 5: 255, # '\x05'
+ 6: 255, # '\x06'
+ 7: 255, # '\x07'
+ 8: 255, # '\x08'
+ 9: 255, # '\t'
+ 10: 255, # '\n'
+ 11: 255, # '\x0b'
+ 12: 255, # '\x0c'
+ 13: 255, # '\r'
+ 14: 255, # '\x0e'
+ 15: 255, # '\x0f'
+ 16: 255, # '\x10'
+ 17: 255, # '\x11'
+ 18: 255, # '\x12'
+ 19: 255, # '\x13'
+ 20: 255, # '\x14'
+ 21: 255, # '\x15'
+ 22: 255, # '\x16'
+ 23: 255, # '\x17'
+ 24: 255, # '\x18'
+ 25: 255, # '\x19'
+ 26: 255, # '\x1a'
+ 27: 255, # '\x1b'
+ 28: 255, # '\x1c'
+ 29: 255, # '\x1d'
+ 30: 255, # '\x1e'
+ 31: 255, # '\x1f'
+ 32: 255, # ' '
+ 33: 255, # '!'
+ 34: 255, # '"'
+ 35: 255, # '#'
+ 36: 255, # '$'
+ 37: 255, # '%'
+ 38: 255, # '&'
+ 39: 255, # "'"
+ 40: 255, # '('
+ 41: 255, # ')'
+ 42: 255, # '*'
+ 43: 255, # '+'
+ 44: 255, # ','
+ 45: 255, # '-'
+ 46: 255, # '.'
+ 47: 255, # '/'
+ 48: 255, # '0'
+ 49: 255, # '1'
+ 50: 255, # '2'
+ 51: 255, # '3'
+ 52: 255, # '4'
+ 53: 255, # '5'
+ 54: 255, # '6'
+ 55: 255, # '7'
+ 56: 255, # '8'
+ 57: 255, # '9'
+ 58: 255, # ':'
+ 59: 255, # ';'
+ 60: 255, # '<'
+ 61: 255, # '='
+ 62: 255, # '>'
+ 63: 255, # '?'
+ 64: 255, # '@'
+ 65: 23, # 'A'
+ 66: 37, # 'B'
+ 67: 47, # 'C'
+ 68: 39, # 'D'
+ 69: 29, # 'E'
+ 70: 52, # 'F'
+ 71: 36, # 'G'
+ 72: 45, # 'H'
+ 73: 53, # 'I'
+ 74: 60, # 'J'
+ 75: 16, # 'K'
+ 76: 49, # 'L'
+ 77: 20, # 'M'
+ 78: 46, # 'N'
+ 79: 42, # 'O'
+ 80: 48, # 'P'
+ 81: 69, # 'Q'
+ 82: 44, # 'R'
+ 83: 35, # 'S'
+ 84: 31, # 'T'
+ 85: 51, # 'U'
+ 86: 38, # 'V'
+ 87: 62, # 'W'
+ 88: 65, # 'X'
+ 89: 43, # 'Y'
+ 90: 56, # 'Z'
+ 91: 255, # '['
+ 92: 255, # '\\'
+ 93: 255, # ']'
+ 94: 255, # '^'
+ 95: 255, # '_'
+ 96: 255, # '`'
+ 97: 1, # 'a'
+ 98: 21, # 'b'
+ 99: 28, # 'c'
+ 100: 12, # 'd'
+ 101: 2, # 'e'
+ 102: 18, # 'f'
+ 103: 27, # 'g'
+ 104: 25, # 'h'
+ 105: 3, # 'i'
+ 106: 24, # 'j'
+ 107: 10, # 'k'
+ 108: 5, # 'l'
+ 109: 13, # 'm'
+ 110: 4, # 'n'
+ 111: 15, # 'o'
+ 112: 26, # 'p'
+ 113: 64, # 'q'
+ 114: 7, # 'r'
+ 115: 8, # 's'
+ 116: 9, # 't'
+ 117: 14, # 'u'
+ 118: 32, # 'v'
+ 119: 57, # 'w'
+ 120: 58, # 'x'
+ 121: 11, # 'y'
+ 122: 22, # 'z'
+ 123: 255, # '{'
+ 124: 255, # '|'
+ 125: 255, # '}'
+ 126: 255, # '~'
+ 127: 255, # '\x7f'
+ 128: 180, # '\x80'
+ 129: 179, # '\x81'
+ 130: 178, # '\x82'
+ 131: 177, # '\x83'
+ 132: 176, # '\x84'
+ 133: 175, # '\x85'
+ 134: 174, # '\x86'
+ 135: 173, # '\x87'
+ 136: 172, # '\x88'
+ 137: 171, # '\x89'
+ 138: 170, # '\x8a'
+ 139: 169, # '\x8b'
+ 140: 168, # '\x8c'
+ 141: 167, # '\x8d'
+ 142: 166, # '\x8e'
+ 143: 165, # '\x8f'
+ 144: 164, # '\x90'
+ 145: 163, # '\x91'
+ 146: 162, # '\x92'
+ 147: 161, # '\x93'
+ 148: 160, # '\x94'
+ 149: 159, # '\x95'
+ 150: 101, # '\x96'
+ 151: 158, # '\x97'
+ 152: 157, # '\x98'
+ 153: 156, # '\x99'
+ 154: 155, # '\x9a'
+ 155: 154, # '\x9b'
+ 156: 153, # '\x9c'
+ 157: 152, # '\x9d'
+ 158: 151, # '\x9e'
+ 159: 106, # '\x9f'
+ 160: 150, # '\xa0'
+ 161: 149, # '¡'
+ 162: 148, # '¢'
+ 163: 147, # '£'
+ 164: 146, # '¤'
+ 165: 145, # '¥'
+ 166: 144, # '¦'
+ 167: 100, # '§'
+ 168: 143, # '¨'
+ 169: 142, # '©'
+ 170: 141, # 'ª'
+ 171: 140, # '«'
+ 172: 139, # '¬'
+ 173: 138, # '\xad'
+ 174: 137, # '®'
+ 175: 136, # '¯'
+ 176: 94, # '°'
+ 177: 80, # '±'
+ 178: 93, # '²'
+ 179: 135, # '³'
+ 180: 105, # '´'
+ 181: 134, # 'µ'
+ 182: 133, # '¶'
+ 183: 63, # '·'
+ 184: 132, # '¸'
+ 185: 131, # '¹'
+ 186: 130, # 'º'
+ 187: 129, # '»'
+ 188: 128, # '¼'
+ 189: 127, # '½'
+ 190: 126, # '¾'
+ 191: 125, # '¿'
+ 192: 124, # 'À'
+ 193: 104, # 'Á'
+ 194: 73, # 'Â'
+ 195: 99, # 'Ã'
+ 196: 79, # 'Ä'
+ 197: 85, # 'Å'
+ 198: 123, # 'Æ'
+ 199: 54, # 'Ç'
+ 200: 122, # 'È'
+ 201: 98, # 'É'
+ 202: 92, # 'Ê'
+ 203: 121, # 'Ë'
+ 204: 120, # 'Ì'
+ 205: 91, # 'Í'
+ 206: 103, # 'Î'
+ 207: 119, # 'Ï'
+ 208: 68, # 'Ğ'
+ 209: 118, # 'Ñ'
+ 210: 117, # 'Ò'
+ 211: 97, # 'Ó'
+ 212: 116, # 'Ô'
+ 213: 115, # 'Õ'
+ 214: 50, # 'Ö'
+ 215: 90, # '×'
+ 216: 114, # 'Ø'
+ 217: 113, # 'Ù'
+ 218: 112, # 'Ú'
+ 219: 111, # 'Û'
+ 220: 55, # 'Ü'
+ 221: 41, # 'İ'
+ 222: 40, # 'Ş'
+ 223: 86, # 'ß'
+ 224: 89, # 'à'
+ 225: 70, # 'á'
+ 226: 59, # 'â'
+ 227: 78, # 'ã'
+ 228: 71, # 'ä'
+ 229: 82, # 'å'
+ 230: 88, # 'æ'
+ 231: 33, # 'ç'
+ 232: 77, # 'è'
+ 233: 66, # 'é'
+ 234: 84, # 'ê'
+ 235: 83, # 'ë'
+ 236: 110, # 'ì'
+ 237: 75, # 'í'
+ 238: 61, # 'î'
+ 239: 96, # 'ï'
+ 240: 30, # 'ğ'
+ 241: 67, # 'ñ'
+ 242: 109, # 'ò'
+ 243: 74, # 'ó'
+ 244: 87, # 'ô'
+ 245: 102, # 'õ'
+ 246: 34, # 'ö'
+ 247: 95, # '÷'
+ 248: 81, # 'ø'
+ 249: 108, # 'ù'
+ 250: 76, # 'ú'
+ 251: 72, # 'û'
+ 252: 17, # 'ü'
+ 253: 6, # 'ı'
+ 254: 19, # 'ş'
+ 255: 107, # 'ÿ'
+}
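Each key in the table above is a raw ISO-8859-9 byte value and each value is the frequency rank ("order") of the decoded character; the reserved codes 251-255 documented earlier mark bytes the bigram model skips, and in chardet only orders below 64 feed the language model. A hypothetical conversion helper (the name is illustrative):

def to_orders(raw: bytes):
    # Map each byte to its frequency order via the table above.
    return [ISO_8859_9_TURKISH_CHAR_TO_ORDER[b] for b in raw]

# 0xFD decodes to 'ı' in ISO-8859-9 and ranks 6th, i.e. a very common letter:
assert to_orders(b"\xfd") == [6]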
+
+ISO_8859_9_TURKISH_MODEL = SingleByteCharSetModel(
+ charset_name="ISO-8859-9",
+ language="Turkish",
+ char_to_order_map=ISO_8859_9_TURKISH_CHAR_TO_ORDER,
+ language_model=TURKISH_LANG_MODEL,
+ typical_positive_ratio=0.97029,
+ keep_ascii_letters=True,
+ alphabet="ABCDEFGHIJKLMNOPRSTUVYZabcdefghijklmnoprstuvyzÂÇÎÖÛÜâçîöûüĞğİıŞş",
+)
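Taken together, the order map and the bigram table let a single-byte prober estimate how Turkish-like a byte stream is. Assuming the vendored module layout shown in this diff, wiring the model into a prober might look like the following sketch:

from pip._vendor.chardet.sbcharsetprober import SingleByteCharSetProber
from pip._vendor.chardet.langturkishmodel import ISO_8859_9_TURKISH_MODEL

prober = SingleByteCharSetProber(ISO_8859_9_TURKISH_MODEL)
prober.feed("Türkçe karakter kodlaması".encode("iso-8859-9"))
print(prober.charset_name, prober.get_confidence())

In practice callers rarely construct probers directly; the top-level chardet.detect() runs this model alongside the others and reports the best overall match.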
diff --git a/third_party/python/pip/pip/_vendor/chardet/latin1prober.py b/third_party/python/pip/pip/_vendor/chardet/latin1prober.py
new file mode 100644
index 0000000000..59a01d91b8
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/latin1prober.py
@@ -0,0 +1,147 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import List, Union
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+FREQ_CAT_NUM = 4
+
+UDF = 0 # undefined
+OTH = 1 # other
+ASC = 2 # ascii capital letter
+ASS = 3 # ascii small letter
+ACV = 4 # accent capital vowel
+ACO = 5 # accent capital other
+ASV = 6 # accent small vowel
+ASO = 7 # accent small other
+CLASS_NUM = 8 # total classes
+
+# fmt: off
+Latin1_CharToClass = (
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
+ OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
+ ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
+ ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
+ ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
+ OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
+ ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
+ ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
+ ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
+ OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH, # 80 - 87
+ OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF, # 88 - 8F
+ UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 90 - 97
+ OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO, # 98 - 9F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A0 - A7
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # A8 - AF
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B8 - BF
+ ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO, # C0 - C7
+ ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # C8 - CF
+ ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH, # D0 - D7
+ ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO, # D8 - DF
+ ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO, # E0 - E7
+ ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # E8 - EF
+ ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH, # F0 - F7
+ ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO, # F8 - FF
+)
+
+# 0 : illegal
+# 1 : very unlikely
+# 2 : normal
+# 3 : very likely
+Latin1ClassModel = (
+# UDF OTH ASC ASS ACV ACO ASV ASO
+ 0, 0, 0, 0, 0, 0, 0, 0, # UDF
+ 0, 3, 3, 3, 3, 3, 3, 3, # OTH
+ 0, 3, 3, 3, 3, 3, 3, 3, # ASC
+ 0, 3, 3, 3, 1, 1, 3, 3, # ASS
+ 0, 3, 3, 3, 1, 2, 1, 2, # ACV
+ 0, 3, 3, 3, 3, 3, 3, 3, # ACO
+ 0, 3, 1, 3, 1, 1, 1, 3, # ASV
+ 0, 3, 1, 3, 1, 1, 3, 3, # ASO
+)
+# fmt: on
+
+
+class Latin1Prober(CharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self._last_char_class = OTH
+ self._freq_counter: List[int] = []
+ self.reset()
+
+ def reset(self) -> None:
+ self._last_char_class = OTH
+ self._freq_counter = [0] * FREQ_CAT_NUM
+ super().reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "ISO-8859-1"
+
+ @property
+ def language(self) -> str:
+ return ""
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ byte_str = self.remove_xml_tags(byte_str)
+ for c in byte_str:
+ char_class = Latin1_CharToClass[c]
+ freq = Latin1ClassModel[(self._last_char_class * CLASS_NUM) + char_class]
+ if freq == 0:
+ self._state = ProbingState.NOT_ME
+ break
+ self._freq_counter[freq] += 1
+ self._last_char_class = char_class
+
+ return self.state
+
+ def get_confidence(self) -> float:
+ if self.state == ProbingState.NOT_ME:
+ return 0.01
+
+ total = sum(self._freq_counter)
+ confidence = (
+ 0.0
+ if total < 0.01
+ else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
+ )
+ confidence = max(confidence, 0.0)
+        # Lower the confidence of Latin-1 so that other, more accurate
+        # detectors can take priority.
+ confidence *= 0.73
+ return confidence
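
A minimal sketch of exercising Latin1Prober on its own, assuming the
vendored import path pip._vendor.chardet added by this diff:

    from pip._vendor.chardet.latin1prober import Latin1Prober

    prober = Latin1Prober()
    # 0xE9 is e-acute in ISO-8859-1, an ASV (accent small vowel) byte.
    prober.feed(b"Caf\xe9 au lait")
    print(prober.charset_name, prober.get_confidence())
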
diff --git a/third_party/python/pip/pip/_vendor/chardet/macromanprober.py b/third_party/python/pip/pip/_vendor/chardet/macromanprober.py
new file mode 100644
index 0000000000..1425d10eca
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/macromanprober.py
@@ -0,0 +1,162 @@
+######################## BEGIN LICENSE BLOCK ########################
+# This code was modified from latin1prober.py by Rob Speer <rob@lumino.so>.
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Rob Speer - adapt to MacRoman encoding
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import List, Union
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+FREQ_CAT_NUM = 4
+
+UDF = 0 # undefined
+OTH = 1 # other
+ASC = 2 # ascii capital letter
+ASS = 3 # ascii small letter
+ACV = 4 # accent capital vowel
+ACO = 5 # accent capital other
+ASV = 6 # accent small vowel
+ASO = 7 # accent small other
+ODD = 8 # character that is unlikely to appear
+CLASS_NUM = 9 # total classes
+
+# The change from Latin1 is that we explicitly look for extended characters
+# that are infrequently-occurring symbols, and consider them to always be
+# improbable. This should let MacRoman get out of the way of more likely
+# encodings in most situations.
+
+# fmt: off
+MacRoman_CharToClass = (
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 00 - 07
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 08 - 0F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 10 - 17
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 18 - 1F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 20 - 27
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 28 - 2F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 30 - 37
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # 38 - 3F
+ OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 40 - 47
+ ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 48 - 4F
+ ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC, # 50 - 57
+ ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH, # 58 - 5F
+ OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 60 - 67
+ ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 68 - 6F
+ ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS, # 70 - 77
+ ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH, # 78 - 7F
+ ACV, ACV, ACO, ACV, ACO, ACV, ACV, ASV, # 80 - 87
+ ASV, ASV, ASV, ASV, ASV, ASO, ASV, ASV, # 88 - 8F
+ ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASV, # 90 - 97
+ ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV, # 98 - 9F
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, ASO, # A0 - A7
+ OTH, OTH, ODD, ODD, OTH, OTH, ACV, ACV, # A8 - AF
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH, # B0 - B7
+ OTH, OTH, OTH, OTH, OTH, OTH, ASV, ASV, # B8 - BF
+ OTH, OTH, ODD, OTH, ODD, OTH, OTH, OTH, # C0 - C7
+ OTH, OTH, OTH, ACV, ACV, ACV, ACV, ASV, # C8 - CF
+ OTH, OTH, OTH, OTH, OTH, OTH, OTH, ODD, # D0 - D7
+ ASV, ACV, ODD, OTH, OTH, OTH, OTH, OTH, # D8 - DF
+ OTH, OTH, OTH, OTH, OTH, ACV, ACV, ACV, # E0 - E7
+ ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV, # E8 - EF
+ ODD, ACV, ACV, ACV, ACV, ASV, ODD, ODD, # F0 - F7
+ ODD, ODD, ODD, ODD, ODD, ODD, ODD, ODD, # F8 - FF
+)
+
+# 0 : illegal
+# 1 : very unlikely
+# 2 : normal
+# 3 : very likely
+MacRomanClassModel = (
+# UDF OTH ASC ASS ACV ACO ASV ASO ODD
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, # UDF
+ 0, 3, 3, 3, 3, 3, 3, 3, 1, # OTH
+ 0, 3, 3, 3, 3, 3, 3, 3, 1, # ASC
+ 0, 3, 3, 3, 1, 1, 3, 3, 1, # ASS
+ 0, 3, 3, 3, 1, 2, 1, 2, 1, # ACV
+ 0, 3, 3, 3, 3, 3, 3, 3, 1, # ACO
+ 0, 3, 1, 3, 1, 1, 1, 3, 1, # ASV
+ 0, 3, 1, 3, 1, 1, 3, 3, 1, # ASO
+ 0, 1, 1, 1, 1, 1, 1, 1, 1, # ODD
+)
+# fmt: on
+
+
+class MacRomanProber(CharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self._last_char_class = OTH
+ self._freq_counter: List[int] = []
+ self.reset()
+
+ def reset(self) -> None:
+ self._last_char_class = OTH
+ self._freq_counter = [0] * FREQ_CAT_NUM
+
+ # express the prior that MacRoman is a somewhat rare encoding;
+ # this can be done by starting out in a slightly improbable state
+ # that must be overcome
+ self._freq_counter[2] = 10
+
+ super().reset()
+
+ @property
+ def charset_name(self) -> str:
+ return "MacRoman"
+
+ @property
+ def language(self) -> str:
+ return ""
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ byte_str = self.remove_xml_tags(byte_str)
+ for c in byte_str:
+ char_class = MacRoman_CharToClass[c]
+ freq = MacRomanClassModel[(self._last_char_class * CLASS_NUM) + char_class]
+ if freq == 0:
+ self._state = ProbingState.NOT_ME
+ break
+ self._freq_counter[freq] += 1
+ self._last_char_class = char_class
+
+ return self.state
+
+ def get_confidence(self) -> float:
+ if self.state == ProbingState.NOT_ME:
+ return 0.01
+
+ total = sum(self._freq_counter)
+ confidence = (
+ 0.0
+ if total < 0.01
+ else (self._freq_counter[3] - self._freq_counter[1] * 20.0) / total
+ )
+ confidence = max(confidence, 0.0)
+        # Lower the confidence of MacRoman so that other, more accurate
+        # detectors can take priority.
+ confidence *= 0.73
+ return confidence
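
The prior seeded in reset() is visible when the prober is driven directly;
a minimal sketch, again assuming the vendored import path:

    from pip._vendor.chardet.macromanprober import MacRomanProber

    prober = MacRomanProber()
    # reset() pre-loads _freq_counter[2] with 10, so MacRoman must see
    # genuine evidence before its confidence can overtake other probers.
    prober.feed(b"caf\x8e au lait")  # 0x8E is e-acute in MacRoman
    print(prober.charset_name, prober.get_confidence())
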
diff --git a/third_party/python/pip/pip/_vendor/chardet/mbcharsetprober.py b/third_party/python/pip/pip/_vendor/chardet/mbcharsetprober.py
new file mode 100644
index 0000000000..666307e8fe
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/mbcharsetprober.py
@@ -0,0 +1,95 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+# Proofpoint, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import Optional, Union
+
+from .chardistribution import CharDistributionAnalysis
+from .charsetprober import CharSetProber
+from .codingstatemachine import CodingStateMachine
+from .enums import LanguageFilter, MachineState, ProbingState
+
+
+class MultiByteCharSetProber(CharSetProber):
+ """
+    Base class for multi-byte charset probers. Concrete subclasses plug in
+    a coding state machine and a character distribution analyzer.
+ """
+
+ def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+ super().__init__(lang_filter=lang_filter)
+ self.distribution_analyzer: Optional[CharDistributionAnalysis] = None
+ self.coding_sm: Optional[CodingStateMachine] = None
+ self._last_char = bytearray(b"\0\0")
+
+ def reset(self) -> None:
+ super().reset()
+ if self.coding_sm:
+ self.coding_sm.reset()
+ if self.distribution_analyzer:
+ self.distribution_analyzer.reset()
+ self._last_char = bytearray(b"\0\0")
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ assert self.coding_sm is not None
+ assert self.distribution_analyzer is not None
+
+ for i, byte in enumerate(byte_str):
+ coding_state = self.coding_sm.next_state(byte)
+ if coding_state == MachineState.ERROR:
+ self.logger.debug(
+ "%s %s prober hit error at byte %s",
+ self.charset_name,
+ self.language,
+ i,
+ )
+ self._state = ProbingState.NOT_ME
+ break
+ if coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ if coding_state == MachineState.START:
+ char_len = self.coding_sm.get_current_charlen()
+ if i == 0:
+ self._last_char[1] = byte
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if self.distribution_analyzer.got_enough_data() and (
+ self.get_confidence() > self.SHORTCUT_THRESHOLD
+ ):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self) -> float:
+ assert self.distribution_analyzer is not None
+ return self.distribution_analyzer.get_confidence()
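
Concrete probers supply the two collaborators that feed() asserts on. A
sketch of that wiring, mirroring what the real Big5Prober does (the class
name DemoBig5Prober is hypothetical; the imported helpers live elsewhere
in this diff or in the vendored chardet package):

    from pip._vendor.chardet.chardistribution import Big5DistributionAnalysis
    from pip._vendor.chardet.codingstatemachine import CodingStateMachine
    from pip._vendor.chardet.mbcharsetprober import MultiByteCharSetProber
    from pip._vendor.chardet.mbcssm import BIG5_SM_MODEL

    class DemoBig5Prober(MultiByteCharSetProber):
        def __init__(self) -> None:
            super().__init__()
            # The state machine validates byte sequences; the analyzer
            # scores character frequency once sequences are accepted.
            self.coding_sm = CodingStateMachine(BIG5_SM_MODEL)
            self.distribution_analyzer = Big5DistributionAnalysis()
            self.reset()

        @property
        def charset_name(self) -> str:
            return "Big5"

        @property
        def language(self) -> str:
            return "Chinese"
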
diff --git a/third_party/python/pip/pip/_vendor/chardet/mbcsgroupprober.py b/third_party/python/pip/pip/_vendor/chardet/mbcsgroupprober.py
new file mode 100644
index 0000000000..6cb9cc7b3b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/mbcsgroupprober.py
@@ -0,0 +1,57 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+# Proofpoint, Inc.
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .big5prober import Big5Prober
+from .charsetgroupprober import CharSetGroupProber
+from .cp949prober import CP949Prober
+from .enums import LanguageFilter
+from .eucjpprober import EUCJPProber
+from .euckrprober import EUCKRProber
+from .euctwprober import EUCTWProber
+from .gb2312prober import GB2312Prober
+from .johabprober import JOHABProber
+from .sjisprober import SJISProber
+from .utf8prober import UTF8Prober
+
+
+class MBCSGroupProber(CharSetGroupProber):
+ def __init__(self, lang_filter: LanguageFilter = LanguageFilter.NONE) -> None:
+ super().__init__(lang_filter=lang_filter)
+ self.probers = [
+ UTF8Prober(),
+ SJISProber(),
+ EUCJPProber(),
+ GB2312Prober(),
+ EUCKRProber(),
+ CP949Prober(),
+ Big5Prober(),
+ EUCTWProber(),
+ JOHABProber(),
+ ]
+ self.reset()
diff --git a/third_party/python/pip/pip/_vendor/chardet/mbcssm.py b/third_party/python/pip/pip/_vendor/chardet/mbcssm.py
new file mode 100644
index 0000000000..7bbe97e666
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/mbcssm.py
@@ -0,0 +1,661 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .codingstatemachinedict import CodingStateMachineDict
+from .enums import MachineState
+
+# BIG5
+
+# fmt: off
+BIG5_CLS = (
+ 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as legal value
+ 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
+ 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
+ 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37
+ 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47
+ 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57
+ 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67
+ 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77
+ 2, 2, 2, 2, 2, 2, 2, 1, # 78 - 7f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 80 - 87
+ 4, 4, 4, 4, 4, 4, 4, 4, # 88 - 8f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 90 - 97
+ 4, 4, 4, 4, 4, 4, 4, 4, # 98 - 9f
+ 4, 3, 3, 3, 3, 3, 3, 3, # a0 - a7
+ 3, 3, 3, 3, 3, 3, 3, 3, # a8 - af
+ 3, 3, 3, 3, 3, 3, 3, 3, # b0 - b7
+ 3, 3, 3, 3, 3, 3, 3, 3, # b8 - bf
+ 3, 3, 3, 3, 3, 3, 3, 3, # c0 - c7
+ 3, 3, 3, 3, 3, 3, 3, 3, # c8 - cf
+ 3, 3, 3, 3, 3, 3, 3, 3, # d0 - d7
+ 3, 3, 3, 3, 3, 3, 3, 3, # d8 - df
+ 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7
+ 3, 3, 3, 3, 3, 3, 3, 3, # e8 - ef
+ 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7
+ 3, 3, 3, 3, 3, 3, 3, 0 # f8 - ff
+)
+
+BIG5_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,#08-0f
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START#10-17
+)
+# fmt: on
+
+BIG5_CHAR_LEN_TABLE = (0, 1, 1, 2, 0)
+
+BIG5_SM_MODEL: CodingStateMachineDict = {
+ "class_table": BIG5_CLS,
+ "class_factor": 5,
+ "state_table": BIG5_ST,
+ "char_len_table": BIG5_CHAR_LEN_TABLE,
+ "name": "Big5",
+}
+
+# CP949
+# fmt: off
+CP949_CLS = (
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, # 00 - 0f
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, # 10 - 1f
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 2f
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 3f
+ 1, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, # 40 - 4f
+ 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1, # 50 - 5f
+ 1, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, # 60 - 6f
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 1, 1, 1, # 70 - 7f
+ 0, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, # 80 - 8f
+ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, # 90 - 9f
+ 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, # a0 - af
+ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, # b0 - bf
+ 7, 7, 7, 7, 7, 7, 9, 2, 2, 3, 2, 2, 2, 2, 2, 2, # c0 - cf
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, # d0 - df
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, # e0 - ef
+ 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 0, # f0 - ff
+)
+
+CP949_ST = (
+#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
+ MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.START,MachineState.START, 4, 5,MachineState.ERROR, 6, # MachineState.START
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, # MachineState.ERROR
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 3
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 4
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, # 5
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START, # 6
+)
+# fmt: on
+
+CP949_CHAR_LEN_TABLE = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
+
+CP949_SM_MODEL: CodingStateMachineDict = {
+ "class_table": CP949_CLS,
+ "class_factor": 10,
+ "state_table": CP949_ST,
+ "char_len_table": CP949_CHAR_LEN_TABLE,
+ "name": "CP949",
+}
+
+# EUC-JP
+# fmt: off
+EUCJP_CLS = (
+ 4, 4, 4, 4, 4, 4, 4, 4, # 00 - 07
+ 4, 4, 4, 4, 4, 4, 5, 5, # 08 - 0f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 10 - 17
+ 4, 4, 4, 5, 4, 4, 4, 4, # 18 - 1f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 20 - 27
+ 4, 4, 4, 4, 4, 4, 4, 4, # 28 - 2f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 30 - 37
+ 4, 4, 4, 4, 4, 4, 4, 4, # 38 - 3f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 40 - 47
+ 4, 4, 4, 4, 4, 4, 4, 4, # 48 - 4f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 50 - 57
+ 4, 4, 4, 4, 4, 4, 4, 4, # 58 - 5f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 60 - 67
+ 4, 4, 4, 4, 4, 4, 4, 4, # 68 - 6f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 70 - 77
+ 4, 4, 4, 4, 4, 4, 4, 4, # 78 - 7f
+ 5, 5, 5, 5, 5, 5, 5, 5, # 80 - 87
+ 5, 5, 5, 5, 5, 5, 1, 3, # 88 - 8f
+ 5, 5, 5, 5, 5, 5, 5, 5, # 90 - 97
+ 5, 5, 5, 5, 5, 5, 5, 5, # 98 - 9f
+ 5, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
+ 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af
+ 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
+ 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
+ 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
+ 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf
+ 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
+ 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
+ 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7
+ 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef
+ 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7
+ 0, 0, 0, 0, 0, 0, 0, 5 # f8 - ff
+)
+
+EUCJP_ST = (
+ 3, 4, 3, 5,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 3,MachineState.ERROR,#18-1f
+ 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START#20-27
+)
+# fmt: on
+
+EUCJP_CHAR_LEN_TABLE = (2, 2, 2, 3, 1, 0)
+
+EUCJP_SM_MODEL: CodingStateMachineDict = {
+ "class_table": EUCJP_CLS,
+ "class_factor": 6,
+ "state_table": EUCJP_ST,
+ "char_len_table": EUCJP_CHAR_LEN_TABLE,
+ "name": "EUC-JP",
+}
+
+# EUC-KR
+# fmt: off
+EUCKR_CLS = (
+ 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07
+ 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
+ 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
+ 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37
+ 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 40 - 47
+ 1, 1, 1, 1, 1, 1, 1, 1, # 48 - 4f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 50 - 57
+ 1, 1, 1, 1, 1, 1, 1, 1, # 58 - 5f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 60 - 67
+ 1, 1, 1, 1, 1, 1, 1, 1, # 68 - 6f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 70 - 77
+ 1, 1, 1, 1, 1, 1, 1, 1, # 78 - 7f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87
+ 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97
+ 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f
+ 0, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
+ 2, 2, 2, 2, 2, 3, 3, 3, # a8 - af
+ 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
+ 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
+ 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
+ 2, 3, 2, 2, 2, 2, 2, 2, # c8 - cf
+ 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
+ 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
+ 2, 2, 2, 2, 2, 2, 2, 2, # e0 - e7
+ 2, 2, 2, 2, 2, 2, 2, 2, # e8 - ef
+ 2, 2, 2, 2, 2, 2, 2, 2, # f0 - f7
+ 2, 2, 2, 2, 2, 2, 2, 0 # f8 - ff
+)
+
+EUCKR_ST = (
+ MachineState.ERROR,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #08-0f
+)
+# fmt: on
+
+EUCKR_CHAR_LEN_TABLE = (0, 1, 2, 0)
+
+EUCKR_SM_MODEL: CodingStateMachineDict = {
+ "class_table": EUCKR_CLS,
+ "class_factor": 4,
+ "state_table": EUCKR_ST,
+ "char_len_table": EUCKR_CHAR_LEN_TABLE,
+ "name": "EUC-KR",
+}
+
+# JOHAB
+# fmt: off
+JOHAB_CLS = (
+ 4,4,4,4,4,4,4,4, # 00 - 07
+ 4,4,4,4,4,4,0,0, # 08 - 0f
+ 4,4,4,4,4,4,4,4, # 10 - 17
+ 4,4,4,0,4,4,4,4, # 18 - 1f
+ 4,4,4,4,4,4,4,4, # 20 - 27
+ 4,4,4,4,4,4,4,4, # 28 - 2f
+ 4,3,3,3,3,3,3,3, # 30 - 37
+ 3,3,3,3,3,3,3,3, # 38 - 3f
+ 3,1,1,1,1,1,1,1, # 40 - 47
+ 1,1,1,1,1,1,1,1, # 48 - 4f
+ 1,1,1,1,1,1,1,1, # 50 - 57
+ 1,1,1,1,1,1,1,1, # 58 - 5f
+ 1,1,1,1,1,1,1,1, # 60 - 67
+ 1,1,1,1,1,1,1,1, # 68 - 6f
+ 1,1,1,1,1,1,1,1, # 70 - 77
+ 1,1,1,1,1,1,1,2, # 78 - 7f
+ 6,6,6,6,8,8,8,8, # 80 - 87
+ 8,8,8,8,8,8,8,8, # 88 - 8f
+ 8,7,7,7,7,7,7,7, # 90 - 97
+ 7,7,7,7,7,7,7,7, # 98 - 9f
+ 7,7,7,7,7,7,7,7, # a0 - a7
+ 7,7,7,7,7,7,7,7, # a8 - af
+ 7,7,7,7,7,7,7,7, # b0 - b7
+ 7,7,7,7,7,7,7,7, # b8 - bf
+ 7,7,7,7,7,7,7,7, # c0 - c7
+ 7,7,7,7,7,7,7,7, # c8 - cf
+ 7,7,7,7,5,5,5,5, # d0 - d7
+ 5,9,9,9,9,9,9,5, # d8 - df
+ 9,9,9,9,9,9,9,9, # e0 - e7
+ 9,9,9,9,9,9,9,9, # e8 - ef
+ 9,9,9,9,9,9,9,9, # f0 - f7
+ 9,9,5,5,5,5,5,0 # f8 - ff
+)
+
+JOHAB_ST = (
+# cls = 0 1 2 3 4 5 6 7 8 9
+ MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,3 ,3 ,4 , # MachineState.START
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME, # MachineState.ITS_ME
+ MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR ,MachineState.ERROR , # MachineState.ERROR
+ MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.ERROR ,MachineState.ERROR ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START ,MachineState.START , # 3
+ MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START ,MachineState.ERROR ,MachineState.START , # 4
+)
+# fmt: on
+
+JOHAB_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 0, 0, 2, 2, 2)
+
+JOHAB_SM_MODEL: CodingStateMachineDict = {
+ "class_table": JOHAB_CLS,
+ "class_factor": 10,
+ "state_table": JOHAB_ST,
+ "char_len_table": JOHAB_CHAR_LEN_TABLE,
+ "name": "Johab",
+}
+
+# EUC-TW
+# fmt: off
+EUCTW_CLS = (
+ 2, 2, 2, 2, 2, 2, 2, 2, # 00 - 07
+ 2, 2, 2, 2, 2, 2, 0, 0, # 08 - 0f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 10 - 17
+ 2, 2, 2, 0, 2, 2, 2, 2, # 18 - 1f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 20 - 27
+ 2, 2, 2, 2, 2, 2, 2, 2, # 28 - 2f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 30 - 37
+ 2, 2, 2, 2, 2, 2, 2, 2, # 38 - 3f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47
+ 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57
+ 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67
+ 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77
+ 2, 2, 2, 2, 2, 2, 2, 2, # 78 - 7f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87
+ 0, 0, 0, 0, 0, 0, 6, 0, # 88 - 8f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97
+ 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f
+ 0, 3, 4, 4, 4, 4, 4, 4, # a0 - a7
+ 5, 5, 1, 1, 1, 1, 1, 1, # a8 - af
+ 1, 1, 1, 1, 1, 1, 1, 1, # b0 - b7
+ 1, 1, 1, 1, 1, 1, 1, 1, # b8 - bf
+ 1, 1, 3, 1, 3, 3, 3, 3, # c0 - c7
+ 3, 3, 3, 3, 3, 3, 3, 3, # c8 - cf
+ 3, 3, 3, 3, 3, 3, 3, 3, # d0 - d7
+ 3, 3, 3, 3, 3, 3, 3, 3, # d8 - df
+ 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7
+ 3, 3, 3, 3, 3, 3, 3, 3, # e8 - ef
+ 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7
+ 3, 3, 3, 3, 3, 3, 3, 0 # f8 - ff
+)
+
+EUCTW_ST = (
+ MachineState.ERROR,MachineState.ERROR,MachineState.START, 3, 3, 3, 4,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.START,MachineState.ERROR,#10-17
+ MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,#20-27
+ MachineState.START,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
+)
+# fmt: on
+
+EUCTW_CHAR_LEN_TABLE = (0, 0, 1, 2, 2, 2, 3)
+
+EUCTW_SM_MODEL: CodingStateMachineDict = {
+ "class_table": EUCTW_CLS,
+ "class_factor": 7,
+ "state_table": EUCTW_ST,
+ "char_len_table": EUCTW_CHAR_LEN_TABLE,
+ "name": "x-euc-tw",
+}
+
+# GB2312
+# fmt: off
+GB2312_CLS = (
+ 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07
+ 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
+ 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
+ 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
+ 3, 3, 3, 3, 3, 3, 3, 3, # 30 - 37
+ 3, 3, 1, 1, 1, 1, 1, 1, # 38 - 3f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47
+ 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57
+ 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67
+ 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77
+ 2, 2, 2, 2, 2, 2, 2, 4, # 78 - 7f
+ 5, 6, 6, 6, 6, 6, 6, 6, # 80 - 87
+ 6, 6, 6, 6, 6, 6, 6, 6, # 88 - 8f
+ 6, 6, 6, 6, 6, 6, 6, 6, # 90 - 97
+ 6, 6, 6, 6, 6, 6, 6, 6, # 98 - 9f
+ 6, 6, 6, 6, 6, 6, 6, 6, # a0 - a7
+ 6, 6, 6, 6, 6, 6, 6, 6, # a8 - af
+ 6, 6, 6, 6, 6, 6, 6, 6, # b0 - b7
+ 6, 6, 6, 6, 6, 6, 6, 6, # b8 - bf
+ 6, 6, 6, 6, 6, 6, 6, 6, # c0 - c7
+ 6, 6, 6, 6, 6, 6, 6, 6, # c8 - cf
+ 6, 6, 6, 6, 6, 6, 6, 6, # d0 - d7
+ 6, 6, 6, 6, 6, 6, 6, 6, # d8 - df
+ 6, 6, 6, 6, 6, 6, 6, 6, # e0 - e7
+ 6, 6, 6, 6, 6, 6, 6, 6, # e8 - ef
+ 6, 6, 6, 6, 6, 6, 6, 6, # f0 - f7
+ 6, 6, 6, 6, 6, 6, 6, 0 # f8 - ff
+)
+
+GB2312_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START, 3,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,#10-17
+ 4,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ MachineState.ERROR,MachineState.ERROR, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#20-27
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.START #28-2f
+)
+# fmt: on
+
+# To be accurate, the length of class 6 can be either 2 or 4.
+# But it is not necessary to discriminate between the two, since
+# class 6 is used for frequency analysis only and each code range
+# is validated there as well, so it is safe to set it to 2 here.
+GB2312_CHAR_LEN_TABLE = (0, 1, 1, 1, 1, 1, 2)
+
+GB2312_SM_MODEL: CodingStateMachineDict = {
+ "class_table": GB2312_CLS,
+ "class_factor": 7,
+ "state_table": GB2312_ST,
+ "char_len_table": GB2312_CHAR_LEN_TABLE,
+ "name": "GB2312",
+}
+
+# Shift_JIS
+# fmt: off
+SJIS_CLS = (
+ 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07
+ 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
+ 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
+ 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37
+ 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 40 - 47
+ 2, 2, 2, 2, 2, 2, 2, 2, # 48 - 4f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 50 - 57
+ 2, 2, 2, 2, 2, 2, 2, 2, # 58 - 5f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 60 - 67
+ 2, 2, 2, 2, 2, 2, 2, 2, # 68 - 6f
+ 2, 2, 2, 2, 2, 2, 2, 2, # 70 - 77
+ 2, 2, 2, 2, 2, 2, 2, 1, # 78 - 7f
+ 3, 3, 3, 3, 3, 2, 2, 3, # 80 - 87
+ 3, 3, 3, 3, 3, 3, 3, 3, # 88 - 8f
+ 3, 3, 3, 3, 3, 3, 3, 3, # 90 - 97
+ 3, 3, 3, 3, 3, 3, 3, 3, # 98 - 9f
+    # 0xA0 is illegal in Shift_JIS encoding, but some pages do
+    # contain such bytes, so we need to be more forgiving of errors.
+ 2, 2, 2, 2, 2, 2, 2, 2, # a0 - a7
+ 2, 2, 2, 2, 2, 2, 2, 2, # a8 - af
+ 2, 2, 2, 2, 2, 2, 2, 2, # b0 - b7
+ 2, 2, 2, 2, 2, 2, 2, 2, # b8 - bf
+ 2, 2, 2, 2, 2, 2, 2, 2, # c0 - c7
+ 2, 2, 2, 2, 2, 2, 2, 2, # c8 - cf
+ 2, 2, 2, 2, 2, 2, 2, 2, # d0 - d7
+ 2, 2, 2, 2, 2, 2, 2, 2, # d8 - df
+ 3, 3, 3, 3, 3, 3, 3, 3, # e0 - e7
+ 3, 3, 3, 3, 3, 4, 4, 4, # e8 - ef
+ 3, 3, 3, 3, 3, 3, 3, 3, # f0 - f7
+ 3, 3, 3, 3, 3, 0, 0, 0, # f8 - ff
+)
+
+SJIS_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.START, 3,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START #10-17
+)
+# fmt: on
+
+SJIS_CHAR_LEN_TABLE = (0, 1, 1, 2, 0, 0)
+
+SJIS_SM_MODEL: CodingStateMachineDict = {
+ "class_table": SJIS_CLS,
+ "class_factor": 6,
+ "state_table": SJIS_ST,
+ "char_len_table": SJIS_CHAR_LEN_TABLE,
+ "name": "Shift_JIS",
+}
+
+# UCS2-BE
+# fmt: off
+UCS2BE_CLS = (
+ 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
+ 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17
+ 0, 0, 0, 3, 0, 0, 0, 0, # 18 - 1f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27
+ 0, 3, 3, 3, 3, 3, 0, 0, # 28 - 2f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37
+ 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47
+ 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57
+ 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67
+ 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77
+ 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87
+ 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97
+ 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f
+ 0, 0, 0, 0, 0, 0, 0, 0, # a0 - a7
+ 0, 0, 0, 0, 0, 0, 0, 0, # a8 - af
+ 0, 0, 0, 0, 0, 0, 0, 0, # b0 - b7
+ 0, 0, 0, 0, 0, 0, 0, 0, # b8 - bf
+ 0, 0, 0, 0, 0, 0, 0, 0, # c0 - c7
+ 0, 0, 0, 0, 0, 0, 0, 0, # c8 - cf
+ 0, 0, 0, 0, 0, 0, 0, 0, # d0 - d7
+ 0, 0, 0, 0, 0, 0, 0, 0, # d8 - df
+ 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7
+ 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef
+ 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7
+ 0, 0, 0, 0, 0, 0, 4, 5 # f8 - ff
+)
+
+UCS2BE_ST = (
+ 5, 7, 7,MachineState.ERROR, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME, 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,#10-17
+ 6, 6, 6, 6, 6,MachineState.ITS_ME, 6, 6,#18-1f
+ 6, 6, 6, 6, 5, 7, 7,MachineState.ERROR,#20-27
+ 5, 8, 6, 6,MachineState.ERROR, 6, 6, 6,#28-2f
+ 6, 6, 6, 6,MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START #30-37
+)
+# fmt: on
+
+UCS2BE_CHAR_LEN_TABLE = (2, 2, 2, 0, 2, 2)
+
+UCS2BE_SM_MODEL: CodingStateMachineDict = {
+ "class_table": UCS2BE_CLS,
+ "class_factor": 6,
+ "state_table": UCS2BE_ST,
+ "char_len_table": UCS2BE_CHAR_LEN_TABLE,
+ "name": "UTF-16BE",
+}
+
+# UCS2-LE
+# fmt: off
+UCS2LE_CLS = (
+ 0, 0, 0, 0, 0, 0, 0, 0, # 00 - 07
+ 0, 0, 1, 0, 0, 2, 0, 0, # 08 - 0f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 10 - 17
+ 0, 0, 0, 3, 0, 0, 0, 0, # 18 - 1f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 20 - 27
+ 0, 3, 3, 3, 3, 3, 0, 0, # 28 - 2f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 30 - 37
+ 0, 0, 0, 0, 0, 0, 0, 0, # 38 - 3f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 40 - 47
+ 0, 0, 0, 0, 0, 0, 0, 0, # 48 - 4f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 50 - 57
+ 0, 0, 0, 0, 0, 0, 0, 0, # 58 - 5f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 60 - 67
+ 0, 0, 0, 0, 0, 0, 0, 0, # 68 - 6f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 70 - 77
+ 0, 0, 0, 0, 0, 0, 0, 0, # 78 - 7f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 80 - 87
+ 0, 0, 0, 0, 0, 0, 0, 0, # 88 - 8f
+ 0, 0, 0, 0, 0, 0, 0, 0, # 90 - 97
+ 0, 0, 0, 0, 0, 0, 0, 0, # 98 - 9f
+ 0, 0, 0, 0, 0, 0, 0, 0, # a0 - a7
+ 0, 0, 0, 0, 0, 0, 0, 0, # a8 - af
+ 0, 0, 0, 0, 0, 0, 0, 0, # b0 - b7
+ 0, 0, 0, 0, 0, 0, 0, 0, # b8 - bf
+ 0, 0, 0, 0, 0, 0, 0, 0, # c0 - c7
+ 0, 0, 0, 0, 0, 0, 0, 0, # c8 - cf
+ 0, 0, 0, 0, 0, 0, 0, 0, # d0 - d7
+ 0, 0, 0, 0, 0, 0, 0, 0, # d8 - df
+ 0, 0, 0, 0, 0, 0, 0, 0, # e0 - e7
+ 0, 0, 0, 0, 0, 0, 0, 0, # e8 - ef
+ 0, 0, 0, 0, 0, 0, 0, 0, # f0 - f7
+ 0, 0, 0, 0, 0, 0, 4, 5 # f8 - ff
+)
+
+UCS2LE_ST = (
+ 6, 6, 7, 6, 4, 3,MachineState.ERROR,MachineState.ERROR,#00-07
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#08-0f
+ MachineState.ITS_ME,MachineState.ITS_ME, 5, 5, 5,MachineState.ERROR,MachineState.ITS_ME,MachineState.ERROR,#10-17
+ 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR, 6, 6,#18-1f
+ 7, 6, 8, 8, 5, 5, 5,MachineState.ERROR,#20-27
+ 5, 5, 5,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5,#28-2f
+ 5, 5, 5,MachineState.ERROR, 5,MachineState.ERROR,MachineState.START,MachineState.START #30-37
+)
+# fmt: on
+
+UCS2LE_CHAR_LEN_TABLE = (2, 2, 2, 2, 2, 2)
+
+UCS2LE_SM_MODEL: CodingStateMachineDict = {
+ "class_table": UCS2LE_CLS,
+ "class_factor": 6,
+ "state_table": UCS2LE_ST,
+ "char_len_table": UCS2LE_CHAR_LEN_TABLE,
+ "name": "UTF-16LE",
+}
+
+# UTF-8
+# fmt: off
+UTF8_CLS = (
+ 1, 1, 1, 1, 1, 1, 1, 1, # 00 - 07 #allow 0x00 as a legal value
+ 1, 1, 1, 1, 1, 1, 0, 0, # 08 - 0f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 10 - 17
+ 1, 1, 1, 0, 1, 1, 1, 1, # 18 - 1f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 20 - 27
+ 1, 1, 1, 1, 1, 1, 1, 1, # 28 - 2f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 30 - 37
+ 1, 1, 1, 1, 1, 1, 1, 1, # 38 - 3f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 40 - 47
+ 1, 1, 1, 1, 1, 1, 1, 1, # 48 - 4f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 50 - 57
+ 1, 1, 1, 1, 1, 1, 1, 1, # 58 - 5f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 60 - 67
+ 1, 1, 1, 1, 1, 1, 1, 1, # 68 - 6f
+ 1, 1, 1, 1, 1, 1, 1, 1, # 70 - 77
+ 1, 1, 1, 1, 1, 1, 1, 1, # 78 - 7f
+ 2, 2, 2, 2, 3, 3, 3, 3, # 80 - 87
+ 4, 4, 4, 4, 4, 4, 4, 4, # 88 - 8f
+ 4, 4, 4, 4, 4, 4, 4, 4, # 90 - 97
+ 4, 4, 4, 4, 4, 4, 4, 4, # 98 - 9f
+ 5, 5, 5, 5, 5, 5, 5, 5, # a0 - a7
+ 5, 5, 5, 5, 5, 5, 5, 5, # a8 - af
+ 5, 5, 5, 5, 5, 5, 5, 5, # b0 - b7
+ 5, 5, 5, 5, 5, 5, 5, 5, # b8 - bf
+ 0, 0, 6, 6, 6, 6, 6, 6, # c0 - c7
+ 6, 6, 6, 6, 6, 6, 6, 6, # c8 - cf
+ 6, 6, 6, 6, 6, 6, 6, 6, # d0 - d7
+ 6, 6, 6, 6, 6, 6, 6, 6, # d8 - df
+ 7, 8, 8, 8, 8, 8, 8, 8, # e0 - e7
+ 8, 8, 8, 8, 8, 9, 8, 8, # e8 - ef
+ 10, 11, 11, 11, 11, 11, 11, 11, # f0 - f7
+ 12, 13, 13, 13, 14, 15, 0, 0 # f8 - ff
+)
+
+UTF8_ST = (
+ MachineState.ERROR,MachineState.START,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12, 10,#00-07
+ 9, 11, 8, 7, 6, 5, 4, 3,#08-0f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#10-17
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#18-1f
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#20-27
+ MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,MachineState.ITS_ME,#28-2f
+ MachineState.ERROR,MachineState.ERROR, 5, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#30-37
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#38-3f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 5, 5, 5,MachineState.ERROR,MachineState.ERROR,#40-47
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#48-4f
+ MachineState.ERROR,MachineState.ERROR, 7, 7, 7, 7,MachineState.ERROR,MachineState.ERROR,#50-57
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#58-5f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 7, 7,MachineState.ERROR,MachineState.ERROR,#60-67
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#68-6f
+ MachineState.ERROR,MachineState.ERROR, 9, 9, 9, 9,MachineState.ERROR,MachineState.ERROR,#70-77
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#78-7f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 9,MachineState.ERROR,MachineState.ERROR,#80-87
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#88-8f
+ MachineState.ERROR,MachineState.ERROR, 12, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,#90-97
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#98-9f
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR, 12,MachineState.ERROR,MachineState.ERROR,#a0-a7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#a8-af
+ MachineState.ERROR,MachineState.ERROR, 12, 12, 12,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b0-b7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,#b8-bf
+ MachineState.ERROR,MachineState.ERROR,MachineState.START,MachineState.START,MachineState.START,MachineState.START,MachineState.ERROR,MachineState.ERROR,#c0-c7
+ MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR,MachineState.ERROR #c8-cf
+)
+# fmt: on
+
+UTF8_CHAR_LEN_TABLE = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
+
+UTF8_SM_MODEL: CodingStateMachineDict = {
+ "class_table": UTF8_CLS,
+ "class_factor": 16,
+ "state_table": UTF8_ST,
+ "char_len_table": UTF8_CHAR_LEN_TABLE,
+ "name": "UTF-8",
+}
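
These models are plain data; CodingStateMachine interprets them. A minimal
sketch of stepping the UTF-8 machine by hand, assuming the vendored import
path:

    from pip._vendor.chardet.codingstatemachine import CodingStateMachine
    from pip._vendor.chardet.enums import MachineState
    from pip._vendor.chardet.mbcssm import UTF8_SM_MODEL

    sm = CodingStateMachine(UTF8_SM_MODEL)
    state = MachineState.START
    for byte in "é".encode("utf-8"):  # two bytes: 0xC3 0xA9
        state = sm.next_state(byte)
    # Back at START after a complete sequence; the char length is 2.
    print(state == MachineState.START, sm.get_current_charlen())
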
diff --git a/third_party/python/pip/pip/_vendor/chardet/metadata/__init__.py b/third_party/python/pip/pip/_vendor/chardet/metadata/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/metadata/__init__.py
diff --git a/third_party/python/pip/pip/_vendor/chardet/metadata/languages.py b/third_party/python/pip/pip/_vendor/chardet/metadata/languages.py
new file mode 100644
index 0000000000..eb40c5f0c8
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/metadata/languages.py
@@ -0,0 +1,352 @@
+"""
+Metadata about languages used by our model training code for our
+SingleByteCharSetProbers. Could be used for other things in the future.
+
+This code is based on the language metadata from the uchardet project.
+"""
+
+from string import ascii_letters
+from typing import List, Optional
+
+# TODO: Add Ukrainian (KOI8-U)
+
+
+class Language:
+ """Metadata about a language useful for training models
+
+ :ivar name: The human name for the language, in English.
+ :type name: str
+ :ivar iso_code: 2-letter ISO 639-1 if possible, 3-letter ISO code otherwise,
+        or a code from another catalog as a last resort.
+ :type iso_code: str
+ :ivar use_ascii: Whether or not ASCII letters should be included in trained
+ models.
+ :type use_ascii: bool
+ :ivar charsets: The charsets we want to support and create data for.
+ :type charsets: list of str
+ :ivar alphabet: The characters in the language's alphabet. If `use_ascii` is
+ `True`, you only need to add those not in the ASCII set.
+ :type alphabet: str
+ :ivar wiki_start_pages: The Wikipedia pages to start from if we're crawling
+ Wikipedia for training data.
+ :type wiki_start_pages: list of str
+ """
+
+ def __init__(
+ self,
+ name: Optional[str] = None,
+ iso_code: Optional[str] = None,
+ use_ascii: bool = True,
+ charsets: Optional[List[str]] = None,
+ alphabet: Optional[str] = None,
+ wiki_start_pages: Optional[List[str]] = None,
+ ) -> None:
+ super().__init__()
+ self.name = name
+ self.iso_code = iso_code
+ self.use_ascii = use_ascii
+ self.charsets = charsets
+ if self.use_ascii:
+ if alphabet:
+ alphabet += ascii_letters
+ else:
+ alphabet = ascii_letters
+ elif not alphabet:
+ raise ValueError("Must supply alphabet if use_ascii is False")
+ self.alphabet = "".join(sorted(set(alphabet))) if alphabet else None
+ self.wiki_start_pages = wiki_start_pages
+
+ def __repr__(self) -> str:
+ param_str = ", ".join(
+ f"{k}={v!r}" for k, v in self.__dict__.items() if not k.startswith("_")
+ )
+ return f"{self.__class__.__name__}({param_str})"
+
+
+LANGUAGES = {
+ "Arabic": Language(
+ name="Arabic",
+ iso_code="ar",
+ use_ascii=False,
+ # We only support encodings that use isolated
+ # forms, because the current recommendation is
+ # that the rendering system handles presentation
+ # forms. This means we purposefully skip IBM864.
+ charsets=["ISO-8859-6", "WINDOWS-1256", "CP720", "CP864"],
+ alphabet="ءآأؤإئابةتثجحخدذرزسشصضطظعغػؼؽؾؿـفقكلمنهوىيًٌٍَُِّ",
+ wiki_start_pages=["الصفحة_الرئيسية"],
+ ),
+ "Belarusian": Language(
+ name="Belarusian",
+ iso_code="be",
+ use_ascii=False,
+ charsets=["ISO-8859-5", "WINDOWS-1251", "IBM866", "MacCyrillic"],
+ alphabet="АБВГДЕЁЖЗІЙКЛМНОПРСТУЎФХЦЧШЫЬЭЮЯабвгдеёжзійклмнопрстуўфхцчшыьэюяʼ",
+ wiki_start_pages=["Галоўная_старонка"],
+ ),
+ "Bulgarian": Language(
+ name="Bulgarian",
+ iso_code="bg",
+ use_ascii=False,
+ charsets=["ISO-8859-5", "WINDOWS-1251", "IBM855"],
+ alphabet="АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЬЮЯабвгдежзийклмнопрстуфхцчшщъьюя",
+ wiki_start_pages=["Начална_страница"],
+ ),
+ "Czech": Language(
+ name="Czech",
+ iso_code="cz",
+ use_ascii=True,
+ charsets=["ISO-8859-2", "WINDOWS-1250"],
+ alphabet="áčďéěíňóřšťúůýžÁČĎÉĚÍŇÓŘŠŤÚŮÝŽ",
+ wiki_start_pages=["Hlavní_strana"],
+ ),
+ "Danish": Language(
+ name="Danish",
+ iso_code="da",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
+ alphabet="æøåÆØÅ",
+ wiki_start_pages=["Forside"],
+ ),
+ "German": Language(
+ name="German",
+ iso_code="de",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
+ alphabet="äöüßẞÄÖÜ",
+ wiki_start_pages=["Wikipedia:Hauptseite"],
+ ),
+ "Greek": Language(
+ name="Greek",
+ iso_code="el",
+ use_ascii=False,
+ charsets=["ISO-8859-7", "WINDOWS-1253"],
+ alphabet="αβγδεζηθικλμνξοπρσςτυφχψωάέήίόύώΑΒΓΔΕΖΗΘΙΚΛΜΝΞΟΠΡΣΣΤΥΦΧΨΩΆΈΉΊΌΎΏ",
+ wiki_start_pages=["Πύλη:Κύρια"],
+ ),
+ "English": Language(
+ name="English",
+ iso_code="en",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"],
+ wiki_start_pages=["Main_Page"],
+ ),
+ "Esperanto": Language(
+ name="Esperanto",
+ iso_code="eo",
+ # Q, W, X, and Y not used at all
+ use_ascii=False,
+ charsets=["ISO-8859-3"],
+ alphabet="abcĉdefgĝhĥijĵklmnoprsŝtuŭvzABCĈDEFGĜHĤIJĴKLMNOPRSŜTUŬVZ",
+ wiki_start_pages=["Vikipedio:Ĉefpaĝo"],
+ ),
+ "Spanish": Language(
+ name="Spanish",
+ iso_code="es",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
+ alphabet="ñáéíóúüÑÁÉÍÓÚÜ",
+ wiki_start_pages=["Wikipedia:Portada"],
+ ),
+ "Estonian": Language(
+ name="Estonian",
+ iso_code="et",
+ use_ascii=False,
+ charsets=["ISO-8859-4", "ISO-8859-13", "WINDOWS-1257"],
+ # C, F, Š, Q, W, X, Y, Z, Ž are only for
+ # loanwords
+ alphabet="ABDEGHIJKLMNOPRSTUVÕÄÖÜabdeghijklmnoprstuvõäöü",
+ wiki_start_pages=["Esileht"],
+ ),
+ "Finnish": Language(
+ name="Finnish",
+ iso_code="fi",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
+ alphabet="ÅÄÖŠŽåäöšž",
+ wiki_start_pages=["Wikipedia:Etusivu"],
+ ),
+ "French": Language(
+ name="French",
+ iso_code="fr",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
+ alphabet="œàâçèéîïùûêŒÀÂÇÈÉÎÏÙÛÊ",
+ wiki_start_pages=["Wikipédia:Accueil_principal", "Bœuf (animal)"],
+ ),
+ "Hebrew": Language(
+ name="Hebrew",
+ iso_code="he",
+ use_ascii=False,
+ charsets=["ISO-8859-8", "WINDOWS-1255"],
+ alphabet="אבגדהוזחטיךכלםמןנסעףפץצקרשתװױײ",
+ wiki_start_pages=["עמוד_ראשי"],
+ ),
+ "Croatian": Language(
+ name="Croatian",
+ iso_code="hr",
+ # Q, W, X, Y are only used for foreign words.
+ use_ascii=False,
+ charsets=["ISO-8859-2", "WINDOWS-1250"],
+ alphabet="abcčćdđefghijklmnoprsštuvzžABCČĆDĐEFGHIJKLMNOPRSŠTUVZŽ",
+ wiki_start_pages=["Glavna_stranica"],
+ ),
+ "Hungarian": Language(
+ name="Hungarian",
+ iso_code="hu",
+ # Q, W, X, Y are only used for foreign words.
+ use_ascii=False,
+ charsets=["ISO-8859-2", "WINDOWS-1250"],
+ alphabet="abcdefghijklmnoprstuvzáéíóöőúüűABCDEFGHIJKLMNOPRSTUVZÁÉÍÓÖŐÚÜŰ",
+ wiki_start_pages=["Kezdőlap"],
+ ),
+ "Italian": Language(
+ name="Italian",
+ iso_code="it",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
+ alphabet="ÀÈÉÌÒÓÙàèéìòóù",
+ wiki_start_pages=["Pagina_principale"],
+ ),
+ "Lithuanian": Language(
+ name="Lithuanian",
+ iso_code="lt",
+ use_ascii=False,
+ charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
+ # Q, W, and X not used at all
+ alphabet="AĄBCČDEĘĖFGHIĮYJKLMNOPRSŠTUŲŪVZŽaąbcčdeęėfghiįyjklmnoprsštuųūvzž",
+ wiki_start_pages=["Pagrindinis_puslapis"],
+ ),
+ "Latvian": Language(
+ name="Latvian",
+ iso_code="lv",
+ use_ascii=False,
+ charsets=["ISO-8859-13", "WINDOWS-1257", "ISO-8859-4"],
+ # Q, W, X, Y are only for loanwords
+ alphabet="AĀBCČDEĒFGĢHIĪJKĶLĻMNŅOPRSŠTUŪVZŽaābcčdeēfgģhiījkķlļmnņoprsštuūvzž",
+ wiki_start_pages=["Sākumlapa"],
+ ),
+ "Macedonian": Language(
+ name="Macedonian",
+ iso_code="mk",
+ use_ascii=False,
+ charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
+ alphabet="АБВГДЃЕЖЗЅИЈКЛЉМНЊОПРСТЌУФХЦЧЏШабвгдѓежзѕијклљмнњопрстќуфхцчџш",
+ wiki_start_pages=["Главна_страница"],
+ ),
+ "Dutch": Language(
+ name="Dutch",
+ iso_code="nl",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "WINDOWS-1252", "MacRoman"],
+ wiki_start_pages=["Hoofdpagina"],
+ ),
+ "Polish": Language(
+ name="Polish",
+ iso_code="pl",
+ # Q and X are only used for foreign words.
+ use_ascii=False,
+ charsets=["ISO-8859-2", "WINDOWS-1250"],
+ alphabet="AĄBCĆDEĘFGHIJKLŁMNŃOÓPRSŚTUWYZŹŻaąbcćdeęfghijklłmnńoóprsśtuwyzźż",
+ wiki_start_pages=["Wikipedia:Strona_główna"],
+ ),
+ "Portuguese": Language(
+ name="Portuguese",
+ iso_code="pt",
+ use_ascii=True,
+ charsets=["ISO-8859-1", "ISO-8859-15", "WINDOWS-1252", "MacRoman"],
+ alphabet="ÁÂÃÀÇÉÊÍÓÔÕÚáâãàçéêíóôõú",
+ wiki_start_pages=["Wikipédia:Página_principal"],
+ ),
+ "Romanian": Language(
+ name="Romanian",
+ iso_code="ro",
+ use_ascii=True,
+ charsets=["ISO-8859-2", "WINDOWS-1250"],
+ alphabet="ăâîșțĂÂÎȘȚ",
+ wiki_start_pages=["Pagina_principală"],
+ ),
+ "Russian": Language(
+ name="Russian",
+ iso_code="ru",
+ use_ascii=False,
+ charsets=[
+ "ISO-8859-5",
+ "WINDOWS-1251",
+ "KOI8-R",
+ "MacCyrillic",
+ "IBM866",
+ "IBM855",
+ ],
+ alphabet="абвгдеёжзийклмнопрстуфхцчшщъыьэюяАБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ",
+ wiki_start_pages=["Заглавная_страница"],
+ ),
+ "Slovak": Language(
+ name="Slovak",
+ iso_code="sk",
+ use_ascii=True,
+ charsets=["ISO-8859-2", "WINDOWS-1250"],
+ alphabet="áäčďéíĺľňóôŕšťúýžÁÄČĎÉÍĹĽŇÓÔŔŠŤÚÝŽ",
+ wiki_start_pages=["Hlavná_stránka"],
+ ),
+ "Slovene": Language(
+ name="Slovene",
+ iso_code="sl",
+ # Q, W, X, Y are only used for foreign words.
+ use_ascii=False,
+ charsets=["ISO-8859-2", "WINDOWS-1250"],
+ alphabet="abcčdefghijklmnoprsštuvzžABCČDEFGHIJKLMNOPRSŠTUVZŽ",
+ wiki_start_pages=["Glavna_stran"],
+ ),
+ # Serbian can be written in both Latin and Cyrillic, but there's no
+ # simple way to get the Latin alphabet pages from Wikipedia through
+ # the API, so for now we just support Cyrillic.
+ "Serbian": Language(
+ name="Serbian",
+ iso_code="sr",
+ alphabet="АБВГДЂЕЖЗИЈКЛЉМНЊОПРСТЋУФХЦЧЏШабвгдђежзијклљмнњопрстћуфхцчџш",
+ charsets=["ISO-8859-5", "WINDOWS-1251", "MacCyrillic", "IBM855"],
+ wiki_start_pages=["Главна_страна"],
+ ),
+ "Thai": Language(
+ name="Thai",
+ iso_code="th",
+ use_ascii=False,
+ charsets=["ISO-8859-11", "TIS-620", "CP874"],
+ alphabet="กขฃคฅฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลฦวศษสหฬอฮฯะัาำิีึืฺุู฿เแโใไๅๆ็่้๊๋์ํ๎๏๐๑๒๓๔๕๖๗๘๙๚๛",
+ wiki_start_pages=["หน้าหลัก"],
+ ),
+ "Turkish": Language(
+ name="Turkish",
+ iso_code="tr",
+ # Q, W, and X are not used by Turkish
+ use_ascii=False,
+ charsets=["ISO-8859-3", "ISO-8859-9", "WINDOWS-1254"],
+ alphabet="abcçdefgğhıijklmnoöprsştuüvyzâîûABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZÂÎÛ",
+ wiki_start_pages=["Ana_Sayfa"],
+ ),
+ "Vietnamese": Language(
+ name="Vietnamese",
+ iso_code="vi",
+ use_ascii=False,
+ # Windows-1258 is the only common 8-bit
+ # Vietnamese encoding supported by Python.
+ # From Wikipedia:
+ # For systems that lack support for Unicode,
+ # dozens of 8-bit Vietnamese code pages are
+ # available.[1] The most common are VISCII
+ # (TCVN 5712:1993), VPS, and Windows-1258.[3]
+ # Where ASCII is required, such as when
+ # ensuring readability in plain text e-mail,
+ # Vietnamese letters are often encoded
+ # according to Vietnamese Quoted-Readable
+ # (VIQR) or VSCII Mnemonic (VSCII-MNEM),[4]
+ # though usage of either variable-width
+ # scheme has declined dramatically following
+ # the adoption of Unicode on the World Wide
+ # Web.
+ charsets=["WINDOWS-1258"],
+ alphabet="aăâbcdđeêghiklmnoôơpqrstuưvxyAĂÂBCDĐEÊGHIKLMNOÔƠPQRSTUƯVXY",
+ wiki_start_pages=["Chữ_Quốc_ngữ"],
+ ),
+}
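
A minimal sketch of querying this metadata, assuming the vendored import
path pip._vendor.chardet.metadata.languages:

    from pip._vendor.chardet.metadata.languages import LANGUAGES

    turkish = LANGUAGES["Turkish"]
    print(turkish.iso_code)           # tr
    print("ğ" in turkish.alphabet)    # True: alphabet is the sorted set
    print(turkish.charsets)           # the charsets trained for Turkish
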
diff --git a/third_party/python/pip/pip/_vendor/chardet/resultdict.py b/third_party/python/pip/pip/_vendor/chardet/resultdict.py
new file mode 100644
index 0000000000..7d36e64c46
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/resultdict.py
@@ -0,0 +1,16 @@
+from typing import TYPE_CHECKING, Optional
+
+if TYPE_CHECKING:
+ # TypedDict was introduced in Python 3.8.
+ #
+ # TODO: Remove the else block and TYPE_CHECKING check when dropping support
+ # for Python 3.7.
+ from typing import TypedDict
+
+ class ResultDict(TypedDict):
+ encoding: Optional[str]
+ confidence: float
+ language: Optional[str]
+
+else:
+ ResultDict = dict
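
A minimal sketch of the shape this TypedDict describes, which is what
chardet's detect() style APIs return (ResultDict is a plain dict at
runtime on Python 3.7):

    from pip._vendor.chardet.resultdict import ResultDict

    result: ResultDict = {
        "encoding": "utf-8",
        "confidence": 0.99,
        "language": "",
    }
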
diff --git a/third_party/python/pip/pip/_vendor/chardet/sbcharsetprober.py b/third_party/python/pip/pip/_vendor/chardet/sbcharsetprober.py
new file mode 100644
index 0000000000..0ffbcdd2c3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/sbcharsetprober.py
@@ -0,0 +1,162 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import Dict, List, NamedTuple, Optional, Union
+
+from .charsetprober import CharSetProber
+from .enums import CharacterCategory, ProbingState, SequenceLikelihood
+
+
+class SingleByteCharSetModel(NamedTuple):
+ charset_name: str
+ language: str
+ char_to_order_map: Dict[int, int]
+ language_model: Dict[int, Dict[int, int]]
+ typical_positive_ratio: float
+ keep_ascii_letters: bool
+ alphabet: str
+
+
+class SingleByteCharSetProber(CharSetProber):
+ SAMPLE_SIZE = 64
+ SB_ENOUGH_REL_THRESHOLD = 1024 # 0.25 * SAMPLE_SIZE^2
+ POSITIVE_SHORTCUT_THRESHOLD = 0.95
+ NEGATIVE_SHORTCUT_THRESHOLD = 0.05
+
+ def __init__(
+ self,
+ model: SingleByteCharSetModel,
+ is_reversed: bool = False,
+ name_prober: Optional[CharSetProber] = None,
+ ) -> None:
+ super().__init__()
+ self._model = model
+ # TRUE if we need to reverse every pair in the model lookup
+ self._reversed = is_reversed
+ # Optional auxiliary prober for name decision
+ self._name_prober = name_prober
+ self._last_order = 255
+ self._seq_counters: List[int] = []
+ self._total_seqs = 0
+ self._total_char = 0
+ self._control_char = 0
+ self._freq_char = 0
+ self.reset()
+
+ def reset(self) -> None:
+ super().reset()
+ # char order of last character
+ self._last_order = 255
+ self._seq_counters = [0] * SequenceLikelihood.get_num_categories()
+ self._total_seqs = 0
+ self._total_char = 0
+ self._control_char = 0
+ # characters that fall in our sampling range
+ self._freq_char = 0
+
+ @property
+ def charset_name(self) -> Optional[str]:
+ if self._name_prober:
+ return self._name_prober.charset_name
+ return self._model.charset_name
+
+ @property
+ def language(self) -> Optional[str]:
+ if self._name_prober:
+ return self._name_prober.language
+ return self._model.language
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ # TODO: Make filter_international_words keep things in self.alphabet
+ if not self._model.keep_ascii_letters:
+ byte_str = self.filter_international_words(byte_str)
+ else:
+ byte_str = self.remove_xml_tags(byte_str)
+ if not byte_str:
+ return self.state
+ char_to_order_map = self._model.char_to_order_map
+ language_model = self._model.language_model
+ for char in byte_str:
+ order = char_to_order_map.get(char, CharacterCategory.UNDEFINED)
+ # XXX: This was SYMBOL_CAT_ORDER before, with a value of 250, but
+ # CharacterCategory.SYMBOL is actually 253, so we use CONTROL
+ # to make it closer to the original intent. The only difference
+ # is whether or not we count digits and control characters for
+ # _total_char purposes.
+ if order < CharacterCategory.CONTROL:
+ self._total_char += 1
+ if order < self.SAMPLE_SIZE:
+ self._freq_char += 1
+ if self._last_order < self.SAMPLE_SIZE:
+ self._total_seqs += 1
+ if not self._reversed:
+ lm_cat = language_model[self._last_order][order]
+ else:
+ lm_cat = language_model[order][self._last_order]
+ self._seq_counters[lm_cat] += 1
+ self._last_order = order
+
+ charset_name = self._model.charset_name
+ if self.state == ProbingState.DETECTING:
+ if self._total_seqs > self.SB_ENOUGH_REL_THRESHOLD:
+ confidence = self.get_confidence()
+ if confidence > self.POSITIVE_SHORTCUT_THRESHOLD:
+ self.logger.debug(
+ "%s confidence = %s, we have a winner", charset_name, confidence
+ )
+ self._state = ProbingState.FOUND_IT
+ elif confidence < self.NEGATIVE_SHORTCUT_THRESHOLD:
+ self.logger.debug(
+ "%s confidence = %s, below negative shortcut threshold %s",
+ charset_name,
+ confidence,
+ self.NEGATIVE_SHORTCUT_THRESHOLD,
+ )
+ self._state = ProbingState.NOT_ME
+
+ return self.state
+
+ def get_confidence(self) -> float:
+ r = 0.01
+ if self._total_seqs > 0:
+ r = (
+ (
+ self._seq_counters[SequenceLikelihood.POSITIVE]
+ + 0.25 * self._seq_counters[SequenceLikelihood.LIKELY]
+ )
+ / self._total_seqs
+ / self._model.typical_positive_ratio
+ )
+        # The more control characters (proportionally to the size
+ # of the text), the less confident we become in the current
+ # charset.
+ r = r * (self._total_char - self._control_char) / self._total_char
+ r = r * self._freq_char / self._total_char
+ if r >= 1.0:
+ r = 0.99
+ return r
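+
+# Worked example with illustrative numbers (not from any real input): given
+# 1000 observed bigrams of which 600 scored POSITIVE and 200 scored LIKELY,
+# and a model typical_positive_ratio of 0.95,
+#
+#     r = (600 + 0.25 * 200) / 1000 / 0.95    # ~0.684
+#
+# If 1100 of 1200 counted characters fall in the sampling range and none are
+# control characters, r is further scaled by 1200/1200 and then by 1100/1200,
+# for a final confidence of roughly 0.63.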
diff --git a/third_party/python/pip/pip/_vendor/chardet/sbcsgroupprober.py b/third_party/python/pip/pip/_vendor/chardet/sbcsgroupprober.py
new file mode 100644
index 0000000000..890ae8465c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/sbcsgroupprober.py
@@ -0,0 +1,88 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from .charsetgroupprober import CharSetGroupProber
+from .hebrewprober import HebrewProber
+from .langbulgarianmodel import ISO_8859_5_BULGARIAN_MODEL, WINDOWS_1251_BULGARIAN_MODEL
+from .langgreekmodel import ISO_8859_7_GREEK_MODEL, WINDOWS_1253_GREEK_MODEL
+from .langhebrewmodel import WINDOWS_1255_HEBREW_MODEL
+
+# from .langhungarianmodel import (ISO_8859_2_HUNGARIAN_MODEL,
+# WINDOWS_1250_HUNGARIAN_MODEL)
+from .langrussianmodel import (
+ IBM855_RUSSIAN_MODEL,
+ IBM866_RUSSIAN_MODEL,
+ ISO_8859_5_RUSSIAN_MODEL,
+ KOI8_R_RUSSIAN_MODEL,
+ MACCYRILLIC_RUSSIAN_MODEL,
+ WINDOWS_1251_RUSSIAN_MODEL,
+)
+from .langthaimodel import TIS_620_THAI_MODEL
+from .langturkishmodel import ISO_8859_9_TURKISH_MODEL
+from .sbcharsetprober import SingleByteCharSetProber
+
+
+class SBCSGroupProber(CharSetGroupProber):
+ def __init__(self) -> None:
+ super().__init__()
+ hebrew_prober = HebrewProber()
+ logical_hebrew_prober = SingleByteCharSetProber(
+ WINDOWS_1255_HEBREW_MODEL, is_reversed=False, name_prober=hebrew_prober
+ )
+ # TODO: See if using ISO-8859-8 Hebrew model works better here, since
+ # it's actually the visual one
+ visual_hebrew_prober = SingleByteCharSetProber(
+ WINDOWS_1255_HEBREW_MODEL, is_reversed=True, name_prober=hebrew_prober
+ )
+ hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober)
+ # TODO: ORDER MATTERS HERE. I changed the order vs what was in master
+ # and several tests failed that did not before. Some thought
+ # should be put into the ordering, and we should consider making
+ # order not matter here, because that is very counter-intuitive.
+ self.probers = [
+ SingleByteCharSetProber(WINDOWS_1251_RUSSIAN_MODEL),
+ SingleByteCharSetProber(KOI8_R_RUSSIAN_MODEL),
+ SingleByteCharSetProber(ISO_8859_5_RUSSIAN_MODEL),
+ SingleByteCharSetProber(MACCYRILLIC_RUSSIAN_MODEL),
+ SingleByteCharSetProber(IBM866_RUSSIAN_MODEL),
+ SingleByteCharSetProber(IBM855_RUSSIAN_MODEL),
+ SingleByteCharSetProber(ISO_8859_7_GREEK_MODEL),
+ SingleByteCharSetProber(WINDOWS_1253_GREEK_MODEL),
+ SingleByteCharSetProber(ISO_8859_5_BULGARIAN_MODEL),
+ SingleByteCharSetProber(WINDOWS_1251_BULGARIAN_MODEL),
+ # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250)
+ # after we retrain model.
+ # SingleByteCharSetProber(ISO_8859_2_HUNGARIAN_MODEL),
+ # SingleByteCharSetProber(WINDOWS_1250_HUNGARIAN_MODEL),
+ SingleByteCharSetProber(TIS_620_THAI_MODEL),
+ SingleByteCharSetProber(ISO_8859_9_TURKISH_MODEL),
+ hebrew_prober,
+ logical_hebrew_prober,
+ visual_hebrew_prober,
+ ]
+ self.reset()
diff --git a/third_party/python/pip/pip/_vendor/chardet/sjisprober.py b/third_party/python/pip/pip/_vendor/chardet/sjisprober.py
new file mode 100644
index 0000000000..91df077961
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/sjisprober.py
@@ -0,0 +1,105 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import Union
+
+from .chardistribution import SJISDistributionAnalysis
+from .codingstatemachine import CodingStateMachine
+from .enums import MachineState, ProbingState
+from .jpcntx import SJISContextAnalysis
+from .mbcharsetprober import MultiByteCharSetProber
+from .mbcssm import SJIS_SM_MODEL
+
+
+class SJISProber(MultiByteCharSetProber):
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(SJIS_SM_MODEL)
+ self.distribution_analyzer = SJISDistributionAnalysis()
+ self.context_analyzer = SJISContextAnalysis()
+ self.reset()
+
+ def reset(self) -> None:
+ super().reset()
+ self.context_analyzer.reset()
+
+ @property
+ def charset_name(self) -> str:
+ return self.context_analyzer.charset_name
+
+ @property
+ def language(self) -> str:
+ return "Japanese"
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ assert self.coding_sm is not None
+ assert self.distribution_analyzer is not None
+
+ for i, byte in enumerate(byte_str):
+ coding_state = self.coding_sm.next_state(byte)
+ if coding_state == MachineState.ERROR:
+ self.logger.debug(
+ "%s %s prober hit error at byte %s",
+ self.charset_name,
+ self.language,
+ i,
+ )
+ self._state = ProbingState.NOT_ME
+ break
+ if coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ if coding_state == MachineState.START:
+ char_len = self.coding_sm.get_current_charlen()
+ if i == 0:
+ self._last_char[1] = byte
+ self.context_analyzer.feed(
+ self._last_char[2 - char_len :], char_len
+ )
+ self.distribution_analyzer.feed(self._last_char, char_len)
+ else:
+ self.context_analyzer.feed(
+ byte_str[i + 1 - char_len : i + 3 - char_len], char_len
+ )
+ self.distribution_analyzer.feed(byte_str[i - 1 : i + 1], char_len)
+
+ self._last_char[0] = byte_str[-1]
+
+ if self.state == ProbingState.DETECTING:
+ if self.context_analyzer.got_enough_data() and (
+ self.get_confidence() > self.SHORTCUT_THRESHOLD
+ ):
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self) -> float:
+ assert self.distribution_analyzer is not None
+
+ context_conf = self.context_analyzer.get_confidence()
+ distrib_conf = self.distribution_analyzer.get_confidence()
+ return max(context_conf, distrib_conf)
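+
+# Illustrative note, not part of upstream: the state machine reports a
+# completed character on its final byte, so when char_len == 2 at index i the
+# slice byte_str[i + 1 - char_len : i + 3 - char_len] is byte_str[i - 1 : i + 1],
+# i.e. exactly the two bytes of that character. The i == 0 branch instead
+# rebuilds a character that straddled the previous feed() call from
+# self._last_char, which always holds the final byte of the preceding chunk.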
diff --git a/third_party/python/pip/pip/_vendor/chardet/universaldetector.py b/third_party/python/pip/pip/_vendor/chardet/universaldetector.py
new file mode 100644
index 0000000000..30c441dc28
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/universaldetector.py
@@ -0,0 +1,362 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is Mozilla Universal charset detector code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 2001
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+# Shy Shalom - original C code
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+"""
+Module containing the UniversalDetector detector class, which is the primary
+class a user of ``chardet`` should use.
+
+:author: Mark Pilgrim (initial port to Python)
+:author: Shy Shalom (original C code)
+:author: Dan Blanchard (major refactoring for 3.0)
+:author: Ian Cordasco
+"""
+
+
+import codecs
+import logging
+import re
+from typing import List, Optional, Union
+
+from .charsetgroupprober import CharSetGroupProber
+from .charsetprober import CharSetProber
+from .enums import InputState, LanguageFilter, ProbingState
+from .escprober import EscCharSetProber
+from .latin1prober import Latin1Prober
+from .macromanprober import MacRomanProber
+from .mbcsgroupprober import MBCSGroupProber
+from .resultdict import ResultDict
+from .sbcsgroupprober import SBCSGroupProber
+from .utf1632prober import UTF1632Prober
+
+
+class UniversalDetector:
+ """
+ The ``UniversalDetector`` class underlies the ``chardet.detect`` function
+ and coordinates all of the different charset probers.
+
+ To get a ``dict`` containing an encoding and its confidence, you can simply
+ run:
+
+ .. code::
+
+ u = UniversalDetector()
+ u.feed(some_bytes)
+ u.close()
+ detected = u.result
+
+ """
+
+ MINIMUM_THRESHOLD = 0.20
+ HIGH_BYTE_DETECTOR = re.compile(b"[\x80-\xFF]")
+ ESC_DETECTOR = re.compile(b"(\033|~{)")
+ WIN_BYTE_DETECTOR = re.compile(b"[\x80-\x9F]")
+ ISO_WIN_MAP = {
+ "iso-8859-1": "Windows-1252",
+ "iso-8859-2": "Windows-1250",
+ "iso-8859-5": "Windows-1251",
+ "iso-8859-6": "Windows-1256",
+ "iso-8859-7": "Windows-1253",
+ "iso-8859-8": "Windows-1255",
+ "iso-8859-9": "Windows-1254",
+ "iso-8859-13": "Windows-1257",
+ }
+ # Based on https://encoding.spec.whatwg.org/#names-and-labels
+ # but altered to match Python names for encodings and remove mappings
+ # that break tests.
+ LEGACY_MAP = {
+ "ascii": "Windows-1252",
+ "iso-8859-1": "Windows-1252",
+ "tis-620": "ISO-8859-11",
+ "iso-8859-9": "Windows-1254",
+ "gb2312": "GB18030",
+ "euc-kr": "CP949",
+ "utf-16le": "UTF-16",
+ }
+
+ def __init__(
+ self,
+ lang_filter: LanguageFilter = LanguageFilter.ALL,
+ should_rename_legacy: bool = False,
+ ) -> None:
+ self._esc_charset_prober: Optional[EscCharSetProber] = None
+ self._utf1632_prober: Optional[UTF1632Prober] = None
+ self._charset_probers: List[CharSetProber] = []
+ self.result: ResultDict = {
+ "encoding": None,
+ "confidence": 0.0,
+ "language": None,
+ }
+ self.done = False
+ self._got_data = False
+ self._input_state = InputState.PURE_ASCII
+ self._last_char = b""
+ self.lang_filter = lang_filter
+ self.logger = logging.getLogger(__name__)
+ self._has_win_bytes = False
+ self.should_rename_legacy = should_rename_legacy
+ self.reset()
+
+ @property
+ def input_state(self) -> int:
+ return self._input_state
+
+ @property
+ def has_win_bytes(self) -> bool:
+ return self._has_win_bytes
+
+ @property
+ def charset_probers(self) -> List[CharSetProber]:
+ return self._charset_probers
+
+ def reset(self) -> None:
+ """
+ Reset the UniversalDetector and all of its probers back to their
+ initial states. This is called by ``__init__``, so you only need to
+ call this directly in between analyses of different documents.
+ """
+ self.result = {"encoding": None, "confidence": 0.0, "language": None}
+ self.done = False
+ self._got_data = False
+ self._has_win_bytes = False
+ self._input_state = InputState.PURE_ASCII
+ self._last_char = b""
+ if self._esc_charset_prober:
+ self._esc_charset_prober.reset()
+ if self._utf1632_prober:
+ self._utf1632_prober.reset()
+ for prober in self._charset_probers:
+ prober.reset()
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> None:
+ """
+ Takes a chunk of a document and feeds it through all of the relevant
+ charset probers.
+
+ After calling ``feed``, you can check the value of the ``done``
+ attribute to see if you need to continue feeding the
+ ``UniversalDetector`` more data, or if it has made a prediction
+ (in the ``result`` attribute).
+
+ .. note::
+ You should always call ``close`` when you're done feeding in your
+ document if ``done`` is not already ``True``.
+ """
+ if self.done:
+ return
+
+ if not byte_str:
+ return
+
+ if not isinstance(byte_str, bytearray):
+ byte_str = bytearray(byte_str)
+
+ # First check for known BOMs, since these are guaranteed to be correct
+ if not self._got_data:
+            # If the data starts with a BOM, we know it is UTF
+ if byte_str.startswith(codecs.BOM_UTF8):
+ # EF BB BF UTF-8 with BOM
+ self.result = {
+ "encoding": "UTF-8-SIG",
+ "confidence": 1.0,
+ "language": "",
+ }
+ elif byte_str.startswith((codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)):
+ # FF FE 00 00 UTF-32, little-endian BOM
+ # 00 00 FE FF UTF-32, big-endian BOM
+ self.result = {"encoding": "UTF-32", "confidence": 1.0, "language": ""}
+ elif byte_str.startswith(b"\xFE\xFF\x00\x00"):
+ # FE FF 00 00 UCS-4, unusual octet order BOM (3412)
+ self.result = {
+ # TODO: This encoding is not supported by Python. Should remove?
+ "encoding": "X-ISO-10646-UCS-4-3412",
+ "confidence": 1.0,
+ "language": "",
+ }
+ elif byte_str.startswith(b"\x00\x00\xFF\xFE"):
+ # 00 00 FF FE UCS-4, unusual octet order BOM (2143)
+ self.result = {
+ # TODO: This encoding is not supported by Python. Should remove?
+ "encoding": "X-ISO-10646-UCS-4-2143",
+ "confidence": 1.0,
+ "language": "",
+ }
+ elif byte_str.startswith((codecs.BOM_LE, codecs.BOM_BE)):
+ # FF FE UTF-16, little endian BOM
+ # FE FF UTF-16, big endian BOM
+ self.result = {"encoding": "UTF-16", "confidence": 1.0, "language": ""}
+
+ self._got_data = True
+ if self.result["encoding"] is not None:
+ self.done = True
+ return
+
+            # If none of those matched and we've only seen ASCII so far, check
+ # for high bytes and escape sequences
+ if self._input_state == InputState.PURE_ASCII:
+ if self.HIGH_BYTE_DETECTOR.search(byte_str):
+ self._input_state = InputState.HIGH_BYTE
+ elif (
+ self._input_state == InputState.PURE_ASCII
+ and self.ESC_DETECTOR.search(self._last_char + byte_str)
+ ):
+ self._input_state = InputState.ESC_ASCII
+
+ self._last_char = byte_str[-1:]
+
+        # Next we will look to see if it appears to be either a UTF-16 or
+ # UTF-32 encoding
+ if not self._utf1632_prober:
+ self._utf1632_prober = UTF1632Prober()
+
+ if self._utf1632_prober.state == ProbingState.DETECTING:
+ if self._utf1632_prober.feed(byte_str) == ProbingState.FOUND_IT:
+ self.result = {
+ "encoding": self._utf1632_prober.charset_name,
+ "confidence": self._utf1632_prober.get_confidence(),
+ "language": "",
+ }
+ self.done = True
+ return
+
+ # If we've seen escape sequences, use the EscCharSetProber, which
+ # uses a simple state machine to check for known escape sequences in
+ # HZ and ISO-2022 encodings, since those are the only encodings that
+ # use such sequences.
+ if self._input_state == InputState.ESC_ASCII:
+ if not self._esc_charset_prober:
+ self._esc_charset_prober = EscCharSetProber(self.lang_filter)
+ if self._esc_charset_prober.feed(byte_str) == ProbingState.FOUND_IT:
+ self.result = {
+ "encoding": self._esc_charset_prober.charset_name,
+ "confidence": self._esc_charset_prober.get_confidence(),
+ "language": self._esc_charset_prober.language,
+ }
+ self.done = True
+ # If we've seen high bytes (i.e., those with values greater than 127),
+ # we need to do more complicated checks using all our multi-byte and
+ # single-byte probers that are left. The single-byte probers
+ # use character bigram distributions to determine the encoding, whereas
+ # the multi-byte probers use a combination of character unigram and
+ # bigram distributions.
+ elif self._input_state == InputState.HIGH_BYTE:
+ if not self._charset_probers:
+ self._charset_probers = [MBCSGroupProber(self.lang_filter)]
+ # If we're checking non-CJK encodings, use single-byte prober
+ if self.lang_filter & LanguageFilter.NON_CJK:
+ self._charset_probers.append(SBCSGroupProber())
+ self._charset_probers.append(Latin1Prober())
+ self._charset_probers.append(MacRomanProber())
+ for prober in self._charset_probers:
+ if prober.feed(byte_str) == ProbingState.FOUND_IT:
+ self.result = {
+ "encoding": prober.charset_name,
+ "confidence": prober.get_confidence(),
+ "language": prober.language,
+ }
+ self.done = True
+ break
+ if self.WIN_BYTE_DETECTOR.search(byte_str):
+ self._has_win_bytes = True
+
+ def close(self) -> ResultDict:
+ """
+ Stop analyzing the current document and come up with a final
+ prediction.
+
+ :returns: The ``result`` attribute, a ``dict`` with the keys
+            ``encoding``, ``confidence``, and ``language``.
+ """
+ # Don't bother with checks if we're already done
+ if self.done:
+ return self.result
+ self.done = True
+
+ if not self._got_data:
+ self.logger.debug("no data received!")
+
+ # Default to ASCII if it is all we've seen so far
+ elif self._input_state == InputState.PURE_ASCII:
+ self.result = {"encoding": "ascii", "confidence": 1.0, "language": ""}
+
+ # If we have seen non-ASCII, return the best that met MINIMUM_THRESHOLD
+ elif self._input_state == InputState.HIGH_BYTE:
+ prober_confidence = None
+ max_prober_confidence = 0.0
+ max_prober = None
+ for prober in self._charset_probers:
+ if not prober:
+ continue
+ prober_confidence = prober.get_confidence()
+ if prober_confidence > max_prober_confidence:
+ max_prober_confidence = prober_confidence
+ max_prober = prober
+ if max_prober and (max_prober_confidence > self.MINIMUM_THRESHOLD):
+ charset_name = max_prober.charset_name
+ assert charset_name is not None
+ lower_charset_name = charset_name.lower()
+ confidence = max_prober.get_confidence()
+ # Use Windows encoding name instead of ISO-8859 if we saw any
+ # extra Windows-specific bytes
+ if lower_charset_name.startswith("iso-8859"):
+ if self._has_win_bytes:
+ charset_name = self.ISO_WIN_MAP.get(
+ lower_charset_name, charset_name
+ )
+ # Rename legacy encodings with superset encodings if asked
+ if self.should_rename_legacy:
+ charset_name = self.LEGACY_MAP.get(
+ (charset_name or "").lower(), charset_name
+ )
+ self.result = {
+ "encoding": charset_name,
+ "confidence": confidence,
+ "language": max_prober.language,
+ }
+
+ # Log all prober confidences if none met MINIMUM_THRESHOLD
+ if self.logger.getEffectiveLevel() <= logging.DEBUG:
+ if self.result["encoding"] is None:
+ self.logger.debug("no probers hit minimum threshold")
+ for group_prober in self._charset_probers:
+ if not group_prober:
+ continue
+ if isinstance(group_prober, CharSetGroupProber):
+ for prober in group_prober.probers:
+ self.logger.debug(
+ "%s %s confidence = %s",
+ prober.charset_name,
+ prober.language,
+ prober.get_confidence(),
+ )
+ else:
+ self.logger.debug(
+ "%s %s confidence = %s",
+ group_prober.charset_name,
+ group_prober.language,
+ group_prober.get_confidence(),
+ )
+ return self.result
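+
+# Illustrative usage sketch, not part of the module: feeding a document in
+# chunks and stopping as soon as the detector reaches a decision.
+#
+#     detector = UniversalDetector()
+#     with open("unknown.txt", "rb") as fp:
+#         for chunk in iter(lambda: fp.read(4096), b""):
+#             detector.feed(chunk)
+#             if detector.done:
+#                 break
+#     result = detector.close()
+#     # e.g. {"encoding": "utf-8", "confidence": 0.99, "language": ""}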
diff --git a/third_party/python/pip/pip/_vendor/chardet/utf1632prober.py b/third_party/python/pip/pip/_vendor/chardet/utf1632prober.py
new file mode 100644
index 0000000000..6bdec63d68
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/utf1632prober.py
@@ -0,0 +1,225 @@
+######################## BEGIN LICENSE BLOCK ########################
+#
+# Contributor(s):
+# Jason Zavaglia
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+from typing import List, Union
+
+from .charsetprober import CharSetProber
+from .enums import ProbingState
+
+
+class UTF1632Prober(CharSetProber):
+ """
+ This class simply looks for occurrences of zero bytes, and infers
+    whether the file is UTF16 or UTF32 (little-endian or big-endian).
+    For instance, files looking like ( \0 \0 \0 [nonzero] )+
+    are very likely to be UTF32BE. Files looking like ( \0 [nonzero] )+
+ may be guessed to be UTF16BE, and inversely for little-endian varieties.
+ """
+
+ # how many logical characters to scan before feeling confident of prediction
+ MIN_CHARS_FOR_DETECTION = 20
+    # the fixed ratio of zeros or non-zeros expected at each byte position modulo 4.
+ EXPECTED_RATIO = 0.94
+
+ def __init__(self) -> None:
+ super().__init__()
+ self.position = 0
+ self.zeros_at_mod = [0] * 4
+ self.nonzeros_at_mod = [0] * 4
+ self._state = ProbingState.DETECTING
+ self.quad = [0, 0, 0, 0]
+ self.invalid_utf16be = False
+ self.invalid_utf16le = False
+ self.invalid_utf32be = False
+ self.invalid_utf32le = False
+ self.first_half_surrogate_pair_detected_16be = False
+ self.first_half_surrogate_pair_detected_16le = False
+ self.reset()
+
+ def reset(self) -> None:
+ super().reset()
+ self.position = 0
+ self.zeros_at_mod = [0] * 4
+ self.nonzeros_at_mod = [0] * 4
+ self._state = ProbingState.DETECTING
+ self.invalid_utf16be = False
+ self.invalid_utf16le = False
+ self.invalid_utf32be = False
+ self.invalid_utf32le = False
+ self.first_half_surrogate_pair_detected_16be = False
+ self.first_half_surrogate_pair_detected_16le = False
+ self.quad = [0, 0, 0, 0]
+
+ @property
+ def charset_name(self) -> str:
+ if self.is_likely_utf32be():
+ return "utf-32be"
+ if self.is_likely_utf32le():
+ return "utf-32le"
+ if self.is_likely_utf16be():
+ return "utf-16be"
+ if self.is_likely_utf16le():
+ return "utf-16le"
+ # default to something valid
+ return "utf-16"
+
+ @property
+ def language(self) -> str:
+ return ""
+
+ def approx_32bit_chars(self) -> float:
+ return max(1.0, self.position / 4.0)
+
+ def approx_16bit_chars(self) -> float:
+ return max(1.0, self.position / 2.0)
+
+ def is_likely_utf32be(self) -> bool:
+ approx_chars = self.approx_32bit_chars()
+ return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
+ self.zeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
+ and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
+ and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
+ and self.nonzeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
+ and not self.invalid_utf32be
+ )
+
+ def is_likely_utf32le(self) -> bool:
+ approx_chars = self.approx_32bit_chars()
+ return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
+ self.nonzeros_at_mod[0] / approx_chars > self.EXPECTED_RATIO
+ and self.zeros_at_mod[1] / approx_chars > self.EXPECTED_RATIO
+ and self.zeros_at_mod[2] / approx_chars > self.EXPECTED_RATIO
+ and self.zeros_at_mod[3] / approx_chars > self.EXPECTED_RATIO
+ and not self.invalid_utf32le
+ )
+
+ def is_likely_utf16be(self) -> bool:
+ approx_chars = self.approx_16bit_chars()
+ return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
+ (self.nonzeros_at_mod[1] + self.nonzeros_at_mod[3]) / approx_chars
+ > self.EXPECTED_RATIO
+ and (self.zeros_at_mod[0] + self.zeros_at_mod[2]) / approx_chars
+ > self.EXPECTED_RATIO
+ and not self.invalid_utf16be
+ )
+
+ def is_likely_utf16le(self) -> bool:
+ approx_chars = self.approx_16bit_chars()
+ return approx_chars >= self.MIN_CHARS_FOR_DETECTION and (
+ (self.nonzeros_at_mod[0] + self.nonzeros_at_mod[2]) / approx_chars
+ > self.EXPECTED_RATIO
+ and (self.zeros_at_mod[1] + self.zeros_at_mod[3]) / approx_chars
+ > self.EXPECTED_RATIO
+ and not self.invalid_utf16le
+ )
+
+ def validate_utf32_characters(self, quad: List[int]) -> None:
+ """
+ Validate if the quad of bytes is valid UTF-32.
+
+ UTF-32 is valid in the range 0x00000000 - 0x0010FFFF
+ excluding 0x0000D800 - 0x0000DFFF
+
+ https://en.wikipedia.org/wiki/UTF-32
+ """
+ if (
+ quad[0] != 0
+ or quad[1] > 0x10
+ or (quad[0] == 0 and quad[1] == 0 and 0xD8 <= quad[2] <= 0xDF)
+ ):
+ self.invalid_utf32be = True
+ if (
+ quad[3] != 0
+ or quad[2] > 0x10
+ or (quad[3] == 0 and quad[2] == 0 and 0xD8 <= quad[1] <= 0xDF)
+ ):
+ self.invalid_utf32le = True
+
+ def validate_utf16_characters(self, pair: List[int]) -> None:
+ """
+ Validate if the pair of bytes is valid UTF-16.
+
+        UTF-16 is valid in the range 0x0000 - 0xFFFF excluding 0xD800 - 0xDFFF
+ with an exception for surrogate pairs, which must be in the range
+ 0xD800-0xDBFF followed by 0xDC00-0xDFFF
+
+ https://en.wikipedia.org/wiki/UTF-16
+ """
+ if not self.first_half_surrogate_pair_detected_16be:
+ if 0xD8 <= pair[0] <= 0xDB:
+ self.first_half_surrogate_pair_detected_16be = True
+ elif 0xDC <= pair[0] <= 0xDF:
+ self.invalid_utf16be = True
+ else:
+ if 0xDC <= pair[0] <= 0xDF:
+ self.first_half_surrogate_pair_detected_16be = False
+ else:
+ self.invalid_utf16be = True
+
+ if not self.first_half_surrogate_pair_detected_16le:
+ if 0xD8 <= pair[1] <= 0xDB:
+ self.first_half_surrogate_pair_detected_16le = True
+ elif 0xDC <= pair[1] <= 0xDF:
+ self.invalid_utf16le = True
+ else:
+ if 0xDC <= pair[1] <= 0xDF:
+ self.first_half_surrogate_pair_detected_16le = False
+ else:
+ self.invalid_utf16le = True
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ for c in byte_str:
+ mod4 = self.position % 4
+ self.quad[mod4] = c
+ if mod4 == 3:
+ self.validate_utf32_characters(self.quad)
+ self.validate_utf16_characters(self.quad[0:2])
+ self.validate_utf16_characters(self.quad[2:4])
+ if c == 0:
+ self.zeros_at_mod[mod4] += 1
+ else:
+ self.nonzeros_at_mod[mod4] += 1
+ self.position += 1
+ return self.state
+
+ @property
+ def state(self) -> ProbingState:
+ if self._state in {ProbingState.NOT_ME, ProbingState.FOUND_IT}:
+ # terminal, decided states
+ return self._state
+ if self.get_confidence() > 0.80:
+ self._state = ProbingState.FOUND_IT
+ elif self.position > 4 * 1024:
+ # if we get to 4kb into the file, and we can't conclude it's UTF,
+ # let's give up
+ self._state = ProbingState.NOT_ME
+ return self._state
+
+ def get_confidence(self) -> float:
+ return (
+ 0.85
+ if (
+ self.is_likely_utf16le()
+ or self.is_likely_utf16be()
+ or self.is_likely_utf32le()
+ or self.is_likely_utf32be()
+ )
+ else 0.00
+ )
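+
+# Worked example (illustrative): 20 ASCII characters encoded as UTF-32BE are
+# 80 bytes of the form 00 00 00 xx. After feeding them, zeros_at_mod is
+# [20, 20, 20, 0] and nonzeros_at_mod is [0, 0, 0, 20]; approx_32bit_chars()
+# is 20.0, every checked ratio is 1.0 > EXPECTED_RATIO, and no invalid
+# sequences were seen, so is_likely_utf32be() is True and get_confidence()
+# returns 0.85.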
diff --git a/third_party/python/pip/pip/_vendor/chardet/utf8prober.py b/third_party/python/pip/pip/_vendor/chardet/utf8prober.py
new file mode 100644
index 0000000000..d96354d97c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/utf8prober.py
@@ -0,0 +1,82 @@
+######################## BEGIN LICENSE BLOCK ########################
+# The Original Code is mozilla.org code.
+#
+# The Initial Developer of the Original Code is
+# Netscape Communications Corporation.
+# Portions created by the Initial Developer are Copyright (C) 1998
+# the Initial Developer. All Rights Reserved.
+#
+# Contributor(s):
+# Mark Pilgrim - port to Python
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+# 02110-1301 USA
+######################### END LICENSE BLOCK #########################
+
+from typing import Union
+
+from .charsetprober import CharSetProber
+from .codingstatemachine import CodingStateMachine
+from .enums import MachineState, ProbingState
+from .mbcssm import UTF8_SM_MODEL
+
+
+class UTF8Prober(CharSetProber):
+ ONE_CHAR_PROB = 0.5
+
+ def __init__(self) -> None:
+ super().__init__()
+ self.coding_sm = CodingStateMachine(UTF8_SM_MODEL)
+ self._num_mb_chars = 0
+ self.reset()
+
+ def reset(self) -> None:
+ super().reset()
+ self.coding_sm.reset()
+ self._num_mb_chars = 0
+
+ @property
+ def charset_name(self) -> str:
+ return "utf-8"
+
+ @property
+ def language(self) -> str:
+ return ""
+
+ def feed(self, byte_str: Union[bytes, bytearray]) -> ProbingState:
+ for c in byte_str:
+ coding_state = self.coding_sm.next_state(c)
+ if coding_state == MachineState.ERROR:
+ self._state = ProbingState.NOT_ME
+ break
+ if coding_state == MachineState.ITS_ME:
+ self._state = ProbingState.FOUND_IT
+ break
+ if coding_state == MachineState.START:
+ if self.coding_sm.get_current_charlen() >= 2:
+ self._num_mb_chars += 1
+
+ if self.state == ProbingState.DETECTING:
+ if self.get_confidence() > self.SHORTCUT_THRESHOLD:
+ self._state = ProbingState.FOUND_IT
+
+ return self.state
+
+ def get_confidence(self) -> float:
+ unlike = 0.99
+ if self._num_mb_chars < 6:
+ unlike *= self.ONE_CHAR_PROB**self._num_mb_chars
+ return 1.0 - unlike
+ return unlike
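+
+# Worked example (illustrative): each multi-byte sequence halves the estimated
+# chance that the input is *not* UTF-8. After three multi-byte characters,
+# unlike = 0.99 * 0.5**3 = 0.12375, so the confidence is ~0.876. From six
+# multi-byte characters onward, the prober returns 0.99 directly.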
diff --git a/third_party/python/pip/pip/_vendor/chardet/version.py b/third_party/python/pip/pip/_vendor/chardet/version.py
new file mode 100644
index 0000000000..c5e9d85cd7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/chardet/version.py
@@ -0,0 +1,9 @@
+"""
+This module exists only to simplify retrieving the version number of chardet
+from within setuptools and from chardet subpackages.
+
+:author: Dan Blanchard (dan.blanchard@gmail.com)
+"""
+
+__version__ = "5.1.0"
+VERSION = __version__.split(".")
diff --git a/third_party/python/pip/pip/_vendor/colorama/__init__.py b/third_party/python/pip/pip/_vendor/colorama/__init__.py
new file mode 100644
index 0000000000..383101cdb3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/__init__.py
@@ -0,0 +1,7 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+from .initialise import init, deinit, reinit, colorama_text, just_fix_windows_console
+from .ansi import Fore, Back, Style, Cursor
+from .ansitowin32 import AnsiToWin32
+
+__version__ = '0.4.6'
+
diff --git a/third_party/python/pip/pip/_vendor/colorama/ansi.py b/third_party/python/pip/pip/_vendor/colorama/ansi.py
new file mode 100644
index 0000000000..11ec695ff7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/ansi.py
@@ -0,0 +1,102 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+'''
+This module generates ANSI character codes for printing colors to terminals.
+See: http://en.wikipedia.org/wiki/ANSI_escape_code
+'''
+
+CSI = '\033['
+OSC = '\033]'
+BEL = '\a'
+
+
+def code_to_chars(code):
+ return CSI + str(code) + 'm'
+
+def set_title(title):
+ return OSC + '2;' + title + BEL
+
+def clear_screen(mode=2):
+ return CSI + str(mode) + 'J'
+
+def clear_line(mode=2):
+ return CSI + str(mode) + 'K'
+
+
+class AnsiCodes(object):
+ def __init__(self):
+        # The subclasses declare class attributes, which are numbers.
+        # Upon instantiation we define instance attributes, which are the same
+        # as the class attributes but wrapped with the ANSI escape sequence.
+ for name in dir(self):
+ if not name.startswith('_'):
+ value = getattr(self, name)
+ setattr(self, name, code_to_chars(value))
+
+
+class AnsiCursor(object):
+ def UP(self, n=1):
+ return CSI + str(n) + 'A'
+ def DOWN(self, n=1):
+ return CSI + str(n) + 'B'
+ def FORWARD(self, n=1):
+ return CSI + str(n) + 'C'
+ def BACK(self, n=1):
+ return CSI + str(n) + 'D'
+ def POS(self, x=1, y=1):
+ return CSI + str(y) + ';' + str(x) + 'H'
+
+
+class AnsiFore(AnsiCodes):
+ BLACK = 30
+ RED = 31
+ GREEN = 32
+ YELLOW = 33
+ BLUE = 34
+ MAGENTA = 35
+ CYAN = 36
+ WHITE = 37
+ RESET = 39
+
+ # These are fairly well supported, but not part of the standard.
+ LIGHTBLACK_EX = 90
+ LIGHTRED_EX = 91
+ LIGHTGREEN_EX = 92
+ LIGHTYELLOW_EX = 93
+ LIGHTBLUE_EX = 94
+ LIGHTMAGENTA_EX = 95
+ LIGHTCYAN_EX = 96
+ LIGHTWHITE_EX = 97
+
+
+class AnsiBack(AnsiCodes):
+ BLACK = 40
+ RED = 41
+ GREEN = 42
+ YELLOW = 43
+ BLUE = 44
+ MAGENTA = 45
+ CYAN = 46
+ WHITE = 47
+ RESET = 49
+
+ # These are fairly well supported, but not part of the standard.
+ LIGHTBLACK_EX = 100
+ LIGHTRED_EX = 101
+ LIGHTGREEN_EX = 102
+ LIGHTYELLOW_EX = 103
+ LIGHTBLUE_EX = 104
+ LIGHTMAGENTA_EX = 105
+ LIGHTCYAN_EX = 106
+ LIGHTWHITE_EX = 107
+
+
+class AnsiStyle(AnsiCodes):
+ BRIGHT = 1
+ DIM = 2
+ NORMAL = 22
+ RESET_ALL = 0
+
+Fore = AnsiFore()
+Back = AnsiBack()
+Style = AnsiStyle()
+Cursor = AnsiCursor()
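+
+# Illustrative usage, not part of the module: after AnsiCodes.__init__ wraps
+# each numeric class attribute, the singletons above hold ready-made escape
+# strings.
+#
+#     Fore.RED            # '\033[31m'
+#     Back.GREEN          # '\033[42m'
+#     Style.RESET_ALL     # '\033[0m'
+#     Cursor.POS(10, 5)   # '\033[5;10H' (row y comes before column x)
+#     print(Fore.RED + 'error' + Style.RESET_ALL)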
diff --git a/third_party/python/pip/pip/_vendor/colorama/ansitowin32.py b/third_party/python/pip/pip/_vendor/colorama/ansitowin32.py
new file mode 100644
index 0000000000..abf209e60c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/ansitowin32.py
@@ -0,0 +1,277 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import re
+import sys
+import os
+
+from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style, BEL
+from .winterm import enable_vt_processing, WinTerm, WinColor, WinStyle
+from .win32 import windll, winapi_test
+
+
+winterm = None
+if windll is not None:
+ winterm = WinTerm()
+
+
+class StreamWrapper(object):
+ '''
+ Wraps a stream (such as stdout), acting as a transparent proxy for all
+    attribute access apart from the 'write()' method, which is delegated to our
+ Converter instance.
+ '''
+ def __init__(self, wrapped, converter):
+ # double-underscore everything to prevent clashes with names of
+ # attributes on the wrapped stream object.
+ self.__wrapped = wrapped
+ self.__convertor = converter
+
+ def __getattr__(self, name):
+ return getattr(self.__wrapped, name)
+
+ def __enter__(self, *args, **kwargs):
+ # special method lookup bypasses __getattr__/__getattribute__, see
+ # https://stackoverflow.com/questions/12632894/why-doesnt-getattr-work-with-exit
+ # thus, contextlib magic methods are not proxied via __getattr__
+ return self.__wrapped.__enter__(*args, **kwargs)
+
+ def __exit__(self, *args, **kwargs):
+ return self.__wrapped.__exit__(*args, **kwargs)
+
+ def __setstate__(self, state):
+ self.__dict__ = state
+
+ def __getstate__(self):
+ return self.__dict__
+
+ def write(self, text):
+ self.__convertor.write(text)
+
+ def isatty(self):
+ stream = self.__wrapped
+ if 'PYCHARM_HOSTED' in os.environ:
+ if stream is not None and (stream is sys.__stdout__ or stream is sys.__stderr__):
+ return True
+ try:
+ stream_isatty = stream.isatty
+ except AttributeError:
+ return False
+ else:
+ return stream_isatty()
+
+ @property
+ def closed(self):
+ stream = self.__wrapped
+ try:
+ return stream.closed
+ # AttributeError in the case that the stream doesn't support being closed
+ # ValueError for the case that the stream has already been detached when atexit runs
+ except (AttributeError, ValueError):
+ return True
+
+
+class AnsiToWin32(object):
+ '''
+ Implements a 'write()' method which, on Windows, will strip ANSI character
+ sequences from the text, and if outputting to a tty, will convert them into
+ win32 function calls.
+ '''
+ ANSI_CSI_RE = re.compile('\001?\033\\[((?:\\d|;)*)([a-zA-Z])\002?') # Control Sequence Introducer
+ ANSI_OSC_RE = re.compile('\001?\033\\]([^\a]*)(\a)\002?') # Operating System Command
+
+ def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
+ # The wrapped stream (normally sys.stdout or sys.stderr)
+ self.wrapped = wrapped
+
+ # should we reset colors to defaults after every .write()
+ self.autoreset = autoreset
+
+ # create the proxy wrapping our output stream
+ self.stream = StreamWrapper(wrapped, self)
+
+ on_windows = os.name == 'nt'
+ # We test if the WinAPI works, because even if we are on Windows
+ # we may be using a terminal that doesn't support the WinAPI
+ # (e.g. Cygwin Terminal). In this case it's up to the terminal
+ # to support the ANSI codes.
+ conversion_supported = on_windows and winapi_test()
+ try:
+ fd = wrapped.fileno()
+ except Exception:
+ fd = -1
+ system_has_native_ansi = not on_windows or enable_vt_processing(fd)
+ have_tty = not self.stream.closed and self.stream.isatty()
+ need_conversion = conversion_supported and not system_has_native_ansi
+
+ # should we strip ANSI sequences from our output?
+ if strip is None:
+ strip = need_conversion or not have_tty
+ self.strip = strip
+
+        # should we convert ANSI sequences into win32 calls?
+ if convert is None:
+ convert = need_conversion and have_tty
+ self.convert = convert
+
+ # dict of ansi codes to win32 functions and parameters
+ self.win32_calls = self.get_win32_calls()
+
+ # are we wrapping stderr?
+ self.on_stderr = self.wrapped is sys.stderr
+
+ def should_wrap(self):
+ '''
+ True if this class is actually needed. If false, then the output
+ stream will not be affected, nor will win32 calls be issued, so
+ wrapping stdout is not actually required. This will generally be
+ False on non-Windows platforms, unless optional functionality like
+ autoreset has been requested using kwargs to init()
+ '''
+ return self.convert or self.strip or self.autoreset
+
+ def get_win32_calls(self):
+ if self.convert and winterm:
+ return {
+ AnsiStyle.RESET_ALL: (winterm.reset_all, ),
+ AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
+ AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
+ AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
+ AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
+ AnsiFore.RED: (winterm.fore, WinColor.RED),
+ AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
+ AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
+ AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
+ AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
+ AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
+ AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
+ AnsiFore.RESET: (winterm.fore, ),
+ AnsiFore.LIGHTBLACK_EX: (winterm.fore, WinColor.BLACK, True),
+ AnsiFore.LIGHTRED_EX: (winterm.fore, WinColor.RED, True),
+ AnsiFore.LIGHTGREEN_EX: (winterm.fore, WinColor.GREEN, True),
+ AnsiFore.LIGHTYELLOW_EX: (winterm.fore, WinColor.YELLOW, True),
+ AnsiFore.LIGHTBLUE_EX: (winterm.fore, WinColor.BLUE, True),
+ AnsiFore.LIGHTMAGENTA_EX: (winterm.fore, WinColor.MAGENTA, True),
+ AnsiFore.LIGHTCYAN_EX: (winterm.fore, WinColor.CYAN, True),
+ AnsiFore.LIGHTWHITE_EX: (winterm.fore, WinColor.GREY, True),
+ AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
+ AnsiBack.RED: (winterm.back, WinColor.RED),
+ AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
+ AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
+ AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
+ AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
+ AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
+ AnsiBack.WHITE: (winterm.back, WinColor.GREY),
+ AnsiBack.RESET: (winterm.back, ),
+ AnsiBack.LIGHTBLACK_EX: (winterm.back, WinColor.BLACK, True),
+ AnsiBack.LIGHTRED_EX: (winterm.back, WinColor.RED, True),
+ AnsiBack.LIGHTGREEN_EX: (winterm.back, WinColor.GREEN, True),
+ AnsiBack.LIGHTYELLOW_EX: (winterm.back, WinColor.YELLOW, True),
+ AnsiBack.LIGHTBLUE_EX: (winterm.back, WinColor.BLUE, True),
+ AnsiBack.LIGHTMAGENTA_EX: (winterm.back, WinColor.MAGENTA, True),
+ AnsiBack.LIGHTCYAN_EX: (winterm.back, WinColor.CYAN, True),
+ AnsiBack.LIGHTWHITE_EX: (winterm.back, WinColor.GREY, True),
+ }
+ return dict()
+
+ def write(self, text):
+ if self.strip or self.convert:
+ self.write_and_convert(text)
+ else:
+ self.wrapped.write(text)
+ self.wrapped.flush()
+ if self.autoreset:
+ self.reset_all()
+
+
+ def reset_all(self):
+ if self.convert:
+ self.call_win32('m', (0,))
+ elif not self.strip and not self.stream.closed:
+ self.wrapped.write(Style.RESET_ALL)
+
+
+ def write_and_convert(self, text):
+ '''
+ Write the given text to our wrapped stream, stripping any ANSI
+ sequences from the text, and optionally converting them into win32
+ calls.
+ '''
+ cursor = 0
+ text = self.convert_osc(text)
+ for match in self.ANSI_CSI_RE.finditer(text):
+ start, end = match.span()
+ self.write_plain_text(text, cursor, start)
+ self.convert_ansi(*match.groups())
+ cursor = end
+ self.write_plain_text(text, cursor, len(text))
+
+
+ def write_plain_text(self, text, start, end):
+ if start < end:
+ self.wrapped.write(text[start:end])
+ self.wrapped.flush()
+
+
+ def convert_ansi(self, paramstring, command):
+ if self.convert:
+ params = self.extract_params(command, paramstring)
+ self.call_win32(command, params)
+
+
+ def extract_params(self, command, paramstring):
+ if command in 'Hf':
+ params = tuple(int(p) if len(p) != 0 else 1 for p in paramstring.split(';'))
+ while len(params) < 2:
+ # defaults:
+ params = params + (1,)
+ else:
+ params = tuple(int(p) for p in paramstring.split(';') if len(p) != 0)
+ if len(params) == 0:
+ # defaults:
+ if command in 'JKm':
+ params = (0,)
+ elif command in 'ABCD':
+ params = (1,)
+
+ return params
+
+
+ def call_win32(self, command, params):
+ if command == 'm':
+ for param in params:
+ if param in self.win32_calls:
+ func_args = self.win32_calls[param]
+ func = func_args[0]
+ args = func_args[1:]
+ kwargs = dict(on_stderr=self.on_stderr)
+ func(*args, **kwargs)
+ elif command in 'J':
+ winterm.erase_screen(params[0], on_stderr=self.on_stderr)
+ elif command in 'K':
+ winterm.erase_line(params[0], on_stderr=self.on_stderr)
+ elif command in 'Hf': # cursor position - absolute
+ winterm.set_cursor_position(params, on_stderr=self.on_stderr)
+ elif command in 'ABCD': # cursor position - relative
+ n = params[0]
+ # A - up, B - down, C - forward, D - back
+ x, y = {'A': (0, -n), 'B': (0, n), 'C': (n, 0), 'D': (-n, 0)}[command]
+ winterm.cursor_adjust(x, y, on_stderr=self.on_stderr)
+
+
+ def convert_osc(self, text):
+ for match in self.ANSI_OSC_RE.finditer(text):
+ start, end = match.span()
+ text = text[:start] + text[end:]
+ paramstring, command = match.groups()
+ if command == BEL:
+ if paramstring.count(";") == 1:
+ params = paramstring.split(";")
+ # 0 - change title and icon (we will only change title)
+ # 1 - change icon (we don't support this)
+ # 2 - change title
+ if params[0] in '02':
+ winterm.set_title(params[1])
+ return text
+
+
+ def flush(self):
+ self.wrapped.flush()
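+
+# Worked example (illustrative): for the CSI sequence '\033[1;31m',
+# ANSI_CSI_RE yields paramstring '1;31' and command 'm'; extract_params()
+# returns (1, 31), and call_win32('m', (1, 31)) dispatches to
+# winterm.style(WinStyle.BRIGHT) and winterm.fore(WinColor.RED). A bare
+# '\033[H' has an empty paramstring, so extract_params() pads it to the
+# default cursor position (1, 1).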
diff --git a/third_party/python/pip/pip/_vendor/colorama/initialise.py b/third_party/python/pip/pip/_vendor/colorama/initialise.py
new file mode 100644
index 0000000000..d5fd4b71fe
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/initialise.py
@@ -0,0 +1,121 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import atexit
+import contextlib
+import sys
+
+from .ansitowin32 import AnsiToWin32
+
+
+def _wipe_internal_state_for_tests():
+ global orig_stdout, orig_stderr
+ orig_stdout = None
+ orig_stderr = None
+
+ global wrapped_stdout, wrapped_stderr
+ wrapped_stdout = None
+ wrapped_stderr = None
+
+ global atexit_done
+ atexit_done = False
+
+ global fixed_windows_console
+ fixed_windows_console = False
+
+ try:
+ # no-op if it wasn't registered
+ atexit.unregister(reset_all)
+ except AttributeError:
+ # python 2: no atexit.unregister. Oh well, we did our best.
+ pass
+
+
+def reset_all():
+ if AnsiToWin32 is not None: # Issue #74: objects might become None at exit
+ AnsiToWin32(orig_stdout).reset_all()
+
+
+def init(autoreset=False, convert=None, strip=None, wrap=True):
+
+ if not wrap and any([autoreset, convert, strip]):
+ raise ValueError('wrap=False conflicts with any other arg=True')
+
+ global wrapped_stdout, wrapped_stderr
+ global orig_stdout, orig_stderr
+
+ orig_stdout = sys.stdout
+ orig_stderr = sys.stderr
+
+ if sys.stdout is None:
+ wrapped_stdout = None
+ else:
+ sys.stdout = wrapped_stdout = \
+ wrap_stream(orig_stdout, convert, strip, autoreset, wrap)
+ if sys.stderr is None:
+ wrapped_stderr = None
+ else:
+ sys.stderr = wrapped_stderr = \
+ wrap_stream(orig_stderr, convert, strip, autoreset, wrap)
+
+ global atexit_done
+ if not atexit_done:
+ atexit.register(reset_all)
+ atexit_done = True
+
+
+def deinit():
+ if orig_stdout is not None:
+ sys.stdout = orig_stdout
+ if orig_stderr is not None:
+ sys.stderr = orig_stderr
+
+
+def just_fix_windows_console():
+ global fixed_windows_console
+
+ if sys.platform != "win32":
+ return
+ if fixed_windows_console:
+ return
+ if wrapped_stdout is not None or wrapped_stderr is not None:
+ # Someone already ran init() and it did stuff, so we won't second-guess them
+ return
+
+ # On newer versions of Windows, AnsiToWin32.__init__ will implicitly enable the
+ # native ANSI support in the console as a side-effect. We only need to actually
+ # replace sys.stdout/stderr if we're in the old-style conversion mode.
+ new_stdout = AnsiToWin32(sys.stdout, convert=None, strip=None, autoreset=False)
+ if new_stdout.convert:
+ sys.stdout = new_stdout
+ new_stderr = AnsiToWin32(sys.stderr, convert=None, strip=None, autoreset=False)
+ if new_stderr.convert:
+ sys.stderr = new_stderr
+
+ fixed_windows_console = True
+
+@contextlib.contextmanager
+def colorama_text(*args, **kwargs):
+ init(*args, **kwargs)
+ try:
+ yield
+ finally:
+ deinit()
+
+
+def reinit():
+ if wrapped_stdout is not None:
+ sys.stdout = wrapped_stdout
+ if wrapped_stderr is not None:
+ sys.stderr = wrapped_stderr
+
+
+def wrap_stream(stream, convert, strip, autoreset, wrap):
+ if wrap:
+ wrapper = AnsiToWin32(stream,
+ convert=convert, strip=strip, autoreset=autoreset)
+ if wrapper.should_wrap():
+ stream = wrapper.stream
+ return stream
+
+
+# Use this for initial setup as well, to reduce code duplication
+_wipe_internal_state_for_tests()
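+
+# Illustrative usage, not part of the module: init() replaces sys.stdout and
+# sys.stderr with wrapping proxies when needed, and deinit() restores them;
+# colorama_text() scopes the same setup to a with-block.
+#
+#     from colorama import init, deinit, Fore
+#     init(autoreset=True)
+#     print(Fore.CYAN + 'wrapped')  # style resets after each write
+#     deinit()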
diff --git a/third_party/python/pip/pip/_vendor/colorama/tests/__init__.py b/third_party/python/pip/pip/_vendor/colorama/tests/__init__.py
new file mode 100644
index 0000000000..8c5661e93a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/tests/__init__.py
@@ -0,0 +1 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
diff --git a/third_party/python/pip/pip/_vendor/colorama/tests/ansi_test.py b/third_party/python/pip/pip/_vendor/colorama/tests/ansi_test.py
new file mode 100644
index 0000000000..0a20c80f88
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/tests/ansi_test.py
@@ -0,0 +1,76 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import sys
+from unittest import TestCase, main
+
+from ..ansi import Back, Fore, Style
+from ..ansitowin32 import AnsiToWin32
+
+stdout_orig = sys.stdout
+stderr_orig = sys.stderr
+
+
+class AnsiTest(TestCase):
+
+ def setUp(self):
+ # sanity check: stdout should be a file or StringIO object.
+ # It will only be AnsiToWin32 if init() has previously wrapped it
+ self.assertNotEqual(type(sys.stdout), AnsiToWin32)
+ self.assertNotEqual(type(sys.stderr), AnsiToWin32)
+
+ def tearDown(self):
+ sys.stdout = stdout_orig
+ sys.stderr = stderr_orig
+
+
+ def testForeAttributes(self):
+ self.assertEqual(Fore.BLACK, '\033[30m')
+ self.assertEqual(Fore.RED, '\033[31m')
+ self.assertEqual(Fore.GREEN, '\033[32m')
+ self.assertEqual(Fore.YELLOW, '\033[33m')
+ self.assertEqual(Fore.BLUE, '\033[34m')
+ self.assertEqual(Fore.MAGENTA, '\033[35m')
+ self.assertEqual(Fore.CYAN, '\033[36m')
+ self.assertEqual(Fore.WHITE, '\033[37m')
+ self.assertEqual(Fore.RESET, '\033[39m')
+
+ # Check the light, extended versions.
+ self.assertEqual(Fore.LIGHTBLACK_EX, '\033[90m')
+ self.assertEqual(Fore.LIGHTRED_EX, '\033[91m')
+ self.assertEqual(Fore.LIGHTGREEN_EX, '\033[92m')
+ self.assertEqual(Fore.LIGHTYELLOW_EX, '\033[93m')
+ self.assertEqual(Fore.LIGHTBLUE_EX, '\033[94m')
+ self.assertEqual(Fore.LIGHTMAGENTA_EX, '\033[95m')
+ self.assertEqual(Fore.LIGHTCYAN_EX, '\033[96m')
+ self.assertEqual(Fore.LIGHTWHITE_EX, '\033[97m')
+
+
+ def testBackAttributes(self):
+ self.assertEqual(Back.BLACK, '\033[40m')
+ self.assertEqual(Back.RED, '\033[41m')
+ self.assertEqual(Back.GREEN, '\033[42m')
+ self.assertEqual(Back.YELLOW, '\033[43m')
+ self.assertEqual(Back.BLUE, '\033[44m')
+ self.assertEqual(Back.MAGENTA, '\033[45m')
+ self.assertEqual(Back.CYAN, '\033[46m')
+ self.assertEqual(Back.WHITE, '\033[47m')
+ self.assertEqual(Back.RESET, '\033[49m')
+
+ # Check the light, extended versions.
+ self.assertEqual(Back.LIGHTBLACK_EX, '\033[100m')
+ self.assertEqual(Back.LIGHTRED_EX, '\033[101m')
+ self.assertEqual(Back.LIGHTGREEN_EX, '\033[102m')
+ self.assertEqual(Back.LIGHTYELLOW_EX, '\033[103m')
+ self.assertEqual(Back.LIGHTBLUE_EX, '\033[104m')
+ self.assertEqual(Back.LIGHTMAGENTA_EX, '\033[105m')
+ self.assertEqual(Back.LIGHTCYAN_EX, '\033[106m')
+ self.assertEqual(Back.LIGHTWHITE_EX, '\033[107m')
+
+
+ def testStyleAttributes(self):
+ self.assertEqual(Style.DIM, '\033[2m')
+ self.assertEqual(Style.NORMAL, '\033[22m')
+ self.assertEqual(Style.BRIGHT, '\033[1m')
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/python/pip/pip/_vendor/colorama/tests/ansitowin32_test.py b/third_party/python/pip/pip/_vendor/colorama/tests/ansitowin32_test.py
new file mode 100644
index 0000000000..91ca551f97
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/tests/ansitowin32_test.py
@@ -0,0 +1,294 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+from io import StringIO, TextIOWrapper
+from unittest import TestCase, main
+try:
+ from contextlib import ExitStack
+except ImportError:
+ # python 2
+ from contextlib2 import ExitStack
+
+try:
+ from unittest.mock import MagicMock, Mock, patch
+except ImportError:
+ from mock import MagicMock, Mock, patch
+
+from ..ansitowin32 import AnsiToWin32, StreamWrapper
+from ..win32 import ENABLE_VIRTUAL_TERMINAL_PROCESSING
+from .utils import osname
+
+
+class StreamWrapperTest(TestCase):
+
+ def testIsAProxy(self):
+ mockStream = Mock()
+ wrapper = StreamWrapper(mockStream, None)
+ self.assertTrue( wrapper.random_attr is mockStream.random_attr )
+
+ def testDelegatesWrite(self):
+ mockStream = Mock()
+ mockConverter = Mock()
+ wrapper = StreamWrapper(mockStream, mockConverter)
+ wrapper.write('hello')
+        self.assertEqual(mockConverter.write.call_args, (('hello',), {}))
+
+ def testDelegatesContext(self):
+ mockConverter = Mock()
+ s = StringIO()
+ with StreamWrapper(s, mockConverter) as fp:
+ fp.write(u'hello')
+ self.assertTrue(s.closed)
+
+ def testProxyNoContextManager(self):
+ mockStream = MagicMock()
+ mockStream.__enter__.side_effect = AttributeError()
+ mockConverter = Mock()
+ with self.assertRaises(AttributeError):
+ with StreamWrapper(mockStream, mockConverter) as wrapper:
+ wrapper.write('hello')
+
+ def test_closed_shouldnt_raise_on_closed_stream(self):
+ stream = StringIO()
+ stream.close()
+ wrapper = StreamWrapper(stream, None)
+ self.assertEqual(wrapper.closed, True)
+
+ def test_closed_shouldnt_raise_on_detached_stream(self):
+ stream = TextIOWrapper(StringIO())
+ stream.detach()
+ wrapper = StreamWrapper(stream, None)
+ self.assertEqual(wrapper.closed, True)
+
+class AnsiToWin32Test(TestCase):
+
+ def testInit(self):
+ mockStdout = Mock()
+ auto = Mock()
+ stream = AnsiToWin32(mockStdout, autoreset=auto)
+ self.assertEqual(stream.wrapped, mockStdout)
+ self.assertEqual(stream.autoreset, auto)
+
+ @patch('colorama.ansitowin32.winterm', None)
+ @patch('colorama.ansitowin32.winapi_test', lambda *_: True)
+ def testStripIsTrueOnWindows(self):
+ with osname('nt'):
+ mockStdout = Mock()
+ stream = AnsiToWin32(mockStdout)
+ self.assertTrue(stream.strip)
+
+ def testStripIsFalseOffWindows(self):
+ with osname('posix'):
+ mockStdout = Mock(closed=False)
+ stream = AnsiToWin32(mockStdout)
+ self.assertFalse(stream.strip)
+
+ def testWriteStripsAnsi(self):
+ mockStdout = Mock()
+ stream = AnsiToWin32(mockStdout)
+ stream.wrapped = Mock()
+ stream.write_and_convert = Mock()
+ stream.strip = True
+
+ stream.write('abc')
+
+ self.assertFalse(stream.wrapped.write.called)
+ self.assertEqual(stream.write_and_convert.call_args, (('abc',), {}))
+
+ def testWriteDoesNotStripAnsi(self):
+ mockStdout = Mock()
+ stream = AnsiToWin32(mockStdout)
+ stream.wrapped = Mock()
+ stream.write_and_convert = Mock()
+ stream.strip = False
+ stream.convert = False
+
+ stream.write('abc')
+
+ self.assertFalse(stream.write_and_convert.called)
+ self.assertEqual(stream.wrapped.write.call_args, (('abc',), {}))
+
+ def assert_autoresets(self, convert, autoreset=True):
+ stream = AnsiToWin32(Mock())
+ stream.convert = convert
+ stream.reset_all = Mock()
+ stream.autoreset = autoreset
+ stream.winterm = Mock()
+
+ stream.write('abc')
+
+ self.assertEqual(stream.reset_all.called, autoreset)
+
+ def testWriteAutoresets(self):
+ self.assert_autoresets(convert=True)
+ self.assert_autoresets(convert=False)
+ self.assert_autoresets(convert=True, autoreset=False)
+ self.assert_autoresets(convert=False, autoreset=False)
+
+ def testWriteAndConvertWritesPlainText(self):
+ stream = AnsiToWin32(Mock())
+ stream.write_and_convert( 'abc' )
+ self.assertEqual( stream.wrapped.write.call_args, (('abc',), {}) )
+
+ def testWriteAndConvertStripsAllValidAnsi(self):
+ stream = AnsiToWin32(Mock())
+ stream.call_win32 = Mock()
+ data = [
+ 'abc\033[mdef',
+ 'abc\033[0mdef',
+ 'abc\033[2mdef',
+ 'abc\033[02mdef',
+ 'abc\033[002mdef',
+ 'abc\033[40mdef',
+ 'abc\033[040mdef',
+ 'abc\033[0;1mdef',
+ 'abc\033[40;50mdef',
+ 'abc\033[50;30;40mdef',
+ 'abc\033[Adef',
+ 'abc\033[0Gdef',
+ 'abc\033[1;20;128Hdef',
+ ]
+ for datum in data:
+ stream.wrapped.write.reset_mock()
+ stream.write_and_convert( datum )
+ self.assertEqual(
+ [args[0] for args in stream.wrapped.write.call_args_list],
+ [ ('abc',), ('def',) ]
+ )
+
+ def testWriteAndConvertSkipsEmptySnippets(self):
+ stream = AnsiToWin32(Mock())
+ stream.call_win32 = Mock()
+ stream.write_and_convert( '\033[40m\033[41m' )
+ self.assertFalse( stream.wrapped.write.called )
+
+ def testWriteAndConvertCallsWin32WithParamsAndCommand(self):
+ stream = AnsiToWin32(Mock())
+ stream.convert = True
+ stream.call_win32 = Mock()
+ stream.extract_params = Mock(return_value='params')
+ data = {
+ 'abc\033[adef': ('a', 'params'),
+ 'abc\033[;;bdef': ('b', 'params'),
+ 'abc\033[0cdef': ('c', 'params'),
+ 'abc\033[;;0;;Gdef': ('G', 'params'),
+ 'abc\033[1;20;128Hdef': ('H', 'params'),
+ }
+ for datum, expected in data.items():
+ stream.call_win32.reset_mock()
+ stream.write_and_convert( datum )
+ self.assertEqual( stream.call_win32.call_args[0], expected )
+
+ def test_reset_all_shouldnt_raise_on_closed_orig_stdout(self):
+ stream = StringIO()
+ converter = AnsiToWin32(stream)
+ stream.close()
+
+ converter.reset_all()
+
+ def test_wrap_shouldnt_raise_on_closed_orig_stdout(self):
+ stream = StringIO()
+ stream.close()
+ with \
+ patch("colorama.ansitowin32.os.name", "nt"), \
+ patch("colorama.ansitowin32.winapi_test", lambda: True):
+ converter = AnsiToWin32(stream)
+ self.assertTrue(converter.strip)
+ self.assertFalse(converter.convert)
+
+ def test_wrap_shouldnt_raise_on_missing_closed_attr(self):
+ with \
+ patch("colorama.ansitowin32.os.name", "nt"), \
+ patch("colorama.ansitowin32.winapi_test", lambda: True):
+ converter = AnsiToWin32(object())
+ self.assertTrue(converter.strip)
+ self.assertFalse(converter.convert)
+
+ def testExtractParams(self):
+ stream = AnsiToWin32(Mock())
+ data = {
+ '': (0,),
+ ';;': (0,),
+ '2': (2,),
+ ';;002;;': (2,),
+ '0;1': (0, 1),
+ ';;003;;456;;': (3, 456),
+ '11;22;33;44;55': (11, 22, 33, 44, 55),
+ }
+ for datum, expected in data.items():
+ self.assertEqual(stream.extract_params('m', datum), expected)
+
+ def testCallWin32UsesLookup(self):
+ listener = Mock()
+ stream = AnsiToWin32(listener)
+ stream.win32_calls = {
+ 1: (lambda *_, **__: listener(11),),
+ 2: (lambda *_, **__: listener(22),),
+ 3: (lambda *_, **__: listener(33),),
+ }
+ stream.call_win32('m', (3, 1, 99, 2))
+ self.assertEqual(
+ [a[0][0] for a in listener.call_args_list],
+ [33, 11, 22] )
+
+ def test_osc_codes(self):
+ mockStdout = Mock()
+ stream = AnsiToWin32(mockStdout, convert=True)
+ with patch('colorama.ansitowin32.winterm') as winterm:
+ data = [
+ '\033]0\x07', # missing arguments
+ '\033]0;foo\x08', # wrong OSC command
+ '\033]0;colorama_test_title\x07', # should work
+ '\033]1;colorama_test_title\x07', # wrong set command
+ '\033]2;colorama_test_title\x07', # should work
+ '\033]' + ';' * 64 + '\x08', # see issue #247
+ ]
+ for code in data:
+ stream.write(code)
+ self.assertEqual(winterm.set_title.call_count, 2)
+
+ def test_native_windows_ansi(self):
+ with ExitStack() as stack:
+ def p(a, b):
+ stack.enter_context(patch(a, b, create=True))
+ # Pretend to be on Windows
+ p("colorama.ansitowin32.os.name", "nt")
+ p("colorama.ansitowin32.winapi_test", lambda: True)
+ p("colorama.win32.winapi_test", lambda: True)
+ p("colorama.winterm.win32.windll", "non-None")
+ p("colorama.winterm.get_osfhandle", lambda _: 1234)
+
+ # Pretend that our mock stream has native ANSI support
+ p(
+ "colorama.winterm.win32.GetConsoleMode",
+ lambda _: ENABLE_VIRTUAL_TERMINAL_PROCESSING,
+ )
+ SetConsoleMode = Mock()
+ p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode)
+
+ stdout = Mock()
+ stdout.closed = False
+ stdout.isatty.return_value = True
+ stdout.fileno.return_value = 1
+
+ # Our fake console says it has native vt support, so AnsiToWin32 should
+ # enable that support and do nothing else.
+ stream = AnsiToWin32(stdout)
+ SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ self.assertFalse(stream.strip)
+ self.assertFalse(stream.convert)
+ self.assertFalse(stream.should_wrap())
+
+ # Now let's pretend we're on an old Windows console, that doesn't have
+ # native ANSI support.
+ p("colorama.winterm.win32.GetConsoleMode", lambda _: 0)
+ SetConsoleMode = Mock()
+ p("colorama.winterm.win32.SetConsoleMode", SetConsoleMode)
+
+ stream = AnsiToWin32(stdout)
+ SetConsoleMode.assert_called_with(1234, ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ self.assertTrue(stream.strip)
+ self.assertTrue(stream.convert)
+ self.assertTrue(stream.should_wrap())
+
+
+if __name__ == '__main__':
+ main()
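
For orientation alongside these tests, a small sketch of the wrapping flow
under test (colorama import path assumed, as in the patch targets above; the
flag values shown are for a POSIX tty):

    import sys
    from colorama.ansitowin32 import AnsiToWin32

    wrapper = AnsiToWin32(sys.stdout)
    print(wrapper.strip, wrapper.convert)  # False False off Windows on a tty
    # .stream is the StreamWrapper proxy; with both flags off, ANSI codes
    # pass through to the wrapped stream unchanged.
    wrapper.stream.write('\033[31mred\033[0m\n')
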
diff --git a/third_party/python/pip/pip/_vendor/colorama/tests/initialise_test.py b/third_party/python/pip/pip/_vendor/colorama/tests/initialise_test.py
new file mode 100644
index 0000000000..89f9b07511
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/tests/initialise_test.py
@@ -0,0 +1,189 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import sys
+from unittest import TestCase, main, skipUnless
+
+try:
+ from unittest.mock import patch, Mock
+except ImportError:
+ from mock import patch, Mock
+
+from ..ansitowin32 import StreamWrapper
+from ..initialise import init, just_fix_windows_console, _wipe_internal_state_for_tests
+from .utils import osname, replace_by
+
+orig_stdout = sys.stdout
+orig_stderr = sys.stderr
+
+
+class InitTest(TestCase):
+
+ @skipUnless(sys.stdout.isatty(), "sys.stdout is not a tty")
+ def setUp(self):
+ # sanity check
+ self.assertNotWrapped()
+
+ def tearDown(self):
+ _wipe_internal_state_for_tests()
+ sys.stdout = orig_stdout
+ sys.stderr = orig_stderr
+
+ def assertWrapped(self):
+ self.assertIsNot(sys.stdout, orig_stdout, 'stdout should be wrapped')
+ self.assertIsNot(sys.stderr, orig_stderr, 'stderr should be wrapped')
+ self.assertTrue(isinstance(sys.stdout, StreamWrapper),
+ 'bad stdout wrapper')
+ self.assertTrue(isinstance(sys.stderr, StreamWrapper),
+ 'bad stderr wrapper')
+
+ def assertNotWrapped(self):
+ self.assertIs(sys.stdout, orig_stdout, 'stdout should not be wrapped')
+ self.assertIs(sys.stderr, orig_stderr, 'stderr should not be wrapped')
+
+ @patch('colorama.initialise.reset_all')
+ @patch('colorama.ansitowin32.winapi_test', lambda *_: True)
+ @patch('colorama.ansitowin32.enable_vt_processing', lambda *_: False)
+ def testInitWrapsOnWindows(self, _):
+ with osname("nt"):
+ init()
+ self.assertWrapped()
+
+ @patch('colorama.initialise.reset_all')
+ @patch('colorama.ansitowin32.winapi_test', lambda *_: False)
+ def testInitDoesntWrapOnEmulatedWindows(self, _):
+ with osname("nt"):
+ init()
+ self.assertNotWrapped()
+
+ def testInitDoesntWrapOnNonWindows(self):
+ with osname("posix"):
+ init()
+ self.assertNotWrapped()
+
+ def testInitDoesntWrapIfNone(self):
+ with replace_by(None):
+ init()
+ # We can't use assertNotWrapped here because replace_by(None)
+ # changes stdout/stderr already.
+ self.assertIsNone(sys.stdout)
+ self.assertIsNone(sys.stderr)
+
+ def testInitAutoresetOnWrapsOnAllPlatforms(self):
+ with osname("posix"):
+ init(autoreset=True)
+ self.assertWrapped()
+
+ def testInitWrapOffDoesntWrapOnWindows(self):
+ with osname("nt"):
+ init(wrap=False)
+ self.assertNotWrapped()
+
+ def testInitWrapOffIncompatibleWithAutoresetOn(self):
+ self.assertRaises(ValueError, lambda: init(autoreset=True, wrap=False))
+
+ @patch('colorama.win32.SetConsoleTextAttribute')
+ @patch('colorama.initialise.AnsiToWin32')
+ def testAutoResetPassedOn(self, mockATW32, _):
+ with osname("nt"):
+ init(autoreset=True)
+ self.assertEqual(len(mockATW32.call_args_list), 2)
+ self.assertEqual(mockATW32.call_args_list[1][1]['autoreset'], True)
+ self.assertEqual(mockATW32.call_args_list[0][1]['autoreset'], True)
+
+ @patch('colorama.initialise.AnsiToWin32')
+ def testAutoResetChangeable(self, mockATW32):
+ with osname("nt"):
+ init()
+
+ init(autoreset=True)
+ self.assertEqual(len(mockATW32.call_args_list), 4)
+ self.assertEqual(mockATW32.call_args_list[2][1]['autoreset'], True)
+ self.assertEqual(mockATW32.call_args_list[3][1]['autoreset'], True)
+
+ init()
+ self.assertEqual(len(mockATW32.call_args_list), 6)
+ self.assertEqual(
+ mockATW32.call_args_list[4][1]['autoreset'], False)
+ self.assertEqual(
+ mockATW32.call_args_list[5][1]['autoreset'], False)
+
+
+ @patch('colorama.initialise.atexit.register')
+ def testAtexitRegisteredOnlyOnce(self, mockRegister):
+ init()
+ self.assertTrue(mockRegister.called)
+ mockRegister.reset_mock()
+ init()
+ self.assertFalse(mockRegister.called)
+
+
+class JustFixWindowsConsoleTest(TestCase):
+ def _reset(self):
+ _wipe_internal_state_for_tests()
+ sys.stdout = orig_stdout
+ sys.stderr = orig_stderr
+
+ def tearDown(self):
+ self._reset()
+
+ @patch("colorama.ansitowin32.winapi_test", lambda: True)
+ def testJustFixWindowsConsole(self):
+ if sys.platform != "win32":
+ # just_fix_windows_console should be a no-op
+ just_fix_windows_console()
+ self.assertIs(sys.stdout, orig_stdout)
+ self.assertIs(sys.stderr, orig_stderr)
+ else:
+ def fake_std():
+ # Emulate stdout=not a tty, stderr=tty
+ # to check that we handle both cases correctly
+ stdout = Mock()
+ stdout.closed = False
+ stdout.isatty.return_value = False
+ stdout.fileno.return_value = 1
+ sys.stdout = stdout
+
+ stderr = Mock()
+ stderr.closed = False
+ stderr.isatty.return_value = True
+ stderr.fileno.return_value = 2
+ sys.stderr = stderr
+
+ for native_ansi in [False, True]:
+ with patch(
+ 'colorama.ansitowin32.enable_vt_processing',
+ lambda *_: native_ansi
+ ):
+ self._reset()
+ fake_std()
+
+ # Regular single-call test
+ prev_stdout = sys.stdout
+ prev_stderr = sys.stderr
+ just_fix_windows_console()
+ self.assertIs(sys.stdout, prev_stdout)
+ if native_ansi:
+ self.assertIs(sys.stderr, prev_stderr)
+ else:
+ self.assertIsNot(sys.stderr, prev_stderr)
+
+ # second call without resetting is always a no-op
+ prev_stdout = sys.stdout
+ prev_stderr = sys.stderr
+ just_fix_windows_console()
+ self.assertIs(sys.stdout, prev_stdout)
+ self.assertIs(sys.stderr, prev_stderr)
+
+ self._reset()
+ fake_std()
+
+ # If init() runs first, just_fix_windows_console should be a no-op
+ init()
+ prev_stdout = sys.stdout
+ prev_stderr = sys.stderr
+ just_fix_windows_console()
+ self.assertIs(prev_stdout, sys.stdout)
+ self.assertIs(prev_stderr, sys.stderr)
+
+
+if __name__ == '__main__':
+ main()
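
A short usage sketch of the entry points exercised above (the documented
happy path, not a restatement of these tests):

    import colorama

    colorama.init(autoreset=True)        # wraps sys.stdout/sys.stderr
    print(colorama.Fore.CYAN + 'cyan')   # autoreset appends the reset for us
    colorama.deinit()                    # restores the original streams
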
diff --git a/third_party/python/pip/pip/_vendor/colorama/tests/isatty_test.py b/third_party/python/pip/pip/_vendor/colorama/tests/isatty_test.py
new file mode 100644
index 0000000000..0f84e4befe
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/tests/isatty_test.py
@@ -0,0 +1,57 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import sys
+from unittest import TestCase, main
+
+from ..ansitowin32 import StreamWrapper, AnsiToWin32
+from .utils import pycharm, replace_by, replace_original_by, StreamTTY, StreamNonTTY
+
+
+def is_a_tty(stream):
+ return StreamWrapper(stream, None).isatty()
+
+class IsattyTest(TestCase):
+
+ def test_TTY(self):
+ tty = StreamTTY()
+ self.assertTrue(is_a_tty(tty))
+ with pycharm():
+ self.assertTrue(is_a_tty(tty))
+
+ def test_nonTTY(self):
+ non_tty = StreamNonTTY()
+ self.assertFalse(is_a_tty(non_tty))
+ with pycharm():
+ self.assertFalse(is_a_tty(non_tty))
+
+ def test_withPycharm(self):
+ with pycharm():
+ self.assertTrue(is_a_tty(sys.stderr))
+ self.assertTrue(is_a_tty(sys.stdout))
+
+ def test_withPycharmTTYOverride(self):
+ tty = StreamTTY()
+ with pycharm(), replace_by(tty):
+ self.assertTrue(is_a_tty(tty))
+
+ def test_withPycharmNonTTYOverride(self):
+ non_tty = StreamNonTTY()
+ with pycharm(), replace_by(non_tty):
+ self.assertFalse(is_a_tty(non_tty))
+
+ def test_withPycharmNoneOverride(self):
+ with pycharm():
+ with replace_by(None), replace_original_by(None):
+ self.assertFalse(is_a_tty(None))
+ self.assertFalse(is_a_tty(StreamNonTTY()))
+ self.assertTrue(is_a_tty(StreamTTY()))
+
+ def test_withPycharmStreamWrapped(self):
+ with pycharm():
+ self.assertTrue(AnsiToWin32(StreamTTY()).stream.isatty())
+ self.assertFalse(AnsiToWin32(StreamNonTTY()).stream.isatty())
+ self.assertTrue(AnsiToWin32(sys.stdout).stream.isatty())
+ self.assertTrue(AnsiToWin32(sys.stderr).stream.isatty())
+
+
+if __name__ == '__main__':
+ main()
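
The behavior under test reduces to StreamWrapper proxying isatty() to the
wrapped stream, with a PyCharm (PYCHARM_HOSTED) special case applied only to
the real sys.stdout/sys.stderr. A minimal sketch:

    import io
    from colorama.ansitowin32 import StreamWrapper

    fake = io.StringIO()                        # StringIO.isatty() is False
    print(StreamWrapper(fake, None).isatty())   # -> False
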
diff --git a/third_party/python/pip/pip/_vendor/colorama/tests/utils.py b/third_party/python/pip/pip/_vendor/colorama/tests/utils.py
new file mode 100644
index 0000000000..472fafb440
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/tests/utils.py
@@ -0,0 +1,49 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+from contextlib import contextmanager
+from io import StringIO
+import sys
+import os
+
+
+class StreamTTY(StringIO):
+ def isatty(self):
+ return True
+
+class StreamNonTTY(StringIO):
+ def isatty(self):
+ return False
+
+@contextmanager
+def osname(name):
+ orig = os.name
+ os.name = name
+ yield
+ os.name = orig
+
+@contextmanager
+def replace_by(stream):
+ orig_stdout = sys.stdout
+ orig_stderr = sys.stderr
+ sys.stdout = stream
+ sys.stderr = stream
+ yield
+ sys.stdout = orig_stdout
+ sys.stderr = orig_stderr
+
+@contextmanager
+def replace_original_by(stream):
+ orig_stdout = sys.__stdout__
+ orig_stderr = sys.__stderr__
+ sys.__stdout__ = stream
+ sys.__stderr__ = stream
+ yield
+ sys.__stdout__ = orig_stdout
+ sys.__stderr__ = orig_stderr
+
+@contextmanager
+def pycharm():
+ os.environ["PYCHARM_HOSTED"] = "1"
+ non_tty = StreamNonTTY()
+ with replace_by(non_tty), replace_original_by(non_tty):
+ yield
+ del os.environ["PYCHARM_HOSTED"]
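
These helpers are deliberately thin: each swaps a global, yields, and swaps
it back, so they nest freely. A usage sketch (import path assumed):

    from colorama.tests.utils import osname, replace_by, StreamNonTTY

    with osname('nt'), replace_by(StreamNonTTY()):
        pass  # code here sees os.name == 'nt' and a non-tty stdout/stderr

Note they restore state only on normal exit (there is no try/finally), an
acceptable trade-off for small test fixtures.
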
diff --git a/third_party/python/pip/pip/_vendor/colorama/tests/winterm_test.py b/third_party/python/pip/pip/_vendor/colorama/tests/winterm_test.py
new file mode 100644
index 0000000000..d0955f9e60
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/tests/winterm_test.py
@@ -0,0 +1,131 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+import sys
+from unittest import TestCase, main, skipUnless
+
+try:
+ from unittest.mock import Mock, patch
+except ImportError:
+ from mock import Mock, patch
+
+from ..winterm import WinColor, WinStyle, WinTerm
+
+
+class WinTermTest(TestCase):
+
+ @patch('colorama.winterm.win32')
+ def testInit(self, mockWin32):
+ mockAttr = Mock()
+ mockAttr.wAttributes = 7 + 6 * 16 + 8
+ mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
+ term = WinTerm()
+ self.assertEqual(term._fore, 7)
+ self.assertEqual(term._back, 6)
+ self.assertEqual(term._style, 8)
+
+ @skipUnless(sys.platform.startswith("win"), "requires Windows")
+ def testGetAttrs(self):
+ term = WinTerm()
+
+ term._fore = 0
+ term._back = 0
+ term._style = 0
+ self.assertEqual(term.get_attrs(), 0)
+
+ term._fore = WinColor.YELLOW
+ self.assertEqual(term.get_attrs(), WinColor.YELLOW)
+
+ term._back = WinColor.MAGENTA
+ self.assertEqual(
+ term.get_attrs(),
+ WinColor.YELLOW + WinColor.MAGENTA * 16)
+
+ term._style = WinStyle.BRIGHT
+ self.assertEqual(
+ term.get_attrs(),
+ WinColor.YELLOW + WinColor.MAGENTA * 16 + WinStyle.BRIGHT)
+
+ @patch('colorama.winterm.win32')
+ def testResetAll(self, mockWin32):
+ mockAttr = Mock()
+ mockAttr.wAttributes = 1 + 2 * 16 + 8
+ mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
+ term = WinTerm()
+
+ term.set_console = Mock()
+ term._fore = -1
+ term._back = -1
+ term._style = -1
+
+ term.reset_all()
+
+ self.assertEqual(term._fore, 1)
+ self.assertEqual(term._back, 2)
+ self.assertEqual(term._style, 8)
+ self.assertEqual(term.set_console.called, True)
+
+ @skipUnless(sys.platform.startswith("win"), "requires Windows")
+ def testFore(self):
+ term = WinTerm()
+ term.set_console = Mock()
+ term._fore = 0
+
+ term.fore(5)
+
+ self.assertEqual(term._fore, 5)
+ self.assertEqual(term.set_console.called, True)
+
+ @skipUnless(sys.platform.startswith("win"), "requires Windows")
+ def testBack(self):
+ term = WinTerm()
+ term.set_console = Mock()
+ term._back = 0
+
+ term.back(5)
+
+ self.assertEqual(term._back, 5)
+ self.assertEqual(term.set_console.called, True)
+
+ @skipUnless(sys.platform.startswith("win"), "requires Windows")
+ def testStyle(self):
+ term = WinTerm()
+ term.set_console = Mock()
+ term._style = 0
+
+ term.style(22)
+
+ self.assertEqual(term._style, 22)
+ self.assertEqual(term.set_console.called, True)
+
+ @patch('colorama.winterm.win32')
+ def testSetConsole(self, mockWin32):
+ mockAttr = Mock()
+ mockAttr.wAttributes = 0
+ mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
+ term = WinTerm()
+ term.windll = Mock()
+
+ term.set_console()
+
+ self.assertEqual(
+ mockWin32.SetConsoleTextAttribute.call_args,
+ ((mockWin32.STDOUT, term.get_attrs()), {})
+ )
+
+ @patch('colorama.winterm.win32')
+ def testSetConsoleOnStderr(self, mockWin32):
+ mockAttr = Mock()
+ mockAttr.wAttributes = 0
+ mockWin32.GetConsoleScreenBufferInfo.return_value = mockAttr
+ term = WinTerm()
+ term.windll = Mock()
+
+ term.set_console(on_stderr=True)
+
+ self.assertEqual(
+ mockWin32.SetConsoleTextAttribute.call_args,
+ ((mockWin32.STDERR, term.get_attrs()), {})
+ )
+
+
+if __name__ == '__main__':
+ main()
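
The wAttributes arithmetic these tests rely on packs colors bitwise:
foreground in bits 0-2, background in bits 4-6, and the BRIGHT flags as
0x08/0x80. Decoding the value used in testInit above:

    attrs = 7 + 6 * 16 + 8     # the mocked wAttributes value
    fore = attrs & 7           # -> 7 (WinColor.GREY)
    back = (attrs >> 4) & 7    # -> 6 (WinColor.YELLOW)
    style = attrs & 0x88       # -> 8 (WinStyle.BRIGHT)
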
diff --git a/third_party/python/pip/pip/_vendor/colorama/win32.py b/third_party/python/pip/pip/_vendor/colorama/win32.py
new file mode 100644
index 0000000000..841b0e270a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/win32.py
@@ -0,0 +1,180 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+
+# from winbase.h
+STDOUT = -11
+STDERR = -12
+
+ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+
+try:
+ import ctypes
+ from ctypes import LibraryLoader
+ windll = LibraryLoader(ctypes.WinDLL)
+ from ctypes import wintypes
+except (AttributeError, ImportError):
+ windll = None
+ SetConsoleTextAttribute = lambda *_: None
+ winapi_test = lambda *_: None
+else:
+ from ctypes import byref, Structure, c_char, POINTER
+
+ COORD = wintypes._COORD
+
+ class CONSOLE_SCREEN_BUFFER_INFO(Structure):
+ """struct in wincon.h."""
+ _fields_ = [
+ ("dwSize", COORD),
+ ("dwCursorPosition", COORD),
+ ("wAttributes", wintypes.WORD),
+ ("srWindow", wintypes.SMALL_RECT),
+ ("dwMaximumWindowSize", COORD),
+ ]
+ def __str__(self):
+ return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
+ self.dwSize.Y, self.dwSize.X,
+ self.dwCursorPosition.Y, self.dwCursorPosition.X,
+ self.wAttributes,
+ self.srWindow.Top, self.srWindow.Left,
+ self.srWindow.Bottom, self.srWindow.Right,
+ self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X,
+ )
+
+ _GetStdHandle = windll.kernel32.GetStdHandle
+ _GetStdHandle.argtypes = [
+ wintypes.DWORD,
+ ]
+ _GetStdHandle.restype = wintypes.HANDLE
+
+ _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
+ _GetConsoleScreenBufferInfo.argtypes = [
+ wintypes.HANDLE,
+ POINTER(CONSOLE_SCREEN_BUFFER_INFO),
+ ]
+ _GetConsoleScreenBufferInfo.restype = wintypes.BOOL
+
+ _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
+ _SetConsoleTextAttribute.argtypes = [
+ wintypes.HANDLE,
+ wintypes.WORD,
+ ]
+ _SetConsoleTextAttribute.restype = wintypes.BOOL
+
+ _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
+ _SetConsoleCursorPosition.argtypes = [
+ wintypes.HANDLE,
+ COORD,
+ ]
+ _SetConsoleCursorPosition.restype = wintypes.BOOL
+
+ _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
+ _FillConsoleOutputCharacterA.argtypes = [
+ wintypes.HANDLE,
+ c_char,
+ wintypes.DWORD,
+ COORD,
+ POINTER(wintypes.DWORD),
+ ]
+ _FillConsoleOutputCharacterA.restype = wintypes.BOOL
+
+ _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
+ _FillConsoleOutputAttribute.argtypes = [
+ wintypes.HANDLE,
+ wintypes.WORD,
+ wintypes.DWORD,
+ COORD,
+ POINTER(wintypes.DWORD),
+ ]
+ _FillConsoleOutputAttribute.restype = wintypes.BOOL
+
+ _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
+ _SetConsoleTitleW.argtypes = [
+ wintypes.LPCWSTR
+ ]
+ _SetConsoleTitleW.restype = wintypes.BOOL
+
+ _GetConsoleMode = windll.kernel32.GetConsoleMode
+ _GetConsoleMode.argtypes = [
+ wintypes.HANDLE,
+ POINTER(wintypes.DWORD)
+ ]
+ _GetConsoleMode.restype = wintypes.BOOL
+
+ _SetConsoleMode = windll.kernel32.SetConsoleMode
+ _SetConsoleMode.argtypes = [
+ wintypes.HANDLE,
+ wintypes.DWORD
+ ]
+ _SetConsoleMode.restype = wintypes.BOOL
+
+ def _winapi_test(handle):
+ csbi = CONSOLE_SCREEN_BUFFER_INFO()
+ success = _GetConsoleScreenBufferInfo(
+ handle, byref(csbi))
+ return bool(success)
+
+ def winapi_test():
+ return any(_winapi_test(h) for h in
+ (_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
+
+ def GetConsoleScreenBufferInfo(stream_id=STDOUT):
+ handle = _GetStdHandle(stream_id)
+ csbi = CONSOLE_SCREEN_BUFFER_INFO()
+ success = _GetConsoleScreenBufferInfo(
+ handle, byref(csbi))
+ return csbi
+
+ def SetConsoleTextAttribute(stream_id, attrs):
+ handle = _GetStdHandle(stream_id)
+ return _SetConsoleTextAttribute(handle, attrs)
+
+ def SetConsoleCursorPosition(stream_id, position, adjust=True):
+ position = COORD(*position)
+ # If the position is out of range, do nothing.
+ if position.Y <= 0 or position.X <= 0:
+ return
+ # Adjust for Windows' SetConsoleCursorPosition:
+ # 1. being 0-based, while ANSI is 1-based.
+ # 2. expecting (x,y), while ANSI uses (y,x).
+ adjusted_position = COORD(position.Y - 1, position.X - 1)
+ if adjust:
+ # Adjust for viewport's scroll position
+ sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
+ adjusted_position.Y += sr.Top
+ adjusted_position.X += sr.Left
+ # Resume normal processing
+ handle = _GetStdHandle(stream_id)
+ return _SetConsoleCursorPosition(handle, adjusted_position)
+
+ def FillConsoleOutputCharacter(stream_id, char, length, start):
+ handle = _GetStdHandle(stream_id)
+ char = c_char(char.encode())
+ length = wintypes.DWORD(length)
+ num_written = wintypes.DWORD(0)
+ # Note that this is hard-coded for ANSI (vs wide) bytes.
+ success = _FillConsoleOutputCharacterA(
+ handle, char, length, start, byref(num_written))
+ return num_written.value
+
+ def FillConsoleOutputAttribute(stream_id, attr, length, start):
+ ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
+ handle = _GetStdHandle(stream_id)
+ attribute = wintypes.WORD(attr)
+ length = wintypes.DWORD(length)
+ num_written = wintypes.DWORD(0)
+ # Note that this is hard-coded for ANSI (vs wide) bytes.
+ return _FillConsoleOutputAttribute(
+ handle, attribute, length, start, byref(num_written))
+
+ def SetConsoleTitle(title):
+ return _SetConsoleTitleW(title)
+
+ def GetConsoleMode(handle):
+ mode = wintypes.DWORD()
+ success = _GetConsoleMode(handle, byref(mode))
+ if not success:
+ raise ctypes.WinError()
+ return mode.value
+
+ def SetConsoleMode(handle, mode):
+ success = _SetConsoleMode(handle, mode)
+ if not success:
+ raise ctypes.WinError()
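
A Windows-only sketch of how the exported mode helpers combine; this mirrors
what winterm.enable_vt_processing() in the next file does with them (vendored
import path assumed to resolve as colorama):

    import msvcrt
    import sys
    from colorama import win32

    handle = msvcrt.get_osfhandle(sys.stdout.fileno())
    mode = win32.GetConsoleMode(handle)  # raises WinError off a real console
    win32.SetConsoleMode(handle, mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
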
diff --git a/third_party/python/pip/pip/_vendor/colorama/winterm.py b/third_party/python/pip/pip/_vendor/colorama/winterm.py
new file mode 100644
index 0000000000..aad867e8c8
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/colorama/winterm.py
@@ -0,0 +1,195 @@
+# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
+try:
+ from msvcrt import get_osfhandle
+except ImportError:
+ def get_osfhandle(_):
+ raise OSError("This isn't windows!")
+
+
+from . import win32
+
+# from wincon.h
+class WinColor(object):
+ BLACK = 0
+ BLUE = 1
+ GREEN = 2
+ CYAN = 3
+ RED = 4
+ MAGENTA = 5
+ YELLOW = 6
+ GREY = 7
+
+# from wincon.h
+class WinStyle(object):
+ NORMAL = 0x00 # dim text, dim background
+ BRIGHT = 0x08 # bright text, dim background
+ BRIGHT_BACKGROUND = 0x80 # dim text, bright background
+
+class WinTerm(object):
+
+ def __init__(self):
+ self._default = win32.GetConsoleScreenBufferInfo(win32.STDOUT).wAttributes
+ self.set_attrs(self._default)
+ self._default_fore = self._fore
+ self._default_back = self._back
+ self._default_style = self._style
+ # In order to emulate LIGHT_EX in windows, we borrow the BRIGHT style.
+ # So that LIGHT_EX colors and BRIGHT style do not clobber each other,
+ # we track them separately, since LIGHT_EX is overwritten by Fore/Back
+ # and BRIGHT is overwritten by Style codes.
+ self._light = 0
+
+ def get_attrs(self):
+ return self._fore + self._back * 16 + (self._style | self._light)
+
+ def set_attrs(self, value):
+ self._fore = value & 7
+ self._back = (value >> 4) & 7
+ self._style = value & (WinStyle.BRIGHT | WinStyle.BRIGHT_BACKGROUND)
+
+ def reset_all(self, on_stderr=None):
+ self.set_attrs(self._default)
+ self.set_console(attrs=self._default)
+ self._light = 0
+
+ def fore(self, fore=None, light=False, on_stderr=False):
+ if fore is None:
+ fore = self._default_fore
+ self._fore = fore
+ # Emulate LIGHT_EX with BRIGHT Style
+ if light:
+ self._light |= WinStyle.BRIGHT
+ else:
+ self._light &= ~WinStyle.BRIGHT
+ self.set_console(on_stderr=on_stderr)
+
+ def back(self, back=None, light=False, on_stderr=False):
+ if back is None:
+ back = self._default_back
+ self._back = back
+ # Emulate LIGHT_EX with BRIGHT_BACKGROUND Style
+ if light:
+ self._light |= WinStyle.BRIGHT_BACKGROUND
+ else:
+ self._light &= ~WinStyle.BRIGHT_BACKGROUND
+ self.set_console(on_stderr=on_stderr)
+
+ def style(self, style=None, on_stderr=False):
+ if style is None:
+ style = self._default_style
+ self._style = style
+ self.set_console(on_stderr=on_stderr)
+
+ def set_console(self, attrs=None, on_stderr=False):
+ if attrs is None:
+ attrs = self.get_attrs()
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ win32.SetConsoleTextAttribute(handle, attrs)
+
+ def get_position(self, handle):
+ position = win32.GetConsoleScreenBufferInfo(handle).dwCursorPosition
+ # Because Windows coordinates are 0-based,
+ # and win32.SetConsoleCursorPosition expects 1-based.
+ position.X += 1
+ position.Y += 1
+ return position
+
+ def set_cursor_position(self, position=None, on_stderr=False):
+ if position is None:
+ # I'm not currently tracking the position, so there is no default.
+ # position = self.get_position()
+ return
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ win32.SetConsoleCursorPosition(handle, position)
+
+ def cursor_adjust(self, x, y, on_stderr=False):
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ position = self.get_position(handle)
+ adjusted_position = (position.Y + y, position.X + x)
+ win32.SetConsoleCursorPosition(handle, adjusted_position, adjust=False)
+
+ def erase_screen(self, mode=0, on_stderr=False):
+ # 0 should clear from the cursor to the end of the screen.
+ # 1 should clear from the cursor to the beginning of the screen.
+ # 2 should clear the entire screen, and move cursor to (1,1)
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ csbi = win32.GetConsoleScreenBufferInfo(handle)
+ # get the number of character cells in the current buffer
+ cells_in_screen = csbi.dwSize.X * csbi.dwSize.Y
+ # get number of character cells before current cursor position
+ cells_before_cursor = csbi.dwSize.X * csbi.dwCursorPosition.Y + csbi.dwCursorPosition.X
+ if mode == 0:
+ from_coord = csbi.dwCursorPosition
+ cells_to_erase = cells_in_screen - cells_before_cursor
+ elif mode == 1:
+ from_coord = win32.COORD(0, 0)
+ cells_to_erase = cells_before_cursor
+ elif mode == 2:
+ from_coord = win32.COORD(0, 0)
+ cells_to_erase = cells_in_screen
+ else:
+ # invalid mode
+ return
+ # fill the entire screen with blanks
+ win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+ # now set the buffer's attributes accordingly
+ win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+ if mode == 2:
+ # put the cursor where needed
+ win32.SetConsoleCursorPosition(handle, (1, 1))
+
+ def erase_line(self, mode=0, on_stderr=False):
+ # 0 should clear from the cursor to the end of the line.
+ # 1 should clear from the cursor to the beginning of the line.
+ # 2 should clear the entire line.
+ handle = win32.STDOUT
+ if on_stderr:
+ handle = win32.STDERR
+ csbi = win32.GetConsoleScreenBufferInfo(handle)
+ if mode == 0:
+ from_coord = csbi.dwCursorPosition
+ cells_to_erase = csbi.dwSize.X - csbi.dwCursorPosition.X
+ elif mode == 1:
+ from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
+ cells_to_erase = csbi.dwCursorPosition.X
+ elif mode == 2:
+ from_coord = win32.COORD(0, csbi.dwCursorPosition.Y)
+ cells_to_erase = csbi.dwSize.X
+ else:
+ # invalid mode
+ return
+ # fill the entire screen with blanks
+ win32.FillConsoleOutputCharacter(handle, ' ', cells_to_erase, from_coord)
+ # now set the buffer's attributes accordingly
+ win32.FillConsoleOutputAttribute(handle, self.get_attrs(), cells_to_erase, from_coord)
+
+ def set_title(self, title):
+ win32.SetConsoleTitle(title)
+
+
+def enable_vt_processing(fd):
+ if win32.windll is None or not win32.winapi_test():
+ return False
+
+ try:
+ handle = get_osfhandle(fd)
+ mode = win32.GetConsoleMode(handle)
+ win32.SetConsoleMode(
+ handle,
+ mode | win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING,
+ )
+
+ mode = win32.GetConsoleMode(handle)
+ if mode & win32.ENABLE_VIRTUAL_TERMINAL_PROCESSING:
+ return True
+ return False # SetConsoleMode succeeded but the flag did not stick
+ # Can get TypeError in testsuite where 'fd' is a Mock()
+ except (OSError, TypeError):
+ return False
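
Callers are expected to treat enable_vt_processing() as a capability probe:
true only when the console accepted and kept the VT flag. A sketch:

    import sys
    from colorama.winterm import enable_vt_processing

    if enable_vt_processing(sys.stdout.fileno()):
        # The terminal handles ANSI natively; no stripping or conversion.
        sys.stdout.write('\033[32mnative green\033[0m\n')
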
diff --git a/third_party/python/pip/pip/_vendor/distlib/__init__.py b/third_party/python/pip/pip/_vendor/distlib/__init__.py
new file mode 100644
index 0000000000..962173c8d0
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/__init__.py
@@ -0,0 +1,23 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2022 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import logging
+
+__version__ = '0.3.6'
+
+class DistlibException(Exception):
+ pass
+
+try:
+ from logging import NullHandler
+except ImportError: # pragma: no cover
+ class NullHandler(logging.Handler):
+ def handle(self, record): pass
+ def emit(self, record): pass
+ def createLock(self): self.lock = None
+
+logger = logging.getLogger(__name__)
+logger.addHandler(NullHandler())
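
This is the standard "library logging" convention: attach a NullHandler so
applications that never configure logging do not see "No handlers could be
found" warnings. Consumers opt in explicitly, e.g. (vendored logger name
assumed from the module path):

    import logging

    logging.basicConfig(level=logging.DEBUG)  # application-level opt-in
    logging.getLogger('pip._vendor.distlib').debug('now visible')
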
diff --git a/third_party/python/pip/pip/_vendor/distlib/compat.py b/third_party/python/pip/pip/_vendor/distlib/compat.py
new file mode 100644
index 0000000000..1fe3d225ac
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/compat.py
@@ -0,0 +1,1116 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import absolute_import
+
+import os
+import re
+import sys
+
+try:
+ import ssl
+except ImportError: # pragma: no cover
+ ssl = None
+
+if sys.version_info[0] < 3: # pragma: no cover
+ from StringIO import StringIO
+ string_types = basestring,
+ text_type = unicode
+ from types import FileType as file_type
+ import __builtin__ as builtins
+ import ConfigParser as configparser
+ from urlparse import urlparse, urlunparse, urljoin, urlsplit, urlunsplit
+ from urllib import (urlretrieve, quote as _quote, unquote, url2pathname,
+ pathname2url, ContentTooShortError, splittype)
+
+ def quote(s):
+ if isinstance(s, unicode):
+ s = s.encode('utf-8')
+ return _quote(s)
+
+ import urllib2
+ from urllib2 import (Request, urlopen, URLError, HTTPError,
+ HTTPBasicAuthHandler, HTTPPasswordMgr,
+ HTTPHandler, HTTPRedirectHandler,
+ build_opener)
+ if ssl:
+ from urllib2 import HTTPSHandler
+ import httplib
+ import xmlrpclib
+ import Queue as queue
+ from HTMLParser import HTMLParser
+ import htmlentitydefs
+ raw_input = raw_input
+ from itertools import ifilter as filter
+ from itertools import ifilterfalse as filterfalse
+
+ # Leaving this around for now, in case it needs resurrecting in some way
+ # _userprog = None
+ # def splituser(host):
+ # """splituser('user[:passwd]@host[:port]') --> 'user[:passwd]', 'host[:port]'."""
+ # global _userprog
+ # if _userprog is None:
+ # import re
+ # _userprog = re.compile('^(.*)@(.*)$')
+
+ # match = _userprog.match(host)
+ # if match: return match.group(1, 2)
+ # return None, host
+
+else: # pragma: no cover
+ from io import StringIO
+ string_types = str,
+ text_type = str
+ from io import TextIOWrapper as file_type
+ import builtins
+ import configparser
+ import shutil
+ from urllib.parse import (urlparse, urlunparse, urljoin, quote,
+ unquote, urlsplit, urlunsplit, splittype)
+ from urllib.request import (urlopen, urlretrieve, Request, url2pathname,
+ pathname2url,
+ HTTPBasicAuthHandler, HTTPPasswordMgr,
+ HTTPHandler, HTTPRedirectHandler,
+ build_opener)
+ if ssl:
+ from urllib.request import HTTPSHandler
+ from urllib.error import HTTPError, URLError, ContentTooShortError
+ import http.client as httplib
+ import urllib.request as urllib2
+ import xmlrpc.client as xmlrpclib
+ import queue
+ from html.parser import HTMLParser
+ import html.entities as htmlentitydefs
+ raw_input = input
+ from itertools import filterfalse
+ filter = filter
+
+
+try:
+ from ssl import match_hostname, CertificateError
+except ImportError: # pragma: no cover
+ class CertificateError(ValueError):
+ pass
+
+
+ def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ parts = dn.split('.')
+ leftmost, remainder = parts[0], parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
+
+
+ def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED")
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError("hostname %r "
+ "doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r "
+ "doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError("no appropriate commonName or "
+ "subjectAltName fields were found")
+
+
+try:
+ from types import SimpleNamespace as Container
+except ImportError: # pragma: no cover
+ class Container(object):
+ """
+ A generic container for when multiple values need to be returned
+ """
+ def __init__(self, **kwargs):
+ self.__dict__.update(kwargs)
+
+
+try:
+ from shutil import which
+except ImportError: # pragma: no cover
+ # Implementation from Python 3.3
+ def which(cmd, mode=os.F_OK | os.X_OK, path=None):
+ """Given a command, mode, and a PATH string, return the path which
+ conforms to the given mode on the PATH, or None if there is no such
+ file.
+
+ `mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
+ of os.environ.get("PATH"), or can be overridden with a custom search
+ path.
+
+ """
+ # Check that a given file can be accessed with the correct mode.
+ # Additionally check that `file` is not a directory, as on Windows
+ # directories pass the os.access check.
+ def _access_check(fn, mode):
+ return (os.path.exists(fn) and os.access(fn, mode)
+ and not os.path.isdir(fn))
+
+ # If we're given a path with a directory part, look it up directly rather
+ # than referring to PATH directories. This includes checking relative to the
+ # current directory, e.g. ./script
+ if os.path.dirname(cmd):
+ if _access_check(cmd, mode):
+ return cmd
+ return None
+
+ if path is None:
+ path = os.environ.get("PATH", os.defpath)
+ if not path:
+ return None
+ path = path.split(os.pathsep)
+
+ if sys.platform == "win32":
+ # The current directory takes precedence on Windows.
+ if not os.curdir in path:
+ path.insert(0, os.curdir)
+
+ # PATHEXT is necessary to check on Windows.
+ pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
+ # See if the given file matches any of the expected path extensions.
+ # This will allow us to short circuit when given "python.exe".
+ # If it does match, only test that one, otherwise we have to try
+ # others.
+ if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
+ files = [cmd]
+ else:
+ files = [cmd + ext for ext in pathext]
+ else:
+ # On other platforms you don't have things like PATHEXT to tell you
+ # what file suffixes are executable, so just pass on cmd as-is.
+ files = [cmd]
+
+ seen = set()
+ for dir in path:
+ normdir = os.path.normcase(dir)
+ if not normdir in seen:
+ seen.add(normdir)
+ for thefile in files:
+ name = os.path.join(dir, thefile)
+ if _access_check(name, mode):
+ return name
+ return None
+
+
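+# Usage sketch for the backport above (illustrative values only):
+#     which('python')    -> e.g. '/usr/bin/python', or None if not on PATH
+#     which('./script')  -> './script' when it exists and is executable;
+#                           a cmd with a directory part skips the PATH walk
+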
+# ZipFile is a context manager in 2.7, but not in 2.6
+
+from zipfile import ZipFile as BaseZipFile
+
+if hasattr(BaseZipFile, '__enter__'): # pragma: no cover
+ ZipFile = BaseZipFile
+else: # pragma: no cover
+ from zipfile import ZipExtFile as BaseZipExtFile
+
+ class ZipExtFile(BaseZipExtFile):
+ def __init__(self, base):
+ self.__dict__.update(base.__dict__)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc_info):
+ self.close()
+ # return None, so if an exception occurred, it will propagate
+
+ class ZipFile(BaseZipFile):
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc_info):
+ self.close()
+ # return None, so if an exception occurred, it will propagate
+
+ def open(self, *args, **kwargs):
+ base = BaseZipFile.open(self, *args, **kwargs)
+ return ZipExtFile(base)
+
+try:
+ from platform import python_implementation
+except ImportError: # pragma: no cover
+ def python_implementation():
+ """Return a string identifying the Python implementation."""
+ if 'PyPy' in sys.version:
+ return 'PyPy'
+ if os.name == 'java':
+ return 'Jython'
+ if sys.version.startswith('IronPython'):
+ return 'IronPython'
+ return 'CPython'
+
+import shutil
+import sysconfig
+
+try:
+ callable = callable
+except NameError: # pragma: no cover
+ from collections.abc import Callable
+
+ def callable(obj):
+ return isinstance(obj, Callable)
+
+
+try:
+ fsencode = os.fsencode
+ fsdecode = os.fsdecode
+except AttributeError: # pragma: no cover
+ # Issue #99: on some systems (e.g. containerised),
+ # sys.getfilesystemencoding() returns None, and we need a real value,
+ # so fall back to utf-8. From the CPython 2.7 docs relating to Unix and
+ # sys.getfilesystemencoding(): the return value is "the user’s preference
+ # according to the result of nl_langinfo(CODESET), or None if the
+ # nl_langinfo(CODESET) failed."
+ _fsencoding = sys.getfilesystemencoding() or 'utf-8'
+ if _fsencoding == 'mbcs':
+ _fserrors = 'strict'
+ else:
+ _fserrors = 'surrogateescape'
+
+ def fsencode(filename):
+ if isinstance(filename, bytes):
+ return filename
+ elif isinstance(filename, text_type):
+ return filename.encode(_fsencoding, _fserrors)
+ else:
+ raise TypeError("expect bytes or str, not %s" %
+ type(filename).__name__)
+
+ def fsdecode(filename):
+ if isinstance(filename, text_type):
+ return filename
+ elif isinstance(filename, bytes):
+ return filename.decode(_fsencoding, _fserrors)
+ else:
+ raise TypeError("expect bytes or str, not %s" %
+ type(filename).__name__)
+
+try:
+ from tokenize import detect_encoding
+except ImportError: # pragma: no cover
+ from codecs import BOM_UTF8, lookup
+ import re
+
+ cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)")
+
+ def _get_normal_name(orig_enc):
+ """Imitates get_normal_name in tokenizer.c."""
+ # Only care about the first 12 characters.
+ enc = orig_enc[:12].lower().replace("_", "-")
+ if enc == "utf-8" or enc.startswith("utf-8-"):
+ return "utf-8"
+ if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
+ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
+ return "iso-8859-1"
+ return orig_enc
+
+ def detect_encoding(readline):
+ """
+ The detect_encoding() function is used to detect the encoding that should
+ be used to decode a Python source file. It requires one argument, readline,
+ in the same way as the tokenize() generator.
+
+ It will call readline a maximum of twice, and return the encoding used
+ (as a string) and a list of any lines (left as bytes) it has read in.
+
+ It detects the encoding from the presence of a utf-8 bom or an encoding
+ cookie as specified in pep-0263. If both a bom and a cookie are present,
+ but disagree, a SyntaxError will be raised. If the encoding cookie is an
+ invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
+ 'utf-8-sig' is returned.
+
+ If no encoding is specified, then the default of 'utf-8' will be returned.
+ """
+ try:
+ filename = readline.__self__.name
+ except AttributeError:
+ filename = None
+ bom_found = False
+ encoding = None
+ default = 'utf-8'
+ def read_or_stop():
+ try:
+ return readline()
+ except StopIteration:
+ return b''
+
+ def find_cookie(line):
+ try:
+ # Decode as UTF-8. Either the line is an encoding declaration,
+ # in which case it should be pure ASCII, or it must be UTF-8
+ # per default encoding.
+ line_string = line.decode('utf-8')
+ except UnicodeDecodeError:
+ msg = "invalid or missing encoding declaration"
+ if filename is not None:
+ msg = '{} for {!r}'.format(msg, filename)
+ raise SyntaxError(msg)
+
+ matches = cookie_re.findall(line_string)
+ if not matches:
+ return None
+ encoding = _get_normal_name(matches[0])
+ try:
+ codec = lookup(encoding)
+ except LookupError:
+ # This behaviour mimics the Python interpreter
+ if filename is None:
+ msg = "unknown encoding: " + encoding
+ else:
+ msg = "unknown encoding for {!r}: {}".format(filename,
+ encoding)
+ raise SyntaxError(msg)
+
+ if bom_found:
+ if codec.name != 'utf-8':
+ # This behaviour mimics the Python interpreter
+ if filename is None:
+ msg = 'encoding problem: utf-8'
+ else:
+ msg = 'encoding problem for {!r}: utf-8'.format(filename)
+ raise SyntaxError(msg)
+ encoding += '-sig'
+ return encoding
+
+ first = read_or_stop()
+ if first.startswith(BOM_UTF8):
+ bom_found = True
+ first = first[3:]
+ default = 'utf-8-sig'
+ if not first:
+ return default, []
+
+ encoding = find_cookie(first)
+ if encoding:
+ return encoding, [first]
+
+ second = read_or_stop()
+ if not second:
+ return default, [first]
+
+ encoding = find_cookie(second)
+ if encoding:
+ return encoding, [first, second]
+
+ return default, [first, second]
+
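+# Worked example for the backport above: a first line of
+# b'# -*- coding: latin-1 -*-' returns ('iso-8859-1', [that_line]) via the
+# cookie path; a UTF-8 BOM with no cookie yields 'utf-8-sig'; with neither,
+# the default ('utf-8', ...) is used.
+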
+# For converting & <-> &amp; etc.
+try:
+ from html import escape
+except ImportError:
+ from cgi import escape
+if sys.version_info[:2] < (3, 4):
+ unescape = HTMLParser().unescape
+else:
+ from html import unescape
+
+try:
+ from collections import ChainMap
+except ImportError: # pragma: no cover
+ from collections import MutableMapping
+
+ try:
+ from reprlib import recursive_repr as _recursive_repr
+ except ImportError:
+ try:
+ from thread import get_ident # Python 2; needed by wrapper() below
+ except ImportError:
+ from threading import get_ident
+
+ def _recursive_repr(fillvalue='...'):
+ '''
+ Decorator to make a repr function return fillvalue for a recursive
+ call
+ '''
+
+ def decorating_function(user_function):
+ repr_running = set()
+
+ def wrapper(self):
+ key = id(self), get_ident()
+ if key in repr_running:
+ return fillvalue
+ repr_running.add(key)
+ try:
+ result = user_function(self)
+ finally:
+ repr_running.discard(key)
+ return result
+
+ # Can't use functools.wraps() here because of bootstrap issues
+ wrapper.__module__ = getattr(user_function, '__module__')
+ wrapper.__doc__ = getattr(user_function, '__doc__')
+ wrapper.__name__ = getattr(user_function, '__name__')
+ wrapper.__annotations__ = getattr(user_function, '__annotations__', {})
+ return wrapper
+
+ return decorating_function
+
+ class ChainMap(MutableMapping):
+ ''' A ChainMap groups multiple dicts (or other mappings) together
+ to create a single, updateable view.
+
+ The underlying mappings are stored in a list. That list is public and can be
+ accessed or updated using the *maps* attribute. There is no other state.
+
+ Lookups search the underlying mappings successively until a key is found.
+ In contrast, writes, updates, and deletions only operate on the first
+ mapping.
+
+ '''
+
+ def __init__(self, *maps):
+ '''Initialize a ChainMap by setting *maps* to the given mappings.
+ If no mappings are provided, a single empty dictionary is used.
+
+ '''
+ self.maps = list(maps) or [{}] # always at least one map
+
+ def __missing__(self, key):
+ raise KeyError(key)
+
+ def __getitem__(self, key):
+ for mapping in self.maps:
+ try:
+ return mapping[key] # can't use 'key in mapping' with defaultdict
+ except KeyError:
+ pass
+ return self.__missing__(key) # support subclasses that define __missing__
+
+ def get(self, key, default=None):
+ return self[key] if key in self else default
+
+ def __len__(self):
+ return len(set().union(*self.maps)) # reuses stored hash values if possible
+
+ def __iter__(self):
+ return iter(set().union(*self.maps))
+
+ def __contains__(self, key):
+ return any(key in m for m in self.maps)
+
+ def __bool__(self):
+ return any(self.maps)
+
+ @_recursive_repr()
+ def __repr__(self):
+ return '{0.__class__.__name__}({1})'.format(
+ self, ', '.join(map(repr, self.maps)))
+
+ @classmethod
+ def fromkeys(cls, iterable, *args):
+ 'Create a ChainMap with a single dict created from the iterable.'
+ return cls(dict.fromkeys(iterable, *args))
+
+ def copy(self):
+ 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]'
+ return self.__class__(self.maps[0].copy(), *self.maps[1:])
+
+ __copy__ = copy
+
+ def new_child(self): # like Django's Context.push()
+ 'New ChainMap with a new dict followed by all previous maps.'
+ return self.__class__({}, *self.maps)
+
+ @property
+ def parents(self): # like Django's Context.pop()
+ 'New ChainMap from maps[1:].'
+ return self.__class__(*self.maps[1:])
+
+ def __setitem__(self, key, value):
+ self.maps[0][key] = value
+
+ def __delitem__(self, key):
+ try:
+ del self.maps[0][key]
+ except KeyError:
+ raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+ def popitem(self):
+ 'Remove and return an item pair from maps[0]. Raise KeyError if maps[0] is empty.'
+ try:
+ return self.maps[0].popitem()
+ except KeyError:
+ raise KeyError('No keys found in the first mapping.')
+
+ def pop(self, key, *args):
+ 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].'
+ try:
+ return self.maps[0].pop(key, *args)
+ except KeyError:
+ raise KeyError('Key not found in the first mapping: {!r}'.format(key))
+
+ def clear(self):
+ 'Clear maps[0], leaving maps[1:] intact.'
+ self.maps[0].clear()
+
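+# Semantics sketch for the backport above:
+#     cm = ChainMap({'a': 1}, {'a': 9, 'b': 2})
+#     cm['a'] -> 1 (first mapping wins); cm['b'] -> 2 (falls through)
+#     cm['c'] = 3  # writes, updates and deletes touch maps[0] only
+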
+try:
+ from importlib.util import cache_from_source # Python >= 3.4
+except ImportError: # pragma: no cover
+ def cache_from_source(path, debug_override=None):
+ assert path.endswith('.py')
+ if debug_override is None:
+ debug_override = __debug__
+ if debug_override:
+ suffix = 'c'
+ else:
+ suffix = 'o'
+ return path + suffix
+
+try:
+ from collections import OrderedDict
+except ImportError: # pragma: no cover
+## {{{ http://code.activestate.com/recipes/576693/ (r9)
+# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
+# Passes Python2.7's test suite and incorporates all the latest updates.
+ try:
+ from thread import get_ident as _get_ident
+ except ImportError:
+ from dummy_thread import get_ident as _get_ident
+
+ try:
+ from _abcoll import KeysView, ValuesView, ItemsView
+ except ImportError:
+ pass
+
+
+ class OrderedDict(dict):
+ 'Dictionary that remembers insertion order'
+ # An inherited dict maps keys to values.
+ # The inherited dict provides __getitem__, __len__, __contains__, and get.
+ # The remaining methods are order-aware.
+ # Big-O running times for all methods are the same as for regular dictionaries.
+
+ # The internal self.__map dictionary maps keys to links in a doubly linked list.
+ # The circular doubly linked list starts and ends with a sentinel element.
+ # The sentinel element never gets deleted (this simplifies the algorithm).
+ # Each link is stored as a list of length three: [PREV, NEXT, KEY].
+
+ def __init__(self, *args, **kwds):
+ '''Initialize an ordered dictionary. Signature is the same as for
+ regular dictionaries, but keyword arguments are not recommended
+ because their insertion order is arbitrary.
+
+ '''
+ if len(args) > 1:
+ raise TypeError('expected at most 1 arguments, got %d' % len(args))
+ try:
+ self.__root
+ except AttributeError:
+ self.__root = root = [] # sentinel node
+ root[:] = [root, root, None]
+ self.__map = {}
+ self.__update(*args, **kwds)
+
+ def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
+ 'od.__setitem__(i, y) <==> od[i]=y'
+ # Setting a new item creates a new link which goes at the end of the linked
+ # list, and the inherited dictionary is updated with the new key/value pair.
+ if key not in self:
+ root = self.__root
+ last = root[0]
+ last[1] = root[0] = self.__map[key] = [last, root, key]
+ dict_setitem(self, key, value)
+
+ def __delitem__(self, key, dict_delitem=dict.__delitem__):
+ 'od.__delitem__(y) <==> del od[y]'
+ # Deleting an existing item uses self.__map to find the link which is
+ # then removed by updating the links in the predecessor and successor nodes.
+ dict_delitem(self, key)
+ link_prev, link_next, key = self.__map.pop(key)
+ link_prev[1] = link_next
+ link_next[0] = link_prev
+
+ def __iter__(self):
+ 'od.__iter__() <==> iter(od)'
+ root = self.__root
+ curr = root[1]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[1]
+
+ def __reversed__(self):
+ 'od.__reversed__() <==> reversed(od)'
+ root = self.__root
+ curr = root[0]
+ while curr is not root:
+ yield curr[2]
+ curr = curr[0]
+
+ def clear(self):
+ 'od.clear() -> None. Remove all items from od.'
+ try:
+ for node in self.__map.itervalues():
+ del node[:]
+ root = self.__root
+ root[:] = [root, root, None]
+ self.__map.clear()
+ except AttributeError:
+ pass
+ dict.clear(self)
+
+ def popitem(self, last=True):
+ '''od.popitem() -> (k, v), return and remove a (key, value) pair.
+ Pairs are returned in LIFO order if last is true or FIFO order if false.
+
+ '''
+ if not self:
+ raise KeyError('dictionary is empty')
+ root = self.__root
+ if last:
+ link = root[0]
+ link_prev = link[0]
+ link_prev[1] = root
+ root[0] = link_prev
+ else:
+ link = root[1]
+ link_next = link[1]
+ root[1] = link_next
+ link_next[0] = root
+ key = link[2]
+ del self.__map[key]
+ value = dict.pop(self, key)
+ return key, value
+
+ # -- the following methods do not depend on the internal structure --
+
+ def keys(self):
+ 'od.keys() -> list of keys in od'
+ return list(self)
+
+ def values(self):
+ 'od.values() -> list of values in od'
+ return [self[key] for key in self]
+
+ def items(self):
+ 'od.items() -> list of (key, value) pairs in od'
+ return [(key, self[key]) for key in self]
+
+ def iterkeys(self):
+ 'od.iterkeys() -> an iterator over the keys in od'
+ return iter(self)
+
+ def itervalues(self):
+ 'od.itervalues -> an iterator over the values in od'
+ for k in self:
+ yield self[k]
+
+ def iteritems(self):
+ 'od.iteritems -> an iterator over the (key, value) items in od'
+ for k in self:
+ yield (k, self[k])
+
+ def update(*args, **kwds):
+ '''od.update(E, **F) -> None. Update od from dict/iterable E and F.
+
+ If E is a dict instance, does: for k in E: od[k] = E[k]
+ If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
+ Or if E is an iterable of items, does: for k, v in E: od[k] = v
+ In either case, this is followed by: for k, v in F.items(): od[k] = v
+
+ '''
+ if len(args) > 2:
+ raise TypeError('update() takes at most 2 positional '
+ 'arguments (%d given)' % (len(args),))
+ elif not args:
+ raise TypeError('update() takes at least 1 argument (0 given)')
+ self = args[0]
+ # Make progressively weaker assumptions about "other"
+ other = ()
+ if len(args) == 2:
+ other = args[1]
+ if isinstance(other, dict):
+ for key in other:
+ self[key] = other[key]
+ elif hasattr(other, 'keys'):
+ for key in other.keys():
+ self[key] = other[key]
+ else:
+ for key, value in other:
+ self[key] = value
+ for key, value in kwds.items():
+ self[key] = value
+
+ __update = update # let subclasses override update without breaking __init__
+
+ __marker = object()
+
+ def pop(self, key, default=__marker):
+ '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+
+ '''
+ if key in self:
+ result = self[key]
+ del self[key]
+ return result
+ if default is self.__marker:
+ raise KeyError(key)
+ return default
+
+ def setdefault(self, key, default=None):
+ 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
+ if key in self:
+ return self[key]
+ self[key] = default
+ return default
+
+    def __repr__(self, _repr_running={}):
+        'od.__repr__() <==> repr(od)'
+        # A shared mutable default is used deliberately here: recursive
+        # calls (an od that contains itself) must see the same dict across
+        # frames in order to detect re-entry and emit '...'.
+ call_key = id(self), _get_ident()
+ if call_key in _repr_running:
+ return '...'
+ _repr_running[call_key] = 1
+ try:
+ if not self:
+ return '%s()' % (self.__class__.__name__,)
+ return '%s(%r)' % (self.__class__.__name__, self.items())
+ finally:
+ del _repr_running[call_key]
+
+ def __reduce__(self):
+ 'Return state information for pickling'
+ items = [[k, self[k]] for k in self]
+ inst_dict = vars(self).copy()
+ for k in vars(OrderedDict()):
+ inst_dict.pop(k, None)
+ if inst_dict:
+ return (self.__class__, (items,), inst_dict)
+ return self.__class__, (items,)
+
+ def copy(self):
+ 'od.copy() -> a shallow copy of od'
+ return self.__class__(self)
+
+ @classmethod
+ def fromkeys(cls, iterable, value=None):
+ '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
+ and values equal to v (which defaults to None).
+
+ '''
+ d = cls()
+ for key in iterable:
+ d[key] = value
+ return d
+
+ def __eq__(self, other):
+ '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
+ while comparison to a regular mapping is order-insensitive.
+
+ '''
+ if isinstance(other, OrderedDict):
+ return len(self)==len(other) and self.items() == other.items()
+ return dict.__eq__(self, other)
+
+ def __ne__(self, other):
+ return not self == other
+
+ # -- the following methods are only used in Python 2.7 --
+
+ def viewkeys(self):
+ "od.viewkeys() -> a set-like object providing a view on od's keys"
+ return KeysView(self)
+
+ def viewvalues(self):
+ "od.viewvalues() -> an object providing a view on od's values"
+ return ValuesView(self)
+
+ def viewitems(self):
+ "od.viewitems() -> a set-like object providing a view on od's items"
+ return ItemsView(self)
+
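+# A minimal usage sketch of the OrderedDict backport above (editor's
+# illustration, not part of the vendored module): insertion order is
+# preserved and popitem() can pop from either end.
+#
+#     od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
+#     list(od)                # -> ['a', 'b', 'c']
+#     od.popitem()            # -> ('c', 3), LIFO by default
+#     od.popitem(last=False)  # -> ('a', 1), FIFO
+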
+try:
+ from logging.config import BaseConfigurator, valid_ident
+except ImportError: # pragma: no cover
+ IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)
+
+
+ def valid_ident(s):
+ m = IDENTIFIER.match(s)
+ if not m:
+ raise ValueError('Not a valid Python identifier: %r' % s)
+ return True
+
+
+ # The ConvertingXXX classes are wrappers around standard Python containers,
+ # and they serve to convert any suitable values in the container. The
+ # conversion converts base dicts, lists and tuples to their wrapped
+ # equivalents, whereas strings which match a conversion format are converted
+ # appropriately.
+ #
+ # Each wrapper should have a configurator attribute holding the actual
+ # configurator to use for conversion.
+
+ class ConvertingDict(dict):
+ """A converting dictionary wrapper."""
+
+ def __getitem__(self, key):
+ value = dict.__getitem__(self, key)
+ result = self.configurator.convert(value)
+ #If the converted value is different, save for next time
+ if value is not result:
+ self[key] = result
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ def get(self, key, default=None):
+ value = dict.get(self, key, default)
+ result = self.configurator.convert(value)
+ #If the converted value is different, save for next time
+ if value is not result:
+ self[key] = result
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ def pop(self, key, default=None):
+ value = dict.pop(self, key, default)
+ result = self.configurator.convert(value)
+ if value is not result:
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ class ConvertingList(list):
+ """A converting list wrapper."""
+ def __getitem__(self, key):
+ value = list.__getitem__(self, key)
+ result = self.configurator.convert(value)
+ #If the converted value is different, save for next time
+ if value is not result:
+ self[key] = result
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ def pop(self, idx=-1):
+ value = list.pop(self, idx)
+ result = self.configurator.convert(value)
+ if value is not result:
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ return result
+
+ class ConvertingTuple(tuple):
+ """A converting tuple wrapper."""
+ def __getitem__(self, key):
+ value = tuple.__getitem__(self, key)
+ result = self.configurator.convert(value)
+ if value is not result:
+ if type(result) in (ConvertingDict, ConvertingList,
+ ConvertingTuple):
+ result.parent = self
+ result.key = key
+ return result
+
+ class BaseConfigurator(object):
+ """
+ The configurator base class which defines some useful defaults.
+ """
+
+ CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')
+
+ WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
+ DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
+ INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
+ DIGIT_PATTERN = re.compile(r'^\d+$')
+
+ value_converters = {
+ 'ext' : 'ext_convert',
+ 'cfg' : 'cfg_convert',
+ }
+
+ # We might want to use a different one, e.g. importlib
+ importer = staticmethod(__import__)
+
+ def __init__(self, config):
+ self.config = ConvertingDict(config)
+ self.config.configurator = self
+
+ def resolve(self, s):
+ """
+ Resolve strings to objects using standard import and attribute
+ syntax.
+ """
+ name = s.split('.')
+ used = name.pop(0)
+ try:
+ found = self.importer(used)
+ for frag in name:
+ used += '.' + frag
+ try:
+ found = getattr(found, frag)
+ except AttributeError:
+ self.importer(used)
+ found = getattr(found, frag)
+ return found
+ except ImportError:
+ e, tb = sys.exc_info()[1:]
+ v = ValueError('Cannot resolve %r: %s' % (s, e))
+ v.__cause__, v.__traceback__ = e, tb
+ raise v
+
+ def ext_convert(self, value):
+ """Default converter for the ext:// protocol."""
+ return self.resolve(value)
+
+ def cfg_convert(self, value):
+ """Default converter for the cfg:// protocol."""
+ rest = value
+ m = self.WORD_PATTERN.match(rest)
+ if m is None:
+ raise ValueError("Unable to convert %r" % value)
+ else:
+ rest = rest[m.end():]
+ d = self.config[m.groups()[0]]
+ #print d, rest
+ while rest:
+ m = self.DOT_PATTERN.match(rest)
+ if m:
+ d = d[m.groups()[0]]
+ else:
+ m = self.INDEX_PATTERN.match(rest)
+ if m:
+ idx = m.groups()[0]
+ if not self.DIGIT_PATTERN.match(idx):
+ d = d[idx]
+ else:
+ try:
+ n = int(idx) # try as number first (most likely)
+ d = d[n]
+ except TypeError:
+ d = d[idx]
+ if m:
+ rest = rest[m.end():]
+ else:
+ raise ValueError('Unable to convert '
+ '%r at %r' % (value, rest))
+ #rest should be empty
+ return d
+
+ def convert(self, value):
+ """
+ Convert values to an appropriate type. dicts, lists and tuples are
+ replaced by their converting alternatives. Strings are checked to
+ see if they have a conversion format and are converted if they do.
+ """
+ if not isinstance(value, ConvertingDict) and isinstance(value, dict):
+ value = ConvertingDict(value)
+ value.configurator = self
+ elif not isinstance(value, ConvertingList) and isinstance(value, list):
+ value = ConvertingList(value)
+ value.configurator = self
+ elif not isinstance(value, ConvertingTuple) and\
+ isinstance(value, tuple):
+ value = ConvertingTuple(value)
+ value.configurator = self
+ elif isinstance(value, string_types):
+ m = self.CONVERT_PATTERN.match(value)
+ if m:
+ d = m.groupdict()
+ prefix = d['prefix']
+ converter = self.value_converters.get(prefix, None)
+ if converter:
+ suffix = d['suffix']
+ converter = getattr(self, converter)
+ value = converter(suffix)
+ return value
+
+ def configure_custom(self, config):
+ """Configure an object with a user-supplied factory."""
+ c = config.pop('()')
+ if not callable(c):
+ c = self.resolve(c)
+ props = config.pop('.', None)
+ # Check for valid identifiers
+ kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
+ result = c(**kwargs)
+ if props:
+ for name, value in props.items():
+ setattr(result, name, value)
+ return result
+
+ def as_tuple(self, value):
+ """Utility function which converts lists to tuples."""
+ if isinstance(value, list):
+ value = tuple(value)
+ return value
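+
+# Sketch of the converter protocols above (editor's note; this applies to the
+# fallback definitions here, and logging.config's own classes behave the same):
+#
+#     cfg = BaseConfigurator({'handlers': {'h': {'level': 'DEBUG'}}})
+#     cfg.convert('cfg://handlers.h.level')   # -> 'DEBUG'
+#     cfg.convert('ext://logging.getLogger')  # -> the logging.getLogger function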
diff --git a/third_party/python/pip/pip/_vendor/distlib/database.py b/third_party/python/pip/pip/_vendor/distlib/database.py
new file mode 100644
index 0000000000..5db5d7f507
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/database.py
@@ -0,0 +1,1350 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""PEP 376 implementation."""
+
+from __future__ import unicode_literals
+
+import base64
+import codecs
+import contextlib
+import hashlib
+import logging
+import os
+import posixpath
+import sys
+import zipimport
+
+from . import DistlibException, resources
+from .compat import StringIO
+from .version import get_scheme, UnsupportedVersionError
+from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
+ LEGACY_METADATA_FILENAME)
+from .util import (parse_requirement, cached_property, parse_name_and_version,
+ read_exports, write_exports, CSVReader, CSVWriter)
+
+
+__all__ = ['Distribution', 'BaseInstalledDistribution',
+ 'InstalledDistribution', 'EggInfoDistribution',
+ 'DistributionPath']
+
+
+logger = logging.getLogger(__name__)
+
+EXPORTS_FILENAME = 'pydist-exports.json'
+COMMANDS_FILENAME = 'pydist-commands.json'
+
+DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
+ 'RESOURCES', EXPORTS_FILENAME, 'SHARED')
+
+DISTINFO_EXT = '.dist-info'
+
+
+class _Cache(object):
+ """
+ A simple cache mapping names and .dist-info paths to distributions
+ """
+ def __init__(self):
+ """
+ Initialise an instance. There is normally one for each DistributionPath.
+ """
+ self.name = {}
+ self.path = {}
+ self.generated = False
+
+ def clear(self):
+ """
+ Clear the cache, setting it to its initial state.
+ """
+ self.name.clear()
+ self.path.clear()
+ self.generated = False
+
+ def add(self, dist):
+ """
+ Add a distribution to the cache.
+ :param dist: The distribution to add.
+ """
+ if dist.path not in self.path:
+ self.path[dist.path] = dist
+ self.name.setdefault(dist.key, []).append(dist)
+
+
+class DistributionPath(object):
+ """
+ Represents a set of distributions installed on a path (typically sys.path).
+ """
+ def __init__(self, path=None, include_egg=False):
+ """
+ Create an instance from a path, optionally including legacy (distutils/
+ setuptools/distribute) distributions.
+ :param path: The path to use, as a list of directories. If not specified,
+ sys.path is used.
+ :param include_egg: If True, this instance will look for and return legacy
+ distributions as well as those based on PEP 376.
+ """
+ if path is None:
+ path = sys.path
+ self.path = path
+ self._include_dist = True
+ self._include_egg = include_egg
+
+ self._cache = _Cache()
+ self._cache_egg = _Cache()
+ self._cache_enabled = True
+ self._scheme = get_scheme('default')
+
+ def _get_cache_enabled(self):
+ return self._cache_enabled
+
+ def _set_cache_enabled(self, value):
+ self._cache_enabled = value
+
+ cache_enabled = property(_get_cache_enabled, _set_cache_enabled)
+
+ def clear_cache(self):
+ """
+ Clears the internal cache.
+ """
+ self._cache.clear()
+ self._cache_egg.clear()
+
+
+ def _yield_distributions(self):
+ """
+ Yield .dist-info and/or .egg(-info) distributions.
+ """
+ # We need to check if we've seen some resources already, because on
+ # some Linux systems (e.g. some Debian/Ubuntu variants) there are
+ # symlinks which alias other files in the environment.
+ seen = set()
+ for path in self.path:
+ finder = resources.finder_for_path(path)
+ if finder is None:
+ continue
+ r = finder.find('')
+ if not r or not r.is_container:
+ continue
+ rset = sorted(r.resources)
+ for entry in rset:
+ r = finder.find(entry)
+ if not r or r.path in seen:
+ continue
+ try:
+ if self._include_dist and entry.endswith(DISTINFO_EXT):
+ possible_filenames = [METADATA_FILENAME,
+ WHEEL_METADATA_FILENAME,
+ LEGACY_METADATA_FILENAME]
+ for metadata_filename in possible_filenames:
+ metadata_path = posixpath.join(entry, metadata_filename)
+ pydist = finder.find(metadata_path)
+ if pydist:
+ break
+ else:
+ continue
+
+ with contextlib.closing(pydist.as_stream()) as stream:
+ metadata = Metadata(fileobj=stream, scheme='legacy')
+ logger.debug('Found %s', r.path)
+ seen.add(r.path)
+ yield new_dist_class(r.path, metadata=metadata,
+ env=self)
+ elif self._include_egg and entry.endswith(('.egg-info',
+ '.egg')):
+ logger.debug('Found %s', r.path)
+ seen.add(r.path)
+ yield old_dist_class(r.path, self)
+ except Exception as e:
+ msg = 'Unable to read distribution at %s, perhaps due to bad metadata: %s'
+ logger.warning(msg, r.path, e)
+ import warnings
+ warnings.warn(msg % (r.path, e), stacklevel=2)
+
+ def _generate_cache(self):
+ """
+ Scan the path for distributions and populate the cache with
+ those that are found.
+ """
+ gen_dist = not self._cache.generated
+ gen_egg = self._include_egg and not self._cache_egg.generated
+ if gen_dist or gen_egg:
+ for dist in self._yield_distributions():
+ if isinstance(dist, InstalledDistribution):
+ self._cache.add(dist)
+ else:
+ self._cache_egg.add(dist)
+
+ if gen_dist:
+ self._cache.generated = True
+ if gen_egg:
+ self._cache_egg.generated = True
+
+ @classmethod
+ def distinfo_dirname(cls, name, version):
+ """
+ The *name* and *version* parameters are converted into their
+ filename-escaped form, i.e. any ``'-'`` characters are replaced
+ with ``'_'`` other than the one in ``'dist-info'`` and the one
+ separating the name from the version number.
+
+ :parameter name: is converted to a standard distribution name by replacing
+                         any runs of non-alphanumeric characters with a single
+ ``'-'``.
+ :type name: string
+ :parameter version: is converted to a standard version string. Spaces
+ become dots, and all other non-alphanumeric characters
+ (except dots) become dashes, with runs of multiple
+ dashes condensed to a single dash.
+ :type version: string
+ :returns: directory name
+ :rtype: string"""
+ name = name.replace('-', '_')
+ return '-'.join([name, version]) + DISTINFO_EXT
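+
+    # Editor's illustration (hypothetical name and version):
+    #     DistributionPath.distinfo_dirname('my-dist', '1.0')
+    #     -> 'my_dist-1.0.dist-info'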
+
+ def get_distributions(self):
+ """
+ Provides an iterator that looks for distributions and returns
+ :class:`InstalledDistribution` or
+ :class:`EggInfoDistribution` instances for each one of them.
+
+ :rtype: iterator of :class:`InstalledDistribution` and
+ :class:`EggInfoDistribution` instances
+ """
+ if not self._cache_enabled:
+ for dist in self._yield_distributions():
+ yield dist
+ else:
+ self._generate_cache()
+
+ for dist in self._cache.path.values():
+ yield dist
+
+ if self._include_egg:
+ for dist in self._cache_egg.path.values():
+ yield dist
+
+ def get_distribution(self, name):
+ """
+ Looks for a named distribution on the path.
+
+ This function only returns the first result found, as no more than one
+ value is expected. If nothing is found, ``None`` is returned.
+
+ :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
+ or ``None``
+ """
+ result = None
+ name = name.lower()
+ if not self._cache_enabled:
+ for dist in self._yield_distributions():
+ if dist.key == name:
+ result = dist
+ break
+ else:
+ self._generate_cache()
+
+ if name in self._cache.name:
+ result = self._cache.name[name][0]
+ elif self._include_egg and name in self._cache_egg.name:
+ result = self._cache_egg.name[name][0]
+ return result
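+
+    # A minimal lookup sketch (editor's note; 'requests' is just an example
+    # name, not something guaranteed to be installed):
+    #
+    #     dp = DistributionPath()                 # scans sys.path
+    #     dist = dp.get_distribution('requests')  # a distribution or None
+    #     if dist is not None:
+    #         print(dist.name_and_version)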
+
+ def provides_distribution(self, name, version=None):
+ """
+        Iterates over all distributions to find which distributions provide *name*.
+        If a *version* is provided, it will be used to filter the results.
+
+        This is a generator: it yields every matching distribution, since
+        several installed distributions may provide the same name.
+
+ :parameter version: a version specifier that indicates the version
+ required, conforming to the format in ``PEP-345``
+
+ :type name: string
+ :type version: string
+ """
+ matcher = None
+ if version is not None:
+ try:
+ matcher = self._scheme.matcher('%s (%s)' % (name, version))
+ except ValueError:
+ raise DistlibException('invalid name or version: %r, %r' %
+ (name, version))
+
+ for dist in self.get_distributions():
+ # We hit a problem on Travis where enum34 was installed and doesn't
+ # have a provides attribute ...
+ if not hasattr(dist, 'provides'):
+ logger.debug('No "provides": %s', dist)
+ else:
+ provided = dist.provides
+
+ for p in provided:
+ p_name, p_ver = parse_name_and_version(p)
+ if matcher is None:
+ if p_name == name:
+ yield dist
+ break
+ else:
+ if p_name == name and matcher.match(p_ver):
+ yield dist
+ break
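+
+    # Usage sketch (editor's illustration, hypothetical name): yields every
+    # installed provider of a name, optionally filtered by a PEP 345 style
+    # version specifier.
+    #
+    #     dp = DistributionPath()
+    #     for dist in dp.provides_distribution('example-pkg', '>= 1.0'):
+    #         print(dist)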
+
+ def get_file_path(self, name, relative_path):
+ """
+ Return the path to a resource file.
+ """
+ dist = self.get_distribution(name)
+ if dist is None:
+ raise LookupError('no distribution named %r found' % name)
+ return dist.get_resource_path(relative_path)
+
+ def get_exported_entries(self, category, name=None):
+ """
+ Return all of the exported entries in a particular category.
+
+ :param category: The category to search for entries.
+ :param name: If specified, only entries with that name are returned.
+ """
+ for dist in self.get_distributions():
+ r = dist.exports
+ if category in r:
+ d = r[category]
+ if name is not None:
+ if name in d:
+ yield d[name]
+ else:
+ for v in d.values():
+ yield v
+
+
+class Distribution(object):
+ """
+ A base class for distributions, whether installed or from indexes.
+ Either way, it must have some metadata, so that's all that's needed
+ for construction.
+ """
+
+ build_time_dependency = False
+ """
+ Set to True if it's known to be only a build-time dependency (i.e.
+ not needed after installation).
+ """
+
+ requested = False
+ """A boolean that indicates whether the ``REQUESTED`` metadata file is
+ present (in other words, whether the package was installed by user
+ request or it was installed as a dependency)."""
+
+ def __init__(self, metadata):
+ """
+ Initialise an instance.
+ :param metadata: The instance of :class:`Metadata` describing this
+ distribution.
+ """
+ self.metadata = metadata
+ self.name = metadata.name
+ self.key = self.name.lower() # for case-insensitive comparisons
+ self.version = metadata.version
+ self.locator = None
+ self.digest = None
+ self.extras = None # additional features requested
+ self.context = None # environment marker overrides
+ self.download_urls = set()
+ self.digests = {}
+
+ @property
+ def source_url(self):
+ """
+ The source archive download URL for this distribution.
+ """
+ return self.metadata.source_url
+
+ download_url = source_url # Backward compatibility
+
+ @property
+ def name_and_version(self):
+ """
+ A utility property which displays the name and version in parentheses.
+ """
+ return '%s (%s)' % (self.name, self.version)
+
+ @property
+ def provides(self):
+ """
+ A set of distribution names and versions provided by this distribution.
+ :return: A set of "name (version)" strings.
+ """
+ plist = self.metadata.provides
+ s = '%s (%s)' % (self.name, self.version)
+ if s not in plist:
+ plist.append(s)
+ return plist
+
+ def _get_requirements(self, req_attr):
+ md = self.metadata
+ reqts = getattr(md, req_attr)
+ logger.debug('%s: got requirements %r from metadata: %r', self.name, req_attr,
+ reqts)
+ return set(md.get_requirements(reqts, extras=self.extras,
+ env=self.context))
+
+ @property
+ def run_requires(self):
+ return self._get_requirements('run_requires')
+
+ @property
+ def meta_requires(self):
+ return self._get_requirements('meta_requires')
+
+ @property
+ def build_requires(self):
+ return self._get_requirements('build_requires')
+
+ @property
+ def test_requires(self):
+ return self._get_requirements('test_requires')
+
+ @property
+ def dev_requires(self):
+ return self._get_requirements('dev_requires')
+
+ def matches_requirement(self, req):
+ """
+ Say if this instance matches (fulfills) a requirement.
+ :param req: The requirement to match.
+ :rtype req: str
+ :return: True if it matches, else False.
+ """
+ # Requirement may contain extras - parse to lose those
+ # from what's passed to the matcher
+ r = parse_requirement(req)
+ scheme = get_scheme(self.metadata.scheme)
+ try:
+ matcher = scheme.matcher(r.requirement)
+ except UnsupportedVersionError:
+ # XXX compat-mode if cannot read the version
+ logger.warning('could not read version %r - using name only',
+ req)
+ name = req.split()[0]
+ matcher = scheme.matcher(name)
+
+ name = matcher.key # case-insensitive
+
+ result = False
+ for p in self.provides:
+ p_name, p_ver = parse_name_and_version(p)
+ if p_name != name:
+ continue
+ try:
+ result = matcher.match(p_ver)
+ break
+ except UnsupportedVersionError:
+ pass
+ return result
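+
+    # Editor's sketch: a distribution providing 'foo (1.4)' satisfies the
+    # requirement string 'foo (>= 1.2)'.
+    #
+    #     dist = make_dist('foo', '1.4')            # defined later in this file
+    #     dist.matches_requirement('foo (>= 1.2)')  # -> True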
+
+ def __repr__(self):
+ """
+        Return a textual representation of this instance.
+ """
+ if self.source_url:
+ suffix = ' [%s]' % self.source_url
+ else:
+ suffix = ''
+ return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)
+
+ def __eq__(self, other):
+ """
+ See if this distribution is the same as another.
+        :param other: The distribution to compare with. To be equal to one
+                      another, distributions must have the same type, name,
+                      version and source_url.
+ :return: True if it is the same, else False.
+ """
+ if type(other) is not type(self):
+ result = False
+ else:
+ result = (self.name == other.name and
+ self.version == other.version and
+ self.source_url == other.source_url)
+ return result
+
+ def __hash__(self):
+ """
+ Compute hash in a way which matches the equality test.
+ """
+ return hash(self.name) + hash(self.version) + hash(self.source_url)
+
+
+class BaseInstalledDistribution(Distribution):
+ """
+ This is the base class for installed distributions (whether PEP 376 or
+ legacy).
+ """
+
+ hasher = None
+
+ def __init__(self, metadata, path, env=None):
+ """
+ Initialise an instance.
+ :param metadata: An instance of :class:`Metadata` which describes the
+ distribution. This will normally have been initialised
+ from a metadata file in the ``path``.
+ :param path: The path of the ``.dist-info`` or ``.egg-info``
+ directory for the distribution.
+ :param env: This is normally the :class:`DistributionPath`
+ instance where this distribution was found.
+ """
+ super(BaseInstalledDistribution, self).__init__(metadata)
+ self.path = path
+ self.dist_path = env
+
+ def get_hash(self, data, hasher=None):
+ """
+ Get the hash of some data, using a particular hash algorithm, if
+ specified.
+
+ :param data: The data to be hashed.
+ :type data: bytes
+ :param hasher: The name of a hash implementation, supported by hashlib,
+ or ``None``. Examples of valid values are ``'sha1'``,
+                       ``'sha224'``, ``'sha384'``, ``'sha256'``, ``'md5'`` and
+ ``'sha512'``. If no hasher is specified, the ``hasher``
+ attribute of the :class:`InstalledDistribution` instance
+ is used. If the hasher is determined to be ``None``, MD5
+ is used as the hashing algorithm.
+ :returns: The hash of the data. If a hasher was explicitly specified,
+ the returned hash will be prefixed with the specified hasher
+ followed by '='.
+ :rtype: str
+ """
+        if hasher is None:
+            hasher = self.hasher
+        if hasher is None:
+            hasher = hashlib.md5
+            prefix = ''
+        else:
+            # Build the prefix from the algorithm name before resolving it to
+            # the hashlib constructor, so the prefix always names the
+            # algorithm actually used (not necessarily self.hasher).
+            prefix = '%s=' % hasher
+            hasher = getattr(hashlib, hasher)
+ digest = hasher(data).digest()
+ digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
+ return '%s%s' % (prefix, digest)
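+
+    # Editor's illustration of the digest format produced above:
+    #
+    #     dist.get_hash(b'data', 'sha256')
+    #     # -> 'sha256=<urlsafe base64 digest, with "=" padding stripped>'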
+
+
+class InstalledDistribution(BaseInstalledDistribution):
+ """
+ Created with the *path* of the ``.dist-info`` directory provided to the
+ constructor. It reads the metadata contained in ``pydist.json`` when it is
+ instantiated., or uses a passed in Metadata instance (useful for when
+ dry-run mode is being used).
+ """
+
+ hasher = 'sha256'
+
+ def __init__(self, path, metadata=None, env=None):
+ self.modules = []
+ self.finder = finder = resources.finder_for_path(path)
+ if finder is None:
+ raise ValueError('finder unavailable for %s' % path)
+ if env and env._cache_enabled and path in env._cache.path:
+ metadata = env._cache.path[path].metadata
+ elif metadata is None:
+ r = finder.find(METADATA_FILENAME)
+ # Temporary - for Wheel 0.23 support
+ if r is None:
+ r = finder.find(WHEEL_METADATA_FILENAME)
+ # Temporary - for legacy support
+ if r is None:
+ r = finder.find(LEGACY_METADATA_FILENAME)
+ if r is None:
+ raise ValueError('no %s found in %s' % (METADATA_FILENAME,
+ path))
+ with contextlib.closing(r.as_stream()) as stream:
+ metadata = Metadata(fileobj=stream, scheme='legacy')
+
+ super(InstalledDistribution, self).__init__(metadata, path, env)
+
+ if env and env._cache_enabled:
+ env._cache.add(self)
+
+ r = finder.find('REQUESTED')
+ self.requested = r is not None
+ p = os.path.join(path, 'top_level.txt')
+ if os.path.exists(p):
+ with open(p, 'rb') as f:
+ data = f.read().decode('utf-8')
+ self.modules = data.splitlines()
+
+ def __repr__(self):
+ return '<InstalledDistribution %r %s at %r>' % (
+ self.name, self.version, self.path)
+
+ def __str__(self):
+ return "%s %s" % (self.name, self.version)
+
+ def _get_records(self):
+ """
+ Get the list of installed files for the distribution
+ :return: A list of tuples of path, hash and size. Note that hash and
+ size might be ``None`` for some entries. The path is exactly
+ as stored in the file (which is as in PEP 376).
+ """
+ results = []
+ r = self.get_distinfo_resource('RECORD')
+ with contextlib.closing(r.as_stream()) as stream:
+ with CSVReader(stream=stream) as record_reader:
+ # Base location is parent dir of .dist-info dir
+ #base_location = os.path.dirname(self.path)
+ #base_location = os.path.abspath(base_location)
+ for row in record_reader:
+ missing = [None for i in range(len(row), 3)]
+ path, checksum, size = row + missing
+ #if not os.path.isabs(path):
+ # path = path.replace('/', os.sep)
+ # path = os.path.join(base_location, path)
+ results.append((path, checksum, size))
+ return results
+
+ @cached_property
+ def exports(self):
+ """
+ Return the information exported by this distribution.
+ :return: A dictionary of exports, mapping an export category to a dict
+ of :class:`ExportEntry` instances describing the individual
+ export entries, and keyed by name.
+ """
+ result = {}
+ r = self.get_distinfo_resource(EXPORTS_FILENAME)
+ if r:
+ result = self.read_exports()
+ return result
+
+ def read_exports(self):
+ """
+ Read exports data from a file in .ini format.
+
+ :return: A dictionary of exports, mapping an export category to a list
+ of :class:`ExportEntry` instances describing the individual
+ export entries.
+ """
+ result = {}
+ r = self.get_distinfo_resource(EXPORTS_FILENAME)
+ if r:
+ with contextlib.closing(r.as_stream()) as stream:
+ result = read_exports(stream)
+ return result
+
+ def write_exports(self, exports):
+ """
+ Write a dictionary of exports to a file in .ini format.
+ :param exports: A dictionary of exports, mapping an export category to
+ a list of :class:`ExportEntry` instances describing the
+ individual export entries.
+ """
+ rf = self.get_distinfo_file(EXPORTS_FILENAME)
+ with open(rf, 'w') as f:
+ write_exports(exports, f)
+
+ def get_resource_path(self, relative_path):
+ """
+ NOTE: This API may change in the future.
+
+ Return the absolute path to a resource file with the given relative
+ path.
+
+ :param relative_path: The path, relative to .dist-info, of the resource
+ of interest.
+ :return: The absolute path where the resource is to be found.
+ """
+ r = self.get_distinfo_resource('RESOURCES')
+ with contextlib.closing(r.as_stream()) as stream:
+ with CSVReader(stream=stream) as resources_reader:
+ for relative, destination in resources_reader:
+ if relative == relative_path:
+ return destination
+ raise KeyError('no resource file with relative path %r '
+ 'is installed' % relative_path)
+
+ def list_installed_files(self):
+ """
+        Iterates over the ``RECORD`` entries and yields a tuple
+        ``(path, hash, size)`` for each line.
+
+ :returns: iterator of (path, hash, size)
+ """
+ for result in self._get_records():
+ yield result
+
+ def write_installed_files(self, paths, prefix, dry_run=False):
+ """
+ Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
+ existing ``RECORD`` file is silently overwritten.
+
+ prefix is used to determine when to write absolute paths.
+ """
+ prefix = os.path.join(prefix, '')
+ base = os.path.dirname(self.path)
+ base_under_prefix = base.startswith(prefix)
+ base = os.path.join(base, '')
+ record_path = self.get_distinfo_file('RECORD')
+ logger.info('creating %s', record_path)
+ if dry_run:
+ return None
+ with CSVWriter(record_path) as writer:
+ for path in paths:
+ if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
+ # do not put size and hash, as in PEP-376
+ hash_value = size = ''
+ else:
+ size = '%d' % os.path.getsize(path)
+ with open(path, 'rb') as fp:
+ hash_value = self.get_hash(fp.read())
+ if path.startswith(base) or (base_under_prefix and
+ path.startswith(prefix)):
+ path = os.path.relpath(path, base)
+ writer.writerow((path, hash_value, size))
+
+ # add the RECORD file itself
+ if record_path.startswith(base):
+ record_path = os.path.relpath(record_path, base)
+ writer.writerow((record_path, '', ''))
+ return record_path
+
+ def check_installed_files(self):
+ """
+ Checks that the hashes and sizes of the files in ``RECORD`` are
+ matched by the files themselves. Returns a (possibly empty) list of
+ mismatches. Each entry in the mismatch list will be a tuple consisting
+ of the path, 'exists', 'size' or 'hash' according to what didn't match
+ (existence is checked first, then size, then hash), the expected
+ value and the actual value.
+ """
+ mismatches = []
+ base = os.path.dirname(self.path)
+ record_path = self.get_distinfo_file('RECORD')
+ for path, hash_value, size in self.list_installed_files():
+ if not os.path.isabs(path):
+ path = os.path.join(base, path)
+ if path == record_path:
+ continue
+ if not os.path.exists(path):
+ mismatches.append((path, 'exists', True, False))
+ elif os.path.isfile(path):
+ actual_size = str(os.path.getsize(path))
+ if size and actual_size != size:
+ mismatches.append((path, 'size', size, actual_size))
+ elif hash_value:
+ if '=' in hash_value:
+ hasher = hash_value.split('=', 1)[0]
+ else:
+ hasher = None
+
+ with open(path, 'rb') as f:
+ actual_hash = self.get_hash(f.read(), hasher)
+ if actual_hash != hash_value:
+ mismatches.append((path, 'hash', hash_value, actual_hash))
+ return mismatches
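+
+    # Verification sketch (editor's note): each mismatch names the file and
+    # the first check that failed (existence, then size, then hash).
+    #
+    #     for path, what, expected, actual in dist.check_installed_files():
+    #         print(path, what, expected, actual)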
+
+ @cached_property
+ def shared_locations(self):
+ """
+ A dictionary of shared locations whose keys are in the set 'prefix',
+ 'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
+ The corresponding value is the absolute path of that category for
+ this distribution, and takes into account any paths selected by the
+ user at installation time (e.g. via command-line arguments). In the
+ case of the 'namespace' key, this would be a list of absolute paths
+ for the roots of namespace packages in this distribution.
+
+ The first time this property is accessed, the relevant information is
+ read from the SHARED file in the .dist-info directory.
+ """
+ result = {}
+ shared_path = os.path.join(self.path, 'SHARED')
+ if os.path.isfile(shared_path):
+ with codecs.open(shared_path, 'r', encoding='utf-8') as f:
+ lines = f.read().splitlines()
+ for line in lines:
+ key, value = line.split('=', 1)
+ if key == 'namespace':
+ result.setdefault(key, []).append(value)
+ else:
+ result[key] = value
+ return result
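+
+    # The SHARED file parsed above is a simple key=value text file; for
+    # example (editor's illustration, hypothetical paths):
+    #
+    #     prefix=/usr/local
+    #     scripts=/usr/local/bin
+    #     namespace=/usr/local/lib/python/foo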
+
+ def write_shared_locations(self, paths, dry_run=False):
+ """
+ Write shared location information to the SHARED file in .dist-info.
+ :param paths: A dictionary as described in the documentation for
+ :meth:`shared_locations`.
+ :param dry_run: If True, the action is logged but no file is actually
+ written.
+ :return: The path of the file written to.
+ """
+ shared_path = os.path.join(self.path, 'SHARED')
+ logger.info('creating %s', shared_path)
+ if dry_run:
+ return None
+ lines = []
+ for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
+ path = paths[key]
+ if os.path.isdir(paths[key]):
+ lines.append('%s=%s' % (key, path))
+ for ns in paths.get('namespace', ()):
+ lines.append('namespace=%s' % ns)
+
+ with codecs.open(shared_path, 'w', encoding='utf-8') as f:
+ f.write('\n'.join(lines))
+ return shared_path
+
+ def get_distinfo_resource(self, path):
+ if path not in DIST_FILES:
+ raise DistlibException('invalid path for a dist-info file: '
+ '%r at %r' % (path, self.path))
+ finder = resources.finder_for_path(self.path)
+ if finder is None:
+ raise DistlibException('Unable to get a finder for %s' % self.path)
+ return finder.find(path)
+
+ def get_distinfo_file(self, path):
+ """
+ Returns a path located under the ``.dist-info`` directory. Returns a
+ string representing the path.
+
+ :parameter path: a ``'/'``-separated path relative to the
+ ``.dist-info`` directory or an absolute path;
+ If *path* is an absolute path and doesn't start
+ with the ``.dist-info`` directory path,
+ a :class:`DistlibException` is raised
+ :type path: str
+ :rtype: str
+ """
+ # Check if it is an absolute path # XXX use relpath, add tests
+ if path.find(os.sep) >= 0:
+ # it's an absolute path?
+ distinfo_dirname, path = path.split(os.sep)[-2:]
+ if distinfo_dirname != self.path.split(os.sep)[-1]:
+ raise DistlibException(
+ 'dist-info file %r does not belong to the %r %s '
+ 'distribution' % (path, self.name, self.version))
+
+ # The file must be relative
+ if path not in DIST_FILES:
+ raise DistlibException('invalid path for a dist-info file: '
+ '%r at %r' % (path, self.path))
+
+ return os.path.join(self.path, path)
+
+ def list_distinfo_files(self):
+ """
+        Iterates over the ``RECORD`` entries and yields the path for each
+        line, provided the path points to a file located in the ``.dist-info``
+        directory or one of its subdirectories.
+
+ :returns: iterator of paths
+ """
+ base = os.path.dirname(self.path)
+ for path, checksum, size in self._get_records():
+ # XXX add separator or use real relpath algo
+ if not os.path.isabs(path):
+ path = os.path.join(base, path)
+ if path.startswith(self.path):
+ yield path
+
+ def __eq__(self, other):
+ return (isinstance(other, InstalledDistribution) and
+ self.path == other.path)
+
+ # See http://docs.python.org/reference/datamodel#object.__hash__
+ __hash__ = object.__hash__
+
+
+class EggInfoDistribution(BaseInstalledDistribution):
+ """Created with the *path* of the ``.egg-info`` directory or file provided
+ to the constructor. It reads the metadata contained in the file itself, or
+ if the given path happens to be a directory, the metadata is read from the
+ file ``PKG-INFO`` under that directory."""
+
+ requested = True # as we have no way of knowing, assume it was
+ shared_locations = {}
+
+ def __init__(self, path, env=None):
+ def set_name_and_version(s, n, v):
+ s.name = n
+ s.key = n.lower() # for case-insensitive comparisons
+ s.version = v
+
+ self.path = path
+ self.dist_path = env
+ if env and env._cache_enabled and path in env._cache_egg.path:
+ metadata = env._cache_egg.path[path].metadata
+ set_name_and_version(self, metadata.name, metadata.version)
+ else:
+ metadata = self._get_metadata(path)
+
+ # Need to be set before caching
+ set_name_and_version(self, metadata.name, metadata.version)
+
+ if env and env._cache_enabled:
+ env._cache_egg.add(self)
+ super(EggInfoDistribution, self).__init__(metadata, path, env)
+
+ def _get_metadata(self, path):
+ requires = None
+
+ def parse_requires_data(data):
+ """Create a list of dependencies from a requires.txt file.
+
+ *data*: the contents of a setuptools-produced requires.txt file.
+ """
+ reqs = []
+ lines = data.splitlines()
+ for line in lines:
+ line = line.strip()
+ if line.startswith('['):
+ logger.warning('Unexpected line: quitting requirement scan: %r',
+ line)
+ break
+ r = parse_requirement(line)
+ if not r:
+ logger.warning('Not recognised as a requirement: %r', line)
+ continue
+ if r.extras:
+ logger.warning('extra requirements in requires.txt are '
+ 'not supported')
+ if not r.constraints:
+ reqs.append(r.name)
+ else:
+ cons = ', '.join('%s%s' % c for c in r.constraints)
+ reqs.append('%s (%s)' % (r.name, cons))
+ return reqs
+
+ def parse_requires_path(req_path):
+ """Create a list of dependencies from a requires.txt file.
+
+ *req_path*: the path to a setuptools-produced requires.txt file.
+ """
+
+ reqs = []
+ try:
+ with codecs.open(req_path, 'r', 'utf-8') as fp:
+ reqs = parse_requires_data(fp.read())
+ except IOError:
+ pass
+ return reqs
+
+ tl_path = tl_data = None
+ if path.endswith('.egg'):
+ if os.path.isdir(path):
+ p = os.path.join(path, 'EGG-INFO')
+ meta_path = os.path.join(p, 'PKG-INFO')
+ metadata = Metadata(path=meta_path, scheme='legacy')
+ req_path = os.path.join(p, 'requires.txt')
+ tl_path = os.path.join(p, 'top_level.txt')
+ requires = parse_requires_path(req_path)
+ else:
+ # FIXME handle the case where zipfile is not available
+ zipf = zipimport.zipimporter(path)
+ fileobj = StringIO(
+ zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
+ metadata = Metadata(fileobj=fileobj, scheme='legacy')
+ try:
+ data = zipf.get_data('EGG-INFO/requires.txt')
+ tl_data = zipf.get_data('EGG-INFO/top_level.txt').decode('utf-8')
+ requires = parse_requires_data(data.decode('utf-8'))
+ except IOError:
+ requires = None
+ elif path.endswith('.egg-info'):
+ if os.path.isdir(path):
+ req_path = os.path.join(path, 'requires.txt')
+ requires = parse_requires_path(req_path)
+ path = os.path.join(path, 'PKG-INFO')
+ tl_path = os.path.join(path, 'top_level.txt')
+ metadata = Metadata(path=path, scheme='legacy')
+ else:
+ raise DistlibException('path must end with .egg-info or .egg, '
+ 'got %r' % path)
+
+ if requires:
+ metadata.add_requirements(requires)
+ # look for top-level modules in top_level.txt, if present
+ if tl_data is None:
+ if tl_path is not None and os.path.exists(tl_path):
+ with open(tl_path, 'rb') as f:
+ tl_data = f.read().decode('utf-8')
+ if not tl_data:
+ tl_data = []
+ else:
+ tl_data = tl_data.splitlines()
+ self.modules = tl_data
+ return metadata
+
+ def __repr__(self):
+ return '<EggInfoDistribution %r %s at %r>' % (
+ self.name, self.version, self.path)
+
+ def __str__(self):
+ return "%s %s" % (self.name, self.version)
+
+ def check_installed_files(self):
+ """
+ Checks that the hashes and sizes of the files in ``RECORD`` are
+ matched by the files themselves. Returns a (possibly empty) list of
+ mismatches. Each entry in the mismatch list will be a tuple consisting
+ of the path, 'exists', 'size' or 'hash' according to what didn't match
+ (existence is checked first, then size, then hash), the expected
+ value and the actual value.
+ """
+ mismatches = []
+ record_path = os.path.join(self.path, 'installed-files.txt')
+ if os.path.exists(record_path):
+ for path, _, _ in self.list_installed_files():
+ if path == record_path:
+ continue
+ if not os.path.exists(path):
+ mismatches.append((path, 'exists', True, False))
+ return mismatches
+
+ def list_installed_files(self):
+ """
+        Reads the ``installed-files.txt`` entries and returns a list of
+        ``(path, hash, size)`` tuples.
+
+ :returns: a list of (path, hash, size)
+ """
+
+ def _md5(path):
+ f = open(path, 'rb')
+ try:
+ content = f.read()
+ finally:
+ f.close()
+ return hashlib.md5(content).hexdigest()
+
+ def _size(path):
+ return os.stat(path).st_size
+
+ record_path = os.path.join(self.path, 'installed-files.txt')
+ result = []
+ if os.path.exists(record_path):
+ with codecs.open(record_path, 'r', encoding='utf-8') as f:
+ for line in f:
+ line = line.strip()
+ p = os.path.normpath(os.path.join(self.path, line))
+ # "./" is present as a marker between installed files
+ # and installation metadata files
+ if not os.path.exists(p):
+ logger.warning('Non-existent file: %s', p)
+ if p.endswith(('.pyc', '.pyo')):
+ continue
+ #otherwise fall through and fail
+ if not os.path.isdir(p):
+ result.append((p, _md5(p), _size(p)))
+ result.append((record_path, None, None))
+ return result
+
+ def list_distinfo_files(self, absolute=False):
+ """
+        Iterates over the ``installed-files.txt`` entries and yields the path
+        for each line, provided the path points to a file located in the
+        ``.egg-info`` directory or one of its subdirectories.
+
+ :parameter absolute: If *absolute* is ``True``, each returned path is
+ transformed into a local absolute path. Otherwise the
+ raw value from ``installed-files.txt`` is returned.
+ :type absolute: boolean
+ :returns: iterator of paths
+ """
+ record_path = os.path.join(self.path, 'installed-files.txt')
+ if os.path.exists(record_path):
+ skip = True
+ with codecs.open(record_path, 'r', encoding='utf-8') as f:
+ for line in f:
+ line = line.strip()
+ if line == './':
+ skip = False
+ continue
+ if not skip:
+ p = os.path.normpath(os.path.join(self.path, line))
+ if p.startswith(self.path):
+ if absolute:
+ yield p
+ else:
+ yield line
+
+ def __eq__(self, other):
+ return (isinstance(other, EggInfoDistribution) and
+ self.path == other.path)
+
+ # See http://docs.python.org/reference/datamodel#object.__hash__
+ __hash__ = object.__hash__
+
+new_dist_class = InstalledDistribution
+old_dist_class = EggInfoDistribution
+
+
+class DependencyGraph(object):
+ """
+ Represents a dependency graph between distributions.
+
+ The dependency relationships are stored in an ``adjacency_list`` that maps
+ distributions to a list of ``(other, label)`` tuples where ``other``
+ is a distribution and the edge is labeled with ``label`` (i.e. the version
+ specifier, if such was provided). Also, for more efficient traversal, for
+ every distribution ``x``, a list of predecessors is kept in
+ ``reverse_list[x]``. An edge from distribution ``a`` to
+ distribution ``b`` means that ``a`` depends on ``b``. If any missing
+ dependencies are found, they are stored in ``missing``, which is a
+ dictionary that maps distributions to a list of requirements that were not
+ provided by any other distributions.
+ """
+
+ def __init__(self):
+ self.adjacency_list = {}
+ self.reverse_list = {}
+ self.missing = {}
+
+ def add_distribution(self, distribution):
+ """Add the *distribution* to the graph.
+
+ :type distribution: :class:`distutils2.database.InstalledDistribution`
+ or :class:`distutils2.database.EggInfoDistribution`
+ """
+ self.adjacency_list[distribution] = []
+ self.reverse_list[distribution] = []
+ #self.missing[distribution] = []
+
+ def add_edge(self, x, y, label=None):
+ """Add an edge from distribution *x* to distribution *y* with the given
+ *label*.
+
+ :type x: :class:`distutils2.database.InstalledDistribution` or
+ :class:`distutils2.database.EggInfoDistribution`
+ :type y: :class:`distutils2.database.InstalledDistribution` or
+ :class:`distutils2.database.EggInfoDistribution`
+ :type label: ``str`` or ``None``
+ """
+ self.adjacency_list[x].append((y, label))
+ # multiple edges are allowed, so be careful
+ if x not in self.reverse_list[y]:
+ self.reverse_list[y].append(x)
+
+ def add_missing(self, distribution, requirement):
+ """
+ Add a missing *requirement* for the given *distribution*.
+
+ :type distribution: :class:`distutils2.database.InstalledDistribution`
+ or :class:`distutils2.database.EggInfoDistribution`
+ :type requirement: ``str``
+ """
+ logger.debug('%s missing %r', distribution, requirement)
+ self.missing.setdefault(distribution, []).append(requirement)
+
+ def _repr_dist(self, dist):
+ return '%s %s' % (dist.name, dist.version)
+
+ def repr_node(self, dist, level=1):
+ """Prints only a subgraph"""
+ output = [self._repr_dist(dist)]
+ for other, label in self.adjacency_list[dist]:
+ dist = self._repr_dist(other)
+ if label is not None:
+ dist = '%s [%s]' % (dist, label)
+ output.append(' ' * level + str(dist))
+ suboutput = self.repr_node(other, level + 1)
+ subs = suboutput.split('\n')
+ output.extend(subs[1:])
+ return '\n'.join(output)
+
+ def to_dot(self, f, skip_disconnected=True):
+ """Writes a DOT output for the graph to the provided file *f*.
+
+ If *skip_disconnected* is set to ``True``, then all distributions
+ that are not dependent on any other distribution are skipped.
+
+ :type f: has to support ``file``-like operations
+ :type skip_disconnected: ``bool``
+ """
+ disconnected = []
+
+ f.write("digraph dependencies {\n")
+ for dist, adjs in self.adjacency_list.items():
+ if len(adjs) == 0 and not skip_disconnected:
+ disconnected.append(dist)
+ for other, label in adjs:
+                if label is not None:
+ f.write('"%s" -> "%s" [label="%s"]\n' %
+ (dist.name, other.name, label))
+ else:
+ f.write('"%s" -> "%s"\n' % (dist.name, other.name))
+ if not skip_disconnected and len(disconnected) > 0:
+ f.write('subgraph disconnected {\n')
+ f.write('label = "Disconnected"\n')
+ f.write('bgcolor = red\n')
+
+ for dist in disconnected:
+ f.write('"%s"' % dist.name)
+ f.write('\n')
+ f.write('}\n')
+ f.write('}\n')
+
+ def topological_sort(self):
+ """
+ Perform a topological sort of the graph.
+ :return: A tuple, the first element of which is a topologically sorted
+ list of distributions, and the second element of which is a
+ list of distributions that cannot be sorted because they have
+ circular dependencies and so form a cycle.
+ """
+ result = []
+ # Make a shallow copy of the adjacency list
+ alist = {}
+ for k, v in self.adjacency_list.items():
+ alist[k] = v[:]
+ while True:
+ # See what we can remove in this run
+ to_remove = []
+ for k, v in list(alist.items())[:]:
+ if not v:
+ to_remove.append(k)
+ del alist[k]
+ if not to_remove:
+ # What's left in alist (if anything) is a cycle.
+ break
+ # Remove from the adjacency list of others
+ for k, v in alist.items():
+ alist[k] = [(d, r) for d, r in v if d not in to_remove]
+ logger.debug('Moving to result: %s',
+ ['%s (%s)' % (d.name, d.version) for d in to_remove])
+ result.extend(to_remove)
+ return result, list(alist.keys())
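+
+    # Sketch (editor's illustration): if a depends on b, then b has no
+    # outgoing edges and is emitted first.
+    #
+    #     ordered, cyclic = graph.topological_sort()
+    #     # ordered == [b, a] when acyclic; cyclic lists any remaining nodes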
+
+ def __repr__(self):
+ """Representation of the graph"""
+ output = []
+ for dist, adjs in self.adjacency_list.items():
+ output.append(self.repr_node(dist))
+ return '\n'.join(output)
+
+
+def make_graph(dists, scheme='default'):
+ """Makes a dependency graph from the given distributions.
+
+ :parameter dists: a list of distributions
+ :type dists: list of :class:`distutils2.database.InstalledDistribution` and
+ :class:`distutils2.database.EggInfoDistribution` instances
+ :rtype: a :class:`DependencyGraph` instance
+ """
+ scheme = get_scheme(scheme)
+ graph = DependencyGraph()
+ provided = {} # maps names to lists of (version, dist) tuples
+
+ # first, build the graph and find out what's provided
+ for dist in dists:
+ graph.add_distribution(dist)
+
+ for p in dist.provides:
+ name, version = parse_name_and_version(p)
+ logger.debug('Add to provided: %s, %s, %s', name, version, dist)
+ provided.setdefault(name, []).append((version, dist))
+
+ # now make the edges
+ for dist in dists:
+ requires = (dist.run_requires | dist.meta_requires |
+ dist.build_requires | dist.dev_requires)
+ for req in requires:
+ try:
+ matcher = scheme.matcher(req)
+ except UnsupportedVersionError:
+ # XXX compat-mode if cannot read the version
+ logger.warning('could not read version %r - using name only',
+ req)
+ name = req.split()[0]
+ matcher = scheme.matcher(name)
+
+ name = matcher.key # case-insensitive
+
+ matched = False
+ if name in provided:
+ for version, provider in provided[name]:
+ try:
+ match = matcher.match(version)
+ except UnsupportedVersionError:
+ match = False
+
+ if match:
+ graph.add_edge(dist, provider, req)
+ matched = True
+ break
+ if not matched:
+ graph.add_missing(dist, req)
+ return graph
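+
+# Editor's usage sketch: build a graph for everything on sys.path and report
+# unsatisfied requirements.
+#
+#     dists = list(DistributionPath().get_distributions())
+#     graph = make_graph(dists)
+#     for dist, reqs in graph.missing.items():
+#         print(dist, 'is missing', reqs)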
+
+
+def get_dependent_dists(dists, dist):
+ """Recursively generate a list of distributions from *dists* that are
+ dependent on *dist*.
+
+ :param dists: a list of distributions
+    :param dist: a distribution, member of *dists*, for which we are
+                 interested in finding the distributions that depend on it
+ """
+ if dist not in dists:
+ raise DistlibException('given distribution %r is not a member '
+ 'of the list' % dist.name)
+ graph = make_graph(dists)
+
+ dep = [dist] # dependent distributions
+ todo = graph.reverse_list[dist] # list of nodes we should inspect
+
+ while todo:
+ d = todo.pop()
+ dep.append(d)
+ for succ in graph.reverse_list[d]:
+ if succ not in dep:
+ todo.append(succ)
+
+ dep.pop(0) # remove dist from dep, was there to prevent infinite loops
+ return dep
+
+
+def get_required_dists(dists, dist):
+ """Recursively generate a list of distributions from *dists* that are
+ required by *dist*.
+
+ :param dists: a list of distributions
+ :param dist: a distribution, member of *dists* for which we are interested
+ in finding the dependencies.
+ """
+ if dist not in dists:
+ raise DistlibException('given distribution %r is not a member '
+ 'of the list' % dist.name)
+ graph = make_graph(dists)
+
+ req = set() # required distributions
+ todo = graph.adjacency_list[dist] # list of nodes we should inspect
+ seen = set(t[0] for t in todo) # already added to todo
+
+ while todo:
+ d = todo.pop()[0]
+ req.add(d)
+ pred_list = graph.adjacency_list[d]
+ for pred in pred_list:
+ d = pred[0]
+ if d not in req and d not in seen:
+ seen.add(d)
+ todo.append(pred)
+ return req
+
+
+def make_dist(name, version, **kwargs):
+ """
+ A convenience method for making a dist given just a name and version.
+ """
+ summary = kwargs.pop('summary', 'Placeholder for summary')
+ md = Metadata(**kwargs)
+ md.name = name
+ md.version = version
+ md.summary = summary or 'Placeholder for summary'
+ return Distribution(md)
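+
+# Editor's sketch of make_dist (hypothetical values):
+#
+#     d = make_dist('example', '0.1')
+#     d.name_and_version  # -> 'example (0.1)'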
diff --git a/third_party/python/pip/pip/_vendor/distlib/index.py b/third_party/python/pip/pip/_vendor/distlib/index.py
new file mode 100644
index 0000000000..9b6d129ed6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/index.py
@@ -0,0 +1,508 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import hashlib
+import logging
+import os
+import shutil
+import subprocess
+import tempfile
+try:
+ from threading import Thread
+except ImportError: # pragma: no cover
+ from dummy_threading import Thread
+
+from . import DistlibException
+from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
+ urlparse, build_opener, string_types)
+from .util import zip_dir, ServerProxy
+
+logger = logging.getLogger(__name__)
+
+DEFAULT_INDEX = 'https://pypi.org/pypi'
+DEFAULT_REALM = 'pypi'
+
+class PackageIndex(object):
+ """
+ This class represents a package index compatible with PyPI, the Python
+ Package Index.
+ """
+
+ boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
+
+ def __init__(self, url=None):
+ """
+ Initialise an instance.
+
+ :param url: The URL of the index. If not specified, the URL for PyPI is
+ used.
+ """
+ self.url = url or DEFAULT_INDEX
+ self.read_configuration()
+ scheme, netloc, path, params, query, frag = urlparse(self.url)
+ if params or query or frag or scheme not in ('http', 'https'):
+ raise DistlibException('invalid repository: %s' % self.url)
+ self.password_handler = None
+ self.ssl_verifier = None
+ self.gpg = None
+ self.gpg_home = None
+ with open(os.devnull, 'w') as sink:
+ # Use gpg by default rather than gpg2, as gpg2 insists on
+ # prompting for passwords
+ for s in ('gpg', 'gpg2'):
+ try:
+ rc = subprocess.check_call([s, '--version'], stdout=sink,
+ stderr=sink)
+ if rc == 0:
+ self.gpg = s
+ break
+ except OSError:
+ pass
+
+ def _get_pypirc_command(self):
+ """
+ Get the distutils command for interacting with PyPI configurations.
+ :return: the command.
+ """
+ from .util import _get_pypirc_command as cmd
+ return cmd()
+
+ def read_configuration(self):
+ """
+ Read the PyPI access configuration as supported by distutils. This populates
+ ``username``, ``password``, ``realm`` and ``url`` attributes from the
+ configuration.
+ """
+ from .util import _load_pypirc
+ cfg = _load_pypirc(self)
+ self.username = cfg.get('username')
+ self.password = cfg.get('password')
+ self.realm = cfg.get('realm', 'pypi')
+ self.url = cfg.get('repository', self.url)
+
+ def save_configuration(self):
+ """
+ Save the PyPI access configuration. You must have set ``username`` and
+ ``password`` attributes before calling this method.
+ """
+ self.check_credentials()
+ from .util import _store_pypirc
+ _store_pypirc(self)
+
+ def check_credentials(self):
+ """
+ Check that ``username`` and ``password`` have been set, and raise an
+ exception if not.
+ """
+ if self.username is None or self.password is None:
+ raise DistlibException('username and password must be set')
+ pm = HTTPPasswordMgr()
+ _, netloc, _, _, _, _ = urlparse(self.url)
+ pm.add_password(self.realm, netloc, self.username, self.password)
+ self.password_handler = HTTPBasicAuthHandler(pm)
+
+ def register(self, metadata): # pragma: no cover
+ """
+ Register a distribution on PyPI, using the provided metadata.
+
+ :param metadata: A :class:`Metadata` instance defining at least a name
+ and version number for the distribution to be
+ registered.
+ :return: The HTTP response received from PyPI upon submission of the
+ request.
+ """
+ self.check_credentials()
+ metadata.validate()
+ d = metadata.todict()
+ d[':action'] = 'verify'
+ request = self.encode_request(d.items(), [])
+ response = self.send_request(request)
+ d[':action'] = 'submit'
+ request = self.encode_request(d.items(), [])
+ return self.send_request(request)
+
+ def _reader(self, name, stream, outbuf):
+ """
+        Thread runner for reading lines from a subprocess into a buffer.
+
+        :param name: The logical name of the stream (used for logging only).
+        :param stream: The stream to read from. This will typically be a pipe
+                       connected to the output stream of a subprocess.
+ :param outbuf: The list to append the read lines to.
+ """
+ while True:
+ s = stream.readline()
+ if not s:
+ break
+ s = s.decode('utf-8').rstrip()
+ outbuf.append(s)
+            logger.debug('%s: %s', name, s)
+ stream.close()
+
+ def get_sign_command(self, filename, signer, sign_password, keystore=None): # pragma: no cover
+ """
+ Return a suitable command for signing a file.
+
+ :param filename: The pathname to the file to be signed.
+ :param signer: The identifier of the signer of the file.
+ :param sign_password: The passphrase for the signer's
+ private key used for signing.
+ :param keystore: The path to a directory which contains the keys
+ used in verification. If not specified, the
+ instance's ``gpg_home`` attribute is used instead.
+ :return: The signing command as a list suitable to be
+ passed to :class:`subprocess.Popen`.
+ """
+ cmd = [self.gpg, '--status-fd', '2', '--no-tty']
+ if keystore is None:
+ keystore = self.gpg_home
+ if keystore:
+ cmd.extend(['--homedir', keystore])
+ if sign_password is not None:
+ cmd.extend(['--batch', '--passphrase-fd', '0'])
+ td = tempfile.mkdtemp()
+ sf = os.path.join(td, os.path.basename(filename) + '.asc')
+ cmd.extend(['--detach-sign', '--armor', '--local-user',
+ signer, '--output', sf, filename])
+ logger.debug('invoking: %s', ' '.join(cmd))
+ return cmd, sf
+
+ def run_command(self, cmd, input_data=None):
+ """
+        Run a command in a child process, passing it any input data specified.
+
+ :param cmd: The command to run.
+ :param input_data: If specified, this must be a byte string containing
+ data to be sent to the child process.
+ :return: A tuple consisting of the subprocess' exit code, a list of
+ lines read from the subprocess' ``stdout``, and a list of
+ lines read from the subprocess' ``stderr``.
+ """
+ kwargs = {
+ 'stdout': subprocess.PIPE,
+ 'stderr': subprocess.PIPE,
+ }
+ if input_data is not None:
+ kwargs['stdin'] = subprocess.PIPE
+ stdout = []
+ stderr = []
+ p = subprocess.Popen(cmd, **kwargs)
+ # We don't use communicate() here because we may need to
+ # get clever with interacting with the command
+ t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
+ t1.start()
+ t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
+ t2.start()
+ if input_data is not None:
+ p.stdin.write(input_data)
+ p.stdin.close()
+
+ p.wait()
+ t1.join()
+ t2.join()
+ return p.returncode, stdout, stderr
+
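+ # Editor's note: a minimal usage sketch for run_command; the command and
+ # the ``index`` name below are illustrative, not part of this module:
+ #
+ #   rc, out, err = index.run_command(['gpg', '--version'])
+ #   if rc == 0:
+ #       print('\n'.join(out))
+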
+ def sign_file(self, filename, signer, sign_password, keystore=None): # pragma: no cover
+ """
+ Sign a file.
+
+ :param filename: The pathname to the file to be signed.
+ :param signer: The identifier of the signer of the file.
+ :param sign_password: The passphrase for the signer's
+ private key used for signing.
+ :param keystore: The path to a directory which contains the keys
+ used in signing. If not specified, the instance's
+ ``gpg_home`` attribute is used instead.
+ :return: The absolute pathname of the file where the signature is
+ stored.
+ """
+ cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
+ keystore)
+ rc, stdout, stderr = self.run_command(cmd,
+ sign_password.encode('utf-8'))
+ if rc != 0:
+ raise DistlibException('sign command failed with error '
+ 'code %s' % rc)
+ return sig_file
+
+ def upload_file(self, metadata, filename, signer=None, sign_password=None,
+ filetype='sdist', pyversion='source', keystore=None):
+ """
+ Upload a release file to the index.
+
+ :param metadata: A :class:`Metadata` instance defining at least a name
+ and version number for the file to be uploaded.
+ :param filename: The pathname of the file to be uploaded.
+ :param signer: The identifier of the signer of the file.
+ :param sign_password: The passphrase for the signer's
+ private key used for signing.
+ :param filetype: The type of the file being uploaded. This is the
+ distutils command which produced that file, e.g.
+ ``sdist`` or ``bdist_wheel``.
+ :param pyversion: The version of Python which the release relates
+ to. For code compatible with any Python, this would
+ be ``source``, otherwise it would be e.g. ``3.2``.
+ :param keystore: The path to a directory which contains the keys
+ used in signing. If not specified, the instance's
+ ``gpg_home`` attribute is used instead.
+ :return: The HTTP response received from PyPI upon submission of the
+ request.
+ """
+ self.check_credentials()
+ if not os.path.exists(filename):
+ raise DistlibException('not found: %s' % filename)
+ metadata.validate()
+ d = metadata.todict()
+ sig_file = None
+ if signer:
+ if not self.gpg:
+ logger.warning('no signing program available - not signed')
+ else:
+ sig_file = self.sign_file(filename, signer, sign_password,
+ keystore)
+ with open(filename, 'rb') as f:
+ file_data = f.read()
+ md5_digest = hashlib.md5(file_data).hexdigest()
+ sha256_digest = hashlib.sha256(file_data).hexdigest()
+ d.update({
+ ':action': 'file_upload',
+ 'protocol_version': '1',
+ 'filetype': filetype,
+ 'pyversion': pyversion,
+ 'md5_digest': md5_digest,
+ 'sha256_digest': sha256_digest,
+ })
+ files = [('content', os.path.basename(filename), file_data)]
+ if sig_file:
+ with open(sig_file, 'rb') as f:
+ sig_data = f.read()
+ files.append(('gpg_signature', os.path.basename(sig_file),
+ sig_data))
+ shutil.rmtree(os.path.dirname(sig_file))
+ request = self.encode_request(d.items(), files)
+ return self.send_request(request)
+
+ def upload_documentation(self, metadata, doc_dir): # pragma: no cover
+ """
+ Upload documentation to the index.
+
+ :param metadata: A :class:`Metadata` instance defining at least a name
+ and version number for the documentation to be
+ uploaded.
+ :param doc_dir: The pathname of the directory which contains the
+ documentation. This should be the directory that
+ contains the ``index.html`` for the documentation.
+ :return: The HTTP response received from PyPI upon submission of the
+ request.
+ """
+ self.check_credentials()
+ if not os.path.isdir(doc_dir):
+ raise DistlibException('not a directory: %r' % doc_dir)
+ fn = os.path.join(doc_dir, 'index.html')
+ if not os.path.exists(fn):
+ raise DistlibException('not found: %r' % fn)
+ metadata.validate()
+ name, version = metadata.name, metadata.version
+ zip_data = zip_dir(doc_dir).getvalue()
+ fields = [(':action', 'doc_upload'),
+ ('name', name), ('version', version)]
+ files = [('content', name, zip_data)]
+ request = self.encode_request(fields, files)
+ return self.send_request(request)
+
+ def get_verify_command(self, signature_filename, data_filename,
+ keystore=None):
+ """
+ Return a suitable command for verifying a file.
+
+ :param signature_filename: The pathname to the file containing the
+ signature.
+ :param data_filename: The pathname to the file containing the
+ signed data.
+ :param keystore: The path to a directory which contains the keys
+ used in verification. If not specified, the
+ instance's ``gpg_home`` attribute is used instead.
+ :return: The verifying command as a list suitable to be
+ passed to :class:`subprocess.Popen`.
+ """
+ cmd = [self.gpg, '--status-fd', '2', '--no-tty']
+ if keystore is None:
+ keystore = self.gpg_home
+ if keystore:
+ cmd.extend(['--homedir', keystore])
+ cmd.extend(['--verify', signature_filename, data_filename])
+ logger.debug('invoking: %s', ' '.join(cmd))
+ return cmd
+
+ def verify_signature(self, signature_filename, data_filename,
+ keystore=None):
+ """
+ Verify a signature for a file.
+
+ :param signature_filename: The pathname to the file containing the
+ signature.
+ :param data_filename: The pathname to the file containing the
+ signed data.
+ :param keystore: The path to a directory which contains the keys
+ used in verification. If not specified, the
+ instance's ``gpg_home`` attribute is used instead.
+ :return: True if the signature was verified, else False.
+ """
+ if not self.gpg:
+ raise DistlibException('verification unavailable because gpg '
+ 'unavailable')
+ cmd = self.get_verify_command(signature_filename, data_filename,
+ keystore)
+ rc, stdout, stderr = self.run_command(cmd)
+ if rc not in (0, 1):
+ raise DistlibException('verify command failed with error '
+ 'code %s' % rc)
+ return rc == 0
+
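+ # Editor's sketch: verifying a downloaded archive against a detached
+ # signature (all paths here are hypothetical):
+ #
+ #   if index.verify_signature('pkg-1.0.tar.gz.asc', 'pkg-1.0.tar.gz',
+ #                             keystore='/path/to/keys'):
+ #       print('signature OK')
+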
+ def download_file(self, url, destfile, digest=None, reporthook=None):
+ """
+ This is a convenience method for downloading a file from an URL.
+ Normally, this will be a file from the index, though currently
+ no check is made for this (i.e. a file can be downloaded from
+ anywhere).
+
+ The method is just like the :func:`urlretrieve` function in the
+ standard library, except that it allows digest computation to be
+ done during download and checks that the downloaded data
+ matches any expected value.
+
+ :param url: The URL of the file to be downloaded (assumed to be
+ available via an HTTP GET request).
+ :param destfile: The pathname where the downloaded file is to be
+ saved.
+ :param digest: If specified, this must be a (hasher, value)
+ tuple, where hasher is the algorithm used (e.g.
+ ``'md5'``) and ``value`` is the expected value.
+ :param reporthook: The same as for :func:`urlretrieve` in the
+ standard library.
+ """
+ if digest is None:
+ digester = None
+ logger.debug('No digest specified')
+ else:
+ if isinstance(digest, (list, tuple)):
+ hasher, digest = digest
+ else:
+ hasher = 'md5'
+ digester = getattr(hashlib, hasher)()
+ logger.debug('Digest specified: %s', digest)
+ # The following code is equivalent to urlretrieve.
+ # We need to do it this way so that we can compute the
+ # digest of the file as we go.
+ with open(destfile, 'wb') as dfp:
+ # addinfourl is not a context manager on 2.x
+ # so we have to use try/finally
+ sfp = self.send_request(Request(url))
+ try:
+ headers = sfp.info()
+ blocksize = 8192
+ size = -1
+ read = 0
+ blocknum = 0
+ if "content-length" in headers:
+ size = int(headers["Content-Length"])
+ if reporthook:
+ reporthook(blocknum, blocksize, size)
+ while True:
+ block = sfp.read(blocksize)
+ if not block:
+ break
+ read += len(block)
+ dfp.write(block)
+ if digester:
+ digester.update(block)
+ blocknum += 1
+ if reporthook:
+ reporthook(blocknum, blocksize, size)
+ finally:
+ sfp.close()
+
+ # check that we got the whole file, if we can
+ if size >= 0 and read < size:
+ raise DistlibException(
+ 'retrieval incomplete: got only %d out of %d bytes'
+ % (read, size))
+ # if we have a digest, it must match.
+ if digester:
+ actual = digester.hexdigest()
+ if digest != actual:
+ raise DistlibException('%s digest mismatch for %s: expected '
+ '%s, got %s' % (hasher, destfile,
+ digest, actual))
+ logger.debug('Digest verified: %s', digest)
+
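+ # Editor's sketch: downloading with an expected digest; passing a
+ # (hasher, value) tuple selects the algorithm (the URL and digest value
+ # are placeholders):
+ #
+ #   index.download_file('https://example.com/pkg-1.0.tar.gz',
+ #                       'pkg-1.0.tar.gz',
+ #                       digest=('sha256', expected_hex))
+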
+ def send_request(self, req):
+ """
+ Send a standard library :class:`Request` to PyPI and return its
+ response.
+
+ :param req: The request to send.
+ :return: The HTTP response from PyPI (a standard library HTTPResponse).
+ """
+ handlers = []
+ if self.password_handler:
+ handlers.append(self.password_handler)
+ if self.ssl_verifier:
+ handlers.append(self.ssl_verifier)
+ opener = build_opener(*handlers)
+ return opener.open(req)
+
+ def encode_request(self, fields, files):
+ """
+ Encode fields and files for posting to an HTTP server.
+
+ :param fields: The fields to send as a list of (fieldname, value)
+ tuples.
+ :param files: The files to send as a list of (fieldname, filename,
+ file_bytes) tuples.
+ """
+ # Adapted from packaging, which in turn was adapted from
+ # http://code.activestate.com/recipes/146306
+
+ parts = []
+ boundary = self.boundary
+ for k, values in fields:
+ if not isinstance(values, (list, tuple)):
+ values = [values]
+
+ for v in values:
+ parts.extend((
+ b'--' + boundary,
+ ('Content-Disposition: form-data; name="%s"' %
+ k).encode('utf-8'),
+ b'',
+ v.encode('utf-8')))
+ for key, filename, value in files:
+ parts.extend((
+ b'--' + boundary,
+ ('Content-Disposition: form-data; name="%s"; filename="%s"' %
+ (key, filename)).encode('utf-8'),
+ b'',
+ value))
+
+ parts.extend((b'--' + boundary + b'--', b''))
+
+ body = b'\r\n'.join(parts)
+ ct = b'multipart/form-data; boundary=' + boundary
+ headers = {
+ 'Content-type': ct,
+ 'Content-length': str(len(body))
+ }
+ return Request(self.url, body, headers)
+
+ def search(self, terms, operator=None): # pragma: no cover
+ if isinstance(terms, string_types):
+ terms = {'name': terms}
+ rpc_proxy = ServerProxy(self.url, timeout=3.0)
+ try:
+ return rpc_proxy.search(terms, operator or 'and')
+ finally:
+ rpc_proxy('close')()
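+
+# Editor's sketch: typical use of this class, assuming it is distlib's
+# PackageIndex (within pip the module is vendored as
+# pip._vendor.distlib.index); the URL and credentials are illustrative:
+#
+#   index = PackageIndex('https://test.pypi.org/legacy/')
+#   index.username = 'user'
+#   index.password = 'secret'
+#   # 'metadata' is a distlib Metadata instance for the release
+#   response = index.upload_file(metadata, 'dist/pkg-1.0.tar.gz')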
diff --git a/third_party/python/pip/pip/_vendor/distlib/locators.py b/third_party/python/pip/pip/_vendor/distlib/locators.py
new file mode 100644
index 0000000000..966ebc0e37
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/locators.py
@@ -0,0 +1,1300 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2015 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+
+import gzip
+from io import BytesIO
+import json
+import logging
+import os
+import posixpath
+import re
+try:
+ import threading
+except ImportError: # pragma: no cover
+ import dummy_threading as threading
+import zlib
+
+from . import DistlibException
+from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
+ queue, quote, unescape, build_opener,
+ HTTPRedirectHandler as BaseRedirectHandler, text_type,
+ Request, HTTPError, URLError)
+from .database import Distribution, DistributionPath, make_dist
+from .metadata import Metadata, MetadataInvalidError
+from .util import (cached_property, ensure_slash, split_filename, get_project_data,
+ parse_requirement, parse_name_and_version, ServerProxy,
+ normalize_name)
+from .version import get_scheme, UnsupportedVersionError
+from .wheel import Wheel, is_compatible
+
+logger = logging.getLogger(__name__)
+
+HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
+CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
+HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
+DEFAULT_INDEX = 'https://pypi.org/pypi'
+
+def get_all_distribution_names(url=None):
+ """
+ Return all distribution names known by an index.
+ :param url: The URL of the index.
+ :return: A list of all known distribution names.
+ """
+ if url is None:
+ url = DEFAULT_INDEX
+ client = ServerProxy(url, timeout=3.0)
+ try:
+ return client.list_packages()
+ finally:
+ client('close')()
+
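+# Editor's sketch: enumerating an index over XML-RPC (this can be slow
+# against PyPI):
+#
+#   names = get_all_distribution_names()
+#   print(len(names), 'projects')
+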
+class RedirectHandler(BaseRedirectHandler):
+ """
+ A class to work around a bug in some Python 3.2.x releases.
+ """
+ # There's a bug in the base version for some 3.2.x
+ # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
+ # returns e.g. /abc, it bails because it says the scheme ''
+ # is bogus, when actually it should use the request's
+ # URL for the scheme. See Python issue #13696.
+ def http_error_302(self, req, fp, code, msg, headers):
+ # Some servers (incorrectly) return multiple Location headers
+ # (so probably same goes for URI). Use first header.
+ newurl = None
+ for key in ('location', 'uri'):
+ if key in headers:
+ newurl = headers[key]
+ break
+ if newurl is None: # pragma: no cover
+ return
+ urlparts = urlparse(newurl)
+ if urlparts.scheme == '':
+ newurl = urljoin(req.get_full_url(), newurl)
+ if hasattr(headers, 'replace_header'):
+ headers.replace_header(key, newurl)
+ else:
+ headers[key] = newurl
+ return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
+ headers)
+
+ http_error_301 = http_error_303 = http_error_307 = http_error_302
+
+class Locator(object):
+ """
+ A base class for locators - things that locate distributions.
+ """
+ source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
+ binary_extensions = ('.egg', '.exe', '.whl')
+ excluded_extensions = ('.pdf',)
+
+ # A list of tags indicating which wheels you want to match. The default
+ # value of None matches against the tags compatible with the running
+ # Python. If you want to match other values, set wheel_tags on a locator
+ # instance to a list of tuples (pyver, abi, arch) which you want to match.
+ wheel_tags = None
+
+ downloadable_extensions = source_extensions + ('.whl',)
+
+ def __init__(self, scheme='default'):
+ """
+ Initialise an instance.
+ :param scheme: Because locators look for most recent versions, they
+ need to know the version scheme to use. This specifies
+ the current PEP-recommended scheme - use ``'legacy'``
+ if you need to support existing distributions on PyPI.
+ """
+ self._cache = {}
+ self.scheme = scheme
+ # Because of bugs in some of the handlers on some of the platforms,
+ # we use our own opener rather than just using urlopen.
+ self.opener = build_opener(RedirectHandler())
+ # If get_project() is called from locate(), the matcher instance
+ # is set from the requirement passed to locate(). See issue #18 for
+ # why this can be useful to know.
+ self.matcher = None
+ self.errors = queue.Queue()
+
+ def get_errors(self):
+ """
+ Return any errors which have occurred.
+ """
+ result = []
+ while not self.errors.empty(): # pragma: no cover
+ try:
+ e = self.errors.get(False)
+ result.append(e)
+ except queue.Empty: # the exception lives on the queue module, not the instance
+ continue
+ self.errors.task_done()
+ return result
+
+ def clear_errors(self):
+ """
+ Clear any errors which may have been logged.
+ """
+ # Just get the errors and throw them away
+ self.get_errors()
+
+ def clear_cache(self):
+ self._cache.clear()
+
+ def _get_scheme(self):
+ return self._scheme
+
+ def _set_scheme(self, value):
+ self._scheme = value
+
+ scheme = property(_get_scheme, _set_scheme)
+
+ def _get_project(self, name):
+ """
+ For a given project, get a dictionary mapping available versions to Distribution
+ instances.
+
+ This should be implemented in subclasses.
+
+ If called from a locate() request, self.matcher will be set to a
+ matcher for the requirement to satisfy, otherwise it will be None.
+ """
+ raise NotImplementedError('Please implement in the subclass')
+
+ def get_distribution_names(self):
+ """
+ Return all the distribution names known to this locator.
+ """
+ raise NotImplementedError('Please implement in the subclass')
+
+ def get_project(self, name):
+ """
+ For a given project, get a dictionary mapping available versions to Distribution
+ instances.
+
+ This calls _get_project to do all the work, and just implements a caching layer on top.
+ """
+ if self._cache is None: # pragma: no cover
+ result = self._get_project(name)
+ elif name in self._cache:
+ result = self._cache[name]
+ else:
+ self.clear_errors()
+ result = self._get_project(name)
+ self._cache[name] = result
+ return result
+
+ def score_url(self, url):
+ """
+ Give an url a score which can be used to choose preferred URLs
+ for a given project release.
+ """
+ t = urlparse(url)
+ basename = posixpath.basename(t.path)
+ compatible = True
+ is_wheel = basename.endswith('.whl')
+ is_downloadable = basename.endswith(self.downloadable_extensions)
+ if is_wheel:
+ compatible = is_compatible(Wheel(basename), self.wheel_tags)
+ return (t.scheme == 'https', 'pypi.org' in t.netloc,
+ is_downloadable, is_wheel, compatible, basename)
+
+ def prefer_url(self, url1, url2):
+ """
+ Choose one of two URLs where both are candidates for distribution
+ archives for the same version of a distribution (for example,
+ .tar.gz vs. zip).
+
+ The current implementation favours https:// URLs over http://, archives
+ from PyPI over those from other locations, wheel compatibility (if a
+ wheel) and then the archive name.
+ """
+ result = url2
+ if url1:
+ s1 = self.score_url(url1)
+ s2 = self.score_url(url2)
+ if s1 > s2:
+ result = url1
+ if result != url2:
+ logger.debug('Not replacing %r with %r', url1, url2)
+ else:
+ logger.debug('Replacing %r with %r', url1, url2)
+ return result
+
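+ # Editor's note: score_url returns a plain tuple, so prefer_url's
+ # comparison is lexicographic: https beats http, pypi.org beats other
+ # hosts, then downloadability, wheel-ness, compatibility and basename.
+ # For example (URLs are illustrative):
+ #
+ #   locator.prefer_url('http://example.com/foo-1.0.zip',
+ #                      'https://pypi.org/packages/foo-1.0.tar.gz')
+ #   # -> the https PyPI URL is returned
+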
+ def split_filename(self, filename, project_name):
+ """
+ Attempt to split a filename into project name, version and Python version.
+ """
+ return split_filename(filename, project_name)
+
+ def convert_url_to_download_info(self, url, project_name):
+ """
+ See if a URL is a candidate for a download URL for a project (the URL
+ has typically been scraped from an HTML page).
+
+ If it is, a dictionary is returned with keys "name", "version",
+ "filename" and "url"; otherwise, None is returned.
+ """
+ def same_project(name1, name2):
+ return normalize_name(name1) == normalize_name(name2)
+
+ result = None
+ scheme, netloc, path, params, query, frag = urlparse(url)
+ if frag.lower().startswith('egg='): # pragma: no cover
+ logger.debug('%s: version hint in fragment: %r',
+ project_name, frag)
+ m = HASHER_HASH.match(frag)
+ if m:
+ algo, digest = m.groups()
+ else:
+ algo, digest = None, None
+ origpath = path
+ if path and path[-1] == '/': # pragma: no cover
+ path = path[:-1]
+ if path.endswith('.whl'):
+ try:
+ wheel = Wheel(path)
+ if not is_compatible(wheel, self.wheel_tags):
+ logger.debug('Wheel not compatible: %s', path)
+ else:
+ if project_name is None:
+ include = True
+ else:
+ include = same_project(wheel.name, project_name)
+ if include:
+ result = {
+ 'name': wheel.name,
+ 'version': wheel.version,
+ 'filename': wheel.filename,
+ 'url': urlunparse((scheme, netloc, origpath,
+ params, query, '')),
+ 'python-version': ', '.join(
+ ['.'.join(list(v[2:])) for v in wheel.pyver]),
+ }
+ except Exception as e: # pragma: no cover
+ logger.warning('invalid path for wheel: %s', path)
+ elif not path.endswith(self.downloadable_extensions): # pragma: no cover
+ logger.debug('Not downloadable: %s', path)
+ else: # downloadable extension
+ path = filename = posixpath.basename(path)
+ for ext in self.downloadable_extensions:
+ if path.endswith(ext):
+ path = path[:-len(ext)]
+ t = self.split_filename(path, project_name)
+ if not t: # pragma: no cover
+ logger.debug('No match for project/version: %s', path)
+ else:
+ name, version, pyver = t
+ if not project_name or same_project(project_name, name):
+ result = {
+ 'name': name,
+ 'version': version,
+ 'filename': filename,
+ 'url': urlunparse((scheme, netloc, origpath,
+ params, query, '')),
+ #'packagetype': 'sdist',
+ }
+ if pyver: # pragma: no cover
+ result['python-version'] = pyver
+ break
+ if result and algo:
+ result['%s_digest' % algo] = digest
+ return result
+
+ def _get_digest(self, info):
+ """
+ Get a digest from a dictionary by looking at a "digests" dictionary
+ or keys of the form 'algo_digest'.
+
+ Returns a 2-tuple (algo, digest) if found, else None. Currently
+ looks only for SHA256, then MD5.
+ """
+ result = None
+ if 'digests' in info:
+ digests = info['digests']
+ for algo in ('sha256', 'md5'):
+ if algo in digests:
+ result = (algo, digests[algo])
+ break
+ if not result:
+ for algo in ('sha256', 'md5'):
+ key = '%s_digest' % algo
+ if key in info:
+ result = (algo, info[key])
+ break
+ return result
+
+ def _update_version_data(self, result, info):
+ """
+ Update a result dictionary (the final result from _get_project) with a
+ dictionary for a specific version, which typically holds information
+ gleaned from a filename or URL for an archive for the distribution.
+ """
+ name = info.pop('name')
+ version = info.pop('version')
+ if version in result:
+ dist = result[version]
+ md = dist.metadata
+ else:
+ dist = make_dist(name, version, scheme=self.scheme)
+ md = dist.metadata
+ dist.digest = digest = self._get_digest(info)
+ url = info['url']
+ result['digests'][url] = digest
+ if md.source_url != info['url']:
+ md.source_url = self.prefer_url(md.source_url, url)
+ result['urls'].setdefault(version, set()).add(url)
+ dist.locator = self
+ result[version] = dist
+
+ def locate(self, requirement, prereleases=False):
+ """
+ Find the most recent distribution which matches the given
+ requirement.
+
+ :param requirement: A requirement of the form 'foo (1.0)' or perhaps
+ 'foo (>= 1.0, < 2.0, != 1.3)'
+ :param prereleases: If ``True``, allow pre-release versions
+ to be located. Otherwise, pre-release versions
+ are not returned.
+ :return: A :class:`Distribution` instance, or ``None`` if no such
+ distribution could be located.
+ """
+ result = None
+ r = parse_requirement(requirement)
+ if r is None: # pragma: no cover
+ raise DistlibException('Not a valid requirement: %r' % requirement)
+ scheme = get_scheme(self.scheme)
+ self.matcher = matcher = scheme.matcher(r.requirement)
+ logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
+ versions = self.get_project(r.name)
+ if len(versions) > 2: # urls and digests keys are present
+ # sometimes, versions are invalid
+ slist = []
+ vcls = matcher.version_class
+ for k in versions:
+ if k in ('urls', 'digests'):
+ continue
+ try:
+ if not matcher.match(k):
+ pass # logger.debug('%s did not match %r', matcher, k)
+ else:
+ if prereleases or not vcls(k).is_prerelease:
+ slist.append(k)
+ # else:
+ # logger.debug('skipping pre-release '
+ # 'version %s of %s', k, matcher.name)
+ except Exception: # pragma: no cover
+ logger.warning('error matching %s with %r', matcher, k)
+ pass # slist.append(k)
+ if len(slist) > 1:
+ slist = sorted(slist, key=scheme.key)
+ if slist:
+ logger.debug('sorted list: %s', slist)
+ version = slist[-1]
+ result = versions[version]
+ if result:
+ if r.extras:
+ result.extras = r.extras
+ result.download_urls = versions.get('urls', {}).get(version, set())
+ d = {}
+ sd = versions.get('digests', {})
+ for url in result.download_urls:
+ if url in sd: # pragma: no cover
+ d[url] = sd[url]
+ result.digests = d
+ self.matcher = None
+ return result
+
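+ # Editor's sketch of locate(), using the requirement syntax from its
+ # docstring (the project name is illustrative):
+ #
+ #   dist = locator.locate('foo (>= 1.0, < 2.0)')
+ #   if dist is not None:
+ #       print(dist.name_and_version, dist.digest)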
+
+class PyPIRPCLocator(Locator):
+ """
+ This locator uses XML-RPC to locate distributions. It therefore
+ cannot be used with simple mirrors (that only mirror file content).
+ """
+ def __init__(self, url, **kwargs):
+ """
+ Initialise an instance.
+
+ :param url: The URL to use for XML-RPC.
+ :param kwargs: Passed to the superclass constructor.
+ """
+ super(PyPIRPCLocator, self).__init__(**kwargs)
+ self.base_url = url
+ self.client = ServerProxy(url, timeout=3.0)
+
+ def get_distribution_names(self):
+ """
+ Return all the distribution names known to this locator.
+ """
+ return set(self.client.list_packages())
+
+ def _get_project(self, name):
+ result = {'urls': {}, 'digests': {}}
+ versions = self.client.package_releases(name, True)
+ for v in versions:
+ urls = self.client.release_urls(name, v)
+ data = self.client.release_data(name, v)
+ metadata = Metadata(scheme=self.scheme)
+ metadata.name = data['name']
+ metadata.version = data['version']
+ metadata.license = data.get('license')
+ metadata.keywords = data.get('keywords', [])
+ metadata.summary = data.get('summary')
+ dist = Distribution(metadata)
+ if urls:
+ info = urls[0]
+ metadata.source_url = info['url']
+ dist.digest = self._get_digest(info)
+ dist.locator = self
+ result[v] = dist
+ for info in urls:
+ url = info['url']
+ digest = self._get_digest(info)
+ result['urls'].setdefault(v, set()).add(url)
+ result['digests'][url] = digest
+ return result
+
+class PyPIJSONLocator(Locator):
+ """
+ This locator uses PyPI's JSON interface. It's very limited in functionality
+ and probably not worth using.
+ """
+ def __init__(self, url, **kwargs):
+ super(PyPIJSONLocator, self).__init__(**kwargs)
+ self.base_url = ensure_slash(url)
+
+ def get_distribution_names(self):
+ """
+ Return all the distribution names known to this locator.
+ """
+ raise NotImplementedError('Not available from this locator')
+
+ def _get_project(self, name):
+ result = {'urls': {}, 'digests': {}}
+ url = urljoin(self.base_url, '%s/json' % quote(name))
+ try:
+ resp = self.opener.open(url)
+ data = resp.read().decode() # for now
+ d = json.loads(data)
+ md = Metadata(scheme=self.scheme)
+ data = d['info']
+ md.name = data['name']
+ md.version = data['version']
+ md.license = data.get('license')
+ md.keywords = data.get('keywords', [])
+ md.summary = data.get('summary')
+ dist = Distribution(md)
+ dist.locator = self
+ urls = d['urls']
+ result[md.version] = dist
+ for info in d['urls']:
+ url = info['url']
+ dist.download_urls.add(url)
+ dist.digests[url] = self._get_digest(info)
+ result['urls'].setdefault(md.version, set()).add(url)
+ result['digests'][url] = self._get_digest(info)
+ # Now get other releases
+ for version, infos in d['releases'].items():
+ if version == md.version:
+ continue # already done
+ omd = Metadata(scheme=self.scheme)
+ omd.name = md.name
+ omd.version = version
+ odist = Distribution(omd)
+ odist.locator = self
+ result[version] = odist
+ for info in infos:
+ url = info['url']
+ odist.download_urls.add(url)
+ odist.digests[url] = self._get_digest(info)
+ result['urls'].setdefault(version, set()).add(url)
+ result['digests'][url] = self._get_digest(info)
+# for info in urls:
+# md.source_url = info['url']
+# dist.digest = self._get_digest(info)
+# dist.locator = self
+# for info in urls:
+# url = info['url']
+# result['urls'].setdefault(md.version, set()).add(url)
+# result['digests'][url] = self._get_digest(info)
+ except Exception as e:
+ self.errors.put(text_type(e))
+ logger.exception('JSON fetch failed: %s', e)
+ return result
+
+
+class Page(object):
+ """
+ This class represents a scraped HTML page.
+ """
+ # The following slightly hairy-looking regex just looks for the contents of
+ # an anchor link, which has an attribute "href" either immediately preceded
+ # or immediately followed by a "rel" attribute. The attribute values can be
+ # declared with double quotes, single quotes or no quotes - which leads to
+ # the length of the expression.
+ _href = re.compile("""
+(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
+href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
+(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
+""", re.I | re.S | re.X)
+ _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
+
+ def __init__(self, data, url):
+ """
+ Initialise an instance with the Unicode page contents and the URL they
+ came from.
+ """
+ self.data = data
+ self.base_url = self.url = url
+ m = self._base.search(self.data)
+ if m:
+ self.base_url = m.group(1)
+
+ _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
+
+ @cached_property
+ def links(self):
+ """
+ Return the URLs of all the links on a page together with information
+ about their "rel" attribute, for determining which ones to treat as
+ downloads and which ones to queue for further scraping.
+ """
+ def clean(url):
+ "Tidy up an URL."
+ scheme, netloc, path, params, query, frag = urlparse(url)
+ return urlunparse((scheme, netloc, quote(path),
+ params, query, frag))
+
+ result = set()
+ for match in self._href.finditer(self.data):
+ d = match.groupdict('')
+ rel = (d['rel1'] or d['rel2'] or d['rel3'] or
+ d['rel4'] or d['rel5'] or d['rel6'])
+ url = d['url1'] or d['url2'] or d['url3']
+ url = urljoin(self.base_url, url)
+ url = unescape(url)
+ url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
+ result.add((url, rel))
+ # We sort the result, hoping to bring the most recent versions
+ # to the front
+ result = sorted(result, key=lambda t: t[0], reverse=True)
+ return result
+
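+ # Editor's sketch: links yields (url, rel) pairs, resolved against any
+ # <base href> and sorted (the HTML snippet is illustrative):
+ #
+ #   page = Page('<a href="foo-1.0.tar.gz" rel="download">foo</a>',
+ #               'https://example.com/simple/foo/')
+ #   for url, rel in page.links:
+ #       print(url, rel)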
+
+class SimpleScrapingLocator(Locator):
+ """
+ A locator which scrapes HTML pages to locate downloads for a distribution.
+ This runs multiple threads to do the I/O; performance is at least as good
+ as pip's PackageFinder, which works in an analogous fashion.
+ """
+
+ # These are used to deal with various Content-Encoding schemes.
+ decoders = {
+ 'deflate': zlib.decompress,
+ 'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
+ 'none': lambda b: b,
+ }
+
+ def __init__(self, url, timeout=None, num_workers=10, **kwargs):
+ """
+ Initialise an instance.
+ :param url: The root URL to use for scraping.
+ :param timeout: The timeout, in seconds, to be applied to requests.
+ This defaults to ``None`` (no timeout specified).
+ :param num_workers: The number of worker threads to use for I/O.
+ This defaults to 10.
+ :param kwargs: Passed to the superclass.
+ """
+ super(SimpleScrapingLocator, self).__init__(**kwargs)
+ self.base_url = ensure_slash(url)
+ self.timeout = timeout
+ self._page_cache = {}
+ self._seen = set()
+ self._to_fetch = queue.Queue()
+ self._bad_hosts = set()
+ self.skip_externals = False
+ self.num_workers = num_workers
+ self._lock = threading.RLock()
+ # See issue #45: we need to be resilient when the locator is used
+ # in a thread, e.g. with concurrent.futures. We can't use self._lock
+ # as it is for coordinating our internal threads - the ones created
+ # in _prepare_threads.
+ self._gplock = threading.RLock()
+ self.platform_check = False # See issue #112
+
+ def _prepare_threads(self):
+ """
+ Threads are created only when get_project is called, and terminate
+ before it returns. They are there primarily to parallelise I/O (i.e.
+ fetching web pages).
+ """
+ self._threads = []
+ for i in range(self.num_workers):
+ t = threading.Thread(target=self._fetch)
+ t.daemon = True
+ t.start()
+ self._threads.append(t)
+
+ def _wait_threads(self):
+ """
+ Tell all the threads to terminate (by sending a sentinel value) and
+ wait for them to do so.
+ """
+ # Note that you need two loops, since you can't say which
+ # thread will get each sentinel
+ for t in self._threads:
+ self._to_fetch.put(None) # sentinel
+ for t in self._threads:
+ t.join()
+ self._threads = []
+
+ def _get_project(self, name):
+ result = {'urls': {}, 'digests': {}}
+ with self._gplock:
+ self.result = result
+ self.project_name = name
+ url = urljoin(self.base_url, '%s/' % quote(name))
+ self._seen.clear()
+ self._page_cache.clear()
+ self._prepare_threads()
+ try:
+ logger.debug('Queueing %s', url)
+ self._to_fetch.put(url)
+ self._to_fetch.join()
+ finally:
+ self._wait_threads()
+ del self.result
+ return result
+
+ platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
+ r'win(32|_amd64)|macosx_?\d+)\b', re.I)
+
+ def _is_platform_dependent(self, url):
+ """
+ Does an URL refer to a platform-specific download?
+ """
+ return self.platform_dependent.search(url)
+
+ def _process_download(self, url):
+ """
+ See if an URL is a suitable download for a project.
+
+ If it is, register information in the result dictionary (for
+ _get_project) about the specific version it's for.
+
+ Note that the return value isn't actually used other than as a boolean
+ value.
+ """
+ if self.platform_check and self._is_platform_dependent(url):
+ info = None
+ else:
+ info = self.convert_url_to_download_info(url, self.project_name)
+ logger.debug('process_download: %s -> %s', url, info)
+ if info:
+ with self._lock: # needed because self.result is shared
+ self._update_version_data(self.result, info)
+ return info
+
+ def _should_queue(self, link, referrer, rel):
+ """
+ Determine whether a link URL from a referring page and with a
+ particular "rel" attribute should be queued for scraping.
+ """
+ scheme, netloc, path, _, _, _ = urlparse(link)
+ if path.endswith(self.source_extensions + self.binary_extensions +
+ self.excluded_extensions):
+ result = False
+ elif self.skip_externals and not link.startswith(self.base_url):
+ result = False
+ elif not referrer.startswith(self.base_url):
+ result = False
+ elif rel not in ('homepage', 'download'):
+ result = False
+ elif scheme not in ('http', 'https', 'ftp'):
+ result = False
+ elif self._is_platform_dependent(link):
+ result = False
+ else:
+ host = netloc.split(':', 1)[0]
+ if host.lower() == 'localhost':
+ result = False
+ else:
+ result = True
+ logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
+ referrer, result)
+ return result
+
+ def _fetch(self):
+ """
+ Get a URL to fetch from the work queue, get the HTML page, examine its
+ links for download candidates and candidates for further scraping.
+
+ This is a handy method to run in a thread.
+ """
+ while True:
+ url = self._to_fetch.get()
+ try:
+ if url:
+ page = self.get_page(url)
+ if page is None: # e.g. after an error
+ continue
+ for link, rel in page.links:
+ if link not in self._seen:
+ try:
+ self._seen.add(link)
+ if (not self._process_download(link) and
+ self._should_queue(link, url, rel)):
+ logger.debug('Queueing %s from %s', link, url)
+ self._to_fetch.put(link)
+ except MetadataInvalidError: # e.g. invalid versions
+ pass
+ except Exception as e: # pragma: no cover
+ self.errors.put(text_type(e))
+ finally:
+ # always do this, to avoid hangs :-)
+ self._to_fetch.task_done()
+ if not url:
+ #logger.debug('Sentinel seen, quitting.')
+ break
+
+ def get_page(self, url):
+ """
+ Get the HTML for an URL, possibly from an in-memory cache.
+
+ XXX TODO Note: this cache is never actually cleared. It's assumed that
+ the data won't get stale over the lifetime of a locator instance (not
+ necessarily true for the default_locator).
+ """
+ # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
+ scheme, netloc, path, _, _, _ = urlparse(url)
+ if scheme == 'file' and os.path.isdir(url2pathname(path)):
+ url = urljoin(ensure_slash(url), 'index.html')
+
+ if url in self._page_cache:
+ result = self._page_cache[url]
+ logger.debug('Returning %s from cache: %s', url, result)
+ else:
+ host = netloc.split(':', 1)[0]
+ result = None
+ if host in self._bad_hosts:
+ logger.debug('Skipping %s due to bad host %s', url, host)
+ else:
+ req = Request(url, headers={'Accept-encoding': 'identity'})
+ try:
+ logger.debug('Fetching %s', url)
+ resp = self.opener.open(req, timeout=self.timeout)
+ logger.debug('Fetched %s', url)
+ headers = resp.info()
+ content_type = headers.get('Content-Type', '')
+ if HTML_CONTENT_TYPE.match(content_type):
+ final_url = resp.geturl()
+ data = resp.read()
+ encoding = headers.get('Content-Encoding')
+ if encoding:
+ decoder = self.decoders[encoding] # fail if not found
+ data = decoder(data)
+ encoding = 'utf-8'
+ m = CHARSET.search(content_type)
+ if m:
+ encoding = m.group(1)
+ try:
+ data = data.decode(encoding)
+ except UnicodeError: # pragma: no cover
+ data = data.decode('latin-1') # fallback
+ result = Page(data, final_url)
+ self._page_cache[final_url] = result
+ except HTTPError as e:
+ if e.code != 404:
+ logger.exception('Fetch failed: %s: %s', url, e)
+ except URLError as e: # pragma: no cover
+ logger.exception('Fetch failed: %s: %s', url, e)
+ with self._lock:
+ self._bad_hosts.add(host)
+ except Exception as e: # pragma: no cover
+ logger.exception('Fetch failed: %s: %s', url, e)
+ finally:
+ self._page_cache[url] = result # even if None (failure)
+ return result
+
+ _distname_re = re.compile('<a href=[^>]*>([^<]+)<')
+
+ def get_distribution_names(self):
+ """
+ Return all the distribution names known to this locator.
+ """
+ result = set()
+ page = self.get_page(self.base_url)
+ if not page:
+ raise DistlibException('Unable to get %s' % self.base_url)
+ for match in self._distname_re.finditer(page.data):
+ result.add(match.group(1))
+ return result
+
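+# Editor's sketch: scraping a PEP 503 "simple" index (the URL is PyPI's
+# real simple index; any compliant mirror should work):
+#
+#   locator = SimpleScrapingLocator('https://pypi.org/simple/', timeout=3.0)
+#   versions = locator.get_project('pip')  # version keys plus 'urls'/'digests'
+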
+class DirectoryLocator(Locator):
+ """
+ This class locates distributions in a directory tree.
+ """
+
+ def __init__(self, path, **kwargs):
+ """
+ Initialise an instance.
+ :param path: The root of the directory tree to search.
+ :param kwargs: Passed to the superclass constructor,
+ except for:
+ * recursive - if True (the default), subdirectories are
+ recursed into. If False, only the top-level directory
+ is searched.
+ """
+ self.recursive = kwargs.pop('recursive', True)
+ super(DirectoryLocator, self).__init__(**kwargs)
+ path = os.path.abspath(path)
+ if not os.path.isdir(path): # pragma: no cover
+ raise DistlibException('Not a directory: %r' % path)
+ self.base_dir = path
+
+ def should_include(self, filename, parent):
+ """
+ Should a filename be considered as a candidate for a distribution
+ archive? As well as the filename, the directory which contains it
+ is provided, though not used by the current implementation.
+ """
+ return filename.endswith(self.downloadable_extensions)
+
+ def _get_project(self, name):
+ result = {'urls': {}, 'digests': {}}
+ for root, dirs, files in os.walk(self.base_dir):
+ for fn in files:
+ if self.should_include(fn, root):
+ fn = os.path.join(root, fn)
+ url = urlunparse(('file', '',
+ pathname2url(os.path.abspath(fn)),
+ '', '', ''))
+ info = self.convert_url_to_download_info(url, name)
+ if info:
+ self._update_version_data(result, info)
+ if not self.recursive:
+ break
+ return result
+
+ def get_distribution_names(self):
+ """
+ Return all the distribution names known to this locator.
+ """
+ result = set()
+ for root, dirs, files in os.walk(self.base_dir):
+ for fn in files:
+ if self.should_include(fn, root):
+ fn = os.path.join(root, fn)
+ url = urlunparse(('file', '',
+ pathname2url(os.path.abspath(fn)),
+ '', '', ''))
+ info = self.convert_url_to_download_info(url, None)
+ if info:
+ result.add(info['name'])
+ if not self.recursive:
+ break
+ return result
+
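+# Editor's sketch: locating archives under a local directory tree (the
+# path is hypothetical):
+#
+#   locator = DirectoryLocator('/srv/dists', recursive=False)
+#   dist = locator.locate('foo (== 1.0)')
+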
+class JSONLocator(Locator):
+ """
+ This locator uses special extended metadata (not available on PyPI) and is
+ the basis of performant dependency resolution in distlib. Other locators
+ require archive downloads before dependencies can be determined! As you
+ might imagine, that can be slow.
+ """
+ def get_distribution_names(self):
+ """
+ Return all the distribution names known to this locator.
+ """
+ raise NotImplementedError('Not available from this locator')
+
+ def _get_project(self, name):
+ result = {'urls': {}, 'digests': {}}
+ data = get_project_data(name)
+ if data:
+ for info in data.get('files', []):
+ if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
+ continue
+ # We don't store summary in project metadata as it makes
+ # the data bigger for no benefit during dependency
+ # resolution
+ dist = make_dist(data['name'], info['version'],
+ summary=data.get('summary',
+ 'Placeholder for summary'),
+ scheme=self.scheme)
+ md = dist.metadata
+ md.source_url = info['url']
+ # TODO SHA256 digest
+ if 'digest' in info and info['digest']:
+ dist.digest = ('md5', info['digest'])
+ md.dependencies = info.get('requirements', {})
+ dist.exports = info.get('exports', {})
+ result[dist.version] = dist
+ result['urls'].setdefault(dist.version, set()).add(info['url'])
+ return result
+
+class DistPathLocator(Locator):
+ """
+ This locator finds installed distributions in a path. It can be useful for
+ adding to an :class:`AggregatingLocator`.
+ """
+ def __init__(self, distpath, **kwargs):
+ """
+ Initialise an instance.
+
+ :param distpath: A :class:`DistributionPath` instance to search.
+ """
+ super(DistPathLocator, self).__init__(**kwargs)
+ assert isinstance(distpath, DistributionPath)
+ self.distpath = distpath
+
+ def _get_project(self, name):
+ dist = self.distpath.get_distribution(name)
+ if dist is None:
+ result = {'urls': {}, 'digests': {}}
+ else:
+ result = {
+ dist.version: dist,
+ 'urls': {dist.version: set([dist.source_url])},
+ 'digests': {dist.version: set([None])}
+ }
+ return result
+
+
+class AggregatingLocator(Locator):
+ """
+ This class allows you to chain and/or merge a list of locators.
+ """
+ def __init__(self, *locators, **kwargs):
+ """
+ Initialise an instance.
+
+ :param locators: The list of locators to search.
+ :param kwargs: Passed to the superclass constructor,
+ except for:
+ * merge - if False (the default), the first successful
+ search from any of the locators is returned. If True,
+ the results from all locators are merged (this can be
+ slow).
+ """
+ self.merge = kwargs.pop('merge', False)
+ self.locators = locators
+ super(AggregatingLocator, self).__init__(**kwargs)
+
+ def clear_cache(self):
+ super(AggregatingLocator, self).clear_cache()
+ for locator in self.locators:
+ locator.clear_cache()
+
+ def _set_scheme(self, value):
+ self._scheme = value
+ for locator in self.locators:
+ locator.scheme = value
+
+ scheme = property(Locator.scheme.fget, _set_scheme)
+
+ def _get_project(self, name):
+ result = {}
+ for locator in self.locators:
+ d = locator.get_project(name)
+ if d:
+ if self.merge:
+ files = result.get('urls', {})
+ digests = result.get('digests', {})
+ # next line could overwrite result['urls'], result['digests']
+ result.update(d)
+ df = result.get('urls')
+ if files and df:
+ for k, v in files.items():
+ if k in df:
+ df[k] |= v
+ else:
+ df[k] = v
+ dd = result.get('digests')
+ if digests and dd:
+ dd.update(digests)
+ else:
+ # See issue #18. If any dists are found and we're looking
+ # for specific constraints, we only return something if
+ # a match is found. For example, if a DirectoryLocator
+ # returns just foo (1.0) while we're looking for
+ # foo (>= 2.0), we'll pretend there was nothing there so
+ # that subsequent locators can be queried. Otherwise we
+ # would just return foo (1.0) which would then lead to a
+ # failure to find foo (>= 2.0), because other locators
+ # weren't searched. Note that this only matters when
+ # merge=False.
+ if self.matcher is None:
+ found = True
+ else:
+ found = False
+ for k in d:
+ if self.matcher.match(k):
+ found = True
+ break
+ if found:
+ result = d
+ break
+ return result
+
+ def get_distribution_names(self):
+ """
+ Return all the distribution names known to this locator.
+ """
+ result = set()
+ for locator in self.locators:
+ try:
+ result |= locator.get_distribution_names()
+ except NotImplementedError:
+ pass
+ return result
+
+
+# We use a legacy scheme simply because most of the dists on PyPI use legacy
+# versions which don't conform to PEP 440.
+default_locator = AggregatingLocator(
+ # JSONLocator(), # don't use as PEP 426 is withdrawn
+ SimpleScrapingLocator('https://pypi.org/simple/',
+ timeout=3.0),
+ scheme='legacy')
+
+locate = default_locator.locate
+
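+# Editor's sketch: the module-level convenience function (the requirement
+# is illustrative; within pip the import path is
+# pip._vendor.distlib.locators):
+#
+#   from distlib.locators import locate
+#   dist = locate('sarge (>= 0.1)')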
+
+class DependencyFinder(object):
+ """
+ Locate dependencies for distributions.
+ """
+
+ def __init__(self, locator=None):
+ """
+ Initialise an instance, using the specified locator
+ to locate distributions.
+ """
+ self.locator = locator or default_locator
+ self.scheme = get_scheme(self.locator.scheme)
+
+ def add_distribution(self, dist):
+ """
+ Add a distribution to the finder. This will update internal information
+ about who provides what.
+ :param dist: The distribution to add.
+ """
+ logger.debug('adding distribution %s', dist)
+ name = dist.key
+ self.dists_by_name[name] = dist
+ self.dists[(name, dist.version)] = dist
+ for p in dist.provides:
+ name, version = parse_name_and_version(p)
+ logger.debug('Add to provided: %s, %s, %s', name, version, dist)
+ self.provided.setdefault(name, set()).add((version, dist))
+
+ def remove_distribution(self, dist):
+ """
+ Remove a distribution from the finder. This will update internal
+ information about who provides what.
+ :param dist: The distribution to remove.
+ """
+ logger.debug('removing distribution %s', dist)
+ name = dist.key
+ del self.dists_by_name[name]
+ del self.dists[(name, dist.version)]
+ for p in dist.provides:
+ name, version = parse_name_and_version(p)
+ logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
+ s = self.provided[name]
+ s.remove((version, dist))
+ if not s:
+ del self.provided[name]
+
+ def get_matcher(self, reqt):
+ """
+ Get a version matcher for a requirement.
+ :param reqt: The requirement
+ :type reqt: str
+ :return: A version matcher (an instance of
+ :class:`distlib.version.Matcher`).
+ """
+ try:
+ matcher = self.scheme.matcher(reqt)
+ except UnsupportedVersionError: # pragma: no cover
+ # XXX compat-mode if cannot read the version
+ name = reqt.split()[0]
+ matcher = self.scheme.matcher(name)
+ return matcher
+
+ def find_providers(self, reqt):
+ """
+ Find the distributions which can fulfill a requirement.
+
+ :param reqt: The requirement.
+ :type reqt: str
+ :return: A set of distributions which can fulfill the requirement.
+ """
+ matcher = self.get_matcher(reqt)
+ name = matcher.key # case-insensitive
+ result = set()
+ provided = self.provided
+ if name in provided:
+ for version, provider in provided[name]:
+ try:
+ match = matcher.match(version)
+ except UnsupportedVersionError:
+ match = False
+
+ if match:
+ result.add(provider)
+ break
+ return result
+
+ def try_to_replace(self, provider, other, problems):
+ """
+ Attempt to replace one provider with another. This is typically used
+ when resolving dependencies from multiple sources, e.g. A requires
+ (B >= 1.0) while C requires (B >= 1.1).
+
+ For successful replacement, ``provider`` must meet all the requirements
+ which ``other`` fulfills.
+
+ :param provider: The provider we are trying to replace with.
+ :param other: The provider we're trying to replace.
+ :param problems: If False is returned, this will contain what
+ problems prevented replacement. This is currently
+ a tuple of the literal string 'cantreplace',
+ ``provider``, ``other`` and the set of requirements
+ that ``provider`` couldn't fulfill.
+ :return: True if we can replace ``other`` with ``provider``, else
+ False.
+ """
+ rlist = self.reqts[other]
+ unmatched = set()
+ for s in rlist:
+ matcher = self.get_matcher(s)
+ if not matcher.match(provider.version):
+ unmatched.add(s)
+ if unmatched:
+ # can't replace other with provider
+ problems.add(('cantreplace', provider, other,
+ frozenset(unmatched)))
+ result = False
+ else:
+ # can replace other with provider
+ self.remove_distribution(other)
+ del self.reqts[other]
+ for s in rlist:
+ self.reqts.setdefault(provider, set()).add(s)
+ self.add_distribution(provider)
+ result = True
+ return result
+
+ def find(self, requirement, meta_extras=None, prereleases=False):
+ """
+ Find a distribution and all distributions it depends on.
+
+ :param requirement: The requirement specifying the distribution to
+ find, or a Distribution instance.
+ :param meta_extras: A list of meta extras such as :test:, :build: and
+ so on.
+ :param prereleases: If ``True``, allow pre-release versions to be
+ returned - otherwise, don't return prereleases
+ unless they're all that's available.
+
+ Return a set of :class:`Distribution` instances and a set of
+ problems.
+
+ The distributions returned should be such that they have the
+ :attr:`required` attribute set to ``True`` if they were
+ from the ``requirement`` passed to ``find()``, and they have the
+ :attr:`build_time_dependency` attribute set to ``True`` unless they
+ are post-installation dependencies of the ``requirement``.
+
+ Each problem is a tuple consisting of the string
+ ``'unsatisfied'`` and the requirement which couldn't be satisfied
+ by any distribution known to the locator.
+ """
+
+ self.provided = {}
+ self.dists = {}
+ self.dists_by_name = {}
+ self.reqts = {}
+
+ meta_extras = set(meta_extras or [])
+ if ':*:' in meta_extras:
+ meta_extras.remove(':*:')
+ # :meta: and :run: are implicitly included
+ meta_extras |= set([':test:', ':build:', ':dev:'])
+
+ if isinstance(requirement, Distribution):
+ dist = odist = requirement
+ logger.debug('passed %s as requirement', odist)
+ else:
+ dist = odist = self.locator.locate(requirement,
+ prereleases=prereleases)
+ if dist is None:
+ raise DistlibException('Unable to locate %r' % requirement)
+ logger.debug('located %s', odist)
+ dist.requested = True
+ problems = set()
+ todo = set([dist])
+ install_dists = set([odist])
+ while todo:
+ dist = todo.pop()
+ name = dist.key # case-insensitive
+ if name not in self.dists_by_name:
+ self.add_distribution(dist)
+ else:
+ #import pdb; pdb.set_trace()
+ other = self.dists_by_name[name]
+ if other != dist:
+ self.try_to_replace(dist, other, problems)
+
+ ireqts = dist.run_requires | dist.meta_requires
+ sreqts = dist.build_requires
+ ereqts = set()
+ if meta_extras and dist in install_dists:
+ for key in ('test', 'build', 'dev'):
+ e = ':%s:' % key
+ if e in meta_extras:
+ ereqts |= getattr(dist, '%s_requires' % key)
+ all_reqts = ireqts | sreqts | ereqts
+ for r in all_reqts:
+ providers = self.find_providers(r)
+ if not providers:
+ logger.debug('No providers found for %r', r)
+ provider = self.locator.locate(r, prereleases=prereleases)
+ # If no provider is found and we didn't consider
+ # prereleases, consider them now.
+ if provider is None and not prereleases:
+ provider = self.locator.locate(r, prereleases=True)
+ if provider is None:
+ logger.debug('Cannot satisfy %r', r)
+ problems.add(('unsatisfied', r))
+ else:
+ n, v = provider.key, provider.version
+ if (n, v) not in self.dists:
+ todo.add(provider)
+ providers.add(provider)
+ if r in ireqts and dist in install_dists:
+ install_dists.add(provider)
+ logger.debug('Adding %s to install_dists',
+ provider.name_and_version)
+ for p in providers:
+ name = p.key
+ if name not in self.dists_by_name:
+ self.reqts.setdefault(p, set()).add(r)
+ else:
+ other = self.dists_by_name[name]
+ if other != p:
+ # see if other can be replaced by p
+ self.try_to_replace(p, other, problems)
+
+ dists = set(self.dists.values())
+ for dist in dists:
+ dist.build_time_dependency = dist not in install_dists
+ if dist.build_time_dependency:
+ logger.debug('%s is a build-time dependency only.',
+ dist.name_and_version)
+ logger.debug('find done for %s', odist)
+ return dists, problems
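+
+# Editor's sketch: resolving a requirement and its dependency closure with
+# the default locator (the requirement is illustrative):
+#
+#   finder = DependencyFinder()
+#   dists, problems = finder.find('flask (>= 1.0)')
+#   for problem in problems:
+#       print(problem)  # e.g. ('unsatisfied', 'werkzeug (>= 2.0)')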
diff --git a/third_party/python/pip/pip/_vendor/distlib/manifest.py b/third_party/python/pip/pip/_vendor/distlib/manifest.py
new file mode 100644
index 0000000000..ca0fe442d9
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/manifest.py
@@ -0,0 +1,393 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2013 Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""
+Class representing the list of files in a distribution.
+
+Equivalent to distutils.filelist, but fixes some problems.
+"""
+import fnmatch
+import logging
+import os
+import re
+import sys
+
+from . import DistlibException
+from .compat import fsdecode
+from .util import convert_path
+
+
+__all__ = ['Manifest']
+
+logger = logging.getLogger(__name__)
+
+# a \ followed by optional whitespace + EOL (a line continuation)
+_COLLAPSE_PATTERN = re.compile(r'\\\s*\n', re.M)
+_COMMENTED_LINE = re.compile('#.*?(?=\n)|\n(?=$)', re.M | re.S)
+
+#
+# Due to the different results returned by fnmatch.translate, we need
+# to do slightly different processing for Python 2.7 and 3.2 ... this needed
+# to be brought in for Python 3.6 onwards.
+#
+_PYTHON_VERSION = sys.version_info[:2]
+
+class Manifest(object):
+ """A list of files built by on exploring the filesystem and filtered by
+ applying various patterns to what we find there.
+ """
+
+ def __init__(self, base=None):
+ """
+ Initialise an instance.
+
+ :param base: The base directory to explore under.
+ """
+ self.base = os.path.abspath(os.path.normpath(base or os.getcwd()))
+ self.prefix = self.base + os.sep
+ self.allfiles = None
+ self.files = set()
+
+ #
+ # Public API
+ #
+
+ def findall(self):
+ """Find all files under the base and set ``allfiles`` to the absolute
+ pathnames of files found.
+ """
+ from stat import S_ISREG, S_ISDIR, S_ISLNK
+
+ self.allfiles = allfiles = []
+ root = self.base
+ stack = [root]
+ pop = stack.pop
+ push = stack.append
+
+ while stack:
+ root = pop()
+ names = os.listdir(root)
+
+ for name in names:
+ fullname = os.path.join(root, name)
+
+ # Avoid excess stat calls -- just one will do, thank you!
+ stat = os.stat(fullname)
+ mode = stat.st_mode
+ if S_ISREG(mode):
+ allfiles.append(fsdecode(fullname))
+ elif S_ISDIR(mode) and not S_ISLNK(mode):
+ push(fullname)
+
+ def add(self, item):
+ """
+ Add a file to the manifest.
+
+ :param item: The pathname to add. This can be relative to the base.
+ """
+ if not item.startswith(self.prefix):
+ item = os.path.join(self.base, item)
+ self.files.add(os.path.normpath(item))
+
+ def add_many(self, items):
+ """
+ Add a list of files to the manifest.
+
+ :param items: The pathnames to add. These can be relative to the base.
+ """
+ for item in items:
+ self.add(item)
+
+ def sorted(self, wantdirs=False):
+ """
+ Return sorted files in directory order
+ """
+
+ def add_dir(dirs, d):
+ dirs.add(d)
+ logger.debug('add_dir added %s', d)
+ if d != self.base:
+ parent, _ = os.path.split(d)
+ assert parent not in ('', '/')
+ add_dir(dirs, parent)
+
+ result = set(self.files) # make a copy!
+ if wantdirs:
+ dirs = set()
+ for f in result:
+ add_dir(dirs, os.path.dirname(f))
+ result |= dirs
+ return [os.path.join(*path_tuple) for path_tuple in
+ sorted(os.path.split(path) for path in result)]
+
+ def clear(self):
+ """Clear all collected files."""
+ self.files = set()
+ self.allfiles = []
+
+ def process_directive(self, directive):
+ """
+ Process a directive which either adds some files from ``allfiles`` to
+ ``files``, or removes some files from ``files``.
+
+ :param directive: The directive to process. This should be in a format
+ compatible with distutils ``MANIFEST.in`` files:
+
+ http://docs.python.org/distutils/sourcedist.html#commands
+ """
+ # Parse the line: split it up, make sure the right number of words
+ # is there, and return the relevant words. 'action' is always
+ # defined: it's the first word of the line. Which of the other
+ # three are defined depends on the action; it'll be either
+ # patterns, (dir and patterns), or (dirpattern).
+ action, patterns, thedir, dirpattern = self._parse_directive(directive)
+
+ # OK, now we know that the action is valid and we have the
+ # right number of words on the line for that action -- so we
+ # can proceed with minimal error-checking.
+ if action == 'include':
+ for pattern in patterns:
+ if not self._include_pattern(pattern, anchor=True):
+ logger.warning('no files found matching %r', pattern)
+
+ elif action == 'exclude':
+ for pattern in patterns:
+ found = self._exclude_pattern(pattern, anchor=True)
+ #if not found:
+ # logger.warning('no previously-included files '
+ # 'found matching %r', pattern)
+
+ elif action == 'global-include':
+ for pattern in patterns:
+ if not self._include_pattern(pattern, anchor=False):
+ logger.warning('no files found matching %r '
+ 'anywhere in distribution', pattern)
+
+ elif action == 'global-exclude':
+ for pattern in patterns:
+ found = self._exclude_pattern(pattern, anchor=False)
+ #if not found:
+ # logger.warning('no previously-included files '
+ # 'matching %r found anywhere in '
+ # 'distribution', pattern)
+
+ elif action == 'recursive-include':
+ for pattern in patterns:
+ if not self._include_pattern(pattern, prefix=thedir):
+ logger.warning('no files found matching %r '
+ 'under directory %r', pattern, thedir)
+
+ elif action == 'recursive-exclude':
+ for pattern in patterns:
+ found = self._exclude_pattern(pattern, prefix=thedir)
+ #if not found:
+ # logger.warning('no previously-included files '
+ # 'matching %r found under directory %r',
+ # pattern, thedir)
+
+ elif action == 'graft':
+ if not self._include_pattern(None, prefix=dirpattern):
+ logger.warning('no directories found matching %r',
+ dirpattern)
+
+ elif action == 'prune':
+ if not self._exclude_pattern(None, prefix=dirpattern):
+ logger.warning('no previously-included directories found '
+ 'matching %r', dirpattern)
+ else: # pragma: no cover
+ # This should never happen, as it should be caught in
+ # _parse_template_line
+ raise DistlibException(
+ 'invalid action %r' % action)
+
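+ # Editor's sketch: driving a manifest with MANIFEST.in-style directives
+ # (the project path is hypothetical; findall() runs lazily on the first
+ # include):
+ #
+ #   manifest = Manifest('/path/to/project')
+ #   manifest.process_directive('include *.txt')
+ #   manifest.process_directive('recursive-include docs *.rst')
+ #   files = manifest.sorted()
+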
+ #
+ # Private API
+ #
+
+ def _parse_directive(self, directive):
+ """
+ Validate a directive.
+ :param directive: The directive to validate.
+ :return: A tuple of action, patterns, thedir, dir_patterns
+ """
+ words = directive.split()
+ if len(words) == 1 and words[0] not in ('include', 'exclude',
+ 'global-include',
+ 'global-exclude',
+ 'recursive-include',
+ 'recursive-exclude',
+ 'graft', 'prune'):
+ # no action given, let's use the default 'include'
+ words.insert(0, 'include')
+
+ action = words[0]
+ patterns = thedir = dir_pattern = None
+
+ if action in ('include', 'exclude',
+ 'global-include', 'global-exclude'):
+ if len(words) < 2:
+ raise DistlibException(
+ '%r expects <pattern1> <pattern2> ...' % action)
+
+ patterns = [convert_path(word) for word in words[1:]]
+
+ elif action in ('recursive-include', 'recursive-exclude'):
+ if len(words) < 3:
+ raise DistlibException(
+ '%r expects <dir> <pattern1> <pattern2> ...' % action)
+
+ thedir = convert_path(words[1])
+ patterns = [convert_path(word) for word in words[2:]]
+
+ elif action in ('graft', 'prune'):
+ if len(words) != 2:
+ raise DistlibException(
+ '%r expects a single <dir_pattern>' % action)
+
+ dir_pattern = convert_path(words[1])
+
+ else:
+ raise DistlibException('unknown action %r' % action)
+
+ return action, patterns, thedir, dir_pattern
+
+ def _include_pattern(self, pattern, anchor=True, prefix=None,
+ is_regex=False):
+ """Select strings (presumably filenames) from 'self.files' that
+ match 'pattern', a Unix-style wildcard (glob) pattern.
+
+ Patterns are not quite the same as implemented by the 'fnmatch'
+ module: '*' and '?' match non-special characters, where "special"
+ is platform-dependent: slash on Unix; colon, slash, and backslash on
+ DOS/Windows; and colon on Mac OS.
+
+ If 'anchor' is true (the default), then the pattern match is more
+ stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
+ 'anchor' is false, both of these will match.
+
+ If 'prefix' is supplied, then only filenames starting with 'prefix'
+ (itself a pattern) and ending with 'pattern', with anything in between
+ them, will match. 'anchor' is ignored in this case.
+
+ If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
+ 'pattern' is assumed to be either a string containing a regex or a
+ regex object -- no translation is done, the regex is just compiled
+ and used as-is.
+
+ Selected strings will be added to self.files.
+
+ Return True if files are found.
+ """
+ # XXX docstring lying about what the special chars are?
+ found = False
+ pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
+
+ # delayed loading of allfiles list
+ if self.allfiles is None:
+ self.findall()
+
+ for name in self.allfiles:
+ if pattern_re.search(name):
+ self.files.add(name)
+ found = True
+ return found
+
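+    # Editor's illustrative sketch (not part of distlib): with the default
+    # anchor=True, '*.py' matches only files directly under the base; with
+    # anchor=False it matches at any depth; with a prefix it matches
+    # anywhere under that directory (file names are hypothetical):
+    #
+    #   m._include_pattern('*.py')                  # setup.py, not src/x.py
+    #   m._include_pattern('*.py', anchor=False)    # setup.py and src/x.py
+    #   m._include_pattern('*.py', prefix='tests')  # anything under tests/
+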
+ def _exclude_pattern(self, pattern, anchor=True, prefix=None,
+ is_regex=False):
+ """Remove strings (presumably filenames) from 'files' that match
+ 'pattern'.
+
+        Other parameters are the same as for '_include_pattern()', above.
+        The list 'self.files' is modified in place. Return True if files are
+        found.
+
+        This allows, for example, exclusion of SCM subdirectories when
+        packaging source distributions.
+ """
+ found = False
+ pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex)
+ for f in list(self.files):
+ if pattern_re.search(f):
+ self.files.remove(f)
+ found = True
+ return found
+
+ def _translate_pattern(self, pattern, anchor=True, prefix=None,
+ is_regex=False):
+ """Translate a shell-like wildcard pattern to a compiled regular
+ expression.
+
+ Return the compiled regex. If 'is_regex' true,
+ then 'pattern' is directly compiled to a regex (if it's a string)
+ or just returned as-is (assumes it's a regex object).
+ """
+ if is_regex:
+ if isinstance(pattern, str):
+ return re.compile(pattern)
+ else:
+ return pattern
+
+ if _PYTHON_VERSION > (3, 2):
+ # ditch start and end characters
+ start, _, end = self._glob_to_re('_').partition('_')
+
+ if pattern:
+ pattern_re = self._glob_to_re(pattern)
+ if _PYTHON_VERSION > (3, 2):
+ assert pattern_re.startswith(start) and pattern_re.endswith(end)
+ else:
+ pattern_re = ''
+
+ base = re.escape(os.path.join(self.base, ''))
+ if prefix is not None:
+ # ditch end of pattern character
+ if _PYTHON_VERSION <= (3, 2):
+ empty_pattern = self._glob_to_re('')
+ prefix_re = self._glob_to_re(prefix)[:-len(empty_pattern)]
+ else:
+ prefix_re = self._glob_to_re(prefix)
+ assert prefix_re.startswith(start) and prefix_re.endswith(end)
+ prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
+ sep = os.sep
+ if os.sep == '\\':
+ sep = r'\\'
+ if _PYTHON_VERSION <= (3, 2):
+ pattern_re = '^' + base + sep.join((prefix_re,
+ '.*' + pattern_re))
+ else:
+ pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
+ pattern_re = r'%s%s%s%s.*%s%s' % (start, base, prefix_re, sep,
+ pattern_re, end)
+ else: # no prefix -- respect anchor flag
+ if anchor:
+ if _PYTHON_VERSION <= (3, 2):
+ pattern_re = '^' + base + pattern_re
+ else:
+ pattern_re = r'%s%s%s' % (start, base, pattern_re[len(start):])
+
+ return re.compile(pattern_re)
+
+ def _glob_to_re(self, pattern):
+ """Translate a shell-like glob pattern to a regular expression.
+
+ Return a string containing the regex. Differs from
+ 'fnmatch.translate()' in that '*' does not match "special characters"
+ (which are platform-specific).
+ """
+ pattern_re = fnmatch.translate(pattern)
+
+ # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
+ # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
+ # and by extension they shouldn't match such "special characters" under
+ # any OS. So change all non-escaped dots in the RE to match any
+ # character except the special characters (currently: just os.sep).
+ sep = os.sep
+ if os.sep == '\\':
+ # we're using a regex to manipulate a regex, so we need
+ # to escape the backslash twice
+ sep = r'\\\\'
+ escaped = r'\1[^%s]' % sep
+ pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
+ return pattern_re
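+    # Editor's illustrative sketch (not part of distlib): on POSIX, '?'
+    # and '*' in the resulting regex match any character except os.sep,
+    # so 'a*.py' matches 'abc.py' but not 'a/b.py':
+    #
+    #   import re
+    #   rx = re.compile(m._glob_to_re('a*.py'))
+    #   bool(rx.match('abc.py'))   # True
+    #   bool(rx.match('a/b.py'))   # False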
diff --git a/third_party/python/pip/pip/_vendor/distlib/markers.py b/third_party/python/pip/pip/_vendor/distlib/markers.py
new file mode 100644
index 0000000000..9dc6841033
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/markers.py
@@ -0,0 +1,152 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""
+Parser for the environment markers micro-language defined in PEP 508.
+"""
+
+# Note: In PEP 345, the micro-language was Python compatible, so the ast
+# module could be used to parse it. However, PEP 508 introduced operators such
+# as ~= and === which aren't in Python, necessitating a different approach.
+
+import os
+import re
+import sys
+import platform
+
+from .compat import string_types
+from .util import in_venv, parse_marker
+from .version import NormalizedVersion as NV
+
+__all__ = ['interpret']
+
+_VERSION_PATTERN = re.compile(r'((\d+(\.\d+)*\w*)|\'(\d+(\.\d+)*\w*)\'|\"(\d+(\.\d+)*\w*)\")')
+
+def _is_literal(o):
+ if not isinstance(o, string_types) or not o:
+ return False
+ return o[0] in '\'"'
+
+def _get_versions(s):
+ result = []
+ for m in _VERSION_PATTERN.finditer(s):
+ result.append(NV(m.groups()[0]))
+ return set(result)
+
+class Evaluator(object):
+ """
+    This class is used to evaluate marker expressions.
+ """
+
+ operations = {
+ '==': lambda x, y: x == y,
+ '===': lambda x, y: x == y,
+ '~=': lambda x, y: x == y or x > y,
+ '!=': lambda x, y: x != y,
+ '<': lambda x, y: x < y,
+ '<=': lambda x, y: x == y or x < y,
+ '>': lambda x, y: x > y,
+ '>=': lambda x, y: x == y or x > y,
+ 'and': lambda x, y: x and y,
+ 'or': lambda x, y: x or y,
+ 'in': lambda x, y: x in y,
+ 'not in': lambda x, y: x not in y,
+ }
+
+ def evaluate(self, expr, context):
+ """
+        Evaluate a marker expression returned by the :func:`parse_marker`
+ function in the specified context.
+ """
+ if isinstance(expr, string_types):
+ if expr[0] in '\'"':
+ result = expr[1:-1]
+ else:
+ if expr not in context:
+ raise SyntaxError('unknown variable: %s' % expr)
+ result = context[expr]
+ else:
+ assert isinstance(expr, dict)
+ op = expr['op']
+ if op not in self.operations:
+ raise NotImplementedError('op not implemented: %s' % op)
+ elhs = expr['lhs']
+ erhs = expr['rhs']
+ if _is_literal(expr['lhs']) and _is_literal(expr['rhs']):
+ raise SyntaxError('invalid comparison: %s %s %s' % (elhs, op, erhs))
+
+ lhs = self.evaluate(elhs, context)
+ rhs = self.evaluate(erhs, context)
+ if ((elhs == 'python_version' or erhs == 'python_version') and
+ op in ('<', '<=', '>', '>=', '===', '==', '!=', '~=')):
+ lhs = NV(lhs)
+ rhs = NV(rhs)
+ elif elhs == 'python_version' and op in ('in', 'not in'):
+ lhs = NV(lhs)
+ rhs = _get_versions(rhs)
+ result = self.operations[op](lhs, rhs)
+ return result
+
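+# Editor's illustrative sketch (not part of distlib): ``evaluate`` takes
+# either a variable name / quoted literal, or a dict produced by
+# ``parse_marker`` with 'op', 'lhs' and 'rhs' keys, e.g.:
+#
+#   Evaluator().evaluate(
+#       {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.6"'},
+#       {'python_version': '3.11'})   # -> True
+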
+_DIGITS = re.compile(r'\d+\.\d+')
+
+def default_context():
+ def format_full_version(info):
+ version = '%s.%s.%s' % (info.major, info.minor, info.micro)
+ kind = info.releaselevel
+ if kind != 'final':
+ version += kind[0] + str(info.serial)
+ return version
+
+ if hasattr(sys, 'implementation'):
+ implementation_version = format_full_version(sys.implementation.version)
+ implementation_name = sys.implementation.name
+ else:
+ implementation_version = '0'
+ implementation_name = ''
+
+ ppv = platform.python_version()
+ m = _DIGITS.match(ppv)
+ pv = m.group(0)
+ result = {
+ 'implementation_name': implementation_name,
+ 'implementation_version': implementation_version,
+ 'os_name': os.name,
+ 'platform_machine': platform.machine(),
+ 'platform_python_implementation': platform.python_implementation(),
+ 'platform_release': platform.release(),
+ 'platform_system': platform.system(),
+ 'platform_version': platform.version(),
+ 'platform_in_venv': str(in_venv()),
+ 'python_full_version': ppv,
+ 'python_version': pv,
+ 'sys_platform': sys.platform,
+ }
+ return result
+
+DEFAULT_CONTEXT = default_context()
+del default_context
+
+evaluator = Evaluator()
+
+def interpret(marker, execution_context=None):
+ """
+ Interpret a marker and return a result depending on environment.
+
+ :param marker: The marker to interpret.
+ :type marker: str
+ :param execution_context: The context used for name lookup.
+ :type execution_context: mapping
+ """
+ try:
+ expr, rest = parse_marker(marker)
+ except Exception as e:
+ raise SyntaxError('Unable to interpret marker syntax: %s: %s' % (marker, e))
+ if rest and rest[0] != '#':
+ raise SyntaxError('unexpected trailing data in marker: %s: %s' % (marker, rest))
+ context = dict(DEFAULT_CONTEXT)
+ if execution_context:
+ context.update(execution_context)
+ return evaluator.evaluate(expr, context)
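+# Editor's illustrative sketch (not part of distlib): typical use is to
+# evaluate a PEP 508 marker against the running interpreter's context,
+# optionally overriding variables via execution_context:
+#
+#   interpret('python_version >= "3.6" and os_name == "posix"')
+#   interpret('sys_platform == "win32"', {'sys_platform': 'linux'})  # False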
diff --git a/third_party/python/pip/pip/_vendor/distlib/metadata.py b/third_party/python/pip/pip/_vendor/distlib/metadata.py
new file mode 100644
index 0000000000..c329e1977f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/metadata.py
@@ -0,0 +1,1076 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""Implementation of the Metadata for Python packages PEPs.
+
+Supports all metadata formats (1.0, 1.1, 1.2, 1.3/2.1 and 2.2).
+"""
+from __future__ import unicode_literals
+
+import codecs
+from email import message_from_file
+import json
+import logging
+import re
+
+
+from . import DistlibException, __version__
+from .compat import StringIO, string_types, text_type
+from .markers import interpret
+from .util import extract_by_key, get_extras
+from .version import get_scheme, PEP440_VERSION_RE
+
+logger = logging.getLogger(__name__)
+
+
+class MetadataMissingError(DistlibException):
+    """A required metadata field is missing"""
+
+
+class MetadataConflictError(DistlibException):
+    """Attempt to read or write metadata fields that conflict."""
+
+
+class MetadataUnrecognizedVersionError(DistlibException):
+ """Unknown metadata version number."""
+
+
+class MetadataInvalidError(DistlibException):
+ """A metadata value is invalid"""
+
+# public API of this module
+__all__ = ['Metadata', 'PKG_INFO_ENCODING', 'PKG_INFO_PREFERRED_VERSION']
+
+# Encoding used for the PKG-INFO files
+PKG_INFO_ENCODING = 'utf-8'
+
+# preferred version. Hopefully will be changed
+# to 1.2 once PEP 345 is supported everywhere
+PKG_INFO_PREFERRED_VERSION = '1.1'
+
+_LINE_PREFIX_1_2 = re.compile('\n \\|')
+_LINE_PREFIX_PRE_1_2 = re.compile('\n ')
+_241_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
+ 'Summary', 'Description',
+ 'Keywords', 'Home-page', 'Author', 'Author-email',
+ 'License')
+
+_314_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
+ 'Supported-Platform', 'Summary', 'Description',
+ 'Keywords', 'Home-page', 'Author', 'Author-email',
+ 'License', 'Classifier', 'Download-URL', 'Obsoletes',
+ 'Provides', 'Requires')
+
+_314_MARKERS = ('Obsoletes', 'Provides', 'Requires', 'Classifier',
+ 'Download-URL')
+
+_345_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
+ 'Supported-Platform', 'Summary', 'Description',
+ 'Keywords', 'Home-page', 'Author', 'Author-email',
+ 'Maintainer', 'Maintainer-email', 'License',
+ 'Classifier', 'Download-URL', 'Obsoletes-Dist',
+ 'Project-URL', 'Provides-Dist', 'Requires-Dist',
+ 'Requires-Python', 'Requires-External')
+
+_345_MARKERS = ('Provides-Dist', 'Requires-Dist', 'Requires-Python',
+ 'Obsoletes-Dist', 'Requires-External', 'Maintainer',
+ 'Maintainer-email', 'Project-URL')
+
+_426_FIELDS = ('Metadata-Version', 'Name', 'Version', 'Platform',
+ 'Supported-Platform', 'Summary', 'Description',
+ 'Keywords', 'Home-page', 'Author', 'Author-email',
+ 'Maintainer', 'Maintainer-email', 'License',
+ 'Classifier', 'Download-URL', 'Obsoletes-Dist',
+ 'Project-URL', 'Provides-Dist', 'Requires-Dist',
+ 'Requires-Python', 'Requires-External', 'Private-Version',
+ 'Obsoleted-By', 'Setup-Requires-Dist', 'Extension',
+ 'Provides-Extra')
+
+_426_MARKERS = ('Private-Version', 'Provides-Extra', 'Obsoleted-By',
+ 'Setup-Requires-Dist', 'Extension')
+
+# See issue #106: Sometimes 'Requires' and 'Provides' occur wrongly in
+# the metadata. Include them in the tuple literal below to allow them
+# (for now).
+# Ditto for Obsoletes - see issue #140.
+_566_FIELDS = _426_FIELDS + ('Description-Content-Type',
+ 'Requires', 'Provides', 'Obsoletes')
+
+_566_MARKERS = ('Description-Content-Type',)
+
+_643_MARKERS = ('Dynamic', 'License-File')
+
+_643_FIELDS = _566_FIELDS + _643_MARKERS
+
+_ALL_FIELDS = set()
+_ALL_FIELDS.update(_241_FIELDS)
+_ALL_FIELDS.update(_314_FIELDS)
+_ALL_FIELDS.update(_345_FIELDS)
+_ALL_FIELDS.update(_426_FIELDS)
+_ALL_FIELDS.update(_566_FIELDS)
+_ALL_FIELDS.update(_643_FIELDS)
+
+EXTRA_RE = re.compile(r'''extra\s*==\s*("([^"]+)"|'([^']+)')''')
+
+
+def _version2fieldlist(version):
+ if version == '1.0':
+ return _241_FIELDS
+ elif version == '1.1':
+ return _314_FIELDS
+ elif version == '1.2':
+ return _345_FIELDS
+ elif version in ('1.3', '2.1'):
+ # avoid adding field names if already there
+ return _345_FIELDS + tuple(f for f in _566_FIELDS if f not in _345_FIELDS)
+ elif version == '2.0':
+ raise ValueError('Metadata 2.0 is withdrawn and not supported')
+ # return _426_FIELDS
+ elif version == '2.2':
+ return _643_FIELDS
+ raise MetadataUnrecognizedVersionError(version)
+
+
+def _best_version(fields):
+ """Detect the best version depending on the fields used."""
+ def _has_marker(keys, markers):
+ for marker in markers:
+ if marker in keys:
+ return True
+ return False
+
+ keys = []
+ for key, value in fields.items():
+ if value in ([], 'UNKNOWN', None):
+ continue
+ keys.append(key)
+
+ possible_versions = ['1.0', '1.1', '1.2', '1.3', '2.1', '2.2'] # 2.0 removed
+
+ # first let's try to see if a field is not part of one of the version
+ for key in keys:
+ if key not in _241_FIELDS and '1.0' in possible_versions:
+ possible_versions.remove('1.0')
+ logger.debug('Removed 1.0 due to %s', key)
+ if key not in _314_FIELDS and '1.1' in possible_versions:
+ possible_versions.remove('1.1')
+ logger.debug('Removed 1.1 due to %s', key)
+ if key not in _345_FIELDS and '1.2' in possible_versions:
+ possible_versions.remove('1.2')
+ logger.debug('Removed 1.2 due to %s', key)
+ if key not in _566_FIELDS and '1.3' in possible_versions:
+ possible_versions.remove('1.3')
+ logger.debug('Removed 1.3 due to %s', key)
+ if key not in _566_FIELDS and '2.1' in possible_versions:
+ if key != 'Description': # In 2.1, description allowed after headers
+ possible_versions.remove('2.1')
+ logger.debug('Removed 2.1 due to %s', key)
+ if key not in _643_FIELDS and '2.2' in possible_versions:
+ possible_versions.remove('2.2')
+ logger.debug('Removed 2.2 due to %s', key)
+ # if key not in _426_FIELDS and '2.0' in possible_versions:
+ # possible_versions.remove('2.0')
+ # logger.debug('Removed 2.0 due to %s', key)
+
+ # possible_version contains qualified versions
+ if len(possible_versions) == 1:
+ return possible_versions[0] # found !
+ elif len(possible_versions) == 0:
+ logger.debug('Out of options - unknown metadata set: %s', fields)
+ raise MetadataConflictError('Unknown metadata set')
+
+ # let's see if one unique marker is found
+ is_1_1 = '1.1' in possible_versions and _has_marker(keys, _314_MARKERS)
+ is_1_2 = '1.2' in possible_versions and _has_marker(keys, _345_MARKERS)
+ is_2_1 = '2.1' in possible_versions and _has_marker(keys, _566_MARKERS)
+ # is_2_0 = '2.0' in possible_versions and _has_marker(keys, _426_MARKERS)
+ is_2_2 = '2.2' in possible_versions and _has_marker(keys, _643_MARKERS)
+ if int(is_1_1) + int(is_1_2) + int(is_2_1) + int(is_2_2) > 1:
+ raise MetadataConflictError('You used incompatible 1.1/1.2/2.1/2.2 fields')
+
+    # we have a choice between 1.0, 1.2, 2.1 and 2.2:
+    # - 1.0 has a broken Summary field but works with all tools
+    # - 1.1 is best avoided
+    # - 1.2 fixes Summary but has little adoption
+    # - 2.1 adds more features
+    # - 2.2 is the latest
+ if not is_1_1 and not is_1_2 and not is_2_1 and not is_2_2:
+ # we couldn't find any specific marker
+ if PKG_INFO_PREFERRED_VERSION in possible_versions:
+ return PKG_INFO_PREFERRED_VERSION
+ if is_1_1:
+ return '1.1'
+ if is_1_2:
+ return '1.2'
+ if is_2_1:
+ return '2.1'
+ # if is_2_2:
+ # return '2.2'
+
+ return '2.2'
+
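+# Editor's illustrative sketch (not part of distlib): a field set that
+# uses 'Requires-Dist' (a 1.2 marker) but no 2.x-only fields resolves
+# to metadata version 1.2:
+#
+#   _best_version({'Name': 'demo', 'Version': '1.0',
+#                  'Requires-Dist': ['requests']})   # -> '1.2'
+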
+# This follows the rules about transforming keys as described in
+# https://www.python.org/dev/peps/pep-0566/#id17
+_ATTR2FIELD = {
+ name.lower().replace("-", "_"): name for name in _ALL_FIELDS
+}
+_FIELD2ATTR = {field: attr for attr, field in _ATTR2FIELD.items()}
+
+_PREDICATE_FIELDS = ('Requires-Dist', 'Obsoletes-Dist', 'Provides-Dist')
+_VERSIONS_FIELDS = ('Requires-Python',)
+_VERSION_FIELDS = ('Version',)
+_LISTFIELDS = ('Platform', 'Classifier', 'Obsoletes',
+ 'Requires', 'Provides', 'Obsoletes-Dist',
+ 'Provides-Dist', 'Requires-Dist', 'Requires-External',
+ 'Project-URL', 'Supported-Platform', 'Setup-Requires-Dist',
+ 'Provides-Extra', 'Extension', 'License-File')
+_LISTTUPLEFIELDS = ('Project-URL',)
+
+_ELEMENTSFIELD = ('Keywords',)
+
+_UNICODEFIELDS = ('Author', 'Maintainer', 'Summary', 'Description')
+
+_MISSING = object()
+
+_FILESAFE = re.compile('[^A-Za-z0-9.]+')
+
+
+def _get_name_and_version(name, version, for_filename=False):
+ """Return the distribution name with version.
+
+ If for_filename is true, return a filename-escaped form."""
+ if for_filename:
+ # For both name and version any runs of non-alphanumeric or '.'
+ # characters are replaced with a single '-'. Additionally any
+ # spaces in the version string become '.'
+ name = _FILESAFE.sub('-', name)
+ version = _FILESAFE.sub('-', version.replace(' ', '.'))
+ return '%s-%s' % (name, version)
+
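+# Editor's illustrative sketch (not part of distlib): in the filename
+# form, spaces in the version become '.', and other runs of unsafe
+# characters become '-':
+#
+#   _get_name_and_version('my.pkg', '1.0 beta', for_filename=True)
+#   # -> 'my.pkg-1.0.beta'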
+
+class LegacyMetadata(object):
+ """The legacy metadata of a release.
+
+ Supports versions 1.0, 1.1, 1.2, 2.0 and 1.3/2.1 (auto-detected). You can
+ instantiate the class with one of these arguments (or none):
+    - *path*, the path to a metadata file
+    - *fileobj*, a file-like object with metadata as content
+    - *mapping*, a dict-like object
+    - *scheme*, a version scheme name
+ """
+ # TODO document the mapping API and UNKNOWN default key
+
+ def __init__(self, path=None, fileobj=None, mapping=None,
+ scheme='default'):
+ if [path, fileobj, mapping].count(None) < 2:
+ raise TypeError('path, fileobj and mapping are exclusive')
+ self._fields = {}
+ self.requires_files = []
+ self._dependencies = None
+ self.scheme = scheme
+ if path is not None:
+ self.read(path)
+ elif fileobj is not None:
+ self.read_file(fileobj)
+ elif mapping is not None:
+ self.update(mapping)
+ self.set_metadata_version()
+
+ def set_metadata_version(self):
+ self._fields['Metadata-Version'] = _best_version(self._fields)
+
+ def _write_field(self, fileobj, name, value):
+ fileobj.write('%s: %s\n' % (name, value))
+
+ def __getitem__(self, name):
+ return self.get(name)
+
+ def __setitem__(self, name, value):
+ return self.set(name, value)
+
+ def __delitem__(self, name):
+ field_name = self._convert_name(name)
+ try:
+ del self._fields[field_name]
+ except KeyError:
+ raise KeyError(name)
+
+ def __contains__(self, name):
+ return (name in self._fields or
+ self._convert_name(name) in self._fields)
+
+ def _convert_name(self, name):
+ if name in _ALL_FIELDS:
+ return name
+ name = name.replace('-', '_').lower()
+ return _ATTR2FIELD.get(name, name)
+
+ def _default_value(self, name):
+ if name in _LISTFIELDS or name in _ELEMENTSFIELD:
+ return []
+ return 'UNKNOWN'
+
+ def _remove_line_prefix(self, value):
+ if self.metadata_version in ('1.0', '1.1'):
+ return _LINE_PREFIX_PRE_1_2.sub('\n', value)
+ else:
+ return _LINE_PREFIX_1_2.sub('\n', value)
+
+ def __getattr__(self, name):
+ if name in _ATTR2FIELD:
+ return self[name]
+ raise AttributeError(name)
+
+ #
+ # Public API
+ #
+
+# dependencies = property(_get_dependencies, _set_dependencies)
+
+ def get_fullname(self, filesafe=False):
+ """Return the distribution name with version.
+
+ If filesafe is true, return a filename-escaped form."""
+ return _get_name_and_version(self['Name'], self['Version'], filesafe)
+
+ def is_field(self, name):
+ """return True if name is a valid metadata key"""
+ name = self._convert_name(name)
+ return name in _ALL_FIELDS
+
+ def is_multi_field(self, name):
+ name = self._convert_name(name)
+ return name in _LISTFIELDS
+
+ def read(self, filepath):
+ """Read the metadata values from a file path."""
+ fp = codecs.open(filepath, 'r', encoding='utf-8')
+ try:
+ self.read_file(fp)
+ finally:
+ fp.close()
+
+ def read_file(self, fileob):
+ """Read the metadata values from a file object."""
+ msg = message_from_file(fileob)
+ self._fields['Metadata-Version'] = msg['metadata-version']
+
+ # When reading, get all the fields we can
+ for field in _ALL_FIELDS:
+ if field not in msg:
+ continue
+ if field in _LISTFIELDS:
+ # we can have multiple lines
+ values = msg.get_all(field)
+ if field in _LISTTUPLEFIELDS and values is not None:
+ values = [tuple(value.split(',')) for value in values]
+ self.set(field, values)
+ else:
+ # single line
+ value = msg[field]
+ if value is not None and value != 'UNKNOWN':
+ self.set(field, value)
+
+ # PEP 566 specifies that the body be used for the description, if
+ # available
+ body = msg.get_payload()
+ self["Description"] = body if body else self["Description"]
+ # logger.debug('Attempting to set metadata for %s', self)
+ # self.set_metadata_version()
+
+ def write(self, filepath, skip_unknown=False):
+ """Write the metadata fields to filepath."""
+ fp = codecs.open(filepath, 'w', encoding='utf-8')
+ try:
+ self.write_file(fp, skip_unknown)
+ finally:
+ fp.close()
+
+ def write_file(self, fileobject, skip_unknown=False):
+ """Write the PKG-INFO format data to a file object."""
+ self.set_metadata_version()
+
+ for field in _version2fieldlist(self['Metadata-Version']):
+ values = self.get(field)
+ if skip_unknown and values in ('UNKNOWN', [], ['UNKNOWN']):
+ continue
+ if field in _ELEMENTSFIELD:
+ self._write_field(fileobject, field, ','.join(values))
+ continue
+ if field not in _LISTFIELDS:
+ if field == 'Description':
+ if self.metadata_version in ('1.0', '1.1'):
+ values = values.replace('\n', '\n ')
+ else:
+ values = values.replace('\n', '\n |')
+ values = [values]
+
+ if field in _LISTTUPLEFIELDS:
+ values = [','.join(value) for value in values]
+
+ for value in values:
+ self._write_field(fileobject, field, value)
+
+ def update(self, other=None, **kwargs):
+ """Set metadata values from the given iterable `other` and kwargs.
+
+        Behavior is like `dict.update`: if `other` has a ``keys`` method,
+        its keys are looped over and ``self[key]`` is assigned ``other[key]``.
+        Otherwise, ``other`` is treated as an iterable of ``(key, value)``
+        pairs.
+
+ Keys that don't match a metadata field or that have an empty value are
+ dropped.
+ """
+ def _set(key, value):
+ if key in _ATTR2FIELD and value:
+ self.set(self._convert_name(key), value)
+
+ if not other:
+ # other is None or empty container
+ pass
+ elif hasattr(other, 'keys'):
+ for k in other.keys():
+ _set(k, other[k])
+ else:
+ for k, v in other:
+ _set(k, v)
+
+ if kwargs:
+ for k, v in kwargs.items():
+ _set(k, v)
+
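+    # Editor's illustrative sketch (not part of distlib): both mapping
+    # and keyword forms work; keys that are not metadata fields, and
+    # empty values, are silently dropped:
+    #
+    #   md.update({'name': 'demo', 'bogus_key': 'ignored'})
+    #   md.update(version='1.0')
+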
+ def set(self, name, value):
+        """Validate and then set a metadata field."""
+ name = self._convert_name(name)
+
+ if ((name in _ELEMENTSFIELD or name == 'Platform') and
+ not isinstance(value, (list, tuple))):
+ if isinstance(value, string_types):
+ value = [v.strip() for v in value.split(',')]
+ else:
+ value = []
+ elif (name in _LISTFIELDS and
+ not isinstance(value, (list, tuple))):
+ if isinstance(value, string_types):
+ value = [value]
+ else:
+ value = []
+
+ if logger.isEnabledFor(logging.WARNING):
+ project_name = self['Name']
+
+ scheme = get_scheme(self.scheme)
+ if name in _PREDICATE_FIELDS and value is not None:
+ for v in value:
+ # check that the values are valid
+ if not scheme.is_valid_matcher(v.split(';')[0]):
+ logger.warning(
+ "'%s': '%s' is not valid (field '%s')",
+ project_name, v, name)
+ # FIXME this rejects UNKNOWN, is that right?
+ elif name in _VERSIONS_FIELDS and value is not None:
+ if not scheme.is_valid_constraint_list(value):
+ logger.warning("'%s': '%s' is not a valid version (field '%s')",
+ project_name, value, name)
+ elif name in _VERSION_FIELDS and value is not None:
+ if not scheme.is_valid_version(value):
+ logger.warning("'%s': '%s' is not a valid version (field '%s')",
+ project_name, value, name)
+
+ if name in _UNICODEFIELDS:
+ if name == 'Description':
+ value = self._remove_line_prefix(value)
+
+ self._fields[name] = value
+
+ def get(self, name, default=_MISSING):
+ """Get a metadata field."""
+ name = self._convert_name(name)
+ if name not in self._fields:
+ if default is _MISSING:
+ default = self._default_value(name)
+ return default
+ if name in _UNICODEFIELDS:
+ value = self._fields[name]
+ return value
+ elif name in _LISTFIELDS:
+ value = self._fields[name]
+ if value is None:
+ return []
+ res = []
+ for val in value:
+ if name not in _LISTTUPLEFIELDS:
+ res.append(val)
+ else:
+ # That's for Project-URL
+ res.append((val[0], val[1]))
+ return res
+
+ elif name in _ELEMENTSFIELD:
+ value = self._fields[name]
+ if isinstance(value, string_types):
+ return value.split(',')
+ return self._fields[name]
+
+ def check(self, strict=False):
+        """Check if the metadata is compliant. If strict is True, raise if
+        no Name or Version is provided."""
+ self.set_metadata_version()
+
+ # XXX should check the versions (if the file was loaded)
+ missing, warnings = [], []
+
+ for attr in ('Name', 'Version'): # required by PEP 345
+ if attr not in self:
+ missing.append(attr)
+
+ if strict and missing != []:
+ msg = 'missing required metadata: %s' % ', '.join(missing)
+ raise MetadataMissingError(msg)
+
+ for attr in ('Home-page', 'Author'):
+ if attr not in self:
+ missing.append(attr)
+
+ # checking metadata 1.2 (XXX needs to check 1.1, 1.0)
+ if self['Metadata-Version'] != '1.2':
+ return missing, warnings
+
+ scheme = get_scheme(self.scheme)
+
+ def are_valid_constraints(value):
+ for v in value:
+ if not scheme.is_valid_matcher(v.split(';')[0]):
+ return False
+ return True
+
+ for fields, controller in ((_PREDICATE_FIELDS, are_valid_constraints),
+ (_VERSIONS_FIELDS,
+ scheme.is_valid_constraint_list),
+ (_VERSION_FIELDS,
+ scheme.is_valid_version)):
+ for field in fields:
+ value = self.get(field, None)
+ if value is not None and not controller(value):
+ warnings.append("Wrong value for '%s': %s" % (field, value))
+
+ return missing, warnings
+
+ def todict(self, skip_missing=False):
+ """Return fields as a dict.
+
+ Field names will be converted to use the underscore-lowercase style
+ instead of hyphen-mixed case (i.e. home_page instead of Home-page).
+ This is as per https://www.python.org/dev/peps/pep-0566/#id17.
+ """
+ self.set_metadata_version()
+
+ fields = _version2fieldlist(self['Metadata-Version'])
+
+ data = {}
+
+ for field_name in fields:
+ if not skip_missing or field_name in self._fields:
+ key = _FIELD2ATTR[field_name]
+ if key != 'project_url':
+ data[key] = self[field_name]
+ else:
+ data[key] = [','.join(u) for u in self[field_name]]
+
+ return data
+
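+    # Editor's illustrative sketch (not part of distlib): keys come back
+    # in the underscore-lowercase form:
+    #
+    #   d = md.todict()
+    #   d['name'], d['version'], d.get('home_page')
+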
+ def add_requirements(self, requirements):
+ if self['Metadata-Version'] == '1.1':
+ # we can't have 1.1 metadata *and* Setuptools requires
+ for field in ('Obsoletes', 'Requires', 'Provides'):
+ if field in self:
+ del self[field]
+ self['Requires-Dist'] += requirements
+
+ # Mapping API
+ # TODO could add iter* variants
+
+ def keys(self):
+ return list(_version2fieldlist(self['Metadata-Version']))
+
+ def __iter__(self):
+ for key in self.keys():
+ yield key
+
+ def values(self):
+ return [self[key] for key in self.keys()]
+
+ def items(self):
+ return [(key, self[key]) for key in self.keys()]
+
+ def __repr__(self):
+ return '<%s %s %s>' % (self.__class__.__name__, self.name,
+ self.version)
+
+
+METADATA_FILENAME = 'pydist.json'
+WHEEL_METADATA_FILENAME = 'metadata.json'
+LEGACY_METADATA_FILENAME = 'METADATA'
+
+
+class Metadata(object):
+ """
+ The metadata of a release. This implementation uses 2.1
+ metadata where possible. If not possible, it wraps a LegacyMetadata
+ instance which handles the key-value metadata format.
+ """
+
+ METADATA_VERSION_MATCHER = re.compile(r'^\d+(\.\d+)*$')
+
+ NAME_MATCHER = re.compile('^[0-9A-Z]([0-9A-Z_.-]*[0-9A-Z])?$', re.I)
+
+ FIELDNAME_MATCHER = re.compile('^[A-Z]([0-9A-Z-]*[0-9A-Z])?$', re.I)
+
+ VERSION_MATCHER = PEP440_VERSION_RE
+
+ SUMMARY_MATCHER = re.compile('.{1,2047}')
+
+ METADATA_VERSION = '2.0'
+
+ GENERATOR = 'distlib (%s)' % __version__
+
+ MANDATORY_KEYS = {
+ 'name': (),
+ 'version': (),
+ 'summary': ('legacy',),
+ }
+
+ INDEX_KEYS = ('name version license summary description author '
+ 'author_email keywords platform home_page classifiers '
+ 'download_url')
+
+ DEPENDENCY_KEYS = ('extras run_requires test_requires build_requires '
+ 'dev_requires provides meta_requires obsoleted_by '
+ 'supports_environments')
+
+ SYNTAX_VALIDATORS = {
+ 'metadata_version': (METADATA_VERSION_MATCHER, ()),
+ 'name': (NAME_MATCHER, ('legacy',)),
+ 'version': (VERSION_MATCHER, ('legacy',)),
+ 'summary': (SUMMARY_MATCHER, ('legacy',)),
+ 'dynamic': (FIELDNAME_MATCHER, ('legacy',)),
+ }
+
+ __slots__ = ('_legacy', '_data', 'scheme')
+
+ def __init__(self, path=None, fileobj=None, mapping=None,
+ scheme='default'):
+ if [path, fileobj, mapping].count(None) < 2:
+ raise TypeError('path, fileobj and mapping are exclusive')
+ self._legacy = None
+ self._data = None
+ self.scheme = scheme
+ #import pdb; pdb.set_trace()
+ if mapping is not None:
+ try:
+ self._validate_mapping(mapping, scheme)
+ self._data = mapping
+ except MetadataUnrecognizedVersionError:
+ self._legacy = LegacyMetadata(mapping=mapping, scheme=scheme)
+ self.validate()
+ else:
+ data = None
+ if path:
+ with open(path, 'rb') as f:
+ data = f.read()
+ elif fileobj:
+ data = fileobj.read()
+ if data is None:
+                # Initialised with no args - fields to be added later
+ self._data = {
+ 'metadata_version': self.METADATA_VERSION,
+ 'generator': self.GENERATOR,
+ }
+ else:
+ if not isinstance(data, text_type):
+ data = data.decode('utf-8')
+ try:
+ self._data = json.loads(data)
+ self._validate_mapping(self._data, scheme)
+ except ValueError:
+ # Note: MetadataUnrecognizedVersionError does not
+ # inherit from ValueError (it's a DistlibException,
+ # which should not inherit from ValueError).
+ # The ValueError comes from the json.load - if that
+ # succeeds and we get a validation error, we want
+ # that to propagate
+ self._legacy = LegacyMetadata(fileobj=StringIO(data),
+ scheme=scheme)
+ self.validate()
+
+ common_keys = set(('name', 'version', 'license', 'keywords', 'summary'))
+
+ none_list = (None, list)
+ none_dict = (None, dict)
+
+ mapped_keys = {
+ 'run_requires': ('Requires-Dist', list),
+ 'build_requires': ('Setup-Requires-Dist', list),
+ 'dev_requires': none_list,
+ 'test_requires': none_list,
+ 'meta_requires': none_list,
+ 'extras': ('Provides-Extra', list),
+ 'modules': none_list,
+ 'namespaces': none_list,
+ 'exports': none_dict,
+ 'commands': none_dict,
+ 'classifiers': ('Classifier', list),
+ 'source_url': ('Download-URL', None),
+ 'metadata_version': ('Metadata-Version', None),
+ }
+
+ del none_list, none_dict
+
+ def __getattribute__(self, key):
+ common = object.__getattribute__(self, 'common_keys')
+ mapped = object.__getattribute__(self, 'mapped_keys')
+ if key in mapped:
+ lk, maker = mapped[key]
+ if self._legacy:
+ if lk is None:
+ result = None if maker is None else maker()
+ else:
+ result = self._legacy.get(lk)
+ else:
+ value = None if maker is None else maker()
+ if key not in ('commands', 'exports', 'modules', 'namespaces',
+ 'classifiers'):
+ result = self._data.get(key, value)
+ else:
+ # special cases for PEP 459
+ sentinel = object()
+ result = sentinel
+ d = self._data.get('extensions')
+ if d:
+ if key == 'commands':
+ result = d.get('python.commands', value)
+ elif key == 'classifiers':
+ d = d.get('python.details')
+ if d:
+ result = d.get(key, value)
+ else:
+ d = d.get('python.exports')
+ if not d:
+ d = self._data.get('python.exports')
+ if d:
+ result = d.get(key, value)
+ if result is sentinel:
+ result = value
+ elif key not in common:
+ result = object.__getattribute__(self, key)
+ elif self._legacy:
+ result = self._legacy.get(key)
+ else:
+ result = self._data.get(key)
+ return result
+
+ def _validate_value(self, key, value, scheme=None):
+ if key in self.SYNTAX_VALIDATORS:
+ pattern, exclusions = self.SYNTAX_VALIDATORS[key]
+ if (scheme or self.scheme) not in exclusions:
+ m = pattern.match(value)
+ if not m:
+ raise MetadataInvalidError("'%s' is an invalid value for "
+ "the '%s' property" % (value,
+ key))
+
+ def __setattr__(self, key, value):
+ self._validate_value(key, value)
+ common = object.__getattribute__(self, 'common_keys')
+ mapped = object.__getattribute__(self, 'mapped_keys')
+ if key in mapped:
+ lk, _ = mapped[key]
+ if self._legacy:
+ if lk is None:
+ raise NotImplementedError
+ self._legacy[lk] = value
+ elif key not in ('commands', 'exports', 'modules', 'namespaces',
+ 'classifiers'):
+ self._data[key] = value
+ else:
+ # special cases for PEP 459
+ d = self._data.setdefault('extensions', {})
+ if key == 'commands':
+ d['python.commands'] = value
+ elif key == 'classifiers':
+ d = d.setdefault('python.details', {})
+ d[key] = value
+ else:
+ d = d.setdefault('python.exports', {})
+ d[key] = value
+ elif key not in common:
+ object.__setattr__(self, key, value)
+ else:
+ if key == 'keywords':
+ if isinstance(value, string_types):
+ value = value.strip()
+ if value:
+ value = value.split()
+ else:
+ value = []
+ if self._legacy:
+ self._legacy[key] = value
+ else:
+ self._data[key] = value
+
+ @property
+ def name_and_version(self):
+ return _get_name_and_version(self.name, self.version, True)
+
+ @property
+ def provides(self):
+ if self._legacy:
+ result = self._legacy['Provides-Dist']
+ else:
+ result = self._data.setdefault('provides', [])
+ s = '%s (%s)' % (self.name, self.version)
+ if s not in result:
+ result.append(s)
+ return result
+
+ @provides.setter
+ def provides(self, value):
+ if self._legacy:
+ self._legacy['Provides-Dist'] = value
+ else:
+ self._data['provides'] = value
+
+ def get_requirements(self, reqts, extras=None, env=None):
+ """
+ Base method to get dependencies, given a set of extras
+ to satisfy and an optional environment context.
+        :param reqts: A list of possibly-conditional dependencies,
+                      which may depend on extras and environment markers.
+ :param extras: A list of optional components being requested.
+ :param env: An optional environment for marker evaluation.
+ """
+ if self._legacy:
+ result = reqts
+ else:
+ result = []
+ extras = get_extras(extras or [], self.extras)
+ for d in reqts:
+ if 'extra' not in d and 'environment' not in d:
+ # unconditional
+ include = True
+ else:
+ if 'extra' not in d:
+ # Not extra-dependent - only environment-dependent
+ include = True
+ else:
+ include = d.get('extra') in extras
+ if include:
+ # Not excluded because of extras, check environment
+ marker = d.get('environment')
+ if marker:
+ include = interpret(marker, env)
+ if include:
+ result.extend(d['requires'])
+ for key in ('build', 'dev', 'test'):
+ e = ':%s:' % key
+ if e in extras:
+ extras.remove(e)
+ # A recursive call, but it should terminate since 'test'
+ # has been removed from the extras
+ reqts = self._data.get('%s_requires' % key, [])
+ result.extend(self.get_requirements(reqts, extras=extras,
+ env=env))
+ return result
+
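+    # Editor's illustrative sketch (not part of distlib): in the
+    # non-legacy case each entry is a dict with a 'requires' list and
+    # optional 'extra' / 'environment' conditions; assuming 'test' is
+    # among md.extras:
+    #
+    #   reqts = [{'requires': ['requests']},
+    #            {'extra': 'test', 'requires': ['pytest']}]
+    #   md.get_requirements(reqts, extras=['test'])
+    #   # -> ['requests', 'pytest'] (plus any test_requires entries)
+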
+ @property
+ def dictionary(self):
+ if self._legacy:
+ return self._from_legacy()
+ return self._data
+
+ @property
+ def dependencies(self):
+ if self._legacy:
+ raise NotImplementedError
+ else:
+ return extract_by_key(self._data, self.DEPENDENCY_KEYS)
+
+ @dependencies.setter
+ def dependencies(self, value):
+ if self._legacy:
+ raise NotImplementedError
+ else:
+ self._data.update(value)
+
+ def _validate_mapping(self, mapping, scheme):
+ if mapping.get('metadata_version') != self.METADATA_VERSION:
+ raise MetadataUnrecognizedVersionError()
+ missing = []
+ for key, exclusions in self.MANDATORY_KEYS.items():
+ if key not in mapping:
+ if scheme not in exclusions:
+ missing.append(key)
+ if missing:
+ msg = 'Missing metadata items: %s' % ', '.join(missing)
+ raise MetadataMissingError(msg)
+ for k, v in mapping.items():
+ self._validate_value(k, v, scheme)
+
+ def validate(self):
+ if self._legacy:
+ missing, warnings = self._legacy.check(True)
+ if missing or warnings:
+ logger.warning('Metadata: missing: %s, warnings: %s',
+ missing, warnings)
+ else:
+ self._validate_mapping(self._data, self.scheme)
+
+ def todict(self):
+ if self._legacy:
+ return self._legacy.todict(True)
+ else:
+ result = extract_by_key(self._data, self.INDEX_KEYS)
+ return result
+
+ def _from_legacy(self):
+ assert self._legacy and not self._data
+ result = {
+ 'metadata_version': self.METADATA_VERSION,
+ 'generator': self.GENERATOR,
+ }
+ lmd = self._legacy.todict(True) # skip missing ones
+ for k in ('name', 'version', 'license', 'summary', 'description',
+ 'classifier'):
+ if k in lmd:
+ if k == 'classifier':
+ nk = 'classifiers'
+ else:
+ nk = k
+ result[nk] = lmd[k]
+ kw = lmd.get('Keywords', [])
+ if kw == ['']:
+ kw = []
+ result['keywords'] = kw
+ keys = (('requires_dist', 'run_requires'),
+ ('setup_requires_dist', 'build_requires'))
+ for ok, nk in keys:
+ if ok in lmd and lmd[ok]:
+ result[nk] = [{'requires': lmd[ok]}]
+ result['provides'] = self.provides
+ author = {}
+ maintainer = {}
+ return result
+
+ LEGACY_MAPPING = {
+ 'name': 'Name',
+ 'version': 'Version',
+ ('extensions', 'python.details', 'license'): 'License',
+ 'summary': 'Summary',
+ 'description': 'Description',
+ ('extensions', 'python.project', 'project_urls', 'Home'): 'Home-page',
+ ('extensions', 'python.project', 'contacts', 0, 'name'): 'Author',
+ ('extensions', 'python.project', 'contacts', 0, 'email'): 'Author-email',
+ 'source_url': 'Download-URL',
+ ('extensions', 'python.details', 'classifiers'): 'Classifier',
+ }
+
+ def _to_legacy(self):
+ def process_entries(entries):
+ reqts = set()
+ for e in entries:
+ extra = e.get('extra')
+ env = e.get('environment')
+ rlist = e['requires']
+ for r in rlist:
+ if not env and not extra:
+ reqts.add(r)
+ else:
+ marker = ''
+ if extra:
+ marker = 'extra == "%s"' % extra
+ if env:
+ if marker:
+ marker = '(%s) and %s' % (env, marker)
+ else:
+ marker = env
+ reqts.add(';'.join((r, marker)))
+ return reqts
+
+ assert self._data and not self._legacy
+ result = LegacyMetadata()
+ nmd = self._data
+ # import pdb; pdb.set_trace()
+ for nk, ok in self.LEGACY_MAPPING.items():
+ if not isinstance(nk, tuple):
+ if nk in nmd:
+ result[ok] = nmd[nk]
+ else:
+ d = nmd
+ found = True
+ for k in nk:
+ try:
+ d = d[k]
+ except (KeyError, IndexError):
+ found = False
+ break
+ if found:
+ result[ok] = d
+ r1 = process_entries(self.run_requires + self.meta_requires)
+ r2 = process_entries(self.build_requires + self.dev_requires)
+ if self.extras:
+ result['Provides-Extra'] = sorted(self.extras)
+ result['Requires-Dist'] = sorted(r1)
+ result['Setup-Requires-Dist'] = sorted(r2)
+ # TODO: any other fields wanted
+ return result
+
+ def write(self, path=None, fileobj=None, legacy=False, skip_unknown=True):
+ if [path, fileobj].count(None) != 1:
+ raise ValueError('Exactly one of path and fileobj is needed')
+ self.validate()
+ if legacy:
+ if self._legacy:
+ legacy_md = self._legacy
+ else:
+ legacy_md = self._to_legacy()
+ if path:
+ legacy_md.write(path, skip_unknown=skip_unknown)
+ else:
+ legacy_md.write_file(fileobj, skip_unknown=skip_unknown)
+ else:
+ if self._legacy:
+ d = self._from_legacy()
+ else:
+ d = self._data
+ if fileobj:
+ json.dump(d, fileobj, ensure_ascii=True, indent=2,
+ sort_keys=True)
+ else:
+ with codecs.open(path, 'w', 'utf-8') as f:
+ json.dump(d, f, ensure_ascii=True, indent=2,
+ sort_keys=True)
+
+ def add_requirements(self, requirements):
+ if self._legacy:
+ self._legacy.add_requirements(requirements)
+ else:
+ run_requires = self._data.setdefault('run_requires', [])
+ always = None
+ for entry in run_requires:
+ if 'environment' not in entry and 'extra' not in entry:
+ always = entry
+ break
+ if always is None:
+ always = { 'requires': requirements }
+ run_requires.insert(0, always)
+ else:
+ rset = set(always['requires']) | set(requirements)
+ always['requires'] = sorted(rset)
+
+ def __repr__(self):
+ name = self.name or '(no name)'
+ version = self.version or 'no version'
+ return '<%s %s %s (%s)>' % (self.__class__.__name__,
+ self.metadata_version, name, version)
diff --git a/third_party/python/pip/pip/_vendor/distlib/resources.py b/third_party/python/pip/pip/_vendor/distlib/resources.py
new file mode 100644
index 0000000000..fef52aa103
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/resources.py
@@ -0,0 +1,358 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2017 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import unicode_literals
+
+import bisect
+import io
+import logging
+import os
+import pkgutil
+import sys
+import types
+import zipimport
+
+from . import DistlibException
+from .util import cached_property, get_cache_base, Cache
+
+logger = logging.getLogger(__name__)
+
+
+cache = None # created when needed
+
+
+class ResourceCache(Cache):
+ def __init__(self, base=None):
+ if base is None:
+ # Use native string to avoid issues on 2.x: see Python #20140.
+ base = os.path.join(get_cache_base(), str('resource-cache'))
+ super(ResourceCache, self).__init__(base)
+
+ def is_stale(self, resource, path):
+ """
+ Is the cache stale for the given resource?
+
+ :param resource: The :class:`Resource` being cached.
+ :param path: The path of the resource in the cache.
+ :return: True if the cache is stale.
+ """
+ # Cache invalidation is a hard problem :-)
+ return True
+
+ def get(self, resource):
+ """
+        Get a resource into the cache.
+
+ :param resource: A :class:`Resource` instance.
+ :return: The pathname of the resource in the cache.
+ """
+ prefix, path = resource.finder.get_cache_info(resource)
+ if prefix is None:
+ result = path
+ else:
+ result = os.path.join(self.base, self.prefix_to_dir(prefix), path)
+ dirname = os.path.dirname(result)
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+ if not os.path.exists(result):
+ stale = True
+ else:
+ stale = self.is_stale(resource, path)
+ if stale:
+ # write the bytes of the resource to the cache location
+ with open(result, 'wb') as f:
+ f.write(resource.bytes)
+ return result
+
+
+class ResourceBase(object):
+ def __init__(self, finder, name):
+ self.finder = finder
+ self.name = name
+
+
+class Resource(ResourceBase):
+ """
+ A class representing an in-package resource, such as a data file. This is
+ not normally instantiated by user code, but rather by a
+ :class:`ResourceFinder` which manages the resource.
+ """
+ is_container = False # Backwards compatibility
+
+ def as_stream(self):
+ """
+ Get the resource as a stream.
+
+ This is not a property to make it obvious that it returns a new stream
+ each time.
+ """
+ return self.finder.get_stream(self)
+
+ @cached_property
+ def file_path(self):
+ global cache
+ if cache is None:
+ cache = ResourceCache()
+ return cache.get(self)
+
+ @cached_property
+ def bytes(self):
+ return self.finder.get_bytes(self)
+
+ @cached_property
+ def size(self):
+ return self.finder.get_size(self)
+
+
+class ResourceContainer(ResourceBase):
+ is_container = True # Backwards compatibility
+
+ @cached_property
+ def resources(self):
+ return self.finder.get_resources(self)
+
+
+class ResourceFinder(object):
+ """
+ Resource finder for file system resources.
+ """
+
+ if sys.platform.startswith('java'):
+ skipped_extensions = ('.pyc', '.pyo', '.class')
+ else:
+ skipped_extensions = ('.pyc', '.pyo')
+
+ def __init__(self, module):
+ self.module = module
+ self.loader = getattr(module, '__loader__', None)
+ self.base = os.path.dirname(getattr(module, '__file__', ''))
+
+ def _adjust_path(self, path):
+ return os.path.realpath(path)
+
+ def _make_path(self, resource_name):
+ # Issue #50: need to preserve type of path on Python 2.x
+ # like os.path._get_sep
+ if isinstance(resource_name, bytes): # should only happen on 2.x
+ sep = b'/'
+ else:
+ sep = '/'
+ parts = resource_name.split(sep)
+ parts.insert(0, self.base)
+ result = os.path.join(*parts)
+ return self._adjust_path(result)
+
+ def _find(self, path):
+ return os.path.exists(path)
+
+ def get_cache_info(self, resource):
+ return None, resource.path
+
+ def find(self, resource_name):
+ path = self._make_path(resource_name)
+ if not self._find(path):
+ result = None
+ else:
+ if self._is_directory(path):
+ result = ResourceContainer(self, resource_name)
+ else:
+ result = Resource(self, resource_name)
+ result.path = path
+ return result
+
+ def get_stream(self, resource):
+ return open(resource.path, 'rb')
+
+ def get_bytes(self, resource):
+ with open(resource.path, 'rb') as f:
+ return f.read()
+
+ def get_size(self, resource):
+ return os.path.getsize(resource.path)
+
+ def get_resources(self, resource):
+ def allowed(f):
+ return (f != '__pycache__' and not
+ f.endswith(self.skipped_extensions))
+ return set([f for f in os.listdir(resource.path) if allowed(f)])
+
+ def is_container(self, resource):
+ return self._is_directory(resource.path)
+
+ _is_directory = staticmethod(os.path.isdir)
+
+ def iterator(self, resource_name):
+ resource = self.find(resource_name)
+ if resource is not None:
+ todo = [resource]
+ while todo:
+ resource = todo.pop(0)
+ yield resource
+ if resource.is_container:
+ rname = resource.name
+ for name in resource.resources:
+ if not rname:
+ new_name = name
+ else:
+ new_name = '/'.join([rname, name])
+ child = self.find(new_name)
+ if child.is_container:
+ todo.append(child)
+ else:
+ yield child
+
+
+class ZipResourceFinder(ResourceFinder):
+ """
+ Resource finder for resources in .zip files.
+ """
+ def __init__(self, module):
+ super(ZipResourceFinder, self).__init__(module)
+ archive = self.loader.archive
+ self.prefix_len = 1 + len(archive)
+ # PyPy doesn't have a _files attr on zipimporter, and you can't set one
+ if hasattr(self.loader, '_files'):
+ self._files = self.loader._files
+ else:
+ self._files = zipimport._zip_directory_cache[archive]
+ self.index = sorted(self._files)
+
+ def _adjust_path(self, path):
+ return path
+
+ def _find(self, path):
+ path = path[self.prefix_len:]
+ if path in self._files:
+ result = True
+ else:
+ if path and path[-1] != os.sep:
+ path = path + os.sep
+ i = bisect.bisect(self.index, path)
+ try:
+ result = self.index[i].startswith(path)
+ except IndexError:
+ result = False
+ if not result:
+ logger.debug('_find failed: %r %r', path, self.loader.prefix)
+ else:
+ logger.debug('_find worked: %r %r', path, self.loader.prefix)
+ return result
+
+ def get_cache_info(self, resource):
+ prefix = self.loader.archive
+ path = resource.path[1 + len(prefix):]
+ return prefix, path
+
+ def get_bytes(self, resource):
+ return self.loader.get_data(resource.path)
+
+ def get_stream(self, resource):
+ return io.BytesIO(self.get_bytes(resource))
+
+ def get_size(self, resource):
+ path = resource.path[self.prefix_len:]
+ return self._files[path][3]
+
+ def get_resources(self, resource):
+ path = resource.path[self.prefix_len:]
+ if path and path[-1] != os.sep:
+ path += os.sep
+ plen = len(path)
+ result = set()
+ i = bisect.bisect(self.index, path)
+ while i < len(self.index):
+ if not self.index[i].startswith(path):
+ break
+ s = self.index[i][plen:]
+ result.add(s.split(os.sep, 1)[0]) # only immediate children
+ i += 1
+ return result
+
+ def _is_directory(self, path):
+ path = path[self.prefix_len:]
+ if path and path[-1] != os.sep:
+ path += os.sep
+ i = bisect.bisect(self.index, path)
+ try:
+ result = self.index[i].startswith(path)
+ except IndexError:
+ result = False
+ return result
+
+
+_finder_registry = {
+ type(None): ResourceFinder,
+ zipimport.zipimporter: ZipResourceFinder
+}
+
+try:
+ # In Python 3.6, _frozen_importlib -> _frozen_importlib_external
+ try:
+ import _frozen_importlib_external as _fi
+ except ImportError:
+ import _frozen_importlib as _fi
+ _finder_registry[_fi.SourceFileLoader] = ResourceFinder
+ _finder_registry[_fi.FileFinder] = ResourceFinder
+ # See issue #146
+ _finder_registry[_fi.SourcelessFileLoader] = ResourceFinder
+ del _fi
+except (ImportError, AttributeError):
+ pass
+
+
+def register_finder(loader, finder_maker):
+ _finder_registry[type(loader)] = finder_maker
+
+
+_finder_cache = {}
+
+
+def finder(package):
+ """
+ Return a resource finder for a package.
+ :param package: The name of the package.
+ :return: A :class:`ResourceFinder` instance for the package.
+ """
+ if package in _finder_cache:
+ result = _finder_cache[package]
+ else:
+ if package not in sys.modules:
+ __import__(package)
+ module = sys.modules[package]
+ path = getattr(module, '__path__', None)
+ if path is None:
+ raise DistlibException('You cannot get a finder for a module, '
+ 'only for a package')
+ loader = getattr(module, '__loader__', None)
+ finder_maker = _finder_registry.get(type(loader))
+ if finder_maker is None:
+ raise DistlibException('Unable to locate finder for %r' % package)
+ result = finder_maker(module)
+ _finder_cache[package] = result
+ return result
+
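+# Editor's illustrative sketch (not part of distlib; package and file
+# names are hypothetical):
+#
+#   f = finder('mypackage')
+#   r = f.find('data/defaults.cfg')
+#   if r is not None and not r.is_container:
+#       content = r.bytes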
+
+_dummy_module = types.ModuleType(str('__dummy__'))
+
+
+def finder_for_path(path):
+ """
+ Return a resource finder for a path, which should represent a container.
+
+ :param path: The path.
+ :return: A :class:`ResourceFinder` instance for the path.
+ """
+ result = None
+ # calls any path hooks, gets importer into cache
+ pkgutil.get_importer(path)
+ loader = sys.path_importer_cache.get(path)
+ finder = _finder_registry.get(type(loader))
+ if finder:
+ module = _dummy_module
+ module.__file__ = os.path.join(path, '')
+ module.__loader__ = loader
+ result = finder(module)
+ return result
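+# Editor's illustrative sketch (not part of distlib): unlike finder(),
+# this works on a bare path (e.g. a directory or a .zip file) rather
+# than an importable package (the path below is hypothetical):
+#
+#   f = finder_for_path('/tmp/some.zip')
+#   if f is not None:
+#       for rsrc in f.iterator(''):
+#           print(rsrc.name)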
diff --git a/third_party/python/pip/pip/_vendor/distlib/scripts.py b/third_party/python/pip/pip/_vendor/distlib/scripts.py
new file mode 100644
index 0000000000..d2706242b8
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/scripts.py
@@ -0,0 +1,437 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2015 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from io import BytesIO
+import logging
+import os
+import re
+import struct
+import sys
+import time
+from zipfile import ZipInfo
+
+from .compat import sysconfig, detect_encoding, ZipFile
+from .resources import finder
+from .util import (FileOperator, get_export_entry, convert_path,
+ get_executable, get_platform, in_venv)
+
+logger = logging.getLogger(__name__)
+
+_DEFAULT_MANIFEST = '''
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+ <assemblyIdentity version="1.0.0.0"
+ processorArchitecture="X86"
+ name="%s"
+ type="win32"/>
+
+ <!-- Identify the application security requirements. -->
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+</assembly>'''.strip()
+
+# check if Python is called on the first line with this expression
+FIRST_LINE_RE = re.compile(b'^#!.*pythonw?[0-9.]*([ \t].*)?$')
+SCRIPT_TEMPLATE = r'''# -*- coding: utf-8 -*-
+import re
+import sys
+from %(module)s import %(import_name)s
+if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
+ sys.exit(%(func)s())
+'''
+
+
+def enquote_executable(executable):
+ if ' ' in executable:
+ # make sure we quote only the executable in case of env
+ # for example /usr/bin/env "/dir with spaces/bin/jython"
+ # instead of "/usr/bin/env /dir with spaces/bin/jython"
+        # otherwise the whole command line would be quoted
+ if executable.startswith('/usr/bin/env '):
+ env, _executable = executable.split(' ', 1)
+ if ' ' in _executable and not _executable.startswith('"'):
+ executable = '%s "%s"' % (env, _executable)
+ else:
+ if not executable.startswith('"'):
+ executable = '"%s"' % executable
+ return executable
+
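+# Editor's illustrative sketch (not part of distlib; paths are
+# hypothetical):
+#
+#   enquote_executable('/opt/my python/bin/python')
+#   # -> '"/opt/my python/bin/python"'
+#   enquote_executable('/usr/bin/env /dir with spaces/bin/jython')
+#   # -> '/usr/bin/env "/dir with spaces/bin/jython"'
+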
+# Keep the old name around (for now), as there is at least one project using it!
+_enquote_executable = enquote_executable
+
+class ScriptMaker(object):
+ """
+ A class to copy or create scripts from source scripts or callable
+ specifications.
+ """
+ script_template = SCRIPT_TEMPLATE
+
+ executable = None # for shebangs
+
+ def __init__(self, source_dir, target_dir, add_launchers=True,
+ dry_run=False, fileop=None):
+ self.source_dir = source_dir
+ self.target_dir = target_dir
+ self.add_launchers = add_launchers
+ self.force = False
+ self.clobber = False
+ # It only makes sense to set mode bits on POSIX.
+ self.set_mode = (os.name == 'posix') or (os.name == 'java' and
+ os._name == 'posix')
+ self.variants = set(('', 'X.Y'))
+ self._fileop = fileop or FileOperator(dry_run)
+
+ self._is_nt = os.name == 'nt' or (
+ os.name == 'java' and os._name == 'nt')
+ self.version_info = sys.version_info
+
+ def _get_alternate_executable(self, executable, options):
+ if options.get('gui', False) and self._is_nt: # pragma: no cover
+ dn, fn = os.path.split(executable)
+ fn = fn.replace('python', 'pythonw')
+ executable = os.path.join(dn, fn)
+ return executable
+
+ if sys.platform.startswith('java'): # pragma: no cover
+ def _is_shell(self, executable):
+ """
+ Determine if the specified executable is a script
+ (contains a #! line)
+ """
+ try:
+ with open(executable) as fp:
+ return fp.read(2) == '#!'
+ except (OSError, IOError):
+ logger.warning('Failed to open %s', executable)
+ return False
+
+ def _fix_jython_executable(self, executable):
+ if self._is_shell(executable):
+                # The workaround for Jython is not needed on Linux systems.
+ import java
+
+ if java.lang.System.getProperty('os.name') == 'Linux':
+ return executable
+ elif executable.lower().endswith('jython.exe'):
+ # Use wrapper exe for Jython on Windows
+ return executable
+ return '/usr/bin/env %s' % executable
+
+ def _build_shebang(self, executable, post_interp):
+ """
+        Build a shebang line. In the simple case (on Windows, or a shebang line
+        which is not too long and contains no spaces) use a simple formulation for
+ the shebang. Otherwise, use /bin/sh as the executable, with a contrived
+ shebang which allows the script to run either under Python or sh, using
+ suitable quoting. Thanks to Harald Nordgren for his input.
+
+ See also: http://www.in-ulm.de/~mascheck/various/shebang/#length
+ https://hg.mozilla.org/mozilla-central/file/tip/mach
+ """
+ if os.name != 'posix':
+ simple_shebang = True
+ else:
+ # Add 3 for '#!' prefix and newline suffix.
+ shebang_length = len(executable) + len(post_interp) + 3
+ if sys.platform == 'darwin':
+ max_shebang_length = 512
+ else:
+ max_shebang_length = 127
+ simple_shebang = ((b' ' not in executable) and
+ (shebang_length <= max_shebang_length))
+
+ if simple_shebang:
+ result = b'#!' + executable + post_interp + b'\n'
+ else:
+ result = b'#!/bin/sh\n'
+ result += b"'''exec' " + executable + post_interp + b' "$0" "$@"\n'
+ result += b"' '''"
+ return result
+
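+ # For illustration, the contrived (non-simple) branch produces a header
+ # like the following (the interpreter path is hypothetical):
+ #
+ #   #!/bin/sh
+ #   '''exec' /opt/some/long/hypothetical/prefix/bin/python3 "$0" "$@"
+ #   ' '''
+ #
+ # /bin/sh parses '''exec' on line 2 as the word "exec" and re-executes the
+ # file under the interpreter, while Python parses lines 2-3 as a harmless
+ # triple-quoted string, so the same file runs under both sh and Python.
+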
+ def _get_shebang(self, encoding, post_interp=b'', options=None):
+ enquote = True
+ if self.executable:
+ executable = self.executable
+ enquote = False # assume this will be taken care of
+ elif not sysconfig.is_python_build():
+ executable = get_executable()
+ elif in_venv(): # pragma: no cover
+ executable = os.path.join(sysconfig.get_path('scripts'),
+ 'python%s' % sysconfig.get_config_var('EXE'))
+ else: # pragma: no cover
+ executable = os.path.join(
+ sysconfig.get_config_var('BINDIR'),
+ 'python%s%s' % (sysconfig.get_config_var('VERSION'),
+ sysconfig.get_config_var('EXE')))
+ if not os.path.isfile(executable):
+ # for Python builds from source on Windows, no Python executables with
+ # a version suffix are created, so we use python.exe
+ executable = os.path.join(sysconfig.get_config_var('BINDIR'),
+ 'python%s' % (sysconfig.get_config_var('EXE')))
+ if options:
+ executable = self._get_alternate_executable(executable, options)
+
+ if sys.platform.startswith('java'): # pragma: no cover
+ executable = self._fix_jython_executable(executable)
+
+ # Normalise case for Windows - COMMENTED OUT
+ # executable = os.path.normcase(executable)
+ # N.B. The normalising operation above has been commented out: See
+ # issue #124. Although paths in Windows are generally case-insensitive,
+ # they aren't always. For example, a path containing a ẞ (which is a
+ # LATIN CAPITAL LETTER SHARP S - U+1E9E) is normcased to ß (which is a
+ # LATIN SMALL LETTER SHARP S - U+00DF). The two are not considered by
+ # Windows as equivalent in path names.
+
+ # If the user didn't specify an executable, it may be necessary to
+ # cater for executable paths with spaces (not uncommon on Windows)
+ if enquote:
+ executable = enquote_executable(executable)
+ # Issue #51: don't use fsencode, since we later try to
+ # check that the shebang is decodable using utf-8.
+ executable = executable.encode('utf-8')
+ # in case of IronPython, play safe and enable frames support
+ if (sys.platform == 'cli' and '-X:Frames' not in post_interp
+ and '-X:FullFrames' not in post_interp): # pragma: no cover
+ post_interp += b' -X:Frames'
+ shebang = self._build_shebang(executable, post_interp)
+ # The Python parser reads a script as UTF-8 until it encounters a
+ # #coding:xxx cookie. Since the shebang has to be the first line of
+ # the file, the #coding:xxx cookie cannot come before it, so the
+ # shebang has to be decodable from UTF-8.
+ try:
+ shebang.decode('utf-8')
+ except UnicodeDecodeError: # pragma: no cover
+ raise ValueError(
+ 'The shebang (%r) is not decodable from utf-8' % shebang)
+ # If the script is encoded to a custom encoding (use a
+ # #coding:xxx cookie), the shebang has to be decodable from
+ # the script encoding too.
+ if encoding != 'utf-8':
+ try:
+ shebang.decode(encoding)
+ except UnicodeDecodeError: # pragma: no cover
+ raise ValueError(
+ 'The shebang (%r) is not decodable '
+ 'from the script encoding (%r)' % (shebang, encoding))
+ return shebang
+
+ def _get_script_text(self, entry):
+ return self.script_template % dict(module=entry.prefix,
+ import_name=entry.suffix.split('.')[0],
+ func=entry.suffix)
+
+ manifest = _DEFAULT_MANIFEST
+
+ def get_manifest(self, exename):
+ base = os.path.basename(exename)
+ return self.manifest % base
+
+ def _write_script(self, names, shebang, script_bytes, filenames, ext):
+ use_launcher = self.add_launchers and self._is_nt
+ linesep = os.linesep.encode('utf-8')
+ if not shebang.endswith(linesep):
+ shebang += linesep
+ if not use_launcher:
+ script_bytes = shebang + script_bytes
+ else: # pragma: no cover
+ if ext == 'py':
+ launcher = self._get_launcher('t')
+ else:
+ launcher = self._get_launcher('w')
+ stream = BytesIO()
+ with ZipFile(stream, 'w') as zf:
+ source_date_epoch = os.environ.get('SOURCE_DATE_EPOCH')
+ if source_date_epoch:
+ date_time = time.gmtime(int(source_date_epoch))[:6]
+ zinfo = ZipInfo(filename='__main__.py', date_time=date_time)
+ zf.writestr(zinfo, script_bytes)
+ else:
+ zf.writestr('__main__.py', script_bytes)
+ zip_data = stream.getvalue()
+ script_bytes = launcher + shebang + zip_data
+ for name in names:
+ outname = os.path.join(self.target_dir, name)
+ if use_launcher: # pragma: no cover
+ n, e = os.path.splitext(outname)
+ if e.startswith('.py'):
+ outname = n
+ outname = '%s.exe' % outname
+ try:
+ self._fileop.write_binary_file(outname, script_bytes)
+ except Exception:
+ # Failed writing an executable - it might be in use.
+ logger.warning('Failed to write executable - trying to '
+ 'use .deleteme logic')
+ dfname = '%s.deleteme' % outname
+ if os.path.exists(dfname):
+ os.remove(dfname) # Not allowed to fail here
+ os.rename(outname, dfname) # nor here
+ self._fileop.write_binary_file(outname, script_bytes)
+ logger.debug('Able to replace executable using '
+ '.deleteme logic')
+ try:
+ os.remove(dfname)
+ except Exception:
+ pass # still in use - ignore error
+ else:
+ if self._is_nt and not outname.endswith('.' + ext): # pragma: no cover
+ outname = '%s.%s' % (outname, ext)
+ if os.path.exists(outname) and not self.clobber:
+ logger.warning('Skipping existing file %s', outname)
+ continue
+ self._fileop.write_binary_file(outname, script_bytes)
+ if self.set_mode:
+ self._fileop.set_executable_mode([outname])
+ filenames.append(outname)
+
+ variant_separator = '-'
+
+ def get_script_filenames(self, name):
+ result = set()
+ if '' in self.variants:
+ result.add(name)
+ if 'X' in self.variants:
+ result.add('%s%s' % (name, self.version_info[0]))
+ if 'X.Y' in self.variants:
+ result.add('%s%s%s.%s' % (name, self.variant_separator,
+ self.version_info[0], self.version_info[1]))
+ return result
+
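+ # Illustrative sketch (instance and script name hypothetical): with
+ # version_info == (3, 11) and all three variants enabled,
+ #
+ #   maker.variants = {'', 'X', 'X.Y'}
+ #   maker.get_script_filenames('foo')
+ #   # -> {'foo', 'foo3', 'foo-3.11'}
+ #
+ # Only the 'X.Y' variant uses variant_separator; 'X' appends the major
+ # version directly.
+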
+ def _make_script(self, entry, filenames, options=None):
+ post_interp = b''
+ if options:
+ args = options.get('interpreter_args', [])
+ if args:
+ args = ' %s' % ' '.join(args)
+ post_interp = args.encode('utf-8')
+ shebang = self._get_shebang('utf-8', post_interp, options=options)
+ script = self._get_script_text(entry).encode('utf-8')
+ scriptnames = self.get_script_filenames(entry.name)
+ if options and options.get('gui', False):
+ ext = 'pyw'
+ else:
+ ext = 'py'
+ self._write_script(scriptnames, shebang, script, filenames, ext)
+
+ def _copy_script(self, script, filenames):
+ adjust = False
+ script = os.path.join(self.source_dir, convert_path(script))
+ outname = os.path.join(self.target_dir, os.path.basename(script))
+ if not self.force and not self._fileop.newer(script, outname):
+ logger.debug('not copying %s (up-to-date)', script)
+ return
+
+ # Always open the file, but ignore failures in dry-run mode --
+ # that way, we'll get accurate feedback if we can read the
+ # script.
+ try:
+ f = open(script, 'rb')
+ except IOError: # pragma: no cover
+ if not self.dry_run:
+ raise
+ f = None
+ else:
+ first_line = f.readline()
+ if not first_line: # pragma: no cover
+ logger.warning('%s is an empty file (skipping)', script)
+ return
+
+ match = FIRST_LINE_RE.match(first_line.replace(b'\r\n', b'\n'))
+ if match:
+ adjust = True
+ post_interp = match.group(1) or b''
+
+ if not adjust:
+ if f:
+ f.close()
+ self._fileop.copy_file(script, outname)
+ if self.set_mode:
+ self._fileop.set_executable_mode([outname])
+ filenames.append(outname)
+ else:
+ logger.info('copying and adjusting %s -> %s', script,
+ self.target_dir)
+ if not self._fileop.dry_run:
+ encoding, lines = detect_encoding(f.readline)
+ f.seek(0)
+ shebang = self._get_shebang(encoding, post_interp)
+ if b'pythonw' in first_line: # pragma: no cover
+ ext = 'pyw'
+ else:
+ ext = 'py'
+ n = os.path.basename(outname)
+ self._write_script([n], shebang, f.read(), filenames, ext)
+ if f:
+ f.close()
+
+ @property
+ def dry_run(self):
+ return self._fileop.dry_run
+
+ @dry_run.setter
+ def dry_run(self, value):
+ self._fileop.dry_run = value
+
+ if os.name == 'nt' or (os.name == 'java' and os._name == 'nt'): # pragma: no cover
+ # Executable launcher support.
+ # Launchers are from https://bitbucket.org/vinay.sajip/simple_launcher/
+
+ def _get_launcher(self, kind):
+ if struct.calcsize('P') == 8: # 64-bit
+ bits = '64'
+ else:
+ bits = '32'
+ platform_suffix = '-arm' if get_platform() == 'win-arm64' else ''
+ name = '%s%s%s.exe' % (kind, bits, platform_suffix)
+ # Issue 31: don't hardcode an absolute package name, but
+ # determine it relative to the current package
+ distlib_package = __name__.rsplit('.', 1)[0]
+ resource = finder(distlib_package).find(name)
+ if not resource:
+ msg = ('Unable to find resource %s in package %s' % (name,
+ distlib_package))
+ raise ValueError(msg)
+ return resource.bytes
+
+ # Public API follows
+
+ def make(self, specification, options=None):
+ """
+ Make a script.
+
+ :param specification: The specification, which is either a valid export
+ entry specification (to make a script from a
+ callable) or a filename (to make a script by
+ copying from a source location).
+ :param options: A dictionary of options controlling script generation.
+ :return: A list of all absolute pathnames written to.
+ """
+ filenames = []
+ entry = get_export_entry(specification)
+ if entry is None:
+ self._copy_script(specification, filenames)
+ else:
+ self._make_script(entry, filenames, options=options)
+ return filenames
+
+ def make_multiple(self, specifications, options=None):
+ """
+ Take a list of specifications and make scripts from them.
+
+ :param specifications: A list of specifications.
+ :param options: A dictionary of options controlling script generation.
+ :return: A list of all absolute pathnames written to.
+ """
+ filenames = []
+ for specification in specifications:
+ filenames.extend(self.make(specification, options))
+ return filenames
diff --git a/third_party/python/pip/pip/_vendor/distlib/t32.exe b/third_party/python/pip/pip/_vendor/distlib/t32.exe
new file mode 100644
index 0000000000..52154f0be3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/t32.exe
Binary files differ
diff --git a/third_party/python/pip/pip/_vendor/distlib/t64-arm.exe b/third_party/python/pip/pip/_vendor/distlib/t64-arm.exe
new file mode 100644
index 0000000000..e1ab8f8f58
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/t64-arm.exe
Binary files differ
diff --git a/third_party/python/pip/pip/_vendor/distlib/t64.exe b/third_party/python/pip/pip/_vendor/distlib/t64.exe
new file mode 100644
index 0000000000..e8bebdba6d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/t64.exe
Binary files differ
diff --git a/third_party/python/pip/pip/_vendor/distlib/util.py b/third_party/python/pip/pip/_vendor/distlib/util.py
new file mode 100644
index 0000000000..dd01849d99
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/util.py
@@ -0,0 +1,1932 @@
+#
+# Copyright (C) 2012-2021 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+import codecs
+from collections import deque
+import contextlib
+import csv
+from glob import iglob as std_iglob
+import io
+import json
+import logging
+import os
+import py_compile
+import re
+import socket
+try:
+ import ssl
+except ImportError: # pragma: no cover
+ ssl = None
+import subprocess
+import sys
+import tarfile
+import tempfile
+import textwrap
+
+try:
+ import threading
+except ImportError: # pragma: no cover
+ import dummy_threading as threading
+import time
+
+from . import DistlibException
+from .compat import (string_types, text_type, shutil, raw_input, StringIO,
+ cache_from_source, urlopen, urljoin, httplib, xmlrpclib,
+ splittype, HTTPHandler, BaseConfigurator, valid_ident,
+ Container, configparser, URLError, ZipFile, fsdecode,
+ unquote, urlparse)
+
+logger = logging.getLogger(__name__)
+
+#
+# Requirement parsing code as per PEP 508
+#
+
+IDENTIFIER = re.compile(r'^([\w\.-]+)\s*')
+VERSION_IDENTIFIER = re.compile(r'^([\w\.*+-]+)\s*')
+COMPARE_OP = re.compile(r'^(<=?|>=?|={2,3}|[~!]=)\s*')
+MARKER_OP = re.compile(r'^((<=?)|(>=?)|={2,3}|[~!]=|in|not\s+in)\s*')
+OR = re.compile(r'^or\b\s*')
+AND = re.compile(r'^and\b\s*')
+NON_SPACE = re.compile(r'(\S+)\s*')
+STRING_CHUNK = re.compile(r'([\s\w\.{}()*+#:;,/?!~`@$%^&=|<>\[\]-]+)')
+
+
+def parse_marker(marker_string):
+ """
+ Parse a marker string and return a dictionary containing a marker expression.
+
+ The dictionary will contain keys "op", "lhs" and "rhs" for non-terminals in
+ the expression grammar, or strings. A string contained in quotes is to be
+ interpreted as a literal string, and a string not contained in quotes is a
+ variable (such as os_name).
+ """
+ def marker_var(remaining):
+ # either identifier, or literal string
+ m = IDENTIFIER.match(remaining)
+ if m:
+ result = m.groups()[0]
+ remaining = remaining[m.end():]
+ elif not remaining:
+ raise SyntaxError('unexpected end of input')
+ else:
+ q = remaining[0]
+ if q not in '\'"':
+ raise SyntaxError('invalid expression: %s' % remaining)
+ oq = '\'"'.replace(q, '')
+ remaining = remaining[1:]
+ parts = [q]
+ while remaining:
+ # either a string chunk, or oq, or q to terminate
+ if remaining[0] == q:
+ break
+ elif remaining[0] == oq:
+ parts.append(oq)
+ remaining = remaining[1:]
+ else:
+ m = STRING_CHUNK.match(remaining)
+ if not m:
+ raise SyntaxError('error in string literal: %s' % remaining)
+ parts.append(m.groups()[0])
+ remaining = remaining[m.end():]
+ else:
+ s = ''.join(parts)
+ raise SyntaxError('unterminated string: %s' % s)
+ parts.append(q)
+ result = ''.join(parts)
+ remaining = remaining[1:].lstrip() # skip past closing quote
+ return result, remaining
+
+ def marker_expr(remaining):
+ if remaining and remaining[0] == '(':
+ result, remaining = marker(remaining[1:].lstrip())
+ if remaining[0] != ')':
+ raise SyntaxError('unterminated parenthesis: %s' % remaining)
+ remaining = remaining[1:].lstrip()
+ else:
+ lhs, remaining = marker_var(remaining)
+ while remaining:
+ m = MARKER_OP.match(remaining)
+ if not m:
+ break
+ op = m.groups()[0]
+ remaining = remaining[m.end():]
+ rhs, remaining = marker_var(remaining)
+ lhs = {'op': op, 'lhs': lhs, 'rhs': rhs}
+ result = lhs
+ return result, remaining
+
+ def marker_and(remaining):
+ lhs, remaining = marker_expr(remaining)
+ while remaining:
+ m = AND.match(remaining)
+ if not m:
+ break
+ remaining = remaining[m.end():]
+ rhs, remaining = marker_expr(remaining)
+ lhs = {'op': 'and', 'lhs': lhs, 'rhs': rhs}
+ return lhs, remaining
+
+ def marker(remaining):
+ lhs, remaining = marker_and(remaining)
+ while remaining:
+ m = OR.match(remaining)
+ if not m:
+ break
+ remaining = remaining[m.end():]
+ rhs, remaining = marker_and(remaining)
+ lhs = {'op': 'or', 'lhs': lhs, 'rhs': rhs}
+ return lhs, remaining
+
+ return marker(marker_string)
+
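+# A minimal sketch of the returned structure (the marker string below is
+# hypothetical; note that quotes are kept on string literals):
+#
+#   tree, rest = parse_marker('python_version >= "3.8" and os_name == "posix"')
+#   # tree == {'op': 'and',
+#   #          'lhs': {'op': '>=', 'lhs': 'python_version', 'rhs': '"3.8"'},
+#   #          'rhs': {'op': '==', 'lhs': 'os_name', 'rhs': '"posix"'}}
+#   # rest == ''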
+
+def parse_requirement(req):
+ """
+ Parse a requirement passed in as a string. Return a Container
+ whose attributes contain the various parts of the requirement.
+ """
+ remaining = req.strip()
+ if not remaining or remaining.startswith('#'):
+ return None
+ m = IDENTIFIER.match(remaining)
+ if not m:
+ raise SyntaxError('name expected: %s' % remaining)
+ distname = m.groups()[0]
+ remaining = remaining[m.end():]
+ extras = mark_expr = versions = uri = None
+ if remaining and remaining[0] == '[':
+ i = remaining.find(']', 1)
+ if i < 0:
+ raise SyntaxError('unterminated extra: %s' % remaining)
+ s = remaining[1:i]
+ remaining = remaining[i + 1:].lstrip()
+ extras = []
+ while s:
+ m = IDENTIFIER.match(s)
+ if not m:
+ raise SyntaxError('malformed extra: %s' % s)
+ extras.append(m.groups()[0])
+ s = s[m.end():]
+ if not s:
+ break
+ if s[0] != ',':
+ raise SyntaxError('comma expected in extras: %s' % s)
+ s = s[1:].lstrip()
+ if not extras:
+ extras = None
+ if remaining:
+ if remaining[0] == '@':
+ # it's a URI
+ remaining = remaining[1:].lstrip()
+ m = NON_SPACE.match(remaining)
+ if not m:
+ raise SyntaxError('invalid URI: %s' % remaining)
+ uri = m.groups()[0]
+ t = urlparse(uri)
+ # there are issues with Python and URL parsing, so this test
+ # is a bit crude. See bpo-20271, bpo-23505. Python doesn't
+ # always parse invalid URLs correctly - it should raise
+ # exceptions for malformed URLs
+ if not (t.scheme and t.netloc):
+ raise SyntaxError('Invalid URL: %s' % uri)
+ remaining = remaining[m.end():].lstrip()
+ else:
+
+ def get_versions(ver_remaining):
+ """
+ Return a list of (operator, version) tuples if any are
+ specified, else None.
+ """
+ m = COMPARE_OP.match(ver_remaining)
+ versions = None
+ if m:
+ versions = []
+ while True:
+ op = m.groups()[0]
+ ver_remaining = ver_remaining[m.end():]
+ m = VERSION_IDENTIFIER.match(ver_remaining)
+ if not m:
+ raise SyntaxError('invalid version: %s' % ver_remaining)
+ v = m.groups()[0]
+ versions.append((op, v))
+ ver_remaining = ver_remaining[m.end():]
+ if not ver_remaining or ver_remaining[0] != ',':
+ break
+ ver_remaining = ver_remaining[1:].lstrip()
+ # Some packages have a trailing comma which would break things
+ # See issue #148
+ if not ver_remaining:
+ break
+ m = COMPARE_OP.match(ver_remaining)
+ if not m:
+ raise SyntaxError('invalid constraint: %s' % ver_remaining)
+ if not versions:
+ versions = None
+ return versions, ver_remaining
+
+ if remaining[0] != '(':
+ versions, remaining = get_versions(remaining)
+ else:
+ i = remaining.find(')', 1)
+ if i < 0:
+ raise SyntaxError('unterminated parenthesis: %s' % remaining)
+ s = remaining[1:i]
+ remaining = remaining[i + 1:].lstrip()
+ # As a special diversion from PEP 508, allow a version number
+ # a.b.c in parentheses as a synonym for ~= a.b.c (because this
+ # is allowed in earlier PEPs)
+ if COMPARE_OP.match(s):
+ versions, _ = get_versions(s)
+ else:
+ m = VERSION_IDENTIFIER.match(s)
+ if not m:
+ raise SyntaxError('invalid constraint: %s' % s)
+ v = m.groups()[0]
+ s = s[m.end():].lstrip()
+ if s:
+ raise SyntaxError('invalid constraint: %s' % s)
+ versions = [('~=', v)]
+
+ if remaining:
+ if remaining[0] != ';':
+ raise SyntaxError('invalid requirement: %s' % remaining)
+ remaining = remaining[1:].lstrip()
+
+ mark_expr, remaining = parse_marker(remaining)
+
+ if remaining and remaining[0] != '#':
+ raise SyntaxError('unexpected trailing data: %s' % remaining)
+
+ if not versions:
+ rs = distname
+ else:
+ rs = '%s %s' % (distname, ', '.join(['%s %s' % con for con in versions]))
+ return Container(name=distname, extras=extras, constraints=versions,
+ marker=mark_expr, url=uri, requirement=rs)
+
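+# A minimal usage sketch (the requirement string is hypothetical):
+#
+#   r = parse_requirement(
+#       'requests[security,tests] >= 2.8.1, == 2.8.* ; python_version < "2.7"')
+#   # r.name        == 'requests'
+#   # r.extras      == ['security', 'tests']
+#   # r.constraints == [('>=', '2.8.1'), ('==', '2.8.*')]
+#   # r.marker      == {'op': '<', 'lhs': 'python_version', 'rhs': '"2.7"'}
+#   # r.requirement == 'requests >= 2.8.1, == 2.8.*'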
+
+def get_resources_dests(resources_root, rules):
+ """Find destinations for resources files"""
+
+ def get_rel_path(root, path):
+ # normalizes and returns a lstripped-/-separated path
+ root = root.replace(os.path.sep, '/')
+ path = path.replace(os.path.sep, '/')
+ assert path.startswith(root)
+ return path[len(root):].lstrip('/')
+
+ destinations = {}
+ for base, suffix, dest in rules:
+ prefix = os.path.join(resources_root, base)
+ for abs_base in iglob(prefix):
+ abs_glob = os.path.join(abs_base, suffix)
+ for abs_path in iglob(abs_glob):
+ resource_file = get_rel_path(resources_root, abs_path)
+ if dest is None: # remove the entry if it was here
+ destinations.pop(resource_file, None)
+ else:
+ rel_path = get_rel_path(abs_base, abs_path)
+ rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
+ destinations[resource_file] = rel_dest + '/' + rel_path
+ return destinations
+
+
+def in_venv():
+ if hasattr(sys, 'real_prefix'):
+ # virtualenv venvs
+ result = True
+ else:
+ # PEP 405 venvs
+ result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
+ return result
+
+
+def get_executable():
+# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
+# changes to the stub launcher mean that sys.executable always points
+# to the stub on OS X
+# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
+# in os.environ):
+# result = os.environ['__PYVENV_LAUNCHER__']
+# else:
+# result = sys.executable
+# return result
+ # Avoid normcasing: see issue #143
+ # result = os.path.normcase(sys.executable)
+ result = sys.executable
+ if not isinstance(result, text_type):
+ result = fsdecode(result)
+ return result
+
+
+def proceed(prompt, allowed_chars, error_prompt=None, default=None):
+ p = prompt
+ while True:
+ s = raw_input(p)
+ p = prompt
+ if not s and default:
+ s = default
+ if s:
+ c = s[0].lower()
+ if c in allowed_chars:
+ break
+ if error_prompt:
+ p = '%c: %s\n%s' % (c, error_prompt, prompt)
+ return c
+
+
+def extract_by_key(d, keys):
+ if isinstance(keys, string_types):
+ keys = keys.split()
+ result = {}
+ for key in keys:
+ if key in d:
+ result[key] = d[key]
+ return result
+
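+# Illustrative sketch: keys may be given as a space-separated string or a
+# sequence; missing keys are silently skipped (the dict is hypothetical):
+#
+#   extract_by_key({'name': 'foo', 'version': '1.0'}, 'name version url')
+#   # -> {'name': 'foo', 'version': '1.0'}
+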
+def read_exports(stream):
+ if sys.version_info[0] >= 3:
+ # needs to be a text stream
+ stream = codecs.getreader('utf-8')(stream)
+ # Try to load as JSON, falling back on legacy format
+ data = stream.read()
+ stream = StringIO(data)
+ try:
+ jdata = json.load(stream)
+ result = jdata['extensions']['python.exports']['exports']
+ for group, entries in result.items():
+ for k, v in entries.items():
+ s = '%s = %s' % (k, v)
+ entry = get_export_entry(s)
+ assert entry is not None
+ entries[k] = entry
+ return result
+ except Exception:
+ stream.seek(0, 0)
+
+ def read_stream(cp, stream):
+ if hasattr(cp, 'read_file'):
+ cp.read_file(stream)
+ else:
+ cp.readfp(stream)
+
+ cp = configparser.ConfigParser()
+ try:
+ read_stream(cp, stream)
+ except configparser.MissingSectionHeaderError:
+ stream.close()
+ data = textwrap.dedent(data)
+ stream = StringIO(data)
+ read_stream(cp, stream)
+
+ result = {}
+ for key in cp.sections():
+ result[key] = entries = {}
+ for name, value in cp.items(key):
+ s = '%s = %s' % (name, value)
+ entry = get_export_entry(s)
+ assert entry is not None
+ #entry.dist = self
+ entries[name] = entry
+ return result
+
+
+def write_exports(exports, stream):
+ if sys.version_info[0] >= 3:
+ # needs to be a text stream
+ stream = codecs.getwriter('utf-8')(stream)
+ cp = configparser.ConfigParser()
+ for k, v in exports.items():
+ # TODO check k, v for valid values
+ cp.add_section(k)
+ for entry in v.values():
+ if entry.suffix is None:
+ s = entry.prefix
+ else:
+ s = '%s:%s' % (entry.prefix, entry.suffix)
+ if entry.flags:
+ s = '%s [%s]' % (s, ', '.join(entry.flags))
+ cp.set(k, entry.name, s)
+ cp.write(stream)
+
+
+@contextlib.contextmanager
+def tempdir():
+ td = tempfile.mkdtemp()
+ try:
+ yield td
+ finally:
+ shutil.rmtree(td)
+
+@contextlib.contextmanager
+def chdir(d):
+ cwd = os.getcwd()
+ try:
+ os.chdir(d)
+ yield
+ finally:
+ os.chdir(cwd)
+
+
+@contextlib.contextmanager
+def socket_timeout(seconds=15):
+ cto = socket.getdefaulttimeout()
+ try:
+ socket.setdefaulttimeout(seconds)
+ yield
+ finally:
+ socket.setdefaulttimeout(cto)
+
+
+class cached_property(object):
+ def __init__(self, func):
+ self.func = func
+ #for attr in ('__name__', '__module__', '__doc__'):
+ # setattr(self, attr, getattr(func, attr, None))
+
+ def __get__(self, obj, cls=None):
+ if obj is None:
+ return self
+ value = self.func(obj)
+ object.__setattr__(obj, self.func.__name__, value)
+ #obj.__dict__[self.func.__name__] = value = self.func(obj)
+ return value
+
+def convert_path(pathname):
+ """Return 'pathname' as a name that will work on the native filesystem.
+
+ The path is split on '/' and put back together again using the current
+ directory separator. Needed because filenames in the setup script are
+ always supplied in Unix style, and have to be converted to the local
+ convention before we can actually use them in the filesystem. Raises
+ ValueError on non-Unix-ish systems if 'pathname' either starts or
+ ends with a slash.
+ """
+ if os.sep == '/':
+ return pathname
+ if not pathname:
+ return pathname
+ if pathname[0] == '/':
+ raise ValueError("path '%s' cannot be absolute" % pathname)
+ if pathname[-1] == '/':
+ raise ValueError("path '%s' cannot end with '/'" % pathname)
+
+ paths = pathname.split('/')
+ while os.curdir in paths:
+ paths.remove(os.curdir)
+ if not paths:
+ return os.curdir
+ return os.path.join(*paths)
+
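+# Illustrative sketch: on POSIX the path is returned unchanged, while on
+# Windows (os.sep == '\\') the separators are converted:
+#
+#   convert_path('docs/api/index.html')
+#   # -> 'docs\\api\\index.html' on Windows, 'docs/api/index.html' on POSIX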
+
+class FileOperator(object):
+ def __init__(self, dry_run=False):
+ self.dry_run = dry_run
+ self.ensured = set()
+ self._init_record()
+
+ def _init_record(self):
+ self.record = False
+ self.files_written = set()
+ self.dirs_created = set()
+
+ def record_as_written(self, path):
+ if self.record:
+ self.files_written.add(path)
+
+ def newer(self, source, target):
+ """Tell if the target is newer than the source.
+
+ Returns true if 'source' exists and is more recently modified than
+ 'target', or if 'source' exists and 'target' doesn't.
+
+ Returns false if both exist and 'target' is the same age or younger
+ than 'source'. Raises DistlibException if 'source' does not exist.
+
+ Note that this test is not very accurate: files created in the same
+ second will have the same "age".
+ """
+ if not os.path.exists(source):
+ raise DistlibException("file '%r' does not exist" %
+ os.path.abspath(source))
+ if not os.path.exists(target):
+ return True
+
+ return os.stat(source).st_mtime > os.stat(target).st_mtime
+
+ def copy_file(self, infile, outfile, check=True):
+ """Copy a file respecting dry-run and force flags.
+ """
+ self.ensure_dir(os.path.dirname(outfile))
+ logger.info('Copying %s to %s', infile, outfile)
+ if not self.dry_run:
+ msg = None
+ if check:
+ if os.path.islink(outfile):
+ msg = '%s is a symlink' % outfile
+ elif os.path.exists(outfile) and not os.path.isfile(outfile):
+ msg = '%s is a non-regular file' % outfile
+ if msg:
+ raise ValueError(msg + ' which would be overwritten')
+ shutil.copyfile(infile, outfile)
+ self.record_as_written(outfile)
+
+ def copy_stream(self, instream, outfile, encoding=None):
+ assert not os.path.isdir(outfile)
+ self.ensure_dir(os.path.dirname(outfile))
+ logger.info('Copying stream %s to %s', instream, outfile)
+ if not self.dry_run:
+ if encoding is None:
+ outstream = open(outfile, 'wb')
+ else:
+ outstream = codecs.open(outfile, 'w', encoding=encoding)
+ try:
+ shutil.copyfileobj(instream, outstream)
+ finally:
+ outstream.close()
+ self.record_as_written(outfile)
+
+ def write_binary_file(self, path, data):
+ self.ensure_dir(os.path.dirname(path))
+ if not self.dry_run:
+ if os.path.exists(path):
+ os.remove(path)
+ with open(path, 'wb') as f:
+ f.write(data)
+ self.record_as_written(path)
+
+ def write_text_file(self, path, data, encoding):
+ self.write_binary_file(path, data.encode(encoding))
+
+ def set_mode(self, bits, mask, files):
+ if os.name == 'posix' or (os.name == 'java' and os._name == 'posix'):
+ # Set the executable bits (owner, group, and world) on
+ # all the files specified.
+ for f in files:
+ if self.dry_run:
+ logger.info("changing mode of %s", f)
+ else:
+ mode = (os.stat(f).st_mode | bits) & mask
+ logger.info("changing mode of %s to %o", f, mode)
+ os.chmod(f, mode)
+
+ set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
+
+ def ensure_dir(self, path):
+ path = os.path.abspath(path)
+ if path not in self.ensured and not os.path.exists(path):
+ self.ensured.add(path)
+ d, f = os.path.split(path)
+ self.ensure_dir(d)
+ logger.info('Creating %s' % path)
+ if not self.dry_run:
+ os.mkdir(path)
+ if self.record:
+ self.dirs_created.add(path)
+
+ def byte_compile(self, path, optimize=False, force=False, prefix=None, hashed_invalidation=False):
+ dpath = cache_from_source(path, not optimize)
+ logger.info('Byte-compiling %s to %s', path, dpath)
+ if not self.dry_run:
+ if force or self.newer(path, dpath):
+ if not prefix:
+ diagpath = None
+ else:
+ assert path.startswith(prefix)
+ diagpath = path[len(prefix):]
+ compile_kwargs = {}
+ if hashed_invalidation and hasattr(py_compile, 'PycInvalidationMode'):
+ compile_kwargs['invalidation_mode'] = py_compile.PycInvalidationMode.CHECKED_HASH
+ py_compile.compile(path, dpath, diagpath, True, **compile_kwargs) # doraise=True: raise on error
+ self.record_as_written(dpath)
+ return dpath
+
+ def ensure_removed(self, path):
+ if os.path.exists(path):
+ if os.path.isdir(path) and not os.path.islink(path):
+ logger.debug('Removing directory tree at %s', path)
+ if not self.dry_run:
+ shutil.rmtree(path)
+ if self.record:
+ if path in self.dirs_created:
+ self.dirs_created.remove(path)
+ else:
+ if os.path.islink(path):
+ s = 'link'
+ else:
+ s = 'file'
+ logger.debug('Removing %s %s', s, path)
+ if not self.dry_run:
+ os.remove(path)
+ if self.record:
+ if path in self.files_written:
+ self.files_written.remove(path)
+
+ def is_writable(self, path):
+ result = False
+ while not result:
+ if os.path.exists(path):
+ result = os.access(path, os.W_OK)
+ break
+ parent = os.path.dirname(path)
+ if parent == path:
+ break
+ path = parent
+ return result
+
+ def commit(self):
+ """
+ Commit recorded changes, turn off recording, return
+ changes.
+ """
+ assert self.record
+ result = self.files_written, self.dirs_created
+ self._init_record()
+ return result
+
+ def rollback(self):
+ if not self.dry_run:
+ for f in list(self.files_written):
+ if os.path.exists(f):
+ os.remove(f)
+ # dirs should all be empty now, except perhaps for
+ # __pycache__ subdirs
+ # reverse so that subdirs appear before their parents
+ dirs = sorted(self.dirs_created, reverse=True)
+ for d in dirs:
+ flist = os.listdir(d)
+ if flist:
+ assert flist == ['__pycache__']
+ sd = os.path.join(d, flist[0])
+ os.rmdir(sd)
+ os.rmdir(d) # should fail if non-empty
+ self._init_record()
+
+def resolve(module_name, dotted_path):
+ if module_name in sys.modules:
+ mod = sys.modules[module_name]
+ else:
+ mod = __import__(module_name)
+ if dotted_path is None:
+ result = mod
+ else:
+ parts = dotted_path.split('.')
+ result = getattr(mod, parts.pop(0))
+ for p in parts:
+ result = getattr(result, p)
+ return result
+
+
+class ExportEntry(object):
+ def __init__(self, name, prefix, suffix, flags):
+ self.name = name
+ self.prefix = prefix
+ self.suffix = suffix
+ self.flags = flags
+
+ @cached_property
+ def value(self):
+ return resolve(self.prefix, self.suffix)
+
+ def __repr__(self): # pragma: no cover
+ return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
+ self.suffix, self.flags)
+
+ def __eq__(self, other):
+ if not isinstance(other, ExportEntry):
+ result = False
+ else:
+ result = (self.name == other.name and
+ self.prefix == other.prefix and
+ self.suffix == other.suffix and
+ self.flags == other.flags)
+ return result
+
+ __hash__ = object.__hash__
+
+
+ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.+])+)
+ \s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
+ \s*(\[\s*(?P<flags>[\w-]+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
+ ''', re.VERBOSE)
+
+def get_export_entry(specification):
+ m = ENTRY_RE.search(specification)
+ if not m:
+ result = None
+ if '[' in specification or ']' in specification:
+ raise DistlibException("Invalid specification "
+ "'%s'" % specification)
+ else:
+ d = m.groupdict()
+ name = d['name']
+ path = d['callable']
+ colons = path.count(':')
+ if colons == 0:
+ prefix, suffix = path, None
+ else:
+ if colons != 1:
+ raise DistlibException("Invalid specification "
+ "'%s'" % specification)
+ prefix, suffix = path.split(':')
+ flags = d['flags']
+ if flags is None:
+ if '[' in specification or ']' in specification:
+ raise DistlibException("Invalid specification "
+ "'%s'" % specification)
+ flags = []
+ else:
+ flags = [f.strip() for f in flags.split(',')]
+ result = ExportEntry(name, prefix, suffix, flags)
+ return result
+
+
+def get_cache_base(suffix=None):
+ """
+ Return the default base location for distlib caches. If the directory does
+ not exist, it is created. Use the suffix provided for the base directory,
+ and default to '.distlib' if it isn't provided.
+
+ On Windows, if LOCALAPPDATA is defined in the environment, then it is
+ assumed to be a directory, and will be the parent directory of the result.
+ On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
+ directory - using os.path.expanduser('~') - will be the parent directory of
+ the result.
+
+ The result is just the directory '.distlib' in the parent directory as
+ determined above, or with the name specified with ``suffix``.
+ """
+ if suffix is None:
+ suffix = '.distlib'
+ if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
+ result = os.path.expandvars('$localappdata')
+ else:
+ # Assume posix, or old Windows
+ result = os.path.expanduser('~')
+ # we use 'isdir' instead of 'exists', because we want to
+ # fail if there's a file with that name
+ if os.path.isdir(result):
+ usable = os.access(result, os.W_OK)
+ if not usable:
+ logger.warning('Directory exists but is not writable: %s', result)
+ else:
+ try:
+ os.makedirs(result)
+ usable = True
+ except OSError:
+ logger.warning('Unable to create %s', result, exc_info=True)
+ usable = False
+ if not usable:
+ result = tempfile.mkdtemp()
+ logger.warning('Default location unusable, using %s', result)
+ return os.path.join(result, suffix)
+
+
+def path_to_cache_dir(path):
+ """
+ Convert an absolute path to a directory name for use in a cache.
+
+ The algorithm used is:
+
+ #. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
+ #. Any occurrence of ``os.sep`` is replaced with ``'--'``.
+ #. ``'.cache'`` is appended.
+ """
+ d, p = os.path.splitdrive(os.path.abspath(path))
+ if d:
+ d = d.replace(':', '---')
+ p = p.replace(os.sep, '--')
+ return d + p + '.cache'
+
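+# A minimal sketch of the transformation on POSIX (the path is hypothetical):
+#
+#   path_to_cache_dir('/home/user/.distlib/wheel')
+#   # -> '--home--user--.distlib--wheel.cache'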
+
+def ensure_slash(s):
+ if not s.endswith('/'):
+ return s + '/'
+ return s
+
+
+def parse_credentials(netloc):
+ username = password = None
+ if '@' in netloc:
+ prefix, netloc = netloc.rsplit('@', 1)
+ if ':' not in prefix:
+ username = prefix
+ else:
+ username, password = prefix.split(':', 1)
+ if username:
+ username = unquote(username)
+ if password:
+ password = unquote(password)
+ return username, password, netloc
+
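+# Illustrative sketch (credentials hypothetical); percent-encoded characters
+# in the userinfo part are unquoted:
+#
+#   parse_credentials('user%40corp:s3cret@pypi.example.com')
+#   # -> ('user@corp', 's3cret', 'pypi.example.com')
+#   parse_credentials('pypi.example.com')
+#   # -> (None, None, 'pypi.example.com')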
+
+def get_process_umask():
+ result = os.umask(0o22)
+ os.umask(result)
+ return result
+
+def is_string_sequence(seq):
+ result = True
+ i = None
+ for i, s in enumerate(seq):
+ if not isinstance(s, string_types):
+ result = False
+ break
+ assert i is not None
+ return result
+
+PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
+ '([a-z0-9_.+-]+)', re.I)
+PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
+
+
+def split_filename(filename, project_name=None):
+ """
+ Extract name, version, python version from a filename (no extension)
+
+ Return name, version, pyver or None
+ """
+ result = None
+ pyver = None
+ filename = unquote(filename).replace(' ', '-')
+ m = PYTHON_VERSION.search(filename)
+ if m:
+ pyver = m.group(1)
+ filename = filename[:m.start()]
+ if project_name and len(filename) > len(project_name) + 1:
+ m = re.match(re.escape(project_name) + r'\b', filename)
+ if m:
+ n = m.end()
+ result = filename[:n], filename[n + 1:], pyver
+ if result is None:
+ m = PROJECT_NAME_AND_VERSION.match(filename)
+ if m:
+ result = m.group(1), m.group(3), pyver
+ return result
+
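+# A minimal sketch (the filenames are hypothetical):
+#
+#   split_filename('foo-1.0-py2.7')
+#   # -> ('foo', '1.0', '2.7')
+#   split_filename('foo-bar-1.0', project_name='foo-bar')
+#   # -> ('foo-bar', '1.0', None)
+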
+# Allow spaces in name because of legacy dists like "Twisted Core"
+NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
+ r'\(\s*(?P<ver>[^\s)]+)\)$')
+
+def parse_name_and_version(p):
+ """
+ A utility method used to get name and version from a string.
+
+ From e.g. a Provides-Dist value.
+
+ :param p: A value of the form 'foo (1.0)'.
+ :return: The name and version as a tuple.
+ """
+ m = NAME_VERSION_RE.match(p)
+ if not m:
+ raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
+ d = m.groupdict()
+ return d['name'].strip().lower(), d['ver']
+
+def get_extras(requested, available):
+ result = set()
+ requested = set(requested or [])
+ available = set(available or [])
+ if '*' in requested:
+ requested.remove('*')
+ result |= available
+ for r in requested:
+ if r == '-':
+ result.add(r)
+ elif r.startswith('-'):
+ unwanted = r[1:]
+ if unwanted not in available:
+ logger.warning('undeclared extra: %s' % unwanted)
+ if unwanted in result:
+ result.remove(unwanted)
+ else:
+ if r not in available:
+ logger.warning('undeclared extra: %s' % r)
+ result.add(r)
+ return result
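+
+# Illustrative sketch: '*' selects all available extras and a '-' prefix
+# deselects one (the extra names are hypothetical):
+#
+#   get_extras(['*', '-tests'], ['docs', 'tests'])
+#   # -> {'docs'}
+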
+#
+# Extended metadata functionality
+#
+
+def _get_external_data(url):
+ result = {}
+ try:
+ # urlopen might fail if it runs into redirections,
+ # because of Python issue #13696. Fixed in locators
+ # using a custom redirect handler.
+ resp = urlopen(url)
+ headers = resp.info()
+ ct = headers.get('Content-Type')
+ if not ct.startswith('application/json'):
+ logger.debug('Unexpected response for JSON request: %s', ct)
+ else:
+ reader = codecs.getreader('utf-8')(resp)
+ #data = reader.read().decode('utf-8')
+ #result = json.loads(data)
+ result = json.load(reader)
+ except Exception as e:
+ logger.exception('Failed to get external data for %s: %s', url, e)
+ return result
+
+_external_data_base_url = 'https://www.red-dove.com/pypi/projects/'
+
+def get_project_data(name):
+ url = '%s/%s/project.json' % (name[0].upper(), name)
+ url = urljoin(_external_data_base_url, url)
+ result = _get_external_data(url)
+ return result
+
+def get_package_data(name, version):
+ url = '%s/%s/package-%s.json' % (name[0].upper(), name, version)
+ url = urljoin(_external_data_base_url, url)
+ return _get_external_data(url)
+
+
+class Cache(object):
+ """
+ A class implementing a cache for resources that need to live in the file
+ system, e.g. shared libraries. This class was moved here from the
+ resources module because it can also be used by other modules, e.g. the
+ wheel module.
+ """
+
+ def __init__(self, base):
+ """
+ Initialise an instance.
+
+ :param base: The base directory where the cache should be located.
+ """
+ # we use 'isdir' instead of 'exists', because we want to
+ # fail if there's a file with that name
+ if not os.path.isdir(base): # pragma: no cover
+ os.makedirs(base)
+ if (os.stat(base).st_mode & 0o77) != 0:
+ logger.warning('Directory \'%s\' is not private', base)
+ self.base = os.path.abspath(os.path.normpath(base))
+
+ def prefix_to_dir(self, prefix):
+ """
+ Converts a resource prefix to a directory name in the cache.
+ """
+ return path_to_cache_dir(prefix)
+
+ def clear(self):
+ """
+ Clear the cache.
+ """
+ not_removed = []
+ for fn in os.listdir(self.base):
+ fn = os.path.join(self.base, fn)
+ try:
+ if os.path.islink(fn) or os.path.isfile(fn):
+ os.remove(fn)
+ elif os.path.isdir(fn):
+ shutil.rmtree(fn)
+ except Exception:
+ not_removed.append(fn)
+ return not_removed
+
+
+class EventMixin(object):
+ """
+ A very simple publish/subscribe system.
+ """
+ def __init__(self):
+ self._subscribers = {}
+
+ def add(self, event, subscriber, append=True):
+ """
+ Add a subscriber for an event.
+
+ :param event: The name of an event.
+ :param subscriber: The subscriber to be added (and called when the
+ event is published).
+ :param append: Whether to append or prepend the subscriber to an
+ existing subscriber list for the event.
+ """
+ subs = self._subscribers
+ if event not in subs:
+ subs[event] = deque([subscriber])
+ else:
+ sq = subs[event]
+ if append:
+ sq.append(subscriber)
+ else:
+ sq.appendleft(subscriber)
+
+ def remove(self, event, subscriber):
+ """
+ Remove a subscriber for an event.
+
+ :param event: The name of an event.
+ :param subscriber: The subscriber to be removed.
+ """
+ subs = self._subscribers
+ if event not in subs:
+ raise ValueError('No subscribers: %r' % event)
+ subs[event].remove(subscriber)
+
+ def get_subscribers(self, event):
+ """
+ Return an iterator for the subscribers for an event.
+ :param event: The event to return subscribers for.
+ """
+ return iter(self._subscribers.get(event, ()))
+
+ def publish(self, event, *args, **kwargs):
+ """
+ Publish an event and return a list of values returned by its
+ subscribers.
+
+ :param event: The event to publish.
+ :param args: The positional arguments to pass to the event's
+ subscribers.
+ :param kwargs: The keyword arguments to pass to the event's
+ subscribers.
+ """
+ result = []
+ for subscriber in self.get_subscribers(event):
+ try:
+ value = subscriber(event, *args, **kwargs)
+ except Exception:
+ logger.exception('Exception during event publication')
+ value = None
+ result.append(value)
+ logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
+ event, args, kwargs, result)
+ return result
+
+#
+# Simple sequencing
+#
+class Sequencer(object):
+ def __init__(self):
+ self._preds = {}
+ self._succs = {}
+ self._nodes = set() # nodes with no preds/succs
+
+ def add_node(self, node):
+ self._nodes.add(node)
+
+ def remove_node(self, node, edges=False):
+ if node in self._nodes:
+ self._nodes.remove(node)
+ if edges:
+ for p in set(self._preds.get(node, ())):
+ self.remove(p, node)
+ for s in set(self._succs.get(node, ())):
+ self.remove(node, s)
+ # Remove empties
+ for k, v in list(self._preds.items()):
+ if not v:
+ del self._preds[k]
+ for k, v in list(self._succs.items()):
+ if not v:
+ del self._succs[k]
+
+ def add(self, pred, succ):
+ assert pred != succ
+ self._preds.setdefault(succ, set()).add(pred)
+ self._succs.setdefault(pred, set()).add(succ)
+
+ def remove(self, pred, succ):
+ assert pred != succ
+ try:
+ preds = self._preds[succ]
+ succs = self._succs[pred]
+ except KeyError: # pragma: no cover
+ raise ValueError('%r not a successor of anything' % succ)
+ try:
+ preds.remove(pred)
+ succs.remove(succ)
+ except KeyError: # pragma: no cover
+ raise ValueError('%r not a successor of %r' % (succ, pred))
+
+ def is_step(self, step):
+ return (step in self._preds or step in self._succs or
+ step in self._nodes)
+
+ def get_steps(self, final):
+ if not self.is_step(final):
+ raise ValueError('Unknown: %r' % final)
+ result = []
+ todo = []
+ seen = set()
+ todo.append(final)
+ while todo:
+ step = todo.pop(0)
+ if step in seen:
+ # if a step was already seen,
+ # move it to the end (so it will appear earlier
+ # when reversed on return) ... but not for the
+ # final step, as that would be confusing for
+ # users
+ if step != final:
+ result.remove(step)
+ result.append(step)
+ else:
+ seen.add(step)
+ result.append(step)
+ preds = self._preds.get(step, ())
+ todo.extend(preds)
+ return reversed(result)
+
+ @property
+ def strong_connections(self):
+ #http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
+ index_counter = [0]
+ stack = []
+ lowlinks = {}
+ index = {}
+ result = []
+
+ graph = self._succs
+
+ def strongconnect(node):
+ # set the depth index for this node to the smallest unused index
+ index[node] = index_counter[0]
+ lowlinks[node] = index_counter[0]
+ index_counter[0] += 1
+ stack.append(node)
+
+ # Consider successors
+ try:
+ successors = graph[node]
+ except Exception:
+ successors = []
+ for successor in successors:
+ if successor not in lowlinks:
+ # Successor has not yet been visited
+ strongconnect(successor)
+ lowlinks[node] = min(lowlinks[node],lowlinks[successor])
+ elif successor in stack:
+ # the successor is in the stack and hence in the current
+ # strongly connected component (SCC)
+ lowlinks[node] = min(lowlinks[node],index[successor])
+
+ # If `node` is a root node, pop the stack and generate an SCC
+ if lowlinks[node] == index[node]:
+ connected_component = []
+
+ while True:
+ successor = stack.pop()
+ connected_component.append(successor)
+ if successor == node: break
+ component = tuple(connected_component)
+ # storing the result
+ result.append(component)
+
+ for node in graph:
+ if node not in lowlinks:
+ strongconnect(node)
+
+ return result
+
+ @property
+ def dot(self):
+ result = ['digraph G {']
+ for succ in self._preds:
+ preds = self._preds[succ]
+ for pred in preds:
+ result.append(' %s -> %s;' % (pred, succ))
+ for node in self._nodes:
+ result.append(' %s;' % node)
+ result.append('}')
+ return '\n'.join(result)
+
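+# A minimal usage sketch (the step names are hypothetical):
+#
+#   seq = Sequencer()
+#   seq.add('compile', 'link')
+#   seq.add('link', 'package')
+#   list(seq.get_steps('package'))
+#   # -> ['compile', 'link', 'package']
+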
+#
+# Unarchiving functionality for zip, tar, tgz, tbz, whl
+#
+
+ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
+ '.tgz', '.tbz', '.whl')
+
+def unarchive(archive_filename, dest_dir, format=None, check=True):
+
+ def check_path(path):
+ if not isinstance(path, text_type):
+ path = path.decode('utf-8')
+ p = os.path.abspath(os.path.join(dest_dir, path))
+ if not p.startswith(dest_dir) or p[plen] != os.sep:
+ raise ValueError('path outside destination: %r' % p)
+
+ dest_dir = os.path.abspath(dest_dir)
+ plen = len(dest_dir)
+ archive = None
+ if format is None:
+ if archive_filename.endswith(('.zip', '.whl')):
+ format = 'zip'
+ elif archive_filename.endswith(('.tar.gz', '.tgz')):
+ format = 'tgz'
+ mode = 'r:gz'
+ elif archive_filename.endswith(('.tar.bz2', '.tbz')):
+ format = 'tbz'
+ mode = 'r:bz2'
+ elif archive_filename.endswith('.tar'):
+ format = 'tar'
+ mode = 'r'
+ else: # pragma: no cover
+ raise ValueError('Unknown format for %r' % archive_filename)
+ try:
+ if format == 'zip':
+ archive = ZipFile(archive_filename, 'r')
+ if check:
+ names = archive.namelist()
+ for name in names:
+ check_path(name)
+ else:
+ archive = tarfile.open(archive_filename, mode)
+ if check:
+ names = archive.getnames()
+ for name in names:
+ check_path(name)
+ if format != 'zip' and sys.version_info[0] < 3:
+ # See Python issue 17153. If the dest path contains Unicode,
+ # tarfile extraction fails on Python 2.x if a member path name
+ # contains non-ASCII characters - it leads to an implicit
+ # bytes -> unicode conversion using ASCII to decode.
+ for tarinfo in archive.getmembers():
+ if not isinstance(tarinfo.name, text_type):
+ tarinfo.name = tarinfo.name.decode('utf-8')
+ archive.extractall(dest_dir)
+
+ finally:
+ if archive:
+ archive.close()
+
+
+def zip_dir(directory):
+ """zip a directory tree into a BytesIO object"""
+ result = io.BytesIO()
+ dlen = len(directory)
+ with ZipFile(result, "w") as zf:
+ for root, dirs, files in os.walk(directory):
+ for name in files:
+ full = os.path.join(root, name)
+ rel = root[dlen:]
+ dest = os.path.join(rel, name)
+ zf.write(full, dest)
+ return result
+
+#
+# Simple progress bar
+#
+
+UNITS = ('', 'K', 'M', 'G', 'T', 'P')
+
+
+class Progress(object):
+ unknown = 'UNKNOWN'
+
+ def __init__(self, minval=0, maxval=100):
+ assert maxval is None or maxval >= minval
+ self.min = self.cur = minval
+ self.max = maxval
+ self.started = None
+ self.elapsed = 0
+ self.done = False
+
+ def update(self, curval):
+ assert self.min <= curval
+ assert self.max is None or curval <= self.max
+ self.cur = curval
+ now = time.time()
+ if self.started is None:
+ self.started = now
+ else:
+ self.elapsed = now - self.started
+
+ def increment(self, incr):
+ assert incr >= 0
+ self.update(self.cur + incr)
+
+ def start(self):
+ self.update(self.min)
+ return self
+
+ def stop(self):
+ if self.max is not None:
+ self.update(self.max)
+ self.done = True
+
+ @property
+ def maximum(self):
+ return self.unknown if self.max is None else self.max
+
+ @property
+ def percentage(self):
+ if self.done:
+ result = '100 %'
+ elif self.max is None:
+ result = ' ?? %'
+ else:
+ v = 100.0 * (self.cur - self.min) / (self.max - self.min)
+ result = '%3d %%' % v
+ return result
+
+ def format_duration(self, duration):
+ if ((duration <= 0) and self.max is None) or self.cur == self.min:
+ result = '??:??:??'
+ #elif duration < 1:
+ # result = '--:--:--'
+ else:
+ result = time.strftime('%H:%M:%S', time.gmtime(duration))
+ return result
+
+ @property
+ def ETA(self):
+ if self.done:
+ prefix = 'Done'
+ t = self.elapsed
+ #import pdb; pdb.set_trace()
+ else:
+ prefix = 'ETA '
+ if self.max is None:
+ t = -1
+ elif self.elapsed == 0 or (self.cur == self.min):
+ t = 0
+ else:
+ #import pdb; pdb.set_trace()
+ t = float(self.max - self.min)
+ t /= self.cur - self.min
+ t = (t - 1) * self.elapsed
+ return '%s: %s' % (prefix, self.format_duration(t))
+
+ @property
+ def speed(self):
+ if self.elapsed == 0:
+ result = 0.0
+ else:
+ result = (self.cur - self.min) / self.elapsed
+ for unit in UNITS:
+ if result < 1000:
+ break
+ result /= 1000.0
+ return '%d %sB/s' % (result, unit)
+
+#
+# Glob functionality
+#
+
+RICH_GLOB = re.compile(r'\{([^}]*)\}')
+_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
+_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
+
+
+def iglob(path_glob):
+ """Extended globbing function that supports ** and {opt1,opt2,opt3}."""
+ if _CHECK_RECURSIVE_GLOB.search(path_glob):
+ msg = """invalid glob %r: recursive glob "**" must be used alone"""
+ raise ValueError(msg % path_glob)
+ if _CHECK_MISMATCH_SET.search(path_glob):
+ msg = """invalid glob %r: mismatching set marker '{' or '}'"""
+ raise ValueError(msg % path_glob)
+ return _iglob(path_glob)
+
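+# Illustrative sketch (the patterns are hypothetical): '**' recurses into
+# subdirectories and '{a,b}' expands to alternatives:
+#
+#   for path in iglob('{src,tests}/**/*.py'):
+#       print(path)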
+
+def _iglob(path_glob):
+ rich_path_glob = RICH_GLOB.split(path_glob, 1)
+ if len(rich_path_glob) > 1:
+ assert len(rich_path_glob) == 3, rich_path_glob
+ prefix, pattern_set, suffix = rich_path_glob
+ for item in pattern_set.split(','):
+ for path in _iglob(''.join((prefix, item, suffix))):
+ yield path
+ else:
+ if '**' not in path_glob:
+ for item in std_iglob(path_glob):
+ yield item
+ else:
+ prefix, radical = path_glob.split('**', 1)
+ if prefix == '':
+ prefix = '.'
+ if radical == '':
+ radical = '*'
+ else:
+ # we support both '/' and '\\' as separators after '**'
+ radical = radical.lstrip('/')
+ radical = radical.lstrip('\\')
+ for path, dir, files in os.walk(prefix):
+ path = os.path.normpath(path)
+ for fn in _iglob(os.path.join(path, radical)):
+ yield fn
+
+if ssl:
+ from .compat import (HTTPSHandler as BaseHTTPSHandler, match_hostname,
+ CertificateError)
+
+
+#
+# HTTPSConnection which verifies certificates/matches domains
+#
+
+ class HTTPSConnection(httplib.HTTPSConnection):
+ ca_certs = None # set this to the path to the certs file (.pem)
+ check_domain = True # only used if ca_certs is not None
+
+ # noinspection PyPropertyAccess
+ def connect(self):
+ sock = socket.create_connection((self.host, self.port), self.timeout)
+ if getattr(self, '_tunnel_host', False):
+ self.sock = sock
+ self._tunnel()
+
+ context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
+ if hasattr(ssl, 'OP_NO_SSLv2'):
+ context.options |= ssl.OP_NO_SSLv2
+ if self.cert_file:
+ context.load_cert_chain(self.cert_file, self.key_file)
+ kwargs = {}
+ if self.ca_certs:
+ context.verify_mode = ssl.CERT_REQUIRED
+ context.load_verify_locations(cafile=self.ca_certs)
+ if getattr(ssl, 'HAS_SNI', False):
+ kwargs['server_hostname'] = self.host
+
+ self.sock = context.wrap_socket(sock, **kwargs)
+ if self.ca_certs and self.check_domain:
+ try:
+ match_hostname(self.sock.getpeercert(), self.host)
+ logger.debug('Host verified: %s', self.host)
+ except CertificateError: # pragma: no cover
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ raise
+
+ class HTTPSHandler(BaseHTTPSHandler):
+ def __init__(self, ca_certs, check_domain=True):
+ BaseHTTPSHandler.__init__(self)
+ self.ca_certs = ca_certs
+ self.check_domain = check_domain
+
+ def _conn_maker(self, *args, **kwargs):
+ """
+ This is called to create a connection instance. Normally you'd
+ pass a connection class to do_open, but it doesn't actually check for
+ a class, and just expects a callable. As long as we behave just as a
+ constructor would have, we should be OK. If it ever changes so that
+ we *must* pass a class, we'll create an UnsafeHTTPSConnection class
+ which just sets check_domain to False in the class definition, and
+ choose which one to pass to do_open.
+ """
+ result = HTTPSConnection(*args, **kwargs)
+ if self.ca_certs:
+ result.ca_certs = self.ca_certs
+ result.check_domain = self.check_domain
+ return result
+
+ def https_open(self, req):
+ try:
+ return self.do_open(self._conn_maker, req)
+ except URLError as e:
+ if 'certificate verify failed' in str(e.reason):
+ raise CertificateError('Unable to verify server certificate '
+ 'for %s' % req.host)
+ else:
+ raise
+
+ #
+ # To guard against mixing HTTP traffic with HTTPS (examples: a Man-In-The-
+ # Middle proxy using HTTP listens on port 443, or an index mistakenly serves
+ # HTML containing an http://xyz link when it should be https://xyz),
+ # you can use the following handler class, which does not allow HTTP traffic.
+ #
+ # It works by inheriting from HTTPHandler - so build_opener won't add a
+ # handler for HTTP itself.
+ #
+ class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
+ def http_open(self, req):
+ raise URLError('Unexpected HTTP request on what should be a secure '
+ 'connection: %s' % req)
+
+#
+# XML-RPC with timeouts
+#
+class Transport(xmlrpclib.Transport):
+ def __init__(self, timeout, use_datetime=0):
+ self.timeout = timeout
+ xmlrpclib.Transport.__init__(self, use_datetime)
+
+ def make_connection(self, host):
+ h, eh, x509 = self.get_host_info(host)
+ if not self._connection or host != self._connection[0]:
+ self._extra_headers = eh
+ self._connection = host, httplib.HTTPConnection(h)
+ return self._connection[1]
+
+if ssl:
+ class SafeTransport(xmlrpclib.SafeTransport):
+ def __init__(self, timeout, use_datetime=0):
+ self.timeout = timeout
+ xmlrpclib.SafeTransport.__init__(self, use_datetime)
+
+ def make_connection(self, host):
+ h, eh, kwargs = self.get_host_info(host)
+ if not kwargs:
+ kwargs = {}
+ kwargs['timeout'] = self.timeout
+ if not self._connection or host != self._connection[0]:
+ self._extra_headers = eh
+ self._connection = host, httplib.HTTPSConnection(h, None,
+ **kwargs)
+ return self._connection[1]
+
+
+class ServerProxy(xmlrpclib.ServerProxy):
+ def __init__(self, uri, **kwargs):
+ self.timeout = timeout = kwargs.pop('timeout', None)
+ # The above classes only come into play if a timeout
+ # is specified
+ if timeout is not None:
+ # scheme = splittype(uri) # deprecated as of Python 3.8
+ scheme = urlparse(uri)[0]
+ use_datetime = kwargs.get('use_datetime', 0)
+ if scheme == 'https':
+ tcls = SafeTransport
+ else:
+ tcls = Transport
+ kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
+ self.transport = t
+ xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
+
+#
+# CSV functionality. This is provided because on 2.x, the csv module can't
+# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
+#
+
+def _csv_open(fn, mode, **kwargs):
+ if sys.version_info[0] < 3:
+ mode += 'b'
+ else:
+ kwargs['newline'] = ''
+ # Python 3 determines encoding from locale. Force 'utf-8'
+ # file encoding to match other forced utf-8 encoding
+ kwargs['encoding'] = 'utf-8'
+ return open(fn, mode, **kwargs)
+
+
+class CSVBase(object):
+ defaults = {
+ 'delimiter': str(','), # The strs are used because we need native
+ 'quotechar': str('"'), # str in the csv API (2.x won't take
+ 'lineterminator': str('\n') # Unicode)
+ }
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *exc_info):
+ self.stream.close()
+
+
+class CSVReader(CSVBase):
+ def __init__(self, **kwargs):
+ if 'stream' in kwargs:
+ stream = kwargs['stream']
+ if sys.version_info[0] >= 3:
+ # needs to be a text stream
+ stream = codecs.getreader('utf-8')(stream)
+ self.stream = stream
+ else:
+ self.stream = _csv_open(kwargs['path'], 'r')
+ self.reader = csv.reader(self.stream, **self.defaults)
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ result = next(self.reader)
+ if sys.version_info[0] < 3:
+ for i, item in enumerate(result):
+ if not isinstance(item, text_type):
+ result[i] = item.decode('utf-8')
+ return result
+
+ __next__ = next
+
+class CSVWriter(CSVBase):
+ def __init__(self, fn, **kwargs):
+ self.stream = _csv_open(fn, 'w')
+ self.writer = csv.writer(self.stream, **self.defaults)
+
+ def writerow(self, row):
+ if sys.version_info[0] < 3:
+ r = []
+ for item in row:
+ if isinstance(item, text_type):
+ item = item.encode('utf-8')
+ r.append(item)
+ row = r
+ self.writer.writerow(row)
+
+#
+# Configurator functionality
+#
+
+class Configurator(BaseConfigurator):
+
+ value_converters = dict(BaseConfigurator.value_converters)
+ value_converters['inc'] = 'inc_convert'
+
+ def __init__(self, config, base=None):
+ super(Configurator, self).__init__(config)
+ self.base = base or os.getcwd()
+
+ def configure_custom(self, config):
+ def convert(o):
+ if isinstance(o, (list, tuple)):
+ result = type(o)([convert(i) for i in o])
+ elif isinstance(o, dict):
+ if '()' in o:
+ result = self.configure_custom(o)
+ else:
+ result = {}
+ for k in o:
+ result[k] = convert(o[k])
+ else:
+ result = self.convert(o)
+ return result
+
+ c = config.pop('()')
+ if not callable(c):
+ c = self.resolve(c)
+ props = config.pop('.', None)
+ # Check for valid identifiers
+ args = config.pop('[]', ())
+ if args:
+ args = tuple([convert(o) for o in args])
+ items = [(k, convert(config[k])) for k in config if valid_ident(k)]
+ kwargs = dict(items)
+ result = c(*args, **kwargs)
+ if props:
+ for n, v in props.items():
+ setattr(result, n, convert(v))
+ return result
+
+ def __getitem__(self, key):
+ result = self.config[key]
+ if isinstance(result, dict) and '()' in result:
+ self.config[key] = result = self.configure_custom(result)
+ return result
+
+ def inc_convert(self, value):
+ """Default converter for the inc:// protocol."""
+ if not os.path.isabs(value):
+ value = os.path.join(self.base, value)
+ with codecs.open(value, 'r', encoding='utf-8') as f:
+ result = json.load(f)
+ return result
+
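+# Illustrative sketch (not part of upstream distlib): configure_custom
+# instantiates objects described by dicts, using the special keys '()'
+# (factory callable), '[]' (positional args) and '.' (attributes to set
+# after construction), e.g.
+#
+#   cfg = Configurator({'obj': {'()': 'collections.OrderedDict',
+#                               '[]': [[('a', 1)]]}})
+#   od = cfg['obj']     # OrderedDict([('a', 1)])
+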
+
+class SubprocessMixin(object):
+ """
+ Mixin for running subprocesses and capturing their output
+ """
+ def __init__(self, verbose=False, progress=None):
+ self.verbose = verbose
+ self.progress = progress
+
+ def reader(self, stream, context):
+ """
+ Read lines from a subprocess' output stream and either pass to a progress
+ callable (if specified) or write progress information to sys.stderr.
+ """
+ progress = self.progress
+ verbose = self.verbose
+ while True:
+ s = stream.readline()
+ if not s:
+ break
+ if progress is not None:
+ progress(s, context)
+ else:
+ if not verbose:
+ sys.stderr.write('.')
+ else:
+ sys.stderr.write(s.decode('utf-8'))
+ sys.stderr.flush()
+ stream.close()
+
+ def run_command(self, cmd, **kwargs):
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, **kwargs)
+ t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
+ t1.start()
+ t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
+ t2.start()
+ p.wait()
+ t1.join()
+ t2.join()
+ if self.progress is not None:
+ self.progress('done.', 'main')
+ elif self.verbose:
+ sys.stderr.write('done.\n')
+ return p
+
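+# Illustrative sketch (not part of upstream distlib): mix SubprocessMixin
+# into a class to run a command while streaming its output, e.g.
+#
+#   class Builder(SubprocessMixin):
+#       pass
+#
+#   Builder(verbose=True).run_command(['git', '--version'])
+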
+
+def normalize_name(name):
+ """Normalize a python package name a la PEP 503"""
+ # https://www.python.org/dev/peps/pep-0503/#normalized-names
+ return re.sub('[-_.]+', '-', name).lower()
+
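+# Illustrative example (doctest-style, not part of upstream distlib):
+#
+#   >>> normalize_name('Foo__Bar.baz')
+#   'foo-bar-baz'
+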
+# def _get_pypirc_command():
+ # """
+ # Get the distutils command for interacting with PyPI configurations.
+ # :return: the command.
+ # """
+ # from distutils.core import Distribution
+ # from distutils.config import PyPIRCCommand
+ # d = Distribution()
+ # return PyPIRCCommand(d)
+
+class PyPIRCFile(object):
+
+ DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
+ DEFAULT_REALM = 'pypi'
+
+ def __init__(self, fn=None, url=None):
+ if fn is None:
+ fn = os.path.join(os.path.expanduser('~'), '.pypirc')
+ self.filename = fn
+ self.url = url
+
+ def read(self):
+ result = {}
+
+ if os.path.exists(self.filename):
+ repository = self.url or self.DEFAULT_REPOSITORY
+
+ config = configparser.RawConfigParser()
+ config.read(self.filename)
+ sections = config.sections()
+ if 'distutils' in sections:
+ # let's get the list of servers
+ index_servers = config.get('distutils', 'index-servers')
+ _servers = [server.strip() for server in
+ index_servers.split('\n')
+ if server.strip() != '']
+ if _servers == []:
+ # nothing set, let's try to get the default pypi
+ if 'pypi' in sections:
+ _servers = ['pypi']
+ else:
+ for server in _servers:
+ result = {'server': server}
+ result['username'] = config.get(server, 'username')
+
+ # optional params
+ for key, default in (('repository', self.DEFAULT_REPOSITORY),
+ ('realm', self.DEFAULT_REALM),
+ ('password', None)):
+ if config.has_option(server, key):
+ result[key] = config.get(server, key)
+ else:
+ result[key] = default
+
+ # work around people having "repository" for the "pypi"
+ # section of their config set to the HTTP (rather than
+ # HTTPS) URL
+ if (server == 'pypi' and
+ repository in (self.DEFAULT_REPOSITORY, 'pypi')):
+ result['repository'] = self.DEFAULT_REPOSITORY
+ elif (result['server'] != repository and
+ result['repository'] != repository):
+ result = {}
+ elif 'server-login' in sections:
+ # old format
+ server = 'server-login'
+ if config.has_option(server, 'repository'):
+ repository = config.get(server, 'repository')
+ else:
+ repository = self.DEFAULT_REPOSITORY
+ result = {
+ 'username': config.get(server, 'username'),
+ 'password': config.get(server, 'password'),
+ 'repository': repository,
+ 'server': server,
+ 'realm': self.DEFAULT_REALM
+ }
+ return result
+
+ def update(self, username, password):
+ # import pdb; pdb.set_trace()
+ config = configparser.RawConfigParser()
+ fn = self.filename
+ config.read(fn)
+ if not config.has_section('pypi'):
+ config.add_section('pypi')
+ config.set('pypi', 'username', username)
+ config.set('pypi', 'password', password)
+ with open(fn, 'w') as f:
+ config.write(f)
+
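+# Illustrative sketch (not part of upstream distlib): PyPIRCFile.read()
+# understands the standard distutils ~/.pypirc layout, e.g.
+#
+#   [distutils]
+#   index-servers =
+#       pypi
+#
+#   [pypi]
+#   username = alice
+#   password = secret
+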
+def _load_pypirc(index):
+ """
+ Read the PyPI access configuration as supported by distutils.
+ """
+ return PyPIRCFile(url=index.url).read()
+
+def _store_pypirc(index):
+ PyPIRCFile().update(index.username, index.password)
+
+#
+# get_platform()/get_host_platform() copied from Python 3.10.a0 source, with some minor
+# tweaks
+#
+
+def get_host_platform():
+ """Return a string that identifies the current platform. This is used mainly to
+ distinguish platform-specific build directories and platform-specific built
+ distributions. Typically includes the OS name and version and the
+ architecture (as supplied by 'os.uname()'), although the exact information
+ included depends on the OS; e.g. on Linux, the kernel version isn't
+ particularly important.
+
+ Examples of returned values:
+ linux-i586
+ linux-alpha (?)
+ solaris-2.6-sun4u
+
+ Windows will return one of:
+ win-amd64 (64bit Windows on AMD64, aka x86_64, Intel64, EM64T)
+ win32 (all others - specifically, sys.platform is returned)
+
+ For other non-POSIX platforms, currently just returns 'sys.platform'.
+
+ """
+ if os.name == 'nt':
+ if 'amd64' in sys.version.lower():
+ return 'win-amd64'
+ if '(arm)' in sys.version.lower():
+ return 'win-arm32'
+ if '(arm64)' in sys.version.lower():
+ return 'win-arm64'
+ return sys.platform
+
+ # Set for cross builds explicitly
+ if "_PYTHON_HOST_PLATFORM" in os.environ:
+ return os.environ["_PYTHON_HOST_PLATFORM"]
+
+ if os.name != 'posix' or not hasattr(os, 'uname'):
+ # XXX what about the architecture? NT is Intel or Alpha,
+ # Mac OS is M68k or PPC, etc.
+ return sys.platform
+
+ # Try to distinguish various flavours of Unix
+
+ (osname, host, release, version, machine) = os.uname()
+
+ # Convert the OS name to lowercase, remove '/' characters, and translate
+ # spaces (for "Power Macintosh")
+ osname = osname.lower().replace('/', '')
+ machine = machine.replace(' ', '_').replace('/', '-')
+
+ if osname[:5] == 'linux':
+ # At least on Linux/Intel, 'machine' is the processor --
+ # i386, etc.
+ # XXX what about Alpha, SPARC, etc?
+ return "%s-%s" % (osname, machine)
+
+ elif osname[:5] == 'sunos':
+ if release[0] >= '5': # SunOS 5 == Solaris 2
+ osname = 'solaris'
+ release = '%d.%s' % (int(release[0]) - 3, release[2:])
+ # We can't use 'platform.architecture()[0]' because of a
+ # bootstrap problem. We use a dict to get an error
+ # if something suspicious happens.
+ bitness = {2147483647:'32bit', 9223372036854775807:'64bit'}
+ machine += '.%s' % bitness[sys.maxsize]
+ # fall through to standard osname-release-machine representation
+ elif osname[:3] == 'aix':
+ from _aix_support import aix_platform
+ return aix_platform()
+ elif osname[:6] == 'cygwin':
+ osname = 'cygwin'
+ rel_re = re.compile(r'[\d.]+', re.ASCII)
+ m = rel_re.match(release)
+ if m:
+ release = m.group()
+ elif osname[:6] == 'darwin':
+ import _osx_support, distutils.sysconfig
+ osname, release, machine = _osx_support.get_platform_osx(
+ distutils.sysconfig.get_config_vars(),
+ osname, release, machine)
+
+ return '%s-%s-%s' % (osname, release, machine)
+
+
+_TARGET_TO_PLAT = {
+ 'x86' : 'win32',
+ 'x64' : 'win-amd64',
+ 'arm' : 'win-arm32',
+}
+
+
+def get_platform():
+ if os.name != 'nt':
+ return get_host_platform()
+ cross_compilation_target = os.environ.get('VSCMD_ARG_TGT_ARCH')
+ if cross_compilation_target not in _TARGET_TO_PLAT:
+ return get_host_platform()
+ return _TARGET_TO_PLAT[cross_compilation_target]
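+
+# Illustrative examples (not part of upstream distlib; actual values depend
+# on the running interpreter):
+#
+#   get_platform()  ->  'linux-x86_64'  (typical 64-bit Linux)
+#   get_platform()  ->  'win-amd64'     (64-bit Windows, or when
+#                                        cross-compiling with
+#                                        VSCMD_ARG_TGT_ARCH=x64)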
diff --git a/third_party/python/pip/pip/_vendor/distlib/version.py b/third_party/python/pip/pip/_vendor/distlib/version.py
new file mode 100644
index 0000000000..c7c8bb6ff4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/version.py
@@ -0,0 +1,739 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2012-2017 The Python Software Foundation.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+"""
+Implementation of a flexible versioning scheme providing support for PEP-440,
+setuptools-compatible and semantic versioning.
+"""
+
+import logging
+import re
+
+from .compat import string_types
+from .util import parse_requirement
+
+__all__ = ['NormalizedVersion', 'NormalizedMatcher',
+ 'LegacyVersion', 'LegacyMatcher',
+ 'SemanticVersion', 'SemanticMatcher',
+ 'UnsupportedVersionError', 'get_scheme']
+
+logger = logging.getLogger(__name__)
+
+
+class UnsupportedVersionError(ValueError):
+ """This is an unsupported version."""
+ pass
+
+
+class Version(object):
+ def __init__(self, s):
+ self._string = s = s.strip()
+ self._parts = parts = self.parse(s)
+ assert isinstance(parts, tuple)
+ assert len(parts) > 0
+
+ def parse(self, s):
+ raise NotImplementedError('please implement in a subclass')
+
+ def _check_compatible(self, other):
+ if type(self) != type(other):
+ raise TypeError('cannot compare %r and %r' % (self, other))
+
+ def __eq__(self, other):
+ self._check_compatible(other)
+ return self._parts == other._parts
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __lt__(self, other):
+ self._check_compatible(other)
+ return self._parts < other._parts
+
+ def __gt__(self, other):
+ return not (self.__lt__(other) or self.__eq__(other))
+
+ def __le__(self, other):
+ return self.__lt__(other) or self.__eq__(other)
+
+ def __ge__(self, other):
+ return self.__gt__(other) or self.__eq__(other)
+
+ # See http://docs.python.org/reference/datamodel#object.__hash__
+ def __hash__(self):
+ return hash(self._parts)
+
+ def __repr__(self):
+ return "%s('%s')" % (self.__class__.__name__, self._string)
+
+ def __str__(self):
+ return self._string
+
+ @property
+ def is_prerelease(self):
+ raise NotImplementedError('Please implement in subclasses.')
+
+
+class Matcher(object):
+ version_class = None
+
+ # value is either a callable or the name of a method
+ _operators = {
+ '<': lambda v, c, p: v < c,
+ '>': lambda v, c, p: v > c,
+ '<=': lambda v, c, p: v == c or v < c,
+ '>=': lambda v, c, p: v == c or v > c,
+ '==': lambda v, c, p: v == c,
+ '===': lambda v, c, p: v == c,
+ # by default, compatible => >=.
+ '~=': lambda v, c, p: v == c or v > c,
+ '!=': lambda v, c, p: v != c,
+ }
+
+ # this is a method only to support alternative implementations
+ # via overriding
+ def parse_requirement(self, s):
+ return parse_requirement(s)
+
+ def __init__(self, s):
+ if self.version_class is None:
+ raise ValueError('Please specify a version class')
+ self._string = s = s.strip()
+ r = self.parse_requirement(s)
+ if not r:
+ raise ValueError('Not valid: %r' % s)
+ self.name = r.name
+ self.key = self.name.lower() # for case-insensitive comparisons
+ clist = []
+ if r.constraints:
+ # import pdb; pdb.set_trace()
+ for op, s in r.constraints:
+ if s.endswith('.*'):
+ if op not in ('==', '!='):
+ raise ValueError('\'.*\' not allowed for '
+ '%r constraints' % op)
+ # Could be a partial version (e.g. for '2.*') which
+ # won't parse as a version, so keep it as a string
+ vn, prefix = s[:-2], True
+ # Just to check that vn is a valid version
+ self.version_class(vn)
+ else:
+ # Should parse as a version, so we can create an
+ # instance for the comparison
+ vn, prefix = self.version_class(s), False
+ clist.append((op, vn, prefix))
+ self._parts = tuple(clist)
+
+ def match(self, version):
+ """
+ Check if the provided version matches the constraints.
+
+ :param version: The version to match against this instance.
+ :type version: String or :class:`Version` instance.
+ """
+ if isinstance(version, string_types):
+ version = self.version_class(version)
+ for operator, constraint, prefix in self._parts:
+ f = self._operators.get(operator)
+ if isinstance(f, string_types):
+ f = getattr(self, f)
+ if not f:
+ msg = ('%r not implemented '
+ 'for %s' % (operator, self.__class__.__name__))
+ raise NotImplementedError(msg)
+ if not f(version, constraint, prefix):
+ return False
+ return True
+
+ @property
+ def exact_version(self):
+ result = None
+ if len(self._parts) == 1 and self._parts[0][0] in ('==', '==='):
+ result = self._parts[0][1]
+ return result
+
+ def _check_compatible(self, other):
+ if type(self) != type(other) or self.name != other.name:
+ raise TypeError('cannot compare %s and %s' % (self, other))
+
+ def __eq__(self, other):
+ self._check_compatible(other)
+ return self.key == other.key and self._parts == other._parts
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ # See http://docs.python.org/reference/datamodel#object.__hash__
+ def __hash__(self):
+ return hash(self.key) + hash(self._parts)
+
+ def __repr__(self):
+ return "%s(%r)" % (self.__class__.__name__, self._string)
+
+ def __str__(self):
+ return self._string
+
+
+PEP440_VERSION_RE = re.compile(r'^v?(\d+!)?(\d+(\.\d+)*)((a|b|c|rc)(\d+))?'
+ r'(\.(post)(\d+))?(\.(dev)(\d+))?'
+ r'(\+([a-zA-Z\d]+(\.[a-zA-Z\d]+)?))?$')
+
+
+def _pep_440_key(s):
+ s = s.strip()
+ m = PEP440_VERSION_RE.match(s)
+ if not m:
+ raise UnsupportedVersionError('Not a valid version: %s' % s)
+ groups = m.groups()
+ nums = tuple(int(v) for v in groups[1].split('.'))
+ while len(nums) > 1 and nums[-1] == 0:
+ nums = nums[:-1]
+
+ if not groups[0]:
+ epoch = 0
+ else:
+ epoch = int(groups[0][:-1])
+ pre = groups[4:6]
+ post = groups[7:9]
+ dev = groups[10:12]
+ local = groups[13]
+ if pre == (None, None):
+ pre = ()
+ else:
+ pre = pre[0], int(pre[1])
+ if post == (None, None):
+ post = ()
+ else:
+ post = post[0], int(post[1])
+ if dev == (None, None):
+ dev = ()
+ else:
+ dev = dev[0], int(dev[1])
+ if local is None:
+ local = ()
+ else:
+ parts = []
+ for part in local.split('.'):
+ # To ensure that a numeric part compares as greater than a
+ # lexicographic one, avoid comparing them directly; instead,
+ # encode each part as a tuple which sorts correctly.
+ if part.isdigit():
+ part = (1, int(part))
+ else:
+ part = (0, part)
+ parts.append(part)
+ local = tuple(parts)
+ if not pre:
+ # either before pre-release, or final release and after
+ if not post and dev:
+ # before pre-release
+ pre = ('a', -1) # to sort before a0
+ else:
+ pre = ('z',) # to sort after all pre-releases
+ # now look at the state of post and dev.
+ if not post:
+ post = ('_',) # sort before 'a'
+ if not dev:
+ dev = ('final',)
+
+ #print('%s -> %s' % (s, m.groups()))
+ return epoch, nums, pre, post, dev, local
+
+
+_normalized_key = _pep_440_key
+
+
+class NormalizedVersion(Version):
+ """A rational version.
+
+ Good:
+ 1.2 # equivalent to "1.2.0"
+ 1.2.0
+ 1.2a1
+ 1.2.3a2
+ 1.2.3b1
+ 1.2.3c1
+ 1.2.3.4
+ TODO: fill this out
+
+ Bad:
+ 1 # minimum two numbers
+ 1.2a # release level must have a release serial
+ 1.2.3b
+ """
+ def parse(self, s):
+ result = _normalized_key(s)
+ # _normalized_key loses trailing zeroes in the release
+ # clause, since that's needed to ensure that X.Y == X.Y.0 == X.Y.0.0
+ # However, PEP 440 prefix matching needs it: for example,
+ # (~= 1.4.5.0) matches differently to (~= 1.4.5.0.0).
+ m = PEP440_VERSION_RE.match(s) # must succeed
+ groups = m.groups()
+ self._release_clause = tuple(int(v) for v in groups[1].split('.'))
+ return result
+
+ PREREL_TAGS = set(['a', 'b', 'c', 'rc', 'dev'])
+
+ @property
+ def is_prerelease(self):
+ return any(t[0] in self.PREREL_TAGS for t in self._parts if t)
+
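+# Illustrative examples (doctest-style, not part of upstream distlib):
+#
+#   >>> NormalizedVersion('1.2.3a1') < NormalizedVersion('1.2.3')
+#   True
+#   >>> NormalizedVersion('1.2.3a1').is_prerelease
+#   True
+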
+
+def _match_prefix(x, y):
+ x = str(x)
+ y = str(y)
+ if x == y:
+ return True
+ if not x.startswith(y):
+ return False
+ n = len(y)
+ return x[n] == '.'
+
+
+class NormalizedMatcher(Matcher):
+ version_class = NormalizedVersion
+
+ # value is either a callable or the name of a method
+ _operators = {
+ '~=': '_match_compatible',
+ '<': '_match_lt',
+ '>': '_match_gt',
+ '<=': '_match_le',
+ '>=': '_match_ge',
+ '==': '_match_eq',
+ '===': '_match_arbitrary',
+ '!=': '_match_ne',
+ }
+
+ def _adjust_local(self, version, constraint, prefix):
+ if prefix:
+ strip_local = '+' not in constraint and version._parts[-1]
+ else:
+ # both constraint and version are
+ # NormalizedVersion instances.
+ # If constraint does not have a local component,
+ # ensure the version doesn't, either.
+ strip_local = not constraint._parts[-1] and version._parts[-1]
+ if strip_local:
+ s = version._string.split('+', 1)[0]
+ version = self.version_class(s)
+ return version, constraint
+
+ def _match_lt(self, version, constraint, prefix):
+ version, constraint = self._adjust_local(version, constraint, prefix)
+ if version >= constraint:
+ return False
+ release_clause = constraint._release_clause
+ pfx = '.'.join([str(i) for i in release_clause])
+ return not _match_prefix(version, pfx)
+
+ def _match_gt(self, version, constraint, prefix):
+ version, constraint = self._adjust_local(version, constraint, prefix)
+ if version <= constraint:
+ return False
+ release_clause = constraint._release_clause
+ pfx = '.'.join([str(i) for i in release_clause])
+ return not _match_prefix(version, pfx)
+
+ def _match_le(self, version, constraint, prefix):
+ version, constraint = self._adjust_local(version, constraint, prefix)
+ return version <= constraint
+
+ def _match_ge(self, version, constraint, prefix):
+ version, constraint = self._adjust_local(version, constraint, prefix)
+ return version >= constraint
+
+ def _match_eq(self, version, constraint, prefix):
+ version, constraint = self._adjust_local(version, constraint, prefix)
+ if not prefix:
+ result = (version == constraint)
+ else:
+ result = _match_prefix(version, constraint)
+ return result
+
+ def _match_arbitrary(self, version, constraint, prefix):
+ return str(version) == str(constraint)
+
+ def _match_ne(self, version, constraint, prefix):
+ version, constraint = self._adjust_local(version, constraint, prefix)
+ if not prefix:
+ result = (version != constraint)
+ else:
+ result = not _match_prefix(version, constraint)
+ return result
+
+ def _match_compatible(self, version, constraint, prefix):
+ version, constraint = self._adjust_local(version, constraint, prefix)
+ if version == constraint:
+ return True
+ if version < constraint:
+ return False
+# if not prefix:
+# return True
+ release_clause = constraint._release_clause
+ if len(release_clause) > 1:
+ release_clause = release_clause[:-1]
+ pfx = '.'.join([str(i) for i in release_clause])
+ return _match_prefix(version, pfx)
+
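+# Illustrative examples (doctest-style, not part of upstream distlib):
+#
+#   >>> m = NormalizedMatcher('foo (~= 1.4.2)')
+#   >>> m.match('1.4.7')
+#   True
+#   >>> m.match('1.5.0')
+#   False
+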
+_REPLACEMENTS = (
+ (re.compile('[.+-]$'), ''), # remove trailing puncts
+ (re.compile(r'^[.](\d)'), r'0.\1'), # .N -> 0.N at start
+ (re.compile('^[.-]'), ''), # remove leading puncts
+ (re.compile(r'^\((.*)\)$'), r'\1'), # remove parentheses
+ (re.compile(r'^v(ersion)?\s*(\d+)'), r'\2'), # remove leading v(ersion)
+ (re.compile(r'^r(ev)?\s*(\d+)'), r'\2'), # remove leading r(ev)
+ (re.compile('[.]{2,}'), '.'), # multiple runs of '.'
+ (re.compile(r'\b(alfa|apha)\b'), 'alpha'), # misspelt alpha
+ (re.compile(r'\b(pre-alpha|prealpha)\b'),
+ 'pre.alpha'), # standardise
+ (re.compile(r'\(beta\)$'), 'beta'), # remove parentheses
+)
+
+_SUFFIX_REPLACEMENTS = (
+ (re.compile('^[:~._+-]+'), ''), # remove leading puncts
+ (re.compile('[,*")([\\]]'), ''), # remove unwanted chars
+ (re.compile('[~:+_ -]'), '.'), # replace illegal chars
+ (re.compile('[.]{2,}'), '.'), # multiple runs of '.'
+ (re.compile(r'\.$'), ''), # trailing '.'
+)
+
+_NUMERIC_PREFIX = re.compile(r'(\d+(\.\d+)*)')
+
+
+def _suggest_semantic_version(s):
+ """
+ Try to suggest a semantic form for a version for which
+ _suggest_normalized_version couldn't come up with anything.
+ """
+ result = s.strip().lower()
+ for pat, repl in _REPLACEMENTS:
+ result = pat.sub(repl, result)
+ if not result:
+ result = '0.0.0'
+
+ # Now look for numeric prefix, and separate it out from
+ # the rest.
+ #import pdb; pdb.set_trace()
+ m = _NUMERIC_PREFIX.match(result)
+ if not m:
+ prefix = '0.0.0'
+ suffix = result
+ else:
+ prefix = m.groups()[0].split('.')
+ prefix = [int(i) for i in prefix]
+ while len(prefix) < 3:
+ prefix.append(0)
+ if len(prefix) == 3:
+ suffix = result[m.end():]
+ else:
+ suffix = '.'.join([str(i) for i in prefix[3:]]) + result[m.end():]
+ prefix = prefix[:3]
+ prefix = '.'.join([str(i) for i in prefix])
+ suffix = suffix.strip()
+ if suffix:
+ #import pdb; pdb.set_trace()
+ # massage the suffix.
+ for pat, repl in _SUFFIX_REPLACEMENTS:
+ suffix = pat.sub(repl, suffix)
+
+ if not suffix:
+ result = prefix
+ else:
+ sep = '-' if 'dev' in suffix else '+'
+ result = prefix + sep + suffix
+ if not is_semver(result):
+ result = None
+ return result
+
+
+def _suggest_normalized_version(s):
+ """Suggest a normalized version close to the given version string.
+
+ If you have a version string that isn't rational (i.e. NormalizedVersion
+ doesn't like it) then you might be able to get an equivalent (or close)
+ rational version from this function.
+
+ This does a number of simple normalizations to the given string, based
+ on observation of versions currently in use on PyPI. Given a dump of
+ those versions during PyCon 2009, 4287 of them:
+ - 2312 (53.93%) match NormalizedVersion without change
+ - 3474 (81.04%) match when using the automatic suggestion provided
+ by this method
+
+ @param s {str} An irrational version string.
+ @returns A rational version string, or None if one couldn't be determined.
+ """
+ try:
+ _normalized_key(s)
+ return s # already rational
+ except UnsupportedVersionError:
+ pass
+
+ rs = s.lower()
+
+ # part of this could use maketrans
+ for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
+ ('beta', 'b'), ('rc', 'c'), ('-final', ''),
+ ('-pre', 'c'),
+ ('-release', ''), ('.release', ''), ('-stable', ''),
+ ('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
+ ('final', '')):
+ rs = rs.replace(orig, repl)
+
+ # if something ends with dev or pre, we add a 0
+ rs = re.sub(r"pre$", r"pre0", rs)
+ rs = re.sub(r"dev$", r"dev0", rs)
+
+ # if we have something like "b-2" or "a.2" at the end of the
+ # version, that is probably beta, alpha, etc
+ # let's remove the dash or dot
+ rs = re.sub(r"([abc]|rc)[\-\.](\d+)$", r"\1\2", rs)
+
+ # 1.0-dev-r371 -> 1.0.dev371
+ # 0.1-dev-r79 -> 0.1.dev79
+ rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
+
+ # Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
+ rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
+
+ # Clean: v0.3, v1.0
+ if rs.startswith('v'):
+ rs = rs[1:]
+
+ # Clean leading '0's on numbers.
+ #TODO: unintended side-effect on, e.g., "2003.05.09"
+ # PyPI stats: 77 (~2%) better
+ rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
+
+ # Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
+ # zero.
+ # PyPI stats: 245 (7.56%) better
+ rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
+
+ # the 'dev-rNNN' tag is a dev tag
+ rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
+
+ # clean the - when used as a pre delimiter
+ rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
+
+ # a terminal "dev" or "devel" can be changed into ".dev0"
+ rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
+
+ # a terminal "dev" can be changed into ".dev0"
+ rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
+
+ # a terminal "final" or "stable" can be removed
+ rs = re.sub(r"(final|stable)$", "", rs)
+
+ # The 'r' and the '-' tags are post release tags
+ # 0.4a1.r10 -> 0.4a1.post10
+ # 0.9.33-17222 -> 0.9.33.post17222
+ # 0.9.33-r17222 -> 0.9.33.post17222
+ rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
+
+ # Clean 'r' instead of 'dev' usage:
+ # 0.9.33+r17222 -> 0.9.33.dev17222
+ # 1.0dev123 -> 1.0.dev123
+ # 1.0.git123 -> 1.0.dev123
+ # 1.0.bzr123 -> 1.0.dev123
+ # 0.1a0dev.123 -> 0.1a0.dev123
+ # PyPI stats: ~150 (~4%) better
+ rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
+
+ # Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
+ # 0.2.pre1 -> 0.2c1
+ # 0.2-c1 -> 0.2c1
+ # 1.0preview123 -> 1.0c123
+ # PyPI stats: ~21 (0.62%) better
+ rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
+
+ # Tcl/Tk uses "px" for their post release markers
+ rs = re.sub(r"p(\d+)$", r".post\1", rs)
+
+ try:
+ _normalized_key(rs)
+ except UnsupportedVersionError:
+ rs = None
+ return rs
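+
+# Illustrative examples (doctest-style, not part of upstream distlib):
+#
+#   >>> _suggest_normalized_version('1.0-alpha-2')
+#   '1.0a2'
+#   >>> _suggest_normalized_version('1.0')
+#   '1.0'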
+
+#
+# Legacy version processing (distribute-compatible)
+#
+
+_VERSION_PART = re.compile(r'([a-z]+|\d+|[\.-])', re.I)
+_VERSION_REPLACE = {
+ 'pre': 'c',
+ 'preview': 'c',
+ '-': 'final-',
+ 'rc': 'c',
+ 'dev': '@',
+ '': None,
+ '.': None,
+}
+
+
+def _legacy_key(s):
+ def get_parts(s):
+ result = []
+ for p in _VERSION_PART.split(s.lower()):
+ p = _VERSION_REPLACE.get(p, p)
+ if p:
+ if '0' <= p[:1] <= '9':
+ p = p.zfill(8)
+ else:
+ p = '*' + p
+ result.append(p)
+ result.append('*final')
+ return result
+
+ result = []
+ for p in get_parts(s):
+ if p.startswith('*'):
+ if p < '*final':
+ while result and result[-1] == '*final-':
+ result.pop()
+ while result and result[-1] == '00000000':
+ result.pop()
+ result.append(p)
+ return tuple(result)
+
+
+class LegacyVersion(Version):
+ def parse(self, s):
+ return _legacy_key(s)
+
+ @property
+ def is_prerelease(self):
+ result = False
+ for x in self._parts:
+ if (isinstance(x, string_types) and x.startswith('*') and
+ x < '*final'):
+ result = True
+ break
+ return result
+
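+# Illustrative example (doctest-style, not part of upstream distlib):
+#
+#   >>> LegacyVersion('1.0.dev1').is_prerelease
+#   True
+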
+
+class LegacyMatcher(Matcher):
+ version_class = LegacyVersion
+
+ _operators = dict(Matcher._operators)
+ _operators['~='] = '_match_compatible'
+
+ numeric_re = re.compile(r'^(\d+(\.\d+)*)')
+
+ def _match_compatible(self, version, constraint, prefix):
+ if version < constraint:
+ return False
+ m = self.numeric_re.match(str(constraint))
+ if not m:
+ logger.warning('Cannot compute compatible match for version %s '
+ ' and constraint %s', version, constraint)
+ return True
+ s = m.groups()[0]
+ if '.' in s:
+ s = s.rsplit('.', 1)[0]
+ return _match_prefix(version, s)
+
+#
+# Semantic versioning
+#
+
+_SEMVER_RE = re.compile(r'^(\d+)\.(\d+)\.(\d+)'
+ r'(-[a-z0-9]+(\.[a-z0-9-]+)*)?'
+ r'(\+[a-z0-9]+(\.[a-z0-9-]+)*)?$', re.I)
+
+
+def is_semver(s):
+ return _SEMVER_RE.match(s)
+
+
+def _semantic_key(s):
+ def make_tuple(s, absent):
+ if s is None:
+ result = (absent,)
+ else:
+ parts = s[1:].split('.')
+ # We can't compare ints and strings on Python 3, so fudge it
+ # by zero-filling numeric values to simulate a numeric comparison
+ result = tuple([p.zfill(8) if p.isdigit() else p for p in parts])
+ return result
+
+ m = is_semver(s)
+ if not m:
+ raise UnsupportedVersionError(s)
+ groups = m.groups()
+ major, minor, patch = [int(i) for i in groups[:3]]
+ # choose the '|' and '*' so that versions sort correctly
+ pre, build = make_tuple(groups[3], '|'), make_tuple(groups[5], '*')
+ return (major, minor, patch), pre, build
+
+
+class SemanticVersion(Version):
+ def parse(self, s):
+ return _semantic_key(s)
+
+ @property
+ def is_prerelease(self):
+ return self._parts[1][0] != '|'
+
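+# Illustrative examples (doctest-style, not part of upstream distlib):
+#
+#   >>> SemanticVersion('1.0.0-alpha.1') < SemanticVersion('1.0.0')
+#   True
+#   >>> SemanticVersion('1.0.0+build.5').is_prerelease
+#   False
+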
+
+class SemanticMatcher(Matcher):
+ version_class = SemanticVersion
+
+
+class VersionScheme(object):
+ def __init__(self, key, matcher, suggester=None):
+ self.key = key
+ self.matcher = matcher
+ self.suggester = suggester
+
+ def is_valid_version(self, s):
+ try:
+ self.matcher.version_class(s)
+ result = True
+ except UnsupportedVersionError:
+ result = False
+ return result
+
+ def is_valid_matcher(self, s):
+ try:
+ self.matcher(s)
+ result = True
+ except UnsupportedVersionError:
+ result = False
+ return result
+
+ def is_valid_constraint_list(self, s):
+ """
+ Used for processing some metadata fields
+ """
+ # See issue #140. Be tolerant of a single trailing comma.
+ if s.endswith(','):
+ s = s[:-1]
+ return self.is_valid_matcher('dummy_name (%s)' % s)
+
+ def suggest(self, s):
+ if self.suggester is None:
+ result = None
+ else:
+ result = self.suggester(s)
+ return result
+
+_SCHEMES = {
+ 'normalized': VersionScheme(_normalized_key, NormalizedMatcher,
+ _suggest_normalized_version),
+ 'legacy': VersionScheme(_legacy_key, LegacyMatcher, lambda self, s: s),
+ 'semantic': VersionScheme(_semantic_key, SemanticMatcher,
+ _suggest_semantic_version),
+}
+
+_SCHEMES['default'] = _SCHEMES['normalized']
+
+
+def get_scheme(name):
+ if name not in _SCHEMES:
+ raise ValueError('unknown scheme name: %r' % name)
+ return _SCHEMES[name]
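+
+# Illustrative examples (doctest-style, not part of upstream distlib):
+#
+#   >>> scheme = get_scheme('default')
+#   >>> scheme.is_valid_version('1.0.post1')
+#   True
+#   >>> scheme.suggest('1.0-beta')
+#   '1.0b0'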
diff --git a/third_party/python/pip/pip/_vendor/distlib/w32.exe b/third_party/python/pip/pip/_vendor/distlib/w32.exe
new file mode 100644
index 0000000000..4ee2d3a31b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/w32.exe
Binary files differ
diff --git a/third_party/python/pip/pip/_vendor/distlib/w64-arm.exe b/third_party/python/pip/pip/_vendor/distlib/w64-arm.exe
new file mode 100644
index 0000000000..951d5817c9
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/w64-arm.exe
Binary files differ
diff --git a/third_party/python/pip/pip/_vendor/distlib/w64.exe b/third_party/python/pip/pip/_vendor/distlib/w64.exe
new file mode 100644
index 0000000000..5763076d28
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/w64.exe
Binary files differ
diff --git a/third_party/python/pip/pip/_vendor/distlib/wheel.py b/third_party/python/pip/pip/_vendor/distlib/wheel.py
new file mode 100644
index 0000000000..028c2d99b5
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distlib/wheel.py
@@ -0,0 +1,1082 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2013-2020 Vinay Sajip.
+# Licensed to the Python Software Foundation under a contributor agreement.
+# See LICENSE.txt and CONTRIBUTORS.txt.
+#
+from __future__ import unicode_literals
+
+import base64
+import codecs
+import datetime
+from email import message_from_file
+import hashlib
+import json
+import logging
+import os
+import posixpath
+import re
+import shutil
+import sys
+import tempfile
+import zipfile
+
+from . import __version__, DistlibException
+from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
+from .database import InstalledDistribution
+from .metadata import (Metadata, METADATA_FILENAME, WHEEL_METADATA_FILENAME,
+ LEGACY_METADATA_FILENAME)
+from .util import (FileOperator, convert_path, CSVReader, CSVWriter, Cache,
+ cached_property, get_cache_base, read_exports, tempdir,
+ get_platform)
+from .version import NormalizedVersion, UnsupportedVersionError
+
+logger = logging.getLogger(__name__)
+
+cache = None # created when needed
+
+if hasattr(sys, 'pypy_version_info'): # pragma: no cover
+ IMP_PREFIX = 'pp'
+elif sys.platform.startswith('java'): # pragma: no cover
+ IMP_PREFIX = 'jy'
+elif sys.platform == 'cli': # pragma: no cover
+ IMP_PREFIX = 'ip'
+else:
+ IMP_PREFIX = 'cp'
+
+VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
+if not VER_SUFFIX: # pragma: no cover
+ VER_SUFFIX = '%s%s' % sys.version_info[:2]
+PYVER = 'py' + VER_SUFFIX
+IMPVER = IMP_PREFIX + VER_SUFFIX
+
+ARCH = get_platform().replace('-', '_').replace('.', '_')
+
+ABI = sysconfig.get_config_var('SOABI')
+if ABI and ABI.startswith('cpython-'):
+ ABI = ABI.replace('cpython-', 'cp').split('-')[0]
+else:
+ def _derive_abi():
+ parts = ['cp', VER_SUFFIX]
+ if sysconfig.get_config_var('Py_DEBUG'):
+ parts.append('d')
+ if IMP_PREFIX == 'cp':
+ vi = sys.version_info[:2]
+ if vi < (3, 8):
+ wpm = sysconfig.get_config_var('WITH_PYMALLOC')
+ if wpm is None:
+ wpm = True
+ if wpm:
+ parts.append('m')
+ if vi < (3, 3):
+ us = sysconfig.get_config_var('Py_UNICODE_SIZE')
+ if us == 4 or (us is None and sys.maxunicode == 0x10FFFF):
+ parts.append('u')
+ return ''.join(parts)
+ ABI = _derive_abi()
+ del _derive_abi
+
+FILENAME_RE = re.compile(r'''
+(?P<nm>[^-]+)
+-(?P<vn>\d+[^-]*)
+(-(?P<bn>\d+[^-]*))?
+-(?P<py>\w+\d+(\.\w+\d+)*)
+-(?P<bi>\w+)
+-(?P<ar>\w+(\.\w+)*)
+\.whl$
+''', re.IGNORECASE | re.VERBOSE)
+
+NAME_VERSION_RE = re.compile(r'''
+(?P<nm>[^-]+)
+-(?P<vn>\d+[^-]*)
+(-(?P<bn>\d+[^-]*))?$
+''', re.IGNORECASE | re.VERBOSE)
+
+SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')
+SHEBANG_DETAIL_RE = re.compile(br'^(\s*#!("[^"]+"|\S+))\s+(.*)$')
+SHEBANG_PYTHON = b'#!python'
+SHEBANG_PYTHONW = b'#!pythonw'
+
+if os.sep == '/':
+ to_posix = lambda o: o
+else:
+ to_posix = lambda o: o.replace(os.sep, '/')
+
+if sys.version_info[0] < 3:
+ import imp
+else:
+ imp = None
+ import importlib.machinery
+ import importlib.util
+
+def _get_suffixes():
+ if imp:
+ return [s[0] for s in imp.get_suffixes()]
+ else:
+ return importlib.machinery.EXTENSION_SUFFIXES
+
+def _load_dynamic(name, path):
+ # https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
+ if imp:
+ return imp.load_dynamic(name, path)
+ else:
+ spec = importlib.util.spec_from_file_location(name, path)
+ module = importlib.util.module_from_spec(spec)
+ sys.modules[name] = module
+ spec.loader.exec_module(module)
+ return module
+
+class Mounter(object):
+ def __init__(self):
+ self.impure_wheels = {}
+ self.libs = {}
+
+ def add(self, pathname, extensions):
+ self.impure_wheels[pathname] = extensions
+ self.libs.update(extensions)
+
+ def remove(self, pathname):
+ extensions = self.impure_wheels.pop(pathname)
+ for k, v in extensions:
+ if k in self.libs:
+ del self.libs[k]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.libs:
+ result = self
+ else:
+ result = None
+ return result
+
+ def load_module(self, fullname):
+ if fullname in sys.modules:
+ result = sys.modules[fullname]
+ else:
+ if fullname not in self.libs:
+ raise ImportError('unable to find extension for %s' % fullname)
+ result = _load_dynamic(fullname, self.libs[fullname])
+ result.__loader__ = self
+ parts = fullname.rsplit('.', 1)
+ if len(parts) > 1:
+ result.__package__ = parts[0]
+ return result
+
+_hook = Mounter()
+
+
+class Wheel(object):
+ """
+ Class to build and install from Wheel files (PEP 427).
+ """
+
+ wheel_version = (1, 1)
+ hash_kind = 'sha256'
+
+ def __init__(self, filename=None, sign=False, verify=False):
+ """
+ Initialise an instance using a (valid) filename.
+ """
+ self.sign = sign
+ self.should_verify = verify
+ self.buildver = ''
+ self.pyver = [PYVER]
+ self.abi = ['none']
+ self.arch = ['any']
+ self.dirname = os.getcwd()
+ if filename is None:
+ self.name = 'dummy'
+ self.version = '0.1'
+ self._filename = self.filename
+ else:
+ m = NAME_VERSION_RE.match(filename)
+ if m:
+ info = m.groupdict('')
+ self.name = info['nm']
+ # Reinstate the local version separator
+ self.version = info['vn'].replace('_', '-')
+ self.buildver = info['bn']
+ self._filename = self.filename
+ else:
+ dirname, filename = os.path.split(filename)
+ m = FILENAME_RE.match(filename)
+ if not m:
+ raise DistlibException('Invalid name or '
+ 'filename: %r' % filename)
+ if dirname:
+ self.dirname = os.path.abspath(dirname)
+ self._filename = filename
+ info = m.groupdict('')
+ self.name = info['nm']
+ self.version = info['vn']
+ self.buildver = info['bn']
+ self.pyver = info['py'].split('.')
+ self.abi = info['bi'].split('.')
+ self.arch = info['ar'].split('.')
+
+ @property
+ def filename(self):
+ """
+ Build and return a filename from the various components.
+ """
+ if self.buildver:
+ buildver = '-' + self.buildver
+ else:
+ buildver = ''
+ pyver = '.'.join(self.pyver)
+ abi = '.'.join(self.abi)
+ arch = '.'.join(self.arch)
+ # replace - with _ as a local version separator
+ version = self.version.replace('-', '_')
+ return '%s-%s%s-%s-%s-%s.whl' % (self.name, version, buildver,
+ pyver, abi, arch)
+
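+ # Illustrative example (not part of upstream distlib): with name 'foo',
+ # version '1.0' and the defaults above, this evaluates to e.g.
+ # 'foo-1.0-py311-none-any.whl' on CPython 3.11 (PYVER is 'py' plus the
+ # version digits).
+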
+ @property
+ def exists(self):
+ path = os.path.join(self.dirname, self.filename)
+ return os.path.isfile(path)
+
+ @property
+ def tags(self):
+ for pyver in self.pyver:
+ for abi in self.abi:
+ for arch in self.arch:
+ yield pyver, abi, arch
+
+ @cached_property
+ def metadata(self):
+ pathname = os.path.join(self.dirname, self.filename)
+ name_ver = '%s-%s' % (self.name, self.version)
+ info_dir = '%s.dist-info' % name_ver
+ wrapper = codecs.getreader('utf-8')
+ with ZipFile(pathname, 'r') as zf:
+ wheel_metadata = self.get_wheel_metadata(zf)
+ wv = wheel_metadata['Wheel-Version'].split('.', 1)
+ file_version = tuple([int(i) for i in wv])
+ # if file_version < (1, 1):
+ # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME,
+ # LEGACY_METADATA_FILENAME]
+ # else:
+ # fns = [WHEEL_METADATA_FILENAME, METADATA_FILENAME]
+ fns = [WHEEL_METADATA_FILENAME, LEGACY_METADATA_FILENAME]
+ result = None
+ for fn in fns:
+ try:
+ metadata_filename = posixpath.join(info_dir, fn)
+ with zf.open(metadata_filename) as bf:
+ wf = wrapper(bf)
+ result = Metadata(fileobj=wf)
+ if result:
+ break
+ except KeyError:
+ pass
+ if not result:
+ raise ValueError('Invalid wheel, because metadata is '
+ 'missing: looked in %s' % ', '.join(fns))
+ return result
+
+ def get_wheel_metadata(self, zf):
+ name_ver = '%s-%s' % (self.name, self.version)
+ info_dir = '%s.dist-info' % name_ver
+ metadata_filename = posixpath.join(info_dir, 'WHEEL')
+ with zf.open(metadata_filename) as bf:
+ wf = codecs.getreader('utf-8')(bf)
+ message = message_from_file(wf)
+ return dict(message)
+
+ @cached_property
+ def info(self):
+ pathname = os.path.join(self.dirname, self.filename)
+ with ZipFile(pathname, 'r') as zf:
+ result = self.get_wheel_metadata(zf)
+ return result
+
+ def process_shebang(self, data):
+ m = SHEBANG_RE.match(data)
+ if m:
+ end = m.end()
+ shebang, data_after_shebang = data[:end], data[end:]
+ # Preserve any arguments after the interpreter
+ if b'pythonw' in shebang.lower():
+ shebang_python = SHEBANG_PYTHONW
+ else:
+ shebang_python = SHEBANG_PYTHON
+ m = SHEBANG_DETAIL_RE.match(shebang)
+ if m:
+ args = b' ' + m.groups()[-1]
+ else:
+ args = b''
+ shebang = shebang_python + args
+ data = shebang + data_after_shebang
+ else:
+ cr = data.find(b'\r')
+ lf = data.find(b'\n')
+ if cr < 0 or cr > lf:
+ term = b'\n'
+ else:
+ if data[cr:cr + 2] == b'\r\n':
+ term = b'\r\n'
+ else:
+ term = b'\r'
+ data = SHEBANG_PYTHON + term + data
+ return data
+
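+ # Illustrative example (not part of upstream distlib): process_shebang
+ # rewrites a concrete interpreter path to the generic launcher form,
+ # preserving any arguments, e.g.
+ #
+ #   b'#!/usr/bin/python -u\nprint(1)'  ->  b'#!python -u\nprint(1)'
+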
+ def get_hash(self, data, hash_kind=None):
+ if hash_kind is None:
+ hash_kind = self.hash_kind
+ try:
+ hasher = getattr(hashlib, hash_kind)
+ except AttributeError:
+ raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
+ result = hasher(data).digest()
+ result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
+ return hash_kind, result
+
+ def write_record(self, records, record_path, archive_record_path):
+ records = list(records) # make a copy, as mutated
+ records.append((archive_record_path, '', ''))
+ with CSVWriter(record_path) as writer:
+ for row in records:
+ writer.writerow(row)
+
+ def write_records(self, info, libdir, archive_paths):
+ records = []
+ distinfo, info_dir = info
+ hasher = getattr(hashlib, self.hash_kind)
+ for ap, p in archive_paths:
+ with open(p, 'rb') as f:
+ data = f.read()
+ digest = '%s=%s' % self.get_hash(data)
+ size = os.path.getsize(p)
+ records.append((ap, digest, size))
+
+ p = os.path.join(distinfo, 'RECORD')
+ ap = to_posix(os.path.join(info_dir, 'RECORD'))
+ self.write_record(records, p, ap)
+ archive_paths.append((ap, p))
+
+ def build_zip(self, pathname, archive_paths):
+ with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
+ for ap, p in archive_paths:
+ logger.debug('Wrote %s to %s in wheel', p, ap)
+ zf.write(p, ap)
+
+ def build(self, paths, tags=None, wheel_version=None):
+ """
+ Build a wheel from files in specified paths, and use any specified tags
+ when determining the name of the wheel.
+ """
+ if tags is None:
+ tags = {}
+
+ libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
+ if libkey == 'platlib':
+ is_pure = 'false'
+ default_pyver = [IMPVER]
+ default_abi = [ABI]
+ default_arch = [ARCH]
+ else:
+ is_pure = 'true'
+ default_pyver = [PYVER]
+ default_abi = ['none']
+ default_arch = ['any']
+
+ self.pyver = tags.get('pyver', default_pyver)
+ self.abi = tags.get('abi', default_abi)
+ self.arch = tags.get('arch', default_arch)
+
+ libdir = paths[libkey]
+
+ name_ver = '%s-%s' % (self.name, self.version)
+ data_dir = '%s.data' % name_ver
+ info_dir = '%s.dist-info' % name_ver
+
+ archive_paths = []
+
+ # First, stuff which is not in site-packages
+ for key in ('data', 'headers', 'scripts'):
+ if key not in paths:
+ continue
+ path = paths[key]
+ if os.path.isdir(path):
+ for root, dirs, files in os.walk(path):
+ for fn in files:
+ p = fsdecode(os.path.join(root, fn))
+ rp = os.path.relpath(p, path)
+ ap = to_posix(os.path.join(data_dir, key, rp))
+ archive_paths.append((ap, p))
+ if key == 'scripts' and not p.endswith('.exe'):
+ with open(p, 'rb') as f:
+ data = f.read()
+ data = self.process_shebang(data)
+ with open(p, 'wb') as f:
+ f.write(data)
+
+ # Now, stuff which is in site-packages, other than the
+ # distinfo stuff.
+ path = libdir
+ distinfo = None
+ for root, dirs, files in os.walk(path):
+ if root == path:
+ # At the top level only, save distinfo for later
+ # and skip it for now
+ for i, dn in enumerate(dirs):
+ dn = fsdecode(dn)
+ if dn.endswith('.dist-info'):
+ distinfo = os.path.join(root, dn)
+ del dirs[i]
+ break
+ assert distinfo, '.dist-info directory expected, not found'
+
+ for fn in files:
+ # comment out next suite to leave .pyc files in
+ if fsdecode(fn).endswith(('.pyc', '.pyo')):
+ continue
+ p = os.path.join(root, fn)
+ rp = to_posix(os.path.relpath(p, path))
+ archive_paths.append((rp, p))
+
+ # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
+ files = os.listdir(distinfo)
+ for fn in files:
+ if fn not in ('RECORD', 'INSTALLER', 'SHARED', 'WHEEL'):
+ p = fsdecode(os.path.join(distinfo, fn))
+ ap = to_posix(os.path.join(info_dir, fn))
+ archive_paths.append((ap, p))
+
+ wheel_metadata = [
+ 'Wheel-Version: %d.%d' % (wheel_version or self.wheel_version),
+ 'Generator: distlib %s' % __version__,
+ 'Root-Is-Purelib: %s' % is_pure,
+ ]
+ for pyver, abi, arch in self.tags:
+ wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
+ p = os.path.join(distinfo, 'WHEEL')
+ with open(p, 'w') as f:
+ f.write('\n'.join(wheel_metadata))
+ ap = to_posix(os.path.join(info_dir, 'WHEEL'))
+ archive_paths.append((ap, p))
+
+ # sort the entries by archive path. Not needed by any spec, but it
+ # keeps the archive listing and RECORD tidier than they would otherwise
+ # be. Use the number of path segments to keep directory entries together,
+ # and keep the dist-info stuff at the end.
+ def sorter(t):
+ ap = t[0]
+ n = ap.count('/')
+ if '.dist-info' in ap:
+ n += 10000
+ return (n, ap)
+ archive_paths = sorted(archive_paths, key=sorter)
+
+ # Now, at last, RECORD.
+ # Paths in here are archive paths - nothing else makes sense.
+ self.write_records((distinfo, info_dir), libdir, archive_paths)
+ # Now, ready to build the zip file
+ pathname = os.path.join(self.dirname, self.filename)
+ self.build_zip(pathname, archive_paths)
+ return pathname
+
+ def skip_entry(self, arcname):
+ """
+ Determine whether an archive entry should be skipped when verifying
+ or installing.
+ """
+ # The signature file won't be in RECORD,
+ # and we currently don't do anything with it.
+ # We also skip directories, as they won't be in RECORD
+ # either. See:
+ #
+ # https://github.com/pypa/wheel/issues/294
+ # https://github.com/pypa/wheel/issues/287
+ # https://github.com/pypa/wheel/pull/289
+ #
+ return arcname.endswith(('/', '/RECORD.jws'))
+
+ def install(self, paths, maker, **kwargs):
+ """
+ Install a wheel to the specified paths. If kwarg ``warner`` is
+ specified, it should be a callable, which will be called with two
+ tuples indicating the wheel version of this software and the wheel
+ version in the file, if there is a discrepancy in the versions.
+ This can be used to issue any warnings or raise any exceptions.
+ If kwarg ``lib_only`` is True, only the purelib/platlib files are
+ installed, and the headers, scripts, data and dist-info metadata are
+ not written. If kwarg ``bytecode_hashed_invalidation`` is True, written
+ bytecode will try to use file-hash based invalidation (PEP-552) on
+ supported interpreter versions (CPython 3.7+).
+
+ The return value is a :class:`InstalledDistribution` instance unless
+ ``options.lib_only`` is True, in which case the return value is ``None``.
+ """
+
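+ # Illustrative usage sketch (assumptions, not upstream-verified: a
+ # sysconfig-style paths mapping and distlib's ScriptMaker are available;
+ # site_pkgs, bin_dir, hdr_dir, data_dir and prefix are placeholders):
+ #
+ #   from distlib.scripts import ScriptMaker
+ #   paths = {'purelib': site_pkgs, 'platlib': site_pkgs,
+ #            'scripts': bin_dir, 'headers': hdr_dir, 'data': data_dir,
+ #            'prefix': prefix}
+ #   maker = ScriptMaker(None, None)
+ #   dist = Wheel('foo-1.0-py311-none-any.whl').install(paths, maker)
+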
+ dry_run = maker.dry_run
+ warner = kwargs.get('warner')
+ lib_only = kwargs.get('lib_only', False)
+ bc_hashed_invalidation = kwargs.get('bytecode_hashed_invalidation', False)
+
+ pathname = os.path.join(self.dirname, self.filename)
+ name_ver = '%s-%s' % (self.name, self.version)
+ data_dir = '%s.data' % name_ver
+ info_dir = '%s.dist-info' % name_ver
+
+ metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
+ wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+ record_name = posixpath.join(info_dir, 'RECORD')
+
+ wrapper = codecs.getreader('utf-8')
+
+ with ZipFile(pathname, 'r') as zf:
+ with zf.open(wheel_metadata_name) as bwf:
+ wf = wrapper(bwf)
+ message = message_from_file(wf)
+ wv = message['Wheel-Version'].split('.', 1)
+ file_version = tuple([int(i) for i in wv])
+ if (file_version != self.wheel_version) and warner:
+ warner(self.wheel_version, file_version)
+
+ if message['Root-Is-Purelib'] == 'true':
+ libdir = paths['purelib']
+ else:
+ libdir = paths['platlib']
+
+ records = {}
+ with zf.open(record_name) as bf:
+ with CSVReader(stream=bf) as reader:
+ for row in reader:
+ p = row[0]
+ records[p] = row
+
+ data_pfx = posixpath.join(data_dir, '')
+ info_pfx = posixpath.join(info_dir, '')
+ script_pfx = posixpath.join(data_dir, 'scripts', '')
+
+ # make a new instance rather than a copy of maker's,
+ # as we mutate it
+ fileop = FileOperator(dry_run=dry_run)
+ fileop.record = True # so we can rollback if needed
+
+ bc = not sys.dont_write_bytecode # Double negatives. Lovely!
+
+ outfiles = [] # for RECORD writing
+
+ # for script copying/shebang processing
+ workdir = tempfile.mkdtemp()
+ # set target dir later
+ # we default add_launchers to False, as the
+ # Python Launcher should be used instead
+ maker.source_dir = workdir
+ maker.target_dir = None
+ try:
+ for zinfo in zf.infolist():
+ arcname = zinfo.filename
+ if isinstance(arcname, text_type):
+ u_arcname = arcname
+ else:
+ u_arcname = arcname.decode('utf-8')
+ if self.skip_entry(u_arcname):
+ continue
+ row = records[u_arcname]
+ if row[2] and str(zinfo.file_size) != row[2]:
+ raise DistlibException('size mismatch for '
+ '%s' % u_arcname)
+ if row[1]:
+ kind, value = row[1].split('=', 1)
+ with zf.open(arcname) as bf:
+ data = bf.read()
+ _, digest = self.get_hash(data, kind)
+ if digest != value:
+ raise DistlibException('digest mismatch for '
+ '%s' % arcname)
+
+ if lib_only and u_arcname.startswith((info_pfx, data_pfx)):
+ logger.debug('lib_only: skipping %s', u_arcname)
+ continue
+ is_script = (u_arcname.startswith(script_pfx)
+ and not u_arcname.endswith('.exe'))
+
+ if u_arcname.startswith(data_pfx):
+ _, where, rp = u_arcname.split('/', 2)
+ outfile = os.path.join(paths[where], convert_path(rp))
+ else:
+ # meant for site-packages.
+ if u_arcname in (wheel_metadata_name, record_name):
+ continue
+ outfile = os.path.join(libdir, convert_path(u_arcname))
+ if not is_script:
+ with zf.open(arcname) as bf:
+ fileop.copy_stream(bf, outfile)
+ # Issue #147: permission bits aren't preserved. Using
+ # zf.extract(zinfo, libdir) should have worked, but didn't,
+ # see https://www.thetopsites.net/article/53834422.shtml
+ # So ... manually preserve permission bits as given in zinfo
+ if os.name == 'posix':
+ # just set the normal permission bits
+ os.chmod(outfile, (zinfo.external_attr >> 16) & 0x1FF)
+ outfiles.append(outfile)
+ # Double check the digest of the written file
+ if not dry_run and row[1]:
+ with open(outfile, 'rb') as bf:
+ data = bf.read()
+ _, newdigest = self.get_hash(data, kind)
+ if newdigest != digest:
+ raise DistlibException('digest mismatch '
+ 'on write for '
+ '%s' % outfile)
+ if bc and outfile.endswith('.py'):
+ try:
+ pyc = fileop.byte_compile(outfile,
+ hashed_invalidation=bc_hashed_invalidation)
+ outfiles.append(pyc)
+ except Exception:
+ # Don't give up if byte-compilation fails,
+ # but log it and perhaps warn the user
+ logger.warning('Byte-compilation failed',
+ exc_info=True)
+ else:
+ fn = os.path.basename(convert_path(arcname))
+ workname = os.path.join(workdir, fn)
+ with zf.open(arcname) as bf:
+ fileop.copy_stream(bf, workname)
+
+ dn, fn = os.path.split(outfile)
+ maker.target_dir = dn
+ filenames = maker.make(fn)
+ fileop.set_executable_mode(filenames)
+ outfiles.extend(filenames)
+
+ if lib_only:
+ logger.debug('lib_only: returning None')
+ dist = None
+ else:
+ # Generate scripts
+
+ # Try to get pydist.json so we can see if there are
+ # any commands to generate. If this fails (e.g. because
+ # of a legacy wheel), log a warning but don't give up.
+ commands = None
+ file_version = self.info['Wheel-Version']
+ if file_version == '1.0':
+ # Use legacy info
+ ep = posixpath.join(info_dir, 'entry_points.txt')
+ try:
+ with zf.open(ep) as bwf:
+ epdata = read_exports(bwf)
+ commands = {}
+ for key in ('console', 'gui'):
+ k = '%s_scripts' % key
+ if k in epdata:
+ commands['wrap_%s' % key] = d = {}
+ for v in epdata[k].values():
+ s = '%s:%s' % (v.prefix, v.suffix)
+ if v.flags:
+ s += ' [%s]' % ','.join(v.flags)
+ d[v.name] = s
+ except Exception:
+ logger.warning('Unable to read legacy script '
+ 'metadata, so cannot generate '
+ 'scripts')
+ else:
+ try:
+ with zf.open(metadata_name) as bwf:
+ wf = wrapper(bwf)
+ commands = json.load(wf).get('extensions')
+ if commands:
+ commands = commands.get('python.commands')
+ except Exception:
+ logger.warning('Unable to read JSON metadata, so '
+ 'cannot generate scripts')
+ if commands:
+ console_scripts = commands.get('wrap_console', {})
+ gui_scripts = commands.get('wrap_gui', {})
+ if console_scripts or gui_scripts:
+ script_dir = paths.get('scripts', '')
+ if not os.path.isdir(script_dir):
+ raise ValueError('Valid script path not '
+ 'specified')
+ maker.target_dir = script_dir
+ for k, v in console_scripts.items():
+ script = '%s = %s' % (k, v)
+ filenames = maker.make(script)
+ fileop.set_executable_mode(filenames)
+
+ if gui_scripts:
+ options = {'gui': True}
+ for k, v in gui_scripts.items():
+ script = '%s = %s' % (k, v)
+ filenames = maker.make(script, options)
+ fileop.set_executable_mode(filenames)
+
+ p = os.path.join(libdir, info_dir)
+ dist = InstalledDistribution(p)
+
+ # Write SHARED
+ paths = dict(paths) # don't change passed in dict
+ del paths['purelib']
+ del paths['platlib']
+ paths['lib'] = libdir
+ p = dist.write_shared_locations(paths, dry_run)
+ if p:
+ outfiles.append(p)
+
+ # Write RECORD
+ dist.write_installed_files(outfiles, paths['prefix'],
+ dry_run)
+ return dist
+ except Exception: # pragma: no cover
+ logger.exception('installation failed.')
+ fileop.rollback()
+ raise
+ finally:
+ shutil.rmtree(workdir)
+
+ def _get_dylib_cache(self):
+ global cache
+ if cache is None:
+ # Use native string to avoid issues on 2.x: see Python #20140.
+ base = os.path.join(get_cache_base(), str('dylib-cache'),
+ '%s.%s' % sys.version_info[:2])
+ cache = Cache(base)
+ return cache
+
+ def _get_extensions(self):
+ pathname = os.path.join(self.dirname, self.filename)
+ name_ver = '%s-%s' % (self.name, self.version)
+ info_dir = '%s.dist-info' % name_ver
+ arcname = posixpath.join(info_dir, 'EXTENSIONS')
+ wrapper = codecs.getreader('utf-8')
+ result = []
+ with ZipFile(pathname, 'r') as zf:
+ try:
+ with zf.open(arcname) as bf:
+ wf = wrapper(bf)
+ extensions = json.load(wf)
+ cache = self._get_dylib_cache()
+ prefix = cache.prefix_to_dir(pathname)
+ cache_base = os.path.join(cache.base, prefix)
+ if not os.path.isdir(cache_base):
+ os.makedirs(cache_base)
+ for name, relpath in extensions.items():
+ dest = os.path.join(cache_base, convert_path(relpath))
+ if not os.path.exists(dest):
+ extract = True
+ else:
+ file_time = os.stat(dest).st_mtime
+ file_time = datetime.datetime.fromtimestamp(file_time)
+ info = zf.getinfo(relpath)
+ wheel_time = datetime.datetime(*info.date_time)
+ extract = wheel_time > file_time
+ if extract:
+ zf.extract(relpath, cache_base)
+ result.append((name, dest))
+ except KeyError:
+ pass
+ return result
+
+ def is_compatible(self):
+ """
+ Determine if a wheel is compatible with the running system.
+ """
+ return is_compatible(self)
+
+ def is_mountable(self):
+ """
+ Determine if a wheel is asserted as mountable by its metadata.
+ """
+ return True # for now - metadata details TBD
+
+ def mount(self, append=False):
+ pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+ if not self.is_compatible():
+ msg = 'Wheel %s not compatible with this Python.' % pathname
+ raise DistlibException(msg)
+ if not self.is_mountable():
+ msg = 'Wheel %s is marked as not mountable.' % pathname
+ raise DistlibException(msg)
+ if pathname in sys.path:
+ logger.debug('%s already in path', pathname)
+ else:
+ if append:
+ sys.path.append(pathname)
+ else:
+ sys.path.insert(0, pathname)
+ extensions = self._get_extensions()
+ if extensions:
+ if _hook not in sys.meta_path:
+ sys.meta_path.append(_hook)
+ _hook.add(pathname, extensions)
+
+ def unmount(self):
+ pathname = os.path.abspath(os.path.join(self.dirname, self.filename))
+ if pathname not in sys.path:
+ logger.debug('%s not in path', pathname)
+ else:
+ sys.path.remove(pathname)
+ if pathname in _hook.impure_wheels:
+ _hook.remove(pathname)
+ if not _hook.impure_wheels:
+ if _hook in sys.meta_path:
+ sys.meta_path.remove(_hook)
+
+ def verify(self):
+ pathname = os.path.join(self.dirname, self.filename)
+ name_ver = '%s-%s' % (self.name, self.version)
+ data_dir = '%s.data' % name_ver
+ info_dir = '%s.dist-info' % name_ver
+
+ metadata_name = posixpath.join(info_dir, LEGACY_METADATA_FILENAME)
+ wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
+ record_name = posixpath.join(info_dir, 'RECORD')
+
+ wrapper = codecs.getreader('utf-8')
+
+ with ZipFile(pathname, 'r') as zf:
+ with zf.open(wheel_metadata_name) as bwf:
+ wf = wrapper(bwf)
+ message = message_from_file(wf)
+ wv = message['Wheel-Version'].split('.', 1)
+ file_version = tuple([int(i) for i in wv])
+ # TODO version verification
+
+ records = {}
+ with zf.open(record_name) as bf:
+ with CSVReader(stream=bf) as reader:
+ for row in reader:
+ p = row[0]
+ records[p] = row
+
+ for zinfo in zf.infolist():
+ arcname = zinfo.filename
+ if isinstance(arcname, text_type):
+ u_arcname = arcname
+ else:
+ u_arcname = arcname.decode('utf-8')
+ # See issue #115: some wheels have '..' in their entries, but
+ # within a filename component (e.g. __main__..py) rather than as
+ # a path segment, so the check looks for '..' only in the
+ # directory portions.
+ p = u_arcname.split('/')
+ if '..' in p:
+ raise DistlibException('invalid entry in '
+ 'wheel: %r' % u_arcname)
+
+ if self.skip_entry(u_arcname):
+ continue
+ row = records[u_arcname]
+ if row[2] and str(zinfo.file_size) != row[2]:
+ raise DistlibException('size mismatch for '
+ '%s' % u_arcname)
+ if row[1]:
+ kind, value = row[1].split('=', 1)
+ with zf.open(arcname) as bf:
+ data = bf.read()
+ _, digest = self.get_hash(data, kind)
+ if digest != value:
+ raise DistlibException('digest mismatch for '
+ '%s' % arcname)
+
+ def update(self, modifier, dest_dir=None, **kwargs):
+ """
+ Update the contents of a wheel in a generic way. The modifier should
+ be a callable which expects a dictionary argument: its keys are
+ archive-entry paths, and its values are absolute filesystem paths
+ where the contents of the corresponding archive entries can be found. The
+ modifier is free to change the contents of the files pointed to, add
+ new entries and remove entries, before returning. This method will
+ extract the entire contents of the wheel to a temporary location, call
+ the modifier, and then use the passed (and possibly updated)
+ dictionary to write a new wheel. If ``dest_dir`` is specified, the new
+ wheel is written there -- otherwise, the original wheel is overwritten.
+
+ The modifier should return True if it updated the wheel, else False.
+ This method returns the same value the modifier returns.
+ """
+
+ def get_version(path_map, info_dir):
+ version = path = None
+ key = '%s/%s' % (info_dir, LEGACY_METADATA_FILENAME)
+ if key not in path_map:
+ key = '%s/PKG-INFO' % info_dir
+ if key in path_map:
+ path = path_map[key]
+ version = Metadata(path=path).version
+ return version, path
+
+ def update_version(version, path):
+ updated = None
+ try:
+ v = NormalizedVersion(version)
+ i = version.find('-')
+ if i < 0:
+ updated = '%s+1' % version
+ else:
+ parts = [int(s) for s in version[i + 1:].split('.')]
+ parts[-1] += 1
+ updated = '%s+%s' % (version[:i],
+ '.'.join(str(i) for i in parts))
+ except UnsupportedVersionError:
+ logger.debug('Cannot update non-compliant (PEP-440) '
+ 'version %r', version)
+ if updated:
+ md = Metadata(path=path)
+ md.version = updated
+ legacy = path.endswith(LEGACY_METADATA_FILENAME)
+ md.write(path=path, legacy=legacy)
+ logger.debug('Version updated from %r to %r', version,
+ updated)
+
+ pathname = os.path.join(self.dirname, self.filename)
+ name_ver = '%s-%s' % (self.name, self.version)
+ info_dir = '%s.dist-info' % name_ver
+ record_name = posixpath.join(info_dir, 'RECORD')
+ with tempdir() as workdir:
+ with ZipFile(pathname, 'r') as zf:
+ path_map = {}
+ for zinfo in zf.infolist():
+ arcname = zinfo.filename
+ if isinstance(arcname, text_type):
+ u_arcname = arcname
+ else:
+ u_arcname = arcname.decode('utf-8')
+ if u_arcname == record_name:
+ continue
+ if '..' in u_arcname:
+ raise DistlibException('invalid entry in '
+ 'wheel: %r' % u_arcname)
+ zf.extract(zinfo, workdir)
+ path = os.path.join(workdir, convert_path(u_arcname))
+ path_map[u_arcname] = path
+
+ # Remember the version.
+ original_version, _ = get_version(path_map, info_dir)
+ # Files extracted. Call the modifier.
+ modified = modifier(path_map, **kwargs)
+ if modified:
+ # Something changed - need to build a new wheel.
+ current_version, path = get_version(path_map, info_dir)
+ if current_version and (current_version == original_version):
+ # Add or update local version to signify changes.
+ update_version(current_version, path)
+ # Decide where the new wheel goes.
+ if dest_dir is None:
+ fd, newpath = tempfile.mkstemp(suffix='.whl',
+ prefix='wheel-update-',
+ dir=workdir)
+ os.close(fd)
+ else:
+ if not os.path.isdir(dest_dir):
+ raise DistlibException('Not a directory: %r' % dest_dir)
+ newpath = os.path.join(dest_dir, self.filename)
+ archive_paths = list(path_map.items())
+ distinfo = os.path.join(workdir, info_dir)
+ info = distinfo, info_dir
+ self.write_records(info, workdir, archive_paths)
+ self.build_zip(newpath, archive_paths)
+ if dest_dir is None:
+ shutil.copyfile(newpath, pathname)
+ return modified
+
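+# A minimal sketch of driving Wheel.update() with a modifier callable
+# (hypothetical wheel name and modifier; an illustration, not part of
+# distlib itself):
+#
+#     from distlib.wheel import Wheel
+#
+#     def stamp_metadata(path_map):
+#         # path_map maps archive entry paths to extracted filesystem paths.
+#         for arcname, fspath in path_map.items():
+#             if arcname.endswith('/METADATA'):
+#                 with open(fspath, 'a') as f:
+#                     f.write('X-Example-Stamp: 1\n')
+#                 return True  # tell update() a new wheel should be built
+#         return False
+#
+#     modified = Wheel('pkg-1.0-py3-none-any.whl').update(stamp_metadata)
+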
+def _get_glibc_version():
+ import platform
+ ver = platform.libc_ver()
+ result = []
+ if ver[0] == 'glibc':
+ for s in ver[1].split('.'):
+ result.append(int(s) if s.isdigit() else 0)
+ result = tuple(result)
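+    # e.g. (2, 31) on a glibc system; an empty tuple when platform.libc_ver()
+    # reports a different C library (illustrative values).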
+ return result
+
+def compatible_tags():
+ """
+ Return (pyver, abi, arch) tuples compatible with this Python.
+ """
+ versions = [VER_SUFFIX]
+ major = VER_SUFFIX[0]
+    for minor in range(sys.version_info[1] - 1, -1, -1):
+ versions.append(''.join([major, str(minor)]))
+
+ abis = []
+ for suffix in _get_suffixes():
+ if suffix.startswith('.abi'):
+ abis.append(suffix.split('.', 2)[1])
+ abis.sort()
+ if ABI != 'none':
+ abis.insert(0, ABI)
+ abis.append('none')
+ result = []
+
+ arches = [ARCH]
+ if sys.platform == 'darwin':
+ m = re.match(r'(\w+)_(\d+)_(\d+)_(\w+)$', ARCH)
+ if m:
+ name, major, minor, arch = m.groups()
+ minor = int(minor)
+ matches = [arch]
+ if arch in ('i386', 'ppc'):
+ matches.append('fat')
+ if arch in ('i386', 'ppc', 'x86_64'):
+ matches.append('fat3')
+ if arch in ('ppc64', 'x86_64'):
+ matches.append('fat64')
+ if arch in ('i386', 'x86_64'):
+ matches.append('intel')
+ if arch in ('i386', 'x86_64', 'intel', 'ppc', 'ppc64'):
+ matches.append('universal')
+ while minor >= 0:
+ for match in matches:
+ s = '%s_%s_%s_%s' % (name, major, minor, match)
+ if s != ARCH: # already there
+ arches.append(s)
+ minor -= 1
+
+ # Most specific - our Python version, ABI and arch
+ for abi in abis:
+ for arch in arches:
+ result.append((''.join((IMP_PREFIX, versions[0])), abi, arch))
+ # manylinux
+ if abi != 'none' and sys.platform.startswith('linux'):
+ arch = arch.replace('linux_', '')
+ parts = _get_glibc_version()
+ if len(parts) == 2:
+ if parts >= (2, 5):
+ result.append((''.join((IMP_PREFIX, versions[0])), abi,
+ 'manylinux1_%s' % arch))
+ if parts >= (2, 12):
+ result.append((''.join((IMP_PREFIX, versions[0])), abi,
+ 'manylinux2010_%s' % arch))
+ if parts >= (2, 17):
+ result.append((''.join((IMP_PREFIX, versions[0])), abi,
+ 'manylinux2014_%s' % arch))
+ result.append((''.join((IMP_PREFIX, versions[0])), abi,
+ 'manylinux_%s_%s_%s' % (parts[0], parts[1],
+ arch)))
+
+    # No ABI / arch dependency, but still an IMP_PREFIX dependency.
+ for i, version in enumerate(versions):
+ result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
+ if i == 0:
+ result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
+
+ # no IMP_PREFIX, ABI or arch dependency
+ for i, version in enumerate(versions):
+ result.append((''.join(('py', version)), 'none', 'any'))
+ if i == 0:
+ result.append((''.join(('py', version[0])), 'none', 'any'))
+
+ return set(result)
+
+
+COMPATIBLE_TAGS = compatible_tags()
+
+del compatible_tags
+
+
+def is_compatible(wheel, tags=None):
+ if not isinstance(wheel, Wheel):
+ wheel = Wheel(wheel) # assume it's a filename
+ result = False
+ if tags is None:
+ tags = COMPATIBLE_TAGS
+ for ver, abi, arch in tags:
+ if ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch:
+ result = True
+ break
+ return result
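+
+# Usage sketch (hypothetical filename; COMPATIBLE_TAGS describes the running
+# interpreter, so the result is host-dependent):
+#
+#     from distlib.wheel import is_compatible
+#     is_compatible('pkg-1.0-py3-none-any.whl')  # True on CPython 3 builds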
diff --git a/third_party/python/pip/pip/_vendor/distro/__init__.py b/third_party/python/pip/pip/_vendor/distro/__init__.py
new file mode 100644
index 0000000000..7686fe85a7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distro/__init__.py
@@ -0,0 +1,54 @@
+from .distro import (
+ NORMALIZED_DISTRO_ID,
+ NORMALIZED_LSB_ID,
+ NORMALIZED_OS_ID,
+ LinuxDistribution,
+ __version__,
+ build_number,
+ codename,
+ distro_release_attr,
+ distro_release_info,
+ id,
+ info,
+ like,
+ linux_distribution,
+ lsb_release_attr,
+ lsb_release_info,
+ major_version,
+ minor_version,
+ name,
+ os_release_attr,
+ os_release_info,
+ uname_attr,
+ uname_info,
+ version,
+ version_parts,
+)
+
+__all__ = [
+ "NORMALIZED_DISTRO_ID",
+ "NORMALIZED_LSB_ID",
+ "NORMALIZED_OS_ID",
+ "LinuxDistribution",
+ "build_number",
+ "codename",
+ "distro_release_attr",
+ "distro_release_info",
+ "id",
+ "info",
+ "like",
+ "linux_distribution",
+ "lsb_release_attr",
+ "lsb_release_info",
+ "major_version",
+ "minor_version",
+ "name",
+ "os_release_attr",
+ "os_release_info",
+ "uname_attr",
+ "uname_info",
+ "version",
+ "version_parts",
+]
+
+__version__ = __version__
diff --git a/third_party/python/pip/pip/_vendor/distro/__main__.py b/third_party/python/pip/pip/_vendor/distro/__main__.py
new file mode 100644
index 0000000000..0c01d5b08b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distro/__main__.py
@@ -0,0 +1,4 @@
+from .distro import main
+
+if __name__ == "__main__":
+ main()
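+
+# Invocation sketch (the vendored module path is an assumption; the standalone
+# distro package runs as `python -m distro`):
+#
+#     python -m pip._vendor.distro --json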
diff --git a/third_party/python/pip/pip/_vendor/distro/distro.py b/third_party/python/pip/pip/_vendor/distro/distro.py
new file mode 100644
index 0000000000..89e1868047
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/distro/distro.py
@@ -0,0 +1,1399 @@
+#!/usr/bin/env python
+# Copyright 2015,2016,2017 Nir Cohen
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+The ``distro`` package (``distro`` stands for Linux Distribution) provides
+information about the Linux distribution it runs on, such as a reliable
+machine-readable distro ID, or version information.
+
+It is the recommended replacement for Python's original
+:py:func:`platform.linux_distribution` function, but it provides much more
+functionality. An alternative implementation became necessary because Python
+3.5 deprecated this function, and Python 3.8 removed it altogether. Its
+predecessor function :py:func:`platform.dist` had already been deprecated since
+Python 2.6 and was removed in Python 3.8. Still, there are many cases in which
+access to OS distribution information is needed. See `Python issue 1322
+<https://bugs.python.org/issue1322>`_ for more information.
+"""
+
+import argparse
+import json
+import logging
+import os
+import re
+import shlex
+import subprocess
+import sys
+import warnings
+from typing import (
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ Optional,
+ Sequence,
+ TextIO,
+ Tuple,
+ Type,
+)
+
+try:
+ from typing import TypedDict
+except ImportError:
+ # Python 3.7
+ TypedDict = dict
+
+__version__ = "1.8.0"
+
+
+class VersionDict(TypedDict):
+ major: str
+ minor: str
+ build_number: str
+
+
+class InfoDict(TypedDict):
+ id: str
+ version: str
+ version_parts: VersionDict
+ like: str
+ codename: str
+
+
+_UNIXCONFDIR = os.environ.get("UNIXCONFDIR", "/etc")
+_UNIXUSRLIBDIR = os.environ.get("UNIXUSRLIBDIR", "/usr/lib")
+_OS_RELEASE_BASENAME = "os-release"
+
+#: Translation table for normalizing the "ID" attribute defined in os-release
+#: files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as defined in the os-release file, translated to lower case,
+#: with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_OS_ID = {
+ "ol": "oracle", # Oracle Linux
+ "opensuse-leap": "opensuse", # Newer versions of OpenSuSE report as opensuse-leap
+}
+
+#: Translation table for normalizing the "Distributor ID" attribute returned by
+#: the lsb_release command, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as returned by the lsb_release command, translated to lower
+#: case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_LSB_ID = {
+ "enterpriseenterpriseas": "oracle", # Oracle Enterprise Linux 4
+ "enterpriseenterpriseserver": "oracle", # Oracle Linux 5
+ "redhatenterpriseworkstation": "rhel", # RHEL 6, 7 Workstation
+ "redhatenterpriseserver": "rhel", # RHEL 6, 7 Server
+ "redhatenterprisecomputenode": "rhel", # RHEL 6 ComputeNode
+}
+
+#: Translation table for normalizing the distro ID derived from the file name
+#: of distro release files, for use by the :func:`distro.id` method.
+#:
+#: * Key: Value as derived from the file name of a distro release file,
+#: translated to lower case, with blanks translated to underscores.
+#:
+#: * Value: Normalized value.
+NORMALIZED_DISTRO_ID = {
+ "redhat": "rhel", # RHEL 6.x, 7.x
+}
+
+# Pattern for the content of a distro release file. The line is matched in
+# *reversed* form, hence the reversed literals: 'STL' is 'LTS' and
+# 'esaeler' is 'release'.
+_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
+ r"(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)"
+)
+
+# Pattern for base file name of distro release file
+_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(r"(\w+)[-_](release|version)$")
+
+# Base file names to be looked up if _UNIXCONFDIR is not readable.
+_DISTRO_RELEASE_BASENAMES = [
+ "SuSE-release",
+ "arch-release",
+ "base-release",
+ "centos-release",
+ "fedora-release",
+ "gentoo-release",
+ "mageia-release",
+ "mandrake-release",
+ "mandriva-release",
+ "mandrivalinux-release",
+ "manjaro-release",
+ "oracle-release",
+ "redhat-release",
+ "rocky-release",
+ "sl-release",
+ "slackware-version",
+]
+
+# Base file names to be ignored when searching for distro release file
+_DISTRO_RELEASE_IGNORE_BASENAMES = (
+ "debian_version",
+ "lsb-release",
+ "oem-release",
+ _OS_RELEASE_BASENAME,
+ "system-release",
+ "plesk-release",
+ "iredmail-release",
+)
+
+
+def linux_distribution(full_distribution_name: bool = True) -> Tuple[str, str, str]:
+ """
+ .. deprecated:: 1.6.0
+
+ :func:`distro.linux_distribution()` is deprecated. It should only be
+ used as a compatibility shim with Python's
+ :py:func:`platform.linux_distribution()`. Please use :func:`distro.id`,
+ :func:`distro.version` and :func:`distro.name` instead.
+
+ Return information about the current OS distribution as a tuple
+ ``(id_name, version, codename)`` with items as follows:
+
+ * ``id_name``: If *full_distribution_name* is false, the result of
+ :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``codename``: The extra item (usually in parentheses) after the
+ os-release version number, or the result of :func:`distro.codename`.
+
+ The interface of this function is compatible with the original
+ :py:func:`platform.linux_distribution` function, supporting a subset of
+ its parameters.
+
+    The data it returns may not be exactly the same, because it uses more data
+    sources than the original function, and that may lead to different results
+    if the OS distribution does not report consistent data across the sources
+    it provides (there are indeed such distributions ...).
+
+ Another reason for differences is the fact that the :func:`distro.id`
+ method normalizes the distro ID string to a reliable machine-readable value
+ for a number of popular OS distributions.
+ """
+ warnings.warn(
+ "distro.linux_distribution() is deprecated. It should only be used as a "
+ "compatibility shim with Python's platform.linux_distribution(). Please use "
+ "distro.id(), distro.version() and distro.name() instead.",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ return _distro.linux_distribution(full_distribution_name)
+
+
+def id() -> str:
+ """
+ Return the distro ID of the current distribution, as a
+ machine-readable string.
+
+ For a number of OS distributions, the returned distro ID value is
+ *reliable*, in the sense that it is documented and that it does not change
+ across releases of the distribution.
+
+ This package maintains the following reliable distro ID values:
+
+ ============== =========================================
+ Distro ID Distribution
+ ============== =========================================
+ "ubuntu" Ubuntu
+ "debian" Debian
+ "rhel" RedHat Enterprise Linux
+ "centos" CentOS
+ "fedora" Fedora
+ "sles" SUSE Linux Enterprise Server
+ "opensuse" openSUSE
+ "amzn" Amazon Linux
+ "arch" Arch Linux
+ "buildroot" Buildroot
+ "cloudlinux" CloudLinux OS
+ "exherbo" Exherbo Linux
+    "gentoo"       Gentoo Linux
+ "ibm_powerkvm" IBM PowerKVM
+ "kvmibm" KVM for IBM z Systems
+ "linuxmint" Linux Mint
+ "mageia" Mageia
+ "mandriva" Mandriva Linux
+ "parallels" Parallels
+ "pidora" Pidora
+ "raspbian" Raspbian
+ "oracle" Oracle Linux (and Oracle Enterprise Linux)
+ "scientific" Scientific Linux
+ "slackware" Slackware
+ "xenserver" XenServer
+ "openbsd" OpenBSD
+ "netbsd" NetBSD
+ "freebsd" FreeBSD
+ "midnightbsd" MidnightBSD
+ "rocky" Rocky Linux
+ "aix" AIX
+ "guix" Guix System
+ ============== =========================================
+
+ If you have a need to get distros for reliable IDs added into this set,
+ or if you find that the :func:`distro.id` function returns a different
+ distro ID for one of the listed distros, please create an issue in the
+ `distro issue tracker`_.
+
+ **Lookup hierarchy and transformations:**
+
+ First, the ID is obtained from the following sources, in the specified
+ order. The first available and non-empty value is used:
+
+ * the value of the "ID" attribute of the os-release file,
+
+ * the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+    * the first part of the file name of the distro release file.
+
+    The ID value determined this way then passes through the following
+    transformations before being returned by this method:
+
+ * it is translated to lower case,
+
+ * blanks (which should not be there anyway) are translated to underscores,
+
+ * a normalization of the ID is performed, based upon
+ `normalization tables`_. The purpose of this normalization is to ensure
+ that the ID is as reliable as possible, even across incompatible changes
+ in the OS distributions. A common reason for an incompatible change is
+ the addition of an os-release file, or the addition of the lsb_release
+ command, with ID values that differ from what was previously determined
+ from the distro release file name.
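+
+    A usage sketch (the value shown is illustrative and host-dependent):
+
+    .. sourcecode:: python
+
+        >>> import distro
+        >>> distro.id()
+        'ubuntu'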
+ """
+ return _distro.id()
+
+
+def name(pretty: bool = False) -> str:
+ """
+ Return the name of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the name is returned without version or codename.
+ (e.g. "CentOS Linux")
+
+ If *pretty* is true, the version and codename are appended.
+ (e.g. "CentOS Linux 7.1.1503 (Core)")
+
+ **Lookup hierarchy:**
+
+ The name is obtained from the following sources, in the specified order.
+ The first available and non-empty value is used:
+
+ * If *pretty* is false:
+
+ - the value of the "NAME" attribute of the os-release file,
+
+ - the value of the "Distributor ID" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file.
+
+ * If *pretty* is true:
+
+ - the value of the "PRETTY_NAME" attribute of the os-release file,
+
+ - the value of the "Description" attribute returned by the lsb_release
+ command,
+
+ - the value of the "<name>" field of the distro release file, appended
+ with the value of the pretty version ("<version_id>" and "<codename>"
+ fields) of the distro release file, if available.
+ """
+ return _distro.name(pretty)
+
+
+def version(pretty: bool = False, best: bool = False) -> str:
+ """
+ Return the version of the current OS distribution, as a human-readable
+ string.
+
+ If *pretty* is false, the version is returned without codename (e.g.
+ "7.0").
+
+ If *pretty* is true, the codename in parenthesis is appended, if the
+ codename is non-empty (e.g. "7.0 (Maipo)").
+
+ Some distributions provide version numbers with different precisions in
+ the different sources of distribution information. Examining the different
+ sources in a fixed priority order does not always yield the most precise
+ version (e.g. for Debian 8.2, or CentOS 7.1).
+
+    Some other distributions may not provide this kind of information. In
+    these cases, an empty string is returned. This behavior can be observed
+    with rolling-release distributions (e.g. Arch Linux).
+
+ The *best* parameter can be used to control the approach for the returned
+ version:
+
+ If *best* is false, the first non-empty version number in priority order of
+ the examined sources is returned.
+
+ If *best* is true, the most precise version number out of all examined
+ sources is returned.
+
+ **Lookup hierarchy:**
+
+ In all cases, the version number is obtained from the following sources.
+ If *best* is false, this order represents the priority order:
+
+ * the value of the "VERSION_ID" attribute of the os-release file,
+ * the value of the "Release" attribute returned by the lsb_release
+ command,
+ * the version number parsed from the "<version_id>" field of the first line
+ of the distro release file,
+ * the version number parsed from the "PRETTY_NAME" attribute of the
+ os-release file, if it follows the format of the distro release files.
+ * the version number parsed from the "Description" attribute returned by
+ the lsb_release command, if it follows the format of the distro release
+ files.
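+
+    A usage sketch (values shown are illustrative):
+
+    .. sourcecode:: python
+
+        >>> import distro
+        >>> distro.version(pretty=True, best=True)
+        '7.1.1503 (Core)'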
+ """
+ return _distro.version(pretty, best)
+
+
+def version_parts(best: bool = False) -> Tuple[str, str, str]:
+ """
+ Return the version of the current OS distribution as a tuple
+ ``(major, minor, build_number)`` with items as follows:
+
+ * ``major``: The result of :func:`distro.major_version`.
+
+ * ``minor``: The result of :func:`distro.minor_version`.
+
+ * ``build_number``: The result of :func:`distro.build_number`.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.version_parts(best)
+
+
+def major_version(best: bool = False) -> str:
+ """
+ Return the major version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The major version is the first
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.major_version(best)
+
+
+def minor_version(best: bool = False) -> str:
+ """
+ Return the minor version of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The minor version is the second
+ part of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.minor_version(best)
+
+
+def build_number(best: bool = False) -> str:
+ """
+ Return the build number of the current OS distribution, as a string,
+ if provided.
+ Otherwise, the empty string is returned. The build number is the third part
+ of the dot-separated version string.
+
+ For a description of the *best* parameter, see the :func:`distro.version`
+ method.
+ """
+ return _distro.build_number(best)
+
+
+def like() -> str:
+ """
+    Return a space-separated list of distro IDs of distributions that are
+    closely related to the current OS distribution with regard to packaging
+    and programming interfaces, for example distributions the current
+    distribution is a derivative of.
+
+ **Lookup hierarchy:**
+
+ This information item is only provided by the os-release file.
+ For details, see the description of the "ID_LIKE" attribute in the
+ `os-release man page
+ <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
+ """
+ return _distro.like()
+
+
+def codename() -> str:
+ """
+ Return the codename for the release of the current OS distribution,
+ as a string.
+
+ If the distribution does not have a codename, an empty string is returned.
+
+ Note that the returned codename is not always really a codename. For
+ example, openSUSE returns "x86_64". This function does not handle such
+ cases in any special way and just returns the string it finds, if any.
+
+ **Lookup hierarchy:**
+
+ * the codename within the "VERSION" attribute of the os-release file, if
+ provided,
+
+ * the value of the "Codename" attribute returned by the lsb_release
+ command,
+
+ * the value of the "<codename>" field of the distro release file.
+ """
+ return _distro.codename()
+
+
+def info(pretty: bool = False, best: bool = False) -> InfoDict:
+ """
+ Return certain machine-readable information items about the current OS
+ distribution in a dictionary, as shown in the following example:
+
+ .. sourcecode:: python
+
+ {
+ 'id': 'rhel',
+ 'version': '7.0',
+ 'version_parts': {
+ 'major': '7',
+ 'minor': '0',
+ 'build_number': ''
+ },
+ 'like': 'fedora',
+ 'codename': 'Maipo'
+ }
+
+ The dictionary structure and keys are always the same, regardless of which
+ information items are available in the underlying data sources. The values
+ for the various keys are as follows:
+
+ * ``id``: The result of :func:`distro.id`.
+
+ * ``version``: The result of :func:`distro.version`.
+
+ * ``version_parts -> major``: The result of :func:`distro.major_version`.
+
+ * ``version_parts -> minor``: The result of :func:`distro.minor_version`.
+
+ * ``version_parts -> build_number``: The result of
+ :func:`distro.build_number`.
+
+ * ``like``: The result of :func:`distro.like`.
+
+ * ``codename``: The result of :func:`distro.codename`.
+
+ For a description of the *pretty* and *best* parameters, see the
+ :func:`distro.version` method.
+ """
+ return _distro.info(pretty, best)
+
+
+def os_release_info() -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the os-release file data source of the current OS distribution.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_info()
+
+
+def lsb_release_info() -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the lsb_release command data source of the current OS distribution.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_info()
+
+
+def distro_release_info() -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information items
+ from the distro release file data source of the current OS distribution.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_info()
+
+
+def uname_info() -> Dict[str, str]:
+ """
+    Return a dictionary containing key-value pairs for the information items
+    from the uname command data source of the current OS distribution.
+ """
+ return _distro.uname_info()
+
+
+def os_release_attr(attribute: str) -> str:
+ """
+ Return a single named information item from the os-release file data source
+ of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `os-release file`_ for details about these information items.
+ """
+ return _distro.os_release_attr(attribute)
+
+
+def lsb_release_attr(attribute: str) -> str:
+ """
+ Return a single named information item from the lsb_release command output
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `lsb_release command output`_ for details about these information
+ items.
+ """
+ return _distro.lsb_release_attr(attribute)
+
+
+def distro_release_attr(attribute: str) -> str:
+ """
+ Return a single named information item from the distro release file
+ data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+
+ See `distro release file`_ for details about these information items.
+ """
+ return _distro.distro_release_attr(attribute)
+
+
+def uname_attr(attribute: str) -> str:
+ """
+    Return a single named information item from the uname command output
+    data source of the current OS distribution.
+
+ Parameters:
+
+ * ``attribute`` (string): Key of the information item.
+
+ Returns:
+
+ * (string): Value of the information item, if the item exists.
+ The empty string, if the item does not exist.
+ """
+ return _distro.uname_attr(attribute)
+
+
+try:
+ from functools import cached_property
+except ImportError:
+ # Python < 3.8
+ class cached_property: # type: ignore
+ """A version of @property which caches the value. On access, it calls the
+ underlying function and sets the value in `__dict__` so future accesses
+ will not re-call the property.
+ """
+
+ def __init__(self, f: Callable[[Any], Any]) -> None:
+ self._fname = f.__name__
+ self._f = f
+
+ def __get__(self, obj: Any, owner: Type[Any]) -> Any:
+ assert obj is not None, f"call {self._fname} on an instance"
+ ret = obj.__dict__[self._fname] = self._f(obj)
+ return ret
+
+
+class LinuxDistribution:
+ """
+    Provides information about an OS distribution.
+
+ This package creates a private module-global instance of this class with
+    default initialization arguments, which is used by the
+ `consolidated accessor functions`_ and `single source accessor functions`_.
+ By using default initialization arguments, that module-global instance
+ returns data about the current OS distribution (i.e. the distro this
+ package runs on).
+
+ Normally, it is not necessary to create additional instances of this class.
+ However, in situations where control is needed over the exact data sources
+ that are used, instances of this class can be created with a specific
+ distro release file, or a specific os-release file, or without invoking the
+ lsb_release command.
+ """
+
+ def __init__(
+ self,
+ include_lsb: Optional[bool] = None,
+ os_release_file: str = "",
+ distro_release_file: str = "",
+ include_uname: Optional[bool] = None,
+ root_dir: Optional[str] = None,
+ include_oslevel: Optional[bool] = None,
+ ) -> None:
+ """
+ The initialization method of this class gathers information from the
+ available data sources, and stores that in private instance attributes.
+ Subsequent access to the information items uses these private instance
+ attributes, so that the data sources are read only once.
+
+ Parameters:
+
+ * ``include_lsb`` (bool): Controls whether the
+ `lsb_release command output`_ is included as a data source.
+
+ If the lsb_release command is not available in the program execution
+ path, the data source for the lsb_release command will be empty.
+
+ * ``os_release_file`` (string): The path name of the
+ `os-release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause the default path name to
+ be used (see `os-release file`_ for details).
+
+ If the specified or defaulted os-release file does not exist, the
+ data source for the os-release file will be empty.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is to be used as a data source.
+
+ An empty string (the default) will cause a default search algorithm
+ to be used (see `distro release file`_ for details).
+
+ If the specified distro release file does not exist, or if no default
+ distro release file can be found, the data source for the distro
+ release file will be empty.
+
+ * ``include_uname`` (bool): Controls whether uname command output is
+          included as a data source. If the uname command is not available in
+          the program execution path, the data source for the uname command
+          will be empty.
+
+ * ``root_dir`` (string): The absolute path to the root directory to use
+ to find distro-related information files. Note that ``include_*``
+ parameters must not be enabled in combination with ``root_dir``.
+
+ * ``include_oslevel`` (bool): Controls whether (AIX) oslevel command
+          output is included as a data source. If the oslevel command is not
+          available in the program execution path, the data source will be
+          empty.
+
+ Public instance attributes:
+
+ * ``os_release_file`` (string): The path name of the
+          `os-release file`_ that is actually used as a data source. The
+          empty string if no os-release file is used as a data source.
+
+ * ``distro_release_file`` (string): The path name of the
+ `distro release file`_ that is actually used as a data source. The
+ empty string if no distro release file is used as a data source.
+
+ * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
+ This controls whether the lsb information will be loaded.
+
+ * ``include_uname`` (bool): The result of the ``include_uname``
+ parameter. This controls whether the uname information will
+ be loaded.
+
+ * ``include_oslevel`` (bool): The result of the ``include_oslevel``
+ parameter. This controls whether (AIX) oslevel information will be
+ loaded.
+
+ * ``root_dir`` (string): The result of the ``root_dir`` parameter.
+ The absolute path to the root directory to use to find distro-related
+ information files.
+
+ Raises:
+
+ * :py:exc:`ValueError`: Initialization parameters combination is not
+ supported.
+
+ * :py:exc:`OSError`: Some I/O issue with an os-release file or distro
+ release file.
+
+ * :py:exc:`UnicodeError`: A data source has unexpected characters or
+ uses an unexpected encoding.
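+
+        A construction sketch (hypothetical sysroot path; subprocess data
+        sources default to disabled when ``root_dir`` is given):
+
+        .. sourcecode:: python
+
+            dist = LinuxDistribution(root_dir="/mnt/sysroot")
+            dist.id()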
+ """
+ self.root_dir = root_dir
+ self.etc_dir = os.path.join(root_dir, "etc") if root_dir else _UNIXCONFDIR
+ self.usr_lib_dir = (
+ os.path.join(root_dir, "usr/lib") if root_dir else _UNIXUSRLIBDIR
+ )
+
+ if os_release_file:
+ self.os_release_file = os_release_file
+ else:
+ etc_dir_os_release_file = os.path.join(self.etc_dir, _OS_RELEASE_BASENAME)
+ usr_lib_os_release_file = os.path.join(
+ self.usr_lib_dir, _OS_RELEASE_BASENAME
+ )
+
+ # NOTE: The idea is to respect order **and** have it set
+ # at all times for API backwards compatibility.
+ if os.path.isfile(etc_dir_os_release_file) or not os.path.isfile(
+ usr_lib_os_release_file
+ ):
+ self.os_release_file = etc_dir_os_release_file
+ else:
+ self.os_release_file = usr_lib_os_release_file
+
+ self.distro_release_file = distro_release_file or "" # updated later
+
+ is_root_dir_defined = root_dir is not None
+ if is_root_dir_defined and (include_lsb or include_uname or include_oslevel):
+ raise ValueError(
+ "Including subprocess data sources from specific root_dir is disallowed"
+ " to prevent false information"
+ )
+ self.include_lsb = (
+ include_lsb if include_lsb is not None else not is_root_dir_defined
+ )
+ self.include_uname = (
+ include_uname if include_uname is not None else not is_root_dir_defined
+ )
+ self.include_oslevel = (
+ include_oslevel if include_oslevel is not None else not is_root_dir_defined
+ )
+
+ def __repr__(self) -> str:
+ """Return repr of all info"""
+ return (
+ "LinuxDistribution("
+ "os_release_file={self.os_release_file!r}, "
+ "distro_release_file={self.distro_release_file!r}, "
+ "include_lsb={self.include_lsb!r}, "
+ "include_uname={self.include_uname!r}, "
+ "include_oslevel={self.include_oslevel!r}, "
+ "root_dir={self.root_dir!r}, "
+ "_os_release_info={self._os_release_info!r}, "
+ "_lsb_release_info={self._lsb_release_info!r}, "
+ "_distro_release_info={self._distro_release_info!r}, "
+ "_uname_info={self._uname_info!r}, "
+ "_oslevel_info={self._oslevel_info!r})".format(self=self)
+ )
+
+ def linux_distribution(
+ self, full_distribution_name: bool = True
+ ) -> Tuple[str, str, str]:
+ """
+ Return information about the OS distribution that is compatible
+ with Python's :func:`platform.linux_distribution`, supporting a subset
+ of its parameters.
+
+ For details, see :func:`distro.linux_distribution`.
+ """
+ return (
+ self.name() if full_distribution_name else self.id(),
+ self.version(),
+ self._os_release_info.get("release_codename") or self.codename(),
+ )
+
+ def id(self) -> str:
+ """Return the distro ID of the OS distribution, as a string.
+
+ For details, see :func:`distro.id`.
+ """
+
+ def normalize(distro_id: str, table: Dict[str, str]) -> str:
+ distro_id = distro_id.lower().replace(" ", "_")
+ return table.get(distro_id, distro_id)
+
+ distro_id = self.os_release_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_OS_ID)
+
+ distro_id = self.lsb_release_attr("distributor_id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_LSB_ID)
+
+ distro_id = self.distro_release_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ distro_id = self.uname_attr("id")
+ if distro_id:
+ return normalize(distro_id, NORMALIZED_DISTRO_ID)
+
+ return ""
+
+ def name(self, pretty: bool = False) -> str:
+ """
+ Return the name of the OS distribution, as a string.
+
+ For details, see :func:`distro.name`.
+ """
+ name = (
+ self.os_release_attr("name")
+ or self.lsb_release_attr("distributor_id")
+ or self.distro_release_attr("name")
+ or self.uname_attr("name")
+ )
+ if pretty:
+ name = self.os_release_attr("pretty_name") or self.lsb_release_attr(
+ "description"
+ )
+ if not name:
+ name = self.distro_release_attr("name") or self.uname_attr("name")
+ version = self.version(pretty=True)
+ if version:
+ name = f"{name} {version}"
+ return name or ""
+
+ def version(self, pretty: bool = False, best: bool = False) -> str:
+ """
+ Return the version of the OS distribution, as a string.
+
+ For details, see :func:`distro.version`.
+ """
+ versions = [
+ self.os_release_attr("version_id"),
+ self.lsb_release_attr("release"),
+ self.distro_release_attr("version_id"),
+ self._parse_distro_release_content(self.os_release_attr("pretty_name")).get(
+ "version_id", ""
+ ),
+ self._parse_distro_release_content(
+ self.lsb_release_attr("description")
+ ).get("version_id", ""),
+ self.uname_attr("release"),
+ ]
+ if self.uname_attr("id").startswith("aix"):
+ # On AIX platforms, prefer oslevel command output.
+ versions.insert(0, self.oslevel_info())
+ elif self.id() == "debian" or "debian" in self.like().split():
+ # On Debian-like, add debian_version file content to candidates list.
+ versions.append(self._debian_version)
+ version = ""
+ if best:
+ # This algorithm uses the last version in priority order that has
+ # the best precision. If the versions are not in conflict, that
+ # does not matter; otherwise, using the last one instead of the
+ # first one might be considered a surprise.
+ for v in versions:
+ if v.count(".") > version.count(".") or version == "":
+ version = v
+ else:
+ for v in versions:
+ if v != "":
+ version = v
+ break
+ if pretty and version and self.codename():
+ version = f"{version} ({self.codename()})"
+ return version
+
+ def version_parts(self, best: bool = False) -> Tuple[str, str, str]:
+ """
+ Return the version of the OS distribution, as a tuple of version
+ numbers.
+
+ For details, see :func:`distro.version_parts`.
+ """
+ version_str = self.version(best=best)
+ if version_str:
+ version_regex = re.compile(r"(\d+)\.?(\d+)?\.?(\d+)?")
+ matches = version_regex.match(version_str)
+ if matches:
+ major, minor, build_number = matches.groups()
+ return major, minor or "", build_number or ""
+ return "", "", ""
+
+ def major_version(self, best: bool = False) -> str:
+ """
+ Return the major version number of the current distribution.
+
+ For details, see :func:`distro.major_version`.
+ """
+ return self.version_parts(best)[0]
+
+ def minor_version(self, best: bool = False) -> str:
+ """
+ Return the minor version number of the current distribution.
+
+ For details, see :func:`distro.minor_version`.
+ """
+ return self.version_parts(best)[1]
+
+ def build_number(self, best: bool = False) -> str:
+ """
+ Return the build number of the current distribution.
+
+ For details, see :func:`distro.build_number`.
+ """
+ return self.version_parts(best)[2]
+
+ def like(self) -> str:
+ """
+ Return the IDs of distributions that are like the OS distribution.
+
+ For details, see :func:`distro.like`.
+ """
+ return self.os_release_attr("id_like") or ""
+
+ def codename(self) -> str:
+ """
+ Return the codename of the OS distribution.
+
+ For details, see :func:`distro.codename`.
+ """
+ try:
+ # Handle os_release specially since distros might purposefully set
+ # this to empty string to have no codename
+ return self._os_release_info["codename"]
+ except KeyError:
+ return (
+ self.lsb_release_attr("codename")
+ or self.distro_release_attr("codename")
+ or ""
+ )
+
+ def info(self, pretty: bool = False, best: bool = False) -> InfoDict:
+ """
+ Return certain machine-readable information about the OS
+ distribution.
+
+ For details, see :func:`distro.info`.
+ """
+ return dict(
+ id=self.id(),
+ version=self.version(pretty, best),
+ version_parts=dict(
+ major=self.major_version(best),
+ minor=self.minor_version(best),
+ build_number=self.build_number(best),
+ ),
+ like=self.like(),
+ codename=self.codename(),
+ )
+
+ def os_release_info(self) -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the os-release file data source of the OS distribution.
+
+ For details, see :func:`distro.os_release_info`.
+ """
+ return self._os_release_info
+
+ def lsb_release_info(self) -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the lsb_release command data source of the OS
+ distribution.
+
+ For details, see :func:`distro.lsb_release_info`.
+ """
+ return self._lsb_release_info
+
+ def distro_release_info(self) -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the distro release file data source of the OS
+ distribution.
+
+ For details, see :func:`distro.distro_release_info`.
+ """
+ return self._distro_release_info
+
+ def uname_info(self) -> Dict[str, str]:
+ """
+ Return a dictionary containing key-value pairs for the information
+ items from the uname command data source of the OS distribution.
+
+ For details, see :func:`distro.uname_info`.
+ """
+ return self._uname_info
+
+ def oslevel_info(self) -> str:
+ """
+        Return the AIX oslevel command output.
+ """
+ return self._oslevel_info
+
+ def os_release_attr(self, attribute: str) -> str:
+ """
+ Return a single named information item from the os-release file data
+ source of the OS distribution.
+
+ For details, see :func:`distro.os_release_attr`.
+ """
+ return self._os_release_info.get(attribute, "")
+
+ def lsb_release_attr(self, attribute: str) -> str:
+ """
+ Return a single named information item from the lsb_release command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.lsb_release_attr`.
+ """
+ return self._lsb_release_info.get(attribute, "")
+
+ def distro_release_attr(self, attribute: str) -> str:
+ """
+ Return a single named information item from the distro release file
+ data source of the OS distribution.
+
+ For details, see :func:`distro.distro_release_attr`.
+ """
+ return self._distro_release_info.get(attribute, "")
+
+ def uname_attr(self, attribute: str) -> str:
+ """
+ Return a single named information item from the uname command
+ output data source of the OS distribution.
+
+ For details, see :func:`distro.uname_attr`.
+ """
+ return self._uname_info.get(attribute, "")
+
+ @cached_property
+ def _os_release_info(self) -> Dict[str, str]:
+ """
+ Get the information items from the specified os-release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if os.path.isfile(self.os_release_file):
+ with open(self.os_release_file, encoding="utf-8") as release_file:
+ return self._parse_os_release_content(release_file)
+ return {}
+
+ @staticmethod
+ def _parse_os_release_content(lines: TextIO) -> Dict[str, str]:
+ """
+ Parse the lines of an os-release file.
+
+ Parameters:
+
+ * lines: Iterable through the lines in the os-release file.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
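+
+        A parsing sketch (illustrative input):
+
+        .. sourcecode:: python
+
+            >>> import io
+            >>> LinuxDistribution._parse_os_release_content(
+            ...     io.StringIO("ID=ubuntu\nVERSION_CODENAME=jammy\n"))
+            {'id': 'ubuntu', 'version_codename': 'jammy', 'codename': 'jammy'}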
+ """
+ props = {}
+ lexer = shlex.shlex(lines, posix=True)
+ lexer.whitespace_split = True
+
+ tokens = list(lexer)
+ for token in tokens:
+ # At this point, all shell-like parsing has been done (i.e.
+ # comments processed, quotes and backslash escape sequences
+ # processed, multi-line values assembled, trailing newlines
+ # stripped, etc.), so the tokens are now either:
+ # * variable assignments: var=value
+ # * commands or their arguments (not allowed in os-release)
+ # Ignore any tokens that are not variable assignments
+ if "=" in token:
+ k, v = token.split("=", 1)
+ props[k.lower()] = v
+
+ if "version" in props:
+ # extract release codename (if any) from version attribute
+ match = re.search(r"\((\D+)\)|,\s*(\D+)", props["version"])
+ if match:
+ release_codename = match.group(1) or match.group(2)
+ props["codename"] = props["release_codename"] = release_codename
+
+ if "version_codename" in props:
+            # os-release added a version_codename field. Use that in
+            # preference to anything else. Note that some distros purposefully
+            # do not have code names; they should be setting
+            # version_codename="".
+ props["codename"] = props["version_codename"]
+ elif "ubuntu_codename" in props:
+ # Same as above but a non-standard field name used on older Ubuntus
+ props["codename"] = props["ubuntu_codename"]
+
+ return props
+
+ @cached_property
+ def _lsb_release_info(self) -> Dict[str, str]:
+ """
+ Get the information items from the lsb_release command output.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if not self.include_lsb:
+ return {}
+ try:
+ cmd = ("lsb_release", "-a")
+ stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
+ # Command not found or lsb_release returned error
+ except (OSError, subprocess.CalledProcessError):
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_lsb_release_content(content)
+
+ @staticmethod
+ def _parse_lsb_release_content(lines: Iterable[str]) -> Dict[str, str]:
+ """
+ Parse the output of the lsb_release command.
+
+ Parameters:
+
+ * lines: Iterable through the lines of the lsb_release output.
+ Each line must be a unicode string or a UTF-8 encoded byte
+ string.
+
+ Returns:
+ A dictionary containing all information items.
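+
+        A parsing sketch (illustrative input):
+
+        .. sourcecode:: python
+
+            >>> LinuxDistribution._parse_lsb_release_content(
+            ...     ["Distributor ID:\tUbuntu", "Release:\t22.04"])
+            {'distributor_id': 'Ubuntu', 'release': '22.04'}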
+ """
+ props = {}
+ for line in lines:
+ kv = line.strip("\n").split(":", 1)
+ if len(kv) != 2:
+ # Ignore lines without colon.
+ continue
+ k, v = kv
+ props.update({k.replace(" ", "_").lower(): v.strip()})
+ return props
+
+ @cached_property
+ def _uname_info(self) -> Dict[str, str]:
+ if not self.include_uname:
+ return {}
+ try:
+ cmd = ("uname", "-rs")
+ stdout = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
+ except OSError:
+ return {}
+ content = self._to_str(stdout).splitlines()
+ return self._parse_uname_content(content)
+
+ @cached_property
+ def _oslevel_info(self) -> str:
+ if not self.include_oslevel:
+ return ""
+ try:
+ stdout = subprocess.check_output("oslevel", stderr=subprocess.DEVNULL)
+ except (OSError, subprocess.CalledProcessError):
+ return ""
+ return self._to_str(stdout).strip()
+
+ @cached_property
+ def _debian_version(self) -> str:
+ try:
+ with open(
+ os.path.join(self.etc_dir, "debian_version"), encoding="ascii"
+ ) as fp:
+ return fp.readline().rstrip()
+ except FileNotFoundError:
+ return ""
+
+ @staticmethod
+ def _parse_uname_content(lines: Sequence[str]) -> Dict[str, str]:
+ if not lines:
+ return {}
+ props = {}
+ match = re.search(r"^([^\s]+)\s+([\d\.]+)", lines[0].strip())
+ if match:
+ name, version = match.groups()
+
+ # This is to prevent the Linux kernel version from
+ # appearing as the 'best' version on otherwise
+ # identifiable distributions.
+ if name == "Linux":
+ return {}
+ props["id"] = name.lower()
+ props["name"] = name
+ props["release"] = version
+ return props
+
+ @staticmethod
+ def _to_str(bytestring: bytes) -> str:
+ encoding = sys.getfilesystemencoding()
+ return bytestring.decode(encoding)
+
+ @cached_property
+ def _distro_release_info(self) -> Dict[str, str]:
+ """
+ Get the information items from the specified distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ if self.distro_release_file:
+ # If it was specified, we use it and parse what we can, even if
+ # its file name or content does not match the expected pattern.
+ distro_info = self._parse_distro_release_file(self.distro_release_file)
+ basename = os.path.basename(self.distro_release_file)
+ # The file name pattern for user-specified distro release files
+ # is somewhat more tolerant (compared to when searching for the
+ # file), because we want to use what was specified as best as
+ # possible.
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ else:
+ try:
+ basenames = [
+ basename
+ for basename in os.listdir(self.etc_dir)
+ if basename not in _DISTRO_RELEASE_IGNORE_BASENAMES
+ and os.path.isfile(os.path.join(self.etc_dir, basename))
+ ]
+ # We sort for repeatability in cases where there are multiple
+                # distro-specific files; e.g. CentOS, Oracle, Enterprise all
+ # containing `redhat-release` on top of their own.
+ basenames.sort()
+ except OSError:
+                # This may occur when /etc is not readable, in which case we
+                # can't be sure about the *-release files: check common
+                # entries of /etc for information. If they turn out not to be
+                # there, the error is handled in `_parse_distro_release_file()`.
+ basenames = _DISTRO_RELEASE_BASENAMES
+ for basename in basenames:
+ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
+ if match is None:
+ continue
+ filepath = os.path.join(self.etc_dir, basename)
+ distro_info = self._parse_distro_release_file(filepath)
+ # The name is always present if the pattern matches.
+ if "name" not in distro_info:
+ continue
+ self.distro_release_file = filepath
+ break
+ else: # the loop didn't "break": no candidate.
+ return {}
+
+ if match is not None:
+ distro_info["id"] = match.group(1)
+
+ # CloudLinux < 7: manually enrich info with proper id.
+ if "cloudlinux" in distro_info.get("name", "").lower():
+ distro_info["id"] = "cloudlinux"
+
+ return distro_info
+
+ def _parse_distro_release_file(self, filepath: str) -> Dict[str, str]:
+ """
+ Parse a distro release file.
+
+ Parameters:
+
+ * filepath: Path name of the distro release file.
+
+ Returns:
+ A dictionary containing all information items.
+ """
+ try:
+ with open(filepath, encoding="utf-8") as fp:
+ # Only parse the first line. For instance, on SLES there
+ # are multiple lines. We don't want them...
+ return self._parse_distro_release_content(fp.readline())
+ except OSError:
+ # Ignore not being able to read a specific, seemingly version
+ # related file.
+ # See https://github.com/python-distro/distro/issues/162
+ return {}
+
+ @staticmethod
+ def _parse_distro_release_content(line: str) -> Dict[str, str]:
+ """
+ Parse a line from a distro release file.
+
+ Parameters:
+ * line: Line from the distro release file. Must be a unicode string
+ or a UTF-8 encoded byte string.
+
+ Returns:
+ A dictionary containing all information items.
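+
+        A parsing sketch (a classic CentOS release line):
+
+        .. sourcecode:: python
+
+            >>> LinuxDistribution._parse_distro_release_content(
+            ...     "CentOS Linux release 7.1.1503 (Core)")
+            {'name': 'CentOS Linux', 'version_id': '7.1.1503', 'codename': 'Core'}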
+ """
+ matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
+ distro_info = {}
+ if matches:
+ # regexp ensures non-None
+ distro_info["name"] = matches.group(3)[::-1]
+ if matches.group(2):
+ distro_info["version_id"] = matches.group(2)[::-1]
+ if matches.group(1):
+ distro_info["codename"] = matches.group(1)[::-1]
+ elif line:
+ distro_info["name"] = line.strip()
+ return distro_info
+
+
+_distro = LinuxDistribution()
+
+
+def main() -> None:
+ logger = logging.getLogger(__name__)
+ logger.setLevel(logging.DEBUG)
+ logger.addHandler(logging.StreamHandler(sys.stdout))
+
+ parser = argparse.ArgumentParser(description="OS distro info tool")
+ parser.add_argument(
+ "--json", "-j", help="Output in machine readable format", action="store_true"
+ )
+
+ parser.add_argument(
+ "--root-dir",
+ "-r",
+ type=str,
+ dest="root_dir",
+ help="Path to the root filesystem directory (defaults to /)",
+ )
+
+ args = parser.parse_args()
+
+ if args.root_dir:
+ dist = LinuxDistribution(
+ include_lsb=False,
+ include_uname=False,
+ include_oslevel=False,
+ root_dir=args.root_dir,
+ )
+ else:
+ dist = _distro
+
+ if args.json:
+ logger.info(json.dumps(dist.info(), indent=4, sort_keys=True))
+ else:
+ logger.info("Name: %s", dist.name(pretty=True))
+ distribution_version = dist.version(pretty=True)
+ logger.info("Version: %s", distribution_version)
+ distribution_codename = dist.codename()
+ logger.info("Codename: %s", distribution_codename)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/third_party/python/pip/pip/_vendor/idna/__init__.py b/third_party/python/pip/pip/_vendor/idna/__init__.py
new file mode 100644
index 0000000000..a40eeafcc9
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/idna/__init__.py
@@ -0,0 +1,44 @@
+from .package_data import __version__
+from .core import (
+ IDNABidiError,
+ IDNAError,
+ InvalidCodepoint,
+ InvalidCodepointContext,
+ alabel,
+ check_bidi,
+ check_hyphen_ok,
+ check_initial_combiner,
+ check_label,
+ check_nfc,
+ decode,
+ encode,
+ ulabel,
+ uts46_remap,
+ valid_contextj,
+ valid_contexto,
+ valid_label_length,
+ valid_string_length,
+)
+from .intranges import intranges_contain
+
+__all__ = [
+ "IDNABidiError",
+ "IDNAError",
+ "InvalidCodepoint",
+ "InvalidCodepointContext",
+ "alabel",
+ "check_bidi",
+ "check_hyphen_ok",
+ "check_initial_combiner",
+ "check_label",
+ "check_nfc",
+ "decode",
+ "encode",
+ "intranges_contain",
+ "ulabel",
+ "uts46_remap",
+ "valid_contextj",
+ "valid_contexto",
+ "valid_label_length",
+ "valid_string_length",
+]
diff --git a/third_party/python/pip/pip/_vendor/idna/codec.py b/third_party/python/pip/pip/_vendor/idna/codec.py
new file mode 100644
index 0000000000..1ca9ba62c2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/idna/codec.py
@@ -0,0 +1,112 @@
+from .core import encode, decode, alabel, ulabel, IDNAError
+import codecs
+import re
+from typing import Tuple, Optional
+
+_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
+
+class Codec(codecs.Codec):
+
+ def encode(self, data: str, errors: str = 'strict') -> Tuple[bytes, int]:
+ if errors != 'strict':
+ raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
+
+ if not data:
+ return b"", 0
+
+ return encode(data), len(data)
+
+ def decode(self, data: bytes, errors: str = 'strict') -> Tuple[str, int]:
+ if errors != 'strict':
+ raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
+
+ if not data:
+ return '', 0
+
+ return decode(data), len(data)
+
+class IncrementalEncoder(codecs.BufferedIncrementalEncoder):
+ def _buffer_encode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore
+ if errors != 'strict':
+ raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
+
+ if not data:
+ return "", 0
+
+ labels = _unicode_dots_re.split(data)
+ trailing_dot = ''
+ if labels:
+ if not labels[-1]:
+ trailing_dot = '.'
+ del labels[-1]
+ elif not final:
+ # Keep potentially unfinished label until the next call
+ del labels[-1]
+ if labels:
+ trailing_dot = '.'
+
+ result = []
+ size = 0
+ for label in labels:
+ result.append(alabel(label))
+ if size:
+ size += 1
+ size += len(label)
+
+ # Join with U+002E
+ result_str = '.'.join(result) + trailing_dot # type: ignore
+ size += len(trailing_dot)
+ return result_str, size
+
+class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
+ def _buffer_decode(self, data: str, errors: str, final: bool) -> Tuple[str, int]: # type: ignore
+ if errors != 'strict':
+ raise IDNAError('Unsupported error handling \"{}\"'.format(errors))
+
+ if not data:
+ return ('', 0)
+
+ labels = _unicode_dots_re.split(data)
+ trailing_dot = ''
+ if labels:
+ if not labels[-1]:
+ trailing_dot = '.'
+ del labels[-1]
+ elif not final:
+ # Keep potentially unfinished label until the next call
+ del labels[-1]
+ if labels:
+ trailing_dot = '.'
+
+ result = []
+ size = 0
+ for label in labels:
+ result.append(ulabel(label))
+ if size:
+ size += 1
+ size += len(label)
+
+ result_str = '.'.join(result) + trailing_dot
+ size += len(trailing_dot)
+ return (result_str, size)
+
+
+class StreamWriter(Codec, codecs.StreamWriter):
+ pass
+
+
+class StreamReader(Codec, codecs.StreamReader):
+ pass
+
+
+def getregentry() -> codecs.CodecInfo:
+ # Compatibility as a search_function for codecs.register()
+ return codecs.CodecInfo(
+ name='idna',
+ encode=Codec().encode, # type: ignore
+ decode=Codec().decode, # type: ignore
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamwriter=StreamWriter,
+ streamreader=StreamReader,
+ )
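+
+# A registration sketch (hypothetical alias: Python already ships a built-in
+# 'idna' codec, so a distinct search name is used here):
+#
+#     import codecs
+#     codecs.register(lambda name: getregentry() if name == 'idna2008' else None)
+#     'königsgäßchen'.encode('idna2008')  # b'xn--knigsgchen-b4a3dun'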
diff --git a/third_party/python/pip/pip/_vendor/idna/compat.py b/third_party/python/pip/pip/_vendor/idna/compat.py
new file mode 100644
index 0000000000..786e6bda63
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/idna/compat.py
@@ -0,0 +1,13 @@
+from .core import *
+from .codec import *
+from typing import Any, Union
+
+def ToASCII(label: str) -> bytes:
+ return encode(label)
+
+def ToUnicode(label: Union[bytes, bytearray]) -> str:
+ return decode(label)
+
+def nameprep(s: Any) -> None:
+    raise NotImplementedError('IDNA 2008 does not utilise the nameprep protocol')
+
diff --git a/third_party/python/pip/pip/_vendor/idna/core.py b/third_party/python/pip/pip/_vendor/idna/core.py
new file mode 100644
index 0000000000..4f30037110
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/idna/core.py
@@ -0,0 +1,400 @@
+from . import idnadata
+import bisect
+import unicodedata
+import re
+from typing import Union, Optional
+from .intranges import intranges_contain
+
+_virama_combining_class = 9
+_alabel_prefix = b'xn--'
+_unicode_dots_re = re.compile('[\u002e\u3002\uff0e\uff61]')
+
+class IDNAError(UnicodeError):
+ """ Base exception for all IDNA-encoding related problems """
+ pass
+
+
+class IDNABidiError(IDNAError):
+ """ Exception when bidirectional requirements are not satisfied """
+ pass
+
+
+class InvalidCodepoint(IDNAError):
+ """ Exception when a disallowed or unallocated codepoint is used """
+ pass
+
+
+class InvalidCodepointContext(IDNAError):
+ """ Exception when the codepoint is not valid in the context it is used """
+ pass
+
+
+def _combining_class(cp: int) -> int:
+ v = unicodedata.combining(chr(cp))
+ if v == 0:
+ if not unicodedata.name(chr(cp)):
+ raise ValueError('Unknown character in unicodedata')
+ return v
+
+def _is_script(cp: str, script: str) -> bool:
+ return intranges_contain(ord(cp), idnadata.scripts[script])
+
+def _punycode(s: str) -> bytes:
+ return s.encode('punycode')
+
+def _unot(s: int) -> str:
+ return 'U+{:04X}'.format(s)
+
+
+def valid_label_length(label: Union[bytes, str]) -> bool:
+ if len(label) > 63:
+ return False
+ return True
+
+
+def valid_string_length(label: Union[bytes, str], trailing_dot: bool) -> bool:
+ if len(label) > (254 if trailing_dot else 253):
+ return False
+ return True
+
+
+def check_bidi(label: str, check_ltr: bool = False) -> bool:
+ # Bidi rules should only be applied if string contains RTL characters
+ bidi_label = False
+ for (idx, cp) in enumerate(label, 1):
+ direction = unicodedata.bidirectional(cp)
+ if direction == '':
+ # String likely comes from a newer version of Unicode
+ raise IDNABidiError('Unknown directionality in label {} at position {}'.format(repr(label), idx))
+ if direction in ['R', 'AL', 'AN']:
+ bidi_label = True
+ if not bidi_label and not check_ltr:
+ return True
+
+ # Bidi rule 1
+ direction = unicodedata.bidirectional(label[0])
+ if direction in ['R', 'AL']:
+ rtl = True
+ elif direction == 'L':
+ rtl = False
+ else:
+ raise IDNABidiError('First codepoint in label {} must be directionality L, R or AL'.format(repr(label)))
+
+ valid_ending = False
+ number_type = None # type: Optional[str]
+ for (idx, cp) in enumerate(label, 1):
+ direction = unicodedata.bidirectional(cp)
+
+ if rtl:
+ # Bidi rule 2
+ if not direction in ['R', 'AL', 'AN', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
+ raise IDNABidiError('Invalid direction for codepoint at position {} in a right-to-left label'.format(idx))
+ # Bidi rule 3
+ if direction in ['R', 'AL', 'EN', 'AN']:
+ valid_ending = True
+ elif direction != 'NSM':
+ valid_ending = False
+ # Bidi rule 4
+ if direction in ['AN', 'EN']:
+ if not number_type:
+ number_type = direction
+ else:
+ if number_type != direction:
+ raise IDNABidiError('Can not mix numeral types in a right-to-left label')
+ else:
+ # Bidi rule 5
+ if not direction in ['L', 'EN', 'ES', 'CS', 'ET', 'ON', 'BN', 'NSM']:
+ raise IDNABidiError('Invalid direction for codepoint at position {} in a left-to-right label'.format(idx))
+ # Bidi rule 6
+ if direction in ['L', 'EN']:
+ valid_ending = True
+ elif direction != 'NSM':
+ valid_ending = False
+
+ if not valid_ending:
+ raise IDNABidiError('Label ends with illegal codepoint directionality')
+
+ return True
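
A few illustrative calls for the Bidi rules above (RFC 5893), using Arabic letters as the RTL example; the outcomes follow directly from the rule branches:

    check_bidi('abc')    # True: no RTL characters, so the rules are vacuous
    check_bidi('اب1')    # True: starts AL, ends with EN, a valid RTL label
    check_bidi('ا1٢')    # raises IDNABidiError: EN and AN digits mixed (rule 4)
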
+
+
+def check_initial_combiner(label: str) -> bool:
+ if unicodedata.category(label[0])[0] == 'M':
+ raise IDNAError('Label begins with an illegal combining character')
+ return True
+
+
+def check_hyphen_ok(label: str) -> bool:
+ if label[2:4] == '--':
+ raise IDNAError('Label has disallowed hyphens in 3rd and 4th position')
+ if label[0] == '-' or label[-1] == '-':
+ raise IDNAError('Label must not start or end with a hyphen')
+ return True
+
+
+def check_nfc(label: str) -> None:
+ if unicodedata.normalize('NFC', label) != label:
+ raise IDNAError('Label must be in Normalization Form C')
+
+
+def valid_contextj(label: str, pos: int) -> bool:
+ cp_value = ord(label[pos])
+
+ if cp_value == 0x200c:
+
+ if pos > 0:
+ if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
+ return True
+
+ ok = False
+ for i in range(pos-1, -1, -1):
+ joining_type = idnadata.joining_types.get(ord(label[i]))
+ if joining_type == ord('T'):
+ continue
+ if joining_type in [ord('L'), ord('D')]:
+ ok = True
+ break
+
+ if not ok:
+ return False
+
+ ok = False
+ for i in range(pos+1, len(label)):
+ joining_type = idnadata.joining_types.get(ord(label[i]))
+ if joining_type == ord('T'):
+ continue
+ if joining_type in [ord('R'), ord('D')]:
+ ok = True
+ break
+ return ok
+
+ if cp_value == 0x200d:
+
+ if pos > 0:
+ if _combining_class(ord(label[pos - 1])) == _virama_combining_class:
+ return True
+ return False
+
+ else:
+
+ return False
+
+
+def valid_contexto(label: str, pos: int, exception: bool = False) -> bool:
+ cp_value = ord(label[pos])
+
+ if cp_value == 0x00b7:
+ if 0 < pos < len(label)-1:
+ if ord(label[pos - 1]) == 0x006c and ord(label[pos + 1]) == 0x006c:
+ return True
+ return False
+
+ elif cp_value == 0x0375:
+ if pos < len(label)-1 and len(label) > 1:
+ return _is_script(label[pos + 1], 'Greek')
+ return False
+
+ elif cp_value == 0x05f3 or cp_value == 0x05f4:
+ if pos > 0:
+ return _is_script(label[pos - 1], 'Hebrew')
+ return False
+
+ elif cp_value == 0x30fb:
+ for cp in label:
+ if cp == '\u30fb':
+ continue
+ if _is_script(cp, 'Hiragana') or _is_script(cp, 'Katakana') or _is_script(cp, 'Han'):
+ return True
+ return False
+
+ elif 0x660 <= cp_value <= 0x669:
+ for cp in label:
+ if 0x6f0 <= ord(cp) <= 0x06f9:
+ return False
+ return True
+
+ elif 0x6f0 <= cp_value <= 0x6f9:
+ for cp in label:
+ if 0x660 <= ord(cp) <= 0x0669:
+ return False
+ return True
+
+ return False
+
+
+def check_label(label: Union[str, bytes, bytearray]) -> None:
+ if isinstance(label, (bytes, bytearray)):
+ label = label.decode('utf-8')
+ if len(label) == 0:
+ raise IDNAError('Empty Label')
+
+ check_nfc(label)
+ check_hyphen_ok(label)
+ check_initial_combiner(label)
+
+ for (pos, cp) in enumerate(label):
+ cp_value = ord(cp)
+ if intranges_contain(cp_value, idnadata.codepoint_classes['PVALID']):
+ continue
+ elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTJ']):
+ try:
+ if not valid_contextj(label, pos):
+ raise InvalidCodepointContext('Joiner {} not allowed at position {} in {}'.format(
+ _unot(cp_value), pos+1, repr(label)))
+ except ValueError:
+ raise IDNAError('Unknown codepoint adjacent to joiner {} at position {} in {}'.format(
+ _unot(cp_value), pos+1, repr(label)))
+ elif intranges_contain(cp_value, idnadata.codepoint_classes['CONTEXTO']):
+ if not valid_contexto(label, pos):
+ raise InvalidCodepointContext('Codepoint {} not allowed at position {} in {}'.format(_unot(cp_value), pos+1, repr(label)))
+ else:
+ raise InvalidCodepoint('Codepoint {} at position {} of {} not allowed'.format(_unot(cp_value), pos+1, repr(label)))
+
+ check_bidi(label)
+
+
+def alabel(label: str) -> bytes:
+ try:
+ label_bytes = label.encode('ascii')
+ ulabel(label_bytes)
+ if not valid_label_length(label_bytes):
+ raise IDNAError('Label too long')
+ return label_bytes
+ except UnicodeEncodeError:
+ pass
+
+ if not label:
+ raise IDNAError('No Input')
+
+ label = str(label)
+ check_label(label)
+ label_bytes = _punycode(label)
+ label_bytes = _alabel_prefix + label_bytes
+
+ if not valid_label_length(label_bytes):
+ raise IDNAError('Label too long')
+
+ return label_bytes
+
+
+def ulabel(label: Union[str, bytes, bytearray]) -> str:
+ if not isinstance(label, (bytes, bytearray)):
+ try:
+ label_bytes = label.encode('ascii')
+ except UnicodeEncodeError:
+ check_label(label)
+ return label
+ else:
+ label_bytes = label
+
+ label_bytes = label_bytes.lower()
+ if label_bytes.startswith(_alabel_prefix):
+ label_bytes = label_bytes[len(_alabel_prefix):]
+ if not label_bytes:
+ raise IDNAError('Malformed A-label, no Punycode eligible content found')
+ if label_bytes.decode('ascii')[-1] == '-':
+ raise IDNAError('A-label must not end with a hyphen')
+ else:
+ check_label(label_bytes)
+ return label_bytes.decode('ascii')
+
+ try:
+ label = label_bytes.decode('punycode')
+ except UnicodeError:
+ raise IDNAError('Invalid A-label')
+ check_label(label)
+ return label
+
+
+def uts46_remap(domain: str, std3_rules: bool = True, transitional: bool = False) -> str:
+ """Re-map the characters in the string according to UTS46 processing."""
+ from .uts46data import uts46data
+ output = ''
+
+ for pos, char in enumerate(domain):
+ code_point = ord(char)
+ try:
+ uts46row = uts46data[code_point if code_point < 256 else
+ bisect.bisect_left(uts46data, (code_point, 'Z')) - 1]
+ status = uts46row[1]
+ replacement = None # type: Optional[str]
+ if len(uts46row) == 3:
+ replacement = uts46row[2] # type: ignore
+ if (status == 'V' or
+ (status == 'D' and not transitional) or
+ (status == '3' and not std3_rules and replacement is None)):
+ output += char
+ elif replacement is not None and (status == 'M' or
+ (status == '3' and not std3_rules) or
+ (status == 'D' and transitional)):
+ output += replacement
+ elif status != 'I':
+ raise IndexError()
+ except IndexError:
+ raise InvalidCodepoint(
+ 'Codepoint {} not allowed at position {} in {}'.format(
+ _unot(code_point), pos + 1, repr(domain)))
+
+ return unicodedata.normalize('NFC', output)
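
The single-letter statuses consumed above come from the UTS #46 mapping table (uts46data.py, later in this diff): 'V' valid, 'M' mapped, 'D' deviation, 'I' ignored, '3' disallowed when STD3 rules apply, and anything else ('X') disallowed. Two illustrative calls, assuming the vendored import path:

    from pip._vendor.idna.core import uts46_remap

    uts46_remap('Straße')                     # 'straße'  ('ß' is a deviation, kept)
    uts46_remap('Straße', transitional=True)  # 'strasse' (deviation mapped)
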
+
+
+def encode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False, transitional: bool = False) -> bytes:
+ if isinstance(s, (bytes, bytearray)):
+ try:
+ s = s.decode('ascii')
+ except UnicodeDecodeError:
+ raise IDNAError('should pass a unicode string to the function rather than a byte string.')
+ if uts46:
+ s = uts46_remap(s, std3_rules, transitional)
+ trailing_dot = False
+ result = []
+ if strict:
+ labels = s.split('.')
+ else:
+ labels = _unicode_dots_re.split(s)
+ if not labels or labels == ['']:
+ raise IDNAError('Empty domain')
+ if labels[-1] == '':
+ del labels[-1]
+ trailing_dot = True
+ for label in labels:
+ s = alabel(label)
+ if s:
+ result.append(s)
+ else:
+ raise IDNAError('Empty label')
+ if trailing_dot:
+ result.append(b'')
+ s = b'.'.join(result)
+ if not valid_string_length(s, trailing_dot):
+ raise IDNAError('Domain too long')
+ return s
+
+
+def decode(s: Union[str, bytes, bytearray], strict: bool = False, uts46: bool = False, std3_rules: bool = False) -> str:
+ try:
+ if isinstance(s, (bytes, bytearray)):
+ s = s.decode('ascii')
+ except UnicodeDecodeError:
+ raise IDNAError('Invalid ASCII in A-label')
+ if uts46:
+ s = uts46_remap(s, std3_rules, False)
+ trailing_dot = False
+ result = []
+ if not strict:
+ labels = _unicode_dots_re.split(s)
+ else:
+ labels = s.split('.')
+ if not labels or labels == ['']:
+ raise IDNAError('Empty domain')
+ if not labels[-1]:
+ del labels[-1]
+ trailing_dot = True
+ for label in labels:
+ s = ulabel(label)
+ if s:
+ result.append(s)
+ else:
+ raise IDNAError('Empty label')
+ if trailing_dot:
+ result.append('')
+ return '.'.join(result)
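
encode() and decode() above are the module's public entry points; the check_* and label helpers in this file exist to support them. A brief usage sketch, assuming the vendored import path:

    from pip._vendor.idna.core import encode, decode

    encode('ドメイン.テスト')             # b'xn--eckwd4c7c.xn--zckzah'
    decode(b'xn--eckwd4c7c.xn--zckzah')   # 'ドメイン.テスト'
    encode('Example.COM', uts46=True)     # b'example.com' (remapped, then encoded)
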
diff --git a/third_party/python/pip/pip/_vendor/idna/idnadata.py b/third_party/python/pip/pip/_vendor/idna/idnadata.py
new file mode 100644
index 0000000000..67db462582
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/idna/idnadata.py
@@ -0,0 +1,2151 @@
+# This file is automatically generated by tools/idna-data
+
+__version__ = '15.0.0'
+scripts = {
+ 'Greek': (
+ 0x37000000374,
+ 0x37500000378,
+ 0x37a0000037e,
+ 0x37f00000380,
+ 0x38400000385,
+ 0x38600000387,
+ 0x3880000038b,
+ 0x38c0000038d,
+ 0x38e000003a2,
+ 0x3a3000003e2,
+ 0x3f000000400,
+ 0x1d2600001d2b,
+ 0x1d5d00001d62,
+ 0x1d6600001d6b,
+ 0x1dbf00001dc0,
+ 0x1f0000001f16,
+ 0x1f1800001f1e,
+ 0x1f2000001f46,
+ 0x1f4800001f4e,
+ 0x1f5000001f58,
+ 0x1f5900001f5a,
+ 0x1f5b00001f5c,
+ 0x1f5d00001f5e,
+ 0x1f5f00001f7e,
+ 0x1f8000001fb5,
+ 0x1fb600001fc5,
+ 0x1fc600001fd4,
+ 0x1fd600001fdc,
+ 0x1fdd00001ff0,
+ 0x1ff200001ff5,
+ 0x1ff600001fff,
+ 0x212600002127,
+ 0xab650000ab66,
+ 0x101400001018f,
+ 0x101a0000101a1,
+ 0x1d2000001d246,
+ ),
+ 'Han': (
+ 0x2e8000002e9a,
+ 0x2e9b00002ef4,
+ 0x2f0000002fd6,
+ 0x300500003006,
+ 0x300700003008,
+ 0x30210000302a,
+ 0x30380000303c,
+ 0x340000004dc0,
+ 0x4e000000a000,
+ 0xf9000000fa6e,
+ 0xfa700000fada,
+ 0x16fe200016fe4,
+ 0x16ff000016ff2,
+ 0x200000002a6e0,
+ 0x2a7000002b73a,
+ 0x2b7400002b81e,
+ 0x2b8200002cea2,
+ 0x2ceb00002ebe1,
+ 0x2f8000002fa1e,
+ 0x300000003134b,
+ 0x31350000323b0,
+ ),
+ 'Hebrew': (
+ 0x591000005c8,
+ 0x5d0000005eb,
+ 0x5ef000005f5,
+ 0xfb1d0000fb37,
+ 0xfb380000fb3d,
+ 0xfb3e0000fb3f,
+ 0xfb400000fb42,
+ 0xfb430000fb45,
+ 0xfb460000fb50,
+ ),
+ 'Hiragana': (
+ 0x304100003097,
+ 0x309d000030a0,
+ 0x1b0010001b120,
+ 0x1b1320001b133,
+ 0x1b1500001b153,
+ 0x1f2000001f201,
+ ),
+ 'Katakana': (
+ 0x30a1000030fb,
+ 0x30fd00003100,
+ 0x31f000003200,
+ 0x32d0000032ff,
+ 0x330000003358,
+ 0xff660000ff70,
+ 0xff710000ff9e,
+ 0x1aff00001aff4,
+ 0x1aff50001affc,
+ 0x1affd0001afff,
+ 0x1b0000001b001,
+ 0x1b1200001b123,
+ 0x1b1550001b156,
+ 0x1b1640001b168,
+ ),
+}
+joining_types = {
+ 0x600: 85,
+ 0x601: 85,
+ 0x602: 85,
+ 0x603: 85,
+ 0x604: 85,
+ 0x605: 85,
+ 0x608: 85,
+ 0x60b: 85,
+ 0x620: 68,
+ 0x621: 85,
+ 0x622: 82,
+ 0x623: 82,
+ 0x624: 82,
+ 0x625: 82,
+ 0x626: 68,
+ 0x627: 82,
+ 0x628: 68,
+ 0x629: 82,
+ 0x62a: 68,
+ 0x62b: 68,
+ 0x62c: 68,
+ 0x62d: 68,
+ 0x62e: 68,
+ 0x62f: 82,
+ 0x630: 82,
+ 0x631: 82,
+ 0x632: 82,
+ 0x633: 68,
+ 0x634: 68,
+ 0x635: 68,
+ 0x636: 68,
+ 0x637: 68,
+ 0x638: 68,
+ 0x639: 68,
+ 0x63a: 68,
+ 0x63b: 68,
+ 0x63c: 68,
+ 0x63d: 68,
+ 0x63e: 68,
+ 0x63f: 68,
+ 0x640: 67,
+ 0x641: 68,
+ 0x642: 68,
+ 0x643: 68,
+ 0x644: 68,
+ 0x645: 68,
+ 0x646: 68,
+ 0x647: 68,
+ 0x648: 82,
+ 0x649: 68,
+ 0x64a: 68,
+ 0x66e: 68,
+ 0x66f: 68,
+ 0x671: 82,
+ 0x672: 82,
+ 0x673: 82,
+ 0x674: 85,
+ 0x675: 82,
+ 0x676: 82,
+ 0x677: 82,
+ 0x678: 68,
+ 0x679: 68,
+ 0x67a: 68,
+ 0x67b: 68,
+ 0x67c: 68,
+ 0x67d: 68,
+ 0x67e: 68,
+ 0x67f: 68,
+ 0x680: 68,
+ 0x681: 68,
+ 0x682: 68,
+ 0x683: 68,
+ 0x684: 68,
+ 0x685: 68,
+ 0x686: 68,
+ 0x687: 68,
+ 0x688: 82,
+ 0x689: 82,
+ 0x68a: 82,
+ 0x68b: 82,
+ 0x68c: 82,
+ 0x68d: 82,
+ 0x68e: 82,
+ 0x68f: 82,
+ 0x690: 82,
+ 0x691: 82,
+ 0x692: 82,
+ 0x693: 82,
+ 0x694: 82,
+ 0x695: 82,
+ 0x696: 82,
+ 0x697: 82,
+ 0x698: 82,
+ 0x699: 82,
+ 0x69a: 68,
+ 0x69b: 68,
+ 0x69c: 68,
+ 0x69d: 68,
+ 0x69e: 68,
+ 0x69f: 68,
+ 0x6a0: 68,
+ 0x6a1: 68,
+ 0x6a2: 68,
+ 0x6a3: 68,
+ 0x6a4: 68,
+ 0x6a5: 68,
+ 0x6a6: 68,
+ 0x6a7: 68,
+ 0x6a8: 68,
+ 0x6a9: 68,
+ 0x6aa: 68,
+ 0x6ab: 68,
+ 0x6ac: 68,
+ 0x6ad: 68,
+ 0x6ae: 68,
+ 0x6af: 68,
+ 0x6b0: 68,
+ 0x6b1: 68,
+ 0x6b2: 68,
+ 0x6b3: 68,
+ 0x6b4: 68,
+ 0x6b5: 68,
+ 0x6b6: 68,
+ 0x6b7: 68,
+ 0x6b8: 68,
+ 0x6b9: 68,
+ 0x6ba: 68,
+ 0x6bb: 68,
+ 0x6bc: 68,
+ 0x6bd: 68,
+ 0x6be: 68,
+ 0x6bf: 68,
+ 0x6c0: 82,
+ 0x6c1: 68,
+ 0x6c2: 68,
+ 0x6c3: 82,
+ 0x6c4: 82,
+ 0x6c5: 82,
+ 0x6c6: 82,
+ 0x6c7: 82,
+ 0x6c8: 82,
+ 0x6c9: 82,
+ 0x6ca: 82,
+ 0x6cb: 82,
+ 0x6cc: 68,
+ 0x6cd: 82,
+ 0x6ce: 68,
+ 0x6cf: 82,
+ 0x6d0: 68,
+ 0x6d1: 68,
+ 0x6d2: 82,
+ 0x6d3: 82,
+ 0x6d5: 82,
+ 0x6dd: 85,
+ 0x6ee: 82,
+ 0x6ef: 82,
+ 0x6fa: 68,
+ 0x6fb: 68,
+ 0x6fc: 68,
+ 0x6ff: 68,
+ 0x70f: 84,
+ 0x710: 82,
+ 0x712: 68,
+ 0x713: 68,
+ 0x714: 68,
+ 0x715: 82,
+ 0x716: 82,
+ 0x717: 82,
+ 0x718: 82,
+ 0x719: 82,
+ 0x71a: 68,
+ 0x71b: 68,
+ 0x71c: 68,
+ 0x71d: 68,
+ 0x71e: 82,
+ 0x71f: 68,
+ 0x720: 68,
+ 0x721: 68,
+ 0x722: 68,
+ 0x723: 68,
+ 0x724: 68,
+ 0x725: 68,
+ 0x726: 68,
+ 0x727: 68,
+ 0x728: 82,
+ 0x729: 68,
+ 0x72a: 82,
+ 0x72b: 68,
+ 0x72c: 82,
+ 0x72d: 68,
+ 0x72e: 68,
+ 0x72f: 82,
+ 0x74d: 82,
+ 0x74e: 68,
+ 0x74f: 68,
+ 0x750: 68,
+ 0x751: 68,
+ 0x752: 68,
+ 0x753: 68,
+ 0x754: 68,
+ 0x755: 68,
+ 0x756: 68,
+ 0x757: 68,
+ 0x758: 68,
+ 0x759: 82,
+ 0x75a: 82,
+ 0x75b: 82,
+ 0x75c: 68,
+ 0x75d: 68,
+ 0x75e: 68,
+ 0x75f: 68,
+ 0x760: 68,
+ 0x761: 68,
+ 0x762: 68,
+ 0x763: 68,
+ 0x764: 68,
+ 0x765: 68,
+ 0x766: 68,
+ 0x767: 68,
+ 0x768: 68,
+ 0x769: 68,
+ 0x76a: 68,
+ 0x76b: 82,
+ 0x76c: 82,
+ 0x76d: 68,
+ 0x76e: 68,
+ 0x76f: 68,
+ 0x770: 68,
+ 0x771: 82,
+ 0x772: 68,
+ 0x773: 82,
+ 0x774: 82,
+ 0x775: 68,
+ 0x776: 68,
+ 0x777: 68,
+ 0x778: 82,
+ 0x779: 82,
+ 0x77a: 68,
+ 0x77b: 68,
+ 0x77c: 68,
+ 0x77d: 68,
+ 0x77e: 68,
+ 0x77f: 68,
+ 0x7ca: 68,
+ 0x7cb: 68,
+ 0x7cc: 68,
+ 0x7cd: 68,
+ 0x7ce: 68,
+ 0x7cf: 68,
+ 0x7d0: 68,
+ 0x7d1: 68,
+ 0x7d2: 68,
+ 0x7d3: 68,
+ 0x7d4: 68,
+ 0x7d5: 68,
+ 0x7d6: 68,
+ 0x7d7: 68,
+ 0x7d8: 68,
+ 0x7d9: 68,
+ 0x7da: 68,
+ 0x7db: 68,
+ 0x7dc: 68,
+ 0x7dd: 68,
+ 0x7de: 68,
+ 0x7df: 68,
+ 0x7e0: 68,
+ 0x7e1: 68,
+ 0x7e2: 68,
+ 0x7e3: 68,
+ 0x7e4: 68,
+ 0x7e5: 68,
+ 0x7e6: 68,
+ 0x7e7: 68,
+ 0x7e8: 68,
+ 0x7e9: 68,
+ 0x7ea: 68,
+ 0x7fa: 67,
+ 0x840: 82,
+ 0x841: 68,
+ 0x842: 68,
+ 0x843: 68,
+ 0x844: 68,
+ 0x845: 68,
+ 0x846: 82,
+ 0x847: 82,
+ 0x848: 68,
+ 0x849: 82,
+ 0x84a: 68,
+ 0x84b: 68,
+ 0x84c: 68,
+ 0x84d: 68,
+ 0x84e: 68,
+ 0x84f: 68,
+ 0x850: 68,
+ 0x851: 68,
+ 0x852: 68,
+ 0x853: 68,
+ 0x854: 82,
+ 0x855: 68,
+ 0x856: 82,
+ 0x857: 82,
+ 0x858: 82,
+ 0x860: 68,
+ 0x861: 85,
+ 0x862: 68,
+ 0x863: 68,
+ 0x864: 68,
+ 0x865: 68,
+ 0x866: 85,
+ 0x867: 82,
+ 0x868: 68,
+ 0x869: 82,
+ 0x86a: 82,
+ 0x870: 82,
+ 0x871: 82,
+ 0x872: 82,
+ 0x873: 82,
+ 0x874: 82,
+ 0x875: 82,
+ 0x876: 82,
+ 0x877: 82,
+ 0x878: 82,
+ 0x879: 82,
+ 0x87a: 82,
+ 0x87b: 82,
+ 0x87c: 82,
+ 0x87d: 82,
+ 0x87e: 82,
+ 0x87f: 82,
+ 0x880: 82,
+ 0x881: 82,
+ 0x882: 82,
+ 0x883: 67,
+ 0x884: 67,
+ 0x885: 67,
+ 0x886: 68,
+ 0x887: 85,
+ 0x888: 85,
+ 0x889: 68,
+ 0x88a: 68,
+ 0x88b: 68,
+ 0x88c: 68,
+ 0x88d: 68,
+ 0x88e: 82,
+ 0x890: 85,
+ 0x891: 85,
+ 0x8a0: 68,
+ 0x8a1: 68,
+ 0x8a2: 68,
+ 0x8a3: 68,
+ 0x8a4: 68,
+ 0x8a5: 68,
+ 0x8a6: 68,
+ 0x8a7: 68,
+ 0x8a8: 68,
+ 0x8a9: 68,
+ 0x8aa: 82,
+ 0x8ab: 82,
+ 0x8ac: 82,
+ 0x8ad: 85,
+ 0x8ae: 82,
+ 0x8af: 68,
+ 0x8b0: 68,
+ 0x8b1: 82,
+ 0x8b2: 82,
+ 0x8b3: 68,
+ 0x8b4: 68,
+ 0x8b5: 68,
+ 0x8b6: 68,
+ 0x8b7: 68,
+ 0x8b8: 68,
+ 0x8b9: 82,
+ 0x8ba: 68,
+ 0x8bb: 68,
+ 0x8bc: 68,
+ 0x8bd: 68,
+ 0x8be: 68,
+ 0x8bf: 68,
+ 0x8c0: 68,
+ 0x8c1: 68,
+ 0x8c2: 68,
+ 0x8c3: 68,
+ 0x8c4: 68,
+ 0x8c5: 68,
+ 0x8c6: 68,
+ 0x8c7: 68,
+ 0x8c8: 68,
+ 0x8e2: 85,
+ 0x1806: 85,
+ 0x1807: 68,
+ 0x180a: 67,
+ 0x180e: 85,
+ 0x1820: 68,
+ 0x1821: 68,
+ 0x1822: 68,
+ 0x1823: 68,
+ 0x1824: 68,
+ 0x1825: 68,
+ 0x1826: 68,
+ 0x1827: 68,
+ 0x1828: 68,
+ 0x1829: 68,
+ 0x182a: 68,
+ 0x182b: 68,
+ 0x182c: 68,
+ 0x182d: 68,
+ 0x182e: 68,
+ 0x182f: 68,
+ 0x1830: 68,
+ 0x1831: 68,
+ 0x1832: 68,
+ 0x1833: 68,
+ 0x1834: 68,
+ 0x1835: 68,
+ 0x1836: 68,
+ 0x1837: 68,
+ 0x1838: 68,
+ 0x1839: 68,
+ 0x183a: 68,
+ 0x183b: 68,
+ 0x183c: 68,
+ 0x183d: 68,
+ 0x183e: 68,
+ 0x183f: 68,
+ 0x1840: 68,
+ 0x1841: 68,
+ 0x1842: 68,
+ 0x1843: 68,
+ 0x1844: 68,
+ 0x1845: 68,
+ 0x1846: 68,
+ 0x1847: 68,
+ 0x1848: 68,
+ 0x1849: 68,
+ 0x184a: 68,
+ 0x184b: 68,
+ 0x184c: 68,
+ 0x184d: 68,
+ 0x184e: 68,
+ 0x184f: 68,
+ 0x1850: 68,
+ 0x1851: 68,
+ 0x1852: 68,
+ 0x1853: 68,
+ 0x1854: 68,
+ 0x1855: 68,
+ 0x1856: 68,
+ 0x1857: 68,
+ 0x1858: 68,
+ 0x1859: 68,
+ 0x185a: 68,
+ 0x185b: 68,
+ 0x185c: 68,
+ 0x185d: 68,
+ 0x185e: 68,
+ 0x185f: 68,
+ 0x1860: 68,
+ 0x1861: 68,
+ 0x1862: 68,
+ 0x1863: 68,
+ 0x1864: 68,
+ 0x1865: 68,
+ 0x1866: 68,
+ 0x1867: 68,
+ 0x1868: 68,
+ 0x1869: 68,
+ 0x186a: 68,
+ 0x186b: 68,
+ 0x186c: 68,
+ 0x186d: 68,
+ 0x186e: 68,
+ 0x186f: 68,
+ 0x1870: 68,
+ 0x1871: 68,
+ 0x1872: 68,
+ 0x1873: 68,
+ 0x1874: 68,
+ 0x1875: 68,
+ 0x1876: 68,
+ 0x1877: 68,
+ 0x1878: 68,
+ 0x1880: 85,
+ 0x1881: 85,
+ 0x1882: 85,
+ 0x1883: 85,
+ 0x1884: 85,
+ 0x1885: 84,
+ 0x1886: 84,
+ 0x1887: 68,
+ 0x1888: 68,
+ 0x1889: 68,
+ 0x188a: 68,
+ 0x188b: 68,
+ 0x188c: 68,
+ 0x188d: 68,
+ 0x188e: 68,
+ 0x188f: 68,
+ 0x1890: 68,
+ 0x1891: 68,
+ 0x1892: 68,
+ 0x1893: 68,
+ 0x1894: 68,
+ 0x1895: 68,
+ 0x1896: 68,
+ 0x1897: 68,
+ 0x1898: 68,
+ 0x1899: 68,
+ 0x189a: 68,
+ 0x189b: 68,
+ 0x189c: 68,
+ 0x189d: 68,
+ 0x189e: 68,
+ 0x189f: 68,
+ 0x18a0: 68,
+ 0x18a1: 68,
+ 0x18a2: 68,
+ 0x18a3: 68,
+ 0x18a4: 68,
+ 0x18a5: 68,
+ 0x18a6: 68,
+ 0x18a7: 68,
+ 0x18a8: 68,
+ 0x18aa: 68,
+ 0x200c: 85,
+ 0x200d: 67,
+ 0x202f: 85,
+ 0x2066: 85,
+ 0x2067: 85,
+ 0x2068: 85,
+ 0x2069: 85,
+ 0xa840: 68,
+ 0xa841: 68,
+ 0xa842: 68,
+ 0xa843: 68,
+ 0xa844: 68,
+ 0xa845: 68,
+ 0xa846: 68,
+ 0xa847: 68,
+ 0xa848: 68,
+ 0xa849: 68,
+ 0xa84a: 68,
+ 0xa84b: 68,
+ 0xa84c: 68,
+ 0xa84d: 68,
+ 0xa84e: 68,
+ 0xa84f: 68,
+ 0xa850: 68,
+ 0xa851: 68,
+ 0xa852: 68,
+ 0xa853: 68,
+ 0xa854: 68,
+ 0xa855: 68,
+ 0xa856: 68,
+ 0xa857: 68,
+ 0xa858: 68,
+ 0xa859: 68,
+ 0xa85a: 68,
+ 0xa85b: 68,
+ 0xa85c: 68,
+ 0xa85d: 68,
+ 0xa85e: 68,
+ 0xa85f: 68,
+ 0xa860: 68,
+ 0xa861: 68,
+ 0xa862: 68,
+ 0xa863: 68,
+ 0xa864: 68,
+ 0xa865: 68,
+ 0xa866: 68,
+ 0xa867: 68,
+ 0xa868: 68,
+ 0xa869: 68,
+ 0xa86a: 68,
+ 0xa86b: 68,
+ 0xa86c: 68,
+ 0xa86d: 68,
+ 0xa86e: 68,
+ 0xa86f: 68,
+ 0xa870: 68,
+ 0xa871: 68,
+ 0xa872: 76,
+ 0xa873: 85,
+ 0x10ac0: 68,
+ 0x10ac1: 68,
+ 0x10ac2: 68,
+ 0x10ac3: 68,
+ 0x10ac4: 68,
+ 0x10ac5: 82,
+ 0x10ac6: 85,
+ 0x10ac7: 82,
+ 0x10ac8: 85,
+ 0x10ac9: 82,
+ 0x10aca: 82,
+ 0x10acb: 85,
+ 0x10acc: 85,
+ 0x10acd: 76,
+ 0x10ace: 82,
+ 0x10acf: 82,
+ 0x10ad0: 82,
+ 0x10ad1: 82,
+ 0x10ad2: 82,
+ 0x10ad3: 68,
+ 0x10ad4: 68,
+ 0x10ad5: 68,
+ 0x10ad6: 68,
+ 0x10ad7: 76,
+ 0x10ad8: 68,
+ 0x10ad9: 68,
+ 0x10ada: 68,
+ 0x10adb: 68,
+ 0x10adc: 68,
+ 0x10add: 82,
+ 0x10ade: 68,
+ 0x10adf: 68,
+ 0x10ae0: 68,
+ 0x10ae1: 82,
+ 0x10ae2: 85,
+ 0x10ae3: 85,
+ 0x10ae4: 82,
+ 0x10aeb: 68,
+ 0x10aec: 68,
+ 0x10aed: 68,
+ 0x10aee: 68,
+ 0x10aef: 82,
+ 0x10b80: 68,
+ 0x10b81: 82,
+ 0x10b82: 68,
+ 0x10b83: 82,
+ 0x10b84: 82,
+ 0x10b85: 82,
+ 0x10b86: 68,
+ 0x10b87: 68,
+ 0x10b88: 68,
+ 0x10b89: 82,
+ 0x10b8a: 68,
+ 0x10b8b: 68,
+ 0x10b8c: 82,
+ 0x10b8d: 68,
+ 0x10b8e: 82,
+ 0x10b8f: 82,
+ 0x10b90: 68,
+ 0x10b91: 82,
+ 0x10ba9: 82,
+ 0x10baa: 82,
+ 0x10bab: 82,
+ 0x10bac: 82,
+ 0x10bad: 68,
+ 0x10bae: 68,
+ 0x10baf: 85,
+ 0x10d00: 76,
+ 0x10d01: 68,
+ 0x10d02: 68,
+ 0x10d03: 68,
+ 0x10d04: 68,
+ 0x10d05: 68,
+ 0x10d06: 68,
+ 0x10d07: 68,
+ 0x10d08: 68,
+ 0x10d09: 68,
+ 0x10d0a: 68,
+ 0x10d0b: 68,
+ 0x10d0c: 68,
+ 0x10d0d: 68,
+ 0x10d0e: 68,
+ 0x10d0f: 68,
+ 0x10d10: 68,
+ 0x10d11: 68,
+ 0x10d12: 68,
+ 0x10d13: 68,
+ 0x10d14: 68,
+ 0x10d15: 68,
+ 0x10d16: 68,
+ 0x10d17: 68,
+ 0x10d18: 68,
+ 0x10d19: 68,
+ 0x10d1a: 68,
+ 0x10d1b: 68,
+ 0x10d1c: 68,
+ 0x10d1d: 68,
+ 0x10d1e: 68,
+ 0x10d1f: 68,
+ 0x10d20: 68,
+ 0x10d21: 68,
+ 0x10d22: 82,
+ 0x10d23: 68,
+ 0x10f30: 68,
+ 0x10f31: 68,
+ 0x10f32: 68,
+ 0x10f33: 82,
+ 0x10f34: 68,
+ 0x10f35: 68,
+ 0x10f36: 68,
+ 0x10f37: 68,
+ 0x10f38: 68,
+ 0x10f39: 68,
+ 0x10f3a: 68,
+ 0x10f3b: 68,
+ 0x10f3c: 68,
+ 0x10f3d: 68,
+ 0x10f3e: 68,
+ 0x10f3f: 68,
+ 0x10f40: 68,
+ 0x10f41: 68,
+ 0x10f42: 68,
+ 0x10f43: 68,
+ 0x10f44: 68,
+ 0x10f45: 85,
+ 0x10f51: 68,
+ 0x10f52: 68,
+ 0x10f53: 68,
+ 0x10f54: 82,
+ 0x10f70: 68,
+ 0x10f71: 68,
+ 0x10f72: 68,
+ 0x10f73: 68,
+ 0x10f74: 82,
+ 0x10f75: 82,
+ 0x10f76: 68,
+ 0x10f77: 68,
+ 0x10f78: 68,
+ 0x10f79: 68,
+ 0x10f7a: 68,
+ 0x10f7b: 68,
+ 0x10f7c: 68,
+ 0x10f7d: 68,
+ 0x10f7e: 68,
+ 0x10f7f: 68,
+ 0x10f80: 68,
+ 0x10f81: 68,
+ 0x10fb0: 68,
+ 0x10fb1: 85,
+ 0x10fb2: 68,
+ 0x10fb3: 68,
+ 0x10fb4: 82,
+ 0x10fb5: 82,
+ 0x10fb6: 82,
+ 0x10fb7: 85,
+ 0x10fb8: 68,
+ 0x10fb9: 82,
+ 0x10fba: 82,
+ 0x10fbb: 68,
+ 0x10fbc: 68,
+ 0x10fbd: 82,
+ 0x10fbe: 68,
+ 0x10fbf: 68,
+ 0x10fc0: 85,
+ 0x10fc1: 68,
+ 0x10fc2: 82,
+ 0x10fc3: 82,
+ 0x10fc4: 68,
+ 0x10fc5: 85,
+ 0x10fc6: 85,
+ 0x10fc7: 85,
+ 0x10fc8: 85,
+ 0x10fc9: 82,
+ 0x10fca: 68,
+ 0x10fcb: 76,
+ 0x110bd: 85,
+ 0x110cd: 85,
+ 0x1e900: 68,
+ 0x1e901: 68,
+ 0x1e902: 68,
+ 0x1e903: 68,
+ 0x1e904: 68,
+ 0x1e905: 68,
+ 0x1e906: 68,
+ 0x1e907: 68,
+ 0x1e908: 68,
+ 0x1e909: 68,
+ 0x1e90a: 68,
+ 0x1e90b: 68,
+ 0x1e90c: 68,
+ 0x1e90d: 68,
+ 0x1e90e: 68,
+ 0x1e90f: 68,
+ 0x1e910: 68,
+ 0x1e911: 68,
+ 0x1e912: 68,
+ 0x1e913: 68,
+ 0x1e914: 68,
+ 0x1e915: 68,
+ 0x1e916: 68,
+ 0x1e917: 68,
+ 0x1e918: 68,
+ 0x1e919: 68,
+ 0x1e91a: 68,
+ 0x1e91b: 68,
+ 0x1e91c: 68,
+ 0x1e91d: 68,
+ 0x1e91e: 68,
+ 0x1e91f: 68,
+ 0x1e920: 68,
+ 0x1e921: 68,
+ 0x1e922: 68,
+ 0x1e923: 68,
+ 0x1e924: 68,
+ 0x1e925: 68,
+ 0x1e926: 68,
+ 0x1e927: 68,
+ 0x1e928: 68,
+ 0x1e929: 68,
+ 0x1e92a: 68,
+ 0x1e92b: 68,
+ 0x1e92c: 68,
+ 0x1e92d: 68,
+ 0x1e92e: 68,
+ 0x1e92f: 68,
+ 0x1e930: 68,
+ 0x1e931: 68,
+ 0x1e932: 68,
+ 0x1e933: 68,
+ 0x1e934: 68,
+ 0x1e935: 68,
+ 0x1e936: 68,
+ 0x1e937: 68,
+ 0x1e938: 68,
+ 0x1e939: 68,
+ 0x1e93a: 68,
+ 0x1e93b: 68,
+ 0x1e93c: 68,
+ 0x1e93d: 68,
+ 0x1e93e: 68,
+ 0x1e93f: 68,
+ 0x1e940: 68,
+ 0x1e941: 68,
+ 0x1e942: 68,
+ 0x1e943: 68,
+ 0x1e94b: 84,
+}
+codepoint_classes = {
+ 'PVALID': (
+ 0x2d0000002e,
+ 0x300000003a,
+ 0x610000007b,
+ 0xdf000000f7,
+ 0xf800000100,
+ 0x10100000102,
+ 0x10300000104,
+ 0x10500000106,
+ 0x10700000108,
+ 0x1090000010a,
+ 0x10b0000010c,
+ 0x10d0000010e,
+ 0x10f00000110,
+ 0x11100000112,
+ 0x11300000114,
+ 0x11500000116,
+ 0x11700000118,
+ 0x1190000011a,
+ 0x11b0000011c,
+ 0x11d0000011e,
+ 0x11f00000120,
+ 0x12100000122,
+ 0x12300000124,
+ 0x12500000126,
+ 0x12700000128,
+ 0x1290000012a,
+ 0x12b0000012c,
+ 0x12d0000012e,
+ 0x12f00000130,
+ 0x13100000132,
+ 0x13500000136,
+ 0x13700000139,
+ 0x13a0000013b,
+ 0x13c0000013d,
+ 0x13e0000013f,
+ 0x14200000143,
+ 0x14400000145,
+ 0x14600000147,
+ 0x14800000149,
+ 0x14b0000014c,
+ 0x14d0000014e,
+ 0x14f00000150,
+ 0x15100000152,
+ 0x15300000154,
+ 0x15500000156,
+ 0x15700000158,
+ 0x1590000015a,
+ 0x15b0000015c,
+ 0x15d0000015e,
+ 0x15f00000160,
+ 0x16100000162,
+ 0x16300000164,
+ 0x16500000166,
+ 0x16700000168,
+ 0x1690000016a,
+ 0x16b0000016c,
+ 0x16d0000016e,
+ 0x16f00000170,
+ 0x17100000172,
+ 0x17300000174,
+ 0x17500000176,
+ 0x17700000178,
+ 0x17a0000017b,
+ 0x17c0000017d,
+ 0x17e0000017f,
+ 0x18000000181,
+ 0x18300000184,
+ 0x18500000186,
+ 0x18800000189,
+ 0x18c0000018e,
+ 0x19200000193,
+ 0x19500000196,
+ 0x1990000019c,
+ 0x19e0000019f,
+ 0x1a1000001a2,
+ 0x1a3000001a4,
+ 0x1a5000001a6,
+ 0x1a8000001a9,
+ 0x1aa000001ac,
+ 0x1ad000001ae,
+ 0x1b0000001b1,
+ 0x1b4000001b5,
+ 0x1b6000001b7,
+ 0x1b9000001bc,
+ 0x1bd000001c4,
+ 0x1ce000001cf,
+ 0x1d0000001d1,
+ 0x1d2000001d3,
+ 0x1d4000001d5,
+ 0x1d6000001d7,
+ 0x1d8000001d9,
+ 0x1da000001db,
+ 0x1dc000001de,
+ 0x1df000001e0,
+ 0x1e1000001e2,
+ 0x1e3000001e4,
+ 0x1e5000001e6,
+ 0x1e7000001e8,
+ 0x1e9000001ea,
+ 0x1eb000001ec,
+ 0x1ed000001ee,
+ 0x1ef000001f1,
+ 0x1f5000001f6,
+ 0x1f9000001fa,
+ 0x1fb000001fc,
+ 0x1fd000001fe,
+ 0x1ff00000200,
+ 0x20100000202,
+ 0x20300000204,
+ 0x20500000206,
+ 0x20700000208,
+ 0x2090000020a,
+ 0x20b0000020c,
+ 0x20d0000020e,
+ 0x20f00000210,
+ 0x21100000212,
+ 0x21300000214,
+ 0x21500000216,
+ 0x21700000218,
+ 0x2190000021a,
+ 0x21b0000021c,
+ 0x21d0000021e,
+ 0x21f00000220,
+ 0x22100000222,
+ 0x22300000224,
+ 0x22500000226,
+ 0x22700000228,
+ 0x2290000022a,
+ 0x22b0000022c,
+ 0x22d0000022e,
+ 0x22f00000230,
+ 0x23100000232,
+ 0x2330000023a,
+ 0x23c0000023d,
+ 0x23f00000241,
+ 0x24200000243,
+ 0x24700000248,
+ 0x2490000024a,
+ 0x24b0000024c,
+ 0x24d0000024e,
+ 0x24f000002b0,
+ 0x2b9000002c2,
+ 0x2c6000002d2,
+ 0x2ec000002ed,
+ 0x2ee000002ef,
+ 0x30000000340,
+ 0x34200000343,
+ 0x3460000034f,
+ 0x35000000370,
+ 0x37100000372,
+ 0x37300000374,
+ 0x37700000378,
+ 0x37b0000037e,
+ 0x39000000391,
+ 0x3ac000003cf,
+ 0x3d7000003d8,
+ 0x3d9000003da,
+ 0x3db000003dc,
+ 0x3dd000003de,
+ 0x3df000003e0,
+ 0x3e1000003e2,
+ 0x3e3000003e4,
+ 0x3e5000003e6,
+ 0x3e7000003e8,
+ 0x3e9000003ea,
+ 0x3eb000003ec,
+ 0x3ed000003ee,
+ 0x3ef000003f0,
+ 0x3f3000003f4,
+ 0x3f8000003f9,
+ 0x3fb000003fd,
+ 0x43000000460,
+ 0x46100000462,
+ 0x46300000464,
+ 0x46500000466,
+ 0x46700000468,
+ 0x4690000046a,
+ 0x46b0000046c,
+ 0x46d0000046e,
+ 0x46f00000470,
+ 0x47100000472,
+ 0x47300000474,
+ 0x47500000476,
+ 0x47700000478,
+ 0x4790000047a,
+ 0x47b0000047c,
+ 0x47d0000047e,
+ 0x47f00000480,
+ 0x48100000482,
+ 0x48300000488,
+ 0x48b0000048c,
+ 0x48d0000048e,
+ 0x48f00000490,
+ 0x49100000492,
+ 0x49300000494,
+ 0x49500000496,
+ 0x49700000498,
+ 0x4990000049a,
+ 0x49b0000049c,
+ 0x49d0000049e,
+ 0x49f000004a0,
+ 0x4a1000004a2,
+ 0x4a3000004a4,
+ 0x4a5000004a6,
+ 0x4a7000004a8,
+ 0x4a9000004aa,
+ 0x4ab000004ac,
+ 0x4ad000004ae,
+ 0x4af000004b0,
+ 0x4b1000004b2,
+ 0x4b3000004b4,
+ 0x4b5000004b6,
+ 0x4b7000004b8,
+ 0x4b9000004ba,
+ 0x4bb000004bc,
+ 0x4bd000004be,
+ 0x4bf000004c0,
+ 0x4c2000004c3,
+ 0x4c4000004c5,
+ 0x4c6000004c7,
+ 0x4c8000004c9,
+ 0x4ca000004cb,
+ 0x4cc000004cd,
+ 0x4ce000004d0,
+ 0x4d1000004d2,
+ 0x4d3000004d4,
+ 0x4d5000004d6,
+ 0x4d7000004d8,
+ 0x4d9000004da,
+ 0x4db000004dc,
+ 0x4dd000004de,
+ 0x4df000004e0,
+ 0x4e1000004e2,
+ 0x4e3000004e4,
+ 0x4e5000004e6,
+ 0x4e7000004e8,
+ 0x4e9000004ea,
+ 0x4eb000004ec,
+ 0x4ed000004ee,
+ 0x4ef000004f0,
+ 0x4f1000004f2,
+ 0x4f3000004f4,
+ 0x4f5000004f6,
+ 0x4f7000004f8,
+ 0x4f9000004fa,
+ 0x4fb000004fc,
+ 0x4fd000004fe,
+ 0x4ff00000500,
+ 0x50100000502,
+ 0x50300000504,
+ 0x50500000506,
+ 0x50700000508,
+ 0x5090000050a,
+ 0x50b0000050c,
+ 0x50d0000050e,
+ 0x50f00000510,
+ 0x51100000512,
+ 0x51300000514,
+ 0x51500000516,
+ 0x51700000518,
+ 0x5190000051a,
+ 0x51b0000051c,
+ 0x51d0000051e,
+ 0x51f00000520,
+ 0x52100000522,
+ 0x52300000524,
+ 0x52500000526,
+ 0x52700000528,
+ 0x5290000052a,
+ 0x52b0000052c,
+ 0x52d0000052e,
+ 0x52f00000530,
+ 0x5590000055a,
+ 0x56000000587,
+ 0x58800000589,
+ 0x591000005be,
+ 0x5bf000005c0,
+ 0x5c1000005c3,
+ 0x5c4000005c6,
+ 0x5c7000005c8,
+ 0x5d0000005eb,
+ 0x5ef000005f3,
+ 0x6100000061b,
+ 0x62000000640,
+ 0x64100000660,
+ 0x66e00000675,
+ 0x679000006d4,
+ 0x6d5000006dd,
+ 0x6df000006e9,
+ 0x6ea000006f0,
+ 0x6fa00000700,
+ 0x7100000074b,
+ 0x74d000007b2,
+ 0x7c0000007f6,
+ 0x7fd000007fe,
+ 0x8000000082e,
+ 0x8400000085c,
+ 0x8600000086b,
+ 0x87000000888,
+ 0x8890000088f,
+ 0x898000008e2,
+ 0x8e300000958,
+ 0x96000000964,
+ 0x96600000970,
+ 0x97100000984,
+ 0x9850000098d,
+ 0x98f00000991,
+ 0x993000009a9,
+ 0x9aa000009b1,
+ 0x9b2000009b3,
+ 0x9b6000009ba,
+ 0x9bc000009c5,
+ 0x9c7000009c9,
+ 0x9cb000009cf,
+ 0x9d7000009d8,
+ 0x9e0000009e4,
+ 0x9e6000009f2,
+ 0x9fc000009fd,
+ 0x9fe000009ff,
+ 0xa0100000a04,
+ 0xa0500000a0b,
+ 0xa0f00000a11,
+ 0xa1300000a29,
+ 0xa2a00000a31,
+ 0xa3200000a33,
+ 0xa3500000a36,
+ 0xa3800000a3a,
+ 0xa3c00000a3d,
+ 0xa3e00000a43,
+ 0xa4700000a49,
+ 0xa4b00000a4e,
+ 0xa5100000a52,
+ 0xa5c00000a5d,
+ 0xa6600000a76,
+ 0xa8100000a84,
+ 0xa8500000a8e,
+ 0xa8f00000a92,
+ 0xa9300000aa9,
+ 0xaaa00000ab1,
+ 0xab200000ab4,
+ 0xab500000aba,
+ 0xabc00000ac6,
+ 0xac700000aca,
+ 0xacb00000ace,
+ 0xad000000ad1,
+ 0xae000000ae4,
+ 0xae600000af0,
+ 0xaf900000b00,
+ 0xb0100000b04,
+ 0xb0500000b0d,
+ 0xb0f00000b11,
+ 0xb1300000b29,
+ 0xb2a00000b31,
+ 0xb3200000b34,
+ 0xb3500000b3a,
+ 0xb3c00000b45,
+ 0xb4700000b49,
+ 0xb4b00000b4e,
+ 0xb5500000b58,
+ 0xb5f00000b64,
+ 0xb6600000b70,
+ 0xb7100000b72,
+ 0xb8200000b84,
+ 0xb8500000b8b,
+ 0xb8e00000b91,
+ 0xb9200000b96,
+ 0xb9900000b9b,
+ 0xb9c00000b9d,
+ 0xb9e00000ba0,
+ 0xba300000ba5,
+ 0xba800000bab,
+ 0xbae00000bba,
+ 0xbbe00000bc3,
+ 0xbc600000bc9,
+ 0xbca00000bce,
+ 0xbd000000bd1,
+ 0xbd700000bd8,
+ 0xbe600000bf0,
+ 0xc0000000c0d,
+ 0xc0e00000c11,
+ 0xc1200000c29,
+ 0xc2a00000c3a,
+ 0xc3c00000c45,
+ 0xc4600000c49,
+ 0xc4a00000c4e,
+ 0xc5500000c57,
+ 0xc5800000c5b,
+ 0xc5d00000c5e,
+ 0xc6000000c64,
+ 0xc6600000c70,
+ 0xc8000000c84,
+ 0xc8500000c8d,
+ 0xc8e00000c91,
+ 0xc9200000ca9,
+ 0xcaa00000cb4,
+ 0xcb500000cba,
+ 0xcbc00000cc5,
+ 0xcc600000cc9,
+ 0xcca00000cce,
+ 0xcd500000cd7,
+ 0xcdd00000cdf,
+ 0xce000000ce4,
+ 0xce600000cf0,
+ 0xcf100000cf4,
+ 0xd0000000d0d,
+ 0xd0e00000d11,
+ 0xd1200000d45,
+ 0xd4600000d49,
+ 0xd4a00000d4f,
+ 0xd5400000d58,
+ 0xd5f00000d64,
+ 0xd6600000d70,
+ 0xd7a00000d80,
+ 0xd8100000d84,
+ 0xd8500000d97,
+ 0xd9a00000db2,
+ 0xdb300000dbc,
+ 0xdbd00000dbe,
+ 0xdc000000dc7,
+ 0xdca00000dcb,
+ 0xdcf00000dd5,
+ 0xdd600000dd7,
+ 0xdd800000de0,
+ 0xde600000df0,
+ 0xdf200000df4,
+ 0xe0100000e33,
+ 0xe3400000e3b,
+ 0xe4000000e4f,
+ 0xe5000000e5a,
+ 0xe8100000e83,
+ 0xe8400000e85,
+ 0xe8600000e8b,
+ 0xe8c00000ea4,
+ 0xea500000ea6,
+ 0xea700000eb3,
+ 0xeb400000ebe,
+ 0xec000000ec5,
+ 0xec600000ec7,
+ 0xec800000ecf,
+ 0xed000000eda,
+ 0xede00000ee0,
+ 0xf0000000f01,
+ 0xf0b00000f0c,
+ 0xf1800000f1a,
+ 0xf2000000f2a,
+ 0xf3500000f36,
+ 0xf3700000f38,
+ 0xf3900000f3a,
+ 0xf3e00000f43,
+ 0xf4400000f48,
+ 0xf4900000f4d,
+ 0xf4e00000f52,
+ 0xf5300000f57,
+ 0xf5800000f5c,
+ 0xf5d00000f69,
+ 0xf6a00000f6d,
+ 0xf7100000f73,
+ 0xf7400000f75,
+ 0xf7a00000f81,
+ 0xf8200000f85,
+ 0xf8600000f93,
+ 0xf9400000f98,
+ 0xf9900000f9d,
+ 0xf9e00000fa2,
+ 0xfa300000fa7,
+ 0xfa800000fac,
+ 0xfad00000fb9,
+ 0xfba00000fbd,
+ 0xfc600000fc7,
+ 0x10000000104a,
+ 0x10500000109e,
+ 0x10d0000010fb,
+ 0x10fd00001100,
+ 0x120000001249,
+ 0x124a0000124e,
+ 0x125000001257,
+ 0x125800001259,
+ 0x125a0000125e,
+ 0x126000001289,
+ 0x128a0000128e,
+ 0x1290000012b1,
+ 0x12b2000012b6,
+ 0x12b8000012bf,
+ 0x12c0000012c1,
+ 0x12c2000012c6,
+ 0x12c8000012d7,
+ 0x12d800001311,
+ 0x131200001316,
+ 0x13180000135b,
+ 0x135d00001360,
+ 0x138000001390,
+ 0x13a0000013f6,
+ 0x14010000166d,
+ 0x166f00001680,
+ 0x16810000169b,
+ 0x16a0000016eb,
+ 0x16f1000016f9,
+ 0x170000001716,
+ 0x171f00001735,
+ 0x174000001754,
+ 0x17600000176d,
+ 0x176e00001771,
+ 0x177200001774,
+ 0x1780000017b4,
+ 0x17b6000017d4,
+ 0x17d7000017d8,
+ 0x17dc000017de,
+ 0x17e0000017ea,
+ 0x18100000181a,
+ 0x182000001879,
+ 0x1880000018ab,
+ 0x18b0000018f6,
+ 0x19000000191f,
+ 0x19200000192c,
+ 0x19300000193c,
+ 0x19460000196e,
+ 0x197000001975,
+ 0x1980000019ac,
+ 0x19b0000019ca,
+ 0x19d0000019da,
+ 0x1a0000001a1c,
+ 0x1a2000001a5f,
+ 0x1a6000001a7d,
+ 0x1a7f00001a8a,
+ 0x1a9000001a9a,
+ 0x1aa700001aa8,
+ 0x1ab000001abe,
+ 0x1abf00001acf,
+ 0x1b0000001b4d,
+ 0x1b5000001b5a,
+ 0x1b6b00001b74,
+ 0x1b8000001bf4,
+ 0x1c0000001c38,
+ 0x1c4000001c4a,
+ 0x1c4d00001c7e,
+ 0x1cd000001cd3,
+ 0x1cd400001cfb,
+ 0x1d0000001d2c,
+ 0x1d2f00001d30,
+ 0x1d3b00001d3c,
+ 0x1d4e00001d4f,
+ 0x1d6b00001d78,
+ 0x1d7900001d9b,
+ 0x1dc000001e00,
+ 0x1e0100001e02,
+ 0x1e0300001e04,
+ 0x1e0500001e06,
+ 0x1e0700001e08,
+ 0x1e0900001e0a,
+ 0x1e0b00001e0c,
+ 0x1e0d00001e0e,
+ 0x1e0f00001e10,
+ 0x1e1100001e12,
+ 0x1e1300001e14,
+ 0x1e1500001e16,
+ 0x1e1700001e18,
+ 0x1e1900001e1a,
+ 0x1e1b00001e1c,
+ 0x1e1d00001e1e,
+ 0x1e1f00001e20,
+ 0x1e2100001e22,
+ 0x1e2300001e24,
+ 0x1e2500001e26,
+ 0x1e2700001e28,
+ 0x1e2900001e2a,
+ 0x1e2b00001e2c,
+ 0x1e2d00001e2e,
+ 0x1e2f00001e30,
+ 0x1e3100001e32,
+ 0x1e3300001e34,
+ 0x1e3500001e36,
+ 0x1e3700001e38,
+ 0x1e3900001e3a,
+ 0x1e3b00001e3c,
+ 0x1e3d00001e3e,
+ 0x1e3f00001e40,
+ 0x1e4100001e42,
+ 0x1e4300001e44,
+ 0x1e4500001e46,
+ 0x1e4700001e48,
+ 0x1e4900001e4a,
+ 0x1e4b00001e4c,
+ 0x1e4d00001e4e,
+ 0x1e4f00001e50,
+ 0x1e5100001e52,
+ 0x1e5300001e54,
+ 0x1e5500001e56,
+ 0x1e5700001e58,
+ 0x1e5900001e5a,
+ 0x1e5b00001e5c,
+ 0x1e5d00001e5e,
+ 0x1e5f00001e60,
+ 0x1e6100001e62,
+ 0x1e6300001e64,
+ 0x1e6500001e66,
+ 0x1e6700001e68,
+ 0x1e6900001e6a,
+ 0x1e6b00001e6c,
+ 0x1e6d00001e6e,
+ 0x1e6f00001e70,
+ 0x1e7100001e72,
+ 0x1e7300001e74,
+ 0x1e7500001e76,
+ 0x1e7700001e78,
+ 0x1e7900001e7a,
+ 0x1e7b00001e7c,
+ 0x1e7d00001e7e,
+ 0x1e7f00001e80,
+ 0x1e8100001e82,
+ 0x1e8300001e84,
+ 0x1e8500001e86,
+ 0x1e8700001e88,
+ 0x1e8900001e8a,
+ 0x1e8b00001e8c,
+ 0x1e8d00001e8e,
+ 0x1e8f00001e90,
+ 0x1e9100001e92,
+ 0x1e9300001e94,
+ 0x1e9500001e9a,
+ 0x1e9c00001e9e,
+ 0x1e9f00001ea0,
+ 0x1ea100001ea2,
+ 0x1ea300001ea4,
+ 0x1ea500001ea6,
+ 0x1ea700001ea8,
+ 0x1ea900001eaa,
+ 0x1eab00001eac,
+ 0x1ead00001eae,
+ 0x1eaf00001eb0,
+ 0x1eb100001eb2,
+ 0x1eb300001eb4,
+ 0x1eb500001eb6,
+ 0x1eb700001eb8,
+ 0x1eb900001eba,
+ 0x1ebb00001ebc,
+ 0x1ebd00001ebe,
+ 0x1ebf00001ec0,
+ 0x1ec100001ec2,
+ 0x1ec300001ec4,
+ 0x1ec500001ec6,
+ 0x1ec700001ec8,
+ 0x1ec900001eca,
+ 0x1ecb00001ecc,
+ 0x1ecd00001ece,
+ 0x1ecf00001ed0,
+ 0x1ed100001ed2,
+ 0x1ed300001ed4,
+ 0x1ed500001ed6,
+ 0x1ed700001ed8,
+ 0x1ed900001eda,
+ 0x1edb00001edc,
+ 0x1edd00001ede,
+ 0x1edf00001ee0,
+ 0x1ee100001ee2,
+ 0x1ee300001ee4,
+ 0x1ee500001ee6,
+ 0x1ee700001ee8,
+ 0x1ee900001eea,
+ 0x1eeb00001eec,
+ 0x1eed00001eee,
+ 0x1eef00001ef0,
+ 0x1ef100001ef2,
+ 0x1ef300001ef4,
+ 0x1ef500001ef6,
+ 0x1ef700001ef8,
+ 0x1ef900001efa,
+ 0x1efb00001efc,
+ 0x1efd00001efe,
+ 0x1eff00001f08,
+ 0x1f1000001f16,
+ 0x1f2000001f28,
+ 0x1f3000001f38,
+ 0x1f4000001f46,
+ 0x1f5000001f58,
+ 0x1f6000001f68,
+ 0x1f7000001f71,
+ 0x1f7200001f73,
+ 0x1f7400001f75,
+ 0x1f7600001f77,
+ 0x1f7800001f79,
+ 0x1f7a00001f7b,
+ 0x1f7c00001f7d,
+ 0x1fb000001fb2,
+ 0x1fb600001fb7,
+ 0x1fc600001fc7,
+ 0x1fd000001fd3,
+ 0x1fd600001fd8,
+ 0x1fe000001fe3,
+ 0x1fe400001fe8,
+ 0x1ff600001ff7,
+ 0x214e0000214f,
+ 0x218400002185,
+ 0x2c3000002c60,
+ 0x2c6100002c62,
+ 0x2c6500002c67,
+ 0x2c6800002c69,
+ 0x2c6a00002c6b,
+ 0x2c6c00002c6d,
+ 0x2c7100002c72,
+ 0x2c7300002c75,
+ 0x2c7600002c7c,
+ 0x2c8100002c82,
+ 0x2c8300002c84,
+ 0x2c8500002c86,
+ 0x2c8700002c88,
+ 0x2c8900002c8a,
+ 0x2c8b00002c8c,
+ 0x2c8d00002c8e,
+ 0x2c8f00002c90,
+ 0x2c9100002c92,
+ 0x2c9300002c94,
+ 0x2c9500002c96,
+ 0x2c9700002c98,
+ 0x2c9900002c9a,
+ 0x2c9b00002c9c,
+ 0x2c9d00002c9e,
+ 0x2c9f00002ca0,
+ 0x2ca100002ca2,
+ 0x2ca300002ca4,
+ 0x2ca500002ca6,
+ 0x2ca700002ca8,
+ 0x2ca900002caa,
+ 0x2cab00002cac,
+ 0x2cad00002cae,
+ 0x2caf00002cb0,
+ 0x2cb100002cb2,
+ 0x2cb300002cb4,
+ 0x2cb500002cb6,
+ 0x2cb700002cb8,
+ 0x2cb900002cba,
+ 0x2cbb00002cbc,
+ 0x2cbd00002cbe,
+ 0x2cbf00002cc0,
+ 0x2cc100002cc2,
+ 0x2cc300002cc4,
+ 0x2cc500002cc6,
+ 0x2cc700002cc8,
+ 0x2cc900002cca,
+ 0x2ccb00002ccc,
+ 0x2ccd00002cce,
+ 0x2ccf00002cd0,
+ 0x2cd100002cd2,
+ 0x2cd300002cd4,
+ 0x2cd500002cd6,
+ 0x2cd700002cd8,
+ 0x2cd900002cda,
+ 0x2cdb00002cdc,
+ 0x2cdd00002cde,
+ 0x2cdf00002ce0,
+ 0x2ce100002ce2,
+ 0x2ce300002ce5,
+ 0x2cec00002ced,
+ 0x2cee00002cf2,
+ 0x2cf300002cf4,
+ 0x2d0000002d26,
+ 0x2d2700002d28,
+ 0x2d2d00002d2e,
+ 0x2d3000002d68,
+ 0x2d7f00002d97,
+ 0x2da000002da7,
+ 0x2da800002daf,
+ 0x2db000002db7,
+ 0x2db800002dbf,
+ 0x2dc000002dc7,
+ 0x2dc800002dcf,
+ 0x2dd000002dd7,
+ 0x2dd800002ddf,
+ 0x2de000002e00,
+ 0x2e2f00002e30,
+ 0x300500003008,
+ 0x302a0000302e,
+ 0x303c0000303d,
+ 0x304100003097,
+ 0x30990000309b,
+ 0x309d0000309f,
+ 0x30a1000030fb,
+ 0x30fc000030ff,
+ 0x310500003130,
+ 0x31a0000031c0,
+ 0x31f000003200,
+ 0x340000004dc0,
+ 0x4e000000a48d,
+ 0xa4d00000a4fe,
+ 0xa5000000a60d,
+ 0xa6100000a62c,
+ 0xa6410000a642,
+ 0xa6430000a644,
+ 0xa6450000a646,
+ 0xa6470000a648,
+ 0xa6490000a64a,
+ 0xa64b0000a64c,
+ 0xa64d0000a64e,
+ 0xa64f0000a650,
+ 0xa6510000a652,
+ 0xa6530000a654,
+ 0xa6550000a656,
+ 0xa6570000a658,
+ 0xa6590000a65a,
+ 0xa65b0000a65c,
+ 0xa65d0000a65e,
+ 0xa65f0000a660,
+ 0xa6610000a662,
+ 0xa6630000a664,
+ 0xa6650000a666,
+ 0xa6670000a668,
+ 0xa6690000a66a,
+ 0xa66b0000a66c,
+ 0xa66d0000a670,
+ 0xa6740000a67e,
+ 0xa67f0000a680,
+ 0xa6810000a682,
+ 0xa6830000a684,
+ 0xa6850000a686,
+ 0xa6870000a688,
+ 0xa6890000a68a,
+ 0xa68b0000a68c,
+ 0xa68d0000a68e,
+ 0xa68f0000a690,
+ 0xa6910000a692,
+ 0xa6930000a694,
+ 0xa6950000a696,
+ 0xa6970000a698,
+ 0xa6990000a69a,
+ 0xa69b0000a69c,
+ 0xa69e0000a6e6,
+ 0xa6f00000a6f2,
+ 0xa7170000a720,
+ 0xa7230000a724,
+ 0xa7250000a726,
+ 0xa7270000a728,
+ 0xa7290000a72a,
+ 0xa72b0000a72c,
+ 0xa72d0000a72e,
+ 0xa72f0000a732,
+ 0xa7330000a734,
+ 0xa7350000a736,
+ 0xa7370000a738,
+ 0xa7390000a73a,
+ 0xa73b0000a73c,
+ 0xa73d0000a73e,
+ 0xa73f0000a740,
+ 0xa7410000a742,
+ 0xa7430000a744,
+ 0xa7450000a746,
+ 0xa7470000a748,
+ 0xa7490000a74a,
+ 0xa74b0000a74c,
+ 0xa74d0000a74e,
+ 0xa74f0000a750,
+ 0xa7510000a752,
+ 0xa7530000a754,
+ 0xa7550000a756,
+ 0xa7570000a758,
+ 0xa7590000a75a,
+ 0xa75b0000a75c,
+ 0xa75d0000a75e,
+ 0xa75f0000a760,
+ 0xa7610000a762,
+ 0xa7630000a764,
+ 0xa7650000a766,
+ 0xa7670000a768,
+ 0xa7690000a76a,
+ 0xa76b0000a76c,
+ 0xa76d0000a76e,
+ 0xa76f0000a770,
+ 0xa7710000a779,
+ 0xa77a0000a77b,
+ 0xa77c0000a77d,
+ 0xa77f0000a780,
+ 0xa7810000a782,
+ 0xa7830000a784,
+ 0xa7850000a786,
+ 0xa7870000a789,
+ 0xa78c0000a78d,
+ 0xa78e0000a790,
+ 0xa7910000a792,
+ 0xa7930000a796,
+ 0xa7970000a798,
+ 0xa7990000a79a,
+ 0xa79b0000a79c,
+ 0xa79d0000a79e,
+ 0xa79f0000a7a0,
+ 0xa7a10000a7a2,
+ 0xa7a30000a7a4,
+ 0xa7a50000a7a6,
+ 0xa7a70000a7a8,
+ 0xa7a90000a7aa,
+ 0xa7af0000a7b0,
+ 0xa7b50000a7b6,
+ 0xa7b70000a7b8,
+ 0xa7b90000a7ba,
+ 0xa7bb0000a7bc,
+ 0xa7bd0000a7be,
+ 0xa7bf0000a7c0,
+ 0xa7c10000a7c2,
+ 0xa7c30000a7c4,
+ 0xa7c80000a7c9,
+ 0xa7ca0000a7cb,
+ 0xa7d10000a7d2,
+ 0xa7d30000a7d4,
+ 0xa7d50000a7d6,
+ 0xa7d70000a7d8,
+ 0xa7d90000a7da,
+ 0xa7f20000a7f5,
+ 0xa7f60000a7f8,
+ 0xa7fa0000a828,
+ 0xa82c0000a82d,
+ 0xa8400000a874,
+ 0xa8800000a8c6,
+ 0xa8d00000a8da,
+ 0xa8e00000a8f8,
+ 0xa8fb0000a8fc,
+ 0xa8fd0000a92e,
+ 0xa9300000a954,
+ 0xa9800000a9c1,
+ 0xa9cf0000a9da,
+ 0xa9e00000a9ff,
+ 0xaa000000aa37,
+ 0xaa400000aa4e,
+ 0xaa500000aa5a,
+ 0xaa600000aa77,
+ 0xaa7a0000aac3,
+ 0xaadb0000aade,
+ 0xaae00000aaf0,
+ 0xaaf20000aaf7,
+ 0xab010000ab07,
+ 0xab090000ab0f,
+ 0xab110000ab17,
+ 0xab200000ab27,
+ 0xab280000ab2f,
+ 0xab300000ab5b,
+ 0xab600000ab69,
+ 0xabc00000abeb,
+ 0xabec0000abee,
+ 0xabf00000abfa,
+ 0xac000000d7a4,
+ 0xfa0e0000fa10,
+ 0xfa110000fa12,
+ 0xfa130000fa15,
+ 0xfa1f0000fa20,
+ 0xfa210000fa22,
+ 0xfa230000fa25,
+ 0xfa270000fa2a,
+ 0xfb1e0000fb1f,
+ 0xfe200000fe30,
+ 0xfe730000fe74,
+ 0x100000001000c,
+ 0x1000d00010027,
+ 0x100280001003b,
+ 0x1003c0001003e,
+ 0x1003f0001004e,
+ 0x100500001005e,
+ 0x10080000100fb,
+ 0x101fd000101fe,
+ 0x102800001029d,
+ 0x102a0000102d1,
+ 0x102e0000102e1,
+ 0x1030000010320,
+ 0x1032d00010341,
+ 0x103420001034a,
+ 0x103500001037b,
+ 0x103800001039e,
+ 0x103a0000103c4,
+ 0x103c8000103d0,
+ 0x104280001049e,
+ 0x104a0000104aa,
+ 0x104d8000104fc,
+ 0x1050000010528,
+ 0x1053000010564,
+ 0x10597000105a2,
+ 0x105a3000105b2,
+ 0x105b3000105ba,
+ 0x105bb000105bd,
+ 0x1060000010737,
+ 0x1074000010756,
+ 0x1076000010768,
+ 0x1078000010786,
+ 0x10787000107b1,
+ 0x107b2000107bb,
+ 0x1080000010806,
+ 0x1080800010809,
+ 0x1080a00010836,
+ 0x1083700010839,
+ 0x1083c0001083d,
+ 0x1083f00010856,
+ 0x1086000010877,
+ 0x108800001089f,
+ 0x108e0000108f3,
+ 0x108f4000108f6,
+ 0x1090000010916,
+ 0x109200001093a,
+ 0x10980000109b8,
+ 0x109be000109c0,
+ 0x10a0000010a04,
+ 0x10a0500010a07,
+ 0x10a0c00010a14,
+ 0x10a1500010a18,
+ 0x10a1900010a36,
+ 0x10a3800010a3b,
+ 0x10a3f00010a40,
+ 0x10a6000010a7d,
+ 0x10a8000010a9d,
+ 0x10ac000010ac8,
+ 0x10ac900010ae7,
+ 0x10b0000010b36,
+ 0x10b4000010b56,
+ 0x10b6000010b73,
+ 0x10b8000010b92,
+ 0x10c0000010c49,
+ 0x10cc000010cf3,
+ 0x10d0000010d28,
+ 0x10d3000010d3a,
+ 0x10e8000010eaa,
+ 0x10eab00010ead,
+ 0x10eb000010eb2,
+ 0x10efd00010f1d,
+ 0x10f2700010f28,
+ 0x10f3000010f51,
+ 0x10f7000010f86,
+ 0x10fb000010fc5,
+ 0x10fe000010ff7,
+ 0x1100000011047,
+ 0x1106600011076,
+ 0x1107f000110bb,
+ 0x110c2000110c3,
+ 0x110d0000110e9,
+ 0x110f0000110fa,
+ 0x1110000011135,
+ 0x1113600011140,
+ 0x1114400011148,
+ 0x1115000011174,
+ 0x1117600011177,
+ 0x11180000111c5,
+ 0x111c9000111cd,
+ 0x111ce000111db,
+ 0x111dc000111dd,
+ 0x1120000011212,
+ 0x1121300011238,
+ 0x1123e00011242,
+ 0x1128000011287,
+ 0x1128800011289,
+ 0x1128a0001128e,
+ 0x1128f0001129e,
+ 0x1129f000112a9,
+ 0x112b0000112eb,
+ 0x112f0000112fa,
+ 0x1130000011304,
+ 0x113050001130d,
+ 0x1130f00011311,
+ 0x1131300011329,
+ 0x1132a00011331,
+ 0x1133200011334,
+ 0x113350001133a,
+ 0x1133b00011345,
+ 0x1134700011349,
+ 0x1134b0001134e,
+ 0x1135000011351,
+ 0x1135700011358,
+ 0x1135d00011364,
+ 0x113660001136d,
+ 0x1137000011375,
+ 0x114000001144b,
+ 0x114500001145a,
+ 0x1145e00011462,
+ 0x11480000114c6,
+ 0x114c7000114c8,
+ 0x114d0000114da,
+ 0x11580000115b6,
+ 0x115b8000115c1,
+ 0x115d8000115de,
+ 0x1160000011641,
+ 0x1164400011645,
+ 0x116500001165a,
+ 0x11680000116b9,
+ 0x116c0000116ca,
+ 0x117000001171b,
+ 0x1171d0001172c,
+ 0x117300001173a,
+ 0x1174000011747,
+ 0x118000001183b,
+ 0x118c0000118ea,
+ 0x118ff00011907,
+ 0x119090001190a,
+ 0x1190c00011914,
+ 0x1191500011917,
+ 0x1191800011936,
+ 0x1193700011939,
+ 0x1193b00011944,
+ 0x119500001195a,
+ 0x119a0000119a8,
+ 0x119aa000119d8,
+ 0x119da000119e2,
+ 0x119e3000119e5,
+ 0x11a0000011a3f,
+ 0x11a4700011a48,
+ 0x11a5000011a9a,
+ 0x11a9d00011a9e,
+ 0x11ab000011af9,
+ 0x11c0000011c09,
+ 0x11c0a00011c37,
+ 0x11c3800011c41,
+ 0x11c5000011c5a,
+ 0x11c7200011c90,
+ 0x11c9200011ca8,
+ 0x11ca900011cb7,
+ 0x11d0000011d07,
+ 0x11d0800011d0a,
+ 0x11d0b00011d37,
+ 0x11d3a00011d3b,
+ 0x11d3c00011d3e,
+ 0x11d3f00011d48,
+ 0x11d5000011d5a,
+ 0x11d6000011d66,
+ 0x11d6700011d69,
+ 0x11d6a00011d8f,
+ 0x11d9000011d92,
+ 0x11d9300011d99,
+ 0x11da000011daa,
+ 0x11ee000011ef7,
+ 0x11f0000011f11,
+ 0x11f1200011f3b,
+ 0x11f3e00011f43,
+ 0x11f5000011f5a,
+ 0x11fb000011fb1,
+ 0x120000001239a,
+ 0x1248000012544,
+ 0x12f9000012ff1,
+ 0x1300000013430,
+ 0x1344000013456,
+ 0x1440000014647,
+ 0x1680000016a39,
+ 0x16a4000016a5f,
+ 0x16a6000016a6a,
+ 0x16a7000016abf,
+ 0x16ac000016aca,
+ 0x16ad000016aee,
+ 0x16af000016af5,
+ 0x16b0000016b37,
+ 0x16b4000016b44,
+ 0x16b5000016b5a,
+ 0x16b6300016b78,
+ 0x16b7d00016b90,
+ 0x16e6000016e80,
+ 0x16f0000016f4b,
+ 0x16f4f00016f88,
+ 0x16f8f00016fa0,
+ 0x16fe000016fe2,
+ 0x16fe300016fe5,
+ 0x16ff000016ff2,
+ 0x17000000187f8,
+ 0x1880000018cd6,
+ 0x18d0000018d09,
+ 0x1aff00001aff4,
+ 0x1aff50001affc,
+ 0x1affd0001afff,
+ 0x1b0000001b123,
+ 0x1b1320001b133,
+ 0x1b1500001b153,
+ 0x1b1550001b156,
+ 0x1b1640001b168,
+ 0x1b1700001b2fc,
+ 0x1bc000001bc6b,
+ 0x1bc700001bc7d,
+ 0x1bc800001bc89,
+ 0x1bc900001bc9a,
+ 0x1bc9d0001bc9f,
+ 0x1cf000001cf2e,
+ 0x1cf300001cf47,
+ 0x1da000001da37,
+ 0x1da3b0001da6d,
+ 0x1da750001da76,
+ 0x1da840001da85,
+ 0x1da9b0001daa0,
+ 0x1daa10001dab0,
+ 0x1df000001df1f,
+ 0x1df250001df2b,
+ 0x1e0000001e007,
+ 0x1e0080001e019,
+ 0x1e01b0001e022,
+ 0x1e0230001e025,
+ 0x1e0260001e02b,
+ 0x1e0300001e06e,
+ 0x1e08f0001e090,
+ 0x1e1000001e12d,
+ 0x1e1300001e13e,
+ 0x1e1400001e14a,
+ 0x1e14e0001e14f,
+ 0x1e2900001e2af,
+ 0x1e2c00001e2fa,
+ 0x1e4d00001e4fa,
+ 0x1e7e00001e7e7,
+ 0x1e7e80001e7ec,
+ 0x1e7ed0001e7ef,
+ 0x1e7f00001e7ff,
+ 0x1e8000001e8c5,
+ 0x1e8d00001e8d7,
+ 0x1e9220001e94c,
+ 0x1e9500001e95a,
+ 0x200000002a6e0,
+ 0x2a7000002b73a,
+ 0x2b7400002b81e,
+ 0x2b8200002cea2,
+ 0x2ceb00002ebe1,
+ 0x300000003134b,
+ 0x31350000323b0,
+ ),
+ 'CONTEXTJ': (
+ 0x200c0000200e,
+ ),
+ 'CONTEXTO': (
+ 0xb7000000b8,
+ 0x37500000376,
+ 0x5f3000005f5,
+ 0x6600000066a,
+ 0x6f0000006fa,
+ 0x30fb000030fc,
+ ),
+}
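
Two encoding conventions make this generated table compact: each range in 'scripts' and 'codepoint_classes' is a single integer packing (start << 32) | end (the format consumed by intranges.py, next in this diff), and each value in 'joining_types' is the ord() of the Unicode Joining_Type letter, which is why core.py above compares against ord('T'), ord('L'), ord('D') and ord('R'). For example:

    r = 0x37000000374          # first 'Greek' entry
    r >> 32, r & 0xFFFFFFFF    # (0x370, 0x374): covers U+0370..U+0373

    chr(68), chr(82), chr(85)  # ('D', 'R', 'U'): dual-, right-, non-joining
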
diff --git a/third_party/python/pip/pip/_vendor/idna/intranges.py b/third_party/python/pip/pip/_vendor/idna/intranges.py
new file mode 100644
index 0000000000..6a43b04753
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/idna/intranges.py
@@ -0,0 +1,54 @@
+"""
+Given a list of integers, made up of (hopefully) a small number of long runs
+of consecutive integers, compute a representation of the form
+((start1, end1), (start2, end2) ...). Then answer the question "was x present
+in the original list?" in time O(log(# runs)).
+"""
+
+import bisect
+from typing import List, Tuple
+
+def intranges_from_list(list_: List[int]) -> Tuple[int, ...]:
+ """Represent a list of integers as a sequence of ranges:
+ ((start_0, end_0), (start_1, end_1), ...), such that the original
+ integers are exactly those x such that start_i <= x < end_i for some i.
+
+ Ranges are encoded as single integers (start << 32 | end), not as tuples.
+ """
+
+ sorted_list = sorted(list_)
+ ranges = []
+ last_write = -1
+ for i in range(len(sorted_list)):
+ if i+1 < len(sorted_list):
+ if sorted_list[i] == sorted_list[i+1]-1:
+ continue
+ current_range = sorted_list[last_write+1:i+1]
+ ranges.append(_encode_range(current_range[0], current_range[-1] + 1))
+ last_write = i
+
+ return tuple(ranges)
+
+def _encode_range(start: int, end: int) -> int:
+ return (start << 32) | end
+
+def _decode_range(r: int) -> Tuple[int, int]:
+ return (r >> 32), (r & ((1 << 32) - 1))
+
+
+def intranges_contain(int_: int, ranges: Tuple[int, ...]) -> bool:
+ """Determine if `int_` falls into one of the ranges in `ranges`."""
+ tuple_ = _encode_range(int_, 0)
+ pos = bisect.bisect_left(ranges, tuple_)
+ # we could be immediately ahead of a tuple (start, end)
+    # with start <= int_ < end
+ if pos > 0:
+ left, right = _decode_range(ranges[pos-1])
+ if left <= int_ < right:
+ return True
+ # or we could be immediately behind a tuple (int_, end)
+ if pos < len(ranges):
+ left, _ = _decode_range(ranges[pos])
+ if left == int_:
+ return True
+ return False
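
A round trip through this module, showing the packed representation described in the docstrings above:

    from pip._vendor.idna.intranges import intranges_from_list, intranges_contain

    ranges = intranges_from_list([1, 2, 3, 10, 11])
    # ranges == (0x100000004, 0xA0000000C), i.e. [1, 4) and [10, 12) packed
    intranges_contain(2, ranges)   # True
    intranges_contain(5, ranges)   # False
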
diff --git a/third_party/python/pip/pip/_vendor/idna/package_data.py b/third_party/python/pip/pip/_vendor/idna/package_data.py
new file mode 100644
index 0000000000..8501893bd1
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/idna/package_data.py
@@ -0,0 +1,2 @@
+__version__ = '3.4'
+
diff --git a/third_party/python/pip/pip/_vendor/idna/uts46data.py b/third_party/python/pip/pip/_vendor/idna/uts46data.py
new file mode 100644
index 0000000000..186796c17b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/idna/uts46data.py
@@ -0,0 +1,8600 @@
+# This file is automatically generated by tools/idna-data
+# vim: set fileencoding=utf-8 :
+
+from typing import List, Tuple, Union
+
+
+"""IDNA Mapping Table from UTS46."""
+
+
+__version__ = '15.0.0'
+def _seg_0() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x0, '3'),
+ (0x1, '3'),
+ (0x2, '3'),
+ (0x3, '3'),
+ (0x4, '3'),
+ (0x5, '3'),
+ (0x6, '3'),
+ (0x7, '3'),
+ (0x8, '3'),
+ (0x9, '3'),
+ (0xA, '3'),
+ (0xB, '3'),
+ (0xC, '3'),
+ (0xD, '3'),
+ (0xE, '3'),
+ (0xF, '3'),
+ (0x10, '3'),
+ (0x11, '3'),
+ (0x12, '3'),
+ (0x13, '3'),
+ (0x14, '3'),
+ (0x15, '3'),
+ (0x16, '3'),
+ (0x17, '3'),
+ (0x18, '3'),
+ (0x19, '3'),
+ (0x1A, '3'),
+ (0x1B, '3'),
+ (0x1C, '3'),
+ (0x1D, '3'),
+ (0x1E, '3'),
+ (0x1F, '3'),
+ (0x20, '3'),
+ (0x21, '3'),
+ (0x22, '3'),
+ (0x23, '3'),
+ (0x24, '3'),
+ (0x25, '3'),
+ (0x26, '3'),
+ (0x27, '3'),
+ (0x28, '3'),
+ (0x29, '3'),
+ (0x2A, '3'),
+ (0x2B, '3'),
+ (0x2C, '3'),
+ (0x2D, 'V'),
+ (0x2E, 'V'),
+ (0x2F, '3'),
+ (0x30, 'V'),
+ (0x31, 'V'),
+ (0x32, 'V'),
+ (0x33, 'V'),
+ (0x34, 'V'),
+ (0x35, 'V'),
+ (0x36, 'V'),
+ (0x37, 'V'),
+ (0x38, 'V'),
+ (0x39, 'V'),
+ (0x3A, '3'),
+ (0x3B, '3'),
+ (0x3C, '3'),
+ (0x3D, '3'),
+ (0x3E, '3'),
+ (0x3F, '3'),
+ (0x40, '3'),
+ (0x41, 'M', 'a'),
+ (0x42, 'M', 'b'),
+ (0x43, 'M', 'c'),
+ (0x44, 'M', 'd'),
+ (0x45, 'M', 'e'),
+ (0x46, 'M', 'f'),
+ (0x47, 'M', 'g'),
+ (0x48, 'M', 'h'),
+ (0x49, 'M', 'i'),
+ (0x4A, 'M', 'j'),
+ (0x4B, 'M', 'k'),
+ (0x4C, 'M', 'l'),
+ (0x4D, 'M', 'm'),
+ (0x4E, 'M', 'n'),
+ (0x4F, 'M', 'o'),
+ (0x50, 'M', 'p'),
+ (0x51, 'M', 'q'),
+ (0x52, 'M', 'r'),
+ (0x53, 'M', 's'),
+ (0x54, 'M', 't'),
+ (0x55, 'M', 'u'),
+ (0x56, 'M', 'v'),
+ (0x57, 'M', 'w'),
+ (0x58, 'M', 'x'),
+ (0x59, 'M', 'y'),
+ (0x5A, 'M', 'z'),
+ (0x5B, '3'),
+ (0x5C, '3'),
+ (0x5D, '3'),
+ (0x5E, '3'),
+ (0x5F, '3'),
+ (0x60, '3'),
+ (0x61, 'V'),
+ (0x62, 'V'),
+ (0x63, 'V'),
+ ]
+
+def _seg_1() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x64, 'V'),
+ (0x65, 'V'),
+ (0x66, 'V'),
+ (0x67, 'V'),
+ (0x68, 'V'),
+ (0x69, 'V'),
+ (0x6A, 'V'),
+ (0x6B, 'V'),
+ (0x6C, 'V'),
+ (0x6D, 'V'),
+ (0x6E, 'V'),
+ (0x6F, 'V'),
+ (0x70, 'V'),
+ (0x71, 'V'),
+ (0x72, 'V'),
+ (0x73, 'V'),
+ (0x74, 'V'),
+ (0x75, 'V'),
+ (0x76, 'V'),
+ (0x77, 'V'),
+ (0x78, 'V'),
+ (0x79, 'V'),
+ (0x7A, 'V'),
+ (0x7B, '3'),
+ (0x7C, '3'),
+ (0x7D, '3'),
+ (0x7E, '3'),
+ (0x7F, '3'),
+ (0x80, 'X'),
+ (0x81, 'X'),
+ (0x82, 'X'),
+ (0x83, 'X'),
+ (0x84, 'X'),
+ (0x85, 'X'),
+ (0x86, 'X'),
+ (0x87, 'X'),
+ (0x88, 'X'),
+ (0x89, 'X'),
+ (0x8A, 'X'),
+ (0x8B, 'X'),
+ (0x8C, 'X'),
+ (0x8D, 'X'),
+ (0x8E, 'X'),
+ (0x8F, 'X'),
+ (0x90, 'X'),
+ (0x91, 'X'),
+ (0x92, 'X'),
+ (0x93, 'X'),
+ (0x94, 'X'),
+ (0x95, 'X'),
+ (0x96, 'X'),
+ (0x97, 'X'),
+ (0x98, 'X'),
+ (0x99, 'X'),
+ (0x9A, 'X'),
+ (0x9B, 'X'),
+ (0x9C, 'X'),
+ (0x9D, 'X'),
+ (0x9E, 'X'),
+ (0x9F, 'X'),
+ (0xA0, '3', ' '),
+ (0xA1, 'V'),
+ (0xA2, 'V'),
+ (0xA3, 'V'),
+ (0xA4, 'V'),
+ (0xA5, 'V'),
+ (0xA6, 'V'),
+ (0xA7, 'V'),
+ (0xA8, '3', ' ̈'),
+ (0xA9, 'V'),
+ (0xAA, 'M', 'a'),
+ (0xAB, 'V'),
+ (0xAC, 'V'),
+ (0xAD, 'I'),
+ (0xAE, 'V'),
+ (0xAF, '3', ' ̄'),
+ (0xB0, 'V'),
+ (0xB1, 'V'),
+ (0xB2, 'M', '2'),
+ (0xB3, 'M', '3'),
+ (0xB4, '3', ' ́'),
+ (0xB5, 'M', 'μ'),
+ (0xB6, 'V'),
+ (0xB7, 'V'),
+ (0xB8, '3', ' ̧'),
+ (0xB9, 'M', '1'),
+ (0xBA, 'M', 'o'),
+ (0xBB, 'V'),
+ (0xBC, 'M', '1⁄4'),
+ (0xBD, 'M', '1⁄2'),
+ (0xBE, 'M', '3⁄4'),
+ (0xBF, 'V'),
+ (0xC0, 'M', 'à'),
+ (0xC1, 'M', 'á'),
+ (0xC2, 'M', 'â'),
+ (0xC3, 'M', 'ã'),
+ (0xC4, 'M', 'ä'),
+ (0xC5, 'M', 'å'),
+ (0xC6, 'M', 'æ'),
+ (0xC7, 'M', 'ç'),
+ ]
+
+def _seg_2() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xC8, 'M', 'è'),
+ (0xC9, 'M', 'é'),
+ (0xCA, 'M', 'ê'),
+ (0xCB, 'M', 'ë'),
+ (0xCC, 'M', 'ì'),
+ (0xCD, 'M', 'í'),
+ (0xCE, 'M', 'î'),
+ (0xCF, 'M', 'ï'),
+ (0xD0, 'M', 'ð'),
+ (0xD1, 'M', 'ñ'),
+ (0xD2, 'M', 'ò'),
+ (0xD3, 'M', 'ó'),
+ (0xD4, 'M', 'ô'),
+ (0xD5, 'M', 'õ'),
+ (0xD6, 'M', 'ö'),
+ (0xD7, 'V'),
+ (0xD8, 'M', 'ø'),
+ (0xD9, 'M', 'ù'),
+ (0xDA, 'M', 'ú'),
+ (0xDB, 'M', 'û'),
+ (0xDC, 'M', 'ü'),
+ (0xDD, 'M', 'ý'),
+ (0xDE, 'M', 'þ'),
+ (0xDF, 'D', 'ss'),
+ (0xE0, 'V'),
+ (0xE1, 'V'),
+ (0xE2, 'V'),
+ (0xE3, 'V'),
+ (0xE4, 'V'),
+ (0xE5, 'V'),
+ (0xE6, 'V'),
+ (0xE7, 'V'),
+ (0xE8, 'V'),
+ (0xE9, 'V'),
+ (0xEA, 'V'),
+ (0xEB, 'V'),
+ (0xEC, 'V'),
+ (0xED, 'V'),
+ (0xEE, 'V'),
+ (0xEF, 'V'),
+ (0xF0, 'V'),
+ (0xF1, 'V'),
+ (0xF2, 'V'),
+ (0xF3, 'V'),
+ (0xF4, 'V'),
+ (0xF5, 'V'),
+ (0xF6, 'V'),
+ (0xF7, 'V'),
+ (0xF8, 'V'),
+ (0xF9, 'V'),
+ (0xFA, 'V'),
+ (0xFB, 'V'),
+ (0xFC, 'V'),
+ (0xFD, 'V'),
+ (0xFE, 'V'),
+ (0xFF, 'V'),
+ (0x100, 'M', 'ā'),
+ (0x101, 'V'),
+ (0x102, 'M', 'ă'),
+ (0x103, 'V'),
+ (0x104, 'M', 'ą'),
+ (0x105, 'V'),
+ (0x106, 'M', 'ć'),
+ (0x107, 'V'),
+ (0x108, 'M', 'ĉ'),
+ (0x109, 'V'),
+ (0x10A, 'M', 'ċ'),
+ (0x10B, 'V'),
+ (0x10C, 'M', 'č'),
+ (0x10D, 'V'),
+ (0x10E, 'M', 'ď'),
+ (0x10F, 'V'),
+ (0x110, 'M', 'đ'),
+ (0x111, 'V'),
+ (0x112, 'M', 'ē'),
+ (0x113, 'V'),
+ (0x114, 'M', 'ĕ'),
+ (0x115, 'V'),
+ (0x116, 'M', 'ė'),
+ (0x117, 'V'),
+ (0x118, 'M', 'ę'),
+ (0x119, 'V'),
+ (0x11A, 'M', 'ě'),
+ (0x11B, 'V'),
+ (0x11C, 'M', 'ĝ'),
+ (0x11D, 'V'),
+ (0x11E, 'M', 'ğ'),
+ (0x11F, 'V'),
+ (0x120, 'M', 'ġ'),
+ (0x121, 'V'),
+ (0x122, 'M', 'ģ'),
+ (0x123, 'V'),
+ (0x124, 'M', 'ĥ'),
+ (0x125, 'V'),
+ (0x126, 'M', 'ħ'),
+ (0x127, 'V'),
+ (0x128, 'M', 'ĩ'),
+ (0x129, 'V'),
+ (0x12A, 'M', 'ī'),
+ (0x12B, 'V'),
+ ]
+
+def _seg_3() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x12C, 'M', 'ĭ'),
+ (0x12D, 'V'),
+ (0x12E, 'M', 'į'),
+ (0x12F, 'V'),
+ (0x130, 'M', 'i̇'),
+ (0x131, 'V'),
+ (0x132, 'M', 'ij'),
+ (0x134, 'M', 'ĵ'),
+ (0x135, 'V'),
+ (0x136, 'M', 'ķ'),
+ (0x137, 'V'),
+ (0x139, 'M', 'ĺ'),
+ (0x13A, 'V'),
+ (0x13B, 'M', 'ļ'),
+ (0x13C, 'V'),
+ (0x13D, 'M', 'ľ'),
+ (0x13E, 'V'),
+ (0x13F, 'M', 'l·'),
+ (0x141, 'M', 'ł'),
+ (0x142, 'V'),
+ (0x143, 'M', 'ń'),
+ (0x144, 'V'),
+ (0x145, 'M', 'ņ'),
+ (0x146, 'V'),
+ (0x147, 'M', 'ň'),
+ (0x148, 'V'),
+ (0x149, 'M', 'ʼn'),
+ (0x14A, 'M', 'ŋ'),
+ (0x14B, 'V'),
+ (0x14C, 'M', 'ō'),
+ (0x14D, 'V'),
+ (0x14E, 'M', 'ŏ'),
+ (0x14F, 'V'),
+ (0x150, 'M', 'ő'),
+ (0x151, 'V'),
+ (0x152, 'M', 'œ'),
+ (0x153, 'V'),
+ (0x154, 'M', 'ŕ'),
+ (0x155, 'V'),
+ (0x156, 'M', 'ŗ'),
+ (0x157, 'V'),
+ (0x158, 'M', 'ř'),
+ (0x159, 'V'),
+ (0x15A, 'M', 'ś'),
+ (0x15B, 'V'),
+ (0x15C, 'M', 'ŝ'),
+ (0x15D, 'V'),
+ (0x15E, 'M', 'ş'),
+ (0x15F, 'V'),
+ (0x160, 'M', 'š'),
+ (0x161, 'V'),
+ (0x162, 'M', 'ţ'),
+ (0x163, 'V'),
+ (0x164, 'M', 'ť'),
+ (0x165, 'V'),
+ (0x166, 'M', 'ŧ'),
+ (0x167, 'V'),
+ (0x168, 'M', 'ũ'),
+ (0x169, 'V'),
+ (0x16A, 'M', 'ū'),
+ (0x16B, 'V'),
+ (0x16C, 'M', 'ŭ'),
+ (0x16D, 'V'),
+ (0x16E, 'M', 'ů'),
+ (0x16F, 'V'),
+ (0x170, 'M', 'ű'),
+ (0x171, 'V'),
+ (0x172, 'M', 'ų'),
+ (0x173, 'V'),
+ (0x174, 'M', 'ŵ'),
+ (0x175, 'V'),
+ (0x176, 'M', 'ŷ'),
+ (0x177, 'V'),
+ (0x178, 'M', 'ÿ'),
+ (0x179, 'M', 'ź'),
+ (0x17A, 'V'),
+ (0x17B, 'M', 'ż'),
+ (0x17C, 'V'),
+ (0x17D, 'M', 'ž'),
+ (0x17E, 'V'),
+ (0x17F, 'M', 's'),
+ (0x180, 'V'),
+ (0x181, 'M', 'ɓ'),
+ (0x182, 'M', 'ƃ'),
+ (0x183, 'V'),
+ (0x184, 'M', 'ƅ'),
+ (0x185, 'V'),
+ (0x186, 'M', 'ɔ'),
+ (0x187, 'M', 'ƈ'),
+ (0x188, 'V'),
+ (0x189, 'M', 'ɖ'),
+ (0x18A, 'M', 'ɗ'),
+ (0x18B, 'M', 'ƌ'),
+ (0x18C, 'V'),
+ (0x18E, 'M', 'ǝ'),
+ (0x18F, 'M', 'ə'),
+ (0x190, 'M', 'ɛ'),
+ (0x191, 'M', 'ƒ'),
+ (0x192, 'V'),
+ (0x193, 'M', 'ɠ'),
+ ]
+
+def _seg_4() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x194, 'M', 'ɣ'),
+ (0x195, 'V'),
+ (0x196, 'M', 'ɩ'),
+ (0x197, 'M', 'ɨ'),
+ (0x198, 'M', 'ƙ'),
+ (0x199, 'V'),
+ (0x19C, 'M', 'ɯ'),
+ (0x19D, 'M', 'ɲ'),
+ (0x19E, 'V'),
+ (0x19F, 'M', 'ɵ'),
+ (0x1A0, 'M', 'ơ'),
+ (0x1A1, 'V'),
+ (0x1A2, 'M', 'ƣ'),
+ (0x1A3, 'V'),
+ (0x1A4, 'M', 'ƥ'),
+ (0x1A5, 'V'),
+ (0x1A6, 'M', 'ʀ'),
+ (0x1A7, 'M', 'ƨ'),
+ (0x1A8, 'V'),
+ (0x1A9, 'M', 'ʃ'),
+ (0x1AA, 'V'),
+ (0x1AC, 'M', 'ƭ'),
+ (0x1AD, 'V'),
+ (0x1AE, 'M', 'ʈ'),
+ (0x1AF, 'M', 'ư'),
+ (0x1B0, 'V'),
+ (0x1B1, 'M', 'ʊ'),
+ (0x1B2, 'M', 'ʋ'),
+ (0x1B3, 'M', 'ƴ'),
+ (0x1B4, 'V'),
+ (0x1B5, 'M', 'ƶ'),
+ (0x1B6, 'V'),
+ (0x1B7, 'M', 'ʒ'),
+ (0x1B8, 'M', 'ƹ'),
+ (0x1B9, 'V'),
+ (0x1BC, 'M', 'ƽ'),
+ (0x1BD, 'V'),
+ (0x1C4, 'M', 'dž'),
+ (0x1C7, 'M', 'lj'),
+ (0x1CA, 'M', 'nj'),
+ (0x1CD, 'M', 'ǎ'),
+ (0x1CE, 'V'),
+ (0x1CF, 'M', 'ǐ'),
+ (0x1D0, 'V'),
+ (0x1D1, 'M', 'ǒ'),
+ (0x1D2, 'V'),
+ (0x1D3, 'M', 'ǔ'),
+ (0x1D4, 'V'),
+ (0x1D5, 'M', 'ǖ'),
+ (0x1D6, 'V'),
+ (0x1D7, 'M', 'ǘ'),
+ (0x1D8, 'V'),
+ (0x1D9, 'M', 'ǚ'),
+ (0x1DA, 'V'),
+ (0x1DB, 'M', 'ǜ'),
+ (0x1DC, 'V'),
+ (0x1DE, 'M', 'ǟ'),
+ (0x1DF, 'V'),
+ (0x1E0, 'M', 'ǡ'),
+ (0x1E1, 'V'),
+ (0x1E2, 'M', 'ǣ'),
+ (0x1E3, 'V'),
+ (0x1E4, 'M', 'ǥ'),
+ (0x1E5, 'V'),
+ (0x1E6, 'M', 'ǧ'),
+ (0x1E7, 'V'),
+ (0x1E8, 'M', 'ǩ'),
+ (0x1E9, 'V'),
+ (0x1EA, 'M', 'ǫ'),
+ (0x1EB, 'V'),
+ (0x1EC, 'M', 'ǭ'),
+ (0x1ED, 'V'),
+ (0x1EE, 'M', 'ǯ'),
+ (0x1EF, 'V'),
+ (0x1F1, 'M', 'dz'),
+ (0x1F4, 'M', 'ǵ'),
+ (0x1F5, 'V'),
+ (0x1F6, 'M', 'ƕ'),
+ (0x1F7, 'M', 'ƿ'),
+ (0x1F8, 'M', 'ǹ'),
+ (0x1F9, 'V'),
+ (0x1FA, 'M', 'ǻ'),
+ (0x1FB, 'V'),
+ (0x1FC, 'M', 'ǽ'),
+ (0x1FD, 'V'),
+ (0x1FE, 'M', 'ǿ'),
+ (0x1FF, 'V'),
+ (0x200, 'M', 'ȁ'),
+ (0x201, 'V'),
+ (0x202, 'M', 'ȃ'),
+ (0x203, 'V'),
+ (0x204, 'M', 'ȅ'),
+ (0x205, 'V'),
+ (0x206, 'M', 'ȇ'),
+ (0x207, 'V'),
+ (0x208, 'M', 'ȉ'),
+ (0x209, 'V'),
+ (0x20A, 'M', 'ȋ'),
+ (0x20B, 'V'),
+ (0x20C, 'M', 'ȍ'),
+ ]
+
+def _seg_5() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x20D, 'V'),
+ (0x20E, 'M', 'ȏ'),
+ (0x20F, 'V'),
+ (0x210, 'M', 'ȑ'),
+ (0x211, 'V'),
+ (0x212, 'M', 'ȓ'),
+ (0x213, 'V'),
+ (0x214, 'M', 'ȕ'),
+ (0x215, 'V'),
+ (0x216, 'M', 'ȗ'),
+ (0x217, 'V'),
+ (0x218, 'M', 'ș'),
+ (0x219, 'V'),
+ (0x21A, 'M', 'ț'),
+ (0x21B, 'V'),
+ (0x21C, 'M', 'ȝ'),
+ (0x21D, 'V'),
+ (0x21E, 'M', 'ȟ'),
+ (0x21F, 'V'),
+ (0x220, 'M', 'ƞ'),
+ (0x221, 'V'),
+ (0x222, 'M', 'ȣ'),
+ (0x223, 'V'),
+ (0x224, 'M', 'ȥ'),
+ (0x225, 'V'),
+ (0x226, 'M', 'ȧ'),
+ (0x227, 'V'),
+ (0x228, 'M', 'ȩ'),
+ (0x229, 'V'),
+ (0x22A, 'M', 'ȫ'),
+ (0x22B, 'V'),
+ (0x22C, 'M', 'ȭ'),
+ (0x22D, 'V'),
+ (0x22E, 'M', 'ȯ'),
+ (0x22F, 'V'),
+ (0x230, 'M', 'ȱ'),
+ (0x231, 'V'),
+ (0x232, 'M', 'ȳ'),
+ (0x233, 'V'),
+ (0x23A, 'M', 'ⱥ'),
+ (0x23B, 'M', 'ȼ'),
+ (0x23C, 'V'),
+ (0x23D, 'M', 'ƚ'),
+ (0x23E, 'M', 'ⱦ'),
+ (0x23F, 'V'),
+ (0x241, 'M', 'ɂ'),
+ (0x242, 'V'),
+ (0x243, 'M', 'ƀ'),
+ (0x244, 'M', 'ʉ'),
+ (0x245, 'M', 'ʌ'),
+ (0x246, 'M', 'ɇ'),
+ (0x247, 'V'),
+ (0x248, 'M', 'ɉ'),
+ (0x249, 'V'),
+ (0x24A, 'M', 'ɋ'),
+ (0x24B, 'V'),
+ (0x24C, 'M', 'ɍ'),
+ (0x24D, 'V'),
+ (0x24E, 'M', 'ɏ'),
+ (0x24F, 'V'),
+ (0x2B0, 'M', 'h'),
+ (0x2B1, 'M', 'ɦ'),
+ (0x2B2, 'M', 'j'),
+ (0x2B3, 'M', 'r'),
+ (0x2B4, 'M', 'ɹ'),
+ (0x2B5, 'M', 'ɻ'),
+ (0x2B6, 'M', 'ʁ'),
+ (0x2B7, 'M', 'w'),
+ (0x2B8, 'M', 'y'),
+ (0x2B9, 'V'),
+ (0x2D8, '3', ' ̆'),
+ (0x2D9, '3', ' ̇'),
+ (0x2DA, '3', ' ̊'),
+ (0x2DB, '3', ' ̨'),
+ (0x2DC, '3', ' ̃'),
+ (0x2DD, '3', ' ̋'),
+ (0x2DE, 'V'),
+ (0x2E0, 'M', 'ɣ'),
+ (0x2E1, 'M', 'l'),
+ (0x2E2, 'M', 's'),
+ (0x2E3, 'M', 'x'),
+ (0x2E4, 'M', 'ʕ'),
+ (0x2E5, 'V'),
+ (0x340, 'M', '̀'),
+ (0x341, 'M', '́'),
+ (0x342, 'V'),
+ (0x343, 'M', '̓'),
+ (0x344, 'M', '̈́'),
+ (0x345, 'M', 'ι'),
+ (0x346, 'V'),
+ (0x34F, 'I'),
+ (0x350, 'V'),
+ (0x370, 'M', 'ͱ'),
+ (0x371, 'V'),
+ (0x372, 'M', 'ͳ'),
+ (0x373, 'V'),
+ (0x374, 'M', 'ʹ'),
+ (0x375, 'V'),
+ (0x376, 'M', 'ͷ'),
+ (0x377, 'V'),
+ ]
+
+def _seg_6() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x378, 'X'),
+ (0x37A, '3', ' ι'),
+ (0x37B, 'V'),
+ (0x37E, '3', ';'),
+ (0x37F, 'M', 'ϳ'),
+ (0x380, 'X'),
+ (0x384, '3', ' ́'),
+ (0x385, '3', ' ̈́'),
+ (0x386, 'M', 'ά'),
+ (0x387, 'M', '·'),
+ (0x388, 'M', 'έ'),
+ (0x389, 'M', 'ή'),
+ (0x38A, 'M', 'ί'),
+ (0x38B, 'X'),
+ (0x38C, 'M', 'ό'),
+ (0x38D, 'X'),
+ (0x38E, 'M', 'ύ'),
+ (0x38F, 'M', 'ώ'),
+ (0x390, 'V'),
+ (0x391, 'M', 'α'),
+ (0x392, 'M', 'β'),
+ (0x393, 'M', 'γ'),
+ (0x394, 'M', 'δ'),
+ (0x395, 'M', 'ε'),
+ (0x396, 'M', 'ζ'),
+ (0x397, 'M', 'η'),
+ (0x398, 'M', 'θ'),
+ (0x399, 'M', 'ι'),
+ (0x39A, 'M', 'κ'),
+ (0x39B, 'M', 'λ'),
+ (0x39C, 'M', 'μ'),
+ (0x39D, 'M', 'ν'),
+ (0x39E, 'M', 'ξ'),
+ (0x39F, 'M', 'ο'),
+ (0x3A0, 'M', 'π'),
+ (0x3A1, 'M', 'ρ'),
+ (0x3A2, 'X'),
+ (0x3A3, 'M', 'σ'),
+ (0x3A4, 'M', 'τ'),
+ (0x3A5, 'M', 'υ'),
+ (0x3A6, 'M', 'φ'),
+ (0x3A7, 'M', 'χ'),
+ (0x3A8, 'M', 'ψ'),
+ (0x3A9, 'M', 'ω'),
+ (0x3AA, 'M', 'ϊ'),
+ (0x3AB, 'M', 'ϋ'),
+ (0x3AC, 'V'),
+ (0x3C2, 'D', 'σ'),
+ (0x3C3, 'V'),
+ (0x3CF, 'M', 'ϗ'),
+ (0x3D0, 'M', 'β'),
+ (0x3D1, 'M', 'θ'),
+ (0x3D2, 'M', 'υ'),
+ (0x3D3, 'M', 'ύ'),
+ (0x3D4, 'M', 'ϋ'),
+ (0x3D5, 'M', 'φ'),
+ (0x3D6, 'M', 'π'),
+ (0x3D7, 'V'),
+ (0x3D8, 'M', 'ϙ'),
+ (0x3D9, 'V'),
+ (0x3DA, 'M', 'ϛ'),
+ (0x3DB, 'V'),
+ (0x3DC, 'M', 'ϝ'),
+ (0x3DD, 'V'),
+ (0x3DE, 'M', 'ϟ'),
+ (0x3DF, 'V'),
+ (0x3E0, 'M', 'ϡ'),
+ (0x3E1, 'V'),
+ (0x3E2, 'M', 'ϣ'),
+ (0x3E3, 'V'),
+ (0x3E4, 'M', 'ϥ'),
+ (0x3E5, 'V'),
+ (0x3E6, 'M', 'ϧ'),
+ (0x3E7, 'V'),
+ (0x3E8, 'M', 'ϩ'),
+ (0x3E9, 'V'),
+ (0x3EA, 'M', 'ϫ'),
+ (0x3EB, 'V'),
+ (0x3EC, 'M', 'ϭ'),
+ (0x3ED, 'V'),
+ (0x3EE, 'M', 'ϯ'),
+ (0x3EF, 'V'),
+ (0x3F0, 'M', 'κ'),
+ (0x3F1, 'M', 'ρ'),
+ (0x3F2, 'M', 'σ'),
+ (0x3F3, 'V'),
+ (0x3F4, 'M', 'θ'),
+ (0x3F5, 'M', 'ε'),
+ (0x3F6, 'V'),
+ (0x3F7, 'M', 'ϸ'),
+ (0x3F8, 'V'),
+ (0x3F9, 'M', 'σ'),
+ (0x3FA, 'M', 'ϻ'),
+ (0x3FB, 'V'),
+ (0x3FD, 'M', 'ͻ'),
+ (0x3FE, 'M', 'ͼ'),
+ (0x3FF, 'M', 'ͽ'),
+ (0x400, 'M', 'ѐ'),
+ (0x401, 'M', 'ё'),
+ (0x402, 'M', 'ђ'),
+ ]
+
+def _seg_7() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x403, 'M', 'ѓ'),
+ (0x404, 'M', 'є'),
+ (0x405, 'M', 'ѕ'),
+ (0x406, 'M', 'і'),
+ (0x407, 'M', 'ї'),
+ (0x408, 'M', 'ј'),
+ (0x409, 'M', 'љ'),
+ (0x40A, 'M', 'њ'),
+ (0x40B, 'M', 'ћ'),
+ (0x40C, 'M', 'ќ'),
+ (0x40D, 'M', 'ѝ'),
+ (0x40E, 'M', 'ў'),
+ (0x40F, 'M', 'џ'),
+ (0x410, 'M', 'а'),
+ (0x411, 'M', 'б'),
+ (0x412, 'M', 'в'),
+ (0x413, 'M', 'г'),
+ (0x414, 'M', 'д'),
+ (0x415, 'M', 'е'),
+ (0x416, 'M', 'ж'),
+ (0x417, 'M', 'з'),
+ (0x418, 'M', 'и'),
+ (0x419, 'M', 'й'),
+ (0x41A, 'M', 'к'),
+ (0x41B, 'M', 'л'),
+ (0x41C, 'M', 'м'),
+ (0x41D, 'M', 'н'),
+ (0x41E, 'M', 'о'),
+ (0x41F, 'M', 'п'),
+ (0x420, 'M', 'р'),
+ (0x421, 'M', 'с'),
+ (0x422, 'M', 'т'),
+ (0x423, 'M', 'у'),
+ (0x424, 'M', 'ф'),
+ (0x425, 'M', 'х'),
+ (0x426, 'M', 'ц'),
+ (0x427, 'M', 'ч'),
+ (0x428, 'M', 'ш'),
+ (0x429, 'M', 'щ'),
+ (0x42A, 'M', 'ъ'),
+ (0x42B, 'M', 'ы'),
+ (0x42C, 'M', 'ь'),
+ (0x42D, 'M', 'э'),
+ (0x42E, 'M', 'ю'),
+ (0x42F, 'M', 'я'),
+ (0x430, 'V'),
+ (0x460, 'M', 'ѡ'),
+ (0x461, 'V'),
+ (0x462, 'M', 'ѣ'),
+ (0x463, 'V'),
+ (0x464, 'M', 'ѥ'),
+ (0x465, 'V'),
+ (0x466, 'M', 'ѧ'),
+ (0x467, 'V'),
+ (0x468, 'M', 'ѩ'),
+ (0x469, 'V'),
+ (0x46A, 'M', 'ѫ'),
+ (0x46B, 'V'),
+ (0x46C, 'M', 'ѭ'),
+ (0x46D, 'V'),
+ (0x46E, 'M', 'ѯ'),
+ (0x46F, 'V'),
+ (0x470, 'M', 'ѱ'),
+ (0x471, 'V'),
+ (0x472, 'M', 'ѳ'),
+ (0x473, 'V'),
+ (0x474, 'M', 'ѵ'),
+ (0x475, 'V'),
+ (0x476, 'M', 'ѷ'),
+ (0x477, 'V'),
+ (0x478, 'M', 'ѹ'),
+ (0x479, 'V'),
+ (0x47A, 'M', 'ѻ'),
+ (0x47B, 'V'),
+ (0x47C, 'M', 'ѽ'),
+ (0x47D, 'V'),
+ (0x47E, 'M', 'ѿ'),
+ (0x47F, 'V'),
+ (0x480, 'M', 'ҁ'),
+ (0x481, 'V'),
+ (0x48A, 'M', 'ҋ'),
+ (0x48B, 'V'),
+ (0x48C, 'M', 'ҍ'),
+ (0x48D, 'V'),
+ (0x48E, 'M', 'ҏ'),
+ (0x48F, 'V'),
+ (0x490, 'M', 'ґ'),
+ (0x491, 'V'),
+ (0x492, 'M', 'ғ'),
+ (0x493, 'V'),
+ (0x494, 'M', 'ҕ'),
+ (0x495, 'V'),
+ (0x496, 'M', 'җ'),
+ (0x497, 'V'),
+ (0x498, 'M', 'ҙ'),
+ (0x499, 'V'),
+ (0x49A, 'M', 'қ'),
+ (0x49B, 'V'),
+ (0x49C, 'M', 'ҝ'),
+ (0x49D, 'V'),
+ ]
+
+def _seg_8() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x49E, 'M', 'ҟ'),
+ (0x49F, 'V'),
+ (0x4A0, 'M', 'ҡ'),
+ (0x4A1, 'V'),
+ (0x4A2, 'M', 'ң'),
+ (0x4A3, 'V'),
+ (0x4A4, 'M', 'ҥ'),
+ (0x4A5, 'V'),
+ (0x4A6, 'M', 'ҧ'),
+ (0x4A7, 'V'),
+ (0x4A8, 'M', 'ҩ'),
+ (0x4A9, 'V'),
+ (0x4AA, 'M', 'ҫ'),
+ (0x4AB, 'V'),
+ (0x4AC, 'M', 'ҭ'),
+ (0x4AD, 'V'),
+ (0x4AE, 'M', 'ү'),
+ (0x4AF, 'V'),
+ (0x4B0, 'M', 'ұ'),
+ (0x4B1, 'V'),
+ (0x4B2, 'M', 'ҳ'),
+ (0x4B3, 'V'),
+ (0x4B4, 'M', 'ҵ'),
+ (0x4B5, 'V'),
+ (0x4B6, 'M', 'ҷ'),
+ (0x4B7, 'V'),
+ (0x4B8, 'M', 'ҹ'),
+ (0x4B9, 'V'),
+ (0x4BA, 'M', 'һ'),
+ (0x4BB, 'V'),
+ (0x4BC, 'M', 'ҽ'),
+ (0x4BD, 'V'),
+ (0x4BE, 'M', 'ҿ'),
+ (0x4BF, 'V'),
+ (0x4C0, 'X'),
+ (0x4C1, 'M', 'ӂ'),
+ (0x4C2, 'V'),
+ (0x4C3, 'M', 'ӄ'),
+ (0x4C4, 'V'),
+ (0x4C5, 'M', 'ӆ'),
+ (0x4C6, 'V'),
+ (0x4C7, 'M', 'ӈ'),
+ (0x4C8, 'V'),
+ (0x4C9, 'M', 'ӊ'),
+ (0x4CA, 'V'),
+ (0x4CB, 'M', 'ӌ'),
+ (0x4CC, 'V'),
+ (0x4CD, 'M', 'ӎ'),
+ (0x4CE, 'V'),
+ (0x4D0, 'M', 'ӑ'),
+ (0x4D1, 'V'),
+ (0x4D2, 'M', 'ӓ'),
+ (0x4D3, 'V'),
+ (0x4D4, 'M', 'ӕ'),
+ (0x4D5, 'V'),
+ (0x4D6, 'M', 'ӗ'),
+ (0x4D7, 'V'),
+ (0x4D8, 'M', 'ә'),
+ (0x4D9, 'V'),
+ (0x4DA, 'M', 'ӛ'),
+ (0x4DB, 'V'),
+ (0x4DC, 'M', 'ӝ'),
+ (0x4DD, 'V'),
+ (0x4DE, 'M', 'ӟ'),
+ (0x4DF, 'V'),
+ (0x4E0, 'M', 'ӡ'),
+ (0x4E1, 'V'),
+ (0x4E2, 'M', 'ӣ'),
+ (0x4E3, 'V'),
+ (0x4E4, 'M', 'ӥ'),
+ (0x4E5, 'V'),
+ (0x4E6, 'M', 'ӧ'),
+ (0x4E7, 'V'),
+ (0x4E8, 'M', 'ө'),
+ (0x4E9, 'V'),
+ (0x4EA, 'M', 'ӫ'),
+ (0x4EB, 'V'),
+ (0x4EC, 'M', 'ӭ'),
+ (0x4ED, 'V'),
+ (0x4EE, 'M', 'ӯ'),
+ (0x4EF, 'V'),
+ (0x4F0, 'M', 'ӱ'),
+ (0x4F1, 'V'),
+ (0x4F2, 'M', 'ӳ'),
+ (0x4F3, 'V'),
+ (0x4F4, 'M', 'ӵ'),
+ (0x4F5, 'V'),
+ (0x4F6, 'M', 'ӷ'),
+ (0x4F7, 'V'),
+ (0x4F8, 'M', 'ӹ'),
+ (0x4F9, 'V'),
+ (0x4FA, 'M', 'ӻ'),
+ (0x4FB, 'V'),
+ (0x4FC, 'M', 'ӽ'),
+ (0x4FD, 'V'),
+ (0x4FE, 'M', 'ӿ'),
+ (0x4FF, 'V'),
+ (0x500, 'M', 'ԁ'),
+ (0x501, 'V'),
+ (0x502, 'M', 'ԃ'),
+ ]
+
+def _seg_9() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x503, 'V'),
+ (0x504, 'M', 'ԅ'),
+ (0x505, 'V'),
+ (0x506, 'M', 'ԇ'),
+ (0x507, 'V'),
+ (0x508, 'M', 'ԉ'),
+ (0x509, 'V'),
+ (0x50A, 'M', 'ԋ'),
+ (0x50B, 'V'),
+ (0x50C, 'M', 'ԍ'),
+ (0x50D, 'V'),
+ (0x50E, 'M', 'ԏ'),
+ (0x50F, 'V'),
+ (0x510, 'M', 'ԑ'),
+ (0x511, 'V'),
+ (0x512, 'M', 'ԓ'),
+ (0x513, 'V'),
+ (0x514, 'M', 'ԕ'),
+ (0x515, 'V'),
+ (0x516, 'M', 'ԗ'),
+ (0x517, 'V'),
+ (0x518, 'M', 'ԙ'),
+ (0x519, 'V'),
+ (0x51A, 'M', 'ԛ'),
+ (0x51B, 'V'),
+ (0x51C, 'M', 'ԝ'),
+ (0x51D, 'V'),
+ (0x51E, 'M', 'ԟ'),
+ (0x51F, 'V'),
+ (0x520, 'M', 'ԡ'),
+ (0x521, 'V'),
+ (0x522, 'M', 'ԣ'),
+ (0x523, 'V'),
+ (0x524, 'M', 'ԥ'),
+ (0x525, 'V'),
+ (0x526, 'M', 'ԧ'),
+ (0x527, 'V'),
+ (0x528, 'M', 'ԩ'),
+ (0x529, 'V'),
+ (0x52A, 'M', 'ԫ'),
+ (0x52B, 'V'),
+ (0x52C, 'M', 'ԭ'),
+ (0x52D, 'V'),
+ (0x52E, 'M', 'ԯ'),
+ (0x52F, 'V'),
+ (0x530, 'X'),
+ (0x531, 'M', 'ա'),
+ (0x532, 'M', 'բ'),
+ (0x533, 'M', 'գ'),
+ (0x534, 'M', 'դ'),
+ (0x535, 'M', 'ե'),
+ (0x536, 'M', 'զ'),
+ (0x537, 'M', 'է'),
+ (0x538, 'M', 'ը'),
+ (0x539, 'M', 'թ'),
+ (0x53A, 'M', 'ժ'),
+ (0x53B, 'M', 'ի'),
+ (0x53C, 'M', 'լ'),
+ (0x53D, 'M', 'խ'),
+ (0x53E, 'M', 'ծ'),
+ (0x53F, 'M', 'կ'),
+ (0x540, 'M', 'հ'),
+ (0x541, 'M', 'ձ'),
+ (0x542, 'M', 'ղ'),
+ (0x543, 'M', 'ճ'),
+ (0x544, 'M', 'մ'),
+ (0x545, 'M', 'յ'),
+ (0x546, 'M', 'ն'),
+ (0x547, 'M', 'շ'),
+ (0x548, 'M', 'ո'),
+ (0x549, 'M', 'չ'),
+ (0x54A, 'M', 'պ'),
+ (0x54B, 'M', 'ջ'),
+ (0x54C, 'M', 'ռ'),
+ (0x54D, 'M', 'ս'),
+ (0x54E, 'M', 'վ'),
+ (0x54F, 'M', 'տ'),
+ (0x550, 'M', 'ր'),
+ (0x551, 'M', 'ց'),
+ (0x552, 'M', 'ւ'),
+ (0x553, 'M', 'փ'),
+ (0x554, 'M', 'ք'),
+ (0x555, 'M', 'օ'),
+ (0x556, 'M', 'ֆ'),
+ (0x557, 'X'),
+ (0x559, 'V'),
+ (0x587, 'M', 'եւ'),
+ (0x588, 'V'),
+ (0x58B, 'X'),
+ (0x58D, 'V'),
+ (0x590, 'X'),
+ (0x591, 'V'),
+ (0x5C8, 'X'),
+ (0x5D0, 'V'),
+ (0x5EB, 'X'),
+ (0x5EF, 'V'),
+ (0x5F5, 'X'),
+ (0x606, 'V'),
+ (0x61C, 'X'),
+ (0x61D, 'V'),
+ ]
+
+def _seg_10() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x675, 'M', 'اٴ'),
+ (0x676, 'M', 'وٴ'),
+ (0x677, 'M', 'ۇٴ'),
+ (0x678, 'M', 'يٴ'),
+ (0x679, 'V'),
+ (0x6DD, 'X'),
+ (0x6DE, 'V'),
+ (0x70E, 'X'),
+ (0x710, 'V'),
+ (0x74B, 'X'),
+ (0x74D, 'V'),
+ (0x7B2, 'X'),
+ (0x7C0, 'V'),
+ (0x7FB, 'X'),
+ (0x7FD, 'V'),
+ (0x82E, 'X'),
+ (0x830, 'V'),
+ (0x83F, 'X'),
+ (0x840, 'V'),
+ (0x85C, 'X'),
+ (0x85E, 'V'),
+ (0x85F, 'X'),
+ (0x860, 'V'),
+ (0x86B, 'X'),
+ (0x870, 'V'),
+ (0x88F, 'X'),
+ (0x898, 'V'),
+ (0x8E2, 'X'),
+ (0x8E3, 'V'),
+ (0x958, 'M', 'क़'),
+ (0x959, 'M', 'ख़'),
+ (0x95A, 'M', 'ग़'),
+ (0x95B, 'M', 'ज़'),
+ (0x95C, 'M', 'ड़'),
+ (0x95D, 'M', 'ढ़'),
+ (0x95E, 'M', 'फ़'),
+ (0x95F, 'M', 'य़'),
+ (0x960, 'V'),
+ (0x984, 'X'),
+ (0x985, 'V'),
+ (0x98D, 'X'),
+ (0x98F, 'V'),
+ (0x991, 'X'),
+ (0x993, 'V'),
+ (0x9A9, 'X'),
+ (0x9AA, 'V'),
+ (0x9B1, 'X'),
+ (0x9B2, 'V'),
+ (0x9B3, 'X'),
+ (0x9B6, 'V'),
+ (0x9BA, 'X'),
+ (0x9BC, 'V'),
+ (0x9C5, 'X'),
+ (0x9C7, 'V'),
+ (0x9C9, 'X'),
+ (0x9CB, 'V'),
+ (0x9CF, 'X'),
+ (0x9D7, 'V'),
+ (0x9D8, 'X'),
+ (0x9DC, 'M', 'ড়'),
+ (0x9DD, 'M', 'ঢ়'),
+ (0x9DE, 'X'),
+ (0x9DF, 'M', 'য়'),
+ (0x9E0, 'V'),
+ (0x9E4, 'X'),
+ (0x9E6, 'V'),
+ (0x9FF, 'X'),
+ (0xA01, 'V'),
+ (0xA04, 'X'),
+ (0xA05, 'V'),
+ (0xA0B, 'X'),
+ (0xA0F, 'V'),
+ (0xA11, 'X'),
+ (0xA13, 'V'),
+ (0xA29, 'X'),
+ (0xA2A, 'V'),
+ (0xA31, 'X'),
+ (0xA32, 'V'),
+ (0xA33, 'M', 'ਲ਼'),
+ (0xA34, 'X'),
+ (0xA35, 'V'),
+ (0xA36, 'M', 'ਸ਼'),
+ (0xA37, 'X'),
+ (0xA38, 'V'),
+ (0xA3A, 'X'),
+ (0xA3C, 'V'),
+ (0xA3D, 'X'),
+ (0xA3E, 'V'),
+ (0xA43, 'X'),
+ (0xA47, 'V'),
+ (0xA49, 'X'),
+ (0xA4B, 'V'),
+ (0xA4E, 'X'),
+ (0xA51, 'V'),
+ (0xA52, 'X'),
+ (0xA59, 'M', 'ਖ਼'),
+ (0xA5A, 'M', 'ਗ਼'),
+ (0xA5B, 'M', 'ਜ਼'),
+ (0xA5C, 'V'),
+ (0xA5D, 'X'),
+ ]
+
+def _seg_11() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xA5E, 'M', 'ਫ਼'),
+ (0xA5F, 'X'),
+ (0xA66, 'V'),
+ (0xA77, 'X'),
+ (0xA81, 'V'),
+ (0xA84, 'X'),
+ (0xA85, 'V'),
+ (0xA8E, 'X'),
+ (0xA8F, 'V'),
+ (0xA92, 'X'),
+ (0xA93, 'V'),
+ (0xAA9, 'X'),
+ (0xAAA, 'V'),
+ (0xAB1, 'X'),
+ (0xAB2, 'V'),
+ (0xAB4, 'X'),
+ (0xAB5, 'V'),
+ (0xABA, 'X'),
+ (0xABC, 'V'),
+ (0xAC6, 'X'),
+ (0xAC7, 'V'),
+ (0xACA, 'X'),
+ (0xACB, 'V'),
+ (0xACE, 'X'),
+ (0xAD0, 'V'),
+ (0xAD1, 'X'),
+ (0xAE0, 'V'),
+ (0xAE4, 'X'),
+ (0xAE6, 'V'),
+ (0xAF2, 'X'),
+ (0xAF9, 'V'),
+ (0xB00, 'X'),
+ (0xB01, 'V'),
+ (0xB04, 'X'),
+ (0xB05, 'V'),
+ (0xB0D, 'X'),
+ (0xB0F, 'V'),
+ (0xB11, 'X'),
+ (0xB13, 'V'),
+ (0xB29, 'X'),
+ (0xB2A, 'V'),
+ (0xB31, 'X'),
+ (0xB32, 'V'),
+ (0xB34, 'X'),
+ (0xB35, 'V'),
+ (0xB3A, 'X'),
+ (0xB3C, 'V'),
+ (0xB45, 'X'),
+ (0xB47, 'V'),
+ (0xB49, 'X'),
+ (0xB4B, 'V'),
+ (0xB4E, 'X'),
+ (0xB55, 'V'),
+ (0xB58, 'X'),
+ (0xB5C, 'M', 'ଡ଼'),
+ (0xB5D, 'M', 'ଢ଼'),
+ (0xB5E, 'X'),
+ (0xB5F, 'V'),
+ (0xB64, 'X'),
+ (0xB66, 'V'),
+ (0xB78, 'X'),
+ (0xB82, 'V'),
+ (0xB84, 'X'),
+ (0xB85, 'V'),
+ (0xB8B, 'X'),
+ (0xB8E, 'V'),
+ (0xB91, 'X'),
+ (0xB92, 'V'),
+ (0xB96, 'X'),
+ (0xB99, 'V'),
+ (0xB9B, 'X'),
+ (0xB9C, 'V'),
+ (0xB9D, 'X'),
+ (0xB9E, 'V'),
+ (0xBA0, 'X'),
+ (0xBA3, 'V'),
+ (0xBA5, 'X'),
+ (0xBA8, 'V'),
+ (0xBAB, 'X'),
+ (0xBAE, 'V'),
+ (0xBBA, 'X'),
+ (0xBBE, 'V'),
+ (0xBC3, 'X'),
+ (0xBC6, 'V'),
+ (0xBC9, 'X'),
+ (0xBCA, 'V'),
+ (0xBCE, 'X'),
+ (0xBD0, 'V'),
+ (0xBD1, 'X'),
+ (0xBD7, 'V'),
+ (0xBD8, 'X'),
+ (0xBE6, 'V'),
+ (0xBFB, 'X'),
+ (0xC00, 'V'),
+ (0xC0D, 'X'),
+ (0xC0E, 'V'),
+ (0xC11, 'X'),
+ (0xC12, 'V'),
+ (0xC29, 'X'),
+ (0xC2A, 'V'),
+ ]
+
+def _seg_12() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xC3A, 'X'),
+ (0xC3C, 'V'),
+ (0xC45, 'X'),
+ (0xC46, 'V'),
+ (0xC49, 'X'),
+ (0xC4A, 'V'),
+ (0xC4E, 'X'),
+ (0xC55, 'V'),
+ (0xC57, 'X'),
+ (0xC58, 'V'),
+ (0xC5B, 'X'),
+ (0xC5D, 'V'),
+ (0xC5E, 'X'),
+ (0xC60, 'V'),
+ (0xC64, 'X'),
+ (0xC66, 'V'),
+ (0xC70, 'X'),
+ (0xC77, 'V'),
+ (0xC8D, 'X'),
+ (0xC8E, 'V'),
+ (0xC91, 'X'),
+ (0xC92, 'V'),
+ (0xCA9, 'X'),
+ (0xCAA, 'V'),
+ (0xCB4, 'X'),
+ (0xCB5, 'V'),
+ (0xCBA, 'X'),
+ (0xCBC, 'V'),
+ (0xCC5, 'X'),
+ (0xCC6, 'V'),
+ (0xCC9, 'X'),
+ (0xCCA, 'V'),
+ (0xCCE, 'X'),
+ (0xCD5, 'V'),
+ (0xCD7, 'X'),
+ (0xCDD, 'V'),
+ (0xCDF, 'X'),
+ (0xCE0, 'V'),
+ (0xCE4, 'X'),
+ (0xCE6, 'V'),
+ (0xCF0, 'X'),
+ (0xCF1, 'V'),
+ (0xCF4, 'X'),
+ (0xD00, 'V'),
+ (0xD0D, 'X'),
+ (0xD0E, 'V'),
+ (0xD11, 'X'),
+ (0xD12, 'V'),
+ (0xD45, 'X'),
+ (0xD46, 'V'),
+ (0xD49, 'X'),
+ (0xD4A, 'V'),
+ (0xD50, 'X'),
+ (0xD54, 'V'),
+ (0xD64, 'X'),
+ (0xD66, 'V'),
+ (0xD80, 'X'),
+ (0xD81, 'V'),
+ (0xD84, 'X'),
+ (0xD85, 'V'),
+ (0xD97, 'X'),
+ (0xD9A, 'V'),
+ (0xDB2, 'X'),
+ (0xDB3, 'V'),
+ (0xDBC, 'X'),
+ (0xDBD, 'V'),
+ (0xDBE, 'X'),
+ (0xDC0, 'V'),
+ (0xDC7, 'X'),
+ (0xDCA, 'V'),
+ (0xDCB, 'X'),
+ (0xDCF, 'V'),
+ (0xDD5, 'X'),
+ (0xDD6, 'V'),
+ (0xDD7, 'X'),
+ (0xDD8, 'V'),
+ (0xDE0, 'X'),
+ (0xDE6, 'V'),
+ (0xDF0, 'X'),
+ (0xDF2, 'V'),
+ (0xDF5, 'X'),
+ (0xE01, 'V'),
+ (0xE33, 'M', 'ํา'),
+ (0xE34, 'V'),
+ (0xE3B, 'X'),
+ (0xE3F, 'V'),
+ (0xE5C, 'X'),
+ (0xE81, 'V'),
+ (0xE83, 'X'),
+ (0xE84, 'V'),
+ (0xE85, 'X'),
+ (0xE86, 'V'),
+ (0xE8B, 'X'),
+ (0xE8C, 'V'),
+ (0xEA4, 'X'),
+ (0xEA5, 'V'),
+ (0xEA6, 'X'),
+ (0xEA7, 'V'),
+ (0xEB3, 'M', 'ໍາ'),
+ (0xEB4, 'V'),
+ ]
+
+def _seg_13() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xEBE, 'X'),
+ (0xEC0, 'V'),
+ (0xEC5, 'X'),
+ (0xEC6, 'V'),
+ (0xEC7, 'X'),
+ (0xEC8, 'V'),
+ (0xECF, 'X'),
+ (0xED0, 'V'),
+ (0xEDA, 'X'),
+ (0xEDC, 'M', 'ຫນ'),
+ (0xEDD, 'M', 'ຫມ'),
+ (0xEDE, 'V'),
+ (0xEE0, 'X'),
+ (0xF00, 'V'),
+ (0xF0C, 'M', '་'),
+ (0xF0D, 'V'),
+ (0xF43, 'M', 'གྷ'),
+ (0xF44, 'V'),
+ (0xF48, 'X'),
+ (0xF49, 'V'),
+ (0xF4D, 'M', 'ཌྷ'),
+ (0xF4E, 'V'),
+ (0xF52, 'M', 'དྷ'),
+ (0xF53, 'V'),
+ (0xF57, 'M', 'བྷ'),
+ (0xF58, 'V'),
+ (0xF5C, 'M', 'ཛྷ'),
+ (0xF5D, 'V'),
+ (0xF69, 'M', 'ཀྵ'),
+ (0xF6A, 'V'),
+ (0xF6D, 'X'),
+ (0xF71, 'V'),
+ (0xF73, 'M', 'ཱི'),
+ (0xF74, 'V'),
+ (0xF75, 'M', 'ཱུ'),
+ (0xF76, 'M', 'ྲྀ'),
+ (0xF77, 'M', 'ྲཱྀ'),
+ (0xF78, 'M', 'ླྀ'),
+ (0xF79, 'M', 'ླཱྀ'),
+ (0xF7A, 'V'),
+ (0xF81, 'M', 'ཱྀ'),
+ (0xF82, 'V'),
+ (0xF93, 'M', 'ྒྷ'),
+ (0xF94, 'V'),
+ (0xF98, 'X'),
+ (0xF99, 'V'),
+ (0xF9D, 'M', 'ྜྷ'),
+ (0xF9E, 'V'),
+ (0xFA2, 'M', 'ྡྷ'),
+ (0xFA3, 'V'),
+ (0xFA7, 'M', 'ྦྷ'),
+ (0xFA8, 'V'),
+ (0xFAC, 'M', 'ྫྷ'),
+ (0xFAD, 'V'),
+ (0xFB9, 'M', 'ྐྵ'),
+ (0xFBA, 'V'),
+ (0xFBD, 'X'),
+ (0xFBE, 'V'),
+ (0xFCD, 'X'),
+ (0xFCE, 'V'),
+ (0xFDB, 'X'),
+ (0x1000, 'V'),
+ (0x10A0, 'X'),
+ (0x10C7, 'M', 'ⴧ'),
+ (0x10C8, 'X'),
+ (0x10CD, 'M', 'ⴭ'),
+ (0x10CE, 'X'),
+ (0x10D0, 'V'),
+ (0x10FC, 'M', 'ნ'),
+ (0x10FD, 'V'),
+ (0x115F, 'X'),
+ (0x1161, 'V'),
+ (0x1249, 'X'),
+ (0x124A, 'V'),
+ (0x124E, 'X'),
+ (0x1250, 'V'),
+ (0x1257, 'X'),
+ (0x1258, 'V'),
+ (0x1259, 'X'),
+ (0x125A, 'V'),
+ (0x125E, 'X'),
+ (0x1260, 'V'),
+ (0x1289, 'X'),
+ (0x128A, 'V'),
+ (0x128E, 'X'),
+ (0x1290, 'V'),
+ (0x12B1, 'X'),
+ (0x12B2, 'V'),
+ (0x12B6, 'X'),
+ (0x12B8, 'V'),
+ (0x12BF, 'X'),
+ (0x12C0, 'V'),
+ (0x12C1, 'X'),
+ (0x12C2, 'V'),
+ (0x12C6, 'X'),
+ (0x12C8, 'V'),
+ (0x12D7, 'X'),
+ (0x12D8, 'V'),
+ (0x1311, 'X'),
+ (0x1312, 'V'),
+ ]
+
+def _seg_14() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1316, 'X'),
+ (0x1318, 'V'),
+ (0x135B, 'X'),
+ (0x135D, 'V'),
+ (0x137D, 'X'),
+ (0x1380, 'V'),
+ (0x139A, 'X'),
+ (0x13A0, 'V'),
+ (0x13F6, 'X'),
+ (0x13F8, 'M', 'Ᏸ'),
+ (0x13F9, 'M', 'Ᏹ'),
+ (0x13FA, 'M', 'Ᏺ'),
+ (0x13FB, 'M', 'Ᏻ'),
+ (0x13FC, 'M', 'Ᏼ'),
+ (0x13FD, 'M', 'Ᏽ'),
+ (0x13FE, 'X'),
+ (0x1400, 'V'),
+ (0x1680, 'X'),
+ (0x1681, 'V'),
+ (0x169D, 'X'),
+ (0x16A0, 'V'),
+ (0x16F9, 'X'),
+ (0x1700, 'V'),
+ (0x1716, 'X'),
+ (0x171F, 'V'),
+ (0x1737, 'X'),
+ (0x1740, 'V'),
+ (0x1754, 'X'),
+ (0x1760, 'V'),
+ (0x176D, 'X'),
+ (0x176E, 'V'),
+ (0x1771, 'X'),
+ (0x1772, 'V'),
+ (0x1774, 'X'),
+ (0x1780, 'V'),
+ (0x17B4, 'X'),
+ (0x17B6, 'V'),
+ (0x17DE, 'X'),
+ (0x17E0, 'V'),
+ (0x17EA, 'X'),
+ (0x17F0, 'V'),
+ (0x17FA, 'X'),
+ (0x1800, 'V'),
+ (0x1806, 'X'),
+ (0x1807, 'V'),
+ (0x180B, 'I'),
+ (0x180E, 'X'),
+ (0x180F, 'I'),
+ (0x1810, 'V'),
+ (0x181A, 'X'),
+ (0x1820, 'V'),
+ (0x1879, 'X'),
+ (0x1880, 'V'),
+ (0x18AB, 'X'),
+ (0x18B0, 'V'),
+ (0x18F6, 'X'),
+ (0x1900, 'V'),
+ (0x191F, 'X'),
+ (0x1920, 'V'),
+ (0x192C, 'X'),
+ (0x1930, 'V'),
+ (0x193C, 'X'),
+ (0x1940, 'V'),
+ (0x1941, 'X'),
+ (0x1944, 'V'),
+ (0x196E, 'X'),
+ (0x1970, 'V'),
+ (0x1975, 'X'),
+ (0x1980, 'V'),
+ (0x19AC, 'X'),
+ (0x19B0, 'V'),
+ (0x19CA, 'X'),
+ (0x19D0, 'V'),
+ (0x19DB, 'X'),
+ (0x19DE, 'V'),
+ (0x1A1C, 'X'),
+ (0x1A1E, 'V'),
+ (0x1A5F, 'X'),
+ (0x1A60, 'V'),
+ (0x1A7D, 'X'),
+ (0x1A7F, 'V'),
+ (0x1A8A, 'X'),
+ (0x1A90, 'V'),
+ (0x1A9A, 'X'),
+ (0x1AA0, 'V'),
+ (0x1AAE, 'X'),
+ (0x1AB0, 'V'),
+ (0x1ACF, 'X'),
+ (0x1B00, 'V'),
+ (0x1B4D, 'X'),
+ (0x1B50, 'V'),
+ (0x1B7F, 'X'),
+ (0x1B80, 'V'),
+ (0x1BF4, 'X'),
+ (0x1BFC, 'V'),
+ (0x1C38, 'X'),
+ (0x1C3B, 'V'),
+ (0x1C4A, 'X'),
+ (0x1C4D, 'V'),
+ (0x1C80, 'M', 'в'),
+ ]
+
+def _seg_15() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1C81, 'M', 'д'),
+ (0x1C82, 'M', 'о'),
+ (0x1C83, 'M', 'с'),
+ (0x1C84, 'M', 'т'),
+ (0x1C86, 'M', 'ъ'),
+ (0x1C87, 'M', 'ѣ'),
+ (0x1C88, 'M', 'ꙋ'),
+ (0x1C89, 'X'),
+ (0x1C90, 'M', 'ა'),
+ (0x1C91, 'M', 'ბ'),
+ (0x1C92, 'M', 'გ'),
+ (0x1C93, 'M', 'დ'),
+ (0x1C94, 'M', 'ე'),
+ (0x1C95, 'M', 'ვ'),
+ (0x1C96, 'M', 'ზ'),
+ (0x1C97, 'M', 'თ'),
+ (0x1C98, 'M', 'ი'),
+ (0x1C99, 'M', 'კ'),
+ (0x1C9A, 'M', 'ლ'),
+ (0x1C9B, 'M', 'მ'),
+ (0x1C9C, 'M', 'ნ'),
+ (0x1C9D, 'M', 'ო'),
+ (0x1C9E, 'M', 'პ'),
+ (0x1C9F, 'M', 'ჟ'),
+ (0x1CA0, 'M', 'რ'),
+ (0x1CA1, 'M', 'ს'),
+ (0x1CA2, 'M', 'ტ'),
+ (0x1CA3, 'M', 'უ'),
+ (0x1CA4, 'M', 'ფ'),
+ (0x1CA5, 'M', 'ქ'),
+ (0x1CA6, 'M', 'ღ'),
+ (0x1CA7, 'M', 'ყ'),
+ (0x1CA8, 'M', 'შ'),
+ (0x1CA9, 'M', 'ჩ'),
+ (0x1CAA, 'M', 'ც'),
+ (0x1CAB, 'M', 'ძ'),
+ (0x1CAC, 'M', 'წ'),
+ (0x1CAD, 'M', 'ჭ'),
+ (0x1CAE, 'M', 'ხ'),
+ (0x1CAF, 'M', 'ჯ'),
+ (0x1CB0, 'M', 'ჰ'),
+ (0x1CB1, 'M', 'ჱ'),
+ (0x1CB2, 'M', 'ჲ'),
+ (0x1CB3, 'M', 'ჳ'),
+ (0x1CB4, 'M', 'ჴ'),
+ (0x1CB5, 'M', 'ჵ'),
+ (0x1CB6, 'M', 'ჶ'),
+ (0x1CB7, 'M', 'ჷ'),
+ (0x1CB8, 'M', 'ჸ'),
+ (0x1CB9, 'M', 'ჹ'),
+ (0x1CBA, 'M', 'ჺ'),
+ (0x1CBB, 'X'),
+ (0x1CBD, 'M', 'ჽ'),
+ (0x1CBE, 'M', 'ჾ'),
+ (0x1CBF, 'M', 'ჿ'),
+ (0x1CC0, 'V'),
+ (0x1CC8, 'X'),
+ (0x1CD0, 'V'),
+ (0x1CFB, 'X'),
+ (0x1D00, 'V'),
+ (0x1D2C, 'M', 'a'),
+ (0x1D2D, 'M', 'æ'),
+ (0x1D2E, 'M', 'b'),
+ (0x1D2F, 'V'),
+ (0x1D30, 'M', 'd'),
+ (0x1D31, 'M', 'e'),
+ (0x1D32, 'M', 'ǝ'),
+ (0x1D33, 'M', 'g'),
+ (0x1D34, 'M', 'h'),
+ (0x1D35, 'M', 'i'),
+ (0x1D36, 'M', 'j'),
+ (0x1D37, 'M', 'k'),
+ (0x1D38, 'M', 'l'),
+ (0x1D39, 'M', 'm'),
+ (0x1D3A, 'M', 'n'),
+ (0x1D3B, 'V'),
+ (0x1D3C, 'M', 'o'),
+ (0x1D3D, 'M', 'ȣ'),
+ (0x1D3E, 'M', 'p'),
+ (0x1D3F, 'M', 'r'),
+ (0x1D40, 'M', 't'),
+ (0x1D41, 'M', 'u'),
+ (0x1D42, 'M', 'w'),
+ (0x1D43, 'M', 'a'),
+ (0x1D44, 'M', 'ɐ'),
+ (0x1D45, 'M', 'ɑ'),
+ (0x1D46, 'M', 'ᴂ'),
+ (0x1D47, 'M', 'b'),
+ (0x1D48, 'M', 'd'),
+ (0x1D49, 'M', 'e'),
+ (0x1D4A, 'M', 'ə'),
+ (0x1D4B, 'M', 'ɛ'),
+ (0x1D4C, 'M', 'ɜ'),
+ (0x1D4D, 'M', 'g'),
+ (0x1D4E, 'V'),
+ (0x1D4F, 'M', 'k'),
+ (0x1D50, 'M', 'm'),
+ (0x1D51, 'M', 'ŋ'),
+ (0x1D52, 'M', 'o'),
+ (0x1D53, 'M', 'ɔ'),
+ ]
+
+def _seg_16() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D54, 'M', 'ᴖ'),
+ (0x1D55, 'M', 'ᴗ'),
+ (0x1D56, 'M', 'p'),
+ (0x1D57, 'M', 't'),
+ (0x1D58, 'M', 'u'),
+ (0x1D59, 'M', 'ᴝ'),
+ (0x1D5A, 'M', 'ɯ'),
+ (0x1D5B, 'M', 'v'),
+ (0x1D5C, 'M', 'ᴥ'),
+ (0x1D5D, 'M', 'β'),
+ (0x1D5E, 'M', 'γ'),
+ (0x1D5F, 'M', 'δ'),
+ (0x1D60, 'M', 'φ'),
+ (0x1D61, 'M', 'χ'),
+ (0x1D62, 'M', 'i'),
+ (0x1D63, 'M', 'r'),
+ (0x1D64, 'M', 'u'),
+ (0x1D65, 'M', 'v'),
+ (0x1D66, 'M', 'β'),
+ (0x1D67, 'M', 'γ'),
+ (0x1D68, 'M', 'ρ'),
+ (0x1D69, 'M', 'φ'),
+ (0x1D6A, 'M', 'χ'),
+ (0x1D6B, 'V'),
+ (0x1D78, 'M', 'н'),
+ (0x1D79, 'V'),
+ (0x1D9B, 'M', 'ɒ'),
+ (0x1D9C, 'M', 'c'),
+ (0x1D9D, 'M', 'ɕ'),
+ (0x1D9E, 'M', 'ð'),
+ (0x1D9F, 'M', 'ɜ'),
+ (0x1DA0, 'M', 'f'),
+ (0x1DA1, 'M', 'ɟ'),
+ (0x1DA2, 'M', 'ɡ'),
+ (0x1DA3, 'M', 'ɥ'),
+ (0x1DA4, 'M', 'ɨ'),
+ (0x1DA5, 'M', 'ɩ'),
+ (0x1DA6, 'M', 'ɪ'),
+ (0x1DA7, 'M', 'ᵻ'),
+ (0x1DA8, 'M', 'ʝ'),
+ (0x1DA9, 'M', 'ɭ'),
+ (0x1DAA, 'M', 'ᶅ'),
+ (0x1DAB, 'M', 'ʟ'),
+ (0x1DAC, 'M', 'ɱ'),
+ (0x1DAD, 'M', 'ɰ'),
+ (0x1DAE, 'M', 'ɲ'),
+ (0x1DAF, 'M', 'ɳ'),
+ (0x1DB0, 'M', 'ɴ'),
+ (0x1DB1, 'M', 'ɵ'),
+ (0x1DB2, 'M', 'ɸ'),
+ (0x1DB3, 'M', 'ʂ'),
+ (0x1DB4, 'M', 'ʃ'),
+ (0x1DB5, 'M', 'ƫ'),
+ (0x1DB6, 'M', 'ʉ'),
+ (0x1DB7, 'M', 'ʊ'),
+ (0x1DB8, 'M', 'ᴜ'),
+ (0x1DB9, 'M', 'ʋ'),
+ (0x1DBA, 'M', 'ʌ'),
+ (0x1DBB, 'M', 'z'),
+ (0x1DBC, 'M', 'ʐ'),
+ (0x1DBD, 'M', 'ʑ'),
+ (0x1DBE, 'M', 'ʒ'),
+ (0x1DBF, 'M', 'θ'),
+ (0x1DC0, 'V'),
+ (0x1E00, 'M', 'ḁ'),
+ (0x1E01, 'V'),
+ (0x1E02, 'M', 'ḃ'),
+ (0x1E03, 'V'),
+ (0x1E04, 'M', 'ḅ'),
+ (0x1E05, 'V'),
+ (0x1E06, 'M', 'ḇ'),
+ (0x1E07, 'V'),
+ (0x1E08, 'M', 'ḉ'),
+ (0x1E09, 'V'),
+ (0x1E0A, 'M', 'ḋ'),
+ (0x1E0B, 'V'),
+ (0x1E0C, 'M', 'ḍ'),
+ (0x1E0D, 'V'),
+ (0x1E0E, 'M', 'ḏ'),
+ (0x1E0F, 'V'),
+ (0x1E10, 'M', 'ḑ'),
+ (0x1E11, 'V'),
+ (0x1E12, 'M', 'ḓ'),
+ (0x1E13, 'V'),
+ (0x1E14, 'M', 'ḕ'),
+ (0x1E15, 'V'),
+ (0x1E16, 'M', 'ḗ'),
+ (0x1E17, 'V'),
+ (0x1E18, 'M', 'ḙ'),
+ (0x1E19, 'V'),
+ (0x1E1A, 'M', 'ḛ'),
+ (0x1E1B, 'V'),
+ (0x1E1C, 'M', 'ḝ'),
+ (0x1E1D, 'V'),
+ (0x1E1E, 'M', 'ḟ'),
+ (0x1E1F, 'V'),
+ (0x1E20, 'M', 'ḡ'),
+ (0x1E21, 'V'),
+ (0x1E22, 'M', 'ḣ'),
+ (0x1E23, 'V'),
+ ]
+
+def _seg_17() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1E24, 'M', 'ḥ'),
+ (0x1E25, 'V'),
+ (0x1E26, 'M', 'ḧ'),
+ (0x1E27, 'V'),
+ (0x1E28, 'M', 'ḩ'),
+ (0x1E29, 'V'),
+ (0x1E2A, 'M', 'ḫ'),
+ (0x1E2B, 'V'),
+ (0x1E2C, 'M', 'ḭ'),
+ (0x1E2D, 'V'),
+ (0x1E2E, 'M', 'ḯ'),
+ (0x1E2F, 'V'),
+ (0x1E30, 'M', 'ḱ'),
+ (0x1E31, 'V'),
+ (0x1E32, 'M', 'ḳ'),
+ (0x1E33, 'V'),
+ (0x1E34, 'M', 'ḵ'),
+ (0x1E35, 'V'),
+ (0x1E36, 'M', 'ḷ'),
+ (0x1E37, 'V'),
+ (0x1E38, 'M', 'ḹ'),
+ (0x1E39, 'V'),
+ (0x1E3A, 'M', 'ḻ'),
+ (0x1E3B, 'V'),
+ (0x1E3C, 'M', 'ḽ'),
+ (0x1E3D, 'V'),
+ (0x1E3E, 'M', 'ḿ'),
+ (0x1E3F, 'V'),
+ (0x1E40, 'M', 'ṁ'),
+ (0x1E41, 'V'),
+ (0x1E42, 'M', 'ṃ'),
+ (0x1E43, 'V'),
+ (0x1E44, 'M', 'ṅ'),
+ (0x1E45, 'V'),
+ (0x1E46, 'M', 'ṇ'),
+ (0x1E47, 'V'),
+ (0x1E48, 'M', 'ṉ'),
+ (0x1E49, 'V'),
+ (0x1E4A, 'M', 'ṋ'),
+ (0x1E4B, 'V'),
+ (0x1E4C, 'M', 'ṍ'),
+ (0x1E4D, 'V'),
+ (0x1E4E, 'M', 'ṏ'),
+ (0x1E4F, 'V'),
+ (0x1E50, 'M', 'ṑ'),
+ (0x1E51, 'V'),
+ (0x1E52, 'M', 'ṓ'),
+ (0x1E53, 'V'),
+ (0x1E54, 'M', 'ṕ'),
+ (0x1E55, 'V'),
+ (0x1E56, 'M', 'ṗ'),
+ (0x1E57, 'V'),
+ (0x1E58, 'M', 'ṙ'),
+ (0x1E59, 'V'),
+ (0x1E5A, 'M', 'ṛ'),
+ (0x1E5B, 'V'),
+ (0x1E5C, 'M', 'ṝ'),
+ (0x1E5D, 'V'),
+ (0x1E5E, 'M', 'ṟ'),
+ (0x1E5F, 'V'),
+ (0x1E60, 'M', 'ṡ'),
+ (0x1E61, 'V'),
+ (0x1E62, 'M', 'ṣ'),
+ (0x1E63, 'V'),
+ (0x1E64, 'M', 'ṥ'),
+ (0x1E65, 'V'),
+ (0x1E66, 'M', 'ṧ'),
+ (0x1E67, 'V'),
+ (0x1E68, 'M', 'ṩ'),
+ (0x1E69, 'V'),
+ (0x1E6A, 'M', 'ṫ'),
+ (0x1E6B, 'V'),
+ (0x1E6C, 'M', 'ṭ'),
+ (0x1E6D, 'V'),
+ (0x1E6E, 'M', 'ṯ'),
+ (0x1E6F, 'V'),
+ (0x1E70, 'M', 'ṱ'),
+ (0x1E71, 'V'),
+ (0x1E72, 'M', 'ṳ'),
+ (0x1E73, 'V'),
+ (0x1E74, 'M', 'ṵ'),
+ (0x1E75, 'V'),
+ (0x1E76, 'M', 'ṷ'),
+ (0x1E77, 'V'),
+ (0x1E78, 'M', 'ṹ'),
+ (0x1E79, 'V'),
+ (0x1E7A, 'M', 'ṻ'),
+ (0x1E7B, 'V'),
+ (0x1E7C, 'M', 'ṽ'),
+ (0x1E7D, 'V'),
+ (0x1E7E, 'M', 'ṿ'),
+ (0x1E7F, 'V'),
+ (0x1E80, 'M', 'ẁ'),
+ (0x1E81, 'V'),
+ (0x1E82, 'M', 'ẃ'),
+ (0x1E83, 'V'),
+ (0x1E84, 'M', 'ẅ'),
+ (0x1E85, 'V'),
+ (0x1E86, 'M', 'ẇ'),
+ (0x1E87, 'V'),
+ ]
+
+def _seg_18() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1E88, 'M', 'ẉ'),
+ (0x1E89, 'V'),
+ (0x1E8A, 'M', 'ẋ'),
+ (0x1E8B, 'V'),
+ (0x1E8C, 'M', 'ẍ'),
+ (0x1E8D, 'V'),
+ (0x1E8E, 'M', 'ẏ'),
+ (0x1E8F, 'V'),
+ (0x1E90, 'M', 'ẑ'),
+ (0x1E91, 'V'),
+ (0x1E92, 'M', 'ẓ'),
+ (0x1E93, 'V'),
+ (0x1E94, 'M', 'ẕ'),
+ (0x1E95, 'V'),
+ (0x1E9A, 'M', 'aʾ'),
+ (0x1E9B, 'M', 'ṡ'),
+ (0x1E9C, 'V'),
+ (0x1E9E, 'M', 'ss'),
+ (0x1E9F, 'V'),
+ (0x1EA0, 'M', 'ạ'),
+ (0x1EA1, 'V'),
+ (0x1EA2, 'M', 'ả'),
+ (0x1EA3, 'V'),
+ (0x1EA4, 'M', 'ấ'),
+ (0x1EA5, 'V'),
+ (0x1EA6, 'M', 'ầ'),
+ (0x1EA7, 'V'),
+ (0x1EA8, 'M', 'ẩ'),
+ (0x1EA9, 'V'),
+ (0x1EAA, 'M', 'ẫ'),
+ (0x1EAB, 'V'),
+ (0x1EAC, 'M', 'ậ'),
+ (0x1EAD, 'V'),
+ (0x1EAE, 'M', 'ắ'),
+ (0x1EAF, 'V'),
+ (0x1EB0, 'M', 'ằ'),
+ (0x1EB1, 'V'),
+ (0x1EB2, 'M', 'ẳ'),
+ (0x1EB3, 'V'),
+ (0x1EB4, 'M', 'ẵ'),
+ (0x1EB5, 'V'),
+ (0x1EB6, 'M', 'ặ'),
+ (0x1EB7, 'V'),
+ (0x1EB8, 'M', 'ẹ'),
+ (0x1EB9, 'V'),
+ (0x1EBA, 'M', 'ẻ'),
+ (0x1EBB, 'V'),
+ (0x1EBC, 'M', 'ẽ'),
+ (0x1EBD, 'V'),
+ (0x1EBE, 'M', 'ế'),
+ (0x1EBF, 'V'),
+ (0x1EC0, 'M', 'ề'),
+ (0x1EC1, 'V'),
+ (0x1EC2, 'M', 'ể'),
+ (0x1EC3, 'V'),
+ (0x1EC4, 'M', 'ễ'),
+ (0x1EC5, 'V'),
+ (0x1EC6, 'M', 'ệ'),
+ (0x1EC7, 'V'),
+ (0x1EC8, 'M', 'ỉ'),
+ (0x1EC9, 'V'),
+ (0x1ECA, 'M', 'ị'),
+ (0x1ECB, 'V'),
+ (0x1ECC, 'M', 'ọ'),
+ (0x1ECD, 'V'),
+ (0x1ECE, 'M', 'ỏ'),
+ (0x1ECF, 'V'),
+ (0x1ED0, 'M', 'ố'),
+ (0x1ED1, 'V'),
+ (0x1ED2, 'M', 'ồ'),
+ (0x1ED3, 'V'),
+ (0x1ED4, 'M', 'ổ'),
+ (0x1ED5, 'V'),
+ (0x1ED6, 'M', 'ỗ'),
+ (0x1ED7, 'V'),
+ (0x1ED8, 'M', 'ộ'),
+ (0x1ED9, 'V'),
+ (0x1EDA, 'M', 'ớ'),
+ (0x1EDB, 'V'),
+ (0x1EDC, 'M', 'ờ'),
+ (0x1EDD, 'V'),
+ (0x1EDE, 'M', 'ở'),
+ (0x1EDF, 'V'),
+ (0x1EE0, 'M', 'ỡ'),
+ (0x1EE1, 'V'),
+ (0x1EE2, 'M', 'ợ'),
+ (0x1EE3, 'V'),
+ (0x1EE4, 'M', 'ụ'),
+ (0x1EE5, 'V'),
+ (0x1EE6, 'M', 'ủ'),
+ (0x1EE7, 'V'),
+ (0x1EE8, 'M', 'ứ'),
+ (0x1EE9, 'V'),
+ (0x1EEA, 'M', 'ừ'),
+ (0x1EEB, 'V'),
+ (0x1EEC, 'M', 'ử'),
+ (0x1EED, 'V'),
+ (0x1EEE, 'M', 'ữ'),
+ (0x1EEF, 'V'),
+ (0x1EF0, 'M', 'ự'),
+ ]
+
+def _seg_19() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1EF1, 'V'),
+ (0x1EF2, 'M', 'ỳ'),
+ (0x1EF3, 'V'),
+ (0x1EF4, 'M', 'ỵ'),
+ (0x1EF5, 'V'),
+ (0x1EF6, 'M', 'ỷ'),
+ (0x1EF7, 'V'),
+ (0x1EF8, 'M', 'ỹ'),
+ (0x1EF9, 'V'),
+ (0x1EFA, 'M', 'ỻ'),
+ (0x1EFB, 'V'),
+ (0x1EFC, 'M', 'ỽ'),
+ (0x1EFD, 'V'),
+ (0x1EFE, 'M', 'ỿ'),
+ (0x1EFF, 'V'),
+ (0x1F08, 'M', 'ἀ'),
+ (0x1F09, 'M', 'ἁ'),
+ (0x1F0A, 'M', 'ἂ'),
+ (0x1F0B, 'M', 'ἃ'),
+ (0x1F0C, 'M', 'ἄ'),
+ (0x1F0D, 'M', 'ἅ'),
+ (0x1F0E, 'M', 'ἆ'),
+ (0x1F0F, 'M', 'ἇ'),
+ (0x1F10, 'V'),
+ (0x1F16, 'X'),
+ (0x1F18, 'M', 'ἐ'),
+ (0x1F19, 'M', 'ἑ'),
+ (0x1F1A, 'M', 'ἒ'),
+ (0x1F1B, 'M', 'ἓ'),
+ (0x1F1C, 'M', 'ἔ'),
+ (0x1F1D, 'M', 'ἕ'),
+ (0x1F1E, 'X'),
+ (0x1F20, 'V'),
+ (0x1F28, 'M', 'ἠ'),
+ (0x1F29, 'M', 'ἡ'),
+ (0x1F2A, 'M', 'ἢ'),
+ (0x1F2B, 'M', 'ἣ'),
+ (0x1F2C, 'M', 'ἤ'),
+ (0x1F2D, 'M', 'ἥ'),
+ (0x1F2E, 'M', 'ἦ'),
+ (0x1F2F, 'M', 'ἧ'),
+ (0x1F30, 'V'),
+ (0x1F38, 'M', 'ἰ'),
+ (0x1F39, 'M', 'ἱ'),
+ (0x1F3A, 'M', 'ἲ'),
+ (0x1F3B, 'M', 'ἳ'),
+ (0x1F3C, 'M', 'ἴ'),
+ (0x1F3D, 'M', 'ἵ'),
+ (0x1F3E, 'M', 'ἶ'),
+ (0x1F3F, 'M', 'ἷ'),
+ (0x1F40, 'V'),
+ (0x1F46, 'X'),
+ (0x1F48, 'M', 'ὀ'),
+ (0x1F49, 'M', 'ὁ'),
+ (0x1F4A, 'M', 'ὂ'),
+ (0x1F4B, 'M', 'ὃ'),
+ (0x1F4C, 'M', 'ὄ'),
+ (0x1F4D, 'M', 'ὅ'),
+ (0x1F4E, 'X'),
+ (0x1F50, 'V'),
+ (0x1F58, 'X'),
+ (0x1F59, 'M', 'ὑ'),
+ (0x1F5A, 'X'),
+ (0x1F5B, 'M', 'ὓ'),
+ (0x1F5C, 'X'),
+ (0x1F5D, 'M', 'ὕ'),
+ (0x1F5E, 'X'),
+ (0x1F5F, 'M', 'ὗ'),
+ (0x1F60, 'V'),
+ (0x1F68, 'M', 'ὠ'),
+ (0x1F69, 'M', 'ὡ'),
+ (0x1F6A, 'M', 'ὢ'),
+ (0x1F6B, 'M', 'ὣ'),
+ (0x1F6C, 'M', 'ὤ'),
+ (0x1F6D, 'M', 'ὥ'),
+ (0x1F6E, 'M', 'ὦ'),
+ (0x1F6F, 'M', 'ὧ'),
+ (0x1F70, 'V'),
+ (0x1F71, 'M', 'ά'),
+ (0x1F72, 'V'),
+ (0x1F73, 'M', 'έ'),
+ (0x1F74, 'V'),
+ (0x1F75, 'M', 'ή'),
+ (0x1F76, 'V'),
+ (0x1F77, 'M', 'ί'),
+ (0x1F78, 'V'),
+ (0x1F79, 'M', 'ό'),
+ (0x1F7A, 'V'),
+ (0x1F7B, 'M', 'ύ'),
+ (0x1F7C, 'V'),
+ (0x1F7D, 'M', 'ώ'),
+ (0x1F7E, 'X'),
+ (0x1F80, 'M', 'ἀι'),
+ (0x1F81, 'M', 'ἁι'),
+ (0x1F82, 'M', 'ἂι'),
+ (0x1F83, 'M', 'ἃι'),
+ (0x1F84, 'M', 'ἄι'),
+ (0x1F85, 'M', 'ἅι'),
+ (0x1F86, 'M', 'ἆι'),
+ (0x1F87, 'M', 'ἇι'),
+ ]
+
+def _seg_20() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1F88, 'M', 'ἀι'),
+ (0x1F89, 'M', 'ἁι'),
+ (0x1F8A, 'M', 'ἂι'),
+ (0x1F8B, 'M', 'ἃι'),
+ (0x1F8C, 'M', 'ἄι'),
+ (0x1F8D, 'M', 'ἅι'),
+ (0x1F8E, 'M', 'ἆι'),
+ (0x1F8F, 'M', 'ἇι'),
+ (0x1F90, 'M', 'ἠι'),
+ (0x1F91, 'M', 'ἡι'),
+ (0x1F92, 'M', 'ἢι'),
+ (0x1F93, 'M', 'ἣι'),
+ (0x1F94, 'M', 'ἤι'),
+ (0x1F95, 'M', 'ἥι'),
+ (0x1F96, 'M', 'ἦι'),
+ (0x1F97, 'M', 'ἧι'),
+ (0x1F98, 'M', 'ἠι'),
+ (0x1F99, 'M', 'ἡι'),
+ (0x1F9A, 'M', 'ἢι'),
+ (0x1F9B, 'M', 'ἣι'),
+ (0x1F9C, 'M', 'ἤι'),
+ (0x1F9D, 'M', 'ἥι'),
+ (0x1F9E, 'M', 'ἦι'),
+ (0x1F9F, 'M', 'ἧι'),
+ (0x1FA0, 'M', 'ὠι'),
+ (0x1FA1, 'M', 'ὡι'),
+ (0x1FA2, 'M', 'ὢι'),
+ (0x1FA3, 'M', 'ὣι'),
+ (0x1FA4, 'M', 'ὤι'),
+ (0x1FA5, 'M', 'ὥι'),
+ (0x1FA6, 'M', 'ὦι'),
+ (0x1FA7, 'M', 'ὧι'),
+ (0x1FA8, 'M', 'ὠι'),
+ (0x1FA9, 'M', 'ὡι'),
+ (0x1FAA, 'M', 'ὢι'),
+ (0x1FAB, 'M', 'ὣι'),
+ (0x1FAC, 'M', 'ὤι'),
+ (0x1FAD, 'M', 'ὥι'),
+ (0x1FAE, 'M', 'ὦι'),
+ (0x1FAF, 'M', 'ὧι'),
+ (0x1FB0, 'V'),
+ (0x1FB2, 'M', 'ὰι'),
+ (0x1FB3, 'M', 'αι'),
+ (0x1FB4, 'M', 'άι'),
+ (0x1FB5, 'X'),
+ (0x1FB6, 'V'),
+ (0x1FB7, 'M', 'ᾶι'),
+ (0x1FB8, 'M', 'ᾰ'),
+ (0x1FB9, 'M', 'ᾱ'),
+ (0x1FBA, 'M', 'ὰ'),
+ (0x1FBB, 'M', 'ά'),
+ (0x1FBC, 'M', 'αι'),
+ (0x1FBD, '3', ' ̓'),
+ (0x1FBE, 'M', 'ι'),
+ (0x1FBF, '3', ' ̓'),
+ (0x1FC0, '3', ' ͂'),
+ (0x1FC1, '3', ' ̈͂'),
+ (0x1FC2, 'M', 'ὴι'),
+ (0x1FC3, 'M', 'ηι'),
+ (0x1FC4, 'M', 'ήι'),
+ (0x1FC5, 'X'),
+ (0x1FC6, 'V'),
+ (0x1FC7, 'M', 'ῆι'),
+ (0x1FC8, 'M', 'ὲ'),
+ (0x1FC9, 'M', 'έ'),
+ (0x1FCA, 'M', 'ὴ'),
+ (0x1FCB, 'M', 'ή'),
+ (0x1FCC, 'M', 'ηι'),
+ (0x1FCD, '3', ' ̓̀'),
+ (0x1FCE, '3', ' ̓́'),
+ (0x1FCF, '3', ' ̓͂'),
+ (0x1FD0, 'V'),
+ (0x1FD3, 'M', 'ΐ'),
+ (0x1FD4, 'X'),
+ (0x1FD6, 'V'),
+ (0x1FD8, 'M', 'ῐ'),
+ (0x1FD9, 'M', 'ῑ'),
+ (0x1FDA, 'M', 'ὶ'),
+ (0x1FDB, 'M', 'ί'),
+ (0x1FDC, 'X'),
+ (0x1FDD, '3', ' ̔̀'),
+ (0x1FDE, '3', ' ̔́'),
+ (0x1FDF, '3', ' ̔͂'),
+ (0x1FE0, 'V'),
+ (0x1FE3, 'M', 'ΰ'),
+ (0x1FE4, 'V'),
+ (0x1FE8, 'M', 'ῠ'),
+ (0x1FE9, 'M', 'ῡ'),
+ (0x1FEA, 'M', 'ὺ'),
+ (0x1FEB, 'M', 'ύ'),
+ (0x1FEC, 'M', 'ῥ'),
+ (0x1FED, '3', ' ̈̀'),
+ (0x1FEE, '3', ' ̈́'),
+ (0x1FEF, '3', '`'),
+ (0x1FF0, 'X'),
+ (0x1FF2, 'M', 'ὼι'),
+ (0x1FF3, 'M', 'ωι'),
+ (0x1FF4, 'M', 'ώι'),
+ (0x1FF5, 'X'),
+ (0x1FF6, 'V'),
+ ]
+
+def _seg_21() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1FF7, 'M', 'ῶι'),
+ (0x1FF8, 'M', 'ὸ'),
+ (0x1FF9, 'M', 'ό'),
+ (0x1FFA, 'M', 'ὼ'),
+ (0x1FFB, 'M', 'ώ'),
+ (0x1FFC, 'M', 'ωι'),
+ (0x1FFD, '3', ' ́'),
+ (0x1FFE, '3', ' ̔'),
+ (0x1FFF, 'X'),
+ (0x2000, '3', ' '),
+ (0x200B, 'I'),
+ (0x200C, 'D', ''),
+ (0x200E, 'X'),
+ (0x2010, 'V'),
+ (0x2011, 'M', '‐'),
+ (0x2012, 'V'),
+ (0x2017, '3', ' ̳'),
+ (0x2018, 'V'),
+ (0x2024, 'X'),
+ (0x2027, 'V'),
+ (0x2028, 'X'),
+ (0x202F, '3', ' '),
+ (0x2030, 'V'),
+ (0x2033, 'M', '′′'),
+ (0x2034, 'M', '′′′'),
+ (0x2035, 'V'),
+ (0x2036, 'M', '‵‵'),
+ (0x2037, 'M', '‵‵‵'),
+ (0x2038, 'V'),
+ (0x203C, '3', '!!'),
+ (0x203D, 'V'),
+ (0x203E, '3', ' ̅'),
+ (0x203F, 'V'),
+ (0x2047, '3', '??'),
+ (0x2048, '3', '?!'),
+ (0x2049, '3', '!?'),
+ (0x204A, 'V'),
+ (0x2057, 'M', '′′′′'),
+ (0x2058, 'V'),
+ (0x205F, '3', ' '),
+ (0x2060, 'I'),
+ (0x2061, 'X'),
+ (0x2064, 'I'),
+ (0x2065, 'X'),
+ (0x2070, 'M', '0'),
+ (0x2071, 'M', 'i'),
+ (0x2072, 'X'),
+ (0x2074, 'M', '4'),
+ (0x2075, 'M', '5'),
+ (0x2076, 'M', '6'),
+ (0x2077, 'M', '7'),
+ (0x2078, 'M', '8'),
+ (0x2079, 'M', '9'),
+ (0x207A, '3', '+'),
+ (0x207B, 'M', '−'),
+ (0x207C, '3', '='),
+ (0x207D, '3', '('),
+ (0x207E, '3', ')'),
+ (0x207F, 'M', 'n'),
+ (0x2080, 'M', '0'),
+ (0x2081, 'M', '1'),
+ (0x2082, 'M', '2'),
+ (0x2083, 'M', '3'),
+ (0x2084, 'M', '4'),
+ (0x2085, 'M', '5'),
+ (0x2086, 'M', '6'),
+ (0x2087, 'M', '7'),
+ (0x2088, 'M', '8'),
+ (0x2089, 'M', '9'),
+ (0x208A, '3', '+'),
+ (0x208B, 'M', '−'),
+ (0x208C, '3', '='),
+ (0x208D, '3', '('),
+ (0x208E, '3', ')'),
+ (0x208F, 'X'),
+ (0x2090, 'M', 'a'),
+ (0x2091, 'M', 'e'),
+ (0x2092, 'M', 'o'),
+ (0x2093, 'M', 'x'),
+ (0x2094, 'M', 'ə'),
+ (0x2095, 'M', 'h'),
+ (0x2096, 'M', 'k'),
+ (0x2097, 'M', 'l'),
+ (0x2098, 'M', 'm'),
+ (0x2099, 'M', 'n'),
+ (0x209A, 'M', 'p'),
+ (0x209B, 'M', 's'),
+ (0x209C, 'M', 't'),
+ (0x209D, 'X'),
+ (0x20A0, 'V'),
+ (0x20A8, 'M', 'rs'),
+ (0x20A9, 'V'),
+ (0x20C1, 'X'),
+ (0x20D0, 'V'),
+ (0x20F1, 'X'),
+ (0x2100, '3', 'a/c'),
+ (0x2101, '3', 'a/s'),
+ (0x2102, 'M', 'c'),
+ (0x2103, 'M', '°c'),
+ (0x2104, 'V'),
+ ]
+
+def _seg_22() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2105, '3', 'c/o'),
+ (0x2106, '3', 'c/u'),
+ (0x2107, 'M', 'ɛ'),
+ (0x2108, 'V'),
+ (0x2109, 'M', '°f'),
+ (0x210A, 'M', 'g'),
+ (0x210B, 'M', 'h'),
+ (0x210F, 'M', 'ħ'),
+ (0x2110, 'M', 'i'),
+ (0x2112, 'M', 'l'),
+ (0x2114, 'V'),
+ (0x2115, 'M', 'n'),
+ (0x2116, 'M', 'no'),
+ (0x2117, 'V'),
+ (0x2119, 'M', 'p'),
+ (0x211A, 'M', 'q'),
+ (0x211B, 'M', 'r'),
+ (0x211E, 'V'),
+ (0x2120, 'M', 'sm'),
+ (0x2121, 'M', 'tel'),
+ (0x2122, 'M', 'tm'),
+ (0x2123, 'V'),
+ (0x2124, 'M', 'z'),
+ (0x2125, 'V'),
+ (0x2126, 'M', 'ω'),
+ (0x2127, 'V'),
+ (0x2128, 'M', 'z'),
+ (0x2129, 'V'),
+ (0x212A, 'M', 'k'),
+ (0x212B, 'M', 'å'),
+ (0x212C, 'M', 'b'),
+ (0x212D, 'M', 'c'),
+ (0x212E, 'V'),
+ (0x212F, 'M', 'e'),
+ (0x2131, 'M', 'f'),
+ (0x2132, 'X'),
+ (0x2133, 'M', 'm'),
+ (0x2134, 'M', 'o'),
+ (0x2135, 'M', 'א'),
+ (0x2136, 'M', 'ב'),
+ (0x2137, 'M', 'ג'),
+ (0x2138, 'M', 'ד'),
+ (0x2139, 'M', 'i'),
+ (0x213A, 'V'),
+ (0x213B, 'M', 'fax'),
+ (0x213C, 'M', 'π'),
+ (0x213D, 'M', 'γ'),
+ (0x213F, 'M', 'π'),
+ (0x2140, 'M', '∑'),
+ (0x2141, 'V'),
+ (0x2145, 'M', 'd'),
+ (0x2147, 'M', 'e'),
+ (0x2148, 'M', 'i'),
+ (0x2149, 'M', 'j'),
+ (0x214A, 'V'),
+ (0x2150, 'M', '1⁄7'),
+ (0x2151, 'M', '1⁄9'),
+ (0x2152, 'M', '1⁄10'),
+ (0x2153, 'M', '1⁄3'),
+ (0x2154, 'M', '2⁄3'),
+ (0x2155, 'M', '1⁄5'),
+ (0x2156, 'M', '2⁄5'),
+ (0x2157, 'M', '3⁄5'),
+ (0x2158, 'M', '4⁄5'),
+ (0x2159, 'M', '1⁄6'),
+ (0x215A, 'M', '5⁄6'),
+ (0x215B, 'M', '1⁄8'),
+ (0x215C, 'M', '3⁄8'),
+ (0x215D, 'M', '5⁄8'),
+ (0x215E, 'M', '7⁄8'),
+ (0x215F, 'M', '1⁄'),
+ (0x2160, 'M', 'i'),
+ (0x2161, 'M', 'ii'),
+ (0x2162, 'M', 'iii'),
+ (0x2163, 'M', 'iv'),
+ (0x2164, 'M', 'v'),
+ (0x2165, 'M', 'vi'),
+ (0x2166, 'M', 'vii'),
+ (0x2167, 'M', 'viii'),
+ (0x2168, 'M', 'ix'),
+ (0x2169, 'M', 'x'),
+ (0x216A, 'M', 'xi'),
+ (0x216B, 'M', 'xii'),
+ (0x216C, 'M', 'l'),
+ (0x216D, 'M', 'c'),
+ (0x216E, 'M', 'd'),
+ (0x216F, 'M', 'm'),
+ (0x2170, 'M', 'i'),
+ (0x2171, 'M', 'ii'),
+ (0x2172, 'M', 'iii'),
+ (0x2173, 'M', 'iv'),
+ (0x2174, 'M', 'v'),
+ (0x2175, 'M', 'vi'),
+ (0x2176, 'M', 'vii'),
+ (0x2177, 'M', 'viii'),
+ (0x2178, 'M', 'ix'),
+ (0x2179, 'M', 'x'),
+ (0x217A, 'M', 'xi'),
+ (0x217B, 'M', 'xii'),
+ (0x217C, 'M', 'l'),
+ ]
+
+def _seg_23() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x217D, 'M', 'c'),
+ (0x217E, 'M', 'd'),
+ (0x217F, 'M', 'm'),
+ (0x2180, 'V'),
+ (0x2183, 'X'),
+ (0x2184, 'V'),
+ (0x2189, 'M', '0⁄3'),
+ (0x218A, 'V'),
+ (0x218C, 'X'),
+ (0x2190, 'V'),
+ (0x222C, 'M', '∫∫'),
+ (0x222D, 'M', '∫∫∫'),
+ (0x222E, 'V'),
+ (0x222F, 'M', '∮∮'),
+ (0x2230, 'M', '∮∮∮'),
+ (0x2231, 'V'),
+ (0x2260, '3'),
+ (0x2261, 'V'),
+ (0x226E, '3'),
+ (0x2270, 'V'),
+ (0x2329, 'M', '〈'),
+ (0x232A, 'M', '〉'),
+ (0x232B, 'V'),
+ (0x2427, 'X'),
+ (0x2440, 'V'),
+ (0x244B, 'X'),
+ (0x2460, 'M', '1'),
+ (0x2461, 'M', '2'),
+ (0x2462, 'M', '3'),
+ (0x2463, 'M', '4'),
+ (0x2464, 'M', '5'),
+ (0x2465, 'M', '6'),
+ (0x2466, 'M', '7'),
+ (0x2467, 'M', '8'),
+ (0x2468, 'M', '9'),
+ (0x2469, 'M', '10'),
+ (0x246A, 'M', '11'),
+ (0x246B, 'M', '12'),
+ (0x246C, 'M', '13'),
+ (0x246D, 'M', '14'),
+ (0x246E, 'M', '15'),
+ (0x246F, 'M', '16'),
+ (0x2470, 'M', '17'),
+ (0x2471, 'M', '18'),
+ (0x2472, 'M', '19'),
+ (0x2473, 'M', '20'),
+ (0x2474, '3', '(1)'),
+ (0x2475, '3', '(2)'),
+ (0x2476, '3', '(3)'),
+ (0x2477, '3', '(4)'),
+ (0x2478, '3', '(5)'),
+ (0x2479, '3', '(6)'),
+ (0x247A, '3', '(7)'),
+ (0x247B, '3', '(8)'),
+ (0x247C, '3', '(9)'),
+ (0x247D, '3', '(10)'),
+ (0x247E, '3', '(11)'),
+ (0x247F, '3', '(12)'),
+ (0x2480, '3', '(13)'),
+ (0x2481, '3', '(14)'),
+ (0x2482, '3', '(15)'),
+ (0x2483, '3', '(16)'),
+ (0x2484, '3', '(17)'),
+ (0x2485, '3', '(18)'),
+ (0x2486, '3', '(19)'),
+ (0x2487, '3', '(20)'),
+ (0x2488, 'X'),
+ (0x249C, '3', '(a)'),
+ (0x249D, '3', '(b)'),
+ (0x249E, '3', '(c)'),
+ (0x249F, '3', '(d)'),
+ (0x24A0, '3', '(e)'),
+ (0x24A1, '3', '(f)'),
+ (0x24A2, '3', '(g)'),
+ (0x24A3, '3', '(h)'),
+ (0x24A4, '3', '(i)'),
+ (0x24A5, '3', '(j)'),
+ (0x24A6, '3', '(k)'),
+ (0x24A7, '3', '(l)'),
+ (0x24A8, '3', '(m)'),
+ (0x24A9, '3', '(n)'),
+ (0x24AA, '3', '(o)'),
+ (0x24AB, '3', '(p)'),
+ (0x24AC, '3', '(q)'),
+ (0x24AD, '3', '(r)'),
+ (0x24AE, '3', '(s)'),
+ (0x24AF, '3', '(t)'),
+ (0x24B0, '3', '(u)'),
+ (0x24B1, '3', '(v)'),
+ (0x24B2, '3', '(w)'),
+ (0x24B3, '3', '(x)'),
+ (0x24B4, '3', '(y)'),
+ (0x24B5, '3', '(z)'),
+ (0x24B6, 'M', 'a'),
+ (0x24B7, 'M', 'b'),
+ (0x24B8, 'M', 'c'),
+ (0x24B9, 'M', 'd'),
+ (0x24BA, 'M', 'e'),
+ (0x24BB, 'M', 'f'),
+ (0x24BC, 'M', 'g'),
+ ]
+
+def _seg_24() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x24BD, 'M', 'h'),
+ (0x24BE, 'M', 'i'),
+ (0x24BF, 'M', 'j'),
+ (0x24C0, 'M', 'k'),
+ (0x24C1, 'M', 'l'),
+ (0x24C2, 'M', 'm'),
+ (0x24C3, 'M', 'n'),
+ (0x24C4, 'M', 'o'),
+ (0x24C5, 'M', 'p'),
+ (0x24C6, 'M', 'q'),
+ (0x24C7, 'M', 'r'),
+ (0x24C8, 'M', 's'),
+ (0x24C9, 'M', 't'),
+ (0x24CA, 'M', 'u'),
+ (0x24CB, 'M', 'v'),
+ (0x24CC, 'M', 'w'),
+ (0x24CD, 'M', 'x'),
+ (0x24CE, 'M', 'y'),
+ (0x24CF, 'M', 'z'),
+ (0x24D0, 'M', 'a'),
+ (0x24D1, 'M', 'b'),
+ (0x24D2, 'M', 'c'),
+ (0x24D3, 'M', 'd'),
+ (0x24D4, 'M', 'e'),
+ (0x24D5, 'M', 'f'),
+ (0x24D6, 'M', 'g'),
+ (0x24D7, 'M', 'h'),
+ (0x24D8, 'M', 'i'),
+ (0x24D9, 'M', 'j'),
+ (0x24DA, 'M', 'k'),
+ (0x24DB, 'M', 'l'),
+ (0x24DC, 'M', 'm'),
+ (0x24DD, 'M', 'n'),
+ (0x24DE, 'M', 'o'),
+ (0x24DF, 'M', 'p'),
+ (0x24E0, 'M', 'q'),
+ (0x24E1, 'M', 'r'),
+ (0x24E2, 'M', 's'),
+ (0x24E3, 'M', 't'),
+ (0x24E4, 'M', 'u'),
+ (0x24E5, 'M', 'v'),
+ (0x24E6, 'M', 'w'),
+ (0x24E7, 'M', 'x'),
+ (0x24E8, 'M', 'y'),
+ (0x24E9, 'M', 'z'),
+ (0x24EA, 'M', '0'),
+ (0x24EB, 'V'),
+ (0x2A0C, 'M', '∫∫∫∫'),
+ (0x2A0D, 'V'),
+ (0x2A74, '3', '::='),
+ (0x2A75, '3', '=='),
+ (0x2A76, '3', '==='),
+ (0x2A77, 'V'),
+ (0x2ADC, 'M', '⫝̸'),
+ (0x2ADD, 'V'),
+ (0x2B74, 'X'),
+ (0x2B76, 'V'),
+ (0x2B96, 'X'),
+ (0x2B97, 'V'),
+ (0x2C00, 'M', 'ⰰ'),
+ (0x2C01, 'M', 'ⰱ'),
+ (0x2C02, 'M', 'ⰲ'),
+ (0x2C03, 'M', 'ⰳ'),
+ (0x2C04, 'M', 'ⰴ'),
+ (0x2C05, 'M', 'ⰵ'),
+ (0x2C06, 'M', 'ⰶ'),
+ (0x2C07, 'M', 'ⰷ'),
+ (0x2C08, 'M', 'ⰸ'),
+ (0x2C09, 'M', 'ⰹ'),
+ (0x2C0A, 'M', 'ⰺ'),
+ (0x2C0B, 'M', 'ⰻ'),
+ (0x2C0C, 'M', 'ⰼ'),
+ (0x2C0D, 'M', 'ⰽ'),
+ (0x2C0E, 'M', 'ⰾ'),
+ (0x2C0F, 'M', 'ⰿ'),
+ (0x2C10, 'M', 'ⱀ'),
+ (0x2C11, 'M', 'ⱁ'),
+ (0x2C12, 'M', 'ⱂ'),
+ (0x2C13, 'M', 'ⱃ'),
+ (0x2C14, 'M', 'ⱄ'),
+ (0x2C15, 'M', 'ⱅ'),
+ (0x2C16, 'M', 'ⱆ'),
+ (0x2C17, 'M', 'ⱇ'),
+ (0x2C18, 'M', 'ⱈ'),
+ (0x2C19, 'M', 'ⱉ'),
+ (0x2C1A, 'M', 'ⱊ'),
+ (0x2C1B, 'M', 'ⱋ'),
+ (0x2C1C, 'M', 'ⱌ'),
+ (0x2C1D, 'M', 'ⱍ'),
+ (0x2C1E, 'M', 'ⱎ'),
+ (0x2C1F, 'M', 'ⱏ'),
+ (0x2C20, 'M', 'ⱐ'),
+ (0x2C21, 'M', 'ⱑ'),
+ (0x2C22, 'M', 'ⱒ'),
+ (0x2C23, 'M', 'ⱓ'),
+ (0x2C24, 'M', 'ⱔ'),
+ (0x2C25, 'M', 'ⱕ'),
+ (0x2C26, 'M', 'ⱖ'),
+ (0x2C27, 'M', 'ⱗ'),
+ (0x2C28, 'M', 'ⱘ'),
+ ]
+
+def _seg_25() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2C29, 'M', 'ⱙ'),
+ (0x2C2A, 'M', 'ⱚ'),
+ (0x2C2B, 'M', 'ⱛ'),
+ (0x2C2C, 'M', 'ⱜ'),
+ (0x2C2D, 'M', 'ⱝ'),
+ (0x2C2E, 'M', 'ⱞ'),
+ (0x2C2F, 'M', 'ⱟ'),
+ (0x2C30, 'V'),
+ (0x2C60, 'M', 'ⱡ'),
+ (0x2C61, 'V'),
+ (0x2C62, 'M', 'ɫ'),
+ (0x2C63, 'M', 'ᵽ'),
+ (0x2C64, 'M', 'ɽ'),
+ (0x2C65, 'V'),
+ (0x2C67, 'M', 'ⱨ'),
+ (0x2C68, 'V'),
+ (0x2C69, 'M', 'ⱪ'),
+ (0x2C6A, 'V'),
+ (0x2C6B, 'M', 'ⱬ'),
+ (0x2C6C, 'V'),
+ (0x2C6D, 'M', 'ɑ'),
+ (0x2C6E, 'M', 'ɱ'),
+ (0x2C6F, 'M', 'ɐ'),
+ (0x2C70, 'M', 'ɒ'),
+ (0x2C71, 'V'),
+ (0x2C72, 'M', 'ⱳ'),
+ (0x2C73, 'V'),
+ (0x2C75, 'M', 'ⱶ'),
+ (0x2C76, 'V'),
+ (0x2C7C, 'M', 'j'),
+ (0x2C7D, 'M', 'v'),
+ (0x2C7E, 'M', 'ȿ'),
+ (0x2C7F, 'M', 'ɀ'),
+ (0x2C80, 'M', 'ⲁ'),
+ (0x2C81, 'V'),
+ (0x2C82, 'M', 'ⲃ'),
+ (0x2C83, 'V'),
+ (0x2C84, 'M', 'ⲅ'),
+ (0x2C85, 'V'),
+ (0x2C86, 'M', 'ⲇ'),
+ (0x2C87, 'V'),
+ (0x2C88, 'M', 'ⲉ'),
+ (0x2C89, 'V'),
+ (0x2C8A, 'M', 'ⲋ'),
+ (0x2C8B, 'V'),
+ (0x2C8C, 'M', 'ⲍ'),
+ (0x2C8D, 'V'),
+ (0x2C8E, 'M', 'ⲏ'),
+ (0x2C8F, 'V'),
+ (0x2C90, 'M', 'ⲑ'),
+ (0x2C91, 'V'),
+ (0x2C92, 'M', 'ⲓ'),
+ (0x2C93, 'V'),
+ (0x2C94, 'M', 'ⲕ'),
+ (0x2C95, 'V'),
+ (0x2C96, 'M', 'ⲗ'),
+ (0x2C97, 'V'),
+ (0x2C98, 'M', 'ⲙ'),
+ (0x2C99, 'V'),
+ (0x2C9A, 'M', 'ⲛ'),
+ (0x2C9B, 'V'),
+ (0x2C9C, 'M', 'ⲝ'),
+ (0x2C9D, 'V'),
+ (0x2C9E, 'M', 'ⲟ'),
+ (0x2C9F, 'V'),
+ (0x2CA0, 'M', 'ⲡ'),
+ (0x2CA1, 'V'),
+ (0x2CA2, 'M', 'ⲣ'),
+ (0x2CA3, 'V'),
+ (0x2CA4, 'M', 'ⲥ'),
+ (0x2CA5, 'V'),
+ (0x2CA6, 'M', 'ⲧ'),
+ (0x2CA7, 'V'),
+ (0x2CA8, 'M', 'ⲩ'),
+ (0x2CA9, 'V'),
+ (0x2CAA, 'M', 'ⲫ'),
+ (0x2CAB, 'V'),
+ (0x2CAC, 'M', 'ⲭ'),
+ (0x2CAD, 'V'),
+ (0x2CAE, 'M', 'ⲯ'),
+ (0x2CAF, 'V'),
+ (0x2CB0, 'M', 'ⲱ'),
+ (0x2CB1, 'V'),
+ (0x2CB2, 'M', 'ⲳ'),
+ (0x2CB3, 'V'),
+ (0x2CB4, 'M', 'ⲵ'),
+ (0x2CB5, 'V'),
+ (0x2CB6, 'M', 'ⲷ'),
+ (0x2CB7, 'V'),
+ (0x2CB8, 'M', 'ⲹ'),
+ (0x2CB9, 'V'),
+ (0x2CBA, 'M', 'ⲻ'),
+ (0x2CBB, 'V'),
+ (0x2CBC, 'M', 'ⲽ'),
+ (0x2CBD, 'V'),
+ (0x2CBE, 'M', 'ⲿ'),
+ (0x2CBF, 'V'),
+ (0x2CC0, 'M', 'ⳁ'),
+ (0x2CC1, 'V'),
+ (0x2CC2, 'M', 'ⳃ'),
+ ]
+
+def _seg_26() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2CC3, 'V'),
+ (0x2CC4, 'M', 'ⳅ'),
+ (0x2CC5, 'V'),
+ (0x2CC6, 'M', 'ⳇ'),
+ (0x2CC7, 'V'),
+ (0x2CC8, 'M', 'ⳉ'),
+ (0x2CC9, 'V'),
+ (0x2CCA, 'M', 'ⳋ'),
+ (0x2CCB, 'V'),
+ (0x2CCC, 'M', 'ⳍ'),
+ (0x2CCD, 'V'),
+ (0x2CCE, 'M', 'ⳏ'),
+ (0x2CCF, 'V'),
+ (0x2CD0, 'M', 'ⳑ'),
+ (0x2CD1, 'V'),
+ (0x2CD2, 'M', 'ⳓ'),
+ (0x2CD3, 'V'),
+ (0x2CD4, 'M', 'ⳕ'),
+ (0x2CD5, 'V'),
+ (0x2CD6, 'M', 'ⳗ'),
+ (0x2CD7, 'V'),
+ (0x2CD8, 'M', 'ⳙ'),
+ (0x2CD9, 'V'),
+ (0x2CDA, 'M', 'ⳛ'),
+ (0x2CDB, 'V'),
+ (0x2CDC, 'M', 'ⳝ'),
+ (0x2CDD, 'V'),
+ (0x2CDE, 'M', 'ⳟ'),
+ (0x2CDF, 'V'),
+ (0x2CE0, 'M', 'ⳡ'),
+ (0x2CE1, 'V'),
+ (0x2CE2, 'M', 'ⳣ'),
+ (0x2CE3, 'V'),
+ (0x2CEB, 'M', 'ⳬ'),
+ (0x2CEC, 'V'),
+ (0x2CED, 'M', 'ⳮ'),
+ (0x2CEE, 'V'),
+ (0x2CF2, 'M', 'ⳳ'),
+ (0x2CF3, 'V'),
+ (0x2CF4, 'X'),
+ (0x2CF9, 'V'),
+ (0x2D26, 'X'),
+ (0x2D27, 'V'),
+ (0x2D28, 'X'),
+ (0x2D2D, 'V'),
+ (0x2D2E, 'X'),
+ (0x2D30, 'V'),
+ (0x2D68, 'X'),
+ (0x2D6F, 'M', 'ⵡ'),
+ (0x2D70, 'V'),
+ (0x2D71, 'X'),
+ (0x2D7F, 'V'),
+ (0x2D97, 'X'),
+ (0x2DA0, 'V'),
+ (0x2DA7, 'X'),
+ (0x2DA8, 'V'),
+ (0x2DAF, 'X'),
+ (0x2DB0, 'V'),
+ (0x2DB7, 'X'),
+ (0x2DB8, 'V'),
+ (0x2DBF, 'X'),
+ (0x2DC0, 'V'),
+ (0x2DC7, 'X'),
+ (0x2DC8, 'V'),
+ (0x2DCF, 'X'),
+ (0x2DD0, 'V'),
+ (0x2DD7, 'X'),
+ (0x2DD8, 'V'),
+ (0x2DDF, 'X'),
+ (0x2DE0, 'V'),
+ (0x2E5E, 'X'),
+ (0x2E80, 'V'),
+ (0x2E9A, 'X'),
+ (0x2E9B, 'V'),
+ (0x2E9F, 'M', '母'),
+ (0x2EA0, 'V'),
+ (0x2EF3, 'M', '龟'),
+ (0x2EF4, 'X'),
+ (0x2F00, 'M', '一'),
+ (0x2F01, 'M', '丨'),
+ (0x2F02, 'M', '丶'),
+ (0x2F03, 'M', '丿'),
+ (0x2F04, 'M', '乙'),
+ (0x2F05, 'M', '亅'),
+ (0x2F06, 'M', '二'),
+ (0x2F07, 'M', '亠'),
+ (0x2F08, 'M', '人'),
+ (0x2F09, 'M', '儿'),
+ (0x2F0A, 'M', '入'),
+ (0x2F0B, 'M', '八'),
+ (0x2F0C, 'M', '冂'),
+ (0x2F0D, 'M', '冖'),
+ (0x2F0E, 'M', '冫'),
+ (0x2F0F, 'M', '几'),
+ (0x2F10, 'M', '凵'),
+ (0x2F11, 'M', '刀'),
+ (0x2F12, 'M', '力'),
+ (0x2F13, 'M', '勹'),
+ (0x2F14, 'M', '匕'),
+ (0x2F15, 'M', '匚'),
+ ]
+
+def _seg_27() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2F16, 'M', '匸'),
+ (0x2F17, 'M', '十'),
+ (0x2F18, 'M', '卜'),
+ (0x2F19, 'M', '卩'),
+ (0x2F1A, 'M', '厂'),
+ (0x2F1B, 'M', '厶'),
+ (0x2F1C, 'M', '又'),
+ (0x2F1D, 'M', '口'),
+ (0x2F1E, 'M', '囗'),
+ (0x2F1F, 'M', '土'),
+ (0x2F20, 'M', '士'),
+ (0x2F21, 'M', '夂'),
+ (0x2F22, 'M', '夊'),
+ (0x2F23, 'M', '夕'),
+ (0x2F24, 'M', '大'),
+ (0x2F25, 'M', '女'),
+ (0x2F26, 'M', '子'),
+ (0x2F27, 'M', '宀'),
+ (0x2F28, 'M', '寸'),
+ (0x2F29, 'M', '小'),
+ (0x2F2A, 'M', '尢'),
+ (0x2F2B, 'M', '尸'),
+ (0x2F2C, 'M', '屮'),
+ (0x2F2D, 'M', '山'),
+ (0x2F2E, 'M', '巛'),
+ (0x2F2F, 'M', '工'),
+ (0x2F30, 'M', '己'),
+ (0x2F31, 'M', '巾'),
+ (0x2F32, 'M', '干'),
+ (0x2F33, 'M', '幺'),
+ (0x2F34, 'M', '广'),
+ (0x2F35, 'M', '廴'),
+ (0x2F36, 'M', '廾'),
+ (0x2F37, 'M', '弋'),
+ (0x2F38, 'M', '弓'),
+ (0x2F39, 'M', '彐'),
+ (0x2F3A, 'M', '彡'),
+ (0x2F3B, 'M', '彳'),
+ (0x2F3C, 'M', '心'),
+ (0x2F3D, 'M', '戈'),
+ (0x2F3E, 'M', '戶'),
+ (0x2F3F, 'M', '手'),
+ (0x2F40, 'M', '支'),
+ (0x2F41, 'M', '攴'),
+ (0x2F42, 'M', '文'),
+ (0x2F43, 'M', '斗'),
+ (0x2F44, 'M', '斤'),
+ (0x2F45, 'M', '方'),
+ (0x2F46, 'M', '无'),
+ (0x2F47, 'M', '日'),
+ (0x2F48, 'M', '曰'),
+ (0x2F49, 'M', '月'),
+ (0x2F4A, 'M', '木'),
+ (0x2F4B, 'M', '欠'),
+ (0x2F4C, 'M', '止'),
+ (0x2F4D, 'M', '歹'),
+ (0x2F4E, 'M', '殳'),
+ (0x2F4F, 'M', '毋'),
+ (0x2F50, 'M', '比'),
+ (0x2F51, 'M', '毛'),
+ (0x2F52, 'M', '氏'),
+ (0x2F53, 'M', '气'),
+ (0x2F54, 'M', '水'),
+ (0x2F55, 'M', '火'),
+ (0x2F56, 'M', '爪'),
+ (0x2F57, 'M', '父'),
+ (0x2F58, 'M', '爻'),
+ (0x2F59, 'M', '爿'),
+ (0x2F5A, 'M', '片'),
+ (0x2F5B, 'M', '牙'),
+ (0x2F5C, 'M', '牛'),
+ (0x2F5D, 'M', '犬'),
+ (0x2F5E, 'M', '玄'),
+ (0x2F5F, 'M', '玉'),
+ (0x2F60, 'M', '瓜'),
+ (0x2F61, 'M', '瓦'),
+ (0x2F62, 'M', '甘'),
+ (0x2F63, 'M', '生'),
+ (0x2F64, 'M', '用'),
+ (0x2F65, 'M', '田'),
+ (0x2F66, 'M', '疋'),
+ (0x2F67, 'M', '疒'),
+ (0x2F68, 'M', '癶'),
+ (0x2F69, 'M', '白'),
+ (0x2F6A, 'M', '皮'),
+ (0x2F6B, 'M', '皿'),
+ (0x2F6C, 'M', '目'),
+ (0x2F6D, 'M', '矛'),
+ (0x2F6E, 'M', '矢'),
+ (0x2F6F, 'M', '石'),
+ (0x2F70, 'M', '示'),
+ (0x2F71, 'M', '禸'),
+ (0x2F72, 'M', '禾'),
+ (0x2F73, 'M', '穴'),
+ (0x2F74, 'M', '立'),
+ (0x2F75, 'M', '竹'),
+ (0x2F76, 'M', '米'),
+ (0x2F77, 'M', '糸'),
+ (0x2F78, 'M', '缶'),
+ (0x2F79, 'M', '网'),
+ ]
+
+def _seg_28() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2F7A, 'M', '羊'),
+ (0x2F7B, 'M', '羽'),
+ (0x2F7C, 'M', '老'),
+ (0x2F7D, 'M', '而'),
+ (0x2F7E, 'M', '耒'),
+ (0x2F7F, 'M', '耳'),
+ (0x2F80, 'M', '聿'),
+ (0x2F81, 'M', '肉'),
+ (0x2F82, 'M', '臣'),
+ (0x2F83, 'M', '自'),
+ (0x2F84, 'M', '至'),
+ (0x2F85, 'M', '臼'),
+ (0x2F86, 'M', '舌'),
+ (0x2F87, 'M', '舛'),
+ (0x2F88, 'M', '舟'),
+ (0x2F89, 'M', '艮'),
+ (0x2F8A, 'M', '色'),
+ (0x2F8B, 'M', '艸'),
+ (0x2F8C, 'M', '虍'),
+ (0x2F8D, 'M', '虫'),
+ (0x2F8E, 'M', '血'),
+ (0x2F8F, 'M', '行'),
+ (0x2F90, 'M', '衣'),
+ (0x2F91, 'M', '襾'),
+ (0x2F92, 'M', '見'),
+ (0x2F93, 'M', '角'),
+ (0x2F94, 'M', '言'),
+ (0x2F95, 'M', '谷'),
+ (0x2F96, 'M', '豆'),
+ (0x2F97, 'M', '豕'),
+ (0x2F98, 'M', '豸'),
+ (0x2F99, 'M', '貝'),
+ (0x2F9A, 'M', '赤'),
+ (0x2F9B, 'M', '走'),
+ (0x2F9C, 'M', '足'),
+ (0x2F9D, 'M', '身'),
+ (0x2F9E, 'M', '車'),
+ (0x2F9F, 'M', '辛'),
+ (0x2FA0, 'M', '辰'),
+ (0x2FA1, 'M', '辵'),
+ (0x2FA2, 'M', '邑'),
+ (0x2FA3, 'M', '酉'),
+ (0x2FA4, 'M', '釆'),
+ (0x2FA5, 'M', '里'),
+ (0x2FA6, 'M', '金'),
+ (0x2FA7, 'M', '長'),
+ (0x2FA8, 'M', '門'),
+ (0x2FA9, 'M', '阜'),
+ (0x2FAA, 'M', '隶'),
+ (0x2FAB, 'M', '隹'),
+ (0x2FAC, 'M', '雨'),
+ (0x2FAD, 'M', '靑'),
+ (0x2FAE, 'M', '非'),
+ (0x2FAF, 'M', '面'),
+ (0x2FB0, 'M', '革'),
+ (0x2FB1, 'M', '韋'),
+ (0x2FB2, 'M', '韭'),
+ (0x2FB3, 'M', '音'),
+ (0x2FB4, 'M', '頁'),
+ (0x2FB5, 'M', '風'),
+ (0x2FB6, 'M', '飛'),
+ (0x2FB7, 'M', '食'),
+ (0x2FB8, 'M', '首'),
+ (0x2FB9, 'M', '香'),
+ (0x2FBA, 'M', '馬'),
+ (0x2FBB, 'M', '骨'),
+ (0x2FBC, 'M', '高'),
+ (0x2FBD, 'M', '髟'),
+ (0x2FBE, 'M', '鬥'),
+ (0x2FBF, 'M', '鬯'),
+ (0x2FC0, 'M', '鬲'),
+ (0x2FC1, 'M', '鬼'),
+ (0x2FC2, 'M', '魚'),
+ (0x2FC3, 'M', '鳥'),
+ (0x2FC4, 'M', '鹵'),
+ (0x2FC5, 'M', '鹿'),
+ (0x2FC6, 'M', '麥'),
+ (0x2FC7, 'M', '麻'),
+ (0x2FC8, 'M', '黃'),
+ (0x2FC9, 'M', '黍'),
+ (0x2FCA, 'M', '黑'),
+ (0x2FCB, 'M', '黹'),
+ (0x2FCC, 'M', '黽'),
+ (0x2FCD, 'M', '鼎'),
+ (0x2FCE, 'M', '鼓'),
+ (0x2FCF, 'M', '鼠'),
+ (0x2FD0, 'M', '鼻'),
+ (0x2FD1, 'M', '齊'),
+ (0x2FD2, 'M', '齒'),
+ (0x2FD3, 'M', '龍'),
+ (0x2FD4, 'M', '龜'),
+ (0x2FD5, 'M', '龠'),
+ (0x2FD6, 'X'),
+ (0x3000, '3', ' '),
+ (0x3001, 'V'),
+ (0x3002, 'M', '.'),
+ (0x3003, 'V'),
+ (0x3036, 'M', '〒'),
+ (0x3037, 'V'),
+ (0x3038, 'M', '十'),
+ ]
+
+def _seg_29() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x3039, 'M', '卄'),
+ (0x303A, 'M', '卅'),
+ (0x303B, 'V'),
+ (0x3040, 'X'),
+ (0x3041, 'V'),
+ (0x3097, 'X'),
+ (0x3099, 'V'),
+ (0x309B, '3', ' ゙'),
+ (0x309C, '3', ' ゚'),
+ (0x309D, 'V'),
+ (0x309F, 'M', 'より'),
+ (0x30A0, 'V'),
+ (0x30FF, 'M', 'コト'),
+ (0x3100, 'X'),
+ (0x3105, 'V'),
+ (0x3130, 'X'),
+ (0x3131, 'M', 'ᄀ'),
+ (0x3132, 'M', 'ᄁ'),
+ (0x3133, 'M', 'ᆪ'),
+ (0x3134, 'M', 'ᄂ'),
+ (0x3135, 'M', 'ᆬ'),
+ (0x3136, 'M', 'ᆭ'),
+ (0x3137, 'M', 'ᄃ'),
+ (0x3138, 'M', 'ᄄ'),
+ (0x3139, 'M', 'ᄅ'),
+ (0x313A, 'M', 'ᆰ'),
+ (0x313B, 'M', 'ᆱ'),
+ (0x313C, 'M', 'ᆲ'),
+ (0x313D, 'M', 'ᆳ'),
+ (0x313E, 'M', 'ᆴ'),
+ (0x313F, 'M', 'ᆵ'),
+ (0x3140, 'M', 'ᄚ'),
+ (0x3141, 'M', 'ᄆ'),
+ (0x3142, 'M', 'ᄇ'),
+ (0x3143, 'M', 'ᄈ'),
+ (0x3144, 'M', 'ᄡ'),
+ (0x3145, 'M', 'ᄉ'),
+ (0x3146, 'M', 'ᄊ'),
+ (0x3147, 'M', 'ᄋ'),
+ (0x3148, 'M', 'ᄌ'),
+ (0x3149, 'M', 'ᄍ'),
+ (0x314A, 'M', 'ᄎ'),
+ (0x314B, 'M', 'ᄏ'),
+ (0x314C, 'M', 'ᄐ'),
+ (0x314D, 'M', 'ᄑ'),
+ (0x314E, 'M', 'ᄒ'),
+ (0x314F, 'M', 'ᅡ'),
+ (0x3150, 'M', 'ᅢ'),
+ (0x3151, 'M', 'ᅣ'),
+ (0x3152, 'M', 'ᅤ'),
+ (0x3153, 'M', 'ᅥ'),
+ (0x3154, 'M', 'ᅦ'),
+ (0x3155, 'M', 'ᅧ'),
+ (0x3156, 'M', 'ᅨ'),
+ (0x3157, 'M', 'ᅩ'),
+ (0x3158, 'M', 'ᅪ'),
+ (0x3159, 'M', 'ᅫ'),
+ (0x315A, 'M', 'ᅬ'),
+ (0x315B, 'M', 'ᅭ'),
+ (0x315C, 'M', 'ᅮ'),
+ (0x315D, 'M', 'ᅯ'),
+ (0x315E, 'M', 'ᅰ'),
+ (0x315F, 'M', 'ᅱ'),
+ (0x3160, 'M', 'ᅲ'),
+ (0x3161, 'M', 'ᅳ'),
+ (0x3162, 'M', 'ᅴ'),
+ (0x3163, 'M', 'ᅵ'),
+ (0x3164, 'X'),
+ (0x3165, 'M', 'ᄔ'),
+ (0x3166, 'M', 'ᄕ'),
+ (0x3167, 'M', 'ᇇ'),
+ (0x3168, 'M', 'ᇈ'),
+ (0x3169, 'M', 'ᇌ'),
+ (0x316A, 'M', 'ᇎ'),
+ (0x316B, 'M', 'ᇓ'),
+ (0x316C, 'M', 'ᇗ'),
+ (0x316D, 'M', 'ᇙ'),
+ (0x316E, 'M', 'ᄜ'),
+ (0x316F, 'M', 'ᇝ'),
+ (0x3170, 'M', 'ᇟ'),
+ (0x3171, 'M', 'ᄝ'),
+ (0x3172, 'M', 'ᄞ'),
+ (0x3173, 'M', 'ᄠ'),
+ (0x3174, 'M', 'ᄢ'),
+ (0x3175, 'M', 'ᄣ'),
+ (0x3176, 'M', 'ᄧ'),
+ (0x3177, 'M', 'ᄩ'),
+ (0x3178, 'M', 'ᄫ'),
+ (0x3179, 'M', 'ᄬ'),
+ (0x317A, 'M', 'ᄭ'),
+ (0x317B, 'M', 'ᄮ'),
+ (0x317C, 'M', 'ᄯ'),
+ (0x317D, 'M', 'ᄲ'),
+ (0x317E, 'M', 'ᄶ'),
+ (0x317F, 'M', 'ᅀ'),
+ (0x3180, 'M', 'ᅇ'),
+ (0x3181, 'M', 'ᅌ'),
+ (0x3182, 'M', 'ᇱ'),
+ (0x3183, 'M', 'ᇲ'),
+ (0x3184, 'M', 'ᅗ'),
+ ]
+
+def _seg_30() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x3185, 'M', 'ᅘ'),
+ (0x3186, 'M', 'ᅙ'),
+ (0x3187, 'M', 'ᆄ'),
+ (0x3188, 'M', 'ᆅ'),
+ (0x3189, 'M', 'ᆈ'),
+ (0x318A, 'M', 'ᆑ'),
+ (0x318B, 'M', 'ᆒ'),
+ (0x318C, 'M', 'ᆔ'),
+ (0x318D, 'M', 'ᆞ'),
+ (0x318E, 'M', 'ᆡ'),
+ (0x318F, 'X'),
+ (0x3190, 'V'),
+ (0x3192, 'M', '一'),
+ (0x3193, 'M', '二'),
+ (0x3194, 'M', '三'),
+ (0x3195, 'M', '四'),
+ (0x3196, 'M', '上'),
+ (0x3197, 'M', '中'),
+ (0x3198, 'M', '下'),
+ (0x3199, 'M', '甲'),
+ (0x319A, 'M', '乙'),
+ (0x319B, 'M', '丙'),
+ (0x319C, 'M', '丁'),
+ (0x319D, 'M', '天'),
+ (0x319E, 'M', '地'),
+ (0x319F, 'M', '人'),
+ (0x31A0, 'V'),
+ (0x31E4, 'X'),
+ (0x31F0, 'V'),
+ (0x3200, '3', '(ᄀ)'),
+ (0x3201, '3', '(ᄂ)'),
+ (0x3202, '3', '(ᄃ)'),
+ (0x3203, '3', '(ᄅ)'),
+ (0x3204, '3', '(ᄆ)'),
+ (0x3205, '3', '(ᄇ)'),
+ (0x3206, '3', '(ᄉ)'),
+ (0x3207, '3', '(ᄋ)'),
+ (0x3208, '3', '(ᄌ)'),
+ (0x3209, '3', '(ᄎ)'),
+ (0x320A, '3', '(ᄏ)'),
+ (0x320B, '3', '(ᄐ)'),
+ (0x320C, '3', '(ᄑ)'),
+ (0x320D, '3', '(ᄒ)'),
+ (0x320E, '3', '(가)'),
+ (0x320F, '3', '(나)'),
+ (0x3210, '3', '(다)'),
+ (0x3211, '3', '(라)'),
+ (0x3212, '3', '(마)'),
+ (0x3213, '3', '(바)'),
+ (0x3214, '3', '(사)'),
+ (0x3215, '3', '(아)'),
+ (0x3216, '3', '(자)'),
+ (0x3217, '3', '(차)'),
+ (0x3218, '3', '(카)'),
+ (0x3219, '3', '(타)'),
+ (0x321A, '3', '(파)'),
+ (0x321B, '3', '(하)'),
+ (0x321C, '3', '(주)'),
+ (0x321D, '3', '(오전)'),
+ (0x321E, '3', '(오후)'),
+ (0x321F, 'X'),
+ (0x3220, '3', '(一)'),
+ (0x3221, '3', '(二)'),
+ (0x3222, '3', '(三)'),
+ (0x3223, '3', '(四)'),
+ (0x3224, '3', '(五)'),
+ (0x3225, '3', '(六)'),
+ (0x3226, '3', '(七)'),
+ (0x3227, '3', '(八)'),
+ (0x3228, '3', '(九)'),
+ (0x3229, '3', '(十)'),
+ (0x322A, '3', '(月)'),
+ (0x322B, '3', '(火)'),
+ (0x322C, '3', '(水)'),
+ (0x322D, '3', '(木)'),
+ (0x322E, '3', '(金)'),
+ (0x322F, '3', '(土)'),
+ (0x3230, '3', '(日)'),
+ (0x3231, '3', '(株)'),
+ (0x3232, '3', '(有)'),
+ (0x3233, '3', '(社)'),
+ (0x3234, '3', '(名)'),
+ (0x3235, '3', '(特)'),
+ (0x3236, '3', '(財)'),
+ (0x3237, '3', '(祝)'),
+ (0x3238, '3', '(労)'),
+ (0x3239, '3', '(代)'),
+ (0x323A, '3', '(呼)'),
+ (0x323B, '3', '(学)'),
+ (0x323C, '3', '(監)'),
+ (0x323D, '3', '(企)'),
+ (0x323E, '3', '(資)'),
+ (0x323F, '3', '(協)'),
+ (0x3240, '3', '(祭)'),
+ (0x3241, '3', '(休)'),
+ (0x3242, '3', '(自)'),
+ (0x3243, '3', '(至)'),
+ (0x3244, 'M', '問'),
+ (0x3245, 'M', '幼'),
+ (0x3246, 'M', '文'),
+ ]
+
+def _seg_31() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x3247, 'M', '箏'),
+ (0x3248, 'V'),
+ (0x3250, 'M', 'pte'),
+ (0x3251, 'M', '21'),
+ (0x3252, 'M', '22'),
+ (0x3253, 'M', '23'),
+ (0x3254, 'M', '24'),
+ (0x3255, 'M', '25'),
+ (0x3256, 'M', '26'),
+ (0x3257, 'M', '27'),
+ (0x3258, 'M', '28'),
+ (0x3259, 'M', '29'),
+ (0x325A, 'M', '30'),
+ (0x325B, 'M', '31'),
+ (0x325C, 'M', '32'),
+ (0x325D, 'M', '33'),
+ (0x325E, 'M', '34'),
+ (0x325F, 'M', '35'),
+ (0x3260, 'M', 'ᄀ'),
+ (0x3261, 'M', 'ᄂ'),
+ (0x3262, 'M', 'ᄃ'),
+ (0x3263, 'M', 'ᄅ'),
+ (0x3264, 'M', 'ᄆ'),
+ (0x3265, 'M', 'ᄇ'),
+ (0x3266, 'M', 'ᄉ'),
+ (0x3267, 'M', 'ᄋ'),
+ (0x3268, 'M', 'ᄌ'),
+ (0x3269, 'M', 'ᄎ'),
+ (0x326A, 'M', 'ᄏ'),
+ (0x326B, 'M', 'ᄐ'),
+ (0x326C, 'M', 'ᄑ'),
+ (0x326D, 'M', 'ᄒ'),
+ (0x326E, 'M', '가'),
+ (0x326F, 'M', '나'),
+ (0x3270, 'M', '다'),
+ (0x3271, 'M', '라'),
+ (0x3272, 'M', '마'),
+ (0x3273, 'M', '바'),
+ (0x3274, 'M', '사'),
+ (0x3275, 'M', '아'),
+ (0x3276, 'M', '자'),
+ (0x3277, 'M', '차'),
+ (0x3278, 'M', '카'),
+ (0x3279, 'M', '타'),
+ (0x327A, 'M', '파'),
+ (0x327B, 'M', '하'),
+ (0x327C, 'M', '참고'),
+ (0x327D, 'M', '주의'),
+ (0x327E, 'M', '우'),
+ (0x327F, 'V'),
+ (0x3280, 'M', '一'),
+ (0x3281, 'M', '二'),
+ (0x3282, 'M', '三'),
+ (0x3283, 'M', '四'),
+ (0x3284, 'M', '五'),
+ (0x3285, 'M', '六'),
+ (0x3286, 'M', '七'),
+ (0x3287, 'M', '八'),
+ (0x3288, 'M', '九'),
+ (0x3289, 'M', '十'),
+ (0x328A, 'M', '月'),
+ (0x328B, 'M', '火'),
+ (0x328C, 'M', '水'),
+ (0x328D, 'M', '木'),
+ (0x328E, 'M', '金'),
+ (0x328F, 'M', '土'),
+ (0x3290, 'M', '日'),
+ (0x3291, 'M', '株'),
+ (0x3292, 'M', '有'),
+ (0x3293, 'M', '社'),
+ (0x3294, 'M', '名'),
+ (0x3295, 'M', '特'),
+ (0x3296, 'M', '財'),
+ (0x3297, 'M', '祝'),
+ (0x3298, 'M', '労'),
+ (0x3299, 'M', '秘'),
+ (0x329A, 'M', '男'),
+ (0x329B, 'M', '女'),
+ (0x329C, 'M', '適'),
+ (0x329D, 'M', '優'),
+ (0x329E, 'M', '印'),
+ (0x329F, 'M', '注'),
+ (0x32A0, 'M', '項'),
+ (0x32A1, 'M', '休'),
+ (0x32A2, 'M', '写'),
+ (0x32A3, 'M', '正'),
+ (0x32A4, 'M', '上'),
+ (0x32A5, 'M', '中'),
+ (0x32A6, 'M', '下'),
+ (0x32A7, 'M', '左'),
+ (0x32A8, 'M', '右'),
+ (0x32A9, 'M', '医'),
+ (0x32AA, 'M', '宗'),
+ (0x32AB, 'M', '学'),
+ (0x32AC, 'M', '監'),
+ (0x32AD, 'M', '企'),
+ (0x32AE, 'M', '資'),
+ (0x32AF, 'M', '協'),
+ (0x32B0, 'M', '夜'),
+ (0x32B1, 'M', '36'),
+ ]
+
+def _seg_32() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x32B2, 'M', '37'),
+ (0x32B3, 'M', '38'),
+ (0x32B4, 'M', '39'),
+ (0x32B5, 'M', '40'),
+ (0x32B6, 'M', '41'),
+ (0x32B7, 'M', '42'),
+ (0x32B8, 'M', '43'),
+ (0x32B9, 'M', '44'),
+ (0x32BA, 'M', '45'),
+ (0x32BB, 'M', '46'),
+ (0x32BC, 'M', '47'),
+ (0x32BD, 'M', '48'),
+ (0x32BE, 'M', '49'),
+ (0x32BF, 'M', '50'),
+ (0x32C0, 'M', '1月'),
+ (0x32C1, 'M', '2月'),
+ (0x32C2, 'M', '3月'),
+ (0x32C3, 'M', '4月'),
+ (0x32C4, 'M', '5月'),
+ (0x32C5, 'M', '6月'),
+ (0x32C6, 'M', '7月'),
+ (0x32C7, 'M', '8月'),
+ (0x32C8, 'M', '9月'),
+ (0x32C9, 'M', '10月'),
+ (0x32CA, 'M', '11月'),
+ (0x32CB, 'M', '12月'),
+ (0x32CC, 'M', 'hg'),
+ (0x32CD, 'M', 'erg'),
+ (0x32CE, 'M', 'ev'),
+ (0x32CF, 'M', 'ltd'),
+ (0x32D0, 'M', 'ア'),
+ (0x32D1, 'M', 'イ'),
+ (0x32D2, 'M', 'ウ'),
+ (0x32D3, 'M', 'エ'),
+ (0x32D4, 'M', 'オ'),
+ (0x32D5, 'M', 'カ'),
+ (0x32D6, 'M', 'キ'),
+ (0x32D7, 'M', 'ク'),
+ (0x32D8, 'M', 'ケ'),
+ (0x32D9, 'M', 'コ'),
+ (0x32DA, 'M', 'サ'),
+ (0x32DB, 'M', 'シ'),
+ (0x32DC, 'M', 'ス'),
+ (0x32DD, 'M', 'セ'),
+ (0x32DE, 'M', 'ソ'),
+ (0x32DF, 'M', 'タ'),
+ (0x32E0, 'M', 'チ'),
+ (0x32E1, 'M', 'ツ'),
+ (0x32E2, 'M', 'テ'),
+ (0x32E3, 'M', 'ト'),
+ (0x32E4, 'M', 'ナ'),
+ (0x32E5, 'M', 'ニ'),
+ (0x32E6, 'M', 'ヌ'),
+ (0x32E7, 'M', 'ネ'),
+ (0x32E8, 'M', 'ノ'),
+ (0x32E9, 'M', 'ハ'),
+ (0x32EA, 'M', 'ヒ'),
+ (0x32EB, 'M', 'フ'),
+ (0x32EC, 'M', 'ヘ'),
+ (0x32ED, 'M', 'ホ'),
+ (0x32EE, 'M', 'マ'),
+ (0x32EF, 'M', 'ミ'),
+ (0x32F0, 'M', 'ム'),
+ (0x32F1, 'M', 'メ'),
+ (0x32F2, 'M', 'モ'),
+ (0x32F3, 'M', 'ヤ'),
+ (0x32F4, 'M', 'ユ'),
+ (0x32F5, 'M', 'ヨ'),
+ (0x32F6, 'M', 'ラ'),
+ (0x32F7, 'M', 'リ'),
+ (0x32F8, 'M', 'ル'),
+ (0x32F9, 'M', 'レ'),
+ (0x32FA, 'M', 'ロ'),
+ (0x32FB, 'M', 'ワ'),
+ (0x32FC, 'M', 'ヰ'),
+ (0x32FD, 'M', 'ヱ'),
+ (0x32FE, 'M', 'ヲ'),
+ (0x32FF, 'M', '令和'),
+ (0x3300, 'M', 'アパート'),
+ (0x3301, 'M', 'アルファ'),
+ (0x3302, 'M', 'アンペア'),
+ (0x3303, 'M', 'アール'),
+ (0x3304, 'M', 'イニング'),
+ (0x3305, 'M', 'インチ'),
+ (0x3306, 'M', 'ウォン'),
+ (0x3307, 'M', 'エスクード'),
+ (0x3308, 'M', 'エーカー'),
+ (0x3309, 'M', 'オンス'),
+ (0x330A, 'M', 'オーム'),
+ (0x330B, 'M', 'カイリ'),
+ (0x330C, 'M', 'カラット'),
+ (0x330D, 'M', 'カロリー'),
+ (0x330E, 'M', 'ガロン'),
+ (0x330F, 'M', 'ガンマ'),
+ (0x3310, 'M', 'ギガ'),
+ (0x3311, 'M', 'ギニー'),
+ (0x3312, 'M', 'キュリー'),
+ (0x3313, 'M', 'ギルダー'),
+ (0x3314, 'M', 'キロ'),
+ (0x3315, 'M', 'キログラム'),
+ ]
+
+def _seg_33() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x3316, 'M', 'キロメートル'),
+ (0x3317, 'M', 'キロワット'),
+ (0x3318, 'M', 'グラム'),
+ (0x3319, 'M', 'グラムトン'),
+ (0x331A, 'M', 'クルゼイロ'),
+ (0x331B, 'M', 'クローネ'),
+ (0x331C, 'M', 'ケース'),
+ (0x331D, 'M', 'コルナ'),
+ (0x331E, 'M', 'コーポ'),
+ (0x331F, 'M', 'サイクル'),
+ (0x3320, 'M', 'サンチーム'),
+ (0x3321, 'M', 'シリング'),
+ (0x3322, 'M', 'センチ'),
+ (0x3323, 'M', 'セント'),
+ (0x3324, 'M', 'ダース'),
+ (0x3325, 'M', 'デシ'),
+ (0x3326, 'M', 'ドル'),
+ (0x3327, 'M', 'トン'),
+ (0x3328, 'M', 'ナノ'),
+ (0x3329, 'M', 'ノット'),
+ (0x332A, 'M', 'ハイツ'),
+ (0x332B, 'M', 'パーセント'),
+ (0x332C, 'M', 'パーツ'),
+ (0x332D, 'M', 'バーレル'),
+ (0x332E, 'M', 'ピアストル'),
+ (0x332F, 'M', 'ピクル'),
+ (0x3330, 'M', 'ピコ'),
+ (0x3331, 'M', 'ビル'),
+ (0x3332, 'M', 'ファラッド'),
+ (0x3333, 'M', 'フィート'),
+ (0x3334, 'M', 'ブッシェル'),
+ (0x3335, 'M', 'フラン'),
+ (0x3336, 'M', 'ヘクタール'),
+ (0x3337, 'M', 'ペソ'),
+ (0x3338, 'M', 'ペニヒ'),
+ (0x3339, 'M', 'ヘルツ'),
+ (0x333A, 'M', 'ペンス'),
+ (0x333B, 'M', 'ページ'),
+ (0x333C, 'M', 'ベータ'),
+ (0x333D, 'M', 'ポイント'),
+ (0x333E, 'M', 'ボルト'),
+ (0x333F, 'M', 'ホン'),
+ (0x3340, 'M', 'ポンド'),
+ (0x3341, 'M', 'ホール'),
+ (0x3342, 'M', 'ホーン'),
+ (0x3343, 'M', 'マイクロ'),
+ (0x3344, 'M', 'マイル'),
+ (0x3345, 'M', 'マッハ'),
+ (0x3346, 'M', 'マルク'),
+ (0x3347, 'M', 'マンション'),
+ (0x3348, 'M', 'ミクロン'),
+ (0x3349, 'M', 'ミリ'),
+ (0x334A, 'M', 'ミリバール'),
+ (0x334B, 'M', 'メガ'),
+ (0x334C, 'M', 'メガトン'),
+ (0x334D, 'M', 'メートル'),
+ (0x334E, 'M', 'ヤード'),
+ (0x334F, 'M', 'ヤール'),
+ (0x3350, 'M', 'ユアン'),
+ (0x3351, 'M', 'リットル'),
+ (0x3352, 'M', 'リラ'),
+ (0x3353, 'M', 'ルピー'),
+ (0x3354, 'M', 'ルーブル'),
+ (0x3355, 'M', 'レム'),
+ (0x3356, 'M', 'レントゲン'),
+ (0x3357, 'M', 'ワット'),
+ (0x3358, 'M', '0点'),
+ (0x3359, 'M', '1点'),
+ (0x335A, 'M', '2点'),
+ (0x335B, 'M', '3点'),
+ (0x335C, 'M', '4点'),
+ (0x335D, 'M', '5点'),
+ (0x335E, 'M', '6点'),
+ (0x335F, 'M', '7点'),
+ (0x3360, 'M', '8点'),
+ (0x3361, 'M', '9点'),
+ (0x3362, 'M', '10点'),
+ (0x3363, 'M', '11点'),
+ (0x3364, 'M', '12点'),
+ (0x3365, 'M', '13点'),
+ (0x3366, 'M', '14点'),
+ (0x3367, 'M', '15点'),
+ (0x3368, 'M', '16点'),
+ (0x3369, 'M', '17点'),
+ (0x336A, 'M', '18点'),
+ (0x336B, 'M', '19点'),
+ (0x336C, 'M', '20点'),
+ (0x336D, 'M', '21点'),
+ (0x336E, 'M', '22点'),
+ (0x336F, 'M', '23点'),
+ (0x3370, 'M', '24点'),
+ (0x3371, 'M', 'hpa'),
+ (0x3372, 'M', 'da'),
+ (0x3373, 'M', 'au'),
+ (0x3374, 'M', 'bar'),
+ (0x3375, 'M', 'ov'),
+ (0x3376, 'M', 'pc'),
+ (0x3377, 'M', 'dm'),
+ (0x3378, 'M', 'dm2'),
+ (0x3379, 'M', 'dm3'),
+ ]
+
+def _seg_34() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x337A, 'M', 'iu'),
+ (0x337B, 'M', '平成'),
+ (0x337C, 'M', '昭和'),
+ (0x337D, 'M', '大正'),
+ (0x337E, 'M', '明治'),
+ (0x337F, 'M', '株式会社'),
+ (0x3380, 'M', 'pa'),
+ (0x3381, 'M', 'na'),
+ (0x3382, 'M', 'μa'),
+ (0x3383, 'M', 'ma'),
+ (0x3384, 'M', 'ka'),
+ (0x3385, 'M', 'kb'),
+ (0x3386, 'M', 'mb'),
+ (0x3387, 'M', 'gb'),
+ (0x3388, 'M', 'cal'),
+ (0x3389, 'M', 'kcal'),
+ (0x338A, 'M', 'pf'),
+ (0x338B, 'M', 'nf'),
+ (0x338C, 'M', 'μf'),
+ (0x338D, 'M', 'μg'),
+ (0x338E, 'M', 'mg'),
+ (0x338F, 'M', 'kg'),
+ (0x3390, 'M', 'hz'),
+ (0x3391, 'M', 'khz'),
+ (0x3392, 'M', 'mhz'),
+ (0x3393, 'M', 'ghz'),
+ (0x3394, 'M', 'thz'),
+ (0x3395, 'M', 'μl'),
+ (0x3396, 'M', 'ml'),
+ (0x3397, 'M', 'dl'),
+ (0x3398, 'M', 'kl'),
+ (0x3399, 'M', 'fm'),
+ (0x339A, 'M', 'nm'),
+ (0x339B, 'M', 'μm'),
+ (0x339C, 'M', 'mm'),
+ (0x339D, 'M', 'cm'),
+ (0x339E, 'M', 'km'),
+ (0x339F, 'M', 'mm2'),
+ (0x33A0, 'M', 'cm2'),
+ (0x33A1, 'M', 'm2'),
+ (0x33A2, 'M', 'km2'),
+ (0x33A3, 'M', 'mm3'),
+ (0x33A4, 'M', 'cm3'),
+ (0x33A5, 'M', 'm3'),
+ (0x33A6, 'M', 'km3'),
+ (0x33A7, 'M', 'm∕s'),
+ (0x33A8, 'M', 'm∕s2'),
+ (0x33A9, 'M', 'pa'),
+ (0x33AA, 'M', 'kpa'),
+ (0x33AB, 'M', 'mpa'),
+ (0x33AC, 'M', 'gpa'),
+ (0x33AD, 'M', 'rad'),
+ (0x33AE, 'M', 'rad∕s'),
+ (0x33AF, 'M', 'rad∕s2'),
+ (0x33B0, 'M', 'ps'),
+ (0x33B1, 'M', 'ns'),
+ (0x33B2, 'M', 'μs'),
+ (0x33B3, 'M', 'ms'),
+ (0x33B4, 'M', 'pv'),
+ (0x33B5, 'M', 'nv'),
+ (0x33B6, 'M', 'μv'),
+ (0x33B7, 'M', 'mv'),
+ (0x33B8, 'M', 'kv'),
+ (0x33B9, 'M', 'mv'),
+ (0x33BA, 'M', 'pw'),
+ (0x33BB, 'M', 'nw'),
+ (0x33BC, 'M', 'μw'),
+ (0x33BD, 'M', 'mw'),
+ (0x33BE, 'M', 'kw'),
+ (0x33BF, 'M', 'mw'),
+ (0x33C0, 'M', 'kω'),
+ (0x33C1, 'M', 'mω'),
+ (0x33C2, 'X'),
+ (0x33C3, 'M', 'bq'),
+ (0x33C4, 'M', 'cc'),
+ (0x33C5, 'M', 'cd'),
+ (0x33C6, 'M', 'c∕kg'),
+ (0x33C7, 'X'),
+ (0x33C8, 'M', 'db'),
+ (0x33C9, 'M', 'gy'),
+ (0x33CA, 'M', 'ha'),
+ (0x33CB, 'M', 'hp'),
+ (0x33CC, 'M', 'in'),
+ (0x33CD, 'M', 'kk'),
+ (0x33CE, 'M', 'km'),
+ (0x33CF, 'M', 'kt'),
+ (0x33D0, 'M', 'lm'),
+ (0x33D1, 'M', 'ln'),
+ (0x33D2, 'M', 'log'),
+ (0x33D3, 'M', 'lx'),
+ (0x33D4, 'M', 'mb'),
+ (0x33D5, 'M', 'mil'),
+ (0x33D6, 'M', 'mol'),
+ (0x33D7, 'M', 'ph'),
+ (0x33D8, 'X'),
+ (0x33D9, 'M', 'ppm'),
+ (0x33DA, 'M', 'pr'),
+ (0x33DB, 'M', 'sr'),
+ (0x33DC, 'M', 'sv'),
+ (0x33DD, 'M', 'wb'),
+ ]
+
+def _seg_35() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x33DE, 'M', 'v∕m'),
+ (0x33DF, 'M', 'a∕m'),
+ (0x33E0, 'M', '1日'),
+ (0x33E1, 'M', '2日'),
+ (0x33E2, 'M', '3日'),
+ (0x33E3, 'M', '4日'),
+ (0x33E4, 'M', '5日'),
+ (0x33E5, 'M', '6日'),
+ (0x33E6, 'M', '7日'),
+ (0x33E7, 'M', '8日'),
+ (0x33E8, 'M', '9日'),
+ (0x33E9, 'M', '10日'),
+ (0x33EA, 'M', '11日'),
+ (0x33EB, 'M', '12日'),
+ (0x33EC, 'M', '13日'),
+ (0x33ED, 'M', '14日'),
+ (0x33EE, 'M', '15日'),
+ (0x33EF, 'M', '16日'),
+ (0x33F0, 'M', '17日'),
+ (0x33F1, 'M', '18日'),
+ (0x33F2, 'M', '19日'),
+ (0x33F3, 'M', '20日'),
+ (0x33F4, 'M', '21日'),
+ (0x33F5, 'M', '22日'),
+ (0x33F6, 'M', '23日'),
+ (0x33F7, 'M', '24日'),
+ (0x33F8, 'M', '25日'),
+ (0x33F9, 'M', '26日'),
+ (0x33FA, 'M', '27日'),
+ (0x33FB, 'M', '28日'),
+ (0x33FC, 'M', '29日'),
+ (0x33FD, 'M', '30日'),
+ (0x33FE, 'M', '31日'),
+ (0x33FF, 'M', 'gal'),
+ (0x3400, 'V'),
+ (0xA48D, 'X'),
+ (0xA490, 'V'),
+ (0xA4C7, 'X'),
+ (0xA4D0, 'V'),
+ (0xA62C, 'X'),
+ (0xA640, 'M', 'ꙁ'),
+ (0xA641, 'V'),
+ (0xA642, 'M', 'ꙃ'),
+ (0xA643, 'V'),
+ (0xA644, 'M', 'ꙅ'),
+ (0xA645, 'V'),
+ (0xA646, 'M', 'ꙇ'),
+ (0xA647, 'V'),
+ (0xA648, 'M', 'ꙉ'),
+ (0xA649, 'V'),
+ (0xA64A, 'M', 'ꙋ'),
+ (0xA64B, 'V'),
+ (0xA64C, 'M', 'ꙍ'),
+ (0xA64D, 'V'),
+ (0xA64E, 'M', 'ꙏ'),
+ (0xA64F, 'V'),
+ (0xA650, 'M', 'ꙑ'),
+ (0xA651, 'V'),
+ (0xA652, 'M', 'ꙓ'),
+ (0xA653, 'V'),
+ (0xA654, 'M', 'ꙕ'),
+ (0xA655, 'V'),
+ (0xA656, 'M', 'ꙗ'),
+ (0xA657, 'V'),
+ (0xA658, 'M', 'ꙙ'),
+ (0xA659, 'V'),
+ (0xA65A, 'M', 'ꙛ'),
+ (0xA65B, 'V'),
+ (0xA65C, 'M', 'ꙝ'),
+ (0xA65D, 'V'),
+ (0xA65E, 'M', 'ꙟ'),
+ (0xA65F, 'V'),
+ (0xA660, 'M', 'ꙡ'),
+ (0xA661, 'V'),
+ (0xA662, 'M', 'ꙣ'),
+ (0xA663, 'V'),
+ (0xA664, 'M', 'ꙥ'),
+ (0xA665, 'V'),
+ (0xA666, 'M', 'ꙧ'),
+ (0xA667, 'V'),
+ (0xA668, 'M', 'ꙩ'),
+ (0xA669, 'V'),
+ (0xA66A, 'M', 'ꙫ'),
+ (0xA66B, 'V'),
+ (0xA66C, 'M', 'ꙭ'),
+ (0xA66D, 'V'),
+ (0xA680, 'M', 'ꚁ'),
+ (0xA681, 'V'),
+ (0xA682, 'M', 'ꚃ'),
+ (0xA683, 'V'),
+ (0xA684, 'M', 'ꚅ'),
+ (0xA685, 'V'),
+ (0xA686, 'M', 'ꚇ'),
+ (0xA687, 'V'),
+ (0xA688, 'M', 'ꚉ'),
+ (0xA689, 'V'),
+ (0xA68A, 'M', 'ꚋ'),
+ (0xA68B, 'V'),
+ (0xA68C, 'M', 'ꚍ'),
+ (0xA68D, 'V'),
+ ]
+
+def _seg_36() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xA68E, 'M', 'ꚏ'),
+ (0xA68F, 'V'),
+ (0xA690, 'M', 'ꚑ'),
+ (0xA691, 'V'),
+ (0xA692, 'M', 'ꚓ'),
+ (0xA693, 'V'),
+ (0xA694, 'M', 'ꚕ'),
+ (0xA695, 'V'),
+ (0xA696, 'M', 'ꚗ'),
+ (0xA697, 'V'),
+ (0xA698, 'M', 'ꚙ'),
+ (0xA699, 'V'),
+ (0xA69A, 'M', 'ꚛ'),
+ (0xA69B, 'V'),
+ (0xA69C, 'M', 'ъ'),
+ (0xA69D, 'M', 'ь'),
+ (0xA69E, 'V'),
+ (0xA6F8, 'X'),
+ (0xA700, 'V'),
+ (0xA722, 'M', 'ꜣ'),
+ (0xA723, 'V'),
+ (0xA724, 'M', 'ꜥ'),
+ (0xA725, 'V'),
+ (0xA726, 'M', 'ꜧ'),
+ (0xA727, 'V'),
+ (0xA728, 'M', 'ꜩ'),
+ (0xA729, 'V'),
+ (0xA72A, 'M', 'ꜫ'),
+ (0xA72B, 'V'),
+ (0xA72C, 'M', 'ꜭ'),
+ (0xA72D, 'V'),
+ (0xA72E, 'M', 'ꜯ'),
+ (0xA72F, 'V'),
+ (0xA732, 'M', 'ꜳ'),
+ (0xA733, 'V'),
+ (0xA734, 'M', 'ꜵ'),
+ (0xA735, 'V'),
+ (0xA736, 'M', 'ꜷ'),
+ (0xA737, 'V'),
+ (0xA738, 'M', 'ꜹ'),
+ (0xA739, 'V'),
+ (0xA73A, 'M', 'ꜻ'),
+ (0xA73B, 'V'),
+ (0xA73C, 'M', 'ꜽ'),
+ (0xA73D, 'V'),
+ (0xA73E, 'M', 'ꜿ'),
+ (0xA73F, 'V'),
+ (0xA740, 'M', 'ꝁ'),
+ (0xA741, 'V'),
+ (0xA742, 'M', 'ꝃ'),
+ (0xA743, 'V'),
+ (0xA744, 'M', 'ꝅ'),
+ (0xA745, 'V'),
+ (0xA746, 'M', 'ꝇ'),
+ (0xA747, 'V'),
+ (0xA748, 'M', 'ꝉ'),
+ (0xA749, 'V'),
+ (0xA74A, 'M', 'ꝋ'),
+ (0xA74B, 'V'),
+ (0xA74C, 'M', 'ꝍ'),
+ (0xA74D, 'V'),
+ (0xA74E, 'M', 'ꝏ'),
+ (0xA74F, 'V'),
+ (0xA750, 'M', 'ꝑ'),
+ (0xA751, 'V'),
+ (0xA752, 'M', 'ꝓ'),
+ (0xA753, 'V'),
+ (0xA754, 'M', 'ꝕ'),
+ (0xA755, 'V'),
+ (0xA756, 'M', 'ꝗ'),
+ (0xA757, 'V'),
+ (0xA758, 'M', 'ꝙ'),
+ (0xA759, 'V'),
+ (0xA75A, 'M', 'ꝛ'),
+ (0xA75B, 'V'),
+ (0xA75C, 'M', 'ꝝ'),
+ (0xA75D, 'V'),
+ (0xA75E, 'M', 'ꝟ'),
+ (0xA75F, 'V'),
+ (0xA760, 'M', 'ꝡ'),
+ (0xA761, 'V'),
+ (0xA762, 'M', 'ꝣ'),
+ (0xA763, 'V'),
+ (0xA764, 'M', 'ꝥ'),
+ (0xA765, 'V'),
+ (0xA766, 'M', 'ꝧ'),
+ (0xA767, 'V'),
+ (0xA768, 'M', 'ꝩ'),
+ (0xA769, 'V'),
+ (0xA76A, 'M', 'ꝫ'),
+ (0xA76B, 'V'),
+ (0xA76C, 'M', 'ꝭ'),
+ (0xA76D, 'V'),
+ (0xA76E, 'M', 'ꝯ'),
+ (0xA76F, 'V'),
+ (0xA770, 'M', 'ꝯ'),
+ (0xA771, 'V'),
+ (0xA779, 'M', 'ꝺ'),
+ (0xA77A, 'V'),
+ (0xA77B, 'M', 'ꝼ'),
+ ]
+
+def _seg_37() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xA77C, 'V'),
+ (0xA77D, 'M', 'ᵹ'),
+ (0xA77E, 'M', 'ꝿ'),
+ (0xA77F, 'V'),
+ (0xA780, 'M', 'ꞁ'),
+ (0xA781, 'V'),
+ (0xA782, 'M', 'ꞃ'),
+ (0xA783, 'V'),
+ (0xA784, 'M', 'ꞅ'),
+ (0xA785, 'V'),
+ (0xA786, 'M', 'ꞇ'),
+ (0xA787, 'V'),
+ (0xA78B, 'M', 'ꞌ'),
+ (0xA78C, 'V'),
+ (0xA78D, 'M', 'ɥ'),
+ (0xA78E, 'V'),
+ (0xA790, 'M', 'ꞑ'),
+ (0xA791, 'V'),
+ (0xA792, 'M', 'ꞓ'),
+ (0xA793, 'V'),
+ (0xA796, 'M', 'ꞗ'),
+ (0xA797, 'V'),
+ (0xA798, 'M', 'ꞙ'),
+ (0xA799, 'V'),
+ (0xA79A, 'M', 'ꞛ'),
+ (0xA79B, 'V'),
+ (0xA79C, 'M', 'ꞝ'),
+ (0xA79D, 'V'),
+ (0xA79E, 'M', 'ꞟ'),
+ (0xA79F, 'V'),
+ (0xA7A0, 'M', 'ꞡ'),
+ (0xA7A1, 'V'),
+ (0xA7A2, 'M', 'ꞣ'),
+ (0xA7A3, 'V'),
+ (0xA7A4, 'M', 'ꞥ'),
+ (0xA7A5, 'V'),
+ (0xA7A6, 'M', 'ꞧ'),
+ (0xA7A7, 'V'),
+ (0xA7A8, 'M', 'ꞩ'),
+ (0xA7A9, 'V'),
+ (0xA7AA, 'M', 'ɦ'),
+ (0xA7AB, 'M', 'ɜ'),
+ (0xA7AC, 'M', 'ɡ'),
+ (0xA7AD, 'M', 'ɬ'),
+ (0xA7AE, 'M', 'ɪ'),
+ (0xA7AF, 'V'),
+ (0xA7B0, 'M', 'ʞ'),
+ (0xA7B1, 'M', 'ʇ'),
+ (0xA7B2, 'M', 'ʝ'),
+ (0xA7B3, 'M', 'ꭓ'),
+ (0xA7B4, 'M', 'ꞵ'),
+ (0xA7B5, 'V'),
+ (0xA7B6, 'M', 'ꞷ'),
+ (0xA7B7, 'V'),
+ (0xA7B8, 'M', 'ꞹ'),
+ (0xA7B9, 'V'),
+ (0xA7BA, 'M', 'ꞻ'),
+ (0xA7BB, 'V'),
+ (0xA7BC, 'M', 'ꞽ'),
+ (0xA7BD, 'V'),
+ (0xA7BE, 'M', 'ꞿ'),
+ (0xA7BF, 'V'),
+ (0xA7C0, 'M', 'ꟁ'),
+ (0xA7C1, 'V'),
+ (0xA7C2, 'M', 'ꟃ'),
+ (0xA7C3, 'V'),
+ (0xA7C4, 'M', 'ꞔ'),
+ (0xA7C5, 'M', 'ʂ'),
+ (0xA7C6, 'M', 'ᶎ'),
+ (0xA7C7, 'M', 'ꟈ'),
+ (0xA7C8, 'V'),
+ (0xA7C9, 'M', 'ꟊ'),
+ (0xA7CA, 'V'),
+ (0xA7CB, 'X'),
+ (0xA7D0, 'M', 'ꟑ'),
+ (0xA7D1, 'V'),
+ (0xA7D2, 'X'),
+ (0xA7D3, 'V'),
+ (0xA7D4, 'X'),
+ (0xA7D5, 'V'),
+ (0xA7D6, 'M', 'ꟗ'),
+ (0xA7D7, 'V'),
+ (0xA7D8, 'M', 'ꟙ'),
+ (0xA7D9, 'V'),
+ (0xA7DA, 'X'),
+ (0xA7F2, 'M', 'c'),
+ (0xA7F3, 'M', 'f'),
+ (0xA7F4, 'M', 'q'),
+ (0xA7F5, 'M', 'ꟶ'),
+ (0xA7F6, 'V'),
+ (0xA7F8, 'M', 'ħ'),
+ (0xA7F9, 'M', 'œ'),
+ (0xA7FA, 'V'),
+ (0xA82D, 'X'),
+ (0xA830, 'V'),
+ (0xA83A, 'X'),
+ (0xA840, 'V'),
+ (0xA878, 'X'),
+ (0xA880, 'V'),
+ (0xA8C6, 'X'),
+ ]
+
+def _seg_38() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xA8CE, 'V'),
+ (0xA8DA, 'X'),
+ (0xA8E0, 'V'),
+ (0xA954, 'X'),
+ (0xA95F, 'V'),
+ (0xA97D, 'X'),
+ (0xA980, 'V'),
+ (0xA9CE, 'X'),
+ (0xA9CF, 'V'),
+ (0xA9DA, 'X'),
+ (0xA9DE, 'V'),
+ (0xA9FF, 'X'),
+ (0xAA00, 'V'),
+ (0xAA37, 'X'),
+ (0xAA40, 'V'),
+ (0xAA4E, 'X'),
+ (0xAA50, 'V'),
+ (0xAA5A, 'X'),
+ (0xAA5C, 'V'),
+ (0xAAC3, 'X'),
+ (0xAADB, 'V'),
+ (0xAAF7, 'X'),
+ (0xAB01, 'V'),
+ (0xAB07, 'X'),
+ (0xAB09, 'V'),
+ (0xAB0F, 'X'),
+ (0xAB11, 'V'),
+ (0xAB17, 'X'),
+ (0xAB20, 'V'),
+ (0xAB27, 'X'),
+ (0xAB28, 'V'),
+ (0xAB2F, 'X'),
+ (0xAB30, 'V'),
+ (0xAB5C, 'M', 'ꜧ'),
+ (0xAB5D, 'M', 'ꬷ'),
+ (0xAB5E, 'M', 'ɫ'),
+ (0xAB5F, 'M', 'ꭒ'),
+ (0xAB60, 'V'),
+ (0xAB69, 'M', 'ʍ'),
+ (0xAB6A, 'V'),
+ (0xAB6C, 'X'),
+ (0xAB70, 'M', 'Ꭰ'),
+ (0xAB71, 'M', 'Ꭱ'),
+ (0xAB72, 'M', 'Ꭲ'),
+ (0xAB73, 'M', 'Ꭳ'),
+ (0xAB74, 'M', 'Ꭴ'),
+ (0xAB75, 'M', 'Ꭵ'),
+ (0xAB76, 'M', 'Ꭶ'),
+ (0xAB77, 'M', 'Ꭷ'),
+ (0xAB78, 'M', 'Ꭸ'),
+ (0xAB79, 'M', 'Ꭹ'),
+ (0xAB7A, 'M', 'Ꭺ'),
+ (0xAB7B, 'M', 'Ꭻ'),
+ (0xAB7C, 'M', 'Ꭼ'),
+ (0xAB7D, 'M', 'Ꭽ'),
+ (0xAB7E, 'M', 'Ꭾ'),
+ (0xAB7F, 'M', 'Ꭿ'),
+ (0xAB80, 'M', 'Ꮀ'),
+ (0xAB81, 'M', 'Ꮁ'),
+ (0xAB82, 'M', 'Ꮂ'),
+ (0xAB83, 'M', 'Ꮃ'),
+ (0xAB84, 'M', 'Ꮄ'),
+ (0xAB85, 'M', 'Ꮅ'),
+ (0xAB86, 'M', 'Ꮆ'),
+ (0xAB87, 'M', 'Ꮇ'),
+ (0xAB88, 'M', 'Ꮈ'),
+ (0xAB89, 'M', 'Ꮉ'),
+ (0xAB8A, 'M', 'Ꮊ'),
+ (0xAB8B, 'M', 'Ꮋ'),
+ (0xAB8C, 'M', 'Ꮌ'),
+ (0xAB8D, 'M', 'Ꮍ'),
+ (0xAB8E, 'M', 'Ꮎ'),
+ (0xAB8F, 'M', 'Ꮏ'),
+ (0xAB90, 'M', 'Ꮐ'),
+ (0xAB91, 'M', 'Ꮑ'),
+ (0xAB92, 'M', 'Ꮒ'),
+ (0xAB93, 'M', 'Ꮓ'),
+ (0xAB94, 'M', 'Ꮔ'),
+ (0xAB95, 'M', 'Ꮕ'),
+ (0xAB96, 'M', 'Ꮖ'),
+ (0xAB97, 'M', 'Ꮗ'),
+ (0xAB98, 'M', 'Ꮘ'),
+ (0xAB99, 'M', 'Ꮙ'),
+ (0xAB9A, 'M', 'Ꮚ'),
+ (0xAB9B, 'M', 'Ꮛ'),
+ (0xAB9C, 'M', 'Ꮜ'),
+ (0xAB9D, 'M', 'Ꮝ'),
+ (0xAB9E, 'M', 'Ꮞ'),
+ (0xAB9F, 'M', 'Ꮟ'),
+ (0xABA0, 'M', 'Ꮠ'),
+ (0xABA1, 'M', 'Ꮡ'),
+ (0xABA2, 'M', 'Ꮢ'),
+ (0xABA3, 'M', 'Ꮣ'),
+ (0xABA4, 'M', 'Ꮤ'),
+ (0xABA5, 'M', 'Ꮥ'),
+ (0xABA6, 'M', 'Ꮦ'),
+ (0xABA7, 'M', 'Ꮧ'),
+ (0xABA8, 'M', 'Ꮨ'),
+ (0xABA9, 'M', 'Ꮩ'),
+ (0xABAA, 'M', 'Ꮪ'),
+ ]
+
+def _seg_39() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xABAB, 'M', 'Ꮫ'),
+ (0xABAC, 'M', 'Ꮬ'),
+ (0xABAD, 'M', 'Ꮭ'),
+ (0xABAE, 'M', 'Ꮮ'),
+ (0xABAF, 'M', 'Ꮯ'),
+ (0xABB0, 'M', 'Ꮰ'),
+ (0xABB1, 'M', 'Ꮱ'),
+ (0xABB2, 'M', 'Ꮲ'),
+ (0xABB3, 'M', 'Ꮳ'),
+ (0xABB4, 'M', 'Ꮴ'),
+ (0xABB5, 'M', 'Ꮵ'),
+ (0xABB6, 'M', 'Ꮶ'),
+ (0xABB7, 'M', 'Ꮷ'),
+ (0xABB8, 'M', 'Ꮸ'),
+ (0xABB9, 'M', 'Ꮹ'),
+ (0xABBA, 'M', 'Ꮺ'),
+ (0xABBB, 'M', 'Ꮻ'),
+ (0xABBC, 'M', 'Ꮼ'),
+ (0xABBD, 'M', 'Ꮽ'),
+ (0xABBE, 'M', 'Ꮾ'),
+ (0xABBF, 'M', 'Ꮿ'),
+ (0xABC0, 'V'),
+ (0xABEE, 'X'),
+ (0xABF0, 'V'),
+ (0xABFA, 'X'),
+ (0xAC00, 'V'),
+ (0xD7A4, 'X'),
+ (0xD7B0, 'V'),
+ (0xD7C7, 'X'),
+ (0xD7CB, 'V'),
+ (0xD7FC, 'X'),
+ (0xF900, 'M', '豈'),
+ (0xF901, 'M', '更'),
+ (0xF902, 'M', '車'),
+ (0xF903, 'M', '賈'),
+ (0xF904, 'M', '滑'),
+ (0xF905, 'M', '串'),
+ (0xF906, 'M', '句'),
+ (0xF907, 'M', '龜'),
+ (0xF909, 'M', '契'),
+ (0xF90A, 'M', '金'),
+ (0xF90B, 'M', '喇'),
+ (0xF90C, 'M', '奈'),
+ (0xF90D, 'M', '懶'),
+ (0xF90E, 'M', '癩'),
+ (0xF90F, 'M', '羅'),
+ (0xF910, 'M', '蘿'),
+ (0xF911, 'M', '螺'),
+ (0xF912, 'M', '裸'),
+ (0xF913, 'M', '邏'),
+ (0xF914, 'M', '樂'),
+ (0xF915, 'M', '洛'),
+ (0xF916, 'M', '烙'),
+ (0xF917, 'M', '珞'),
+ (0xF918, 'M', '落'),
+ (0xF919, 'M', '酪'),
+ (0xF91A, 'M', '駱'),
+ (0xF91B, 'M', '亂'),
+ (0xF91C, 'M', '卵'),
+ (0xF91D, 'M', '欄'),
+ (0xF91E, 'M', '爛'),
+ (0xF91F, 'M', '蘭'),
+ (0xF920, 'M', '鸞'),
+ (0xF921, 'M', '嵐'),
+ (0xF922, 'M', '濫'),
+ (0xF923, 'M', '藍'),
+ (0xF924, 'M', '襤'),
+ (0xF925, 'M', '拉'),
+ (0xF926, 'M', '臘'),
+ (0xF927, 'M', '蠟'),
+ (0xF928, 'M', '廊'),
+ (0xF929, 'M', '朗'),
+ (0xF92A, 'M', '浪'),
+ (0xF92B, 'M', '狼'),
+ (0xF92C, 'M', '郎'),
+ (0xF92D, 'M', '來'),
+ (0xF92E, 'M', '冷'),
+ (0xF92F, 'M', '勞'),
+ (0xF930, 'M', '擄'),
+ (0xF931, 'M', '櫓'),
+ (0xF932, 'M', '爐'),
+ (0xF933, 'M', '盧'),
+ (0xF934, 'M', '老'),
+ (0xF935, 'M', '蘆'),
+ (0xF936, 'M', '虜'),
+ (0xF937, 'M', '路'),
+ (0xF938, 'M', '露'),
+ (0xF939, 'M', '魯'),
+ (0xF93A, 'M', '鷺'),
+ (0xF93B, 'M', '碌'),
+ (0xF93C, 'M', '祿'),
+ (0xF93D, 'M', '綠'),
+ (0xF93E, 'M', '菉'),
+ (0xF93F, 'M', '錄'),
+ (0xF940, 'M', '鹿'),
+ (0xF941, 'M', '論'),
+ (0xF942, 'M', '壟'),
+ (0xF943, 'M', '弄'),
+ (0xF944, 'M', '籠'),
+ (0xF945, 'M', '聾'),
+ ]
+
+def _seg_40() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xF946, 'M', '牢'),
+ (0xF947, 'M', '磊'),
+ (0xF948, 'M', '賂'),
+ (0xF949, 'M', '雷'),
+ (0xF94A, 'M', '壘'),
+ (0xF94B, 'M', '屢'),
+ (0xF94C, 'M', '樓'),
+ (0xF94D, 'M', '淚'),
+ (0xF94E, 'M', '漏'),
+ (0xF94F, 'M', '累'),
+ (0xF950, 'M', '縷'),
+ (0xF951, 'M', '陋'),
+ (0xF952, 'M', '勒'),
+ (0xF953, 'M', '肋'),
+ (0xF954, 'M', '凜'),
+ (0xF955, 'M', '凌'),
+ (0xF956, 'M', '稜'),
+ (0xF957, 'M', '綾'),
+ (0xF958, 'M', '菱'),
+ (0xF959, 'M', '陵'),
+ (0xF95A, 'M', '讀'),
+ (0xF95B, 'M', '拏'),
+ (0xF95C, 'M', '樂'),
+ (0xF95D, 'M', '諾'),
+ (0xF95E, 'M', '丹'),
+ (0xF95F, 'M', '寧'),
+ (0xF960, 'M', '怒'),
+ (0xF961, 'M', '率'),
+ (0xF962, 'M', '異'),
+ (0xF963, 'M', '北'),
+ (0xF964, 'M', '磻'),
+ (0xF965, 'M', '便'),
+ (0xF966, 'M', '復'),
+ (0xF967, 'M', '不'),
+ (0xF968, 'M', '泌'),
+ (0xF969, 'M', '數'),
+ (0xF96A, 'M', '索'),
+ (0xF96B, 'M', '參'),
+ (0xF96C, 'M', '塞'),
+ (0xF96D, 'M', '省'),
+ (0xF96E, 'M', '葉'),
+ (0xF96F, 'M', '說'),
+ (0xF970, 'M', '殺'),
+ (0xF971, 'M', '辰'),
+ (0xF972, 'M', '沈'),
+ (0xF973, 'M', '拾'),
+ (0xF974, 'M', '若'),
+ (0xF975, 'M', '掠'),
+ (0xF976, 'M', '略'),
+ (0xF977, 'M', '亮'),
+ (0xF978, 'M', '兩'),
+ (0xF979, 'M', '凉'),
+ (0xF97A, 'M', '梁'),
+ (0xF97B, 'M', '糧'),
+ (0xF97C, 'M', '良'),
+ (0xF97D, 'M', '諒'),
+ (0xF97E, 'M', '量'),
+ (0xF97F, 'M', '勵'),
+ (0xF980, 'M', '呂'),
+ (0xF981, 'M', '女'),
+ (0xF982, 'M', '廬'),
+ (0xF983, 'M', '旅'),
+ (0xF984, 'M', '濾'),
+ (0xF985, 'M', '礪'),
+ (0xF986, 'M', '閭'),
+ (0xF987, 'M', '驪'),
+ (0xF988, 'M', '麗'),
+ (0xF989, 'M', '黎'),
+ (0xF98A, 'M', '力'),
+ (0xF98B, 'M', '曆'),
+ (0xF98C, 'M', '歷'),
+ (0xF98D, 'M', '轢'),
+ (0xF98E, 'M', '年'),
+ (0xF98F, 'M', '憐'),
+ (0xF990, 'M', '戀'),
+ (0xF991, 'M', '撚'),
+ (0xF992, 'M', '漣'),
+ (0xF993, 'M', '煉'),
+ (0xF994, 'M', '璉'),
+ (0xF995, 'M', '秊'),
+ (0xF996, 'M', '練'),
+ (0xF997, 'M', '聯'),
+ (0xF998, 'M', '輦'),
+ (0xF999, 'M', '蓮'),
+ (0xF99A, 'M', '連'),
+ (0xF99B, 'M', '鍊'),
+ (0xF99C, 'M', '列'),
+ (0xF99D, 'M', '劣'),
+ (0xF99E, 'M', '咽'),
+ (0xF99F, 'M', '烈'),
+ (0xF9A0, 'M', '裂'),
+ (0xF9A1, 'M', '說'),
+ (0xF9A2, 'M', '廉'),
+ (0xF9A3, 'M', '念'),
+ (0xF9A4, 'M', '捻'),
+ (0xF9A5, 'M', '殮'),
+ (0xF9A6, 'M', '簾'),
+ (0xF9A7, 'M', '獵'),
+ (0xF9A8, 'M', '令'),
+ (0xF9A9, 'M', '囹'),
+ ]
+
+def _seg_41() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xF9AA, 'M', '寧'),
+ (0xF9AB, 'M', '嶺'),
+ (0xF9AC, 'M', '怜'),
+ (0xF9AD, 'M', '玲'),
+ (0xF9AE, 'M', '瑩'),
+ (0xF9AF, 'M', '羚'),
+ (0xF9B0, 'M', '聆'),
+ (0xF9B1, 'M', '鈴'),
+ (0xF9B2, 'M', '零'),
+ (0xF9B3, 'M', '靈'),
+ (0xF9B4, 'M', '領'),
+ (0xF9B5, 'M', '例'),
+ (0xF9B6, 'M', '禮'),
+ (0xF9B7, 'M', '醴'),
+ (0xF9B8, 'M', '隸'),
+ (0xF9B9, 'M', '惡'),
+ (0xF9BA, 'M', '了'),
+ (0xF9BB, 'M', '僚'),
+ (0xF9BC, 'M', '寮'),
+ (0xF9BD, 'M', '尿'),
+ (0xF9BE, 'M', '料'),
+ (0xF9BF, 'M', '樂'),
+ (0xF9C0, 'M', '燎'),
+ (0xF9C1, 'M', '療'),
+ (0xF9C2, 'M', '蓼'),
+ (0xF9C3, 'M', '遼'),
+ (0xF9C4, 'M', '龍'),
+ (0xF9C5, 'M', '暈'),
+ (0xF9C6, 'M', '阮'),
+ (0xF9C7, 'M', '劉'),
+ (0xF9C8, 'M', '杻'),
+ (0xF9C9, 'M', '柳'),
+ (0xF9CA, 'M', '流'),
+ (0xF9CB, 'M', '溜'),
+ (0xF9CC, 'M', '琉'),
+ (0xF9CD, 'M', '留'),
+ (0xF9CE, 'M', '硫'),
+ (0xF9CF, 'M', '紐'),
+ (0xF9D0, 'M', '類'),
+ (0xF9D1, 'M', '六'),
+ (0xF9D2, 'M', '戮'),
+ (0xF9D3, 'M', '陸'),
+ (0xF9D4, 'M', '倫'),
+ (0xF9D5, 'M', '崙'),
+ (0xF9D6, 'M', '淪'),
+ (0xF9D7, 'M', '輪'),
+ (0xF9D8, 'M', '律'),
+ (0xF9D9, 'M', '慄'),
+ (0xF9DA, 'M', '栗'),
+ (0xF9DB, 'M', '率'),
+ (0xF9DC, 'M', '隆'),
+ (0xF9DD, 'M', '利'),
+ (0xF9DE, 'M', '吏'),
+ (0xF9DF, 'M', '履'),
+ (0xF9E0, 'M', '易'),
+ (0xF9E1, 'M', '李'),
+ (0xF9E2, 'M', '梨'),
+ (0xF9E3, 'M', '泥'),
+ (0xF9E4, 'M', '理'),
+ (0xF9E5, 'M', '痢'),
+ (0xF9E6, 'M', '罹'),
+ (0xF9E7, 'M', '裏'),
+ (0xF9E8, 'M', '裡'),
+ (0xF9E9, 'M', '里'),
+ (0xF9EA, 'M', '離'),
+ (0xF9EB, 'M', '匿'),
+ (0xF9EC, 'M', '溺'),
+ (0xF9ED, 'M', '吝'),
+ (0xF9EE, 'M', '燐'),
+ (0xF9EF, 'M', '璘'),
+ (0xF9F0, 'M', '藺'),
+ (0xF9F1, 'M', '隣'),
+ (0xF9F2, 'M', '鱗'),
+ (0xF9F3, 'M', '麟'),
+ (0xF9F4, 'M', '林'),
+ (0xF9F5, 'M', '淋'),
+ (0xF9F6, 'M', '臨'),
+ (0xF9F7, 'M', '立'),
+ (0xF9F8, 'M', '笠'),
+ (0xF9F9, 'M', '粒'),
+ (0xF9FA, 'M', '狀'),
+ (0xF9FB, 'M', '炙'),
+ (0xF9FC, 'M', '識'),
+ (0xF9FD, 'M', '什'),
+ (0xF9FE, 'M', '茶'),
+ (0xF9FF, 'M', '刺'),
+ (0xFA00, 'M', '切'),
+ (0xFA01, 'M', '度'),
+ (0xFA02, 'M', '拓'),
+ (0xFA03, 'M', '糖'),
+ (0xFA04, 'M', '宅'),
+ (0xFA05, 'M', '洞'),
+ (0xFA06, 'M', '暴'),
+ (0xFA07, 'M', '輻'),
+ (0xFA08, 'M', '行'),
+ (0xFA09, 'M', '降'),
+ (0xFA0A, 'M', '見'),
+ (0xFA0B, 'M', '廓'),
+ (0xFA0C, 'M', '兀'),
+ (0xFA0D, 'M', '嗀'),
+ ]
+
+def _seg_42() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFA0E, 'V'),
+ (0xFA10, 'M', '塚'),
+ (0xFA11, 'V'),
+ (0xFA12, 'M', '晴'),
+ (0xFA13, 'V'),
+ (0xFA15, 'M', '凞'),
+ (0xFA16, 'M', '猪'),
+ (0xFA17, 'M', '益'),
+ (0xFA18, 'M', '礼'),
+ (0xFA19, 'M', '神'),
+ (0xFA1A, 'M', '祥'),
+ (0xFA1B, 'M', '福'),
+ (0xFA1C, 'M', '靖'),
+ (0xFA1D, 'M', '精'),
+ (0xFA1E, 'M', '羽'),
+ (0xFA1F, 'V'),
+ (0xFA20, 'M', '蘒'),
+ (0xFA21, 'V'),
+ (0xFA22, 'M', '諸'),
+ (0xFA23, 'V'),
+ (0xFA25, 'M', '逸'),
+ (0xFA26, 'M', '都'),
+ (0xFA27, 'V'),
+ (0xFA2A, 'M', '飯'),
+ (0xFA2B, 'M', '飼'),
+ (0xFA2C, 'M', '館'),
+ (0xFA2D, 'M', '鶴'),
+ (0xFA2E, 'M', '郞'),
+ (0xFA2F, 'M', '隷'),
+ (0xFA30, 'M', '侮'),
+ (0xFA31, 'M', '僧'),
+ (0xFA32, 'M', '免'),
+ (0xFA33, 'M', '勉'),
+ (0xFA34, 'M', '勤'),
+ (0xFA35, 'M', '卑'),
+ (0xFA36, 'M', '喝'),
+ (0xFA37, 'M', '嘆'),
+ (0xFA38, 'M', '器'),
+ (0xFA39, 'M', '塀'),
+ (0xFA3A, 'M', '墨'),
+ (0xFA3B, 'M', '層'),
+ (0xFA3C, 'M', '屮'),
+ (0xFA3D, 'M', '悔'),
+ (0xFA3E, 'M', '慨'),
+ (0xFA3F, 'M', '憎'),
+ (0xFA40, 'M', '懲'),
+ (0xFA41, 'M', '敏'),
+ (0xFA42, 'M', '既'),
+ (0xFA43, 'M', '暑'),
+ (0xFA44, 'M', '梅'),
+ (0xFA45, 'M', '海'),
+ (0xFA46, 'M', '渚'),
+ (0xFA47, 'M', '漢'),
+ (0xFA48, 'M', '煮'),
+ (0xFA49, 'M', '爫'),
+ (0xFA4A, 'M', '琢'),
+ (0xFA4B, 'M', '碑'),
+ (0xFA4C, 'M', '社'),
+ (0xFA4D, 'M', '祉'),
+ (0xFA4E, 'M', '祈'),
+ (0xFA4F, 'M', '祐'),
+ (0xFA50, 'M', '祖'),
+ (0xFA51, 'M', '祝'),
+ (0xFA52, 'M', '禍'),
+ (0xFA53, 'M', '禎'),
+ (0xFA54, 'M', '穀'),
+ (0xFA55, 'M', '突'),
+ (0xFA56, 'M', '節'),
+ (0xFA57, 'M', '練'),
+ (0xFA58, 'M', '縉'),
+ (0xFA59, 'M', '繁'),
+ (0xFA5A, 'M', '署'),
+ (0xFA5B, 'M', '者'),
+ (0xFA5C, 'M', '臭'),
+ (0xFA5D, 'M', '艹'),
+ (0xFA5F, 'M', '著'),
+ (0xFA60, 'M', '褐'),
+ (0xFA61, 'M', '視'),
+ (0xFA62, 'M', '謁'),
+ (0xFA63, 'M', '謹'),
+ (0xFA64, 'M', '賓'),
+ (0xFA65, 'M', '贈'),
+ (0xFA66, 'M', '辶'),
+ (0xFA67, 'M', '逸'),
+ (0xFA68, 'M', '難'),
+ (0xFA69, 'M', '響'),
+ (0xFA6A, 'M', '頻'),
+ (0xFA6B, 'M', '恵'),
+ (0xFA6C, 'M', '𤋮'),
+ (0xFA6D, 'M', '舘'),
+ (0xFA6E, 'X'),
+ (0xFA70, 'M', '並'),
+ (0xFA71, 'M', '况'),
+ (0xFA72, 'M', '全'),
+ (0xFA73, 'M', '侀'),
+ (0xFA74, 'M', '充'),
+ (0xFA75, 'M', '冀'),
+ (0xFA76, 'M', '勇'),
+ (0xFA77, 'M', '勺'),
+ (0xFA78, 'M', '喝'),
+ ]
+
+def _seg_43() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFA79, 'M', '啕'),
+ (0xFA7A, 'M', '喙'),
+ (0xFA7B, 'M', '嗢'),
+ (0xFA7C, 'M', '塚'),
+ (0xFA7D, 'M', '墳'),
+ (0xFA7E, 'M', '奄'),
+ (0xFA7F, 'M', '奔'),
+ (0xFA80, 'M', '婢'),
+ (0xFA81, 'M', '嬨'),
+ (0xFA82, 'M', '廒'),
+ (0xFA83, 'M', '廙'),
+ (0xFA84, 'M', '彩'),
+ (0xFA85, 'M', '徭'),
+ (0xFA86, 'M', '惘'),
+ (0xFA87, 'M', '慎'),
+ (0xFA88, 'M', '愈'),
+ (0xFA89, 'M', '憎'),
+ (0xFA8A, 'M', '慠'),
+ (0xFA8B, 'M', '懲'),
+ (0xFA8C, 'M', '戴'),
+ (0xFA8D, 'M', '揄'),
+ (0xFA8E, 'M', '搜'),
+ (0xFA8F, 'M', '摒'),
+ (0xFA90, 'M', '敖'),
+ (0xFA91, 'M', '晴'),
+ (0xFA92, 'M', '朗'),
+ (0xFA93, 'M', '望'),
+ (0xFA94, 'M', '杖'),
+ (0xFA95, 'M', '歹'),
+ (0xFA96, 'M', '殺'),
+ (0xFA97, 'M', '流'),
+ (0xFA98, 'M', '滛'),
+ (0xFA99, 'M', '滋'),
+ (0xFA9A, 'M', '漢'),
+ (0xFA9B, 'M', '瀞'),
+ (0xFA9C, 'M', '煮'),
+ (0xFA9D, 'M', '瞧'),
+ (0xFA9E, 'M', '爵'),
+ (0xFA9F, 'M', '犯'),
+ (0xFAA0, 'M', '猪'),
+ (0xFAA1, 'M', '瑱'),
+ (0xFAA2, 'M', '甆'),
+ (0xFAA3, 'M', '画'),
+ (0xFAA4, 'M', '瘝'),
+ (0xFAA5, 'M', '瘟'),
+ (0xFAA6, 'M', '益'),
+ (0xFAA7, 'M', '盛'),
+ (0xFAA8, 'M', '直'),
+ (0xFAA9, 'M', '睊'),
+ (0xFAAA, 'M', '着'),
+ (0xFAAB, 'M', '磌'),
+ (0xFAAC, 'M', '窱'),
+ (0xFAAD, 'M', '節'),
+ (0xFAAE, 'M', '类'),
+ (0xFAAF, 'M', '絛'),
+ (0xFAB0, 'M', '練'),
+ (0xFAB1, 'M', '缾'),
+ (0xFAB2, 'M', '者'),
+ (0xFAB3, 'M', '荒'),
+ (0xFAB4, 'M', '華'),
+ (0xFAB5, 'M', '蝹'),
+ (0xFAB6, 'M', '襁'),
+ (0xFAB7, 'M', '覆'),
+ (0xFAB8, 'M', '視'),
+ (0xFAB9, 'M', '調'),
+ (0xFABA, 'M', '諸'),
+ (0xFABB, 'M', '請'),
+ (0xFABC, 'M', '謁'),
+ (0xFABD, 'M', '諾'),
+ (0xFABE, 'M', '諭'),
+ (0xFABF, 'M', '謹'),
+ (0xFAC0, 'M', '變'),
+ (0xFAC1, 'M', '贈'),
+ (0xFAC2, 'M', '輸'),
+ (0xFAC3, 'M', '遲'),
+ (0xFAC4, 'M', '醙'),
+ (0xFAC5, 'M', '鉶'),
+ (0xFAC6, 'M', '陼'),
+ (0xFAC7, 'M', '難'),
+ (0xFAC8, 'M', '靖'),
+ (0xFAC9, 'M', '韛'),
+ (0xFACA, 'M', '響'),
+ (0xFACB, 'M', '頋'),
+ (0xFACC, 'M', '頻'),
+ (0xFACD, 'M', '鬒'),
+ (0xFACE, 'M', '龜'),
+ (0xFACF, 'M', '𢡊'),
+ (0xFAD0, 'M', '𢡄'),
+ (0xFAD1, 'M', '𣏕'),
+ (0xFAD2, 'M', '㮝'),
+ (0xFAD3, 'M', '䀘'),
+ (0xFAD4, 'M', '䀹'),
+ (0xFAD5, 'M', '𥉉'),
+ (0xFAD6, 'M', '𥳐'),
+ (0xFAD7, 'M', '𧻓'),
+ (0xFAD8, 'M', '齃'),
+ (0xFAD9, 'M', '龎'),
+ (0xFADA, 'X'),
+ (0xFB00, 'M', 'ff'),
+ (0xFB01, 'M', 'fi'),
+ ]
+
+def _seg_44() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFB02, 'M', 'fl'),
+ (0xFB03, 'M', 'ffi'),
+ (0xFB04, 'M', 'ffl'),
+ (0xFB05, 'M', 'st'),
+ (0xFB07, 'X'),
+ (0xFB13, 'M', 'մն'),
+ (0xFB14, 'M', 'մե'),
+ (0xFB15, 'M', 'մի'),
+ (0xFB16, 'M', 'վն'),
+ (0xFB17, 'M', 'մխ'),
+ (0xFB18, 'X'),
+ (0xFB1D, 'M', 'יִ'),
+ (0xFB1E, 'V'),
+ (0xFB1F, 'M', 'ײַ'),
+ (0xFB20, 'M', 'ע'),
+ (0xFB21, 'M', 'א'),
+ (0xFB22, 'M', 'ד'),
+ (0xFB23, 'M', 'ה'),
+ (0xFB24, 'M', 'כ'),
+ (0xFB25, 'M', 'ל'),
+ (0xFB26, 'M', 'ם'),
+ (0xFB27, 'M', 'ר'),
+ (0xFB28, 'M', 'ת'),
+ (0xFB29, '3', '+'),
+ (0xFB2A, 'M', 'שׁ'),
+ (0xFB2B, 'M', 'שׂ'),
+ (0xFB2C, 'M', 'שּׁ'),
+ (0xFB2D, 'M', 'שּׂ'),
+ (0xFB2E, 'M', 'אַ'),
+ (0xFB2F, 'M', 'אָ'),
+ (0xFB30, 'M', 'אּ'),
+ (0xFB31, 'M', 'בּ'),
+ (0xFB32, 'M', 'גּ'),
+ (0xFB33, 'M', 'דּ'),
+ (0xFB34, 'M', 'הּ'),
+ (0xFB35, 'M', 'וּ'),
+ (0xFB36, 'M', 'זּ'),
+ (0xFB37, 'X'),
+ (0xFB38, 'M', 'טּ'),
+ (0xFB39, 'M', 'יּ'),
+ (0xFB3A, 'M', 'ךּ'),
+ (0xFB3B, 'M', 'כּ'),
+ (0xFB3C, 'M', 'לּ'),
+ (0xFB3D, 'X'),
+ (0xFB3E, 'M', 'מּ'),
+ (0xFB3F, 'X'),
+ (0xFB40, 'M', 'נּ'),
+ (0xFB41, 'M', 'סּ'),
+ (0xFB42, 'X'),
+ (0xFB43, 'M', 'ףּ'),
+ (0xFB44, 'M', 'פּ'),
+ (0xFB45, 'X'),
+ (0xFB46, 'M', 'צּ'),
+ (0xFB47, 'M', 'קּ'),
+ (0xFB48, 'M', 'רּ'),
+ (0xFB49, 'M', 'שּ'),
+ (0xFB4A, 'M', 'תּ'),
+ (0xFB4B, 'M', 'וֹ'),
+ (0xFB4C, 'M', 'בֿ'),
+ (0xFB4D, 'M', 'כֿ'),
+ (0xFB4E, 'M', 'פֿ'),
+ (0xFB4F, 'M', 'אל'),
+ (0xFB50, 'M', 'ٱ'),
+ (0xFB52, 'M', 'ٻ'),
+ (0xFB56, 'M', 'پ'),
+ (0xFB5A, 'M', 'ڀ'),
+ (0xFB5E, 'M', 'ٺ'),
+ (0xFB62, 'M', 'ٿ'),
+ (0xFB66, 'M', 'ٹ'),
+ (0xFB6A, 'M', 'ڤ'),
+ (0xFB6E, 'M', 'ڦ'),
+ (0xFB72, 'M', 'ڄ'),
+ (0xFB76, 'M', 'ڃ'),
+ (0xFB7A, 'M', 'چ'),
+ (0xFB7E, 'M', 'ڇ'),
+ (0xFB82, 'M', 'ڍ'),
+ (0xFB84, 'M', 'ڌ'),
+ (0xFB86, 'M', 'ڎ'),
+ (0xFB88, 'M', 'ڈ'),
+ (0xFB8A, 'M', 'ژ'),
+ (0xFB8C, 'M', 'ڑ'),
+ (0xFB8E, 'M', 'ک'),
+ (0xFB92, 'M', 'گ'),
+ (0xFB96, 'M', 'ڳ'),
+ (0xFB9A, 'M', 'ڱ'),
+ (0xFB9E, 'M', 'ں'),
+ (0xFBA0, 'M', 'ڻ'),
+ (0xFBA4, 'M', 'ۀ'),
+ (0xFBA6, 'M', 'ہ'),
+ (0xFBAA, 'M', 'ھ'),
+ (0xFBAE, 'M', 'ے'),
+ (0xFBB0, 'M', 'ۓ'),
+ (0xFBB2, 'V'),
+ (0xFBC3, 'X'),
+ (0xFBD3, 'M', 'ڭ'),
+ (0xFBD7, 'M', 'ۇ'),
+ (0xFBD9, 'M', 'ۆ'),
+ (0xFBDB, 'M', 'ۈ'),
+ (0xFBDD, 'M', 'ۇٴ'),
+ (0xFBDE, 'M', 'ۋ'),
+ ]
+
+def _seg_45() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFBE0, 'M', 'ۅ'),
+ (0xFBE2, 'M', 'ۉ'),
+ (0xFBE4, 'M', 'ې'),
+ (0xFBE8, 'M', 'ى'),
+ (0xFBEA, 'M', 'ئا'),
+ (0xFBEC, 'M', 'ئە'),
+ (0xFBEE, 'M', 'ئو'),
+ (0xFBF0, 'M', 'ئۇ'),
+ (0xFBF2, 'M', 'ئۆ'),
+ (0xFBF4, 'M', 'ئۈ'),
+ (0xFBF6, 'M', 'ئې'),
+ (0xFBF9, 'M', 'ئى'),
+ (0xFBFC, 'M', 'ی'),
+ (0xFC00, 'M', 'ئج'),
+ (0xFC01, 'M', 'ئح'),
+ (0xFC02, 'M', 'ئم'),
+ (0xFC03, 'M', 'ئى'),
+ (0xFC04, 'M', 'ئي'),
+ (0xFC05, 'M', 'بج'),
+ (0xFC06, 'M', 'بح'),
+ (0xFC07, 'M', 'بخ'),
+ (0xFC08, 'M', 'بم'),
+ (0xFC09, 'M', 'بى'),
+ (0xFC0A, 'M', 'بي'),
+ (0xFC0B, 'M', 'تج'),
+ (0xFC0C, 'M', 'تح'),
+ (0xFC0D, 'M', 'تخ'),
+ (0xFC0E, 'M', 'تم'),
+ (0xFC0F, 'M', 'تى'),
+ (0xFC10, 'M', 'تي'),
+ (0xFC11, 'M', 'ثج'),
+ (0xFC12, 'M', 'ثم'),
+ (0xFC13, 'M', 'ثى'),
+ (0xFC14, 'M', 'ثي'),
+ (0xFC15, 'M', 'جح'),
+ (0xFC16, 'M', 'جم'),
+ (0xFC17, 'M', 'حج'),
+ (0xFC18, 'M', 'حم'),
+ (0xFC19, 'M', 'خج'),
+ (0xFC1A, 'M', 'خح'),
+ (0xFC1B, 'M', 'خم'),
+ (0xFC1C, 'M', 'سج'),
+ (0xFC1D, 'M', 'سح'),
+ (0xFC1E, 'M', 'سخ'),
+ (0xFC1F, 'M', 'سم'),
+ (0xFC20, 'M', 'صح'),
+ (0xFC21, 'M', 'صم'),
+ (0xFC22, 'M', 'ضج'),
+ (0xFC23, 'M', 'ضح'),
+ (0xFC24, 'M', 'ضخ'),
+ (0xFC25, 'M', 'ضم'),
+ (0xFC26, 'M', 'طح'),
+ (0xFC27, 'M', 'طم'),
+ (0xFC28, 'M', 'ظم'),
+ (0xFC29, 'M', 'عج'),
+ (0xFC2A, 'M', 'عم'),
+ (0xFC2B, 'M', 'غج'),
+ (0xFC2C, 'M', 'غم'),
+ (0xFC2D, 'M', 'فج'),
+ (0xFC2E, 'M', 'فح'),
+ (0xFC2F, 'M', 'فخ'),
+ (0xFC30, 'M', 'فم'),
+ (0xFC31, 'M', 'فى'),
+ (0xFC32, 'M', 'في'),
+ (0xFC33, 'M', 'قح'),
+ (0xFC34, 'M', 'قم'),
+ (0xFC35, 'M', 'قى'),
+ (0xFC36, 'M', 'قي'),
+ (0xFC37, 'M', 'كا'),
+ (0xFC38, 'M', 'كج'),
+ (0xFC39, 'M', 'كح'),
+ (0xFC3A, 'M', 'كخ'),
+ (0xFC3B, 'M', 'كل'),
+ (0xFC3C, 'M', 'كم'),
+ (0xFC3D, 'M', 'كى'),
+ (0xFC3E, 'M', 'كي'),
+ (0xFC3F, 'M', 'لج'),
+ (0xFC40, 'M', 'لح'),
+ (0xFC41, 'M', 'لخ'),
+ (0xFC42, 'M', 'لم'),
+ (0xFC43, 'M', 'لى'),
+ (0xFC44, 'M', 'لي'),
+ (0xFC45, 'M', 'مج'),
+ (0xFC46, 'M', 'مح'),
+ (0xFC47, 'M', 'مخ'),
+ (0xFC48, 'M', 'مم'),
+ (0xFC49, 'M', 'مى'),
+ (0xFC4A, 'M', 'مي'),
+ (0xFC4B, 'M', 'نج'),
+ (0xFC4C, 'M', 'نح'),
+ (0xFC4D, 'M', 'نخ'),
+ (0xFC4E, 'M', 'نم'),
+ (0xFC4F, 'M', 'نى'),
+ (0xFC50, 'M', 'ني'),
+ (0xFC51, 'M', 'هج'),
+ (0xFC52, 'M', 'هم'),
+ (0xFC53, 'M', 'هى'),
+ (0xFC54, 'M', 'هي'),
+ (0xFC55, 'M', 'يج'),
+ (0xFC56, 'M', 'يح'),
+ ]
+
+def _seg_46() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFC57, 'M', 'يخ'),
+ (0xFC58, 'M', 'يم'),
+ (0xFC59, 'M', 'يى'),
+ (0xFC5A, 'M', 'يي'),
+ (0xFC5B, 'M', 'ذٰ'),
+ (0xFC5C, 'M', 'رٰ'),
+ (0xFC5D, 'M', 'ىٰ'),
+ (0xFC5E, '3', ' ٌّ'),
+ (0xFC5F, '3', ' ٍّ'),
+ (0xFC60, '3', ' َّ'),
+ (0xFC61, '3', ' ُّ'),
+ (0xFC62, '3', ' ِّ'),
+ (0xFC63, '3', ' ّٰ'),
+ (0xFC64, 'M', 'ئر'),
+ (0xFC65, 'M', 'ئز'),
+ (0xFC66, 'M', 'ئم'),
+ (0xFC67, 'M', 'ئن'),
+ (0xFC68, 'M', 'ئى'),
+ (0xFC69, 'M', 'ئي'),
+ (0xFC6A, 'M', 'بر'),
+ (0xFC6B, 'M', 'بز'),
+ (0xFC6C, 'M', 'بم'),
+ (0xFC6D, 'M', 'بن'),
+ (0xFC6E, 'M', 'بى'),
+ (0xFC6F, 'M', 'بي'),
+ (0xFC70, 'M', 'تر'),
+ (0xFC71, 'M', 'تز'),
+ (0xFC72, 'M', 'تم'),
+ (0xFC73, 'M', 'تن'),
+ (0xFC74, 'M', 'تى'),
+ (0xFC75, 'M', 'تي'),
+ (0xFC76, 'M', 'ثر'),
+ (0xFC77, 'M', 'ثز'),
+ (0xFC78, 'M', 'ثم'),
+ (0xFC79, 'M', 'ثن'),
+ (0xFC7A, 'M', 'ثى'),
+ (0xFC7B, 'M', 'ثي'),
+ (0xFC7C, 'M', 'فى'),
+ (0xFC7D, 'M', 'في'),
+ (0xFC7E, 'M', 'قى'),
+ (0xFC7F, 'M', 'قي'),
+ (0xFC80, 'M', 'كا'),
+ (0xFC81, 'M', 'كل'),
+ (0xFC82, 'M', 'كم'),
+ (0xFC83, 'M', 'كى'),
+ (0xFC84, 'M', 'كي'),
+ (0xFC85, 'M', 'لم'),
+ (0xFC86, 'M', 'لى'),
+ (0xFC87, 'M', 'لي'),
+ (0xFC88, 'M', 'ما'),
+ (0xFC89, 'M', 'مم'),
+ (0xFC8A, 'M', 'نر'),
+ (0xFC8B, 'M', 'نز'),
+ (0xFC8C, 'M', 'نم'),
+ (0xFC8D, 'M', 'نن'),
+ (0xFC8E, 'M', 'نى'),
+ (0xFC8F, 'M', 'ني'),
+ (0xFC90, 'M', 'ىٰ'),
+ (0xFC91, 'M', 'ير'),
+ (0xFC92, 'M', 'يز'),
+ (0xFC93, 'M', 'يم'),
+ (0xFC94, 'M', 'ين'),
+ (0xFC95, 'M', 'يى'),
+ (0xFC96, 'M', 'يي'),
+ (0xFC97, 'M', 'ئج'),
+ (0xFC98, 'M', 'ئح'),
+ (0xFC99, 'M', 'ئخ'),
+ (0xFC9A, 'M', 'ئم'),
+ (0xFC9B, 'M', 'ئه'),
+ (0xFC9C, 'M', 'بج'),
+ (0xFC9D, 'M', 'بح'),
+ (0xFC9E, 'M', 'بخ'),
+ (0xFC9F, 'M', 'بم'),
+ (0xFCA0, 'M', 'به'),
+ (0xFCA1, 'M', 'تج'),
+ (0xFCA2, 'M', 'تح'),
+ (0xFCA3, 'M', 'تخ'),
+ (0xFCA4, 'M', 'تم'),
+ (0xFCA5, 'M', 'ته'),
+ (0xFCA6, 'M', 'ثم'),
+ (0xFCA7, 'M', 'جح'),
+ (0xFCA8, 'M', 'جم'),
+ (0xFCA9, 'M', 'حج'),
+ (0xFCAA, 'M', 'حم'),
+ (0xFCAB, 'M', 'خج'),
+ (0xFCAC, 'M', 'خم'),
+ (0xFCAD, 'M', 'سج'),
+ (0xFCAE, 'M', 'سح'),
+ (0xFCAF, 'M', 'سخ'),
+ (0xFCB0, 'M', 'سم'),
+ (0xFCB1, 'M', 'صح'),
+ (0xFCB2, 'M', 'صخ'),
+ (0xFCB3, 'M', 'صم'),
+ (0xFCB4, 'M', 'ضج'),
+ (0xFCB5, 'M', 'ضح'),
+ (0xFCB6, 'M', 'ضخ'),
+ (0xFCB7, 'M', 'ضم'),
+ (0xFCB8, 'M', 'طح'),
+ (0xFCB9, 'M', 'ظم'),
+ (0xFCBA, 'M', 'عج'),
+ ]
+
+def _seg_47() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFCBB, 'M', 'عم'),
+ (0xFCBC, 'M', 'غج'),
+ (0xFCBD, 'M', 'غم'),
+ (0xFCBE, 'M', 'فج'),
+ (0xFCBF, 'M', 'فح'),
+ (0xFCC0, 'M', 'فخ'),
+ (0xFCC1, 'M', 'فم'),
+ (0xFCC2, 'M', 'قح'),
+ (0xFCC3, 'M', 'قم'),
+ (0xFCC4, 'M', 'كج'),
+ (0xFCC5, 'M', 'كح'),
+ (0xFCC6, 'M', 'كخ'),
+ (0xFCC7, 'M', 'كل'),
+ (0xFCC8, 'M', 'كم'),
+ (0xFCC9, 'M', 'لج'),
+ (0xFCCA, 'M', 'لح'),
+ (0xFCCB, 'M', 'لخ'),
+ (0xFCCC, 'M', 'لم'),
+ (0xFCCD, 'M', 'له'),
+ (0xFCCE, 'M', 'مج'),
+ (0xFCCF, 'M', 'مح'),
+ (0xFCD0, 'M', 'مخ'),
+ (0xFCD1, 'M', 'مم'),
+ (0xFCD2, 'M', 'نج'),
+ (0xFCD3, 'M', 'نح'),
+ (0xFCD4, 'M', 'نخ'),
+ (0xFCD5, 'M', 'نم'),
+ (0xFCD6, 'M', 'نه'),
+ (0xFCD7, 'M', 'هج'),
+ (0xFCD8, 'M', 'هم'),
+ (0xFCD9, 'M', 'هٰ'),
+ (0xFCDA, 'M', 'يج'),
+ (0xFCDB, 'M', 'يح'),
+ (0xFCDC, 'M', 'يخ'),
+ (0xFCDD, 'M', 'يم'),
+ (0xFCDE, 'M', 'يه'),
+ (0xFCDF, 'M', 'ئم'),
+ (0xFCE0, 'M', 'ئه'),
+ (0xFCE1, 'M', 'بم'),
+ (0xFCE2, 'M', 'به'),
+ (0xFCE3, 'M', 'تم'),
+ (0xFCE4, 'M', 'ته'),
+ (0xFCE5, 'M', 'ثم'),
+ (0xFCE6, 'M', 'ثه'),
+ (0xFCE7, 'M', 'سم'),
+ (0xFCE8, 'M', 'سه'),
+ (0xFCE9, 'M', 'شم'),
+ (0xFCEA, 'M', 'شه'),
+ (0xFCEB, 'M', 'كل'),
+ (0xFCEC, 'M', 'كم'),
+ (0xFCED, 'M', 'لم'),
+ (0xFCEE, 'M', 'نم'),
+ (0xFCEF, 'M', 'نه'),
+ (0xFCF0, 'M', 'يم'),
+ (0xFCF1, 'M', 'يه'),
+ (0xFCF2, 'M', 'ـَّ'),
+ (0xFCF3, 'M', 'ـُّ'),
+ (0xFCF4, 'M', 'ـِّ'),
+ (0xFCF5, 'M', 'طى'),
+ (0xFCF6, 'M', 'طي'),
+ (0xFCF7, 'M', 'عى'),
+ (0xFCF8, 'M', 'عي'),
+ (0xFCF9, 'M', 'غى'),
+ (0xFCFA, 'M', 'غي'),
+ (0xFCFB, 'M', 'سى'),
+ (0xFCFC, 'M', 'سي'),
+ (0xFCFD, 'M', 'شى'),
+ (0xFCFE, 'M', 'شي'),
+ (0xFCFF, 'M', 'حى'),
+ (0xFD00, 'M', 'حي'),
+ (0xFD01, 'M', 'جى'),
+ (0xFD02, 'M', 'جي'),
+ (0xFD03, 'M', 'خى'),
+ (0xFD04, 'M', 'خي'),
+ (0xFD05, 'M', 'صى'),
+ (0xFD06, 'M', 'صي'),
+ (0xFD07, 'M', 'ضى'),
+ (0xFD08, 'M', 'ضي'),
+ (0xFD09, 'M', 'شج'),
+ (0xFD0A, 'M', 'شح'),
+ (0xFD0B, 'M', 'شخ'),
+ (0xFD0C, 'M', 'شم'),
+ (0xFD0D, 'M', 'شر'),
+ (0xFD0E, 'M', 'سر'),
+ (0xFD0F, 'M', 'صر'),
+ (0xFD10, 'M', 'ضر'),
+ (0xFD11, 'M', 'طى'),
+ (0xFD12, 'M', 'طي'),
+ (0xFD13, 'M', 'عى'),
+ (0xFD14, 'M', 'عي'),
+ (0xFD15, 'M', 'غى'),
+ (0xFD16, 'M', 'غي'),
+ (0xFD17, 'M', 'سى'),
+ (0xFD18, 'M', 'سي'),
+ (0xFD19, 'M', 'شى'),
+ (0xFD1A, 'M', 'شي'),
+ (0xFD1B, 'M', 'حى'),
+ (0xFD1C, 'M', 'حي'),
+ (0xFD1D, 'M', 'جى'),
+ (0xFD1E, 'M', 'جي'),
+ ]
+
+def _seg_48() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFD1F, 'M', 'خى'),
+ (0xFD20, 'M', 'خي'),
+ (0xFD21, 'M', 'صى'),
+ (0xFD22, 'M', 'صي'),
+ (0xFD23, 'M', 'ضى'),
+ (0xFD24, 'M', 'ضي'),
+ (0xFD25, 'M', 'شج'),
+ (0xFD26, 'M', 'شح'),
+ (0xFD27, 'M', 'شخ'),
+ (0xFD28, 'M', 'شم'),
+ (0xFD29, 'M', 'شر'),
+ (0xFD2A, 'M', 'سر'),
+ (0xFD2B, 'M', 'صر'),
+ (0xFD2C, 'M', 'ضر'),
+ (0xFD2D, 'M', 'شج'),
+ (0xFD2E, 'M', 'شح'),
+ (0xFD2F, 'M', 'شخ'),
+ (0xFD30, 'M', 'شم'),
+ (0xFD31, 'M', 'سه'),
+ (0xFD32, 'M', 'شه'),
+ (0xFD33, 'M', 'طم'),
+ (0xFD34, 'M', 'سج'),
+ (0xFD35, 'M', 'سح'),
+ (0xFD36, 'M', 'سخ'),
+ (0xFD37, 'M', 'شج'),
+ (0xFD38, 'M', 'شح'),
+ (0xFD39, 'M', 'شخ'),
+ (0xFD3A, 'M', 'طم'),
+ (0xFD3B, 'M', 'ظم'),
+ (0xFD3C, 'M', 'اً'),
+ (0xFD3E, 'V'),
+ (0xFD50, 'M', 'تجم'),
+ (0xFD51, 'M', 'تحج'),
+ (0xFD53, 'M', 'تحم'),
+ (0xFD54, 'M', 'تخم'),
+ (0xFD55, 'M', 'تمج'),
+ (0xFD56, 'M', 'تمح'),
+ (0xFD57, 'M', 'تمخ'),
+ (0xFD58, 'M', 'جمح'),
+ (0xFD5A, 'M', 'حمي'),
+ (0xFD5B, 'M', 'حمى'),
+ (0xFD5C, 'M', 'سحج'),
+ (0xFD5D, 'M', 'سجح'),
+ (0xFD5E, 'M', 'سجى'),
+ (0xFD5F, 'M', 'سمح'),
+ (0xFD61, 'M', 'سمج'),
+ (0xFD62, 'M', 'سمم'),
+ (0xFD64, 'M', 'صحح'),
+ (0xFD66, 'M', 'صمم'),
+ (0xFD67, 'M', 'شحم'),
+ (0xFD69, 'M', 'شجي'),
+ (0xFD6A, 'M', 'شمخ'),
+ (0xFD6C, 'M', 'شمم'),
+ (0xFD6E, 'M', 'ضحى'),
+ (0xFD6F, 'M', 'ضخم'),
+ (0xFD71, 'M', 'طمح'),
+ (0xFD73, 'M', 'طمم'),
+ (0xFD74, 'M', 'طمي'),
+ (0xFD75, 'M', 'عجم'),
+ (0xFD76, 'M', 'عمم'),
+ (0xFD78, 'M', 'عمى'),
+ (0xFD79, 'M', 'غمم'),
+ (0xFD7A, 'M', 'غمي'),
+ (0xFD7B, 'M', 'غمى'),
+ (0xFD7C, 'M', 'فخم'),
+ (0xFD7E, 'M', 'قمح'),
+ (0xFD7F, 'M', 'قمم'),
+ (0xFD80, 'M', 'لحم'),
+ (0xFD81, 'M', 'لحي'),
+ (0xFD82, 'M', 'لحى'),
+ (0xFD83, 'M', 'لجج'),
+ (0xFD85, 'M', 'لخم'),
+ (0xFD87, 'M', 'لمح'),
+ (0xFD89, 'M', 'محج'),
+ (0xFD8A, 'M', 'محم'),
+ (0xFD8B, 'M', 'محي'),
+ (0xFD8C, 'M', 'مجح'),
+ (0xFD8D, 'M', 'مجم'),
+ (0xFD8E, 'M', 'مخج'),
+ (0xFD8F, 'M', 'مخم'),
+ (0xFD90, 'X'),
+ (0xFD92, 'M', 'مجخ'),
+ (0xFD93, 'M', 'همج'),
+ (0xFD94, 'M', 'همم'),
+ (0xFD95, 'M', 'نحم'),
+ (0xFD96, 'M', 'نحى'),
+ (0xFD97, 'M', 'نجم'),
+ (0xFD99, 'M', 'نجى'),
+ (0xFD9A, 'M', 'نمي'),
+ (0xFD9B, 'M', 'نمى'),
+ (0xFD9C, 'M', 'يمم'),
+ (0xFD9E, 'M', 'بخي'),
+ (0xFD9F, 'M', 'تجي'),
+ (0xFDA0, 'M', 'تجى'),
+ (0xFDA1, 'M', 'تخي'),
+ (0xFDA2, 'M', 'تخى'),
+ (0xFDA3, 'M', 'تمي'),
+ (0xFDA4, 'M', 'تمى'),
+ (0xFDA5, 'M', 'جمي'),
+ (0xFDA6, 'M', 'جحى'),
+ ]
+
+def _seg_49() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFDA7, 'M', 'جمى'),
+ (0xFDA8, 'M', 'سخى'),
+ (0xFDA9, 'M', 'صحي'),
+ (0xFDAA, 'M', 'شحي'),
+ (0xFDAB, 'M', 'ضحي'),
+ (0xFDAC, 'M', 'لجي'),
+ (0xFDAD, 'M', 'لمي'),
+ (0xFDAE, 'M', 'يحي'),
+ (0xFDAF, 'M', 'يجي'),
+ (0xFDB0, 'M', 'يمي'),
+ (0xFDB1, 'M', 'ممي'),
+ (0xFDB2, 'M', 'قمي'),
+ (0xFDB3, 'M', 'نحي'),
+ (0xFDB4, 'M', 'قمح'),
+ (0xFDB5, 'M', 'لحم'),
+ (0xFDB6, 'M', 'عمي'),
+ (0xFDB7, 'M', 'كمي'),
+ (0xFDB8, 'M', 'نجح'),
+ (0xFDB9, 'M', 'مخي'),
+ (0xFDBA, 'M', 'لجم'),
+ (0xFDBB, 'M', 'كمم'),
+ (0xFDBC, 'M', 'لجم'),
+ (0xFDBD, 'M', 'نجح'),
+ (0xFDBE, 'M', 'جحي'),
+ (0xFDBF, 'M', 'حجي'),
+ (0xFDC0, 'M', 'مجي'),
+ (0xFDC1, 'M', 'فمي'),
+ (0xFDC2, 'M', 'بحي'),
+ (0xFDC3, 'M', 'كمم'),
+ (0xFDC4, 'M', 'عجم'),
+ (0xFDC5, 'M', 'صمم'),
+ (0xFDC6, 'M', 'سخي'),
+ (0xFDC7, 'M', 'نجي'),
+ (0xFDC8, 'X'),
+ (0xFDCF, 'V'),
+ (0xFDD0, 'X'),
+ (0xFDF0, 'M', 'صلے'),
+ (0xFDF1, 'M', 'قلے'),
+ (0xFDF2, 'M', 'الله'),
+ (0xFDF3, 'M', 'اكبر'),
+ (0xFDF4, 'M', 'محمد'),
+ (0xFDF5, 'M', 'صلعم'),
+ (0xFDF6, 'M', 'رسول'),
+ (0xFDF7, 'M', 'عليه'),
+ (0xFDF8, 'M', 'وسلم'),
+ (0xFDF9, 'M', 'صلى'),
+ (0xFDFA, '3', 'صلى الله عليه وسلم'),
+ (0xFDFB, '3', 'جل جلاله'),
+ (0xFDFC, 'M', 'ریال'),
+ (0xFDFD, 'V'),
+ (0xFE00, 'I'),
+ (0xFE10, '3', ','),
+ (0xFE11, 'M', '、'),
+ (0xFE12, 'X'),
+ (0xFE13, '3', ':'),
+ (0xFE14, '3', ';'),
+ (0xFE15, '3', '!'),
+ (0xFE16, '3', '?'),
+ (0xFE17, 'M', '〖'),
+ (0xFE18, 'M', '〗'),
+ (0xFE19, 'X'),
+ (0xFE20, 'V'),
+ (0xFE30, 'X'),
+ (0xFE31, 'M', '—'),
+ (0xFE32, 'M', '–'),
+ (0xFE33, '3', '_'),
+ (0xFE35, '3', '('),
+ (0xFE36, '3', ')'),
+ (0xFE37, '3', '{'),
+ (0xFE38, '3', '}'),
+ (0xFE39, 'M', '〔'),
+ (0xFE3A, 'M', '〕'),
+ (0xFE3B, 'M', '【'),
+ (0xFE3C, 'M', '】'),
+ (0xFE3D, 'M', '《'),
+ (0xFE3E, 'M', '》'),
+ (0xFE3F, 'M', '〈'),
+ (0xFE40, 'M', '〉'),
+ (0xFE41, 'M', '「'),
+ (0xFE42, 'M', '」'),
+ (0xFE43, 'M', '『'),
+ (0xFE44, 'M', '』'),
+ (0xFE45, 'V'),
+ (0xFE47, '3', '['),
+ (0xFE48, '3', ']'),
+ (0xFE49, '3', ' ̅'),
+ (0xFE4D, '3', '_'),
+ (0xFE50, '3', ','),
+ (0xFE51, 'M', '、'),
+ (0xFE52, 'X'),
+ (0xFE54, '3', ';'),
+ (0xFE55, '3', ':'),
+ (0xFE56, '3', '?'),
+ (0xFE57, '3', '!'),
+ (0xFE58, 'M', '—'),
+ (0xFE59, '3', '('),
+ (0xFE5A, '3', ')'),
+ (0xFE5B, '3', '{'),
+ (0xFE5C, '3', '}'),
+ (0xFE5D, 'M', '〔'),
+ ]
+
+def _seg_50() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFE5E, 'M', '〕'),
+ (0xFE5F, '3', '#'),
+ (0xFE60, '3', '&'),
+ (0xFE61, '3', '*'),
+ (0xFE62, '3', '+'),
+ (0xFE63, 'M', '-'),
+ (0xFE64, '3', '<'),
+ (0xFE65, '3', '>'),
+ (0xFE66, '3', '='),
+ (0xFE67, 'X'),
+ (0xFE68, '3', '\\'),
+ (0xFE69, '3', '$'),
+ (0xFE6A, '3', '%'),
+ (0xFE6B, '3', '@'),
+ (0xFE6C, 'X'),
+ (0xFE70, '3', ' ً'),
+ (0xFE71, 'M', 'ـً'),
+ (0xFE72, '3', ' ٌ'),
+ (0xFE73, 'V'),
+ (0xFE74, '3', ' ٍ'),
+ (0xFE75, 'X'),
+ (0xFE76, '3', ' َ'),
+ (0xFE77, 'M', 'ـَ'),
+ (0xFE78, '3', ' ُ'),
+ (0xFE79, 'M', 'ـُ'),
+ (0xFE7A, '3', ' ِ'),
+ (0xFE7B, 'M', 'ـِ'),
+ (0xFE7C, '3', ' ّ'),
+ (0xFE7D, 'M', 'ـّ'),
+ (0xFE7E, '3', ' ْ'),
+ (0xFE7F, 'M', 'ـْ'),
+ (0xFE80, 'M', 'ء'),
+ (0xFE81, 'M', 'آ'),
+ (0xFE83, 'M', 'أ'),
+ (0xFE85, 'M', 'ؤ'),
+ (0xFE87, 'M', 'إ'),
+ (0xFE89, 'M', 'ئ'),
+ (0xFE8D, 'M', 'ا'),
+ (0xFE8F, 'M', 'ب'),
+ (0xFE93, 'M', 'ة'),
+ (0xFE95, 'M', 'ت'),
+ (0xFE99, 'M', 'ث'),
+ (0xFE9D, 'M', 'ج'),
+ (0xFEA1, 'M', 'ح'),
+ (0xFEA5, 'M', 'خ'),
+ (0xFEA9, 'M', 'د'),
+ (0xFEAB, 'M', 'ذ'),
+ (0xFEAD, 'M', 'ر'),
+ (0xFEAF, 'M', 'ز'),
+ (0xFEB1, 'M', 'س'),
+ (0xFEB5, 'M', 'ش'),
+ (0xFEB9, 'M', 'ص'),
+ (0xFEBD, 'M', 'ض'),
+ (0xFEC1, 'M', 'ط'),
+ (0xFEC5, 'M', 'ظ'),
+ (0xFEC9, 'M', 'ع'),
+ (0xFECD, 'M', 'غ'),
+ (0xFED1, 'M', 'ف'),
+ (0xFED5, 'M', 'ق'),
+ (0xFED9, 'M', 'ك'),
+ (0xFEDD, 'M', 'ل'),
+ (0xFEE1, 'M', 'م'),
+ (0xFEE5, 'M', 'ن'),
+ (0xFEE9, 'M', 'ه'),
+ (0xFEED, 'M', 'و'),
+ (0xFEEF, 'M', 'ى'),
+ (0xFEF1, 'M', 'ي'),
+ (0xFEF5, 'M', 'لآ'),
+ (0xFEF7, 'M', 'لأ'),
+ (0xFEF9, 'M', 'لإ'),
+ (0xFEFB, 'M', 'لا'),
+ (0xFEFD, 'X'),
+ (0xFEFF, 'I'),
+ (0xFF00, 'X'),
+ (0xFF01, '3', '!'),
+ (0xFF02, '3', '"'),
+ (0xFF03, '3', '#'),
+ (0xFF04, '3', '$'),
+ (0xFF05, '3', '%'),
+ (0xFF06, '3', '&'),
+ (0xFF07, '3', '\''),
+ (0xFF08, '3', '('),
+ (0xFF09, '3', ')'),
+ (0xFF0A, '3', '*'),
+ (0xFF0B, '3', '+'),
+ (0xFF0C, '3', ','),
+ (0xFF0D, 'M', '-'),
+ (0xFF0E, 'M', '.'),
+ (0xFF0F, '3', '/'),
+ (0xFF10, 'M', '0'),
+ (0xFF11, 'M', '1'),
+ (0xFF12, 'M', '2'),
+ (0xFF13, 'M', '3'),
+ (0xFF14, 'M', '4'),
+ (0xFF15, 'M', '5'),
+ (0xFF16, 'M', '6'),
+ (0xFF17, 'M', '7'),
+ (0xFF18, 'M', '8'),
+ (0xFF19, 'M', '9'),
+ (0xFF1A, '3', ':'),
+ ]
+
+def _seg_51() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFF1B, '3', ';'),
+ (0xFF1C, '3', '<'),
+ (0xFF1D, '3', '='),
+ (0xFF1E, '3', '>'),
+ (0xFF1F, '3', '?'),
+ (0xFF20, '3', '@'),
+ (0xFF21, 'M', 'a'),
+ (0xFF22, 'M', 'b'),
+ (0xFF23, 'M', 'c'),
+ (0xFF24, 'M', 'd'),
+ (0xFF25, 'M', 'e'),
+ (0xFF26, 'M', 'f'),
+ (0xFF27, 'M', 'g'),
+ (0xFF28, 'M', 'h'),
+ (0xFF29, 'M', 'i'),
+ (0xFF2A, 'M', 'j'),
+ (0xFF2B, 'M', 'k'),
+ (0xFF2C, 'M', 'l'),
+ (0xFF2D, 'M', 'm'),
+ (0xFF2E, 'M', 'n'),
+ (0xFF2F, 'M', 'o'),
+ (0xFF30, 'M', 'p'),
+ (0xFF31, 'M', 'q'),
+ (0xFF32, 'M', 'r'),
+ (0xFF33, 'M', 's'),
+ (0xFF34, 'M', 't'),
+ (0xFF35, 'M', 'u'),
+ (0xFF36, 'M', 'v'),
+ (0xFF37, 'M', 'w'),
+ (0xFF38, 'M', 'x'),
+ (0xFF39, 'M', 'y'),
+ (0xFF3A, 'M', 'z'),
+ (0xFF3B, '3', '['),
+ (0xFF3C, '3', '\\'),
+ (0xFF3D, '3', ']'),
+ (0xFF3E, '3', '^'),
+ (0xFF3F, '3', '_'),
+ (0xFF40, '3', '`'),
+ (0xFF41, 'M', 'a'),
+ (0xFF42, 'M', 'b'),
+ (0xFF43, 'M', 'c'),
+ (0xFF44, 'M', 'd'),
+ (0xFF45, 'M', 'e'),
+ (0xFF46, 'M', 'f'),
+ (0xFF47, 'M', 'g'),
+ (0xFF48, 'M', 'h'),
+ (0xFF49, 'M', 'i'),
+ (0xFF4A, 'M', 'j'),
+ (0xFF4B, 'M', 'k'),
+ (0xFF4C, 'M', 'l'),
+ (0xFF4D, 'M', 'm'),
+ (0xFF4E, 'M', 'n'),
+ (0xFF4F, 'M', 'o'),
+ (0xFF50, 'M', 'p'),
+ (0xFF51, 'M', 'q'),
+ (0xFF52, 'M', 'r'),
+ (0xFF53, 'M', 's'),
+ (0xFF54, 'M', 't'),
+ (0xFF55, 'M', 'u'),
+ (0xFF56, 'M', 'v'),
+ (0xFF57, 'M', 'w'),
+ (0xFF58, 'M', 'x'),
+ (0xFF59, 'M', 'y'),
+ (0xFF5A, 'M', 'z'),
+ (0xFF5B, '3', '{'),
+ (0xFF5C, '3', '|'),
+ (0xFF5D, '3', '}'),
+ (0xFF5E, '3', '~'),
+ (0xFF5F, 'M', '⦅'),
+ (0xFF60, 'M', '⦆'),
+ (0xFF61, 'M', '.'),
+ (0xFF62, 'M', '「'),
+ (0xFF63, 'M', '」'),
+ (0xFF64, 'M', '、'),
+ (0xFF65, 'M', '・'),
+ (0xFF66, 'M', 'ヲ'),
+ (0xFF67, 'M', 'ァ'),
+ (0xFF68, 'M', 'ィ'),
+ (0xFF69, 'M', 'ゥ'),
+ (0xFF6A, 'M', 'ェ'),
+ (0xFF6B, 'M', 'ォ'),
+ (0xFF6C, 'M', 'ャ'),
+ (0xFF6D, 'M', 'ュ'),
+ (0xFF6E, 'M', 'ョ'),
+ (0xFF6F, 'M', 'ッ'),
+ (0xFF70, 'M', 'ー'),
+ (0xFF71, 'M', 'ア'),
+ (0xFF72, 'M', 'イ'),
+ (0xFF73, 'M', 'ウ'),
+ (0xFF74, 'M', 'エ'),
+ (0xFF75, 'M', 'オ'),
+ (0xFF76, 'M', 'カ'),
+ (0xFF77, 'M', 'キ'),
+ (0xFF78, 'M', 'ク'),
+ (0xFF79, 'M', 'ケ'),
+ (0xFF7A, 'M', 'コ'),
+ (0xFF7B, 'M', 'サ'),
+ (0xFF7C, 'M', 'シ'),
+ (0xFF7D, 'M', 'ス'),
+ (0xFF7E, 'M', 'セ'),
+ ]
+
+def _seg_52() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFF7F, 'M', 'ソ'),
+ (0xFF80, 'M', 'タ'),
+ (0xFF81, 'M', 'チ'),
+ (0xFF82, 'M', 'ツ'),
+ (0xFF83, 'M', 'テ'),
+ (0xFF84, 'M', 'ト'),
+ (0xFF85, 'M', 'ナ'),
+ (0xFF86, 'M', 'ニ'),
+ (0xFF87, 'M', 'ヌ'),
+ (0xFF88, 'M', 'ネ'),
+ (0xFF89, 'M', 'ノ'),
+ (0xFF8A, 'M', 'ハ'),
+ (0xFF8B, 'M', 'ヒ'),
+ (0xFF8C, 'M', 'フ'),
+ (0xFF8D, 'M', 'ヘ'),
+ (0xFF8E, 'M', 'ホ'),
+ (0xFF8F, 'M', 'マ'),
+ (0xFF90, 'M', 'ミ'),
+ (0xFF91, 'M', 'ム'),
+ (0xFF92, 'M', 'メ'),
+ (0xFF93, 'M', 'モ'),
+ (0xFF94, 'M', 'ヤ'),
+ (0xFF95, 'M', 'ユ'),
+ (0xFF96, 'M', 'ヨ'),
+ (0xFF97, 'M', 'ラ'),
+ (0xFF98, 'M', 'リ'),
+ (0xFF99, 'M', 'ル'),
+ (0xFF9A, 'M', 'レ'),
+ (0xFF9B, 'M', 'ロ'),
+ (0xFF9C, 'M', 'ワ'),
+ (0xFF9D, 'M', 'ン'),
+ (0xFF9E, 'M', '゙'),
+ (0xFF9F, 'M', '゚'),
+ (0xFFA0, 'X'),
+ (0xFFA1, 'M', 'ᄀ'),
+ (0xFFA2, 'M', 'ᄁ'),
+ (0xFFA3, 'M', 'ᆪ'),
+ (0xFFA4, 'M', 'ᄂ'),
+ (0xFFA5, 'M', 'ᆬ'),
+ (0xFFA6, 'M', 'ᆭ'),
+ (0xFFA7, 'M', 'ᄃ'),
+ (0xFFA8, 'M', 'ᄄ'),
+ (0xFFA9, 'M', 'ᄅ'),
+ (0xFFAA, 'M', 'ᆰ'),
+ (0xFFAB, 'M', 'ᆱ'),
+ (0xFFAC, 'M', 'ᆲ'),
+ (0xFFAD, 'M', 'ᆳ'),
+ (0xFFAE, 'M', 'ᆴ'),
+ (0xFFAF, 'M', 'ᆵ'),
+ (0xFFB0, 'M', 'ᄚ'),
+ (0xFFB1, 'M', 'ᄆ'),
+ (0xFFB2, 'M', 'ᄇ'),
+ (0xFFB3, 'M', 'ᄈ'),
+ (0xFFB4, 'M', 'ᄡ'),
+ (0xFFB5, 'M', 'ᄉ'),
+ (0xFFB6, 'M', 'ᄊ'),
+ (0xFFB7, 'M', 'ᄋ'),
+ (0xFFB8, 'M', 'ᄌ'),
+ (0xFFB9, 'M', 'ᄍ'),
+ (0xFFBA, 'M', 'ᄎ'),
+ (0xFFBB, 'M', 'ᄏ'),
+ (0xFFBC, 'M', 'ᄐ'),
+ (0xFFBD, 'M', 'ᄑ'),
+ (0xFFBE, 'M', 'ᄒ'),
+ (0xFFBF, 'X'),
+ (0xFFC2, 'M', 'ᅡ'),
+ (0xFFC3, 'M', 'ᅢ'),
+ (0xFFC4, 'M', 'ᅣ'),
+ (0xFFC5, 'M', 'ᅤ'),
+ (0xFFC6, 'M', 'ᅥ'),
+ (0xFFC7, 'M', 'ᅦ'),
+ (0xFFC8, 'X'),
+ (0xFFCA, 'M', 'ᅧ'),
+ (0xFFCB, 'M', 'ᅨ'),
+ (0xFFCC, 'M', 'ᅩ'),
+ (0xFFCD, 'M', 'ᅪ'),
+ (0xFFCE, 'M', 'ᅫ'),
+ (0xFFCF, 'M', 'ᅬ'),
+ (0xFFD0, 'X'),
+ (0xFFD2, 'M', 'ᅭ'),
+ (0xFFD3, 'M', 'ᅮ'),
+ (0xFFD4, 'M', 'ᅯ'),
+ (0xFFD5, 'M', 'ᅰ'),
+ (0xFFD6, 'M', 'ᅱ'),
+ (0xFFD7, 'M', 'ᅲ'),
+ (0xFFD8, 'X'),
+ (0xFFDA, 'M', 'ᅳ'),
+ (0xFFDB, 'M', 'ᅴ'),
+ (0xFFDC, 'M', 'ᅵ'),
+ (0xFFDD, 'X'),
+ (0xFFE0, 'M', '¢'),
+ (0xFFE1, 'M', '£'),
+ (0xFFE2, 'M', '¬'),
+ (0xFFE3, '3', ' ̄'),
+ (0xFFE4, 'M', '¦'),
+ (0xFFE5, 'M', '¥'),
+ (0xFFE6, 'M', '₩'),
+ (0xFFE7, 'X'),
+ (0xFFE8, 'M', '│'),
+ (0xFFE9, 'M', '←'),
+ ]
+
+def _seg_53() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0xFFEA, 'M', '↑'),
+ (0xFFEB, 'M', '→'),
+ (0xFFEC, 'M', '↓'),
+ (0xFFED, 'M', '■'),
+ (0xFFEE, 'M', '○'),
+ (0xFFEF, 'X'),
+ (0x10000, 'V'),
+ (0x1000C, 'X'),
+ (0x1000D, 'V'),
+ (0x10027, 'X'),
+ (0x10028, 'V'),
+ (0x1003B, 'X'),
+ (0x1003C, 'V'),
+ (0x1003E, 'X'),
+ (0x1003F, 'V'),
+ (0x1004E, 'X'),
+ (0x10050, 'V'),
+ (0x1005E, 'X'),
+ (0x10080, 'V'),
+ (0x100FB, 'X'),
+ (0x10100, 'V'),
+ (0x10103, 'X'),
+ (0x10107, 'V'),
+ (0x10134, 'X'),
+ (0x10137, 'V'),
+ (0x1018F, 'X'),
+ (0x10190, 'V'),
+ (0x1019D, 'X'),
+ (0x101A0, 'V'),
+ (0x101A1, 'X'),
+ (0x101D0, 'V'),
+ (0x101FE, 'X'),
+ (0x10280, 'V'),
+ (0x1029D, 'X'),
+ (0x102A0, 'V'),
+ (0x102D1, 'X'),
+ (0x102E0, 'V'),
+ (0x102FC, 'X'),
+ (0x10300, 'V'),
+ (0x10324, 'X'),
+ (0x1032D, 'V'),
+ (0x1034B, 'X'),
+ (0x10350, 'V'),
+ (0x1037B, 'X'),
+ (0x10380, 'V'),
+ (0x1039E, 'X'),
+ (0x1039F, 'V'),
+ (0x103C4, 'X'),
+ (0x103C8, 'V'),
+ (0x103D6, 'X'),
+ (0x10400, 'M', '𐐨'),
+ (0x10401, 'M', '𐐩'),
+ (0x10402, 'M', '𐐪'),
+ (0x10403, 'M', '𐐫'),
+ (0x10404, 'M', '𐐬'),
+ (0x10405, 'M', '𐐭'),
+ (0x10406, 'M', '𐐮'),
+ (0x10407, 'M', '𐐯'),
+ (0x10408, 'M', '𐐰'),
+ (0x10409, 'M', '𐐱'),
+ (0x1040A, 'M', '𐐲'),
+ (0x1040B, 'M', '𐐳'),
+ (0x1040C, 'M', '𐐴'),
+ (0x1040D, 'M', '𐐵'),
+ (0x1040E, 'M', '𐐶'),
+ (0x1040F, 'M', '𐐷'),
+ (0x10410, 'M', '𐐸'),
+ (0x10411, 'M', '𐐹'),
+ (0x10412, 'M', '𐐺'),
+ (0x10413, 'M', '𐐻'),
+ (0x10414, 'M', '𐐼'),
+ (0x10415, 'M', '𐐽'),
+ (0x10416, 'M', '𐐾'),
+ (0x10417, 'M', '𐐿'),
+ (0x10418, 'M', '𐑀'),
+ (0x10419, 'M', '𐑁'),
+ (0x1041A, 'M', '𐑂'),
+ (0x1041B, 'M', '𐑃'),
+ (0x1041C, 'M', '𐑄'),
+ (0x1041D, 'M', '𐑅'),
+ (0x1041E, 'M', '𐑆'),
+ (0x1041F, 'M', '𐑇'),
+ (0x10420, 'M', '𐑈'),
+ (0x10421, 'M', '𐑉'),
+ (0x10422, 'M', '𐑊'),
+ (0x10423, 'M', '𐑋'),
+ (0x10424, 'M', '𐑌'),
+ (0x10425, 'M', '𐑍'),
+ (0x10426, 'M', '𐑎'),
+ (0x10427, 'M', '𐑏'),
+ (0x10428, 'V'),
+ (0x1049E, 'X'),
+ (0x104A0, 'V'),
+ (0x104AA, 'X'),
+ (0x104B0, 'M', '𐓘'),
+ (0x104B1, 'M', '𐓙'),
+ (0x104B2, 'M', '𐓚'),
+ (0x104B3, 'M', '𐓛'),
+ (0x104B4, 'M', '𐓜'),
+ (0x104B5, 'M', '𐓝'),
+ ]
+
+def _seg_54() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x104B6, 'M', '𐓞'),
+ (0x104B7, 'M', '𐓟'),
+ (0x104B8, 'M', '𐓠'),
+ (0x104B9, 'M', '𐓡'),
+ (0x104BA, 'M', '𐓢'),
+ (0x104BB, 'M', '𐓣'),
+ (0x104BC, 'M', '𐓤'),
+ (0x104BD, 'M', '𐓥'),
+ (0x104BE, 'M', '𐓦'),
+ (0x104BF, 'M', '𐓧'),
+ (0x104C0, 'M', '𐓨'),
+ (0x104C1, 'M', '𐓩'),
+ (0x104C2, 'M', '𐓪'),
+ (0x104C3, 'M', '𐓫'),
+ (0x104C4, 'M', '𐓬'),
+ (0x104C5, 'M', '𐓭'),
+ (0x104C6, 'M', '𐓮'),
+ (0x104C7, 'M', '𐓯'),
+ (0x104C8, 'M', '𐓰'),
+ (0x104C9, 'M', '𐓱'),
+ (0x104CA, 'M', '𐓲'),
+ (0x104CB, 'M', '𐓳'),
+ (0x104CC, 'M', '𐓴'),
+ (0x104CD, 'M', '𐓵'),
+ (0x104CE, 'M', '𐓶'),
+ (0x104CF, 'M', '𐓷'),
+ (0x104D0, 'M', '𐓸'),
+ (0x104D1, 'M', '𐓹'),
+ (0x104D2, 'M', '𐓺'),
+ (0x104D3, 'M', '𐓻'),
+ (0x104D4, 'X'),
+ (0x104D8, 'V'),
+ (0x104FC, 'X'),
+ (0x10500, 'V'),
+ (0x10528, 'X'),
+ (0x10530, 'V'),
+ (0x10564, 'X'),
+ (0x1056F, 'V'),
+ (0x10570, 'M', '𐖗'),
+ (0x10571, 'M', '𐖘'),
+ (0x10572, 'M', '𐖙'),
+ (0x10573, 'M', '𐖚'),
+ (0x10574, 'M', '𐖛'),
+ (0x10575, 'M', '𐖜'),
+ (0x10576, 'M', '𐖝'),
+ (0x10577, 'M', '𐖞'),
+ (0x10578, 'M', '𐖟'),
+ (0x10579, 'M', '𐖠'),
+ (0x1057A, 'M', '𐖡'),
+ (0x1057B, 'X'),
+ (0x1057C, 'M', '𐖣'),
+ (0x1057D, 'M', '𐖤'),
+ (0x1057E, 'M', '𐖥'),
+ (0x1057F, 'M', '𐖦'),
+ (0x10580, 'M', '𐖧'),
+ (0x10581, 'M', '𐖨'),
+ (0x10582, 'M', '𐖩'),
+ (0x10583, 'M', '𐖪'),
+ (0x10584, 'M', '𐖫'),
+ (0x10585, 'M', '𐖬'),
+ (0x10586, 'M', '𐖭'),
+ (0x10587, 'M', '𐖮'),
+ (0x10588, 'M', '𐖯'),
+ (0x10589, 'M', '𐖰'),
+ (0x1058A, 'M', '𐖱'),
+ (0x1058B, 'X'),
+ (0x1058C, 'M', '𐖳'),
+ (0x1058D, 'M', '𐖴'),
+ (0x1058E, 'M', '𐖵'),
+ (0x1058F, 'M', '𐖶'),
+ (0x10590, 'M', '𐖷'),
+ (0x10591, 'M', '𐖸'),
+ (0x10592, 'M', '𐖹'),
+ (0x10593, 'X'),
+ (0x10594, 'M', '𐖻'),
+ (0x10595, 'M', '𐖼'),
+ (0x10596, 'X'),
+ (0x10597, 'V'),
+ (0x105A2, 'X'),
+ (0x105A3, 'V'),
+ (0x105B2, 'X'),
+ (0x105B3, 'V'),
+ (0x105BA, 'X'),
+ (0x105BB, 'V'),
+ (0x105BD, 'X'),
+ (0x10600, 'V'),
+ (0x10737, 'X'),
+ (0x10740, 'V'),
+ (0x10756, 'X'),
+ (0x10760, 'V'),
+ (0x10768, 'X'),
+ (0x10780, 'V'),
+ (0x10781, 'M', 'ː'),
+ (0x10782, 'M', 'ˑ'),
+ (0x10783, 'M', 'æ'),
+ (0x10784, 'M', 'ʙ'),
+ (0x10785, 'M', 'ɓ'),
+ (0x10786, 'X'),
+ (0x10787, 'M', 'ʣ'),
+ (0x10788, 'M', 'ꭦ'),
+ ]
+
+def _seg_55() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x10789, 'M', 'ʥ'),
+ (0x1078A, 'M', 'ʤ'),
+ (0x1078B, 'M', 'ɖ'),
+ (0x1078C, 'M', 'ɗ'),
+ (0x1078D, 'M', 'ᶑ'),
+ (0x1078E, 'M', 'ɘ'),
+ (0x1078F, 'M', 'ɞ'),
+ (0x10790, 'M', 'ʩ'),
+ (0x10791, 'M', 'ɤ'),
+ (0x10792, 'M', 'ɢ'),
+ (0x10793, 'M', 'ɠ'),
+ (0x10794, 'M', 'ʛ'),
+ (0x10795, 'M', 'ħ'),
+ (0x10796, 'M', 'ʜ'),
+ (0x10797, 'M', 'ɧ'),
+ (0x10798, 'M', 'ʄ'),
+ (0x10799, 'M', 'ʪ'),
+ (0x1079A, 'M', 'ʫ'),
+ (0x1079B, 'M', 'ɬ'),
+ (0x1079C, 'M', '𝼄'),
+ (0x1079D, 'M', 'ꞎ'),
+ (0x1079E, 'M', 'ɮ'),
+ (0x1079F, 'M', '𝼅'),
+ (0x107A0, 'M', 'ʎ'),
+ (0x107A1, 'M', '𝼆'),
+ (0x107A2, 'M', 'ø'),
+ (0x107A3, 'M', 'ɶ'),
+ (0x107A4, 'M', 'ɷ'),
+ (0x107A5, 'M', 'q'),
+ (0x107A6, 'M', 'ɺ'),
+ (0x107A7, 'M', '𝼈'),
+ (0x107A8, 'M', 'ɽ'),
+ (0x107A9, 'M', 'ɾ'),
+ (0x107AA, 'M', 'ʀ'),
+ (0x107AB, 'M', 'ʨ'),
+ (0x107AC, 'M', 'ʦ'),
+ (0x107AD, 'M', 'ꭧ'),
+ (0x107AE, 'M', 'ʧ'),
+ (0x107AF, 'M', 'ʈ'),
+ (0x107B0, 'M', 'ⱱ'),
+ (0x107B1, 'X'),
+ (0x107B2, 'M', 'ʏ'),
+ (0x107B3, 'M', 'ʡ'),
+ (0x107B4, 'M', 'ʢ'),
+ (0x107B5, 'M', 'ʘ'),
+ (0x107B6, 'M', 'ǀ'),
+ (0x107B7, 'M', 'ǁ'),
+ (0x107B8, 'M', 'ǂ'),
+ (0x107B9, 'M', '𝼊'),
+ (0x107BA, 'M', '𝼞'),
+ (0x107BB, 'X'),
+ (0x10800, 'V'),
+ (0x10806, 'X'),
+ (0x10808, 'V'),
+ (0x10809, 'X'),
+ (0x1080A, 'V'),
+ (0x10836, 'X'),
+ (0x10837, 'V'),
+ (0x10839, 'X'),
+ (0x1083C, 'V'),
+ (0x1083D, 'X'),
+ (0x1083F, 'V'),
+ (0x10856, 'X'),
+ (0x10857, 'V'),
+ (0x1089F, 'X'),
+ (0x108A7, 'V'),
+ (0x108B0, 'X'),
+ (0x108E0, 'V'),
+ (0x108F3, 'X'),
+ (0x108F4, 'V'),
+ (0x108F6, 'X'),
+ (0x108FB, 'V'),
+ (0x1091C, 'X'),
+ (0x1091F, 'V'),
+ (0x1093A, 'X'),
+ (0x1093F, 'V'),
+ (0x10940, 'X'),
+ (0x10980, 'V'),
+ (0x109B8, 'X'),
+ (0x109BC, 'V'),
+ (0x109D0, 'X'),
+ (0x109D2, 'V'),
+ (0x10A04, 'X'),
+ (0x10A05, 'V'),
+ (0x10A07, 'X'),
+ (0x10A0C, 'V'),
+ (0x10A14, 'X'),
+ (0x10A15, 'V'),
+ (0x10A18, 'X'),
+ (0x10A19, 'V'),
+ (0x10A36, 'X'),
+ (0x10A38, 'V'),
+ (0x10A3B, 'X'),
+ (0x10A3F, 'V'),
+ (0x10A49, 'X'),
+ (0x10A50, 'V'),
+ (0x10A59, 'X'),
+ (0x10A60, 'V'),
+ (0x10AA0, 'X'),
+ (0x10AC0, 'V'),
+ ]
+
+def _seg_56() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x10AE7, 'X'),
+ (0x10AEB, 'V'),
+ (0x10AF7, 'X'),
+ (0x10B00, 'V'),
+ (0x10B36, 'X'),
+ (0x10B39, 'V'),
+ (0x10B56, 'X'),
+ (0x10B58, 'V'),
+ (0x10B73, 'X'),
+ (0x10B78, 'V'),
+ (0x10B92, 'X'),
+ (0x10B99, 'V'),
+ (0x10B9D, 'X'),
+ (0x10BA9, 'V'),
+ (0x10BB0, 'X'),
+ (0x10C00, 'V'),
+ (0x10C49, 'X'),
+ (0x10C80, 'M', '𐳀'),
+ (0x10C81, 'M', '𐳁'),
+ (0x10C82, 'M', '𐳂'),
+ (0x10C83, 'M', '𐳃'),
+ (0x10C84, 'M', '𐳄'),
+ (0x10C85, 'M', '𐳅'),
+ (0x10C86, 'M', '𐳆'),
+ (0x10C87, 'M', '𐳇'),
+ (0x10C88, 'M', '𐳈'),
+ (0x10C89, 'M', '𐳉'),
+ (0x10C8A, 'M', '𐳊'),
+ (0x10C8B, 'M', '𐳋'),
+ (0x10C8C, 'M', '𐳌'),
+ (0x10C8D, 'M', '𐳍'),
+ (0x10C8E, 'M', '𐳎'),
+ (0x10C8F, 'M', '𐳏'),
+ (0x10C90, 'M', '𐳐'),
+ (0x10C91, 'M', '𐳑'),
+ (0x10C92, 'M', '𐳒'),
+ (0x10C93, 'M', '𐳓'),
+ (0x10C94, 'M', '𐳔'),
+ (0x10C95, 'M', '𐳕'),
+ (0x10C96, 'M', '𐳖'),
+ (0x10C97, 'M', '𐳗'),
+ (0x10C98, 'M', '𐳘'),
+ (0x10C99, 'M', '𐳙'),
+ (0x10C9A, 'M', '𐳚'),
+ (0x10C9B, 'M', '𐳛'),
+ (0x10C9C, 'M', '𐳜'),
+ (0x10C9D, 'M', '𐳝'),
+ (0x10C9E, 'M', '𐳞'),
+ (0x10C9F, 'M', '𐳟'),
+ (0x10CA0, 'M', '𐳠'),
+ (0x10CA1, 'M', '𐳡'),
+ (0x10CA2, 'M', '𐳢'),
+ (0x10CA3, 'M', '𐳣'),
+ (0x10CA4, 'M', '𐳤'),
+ (0x10CA5, 'M', '𐳥'),
+ (0x10CA6, 'M', '𐳦'),
+ (0x10CA7, 'M', '𐳧'),
+ (0x10CA8, 'M', '𐳨'),
+ (0x10CA9, 'M', '𐳩'),
+ (0x10CAA, 'M', '𐳪'),
+ (0x10CAB, 'M', '𐳫'),
+ (0x10CAC, 'M', '𐳬'),
+ (0x10CAD, 'M', '𐳭'),
+ (0x10CAE, 'M', '𐳮'),
+ (0x10CAF, 'M', '𐳯'),
+ (0x10CB0, 'M', '𐳰'),
+ (0x10CB1, 'M', '𐳱'),
+ (0x10CB2, 'M', '𐳲'),
+ (0x10CB3, 'X'),
+ (0x10CC0, 'V'),
+ (0x10CF3, 'X'),
+ (0x10CFA, 'V'),
+ (0x10D28, 'X'),
+ (0x10D30, 'V'),
+ (0x10D3A, 'X'),
+ (0x10E60, 'V'),
+ (0x10E7F, 'X'),
+ (0x10E80, 'V'),
+ (0x10EAA, 'X'),
+ (0x10EAB, 'V'),
+ (0x10EAE, 'X'),
+ (0x10EB0, 'V'),
+ (0x10EB2, 'X'),
+ (0x10EFD, 'V'),
+ (0x10F28, 'X'),
+ (0x10F30, 'V'),
+ (0x10F5A, 'X'),
+ (0x10F70, 'V'),
+ (0x10F8A, 'X'),
+ (0x10FB0, 'V'),
+ (0x10FCC, 'X'),
+ (0x10FE0, 'V'),
+ (0x10FF7, 'X'),
+ (0x11000, 'V'),
+ (0x1104E, 'X'),
+ (0x11052, 'V'),
+ (0x11076, 'X'),
+ (0x1107F, 'V'),
+ (0x110BD, 'X'),
+ (0x110BE, 'V'),
+ ]
+
+def _seg_57() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x110C3, 'X'),
+ (0x110D0, 'V'),
+ (0x110E9, 'X'),
+ (0x110F0, 'V'),
+ (0x110FA, 'X'),
+ (0x11100, 'V'),
+ (0x11135, 'X'),
+ (0x11136, 'V'),
+ (0x11148, 'X'),
+ (0x11150, 'V'),
+ (0x11177, 'X'),
+ (0x11180, 'V'),
+ (0x111E0, 'X'),
+ (0x111E1, 'V'),
+ (0x111F5, 'X'),
+ (0x11200, 'V'),
+ (0x11212, 'X'),
+ (0x11213, 'V'),
+ (0x11242, 'X'),
+ (0x11280, 'V'),
+ (0x11287, 'X'),
+ (0x11288, 'V'),
+ (0x11289, 'X'),
+ (0x1128A, 'V'),
+ (0x1128E, 'X'),
+ (0x1128F, 'V'),
+ (0x1129E, 'X'),
+ (0x1129F, 'V'),
+ (0x112AA, 'X'),
+ (0x112B0, 'V'),
+ (0x112EB, 'X'),
+ (0x112F0, 'V'),
+ (0x112FA, 'X'),
+ (0x11300, 'V'),
+ (0x11304, 'X'),
+ (0x11305, 'V'),
+ (0x1130D, 'X'),
+ (0x1130F, 'V'),
+ (0x11311, 'X'),
+ (0x11313, 'V'),
+ (0x11329, 'X'),
+ (0x1132A, 'V'),
+ (0x11331, 'X'),
+ (0x11332, 'V'),
+ (0x11334, 'X'),
+ (0x11335, 'V'),
+ (0x1133A, 'X'),
+ (0x1133B, 'V'),
+ (0x11345, 'X'),
+ (0x11347, 'V'),
+ (0x11349, 'X'),
+ (0x1134B, 'V'),
+ (0x1134E, 'X'),
+ (0x11350, 'V'),
+ (0x11351, 'X'),
+ (0x11357, 'V'),
+ (0x11358, 'X'),
+ (0x1135D, 'V'),
+ (0x11364, 'X'),
+ (0x11366, 'V'),
+ (0x1136D, 'X'),
+ (0x11370, 'V'),
+ (0x11375, 'X'),
+ (0x11400, 'V'),
+ (0x1145C, 'X'),
+ (0x1145D, 'V'),
+ (0x11462, 'X'),
+ (0x11480, 'V'),
+ (0x114C8, 'X'),
+ (0x114D0, 'V'),
+ (0x114DA, 'X'),
+ (0x11580, 'V'),
+ (0x115B6, 'X'),
+ (0x115B8, 'V'),
+ (0x115DE, 'X'),
+ (0x11600, 'V'),
+ (0x11645, 'X'),
+ (0x11650, 'V'),
+ (0x1165A, 'X'),
+ (0x11660, 'V'),
+ (0x1166D, 'X'),
+ (0x11680, 'V'),
+ (0x116BA, 'X'),
+ (0x116C0, 'V'),
+ (0x116CA, 'X'),
+ (0x11700, 'V'),
+ (0x1171B, 'X'),
+ (0x1171D, 'V'),
+ (0x1172C, 'X'),
+ (0x11730, 'V'),
+ (0x11747, 'X'),
+ (0x11800, 'V'),
+ (0x1183C, 'X'),
+ (0x118A0, 'M', '𑣀'),
+ (0x118A1, 'M', '𑣁'),
+ (0x118A2, 'M', '𑣂'),
+ (0x118A3, 'M', '𑣃'),
+ (0x118A4, 'M', '𑣄'),
+ (0x118A5, 'M', '𑣅'),
+ (0x118A6, 'M', '𑣆'),
+ ]
+
+def _seg_58() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x118A7, 'M', '𑣇'),
+ (0x118A8, 'M', '𑣈'),
+ (0x118A9, 'M', '𑣉'),
+ (0x118AA, 'M', '𑣊'),
+ (0x118AB, 'M', '𑣋'),
+ (0x118AC, 'M', '𑣌'),
+ (0x118AD, 'M', '𑣍'),
+ (0x118AE, 'M', '𑣎'),
+ (0x118AF, 'M', '𑣏'),
+ (0x118B0, 'M', '𑣐'),
+ (0x118B1, 'M', '𑣑'),
+ (0x118B2, 'M', '𑣒'),
+ (0x118B3, 'M', '𑣓'),
+ (0x118B4, 'M', '𑣔'),
+ (0x118B5, 'M', '𑣕'),
+ (0x118B6, 'M', '𑣖'),
+ (0x118B7, 'M', '𑣗'),
+ (0x118B8, 'M', '𑣘'),
+ (0x118B9, 'M', '𑣙'),
+ (0x118BA, 'M', '𑣚'),
+ (0x118BB, 'M', '𑣛'),
+ (0x118BC, 'M', '𑣜'),
+ (0x118BD, 'M', '𑣝'),
+ (0x118BE, 'M', '𑣞'),
+ (0x118BF, 'M', '𑣟'),
+ (0x118C0, 'V'),
+ (0x118F3, 'X'),
+ (0x118FF, 'V'),
+ (0x11907, 'X'),
+ (0x11909, 'V'),
+ (0x1190A, 'X'),
+ (0x1190C, 'V'),
+ (0x11914, 'X'),
+ (0x11915, 'V'),
+ (0x11917, 'X'),
+ (0x11918, 'V'),
+ (0x11936, 'X'),
+ (0x11937, 'V'),
+ (0x11939, 'X'),
+ (0x1193B, 'V'),
+ (0x11947, 'X'),
+ (0x11950, 'V'),
+ (0x1195A, 'X'),
+ (0x119A0, 'V'),
+ (0x119A8, 'X'),
+ (0x119AA, 'V'),
+ (0x119D8, 'X'),
+ (0x119DA, 'V'),
+ (0x119E5, 'X'),
+ (0x11A00, 'V'),
+ (0x11A48, 'X'),
+ (0x11A50, 'V'),
+ (0x11AA3, 'X'),
+ (0x11AB0, 'V'),
+ (0x11AF9, 'X'),
+ (0x11B00, 'V'),
+ (0x11B0A, 'X'),
+ (0x11C00, 'V'),
+ (0x11C09, 'X'),
+ (0x11C0A, 'V'),
+ (0x11C37, 'X'),
+ (0x11C38, 'V'),
+ (0x11C46, 'X'),
+ (0x11C50, 'V'),
+ (0x11C6D, 'X'),
+ (0x11C70, 'V'),
+ (0x11C90, 'X'),
+ (0x11C92, 'V'),
+ (0x11CA8, 'X'),
+ (0x11CA9, 'V'),
+ (0x11CB7, 'X'),
+ (0x11D00, 'V'),
+ (0x11D07, 'X'),
+ (0x11D08, 'V'),
+ (0x11D0A, 'X'),
+ (0x11D0B, 'V'),
+ (0x11D37, 'X'),
+ (0x11D3A, 'V'),
+ (0x11D3B, 'X'),
+ (0x11D3C, 'V'),
+ (0x11D3E, 'X'),
+ (0x11D3F, 'V'),
+ (0x11D48, 'X'),
+ (0x11D50, 'V'),
+ (0x11D5A, 'X'),
+ (0x11D60, 'V'),
+ (0x11D66, 'X'),
+ (0x11D67, 'V'),
+ (0x11D69, 'X'),
+ (0x11D6A, 'V'),
+ (0x11D8F, 'X'),
+ (0x11D90, 'V'),
+ (0x11D92, 'X'),
+ (0x11D93, 'V'),
+ (0x11D99, 'X'),
+ (0x11DA0, 'V'),
+ (0x11DAA, 'X'),
+ (0x11EE0, 'V'),
+ (0x11EF9, 'X'),
+ (0x11F00, 'V'),
+ ]
+
+def _seg_59() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x11F11, 'X'),
+ (0x11F12, 'V'),
+ (0x11F3B, 'X'),
+ (0x11F3E, 'V'),
+ (0x11F5A, 'X'),
+ (0x11FB0, 'V'),
+ (0x11FB1, 'X'),
+ (0x11FC0, 'V'),
+ (0x11FF2, 'X'),
+ (0x11FFF, 'V'),
+ (0x1239A, 'X'),
+ (0x12400, 'V'),
+ (0x1246F, 'X'),
+ (0x12470, 'V'),
+ (0x12475, 'X'),
+ (0x12480, 'V'),
+ (0x12544, 'X'),
+ (0x12F90, 'V'),
+ (0x12FF3, 'X'),
+ (0x13000, 'V'),
+ (0x13430, 'X'),
+ (0x13440, 'V'),
+ (0x13456, 'X'),
+ (0x14400, 'V'),
+ (0x14647, 'X'),
+ (0x16800, 'V'),
+ (0x16A39, 'X'),
+ (0x16A40, 'V'),
+ (0x16A5F, 'X'),
+ (0x16A60, 'V'),
+ (0x16A6A, 'X'),
+ (0x16A6E, 'V'),
+ (0x16ABF, 'X'),
+ (0x16AC0, 'V'),
+ (0x16ACA, 'X'),
+ (0x16AD0, 'V'),
+ (0x16AEE, 'X'),
+ (0x16AF0, 'V'),
+ (0x16AF6, 'X'),
+ (0x16B00, 'V'),
+ (0x16B46, 'X'),
+ (0x16B50, 'V'),
+ (0x16B5A, 'X'),
+ (0x16B5B, 'V'),
+ (0x16B62, 'X'),
+ (0x16B63, 'V'),
+ (0x16B78, 'X'),
+ (0x16B7D, 'V'),
+ (0x16B90, 'X'),
+ (0x16E40, 'M', '𖹠'),
+ (0x16E41, 'M', '𖹡'),
+ (0x16E42, 'M', '𖹢'),
+ (0x16E43, 'M', '𖹣'),
+ (0x16E44, 'M', '𖹤'),
+ (0x16E45, 'M', '𖹥'),
+ (0x16E46, 'M', '𖹦'),
+ (0x16E47, 'M', '𖹧'),
+ (0x16E48, 'M', '𖹨'),
+ (0x16E49, 'M', '𖹩'),
+ (0x16E4A, 'M', '𖹪'),
+ (0x16E4B, 'M', '𖹫'),
+ (0x16E4C, 'M', '𖹬'),
+ (0x16E4D, 'M', '𖹭'),
+ (0x16E4E, 'M', '𖹮'),
+ (0x16E4F, 'M', '𖹯'),
+ (0x16E50, 'M', '𖹰'),
+ (0x16E51, 'M', '𖹱'),
+ (0x16E52, 'M', '𖹲'),
+ (0x16E53, 'M', '𖹳'),
+ (0x16E54, 'M', '𖹴'),
+ (0x16E55, 'M', '𖹵'),
+ (0x16E56, 'M', '𖹶'),
+ (0x16E57, 'M', '𖹷'),
+ (0x16E58, 'M', '𖹸'),
+ (0x16E59, 'M', '𖹹'),
+ (0x16E5A, 'M', '𖹺'),
+ (0x16E5B, 'M', '𖹻'),
+ (0x16E5C, 'M', '𖹼'),
+ (0x16E5D, 'M', '𖹽'),
+ (0x16E5E, 'M', '𖹾'),
+ (0x16E5F, 'M', '𖹿'),
+ (0x16E60, 'V'),
+ (0x16E9B, 'X'),
+ (0x16F00, 'V'),
+ (0x16F4B, 'X'),
+ (0x16F4F, 'V'),
+ (0x16F88, 'X'),
+ (0x16F8F, 'V'),
+ (0x16FA0, 'X'),
+ (0x16FE0, 'V'),
+ (0x16FE5, 'X'),
+ (0x16FF0, 'V'),
+ (0x16FF2, 'X'),
+ (0x17000, 'V'),
+ (0x187F8, 'X'),
+ (0x18800, 'V'),
+ (0x18CD6, 'X'),
+ (0x18D00, 'V'),
+ (0x18D09, 'X'),
+ (0x1AFF0, 'V'),
+ ]
+
+def _seg_60() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1AFF4, 'X'),
+ (0x1AFF5, 'V'),
+ (0x1AFFC, 'X'),
+ (0x1AFFD, 'V'),
+ (0x1AFFF, 'X'),
+ (0x1B000, 'V'),
+ (0x1B123, 'X'),
+ (0x1B132, 'V'),
+ (0x1B133, 'X'),
+ (0x1B150, 'V'),
+ (0x1B153, 'X'),
+ (0x1B155, 'V'),
+ (0x1B156, 'X'),
+ (0x1B164, 'V'),
+ (0x1B168, 'X'),
+ (0x1B170, 'V'),
+ (0x1B2FC, 'X'),
+ (0x1BC00, 'V'),
+ (0x1BC6B, 'X'),
+ (0x1BC70, 'V'),
+ (0x1BC7D, 'X'),
+ (0x1BC80, 'V'),
+ (0x1BC89, 'X'),
+ (0x1BC90, 'V'),
+ (0x1BC9A, 'X'),
+ (0x1BC9C, 'V'),
+ (0x1BCA0, 'I'),
+ (0x1BCA4, 'X'),
+ (0x1CF00, 'V'),
+ (0x1CF2E, 'X'),
+ (0x1CF30, 'V'),
+ (0x1CF47, 'X'),
+ (0x1CF50, 'V'),
+ (0x1CFC4, 'X'),
+ (0x1D000, 'V'),
+ (0x1D0F6, 'X'),
+ (0x1D100, 'V'),
+ (0x1D127, 'X'),
+ (0x1D129, 'V'),
+ (0x1D15E, 'M', '𝅗𝅥'),
+ (0x1D15F, 'M', '𝅘𝅥'),
+ (0x1D160, 'M', '𝅘𝅥𝅮'),
+ (0x1D161, 'M', '𝅘𝅥𝅯'),
+ (0x1D162, 'M', '𝅘𝅥𝅰'),
+ (0x1D163, 'M', '𝅘𝅥𝅱'),
+ (0x1D164, 'M', '𝅘𝅥𝅲'),
+ (0x1D165, 'V'),
+ (0x1D173, 'X'),
+ (0x1D17B, 'V'),
+ (0x1D1BB, 'M', '𝆹𝅥'),
+ (0x1D1BC, 'M', '𝆺𝅥'),
+ (0x1D1BD, 'M', '𝆹𝅥𝅮'),
+ (0x1D1BE, 'M', '𝆺𝅥𝅮'),
+ (0x1D1BF, 'M', '𝆹𝅥𝅯'),
+ (0x1D1C0, 'M', '𝆺𝅥𝅯'),
+ (0x1D1C1, 'V'),
+ (0x1D1EB, 'X'),
+ (0x1D200, 'V'),
+ (0x1D246, 'X'),
+ (0x1D2C0, 'V'),
+ (0x1D2D4, 'X'),
+ (0x1D2E0, 'V'),
+ (0x1D2F4, 'X'),
+ (0x1D300, 'V'),
+ (0x1D357, 'X'),
+ (0x1D360, 'V'),
+ (0x1D379, 'X'),
+ (0x1D400, 'M', 'a'),
+ (0x1D401, 'M', 'b'),
+ (0x1D402, 'M', 'c'),
+ (0x1D403, 'M', 'd'),
+ (0x1D404, 'M', 'e'),
+ (0x1D405, 'M', 'f'),
+ (0x1D406, 'M', 'g'),
+ (0x1D407, 'M', 'h'),
+ (0x1D408, 'M', 'i'),
+ (0x1D409, 'M', 'j'),
+ (0x1D40A, 'M', 'k'),
+ (0x1D40B, 'M', 'l'),
+ (0x1D40C, 'M', 'm'),
+ (0x1D40D, 'M', 'n'),
+ (0x1D40E, 'M', 'o'),
+ (0x1D40F, 'M', 'p'),
+ (0x1D410, 'M', 'q'),
+ (0x1D411, 'M', 'r'),
+ (0x1D412, 'M', 's'),
+ (0x1D413, 'M', 't'),
+ (0x1D414, 'M', 'u'),
+ (0x1D415, 'M', 'v'),
+ (0x1D416, 'M', 'w'),
+ (0x1D417, 'M', 'x'),
+ (0x1D418, 'M', 'y'),
+ (0x1D419, 'M', 'z'),
+ (0x1D41A, 'M', 'a'),
+ (0x1D41B, 'M', 'b'),
+ (0x1D41C, 'M', 'c'),
+ (0x1D41D, 'M', 'd'),
+ (0x1D41E, 'M', 'e'),
+ (0x1D41F, 'M', 'f'),
+ (0x1D420, 'M', 'g'),
+ ]
+
+def _seg_61() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D421, 'M', 'h'),
+ (0x1D422, 'M', 'i'),
+ (0x1D423, 'M', 'j'),
+ (0x1D424, 'M', 'k'),
+ (0x1D425, 'M', 'l'),
+ (0x1D426, 'M', 'm'),
+ (0x1D427, 'M', 'n'),
+ (0x1D428, 'M', 'o'),
+ (0x1D429, 'M', 'p'),
+ (0x1D42A, 'M', 'q'),
+ (0x1D42B, 'M', 'r'),
+ (0x1D42C, 'M', 's'),
+ (0x1D42D, 'M', 't'),
+ (0x1D42E, 'M', 'u'),
+ (0x1D42F, 'M', 'v'),
+ (0x1D430, 'M', 'w'),
+ (0x1D431, 'M', 'x'),
+ (0x1D432, 'M', 'y'),
+ (0x1D433, 'M', 'z'),
+ (0x1D434, 'M', 'a'),
+ (0x1D435, 'M', 'b'),
+ (0x1D436, 'M', 'c'),
+ (0x1D437, 'M', 'd'),
+ (0x1D438, 'M', 'e'),
+ (0x1D439, 'M', 'f'),
+ (0x1D43A, 'M', 'g'),
+ (0x1D43B, 'M', 'h'),
+ (0x1D43C, 'M', 'i'),
+ (0x1D43D, 'M', 'j'),
+ (0x1D43E, 'M', 'k'),
+ (0x1D43F, 'M', 'l'),
+ (0x1D440, 'M', 'm'),
+ (0x1D441, 'M', 'n'),
+ (0x1D442, 'M', 'o'),
+ (0x1D443, 'M', 'p'),
+ (0x1D444, 'M', 'q'),
+ (0x1D445, 'M', 'r'),
+ (0x1D446, 'M', 's'),
+ (0x1D447, 'M', 't'),
+ (0x1D448, 'M', 'u'),
+ (0x1D449, 'M', 'v'),
+ (0x1D44A, 'M', 'w'),
+ (0x1D44B, 'M', 'x'),
+ (0x1D44C, 'M', 'y'),
+ (0x1D44D, 'M', 'z'),
+ (0x1D44E, 'M', 'a'),
+ (0x1D44F, 'M', 'b'),
+ (0x1D450, 'M', 'c'),
+ (0x1D451, 'M', 'd'),
+ (0x1D452, 'M', 'e'),
+ (0x1D453, 'M', 'f'),
+ (0x1D454, 'M', 'g'),
+ (0x1D455, 'X'),
+ (0x1D456, 'M', 'i'),
+ (0x1D457, 'M', 'j'),
+ (0x1D458, 'M', 'k'),
+ (0x1D459, 'M', 'l'),
+ (0x1D45A, 'M', 'm'),
+ (0x1D45B, 'M', 'n'),
+ (0x1D45C, 'M', 'o'),
+ (0x1D45D, 'M', 'p'),
+ (0x1D45E, 'M', 'q'),
+ (0x1D45F, 'M', 'r'),
+ (0x1D460, 'M', 's'),
+ (0x1D461, 'M', 't'),
+ (0x1D462, 'M', 'u'),
+ (0x1D463, 'M', 'v'),
+ (0x1D464, 'M', 'w'),
+ (0x1D465, 'M', 'x'),
+ (0x1D466, 'M', 'y'),
+ (0x1D467, 'M', 'z'),
+ (0x1D468, 'M', 'a'),
+ (0x1D469, 'M', 'b'),
+ (0x1D46A, 'M', 'c'),
+ (0x1D46B, 'M', 'd'),
+ (0x1D46C, 'M', 'e'),
+ (0x1D46D, 'M', 'f'),
+ (0x1D46E, 'M', 'g'),
+ (0x1D46F, 'M', 'h'),
+ (0x1D470, 'M', 'i'),
+ (0x1D471, 'M', 'j'),
+ (0x1D472, 'M', 'k'),
+ (0x1D473, 'M', 'l'),
+ (0x1D474, 'M', 'm'),
+ (0x1D475, 'M', 'n'),
+ (0x1D476, 'M', 'o'),
+ (0x1D477, 'M', 'p'),
+ (0x1D478, 'M', 'q'),
+ (0x1D479, 'M', 'r'),
+ (0x1D47A, 'M', 's'),
+ (0x1D47B, 'M', 't'),
+ (0x1D47C, 'M', 'u'),
+ (0x1D47D, 'M', 'v'),
+ (0x1D47E, 'M', 'w'),
+ (0x1D47F, 'M', 'x'),
+ (0x1D480, 'M', 'y'),
+ (0x1D481, 'M', 'z'),
+ (0x1D482, 'M', 'a'),
+ (0x1D483, 'M', 'b'),
+ (0x1D484, 'M', 'c'),
+ ]
+
+def _seg_62() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D485, 'M', 'd'),
+ (0x1D486, 'M', 'e'),
+ (0x1D487, 'M', 'f'),
+ (0x1D488, 'M', 'g'),
+ (0x1D489, 'M', 'h'),
+ (0x1D48A, 'M', 'i'),
+ (0x1D48B, 'M', 'j'),
+ (0x1D48C, 'M', 'k'),
+ (0x1D48D, 'M', 'l'),
+ (0x1D48E, 'M', 'm'),
+ (0x1D48F, 'M', 'n'),
+ (0x1D490, 'M', 'o'),
+ (0x1D491, 'M', 'p'),
+ (0x1D492, 'M', 'q'),
+ (0x1D493, 'M', 'r'),
+ (0x1D494, 'M', 's'),
+ (0x1D495, 'M', 't'),
+ (0x1D496, 'M', 'u'),
+ (0x1D497, 'M', 'v'),
+ (0x1D498, 'M', 'w'),
+ (0x1D499, 'M', 'x'),
+ (0x1D49A, 'M', 'y'),
+ (0x1D49B, 'M', 'z'),
+ (0x1D49C, 'M', 'a'),
+ (0x1D49D, 'X'),
+ (0x1D49E, 'M', 'c'),
+ (0x1D49F, 'M', 'd'),
+ (0x1D4A0, 'X'),
+ (0x1D4A2, 'M', 'g'),
+ (0x1D4A3, 'X'),
+ (0x1D4A5, 'M', 'j'),
+ (0x1D4A6, 'M', 'k'),
+ (0x1D4A7, 'X'),
+ (0x1D4A9, 'M', 'n'),
+ (0x1D4AA, 'M', 'o'),
+ (0x1D4AB, 'M', 'p'),
+ (0x1D4AC, 'M', 'q'),
+ (0x1D4AD, 'X'),
+ (0x1D4AE, 'M', 's'),
+ (0x1D4AF, 'M', 't'),
+ (0x1D4B0, 'M', 'u'),
+ (0x1D4B1, 'M', 'v'),
+ (0x1D4B2, 'M', 'w'),
+ (0x1D4B3, 'M', 'x'),
+ (0x1D4B4, 'M', 'y'),
+ (0x1D4B5, 'M', 'z'),
+ (0x1D4B6, 'M', 'a'),
+ (0x1D4B7, 'M', 'b'),
+ (0x1D4B8, 'M', 'c'),
+ (0x1D4B9, 'M', 'd'),
+ (0x1D4BA, 'X'),
+ (0x1D4BB, 'M', 'f'),
+ (0x1D4BC, 'X'),
+ (0x1D4BD, 'M', 'h'),
+ (0x1D4BE, 'M', 'i'),
+ (0x1D4BF, 'M', 'j'),
+ (0x1D4C0, 'M', 'k'),
+ (0x1D4C1, 'M', 'l'),
+ (0x1D4C2, 'M', 'm'),
+ (0x1D4C3, 'M', 'n'),
+ (0x1D4C4, 'X'),
+ (0x1D4C5, 'M', 'p'),
+ (0x1D4C6, 'M', 'q'),
+ (0x1D4C7, 'M', 'r'),
+ (0x1D4C8, 'M', 's'),
+ (0x1D4C9, 'M', 't'),
+ (0x1D4CA, 'M', 'u'),
+ (0x1D4CB, 'M', 'v'),
+ (0x1D4CC, 'M', 'w'),
+ (0x1D4CD, 'M', 'x'),
+ (0x1D4CE, 'M', 'y'),
+ (0x1D4CF, 'M', 'z'),
+ (0x1D4D0, 'M', 'a'),
+ (0x1D4D1, 'M', 'b'),
+ (0x1D4D2, 'M', 'c'),
+ (0x1D4D3, 'M', 'd'),
+ (0x1D4D4, 'M', 'e'),
+ (0x1D4D5, 'M', 'f'),
+ (0x1D4D6, 'M', 'g'),
+ (0x1D4D7, 'M', 'h'),
+ (0x1D4D8, 'M', 'i'),
+ (0x1D4D9, 'M', 'j'),
+ (0x1D4DA, 'M', 'k'),
+ (0x1D4DB, 'M', 'l'),
+ (0x1D4DC, 'M', 'm'),
+ (0x1D4DD, 'M', 'n'),
+ (0x1D4DE, 'M', 'o'),
+ (0x1D4DF, 'M', 'p'),
+ (0x1D4E0, 'M', 'q'),
+ (0x1D4E1, 'M', 'r'),
+ (0x1D4E2, 'M', 's'),
+ (0x1D4E3, 'M', 't'),
+ (0x1D4E4, 'M', 'u'),
+ (0x1D4E5, 'M', 'v'),
+ (0x1D4E6, 'M', 'w'),
+ (0x1D4E7, 'M', 'x'),
+ (0x1D4E8, 'M', 'y'),
+ (0x1D4E9, 'M', 'z'),
+ (0x1D4EA, 'M', 'a'),
+ (0x1D4EB, 'M', 'b'),
+ ]
+
+def _seg_63() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D4EC, 'M', 'c'),
+ (0x1D4ED, 'M', 'd'),
+ (0x1D4EE, 'M', 'e'),
+ (0x1D4EF, 'M', 'f'),
+ (0x1D4F0, 'M', 'g'),
+ (0x1D4F1, 'M', 'h'),
+ (0x1D4F2, 'M', 'i'),
+ (0x1D4F3, 'M', 'j'),
+ (0x1D4F4, 'M', 'k'),
+ (0x1D4F5, 'M', 'l'),
+ (0x1D4F6, 'M', 'm'),
+ (0x1D4F7, 'M', 'n'),
+ (0x1D4F8, 'M', 'o'),
+ (0x1D4F9, 'M', 'p'),
+ (0x1D4FA, 'M', 'q'),
+ (0x1D4FB, 'M', 'r'),
+ (0x1D4FC, 'M', 's'),
+ (0x1D4FD, 'M', 't'),
+ (0x1D4FE, 'M', 'u'),
+ (0x1D4FF, 'M', 'v'),
+ (0x1D500, 'M', 'w'),
+ (0x1D501, 'M', 'x'),
+ (0x1D502, 'M', 'y'),
+ (0x1D503, 'M', 'z'),
+ (0x1D504, 'M', 'a'),
+ (0x1D505, 'M', 'b'),
+ (0x1D506, 'X'),
+ (0x1D507, 'M', 'd'),
+ (0x1D508, 'M', 'e'),
+ (0x1D509, 'M', 'f'),
+ (0x1D50A, 'M', 'g'),
+ (0x1D50B, 'X'),
+ (0x1D50D, 'M', 'j'),
+ (0x1D50E, 'M', 'k'),
+ (0x1D50F, 'M', 'l'),
+ (0x1D510, 'M', 'm'),
+ (0x1D511, 'M', 'n'),
+ (0x1D512, 'M', 'o'),
+ (0x1D513, 'M', 'p'),
+ (0x1D514, 'M', 'q'),
+ (0x1D515, 'X'),
+ (0x1D516, 'M', 's'),
+ (0x1D517, 'M', 't'),
+ (0x1D518, 'M', 'u'),
+ (0x1D519, 'M', 'v'),
+ (0x1D51A, 'M', 'w'),
+ (0x1D51B, 'M', 'x'),
+ (0x1D51C, 'M', 'y'),
+ (0x1D51D, 'X'),
+ (0x1D51E, 'M', 'a'),
+ (0x1D51F, 'M', 'b'),
+ (0x1D520, 'M', 'c'),
+ (0x1D521, 'M', 'd'),
+ (0x1D522, 'M', 'e'),
+ (0x1D523, 'M', 'f'),
+ (0x1D524, 'M', 'g'),
+ (0x1D525, 'M', 'h'),
+ (0x1D526, 'M', 'i'),
+ (0x1D527, 'M', 'j'),
+ (0x1D528, 'M', 'k'),
+ (0x1D529, 'M', 'l'),
+ (0x1D52A, 'M', 'm'),
+ (0x1D52B, 'M', 'n'),
+ (0x1D52C, 'M', 'o'),
+ (0x1D52D, 'M', 'p'),
+ (0x1D52E, 'M', 'q'),
+ (0x1D52F, 'M', 'r'),
+ (0x1D530, 'M', 's'),
+ (0x1D531, 'M', 't'),
+ (0x1D532, 'M', 'u'),
+ (0x1D533, 'M', 'v'),
+ (0x1D534, 'M', 'w'),
+ (0x1D535, 'M', 'x'),
+ (0x1D536, 'M', 'y'),
+ (0x1D537, 'M', 'z'),
+ (0x1D538, 'M', 'a'),
+ (0x1D539, 'M', 'b'),
+ (0x1D53A, 'X'),
+ (0x1D53B, 'M', 'd'),
+ (0x1D53C, 'M', 'e'),
+ (0x1D53D, 'M', 'f'),
+ (0x1D53E, 'M', 'g'),
+ (0x1D53F, 'X'),
+ (0x1D540, 'M', 'i'),
+ (0x1D541, 'M', 'j'),
+ (0x1D542, 'M', 'k'),
+ (0x1D543, 'M', 'l'),
+ (0x1D544, 'M', 'm'),
+ (0x1D545, 'X'),
+ (0x1D546, 'M', 'o'),
+ (0x1D547, 'X'),
+ (0x1D54A, 'M', 's'),
+ (0x1D54B, 'M', 't'),
+ (0x1D54C, 'M', 'u'),
+ (0x1D54D, 'M', 'v'),
+ (0x1D54E, 'M', 'w'),
+ (0x1D54F, 'M', 'x'),
+ (0x1D550, 'M', 'y'),
+ (0x1D551, 'X'),
+ (0x1D552, 'M', 'a'),
+ ]
+
+def _seg_64() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D553, 'M', 'b'),
+ (0x1D554, 'M', 'c'),
+ (0x1D555, 'M', 'd'),
+ (0x1D556, 'M', 'e'),
+ (0x1D557, 'M', 'f'),
+ (0x1D558, 'M', 'g'),
+ (0x1D559, 'M', 'h'),
+ (0x1D55A, 'M', 'i'),
+ (0x1D55B, 'M', 'j'),
+ (0x1D55C, 'M', 'k'),
+ (0x1D55D, 'M', 'l'),
+ (0x1D55E, 'M', 'm'),
+ (0x1D55F, 'M', 'n'),
+ (0x1D560, 'M', 'o'),
+ (0x1D561, 'M', 'p'),
+ (0x1D562, 'M', 'q'),
+ (0x1D563, 'M', 'r'),
+ (0x1D564, 'M', 's'),
+ (0x1D565, 'M', 't'),
+ (0x1D566, 'M', 'u'),
+ (0x1D567, 'M', 'v'),
+ (0x1D568, 'M', 'w'),
+ (0x1D569, 'M', 'x'),
+ (0x1D56A, 'M', 'y'),
+ (0x1D56B, 'M', 'z'),
+ (0x1D56C, 'M', 'a'),
+ (0x1D56D, 'M', 'b'),
+ (0x1D56E, 'M', 'c'),
+ (0x1D56F, 'M', 'd'),
+ (0x1D570, 'M', 'e'),
+ (0x1D571, 'M', 'f'),
+ (0x1D572, 'M', 'g'),
+ (0x1D573, 'M', 'h'),
+ (0x1D574, 'M', 'i'),
+ (0x1D575, 'M', 'j'),
+ (0x1D576, 'M', 'k'),
+ (0x1D577, 'M', 'l'),
+ (0x1D578, 'M', 'm'),
+ (0x1D579, 'M', 'n'),
+ (0x1D57A, 'M', 'o'),
+ (0x1D57B, 'M', 'p'),
+ (0x1D57C, 'M', 'q'),
+ (0x1D57D, 'M', 'r'),
+ (0x1D57E, 'M', 's'),
+ (0x1D57F, 'M', 't'),
+ (0x1D580, 'M', 'u'),
+ (0x1D581, 'M', 'v'),
+ (0x1D582, 'M', 'w'),
+ (0x1D583, 'M', 'x'),
+ (0x1D584, 'M', 'y'),
+ (0x1D585, 'M', 'z'),
+ (0x1D586, 'M', 'a'),
+ (0x1D587, 'M', 'b'),
+ (0x1D588, 'M', 'c'),
+ (0x1D589, 'M', 'd'),
+ (0x1D58A, 'M', 'e'),
+ (0x1D58B, 'M', 'f'),
+ (0x1D58C, 'M', 'g'),
+ (0x1D58D, 'M', 'h'),
+ (0x1D58E, 'M', 'i'),
+ (0x1D58F, 'M', 'j'),
+ (0x1D590, 'M', 'k'),
+ (0x1D591, 'M', 'l'),
+ (0x1D592, 'M', 'm'),
+ (0x1D593, 'M', 'n'),
+ (0x1D594, 'M', 'o'),
+ (0x1D595, 'M', 'p'),
+ (0x1D596, 'M', 'q'),
+ (0x1D597, 'M', 'r'),
+ (0x1D598, 'M', 's'),
+ (0x1D599, 'M', 't'),
+ (0x1D59A, 'M', 'u'),
+ (0x1D59B, 'M', 'v'),
+ (0x1D59C, 'M', 'w'),
+ (0x1D59D, 'M', 'x'),
+ (0x1D59E, 'M', 'y'),
+ (0x1D59F, 'M', 'z'),
+ (0x1D5A0, 'M', 'a'),
+ (0x1D5A1, 'M', 'b'),
+ (0x1D5A2, 'M', 'c'),
+ (0x1D5A3, 'M', 'd'),
+ (0x1D5A4, 'M', 'e'),
+ (0x1D5A5, 'M', 'f'),
+ (0x1D5A6, 'M', 'g'),
+ (0x1D5A7, 'M', 'h'),
+ (0x1D5A8, 'M', 'i'),
+ (0x1D5A9, 'M', 'j'),
+ (0x1D5AA, 'M', 'k'),
+ (0x1D5AB, 'M', 'l'),
+ (0x1D5AC, 'M', 'm'),
+ (0x1D5AD, 'M', 'n'),
+ (0x1D5AE, 'M', 'o'),
+ (0x1D5AF, 'M', 'p'),
+ (0x1D5B0, 'M', 'q'),
+ (0x1D5B1, 'M', 'r'),
+ (0x1D5B2, 'M', 's'),
+ (0x1D5B3, 'M', 't'),
+ (0x1D5B4, 'M', 'u'),
+ (0x1D5B5, 'M', 'v'),
+ (0x1D5B6, 'M', 'w'),
+ ]
+
+def _seg_65() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D5B7, 'M', 'x'),
+ (0x1D5B8, 'M', 'y'),
+ (0x1D5B9, 'M', 'z'),
+ (0x1D5BA, 'M', 'a'),
+ (0x1D5BB, 'M', 'b'),
+ (0x1D5BC, 'M', 'c'),
+ (0x1D5BD, 'M', 'd'),
+ (0x1D5BE, 'M', 'e'),
+ (0x1D5BF, 'M', 'f'),
+ (0x1D5C0, 'M', 'g'),
+ (0x1D5C1, 'M', 'h'),
+ (0x1D5C2, 'M', 'i'),
+ (0x1D5C3, 'M', 'j'),
+ (0x1D5C4, 'M', 'k'),
+ (0x1D5C5, 'M', 'l'),
+ (0x1D5C6, 'M', 'm'),
+ (0x1D5C7, 'M', 'n'),
+ (0x1D5C8, 'M', 'o'),
+ (0x1D5C9, 'M', 'p'),
+ (0x1D5CA, 'M', 'q'),
+ (0x1D5CB, 'M', 'r'),
+ (0x1D5CC, 'M', 's'),
+ (0x1D5CD, 'M', 't'),
+ (0x1D5CE, 'M', 'u'),
+ (0x1D5CF, 'M', 'v'),
+ (0x1D5D0, 'M', 'w'),
+ (0x1D5D1, 'M', 'x'),
+ (0x1D5D2, 'M', 'y'),
+ (0x1D5D3, 'M', 'z'),
+ (0x1D5D4, 'M', 'a'),
+ (0x1D5D5, 'M', 'b'),
+ (0x1D5D6, 'M', 'c'),
+ (0x1D5D7, 'M', 'd'),
+ (0x1D5D8, 'M', 'e'),
+ (0x1D5D9, 'M', 'f'),
+ (0x1D5DA, 'M', 'g'),
+ (0x1D5DB, 'M', 'h'),
+ (0x1D5DC, 'M', 'i'),
+ (0x1D5DD, 'M', 'j'),
+ (0x1D5DE, 'M', 'k'),
+ (0x1D5DF, 'M', 'l'),
+ (0x1D5E0, 'M', 'm'),
+ (0x1D5E1, 'M', 'n'),
+ (0x1D5E2, 'M', 'o'),
+ (0x1D5E3, 'M', 'p'),
+ (0x1D5E4, 'M', 'q'),
+ (0x1D5E5, 'M', 'r'),
+ (0x1D5E6, 'M', 's'),
+ (0x1D5E7, 'M', 't'),
+ (0x1D5E8, 'M', 'u'),
+ (0x1D5E9, 'M', 'v'),
+ (0x1D5EA, 'M', 'w'),
+ (0x1D5EB, 'M', 'x'),
+ (0x1D5EC, 'M', 'y'),
+ (0x1D5ED, 'M', 'z'),
+ (0x1D5EE, 'M', 'a'),
+ (0x1D5EF, 'M', 'b'),
+ (0x1D5F0, 'M', 'c'),
+ (0x1D5F1, 'M', 'd'),
+ (0x1D5F2, 'M', 'e'),
+ (0x1D5F3, 'M', 'f'),
+ (0x1D5F4, 'M', 'g'),
+ (0x1D5F5, 'M', 'h'),
+ (0x1D5F6, 'M', 'i'),
+ (0x1D5F7, 'M', 'j'),
+ (0x1D5F8, 'M', 'k'),
+ (0x1D5F9, 'M', 'l'),
+ (0x1D5FA, 'M', 'm'),
+ (0x1D5FB, 'M', 'n'),
+ (0x1D5FC, 'M', 'o'),
+ (0x1D5FD, 'M', 'p'),
+ (0x1D5FE, 'M', 'q'),
+ (0x1D5FF, 'M', 'r'),
+ (0x1D600, 'M', 's'),
+ (0x1D601, 'M', 't'),
+ (0x1D602, 'M', 'u'),
+ (0x1D603, 'M', 'v'),
+ (0x1D604, 'M', 'w'),
+ (0x1D605, 'M', 'x'),
+ (0x1D606, 'M', 'y'),
+ (0x1D607, 'M', 'z'),
+ (0x1D608, 'M', 'a'),
+ (0x1D609, 'M', 'b'),
+ (0x1D60A, 'M', 'c'),
+ (0x1D60B, 'M', 'd'),
+ (0x1D60C, 'M', 'e'),
+ (0x1D60D, 'M', 'f'),
+ (0x1D60E, 'M', 'g'),
+ (0x1D60F, 'M', 'h'),
+ (0x1D610, 'M', 'i'),
+ (0x1D611, 'M', 'j'),
+ (0x1D612, 'M', 'k'),
+ (0x1D613, 'M', 'l'),
+ (0x1D614, 'M', 'm'),
+ (0x1D615, 'M', 'n'),
+ (0x1D616, 'M', 'o'),
+ (0x1D617, 'M', 'p'),
+ (0x1D618, 'M', 'q'),
+ (0x1D619, 'M', 'r'),
+ (0x1D61A, 'M', 's'),
+ ]
+
+def _seg_66() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D61B, 'M', 't'),
+ (0x1D61C, 'M', 'u'),
+ (0x1D61D, 'M', 'v'),
+ (0x1D61E, 'M', 'w'),
+ (0x1D61F, 'M', 'x'),
+ (0x1D620, 'M', 'y'),
+ (0x1D621, 'M', 'z'),
+ (0x1D622, 'M', 'a'),
+ (0x1D623, 'M', 'b'),
+ (0x1D624, 'M', 'c'),
+ (0x1D625, 'M', 'd'),
+ (0x1D626, 'M', 'e'),
+ (0x1D627, 'M', 'f'),
+ (0x1D628, 'M', 'g'),
+ (0x1D629, 'M', 'h'),
+ (0x1D62A, 'M', 'i'),
+ (0x1D62B, 'M', 'j'),
+ (0x1D62C, 'M', 'k'),
+ (0x1D62D, 'M', 'l'),
+ (0x1D62E, 'M', 'm'),
+ (0x1D62F, 'M', 'n'),
+ (0x1D630, 'M', 'o'),
+ (0x1D631, 'M', 'p'),
+ (0x1D632, 'M', 'q'),
+ (0x1D633, 'M', 'r'),
+ (0x1D634, 'M', 's'),
+ (0x1D635, 'M', 't'),
+ (0x1D636, 'M', 'u'),
+ (0x1D637, 'M', 'v'),
+ (0x1D638, 'M', 'w'),
+ (0x1D639, 'M', 'x'),
+ (0x1D63A, 'M', 'y'),
+ (0x1D63B, 'M', 'z'),
+ (0x1D63C, 'M', 'a'),
+ (0x1D63D, 'M', 'b'),
+ (0x1D63E, 'M', 'c'),
+ (0x1D63F, 'M', 'd'),
+ (0x1D640, 'M', 'e'),
+ (0x1D641, 'M', 'f'),
+ (0x1D642, 'M', 'g'),
+ (0x1D643, 'M', 'h'),
+ (0x1D644, 'M', 'i'),
+ (0x1D645, 'M', 'j'),
+ (0x1D646, 'M', 'k'),
+ (0x1D647, 'M', 'l'),
+ (0x1D648, 'M', 'm'),
+ (0x1D649, 'M', 'n'),
+ (0x1D64A, 'M', 'o'),
+ (0x1D64B, 'M', 'p'),
+ (0x1D64C, 'M', 'q'),
+ (0x1D64D, 'M', 'r'),
+ (0x1D64E, 'M', 's'),
+ (0x1D64F, 'M', 't'),
+ (0x1D650, 'M', 'u'),
+ (0x1D651, 'M', 'v'),
+ (0x1D652, 'M', 'w'),
+ (0x1D653, 'M', 'x'),
+ (0x1D654, 'M', 'y'),
+ (0x1D655, 'M', 'z'),
+ (0x1D656, 'M', 'a'),
+ (0x1D657, 'M', 'b'),
+ (0x1D658, 'M', 'c'),
+ (0x1D659, 'M', 'd'),
+ (0x1D65A, 'M', 'e'),
+ (0x1D65B, 'M', 'f'),
+ (0x1D65C, 'M', 'g'),
+ (0x1D65D, 'M', 'h'),
+ (0x1D65E, 'M', 'i'),
+ (0x1D65F, 'M', 'j'),
+ (0x1D660, 'M', 'k'),
+ (0x1D661, 'M', 'l'),
+ (0x1D662, 'M', 'm'),
+ (0x1D663, 'M', 'n'),
+ (0x1D664, 'M', 'o'),
+ (0x1D665, 'M', 'p'),
+ (0x1D666, 'M', 'q'),
+ (0x1D667, 'M', 'r'),
+ (0x1D668, 'M', 's'),
+ (0x1D669, 'M', 't'),
+ (0x1D66A, 'M', 'u'),
+ (0x1D66B, 'M', 'v'),
+ (0x1D66C, 'M', 'w'),
+ (0x1D66D, 'M', 'x'),
+ (0x1D66E, 'M', 'y'),
+ (0x1D66F, 'M', 'z'),
+ (0x1D670, 'M', 'a'),
+ (0x1D671, 'M', 'b'),
+ (0x1D672, 'M', 'c'),
+ (0x1D673, 'M', 'd'),
+ (0x1D674, 'M', 'e'),
+ (0x1D675, 'M', 'f'),
+ (0x1D676, 'M', 'g'),
+ (0x1D677, 'M', 'h'),
+ (0x1D678, 'M', 'i'),
+ (0x1D679, 'M', 'j'),
+ (0x1D67A, 'M', 'k'),
+ (0x1D67B, 'M', 'l'),
+ (0x1D67C, 'M', 'm'),
+ (0x1D67D, 'M', 'n'),
+ (0x1D67E, 'M', 'o'),
+ ]
+
+def _seg_67() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D67F, 'M', 'p'),
+ (0x1D680, 'M', 'q'),
+ (0x1D681, 'M', 'r'),
+ (0x1D682, 'M', 's'),
+ (0x1D683, 'M', 't'),
+ (0x1D684, 'M', 'u'),
+ (0x1D685, 'M', 'v'),
+ (0x1D686, 'M', 'w'),
+ (0x1D687, 'M', 'x'),
+ (0x1D688, 'M', 'y'),
+ (0x1D689, 'M', 'z'),
+ (0x1D68A, 'M', 'a'),
+ (0x1D68B, 'M', 'b'),
+ (0x1D68C, 'M', 'c'),
+ (0x1D68D, 'M', 'd'),
+ (0x1D68E, 'M', 'e'),
+ (0x1D68F, 'M', 'f'),
+ (0x1D690, 'M', 'g'),
+ (0x1D691, 'M', 'h'),
+ (0x1D692, 'M', 'i'),
+ (0x1D693, 'M', 'j'),
+ (0x1D694, 'M', 'k'),
+ (0x1D695, 'M', 'l'),
+ (0x1D696, 'M', 'm'),
+ (0x1D697, 'M', 'n'),
+ (0x1D698, 'M', 'o'),
+ (0x1D699, 'M', 'p'),
+ (0x1D69A, 'M', 'q'),
+ (0x1D69B, 'M', 'r'),
+ (0x1D69C, 'M', 's'),
+ (0x1D69D, 'M', 't'),
+ (0x1D69E, 'M', 'u'),
+ (0x1D69F, 'M', 'v'),
+ (0x1D6A0, 'M', 'w'),
+ (0x1D6A1, 'M', 'x'),
+ (0x1D6A2, 'M', 'y'),
+ (0x1D6A3, 'M', 'z'),
+ (0x1D6A4, 'M', 'ı'),
+ (0x1D6A5, 'M', 'ȷ'),
+ (0x1D6A6, 'X'),
+ (0x1D6A8, 'M', 'α'),
+ (0x1D6A9, 'M', 'β'),
+ (0x1D6AA, 'M', 'γ'),
+ (0x1D6AB, 'M', 'δ'),
+ (0x1D6AC, 'M', 'ε'),
+ (0x1D6AD, 'M', 'ζ'),
+ (0x1D6AE, 'M', 'η'),
+ (0x1D6AF, 'M', 'θ'),
+ (0x1D6B0, 'M', 'ι'),
+ (0x1D6B1, 'M', 'κ'),
+ (0x1D6B2, 'M', 'λ'),
+ (0x1D6B3, 'M', 'μ'),
+ (0x1D6B4, 'M', 'ν'),
+ (0x1D6B5, 'M', 'ξ'),
+ (0x1D6B6, 'M', 'ο'),
+ (0x1D6B7, 'M', 'π'),
+ (0x1D6B8, 'M', 'ρ'),
+ (0x1D6B9, 'M', 'θ'),
+ (0x1D6BA, 'M', 'σ'),
+ (0x1D6BB, 'M', 'τ'),
+ (0x1D6BC, 'M', 'υ'),
+ (0x1D6BD, 'M', 'φ'),
+ (0x1D6BE, 'M', 'χ'),
+ (0x1D6BF, 'M', 'ψ'),
+ (0x1D6C0, 'M', 'ω'),
+ (0x1D6C1, 'M', '∇'),
+ (0x1D6C2, 'M', 'α'),
+ (0x1D6C3, 'M', 'β'),
+ (0x1D6C4, 'M', 'γ'),
+ (0x1D6C5, 'M', 'δ'),
+ (0x1D6C6, 'M', 'ε'),
+ (0x1D6C7, 'M', 'ζ'),
+ (0x1D6C8, 'M', 'η'),
+ (0x1D6C9, 'M', 'θ'),
+ (0x1D6CA, 'M', 'ι'),
+ (0x1D6CB, 'M', 'κ'),
+ (0x1D6CC, 'M', 'λ'),
+ (0x1D6CD, 'M', 'μ'),
+ (0x1D6CE, 'M', 'ν'),
+ (0x1D6CF, 'M', 'ξ'),
+ (0x1D6D0, 'M', 'ο'),
+ (0x1D6D1, 'M', 'π'),
+ (0x1D6D2, 'M', 'ρ'),
+ (0x1D6D3, 'M', 'σ'),
+ (0x1D6D5, 'M', 'τ'),
+ (0x1D6D6, 'M', 'υ'),
+ (0x1D6D7, 'M', 'φ'),
+ (0x1D6D8, 'M', 'χ'),
+ (0x1D6D9, 'M', 'ψ'),
+ (0x1D6DA, 'M', 'ω'),
+ (0x1D6DB, 'M', '∂'),
+ (0x1D6DC, 'M', 'ε'),
+ (0x1D6DD, 'M', 'θ'),
+ (0x1D6DE, 'M', 'κ'),
+ (0x1D6DF, 'M', 'φ'),
+ (0x1D6E0, 'M', 'ρ'),
+ (0x1D6E1, 'M', 'π'),
+ (0x1D6E2, 'M', 'α'),
+ (0x1D6E3, 'M', 'β'),
+ (0x1D6E4, 'M', 'γ'),
+ ]
+
+def _seg_68() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D6E5, 'M', 'δ'),
+ (0x1D6E6, 'M', 'ε'),
+ (0x1D6E7, 'M', 'ζ'),
+ (0x1D6E8, 'M', 'η'),
+ (0x1D6E9, 'M', 'θ'),
+ (0x1D6EA, 'M', 'ι'),
+ (0x1D6EB, 'M', 'κ'),
+ (0x1D6EC, 'M', 'λ'),
+ (0x1D6ED, 'M', 'μ'),
+ (0x1D6EE, 'M', 'ν'),
+ (0x1D6EF, 'M', 'ξ'),
+ (0x1D6F0, 'M', 'ο'),
+ (0x1D6F1, 'M', 'π'),
+ (0x1D6F2, 'M', 'ρ'),
+ (0x1D6F3, 'M', 'θ'),
+ (0x1D6F4, 'M', 'σ'),
+ (0x1D6F5, 'M', 'τ'),
+ (0x1D6F6, 'M', 'υ'),
+ (0x1D6F7, 'M', 'φ'),
+ (0x1D6F8, 'M', 'χ'),
+ (0x1D6F9, 'M', 'ψ'),
+ (0x1D6FA, 'M', 'ω'),
+ (0x1D6FB, 'M', '∇'),
+ (0x1D6FC, 'M', 'α'),
+ (0x1D6FD, 'M', 'β'),
+ (0x1D6FE, 'M', 'γ'),
+ (0x1D6FF, 'M', 'δ'),
+ (0x1D700, 'M', 'ε'),
+ (0x1D701, 'M', 'ζ'),
+ (0x1D702, 'M', 'η'),
+ (0x1D703, 'M', 'θ'),
+ (0x1D704, 'M', 'ι'),
+ (0x1D705, 'M', 'κ'),
+ (0x1D706, 'M', 'λ'),
+ (0x1D707, 'M', 'μ'),
+ (0x1D708, 'M', 'ν'),
+ (0x1D709, 'M', 'ξ'),
+ (0x1D70A, 'M', 'ο'),
+ (0x1D70B, 'M', 'π'),
+ (0x1D70C, 'M', 'ρ'),
+ (0x1D70D, 'M', 'σ'),
+ (0x1D70F, 'M', 'τ'),
+ (0x1D710, 'M', 'υ'),
+ (0x1D711, 'M', 'φ'),
+ (0x1D712, 'M', 'χ'),
+ (0x1D713, 'M', 'ψ'),
+ (0x1D714, 'M', 'ω'),
+ (0x1D715, 'M', '∂'),
+ (0x1D716, 'M', 'ε'),
+ (0x1D717, 'M', 'θ'),
+ (0x1D718, 'M', 'κ'),
+ (0x1D719, 'M', 'φ'),
+ (0x1D71A, 'M', 'ρ'),
+ (0x1D71B, 'M', 'π'),
+ (0x1D71C, 'M', 'α'),
+ (0x1D71D, 'M', 'β'),
+ (0x1D71E, 'M', 'γ'),
+ (0x1D71F, 'M', 'δ'),
+ (0x1D720, 'M', 'ε'),
+ (0x1D721, 'M', 'ζ'),
+ (0x1D722, 'M', 'η'),
+ (0x1D723, 'M', 'θ'),
+ (0x1D724, 'M', 'ι'),
+ (0x1D725, 'M', 'κ'),
+ (0x1D726, 'M', 'λ'),
+ (0x1D727, 'M', 'μ'),
+ (0x1D728, 'M', 'ν'),
+ (0x1D729, 'M', 'ξ'),
+ (0x1D72A, 'M', 'ο'),
+ (0x1D72B, 'M', 'π'),
+ (0x1D72C, 'M', 'ρ'),
+ (0x1D72D, 'M', 'θ'),
+ (0x1D72E, 'M', 'σ'),
+ (0x1D72F, 'M', 'τ'),
+ (0x1D730, 'M', 'υ'),
+ (0x1D731, 'M', 'φ'),
+ (0x1D732, 'M', 'χ'),
+ (0x1D733, 'M', 'ψ'),
+ (0x1D734, 'M', 'ω'),
+ (0x1D735, 'M', '∇'),
+ (0x1D736, 'M', 'α'),
+ (0x1D737, 'M', 'β'),
+ (0x1D738, 'M', 'γ'),
+ (0x1D739, 'M', 'δ'),
+ (0x1D73A, 'M', 'ε'),
+ (0x1D73B, 'M', 'ζ'),
+ (0x1D73C, 'M', 'η'),
+ (0x1D73D, 'M', 'θ'),
+ (0x1D73E, 'M', 'ι'),
+ (0x1D73F, 'M', 'κ'),
+ (0x1D740, 'M', 'λ'),
+ (0x1D741, 'M', 'μ'),
+ (0x1D742, 'M', 'ν'),
+ (0x1D743, 'M', 'ξ'),
+ (0x1D744, 'M', 'ο'),
+ (0x1D745, 'M', 'π'),
+ (0x1D746, 'M', 'ρ'),
+ (0x1D747, 'M', 'σ'),
+ (0x1D749, 'M', 'τ'),
+ (0x1D74A, 'M', 'υ'),
+ ]
+
+def _seg_69() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D74B, 'M', 'φ'),
+ (0x1D74C, 'M', 'χ'),
+ (0x1D74D, 'M', 'ψ'),
+ (0x1D74E, 'M', 'ω'),
+ (0x1D74F, 'M', '∂'),
+ (0x1D750, 'M', 'ε'),
+ (0x1D751, 'M', 'θ'),
+ (0x1D752, 'M', 'κ'),
+ (0x1D753, 'M', 'φ'),
+ (0x1D754, 'M', 'ρ'),
+ (0x1D755, 'M', 'π'),
+ (0x1D756, 'M', 'α'),
+ (0x1D757, 'M', 'β'),
+ (0x1D758, 'M', 'γ'),
+ (0x1D759, 'M', 'δ'),
+ (0x1D75A, 'M', 'ε'),
+ (0x1D75B, 'M', 'ζ'),
+ (0x1D75C, 'M', 'η'),
+ (0x1D75D, 'M', 'θ'),
+ (0x1D75E, 'M', 'ι'),
+ (0x1D75F, 'M', 'κ'),
+ (0x1D760, 'M', 'λ'),
+ (0x1D761, 'M', 'μ'),
+ (0x1D762, 'M', 'ν'),
+ (0x1D763, 'M', 'ξ'),
+ (0x1D764, 'M', 'ο'),
+ (0x1D765, 'M', 'π'),
+ (0x1D766, 'M', 'ρ'),
+ (0x1D767, 'M', 'θ'),
+ (0x1D768, 'M', 'σ'),
+ (0x1D769, 'M', 'τ'),
+ (0x1D76A, 'M', 'υ'),
+ (0x1D76B, 'M', 'φ'),
+ (0x1D76C, 'M', 'χ'),
+ (0x1D76D, 'M', 'ψ'),
+ (0x1D76E, 'M', 'ω'),
+ (0x1D76F, 'M', '∇'),
+ (0x1D770, 'M', 'α'),
+ (0x1D771, 'M', 'β'),
+ (0x1D772, 'M', 'γ'),
+ (0x1D773, 'M', 'δ'),
+ (0x1D774, 'M', 'ε'),
+ (0x1D775, 'M', 'ζ'),
+ (0x1D776, 'M', 'η'),
+ (0x1D777, 'M', 'θ'),
+ (0x1D778, 'M', 'ι'),
+ (0x1D779, 'M', 'κ'),
+ (0x1D77A, 'M', 'λ'),
+ (0x1D77B, 'M', 'μ'),
+ (0x1D77C, 'M', 'ν'),
+ (0x1D77D, 'M', 'ξ'),
+ (0x1D77E, 'M', 'ο'),
+ (0x1D77F, 'M', 'π'),
+ (0x1D780, 'M', 'ρ'),
+ (0x1D781, 'M', 'σ'),
+ (0x1D783, 'M', 'τ'),
+ (0x1D784, 'M', 'υ'),
+ (0x1D785, 'M', 'φ'),
+ (0x1D786, 'M', 'χ'),
+ (0x1D787, 'M', 'ψ'),
+ (0x1D788, 'M', 'ω'),
+ (0x1D789, 'M', '∂'),
+ (0x1D78A, 'M', 'ε'),
+ (0x1D78B, 'M', 'θ'),
+ (0x1D78C, 'M', 'κ'),
+ (0x1D78D, 'M', 'φ'),
+ (0x1D78E, 'M', 'ρ'),
+ (0x1D78F, 'M', 'π'),
+ (0x1D790, 'M', 'α'),
+ (0x1D791, 'M', 'β'),
+ (0x1D792, 'M', 'γ'),
+ (0x1D793, 'M', 'δ'),
+ (0x1D794, 'M', 'ε'),
+ (0x1D795, 'M', 'ζ'),
+ (0x1D796, 'M', 'η'),
+ (0x1D797, 'M', 'θ'),
+ (0x1D798, 'M', 'ι'),
+ (0x1D799, 'M', 'κ'),
+ (0x1D79A, 'M', 'λ'),
+ (0x1D79B, 'M', 'μ'),
+ (0x1D79C, 'M', 'ν'),
+ (0x1D79D, 'M', 'ξ'),
+ (0x1D79E, 'M', 'ο'),
+ (0x1D79F, 'M', 'π'),
+ (0x1D7A0, 'M', 'ρ'),
+ (0x1D7A1, 'M', 'θ'),
+ (0x1D7A2, 'M', 'σ'),
+ (0x1D7A3, 'M', 'τ'),
+ (0x1D7A4, 'M', 'υ'),
+ (0x1D7A5, 'M', 'φ'),
+ (0x1D7A6, 'M', 'χ'),
+ (0x1D7A7, 'M', 'ψ'),
+ (0x1D7A8, 'M', 'ω'),
+ (0x1D7A9, 'M', '∇'),
+ (0x1D7AA, 'M', 'α'),
+ (0x1D7AB, 'M', 'β'),
+ (0x1D7AC, 'M', 'γ'),
+ (0x1D7AD, 'M', 'δ'),
+ (0x1D7AE, 'M', 'ε'),
+ (0x1D7AF, 'M', 'ζ'),
+ ]
+
+def _seg_70() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1D7B0, 'M', 'η'),
+ (0x1D7B1, 'M', 'θ'),
+ (0x1D7B2, 'M', 'ι'),
+ (0x1D7B3, 'M', 'κ'),
+ (0x1D7B4, 'M', 'λ'),
+ (0x1D7B5, 'M', 'μ'),
+ (0x1D7B6, 'M', 'ν'),
+ (0x1D7B7, 'M', 'ξ'),
+ (0x1D7B8, 'M', 'ο'),
+ (0x1D7B9, 'M', 'π'),
+ (0x1D7BA, 'M', 'ρ'),
+ (0x1D7BB, 'M', 'σ'),
+ (0x1D7BD, 'M', 'τ'),
+ (0x1D7BE, 'M', 'υ'),
+ (0x1D7BF, 'M', 'φ'),
+ (0x1D7C0, 'M', 'χ'),
+ (0x1D7C1, 'M', 'ψ'),
+ (0x1D7C2, 'M', 'ω'),
+ (0x1D7C3, 'M', '∂'),
+ (0x1D7C4, 'M', 'ε'),
+ (0x1D7C5, 'M', 'θ'),
+ (0x1D7C6, 'M', 'κ'),
+ (0x1D7C7, 'M', 'φ'),
+ (0x1D7C8, 'M', 'ρ'),
+ (0x1D7C9, 'M', 'π'),
+ (0x1D7CA, 'M', 'ϝ'),
+ (0x1D7CC, 'X'),
+ (0x1D7CE, 'M', '0'),
+ (0x1D7CF, 'M', '1'),
+ (0x1D7D0, 'M', '2'),
+ (0x1D7D1, 'M', '3'),
+ (0x1D7D2, 'M', '4'),
+ (0x1D7D3, 'M', '5'),
+ (0x1D7D4, 'M', '6'),
+ (0x1D7D5, 'M', '7'),
+ (0x1D7D6, 'M', '8'),
+ (0x1D7D7, 'M', '9'),
+ (0x1D7D8, 'M', '0'),
+ (0x1D7D9, 'M', '1'),
+ (0x1D7DA, 'M', '2'),
+ (0x1D7DB, 'M', '3'),
+ (0x1D7DC, 'M', '4'),
+ (0x1D7DD, 'M', '5'),
+ (0x1D7DE, 'M', '6'),
+ (0x1D7DF, 'M', '7'),
+ (0x1D7E0, 'M', '8'),
+ (0x1D7E1, 'M', '9'),
+ (0x1D7E2, 'M', '0'),
+ (0x1D7E3, 'M', '1'),
+ (0x1D7E4, 'M', '2'),
+ (0x1D7E5, 'M', '3'),
+ (0x1D7E6, 'M', '4'),
+ (0x1D7E7, 'M', '5'),
+ (0x1D7E8, 'M', '6'),
+ (0x1D7E9, 'M', '7'),
+ (0x1D7EA, 'M', '8'),
+ (0x1D7EB, 'M', '9'),
+ (0x1D7EC, 'M', '0'),
+ (0x1D7ED, 'M', '1'),
+ (0x1D7EE, 'M', '2'),
+ (0x1D7EF, 'M', '3'),
+ (0x1D7F0, 'M', '4'),
+ (0x1D7F1, 'M', '5'),
+ (0x1D7F2, 'M', '6'),
+ (0x1D7F3, 'M', '7'),
+ (0x1D7F4, 'M', '8'),
+ (0x1D7F5, 'M', '9'),
+ (0x1D7F6, 'M', '0'),
+ (0x1D7F7, 'M', '1'),
+ (0x1D7F8, 'M', '2'),
+ (0x1D7F9, 'M', '3'),
+ (0x1D7FA, 'M', '4'),
+ (0x1D7FB, 'M', '5'),
+ (0x1D7FC, 'M', '6'),
+ (0x1D7FD, 'M', '7'),
+ (0x1D7FE, 'M', '8'),
+ (0x1D7FF, 'M', '9'),
+ (0x1D800, 'V'),
+ (0x1DA8C, 'X'),
+ (0x1DA9B, 'V'),
+ (0x1DAA0, 'X'),
+ (0x1DAA1, 'V'),
+ (0x1DAB0, 'X'),
+ (0x1DF00, 'V'),
+ (0x1DF1F, 'X'),
+ (0x1DF25, 'V'),
+ (0x1DF2B, 'X'),
+ (0x1E000, 'V'),
+ (0x1E007, 'X'),
+ (0x1E008, 'V'),
+ (0x1E019, 'X'),
+ (0x1E01B, 'V'),
+ (0x1E022, 'X'),
+ (0x1E023, 'V'),
+ (0x1E025, 'X'),
+ (0x1E026, 'V'),
+ (0x1E02B, 'X'),
+ (0x1E030, 'M', 'а'),
+ (0x1E031, 'M', 'б'),
+ (0x1E032, 'M', 'в'),
+ ]
+
+def _seg_71() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1E033, 'M', 'г'),
+ (0x1E034, 'M', 'д'),
+ (0x1E035, 'M', 'е'),
+ (0x1E036, 'M', 'ж'),
+ (0x1E037, 'M', 'з'),
+ (0x1E038, 'M', 'и'),
+ (0x1E039, 'M', 'к'),
+ (0x1E03A, 'M', 'л'),
+ (0x1E03B, 'M', 'м'),
+ (0x1E03C, 'M', 'о'),
+ (0x1E03D, 'M', 'п'),
+ (0x1E03E, 'M', 'р'),
+ (0x1E03F, 'M', 'с'),
+ (0x1E040, 'M', 'т'),
+ (0x1E041, 'M', 'у'),
+ (0x1E042, 'M', 'ф'),
+ (0x1E043, 'M', 'х'),
+ (0x1E044, 'M', 'ц'),
+ (0x1E045, 'M', 'ч'),
+ (0x1E046, 'M', 'ш'),
+ (0x1E047, 'M', 'ы'),
+ (0x1E048, 'M', 'э'),
+ (0x1E049, 'M', 'ю'),
+ (0x1E04A, 'M', 'ꚉ'),
+ (0x1E04B, 'M', 'ә'),
+ (0x1E04C, 'M', 'і'),
+ (0x1E04D, 'M', 'ј'),
+ (0x1E04E, 'M', 'ө'),
+ (0x1E04F, 'M', 'ү'),
+ (0x1E050, 'M', 'ӏ'),
+ (0x1E051, 'M', 'а'),
+ (0x1E052, 'M', 'б'),
+ (0x1E053, 'M', 'в'),
+ (0x1E054, 'M', 'г'),
+ (0x1E055, 'M', 'д'),
+ (0x1E056, 'M', 'е'),
+ (0x1E057, 'M', 'ж'),
+ (0x1E058, 'M', 'з'),
+ (0x1E059, 'M', 'и'),
+ (0x1E05A, 'M', 'к'),
+ (0x1E05B, 'M', 'л'),
+ (0x1E05C, 'M', 'о'),
+ (0x1E05D, 'M', 'п'),
+ (0x1E05E, 'M', 'с'),
+ (0x1E05F, 'M', 'у'),
+ (0x1E060, 'M', 'ф'),
+ (0x1E061, 'M', 'х'),
+ (0x1E062, 'M', 'ц'),
+ (0x1E063, 'M', 'ч'),
+ (0x1E064, 'M', 'ш'),
+ (0x1E065, 'M', 'ъ'),
+ (0x1E066, 'M', 'ы'),
+ (0x1E067, 'M', 'ґ'),
+ (0x1E068, 'M', 'і'),
+ (0x1E069, 'M', 'ѕ'),
+ (0x1E06A, 'M', 'џ'),
+ (0x1E06B, 'M', 'ҫ'),
+ (0x1E06C, 'M', 'ꙑ'),
+ (0x1E06D, 'M', 'ұ'),
+ (0x1E06E, 'X'),
+ (0x1E08F, 'V'),
+ (0x1E090, 'X'),
+ (0x1E100, 'V'),
+ (0x1E12D, 'X'),
+ (0x1E130, 'V'),
+ (0x1E13E, 'X'),
+ (0x1E140, 'V'),
+ (0x1E14A, 'X'),
+ (0x1E14E, 'V'),
+ (0x1E150, 'X'),
+ (0x1E290, 'V'),
+ (0x1E2AF, 'X'),
+ (0x1E2C0, 'V'),
+ (0x1E2FA, 'X'),
+ (0x1E2FF, 'V'),
+ (0x1E300, 'X'),
+ (0x1E4D0, 'V'),
+ (0x1E4FA, 'X'),
+ (0x1E7E0, 'V'),
+ (0x1E7E7, 'X'),
+ (0x1E7E8, 'V'),
+ (0x1E7EC, 'X'),
+ (0x1E7ED, 'V'),
+ (0x1E7EF, 'X'),
+ (0x1E7F0, 'V'),
+ (0x1E7FF, 'X'),
+ (0x1E800, 'V'),
+ (0x1E8C5, 'X'),
+ (0x1E8C7, 'V'),
+ (0x1E8D7, 'X'),
+ (0x1E900, 'M', '𞤢'),
+ (0x1E901, 'M', '𞤣'),
+ (0x1E902, 'M', '𞤤'),
+ (0x1E903, 'M', '𞤥'),
+ (0x1E904, 'M', '𞤦'),
+ (0x1E905, 'M', '𞤧'),
+ (0x1E906, 'M', '𞤨'),
+ (0x1E907, 'M', '𞤩'),
+ (0x1E908, 'M', '𞤪'),
+ (0x1E909, 'M', '𞤫'),
+ ]
+
+def _seg_72() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1E90A, 'M', '𞤬'),
+ (0x1E90B, 'M', '𞤭'),
+ (0x1E90C, 'M', '𞤮'),
+ (0x1E90D, 'M', '𞤯'),
+ (0x1E90E, 'M', '𞤰'),
+ (0x1E90F, 'M', '𞤱'),
+ (0x1E910, 'M', '𞤲'),
+ (0x1E911, 'M', '𞤳'),
+ (0x1E912, 'M', '𞤴'),
+ (0x1E913, 'M', '𞤵'),
+ (0x1E914, 'M', '𞤶'),
+ (0x1E915, 'M', '𞤷'),
+ (0x1E916, 'M', '𞤸'),
+ (0x1E917, 'M', '𞤹'),
+ (0x1E918, 'M', '𞤺'),
+ (0x1E919, 'M', '𞤻'),
+ (0x1E91A, 'M', '𞤼'),
+ (0x1E91B, 'M', '𞤽'),
+ (0x1E91C, 'M', '𞤾'),
+ (0x1E91D, 'M', '𞤿'),
+ (0x1E91E, 'M', '𞥀'),
+ (0x1E91F, 'M', '𞥁'),
+ (0x1E920, 'M', '𞥂'),
+ (0x1E921, 'M', '𞥃'),
+ (0x1E922, 'V'),
+ (0x1E94C, 'X'),
+ (0x1E950, 'V'),
+ (0x1E95A, 'X'),
+ (0x1E95E, 'V'),
+ (0x1E960, 'X'),
+ (0x1EC71, 'V'),
+ (0x1ECB5, 'X'),
+ (0x1ED01, 'V'),
+ (0x1ED3E, 'X'),
+ (0x1EE00, 'M', 'ا'),
+ (0x1EE01, 'M', 'ب'),
+ (0x1EE02, 'M', 'ج'),
+ (0x1EE03, 'M', 'د'),
+ (0x1EE04, 'X'),
+ (0x1EE05, 'M', 'و'),
+ (0x1EE06, 'M', 'ز'),
+ (0x1EE07, 'M', 'ح'),
+ (0x1EE08, 'M', 'ط'),
+ (0x1EE09, 'M', 'ي'),
+ (0x1EE0A, 'M', 'ك'),
+ (0x1EE0B, 'M', 'ل'),
+ (0x1EE0C, 'M', 'م'),
+ (0x1EE0D, 'M', 'ن'),
+ (0x1EE0E, 'M', 'س'),
+ (0x1EE0F, 'M', 'ع'),
+ (0x1EE10, 'M', 'ف'),
+ (0x1EE11, 'M', 'ص'),
+ (0x1EE12, 'M', 'ق'),
+ (0x1EE13, 'M', 'ر'),
+ (0x1EE14, 'M', 'ش'),
+ (0x1EE15, 'M', 'ت'),
+ (0x1EE16, 'M', 'ث'),
+ (0x1EE17, 'M', 'خ'),
+ (0x1EE18, 'M', 'ذ'),
+ (0x1EE19, 'M', 'ض'),
+ (0x1EE1A, 'M', 'ظ'),
+ (0x1EE1B, 'M', 'غ'),
+ (0x1EE1C, 'M', 'ٮ'),
+ (0x1EE1D, 'M', 'ں'),
+ (0x1EE1E, 'M', 'ڡ'),
+ (0x1EE1F, 'M', 'ٯ'),
+ (0x1EE20, 'X'),
+ (0x1EE21, 'M', 'ب'),
+ (0x1EE22, 'M', 'ج'),
+ (0x1EE23, 'X'),
+ (0x1EE24, 'M', 'ه'),
+ (0x1EE25, 'X'),
+ (0x1EE27, 'M', 'ح'),
+ (0x1EE28, 'X'),
+ (0x1EE29, 'M', 'ي'),
+ (0x1EE2A, 'M', 'ك'),
+ (0x1EE2B, 'M', 'ل'),
+ (0x1EE2C, 'M', 'م'),
+ (0x1EE2D, 'M', 'ن'),
+ (0x1EE2E, 'M', 'س'),
+ (0x1EE2F, 'M', 'ع'),
+ (0x1EE30, 'M', 'ف'),
+ (0x1EE31, 'M', 'ص'),
+ (0x1EE32, 'M', 'ق'),
+ (0x1EE33, 'X'),
+ (0x1EE34, 'M', 'ش'),
+ (0x1EE35, 'M', 'ت'),
+ (0x1EE36, 'M', 'ث'),
+ (0x1EE37, 'M', 'خ'),
+ (0x1EE38, 'X'),
+ (0x1EE39, 'M', 'ض'),
+ (0x1EE3A, 'X'),
+ (0x1EE3B, 'M', 'غ'),
+ (0x1EE3C, 'X'),
+ (0x1EE42, 'M', 'ج'),
+ (0x1EE43, 'X'),
+ (0x1EE47, 'M', 'ح'),
+ (0x1EE48, 'X'),
+ (0x1EE49, 'M', 'ي'),
+ (0x1EE4A, 'X'),
+ ]
+
+def _seg_73() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1EE4B, 'M', 'ل'),
+ (0x1EE4C, 'X'),
+ (0x1EE4D, 'M', 'ن'),
+ (0x1EE4E, 'M', 'س'),
+ (0x1EE4F, 'M', 'ع'),
+ (0x1EE50, 'X'),
+ (0x1EE51, 'M', 'ص'),
+ (0x1EE52, 'M', 'ق'),
+ (0x1EE53, 'X'),
+ (0x1EE54, 'M', 'ش'),
+ (0x1EE55, 'X'),
+ (0x1EE57, 'M', 'خ'),
+ (0x1EE58, 'X'),
+ (0x1EE59, 'M', 'ض'),
+ (0x1EE5A, 'X'),
+ (0x1EE5B, 'M', 'غ'),
+ (0x1EE5C, 'X'),
+ (0x1EE5D, 'M', 'ں'),
+ (0x1EE5E, 'X'),
+ (0x1EE5F, 'M', 'ٯ'),
+ (0x1EE60, 'X'),
+ (0x1EE61, 'M', 'ب'),
+ (0x1EE62, 'M', 'ج'),
+ (0x1EE63, 'X'),
+ (0x1EE64, 'M', 'ه'),
+ (0x1EE65, 'X'),
+ (0x1EE67, 'M', 'ح'),
+ (0x1EE68, 'M', 'ط'),
+ (0x1EE69, 'M', 'ي'),
+ (0x1EE6A, 'M', 'ك'),
+ (0x1EE6B, 'X'),
+ (0x1EE6C, 'M', 'م'),
+ (0x1EE6D, 'M', 'ن'),
+ (0x1EE6E, 'M', 'س'),
+ (0x1EE6F, 'M', 'ع'),
+ (0x1EE70, 'M', 'ف'),
+ (0x1EE71, 'M', 'ص'),
+ (0x1EE72, 'M', 'ق'),
+ (0x1EE73, 'X'),
+ (0x1EE74, 'M', 'ش'),
+ (0x1EE75, 'M', 'ت'),
+ (0x1EE76, 'M', 'ث'),
+ (0x1EE77, 'M', 'خ'),
+ (0x1EE78, 'X'),
+ (0x1EE79, 'M', 'ض'),
+ (0x1EE7A, 'M', 'ظ'),
+ (0x1EE7B, 'M', 'غ'),
+ (0x1EE7C, 'M', 'ٮ'),
+ (0x1EE7D, 'X'),
+ (0x1EE7E, 'M', 'ڡ'),
+ (0x1EE7F, 'X'),
+ (0x1EE80, 'M', 'ا'),
+ (0x1EE81, 'M', 'ب'),
+ (0x1EE82, 'M', 'ج'),
+ (0x1EE83, 'M', 'د'),
+ (0x1EE84, 'M', 'ه'),
+ (0x1EE85, 'M', 'و'),
+ (0x1EE86, 'M', 'ز'),
+ (0x1EE87, 'M', 'ح'),
+ (0x1EE88, 'M', 'ط'),
+ (0x1EE89, 'M', 'ي'),
+ (0x1EE8A, 'X'),
+ (0x1EE8B, 'M', 'ل'),
+ (0x1EE8C, 'M', 'م'),
+ (0x1EE8D, 'M', 'ن'),
+ (0x1EE8E, 'M', 'س'),
+ (0x1EE8F, 'M', 'ع'),
+ (0x1EE90, 'M', 'ف'),
+ (0x1EE91, 'M', 'ص'),
+ (0x1EE92, 'M', 'ق'),
+ (0x1EE93, 'M', 'ر'),
+ (0x1EE94, 'M', 'ش'),
+ (0x1EE95, 'M', 'ت'),
+ (0x1EE96, 'M', 'ث'),
+ (0x1EE97, 'M', 'خ'),
+ (0x1EE98, 'M', 'ذ'),
+ (0x1EE99, 'M', 'ض'),
+ (0x1EE9A, 'M', 'ظ'),
+ (0x1EE9B, 'M', 'غ'),
+ (0x1EE9C, 'X'),
+ (0x1EEA1, 'M', 'ب'),
+ (0x1EEA2, 'M', 'ج'),
+ (0x1EEA3, 'M', 'د'),
+ (0x1EEA4, 'X'),
+ (0x1EEA5, 'M', 'و'),
+ (0x1EEA6, 'M', 'ز'),
+ (0x1EEA7, 'M', 'ح'),
+ (0x1EEA8, 'M', 'ط'),
+ (0x1EEA9, 'M', 'ي'),
+ (0x1EEAA, 'X'),
+ (0x1EEAB, 'M', 'ل'),
+ (0x1EEAC, 'M', 'م'),
+ (0x1EEAD, 'M', 'ن'),
+ (0x1EEAE, 'M', 'س'),
+ (0x1EEAF, 'M', 'ع'),
+ (0x1EEB0, 'M', 'ف'),
+ (0x1EEB1, 'M', 'ص'),
+ (0x1EEB2, 'M', 'ق'),
+ (0x1EEB3, 'M', 'ر'),
+ (0x1EEB4, 'M', 'ش'),
+ ]
+
+def _seg_74() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1EEB5, 'M', 'ت'),
+ (0x1EEB6, 'M', 'ث'),
+ (0x1EEB7, 'M', 'خ'),
+ (0x1EEB8, 'M', 'ذ'),
+ (0x1EEB9, 'M', 'ض'),
+ (0x1EEBA, 'M', 'ظ'),
+ (0x1EEBB, 'M', 'غ'),
+ (0x1EEBC, 'X'),
+ (0x1EEF0, 'V'),
+ (0x1EEF2, 'X'),
+ (0x1F000, 'V'),
+ (0x1F02C, 'X'),
+ (0x1F030, 'V'),
+ (0x1F094, 'X'),
+ (0x1F0A0, 'V'),
+ (0x1F0AF, 'X'),
+ (0x1F0B1, 'V'),
+ (0x1F0C0, 'X'),
+ (0x1F0C1, 'V'),
+ (0x1F0D0, 'X'),
+ (0x1F0D1, 'V'),
+ (0x1F0F6, 'X'),
+ (0x1F101, '3', '0,'),
+ (0x1F102, '3', '1,'),
+ (0x1F103, '3', '2,'),
+ (0x1F104, '3', '3,'),
+ (0x1F105, '3', '4,'),
+ (0x1F106, '3', '5,'),
+ (0x1F107, '3', '6,'),
+ (0x1F108, '3', '7,'),
+ (0x1F109, '3', '8,'),
+ (0x1F10A, '3', '9,'),
+ (0x1F10B, 'V'),
+ (0x1F110, '3', '(a)'),
+ (0x1F111, '3', '(b)'),
+ (0x1F112, '3', '(c)'),
+ (0x1F113, '3', '(d)'),
+ (0x1F114, '3', '(e)'),
+ (0x1F115, '3', '(f)'),
+ (0x1F116, '3', '(g)'),
+ (0x1F117, '3', '(h)'),
+ (0x1F118, '3', '(i)'),
+ (0x1F119, '3', '(j)'),
+ (0x1F11A, '3', '(k)'),
+ (0x1F11B, '3', '(l)'),
+ (0x1F11C, '3', '(m)'),
+ (0x1F11D, '3', '(n)'),
+ (0x1F11E, '3', '(o)'),
+ (0x1F11F, '3', '(p)'),
+ (0x1F120, '3', '(q)'),
+ (0x1F121, '3', '(r)'),
+ (0x1F122, '3', '(s)'),
+ (0x1F123, '3', '(t)'),
+ (0x1F124, '3', '(u)'),
+ (0x1F125, '3', '(v)'),
+ (0x1F126, '3', '(w)'),
+ (0x1F127, '3', '(x)'),
+ (0x1F128, '3', '(y)'),
+ (0x1F129, '3', '(z)'),
+ (0x1F12A, 'M', '〔s〕'),
+ (0x1F12B, 'M', 'c'),
+ (0x1F12C, 'M', 'r'),
+ (0x1F12D, 'M', 'cd'),
+ (0x1F12E, 'M', 'wz'),
+ (0x1F12F, 'V'),
+ (0x1F130, 'M', 'a'),
+ (0x1F131, 'M', 'b'),
+ (0x1F132, 'M', 'c'),
+ (0x1F133, 'M', 'd'),
+ (0x1F134, 'M', 'e'),
+ (0x1F135, 'M', 'f'),
+ (0x1F136, 'M', 'g'),
+ (0x1F137, 'M', 'h'),
+ (0x1F138, 'M', 'i'),
+ (0x1F139, 'M', 'j'),
+ (0x1F13A, 'M', 'k'),
+ (0x1F13B, 'M', 'l'),
+ (0x1F13C, 'M', 'm'),
+ (0x1F13D, 'M', 'n'),
+ (0x1F13E, 'M', 'o'),
+ (0x1F13F, 'M', 'p'),
+ (0x1F140, 'M', 'q'),
+ (0x1F141, 'M', 'r'),
+ (0x1F142, 'M', 's'),
+ (0x1F143, 'M', 't'),
+ (0x1F144, 'M', 'u'),
+ (0x1F145, 'M', 'v'),
+ (0x1F146, 'M', 'w'),
+ (0x1F147, 'M', 'x'),
+ (0x1F148, 'M', 'y'),
+ (0x1F149, 'M', 'z'),
+ (0x1F14A, 'M', 'hv'),
+ (0x1F14B, 'M', 'mv'),
+ (0x1F14C, 'M', 'sd'),
+ (0x1F14D, 'M', 'ss'),
+ (0x1F14E, 'M', 'ppv'),
+ (0x1F14F, 'M', 'wc'),
+ (0x1F150, 'V'),
+ (0x1F16A, 'M', 'mc'),
+ (0x1F16B, 'M', 'md'),
+ ]
+
+def _seg_75() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1F16C, 'M', 'mr'),
+ (0x1F16D, 'V'),
+ (0x1F190, 'M', 'dj'),
+ (0x1F191, 'V'),
+ (0x1F1AE, 'X'),
+ (0x1F1E6, 'V'),
+ (0x1F200, 'M', 'ほか'),
+ (0x1F201, 'M', 'ココ'),
+ (0x1F202, 'M', 'サ'),
+ (0x1F203, 'X'),
+ (0x1F210, 'M', '手'),
+ (0x1F211, 'M', '字'),
+ (0x1F212, 'M', '双'),
+ (0x1F213, 'M', 'デ'),
+ (0x1F214, 'M', '二'),
+ (0x1F215, 'M', '多'),
+ (0x1F216, 'M', '解'),
+ (0x1F217, 'M', '天'),
+ (0x1F218, 'M', '交'),
+ (0x1F219, 'M', '映'),
+ (0x1F21A, 'M', '無'),
+ (0x1F21B, 'M', '料'),
+ (0x1F21C, 'M', '前'),
+ (0x1F21D, 'M', '後'),
+ (0x1F21E, 'M', '再'),
+ (0x1F21F, 'M', '新'),
+ (0x1F220, 'M', '初'),
+ (0x1F221, 'M', '終'),
+ (0x1F222, 'M', '生'),
+ (0x1F223, 'M', '販'),
+ (0x1F224, 'M', '声'),
+ (0x1F225, 'M', '吹'),
+ (0x1F226, 'M', '演'),
+ (0x1F227, 'M', '投'),
+ (0x1F228, 'M', '捕'),
+ (0x1F229, 'M', '一'),
+ (0x1F22A, 'M', '三'),
+ (0x1F22B, 'M', '遊'),
+ (0x1F22C, 'M', '左'),
+ (0x1F22D, 'M', '中'),
+ (0x1F22E, 'M', '右'),
+ (0x1F22F, 'M', '指'),
+ (0x1F230, 'M', '走'),
+ (0x1F231, 'M', '打'),
+ (0x1F232, 'M', '禁'),
+ (0x1F233, 'M', '空'),
+ (0x1F234, 'M', '合'),
+ (0x1F235, 'M', '満'),
+ (0x1F236, 'M', '有'),
+ (0x1F237, 'M', '月'),
+ (0x1F238, 'M', '申'),
+ (0x1F239, 'M', '割'),
+ (0x1F23A, 'M', '営'),
+ (0x1F23B, 'M', '配'),
+ (0x1F23C, 'X'),
+ (0x1F240, 'M', '〔本〕'),
+ (0x1F241, 'M', '〔三〕'),
+ (0x1F242, 'M', '〔二〕'),
+ (0x1F243, 'M', '〔安〕'),
+ (0x1F244, 'M', '〔点〕'),
+ (0x1F245, 'M', '〔打〕'),
+ (0x1F246, 'M', '〔盗〕'),
+ (0x1F247, 'M', '〔勝〕'),
+ (0x1F248, 'M', '〔敗〕'),
+ (0x1F249, 'X'),
+ (0x1F250, 'M', '得'),
+ (0x1F251, 'M', '可'),
+ (0x1F252, 'X'),
+ (0x1F260, 'V'),
+ (0x1F266, 'X'),
+ (0x1F300, 'V'),
+ (0x1F6D8, 'X'),
+ (0x1F6DC, 'V'),
+ (0x1F6ED, 'X'),
+ (0x1F6F0, 'V'),
+ (0x1F6FD, 'X'),
+ (0x1F700, 'V'),
+ (0x1F777, 'X'),
+ (0x1F77B, 'V'),
+ (0x1F7DA, 'X'),
+ (0x1F7E0, 'V'),
+ (0x1F7EC, 'X'),
+ (0x1F7F0, 'V'),
+ (0x1F7F1, 'X'),
+ (0x1F800, 'V'),
+ (0x1F80C, 'X'),
+ (0x1F810, 'V'),
+ (0x1F848, 'X'),
+ (0x1F850, 'V'),
+ (0x1F85A, 'X'),
+ (0x1F860, 'V'),
+ (0x1F888, 'X'),
+ (0x1F890, 'V'),
+ (0x1F8AE, 'X'),
+ (0x1F8B0, 'V'),
+ (0x1F8B2, 'X'),
+ (0x1F900, 'V'),
+ (0x1FA54, 'X'),
+ (0x1FA60, 'V'),
+ (0x1FA6E, 'X'),
+ ]
+
+def _seg_76() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x1FA70, 'V'),
+ (0x1FA7D, 'X'),
+ (0x1FA80, 'V'),
+ (0x1FA89, 'X'),
+ (0x1FA90, 'V'),
+ (0x1FABE, 'X'),
+ (0x1FABF, 'V'),
+ (0x1FAC6, 'X'),
+ (0x1FACE, 'V'),
+ (0x1FADC, 'X'),
+ (0x1FAE0, 'V'),
+ (0x1FAE9, 'X'),
+ (0x1FAF0, 'V'),
+ (0x1FAF9, 'X'),
+ (0x1FB00, 'V'),
+ (0x1FB93, 'X'),
+ (0x1FB94, 'V'),
+ (0x1FBCB, 'X'),
+ (0x1FBF0, 'M', '0'),
+ (0x1FBF1, 'M', '1'),
+ (0x1FBF2, 'M', '2'),
+ (0x1FBF3, 'M', '3'),
+ (0x1FBF4, 'M', '4'),
+ (0x1FBF5, 'M', '5'),
+ (0x1FBF6, 'M', '6'),
+ (0x1FBF7, 'M', '7'),
+ (0x1FBF8, 'M', '8'),
+ (0x1FBF9, 'M', '9'),
+ (0x1FBFA, 'X'),
+ (0x20000, 'V'),
+ (0x2A6E0, 'X'),
+ (0x2A700, 'V'),
+ (0x2B73A, 'X'),
+ (0x2B740, 'V'),
+ (0x2B81E, 'X'),
+ (0x2B820, 'V'),
+ (0x2CEA2, 'X'),
+ (0x2CEB0, 'V'),
+ (0x2EBE1, 'X'),
+ (0x2F800, 'M', '丽'),
+ (0x2F801, 'M', '丸'),
+ (0x2F802, 'M', '乁'),
+ (0x2F803, 'M', '𠄢'),
+ (0x2F804, 'M', '你'),
+ (0x2F805, 'M', '侮'),
+ (0x2F806, 'M', '侻'),
+ (0x2F807, 'M', '倂'),
+ (0x2F808, 'M', '偺'),
+ (0x2F809, 'M', '備'),
+ (0x2F80A, 'M', '僧'),
+ (0x2F80B, 'M', '像'),
+ (0x2F80C, 'M', '㒞'),
+ (0x2F80D, 'M', '𠘺'),
+ (0x2F80E, 'M', '免'),
+ (0x2F80F, 'M', '兔'),
+ (0x2F810, 'M', '兤'),
+ (0x2F811, 'M', '具'),
+ (0x2F812, 'M', '𠔜'),
+ (0x2F813, 'M', '㒹'),
+ (0x2F814, 'M', '內'),
+ (0x2F815, 'M', '再'),
+ (0x2F816, 'M', '𠕋'),
+ (0x2F817, 'M', '冗'),
+ (0x2F818, 'M', '冤'),
+ (0x2F819, 'M', '仌'),
+ (0x2F81A, 'M', '冬'),
+ (0x2F81B, 'M', '况'),
+ (0x2F81C, 'M', '𩇟'),
+ (0x2F81D, 'M', '凵'),
+ (0x2F81E, 'M', '刃'),
+ (0x2F81F, 'M', '㓟'),
+ (0x2F820, 'M', '刻'),
+ (0x2F821, 'M', '剆'),
+ (0x2F822, 'M', '割'),
+ (0x2F823, 'M', '剷'),
+ (0x2F824, 'M', '㔕'),
+ (0x2F825, 'M', '勇'),
+ (0x2F826, 'M', '勉'),
+ (0x2F827, 'M', '勤'),
+ (0x2F828, 'M', '勺'),
+ (0x2F829, 'M', '包'),
+ (0x2F82A, 'M', '匆'),
+ (0x2F82B, 'M', '北'),
+ (0x2F82C, 'M', '卉'),
+ (0x2F82D, 'M', '卑'),
+ (0x2F82E, 'M', '博'),
+ (0x2F82F, 'M', '即'),
+ (0x2F830, 'M', '卽'),
+ (0x2F831, 'M', '卿'),
+ (0x2F834, 'M', '𠨬'),
+ (0x2F835, 'M', '灰'),
+ (0x2F836, 'M', '及'),
+ (0x2F837, 'M', '叟'),
+ (0x2F838, 'M', '𠭣'),
+ (0x2F839, 'M', '叫'),
+ (0x2F83A, 'M', '叱'),
+ (0x2F83B, 'M', '吆'),
+ (0x2F83C, 'M', '咞'),
+ (0x2F83D, 'M', '吸'),
+ (0x2F83E, 'M', '呈'),
+ ]
+
+def _seg_77() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2F83F, 'M', '周'),
+ (0x2F840, 'M', '咢'),
+ (0x2F841, 'M', '哶'),
+ (0x2F842, 'M', '唐'),
+ (0x2F843, 'M', '啓'),
+ (0x2F844, 'M', '啣'),
+ (0x2F845, 'M', '善'),
+ (0x2F847, 'M', '喙'),
+ (0x2F848, 'M', '喫'),
+ (0x2F849, 'M', '喳'),
+ (0x2F84A, 'M', '嗂'),
+ (0x2F84B, 'M', '圖'),
+ (0x2F84C, 'M', '嘆'),
+ (0x2F84D, 'M', '圗'),
+ (0x2F84E, 'M', '噑'),
+ (0x2F84F, 'M', '噴'),
+ (0x2F850, 'M', '切'),
+ (0x2F851, 'M', '壮'),
+ (0x2F852, 'M', '城'),
+ (0x2F853, 'M', '埴'),
+ (0x2F854, 'M', '堍'),
+ (0x2F855, 'M', '型'),
+ (0x2F856, 'M', '堲'),
+ (0x2F857, 'M', '報'),
+ (0x2F858, 'M', '墬'),
+ (0x2F859, 'M', '𡓤'),
+ (0x2F85A, 'M', '売'),
+ (0x2F85B, 'M', '壷'),
+ (0x2F85C, 'M', '夆'),
+ (0x2F85D, 'M', '多'),
+ (0x2F85E, 'M', '夢'),
+ (0x2F85F, 'M', '奢'),
+ (0x2F860, 'M', '𡚨'),
+ (0x2F861, 'M', '𡛪'),
+ (0x2F862, 'M', '姬'),
+ (0x2F863, 'M', '娛'),
+ (0x2F864, 'M', '娧'),
+ (0x2F865, 'M', '姘'),
+ (0x2F866, 'M', '婦'),
+ (0x2F867, 'M', '㛮'),
+ (0x2F868, 'X'),
+ (0x2F869, 'M', '嬈'),
+ (0x2F86A, 'M', '嬾'),
+ (0x2F86C, 'M', '𡧈'),
+ (0x2F86D, 'M', '寃'),
+ (0x2F86E, 'M', '寘'),
+ (0x2F86F, 'M', '寧'),
+ (0x2F870, 'M', '寳'),
+ (0x2F871, 'M', '𡬘'),
+ (0x2F872, 'M', '寿'),
+ (0x2F873, 'M', '将'),
+ (0x2F874, 'X'),
+ (0x2F875, 'M', '尢'),
+ (0x2F876, 'M', '㞁'),
+ (0x2F877, 'M', '屠'),
+ (0x2F878, 'M', '屮'),
+ (0x2F879, 'M', '峀'),
+ (0x2F87A, 'M', '岍'),
+ (0x2F87B, 'M', '𡷤'),
+ (0x2F87C, 'M', '嵃'),
+ (0x2F87D, 'M', '𡷦'),
+ (0x2F87E, 'M', '嵮'),
+ (0x2F87F, 'M', '嵫'),
+ (0x2F880, 'M', '嵼'),
+ (0x2F881, 'M', '巡'),
+ (0x2F882, 'M', '巢'),
+ (0x2F883, 'M', '㠯'),
+ (0x2F884, 'M', '巽'),
+ (0x2F885, 'M', '帨'),
+ (0x2F886, 'M', '帽'),
+ (0x2F887, 'M', '幩'),
+ (0x2F888, 'M', '㡢'),
+ (0x2F889, 'M', '𢆃'),
+ (0x2F88A, 'M', '㡼'),
+ (0x2F88B, 'M', '庰'),
+ (0x2F88C, 'M', '庳'),
+ (0x2F88D, 'M', '庶'),
+ (0x2F88E, 'M', '廊'),
+ (0x2F88F, 'M', '𪎒'),
+ (0x2F890, 'M', '廾'),
+ (0x2F891, 'M', '𢌱'),
+ (0x2F893, 'M', '舁'),
+ (0x2F894, 'M', '弢'),
+ (0x2F896, 'M', '㣇'),
+ (0x2F897, 'M', '𣊸'),
+ (0x2F898, 'M', '𦇚'),
+ (0x2F899, 'M', '形'),
+ (0x2F89A, 'M', '彫'),
+ (0x2F89B, 'M', '㣣'),
+ (0x2F89C, 'M', '徚'),
+ (0x2F89D, 'M', '忍'),
+ (0x2F89E, 'M', '志'),
+ (0x2F89F, 'M', '忹'),
+ (0x2F8A0, 'M', '悁'),
+ (0x2F8A1, 'M', '㤺'),
+ (0x2F8A2, 'M', '㤜'),
+ (0x2F8A3, 'M', '悔'),
+ (0x2F8A4, 'M', '𢛔'),
+ (0x2F8A5, 'M', '惇'),
+ (0x2F8A6, 'M', '慈'),
+ ]
+
+def _seg_78() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2F8A7, 'M', '慌'),
+ (0x2F8A8, 'M', '慎'),
+ (0x2F8A9, 'M', '慌'),
+ (0x2F8AA, 'M', '慺'),
+ (0x2F8AB, 'M', '憎'),
+ (0x2F8AC, 'M', '憲'),
+ (0x2F8AD, 'M', '憤'),
+ (0x2F8AE, 'M', '憯'),
+ (0x2F8AF, 'M', '懞'),
+ (0x2F8B0, 'M', '懲'),
+ (0x2F8B1, 'M', '懶'),
+ (0x2F8B2, 'M', '成'),
+ (0x2F8B3, 'M', '戛'),
+ (0x2F8B4, 'M', '扝'),
+ (0x2F8B5, 'M', '抱'),
+ (0x2F8B6, 'M', '拔'),
+ (0x2F8B7, 'M', '捐'),
+ (0x2F8B8, 'M', '𢬌'),
+ (0x2F8B9, 'M', '挽'),
+ (0x2F8BA, 'M', '拼'),
+ (0x2F8BB, 'M', '捨'),
+ (0x2F8BC, 'M', '掃'),
+ (0x2F8BD, 'M', '揤'),
+ (0x2F8BE, 'M', '𢯱'),
+ (0x2F8BF, 'M', '搢'),
+ (0x2F8C0, 'M', '揅'),
+ (0x2F8C1, 'M', '掩'),
+ (0x2F8C2, 'M', '㨮'),
+ (0x2F8C3, 'M', '摩'),
+ (0x2F8C4, 'M', '摾'),
+ (0x2F8C5, 'M', '撝'),
+ (0x2F8C6, 'M', '摷'),
+ (0x2F8C7, 'M', '㩬'),
+ (0x2F8C8, 'M', '敏'),
+ (0x2F8C9, 'M', '敬'),
+ (0x2F8CA, 'M', '𣀊'),
+ (0x2F8CB, 'M', '旣'),
+ (0x2F8CC, 'M', '書'),
+ (0x2F8CD, 'M', '晉'),
+ (0x2F8CE, 'M', '㬙'),
+ (0x2F8CF, 'M', '暑'),
+ (0x2F8D0, 'M', '㬈'),
+ (0x2F8D1, 'M', '㫤'),
+ (0x2F8D2, 'M', '冒'),
+ (0x2F8D3, 'M', '冕'),
+ (0x2F8D4, 'M', '最'),
+ (0x2F8D5, 'M', '暜'),
+ (0x2F8D6, 'M', '肭'),
+ (0x2F8D7, 'M', '䏙'),
+ (0x2F8D8, 'M', '朗'),
+ (0x2F8D9, 'M', '望'),
+ (0x2F8DA, 'M', '朡'),
+ (0x2F8DB, 'M', '杞'),
+ (0x2F8DC, 'M', '杓'),
+ (0x2F8DD, 'M', '𣏃'),
+ (0x2F8DE, 'M', '㭉'),
+ (0x2F8DF, 'M', '柺'),
+ (0x2F8E0, 'M', '枅'),
+ (0x2F8E1, 'M', '桒'),
+ (0x2F8E2, 'M', '梅'),
+ (0x2F8E3, 'M', '𣑭'),
+ (0x2F8E4, 'M', '梎'),
+ (0x2F8E5, 'M', '栟'),
+ (0x2F8E6, 'M', '椔'),
+ (0x2F8E7, 'M', '㮝'),
+ (0x2F8E8, 'M', '楂'),
+ (0x2F8E9, 'M', '榣'),
+ (0x2F8EA, 'M', '槪'),
+ (0x2F8EB, 'M', '檨'),
+ (0x2F8EC, 'M', '𣚣'),
+ (0x2F8ED, 'M', '櫛'),
+ (0x2F8EE, 'M', '㰘'),
+ (0x2F8EF, 'M', '次'),
+ (0x2F8F0, 'M', '𣢧'),
+ (0x2F8F1, 'M', '歔'),
+ (0x2F8F2, 'M', '㱎'),
+ (0x2F8F3, 'M', '歲'),
+ (0x2F8F4, 'M', '殟'),
+ (0x2F8F5, 'M', '殺'),
+ (0x2F8F6, 'M', '殻'),
+ (0x2F8F7, 'M', '𣪍'),
+ (0x2F8F8, 'M', '𡴋'),
+ (0x2F8F9, 'M', '𣫺'),
+ (0x2F8FA, 'M', '汎'),
+ (0x2F8FB, 'M', '𣲼'),
+ (0x2F8FC, 'M', '沿'),
+ (0x2F8FD, 'M', '泍'),
+ (0x2F8FE, 'M', '汧'),
+ (0x2F8FF, 'M', '洖'),
+ (0x2F900, 'M', '派'),
+ (0x2F901, 'M', '海'),
+ (0x2F902, 'M', '流'),
+ (0x2F903, 'M', '浩'),
+ (0x2F904, 'M', '浸'),
+ (0x2F905, 'M', '涅'),
+ (0x2F906, 'M', '𣴞'),
+ (0x2F907, 'M', '洴'),
+ (0x2F908, 'M', '港'),
+ (0x2F909, 'M', '湮'),
+ (0x2F90A, 'M', '㴳'),
+ ]
+
+def _seg_79() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2F90B, 'M', '滋'),
+ (0x2F90C, 'M', '滇'),
+ (0x2F90D, 'M', '𣻑'),
+ (0x2F90E, 'M', '淹'),
+ (0x2F90F, 'M', '潮'),
+ (0x2F910, 'M', '𣽞'),
+ (0x2F911, 'M', '𣾎'),
+ (0x2F912, 'M', '濆'),
+ (0x2F913, 'M', '瀹'),
+ (0x2F914, 'M', '瀞'),
+ (0x2F915, 'M', '瀛'),
+ (0x2F916, 'M', '㶖'),
+ (0x2F917, 'M', '灊'),
+ (0x2F918, 'M', '災'),
+ (0x2F919, 'M', '灷'),
+ (0x2F91A, 'M', '炭'),
+ (0x2F91B, 'M', '𠔥'),
+ (0x2F91C, 'M', '煅'),
+ (0x2F91D, 'M', '𤉣'),
+ (0x2F91E, 'M', '熜'),
+ (0x2F91F, 'X'),
+ (0x2F920, 'M', '爨'),
+ (0x2F921, 'M', '爵'),
+ (0x2F922, 'M', '牐'),
+ (0x2F923, 'M', '𤘈'),
+ (0x2F924, 'M', '犀'),
+ (0x2F925, 'M', '犕'),
+ (0x2F926, 'M', '𤜵'),
+ (0x2F927, 'M', '𤠔'),
+ (0x2F928, 'M', '獺'),
+ (0x2F929, 'M', '王'),
+ (0x2F92A, 'M', '㺬'),
+ (0x2F92B, 'M', '玥'),
+ (0x2F92C, 'M', '㺸'),
+ (0x2F92E, 'M', '瑇'),
+ (0x2F92F, 'M', '瑜'),
+ (0x2F930, 'M', '瑱'),
+ (0x2F931, 'M', '璅'),
+ (0x2F932, 'M', '瓊'),
+ (0x2F933, 'M', '㼛'),
+ (0x2F934, 'M', '甤'),
+ (0x2F935, 'M', '𤰶'),
+ (0x2F936, 'M', '甾'),
+ (0x2F937, 'M', '𤲒'),
+ (0x2F938, 'M', '異'),
+ (0x2F939, 'M', '𢆟'),
+ (0x2F93A, 'M', '瘐'),
+ (0x2F93B, 'M', '𤾡'),
+ (0x2F93C, 'M', '𤾸'),
+ (0x2F93D, 'M', '𥁄'),
+ (0x2F93E, 'M', '㿼'),
+ (0x2F93F, 'M', '䀈'),
+ (0x2F940, 'M', '直'),
+ (0x2F941, 'M', '𥃳'),
+ (0x2F942, 'M', '𥃲'),
+ (0x2F943, 'M', '𥄙'),
+ (0x2F944, 'M', '𥄳'),
+ (0x2F945, 'M', '眞'),
+ (0x2F946, 'M', '真'),
+ (0x2F948, 'M', '睊'),
+ (0x2F949, 'M', '䀹'),
+ (0x2F94A, 'M', '瞋'),
+ (0x2F94B, 'M', '䁆'),
+ (0x2F94C, 'M', '䂖'),
+ (0x2F94D, 'M', '𥐝'),
+ (0x2F94E, 'M', '硎'),
+ (0x2F94F, 'M', '碌'),
+ (0x2F950, 'M', '磌'),
+ (0x2F951, 'M', '䃣'),
+ (0x2F952, 'M', '𥘦'),
+ (0x2F953, 'M', '祖'),
+ (0x2F954, 'M', '𥚚'),
+ (0x2F955, 'M', '𥛅'),
+ (0x2F956, 'M', '福'),
+ (0x2F957, 'M', '秫'),
+ (0x2F958, 'M', '䄯'),
+ (0x2F959, 'M', '穀'),
+ (0x2F95A, 'M', '穊'),
+ (0x2F95B, 'M', '穏'),
+ (0x2F95C, 'M', '𥥼'),
+ (0x2F95D, 'M', '𥪧'),
+ (0x2F95F, 'X'),
+ (0x2F960, 'M', '䈂'),
+ (0x2F961, 'M', '𥮫'),
+ (0x2F962, 'M', '篆'),
+ (0x2F963, 'M', '築'),
+ (0x2F964, 'M', '䈧'),
+ (0x2F965, 'M', '𥲀'),
+ (0x2F966, 'M', '糒'),
+ (0x2F967, 'M', '䊠'),
+ (0x2F968, 'M', '糨'),
+ (0x2F969, 'M', '糣'),
+ (0x2F96A, 'M', '紀'),
+ (0x2F96B, 'M', '𥾆'),
+ (0x2F96C, 'M', '絣'),
+ (0x2F96D, 'M', '䌁'),
+ (0x2F96E, 'M', '緇'),
+ (0x2F96F, 'M', '縂'),
+ (0x2F970, 'M', '繅'),
+ (0x2F971, 'M', '䌴'),
+ ]
+
+def _seg_80() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2F972, 'M', '𦈨'),
+ (0x2F973, 'M', '𦉇'),
+ (0x2F974, 'M', '䍙'),
+ (0x2F975, 'M', '𦋙'),
+ (0x2F976, 'M', '罺'),
+ (0x2F977, 'M', '𦌾'),
+ (0x2F978, 'M', '羕'),
+ (0x2F979, 'M', '翺'),
+ (0x2F97A, 'M', '者'),
+ (0x2F97B, 'M', '𦓚'),
+ (0x2F97C, 'M', '𦔣'),
+ (0x2F97D, 'M', '聠'),
+ (0x2F97E, 'M', '𦖨'),
+ (0x2F97F, 'M', '聰'),
+ (0x2F980, 'M', '𣍟'),
+ (0x2F981, 'M', '䏕'),
+ (0x2F982, 'M', '育'),
+ (0x2F983, 'M', '脃'),
+ (0x2F984, 'M', '䐋'),
+ (0x2F985, 'M', '脾'),
+ (0x2F986, 'M', '媵'),
+ (0x2F987, 'M', '𦞧'),
+ (0x2F988, 'M', '𦞵'),
+ (0x2F989, 'M', '𣎓'),
+ (0x2F98A, 'M', '𣎜'),
+ (0x2F98B, 'M', '舁'),
+ (0x2F98C, 'M', '舄'),
+ (0x2F98D, 'M', '辞'),
+ (0x2F98E, 'M', '䑫'),
+ (0x2F98F, 'M', '芑'),
+ (0x2F990, 'M', '芋'),
+ (0x2F991, 'M', '芝'),
+ (0x2F992, 'M', '劳'),
+ (0x2F993, 'M', '花'),
+ (0x2F994, 'M', '芳'),
+ (0x2F995, 'M', '芽'),
+ (0x2F996, 'M', '苦'),
+ (0x2F997, 'M', '𦬼'),
+ (0x2F998, 'M', '若'),
+ (0x2F999, 'M', '茝'),
+ (0x2F99A, 'M', '荣'),
+ (0x2F99B, 'M', '莭'),
+ (0x2F99C, 'M', '茣'),
+ (0x2F99D, 'M', '莽'),
+ (0x2F99E, 'M', '菧'),
+ (0x2F99F, 'M', '著'),
+ (0x2F9A0, 'M', '荓'),
+ (0x2F9A1, 'M', '菊'),
+ (0x2F9A2, 'M', '菌'),
+ (0x2F9A3, 'M', '菜'),
+ (0x2F9A4, 'M', '𦰶'),
+ (0x2F9A5, 'M', '𦵫'),
+ (0x2F9A6, 'M', '𦳕'),
+ (0x2F9A7, 'M', '䔫'),
+ (0x2F9A8, 'M', '蓱'),
+ (0x2F9A9, 'M', '蓳'),
+ (0x2F9AA, 'M', '蔖'),
+ (0x2F9AB, 'M', '𧏊'),
+ (0x2F9AC, 'M', '蕤'),
+ (0x2F9AD, 'M', '𦼬'),
+ (0x2F9AE, 'M', '䕝'),
+ (0x2F9AF, 'M', '䕡'),
+ (0x2F9B0, 'M', '𦾱'),
+ (0x2F9B1, 'M', '𧃒'),
+ (0x2F9B2, 'M', '䕫'),
+ (0x2F9B3, 'M', '虐'),
+ (0x2F9B4, 'M', '虜'),
+ (0x2F9B5, 'M', '虧'),
+ (0x2F9B6, 'M', '虩'),
+ (0x2F9B7, 'M', '蚩'),
+ (0x2F9B8, 'M', '蚈'),
+ (0x2F9B9, 'M', '蜎'),
+ (0x2F9BA, 'M', '蛢'),
+ (0x2F9BB, 'M', '蝹'),
+ (0x2F9BC, 'M', '蜨'),
+ (0x2F9BD, 'M', '蝫'),
+ (0x2F9BE, 'M', '螆'),
+ (0x2F9BF, 'X'),
+ (0x2F9C0, 'M', '蟡'),
+ (0x2F9C1, 'M', '蠁'),
+ (0x2F9C2, 'M', '䗹'),
+ (0x2F9C3, 'M', '衠'),
+ (0x2F9C4, 'M', '衣'),
+ (0x2F9C5, 'M', '𧙧'),
+ (0x2F9C6, 'M', '裗'),
+ (0x2F9C7, 'M', '裞'),
+ (0x2F9C8, 'M', '䘵'),
+ (0x2F9C9, 'M', '裺'),
+ (0x2F9CA, 'M', '㒻'),
+ (0x2F9CB, 'M', '𧢮'),
+ (0x2F9CC, 'M', '𧥦'),
+ (0x2F9CD, 'M', '䚾'),
+ (0x2F9CE, 'M', '䛇'),
+ (0x2F9CF, 'M', '誠'),
+ (0x2F9D0, 'M', '諭'),
+ (0x2F9D1, 'M', '變'),
+ (0x2F9D2, 'M', '豕'),
+ (0x2F9D3, 'M', '𧲨'),
+ (0x2F9D4, 'M', '貫'),
+ (0x2F9D5, 'M', '賁'),
+ ]
+
+def _seg_81() -> List[Union[Tuple[int, str], Tuple[int, str, str]]]:
+ return [
+ (0x2F9D6, 'M', '贛'),
+ (0x2F9D7, 'M', '起'),
+ (0x2F9D8, 'M', '𧼯'),
+ (0x2F9D9, 'M', '𠠄'),
+ (0x2F9DA, 'M', '跋'),
+ (0x2F9DB, 'M', '趼'),
+ (0x2F9DC, 'M', '跰'),
+ (0x2F9DD, 'M', '𠣞'),
+ (0x2F9DE, 'M', '軔'),
+ (0x2F9DF, 'M', '輸'),
+ (0x2F9E0, 'M', '𨗒'),
+ (0x2F9E1, 'M', '𨗭'),
+ (0x2F9E2, 'M', '邔'),
+ (0x2F9E3, 'M', '郱'),
+ (0x2F9E4, 'M', '鄑'),
+ (0x2F9E5, 'M', '𨜮'),
+ (0x2F9E6, 'M', '鄛'),
+ (0x2F9E7, 'M', '鈸'),
+ (0x2F9E8, 'M', '鋗'),
+ (0x2F9E9, 'M', '鋘'),
+ (0x2F9EA, 'M', '鉼'),
+ (0x2F9EB, 'M', '鏹'),
+ (0x2F9EC, 'M', '鐕'),
+ (0x2F9ED, 'M', '𨯺'),
+ (0x2F9EE, 'M', '開'),
+ (0x2F9EF, 'M', '䦕'),
+ (0x2F9F0, 'M', '閷'),
+ (0x2F9F1, 'M', '𨵷'),
+ (0x2F9F2, 'M', '䧦'),
+ (0x2F9F3, 'M', '雃'),
+ (0x2F9F4, 'M', '嶲'),
+ (0x2F9F5, 'M', '霣'),
+ (0x2F9F6, 'M', '𩅅'),
+ (0x2F9F7, 'M', '𩈚'),
+ (0x2F9F8, 'M', '䩮'),
+ (0x2F9F9, 'M', '䩶'),
+ (0x2F9FA, 'M', '韠'),
+ (0x2F9FB, 'M', '𩐊'),
+ (0x2F9FC, 'M', '䪲'),
+ (0x2F9FD, 'M', '𩒖'),
+ (0x2F9FE, 'M', '頋'),
+ (0x2FA00, 'M', '頩'),
+ (0x2FA01, 'M', '𩖶'),
+ (0x2FA02, 'M', '飢'),
+ (0x2FA03, 'M', '䬳'),
+ (0x2FA04, 'M', '餩'),
+ (0x2FA05, 'M', '馧'),
+ (0x2FA06, 'M', '駂'),
+ (0x2FA07, 'M', '駾'),
+ (0x2FA08, 'M', '䯎'),
+ (0x2FA09, 'M', '𩬰'),
+ (0x2FA0A, 'M', '鬒'),
+ (0x2FA0B, 'M', '鱀'),
+ (0x2FA0C, 'M', '鳽'),
+ (0x2FA0D, 'M', '䳎'),
+ (0x2FA0E, 'M', '䳭'),
+ (0x2FA0F, 'M', '鵧'),
+ (0x2FA10, 'M', '𪃎'),
+ (0x2FA11, 'M', '䳸'),
+ (0x2FA12, 'M', '𪄅'),
+ (0x2FA13, 'M', '𪈎'),
+ (0x2FA14, 'M', '𪊑'),
+ (0x2FA15, 'M', '麻'),
+ (0x2FA16, 'M', '䵖'),
+ (0x2FA17, 'M', '黹'),
+ (0x2FA18, 'M', '黾'),
+ (0x2FA19, 'M', '鼅'),
+ (0x2FA1A, 'M', '鼏'),
+ (0x2FA1B, 'M', '鼖'),
+ (0x2FA1C, 'M', '鼻'),
+ (0x2FA1D, 'M', '𪘀'),
+ (0x2FA1E, 'X'),
+ (0x30000, 'V'),
+ (0x3134B, 'X'),
+ (0x31350, 'V'),
+ (0x323B0, 'X'),
+ (0xE0100, 'I'),
+ (0xE01F0, 'X'),
+ ]
+
+uts46data = tuple(
+ _seg_0()
+ + _seg_1()
+ + _seg_2()
+ + _seg_3()
+ + _seg_4()
+ + _seg_5()
+ + _seg_6()
+ + _seg_7()
+ + _seg_8()
+ + _seg_9()
+ + _seg_10()
+ + _seg_11()
+ + _seg_12()
+ + _seg_13()
+ + _seg_14()
+ + _seg_15()
+ + _seg_16()
+ + _seg_17()
+ + _seg_18()
+ + _seg_19()
+ + _seg_20()
+ + _seg_21()
+ + _seg_22()
+ + _seg_23()
+ + _seg_24()
+ + _seg_25()
+ + _seg_26()
+ + _seg_27()
+ + _seg_28()
+ + _seg_29()
+ + _seg_30()
+ + _seg_31()
+ + _seg_32()
+ + _seg_33()
+ + _seg_34()
+ + _seg_35()
+ + _seg_36()
+ + _seg_37()
+ + _seg_38()
+ + _seg_39()
+ + _seg_40()
+ + _seg_41()
+ + _seg_42()
+ + _seg_43()
+ + _seg_44()
+ + _seg_45()
+ + _seg_46()
+ + _seg_47()
+ + _seg_48()
+ + _seg_49()
+ + _seg_50()
+ + _seg_51()
+ + _seg_52()
+ + _seg_53()
+ + _seg_54()
+ + _seg_55()
+ + _seg_56()
+ + _seg_57()
+ + _seg_58()
+ + _seg_59()
+ + _seg_60()
+ + _seg_61()
+ + _seg_62()
+ + _seg_63()
+ + _seg_64()
+ + _seg_65()
+ + _seg_66()
+ + _seg_67()
+ + _seg_68()
+ + _seg_69()
+ + _seg_70()
+ + _seg_71()
+ + _seg_72()
+ + _seg_73()
+ + _seg_74()
+ + _seg_75()
+ + _seg_76()
+ + _seg_77()
+ + _seg_78()
+ + _seg_79()
+ + _seg_80()
+ + _seg_81()
+) # type: Tuple[Union[Tuple[int, str], Tuple[int, str, str]], ...]
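The assembled table is sorted by starting code point, and each row governs every
code point from its start up to the next row's start. A minimal lookup sketch
under that assumption (illustrative only; `lookup` is a hypothetical helper,
not part of idna's actual API):

    import bisect

    def lookup(cp, table=uts46data):
        # Rows are (start, status) or (start, status, mapping), sorted by
        # start; the row that applies is the last one whose start is <= cp.
        starts = [row[0] for row in table]  # rebuilt per call; fine for a sketch
        return table[bisect.bisect_right(starts, cp) - 1]

    lookup(0x1D400)  # -> (0x1D400, 'M', 'a'): mathematical bold capital A maps to 'a'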
diff --git a/third_party/python/pip/pip/_vendor/msgpack/__init__.py b/third_party/python/pip/pip/_vendor/msgpack/__init__.py
new file mode 100644
index 0000000000..5071021898
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/msgpack/__init__.py
@@ -0,0 +1,57 @@
+# coding: utf-8
+from .exceptions import *
+from .ext import ExtType, Timestamp
+
+import os
+import sys
+
+
+version = (1, 0, 4)
+__version__ = "1.0.4"
+
+
+if os.environ.get("MSGPACK_PUREPYTHON") or sys.version_info[0] == 2:
+ from .fallback import Packer, unpackb, Unpacker
+else:
+ try:
+ from ._cmsgpack import Packer, unpackb, Unpacker
+ except ImportError:
+ from .fallback import Packer, unpackb, Unpacker
+
+
+def pack(o, stream, **kwargs):
+ """
+ Pack object `o` and write it to `stream`
+
+ See :class:`Packer` for options.
+ """
+ packer = Packer(**kwargs)
+ stream.write(packer.pack(o))
+
+
+def packb(o, **kwargs):
+ """
+ Pack object `o` and return packed bytes
+
+ See :class:`Packer` for options.
+ """
+ return Packer(**kwargs).pack(o)
+
+
+def unpack(stream, **kwargs):
+ """
+ Unpack an object from `stream`.
+
+ Raises `ExtraData` when `stream` contains extra bytes.
+ See :class:`Unpacker` for options.
+ """
+ data = stream.read()
+ return unpackb(data, **kwargs)
+
+
+# alias for compatibility to simplejson/marshal/pickle.
+load = unpack
+loads = unpackb
+
+dump = pack
+dumps = packb
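+
+
+# A minimal round-trip sketch (illustrative values only): packb/unpackb work
+# on bytes, pack/unpack on streams, and load/loads/dump/dumps are the aliases
+# defined above.
+#
+#     >>> payload = packb({"compact": True, "schema": 0})
+#     >>> unpackb(payload)
+#     {'compact': True, 'schema': 0}
+#     >>> loads(dumps([1, 2, 3]))
+#     [1, 2, 3]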
diff --git a/third_party/python/pip/pip/_vendor/msgpack/exceptions.py b/third_party/python/pip/pip/_vendor/msgpack/exceptions.py
new file mode 100644
index 0000000000..d6d2615cfd
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/msgpack/exceptions.py
@@ -0,0 +1,48 @@
+class UnpackException(Exception):
+ """Base class for some exceptions raised while unpacking.
+
+    NOTE: unpack may raise exceptions other than subclasses of
+    UnpackException. If you want to catch all errors, catch
+ Exception instead.
+ """
+
+
+class BufferFull(UnpackException):
+ pass
+
+
+class OutOfData(UnpackException):
+ pass
+
+
+class FormatError(ValueError, UnpackException):
+ """Invalid msgpack format"""
+
+
+class StackError(ValueError, UnpackException):
+ """Too nested"""
+
+
+# Deprecated. Use ValueError instead
+UnpackValueError = ValueError
+
+
+class ExtraData(UnpackValueError):
+ """ExtraData is raised when there is trailing data.
+
+    This exception is raised only by one-shot (not streaming)
+    unpacking.
+ """
+
+ def __init__(self, unpacked, extra):
+ self.unpacked = unpacked
+ self.extra = extra
+
+ def __str__(self):
+ return "unpack(b) received extra data."
+
+
+# Deprecated. Use Exception instead to catch all exception during packing.
+PackException = Exception
+PackValueError = ValueError
+PackOverflowError = OverflowError
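+
+
+# A minimal sketch of how ExtraData carries its payload (illustrative bytes:
+# 0x01 unpacks to the int 1, and the trailing 0x02 is reported as extra):
+#
+#     >>> from pip._vendor.msgpack import unpackb
+#     >>> try:
+#     ...     unpackb(b"\x01\x02")
+#     ... except ExtraData as e:
+#     ...     (e.unpacked, bytes(e.extra))
+#     (1, b'\x02')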
diff --git a/third_party/python/pip/pip/_vendor/msgpack/ext.py b/third_party/python/pip/pip/_vendor/msgpack/ext.py
new file mode 100644
index 0000000000..25544c5556
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/msgpack/ext.py
@@ -0,0 +1,193 @@
+# coding: utf-8
+from collections import namedtuple
+import datetime
+import sys
+import struct
+
+
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+ int_types = (int, long)
+ _utc = None
+else:
+ int_types = int
+ try:
+ _utc = datetime.timezone.utc
+ except AttributeError:
+ _utc = datetime.timezone(datetime.timedelta(0))
+
+
+class ExtType(namedtuple("ExtType", "code data")):
+ """ExtType represents ext type in msgpack."""
+
+ def __new__(cls, code, data):
+ if not isinstance(code, int):
+ raise TypeError("code must be int")
+ if not isinstance(data, bytes):
+ raise TypeError("data must be bytes")
+ if not 0 <= code <= 127:
+ raise ValueError("code must be 0~127")
+ return super(ExtType, cls).__new__(cls, code, data)
+
+
+class Timestamp(object):
+ """Timestamp represents the Timestamp extension type in msgpack.
+
+ When built with Cython, msgpack uses C methods to pack and unpack `Timestamp`. When using pure-Python
+ msgpack, :func:`to_bytes` and :func:`from_bytes` are used to pack and unpack `Timestamp`.
+
+    This class is immutable: do not modify seconds or nanoseconds.
+ """
+
+ __slots__ = ["seconds", "nanoseconds"]
+
+ def __init__(self, seconds, nanoseconds=0):
+ """Initialize a Timestamp object.
+
+ :param int seconds:
+ Number of seconds since the UNIX epoch (00:00:00 UTC Jan 1 1970, minus leap seconds).
+ May be negative.
+
+ :param int nanoseconds:
+ Number of nanoseconds to add to `seconds` to get fractional time.
+ Maximum is 999_999_999. Default is 0.
+
+ Note: Negative times (before the UNIX epoch) are represented as negative seconds + positive ns.
+ """
+ if not isinstance(seconds, int_types):
+            raise TypeError("seconds must be an integer")
+ if not isinstance(nanoseconds, int_types):
+ raise TypeError("nanoseconds must be an integer")
+ if not (0 <= nanoseconds < 10**9):
+ raise ValueError(
+                "nanoseconds must be a non-negative integer less than 1000000000."
+ )
+ self.seconds = seconds
+ self.nanoseconds = nanoseconds
+
+ def __repr__(self):
+ """String representation of Timestamp."""
+ return "Timestamp(seconds={0}, nanoseconds={1})".format(
+ self.seconds, self.nanoseconds
+ )
+
+ def __eq__(self, other):
+ """Check for equality with another Timestamp object"""
+ if type(other) is self.__class__:
+ return (
+ self.seconds == other.seconds and self.nanoseconds == other.nanoseconds
+ )
+ return False
+
+ def __ne__(self, other):
+ """not-equals method (see :func:`__eq__()`)"""
+ return not self.__eq__(other)
+
+ def __hash__(self):
+ return hash((self.seconds, self.nanoseconds))
+
+ @staticmethod
+ def from_bytes(b):
+ """Unpack bytes into a `Timestamp` object.
+
+ Used for pure-Python msgpack unpacking.
+
+ :param b: Payload from msgpack ext message with code -1
+ :type b: bytes
+
+ :returns: Timestamp object unpacked from msgpack ext payload
+ :rtype: Timestamp
+ """
+ if len(b) == 4:
+ seconds = struct.unpack("!L", b)[0]
+ nanoseconds = 0
+ elif len(b) == 8:
+ data64 = struct.unpack("!Q", b)[0]
+ seconds = data64 & 0x00000003FFFFFFFF
+ nanoseconds = data64 >> 34
+ elif len(b) == 12:
+ nanoseconds, seconds = struct.unpack("!Iq", b)
+ else:
+ raise ValueError(
+ "Timestamp type can only be created from 32, 64, or 96-bit byte objects"
+ )
+ return Timestamp(seconds, nanoseconds)
+
+ def to_bytes(self):
+ """Pack this Timestamp object into bytes.
+
+ Used for pure-Python msgpack packing.
+
+        :returns: Payload for EXT message with code -1 (timestamp type)
+ :rtype: bytes
+ """
+ if (self.seconds >> 34) == 0: # seconds is non-negative and fits in 34 bits
+ data64 = self.nanoseconds << 34 | self.seconds
+ if data64 & 0xFFFFFFFF00000000 == 0:
+ # nanoseconds is zero and seconds < 2**32, so timestamp 32
+ data = struct.pack("!L", data64)
+ else:
+ # timestamp 64
+ data = struct.pack("!Q", data64)
+ else:
+ # timestamp 96
+ data = struct.pack("!Iq", self.nanoseconds, self.seconds)
+ return data
+
+ @staticmethod
+ def from_unix(unix_sec):
+ """Create a Timestamp from posix timestamp in seconds.
+
+        :param unix_sec: Posix timestamp in seconds.
+        :type unix_sec: int or float.
+ """
+ seconds = int(unix_sec // 1)
+ nanoseconds = int((unix_sec % 1) * 10**9)
+ return Timestamp(seconds, nanoseconds)
+
+ def to_unix(self):
+ """Get the timestamp as a floating-point value.
+
+ :returns: posix timestamp
+ :rtype: float
+ """
+ return self.seconds + self.nanoseconds / 1e9
+
+ @staticmethod
+ def from_unix_nano(unix_ns):
+ """Create a Timestamp from posix timestamp in nanoseconds.
+
+ :param int unix_ns: Posix timestamp in nanoseconds.
+ :rtype: Timestamp
+ """
+ return Timestamp(*divmod(unix_ns, 10**9))
+
+ def to_unix_nano(self):
+ """Get the timestamp as a unixtime in nanoseconds.
+
+ :returns: posix timestamp in nanoseconds
+ :rtype: int
+ """
+ return self.seconds * 10**9 + self.nanoseconds
+
+ def to_datetime(self):
+ """Get the timestamp as a UTC datetime.
+
+ Python 2 is not supported.
+
+        :rtype: datetime.datetime
+ """
+ return datetime.datetime.fromtimestamp(0, _utc) + datetime.timedelta(
+ seconds=self.to_unix()
+ )
+
+ @staticmethod
+ def from_datetime(dt):
+ """Create a Timestamp from datetime with tzinfo.
+
+ Python 2 is not supported.
+
+ :rtype: Timestamp
+ """
+ return Timestamp.from_unix(dt.timestamp())
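+
+
+# A minimal round-trip sketch (illustrative values): 1.5 seconds past the
+# epoch splits into whole seconds plus nanoseconds, packs to the 8-byte
+# "timestamp 64" wire form, and unpacks back unchanged.
+#
+#     >>> ts = Timestamp.from_unix(1.5)
+#     >>> (ts.seconds, ts.nanoseconds)
+#     (1, 500000000)
+#     >>> len(ts.to_bytes())
+#     8
+#     >>> Timestamp.from_bytes(ts.to_bytes()) == ts
+#     True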
diff --git a/third_party/python/pip/pip/_vendor/msgpack/fallback.py b/third_party/python/pip/pip/_vendor/msgpack/fallback.py
new file mode 100644
index 0000000000..f560c7b550
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/msgpack/fallback.py
@@ -0,0 +1,1010 @@
+"""Fallback pure Python implementation of msgpack"""
+from datetime import datetime as _DateTime
+import sys
+import struct
+
+
+PY2 = sys.version_info[0] == 2
+if PY2:
+ int_types = (int, long)
+
+ def dict_iteritems(d):
+ return d.iteritems()
+
+else:
+ int_types = int
+ unicode = str
+ xrange = range
+
+ def dict_iteritems(d):
+ return d.items()
+
+
+if sys.version_info < (3, 5):
+ # Ugly hack...
+ RecursionError = RuntimeError
+
+ def _is_recursionerror(e):
+ return (
+ len(e.args) == 1
+ and isinstance(e.args[0], str)
+ and e.args[0].startswith("maximum recursion depth exceeded")
+ )
+
+else:
+
+ def _is_recursionerror(e):
+ return True
+
+
+if hasattr(sys, "pypy_version_info"):
+    # BytesIO is slow on PyPy, StringIO is faster. However, PyPy's own
+    # StringBuilder is fastest.
+ from __pypy__ import newlist_hint
+
+ try:
+ from __pypy__.builders import BytesBuilder as StringBuilder
+ except ImportError:
+ from __pypy__.builders import StringBuilder
+ USING_STRINGBUILDER = True
+
+ class StringIO(object):
+ def __init__(self, s=b""):
+ if s:
+ self.builder = StringBuilder(len(s))
+ self.builder.append(s)
+ else:
+ self.builder = StringBuilder()
+
+ def write(self, s):
+ if isinstance(s, memoryview):
+ s = s.tobytes()
+ elif isinstance(s, bytearray):
+ s = bytes(s)
+ self.builder.append(s)
+
+ def getvalue(self):
+ return self.builder.build()
+
+else:
+ USING_STRINGBUILDER = False
+ from io import BytesIO as StringIO
+
+ newlist_hint = lambda size: []
+
+
+from .exceptions import BufferFull, OutOfData, ExtraData, FormatError, StackError
+
+from .ext import ExtType, Timestamp
+
+
+EX_SKIP = 0
+EX_CONSTRUCT = 1
+EX_READ_ARRAY_HEADER = 2
+EX_READ_MAP_HEADER = 3
+
+TYPE_IMMEDIATE = 0
+TYPE_ARRAY = 1
+TYPE_MAP = 2
+TYPE_RAW = 3
+TYPE_BIN = 4
+TYPE_EXT = 5
+
+DEFAULT_RECURSE_LIMIT = 511
+
+
+def _check_type_strict(obj, t, type=type, tuple=tuple):
+ if type(t) is tuple:
+ return type(obj) in t
+ else:
+ return type(obj) is t
+
+
+def _get_data_from_buffer(obj):
+ view = memoryview(obj)
+ if view.itemsize != 1:
+ raise ValueError("cannot unpack from multi-byte object")
+ return view
+
+
+def unpackb(packed, **kwargs):
+ """
+ Unpack an object from `packed`.
+
+ Raises ``ExtraData`` when *packed* contains extra bytes.
+ Raises ``ValueError`` when *packed* is incomplete.
+ Raises ``FormatError`` when *packed* is not valid msgpack.
+    Raises ``StackError`` when *packed* contains too deeply nested data.
+ Other exceptions can be raised during unpacking.
+
+ See :class:`Unpacker` for options.
+ """
+ unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs)
+ unpacker.feed(packed)
+ try:
+ ret = unpacker._unpack()
+ except OutOfData:
+ raise ValueError("Unpack failed: incomplete input")
+ except RecursionError as e:
+ if _is_recursionerror(e):
+ raise StackError
+ raise
+ if unpacker._got_extradata():
+ raise ExtraData(ret, unpacker._get_extradata())
+ return ret
+
+
+if sys.version_info < (2, 7, 6):
+
+ def _unpack_from(f, b, o=0):
+ """Explicit type cast for legacy struct.unpack_from"""
+ return struct.unpack_from(f, bytes(b), o)
+
+else:
+ _unpack_from = struct.unpack_from
+
+_NO_FORMAT_USED = ""
+_MSGPACK_HEADERS = {
+ 0xC4: (1, _NO_FORMAT_USED, TYPE_BIN),
+ 0xC5: (2, ">H", TYPE_BIN),
+ 0xC6: (4, ">I", TYPE_BIN),
+ 0xC7: (2, "Bb", TYPE_EXT),
+ 0xC8: (3, ">Hb", TYPE_EXT),
+ 0xC9: (5, ">Ib", TYPE_EXT),
+ 0xCA: (4, ">f"),
+ 0xCB: (8, ">d"),
+ 0xCC: (1, _NO_FORMAT_USED),
+ 0xCD: (2, ">H"),
+ 0xCE: (4, ">I"),
+ 0xCF: (8, ">Q"),
+ 0xD0: (1, "b"),
+ 0xD1: (2, ">h"),
+ 0xD2: (4, ">i"),
+ 0xD3: (8, ">q"),
+ 0xD4: (1, "b1s", TYPE_EXT),
+ 0xD5: (2, "b2s", TYPE_EXT),
+ 0xD6: (4, "b4s", TYPE_EXT),
+ 0xD7: (8, "b8s", TYPE_EXT),
+ 0xD8: (16, "b16s", TYPE_EXT),
+ 0xD9: (1, _NO_FORMAT_USED, TYPE_RAW),
+ 0xDA: (2, ">H", TYPE_RAW),
+ 0xDB: (4, ">I", TYPE_RAW),
+ 0xDC: (2, ">H", TYPE_ARRAY),
+ 0xDD: (4, ">I", TYPE_ARRAY),
+ 0xDE: (2, ">H", TYPE_MAP),
+ 0xDF: (4, ">I", TYPE_MAP),
+}
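+
+# A worked decode sketch driven by this table (illustrative bytes): header
+# byte 0xCD selects (2, ">H"), so the two bytes that follow are read as a
+# big-endian unsigned 16-bit integer.
+#
+#     >>> unpackb(b"\xcd\x01\x00")
+#     256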
+
+
+class Unpacker(object):
+ """Streaming unpacker.
+
+ Arguments:
+
+ :param file_like:
+ File-like object having `.read(n)` method.
+ If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
+
+ :param int read_size:
+ Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)
+
+ :param bool use_list:
+ If true, unpack msgpack array to Python list.
+ Otherwise, unpack to Python tuple. (default: True)
+
+ :param bool raw:
+ If true, unpack msgpack raw to Python bytes.
+ Otherwise, unpack to Python str by decoding with UTF-8 encoding (default).
+
+ :param int timestamp:
+ Control how timestamp type is unpacked:
+
+ 0 - Timestamp
+ 1 - float (Seconds from the EPOCH)
+ 2 - int (Nanoseconds from the EPOCH)
+ 3 - datetime.datetime (UTC). Python 2 is not supported.
+
+ :param bool strict_map_key:
+ If true (default), only str or bytes are accepted for map (dict) keys.
+
+ :param callable object_hook:
+ When specified, it should be callable.
+ Unpacker calls it with a dict argument after unpacking msgpack map.
+ (See also simplejson)
+
+ :param callable object_pairs_hook:
+ When specified, it should be callable.
+ Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
+ (See also simplejson)
+
+ :param str unicode_errors:
+ The error handler for decoding unicode. (default: 'strict')
+        This option should be used only when you have msgpack data which
+        contains invalid UTF-8 strings.
+
+    :param int max_buffer_size:
+        Limits size of data waiting to be unpacked. 0 means 2**31-1.
+        The default value is 100*1024*1024 (100MiB).
+        Raises `BufferFull` exception when it is insufficient.
+        You should set this parameter when unpacking data from an untrusted source.
+
+ :param int max_str_len:
+ Deprecated, use *max_buffer_size* instead.
+ Limits max length of str. (default: max_buffer_size)
+
+ :param int max_bin_len:
+ Deprecated, use *max_buffer_size* instead.
+ Limits max length of bin. (default: max_buffer_size)
+
+ :param int max_array_len:
+ Limits max length of array.
+ (default: max_buffer_size)
+
+ :param int max_map_len:
+ Limits max length of map.
+ (default: max_buffer_size//2)
+
+ :param int max_ext_len:
+ Deprecated, use *max_buffer_size* instead.
+ Limits max size of ext type. (default: max_buffer_size)
+
+    Example of streaming deserialization from a file-like object::
+
+ unpacker = Unpacker(file_like)
+ for o in unpacker:
+ process(o)
+
+    Example of streaming deserialization from a socket::
+
+ unpacker = Unpacker()
+ while True:
+ buf = sock.recv(1024**2)
+ if not buf:
+ break
+ unpacker.feed(buf)
+ for o in unpacker:
+ process(o)
+
+ Raises ``ExtraData`` when *packed* contains extra bytes.
+ Raises ``OutOfData`` when *packed* is incomplete.
+ Raises ``FormatError`` when *packed* is not valid msgpack.
+    Raises ``StackError`` when *packed* contains too deeply nested data.
+ Other exceptions can be raised during unpacking.
+ """
+
+ def __init__(
+ self,
+ file_like=None,
+ read_size=0,
+ use_list=True,
+ raw=False,
+ timestamp=0,
+ strict_map_key=True,
+ object_hook=None,
+ object_pairs_hook=None,
+ list_hook=None,
+ unicode_errors=None,
+ max_buffer_size=100 * 1024 * 1024,
+ ext_hook=ExtType,
+ max_str_len=-1,
+ max_bin_len=-1,
+ max_array_len=-1,
+ max_map_len=-1,
+ max_ext_len=-1,
+ ):
+ if unicode_errors is None:
+ unicode_errors = "strict"
+
+ if file_like is None:
+ self._feeding = True
+ else:
+ if not callable(file_like.read):
+ raise TypeError("`file_like.read` must be callable")
+ self.file_like = file_like
+ self._feeding = False
+
+ #: array of bytes fed.
+ self._buffer = bytearray()
+        #: Which position we are currently reading
+ self._buff_i = 0
+
+ # When Unpacker is used as an iterable, between the calls to next(),
+ # the buffer is not "consumed" completely, for efficiency sake.
+ # Instead, it is done sloppily. To make sure we raise BufferFull at
+ # the correct moments, we have to keep track of how sloppy we were.
+ # Furthermore, when the buffer is incomplete (that is: in the case
+ # we raise an OutOfData) we need to rollback the buffer to the correct
+ # state, which _buf_checkpoint records.
+ self._buf_checkpoint = 0
+
+ if not max_buffer_size:
+ max_buffer_size = 2**31 - 1
+ if max_str_len == -1:
+ max_str_len = max_buffer_size
+ if max_bin_len == -1:
+ max_bin_len = max_buffer_size
+ if max_array_len == -1:
+ max_array_len = max_buffer_size
+ if max_map_len == -1:
+ max_map_len = max_buffer_size // 2
+ if max_ext_len == -1:
+ max_ext_len = max_buffer_size
+
+ self._max_buffer_size = max_buffer_size
+ if read_size > self._max_buffer_size:
+ raise ValueError("read_size must be smaller than max_buffer_size")
+ self._read_size = read_size or min(self._max_buffer_size, 16 * 1024)
+ self._raw = bool(raw)
+ self._strict_map_key = bool(strict_map_key)
+ self._unicode_errors = unicode_errors
+ self._use_list = use_list
+ if not (0 <= timestamp <= 3):
+ raise ValueError("timestamp must be 0..3")
+ self._timestamp = timestamp
+ self._list_hook = list_hook
+ self._object_hook = object_hook
+ self._object_pairs_hook = object_pairs_hook
+ self._ext_hook = ext_hook
+ self._max_str_len = max_str_len
+ self._max_bin_len = max_bin_len
+ self._max_array_len = max_array_len
+ self._max_map_len = max_map_len
+ self._max_ext_len = max_ext_len
+ self._stream_offset = 0
+
+ if list_hook is not None and not callable(list_hook):
+ raise TypeError("`list_hook` is not callable")
+ if object_hook is not None and not callable(object_hook):
+ raise TypeError("`object_hook` is not callable")
+ if object_pairs_hook is not None and not callable(object_pairs_hook):
+ raise TypeError("`object_pairs_hook` is not callable")
+ if object_hook is not None and object_pairs_hook is not None:
+ raise TypeError(
+                "object_pairs_hook and object_hook are mutually exclusive"
+ )
+ if not callable(ext_hook):
+ raise TypeError("`ext_hook` is not callable")
+
+ def feed(self, next_bytes):
+ assert self._feeding
+ view = _get_data_from_buffer(next_bytes)
+ if len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size:
+ raise BufferFull
+
+ # Strip buffer before checkpoint before reading file.
+ if self._buf_checkpoint > 0:
+ del self._buffer[: self._buf_checkpoint]
+ self._buff_i -= self._buf_checkpoint
+ self._buf_checkpoint = 0
+
+ # Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython
+ self._buffer.extend(view)
+
+ def _consume(self):
+ """Gets rid of the used parts of the buffer."""
+ self._stream_offset += self._buff_i - self._buf_checkpoint
+ self._buf_checkpoint = self._buff_i
+
+ def _got_extradata(self):
+ return self._buff_i < len(self._buffer)
+
+ def _get_extradata(self):
+ return self._buffer[self._buff_i :]
+
+ def read_bytes(self, n):
+ ret = self._read(n, raise_outofdata=False)
+ self._consume()
+ return ret
+
+ def _read(self, n, raise_outofdata=True):
+ # (int) -> bytearray
+ self._reserve(n, raise_outofdata=raise_outofdata)
+ i = self._buff_i
+ ret = self._buffer[i : i + n]
+ self._buff_i = i + len(ret)
+ return ret
+
+ def _reserve(self, n, raise_outofdata=True):
+ remain_bytes = len(self._buffer) - self._buff_i - n
+
+ # Fast path: buffer has n bytes already
+ if remain_bytes >= 0:
+ return
+
+ if self._feeding:
+ self._buff_i = self._buf_checkpoint
+ raise OutOfData
+
+ # Strip buffer before checkpoint before reading file.
+ if self._buf_checkpoint > 0:
+ del self._buffer[: self._buf_checkpoint]
+ self._buff_i -= self._buf_checkpoint
+ self._buf_checkpoint = 0
+
+ # Read from file
+ remain_bytes = -remain_bytes
+ if remain_bytes + len(self._buffer) > self._max_buffer_size:
+ raise BufferFull
+ while remain_bytes > 0:
+ to_read_bytes = max(self._read_size, remain_bytes)
+ read_data = self.file_like.read(to_read_bytes)
+ if not read_data:
+ break
+ assert isinstance(read_data, bytes)
+ self._buffer += read_data
+ remain_bytes -= len(read_data)
+
+ if len(self._buffer) < n + self._buff_i and raise_outofdata:
+ self._buff_i = 0 # rollback
+ raise OutOfData
+
+ def _read_header(self):
+ typ = TYPE_IMMEDIATE
+ n = 0
+ obj = None
+ self._reserve(1)
+ b = self._buffer[self._buff_i]
+ self._buff_i += 1
+ if b & 0b10000000 == 0:
+ obj = b
+ elif b & 0b11100000 == 0b11100000:
+ obj = -1 - (b ^ 0xFF)
+ elif b & 0b11100000 == 0b10100000:
+ n = b & 0b00011111
+ typ = TYPE_RAW
+ if n > self._max_str_len:
+ raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len))
+ obj = self._read(n)
+ elif b & 0b11110000 == 0b10010000:
+ n = b & 0b00001111
+ typ = TYPE_ARRAY
+ if n > self._max_array_len:
+ raise ValueError(
+ "%s exceeds max_array_len(%s)" % (n, self._max_array_len)
+ )
+ elif b & 0b11110000 == 0b10000000:
+ n = b & 0b00001111
+ typ = TYPE_MAP
+ if n > self._max_map_len:
+ raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len))
+ elif b == 0xC0:
+ obj = None
+ elif b == 0xC2:
+ obj = False
+ elif b == 0xC3:
+ obj = True
+ elif 0xC4 <= b <= 0xC6:
+ size, fmt, typ = _MSGPACK_HEADERS[b]
+ self._reserve(size)
+ if len(fmt) > 0:
+ n = _unpack_from(fmt, self._buffer, self._buff_i)[0]
+ else:
+ n = self._buffer[self._buff_i]
+ self._buff_i += size
+ if n > self._max_bin_len:
+ raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
+ obj = self._read(n)
+ elif 0xC7 <= b <= 0xC9:
+ size, fmt, typ = _MSGPACK_HEADERS[b]
+ self._reserve(size)
+ L, n = _unpack_from(fmt, self._buffer, self._buff_i)
+ self._buff_i += size
+ if L > self._max_ext_len:
+ raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
+ obj = self._read(L)
+ elif 0xCA <= b <= 0xD3:
+ size, fmt = _MSGPACK_HEADERS[b]
+ self._reserve(size)
+ if len(fmt) > 0:
+ obj = _unpack_from(fmt, self._buffer, self._buff_i)[0]
+ else:
+ obj = self._buffer[self._buff_i]
+ self._buff_i += size
+ elif 0xD4 <= b <= 0xD8:
+ size, fmt, typ = _MSGPACK_HEADERS[b]
+ if self._max_ext_len < size:
+ raise ValueError(
+ "%s exceeds max_ext_len(%s)" % (size, self._max_ext_len)
+ )
+ self._reserve(size + 1)
+ n, obj = _unpack_from(fmt, self._buffer, self._buff_i)
+ self._buff_i += size + 1
+ elif 0xD9 <= b <= 0xDB:
+ size, fmt, typ = _MSGPACK_HEADERS[b]
+ self._reserve(size)
+ if len(fmt) > 0:
+ (n,) = _unpack_from(fmt, self._buffer, self._buff_i)
+ else:
+ n = self._buffer[self._buff_i]
+ self._buff_i += size
+ if n > self._max_str_len:
+ raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len))
+ obj = self._read(n)
+ elif 0xDC <= b <= 0xDD:
+ size, fmt, typ = _MSGPACK_HEADERS[b]
+ self._reserve(size)
+ (n,) = _unpack_from(fmt, self._buffer, self._buff_i)
+ self._buff_i += size
+ if n > self._max_array_len:
+ raise ValueError(
+ "%s exceeds max_array_len(%s)" % (n, self._max_array_len)
+ )
+ elif 0xDE <= b <= 0xDF:
+ size, fmt, typ = _MSGPACK_HEADERS[b]
+ self._reserve(size)
+ (n,) = _unpack_from(fmt, self._buffer, self._buff_i)
+ self._buff_i += size
+ if n > self._max_map_len:
+ raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len))
+ else:
+ raise FormatError("Unknown header: 0x%x" % b)
+ return typ, n, obj
+
+ def _unpack(self, execute=EX_CONSTRUCT):
+ typ, n, obj = self._read_header()
+
+ if execute == EX_READ_ARRAY_HEADER:
+ if typ != TYPE_ARRAY:
+ raise ValueError("Expected array")
+ return n
+ if execute == EX_READ_MAP_HEADER:
+ if typ != TYPE_MAP:
+ raise ValueError("Expected map")
+ return n
+ # TODO should we eliminate the recursion?
+ if typ == TYPE_ARRAY:
+ if execute == EX_SKIP:
+ for i in xrange(n):
+ # TODO check whether we need to call `list_hook`
+ self._unpack(EX_SKIP)
+ return
+ ret = newlist_hint(n)
+ for i in xrange(n):
+ ret.append(self._unpack(EX_CONSTRUCT))
+ if self._list_hook is not None:
+ ret = self._list_hook(ret)
+ # TODO is the interaction between `list_hook` and `use_list` ok?
+ return ret if self._use_list else tuple(ret)
+ if typ == TYPE_MAP:
+ if execute == EX_SKIP:
+ for i in xrange(n):
+ # TODO check whether we need to call hooks
+ self._unpack(EX_SKIP)
+ self._unpack(EX_SKIP)
+ return
+ if self._object_pairs_hook is not None:
+ ret = self._object_pairs_hook(
+ (self._unpack(EX_CONSTRUCT), self._unpack(EX_CONSTRUCT))
+ for _ in xrange(n)
+ )
+ else:
+ ret = {}
+ for _ in xrange(n):
+ key = self._unpack(EX_CONSTRUCT)
+ if self._strict_map_key and type(key) not in (unicode, bytes):
+ raise ValueError(
+ "%s is not allowed for map key" % str(type(key))
+ )
+ if not PY2 and type(key) is str:
+ key = sys.intern(key)
+ ret[key] = self._unpack(EX_CONSTRUCT)
+ if self._object_hook is not None:
+ ret = self._object_hook(ret)
+ return ret
+ if execute == EX_SKIP:
+ return
+ if typ == TYPE_RAW:
+ if self._raw:
+ obj = bytes(obj)
+ else:
+ obj = obj.decode("utf_8", self._unicode_errors)
+ return obj
+ if typ == TYPE_BIN:
+ return bytes(obj)
+ if typ == TYPE_EXT:
+ if n == -1: # timestamp
+ ts = Timestamp.from_bytes(bytes(obj))
+ if self._timestamp == 1:
+ return ts.to_unix()
+ elif self._timestamp == 2:
+ return ts.to_unix_nano()
+ elif self._timestamp == 3:
+ return ts.to_datetime()
+ else:
+ return ts
+ else:
+ return self._ext_hook(n, bytes(obj))
+ assert typ == TYPE_IMMEDIATE
+ return obj
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ ret = self._unpack(EX_CONSTRUCT)
+ self._consume()
+ return ret
+ except OutOfData:
+ self._consume()
+ raise StopIteration
+ except RecursionError:
+ raise StackError
+
+ next = __next__
+
+ def skip(self):
+ self._unpack(EX_SKIP)
+ self._consume()
+
+ def unpack(self):
+ try:
+ ret = self._unpack(EX_CONSTRUCT)
+ except RecursionError:
+ raise StackError
+ self._consume()
+ return ret
+
+ def read_array_header(self):
+ ret = self._unpack(EX_READ_ARRAY_HEADER)
+ self._consume()
+ return ret
+
+ def read_map_header(self):
+ ret = self._unpack(EX_READ_MAP_HEADER)
+ self._consume()
+ return ret
+
+ def tell(self):
+ return self._stream_offset
+
+
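+# A minimal streaming sketch (illustrative chunks): feed() buffers bytes and
+# iteration yields each complete object; b"\x92\x01\x02" is the msgpack
+# encoding of the array [1, 2].
+#
+#     >>> unpacker = Unpacker()
+#     >>> unpacker.feed(b"\x92\x01\x02")
+#     >>> list(unpacker)
+#     [[1, 2]]
+
+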
+class Packer(object):
+ """
+ MessagePack Packer
+
+ Usage::
+
+ packer = Packer()
+ astream.write(packer.pack(a))
+ astream.write(packer.pack(b))
+
+ Packer's constructor has some keyword arguments:
+
+ :param callable default:
+ Convert user type to builtin type that Packer supports.
+ See also simplejson's document.
+
+ :param bool use_single_float:
+ Use single precision float type for float. (default: False)
+
+ :param bool autoreset:
+ Reset buffer after each pack and return its content as `bytes`. (default: True).
+        If set to false, use `bytes()` to get content and `.reset()` to clear buffer.
+
+ :param bool use_bin_type:
+ Use bin type introduced in msgpack spec 2.0 for bytes.
+ It also enables str8 type for unicode. (default: True)
+
+ :param bool strict_types:
+ If set to true, types will be checked to be exact. Derived classes
+ from serializable types will not be serialized and will be
+ treated as unsupported type and forwarded to default.
+ Additionally tuples will not be serialized as lists.
+ This is useful when trying to implement accurate serialization
+ for python types.
+
+ :param bool datetime:
+ If set to true, datetime with tzinfo is packed into Timestamp type.
+ Note that the tzinfo is stripped in the timestamp.
+ You can get UTC datetime with `timestamp=3` option of the Unpacker.
+ (Python 2 is not supported).
+
+ :param str unicode_errors:
+ The error handler for encoding unicode. (default: 'strict')
+ DO NOT USE THIS!! This option is kept for very specific usage.
+ """
+
+ def __init__(
+ self,
+ default=None,
+ use_single_float=False,
+ autoreset=True,
+ use_bin_type=True,
+ strict_types=False,
+ datetime=False,
+ unicode_errors=None,
+ ):
+ self._strict_types = strict_types
+ self._use_float = use_single_float
+ self._autoreset = autoreset
+ self._use_bin_type = use_bin_type
+ self._buffer = StringIO()
+ if PY2 and datetime:
+ raise ValueError("datetime is not supported in Python 2")
+ self._datetime = bool(datetime)
+ self._unicode_errors = unicode_errors or "strict"
+ if default is not None:
+ if not callable(default):
+ raise TypeError("default must be callable")
+ self._default = default
+
+ def _pack(
+ self,
+ obj,
+ nest_limit=DEFAULT_RECURSE_LIMIT,
+ check=isinstance,
+ check_type_strict=_check_type_strict,
+ ):
+ default_used = False
+ if self._strict_types:
+ check = check_type_strict
+ list_types = list
+ else:
+ list_types = (list, tuple)
+ while True:
+ if nest_limit < 0:
+ raise ValueError("recursion limit exceeded")
+ if obj is None:
+ return self._buffer.write(b"\xc0")
+ if check(obj, bool):
+ if obj:
+ return self._buffer.write(b"\xc3")
+ return self._buffer.write(b"\xc2")
+ if check(obj, int_types):
+ if 0 <= obj < 0x80:
+ return self._buffer.write(struct.pack("B", obj))
+ if -0x20 <= obj < 0:
+ return self._buffer.write(struct.pack("b", obj))
+ if 0x80 <= obj <= 0xFF:
+ return self._buffer.write(struct.pack("BB", 0xCC, obj))
+ if -0x80 <= obj < 0:
+ return self._buffer.write(struct.pack(">Bb", 0xD0, obj))
+ if 0xFF < obj <= 0xFFFF:
+ return self._buffer.write(struct.pack(">BH", 0xCD, obj))
+ if -0x8000 <= obj < -0x80:
+ return self._buffer.write(struct.pack(">Bh", 0xD1, obj))
+ if 0xFFFF < obj <= 0xFFFFFFFF:
+ return self._buffer.write(struct.pack(">BI", 0xCE, obj))
+ if -0x80000000 <= obj < -0x8000:
+ return self._buffer.write(struct.pack(">Bi", 0xD2, obj))
+ if 0xFFFFFFFF < obj <= 0xFFFFFFFFFFFFFFFF:
+ return self._buffer.write(struct.pack(">BQ", 0xCF, obj))
+ if -0x8000000000000000 <= obj < -0x80000000:
+ return self._buffer.write(struct.pack(">Bq", 0xD3, obj))
+ if not default_used and self._default is not None:
+ obj = self._default(obj)
+ default_used = True
+ continue
+ raise OverflowError("Integer value out of range")
+ if check(obj, (bytes, bytearray)):
+ n = len(obj)
+ if n >= 2**32:
+ raise ValueError("%s is too large" % type(obj).__name__)
+ self._pack_bin_header(n)
+ return self._buffer.write(obj)
+ if check(obj, unicode):
+ obj = obj.encode("utf-8", self._unicode_errors)
+ n = len(obj)
+ if n >= 2**32:
+ raise ValueError("String is too large")
+ self._pack_raw_header(n)
+ return self._buffer.write(obj)
+ if check(obj, memoryview):
+ n = len(obj) * obj.itemsize
+ if n >= 2**32:
+ raise ValueError("Memoryview is too large")
+ self._pack_bin_header(n)
+ return self._buffer.write(obj)
+ if check(obj, float):
+ if self._use_float:
+ return self._buffer.write(struct.pack(">Bf", 0xCA, obj))
+ return self._buffer.write(struct.pack(">Bd", 0xCB, obj))
+ if check(obj, (ExtType, Timestamp)):
+ if check(obj, Timestamp):
+ code = -1
+ data = obj.to_bytes()
+ else:
+ code = obj.code
+ data = obj.data
+ assert isinstance(code, int)
+ assert isinstance(data, bytes)
+ L = len(data)
+ if L == 1:
+ self._buffer.write(b"\xd4")
+ elif L == 2:
+ self._buffer.write(b"\xd5")
+ elif L == 4:
+ self._buffer.write(b"\xd6")
+ elif L == 8:
+ self._buffer.write(b"\xd7")
+ elif L == 16:
+ self._buffer.write(b"\xd8")
+ elif L <= 0xFF:
+ self._buffer.write(struct.pack(">BB", 0xC7, L))
+ elif L <= 0xFFFF:
+ self._buffer.write(struct.pack(">BH", 0xC8, L))
+ else:
+ self._buffer.write(struct.pack(">BI", 0xC9, L))
+ self._buffer.write(struct.pack("b", code))
+ self._buffer.write(data)
+ return
+ if check(obj, list_types):
+ n = len(obj)
+ self._pack_array_header(n)
+ for i in xrange(n):
+ self._pack(obj[i], nest_limit - 1)
+ return
+ if check(obj, dict):
+ return self._pack_map_pairs(
+ len(obj), dict_iteritems(obj), nest_limit - 1
+ )
+
+ if self._datetime and check(obj, _DateTime) and obj.tzinfo is not None:
+ obj = Timestamp.from_datetime(obj)
+ default_used = 1
+ continue
+
+ if not default_used and self._default is not None:
+ obj = self._default(obj)
+ default_used = 1
+ continue
+
+ if self._datetime and check(obj, _DateTime):
+ raise ValueError("Cannot serialize %r where tzinfo=None" % (obj,))
+
+ raise TypeError("Cannot serialize %r" % (obj,))
+
+ def pack(self, obj):
+ try:
+ self._pack(obj)
+ except:
+ self._buffer = StringIO() # force reset
+ raise
+ if self._autoreset:
+ ret = self._buffer.getvalue()
+ self._buffer = StringIO()
+ return ret
+
+ def pack_map_pairs(self, pairs):
+ self._pack_map_pairs(len(pairs), pairs)
+ if self._autoreset:
+ ret = self._buffer.getvalue()
+ self._buffer = StringIO()
+ return ret
+
+ def pack_array_header(self, n):
+ if n >= 2**32:
+ raise ValueError
+ self._pack_array_header(n)
+ if self._autoreset:
+ ret = self._buffer.getvalue()
+ self._buffer = StringIO()
+ return ret
+
+ def pack_map_header(self, n):
+ if n >= 2**32:
+ raise ValueError
+ self._pack_map_header(n)
+ if self._autoreset:
+ ret = self._buffer.getvalue()
+ self._buffer = StringIO()
+ return ret
+
+ def pack_ext_type(self, typecode, data):
+ if not isinstance(typecode, int):
+ raise TypeError("typecode must have int type.")
+ if not 0 <= typecode <= 127:
+ raise ValueError("typecode should be 0-127")
+ if not isinstance(data, bytes):
+ raise TypeError("data must have bytes type")
+ L = len(data)
+ if L > 0xFFFFFFFF:
+ raise ValueError("Too large data")
+ if L == 1:
+ self._buffer.write(b"\xd4")
+ elif L == 2:
+ self._buffer.write(b"\xd5")
+ elif L == 4:
+ self._buffer.write(b"\xd6")
+ elif L == 8:
+ self._buffer.write(b"\xd7")
+ elif L == 16:
+ self._buffer.write(b"\xd8")
+ elif L <= 0xFF:
+ self._buffer.write(b"\xc7" + struct.pack("B", L))
+ elif L <= 0xFFFF:
+ self._buffer.write(b"\xc8" + struct.pack(">H", L))
+ else:
+ self._buffer.write(b"\xc9" + struct.pack(">I", L))
+ self._buffer.write(struct.pack("B", typecode))
+ self._buffer.write(data)
+
+ def _pack_array_header(self, n):
+ if n <= 0x0F:
+ return self._buffer.write(struct.pack("B", 0x90 + n))
+ if n <= 0xFFFF:
+ return self._buffer.write(struct.pack(">BH", 0xDC, n))
+ if n <= 0xFFFFFFFF:
+ return self._buffer.write(struct.pack(">BI", 0xDD, n))
+ raise ValueError("Array is too large")
+
+ def _pack_map_header(self, n):
+ if n <= 0x0F:
+ return self._buffer.write(struct.pack("B", 0x80 + n))
+ if n <= 0xFFFF:
+ return self._buffer.write(struct.pack(">BH", 0xDE, n))
+ if n <= 0xFFFFFFFF:
+ return self._buffer.write(struct.pack(">BI", 0xDF, n))
+ raise ValueError("Dict is too large")
+
+ def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
+ self._pack_map_header(n)
+ for (k, v) in pairs:
+ self._pack(k, nest_limit - 1)
+ self._pack(v, nest_limit - 1)
+
+ def _pack_raw_header(self, n):
+ if n <= 0x1F:
+ self._buffer.write(struct.pack("B", 0xA0 + n))
+ elif self._use_bin_type and n <= 0xFF:
+ self._buffer.write(struct.pack(">BB", 0xD9, n))
+ elif n <= 0xFFFF:
+ self._buffer.write(struct.pack(">BH", 0xDA, n))
+ elif n <= 0xFFFFFFFF:
+ self._buffer.write(struct.pack(">BI", 0xDB, n))
+ else:
+ raise ValueError("Raw is too large")
+
+ def _pack_bin_header(self, n):
+ if not self._use_bin_type:
+ return self._pack_raw_header(n)
+ elif n <= 0xFF:
+ return self._buffer.write(struct.pack(">BB", 0xC4, n))
+ elif n <= 0xFFFF:
+ return self._buffer.write(struct.pack(">BH", 0xC5, n))
+ elif n <= 0xFFFFFFFF:
+ return self._buffer.write(struct.pack(">BI", 0xC6, n))
+ else:
+ raise ValueError("Bin is too large")
+
+ def bytes(self):
+ """Return internal buffer contents as bytes object"""
+ return self._buffer.getvalue()
+
+ def reset(self):
+ """Reset internal buffer.
+
+ This method is useful only when autoreset=False.
+ """
+ self._buffer = StringIO()
+
+ def getbuffer(self):
+ """Return view of internal buffer."""
+ if USING_STRINGBUILDER or PY2:
+ return memoryview(self.bytes())
+ else:
+ return self._buffer.getbuffer()
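+
+
+# A minimal sketch of manual buffer management (illustrative values): with
+# autoreset=False, pack() accumulates into the internal buffer; bytes()
+# retrieves it and reset() clears it.
+#
+#     >>> packer = Packer(autoreset=False)
+#     >>> packer.pack(1)
+#     >>> packer.pack(2)
+#     >>> packer.bytes()
+#     b'\x01\x02'
+#     >>> packer.reset()
+#     >>> packer.bytes()
+#     b''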
diff --git a/third_party/python/pip/pip/_vendor/packaging/__about__.py b/third_party/python/pip/pip/_vendor/packaging/__about__.py
new file mode 100644
index 0000000000..3551bc2d29
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/__about__.py
@@ -0,0 +1,26 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "21.3"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "2014-2019 %s" % __author__
diff --git a/third_party/python/pip/pip/_vendor/packaging/__init__.py b/third_party/python/pip/pip/_vendor/packaging/__init__.py
new file mode 100644
index 0000000000..3c50c5dcfe
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/__init__.py
@@ -0,0 +1,25 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from .__about__ import (
+ __author__,
+ __copyright__,
+ __email__,
+ __license__,
+ __summary__,
+ __title__,
+ __uri__,
+ __version__,
+)
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
diff --git a/third_party/python/pip/pip/_vendor/packaging/_manylinux.py b/third_party/python/pip/pip/_vendor/packaging/_manylinux.py
new file mode 100644
index 0000000000..4c379aa6f6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/_manylinux.py
@@ -0,0 +1,301 @@
+import collections
+import functools
+import os
+import re
+import struct
+import sys
+import warnings
+from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple
+
+
+# Python does not provide platform information at sufficient granularity to
+# identify the architecture of the running executable in some cases, so we
+# determine it dynamically by reading the information from the running
+# process. This only applies on Linux, which uses the ELF format.
+class _ELFFileHeader:
+ # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+ class _InvalidELFFileHeader(ValueError):
+ """
+ An invalid ELF file header was found.
+ """
+
+ ELF_MAGIC_NUMBER = 0x7F454C46
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ EM_386 = 3
+ EM_S390 = 22
+ EM_ARM = 40
+ EM_X86_64 = 62
+ EF_ARM_ABIMASK = 0xFF000000
+ EF_ARM_ABI_VER5 = 0x05000000
+ EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+ def __init__(self, file: IO[bytes]) -> None:
+ def unpack(fmt: str) -> int:
+ try:
+ data = file.read(struct.calcsize(fmt))
+ result: Tuple[int, ...] = struct.unpack(fmt, data)
+ except struct.error:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ return result[0]
+
+ self.e_ident_magic = unpack(">I")
+ if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_class = unpack("B")
+ if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_data = unpack("B")
+ if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_version = unpack("B")
+ self.e_ident_osabi = unpack("B")
+ self.e_ident_abiversion = unpack("B")
+ self.e_ident_pad = file.read(7)
+ format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+ format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+ format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+ format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+ self.e_type = unpack(format_h)
+ self.e_machine = unpack(format_h)
+ self.e_version = unpack(format_i)
+ self.e_entry = unpack(format_p)
+ self.e_phoff = unpack(format_p)
+ self.e_shoff = unpack(format_p)
+ self.e_flags = unpack(format_i)
+ self.e_ehsize = unpack(format_h)
+ self.e_phentsize = unpack(format_h)
+ self.e_phnum = unpack(format_h)
+ self.e_shentsize = unpack(format_h)
+ self.e_shnum = unpack(format_h)
+ self.e_shstrndx = unpack(format_h)
+
+
+def _get_elf_header() -> Optional[_ELFFileHeader]:
+ try:
+ with open(sys.executable, "rb") as f:
+ elf_header = _ELFFileHeader(f)
+ except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+ return None
+ return elf_header
+
+
+def _is_linux_armhf() -> bool:
+ # hard-float ABI can be detected from the ELF header of the running
+ # process
+ # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_ARM
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+ ) == elf_header.EF_ARM_ABI_VER5
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+ ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+ return result
+
+
+def _is_linux_i686() -> bool:
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_386
+ return result
+
+
+def _have_compatible_abi(arch: str) -> bool:
+ if arch == "armv7l":
+ return _is_linux_armhf()
+ if arch == "i686":
+ return _is_linux_i686()
+ return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
+
+
+# If glibc ever changes its major version, we need to know what the last
+# minor version was, so we can build the complete list of all versions.
+# For now, guess what the highest minor version might be, assume it will
+# be 50 for testing. Once this actually happens, update the dictionary
+# with the actual value.
+_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50)
+
+
+class _GLibCVersion(NamedTuple):
+ major: int
+ minor: int
+
+
+def _glibc_version_string_confstr() -> Optional[str]:
+ """
+ Primary implementation of glibc_version_string using os.confstr.
+ """
+    # os.confstr is quite a bit faster than ctypes.CDLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module.
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+ version_string = os.confstr("CS_GNU_LIBC_VERSION")
+ assert version_string is not None
+ _, version = version_string.split()
+ except (AssertionError, AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def _glibc_version_string_ctypes() -> Optional[str]:
+ """
+ Fallback implementation of glibc_version_string using ctypes.
+ """
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ #
+ # We must also handle the special case where the executable is not a
+ # dynamically linked executable. This can occur when using musl libc,
+ # for example. In this situation, dlopen() will error, leading to an
+ # OSError. Interestingly, at least in the case of musl, there is no
+ # errno set on the OSError. The single string argument used to construct
+ # OSError comes from libc itself and is therefore not portable to
+    # hard code here. In any case, failure to call dlopen() means we
+    # cannot proceed, so we bail on our attempt.
+ try:
+ process_namespace = ctypes.CDLL(None)
+ except OSError:
+ return None
+
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str: str = gnu_get_libc_version()
+ # py2 / py3 compatibility:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+def _glibc_version_string() -> Optional[str]:
+ """Returns glibc version string, or None if not using glibc."""
+ return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _parse_glibc_version(version_str: str) -> Tuple[int, int]:
+ """Parse glibc version.
+
+ We use a regexp instead of str.split because we want to discard any
+ random junk that might come after the minor version -- this might happen
+ in patched/forked versions of glibc (e.g. Linaro's version of glibc
+ uses version strings like "2.20-2014.11"). See gh-3588.
+ """
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+ if not m:
+ warnings.warn(
+ "Expected glibc version with 2 components major.minor,"
+ " got: %s" % version_str,
+ RuntimeWarning,
+ )
+ return -1, -1
+ return int(m.group("major")), int(m.group("minor"))
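+
+
+# A minimal parsing sketch (the Linaro-style string is the example cited in
+# the docstring above; malformed input falls back to (-1, -1) with a
+# RuntimeWarning):
+#
+#     >>> _parse_glibc_version("2.20-2014.11")
+#     (2, 20)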
+
+
+@functools.lru_cache()
+def _get_glibc_version() -> Tuple[int, int]:
+ version_str = _glibc_version_string()
+ if version_str is None:
+ return (-1, -1)
+ return _parse_glibc_version(version_str)
+
+
+# From PEP 513, PEP 600
+def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool:
+ sys_glibc = _get_glibc_version()
+ if sys_glibc < version:
+ return False
+ # Check for presence of _manylinux module.
+ try:
+ import _manylinux # noqa
+ except ImportError:
+ return True
+ if hasattr(_manylinux, "manylinux_compatible"):
+ result = _manylinux.manylinux_compatible(version[0], version[1], arch)
+ if result is not None:
+ return bool(result)
+ return True
+ if version == _GLibCVersion(2, 5):
+ if hasattr(_manylinux, "manylinux1_compatible"):
+ return bool(_manylinux.manylinux1_compatible)
+ if version == _GLibCVersion(2, 12):
+ if hasattr(_manylinux, "manylinux2010_compatible"):
+ return bool(_manylinux.manylinux2010_compatible)
+ if version == _GLibCVersion(2, 17):
+ if hasattr(_manylinux, "manylinux2014_compatible"):
+ return bool(_manylinux.manylinux2014_compatible)
+ return True
+
+
+_LEGACY_MANYLINUX_MAP = {
+ # CentOS 7 w/ glibc 2.17 (PEP 599)
+ (2, 17): "manylinux2014",
+ # CentOS 6 w/ glibc 2.12 (PEP 571)
+ (2, 12): "manylinux2010",
+ # CentOS 5 w/ glibc 2.5 (PEP 513)
+ (2, 5): "manylinux1",
+}
+
+
+def platform_tags(linux: str, arch: str) -> Iterator[str]:
+ if not _have_compatible_abi(arch):
+ return
+ # Oldest glibc to be supported regardless of architecture is (2, 17).
+ too_old_glibc2 = _GLibCVersion(2, 16)
+ if arch in {"x86_64", "i686"}:
+        # On x86_64/i686 the oldest glibc to be supported is (2, 5).
+ too_old_glibc2 = _GLibCVersion(2, 4)
+ current_glibc = _GLibCVersion(*_get_glibc_version())
+ glibc_max_list = [current_glibc]
+ # We can assume compatibility across glibc major versions.
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+ #
+ # Build a list of maximum glibc versions so that we can
+ # output the canonical list of all glibc from current_glibc
+ # down to too_old_glibc2, including all intermediary versions.
+ for glibc_major in range(current_glibc.major - 1, 1, -1):
+ glibc_minor = _LAST_GLIBC_MINOR[glibc_major]
+ glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor))
+ for glibc_max in glibc_max_list:
+ if glibc_max.major == too_old_glibc2.major:
+ min_minor = too_old_glibc2.minor
+ else:
+ # For other glibc major versions oldest supported is (x, 0).
+ min_minor = -1
+ for glibc_minor in range(glibc_max.minor, min_minor, -1):
+ glibc_version = _GLibCVersion(glibc_max.major, glibc_minor)
+ tag = "manylinux_{}_{}".format(*glibc_version)
+ if _is_compatible(tag, arch, glibc_version):
+ yield linux.replace("linux", tag)
+ # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
+ if glibc_version in _LEGACY_MANYLINUX_MAP:
+ legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
+ if _is_compatible(legacy_tag, arch, glibc_version):
+ yield linux.replace("linux", legacy_tag)
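+
+
+# A minimal sketch of the expected output shape (the actual tags depend on
+# the glibc of the running interpreter; the values below are what a glibc
+# 2.17 x86_64 system would yield first):
+#
+#     >>> list(platform_tags("linux_x86_64", "x86_64"))[:3]
+#     ['manylinux_2_17_x86_64', 'manylinux2014_x86_64', 'manylinux_2_16_x86_64']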
diff --git a/third_party/python/pip/pip/_vendor/packaging/_musllinux.py b/third_party/python/pip/pip/_vendor/packaging/_musllinux.py
new file mode 100644
index 0000000000..8ac3059ba3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/_musllinux.py
@@ -0,0 +1,136 @@
+"""PEP 656 support.
+
+This module implements logic to detect if the currently running Python is
+linked against musl, and what musl version is used.
+"""
+
+import contextlib
+import functools
+import operator
+import os
+import re
+import struct
+import subprocess
+import sys
+from typing import IO, Iterator, NamedTuple, Optional, Tuple
+
+
+def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]:
+ return struct.unpack(fmt, f.read(struct.calcsize(fmt)))
+
+
+def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]:
+ """Detect musl libc location by parsing the Python executable.
+
+ Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca
+ ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html
+ """
+ f.seek(0)
+ try:
+ ident = _read_unpacked(f, "16B")
+ except struct.error:
+ return None
+ if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF.
+ return None
+ f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version.
+
+ try:
+ # e_fmt: Format for program header.
+ # p_fmt: Format for section header.
+ # p_idx: Indexes to find p_type, p_offset, and p_filesz.
+ e_fmt, p_fmt, p_idx = {
+ 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit.
+ 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit.
+ }[ident[4]]
+ except KeyError:
+ return None
+ else:
+ p_get = operator.itemgetter(*p_idx)
+
+ # Find the interpreter section and return its content.
+ try:
+ _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt)
+ except struct.error:
+ return None
+ for i in range(e_phnum + 1):
+ f.seek(e_phoff + e_phentsize * i)
+ try:
+ p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt))
+ except struct.error:
+ return None
+ if p_type != 3: # Not PT_INTERP.
+ continue
+ f.seek(p_offset)
+ interpreter = os.fsdecode(f.read(p_filesz)).strip("\0")
+ if "musl" not in interpreter:
+ return None
+ return interpreter
+ return None
+
+
+class _MuslVersion(NamedTuple):
+ major: int
+ minor: int
+
+
+def _parse_musl_version(output: str) -> Optional[_MuslVersion]:
+ lines = [n for n in (n.strip() for n in output.splitlines()) if n]
+ if len(lines) < 2 or lines[0][:4] != "musl":
+ return None
+ m = re.match(r"Version (\d+)\.(\d+)", lines[1])
+ if not m:
+ return None
+ return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2)))
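+
+
+# A minimal parsing sketch (the text mimics what the musl loader prints to
+# stderr when invoked without arguments):
+#
+#     >>> _parse_musl_version("musl libc (x86_64)\nVersion 1.2.2\nDynamic Program Loader")
+#     _MuslVersion(major=1, minor=2)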
+
+
+@functools.lru_cache()
+def _get_musl_version(executable: str) -> Optional[_MuslVersion]:
+ """Detect currently-running musl runtime version.
+
+ This is done by checking the specified executable's dynamic linking
+ information, and invoking the loader to parse its output for a version
+ string. If the loader is musl, the output would be something like::
+
+ musl libc (x86_64)
+ Version 1.2.2
+ Dynamic Program Loader
+ """
+ with contextlib.ExitStack() as stack:
+ try:
+ f = stack.enter_context(open(executable, "rb"))
+ except OSError:
+ return None
+ ld = _parse_ld_musl_from_elf(f)
+ if not ld:
+ return None
+ proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True)
+ return _parse_musl_version(proc.stderr)
+
+
+def platform_tags(arch: str) -> Iterator[str]:
+    """Generate musllinux tags compatible with the current platform.
+
+ :param arch: Should be the part of platform tag after the ``linux_``
+ prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a
+ prerequisite for the current platform to be musllinux-compatible.
+
+ :returns: An iterator of compatible musllinux tags.
+ """
+ sys_musl = _get_musl_version(sys.executable)
+ if sys_musl is None: # Python not dynamically linked against musl.
+ return
+ for minor in range(sys_musl.minor, -1, -1):
+ yield f"musllinux_{sys_musl.major}_{minor}_{arch}"
+
+
+if __name__ == "__main__": # pragma: no cover
+ import sysconfig
+
+ plat = sysconfig.get_platform()
+ assert plat.startswith("linux-"), "not linux"
+
+ print("plat:", plat)
+ print("musl:", _get_musl_version(sys.executable))
+ print("tags:", end=" ")
+ for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])):
+ print(t, end="\n ")
diff --git a/third_party/python/pip/pip/_vendor/packaging/_structures.py b/third_party/python/pip/pip/_vendor/packaging/_structures.py
new file mode 100644
index 0000000000..90a6465f96
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/_structures.py
@@ -0,0 +1,61 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+
+class InfinityType:
+ def __repr__(self) -> str:
+ return "Infinity"
+
+ def __hash__(self) -> int:
+ return hash(repr(self))
+
+ def __lt__(self, other: object) -> bool:
+ return False
+
+ def __le__(self, other: object) -> bool:
+ return False
+
+ def __eq__(self, other: object) -> bool:
+ return isinstance(other, self.__class__)
+
+ def __gt__(self, other: object) -> bool:
+ return True
+
+ def __ge__(self, other: object) -> bool:
+ return True
+
+ def __neg__(self: object) -> "NegativeInfinityType":
+ return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType:
+ def __repr__(self) -> str:
+ return "-Infinity"
+
+ def __hash__(self) -> int:
+ return hash(repr(self))
+
+ def __lt__(self, other: object) -> bool:
+ return True
+
+ def __le__(self, other: object) -> bool:
+ return True
+
+ def __eq__(self, other: object) -> bool:
+ return isinstance(other, self.__class__)
+
+ def __gt__(self, other: object) -> bool:
+ return False
+
+ def __ge__(self, other: object) -> bool:
+ return False
+
+ def __neg__(self: object) -> InfinityType:
+ return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
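+
+
+# A minimal comparison sketch: the sentinels sort above/below every other
+# value, which lets version-comparison keys pad tuples of differing length.
+#
+#     >>> Infinity > (99, 99)
+#     True
+#     >>> NegativeInfinity < ""
+#     True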
diff --git a/third_party/python/pip/pip/_vendor/packaging/markers.py b/third_party/python/pip/pip/_vendor/packaging/markers.py
new file mode 100644
index 0000000000..540e7a4dc7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/markers.py
@@ -0,0 +1,304 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import operator
+import os
+import platform
+import sys
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+from pip._vendor.pyparsing import ( # noqa: N817
+ Forward,
+ Group,
+ Literal as L,
+ ParseException,
+ ParseResults,
+ QuotedString,
+ ZeroOrMore,
+ stringEnd,
+ stringStart,
+)
+
+from .specifiers import InvalidSpecifier, Specifier
+
+__all__ = [
+ "InvalidMarker",
+ "UndefinedComparison",
+ "UndefinedEnvironmentName",
+ "Marker",
+ "default_environment",
+]
+
+Operator = Callable[[str, str], bool]
+
+
+class InvalidMarker(ValueError):
+ """
+ An invalid marker was found, users should refer to PEP 508.
+ """
+
+
+class UndefinedComparison(ValueError):
+ """
+ An invalid operation was attempted on a value that doesn't support it.
+ """
+
+
+class UndefinedEnvironmentName(ValueError):
+ """
+    A name was used that does not exist inside of the
+    environment.
+ """
+
+
+class Node:
+ def __init__(self, value: Any) -> None:
+ self.value = value
+
+ def __str__(self) -> str:
+ return str(self.value)
+
+ def __repr__(self) -> str:
+ return f"<{self.__class__.__name__}('{self}')>"
+
+ def serialize(self) -> str:
+ raise NotImplementedError
+
+
+class Variable(Node):
+ def serialize(self) -> str:
+ return str(self)
+
+
+class Value(Node):
+ def serialize(self) -> str:
+ return f'"{self}"'
+
+
+class Op(Node):
+ def serialize(self) -> str:
+ return str(self)
+
+
+VARIABLE = (
+ L("implementation_version")
+ | L("platform_python_implementation")
+ | L("implementation_name")
+ | L("python_full_version")
+ | L("platform_release")
+ | L("platform_version")
+ | L("platform_machine")
+ | L("platform_system")
+ | L("python_version")
+ | L("sys_platform")
+ | L("os_name")
+ | L("os.name") # PEP-345
+ | L("sys.platform") # PEP-345
+ | L("platform.version") # PEP-345
+ | L("platform.machine") # PEP-345
+ | L("platform.python_implementation") # PEP-345
+ | L("python_implementation") # undocumented setuptools legacy
+ | L("extra") # PEP-508
+)
+ALIASES = {
+ "os.name": "os_name",
+ "sys.platform": "sys_platform",
+ "platform.version": "platform_version",
+ "platform.machine": "platform_machine",
+ "platform.python_implementation": "platform_python_implementation",
+ "python_implementation": "platform_python_implementation",
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+ L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]:
+ if isinstance(results, ParseResults):
+ return [_coerce_parse_result(i) for i in results]
+ else:
+ return results
+
+
+def _format_marker(
+ marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True
+) -> str:
+
+ assert isinstance(marker, (list, tuple, str))
+
+ # Sometimes we have a structure like [[...]] which is a single item list
+ # where the single item is itself its own list. In that case we want to
+ # skip the rest of this function so that we don't get extraneous () on
+ # the outside.
+ if (
+ isinstance(marker, list)
+ and len(marker) == 1
+ and isinstance(marker[0], (list, tuple))
+ ):
+ return _format_marker(marker[0])
+
+ if isinstance(marker, list):
+ inner = (_format_marker(m, first=False) for m in marker)
+ if first:
+ return " ".join(inner)
+ else:
+ return "(" + " ".join(inner) + ")"
+ elif isinstance(marker, tuple):
+ return " ".join([m.serialize() for m in marker])
+ else:
+ return marker
+
+
+_operators: Dict[str, Operator] = {
+ "in": lambda lhs, rhs: lhs in rhs,
+ "not in": lambda lhs, rhs: lhs not in rhs,
+ "<": operator.lt,
+ "<=": operator.le,
+ "==": operator.eq,
+ "!=": operator.ne,
+ ">=": operator.ge,
+ ">": operator.gt,
+}
+
+
+def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
+ try:
+ spec = Specifier("".join([op.serialize(), rhs]))
+ except InvalidSpecifier:
+ pass
+ else:
+ return spec.contains(lhs)
+
+ oper: Optional[Operator] = _operators.get(op.serialize())
+ if oper is None:
+ raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
+
+ return oper(lhs, rhs)
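+
+# A rough sketch of the dispatch above (illustrative values only):
+#
+#   _eval_op("3.8", Op(">="), "3.6")  ->  Specifier(">=3.6").contains("3.8")  ->  True
+#   _eval_op("linux", Op("in"), "linux darwin")
+#       -> "inlinux darwin" is not a valid Specifier, so this falls back to
+#          _operators["in"], i.e. "linux" in "linux darwin"  ->  True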
+
+
+class Undefined:
+ pass
+
+
+_undefined = Undefined()
+
+
+def _get_env(environment: Dict[str, str], name: str) -> str:
+ value: Union[str, Undefined] = environment.get(name, _undefined)
+
+ if isinstance(value, Undefined):
+ raise UndefinedEnvironmentName(
+ f"{name!r} does not exist in evaluation environment."
+ )
+
+ return value
+
+
+def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool:
+ groups: List[List[bool]] = [[]]
+
+ for marker in markers:
+ assert isinstance(marker, (list, tuple, str))
+
+ if isinstance(marker, list):
+ groups[-1].append(_evaluate_markers(marker, environment))
+ elif isinstance(marker, tuple):
+ lhs, op, rhs = marker
+
+ if isinstance(lhs, Variable):
+ lhs_value = _get_env(environment, lhs.value)
+ rhs_value = rhs.value
+ else:
+ lhs_value = lhs.value
+ rhs_value = _get_env(environment, rhs.value)
+
+ groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+ else:
+ assert marker in ["and", "or"]
+ if marker == "or":
+ groups.append([])
+
+ return any(all(item) for item in groups)
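+
+# Grouping sketch (illustrative): a parsed marker list such as
+#   [<item1>, "and", <item2>, "or", <item3>]
+# is split into groups on each "or", giving [[item1, item2], [item3]], so the
+# result above reads as (item1 and item2) or item3.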
+
+
+def format_full_version(info: "sys._version_info") -> str:
+ version = "{0.major}.{0.minor}.{0.micro}".format(info)
+ kind = info.releaselevel
+ if kind != "final":
+ version += kind[0] + str(info.serial)
+ return version
+
+
+def default_environment() -> Dict[str, str]:
+ iver = format_full_version(sys.implementation.version)
+ implementation_name = sys.implementation.name
+ return {
+ "implementation_name": implementation_name,
+ "implementation_version": iver,
+ "os_name": os.name,
+ "platform_machine": platform.machine(),
+ "platform_release": platform.release(),
+ "platform_system": platform.system(),
+ "platform_version": platform.version(),
+ "python_full_version": platform.python_version(),
+ "platform_python_implementation": platform.python_implementation(),
+ "python_version": ".".join(platform.python_version_tuple()[:2]),
+ "sys_platform": sys.platform,
+ }
+
+
+class Marker:
+ def __init__(self, marker: str) -> None:
+ try:
+ self._markers = _coerce_parse_result(MARKER.parseString(marker))
+ except ParseException as e:
+ raise InvalidMarker(
+ f"Invalid marker: {marker!r}, parse error at "
+ f"{marker[e.loc : e.loc + 8]!r}"
+ )
+
+ def __str__(self) -> str:
+ return _format_marker(self._markers)
+
+ def __repr__(self) -> str:
+ return f"<Marker('{self}')>"
+
+ def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool:
+ """Evaluate a marker.
+
+ Return the boolean from evaluating the given marker against the
+ environment. environment is an optional argument to override all or
+ part of the determined environment.
+
+ The environment is determined from the current Python process.
+ """
+ current_environment = default_environment()
+ if environment is not None:
+ current_environment.update(environment)
+
+ return _evaluate_markers(self._markers, current_environment)
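+
+
+# Illustrative usage (results depend on the running interpreter):
+#
+#   >>> m = Marker('python_version >= "3.6" and os_name == "posix"')
+#   >>> m.evaluate()                    # checked against default_environment()
+#   True
+#   >>> m.evaluate({"os_name": "nt"})   # override part of the environment
+#   False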
diff --git a/third_party/python/pip/pip/_vendor/packaging/requirements.py b/third_party/python/pip/pip/_vendor/packaging/requirements.py
new file mode 100644
index 0000000000..1eab7dd66d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/requirements.py
@@ -0,0 +1,146 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import re
+import string
+import urllib.parse
+from typing import List, Optional as TOptional, Set
+
+from pip._vendor.pyparsing import ( # noqa
+ Combine,
+ Literal as L,
+ Optional,
+ ParseException,
+ Regex,
+ Word,
+ ZeroOrMore,
+ originalTextFor,
+ stringEnd,
+ stringStart,
+)
+
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+
+class InvalidRequirement(ValueError):
+ """
+ An invalid requirement was found; users should refer to PEP 508.
+ """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r"[^ ]+")("url")
+URL = AT + URI
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(
+ VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+)("_raw_spec")
+_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+ lambda s, l, t: Marker(s[t._original_start : t._original_end])
+)
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+# pyparsing isn't thread safe during initialization, so we do it eagerly, see
+# issue #104
+REQUIREMENT.parseString("x[]")
+
+
+class Requirement:
+ """Parse a requirement.
+
+ Parse a given requirement string into its parts, such as name, specifier,
+ URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+ string.
+ """
+
+ # TODO: Can we test whether something is contained within a requirement?
+ # If so how do we do that? Do we need to test against the _name_ of
+ # the thing as well as the version? What about the markers?
+ # TODO: Can we normalize the name and extra name?
+
+ def __init__(self, requirement_string: str) -> None:
+ try:
+ req = REQUIREMENT.parseString(requirement_string)
+ except ParseException as e:
+ raise InvalidRequirement(
+ f'Parse error at "{requirement_string[e.loc : e.loc + 8]!r}": {e.msg}'
+ )
+
+ self.name: str = req.name
+ if req.url:
+ parsed_url = urllib.parse.urlparse(req.url)
+ if parsed_url.scheme == "file":
+ if urllib.parse.urlunparse(parsed_url) != req.url:
+ raise InvalidRequirement("Invalid URL given")
+ elif not (parsed_url.scheme and parsed_url.netloc) or (
+ not parsed_url.scheme and not parsed_url.netloc
+ ):
+ raise InvalidRequirement(f"Invalid URL: {req.url}")
+ self.url: TOptional[str] = req.url
+ else:
+ self.url = None
+ self.extras: Set[str] = set(req.extras.asList() if req.extras else [])
+ self.specifier: SpecifierSet = SpecifierSet(req.specifier)
+ self.marker: TOptional[Marker] = req.marker if req.marker else None
+
+ def __str__(self) -> str:
+ parts: List[str] = [self.name]
+
+ if self.extras:
+ formatted_extras = ",".join(sorted(self.extras))
+ parts.append(f"[{formatted_extras}]")
+
+ if self.specifier:
+ parts.append(str(self.specifier))
+
+ if self.url:
+ parts.append(f"@ {self.url}")
+ if self.marker:
+ parts.append(" ")
+
+ if self.marker:
+ parts.append(f"; {self.marker}")
+
+ return "".join(parts)
+
+ def __repr__(self) -> str:
+ return f"<Requirement('{self}')>"
diff --git a/third_party/python/pip/pip/_vendor/packaging/specifiers.py b/third_party/python/pip/pip/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000000..0e218a6f9f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/specifiers.py
@@ -0,0 +1,802 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import abc
+import functools
+import itertools
+import re
+import warnings
+from typing import (
+ Callable,
+ Dict,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Pattern,
+ Set,
+ Tuple,
+ TypeVar,
+ Union,
+)
+
+from .utils import canonicalize_version
+from .version import LegacyVersion, Version, parse
+
+ParsedVersion = Union[Version, LegacyVersion]
+UnparsedVersion = Union[Version, LegacyVersion, str]
+VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion)
+CallableOperator = Callable[[ParsedVersion, str], bool]
+
+
+class InvalidSpecifier(ValueError):
+ """
+ An invalid specifier was found; users should refer to PEP 440.
+ """
+
+
+class BaseSpecifier(metaclass=abc.ABCMeta):
+ @abc.abstractmethod
+ def __str__(self) -> str:
+ """
+ Returns the str representation of this Specifier-like object. This
+ should be representative of the Specifier itself.
+ """
+
+ @abc.abstractmethod
+ def __hash__(self) -> int:
+ """
+ Returns a hash value for this Specifier-like object.
+ """
+
+ @abc.abstractmethod
+ def __eq__(self, other: object) -> bool:
+ """
+ Returns a boolean representing whether or not the two Specifier-like
+ objects are equal.
+ """
+
+ @abc.abstractproperty
+ def prereleases(self) -> Optional[bool]:
+ """
+ Returns whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @prereleases.setter
+ def prereleases(self, value: bool) -> None:
+ """
+ Sets whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @abc.abstractmethod
+ def contains(self, item: str, prereleases: Optional[bool] = None) -> bool:
+ """
+ Determines if the given item is contained within this specifier.
+ """
+
+ @abc.abstractmethod
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
+ """
+ Takes an iterable of items and filters them so that only items which
+ are contained within this specifier remain.
+ """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+ _operators: Dict[str, str] = {}
+ _regex: Pattern[str]
+
+ def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+ match = self._regex.search(spec)
+ if not match:
+ raise InvalidSpecifier(f"Invalid specifier: '{spec}'")
+
+ self._spec: Tuple[str, str] = (
+ match.group("operator").strip(),
+ match.group("version").strip(),
+ )
+
+ # Store whether or not this Specifier should accept prereleases
+ self._prereleases = prereleases
+
+ def __repr__(self) -> str:
+ pre = (
+ f", prereleases={self.prereleases!r}"
+ if self._prereleases is not None
+ else ""
+ )
+
+ return f"<{self.__class__.__name__}({str(self)!r}{pre})>"
+
+ def __str__(self) -> str:
+ return "{}{}".format(*self._spec)
+
+ @property
+ def _canonical_spec(self) -> Tuple[str, str]:
+ return self._spec[0], canonicalize_version(self._spec[1])
+
+ def __hash__(self) -> int:
+ return hash(self._canonical_spec)
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, str):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._canonical_spec == other._canonical_spec
+
+ def _get_operator(self, op: str) -> CallableOperator:
+ operator_callable: CallableOperator = getattr(
+ self, f"_compare_{self._operators[op]}"
+ )
+ return operator_callable
+
+ def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion:
+ if not isinstance(version, (LegacyVersion, Version)):
+ version = parse(version)
+ return version
+
+ @property
+ def operator(self) -> str:
+ return self._spec[0]
+
+ @property
+ def version(self) -> str:
+ return self._spec[1]
+
+ @property
+ def prereleases(self) -> Optional[bool]:
+ return self._prereleases
+
+ @prereleases.setter
+ def prereleases(self, value: bool) -> None:
+ self._prereleases = value
+
+ def __contains__(self, item: str) -> bool:
+ return self.contains(item)
+
+ def contains(
+ self, item: UnparsedVersion, prereleases: Optional[bool] = None
+ ) -> bool:
+
+ # Determine if prereleases are to be allowed or not.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # Normalize item to a Version or LegacyVersion; this allows us to have
+ # a shortcut for ``"2.0" in Specifier(">=2")``.
+ normalized_item = self._coerce_version(item)
+
+ # Determine if we should be supporting prereleases in this specifier
+ # or not; if we do not support prereleases then we can short circuit
+ # the logic if this version is a prerelease.
+ if normalized_item.is_prerelease and not prereleases:
+ return False
+
+ # Actually do the comparison to determine if this item is contained
+ # within this Specifier or not.
+ operator_callable: CallableOperator = self._get_operator(self.operator)
+ return operator_callable(normalized_item, self.version)
+
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
+
+ yielded = False
+ found_prereleases = []
+
+ kw = {"prereleases": prereleases if prereleases is not None else True}
+
+ # Attempt to iterate over all the values in the iterable and if any of
+ # them match, yield them.
+ for version in iterable:
+ parsed_version = self._coerce_version(version)
+
+ if self.contains(parsed_version, **kw):
+ # If our version is a prerelease, and we were not set to allow
+ # prereleases, then we'll store it for later in case nothing
+ # else matches this specifier.
+ if parsed_version.is_prerelease and not (
+ prereleases or self.prereleases
+ ):
+ found_prereleases.append(version)
+ # Either this is not a prerelease, or we should have been
+ # accepting prereleases from the beginning.
+ else:
+ yielded = True
+ yield version
+
+ # Now that we've iterated over everything, determine if we've yielded
+ # any values, and if we have not and we have any prereleases stored up
+ # then we will go ahead and yield the prereleases.
+ if not yielded and found_prereleases:
+ for version in found_prereleases:
+ yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(==|!=|<=|>=|<|>))
+ \s*
+ (?P<version>
+ [^,;\s)]* # Since this is a "legacy" specifier, and the version
+ # string can be just about anything, we match everything
+ # except for whitespace, a semi-colon for marker support,
+ # a closing paren since versions can be enclosed in
+ # them, and a comma since it's a version separator.
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ }
+
+ def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None:
+ super().__init__(spec, prereleases)
+
+ warnings.warn(
+ "Creating a LegacyVersion has been deprecated and will be "
+ "removed in the next major release",
+ DeprecationWarning,
+ )
+
+ def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion:
+ if not isinstance(version, LegacyVersion):
+ version = LegacyVersion(str(version))
+ return version
+
+ def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective == self._coerce_version(spec)
+
+ def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective != self._coerce_version(spec)
+
+ def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective <= self._coerce_version(spec)
+
+ def _compare_greater_than_equal(
+ self, prospective: LegacyVersion, spec: str
+ ) -> bool:
+ return prospective >= self._coerce_version(spec)
+
+ def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective < self._coerce_version(spec)
+
+ def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool:
+ return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(
+ fn: Callable[["Specifier", ParsedVersion, str], bool]
+) -> Callable[["Specifier", ParsedVersion, str], bool]:
+ @functools.wraps(fn)
+ def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool:
+ if not isinstance(prospective, Version):
+ return False
+ return fn(self, prospective, spec)
+
+ return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+ (?P<version>
+ (?:
+ # The identity operators allow for an escape hatch that will
+ # do an exact string match of the version you wish to install.
+ # This will not be parsed by PEP 440 and we cannot determine
+ # any semantic meaning from it. This operator is discouraged
+ # but included entirely as an escape hatch.
+ (?<====) # Only match for the identity operator
+ \s*
+ [^\s]* # We just match everything, except for whitespace
+ # since we are only testing for strict identity.
+ )
+ |
+ (?:
+ # The (non)equality operators allow for wild card and local
+ # versions to be specified so we have to define these two
+ # operators separately to enable that.
+ (?<===|!=) # Only match for equals and not equals
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+
+ # You cannot use a wild card and a dev or local version
+ # together so group them with a | and make them optional.
+ (?:
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+ |
+ \.\* # Wild card syntax of .*
+ )?
+ )
+ |
+ (?:
+ # The compatible operator requires at least two digits in the
+ # release segment.
+ (?<=~=) # Only match for the compatible operator
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ |
+ (?:
+ # All other operators only allow a subset of what the
+ # (non)equality operators do. Specifically they do not allow
+ # local versions to be specified nor do they allow the prefix
+ # matching wild cards.
+ (?<!==|!=|~=) # We have special cases for these
+ # operators so we want to make sure they
+ # don't match here.
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "~=": "compatible",
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ "===": "arbitrary",
+ }
+
+ @_require_version_compare
+ def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool:
+
+ # Compatible releases have an equivalent combination of >= and ==. That
+ # is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+ # implement this in terms of the other specifiers instead of
+ # implementing it ourselves. The only thing we need to do is construct
+ # the other specifiers.
+
+ # We want everything but the last item in the version, but we want to
+ # ignore suffix segments.
+ prefix = ".".join(
+ list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1]
+ )
+
+ # Add the prefix notation to the end of our string
+ prefix += ".*"
+
+ return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+ prospective, prefix
+ )
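+
+ # Worked example (illustrative): for spec "2.2.post3" the prefix drops the
+ # suffix segments and the final release component, so the check becomes
+ # >=2.2.post3 combined with ==2.* against the prospective version.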
+
+ @_require_version_compare
+ def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+ # We need special logic to handle prefix matching
+ if spec.endswith(".*"):
+ # In the case of prefix matching we want to ignore local segment.
+ prospective = Version(prospective.public)
+ # Split the spec out by dots, and pretend that there is an implicit
+ # dot in between a release segment and a pre-release segment.
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .*
+
+ # Split the prospective version out by dots, and pretend that there
+ # is an implicit dot in between a release segment and a pre-release
+ # segment.
+ split_prospective = _version_split(str(prospective))
+
+ # Shorten the prospective version to be the same length as the spec
+ # so that we can determine if the specifier is a prefix of the
+ # prospective version or not.
+ shortened_prospective = split_prospective[: len(split_spec)]
+
+ # Pad out our two sides with zeros so that they both equal the same
+ # length.
+ padded_spec, padded_prospective = _pad_version(
+ split_spec, shortened_prospective
+ )
+
+ return padded_prospective == padded_spec
+ else:
+ # Convert our spec string into a Version
+ spec_version = Version(spec)
+
+ # If the specifier does not have a local segment, then we want to
+ # act as if the prospective version also does not have a local
+ # segment.
+ if not spec_version.local:
+ prospective = Version(prospective.public)
+
+ return prospective == spec_version
+
+ @_require_version_compare
+ def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+ return not self._compare_equal(prospective, spec)
+
+ @_require_version_compare
+ def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool:
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) <= Version(spec)
+
+ @_require_version_compare
+ def _compare_greater_than_equal(
+ self, prospective: ParsedVersion, spec: str
+ ) -> bool:
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) >= Version(spec)
+
+ @_require_version_compare
+ def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is less than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective < spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+ # includes a pre-release version, we do not accept pre-release
+ # versions for the version mentioned in the specifier (e.g. <3.1 should
+ # not match 3.1.dev0, but should match 3.0.dev0).
+ if not spec.is_prerelease and prospective.is_prerelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that prospective version is both
+ # less than the spec version *and* it's not a pre-release of the same
+ # version in the spec.
+ return True
+
+ @_require_version_compare
+ def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool:
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is greater than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective > spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+ # includes a post-release version, we do not accept
+ # post-release versions for the version mentioned in the specifier
+ # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+ if not spec.is_postrelease and prospective.is_postrelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # Ensure that we do not allow a local version of the version mentioned
+ # in the specifier, which is technically greater than, to match.
+ if prospective.local is not None:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that prospective version is both
+ # greater than the spec version *and* it's not a post-release or local
+ # version of the same version in the spec.
+ return True
+
+ def _compare_arbitrary(self, prospective: Version, spec: str) -> bool:
+ return str(prospective).lower() == str(spec).lower()
+
+ @property
+ def prereleases(self) -> bool:
+
+ # If there is an explicit prereleases set for this, then we'll just
+ # blindly use that.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # Look at all of our specifiers and determine if they are inclusive
+ # operators, and if they are, whether they include an explicit
+ # prerelease.
+ operator, version = self._spec
+ if operator in ["==", ">=", "<=", "~=", "==="]:
+ # The == specifier can include a trailing .*; if it does, we
+ # want to remove it before parsing.
+ if operator == "==" and version.endswith(".*"):
+ version = version[:-2]
+
+ # Parse the version, and if it is a pre-release then this
+ # specifier allows pre-releases.
+ if parse(version).is_prerelease:
+ return True
+
+ return False
+
+ @prereleases.setter
+ def prereleases(self, value: bool) -> None:
+ self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version: str) -> List[str]:
+ result: List[str] = []
+ for item in version.split("."):
+ match = _prefix_regex.search(item)
+ if match:
+ result.extend(match.groups())
+ else:
+ result.append(item)
+ return result
+
+
+def _is_not_suffix(segment: str) -> bool:
+ return not any(
+ segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post")
+ )
+
+
+def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]:
+ left_split, right_split = [], []
+
+ # Get the release segment of our versions
+ left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+ right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+ # Get the rest of our versions
+ left_split.append(left[len(left_split[0]) :])
+ right_split.append(right[len(right_split[0]) :])
+
+ # Insert our padding
+ left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+ right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+ return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
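+
+
+# Worked examples (illustrative):
+#   _version_split("1.0rc1")                   -> ["1", "0", "rc1"]
+#   _pad_version(["1", "0"], ["1", "0", "0"])  -> (["1", "0", "0"], ["1", "0", "0"])
+#   (the shorter release segment is padded with zeros before comparison)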
+
+
+class SpecifierSet(BaseSpecifier):
+ def __init__(
+ self, specifiers: str = "", prereleases: Optional[bool] = None
+ ) -> None:
+
+ # Split on , to break each individual specifier into its own item, and
+ # strip each item to remove leading/trailing whitespace.
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+ # Parse each individual specifier, attempting first to make it a
+ # Specifier and falling back to a LegacySpecifier.
+ parsed: Set[_IndividualSpecifier] = set()
+ for specifier in split_specifiers:
+ try:
+ parsed.add(Specifier(specifier))
+ except InvalidSpecifier:
+ parsed.add(LegacySpecifier(specifier))
+
+ # Turn our parsed specifiers into a frozen set and save them for later.
+ self._specs = frozenset(parsed)
+
+ # Store our prereleases value so we can use it later to determine if
+ # we accept prereleases or not.
+ self._prereleases = prereleases
+
+ def __repr__(self) -> str:
+ pre = (
+ f", prereleases={self.prereleases!r}"
+ if self._prereleases is not None
+ else ""
+ )
+
+ return f"<SpecifierSet({str(self)!r}{pre})>"
+
+ def __str__(self) -> str:
+ return ",".join(sorted(str(s) for s in self._specs))
+
+ def __hash__(self) -> int:
+ return hash(self._specs)
+
+ def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet":
+ if isinstance(other, str):
+ other = SpecifierSet(other)
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ specifier = SpecifierSet()
+ specifier._specs = frozenset(self._specs | other._specs)
+
+ if self._prereleases is None and other._prereleases is not None:
+ specifier._prereleases = other._prereleases
+ elif self._prereleases is not None and other._prereleases is None:
+ specifier._prereleases = self._prereleases
+ elif self._prereleases == other._prereleases:
+ specifier._prereleases = self._prereleases
+ else:
+ raise ValueError(
+ "Cannot combine SpecifierSets with True and False prerelease "
+ "overrides."
+ )
+
+ return specifier
+
+ def __eq__(self, other: object) -> bool:
+ if isinstance(other, (str, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs == other._specs
+
+ def __len__(self) -> int:
+ return len(self._specs)
+
+ def __iter__(self) -> Iterator[_IndividualSpecifier]:
+ return iter(self._specs)
+
+ @property
+ def prereleases(self) -> Optional[bool]:
+
+ # If we have been given an explicit prerelease modifier, then we'll
+ # pass that through here.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # If we don't have any specifiers, and we don't have a forced value,
+ # then we'll just return None since we don't know if this should have
+ # pre-releases or not.
+ if not self._specs:
+ return None
+
+ # Otherwise we'll see if any of the given specifiers accept
+ # prereleases, if any of them do we'll return True, otherwise False.
+ return any(s.prereleases for s in self._specs)
+
+ @prereleases.setter
+ def prereleases(self, value: bool) -> None:
+ self._prereleases = value
+
+ def __contains__(self, item: UnparsedVersion) -> bool:
+ return self.contains(item)
+
+ def contains(
+ self, item: UnparsedVersion, prereleases: Optional[bool] = None
+ ) -> bool:
+
+ # Ensure that our item is a Version or LegacyVersion instance.
+ if not isinstance(item, (LegacyVersion, Version)):
+ item = parse(item)
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # We can determine if we're going to allow pre-releases by looking to
+ # see if any of the underlying items supports them. If none of them do
+ # and this item is a pre-release then we do not allow it and we can
+ # short circuit that here.
+ # Note: This means that 1.0.dev1 would not be contained in something
+ # like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0.
+ if not prereleases and item.is_prerelease:
+ return False
+
+ # We simply dispatch to the underlying specs here to make sure that the
+ # given version is contained within all of them.
+ # Note: This use of all() here means that an empty set of specifiers
+ # will always return True; this is an explicit design decision.
+ return all(s.contains(item, prereleases=prereleases) for s in self._specs)
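+
+ # Illustrative behaviour:
+ #
+ #   >>> "1.3" in SpecifierSet(">=1.0,!=1.5")
+ #   True
+ #   >>> SpecifierSet(">=1.0,!=1.5").contains("1.5")
+ #   False
+ #   >>> SpecifierSet("").contains("42")   # empty set: everything matches
+ #   True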
+
+ def filter(
+ self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None
+ ) -> Iterable[VersionTypeVar]:
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # If we have any specifiers, then we want to wrap our iterable in the
+ # filter method for each one, this will act as a logical AND amongst
+ # each specifier.
+ if self._specs:
+ for spec in self._specs:
+ iterable = spec.filter(iterable, prereleases=bool(prereleases))
+ return iterable
+ # If we do not have any specifiers, then we need to have a rough filter
+ # which will filter out any pre-releases, unless there are no final
+ # releases, and which will filter out LegacyVersion in general.
+ else:
+ filtered: List[VersionTypeVar] = []
+ found_prereleases: List[VersionTypeVar] = []
+
+ item: UnparsedVersion
+ parsed_version: Union[Version, LegacyVersion]
+
+ for item in iterable:
+ # Ensure that we have some kind of Version class for this item.
+ if not isinstance(item, (LegacyVersion, Version)):
+ parsed_version = parse(item)
+ else:
+ parsed_version = item
+
+ # Filter out any item which is parsed as a LegacyVersion
+ if isinstance(parsed_version, LegacyVersion):
+ continue
+
+ # Store any item which is a pre-release for later unless we've
+ # already found a final version or we are accepting prereleases
+ if parsed_version.is_prerelease and not prereleases:
+ if not filtered:
+ found_prereleases.append(item)
+ else:
+ filtered.append(item)
+
+ # If we've found no items except for pre-releases, then we'll go
+ # ahead and use the pre-releases
+ if not filtered and found_prereleases and prereleases is None:
+ return found_prereleases
+
+ return filtered
diff --git a/third_party/python/pip/pip/_vendor/packaging/tags.py b/third_party/python/pip/pip/_vendor/packaging/tags.py
new file mode 100644
index 0000000000..9a3d25a71c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/tags.py
@@ -0,0 +1,487 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import logging
+import platform
+import sys
+import sysconfig
+from importlib.machinery import EXTENSION_SUFFIXES
+from typing import (
+ Dict,
+ FrozenSet,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ cast,
+)
+
+from . import _manylinux, _musllinux
+
+logger = logging.getLogger(__name__)
+
+PythonVersion = Sequence[int]
+MacVersion = Tuple[int, int]
+
+INTERPRETER_SHORT_NAMES: Dict[str, str] = {
+ "python": "py", # Generic.
+ "cpython": "cp",
+ "pypy": "pp",
+ "ironpython": "ip",
+ "jython": "jy",
+}
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+class Tag:
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
+
+ __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
+
+ def __init__(self, interpreter: str, abi: str, platform: str) -> None:
+ self._interpreter = interpreter.lower()
+ self._abi = abi.lower()
+ self._platform = platform.lower()
+ # The __hash__ of every single element in a Set[Tag] will be evaluated each time
+ # that a set calls its `.isdisjoint()` method, which may be called hundreds of
+ # times when scanning a page of links for packages with tags matching that
+ # Set[Tag]. Pre-computing the value here produces significant speedups for
+ # downstream consumers.
+ self._hash = hash((self._interpreter, self._abi, self._platform))
+
+ @property
+ def interpreter(self) -> str:
+ return self._interpreter
+
+ @property
+ def abi(self) -> str:
+ return self._abi
+
+ @property
+ def platform(self) -> str:
+ return self._platform
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Tag):
+ return NotImplemented
+
+ return (
+ (self._hash == other._hash) # Short-circuit ASAP for perf reasons.
+ and (self._platform == other._platform)
+ and (self._abi == other._abi)
+ and (self._interpreter == other._interpreter)
+ )
+
+ def __hash__(self) -> int:
+ return self._hash
+
+ def __str__(self) -> str:
+ return f"{self._interpreter}-{self._abi}-{self._platform}"
+
+ def __repr__(self) -> str:
+ return f"<{self} @ {id(self)}>"
+
+
+def parse_tag(tag: str) -> FrozenSet[Tag]:
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
+ tags = set()
+ interpreters, abis, platforms = tag.split("-")
+ for interpreter in interpreters.split("."):
+ for abi in abis.split("."):
+ for platform_ in platforms.split("."):
+ tags.add(Tag(interpreter, abi, platform_))
+ return frozenset(tags)
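+
+
+# Illustrative usage: a compressed tag set expands to the cross product.
+#
+#   >>> sorted(str(t) for t in parse_tag("cp39-abi3.none-manylinux1_x86_64"))
+#   ['cp39-abi3-manylinux1_x86_64', 'cp39-none-manylinux1_x86_64']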
+
+
+def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]:
+ value = sysconfig.get_config_var(name)
+ if value is None and warn:
+ logger.debug(
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+ )
+ return value
+
+
+def _normalize_string(string: str) -> str:
+ return string.replace(".", "_").replace("-", "_")
+
+
+def _abi3_applies(python_version: PythonVersion) -> bool:
+ """
+ Determine if the Python version supports abi3.
+
+ PEP 384 was first implemented in Python 3.2.
+ """
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]:
+ py_version = tuple(py_version) # To allow for version comparison.
+ abis = []
+ version = _version_nodot(py_version[:2])
+ debug = pymalloc = ucs4 = ""
+ with_debug = _get_config_var("Py_DEBUG", warn)
+ has_refcount = hasattr(sys, "gettotalrefcount")
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+ # extension modules is the best option.
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
+ debug = "d"
+ if py_version < (3, 8):
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+ if with_pymalloc or with_pymalloc is None:
+ pymalloc = "m"
+ if py_version < (3, 3):
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+ if unicode_size == 4 or (
+ unicode_size is None and sys.maxunicode == 0x10FFFF
+ ):
+ ucs4 = "u"
+ elif debug:
+ # Debug builds can also load "normal" extension modules.
+ # We can also assume no UCS-4 or pymalloc requirement.
+ abis.append(f"cp{version}")
+ abis.insert(
+ 0,
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+ ),
+ )
+ return abis
+
+
+def cpython_tags(
+ python_version: Optional[PythonVersion] = None,
+ abis: Optional[Iterable[str]] = None,
+ platforms: Optional[Iterable[str]] = None,
+ *,
+ warn: bool = False,
+) -> Iterator[Tag]:
+ """
+ Yields the tags for a CPython interpreter.
+
+ The tags consist of:
+ - cp<python_version>-<abi>-<platform>
+ - cp<python_version>-abi3-<platform>
+ - cp<python_version>-none-<platform>
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
+
+ If python_version only specifies a major version, then user-provided ABIs and
+ the 'none' ABI tag will be used.
+
+ If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+ their normal position and not at the beginning.
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+
+ interpreter = f"cp{_version_nodot(python_version[:2])}"
+
+ if abis is None:
+ if len(python_version) > 1:
+ abis = _cpython_abis(python_version, warn)
+ else:
+ abis = []
+ abis = list(abis)
+ # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"):
+ try:
+ abis.remove(explicit_abi)
+ except ValueError:
+ pass
+
+ platforms = list(platforms or platform_tags())
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+ if _abi3_applies(python_version):
+ yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms)
+ yield from (Tag(interpreter, "none", platform_) for platform_ in platforms)
+
+ if _abi3_applies(python_version):
+ for minor_version in range(python_version[1] - 1, 1, -1):
+ for platform_ in platforms:
+ interpreter = "cp{version}".format(
+ version=_version_nodot((python_version[0], minor_version))
+ )
+ yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi() -> Iterator[str]:
+ abi = sysconfig.get_config_var("SOABI")
+ if abi:
+ yield _normalize_string(abi)
+
+
+def generic_tags(
+ interpreter: Optional[str] = None,
+ abis: Optional[Iterable[str]] = None,
+ platforms: Optional[Iterable[str]] = None,
+ *,
+ warn: bool = False,
+) -> Iterator[Tag]:
+ """
+ Yields the tags for a generic interpreter.
+
+ The tags consist of:
+ - <interpreter>-<abi>-<platform>
+
+ The "none" ABI will be added if it was not explicitly provided.
+ """
+ if not interpreter:
+ interp_name = interpreter_name()
+ interp_version = interpreter_version(warn=warn)
+ interpreter = "".join([interp_name, interp_version])
+ if abis is None:
+ abis = _generic_abi()
+ platforms = list(platforms or platform_tags())
+ abis = list(abis)
+ if "none" not in abis:
+ abis.append("none")
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]:
+ """
+ Yields Python versions in descending order.
+
+ After the latest version, the major-only version will be yielded, and then
+ all previous versions of that major version.
+ """
+ if len(py_version) > 1:
+ yield f"py{_version_nodot(py_version[:2])}"
+ yield f"py{py_version[0]}"
+ if len(py_version) > 1:
+ for minor in range(py_version[1] - 1, -1, -1):
+ yield f"py{_version_nodot((py_version[0], minor))}"
+
+
+def compatible_tags(
+ python_version: Optional[PythonVersion] = None,
+ interpreter: Optional[str] = None,
+ platforms: Optional[Iterable[str]] = None,
+) -> Iterator[Tag]:
+ """
+ Yields the sequence of tags that are compatible with a specific version of Python.
+
+ The tags consist of:
+ - py*-none-<platform>
+ - <interpreter>-none-any # ... if `interpreter` is provided.
+ - py*-none-any
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+ platforms = list(platforms or platform_tags())
+ for version in _py_interpreter_range(python_version):
+ for platform_ in platforms:
+ yield Tag(version, "none", platform_)
+ if interpreter:
+ yield Tag(interpreter, "none", "any")
+ for version in _py_interpreter_range(python_version):
+ yield Tag(version, "none", "any")
+
+
+def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str:
+ if not is_32bit:
+ return arch
+
+ if arch.startswith("ppc"):
+ return "ppc"
+
+ return "i386"
+
+
+def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]:
+ formats = [cpu_arch]
+ if cpu_arch == "x86_64":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat64", "fat32"])
+
+ elif cpu_arch == "i386":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat32", "fat"])
+
+ elif cpu_arch == "ppc64":
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4):
+ return []
+ formats.append("fat64")
+
+ elif cpu_arch == "ppc":
+ if version > (10, 6):
+ return []
+ formats.extend(["fat32", "fat"])
+
+ if cpu_arch in {"arm64", "x86_64"}:
+ formats.append("universal2")
+
+ if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
+ formats.append("universal")
+
+ return formats
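+
+
+# Illustrative result: _mac_binary_formats((10, 15), "x86_64") returns
+# ["x86_64", "intel", "fat64", "fat32", "universal2", "universal"].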
+
+
+def mac_platforms(
+ version: Optional[MacVersion] = None, arch: Optional[str] = None
+) -> Iterator[str]:
+ """
+ Yields the platform tags for a macOS system.
+
+ The `version` parameter is a two-item tuple specifying the macOS version to
+ generate platform tags for. The `arch` parameter is the CPU architecture to
+ generate platform tags for. Both parameters default to the appropriate value
+ for the current system.
+ """
+ version_str, _, cpu_arch = platform.mac_ver()
+ if version is None:
+ version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+ if arch is None:
+ arch = _mac_arch(cpu_arch)
+
+ if (10, 0) <= version < (11, 0):
+ # Prior to Mac OS 11, each yearly release of Mac OS bumped the
+ # "minor" version number. The major version was always 10.
+ for minor_version in range(version[1], -1, -1):
+ compat_version = 10, minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=10, minor=minor_version, binary_format=binary_format
+ )
+
+ if version >= (11, 0):
+ # Starting with Mac OS 11, each yearly release bumps the major version
+ # number. The minor versions are now the midyear updates.
+ for major_version in range(version[0], 10, -1):
+ compat_version = major_version, 0
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=major_version, minor=0, binary_format=binary_format
+ )
+
+ if version >= (11, 0):
+ # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
+ # Arm64 support was introduced in 11.0, so no Arm binaries from previous
+ # releases exist.
+ #
+ # However, the "universal2" binary format can have a
+ # macOS version earlier than 11.0 when the x86_64 part of the binary supports
+ # that version of macOS.
+ if arch == "x86_64":
+ for minor_version in range(16, 3, -1):
+ compat_version = 10, minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
+ else:
+ for minor_version in range(16, 3, -1):
+ compat_version = 10, minor_version
+ binary_format = "universal2"
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
+
+
+def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]:
+ linux = _normalize_string(sysconfig.get_platform())
+ if is_32bit:
+ if linux == "linux_x86_64":
+ linux = "linux_i686"
+ elif linux == "linux_aarch64":
+ linux = "linux_armv7l"
+ _, arch = linux.split("_", 1)
+ yield from _manylinux.platform_tags(linux, arch)
+ yield from _musllinux.platform_tags(arch)
+ yield linux
+
+
+def _generic_platforms() -> Iterator[str]:
+ yield _normalize_string(sysconfig.get_platform())
+
+
+def platform_tags() -> Iterator[str]:
+ """
+ Provides the platform tags for this installation.
+ """
+ if platform.system() == "Darwin":
+ return mac_platforms()
+ elif platform.system() == "Linux":
+ return _linux_platforms()
+ else:
+ return _generic_platforms()
+
+
+def interpreter_name() -> str:
+ """
+ Returns the name of the running interpreter.
+ """
+ name = sys.implementation.name
+ return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(*, warn: bool = False) -> str:
+ """
+ Returns the version of the running interpreter.
+ """
+ version = _get_config_var("py_version_nodot", warn=warn)
+ if version:
+ version = str(version)
+ else:
+ version = _version_nodot(sys.version_info[:2])
+ return version
+
+
+def _version_nodot(version: PythonVersion) -> str:
+ return "".join(map(str, version))
+
+
+def sys_tags(*, warn: bool = False) -> Iterator[Tag]:
+ """
+ Returns the sequence of tag triples for the running interpreter.
+
+ The order of the sequence corresponds to priority order for the
+ interpreter, from most to least important.
+ """
+
+ interp_name = interpreter_name()
+ if interp_name == "cp":
+ yield from cpython_tags(warn=warn)
+ else:
+ yield from generic_tags()
+
+ if interp_name == "pp":
+ yield from compatible_tags(interpreter="pp3")
+ else:
+ yield from compatible_tags()
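+
+
+# Illustrative shape of the output (highly platform dependent): on CPython 3.9
+# under Linux, iteration typically starts with specific tags such as
+# cp39-cp39-manylinux_2_17_x86_64 and ends with broad fallbacks such as
+# py3-none-any and, last of all, py30-none-any.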
diff --git a/third_party/python/pip/pip/_vendor/packaging/utils.py b/third_party/python/pip/pip/_vendor/packaging/utils.py
new file mode 100644
index 0000000000..bab11b80c6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/utils.py
@@ -0,0 +1,136 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import re
+from typing import FrozenSet, NewType, Tuple, Union, cast
+
+from .tags import Tag, parse_tag
+from .version import InvalidVersion, Version
+
+BuildTag = Union[Tuple[()], Tuple[int, str]]
+NormalizedName = NewType("NormalizedName", str)
+
+
+class InvalidWheelFilename(ValueError):
+ """
+ An invalid wheel filename was found; users should refer to PEP 427.
+ """
+
+
+class InvalidSdistFilename(ValueError):
+ """
+ An invalid sdist filename was found; users should refer to the packaging user guide.
+ """
+
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+# PEP 427: The build number must start with a digit.
+_build_tag_regex = re.compile(r"(\d+)(.*)")
+
+
+def canonicalize_name(name: str) -> NormalizedName:
+ # This is taken from PEP 503.
+ value = _canonicalize_regex.sub("-", name).lower()
+ return cast(NormalizedName, value)
+
+
+def canonicalize_version(version: Union[Version, str]) -> str:
+ """
+ This is very similar to Version.__str__, but has one subtle difference
+ in the way it handles the release segment.
+ """
+ if isinstance(version, str):
+ try:
+ parsed = Version(version)
+ except InvalidVersion:
+ # Legacy versions cannot be normalized
+ return version
+ else:
+ parsed = version
+
+ parts = []
+
+ # Epoch
+ if parsed.epoch != 0:
+ parts.append(f"{parsed.epoch}!")
+
+ # Release segment
+ # NB: This strips trailing '.0's to normalize
+ parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release)))
+
+ # Pre-release
+ if parsed.pre is not None:
+ parts.append("".join(str(x) for x in parsed.pre))
+
+ # Post-release
+ if parsed.post is not None:
+ parts.append(f".post{parsed.post}")
+
+ # Development release
+ if parsed.dev is not None:
+ parts.append(f".dev{parsed.dev}")
+
+ # Local version segment
+ if parsed.local is not None:
+ parts.append(f"+{parsed.local}")
+
+ return "".join(parts)
+
+
+def parse_wheel_filename(
+ filename: str,
+) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]:
+ if not filename.endswith(".whl"):
+ raise InvalidWheelFilename(
+ f"Invalid wheel filename (extension must be '.whl'): {filename}"
+ )
+
+ filename = filename[:-4]
+ dashes = filename.count("-")
+ if dashes not in (4, 5):
+ raise InvalidWheelFilename(
+ f"Invalid wheel filename (wrong number of parts): {filename}"
+ )
+
+ parts = filename.split("-", dashes - 2)
+ name_part = parts[0]
+ # See PEP 427 for the rules on escaping the project name
+ if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None:
+ raise InvalidWheelFilename(f"Invalid project name: {filename}")
+ name = canonicalize_name(name_part)
+ version = Version(parts[1])
+ if dashes == 5:
+ build_part = parts[2]
+ build_match = _build_tag_regex.match(build_part)
+ if build_match is None:
+ raise InvalidWheelFilename(
+ f"Invalid build number: {build_part} in '{filename}'"
+ )
+ build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2)))
+ else:
+ build = ()
+ tags = parse_tag(parts[-1])
+ return (name, version, build, tags)
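+
+
+# Illustrative usage:
+#
+#   >>> name, ver, build, tags = parse_wheel_filename("pip-20.3.1-py2.py3-none-any.whl")
+#   >>> name, str(ver), build
+#   ('pip', '20.3.1', ())
+#   >>> sorted(str(t) for t in tags)
+#   ['py2-none-any', 'py3-none-any']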
+
+
+def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]:
+ if filename.endswith(".tar.gz"):
+ file_stem = filename[: -len(".tar.gz")]
+ elif filename.endswith(".zip"):
+ file_stem = filename[: -len(".zip")]
+ else:
+ raise InvalidSdistFilename(
+ f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):"
+ f" {filename}"
+ )
+
+ # We are requiring a PEP 440 version, which cannot contain dashes,
+ # so we split on the last dash.
+ name_part, sep, version_part = file_stem.rpartition("-")
+ if not sep:
+ raise InvalidSdistFilename(f"Invalid sdist filename: {filename}")
+
+ name = canonicalize_name(name_part)
+ version = Version(version_part)
+ return (name, version)
diff --git a/third_party/python/pip/pip/_vendor/packaging/version.py b/third_party/python/pip/pip/_vendor/packaging/version.py
new file mode 100644
index 0000000000..de9a09a4ed
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/packaging/version.py
@@ -0,0 +1,504 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+import collections
+import itertools
+import re
+import warnings
+from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+SubLocalType = Union[InfiniteTypes, int, str]
+LocalType = Union[
+ NegativeInfinityType,
+ Tuple[
+ Union[
+ SubLocalType,
+ Tuple[SubLocalType, str],
+ Tuple[NegativeInfinityType, SubLocalType],
+ ],
+ ...,
+ ],
+]
+CmpKey = Tuple[
+ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+]
+LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+VersionComparisonMethod = Callable[
+ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+]
+
+_Version = collections.namedtuple(
+ "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version: str) -> Union["LegacyVersion", "Version"]:
+ """
+ Parse the given version string and return either a :class:`Version` object
+ or a :class:`LegacyVersion` object depending on whether the given version is
+ a valid PEP 440 version or a legacy version.
+ """
+ try:
+ return Version(version)
+ except InvalidVersion:
+ return LegacyVersion(version)
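+
+
+# Illustrative dispatch:
+#
+#   >>> parse("1.0.post1")
+#   <Version('1.0.post1')>
+#   >>> parse("french toast")   # not PEP 440; emits a DeprecationWarning
+#   <LegacyVersion('french toast')>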
+
+
+class InvalidVersion(ValueError):
+ """
+ An invalid version was found; users should refer to PEP 440.
+ """
+
+
+class _BaseVersion:
+ _key: Union[CmpKey, LegacyCmpKey]
+
+ def __hash__(self) -> int:
+ return hash(self._key)
+
+ # Please keep the duplicated `isinstance` check
+ # in the six comparisons hereunder
+ # unless you find a way to avoid it without adding function-call overhead.
+ def __lt__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key < other._key
+
+ def __le__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key <= other._key
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key == other._key
+
+ def __ge__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key >= other._key
+
+ def __gt__(self, other: "_BaseVersion") -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key > other._key
+
+ def __ne__(self, other: object) -> bool:
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return self._key != other._key
+
+
+class LegacyVersion(_BaseVersion):
+ def __init__(self, version: str) -> None:
+ self._version = str(version)
+ self._key = _legacy_cmpkey(self._version)
+
+ warnings.warn(
+ "Creating a LegacyVersion has been deprecated and will be "
+ "removed in the next major release",
+ DeprecationWarning,
+ )
+
+ def __str__(self) -> str:
+ return self._version
+
+ def __repr__(self) -> str:
+ return f"<LegacyVersion('{self}')>"
+
+ @property
+ def public(self) -> str:
+ return self._version
+
+ @property
+ def base_version(self) -> str:
+ return self._version
+
+ @property
+ def epoch(self) -> int:
+ return -1
+
+ @property
+ def release(self) -> None:
+ return None
+
+ @property
+ def pre(self) -> None:
+ return None
+
+ @property
+ def post(self) -> None:
+ return None
+
+ @property
+ def dev(self) -> None:
+ return None
+
+ @property
+ def local(self) -> None:
+ return None
+
+ @property
+ def is_prerelease(self) -> bool:
+ return False
+
+ @property
+ def is_postrelease(self) -> bool:
+ return False
+
+ @property
+ def is_devrelease(self) -> bool:
+ return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+ "pre": "c",
+ "preview": "c",
+ "-": "final-",
+ "rc": "c",
+ "dev": "@",
+}
+
+
+def _parse_version_parts(s: str) -> Iterator[str]:
+ for part in _legacy_version_component_re.split(s):
+ part = _legacy_version_replacement_map.get(part, part)
+
+ if not part or part == ".":
+ continue
+
+ if part[:1] in "0123456789":
+ # pad for numeric comparison
+ yield part.zfill(8)
+ else:
+ yield "*" + part
+
+ # ensure that alpha/beta/candidate are before final
+ yield "*final"
+
+
+def _legacy_cmpkey(version: str) -> LegacyCmpKey:
+
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an
+    # epoch greater than or equal to 0. This will effectively sort the
+    # LegacyVersion, which uses the de facto standard originally implemented
+    # by setuptools, before all PEP 440 versions.
+ epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version in setuptools
+    # prior to its adoption of the packaging library.
+ parts: List[str] = []
+ for part in _parse_version_parts(version.lower()):
+ if part.startswith("*"):
+ # remove "-" before a prerelease tag
+ if part < "*final":
+ while parts and parts[-1] == "*final-":
+ parts.pop()
+
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1] == "00000000":
+ parts.pop()
+
+ parts.append(part)
+
+ return epoch, tuple(parts)
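+
+# Illustrative behavior (a sketch, not part of the vendored module): legacy
+# keys zero-pad numeric parts and pin the epoch at -1, so every LegacyVersion
+# sorts before any PEP 440 Version, e.g.
+#
+#     >>> _legacy_cmpkey("1.0")
+#     (-1, ('00000001', '*final'))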
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+ v?
+ (?:
+ (?:(?P<epoch>[0-9]+)!)? # epoch
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
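+
+# Illustrative reuse (a sketch, not part of the vendored module; the
+# `_candidate_re` name is hypothetical): third-party code is expected to
+# anchor the pattern itself, exactly as the Version class below does, e.g.
+#
+#     _candidate_re = re.compile(
+#         r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE
+#     )
+#     assert _candidate_re.match("1.0rc1") is not None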
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ def __init__(self, version: str) -> None:
+
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion(f"Invalid version: '{version}'")
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+ post=_parse_letter_version(
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+ ),
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self) -> str:
+ return f"<Version('{self}')>"
+
+ def __str__(self) -> str:
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append(f"{self.epoch}!")
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ # Pre-release
+ if self.pre is not None:
+ parts.append("".join(str(x) for x in self.pre))
+
+ # Post-release
+ if self.post is not None:
+ parts.append(f".post{self.post}")
+
+ # Development release
+ if self.dev is not None:
+ parts.append(f".dev{self.dev}")
+
+ # Local version segment
+ if self.local is not None:
+ parts.append(f"+{self.local}")
+
+ return "".join(parts)
+
+ @property
+ def epoch(self) -> int:
+ _epoch: int = self._version.epoch
+ return _epoch
+
+ @property
+ def release(self) -> Tuple[int, ...]:
+ _release: Tuple[int, ...] = self._version.release
+ return _release
+
+ @property
+ def pre(self) -> Optional[Tuple[str, int]]:
+ _pre: Optional[Tuple[str, int]] = self._version.pre
+ return _pre
+
+ @property
+ def post(self) -> Optional[int]:
+ return self._version.post[1] if self._version.post else None
+
+ @property
+ def dev(self) -> Optional[int]:
+ return self._version.dev[1] if self._version.dev else None
+
+ @property
+ def local(self) -> Optional[str]:
+ if self._version.local:
+ return ".".join(str(x) for x in self._version.local)
+ else:
+ return None
+
+ @property
+ def public(self) -> str:
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self) -> str:
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append(f"{self.epoch}!")
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ return "".join(parts)
+
+ @property
+ def is_prerelease(self) -> bool:
+ return self.dev is not None or self.pre is not None
+
+ @property
+ def is_postrelease(self) -> bool:
+ return self.post is not None
+
+ @property
+ def is_devrelease(self) -> bool:
+ return self.dev is not None
+
+ @property
+ def major(self) -> int:
+ return self.release[0] if len(self.release) >= 1 else 0
+
+ @property
+ def minor(self) -> int:
+ return self.release[1] if len(self.release) >= 2 else 0
+
+ @property
+ def micro(self) -> int:
+ return self.release[2] if len(self.release) >= 3 else 0
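+
+# Illustrative usage (a sketch, not part of the vendored module):
+#
+#     >>> v = Version("1!2.0.3rc1+ubuntu.1")
+#     >>> (v.epoch, v.release, v.pre, v.local)
+#     (1, (2, 0, 3), ('rc', 1), 'ubuntu.1')
+#     >>> v.public, v.base_version
+#     ('1!2.0.3rc1', '1!2.0.3')
+#     >>> Version("1.0") < Version("1.0.post1") < Version("1.1")
+#     True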
+
+
+def _parse_letter_version(
+ letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+ if letter:
+ # We consider there to be an implicit 0 in a pre-release if there is
+ # not a numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+        # We assume that if we are given a number but not a letter, then this
+        # is using the implicit post release syntax (e.g. 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+ return None
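+
+# Illustrative behavior (a sketch, not part of the vendored module):
+#
+#     >>> _parse_letter_version("alpha", None)   # implicit 0, normalized
+#     ('a', 0)
+#     >>> _parse_letter_version(None, "2")       # implicit post (e.g. 1.0-2)
+#     ('post', 2)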
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_separators.split(local)
+ )
+ return None
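+
+# Illustrative behavior (a sketch, not part of the vendored module):
+#
+#     >>> _parse_local_version("Ubuntu.12-04")
+#     ('ubuntu', 12, 4)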
+
+
+def _cmpkey(
+ epoch: int,
+ release: Tuple[int, ...],
+ pre: Optional[Tuple[str, int]],
+ post: Optional[Tuple[str, int]],
+ dev: Optional[Tuple[str, int]],
+ local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all of the now
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # rest back into the correct order, and use that tuple as our sorting key.
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
+ # if there is not a pre or a post segment. If we have one of those then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ _pre: PrePostDevType = NegativeInfinity
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ _pre = Infinity
+ else:
+ _pre = pre
+
+ # Versions without a post segment should sort before those with one.
+ if post is None:
+ _post: PrePostDevType = NegativeInfinity
+
+ else:
+ _post = post
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ _dev: PrePostDevType = Infinity
+
+ else:
+ _dev = dev
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ _local: LocalType = NegativeInfinity
+ else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP 440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+ _local = tuple(
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+ )
+
+ return epoch, _release, _pre, _post, _dev, _local
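+
+# Illustrative net effect (a sketch, not part of the vendored module): the
+# Infinity/NegativeInfinity placeholders above give the PEP 440 total order,
+# e.g.
+#
+#     >>> sorted(Version(v) for v in ["1.0", "1.0.dev0", "1.0a1", "1.0.post1"])
+#     [<Version('1.0.dev0')>, <Version('1.0a1')>, <Version('1.0')>, <Version('1.0.post1')>]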
diff --git a/third_party/python/pip/pip/_vendor/pkg_resources/__init__.py b/third_party/python/pip/pip/_vendor/pkg_resources/__init__.py
new file mode 100644
index 0000000000..4cd562cf94
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pkg_resources/__init__.py
@@ -0,0 +1,3296 @@
+# coding: utf-8
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof. The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is. Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files. It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+from __future__ import absolute_import
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
+import errno
+import tempfile
+import textwrap
+import itertools
+import inspect
+import ntpath
+import posixpath
+from pkgutil import get_importer
+
+try:
+ import _imp
+except ImportError:
+ # Python 3.2 compatibility
+ import imp as _imp
+
+try:
+ FileExistsError
+except NameError:
+ FileExistsError = OSError
+
+from pip._vendor import six
+from pip._vendor.six.moves import urllib, map, filter
+
+# capture these to bypass sandboxing
+from os import utime
+try:
+ from os import mkdir, rename, unlink
+ WRITE_SUPPORT = True
+except ImportError:
+ # no write support, probably under GAE
+ WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+ import importlib.machinery as importlib_machinery
+ # access attribute to force import under delayed import mechanisms.
+ importlib_machinery.__name__
+except ImportError:
+ importlib_machinery = None
+
+from . import py31compat
+from pip._vendor import platformdirs
+from pip._vendor import packaging
+__import__('pip._vendor.packaging.version')
+__import__('pip._vendor.packaging.specifiers')
+__import__('pip._vendor.packaging.requirements')
+__import__('pip._vendor.packaging.markers')
+
+
+__metaclass__ = type
+
+
+if (3, 0) < sys.version_info < (3, 5):
+ raise RuntimeError("Python 3.5 or later is required")
+
+if six.PY2:
+ # Those builtin exceptions are only defined in Python 3
+ PermissionError = None
+ NotADirectoryError = None
+
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
+add_activation_listener = None
+resources_stream = None
+cleanup_resources = None
+resource_dir = None
+resource_stream = None
+set_extraction_path = None
+resource_isdir = None
+resource_string = None
+iter_entry_points = None
+resource_listdir = None
+resource_filename = None
+resource_exists = None
+_distribution_finders = None
+_namespace_handlers = None
+_namespace_packages = None
+
+
+class PEP440Warning(RuntimeWarning):
+ """
+ Used when there is an issue with a version or specifier not complying with
+ PEP 440.
+ """
+
+
+def parse_version(v):
+ try:
+ return packaging.version.Version(v)
+ except packaging.version.InvalidVersion:
+ return packaging.version.LegacyVersion(v)
+
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+ globals().update(kw)
+ _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+ state = {}
+ g = globals()
+ for k, v in _state_vars.items():
+ state[k] = g['_sget_' + v](g[k])
+ return state
+
+
+def __setstate__(state):
+ g = globals()
+ for k, v in state.items():
+ g['_sset_' + _state_vars[k]](k, g[k], v)
+ return state
+
+
+def _sget_dict(val):
+ return val.copy()
+
+
+def _sset_dict(key, ob, state):
+ ob.clear()
+ ob.update(state)
+
+
+def _sget_object(val):
+ return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+ ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
+def get_supported_platform():
+ """Return this platform's maximum compatible version.
+
+ distutils.util.get_platform() normally reports the minimum version
+ of Mac OS X that would be required to *use* extensions produced by
+ distutils. But what we want when checking compatibility is to know the
+ version of Mac OS X that we are *running*. To allow usage of packages that
+ explicitly require a newer version of Mac OS X, we must also know the
+ current version of the OS.
+
+ If this condition occurs for any other platform with a version in its
+ platform strings, this function should be extended accordingly.
+ """
+ plat = get_build_platform()
+ m = macosVersionString.match(plat)
+ if m is not None and sys.platform == "darwin":
+ try:
+ plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3))
+ except ValueError:
+ # not Mac OS X
+ pass
+ return plat
+
+
+__all__ = [
+ # Basic resource access and distribution/entry point discovery
+ 'require', 'run_script', 'get_provider', 'get_distribution',
+ 'load_entry_point', 'get_entry_map', 'get_entry_info',
+ 'iter_entry_points',
+ 'resource_string', 'resource_stream', 'resource_filename',
+ 'resource_listdir', 'resource_exists', 'resource_isdir',
+
+ # Environmental control
+ 'declare_namespace', 'working_set', 'add_activation_listener',
+ 'find_distributions', 'set_extraction_path', 'cleanup_resources',
+ 'get_default_cache',
+
+ # Primary implementation classes
+ 'Environment', 'WorkingSet', 'ResourceManager',
+ 'Distribution', 'Requirement', 'EntryPoint',
+
+ # Exceptions
+ 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+ 'UnknownExtra', 'ExtractionError',
+
+ # Warnings
+ 'PEP440Warning',
+
+ # Parsing functions and string utilities
+ 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+ 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+ 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+ # filesystem utilities
+ 'ensure_directory', 'normalize_path',
+
+ # Distribution "precedence" constants
+ 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+ # "Provider" interfaces, implementations, and registration/lookup APIs
+ 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+ 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+ 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+ 'register_finder', 'register_namespace_handler', 'register_loader_type',
+ 'fixup_namespace_packages', 'get_importer',
+
+ # Warnings
+ 'PkgResourcesDeprecationWarning',
+
+ # Deprecated/backward compatibility only
+ 'run_main', 'AvailableDistributions',
+]
+
+
+class ResolutionError(Exception):
+ """Abstract base for dependency resolution errors"""
+
+ def __repr__(self):
+ return self.__class__.__name__ + repr(self.args)
+
+
+class VersionConflict(ResolutionError):
+ """
+ An already-installed version conflicts with the requested version.
+
+ Should be initialized with the installed Distribution and the requested
+ Requirement.
+ """
+
+ _template = "{self.dist} is installed but {self.req} is required"
+
+ @property
+ def dist(self):
+ return self.args[0]
+
+ @property
+ def req(self):
+ return self.args[1]
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def with_context(self, required_by):
+ """
+ If required_by is non-empty, return a version of self that is a
+ ContextualVersionConflict.
+ """
+ if not required_by:
+ return self
+ args = self.args + (required_by,)
+ return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+ """
+ A VersionConflict that accepts a third parameter, the set of the
+ requirements that required the installed Distribution.
+ """
+
+ _template = VersionConflict._template + ' by {self.required_by}'
+
+ @property
+ def required_by(self):
+ return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+ """A requested distribution was not found"""
+
+ _template = ("The '{self.req}' distribution was not found "
+ "and is required by {self.requirers_str}")
+
+ @property
+ def req(self):
+ return self.args[0]
+
+ @property
+ def requirers(self):
+ return self.args[1]
+
+ @property
+ def requirers_str(self):
+ if not self.requirers:
+ return 'the application'
+ return ', '.join(self.requirers)
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def __str__(self):
+ return self.report()
+
+
+class UnknownExtra(ResolutionError):
+ """Distribution doesn't have an "extra feature" of the given name"""
+
+
+_provider_factories = {}
+
+PY_MAJOR = '{}.{}'.format(*sys.version_info)
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(loader_type, provider_factory):
+ """Register `provider_factory` to make providers for `loader_type`
+
+ `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+ and `provider_factory` is a function that, passed a *module* object,
+ returns an ``IResourceProvider`` for that module.
+ """
+ _provider_factories[loader_type] = provider_factory
+
+
+def get_provider(moduleOrReq):
+ """Return an IResourceProvider for the named module or requirement"""
+ if isinstance(moduleOrReq, Requirement):
+ return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+ try:
+ module = sys.modules[moduleOrReq]
+ except KeyError:
+ __import__(moduleOrReq)
+ module = sys.modules[moduleOrReq]
+ loader = getattr(module, '__loader__', None)
+ return _find_adapter(_provider_factories, loader)(module)
+
+
+def _macosx_vers(_cache=[]):
+ if not _cache:
+ version = platform.mac_ver()[0]
+ # fallback for MacPorts
+ if version == '':
+ plist = '/System/Library/CoreServices/SystemVersion.plist'
+ if os.path.exists(plist):
+ if hasattr(plistlib, 'readPlist'):
+ plist_content = plistlib.readPlist(plist)
+ if 'ProductVersion' in plist_content:
+ version = plist_content['ProductVersion']
+
+ _cache.append(version.split('.'))
+ return _cache[0]
+
+
+def _macosx_arch(machine):
+ return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
+def get_build_platform():
+ """Return this platform's string for platform-specific distributions
+
+ XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+ needs some hacks for Linux and Mac OS X.
+ """
+ from sysconfig import get_platform
+
+ plat = get_platform()
+ if sys.platform == "darwin" and not plat.startswith('macosx-'):
+ try:
+ version = _macosx_vers()
+ machine = os.uname()[4].replace(" ", "_")
+ return "macosx-%d.%d-%s" % (
+ int(version[0]), int(version[1]),
+ _macosx_arch(machine),
+ )
+ except ValueError:
+ # if someone is running a non-Mac darwin system, this will fall
+ # through to the default implementation
+ pass
+ return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
+
+def compatible_platforms(provided, required):
+ """Can code for the `provided` platform run on the `required` platform?
+
+ Returns true if either platform is ``None``, or the platforms are equal.
+
+ XXX Needs compatibility checks for Linux and other unixy OSes.
+ """
+ if provided is None or required is None or provided == required:
+ # easy case
+ return True
+
+ # Mac OS X special cases
+ reqMac = macosVersionString.match(required)
+ if reqMac:
+ provMac = macosVersionString.match(provided)
+
+ # is this a Mac package?
+ if not provMac:
+ # this is backwards compatibility for packages built before
+ # setuptools 0.6. All packages built after this point will
+ # use the new macosx designation.
+ provDarwin = darwinVersionString.match(provided)
+ if provDarwin:
+ dversion = int(provDarwin.group(1))
+ macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+ if dversion == 7 and macosversion >= "10.3" or \
+ dversion == 8 and macosversion >= "10.4":
+ return True
+ # egg isn't macosx or legacy darwin
+ return False
+
+ # are they the same major version and machine type?
+ if provMac.group(1) != reqMac.group(1) or \
+ provMac.group(3) != reqMac.group(3):
+ return False
+
+ # is the required OS major update >= the provided one?
+ if int(provMac.group(2)) > int(reqMac.group(2)):
+ return False
+
+ return True
+
+ # XXX Linux and other platforms' special cases should go here
+ return False
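+
+# Illustrative behavior (a sketch, not part of the vendored module):
+#
+#     >>> compatible_platforms(None, "macosx-10.12-x86_64")  # None matches any
+#     True
+#     >>> compatible_platforms("macosx-10.9-x86_64", "macosx-10.12-x86_64")
+#     True
+#     >>> compatible_platforms("macosx-10.12-arm64", "macosx-10.12-x86_64")
+#     False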
+
+
+def run_script(dist_spec, script_name):
+ """Locate distribution `dist_spec` and run its `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ require(dist_spec)[0].run_script(script_name, ns)
+
+
+# backward compatibility
+run_main = run_script
+
+
+def get_distribution(dist):
+ """Return a current distribution object for a Requirement or string"""
+ if isinstance(dist, six.string_types):
+ dist = Requirement.parse(dist)
+ if isinstance(dist, Requirement):
+ dist = get_provider(dist)
+ if not isinstance(dist, Distribution):
+ raise TypeError("Expected string, Requirement, or Distribution", dist)
+ return dist
+
+
+def load_entry_point(dist, group, name):
+ """Return `name` entry point of `group` for `dist` or raise ImportError"""
+ return get_distribution(dist).load_entry_point(group, name)
+
+
+def get_entry_map(dist, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return get_distribution(dist).get_entry_info(group, name)
+
+
+class IMetadataProvider:
+ def has_metadata(name):
+ """Does the package's distribution contain the named metadata?"""
+
+ def get_metadata(name):
+ """The named metadata resource as a string"""
+
+ def get_metadata_lines(name):
+ """Yield named metadata resource as list of non-blank non-comment lines
+
+ Leading and trailing whitespace is stripped from each line, and lines
+ with ``#`` as the first non-blank character are omitted."""
+
+ def metadata_isdir(name):
+ """Is the named metadata a directory? (like ``os.path.isdir()``)"""
+
+ def metadata_listdir(name):
+ """List of metadata names in the directory (like ``os.listdir()``)"""
+
+ def run_script(script_name, namespace):
+ """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+ """An object that provides access to package resources"""
+
+ def get_resource_filename(manager, resource_name):
+ """Return a true filesystem path for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_stream(manager, resource_name):
+ """Return a readable file-like object for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_string(manager, resource_name):
+ """Return a string containing the contents of `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def has_resource(resource_name):
+ """Does the package contain the named resource?"""
+
+ def resource_isdir(resource_name):
+ """Is the named resource a directory? (like ``os.path.isdir()``)"""
+
+ def resource_listdir(resource_name):
+ """List of resource names in the directory (like ``os.listdir()``)"""
+
+
+class WorkingSet:
+ """A collection of active distributions on sys.path (or a similar list)"""
+
+ def __init__(self, entries=None):
+ """Create working set from list of path entries (default=sys.path)"""
+ self.entries = []
+ self.entry_keys = {}
+ self.by_key = {}
+ self.callbacks = []
+
+ if entries is None:
+ entries = sys.path
+
+ for entry in entries:
+ self.add_entry(entry)
+
+ @classmethod
+ def _build_master(cls):
+ """
+ Prepare the master working set.
+ """
+ ws = cls()
+ try:
+ from __main__ import __requires__
+ except ImportError:
+ # The main program does not list any requirements
+ return ws
+
+ # ensure the requirements are met
+ try:
+ ws.require(__requires__)
+ except VersionConflict:
+ return cls._build_from_requirements(__requires__)
+
+ return ws
+
+ @classmethod
+ def _build_from_requirements(cls, req_spec):
+ """
+ Build a working set from a requirement spec. Rewrites sys.path.
+ """
+ # try it without defaults already on sys.path
+ # by starting with an empty path
+ ws = cls([])
+ reqs = parse_requirements(req_spec)
+ dists = ws.resolve(reqs, Environment())
+ for dist in dists:
+ ws.add(dist)
+
+ # add any missing entries from sys.path
+ for entry in sys.path:
+ if entry not in ws.entries:
+ ws.add_entry(entry)
+
+ # then copy back to sys.path
+ sys.path[:] = ws.entries
+ return ws
+
+ def add_entry(self, entry):
+ """Add a path item to ``.entries``, finding any distributions on it
+
+ ``find_distributions(entry, True)`` is used to find distributions
+ corresponding to the path entry, and they are added. `entry` is
+ always appended to ``.entries``, even if it is already present.
+ (This is because ``sys.path`` can contain the same value more than
+ once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+ equal ``sys.path``.)
+ """
+ self.entry_keys.setdefault(entry, [])
+ self.entries.append(entry)
+ for dist in find_distributions(entry, True):
+ self.add(dist, entry, False)
+
+ def __contains__(self, dist):
+ """True if `dist` is the active distribution for its project"""
+ return self.by_key.get(dist.key) == dist
+
+ def find(self, req):
+ """Find a distribution matching requirement `req`
+
+ If there is an active distribution for the requested project, this
+ returns it as long as it meets the version requirement specified by
+ `req`. But, if there is an active distribution for the project and it
+ does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+ If there is no active distribution for the requested project, ``None``
+ is returned.
+ """
+ dist = self.by_key.get(req.key)
+ if dist is not None and dist not in req:
+ # XXX add more info
+ raise VersionConflict(dist, req)
+ return dist
+
+ def iter_entry_points(self, group, name=None):
+ """Yield entry point objects from `group` matching `name`
+
+ If `name` is None, yields all entry points in `group` from all
+ distributions in the working set, otherwise only ones matching
+ both `group` and `name` are yielded (in distribution order).
+ """
+ return (
+ entry
+ for dist in self
+ for entry in dist.get_entry_map(group).values()
+ if name is None or name == entry.name
+ )
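+
+    # Illustrative usage (a sketch; 'console_scripts' is a standard group, but
+    # which entries exist depends on what is installed):
+    #
+    #     for ep in working_set.iter_entry_points('console_scripts'):
+    #         print(ep.name, ep.module_name)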
+
+ def run_script(self, requires, script_name):
+ """Locate distribution for `requires` and run `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ self.require(requires)[0].run_script(script_name, ns)
+
+ def __iter__(self):
+ """Yield distributions for non-duplicate projects in the working set
+
+ The yield order is the order in which the items' path entries were
+ added to the working set.
+ """
+ seen = {}
+ for item in self.entries:
+ if item not in self.entry_keys:
+ # workaround a cache issue
+ continue
+
+ for key in self.entry_keys[item]:
+ if key not in seen:
+ seen[key] = 1
+ yield self.by_key[key]
+
+ def add(self, dist, entry=None, insert=True, replace=False):
+ """Add `dist` to working set, associated with `entry`
+
+ If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+ On exit from this routine, `entry` is added to the end of the working
+ set's ``.entries`` (if it wasn't already present).
+
+ `dist` is only added to the working set if it's for a project that
+ doesn't already have a distribution in the set, unless `replace=True`.
+ If it's added, any callbacks registered with the ``subscribe()`` method
+ will be called.
+ """
+ if insert:
+ dist.insert_on(self.entries, entry, replace=replace)
+
+ if entry is None:
+ entry = dist.location
+ keys = self.entry_keys.setdefault(entry, [])
+ keys2 = self.entry_keys.setdefault(dist.location, [])
+ if not replace and dist.key in self.by_key:
+ # ignore hidden distros
+ return
+
+ self.by_key[dist.key] = dist
+ if dist.key not in keys:
+ keys.append(dist.key)
+ if dist.key not in keys2:
+ keys2.append(dist.key)
+ self._added_new(dist)
+
+ def resolve(self, requirements, env=None, installer=None,
+ replace_conflicting=False, extras=None):
+ """List all distributions needed to (recursively) meet `requirements`
+
+ `requirements` must be a sequence of ``Requirement`` objects. `env`,
+ if supplied, should be an ``Environment`` instance. If
+ not supplied, it defaults to all distributions available within any
+ entry or distribution in the working set. `installer`, if supplied,
+ will be invoked with each requirement that cannot be met by an
+ already-installed distribution; it should return a ``Distribution`` or
+ ``None``.
+
+        Unless `replace_conflicting=True`, raises a VersionConflict exception
+        if any requirements are found on the path that have the correct name
+        but the wrong version. Otherwise, if an `installer` is supplied it
+        will be invoked to obtain the correct version of the requirement and
+        activate it.
+
+ `extras` is a list of the extras to be used with these requirements.
+ This is important because extra requirements may look like `my_req;
+ extra = "my_extra"`, which would otherwise be interpreted as a purely
+ optional requirement. Instead, we want to be able to assert that these
+ requirements are truly required.
+ """
+
+ # set up the stack
+ requirements = list(requirements)[::-1]
+ # set of processed requirements
+ processed = {}
+ # key -> dist
+ best = {}
+ to_activate = []
+
+ req_extras = _ReqExtras()
+
+ # Mapping of requirement to set of distributions that required it;
+ # useful for reporting info about conflicts.
+ required_by = collections.defaultdict(set)
+
+ while requirements:
+ # process dependencies breadth-first
+ req = requirements.pop(0)
+ if req in processed:
+ # Ignore cyclic or redundant dependencies
+ continue
+
+ if not req_extras.markers_pass(req, extras):
+ continue
+
+ dist = best.get(req.key)
+ if dist is None:
+ # Find the best distribution and add it to the map
+ dist = self.by_key.get(req.key)
+ if dist is None or (dist not in req and replace_conflicting):
+ ws = self
+ if env is None:
+ if dist is None:
+ env = Environment(self.entries)
+ else:
+ # Use an empty environment and workingset to avoid
+ # any further conflicts with the conflicting
+ # distribution
+ env = Environment([])
+ ws = WorkingSet([])
+ dist = best[req.key] = env.best_match(
+ req, ws, installer,
+ replace_conflicting=replace_conflicting
+ )
+ if dist is None:
+ requirers = required_by.get(req, None)
+ raise DistributionNotFound(req, requirers)
+ to_activate.append(dist)
+ if dist not in req:
+ # Oops, the "best" so far conflicts with a dependency
+ dependent_req = required_by[req]
+ raise VersionConflict(dist, req).with_context(dependent_req)
+
+ # push the new requirements onto the stack
+ new_requirements = dist.requires(req.extras)[::-1]
+ requirements.extend(new_requirements)
+
+ # Register the new requirements needed by req
+ for new_requirement in new_requirements:
+ required_by[new_requirement].add(req.project_name)
+ req_extras[new_requirement] = req.extras
+
+ processed[req] = True
+
+ # return list of distros to activate
+ return to_activate
+
+ def find_plugins(
+ self, plugin_env, full_env=None, installer=None, fallback=True):
+ """Find all activatable distributions in `plugin_env`
+
+ Example usage::
+
+ distributions, errors = working_set.find_plugins(
+ Environment(plugin_dirlist)
+ )
+ # add plugins+libs to sys.path
+ map(working_set.add, distributions)
+ # display errors
+ print('Could not load', errors)
+
+ The `plugin_env` should be an ``Environment`` instance that contains
+ only distributions that are in the project's "plugin directory" or
+ directories. The `full_env`, if supplied, should be an ``Environment``
+        directories. The `full_env`, if supplied, should be an ``Environment``
+        containing all currently-available distributions. If `full_env` is not
+ method is called on, which will typically mean that every directory on
+ ``sys.path`` will be scanned for distributions.
+
+ `installer` is a standard installer callback as used by the
+ ``resolve()`` method. The `fallback` flag indicates whether we should
+ attempt to resolve older versions of a plugin if the newest version
+ cannot be resolved.
+
+ This method returns a 2-tuple: (`distributions`, `error_info`), where
+ `distributions` is a list of the distributions found in `plugin_env`
+ that were loadable, along with any other distributions that are needed
+ to resolve their dependencies. `error_info` is a dictionary mapping
+ unloadable plugin distributions to an exception instance describing the
+ error that occurred. Usually this will be a ``DistributionNotFound`` or
+ ``VersionConflict`` instance.
+ """
+
+ plugin_projects = list(plugin_env)
+ # scan project names in alphabetic order
+ plugin_projects.sort()
+
+ error_info = {}
+ distributions = {}
+
+ if full_env is None:
+ env = Environment(self.entries)
+ env += plugin_env
+ else:
+ env = full_env + plugin_env
+
+ shadow_set = self.__class__([])
+ # put all our entries in shadow_set
+ list(map(shadow_set.add, self))
+
+ for project_name in plugin_projects:
+
+ for dist in plugin_env[project_name]:
+
+ req = [dist.as_requirement()]
+
+ try:
+ resolvees = shadow_set.resolve(req, env, installer)
+
+ except ResolutionError as v:
+ # save error info
+ error_info[dist] = v
+ if fallback:
+ # try the next older version of project
+ continue
+ else:
+ # give up on this project, keep going
+ break
+
+ else:
+ list(map(shadow_set.add, resolvees))
+ distributions.update(dict.fromkeys(resolvees))
+
+ # success, no need to try any more versions of this project
+ break
+
+ distributions = list(distributions)
+ distributions.sort()
+
+ return distributions, error_info
+
+ def require(self, *requirements):
+ """Ensure that distributions matching `requirements` are activated
+
+ `requirements` must be a string or a (possibly-nested) sequence
+ thereof, specifying the distributions and versions required. The
+ return value is a sequence of the distributions that needed to be
+ activated to fulfill the requirements; all relevant distributions are
+ included, even if they were already activated in this working set.
+ """
+ needed = self.resolve(parse_requirements(requirements))
+
+ for dist in needed:
+ self.add(dist)
+
+ return needed
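+
+    # Illustrative usage (a sketch; 'pip>=20.0' is a hypothetical requirement,
+    # not guaranteed to be installed in any given environment):
+    #
+    #     for dist in working_set.require('pip>=20.0'):
+    #         print(dist.project_name, dist.version)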
+
+ def subscribe(self, callback, existing=True):
+ """Invoke `callback` for all distributions
+
+        If `existing=True` (default), call the callback on all existing
+        distributions as well.
+ """
+ if callback in self.callbacks:
+ return
+ self.callbacks.append(callback)
+ if not existing:
+ return
+ for dist in self:
+ callback(dist)
+
+ def _added_new(self, dist):
+ for callback in self.callbacks:
+ callback(dist)
+
+ def __getstate__(self):
+ return (
+ self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
+ self.callbacks[:]
+ )
+
+ def __setstate__(self, e_k_b_c):
+ entries, keys, by_key, callbacks = e_k_b_c
+ self.entries = entries[:]
+ self.entry_keys = keys.copy()
+ self.by_key = by_key.copy()
+ self.callbacks = callbacks[:]
+
+
+class _ReqExtras(dict):
+ """
+ Map each requirement to the extras that demanded it.
+ """
+
+ def markers_pass(self, req, extras=None):
+ """
+ Evaluate markers for req against each extra that
+ demanded it.
+
+ Return False if the req has a marker and fails
+ evaluation. Otherwise, return True.
+ """
+ extra_evals = (
+ req.marker.evaluate({'extra': extra})
+ for extra in self.get(req, ()) + (extras or (None,))
+ )
+ return not req.marker or any(extra_evals)
+
+
+class Environment:
+ """Searchable snapshot of distributions on a search path"""
+
+ def __init__(
+ self, search_path=None, platform=get_supported_platform(),
+ python=PY_MAJOR):
+ """Snapshot distributions available on a search path
+
+ Any distributions found on `search_path` are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used.
+
+ `platform` is an optional string specifying the name of the platform
+ that platform-specific distributions must be compatible with. If
+ unspecified, it defaults to the current platform. `python` is an
+ optional string naming the desired version of Python (e.g. ``'3.6'``);
+ it defaults to the current version.
+
+ You may explicitly set `platform` (and/or `python`) to ``None`` if you
+ wish to map *all* distributions, not just those compatible with the
+ running platform or Python version.
+ """
+ self._distmap = {}
+ self.platform = platform
+ self.python = python
+ self.scan(search_path)
+
+ def can_add(self, dist):
+ """Is distribution `dist` acceptable for this environment?
+
+ The distribution must match the platform and python version
+ requirements specified when this environment was created, or False
+ is returned.
+ """
+ py_compat = (
+ self.python is None
+ or dist.py_version is None
+ or dist.py_version == self.python
+ )
+ return py_compat and compatible_platforms(dist.platform, self.platform)
+
+ def remove(self, dist):
+ """Remove `dist` from the environment"""
+ self._distmap[dist.key].remove(dist)
+
+ def scan(self, search_path=None):
+ """Scan `search_path` for distributions usable in this environment
+
+ Any distributions found are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used. Only distributions conforming to
+ the platform/python version defined at initialization are added.
+ """
+ if search_path is None:
+ search_path = sys.path
+
+ for item in search_path:
+ for dist in find_distributions(item):
+ self.add(dist)
+
+ def __getitem__(self, project_name):
+ """Return a newest-to-oldest list of distributions for `project_name`
+
+ Uses case-insensitive `project_name` comparison, assuming all the
+ project's distributions use their project's name converted to all
+ lowercase as their key.
+
+ """
+ distribution_key = project_name.lower()
+ return self._distmap.get(distribution_key, [])
+
+ def add(self, dist):
+ """Add `dist` if we ``can_add()`` it and it has not already been added
+ """
+ if self.can_add(dist) and dist.has_version():
+ dists = self._distmap.setdefault(dist.key, [])
+ if dist not in dists:
+ dists.append(dist)
+ dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
+ def best_match(
+ self, req, working_set, installer=None, replace_conflicting=False):
+ """Find distribution best matching `req` and usable on `working_set`
+
+ This calls the ``find(req)`` method of the `working_set` to see if a
+ suitable distribution is already active. (This may raise
+ ``VersionConflict`` if an unsuitable version of the project is already
+ active in the specified `working_set`.) If a suitable distribution
+ isn't active, this method returns the newest distribution in the
+ environment that meets the ``Requirement`` in `req`. If no suitable
+ distribution is found, and `installer` is supplied, then the result of
+ calling the environment's ``obtain(req, installer)`` method will be
+ returned.
+ """
+ try:
+ dist = working_set.find(req)
+ except VersionConflict:
+ if not replace_conflicting:
+ raise
+ dist = None
+ if dist is not None:
+ return dist
+ for dist in self[req.key]:
+ if dist in req:
+ return dist
+ # try to download/install
+ return self.obtain(req, installer)
+
+ def obtain(self, requirement, installer=None):
+ """Obtain a distribution matching `requirement` (e.g. via download)
+
+ Obtain a distro that matches requirement (e.g. via download). In the
+ base ``Environment`` class, this routine just returns
+ ``installer(requirement)``, unless `installer` is None, in which case
+ None is returned instead. This method is a hook that allows subclasses
+ to attempt other ways of obtaining a distribution before falling back
+ to the `installer` argument."""
+ if installer is not None:
+ return installer(requirement)
+
+ def __iter__(self):
+ """Yield the unique project names of the available distributions"""
+ for key in self._distmap.keys():
+ if self[key]:
+ yield key
+
+ def __iadd__(self, other):
+ """In-place addition of a distribution or environment"""
+ if isinstance(other, Distribution):
+ self.add(other)
+ elif isinstance(other, Environment):
+ for project in other:
+ for dist in other[project]:
+ self.add(dist)
+ else:
+ raise TypeError("Can't add %r to environment" % (other,))
+ return self
+
+ def __add__(self, other):
+ """Add an environment or distribution to an environment"""
+ new = self.__class__([], platform=None, python=None)
+ for env in self, other:
+ new += env
+ return new
+
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
+
+class ExtractionError(RuntimeError):
+ """An error occurred extracting a resource
+
+ The following attributes are available from instances of this exception:
+
+ manager
+ The resource manager that raised this exception
+
+ cache_path
+ The base directory for resource extraction
+
+ original_error
+ The exception instance that caused extraction to fail
+ """
+
+
+class ResourceManager:
+ """Manage resource extraction and packages"""
+ extraction_path = None
+
+ def __init__(self):
+ self.cached_files = {}
+
+ def resource_exists(self, package_or_requirement, resource_name):
+ """Does the named resource exist?"""
+ return get_provider(package_or_requirement).has_resource(resource_name)
+
+ def resource_isdir(self, package_or_requirement, resource_name):
+ """Is the named resource an existing directory?"""
+ return get_provider(package_or_requirement).resource_isdir(
+ resource_name
+ )
+
+ def resource_filename(self, package_or_requirement, resource_name):
+ """Return a true filesystem path for specified resource"""
+ return get_provider(package_or_requirement).get_resource_filename(
+ self, resource_name
+ )
+
+ def resource_stream(self, package_or_requirement, resource_name):
+ """Return a readable file-like object for specified resource"""
+ return get_provider(package_or_requirement).get_resource_stream(
+ self, resource_name
+ )
+
+ def resource_string(self, package_or_requirement, resource_name):
+ """Return specified resource as a string"""
+ return get_provider(package_or_requirement).get_resource_string(
+ self, resource_name
+ )
+
+ def resource_listdir(self, package_or_requirement, resource_name):
+ """List the contents of the named resource directory"""
+ return get_provider(package_or_requirement).resource_listdir(
+ resource_name
+ )
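+
+    # Illustrative usage (a sketch; the package and resource names are
+    # hypothetical): the module-level aliases declared near the top of this
+    # file delegate to a shared ResourceManager, so callers typically write
+    #
+    #     data = resource_string('mypkg', 'data/defaults.cfg')
+    #     names = resource_listdir('mypkg', 'data')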
+
+ def extraction_error(self):
+ """Give an error message for problems extracting file(s)"""
+
+ old_exc = sys.exc_info()[1]
+ cache_path = self.extraction_path or get_default_cache()
+
+ tmpl = textwrap.dedent("""
+ Can't extract file(s) to egg cache
+
+ The following error occurred while trying to extract file(s)
+ to the Python egg cache:
+
+ {old_exc}
+
+ The Python egg cache directory is currently set to:
+
+ {cache_path}
+
+ Perhaps your account does not have write access to this directory?
+ You can change the cache directory by setting the PYTHON_EGG_CACHE
+ environment variable to point to an accessible directory.
+ """).lstrip()
+ err = ExtractionError(tmpl.format(**locals()))
+ err.manager = self
+ err.cache_path = cache_path
+ err.original_error = old_exc
+ raise err
+
+ def get_cache_path(self, archive_name, names=()):
+ """Return absolute location in cache for `archive_name` and `names`
+
+ The parent directory of the resulting path will be created if it does
+ not already exist. `archive_name` should be the base filename of the
+ enclosing egg (which may not be the name of the enclosing zipfile!),
+ including its ".egg" extension. `names`, if provided, should be a
+ sequence of path name parts "under" the egg's extraction location.
+
+ This method should only be called by resource providers that need to
+ obtain an extraction location, and only for names they intend to
+ extract, as it tracks the generated names for possible cleanup later.
+ """
+ extract_path = self.extraction_path or get_default_cache()
+ target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
+ try:
+ _bypass_ensure_directory(target_path)
+ except Exception:
+ self.extraction_error()
+
+ self._warn_unsafe_extraction_path(extract_path)
+
+ self.cached_files[target_path] = 1
+ return target_path
+
+ @staticmethod
+ def _warn_unsafe_extraction_path(path):
+ """
+ If the default extraction path is overridden and set to an insecure
+ location, such as /tmp, it opens up an opportunity for an attacker to
+ replace an extracted file with an unauthorized payload. Warn the user
+ if a known insecure location is used.
+
+ See Distribute #375 for more details.
+ """
+ if os.name == 'nt' and not path.startswith(os.environ['windir']):
+ # On Windows, permissions are generally restrictive by default
+ # and temp directories are not writable by other users, so
+ # bypass the warning.
+ return
+ mode = os.stat(path).st_mode
+ if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+ msg = (
+ "%s is writable by group/others and vulnerable to attack "
+ "when "
+ "used with get_resource_filename. Consider a more secure "
+ "location (set with .set_extraction_path or the "
+ "PYTHON_EGG_CACHE environment variable)." % path
+ )
+ warnings.warn(msg, UserWarning)
+
+ def postprocess(self, tempname, filename):
+ """Perform any platform-specific postprocessing of `tempname`
+
+ This is where Mac header rewrites should be done; other platforms don't
+ have anything special they should do.
+
+ Resource providers should call this method ONLY after successfully
+ extracting a compressed resource. They must NOT call it on resources
+ that are already in the filesystem.
+
+ `tempname` is the current (temporary) name of the file, and `filename`
+ is the name it will be renamed to by the caller after this routine
+ returns.
+ """
+
+ if os.name == 'posix':
+ # Make the resource executable
+ mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+ os.chmod(tempname, mode)
+
+ def set_extraction_path(self, path):
+ """Set the base path where resources will be extracted to, if needed.
+
+ If you do not call this routine before any extractions take place, the
+ path defaults to the return value of ``get_default_cache()``. (Which
+ is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+ platform-specific fallbacks. See that routine's documentation for more
+ details.)
+
+ Resources are extracted to subdirectories of this path based upon
+ information given by the ``IResourceProvider``. You may set this to a
+ temporary directory, but then you must call ``cleanup_resources()`` to
+ delete the extracted files when done. There is no guarantee that
+ ``cleanup_resources()`` will be able to remove all extracted files.
+
+ (Note: you may not change the extraction path for a given resource
+ manager once resources have been extracted, unless you first call
+ ``cleanup_resources()``.)
+ """
+ if self.cached_files:
+ raise ValueError(
+ "Can't change extraction path, files already extracted"
+ )
+
+ self.extraction_path = path
+
+ def cleanup_resources(self, force=False):
+ """
+ Delete all extracted resource files and directories, returning a list
+ of the file and directory names that could not be successfully removed.
+ This function does not have any concurrency protection, so it should
+ generally only be called when the extraction path is a temporary
+ directory exclusive to a single process. This method is not
+ automatically called; you must call it explicitly or register it as an
+ ``atexit`` function if you wish to ensure cleanup of a temporary
+ directory used for extractions.
+ """
+ # XXX
+
+
+def get_default_cache():
+ """
+ Return the ``PYTHON_EGG_CACHE`` environment variable
+ or a platform-relevant user cache dir for an app
+ named "Python-Eggs".
+ """
+ return (
+ os.environ.get('PYTHON_EGG_CACHE')
+ or platformdirs.user_cache_dir(appname='Python-Eggs')
+ )
+
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """
+ Convert an arbitrary string to a standard version string
+ """
+ try:
+ # normalize the version
+ return str(packaging.version.Version(version))
+ except packaging.version.InvalidVersion:
+ version = version.replace(' ', '.')
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+ """Convert an arbitrary string to a standard 'extra' name
+
+ Any runs of non-alphanumeric characters are replaced with a single '_',
+ and the result is always lowercased.
+ """
+ return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
+ return name.replace('-', '_')
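+
+# Illustrative behavior (a sketch, not part of the vendored module):
+#
+#     >>> safe_name("My Project")
+#     'My-Project'
+#     >>> safe_version("2.1 beta")
+#     '2.1.beta'
+#     >>> to_filename(safe_name("my-project"))
+#     'my_project'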
+
+
+def invalid_marker(text):
+ """
+ Validate text as a PEP 508 environment marker; return an exception
+ if invalid or False otherwise.
+ """
+ try:
+ evaluate_marker(text)
+ except SyntaxError as e:
+ e.filename = None
+ e.lineno = None
+ return e
+ return False
+
+
+def evaluate_marker(text, extra=None):
+ """
+ Evaluate a PEP 508 environment marker.
+ Return a boolean indicating the marker result in this environment.
+ Raise SyntaxError if marker is invalid.
+
+ This implementation uses the 'pyparsing' module.
+ """
+ try:
+ marker = packaging.markers.Marker(text)
+ return marker.evaluate()
+ except packaging.markers.InvalidMarker as e:
+ raise SyntaxError(e)
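+
+# Illustrative behavior (a sketch, not part of the vendored module):
+#
+#     >>> evaluate_marker('python_version > "2.7"')  # on any Python 3
+#     True
+#     >>> bool(invalid_marker("not-a-marker"))       # returns the SyntaxError
+#     True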
+
+
+class NullProvider:
+ """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+ egg_name = None
+ egg_info = None
+ loader = None
+
+ def __init__(self, module):
+ self.loader = getattr(module, '__loader__', None)
+ self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+ def get_resource_filename(self, manager, resource_name):
+ return self._fn(self.module_path, resource_name)
+
+ def get_resource_stream(self, manager, resource_name):
+ return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+ def get_resource_string(self, manager, resource_name):
+ return self._get(self._fn(self.module_path, resource_name))
+
+ def has_resource(self, resource_name):
+ return self._has(self._fn(self.module_path, resource_name))
+
+ def _get_metadata_path(self, name):
+ return self._fn(self.egg_info, name)
+
+ def has_metadata(self, name):
+ if not self.egg_info:
+ return self.egg_info
+
+ path = self._get_metadata_path(name)
+ return self._has(path)
+
+ def get_metadata(self, name):
+ if not self.egg_info:
+ return ""
+ path = self._get_metadata_path(name)
+ value = self._get(path)
+ if six.PY2:
+ return value
+ try:
+ return value.decode('utf-8')
+ except UnicodeDecodeError as exc:
+ # Include the path in the error message to simplify
+ # troubleshooting, and without changing the exception type.
+ exc.reason += ' in {} file at path: {}'.format(name, path)
+ raise
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+ def resource_isdir(self, resource_name):
+ return self._isdir(self._fn(self.module_path, resource_name))
+
+ def metadata_isdir(self, name):
+ return self.egg_info and self._isdir(self._fn(self.egg_info, name))
+
+ def resource_listdir(self, resource_name):
+ return self._listdir(self._fn(self.module_path, resource_name))
+
+ def metadata_listdir(self, name):
+ if self.egg_info:
+ return self._listdir(self._fn(self.egg_info, name))
+ return []
+
+ def run_script(self, script_name, namespace):
+ script = 'scripts/' + script_name
+ if not self.has_metadata(script):
+ raise ResolutionError(
+ "Script {script!r} not found in metadata at {self.egg_info!r}"
+ .format(**locals()),
+ )
+ script_text = self.get_metadata(script).replace('\r\n', '\n')
+ script_text = script_text.replace('\r', '\n')
+ script_filename = self._fn(self.egg_info, script)
+ namespace['__file__'] = script_filename
+ if os.path.exists(script_filename):
+ source = open(script_filename).read()
+ code = compile(source, script_filename, 'exec')
+ exec(code, namespace, namespace)
+ else:
+ from linecache import cache
+ cache[script_filename] = (
+ len(script_text), 0, script_text.split('\n'), script_filename
+ )
+ script_code = compile(script_text, script_filename, 'exec')
+ exec(script_code, namespace, namespace)
+
+ def _has(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _isdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _listdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _fn(self, base, resource_name):
+ self._validate_resource_path(resource_name)
+ if resource_name:
+ return os.path.join(base, *resource_name.split('/'))
+ return base
+
+ @staticmethod
+ def _validate_resource_path(path):
+ """
+ Validate the resource paths according to the docs.
+ https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
+
+ >>> warned = getfixture('recwarn')
+ >>> warnings.simplefilter('always')
+ >>> vrp = NullProvider._validate_resource_path
+ >>> vrp('foo/bar.txt')
+ >>> bool(warned)
+ False
+ >>> vrp('../foo/bar.txt')
+ >>> bool(warned)
+ True
+ >>> warned.clear()
+ >>> vrp('/foo/bar.txt')
+ >>> bool(warned)
+ True
+ >>> vrp('foo/../../bar.txt')
+ >>> bool(warned)
+ True
+ >>> warned.clear()
+ >>> vrp('foo/f../bar.txt')
+ >>> bool(warned)
+ False
+
+ Windows path separators are straight-up disallowed.
+ >>> vrp(r'\\foo/bar.txt')
+ Traceback (most recent call last):
+ ...
+ ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+ >>> vrp(r'C:\\foo/bar.txt')
+ Traceback (most recent call last):
+ ...
+ ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+ Blank values are allowed
+
+ >>> vrp('')
+ >>> bool(warned)
+ False
+
+ Non-string values are not.
+
+ >>> vrp(None)
+ Traceback (most recent call last):
+ ...
+ AttributeError: ...
+ """
+ invalid = (
+ os.path.pardir in path.split(posixpath.sep) or
+ posixpath.isabs(path) or
+ ntpath.isabs(path)
+ )
+ if not invalid:
+ return
+
+ msg = "Use of .. or absolute path in a resource path is not allowed."
+
+ # Aggressively disallow Windows absolute paths
+ if ntpath.isabs(path) and not posixpath.isabs(path):
+ raise ValueError(msg)
+
+ # for compatibility, warn; in future
+ # raise ValueError(msg)
+ warnings.warn(
+ msg[:-1] + " and will raise exceptions in a future release.",
+ DeprecationWarning,
+ stacklevel=4,
+ )
+
+ def _get(self, path):
+ if hasattr(self.loader, 'get_data'):
+ return self.loader.get_data(path)
+ raise NotImplementedError(
+ "Can't perform this operation for loaders without 'get_data()'"
+ )
+
+
+register_loader_type(object, NullProvider)
+
+
+class EggProvider(NullProvider):
+ """Provider based on a virtual filesystem"""
+
+ def __init__(self, module):
+ NullProvider.__init__(self, module)
+ self._setup_prefix()
+
+ def _setup_prefix(self):
+ # we assume here that our metadata may be nested inside a "basket"
+ # of multiple eggs; that's why we use module_path instead of .archive
+ path = self.module_path
+ old = None
+ while path != old:
+ if _is_egg_path(path):
+ self.egg_name = os.path.basename(path)
+ self.egg_info = os.path.join(path, 'EGG-INFO')
+ self.egg_root = path
+ break
+ old = path
+ path, base = os.path.split(path)
+
+
+class DefaultProvider(EggProvider):
+ """Provides access to package resources in the filesystem"""
+
+ def _has(self, path):
+ return os.path.exists(path)
+
+ def _isdir(self, path):
+ return os.path.isdir(path)
+
+ def _listdir(self, path):
+ return os.listdir(path)
+
+ def get_resource_stream(self, manager, resource_name):
+ return open(self._fn(self.module_path, resource_name), 'rb')
+
+ def _get(self, path):
+ with open(path, 'rb') as stream:
+ return stream.read()
+
+ @classmethod
+ def _register(cls):
+ loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
+ for name in loader_names:
+ loader_cls = getattr(importlib_machinery, name, type(None))
+ register_loader_type(loader_cls, cls)
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+ """Provider that returns nothing for all requests"""
+
+ module_path = None
+
+ _isdir = _has = lambda self, path: False
+
+ def _get(self, path):
+ return ''
+
+ def _listdir(self, path):
+ return []
+
+ def __init__(self):
+ pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(dict):
+ """
+ zip manifest builder
+ """
+
+ @classmethod
+ def build(cls, path):
+ """
+ Build a dictionary similar to the zipimport directory
+ caches, except instead of tuples, store ZipInfo objects.
+
+ Use a platform-specific path separator (os.sep) for the path keys
+ for compatibility with pypy on Windows.
+ """
+ with zipfile.ZipFile(path) as zfile:
+ items = (
+ (
+ name.replace('/', os.sep),
+ zfile.getinfo(name),
+ )
+ for name in zfile.namelist()
+ )
+ return dict(items)
+
+ load = build
+
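+# Usage sketch (hypothetical path): build a manifest for a zip archive and
+# look up a member's ZipInfo. Keys use os.sep, so join path parts with it.
+#
+#     manifest = ZipManifests.build('/path/to/some.egg')
+#     info = manifest[os.sep.join(['EGG-INFO', 'PKG-INFO'])]
+#     info.file_size, info.date_time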
+
+class MemoizedZipManifests(ZipManifests):
+ """
+ Memoized zipfile manifests.
+ """
+ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
+
+ def load(self, path):
+ """
+ Load a manifest at path or return a suitable manifest already loaded.
+ """
+ path = os.path.normpath(path)
+ mtime = os.stat(path).st_mtime
+
+ if path not in self or self[path].mtime != mtime:
+ manifest = self.build(path)
+ self[path] = self.manifest_mod(manifest, mtime)
+
+ return self[path].manifest
+
+
+class ZipProvider(EggProvider):
+ """Resource support for zips and eggs"""
+
+ eagers = None
+ _zip_manifests = MemoizedZipManifests()
+
+ def __init__(self, module):
+ EggProvider.__init__(self, module)
+ self.zip_pre = self.loader.archive + os.sep
+
+ def _zipinfo_name(self, fspath):
+ # Convert a virtual filename (full path to file) into a zipfile subpath
+ # usable with the zipimport directory cache for our target archive
+ fspath = fspath.rstrip(os.sep)
+ if fspath == self.loader.archive:
+ return ''
+ if fspath.startswith(self.zip_pre):
+ return fspath[len(self.zip_pre):]
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.zip_pre)
+ )
+
+ def _parts(self, zip_path):
+ # Convert a zipfile subpath into an egg-relative path part list.
+ # pseudo-fs path
+ fspath = self.zip_pre + zip_path
+ if fspath.startswith(self.egg_root + os.sep):
+ return fspath[len(self.egg_root) + 1:].split(os.sep)
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.egg_root)
+ )
+
+ @property
+ def zipinfo(self):
+ return self._zip_manifests.load(self.loader.archive)
+
+ def get_resource_filename(self, manager, resource_name):
+ if not self.egg_name:
+ raise NotImplementedError(
+ "resource_filename() only supported for .egg, not .zip"
+ )
+ # no need to lock for extraction, since we use temp names
+ zip_path = self._resource_to_zip(resource_name)
+ eagers = self._get_eager_resources()
+ if '/'.join(self._parts(zip_path)) in eagers:
+ for name in eagers:
+ self._extract_resource(manager, self._eager_to_zip(name))
+ return self._extract_resource(manager, zip_path)
+
+ @staticmethod
+ def _get_date_and_size(zip_stat):
+ size = zip_stat.file_size
+ # ymdhms+wday, yday, dst
+ date_time = zip_stat.date_time + (0, 0, -1)
+ # 1980 offset already done
+ timestamp = time.mktime(date_time)
+ return timestamp, size
+
+ def _extract_resource(self, manager, zip_path):
+
+ if zip_path in self._index():
+ for name in self._index()[zip_path]:
+ last = self._extract_resource(
+ manager, os.path.join(zip_path, name)
+ )
+ # return the extracted directory name
+ return os.path.dirname(last)
+
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+ if not WRITE_SUPPORT:
+ raise IOError('"os.rename" and "os.unlink" are not supported '
+ 'on this platform')
+ try:
+
+ real_path = manager.get_cache_path(
+ self.egg_name, self._parts(zip_path)
+ )
+
+ if self._is_current(real_path, zip_path):
+ return real_path
+
+ outf, tmpnam = _mkstemp(
+ ".$extract",
+ dir=os.path.dirname(real_path),
+ )
+ os.write(outf, self.loader.get_data(zip_path))
+ os.close(outf)
+ utime(tmpnam, (timestamp, timestamp))
+ manager.postprocess(tmpnam, real_path)
+
+ try:
+ rename(tmpnam, real_path)
+
+ except os.error:
+ if os.path.isfile(real_path):
+ if self._is_current(real_path, zip_path):
+ # the file became current since it was checked above,
+ # so proceed.
+ return real_path
+ # Windows, del old file and retry
+ elif os.name == 'nt':
+ unlink(real_path)
+ rename(tmpnam, real_path)
+ return real_path
+ raise
+
+ except os.error:
+ # report a user-friendly error
+ manager.extraction_error()
+
+ return real_path
+
+ def _is_current(self, file_path, zip_path):
+ """
+ Return True if the file_path is current for this zip_path
+ """
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+ if not os.path.isfile(file_path):
+ return False
+ stat = os.stat(file_path)
+ if stat.st_size != size or stat.st_mtime != timestamp:
+ return False
+ # check that the contents match
+ zip_contents = self.loader.get_data(zip_path)
+ with open(file_path, 'rb') as f:
+ file_contents = f.read()
+ return zip_contents == file_contents
+
+ def _get_eager_resources(self):
+ if self.eagers is None:
+ eagers = []
+ for name in ('native_libs.txt', 'eager_resources.txt'):
+ if self.has_metadata(name):
+ eagers.extend(self.get_metadata_lines(name))
+ self.eagers = eagers
+ return self.eagers
+
+ def _index(self):
+ try:
+ return self._dirindex
+ except AttributeError:
+ ind = {}
+ for path in self.zipinfo:
+ parts = path.split(os.sep)
+ while parts:
+ parent = os.sep.join(parts[:-1])
+ if parent in ind:
+ ind[parent].append(parts[-1])
+ break
+ else:
+ ind[parent] = [parts.pop()]
+ self._dirindex = ind
+ return ind
+
+ def _has(self, fspath):
+ zip_path = self._zipinfo_name(fspath)
+ return zip_path in self.zipinfo or zip_path in self._index()
+
+ def _isdir(self, fspath):
+ return self._zipinfo_name(fspath) in self._index()
+
+ def _listdir(self, fspath):
+ return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+ def _eager_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+ def _resource_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+ """Metadata handler for standalone PKG-INFO files
+
+ Usage::
+
+ metadata = FileMetadata("/path/to/PKG-INFO")
+
+ This provider rejects all data and metadata requests except for PKG-INFO,
+ which is treated as existing, and will be the contents of the file at
+ the provided location.
+ """
+
+ def __init__(self, path):
+ self.path = path
+
+ def _get_metadata_path(self, name):
+ return self.path
+
+ def has_metadata(self, name):
+ return name == 'PKG-INFO' and os.path.isfile(self.path)
+
+ def get_metadata(self, name):
+ if name != 'PKG-INFO':
+ raise KeyError("No metadata except PKG-INFO is available")
+
+ with io.open(self.path, encoding='utf-8', errors="replace") as f:
+ metadata = f.read()
+ self._warn_on_replacement(metadata)
+ return metadata
+
+ def _warn_on_replacement(self, metadata):
+ # Python 2.7 compat for: replacement_char = '�'
+ replacement_char = b'\xef\xbf\xbd'.decode('utf-8')
+ if replacement_char in metadata:
+ tmpl = "{self.path} could not be properly decoded in UTF-8"
+ msg = tmpl.format(**locals())
+ warnings.warn(msg)
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+ """Metadata provider for egg directories
+
+ Usage::
+
+ # Development eggs:
+
+ egg_info = "/path/to/PackageName.egg-info"
+ base_dir = os.path.dirname(egg_info)
+ metadata = PathMetadata(base_dir, egg_info)
+ dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+ dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
+
+ # Unpacked egg directories:
+
+ egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+ metadata = PathMetadata(egg_path, os.path.join(egg_path, 'EGG-INFO'))
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
+ """
+
+ def __init__(self, path, egg_info):
+ self.module_path = path
+ self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+ """Metadata provider for .egg files"""
+
+ def __init__(self, importer):
+ """Create a metadata provider from a zipimporter"""
+
+ self.zip_pre = importer.archive + os.sep
+ self.loader = importer
+ if importer.prefix:
+ self.module_path = os.path.join(importer.archive, importer.prefix)
+ else:
+ self.module_path = importer.archive
+ self._setup_prefix()
+
+
+_declare_state('dict', _distribution_finders={})
+
+
+def register_finder(importer_type, distribution_finder):
+ """Register `distribution_finder` to find distributions in sys.path items
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `distribution_finder` is a callable that, passed a path
+ item and the importer instance, yields ``Distribution`` instances found on
+ that path item. See ``pkg_resources.find_on_path`` for an example."""
+ _distribution_finders[importer_type] = distribution_finder
+
+
+def find_distributions(path_item, only=False):
+ """Yield distributions accessible via `path_item`"""
+ importer = get_importer(path_item)
+ finder = _find_adapter(_distribution_finders, importer)
+ return finder(importer, path_item, only)
+
+
+def find_eggs_in_zip(importer, path_item, only=False):
+ """
+ Find eggs in zip files; possibly multiple nested eggs.
+ """
+ if importer.archive.endswith('.whl'):
+ # wheels are not supported with this finder
+ # they don't have PKG-INFO metadata, and won't ever contain eggs
+ return
+ metadata = EggMetadata(importer)
+ if metadata.has_metadata('PKG-INFO'):
+ yield Distribution.from_filename(path_item, metadata=metadata)
+ if only:
+ # don't yield nested distros
+ return
+ for subitem in metadata.resource_listdir(''):
+ if _is_egg_path(subitem):
+ subpath = os.path.join(path_item, subitem)
+ dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
+ for dist in dists:
+ yield dist
+ elif subitem.lower().endswith('.dist-info'):
+ subpath = os.path.join(path_item, subitem)
+ submeta = EggMetadata(zipimport.zipimporter(subpath))
+ submeta.egg_info = subpath
+ yield Distribution.from_location(path_item, subitem, submeta)
+
+
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
+def find_nothing(importer, path_item, only=False):
+ return ()
+
+
+register_finder(object, find_nothing)
+
+
+def _by_version_descending(names):
+ """
+ Given a list of filenames, return them in descending order
+ by version number.
+
+ >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
+ >>> _by_version_descending(names)
+ ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
+ >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
+ >>> _by_version_descending(names)
+ ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
+ >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
+ >>> _by_version_descending(names)
+ ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
+ """
+ def _by_version(name):
+ """
+ Parse each component of the filename
+ """
+ name, ext = os.path.splitext(name)
+ parts = itertools.chain(name.split('-'), [ext])
+ return [packaging.version.parse(part) for part in parts]
+
+ return sorted(names, key=_by_version, reverse=True)
+
+
+def find_on_path(importer, path_item, only=False):
+ """Yield distributions accessible on a sys.path directory"""
+ path_item = _normalize_cached(path_item)
+
+ if _is_unpacked_egg(path_item):
+ yield Distribution.from_filename(
+ path_item, metadata=PathMetadata(
+ path_item, os.path.join(path_item, 'EGG-INFO')
+ )
+ )
+ return
+
+ entries = safe_listdir(path_item)
+
+ # for performance, before sorting by version,
+ # screen entries for only those that will yield
+ # distributions
+ filtered = (
+ entry
+ for entry in entries
+ if dist_factory(path_item, entry, only)
+ )
+
+ # scan for .egg and .egg-info in directory
+ path_item_entries = _by_version_descending(filtered)
+ for entry in path_item_entries:
+ fullpath = os.path.join(path_item, entry)
+ factory = dist_factory(path_item, entry, only)
+ for dist in factory(fullpath):
+ yield dist
+
+
+def dist_factory(path_item, entry, only):
+ """
+ Return a dist_factory for a path_item and entry
+ """
+ lower = entry.lower()
+ is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info')))
+ return (
+ distributions_from_metadata
+ if is_meta else
+ find_distributions
+ if not only and _is_egg_path(entry) else
+ resolve_egg_link
+ if not only and lower.endswith('.egg-link') else
+ NoDists()
+ )
+
+
+class NoDists:
+ """
+ >>> bool(NoDists())
+ False
+
+ >>> list(NoDists()('anything'))
+ []
+ """
+ def __bool__(self):
+ return False
+ if six.PY2:
+ __nonzero__ = __bool__
+
+ def __call__(self, fullpath):
+ return iter(())
+
+
+def safe_listdir(path):
+ """
+ Attempt to list contents of path, but suppress some exceptions.
+ """
+ try:
+ return os.listdir(path)
+ except (PermissionError, NotADirectoryError):
+ pass
+ except OSError as e:
+ # Ignore the directory if it does not exist, is not a directory,
+ # or permission is denied
+ ignorable = (
+ e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT)
+ # Python 2 on Windows needs to be handled this way :(
+ or getattr(e, "winerror", None) == 267
+ )
+ if not ignorable:
+ raise
+ return ()
+
+
+def distributions_from_metadata(path):
+ root = os.path.dirname(path)
+ if os.path.isdir(path):
+ if len(os.listdir(path)) == 0:
+ # empty metadata dir; skip
+ return
+ metadata = PathMetadata(root, path)
+ else:
+ metadata = FileMetadata(path)
+ entry = os.path.basename(path)
+ yield Distribution.from_location(
+ root, entry, metadata, precedence=DEVELOP_DIST,
+ )
+
+
+def non_empty_lines(path):
+ """
+ Yield non-empty lines from file at path
+ """
+ with open(path) as f:
+ for line in f:
+ line = line.strip()
+ if line:
+ yield line
+
+
+def resolve_egg_link(path):
+ """
+ Given a path to an .egg-link, resolve distributions
+ present in the referenced path.
+ """
+ referenced_paths = non_empty_lines(path)
+ resolved_paths = (
+ os.path.join(os.path.dirname(path), ref)
+ for ref in referenced_paths
+ )
+ dist_groups = map(find_distributions, resolved_paths)
+ return next(dist_groups, ())
+
+
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_finder(importlib_machinery.FileFinder, find_on_path)
+
+_declare_state('dict', _namespace_handlers={})
+_declare_state('dict', _namespace_packages={})
+
+
+def register_namespace_handler(importer_type, namespace_handler):
+ """Register `namespace_handler` to declare namespace packages
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `namespace_handler` is a callable like this::
+
+ def namespace_handler(importer, path_entry, moduleName, module):
+ # return a path_entry to use for child packages
+
+ Namespace handlers are only called if the importer object has already
+ agreed that it can handle the relevant path item, and they should only
+ return a subpath if the module __path__ does not already contain an
+ equivalent subpath. For an example namespace handler, see
+ ``pkg_resources.file_ns_handler``.
+ """
+ _namespace_handlers[importer_type] = namespace_handler
+
+
+def _handle_ns(packageName, path_item):
+ """Ensure that named package includes a subpath of path_item (if needed)"""
+
+ importer = get_importer(path_item)
+ if importer is None:
+ return None
+
+ # capture warnings due to #1111
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ loader = importer.find_module(packageName)
+
+ if loader is None:
+ return None
+ module = sys.modules.get(packageName)
+ if module is None:
+ module = sys.modules[packageName] = types.ModuleType(packageName)
+ module.__path__ = []
+ _set_parent_ns(packageName)
+ elif not hasattr(module, '__path__'):
+ raise TypeError("Not a package:", packageName)
+ handler = _find_adapter(_namespace_handlers, importer)
+ subpath = handler(importer, path_item, packageName, module)
+ if subpath is not None:
+ path = module.__path__
+ path.append(subpath)
+ loader.load_module(packageName)
+ _rebuild_mod_path(path, packageName, module)
+ return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module):
+ """
+ Rebuild module.__path__ ensuring that all entries are ordered
+ corresponding to their sys.path order
+ """
+ sys_path = [_normalize_cached(p) for p in sys.path]
+
+ def safe_sys_path_index(entry):
+ """
+ Workaround for #520 and #513.
+ """
+ try:
+ return sys_path.index(entry)
+ except ValueError:
+ return float('inf')
+
+ def position_in_sys_path(path):
+ """
+ Return the ordinal of the path based on its position in sys.path
+ """
+ path_parts = path.split(os.sep)
+ module_parts = package_name.count('.') + 1
+ parts = path_parts[:-module_parts]
+ return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
+
+ new_path = sorted(orig_path, key=position_in_sys_path)
+ new_path = [_normalize_cached(p) for p in new_path]
+
+ if isinstance(module.__path__, list):
+ module.__path__[:] = new_path
+ else:
+ module.__path__ = new_path
+
+
+def declare_namespace(packageName):
+ """Declare that package 'packageName' is a namespace package"""
+
+ _imp.acquire_lock()
+ try:
+ if packageName in _namespace_packages:
+ return
+
+ path = sys.path
+ parent, _, _ = packageName.rpartition('.')
+
+ if parent:
+ declare_namespace(parent)
+ if parent not in _namespace_packages:
+ __import__(parent)
+ try:
+ path = sys.modules[parent].__path__
+ except AttributeError:
+ raise TypeError("Not a package:", parent)
+
+ # Track what packages are namespaces, so when new path items are added,
+ # they can be updated
+ _namespace_packages.setdefault(parent or None, []).append(packageName)
+ _namespace_packages.setdefault(packageName, [])
+
+ for path_item in path:
+ # Ensure all the parent's path items are reflected in the child,
+ # if they apply
+ _handle_ns(packageName, path_item)
+
+ finally:
+ _imp.release_lock()
+
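+# Usage sketch: legacy pkgutil-style namespace packages invoke this from the
+# package's __init__.py (the package name here is whatever __name__ resolves
+# to at import time):
+#
+#     __import__('pkg_resources').declare_namespace(__name__)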
+
+def fixup_namespace_packages(path_item, parent=None):
+ """Ensure that previously-declared namespace packages include path_item"""
+ _imp.acquire_lock()
+ try:
+ for package in _namespace_packages.get(parent, ()):
+ subpath = _handle_ns(package, path_item)
+ if subpath:
+ fixup_namespace_packages(subpath, package)
+ finally:
+ _imp.release_lock()
+
+
+def file_ns_handler(importer, path_item, packageName, module):
+ """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+ subpath = os.path.join(path_item, packageName.split('.')[-1])
+ normalized = _normalize_cached(subpath)
+ for item in module.__path__:
+ if _normalize_cached(item) == normalized:
+ break
+ else:
+ # Only return the path if it's not already there
+ return subpath
+
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+ return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
+def normalize_path(filename):
+ """Normalize a file/dir name for comparison purposes"""
+ return os.path.normcase(os.path.realpath(os.path.normpath(_cygwin_patch(filename))))
+
+
+def _cygwin_patch(filename): # pragma: nocover
+ """
+ Contrary to POSIX 2008, on Cygwin, getcwd(3) contains
+ symlink components. Using os.path.abspath() works around
+ this limitation. A fix in os.getcwd() would probably be
+ better, in Cygwin even more so, except that this seems
+ to be by design...
+ """
+ return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
+
+
+def _normalize_cached(filename, _cache={}):
+ try:
+ return _cache[filename]
+ except KeyError:
+ _cache[filename] = result = normalize_path(filename)
+ return result
+
+
+def _is_egg_path(path):
+ """
+ Determine if given path appears to be an egg.
+ """
+ return path.lower().endswith('.egg')
+
+
+def _is_unpacked_egg(path):
+ """
+ Determine if given path appears to be an unpacked egg.
+ """
+ return (
+ _is_egg_path(path) and
+ os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
+ )
+
+
+def _set_parent_ns(packageName):
+ parts = packageName.split('.')
+ name = parts.pop()
+ if parts:
+ parent = '.'.join(parts)
+ setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+ """Yield non-empty/non-comment lines of a string or sequence"""
+ if isinstance(strs, six.string_types):
+ for s in strs.splitlines():
+ s = s.strip()
+ # skip blank lines/comments
+ if s and not s.startswith('#'):
+ yield s
+ else:
+ for ss in strs:
+ for s in yield_lines(ss):
+ yield s
+
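+# Behavior sketch: blank lines and '#'-prefixed comment lines are dropped,
+# and nested iterables are flattened, e.g.
+#
+#     list(yield_lines("a\n  # note\n\nb"))   # -> ['a', 'b']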
+
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+ r"""
+ (?P<name>[^-]+) (
+ -(?P<ver>[^-]+) (
+ -py(?P<pyver>[^-]+) (
+ -(?P<plat>.+)
+ )?
+ )?
+ )?
+ """,
+ re.VERBOSE | re.IGNORECASE,
+).match
+
+
+class EntryPoint:
+ """Object representing an advertised importable object"""
+
+ def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+ if not MODULE(module_name):
+ raise ValueError("Invalid module name", module_name)
+ self.name = name
+ self.module_name = module_name
+ self.attrs = tuple(attrs)
+ self.extras = tuple(extras)
+ self.dist = dist
+
+ def __str__(self):
+ s = "%s = %s" % (self.name, self.module_name)
+ if self.attrs:
+ s += ':' + '.'.join(self.attrs)
+ if self.extras:
+ s += ' [%s]' % ','.join(self.extras)
+ return s
+
+ def __repr__(self):
+ return "EntryPoint.parse(%r)" % str(self)
+
+ def load(self, require=True, *args, **kwargs):
+ """
+ Require packages for this EntryPoint, then resolve it.
+ """
+ if not require or args or kwargs:
+ warnings.warn(
+ "Parameters to load are deprecated. Call .resolve and "
+ ".require separately.",
+ PkgResourcesDeprecationWarning,
+ stacklevel=2,
+ )
+ if require:
+ self.require(*args, **kwargs)
+ return self.resolve()
+
+ def resolve(self):
+ """
+ Resolve the entry point from its module and attrs.
+ """
+ module = __import__(self.module_name, fromlist=['__name__'], level=0)
+ try:
+ return functools.reduce(getattr, self.attrs, module)
+ except AttributeError as exc:
+ raise ImportError(str(exc))
+
+ def require(self, env=None, installer=None):
+ if self.extras and not self.dist:
+ raise UnknownExtra("Can't require() without a distribution", self)
+
+ # Get the requirements for this entry point with all its extras and
+ # then resolve them. We have to pass `extras` along when resolving so
+ # that the working set knows what extras we want. Otherwise, for
+ # dist-info distributions, the working set will assume that the
+ # requirements for that extra are purely optional and skip over them.
+ reqs = self.dist.requires(self.extras)
+ items = working_set.resolve(reqs, env, installer, extras=self.extras)
+ list(map(working_set.add, items))
+
+ pattern = re.compile(
+ r'\s*'
+ r'(?P<name>.+?)\s*'
+ r'=\s*'
+ r'(?P<module>[\w.]+)\s*'
+ r'(:\s*(?P<attr>[\w.]+))?\s*'
+ r'(?P<extras>\[.*\])?\s*$'
+ )
+
+ @classmethod
+ def parse(cls, src, dist=None):
+ """Parse a single entry point from string `src`
+
+ Entry point syntax follows the form::
+
+ name = some.module:some.attr [extra1, extra2]
+
+ The entry name and module name are required, but the ``:attrs`` and
+ ``[extras]`` parts are optional
+ """
+ m = cls.pattern.match(src)
+ if not m:
+ msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+ raise ValueError(msg, src)
+ res = m.groupdict()
+ extras = cls._parse_extras(res['extras'])
+ attrs = res['attr'].split('.') if res['attr'] else ()
+ return cls(res['name'], res['module'], attrs, extras, dist)
+
+ @classmethod
+ def _parse_extras(cls, extras_spec):
+ if not extras_spec:
+ return ()
+ req = Requirement.parse('x' + extras_spec)
+ if req.specs:
+ raise ValueError()
+ return req.extras
+
+ @classmethod
+ def parse_group(cls, group, lines, dist=None):
+ """Parse an entry point group"""
+ if not MODULE(group):
+ raise ValueError("Invalid group name", group)
+ this = {}
+ for line in yield_lines(lines):
+ ep = cls.parse(line, dist)
+ if ep.name in this:
+ raise ValueError("Duplicate entry point", group, ep.name)
+ this[ep.name] = ep
+ return this
+
+ @classmethod
+ def parse_map(cls, data, dist=None):
+ """Parse a map of entry point groups"""
+ if isinstance(data, dict):
+ data = data.items()
+ else:
+ data = split_sections(data)
+ maps = {}
+ for group, lines in data:
+ if group is None:
+ if not lines:
+ continue
+ raise ValueError("Entry points must be listed in groups")
+ group = group.strip()
+ if group in maps:
+ raise ValueError("Duplicate group name", group)
+ maps[group] = cls.parse_group(group, lines, dist)
+ return maps
+
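+# Usage sketch (illustrative names): parse a single entry point and inspect
+# its parts.
+#
+#     ep = EntryPoint.parse('main = mypkg.cli:run [extra1]')
+#     ep.name, ep.module_name, ep.attrs, ep.extras
+#     # -> ('main', 'mypkg.cli', ('run',), ('extra1',))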
+
+def _remove_md5_fragment(location):
+ if not location:
+ return ''
+ parsed = urllib.parse.urlparse(location)
+ if parsed[-1].startswith('md5='):
+ return urllib.parse.urlunparse(parsed[:-1] + ('',))
+ return location
+
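+# Behavior sketch:
+#
+#     _remove_md5_fragment('http://host/pkg.egg#md5=abcd')
+#     # -> 'http://host/pkg.egg'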
+
+def _version_from_file(lines):
+ """
+ Given an iterable of lines from a Metadata file, return
+ the value of the Version field, if present, or None otherwise.
+ """
+ def is_version_line(line):
+ return line.lower().startswith('version:')
+ version_lines = filter(is_version_line, lines)
+ line = next(iter(version_lines), '')
+ _, _, value = line.partition(':')
+ return safe_version(value.strip()) or None
+
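+# Behavior sketch:
+#
+#     _version_from_file(['Name: foo', 'Version: 1.0'])   # -> '1.0'
+#     _version_from_file(['Name: foo'])                   # -> None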
+
+class Distribution:
+ """Wrap an actual or potential sys.path entry w/metadata"""
+ PKG_INFO = 'PKG-INFO'
+
+ def __init__(
+ self, location=None, metadata=None, project_name=None,
+ version=None, py_version=PY_MAJOR, platform=None,
+ precedence=EGG_DIST):
+ self.project_name = safe_name(project_name or 'Unknown')
+ if version is not None:
+ self._version = safe_version(version)
+ self.py_version = py_version
+ self.platform = platform
+ self.location = location
+ self.precedence = precedence
+ self._provider = metadata or empty_provider
+
+ @classmethod
+ def from_location(cls, location, basename, metadata=None, **kw):
+ project_name, version, py_version, platform = [None] * 4
+ basename, ext = os.path.splitext(basename)
+ if ext.lower() in _distributionImpl:
+ cls = _distributionImpl[ext.lower()]
+
+ match = EGG_NAME(basename)
+ if match:
+ project_name, version, py_version, platform = match.group(
+ 'name', 'ver', 'pyver', 'plat'
+ )
+ return cls(
+ location, metadata, project_name=project_name, version=version,
+ py_version=py_version, platform=platform, **kw
+ )._reload_version()
+
+ def _reload_version(self):
+ return self
+
+ @property
+ def hashcmp(self):
+ return (
+ self.parsed_version,
+ self.precedence,
+ self.key,
+ _remove_md5_fragment(self.location),
+ self.py_version or '',
+ self.platform or '',
+ )
+
+ def __hash__(self):
+ return hash(self.hashcmp)
+
+ def __lt__(self, other):
+ return self.hashcmp < other.hashcmp
+
+ def __le__(self, other):
+ return self.hashcmp <= other.hashcmp
+
+ def __gt__(self, other):
+ return self.hashcmp > other.hashcmp
+
+ def __ge__(self, other):
+ return self.hashcmp >= other.hashcmp
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ # It's not a Distribution, so they are not equal
+ return False
+ return self.hashcmp == other.hashcmp
+
+ def __ne__(self, other):
+ return not self == other
+
+ # These properties have to be lazy so that we don't have to load any
+ # metadata until/unless it's actually needed. (i.e., some distributions
+ # may not know their name or version without loading PKG-INFO)
+
+ @property
+ def key(self):
+ try:
+ return self._key
+ except AttributeError:
+ self._key = key = self.project_name.lower()
+ return key
+
+ @property
+ def parsed_version(self):
+ if not hasattr(self, "_parsed_version"):
+ self._parsed_version = parse_version(self.version)
+
+ return self._parsed_version
+
+ def _warn_legacy_version(self):
+ LV = packaging.version.LegacyVersion
+ is_legacy = isinstance(self._parsed_version, LV)
+ if not is_legacy:
+ return
+
+ # While an empty version is technically a legacy version and
+ # is not a valid PEP 440 version, it's also unlikely to
+ # actually come from someone and instead it is more likely that
+ # it comes from setuptools attempting to parse a filename and
+ # including it in the list. So for that we'll gate this warning
+ # on if the version is anything at all or not.
+ if not self.version:
+ return
+
+ tmpl = textwrap.dedent("""
+ '{project_name} ({version})' is being parsed as a legacy,
+ non-PEP 440 version. You may find odd behavior and
+ sort order. In particular it will be sorted as less
+ than 0.0. It is recommended to migrate to PEP 440
+ compatible versions.
+ """).strip().replace('\n', ' ')
+
+ warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
+
+ @property
+ def version(self):
+ try:
+ return self._version
+ except AttributeError:
+ version = self._get_version()
+ if version is None:
+ path = self._get_metadata_path_for_display(self.PKG_INFO)
+ msg = (
+ "Missing 'Version:' header and/or {} file at path: {}"
+ ).format(self.PKG_INFO, path)
+ raise ValueError(msg, self)
+
+ return version
+
+ @property
+ def _dep_map(self):
+ """
+ A map of extra to its list of (direct) requirements
+ for this distribution, including the null extra.
+ """
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._filter_extras(self._build_dep_map())
+ return self.__dep_map
+
+ @staticmethod
+ def _filter_extras(dm):
+ """
+ Given a mapping of extras to dependencies, strip off
+ environment markers and filter out any dependencies
+ not matching the markers.
+ """
+ for extra in list(filter(None, dm)):
+ new_extra = extra
+ reqs = dm.pop(extra)
+ new_extra, _, marker = extra.partition(':')
+ fails_marker = marker and (
+ invalid_marker(marker)
+ or not evaluate_marker(marker)
+ )
+ if fails_marker:
+ reqs = []
+ new_extra = safe_extra(new_extra) or None
+
+ dm.setdefault(new_extra, []).extend(reqs)
+ return dm
+
+ def _build_dep_map(self):
+ dm = {}
+ for name in 'requires.txt', 'depends.txt':
+ for extra, reqs in split_sections(self._get_metadata(name)):
+ dm.setdefault(extra, []).extend(parse_requirements(reqs))
+ return dm
+
+ def requires(self, extras=()):
+ """List of Requirements needed for this distro if `extras` are used"""
+ dm = self._dep_map
+ deps = []
+ deps.extend(dm.get(None, ()))
+ for ext in extras:
+ try:
+ deps.extend(dm[safe_extra(ext)])
+ except KeyError:
+ raise UnknownExtra(
+ "%s has no such extra feature %r" % (self, ext)
+ )
+ return deps
+
+ def _get_metadata_path_for_display(self, name):
+ """
+ Return the path to the given metadata file, if available.
+ """
+ try:
+ # We need to access _get_metadata_path() on the provider object
+ # directly rather than through this class's __getattr__()
+ # since _get_metadata_path() is marked private.
+ path = self._provider._get_metadata_path(name)
+
+ # Handle exceptions e.g. in case the distribution's metadata
+ # provider doesn't support _get_metadata_path().
+ except Exception:
+ return '[could not detect]'
+
+ return path
+
+ def _get_metadata(self, name):
+ if self.has_metadata(name):
+ for line in self.get_metadata_lines(name):
+ yield line
+
+ def _get_version(self):
+ lines = self._get_metadata(self.PKG_INFO)
+ version = _version_from_file(lines)
+
+ return version
+
+ def activate(self, path=None, replace=False):
+ """Ensure distribution is importable on `path` (default=sys.path)"""
+ if path is None:
+ path = sys.path
+ self.insert_on(path, replace=replace)
+ if path is sys.path:
+ fixup_namespace_packages(self.location)
+ for pkg in self._get_metadata('namespace_packages.txt'):
+ if pkg in sys.modules:
+ declare_namespace(pkg)
+
+ def egg_name(self):
+ """Return what this distribution's standard .egg filename should be"""
+ filename = "%s-%s-py%s" % (
+ to_filename(self.project_name), to_filename(self.version),
+ self.py_version or PY_MAJOR
+ )
+
+ if self.platform:
+ filename += '-' + self.platform
+ return filename
+
+ def __repr__(self):
+ if self.location:
+ return "%s (%s)" % (self, self.location)
+ else:
+ return str(self)
+
+ def __str__(self):
+ try:
+ version = getattr(self, 'version', None)
+ except ValueError:
+ version = None
+ version = version or "[unknown version]"
+ return "%s %s" % (self.project_name, version)
+
+ def __getattr__(self, attr):
+ """Delegate all unrecognized public attributes to .metadata provider"""
+ if attr.startswith('_'):
+ raise AttributeError(attr)
+ return getattr(self._provider, attr)
+
+ def __dir__(self):
+ return list(
+ set(super(Distribution, self).__dir__())
+ | set(
+ attr for attr in self._provider.__dir__()
+ if not attr.startswith('_')
+ )
+ )
+
+ if not hasattr(object, '__dir__'):
+ # python 2.7 not supported
+ del __dir__
+
+ @classmethod
+ def from_filename(cls, filename, metadata=None, **kw):
+ return cls.from_location(
+ _normalize_cached(filename), os.path.basename(filename), metadata,
+ **kw
+ )
+
+ def as_requirement(self):
+ """Return a ``Requirement`` that matches this distribution exactly"""
+ if isinstance(self.parsed_version, packaging.version.Version):
+ spec = "%s==%s" % (self.project_name, self.parsed_version)
+ else:
+ spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+ return Requirement.parse(spec)
+
+ def load_entry_point(self, group, name):
+ """Return the `name` entry point of `group` or raise ImportError"""
+ ep = self.get_entry_info(group, name)
+ if ep is None:
+ raise ImportError("Entry point %r not found" % ((group, name),))
+ return ep.load()
+
+ def get_entry_map(self, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ try:
+ ep_map = self._ep_map
+ except AttributeError:
+ ep_map = self._ep_map = EntryPoint.parse_map(
+ self._get_metadata('entry_points.txt'), self
+ )
+ if group is not None:
+ return ep_map.get(group, {})
+ return ep_map
+
+ def get_entry_info(self, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return self.get_entry_map(group).get(name)
+
+ def insert_on(self, path, loc=None, replace=False):
+ """Ensure self.location is on path
+
+ If replace=False (default):
+ - If location is already in path anywhere, do nothing.
+ - Else:
+ - If it's an egg and its parent directory is on path,
+ insert just ahead of the parent.
+ - Else: add to the end of path.
+ If replace=True:
+ - If location is already on path anywhere (not eggs)
+ or higher priority than its parent (eggs)
+ do nothing.
+ - Else:
+ - If it's an egg and its parent directory is on path,
+ insert just ahead of the parent,
+ removing any lower-priority entries.
+ - Else: add it to the front of path.
+ """
+
+ loc = loc or self.location
+ if not loc:
+ return
+
+ nloc = _normalize_cached(loc)
+ bdir = os.path.dirname(nloc)
+ npath = [(p and _normalize_cached(p) or p) for p in path]
+
+ for p, item in enumerate(npath):
+ if item == nloc:
+ if replace:
+ break
+ else:
+ # don't modify path (even removing duplicates) if
+ # found and not replace
+ return
+ elif item == bdir and self.precedence == EGG_DIST:
+ # if it's an .egg, give it precedence over its directory
+ # UNLESS it's already been added to sys.path and replace=False
+ if (not replace) and nloc in npath[p:]:
+ return
+ if path is sys.path:
+ self.check_version_conflict()
+ path.insert(p, loc)
+ npath.insert(p, nloc)
+ break
+ else:
+ if path is sys.path:
+ self.check_version_conflict()
+ if replace:
+ path.insert(0, loc)
+ else:
+ path.append(loc)
+ return
+
+ # p is the spot where we found or inserted loc; now remove duplicates
+ while True:
+ try:
+ np = npath.index(nloc, p + 1)
+ except ValueError:
+ break
+ else:
+ del npath[np], path[np]
+ # continue the scan from the index of the removed entry
+ p = np
+
+ return
+
+ def check_version_conflict(self):
+ if self.key == 'setuptools':
+ # ignore the inevitable setuptools self-conflicts :(
+ return
+
+ nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+ loc = normalize_path(self.location)
+ for modname in self._get_metadata('top_level.txt'):
+ if (modname not in sys.modules or modname in nsp
+ or modname in _namespace_packages):
+ continue
+ if modname in ('pkg_resources', 'setuptools', 'site'):
+ continue
+ fn = getattr(sys.modules[modname], '__file__', None)
+ if fn and (normalize_path(fn).startswith(loc) or
+ fn.startswith(self.location)):
+ continue
+ issue_warning(
+ "Module %s was already imported from %s, but %s is being added"
+ " to sys.path" % (modname, fn, self.location),
+ )
+
+ def has_version(self):
+ try:
+ self.version
+ except ValueError:
+ issue_warning("Unbuilt egg for " + repr(self))
+ return False
+ return True
+
+ def clone(self, **kw):
+ """Copy this distribution, substituting in any changed keyword args"""
+ names = 'project_name version py_version platform location precedence'
+ for attr in names.split():
+ kw.setdefault(attr, getattr(self, attr, None))
+ kw.setdefault('metadata', self._provider)
+ return self.__class__(**kw)
+
+ @property
+ def extras(self):
+ return [dep for dep in self._dep_map if dep]
+
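+# Usage sketch (hypothetical path): wrap an egg filename and activate it.
+#
+#     dist = Distribution.from_filename('/plugins/Foo-1.0-py3.8.egg')
+#     dist.project_name, dist.version   # -> ('Foo', '1.0')
+#     dist.activate()                   # ensure the location is on sys.path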
+
+class EggInfoDistribution(Distribution):
+ def _reload_version(self):
+ """
+ Packages installed by distutils (e.g. numpy or scipy)
+ use an old safe_version, so their version numbers
+ can get mangled when converted to filenames (e.g.,
+ 1.11.0.dev0+2329eae becomes 1.11.0.dev0_2329eae).
+ Such distributions will not be parsed properly
+ downstream by Distribution and safe_version, so
+ take an extra step and try to get the version number
+ from the metadata file itself instead of the filename.
+ """
+ md_version = self._get_version()
+ if md_version:
+ self._version = md_version
+ return self
+
+
+class DistInfoDistribution(Distribution):
+ """
+ Wrap an actual or potential sys.path entry
+ w/metadata, .dist-info style.
+ """
+ PKG_INFO = 'METADATA'
+ EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+ @property
+ def _parsed_pkg_info(self):
+ """Parse and cache metadata"""
+ try:
+ return self._pkg_info
+ except AttributeError:
+ metadata = self.get_metadata(self.PKG_INFO)
+ self._pkg_info = email.parser.Parser().parsestr(metadata)
+ return self._pkg_info
+
+ @property
+ def _dep_map(self):
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._compute_dependencies()
+ return self.__dep_map
+
+ def _compute_dependencies(self):
+ """Recompute this distribution's dependencies."""
+ dm = self.__dep_map = {None: []}
+
+ reqs = []
+ # Including any condition expressions
+ for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+ reqs.extend(parse_requirements(req))
+
+ def reqs_for_extra(extra):
+ for req in reqs:
+ if not req.marker or req.marker.evaluate({'extra': extra}):
+ yield req
+
+ common = frozenset(reqs_for_extra(None))
+ dm[None].extend(common)
+
+ for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+ s_extra = safe_extra(extra.strip())
+ dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
+
+ return dm
+
+
+_distributionImpl = {
+ '.egg': Distribution,
+ '.egg-info': EggInfoDistribution,
+ '.dist-info': DistInfoDistribution,
+}
+
+
+def issue_warning(*args, **kw):
+ level = 1
+ g = globals()
+ try:
+ # find the first stack frame that is *not* code in
+ # the pkg_resources module, to use for the warning
+ while sys._getframe(level).f_globals is g:
+ level += 1
+ except ValueError:
+ pass
+ warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+class RequirementParseError(ValueError):
+ def __str__(self):
+ return ' '.join(self.args)
+
+
+def parse_requirements(strs):
+ """Yield ``Requirement`` objects for each specification in `strs`
+
+ `strs` must be a string, or a (possibly-nested) iterable thereof.
+ """
+ # create a steppable iterator, so we can handle \-continuations
+ lines = iter(yield_lines(strs))
+
+ for line in lines:
+ # Drop comments -- a hash without a space may be in a URL.
+ if ' #' in line:
+ line = line[:line.find(' #')]
+ # If there is a line continuation, drop it, and append the next line.
+ if line.endswith('\\'):
+ line = line[:-2].strip()
+ try:
+ line += next(lines)
+ except StopIteration:
+ return
+ yield Requirement(line)
+
+
+class Requirement(packaging.requirements.Requirement):
+ def __init__(self, requirement_string):
+ """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+ try:
+ super(Requirement, self).__init__(requirement_string)
+ except packaging.requirements.InvalidRequirement as e:
+ raise RequirementParseError(str(e))
+ self.unsafe_name = self.name
+ project_name = safe_name(self.name)
+ self.project_name, self.key = project_name, project_name.lower()
+ self.specs = [
+ (spec.operator, spec.version) for spec in self.specifier]
+ self.extras = tuple(map(safe_extra, self.extras))
+ self.hashCmp = (
+ self.key,
+ self.url,
+ self.specifier,
+ frozenset(self.extras),
+ str(self.marker) if self.marker else None,
+ )
+ self.__hash = hash(self.hashCmp)
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, Requirement) and
+ self.hashCmp == other.hashCmp
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __contains__(self, item):
+ if isinstance(item, Distribution):
+ if item.key != self.key:
+ return False
+
+ item = item.version
+
+ # Allow prereleases always in order to match the previous behavior of
+ # this method. In the future this should be smarter and follow PEP 440
+ # more accurately.
+ return self.specifier.contains(item, prereleases=True)
+
+ def __hash__(self):
+ return self.__hash
+
+ def __repr__(self):
+ return "Requirement.parse(%r)" % str(self)
+
+ @staticmethod
+ def parse(s):
+ req, = parse_requirements(s)
+ return req
+
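+# Usage sketch: parse a requirement and test a version against it.
+#
+#     req = Requirement.parse('pip>=20.0')
+#     req.key, req.specs        # -> ('pip', [('>=', '20.0')])
+#     '21.1' in req             # -> True (prereleases always allowed here)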
+
+def _always_object(classes):
+ """
+ Ensure object appears in the mro even
+ for old-style classes.
+ """
+ if object not in classes:
+ return classes + (object,)
+ return classes
+
+
+def _find_adapter(registry, ob):
+ """Return an adapter factory for `ob` from `registry`"""
+ types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
+ for t in types:
+ if t in registry:
+ return registry[t]
+
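+# Behavior sketch: _find_adapter walks the MRO of `ob`, so registrations on
+# `object` (NullProvider, find_nothing, null_ns_handler) act as fallbacks for
+# any loader or importer type without a more specific handler, e.g.
+#
+#     importer = zipimport.zipimporter('/path/to/some.egg')  # hypothetical
+#     _find_adapter(_distribution_finders, importer)  # -> find_eggs_in_zip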
+
+def ensure_directory(path):
+ """Ensure that the parent directory of `path` exists"""
+ dirname = os.path.dirname(path)
+ py31compat.makedirs(dirname, exist_ok=True)
+
+
+def _bypass_ensure_directory(path):
+ """Sandbox-bypassing version of ensure_directory()"""
+ if not WRITE_SUPPORT:
+ raise IOError('"os.mkdir" not supported on this platform.')
+ dirname, filename = split(path)
+ if dirname and filename and not isdir(dirname):
+ _bypass_ensure_directory(dirname)
+ try:
+ mkdir(dirname, 0o755)
+ except FileExistsError:
+ pass
+
+
+def split_sections(s):
+ """Split a string or iterable thereof into (section, content) pairs
+
+ Each ``section`` is a stripped version of the section header ("[section]")
+ and each ``content`` is a list of stripped lines excluding blank lines and
+ comment-only lines. If there are any such lines before the first section
+ header, they're returned in a first ``section`` of ``None``.
+ """
+ section = None
+ content = []
+ for line in yield_lines(s):
+ if line.startswith("["):
+ if line.endswith("]"):
+ if section or content:
+ yield section, content
+ section = line[1:-1].strip()
+ content = []
+ else:
+ raise ValueError("Invalid section heading", line)
+ else:
+ content.append(line)
+
+ # wrap up last segment
+ yield section, content
+
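+# Behavior sketch:
+#
+#     list(split_sections(['a', '[console_scripts]', 'b = pkg:fn']))
+#     # -> [(None, ['a']), ('console_scripts', ['b = pkg:fn'])]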
+
+def _mkstemp(*args, **kw):
+ old_open = os.open
+ try:
+ # temporarily bypass sandboxing
+ os.open = os_open
+ return tempfile.mkstemp(*args, **kw)
+ finally:
+ # and then put it back
+ os.open = old_open
+
+
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+ f(*args, **kwargs)
+ return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+ "Set up global resource manager (deliberately not state-saved)"
+ manager = ResourceManager()
+ g['_manager'] = manager
+ g.update(
+ (name, getattr(manager, name))
+ for name in dir(manager)
+ if not name.startswith('_')
+ )
+
+
+@_call_aside
+def _initialize_master_working_set():
+ """
+ Prepare the master working set and make the ``require()``
+ API available.
+
+ This function has explicit effects on the global state
+ of pkg_resources. It is intended to be invoked once at
+ the initialization of this module.
+
+ Invocation by other packages is unsupported and done
+ at their own risk.
+ """
+ working_set = WorkingSet._build_master()
+ _declare_state('object', working_set=working_set)
+
+ require = working_set.require
+ iter_entry_points = working_set.iter_entry_points
+ add_activation_listener = working_set.subscribe
+ run_script = working_set.run_script
+ # backward compatibility
+ run_main = run_script
+ # Activate all distributions already on sys.path with replace=False and
+ # ensure that all distributions added to the working set in the future
+ # (e.g. by calling ``require()``) will get activated as well,
+ # with higher priority (replace=True).
+ tuple(
+ dist.activate(replace=False)
+ for dist in working_set
+ )
+ add_activation_listener(
+ lambda dist: dist.activate(replace=True),
+ existing=False,
+ )
+ working_set.entries = []
+ # match order
+ list(map(working_set.add_entry, sys.path))
+ globals().update(locals())
+
+class PkgResourcesDeprecationWarning(Warning):
+ """
+ Base class for warning about deprecations in ``pkg_resources``
+
+ This class is not derived from ``DeprecationWarning``, and as such is
+ visible by default.
+ """
diff --git a/third_party/python/pip/pip/_vendor/pkg_resources/py31compat.py b/third_party/python/pip/pip/_vendor/pkg_resources/py31compat.py
new file mode 100644
index 0000000000..a2d3007ceb
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pkg_resources/py31compat.py
@@ -0,0 +1,23 @@
+import os
+import errno
+import sys
+
+from pip._vendor import six
+
+
+def _makedirs_31(path, exist_ok=False):
+ try:
+ os.makedirs(path)
+ except OSError as exc:
+ if not exist_ok or exc.errno != errno.EEXIST:
+ raise
+
+
+# rely on compatibility behavior until mode considerations
+# and exist_ok considerations are disentangled.
+# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663
+needs_makedirs = (
+ six.PY2 or
+ (3, 4) <= sys.version_info < (3, 4, 1)
+)
+makedirs = _makedirs_31 if needs_makedirs else os.makedirs
diff --git a/third_party/python/pip/pip/_vendor/platformdirs/__init__.py b/third_party/python/pip/pip/_vendor/platformdirs/__init__.py
new file mode 100644
index 0000000000..82d907163c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/platformdirs/__init__.py
@@ -0,0 +1,342 @@
+"""
+Utilities for determining application-specific dirs. See <https://github.com/platformdirs/platformdirs> for details and
+usage.
+"""
+from __future__ import annotations
+
+import os
+import sys
+from pathlib import Path
+
+if sys.version_info >= (3, 8): # pragma: no cover (py38+)
+ from typing import Literal
+else: # pragma: no cover (py38+)
+ from pip._vendor.typing_extensions import Literal
+
+from .api import PlatformDirsABC
+from .version import __version__
+from .version import __version_tuple__ as __version_info__
+
+
+def _set_platform_dir_class() -> type[PlatformDirsABC]:
+ if sys.platform == "win32":
+ from pip._vendor.platformdirs.windows import Windows as Result
+ elif sys.platform == "darwin":
+ from pip._vendor.platformdirs.macos import MacOS as Result
+ else:
+ from pip._vendor.platformdirs.unix import Unix as Result
+
+ if os.getenv("ANDROID_DATA") == "/data" and os.getenv("ANDROID_ROOT") == "/system":
+
+ if os.getenv("SHELL") or os.getenv("PREFIX"):
+ return Result
+
+ from pip._vendor.platformdirs.android import _android_folder
+
+ if _android_folder() is not None:
+ from pip._vendor.platformdirs.android import Android
+
+ return Android # return to avoid redefinition of result
+
+ return Result
+
+
+PlatformDirs = _set_platform_dir_class() #: Currently active platform
+AppDirs = PlatformDirs #: Backwards compatibility with appdirs
+
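+# Usage sketch (illustrative names; concrete paths vary by platform and
+# environment):
+#
+#     dirs = PlatformDirs(appname="MyApp", appauthor="MyCompany")
+#     dirs.user_data_dir    # e.g. ~/.local/share/MyApp on Linux
+#     dirs.user_cache_dir   # e.g. ~/.cache/MyApp on Linux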
+
+def user_data_dir(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ roaming: bool = False,
+) -> str:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
+ :returns: data directory tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_dir
+
+
+def site_data_dir(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ multipath: bool = False,
+) -> str:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
+ :returns: data directory shared by users
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_dir
+
+
+def user_config_dir(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ roaming: bool = False,
+) -> str:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
+ :returns: config directory tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_dir
+
+
+def site_config_dir(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ multipath: bool = False,
+) -> str:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
+ :returns: config directory shared by the users
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_dir
+
+
+def user_cache_dir(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ opinion: bool = True,
+) -> str:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
+ :returns: cache directory tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_dir
+
+
+def user_state_dir(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ roaming: bool = False,
+) -> str:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
+ :returns: state directory tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_dir
+
+
+def user_log_dir(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ opinion: bool = True,
+) -> str:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
+ :returns: log directory tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_dir
+
+
+def user_documents_dir() -> str:
+ """
+ :returns: documents directory tied to the user
+ """
+ return PlatformDirs().user_documents_dir
+
+
+def user_runtime_dir(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ opinion: bool = True,
+) -> str:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
+ :returns: runtime directory tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_dir
+
+
+def user_data_path(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ roaming: bool = False,
+) -> Path:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
+ :returns: data path tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_data_path
+
+
+def site_data_path(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ multipath: bool = False,
+) -> Path:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
+ :returns: data path shared by users
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_data_path
+
+
+def user_config_path(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ roaming: bool = False,
+) -> Path:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
+ :returns: config path tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_config_path
+
+
+def site_config_path(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ multipath: bool = False,
+) -> Path:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param multipath: See `multipath <platformdirs.api.PlatformDirsABC.multipath>`.
+ :returns: config path shared by the users
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, multipath=multipath).site_config_path
+
+
+def user_cache_path(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ opinion: bool = True,
+) -> Path:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
+ :returns: cache path tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_cache_path
+
+
+def user_state_path(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ roaming: bool = False,
+) -> Path:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param roaming: See `roaming <platformdirs.api.PlatformDirsABC.roaming>`.
+ :returns: state path tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, roaming=roaming).user_state_path
+
+
+def user_log_path(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ opinion: bool = True,
+) -> Path:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
+ :returns: log path tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_log_path
+
+
+def user_documents_path() -> Path:
+ """
+ :returns: documents path tied to the user
+ """
+ return PlatformDirs().user_documents_path
+
+
+def user_runtime_path(
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ opinion: bool = True,
+) -> Path:
+ """
+ :param appname: See `appname <platformdirs.api.PlatformDirsABC.appname>`.
+ :param appauthor: See `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`.
+ :param version: See `version <platformdirs.api.PlatformDirsABC.version>`.
+ :param opinion: See `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
+ :returns: runtime path tied to the user
+ """
+ return PlatformDirs(appname=appname, appauthor=appauthor, version=version, opinion=opinion).user_runtime_path
+
+
+__all__ = [
+ "__version__",
+ "__version_info__",
+ "PlatformDirs",
+ "AppDirs",
+ "PlatformDirsABC",
+ "user_data_dir",
+ "user_config_dir",
+ "user_cache_dir",
+ "user_state_dir",
+ "user_log_dir",
+ "user_documents_dir",
+ "user_runtime_dir",
+ "site_data_dir",
+ "site_config_dir",
+ "user_data_path",
+ "user_config_path",
+ "user_cache_path",
+ "user_state_path",
+ "user_log_path",
+ "user_documents_path",
+ "user_runtime_path",
+ "site_data_path",
+ "site_config_path",
+]
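
The helpers above are thin wrappers that build a fresh PlatformDirs per call and return the matching property. A minimal usage sketch (import path as vendored here; the printed locations vary by platform):

    from pip._vendor.platformdirs import user_cache_dir, user_config_path

    # Each call constructs PlatformDirs(appname=..., appauthor=...) internally,
    # so no state is shared between invocations.
    print(user_cache_dir("MyApp", "MyCompany"))                   # str, e.g. ~/.cache/MyApp on Linux
    print(user_config_path("MyApp", "MyCompany", version="1.0"))  # pathlib.Path variant
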
diff --git a/third_party/python/pip/pip/_vendor/platformdirs/__main__.py b/third_party/python/pip/pip/_vendor/platformdirs/__main__.py
new file mode 100644
index 0000000000..9c54bfb438
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/platformdirs/__main__.py
@@ -0,0 +1,46 @@
+from __future__ import annotations
+
+from pip._vendor.platformdirs import PlatformDirs, __version__
+
+PROPS = (
+ "user_data_dir",
+ "user_config_dir",
+ "user_cache_dir",
+ "user_state_dir",
+ "user_log_dir",
+ "user_documents_dir",
+ "user_runtime_dir",
+ "site_data_dir",
+ "site_config_dir",
+)
+
+
+def main() -> None:
+ app_name = "MyApp"
+ app_author = "MyCompany"
+
+ print(f"-- platformdirs {__version__} --")
+
+ print("-- app dirs (with optional 'version')")
+ dirs = PlatformDirs(app_name, app_author, version="1.0")
+ for prop in PROPS:
+ print(f"{prop}: {getattr(dirs, prop)}")
+
+ print("\n-- app dirs (without optional 'version')")
+ dirs = PlatformDirs(app_name, app_author)
+ for prop in PROPS:
+ print(f"{prop}: {getattr(dirs, prop)}")
+
+ print("\n-- app dirs (without optional 'appauthor')")
+ dirs = PlatformDirs(app_name)
+ for prop in PROPS:
+ print(f"{prop}: {getattr(dirs, prop)}")
+
+ print("\n-- app dirs (with disabled 'appauthor')")
+ dirs = PlatformDirs(app_name, appauthor=False)
+ for prop in PROPS:
+ print(f"{prop}: {getattr(dirs, prop)}")
+
+
+if __name__ == "__main__":
+ main()
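
main() prints every property in PROPS for a few PlatformDirs configurations; the same loop works for dumping any instance. A sketch reusing the vendored names (importing __main__ is safe because of the __name__ guard):

    from pip._vendor.platformdirs import PlatformDirs
    from pip._vendor.platformdirs.__main__ import PROPS

    dirs = PlatformDirs("MyApp", "MyCompany", version="1.0")
    for prop in PROPS:  # the nine directory properties listed above
        print(f"{prop}: {getattr(dirs, prop)}")
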
diff --git a/third_party/python/pip/pip/_vendor/platformdirs/android.py b/third_party/python/pip/pip/_vendor/platformdirs/android.py
new file mode 100644
index 0000000000..eda8093512
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/platformdirs/android.py
@@ -0,0 +1,120 @@
+from __future__ import annotations
+
+import os
+import re
+import sys
+from functools import lru_cache
+from typing import cast
+
+from .api import PlatformDirsABC
+
+
+class Android(PlatformDirsABC):
+ """
+ Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_. Makes use of the
+ `appname <platformdirs.api.PlatformDirsABC.appname>` and
+ `version <platformdirs.api.PlatformDirsABC.version>`.
+ """
+
+ @property
+ def user_data_dir(self) -> str:
+ """:return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
+ return self._append_app_name_and_version(cast(str, _android_folder()), "files")
+
+ @property
+ def site_data_dir(self) -> str:
+ """:return: data directory shared by users, same as `user_data_dir`"""
+ return self.user_data_dir
+
+ @property
+ def user_config_dir(self) -> str:
+ """
+ :return: config directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
+ """
+ return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
+
+ @property
+ def site_config_dir(self) -> str:
+ """:return: config directory shared by the users, same as `user_config_dir`"""
+ return self.user_config_dir
+
+ @property
+ def user_cache_dir(self) -> str:
+ """:return: cache directory tied to the user, e.g. e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>``"""
+ return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
+
+ @property
+ def user_state_dir(self) -> str:
+ """:return: state directory tied to the user, same as `user_data_dir`"""
+ return self.user_data_dir
+
+ @property
+ def user_log_dir(self) -> str:
+ """
+ :return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
+ e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
+ """
+ path = self.user_cache_dir
+ if self.opinion:
+ path = os.path.join(path, "log")
+ return path
+
+ @property
+ def user_documents_dir(self) -> str:
+ """
+ :return: documents directory tied to the user, e.g. ``/storage/emulated/0/Documents``
+ """
+ return _android_documents_folder()
+
+ @property
+ def user_runtime_dir(self) -> str:
+ """
+ :return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
+ e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
+ """
+ path = self.user_cache_dir
+ if self.opinion:
+ path = os.path.join(path, "tmp")
+ return path
+
+
+@lru_cache(maxsize=1)
+def _android_folder() -> str | None:
+ """:return: base folder for the Android OS or None if cannot be found"""
+ try:
+ # First try to get path to android app via pyjnius
+ from jnius import autoclass
+
+ Context = autoclass("android.content.Context") # noqa: N806
+ result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
+ except Exception:
+ # if that fails, look for an Android-style folder path on sys.path
+ pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
+ for path in sys.path:
+ if pattern.match(path):
+ result = path.split("/files")[0]
+ break
+ else:
+ result = None
+ return result
+
+
+@lru_cache(maxsize=1)
+def _android_documents_folder() -> str:
+ """:return: documents folder for the Android OS"""
+ # Get directories with pyjnius
+ try:
+ from jnius import autoclass
+
+ Context = autoclass("android.content.Context") # noqa: N806
+ Environment = autoclass("android.os.Environment") # noqa: N806
+ documents_dir: str = Context.getExternalFilesDir(Environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
+ except Exception:
+ documents_dir = "/storage/emulated/0/Documents"
+
+ return documents_dir
+
+
+__all__ = [
+ "Android",
+]
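
When pyjnius is unavailable, _android_folder() falls back to scanning sys.path for a path shaped like an app sandbox. A sketch of that regex in isolation (the candidate path is a hypothetical example):

    import re

    pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
    candidate = "/data/user/0/org.example.app/files/lib/python3.11"  # hypothetical sys.path entry
    if pattern.match(candidate):
        # Everything before "/files" is the app's base folder.
        print(candidate.split("/files")[0])  # /data/user/0/org.example.app
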
diff --git a/third_party/python/pip/pip/_vendor/platformdirs/api.py b/third_party/python/pip/pip/_vendor/platformdirs/api.py
new file mode 100644
index 0000000000..6f6e2c2c69
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/platformdirs/api.py
@@ -0,0 +1,156 @@
+from __future__ import annotations
+
+import os
+import sys
+from abc import ABC, abstractmethod
+from pathlib import Path
+
+if sys.version_info >= (3, 8): # pragma: no branch
+ from typing import Literal # pragma: no cover
+
+
+class PlatformDirsABC(ABC):
+ """
+ Abstract base class for platform directories.
+ """
+
+ def __init__(
+ self,
+ appname: str | None = None,
+ appauthor: str | None | Literal[False] = None,
+ version: str | None = None,
+ roaming: bool = False,
+ multipath: bool = False,
+ opinion: bool = True,
+ ):
+ """
+ Create a new platform directory.
+
+ :param appname: See `appname`.
+ :param appauthor: See `appauthor`.
+ :param version: See `version`.
+ :param roaming: See `roaming`.
+ :param multipath: See `multipath`.
+ :param opinion: See `opinion`.
+ """
+ self.appname = appname #: The name of the application.
+ self.appauthor = appauthor
+ """
+ The name of the app author or distributing body for this application. Typically, it is the owning company name.
+ Defaults to `appname`. You may pass ``False`` to disable it.
+ """
+ self.version = version
+ """
+ An optional version path element to append to the path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this would typically be ``<major>.<minor>``.
+ """
+ self.roaming = roaming
+ """
+ Whether to use the roaming appdata directory on Windows. That means that for users on a Windows network setup
+ for roaming profiles, this user data will be synced on login (see
+ `here <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>`_).
+ """
+ self.multipath = multipath
+ """
+ An optional parameter, only applicable to Unix/Linux, which indicates that the entire list of data dirs should be
+ returned. By default, only the first item is returned.
+ """
+ self.opinion = opinion #: A flag indicating whether to use opinionated values.
+
+ def _append_app_name_and_version(self, *base: str) -> str:
+ params = list(base[1:])
+ if self.appname:
+ params.append(self.appname)
+ if self.version:
+ params.append(self.version)
+ return os.path.join(base[0], *params)
+
+ @property
+ @abstractmethod
+ def user_data_dir(self) -> str:
+ """:return: data directory tied to the user"""
+
+ @property
+ @abstractmethod
+ def site_data_dir(self) -> str:
+ """:return: data directory shared by users"""
+
+ @property
+ @abstractmethod
+ def user_config_dir(self) -> str:
+ """:return: config directory tied to the user"""
+
+ @property
+ @abstractmethod
+ def site_config_dir(self) -> str:
+ """:return: config directory shared by the users"""
+
+ @property
+ @abstractmethod
+ def user_cache_dir(self) -> str:
+ """:return: cache directory tied to the user"""
+
+ @property
+ @abstractmethod
+ def user_state_dir(self) -> str:
+ """:return: state directory tied to the user"""
+
+ @property
+ @abstractmethod
+ def user_log_dir(self) -> str:
+ """:return: log directory tied to the user"""
+
+ @property
+ @abstractmethod
+ def user_documents_dir(self) -> str:
+ """:return: documents directory tied to the user"""
+
+ @property
+ @abstractmethod
+ def user_runtime_dir(self) -> str:
+ """:return: runtime directory tied to the user"""
+
+ @property
+ def user_data_path(self) -> Path:
+ """:return: data path tied to the user"""
+ return Path(self.user_data_dir)
+
+ @property
+ def site_data_path(self) -> Path:
+ """:return: data path shared by users"""
+ return Path(self.site_data_dir)
+
+ @property
+ def user_config_path(self) -> Path:
+ """:return: config path tied to the user"""
+ return Path(self.user_config_dir)
+
+ @property
+ def site_config_path(self) -> Path:
+ """:return: config path shared by the users"""
+ return Path(self.site_config_dir)
+
+ @property
+ def user_cache_path(self) -> Path:
+ """:return: cache path tied to the user"""
+ return Path(self.user_cache_dir)
+
+ @property
+ def user_state_path(self) -> Path:
+ """:return: state path tied to the user"""
+ return Path(self.user_state_dir)
+
+ @property
+ def user_log_path(self) -> Path:
+ """:return: log path tied to the user"""
+ return Path(self.user_log_dir)
+
+ @property
+ def user_documents_path(self) -> Path:
+ """:return: documents path tied to the user"""
+ return Path(self.user_documents_dir)
+
+ @property
+ def user_runtime_path(self) -> Path:
+ """:return: runtime path tied to the user"""
+ return Path(self.user_runtime_dir)
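
Concrete backends only implement the nine abstract *_dir string properties; the *_path variants above wrap them in pathlib.Path automatically. A toy subclass, purely illustrative (FlatDirs and ~/.myapp are made up for the sketch):

    import os
    from pip._vendor.platformdirs.api import PlatformDirsABC

    class FlatDirs(PlatformDirsABC):
        """Toy backend putting everything under ~/.myapp -- not a real platform."""

        @property
        def user_data_dir(self) -> str:
            # _append_app_name_and_version adds appname and version when set.
            return self._append_app_name_and_version(os.path.expanduser("~/.myapp"))

        # Alias the remaining abstract properties to the same base for brevity.
        site_data_dir = user_data_dir
        user_config_dir = user_data_dir
        site_config_dir = user_data_dir
        user_cache_dir = user_data_dir
        user_state_dir = user_data_dir
        user_log_dir = user_data_dir
        user_documents_dir = user_data_dir
        user_runtime_dir = user_data_dir

    print(FlatDirs(appname="demo", version="1.0").user_data_path)  # ~/.myapp/demo/1.0 as a Path
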
diff --git a/third_party/python/pip/pip/_vendor/platformdirs/macos.py b/third_party/python/pip/pip/_vendor/platformdirs/macos.py
new file mode 100644
index 0000000000..a01337c776
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/platformdirs/macos.py
@@ -0,0 +1,64 @@
+from __future__ import annotations
+
+import os
+
+from .api import PlatformDirsABC
+
+
+class MacOS(PlatformDirsABC):
+ """
+ Platform directories for the macOS operating system. Follows the guidance from `Apple documentation
+ <https://developer.apple.com/library/archive/documentation/FileManagement/Conceptual/FileSystemProgrammingGuide/MacOSXDirectories/MacOSXDirectories.html>`_.
+ Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>` and
+ `version <platformdirs.api.PlatformDirsABC.version>`.
+ """
+
+ @property
+ def user_data_dir(self) -> str:
+ """:return: data directory tied to the user, e.g. ``~/Library/Application Support/$appname/$version``"""
+ return self._append_app_name_and_version(os.path.expanduser("~/Library/Application Support/"))
+
+ @property
+ def site_data_dir(self) -> str:
+ """:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``"""
+ return self._append_app_name_and_version("/Library/Application Support")
+
+ @property
+ def user_config_dir(self) -> str:
+ """:return: config directory tied to the user, e.g. ``~/Library/Preferences/$appname/$version``"""
+ return self._append_app_name_and_version(os.path.expanduser("~/Library/Preferences/"))
+
+ @property
+ def site_config_dir(self) -> str:
+ """:return: config directory shared by the users, e.g. ``/Library/Preferences/$appname``"""
+ return self._append_app_name_and_version("/Library/Preferences")
+
+ @property
+ def user_cache_dir(self) -> str:
+ """:return: cache directory tied to the user, e.g. ``~/Library/Caches/$appname/$version``"""
+ return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches"))
+
+ @property
+ def user_state_dir(self) -> str:
+ """:return: state directory tied to the user, same as `user_data_dir`"""
+ return self.user_data_dir
+
+ @property
+ def user_log_dir(self) -> str:
+ """:return: log directory tied to the user, e.g. ``~/Library/Logs/$appname/$version``"""
+ return self._append_app_name_and_version(os.path.expanduser("~/Library/Logs"))
+
+ @property
+ def user_documents_dir(self) -> str:
+ """:return: documents directory tied to the user, e.g. ``~/Documents``"""
+ return os.path.expanduser("~/Documents")
+
+ @property
+ def user_runtime_dir(self) -> str:
+ """:return: runtime directory tied to the user, e.g. ``~/Library/Caches/TemporaryItems/$appname/$version``"""
+ return self._append_app_name_and_version(os.path.expanduser("~/Library/Caches/TemporaryItems"))
+
+
+__all__ = [
+ "MacOS",
+]
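
The macOS backend is plain path concatenation over fixed Library locations. A quick sketch of the resulting shapes (user paths expand against $HOME at runtime):

    from pip._vendor.platformdirs.macos import MacOS

    dirs = MacOS(appname="MyApp", version="1.0")
    print(dirs.user_data_dir)   # <home>/Library/Application Support/MyApp/1.0
    print(dirs.user_cache_dir)  # <home>/Library/Caches/MyApp/1.0
    print(dirs.site_data_dir)   # /Library/Application Support/MyApp/1.0
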
diff --git a/third_party/python/pip/pip/_vendor/platformdirs/unix.py b/third_party/python/pip/pip/_vendor/platformdirs/unix.py
new file mode 100644
index 0000000000..9aca5a0305
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/platformdirs/unix.py
@@ -0,0 +1,181 @@
+from __future__ import annotations
+
+import os
+import sys
+from configparser import ConfigParser
+from pathlib import Path
+
+from .api import PlatformDirsABC
+
+if sys.platform.startswith("linux"): # pragma: no branch # no op check, only to please the type checker
+ from os import getuid
+else:
+
+ def getuid() -> int:
+ raise RuntimeError("should only be used on Linux")
+
+
+class Unix(PlatformDirsABC):
+ """
+ On Unix/Linux, we follow the
+ `XDG Basedir Spec <https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html>`_. The spec allows
+ overriding directories with environment variables. The examples shown are the default values, alongside the name of
+ the environment variable that overrides them. Makes use of the
+ `appname <platformdirs.api.PlatformDirsABC.appname>`,
+ `version <platformdirs.api.PlatformDirsABC.version>`,
+ `multipath <platformdirs.api.PlatformDirsABC.multipath>`,
+ `opinion <platformdirs.api.PlatformDirsABC.opinion>`.
+ """
+
+ @property
+ def user_data_dir(self) -> str:
+ """
+ :return: data directory tied to the user, e.g. ``~/.local/share/$appname/$version`` or
+ ``$XDG_DATA_HOME/$appname/$version``
+ """
+ path = os.environ.get("XDG_DATA_HOME", "")
+ if not path.strip():
+ path = os.path.expanduser("~/.local/share")
+ return self._append_app_name_and_version(path)
+
+ @property
+ def site_data_dir(self) -> str:
+ """
+ :return: data directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>` is
+ enabled and ``XDG_DATA_DIRS`` is set to multiple paths, the result is also multiple paths separated by the
+ OS path separator), e.g. ``/usr/local/share/$appname/$version`` or ``/usr/share/$appname/$version``
+ """
+ # XDG default for $XDG_DATA_DIRS; only first, if multipath is False
+ path = os.environ.get("XDG_DATA_DIRS", "")
+ if not path.strip():
+ path = f"/usr/local/share{os.pathsep}/usr/share"
+ return self._with_multi_path(path)
+
+ def _with_multi_path(self, path: str) -> str:
+ path_list = path.split(os.pathsep)
+ if not self.multipath:
+ path_list = path_list[0:1]
+ path_list = [self._append_app_name_and_version(os.path.expanduser(p)) for p in path_list]
+ return os.pathsep.join(path_list)
+
+ @property
+ def user_config_dir(self) -> str:
+ """
+ :return: config directory tied to the user, e.g. ``~/.config/$appname/$version`` or
+ ``$XDG_CONFIG_HOME/$appname/$version``
+ """
+ path = os.environ.get("XDG_CONFIG_HOME", "")
+ if not path.strip():
+ path = os.path.expanduser("~/.config")
+ return self._append_app_name_and_version(path)
+
+ @property
+ def site_config_dir(self) -> str:
+ """
+ :return: config directories shared by users (if `multipath <platformdirs.api.PlatformDirsABC.multipath>`
+ is enabled and ``XDG_CONFIG_DIRS`` is set to multiple paths, the result is also multiple paths separated by
+ the OS path separator), e.g. ``/etc/xdg/$appname/$version``
+ """
+ # XDG default for $XDG_CONFIG_DIRS only first, if multipath is False
+ path = os.environ.get("XDG_CONFIG_DIRS", "")
+ if not path.strip():
+ path = "/etc/xdg"
+ return self._with_multi_path(path)
+
+ @property
+ def user_cache_dir(self) -> str:
+ """
+ :return: cache directory tied to the user, e.g. ``~/.cache/$appname/$version`` or
+ ``$XDG_CACHE_HOME/$appname/$version``
+ """
+ path = os.environ.get("XDG_CACHE_HOME", "")
+ if not path.strip():
+ path = os.path.expanduser("~/.cache")
+ return self._append_app_name_and_version(path)
+
+ @property
+ def user_state_dir(self) -> str:
+ """
+ :return: state directory tied to the user, e.g. ``~/.local/state/$appname/$version`` or
+ ``$XDG_STATE_HOME/$appname/$version``
+ """
+ path = os.environ.get("XDG_STATE_HOME", "")
+ if not path.strip():
+ path = os.path.expanduser("~/.local/state")
+ return self._append_app_name_and_version(path)
+
+ @property
+ def user_log_dir(self) -> str:
+ """
+ :return: log directory tied to the user, same as `user_state_dir` if not opinionated else ``log`` in it
+ """
+ path = self.user_state_dir
+ if self.opinion:
+ path = os.path.join(path, "log")
+ return path
+
+ @property
+ def user_documents_dir(self) -> str:
+ """
+ :return: documents directory tied to the user, e.g. ``~/Documents``
+ """
+ documents_dir = _get_user_dirs_folder("XDG_DOCUMENTS_DIR")
+ if documents_dir is None:
+ documents_dir = os.environ.get("XDG_DOCUMENTS_DIR", "").strip()
+ if not documents_dir:
+ documents_dir = os.path.expanduser("~/Documents")
+
+ return documents_dir
+
+ @property
+ def user_runtime_dir(self) -> str:
+ """
+ :return: runtime directory tied to the user, e.g. ``/run/user/$(id -u)/$appname/$version`` or
+ ``$XDG_RUNTIME_DIR/$appname/$version``
+ """
+ path = os.environ.get("XDG_RUNTIME_DIR", "")
+ if not path.strip():
+ path = f"/run/user/{getuid()}"
+ return self._append_app_name_and_version(path)
+
+ @property
+ def site_data_path(self) -> Path:
+ """:return: data path shared by users. Only return first item, even if ``multipath`` is set to ``True``"""
+ return self._first_item_as_path_if_multipath(self.site_data_dir)
+
+ @property
+ def site_config_path(self) -> Path:
+ """:return: config path shared by the users. Only return first item, even if ``multipath`` is set to ``True``"""
+ return self._first_item_as_path_if_multipath(self.site_config_dir)
+
+ def _first_item_as_path_if_multipath(self, directory: str) -> Path:
+ if self.multipath:
+ # If multipath is enabled, the directory string may contain several paths; keep only the first.
+ directory = directory.split(os.pathsep)[0]
+ return Path(directory)
+
+
+def _get_user_dirs_folder(key: str) -> str | None:
+ """Return directory from user-dirs.dirs config file. See https://freedesktop.org/wiki/Software/xdg-user-dirs/"""
+ user_dirs_config_path = os.path.join(Unix().user_config_dir, "user-dirs.dirs")
+ if os.path.exists(user_dirs_config_path):
+ parser = ConfigParser()
+
+ with open(user_dirs_config_path) as stream:
+ # Add fake section header, so ConfigParser doesn't complain
+ parser.read_string(f"[top]\n{stream.read()}")
+
+ if key not in parser["top"]:
+ return None
+
+ path = parser["top"][key].strip('"')
+ # Handle relative home paths
+ path = path.replace("$HOME", os.path.expanduser("~"))
+ return path
+
+ return None
+
+
+__all__ = [
+ "Unix",
+]
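
Every user directory on Unix checks its XDG variable first and falls back to the hard-coded default; a blank or whitespace-only value counts as unset (the ``if not path.strip()`` checks above). A sketch of that precedence:

    import os
    from pip._vendor.platformdirs.unix import Unix

    dirs = Unix(appname="demo")

    os.environ["XDG_DATA_HOME"] = "/tmp/xdg-data"
    print(dirs.user_data_dir)   # /tmp/xdg-data/demo

    os.environ["XDG_DATA_HOME"] = "   "  # whitespace-only is treated as unset
    print(dirs.user_data_dir)   # <home>/.local/share/demo

    # site_data_dir honours multipath: all entries of XDG_DATA_DIRS are kept.
    print(Unix(appname="demo", multipath=True).site_data_dir)
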
diff --git a/third_party/python/pip/pip/_vendor/platformdirs/version.py b/third_party/python/pip/pip/_vendor/platformdirs/version.py
new file mode 100644
index 0000000000..9f6eb98e8f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/platformdirs/version.py
@@ -0,0 +1,4 @@
+# file generated by setuptools_scm
+# don't change, don't track in version control
+__version__ = version = '2.6.2'
+__version_tuple__ = version_tuple = (2, 6, 2)
diff --git a/third_party/python/pip/pip/_vendor/platformdirs/windows.py b/third_party/python/pip/pip/_vendor/platformdirs/windows.py
new file mode 100644
index 0000000000..d5c27b3414
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/platformdirs/windows.py
@@ -0,0 +1,184 @@
+from __future__ import annotations
+
+import ctypes
+import os
+import sys
+from functools import lru_cache
+from typing import Callable
+
+from .api import PlatformDirsABC
+
+
+class Windows(PlatformDirsABC):
+ """`MSDN on where to store app data files
+ <http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120>`_.
+ Makes use of the
+ `appname <platformdirs.api.PlatformDirsABC.appname>`,
+ `appauthor <platformdirs.api.PlatformDirsABC.appauthor>`,
+ `version <platformdirs.api.PlatformDirsABC.version>`,
+ `roaming <platformdirs.api.PlatformDirsABC.roaming>`,
+ `opinion <platformdirs.api.PlatformDirsABC.opinion>`."""
+
+ @property
+ def user_data_dir(self) -> str:
+ """
+ :return: data directory tied to the user, e.g.
+ ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname`` (not roaming) or
+ ``%USERPROFILE%\\AppData\\Roaming\\$appauthor\\$appname`` (roaming)
+ """
+ const = "CSIDL_APPDATA" if self.roaming else "CSIDL_LOCAL_APPDATA"
+ path = os.path.normpath(get_win_folder(const))
+ return self._append_parts(path)
+
+ def _append_parts(self, path: str, *, opinion_value: str | None = None) -> str:
+ params = []
+ if self.appname:
+ if self.appauthor is not False:
+ author = self.appauthor or self.appname
+ params.append(author)
+ params.append(self.appname)
+ if opinion_value is not None and self.opinion:
+ params.append(opinion_value)
+ if self.version:
+ params.append(self.version)
+ return os.path.join(path, *params)
+
+ @property
+ def site_data_dir(self) -> str:
+ """:return: data directory shared by users, e.g. ``C:\\ProgramData\\$appauthor\\$appname``"""
+ path = os.path.normpath(get_win_folder("CSIDL_COMMON_APPDATA"))
+ return self._append_parts(path)
+
+ @property
+ def user_config_dir(self) -> str:
+ """:return: config directory tied to the user, same as `user_data_dir`"""
+ return self.user_data_dir
+
+ @property
+ def site_config_dir(self) -> str:
+ """:return: config directory shared by the users, same as `site_data_dir`"""
+ return self.site_data_dir
+
+ @property
+ def user_cache_dir(self) -> str:
+ """
+ :return: cache directory tied to the user (with a ``Cache`` folder inside ``$appname`` if opinionated), e.g.
+ ``%USERPROFILE%\\AppData\\Local\\$appauthor\\$appname\\Cache\\$version``
+ """
+ path = os.path.normpath(get_win_folder("CSIDL_LOCAL_APPDATA"))
+ return self._append_parts(path, opinion_value="Cache")
+
+ @property
+ def user_state_dir(self) -> str:
+ """:return: state directory tied to the user, same as `user_data_dir`"""
+ return self.user_data_dir
+
+ @property
+ def user_log_dir(self) -> str:
+ """
+ :return: log directory tied to the user, same as `user_data_dir` if not opinionated else ``Logs`` in it
+ """
+ path = self.user_data_dir
+ if self.opinion:
+ path = os.path.join(path, "Logs")
+ return path
+
+ @property
+ def user_documents_dir(self) -> str:
+ """
+ :return: documents directory tied to the user, e.g. ``%USERPROFILE%\\Documents``
+ """
+ return os.path.normpath(get_win_folder("CSIDL_PERSONAL"))
+
+ @property
+ def user_runtime_dir(self) -> str:
+ """
+ :return: runtime directory tied to the user, e.g.
+ ``%USERPROFILE%\\AppData\\Local\\Temp\\$appauthor\\$appname``
+ """
+ path = os.path.normpath(os.path.join(get_win_folder("CSIDL_LOCAL_APPDATA"), "Temp"))
+ return self._append_parts(path)
+
+
+def get_win_folder_from_env_vars(csidl_name: str) -> str:
+ """Get folder from environment variables."""
+ if csidl_name == "CSIDL_PERSONAL": # does not have an environment name
+ return os.path.join(os.path.normpath(os.environ["USERPROFILE"]), "Documents")
+
+ env_var_name = {
+ "CSIDL_APPDATA": "APPDATA",
+ "CSIDL_COMMON_APPDATA": "ALLUSERSPROFILE",
+ "CSIDL_LOCAL_APPDATA": "LOCALAPPDATA",
+ }.get(csidl_name)
+ if env_var_name is None:
+ raise ValueError(f"Unknown CSIDL name: {csidl_name}")
+ result = os.environ.get(env_var_name)
+ if result is None:
+ raise ValueError(f"Unset environment variable: {env_var_name}")
+ return result
+
+
+def get_win_folder_from_registry(csidl_name: str) -> str:
+ """Get folder from the registry.
+
+ This is a fallback technique at best. I'm not sure if using the
+ registry for this guarantees us the correct answer for all CSIDL_*
+ names.
+ """
+ shell_folder_name = {
+ "CSIDL_APPDATA": "AppData",
+ "CSIDL_COMMON_APPDATA": "Common AppData",
+ "CSIDL_LOCAL_APPDATA": "Local AppData",
+ "CSIDL_PERSONAL": "Personal",
+ }.get(csidl_name)
+ if shell_folder_name is None:
+ raise ValueError(f"Unknown CSIDL name: {csidl_name}")
+ if sys.platform != "win32": # only needed for mypy type checker to know that this code runs only on Windows
+ raise NotImplementedError
+ import winreg
+
+ key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders")
+ directory, _ = winreg.QueryValueEx(key, shell_folder_name)
+ return str(directory)
+
+
+def get_win_folder_via_ctypes(csidl_name: str) -> str:
+ """Get folder with ctypes."""
+ csidl_const = {
+ "CSIDL_APPDATA": 26,
+ "CSIDL_COMMON_APPDATA": 35,
+ "CSIDL_LOCAL_APPDATA": 28,
+ "CSIDL_PERSONAL": 5,
+ }.get(csidl_name)
+ if csidl_const is None:
+ raise ValueError(f"Unknown CSIDL name: {csidl_name}")
+
+ buf = ctypes.create_unicode_buffer(1024)
+ windll = getattr(ctypes, "windll") # noqa: B009 # using getattr to avoid false positive with mypy type checker
+ windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+ # Downgrade to short path name if it has highbit chars.
+ if any(ord(c) > 255 for c in buf):
+ buf2 = ctypes.create_unicode_buffer(1024)
+ if windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+ buf = buf2
+
+ return buf.value
+
+
+def _pick_get_win_folder() -> Callable[[str], str]:
+ if hasattr(ctypes, "windll"):
+ return get_win_folder_via_ctypes
+ try:
+ import winreg # noqa: F401
+ except ImportError:
+ return get_win_folder_from_env_vars
+ else:
+ return get_win_folder_from_registry
+
+
+get_win_folder = lru_cache(maxsize=None)(_pick_get_win_folder())
+
+__all__ = [
+ "Windows",
+]
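
get_win_folder is resolved once at import time (ctypes when available, else winreg, else environment variables) and the chosen function is wrapped in an lru_cache. A sketch of the environment-variable fallback; the profile paths below are illustrative:

    import os
    from pip._vendor.platformdirs.windows import get_win_folder_from_env_vars

    # Illustrative values; on a real Windows host the OS sets these.
    os.environ.setdefault("USERPROFILE", r"C:\Users\demo")
    os.environ.setdefault("LOCALAPPDATA", r"C:\Users\demo\AppData\Local")

    print(get_win_folder_from_env_vars("CSIDL_PERSONAL"))       # C:\Users\demo\Documents
    print(get_win_folder_from_env_vars("CSIDL_LOCAL_APPDATA"))  # C:\Users\demo\AppData\Local
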
diff --git a/third_party/python/pip/pip/_vendor/pygments/__init__.py b/third_party/python/pip/pip/_vendor/pygments/__init__.py
new file mode 100644
index 0000000000..7185e53769
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/__init__.py
@@ -0,0 +1,82 @@
+"""
+ Pygments
+ ~~~~~~~~
+
+ Pygments is a syntax highlighting package written in Python.
+
+ It is a generic syntax highlighter for general use in all kinds of software
+ such as forum systems, wikis or other applications that need to prettify
+ source code. Highlights are:
+
+ * a wide range of common languages and markup formats is supported
+ * special attention is paid to details, increasing quality by a fair amount
+ * support for new languages and formats is added easily
+ * a number of output formats, presently HTML, LaTeX, RTF, SVG, all image
+ formats that PIL supports, and ANSI sequences
+ * it is usable as a command-line tool and as a library
+ * ... and it highlights even Brainfuck!
+
+ The `Pygments master branch`_ is installable with ``easy_install Pygments==dev``.
+
+ .. _Pygments master branch:
+ https://github.com/pygments/pygments/archive/master.zip#egg=Pygments-dev
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+from io import StringIO, BytesIO
+
+__version__ = '2.13.0'
+__docformat__ = 'restructuredtext'
+
+__all__ = ['lex', 'format', 'highlight']
+
+
+def lex(code, lexer):
+ """
+ Lex ``code`` with ``lexer`` and return an iterable of tokens.
+ """
+ try:
+ return lexer.get_tokens(code)
+ except TypeError:
+ # Heuristic to catch a common mistake.
+ from pip._vendor.pygments.lexer import RegexLexer
+ if isinstance(lexer, type) and issubclass(lexer, RegexLexer):
+ raise TypeError('lex() argument must be a lexer instance, '
+ 'not a class')
+ raise
+
+
+def format(tokens, formatter, outfile=None): # pylint: disable=redefined-builtin
+ """
+ Format a tokenlist ``tokens`` with the formatter ``formatter``.
+
+ If ``outfile`` is given and a valid file object (an object
+ with a ``write`` method), the result will be written to it, otherwise
+ it is returned as a string.
+ """
+ try:
+ if not outfile:
+ realoutfile = getattr(formatter, 'encoding', None) and BytesIO() or StringIO()
+ formatter.format(tokens, realoutfile)
+ return realoutfile.getvalue()
+ else:
+ formatter.format(tokens, outfile)
+ except TypeError:
+ # Heuristic to catch a common mistake.
+ from pip._vendor.pygments.formatter import Formatter
+ if isinstance(formatter, type) and issubclass(formatter, Formatter):
+ raise TypeError('format() argument must be a formatter instance, '
+ 'not a class')
+ raise
+
+
+def highlight(code, lexer, formatter, outfile=None):
+ """
+ Lex ``code`` with ``lexer`` and format it with the formatter ``formatter``.
+
+ If ``outfile`` is given and a valid file object (an object
+ with a ``write`` method), the result will be written to it, otherwise
+ it is returned as a string.
+ """
+ return format(lex(code, lexer), formatter, outfile)
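
highlight() is literally format(lex(code, lexer), formatter, outfile); with no outfile the result comes back as a string. A minimal sketch using the vendored import paths:

    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.lexers import get_lexer_by_name
    from pip._vendor.pygments.formatters import get_formatter_by_name

    source = "print('hello')"
    # No outfile given, so the formatted markup is returned as a string.
    html = highlight(source, get_lexer_by_name("python"), get_formatter_by_name("html"))
    print(html)
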
diff --git a/third_party/python/pip/pip/_vendor/pygments/__main__.py b/third_party/python/pip/pip/_vendor/pygments/__main__.py
new file mode 100644
index 0000000000..90cafd9342
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/__main__.py
@@ -0,0 +1,17 @@
+"""
+ pygments.__main__
+ ~~~~~~~~~~~~~~~~~
+
+ Main entry point for ``python -m pygments``.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+from pip._vendor.pygments.cmdline import main
+
+try:
+ sys.exit(main(sys.argv))
+except KeyboardInterrupt:
+ sys.exit(1)
diff --git a/third_party/python/pip/pip/_vendor/pygments/cmdline.py b/third_party/python/pip/pip/_vendor/pygments/cmdline.py
new file mode 100644
index 0000000000..de73b06b4c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/cmdline.py
@@ -0,0 +1,668 @@
+"""
+ pygments.cmdline
+ ~~~~~~~~~~~~~~~~
+
+ Command line interface.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import sys
+import shutil
+import argparse
+from textwrap import dedent
+
+from pip._vendor.pygments import __version__, highlight
+from pip._vendor.pygments.util import ClassNotFound, OptionError, docstring_headline, \
+ guess_decode, guess_decode_from_terminal, terminal_encoding, \
+ UnclosingTextIOWrapper
+from pip._vendor.pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \
+ load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename
+from pip._vendor.pygments.lexers.special import TextLexer
+from pip._vendor.pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter
+from pip._vendor.pygments.formatters import get_all_formatters, get_formatter_by_name, \
+ load_formatter_from_file, get_formatter_for_filename, find_formatter_class
+from pip._vendor.pygments.formatters.terminal import TerminalFormatter
+from pip._vendor.pygments.formatters.terminal256 import Terminal256Formatter, TerminalTrueColorFormatter
+from pip._vendor.pygments.filters import get_all_filters, find_filter_class
+from pip._vendor.pygments.styles import get_all_styles, get_style_by_name
+
+
+def _parse_options(o_strs):
+ opts = {}
+ if not o_strs:
+ return opts
+ for o_str in o_strs:
+ if not o_str.strip():
+ continue
+ o_args = o_str.split(',')
+ for o_arg in o_args:
+ o_arg = o_arg.strip()
+ try:
+ o_key, o_val = o_arg.split('=', 1)
+ o_key = o_key.strip()
+ o_val = o_val.strip()
+ except ValueError:
+ opts[o_arg] = True
+ else:
+ opts[o_key] = o_val
+ return opts
+
+
+def _parse_filters(f_strs):
+ filters = []
+ if not f_strs:
+ return filters
+ for f_str in f_strs:
+ if ':' in f_str:
+ fname, fopts = f_str.split(':', 1)
+ filters.append((fname, _parse_options([fopts])))
+ else:
+ filters.append((f_str, {}))
+ return filters
+
+
+def _print_help(what, name):
+ try:
+ if what == 'lexer':
+ cls = get_lexer_by_name(name)
+ print("Help on the %s lexer:" % cls.name)
+ print(dedent(cls.__doc__))
+ elif what == 'formatter':
+ cls = find_formatter_class(name)
+ print("Help on the %s formatter:" % cls.name)
+ print(dedent(cls.__doc__))
+ elif what == 'filter':
+ cls = find_filter_class(name)
+ print("Help on the %s filter:" % name)
+ print(dedent(cls.__doc__))
+ return 0
+ except (AttributeError, ValueError):
+ print("%s not found!" % what, file=sys.stderr)
+ return 1
+
+
+def _print_list(what):
+ if what == 'lexer':
+ print()
+ print("Lexers:")
+ print("~~~~~~~")
+
+ info = []
+ for fullname, names, exts, _ in get_all_lexers():
+ tup = (', '.join(names)+':', fullname,
+ exts and '(filenames ' + ', '.join(exts) + ')' or '')
+ info.append(tup)
+ info.sort()
+ for i in info:
+ print(('* %s\n %s %s') % i)
+
+ elif what == 'formatter':
+ print()
+ print("Formatters:")
+ print("~~~~~~~~~~~")
+
+ info = []
+ for cls in get_all_formatters():
+ doc = docstring_headline(cls)
+ tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
+ '(filenames ' + ', '.join(cls.filenames) + ')' or '')
+ info.append(tup)
+ info.sort()
+ for i in info:
+ print(('* %s\n %s %s') % i)
+
+ elif what == 'filter':
+ print()
+ print("Filters:")
+ print("~~~~~~~~")
+
+ for name in get_all_filters():
+ cls = find_filter_class(name)
+ print("* " + name + ':')
+ print(" %s" % docstring_headline(cls))
+
+ elif what == 'style':
+ print()
+ print("Styles:")
+ print("~~~~~~~")
+
+ for name in get_all_styles():
+ cls = get_style_by_name(name)
+ print("* " + name + ':')
+ print(" %s" % docstring_headline(cls))
+
+
+def _print_list_as_json(requested_items):
+ import json
+ result = {}
+ if 'lexer' in requested_items:
+ info = {}
+ for fullname, names, filenames, mimetypes in get_all_lexers():
+ info[fullname] = {
+ 'aliases': names,
+ 'filenames': filenames,
+ 'mimetypes': mimetypes
+ }
+ result['lexers'] = info
+
+ if 'formatter' in requested_items:
+ info = {}
+ for cls in get_all_formatters():
+ doc = docstring_headline(cls)
+ info[cls.name] = {
+ 'aliases': cls.aliases,
+ 'filenames': cls.filenames,
+ 'doc': doc
+ }
+ result['formatters'] = info
+
+ if 'filter' in requested_items:
+ info = {}
+ for name in get_all_filters():
+ cls = find_filter_class(name)
+ info[name] = {
+ 'doc': docstring_headline(cls)
+ }
+ result['filters'] = info
+
+ if 'style' in requested_items:
+ info = {}
+ for name in get_all_styles():
+ cls = get_style_by_name(name)
+ info[name] = {
+ 'doc': docstring_headline(cls)
+ }
+ result['styles'] = info
+
+ json.dump(result, sys.stdout)
+
+def main_inner(parser, argns):
+ if argns.help:
+ parser.print_help()
+ return 0
+
+ if argns.V:
+ print('Pygments version %s, (c) 2006-2022 by Georg Brandl, Matthäus '
+ 'Chajdas and contributors.' % __version__)
+ return 0
+
+ def is_only_option(opt):
+ return not any(v for (k, v) in vars(argns).items() if k != opt)
+
+ # handle ``pygmentize -L``
+ if argns.L is not None:
+ arg_set = set()
+ for k, v in vars(argns).items():
+ if v:
+ arg_set.add(k)
+
+ arg_set.discard('L')
+ arg_set.discard('json')
+
+ if arg_set:
+ parser.print_help(sys.stderr)
+ return 2
+
+ # print version
+ if not argns.json:
+ main(['', '-V'])
+ allowed_types = {'lexer', 'formatter', 'filter', 'style'}
+ largs = [arg.rstrip('s') for arg in argns.L]
+ if any(arg not in allowed_types for arg in largs):
+ parser.print_help(sys.stderr)
+ return 0
+ if not largs:
+ largs = allowed_types
+ if not argns.json:
+ for arg in largs:
+ _print_list(arg)
+ else:
+ _print_list_as_json(largs)
+ return 0
+
+ # handle ``pygmentize -H``
+ if argns.H:
+ if not is_only_option('H'):
+ parser.print_help(sys.stderr)
+ return 2
+ what, name = argns.H
+ if what not in ('lexer', 'formatter', 'filter'):
+ parser.print_help(sys.stderr)
+ return 2
+ return _print_help(what, name)
+
+ # parse -O options
+ parsed_opts = _parse_options(argns.O or [])
+
+ # parse -P options
+ for p_opt in argns.P or []:
+ try:
+ name, value = p_opt.split('=', 1)
+ except ValueError:
+ parsed_opts[p_opt] = True
+ else:
+ parsed_opts[name] = value
+
+ # encodings
+ inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding'))
+ outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding'))
+
+ # handle ``pygmentize -N``
+ if argns.N:
+ lexer = find_lexer_class_for_filename(argns.N)
+ if lexer is None:
+ lexer = TextLexer
+
+ print(lexer.aliases[0])
+ return 0
+
+ # handle ``pygmentize -C``
+ if argns.C:
+ inp = sys.stdin.buffer.read()
+ try:
+ lexer = guess_lexer(inp, inencoding=inencoding)
+ except ClassNotFound:
+ lexer = TextLexer
+
+ print(lexer.aliases[0])
+ return 0
+
+ # handle ``pygmentize -S``
+ S_opt = argns.S
+ a_opt = argns.a
+ if S_opt is not None:
+ f_opt = argns.f
+ if not f_opt:
+ parser.print_help(sys.stderr)
+ return 2
+ if argns.l or argns.INPUTFILE:
+ parser.print_help(sys.stderr)
+ return 2
+
+ try:
+ parsed_opts['style'] = S_opt
+ fmter = get_formatter_by_name(f_opt, **parsed_opts)
+ except ClassNotFound as err:
+ print(err, file=sys.stderr)
+ return 1
+
+ print(fmter.get_style_defs(a_opt or ''))
+ return 0
+
+ # if no -S is given, -a is not allowed
+ if argns.a is not None:
+ parser.print_help(sys.stderr)
+ return 2
+
+ # parse -F options
+ F_opts = _parse_filters(argns.F or [])
+
+ # -x: allow custom (eXternal) lexers and formatters
+ allow_custom_lexer_formatter = bool(argns.x)
+
+ # select lexer
+ lexer = None
+
+ # given by name?
+ lexername = argns.l
+ if lexername:
+ # custom lexer, located relative to user's cwd
+ if allow_custom_lexer_formatter and '.py' in lexername:
+ try:
+ filename = None
+ name = None
+ if ':' in lexername:
+ filename, name = lexername.rsplit(':', 1)
+
+ if '.py' in name:
+ # This can happen on Windows: If the lexername is
+ # C:\lexer.py -- return to normal load path in that case
+ name = None
+
+ if filename and name:
+ lexer = load_lexer_from_file(filename, name,
+ **parsed_opts)
+ else:
+ lexer = load_lexer_from_file(lexername, **parsed_opts)
+ except ClassNotFound as err:
+ print('Error:', err, file=sys.stderr)
+ return 1
+ else:
+ try:
+ lexer = get_lexer_by_name(lexername, **parsed_opts)
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
+ return 1
+
+ # read input code
+ code = None
+
+ if argns.INPUTFILE:
+ if argns.s:
+ print('Error: -s option not usable when input file specified',
+ file=sys.stderr)
+ return 2
+
+ infn = argns.INPUTFILE
+ try:
+ with open(infn, 'rb') as infp:
+ code = infp.read()
+ except Exception as err:
+ print('Error: cannot read infile:', err, file=sys.stderr)
+ return 1
+ if not inencoding:
+ code, inencoding = guess_decode(code)
+
+ # do we have to guess the lexer?
+ if not lexer:
+ try:
+ lexer = get_lexer_for_filename(infn, code, **parsed_opts)
+ except ClassNotFound as err:
+ if argns.g:
+ try:
+ lexer = guess_lexer(code, **parsed_opts)
+ except ClassNotFound:
+ lexer = TextLexer(**parsed_opts)
+ else:
+ print('Error:', err, file=sys.stderr)
+ return 1
+ except OptionError as err:
+ print('Error:', err, file=sys.stderr)
+ return 1
+
+ elif not argns.s: # treat stdin as full file (-s support is later)
+ # read code from terminal, always in binary mode since we want to
+ # decode ourselves and be tolerant with it
+ code = sys.stdin.buffer.read() # use .buffer to get a binary stream
+ if not inencoding:
+ code, inencoding = guess_decode_from_terminal(code, sys.stdin)
+ # else the lexer will do the decoding
+ if not lexer:
+ try:
+ lexer = guess_lexer(code, **parsed_opts)
+ except ClassNotFound:
+ lexer = TextLexer(**parsed_opts)
+
+ else: # -s option needs a lexer with -l
+ if not lexer:
+ print('Error: when using -s a lexer has to be selected with -l',
+ file=sys.stderr)
+ return 2
+
+ # process filters
+ for fname, fopts in F_opts:
+ try:
+ lexer.add_filter(fname, **fopts)
+ except ClassNotFound as err:
+ print('Error:', err, file=sys.stderr)
+ return 1
+
+ # select formatter
+ outfn = argns.o
+ fmter = argns.f
+ if fmter:
+ # custom formatter, located relative to user's cwd
+ if allow_custom_lexer_formatter and '.py' in fmter:
+ try:
+ filename = None
+ name = None
+ if ':' in fmter:
+ # Same logic as above for custom lexer
+ filename, name = fmter.rsplit(':', 1)
+
+ if '.py' in name:
+ name = None
+
+ if filename and name:
+ fmter = load_formatter_from_file(filename, name,
+ **parsed_opts)
+ else:
+ fmter = load_formatter_from_file(fmter, **parsed_opts)
+ except ClassNotFound as err:
+ print('Error:', err, file=sys.stderr)
+ return 1
+ else:
+ try:
+ fmter = get_formatter_by_name(fmter, **parsed_opts)
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
+ return 1
+
+ if outfn:
+ if not fmter:
+ try:
+ fmter = get_formatter_for_filename(outfn, **parsed_opts)
+ except (OptionError, ClassNotFound) as err:
+ print('Error:', err, file=sys.stderr)
+ return 1
+ try:
+ outfile = open(outfn, 'wb')
+ except Exception as err:
+ print('Error: cannot open outfile:', err, file=sys.stderr)
+ return 1
+ else:
+ if not fmter:
+ if os.environ.get('COLORTERM','') in ('truecolor', '24bit'):
+ fmter = TerminalTrueColorFormatter(**parsed_opts)
+ elif '256' in os.environ.get('TERM', ''):
+ fmter = Terminal256Formatter(**parsed_opts)
+ else:
+ fmter = TerminalFormatter(**parsed_opts)
+ outfile = sys.stdout.buffer
+
+ # determine output encoding if not explicitly selected
+ if not outencoding:
+ if outfn:
+ # output file? use lexer encoding for now (can still be None)
+ fmter.encoding = inencoding
+ else:
+ # else use terminal encoding
+ fmter.encoding = terminal_encoding(sys.stdout)
+
+ # provide coloring under Windows, if possible
+ if not outfn and sys.platform in ('win32', 'cygwin') and \
+ fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover
+ # unfortunately colorama doesn't support binary streams on Py3
+ outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding)
+ fmter.encoding = None
+ try:
+ import pip._vendor.colorama.initialise as colorama_initialise
+ except ImportError:
+ pass
+ else:
+ outfile = colorama_initialise.wrap_stream(
+ outfile, convert=None, strip=None, autoreset=False, wrap=True)
+
+ # When using the LaTeX formatter and the option `escapeinside` is
+ # specified, we need a special lexer which collects escaped text
+ # before running the chosen language lexer.
+ escapeinside = parsed_opts.get('escapeinside', '')
+ if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter):
+ left = escapeinside[0]
+ right = escapeinside[1]
+ lexer = LatexEmbeddedLexer(left, right, lexer)
+
+ # ... and do it!
+ if not argns.s:
+ # process whole input as per normal...
+ try:
+ highlight(code, lexer, fmter, outfile)
+ finally:
+ if outfn:
+ outfile.close()
+ return 0
+ else:
+ # line by line processing of stdin (eg: for 'tail -f')...
+ try:
+ while 1:
+ line = sys.stdin.buffer.readline()
+ if not line:
+ break
+ if not inencoding:
+ line = guess_decode_from_terminal(line, sys.stdin)[0]
+ highlight(line, lexer, fmter, outfile)
+ if hasattr(outfile, 'flush'):
+ outfile.flush()
+ return 0
+ except KeyboardInterrupt: # pragma: no cover
+ return 0
+ finally:
+ if outfn:
+ outfile.close()
+
+
+class HelpFormatter(argparse.HelpFormatter):
+ def __init__(self, prog, indent_increment=2, max_help_position=16, width=None):
+ if width is None:
+ try:
+ width = shutil.get_terminal_size().columns - 2
+ except Exception:
+ pass
+ argparse.HelpFormatter.__init__(self, prog, indent_increment,
+ max_help_position, width)
+
+
+def main(args=sys.argv):
+ """
+ Main command line entry point.
+ """
+ desc = "Highlight an input file and write the result to an output file."
+ parser = argparse.ArgumentParser(description=desc, add_help=False,
+ formatter_class=HelpFormatter)
+
+ operation = parser.add_argument_group('Main operation')
+ lexersel = operation.add_mutually_exclusive_group()
+ lexersel.add_argument(
+ '-l', metavar='LEXER',
+ help='Specify the lexer to use. (Query names with -L.) If not '
+ 'given and -g is not present, the lexer is guessed from the filename.')
+ lexersel.add_argument(
+ '-g', action='store_true',
+ help='Guess the lexer from the file contents, or pass through '
+ 'as plain text if nothing can be guessed.')
+ operation.add_argument(
+ '-F', metavar='FILTER[:options]', action='append',
+ help='Add a filter to the token stream. (Query names with -L.) '
+ 'Filter options are given after a colon if necessary.')
+ operation.add_argument(
+ '-f', metavar='FORMATTER',
+ help='Specify the formatter to use. (Query names with -L.) '
+ 'If not given, the formatter is guessed from the output filename, '
+ 'and defaults to the terminal formatter if the output is to the '
+ 'terminal or an unknown file extension.')
+ operation.add_argument(
+ '-O', metavar='OPTION=value[,OPTION=value,...]', action='append',
+ help='Give options to the lexer and formatter as a comma-separated '
+ 'list of key-value pairs. '
+ 'Example: `-O bg=light,python=cool`.')
+ operation.add_argument(
+ '-P', metavar='OPTION=value', action='append',
+ help='Give a single option to the lexer and formatter - with this '
+ 'you can pass options whose value contains commas and equal signs. '
+ 'Example: `-P "heading=Pygments, the Python highlighter"`.')
+ operation.add_argument(
+ '-o', metavar='OUTPUTFILE',
+ help='Where to write the output. Defaults to standard output.')
+
+ operation.add_argument(
+ 'INPUTFILE', nargs='?',
+ help='Where to read the input. Defaults to standard input.')
+
+ flags = parser.add_argument_group('Operation flags')
+ flags.add_argument(
+ '-v', action='store_true',
+ help='Print a detailed traceback on unhandled exceptions, which '
+ 'is useful for debugging and bug reports.')
+ flags.add_argument(
+ '-s', action='store_true',
+ help='Process lines one at a time until EOF, rather than waiting to '
+ 'process the entire file. This only works for stdin, only for lexers '
+ 'with no line-spanning constructs, and is intended for streaming '
+ 'input such as you get from `tail -f`. '
+ 'Example usage: `tail -f sql.log | pygmentize -s -l sql`.')
+ flags.add_argument(
+ '-x', action='store_true',
+ help='Allow custom lexers and formatters to be loaded from a .py file '
+ 'relative to the current working directory. For example, '
+ '`-l ./customlexer.py -x`. By default, this option expects a file '
+ 'with a class named CustomLexer or CustomFormatter; you can also '
+ 'specify your own class name with a colon (`-l ./lexer.py:MyLexer`). '
+ 'Users should be very careful not to use this option with untrusted '
+ 'files, because it will import and run them.')
+ flags.add_argument('--json', help='Output as JSON. This can '
+ 'only be used in conjunction with -L.',
+ default=False,
+ action='store_true')
+
+ special_modes_group = parser.add_argument_group(
+ 'Special modes - do not do any highlighting')
+ special_modes = special_modes_group.add_mutually_exclusive_group()
+ special_modes.add_argument(
+ '-S', metavar='STYLE -f formatter',
+ help='Print style definitions for STYLE for a formatter '
+ 'given with -f. The argument given by -a is formatter '
+ 'dependent.')
+ special_modes.add_argument(
+ '-L', nargs='*', metavar='WHAT',
+ help='List lexers, formatters, styles or filters -- '
+ 'give additional arguments for the thing(s) you want to list '
+ '(e.g. "styles"), or omit them to list everything.')
+ special_modes.add_argument(
+ '-N', metavar='FILENAME',
+ help='Guess and print out a lexer name based solely on the given '
+ 'filename. Does not take input or highlight anything. If no specific '
+ 'lexer can be determined, "text" is printed.')
+ special_modes.add_argument(
+ '-C', action='store_true',
+ help='Like -N, but print out a lexer name based solely on '
+ 'a given content from standard input.')
+ special_modes.add_argument(
+ '-H', action='store', nargs=2, metavar=('NAME', 'TYPE'),
+ help='Print detailed help for the object <name> of type <type>, '
+ 'where <type> is one of "lexer", "formatter" or "filter".')
+ special_modes.add_argument(
+ '-V', action='store_true',
+ help='Print the package version.')
+ special_modes.add_argument(
+ '-h', '--help', action='store_true',
+ help='Print this help.')
+ special_modes_group.add_argument(
+ '-a', metavar='ARG',
+ help='Formatter-specific additional argument for the -S (print '
+ 'style sheet) mode.')
+
+ argns = parser.parse_args(args[1:])
+
+ try:
+ return main_inner(parser, argns)
+ except BrokenPipeError:
+ # someone closed our stdout, e.g. by quitting a pager.
+ return 0
+ except Exception:
+ if argns.v:
+ print(file=sys.stderr)
+ print('*' * 65, file=sys.stderr)
+ print('An unhandled exception occurred while highlighting.',
+ file=sys.stderr)
+ print('Please report the whole traceback to the issue tracker at',
+ file=sys.stderr)
+ print('<https://github.com/pygments/pygments/issues>.',
+ file=sys.stderr)
+ print('*' * 65, file=sys.stderr)
+ print(file=sys.stderr)
+ raise
+ import traceback
+ info = traceback.format_exception(*sys.exc_info())
+ msg = info[-1].strip()
+ if len(info) >= 3:
+ # extract relevant file and position info
+ msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
+ print(file=sys.stderr)
+ print('*** Error while highlighting:', file=sys.stderr)
+ print(msg, file=sys.stderr)
+ print('*** If this is a bug you want to report, please rerun with -v.',
+ file=sys.stderr)
+ return 1
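
-O arguments are flattened into one dict, with bare keys stored as True; -P values skip the comma splitting entirely. A sketch of _parse_options on its own:

    from pip._vendor.pygments.cmdline import _parse_options

    opts = _parse_options(["bg=light,python=cool", "linenos"])
    # key=value pairs split on the first '='; bare flags become True
    assert opts == {"bg": "light", "python": "cool", "linenos": True}
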
diff --git a/third_party/python/pip/pip/_vendor/pygments/console.py b/third_party/python/pip/pip/_vendor/pygments/console.py
new file mode 100644
index 0000000000..2ada68e03b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/console.py
@@ -0,0 +1,70 @@
+"""
+ pygments.console
+ ~~~~~~~~~~~~~~~~
+
+ Format colored console output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+esc = "\x1b["
+
+codes = {}
+codes[""] = ""
+codes["reset"] = esc + "39;49;00m"
+
+codes["bold"] = esc + "01m"
+codes["faint"] = esc + "02m"
+codes["standout"] = esc + "03m"
+codes["underline"] = esc + "04m"
+codes["blink"] = esc + "05m"
+codes["overline"] = esc + "06m"
+
+dark_colors = ["black", "red", "green", "yellow", "blue",
+ "magenta", "cyan", "gray"]
+light_colors = ["brightblack", "brightred", "brightgreen", "brightyellow", "brightblue",
+ "brightmagenta", "brightcyan", "white"]
+
+x = 30
+for d, l in zip(dark_colors, light_colors):
+ codes[d] = esc + "%im" % x
+ codes[l] = esc + "%im" % (60 + x)
+ x += 1
+
+del d, l, x
+
+codes["white"] = codes["bold"]
+
+
+def reset_color():
+ return codes["reset"]
+
+
+def colorize(color_key, text):
+ return codes[color_key] + text + codes["reset"]
+
+
+def ansiformat(attr, text):
+ """
+ Format ``text`` with a color and/or some attributes::
+
+ color normal color
+ *color* bold color
+ _color_ underlined color
+ +color+ blinking color
+ """
+ result = []
+ if attr[:1] == attr[-1:] == '+':
+ result.append(codes['blink'])
+ attr = attr[1:-1]
+ if attr[:1] == attr[-1:] == '*':
+ result.append(codes['bold'])
+ attr = attr[1:-1]
+ if attr[:1] == attr[-1:] == '_':
+ result.append(codes['underline'])
+ attr = attr[1:-1]
+ result.append(codes[attr])
+ result.append(text)
+ result.append(codes['reset'])
+ return ''.join(result)
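
ansiformat() strips the +...+, *...* and _..._ wrappers in that order and prepends the matching escape codes before the final reset. A quick sketch:

    from pip._vendor.pygments.console import ansiformat, colorize

    print(ansiformat("*red*", "bold red"))   # "\x1b[01m\x1b[31mbold red\x1b[39;49;00m"
    print(colorize("green", "plain green"))  # color code + text + reset
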
diff --git a/third_party/python/pip/pip/_vendor/pygments/filter.py b/third_party/python/pip/pip/_vendor/pygments/filter.py
new file mode 100644
index 0000000000..e5c9664938
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/filter.py
@@ -0,0 +1,71 @@
+"""
+ pygments.filter
+ ~~~~~~~~~~~~~~~
+
+ Module that implements the default filter.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
+def apply_filters(stream, filters, lexer=None):
+ """
+ Use this method to apply an iterable of filters to
+ a stream. If lexer is given it's forwarded to the
+ filter, otherwise the filter receives `None`.
+ """
+ def _apply(filter_, stream):
+ yield from filter_.filter(lexer, stream)
+ for filter_ in filters:
+ stream = _apply(filter_, stream)
+ return stream
+
+
+def simplefilter(f):
+ """
+ Decorator that converts a function into a filter::
+
+ @simplefilter
+ def lowercase(self, lexer, stream, options):
+ for ttype, value in stream:
+ yield ttype, value.lower()
+ """
+ return type(f.__name__, (FunctionFilter,), {
+ '__module__': getattr(f, '__module__'),
+ '__doc__': f.__doc__,
+ 'function': f,
+ })
+
+
+class Filter:
+ """
+ Default filter. Subclass this class or use the `simplefilter`
+ decorator to create own filters.
+ """
+
+ def __init__(self, **options):
+ self.options = options
+
+ def filter(self, lexer, stream):
+ raise NotImplementedError()
+
+
+class FunctionFilter(Filter):
+ """
+ Abstract class used by `simplefilter` to create simple
+ function filters on the fly. The `simplefilter` decorator
+ automatically creates subclasses of this class for
+ functions passed to it.
+ """
+ function = None
+
+ def __init__(self, **options):
+ if not hasattr(self, 'function'):
+ raise TypeError('%r used without bound function' %
+ self.__class__.__name__)
+ Filter.__init__(self, **options)
+
+ def filter(self, lexer, stream):
+ # pylint: disable=not-callable
+ yield from self.function(lexer, stream, self.options)
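
To make the contract above concrete, here is a small sketch that builds a
filter with `simplefilter` and runs it through `apply_filters`; the token
stream below is hypothetical (real streams come from a lexer and use
Pygments token types rather than strings):

    from pip._vendor.pygments.filter import apply_filters, simplefilter

    # The decorator turns this function into a FunctionFilter subclass;
    # note the extra ``self`` parameter, which the generated class expects.
    @simplefilter
    def lowercase(self, lexer, stream, options):
        for ttype, value in stream:
            yield ttype, value.lower()

    # A hypothetical (ttype, value) stream; the filter only touches values.
    tokens = [("Keyword", "DEF"), ("Name", "FOO")]
    print(list(apply_filters(tokens, [lowercase()])))
    # -> [('Keyword', 'def'), ('Name', 'foo')]
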
diff --git a/third_party/python/pip/pip/_vendor/pygments/filters/__init__.py b/third_party/python/pip/pip/_vendor/pygments/filters/__init__.py
new file mode 100644
index 0000000000..c302a6c0c5
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/filters/__init__.py
@@ -0,0 +1,940 @@
+"""
+ pygments.filters
+ ~~~~~~~~~~~~~~~~
+
+ Module containing filter lookup functions and default
+ filters.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+from pip._vendor.pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
+ string_to_tokentype
+from pip._vendor.pygments.filter import Filter
+from pip._vendor.pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
+ get_choice_opt, ClassNotFound, OptionError
+from pip._vendor.pygments.plugin import find_plugin_filters
+
+
+def find_filter_class(filtername):
+ """Lookup a filter by name. Return None if not found."""
+ if filtername in FILTERS:
+ return FILTERS[filtername]
+ for name, cls in find_plugin_filters():
+ if name == filtername:
+ return cls
+ return None
+
+
+def get_filter_by_name(filtername, **options):
+ """Return an instantiated filter.
+
+    Options are passed to the filter's initializer if given.
+    Raise a ClassNotFound exception if no such filter exists.
+ """
+ cls = find_filter_class(filtername)
+ if cls:
+ return cls(**options)
+ else:
+ raise ClassNotFound('filter %r not found' % filtername)
+
+
+def get_all_filters():
+ """Return a generator of all filter names."""
+ yield from FILTERS
+ for name, _ in find_plugin_filters():
+ yield name
+
+
+def _replace_special(ttype, value, regex, specialttype,
+ replacefunc=lambda x: x):
+ last = 0
+ for match in regex.finditer(value):
+ start, end = match.start(), match.end()
+ if start != last:
+ yield ttype, value[last:start]
+ yield specialttype, replacefunc(value[start:end])
+ last = end
+ if last != len(value):
+ yield ttype, value[last:]
+
+
+class CodeTagFilter(Filter):
+ """Highlight special code tags in comments and docstrings.
+
+ Options accepted:
+
+ `codetags` : list of strings
+ A list of strings that are flagged as code tags. The default is to
+ highlight ``XXX``, ``TODO``, ``FIXME``, ``BUG`` and ``NOTE``.
+
+ .. versionchanged:: 2.13
+ Now recognizes ``FIXME`` by default.
+ """
+
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ tags = get_list_opt(options, 'codetags',
+ ['XXX', 'TODO', 'FIXME', 'BUG', 'NOTE'])
+ self.tag_re = re.compile(r'\b(%s)\b' % '|'.join([
+ re.escape(tag) for tag in tags if tag
+ ]))
+
+ def filter(self, lexer, stream):
+ regex = self.tag_re
+ for ttype, value in stream:
+ if ttype in String.Doc or \
+ ttype in Comment and \
+ ttype not in Comment.Preproc:
+ yield from _replace_special(ttype, value, regex, Comment.Special)
+ else:
+ yield ttype, value
+
+
+class SymbolFilter(Filter):
+ """Convert mathematical symbols such as \\<longrightarrow> in Isabelle
+ or \\longrightarrow in LaTeX into Unicode characters.
+
+ This is mostly useful for HTML or console output when you want to
+ approximate the source rendering you'd see in an IDE.
+
+ Options accepted:
+
+ `lang` : string
+ The symbol language. Must be one of ``'isabelle'`` or
+ ``'latex'``. The default is ``'isabelle'``.
+ """
+
+ latex_symbols = {
+ '\\alpha' : '\U000003b1',
+ '\\beta' : '\U000003b2',
+ '\\gamma' : '\U000003b3',
+ '\\delta' : '\U000003b4',
+ '\\varepsilon' : '\U000003b5',
+ '\\zeta' : '\U000003b6',
+ '\\eta' : '\U000003b7',
+ '\\vartheta' : '\U000003b8',
+ '\\iota' : '\U000003b9',
+ '\\kappa' : '\U000003ba',
+ '\\lambda' : '\U000003bb',
+ '\\mu' : '\U000003bc',
+ '\\nu' : '\U000003bd',
+ '\\xi' : '\U000003be',
+ '\\pi' : '\U000003c0',
+ '\\varrho' : '\U000003c1',
+ '\\sigma' : '\U000003c3',
+ '\\tau' : '\U000003c4',
+ '\\upsilon' : '\U000003c5',
+ '\\varphi' : '\U000003c6',
+ '\\chi' : '\U000003c7',
+ '\\psi' : '\U000003c8',
+ '\\omega' : '\U000003c9',
+ '\\Gamma' : '\U00000393',
+ '\\Delta' : '\U00000394',
+ '\\Theta' : '\U00000398',
+ '\\Lambda' : '\U0000039b',
+ '\\Xi' : '\U0000039e',
+ '\\Pi' : '\U000003a0',
+ '\\Sigma' : '\U000003a3',
+ '\\Upsilon' : '\U000003a5',
+ '\\Phi' : '\U000003a6',
+ '\\Psi' : '\U000003a8',
+ '\\Omega' : '\U000003a9',
+ '\\leftarrow' : '\U00002190',
+ '\\longleftarrow' : '\U000027f5',
+ '\\rightarrow' : '\U00002192',
+ '\\longrightarrow' : '\U000027f6',
+ '\\Leftarrow' : '\U000021d0',
+ '\\Longleftarrow' : '\U000027f8',
+ '\\Rightarrow' : '\U000021d2',
+ '\\Longrightarrow' : '\U000027f9',
+ '\\leftrightarrow' : '\U00002194',
+ '\\longleftrightarrow' : '\U000027f7',
+ '\\Leftrightarrow' : '\U000021d4',
+ '\\Longleftrightarrow' : '\U000027fa',
+ '\\mapsto' : '\U000021a6',
+ '\\longmapsto' : '\U000027fc',
+ '\\relbar' : '\U00002500',
+ '\\Relbar' : '\U00002550',
+ '\\hookleftarrow' : '\U000021a9',
+ '\\hookrightarrow' : '\U000021aa',
+ '\\leftharpoondown' : '\U000021bd',
+ '\\rightharpoondown' : '\U000021c1',
+ '\\leftharpoonup' : '\U000021bc',
+ '\\rightharpoonup' : '\U000021c0',
+ '\\rightleftharpoons' : '\U000021cc',
+ '\\leadsto' : '\U0000219d',
+ '\\downharpoonleft' : '\U000021c3',
+ '\\downharpoonright' : '\U000021c2',
+ '\\upharpoonleft' : '\U000021bf',
+ '\\upharpoonright' : '\U000021be',
+ '\\restriction' : '\U000021be',
+ '\\uparrow' : '\U00002191',
+ '\\Uparrow' : '\U000021d1',
+ '\\downarrow' : '\U00002193',
+ '\\Downarrow' : '\U000021d3',
+ '\\updownarrow' : '\U00002195',
+ '\\Updownarrow' : '\U000021d5',
+ '\\langle' : '\U000027e8',
+ '\\rangle' : '\U000027e9',
+ '\\lceil' : '\U00002308',
+ '\\rceil' : '\U00002309',
+ '\\lfloor' : '\U0000230a',
+ '\\rfloor' : '\U0000230b',
+ '\\flqq' : '\U000000ab',
+ '\\frqq' : '\U000000bb',
+ '\\bot' : '\U000022a5',
+ '\\top' : '\U000022a4',
+ '\\wedge' : '\U00002227',
+ '\\bigwedge' : '\U000022c0',
+ '\\vee' : '\U00002228',
+ '\\bigvee' : '\U000022c1',
+ '\\forall' : '\U00002200',
+ '\\exists' : '\U00002203',
+ '\\nexists' : '\U00002204',
+ '\\neg' : '\U000000ac',
+ '\\Box' : '\U000025a1',
+ '\\Diamond' : '\U000025c7',
+ '\\vdash' : '\U000022a2',
+ '\\models' : '\U000022a8',
+ '\\dashv' : '\U000022a3',
+ '\\surd' : '\U0000221a',
+ '\\le' : '\U00002264',
+ '\\ge' : '\U00002265',
+ '\\ll' : '\U0000226a',
+ '\\gg' : '\U0000226b',
+ '\\lesssim' : '\U00002272',
+ '\\gtrsim' : '\U00002273',
+ '\\lessapprox' : '\U00002a85',
+ '\\gtrapprox' : '\U00002a86',
+ '\\in' : '\U00002208',
+ '\\notin' : '\U00002209',
+ '\\subset' : '\U00002282',
+ '\\supset' : '\U00002283',
+ '\\subseteq' : '\U00002286',
+ '\\supseteq' : '\U00002287',
+ '\\sqsubset' : '\U0000228f',
+ '\\sqsupset' : '\U00002290',
+ '\\sqsubseteq' : '\U00002291',
+ '\\sqsupseteq' : '\U00002292',
+ '\\cap' : '\U00002229',
+ '\\bigcap' : '\U000022c2',
+ '\\cup' : '\U0000222a',
+ '\\bigcup' : '\U000022c3',
+ '\\sqcup' : '\U00002294',
+ '\\bigsqcup' : '\U00002a06',
+ '\\sqcap' : '\U00002293',
+ '\\Bigsqcap' : '\U00002a05',
+ '\\setminus' : '\U00002216',
+ '\\propto' : '\U0000221d',
+ '\\uplus' : '\U0000228e',
+ '\\bigplus' : '\U00002a04',
+ '\\sim' : '\U0000223c',
+ '\\doteq' : '\U00002250',
+ '\\simeq' : '\U00002243',
+ '\\approx' : '\U00002248',
+ '\\asymp' : '\U0000224d',
+ '\\cong' : '\U00002245',
+ '\\equiv' : '\U00002261',
+ '\\Join' : '\U000022c8',
+ '\\bowtie' : '\U00002a1d',
+ '\\prec' : '\U0000227a',
+ '\\succ' : '\U0000227b',
+ '\\preceq' : '\U0000227c',
+ '\\succeq' : '\U0000227d',
+ '\\parallel' : '\U00002225',
+ '\\mid' : '\U000000a6',
+ '\\pm' : '\U000000b1',
+ '\\mp' : '\U00002213',
+ '\\times' : '\U000000d7',
+ '\\div' : '\U000000f7',
+ '\\cdot' : '\U000022c5',
+ '\\star' : '\U000022c6',
+ '\\circ' : '\U00002218',
+ '\\dagger' : '\U00002020',
+ '\\ddagger' : '\U00002021',
+ '\\lhd' : '\U000022b2',
+ '\\rhd' : '\U000022b3',
+ '\\unlhd' : '\U000022b4',
+ '\\unrhd' : '\U000022b5',
+ '\\triangleleft' : '\U000025c3',
+ '\\triangleright' : '\U000025b9',
+ '\\triangle' : '\U000025b3',
+ '\\triangleq' : '\U0000225c',
+ '\\oplus' : '\U00002295',
+ '\\bigoplus' : '\U00002a01',
+ '\\otimes' : '\U00002297',
+ '\\bigotimes' : '\U00002a02',
+ '\\odot' : '\U00002299',
+ '\\bigodot' : '\U00002a00',
+ '\\ominus' : '\U00002296',
+ '\\oslash' : '\U00002298',
+ '\\dots' : '\U00002026',
+ '\\cdots' : '\U000022ef',
+ '\\sum' : '\U00002211',
+ '\\prod' : '\U0000220f',
+ '\\coprod' : '\U00002210',
+ '\\infty' : '\U0000221e',
+ '\\int' : '\U0000222b',
+ '\\oint' : '\U0000222e',
+ '\\clubsuit' : '\U00002663',
+ '\\diamondsuit' : '\U00002662',
+ '\\heartsuit' : '\U00002661',
+ '\\spadesuit' : '\U00002660',
+ '\\aleph' : '\U00002135',
+ '\\emptyset' : '\U00002205',
+ '\\nabla' : '\U00002207',
+ '\\partial' : '\U00002202',
+ '\\flat' : '\U0000266d',
+ '\\natural' : '\U0000266e',
+ '\\sharp' : '\U0000266f',
+ '\\angle' : '\U00002220',
+ '\\copyright' : '\U000000a9',
+ '\\textregistered' : '\U000000ae',
+ '\\textonequarter' : '\U000000bc',
+ '\\textonehalf' : '\U000000bd',
+ '\\textthreequarters' : '\U000000be',
+ '\\textordfeminine' : '\U000000aa',
+ '\\textordmasculine' : '\U000000ba',
+ '\\euro' : '\U000020ac',
+ '\\pounds' : '\U000000a3',
+ '\\yen' : '\U000000a5',
+ '\\textcent' : '\U000000a2',
+ '\\textcurrency' : '\U000000a4',
+ '\\textdegree' : '\U000000b0',
+ }
+
+ isabelle_symbols = {
+ '\\<zero>' : '\U0001d7ec',
+ '\\<one>' : '\U0001d7ed',
+ '\\<two>' : '\U0001d7ee',
+ '\\<three>' : '\U0001d7ef',
+ '\\<four>' : '\U0001d7f0',
+ '\\<five>' : '\U0001d7f1',
+ '\\<six>' : '\U0001d7f2',
+ '\\<seven>' : '\U0001d7f3',
+ '\\<eight>' : '\U0001d7f4',
+ '\\<nine>' : '\U0001d7f5',
+ '\\<A>' : '\U0001d49c',
+ '\\<B>' : '\U0000212c',
+ '\\<C>' : '\U0001d49e',
+ '\\<D>' : '\U0001d49f',
+ '\\<E>' : '\U00002130',
+ '\\<F>' : '\U00002131',
+ '\\<G>' : '\U0001d4a2',
+ '\\<H>' : '\U0000210b',
+ '\\<I>' : '\U00002110',
+ '\\<J>' : '\U0001d4a5',
+ '\\<K>' : '\U0001d4a6',
+ '\\<L>' : '\U00002112',
+ '\\<M>' : '\U00002133',
+ '\\<N>' : '\U0001d4a9',
+ '\\<O>' : '\U0001d4aa',
+ '\\<P>' : '\U0001d4ab',
+ '\\<Q>' : '\U0001d4ac',
+ '\\<R>' : '\U0000211b',
+ '\\<S>' : '\U0001d4ae',
+ '\\<T>' : '\U0001d4af',
+ '\\<U>' : '\U0001d4b0',
+ '\\<V>' : '\U0001d4b1',
+ '\\<W>' : '\U0001d4b2',
+ '\\<X>' : '\U0001d4b3',
+ '\\<Y>' : '\U0001d4b4',
+ '\\<Z>' : '\U0001d4b5',
+ '\\<a>' : '\U0001d5ba',
+ '\\<b>' : '\U0001d5bb',
+ '\\<c>' : '\U0001d5bc',
+ '\\<d>' : '\U0001d5bd',
+ '\\<e>' : '\U0001d5be',
+ '\\<f>' : '\U0001d5bf',
+ '\\<g>' : '\U0001d5c0',
+ '\\<h>' : '\U0001d5c1',
+ '\\<i>' : '\U0001d5c2',
+ '\\<j>' : '\U0001d5c3',
+ '\\<k>' : '\U0001d5c4',
+ '\\<l>' : '\U0001d5c5',
+ '\\<m>' : '\U0001d5c6',
+ '\\<n>' : '\U0001d5c7',
+ '\\<o>' : '\U0001d5c8',
+ '\\<p>' : '\U0001d5c9',
+ '\\<q>' : '\U0001d5ca',
+ '\\<r>' : '\U0001d5cb',
+ '\\<s>' : '\U0001d5cc',
+ '\\<t>' : '\U0001d5cd',
+ '\\<u>' : '\U0001d5ce',
+ '\\<v>' : '\U0001d5cf',
+ '\\<w>' : '\U0001d5d0',
+ '\\<x>' : '\U0001d5d1',
+ '\\<y>' : '\U0001d5d2',
+ '\\<z>' : '\U0001d5d3',
+ '\\<AA>' : '\U0001d504',
+ '\\<BB>' : '\U0001d505',
+ '\\<CC>' : '\U0000212d',
+ '\\<DD>' : '\U0001d507',
+ '\\<EE>' : '\U0001d508',
+ '\\<FF>' : '\U0001d509',
+ '\\<GG>' : '\U0001d50a',
+ '\\<HH>' : '\U0000210c',
+ '\\<II>' : '\U00002111',
+ '\\<JJ>' : '\U0001d50d',
+ '\\<KK>' : '\U0001d50e',
+ '\\<LL>' : '\U0001d50f',
+ '\\<MM>' : '\U0001d510',
+ '\\<NN>' : '\U0001d511',
+ '\\<OO>' : '\U0001d512',
+ '\\<PP>' : '\U0001d513',
+ '\\<QQ>' : '\U0001d514',
+ '\\<RR>' : '\U0000211c',
+ '\\<SS>' : '\U0001d516',
+ '\\<TT>' : '\U0001d517',
+ '\\<UU>' : '\U0001d518',
+ '\\<VV>' : '\U0001d519',
+ '\\<WW>' : '\U0001d51a',
+ '\\<XX>' : '\U0001d51b',
+ '\\<YY>' : '\U0001d51c',
+ '\\<ZZ>' : '\U00002128',
+ '\\<aa>' : '\U0001d51e',
+ '\\<bb>' : '\U0001d51f',
+ '\\<cc>' : '\U0001d520',
+ '\\<dd>' : '\U0001d521',
+ '\\<ee>' : '\U0001d522',
+ '\\<ff>' : '\U0001d523',
+ '\\<gg>' : '\U0001d524',
+ '\\<hh>' : '\U0001d525',
+ '\\<ii>' : '\U0001d526',
+ '\\<jj>' : '\U0001d527',
+ '\\<kk>' : '\U0001d528',
+ '\\<ll>' : '\U0001d529',
+ '\\<mm>' : '\U0001d52a',
+ '\\<nn>' : '\U0001d52b',
+ '\\<oo>' : '\U0001d52c',
+ '\\<pp>' : '\U0001d52d',
+ '\\<qq>' : '\U0001d52e',
+ '\\<rr>' : '\U0001d52f',
+ '\\<ss>' : '\U0001d530',
+ '\\<tt>' : '\U0001d531',
+ '\\<uu>' : '\U0001d532',
+ '\\<vv>' : '\U0001d533',
+ '\\<ww>' : '\U0001d534',
+ '\\<xx>' : '\U0001d535',
+ '\\<yy>' : '\U0001d536',
+ '\\<zz>' : '\U0001d537',
+ '\\<alpha>' : '\U000003b1',
+ '\\<beta>' : '\U000003b2',
+ '\\<gamma>' : '\U000003b3',
+ '\\<delta>' : '\U000003b4',
+ '\\<epsilon>' : '\U000003b5',
+ '\\<zeta>' : '\U000003b6',
+ '\\<eta>' : '\U000003b7',
+ '\\<theta>' : '\U000003b8',
+ '\\<iota>' : '\U000003b9',
+ '\\<kappa>' : '\U000003ba',
+ '\\<lambda>' : '\U000003bb',
+ '\\<mu>' : '\U000003bc',
+ '\\<nu>' : '\U000003bd',
+ '\\<xi>' : '\U000003be',
+ '\\<pi>' : '\U000003c0',
+ '\\<rho>' : '\U000003c1',
+ '\\<sigma>' : '\U000003c3',
+ '\\<tau>' : '\U000003c4',
+ '\\<upsilon>' : '\U000003c5',
+ '\\<phi>' : '\U000003c6',
+ '\\<chi>' : '\U000003c7',
+ '\\<psi>' : '\U000003c8',
+ '\\<omega>' : '\U000003c9',
+ '\\<Gamma>' : '\U00000393',
+ '\\<Delta>' : '\U00000394',
+ '\\<Theta>' : '\U00000398',
+ '\\<Lambda>' : '\U0000039b',
+ '\\<Xi>' : '\U0000039e',
+ '\\<Pi>' : '\U000003a0',
+ '\\<Sigma>' : '\U000003a3',
+ '\\<Upsilon>' : '\U000003a5',
+ '\\<Phi>' : '\U000003a6',
+ '\\<Psi>' : '\U000003a8',
+ '\\<Omega>' : '\U000003a9',
+ '\\<bool>' : '\U0001d539',
+ '\\<complex>' : '\U00002102',
+ '\\<nat>' : '\U00002115',
+ '\\<rat>' : '\U0000211a',
+ '\\<real>' : '\U0000211d',
+ '\\<int>' : '\U00002124',
+ '\\<leftarrow>' : '\U00002190',
+ '\\<longleftarrow>' : '\U000027f5',
+ '\\<rightarrow>' : '\U00002192',
+ '\\<longrightarrow>' : '\U000027f6',
+ '\\<Leftarrow>' : '\U000021d0',
+ '\\<Longleftarrow>' : '\U000027f8',
+ '\\<Rightarrow>' : '\U000021d2',
+ '\\<Longrightarrow>' : '\U000027f9',
+ '\\<leftrightarrow>' : '\U00002194',
+ '\\<longleftrightarrow>' : '\U000027f7',
+ '\\<Leftrightarrow>' : '\U000021d4',
+ '\\<Longleftrightarrow>' : '\U000027fa',
+ '\\<mapsto>' : '\U000021a6',
+ '\\<longmapsto>' : '\U000027fc',
+ '\\<midarrow>' : '\U00002500',
+ '\\<Midarrow>' : '\U00002550',
+ '\\<hookleftarrow>' : '\U000021a9',
+ '\\<hookrightarrow>' : '\U000021aa',
+ '\\<leftharpoondown>' : '\U000021bd',
+ '\\<rightharpoondown>' : '\U000021c1',
+ '\\<leftharpoonup>' : '\U000021bc',
+ '\\<rightharpoonup>' : '\U000021c0',
+ '\\<rightleftharpoons>' : '\U000021cc',
+ '\\<leadsto>' : '\U0000219d',
+ '\\<downharpoonleft>' : '\U000021c3',
+ '\\<downharpoonright>' : '\U000021c2',
+ '\\<upharpoonleft>' : '\U000021bf',
+ '\\<upharpoonright>' : '\U000021be',
+ '\\<restriction>' : '\U000021be',
+ '\\<Colon>' : '\U00002237',
+ '\\<up>' : '\U00002191',
+ '\\<Up>' : '\U000021d1',
+ '\\<down>' : '\U00002193',
+ '\\<Down>' : '\U000021d3',
+ '\\<updown>' : '\U00002195',
+ '\\<Updown>' : '\U000021d5',
+ '\\<langle>' : '\U000027e8',
+ '\\<rangle>' : '\U000027e9',
+ '\\<lceil>' : '\U00002308',
+ '\\<rceil>' : '\U00002309',
+ '\\<lfloor>' : '\U0000230a',
+ '\\<rfloor>' : '\U0000230b',
+ '\\<lparr>' : '\U00002987',
+ '\\<rparr>' : '\U00002988',
+ '\\<lbrakk>' : '\U000027e6',
+ '\\<rbrakk>' : '\U000027e7',
+ '\\<lbrace>' : '\U00002983',
+ '\\<rbrace>' : '\U00002984',
+ '\\<guillemotleft>' : '\U000000ab',
+ '\\<guillemotright>' : '\U000000bb',
+ '\\<bottom>' : '\U000022a5',
+ '\\<top>' : '\U000022a4',
+ '\\<and>' : '\U00002227',
+ '\\<And>' : '\U000022c0',
+ '\\<or>' : '\U00002228',
+ '\\<Or>' : '\U000022c1',
+ '\\<forall>' : '\U00002200',
+ '\\<exists>' : '\U00002203',
+ '\\<nexists>' : '\U00002204',
+ '\\<not>' : '\U000000ac',
+ '\\<box>' : '\U000025a1',
+ '\\<diamond>' : '\U000025c7',
+ '\\<turnstile>' : '\U000022a2',
+ '\\<Turnstile>' : '\U000022a8',
+ '\\<tturnstile>' : '\U000022a9',
+ '\\<TTurnstile>' : '\U000022ab',
+ '\\<stileturn>' : '\U000022a3',
+ '\\<surd>' : '\U0000221a',
+ '\\<le>' : '\U00002264',
+ '\\<ge>' : '\U00002265',
+ '\\<lless>' : '\U0000226a',
+ '\\<ggreater>' : '\U0000226b',
+ '\\<lesssim>' : '\U00002272',
+ '\\<greatersim>' : '\U00002273',
+ '\\<lessapprox>' : '\U00002a85',
+ '\\<greaterapprox>' : '\U00002a86',
+ '\\<in>' : '\U00002208',
+ '\\<notin>' : '\U00002209',
+ '\\<subset>' : '\U00002282',
+ '\\<supset>' : '\U00002283',
+ '\\<subseteq>' : '\U00002286',
+ '\\<supseteq>' : '\U00002287',
+ '\\<sqsubset>' : '\U0000228f',
+ '\\<sqsupset>' : '\U00002290',
+ '\\<sqsubseteq>' : '\U00002291',
+ '\\<sqsupseteq>' : '\U00002292',
+ '\\<inter>' : '\U00002229',
+ '\\<Inter>' : '\U000022c2',
+ '\\<union>' : '\U0000222a',
+ '\\<Union>' : '\U000022c3',
+ '\\<squnion>' : '\U00002294',
+ '\\<Squnion>' : '\U00002a06',
+ '\\<sqinter>' : '\U00002293',
+ '\\<Sqinter>' : '\U00002a05',
+ '\\<setminus>' : '\U00002216',
+ '\\<propto>' : '\U0000221d',
+ '\\<uplus>' : '\U0000228e',
+ '\\<Uplus>' : '\U00002a04',
+ '\\<noteq>' : '\U00002260',
+ '\\<sim>' : '\U0000223c',
+ '\\<doteq>' : '\U00002250',
+ '\\<simeq>' : '\U00002243',
+ '\\<approx>' : '\U00002248',
+ '\\<asymp>' : '\U0000224d',
+ '\\<cong>' : '\U00002245',
+ '\\<smile>' : '\U00002323',
+ '\\<equiv>' : '\U00002261',
+ '\\<frown>' : '\U00002322',
+ '\\<Join>' : '\U000022c8',
+ '\\<bowtie>' : '\U00002a1d',
+ '\\<prec>' : '\U0000227a',
+ '\\<succ>' : '\U0000227b',
+ '\\<preceq>' : '\U0000227c',
+ '\\<succeq>' : '\U0000227d',
+ '\\<parallel>' : '\U00002225',
+ '\\<bar>' : '\U000000a6',
+ '\\<plusminus>' : '\U000000b1',
+ '\\<minusplus>' : '\U00002213',
+ '\\<times>' : '\U000000d7',
+ '\\<div>' : '\U000000f7',
+ '\\<cdot>' : '\U000022c5',
+ '\\<star>' : '\U000022c6',
+ '\\<bullet>' : '\U00002219',
+ '\\<circ>' : '\U00002218',
+ '\\<dagger>' : '\U00002020',
+ '\\<ddagger>' : '\U00002021',
+ '\\<lhd>' : '\U000022b2',
+ '\\<rhd>' : '\U000022b3',
+ '\\<unlhd>' : '\U000022b4',
+ '\\<unrhd>' : '\U000022b5',
+ '\\<triangleleft>' : '\U000025c3',
+ '\\<triangleright>' : '\U000025b9',
+ '\\<triangle>' : '\U000025b3',
+ '\\<triangleq>' : '\U0000225c',
+ '\\<oplus>' : '\U00002295',
+ '\\<Oplus>' : '\U00002a01',
+ '\\<otimes>' : '\U00002297',
+ '\\<Otimes>' : '\U00002a02',
+ '\\<odot>' : '\U00002299',
+ '\\<Odot>' : '\U00002a00',
+ '\\<ominus>' : '\U00002296',
+ '\\<oslash>' : '\U00002298',
+ '\\<dots>' : '\U00002026',
+ '\\<cdots>' : '\U000022ef',
+ '\\<Sum>' : '\U00002211',
+ '\\<Prod>' : '\U0000220f',
+ '\\<Coprod>' : '\U00002210',
+ '\\<infinity>' : '\U0000221e',
+ '\\<integral>' : '\U0000222b',
+ '\\<ointegral>' : '\U0000222e',
+ '\\<clubsuit>' : '\U00002663',
+ '\\<diamondsuit>' : '\U00002662',
+ '\\<heartsuit>' : '\U00002661',
+ '\\<spadesuit>' : '\U00002660',
+ '\\<aleph>' : '\U00002135',
+ '\\<emptyset>' : '\U00002205',
+ '\\<nabla>' : '\U00002207',
+ '\\<partial>' : '\U00002202',
+ '\\<flat>' : '\U0000266d',
+ '\\<natural>' : '\U0000266e',
+ '\\<sharp>' : '\U0000266f',
+ '\\<angle>' : '\U00002220',
+ '\\<copyright>' : '\U000000a9',
+ '\\<registered>' : '\U000000ae',
+ '\\<hyphen>' : '\U000000ad',
+ '\\<inverse>' : '\U000000af',
+ '\\<onequarter>' : '\U000000bc',
+ '\\<onehalf>' : '\U000000bd',
+ '\\<threequarters>' : '\U000000be',
+ '\\<ordfeminine>' : '\U000000aa',
+ '\\<ordmasculine>' : '\U000000ba',
+ '\\<section>' : '\U000000a7',
+ '\\<paragraph>' : '\U000000b6',
+ '\\<exclamdown>' : '\U000000a1',
+ '\\<questiondown>' : '\U000000bf',
+ '\\<euro>' : '\U000020ac',
+ '\\<pounds>' : '\U000000a3',
+ '\\<yen>' : '\U000000a5',
+ '\\<cent>' : '\U000000a2',
+ '\\<currency>' : '\U000000a4',
+ '\\<degree>' : '\U000000b0',
+ '\\<amalg>' : '\U00002a3f',
+ '\\<mho>' : '\U00002127',
+ '\\<lozenge>' : '\U000025ca',
+ '\\<wp>' : '\U00002118',
+ '\\<wrong>' : '\U00002240',
+ '\\<struct>' : '\U000022c4',
+ '\\<acute>' : '\U000000b4',
+ '\\<index>' : '\U00000131',
+ '\\<dieresis>' : '\U000000a8',
+ '\\<cedilla>' : '\U000000b8',
+ '\\<hungarumlaut>' : '\U000002dd',
+ '\\<some>' : '\U000003f5',
+ '\\<newline>' : '\U000023ce',
+ '\\<open>' : '\U00002039',
+ '\\<close>' : '\U0000203a',
+ '\\<here>' : '\U00002302',
+ '\\<^sub>' : '\U000021e9',
+ '\\<^sup>' : '\U000021e7',
+ '\\<^bold>' : '\U00002759',
+ '\\<^bsub>' : '\U000021d8',
+ '\\<^esub>' : '\U000021d9',
+ '\\<^bsup>' : '\U000021d7',
+ '\\<^esup>' : '\U000021d6',
+ }
+
+ lang_map = {'isabelle' : isabelle_symbols, 'latex' : latex_symbols}
+
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ lang = get_choice_opt(options, 'lang',
+ ['isabelle', 'latex'], 'isabelle')
+ self.symbols = self.lang_map[lang]
+
+ def filter(self, lexer, stream):
+ for ttype, value in stream:
+ if value in self.symbols:
+ yield ttype, self.symbols[value]
+ else:
+ yield ttype, value
+
+
+class KeywordCaseFilter(Filter):
+ """Convert keywords to lowercase or uppercase or capitalize them, which
+ means first letter uppercase, rest lowercase.
+
+ This can be useful e.g. if you highlight Pascal code and want to adapt the
+ code to your styleguide.
+
+ Options accepted:
+
+ `case` : string
+ The casing to convert keywords to. Must be one of ``'lower'``,
+ ``'upper'`` or ``'capitalize'``. The default is ``'lower'``.
+ """
+
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ case = get_choice_opt(options, 'case',
+ ['lower', 'upper', 'capitalize'], 'lower')
+ self.convert = getattr(str, case)
+
+ def filter(self, lexer, stream):
+ for ttype, value in stream:
+ if ttype in Keyword:
+ yield ttype, self.convert(value)
+ else:
+ yield ttype, value
+
+
+class NameHighlightFilter(Filter):
+ """Highlight a normal Name (and Name.*) token with a different token type.
+
+ Example::
+
+ filter = NameHighlightFilter(
+ names=['foo', 'bar', 'baz'],
+ tokentype=Name.Function,
+ )
+
+ This would highlight the names "foo", "bar" and "baz"
+ as functions. `Name.Function` is the default token type.
+
+ Options accepted:
+
+ `names` : list of strings
+ A list of names that should be given the different token type.
+ There is no default.
+ `tokentype` : TokenType or string
+ A token type or a string containing a token type name that is
+ used for highlighting the strings in `names`. The default is
+ `Name.Function`.
+ """
+
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ self.names = set(get_list_opt(options, 'names', []))
+ tokentype = options.get('tokentype')
+ if tokentype:
+ self.tokentype = string_to_tokentype(tokentype)
+ else:
+ self.tokentype = Name.Function
+
+ def filter(self, lexer, stream):
+ for ttype, value in stream:
+ if ttype in Name and value in self.names:
+ yield self.tokentype, value
+ else:
+ yield ttype, value
+
+
+class ErrorToken(Exception):
+ pass
+
+
+class RaiseOnErrorTokenFilter(Filter):
+ """Raise an exception when the lexer generates an error token.
+
+ Options accepted:
+
+ `excclass` : Exception class
+ The exception class to raise.
+ The default is `pygments.filters.ErrorToken`.
+
+ .. versionadded:: 0.8
+ """
+
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ self.exception = options.get('excclass', ErrorToken)
+ try:
+ # issubclass() will raise TypeError if first argument is not a class
+ if not issubclass(self.exception, Exception):
+ raise TypeError
+ except TypeError:
+ raise OptionError('excclass option is not an exception class')
+
+ def filter(self, lexer, stream):
+ for ttype, value in stream:
+ if ttype is Error:
+ raise self.exception(value)
+ yield ttype, value
+
+
+class VisibleWhitespaceFilter(Filter):
+ """Convert tabs, newlines and/or spaces to visible characters.
+
+ Options accepted:
+
+ `spaces` : string or bool
+      If this is a one-character string, spaces will be replaced by this string.
+ If it is another true value, spaces will be replaced by ``·`` (unicode
+ MIDDLE DOT). If it is a false value, spaces will not be replaced. The
+ default is ``False``.
+ `tabs` : string or bool
+ The same as for `spaces`, but the default replacement character is ``»``
+ (unicode RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK). The default value
+ is ``False``. Note: this will not work if the `tabsize` option for the
+ lexer is nonzero, as tabs will already have been expanded then.
+ `tabsize` : int
+ If tabs are to be replaced by this filter (see the `tabs` option), this
+ is the total number of characters that a tab should be expanded to.
+ The default is ``8``.
+ `newlines` : string or bool
+ The same as for `spaces`, but the default replacement character is ``¶``
+ (unicode PILCROW SIGN). The default value is ``False``.
+ `wstokentype` : bool
+ If true, give whitespace the special `Whitespace` token type. This allows
+ styling the visible whitespace differently (e.g. greyed out), but it can
+ disrupt background colors. The default is ``True``.
+
+ .. versionadded:: 0.8
+ """
+
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ for name, default in [('spaces', '·'),
+ ('tabs', '»'),
+ ('newlines', '¶')]:
+ opt = options.get(name, False)
+ if isinstance(opt, str) and len(opt) == 1:
+ setattr(self, name, opt)
+ else:
+ setattr(self, name, (opt and default or ''))
+ tabsize = get_int_opt(options, 'tabsize', 8)
+ if self.tabs:
+ self.tabs += ' ' * (tabsize - 1)
+ if self.newlines:
+ self.newlines += '\n'
+ self.wstt = get_bool_opt(options, 'wstokentype', True)
+
+ def filter(self, lexer, stream):
+ if self.wstt:
+ spaces = self.spaces or ' '
+ tabs = self.tabs or '\t'
+ newlines = self.newlines or '\n'
+ regex = re.compile(r'\s')
+
+ def replacefunc(wschar):
+ if wschar == ' ':
+ return spaces
+ elif wschar == '\t':
+ return tabs
+ elif wschar == '\n':
+ return newlines
+ return wschar
+
+ for ttype, value in stream:
+ yield from _replace_special(ttype, value, regex, Whitespace,
+ replacefunc)
+ else:
+ spaces, tabs, newlines = self.spaces, self.tabs, self.newlines
+ # simpler processing
+ for ttype, value in stream:
+ if spaces:
+ value = value.replace(' ', spaces)
+ if tabs:
+ value = value.replace('\t', tabs)
+ if newlines:
+ value = value.replace('\n', newlines)
+ yield ttype, value
+
+
+class GobbleFilter(Filter):
+ """Gobbles source code lines (eats initial characters).
+
+ This filter drops the first ``n`` characters off every line of code. This
+ may be useful when the source code fed to the lexer is indented by a fixed
+ amount of space that isn't desired in the output.
+
+ Options accepted:
+
+ `n` : int
+ The number of characters to gobble.
+
+ .. versionadded:: 1.2
+ """
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+ self.n = get_int_opt(options, 'n', 0)
+
+ def gobble(self, value, left):
+ if left < len(value):
+ return value[left:], 0
+ else:
+ return '', left - len(value)
+
+ def filter(self, lexer, stream):
+ n = self.n
+ left = n # How many characters left to gobble.
+ for ttype, value in stream:
+            # Remove ``left`` characters from the first line, ``n`` from all others.
+ parts = value.split('\n')
+ (parts[0], left) = self.gobble(parts[0], left)
+ for i in range(1, len(parts)):
+ (parts[i], left) = self.gobble(parts[i], n)
+ value = '\n'.join(parts)
+
+ if value != '':
+ yield ttype, value
+
+
+class TokenMergeFilter(Filter):
+ """Merges consecutive tokens with the same token type in the output
+ stream of a lexer.
+
+ .. versionadded:: 1.2
+ """
+ def __init__(self, **options):
+ Filter.__init__(self, **options)
+
+ def filter(self, lexer, stream):
+ current_type = None
+ current_value = None
+ for ttype, value in stream:
+ if ttype is current_type:
+ current_value += value
+ else:
+ if current_type is not None:
+ yield current_type, current_value
+ current_type = ttype
+ current_value = value
+ if current_type is not None:
+ yield current_type, current_value
+
+
+FILTERS = {
+ 'codetagify': CodeTagFilter,
+ 'keywordcase': KeywordCaseFilter,
+ 'highlight': NameHighlightFilter,
+ 'raiseonerror': RaiseOnErrorTokenFilter,
+ 'whitespace': VisibleWhitespaceFilter,
+ 'gobble': GobbleFilter,
+ 'tokenmerge': TokenMergeFilter,
+ 'symbols': SymbolFilter,
+}
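
The FILTERS table above is what the lookup helpers at the top of the module
consult. A short sketch of the public entry points:

    from pip._vendor.pygments.filters import (
        get_all_filters, get_filter_by_name)

    # Names come from FILTERS plus any plugin-provided filters.
    print(sorted(get_all_filters()))

    # Keyword options are forwarded to the filter's __init__; this builds
    # a KeywordCaseFilter that uppercases all Keyword tokens.
    kwcase = get_filter_by_name('keywordcase', case='upper')
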
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatter.py b/third_party/python/pip/pip/_vendor/pygments/formatter.py
new file mode 100644
index 0000000000..a2349ef865
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatter.py
@@ -0,0 +1,94 @@
+"""
+ pygments.formatter
+ ~~~~~~~~~~~~~~~~~~
+
+ Base formatter class.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import codecs
+
+from pip._vendor.pygments.util import get_bool_opt
+from pip._vendor.pygments.styles import get_style_by_name
+
+__all__ = ['Formatter']
+
+
+def _lookup_style(style):
+ if isinstance(style, str):
+ return get_style_by_name(style)
+ return style
+
+
+class Formatter:
+ """
+ Converts a token stream to text.
+
+ Options accepted:
+
+ ``style``
+ The style to use, can be a string or a Style subclass
+ (default: "default"). Not used by e.g. the
+ TerminalFormatter.
+ ``full``
+ Tells the formatter to output a "full" document, i.e.
+ a complete self-contained document. This doesn't have
+ any effect for some formatters (default: false).
+ ``title``
+ If ``full`` is true, the title that should be used to
+ caption the document (default: '').
+ ``encoding``
+ If given, must be an encoding name. This will be used to
+ convert the Unicode token strings to byte strings in the
+ output. If it is "" or None, Unicode strings will be written
+ to the output file, which most file-like objects do not
+ support (default: None).
+ ``outencoding``
+ Overrides ``encoding`` if given.
+ """
+
+ #: Name of the formatter
+ name = None
+
+ #: Shortcuts for the formatter
+ aliases = []
+
+ #: fn match rules
+ filenames = []
+
+ #: If True, this formatter outputs Unicode strings when no encoding
+ #: option is given.
+ unicodeoutput = True
+
+ def __init__(self, **options):
+ self.style = _lookup_style(options.get('style', 'default'))
+ self.full = get_bool_opt(options, 'full', False)
+ self.title = options.get('title', '')
+ self.encoding = options.get('encoding', None) or None
+ if self.encoding in ('guess', 'chardet'):
+ # can happen for e.g. pygmentize -O encoding=guess
+ self.encoding = 'utf-8'
+ self.encoding = options.get('outencoding') or self.encoding
+ self.options = options
+
+ def get_style_defs(self, arg=''):
+ """
+ Return the style definitions for the current style as a string.
+
+ ``arg`` is an additional argument whose meaning depends on the
+ formatter used. Note that ``arg`` can also be a list or tuple
+ for some formatters like the html formatter.
+ """
+ return ''
+
+ def format(self, tokensource, outfile):
+ """
+ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
+ tuples and write it into ``outfile``.
+ """
+ if self.encoding:
+ # wrap the outfile in a StreamWriter
+ outfile = codecs.lookup(self.encoding)[3](outfile)
+ return self.format_unencoded(tokensource, outfile)
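
Concrete formatters only need to supply `format_unencoded`; the base
`format()` above handles the optional encoding wrapper. A minimal,
hypothetical subclass as a sketch:

    from pip._vendor.pygments.formatter import Formatter

    class PlainFormatter(Formatter):
        """Hypothetical formatter: writes token text, discarding styles."""
        name = 'Plain'
        aliases = ['plain']

        def format_unencoded(self, tokensource, outfile):
            for ttype, value in tokensource:
                outfile.write(value)
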
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/__init__.py b/third_party/python/pip/pip/_vendor/pygments/formatters/__init__.py
new file mode 100644
index 0000000000..43c4c89aac
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/__init__.py
@@ -0,0 +1,143 @@
+"""
+ pygments.formatters
+ ~~~~~~~~~~~~~~~~~~~
+
+ Pygments formatters.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import sys
+import types
+from fnmatch import fnmatch
+from os.path import basename
+
+from pip._vendor.pygments.formatters._mapping import FORMATTERS
+from pip._vendor.pygments.plugin import find_plugin_formatters
+from pip._vendor.pygments.util import ClassNotFound
+
+__all__ = ['get_formatter_by_name', 'get_formatter_for_filename',
+ 'get_all_formatters', 'load_formatter_from_file'] + list(FORMATTERS)
+
+_formatter_cache = {} # classes by name
+
+def _load_formatters(module_name):
+ """Load a formatter (and all others in the module too)."""
+ mod = __import__(module_name, None, None, ['__all__'])
+ for formatter_name in mod.__all__:
+ cls = getattr(mod, formatter_name)
+ _formatter_cache[cls.name] = cls
+
+
+def get_all_formatters():
+ """Return a generator for all formatter classes."""
+ # NB: this returns formatter classes, not info like get_all_lexers().
+ for info in FORMATTERS.values():
+ if info[1] not in _formatter_cache:
+ _load_formatters(info[0])
+ yield _formatter_cache[info[1]]
+ for _, formatter in find_plugin_formatters():
+ yield formatter
+
+
+def find_formatter_class(alias):
+ """Lookup a formatter by alias.
+
+ Returns None if not found.
+ """
+ for module_name, name, aliases, _, _ in FORMATTERS.values():
+ if alias in aliases:
+ if name not in _formatter_cache:
+ _load_formatters(module_name)
+ return _formatter_cache[name]
+ for _, cls in find_plugin_formatters():
+ if alias in cls.aliases:
+ return cls
+
+
+def get_formatter_by_name(_alias, **options):
+ """Lookup and instantiate a formatter by alias.
+
+ Raises ClassNotFound if not found.
+ """
+ cls = find_formatter_class(_alias)
+ if cls is None:
+ raise ClassNotFound("no formatter found for name %r" % _alias)
+ return cls(**options)
+
+
+def load_formatter_from_file(filename, formattername="CustomFormatter",
+ **options):
+ """Load a formatter from a file.
+
+    This function expects a file, located relative to the current working
+    directory, that contains a Formatter class. By default it looks for a
+    class named CustomFormatter; you can specify your own class name as
+    the second argument to this function.
+
+    Users should be very careful with the input, because this function
+    is equivalent to running exec on the input file.
+
+ Raises ClassNotFound if there are any problems importing the Formatter.
+
+ .. versionadded:: 2.2
+ """
+ try:
+ # This empty dict will contain the namespace for the exec'd file
+ custom_namespace = {}
+ with open(filename, 'rb') as f:
+ exec(f.read(), custom_namespace)
+ # Retrieve the class `formattername` from that namespace
+ if formattername not in custom_namespace:
+ raise ClassNotFound('no valid %s class found in %s' %
+ (formattername, filename))
+ formatter_class = custom_namespace[formattername]
+ # And finally instantiate it with the options
+ return formatter_class(**options)
+ except OSError as err:
+ raise ClassNotFound('cannot read %s: %s' % (filename, err))
+ except ClassNotFound:
+ raise
+ except Exception as err:
+ raise ClassNotFound('error when loading custom formatter: %s' % err)
+
+
+def get_formatter_for_filename(fn, **options):
+ """Lookup and instantiate a formatter by filename pattern.
+
+ Raises ClassNotFound if not found.
+ """
+ fn = basename(fn)
+ for modname, name, _, filenames, _ in FORMATTERS.values():
+ for filename in filenames:
+ if fnmatch(fn, filename):
+ if name not in _formatter_cache:
+ _load_formatters(modname)
+ return _formatter_cache[name](**options)
+ for cls in find_plugin_formatters():
+ for filename in cls.filenames:
+ if fnmatch(fn, filename):
+ return cls(**options)
+ raise ClassNotFound("no formatter found for file name %r" % fn)
+
+
+class _automodule(types.ModuleType):
+ """Automatically import formatters."""
+
+ def __getattr__(self, name):
+ info = FORMATTERS.get(name)
+ if info:
+ _load_formatters(info[0])
+ cls = _formatter_cache[info[1]]
+ setattr(self, name, cls)
+ return cls
+ raise AttributeError(name)
+
+
+oldmod = sys.modules[__name__]
+newmod = _automodule(__name__)
+newmod.__dict__.update(oldmod.__dict__)
+sys.modules[__name__] = newmod
+del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
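
The module above loads formatters lazily: both the lookup helpers and the
`_automodule.__getattr__` hook import a formatter module only on first use.
A usage sketch (assuming pip's vendored import aliasing resolves the
`pygments.formatters.*` module names recorded in FORMATTERS):

    from pip._vendor.pygments.formatters import (
        get_formatter_by_name, get_formatter_for_filename)

    # By alias: triggers _load_formatters('pygments.formatters.bbcode').
    bbcode = get_formatter_by_name('bbcode')

    # By filename pattern: '*.html' maps to HtmlFormatter; extra keyword
    # options are passed through to the formatter's constructor.
    html = get_formatter_for_filename('page.html', linenos='table')
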
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/_mapping.py b/third_party/python/pip/pip/_vendor/pygments/formatters/_mapping.py
new file mode 100644
index 0000000000..6e34f96078
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/_mapping.py
@@ -0,0 +1,23 @@
+# Automatically generated by scripts/gen_mapfiles.py.
+# DO NOT EDIT BY HAND; run `make mapfiles` instead.
+
+FORMATTERS = {
+    'BBCodeFormatter': ('pygments.formatters.bbcode', 'BBCode', ('bbcode', 'bb'), (), 'Format tokens with BBcodes. These formatting codes are used by many bulletin boards, so you can highlight your source code with Pygments before posting it there.'),
+ 'BmpImageFormatter': ('pygments.formatters.img', 'img_bmp', ('bmp', 'bitmap'), ('*.bmp',), 'Create a bitmap image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'GifImageFormatter': ('pygments.formatters.img', 'img_gif', ('gif',), ('*.gif',), 'Create a GIF image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'GroffFormatter': ('pygments.formatters.groff', 'groff', ('groff', 'troff', 'roff'), (), 'Format tokens with groff escapes to change their color and font style.'),
+ 'HtmlFormatter': ('pygments.formatters.html', 'HTML', ('html',), ('*.html', '*.htm'), "Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass` option."),
+ 'IRCFormatter': ('pygments.formatters.irc', 'IRC', ('irc', 'IRC'), (), 'Format tokens with IRC color sequences'),
+ 'ImageFormatter': ('pygments.formatters.img', 'img', ('img', 'IMG', 'png'), ('*.png',), 'Create a PNG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'JpgImageFormatter': ('pygments.formatters.img', 'img_jpg', ('jpg', 'jpeg'), ('*.jpg',), 'Create a JPEG image from source code. This uses the Python Imaging Library to generate a pixmap from the source code.'),
+ 'LatexFormatter': ('pygments.formatters.latex', 'LaTeX', ('latex', 'tex'), ('*.tex',), 'Format tokens as LaTeX code. This needs the `fancyvrb` and `color` standard packages.'),
+ 'NullFormatter': ('pygments.formatters.other', 'Text only', ('text', 'null'), ('*.txt',), 'Output the text unchanged without any formatting.'),
+ 'PangoMarkupFormatter': ('pygments.formatters.pangomarkup', 'Pango Markup', ('pango', 'pangomarkup'), (), 'Format tokens as Pango Markup code. It can then be rendered to an SVG.'),
+ 'RawTokenFormatter': ('pygments.formatters.other', 'Raw tokens', ('raw', 'tokens'), ('*.raw',), 'Format tokens as a raw representation for storing token streams.'),
+ 'RtfFormatter': ('pygments.formatters.rtf', 'RTF', ('rtf',), ('*.rtf',), 'Format tokens as RTF markup. This formatter automatically outputs full RTF documents with color information and other useful stuff. Perfect for Copy and Paste into Microsoft(R) Word(R) documents.'),
+ 'SvgFormatter': ('pygments.formatters.svg', 'SVG', ('svg',), ('*.svg',), 'Format tokens as an SVG graphics file. This formatter is still experimental. Each line of code is a ``<text>`` element with explicit ``x`` and ``y`` coordinates containing ``<tspan>`` elements with the individual token styles.'),
+ 'Terminal256Formatter': ('pygments.formatters.terminal256', 'Terminal256', ('terminal256', 'console256', '256'), (), 'Format tokens with ANSI color sequences, for output in a 256-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TerminalFormatter': ('pygments.formatters.terminal', 'Terminal', ('terminal', 'console'), (), 'Format tokens with ANSI color sequences, for output in a text console. Color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TerminalTrueColorFormatter': ('pygments.formatters.terminal256', 'TerminalTrueColor', ('terminal16m', 'console16m', '16m'), (), 'Format tokens with ANSI color sequences, for output in a true-color terminal or console. Like in `TerminalFormatter` color sequences are terminated at newlines, so that paging the output works correctly.'),
+ 'TestcaseFormatter': ('pygments.formatters.other', 'Testcase', ('testcase',), (), 'Format tokens as appropriate for a new testcase.'),
+}
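
Each value in this generated table is a 5-tuple that the lazy loader in
`formatters/__init__.py` unpacks. A sketch of reading one entry:

    from pip._vendor.pygments.formatters._mapping import FORMATTERS

    # (module name, formatter name, aliases, filename patterns, description)
    modname, name, aliases, patterns, doc = FORMATTERS['HtmlFormatter']
    print(modname)   # 'pygments.formatters.html'
    print(aliases)   # ('html',)
    print(patterns)  # ('*.html', '*.htm')
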
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/bbcode.py b/third_party/python/pip/pip/_vendor/pygments/formatters/bbcode.py
new file mode 100644
index 0000000000..2be2b4e312
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/bbcode.py
@@ -0,0 +1,108 @@
+"""
+ pygments.formatters.bbcode
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ BBcode formatter.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.util import get_bool_opt
+
+__all__ = ['BBCodeFormatter']
+
+
+class BBCodeFormatter(Formatter):
+ """
+    Format tokens with BBcodes. These formatting codes are used by many
+    bulletin boards, so you can highlight your source code with Pygments
+    before posting it there.
+
+ This formatter has no support for background colors and borders, as there
+ are no common BBcode tags for that.
+
+ Some board systems (e.g. phpBB) don't support colors in their [code] tag,
+ so you can't use the highlighting together with that tag.
+    Text in a [code] tag is usually shown with a monospace font (which this
+    formatter can request with the ``monofont`` option), and the spaces you
+    need for indentation are not removed.
+
+ Additional options accepted:
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+
+ `codetag`
+ If set to true, put the output into ``[code]`` tags (default:
+ ``false``)
+
+ `monofont`
+ If set to true, add a tag to show the code with a monospace font
+ (default: ``false``).
+ """
+ name = 'BBCode'
+ aliases = ['bbcode', 'bb']
+ filenames = []
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ self._code = get_bool_opt(options, 'codetag', False)
+ self._mono = get_bool_opt(options, 'monofont', False)
+
+ self.styles = {}
+ self._make_styles()
+
+ def _make_styles(self):
+ for ttype, ndef in self.style:
+ start = end = ''
+ if ndef['color']:
+ start += '[color=#%s]' % ndef['color']
+ end = '[/color]' + end
+ if ndef['bold']:
+ start += '[b]'
+ end = '[/b]' + end
+ if ndef['italic']:
+ start += '[i]'
+ end = '[/i]' + end
+ if ndef['underline']:
+ start += '[u]'
+ end = '[/u]' + end
+ # there are no common BBcodes for background-color and border
+
+ self.styles[ttype] = start, end
+
+ def format_unencoded(self, tokensource, outfile):
+ if self._code:
+ outfile.write('[code]')
+ if self._mono:
+ outfile.write('[font=monospace]')
+
+ lastval = ''
+ lasttype = None
+
+ for ttype, value in tokensource:
+ while ttype not in self.styles:
+ ttype = ttype.parent
+ if ttype == lasttype:
+ lastval += value
+ else:
+ if lastval:
+ start, end = self.styles[lasttype]
+ outfile.write(''.join((start, lastval, end)))
+ lastval = value
+ lasttype = ttype
+
+ if lastval:
+ start, end = self.styles[lasttype]
+ outfile.write(''.join((start, lastval, end)))
+
+ if self._mono:
+ outfile.write('[/font]')
+ if self._code:
+ outfile.write('[/code]')
+ if self._code or self._mono:
+ outfile.write('\n')
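
An end-to-end sketch for the formatter above, pairing it with a lexer via
the top-level `highlight` helper (the code sample is illustrative):

    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.formatters.bbcode import BBCodeFormatter
    from pip._vendor.pygments.lexers import PythonLexer

    code = "def foo(bar):\n    pass\n"
    # codetag=True wraps the output in [code]...[/code]; monofont=True
    # would additionally wrap it in [font=monospace]...[/font].
    print(highlight(code, PythonLexer(), BBCodeFormatter(codetag=True)))
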
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/groff.py b/third_party/python/pip/pip/_vendor/pygments/formatters/groff.py
new file mode 100644
index 0000000000..f3dcbce9b9
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/groff.py
@@ -0,0 +1,170 @@
+"""
+ pygments.formatters.groff
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for groff output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import math
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.util import get_bool_opt, get_int_opt
+
+__all__ = ['GroffFormatter']
+
+
+class GroffFormatter(Formatter):
+ """
+ Format tokens with groff escapes to change their color and font style.
+
+ .. versionadded:: 2.11
+
+ Additional options accepted:
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+
+ `monospaced`
+ If set to true, monospace font will be used (default: ``true``).
+
+ `linenos`
+ If set to true, print the line numbers (default: ``false``).
+
+ `wrap`
+ Wrap lines to the specified number of characters. Disabled if set to 0
+ (default: ``0``).
+ """
+
+ name = 'groff'
+ aliases = ['groff','troff','roff']
+ filenames = []
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+
+ self.monospaced = get_bool_opt(options, 'monospaced', True)
+ self.linenos = get_bool_opt(options, 'linenos', False)
+ self._lineno = 0
+ self.wrap = get_int_opt(options, 'wrap', 0)
+ self._linelen = 0
+
+ self.styles = {}
+ self._make_styles()
+
+
+ def _make_styles(self):
+ regular = '\\f[CR]' if self.monospaced else '\\f[R]'
+ bold = '\\f[CB]' if self.monospaced else '\\f[B]'
+ italic = '\\f[CI]' if self.monospaced else '\\f[I]'
+
+ for ttype, ndef in self.style:
+ start = end = ''
+ if ndef['color']:
+ start += '\\m[%s]' % ndef['color']
+ end = '\\m[]' + end
+ if ndef['bold']:
+ start += bold
+ end = regular + end
+ if ndef['italic']:
+ start += italic
+ end = regular + end
+ if ndef['bgcolor']:
+ start += '\\M[%s]' % ndef['bgcolor']
+ end = '\\M[]' + end
+
+ self.styles[ttype] = start, end
+
+
+ def _define_colors(self, outfile):
+ colors = set()
+ for _, ndef in self.style:
+ if ndef['color'] is not None:
+ colors.add(ndef['color'])
+
+ for color in colors:
+ outfile.write('.defcolor ' + color + ' rgb #' + color + '\n')
+
+
+ def _write_lineno(self, outfile):
+ self._lineno += 1
+ outfile.write("%s% 4d " % (self._lineno != 1 and '\n' or '', self._lineno))
+
+
+ def _wrap_line(self, line):
+ length = len(line.rstrip('\n'))
+ space = ' ' if self.linenos else ''
+ newline = ''
+
+ if length > self.wrap:
+ for i in range(0, math.floor(length / self.wrap)):
+ chunk = line[i*self.wrap:i*self.wrap+self.wrap]
+ newline += (chunk + '\n' + space)
+ remainder = length % self.wrap
+ if remainder > 0:
+ newline += line[-remainder-1:]
+ self._linelen = remainder
+ elif self._linelen + length > self.wrap:
+ newline = ('\n' + space) + line
+ self._linelen = length
+ else:
+ newline = line
+ self._linelen += length
+
+ return newline
+
+
+ def _escape_chars(self, text):
+ text = text.replace('\\', '\\[u005C]'). \
+ replace('.', '\\[char46]'). \
+ replace('\'', '\\[u0027]'). \
+ replace('`', '\\[u0060]'). \
+ replace('~', '\\[u007E]')
+ copy = text
+
+ for char in copy:
+ if len(char) != len(char.encode()):
+ uni = char.encode('unicode_escape') \
+ .decode()[1:] \
+ .replace('x', 'u00') \
+ .upper()
+ text = text.replace(char, '\\[u' + uni[1:] + ']')
+
+ return text
+
+
+ def format_unencoded(self, tokensource, outfile):
+ self._define_colors(outfile)
+
+ outfile.write('.nf\n\\f[CR]\n')
+
+ if self.linenos:
+ self._write_lineno(outfile)
+
+ for ttype, value in tokensource:
+ while ttype not in self.styles:
+ ttype = ttype.parent
+ start, end = self.styles[ttype]
+
+ for line in value.splitlines(True):
+ if self.wrap > 0:
+ line = self._wrap_line(line)
+
+ if start and end:
+ text = self._escape_chars(line.rstrip('\n'))
+ if text != '':
+ outfile.write(''.join((start, text, end)))
+ else:
+ outfile.write(self._escape_chars(line.rstrip('\n')))
+
+ if line.endswith('\n'):
+ if self.linenos:
+ self._write_lineno(outfile)
+ self._linelen = 0
+ else:
+ outfile.write('\n')
+ self._linelen = 0
+
+ outfile.write('\n.fi')
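
The output is plain text with groff requests embedded: `.defcolor` lines up
front, the code between `.nf`/`.fi`, and `\f[..]`/`\m[..]` switches inline.
A usage sketch (render the result with, e.g., `groff -Tutf8`):

    from pip._vendor.pygments import highlight
    from pip._vendor.pygments.formatters.groff import GroffFormatter
    from pip._vendor.pygments.lexers import PythonLexer

    src = "x = 1  # a comment\n"
    # linenos=True prepends right-aligned line numbers; wrap=72 would
    # hard-wrap long lines while keeping the number gutter intact.
    print(highlight(src, PythonLexer(), GroffFormatter(linenos=True)))
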
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/html.py b/third_party/python/pip/pip/_vendor/pygments/formatters/html.py
new file mode 100644
index 0000000000..d5cda4c4bc
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/html.py
@@ -0,0 +1,989 @@
+"""
+ pygments.formatters.html
+ ~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for HTML output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import functools
+import os
+import sys
+import os.path
+from io import StringIO
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.token import Token, Text, STANDARD_TYPES
+from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt
+
+try:
+ import ctags
+except ImportError:
+ ctags = None
+
+__all__ = ['HtmlFormatter']
+
+
+_escape_html_table = {
+ ord('&'): '&amp;',
+ ord('<'): '&lt;',
+ ord('>'): '&gt;',
+ ord('"'): '&quot;',
+ ord("'"): '&#39;',
+}
+
+
+def escape_html(text, table=_escape_html_table):
+ """Escape &, <, > as well as single and double quotes for HTML."""
+ return text.translate(table)
+
+
+def webify(color):
+ if color.startswith('calc') or color.startswith('var'):
+ return color
+ else:
+ return '#' + color
+
+
+def _get_ttype_class(ttype):
+ fname = STANDARD_TYPES.get(ttype)
+ if fname:
+ return fname
+ aname = ''
+ while fname is None:
+ aname = '-' + ttype[-1] + aname
+ ttype = ttype.parent
+ fname = STANDARD_TYPES.get(ttype)
+ return fname + aname
+
+
+CSSFILE_TEMPLATE = '''\
+/*
+generated by Pygments <https://pygments.org/>
+Copyright 2006-2022 by the Pygments team.
+Licensed under the BSD license, see LICENSE for details.
+*/
+%(styledefs)s
+'''
+
+DOC_HEADER = '''\
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+<!--
+generated by Pygments <https://pygments.org/>
+Copyright 2006-2022 by the Pygments team.
+Licensed under the BSD license, see LICENSE for details.
+-->
+<html>
+<head>
+ <title>%(title)s</title>
+ <meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
+ <style type="text/css">
+''' + CSSFILE_TEMPLATE + '''
+ </style>
+</head>
+<body>
+<h2>%(title)s</h2>
+
+'''
+
+DOC_HEADER_EXTERNALCSS = '''\
+<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
+ "http://www.w3.org/TR/html4/strict.dtd">
+
+<html>
+<head>
+ <title>%(title)s</title>
+ <meta http-equiv="content-type" content="text/html; charset=%(encoding)s">
+ <link rel="stylesheet" href="%(cssfile)s" type="text/css">
+</head>
+<body>
+<h2>%(title)s</h2>
+
+'''
+
+DOC_FOOTER = '''\
+</body>
+</html>
+'''
+
+
+class HtmlFormatter(Formatter):
+ r"""
+ Format tokens as HTML 4 ``<span>`` tags within a ``<pre>`` tag, wrapped
+ in a ``<div>`` tag. The ``<div>``'s CSS class can be set by the `cssclass`
+ option.
+
+ If the `linenos` option is set to ``"table"``, the ``<pre>`` is
+ additionally wrapped inside a ``<table>`` which has one row and two
+ cells: one containing the line numbers and one containing the code.
+ Example:
+
+ .. sourcecode:: html
+
+ <div class="highlight" >
+ <table><tr>
+ <td class="linenos" title="click to toggle"
+ onclick="with (this.firstChild.style)
+ { display = (display == '') ? 'none' : '' }">
+ <pre>1
+ 2</pre>
+ </td>
+ <td class="code">
+ <pre><span class="Ke">def </span><span class="NaFu">foo</span>(bar):
+ <span class="Ke">pass</span>
+ </pre>
+ </td>
+ </tr></table></div>
+
+ (whitespace added to improve clarity).
+
+ Wrapping can be disabled using the `nowrap` option.
+
+ A list of lines can be specified using the `hl_lines` option to make these
+ lines highlighted (as of Pygments 0.11).
+
+ With the `full` option, a complete HTML 4 document is output, including
+ the style definitions inside a ``<style>`` tag, or in a separate file if
+ the `cssfile` option is given.
+
+ When `tagsfile` is set to the path of a ctags index file, it is used to
+ generate hyperlinks from names to their definition. You must enable
+ `lineanchors` and run ctags with the `-n` option for this to work. The
+ `python-ctags` module from PyPI must be installed to use this feature;
+ otherwise a `RuntimeError` will be raised.
+
+ The `get_style_defs(arg='')` method of a `HtmlFormatter` returns a string
+ containing CSS rules for the CSS classes used by the formatter. The
+ argument `arg` can be used to specify additional CSS selectors that
+ are prepended to the classes. A call `fmter.get_style_defs('td .code')`
+ would result in the following CSS classes:
+
+ .. sourcecode:: css
+
+ td .code .kw { font-weight: bold; color: #00FF00 }
+ td .code .cm { color: #999999 }
+ ...
+
+ If you have Pygments 0.6 or higher, you can also pass a list or tuple to the
+ `get_style_defs()` method to request multiple prefixes for the tokens:
+
+ .. sourcecode:: python
+
+ formatter.get_style_defs(['div.syntax pre', 'pre.syntax'])
+
+ The output would then look like this:
+
+ .. sourcecode:: css
+
+ div.syntax pre .kw,
+ pre.syntax .kw { font-weight: bold; color: #00FF00 }
+ div.syntax pre .cm,
+ pre.syntax .cm { color: #999999 }
+ ...
+
+ Additional options accepted:
+
+ `nowrap`
+ If set to ``True``, don't wrap the tokens at all, not even inside a ``<pre>``
+ tag. This disables most other options (default: ``False``).
+
+ `full`
+ Tells the formatter to output a "full" document, i.e. a complete
+ self-contained document (default: ``False``).
+
+ `title`
+ If `full` is true, the title that should be used to caption the
+ document (default: ``''``).
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``). This option has no effect if the `cssfile`
+ and `noclobber_cssfile` option are given and the file specified in
+ `cssfile` exists.
+
+ `noclasses`
+ If set to true, token ``<span>`` tags (as well as line number elements)
+ will not use CSS classes, but inline styles. This is not recommended
+ for larger pieces of code since it increases output size by quite a bit
+ (default: ``False``).
+
+ `classprefix`
+ Since the token types use relatively short class names, they may clash
+ with some of your own class names. In this case you can use the
+ `classprefix` option to give a string to prepend to all Pygments-generated
+ CSS class names for token types.
+ Note that this option also affects the output of `get_style_defs()`.
+
+ `cssclass`
+ CSS class for the wrapping ``<div>`` tag (default: ``'highlight'``).
+ If you set this option, the default selector for `get_style_defs()`
+ will be this class.
+
+ .. versionadded:: 0.9
+ If you select the ``'table'`` line numbers, the wrapping table will
+ have a CSS class of this string plus ``'table'``, the default is
+ accordingly ``'highlighttable'``.
+
+ `cssstyles`
+ Inline CSS styles for the wrapping ``<div>`` tag (default: ``''``).
+
+ `prestyles`
+ Inline CSS styles for the ``<pre>`` tag (default: ``''``).
+
+ .. versionadded:: 0.11
+
+ `cssfile`
+ If the `full` option is true and this option is given, it must be the
+ name of an external file. If the filename does not include an absolute
+ path, the file's path will be assumed to be relative to the main output
+ file's path, if the latter can be found. The stylesheet is then written
+ to this file instead of the HTML file.
+
+ .. versionadded:: 0.6
+
+ `noclobber_cssfile`
+ If `cssfile` is given and the specified file exists, the css file will
+ not be overwritten. This allows the use of the `full` option in
+ combination with a user specified css file. Default is ``False``.
+
+ .. versionadded:: 1.1
+
+ `linenos`
+ If set to ``'table'``, output line numbers as a table with two cells,
+ one containing the line numbers, the other the whole code. This is
+ copy-and-paste-friendly, but may cause alignment problems with some
+ browsers or fonts. If set to ``'inline'``, the line numbers will be
+ integrated in the ``<pre>`` tag that contains the code (that setting
+ is *new in Pygments 0.8*).
+
+ For compatibility with Pygments 0.7 and earlier, every true value
+ except ``'inline'`` means the same as ``'table'`` (in particular, that
+ means also ``True``).
+
+ The default value is ``False``, which means no line numbers at all.
+
+ **Note:** with the default ("table") line number mechanism, the line
+ numbers and code can have different line heights in Internet Explorer
+ unless you give the enclosing ``<pre>`` tags an explicit ``line-height``
+ CSS property (you get the default line spacing with ``line-height:
+ 125%``).
+
+ `hl_lines`
+ Specify a list of lines to be highlighted. The line numbers are always
+ relative to the input (i.e. the first line is line 1) and are
+ independent of `linenostart`.
+
+ .. versionadded:: 0.11
+
+ `linenostart`
+ The line number for the first line (default: ``1``).
+
+ `linenostep`
+ If set to a number n > 1, only every nth line number is printed.
+
+ `linenospecial`
+ If set to a number n > 0, every nth line number is given the CSS
+ class ``"special"`` (default: ``0``).
+
+ `nobackground`
+ If set to ``True``, the formatter won't output the background color
+ for the wrapping element (this automatically defaults to ``False``
+        when there is no wrapping element [e.g. no argument for the
+        `get_style_defs` method given]) (default: ``False``).
+
+ .. versionadded:: 0.6
+
+ `lineseparator`
+ This string is output between lines of code. It defaults to ``"\n"``,
+ which is enough to break a line inside ``<pre>`` tags, but you can
+ e.g. set it to ``"<br>"`` to get HTML line breaks.
+
+ .. versionadded:: 0.7
+
+ `lineanchors`
+ If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
+ output line in an anchor tag with an ``id`` (and `name`) of ``foo-linenumber``.
+ This allows easy linking to certain lines.
+
+ .. versionadded:: 0.9
+
+ `linespans`
+ If set to a nonempty string, e.g. ``foo``, the formatter will wrap each
+ output line in a span tag with an ``id`` of ``foo-linenumber``.
+ This allows easy access to lines via JavaScript.
+
+ .. versionadded:: 1.6
+
+ `anchorlinenos`
+ If set to ``True``, will wrap line numbers in ``<a>`` tags. Used in
+ combination with `linenos` and `lineanchors`.
+
+ `tagsfile`
+ If set to the path of a ctags file, wrap names in anchor tags that
+ link to their definitions. `lineanchors` should be used, and the
+ tags file should specify line numbers (see the `-n` option to ctags).
+
+ .. versionadded:: 1.6
+
+ `tagurlformat`
+ A string formatting pattern used to generate links to ctags definitions.
+ Available variables are `%(path)s`, `%(fname)s` and `%(fext)s`.
+ Defaults to an empty string, resulting in just `#prefix-number` links.
+
+ .. versionadded:: 1.6
+
+ `filename`
+ A string used to generate a filename when rendering ``<pre>`` blocks,
+ for example if displaying source code. If `linenos` is set to
+ ``'table'`` then the filename will be rendered in an initial row
+ containing a single `<th>` which spans both columns.
+
+ .. versionadded:: 2.1
+
+ `wrapcode`
+ Wrap the code inside ``<pre>`` blocks using ``<code>``, as recommended
+ by the HTML5 specification.
+
+ .. versionadded:: 2.4
+
+ `debug_token_types`
+ Add ``title`` attributes to all token ``<span>`` tags that show the
+ name of the token.
+
+ .. versionadded:: 2.10
+
+
+ **Subclassing the HTML formatter**
+
+ .. versionadded:: 0.7
+
+ The HTML formatter is now built in a way that allows easy subclassing, thus
+ customizing the output HTML code. The `format()` method calls
+ `self._format_lines()` which returns a generator that yields tuples of ``(1,
+ line)``, where the ``1`` indicates that the ``line`` is a line of the
+ formatted source code.
+
+ If the `nowrap` option is set, the generator is iterated over and the
+ resulting HTML is output.
+
+ Otherwise, `format()` calls `self.wrap()`, which wraps the generator with
+ other generators. These may add some HTML code to the one generated by
+ `_format_lines()`, either by modifying the lines generated by the latter,
+ then yielding them again with ``(1, line)``, and/or by yielding other HTML
+ code before or after the lines, with ``(0, html)``. The distinction between
+ source lines and other code makes it possible to wrap the generator multiple
+ times.
+
+ The default `wrap()` implementation adds a ``<div>`` and a ``<pre>`` tag.
+
+ A custom `HtmlFormatter` subclass could look like this:
+
+ .. sourcecode:: python
+
+ class CodeHtmlFormatter(HtmlFormatter):
+
+ def wrap(self, source):
+ return self._wrap_code(source)
+
+ def _wrap_code(self, source):
+ yield 0, '<code>'
+ for i, t in source:
+ if i == 1:
+ # it's a line of formatted code
+ t += '<br>'
+ yield i, t
+ yield 0, '</code>'
+
+ This results in wrapping the formatted lines with a ``<code>`` tag, where the
+ source lines are broken using ``<br>`` tags.
+
+ After calling `wrap()`, the `format()` method also adds the "line numbers"
+ and/or "full document" wrappers if the respective options are set. Then, all
+ HTML yielded by the wrapped generator is output.
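+
+ **Basic usage**
+
+ A minimal usage sketch (illustrative; it goes through the public
+ `pygments` API rather than this vendored copy):
+
+ .. sourcecode:: python
+
+ from pygments import highlight
+ from pygments.lexers import PythonLexer
+ from pygments.formatters import HtmlFormatter
+
+ # 'source' is an assumed CSS class name, not a library default
+ formatter = HtmlFormatter(linenos='table', cssclass='source')
+ html = highlight('print("hello")', PythonLexer(), formatter)
+ css = formatter.get_style_defs('.source')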
+ """
+
+ name = 'HTML'
+ aliases = ['html']
+ filenames = ['*.html', '*.htm']
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ self.title = self._decodeifneeded(self.title)
+ self.nowrap = get_bool_opt(options, 'nowrap', False)
+ self.noclasses = get_bool_opt(options, 'noclasses', False)
+ self.classprefix = options.get('classprefix', '')
+ self.cssclass = self._decodeifneeded(options.get('cssclass', 'highlight'))
+ self.cssstyles = self._decodeifneeded(options.get('cssstyles', ''))
+ self.prestyles = self._decodeifneeded(options.get('prestyles', ''))
+ self.cssfile = self._decodeifneeded(options.get('cssfile', ''))
+ self.noclobber_cssfile = get_bool_opt(options, 'noclobber_cssfile', False)
+ self.tagsfile = self._decodeifneeded(options.get('tagsfile', ''))
+ self.tagurlformat = self._decodeifneeded(options.get('tagurlformat', ''))
+ self.filename = self._decodeifneeded(options.get('filename', ''))
+ self.wrapcode = get_bool_opt(options, 'wrapcode', False)
+ self.span_element_openers = {}
+ self.debug_token_types = get_bool_opt(options, 'debug_token_types', False)
+
+ if self.tagsfile:
+ if not ctags:
+ raise RuntimeError('The "ctags" package must be installed '
+ 'to be able to use the "tagsfile" feature.')
+ self._ctags = ctags.CTags(self.tagsfile)
+
+ linenos = options.get('linenos', False)
+ if linenos == 'inline':
+ self.linenos = 2
+ elif linenos:
+ # compatibility with <= 0.7
+ self.linenos = 1
+ else:
+ self.linenos = 0
+ self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
+ self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
+ self.linenospecial = abs(get_int_opt(options, 'linenospecial', 0))
+ self.nobackground = get_bool_opt(options, 'nobackground', False)
+ self.lineseparator = options.get('lineseparator', '\n')
+ self.lineanchors = options.get('lineanchors', '')
+ self.linespans = options.get('linespans', '')
+ self.anchorlinenos = get_bool_opt(options, 'anchorlinenos', False)
+ self.hl_lines = set()
+ for lineno in get_list_opt(options, 'hl_lines', []):
+ try:
+ self.hl_lines.add(int(lineno))
+ except ValueError:
+ pass
+
+ self._create_stylesheet()
+
+ def _get_css_class(self, ttype):
+ """Return the css class of this token type prefixed with
+ the classprefix option."""
+ ttypeclass = _get_ttype_class(ttype)
+ if ttypeclass:
+ return self.classprefix + ttypeclass
+ return ''
+
+ def _get_css_classes(self, ttype):
+ """Return the CSS classes of this token type prefixed with the classprefix option."""
+ cls = self._get_css_class(ttype)
+ while ttype not in STANDARD_TYPES:
+ ttype = ttype.parent
+ cls = self._get_css_class(ttype) + ' ' + cls
+ return cls or ''
+
+ def _get_css_inline_styles(self, ttype):
+ """Return the inline CSS styles for this token type."""
+ cclass = self.ttype2class.get(ttype)
+ while cclass is None:
+ ttype = ttype.parent
+ cclass = self.ttype2class.get(ttype)
+ return cclass or ''
+
+ def _create_stylesheet(self):
+ t2c = self.ttype2class = {Token: ''}
+ c2s = self.class2style = {}
+ for ttype, ndef in self.style:
+ name = self._get_css_class(ttype)
+ style = ''
+ if ndef['color']:
+ style += 'color: %s; ' % webify(ndef['color'])
+ if ndef['bold']:
+ style += 'font-weight: bold; '
+ if ndef['italic']:
+ style += 'font-style: italic; '
+ if ndef['underline']:
+ style += 'text-decoration: underline; '
+ if ndef['bgcolor']:
+ style += 'background-color: %s; ' % webify(ndef['bgcolor'])
+ if ndef['border']:
+ style += 'border: 1px solid %s; ' % webify(ndef['border'])
+ if style:
+ t2c[ttype] = name
+ # save len(ttype) to enable ordering the styles by
+ # hierarchy (necessary for CSS cascading rules!)
+ c2s[name] = (style[:-2], ttype, len(ttype))
+
+ def get_style_defs(self, arg=None):
+ """
+ Return CSS style definitions for the classes produced by the current
+ highlighting style. ``arg`` can be a string or list of selectors to
+ insert before the token type classes.
+ """
+ style_lines = []
+
+ style_lines.extend(self.get_linenos_style_defs())
+ style_lines.extend(self.get_background_style_defs(arg))
+ style_lines.extend(self.get_token_style_defs(arg))
+
+ return '\n'.join(style_lines)
+
+ def get_token_style_defs(self, arg=None):
+ prefix = self.get_css_prefix(arg)
+
+ styles = [
+ (level, ttype, cls, style)
+ for cls, (style, ttype, level) in self.class2style.items()
+ if cls and style
+ ]
+ styles.sort()
+
+ lines = [
+ '%s { %s } /* %s */' % (prefix(cls), style, repr(ttype)[6:])
+ for (level, ttype, cls, style) in styles
+ ]
+
+ return lines
+
+ def get_background_style_defs(self, arg=None):
+ prefix = self.get_css_prefix(arg)
+ bg_color = self.style.background_color
+ hl_color = self.style.highlight_color
+
+ lines = []
+
+ if arg and not self.nobackground and bg_color is not None:
+ text_style = ''
+ if Text in self.ttype2class:
+ text_style = ' ' + self.class2style[self.ttype2class[Text]][0]
+ lines.insert(
+ 0, '%s{ background: %s;%s }' % (
+ prefix(''), bg_color, text_style
+ )
+ )
+ if hl_color is not None:
+ lines.insert(
+ 0, '%s { background-color: %s }' % (prefix('hll'), hl_color)
+ )
+
+ return lines
+
+ def get_linenos_style_defs(self):
+ lines = [
+ 'pre { %s }' % self._pre_style,
+ 'td.linenos .normal { %s }' % self._linenos_style,
+ 'span.linenos { %s }' % self._linenos_style,
+ 'td.linenos .special { %s }' % self._linenos_special_style,
+ 'span.linenos.special { %s }' % self._linenos_special_style,
+ ]
+
+ return lines
+
+ def get_css_prefix(self, arg):
+ if arg is None:
+ arg = ('cssclass' in self.options and '.'+self.cssclass or '')
+ if isinstance(arg, str):
+ args = [arg]
+ else:
+ args = list(arg)
+
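+ # For example (illustrative): with arg '.highlight', prefix('k')
+ # yields '.highlight .k' and prefix('') yields '.highlight '.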
+ def prefix(cls):
+ if cls:
+ cls = '.' + cls
+ tmp = []
+ for arg in args:
+ tmp.append((arg and arg + ' ' or '') + cls)
+ return ', '.join(tmp)
+
+ return prefix
+
+ @property
+ def _pre_style(self):
+ return 'line-height: 125%;'
+
+ @property
+ def _linenos_style(self):
+ return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
+ self.style.line_number_color,
+ self.style.line_number_background_color
+ )
+
+ @property
+ def _linenos_special_style(self):
+ return 'color: %s; background-color: %s; padding-left: 5px; padding-right: 5px;' % (
+ self.style.line_number_special_color,
+ self.style.line_number_special_background_color
+ )
+
+ def _decodeifneeded(self, value):
+ if isinstance(value, bytes):
+ if self.encoding:
+ return value.decode(self.encoding)
+ return value.decode()
+ return value
+
+ def _wrap_full(self, inner, outfile):
+ if self.cssfile:
+ if os.path.isabs(self.cssfile):
+ # it's an absolute filename
+ cssfilename = self.cssfile
+ else:
+ try:
+ filename = outfile.name
+ if not filename or filename[0] == '<':
+ # pseudo files, e.g. name == '<fdopen>'
+ raise AttributeError
+ cssfilename = os.path.join(os.path.dirname(filename),
+ self.cssfile)
+ except AttributeError:
+ print('Note: Cannot determine output file name, '
+ 'using current directory as base for the CSS file name',
+ file=sys.stderr)
+ cssfilename = self.cssfile
+ # write CSS file only if noclobber_cssfile isn't given as an option.
+ try:
+ if not os.path.exists(cssfilename) or not self.noclobber_cssfile:
+ with open(cssfilename, "w") as cf:
+ cf.write(CSSFILE_TEMPLATE %
+ {'styledefs': self.get_style_defs('body')})
+ except OSError as err:
+ err.strerror = 'Error writing CSS file: ' + err.strerror
+ raise
+
+ yield 0, (DOC_HEADER_EXTERNALCSS %
+ dict(title=self.title,
+ cssfile=self.cssfile,
+ encoding=self.encoding))
+ else:
+ yield 0, (DOC_HEADER %
+ dict(title=self.title,
+ styledefs=self.get_style_defs('body'),
+ encoding=self.encoding))
+
+ yield from inner
+ yield 0, DOC_FOOTER
+
+ def _wrap_tablelinenos(self, inner):
+ dummyoutfile = StringIO()
+ lncount = 0
+ for t, line in inner:
+ if t:
+ lncount += 1
+ dummyoutfile.write(line)
+
+ fl = self.linenostart
+ mw = len(str(lncount + fl - 1))
+ sp = self.linenospecial
+ st = self.linenostep
+ anchor_name = self.lineanchors or self.linespans
+ aln = self.anchorlinenos
+ nocls = self.noclasses
+
+ lines = []
+
+ for i in range(fl, fl+lncount):
+ print_line = i % st == 0
+ special_line = sp and i % sp == 0
+
+ if print_line:
+ line = '%*d' % (mw, i)
+ if aln:
+ line = '<a href="#%s-%d">%s</a>' % (anchor_name, i, line)
+ else:
+ line = ' ' * mw
+
+ if nocls:
+ if special_line:
+ style = ' style="%s"' % self._linenos_special_style
+ else:
+ style = ' style="%s"' % self._linenos_style
+ else:
+ if special_line:
+ style = ' class="special"'
+ else:
+ style = ' class="normal"'
+
+ if style:
+ line = '<span%s>%s</span>' % (style, line)
+
+ lines.append(line)
+
+ ls = '\n'.join(lines)
+
+ # If a filename was specified, we can't put it into the code table as it
+ # would misalign the line numbers. Hence we emit a separate row for it.
+ filename_tr = ""
+ if self.filename:
+ filename_tr = (
+ '<tr><th colspan="2" class="filename">'
+ '<span class="filename">' + self.filename + '</span>'
+ '</th></tr>')
+
+ # in case you wonder about the seemingly redundant <div> here: since the
+ # content in the other cell is also wrapped in a div, some browsers in
+ # some configurations seem to mess up the formatting...
+ yield 0, (f'<table class="{self.cssclass}table">' + filename_tr +
+ '<tr><td class="linenos"><div class="linenodiv"><pre>' +
+ ls + '</pre></div></td><td class="code">')
+ yield 0, '<div>'
+ yield 0, dummyoutfile.getvalue()
+ yield 0, '</div>'
+ yield 0, '</td></tr></table>'
+
+
+ def _wrap_inlinelinenos(self, inner):
+ # need a list of lines since we need the width of a single number :(
+ inner_lines = list(inner)
+ sp = self.linenospecial
+ st = self.linenostep
+ num = self.linenostart
+ mw = len(str(len(inner_lines) + num - 1))
+ anchor_name = self.lineanchors or self.linespans
+ aln = self.anchorlinenos
+ nocls = self.noclasses
+
+ for _, inner_line in inner_lines:
+ print_line = num % st == 0
+ special_line = sp and num % sp == 0
+
+ if print_line:
+ line = '%*d' % (mw, num)
+ else:
+ line = ' ' * mw
+
+ if nocls:
+ if special_line:
+ style = ' style="%s"' % self._linenos_special_style
+ else:
+ style = ' style="%s"' % self._linenos_style
+ else:
+ if special_line:
+ style = ' class="linenos special"'
+ else:
+ style = ' class="linenos"'
+
+ if style:
+ linenos = '<span%s>%s</span>' % (style, line)
+ else:
+ linenos = line
+
+ if aln:
+ yield 1, ('<a href="#%s-%d">%s</a>' % (anchor_name, num, linenos) +
+ inner_line)
+ else:
+ yield 1, linenos + inner_line
+ num += 1
+
+ def _wrap_lineanchors(self, inner):
+ s = self.lineanchors
+ # subtract 1 since we have to increment i *before* yielding
+ i = self.linenostart - 1
+ for t, line in inner:
+ if t:
+ i += 1
+ href = "" if self.linenos else ' href="#%s-%d"' % (s, i)
+ yield 1, '<a id="%s-%d" name="%s-%d"%s></a>' % (s, i, s, i, href) + line
+ else:
+ yield 0, line
+
+ def _wrap_linespans(self, inner):
+ s = self.linespans
+ i = self.linenostart - 1
+ for t, line in inner:
+ if t:
+ i += 1
+ yield 1, '<span id="%s-%d">%s</span>' % (s, i, line)
+ else:
+ yield 0, line
+
+ def _wrap_div(self, inner):
+ style = []
+ if (self.noclasses and not self.nobackground and
+ self.style.background_color is not None):
+ style.append('background: %s' % (self.style.background_color,))
+ if self.cssstyles:
+ style.append(self.cssstyles)
+ style = '; '.join(style)
+
+ yield 0, ('<div' + (self.cssclass and ' class="%s"' % self.cssclass) +
+ (style and (' style="%s"' % style)) + '>')
+ yield from inner
+ yield 0, '</div>\n'
+
+ def _wrap_pre(self, inner):
+ style = []
+ if self.prestyles:
+ style.append(self.prestyles)
+ if self.noclasses:
+ style.append(self._pre_style)
+ style = '; '.join(style)
+
+ if self.filename and self.linenos != 1:
+ yield 0, ('<span class="filename">' + self.filename + '</span>')
+
+ # the empty span here is to keep leading empty lines from being
+ # ignored by HTML parsers
+ yield 0, ('<pre' + (style and ' style="%s"' % style) + '><span></span>')
+ yield from inner
+ yield 0, '</pre>'
+
+ def _wrap_code(self, inner):
+ yield 0, '<code>'
+ yield from inner
+ yield 0, '</code>'
+
+ @functools.lru_cache(maxsize=100)
+ def _translate_parts(self, value):
+ """HTML-escape a value and split it by newlines."""
+ return value.translate(_escape_html_table).split('\n')
+
+ def _format_lines(self, tokensource):
+ """
+ Just format the tokens, without any wrapping tags.
+ Yield individual lines.
+ """
+ nocls = self.noclasses
+ lsep = self.lineseparator
+ tagsfile = self.tagsfile
+
+ lspan = ''
+ line = []
+ for ttype, value in tokensource:
+ try:
+ cspan = self.span_element_openers[ttype]
+ except KeyError:
+ title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else ''
+ if nocls:
+ css_style = self._get_css_inline_styles(ttype)
+ if css_style:
+ css_style = self.class2style[css_style][0]
+ cspan = '<span style="%s"%s>' % (css_style, title)
+ else:
+ cspan = ''
+ else:
+ css_class = self._get_css_classes(ttype)
+ if css_class:
+ cspan = '<span class="%s"%s>' % (css_class, title)
+ else:
+ cspan = ''
+ self.span_element_openers[ttype] = cspan
+
+ parts = self._translate_parts(value)
+
+ if tagsfile and ttype in Token.Name:
+ filename, linenumber = self._lookup_ctag(value)
+ if linenumber:
+ base, filename = os.path.split(filename)
+ if base:
+ base += '/'
+ filename, extension = os.path.splitext(filename)
+ url = self.tagurlformat % {'path': base, 'fname': filename,
+ 'fext': extension}
+ parts[0] = "<a href=\"%s#%s-%d\">%s" % \
+ (url, self.lineanchors, linenumber, parts[0])
+ parts[-1] = parts[-1] + "</a>"
+
+ # for all but the last line
+ for part in parts[:-1]:
+ if line:
+ if lspan != cspan:
+ line.extend(((lspan and '</span>'), cspan, part,
+ (cspan and '</span>'), lsep))
+ else: # both are the same
+ line.extend((part, (lspan and '</span>'), lsep))
+ yield 1, ''.join(line)
+ line = []
+ elif part:
+ yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep))
+ else:
+ yield 1, lsep
+ # for the last line
+ if line and parts[-1]:
+ if lspan != cspan:
+ line.extend(((lspan and '</span>'), cspan, parts[-1]))
+ lspan = cspan
+ else:
+ line.append(parts[-1])
+ elif parts[-1]:
+ line = [cspan, parts[-1]]
+ lspan = cspan
+ # else we neither have to open a new span nor set lspan
+
+ if line:
+ line.extend(((lspan and '</span>'), lsep))
+ yield 1, ''.join(line)
+
+ def _lookup_ctag(self, token):
+ entry = ctags.TagEntry()
+ if self._ctags.find(entry, token.encode(), 0):
+ return entry['file'], entry['lineNumber']
+ else:
+ return None, None
+
+ def _highlight_lines(self, tokensource):
+ """
+ Highlight the lines specified in the `hl_lines` option by
+ post-processing the token stream coming from `_format_lines`.
+ """
+ hls = self.hl_lines
+
+ for i, (t, value) in enumerate(tokensource):
+ if t != 1:
+ yield t, value
+ if i + 1 in hls: # i + 1 because Python indexes start at 0
+ if self.noclasses:
+ style = ''
+ if self.style.highlight_color is not None:
+ style = (' style="background-color: %s"' %
+ (self.style.highlight_color,))
+ yield 1, '<span%s>%s</span>' % (style, value)
+ else:
+ yield 1, '<span class="hll">%s</span>' % value
+ else:
+ yield 1, value
+
+ def wrap(self, source):
+ """
+ Wrap the ``source``, which is a generator yielding
+ individual lines, in custom generators. See docstring
+ for `format`. Can be overridden.
+ """
+
+ output = source
+ if self.wrapcode:
+ output = self._wrap_code(output)
+
+ output = self._wrap_pre(output)
+
+ return output
+
+ def format_unencoded(self, tokensource, outfile):
+ """
+ The formatting process uses several nested generators; which of
+ them are used is determined by the user's options.
+
+ Each generator should take at least one argument, ``inner``,
+ and wrap the pieces of text generated by it.
+
+ Always yield 2-tuples: (code, text). If "code" is 1, the text
+ is part of the original tokensource being highlighted; if it's
+ 0, the text is some piece of wrapping. This makes it possible to
+ use several different wrappers that process the original source
+ linewise, e.g. line number generators.
+ """
+ source = self._format_lines(tokensource)
+
+ # As a special case, we wrap line numbers before line highlighting
+ # so the line numbers get wrapped in the highlighting tag.
+ if not self.nowrap and self.linenos == 2:
+ source = self._wrap_inlinelinenos(source)
+
+ if self.hl_lines:
+ source = self._highlight_lines(source)
+
+ if not self.nowrap:
+ if self.lineanchors:
+ source = self._wrap_lineanchors(source)
+ if self.linespans:
+ source = self._wrap_linespans(source)
+ source = self.wrap(source)
+ if self.linenos == 1:
+ source = self._wrap_tablelinenos(source)
+ source = self._wrap_div(source)
+ if self.full:
+ source = self._wrap_full(source, outfile)
+
+ for t, piece in source:
+ outfile.write(piece)
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/img.py b/third_party/python/pip/pip/_vendor/pygments/formatters/img.py
new file mode 100644
index 0000000000..0f36a32ba3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/img.py
@@ -0,0 +1,645 @@
+"""
+ pygments.formatters.img
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for Pixmap output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import os
+import sys
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
+ get_choice_opt
+
+import subprocess
+
+# Import this carefully
+try:
+ from PIL import Image, ImageDraw, ImageFont
+ pil_available = True
+except ImportError:
+ pil_available = False
+
+try:
+ import _winreg
+except ImportError:
+ try:
+ import winreg as _winreg
+ except ImportError:
+ _winreg = None
+
+__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
+ 'BmpImageFormatter']
+
+
+# For some unknown reason every font calls it something different
+STYLES = {
+ 'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
+ 'ITALIC': ['Oblique', 'Italic'],
+ 'BOLD': ['Bold'],
+ 'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
+}
+
+# A sane default for modern systems
+DEFAULT_FONT_NAME_NIX = 'DejaVu Sans Mono'
+DEFAULT_FONT_NAME_WIN = 'Courier New'
+DEFAULT_FONT_NAME_MAC = 'Menlo'
+
+
+class PilNotAvailable(ImportError):
+ """When Python imaging library is not available"""
+
+
+class FontNotFound(Exception):
+ """When there are no usable fonts specified"""
+
+
+class FontManager:
+ """
+ Manages a set of fonts: normal, italic, bold, etc...
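+
+ A minimal usage sketch (illustrative; requires Pillow and a matching
+ installed font)::
+
+ fm = FontManager('DejaVu Sans Mono', 14)
+ bold_italic = fm.get_font(bold=True, oblique=True)
+ char_width, char_height = fm.get_char_size()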
+ """
+
+ def __init__(self, font_name, font_size=14):
+ self.font_name = font_name
+ self.font_size = font_size
+ self.fonts = {}
+ self.encoding = None
+ if sys.platform.startswith('win'):
+ if not font_name:
+ self.font_name = DEFAULT_FONT_NAME_WIN
+ self._create_win()
+ elif sys.platform.startswith('darwin'):
+ if not font_name:
+ self.font_name = DEFAULT_FONT_NAME_MAC
+ self._create_mac()
+ else:
+ if not font_name:
+ self.font_name = DEFAULT_FONT_NAME_NIX
+ self._create_nix()
+
+ def _get_nix_font_path(self, name, style):
+ proc = subprocess.Popen(['fc-list', "%s:style=%s" % (name, style), 'file'],
+ stdout=subprocess.PIPE, stderr=None)
+ stdout, _ = proc.communicate()
+ if proc.returncode == 0:
+ lines = stdout.splitlines()
+ for line in lines:
+ if line.startswith(b'Fontconfig warning:'):
+ continue
+ path = line.decode().strip().strip(':')
+ if path:
+ return path
+ return None
+
+ def _create_nix(self):
+ for name in STYLES['NORMAL']:
+ path = self._get_nix_font_path(self.font_name, name)
+ if path is not None:
+ self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
+ break
+ else:
+ raise FontNotFound('No usable fonts named: "%s"' %
+ self.font_name)
+ for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
+ for stylename in STYLES[style]:
+ path = self._get_nix_font_path(self.font_name, stylename)
+ if path is not None:
+ self.fonts[style] = ImageFont.truetype(path, self.font_size)
+ break
+ else:
+ if style == 'BOLDITALIC':
+ self.fonts[style] = self.fonts['BOLD']
+ else:
+ self.fonts[style] = self.fonts['NORMAL']
+
+ def _get_mac_font_path(self, font_map, name, style):
+ return font_map.get((name + ' ' + style).strip().lower())
+
+ def _create_mac(self):
+ font_map = {}
+ for font_dir in (os.path.join(os.getenv("HOME"), 'Library/Fonts/'),
+ '/Library/Fonts/', '/System/Library/Fonts/'):
+ font_map.update(
+ (os.path.splitext(f)[0].lower(), os.path.join(font_dir, f))
+ for f in os.listdir(font_dir)
+ if f.lower().endswith(('ttf', 'ttc')))
+
+ for name in STYLES['NORMAL']:
+ path = self._get_mac_font_path(font_map, self.font_name, name)
+ if path is not None:
+ self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
+ break
+ else:
+ raise FontNotFound('No usable fonts named: "%s"' %
+ self.font_name)
+ for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
+ for stylename in STYLES[style]:
+ path = self._get_mac_font_path(font_map, self.font_name, stylename)
+ if path is not None:
+ self.fonts[style] = ImageFont.truetype(path, self.font_size)
+ break
+ else:
+ if style == 'BOLDITALIC':
+ self.fonts[style] = self.fonts['BOLD']
+ else:
+ self.fonts[style] = self.fonts['NORMAL']
+
+ def _lookup_win(self, key, basename, styles, fail=False):
+ for suffix in ('', ' (TrueType)'):
+ for style in styles:
+ try:
+ valname = '%s%s%s' % (basename, style and ' '+style, suffix)
+ val, _ = _winreg.QueryValueEx(key, valname)
+ return val
+ except OSError:
+ continue
+ else:
+ if fail:
+ raise FontNotFound('Font %s (%s) not found in registry' %
+ (basename, styles[0]))
+ return None
+
+ def _create_win(self):
+ lookuperror = None
+ keynames = [ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
+ (_winreg.HKEY_CURRENT_USER, r'Software\Microsoft\Windows\CurrentVersion\Fonts'),
+ (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'),
+ (_winreg.HKEY_LOCAL_MACHINE, r'Software\Microsoft\Windows\CurrentVersion\Fonts') ]
+ for keyname in keynames:
+ try:
+ key = _winreg.OpenKey(*keyname)
+ try:
+ path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
+ self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
+ for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
+ path = self._lookup_win(key, self.font_name, STYLES[style])
+ if path:
+ self.fonts[style] = ImageFont.truetype(path, self.font_size)
+ else:
+ if style == 'BOLDITALIC':
+ self.fonts[style] = self.fonts['BOLD']
+ else:
+ self.fonts[style] = self.fonts['NORMAL']
+ return
+ except FontNotFound as err:
+ lookuperror = err
+ finally:
+ _winreg.CloseKey(key)
+ except OSError:
+ pass
+ else:
+ # If we get here, we checked all registry keys and had no luck
+ # We can be in one of two situations now:
+ # * All key lookups failed. In this case lookuperror is None and we
+ # will raise a generic error
+ # * At least one lookup failed with a FontNotFound error. In this
+ # case, we will raise that as a more specific error
+ if lookuperror:
+ raise lookuperror
+ raise FontNotFound('Can\'t open Windows font registry key')
+
+ def get_char_size(self):
+ """
+ Get the character size.
+ """
+ return self.get_text_size('M')
+
+ def get_text_size(self, text):
+ """
+ Get the text size (width, height).
+ """
+ font = self.fonts['NORMAL']
+ if hasattr(font, 'getbbox'): # Pillow >= 9.2.0
+ return font.getbbox(text)[2:4]
+ else:
+ return font.getsize(text)
+
+ def get_font(self, bold, oblique):
+ """
+ Get the font based on bold and italic flags.
+ """
+ if bold and oblique:
+ return self.fonts['BOLDITALIC']
+ elif bold:
+ return self.fonts['BOLD']
+ elif oblique:
+ return self.fonts['ITALIC']
+ else:
+ return self.fonts['NORMAL']
+
+
+class ImageFormatter(Formatter):
+ """
+ Create a PNG image from source code. This uses the Python Imaging Library to
+ generate a pixmap from the source code.
+
+ .. versionadded:: 0.10
+
+ Additional options accepted:
+
+ `image_format`
+ An image format to output to that is recognised by PIL; these include:
+
+ * "PNG" (default)
+ * "JPEG"
+ * "BMP"
+ * "GIF"
+
+ `line_pad`
+ The extra spacing (in pixels) between each line of text.
+
+ Default: 2
+
+ `font_name`
+ The font name to be used as the base font, from which others, such as
+ bold and italic fonts, will be generated. This really should be a
+ monospace font to look sane.
+
+ Default: "Courier New" on Windows, "Menlo" on Mac OS, and
+ "DejaVu Sans Mono" on \\*nix
+
+ `font_size`
+ The font size in points to be used.
+
+ Default: 14
+
+ `image_pad`
+ The padding, in pixels, to be used at each edge of the resulting image.
+
+ Default: 10
+
+ `line_numbers`
+ Whether line numbers should be shown: True/False
+
+ Default: True
+
+ `line_number_start`
+ The line number of the first line.
+
+ Default: 1
+
+ `line_number_step`
+ The step used when printing line numbers.
+
+ Default: 1
+
+ `line_number_bg`
+ The background colour (in "#123456" format) of the line number bar, or
+ None to use the style background color.
+
+ Default: "#eed"
+
+ `line_number_fg`
+ The text color of the line numbers (in "#123456"-like format).
+
+ Default: "#886"
+
+ `line_number_chars`
+ The number of columns of line numbers allowable in the line number
+ margin.
+
+ Default: 2
+
+ `line_number_bold`
+ Whether line numbers will be bold: True/False
+
+ Default: False
+
+ `line_number_italic`
+ Whether line numbers will be italicized: True/False
+
+ Default: False
+
+ `line_number_separator`
+ Whether a line will be drawn between the line number area and the
+ source code area: True/False
+
+ Default: True
+
+ `line_number_pad`
+ The horizontal padding (in pixels) between the line number margin, and
+ the source code area.
+
+ Default: 6
+
+ `hl_lines`
+ Specify a list of lines to be highlighted.
+
+ .. versionadded:: 1.2
+
+ Default: empty list
+
+ `hl_color`
+ Specify the color for highlighting lines.
+
+ .. versionadded:: 1.2
+
+ Default: highlight color of the selected style
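+
+ A minimal usage sketch (illustrative; assumes Pillow is installed and
+ goes through the public `pygments` API):
+
+ .. sourcecode:: python
+
+ from pygments import highlight
+ from pygments.lexers import PythonLexer
+ from pygments.formatters import ImageFormatter
+
+ # the output file name is an assumption for the example
+ with open('code.png', 'wb') as f:
+ highlight('print("hello")', PythonLexer(),
+ ImageFormatter(line_numbers=True), f)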
+ """
+
+ # Required by the pygments mapper
+ name = 'img'
+ aliases = ['img', 'IMG', 'png']
+ filenames = ['*.png']
+
+ unicodeoutput = False
+
+ default_image_format = 'png'
+
+ def __init__(self, **options):
+ """
+ See the class docstring for explanation of options.
+ """
+ if not pil_available:
+ raise PilNotAvailable(
+ 'Python Imaging Library is required for this formatter')
+ Formatter.__init__(self, **options)
+ self.encoding = 'latin1' # let pygments.format() do the right thing
+ # Read the style
+ self.styles = dict(self.style)
+ if self.style.background_color is None:
+ self.background_color = '#fff'
+ else:
+ self.background_color = self.style.background_color
+ # Image options
+ self.image_format = get_choice_opt(
+ options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
+ self.default_image_format, normcase=True)
+ self.image_pad = get_int_opt(options, 'image_pad', 10)
+ self.line_pad = get_int_opt(options, 'line_pad', 2)
+ # The fonts
+ fontsize = get_int_opt(options, 'font_size', 14)
+ self.fonts = FontManager(options.get('font_name', ''), fontsize)
+ self.fontw, self.fonth = self.fonts.get_char_size()
+ # Line number options
+ self.line_number_fg = options.get('line_number_fg', '#886')
+ self.line_number_bg = options.get('line_number_bg', '#eed')
+ self.line_number_chars = get_int_opt(options,
+ 'line_number_chars', 2)
+ self.line_number_bold = get_bool_opt(options,
+ 'line_number_bold', False)
+ self.line_number_italic = get_bool_opt(options,
+ 'line_number_italic', False)
+ self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
+ self.line_numbers = get_bool_opt(options, 'line_numbers', True)
+ self.line_number_separator = get_bool_opt(options,
+ 'line_number_separator', True)
+ self.line_number_step = get_int_opt(options, 'line_number_step', 1)
+ self.line_number_start = get_int_opt(options, 'line_number_start', 1)
+ if self.line_numbers:
+ self.line_number_width = (self.fontw * self.line_number_chars +
+ self.line_number_pad * 2)
+ else:
+ self.line_number_width = 0
+ self.hl_lines = []
+ hl_lines_str = get_list_opt(options, 'hl_lines', [])
+ for line in hl_lines_str:
+ try:
+ self.hl_lines.append(int(line))
+ except ValueError:
+ pass
+ self.hl_color = options.get('hl_color',
+ self.style.highlight_color) or '#f90'
+ self.drawables = []
+
+ def get_style_defs(self, arg=''):
+ raise NotImplementedError('The -S option is meaningless for the image '
+ 'formatter. Use -O style=<stylename> instead.')
+
+ def _get_line_height(self):
+ """
+ Get the height of a line.
+ """
+ return self.fonth + self.line_pad
+
+ def _get_line_y(self, lineno):
+ """
+ Get the Y coordinate of a line number.
+ """
+ return lineno * self._get_line_height() + self.image_pad
+
+ def _get_char_width(self):
+ """
+ Get the width of a character.
+ """
+ return self.fontw
+
+ def _get_char_x(self, linelength):
+ """
+ Get the X coordinate of a character position.
+ """
+ return linelength + self.image_pad + self.line_number_width
+
+ def _get_text_pos(self, linelength, lineno):
+ """
+ Get the actual position for a character and line position.
+ """
+ return self._get_char_x(linelength), self._get_line_y(lineno)
+
+ def _get_linenumber_pos(self, lineno):
+ """
+ Get the actual position for the start of a line number.
+ """
+ return (self.image_pad, self._get_line_y(lineno))
+
+ def _get_text_color(self, style):
+ """
+ Get the correct color for the token from the style.
+ """
+ if style['color'] is not None:
+ fill = '#' + style['color']
+ else:
+ fill = '#000'
+ return fill
+
+ def _get_text_bg_color(self, style):
+ """
+ Get the correct background color for the token from the style.
+ """
+ if style['bgcolor'] is not None:
+ bg_color = '#' + style['bgcolor']
+ else:
+ bg_color = None
+ return bg_color
+
+ def _get_style_font(self, style):
+ """
+ Get the correct font for the style.
+ """
+ return self.fonts.get_font(style['bold'], style['italic'])
+
+ def _get_image_size(self, maxlinelength, maxlineno):
+ """
+ Get the required image size.
+ """
+ return (self._get_char_x(maxlinelength) + self.image_pad,
+ self._get_line_y(maxlineno + 0) + self.image_pad)
+
+ def _draw_linenumber(self, posno, lineno):
+ """
+ Remember a line number drawable to paint later.
+ """
+ self._draw_text(
+ self._get_linenumber_pos(posno),
+ str(lineno).rjust(self.line_number_chars),
+ font=self.fonts.get_font(self.line_number_bold,
+ self.line_number_italic),
+ text_fg=self.line_number_fg,
+ text_bg=None,
+ )
+
+ def _draw_text(self, pos, text, font, text_fg, text_bg):
+ """
+ Remember a single drawable tuple to paint later.
+ """
+ self.drawables.append((pos, text, font, text_fg, text_bg))
+
+ def _create_drawables(self, tokensource):
+ """
+ Create drawables for the token content.
+ """
+ lineno = charno = maxcharno = 0
+ maxlinelength = linelength = 0
+ for ttype, value in tokensource:
+ while ttype not in self.styles:
+ ttype = ttype.parent
+ style = self.styles[ttype]
+ # TODO: make sure tab expansion happens earlier in the chain. It
+ # really ought to be done on the input, as to do it right here is
+ # quite complex.
+ value = value.expandtabs(4)
+ lines = value.splitlines(True)
+ for i, line in enumerate(lines):
+ temp = line.rstrip('\n')
+ if temp:
+ self._draw_text(
+ self._get_text_pos(linelength, lineno),
+ temp,
+ font = self._get_style_font(style),
+ text_fg = self._get_text_color(style),
+ text_bg = self._get_text_bg_color(style),
+ )
+ temp_width, _ = self.fonts.get_text_size(temp)
+ linelength += temp_width
+ maxlinelength = max(maxlinelength, linelength)
+ charno += len(temp)
+ maxcharno = max(maxcharno, charno)
+ if line.endswith('\n'):
+ # add a line for each extra line in the value
+ linelength = 0
+ charno = 0
+ lineno += 1
+ self.maxlinelength = maxlinelength
+ self.maxcharno = maxcharno
+ self.maxlineno = lineno
+
+ def _draw_line_numbers(self):
+ """
+ Create drawables for the line numbers.
+ """
+ if not self.line_numbers:
+ return
+ for p in range(self.maxlineno):
+ n = p + self.line_number_start
+ if (n % self.line_number_step) == 0:
+ self._draw_linenumber(p, n)
+
+ def _paint_line_number_bg(self, im):
+ """
+ Paint the line number background on the image.
+ """
+ if not self.line_numbers:
+ return
+ if self.line_number_fg is None:
+ return
+ draw = ImageDraw.Draw(im)
+ recth = im.size[-1]
+ rectw = self.image_pad + self.line_number_width - self.line_number_pad
+ draw.rectangle([(0, 0), (rectw, recth)],
+ fill=self.line_number_bg)
+ if self.line_number_separator:
+ draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
+ del draw
+
+ def format(self, tokensource, outfile):
+ """
+ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
+ tuples and write it into ``outfile``.
+
+ This implementation calculates where it should draw each token on the
+ pixmap, then calculates the required pixmap size and draws the items.
+ """
+ self._create_drawables(tokensource)
+ self._draw_line_numbers()
+ im = Image.new(
+ 'RGB',
+ self._get_image_size(self.maxlinelength, self.maxlineno),
+ self.background_color
+ )
+ self._paint_line_number_bg(im)
+ draw = ImageDraw.Draw(im)
+ # Highlight
+ if self.hl_lines:
+ x = self.image_pad + self.line_number_width - self.line_number_pad + 1
+ recth = self._get_line_height()
+ rectw = im.size[0] - x
+ for linenumber in self.hl_lines:
+ y = self._get_line_y(linenumber - 1)
+ draw.rectangle([(x, y), (x + rectw, y + recth)],
+ fill=self.hl_color)
+ for pos, value, font, text_fg, text_bg in self.drawables:
+ if text_bg:
+ text_size = draw.textsize(text=value, font=font)
+ draw.rectangle([pos[0], pos[1], pos[0] + text_size[0], pos[1] + text_size[1]], fill=text_bg)
+ draw.text(pos, value, font=font, fill=text_fg)
+ im.save(outfile, self.image_format.upper())
+
+
+# Add one formatter per format, so that the "-f gif" option gives the correct result
+# when used in pygmentize.
+
+class GifImageFormatter(ImageFormatter):
+ """
+ Create a GIF image from source code. This uses the Python Imaging Library to
+ generate a pixmap from the source code.
+
+ .. versionadded:: 1.0
+ """
+
+ name = 'img_gif'
+ aliases = ['gif']
+ filenames = ['*.gif']
+ default_image_format = 'gif'
+
+
+class JpgImageFormatter(ImageFormatter):
+ """
+ Create a JPEG image from source code. This uses the Python Imaging Library to
+ generate a pixmap from the source code.
+
+ .. versionadded:: 1.0
+ """
+
+ name = 'img_jpg'
+ aliases = ['jpg', 'jpeg']
+ filenames = ['*.jpg']
+ default_image_format = 'jpeg'
+
+
+class BmpImageFormatter(ImageFormatter):
+ """
+ Create a bitmap image from source code. This uses the Python Imaging Library to
+ generate a pixmap from the source code.
+
+ .. versionadded:: 1.0
+ """
+
+ name = 'img_bmp'
+ aliases = ['bmp', 'bitmap']
+ filenames = ['*.bmp']
+ default_image_format = 'bmp'
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/irc.py b/third_party/python/pip/pip/_vendor/pygments/formatters/irc.py
new file mode 100644
index 0000000000..3f6d52deb4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/irc.py
@@ -0,0 +1,179 @@
+"""
+ pygments.formatters.irc
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for IRC output
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Token, Whitespace
+from pip._vendor.pygments.util import get_choice_opt
+
+
+__all__ = ['IRCFormatter']
+
+
+#: Map token types to a tuple of color values for light and dark
+#: backgrounds.
+IRC_COLORS = {
+ Token: ('', ''),
+
+ Whitespace: ('gray', 'brightblack'),
+ Comment: ('gray', 'brightblack'),
+ Comment.Preproc: ('cyan', 'brightcyan'),
+ Keyword: ('blue', 'brightblue'),
+ Keyword.Type: ('cyan', 'brightcyan'),
+ Operator.Word: ('magenta', 'brightcyan'),
+ Name.Builtin: ('cyan', 'brightcyan'),
+ Name.Function: ('green', 'brightgreen'),
+ Name.Namespace: ('_cyan_', '_brightcyan_'),
+ Name.Class: ('_green_', '_brightgreen_'),
+ Name.Exception: ('cyan', 'brightcyan'),
+ Name.Decorator: ('brightblack', 'gray'),
+ Name.Variable: ('red', 'brightred'),
+ Name.Constant: ('red', 'brightred'),
+ Name.Attribute: ('cyan', 'brightcyan'),
+ Name.Tag: ('brightblue', 'brightblue'),
+ String: ('yellow', 'yellow'),
+ Number: ('blue', 'brightblue'),
+
+ Generic.Deleted: ('brightred', 'brightred'),
+ Generic.Inserted: ('green', 'brightgreen'),
+ Generic.Heading: ('**', '**'),
+ Generic.Subheading: ('*magenta*', '*brightmagenta*'),
+ Generic.Error: ('brightred', 'brightred'),
+
+ Error: ('_brightred_', '_brightred_'),
+}
+
+
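+# Numeric colour codes in the mIRC convention; ircformat() below emits them
+# as '\x03' followed by the zero-padded two-digit code.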
+IRC_COLOR_MAP = {
+ 'white': 0,
+ 'black': 1,
+ 'blue': 2,
+ 'brightgreen': 3,
+ 'brightred': 4,
+ 'yellow': 5,
+ 'magenta': 6,
+ 'orange': 7,
+ 'green': 7, # compat w/ ansi
+ 'brightyellow': 8,
+ 'lightgreen': 9,
+ 'brightcyan': 9, # compat w/ ansi
+ 'cyan': 10,
+ 'lightblue': 11,
+ 'red': 11, # compat w/ ansi
+ 'brightblue': 12,
+ 'brightmagenta': 13,
+ 'brightblack': 14,
+ 'gray': 15,
+}
+
+def ircformat(color, text):
+ if len(color) < 1:
+ return text
+ add = sub = ''
+ if '_' in color: # italic
+ add += '\x1D'
+ sub = '\x1D' + sub
+ color = color.strip('_')
+ if '*' in color: # bold
+ add += '\x02'
+ sub = '\x02' + sub
+ color = color.strip('*')
+ # underline (\x1F) not supported
+ # backgrounds (\x03FF,BB) not supported
+ if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff
+ add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2)
+ sub = '\x03' + sub
+ return add + text + sub
+
+
+class IRCFormatter(Formatter):
+ r"""
+ Format tokens with IRC color sequences
+
+ The `get_style_defs()` method doesn't do anything special since there is
+ no support for common styles.
+
+ Options accepted:
+
+ `bg`
+ Set to ``"light"`` or ``"dark"`` depending on the terminal's background
+ (default: ``"light"``).
+
+ `colorscheme`
+ A dictionary mapping token types to (lightbg, darkbg) color names or
+ ``None`` (default: ``None`` = use builtin colorscheme).
+
+ `linenos`
+ Set to ``True`` to have line numbers in the output as well
+ (default: ``False`` = no line numbers).
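+
+ A minimal usage sketch (illustrative; uses the public `pygments` API)::
+
+ from pygments import highlight
+ from pygments.lexers import PythonLexer
+
+ msg = highlight('print("hello")', PythonLexer(), IRCFormatter(bg='dark'))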
+ """
+ name = 'IRC'
+ aliases = ['irc', 'IRC']
+ filenames = []
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ self.darkbg = get_choice_opt(options, 'bg',
+ ['light', 'dark'], 'light') == 'dark'
+ self.colorscheme = options.get('colorscheme', None) or IRC_COLORS
+ self.linenos = options.get('linenos', False)
+ self._lineno = 0
+
+ def _write_lineno(self, outfile):
+ self._lineno += 1
+ outfile.write("\n%04d: " % self._lineno)
+
+ def _format_unencoded_with_lineno(self, tokensource, outfile):
+ self._write_lineno(outfile)
+
+ for ttype, value in tokensource:
+ if value.endswith("\n"):
+ self._write_lineno(outfile)
+ value = value[:-1]
+ color = self.colorscheme.get(ttype)
+ while color is None:
+ ttype = ttype.parent
+ color = self.colorscheme.get(ttype)
+ if color:
+ color = color[self.darkbg]
+ spl = value.split('\n')
+ for line in spl[:-1]:
+ self._write_lineno(outfile)
+ if line:
+ outfile.write(ircformat(color, line[:-1]))
+ if spl[-1]:
+ outfile.write(ircformat(color, spl[-1]))
+ else:
+ outfile.write(value)
+
+ outfile.write("\n")
+
+ def format_unencoded(self, tokensource, outfile):
+ if self.linenos:
+ self._format_unencoded_with_lineno(tokensource, outfile)
+ return
+
+ for ttype, value in tokensource:
+ color = self.colorscheme.get(ttype)
+ while color is None:
+ ttype = ttype[:-1]
+ color = self.colorscheme.get(ttype)
+ if color:
+ color = color[self.darkbg]
+ spl = value.split('\n')
+ for line in spl[:-1]:
+ if line:
+ outfile.write(ircformat(color, line))
+ outfile.write('\n')
+ if spl[-1]:
+ outfile.write(ircformat(color, spl[-1]))
+ else:
+ outfile.write(value)
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/latex.py b/third_party/python/pip/pip/_vendor/pygments/formatters/latex.py
new file mode 100644
index 0000000000..4a7375a5ce
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/latex.py
@@ -0,0 +1,521 @@
+"""
+ pygments.formatters.latex
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for LaTeX fancyvrb output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from io import StringIO
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.lexer import Lexer, do_insertions
+from pip._vendor.pygments.token import Token, STANDARD_TYPES
+from pip._vendor.pygments.util import get_bool_opt, get_int_opt
+
+
+__all__ = ['LatexFormatter']
+
+
+def escape_tex(text, commandprefix):
+ return text.replace('\\', '\x00'). \
+ replace('{', '\x01'). \
+ replace('}', '\x02'). \
+ replace('\x00', r'\%sZbs{}' % commandprefix). \
+ replace('\x01', r'\%sZob{}' % commandprefix). \
+ replace('\x02', r'\%sZcb{}' % commandprefix). \
+ replace('^', r'\%sZca{}' % commandprefix). \
+ replace('_', r'\%sZus{}' % commandprefix). \
+ replace('&', r'\%sZam{}' % commandprefix). \
+ replace('<', r'\%sZlt{}' % commandprefix). \
+ replace('>', r'\%sZgt{}' % commandprefix). \
+ replace('#', r'\%sZsh{}' % commandprefix). \
+ replace('%', r'\%sZpc{}' % commandprefix). \
+ replace('$', r'\%sZdl{}' % commandprefix). \
+ replace('-', r'\%sZhy{}' % commandprefix). \
+ replace("'", r'\%sZsq{}' % commandprefix). \
+ replace('"', r'\%sZdq{}' % commandprefix). \
+ replace('~', r'\%sZti{}' % commandprefix)
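+
+# For example (illustrative):
+# escape_tex('50% _off_', 'PY') == r'50\PYZpc{} \PYZus{}off\PYZus{}'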
+
+
+DOC_TEMPLATE = r'''
+\documentclass{%(docclass)s}
+\usepackage{fancyvrb}
+\usepackage{color}
+\usepackage[%(encoding)s]{inputenc}
+%(preamble)s
+
+%(styledefs)s
+
+\begin{document}
+
+\section*{%(title)s}
+
+%(code)s
+\end{document}
+'''
+
+## Small explanation of the mess below :)
+#
+# The previous version of the LaTeX formatter just assigned a command to
+# each token type defined in the current style. That obviously is
+# problematic if the highlighted code is produced for a different style
+# than the style commands themselves.
+#
+# This version works much like the HTML formatter which assigns multiple
+# CSS classes to each <span> tag, from the most specific to the least
+# specific token type, thus falling back to the parent token type if one
+# is not defined. Here, the classes are there too and use the same short
+# forms given in token.STANDARD_TYPES.
+#
+# Highlighted code now only uses one custom command, which by default is
+# \PY and selectable by the commandprefix option (and in addition the
+# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
+# backwards compatibility purposes).
+#
+# \PY has two arguments: the classes, separated by +, and the text to
+# render in that style. The classes are resolved into the respective
+# style commands by magic, which serves to ignore unknown classes.
+#
+# The magic macros are:
+# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
+# to render in \PY@do. Their definition determines the style.
+# * \PY@reset resets \PY@it etc. to do nothing.
+# * \PY@toks parses the list of classes, using magic inspired by the
+# keyval package (but modified to use plusses instead of commas
+# because fancyvrb redefines commas inside its environments).
+# * \PY@tok processes one class, calling the \PY@tok@classname command
+# if it exists.
+# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
+# for its class.
+# * \PY resets the style, parses the classnames and then calls \PY@do.
+#
+# Tip: to read this code, print it out in substituted form using e.g.
+# >>> print(STYLE_TEMPLATE % {'cp': 'PY'})
+
+STYLE_TEMPLATE = r'''
+\makeatletter
+\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
+ \let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
+ \let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
+\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
+\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
+ \%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
+\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
+ \%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
+\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
+
+%(styles)s
+
+\def\%(cp)sZbs{\char`\\}
+\def\%(cp)sZus{\char`\_}
+\def\%(cp)sZob{\char`\{}
+\def\%(cp)sZcb{\char`\}}
+\def\%(cp)sZca{\char`\^}
+\def\%(cp)sZam{\char`\&}
+\def\%(cp)sZlt{\char`\<}
+\def\%(cp)sZgt{\char`\>}
+\def\%(cp)sZsh{\char`\#}
+\def\%(cp)sZpc{\char`\%%}
+\def\%(cp)sZdl{\char`\$}
+\def\%(cp)sZhy{\char`\-}
+\def\%(cp)sZsq{\char`\'}
+\def\%(cp)sZdq{\char`\"}
+\def\%(cp)sZti{\char`\~}
+%% for compatibility with earlier versions
+\def\%(cp)sZat{@}
+\def\%(cp)sZlb{[}
+\def\%(cp)sZrb{]}
+\makeatother
+'''
+
+
+def _get_ttype_name(ttype):
+ fname = STANDARD_TYPES.get(ttype)
+ if fname:
+ return fname
+ aname = ''
+ while fname is None:
+ aname = ttype[-1] + aname
+ ttype = ttype.parent
+ fname = STANDARD_TYPES.get(ttype)
+ return fname + aname
+
+
+class LatexFormatter(Formatter):
+ r"""
+ Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
+ standard packages.
+
+ Without the `full` option, code is formatted as one ``Verbatim``
+ environment, like this:
+
+ .. sourcecode:: latex
+
+ \begin{Verbatim}[commandchars=\\\{\}]
+ \PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
+ \PY{k}{pass}
+ \end{Verbatim}
+
+ Wrapping can be disabled using the `nowrap` option.
+
+ The special command used here (``\PY``) and all the other macros it needs
+ are output by the `get_style_defs` method.
+
+ With the `full` option, a complete LaTeX document is output, including
+ the command definitions in the preamble.
+
+ The `get_style_defs()` method of a `LatexFormatter` returns a string
+ containing ``\def`` commands defining the macros needed inside the
+ ``Verbatim`` environments.
+
+ Additional options accepted:
+
+ `nowrap`
+ If set to ``True``, don't wrap the tokens at all, not even inside a
+ ``\begin{Verbatim}`` environment. This disables most other options
+ (default: ``False``).
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+
+ `full`
+ Tells the formatter to output a "full" document, i.e. a complete
+ self-contained document (default: ``False``).
+
+ `title`
+ If `full` is true, the title that should be used to caption the
+ document (default: ``''``).
+
+ `docclass`
+ If the `full` option is enabled, this is the document class to use
+ (default: ``'article'``).
+
+ `preamble`
+ If the `full` option is enabled, this can be further preamble commands,
+ e.g. ``\usepackage`` (default: ``''``).
+
+ `linenos`
+ If set to ``True``, output line numbers (default: ``False``).
+
+ `linenostart`
+ The line number for the first line (default: ``1``).
+
+ `linenostep`
+ If set to a number n > 1, only every nth line number is printed.
+
+ `verboptions`
+ Additional options given to the Verbatim environment (see the *fancyvrb*
+ docs for possible values) (default: ``''``).
+
+ `commandprefix`
+ The LaTeX commands used to produce colored output are constructed
+ using this prefix and some letters (default: ``'PY'``).
+
+ .. versionadded:: 0.7
+ .. versionchanged:: 0.10
+ The default is now ``'PY'`` instead of ``'C'``.
+
+ `texcomments`
+ If set to ``True``, enables LaTeX comment lines. That is, LaTeX markup
+ in comment tokens is not escaped so that LaTeX can render it (default:
+ ``False``).
+
+ .. versionadded:: 1.2
+
+ `mathescape`
+ If set to ``True``, enables LaTeX math mode escape in comments. That
+ is, ``'$...$'`` inside a comment will trigger math mode (default:
+ ``False``).
+
+ .. versionadded:: 1.2
+
+ `escapeinside`
+ If set to a string of length 2, enables escaping to LaTeX. Text
+ delimited by these 2 characters is read as LaTeX code and
+ typeset accordingly. It has no effect in string literals. It has
+ no effect in comments if `texcomments` or `mathescape` is
+ set. (default: ``''``).
+
+ .. versionadded:: 2.0
+
+ `envname`
+ Allows you to pick an alternative environment name replacing Verbatim.
+ The alternate environment still has to support Verbatim's option syntax.
+ (default: ``'Verbatim'``).
+
+ .. versionadded:: 2.0
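+
+ A minimal usage sketch (illustrative; goes through the public `pygments`
+ API):
+
+ .. sourcecode:: python
+
+ from pygments import highlight
+ from pygments.lexers import PythonLexer
+ from pygments.formatters import LatexFormatter
+
+ tex = highlight('print("hello")', PythonLexer(),
+ LatexFormatter(full=True, linenos=True))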
+ """
+ name = 'LaTeX'
+ aliases = ['latex', 'tex']
+ filenames = ['*.tex']
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ self.nowrap = get_bool_opt(options, 'nowrap', False)
+ self.docclass = options.get('docclass', 'article')
+ self.preamble = options.get('preamble', '')
+ self.linenos = get_bool_opt(options, 'linenos', False)
+ self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
+ self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
+ self.verboptions = options.get('verboptions', '')
+ self.nobackground = get_bool_opt(options, 'nobackground', False)
+ self.commandprefix = options.get('commandprefix', 'PY')
+ self.texcomments = get_bool_opt(options, 'texcomments', False)
+ self.mathescape = get_bool_opt(options, 'mathescape', False)
+ self.escapeinside = options.get('escapeinside', '')
+ if len(self.escapeinside) == 2:
+ self.left = self.escapeinside[0]
+ self.right = self.escapeinside[1]
+ else:
+ self.escapeinside = ''
+ self.envname = options.get('envname', 'Verbatim')
+
+ self._create_stylesheet()
+
+ def _create_stylesheet(self):
+ t2n = self.ttype2name = {Token: ''}
+ c2d = self.cmd2def = {}
+ cp = self.commandprefix
+
+ def rgbcolor(col):
+ if col:
+ return ','.join(['%.2f' % (int(col[i] + col[i + 1], 16) / 255.0)
+ for i in (0, 2, 4)])
+ else:
+ return '1,1,1'
+
+ for ttype, ndef in self.style:
+ name = _get_ttype_name(ttype)
+ cmndef = ''
+ if ndef['bold']:
+ cmndef += r'\let\$$@bf=\textbf'
+ if ndef['italic']:
+ cmndef += r'\let\$$@it=\textit'
+ if ndef['underline']:
+ cmndef += r'\let\$$@ul=\underline'
+ if ndef['roman']:
+ cmndef += r'\let\$$@ff=\textrm'
+ if ndef['sans']:
+ cmndef += r'\let\$$@ff=\textsf'
+ if ndef['mono']:
+ cmndef += r'\let\$$@ff=\textsf'
+ if ndef['color']:
+ cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
+ rgbcolor(ndef['color']))
+ if ndef['border']:
+ cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{\string -\fboxrule}'
+ r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}}' %
+ (rgbcolor(ndef['border']),
+ rgbcolor(ndef['bgcolor'])))
+ elif ndef['bgcolor']:
+ cmndef += (r'\def\$$@bc##1{{\setlength{\fboxsep}{0pt}'
+ r'\colorbox[rgb]{%s}{\strut ##1}}}' %
+ rgbcolor(ndef['bgcolor']))
+ if cmndef == '':
+ continue
+ cmndef = cmndef.replace('$$', cp)
+ t2n[ttype] = name
+ c2d[name] = cmndef
+
+ def get_style_defs(self, arg=''):
+ """
+ Return the command sequences needed to define the commands
+ used to format text in the verbatim environment. ``arg`` is ignored.
+ """
+ cp = self.commandprefix
+ styles = []
+ for name, definition in self.cmd2def.items():
+ styles.append(r'\@namedef{%s@tok@%s}{%s}' % (cp, name, definition))
+ return STYLE_TEMPLATE % {'cp': self.commandprefix,
+ 'styles': '\n'.join(styles)}
+
+ def format_unencoded(self, tokensource, outfile):
+ # TODO: add support for background colors
+ t2n = self.ttype2name
+ cp = self.commandprefix
+
+ if self.full:
+ realoutfile = outfile
+ outfile = StringIO()
+
+ if not self.nowrap:
+ outfile.write('\\begin{' + self.envname + '}[commandchars=\\\\\\{\\}')
+ if self.linenos:
+ start, step = self.linenostart, self.linenostep
+ outfile.write(',numbers=left' +
+ (start and ',firstnumber=%d' % start or '') +
+ (step and ',stepnumber=%d' % step or ''))
+ if self.mathescape or self.texcomments or self.escapeinside:
+ outfile.write(',codes={\\catcode`\\$=3\\catcode`\\^=7'
+ '\\catcode`\\_=8\\relax}')
+ if self.verboptions:
+ outfile.write(',' + self.verboptions)
+ outfile.write(']\n')
+
+ for ttype, value in tokensource:
+ if ttype in Token.Comment:
+ if self.texcomments:
+ # Try to guess comment starting lexeme and escape it ...
+ start = value[0:1]
+ for i in range(1, len(value)):
+ if start[0] != value[i]:
+ break
+ start += value[i]
+
+ value = value[len(start):]
+ start = escape_tex(start, cp)
+
+ # ... but do not escape inside comment.
+ value = start + value
+ elif self.mathescape:
+ # Only escape parts not inside a math environment.
+ parts = value.split('$')
+ in_math = False
+ for i, part in enumerate(parts):
+ if not in_math:
+ parts[i] = escape_tex(part, cp)
+ in_math = not in_math
+ value = '$'.join(parts)
+ elif self.escapeinside:
+ text = value
+ value = ''
+ while text:
+ a, sep1, text = text.partition(self.left)
+ if sep1:
+ b, sep2, text = text.partition(self.right)
+ if sep2:
+ value += escape_tex(a, cp) + b
+ else:
+ value += escape_tex(a + sep1 + b, cp)
+ else:
+ value += escape_tex(a, cp)
+ else:
+ value = escape_tex(value, cp)
+ elif ttype not in Token.Escape:
+ value = escape_tex(value, cp)
+ styles = []
+ while ttype is not Token:
+ try:
+ styles.append(t2n[ttype])
+ except KeyError:
+ # not in current style
+ styles.append(_get_ttype_name(ttype))
+ ttype = ttype.parent
+ styleval = '+'.join(reversed(styles))
+ if styleval:
+ spl = value.split('\n')
+ for line in spl[:-1]:
+ if line:
+ outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
+ outfile.write('\n')
+ if spl[-1]:
+ outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
+ else:
+ outfile.write(value)
+
+ if not self.nowrap:
+ outfile.write('\\end{' + self.envname + '}\n')
+
+ if self.full:
+ encoding = self.encoding or 'utf8'
+ # map known existing encodings from the LaTeX distribution
+ encoding = {
+ 'utf_8': 'utf8',
+ 'latin_1': 'latin1',
+ 'iso_8859_1': 'latin1',
+ }.get(encoding.replace('-', '_'), encoding)
+ realoutfile.write(DOC_TEMPLATE %
+ dict(docclass = self.docclass,
+ preamble = self.preamble,
+ title = self.title,
+ encoding = encoding,
+ styledefs = self.get_style_defs(),
+ code = outfile.getvalue()))
+
+
+class LatexEmbeddedLexer(Lexer):
+ """
+ This lexer takes one lexer as argument, the lexer for the language
+ being formatted, and the left and right delimiters for escaped text.
+
+ First everything is scanned using the language lexer to obtain
+ strings and comments. All other consecutive tokens are merged and
+ the resulting text is scanned for escaped segments, which are given
+ the Token.Escape type. Finally text that is not escaped is scanned
+ again with the language lexer.
+ """
+ def __init__(self, left, right, lang, **options):
+ self.left = left
+ self.right = right
+ self.lang = lang
+ Lexer.__init__(self, **options)
+
+ def get_tokens_unprocessed(self, text):
+ # find and remove all the escape tokens (replace with an empty string)
+ # this is very similar to DelegatingLexer.get_tokens_unprocessed.
+ buffered = ''
+ insertions = []
+ insertion_buf = []
+ for i, t, v in self._find_safe_escape_tokens(text):
+ if t is None:
+ if insertion_buf:
+ insertions.append((len(buffered), insertion_buf))
+ insertion_buf = []
+ buffered += v
+ else:
+ insertion_buf.append((i, t, v))
+ if insertion_buf:
+ insertions.append((len(buffered), insertion_buf))
+ return do_insertions(insertions,
+ self.lang.get_tokens_unprocessed(buffered))
+
+ def _find_safe_escape_tokens(self, text):
+ """ find escape tokens that are not in strings or comments """
+ for i, t, v in self._filter_to(
+ self.lang.get_tokens_unprocessed(text),
+ lambda t: t in Token.Comment or t in Token.String
+ ):
+ if t is None:
+ for i2, t2, v2 in self._find_escape_tokens(v):
+ yield i + i2, t2, v2
+ else:
+ yield i, None, v
+
+ def _filter_to(self, it, pred):
+ """ Keep only the tokens that match `pred`, merge the others together """
+ buf = ''
+ idx = 0
+ for i, t, v in it:
+ if pred(t):
+ if buf:
+ yield idx, None, buf
+ buf = ''
+ yield i, t, v
+ else:
+ if not buf:
+ idx = i
+ buf += v
+ if buf:
+ yield idx, None, buf
+
+ def _find_escape_tokens(self, text):
+ """ Find escape tokens within text, give token=None otherwise """
+ index = 0
+ while text:
+ a, sep1, text = text.partition(self.left)
+ if a:
+ yield index, None, a
+ index += len(a)
+ if sep1:
+ b, sep2, text = text.partition(self.right)
+ if sep2:
+ yield index + len(sep1), Token.Escape, b
+ index += len(sep1) + len(b) + len(sep2)
+ else:
+ yield index, Token.Error, sep1
+ index += len(sep1)
+ text = b
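+
+
+# A minimal, illustrative sketch (not exercised by pip itself; the
+# ``PythonLexer`` import path below is an assumption):
+if __name__ == '__main__':
+    from pip._vendor.pygments.lexers.python import PythonLexer
+    # Text between "|...|" outside strings and comments is re-typed as
+    # Token.Escape, so it can be passed through to LaTeX unescaped.
+    demo = LatexEmbeddedLexer('|', '|', PythonLexer())
+    for pos, tok, val in demo.get_tokens_unprocessed('x = |\\alpha| + 1\n'):
+        print(pos, tok, repr(val))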
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/other.py b/third_party/python/pip/pip/_vendor/pygments/formatters/other.py
new file mode 100644
index 0000000000..1e39cd42a8
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/other.py
@@ -0,0 +1,161 @@
+"""
+ pygments.formatters.other
+ ~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Other formatters: NullFormatter, RawTokenFormatter.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.util import get_choice_opt
+from pip._vendor.pygments.token import Token
+from pip._vendor.pygments.console import colorize
+
+__all__ = ['NullFormatter', 'RawTokenFormatter', 'TestcaseFormatter']
+
+
+class NullFormatter(Formatter):
+ """
+ Output the text unchanged without any formatting.
+ """
+ name = 'Text only'
+ aliases = ['text', 'null']
+ filenames = ['*.txt']
+
+ def format(self, tokensource, outfile):
+ enc = self.encoding
+ for ttype, value in tokensource:
+ if enc:
+ outfile.write(value.encode(enc))
+ else:
+ outfile.write(value)
+
+
+class RawTokenFormatter(Formatter):
+ r"""
+ Format tokens as a raw representation for storing token streams.
+
+ The format is ``tokentype<TAB>repr(tokenstring)\n``. The output can later
+ be converted to a token stream with the `RawTokenLexer`, described in the
+ :doc:`lexer list <lexers>`.
+
+ Only two options are accepted:
+
+ `compress`
+ If set to ``'gz'`` or ``'bz2'``, compress the output with the given
+ compression algorithm after encoding (default: ``''``).
+ `error_color`
+ If set to a color name, highlight error tokens using that color. If
+ set but with no value, defaults to ``'red'``.
+
+ .. versionadded:: 0.11
+
+ """
+ name = 'Raw tokens'
+ aliases = ['raw', 'tokens']
+ filenames = ['*.raw']
+
+ unicodeoutput = False
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ # We ignore self.encoding if it is set, since it gets set for lexer
+ # and formatter if given with -Oencoding on the command line.
+ # The RawTokenFormatter outputs only ASCII. Override here.
+ self.encoding = 'ascii' # let pygments.format() do the right thing
+ self.compress = get_choice_opt(options, 'compress',
+ ['', 'none', 'gz', 'bz2'], '')
+ self.error_color = options.get('error_color', None)
+ if self.error_color is True:
+ self.error_color = 'red'
+ if self.error_color is not None:
+ try:
+ colorize(self.error_color, '')
+ except KeyError:
+ raise ValueError("Invalid color %r specified" %
+ self.error_color)
+
+ def format(self, tokensource, outfile):
+ try:
+ outfile.write(b'')
+ except TypeError:
+ raise TypeError('The raw tokens formatter needs a binary '
+ 'output file')
+ if self.compress == 'gz':
+ import gzip
+ outfile = gzip.GzipFile('', 'wb', 9, outfile)
+
+ write = outfile.write
+ flush = outfile.close
+ elif self.compress == 'bz2':
+ import bz2
+ compressor = bz2.BZ2Compressor(9)
+
+ def write(text):
+ outfile.write(compressor.compress(text))
+
+ def flush():
+ outfile.write(compressor.flush())
+ outfile.flush()
+ else:
+ write = outfile.write
+ flush = outfile.flush
+
+ if self.error_color:
+ for ttype, value in tokensource:
+ line = b"%r\t%r\n" % (ttype, value)
+ if ttype is Token.Error:
+ write(colorize(self.error_color, line))
+ else:
+ write(line)
+ else:
+ for ttype, value in tokensource:
+ write(b"%r\t%r\n" % (ttype, value))
+ flush()
+
+
+TESTCASE_BEFORE = '''\
+ def testNeedsName(lexer):
+ fragment = %r
+ tokens = [
+'''
+TESTCASE_AFTER = '''\
+ ]
+ assert list(lexer.get_tokens(fragment)) == tokens
+'''
+
+
+class TestcaseFormatter(Formatter):
+ """
+ Format tokens as appropriate for a new testcase.
+
+ .. versionadded:: 2.0
+ """
+ name = 'Testcase'
+ aliases = ['testcase']
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ if self.encoding is not None and self.encoding != 'utf-8':
+ raise ValueError("Only None and utf-8 are allowed encodings.")
+
+ def format(self, tokensource, outfile):
+ indentation = ' ' * 12
+ rawbuf = []
+ outbuf = []
+ for ttype, value in tokensource:
+ rawbuf.append(value)
+ outbuf.append('%s(%s, %r),\n' % (indentation, ttype, value))
+
+ before = TESTCASE_BEFORE % (''.join(rawbuf),)
+ during = ''.join(outbuf)
+ after = TESTCASE_AFTER
+ if self.encoding is None:
+ outfile.write(before + during + after)
+ else:
+ outfile.write(before.encode('utf-8'))
+ outfile.write(during.encode('utf-8'))
+ outfile.write(after.encode('utf-8'))
+ outfile.flush()
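+
+
+# A small round-trip sketch (illustrative only): format a single token with
+# RawTokenFormatter and show the resulting ``tokentype<TAB>repr(value)`` line.
+if __name__ == '__main__':
+    import io
+    from pip._vendor.pygments.token import Name
+    buf = io.BytesIO()
+    RawTokenFormatter().format([(Name, 'demo')], buf)
+    print(buf.getvalue())  # b"Token.Name\t'demo'\n"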
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/pangomarkup.py b/third_party/python/pip/pip/_vendor/pygments/formatters/pangomarkup.py
new file mode 100644
index 0000000000..bd00866b8b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/pangomarkup.py
@@ -0,0 +1,83 @@
+"""
+ pygments.formatters.pangomarkup
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for Pango markup output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.formatter import Formatter
+
+
+__all__ = ['PangoMarkupFormatter']
+
+
+_escape_table = {
+ ord('&'): '&amp;',
+ ord('<'): '&lt;',
+}
+
+
+def escape_special_chars(text, table=_escape_table):
+ """Escape & and < for Pango Markup."""
+ return text.translate(table)
+
+
+class PangoMarkupFormatter(Formatter):
+ """
+ Format tokens as Pango Markup code. It can then be rendered to an SVG.
+
+ .. versionadded:: 2.9
+ """
+
+ name = 'Pango Markup'
+ aliases = ['pango', 'pangomarkup']
+ filenames = []
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+
+ self.styles = {}
+
+ for token, style in self.style:
+ start = ''
+ end = ''
+ if style['color']:
+ start += '<span fgcolor="#%s">' % style['color']
+ end = '</span>' + end
+ if style['bold']:
+ start += '<b>'
+ end = '</b>' + end
+ if style['italic']:
+ start += '<i>'
+ end = '</i>' + end
+ if style['underline']:
+ start += '<u>'
+ end = '</u>' + end
+ self.styles[token] = (start, end)
+
+ def format_unencoded(self, tokensource, outfile):
+ lastval = ''
+ lasttype = None
+
+ outfile.write('<tt>')
+
+ for ttype, value in tokensource:
+ while ttype not in self.styles:
+ ttype = ttype.parent
+ if ttype == lasttype:
+ lastval += escape_special_chars(value)
+ else:
+ if lastval:
+ stylebegin, styleend = self.styles[lasttype]
+ outfile.write(stylebegin + lastval + styleend)
+ lastval = escape_special_chars(value)
+ lasttype = ttype
+
+ if lastval:
+ stylebegin, styleend = self.styles[lasttype]
+ outfile.write(stylebegin + lastval + styleend)
+
+ outfile.write('</tt>')
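+
+
+# Illustrative sketch (the ``highlight`` helper and the lexer import path
+# are assumptions about the surrounding package, not used by pip itself):
+if __name__ == '__main__':
+    from pip._vendor.pygments import highlight
+    from pip._vendor.pygments.lexers.python import PythonLexer
+    # Prints "<tt>...</tt>" with <span fgcolor="#...">, <b>, <i>, <u> spans.
+    print(highlight('x = 1\n', PythonLexer(), PangoMarkupFormatter()))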
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/rtf.py b/third_party/python/pip/pip/_vendor/pygments/formatters/rtf.py
new file mode 100644
index 0000000000..4114d1688c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/rtf.py
@@ -0,0 +1,146 @@
+"""
+ pygments.formatters.rtf
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ A formatter that generates RTF files.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.util import get_int_opt, surrogatepair
+
+
+__all__ = ['RtfFormatter']
+
+
+class RtfFormatter(Formatter):
+ """
+ Format tokens as RTF markup. This formatter automatically outputs full RTF
+ documents with color information and other useful stuff. Perfect for Copy and
+ Paste into Microsoft(R) Word(R) documents.
+
+ Please note that ``encoding`` and ``outencoding`` options are ignored.
+ The RTF format is ASCII natively, but handles unicode characters correctly
+ thanks to escape sequences.
+
+ .. versionadded:: 0.6
+
+ Additional options accepted:
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+
+ `fontface`
+        The font family to use, for example ``Bitstream Vera Sans``. Defaults to
+ some generic font which is supposed to have fixed width.
+
+ `fontsize`
+ Size of the font used. Size is specified in half points. The
+ default is 24 half-points, giving a size 12 font.
+
+ .. versionadded:: 2.0
+ """
+ name = 'RTF'
+ aliases = ['rtf']
+ filenames = ['*.rtf']
+
+ def __init__(self, **options):
+ r"""
+ Additional options accepted:
+
+ ``fontface``
+ Name of the font used. Could for example be ``'Courier New'``
+ to further specify the default which is ``'\fmodern'``. The RTF
+            specification describes ``\fmodern`` fonts as "Fixed-pitch serif
+            and sans serif fonts"; hopefully every RTF implementation
+            interprets it that way.
+
+ """
+ Formatter.__init__(self, **options)
+ self.fontface = options.get('fontface') or ''
+ self.fontsize = get_int_opt(options, 'fontsize', 0)
+
+ def _escape(self, text):
+ return text.replace('\\', '\\\\') \
+ .replace('{', '\\{') \
+ .replace('}', '\\}')
+
+ def _escape_text(self, text):
+        # short-circuit on empty strings; gives a small performance improvement
+ if not text:
+ return ''
+
+ # escape text
+ text = self._escape(text)
+
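+        # Non-ASCII code points are emitted as RTF unicode escapes below:
+        # e.g. U+20AC (EURO SIGN) becomes '{\u8364}', and astral code points
+        # such as U+1F600 become the surrogate pair '{\u55357}{\u56832}'.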
+ buf = []
+ for c in text:
+ cn = ord(c)
+ if cn < (2**7):
+ # ASCII character
+ buf.append(str(c))
+ elif (2**7) <= cn < (2**16):
+ # single unicode escape sequence
+ buf.append('{\\u%d}' % cn)
+ elif (2**16) <= cn:
+ # RTF limits unicode to 16 bits.
+ # Force surrogate pairs
+ buf.append('{\\u%d}{\\u%d}' % surrogatepair(cn))
+
+ return ''.join(buf).replace('\n', '\\par\n')
+
+ def format_unencoded(self, tokensource, outfile):
+ # rtf 1.8 header
+ outfile.write('{\\rtf1\\ansi\\uc0\\deff0'
+ '{\\fonttbl{\\f0\\fmodern\\fprq1\\fcharset0%s;}}'
+ '{\\colortbl;' % (self.fontface and
+ ' ' + self._escape(self.fontface) or
+ ''))
+
+ # convert colors and save them in a mapping to access them later.
+ color_mapping = {}
+ offset = 1
+ for _, style in self.style:
+ for color in style['color'], style['bgcolor'], style['border']:
+ if color and color not in color_mapping:
+ color_mapping[color] = offset
+ outfile.write('\\red%d\\green%d\\blue%d;' % (
+ int(color[0:2], 16),
+ int(color[2:4], 16),
+ int(color[4:6], 16)
+ ))
+ offset += 1
+ outfile.write('}\\f0 ')
+ if self.fontsize:
+ outfile.write('\\fs%d' % self.fontsize)
+
+ # highlight stream
+ for ttype, value in tokensource:
+ while not self.style.styles_token(ttype) and ttype.parent:
+ ttype = ttype.parent
+ style = self.style.style_for_token(ttype)
+ buf = []
+ if style['bgcolor']:
+ buf.append('\\cb%d' % color_mapping[style['bgcolor']])
+ if style['color']:
+ buf.append('\\cf%d' % color_mapping[style['color']])
+ if style['bold']:
+ buf.append('\\b')
+ if style['italic']:
+ buf.append('\\i')
+ if style['underline']:
+ buf.append('\\ul')
+ if style['border']:
+ buf.append('\\chbrdr\\chcfpat%d' %
+ color_mapping[style['border']])
+ start = ''.join(buf)
+ if start:
+ outfile.write('{%s ' % start)
+ outfile.write(self._escape_text(value))
+ if start:
+ outfile.write('}')
+
+ outfile.write('}')
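+
+
+# Illustrative sketch (import paths assumed, not used by pip itself):
+# produces a complete RTF document; non-ASCII text comes out as {\uN} escapes.
+if __name__ == '__main__':
+    from pip._vendor.pygments import highlight
+    from pip._vendor.pygments.lexers.python import PythonLexer
+    rtf = highlight('s = "\u20ac"\n', PythonLexer(), RtfFormatter())
+    print(rtf)  # starts with {\rtf1\ansi\uc0\deff0{\fonttbl...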
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/svg.py b/third_party/python/pip/pip/_vendor/pygments/formatters/svg.py
new file mode 100644
index 0000000000..075150a4b5
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/svg.py
@@ -0,0 +1,188 @@
+"""
+ pygments.formatters.svg
+ ~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for SVG output.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.token import Comment
+from pip._vendor.pygments.util import get_bool_opt, get_int_opt
+
+__all__ = ['SvgFormatter']
+
+
+def escape_html(text):
+ """Escape &, <, > as well as single and double quotes for HTML."""
+ return text.replace('&', '&amp;'). \
+ replace('<', '&lt;'). \
+ replace('>', '&gt;'). \
+ replace('"', '&quot;'). \
+ replace("'", '&#39;')
+
+
+class2style = {}
+
+class SvgFormatter(Formatter):
+ """
+ Format tokens as an SVG graphics file. This formatter is still experimental.
+ Each line of code is a ``<text>`` element with explicit ``x`` and ``y``
+ coordinates containing ``<tspan>`` elements with the individual token styles.
+
+ By default, this formatter outputs a full SVG document including doctype
+ declaration and the ``<svg>`` root element.
+
+ .. versionadded:: 0.9
+
+ Additional options accepted:
+
+ `nowrap`
+ Don't wrap the SVG ``<text>`` elements in ``<svg><g>`` elements and
+        don't add an XML declaration and a doctype. If true, the `fontfamily`
+ and `fontsize` options are ignored. Defaults to ``False``.
+
+ `fontfamily`
+ The value to give the wrapping ``<g>`` element's ``font-family``
+ attribute, defaults to ``"monospace"``.
+
+ `fontsize`
+ The value to give the wrapping ``<g>`` element's ``font-size``
+ attribute, defaults to ``"14px"``.
+
+ `linenos`
+ If ``True``, add line numbers (default: ``False``).
+
+ `linenostart`
+ The line number for the first line (default: ``1``).
+
+ `linenostep`
+ If set to a number n > 1, only every nth line number is printed.
+
+ `linenowidth`
+ Maximum width devoted to line numbers (default: ``3*ystep``, sufficient
+ for up to 4-digit line numbers. Increase width for longer code blocks).
+
+ `xoffset`
+ Starting offset in X direction, defaults to ``0``.
+
+ `yoffset`
+ Starting offset in Y direction, defaults to the font size if it is given
+        in pixels, and ``20`` otherwise. (This is necessary since text coordinates
+ refer to the text baseline, not the top edge.)
+
+ `ystep`
+ Offset to add to the Y coordinate for each subsequent line. This should
+ roughly be the text size plus 5. It defaults to that value if the text
+        size is given in pixels, and ``25`` otherwise.
+
+ `spacehack`
+        Convert spaces in the source to ``&#160;``, the non-breaking
+        space entity. SVG provides the ``xml:space`` attribute to control how
+        whitespace inside tags is handled; in theory, the ``preserve`` value
+ could be used to keep all whitespace as-is. However, many current SVG
+ viewers don't obey that rule, so this option is provided as a workaround
+ and defaults to ``True``.
+ """
+ name = 'SVG'
+ aliases = ['svg']
+ filenames = ['*.svg']
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ self.nowrap = get_bool_opt(options, 'nowrap', False)
+ self.fontfamily = options.get('fontfamily', 'monospace')
+ self.fontsize = options.get('fontsize', '14px')
+ self.xoffset = get_int_opt(options, 'xoffset', 0)
+ fs = self.fontsize.strip()
+        if fs.endswith('px'):
+            fs = fs[:-2].strip()
+        try:
+            int_fs = int(fs)
+        except ValueError:
+            int_fs = 20
+ self.yoffset = get_int_opt(options, 'yoffset', int_fs)
+ self.ystep = get_int_opt(options, 'ystep', int_fs + 5)
+ self.spacehack = get_bool_opt(options, 'spacehack', True)
+        self.linenos = get_bool_opt(options, 'linenos', False)
+        self.linenostart = get_int_opt(options, 'linenostart', 1)
+        self.linenostep = get_int_opt(options, 'linenostep', 1)
+        self.linenowidth = get_int_opt(options, 'linenowidth', 3 * self.ystep)
+ self._stylecache = {}
+
+ def format_unencoded(self, tokensource, outfile):
+ """
+ Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
+ tuples and write it into ``outfile``.
+
+ For our implementation we put all lines in their own 'line group'.
+ """
+ x = self.xoffset
+ y = self.yoffset
+ if not self.nowrap:
+ if self.encoding:
+ outfile.write('<?xml version="1.0" encoding="%s"?>\n' %
+ self.encoding)
+ else:
+ outfile.write('<?xml version="1.0"?>\n')
+ outfile.write('<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" '
+ '"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/'
+ 'svg10.dtd">\n')
+ outfile.write('<svg xmlns="http://www.w3.org/2000/svg">\n')
+ outfile.write('<g font-family="%s" font-size="%s">\n' %
+ (self.fontfamily, self.fontsize))
+
+ counter = self.linenostart
+ counter_step = self.linenostep
+ counter_style = self._get_style(Comment)
+ line_x = x
+
+ if self.linenos:
+ if counter % counter_step == 0:
+                outfile.write('<text x="%s" y="%s" %s text-anchor="end">%s</text>' %
+                              (x + self.linenowidth, y, counter_style, counter))
+ line_x += self.linenowidth + self.ystep
+ counter += 1
+
+ outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
+ for ttype, value in tokensource:
+ style = self._get_style(ttype)
+ tspan = style and '<tspan' + style + '>' or ''
+ tspanend = tspan and '</tspan>' or ''
+ value = escape_html(value)
+ if self.spacehack:
+ value = value.expandtabs().replace(' ', '&#160;')
+ parts = value.split('\n')
+ for part in parts[:-1]:
+ outfile.write(tspan + part + tspanend)
+ y += self.ystep
+ outfile.write('</text>\n')
+ if self.linenos and counter % counter_step == 0:
+                outfile.write('<text x="%s" y="%s" text-anchor="end" %s>%s</text>' %
+                              (x + self.linenowidth, y, counter_style, counter))
+
+ counter += 1
+            outfile.write('<text x="%s" y="%s" xml:space="preserve">' % (line_x, y))
+ outfile.write(tspan + parts[-1] + tspanend)
+ outfile.write('</text>')
+
+ if not self.nowrap:
+ outfile.write('</g></svg>\n')
+
+ def _get_style(self, tokentype):
+ if tokentype in self._stylecache:
+ return self._stylecache[tokentype]
+ otokentype = tokentype
+ while not self.style.styles_token(tokentype):
+ tokentype = tokentype.parent
+ value = self.style.style_for_token(tokentype)
+ result = ''
+ if value['color']:
+ result = ' fill="#' + value['color'] + '"'
+ if value['bold']:
+ result += ' font-weight="bold"'
+ if value['italic']:
+ result += ' font-style="italic"'
+ self._stylecache[otokentype] = result
+ return result
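+
+
+# Illustrative sketch (import paths assumed, not used by pip itself): each
+# source line becomes a <text> element whose <tspan> children carry the
+# fill/font attributes computed by _get_style().
+if __name__ == '__main__':
+    from pip._vendor.pygments import highlight
+    from pip._vendor.pygments.lexers.python import PythonLexer
+    print(highlight('x = 1\n', PythonLexer(), SvgFormatter(linenos=True)))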
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/terminal.py b/third_party/python/pip/pip/_vendor/pygments/formatters/terminal.py
new file mode 100644
index 0000000000..e0bda16a23
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/terminal.py
@@ -0,0 +1,127 @@
+"""
+ pygments.formatters.terminal
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for terminal output with ANSI sequences.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \
+ Number, Operator, Generic, Token, Whitespace
+from pip._vendor.pygments.console import ansiformat
+from pip._vendor.pygments.util import get_choice_opt
+
+
+__all__ = ['TerminalFormatter']
+
+
+#: Map token types to a tuple of color values for light and dark
+#: backgrounds.
+TERMINAL_COLORS = {
+ Token: ('', ''),
+
+ Whitespace: ('gray', 'brightblack'),
+ Comment: ('gray', 'brightblack'),
+ Comment.Preproc: ('cyan', 'brightcyan'),
+ Keyword: ('blue', 'brightblue'),
+ Keyword.Type: ('cyan', 'brightcyan'),
+ Operator.Word: ('magenta', 'brightmagenta'),
+ Name.Builtin: ('cyan', 'brightcyan'),
+ Name.Function: ('green', 'brightgreen'),
+ Name.Namespace: ('_cyan_', '_brightcyan_'),
+ Name.Class: ('_green_', '_brightgreen_'),
+ Name.Exception: ('cyan', 'brightcyan'),
+ Name.Decorator: ('brightblack', 'gray'),
+ Name.Variable: ('red', 'brightred'),
+ Name.Constant: ('red', 'brightred'),
+ Name.Attribute: ('cyan', 'brightcyan'),
+ Name.Tag: ('brightblue', 'brightblue'),
+ String: ('yellow', 'yellow'),
+ Number: ('blue', 'brightblue'),
+
+ Generic.Deleted: ('brightred', 'brightred'),
+ Generic.Inserted: ('green', 'brightgreen'),
+ Generic.Heading: ('**', '**'),
+ Generic.Subheading: ('*magenta*', '*brightmagenta*'),
+ Generic.Prompt: ('**', '**'),
+ Generic.Error: ('brightred', 'brightred'),
+
+ Error: ('_brightred_', '_brightred_'),
+}
+
+
+class TerminalFormatter(Formatter):
+ r"""
+ Format tokens with ANSI color sequences, for output in a text console.
+ Color sequences are terminated at newlines, so that paging the output
+ works correctly.
+
+ The `get_style_defs()` method doesn't do anything special since there is
+ no support for common styles.
+
+ Options accepted:
+
+ `bg`
+ Set to ``"light"`` or ``"dark"`` depending on the terminal's background
+ (default: ``"light"``).
+
+ `colorscheme`
+ A dictionary mapping token types to (lightbg, darkbg) color names or
+ ``None`` (default: ``None`` = use builtin colorscheme).
+
+ `linenos`
+ Set to ``True`` to have line numbers on the terminal output as well
+ (default: ``False`` = no line numbers).
+ """
+ name = 'Terminal'
+ aliases = ['terminal', 'console']
+ filenames = []
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+ self.darkbg = get_choice_opt(options, 'bg',
+ ['light', 'dark'], 'light') == 'dark'
+ self.colorscheme = options.get('colorscheme', None) or TERMINAL_COLORS
+ self.linenos = options.get('linenos', False)
+ self._lineno = 0
+
+ def format(self, tokensource, outfile):
+ return Formatter.format(self, tokensource, outfile)
+
+ def _write_lineno(self, outfile):
+ self._lineno += 1
+ outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
+
+ def _get_color(self, ttype):
+        # self.colorscheme is a dict usually containing generic token types,
+        # so we have to walk the tree of dots. The base Token type must be a
+        # key, even if its entry is the empty string, as in the default above.
+ colors = self.colorscheme.get(ttype)
+ while colors is None:
+ ttype = ttype.parent
+ colors = self.colorscheme.get(ttype)
+ return colors[self.darkbg]
+
+ def format_unencoded(self, tokensource, outfile):
+ if self.linenos:
+ self._write_lineno(outfile)
+
+ for ttype, value in tokensource:
+ color = self._get_color(ttype)
+
+ for line in value.splitlines(True):
+ if color:
+ outfile.write(ansiformat(color, line.rstrip('\n')))
+ else:
+ outfile.write(line.rstrip('\n'))
+ if line.endswith('\n'):
+ if self.linenos:
+ self._write_lineno(outfile)
+ else:
+ outfile.write('\n')
+
+ if self.linenos:
+ outfile.write("\n")
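+
+
+# Illustrative sketch (import paths assumed, not used by pip itself): emits
+# ANSI-colored text, with color sequences reset at every newline.
+if __name__ == '__main__':
+    import sys
+    from pip._vendor.pygments import highlight
+    from pip._vendor.pygments.lexers.python import PythonLexer
+    sys.stdout.write(highlight('x = 1\n', PythonLexer(),
+                               TerminalFormatter(bg='dark', linenos=True)))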
diff --git a/third_party/python/pip/pip/_vendor/pygments/formatters/terminal256.py b/third_party/python/pip/pip/_vendor/pygments/formatters/terminal256.py
new file mode 100644
index 0000000000..201b3c3283
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/formatters/terminal256.py
@@ -0,0 +1,338 @@
+"""
+ pygments.formatters.terminal256
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ Formatter for 256-color terminal output with ANSI sequences.
+
+ RGB-to-XTERM color conversion routines adapted from xterm256-conv
+ tool (http://frexx.de/xterm-256-notes/data/xterm256-conv2.tar.bz2)
+ by Wolfgang Frisch.
+
+ Formatter version 1.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+# TODO:
+# - Options to map style's bold/underline/italic/border attributes
+#   to some ANSI attributes (something like 'italic=underline')
+# - An option to output "style RGB to xterm RGB/index" conversion table
+# - An option to indicate that we are running in "reverse background"
+# xterm. This means that default colors are white-on-black, not
+# black-on-while, so colors like "white background" need to be converted
+# to "white background, black foreground", etc...
+
+from pip._vendor.pygments.formatter import Formatter
+from pip._vendor.pygments.console import codes
+from pip._vendor.pygments.style import ansicolors
+
+
+__all__ = ['Terminal256Formatter', 'TerminalTrueColorFormatter']
+
+
+class EscapeSequence:
+ def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
+ self.fg = fg
+ self.bg = bg
+ self.bold = bold
+ self.underline = underline
+ self.italic = italic
+
+ def escape(self, attrs):
+ if len(attrs):
+ return "\x1b[" + ";".join(attrs) + "m"
+ return ""
+
+ def color_string(self):
+ attrs = []
+ if self.fg is not None:
+ if self.fg in ansicolors:
+                esc = codes[self.fg.replace('ansi', '')]
+ if ';01m' in esc:
+ self.bold = True
+ # extract fg color code.
+ attrs.append(esc[2:4])
+ else:
+ attrs.extend(("38", "5", "%i" % self.fg))
+ if self.bg is not None:
+ if self.bg in ansicolors:
+                esc = codes[self.bg.replace('ansi', '')]
+ # extract fg color code, add 10 for bg.
+ attrs.append(str(int(esc[2:4])+10))
+ else:
+ attrs.extend(("48", "5", "%i" % self.bg))
+ if self.bold:
+ attrs.append("01")
+ if self.underline:
+ attrs.append("04")
+ if self.italic:
+ attrs.append("03")
+ return self.escape(attrs)
+
+ def true_color_string(self):
+ attrs = []
+ if self.fg:
+ attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
+ if self.bg:
+ attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
+ if self.bold:
+ attrs.append("01")
+ if self.underline:
+ attrs.append("04")
+ if self.italic:
+ attrs.append("03")
+ return self.escape(attrs)
+
+ def reset_string(self):
+ attrs = []
+ if self.fg is not None:
+ attrs.append("39")
+ if self.bg is not None:
+ attrs.append("49")
+ if self.bold or self.underline or self.italic:
+ attrs.append("00")
+ return self.escape(attrs)
+
+
+class Terminal256Formatter(Formatter):
+ """
+ Format tokens with ANSI color sequences, for output in a 256-color
+    terminal or console. Like in `TerminalFormatter`, color sequences
+ are terminated at newlines, so that paging the output works correctly.
+
+ The formatter takes colors from a style defined by the `style` option
+    and converts them to the nearest ANSI 256-color escape sequences. Bold and
+ underline attributes from the style are preserved (and displayed).
+
+ .. versionadded:: 0.9
+
+ .. versionchanged:: 2.2
+ If the used style defines foreground colors in the form ``#ansi*``, then
+        `Terminal256Formatter` will map these to non-extended foreground colors.
+ See :ref:`AnsiTerminalStyle` for more information.
+
+ .. versionchanged:: 2.4
+ The ANSI color names have been updated with names that are easier to
+ understand and align with colornames of other projects and terminals.
+ See :ref:`this table <new-ansi-color-names>` for more information.
+
+
+ Options accepted:
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+
+ `linenos`
+ Set to ``True`` to have line numbers on the terminal output as well
+ (default: ``False`` = no line numbers).
+ """
+ name = 'Terminal256'
+ aliases = ['terminal256', 'console256', '256']
+ filenames = []
+
+ def __init__(self, **options):
+ Formatter.__init__(self, **options)
+
+ self.xterm_colors = []
+ self.best_match = {}
+ self.style_string = {}
+
+ self.usebold = 'nobold' not in options
+ self.useunderline = 'nounderline' not in options
+ self.useitalic = 'noitalic' not in options
+
+ self._build_color_table() # build an RGB-to-256 color conversion table
+ self._setup_styles() # convert selected style's colors to term. colors
+
+ self.linenos = options.get('linenos', False)
+ self._lineno = 0
+
+ def _build_color_table(self):
+ # colors 0..15: 16 basic colors
+
+ self.xterm_colors.append((0x00, 0x00, 0x00)) # 0
+ self.xterm_colors.append((0xcd, 0x00, 0x00)) # 1
+ self.xterm_colors.append((0x00, 0xcd, 0x00)) # 2
+ self.xterm_colors.append((0xcd, 0xcd, 0x00)) # 3
+ self.xterm_colors.append((0x00, 0x00, 0xee)) # 4
+ self.xterm_colors.append((0xcd, 0x00, 0xcd)) # 5
+ self.xterm_colors.append((0x00, 0xcd, 0xcd)) # 6
+ self.xterm_colors.append((0xe5, 0xe5, 0xe5)) # 7
+ self.xterm_colors.append((0x7f, 0x7f, 0x7f)) # 8
+ self.xterm_colors.append((0xff, 0x00, 0x00)) # 9
+ self.xterm_colors.append((0x00, 0xff, 0x00)) # 10
+ self.xterm_colors.append((0xff, 0xff, 0x00)) # 11
+ self.xterm_colors.append((0x5c, 0x5c, 0xff)) # 12
+ self.xterm_colors.append((0xff, 0x00, 0xff)) # 13
+ self.xterm_colors.append((0x00, 0xff, 0xff)) # 14
+ self.xterm_colors.append((0xff, 0xff, 0xff)) # 15
+
+ # colors 16..232: the 6x6x6 color cube
+
+ valuerange = (0x00, 0x5f, 0x87, 0xaf, 0xd7, 0xff)
+
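+        # Each cube index is 16 + 36*r + 6*g + b with r, g, b in 0..5, taking
+        # the channel levels from ``valuerange``; e.g. xterm color 196
+        # (= 16 + 36*5) is pure red (0xff, 0x00, 0x00).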
+ for i in range(217):
+ r = valuerange[(i // 36) % 6]
+ g = valuerange[(i // 6) % 6]
+ b = valuerange[i % 6]
+ self.xterm_colors.append((r, g, b))
+
+ # colors 233..253: grayscale
+
+ for i in range(1, 22):
+ v = 8 + i * 10
+ self.xterm_colors.append((v, v, v))
+
+ def _closest_color(self, r, g, b):
+ distance = 257*257*3 # "infinity" (>distance from #000000 to #ffffff)
+ match = 0
+
+ for i in range(0, 254):
+ values = self.xterm_colors[i]
+
+ rd = r - values[0]
+ gd = g - values[1]
+ bd = b - values[2]
+ d = rd*rd + gd*gd + bd*bd
+
+ if d < distance:
+ match = i
+ distance = d
+ return match
+
+ def _color_index(self, color):
+ index = self.best_match.get(color, None)
+ if color in ansicolors:
+ # strip the `ansi/#ansi` part and look up code
+ index = color
+ self.best_match[color] = index
+ if index is None:
+ try:
+ rgb = int(str(color), 16)
+ except ValueError:
+ rgb = 0
+
+ r = (rgb >> 16) & 0xff
+ g = (rgb >> 8) & 0xff
+ b = rgb & 0xff
+ index = self._closest_color(r, g, b)
+ self.best_match[color] = index
+ return index
+
+ def _setup_styles(self):
+ for ttype, ndef in self.style:
+ escape = EscapeSequence()
+ # get foreground from ansicolor if set
+ if ndef['ansicolor']:
+ escape.fg = self._color_index(ndef['ansicolor'])
+ elif ndef['color']:
+ escape.fg = self._color_index(ndef['color'])
+ if ndef['bgansicolor']:
+ escape.bg = self._color_index(ndef['bgansicolor'])
+ elif ndef['bgcolor']:
+ escape.bg = self._color_index(ndef['bgcolor'])
+ if self.usebold and ndef['bold']:
+ escape.bold = True
+ if self.useunderline and ndef['underline']:
+ escape.underline = True
+ if self.useitalic and ndef['italic']:
+ escape.italic = True
+ self.style_string[str(ttype)] = (escape.color_string(),
+ escape.reset_string())
+
+ def _write_lineno(self, outfile):
+ self._lineno += 1
+ outfile.write("%s%04d: " % (self._lineno != 1 and '\n' or '', self._lineno))
+
+ def format(self, tokensource, outfile):
+ return Formatter.format(self, tokensource, outfile)
+
+ def format_unencoded(self, tokensource, outfile):
+ if self.linenos:
+ self._write_lineno(outfile)
+
+ for ttype, value in tokensource:
+ not_found = True
+ while ttype and not_found:
+ try:
+ # outfile.write( "<" + str(ttype) + ">" )
+ on, off = self.style_string[str(ttype)]
+
+ # Like TerminalFormatter, add "reset colors" escape sequence
+ # on newline.
+ spl = value.split('\n')
+ for line in spl[:-1]:
+ if line:
+ outfile.write(on + line + off)
+ if self.linenos:
+ self._write_lineno(outfile)
+ else:
+ outfile.write('\n')
+
+ if spl[-1]:
+ outfile.write(on + spl[-1] + off)
+
+ not_found = False
+ # outfile.write( '#' + str(ttype) + '#' )
+
+ except KeyError:
+ # ottype = ttype
+ ttype = ttype.parent
+ # outfile.write( '!' + str(ottype) + '->' + str(ttype) + '!' )
+
+ if not_found:
+ outfile.write(value)
+
+ if self.linenos:
+ outfile.write("\n")
+
+
+class TerminalTrueColorFormatter(Terminal256Formatter):
+ r"""
+ Format tokens with ANSI color sequences, for output in a true-color
+    terminal or console. Like in `TerminalFormatter`, color sequences
+ are terminated at newlines, so that paging the output works correctly.
+
+ .. versionadded:: 2.1
+
+ Options accepted:
+
+ `style`
+ The style to use, can be a string or a Style subclass (default:
+ ``'default'``).
+ """
+ name = 'TerminalTrueColor'
+ aliases = ['terminal16m', 'console16m', '16m']
+ filenames = []
+
+ def _build_color_table(self):
+ pass
+
+ def _color_tuple(self, color):
+ try:
+ rgb = int(str(color), 16)
+ except ValueError:
+ return None
+ r = (rgb >> 16) & 0xff
+ g = (rgb >> 8) & 0xff
+ b = rgb & 0xff
+ return (r, g, b)
+
+ def _setup_styles(self):
+ for ttype, ndef in self.style:
+ escape = EscapeSequence()
+ if ndef['color']:
+ escape.fg = self._color_tuple(ndef['color'])
+ if ndef['bgcolor']:
+ escape.bg = self._color_tuple(ndef['bgcolor'])
+ if self.usebold and ndef['bold']:
+ escape.bold = True
+ if self.useunderline and ndef['underline']:
+ escape.underline = True
+ if self.useitalic and ndef['italic']:
+ escape.italic = True
+ self.style_string[str(ttype)] = (escape.true_color_string(),
+ escape.reset_string())
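+
+
+# Sketch of the escape machinery in isolation (illustrative only): a
+# 256-color bold foreground and its matching reset sequence.
+if __name__ == '__main__':
+    seq = EscapeSequence(fg=196, bold=True)
+    print(repr(seq.color_string()))  # '\x1b[38;5;196;01m'
+    print(repr(seq.reset_string()))  # '\x1b[39;00m'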
diff --git a/third_party/python/pip/pip/_vendor/pygments/lexer.py b/third_party/python/pip/pip/_vendor/pygments/lexer.py
new file mode 100644
index 0000000000..ec7f4de32c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/lexer.py
@@ -0,0 +1,882 @@
+"""
+ pygments.lexer
+ ~~~~~~~~~~~~~~
+
+ Base lexer classes.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import sys
+import time
+
+from pip._vendor.pygments.filter import apply_filters, Filter
+from pip._vendor.pygments.filters import get_filter_by_name
+from pip._vendor.pygments.token import Error, Text, Other, _TokenType
+from pip._vendor.pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
+ make_analysator, Future, guess_decode
+from pip._vendor.pygments.regexopt import regex_opt
+
+__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
+ 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
+ 'default', 'words']
+
+
+_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
+ (b'\xff\xfe\0\0', 'utf-32'),
+ (b'\0\0\xfe\xff', 'utf-32be'),
+ (b'\xff\xfe', 'utf-16'),
+ (b'\xfe\xff', 'utf-16be')]
+
+_default_analyse = staticmethod(lambda x: 0.0)
+
+
+class LexerMeta(type):
+ """
+ This metaclass automagically converts ``analyse_text`` methods into
+ static methods which always return float values.
+ """
+
+ def __new__(mcs, name, bases, d):
+ if 'analyse_text' in d:
+ d['analyse_text'] = make_analysator(d['analyse_text'])
+ return type.__new__(mcs, name, bases, d)
+
+
+class Lexer(metaclass=LexerMeta):
+ """
+ Lexer for a specific language.
+
+ Basic options recognized:
+ ``stripnl``
+ Strip leading and trailing newlines from the input (default: True).
+ ``stripall``
+ Strip all leading and trailing whitespace from the input
+ (default: False).
+ ``ensurenl``
+ Make sure that the input ends with a newline (default: True). This
+ is required for some lexers that consume input linewise.
+
+ .. versionadded:: 1.3
+
+ ``tabsize``
+ If given and greater than 0, expand tabs in the input (default: 0).
+ ``encoding``
+ If given, must be an encoding name. This encoding will be used to
+ convert the input string to Unicode, if it is not already a Unicode
+ string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
+            Latin1 detection). Can also be ``'chardet'`` to use the chardet
+ library, if it is installed.
+ ``inencoding``
+ Overrides the ``encoding`` if given.
+ """
+
+ #: Name of the lexer
+ name = None
+
+ #: URL of the language specification/definition
+ url = None
+
+ #: Shortcuts for the lexer
+ aliases = []
+
+ #: File name globs
+ filenames = []
+
+ #: Secondary file name globs
+ alias_filenames = []
+
+ #: MIME types
+ mimetypes = []
+
+ #: Priority, should multiple lexers match and no content is provided
+ priority = 0
+
+ def __init__(self, **options):
+ self.options = options
+ self.stripnl = get_bool_opt(options, 'stripnl', True)
+ self.stripall = get_bool_opt(options, 'stripall', False)
+ self.ensurenl = get_bool_opt(options, 'ensurenl', True)
+ self.tabsize = get_int_opt(options, 'tabsize', 0)
+ self.encoding = options.get('encoding', 'guess')
+ self.encoding = options.get('inencoding') or self.encoding
+ self.filters = []
+ for filter_ in get_list_opt(options, 'filters', ()):
+ self.add_filter(filter_)
+
+ def __repr__(self):
+ if self.options:
+ return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
+ self.options)
+ else:
+ return '<pygments.lexers.%s>' % self.__class__.__name__
+
+ def add_filter(self, filter_, **options):
+ """
+ Add a new stream filter to this lexer.
+ """
+ if not isinstance(filter_, Filter):
+ filter_ = get_filter_by_name(filter_, **options)
+ self.filters.append(filter_)
+
+ def analyse_text(text):
+ """
+ Has to return a float between ``0`` and ``1`` that indicates
+ if a lexer wants to highlight this text. Used by ``guess_lexer``.
+ If this method returns ``0`` it won't highlight it in any case, if
+ it returns ``1`` highlighting with this lexer is guaranteed.
+
+ The `LexerMeta` metaclass automatically wraps this function so
+ that it works like a static method (no ``self`` or ``cls``
+ parameter) and the return value is automatically converted to
+ `float`. If the return value is an object that is boolean `False`
+        it's the same as if the return value was ``0.0``.
+ """
+
+ def get_tokens(self, text, unfiltered=False):
+ """
+ Return an iterable of (tokentype, value) pairs generated from
+ `text`. If `unfiltered` is set to `True`, the filtering mechanism
+ is bypassed even if filters are defined.
+
+        Also preprocess the text: expand tabs, strip it if wanted,
+        and apply registered filters.
+ """
+ if not isinstance(text, str):
+ if self.encoding == 'guess':
+ text, _ = guess_decode(text)
+ elif self.encoding == 'chardet':
+ try:
+ from pip._vendor import chardet
+ except ImportError as e:
+ raise ImportError('To enable chardet encoding guessing, '
+ 'please install the chardet library '
+ 'from http://chardet.feedparser.org/') from e
+ # check for BOM first
+ decoded = None
+ for bom, encoding in _encoding_map:
+ if text.startswith(bom):
+ decoded = text[len(bom):].decode(encoding, 'replace')
+ break
+ # no BOM found, so use chardet
+ if decoded is None:
+ enc = chardet.detect(text[:1024]) # Guess using first 1KB
+ decoded = text.decode(enc.get('encoding') or 'utf-8',
+ 'replace')
+ text = decoded
+ else:
+ text = text.decode(self.encoding)
+ if text.startswith('\ufeff'):
+ text = text[len('\ufeff'):]
+ else:
+ if text.startswith('\ufeff'):
+ text = text[len('\ufeff'):]
+
+ # text now *is* a unicode string
+ text = text.replace('\r\n', '\n')
+ text = text.replace('\r', '\n')
+ if self.stripall:
+ text = text.strip()
+ elif self.stripnl:
+ text = text.strip('\n')
+ if self.tabsize > 0:
+ text = text.expandtabs(self.tabsize)
+ if self.ensurenl and not text.endswith('\n'):
+ text += '\n'
+
+ def streamer():
+ for _, t, v in self.get_tokens_unprocessed(text):
+ yield t, v
+ stream = streamer()
+ if not unfiltered:
+ stream = apply_filters(stream, self.filters, self)
+ return stream
+
+ def get_tokens_unprocessed(self, text):
+ """
+ Return an iterable of (index, tokentype, value) pairs where "index"
+ is the starting position of the token within the input text.
+
+ In subclasses, implement this method as a generator to
+ maximize effectiveness.
+ """
+ raise NotImplementedError
+
+
+class DelegatingLexer(Lexer):
+ """
+    This lexer takes two lexers as arguments: a root lexer and
+ a language lexer. First everything is scanned using the language
+ lexer, afterwards all ``Other`` tokens are lexed using the root
+ lexer.
+
+ The lexers from the ``template`` lexer package use this base lexer.
+ """
+
+ def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
+ self.root_lexer = _root_lexer(**options)
+ self.language_lexer = _language_lexer(**options)
+ self.needle = _needle
+ Lexer.__init__(self, **options)
+
+ def get_tokens_unprocessed(self, text):
+ buffered = ''
+ insertions = []
+ lng_buffer = []
+ for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
+ if t is self.needle:
+ if lng_buffer:
+ insertions.append((len(buffered), lng_buffer))
+ lng_buffer = []
+ buffered += v
+ else:
+ lng_buffer.append((i, t, v))
+ if lng_buffer:
+ insertions.append((len(buffered), lng_buffer))
+ return do_insertions(insertions,
+ self.root_lexer.get_tokens_unprocessed(buffered))
+
+
+# ------------------------------------------------------------------------------
+# RegexLexer and ExtendedRegexLexer
+#
+
+
+class include(str): # pylint: disable=invalid-name
+ """
+ Indicates that a state should include rules from another state.
+ """
+ pass
+
+
+class _inherit:
+ """
+    Indicates that a state should inherit from its superclass.
+ """
+ def __repr__(self):
+ return 'inherit'
+
+inherit = _inherit() # pylint: disable=invalid-name
+
+
+class combined(tuple): # pylint: disable=invalid-name
+ """
+ Indicates a state combined from multiple states.
+ """
+
+ def __new__(cls, *args):
+ return tuple.__new__(cls, args)
+
+ def __init__(self, *args):
+ # tuple.__init__ doesn't do anything
+ pass
+
+
+class _PseudoMatch:
+ """
+ A pseudo match object constructed from a string.
+ """
+
+ def __init__(self, start, text):
+ self._text = text
+ self._start = start
+
+ def start(self, arg=None):
+ return self._start
+
+ def end(self, arg=None):
+ return self._start + len(self._text)
+
+ def group(self, arg=None):
+ if arg:
+ raise IndexError('No such group')
+ return self._text
+
+ def groups(self):
+ return (self._text,)
+
+ def groupdict(self):
+ return {}
+
+
+def bygroups(*args):
+ """
+ Callback that yields multiple actions for each group in the match.
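+
+    For example (sketch), a rule such as
+    ``(r'(\w+)(\s*=\s*)(\w+)', bygroups(Name.Attribute, Operator, String))``
+    emits one token per parenthesized group, each with its own type.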
+ """
+ def callback(lexer, match, ctx=None):
+ for i, action in enumerate(args):
+ if action is None:
+ continue
+ elif type(action) is _TokenType:
+ data = match.group(i + 1)
+ if data:
+ yield match.start(i + 1), action, data
+ else:
+ data = match.group(i + 1)
+ if data is not None:
+ if ctx:
+ ctx.pos = match.start(i + 1)
+ for item in action(lexer,
+ _PseudoMatch(match.start(i + 1), data), ctx):
+ if item:
+ yield item
+ if ctx:
+ ctx.pos = match.end()
+ return callback
+
+
+class _This:
+ """
+ Special singleton used for indicating the caller class.
+ Used by ``using``.
+ """
+
+this = _This()
+
+
+def using(_other, **kwargs):
+ """
+ Callback that processes the match with a different lexer.
+
+ The keyword arguments are forwarded to the lexer, except `state` which
+ is handled separately.
+
+ `state` specifies the state that the new lexer will start in, and can
+ be an enumerable such as ('root', 'inline', 'string') or a simple
+ string which is assumed to be on top of the root state.
+
+ Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
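+
+    For example (sketch), passing ``using(PythonLexer)`` as a rule's token
+    action re-lexes the matched text with a fresh `PythonLexer` instance.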
+ """
+ gt_kwargs = {}
+ if 'state' in kwargs:
+ s = kwargs.pop('state')
+ if isinstance(s, (list, tuple)):
+ gt_kwargs['stack'] = s
+ else:
+ gt_kwargs['stack'] = ('root', s)
+
+ if _other is this:
+ def callback(lexer, match, ctx=None):
+ # if keyword arguments are given the callback
+ # function has to create a new lexer instance
+ if kwargs:
+ # XXX: cache that somehow
+ kwargs.update(lexer.options)
+ lx = lexer.__class__(**kwargs)
+ else:
+ lx = lexer
+ s = match.start()
+ for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
+ yield i + s, t, v
+ if ctx:
+ ctx.pos = match.end()
+ else:
+ def callback(lexer, match, ctx=None):
+ # XXX: cache that somehow
+ kwargs.update(lexer.options)
+ lx = _other(**kwargs)
+
+ s = match.start()
+ for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
+ yield i + s, t, v
+ if ctx:
+ ctx.pos = match.end()
+ return callback
+
+
+class default:
+ """
+    Indicates a state or state action (e.g. ``#pop``) to apply.
+    For example, ``default('#pop')`` is equivalent to ``('', Token, '#pop')``.
+    Note that state tuples may be used as well.
+
+ .. versionadded:: 2.0
+ """
+ def __init__(self, state):
+ self.state = state
+
+
+class words(Future):
+ """
+ Indicates a list of literal words that is transformed into an optimized
+ regex that matches any of the words.
+
+ .. versionadded:: 2.0
+ """
+ def __init__(self, words, prefix='', suffix=''):
+ self.words = words
+ self.prefix = prefix
+ self.suffix = suffix
+
+ def get(self):
+ return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
+
+
+class RegexLexerMeta(LexerMeta):
+ """
+ Metaclass for RegexLexer, creates the self._tokens attribute from
+ self.tokens on the first instantiation.
+ """
+
+ def _process_regex(cls, regex, rflags, state):
+ """Preprocess the regular expression component of a token definition."""
+ if isinstance(regex, Future):
+ regex = regex.get()
+ return re.compile(regex, rflags).match
+
+ def _process_token(cls, token):
+ """Preprocess the token component of a token definition."""
+ assert type(token) is _TokenType or callable(token), \
+ 'token type must be simple type or callable, not %r' % (token,)
+ return token
+
+ def _process_new_state(cls, new_state, unprocessed, processed):
+ """Preprocess the state transition action of a token definition."""
+ if isinstance(new_state, str):
+ # an existing state
+ if new_state == '#pop':
+ return -1
+ elif new_state in unprocessed:
+ return (new_state,)
+ elif new_state == '#push':
+ return new_state
+ elif new_state[:5] == '#pop:':
+ return -int(new_state[5:])
+ else:
+ assert False, 'unknown new state %r' % new_state
+ elif isinstance(new_state, combined):
+ # combine a new state from existing ones
+ tmp_state = '_tmp_%d' % cls._tmpname
+ cls._tmpname += 1
+ itokens = []
+ for istate in new_state:
+ assert istate != new_state, 'circular state ref %r' % istate
+ itokens.extend(cls._process_state(unprocessed,
+ processed, istate))
+ processed[tmp_state] = itokens
+ return (tmp_state,)
+ elif isinstance(new_state, tuple):
+ # push more than one state
+ for istate in new_state:
+ assert (istate in unprocessed or
+ istate in ('#pop', '#push')), \
+ 'unknown new state ' + istate
+ return new_state
+ else:
+ assert False, 'unknown new state def %r' % new_state
+
+ def _process_state(cls, unprocessed, processed, state):
+ """Preprocess a single state definition."""
+ assert type(state) is str, "wrong state name %r" % state
+ assert state[0] != '#', "invalid state name %r" % state
+ if state in processed:
+ return processed[state]
+ tokens = processed[state] = []
+ rflags = cls.flags
+ for tdef in unprocessed[state]:
+ if isinstance(tdef, include):
+ # it's a state reference
+ assert tdef != state, "circular state reference %r" % state
+ tokens.extend(cls._process_state(unprocessed, processed,
+ str(tdef)))
+ continue
+ if isinstance(tdef, _inherit):
+                # should already be processed, but may not be in the case of:
+ # 1. the state has no counterpart in any parent
+ # 2. the state includes more than one 'inherit'
+ continue
+ if isinstance(tdef, default):
+ new_state = cls._process_new_state(tdef.state, unprocessed, processed)
+ tokens.append((re.compile('').match, None, new_state))
+ continue
+
+ assert type(tdef) is tuple, "wrong rule def %r" % tdef
+
+ try:
+ rex = cls._process_regex(tdef[0], rflags, state)
+ except Exception as err:
+ raise ValueError("uncompilable regex %r in state %r of %r: %s" %
+ (tdef[0], state, cls, err)) from err
+
+ token = cls._process_token(tdef[1])
+
+ if len(tdef) == 2:
+ new_state = None
+ else:
+ new_state = cls._process_new_state(tdef[2],
+ unprocessed, processed)
+
+ tokens.append((rex, token, new_state))
+ return tokens
+
+ def process_tokendef(cls, name, tokendefs=None):
+ """Preprocess a dictionary of token definitions."""
+ processed = cls._all_tokens[name] = {}
+ tokendefs = tokendefs or cls.tokens[name]
+ for state in list(tokendefs):
+ cls._process_state(tokendefs, processed, state)
+ return processed
+
+ def get_tokendefs(cls):
+ """
+ Merge tokens from superclasses in MRO order, returning a single tokendef
+ dictionary.
+
+ Any state that is not defined by a subclass will be inherited
+ automatically. States that *are* defined by subclasses will, by
+ default, override that state in the superclass. If a subclass wishes to
+ inherit definitions from a superclass, it can use the special value
+ "inherit", which will cause the superclass' state definition to be
+ included at that point in the state.
+ """
+ tokens = {}
+ inheritable = {}
+ for c in cls.__mro__:
+ toks = c.__dict__.get('tokens', {})
+
+ for state, items in toks.items():
+ curitems = tokens.get(state)
+ if curitems is None:
+ # N.b. because this is assigned by reference, sufficiently
+ # deep hierarchies are processed incrementally (e.g. for
+ # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
+ # will not see any inherits in B).
+ tokens[state] = items
+ try:
+ inherit_ndx = items.index(inherit)
+ except ValueError:
+ continue
+ inheritable[state] = inherit_ndx
+ continue
+
+ inherit_ndx = inheritable.pop(state, None)
+ if inherit_ndx is None:
+ continue
+
+ # Replace the "inherit" value with the items
+ curitems[inherit_ndx:inherit_ndx+1] = items
+ try:
+ # N.b. this is the index in items (that is, the superclass
+ # copy), so offset required when storing below.
+ new_inh_ndx = items.index(inherit)
+ except ValueError:
+ pass
+ else:
+ inheritable[state] = inherit_ndx + new_inh_ndx
+
+ return tokens
+
+ def __call__(cls, *args, **kwds):
+ """Instantiate cls after preprocessing its token definitions."""
+ if '_tokens' not in cls.__dict__:
+ cls._all_tokens = {}
+ cls._tmpname = 0
+ if hasattr(cls, 'token_variants') and cls.token_variants:
+ # don't process yet
+ pass
+ else:
+ cls._tokens = cls.process_tokendef('', cls.get_tokendefs())
+
+ return type.__call__(cls, *args, **kwds)
+
+
+class RegexLexer(Lexer, metaclass=RegexLexerMeta):
+ """
+ Base for simple stateful regular expression-based lexers.
+ Simplifies the lexing process so that you need only
+ provide a list of states and regular expressions.
+ """
+
+ #: Flags for compiling the regular expressions.
+ #: Defaults to MULTILINE.
+ flags = re.MULTILINE
+
+    #: At all times there is a stack of states. Initially, the stack contains
+ #: a single state 'root'. The top of the stack is called "the current state".
+ #:
+ #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
+ #:
+ #: ``new_state`` can be omitted to signify no state transition.
+    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
+ #: the new current state is ``new_state``.
+ #: If ``new_state`` is a tuple of strings, all of those strings are pushed
+ #: on the stack and the current state will be the last element of the list.
+ #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
+ #: to signify a new, anonymous state combined from the rules of two
+ #: or more existing ones.
+ #: Furthermore, it can be '#pop' to signify going back one step in
+ #: the state stack, or '#push' to push the current state on the stack
+ #: again. Note that if you push while in a combined state, the combined
+ #: state itself is pushed, and not only the state in which the rule is
+ #: defined.
+ #:
+ #: The tuple can also be replaced with ``include('state')``, in which
+ #: case the rules from the state named by the string are included in the
+ #: current one.
+ tokens = {}
+
+ def get_tokens_unprocessed(self, text, stack=('root',)):
+ """
+ Split ``text`` into (tokentype, text) pairs.
+
+ ``stack`` is the initial stack (default: ``['root']``)
+ """
+ pos = 0
+ tokendefs = self._tokens
+ statestack = list(stack)
+ statetokens = tokendefs[statestack[-1]]
+ while 1:
+ for rexmatch, action, new_state in statetokens:
+ m = rexmatch(text, pos)
+ if m:
+ if action is not None:
+ if type(action) is _TokenType:
+ yield pos, action, m.group()
+ else:
+ yield from action(self, m)
+ pos = m.end()
+ if new_state is not None:
+ # state transition
+ if isinstance(new_state, tuple):
+ for state in new_state:
+ if state == '#pop':
+ if len(statestack) > 1:
+ statestack.pop()
+ elif state == '#push':
+ statestack.append(statestack[-1])
+ else:
+ statestack.append(state)
+ elif isinstance(new_state, int):
+ # pop, but keep at least one state on the stack
+ # (random code leading to unexpected pops should
+ # not allow exceptions)
+ if abs(new_state) >= len(statestack):
+ del statestack[1:]
+ else:
+ del statestack[new_state:]
+ elif new_state == '#push':
+ statestack.append(statestack[-1])
+ else:
+ assert False, "wrong state def: %r" % new_state
+ statetokens = tokendefs[statestack[-1]]
+ break
+ else:
+ # We are here only if all state tokens have been considered
+ # and there was not a match on any of them.
+ try:
+ if text[pos] == '\n':
+ # at EOL, reset state to "root"
+ statestack = ['root']
+ statetokens = tokendefs['root']
+ yield pos, Text, '\n'
+ pos += 1
+ continue
+ yield pos, Error, text[pos]
+ pos += 1
+ except IndexError:
+ break
+
+
+class LexerContext:
+ """
+ A helper object that holds lexer position data.
+ """
+
+ def __init__(self, text, pos, stack=None, end=None):
+ self.text = text
+ self.pos = pos
+ self.end = end or len(text) # end=0 not supported ;-)
+ self.stack = stack or ['root']
+
+ def __repr__(self):
+ return 'LexerContext(%r, %r, %r)' % (
+ self.text, self.pos, self.stack)
+
+
+class ExtendedRegexLexer(RegexLexer):
+ """
+ A RegexLexer that uses a context object to store its state.
+ """
+
+ def get_tokens_unprocessed(self, text=None, context=None):
+ """
+ Split ``text`` into (tokentype, text) pairs.
+ If ``context`` is given, use this lexer context instead.
+ """
+ tokendefs = self._tokens
+ if not context:
+ ctx = LexerContext(text, 0)
+ statetokens = tokendefs['root']
+ else:
+ ctx = context
+ statetokens = tokendefs[ctx.stack[-1]]
+ text = ctx.text
+ while 1:
+ for rexmatch, action, new_state in statetokens:
+ m = rexmatch(text, ctx.pos, ctx.end)
+ if m:
+ if action is not None:
+ if type(action) is _TokenType:
+ yield ctx.pos, action, m.group()
+ ctx.pos = m.end()
+ else:
+ yield from action(self, m, ctx)
+ if not new_state:
+ # altered the state stack?
+ statetokens = tokendefs[ctx.stack[-1]]
+ # CAUTION: callback must set ctx.pos!
+ if new_state is not None:
+ # state transition
+ if isinstance(new_state, tuple):
+ for state in new_state:
+ if state == '#pop':
+ if len(ctx.stack) > 1:
+ ctx.stack.pop()
+ elif state == '#push':
+ ctx.stack.append(ctx.stack[-1])
+ else:
+ ctx.stack.append(state)
+ elif isinstance(new_state, int):
+ # see RegexLexer for why this check is made
+ if abs(new_state) >= len(ctx.stack):
+ del ctx.stack[1:]
+ else:
+ del ctx.stack[new_state:]
+ elif new_state == '#push':
+ ctx.stack.append(ctx.stack[-1])
+ else:
+ assert False, "wrong state def: %r" % new_state
+ statetokens = tokendefs[ctx.stack[-1]]
+ break
+ else:
+ try:
+ if ctx.pos >= ctx.end:
+ break
+ if text[ctx.pos] == '\n':
+ # at EOL, reset state to "root"
+ ctx.stack = ['root']
+ statetokens = tokendefs['root']
+ yield ctx.pos, Text, '\n'
+ ctx.pos += 1
+ continue
+ yield ctx.pos, Error, text[ctx.pos]
+ ctx.pos += 1
+ except IndexError:
+ break
+
+
+def do_insertions(insertions, tokens):
+ """
+ Helper for lexers which must combine the results of several
+ sublexers.
+
+ ``insertions`` is a list of ``(index, itokens)`` pairs.
+ Each ``itokens`` iterable should be inserted at position
+ ``index`` into the token stream given by the ``tokens``
+ argument.
+
+ The result is a combined token stream.
+
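+    For example (sketch), ``insertions = [(3, [(0, Token.Escape, 'X')])]``
+    causes the inserted token to be emitted once three characters of the
+    ``tokens`` stream have been yielded, with the ``realpos`` bookkeeping
+    keeping the combined indices consistent.
+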
+ TODO: clean up the code here.
+ """
+ insertions = iter(insertions)
+ try:
+ index, itokens = next(insertions)
+ except StopIteration:
+ # no insertions
+ yield from tokens
+ return
+
+ realpos = None
+ insleft = True
+
+ # iterate over the token stream where we want to insert
+ # the tokens from the insertion list.
+ for i, t, v in tokens:
+ # first iteration. store the position of first item
+ if realpos is None:
+ realpos = i
+ oldi = 0
+ while insleft and i + len(v) >= index:
+ tmpval = v[oldi:index - i]
+ if tmpval:
+ yield realpos, t, tmpval
+ realpos += len(tmpval)
+ for it_index, it_token, it_value in itokens:
+ yield realpos, it_token, it_value
+ realpos += len(it_value)
+ oldi = index - i
+ try:
+ index, itokens = next(insertions)
+ except StopIteration:
+ insleft = False
+ break # not strictly necessary
+ if oldi < len(v):
+ yield realpos, t, v[oldi:]
+ realpos += len(v) - oldi
+
+ # leftover tokens
+ while insleft:
+ # no normal tokens, set realpos to zero
+ realpos = realpos or 0
+ for p, t, v in itokens:
+ yield realpos, t, v
+ realpos += len(v)
+ try:
+ index, itokens = next(insertions)
+ except StopIteration:
+ insleft = False
+ break # not strictly necessary
+
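+# Illustrative sketch (not upstream code): splice a two-character error
+# token into a base stream at index 4 of the base text.
+def _demo_do_insertions():
+    base = [(0, Text, 'abcdefgh')]
+    ins = [(4, [(0, Error, 'XY')])]
+    return list(do_insertions(ins, iter(base)))
+    # -> [(0, Text, 'abcd'), (4, Error, 'XY'), (6, Text, 'efgh')]
+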
+
+class ProfilingRegexLexerMeta(RegexLexerMeta):
+ """Metaclass for ProfilingRegexLexer, collects regex timing info."""
+
+ def _process_regex(cls, regex, rflags, state):
+ if isinstance(regex, words):
+ rex = regex_opt(regex.words, prefix=regex.prefix,
+ suffix=regex.suffix)
+ else:
+ rex = regex
+ compiled = re.compile(rex, rflags)
+
+ def match_func(text, pos, endpos=sys.maxsize):
+ info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
+ t0 = time.time()
+ res = compiled.match(text, pos, endpos)
+ t1 = time.time()
+ info[0] += 1
+ info[1] += t1 - t0
+ return res
+ return match_func
+
+
+class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
+ """Drop-in replacement for RegexLexer that does profiling of its regexes."""
+
+ _prof_data = []
+ _prof_sort_index = 4 # defaults to time per call
+
+ def get_tokens_unprocessed(self, text, stack=('root',)):
+ # this needs to be a stack, since using(this) will produce nested calls
+ self.__class__._prof_data.append({})
+ yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
+ rawdata = self.__class__._prof_data.pop()
+ data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
+ n, 1000 * t, 1000 * t / n)
+ for ((s, r), (n, t)) in rawdata.items()),
+ key=lambda x: x[self._prof_sort_index],
+ reverse=True)
+ sum_total = sum(x[3] for x in data)
+
+ print()
+ print('Profiling result for %s lexing %d chars in %.3f ms' %
+ (self.__class__.__name__, len(text), sum_total))
+ print('=' * 110)
+ print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
+ print('-' * 110)
+ for d in data:
+ print('%-20s %-65s %5d %8.4f %8.4f' % d)
+ print('=' * 110)
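+
+
+# Illustrative sketch (not upstream code): build a profiling variant of any
+# concrete RegexLexer subclass; exhausting the token stream prints the
+# per-regex timing table.
+def _demo_profile(lexer_cls, text):
+    class _Profiled(ProfilingRegexLexer, lexer_cls):
+        pass
+    return list(_Profiled().get_tokens(text))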
diff --git a/third_party/python/pip/pip/_vendor/pygments/lexers/__init__.py b/third_party/python/pip/pip/_vendor/pygments/lexers/__init__.py
new file mode 100644
index 0000000000..ed69f24ed3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/lexers/__init__.py
@@ -0,0 +1,335 @@
+"""
+ pygments.lexers
+ ~~~~~~~~~~~~~~~
+
+ Pygments lexers.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import sys
+import types
+from fnmatch import fnmatch
+from os.path import basename
+
+from pip._vendor.pygments.lexers._mapping import LEXERS
+from pip._vendor.pygments.modeline import get_filetype_from_buffer
+from pip._vendor.pygments.plugin import find_plugin_lexers
+from pip._vendor.pygments.util import ClassNotFound, guess_decode
+
+COMPAT = {
+ 'Python3Lexer': 'PythonLexer',
+ 'Python3TracebackLexer': 'PythonTracebackLexer',
+}
+
+__all__ = ['get_lexer_by_name', 'get_lexer_for_filename', 'find_lexer_class',
+ 'guess_lexer', 'load_lexer_from_file'] + list(LEXERS) + list(COMPAT)
+
+_lexer_cache = {}
+
+def _load_lexers(module_name):
+ """Load a lexer (and all others in the module too)."""
+ mod = __import__(module_name, None, None, ['__all__'])
+ for lexer_name in mod.__all__:
+ cls = getattr(mod, lexer_name)
+ _lexer_cache[cls.name] = cls
+
+
+def get_all_lexers(plugins=True):
+ """Return a generator of tuples in the form ``(name, aliases,
+    filenames, mimetypes)`` of all known lexers.
+
+ If *plugins* is true (the default), plugin lexers supplied by entrypoints
+ are also returned. Otherwise, only builtin ones are considered.
+ """
+ for item in LEXERS.values():
+ yield item[1:]
+ if plugins:
+ for lexer in find_plugin_lexers():
+ yield lexer.name, lexer.aliases, lexer.filenames, lexer.mimetypes
+
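+# Illustrative sketch (not upstream code): enumerate the display names of
+# all builtin lexers without touching entrypoint plugins.
+def _demo_list_lexer_names():
+    return sorted(name for name, aliases, filenames, mimetypes
+                  in get_all_lexers(plugins=False))
+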
+
+def find_lexer_class(name):
+ """Lookup a lexer class by name.
+
+ Return None if not found.
+ """
+ if name in _lexer_cache:
+ return _lexer_cache[name]
+ # lookup builtin lexers
+ for module_name, lname, aliases, _, _ in LEXERS.values():
+ if name == lname:
+ _load_lexers(module_name)
+ return _lexer_cache[name]
+ # continue with lexers from setuptools entrypoints
+ for cls in find_plugin_lexers():
+ if cls.name == name:
+ return cls
+
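+# Illustrative sketch (not upstream code): find_lexer_class() matches the
+# human-readable name (not an alias) and returns the class uninstantiated.
+def _demo_find_class():
+    return find_lexer_class('Python')  # the PythonLexer class, or None
+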
+
+def find_lexer_class_by_name(_alias):
+ """Lookup a lexer class by alias.
+
+ Like `get_lexer_by_name`, but does not instantiate the class.
+
+ .. versionadded:: 2.2
+ """
+ if not _alias:
+ raise ClassNotFound('no lexer for alias %r found' % _alias)
+ # lookup builtin lexers
+ for module_name, name, aliases, _, _ in LEXERS.values():
+ if _alias.lower() in aliases:
+ if name not in _lexer_cache:
+ _load_lexers(module_name)
+ return _lexer_cache[name]
+ # continue with lexers from setuptools entrypoints
+ for cls in find_plugin_lexers():
+ if _alias.lower() in cls.aliases:
+ return cls
+ raise ClassNotFound('no lexer for alias %r found' % _alias)
+
+
+def get_lexer_by_name(_alias, **options):
+ """Get a lexer by an alias.
+
+ Raises ClassNotFound if not found.
+ """
+ if not _alias:
+ raise ClassNotFound('no lexer for alias %r found' % _alias)
+
+ # lookup builtin lexers
+ for module_name, name, aliases, _, _ in LEXERS.values():
+ if _alias.lower() in aliases:
+ if name not in _lexer_cache:
+ _load_lexers(module_name)
+ return _lexer_cache[name](**options)
+ # continue with lexers from setuptools entrypoints
+ for cls in find_plugin_lexers():
+ if _alias.lower() in cls.aliases:
+ return cls(**options)
+ raise ClassNotFound('no lexer for alias %r found' % _alias)
+
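+# Illustrative sketch (not upstream code): alias lookup is case-insensitive
+# and keyword options are forwarded to the lexer constructor.
+def _demo_get_by_alias():
+    return get_lexer_by_name('Python', stripall=True)
+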
+
+def load_lexer_from_file(filename, lexername="CustomLexer", **options):
+ """Load a lexer from a file.
+
+    This function expects a file located relative to the current working
+    directory that contains a Lexer class. By default, it expects the
+    Lexer to be named ``CustomLexer``; you can specify your own class name
+    as the second argument to this function.
+
+    Users should be very careful with the input, because this function
+    is equivalent to running ``eval()`` on the input file.
+
+ Raises ClassNotFound if there are any problems importing the Lexer.
+
+ .. versionadded:: 2.2
+ """
+ try:
+ # This empty dict will contain the namespace for the exec'd file
+ custom_namespace = {}
+ with open(filename, 'rb') as f:
+ exec(f.read(), custom_namespace)
+ # Retrieve the class `lexername` from that namespace
+ if lexername not in custom_namespace:
+ raise ClassNotFound('no valid %s class found in %s' %
+ (lexername, filename))
+ lexer_class = custom_namespace[lexername]
+ # And finally instantiate it with the options
+ return lexer_class(**options)
+ except OSError as err:
+ raise ClassNotFound('cannot read %s: %s' % (filename, err))
+ except ClassNotFound:
+ raise
+ except Exception as err:
+ raise ClassNotFound('error when loading custom lexer: %s' % err)
+
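+# Illustrative sketch (not upstream code; "my_lexer.py" and "MyLexer" are
+# hypothetical names): the file is exec()'d verbatim, so treat it as
+# trusted code.
+def _demo_load_custom():
+    return load_lexer_from_file('my_lexer.py', lexername='MyLexer')
+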
+
+def find_lexer_class_for_filename(_fn, code=None):
+ """Get a lexer for a filename.
+
+ If multiple lexers match the filename pattern, use ``analyse_text()`` to
+ figure out which one is more appropriate.
+
+ Returns None if not found.
+ """
+ matches = []
+ fn = basename(_fn)
+ for modname, name, _, filenames, _ in LEXERS.values():
+ for filename in filenames:
+ if fnmatch(fn, filename):
+ if name not in _lexer_cache:
+ _load_lexers(modname)
+ matches.append((_lexer_cache[name], filename))
+ for cls in find_plugin_lexers():
+ for filename in cls.filenames:
+ if fnmatch(fn, filename):
+ matches.append((cls, filename))
+
+ if isinstance(code, bytes):
+ # decode it, since all analyse_text functions expect unicode
+ code = guess_decode(code)
+
+ def get_rating(info):
+ cls, filename = info
+ # explicit patterns get a bonus
+        bonus = 0.5 if '*' not in filename else 0
+ # The class _always_ defines analyse_text because it's included in
+ # the Lexer class. The default implementation returns None which
+ # gets turned into 0.0. Run scripts/detect_missing_analyse_text.py
+ # to find lexers which need it overridden.
+ if code:
+ return cls.analyse_text(code) + bonus, cls.__name__
+ return cls.priority + bonus, cls.__name__
+
+ if matches:
+ matches.sort(key=get_rating)
+ # print "Possible lexers, after sort:", matches
+ return matches[-1][0]
+
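+# Illustrative sketch (not upstream code): candidates matching the filename
+# are ranked by analyse_text() score plus a 0.5 bonus for non-glob patterns.
+def _demo_pick_for_filename():
+    return find_lexer_class_for_filename('setup.py', code='import os\n')
+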
+
+def get_lexer_for_filename(_fn, code=None, **options):
+ """Get a lexer for a filename.
+
+ If multiple lexers match the filename pattern, use ``analyse_text()`` to
+ figure out which one is more appropriate.
+
+ Raises ClassNotFound if not found.
+ """
+ res = find_lexer_class_for_filename(_fn, code)
+ if not res:
+ raise ClassNotFound('no lexer for filename %r found' % _fn)
+ return res(**options)
+
+
+def get_lexer_for_mimetype(_mime, **options):
+ """Get a lexer for a mimetype.
+
+ Raises ClassNotFound if not found.
+ """
+ for modname, name, _, _, mimetypes in LEXERS.values():
+ if _mime in mimetypes:
+ if name not in _lexer_cache:
+ _load_lexers(modname)
+ return _lexer_cache[name](**options)
+ for cls in find_plugin_lexers():
+ if _mime in cls.mimetypes:
+ return cls(**options)
+ raise ClassNotFound('no lexer for mimetype %r found' % _mime)
+
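+# Illustrative sketch (not upstream code): mimetype lookup is an exact
+# match against each lexer's registered mimetypes.
+def _demo_get_by_mimetype():
+    return get_lexer_for_mimetype('text/x-python')
+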
+
+def _iter_lexerclasses(plugins=True):
+ """Return an iterator over all lexer classes."""
+ for key in sorted(LEXERS):
+ module_name, name = LEXERS[key][:2]
+ if name not in _lexer_cache:
+ _load_lexers(module_name)
+ yield _lexer_cache[name]
+ if plugins:
+ yield from find_plugin_lexers()
+
+
+def guess_lexer_for_filename(_fn, _text, **options):
+ """
+    Look up all lexers that handle the given filename as a primary
+    (``filenames``) or secondary (``alias_filenames``) pattern. Then run a
+    text analysis on ``_text`` for those lexers and choose the best result.
+
+ usage::
+
+ >>> from pygments.lexers import guess_lexer_for_filename
+ >>> guess_lexer_for_filename('hello.html', '<%= @foo %>')
+ <pygments.lexers.templates.RhtmlLexer object at 0xb7d2f32c>
+ >>> guess_lexer_for_filename('hello.html', '<h1>{{ title|e }}</h1>')
+ <pygments.lexers.templates.HtmlDjangoLexer object at 0xb7d2f2ac>
+ >>> guess_lexer_for_filename('style.css', 'a { color: <?= $link ?> }')
+ <pygments.lexers.templates.CssPhpLexer object at 0xb7ba518c>
+ """
+ fn = basename(_fn)
+ primary = {}
+ matching_lexers = set()
+ for lexer in _iter_lexerclasses():
+ for filename in lexer.filenames:
+ if fnmatch(fn, filename):
+ matching_lexers.add(lexer)
+ primary[lexer] = True
+ for filename in lexer.alias_filenames:
+ if fnmatch(fn, filename):
+ matching_lexers.add(lexer)
+ primary[lexer] = False
+ if not matching_lexers:
+ raise ClassNotFound('no lexer for filename %r found' % fn)
+ if len(matching_lexers) == 1:
+ return matching_lexers.pop()(**options)
+ result = []
+ for lexer in matching_lexers:
+ rv = lexer.analyse_text(_text)
+ if rv == 1.0:
+ return lexer(**options)
+ result.append((rv, lexer))
+
+ def type_sort(t):
+ # sort by:
+ # - analyse score
+ # - is primary filename pattern?
+ # - priority
+ # - last resort: class name
+ return (t[0], primary[t[1]], t[1].priority, t[1].__name__)
+ result.sort(key=type_sort)
+
+ return result[-1][1](**options)
+
+
+def guess_lexer(_text, **options):
+ """Guess a lexer by strong distinctions in the text (eg, shebang)."""
+
+ if not isinstance(_text, str):
+ inencoding = options.get('inencoding', options.get('encoding'))
+ if inencoding:
+ _text = _text.decode(inencoding or 'utf8')
+ else:
+ _text, _ = guess_decode(_text)
+
+ # try to get a vim modeline first
+ ft = get_filetype_from_buffer(_text)
+
+ if ft is not None:
+ try:
+ return get_lexer_by_name(ft, **options)
+ except ClassNotFound:
+ pass
+
+ best_lexer = [0.0, None]
+ for lexer in _iter_lexerclasses():
+ rv = lexer.analyse_text(_text)
+ if rv == 1.0:
+ return lexer(**options)
+ if rv > best_lexer[0]:
+ best_lexer[:] = (rv, lexer)
+ if not best_lexer[0] or best_lexer[1] is None:
+ raise ClassNotFound('no lexer matching the text found')
+ return best_lexer[1](**options)
+
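+# Illustrative sketch (not upstream code): a recognizable shebang or vim
+# modeline usually decides the guess before the full analysis loop runs.
+def _demo_guess():
+    return guess_lexer('#!/usr/bin/env python\nprint(1)\n')
+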
+
+class _automodule(types.ModuleType):
+ """Automatically import lexers."""
+
+ def __getattr__(self, name):
+ info = LEXERS.get(name)
+ if info:
+ _load_lexers(info[0])
+ cls = _lexer_cache[info[1]]
+ setattr(self, name, cls)
+ return cls
+ if name in COMPAT:
+ return getattr(self, COMPAT[name])
+ raise AttributeError(name)
+
+
+oldmod = sys.modules[__name__]
+newmod = _automodule(__name__)
+newmod.__dict__.update(oldmod.__dict__)
+sys.modules[__name__] = newmod
+del newmod.newmod, newmod.oldmod, newmod.sys, newmod.types
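+
+# Note: after the swap above, attribute access such as `lexers.PythonLexer`
+# goes through _automodule.__getattr__, which imports the defining submodule
+# on first use (lazy loading).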
diff --git a/third_party/python/pip/pip/_vendor/pygments/lexers/_mapping.py b/third_party/python/pip/pip/_vendor/pygments/lexers/_mapping.py
new file mode 100644
index 0000000000..40dcaa3c77
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/lexers/_mapping.py
@@ -0,0 +1,541 @@
+# Automatically generated by scripts/gen_mapfiles.py.
+# DO NOT EDIT BY HAND; run `make mapfiles` instead.
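+#
+# Each entry maps a lexer class name to a tuple of
+# (module path, display name, aliases, filename globs, mimetypes).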
+
+LEXERS = {
+ 'ABAPLexer': ('pip._vendor.pygments.lexers.business', 'ABAP', ('abap',), ('*.abap', '*.ABAP'), ('text/x-abap',)),
+ 'AMDGPULexer': ('pip._vendor.pygments.lexers.amdgpu', 'AMDGPU', ('amdgpu',), ('*.isa',), ()),
+ 'APLLexer': ('pip._vendor.pygments.lexers.apl', 'APL', ('apl',), ('*.apl', '*.aplf', '*.aplo', '*.apln', '*.aplc', '*.apli', '*.dyalog'), ()),
+ 'AbnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'ABNF', ('abnf',), ('*.abnf',), ('text/x-abnf',)),
+ 'ActionScript3Lexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript 3', ('actionscript3', 'as3'), ('*.as',), ('application/x-actionscript3', 'text/x-actionscript3', 'text/actionscript3')),
+ 'ActionScriptLexer': ('pip._vendor.pygments.lexers.actionscript', 'ActionScript', ('actionscript', 'as'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
+ 'AdaLexer': ('pip._vendor.pygments.lexers.ada', 'Ada', ('ada', 'ada95', 'ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
+ 'AdlLexer': ('pip._vendor.pygments.lexers.archetype', 'ADL', ('adl',), ('*.adl', '*.adls', '*.adlf', '*.adlx'), ()),
+ 'AgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Agda', ('agda',), ('*.agda',), ('text/x-agda',)),
+ 'AheuiLexer': ('pip._vendor.pygments.lexers.esoteric', 'Aheui', ('aheui',), ('*.aheui',), ()),
+ 'AlloyLexer': ('pip._vendor.pygments.lexers.dsls', 'Alloy', ('alloy',), ('*.als',), ('text/x-alloy',)),
+ 'AmbientTalkLexer': ('pip._vendor.pygments.lexers.ambient', 'AmbientTalk', ('ambienttalk', 'ambienttalk/2', 'at'), ('*.at',), ('text/x-ambienttalk',)),
+ 'AmplLexer': ('pip._vendor.pygments.lexers.ampl', 'Ampl', ('ampl',), ('*.run',), ()),
+ 'Angular2HtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML + Angular2', ('html+ng2',), ('*.ng2',), ()),
+ 'Angular2Lexer': ('pip._vendor.pygments.lexers.templates', 'Angular2', ('ng2',), (), ()),
+ 'AntlrActionScriptLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-actionscript', 'antlr-as'), ('*.G', '*.g'), ()),
+ 'AntlrCSharpLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
+ 'AntlrCppLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
+ 'AntlrJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Java Target', ('antlr-java',), ('*.G', '*.g'), ()),
+ 'AntlrLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR', ('antlr',), (), ()),
+ 'AntlrObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With ObjectiveC Target', ('antlr-objc',), ('*.G', '*.g'), ()),
+ 'AntlrPerlLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Perl Target', ('antlr-perl',), ('*.G', '*.g'), ()),
+ 'AntlrPythonLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Python Target', ('antlr-python',), ('*.G', '*.g'), ()),
+ 'AntlrRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
+ 'ApacheConfLexer': ('pip._vendor.pygments.lexers.configs', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
+ 'AppleScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'AppleScript', ('applescript',), ('*.applescript',), ()),
+ 'ArduinoLexer': ('pip._vendor.pygments.lexers.c_like', 'Arduino', ('arduino',), ('*.ino',), ('text/x-arduino',)),
+ 'ArrowLexer': ('pip._vendor.pygments.lexers.arrow', 'Arrow', ('arrow',), ('*.arw',), ()),
+ 'AscLexer': ('pip._vendor.pygments.lexers.asc', 'ASCII armored', ('asc', 'pem'), ('*.asc', '*.pem', 'id_dsa', 'id_ecdsa', 'id_ecdsa_sk', 'id_ed25519', 'id_ed25519_sk', 'id_rsa'), ('application/pgp-keys', 'application/pgp-encrypted', 'application/pgp-signature')),
+ 'AspectJLexer': ('pip._vendor.pygments.lexers.jvm', 'AspectJ', ('aspectj',), ('*.aj',), ('text/x-aspectj',)),
+ 'AsymptoteLexer': ('pip._vendor.pygments.lexers.graphics', 'Asymptote', ('asymptote', 'asy'), ('*.asy',), ('text/x-asymptote',)),
+ 'AugeasLexer': ('pip._vendor.pygments.lexers.configs', 'Augeas', ('augeas',), ('*.aug',), ()),
+ 'AutoItLexer': ('pip._vendor.pygments.lexers.automation', 'AutoIt', ('autoit',), ('*.au3',), ('text/x-autoit',)),
+ 'AutohotkeyLexer': ('pip._vendor.pygments.lexers.automation', 'autohotkey', ('autohotkey', 'ahk'), ('*.ahk', '*.ahkl'), ('text/x-autohotkey',)),
+ 'AwkLexer': ('pip._vendor.pygments.lexers.textedit', 'Awk', ('awk', 'gawk', 'mawk', 'nawk'), ('*.awk',), ('application/x-awk',)),
+ 'BBCBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BBC Basic', ('bbcbasic',), ('*.bbc',), ()),
+ 'BBCodeLexer': ('pip._vendor.pygments.lexers.markup', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
+ 'BCLexer': ('pip._vendor.pygments.lexers.algebra', 'BC', ('bc',), ('*.bc',), ()),
+ 'BSTLexer': ('pip._vendor.pygments.lexers.bibtex', 'BST', ('bst', 'bst-pybtex'), ('*.bst',), ()),
+ 'BareLexer': ('pip._vendor.pygments.lexers.bare', 'BARE', ('bare',), ('*.bare',), ()),
+ 'BaseMakefileLexer': ('pip._vendor.pygments.lexers.make', 'Base Makefile', ('basemake',), (), ()),
+ 'BashLexer': ('pip._vendor.pygments.lexers.shell', 'Bash', ('bash', 'sh', 'ksh', 'zsh', 'shell'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass', '*.exheres-0', '*.exlib', '*.zsh', '.bashrc', 'bashrc', '.bash_*', 'bash_*', 'zshrc', '.zshrc', '.kshrc', 'kshrc', 'PKGBUILD'), ('application/x-sh', 'application/x-shellscript', 'text/x-shellscript')),
+ 'BashSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Bash Session', ('console', 'shell-session'), ('*.sh-session', '*.shell-session'), ('application/x-shell-session', 'application/x-sh-session')),
+ 'BatchLexer': ('pip._vendor.pygments.lexers.shell', 'Batchfile', ('batch', 'bat', 'dosbatch', 'winbatch'), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
+ 'BddLexer': ('pip._vendor.pygments.lexers.bdd', 'Bdd', ('bdd',), ('*.feature',), ('text/x-bdd',)),
+ 'BefungeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
+ 'BerryLexer': ('pip._vendor.pygments.lexers.berry', 'Berry', ('berry', 'be'), ('*.be',), ('text/x-berry', 'application/x-berry')),
+ 'BibTeXLexer': ('pip._vendor.pygments.lexers.bibtex', 'BibTeX', ('bibtex', 'bib'), ('*.bib',), ('text/x-bibtex',)),
+ 'BlitzBasicLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzBasic', ('blitzbasic', 'b3d', 'bplus'), ('*.bb', '*.decls'), ('text/x-bb',)),
+ 'BlitzMaxLexer': ('pip._vendor.pygments.lexers.basic', 'BlitzMax', ('blitzmax', 'bmax'), ('*.bmx',), ('text/x-bmx',)),
+ 'BnfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'BNF', ('bnf',), ('*.bnf',), ('text/x-bnf',)),
+ 'BoaLexer': ('pip._vendor.pygments.lexers.boa', 'Boa', ('boa',), ('*.boa',), ()),
+ 'BooLexer': ('pip._vendor.pygments.lexers.dotnet', 'Boo', ('boo',), ('*.boo',), ('text/x-boo',)),
+ 'BoogieLexer': ('pip._vendor.pygments.lexers.verification', 'Boogie', ('boogie',), ('*.bpl',), ()),
+ 'BrainfuckLexer': ('pip._vendor.pygments.lexers.esoteric', 'Brainfuck', ('brainfuck', 'bf'), ('*.bf', '*.b'), ('application/x-brainfuck',)),
+ 'BugsLexer': ('pip._vendor.pygments.lexers.modeling', 'BUGS', ('bugs', 'winbugs', 'openbugs'), ('*.bug',), ()),
+ 'CAmkESLexer': ('pip._vendor.pygments.lexers.esoteric', 'CAmkES', ('camkes', 'idl4'), ('*.camkes', '*.idl4'), ()),
+ 'CLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C', ('c',), ('*.c', '*.h', '*.idc', '*.x[bp]m'), ('text/x-chdr', 'text/x-csrc', 'image/x-xbitmap', 'image/x-xpixmap')),
+ 'CMakeLexer': ('pip._vendor.pygments.lexers.make', 'CMake', ('cmake',), ('*.cmake', 'CMakeLists.txt'), ('text/x-cmake',)),
+ 'CObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'c-objdump', ('c-objdump',), ('*.c-objdump',), ('text/x-c-objdump',)),
+ 'CPSALexer': ('pip._vendor.pygments.lexers.lisp', 'CPSA', ('cpsa',), ('*.cpsa',), ()),
+ 'CSSUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'CSS+UL4', ('css+ul4',), ('*.cssul4',), ()),
+ 'CSharpAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-cs', ('aspx-cs',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
+ 'CSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'C#', ('csharp', 'c#', 'cs'), ('*.cs',), ('text/x-csharp',)),
+ 'Ca65Lexer': ('pip._vendor.pygments.lexers.asm', 'ca65 assembler', ('ca65',), ('*.s',), ()),
+ 'CadlLexer': ('pip._vendor.pygments.lexers.archetype', 'cADL', ('cadl',), ('*.cadl',), ()),
+ 'CapDLLexer': ('pip._vendor.pygments.lexers.esoteric', 'CapDL', ('capdl',), ('*.cdl',), ()),
+ 'CapnProtoLexer': ('pip._vendor.pygments.lexers.capnproto', "Cap'n Proto", ('capnp',), ('*.capnp',), ()),
+ 'CbmBasicV2Lexer': ('pip._vendor.pygments.lexers.basic', 'CBM BASIC V2', ('cbmbas',), ('*.bas',), ()),
+ 'CddlLexer': ('pip._vendor.pygments.lexers.cddl', 'CDDL', ('cddl',), ('*.cddl',), ('text/x-cddl',)),
+ 'CeylonLexer': ('pip._vendor.pygments.lexers.jvm', 'Ceylon', ('ceylon',), ('*.ceylon',), ('text/x-ceylon',)),
+ 'Cfengine3Lexer': ('pip._vendor.pygments.lexers.configs', 'CFEngine3', ('cfengine3', 'cf3'), ('*.cf',), ()),
+ 'ChaiscriptLexer': ('pip._vendor.pygments.lexers.scripting', 'ChaiScript', ('chaiscript', 'chai'), ('*.chai',), ('text/x-chaiscript', 'application/x-chaiscript')),
+ 'ChapelLexer': ('pip._vendor.pygments.lexers.chapel', 'Chapel', ('chapel', 'chpl'), ('*.chpl',), ()),
+ 'CharmciLexer': ('pip._vendor.pygments.lexers.c_like', 'Charmci', ('charmci',), ('*.ci',), ()),
+ 'CheetahHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Cheetah', ('html+cheetah', 'html+spitfire', 'htmlcheetah'), (), ('text/html+cheetah', 'text/html+spitfire')),
+ 'CheetahJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Cheetah', ('javascript+cheetah', 'js+cheetah', 'javascript+spitfire', 'js+spitfire'), (), ('application/x-javascript+cheetah', 'text/x-javascript+cheetah', 'text/javascript+cheetah', 'application/x-javascript+spitfire', 'text/x-javascript+spitfire', 'text/javascript+spitfire')),
+ 'CheetahLexer': ('pip._vendor.pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
+ 'CheetahXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
+ 'CirruLexer': ('pip._vendor.pygments.lexers.webmisc', 'Cirru', ('cirru',), ('*.cirru',), ('text/x-cirru',)),
+ 'ClayLexer': ('pip._vendor.pygments.lexers.c_like', 'Clay', ('clay',), ('*.clay',), ('text/x-clay',)),
+ 'CleanLexer': ('pip._vendor.pygments.lexers.clean', 'Clean', ('clean',), ('*.icl', '*.dcl'), ()),
+ 'ClojureLexer': ('pip._vendor.pygments.lexers.jvm', 'Clojure', ('clojure', 'clj'), ('*.clj', '*.cljc'), ('text/x-clojure', 'application/x-clojure')),
+ 'ClojureScriptLexer': ('pip._vendor.pygments.lexers.jvm', 'ClojureScript', ('clojurescript', 'cljs'), ('*.cljs',), ('text/x-clojurescript', 'application/x-clojurescript')),
+ 'CobolFreeformatLexer': ('pip._vendor.pygments.lexers.business', 'COBOLFree', ('cobolfree',), ('*.cbl', '*.CBL'), ()),
+ 'CobolLexer': ('pip._vendor.pygments.lexers.business', 'COBOL', ('cobol',), ('*.cob', '*.COB', '*.cpy', '*.CPY'), ('text/x-cobol',)),
+ 'CoffeeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'CoffeeScript', ('coffeescript', 'coffee-script', 'coffee'), ('*.coffee',), ('text/coffeescript',)),
+ 'ColdfusionCFCLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion CFC', ('cfc',), ('*.cfc',), ()),
+ 'ColdfusionHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'Coldfusion HTML', ('cfm',), ('*.cfm', '*.cfml'), ('application/x-coldfusion',)),
+ 'ColdfusionLexer': ('pip._vendor.pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
+ 'Comal80Lexer': ('pip._vendor.pygments.lexers.comal', 'COMAL-80', ('comal', 'comal80'), ('*.cml', '*.comal'), ()),
+ 'CommonLispLexer': ('pip._vendor.pygments.lexers.lisp', 'Common Lisp', ('common-lisp', 'cl', 'lisp'), ('*.cl', '*.lisp'), ('text/x-common-lisp',)),
+ 'ComponentPascalLexer': ('pip._vendor.pygments.lexers.oberon', 'Component Pascal', ('componentpascal', 'cp'), ('*.cp', '*.cps'), ('text/x-component-pascal',)),
+ 'CoqLexer': ('pip._vendor.pygments.lexers.theorem', 'Coq', ('coq',), ('*.v',), ('text/x-coq',)),
+ 'CplintLexer': ('pip._vendor.pygments.lexers.cplint', 'cplint', ('cplint',), ('*.ecl', '*.prolog', '*.pro', '*.pl', '*.P', '*.lpad', '*.cpl'), ('text/x-cplint',)),
+ 'CppLexer': ('pip._vendor.pygments.lexers.c_cpp', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx', '*.C', '*.H', '*.cp', '*.CPP', '*.tpp'), ('text/x-c++hdr', 'text/x-c++src')),
+ 'CppObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
+ 'CrmshLexer': ('pip._vendor.pygments.lexers.dsls', 'Crmsh', ('crmsh', 'pcmk'), ('*.crmsh', '*.pcmk'), ()),
+ 'CrocLexer': ('pip._vendor.pygments.lexers.d', 'Croc', ('croc',), ('*.croc',), ('text/x-crocsrc',)),
+ 'CryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Cryptol', ('cryptol', 'cry'), ('*.cry',), ('text/x-cryptol',)),
+ 'CrystalLexer': ('pip._vendor.pygments.lexers.crystal', 'Crystal', ('cr', 'crystal'), ('*.cr',), ('text/x-crystal',)),
+ 'CsoundDocumentLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Document', ('csound-document', 'csound-csd'), ('*.csd',), ()),
+ 'CsoundOrchestraLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Orchestra', ('csound', 'csound-orc'), ('*.orc', '*.udo'), ()),
+ 'CsoundScoreLexer': ('pip._vendor.pygments.lexers.csound', 'Csound Score', ('csound-score', 'csound-sco'), ('*.sco',), ()),
+ 'CssDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Django/Jinja', ('css+django', 'css+jinja'), ('*.css.j2', '*.css.jinja2'), ('text/css+django', 'text/css+jinja')),
+ 'CssErbLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Ruby', ('css+ruby', 'css+erb'), (), ('text/css+ruby',)),
+ 'CssGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Genshi Text', ('css+genshitext', 'css+genshi'), (), ('text/css+genshi',)),
+ 'CssLexer': ('pip._vendor.pygments.lexers.css', 'CSS', ('css',), ('*.css',), ('text/css',)),
+ 'CssPhpLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+PHP', ('css+php',), (), ('text/css+php',)),
+ 'CssSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Smarty', ('css+smarty',), (), ('text/css+smarty',)),
+ 'CudaLexer': ('pip._vendor.pygments.lexers.c_like', 'CUDA', ('cuda', 'cu'), ('*.cu', '*.cuh'), ('text/x-cuda',)),
+ 'CypherLexer': ('pip._vendor.pygments.lexers.graph', 'Cypher', ('cypher',), ('*.cyp', '*.cypher'), ()),
+ 'CythonLexer': ('pip._vendor.pygments.lexers.python', 'Cython', ('cython', 'pyx', 'pyrex'), ('*.pyx', '*.pxd', '*.pxi'), ('text/x-cython', 'application/x-cython')),
+ 'DLexer': ('pip._vendor.pygments.lexers.d', 'D', ('d',), ('*.d', '*.di'), ('text/x-dsrc',)),
+ 'DObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'd-objdump', ('d-objdump',), ('*.d-objdump',), ('text/x-d-objdump',)),
+ 'DarcsPatchLexer': ('pip._vendor.pygments.lexers.diff', 'Darcs Patch', ('dpatch',), ('*.dpatch', '*.darcspatch'), ()),
+ 'DartLexer': ('pip._vendor.pygments.lexers.javascript', 'Dart', ('dart',), ('*.dart',), ('text/x-dart',)),
+ 'Dasm16Lexer': ('pip._vendor.pygments.lexers.asm', 'DASM16', ('dasm16',), ('*.dasm16', '*.dasm'), ('text/x-dasm16',)),
+ 'DebianControlLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Control file', ('debcontrol', 'control'), ('control',), ()),
+ 'DelphiLexer': ('pip._vendor.pygments.lexers.pascal', 'Delphi', ('delphi', 'pas', 'pascal', 'objectpascal'), ('*.pas', '*.dpr'), ('text/x-pascal',)),
+ 'DevicetreeLexer': ('pip._vendor.pygments.lexers.devicetree', 'Devicetree', ('devicetree', 'dts'), ('*.dts', '*.dtsi'), ('text/x-c',)),
+ 'DgLexer': ('pip._vendor.pygments.lexers.python', 'dg', ('dg',), ('*.dg',), ('text/x-dg',)),
+ 'DiffLexer': ('pip._vendor.pygments.lexers.diff', 'Diff', ('diff', 'udiff'), ('*.diff', '*.patch'), ('text/x-diff', 'text/x-patch')),
+ 'DjangoLexer': ('pip._vendor.pygments.lexers.templates', 'Django/Jinja', ('django', 'jinja'), (), ('application/x-django-templating', 'application/x-jinja')),
+ 'DockerLexer': ('pip._vendor.pygments.lexers.configs', 'Docker', ('docker', 'dockerfile'), ('Dockerfile', '*.docker'), ('text/x-dockerfile-config',)),
+ 'DtdLexer': ('pip._vendor.pygments.lexers.html', 'DTD', ('dtd',), ('*.dtd',), ('application/xml-dtd',)),
+ 'DuelLexer': ('pip._vendor.pygments.lexers.webmisc', 'Duel', ('duel', 'jbst', 'jsonml+bst'), ('*.duel', '*.jbst'), ('text/x-duel', 'text/x-jbst')),
+ 'DylanConsoleLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan session', ('dylan-console', 'dylan-repl'), ('*.dylan-console',), ('text/x-dylan-console',)),
+ 'DylanLexer': ('pip._vendor.pygments.lexers.dylan', 'Dylan', ('dylan',), ('*.dylan', '*.dyl', '*.intr'), ('text/x-dylan',)),
+ 'DylanLidLexer': ('pip._vendor.pygments.lexers.dylan', 'DylanLID', ('dylan-lid', 'lid'), ('*.lid', '*.hdp'), ('text/x-dylan-lid',)),
+ 'ECLLexer': ('pip._vendor.pygments.lexers.ecl', 'ECL', ('ecl',), ('*.ecl',), ('application/x-ecl',)),
+ 'ECLexer': ('pip._vendor.pygments.lexers.c_like', 'eC', ('ec',), ('*.ec', '*.eh'), ('text/x-echdr', 'text/x-ecsrc')),
+ 'EarlGreyLexer': ('pip._vendor.pygments.lexers.javascript', 'Earl Grey', ('earl-grey', 'earlgrey', 'eg'), ('*.eg',), ('text/x-earl-grey',)),
+ 'EasytrieveLexer': ('pip._vendor.pygments.lexers.scripting', 'Easytrieve', ('easytrieve',), ('*.ezt', '*.mac'), ('text/x-easytrieve',)),
+ 'EbnfLexer': ('pip._vendor.pygments.lexers.parsers', 'EBNF', ('ebnf',), ('*.ebnf',), ('text/x-ebnf',)),
+ 'EiffelLexer': ('pip._vendor.pygments.lexers.eiffel', 'Eiffel', ('eiffel',), ('*.e',), ('text/x-eiffel',)),
+ 'ElixirConsoleLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir iex session', ('iex',), (), ('text/x-elixir-shellsession',)),
+ 'ElixirLexer': ('pip._vendor.pygments.lexers.erlang', 'Elixir', ('elixir', 'ex', 'exs'), ('*.ex', '*.eex', '*.exs', '*.leex'), ('text/x-elixir',)),
+ 'ElmLexer': ('pip._vendor.pygments.lexers.elm', 'Elm', ('elm',), ('*.elm',), ('text/x-elm',)),
+ 'ElpiLexer': ('pip._vendor.pygments.lexers.elpi', 'Elpi', ('elpi',), ('*.elpi',), ('text/x-elpi',)),
+ 'EmacsLispLexer': ('pip._vendor.pygments.lexers.lisp', 'EmacsLisp', ('emacs-lisp', 'elisp', 'emacs'), ('*.el',), ('text/x-elisp', 'application/x-elisp')),
+ 'EmailLexer': ('pip._vendor.pygments.lexers.email', 'E-mail', ('email', 'eml'), ('*.eml',), ('message/rfc822',)),
+ 'ErbLexer': ('pip._vendor.pygments.lexers.templates', 'ERB', ('erb',), (), ('application/x-ruby-templating',)),
+ 'ErlangLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang', ('erlang',), ('*.erl', '*.hrl', '*.es', '*.escript'), ('text/x-erlang',)),
+ 'ErlangShellLexer': ('pip._vendor.pygments.lexers.erlang', 'Erlang erl session', ('erl',), ('*.erl-sh',), ('text/x-erl-shellsession',)),
+ 'EvoqueHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
+ 'EvoqueLexer': ('pip._vendor.pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
+ 'EvoqueXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
+ 'ExeclineLexer': ('pip._vendor.pygments.lexers.shell', 'execline', ('execline',), ('*.exec',), ()),
+ 'EzhilLexer': ('pip._vendor.pygments.lexers.ezhil', 'Ezhil', ('ezhil',), ('*.n',), ('text/x-ezhil',)),
+ 'FSharpLexer': ('pip._vendor.pygments.lexers.dotnet', 'F#', ('fsharp', 'f#'), ('*.fs', '*.fsi'), ('text/x-fsharp',)),
+ 'FStarLexer': ('pip._vendor.pygments.lexers.ml', 'FStar', ('fstar',), ('*.fst', '*.fsti'), ('text/x-fstar',)),
+ 'FactorLexer': ('pip._vendor.pygments.lexers.factor', 'Factor', ('factor',), ('*.factor',), ('text/x-factor',)),
+ 'FancyLexer': ('pip._vendor.pygments.lexers.ruby', 'Fancy', ('fancy', 'fy'), ('*.fy', '*.fancypack'), ('text/x-fancysrc',)),
+ 'FantomLexer': ('pip._vendor.pygments.lexers.fantom', 'Fantom', ('fan',), ('*.fan',), ('application/x-fantom',)),
+ 'FelixLexer': ('pip._vendor.pygments.lexers.felix', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
+ 'FennelLexer': ('pip._vendor.pygments.lexers.lisp', 'Fennel', ('fennel', 'fnl'), ('*.fnl',), ()),
+ 'FishShellLexer': ('pip._vendor.pygments.lexers.shell', 'Fish', ('fish', 'fishshell'), ('*.fish', '*.load'), ('application/x-fish',)),
+ 'FlatlineLexer': ('pip._vendor.pygments.lexers.dsls', 'Flatline', ('flatline',), (), ('text/x-flatline',)),
+ 'FloScriptLexer': ('pip._vendor.pygments.lexers.floscript', 'FloScript', ('floscript', 'flo'), ('*.flo',), ()),
+ 'ForthLexer': ('pip._vendor.pygments.lexers.forth', 'Forth', ('forth',), ('*.frt', '*.fs'), ('application/x-forth',)),
+ 'FortranFixedLexer': ('pip._vendor.pygments.lexers.fortran', 'FortranFixed', ('fortranfixed',), ('*.f', '*.F'), ()),
+ 'FortranLexer': ('pip._vendor.pygments.lexers.fortran', 'Fortran', ('fortran', 'f90'), ('*.f03', '*.f90', '*.F03', '*.F90'), ('text/x-fortran',)),
+ 'FoxProLexer': ('pip._vendor.pygments.lexers.foxpro', 'FoxPro', ('foxpro', 'vfp', 'clipper', 'xbase'), ('*.PRG', '*.prg'), ()),
+ 'FreeFemLexer': ('pip._vendor.pygments.lexers.freefem', 'Freefem', ('freefem',), ('*.edp',), ('text/x-freefem',)),
+ 'FutharkLexer': ('pip._vendor.pygments.lexers.futhark', 'Futhark', ('futhark',), ('*.fut',), ('text/x-futhark',)),
+ 'GAPLexer': ('pip._vendor.pygments.lexers.algebra', 'GAP', ('gap',), ('*.g', '*.gd', '*.gi', '*.gap'), ()),
+ 'GDScriptLexer': ('pip._vendor.pygments.lexers.gdscript', 'GDScript', ('gdscript', 'gd'), ('*.gd',), ('text/x-gdscript', 'application/x-gdscript')),
+ 'GLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
+ 'GSQLLexer': ('pip._vendor.pygments.lexers.gsql', 'GSQL', ('gsql',), ('*.gsql',), ()),
+ 'GasLexer': ('pip._vendor.pygments.lexers.asm', 'GAS', ('gas', 'asm'), ('*.s', '*.S'), ('text/x-gas',)),
+ 'GcodeLexer': ('pip._vendor.pygments.lexers.gcodelexer', 'g-code', ('gcode',), ('*.gcode',), ()),
+ 'GenshiLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi', ('genshi', 'kid', 'xml+genshi', 'xml+kid'), ('*.kid',), ('application/x-genshi', 'application/x-kid')),
+ 'GenshiTextLexer': ('pip._vendor.pygments.lexers.templates', 'Genshi Text', ('genshitext',), (), ('application/x-genshi-text', 'text/x-genshi')),
+ 'GettextLexer': ('pip._vendor.pygments.lexers.textfmts', 'Gettext Catalog', ('pot', 'po'), ('*.pot', '*.po'), ('application/x-gettext', 'text/x-gettext', 'text/gettext')),
+ 'GherkinLexer': ('pip._vendor.pygments.lexers.testing', 'Gherkin', ('gherkin', 'cucumber'), ('*.feature',), ('text/x-gherkin',)),
+ 'GnuplotLexer': ('pip._vendor.pygments.lexers.graphics', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
+ 'GoLexer': ('pip._vendor.pygments.lexers.go', 'Go', ('go', 'golang'), ('*.go',), ('text/x-gosrc',)),
+ 'GoloLexer': ('pip._vendor.pygments.lexers.jvm', 'Golo', ('golo',), ('*.golo',), ()),
+ 'GoodDataCLLexer': ('pip._vendor.pygments.lexers.business', 'GoodData-CL', ('gooddata-cl',), ('*.gdc',), ('text/x-gooddata-cl',)),
+ 'GosuLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu', ('gosu',), ('*.gs', '*.gsx', '*.gsp', '*.vark'), ('text/x-gosu',)),
+ 'GosuTemplateLexer': ('pip._vendor.pygments.lexers.jvm', 'Gosu Template', ('gst',), ('*.gst',), ('text/x-gosu-template',)),
+ 'GraphvizLexer': ('pip._vendor.pygments.lexers.graphviz', 'Graphviz', ('graphviz', 'dot'), ('*.gv', '*.dot'), ('text/x-graphviz', 'text/vnd.graphviz')),
+ 'GroffLexer': ('pip._vendor.pygments.lexers.markup', 'Groff', ('groff', 'nroff', 'man'), ('*.[1-9]', '*.man', '*.1p', '*.3pm'), ('application/x-troff', 'text/troff')),
+ 'GroovyLexer': ('pip._vendor.pygments.lexers.jvm', 'Groovy', ('groovy',), ('*.groovy', '*.gradle'), ('text/x-groovy',)),
+ 'HLSLShaderLexer': ('pip._vendor.pygments.lexers.graphics', 'HLSL', ('hlsl',), ('*.hlsl', '*.hlsli'), ('text/x-hlsl',)),
+ 'HTMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'HTML+UL4', ('html+ul4',), ('*.htmlul4',), ()),
+ 'HamlLexer': ('pip._vendor.pygments.lexers.html', 'Haml', ('haml',), ('*.haml',), ('text/x-haml',)),
+ 'HandlebarsHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Handlebars', ('html+handlebars',), ('*.handlebars', '*.hbs'), ('text/html+handlebars', 'text/x-handlebars-template')),
+ 'HandlebarsLexer': ('pip._vendor.pygments.lexers.templates', 'Handlebars', ('handlebars',), (), ()),
+ 'HaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
+ 'HaxeLexer': ('pip._vendor.pygments.lexers.haxe', 'Haxe', ('haxe', 'hxsl', 'hx'), ('*.hx', '*.hxsl'), ('text/haxe', 'text/x-haxe', 'text/x-hx')),
+ 'HexdumpLexer': ('pip._vendor.pygments.lexers.hexdump', 'Hexdump', ('hexdump',), (), ()),
+ 'HsailLexer': ('pip._vendor.pygments.lexers.asm', 'HSAIL', ('hsail', 'hsa'), ('*.hsail',), ('text/x-hsail',)),
+ 'HspecLexer': ('pip._vendor.pygments.lexers.haskell', 'Hspec', ('hspec',), (), ()),
+ 'HtmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja', 'htmldjango'), ('*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2'), ('text/html+django', 'text/html+jinja')),
+ 'HtmlGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
+ 'HtmlLexer': ('pip._vendor.pygments.lexers.html', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
+ 'HtmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+PHP', ('html+php',), ('*.phtml',), ('application/x-php', 'application/x-httpd-php', 'application/x-httpd-php3', 'application/x-httpd-php4', 'application/x-httpd-php5')),
+ 'HtmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Smarty', ('html+smarty',), (), ('text/html+smarty',)),
+ 'HttpLexer': ('pip._vendor.pygments.lexers.textfmts', 'HTTP', ('http',), (), ()),
+ 'HxmlLexer': ('pip._vendor.pygments.lexers.haxe', 'Hxml', ('haxeml', 'hxml'), ('*.hxml',), ()),
+ 'HyLexer': ('pip._vendor.pygments.lexers.lisp', 'Hy', ('hylang',), ('*.hy',), ('text/x-hy', 'application/x-hy')),
+ 'HybrisLexer': ('pip._vendor.pygments.lexers.scripting', 'Hybris', ('hybris', 'hy'), ('*.hy', '*.hyb'), ('text/x-hybris', 'application/x-hybris')),
+ 'IDLLexer': ('pip._vendor.pygments.lexers.idl', 'IDL', ('idl',), ('*.pro',), ('text/idl',)),
+ 'IconLexer': ('pip._vendor.pygments.lexers.unicon', 'Icon', ('icon',), ('*.icon', '*.ICON'), ()),
+ 'IdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Idris', ('idris', 'idr'), ('*.idr',), ('text/x-idris',)),
+ 'IgorLexer': ('pip._vendor.pygments.lexers.igor', 'Igor', ('igor', 'igorpro'), ('*.ipf',), ('text/ipf',)),
+ 'Inform6Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6', ('inform6', 'i6'), ('*.inf',), ()),
+ 'Inform6TemplateLexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 6 template', ('i6t',), ('*.i6t',), ()),
+ 'Inform7Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'Inform 7', ('inform7', 'i7'), ('*.ni', '*.i7x'), ()),
+ 'IniLexer': ('pip._vendor.pygments.lexers.configs', 'INI', ('ini', 'cfg', 'dosini'), ('*.ini', '*.cfg', '*.inf', '.editorconfig', '*.service', '*.socket', '*.device', '*.mount', '*.automount', '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope'), ('text/x-ini', 'text/inf')),
+ 'IoLexer': ('pip._vendor.pygments.lexers.iolang', 'Io', ('io',), ('*.io',), ('text/x-iosrc',)),
+ 'IokeLexer': ('pip._vendor.pygments.lexers.jvm', 'Ioke', ('ioke', 'ik'), ('*.ik',), ('text/x-iokesrc',)),
+ 'IrcLogsLexer': ('pip._vendor.pygments.lexers.textfmts', 'IRC logs', ('irc',), ('*.weechatlog',), ('text/x-irclog',)),
+ 'IsabelleLexer': ('pip._vendor.pygments.lexers.theorem', 'Isabelle', ('isabelle',), ('*.thy',), ('text/x-isabelle',)),
+ 'JLexer': ('pip._vendor.pygments.lexers.j', 'J', ('j',), ('*.ijs',), ('text/x-j',)),
+ 'JMESPathLexer': ('pip._vendor.pygments.lexers.jmespath', 'JMESPath', ('jmespath', 'jp'), ('*.jp',), ()),
+ 'JSLTLexer': ('pip._vendor.pygments.lexers.jslt', 'JSLT', ('jslt',), ('*.jslt',), ('text/x-jslt',)),
+ 'JagsLexer': ('pip._vendor.pygments.lexers.modeling', 'JAGS', ('jags',), ('*.jag', '*.bug'), ()),
+ 'JasminLexer': ('pip._vendor.pygments.lexers.jvm', 'Jasmin', ('jasmin', 'jasminxt'), ('*.j',), ()),
+ 'JavaLexer': ('pip._vendor.pygments.lexers.jvm', 'Java', ('java',), ('*.java',), ('text/x-java',)),
+ 'JavascriptDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Django/Jinja', ('javascript+django', 'js+django', 'javascript+jinja', 'js+jinja'), ('*.js.j2', '*.js.jinja2'), ('application/x-javascript+django', 'application/x-javascript+jinja', 'text/x-javascript+django', 'text/x-javascript+jinja', 'text/javascript+django', 'text/javascript+jinja')),
+ 'JavascriptErbLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Ruby', ('javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb'), (), ('application/x-javascript+ruby', 'text/x-javascript+ruby', 'text/javascript+ruby')),
+ 'JavascriptGenshiLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Genshi Text', ('js+genshitext', 'js+genshi', 'javascript+genshitext', 'javascript+genshi'), (), ('application/x-javascript+genshi', 'text/x-javascript+genshi', 'text/javascript+genshi')),
+ 'JavascriptLexer': ('pip._vendor.pygments.lexers.javascript', 'JavaScript', ('javascript', 'js'), ('*.js', '*.jsm', '*.mjs', '*.cjs'), ('application/javascript', 'application/x-javascript', 'text/x-javascript', 'text/javascript')),
+ 'JavascriptPhpLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+PHP', ('javascript+php', 'js+php'), (), ('application/x-javascript+php', 'text/x-javascript+php', 'text/javascript+php')),
+ 'JavascriptSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Smarty', ('javascript+smarty', 'js+smarty'), (), ('application/x-javascript+smarty', 'text/x-javascript+smarty', 'text/javascript+smarty')),
+ 'JavascriptUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Javascript+UL4', ('js+ul4',), ('*.jsul4',), ()),
+ 'JclLexer': ('pip._vendor.pygments.lexers.scripting', 'JCL', ('jcl',), ('*.jcl',), ('text/x-jcl',)),
+ 'JsgfLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'JSGF', ('jsgf',), ('*.jsgf',), ('application/jsgf', 'application/x-jsgf', 'text/jsgf')),
+ 'JsonBareObjectLexer': ('pip._vendor.pygments.lexers.data', 'JSONBareObject', (), (), ()),
+ 'JsonLdLexer': ('pip._vendor.pygments.lexers.data', 'JSON-LD', ('jsonld', 'json-ld'), ('*.jsonld',), ('application/ld+json',)),
+ 'JsonLexer': ('pip._vendor.pygments.lexers.data', 'JSON', ('json', 'json-object'), ('*.json', 'Pipfile.lock'), ('application/json', 'application/json-object')),
+ 'JspLexer': ('pip._vendor.pygments.lexers.templates', 'Java Server Page', ('jsp',), ('*.jsp',), ('application/x-jsp',)),
+ 'JuliaConsoleLexer': ('pip._vendor.pygments.lexers.julia', 'Julia console', ('jlcon', 'julia-repl'), (), ()),
+ 'JuliaLexer': ('pip._vendor.pygments.lexers.julia', 'Julia', ('julia', 'jl'), ('*.jl',), ('text/x-julia', 'application/x-julia')),
+ 'JuttleLexer': ('pip._vendor.pygments.lexers.javascript', 'Juttle', ('juttle',), ('*.juttle',), ('application/juttle', 'application/x-juttle', 'text/x-juttle', 'text/juttle')),
+ 'KLexer': ('pip._vendor.pygments.lexers.q', 'K', ('k',), ('*.k',), ()),
+ 'KalLexer': ('pip._vendor.pygments.lexers.javascript', 'Kal', ('kal',), ('*.kal',), ('text/kal', 'application/kal')),
+ 'KconfigLexer': ('pip._vendor.pygments.lexers.configs', 'Kconfig', ('kconfig', 'menuconfig', 'linux-config', 'kernel-config'), ('Kconfig*', '*Config.in*', 'external.in*', 'standard-modules.in'), ('text/x-kconfig',)),
+ 'KernelLogLexer': ('pip._vendor.pygments.lexers.textfmts', 'Kernel log', ('kmsg', 'dmesg'), ('*.kmsg', '*.dmesg'), ()),
+ 'KokaLexer': ('pip._vendor.pygments.lexers.haskell', 'Koka', ('koka',), ('*.kk', '*.kki'), ('text/x-koka',)),
+ 'KotlinLexer': ('pip._vendor.pygments.lexers.jvm', 'Kotlin', ('kotlin',), ('*.kt', '*.kts'), ('text/x-kotlin',)),
+ 'KuinLexer': ('pip._vendor.pygments.lexers.kuin', 'Kuin', ('kuin',), ('*.kn',), ()),
+ 'LSLLexer': ('pip._vendor.pygments.lexers.scripting', 'LSL', ('lsl',), ('*.lsl',), ('text/x-lsl',)),
+ 'LassoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Lasso', ('css+lasso',), (), ('text/css+lasso',)),
+ 'LassoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Lasso', ('html+lasso',), (), ('text/html+lasso', 'application/x-httpd-lasso', 'application/x-httpd-lasso[89]')),
+ 'LassoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Lasso', ('javascript+lasso', 'js+lasso'), (), ('application/x-javascript+lasso', 'text/x-javascript+lasso', 'text/javascript+lasso')),
+ 'LassoLexer': ('pip._vendor.pygments.lexers.javascript', 'Lasso', ('lasso', 'lassoscript'), ('*.lasso', '*.lasso[89]'), ('text/x-lasso',)),
+ 'LassoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Lasso', ('xml+lasso',), (), ('application/xml+lasso',)),
+ 'LeanLexer': ('pip._vendor.pygments.lexers.theorem', 'Lean', ('lean',), ('*.lean',), ('text/x-lean',)),
+ 'LessCssLexer': ('pip._vendor.pygments.lexers.css', 'LessCss', ('less',), ('*.less',), ('text/x-less-css',)),
+ 'LighttpdConfLexer': ('pip._vendor.pygments.lexers.configs', 'Lighttpd configuration file', ('lighttpd', 'lighty'), ('lighttpd.conf',), ('text/x-lighttpd-conf',)),
+ 'LilyPondLexer': ('pip._vendor.pygments.lexers.lilypond', 'LilyPond', ('lilypond',), ('*.ly',), ()),
+ 'LimboLexer': ('pip._vendor.pygments.lexers.inferno', 'Limbo', ('limbo',), ('*.b',), ('text/limbo',)),
+ 'LiquidLexer': ('pip._vendor.pygments.lexers.templates', 'liquid', ('liquid',), ('*.liquid',), ()),
+ 'LiterateAgdaLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Agda', ('literate-agda', 'lagda'), ('*.lagda',), ('text/x-literate-agda',)),
+ 'LiterateCryptolLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Cryptol', ('literate-cryptol', 'lcryptol', 'lcry'), ('*.lcry',), ('text/x-literate-cryptol',)),
+ 'LiterateHaskellLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Haskell', ('literate-haskell', 'lhaskell', 'lhs'), ('*.lhs',), ('text/x-literate-haskell',)),
+ 'LiterateIdrisLexer': ('pip._vendor.pygments.lexers.haskell', 'Literate Idris', ('literate-idris', 'lidris', 'lidr'), ('*.lidr',), ('text/x-literate-idris',)),
+ 'LiveScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'LiveScript', ('livescript', 'live-script'), ('*.ls',), ('text/livescript',)),
+ 'LlvmLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM', ('llvm',), ('*.ll',), ('text/x-llvm',)),
+ 'LlvmMirBodyLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR Body', ('llvm-mir-body',), (), ()),
+ 'LlvmMirLexer': ('pip._vendor.pygments.lexers.asm', 'LLVM-MIR', ('llvm-mir',), ('*.mir',), ()),
+ 'LogosLexer': ('pip._vendor.pygments.lexers.objective', 'Logos', ('logos',), ('*.x', '*.xi', '*.xm', '*.xmi'), ('text/x-logos',)),
+ 'LogtalkLexer': ('pip._vendor.pygments.lexers.prolog', 'Logtalk', ('logtalk',), ('*.lgt', '*.logtalk'), ('text/x-logtalk',)),
+ 'LuaLexer': ('pip._vendor.pygments.lexers.scripting', 'Lua', ('lua',), ('*.lua', '*.wlua'), ('text/x-lua', 'application/x-lua')),
+ 'MCFunctionLexer': ('pip._vendor.pygments.lexers.mcfunction', 'MCFunction', ('mcfunction', 'mcf'), ('*.mcfunction',), ('text/mcfunction',)),
+ 'MIMELexer': ('pip._vendor.pygments.lexers.mime', 'MIME', ('mime',), (), ('multipart/mixed', 'multipart/related', 'multipart/alternative')),
+ 'MOOCodeLexer': ('pip._vendor.pygments.lexers.scripting', 'MOOCode', ('moocode', 'moo'), ('*.moo',), ('text/x-moocode',)),
+ 'MSDOSSessionLexer': ('pip._vendor.pygments.lexers.shell', 'MSDOS Session', ('doscon',), (), ()),
+ 'Macaulay2Lexer': ('pip._vendor.pygments.lexers.macaulay2', 'Macaulay2', ('macaulay2',), ('*.m2',), ()),
+ 'MakefileLexer': ('pip._vendor.pygments.lexers.make', 'Makefile', ('make', 'makefile', 'mf', 'bsdmake'), ('*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile'), ('text/x-makefile',)),
+ 'MakoCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Mako', ('css+mako',), (), ('text/css+mako',)),
+ 'MakoHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Mako', ('html+mako',), (), ('text/html+mako',)),
+ 'MakoJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Mako', ('javascript+mako', 'js+mako'), (), ('application/x-javascript+mako', 'text/x-javascript+mako', 'text/javascript+mako')),
+ 'MakoLexer': ('pip._vendor.pygments.lexers.templates', 'Mako', ('mako',), ('*.mao',), ('application/x-mako',)),
+ 'MakoXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Mako', ('xml+mako',), (), ('application/xml+mako',)),
+ 'MaqlLexer': ('pip._vendor.pygments.lexers.business', 'MAQL', ('maql',), ('*.maql',), ('text/x-gooddata-maql', 'application/x-gooddata-maql')),
+ 'MarkdownLexer': ('pip._vendor.pygments.lexers.markup', 'Markdown', ('markdown', 'md'), ('*.md', '*.markdown'), ('text/x-markdown',)),
+ 'MaskLexer': ('pip._vendor.pygments.lexers.javascript', 'Mask', ('mask',), ('*.mask',), ('text/x-mask',)),
+ 'MasonLexer': ('pip._vendor.pygments.lexers.templates', 'Mason', ('mason',), ('*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler'), ('application/x-mason',)),
+ 'MathematicaLexer': ('pip._vendor.pygments.lexers.algebra', 'Mathematica', ('mathematica', 'mma', 'nb'), ('*.nb', '*.cdf', '*.nbp', '*.ma'), ('application/mathematica', 'application/vnd.wolfram.mathematica', 'application/vnd.wolfram.mathematica.package', 'application/vnd.wolfram.cdf')),
+ 'MatlabLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab', ('matlab',), ('*.m',), ('text/matlab',)),
+ 'MatlabSessionLexer': ('pip._vendor.pygments.lexers.matlab', 'Matlab session', ('matlabsession',), (), ()),
+ 'MaximaLexer': ('pip._vendor.pygments.lexers.maxima', 'Maxima', ('maxima', 'macsyma'), ('*.mac', '*.max'), ()),
+ 'MesonLexer': ('pip._vendor.pygments.lexers.meson', 'Meson', ('meson', 'meson.build'), ('meson.build', 'meson_options.txt'), ('text/x-meson',)),
+ 'MiniDLexer': ('pip._vendor.pygments.lexers.d', 'MiniD', ('minid',), (), ('text/x-minidsrc',)),
+ 'MiniScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MiniScript', ('miniscript', 'ms'), ('*.ms',), ('text/x-minicript', 'application/x-miniscript')),
+ 'ModelicaLexer': ('pip._vendor.pygments.lexers.modeling', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
+ 'Modula2Lexer': ('pip._vendor.pygments.lexers.modula2', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
+ 'MoinWikiLexer': ('pip._vendor.pygments.lexers.markup', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
+ 'MonkeyLexer': ('pip._vendor.pygments.lexers.basic', 'Monkey', ('monkey',), ('*.monkey',), ('text/x-monkey',)),
+ 'MonteLexer': ('pip._vendor.pygments.lexers.monte', 'Monte', ('monte',), ('*.mt',), ()),
+ 'MoonScriptLexer': ('pip._vendor.pygments.lexers.scripting', 'MoonScript', ('moonscript', 'moon'), ('*.moon',), ('text/x-moonscript', 'application/x-moonscript')),
+ 'MoselLexer': ('pip._vendor.pygments.lexers.mosel', 'Mosel', ('mosel',), ('*.mos',), ()),
+ 'MozPreprocCssLexer': ('pip._vendor.pygments.lexers.markup', 'CSS+mozpreproc', ('css+mozpreproc',), ('*.css.in',), ()),
+ 'MozPreprocHashLexer': ('pip._vendor.pygments.lexers.markup', 'mozhashpreproc', ('mozhashpreproc',), (), ()),
+ 'MozPreprocJavascriptLexer': ('pip._vendor.pygments.lexers.markup', 'Javascript+mozpreproc', ('javascript+mozpreproc',), ('*.js.in',), ()),
+ 'MozPreprocPercentLexer': ('pip._vendor.pygments.lexers.markup', 'mozpercentpreproc', ('mozpercentpreproc',), (), ()),
+ 'MozPreprocXulLexer': ('pip._vendor.pygments.lexers.markup', 'XUL+mozpreproc', ('xul+mozpreproc',), ('*.xul.in',), ()),
+ 'MqlLexer': ('pip._vendor.pygments.lexers.c_like', 'MQL', ('mql', 'mq4', 'mq5', 'mql4', 'mql5'), ('*.mq4', '*.mq5', '*.mqh'), ('text/x-mql',)),
+ 'MscgenLexer': ('pip._vendor.pygments.lexers.dsls', 'Mscgen', ('mscgen', 'msc'), ('*.msc',), ()),
+ 'MuPADLexer': ('pip._vendor.pygments.lexers.algebra', 'MuPAD', ('mupad',), ('*.mu',), ()),
+ 'MxmlLexer': ('pip._vendor.pygments.lexers.actionscript', 'MXML', ('mxml',), ('*.mxml',), ()),
+ 'MySqlLexer': ('pip._vendor.pygments.lexers.sql', 'MySQL', ('mysql',), (), ('text/x-mysql',)),
+ 'MyghtyCssLexer': ('pip._vendor.pygments.lexers.templates', 'CSS+Myghty', ('css+myghty',), (), ('text/css+myghty',)),
+ 'MyghtyHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Myghty', ('html+myghty',), (), ('text/html+myghty',)),
+ 'MyghtyJavascriptLexer': ('pip._vendor.pygments.lexers.templates', 'JavaScript+Myghty', ('javascript+myghty', 'js+myghty'), (), ('application/x-javascript+myghty', 'text/x-javascript+myghty', 'text/javascript+mygthy')),
+ 'MyghtyLexer': ('pip._vendor.pygments.lexers.templates', 'Myghty', ('myghty',), ('*.myt', 'autodelegate'), ('application/x-myghty',)),
+ 'MyghtyXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Myghty', ('xml+myghty',), (), ('application/xml+myghty',)),
+ 'NCLLexer': ('pip._vendor.pygments.lexers.ncl', 'NCL', ('ncl',), ('*.ncl',), ('text/ncl',)),
+ 'NSISLexer': ('pip._vendor.pygments.lexers.installers', 'NSIS', ('nsis', 'nsi', 'nsh'), ('*.nsi', '*.nsh'), ('text/x-nsis',)),
+ 'NasmLexer': ('pip._vendor.pygments.lexers.asm', 'NASM', ('nasm',), ('*.asm', '*.ASM'), ('text/x-nasm',)),
+ 'NasmObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump-nasm', ('objdump-nasm',), ('*.objdump-intel',), ('text/x-nasm-objdump',)),
+ 'NemerleLexer': ('pip._vendor.pygments.lexers.dotnet', 'Nemerle', ('nemerle',), ('*.n',), ('text/x-nemerle',)),
+ 'NesCLexer': ('pip._vendor.pygments.lexers.c_like', 'nesC', ('nesc',), ('*.nc',), ('text/x-nescsrc',)),
+ 'NestedTextLexer': ('pip._vendor.pygments.lexers.configs', 'NestedText', ('nestedtext', 'nt'), ('*.nt',), ()),
+ 'NewLispLexer': ('pip._vendor.pygments.lexers.lisp', 'NewLisp', ('newlisp',), ('*.lsp', '*.nl', '*.kif'), ('text/x-newlisp', 'application/x-newlisp')),
+ 'NewspeakLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Newspeak', ('newspeak',), ('*.ns2',), ('text/x-newspeak',)),
+ 'NginxConfLexer': ('pip._vendor.pygments.lexers.configs', 'Nginx configuration file', ('nginx',), ('nginx.conf',), ('text/x-nginx-conf',)),
+ 'NimrodLexer': ('pip._vendor.pygments.lexers.nimrod', 'Nimrod', ('nimrod', 'nim'), ('*.nim', '*.nimrod'), ('text/x-nim',)),
+ 'NitLexer': ('pip._vendor.pygments.lexers.nit', 'Nit', ('nit',), ('*.nit',), ()),
+ 'NixLexer': ('pip._vendor.pygments.lexers.nix', 'Nix', ('nixos', 'nix'), ('*.nix',), ('text/x-nix',)),
+ 'NodeConsoleLexer': ('pip._vendor.pygments.lexers.javascript', 'Node.js REPL console session', ('nodejsrepl',), (), ('text/x-nodejsrepl',)),
+ 'NotmuchLexer': ('pip._vendor.pygments.lexers.textfmts', 'Notmuch', ('notmuch',), (), ()),
+ 'NuSMVLexer': ('pip._vendor.pygments.lexers.smv', 'NuSMV', ('nusmv',), ('*.smv',), ()),
+ 'NumPyLexer': ('pip._vendor.pygments.lexers.python', 'NumPy', ('numpy',), (), ()),
+ 'ObjdumpLexer': ('pip._vendor.pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
+ 'ObjectiveCLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m', '*.h'), ('text/x-objective-c',)),
+ 'ObjectiveCppLexer': ('pip._vendor.pygments.lexers.objective', 'Objective-C++', ('objective-c++', 'objectivec++', 'obj-c++', 'objc++'), ('*.mm', '*.hh'), ('text/x-objective-c++',)),
+ 'ObjectiveJLexer': ('pip._vendor.pygments.lexers.javascript', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
+ 'OcamlLexer': ('pip._vendor.pygments.lexers.ml', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
+ 'OctaveLexer': ('pip._vendor.pygments.lexers.matlab', 'Octave', ('octave',), ('*.m',), ('text/octave',)),
+ 'OdinLexer': ('pip._vendor.pygments.lexers.archetype', 'ODIN', ('odin',), ('*.odin',), ('text/odin',)),
+ 'OmgIdlLexer': ('pip._vendor.pygments.lexers.c_like', 'OMG Interface Definition Language', ('omg-idl',), ('*.idl', '*.pidl'), ()),
+ 'OocLexer': ('pip._vendor.pygments.lexers.ooc', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
+ 'OpaLexer': ('pip._vendor.pygments.lexers.ml', 'Opa', ('opa',), ('*.opa',), ('text/x-opa',)),
+ 'OpenEdgeLexer': ('pip._vendor.pygments.lexers.business', 'OpenEdge ABL', ('openedge', 'abl', 'progress'), ('*.p', '*.cls'), ('text/x-openedge', 'application/x-openedge')),
+ 'OutputLexer': ('pip._vendor.pygments.lexers.special', 'Text output', ('output',), (), ()),
+ 'PacmanConfLexer': ('pip._vendor.pygments.lexers.configs', 'PacmanConf', ('pacmanconf',), ('pacman.conf',), ()),
+ 'PanLexer': ('pip._vendor.pygments.lexers.dsls', 'Pan', ('pan',), ('*.pan',), ()),
+ 'ParaSailLexer': ('pip._vendor.pygments.lexers.parasail', 'ParaSail', ('parasail',), ('*.psi', '*.psl'), ('text/x-parasail',)),
+ 'PawnLexer': ('pip._vendor.pygments.lexers.pawn', 'Pawn', ('pawn',), ('*.p', '*.pwn', '*.inc'), ('text/x-pawn',)),
+ 'PegLexer': ('pip._vendor.pygments.lexers.grammar_notation', 'PEG', ('peg',), ('*.peg',), ('text/x-peg',)),
+ 'Perl6Lexer': ('pip._vendor.pygments.lexers.perl', 'Perl6', ('perl6', 'pl6', 'raku'), ('*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6', '*.6pm', '*.p6m', '*.pm6', '*.t', '*.raku', '*.rakumod', '*.rakutest', '*.rakudoc'), ('text/x-perl6', 'application/x-perl6')),
+ 'PerlLexer': ('pip._vendor.pygments.lexers.perl', 'Perl', ('perl', 'pl'), ('*.pl', '*.pm', '*.t', '*.perl'), ('text/x-perl', 'application/x-perl')),
+ 'PhpLexer': ('pip._vendor.pygments.lexers.php', 'PHP', ('php', 'php3', 'php4', 'php5'), ('*.php', '*.php[345]', '*.inc'), ('text/x-php',)),
+ 'PigLexer': ('pip._vendor.pygments.lexers.jvm', 'Pig', ('pig',), ('*.pig',), ('text/x-pig',)),
+ 'PikeLexer': ('pip._vendor.pygments.lexers.c_like', 'Pike', ('pike',), ('*.pike', '*.pmod'), ('text/x-pike',)),
+ 'PkgConfigLexer': ('pip._vendor.pygments.lexers.configs', 'PkgConfig', ('pkgconfig',), ('*.pc',), ()),
+ 'PlPgsqlLexer': ('pip._vendor.pygments.lexers.sql', 'PL/pgSQL', ('plpgsql',), (), ('text/x-plpgsql',)),
+ 'PointlessLexer': ('pip._vendor.pygments.lexers.pointless', 'Pointless', ('pointless',), ('*.ptls',), ()),
+ 'PonyLexer': ('pip._vendor.pygments.lexers.pony', 'Pony', ('pony',), ('*.pony',), ()),
+ 'PostScriptLexer': ('pip._vendor.pygments.lexers.graphics', 'PostScript', ('postscript', 'postscr'), ('*.ps', '*.eps'), ('application/postscript',)),
+ 'PostgresConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL console (psql)', ('psql', 'postgresql-console', 'postgres-console'), (), ('text/x-postgresql-psql',)),
+ 'PostgresLexer': ('pip._vendor.pygments.lexers.sql', 'PostgreSQL SQL dialect', ('postgresql', 'postgres'), (), ('text/x-postgresql',)),
+ 'PovrayLexer': ('pip._vendor.pygments.lexers.graphics', 'POVRay', ('pov',), ('*.pov', '*.inc'), ('text/x-povray',)),
+ 'PowerShellLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell', ('powershell', 'pwsh', 'posh', 'ps1', 'psm1'), ('*.ps1', '*.psm1'), ('text/x-powershell',)),
+ 'PowerShellSessionLexer': ('pip._vendor.pygments.lexers.shell', 'PowerShell Session', ('pwsh-session', 'ps1con'), (), ()),
+ 'PraatLexer': ('pip._vendor.pygments.lexers.praat', 'Praat', ('praat',), ('*.praat', '*.proc', '*.psc'), ()),
+ 'ProcfileLexer': ('pip._vendor.pygments.lexers.procfile', 'Procfile', ('procfile',), ('Procfile',), ()),
+ 'PrologLexer': ('pip._vendor.pygments.lexers.prolog', 'Prolog', ('prolog',), ('*.ecl', '*.prolog', '*.pro', '*.pl'), ('text/x-prolog',)),
+ 'PromQLLexer': ('pip._vendor.pygments.lexers.promql', 'PromQL', ('promql',), ('*.promql',), ()),
+ 'PropertiesLexer': ('pip._vendor.pygments.lexers.configs', 'Properties', ('properties', 'jproperties'), ('*.properties',), ('text/x-java-properties',)),
+ 'ProtoBufLexer': ('pip._vendor.pygments.lexers.dsls', 'Protocol Buffer', ('protobuf', 'proto'), ('*.proto',), ()),
+ 'PsyshConsoleLexer': ('pip._vendor.pygments.lexers.php', 'PsySH console session for PHP', ('psysh',), (), ()),
+ 'PugLexer': ('pip._vendor.pygments.lexers.html', 'Pug', ('pug', 'jade'), ('*.pug', '*.jade'), ('text/x-pug', 'text/x-jade')),
+ 'PuppetLexer': ('pip._vendor.pygments.lexers.dsls', 'Puppet', ('puppet',), ('*.pp',), ()),
+ 'PyPyLogLexer': ('pip._vendor.pygments.lexers.console', 'PyPy Log', ('pypylog', 'pypy'), ('*.pypylog',), ('application/x-pypylog',)),
+ 'Python2Lexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x', ('python2', 'py2'), (), ('text/x-python2', 'application/x-python2')),
+ 'Python2TracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python 2.x Traceback', ('py2tb',), ('*.py2tb',), ('text/x-python2-traceback',)),
+ 'PythonConsoleLexer': ('pip._vendor.pygments.lexers.python', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
+ 'PythonLexer': ('pip._vendor.pygments.lexers.python', 'Python', ('python', 'py', 'sage', 'python3', 'py3'), ('*.py', '*.pyw', '*.jy', '*.sage', '*.sc', 'SConstruct', 'SConscript', '*.bzl', 'BUCK', 'BUILD', 'BUILD.bazel', 'WORKSPACE', '*.tac'), ('text/x-python', 'application/x-python', 'text/x-python3', 'application/x-python3')),
+ 'PythonTracebackLexer': ('pip._vendor.pygments.lexers.python', 'Python Traceback', ('pytb', 'py3tb'), ('*.pytb', '*.py3tb'), ('text/x-python-traceback', 'text/x-python3-traceback')),
+ 'PythonUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'Python+UL4', ('py+ul4',), ('*.pyul4',), ()),
+ 'QBasicLexer': ('pip._vendor.pygments.lexers.basic', 'QBasic', ('qbasic', 'basic'), ('*.BAS', '*.bas'), ('text/basic',)),
+ 'QLexer': ('pip._vendor.pygments.lexers.q', 'Q', ('q',), ('*.q',), ()),
+ 'QVToLexer': ('pip._vendor.pygments.lexers.qvt', 'QVTO', ('qvto', 'qvt'), ('*.qvto',), ()),
+ 'QlikLexer': ('pip._vendor.pygments.lexers.qlik', 'Qlik', ('qlik', 'qlikview', 'qliksense', 'qlikscript'), ('*.qvs', '*.qvw'), ()),
+ 'QmlLexer': ('pip._vendor.pygments.lexers.webmisc', 'QML', ('qml', 'qbs'), ('*.qml', '*.qbs'), ('application/x-qml', 'application/x-qt.qbs+qml')),
+ 'RConsoleLexer': ('pip._vendor.pygments.lexers.r', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
+ 'RNCCompactLexer': ('pip._vendor.pygments.lexers.rnc', 'Relax-NG Compact', ('rng-compact', 'rnc'), ('*.rnc',), ()),
+ 'RPMSpecLexer': ('pip._vendor.pygments.lexers.installers', 'RPMSpec', ('spec',), ('*.spec',), ('text/x-rpm-spec',)),
+ 'RacketLexer': ('pip._vendor.pygments.lexers.lisp', 'Racket', ('racket', 'rkt'), ('*.rkt', '*.rktd', '*.rktl'), ('text/x-racket', 'application/x-racket')),
+ 'RagelCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
+ 'RagelCppLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
+ 'RagelDLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
+ 'RagelEmbeddedLexer': ('pip._vendor.pygments.lexers.parsers', 'Embedded Ragel', ('ragel-em',), ('*.rl',), ()),
+ 'RagelJavaLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Java Host', ('ragel-java',), ('*.rl',), ()),
+ 'RagelLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel', ('ragel',), (), ()),
+ 'RagelObjectiveCLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Objective C Host', ('ragel-objc',), ('*.rl',), ()),
+ 'RagelRubyLexer': ('pip._vendor.pygments.lexers.parsers', 'Ragel in Ruby Host', ('ragel-ruby', 'ragel-rb'), ('*.rl',), ()),
+ 'RawTokenLexer': ('pip._vendor.pygments.lexers.special', 'Raw token data', (), (), ('application/x-pygments-tokens',)),
+ 'RdLexer': ('pip._vendor.pygments.lexers.r', 'Rd', ('rd',), ('*.Rd',), ('text/x-r-doc',)),
+ 'ReasonLexer': ('pip._vendor.pygments.lexers.ml', 'ReasonML', ('reasonml', 'reason'), ('*.re', '*.rei'), ('text/x-reasonml',)),
+ 'RebolLexer': ('pip._vendor.pygments.lexers.rebol', 'REBOL', ('rebol',), ('*.r', '*.r3', '*.reb'), ('text/x-rebol',)),
+ 'RedLexer': ('pip._vendor.pygments.lexers.rebol', 'Red', ('red', 'red/system'), ('*.red', '*.reds'), ('text/x-red', 'text/x-red-system')),
+ 'RedcodeLexer': ('pip._vendor.pygments.lexers.esoteric', 'Redcode', ('redcode',), ('*.cw',), ()),
+ 'RegeditLexer': ('pip._vendor.pygments.lexers.configs', 'reg', ('registry',), ('*.reg',), ('text/x-windows-registry',)),
+ 'ResourceLexer': ('pip._vendor.pygments.lexers.resource', 'ResourceBundle', ('resourcebundle', 'resource'), (), ()),
+ 'RexxLexer': ('pip._vendor.pygments.lexers.scripting', 'Rexx', ('rexx', 'arexx'), ('*.rexx', '*.rex', '*.rx', '*.arexx'), ('text/x-rexx',)),
+ 'RhtmlLexer': ('pip._vendor.pygments.lexers.templates', 'RHTML', ('rhtml', 'html+erb', 'html+ruby'), ('*.rhtml',), ('text/html+ruby',)),
+ 'RideLexer': ('pip._vendor.pygments.lexers.ride', 'Ride', ('ride',), ('*.ride',), ('text/x-ride',)),
+ 'RitaLexer': ('pip._vendor.pygments.lexers.rita', 'Rita', ('rita',), ('*.rita',), ('text/rita',)),
+ 'RoboconfGraphLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Graph', ('roboconf-graph',), ('*.graph',), ()),
+ 'RoboconfInstancesLexer': ('pip._vendor.pygments.lexers.roboconf', 'Roboconf Instances', ('roboconf-instances',), ('*.instances',), ()),
+ 'RobotFrameworkLexer': ('pip._vendor.pygments.lexers.robotframework', 'RobotFramework', ('robotframework',), ('*.robot', '*.resource'), ('text/x-robotframework',)),
+ 'RqlLexer': ('pip._vendor.pygments.lexers.sql', 'RQL', ('rql',), ('*.rql',), ('text/x-rql',)),
+ 'RslLexer': ('pip._vendor.pygments.lexers.dsls', 'RSL', ('rsl',), ('*.rsl',), ('text/rsl',)),
+ 'RstLexer': ('pip._vendor.pygments.lexers.markup', 'reStructuredText', ('restructuredtext', 'rst', 'rest'), ('*.rst', '*.rest'), ('text/x-rst', 'text/prs.fallenstein.rst')),
+ 'RtsLexer': ('pip._vendor.pygments.lexers.trafficscript', 'TrafficScript', ('trafficscript', 'rts'), ('*.rts',), ()),
+ 'RubyConsoleLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
+ 'RubyLexer': ('pip._vendor.pygments.lexers.ruby', 'Ruby', ('ruby', 'rb', 'duby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx', '*.duby', 'Gemfile', 'Vagrantfile'), ('text/x-ruby', 'application/x-ruby')),
+ 'RustLexer': ('pip._vendor.pygments.lexers.rust', 'Rust', ('rust', 'rs'), ('*.rs', '*.rs.in'), ('text/rust', 'text/x-rust')),
+ 'SASLexer': ('pip._vendor.pygments.lexers.sas', 'SAS', ('sas',), ('*.SAS', '*.sas'), ('text/x-sas', 'text/sas', 'application/x-sas')),
+ 'SLexer': ('pip._vendor.pygments.lexers.r', 'S', ('splus', 's', 'r'), ('*.S', '*.R', '.Rhistory', '.Rprofile', '.Renviron'), ('text/S-plus', 'text/S', 'text/x-r-source', 'text/x-r', 'text/x-R', 'text/x-r-history', 'text/x-r-profile')),
+ 'SMLLexer': ('pip._vendor.pygments.lexers.ml', 'Standard ML', ('sml',), ('*.sml', '*.sig', '*.fun'), ('text/x-standardml', 'application/x-standardml')),
+ 'SNBTLexer': ('pip._vendor.pygments.lexers.mcfunction', 'SNBT', ('snbt',), ('*.snbt',), ('text/snbt',)),
+ 'SarlLexer': ('pip._vendor.pygments.lexers.jvm', 'SARL', ('sarl',), ('*.sarl',), ('text/x-sarl',)),
+ 'SassLexer': ('pip._vendor.pygments.lexers.css', 'Sass', ('sass',), ('*.sass',), ('text/x-sass',)),
+ 'SaviLexer': ('pip._vendor.pygments.lexers.savi', 'Savi', ('savi',), ('*.savi',), ()),
+ 'ScalaLexer': ('pip._vendor.pygments.lexers.jvm', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
+ 'ScamlLexer': ('pip._vendor.pygments.lexers.html', 'Scaml', ('scaml',), ('*.scaml',), ('text/x-scaml',)),
+ 'ScdocLexer': ('pip._vendor.pygments.lexers.scdoc', 'scdoc', ('scdoc', 'scd'), ('*.scd', '*.scdoc'), ()),
+ 'SchemeLexer': ('pip._vendor.pygments.lexers.lisp', 'Scheme', ('scheme', 'scm'), ('*.scm', '*.ss'), ('text/x-scheme', 'application/x-scheme')),
+ 'ScilabLexer': ('pip._vendor.pygments.lexers.matlab', 'Scilab', ('scilab',), ('*.sci', '*.sce', '*.tst'), ('text/scilab',)),
+ 'ScssLexer': ('pip._vendor.pygments.lexers.css', 'SCSS', ('scss',), ('*.scss',), ('text/x-scss',)),
+ 'SedLexer': ('pip._vendor.pygments.lexers.textedit', 'Sed', ('sed', 'gsed', 'ssed'), ('*.sed', '*.[gs]sed'), ('text/x-sed',)),
+ 'ShExCLexer': ('pip._vendor.pygments.lexers.rdf', 'ShExC', ('shexc', 'shex'), ('*.shex',), ('text/shex',)),
+ 'ShenLexer': ('pip._vendor.pygments.lexers.lisp', 'Shen', ('shen',), ('*.shen',), ('text/x-shen', 'application/x-shen')),
+ 'SieveLexer': ('pip._vendor.pygments.lexers.sieve', 'Sieve', ('sieve',), ('*.siv', '*.sieve'), ()),
+ 'SilverLexer': ('pip._vendor.pygments.lexers.verification', 'Silver', ('silver',), ('*.sil', '*.vpr'), ()),
+ 'SingularityLexer': ('pip._vendor.pygments.lexers.configs', 'Singularity', ('singularity',), ('*.def', 'Singularity'), ()),
+ 'SlashLexer': ('pip._vendor.pygments.lexers.slash', 'Slash', ('slash',), ('*.sla',), ()),
+ 'SlimLexer': ('pip._vendor.pygments.lexers.webmisc', 'Slim', ('slim',), ('*.slim',), ('text/x-slim',)),
+ 'SlurmBashLexer': ('pip._vendor.pygments.lexers.shell', 'Slurm', ('slurm', 'sbatch'), ('*.sl',), ()),
+ 'SmaliLexer': ('pip._vendor.pygments.lexers.dalvik', 'Smali', ('smali',), ('*.smali',), ('text/smali',)),
+ 'SmalltalkLexer': ('pip._vendor.pygments.lexers.smalltalk', 'Smalltalk', ('smalltalk', 'squeak', 'st'), ('*.st',), ('text/x-smalltalk',)),
+ 'SmartGameFormatLexer': ('pip._vendor.pygments.lexers.sgf', 'SmartGameFormat', ('sgf',), ('*.sgf',), ()),
+ 'SmartyLexer': ('pip._vendor.pygments.lexers.templates', 'Smarty', ('smarty',), ('*.tpl',), ('application/x-smarty',)),
+ 'SmithyLexer': ('pip._vendor.pygments.lexers.smithy', 'Smithy', ('smithy',), ('*.smithy',), ()),
+ 'SnobolLexer': ('pip._vendor.pygments.lexers.snobol', 'Snobol', ('snobol',), ('*.snobol',), ('text/x-snobol',)),
+ 'SnowballLexer': ('pip._vendor.pygments.lexers.dsls', 'Snowball', ('snowball',), ('*.sbl',), ()),
+ 'SolidityLexer': ('pip._vendor.pygments.lexers.solidity', 'Solidity', ('solidity',), ('*.sol',), ()),
+ 'SophiaLexer': ('pip._vendor.pygments.lexers.sophia', 'Sophia', ('sophia',), ('*.aes',), ()),
+ 'SourcePawnLexer': ('pip._vendor.pygments.lexers.pawn', 'SourcePawn', ('sp',), ('*.sp',), ('text/x-sourcepawn',)),
+ 'SourcesListLexer': ('pip._vendor.pygments.lexers.installers', 'Debian Sourcelist', ('debsources', 'sourceslist', 'sources.list'), ('sources.list',), ()),
+ 'SparqlLexer': ('pip._vendor.pygments.lexers.rdf', 'SPARQL', ('sparql',), ('*.rq', '*.sparql'), ('application/sparql-query',)),
+ 'SpiceLexer': ('pip._vendor.pygments.lexers.spice', 'Spice', ('spice', 'spicelang'), ('*.spice',), ('text/x-spice',)),
+ 'SqlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'SQL+Jinja', ('sql+jinja',), ('*.sql', '*.sql.j2', '*.sql.jinja2'), ()),
+ 'SqlLexer': ('pip._vendor.pygments.lexers.sql', 'SQL', ('sql',), ('*.sql',), ('text/x-sql',)),
+ 'SqliteConsoleLexer': ('pip._vendor.pygments.lexers.sql', 'sqlite3con', ('sqlite3',), ('*.sqlite3-console',), ('text/x-sqlite3-console',)),
+ 'SquidConfLexer': ('pip._vendor.pygments.lexers.configs', 'SquidConf', ('squidconf', 'squid.conf', 'squid'), ('squid.conf',), ('text/x-squidconf',)),
+ 'SrcinfoLexer': ('pip._vendor.pygments.lexers.srcinfo', 'Srcinfo', ('srcinfo',), ('.SRCINFO',), ()),
+ 'SspLexer': ('pip._vendor.pygments.lexers.templates', 'Scalate Server Page', ('ssp',), ('*.ssp',), ('application/x-ssp',)),
+ 'StanLexer': ('pip._vendor.pygments.lexers.modeling', 'Stan', ('stan',), ('*.stan',), ()),
+ 'StataLexer': ('pip._vendor.pygments.lexers.stata', 'Stata', ('stata', 'do'), ('*.do', '*.ado'), ('text/x-stata', 'text/stata', 'application/x-stata')),
+ 'SuperColliderLexer': ('pip._vendor.pygments.lexers.supercollider', 'SuperCollider', ('supercollider', 'sc'), ('*.sc', '*.scd'), ('application/supercollider', 'text/supercollider')),
+ 'SwiftLexer': ('pip._vendor.pygments.lexers.objective', 'Swift', ('swift',), ('*.swift',), ('text/x-swift',)),
+ 'SwigLexer': ('pip._vendor.pygments.lexers.c_like', 'SWIG', ('swig',), ('*.swg', '*.i'), ('text/swig',)),
+ 'SystemVerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'systemverilog', ('systemverilog', 'sv'), ('*.sv', '*.svh'), ('text/x-systemverilog',)),
+ 'TAPLexer': ('pip._vendor.pygments.lexers.testing', 'TAP', ('tap',), ('*.tap',), ()),
+ 'TNTLexer': ('pip._vendor.pygments.lexers.tnt', 'Typographic Number Theory', ('tnt',), ('*.tnt',), ()),
+ 'TOMLLexer': ('pip._vendor.pygments.lexers.configs', 'TOML', ('toml',), ('*.toml', 'Pipfile', 'poetry.lock'), ()),
+ 'Tads3Lexer': ('pip._vendor.pygments.lexers.int_fiction', 'TADS 3', ('tads3',), ('*.t',), ()),
+ 'TalLexer': ('pip._vendor.pygments.lexers.tal', 'Tal', ('tal', 'uxntal'), ('*.tal',), ('text/x-uxntal',)),
+ 'TasmLexer': ('pip._vendor.pygments.lexers.asm', 'TASM', ('tasm',), ('*.asm', '*.ASM', '*.tasm'), ('text/x-tasm',)),
+ 'TclLexer': ('pip._vendor.pygments.lexers.tcl', 'Tcl', ('tcl',), ('*.tcl', '*.rvt'), ('text/x-tcl', 'text/x-script.tcl', 'application/x-tcl')),
+ 'TcshLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh', ('tcsh', 'csh'), ('*.tcsh', '*.csh'), ('application/x-csh',)),
+ 'TcshSessionLexer': ('pip._vendor.pygments.lexers.shell', 'Tcsh Session', ('tcshcon',), (), ()),
+ 'TeaTemplateLexer': ('pip._vendor.pygments.lexers.templates', 'Tea', ('tea',), ('*.tea',), ('text/x-tea',)),
+ 'TealLexer': ('pip._vendor.pygments.lexers.teal', 'teal', ('teal',), ('*.teal',), ()),
+ 'TeraTermLexer': ('pip._vendor.pygments.lexers.teraterm', 'Tera Term macro', ('teratermmacro', 'teraterm', 'ttl'), ('*.ttl',), ('text/x-teratermmacro',)),
+ 'TermcapLexer': ('pip._vendor.pygments.lexers.configs', 'Termcap', ('termcap',), ('termcap', 'termcap.src'), ()),
+ 'TerminfoLexer': ('pip._vendor.pygments.lexers.configs', 'Terminfo', ('terminfo',), ('terminfo', 'terminfo.src'), ()),
+ 'TerraformLexer': ('pip._vendor.pygments.lexers.configs', 'Terraform', ('terraform', 'tf'), ('*.tf',), ('application/x-tf', 'application/x-terraform')),
+ 'TexLexer': ('pip._vendor.pygments.lexers.markup', 'TeX', ('tex', 'latex'), ('*.tex', '*.aux', '*.toc'), ('text/x-tex', 'text/x-latex')),
+ 'TextLexer': ('pip._vendor.pygments.lexers.special', 'Text only', ('text',), ('*.txt',), ('text/plain',)),
+ 'ThingsDBLexer': ('pip._vendor.pygments.lexers.thingsdb', 'ThingsDB', ('ti', 'thingsdb'), ('*.ti',), ()),
+ 'ThriftLexer': ('pip._vendor.pygments.lexers.dsls', 'Thrift', ('thrift',), ('*.thrift',), ('application/x-thrift',)),
+ 'TiddlyWiki5Lexer': ('pip._vendor.pygments.lexers.markup', 'tiddler', ('tid',), ('*.tid',), ('text/vnd.tiddlywiki',)),
+ 'TodotxtLexer': ('pip._vendor.pygments.lexers.textfmts', 'Todotxt', ('todotxt',), ('todo.txt', '*.todotxt'), ('text/x-todo',)),
+ 'TransactSqlLexer': ('pip._vendor.pygments.lexers.sql', 'Transact-SQL', ('tsql', 't-sql'), ('*.sql',), ('text/x-tsql',)),
+ 'TreetopLexer': ('pip._vendor.pygments.lexers.parsers', 'Treetop', ('treetop',), ('*.treetop', '*.tt'), ()),
+ 'TurtleLexer': ('pip._vendor.pygments.lexers.rdf', 'Turtle', ('turtle',), ('*.ttl',), ('text/turtle', 'application/x-turtle')),
+ 'TwigHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Twig', ('html+twig',), ('*.twig',), ('text/html+twig',)),
+ 'TwigLexer': ('pip._vendor.pygments.lexers.templates', 'Twig', ('twig',), (), ('application/x-twig',)),
+ 'TypeScriptLexer': ('pip._vendor.pygments.lexers.javascript', 'TypeScript', ('typescript', 'ts'), ('*.ts',), ('application/x-typescript', 'text/x-typescript')),
+ 'TypoScriptCssDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptCssData', ('typoscriptcssdata',), (), ()),
+ 'TypoScriptHtmlDataLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScriptHtmlData', ('typoscripthtmldata',), (), ()),
+ 'TypoScriptLexer': ('pip._vendor.pygments.lexers.typoscript', 'TypoScript', ('typoscript',), ('*.typoscript',), ('text/x-typoscript',)),
+ 'UL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'UL4', ('ul4',), ('*.ul4',), ()),
+ 'UcodeLexer': ('pip._vendor.pygments.lexers.unicon', 'ucode', ('ucode',), ('*.u', '*.u1', '*.u2'), ()),
+ 'UniconLexer': ('pip._vendor.pygments.lexers.unicon', 'Unicon', ('unicon',), ('*.icn',), ('text/unicon',)),
+ 'UnixConfigLexer': ('pip._vendor.pygments.lexers.configs', 'Unix/Linux config files', ('unixconfig', 'linuxconfig'), (), ()),
+ 'UrbiscriptLexer': ('pip._vendor.pygments.lexers.urbi', 'UrbiScript', ('urbiscript',), ('*.u',), ('application/x-urbiscript',)),
+ 'UsdLexer': ('pip._vendor.pygments.lexers.usd', 'USD', ('usd', 'usda'), ('*.usd', '*.usda'), ()),
+ 'VBScriptLexer': ('pip._vendor.pygments.lexers.basic', 'VBScript', ('vbscript',), ('*.vbs', '*.VBS'), ()),
+ 'VCLLexer': ('pip._vendor.pygments.lexers.varnish', 'VCL', ('vcl',), ('*.vcl',), ('text/x-vclsrc',)),
+ 'VCLSnippetLexer': ('pip._vendor.pygments.lexers.varnish', 'VCLSnippets', ('vclsnippets', 'vclsnippet'), (), ('text/x-vclsnippet',)),
+ 'VCTreeStatusLexer': ('pip._vendor.pygments.lexers.console', 'VCTreeStatus', ('vctreestatus',), (), ()),
+ 'VGLLexer': ('pip._vendor.pygments.lexers.dsls', 'VGL', ('vgl',), ('*.rpf',), ()),
+ 'ValaLexer': ('pip._vendor.pygments.lexers.c_like', 'Vala', ('vala', 'vapi'), ('*.vala', '*.vapi'), ('text/x-vala',)),
+ 'VbNetAspxLexer': ('pip._vendor.pygments.lexers.dotnet', 'aspx-vb', ('aspx-vb',), ('*.aspx', '*.asax', '*.ascx', '*.ashx', '*.asmx', '*.axd'), ()),
+ 'VbNetLexer': ('pip._vendor.pygments.lexers.dotnet', 'VB.net', ('vb.net', 'vbnet', 'lobas', 'oobas', 'sobas'), ('*.vb', '*.bas'), ('text/x-vbnet', 'text/x-vba')),
+ 'VelocityHtmlLexer': ('pip._vendor.pygments.lexers.templates', 'HTML+Velocity', ('html+velocity',), (), ('text/html+velocity',)),
+ 'VelocityLexer': ('pip._vendor.pygments.lexers.templates', 'Velocity', ('velocity',), ('*.vm', '*.fhtml'), ()),
+ 'VelocityXmlLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Velocity', ('xml+velocity',), (), ('application/xml+velocity',)),
+ 'VerilogLexer': ('pip._vendor.pygments.lexers.hdl', 'verilog', ('verilog', 'v'), ('*.v',), ('text/x-verilog',)),
+ 'VhdlLexer': ('pip._vendor.pygments.lexers.hdl', 'vhdl', ('vhdl',), ('*.vhdl', '*.vhd'), ('text/x-vhdl',)),
+ 'VimLexer': ('pip._vendor.pygments.lexers.textedit', 'VimL', ('vim',), ('*.vim', '.vimrc', '.exrc', '.gvimrc', '_vimrc', '_exrc', '_gvimrc', 'vimrc', 'gvimrc'), ('text/x-vim',)),
+ 'WDiffLexer': ('pip._vendor.pygments.lexers.diff', 'WDiff', ('wdiff',), ('*.wdiff',), ()),
+ 'WatLexer': ('pip._vendor.pygments.lexers.webassembly', 'WebAssembly', ('wast', 'wat'), ('*.wat', '*.wast'), ()),
+ 'WebIDLLexer': ('pip._vendor.pygments.lexers.webidl', 'Web IDL', ('webidl',), ('*.webidl',), ()),
+ 'WhileyLexer': ('pip._vendor.pygments.lexers.whiley', 'Whiley', ('whiley',), ('*.whiley',), ('text/x-whiley',)),
+ 'X10Lexer': ('pip._vendor.pygments.lexers.x10', 'X10', ('x10', 'xten'), ('*.x10',), ('text/x-x10',)),
+ 'XMLUL4Lexer': ('pip._vendor.pygments.lexers.ul4', 'XML+UL4', ('xml+ul4',), ('*.xmlul4',), ()),
+ 'XQueryLexer': ('pip._vendor.pygments.lexers.webmisc', 'XQuery', ('xquery', 'xqy', 'xq', 'xql', 'xqm'), ('*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm'), ('text/xquery', 'application/xquery')),
+ 'XmlDjangoLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Django/Jinja', ('xml+django', 'xml+jinja'), ('*.xml.j2', '*.xml.jinja2'), ('application/xml+django', 'application/xml+jinja')),
+ 'XmlErbLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Ruby', ('xml+ruby', 'xml+erb'), (), ('application/xml+ruby',)),
+ 'XmlLexer': ('pip._vendor.pygments.lexers.html', 'XML', ('xml',), ('*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl', '*.wsf'), ('text/xml', 'application/xml', 'image/svg+xml', 'application/rss+xml', 'application/atom+xml')),
+ 'XmlPhpLexer': ('pip._vendor.pygments.lexers.templates', 'XML+PHP', ('xml+php',), (), ('application/xml+php',)),
+ 'XmlSmartyLexer': ('pip._vendor.pygments.lexers.templates', 'XML+Smarty', ('xml+smarty',), (), ('application/xml+smarty',)),
+ 'XorgLexer': ('pip._vendor.pygments.lexers.xorg', 'Xorg', ('xorg.conf',), ('xorg.conf',), ()),
+ 'XsltLexer': ('pip._vendor.pygments.lexers.html', 'XSLT', ('xslt',), ('*.xsl', '*.xslt', '*.xpl'), ('application/xsl+xml', 'application/xslt+xml')),
+ 'XtendLexer': ('pip._vendor.pygments.lexers.jvm', 'Xtend', ('xtend',), ('*.xtend',), ('text/x-xtend',)),
+ 'XtlangLexer': ('pip._vendor.pygments.lexers.lisp', 'xtlang', ('extempore',), ('*.xtm',), ()),
+ 'YamlJinjaLexer': ('pip._vendor.pygments.lexers.templates', 'YAML+Jinja', ('yaml+jinja', 'salt', 'sls'), ('*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'), ('text/x-yaml+jinja', 'text/x-sls')),
+ 'YamlLexer': ('pip._vendor.pygments.lexers.data', 'YAML', ('yaml',), ('*.yaml', '*.yml'), ('text/x-yaml',)),
+ 'YangLexer': ('pip._vendor.pygments.lexers.yang', 'YANG', ('yang',), ('*.yang',), ('application/yang',)),
+ 'ZeekLexer': ('pip._vendor.pygments.lexers.dsls', 'Zeek', ('zeek', 'bro'), ('*.zeek', '*.bro'), ()),
+ 'ZephirLexer': ('pip._vendor.pygments.lexers.php', 'Zephir', ('zephir',), ('*.zep',), ()),
+ 'ZigLexer': ('pip._vendor.pygments.lexers.zig', 'Zig', ('zig',), ('*.zig',), ('text/zig',)),
+ 'apdlexer': ('pip._vendor.pygments.lexers.apdlexer', 'ANSYS parametric design language', ('ansys', 'apdl'), ('*.ans',), ()),
+}
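+
+# Each entry above maps a lexer class name to a tuple of
+# (module path, display name, aliases, filename globs, mimetypes).
+# Usage sketch (illustrative; assumes the standard pygments lookup API):
+#
+#     from pip._vendor.pygments.lexers import get_lexer_by_name
+#     lexer = get_lexer_by_name('python')  # resolved via the table above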
diff --git a/third_party/python/pip/pip/_vendor/pygments/lexers/python.py b/third_party/python/pip/pip/_vendor/pygments/lexers/python.py
new file mode 100644
index 0000000000..c24e3c86ef
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/lexers/python.py
@@ -0,0 +1,1204 @@
+"""
+ pygments.lexers.python
+ ~~~~~~~~~~~~~~~~~~~~~~
+
+ Lexers for Python and related languages.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+import keyword
+
+from pip._vendor.pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
+ default, words, combined, do_insertions, this
+from pip._vendor.pygments.util import get_bool_opt, shebang_matches
+from pip._vendor.pygments.token import Text, Comment, Operator, Keyword, Name, String, \
+ Number, Punctuation, Generic, Other, Error
+from pip._vendor.pygments import unistring as uni
+
+__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
+ 'Python2Lexer', 'Python2TracebackLexer',
+ 'CythonLexer', 'DgLexer', 'NumPyLexer']
+
+line_re = re.compile('.*?\n')
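+# (line_re matches one source line, including its trailing newline; the
+# console lexer below iterates over input with line_re.finditer.)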
+
+
+class PythonLexer(RegexLexer):
+ """
+ For Python source code (version 3.x).
+
+ .. versionadded:: 0.10
+
+ .. versionchanged:: 2.5
+ This is now the default ``PythonLexer``. It is still available as the
+ alias ``Python3Lexer``.
+ """
+
+ name = 'Python'
+ url = 'http://www.python.org'
+ aliases = ['python', 'py', 'sage', 'python3', 'py3']
+ filenames = [
+ '*.py',
+ '*.pyw',
+ # Jython
+ '*.jy',
+ # Sage
+ '*.sage',
+ # SCons
+ '*.sc',
+ 'SConstruct',
+ 'SConscript',
+ # Skylark/Starlark (used by Bazel, Buck, and Pants)
+ '*.bzl',
+ 'BUCK',
+ 'BUILD',
+ 'BUILD.bazel',
+ 'WORKSPACE',
+ # Twisted Application infrastructure
+ '*.tac',
+ ]
+ mimetypes = ['text/x-python', 'application/x-python',
+ 'text/x-python3', 'application/x-python3']
+
+ uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
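+ # uni_name matches one Unicode identifier: an XID_Start character
+ # followed by any number of XID_Continue characters.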
+
+ def innerstring_rules(ttype):
+ return [
+ # the old style '%s' % (...) string formatting (still valid in Py3)
+ (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[E-GXc-giorsaux%]', String.Interpol),
+ # the new style '{}'.format(...) string formatting
+ (r'\{'
+ r'((\w+)((\.\w+)|(\[[^\]]+\]))*)?' # field name
+ r'(\![sra])?' # conversion
+ r'(\:(.?[<>=\^])?[-+ ]?#?0?(\d+)?,?(\.\d+)?[E-GXb-gnosx%]?)?'
+ r'\}', String.Interpol),
+
+ # backslashes, quotes and formatting signs must be parsed one at a time
+ (r'[^\\\'"%{\n]+', ttype),
+ (r'[\'"\\]', ttype),
+ # unhandled string formatting sign
+ (r'%|(\{{1,2})', ttype)
+ # newlines are an error (use "nl" state)
+ ]
+
+ def fstring_rules(ttype):
+ return [
+ # Assume that a '}' is the closing brace after the format specifier.
+ # Sadly, this means we won't detect a syntax error, but it's more
+ # important to parse correct syntax correctly than to highlight
+ # invalid syntax.
+ (r'\}', String.Interpol),
+ (r'\{', String.Interpol, 'expr-inside-fstring'),
+ # backslashes, quotes and formatting signs must be parsed one at a time
+ (r'[^\\\'"{}\n]+', ttype),
+ (r'[\'"\\]', ttype),
+ # newlines are an error (use "nl" state)
+ ]
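+ # Illustrative example: in f"x={x!r:>10}" the braces, the '!r'
+ # conversion and the ':' before the format spec are emitted as
+ # String.Interpol, while the literal text keeps ``ttype``.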
+
+ tokens = {
+ 'root': [
+ (r'\n', Text),
+ (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
+ bygroups(Text, String.Affix, String.Doc)),
+ (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
+ bygroups(Text, String.Affix, String.Doc)),
+ (r'\A#!.+$', Comment.Hashbang),
+ (r'#.*$', Comment.Single),
+ (r'\\\n', Text),
+ (r'\\', Text),
+ include('keywords'),
+ include('soft-keywords'),
+ (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
+ (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
+ (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
+ 'fromimport'),
+ (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
+ 'import'),
+ include('expr'),
+ ],
+ 'expr': [
+ # raw f-strings
+ ('(?i)(rf|fr)(""")',
+ bygroups(String.Affix, String.Double),
+ combined('rfstringescape', 'tdqf')),
+ ("(?i)(rf|fr)(''')",
+ bygroups(String.Affix, String.Single),
+ combined('rfstringescape', 'tsqf')),
+ ('(?i)(rf|fr)(")',
+ bygroups(String.Affix, String.Double),
+ combined('rfstringescape', 'dqf')),
+ ("(?i)(rf|fr)(')",
+ bygroups(String.Affix, String.Single),
+ combined('rfstringescape', 'sqf')),
+ # non-raw f-strings
+ ('([fF])(""")', bygroups(String.Affix, String.Double),
+ combined('fstringescape', 'tdqf')),
+ ("([fF])(''')", bygroups(String.Affix, String.Single),
+ combined('fstringescape', 'tsqf')),
+ ('([fF])(")', bygroups(String.Affix, String.Double),
+ combined('fstringescape', 'dqf')),
+ ("([fF])(')", bygroups(String.Affix, String.Single),
+ combined('fstringescape', 'sqf')),
+ # raw bytes and strings
+ ('(?i)(rb|br|r)(""")',
+ bygroups(String.Affix, String.Double), 'tdqs'),
+ ("(?i)(rb|br|r)(''')",
+ bygroups(String.Affix, String.Single), 'tsqs'),
+ ('(?i)(rb|br|r)(")',
+ bygroups(String.Affix, String.Double), 'dqs'),
+ ("(?i)(rb|br|r)(')",
+ bygroups(String.Affix, String.Single), 'sqs'),
+ # non-raw strings
+ ('([uU]?)(""")', bygroups(String.Affix, String.Double),
+ combined('stringescape', 'tdqs')),
+ ("([uU]?)(''')", bygroups(String.Affix, String.Single),
+ combined('stringescape', 'tsqs')),
+ ('([uU]?)(")', bygroups(String.Affix, String.Double),
+ combined('stringescape', 'dqs')),
+ ("([uU]?)(')", bygroups(String.Affix, String.Single),
+ combined('stringescape', 'sqs')),
+ # non-raw bytes
+ ('([bB])(""")', bygroups(String.Affix, String.Double),
+ combined('bytesescape', 'tdqs')),
+ ("([bB])(''')", bygroups(String.Affix, String.Single),
+ combined('bytesescape', 'tsqs')),
+ ('([bB])(")', bygroups(String.Affix, String.Double),
+ combined('bytesescape', 'dqs')),
+ ("([bB])(')", bygroups(String.Affix, String.Single),
+ combined('bytesescape', 'sqs')),
+
+ (r'[^\S\n]+', Text),
+ include('numbers'),
+ (r'!=|==|<<|>>|:=|[-~+/*%=<>&^|.]', Operator),
+ (r'[]{}:(),;[]', Punctuation),
+ (r'(in|is|and|or|not)\b', Operator.Word),
+ include('expr-keywords'),
+ include('builtins'),
+ include('magicfuncs'),
+ include('magicvars'),
+ include('name'),
+ ],
+ 'expr-inside-fstring': [
+ (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
+ # without format specifier
+ (r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
+ r'(\![sraf])?' # conversion
+ r'\}', String.Interpol, '#pop'),
+ # with format specifier
+ # we'll catch the remaining '}' in the outer scope
+ (r'(=\s*)?' # debug (https://bugs.python.org/issue36817)
+ r'(\![sraf])?' # conversion
+ r':', String.Interpol, '#pop'),
+ (r'\s+', Text), # allow new lines
+ include('expr'),
+ ],
+ 'expr-inside-fstring-inner': [
+ (r'[{([]', Punctuation, 'expr-inside-fstring-inner'),
+ (r'[])}]', Punctuation, '#pop'),
+ (r'\s+', Text), # allow new lines
+ include('expr'),
+ ],
+ 'expr-keywords': [
+ # Based on https://docs.python.org/3/reference/expressions.html
+ (words((
+ 'async for', 'await', 'else', 'for', 'if', 'lambda',
+ 'yield', 'yield from'), suffix=r'\b'),
+ Keyword),
+ (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
+ ],
+ 'keywords': [
+ (words((
+ 'assert', 'async', 'await', 'break', 'continue', 'del', 'elif',
+ 'else', 'except', 'finally', 'for', 'global', 'if', 'lambda',
+ 'pass', 'raise', 'nonlocal', 'return', 'try', 'while', 'yield',
+ 'yield from', 'as', 'with'), suffix=r'\b'),
+ Keyword),
+ (words(('True', 'False', 'None'), suffix=r'\b'), Keyword.Constant),
+ ],
+ 'soft-keywords': [
+ # `match`, `case` and `_` soft keywords
+ (r'(^[ \t]*)' # at beginning of line + possible indentation
+ r'(match|case)\b' # a possible keyword
+ r'(?![ \t]*(?:' # not followed by...
+ r'[:,;=^&|@~)\]}]|(?:' + # characters and keywords that mean this isn't
+ r'|'.join(keyword.kwlist) + r')\b))', # pattern matching
+ bygroups(Text, Keyword), 'soft-keywords-inner'),
+ ],
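+ # e.g. ``match point:`` starts a match statement, while ``match = 1``
+ # keeps ``match`` as a plain name (hence the negative lookahead above).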
+ 'soft-keywords-inner': [
+ # optional `_` keyword
+ (r'(\s+)([^\n_]*)(_\b)', bygroups(Text, using(this), Keyword)),
+ default('#pop')
+ ],
+ 'builtins': [
+ (words((
+ '__import__', 'abs', 'all', 'any', 'bin', 'bool', 'bytearray',
+ 'breakpoint', 'bytes', 'chr', 'classmethod', 'compile', 'complex',
+ 'delattr', 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'filter',
+ 'float', 'format', 'frozenset', 'getattr', 'globals', 'hasattr',
+ 'hash', 'hex', 'id', 'input', 'int', 'isinstance', 'issubclass',
+ 'iter', 'len', 'list', 'locals', 'map', 'max', 'memoryview',
+ 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'print',
+ 'property', 'range', 'repr', 'reversed', 'round', 'set', 'setattr',
+ 'slice', 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple',
+ 'type', 'vars', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
+ Name.Builtin),
+ (r'(?<!\.)(self|Ellipsis|NotImplemented|cls)\b', Name.Builtin.Pseudo),
+ (words((
+ 'ArithmeticError', 'AssertionError', 'AttributeError',
+ 'BaseException', 'BufferError', 'BytesWarning', 'DeprecationWarning',
+ 'EOFError', 'EnvironmentError', 'Exception', 'FloatingPointError',
+ 'FutureWarning', 'GeneratorExit', 'IOError', 'ImportError',
+ 'ImportWarning', 'IndentationError', 'IndexError', 'KeyError',
+ 'KeyboardInterrupt', 'LookupError', 'MemoryError', 'NameError',
+ 'NotImplementedError', 'OSError', 'OverflowError',
+ 'PendingDeprecationWarning', 'ReferenceError', 'ResourceWarning',
+ 'RuntimeError', 'RuntimeWarning', 'StopIteration',
+ 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
+ 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
+ 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
+ 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError',
+ 'Warning', 'WindowsError', 'ZeroDivisionError',
+ # new builtin exceptions from PEP 3151
+ 'BlockingIOError', 'ChildProcessError', 'ConnectionError',
+ 'BrokenPipeError', 'ConnectionAbortedError', 'ConnectionRefusedError',
+ 'ConnectionResetError', 'FileExistsError', 'FileNotFoundError',
+ 'InterruptedError', 'IsADirectoryError', 'NotADirectoryError',
+ 'PermissionError', 'ProcessLookupError', 'TimeoutError',
+ # others new in Python 3
+ 'StopAsyncIteration', 'ModuleNotFoundError', 'RecursionError',
+ 'EncodingWarning'),
+ prefix=r'(?<!\.)', suffix=r'\b'),
+ Name.Exception),
+ ],
+ 'magicfuncs': [
+ (words((
+ '__abs__', '__add__', '__aenter__', '__aexit__', '__aiter__',
+ '__and__', '__anext__', '__await__', '__bool__', '__bytes__',
+ '__call__', '__complex__', '__contains__', '__del__', '__delattr__',
+ '__delete__', '__delitem__', '__dir__', '__divmod__', '__enter__',
+ '__eq__', '__exit__', '__float__', '__floordiv__', '__format__',
+ '__ge__', '__get__', '__getattr__', '__getattribute__',
+ '__getitem__', '__gt__', '__hash__', '__iadd__', '__iand__',
+ '__ifloordiv__', '__ilshift__', '__imatmul__', '__imod__',
+ '__imul__', '__index__', '__init__', '__instancecheck__',
+ '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
+ '__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__',
+ '__len__', '__length_hint__', '__lshift__', '__lt__', '__matmul__',
+ '__missing__', '__mod__', '__mul__', '__ne__', '__neg__',
+ '__new__', '__next__', '__or__', '__pos__', '__pow__',
+ '__prepare__', '__radd__', '__rand__', '__rdivmod__', '__repr__',
+ '__reversed__', '__rfloordiv__', '__rlshift__', '__rmatmul__',
+ '__rmod__', '__rmul__', '__ror__', '__round__', '__rpow__',
+ '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
+ '__rxor__', '__set__', '__setattr__', '__setitem__', '__str__',
+ '__sub__', '__subclasscheck__', '__truediv__',
+ '__xor__'), suffix=r'\b'),
+ Name.Function.Magic),
+ ],
+ 'magicvars': [
+ (words((
+ '__annotations__', '__bases__', '__class__', '__closure__',
+ '__code__', '__defaults__', '__dict__', '__doc__', '__file__',
+ '__func__', '__globals__', '__kwdefaults__', '__module__',
+ '__mro__', '__name__', '__objclass__', '__qualname__',
+ '__self__', '__slots__', '__weakref__'), suffix=r'\b'),
+ Name.Variable.Magic),
+ ],
+ 'numbers': [
+ (r'(\d(?:_?\d)*\.(?:\d(?:_?\d)*)?|(?:\d(?:_?\d)*)?\.\d(?:_?\d)*)'
+ r'([eE][+-]?\d(?:_?\d)*)?', Number.Float),
+ (r'\d(?:_?\d)*[eE][+-]?\d(?:_?\d)*j?', Number.Float),
+ (r'0[oO](?:_?[0-7])+', Number.Oct),
+ (r'0[bB](?:_?[01])+', Number.Bin),
+ (r'0[xX](?:_?[a-fA-F0-9])+', Number.Hex),
+ (r'\d(?:_?\d)*', Number.Integer),
+ ],
+ 'name': [
+ (r'@' + uni_name, Name.Decorator),
+ (r'@', Operator), # new matrix multiplication operator
+ (uni_name, Name),
+ ],
+ 'funcname': [
+ include('magicfuncs'),
+ (uni_name, Name.Function, '#pop'),
+ default('#pop'),
+ ],
+ 'classname': [
+ (uni_name, Name.Class, '#pop'),
+ ],
+ 'import': [
+ (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
+ (r'\.', Name.Namespace),
+ (uni_name, Name.Namespace),
+ (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
+ default('#pop') # all else: go back
+ ],
+ 'fromimport': [
+ (r'(\s+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
+ (r'\.', Name.Namespace),
+ # if None occurs here, it's "raise x from None", since None can
+ # never be a module name
+ (r'None\b', Name.Builtin.Pseudo, '#pop'),
+ (uni_name, Name.Namespace),
+ default('#pop'),
+ ],
+ 'rfstringescape': [
+ (r'\{\{', String.Escape),
+ (r'\}\}', String.Escape),
+ ],
+ 'fstringescape': [
+ include('rfstringescape'),
+ include('stringescape'),
+ ],
+ 'bytesescape': [
+ (r'\\([\\abfnrtv"\']|\n|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ ],
+ 'stringescape': [
+ (r'\\(N\{.*?\}|u[a-fA-F0-9]{4}|U[a-fA-F0-9]{8})', String.Escape),
+ include('bytesescape')
+ ],
+ 'fstrings-single': fstring_rules(String.Single),
+ 'fstrings-double': fstring_rules(String.Double),
+ 'strings-single': innerstring_rules(String.Single),
+ 'strings-double': innerstring_rules(String.Double),
+ 'dqf': [
+ (r'"', String.Double, '#pop'),
+ (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
+ include('fstrings-double')
+ ],
+ 'sqf': [
+ (r"'", String.Single, '#pop'),
+ (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
+ include('fstrings-single')
+ ],
+ 'dqs': [
+ (r'"', String.Double, '#pop'),
+ (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
+ include('strings-double')
+ ],
+ 'sqs': [
+ (r"'", String.Single, '#pop'),
+ (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
+ include('strings-single')
+ ],
+ 'tdqf': [
+ (r'"""', String.Double, '#pop'),
+ include('fstrings-double'),
+ (r'\n', String.Double)
+ ],
+ 'tsqf': [
+ (r"'''", String.Single, '#pop'),
+ include('fstrings-single'),
+ (r'\n', String.Single)
+ ],
+ 'tdqs': [
+ (r'"""', String.Double, '#pop'),
+ include('strings-double'),
+ (r'\n', String.Double)
+ ],
+ 'tsqs': [
+ (r"'''", String.Single, '#pop'),
+ include('strings-single'),
+ (r'\n', String.Single)
+ ],
+ }
+
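+ # analyse_text returns a confidence score in [0, 1] (booleans compare
+ # as 0/1); guess_lexer() picks the lexer whose analyse_text scores
+ # highest when no filename is available.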
+ def analyse_text(text):
+ return shebang_matches(text, r'pythonw?(3(\.\d)?)?') or \
+ 'import ' in text[:1000]
+
+
+Python3Lexer = PythonLexer
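+
+# Minimal usage sketch (illustrative; assumes the standard pygments API):
+#
+#     from pip._vendor.pygments import highlight
+#     from pip._vendor.pygments.formatters import TerminalFormatter
+#     print(highlight('print("hi")', PythonLexer(), TerminalFormatter()))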
+
+
+class Python2Lexer(RegexLexer):
+ """
+ For Python 2.x source code.
+
+ .. versionchanged:: 2.5
+ This class has been renamed from ``PythonLexer``. ``PythonLexer`` now
+ refers to the Python 3 variant. File name patterns like ``*.py`` have
+ been moved to Python 3 as well.
+ """
+
+ name = 'Python 2.x'
+ url = 'http://www.python.org'
+ aliases = ['python2', 'py2']
+ filenames = [] # now taken over by PythonLexer (3.x)
+ mimetypes = ['text/x-python2', 'application/x-python2']
+
+ def innerstring_rules(ttype):
+ return [
+ # the old style '%s' % (...) string formatting
+ (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
+ # backslashes, quotes and formatting signs must be parsed one at a time
+ (r'[^\\\'"%\n]+', ttype),
+ (r'[\'"\\]', ttype),
+ # unhandled string formatting sign
+ (r'%', ttype),
+ # newlines are an error (use "nl" state)
+ ]
+
+ tokens = {
+ 'root': [
+ (r'\n', Text),
+ (r'^(\s*)([rRuUbB]{,2})("""(?:.|\n)*?""")',
+ bygroups(Text, String.Affix, String.Doc)),
+ (r"^(\s*)([rRuUbB]{,2})('''(?:.|\n)*?''')",
+ bygroups(Text, String.Affix, String.Doc)),
+ (r'[^\S\n]+', Text),
+ (r'\A#!.+$', Comment.Hashbang),
+ (r'#.*$', Comment.Single),
+ (r'[]{}:(),;[]', Punctuation),
+ (r'\\\n', Text),
+ (r'\\', Text),
+ (r'(in|is|and|or|not)\b', Operator.Word),
+ (r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
+ include('keywords'),
+ (r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
+ (r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
+ (r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
+ 'fromimport'),
+ (r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text),
+ 'import'),
+ include('builtins'),
+ include('magicfuncs'),
+ include('magicvars'),
+ include('backtick'),
+ ('([rR]|[uUbB][rR]|[rR][uUbB])(""")',
+ bygroups(String.Affix, String.Double), 'tdqs'),
+ ("([rR]|[uUbB][rR]|[rR][uUbB])(''')",
+ bygroups(String.Affix, String.Single), 'tsqs'),
+ ('([rR]|[uUbB][rR]|[rR][uUbB])(")',
+ bygroups(String.Affix, String.Double), 'dqs'),
+ ("([rR]|[uUbB][rR]|[rR][uUbB])(')",
+ bygroups(String.Affix, String.Single), 'sqs'),
+ ('([uUbB]?)(""")', bygroups(String.Affix, String.Double),
+ combined('stringescape', 'tdqs')),
+ ("([uUbB]?)(''')", bygroups(String.Affix, String.Single),
+ combined('stringescape', 'tsqs')),
+ ('([uUbB]?)(")', bygroups(String.Affix, String.Double),
+ combined('stringescape', 'dqs')),
+ ("([uUbB]?)(')", bygroups(String.Affix, String.Single),
+ combined('stringescape', 'sqs')),
+ include('name'),
+ include('numbers'),
+ ],
+ 'keywords': [
+ (words((
+ 'assert', 'break', 'continue', 'del', 'elif', 'else', 'except',
+ 'exec', 'finally', 'for', 'global', 'if', 'lambda', 'pass',
+ 'print', 'raise', 'return', 'try', 'while', 'yield',
+ 'yield from', 'as', 'with'), suffix=r'\b'),
+ Keyword),
+ ],
+ 'builtins': [
+ (words((
+ '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin',
+ 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr', 'classmethod',
+ 'cmp', 'coerce', 'compile', 'complex', 'delattr', 'dict', 'dir', 'divmod',
+ 'enumerate', 'eval', 'execfile', 'exit', 'file', 'filter', 'float',
+ 'frozenset', 'getattr', 'globals', 'hasattr', 'hash', 'hex', 'id',
+ 'input', 'int', 'intern', 'isinstance', 'issubclass', 'iter', 'len',
+ 'list', 'locals', 'long', 'map', 'max', 'min', 'next', 'object',
+ 'oct', 'open', 'ord', 'pow', 'property', 'range', 'raw_input', 'reduce',
+ 'reload', 'repr', 'reversed', 'round', 'set', 'setattr', 'slice',
+ 'sorted', 'staticmethod', 'str', 'sum', 'super', 'tuple', 'type',
+ 'unichr', 'unicode', 'vars', 'xrange', 'zip'),
+ prefix=r'(?<!\.)', suffix=r'\b'),
+ Name.Builtin),
+ (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|cls'
+ r')\b', Name.Builtin.Pseudo),
+ (words((
+ 'ArithmeticError', 'AssertionError', 'AttributeError',
+ 'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
+ 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
+ 'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
+ 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
+ 'MemoryError', 'NameError',
+ 'NotImplementedError', 'OSError', 'OverflowError', 'OverflowWarning',
+ 'PendingDeprecationWarning', 'ReferenceError',
+ 'RuntimeError', 'RuntimeWarning', 'StandardError', 'StopIteration',
+ 'SyntaxError', 'SyntaxWarning', 'SystemError', 'SystemExit',
+ 'TabError', 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
+ 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
+ 'UnicodeWarning', 'UserWarning', 'ValueError', 'VMSError', 'Warning',
+ 'WindowsError', 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
+ Name.Exception),
+ ],
+ 'magicfuncs': [
+ (words((
+ '__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
+ '__complex__', '__contains__', '__del__', '__delattr__', '__delete__',
+ '__delitem__', '__delslice__', '__div__', '__divmod__', '__enter__',
+ '__eq__', '__exit__', '__float__', '__floordiv__', '__ge__', '__get__',
+ '__getattr__', '__getattribute__', '__getitem__', '__getslice__', '__gt__',
+ '__hash__', '__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__',
+ '__ilshift__', '__imod__', '__imul__', '__index__', '__init__',
+ '__instancecheck__', '__int__', '__invert__', '__iop__', '__ior__',
+ '__ipow__', '__irshift__', '__isub__', '__iter__', '__itruediv__',
+ '__ixor__', '__le__', '__len__', '__long__', '__lshift__', '__lt__',
+ '__missing__', '__mod__', '__mul__', '__ne__', '__neg__', '__new__',
+ '__nonzero__', '__oct__', '__op__', '__or__', '__pos__', '__pow__',
+ '__radd__', '__rand__', '__rcmp__', '__rdiv__', '__rdivmod__', '__repr__',
+ '__reversed__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
+ '__rop__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
+ '__rtruediv__', '__rxor__', '__set__', '__setattr__', '__setitem__',
+ '__setslice__', '__str__', '__sub__', '__subclasscheck__', '__truediv__',
+ '__unicode__', '__xor__'), suffix=r'\b'),
+ Name.Function.Magic),
+ ],
+ 'magicvars': [
+ (words((
+ '__bases__', '__class__', '__closure__', '__code__', '__defaults__',
+ '__dict__', '__doc__', '__file__', '__func__', '__globals__',
+ '__metaclass__', '__module__', '__mro__', '__name__', '__self__',
+ '__slots__', '__weakref__'),
+ suffix=r'\b'),
+ Name.Variable.Magic),
+ ],
+ 'numbers': [
+ (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
+ (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
+ (r'0[0-7]+j?', Number.Oct),
+ (r'0[bB][01]+', Number.Bin),
+ (r'0[xX][a-fA-F0-9]+', Number.Hex),
+ (r'\d+L', Number.Integer.Long),
+ (r'\d+j?', Number.Integer)
+ ],
+ 'backtick': [
+ ('`.*?`', String.Backtick),
+ ],
+ 'name': [
+ (r'@[\w.]+', Name.Decorator),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'funcname': [
+ include('magicfuncs'),
+ (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
+ default('#pop'),
+ ],
+ 'classname': [
+ (r'[a-zA-Z_]\w*', Name.Class, '#pop')
+ ],
+ 'import': [
+ (r'(?:[ \t]|\\\n)+', Text),
+ (r'as\b', Keyword.Namespace),
+ (r',', Operator),
+ (r'[a-zA-Z_][\w.]*', Name.Namespace),
+ default('#pop') # all else: go back
+ ],
+ 'fromimport': [
+ (r'(?:[ \t]|\\\n)+', Text),
+ (r'import\b', Keyword.Namespace, '#pop'),
+ # if None occurs here, it's "raise x from None", since None can
+ # never be a module name
+ (r'None\b', Name.Builtin.Pseudo, '#pop'),
+ # sadly, in "raise x from y" y will be highlighted as namespace too
+ (r'[a-zA-Z_.][\w.]*', Name.Namespace),
+ # anything else here also means "raise x from y" and is therefore
+ # not an error
+ default('#pop'),
+ ],
+ 'stringescape': [
+ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ ],
+ 'strings-single': innerstring_rules(String.Single),
+ 'strings-double': innerstring_rules(String.Double),
+ 'dqs': [
+ (r'"', String.Double, '#pop'),
+ (r'\\\\|\\"|\\\n', String.Escape), # included here for raw strings
+ include('strings-double')
+ ],
+ 'sqs': [
+ (r"'", String.Single, '#pop'),
+ (r"\\\\|\\'|\\\n", String.Escape), # included here for raw strings
+ include('strings-single')
+ ],
+ 'tdqs': [
+ (r'"""', String.Double, '#pop'),
+ include('strings-double'),
+ (r'\n', String.Double)
+ ],
+ 'tsqs': [
+ (r"'''", String.Single, '#pop'),
+ include('strings-single'),
+ (r'\n', String.Single)
+ ],
+ }
+
+ def analyse_text(text):
+ return shebang_matches(text, r'pythonw?2(\.\d)?')
+
+
+class PythonConsoleLexer(Lexer):
+ """
+ For Python console output or doctests, such as:
+
+ .. sourcecode:: pycon
+
+ >>> a = 'foo'
+ >>> print a
+ foo
+ >>> 1 / 0
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ZeroDivisionError: integer division or modulo by zero
+
+ Additional options:
+
+ `python3`
+ Use Python 3 lexer for code. Default is ``True``.
+
+ .. versionadded:: 1.0
+ .. versionchanged:: 2.5
+ Now defaults to ``True``.
+ """
+ name = 'Python console session'
+ aliases = ['pycon']
+ mimetypes = ['text/x-python-doctest']
+
+ def __init__(self, **options):
+ self.python3 = get_bool_opt(options, 'python3', True)
+ Lexer.__init__(self, **options)
+
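+ # Strategy: buffer prompt-led source in ``curcode`` and splice the
+ # prompt tokens back in with do_insertions(); traceback lines are
+ # collected in ``curtb`` and re-lexed with the traceback lexer once
+ # the block ends.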
+ def get_tokens_unprocessed(self, text):
+ if self.python3:
+ pylexer = PythonLexer(**self.options)
+ tblexer = PythonTracebackLexer(**self.options)
+ else:
+ pylexer = Python2Lexer(**self.options)
+ tblexer = Python2TracebackLexer(**self.options)
+
+ curcode = ''
+ insertions = []
+ curtb = ''
+ tbindex = 0
+ tb = 0
+ for match in line_re.finditer(text):
+ line = match.group()
+ if line.startswith('>>> ') or line.startswith('... '):
+ tb = 0
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, line[:4])]))
+ curcode += line[4:]
+ elif line.rstrip() == '...' and not tb:
+ # only a new >>> prompt can end an exception block; otherwise an
+ # ellipsis standing in for the elided traceback frames would be
+ # mishandled
+ insertions.append((len(curcode),
+ [(0, Generic.Prompt, '...')]))
+ curcode += line[3:]
+ else:
+ if curcode:
+ yield from do_insertions(
+ insertions, pylexer.get_tokens_unprocessed(curcode))
+ curcode = ''
+ insertions = []
+ if (line.startswith('Traceback (most recent call last):') or
+ re.match(' File "[^"]+", line \\d+\\n$', line)):
+ tb = 1
+ curtb = line
+ tbindex = match.start()
+ elif line == 'KeyboardInterrupt\n':
+ yield match.start(), Name.Class, line
+ elif tb:
+ curtb += line
+ if not (line.startswith(' ') or line.strip() == '...'):
+ tb = 0
+ for i, t, v in tblexer.get_tokens_unprocessed(curtb):
+ yield tbindex+i, t, v
+ curtb = ''
+ else:
+ yield match.start(), Generic.Output, line
+ if curcode:
+ yield from do_insertions(insertions,
+ pylexer.get_tokens_unprocessed(curcode))
+ if curtb:
+ for i, t, v in tblexer.get_tokens_unprocessed(curtb):
+ yield tbindex+i, t, v
+
+
+class PythonTracebackLexer(RegexLexer):
+ """
+ For Python 3.x tracebacks, with support for chained exceptions.
+
+ .. versionadded:: 1.0
+
+ .. versionchanged:: 2.5
+ This is now the default ``PythonTracebackLexer``. It is still available
+ as the alias ``Python3TracebackLexer``.
+ """
+
+ name = 'Python Traceback'
+ aliases = ['pytb', 'py3tb']
+ filenames = ['*.pytb', '*.py3tb']
+ mimetypes = ['text/x-python-traceback', 'text/x-python3-traceback']
+
+ tokens = {
+ 'root': [
+ (r'\n', Text),
+ (r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
+ (r'^During handling of the above exception, another '
+ r'exception occurred:\n\n', Generic.Traceback),
+ (r'^The above exception was the direct cause of the '
+ r'following exception:\n\n', Generic.Traceback),
+ (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
+ (r'^.*\n', Other),
+ ],
+ 'intb': [
+ (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
+ bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
+ (r'^( File )("[^"]+")(, line )(\d+)(\n)',
+ bygroups(Text, Name.Builtin, Text, Number, Text)),
+ (r'^( )(.+)(\n)',
+ bygroups(Text, using(PythonLexer), Text), 'markers'),
+ (r'^([ \t]*)(\.\.\.)(\n)',
+ bygroups(Text, Comment, Text)), # for doctests...
+ (r'^([^:]+)(: )(.+)(\n)',
+ bygroups(Generic.Error, Text, Name, Text), '#pop'),
+ (r'^([a-zA-Z_][\w.]*)(:?\n)',
+ bygroups(Generic.Error, Text), '#pop')
+ ],
+ 'markers': [
+ # Either `PEP 657 <https://www.python.org/dev/peps/pep-0657/>`
+ # error locations in Python 3.11+, or single-caret markers
+ # for syntax errors before that.
+ (r'^( {4,})([~^]+)(\n)',
+ bygroups(Text, Punctuation.Marker, Text),
+ '#pop'),
+ default('#pop'),
+ ],
+ }
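+
+ # Illustrative example of what the 'markers' state matches (PEP 657
+ # error locations in Python 3.11+):
+ #
+ #         return x / y
+ #                ~~^~~
+ #
+ # The run of '~' and '^' is tokenized as Punctuation.Marker.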
+
+
+Python3TracebackLexer = PythonTracebackLexer
+
+
+class Python2TracebackLexer(RegexLexer):
+ """
+ For Python 2.x tracebacks.
+
+ .. versionadded:: 0.7
+
+ .. versionchanged:: 2.5
+ This class has been renamed from ``PythonTracebackLexer``.
+ ``PythonTracebackLexer`` now refers to the Python 3 variant.
+ """
+
+ name = 'Python 2.x Traceback'
+ aliases = ['py2tb']
+ filenames = ['*.py2tb']
+ mimetypes = ['text/x-python2-traceback']
+
+ tokens = {
+ 'root': [
+ # Cover both the "(most recent call last)" header and the older
+ # "(innermost last)" variant.
+ # The optional ^C allows us to catch keyboard interrupt signals.
+ (r'^(\^C)?(Traceback.*\n)',
+ bygroups(Text, Generic.Traceback), 'intb'),
+ # SyntaxError starts with this.
+ (r'^(?= File "[^"]+", line \d+)', Generic.Traceback, 'intb'),
+ (r'^.*\n', Other),
+ ],
+ 'intb': [
+ (r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
+ bygroups(Text, Name.Builtin, Text, Number, Text, Name, Text)),
+ (r'^( File )("[^"]+")(, line )(\d+)(\n)',
+ bygroups(Text, Name.Builtin, Text, Number, Text)),
+ (r'^( )(.+)(\n)',
+ bygroups(Text, using(Python2Lexer), Text), 'marker'),
+ (r'^([ \t]*)(\.\.\.)(\n)',
+ bygroups(Text, Comment, Text)), # for doctests...
+ (r'^([^:]+)(: )(.+)(\n)',
+ bygroups(Generic.Error, Text, Name, Text), '#pop'),
+ (r'^([a-zA-Z_]\w*)(:?\n)',
+ bygroups(Generic.Error, Text), '#pop')
+ ],
+ 'marker': [
+ # For syntax errors.
+ (r'( {4,})(\^)', bygroups(Text, Punctuation.Marker), '#pop'),
+ default('#pop'),
+ ],
+ }
+
+
+class CythonLexer(RegexLexer):
+ """
+ For Pyrex and Cython source code.
+
+ .. versionadded:: 1.1
+ """
+
+ name = 'Cython'
+ url = 'http://cython.org'
+ aliases = ['cython', 'pyx', 'pyrex']
+ filenames = ['*.pyx', '*.pxd', '*.pxi']
+ mimetypes = ['text/x-cython', 'application/x-cython']
+
+ tokens = {
+ 'root': [
+ (r'\n', Text),
+ (r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
+ (r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
+ (r'[^\S\n]+', Text),
+ (r'#.*$', Comment),
+ (r'[]{}:(),;[]', Punctuation),
+ (r'\\\n', Text),
+ (r'\\', Text),
+ (r'(in|is|and|or|not)\b', Operator.Word),
+ (r'(<)([a-zA-Z0-9.?]+)(>)',
+ bygroups(Punctuation, Keyword.Type, Punctuation)),
+ (r'!=|==|<<|>>|[-~+/*%=<>&^|.?]', Operator),
+ (r'(from)(\d+)(<=)(\s+)(<)(\d+)(:)',
+ bygroups(Keyword, Number.Integer, Operator, Name, Operator,
+ Name, Punctuation)),
+ include('keywords'),
+ (r'(def|property)(\s+)', bygroups(Keyword, Text), 'funcname'),
+ (r'(cp?def)(\s+)', bygroups(Keyword, Text), 'cdef'),
+ # (a ``cdef:`` block should actually contain only cdef declarations)
+ (r'(cdef)(:)', bygroups(Keyword, Punctuation)),
+ (r'(class|struct)(\s+)', bygroups(Keyword, Text), 'classname'),
+ (r'(from)(\s+)', bygroups(Keyword, Text), 'fromimport'),
+ (r'(c?import)(\s+)', bygroups(Keyword, Text), 'import'),
+ include('builtins'),
+ include('backtick'),
+ ('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
+ ("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
+ ('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
+ ("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
+ ('[uU]?"""', String, combined('stringescape', 'tdqs')),
+ ("[uU]?'''", String, combined('stringescape', 'tsqs')),
+ ('[uU]?"', String, combined('stringescape', 'dqs')),
+ ("[uU]?'", String, combined('stringescape', 'sqs')),
+ include('name'),
+ include('numbers'),
+ ],
+ 'keywords': [
+ (words((
+ 'assert', 'async', 'await', 'break', 'by', 'continue', 'ctypedef', 'del', 'elif',
+ 'else', 'except', 'except?', 'exec', 'finally', 'for', 'fused', 'gil',
+ 'global', 'if', 'include', 'lambda', 'nogil', 'pass', 'print',
+ 'raise', 'return', 'try', 'while', 'yield', 'as', 'with'), suffix=r'\b'),
+ Keyword),
+ (r'(DEF|IF|ELIF|ELSE)\b', Comment.Preproc),
+ ],
+ 'builtins': [
+ (words((
+ '__import__', 'abs', 'all', 'any', 'apply', 'basestring', 'bin', 'bint',
+ 'bool', 'buffer', 'bytearray', 'bytes', 'callable', 'chr',
+ 'classmethod', 'cmp', 'coerce', 'compile', 'complex', 'delattr',
+ 'dict', 'dir', 'divmod', 'enumerate', 'eval', 'execfile', 'exit',
+ 'file', 'filter', 'float', 'frozenset', 'getattr', 'globals',
+ 'hasattr', 'hash', 'hex', 'id', 'input', 'int', 'intern', 'isinstance',
+ 'issubclass', 'iter', 'len', 'list', 'locals', 'long', 'map', 'max',
+ 'min', 'next', 'object', 'oct', 'open', 'ord', 'pow', 'property', 'Py_ssize_t',
+ 'range', 'raw_input', 'reduce', 'reload', 'repr', 'reversed',
+ 'round', 'set', 'setattr', 'slice', 'sorted', 'staticmethod',
+ 'str', 'sum', 'super', 'tuple', 'type', 'unichr', 'unicode', 'unsigned',
+ 'vars', 'xrange', 'zip'), prefix=r'(?<!\.)', suffix=r'\b'),
+ Name.Builtin),
+ (r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True|NULL'
+ r')\b', Name.Builtin.Pseudo),
+ (words((
+ 'ArithmeticError', 'AssertionError', 'AttributeError',
+ 'BaseException', 'DeprecationWarning', 'EOFError', 'EnvironmentError',
+ 'Exception', 'FloatingPointError', 'FutureWarning', 'GeneratorExit',
+ 'IOError', 'ImportError', 'ImportWarning', 'IndentationError',
+ 'IndexError', 'KeyError', 'KeyboardInterrupt', 'LookupError',
+ 'MemoryError', 'NameError', 'NotImplemented', 'NotImplementedError',
+ 'OSError', 'OverflowError', 'OverflowWarning',
+ 'PendingDeprecationWarning', 'ReferenceError', 'RuntimeError',
+ 'RuntimeWarning', 'StandardError', 'StopIteration', 'SyntaxError',
+ 'SyntaxWarning', 'SystemError', 'SystemExit', 'TabError',
+ 'TypeError', 'UnboundLocalError', 'UnicodeDecodeError',
+ 'UnicodeEncodeError', 'UnicodeError', 'UnicodeTranslateError',
+ 'UnicodeWarning', 'UserWarning', 'ValueError', 'Warning',
+ 'ZeroDivisionError'), prefix=r'(?<!\.)', suffix=r'\b'),
+ Name.Exception),
+ ],
+ 'numbers': [
+ (r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
+ (r'0\d+', Number.Oct),
+ (r'0[xX][a-fA-F0-9]+', Number.Hex),
+ (r'\d+L', Number.Integer.Long),
+ (r'\d+', Number.Integer)
+ ],
+ 'backtick': [
+ ('`.*?`', String.Backtick),
+ ],
+ 'name': [
+ (r'@\w+', Name.Decorator),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'funcname': [
+ (r'[a-zA-Z_]\w*', Name.Function, '#pop')
+ ],
+ 'cdef': [
+ (r'(public|readonly|extern|api|inline)\b', Keyword.Reserved),
+ (r'(struct|enum|union|class)\b', Keyword),
+ (r'([a-zA-Z_]\w*)(\s*)(?=[(:#=]|$)',
+ bygroups(Name.Function, Text), '#pop'),
+ (r'([a-zA-Z_]\w*)(\s*)(,)',
+ bygroups(Name.Function, Text, Punctuation)),
+ (r'from\b', Keyword, '#pop'),
+ (r'as\b', Keyword),
+ (r':', Punctuation, '#pop'),
+ (r'(?=["\'])', Text, '#pop'),
+ (r'[a-zA-Z_]\w*', Keyword.Type),
+ (r'.', Text),
+ ],
+ 'classname': [
+ (r'[a-zA-Z_]\w*', Name.Class, '#pop')
+ ],
+ 'import': [
+ (r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
+ (r'[a-zA-Z_][\w.]*', Name.Namespace),
+ (r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
+ default('#pop') # all else: go back
+ ],
+ 'fromimport': [
+ (r'(\s+)(c?import)\b', bygroups(Text, Keyword), '#pop'),
+ (r'[a-zA-Z_.][\w.]*', Name.Namespace),
+ # ``cdef foo from "header"``, or ``for foo from 0 < i < 10``
+ default('#pop'),
+ ],
+ 'stringescape': [
+ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ ],
+ 'strings': [
+ (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
+ (r'[^\\\'"%\n]+', String),
+ # quotes, percents and backslashes must be parsed one at a time
+ (r'[\'"\\]', String),
+ # unhandled string formatting sign
+ (r'%', String)
+ # newlines are an error (use "nl" state)
+ ],
+ 'nl': [
+ (r'\n', String)
+ ],
+ 'dqs': [
+ (r'"', String, '#pop'),
+ (r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
+ include('strings')
+ ],
+ 'sqs': [
+ (r"'", String, '#pop'),
+ (r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
+ include('strings')
+ ],
+ 'tdqs': [
+ (r'"""', String, '#pop'),
+ include('strings'),
+ include('nl')
+ ],
+ 'tsqs': [
+ (r"'''", String, '#pop'),
+ include('strings'),
+ include('nl')
+ ],
+ }
+
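
The CythonLexer above follows the standard RegexLexer pattern: `root` dispatches into helper states such as `funcname`, `cdef`, and the string states built with combined(). A minimal usage sketch (the import path mirrors this file's location in the vendored tree; under a regular Pygments install the class lives in pygments.lexers.python):

    from pip._vendor.pygments.lexers.python import CythonLexer

    code = "cdef int add(int a, int b):\n    return a + b\n"
    for ttype, value in CythonLexer().get_tokens(code):
        # e.g. Token.Keyword 'cdef', Token.Keyword.Type 'int', Token.Name.Function 'add', ...
        print(ttype, repr(value))
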
+
+class DgLexer(RegexLexer):
+ """
+ Lexer for dg,
+ a functional and object-oriented programming language
+ running on the CPython 3 VM.
+
+ .. versionadded:: 1.6
+ """
+ name = 'dg'
+ aliases = ['dg']
+ filenames = ['*.dg']
+ mimetypes = ['text/x-dg']
+
+ tokens = {
+ 'root': [
+ (r'\s+', Text),
+ (r'#.*?$', Comment.Single),
+
+ (r'(?i)0b[01]+', Number.Bin),
+ (r'(?i)0o[0-7]+', Number.Oct),
+ (r'(?i)0x[0-9a-f]+', Number.Hex),
+ (r'(?i)[+-]?[0-9]+\.[0-9]+(e[+-]?[0-9]+)?j?', Number.Float),
+ (r'(?i)[+-]?[0-9]+e[+-]?\d+j?', Number.Float),
+ (r'(?i)[+-]?[0-9]+j?', Number.Integer),
+
+ (r"(?i)(br|r?b?)'''", String, combined('stringescape', 'tsqs', 'string')),
+ (r'(?i)(br|r?b?)"""', String, combined('stringescape', 'tdqs', 'string')),
+ (r"(?i)(br|r?b?)'", String, combined('stringescape', 'sqs', 'string')),
+ (r'(?i)(br|r?b?)"', String, combined('stringescape', 'dqs', 'string')),
+
+ (r"`\w+'*`", Operator),
+ (r'\b(and|in|is|or|where)\b', Operator.Word),
+ (r'[!$%&*+\-./:<-@\\^|~;,]+', Operator),
+
+ (words((
+ 'bool', 'bytearray', 'bytes', 'classmethod', 'complex', 'dict', 'dict\'',
+ 'float', 'frozenset', 'int', 'list', 'list\'', 'memoryview', 'object',
+ 'property', 'range', 'set', 'set\'', 'slice', 'staticmethod', 'str',
+ 'super', 'tuple', 'tuple\'', 'type'),
+ prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
+ Name.Builtin),
+ (words((
+ '__import__', 'abs', 'all', 'any', 'bin', 'bind', 'chr', 'cmp', 'compile',
+ 'complex', 'delattr', 'dir', 'divmod', 'drop', 'dropwhile', 'enumerate',
+ 'eval', 'exhaust', 'filter', 'flip', 'foldl1?', 'format', 'fst',
+ 'getattr', 'globals', 'hasattr', 'hash', 'head', 'hex', 'id', 'init',
+ 'input', 'isinstance', 'issubclass', 'iter', 'iterate', 'last', 'len',
+ 'locals', 'map', 'max', 'min', 'next', 'oct', 'open', 'ord', 'pow',
+ 'print', 'repr', 'reversed', 'round', 'setattr', 'scanl1?', 'snd',
+ 'sorted', 'sum', 'tail', 'take', 'takewhile', 'vars', 'zip'),
+ prefix=r'(?<!\.)', suffix=r'(?![\'\w])'),
+ Name.Builtin),
+ (r"(?<!\.)(self|Ellipsis|NotImplemented|None|True|False)(?!['\w])",
+ Name.Builtin.Pseudo),
+
+ (r"(?<!\.)[A-Z]\w*(Error|Exception|Warning)'*(?!['\w])",
+ Name.Exception),
+ (r"(?<!\.)(Exception|GeneratorExit|KeyboardInterrupt|StopIteration|"
+ r"SystemExit)(?!['\w])", Name.Exception),
+
+ (r"(?<![\w.])(except|finally|for|if|import|not|otherwise|raise|"
+ r"subclass|while|with|yield)(?!['\w])", Keyword.Reserved),
+
+ (r"[A-Z_]+'*(?!['\w])", Name),
+ (r"[A-Z]\w+'*(?!['\w])", Keyword.Type),
+ (r"\w+'*", Name),
+
+ (r'[()]', Punctuation),
+ (r'.', Error),
+ ],
+ 'stringescape': [
+ (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ ],
+ 'string': [
+ (r'%(\(\w+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[E-GXc-giorsux%]', String.Interpol),
+ (r'[^\\\'"%\n]+', String),
+ # quotes, percents and backslashes must be parsed one at a time
+ (r'[\'"\\]', String),
+ # unhandled string formatting sign
+ (r'%', String),
+ (r'\n', String)
+ ],
+ 'dqs': [
+ (r'"', String, '#pop')
+ ],
+ 'sqs': [
+ (r"'", String, '#pop')
+ ],
+ 'tdqs': [
+ (r'"""', String, '#pop')
+ ],
+ 'tsqs': [
+ (r"'''", String, '#pop')
+ ],
+ }
+
+
+class NumPyLexer(PythonLexer):
+ """
+ A Python lexer recognizing Numerical Python builtins.
+
+ .. versionadded:: 0.10
+ """
+
+ name = 'NumPy'
+ url = 'https://numpy.org/'
+ aliases = ['numpy']
+
+ # override the mimetypes to not inherit them from python
+ mimetypes = []
+ filenames = []
+
+ EXTRA_KEYWORDS = {
+ 'abs', 'absolute', 'accumulate', 'add', 'alen', 'all', 'allclose',
+ 'alltrue', 'alterdot', 'amax', 'amin', 'angle', 'any', 'append',
+ 'apply_along_axis', 'apply_over_axes', 'arange', 'arccos', 'arccosh',
+ 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh', 'argmax', 'argmin',
+ 'argsort', 'argwhere', 'around', 'array', 'array2string', 'array_equal',
+ 'array_equiv', 'array_repr', 'array_split', 'array_str', 'arrayrange',
+ 'asanyarray', 'asarray', 'asarray_chkfinite', 'ascontiguousarray',
+ 'asfarray', 'asfortranarray', 'asmatrix', 'asscalar', 'astype',
+ 'atleast_1d', 'atleast_2d', 'atleast_3d', 'average', 'bartlett',
+ 'base_repr', 'beta', 'binary_repr', 'bincount', 'binomial',
+ 'bitwise_and', 'bitwise_not', 'bitwise_or', 'bitwise_xor', 'blackman',
+ 'bmat', 'broadcast', 'byte_bounds', 'bytes', 'byteswap', 'c_',
+ 'can_cast', 'ceil', 'choose', 'clip', 'column_stack', 'common_type',
+ 'compare_chararrays', 'compress', 'concatenate', 'conj', 'conjugate',
+ 'convolve', 'copy', 'corrcoef', 'correlate', 'cos', 'cosh', 'cov',
+ 'cross', 'cumprod', 'cumproduct', 'cumsum', 'delete', 'deprecate',
+ 'diag', 'diagflat', 'diagonal', 'diff', 'digitize', 'disp', 'divide',
+ 'dot', 'dsplit', 'dstack', 'dtype', 'dump', 'dumps', 'ediff1d', 'empty',
+ 'empty_like', 'equal', 'exp', 'expand_dims', 'expm1', 'extract', 'eye',
+ 'fabs', 'fastCopyAndTranspose', 'fft', 'fftfreq', 'fftshift', 'fill',
+ 'finfo', 'fix', 'flat', 'flatnonzero', 'flatten', 'fliplr', 'flipud',
+ 'floor', 'floor_divide', 'fmod', 'frexp', 'fromarrays', 'frombuffer',
+ 'fromfile', 'fromfunction', 'fromiter', 'frompyfunc', 'fromstring',
+ 'generic', 'get_array_wrap', 'get_include', 'get_numarray_include',
+ 'get_numpy_include', 'get_printoptions', 'getbuffer', 'getbufsize',
+ 'geterr', 'geterrcall', 'geterrobj', 'getfield', 'gradient', 'greater',
+ 'greater_equal', 'gumbel', 'hamming', 'hanning', 'histogram',
+ 'histogram2d', 'histogramdd', 'hsplit', 'hstack', 'hypot', 'i0',
+ 'identity', 'ifft', 'imag', 'index_exp', 'indices', 'inf', 'info',
+ 'inner', 'insert', 'int_asbuffer', 'interp', 'intersect1d',
+ 'intersect1d_nu', 'inv', 'invert', 'iscomplex', 'iscomplexobj',
+ 'isfinite', 'isfortran', 'isinf', 'isnan', 'isneginf', 'isposinf',
+ 'isreal', 'isrealobj', 'isscalar', 'issctype', 'issubclass_',
+ 'issubdtype', 'issubsctype', 'item', 'itemset', 'iterable', 'ix_',
+ 'kaiser', 'kron', 'ldexp', 'left_shift', 'less', 'less_equal', 'lexsort',
+ 'linspace', 'load', 'loads', 'loadtxt', 'log', 'log10', 'log1p', 'log2',
+ 'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'logspace',
+ 'lstsq', 'mat', 'matrix', 'max', 'maximum', 'maximum_sctype',
+ 'may_share_memory', 'mean', 'median', 'meshgrid', 'mgrid', 'min',
+ 'minimum', 'mintypecode', 'mod', 'modf', 'msort', 'multiply', 'nan',
+ 'nan_to_num', 'nanargmax', 'nanargmin', 'nanmax', 'nanmin', 'nansum',
+ 'ndenumerate', 'ndim', 'ndindex', 'negative', 'newaxis', 'newbuffer',
+ 'newbyteorder', 'nonzero', 'not_equal', 'obj2sctype', 'ogrid', 'ones',
+ 'ones_like', 'outer', 'permutation', 'piecewise', 'pinv', 'pkgload',
+ 'place', 'poisson', 'poly', 'poly1d', 'polyadd', 'polyder', 'polydiv',
+ 'polyfit', 'polyint', 'polymul', 'polysub', 'polyval', 'power', 'prod',
+ 'product', 'ptp', 'put', 'putmask', 'r_', 'randint', 'random_integers',
+ 'random_sample', 'ranf', 'rank', 'ravel', 'real', 'real_if_close',
+ 'recarray', 'reciprocal', 'reduce', 'remainder', 'repeat', 'require',
+ 'reshape', 'resize', 'restoredot', 'right_shift', 'rint', 'roll',
+ 'rollaxis', 'roots', 'rot90', 'round', 'round_', 'row_stack', 's_',
+ 'sample', 'savetxt', 'sctype2char', 'searchsorted', 'seed', 'select',
+ 'set_numeric_ops', 'set_printoptions', 'set_string_function',
+ 'setbufsize', 'setdiff1d', 'seterr', 'seterrcall', 'seterrobj',
+ 'setfield', 'setflags', 'setmember1d', 'setxor1d', 'shape',
+ 'show_config', 'shuffle', 'sign', 'signbit', 'sin', 'sinc', 'sinh',
+ 'size', 'slice', 'solve', 'sometrue', 'sort', 'sort_complex', 'source',
+ 'split', 'sqrt', 'square', 'squeeze', 'standard_normal', 'std',
+ 'subtract', 'sum', 'svd', 'swapaxes', 'take', 'tan', 'tanh', 'tensordot',
+ 'test', 'tile', 'tofile', 'tolist', 'tostring', 'trace', 'transpose',
+ 'trapz', 'tri', 'tril', 'trim_zeros', 'triu', 'true_divide', 'typeDict',
+ 'typename', 'uniform', 'union1d', 'unique', 'unique1d', 'unravel_index',
+ 'unwrap', 'vander', 'var', 'vdot', 'vectorize', 'view', 'vonmises',
+ 'vsplit', 'vstack', 'weibull', 'where', 'who', 'zeros', 'zeros_like'
+ }
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in \
+ PythonLexer.get_tokens_unprocessed(self, text):
+ if token is Name and value in self.EXTRA_KEYWORDS:
+ yield index, Keyword.Pseudo, value
+ else:
+ yield index, token, value
+
+ def analyse_text(text):
+ ltext = text[:1000]
+ return (shebang_matches(text, r'pythonw?(3(\.\d)?)?') or
+ 'import ' in ltext) \
+ and ('import numpy' in ltext or 'from numpy import' in ltext)
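
NumPyLexer is the lightest way to specialize an existing lexer: get_tokens_unprocessed() re-tags any plain Name token found in EXTRA_KEYWORDS as Keyword.Pseudo, and analyse_text() only claims input that both looks like Python and imports numpy. A short sketch (vendored import path assumed, matching this file):

    from pip._vendor.pygments.lexers.python import NumPyLexer

    src = "import numpy as np\nx = np.zeros(3)\n"
    for ttype, value in NumPyLexer().get_tokens(src):
        if value == 'zeros':
            print(ttype)   # -> Token.Keyword.Pseudo, re-tagged from Token.Name
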
diff --git a/third_party/python/pip/pip/_vendor/pygments/modeline.py b/third_party/python/pip/pip/_vendor/pygments/modeline.py
new file mode 100644
index 0000000000..43630835ca
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/modeline.py
@@ -0,0 +1,43 @@
+"""
+ pygments.modeline
+ ~~~~~~~~~~~~~~~~~
+
+ A simple modeline parser (based on pymodeline).
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+
+__all__ = ['get_filetype_from_buffer']
+
+
+modeline_re = re.compile(r'''
+ (?: vi | vim | ex ) (?: [<=>]? \d* )? :
+ .* (?: ft | filetype | syn | syntax ) = ( [^:\s]+ )
+''', re.VERBOSE)
+
+
+def get_filetype_from_line(l):
+ m = modeline_re.search(l)
+ if m:
+ return m.group(1)
+
+
+def get_filetype_from_buffer(buf, max_lines=5):
+ """
+ Scan the buffer for modelines and return filetype if one is found.
+ """
+ lines = buf.splitlines()
+ for l in lines[-1:-max_lines-1:-1]:
+ ret = get_filetype_from_line(l)
+ if ret:
+ return ret
+ for i in range(max_lines, -1, -1):
+ if i < len(lines):
+ ret = get_filetype_from_line(lines[i])
+ if ret:
+ return ret
+
+ return None
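
modeline_re accepts both the short and long option spellings (ft/filetype, syn/syntax), and get_filetype_from_buffer() checks the last max_lines lines first and then the first max_lines + 1, since editors honor modelines at either end of a file. A quick usage sketch:

    from pip._vendor.pygments.modeline import get_filetype_from_buffer

    buf = "#!/bin/sh\necho hello\n# vim: set ft=sh :\n"
    print(get_filetype_from_buffer(buf))   # -> 'sh'
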
diff --git a/third_party/python/pip/pip/_vendor/pygments/plugin.py b/third_party/python/pip/pip/_vendor/pygments/plugin.py
new file mode 100644
index 0000000000..3590bee8d2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/plugin.py
@@ -0,0 +1,88 @@
+"""
+ pygments.plugin
+ ~~~~~~~~~~~~~~~
+
+ Pygments plugin interface. By default, this tries to use
+ ``importlib.metadata``, which is in the Python standard
+ library since Python 3.8, or its ``importlib_metadata``
+ backport for earlier versions of Python. It falls back on
+ ``pkg_resources`` if not found. Finally, if ``pkg_resources``
+ is not found either, no plugins are loaded at all.
+
+ lexer plugins::
+
+ [pygments.lexers]
+ yourlexer = yourmodule:YourLexer
+
+ formatter plugins::
+
+ [pygments.formatters]
+ yourformatter = yourformatter:YourFormatter
+ /.ext = yourformatter:YourFormatter
+
+ As you can see, you can define extensions for the formatter
+ with a leading slash.
+
+    style plugins::
+
+ [pygments.styles]
+ yourstyle = yourstyle:YourStyle
+
+    filter plugins::
+
+        [pygments.filters]
+        yourfilter = yourfilter:YourFilter
+
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+LEXER_ENTRY_POINT = 'pygments.lexers'
+FORMATTER_ENTRY_POINT = 'pygments.formatters'
+STYLE_ENTRY_POINT = 'pygments.styles'
+FILTER_ENTRY_POINT = 'pygments.filters'
+
+
+def iter_entry_points(group_name):
+ try:
+ from importlib.metadata import entry_points
+ except ImportError:
+ try:
+ from importlib_metadata import entry_points
+ except ImportError:
+ try:
+ from pip._vendor.pkg_resources import iter_entry_points
+ except (ImportError, OSError):
+ return []
+ else:
+ return iter_entry_points(group_name)
+ groups = entry_points()
+ if hasattr(groups, 'select'):
+ # New interface in Python 3.10 and newer versions of the
+ # importlib_metadata backport.
+ return groups.select(group=group_name)
+ else:
+ # Older interface, deprecated in Python 3.10 and recent
+ # importlib_metadata, but we need it in Python 3.8 and 3.9.
+ return groups.get(group_name, [])
+
+
+def find_plugin_lexers():
+ for entrypoint in iter_entry_points(LEXER_ENTRY_POINT):
+ yield entrypoint.load()
+
+
+def find_plugin_formatters():
+ for entrypoint in iter_entry_points(FORMATTER_ENTRY_POINT):
+ yield entrypoint.name, entrypoint.load()
+
+
+def find_plugin_styles():
+ for entrypoint in iter_entry_points(STYLE_ENTRY_POINT):
+ yield entrypoint.name, entrypoint.load()
+
+
+def find_plugin_filters():
+ for entrypoint in iter_entry_points(FILTER_ENTRY_POINT):
+ yield entrypoint.name, entrypoint.load()
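
With these helpers, plugin discovery reduces to iterating one entry-point group, whichever metadata backend happens to be available. A minimal sketch (it prints nothing unless some plugin package is actually installed):

    from pip._vendor.pygments.plugin import find_plugin_lexers, find_plugin_styles

    # Lexer entry points yield the loaded classes directly...
    for lexer_cls in find_plugin_lexers():
        print(lexer_cls.__name__)

    # ...while style, formatter and filter entry points yield (name, class) pairs.
    for name, style_cls in find_plugin_styles():
        print(name, style_cls)
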
diff --git a/third_party/python/pip/pip/_vendor/pygments/regexopt.py b/third_party/python/pip/pip/_vendor/pygments/regexopt.py
new file mode 100644
index 0000000000..ae0079199b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/regexopt.py
@@ -0,0 +1,91 @@
+"""
+ pygments.regexopt
+ ~~~~~~~~~~~~~~~~~
+
+ An algorithm that generates optimized regexes for matching long lists of
+ literal strings.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from re import escape
+from os.path import commonprefix
+from itertools import groupby
+from operator import itemgetter
+
+CS_ESCAPE = re.compile(r'[\[\^\\\-\]]')
+FIRST_ELEMENT = itemgetter(0)
+
+
+def make_charset(letters):
+ return '[' + CS_ESCAPE.sub(lambda m: '\\' + m.group(), ''.join(letters)) + ']'
+
+
+def regex_opt_inner(strings, open_paren):
+ """Return a regex that matches any string in the sorted list of strings."""
+ close_paren = open_paren and ')' or ''
+ # print strings, repr(open_paren)
+ if not strings:
+ # print '-> nothing left'
+ return ''
+ first = strings[0]
+ if len(strings) == 1:
+ # print '-> only 1 string'
+ return open_paren + escape(first) + close_paren
+ if not first:
+ # print '-> first string empty'
+ return open_paren + regex_opt_inner(strings[1:], '(?:') \
+ + '?' + close_paren
+ if len(first) == 1:
+ # multiple one-char strings? make a charset
+ oneletter = []
+ rest = []
+ for s in strings:
+ if len(s) == 1:
+ oneletter.append(s)
+ else:
+ rest.append(s)
+ if len(oneletter) > 1: # do we have more than one oneletter string?
+ if rest:
+ # print '-> 1-character + rest'
+ return open_paren + regex_opt_inner(rest, '') + '|' \
+ + make_charset(oneletter) + close_paren
+ # print '-> only 1-character'
+ return open_paren + make_charset(oneletter) + close_paren
+ prefix = commonprefix(strings)
+ if prefix:
+ plen = len(prefix)
+ # we have a prefix for all strings
+ # print '-> prefix:', prefix
+ return open_paren + escape(prefix) \
+ + regex_opt_inner([s[plen:] for s in strings], '(?:') \
+ + close_paren
+ # is there a suffix?
+ strings_rev = [s[::-1] for s in strings]
+ suffix = commonprefix(strings_rev)
+ if suffix:
+ slen = len(suffix)
+ # print '-> suffix:', suffix[::-1]
+ return open_paren \
+ + regex_opt_inner(sorted(s[:-slen] for s in strings), '(?:') \
+ + escape(suffix[::-1]) + close_paren
+ # recurse on common 1-string prefixes
+ # print '-> last resort'
+ return open_paren + \
+ '|'.join(regex_opt_inner(list(group[1]), '')
+ for group in groupby(strings, lambda s: s[0] == first[0])) \
+ + close_paren
+
+
+def regex_opt(strings, prefix='', suffix=''):
+ """Return a compiled regex that matches any string in the given list.
+
+ The strings to match must be literal strings, not regexes. They will be
+ regex-escaped.
+
+ *prefix* and *suffix* are pre- and appended to the final regex.
+ """
+ strings = sorted(strings)
+ return prefix + regex_opt_inner(strings, '(') + suffix
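
regex_opt_inner() factors out common prefixes and suffixes and folds one-character alternatives into a character class, so the result matches the same literals as a naive 'a|b|c' union but with far less alternation. A small sketch:

    import re
    from pip._vendor.pygments.regexopt import regex_opt

    rx = regex_opt(['if', 'in', 'import'], prefix=r'\b', suffix=r'\b')
    # rx is a factored pattern string along the lines of r'\b(i(?:mport|[fn]))\b'
    assert re.match(rx, 'import') and re.match(rx, 'in')
    assert not re.match(rx, 'i')
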
diff --git a/third_party/python/pip/pip/_vendor/pygments/scanner.py b/third_party/python/pip/pip/_vendor/pygments/scanner.py
new file mode 100644
index 0000000000..d47ed4828a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/scanner.py
@@ -0,0 +1,104 @@
+"""
+ pygments.scanner
+ ~~~~~~~~~~~~~~~~
+
+    This library implements a regex-based scanner. Some languages,
+    like Pascal, are easy to parse but have keywords that depend on
+    the surrounding context; such input cannot be lexed correctly
+    with a plain regular expression lexer like the `RegexLexer`.
+
+ Have a look at the `DelphiLexer` to get an idea of how to use
+ this scanner.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+import re
+
+
+class EndOfText(RuntimeError):
+ """
+    Raised if the end of the text has been reached and the user
+    tries to call a match function.
+ """
+
+
+class Scanner:
+ """
+ Simple scanner
+
+ All method patterns are regular expression strings (not
+ compiled expressions!)
+ """
+
+ def __init__(self, text, flags=0):
+ """
+ :param text: The text which should be scanned
+ :param flags: default regular expression flags
+ """
+ self.data = text
+ self.data_length = len(text)
+ self.start_pos = 0
+ self.pos = 0
+ self.flags = flags
+ self.last = None
+ self.match = None
+ self._re_cache = {}
+
+ def eos(self):
+ """`True` if the scanner reached the end of text."""
+ return self.pos >= self.data_length
+    eos = property(eos, doc=eos.__doc__)
+
+ def check(self, pattern):
+ """
+ Apply `pattern` on the current position and return
+ the match object. (Doesn't touch pos). Use this for
+ lookahead.
+ """
+ if self.eos:
+ raise EndOfText()
+ if pattern not in self._re_cache:
+ self._re_cache[pattern] = re.compile(pattern, self.flags)
+ return self._re_cache[pattern].match(self.data, self.pos)
+
+ def test(self, pattern):
+ """Apply a pattern on the current position and check
+        if it matches. Doesn't touch pos.
+ """
+ return self.check(pattern) is not None
+
+ def scan(self, pattern):
+ """
+ Scan the text for the given pattern and update pos/match
+ and related fields. The return value is a boolean that
+ indicates if the pattern matched. The matched value is
+ stored on the instance as ``match``, the last value is
+ stored as ``last``. ``start_pos`` is the position of the
+ pointer before the pattern was matched, ``pos`` is the
+ end position.
+ """
+ if self.eos:
+ raise EndOfText()
+ if pattern not in self._re_cache:
+ self._re_cache[pattern] = re.compile(pattern, self.flags)
+ self.last = self.match
+ m = self._re_cache[pattern].match(self.data, self.pos)
+ if m is None:
+ return False
+ self.start_pos = m.start()
+ self.pos = m.end()
+ self.match = m.group()
+ return True
+
+ def get_char(self):
+ """Scan exactly one char."""
+ self.scan('.')
+
+ def __repr__(self):
+ return '<%s %d/%d>' % (
+ self.__class__.__name__,
+ self.pos,
+ self.data_length
+ )
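
A lexer drives the Scanner imperatively: scan() consumes input and records the match, while test()/check() only look ahead. A minimal usage sketch:

    from pip._vendor.pygments.scanner import Scanner

    s = Scanner('program Demo;')
    s.scan(r'\w+')            # consumes 'program'
    print(s.match)            # -> 'program'
    s.scan(r'\s+')            # consume the blank
    print(s.test(r'Demo'))    # lookahead only; pos is unchanged -> True
    s.scan(r'\w+')
    print(s.match, s.eos)     # -> Demo False   (the ';' is still pending)
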
diff --git a/third_party/python/pip/pip/_vendor/pygments/sphinxext.py b/third_party/python/pip/pip/_vendor/pygments/sphinxext.py
new file mode 100644
index 0000000000..c41bd49dd4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/sphinxext.py
@@ -0,0 +1,155 @@
+"""
+ pygments.sphinxext
+ ~~~~~~~~~~~~~~~~~~
+
+ Sphinx extension to generate automatic documentation of lexers,
+ formatters and filters.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import sys
+
+from docutils import nodes
+from docutils.statemachine import ViewList
+from docutils.parsers.rst import Directive
+from sphinx.util.nodes import nested_parse_with_titles
+
+
+MODULEDOC = '''
+.. module:: %s
+
+%s
+%s
+'''
+
+LEXERDOC = '''
+.. class:: %s
+
+ :Short names: %s
+ :Filenames: %s
+ :MIME types: %s
+
+ %s
+
+'''
+
+FMTERDOC = '''
+.. class:: %s
+
+ :Short names: %s
+ :Filenames: %s
+
+ %s
+
+'''
+
+FILTERDOC = '''
+.. class:: %s
+
+ :Name: %s
+
+ %s
+
+'''
+
+
+class PygmentsDoc(Directive):
+ """
+ A directive to collect all lexers/formatters/filters and generate
+ autoclass directives for them.
+ """
+ has_content = False
+ required_arguments = 1
+ optional_arguments = 0
+ final_argument_whitespace = False
+ option_spec = {}
+
+ def run(self):
+ self.filenames = set()
+ if self.arguments[0] == 'lexers':
+ out = self.document_lexers()
+ elif self.arguments[0] == 'formatters':
+ out = self.document_formatters()
+ elif self.arguments[0] == 'filters':
+ out = self.document_filters()
+ else:
+ raise Exception('invalid argument for "pygmentsdoc" directive')
+ node = nodes.compound()
+ vl = ViewList(out.split('\n'), source='')
+ nested_parse_with_titles(self.state, vl, node)
+ for fn in self.filenames:
+ self.state.document.settings.record_dependencies.add(fn)
+ return node.children
+
+ def document_lexers(self):
+ from pip._vendor.pygments.lexers._mapping import LEXERS
+ out = []
+ modules = {}
+ moduledocstrings = {}
+ for classname, data in sorted(LEXERS.items(), key=lambda x: x[0]):
+ module = data[0]
+ mod = __import__(module, None, None, [classname])
+ self.filenames.add(mod.__file__)
+ cls = getattr(mod, classname)
+ if not cls.__doc__:
+ print("Warning: %s does not have a docstring." % classname)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ modules.setdefault(module, []).append((
+ classname,
+ ', '.join(data[2]) or 'None',
+                ', '.join(data[3]).replace('*', '\\*').replace('_', '\\_') or 'None',
+ ', '.join(data[4]) or 'None',
+ docstring))
+ if module not in moduledocstrings:
+ moddoc = mod.__doc__
+ if isinstance(moddoc, bytes):
+ moddoc = moddoc.decode('utf8')
+ moduledocstrings[module] = moddoc
+
+ for module, lexers in sorted(modules.items(), key=lambda x: x[0]):
+ if moduledocstrings[module] is None:
+ raise Exception("Missing docstring for %s" % (module,))
+ heading = moduledocstrings[module].splitlines()[4].strip().rstrip('.')
+ out.append(MODULEDOC % (module, heading, '-'*len(heading)))
+ for data in lexers:
+ out.append(LEXERDOC % data)
+
+ return ''.join(out)
+
+ def document_formatters(self):
+ from pip._vendor.pygments.formatters import FORMATTERS
+
+ out = []
+ for classname, data in sorted(FORMATTERS.items(), key=lambda x: x[0]):
+ module = data[0]
+ mod = __import__(module, None, None, [classname])
+ self.filenames.add(mod.__file__)
+ cls = getattr(mod, classname)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ heading = cls.__name__
+ out.append(FMTERDOC % (heading, ', '.join(data[2]) or 'None',
+ ', '.join(data[3]).replace('*', '\\*') or 'None',
+ docstring))
+ return ''.join(out)
+
+ def document_filters(self):
+ from pip._vendor.pygments.filters import FILTERS
+
+ out = []
+ for name, cls in FILTERS.items():
+ self.filenames.add(sys.modules[cls.__module__].__file__)
+ docstring = cls.__doc__
+ if isinstance(docstring, bytes):
+ docstring = docstring.decode('utf8')
+ out.append(FILTERDOC % (cls.__name__, name, docstring))
+ return ''.join(out)
+
+
+def setup(app):
+ app.add_directive('pygmentsdoc', PygmentsDoc)
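
The directive is intended for Pygments' own documentation build; it takes a single argument selecting what to document. A hedged sketch of how it would be wired up (this assumes a regular `pygments` distribution rather than the vendored copy, since document_lexers() imports lexer modules by their original names):

    # conf.py -- illustrative only
    extensions = ['pygments.sphinxext']

    # A reST page can then invoke the directive as:
    #   .. pygmentsdoc:: lexers
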
diff --git a/third_party/python/pip/pip/_vendor/pygments/style.py b/third_party/python/pip/pip/_vendor/pygments/style.py
new file mode 100644
index 0000000000..84abbc2059
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/style.py
@@ -0,0 +1,197 @@
+"""
+ pygments.style
+ ~~~~~~~~~~~~~~
+
+ Basic style object.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.token import Token, STANDARD_TYPES
+
+# Default mapping of ansixxx to RGB colors.
+_ansimap = {
+ # dark
+ 'ansiblack': '000000',
+ 'ansired': '7f0000',
+ 'ansigreen': '007f00',
+ 'ansiyellow': '7f7fe0',
+ 'ansiblue': '00007f',
+ 'ansimagenta': '7f007f',
+ 'ansicyan': '007f7f',
+ 'ansigray': 'e5e5e5',
+ # normal
+ 'ansibrightblack': '555555',
+ 'ansibrightred': 'ff0000',
+ 'ansibrightgreen': '00ff00',
+ 'ansibrightyellow': 'ffff00',
+ 'ansibrightblue': '0000ff',
+ 'ansibrightmagenta': 'ff00ff',
+ 'ansibrightcyan': '00ffff',
+ 'ansiwhite': 'ffffff',
+}
+# mapping of deprecated #ansixxx colors to new color names
+_deprecated_ansicolors = {
+ # dark
+ '#ansiblack': 'ansiblack',
+ '#ansidarkred': 'ansired',
+ '#ansidarkgreen': 'ansigreen',
+ '#ansibrown': 'ansiyellow',
+ '#ansidarkblue': 'ansiblue',
+ '#ansipurple': 'ansimagenta',
+ '#ansiteal': 'ansicyan',
+ '#ansilightgray': 'ansigray',
+ # normal
+ '#ansidarkgray': 'ansibrightblack',
+ '#ansired': 'ansibrightred',
+ '#ansigreen': 'ansibrightgreen',
+ '#ansiyellow': 'ansibrightyellow',
+ '#ansiblue': 'ansibrightblue',
+ '#ansifuchsia': 'ansibrightmagenta',
+ '#ansiturquoise': 'ansibrightcyan',
+ '#ansiwhite': 'ansiwhite',
+}
+ansicolors = set(_ansimap)
+
+
+class StyleMeta(type):
+
+ def __new__(mcs, name, bases, dct):
+ obj = type.__new__(mcs, name, bases, dct)
+ for token in STANDARD_TYPES:
+ if token not in obj.styles:
+ obj.styles[token] = ''
+
+ def colorformat(text):
+ if text in ansicolors:
+ return text
+ if text[0:1] == '#':
+ col = text[1:]
+ if len(col) == 6:
+ return col
+ elif len(col) == 3:
+ return col[0] * 2 + col[1] * 2 + col[2] * 2
+ elif text == '':
+ return ''
+ elif text.startswith('var') or text.startswith('calc'):
+ return text
+ assert False, "wrong color format %r" % text
+
+ _styles = obj._styles = {}
+
+ for ttype in obj.styles:
+ for token in ttype.split():
+ if token in _styles:
+ continue
+ ndef = _styles.get(token.parent, None)
+ styledefs = obj.styles.get(token, '').split()
+ if not ndef or token is None:
+ ndef = ['', 0, 0, 0, '', '', 0, 0, 0]
+ elif 'noinherit' in styledefs and token is not Token:
+ ndef = _styles[Token][:]
+ else:
+ ndef = ndef[:]
+ _styles[token] = ndef
+ for styledef in obj.styles.get(token, '').split():
+ if styledef == 'noinherit':
+ pass
+ elif styledef == 'bold':
+ ndef[1] = 1
+ elif styledef == 'nobold':
+ ndef[1] = 0
+ elif styledef == 'italic':
+ ndef[2] = 1
+ elif styledef == 'noitalic':
+ ndef[2] = 0
+ elif styledef == 'underline':
+ ndef[3] = 1
+ elif styledef == 'nounderline':
+ ndef[3] = 0
+ elif styledef[:3] == 'bg:':
+ ndef[4] = colorformat(styledef[3:])
+ elif styledef[:7] == 'border:':
+ ndef[5] = colorformat(styledef[7:])
+ elif styledef == 'roman':
+ ndef[6] = 1
+ elif styledef == 'sans':
+ ndef[7] = 1
+ elif styledef == 'mono':
+ ndef[8] = 1
+ else:
+ ndef[0] = colorformat(styledef)
+
+ return obj
+
+ def style_for_token(cls, token):
+ t = cls._styles[token]
+ ansicolor = bgansicolor = None
+ color = t[0]
+ if color in _deprecated_ansicolors:
+ color = _deprecated_ansicolors[color]
+ if color in ansicolors:
+ ansicolor = color
+ color = _ansimap[color]
+ bgcolor = t[4]
+ if bgcolor in _deprecated_ansicolors:
+ bgcolor = _deprecated_ansicolors[bgcolor]
+ if bgcolor in ansicolors:
+ bgansicolor = bgcolor
+ bgcolor = _ansimap[bgcolor]
+
+ return {
+ 'color': color or None,
+ 'bold': bool(t[1]),
+ 'italic': bool(t[2]),
+ 'underline': bool(t[3]),
+ 'bgcolor': bgcolor or None,
+ 'border': t[5] or None,
+ 'roman': bool(t[6]) or None,
+ 'sans': bool(t[7]) or None,
+ 'mono': bool(t[8]) or None,
+ 'ansicolor': ansicolor,
+ 'bgansicolor': bgansicolor,
+ }
+
+ def list_styles(cls):
+ return list(cls)
+
+ def styles_token(cls, ttype):
+ return ttype in cls._styles
+
+ def __iter__(cls):
+ for token in cls._styles:
+ yield token, cls.style_for_token(token)
+
+ def __len__(cls):
+ return len(cls._styles)
+
+
+class Style(metaclass=StyleMeta):
+
+ #: overall background color (``None`` means transparent)
+ background_color = '#ffffff'
+
+ #: highlight background color
+ highlight_color = '#ffffcc'
+
+ #: line number font color
+ line_number_color = 'inherit'
+
+ #: line number background color
+ line_number_background_color = 'transparent'
+
+ #: special line number font color
+ line_number_special_color = '#000000'
+
+ #: special line number background color
+ line_number_special_background_color = '#ffffc0'
+
+ #: Style definitions for individual token types.
+ styles = {}
+
+    # Attribute for styles defined within Pygments. If set
+ # to True, the style is not shown in the style gallery
+ # on the website. This is intended for language-specific
+ # styles.
+ web_style_gallery_exclude = False
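
Concretely, a style subclass only fills in `styles`; StyleMeta expands every definition string into the nine-slot list that style_for_token() later unpacks. A minimal sketch:

    from pip._vendor.pygments.style import Style
    from pip._vendor.pygments.token import Comment, Keyword

    class DemoStyle(Style):
        styles = {
            Comment: 'italic #888',           # the short #rgb form is expanded
            Keyword: 'bold #005 bg:#ffffcc',
        }

    info = DemoStyle.style_for_token(Keyword)
    print(info['color'], info['bold'], info['bgcolor'])   # -> 000055 True ffffcc
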
diff --git a/third_party/python/pip/pip/_vendor/pygments/styles/__init__.py b/third_party/python/pip/pip/_vendor/pygments/styles/__init__.py
new file mode 100644
index 0000000000..44cc0efb08
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/styles/__init__.py
@@ -0,0 +1,97 @@
+"""
+ pygments.styles
+ ~~~~~~~~~~~~~~~
+
+ Contains built-in styles.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+from pip._vendor.pygments.plugin import find_plugin_styles
+from pip._vendor.pygments.util import ClassNotFound
+
+
+#: Maps style names to 'submodule::classname'.
+STYLE_MAP = {
+ 'default': 'default::DefaultStyle',
+ 'emacs': 'emacs::EmacsStyle',
+ 'friendly': 'friendly::FriendlyStyle',
+ 'friendly_grayscale': 'friendly_grayscale::FriendlyGrayscaleStyle',
+ 'colorful': 'colorful::ColorfulStyle',
+ 'autumn': 'autumn::AutumnStyle',
+ 'murphy': 'murphy::MurphyStyle',
+ 'manni': 'manni::ManniStyle',
+ 'material': 'material::MaterialStyle',
+ 'monokai': 'monokai::MonokaiStyle',
+ 'perldoc': 'perldoc::PerldocStyle',
+ 'pastie': 'pastie::PastieStyle',
+ 'borland': 'borland::BorlandStyle',
+ 'trac': 'trac::TracStyle',
+ 'native': 'native::NativeStyle',
+ 'fruity': 'fruity::FruityStyle',
+ 'bw': 'bw::BlackWhiteStyle',
+ 'vim': 'vim::VimStyle',
+ 'vs': 'vs::VisualStudioStyle',
+ 'tango': 'tango::TangoStyle',
+ 'rrt': 'rrt::RrtStyle',
+ 'xcode': 'xcode::XcodeStyle',
+ 'igor': 'igor::IgorStyle',
+ 'paraiso-light': 'paraiso_light::ParaisoLightStyle',
+ 'paraiso-dark': 'paraiso_dark::ParaisoDarkStyle',
+ 'lovelace': 'lovelace::LovelaceStyle',
+ 'algol': 'algol::AlgolStyle',
+ 'algol_nu': 'algol_nu::Algol_NuStyle',
+ 'arduino': 'arduino::ArduinoStyle',
+ 'rainbow_dash': 'rainbow_dash::RainbowDashStyle',
+ 'abap': 'abap::AbapStyle',
+ 'solarized-dark': 'solarized::SolarizedDarkStyle',
+ 'solarized-light': 'solarized::SolarizedLightStyle',
+ 'sas': 'sas::SasStyle',
+ 'staroffice' : 'staroffice::StarofficeStyle',
+ 'stata': 'stata_light::StataLightStyle',
+ 'stata-light': 'stata_light::StataLightStyle',
+ 'stata-dark': 'stata_dark::StataDarkStyle',
+ 'inkpot': 'inkpot::InkPotStyle',
+ 'zenburn': 'zenburn::ZenburnStyle',
+ 'gruvbox-dark': 'gruvbox::GruvboxDarkStyle',
+ 'gruvbox-light': 'gruvbox::GruvboxLightStyle',
+ 'dracula': 'dracula::DraculaStyle',
+ 'one-dark': 'onedark::OneDarkStyle',
+ 'lilypond' : 'lilypond::LilyPondStyle',
+ 'nord': 'nord::NordStyle',
+ 'nord-darker': 'nord::NordDarkerStyle',
+ 'github-dark': 'gh_dark::GhDarkStyle'
+}
+
+
+def get_style_by_name(name):
+ if name in STYLE_MAP:
+ mod, cls = STYLE_MAP[name].split('::')
+ builtin = "yes"
+ else:
+ for found_name, style in find_plugin_styles():
+ if name == found_name:
+ return style
+ # perhaps it got dropped into our styles package
+ builtin = ""
+ mod = name
+ cls = name.title() + "Style"
+
+ try:
+ mod = __import__('pygments.styles.' + mod, None, None, [cls])
+ except ImportError:
+ raise ClassNotFound("Could not find style module %r" % mod +
+ (builtin and ", though it should be builtin") + ".")
+ try:
+ return getattr(mod, cls)
+ except AttributeError:
+ raise ClassNotFound("Could not find style class %r in style module." % cls)
+
+
+def get_all_styles():
+ """Return a generator for all styles by name,
+ both builtin and plugin."""
+ yield from STYLE_MAP
+ for name, _ in find_plugin_styles():
+ yield name
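
A lookup sketch. Note that get_style_by_name() resolves builtins by importing 'pygments.styles.<module>', so in this vendored copy that dynamic import assumes a regular `pygments` distribution is also importable; STYLE_MAP and get_all_styles() work regardless:

    from pip._vendor.pygments.styles import STYLE_MAP, get_all_styles

    print('monokai' in STYLE_MAP)          # -> True
    print(sorted(get_all_styles())[:4])    # builtin names, plus any plugin styles
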
diff --git a/third_party/python/pip/pip/_vendor/pygments/token.py b/third_party/python/pip/pip/_vendor/pygments/token.py
new file mode 100644
index 0000000000..e3e565ad59
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/token.py
@@ -0,0 +1,213 @@
+"""
+ pygments.token
+ ~~~~~~~~~~~~~~
+
+ Basic token types and the standard tokens.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+
+class _TokenType(tuple):
+ parent = None
+
+ def split(self):
+ buf = []
+ node = self
+ while node is not None:
+ buf.append(node)
+ node = node.parent
+ buf.reverse()
+ return buf
+
+ def __init__(self, *args):
+ # no need to call super.__init__
+ self.subtypes = set()
+
+ def __contains__(self, val):
+ return self is val or (
+ type(val) is self.__class__ and
+ val[:len(self)] == self
+ )
+
+ def __getattr__(self, val):
+ if not val or not val[0].isupper():
+ return tuple.__getattribute__(self, val)
+ new = _TokenType(self + (val,))
+ setattr(self, val, new)
+ self.subtypes.add(new)
+ new.parent = self
+ return new
+
+ def __repr__(self):
+ return 'Token' + (self and '.' or '') + '.'.join(self)
+
+ def __copy__(self):
+ # These instances are supposed to be singletons
+ return self
+
+ def __deepcopy__(self, memo):
+ # These instances are supposed to be singletons
+ return self
+
+
+Token = _TokenType()
+
+# Special token types
+Text = Token.Text
+Whitespace = Text.Whitespace
+Escape = Token.Escape
+Error = Token.Error
+# Text that doesn't belong to this lexer (e.g. HTML in PHP)
+Other = Token.Other
+
+# Common token types for source code
+Keyword = Token.Keyword
+Name = Token.Name
+Literal = Token.Literal
+String = Literal.String
+Number = Literal.Number
+Punctuation = Token.Punctuation
+Operator = Token.Operator
+Comment = Token.Comment
+
+# Generic types for non-source code
+Generic = Token.Generic
+
+# String and some others are not direct children of Token.
+# alias them:
+Token.Token = Token
+Token.String = String
+Token.Number = Number
+
+
+def is_token_subtype(ttype, other):
+ """
+ Return True if ``ttype`` is a subtype of ``other``.
+
+    Exists for backwards compatibility; use ``ttype in other`` now.
+ """
+ return ttype in other
+
+
+def string_to_tokentype(s):
+ """
+ Convert a string into a token type::
+
+        >>> string_to_tokentype('String.Double')
+        Token.Literal.String.Double
+        >>> string_to_tokentype('Token.Literal.Number')
+        Token.Literal.Number
+        >>> string_to_tokentype('')
+        Token
+
+    Tokens that are already tokens are returned unchanged:
+
+        >>> string_to_tokentype(String)
+ Token.Literal.String
+ """
+ if isinstance(s, _TokenType):
+ return s
+ if not s:
+ return Token
+ node = Token
+ for item in s.split('.'):
+ node = getattr(node, item)
+ return node
+
+
+# Map standard token types to short names, used in CSS class naming.
+# If you add a new item, please be sure to run this file to perform
+# a consistency check for duplicate values.
+STANDARD_TYPES = {
+ Token: '',
+
+ Text: '',
+ Whitespace: 'w',
+ Escape: 'esc',
+ Error: 'err',
+ Other: 'x',
+
+ Keyword: 'k',
+ Keyword.Constant: 'kc',
+ Keyword.Declaration: 'kd',
+ Keyword.Namespace: 'kn',
+ Keyword.Pseudo: 'kp',
+ Keyword.Reserved: 'kr',
+ Keyword.Type: 'kt',
+
+ Name: 'n',
+ Name.Attribute: 'na',
+ Name.Builtin: 'nb',
+ Name.Builtin.Pseudo: 'bp',
+ Name.Class: 'nc',
+ Name.Constant: 'no',
+ Name.Decorator: 'nd',
+ Name.Entity: 'ni',
+ Name.Exception: 'ne',
+ Name.Function: 'nf',
+ Name.Function.Magic: 'fm',
+ Name.Property: 'py',
+ Name.Label: 'nl',
+ Name.Namespace: 'nn',
+ Name.Other: 'nx',
+ Name.Tag: 'nt',
+ Name.Variable: 'nv',
+ Name.Variable.Class: 'vc',
+ Name.Variable.Global: 'vg',
+ Name.Variable.Instance: 'vi',
+ Name.Variable.Magic: 'vm',
+
+ Literal: 'l',
+ Literal.Date: 'ld',
+
+ String: 's',
+ String.Affix: 'sa',
+ String.Backtick: 'sb',
+ String.Char: 'sc',
+ String.Delimiter: 'dl',
+ String.Doc: 'sd',
+ String.Double: 's2',
+ String.Escape: 'se',
+ String.Heredoc: 'sh',
+ String.Interpol: 'si',
+ String.Other: 'sx',
+ String.Regex: 'sr',
+ String.Single: 's1',
+ String.Symbol: 'ss',
+
+ Number: 'm',
+ Number.Bin: 'mb',
+ Number.Float: 'mf',
+ Number.Hex: 'mh',
+ Number.Integer: 'mi',
+ Number.Integer.Long: 'il',
+ Number.Oct: 'mo',
+
+ Operator: 'o',
+ Operator.Word: 'ow',
+
+ Punctuation: 'p',
+ Punctuation.Marker: 'pm',
+
+ Comment: 'c',
+ Comment.Hashbang: 'ch',
+ Comment.Multiline: 'cm',
+ Comment.Preproc: 'cp',
+ Comment.PreprocFile: 'cpf',
+ Comment.Single: 'c1',
+ Comment.Special: 'cs',
+
+ Generic: 'g',
+ Generic.Deleted: 'gd',
+ Generic.Emph: 'ge',
+ Generic.Error: 'gr',
+ Generic.Heading: 'gh',
+ Generic.Inserted: 'gi',
+ Generic.Output: 'go',
+ Generic.Prompt: 'gp',
+ Generic.Strong: 'gs',
+ Generic.Subheading: 'gu',
+ Generic.Traceback: 'gt',
+}
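
Token types are created lazily on attribute access and compare structurally, so a subtype test is just a tuple-prefix check. A short sketch:

    from pip._vendor.pygments.token import STANDARD_TYPES, String, string_to_tokentype

    print(String.Double)                       # -> Token.Literal.String.Double
    print(String.Double in String)             # subtype check -> True
    print(string_to_tokentype('Name.Class'))   # -> Token.Name.Class
    print(STANDARD_TYPES[String.Double])       # -> 's2' (the CSS class shorthand)
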
diff --git a/third_party/python/pip/pip/_vendor/pygments/unistring.py b/third_party/python/pip/pip/_vendor/pygments/unistring.py
new file mode 100644
index 0000000000..2e3c80869d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/unistring.py
@@ -0,0 +1,153 @@
+"""
+ pygments.unistring
+ ~~~~~~~~~~~~~~~~~~
+
+ Strings of all Unicode characters of a certain category.
+ Used for matching in Unicode-aware languages. Run to regenerate.
+
+ Inspired by chartypes_create.py from the MoinMoin project.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+Cc = '\x00-\x1f\x7f-\x9f'
+
+Cf = '\xad\u0600-\u0605\u061c\u06dd\u070f\u08e2\u180e\u200b-\u200f\u202a-\u202e\u2060-\u2064\u2066-\u206f\ufeff\ufff9-\ufffb\U000110bd\U000110cd\U0001bca0-\U0001bca3\U0001d173-\U0001d17a\U000e0001\U000e0020-\U000e007f'
+
+Cn = '\u0378-\u0379\u0380-\u0383\u038b\u038d\u03a2\u0530\u0557-\u0558\u058b-\u058c\u0590\u05c8-\u05cf\u05eb-\u05ee\u05f5-\u05ff\u061d\u070e\u074b-\u074c\u07b2-\u07bf\u07fb-\u07fc\u082e-\u082f\u083f\u085c-\u085d\u085f\u086b-\u089f\u08b5\u08be-\u08d2\u0984\u098d-\u098e\u0991-\u0992\u09a9\u09b1\u09b3-\u09b5\u09ba-\u09bb\u09c5-\u09c6\u09c9-\u09ca\u09cf-\u09d6\u09d8-\u09db\u09de\u09e4-\u09e5\u09ff-\u0a00\u0a04\u0a0b-\u0a0e\u0a11-\u0a12\u0a29\u0a31\u0a34\u0a37\u0a3a-\u0a3b\u0a3d\u0a43-\u0a46\u0a49-\u0a4a\u0a4e-\u0a50\u0a52-\u0a58\u0a5d\u0a5f-\u0a65\u0a77-\u0a80\u0a84\u0a8e\u0a92\u0aa9\u0ab1\u0ab4\u0aba-\u0abb\u0ac6\u0aca\u0ace-\u0acf\u0ad1-\u0adf\u0ae4-\u0ae5\u0af2-\u0af8\u0b00\u0b04\u0b0d-\u0b0e\u0b11-\u0b12\u0b29\u0b31\u0b34\u0b3a-\u0b3b\u0b45-\u0b46\u0b49-\u0b4a\u0b4e-\u0b55\u0b58-\u0b5b\u0b5e\u0b64-\u0b65\u0b78-\u0b81\u0b84\u0b8b-\u0b8d\u0b91\u0b96-\u0b98\u0b9b\u0b9d\u0ba0-\u0ba2\u0ba5-\u0ba7\u0bab-\u0bad\u0bba-\u0bbd\u0bc3-\u0bc5\u0bc9\u0bce-\u0bcf\u0bd1-\u0bd6\u0bd8-\u0be5\u0bfb-\u0bff\u0c0d\u0c11\u0c29\u0c3a-\u0c3c\u0c45\u0c49\u0c4e-\u0c54\u0c57\u0c5b-\u0c5f\u0c64-\u0c65\u0c70-\u0c77\u0c8d\u0c91\u0ca9\u0cb4\u0cba-\u0cbb\u0cc5\u0cc9\u0cce-\u0cd4\u0cd7-\u0cdd\u0cdf\u0ce4-\u0ce5\u0cf0\u0cf3-\u0cff\u0d04\u0d0d\u0d11\u0d45\u0d49\u0d50-\u0d53\u0d64-\u0d65\u0d80-\u0d81\u0d84\u0d97-\u0d99\u0db2\u0dbc\u0dbe-\u0dbf\u0dc7-\u0dc9\u0dcb-\u0dce\u0dd5\u0dd7\u0de0-\u0de5\u0df0-\u0df1\u0df5-\u0e00\u0e3b-\u0e3e\u0e5c-\u0e80\u0e83\u0e85-\u0e86\u0e89\u0e8b-\u0e8c\u0e8e-\u0e93\u0e98\u0ea0\u0ea4\u0ea6\u0ea8-\u0ea9\u0eac\u0eba\u0ebe-\u0ebf\u0ec5\u0ec7\u0ece-\u0ecf\u0eda-\u0edb\u0ee0-\u0eff\u0f48\u0f6d-\u0f70\u0f98\u0fbd\u0fcd\u0fdb-\u0fff\u10c6\u10c8-\u10cc\u10ce-\u10cf\u1249\u124e-\u124f\u1257\u1259\u125e-\u125f\u1289\u128e-\u128f\u12b1\u12b6-\u12b7\u12bf\u12c1\u12c6-\u12c7\u12d7\u1311\u1316-\u1317\u135b-\u135c\u137d-\u137f\u139a-\u139f\u13f6-\u13f7\u13fe-\u13ff\u169d-\u169f\u16f9-\u16ff\u170d\u1715-\u171f\u1737-\u173f\u1754-\u175f\u176d\u1771\u1774-\u177f\u17de-\u17df\u17ea-\u17ef\u17fa-\u17ff\u180f\u181a-\u181f\u1879-\u187f\u18ab-\u18af\u18f6-\u18ff\u191f\u192c-\u192f\u193c-\u193f\u1941-\u1943\u196e-\u196f\u1975-\u197f\u19ac-\u19af\u19ca-\u19cf\u19db-\u19dd\u1a1c-\u1a1d\u1a5f\u1a7d-\u1a7e\u1a8a-\u1a8f\u1a9a-\u1a9f\u1aae-\u1aaf\u1abf-\u1aff\u1b4c-\u1b4f\u1b7d-\u1b7f\u1bf4-\u1bfb\u1c38-\u1c3a\u1c4a-\u1c4c\u1c89-\u1c8f\u1cbb-\u1cbc\u1cc8-\u1ccf\u1cfa-\u1cff\u1dfa\u1f16-\u1f17\u1f1e-\u1f1f\u1f46-\u1f47\u1f4e-\u1f4f\u1f58\u1f5a\u1f5c\u1f5e\u1f7e-\u1f7f\u1fb5\u1fc5\u1fd4-\u1fd5\u1fdc\u1ff0-\u1ff1\u1ff5\u1fff\u2065\u2072-\u2073\u208f\u209d-\u209f\u20c0-\u20cf\u20f1-\u20ff\u218c-\u218f\u2427-\u243f\u244b-\u245f\u2b74-\u2b75\u2b96-\u2b97\u2bc9\u2bff\u2c2f\u2c5f\u2cf4-\u2cf8\u2d26\u2d28-\u2d2c\u2d2e-\u2d2f\u2d68-\u2d6e\u2d71-\u2d7e\u2d97-\u2d9f\u2da7\u2daf\u2db7\u2dbf\u2dc7\u2dcf\u2dd7\u2ddf\u2e4f-\u2e7f\u2e9a\u2ef4-\u2eff\u2fd6-\u2fef\u2ffc-\u2fff\u3040\u3097-\u3098\u3100-\u3104\u3130\u318f\u31bb-\u31bf\u31e4-\u31ef\u321f\u32ff\u4db6-\u4dbf\u9ff0-\u9fff\ua48d-\ua48f\ua4c7-\ua4cf\ua62c-\ua63f\ua6f8-\ua6ff\ua7ba-\ua7f6\ua82c-\ua82f\ua83a-\ua83f\ua878-\ua87f\ua8c6-\ua8cd\ua8da-\ua8df\ua954-\ua95e\ua97d-\ua97f\ua9ce\ua9da-\ua9dd\ua9ff\uaa37-\uaa3f\uaa4e-\uaa4f\uaa5a-\uaa5b\uaac3-\uaada\uaaf7-\uab00\uab07-\uab08\uab0f-\uab10\uab17-\uab1f\uab27\uab2f\uab66-\uab6f\uabee-\uabef\uabfa-\uabff\ud7a4-\ud7af\ud7c7-\ud7ca\ud7fc-\ud7ff\ufa6e-\ufa6f\ufada-\ufaff\ufb07-\ufb12\ufb18-\ufb1c\ufb37\ufb3d\ufb3f\ufb42\ufb45\ufbc2-\ufbd2\ufd40-\ufd4f\ufd90-\ufd91\ufdc8-\ufdef\ufdfe-\ufdff\ufe1a-\ufe1f\ufe53\ufe67\ufe6c-\ufe6f\ufe75\ufefd-\ufefe\uff00\uffbf-\uffc1\uffc8-\uffc9\uffd0-\uffd1\uffd8-\uffd9\uffdd-\uffdf\uffe7\uffef-\ufff8\ufffe-\uffff\U0001000c\U00010027\U0001003b\U0001003e\U0001004e-\U0001004f\U0001005e-\U0001007f\U000100fb-\U000100ff\U00010103-\U00010106\U00010134-\U00010136\U0001018f\U0001019c-\U0001019f\U000101a1-\U000101cf\U000101fe-\U0001027f\U0001029d-\U0001029f\U000102d1-\U000102df\U000102fc-\U000102ff\U00010324-\U0001032c\U0001034b-\U0001034f\U0001037b-\U0001037f\U0001039e\U000103c4-\U000103c7\U000103d6-\U000103ff\U0001049e-\U0001049f\U000104aa-\U000104af\U000104d4-\U000104d7\U000104fc-\U000104ff\U00010528-\U0001052f\U00010564-\U0001056e\U00010570-\U000105ff\U00010737-\U0001073f\U00010756-\U0001075f\U00010768-\U000107ff\U00010806-\U00010807\U00010809\U00010836\U00010839-\U0001083b\U0001083d-\U0001083e\U00010856\U0001089f-\U000108a6\U000108b0-\U000108df\U000108f3\U000108f6-\U000108fa\U0001091c-\U0001091e\U0001093a-\U0001093e\U00010940-\U0001097f\U000109b8-\U000109bb\U000109d0-\U000109d1\U00010a04\U00010a07-\U00010a0b\U00010a14\U00010a18\U00010a36-\U00010a37\U00010a3b-\U00010a3e\U00010a49-\U00010a4f\U00010a59-\U00010a5f\U00010aa0-\U00010abf\U00010ae7-\U00010aea\U00010af7-\U00010aff\U00010b36-\U00010b38\U00010b56-\U00010b57\U00010b73-\U00010b77\U00010b92-\U00010b98\U00010b9d-\U00010ba8\U00010bb0-\U00010bff\U00010c49-\U00010c7f\U00010cb3-\U00010cbf\U00010cf3-\U00010cf9\U00010d28-\U00010d2f\U00010d3a-\U00010e5f\U00010e7f-\U00010eff\U00010f28-\U00010f2f\U00010f5a-\U00010fff\U0001104e-\U00011051\U00011070-\U0001107e\U000110c2-\U000110cc\U000110ce-\U000110cf\U000110e9-\U000110ef\U000110fa-\U000110ff\U00011135\U00011147-\U0001114f\U00011177-\U0001117f\U000111ce-\U000111cf\U000111e0\U000111f5-\U000111ff\U00011212\U0001123f-\U0001127f\U00011287\U00011289\U0001128e\U0001129e\U000112aa-\U000112af\U000112eb-\U000112ef\U000112fa-\U000112ff\U00011304\U0001130d-\U0001130e\U00011311-\U00011312\U00011329\U00011331\U00011334\U0001133a\U00011345-\U00011346\U00011349-\U0001134a\U0001134e-\U0001134f\U00011351-\U00011356\U00011358-\U0001135c\U00011364-\U00011365\U0001136d-\U0001136f\U00011375-\U000113ff\U0001145a\U0001145c\U0001145f-\U0001147f\U000114c8-\U000114cf\U000114da-\U0001157f\U000115b6-\U000115b7\U000115de-\U000115ff\U00011645-\U0001164f\U0001165a-\U0001165f\U0001166d-\U0001167f\U000116b8-\U000116bf\U000116ca-\U000116ff\U0001171b-\U0001171c\U0001172c-\U0001172f\U00011740-\U000117ff\U0001183c-\U0001189f\U000118f3-\U000118fe\U00011900-\U000119ff\U00011a48-\U00011a4f\U00011a84-\U00011a85\U00011aa3-\U00011abf\U00011af9-\U00011bff\U00011c09\U00011c37\U00011c46-\U00011c4f\U00011c6d-\U00011c6f\U00011c90-\U00011c91\U00011ca8\U00011cb7-\U00011cff\U00011d07\U00011d0a\U00011d37-\U00011d39\U00011d3b\U00011d3e\U00011d48-\U00011d4f\U00011d5a-\U00011d5f\U00011d66\U00011d69\U00011d8f\U00011d92\U00011d99-\U00011d9f\U00011daa-\U00011edf\U00011ef9-\U00011fff\U0001239a-\U000123ff\U0001246f\U00012475-\U0001247f\U00012544-\U00012fff\U0001342f-\U000143ff\U00014647-\U000167ff\U00016a39-\U00016a3f\U00016a5f\U00016a6a-\U00016a6d\U00016a70-\U00016acf\U00016aee-\U00016aef\U00016af6-\U00016aff\U00016b46-\U00016b4f\U00016b5a\U00016b62\U00016b78-\U00016b7c\U00016b90-\U00016e3f\U00016e9b-\U00016eff\U00016f45-\U00016f4f\U00016f7f-\U00016f8e\U00016fa0-\U00016fdf\U00016fe2-\U00016fff\U000187f2-\U000187ff\U00018af3-\U0001afff\U0001b11f-\U0001b16f\U0001b2fc-\U0001bbff\U0001bc6b-\U0001bc6f\U0001bc7d-\U0001bc7f\U0001bc89-\U0001bc8f\U0001bc9a-\U0001bc9b\U0001bca4-\U0001cfff\U0001d0f6-\U0001d0ff\U0001d127-\U0001d128\U0001d1e9-\U0001d1ff\U0001d246-\U0001d2df\U0001d2f4-\U0001d2ff\U0001d357-\U0001d35f\U0001d379-\U0001d3ff\U0001d455\U0001d49d\U0001d4a0-\U0001d4a1\U0001d4a3-\U0001d4a4\U0001d4a7-\U0001d4a8\U0001d4ad\U0001d4ba\U0001d4bc\U0001d4c4\U0001d506\U0001d50b-\U0001d50c\U0001d515\U0001d51d\U0001d53a\U0001d53f\U0001d545\U0001d547-\U0001d549\U0001d551\U0001d6a6-\U0001d6a7\U0001d7cc-\U0001d7cd\U0001da8c-\U0001da9a\U0001daa0\U0001dab0-\U0001dfff\U0001e007\U0001e019-\U0001e01a\U0001e022\U0001e025\U0001e02b-\U0001e7ff\U0001e8c5-\U0001e8c6\U0001e8d7-\U0001e8ff\U0001e94b-\U0001e94f\U0001e95a-\U0001e95d\U0001e960-\U0001ec70\U0001ecb5-\U0001edff\U0001ee04\U0001ee20\U0001ee23\U0001ee25-\U0001ee26\U0001ee28\U0001ee33\U0001ee38\U0001ee3a\U0001ee3c-\U0001ee41\U0001ee43-\U0001ee46\U0001ee48\U0001ee4a\U0001ee4c\U0001ee50\U0001ee53\U0001ee55-\U0001ee56\U0001ee58\U0001ee5a\U0001ee5c\U0001ee5e\U0001ee60\U0001ee63\U0001ee65-\U0001ee66\U0001ee6b\U0001ee73\U0001ee78\U0001ee7d\U0001ee7f\U0001ee8a\U0001ee9c-\U0001eea0\U0001eea4\U0001eeaa\U0001eebc-\U0001eeef\U0001eef2-\U0001efff\U0001f02c-\U0001f02f\U0001f094-\U0001f09f\U0001f0af-\U0001f0b0\U0001f0c0\U0001f0d0\U0001f0f6-\U0001f0ff\U0001f10d-\U0001f10f\U0001f16c-\U0001f16f\U0001f1ad-\U0001f1e5\U0001f203-\U0001f20f\U0001f23c-\U0001f23f\U0001f249-\U0001f24f\U0001f252-\U0001f25f\U0001f266-\U0001f2ff\U0001f6d5-\U0001f6df\U0001f6ed-\U0001f6ef\U0001f6fa-\U0001f6ff\U0001f774-\U0001f77f\U0001f7d9-\U0001f7ff\U0001f80c-\U0001f80f\U0001f848-\U0001f84f\U0001f85a-\U0001f85f\U0001f888-\U0001f88f\U0001f8ae-\U0001f8ff\U0001f90c-\U0001f90f\U0001f93f\U0001f971-\U0001f972\U0001f977-\U0001f979\U0001f97b\U0001f9a3-\U0001f9af\U0001f9ba-\U0001f9bf\U0001f9c3-\U0001f9cf\U0001fa00-\U0001fa5f\U0001fa6e-\U0001ffff\U0002a6d7-\U0002a6ff\U0002b735-\U0002b73f\U0002b81e-\U0002b81f\U0002cea2-\U0002ceaf\U0002ebe1-\U0002f7ff\U0002fa1e-\U000e0000\U000e0002-\U000e001f\U000e0080-\U000e00ff\U000e01f0-\U000effff\U000ffffe-\U000fffff\U0010fffe-\U0010ffff'
+
+Co = '\ue000-\uf8ff\U000f0000-\U000ffffd\U00100000-\U0010fffd'
+
+Cs = '\ud800-\udbff\\\udc00\udc01-\udfff'
+
+Ll = 'a-z\xb5\xdf-\xf6\xf8-\xff\u0101\u0103\u0105\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d\u012f\u0131\u0133\u0135\u0137-\u0138\u013a\u013c\u013e\u0140\u0142\u0144\u0146\u0148-\u0149\u014b\u014d\u014f\u0151\u0153\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c\u017e-\u0180\u0183\u0185\u0188\u018c-\u018d\u0192\u0195\u0199-\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa-\u01ab\u01ad\u01b0\u01b4\u01b6\u01b9-\u01ba\u01bd-\u01bf\u01c6\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc-\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef-\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d\u022f\u0231\u0233-\u0239\u023c\u023f-\u0240\u0242\u0247\u0249\u024b\u024d\u024f-\u0293\u0295-\u02af\u0371\u0373\u0377\u037b-\u037d\u0390\u03ac-\u03ce\u03d0-\u03d1\u03d5-\u03d7\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb\u03ed\u03ef-\u03f3\u03f5\u03f8\u03fb-\u03fc\u0430-\u045f\u0461\u0463\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce-\u04cf\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f\u0521\u0523\u0525\u0527\u0529\u052b\u052d\u052f\u0560-\u0588\u10d0-\u10fa\u10fd-\u10ff\u13f8-\u13fd\u1c80-\u1c88\u1d00-\u1d2b\u1d6b-\u1d77\u1d79-\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95-\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7\u1ef9\u1efb\u1efd\u1eff-\u1f07\u1f10-\u1f15\u1f20-\u1f27\u1f30-\u1f37\u1f40-\u1f45\u1f50-\u1f57\u1f60-\u1f67\u1f70-\u1f7d\u1f80-\u1f87\u1f90-\u1f97\u1fa0-\u1fa7\u1fb0-\u1fb4\u1fb6-\u1fb7\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fc7\u1fd0-\u1fd3\u1fd6-\u1fd7\u1fe0-\u1fe7\u1ff2-\u1ff4\u1ff6-\u1ff7\u210a\u210e-\u210f\u2113\u212f\u2134\u2139\u213c-\u213d\u2146-\u2149\u214e\u2184\u2c30-\u2c5e\u2c61\u2c65-\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73-\u2c74\u2c76-\u2c7b\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb\u2cdd\u2cdf\u2ce1\u2ce3-\u2ce4\u2cec\u2cee\u2cf3\u2d00-\u2d25\u2d27\u2d2d\ua641\ua643\ua645\ua647\ua649\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d\ua65f\ua661\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua699\ua69b\ua723\ua725\ua727\ua729\ua72b\ua72d\ua72f-\ua731\ua733\ua735\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771-\ua778\ua77a\ua77c\ua77f\ua781\ua783\ua785\ua787\ua78c\ua78e\ua791\ua793-\ua795\ua797\ua799\ua79b\ua79d\ua79f\ua7a1\ua7a3\ua7a5\ua7a7\ua7a9\ua7af\ua7b5\ua7b7\ua7b9\ua7fa\uab30-\uab5a\uab60-\uab65\uab70-\uabbf\ufb00-\ufb06\ufb13-\ufb17\uff41-\uff5a\U00010428-\U0001044f\U000104d8-\U000104fb\U00010cc0-\U00010cf2\U000118c0-\U000118df\U00016e60-\U00016e7f\U0001d41a-\U0001d433\U0001d44e-\U0001d454\U0001d456-\U0001d467\U0001d482-\U0001d49b\U0001d4b6-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d4cf\U0001d4ea-\U0001d503\U0001d51e-\U0001d537\U0001d552-\U0001d56b\U0001d586-\U0001d59f\U0001d5ba-\U0001d5d3\U0001d5ee-\U0001d607\U0001d622-\U0001d63b\U0001d656-\U0001d66f\U0001d68a-\U0001d6a5\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6e1\U0001d6fc-\U0001d714\U0001d716-\U0001d71b\U0001d736-\U0001d74e\U0001d750-\U0001d755\U0001d770-\U0001d788\U0001d78a-\U0001d78f\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7c9\U0001d7cb\U0001e922-\U0001e943'
+
+Lm = '\u02b0-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0374\u037a\u0559\u0640\u06e5-\u06e6\u07f4-\u07f5\u07fa\u081a\u0824\u0828\u0971\u0e46\u0ec6\u10fc\u17d7\u1843\u1aa7\u1c78-\u1c7d\u1d2c-\u1d6a\u1d78\u1d9b-\u1dbf\u2071\u207f\u2090-\u209c\u2c7c-\u2c7d\u2d6f\u2e2f\u3005\u3031-\u3035\u303b\u309d-\u309e\u30fc-\u30fe\ua015\ua4f8-\ua4fd\ua60c\ua67f\ua69c-\ua69d\ua717-\ua71f\ua770\ua788\ua7f8-\ua7f9\ua9cf\ua9e6\uaa70\uaadd\uaaf3-\uaaf4\uab5c-\uab5f\uff70\uff9e-\uff9f\U00016b40-\U00016b43\U00016f93-\U00016f9f\U00016fe0-\U00016fe1'
+
+Lo = '\xaa\xba\u01bb\u01c0-\u01c3\u0294\u05d0-\u05ea\u05ef-\u05f2\u0620-\u063f\u0641-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u0800-\u0815\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0972-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32-\u0e33\u0e40-\u0e45\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2-\u0eb3\u0ebd\u0ec0-\u0ec4\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u1100-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16f1-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17dc\u1820-\u1842\u1844-\u1878\u1880-\u1884\u1887-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c77\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u2135-\u2138\u2d30-\u2d67\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3006\u303c\u3041-\u3096\u309f\u30a1-\u30fa\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua014\ua016-\ua48c\ua4d0-\ua4f7\ua500-\ua60b\ua610-\ua61f\ua62a-\ua62b\ua66e\ua6a0-\ua6e5\ua78f\ua7f7\ua7fb-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9e0-\ua9e4\ua9e7-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa6f\uaa71-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadc\uaae0-\uaaea\uaaf2\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff66-\uff6f\uff71-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U00010340\U00010342-\U00010349\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U00010450-\U0001049d\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016f00-\U00016f44\U00016f50\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001e800-\U0001e8c4\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
+
+Lt = '\u01c5\u01c8\u01cb\u01f2\u1f88-\u1f8f\u1f98-\u1f9f\u1fa8-\u1faf\u1fbc\u1fcc\u1ffc'
+
+Lu = 'A-Z\xc0-\xd6\xd8-\xde\u0100\u0102\u0104\u0106\u0108\u010a\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170\u0172\u0174\u0176\u0178-\u0179\u017b\u017d\u0181-\u0182\u0184\u0186-\u0187\u0189-\u018b\u018e-\u0191\u0193-\u0194\u0196-\u0198\u019c-\u019d\u019f-\u01a0\u01a2\u01a4\u01a6-\u01a7\u01a9\u01ac\u01ae-\u01af\u01b1-\u01b3\u01b5\u01b7-\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6-\u01f8\u01fa\u01fc\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a-\u023b\u023d-\u023e\u0241\u0243-\u0246\u0248\u024a\u024c\u024e\u0370\u0372\u0376\u037f\u0386\u0388-\u038a\u038c\u038e-\u038f\u0391-\u03a1\u03a3-\u03ab\u03cf\u03d2-\u03d4\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea\u03ec\u03ee\u03f4\u03f7\u03f9-\u03fa\u03fd-\u042f\u0460\u0462\u0464\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc\u04be\u04c0-\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520\u0522\u0524\u0526\u0528\u052a\u052c\u052e\u0531-\u0556\u10a0-\u10c5\u10c7\u10cd\u13a0-\u13f5\u1c90-\u1cba\u1cbd-\u1cbf\u1e00\u1e02\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa\u1efc\u1efe\u1f08-\u1f0f\u1f18-\u1f1d\u1f28-\u1f2f\u1f38-\u1f3f\u1f48-\u1f4d\u1f59\u1f5b\u1f5d\u1f5f\u1f68-\u1f6f\u1fb8-\u1fbb\u1fc8-\u1fcb\u1fd8-\u1fdb\u1fe8-\u1fec\u1ff8-\u1ffb\u2102\u2107\u210b-\u210d\u2110-\u2112\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u2130-\u2133\u213e-\u213f\u2145\u2183\u2c00-\u2c2e\u2c60\u2c62-\u2c64\u2c67\u2c69\u2c6b\u2c6d-\u2c70\u2c72\u2c75\u2c7e-\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde\u2ce0\u2ce2\u2ceb\u2ced\u2cf2\ua640\ua642\ua644\ua646\ua648\ua64a\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e\ua660\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua698\ua69a\ua722\ua724\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d-\ua77e\ua780\ua782\ua784\ua786\ua78b\ua78d\ua790\ua792\ua796\ua798\ua79a\ua79c\ua79e\ua7a0\ua7a2\ua7a4\ua7a6\ua7a8\ua7aa-\ua7ae\ua7b0-\ua7b4\ua7b6\ua7b8\uff21-\uff3a\U00010400-\U00010427\U000104b0-\U000104d3\U00010c80-\U00010cb2\U000118a0-\U000118bf\U00016e40-\U00016e5f\U0001d400-\U0001d419\U0001d434-\U0001d44d\U0001d468-\U0001d481\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b5\U0001d4d0-\U0001d4e9\U0001d504-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d538-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d56c-\U0001d585\U0001d5a0-\U0001d5b9\U0001d5d4-\U0001d5ed\U0001d608-\U0001d621\U0001d63c-\U0001d655\U0001d670-\U0001d689\U0001d6a8-\U0001d6c0\U0001d6e2-\U0001d6fa\U0001d71c-\U0001d734\U0001d756-\U0001d76e\U0001d790-\U0001d7a8\U0001d7ca\U0001e900-\U0001e921'
+
+Mc = '\u0903\u093b\u093e-\u0940\u0949-\u094c\u094e-\u094f\u0982-\u0983\u09be-\u09c0\u09c7-\u09c8\u09cb-\u09cc\u09d7\u0a03\u0a3e-\u0a40\u0a83\u0abe-\u0ac0\u0ac9\u0acb-\u0acc\u0b02-\u0b03\u0b3e\u0b40\u0b47-\u0b48\u0b4b-\u0b4c\u0b57\u0bbe-\u0bbf\u0bc1-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcc\u0bd7\u0c01-\u0c03\u0c41-\u0c44\u0c82-\u0c83\u0cbe\u0cc0-\u0cc4\u0cc7-\u0cc8\u0cca-\u0ccb\u0cd5-\u0cd6\u0d02-\u0d03\u0d3e-\u0d40\u0d46-\u0d48\u0d4a-\u0d4c\u0d57\u0d82-\u0d83\u0dcf-\u0dd1\u0dd8-\u0ddf\u0df2-\u0df3\u0f3e-\u0f3f\u0f7f\u102b-\u102c\u1031\u1038\u103b-\u103c\u1056-\u1057\u1062-\u1064\u1067-\u106d\u1083-\u1084\u1087-\u108c\u108f\u109a-\u109c\u17b6\u17be-\u17c5\u17c7-\u17c8\u1923-\u1926\u1929-\u192b\u1930-\u1931\u1933-\u1938\u1a19-\u1a1a\u1a55\u1a57\u1a61\u1a63-\u1a64\u1a6d-\u1a72\u1b04\u1b35\u1b3b\u1b3d-\u1b41\u1b43-\u1b44\u1b82\u1ba1\u1ba6-\u1ba7\u1baa\u1be7\u1bea-\u1bec\u1bee\u1bf2-\u1bf3\u1c24-\u1c2b\u1c34-\u1c35\u1ce1\u1cf2-\u1cf3\u1cf7\u302e-\u302f\ua823-\ua824\ua827\ua880-\ua881\ua8b4-\ua8c3\ua952-\ua953\ua983\ua9b4-\ua9b5\ua9ba-\ua9bb\ua9bd-\ua9c0\uaa2f-\uaa30\uaa33-\uaa34\uaa4d\uaa7b\uaa7d\uaaeb\uaaee-\uaaef\uaaf5\uabe3-\uabe4\uabe6-\uabe7\uabe9-\uabea\uabec\U00011000\U00011002\U00011082\U000110b0-\U000110b2\U000110b7-\U000110b8\U0001112c\U00011145-\U00011146\U00011182\U000111b3-\U000111b5\U000111bf-\U000111c0\U0001122c-\U0001122e\U00011232-\U00011233\U00011235\U000112e0-\U000112e2\U00011302-\U00011303\U0001133e-\U0001133f\U00011341-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011357\U00011362-\U00011363\U00011435-\U00011437\U00011440-\U00011441\U00011445\U000114b0-\U000114b2\U000114b9\U000114bb-\U000114be\U000114c1\U000115af-\U000115b1\U000115b8-\U000115bb\U000115be\U00011630-\U00011632\U0001163b-\U0001163c\U0001163e\U000116ac\U000116ae-\U000116af\U000116b6\U00011720-\U00011721\U00011726\U0001182c-\U0001182e\U00011838\U00011a39\U00011a57-\U00011a58\U00011a97\U00011c2f\U00011c3e\U00011ca9\U00011cb1\U00011cb4\U00011d8a-\U00011d8e\U00011d93-\U00011d94\U00011d96\U00011ef5-\U00011ef6\U00016f51-\U00016f7e\U0001d165-\U0001d166\U0001d16d-\U0001d172'
+
+Me = '\u0488-\u0489\u1abe\u20dd-\u20e0\u20e2-\u20e4\ua670-\ua672'
+
+Mn = '\u0300-\u036f\u0483-\u0487\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u0610-\u061a\u064b-\u065f\u0670\u06d6-\u06dc\u06df-\u06e4\u06e7-\u06e8\u06ea-\u06ed\u0711\u0730-\u074a\u07a6-\u07b0\u07eb-\u07f3\u07fd\u0816-\u0819\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0859-\u085b\u08d3-\u08e1\u08e3-\u0902\u093a\u093c\u0941-\u0948\u094d\u0951-\u0957\u0962-\u0963\u0981\u09bc\u09c1-\u09c4\u09cd\u09e2-\u09e3\u09fe\u0a01-\u0a02\u0a3c\u0a41-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a70-\u0a71\u0a75\u0a81-\u0a82\u0abc\u0ac1-\u0ac5\u0ac7-\u0ac8\u0acd\u0ae2-\u0ae3\u0afa-\u0aff\u0b01\u0b3c\u0b3f\u0b41-\u0b44\u0b4d\u0b56\u0b62-\u0b63\u0b82\u0bc0\u0bcd\u0c00\u0c04\u0c3e-\u0c40\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c62-\u0c63\u0c81\u0cbc\u0cbf\u0cc6\u0ccc-\u0ccd\u0ce2-\u0ce3\u0d00-\u0d01\u0d3b-\u0d3c\u0d41-\u0d44\u0d4d\u0d62-\u0d63\u0dca\u0dd2-\u0dd4\u0dd6\u0e31\u0e34-\u0e3a\u0e47-\u0e4e\u0eb1\u0eb4-\u0eb9\u0ebb-\u0ebc\u0ec8-\u0ecd\u0f18-\u0f19\u0f35\u0f37\u0f39\u0f71-\u0f7e\u0f80-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u102d-\u1030\u1032-\u1037\u1039-\u103a\u103d-\u103e\u1058-\u1059\u105e-\u1060\u1071-\u1074\u1082\u1085-\u1086\u108d\u109d\u135d-\u135f\u1712-\u1714\u1732-\u1734\u1752-\u1753\u1772-\u1773\u17b4-\u17b5\u17b7-\u17bd\u17c6\u17c9-\u17d3\u17dd\u180b-\u180d\u1885-\u1886\u18a9\u1920-\u1922\u1927-\u1928\u1932\u1939-\u193b\u1a17-\u1a18\u1a1b\u1a56\u1a58-\u1a5e\u1a60\u1a62\u1a65-\u1a6c\u1a73-\u1a7c\u1a7f\u1ab0-\u1abd\u1b00-\u1b03\u1b34\u1b36-\u1b3a\u1b3c\u1b42\u1b6b-\u1b73\u1b80-\u1b81\u1ba2-\u1ba5\u1ba8-\u1ba9\u1bab-\u1bad\u1be6\u1be8-\u1be9\u1bed\u1bef-\u1bf1\u1c2c-\u1c33\u1c36-\u1c37\u1cd0-\u1cd2\u1cd4-\u1ce0\u1ce2-\u1ce8\u1ced\u1cf4\u1cf8-\u1cf9\u1dc0-\u1df9\u1dfb-\u1dff\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2cef-\u2cf1\u2d7f\u2de0-\u2dff\u302a-\u302d\u3099-\u309a\ua66f\ua674-\ua67d\ua69e-\ua69f\ua6f0-\ua6f1\ua802\ua806\ua80b\ua825-\ua826\ua8c4-\ua8c5\ua8e0-\ua8f1\ua8ff\ua926-\ua92d\ua947-\ua951\ua980-\ua982\ua9b3\ua9b6-\ua9b9\ua9bc\ua9e5\uaa29-\uaa2e\uaa31-\uaa32\uaa35-\uaa36\uaa43\uaa4c\uaa7c\uaab0\uaab2-\uaab4\uaab7-\uaab8\uaabe-\uaabf\uaac1\uaaec-\uaaed\uaaf6\uabe5\uabe8\uabed\ufb1e\ufe00-\ufe0f\ufe20-\ufe2f\U000101fd\U000102e0\U00010376-\U0001037a\U00010a01-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a0f\U00010a38-\U00010a3a\U00010a3f\U00010ae5-\U00010ae6\U00010d24-\U00010d27\U00010f46-\U00010f50\U00011001\U00011038-\U00011046\U0001107f-\U00011081\U000110b3-\U000110b6\U000110b9-\U000110ba\U00011100-\U00011102\U00011127-\U0001112b\U0001112d-\U00011134\U00011173\U00011180-\U00011181\U000111b6-\U000111be\U000111c9-\U000111cc\U0001122f-\U00011231\U00011234\U00011236-\U00011237\U0001123e\U000112df\U000112e3-\U000112ea\U00011300-\U00011301\U0001133b-\U0001133c\U00011340\U00011366-\U0001136c\U00011370-\U00011374\U00011438-\U0001143f\U00011442-\U00011444\U00011446\U0001145e\U000114b3-\U000114b8\U000114ba\U000114bf-\U000114c0\U000114c2-\U000114c3\U000115b2-\U000115b5\U000115bc-\U000115bd\U000115bf-\U000115c0\U000115dc-\U000115dd\U00011633-\U0001163a\U0001163d\U0001163f-\U00011640\U000116ab\U000116ad\U000116b0-\U000116b5\U000116b7\U0001171d-\U0001171f\U00011722-\U00011725\U00011727-\U0001172b\U0001182f-\U00011837\U00011839-\U0001183a\U00011a01-\U00011a0a\U00011a33-\U00011a38\U00011a3b-\U00011a3e\U00011a47\U00011a51-\U00011a56\U00011a59-\U00011a5b\U00011a8a-\U00011a96\U00011a98-\U00011a99\U00011c30-\U00011c36\U00011c38-\U00011c3d\U00011c3f\U00011c92-\U00011ca7\U00011caa-\U00011cb0\U00011cb2-\U00011cb3\U00011cb5-\U00011cb6\U00011d31-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d45\U00011d47\U00011d90-\U00011d91\U00011d95\U00011d97\U00011ef3-\U00011ef4\U00016af0-\U00016af4\U00016b30-\U00016b36\U00016f8f-\U00016f92\U0001bc9d-\U0001bc9e\U0001d167-\U0001d169\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e8d0-\U0001e8d6\U0001e944-\U0001e94a\U000e0100-\U000e01ef'
+
+Nd = '0-9\u0660-\u0669\u06f0-\u06f9\u07c0-\u07c9\u0966-\u096f\u09e6-\u09ef\u0a66-\u0a6f\u0ae6-\u0aef\u0b66-\u0b6f\u0be6-\u0bef\u0c66-\u0c6f\u0ce6-\u0cef\u0d66-\u0d6f\u0de6-\u0def\u0e50-\u0e59\u0ed0-\u0ed9\u0f20-\u0f29\u1040-\u1049\u1090-\u1099\u17e0-\u17e9\u1810-\u1819\u1946-\u194f\u19d0-\u19d9\u1a80-\u1a89\u1a90-\u1a99\u1b50-\u1b59\u1bb0-\u1bb9\u1c40-\u1c49\u1c50-\u1c59\ua620-\ua629\ua8d0-\ua8d9\ua900-\ua909\ua9d0-\ua9d9\ua9f0-\ua9f9\uaa50-\uaa59\uabf0-\uabf9\uff10-\uff19\U000104a0-\U000104a9\U00010d30-\U00010d39\U00011066-\U0001106f\U000110f0-\U000110f9\U00011136-\U0001113f\U000111d0-\U000111d9\U000112f0-\U000112f9\U00011450-\U00011459\U000114d0-\U000114d9\U00011650-\U00011659\U000116c0-\U000116c9\U00011730-\U00011739\U000118e0-\U000118e9\U00011c50-\U00011c59\U00011d50-\U00011d59\U00011da0-\U00011da9\U00016a60-\U00016a69\U00016b50-\U00016b59\U0001d7ce-\U0001d7ff\U0001e950-\U0001e959'
+
+Nl = '\u16ee-\u16f0\u2160-\u2182\u2185-\u2188\u3007\u3021-\u3029\u3038-\u303a\ua6e6-\ua6ef\U00010140-\U00010174\U00010341\U0001034a\U000103d1-\U000103d5\U00012400-\U0001246e'
+
+No = '\xb2-\xb3\xb9\xbc-\xbe\u09f4-\u09f9\u0b72-\u0b77\u0bf0-\u0bf2\u0c78-\u0c7e\u0d58-\u0d5e\u0d70-\u0d78\u0f2a-\u0f33\u1369-\u137c\u17f0-\u17f9\u19da\u2070\u2074-\u2079\u2080-\u2089\u2150-\u215f\u2189\u2460-\u249b\u24ea-\u24ff\u2776-\u2793\u2cfd\u3192-\u3195\u3220-\u3229\u3248-\u324f\u3251-\u325f\u3280-\u3289\u32b1-\u32bf\ua830-\ua835\U00010107-\U00010133\U00010175-\U00010178\U0001018a-\U0001018b\U000102e1-\U000102fb\U00010320-\U00010323\U00010858-\U0001085f\U00010879-\U0001087f\U000108a7-\U000108af\U000108fb-\U000108ff\U00010916-\U0001091b\U000109bc-\U000109bd\U000109c0-\U000109cf\U000109d2-\U000109ff\U00010a40-\U00010a48\U00010a7d-\U00010a7e\U00010a9d-\U00010a9f\U00010aeb-\U00010aef\U00010b58-\U00010b5f\U00010b78-\U00010b7f\U00010ba9-\U00010baf\U00010cfa-\U00010cff\U00010e60-\U00010e7e\U00010f1d-\U00010f26\U00010f51-\U00010f54\U00011052-\U00011065\U000111e1-\U000111f4\U0001173a-\U0001173b\U000118ea-\U000118f2\U00011c5a-\U00011c6c\U00016b5b-\U00016b61\U00016e80-\U00016e96\U0001d2e0-\U0001d2f3\U0001d360-\U0001d378\U0001e8c7-\U0001e8cf\U0001ec71-\U0001ecab\U0001ecad-\U0001ecaf\U0001ecb1-\U0001ecb4\U0001f100-\U0001f10c'
+
+Pc = '_\u203f-\u2040\u2054\ufe33-\ufe34\ufe4d-\ufe4f\uff3f'
+
+Pd = '\\-\u058a\u05be\u1400\u1806\u2010-\u2015\u2e17\u2e1a\u2e3a-\u2e3b\u2e40\u301c\u3030\u30a0\ufe31-\ufe32\ufe58\ufe63\uff0d'
+
+Pe = ')\\]}\u0f3b\u0f3d\u169c\u2046\u207e\u208e\u2309\u230b\u232a\u2769\u276b\u276d\u276f\u2771\u2773\u2775\u27c6\u27e7\u27e9\u27eb\u27ed\u27ef\u2984\u2986\u2988\u298a\u298c\u298e\u2990\u2992\u2994\u2996\u2998\u29d9\u29db\u29fd\u2e23\u2e25\u2e27\u2e29\u3009\u300b\u300d\u300f\u3011\u3015\u3017\u3019\u301b\u301e-\u301f\ufd3e\ufe18\ufe36\ufe38\ufe3a\ufe3c\ufe3e\ufe40\ufe42\ufe44\ufe48\ufe5a\ufe5c\ufe5e\uff09\uff3d\uff5d\uff60\uff63'
+
+Pf = '\xbb\u2019\u201d\u203a\u2e03\u2e05\u2e0a\u2e0d\u2e1d\u2e21'
+
+Pi = '\xab\u2018\u201b-\u201c\u201f\u2039\u2e02\u2e04\u2e09\u2e0c\u2e1c\u2e20'
+
+Po = "!-#%-'*,.-/:-;?-@\\\\\xa1\xa7\xb6-\xb7\xbf\u037e\u0387\u055a-\u055f\u0589\u05c0\u05c3\u05c6\u05f3-\u05f4\u0609-\u060a\u060c-\u060d\u061b\u061e-\u061f\u066a-\u066d\u06d4\u0700-\u070d\u07f7-\u07f9\u0830-\u083e\u085e\u0964-\u0965\u0970\u09fd\u0a76\u0af0\u0c84\u0df4\u0e4f\u0e5a-\u0e5b\u0f04-\u0f12\u0f14\u0f85\u0fd0-\u0fd4\u0fd9-\u0fda\u104a-\u104f\u10fb\u1360-\u1368\u166d-\u166e\u16eb-\u16ed\u1735-\u1736\u17d4-\u17d6\u17d8-\u17da\u1800-\u1805\u1807-\u180a\u1944-\u1945\u1a1e-\u1a1f\u1aa0-\u1aa6\u1aa8-\u1aad\u1b5a-\u1b60\u1bfc-\u1bff\u1c3b-\u1c3f\u1c7e-\u1c7f\u1cc0-\u1cc7\u1cd3\u2016-\u2017\u2020-\u2027\u2030-\u2038\u203b-\u203e\u2041-\u2043\u2047-\u2051\u2053\u2055-\u205e\u2cf9-\u2cfc\u2cfe-\u2cff\u2d70\u2e00-\u2e01\u2e06-\u2e08\u2e0b\u2e0e-\u2e16\u2e18-\u2e19\u2e1b\u2e1e-\u2e1f\u2e2a-\u2e2e\u2e30-\u2e39\u2e3c-\u2e3f\u2e41\u2e43-\u2e4e\u3001-\u3003\u303d\u30fb\ua4fe-\ua4ff\ua60d-\ua60f\ua673\ua67e\ua6f2-\ua6f7\ua874-\ua877\ua8ce-\ua8cf\ua8f8-\ua8fa\ua8fc\ua92e-\ua92f\ua95f\ua9c1-\ua9cd\ua9de-\ua9df\uaa5c-\uaa5f\uaade-\uaadf\uaaf0-\uaaf1\uabeb\ufe10-\ufe16\ufe19\ufe30\ufe45-\ufe46\ufe49-\ufe4c\ufe50-\ufe52\ufe54-\ufe57\ufe5f-\ufe61\ufe68\ufe6a-\ufe6b\uff01-\uff03\uff05-\uff07\uff0a\uff0c\uff0e-\uff0f\uff1a-\uff1b\uff1f-\uff20\uff3c\uff61\uff64-\uff65\U00010100-\U00010102\U0001039f\U000103d0\U0001056f\U00010857\U0001091f\U0001093f\U00010a50-\U00010a58\U00010a7f\U00010af0-\U00010af6\U00010b39-\U00010b3f\U00010b99-\U00010b9c\U00010f55-\U00010f59\U00011047-\U0001104d\U000110bb-\U000110bc\U000110be-\U000110c1\U00011140-\U00011143\U00011174-\U00011175\U000111c5-\U000111c8\U000111cd\U000111db\U000111dd-\U000111df\U00011238-\U0001123d\U000112a9\U0001144b-\U0001144f\U0001145b\U0001145d\U000114c6\U000115c1-\U000115d7\U00011641-\U00011643\U00011660-\U0001166c\U0001173c-\U0001173e\U0001183b\U00011a3f-\U00011a46\U00011a9a-\U00011a9c\U00011a9e-\U00011aa2\U00011c41-\U00011c45\U00011c70-\U00011c71\U00011ef7-\U00011ef8\U00012470-\U00012474\U00016a6e-\U00016a6f\U00016af5\U00016b37-\U00016b3b\U00016b44\U00016e97-\U00016e9a\U0001bc9f\U0001da87-\U0001da8b\U0001e95e-\U0001e95f"
+
+Ps = '(\\[{\u0f3a\u0f3c\u169b\u201a\u201e\u2045\u207d\u208d\u2308\u230a\u2329\u2768\u276a\u276c\u276e\u2770\u2772\u2774\u27c5\u27e6\u27e8\u27ea\u27ec\u27ee\u2983\u2985\u2987\u2989\u298b\u298d\u298f\u2991\u2993\u2995\u2997\u29d8\u29da\u29fc\u2e22\u2e24\u2e26\u2e28\u2e42\u3008\u300a\u300c\u300e\u3010\u3014\u3016\u3018\u301a\u301d\ufd3f\ufe17\ufe35\ufe37\ufe39\ufe3b\ufe3d\ufe3f\ufe41\ufe43\ufe47\ufe59\ufe5b\ufe5d\uff08\uff3b\uff5b\uff5f\uff62'
+
+Sc = '$\xa2-\xa5\u058f\u060b\u07fe-\u07ff\u09f2-\u09f3\u09fb\u0af1\u0bf9\u0e3f\u17db\u20a0-\u20bf\ua838\ufdfc\ufe69\uff04\uffe0-\uffe1\uffe5-\uffe6\U0001ecb0'
+
+Sk = '\\^`\xa8\xaf\xb4\xb8\u02c2-\u02c5\u02d2-\u02df\u02e5-\u02eb\u02ed\u02ef-\u02ff\u0375\u0384-\u0385\u1fbd\u1fbf-\u1fc1\u1fcd-\u1fcf\u1fdd-\u1fdf\u1fed-\u1fef\u1ffd-\u1ffe\u309b-\u309c\ua700-\ua716\ua720-\ua721\ua789-\ua78a\uab5b\ufbb2-\ufbc1\uff3e\uff40\uffe3\U0001f3fb-\U0001f3ff'
+
+Sm = '+<->|~\xac\xb1\xd7\xf7\u03f6\u0606-\u0608\u2044\u2052\u207a-\u207c\u208a-\u208c\u2118\u2140-\u2144\u214b\u2190-\u2194\u219a-\u219b\u21a0\u21a3\u21a6\u21ae\u21ce-\u21cf\u21d2\u21d4\u21f4-\u22ff\u2320-\u2321\u237c\u239b-\u23b3\u23dc-\u23e1\u25b7\u25c1\u25f8-\u25ff\u266f\u27c0-\u27c4\u27c7-\u27e5\u27f0-\u27ff\u2900-\u2982\u2999-\u29d7\u29dc-\u29fb\u29fe-\u2aff\u2b30-\u2b44\u2b47-\u2b4c\ufb29\ufe62\ufe64-\ufe66\uff0b\uff1c-\uff1e\uff5c\uff5e\uffe2\uffe9-\uffec\U0001d6c1\U0001d6db\U0001d6fb\U0001d715\U0001d735\U0001d74f\U0001d76f\U0001d789\U0001d7a9\U0001d7c3\U0001eef0-\U0001eef1'
+
+So = '\xa6\xa9\xae\xb0\u0482\u058d-\u058e\u060e-\u060f\u06de\u06e9\u06fd-\u06fe\u07f6\u09fa\u0b70\u0bf3-\u0bf8\u0bfa\u0c7f\u0d4f\u0d79\u0f01-\u0f03\u0f13\u0f15-\u0f17\u0f1a-\u0f1f\u0f34\u0f36\u0f38\u0fbe-\u0fc5\u0fc7-\u0fcc\u0fce-\u0fcf\u0fd5-\u0fd8\u109e-\u109f\u1390-\u1399\u1940\u19de-\u19ff\u1b61-\u1b6a\u1b74-\u1b7c\u2100-\u2101\u2103-\u2106\u2108-\u2109\u2114\u2116-\u2117\u211e-\u2123\u2125\u2127\u2129\u212e\u213a-\u213b\u214a\u214c-\u214d\u214f\u218a-\u218b\u2195-\u2199\u219c-\u219f\u21a1-\u21a2\u21a4-\u21a5\u21a7-\u21ad\u21af-\u21cd\u21d0-\u21d1\u21d3\u21d5-\u21f3\u2300-\u2307\u230c-\u231f\u2322-\u2328\u232b-\u237b\u237d-\u239a\u23b4-\u23db\u23e2-\u2426\u2440-\u244a\u249c-\u24e9\u2500-\u25b6\u25b8-\u25c0\u25c2-\u25f7\u2600-\u266e\u2670-\u2767\u2794-\u27bf\u2800-\u28ff\u2b00-\u2b2f\u2b45-\u2b46\u2b4d-\u2b73\u2b76-\u2b95\u2b98-\u2bc8\u2bca-\u2bfe\u2ce5-\u2cea\u2e80-\u2e99\u2e9b-\u2ef3\u2f00-\u2fd5\u2ff0-\u2ffb\u3004\u3012-\u3013\u3020\u3036-\u3037\u303e-\u303f\u3190-\u3191\u3196-\u319f\u31c0-\u31e3\u3200-\u321e\u322a-\u3247\u3250\u3260-\u327f\u328a-\u32b0\u32c0-\u32fe\u3300-\u33ff\u4dc0-\u4dff\ua490-\ua4c6\ua828-\ua82b\ua836-\ua837\ua839\uaa77-\uaa79\ufdfd\uffe4\uffe8\uffed-\uffee\ufffc-\ufffd\U00010137-\U0001013f\U00010179-\U00010189\U0001018c-\U0001018e\U00010190-\U0001019b\U000101a0\U000101d0-\U000101fc\U00010877-\U00010878\U00010ac8\U0001173f\U00016b3c-\U00016b3f\U00016b45\U0001bc9c\U0001d000-\U0001d0f5\U0001d100-\U0001d126\U0001d129-\U0001d164\U0001d16a-\U0001d16c\U0001d183-\U0001d184\U0001d18c-\U0001d1a9\U0001d1ae-\U0001d1e8\U0001d200-\U0001d241\U0001d245\U0001d300-\U0001d356\U0001d800-\U0001d9ff\U0001da37-\U0001da3a\U0001da6d-\U0001da74\U0001da76-\U0001da83\U0001da85-\U0001da86\U0001ecac\U0001f000-\U0001f02b\U0001f030-\U0001f093\U0001f0a0-\U0001f0ae\U0001f0b1-\U0001f0bf\U0001f0c1-\U0001f0cf\U0001f0d1-\U0001f0f5\U0001f110-\U0001f16b\U0001f170-\U0001f1ac\U0001f1e6-\U0001f202\U0001f210-\U0001f23b\U0001f240-\U0001f248\U0001f250-\U0001f251\U0001f260-\U0001f265\U0001f300-\U0001f3fa\U0001f400-\U0001f6d4\U0001f6e0-\U0001f6ec\U0001f6f0-\U0001f6f9\U0001f700-\U0001f773\U0001f780-\U0001f7d8\U0001f800-\U0001f80b\U0001f810-\U0001f847\U0001f850-\U0001f859\U0001f860-\U0001f887\U0001f890-\U0001f8ad\U0001f900-\U0001f90b\U0001f910-\U0001f93e\U0001f940-\U0001f970\U0001f973-\U0001f976\U0001f97a\U0001f97c-\U0001f9a2\U0001f9b0-\U0001f9b9\U0001f9c0-\U0001f9c2\U0001f9d0-\U0001f9ff\U0001fa60-\U0001fa6d'
+
+Zl = '\u2028'
+
+Zp = '\u2029'
+
+Zs = ' \xa0\u1680\u2000-\u200a\u202f\u205f\u3000'
+
+xid_continue = '0-9A-Z_a-z\xaa\xb5\xb7\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0300-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u0483-\u0487\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u0591-\u05bd\u05bf\u05c1-\u05c2\u05c4-\u05c5\u05c7\u05d0-\u05ea\u05ef-\u05f2\u0610-\u061a\u0620-\u0669\u066e-\u06d3\u06d5-\u06dc\u06df-\u06e8\u06ea-\u06fc\u06ff\u0710-\u074a\u074d-\u07b1\u07c0-\u07f5\u07fa\u07fd\u0800-\u082d\u0840-\u085b\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u08d3-\u08e1\u08e3-\u0963\u0966-\u096f\u0971-\u0983\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bc-\u09c4\u09c7-\u09c8\u09cb-\u09ce\u09d7\u09dc-\u09dd\u09df-\u09e3\u09e6-\u09f1\u09fc\u09fe\u0a01-\u0a03\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a3c\u0a3e-\u0a42\u0a47-\u0a48\u0a4b-\u0a4d\u0a51\u0a59-\u0a5c\u0a5e\u0a66-\u0a75\u0a81-\u0a83\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abc-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ad0\u0ae0-\u0ae3\u0ae6-\u0aef\u0af9-\u0aff\u0b01-\u0b03\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3c-\u0b44\u0b47-\u0b48\u0b4b-\u0b4d\u0b56-\u0b57\u0b5c-\u0b5d\u0b5f-\u0b63\u0b66-\u0b6f\u0b71\u0b82-\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd0\u0bd7\u0be6-\u0bef\u0c00-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d-\u0c44\u0c46-\u0c48\u0c4a-\u0c4d\u0c55-\u0c56\u0c58-\u0c5a\u0c60-\u0c63\u0c66-\u0c6f\u0c80-\u0c83\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbc-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5-\u0cd6\u0cde\u0ce0-\u0ce3\u0ce6-\u0cef\u0cf1-\u0cf2\u0d00-\u0d03\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d44\u0d46-\u0d48\u0d4a-\u0d4e\u0d54-\u0d57\u0d5f-\u0d63\u0d66-\u0d6f\u0d7a-\u0d7f\u0d82-\u0d83\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0de6-\u0def\u0df2-\u0df3\u0e01-\u0e3a\u0e40-\u0e4e\u0e50-\u0e59\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb9\u0ebb-\u0ebd\u0ec0-\u0ec4\u0ec6\u0ec8-\u0ecd\u0ed0-\u0ed9\u0edc-\u0edf\u0f00\u0f18-\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f3e-\u0f47\u0f49-\u0f6c\u0f71-\u0f84\u0f86-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1049\u1050-\u109d\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u135d-\u135f\u1369-\u1371\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1714\u1720-\u1734\u1740-\u1753\u1760-\u176c\u176e-\u1770\u1772-\u1773\u1780-\u17d3\u17d7\u17dc-\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1820-\u1878\u1880-\u18aa\u18b0-\u18f5\u1900-\u191e\u1920-\u192b\u1930-\u193b\u1946-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u19d0-\u19da\u1a00-\u1a1b\u1a20-\u1a5e\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1aa7\u1ab0-\u1abd\u1b00-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1b80-\u1bf3\u1c00-\u1c37\u1c40-\u1c49\u1c4d-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1cd0-\u1cd2\u1cd4-\u1cf9\u1d00-\u1df9\u1dfb-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u203f-\u2040\u2054\u2071\u207f\u2090-\u209c\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d7f-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2de0-\u2dff\u3005-\u3007\u3021-\u302f\u3031-\u3035\u3038-\u303c\u3041-\u3096\u3099-\u309a\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua62b\ua640-\ua66f\ua674-\ua67d\ua67f-\ua6f1\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua827\ua840-\ua873\ua880-\ua8c5\ua8d0-\ua8d9\ua8e0-\ua8f7\ua8fb\ua8fd-\ua92d\ua930-\ua953\ua960-\ua97c\ua980-\ua9c0\ua9cf-\ua9d9\ua9e0-\ua9fe\uaa00-\uaa36\uaa40-\uaa4d\uaa50-\uaa59\uaa60-\uaa76\uaa7a-\uaac2\uaadb-\uaadd\uaae0-\uaaef\uaaf2-\uaaf6\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabea\uabec-\uabed\uabf0-\uabf9\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe00-\ufe0f\ufe20-\ufe2f\ufe33-\ufe34\ufe4d-\ufe4f\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff10-\uff19\uff21-\uff3a\uff3f\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U000101fd\U00010280-\U0001029c\U000102a0-\U000102d0\U000102e0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U0001037a\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104a0-\U000104a9\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00-\U00010a03\U00010a05-\U00010a06\U00010a0c-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a38-\U00010a3a\U00010a3f\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae6\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d27\U00010d30-\U00010d39\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f50\U00011000-\U00011046\U00011066-\U0001106f\U0001107f-\U000110ba\U000110d0-\U000110e8\U000110f0-\U000110f9\U00011100-\U00011134\U00011136-\U0001113f\U00011144-\U00011146\U00011150-\U00011173\U00011176\U00011180-\U000111c4\U000111c9-\U000111cc\U000111d0-\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U00011237\U0001123e\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112ea\U000112f0-\U000112f9\U00011300-\U00011303\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133b-\U00011344\U00011347-\U00011348\U0001134b-\U0001134d\U00011350\U00011357\U0001135d-\U00011363\U00011366-\U0001136c\U00011370-\U00011374\U00011400-\U0001144a\U00011450-\U00011459\U0001145e\U00011480-\U000114c5\U000114c7\U000114d0-\U000114d9\U00011580-\U000115b5\U000115b8-\U000115c0\U000115d8-\U000115dd\U00011600-\U00011640\U00011644\U00011650-\U00011659\U00011680-\U000116b7\U000116c0-\U000116c9\U00011700-\U0001171a\U0001171d-\U0001172b\U00011730-\U00011739\U00011800-\U0001183a\U000118a0-\U000118e9\U000118ff\U00011a00-\U00011a3e\U00011a47\U00011a50-\U00011a83\U00011a86-\U00011a99\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c36\U00011c38-\U00011c40\U00011c50-\U00011c59\U00011c72-\U00011c8f\U00011c92-\U00011ca7\U00011ca9-\U00011cb6\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d36\U00011d3a\U00011d3c-\U00011d3d\U00011d3f-\U00011d47\U00011d50-\U00011d59\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d8e\U00011d90-\U00011d91\U00011d93-\U00011d98\U00011da0-\U00011da9\U00011ee0-\U00011ef6\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016a60-\U00016a69\U00016ad0-\U00016aed\U00016af0-\U00016af4\U00016b00-\U00016b36\U00016b40-\U00016b43\U00016b50-\U00016b59\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50-\U00016f7e\U00016f8f-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001bc9d-\U0001bc9e\U0001d165-\U0001d169\U0001d16d-\U0001d172\U0001d17b-\U0001d182\U0001d185-\U0001d18b\U0001d1aa-\U0001d1ad\U0001d242-\U0001d244\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001d7ce-\U0001d7ff\U0001da00-\U0001da36\U0001da3b-\U0001da6c\U0001da75\U0001da84\U0001da9b-\U0001da9f\U0001daa1-\U0001daaf\U0001e000-\U0001e006\U0001e008-\U0001e018\U0001e01b-\U0001e021\U0001e023-\U0001e024\U0001e026-\U0001e02a\U0001e800-\U0001e8c4\U0001e8d0-\U0001e8d6\U0001e900-\U0001e94a\U0001e950-\U0001e959\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d\U000e0100-\U000e01ef'
+
+xid_start = 'A-Z_a-z\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376-\u0377\u037b-\u037d\u037f\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u052f\u0531-\u0556\u0559\u0560-\u0588\u05d0-\u05ea\u05ef-\u05f2\u0620-\u064a\u066e-\u066f\u0671-\u06d3\u06d5\u06e5-\u06e6\u06ee-\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4-\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u0860-\u086a\u08a0-\u08b4\u08b6-\u08bd\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0980\u0985-\u098c\u098f-\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc-\u09dd\u09df-\u09e1\u09f0-\u09f1\u09fc\u0a05-\u0a0a\u0a0f-\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32-\u0a33\u0a35-\u0a36\u0a38-\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2-\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0-\u0ae1\u0af9\u0b05-\u0b0c\u0b0f-\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32-\u0b33\u0b35-\u0b39\u0b3d\u0b5c-\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99-\u0b9a\u0b9c\u0b9e-\u0b9f\u0ba3-\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c39\u0c3d\u0c58-\u0c5a\u0c60-\u0c61\u0c80\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0-\u0ce1\u0cf1-\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d54-\u0d56\u0d5f-\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e40-\u0e46\u0e81-\u0e82\u0e84\u0e87-\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa-\u0eab\u0ead-\u0eb0\u0eb2\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065-\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f5\u13f8-\u13fd\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f8\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1878\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191e\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19b0-\u19c9\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae-\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1c80-\u1c88\u1c90-\u1cba\u1cbd-\u1cbf\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5-\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2118-\u211d\u2124\u2126\u2128\u212a-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2-\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312f\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fef\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a-\ua62b\ua640-\ua66e\ua67f-\ua69d\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua7b9\ua7f7-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua840-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua8fd-\ua8fe\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\ua9e0-\ua9e4\ua9e6-\ua9ef\ua9fa-\ua9fe\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa7e-\uaaaf\uaab1\uaab5-\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uab30-\uab5a\uab5c-\uab65\uab70-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40-\ufb41\ufb43-\ufb44\ufb46-\ufbb1\ufbd3-\ufc5d\ufc64-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdf9\ufe71\ufe73\ufe77\ufe79\ufe7b\ufe7d\ufe7f-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d\uffa0-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc\U00010000-\U0001000b\U0001000d-\U00010026\U00010028-\U0001003a\U0001003c-\U0001003d\U0001003f-\U0001004d\U00010050-\U0001005d\U00010080-\U000100fa\U00010140-\U00010174\U00010280-\U0001029c\U000102a0-\U000102d0\U00010300-\U0001031f\U0001032d-\U0001034a\U00010350-\U00010375\U00010380-\U0001039d\U000103a0-\U000103c3\U000103c8-\U000103cf\U000103d1-\U000103d5\U00010400-\U0001049d\U000104b0-\U000104d3\U000104d8-\U000104fb\U00010500-\U00010527\U00010530-\U00010563\U00010600-\U00010736\U00010740-\U00010755\U00010760-\U00010767\U00010800-\U00010805\U00010808\U0001080a-\U00010835\U00010837-\U00010838\U0001083c\U0001083f-\U00010855\U00010860-\U00010876\U00010880-\U0001089e\U000108e0-\U000108f2\U000108f4-\U000108f5\U00010900-\U00010915\U00010920-\U00010939\U00010980-\U000109b7\U000109be-\U000109bf\U00010a00\U00010a10-\U00010a13\U00010a15-\U00010a17\U00010a19-\U00010a35\U00010a60-\U00010a7c\U00010a80-\U00010a9c\U00010ac0-\U00010ac7\U00010ac9-\U00010ae4\U00010b00-\U00010b35\U00010b40-\U00010b55\U00010b60-\U00010b72\U00010b80-\U00010b91\U00010c00-\U00010c48\U00010c80-\U00010cb2\U00010cc0-\U00010cf2\U00010d00-\U00010d23\U00010f00-\U00010f1c\U00010f27\U00010f30-\U00010f45\U00011003-\U00011037\U00011083-\U000110af\U000110d0-\U000110e8\U00011103-\U00011126\U00011144\U00011150-\U00011172\U00011176\U00011183-\U000111b2\U000111c1-\U000111c4\U000111da\U000111dc\U00011200-\U00011211\U00011213-\U0001122b\U00011280-\U00011286\U00011288\U0001128a-\U0001128d\U0001128f-\U0001129d\U0001129f-\U000112a8\U000112b0-\U000112de\U00011305-\U0001130c\U0001130f-\U00011310\U00011313-\U00011328\U0001132a-\U00011330\U00011332-\U00011333\U00011335-\U00011339\U0001133d\U00011350\U0001135d-\U00011361\U00011400-\U00011434\U00011447-\U0001144a\U00011480-\U000114af\U000114c4-\U000114c5\U000114c7\U00011580-\U000115ae\U000115d8-\U000115db\U00011600-\U0001162f\U00011644\U00011680-\U000116aa\U00011700-\U0001171a\U00011800-\U0001182b\U000118a0-\U000118df\U000118ff\U00011a00\U00011a0b-\U00011a32\U00011a3a\U00011a50\U00011a5c-\U00011a83\U00011a86-\U00011a89\U00011a9d\U00011ac0-\U00011af8\U00011c00-\U00011c08\U00011c0a-\U00011c2e\U00011c40\U00011c72-\U00011c8f\U00011d00-\U00011d06\U00011d08-\U00011d09\U00011d0b-\U00011d30\U00011d46\U00011d60-\U00011d65\U00011d67-\U00011d68\U00011d6a-\U00011d89\U00011d98\U00011ee0-\U00011ef2\U00012000-\U00012399\U00012400-\U0001246e\U00012480-\U00012543\U00013000-\U0001342e\U00014400-\U00014646\U00016800-\U00016a38\U00016a40-\U00016a5e\U00016ad0-\U00016aed\U00016b00-\U00016b2f\U00016b40-\U00016b43\U00016b63-\U00016b77\U00016b7d-\U00016b8f\U00016e40-\U00016e7f\U00016f00-\U00016f44\U00016f50\U00016f93-\U00016f9f\U00016fe0-\U00016fe1\U00017000-\U000187f1\U00018800-\U00018af2\U0001b000-\U0001b11e\U0001b170-\U0001b2fb\U0001bc00-\U0001bc6a\U0001bc70-\U0001bc7c\U0001bc80-\U0001bc88\U0001bc90-\U0001bc99\U0001d400-\U0001d454\U0001d456-\U0001d49c\U0001d49e-\U0001d49f\U0001d4a2\U0001d4a5-\U0001d4a6\U0001d4a9-\U0001d4ac\U0001d4ae-\U0001d4b9\U0001d4bb\U0001d4bd-\U0001d4c3\U0001d4c5-\U0001d505\U0001d507-\U0001d50a\U0001d50d-\U0001d514\U0001d516-\U0001d51c\U0001d51e-\U0001d539\U0001d53b-\U0001d53e\U0001d540-\U0001d544\U0001d546\U0001d54a-\U0001d550\U0001d552-\U0001d6a5\U0001d6a8-\U0001d6c0\U0001d6c2-\U0001d6da\U0001d6dc-\U0001d6fa\U0001d6fc-\U0001d714\U0001d716-\U0001d734\U0001d736-\U0001d74e\U0001d750-\U0001d76e\U0001d770-\U0001d788\U0001d78a-\U0001d7a8\U0001d7aa-\U0001d7c2\U0001d7c4-\U0001d7cb\U0001e800-\U0001e8c4\U0001e900-\U0001e943\U0001ee00-\U0001ee03\U0001ee05-\U0001ee1f\U0001ee21-\U0001ee22\U0001ee24\U0001ee27\U0001ee29-\U0001ee32\U0001ee34-\U0001ee37\U0001ee39\U0001ee3b\U0001ee42\U0001ee47\U0001ee49\U0001ee4b\U0001ee4d-\U0001ee4f\U0001ee51-\U0001ee52\U0001ee54\U0001ee57\U0001ee59\U0001ee5b\U0001ee5d\U0001ee5f\U0001ee61-\U0001ee62\U0001ee64\U0001ee67-\U0001ee6a\U0001ee6c-\U0001ee72\U0001ee74-\U0001ee77\U0001ee79-\U0001ee7c\U0001ee7e\U0001ee80-\U0001ee89\U0001ee8b-\U0001ee9b\U0001eea1-\U0001eea3\U0001eea5-\U0001eea9\U0001eeab-\U0001eebb\U00020000-\U0002a6d6\U0002a700-\U0002b734\U0002b740-\U0002b81d\U0002b820-\U0002cea1\U0002ceb0-\U0002ebe0\U0002f800-\U0002fa1d'
+
+cats = ['Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu', 'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf', 'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs']
+
+# Generated from unidata 11.0.0
+
+def combine(*args):
+ return ''.join(globals()[cat] for cat in args)
+
+
+def allexcept(*args):
+ newcats = cats[:]
+ for arg in args:
+ newcats.remove(arg)
+ return ''.join(globals()[cat] for cat in newcats)
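A quick usage sketch for the two helpers above (hedged: the import path assumes the vendored layout added by this patch). The category strings are regex-ready, so they splice directly into character classes; this is essentially how Pygments lexers build Unicode-aware identifier patterns:

    import re
    from pip._vendor.pygments import unistring as uni

    # Codepoints that may start an identifier, plus marks/digits/connectors
    # that may continue one.
    start = '[_' + uni.combine('Lu', 'Ll', 'Lm', 'Lt', 'Lo', 'Nl') + ']'
    cont = '[' + uni.combine('Lu', 'Ll', 'Lm', 'Lt', 'Lo', 'Nl',
                             'Mn', 'Mc', 'Nd', 'Pc') + ']'
    identifier = re.compile(start + cont + '*')
    print(identifier.match('变量1'))   # non-ASCII identifiers match too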
+
+
+def _handle_runs(char_list): # pragma: no cover
+ buf = []
+ for c in char_list:
+ if len(c) == 1:
+ if buf and buf[-1][1] == chr(ord(c)-1):
+ buf[-1] = (buf[-1][0], c)
+ else:
+ buf.append((c, c))
+ else:
+ buf.append((c, c))
+ for a, b in buf:
+ if a == b:
+ yield a
+ else:
+ yield '%s-%s' % (a, b)
+
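To make `_handle_runs` concrete, a doctest-style illustration (hedged; not part of the upstream file): runs of consecutive codepoints collapse into `a-b` range syntax, while isolated or pre-escaped entries pass through unchanged.

    >>> list(_handle_runs(['a', 'b', 'c', 'x']))
    ['a-c', 'x']
    >>> list(_handle_runs(['\\-', '0', '1', '2']))  # escaped entries never merge
    ['\\-', '0-2']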
+
+if __name__ == '__main__': # pragma: no cover
+ import unicodedata
+
+ categories = {'xid_start': [], 'xid_continue': []}
+
+ with open(__file__) as fp:
+ content = fp.read()
+
+ header = content[:content.find('Cc =')]
+ footer = content[content.find("def combine("):]
+
+    for code in range(0x110000):
+        c = chr(code)
+        cat = unicodedata.category(c)
+        if ord(c) == 0xdc00:
+            # Hack to avoid this low surrogate combining with the preceding
+            # high surrogate, 0xdbff, when doing a repr.
+            c = '\\' + c
+        elif ord(c) in (0x2d, 0x5b, 0x5c, 0x5d, 0x5e):
+            # Escape regex metachars.
+            c = '\\' + c
+        categories.setdefault(cat, []).append(c)
+        # XID_START and XID_CONTINUE are special categories used for matching
+        # identifiers in Python 3.
+        if c.isidentifier():
+            categories['xid_start'].append(c)
+        if ('a' + c).isidentifier():
+            categories['xid_continue'].append(c)
+
+    with open(__file__, 'w') as fp:
+        fp.write(header)
+
+        for cat in sorted(categories):
+            val = ''.join(_handle_runs(categories[cat]))
+            fp.write('%s = %a\n\n' % (cat, val))
+
+        cats = sorted(categories)
+        cats.remove('xid_start')
+        cats.remove('xid_continue')
+        fp.write('cats = %r\n\n' % cats)
+
+        fp.write('# Generated from unidata %s\n\n' % (unicodedata.unidata_version,))
+
+        fp.write(footer)
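A note on the self-regeneration block above: running this module directly rewrites its own data section in place, keeping the hand-written header and everything from `def combine(` onward intact. The `%a` (ascii()) formatting is what keeps the regenerated file pure ASCII, as a small sketch shows (illustrative values only):

    # Roughly what happens at write time for each category:
    val = '0-9\u0660'                   # raw characters collected in the scan
    line = '%s = %a\n\n' % ('Nd', val)
    # line is now: "Nd = '0-9\u0660'\n\n"  (escape sequences restored by ascii())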
diff --git a/third_party/python/pip/pip/_vendor/pygments/util.py b/third_party/python/pip/pip/_vendor/pygments/util.py
new file mode 100644
index 0000000000..8032962dc9
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pygments/util.py
@@ -0,0 +1,308 @@
+"""
+ pygments.util
+ ~~~~~~~~~~~~~
+
+ Utility functions.
+
+ :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
+ :license: BSD, see LICENSE for details.
+"""
+
+import re
+from io import TextIOWrapper
+
+
+split_path_re = re.compile(r'[/\\ ]')
+doctype_lookup_re = re.compile(r'''
+ <!DOCTYPE\s+(
+ [a-zA-Z_][a-zA-Z0-9]*
+ (?: \s+ # optional in HTML5
+ [a-zA-Z_][a-zA-Z0-9]*\s+
+ "[^"]*")?
+ )
+ [^>]*>
+''', re.DOTALL | re.MULTILINE | re.VERBOSE)
+tag_re = re.compile(r'<(.+?)(\s.*?)?>.*?</.+?>',
+ re.IGNORECASE | re.DOTALL | re.MULTILINE)
+xml_decl_re = re.compile(r'\s*<\?xml[^>]*\?>', re.I)
+
+
+class ClassNotFound(ValueError):
+ """Raised if one of the lookup functions didn't find a matching class."""
+
+
+class OptionError(Exception):
+ pass
+
+
+def get_choice_opt(options, optname, allowed, default=None, normcase=False):
+ string = options.get(optname, default)
+ if normcase:
+ string = string.lower()
+ if string not in allowed:
+ raise OptionError('Value for option %s must be one of %s' %
+ (optname, ', '.join(map(str, allowed))))
+ return string
+
+
+def get_bool_opt(options, optname, default=None):
+ string = options.get(optname, default)
+ if isinstance(string, bool):
+ return string
+ elif isinstance(string, int):
+ return bool(string)
+ elif not isinstance(string, str):
+ raise OptionError('Invalid type %r for option %s; use '
+ '1/0, yes/no, true/false, on/off' % (
+ string, optname))
+ elif string.lower() in ('1', 'yes', 'true', 'on'):
+ return True
+ elif string.lower() in ('0', 'no', 'false', 'off'):
+ return False
+ else:
+ raise OptionError('Invalid value %r for option %s; use '
+ '1/0, yes/no, true/false, on/off' % (
+ string, optname))
+
+
+def get_int_opt(options, optname, default=None):
+ string = options.get(optname, default)
+ try:
+ return int(string)
+ except TypeError:
+ raise OptionError('Invalid type %r for option %s; you '
+ 'must give an integer value' % (
+ string, optname))
+ except ValueError:
+ raise OptionError('Invalid value %r for option %s; you '
+ 'must give an integer value' % (
+ string, optname))
+
+
+def get_list_opt(options, optname, default=None):
+ val = options.get(optname, default)
+ if isinstance(val, str):
+ return val.split()
+ elif isinstance(val, (list, tuple)):
+ return list(val)
+ else:
+ raise OptionError('Invalid type %r for option %s; you '
+ 'must give a list value' % (
+ val, optname))
+
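Taken together, the three getters above normalize the loosely typed option dicts that lexers and formatters receive. A hedged sketch of typical use (the option names here are illustrative, not a fixed API):

    options = {'stripnl': 'yes', 'tabsize': '8', 'filenames': '*.py *.pyw'}
    assert get_bool_opt(options, 'stripnl', False) is True
    assert get_int_opt(options, 'tabsize', 4) == 8
    assert get_list_opt(options, 'filenames', []) == ['*.py', '*.pyw']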
+
+def docstring_headline(obj):
+ if not obj.__doc__:
+ return ''
+ res = []
+ for line in obj.__doc__.strip().splitlines():
+ if line.strip():
+ res.append(" " + line.strip())
+ else:
+ break
+ return ''.join(res).lstrip()
+
+
+def make_analysator(f):
+ """Return a static text analyser function that returns float values."""
+ def text_analyse(text):
+ try:
+ rv = f(text)
+ except Exception:
+ return 0.0
+ if not rv:
+ return 0.0
+ try:
+ return min(1.0, max(0.0, float(rv)))
+ except (ValueError, TypeError):
+ return 0.0
+ text_analyse.__doc__ = f.__doc__
+ return staticmethod(text_analyse)
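In practice this wrapper guards a lexer's `analyse_text` heuristic: whatever the raw function returns is coerced to a float and clamped into the 0.0-1.0 confidence range, with any exception flattened to 0.0. A hedged sketch (the class here is hypothetical):

    class PhpLexerSketch:
        def analyse_text(text):              # no 'self': becomes a staticmethod
            return text.startswith('<?php')
        analyse_text = make_analysator(analyse_text)

    PhpLexerSketch.analyse_text('<?php echo 1;')   # -> 1.0
    PhpLexerSketch.analyse_text('plain text')      # -> 0.0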
+
+
+def shebang_matches(text, regex):
+ r"""Check if the given regular expression matches the last part of the
+ shebang if one exists.
+
+ >>> from pygments.util import shebang_matches
+ >>> shebang_matches('#!/usr/bin/env python', r'python(2\.\d)?')
+ True
+ >>> shebang_matches('#!/usr/bin/python2.4', r'python(2\.\d)?')
+ True
+ >>> shebang_matches('#!/usr/bin/python-ruby', r'python(2\.\d)?')
+ False
+ >>> shebang_matches('#!/usr/bin/python/ruby', r'python(2\.\d)?')
+ False
+ >>> shebang_matches('#!/usr/bin/startsomethingwith python',
+ ... r'python(2\.\d)?')
+ True
+
+    It also checks for common Windows executable file extensions::
+
+ >>> shebang_matches('#!C:\\Python2.4\\Python.exe', r'python(2\.\d)?')
+ True
+
+    Parameters such as ``'-f'`` or ``'--foo'`` are ignored, so ``'perl'``
+    matches the same as ``'perl -e'``.
+
+    Note that this method automatically anchors the match against the whole
+    string (i.e. the regular expression is wrapped in ``'^'`` and ``'$'``).
+ """
+ index = text.find('\n')
+ if index >= 0:
+ first_line = text[:index].lower()
+ else:
+ first_line = text.lower()
+ if first_line.startswith('#!'):
+ try:
+ found = [x for x in split_path_re.split(first_line[2:].strip())
+ if x and not x.startswith('-')][-1]
+ except IndexError:
+ return False
+ regex = re.compile(r'^%s(\.(exe|cmd|bat|bin))?$' % regex, re.IGNORECASE)
+ if regex.search(found) is not None:
+ return True
+ return False
+
+
+def doctype_matches(text, regex):
+ """Check if the doctype matches a regular expression (if present).
+
+    Note that this method only checks the first part of a DOCTYPE,
+    e.g. 'html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"'.
+ """
+ m = doctype_lookup_re.search(text)
+ if m is None:
+ return False
+ doctype = m.group(1)
+ return re.compile(regex, re.I).match(doctype.strip()) is not None
+
+
+def html_doctype_matches(text):
+ """Check if the file looks like it has a html doctype."""
+ return doctype_matches(text, r'html')
+
+
+_looks_like_xml_cache = {}
+
+
+def looks_like_xml(text):
+ """Check if a doctype exists or if we have some tags."""
+ if xml_decl_re.match(text):
+ return True
+ key = hash(text)
+ try:
+ return _looks_like_xml_cache[key]
+ except KeyError:
+ m = doctype_lookup_re.search(text)
+ if m is not None:
+ return True
+ rv = tag_re.search(text[:1000]) is not None
+ _looks_like_xml_cache[key] = rv
+ return rv
+
+
+def surrogatepair(c):
+ """Given a unicode character code with length greater than 16 bits,
+ return the two 16 bit surrogate pair.
+ """
+ # From example D28 of:
+ # http://www.unicode.org/book/ch03.pdf
+ return (0xd7c0 + (c >> 10), (0xdc00 + (c & 0x3ff)))
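A worked example of the arithmetic above for U+1F600: the high half is 0xD7C0 + (0x1F600 >> 10) = 0xD7C0 + 0x7D = 0xD83D, and the low half is 0xDC00 + (0x1F600 & 0x3FF) = 0xDC00 + 0x200 = 0xDE00, which is exactly the UTF-16 encoding of that code point:

    >>> tuple(hex(x) for x in surrogatepair(0x1F600))
    ('0xd83d', '0xde00')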
+
+
+def format_lines(var_name, seq, raw=False, indent_level=0):
+ """Formats a sequence of strings for output."""
+ lines = []
+ base_indent = ' ' * indent_level * 4
+ inner_indent = ' ' * (indent_level + 1) * 4
+ lines.append(base_indent + var_name + ' = (')
+ if raw:
+ # These should be preformatted reprs of, say, tuples.
+ for i in seq:
+ lines.append(inner_indent + i + ',')
+ else:
+ for i in seq:
+ # Force use of single quotes
+ r = repr(i + '"')
+ lines.append(inner_indent + r[:-2] + r[-1] + ',')
+ lines.append(base_indent + ')')
+ return '\n'.join(lines)
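For instance (a hedged illustration), the non-raw path forces single-quoted reprs so that generated token files stay uniform:

    >>> print(format_lines('KEYWORDS', ['if', 'else']))
    KEYWORDS = (
        'if',
        'else',
    )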
+
+
+def duplicates_removed(it, already_seen=()):
+ """
+ Returns a list with duplicates removed from the iterable `it`.
+
+ Order is preserved.
+ """
+ lst = []
+ seen = set()
+ for i in it:
+ if i in seen or i in already_seen:
+ continue
+ lst.append(i)
+ seen.add(i)
+ return lst
+
+
+class Future:
+ """Generic class to defer some work.
+
+ Handled specially in RegexLexerMeta, to support regex string construction at
+ first use.
+ """
+ def get(self):
+ raise NotImplementedError
+
+
+def guess_decode(text):
+ """Decode *text* with guessed encoding.
+
+ First try UTF-8; this should fail for non-UTF-8 encodings.
+ Then try the preferred locale encoding.
+ Fall back to latin-1, which always works.
+ """
+ try:
+ text = text.decode('utf-8')
+ return text, 'utf-8'
+ except UnicodeDecodeError:
+ try:
+ import locale
+ prefencoding = locale.getpreferredencoding()
+            text = text.decode(prefencoding)
+ return text, prefencoding
+ except (UnicodeDecodeError, LookupError):
+ text = text.decode('latin1')
+ return text, 'latin1'
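The fallback chain in action (hedged: the middle step depends on the machine's locale, so a UTF-8 locale is assumed here, which pushes non-UTF-8 input through to latin-1):

    >>> guess_decode(b'caf\xc3\xa9')    # valid UTF-8
    ('café', 'utf-8')
    >>> guess_decode(b'caf\xe9')        # not UTF-8; assuming a UTF-8 locale
    ('café', 'latin1')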
+
+
+def guess_decode_from_terminal(text, term):
+ """Decode *text* coming from terminal *term*.
+
+ First try the terminal encoding, if given.
+ Then try UTF-8. Then try the preferred locale encoding.
+ Fall back to latin-1, which always works.
+ """
+ if getattr(term, 'encoding', None):
+ try:
+ text = text.decode(term.encoding)
+ except UnicodeDecodeError:
+ pass
+ else:
+ return text, term.encoding
+ return guess_decode(text)
+
+
+def terminal_encoding(term):
+ """Return our best guess of encoding for the given *term*."""
+ if getattr(term, 'encoding', None):
+ return term.encoding
+ import locale
+ return locale.getpreferredencoding()
+
+
+class UnclosingTextIOWrapper(TextIOWrapper):
+ # Don't close underlying buffer on destruction.
+ def close(self):
+ self.flush()
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/__init__.py b/third_party/python/pip/pip/_vendor/pyparsing/__init__.py
new file mode 100644
index 0000000000..75372500ed
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/__init__.py
@@ -0,0 +1,331 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2022 Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = """
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and
+executing simple grammars, vs. the traditional lex/yacc approach, or the
+use of regular expressions. With pyparsing, you don't need to learn
+a new syntax for defining grammars or matching expressions - the parsing
+module provides a library of classes that you use to construct the
+grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+``"<salutation>, <addressee>!"``), built up using :class:`Word`,
+:class:`Literal`, and :class:`And` elements
+(the :meth:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
+and the strings are auto-converted to :class:`Literal` expressions)::
+
+ from pip._vendor.pyparsing import Word, alphas
+
+ # define grammar of a greeting
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+
+ hello = "Hello, World!"
+ print(hello, "->", greet.parse_string(hello))
+
+The program outputs the following::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of :class:`'+'<And>`,
+:class:`'|'<MatchFirst>`, :class:`'^'<Or>` and :class:`'&'<Each>` operators.
+
+The :class:`ParseResults` object returned from
+:class:`ParserElement.parseString` can be
+accessed as a nested list, a dictionary, or an object with named
+attributes.
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+ - extra or missing whitespace (the above program will also handle
+ "Hello,World!", "Hello , World !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started
+-----------------
+Visit the classes :class:`ParserElement` and :class:`ParseResults` to
+see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+
+ - construct literal match expressions from :class:`Literal` and
+ :class:`CaselessLiteral` classes
+ - construct character word-group expressions using the :class:`Word`
+ class
+ - see how to create repetitive expressions using :class:`ZeroOrMore`
+ and :class:`OneOrMore` classes
+ - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
+ and :class:`'&'<Each>` operators to combine simple expressions into
+ more complex ones
+ - associate names with your parsed results using
+ :class:`ParserElement.setResultsName`
+ - access the parsed data, which is returned as a :class:`ParseResults`
+ object
+ - find some helpful expression short-cuts like :class:`delimitedList`
+ and :class:`oneOf`
+ - find more useful common expressions in the :class:`pyparsing_common`
+ namespace class
+"""
+from typing import NamedTuple
+
+
+class version_info(NamedTuple):
+ major: int
+ minor: int
+ micro: int
+ releaselevel: str
+ serial: int
+
+    @property
+    def __version__(self):
+        return (
+            "{}.{}.{}".format(self.major, self.minor, self.micro)
+            + (
+                # pre-release suffix, e.g. "rc1" when releaselevel is "candidate";
+                # indexing the 2-tuple with the bool picks "" for final releases
+                "{}{}{}".format(
+                    "r" if self.releaselevel[0] == "c" else "",
+                    self.releaselevel[0],
+                    self.serial,
+                ),
+                "",
+            )[self.releaselevel == "final"]
+        )
+
+ def __str__(self):
+ return "{} {} / {}".format(__name__, self.__version__, __version_time__)
+
+ def __repr__(self):
+ return "{}.{}({})".format(
+ __name__,
+ type(self).__name__,
+ ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
+ )
+
+
+__version_info__ = version_info(3, 0, 9, "final", 0)
+__version_time__ = "05 May 2022 07:02 UTC"
+__version__ = __version_info__.__version__
+__versionTime__ = __version_time__
+__author__ = "Paul McGuire <ptmcg.gm+pyparsing@gmail.com>"
+
+from .util import *
+from .exceptions import *
+from .actions import *
+from .core import __diag__, __compat__
+from .results import *
+from .core import *
+from .core import _builtin_exprs as core_builtin_exprs
+from .helpers import *
+from .helpers import _builtin_exprs as helper_builtin_exprs
+
+from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
+from .testing import pyparsing_test as testing
+from .common import (
+ pyparsing_common as common,
+ _builtin_exprs as common_builtin_exprs,
+)
+
+# define backward compat synonyms
+if "pyparsing_unicode" not in globals():
+ pyparsing_unicode = unicode
+if "pyparsing_common" not in globals():
+ pyparsing_common = common
+if "pyparsing_test" not in globals():
+ pyparsing_test = testing
+
+core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
+
+
+__all__ = [
+ "__version__",
+ "__version_time__",
+ "__author__",
+ "__compat__",
+ "__diag__",
+ "And",
+ "AtLineStart",
+ "AtStringStart",
+ "CaselessKeyword",
+ "CaselessLiteral",
+ "CharsNotIn",
+ "Combine",
+ "Dict",
+ "Each",
+ "Empty",
+ "FollowedBy",
+ "Forward",
+ "GoToColumn",
+ "Group",
+ "IndentedBlock",
+ "Keyword",
+ "LineEnd",
+ "LineStart",
+ "Literal",
+ "Located",
+ "PrecededBy",
+ "MatchFirst",
+ "NoMatch",
+ "NotAny",
+ "OneOrMore",
+ "OnlyOnce",
+ "OpAssoc",
+ "Opt",
+ "Optional",
+ "Or",
+ "ParseBaseException",
+ "ParseElementEnhance",
+ "ParseException",
+ "ParseExpression",
+ "ParseFatalException",
+ "ParseResults",
+ "ParseSyntaxException",
+ "ParserElement",
+ "PositionToken",
+ "QuotedString",
+ "RecursiveGrammarException",
+ "Regex",
+ "SkipTo",
+ "StringEnd",
+ "StringStart",
+ "Suppress",
+ "Token",
+ "TokenConverter",
+ "White",
+ "Word",
+ "WordEnd",
+ "WordStart",
+ "ZeroOrMore",
+ "Char",
+ "alphanums",
+ "alphas",
+ "alphas8bit",
+ "any_close_tag",
+ "any_open_tag",
+ "c_style_comment",
+ "col",
+ "common_html_entity",
+ "counted_array",
+ "cpp_style_comment",
+ "dbl_quoted_string",
+ "dbl_slash_comment",
+ "delimited_list",
+ "dict_of",
+ "empty",
+ "hexnums",
+ "html_comment",
+ "identchars",
+ "identbodychars",
+ "java_style_comment",
+ "line",
+ "line_end",
+ "line_start",
+ "lineno",
+ "make_html_tags",
+ "make_xml_tags",
+ "match_only_at_col",
+ "match_previous_expr",
+ "match_previous_literal",
+ "nested_expr",
+ "null_debug_action",
+ "nums",
+ "one_of",
+ "printables",
+ "punc8bit",
+ "python_style_comment",
+ "quoted_string",
+ "remove_quotes",
+ "replace_with",
+ "replace_html_entity",
+ "rest_of_line",
+ "sgl_quoted_string",
+ "srange",
+ "string_end",
+ "string_start",
+ "trace_parse_action",
+ "unicode_string",
+ "with_attribute",
+ "original_text_for",
+ "ungroup",
+ "infix_notation",
+ "with_class",
+ "CloseMatch",
+ "token_map",
+ "pyparsing_common",
+ "pyparsing_unicode",
+ "unicode_set",
+ "condition_as_parse_action",
+ "pyparsing_test",
+ # pre-PEP8 compatibility names
+ "__versionTime__",
+ "anyCloseTag",
+ "anyOpenTag",
+ "cStyleComment",
+ "commonHTMLEntity",
+ "countedArray",
+ "cppStyleComment",
+ "dblQuotedString",
+ "dblSlashComment",
+ "delimitedList",
+ "dictOf",
+ "htmlComment",
+ "javaStyleComment",
+ "lineEnd",
+ "lineStart",
+ "makeHTMLTags",
+ "makeXMLTags",
+ "matchOnlyAtCol",
+ "matchPreviousExpr",
+ "matchPreviousLiteral",
+ "nestedExpr",
+ "nullDebugAction",
+ "oneOf",
+ "opAssoc",
+ "pythonStyleComment",
+ "quotedString",
+ "removeQuotes",
+ "replaceHTMLEntity",
+ "replaceWith",
+ "restOfLine",
+ "sglQuotedString",
+ "stringEnd",
+ "stringStart",
+ "traceParseAction",
+ "unicodeString",
+ "withAttribute",
+ "indentedBlock",
+ "originalTextFor",
+ "infixNotation",
+ "locatedExpr",
+ "withClass",
+ "tokenMap",
+ "conditionAsParseAction",
+ "autoname_elements",
+]
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/actions.py b/third_party/python/pip/pip/_vendor/pyparsing/actions.py
new file mode 100644
index 0000000000..f72c66e743
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/actions.py
@@ -0,0 +1,207 @@
+# actions.py
+
+from .exceptions import ParseException
+from .util import col
+
+
+class OnlyOnce:
+ """
+ Wrapper for parse actions, to ensure they are only called once.
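+
+    Example (a minimal sketch; ``note_header`` is a hypothetical parse action)::
+
+        def note_header(toks):
+            print("header:", toks)
+
+        header = Word(alphas).set_parse_action(OnlyOnce(note_header))
+        header.parse_string("Overview")   # prints: header: ['Overview']
+        header.parse_string("Details")    # raises ParseException until reset() is called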
+ """
+
+ def __init__(self, method_call):
+ from .core import _trim_arity
+
+ self.callable = _trim_arity(method_call)
+ self.called = False
+
+ def __call__(self, s, l, t):
+ if not self.called:
+ results = self.callable(s, l, t)
+ self.called = True
+ return results
+ raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
+
+ def reset(self):
+ """
+ Allow the associated parse action to be called once more.
+ """
+
+ self.called = False
+
+
+def match_only_at_col(n):
+ """
+ Helper method for defining parse actions that require matching at
+ a specific column in the input text.
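+
+    Example (a minimal sketch; column numbers are 1-based)::
+
+        # hypothetical: only accept words that start in column 1
+        left_edge_word = Word(alphas).add_parse_action(match_only_at_col(1))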
+ """
+
+ def verify_col(strg, locn, toks):
+ if col(locn, strg) != n:
+ raise ParseException(strg, locn, "matched token not at column {}".format(n))
+
+ return verify_col
+
+
+def replace_with(repl_str):
+ """
+ Helper method for common parse actions that simply return
+ a literal value. Especially useful when used with
+ :class:`transform_string<ParserElement.transform_string>` ().
+
+ Example::
+
+ num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+ na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
+ term = na | num
+
+ term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
+ """
+ return lambda s, l, t: [repl_str]
+
+
+def remove_quotes(s, l, t):
+ """
+ Helper parse action for removing quotation marks from parsed
+ quoted strings.
+
+ Example::
+
+ # by default, quotation marks are included in parsed results
+ quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+ # use remove_quotes to strip quotation marks from parsed results
+ quoted_string.set_parse_action(remove_quotes)
+ quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+ """
+ return t[0][1:-1]
+
+
+def with_attribute(*args, **attr_dict):
+ """
+ Helper to create a validating parse action to be used with start
+ tags created with :class:`make_xml_tags` or
+ :class:`make_html_tags`. Use ``with_attribute`` to qualify
+ a starting tag with a required attribute value, to avoid false
+ matches on common tags such as ``<TD>`` or ``<DIV>``.
+
+ Call ``with_attribute`` with a series of attribute names and
+    values. Specify the list of filter attribute names and values as:
+
+    - keyword arguments, as in ``(align="right")``,
+    - an explicit dict with the ``**`` operator, when an attribute
+      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``, or
+    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
+
+    For attribute names with a namespace prefix, you must use the dict
+    or tuple form. Attribute names are matched case-insensitively.
+
+ If just testing for ``class`` (with or without a namespace), use
+ :class:`with_class`.
+
+ To verify that the attribute exists, but without specifying a value,
+ pass ``with_attribute.ANY_VALUE`` as the value.
+
+ Example::
+
+ html = '''
+ <div>
+ Some text
+ <div type="grid">1 4 0 1 0</div>
+ <div type="graph">1,3 2,3 1,1</div>
+ <div>this has no type</div>
+ </div>
+
+ '''
+ div,div_end = make_html_tags("div")
+
+ # only match div tag having a type attribute with value "grid"
+ div_grid = div().set_parse_action(with_attribute(type="grid"))
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.search_string(html):
+ print(grid_header.body)
+
+ # construct a match with any div tag having a type attribute, regardless of the value
+ div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.search_string(html):
+ print(div_header.body)
+
+ prints::
+
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ if args:
+ attrs = args[:]
+ else:
+ attrs = attr_dict.items()
+ attrs = [(k, v) for k, v in attrs]
+
+ def pa(s, l, tokens):
+ for attrName, attrValue in attrs:
+ if attrName not in tokens:
+ raise ParseException(s, l, "no matching attribute " + attrName)
+ if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
+ raise ParseException(
+ s,
+ l,
+ "attribute {!r} has value {!r}, must be {!r}".format(
+ attrName, tokens[attrName], attrValue
+ ),
+ )
+
+ return pa
+
+
+with_attribute.ANY_VALUE = object()
+
+
+def with_class(classname, namespace=""):
+ """
+ Simplified version of :class:`with_attribute` when
+ matching on a div class - made difficult because ``class`` is
+ a reserved word in Python.
+
+ Example::
+
+ html = '''
+ <div>
+ Some text
+ <div class="grid">1 4 0 1 0</div>
+ <div class="graph">1,3 2,3 1,1</div>
+ <div>this &lt;div&gt; has no class</div>
+ </div>
+
+ '''
+ div,div_end = make_html_tags("div")
+ div_grid = div().set_parse_action(with_class("grid"))
+
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.search_string(html):
+ print(grid_header.body)
+
+        div_any_type = div().set_parse_action(with_class(with_attribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.search_string(html):
+ print(div_header.body)
+
+ prints::
+
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ classattr = "{}:class".format(namespace) if namespace else "class"
+ return with_attribute(**{classattr: classname})
+
+
+# pre-PEP8 compatibility symbols
+replaceWith = replace_with
+removeQuotes = remove_quotes
+withAttribute = with_attribute
+withClass = with_class
+matchOnlyAtCol = match_only_at_col
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/common.py b/third_party/python/pip/pip/_vendor/pyparsing/common.py
new file mode 100644
index 0000000000..1859fb79cc
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/common.py
@@ -0,0 +1,424 @@
+# common.py
+from .core import *
+from .helpers import delimited_list, any_open_tag, any_close_tag
+from datetime import datetime
+
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+ """Here are some common low-level expressions that may be useful in
+ jump-starting parser development:
+
+ - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
+ :class:`scientific notation<sci_real>`)
+ - common :class:`programming identifiers<identifier>`
+ - network addresses (:class:`MAC<mac_address>`,
+ :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
+ - ISO8601 :class:`dates<iso8601_date>` and
+ :class:`datetime<iso8601_datetime>`
+ - :class:`UUID<uuid>`
+ - :class:`comma-separated list<comma_separated_list>`
+ - :class:`url`
+
+ Parse actions:
+
+ - :class:`convertToInteger`
+ - :class:`convertToFloat`
+ - :class:`convertToDate`
+ - :class:`convertToDatetime`
+ - :class:`stripHTMLTags`
+ - :class:`upcaseTokens`
+ - :class:`downcaseTokens`
+
+ Example::
+
+ pyparsing_common.number.runTests('''
+ # any int or real number, returned as the appropriate type
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.fnumber.runTests('''
+ # any int or real number, returned as float
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.hex_integer.runTests('''
+ # hex numbers
+ 100
+ FF
+ ''')
+
+ pyparsing_common.fraction.runTests('''
+ # fractions
+ 1/2
+ -3/4
+ ''')
+
+ pyparsing_common.mixed_integer.runTests('''
+ # mixed fractions
+ 1
+ 1/2
+ -3/4
+ 1-3/4
+ ''')
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests('''
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ ''')
+
+ prints::
+
+ # any int or real number, returned as the appropriate type
+ 100
+ [100]
+
+ -100
+ [-100]
+
+ +100
+ [100]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # any int or real number, returned as float
+ 100
+ [100.0]
+
+ -100
+ [-100.0]
+
+ +100
+ [100.0]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # hex numbers
+ 100
+ [256]
+
+ FF
+ [255]
+
+ # fractions
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ # mixed fractions
+ 1
+ [1]
+
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ 1-3/4
+ [1.75]
+
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ [UUID('12345678-1234-5678-1234-567812345678')]
+ """
+
+ convert_to_integer = token_map(int)
+ """
+ Parse action for converting parsed integers to Python int
+ """
+
+ convert_to_float = token_map(float)
+ """
+ Parse action for converting parsed numbers to Python float
+ """
+
+ integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
+ """expression that parses an unsigned integer, returns an int"""
+
+ hex_integer = (
+ Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
+ )
+ """expression that parses a hexadecimal integer, returns an int"""
+
+ signed_integer = (
+ Regex(r"[+-]?\d+")
+ .set_name("signed integer")
+ .set_parse_action(convert_to_integer)
+ )
+ """expression that parses an integer with optional leading sign, returns an int"""
+
+ fraction = (
+ signed_integer().set_parse_action(convert_to_float)
+ + "/"
+ + signed_integer().set_parse_action(convert_to_float)
+ ).set_name("fraction")
+ """fractional expression of an integer divided by an integer, returns a float"""
+ fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
+
+ mixed_integer = (
+ fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
+ ).set_name("fraction or mixed integer-fraction")
+ """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+ mixed_integer.add_parse_action(sum)
+
+ real = (
+ Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
+ .set_name("real number")
+ .set_parse_action(convert_to_float)
+ )
+ """expression that parses a floating point number and returns a float"""
+
+ sci_real = (
+ Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
+ .set_name("real number with scientific notation")
+ .set_parse_action(convert_to_float)
+ )
+ """expression that parses a floating point number with optional
+ scientific notation and returns a float"""
+
+ # streamlining this expression makes the docs nicer-looking
+    number = (sci_real | real | signed_integer).set_name("number").streamline()
+ """any numeric expression, returns the corresponding Python type"""
+
+ fnumber = (
+ Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
+ .set_name("fnumber")
+ .set_parse_action(convert_to_float)
+ )
+ """any int or real number, returned as float"""
+
+ identifier = Word(identchars, identbodychars).set_name("identifier")
+ """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+ ipv4_address = Regex(
+ r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
+ ).set_name("IPv4 address")
+ "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
+
+ _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
+ _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
+ "full IPv6 address"
+ )
+ _short_ipv6_address = (
+ Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ + "::"
+ + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ ).set_name("short IPv6 address")
+ _short_ipv6_address.add_condition(
+ lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
+ )
+ _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
+ ipv6_address = Combine(
+ (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
+ "IPv6 address"
+ )
+ ).set_name("IPv6 address")
+ "IPv6 address (long, short, or mixed form)"
+
+ mac_address = Regex(
+ r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
+ ).set_name("MAC address")
+    "MAC address xx:xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+ @staticmethod
+ def convert_to_date(fmt: str = "%Y-%m-%d"):
+ """
+ Helper to create a parse action for converting parsed date string to Python datetime.date
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
+
+ Example::
+
+ date_expr = pyparsing_common.iso8601_date.copy()
+            date_expr.set_parse_action(pyparsing_common.convert_to_date())
+            print(date_expr.parse_string("1999-12-31"))
+
+ prints::
+
+ [datetime.date(1999, 12, 31)]
+ """
+
+ def cvt_fn(ss, ll, tt):
+ try:
+ return datetime.strptime(tt[0], fmt).date()
+ except ValueError as ve:
+ raise ParseException(ss, ll, str(ve))
+
+ return cvt_fn
+
+ @staticmethod
+ def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
+ """Helper to create a parse action for converting parsed
+ datetime string to Python datetime.datetime
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
+
+ Example::
+
+ dt_expr = pyparsing_common.iso8601_datetime.copy()
+            dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())
+            print(dt_expr.parse_string("1999-12-31T23:59:59.999"))
+
+ prints::
+
+ [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+ """
+
+ def cvt_fn(s, l, t):
+ try:
+ return datetime.strptime(t[0], fmt)
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+
+ return cvt_fn
+
+ iso8601_date = Regex(
+ r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
+ ).set_name("ISO8601 date")
+ "ISO8601 date (``yyyy-mm-dd``)"
+
+ iso8601_datetime = Regex(
+ r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
+ ).set_name("ISO8601 datetime")
+ "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
+
+ uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
+ "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
+
+ _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
+
+ @staticmethod
+ def strip_html_tags(s: str, l: int, tokens: ParseResults):
+ """Parse action to remove HTML tags from web page HTML source
+
+ Example::
+
+ # strip HTML links from normal text
+ text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+            td, td_end = make_html_tags("TD")
+            table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end
+            print(table_text.parse_string(text).body)
+
+ Prints::
+
+ More info at the pyparsing wiki page
+ """
+ return pyparsing_common._html_stripper.transform_string(tokens[0])
+
+ _commasepitem = (
+ Combine(
+ OneOrMore(
+ ~Literal(",")
+ + ~LineEnd()
+ + Word(printables, exclude_chars=",")
+ + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
+ )
+ )
+ .streamline()
+ .set_name("commaItem")
+ )
+ comma_separated_list = delimited_list(
+ Opt(quoted_string.copy() | _commasepitem, default="")
+ ).set_name("comma separated list")
+ """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+ upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
+ """Parse action to convert tokens to upper case."""
+
+ downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
+ """Parse action to convert tokens to lower case."""
+
+ # fmt: off
+ url = Regex(
+ # https://mathiasbynens.be/demo/url-regex
+ # https://gist.github.com/dperini/729294
+ r"^" +
+ # protocol identifier (optional)
+ # short syntax // still required
+ r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
+ # user:pass BasicAuth (optional)
+ r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
+ r"(?P<host>" +
+ # IP address exclusion
+ # private & local networks
+ r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
+ r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
+ r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
+ # IP address dotted notation octets
+ # excludes loopback network 0.0.0.0
+ # excludes reserved space >= 224.0.0.0
+ # excludes network & broadcast addresses
+ # (first & last IP address of each class)
+ r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
+ r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
+ r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
+ r"|" +
+ # host & domain names, may end with dot
+ # can be replaced by a shortest alternative
+ # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
+ r"(?:" +
+ r"(?:" +
+ r"[a-z0-9\u00a1-\uffff]" +
+ r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
+ r")?" +
+ r"[a-z0-9\u00a1-\uffff]\." +
+ r")+" +
+ # TLD identifier name, may end with dot
+ r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
+ r")" +
+ # port number (optional)
+ r"(:(?P<port>\d{2,5}))?" +
+ # resource path (optional)
+ r"(?P<path>\/[^?# ]*)?" +
+ # query string (optional)
+ r"(\?(?P<query>[^#]*))?" +
+ # fragment (optional)
+ r"(#(?P<fragment>\S*))?" +
+ r"$"
+ ).set_name("url")
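+    "URL (optional ``http``/``https``/``ftp`` scheme, host, optional port, path, query, and fragment)"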
+ # fmt: on
+
+ # pre-PEP8 compatibility names
+ convertToInteger = convert_to_integer
+ convertToFloat = convert_to_float
+ convertToDate = convert_to_date
+ convertToDatetime = convert_to_datetime
+ stripHTMLTags = strip_html_tags
+ upcaseTokens = upcase_tokens
+ downcaseTokens = downcase_tokens
+
+
+_builtin_exprs = [
+ v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
+]
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/core.py b/third_party/python/pip/pip/_vendor/pyparsing/core.py
new file mode 100644
index 0000000000..6ff3c766f7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/core.py
@@ -0,0 +1,5814 @@
+#
+# core.py
+#
+import os
+import typing
+from typing import (
+ NamedTuple,
+ Union,
+ Callable,
+ Any,
+ Generator,
+ Tuple,
+ List,
+ TextIO,
+ Set,
+ Sequence,
+)
+from abc import ABC, abstractmethod
+from enum import Enum
+import string
+import copy
+import warnings
+import re
+import sys
+from collections.abc import Iterable
+import traceback
+import types
+from operator import itemgetter
+from functools import wraps
+from threading import RLock
+from pathlib import Path
+
+from .util import (
+ _FifoCache,
+ _UnboundedCache,
+ __config_flags,
+ _collapse_string_to_ranges,
+ _escape_regex_range_chars,
+ _bslash,
+ _flatten,
+ LRUMemo as _LRUMemo,
+ UnboundedMemo as _UnboundedMemo,
+)
+from .exceptions import *
+from .actions import *
+from .results import ParseResults, _ParseResultsWithOffset
+from .unicode import pyparsing_unicode
+
+_MAX_INT = sys.maxsize
+str_type: Tuple[type, ...] = (str, bytes)
+
+#
+# Copyright (c) 2003-2022 Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+
+if sys.version_info >= (3, 8):
+ from functools import cached_property
+else:
+
+ class cached_property:
+ def __init__(self, func):
+ self._func = func
+
+ def __get__(self, instance, owner=None):
+ ret = instance.__dict__[self._func.__name__] = self._func(instance)
+ return ret
+
+
+class __compat__(__config_flags):
+ """
+ A cross-version compatibility configuration for pyparsing features that will be
+ released in a future version. By setting values in this configuration to True,
+ those features can be enabled in prior versions for compatibility development
+ and testing.
+
+ - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping
+ of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`;
+ maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1
+ behavior
+ """
+
+ _type_desc = "compatibility"
+
+ collect_all_And_tokens = True
+
+ _all_names = [__ for __ in locals() if not __.startswith("_")]
+ _fixed_names = """
+ collect_all_And_tokens
+ """.split()
+
+
+class __diag__(__config_flags):
+ _type_desc = "diagnostic"
+
+ warn_multiple_tokens_in_named_alternation = False
+ warn_ungrouped_named_tokens_in_collection = False
+ warn_name_set_on_empty_Forward = False
+ warn_on_parse_using_empty_Forward = False
+ warn_on_assignment_to_Forward = False
+ warn_on_multiple_string_args_to_oneof = False
+ warn_on_match_first_with_lshift_operator = False
+ enable_debug_on_named_expressions = False
+
+ _all_names = [__ for __ in locals() if not __.startswith("_")]
+ _warning_names = [name for name in _all_names if name.startswith("warn")]
+ _debug_names = [name for name in _all_names if name.startswith("enable_debug")]
+
+ @classmethod
+ def enable_all_warnings(cls) -> None:
+ for name in cls._warning_names:
+ cls.enable(name)
+
+
+class Diagnostics(Enum):
+ """
+ Diagnostic configuration (all default to disabled)
+ - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results
+ name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions
+ - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results
+ name is defined on a containing expression with ungrouped subexpressions that also
+ have results names
+ - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined
+ with a results name, but has no contents defined
+ - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is
+ defined in a grammar but has never had an expression attached to it
+ - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined
+ but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'``
+ - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is
+ incorrectly called with multiple str arguments
+ - ``enable_debug_on_named_expressions`` - flag to auto-enable debug on all subsequent
+ calls to :class:`ParserElement.set_name`
+
+ Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`.
+ All warnings can be enabled by calling :class:`enable_all_warnings`.
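+
+    Example (a minimal sketch)::
+
+        from pip._vendor import pyparsing as pp
+        pp.enable_diag(pp.Diagnostics.warn_name_set_on_empty_Forward)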
+ """
+
+ warn_multiple_tokens_in_named_alternation = 0
+ warn_ungrouped_named_tokens_in_collection = 1
+ warn_name_set_on_empty_Forward = 2
+ warn_on_parse_using_empty_Forward = 3
+ warn_on_assignment_to_Forward = 4
+ warn_on_multiple_string_args_to_oneof = 5
+ warn_on_match_first_with_lshift_operator = 6
+ enable_debug_on_named_expressions = 7
+
+
+def enable_diag(diag_enum: Diagnostics) -> None:
+ """
+ Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
+ """
+ __diag__.enable(diag_enum.name)
+
+
+def disable_diag(diag_enum: Diagnostics) -> None:
+ """
+ Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`).
+ """
+ __diag__.disable(diag_enum.name)
+
+
+def enable_all_warnings() -> None:
+ """
+ Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`).
+ """
+ __diag__.enable_all_warnings()
+
+
+# hide abstract class
+del __config_flags
+
+
+def _should_enable_warnings(
+ cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str]
+) -> bool:
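+    # scan the -W command-line options: any non-"ignore" filter that names
+    # pyparsing (or that names no message/category/module at all) turns the
+    # warnings on; an "ignore" filter for pyparsing (or for everything) turns them off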
+ enable = bool(warn_env_var)
+ for warn_opt in cmd_line_warn_options:
+ w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split(
+ ":"
+ )[:5]
+ if not w_action.lower().startswith("i") and (
+ not (w_message or w_category or w_module) or w_module == "pyparsing"
+ ):
+ enable = True
+ elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""):
+ enable = False
+ return enable
+
+
+if _should_enable_warnings(
+ sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS")
+):
+ enable_all_warnings()
+
+
+# build list of single arg builtins, that can be used as parse actions
+_single_arg_builtins = {
+ sum,
+ len,
+ sorted,
+ reversed,
+ list,
+ tuple,
+ set,
+ any,
+ all,
+ min,
+ max,
+}
+
+_generatorType = types.GeneratorType
+ParseAction = Union[
+ Callable[[], Any],
+ Callable[[ParseResults], Any],
+ Callable[[int, ParseResults], Any],
+ Callable[[str, int, ParseResults], Any],
+]
+ParseCondition = Union[
+ Callable[[], bool],
+ Callable[[ParseResults], bool],
+ Callable[[int, ParseResults], bool],
+ Callable[[str, int, ParseResults], bool],
+]
+ParseFailAction = Callable[[str, int, "ParserElement", Exception], None]
+DebugStartAction = Callable[[str, int, "ParserElement", bool], None]
+DebugSuccessAction = Callable[
+ [str, int, int, "ParserElement", ParseResults, bool], None
+]
+DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None]
+
+
+alphas = string.ascii_uppercase + string.ascii_lowercase
+identchars = pyparsing_unicode.Latin1.identchars
+identbodychars = pyparsing_unicode.Latin1.identbodychars
+nums = "0123456789"
+hexnums = nums + "ABCDEFabcdef"
+alphanums = alphas + nums
+printables = "".join([c for c in string.printable if c not in string.whitespace])
+
+_trim_arity_call_line: typing.Optional[traceback.FrameSummary] = None
+
+
+def _trim_arity(func, max_limit=3):
+ """decorator to trim function calls to match the arity of the target"""
+ global _trim_arity_call_line
+
+ if func in _single_arg_builtins:
+ return lambda s, l, t: func(t)
+
+ limit = 0
+ found_arity = False
+
+ def extract_tb(tb, limit=0):
+ frames = traceback.extract_tb(tb, limit=limit)
+ frame_summary = frames[-1]
+ return [frame_summary[:2]]
+
+ # synthesize what would be returned by traceback.extract_stack at the call to
+ # user's parse action 'func', so that we don't incur call penalty at parse time
+
+ # fmt: off
+ LINE_DIFF = 7
+ # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
+ # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+ _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1])
+ pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF)
+
+ def wrapper(*args):
+ nonlocal found_arity, limit
+ while 1:
+ try:
+ ret = func(*args[limit:])
+ found_arity = True
+ return ret
+ except TypeError as te:
+ # re-raise TypeErrors if they did not come from our arity testing
+ if found_arity:
+ raise
+ else:
+ tb = te.__traceback__
+ trim_arity_type_error = (
+ extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth
+ )
+ del tb
+
+ if trim_arity_type_error:
+ if limit < max_limit:
+ limit += 1
+ continue
+
+ raise
+ # fmt: on
+
+ # copy func name to wrapper for sensible debug output
+ # (can't use functools.wraps, since that messes with function signature)
+ func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
+ wrapper.__name__ = func_name
+ wrapper.__doc__ = func.__doc__
+
+ return wrapper
+
+
+def condition_as_parse_action(
+    fn: ParseCondition, message: typing.Optional[str] = None, fatal: bool = False
+) -> ParseAction:
+ """
+ Function to convert a simple predicate function that returns ``True`` or ``False``
+ into a parse action. Can be used in places when a parse action is required
+ and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition
+ to an operator level in :class:`infix_notation`).
+
+ Optional keyword arguments:
+
+ - ``message`` - define a custom message to be used in the raised exception
+ - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately;
+ otherwise will raise :class:`ParseException`
+
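+    Example (a minimal sketch; the range check is a hypothetical condition)::
+
+        small_int = Word(nums).add_parse_action(
+            condition_as_parse_action(lambda t: int(t[0]) < 100, message="number too large")
+        )
+        small_int.parse_string("42")    # -> ['42']
+        small_int.parse_string("420")   # raises ParseException: number too large
+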
+ """
+ msg = message if message is not None else "failed user-defined condition"
+ exc_type = ParseFatalException if fatal else ParseException
+ fn = _trim_arity(fn)
+
+ @wraps(fn)
+ def pa(s, l, t):
+ if not bool(fn(s, l, t)):
+ raise exc_type(s, l, msg)
+
+ return pa
+
+
+def _default_start_debug_action(
+ instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False
+):
+ cache_hit_str = "*" if cache_hit else ""
+ print(
+ (
+ "{}Match {} at loc {}({},{})\n {}\n {}^".format(
+ cache_hit_str,
+ expr,
+ loc,
+ lineno(loc, instring),
+ col(loc, instring),
+ line(loc, instring),
+ " " * (col(loc, instring) - 1),
+ )
+ )
+ )
+
+
+def _default_success_debug_action(
+ instring: str,
+ startloc: int,
+ endloc: int,
+ expr: "ParserElement",
+ toks: ParseResults,
+ cache_hit: bool = False,
+):
+ cache_hit_str = "*" if cache_hit else ""
+ print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list()))
+
+
+def _default_exception_debug_action(
+ instring: str,
+ loc: int,
+ expr: "ParserElement",
+ exc: Exception,
+ cache_hit: bool = False,
+):
+ cache_hit_str = "*" if cache_hit else ""
+ print(
+ "{}Match {} failed, {} raised: {}".format(
+ cache_hit_str, expr, type(exc).__name__, exc
+ )
+ )
+
+
+def null_debug_action(*args):
+ """'Do-nothing' debug action, to suppress debugging output during parsing."""
+
+
+class ParserElement(ABC):
+ """Abstract base level parser element class."""
+
+ DEFAULT_WHITE_CHARS: str = " \n\t\r"
+ verbose_stacktrace: bool = False
+ _literalStringClass: typing.Optional[type] = None
+
+ @staticmethod
+ def set_default_whitespace_chars(chars: str) -> None:
+ r"""
+ Overrides the default whitespace chars
+
+ Example::
+
+ # default whitespace chars are space, <TAB> and newline
+ Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
+
+ # change to just treat newline as significant
+ ParserElement.set_default_whitespace_chars(" \t")
+ Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def']
+ """
+ ParserElement.DEFAULT_WHITE_CHARS = chars
+
+ # update whitespace all parse expressions defined in this module
+ for expr in _builtin_exprs:
+ if expr.copyDefaultWhiteChars:
+ expr.whiteChars = set(chars)
+
+ @staticmethod
+ def inline_literals_using(cls: type) -> None:
+ """
+ Set class to be used for inclusion of string literals into a parser.
+
+ Example::
+
+ # default literal class used is Literal
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+
+ # change to Suppress
+ ParserElement.inline_literals_using(Suppress)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parse_string("1999/12/31") # -> ['1999', '12', '31']
+ """
+ ParserElement._literalStringClass = cls
+
+ class DebugActions(NamedTuple):
+ debug_try: typing.Optional[DebugStartAction]
+ debug_match: typing.Optional[DebugSuccessAction]
+ debug_fail: typing.Optional[DebugExceptionAction]
+
+ def __init__(self, savelist: bool = False):
+ self.parseAction: List[ParseAction] = list()
+ self.failAction: typing.Optional[ParseFailAction] = None
+ self.customName = None
+ self._defaultName = None
+ self.resultsName = None
+ self.saveAsList = savelist
+ self.skipWhitespace = True
+ self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
+ self.copyDefaultWhiteChars = True
+ # used when checking for left-recursion
+ self.mayReturnEmpty = False
+ self.keepTabs = False
+ self.ignoreExprs: List["ParserElement"] = list()
+ self.debug = False
+ self.streamlined = False
+ # optimize exception handling for subclasses that don't advance parse index
+ self.mayIndexError = True
+ self.errmsg = ""
+ # mark results names as modal (report only last) or cumulative (list all)
+ self.modalResults = True
+ # custom debug actions
+ self.debugActions = self.DebugActions(None, None, None)
+ # avoid redundant calls to preParse
+ self.callPreparse = True
+ self.callDuringTry = False
+ self.suppress_warnings_: List[Diagnostics] = []
+
+ def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement":
+ """
+ Suppress warnings emitted for a particular diagnostic on this expression.
+
+ Example::
+
+ base = pp.Forward()
+ base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward)
+
+ # statement would normally raise a warning, but is now suppressed
+ print(base.parseString("x"))
+
+ """
+ self.suppress_warnings_.append(warning_type)
+ return self
+
+ def copy(self) -> "ParserElement":
+ """
+ Make a copy of this :class:`ParserElement`. Useful for defining
+ different parse actions for the same parsing pattern, using copies of
+ the original parse element.
+
+ Example::
+
+ integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+ integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K")
+ integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
+
+ print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M"))
+
+ prints::
+
+ [5120, 100, 655360, 268435456]
+
+ Equivalent form of ``expr.copy()`` is just ``expr()``::
+
+ integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
+ """
+ cpy = copy.copy(self)
+ cpy.parseAction = self.parseAction[:]
+ cpy.ignoreExprs = self.ignoreExprs[:]
+ if self.copyDefaultWhiteChars:
+ cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
+ return cpy
+
+ def set_results_name(
+ self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False
+ ) -> "ParserElement":
+ """
+ Define name for referencing matching tokens as a nested attribute
+ of the returned parse results.
+
+ Normally, results names are assigned as you would assign keys in a dict:
+ any existing value is overwritten by later values. If it is necessary to
+ keep all values captured for a particular results name, call ``set_results_name``
+ with ``list_all_matches`` = True.
+
+ NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object;
+ this is so that the client can define a basic element, such as an
+ integer, and reference it in multiple places with different names.
+
+ You can also set results names using the abbreviated syntax,
+ ``expr("name")`` in place of ``expr.set_results_name("name")``
+ - see :class:`__call__`. If ``list_all_matches`` is required, use
+ ``expr("name*")``.
+
+ Example::
+
+ date_str = (integer.set_results_name("year") + '/'
+ + integer.set_results_name("month") + '/'
+ + integer.set_results_name("day"))
+
+ # equivalent form:
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+ """
+ listAllMatches = listAllMatches or list_all_matches
+ return self._setResultsName(name, listAllMatches)
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if name is None:
+ return self
+ newself = self.copy()
+ if name.endswith("*"):
+ name = name[:-1]
+ listAllMatches = True
+ newself.resultsName = name
+ newself.modalResults = not listAllMatches
+ return newself
+
+ def set_break(self, break_flag: bool = True) -> "ParserElement":
+ """
+ Method to invoke the Python pdb debugger when this element is
+ about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to
+ disable.
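+
+        Example (a minimal sketch)::
+
+            # drop into pdb each time this expression is about to be parsed
+            integer = Word(nums).set_break()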
+ """
+ if break_flag:
+ _parseMethod = self._parse
+
+ def breaker(instring, loc, doActions=True, callPreParse=True):
+ import pdb
+
+ # this call to pdb.set_trace() is intentional, not a checkin error
+ pdb.set_trace()
+ return _parseMethod(instring, loc, doActions, callPreParse)
+
+ breaker._originalParseMethod = _parseMethod
+ self._parse = breaker
+ else:
+ if hasattr(self._parse, "_originalParseMethod"):
+ self._parse = self._parse._originalParseMethod
+ return self
+
+ def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
+ """
+ Define one or more actions to perform when successfully matching parse element definition.
+
+ Parse actions can be called to perform data conversions, do extra validation,
+ update external data structures, or enhance or replace the parsed tokens.
+ Each parse action ``fn`` is a callable method with 0-3 arguments, called as
+ ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
+
+ - s = the original string being parsed (see note below)
+ - loc = the location of the matching substring
+ - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
+
+ The parsed tokens are passed to the parse action as ParseResults. They can be
+ modified in place using list-style append, extend, and pop operations to update
+ the parsed list elements; and with dictionary-style item set and del operations
+ to add, update, or remove any named results. If the tokens are modified in place,
+ it is not necessary to return them with a return statement.
+
+ Parse actions can also completely replace the given tokens, with another ``ParseResults``
+ object, or with some entirely different object (common for parse actions that perform data
+ conversions). A convenient way to build a new parse result is to define the values
+ using a dict, and then create the return value using :class:`ParseResults.from_dict`.
+
+ If None is passed as the ``fn`` parse action, all previously added parse actions for this
+ expression are cleared.
+
+ Optional keyword arguments:
+
+ - call_during_try = (default= ``False``) indicate if parse action should be run during
+ lookaheads and alternate testing. For parse actions that have side effects, it is
+ important to only call the parse action once it is determined that it is being
+ called as part of a successful parse. For parse actions that perform additional
+ validation, then call_during_try should be passed as True, so that the validation
+ code is included in the preliminary "try" parses.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See :class:`parse_string` for more
+ information on parsing strings containing ``<TAB>`` s, and suggested
+ methods to maintain a consistent view of the parsed string, the parse
+ location, and line and column positions within the parsed string.
+
+ Example::
+
+ # parse dates in the form YYYY/MM/DD
+
+ # use parse action to convert toks from str to int at parse time
+ def convert_to_int(toks):
+ return int(toks[0])
+
+ # use a parse action to verify that the date is a valid date
+ def is_valid_date(instring, loc, toks):
+ from datetime import date
+ year, month, day = toks[::2]
+ try:
+ date(year, month, day)
+ except ValueError:
+ raise ParseException(instring, loc, "invalid date given")
+
+ integer = Word(nums)
+ date_str = integer + '/' + integer + '/' + integer
+
+ # add parse actions
+ integer.set_parse_action(convert_to_int)
+ date_str.set_parse_action(is_valid_date)
+
+ # note that integer fields are now ints, not strings
+ date_str.run_tests('''
+ # successful parse - note that integer fields were converted to ints
+ 1999/12/31
+
+ # fail - invalid date
+ 1999/13/31
+ ''')
+ """
+ if list(fns) == [None]:
+ self.parseAction = []
+ else:
+ if not all(callable(fn) for fn in fns):
+ raise TypeError("parse actions must be callable")
+ self.parseAction = [_trim_arity(fn) for fn in fns]
+ self.callDuringTry = kwargs.get(
+ "call_during_try", kwargs.get("callDuringTry", False)
+ )
+ return self
+
+ def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement":
+ """
+ Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`.
+
+ See examples in :class:`copy`.
+ """
+ self.parseAction += [_trim_arity(fn) for fn in fns]
+ self.callDuringTry = self.callDuringTry or kwargs.get(
+ "call_during_try", kwargs.get("callDuringTry", False)
+ )
+ return self
+
+ def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement":
+ """Add a boolean predicate function to expression's list of parse actions. See
+ :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``,
+ functions passed to ``add_condition`` need to return boolean success/fail of the condition.
+
+ Optional keyword arguments:
+
+ - message = define a custom message to be used in the raised exception
+ - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise
+ ParseException
+ - call_during_try = boolean to indicate if this method should be called during internal tryParse calls,
+ default=False
+
+ Example::
+
+ integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+ year_int = integer.copy()
+ year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+ date_str = year_int + '/' + integer + '/' + integer
+
+ result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0),
+ (line:1, col:1)
+ """
+ for fn in fns:
+ self.parseAction.append(
+ condition_as_parse_action(
+ fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False)
+ )
+ )
+
+ self.callDuringTry = self.callDuringTry or kwargs.get(
+ "call_during_try", kwargs.get("callDuringTry", False)
+ )
+ return self
+
+ def set_fail_action(self, fn: ParseFailAction) -> "ParserElement":
+ """
+ Define action to perform if parsing fails at this expression.
+        Define action to perform if parsing fails at this expression.
+        The fail action ``fn`` is a callable that takes the arguments
+
+ - s = string being parsed
+ - loc = location where expression match was attempted and failed
+ - expr = the parse expression that failed
+ - err = the exception thrown
+
+ The function returns no value. It may throw :class:`ParseFatalException`
+        if it is desired to stop parsing immediately.
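+
+        Example (a minimal sketch; ``report_failure`` is a hypothetical fail action)::
+
+            def report_failure(s, loc, expr, err):
+                print("failed to match {} at line {}".format(expr, lineno(loc, s)))
+
+            keyword_expr = Literal("BEGIN").set_fail_action(report_failure)
+        """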
+ self.failAction = fn
+ return self
+
+    def _skipIgnorables(self, instring, loc):
+        # repeatedly try all ignore expressions (e.g. comments), advancing
+        # loc past each match, until a full pass finds no further matches
+        exprsFound = True
+        while exprsFound:
+            exprsFound = False
+            for e in self.ignoreExprs:
+                try:
+                    while 1:
+                        loc, dummy = e._parse(instring, loc)
+                        exprsFound = True
+                except ParseException:
+                    pass
+        return loc
+
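+    # subclass hooks: preParse skips ignore expressions and leading whitespace,
+    # parseImpl performs the actual match, and postParse post-processes the tokens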
+ def preParse(self, instring, loc):
+ if self.ignoreExprs:
+ loc = self._skipIgnorables(instring, loc)
+
+ if self.skipWhitespace:
+ instrlen = len(instring)
+ white_chars = self.whiteChars
+ while loc < instrlen and instring[loc] in white_chars:
+ loc += 1
+
+ return loc
+
+ def parseImpl(self, instring, loc, doActions=True):
+ return loc, []
+
+ def postParse(self, instring, loc, tokenlist):
+ return tokenlist
+
+ # @profile
+ def _parseNoCache(
+ self, instring, loc, doActions=True, callPreParse=True
+ ) -> Tuple[int, ParseResults]:
+ TRY, MATCH, FAIL = 0, 1, 2
+        debugging = self.debug  # and doActions
+ len_instring = len(instring)
+
+ if debugging or self.failAction:
+ # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring)))
+ try:
+ if callPreParse and self.callPreparse:
+ pre_loc = self.preParse(instring, loc)
+ else:
+ pre_loc = loc
+ tokens_start = pre_loc
+ if self.debugActions.debug_try:
+ self.debugActions.debug_try(instring, tokens_start, self, False)
+ if self.mayIndexError or pre_loc >= len_instring:
+ try:
+ loc, tokens = self.parseImpl(instring, pre_loc, doActions)
+ except IndexError:
+ raise ParseException(instring, len_instring, self.errmsg, self)
+ else:
+ loc, tokens = self.parseImpl(instring, pre_loc, doActions)
+ except Exception as err:
+ # print("Exception raised:", err)
+ if self.debugActions.debug_fail:
+ self.debugActions.debug_fail(
+ instring, tokens_start, self, err, False
+ )
+ if self.failAction:
+ self.failAction(instring, tokens_start, self, err)
+ raise
+ else:
+ if callPreParse and self.callPreparse:
+ pre_loc = self.preParse(instring, loc)
+ else:
+ pre_loc = loc
+ tokens_start = pre_loc
+ if self.mayIndexError or pre_loc >= len_instring:
+ try:
+ loc, tokens = self.parseImpl(instring, pre_loc, doActions)
+ except IndexError:
+ raise ParseException(instring, len_instring, self.errmsg, self)
+ else:
+ loc, tokens = self.parseImpl(instring, pre_loc, doActions)
+
+ tokens = self.postParse(instring, loc, tokens)
+
+ ret_tokens = ParseResults(
+ tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults
+ )
+ if self.parseAction and (doActions or self.callDuringTry):
+ if debugging:
+ try:
+ for fn in self.parseAction:
+ try:
+ tokens = fn(instring, tokens_start, ret_tokens)
+ except IndexError as parse_action_exc:
+ exc = ParseException("exception raised in parse action")
+ raise exc from parse_action_exc
+
+ if tokens is not None and tokens is not ret_tokens:
+ ret_tokens = ParseResults(
+ tokens,
+ self.resultsName,
+ asList=self.saveAsList
+ and isinstance(tokens, (ParseResults, list)),
+ modal=self.modalResults,
+ )
+ except Exception as err:
+ # print "Exception raised in user parse action:", err
+ if self.debugActions.debug_fail:
+ self.debugActions.debug_fail(
+ instring, tokens_start, self, err, False
+ )
+ raise
+ else:
+ for fn in self.parseAction:
+ try:
+ tokens = fn(instring, tokens_start, ret_tokens)
+ except IndexError as parse_action_exc:
+ exc = ParseException("exception raised in parse action")
+ raise exc from parse_action_exc
+
+ if tokens is not None and tokens is not ret_tokens:
+ ret_tokens = ParseResults(
+ tokens,
+ self.resultsName,
+ asList=self.saveAsList
+ and isinstance(tokens, (ParseResults, list)),
+ modal=self.modalResults,
+ )
+ if debugging:
+ # print("Matched", self, "->", ret_tokens.as_list())
+ if self.debugActions.debug_match:
+ self.debugActions.debug_match(
+ instring, tokens_start, loc, self, ret_tokens, False
+ )
+
+ return loc, ret_tokens
+
+ def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int:
+ try:
+ return self._parse(instring, loc, doActions=False)[0]
+ except ParseFatalException:
+ if raise_fatal:
+ raise
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ def can_parse_next(self, instring: str, loc: int) -> bool:
+ try:
+ self.try_parse(instring, loc)
+ except (ParseException, IndexError):
+ return False
+ else:
+ return True
+
+ # cache for left-recursion in Forward references
+ recursion_lock = RLock()
+ recursion_memos: typing.Dict[
+ Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]]
+ ] = {}
+
+ # argument cache for optimizing repeated calls when backtracking through recursive expressions
+ packrat_cache = (
+ {}
+    )  # this is set later by enable_packrat(); this is here so that reset_cache() doesn't fail
+ packrat_cache_lock = RLock()
+ packrat_cache_stats = [0, 0]
+
+ # this method gets repeatedly called during backtracking with the same arguments -
+ # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+ def _parseCache(
+ self, instring, loc, doActions=True, callPreParse=True
+ ) -> Tuple[int, ParseResults]:
+ HIT, MISS = 0, 1
+ TRY, MATCH, FAIL = 0, 1, 2
+ lookup = (self, instring, loc, callPreParse, doActions)
+ with ParserElement.packrat_cache_lock:
+ cache = ParserElement.packrat_cache
+ value = cache.get(lookup)
+ if value is cache.not_in_cache:
+ ParserElement.packrat_cache_stats[MISS] += 1
+ try:
+ value = self._parseNoCache(instring, loc, doActions, callPreParse)
+ except ParseBaseException as pe:
+ # cache a copy of the exception, without the traceback
+ cache.set(lookup, pe.__class__(*pe.args))
+ raise
+ else:
+ cache.set(lookup, (value[0], value[1].copy(), loc))
+ return value
+ else:
+ ParserElement.packrat_cache_stats[HIT] += 1
+ if self.debug and self.debugActions.debug_try:
+ try:
+ self.debugActions.debug_try(instring, loc, self, cache_hit=True)
+ except TypeError:
+ pass
+ if isinstance(value, Exception):
+ if self.debug and self.debugActions.debug_fail:
+ try:
+ self.debugActions.debug_fail(
+ instring, loc, self, value, cache_hit=True
+ )
+ except TypeError:
+ pass
+ raise value
+
+ loc_, result, endloc = value[0], value[1].copy(), value[2]
+ if self.debug and self.debugActions.debug_match:
+ try:
+ self.debugActions.debug_match(
+ instring, loc_, endloc, self, result, cache_hit=True
+ )
+ except TypeError:
+ pass
+
+ return loc_, result
+
+ _parse = _parseNoCache
+
+ @staticmethod
+ def reset_cache() -> None:
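+        """Clear the packrat cache, its hit/miss statistics, and any left-recursion memos."""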
+ ParserElement.packrat_cache.clear()
+ ParserElement.packrat_cache_stats[:] = [0] * len(
+ ParserElement.packrat_cache_stats
+ )
+ ParserElement.recursion_memos.clear()
+
+ _packratEnabled = False
+ _left_recursion_enabled = False
+
+ @staticmethod
+ def disable_memoization() -> None:
+ """
+        Disables active Packrat or Left Recursion parsing and their memoization.
+
+        This method also works if neither Packrat nor Left Recursion is enabled.
+        This makes it safe to call before activating Packrat or Left Recursion,
+        to clear any previous settings.
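+
+        Example (a minimal sketch)::
+
+            # start from a clean slate before switching memoization strategies
+            ParserElement.disable_memoization()
+            ParserElement.enable_packrat()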
+ """
+ ParserElement.reset_cache()
+ ParserElement._left_recursion_enabled = False
+ ParserElement._packratEnabled = False
+ ParserElement._parse = ParserElement._parseNoCache
+
+ @staticmethod
+ def enable_left_recursion(
+ cache_size_limit: typing.Optional[int] = None, *, force=False
+ ) -> None:
+ """
+ Enables "bounded recursion" parsing, which allows for both direct and indirect
+ left-recursion. During parsing, left-recursive :class:`Forward` elements are
+ repeatedly matched with a fixed recursion depth that is gradually increased
+ until finding the longest match.
+
+ Example::
+
+ from pip._vendor import pyparsing as pp
+ pp.ParserElement.enable_left_recursion()
+
+ E = pp.Forward("E")
+ num = pp.Word(pp.nums)
+ # match `num`, or `num '+' num`, or `num '+' num '+' num`, ...
+ E <<= E + '+' - num | num
+
+ print(E.parse_string("1+2+3"))
+
+ Recursion search naturally memoizes matches of ``Forward`` elements and may
+ thus skip reevaluation of parse actions during backtracking. This may break
+ programs with parse actions which rely on strict ordering of side-effects.
+
+ Parameters:
+
+ - cache_size_limit - (default=``None``) - memoize at most this many
+ ``Forward`` elements during matching; if ``None`` (the default),
+ memoize all ``Forward`` elements.
+
+        Bounded Recursion parsing works similarly, but not identically, to Packrat
+        parsing, so the two cannot be used together. Use ``force=True`` to disable
+        any previous, conflicting settings.
+ """
+ if force:
+ ParserElement.disable_memoization()
+ elif ParserElement._packratEnabled:
+ raise RuntimeError("Packrat and Bounded Recursion are not compatible")
+ if cache_size_limit is None:
+ ParserElement.recursion_memos = _UnboundedMemo()
+ elif cache_size_limit > 0:
+ ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit)
+ else:
+ raise NotImplementedError("Memo size of %s" % cache_size_limit)
+ ParserElement._left_recursion_enabled = True
+
+ @staticmethod
+ def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None:
+ """
+ Enables "packrat" parsing, which adds memoizing to the parsing logic.
+ Repeated parse attempts at the same string location (which happens
+ often in many complex grammars) can immediately return a cached value,
+ instead of re-executing parsing/validating code. Memoizing is done of
+ both valid results and parsing exceptions.
+
+ Parameters:
+
+ - cache_size_limit - (default= ``128``) - if an integer value is provided
+ will limit the size of the packrat cache; if None is passed, then
+ the cache size will be unbounded; if 0 is passed, the cache will
+ be effectively disabled.
+
+ This speedup may break existing programs that use parse actions that
+ have side-effects. For this reason, packrat parsing is disabled when
+ you first import pyparsing. To activate the packrat feature, your
+ program must call the class method :class:`ParserElement.enable_packrat`.
+ For best results, call ``enable_packrat()`` immediately after
+ importing pyparsing.
+
+ Example::
+
+ from pip._vendor import pyparsing
+ pyparsing.ParserElement.enable_packrat()
+
+        Packrat parsing works similarly, but not identically, to Bounded Recursion
+        parsing, so the two cannot be used together. Use ``force=True`` to disable
+        any previous, conflicting settings.
+ """
+ if force:
+ ParserElement.disable_memoization()
+ elif ParserElement._left_recursion_enabled:
+ raise RuntimeError("Packrat and Bounded Recursion are not compatible")
+ if not ParserElement._packratEnabled:
+ ParserElement._packratEnabled = True
+ if cache_size_limit is None:
+ ParserElement.packrat_cache = _UnboundedCache()
+ else:
+ ParserElement.packrat_cache = _FifoCache(cache_size_limit)
+ ParserElement._parse = ParserElement._parseCache
+
+ def parse_string(
+ self, instring: str, parse_all: bool = False, *, parseAll: bool = False
+ ) -> ParseResults:
+ """
+ Parse a string with respect to the parser definition. This function is intended as the primary parsing
+ interface for client code.
+
+ :param instring: The input string to be parsed.
+ :param parse_all: If set, the entire input string must match the grammar.
+ :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release.
+ :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar.
+ :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or
+ an object with attributes if the given parser includes results names.
+
+ If the input string is required to match the entire grammar, the ``parse_all`` flag must be set to ``True``.
+ This is equivalent to ending the grammar with :class:`StringEnd`().
+
+ To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are
+ converted to spaces (8 spaces per tab, as per the default in ``str.expandtabs``). If the input string
+ contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string
+ being parsed, one can ensure a consistent view of the input string by doing one of the following:
+
+ - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`),
+ - defining your parse action using the full ``(s, loc, toks)`` signature, and referencing the input string
+ using the parse action's ``s`` argument, or
+ - explicitly expanding the tabs in your input string before calling ``parse_string``.
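+
+ For example, a minimal sketch of the first option::
+
+ grammar = Word(alphas)
+ grammar.parse_with_tabs().parse_string("hello\tworld")  # -> ['hello']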
+
+ Examples:
+
+ By default, partial matches are OK.
+
+ >>> res = Word('a').parse_string('aaaaabaaa')
+ >>> print(res)
+ ['aaaaa']
+
+ Parsing behavior varies by subclass of this abstract class. Please refer to the subclasses
+ directly for more examples.
+
+ An exception is raised if the ``parse_all`` flag is set and ``instring`` does not match the whole grammar.
+
+ >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True)
+ Traceback (most recent call last):
+ ...
+ pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6)
+ """
+ parseAll = parse_all or parseAll
+
+ ParserElement.reset_cache()
+ if not self.streamlined:
+ self.streamline()
+ for e in self.ignoreExprs:
+ e.streamline()
+ if not self.keepTabs:
+ instring = instring.expandtabs()
+ try:
+ loc, tokens = self._parse(instring, 0)
+ if parseAll:
+ loc = self.preParse(instring, loc)
+ se = Empty() + StringEnd()
+ se._parse(instring, loc)
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clearing out pyparsing internal stack trace
+ raise exc.with_traceback(None)
+ else:
+ return tokens
+
+ def scan_string(
+ self,
+ instring: str,
+ max_matches: int = _MAX_INT,
+ overlap: bool = False,
+ *,
+ debug: bool = False,
+ maxMatches: int = _MAX_INT,
+ ) -> Generator[Tuple[ParseResults, int, int], None, None]:
+ """
+ Scan the input string for expression matches. Each match will return the
+ matching tokens, start location, and end location. May be called with optional
+ ``max_matches`` argument, to clip scanning after 'n' matches are found. If
+ ``overlap`` is specified, then overlapping matches will be reported.
+
+ Note that the start and end locations are reported relative to the string
+ being parsed. See :class:`parse_string` for more information on parsing
+ strings with embedded tabs.
+
+ Example::
+
+ source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+ print(source)
+ for tokens, start, end in Word(alphas).scan_string(source):
+ print(' '*start + '^'*(end-start))
+ print(' '*start + tokens[0])
+
+ prints::
+
+ sldjf123lsdjjkf345sldkjf879lkjsfd987
+ ^^^^^
+ sldjf
+ ^^^^^^^
+ lsdjjkf
+ ^^^^^^
+ sldkjf
+ ^^^^^^
+ lkjsfd
+ """
+ maxMatches = min(maxMatches, max_matches)
+ if not self.streamlined:
+ self.streamline()
+ for e in self.ignoreExprs:
+ e.streamline()
+
+ if not self.keepTabs:
+ instring = str(instring).expandtabs()
+ instrlen = len(instring)
+ loc = 0
+ preparseFn = self.preParse
+ parseFn = self._parse
+ ParserElement.resetCache()
+ matches = 0
+ try:
+ while loc <= instrlen and matches < maxMatches:
+ try:
+ preloc = preparseFn(instring, loc)
+ nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
+ except ParseException:
+ loc = preloc + 1
+ else:
+ if nextLoc > loc:
+ matches += 1
+ if debug:
+ print(
+ {
+ "tokens": tokens.asList(),
+ "start": preloc,
+ "end": nextLoc,
+ }
+ )
+ yield tokens, preloc, nextLoc
+ if overlap:
+ nextloc = preparseFn(instring, loc)
+ if nextloc > loc:
+ loc = nextLoc
+ else:
+ loc += 1
+ else:
+ loc = nextLoc
+ else:
+ loc = preloc + 1
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc.with_traceback(None)
+
+ def transform_string(self, instring: str, *, debug: bool = False) -> str:
+ """
+ Extension to :class:`scan_string`, to replace matched text with modified tokens that may
+ be returned from a parse action. To use ``transform_string``, define a grammar and
+ attach a parse action to it that modifies the returned token list.
+ Invoking ``transform_string()`` on a target string will then scan for matches,
+ and replace the matched text patterns according to the logic in the parse
+ action. ``transform_string()`` returns the resulting transformed string.
+
+ Example::
+
+ wd = Word(alphas)
+ wd.set_parse_action(lambda toks: toks[0].title())
+
+ print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york."))
+
+ prints::
+
+ Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+ """
+ out: List[str] = []
+ lastE = 0
+ # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+ # keep string locs straight between transform_string and scan_string
+ self.keepTabs = True
+ try:
+ for t, s, e in self.scan_string(instring, debug=debug):
+ out.append(instring[lastE:s])
+ if t:
+ if isinstance(t, ParseResults):
+ out += t.as_list()
+ elif isinstance(t, Iterable) and not isinstance(t, str_type):
+ out.extend(t)
+ else:
+ out.append(t)
+ lastE = e
+ out.append(instring[lastE:])
+ out = [o for o in out if o]
+ return "".join([str(s) for s in _flatten(out)])
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc.with_traceback(None)
+
+ def search_string(
+ self,
+ instring: str,
+ max_matches: int = _MAX_INT,
+ *,
+ debug: bool = False,
+ maxMatches: int = _MAX_INT,
+ ) -> ParseResults:
+ """
+ Another extension to :class:`scan_string`, simplifying the access to the tokens found
+ to match the given parse expression. May be called with optional
+ ``max_matches`` argument, to clip searching after 'n' matches are found.
+
+ Example::
+
+ # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+ cap_word = Word(alphas.upper(), alphas.lower())
+
+ print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+ # the sum() builtin can be used to merge results into a single ParseResults object
+ print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")))
+
+ prints::
+
+ [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+ ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+ """
+ maxMatches = min(maxMatches, max_matches)
+ try:
+ return ParseResults(
+ [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)]
+ )
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc.with_traceback(None)
+
+ def split(
+ self,
+ instring: str,
+ maxsplit: int = _MAX_INT,
+ include_separators: bool = False,
+ *,
+ includeSeparators=False,
+ ) -> Generator[str, None, None]:
+ """
+ Generator method to split a string using the given expression as a separator.
+ May be called with optional ``maxsplit`` argument, to limit the number of splits;
+ and the optional ``include_separators`` argument (default= ``False``), if the separating
+ matching text should be included in the split results.
+
+ Example::
+
+ punc = one_of(list(".,;:/-!?"))
+ print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+
+ prints::
+
+ ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+ """
+ includeSeparators = includeSeparators or include_separators
+ last = 0
+ for t, s, e in self.scan_string(instring, max_matches=maxsplit):
+ yield instring[last:s]
+ if includeSeparators:
+ yield t[0]
+ last = e
+ yield instring[last:]
+
+ def __add__(self, other) -> "ParserElement":
+ """
+ Implementation of ``+`` operator - returns :class:`And`. Adding strings to a :class:`ParserElement`
+ converts them to :class:`Literal`s by default.
+
+ Example::
+
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+ hello = "Hello, World!"
+ print(hello, "->", greet.parse_string(hello))
+
+ prints::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+ ``...`` may be used as a parse expression as a short form of :class:`SkipTo`::
+
+ Literal('start') + ... + Literal('end')
+
+ is equivalent to::
+
+ Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
+
+ Note that the skipped text is returned with '_skipped' as a results name,
+ and to support having multiple skips in the same parser, the value returned is
+ a list of all skipped text.
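+
+ Example (a minimal sketch of ``...`` skipping)::
+
+ expr = Literal('start') + ... + Literal('end')
+ result = expr.parse_string("start 123 end")
+ print(result['_skipped'])  # the skipped text, as a list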
+ """
+ if other is Ellipsis:
+ return _PendingSkip(self)
+
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return And([self, other])
+
+ def __radd__(self, other) -> "ParserElement":
+ """
+ Implementation of ``+`` operator when left operand is not a :class:`ParserElement`
+ """
+ if other is Ellipsis:
+ return SkipTo(self)("_skipped*") + self
+
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return other + self
+
+ def __sub__(self, other) -> "ParserElement":
+ """
+ Implementation of ``-`` operator, returns :class:`And` with error stop
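+
+ Example (a minimal sketch; after the ``-``, a failed match raises a fatal
+ exception immediately, instead of backtracking to other alternatives)::
+
+ if_stmt = Keyword("if") - "(" + Word(alphas) + ")"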
+ """
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return self + And._ErrorStop() + other
+
+ def __rsub__(self, other) -> "ParserElement":
+ """
+ Implementation of ``-`` operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return other - self
+
+ def __mul__(self, other) -> "ParserElement":
+ """
+ Implementation of ``*`` operator, allows use of ``expr * 3`` in place of
+ ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
+ tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
+ may also include ``None`` as in:
+ - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
+ to ``expr*n + ZeroOrMore(expr)``
+ (read as "at least n instances of ``expr``")
+ - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
+ (read as "0 to n instances of ``expr``")
+ - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
+ - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
+
+ Note that ``expr*(None, n)`` does not raise an exception if
+ more than n exprs exist in the input stream; that is,
+ ``expr*(None, n)`` does not enforce a maximum number of expr
+ occurrences. If this behavior is desired, then write
+ ``expr*(None, n) + ~expr``
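+
+ Example (illustrative)::
+
+ num = Word(nums)
+ num * 3          # exactly three numbers
+ num * (2, 4)     # two to four numbers
+ num * (2, None)  # two or more numbers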
+ """
+ if other is Ellipsis:
+ other = (0, None)
+ elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
+ other = ((0,) + other[1:] + (None,))[:2]
+
+ if isinstance(other, int):
+ minElements, optElements = other, 0
+ elif isinstance(other, tuple):
+ other = tuple(o if o is not Ellipsis else None for o in other)
+ other = (other + (None, None))[:2]
+ if other[0] is None:
+ other = (0, other[1])
+ if isinstance(other[0], int) and other[1] is None:
+ if other[0] == 0:
+ return ZeroOrMore(self)
+ if other[0] == 1:
+ return OneOrMore(self)
+ else:
+ return self * other[0] + ZeroOrMore(self)
+ elif isinstance(other[0], int) and isinstance(other[1], int):
+ minElements, optElements = other
+ optElements -= minElements
+ else:
+ raise TypeError(
+ "cannot multiply ParserElement and ({}) objects".format(
+ ",".join(type(item).__name__ for item in other)
+ )
+ )
+ else:
+ raise TypeError(
+ "cannot multiply ParserElement and {} objects".format(
+ type(other).__name__
+ )
+ )
+
+ if minElements < 0:
+ raise ValueError("cannot multiply ParserElement by negative value")
+ if optElements < 0:
+ raise ValueError(
+ "second tuple value must be greater or equal to first tuple value"
+ )
+ if minElements == optElements == 0:
+ return And([])
+
+ if optElements:
+
+ def makeOptionalList(n):
+ if n > 1:
+ return Opt(self + makeOptionalList(n - 1))
+ else:
+ return Opt(self)
+
+ if minElements:
+ if minElements == 1:
+ ret = self + makeOptionalList(optElements)
+ else:
+ ret = And([self] * minElements) + makeOptionalList(optElements)
+ else:
+ ret = makeOptionalList(optElements)
+ else:
+ if minElements == 1:
+ ret = self
+ else:
+ ret = And([self] * minElements)
+ return ret
+
+ def __rmul__(self, other) -> "ParserElement":
+ return self.__mul__(other)
+
+ def __or__(self, other) -> "ParserElement":
+ """
+ Implementation of ``|`` operator - returns :class:`MatchFirst`
+ """
+ if other is Ellipsis:
+ return _PendingSkip(self, must_skip=True)
+
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return MatchFirst([self, other])
+
+ def __ror__(self, other) -> "ParserElement":
+ """
+ Implementation of ``|`` operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return other | self
+
+ def __xor__(self, other) -> "ParserElement":
+ """
+ Implementation of ``^`` operator - returns :class:`Or`
+ """
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return Or([self, other])
+
+ def __rxor__(self, other) -> "ParserElement":
+ """
+ Implementation of ``^`` operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return other ^ self
+
+ def __and__(self, other) -> "ParserElement":
+ """
+ Implementation of ``&`` operator - returns :class:`Each`
+ """
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return Each([self, other])
+
+ def __rand__(self, other) -> "ParserElement":
+ """
+ Implementation of ``&`` operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ raise TypeError(
+ "Cannot combine element of type {} with ParserElement".format(
+ type(other).__name__
+ )
+ )
+ return other & self
+
+ def __invert__(self) -> "ParserElement":
+ """
+ Implementation of ``~`` operator - returns :class:`NotAny`
+ """
+ return NotAny(self)
+
+ # disable __iter__ to override legacy use of sequential access to __getitem__ to
+ # iterate over a sequence
+ __iter__ = None
+
+ def __getitem__(self, key):
+ """
+ use ``[]`` indexing notation as a short form for expression repetition:
+
+ - ``expr[n]`` is equivalent to ``expr*n``
+ - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
+ - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
+ to ``expr*n + ZeroOrMore(expr)``
+ (read as "at least n instances of ``expr``")
+ - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
+ (read as "0 to n instances of ``expr``")
+ - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
+ - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
+
+ ``None`` may be used in place of ``...``.
+
+ Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
+ if more than ``n`` ``expr``s exist in the input stream. If this behavior is
+ desired, then write ``expr[..., n] + ~expr``.
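+
+ Example (illustrative)::
+
+ num = Word(nums)
+ num[3]        # same as num * 3
+ num[2, ...]   # two or more numbers
+ num[..., 4]   # zero to four numbers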
+ """
+
+ # convert single arg keys to tuples
+ try:
+ if isinstance(key, str_type):
+ key = (key,)
+ iter(key)
+ except TypeError:
+ key = (key, key)
+
+ if len(key) > 2:
+ raise TypeError(
+ "only 1 or 2 index arguments supported ({}{})".format(
+ key[:5], "... [{}]".format(len(key)) if len(key) > 5 else ""
+ )
+ )
+
+ # clip to 2 elements
+ ret = self * tuple(key[:2])
+ return ret
+
+ def __call__(self, name: str = None) -> "ParserElement":
+ """
+ Shortcut for :class:`set_results_name`, with ``list_all_matches=False``.
+
+ If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be
+ passed as ``True``.
+
+ If ``name`` is omitted, this is the same as calling :class:`copy`.
+
+ Example::
+
+ # these are equivalent
+ userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno")
+ userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
+ """
+ if name is not None:
+ return self._setResultsName(name)
+ else:
+ return self.copy()
+
+ def suppress(self) -> "ParserElement":
+ """
+ Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
+ cluttering up returned output.
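+
+ Example (a minimal sketch)::
+
+ wd = Word(alphas)
+ word_list = wd + (Literal(",").suppress() + wd)[...]
+ word_list.parse_string("a, b, c")  # -> ['a', 'b', 'c']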
+ """
+ return Suppress(self)
+
+ def ignore_whitespace(self, recursive: bool = True) -> "ParserElement":
+ """
+ Enables the skipping of whitespace before matching the characters in the
+ :class:`ParserElement`'s defined pattern.
+
+ :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any)
+ """
+ self.skipWhitespace = True
+ return self
+
+ def leave_whitespace(self, recursive: bool = True) -> "ParserElement":
+ """
+ Disables the skipping of whitespace before matching the characters in the
+ :class:`ParserElement`'s defined pattern. This is normally only used internally by
+ the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+
+ :param recursive: If ``True`` (the default), also disable whitespace skipping in child elements (if any)
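+
+ Example (a minimal sketch)::
+
+ # match a label with no whitespace permitted before the ':'
+ label = Word(alphas) + Literal(":").leave_whitespace()
+ label.parse_string("name:")   # -> ['name', ':']
+ label.parse_string("name :")  # raises ParseException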
+ """
+ self.skipWhitespace = False
+ return self
+
+ def set_whitespace_chars(
+ self, chars: Union[Set[str], str], copy_defaults: bool = False
+ ) -> "ParserElement":
+ """
+ Overrides the default whitespace chars
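+
+ Example (a minimal sketch)::
+
+ # treat only spaces and tabs as skippable whitespace, so that
+ # expressions will not match across newlines
+ wd = Word(alphas).set_whitespace_chars(" \t")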
+ """
+ self.skipWhitespace = True
+ self.whiteChars = set(chars)
+ self.copyDefaultWhiteChars = copy_defaults
+ return self
+
+ def parse_with_tabs(self) -> "ParserElement":
+ """
+ Overrides the default behavior of expanding ``<TAB>`` characters to spaces before parsing
+ the input string. Must be called before ``parse_string`` when the input grammar contains
+ elements that match ``<TAB>`` characters.
+ """
+ self.keepTabs = True
+ return self
+
+ def ignore(self, other: "ParserElement") -> "ParserElement":
+ """
+ Define expression to be ignored (e.g., comments) while doing pattern
+ matching; may be called repeatedly, to define multiple comment or other
+ ignorable patterns.
+
+ Example::
+
+ patt = Word(alphas)[1, ...]
+ patt.parse_string('ablaj /* comment */ lskjd')
+ # -> ['ablaj']
+
+ patt.ignore(c_style_comment)
+ patt.parse_string('ablaj /* comment */ lskjd')
+ # -> ['ablaj', 'lskjd']
+ """
+ if isinstance(other, str_type):
+ other = Suppress(other)
+
+ if isinstance(other, Suppress):
+ if other not in self.ignoreExprs:
+ self.ignoreExprs.append(other)
+ else:
+ self.ignoreExprs.append(Suppress(other.copy()))
+ return self
+
+ def set_debug_actions(
+ self,
+ start_action: DebugStartAction,
+ success_action: DebugSuccessAction,
+ exception_action: DebugExceptionAction,
+ ) -> "ParserElement":
+ """
+ Customize display of debugging messages while doing pattern matching:
+
+ - ``start_action`` - method to be called when an expression is about to be parsed;
+ should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)``
+
+ - ``success_action`` - method to be called when an expression has successfully parsed;
+ should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserElement, parsed_tokens: ParseResults, cache_hit: bool)``
+
+ - ``exception_action`` - method to be called when expression fails to parse;
+ should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)``
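+
+ Example (a minimal sketch; passing ``None`` for an action falls back to the
+ corresponding default action)::
+
+ show_start = lambda s, loc, expr, cache_hit=False: print("Trying", expr, "at", loc)
+ wd = Word(alphas).set_debug_actions(show_start, None, None)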
+ """
+ self.debugActions = self.DebugActions(
+ start_action or _default_start_debug_action,
+ success_action or _default_success_debug_action,
+ exception_action or _default_exception_debug_action,
+ )
+ self.debug = True
+ return self
+
+ def set_debug(self, flag: bool = True) -> "ParserElement":
+ """
+ Enable display of debugging messages while doing pattern matching.
+ Set ``flag`` to ``True`` to enable, ``False`` to disable.
+
+ Example::
+
+ wd = Word(alphas).set_name("alphaword")
+ integer = Word(nums).set_name("numword")
+ term = wd | integer
+
+ # turn on debugging for wd
+ wd.set_debug()
+
+ term[1, ...].parse_string("abc 123 xyz 890")
+
+ prints::
+
+ Match alphaword at loc 0(1,1)
+ Matched alphaword -> ['abc']
+ Match alphaword at loc 3(1,4)
+ Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+ Match alphaword at loc 7(1,8)
+ Matched alphaword -> ['xyz']
+ Match alphaword at loc 11(1,12)
+ Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+ Match alphaword at loc 15(1,16)
+ Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+ The output shown is that produced by the default debug actions - custom debug actions can be
+ specified using :class:`set_debug_actions`. Prior to attempting
+ to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
+ is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
+ message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression,
+ which makes debugging and exception messages easier to understand - for instance, the default
+ name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``.
+ """
+ if flag:
+ self.set_debug_actions(
+ _default_start_debug_action,
+ _default_success_debug_action,
+ _default_exception_debug_action,
+ )
+ else:
+ self.debug = False
+ return self
+
+ @property
+ def default_name(self) -> str:
+ if self._defaultName is None:
+ self._defaultName = self._generateDefaultName()
+ return self._defaultName
+
+ @abstractmethod
+ def _generateDefaultName(self):
+ """
+ Child classes must define this method, which defines how the ``default_name`` is set.
+ """
+
+ def set_name(self, name: str) -> "ParserElement":
+ """
+ Define a name for this expression, to make debugging and exception messages clearer.
+
+ Example::
+
+ Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1)
+ Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
+ """
+ self.customName = name
+ self.errmsg = "Expected " + self.name
+ if __diag__.enable_debug_on_named_expressions:
+ self.set_debug()
+ return self
+
+ @property
+ def name(self) -> str:
+ # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name
+ return self.customName if self.customName is not None else self.default_name
+
+ def __str__(self) -> str:
+ return self.name
+
+ def __repr__(self) -> str:
+ return str(self)
+
+ def streamline(self) -> "ParserElement":
+ self.streamlined = True
+ self._defaultName = None
+ return self
+
+ def recurse(self) -> Sequence["ParserElement"]:
+ return []
+
+ def _checkRecursion(self, parseElementList):
+ subRecCheckList = parseElementList[:] + [self]
+ for e in self.recurse():
+ e._checkRecursion(subRecCheckList)
+
+ def validate(self, validateTrace=None) -> None:
+ """
+ Check defined expressions for valid structure, check for infinite recursive definitions.
+ """
+ self._checkRecursion([])
+
+ def parse_file(
+ self,
+ file_or_filename: Union[str, Path, TextIO],
+ encoding: str = "utf-8",
+ parse_all: bool = False,
+ *,
+ parseAll: bool = False,
+ ) -> ParseResults:
+ """
+ Execute the parse expression on the given file or filename.
+ If a filename is specified (instead of a file object),
+ the entire file is opened, read, and closed before parsing.
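+
+ Example (a minimal sketch; the file name is illustrative)::
+
+ grammar = Word(alphas)[1, ...]
+ results = grammar.parse_file("data.txt", parse_all=True)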
+ """
+ parseAll = parseAll or parse_all
+ try:
+ file_contents = file_or_filename.read()
+ except AttributeError:
+ with open(file_or_filename, "r", encoding=encoding) as f:
+ file_contents = f.read()
+ try:
+ return self.parse_string(file_contents, parseAll)
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc.with_traceback(None)
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ elif isinstance(other, str_type):
+ return self.matches(other, parse_all=True)
+ elif isinstance(other, ParserElement):
+ return vars(self) == vars(other)
+ return False
+
+ def __hash__(self):
+ return id(self)
+
+ def matches(
+ self, test_string: str, parse_all: bool = True, *, parseAll: bool = True
+ ) -> bool:
+ """
+ Method for quick testing of a parser against a test string. Good for simple
+ inline microtests of sub-expressions while building up a larger parser.
+
+ Parameters:
+ - ``test_string`` - to test against this expression for a match
+ - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
+
+ Example::
+
+ expr = Word(nums)
+ assert expr.matches("100")
+ """
+ parseAll = parseAll and parse_all
+ try:
+ self.parse_string(str(test_string), parse_all=parseAll)
+ return True
+ except ParseBaseException:
+ return False
+
+ def run_tests(
+ self,
+ tests: Union[str, List[str]],
+ parse_all: bool = True,
+ comment: typing.Optional[Union["ParserElement", str]] = "#",
+ full_dump: bool = True,
+ print_results: bool = True,
+ failure_tests: bool = False,
+ post_parse: Callable[[str, ParseResults], str] = None,
+ file: typing.Optional[TextIO] = None,
+ with_line_numbers: bool = False,
+ *,
+ parseAll: bool = True,
+ fullDump: bool = True,
+ printResults: bool = True,
+ failureTests: bool = False,
+ postParse: Callable[[str, ParseResults], str] = None,
+ ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]:
+ """
+ Execute the parse expression on a series of test strings, showing each
+ test, and the parsed results or where the parse failed. A quick and easy way to
+ run a parse expression against a list of sample strings.
+
+ Parameters:
+ - ``tests`` - a list of separate test strings, or a multiline string of test strings
+ - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests
+ - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test
+ string; pass None to disable comment filtering
+ - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline;
+ if False, only dump nested list
+ - ``print_results`` - (default= ``True``) prints test output to stdout
+ - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing
+ - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as
+ `fn(test_string, parse_results)` and returns a string to be added to the test output
+ - ``file`` - (default= ``None``) optional file-like object to which test output will be written;
+ if None, will default to ``sys.stdout``
+ - ``with_line_numbers`` - (default= ``False``) - show test strings with line and column numbers
+
+ Returns: a (success, results) tuple, where success indicates that all tests succeeded
+ (or failed if ``failure_tests`` is True), and the results contain a list of lines of each
+ test's output
+
+ Example::
+
+ number_expr = pyparsing_common.number.copy()
+
+ result = number_expr.run_tests('''
+ # unsigned integer
+ 100
+ # negative integer
+ -100
+ # float with scientific notation
+ 6.02e23
+ # integer with scientific notation
+ 1e-12
+ ''')
+ print("Success" if result[0] else "Failed!")
+
+ result = number_expr.run_tests('''
+ # stray character
+ 100Z
+ # missing leading digit before '.'
+ -.100
+ # too many '.'
+ 3.14.159
+ ''', failure_tests=True)
+ print("Success" if result[0] else "Failed!")
+
+ prints::
+
+ # unsigned integer
+ 100
+ [100]
+
+ # negative integer
+ -100
+ [-100]
+
+ # float with scientific notation
+ 6.02e23
+ [6.02e+23]
+
+ # integer with scientific notation
+ 1e-12
+ [1e-12]
+
+ Success
+
+ # stray character
+ 100Z
+ ^
+ FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+ # missing leading digit before '.'
+ -.100
+ ^
+ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+ # too many '.'
+ 3.14.159
+ ^
+ FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+ Success
+
+ Each test string must be on a single line. If you want to test a string that spans multiple
+ lines, create a test like this::
+
+ expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines")
+
+ (Note that this is a raw string literal; you must include the leading ``'r'``.)
+ """
+ from .testing import pyparsing_test
+
+ parseAll = parseAll and parse_all
+ fullDump = fullDump and full_dump
+ printResults = printResults and print_results
+ failureTests = failureTests or failure_tests
+ postParse = postParse or post_parse
+ if isinstance(tests, str_type):
+ line_strip = type(tests).strip
+ tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()]
+ if isinstance(comment, str_type):
+ comment = Literal(comment)
+ if file is None:
+ file = sys.stdout
+ print_ = file.write
+
+ result: Union[ParseResults, Exception]
+ allResults = []
+ comments = []
+ success = True
+ NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string)
+ BOM = "\ufeff"
+ for t in tests:
+ if (comment is not None and comment.matches(t, False)) or (comments and not t):
+ comments.append(
+ pyparsing_test.with_line_numbers(t) if with_line_numbers else t
+ )
+ continue
+ if not t:
+ continue
+ out = [
+ "\n" + "\n".join(comments) if comments else "",
+ pyparsing_test.with_line_numbers(t) if with_line_numbers else t,
+ ]
+ comments = []
+ try:
+ # convert newline marks to actual newlines, and strip leading BOM if present
+ t = NL.transform_string(t.lstrip(BOM))
+ result = self.parse_string(t, parse_all=parseAll)
+ except ParseBaseException as pe:
+ fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+ out.append(pe.explain())
+ out.append("FAIL: " + str(pe))
+ if ParserElement.verbose_stacktrace:
+ out.extend(traceback.format_tb(pe.__traceback__))
+ success = success and failureTests
+ result = pe
+ except Exception as exc:
+ out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc))
+ if ParserElement.verbose_stacktrace:
+ out.extend(traceback.format_tb(exc.__traceback__))
+ success = success and failureTests
+ result = exc
+ else:
+ success = success and not failureTests
+ if postParse is not None:
+ try:
+ pp_value = postParse(t, result)
+ if pp_value is not None:
+ if isinstance(pp_value, ParseResults):
+ out.append(pp_value.dump())
+ else:
+ out.append(str(pp_value))
+ else:
+ out.append(result.dump())
+ except Exception as e:
+ out.append(result.dump(full=fullDump))
+ out.append(
+ "{} failed: {}: {}".format(
+ postParse.__name__, type(e).__name__, e
+ )
+ )
+ else:
+ out.append(result.dump(full=fullDump))
+ out.append("")
+
+ if printResults:
+ print_("\n".join(out))
+
+ allResults.append((t, result))
+
+ return success, allResults
+
+ def create_diagram(
+ self,
+ output_html: Union[TextIO, Path, str],
+ vertical: int = 3,
+ show_results_names: bool = False,
+ show_groups: bool = False,
+ **kwargs,
+ ) -> None:
+ """
+ Create a railroad diagram for the parser.
+
+ Parameters:
+ - output_html (str or file-like object) - output target for generated
+ diagram HTML
+ - vertical (int) - threshold for formatting multiple alternatives vertically
+ instead of horizontally (default=3)
+ - show_results_names - bool flag whether diagram should show annotations for
+ defined results names
+ - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box
+ Additional diagram-formatting keyword arguments can also be included;
+ see railroad.Diagram class.
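+
+ Example (a minimal sketch; requires the ``pyparsing[diagrams]`` extra, and
+ the output file name is illustrative)::
+
+ expr = (Word(alphas)("key") + "=" + Word(nums)("value"))[1, ...]
+ expr.create_diagram("parser_diagram.html", show_results_names=True)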
+ """
+
+ try:
+ from .diagram import to_railroad, railroad_to_html
+ except ImportError as ie:
+ raise Exception(
+ "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams"
+ ) from ie
+
+ self.streamline()
+
+ railroad = to_railroad(
+ self,
+ vertical=vertical,
+ show_results_names=show_results_names,
+ show_groups=show_groups,
+ diagram_kwargs=kwargs,
+ )
+ if isinstance(output_html, (str, Path)):
+ with open(output_html, "w", encoding="utf-8") as diag_file:
+ diag_file.write(railroad_to_html(railroad))
+ else:
+ # we were passed a file-like object, just write to it
+ output_html.write(railroad_to_html(railroad))
+
+ setDefaultWhitespaceChars = set_default_whitespace_chars
+ inlineLiteralsUsing = inline_literals_using
+ setResultsName = set_results_name
+ setBreak = set_break
+ setParseAction = set_parse_action
+ addParseAction = add_parse_action
+ addCondition = add_condition
+ setFailAction = set_fail_action
+ tryParse = try_parse
+ canParseNext = can_parse_next
+ resetCache = reset_cache
+ enableLeftRecursion = enable_left_recursion
+ enablePackrat = enable_packrat
+ parseString = parse_string
+ scanString = scan_string
+ searchString = search_string
+ transformString = transform_string
+ setWhitespaceChars = set_whitespace_chars
+ parseWithTabs = parse_with_tabs
+ setDebugActions = set_debug_actions
+ setDebug = set_debug
+ defaultName = default_name
+ setName = set_name
+ parseFile = parse_file
+ runTests = run_tests
+ ignoreWhitespace = ignore_whitespace
+ leaveWhitespace = leave_whitespace
+
+
+class _PendingSkip(ParserElement):
+ # internal placeholder class to hold a place where '...' is added to a parser element;
+ # once another ParserElement is added, this placeholder will be replaced with a SkipTo
+ def __init__(self, expr: ParserElement, must_skip: bool = False):
+ super().__init__()
+ self.anchor = expr
+ self.must_skip = must_skip
+
+ def _generateDefaultName(self):
+ return str(self.anchor + Empty()).replace("Empty", "...")
+
+ def __add__(self, other) -> "ParserElement":
+ skipper = SkipTo(other).set_name("...")("_skipped*")
+ if self.must_skip:
+
+ def must_skip(t):
+ if not t._skipped or t._skipped.as_list() == [""]:
+ del t[0]
+ t.pop("_skipped", None)
+
+ def show_skip(t):
+ if t._skipped.as_list()[-1:] == [""]:
+ t.pop("_skipped")
+ t["_skipped"] = "missing <" + repr(self.anchor) + ">"
+
+ return (
+ self.anchor + skipper().add_parse_action(must_skip)
+ | skipper().add_parse_action(show_skip)
+ ) + other
+
+ return self.anchor + skipper + other
+
+ def __repr__(self):
+ return self.defaultName
+
+ def parseImpl(self, *args):
+ raise Exception(
+ "use of `...` expression without following SkipTo target expression"
+ )
+
+
+class Token(ParserElement):
+ """Abstract :class:`ParserElement` subclass, for defining atomic
+ matching patterns.
+ """
+
+ def __init__(self):
+ super().__init__(savelist=False)
+
+ def _generateDefaultName(self):
+ return type(self).__name__
+
+
+class Empty(Token):
+ """
+ An empty token, will always match.
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+
+class NoMatch(Token):
+ """
+ A token that will never match.
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.errmsg = "Unmatchable token"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+ """
+ Token to exactly match a specified string.
+
+ Example::
+
+ Literal('blah').parse_string('blah') # -> ['blah']
+ Literal('blah').parse_string('blahfooblah') # -> ['blah']
+ Literal('blah').parse_string('bla') # -> Exception: Expected "blah"
+
+ For case-insensitive matching, use :class:`CaselessLiteral`.
+
+ For keyword matching (force word break before and after the matched string),
+ use :class:`Keyword` or :class:`CaselessKeyword`.
+ """
+
+ def __init__(self, match_string: str = "", *, matchString: str = ""):
+ super().__init__()
+ match_string = matchString or match_string
+ self.match = match_string
+ self.matchLen = len(match_string)
+ try:
+ self.firstMatchChar = match_string[0]
+ except IndexError:
+ raise ValueError("null string passed to Literal; use Empty() instead")
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+
+ # Performance tuning: modify __class__ to select
+ # a parseImpl optimized for single-character check
+ if self.matchLen == 1 and type(self) is Literal:
+ self.__class__ = _SingleCharLiteral
+
+ def _generateDefaultName(self):
+ return repr(self.match)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] == self.firstMatchChar and instring.startswith(
+ self.match, loc
+ ):
+ return loc + self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class _SingleCharLiteral(Literal):
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] == self.firstMatchChar:
+ return loc + 1, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+ParserElement._literalStringClass = Literal
+
+
+class Keyword(Token):
+ """
+ Token to exactly match a specified string as a keyword, that is,
+ it must be immediately followed by a non-keyword character. Compare
+ with :class:`Literal`:
+
+ - ``Literal("if")`` will match the leading ``'if'`` in
+ ``'ifAndOnlyIf'``.
+ - ``Keyword("if")`` will not; it will only match the leading
+ ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
+
+ Accepts two optional constructor arguments in addition to the
+ keyword string:
+
+ - ``identChars`` is a string of characters that would be valid
+ identifier characters, defaulting to all alphanumerics + "_" and
+ "$"
+ - ``caseless`` allows case-insensitive matching, default is ``False``.
+
+ Example::
+
+ Keyword("start").parse_string("start") # -> ['start']
+ Keyword("start").parse_string("starting") # -> Exception
+
+ For case-insensitive matching, use :class:`CaselessKeyword`.
+ """
+
+ DEFAULT_KEYWORD_CHARS = alphanums + "_$"
+
+ def __init__(
+ self,
+ match_string: str = "",
+ ident_chars: typing.Optional[str] = None,
+ caseless: bool = False,
+ *,
+ matchString: str = "",
+ identChars: typing.Optional[str] = None,
+ ):
+ super().__init__()
+ identChars = identChars or ident_chars
+ if identChars is None:
+ identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ match_string = matchString or match_string
+ self.match = match_string
+ self.matchLen = len(match_string)
+ try:
+ self.firstMatchChar = match_string[0]
+ except IndexError:
+ raise ValueError("null string passed to Keyword; use Empty() instead")
+ self.errmsg = "Expected {} {}".format(type(self).__name__, self.name)
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+ self.caseless = caseless
+ if caseless:
+ self.caselessmatch = match_string.upper()
+ identChars = identChars.upper()
+ self.identChars = set(identChars)
+
+ def _generateDefaultName(self):
+ return repr(self.match)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ errmsg = self.errmsg
+ errloc = loc
+ if self.caseless:
+ if instring[loc : loc + self.matchLen].upper() == self.caselessmatch:
+ if loc == 0 or instring[loc - 1].upper() not in self.identChars:
+ if (
+ loc >= len(instring) - self.matchLen
+ or instring[loc + self.matchLen].upper() not in self.identChars
+ ):
+ return loc + self.matchLen, self.match
+ else:
+ # followed by keyword char
+ errmsg += ", was immediately followed by keyword character"
+ errloc = loc + self.matchLen
+ else:
+ # preceded by keyword char
+ errmsg += ", keyword was immediately preceded by keyword character"
+ errloc = loc - 1
+ # else no match just raise plain exception
+
+ else:
+ if (
+ instring[loc] == self.firstMatchChar
+ and self.matchLen == 1
+ or instring.startswith(self.match, loc)
+ ):
+ if loc == 0 or instring[loc - 1] not in self.identChars:
+ if (
+ loc >= len(instring) - self.matchLen
+ or instring[loc + self.matchLen] not in self.identChars
+ ):
+ return loc + self.matchLen, self.match
+ else:
+ # followed by keyword char
+ errmsg += (
+ ", keyword was immediately followed by keyword character"
+ )
+ errloc = loc + self.matchLen
+ else:
+ # preceded by keyword char
+ errmsg += ", keyword was immediately preceded by keyword character"
+ errloc = loc - 1
+ # else no match just raise plain exception
+
+ raise ParseException(instring, errloc, errmsg, self)
+
+ @staticmethod
+ def set_default_keyword_chars(chars) -> None:
+ """
+ Overrides the default characters used by :class:`Keyword` expressions.
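+
+ Example (illustrative)::
+
+ # also treat '-' as a keyword character
+ Keyword.set_default_keyword_chars(alphanums + "_$-")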
+ """
+ Keyword.DEFAULT_KEYWORD_CHARS = chars
+
+ setDefaultKeywordChars = set_default_keyword_chars
+
+
+class CaselessLiteral(Literal):
+ """
+ Token to match a specified string, ignoring case of letters.
+ Note: the matched results will always be in the case of the given
+ match string, NOT the case of the input text.
+
+ Example::
+
+ CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10")
+ # -> ['CMD', 'CMD', 'CMD']
+
+ (Contrast with example for :class:`CaselessKeyword`.)
+ """
+
+ def __init__(self, match_string: str = "", *, matchString: str = ""):
+ match_string = matchString or match_string
+ super().__init__(match_string.upper())
+ # Preserve the defining literal.
+ self.returnString = match_string
+ self.errmsg = "Expected " + self.name
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc : loc + self.matchLen].upper() == self.match:
+ return loc + self.matchLen, self.returnString
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class CaselessKeyword(Keyword):
+ """
+ Caseless version of :class:`Keyword`.
+
+ Example::
+
+ CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10")
+ # -> ['CMD', 'CMD']
+
+ (Contrast with example for :class:`CaselessLiteral`.)
+ """
+
+ def __init__(
+ self,
+ match_string: str = "",
+ ident_chars: typing.Optional[str] = None,
+ *,
+ matchString: str = "",
+ identChars: typing.Optional[str] = None,
+ ):
+ identChars = identChars or ident_chars
+ match_string = matchString or match_string
+ super().__init__(match_string, identChars, caseless=True)
+
+
+class CloseMatch(Token):
+ """A variation on :class:`Literal` which matches "close" matches,
+ that is, strings with at most 'n' mismatching characters.
+ :class:`CloseMatch` takes parameters:
+
+ - ``match_string`` - string to be matched
+ - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters
+ - ``max_mismatches`` - (``default=1``) maximum number of
+ mismatches allowed to count as a match
+
+ The results from a successful parse will contain the matched text
+ from the input string and the following named results:
+
+ - ``mismatches`` - a list of the positions within the
+ match_string where mismatches were found
+ - ``original`` - the original match_string used to compare
+ against the input string
+
+ If ``mismatches`` is an empty list, then the match was an exact
+ match.
+
+ Example::
+
+ patt = CloseMatch("ATCATCGAATGGA")
+ patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+ patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+ # exact match
+ patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+ # close match allowing up to 2 mismatches
+ patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2)
+ patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+ """
+
+ def __init__(
+ self,
+ match_string: str,
+ max_mismatches: int = None,
+ *,
+ maxMismatches: int = 1,
+ caseless=False,
+ ):
+ maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches
+ super().__init__()
+ self.match_string = match_string
+ self.maxMismatches = maxMismatches
+ self.errmsg = "Expected {!r} (with up to {} mismatches)".format(
+ self.match_string, self.maxMismatches
+ )
+ self.caseless = caseless
+ self.mayIndexError = False
+ self.mayReturnEmpty = False
+
+ def _generateDefaultName(self):
+ return "{}:{!r}".format(type(self).__name__, self.match_string)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ start = loc
+ instrlen = len(instring)
+ maxloc = start + len(self.match_string)
+
+ if maxloc <= instrlen:
+ match_string = self.match_string
+ match_stringloc = 0
+ mismatches = []
+ maxMismatches = self.maxMismatches
+
+ for match_stringloc, s_m in enumerate(
+ zip(instring[loc:maxloc], match_string)
+ ):
+ src, mat = s_m
+ if self.caseless:
+ src, mat = src.lower(), mat.lower()
+
+ if src != mat:
+ mismatches.append(match_stringloc)
+ if len(mismatches) > maxMismatches:
+ break
+ else:
+ loc = start + match_stringloc + 1
+ results = ParseResults([instring[start:loc]])
+ results["original"] = match_string
+ results["mismatches"] = mismatches
+ return loc, results
+
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+ """Token for matching words composed of allowed character sets.
+ Parameters:
+ - ``init_chars`` - string of all characters that should be used to
+ match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
+ if ``body_chars`` is also specified, then this is the string of
+ initial characters
+ - ``body_chars`` - string of characters that
+ can be used for matching after a matched initial character as
+ given in ``init_chars``; if omitted, same as the initial characters
+ (default=``None``)
+ - ``min`` - minimum number of characters to match (default=1)
+ - ``max`` - maximum number of characters to match (default=0)
+ - ``exact`` - exact number of characters to match (default=0)
+ - ``as_keyword`` - match as a keyword (default=``False``)
+ - ``exclude_chars`` - characters that might be
+ found in the input ``body_chars`` string but which should not be
+ accepted for matching; useful to define a word of all
+ printables except for one or two characters, for instance
+ (default=``None``)
+
+ :class:`srange` is useful for defining custom character set strings
+ for defining :class:`Word` expressions, using range notation from
+ regular expression character sets.
+
+ A common mistake is to use :class:`Word` to match a specific literal
+ string, as in ``Word("Address")``. Remember that :class:`Word`
+ uses the string argument to define *sets* of matchable characters.
+ This expression would match "Add", "AAA", "dAred", or any other word
+ made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
+ exact literal string, use :class:`Literal` or :class:`Keyword`.
+
+ pyparsing includes helper strings for building Words:
+
+ - :class:`alphas`
+ - :class:`nums`
+ - :class:`alphanums`
+ - :class:`hexnums`
+ - :class:`alphas8bit` (alphabetic characters in the Latin-1 range 128-255
+ - accented, tilded, umlauted, etc.)
+ - :class:`punc8bit` (non-alphabetic characters in the Latin-1 range
+ 128-255 - currency, symbols, superscripts, diacriticals, etc.)
+ - :class:`printables` (any non-whitespace character)
+
+ ``alphas``, ``nums``, and ``printables`` are also defined in several
+ Unicode sets - see :class:`pyparsing_unicode`.
+
+ Example::
+
+ # a word composed of digits
+ integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+
+ # a word with a leading capital, and zero or more lowercase
+ capital_word = Word(alphas.upper(), alphas.lower())
+
+ # hostnames are alphanumeric, with leading alpha, and '-'
+ hostname = Word(alphas, alphanums + '-')
+
+ # roman numeral (not a strict parser, accepts invalid mix of characters)
+ roman = Word("IVXLCDM")
+
+ # any string of non-whitespace characters, except for ','
+ csv_value = Word(printables, exclude_chars=",")
+ """
+
+ def __init__(
+ self,
+ init_chars: str = "",
+ body_chars: typing.Optional[str] = None,
+ min: int = 1,
+ max: int = 0,
+ exact: int = 0,
+ as_keyword: bool = False,
+ exclude_chars: typing.Optional[str] = None,
+ *,
+ initChars: typing.Optional[str] = None,
+ bodyChars: typing.Optional[str] = None,
+ asKeyword: bool = False,
+ excludeChars: typing.Optional[str] = None,
+ ):
+ initChars = initChars or init_chars
+ bodyChars = bodyChars or body_chars
+ asKeyword = asKeyword or as_keyword
+ excludeChars = excludeChars or exclude_chars
+ super().__init__()
+ if not initChars:
+ raise ValueError(
+ "invalid {}, initChars cannot be empty string".format(
+ type(self).__name__
+ )
+ )
+
+ initChars = set(initChars)
+ self.initChars = initChars
+ if excludeChars:
+ excludeChars = set(excludeChars)
+ initChars -= excludeChars
+ if bodyChars:
+ bodyChars = set(bodyChars) - excludeChars
+ self.initCharsOrig = "".join(sorted(initChars))
+
+ if bodyChars:
+ self.bodyCharsOrig = "".join(sorted(bodyChars))
+ self.bodyChars = set(bodyChars)
+ else:
+ self.bodyCharsOrig = "".join(sorted(initChars))
+ self.bodyChars = set(initChars)
+
+ self.maxSpecified = max > 0
+
+ if min < 1:
+ raise ValueError(
+ "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
+ )
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.asKeyword = asKeyword
+
+ # see if we can make a regex for this Word
+ if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0):
+ if self.bodyChars == self.initChars:
+ if max == 0:
+ repeat = "+"
+ elif max == 1:
+ repeat = ""
+ else:
+ repeat = "{{{},{}}}".format(
+ self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen
+ )
+ self.reString = "[{}]{}".format(
+ _collapse_string_to_ranges(self.initChars),
+ repeat,
+ )
+ elif len(self.initChars) == 1:
+ if max == 0:
+ repeat = "*"
+ else:
+ repeat = "{{0,{}}}".format(max - 1)
+ self.reString = "{}[{}]{}".format(
+ re.escape(self.initCharsOrig),
+ _collapse_string_to_ranges(self.bodyChars),
+ repeat,
+ )
+ else:
+ if max == 0:
+ repeat = "*"
+ elif max == 2:
+ repeat = ""
+ else:
+ repeat = "{{0,{}}}".format(max - 1)
+ self.reString = "[{}][{}]{}".format(
+ _collapse_string_to_ranges(self.initChars),
+ _collapse_string_to_ranges(self.bodyChars),
+ repeat,
+ )
+ if self.asKeyword:
+ self.reString = r"\b" + self.reString + r"\b"
+
+ try:
+ self.re = re.compile(self.reString)
+ except re.error:
+ self.re = None
+ else:
+ self.re_match = self.re.match
+ self.__class__ = _WordRegex
+
+ def _generateDefaultName(self):
+ def charsAsStr(s):
+ max_repr_len = 16
+ s = _collapse_string_to_ranges(s, re_escape=False)
+ if len(s) > max_repr_len:
+ return s[: max_repr_len - 3] + "..."
+ else:
+ return s
+
+ if self.initChars != self.bodyChars:
+ base = "W:({}, {})".format(
+ charsAsStr(self.initChars), charsAsStr(self.bodyChars)
+ )
+ else:
+ base = "W:({})".format(charsAsStr(self.initChars))
+
+ # add length specification
+ if self.minLen > 1 or self.maxLen != _MAX_INT:
+ if self.minLen == self.maxLen:
+ if self.minLen == 1:
+ return base[2:]
+ else:
+ return base + "{{{}}}".format(self.minLen)
+ elif self.maxLen == _MAX_INT:
+ return base + "{{{},...}}".format(self.minLen)
+ else:
+ return base + "{{{},{}}}".format(self.minLen, self.maxLen)
+ return base
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] not in self.initChars:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ instrlen = len(instring)
+ bodychars = self.bodyChars
+ maxloc = start + self.maxLen
+ maxloc = min(maxloc, instrlen)
+ while loc < maxloc and instring[loc] in bodychars:
+ loc += 1
+
+ throwException = False
+ if loc - start < self.minLen:
+ throwException = True
+ elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+ throwException = True
+ elif self.asKeyword:
+ if (
+ start > 0
+ and instring[start - 1] in bodychars
+ or loc < instrlen
+ and instring[loc] in bodychars
+ ):
+ throwException = True
+
+ if throwException:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+
+class _WordRegex(Word):
+ def parseImpl(self, instring, loc, doActions=True):
+ result = self.re_match(instring, loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ return loc, result.group()
+
+
+class Char(_WordRegex):
+ """A short-cut class for defining :class:`Word` ``(characters, exact=1)``,
+ when defining a match of any single character in a string of
+ characters.
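+
+ Example (illustrative)::
+
+ sign = Char("+-")
+ sign.parse_string("-")  # -> ['-']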
+ """
+
+ def __init__(
+ self,
+ charset: str,
+ as_keyword: bool = False,
+ exclude_chars: typing.Optional[str] = None,
+ *,
+ asKeyword: bool = False,
+ excludeChars: typing.Optional[str] = None,
+ ):
+ asKeyword = asKeyword or as_keyword
+ excludeChars = excludeChars or exclude_chars
+ super().__init__(
+ charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars
+ )
+ self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars))
+ if asKeyword:
+ self.reString = r"\b{}\b".format(self.reString)
+ self.re = re.compile(self.reString)
+ self.re_match = self.re.match
+
+
+class Regex(Token):
+ r"""Token for matching strings that match a given regular
+ expression. Defined with a string specifying the regular expression in
+ a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
+ If the given regex contains named groups (defined using ``(?P<name>...)``),
+ these will be preserved as named :class:`ParseResults`.
+
+ If instead of the Python stdlib ``re`` module you wish to use a different RE module
+ (such as the ``regex`` module), you can do so by building your ``Regex`` object with
+ a compiled RE that was compiled using ``regex``.
+
+ Example::
+
+ realnum = Regex(r"[+-]?\d+\.\d*")
+ # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+ roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+
+ # named fields in a regex will be returned as named results
+ date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+
+ # the Regex class will accept re's compiled using the regex module
+ import regex
+ parser = pp.Regex(regex.compile(r'[0-9]'))
+ """
+
+ def __init__(
+ self,
+ pattern: Any,
+ flags: Union[re.RegexFlag, int] = 0,
+ as_group_list: bool = False,
+ as_match: bool = False,
+ *,
+ asGroupList: bool = False,
+ asMatch: bool = False,
+ ):
+ """The parameters ``pattern`` and ``flags`` are passed
+ to the ``re.compile()`` function as-is. See the Python
+ `re module <https://docs.python.org/3/library/re.html>`_ documentation for an
+ explanation of the acceptable patterns and flags.
+ """
+ super().__init__()
+ asGroupList = asGroupList or as_group_list
+ asMatch = asMatch or as_match
+
+ if isinstance(pattern, str_type):
+ if not pattern:
+ raise ValueError("null string passed to Regex; use Empty() instead")
+
+ self._re = None
+ self.reString = self.pattern = pattern
+ self.flags = flags
+
+ elif hasattr(pattern, "pattern") and hasattr(pattern, "match"):
+ self._re = pattern
+ self.pattern = self.reString = pattern.pattern
+ self.flags = flags
+
+ else:
+ raise TypeError(
+ "Regex may only be constructed with a string or a compiled RE object"
+ )
+
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.asGroupList = asGroupList
+ self.asMatch = asMatch
+ if self.asGroupList:
+ self.parseImpl = self.parseImplAsGroupList
+ if self.asMatch:
+ self.parseImpl = self.parseImplAsMatch
+
+ @cached_property
+ def re(self):
+ if self._re:
+ return self._re
+ else:
+ try:
+ return re.compile(self.pattern, self.flags)
+ except re.error:
+ raise ValueError(
+ "invalid pattern ({!r}) passed to Regex".format(self.pattern)
+ )
+
+ @cached_property
+ def re_match(self):
+ return self.re.match
+
+ @cached_property
+ def mayReturnEmpty(self):
+ return self.re_match("") is not None
+
+ def _generateDefaultName(self):
+ return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\"))
+
+ def parseImpl(self, instring, loc, doActions=True):
+ result = self.re_match(instring, loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = ParseResults(result.group())
+ d = result.groupdict()
+ if d:
+ for k, v in d.items():
+ ret[k] = v
+ return loc, ret
+
+ def parseImplAsGroupList(self, instring, loc, doActions=True):
+ result = self.re_match(instring, loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result.groups()
+ return loc, ret
+
+ def parseImplAsMatch(self, instring, loc, doActions=True):
+ result = self.re_match(instring, loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result
+ return loc, ret
+
+ def sub(self, repl: str) -> ParserElement:
+ r"""
+ Return :class:`Regex` with an attached parse action to transform the parsed
+ result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
+
+ Example::
+
+ make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
+ print(make_html.transform_string("h1:main title:"))
+ # prints "<h1>main title</h1>"
+ """
+ if self.asGroupList:
+ raise TypeError("cannot use sub() with Regex(asGroupList=True)")
+
+ if self.asMatch and callable(repl):
+ raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)")
+
+ if self.asMatch:
+
+ def pa(tokens):
+ return tokens[0].expand(repl)
+
+ else:
+
+ def pa(tokens):
+ return self.re.sub(repl, tokens[0])
+
+ return self.add_parse_action(pa)
+
+
+class QuotedString(Token):
+ r"""
+ Token for matching strings that are delimited by quoting characters.
+
+ Defined with the following parameters:
+
+ - ``quote_char`` - string of one or more characters defining the
+ quote delimiting string
+ - ``esc_char`` - character to escape quotes, typically backslash
+ (default= ``None``)
+ - ``esc_quote`` - special quote sequence to escape an embedded quote
+ string (such as SQL's ``""`` to escape an embedded ``"``)
+ (default= ``None``)
+ - ``multiline`` - boolean indicating whether quotes can span
+ multiple lines (default= ``False``)
+ - ``unquote_results`` - boolean indicating whether the matched text
+ should be unquoted (default= ``True``)
+ - ``end_quote_char`` - string of one or more characters defining the
+ end of the quote delimited string (default= ``None`` => same as
+ quote_char)
+ - ``convert_whitespace_escapes`` - convert escaped whitespace
+ (``'\t'``, ``'\n'``, etc.) to actual whitespace
+ (default= ``True``)
+
+ Example::
+
+ qs = QuotedString('"')
+ print(qs.search_string('lsjdf "This is the quote" sldjf'))
+ complex_qs = QuotedString('{{', end_quote_char='}}')
+ print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf'))
+ sql_qs = QuotedString('"', esc_quote='""')
+ print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+
+ prints::
+
+ [['This is the quote']]
+ [['This is the "quote"']]
+ [['This is the quote with "embedded" quotes']]
+ """
+ ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r"))
+
+ def __init__(
+ self,
+ quote_char: str = "",
+ esc_char: typing.Optional[str] = None,
+ esc_quote: typing.Optional[str] = None,
+ multiline: bool = False,
+ unquote_results: bool = True,
+ end_quote_char: typing.Optional[str] = None,
+ convert_whitespace_escapes: bool = True,
+ *,
+ quoteChar: str = "",
+ escChar: typing.Optional[str] = None,
+ escQuote: typing.Optional[str] = None,
+ unquoteResults: bool = True,
+ endQuoteChar: typing.Optional[str] = None,
+ convertWhitespaceEscapes: bool = True,
+ ):
+ super().__init__()
+ escChar = escChar or esc_char
+ escQuote = escQuote or esc_quote
+ unquoteResults = unquoteResults and unquote_results
+ endQuoteChar = endQuoteChar or end_quote_char
+ convertWhitespaceEscapes = (
+ convertWhitespaceEscapes and convert_whitespace_escapes
+ )
+ quote_char = quoteChar or quote_char
+
+ # remove white space from quote chars - won't work anyway
+ quote_char = quote_char.strip()
+ if not quote_char:
+ raise ValueError("quote_char cannot be the empty string")
+
+ if endQuoteChar is None:
+ endQuoteChar = quote_char
+ else:
+ endQuoteChar = endQuoteChar.strip()
+ if not endQuoteChar:
+ raise ValueError("endQuoteChar cannot be the empty string")
+
+ self.quoteChar = quote_char
+ self.quoteCharLen = len(quote_char)
+ self.firstQuoteChar = quote_char[0]
+ self.endQuoteChar = endQuoteChar
+ self.endQuoteCharLen = len(endQuoteChar)
+ self.escChar = escChar
+ self.escQuote = escQuote
+ self.unquoteResults = unquoteResults
+ self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+ sep = ""
+ inner_pattern = ""
+
+ if escQuote:
+ inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote))
+ sep = "|"
+
+ if escChar:
+ inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar))
+ sep = "|"
+ self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
+
+ if len(self.endQuoteChar) > 1:
+ inner_pattern += (
+ "{}(?:".format(sep)
+ + "|".join(
+ "(?:{}(?!{}))".format(
+ re.escape(self.endQuoteChar[:i]),
+ re.escape(self.endQuoteChar[i:]),
+ )
+ for i in range(len(self.endQuoteChar) - 1, 0, -1)
+ )
+ + ")"
+ )
+ sep = "|"
+
+ if multiline:
+ self.flags = re.MULTILINE | re.DOTALL
+ inner_pattern += r"{}(?:[^{}{}])".format(
+ sep,
+ _escape_regex_range_chars(self.endQuoteChar[0]),
+ (_escape_regex_range_chars(escChar) if escChar is not None else ""),
+ )
+ else:
+ self.flags = 0
+ inner_pattern += r"{}(?:[^{}\n\r{}])".format(
+ sep,
+ _escape_regex_range_chars(self.endQuoteChar[0]),
+ (_escape_regex_range_chars(escChar) if escChar is not None else ""),
+ )
+
+ self.pattern = "".join(
+ [
+ re.escape(self.quoteChar),
+ "(?:",
+ inner_pattern,
+ ")*",
+ re.escape(self.endQuoteChar),
+ ]
+ )
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ self.re_match = self.re.match
+ except re.error:
+ raise ValueError(
+ "invalid pattern {!r} passed to Regex".format(self.pattern)
+ )
+
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = True
+
+ def _generateDefaultName(self):
+ if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type):
+ return "string enclosed in {!r}".format(self.quoteChar)
+
+ return "quoted string, starting with {} ending with {}".format(
+ self.quoteChar, self.endQuoteChar
+ )
+
+ def parseImpl(self, instring, loc, doActions=True):
+ result = (
+ instring[loc] == self.firstQuoteChar
+ and self.re_match(instring, loc)
+ or None
+ )
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result.group()
+
+ if self.unquoteResults:
+
+ # strip off quotes
+ ret = ret[self.quoteCharLen : -self.endQuoteCharLen]
+
+ if isinstance(ret, str_type):
+ # replace escaped whitespace
+ if "\\" in ret and self.convertWhitespaceEscapes:
+ for wslit, wschar in self.ws_map:
+ ret = ret.replace(wslit, wschar)
+
+ # replace escaped characters
+ if self.escChar:
+ ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+ # replace escaped quotes
+ if self.escQuote:
+ ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+ return loc, ret
+
+
+class CharsNotIn(Token):
+ """Token for matching words composed of characters *not* in a given
+ set (will include whitespace in matched characters if not listed in
+ the provided exclusion set - see example). Defined with a string
+ containing all disallowed characters, and an optional minimum,
+ maximum, and/or exact length. The default value for ``min`` is
+ 1 (a minimum value < 1 is not valid); the default values for
+ ``max`` and ``exact`` are 0, meaning no maximum or exact
+ length restriction.
+
+ Example::
+
+ # define a comma-separated-value as anything that is not a ','
+ csv_value = CharsNotIn(',')
+ print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213"))
+
+ prints::
+
+ ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+ """
+
+ def __init__(
+ self,
+ not_chars: str = "",
+ min: int = 1,
+ max: int = 0,
+ exact: int = 0,
+ *,
+ notChars: str = "",
+ ):
+ super().__init__()
+ self.skipWhitespace = False
+ self.notChars = not_chars or notChars
+ self.notCharsSet = set(self.notChars)
+
+ if min < 1:
+ raise ValueError(
+ "cannot specify a minimum length < 1; use "
+ "Opt(CharsNotIn()) if zero-length char group is permitted"
+ )
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = self.minLen == 0
+ self.mayIndexError = False
+
+ def _generateDefaultName(self):
+ not_chars_str = _collapse_string_to_ranges(self.notChars)
+ if len(not_chars_str) > 16:
+ return "!W:({}...)".format(not_chars_str[: 16 - 3])
+ else:
+ return "!W:({})".format(not_chars_str)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ notchars = self.notCharsSet
+ if instring[loc] in notchars:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ maxlen = min(start + self.maxLen, len(instring))
+ while loc < maxlen and instring[loc] not in notchars:
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+
+class White(Token):
+ """Special matching class for matching whitespace. Normally,
+ whitespace is ignored by pyparsing grammars. This class is included
+ when some whitespace structures are significant. Define with
+ a string containing the whitespace characters to be matched; default
+ is ``" \\t\\r\\n"``. Also takes optional ``min``,
+ ``max``, and ``exact`` arguments, as defined for the
+ :class:`Word` class.
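+
+ Example (an illustrative sketch, treating runs of spaces as
+ significant; names are arbitrary)::
+
+ two_cols = Word(alphas) + White(" ").suppress() + Word(nums)
+ print(two_cols.parse_string("total 42"))
+
+ prints::
+
+ ['total', '42']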
+ """
+
+ whiteStrs = {
+ " ": "<SP>",
+ "\t": "<TAB>",
+ "\n": "<LF>",
+ "\r": "<CR>",
+ "\f": "<FF>",
+ "\u00A0": "<NBSP>",
+ "\u1680": "<OGHAM_SPACE_MARK>",
+ "\u180E": "<MONGOLIAN_VOWEL_SEPARATOR>",
+ "\u2000": "<EN_QUAD>",
+ "\u2001": "<EM_QUAD>",
+ "\u2002": "<EN_SPACE>",
+ "\u2003": "<EM_SPACE>",
+ "\u2004": "<THREE-PER-EM_SPACE>",
+ "\u2005": "<FOUR-PER-EM_SPACE>",
+ "\u2006": "<SIX-PER-EM_SPACE>",
+ "\u2007": "<FIGURE_SPACE>",
+ "\u2008": "<PUNCTUATION_SPACE>",
+ "\u2009": "<THIN_SPACE>",
+ "\u200A": "<HAIR_SPACE>",
+ "\u200B": "<ZERO_WIDTH_SPACE>",
+ "\u202F": "<NNBSP>",
+ "\u205F": "<MMSP>",
+ "\u3000": "<IDEOGRAPHIC_SPACE>",
+ }
+
+ def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0):
+ super().__init__()
+ self.matchWhite = ws
+ self.set_whitespace_chars(
+ "".join(c for c in self.whiteStrs if c not in self.matchWhite),
+ copy_defaults=True,
+ )
+ # self.leave_whitespace()
+ self.mayReturnEmpty = True
+ self.errmsg = "Expected " + self.name
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ def _generateDefaultName(self):
+ return "".join(White.whiteStrs[c] for c in self.matchWhite)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] not in self.matchWhite:
+ raise ParseException(instring, loc, self.errmsg, self)
+ start = loc
+ loc += 1
+ maxloc = start + self.maxLen
+ maxloc = min(maxloc, len(instring))
+ while loc < maxloc and instring[loc] in self.matchWhite:
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+
+class PositionToken(Token):
+ def __init__(self):
+ super().__init__()
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+
+class GoToColumn(PositionToken):
+ """Token to advance to a specific column of input text; useful for
+ tabular report scraping.
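+
+ Example (a minimal sketch, assuming a fixed-width layout in which the
+ count field always starts at column 21)::
+
+ line = "Total               42"
+ row = Word(alphas) + GoToColumn(21).suppress() + Word(nums)
+ print(row.parse_string(line))
+
+ prints::
+
+ ['Total', '42']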
+ """
+
+ def __init__(self, colno: int):
+ super().__init__()
+ self.col = colno
+
+ def preParse(self, instring, loc):
+ if col(loc, instring) != self.col:
+ instrlen = len(instring)
+ if self.ignoreExprs:
+ loc = self._skipIgnorables(instring, loc)
+ while (
+ loc < instrlen
+ and instring[loc].isspace()
+ and col(loc, instring) != self.col
+ ):
+ loc += 1
+ return loc
+
+ def parseImpl(self, instring, loc, doActions=True):
+ thiscol = col(loc, instring)
+ if thiscol > self.col:
+ raise ParseException(instring, loc, "Text not in expected column", self)
+ newloc = loc + self.col - thiscol
+ ret = instring[loc:newloc]
+ return newloc, ret
+
+
+class LineStart(PositionToken):
+ r"""Matches if current position is at the beginning of a line within
+ the parse string
+
+ Example::
+
+ test = '''\
+ AAA this line
+ AAA and this line
+ AAA but not this one
+ B AAA and definitely not this one
+ '''
+
+ for t in (LineStart() + 'AAA' + restOfLine).search_string(test):
+ print(t)
+
+ prints::
+
+ ['AAA', ' this line']
+ ['AAA', ' and this line']
+
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.leave_whitespace()
+ self.orig_whiteChars = set() | self.whiteChars
+ self.whiteChars.discard("\n")
+ self.skipper = Empty().set_whitespace_chars(self.whiteChars)
+ self.errmsg = "Expected start of line"
+
+ def preParse(self, instring, loc):
+ if loc == 0:
+ return loc
+ else:
+ ret = self.skipper.preParse(instring, loc)
+ if "\n" in self.orig_whiteChars:
+ while instring[ret : ret + 1] == "\n":
+ ret = self.skipper.preParse(instring, ret + 1)
+ return ret
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if col(loc, instring) == 1:
+ return loc, []
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class LineEnd(PositionToken):
+ """Matches if current position is at the end of a line within the
+ parse string
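+
+ Example (an illustrative sketch; ``report_text`` is an assumed
+ multiline input string)::
+
+ # match an integer only when it is the last item on its line
+ eol_int = Word(nums) + LineEnd().suppress()
+ for t in eol_int.search_string(report_text):
+ print(t)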
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.whiteChars.discard("\n")
+ self.set_whitespace_chars(self.whiteChars, copy_defaults=False)
+ self.errmsg = "Expected end of line"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc < len(instring):
+ if instring[loc] == "\n":
+ return loc + 1, "\n"
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc + 1, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class StringStart(PositionToken):
+ """Matches if current position is at the beginning of the parse
+ string
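+
+ Example (an illustrative sketch)::
+
+ # match a keyword only at the very start of the input
+ # (leading whitespace is still skipped)
+ header = StringStart() + Word(alphas)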
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.errmsg = "Expected start of text"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc != 0:
+ # see if the entire string up to here is just whitespace and ignorables
+ if loc != self.preParse(instring, 0):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+
+class StringEnd(PositionToken):
+ """
+ Matches if the current position is at the end of the parse string
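+
+ Example (an illustrative sketch)::
+
+ # require that the grammar consume the entire input
+ complete = Word(nums) + StringEnd()
+ complete.parse_string("123") # -> ['123']
+ # complete.parse_string("123 456") raises ParseException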
+ """
+
+ def __init__(self):
+ super().__init__()
+ self.errmsg = "Expected end of text"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc < len(instring):
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc + 1, []
+ elif loc > len(instring):
+ return loc, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class WordStart(PositionToken):
+ """Matches if the current position is at the beginning of a
+ :class:`Word`, and is not preceded by any character in a given
+ set of ``word_chars`` (default= ``printables``). To emulate the
+ ``\b`` behavior of regular expressions, use
+ ``WordStart(alphanums)``. ``WordStart`` will also match at
+ the beginning of the string being parsed, or at the beginning of
+ a line.
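+
+ Example (an illustrative sketch)::
+
+ # match 'ship' only at the start of a word: matches in "shipping"
+ # and "shipment", but not in "flagship"
+ ship = WordStart(alphanums) + Literal("ship")
+ print(ship.search_string("shipping flagship shipment"))
+
+ prints::
+
+ [['ship'], ['ship']]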
+ """
+
+ def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
+ wordChars = word_chars if wordChars == printables else wordChars
+ super().__init__()
+ self.wordChars = set(wordChars)
+ self.errmsg = "Not at the start of a word"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc != 0:
+ if (
+ instring[loc - 1] in self.wordChars
+ or instring[loc] not in self.wordChars
+ ):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+
+class WordEnd(PositionToken):
+ """Matches if the current position is at the end of a :class:`Word`,
+ and is not followed by any character in a given set of ``word_chars``
+ (default= ``printables``). To emulate the ``\b`` behavior of
+ regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
+ will also match at the end of the string being parsed, or at the end
+ of a line.
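+
+ Example (an illustrative sketch)::
+
+ # match 'ship' only at the end of a word: matches in "flagship",
+ # but not in "shipping" or "shipment"
+ ship = Literal("ship") + WordEnd(alphanums)
+ print(ship.search_string("shipping flagship shipment"))
+
+ prints::
+
+ [['ship']]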
+ """
+
+ def __init__(self, word_chars: str = printables, *, wordChars: str = printables):
+ wordChars = word_chars if wordChars == printables else wordChars
+ super().__init__()
+ self.wordChars = set(wordChars)
+ self.skipWhitespace = False
+ self.errmsg = "Not at the end of a word"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ instrlen = len(instring)
+ if instrlen > 0 and loc < instrlen:
+ if (
+ instring[loc] in self.wordChars
+ or instring[loc - 1] not in self.wordChars
+ ):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+
+class ParseExpression(ParserElement):
+ """Abstract subclass of ParserElement, for combining and
+ post-processing parsed tokens.
+ """
+
+ def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
+ super().__init__(savelist)
+ self.exprs: List[ParserElement]
+ if isinstance(exprs, _generatorType):
+ exprs = list(exprs)
+
+ if isinstance(exprs, str_type):
+ self.exprs = [self._literalStringClass(exprs)]
+ elif isinstance(exprs, ParserElement):
+ self.exprs = [exprs]
+ elif isinstance(exprs, Iterable):
+ exprs = list(exprs)
+ # if sequence of strings provided, wrap with Literal
+ if any(isinstance(expr, str_type) for expr in exprs):
+ exprs = (
+ self._literalStringClass(e) if isinstance(e, str_type) else e
+ for e in exprs
+ )
+ self.exprs = list(exprs)
+ else:
+ try:
+ self.exprs = list(exprs)
+ except TypeError:
+ self.exprs = [exprs]
+ self.callPreparse = False
+
+ def recurse(self) -> Sequence[ParserElement]:
+ return self.exprs[:]
+
+ def append(self, other) -> ParserElement:
+ self.exprs.append(other)
+ self._defaultName = None
+ return self
+
+ def leave_whitespace(self, recursive: bool = True) -> ParserElement:
+ """
+ Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
+ all contained expressions.
+ """
+ super().leave_whitespace(recursive)
+
+ if recursive:
+ self.exprs = [e.copy() for e in self.exprs]
+ for e in self.exprs:
+ e.leave_whitespace(recursive)
+ return self
+
+ def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
+ """
+ Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on
+ all contained expressions.
+ """
+ super().ignore_whitespace(recursive)
+ if recursive:
+ self.exprs = [e.copy() for e in self.exprs]
+ for e in self.exprs:
+ e.ignore_whitespace(recursive)
+ return self
+
+ def ignore(self, other) -> ParserElement:
+ if isinstance(other, Suppress):
+ if other not in self.ignoreExprs:
+ super().ignore(other)
+ for e in self.exprs:
+ e.ignore(self.ignoreExprs[-1])
+ else:
+ super().ignore(other)
+ for e in self.exprs:
+ e.ignore(self.ignoreExprs[-1])
+ return self
+
+ def _generateDefaultName(self):
+ return "{}:({})".format(self.__class__.__name__, str(self.exprs))
+
+ def streamline(self) -> ParserElement:
+ if self.streamlined:
+ return self
+
+ super().streamline()
+
+ for e in self.exprs:
+ e.streamline()
+
+ # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)``
+ # but only if there are no parse actions or resultsNames on the nested And's
+ # (likewise for :class:`Or`'s and :class:`MatchFirst`'s)
+ if len(self.exprs) == 2:
+ other = self.exprs[0]
+ if (
+ isinstance(other, self.__class__)
+ and not other.parseAction
+ and other.resultsName is None
+ and not other.debug
+ ):
+ self.exprs = other.exprs[:] + [self.exprs[1]]
+ self._defaultName = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ other = self.exprs[-1]
+ if (
+ isinstance(other, self.__class__)
+ and not other.parseAction
+ and other.resultsName is None
+ and not other.debug
+ ):
+ self.exprs = self.exprs[:-1] + other.exprs[:]
+ self._defaultName = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ self.errmsg = "Expected " + str(self)
+
+ return self
+
+ def validate(self, validateTrace=None) -> None:
+ tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
+ for e in self.exprs:
+ e.validate(tmp)
+ self._checkRecursion([])
+
+ def copy(self) -> ParserElement:
+ ret = super().copy()
+ ret.exprs = [e.copy() for e in self.exprs]
+ return ret
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if (
+ __diag__.warn_ungrouped_named_tokens_in_collection
+ and Diagnostics.warn_ungrouped_named_tokens_in_collection
+ not in self.suppress_warnings_
+ ):
+ for e in self.exprs:
+ if (
+ isinstance(e, ParserElement)
+ and e.resultsName
+ and Diagnostics.warn_ungrouped_named_tokens_in_collection
+ not in e.suppress_warnings_
+ ):
+ warnings.warn(
+ "{}: setting results name {!r} on {} expression "
+ "collides with {!r} on contained expression".format(
+ "warn_ungrouped_named_tokens_in_collection",
+ name,
+ type(self).__name__,
+ e.resultsName,
+ ),
+ stacklevel=3,
+ )
+
+ return super()._setResultsName(name, listAllMatches)
+
+ ignoreWhitespace = ignore_whitespace
+ leaveWhitespace = leave_whitespace
+
+
+class And(ParseExpression):
+ """
+ Requires all given :class:`ParseExpression` s to be found in the given order.
+ Expressions may be separated by whitespace.
+ May be constructed using the ``'+'`` operator.
+ May also be constructed using the ``'-'`` operator, which will
+ suppress backtracking.
+
+ Example::
+
+ integer = Word(nums)
+ name_expr = Word(alphas)[1, ...]
+
+ expr = And([integer("id"), name_expr("name"), integer("age")])
+ # more easily written as:
+ expr = integer("id") + name_expr("name") + integer("age")
+ """
+
+ class _ErrorStop(Empty):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.leave_whitespace()
+
+ def _generateDefaultName(self):
+ return "-"
+
+ def __init__(
+ self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True
+ ):
+ exprs: List[ParserElement] = list(exprs_arg)
+ if exprs and Ellipsis in exprs:
+ tmp = []
+ for i, expr in enumerate(exprs):
+ if expr is Ellipsis:
+ if i < len(exprs) - 1:
+ skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1]
+ tmp.append(SkipTo(skipto_arg)("_skipped*"))
+ else:
+ raise Exception(
+ "cannot construct And with sequence ending in ..."
+ )
+ else:
+ tmp.append(expr)
+ exprs[:] = tmp
+ super().__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ if not isinstance(self.exprs[0], White):
+ self.set_whitespace_chars(
+ self.exprs[0].whiteChars,
+ copy_defaults=self.exprs[0].copyDefaultWhiteChars,
+ )
+ self.skipWhitespace = self.exprs[0].skipWhitespace
+ else:
+ self.skipWhitespace = False
+ else:
+ self.mayReturnEmpty = True
+ self.callPreparse = True
+
+ def streamline(self) -> ParserElement:
+ # collapse any _PendingSkip's
+ if self.exprs:
+ if any(
+ isinstance(e, ParseExpression)
+ and e.exprs
+ and isinstance(e.exprs[-1], _PendingSkip)
+ for e in self.exprs[:-1]
+ ):
+ for i, e in enumerate(self.exprs[:-1]):
+ if e is None:
+ continue
+ if (
+ isinstance(e, ParseExpression)
+ and e.exprs
+ and isinstance(e.exprs[-1], _PendingSkip)
+ ):
+ e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
+ self.exprs[i + 1] = None
+ self.exprs = [e for e in self.exprs if e is not None]
+
+ super().streamline()
+
+ # link any IndentedBlocks to the prior expression
+ for prev, cur in zip(self.exprs, self.exprs[1:]):
+ # traverse cur or any first embedded expr of cur looking for an IndentedBlock
+ # (but watch out for recursive grammar)
+ seen = set()
+ while cur:
+ if id(cur) in seen:
+ break
+ seen.add(id(cur))
+ if isinstance(cur, IndentedBlock):
+ prev.add_parse_action(
+ lambda s, l, t, cur_=cur: setattr(
+ cur_, "parent_anchor", col(l, s)
+ )
+ )
+ break
+ subs = cur.recurse()
+ cur = next(iter(subs), None)
+
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ # pass False as callPreParse arg to _parse for first element, since we already
+ # pre-parsed the string as part of our And pre-parsing
+ loc, resultlist = self.exprs[0]._parse(
+ instring, loc, doActions, callPreParse=False
+ )
+ errorStop = False
+ for e in self.exprs[1:]:
+ # if isinstance(e, And._ErrorStop):
+ if type(e) is And._ErrorStop:
+ errorStop = True
+ continue
+ if errorStop:
+ try:
+ loc, exprtokens = e._parse(instring, loc, doActions)
+ except ParseSyntaxException:
+ raise
+ except ParseBaseException as pe:
+ pe.__traceback__ = None
+ raise ParseSyntaxException._from_exception(pe)
+ except IndexError:
+ raise ParseSyntaxException(
+ instring, len(instring), self.errmsg, self
+ )
+ else:
+ loc, exprtokens = e._parse(instring, loc, doActions)
+ if exprtokens or exprtokens.haskeys():
+ resultlist += exprtokens
+ return loc, resultlist
+
+ def __iadd__(self, other):
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ return self.append(other) # And([self, other])
+
+ def _checkRecursion(self, parseElementList):
+ subRecCheckList = parseElementList[:] + [self]
+ for e in self.exprs:
+ e._checkRecursion(subRecCheckList)
+ if not e.mayReturnEmpty:
+ break
+
+ def _generateDefaultName(self):
+ inner = " ".join(str(e) for e in self.exprs)
+ # strip off redundant inner {}'s
+ while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
+ inner = inner[1:-1]
+ return "{" + inner + "}"
+
+
+class Or(ParseExpression):
+ """Requires that at least one :class:`ParseExpression` is found. If
+ two expressions match, the expression that matches the longest
+ string will be used. May be constructed using the ``'^'``
+ operator.
+
+ Example::
+
+ # construct Or using '^' operator
+
+ number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+ print(number.search_string("123 3.1416 789"))
+
+ prints::
+
+ [['123'], ['3.1416'], ['789']]
+ """
+
+ def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
+ super().__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def streamline(self) -> ParserElement:
+ super().streamline()
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ self.saveAsList = any(e.saveAsList for e in self.exprs)
+ self.skipWhitespace = all(
+ e.skipWhitespace and not isinstance(e, White) for e in self.exprs
+ )
+ else:
+ self.saveAsList = False
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ maxExcLoc = -1
+ maxException = None
+ matches = []
+ fatals = []
+ if all(e.callPreparse for e in self.exprs):
+ loc = self.preParse(instring, loc)
+ for e in self.exprs:
+ try:
+ loc2 = e.try_parse(instring, loc, raise_fatal=True)
+ except ParseFatalException as pfe:
+ pfe.__traceback__ = None
+ pfe.parserElement = e
+ fatals.append(pfe)
+ maxException = None
+ maxExcLoc = -1
+ except ParseException as err:
+ if not fatals:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(
+ instring, len(instring), e.errmsg, self
+ )
+ maxExcLoc = len(instring)
+ else:
+ # save match among all matches, to retry longest to shortest
+ matches.append((loc2, e))
+
+ if matches:
+ # re-evaluate all matches in descending order of length of match, in case attached actions
+ # might change whether or how much they match of the input.
+ matches.sort(key=itemgetter(0), reverse=True)
+
+ if not doActions:
+ # no further conditions or parse actions to change the selection of
+ # alternative, so the first match will be the best match
+ best_expr = matches[0][1]
+ return best_expr._parse(instring, loc, doActions)
+
+ longest = -1, None
+ for loc1, expr1 in matches:
+ if loc1 <= longest[0]:
+ # already have a longer match than this one will deliver, we are done
+ return longest
+
+ try:
+ loc2, toks = expr1._parse(instring, loc, doActions)
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ else:
+ if loc2 >= loc1:
+ return loc2, toks
+ # didn't match as much as before
+ elif loc2 > longest[0]:
+ longest = loc2, toks
+
+ if longest != (-1, None):
+ return longest
+
+ if fatals:
+ if len(fatals) > 1:
+ fatals.sort(key=lambda e: -e.loc)
+ if fatals[0].loc == fatals[1].loc:
+ fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
+ max_fatal = fatals[0]
+ raise max_fatal
+
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(
+ instring, loc, "no defined alternatives to match", self
+ )
+
+ def __ixor__(self, other):
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ return self.append(other) # Or([self, other])
+
+ def _generateDefaultName(self):
+ return "{" + " ^ ".join(str(e) for e in self.exprs) + "}"
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if (
+ __diag__.warn_multiple_tokens_in_named_alternation
+ and Diagnostics.warn_multiple_tokens_in_named_alternation
+ not in self.suppress_warnings_
+ ):
+ if any(
+ isinstance(e, And)
+ and Diagnostics.warn_multiple_tokens_in_named_alternation
+ not in e.suppress_warnings_
+ for e in self.exprs
+ ):
+ warnings.warn(
+ "{}: setting results name {!r} on {} expression "
+ "will return a list of all parsed tokens in an And alternative, "
+ "in prior versions only the first token was returned; enclose "
+ "contained argument in Group".format(
+ "warn_multiple_tokens_in_named_alternation",
+ name,
+ type(self).__name__,
+ ),
+ stacklevel=3,
+ )
+
+ return super()._setResultsName(name, listAllMatches)
+
+
+class MatchFirst(ParseExpression):
+ """Requires that at least one :class:`ParseExpression` is found. If
+ more than one expression matches, the first one listed is the one that will
+ match. May be constructed using the ``'|'`` operator.
+
+ Example::
+
+ # construct MatchFirst using '|' operator
+
+ # watch the order of expressions to match
+ number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+ print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+ # put more selective expression first
+ number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+ print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
+ """
+
+ def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False):
+ super().__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ self.skipWhitespace = all(e.skipWhitespace for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def streamline(self) -> ParserElement:
+ if self.streamlined:
+ return self
+
+ super().streamline()
+ if self.exprs:
+ self.saveAsList = any(e.saveAsList for e in self.exprs)
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ self.skipWhitespace = all(
+ e.skipWhitespace and not isinstance(e, White) for e in self.exprs
+ )
+ else:
+ self.saveAsList = False
+ self.mayReturnEmpty = True
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ maxExcLoc = -1
+ maxException = None
+
+ for e in self.exprs:
+ try:
+ return e._parse(
+ instring,
+ loc,
+ doActions,
+ )
+ except ParseFatalException as pfe:
+ pfe.__traceback__ = None
+ pfe.parserElement = e
+ raise
+ except ParseException as err:
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(
+ instring, len(instring), e.errmsg, self
+ )
+ maxExcLoc = len(instring)
+
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(
+ instring, loc, "no defined alternatives to match", self
+ )
+
+ def __ior__(self, other):
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ return self.append(other) # MatchFirst([self, other])
+
+ def _generateDefaultName(self):
+ return "{" + " | ".join(str(e) for e in self.exprs) + "}"
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if (
+ __diag__.warn_multiple_tokens_in_named_alternation
+ and Diagnostics.warn_multiple_tokens_in_named_alternation
+ not in self.suppress_warnings_
+ ):
+ if any(
+ isinstance(e, And)
+ and Diagnostics.warn_multiple_tokens_in_named_alternation
+ not in e.suppress_warnings_
+ for e in self.exprs
+ ):
+ warnings.warn(
+ "{}: setting results name {!r} on {} expression "
+ "will return a list of all parsed tokens in an And alternative, "
+ "in prior versions only the first token was returned; enclose "
+ "contained argument in Group".format(
+ "warn_multiple_tokens_in_named_alternation",
+ name,
+ type(self).__name__,
+ ),
+ stacklevel=3,
+ )
+
+ return super()._setResultsName(name, listAllMatches)
+
+
+class Each(ParseExpression):
+ """Requires all given :class:`ParseExpression` s to be found, but in
+ any order. Expressions may be separated by whitespace.
+
+ May be constructed using the ``'&'`` operator.
+
+ Example::
+
+ color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+ shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+ integer = Word(nums)
+ shape_attr = "shape:" + shape_type("shape")
+ posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+ color_attr = "color:" + color("color")
+ size_attr = "size:" + integer("size")
+
+ # use Each (using operator '&') to accept attributes in any order
+ # (shape and posn are required, color and size are optional)
+ shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr)
+
+ shape_spec.run_tests('''
+ shape: SQUARE color: BLACK posn: 100, 120
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ color:GREEN size:20 shape:TRIANGLE posn:20,40
+ '''
+ )
+
+ prints::
+
+ shape: SQUARE color: BLACK posn: 100, 120
+ ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+ - color: BLACK
+ - posn: ['100', ',', '120']
+ - x: 100
+ - y: 120
+ - shape: SQUARE
+
+
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+ - color: BLUE
+ - posn: ['50', ',', '80']
+ - x: 50
+ - y: 80
+ - shape: CIRCLE
+ - size: 50
+
+
+ color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+ ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+ - color: GREEN
+ - posn: ['20', ',', '40']
+ - x: 20
+ - y: 40
+ - shape: TRIANGLE
+ - size: 20
+ """
+
+ def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True):
+ super().__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+ self.skipWhitespace = True
+ self.initExprGroups = True
+ self.saveAsList = True
+
+ def streamline(self) -> ParserElement:
+ super().streamline()
+ if self.exprs:
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if self.initExprGroups:
+ self.opt1map = dict(
+ (id(e.expr), e) for e in self.exprs if isinstance(e, Opt)
+ )
+ opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)]
+ opt2 = [
+ e
+ for e in self.exprs
+ if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore))
+ ]
+ self.optionals = opt1 + opt2
+ self.multioptionals = [
+ e.expr.set_results_name(e.resultsName, list_all_matches=True)
+ for e in self.exprs
+ if isinstance(e, _MultipleMatch)
+ ]
+ self.multirequired = [
+ e.expr.set_results_name(e.resultsName, list_all_matches=True)
+ for e in self.exprs
+ if isinstance(e, OneOrMore)
+ ]
+ self.required = [
+ e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore))
+ ]
+ self.required += self.multirequired
+ self.initExprGroups = False
+
+ tmpLoc = loc
+ tmpReqd = self.required[:]
+ tmpOpt = self.optionals[:]
+ multis = self.multioptionals[:]
+ matchOrder = []
+
+ keepMatching = True
+ failed = []
+ fatals = []
+ while keepMatching:
+ tmpExprs = tmpReqd + tmpOpt + multis
+ failed.clear()
+ fatals.clear()
+ for e in tmpExprs:
+ try:
+ tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True)
+ except ParseFatalException as pfe:
+ pfe.__traceback__ = None
+ pfe.parserElement = e
+ fatals.append(pfe)
+ failed.append(e)
+ except ParseException:
+ failed.append(e)
+ else:
+ matchOrder.append(self.opt1map.get(id(e), e))
+ if e in tmpReqd:
+ tmpReqd.remove(e)
+ elif e in tmpOpt:
+ tmpOpt.remove(e)
+ if len(failed) == len(tmpExprs):
+ keepMatching = False
+
+ # look for any ParseFatalExceptions
+ if fatals:
+ if len(fatals) > 1:
+ fatals.sort(key=lambda e: -e.loc)
+ if fatals[0].loc == fatals[1].loc:
+ fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement))))
+ max_fatal = fatals[0]
+ raise max_fatal
+
+ if tmpReqd:
+ missing = ", ".join([str(e) for e in tmpReqd])
+ raise ParseException(
+ instring,
+ loc,
+ "Missing one or more required elements ({})".format(missing),
+ )
+
+ # add any unmatched Opts, in case they have default values defined
+ matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt]
+
+ total_results = ParseResults([])
+ for e in matchOrder:
+ loc, results = e._parse(instring, loc, doActions)
+ total_results += results
+
+ return loc, total_results
+
+ def _generateDefaultName(self):
+ return "{" + " & ".join(str(e) for e in self.exprs) + "}"
+
+
+class ParseElementEnhance(ParserElement):
+ """Abstract subclass of :class:`ParserElement`, for combining and
+ post-processing parsed tokens.
+ """
+
+ def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
+ super().__init__(savelist)
+ if isinstance(expr, str_type):
+ if issubclass(self._literalStringClass, Token):
+ expr = self._literalStringClass(expr)
+ elif issubclass(type(self), self._literalStringClass):
+ expr = Literal(expr)
+ else:
+ expr = self._literalStringClass(Literal(expr))
+ self.expr = expr
+ if expr is not None:
+ self.mayIndexError = expr.mayIndexError
+ self.mayReturnEmpty = expr.mayReturnEmpty
+ self.set_whitespace_chars(
+ expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars
+ )
+ self.skipWhitespace = expr.skipWhitespace
+ self.saveAsList = expr.saveAsList
+ self.callPreparse = expr.callPreparse
+ self.ignoreExprs.extend(expr.ignoreExprs)
+
+ def recurse(self) -> Sequence[ParserElement]:
+ return [self.expr] if self.expr is not None else []
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if self.expr is not None:
+ return self.expr._parse(instring, loc, doActions, callPreParse=False)
+ else:
+ raise ParseException(instring, loc, "No expression defined", self)
+
+ def leave_whitespace(self, recursive: bool = True) -> ParserElement:
+ super().leave_whitespace(recursive)
+
+ if recursive:
+ self.expr = self.expr.copy()
+ if self.expr is not None:
+ self.expr.leave_whitespace(recursive)
+ return self
+
+ def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
+ super().ignore_whitespace(recursive)
+
+ if recursive:
+ self.expr = self.expr.copy()
+ if self.expr is not None:
+ self.expr.ignore_whitespace(recursive)
+ return self
+
+ def ignore(self, other) -> ParserElement:
+ if isinstance(other, Suppress):
+ if other not in self.ignoreExprs:
+ super().ignore(other)
+ if self.expr is not None:
+ self.expr.ignore(self.ignoreExprs[-1])
+ else:
+ super().ignore(other)
+ if self.expr is not None:
+ self.expr.ignore(self.ignoreExprs[-1])
+ return self
+
+ def streamline(self) -> ParserElement:
+ super().streamline()
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def _checkRecursion(self, parseElementList):
+ if self in parseElementList:
+ raise RecursiveGrammarException(parseElementList + [self])
+ subRecCheckList = parseElementList[:] + [self]
+ if self.expr is not None:
+ self.expr._checkRecursion(subRecCheckList)
+
+ def validate(self, validateTrace=None) -> None:
+ if validateTrace is None:
+ validateTrace = []
+ tmp = validateTrace[:] + [self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self._checkRecursion([])
+
+ def _generateDefaultName(self):
+ return "{}:({})".format(self.__class__.__name__, str(self.expr))
+
+ ignoreWhitespace = ignore_whitespace
+ leaveWhitespace = leave_whitespace
+
+
+class IndentedBlock(ParseElementEnhance):
+ """
+ Expression to match one or more expressions at a given indentation level.
+ Useful for parsing text where structure is implied by indentation (like Python source code).
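+
+ Example (a minimal sketch, assuming a simple "heading: indented items"
+ layout; all names are arbitrary)::
+
+ heading = Word(alphas) + Suppress(":")
+ item = Word(alphas)
+ section = Group(heading + IndentedBlock(item))
+ parser = OneOrMore(section)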
+ """
+
+ class _Indent(Empty):
+ def __init__(self, ref_col: int):
+ super().__init__()
+ self.errmsg = "expected indent at column {}".format(ref_col)
+ self.add_condition(lambda s, l, t: col(l, s) == ref_col)
+
+ class _IndentGreater(Empty):
+ def __init__(self, ref_col: int):
+ super().__init__()
+ self.errmsg = "expected indent at column greater than {}".format(ref_col)
+ self.add_condition(lambda s, l, t: col(l, s) > ref_col)
+
+ def __init__(
+ self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True
+ ):
+ super().__init__(expr, savelist=True)
+ # if recursive:
+ # raise NotImplementedError("IndentedBlock with recursive is not implemented")
+ self._recursive = recursive
+ self._grouped = grouped
+ self.parent_anchor = 1
+
+ def parseImpl(self, instring, loc, doActions=True):
+ # advance parse position to non-whitespace by using an Empty()
+ # this should be the column to be used for all subsequent indented lines
+ anchor_loc = Empty().preParse(instring, loc)
+
+ # see if self.expr matches at the current location - if not it will raise an exception
+ # and no further work is necessary
+ self.expr.try_parse(instring, anchor_loc, doActions)
+
+ indent_col = col(anchor_loc, instring)
+ peer_detect_expr = self._Indent(indent_col)
+
+ inner_expr = Empty() + peer_detect_expr + self.expr
+ if self._recursive:
+ sub_indent = self._IndentGreater(indent_col)
+ nested_block = IndentedBlock(
+ self.expr, recursive=self._recursive, grouped=self._grouped
+ )
+ nested_block.set_debug(self.debug)
+ nested_block.parent_anchor = indent_col
+ inner_expr += Opt(sub_indent + nested_block)
+
+ inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}")
+ block = OneOrMore(inner_expr)
+
+ trailing_undent = self._Indent(self.parent_anchor) | StringEnd()
+
+ if self._grouped:
+ wrapper = Group
+ else:
+ wrapper = lambda expr: expr
+ return (wrapper(block) + Optional(trailing_undent)).parseImpl(
+ instring, anchor_loc, doActions
+ )
+
+
+class AtStringStart(ParseElementEnhance):
+ """Matches if expression matches at the beginning of the parse
+ string::
+
+ AtStringStart(Word(nums)).parse_string("123")
+ # prints ["123"]
+
+ AtStringStart(Word(nums)).parse_string(" 123")
+ # raises ParseException
+ """
+
+ def __init__(self, expr: Union[ParserElement, str]):
+ super().__init__(expr)
+ self.callPreparse = False
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc != 0:
+ raise ParseException(instring, loc, "not found at string start")
+ return super().parseImpl(instring, loc, doActions)
+
+
+class AtLineStart(ParseElementEnhance):
+ r"""Matches if an expression matches at the beginning of a line within
+ the parse string
+
+ Example::
+
+ test = '''\
+ AAA this line
+ AAA and this line
+ AAA but not this one
+ B AAA and definitely not this one
+ '''
+
+ for t in (AtLineStart('AAA') + restOfLine).search_string(test):
+ print(t)
+
+ prints::
+
+ ['AAA', ' this line']
+ ['AAA', ' and this line']
+
+ """
+
+ def __init__(self, expr: Union[ParserElement, str]):
+ super().__init__(expr)
+ self.callPreparse = False
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if col(loc, instring) != 1:
+ raise ParseException(instring, loc, "not found at line start")
+ return super().parseImpl(instring, loc, doActions)
+
+
+class FollowedBy(ParseElementEnhance):
+ """Lookahead matching of the given parse expression.
+ ``FollowedBy`` does *not* advance the parsing position within
+ the input string, it only verifies that the specified parse
+ expression matches at the current position. ``FollowedBy``
+ always returns a null token list. If any results names are defined
+ in the lookahead expression, those *will* be returned for access by
+ name.
+
+ Example::
+
+ # use FollowedBy to match a label only if it is followed by a ':'
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
+
+ attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint()
+
+ prints::
+
+ [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+ """
+
+ def __init__(self, expr: Union[ParserElement, str]):
+ super().__init__(expr)
+ self.mayReturnEmpty = True
+
+ def parseImpl(self, instring, loc, doActions=True):
+ # by using self.expr._parse and deleting the contents of the returned ParseResults list
+ # we keep any named results that were defined in the FollowedBy expression
+ _, ret = self.expr._parse(instring, loc, doActions=doActions)
+ del ret[:]
+
+ return loc, ret
+
+
+class PrecededBy(ParseElementEnhance):
+ """Lookbehind matching of the given parse expression.
+ ``PrecededBy`` does not advance the parsing position within the
+ input string, it only verifies that the specified parse expression
+ matches prior to the current position. ``PrecededBy`` always
+ returns a null token list, but if a results name is defined on the
+ given expression, it is returned.
+
+ Parameters:
+
+ - expr - expression that must match prior to the current parse
+ location
+ - retreat - (default= ``None``) - (int) maximum number of characters
+ to look behind prior to the current parse location
+
+ If the lookbehind expression is a string, :class:`Literal`,
+ :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn`
+ with a specified exact or maximum length, then the retreat
+ parameter is not required. Otherwise, retreat must be specified to
+ give a maximum number of characters to look back from
+ the current parse position for a lookbehind match.
+
+ Example::
+
+ # VB-style variable names with type prefixes
+ int_var = PrecededBy("#") + pyparsing_common.identifier
+ str_var = PrecededBy("$") + pyparsing_common.identifier
+
+ """
+
+ def __init__(
+ self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None
+ ):
+ super().__init__(expr)
+ self.expr = self.expr().leave_whitespace()
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.exact = False
+ if isinstance(expr, str_type):
+ retreat = len(expr)
+ self.exact = True
+ elif isinstance(expr, (Literal, Keyword)):
+ retreat = expr.matchLen
+ self.exact = True
+ elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
+ retreat = expr.maxLen
+ self.exact = True
+ elif isinstance(expr, PositionToken):
+ retreat = 0
+ self.exact = True
+ self.retreat = retreat
+ self.errmsg = "not preceded by " + str(expr)
+ self.skipWhitespace = False
+ self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
+
+ def parseImpl(self, instring, loc=0, doActions=True):
+ if self.exact:
+ if loc < self.retreat:
+ raise ParseException(instring, loc, self.errmsg)
+ start = loc - self.retreat
+ _, ret = self.expr._parse(instring, start)
+ else:
+ # retreat specified a maximum lookbehind window, iterate
+ test_expr = self.expr + StringEnd()
+ instring_slice = instring[max(0, loc - self.retreat) : loc]
+ last_expr = ParseException(instring, loc, self.errmsg)
+ for offset in range(1, min(loc, self.retreat + 1) + 1):
+ try:
+ # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
+ _, ret = test_expr._parse(
+ instring_slice, len(instring_slice) - offset
+ )
+ except ParseBaseException as pbe:
+ last_expr = pbe
+ else:
+ break
+ else:
+ raise last_expr
+ return loc, ret
+
+
+class Located(ParseElementEnhance):
+ """
+ Decorates a returned token with its starting and ending
+ locations in the input string.
+
+ This helper adds the following results names:
+
+ - ``locn_start`` - location where matched expression begins
+ - ``locn_end`` - location where matched expression ends
+ - ``value`` - the actual parsed results
+
+ Be careful if the input text contains ``<TAB>`` characters, you
+ may want to call :class:`ParserElement.parse_with_tabs`
+
+ Example::
+
+ wd = Word(alphas)
+ for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
+ print(match)
+
+ prints::
+
+ [0, ['ljsdf'], 5]
+ [8, ['lksdjjf'], 15]
+ [18, ['lkkjj'], 23]
+
+ """
+
+ def parseImpl(self, instring, loc, doActions=True):
+ start = loc
+ loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False)
+ ret_tokens = ParseResults([start, tokens, loc])
+ ret_tokens["locn_start"] = start
+ ret_tokens["value"] = tokens
+ ret_tokens["locn_end"] = loc
+ if self.resultsName:
+ # must return as a list, so that the name will be attached to the complete group
+ return loc, [ret_tokens]
+ else:
+ return loc, ret_tokens
+
+
+class NotAny(ParseElementEnhance):
+ """
+ Lookahead to disallow matching with the given parse expression.
+ ``NotAny`` does *not* advance the parsing position within the
+ input string, it only verifies that the specified parse expression
+ does *not* match at the current position. Also, ``NotAny`` does
+ *not* skip over leading whitespace. ``NotAny`` always returns
+ a null token list. May be constructed using the ``'~'`` operator.
+
+ Example::
+
+ AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
+
+ # take care not to mistake keywords for identifiers
+ ident = ~(AND | OR | NOT) + Word(alphas)
+ boolean_term = Opt(NOT) + ident
+
+ # very crude boolean expression - to support parenthesis groups and
+ # operation hierarchy, use infix_notation
+ boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...]
+
+ # integers that are followed by "." are actually floats
+ integer = Word(nums) + ~Char(".")
+ """
+
+ def __init__(self, expr: Union[ParserElement, str]):
+ super().__init__(expr)
+ # do NOT use self.leave_whitespace(), don't want to propagate to exprs
+ # self.leave_whitespace()
+ self.skipWhitespace = False
+
+ self.mayReturnEmpty = True
+ self.errmsg = "Found unwanted token, " + str(self.expr)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if self.expr.can_parse_next(instring, loc):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+ def _generateDefaultName(self):
+ return "~{" + str(self.expr) + "}"
+
+
+class _MultipleMatch(ParseElementEnhance):
+ def __init__(
+ self,
+ expr: ParserElement,
+ stop_on: typing.Optional[Union[ParserElement, str]] = None,
+ *,
+ stopOn: typing.Optional[Union[ParserElement, str]] = None,
+ ):
+ super().__init__(expr)
+ stopOn = stopOn or stop_on
+ self.saveAsList = True
+ ender = stopOn
+ if isinstance(ender, str_type):
+ ender = self._literalStringClass(ender)
+ self.stopOn(ender)
+
+ def stopOn(self, ender) -> ParserElement:
+ if isinstance(ender, str_type):
+ ender = self._literalStringClass(ender)
+ self.not_ender = ~ender if ender is not None else None
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ self_expr_parse = self.expr._parse
+ self_skip_ignorables = self._skipIgnorables
+ check_ender = self.not_ender is not None
+ if check_ender:
+ try_not_ender = self.not_ender.tryParse
+
+ # must be at least one (but first see if we are the stopOn sentinel;
+ # if so, fail)
+ if check_ender:
+ try_not_ender(instring, loc)
+ loc, tokens = self_expr_parse(instring, loc, doActions)
+ try:
+ hasIgnoreExprs = bool(self.ignoreExprs)
+ while 1:
+ if check_ender:
+ try_not_ender(instring, loc)
+ if hasIgnoreExprs:
+ preloc = self_skip_ignorables(instring, loc)
+ else:
+ preloc = loc
+ loc, tmptokens = self_expr_parse(instring, preloc, doActions)
+ if tmptokens or tmptokens.haskeys():
+ tokens += tmptokens
+ except (ParseException, IndexError):
+ pass
+
+ return loc, tokens
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if (
+ __diag__.warn_ungrouped_named_tokens_in_collection
+ and Diagnostics.warn_ungrouped_named_tokens_in_collection
+ not in self.suppress_warnings_
+ ):
+ for e in [self.expr] + self.expr.recurse():
+ if (
+ isinstance(e, ParserElement)
+ and e.resultsName
+ and Diagnostics.warn_ungrouped_named_tokens_in_collection
+ not in e.suppress_warnings_
+ ):
+ warnings.warn(
+ "{}: setting results name {!r} on {} expression "
+ "collides with {!r} on contained expression".format(
+ "warn_ungrouped_named_tokens_in_collection",
+ name,
+ type(self).__name__,
+ e.resultsName,
+ ),
+ stacklevel=3,
+ )
+
+ return super()._setResultsName(name, listAllMatches)
+
+
+class OneOrMore(_MultipleMatch):
+ """
+ Repetition of one or more of the given expression.
+
+ Parameters:
+ - ``expr`` - expression that must match one or more times
+ - ``stop_on`` - (default= ``None``) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example::
+
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join))
+
+ text = "shape: SQUARE posn: upper left color: BLACK"
+ attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+ # use stop_on attribute for OneOrMore to avoid reading label string as part of the data
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
+ OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+
+ # could also be written as
+ (attr_expr * (1,)).parse_string(text).pprint()
+ """
+
+ def _generateDefaultName(self):
+ return "{" + str(self.expr) + "}..."
+
+
+class ZeroOrMore(_MultipleMatch):
+ """
+ Optional repetition of zero or more of the given expression.
+
+ Parameters:
+ - ``expr`` - expression that must match zero or more times
+ - ``stop_on`` - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression) - (default= ``None``)
+
+ Example: similar to :class:`OneOrMore`
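+
+ An illustrative sketch (names are arbitrary)::
+
+ values = Suppress("values:") + ZeroOrMore(Word(nums))
+ print(values.parse_string("values: 1 2 3"))
+ print(values.parse_string("values:"))
+
+ prints::
+
+ ['1', '2', '3']
+ []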
+ """
+
+ def __init__(
+ self,
+ expr: ParserElement,
+ stop_on: typing.Optional[Union[ParserElement, str]] = None,
+ *,
+ stopOn: typing.Optional[Union[ParserElement, str]] = None,
+ ):
+ super().__init__(expr, stopOn=stopOn or stop_on)
+ self.mayReturnEmpty = True
+
+ def parseImpl(self, instring, loc, doActions=True):
+ try:
+ return super().parseImpl(instring, loc, doActions)
+ except (ParseException, IndexError):
+ return loc, ParseResults([], name=self.resultsName)
+
+ def _generateDefaultName(self):
+ return "[" + str(self.expr) + "]..."
+
+
+class _NullToken:
+ def __bool__(self):
+ return False
+
+ def __str__(self):
+ return ""
+
+
+class Opt(ParseElementEnhance):
+ """
+ Optional matching of the given expression.
+
+ Parameters:
+    - ``expr`` - expression that may match zero or one time
+ - ``default`` (optional) - value to be returned if the optional expression is not found.
+
+ Example::
+
+ # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+ zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4)))
+ zip.run_tests('''
+ # traditional ZIP code
+ 12345
+
+ # ZIP+4 form
+ 12101-0001
+
+ # invalid ZIP
+ 98765-
+ ''')
+
+ prints::
+
+ # traditional ZIP code
+ 12345
+ ['12345']
+
+ # ZIP+4 form
+ 12101-0001
+ ['12101-0001']
+
+ # invalid ZIP
+ 98765-
+ ^
+ FAIL: Expected end of text (at char 5), (line:1, col:6)
+ """
+
+ __optionalNotMatched = _NullToken()
+
+ def __init__(
+ self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched
+ ):
+ super().__init__(expr, savelist=False)
+ self.saveAsList = self.expr.saveAsList
+ self.defaultValue = default
+ self.mayReturnEmpty = True
+
+ def parseImpl(self, instring, loc, doActions=True):
+ self_expr = self.expr
+ try:
+ loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False)
+ except (ParseException, IndexError):
+ default_value = self.defaultValue
+ if default_value is not self.__optionalNotMatched:
+ if self_expr.resultsName:
+ tokens = ParseResults([default_value])
+ tokens[self_expr.resultsName] = default_value
+ else:
+ tokens = [default_value]
+ else:
+ tokens = []
+ return loc, tokens
+
+ def _generateDefaultName(self):
+ inner = str(self.expr)
+ # strip off redundant inner {}'s
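+        # (the extended slice below checks just the first and last characters)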
+ while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}":
+ inner = inner[1:-1]
+ return "[" + inner + "]"
+
+
+Optional = Opt
+
+
+class SkipTo(ParseElementEnhance):
+ """
+ Token for skipping over all undefined text until the matched
+ expression is found.
+
+ Parameters:
+ - ``expr`` - target expression marking the end of the data to be skipped
+ - ``include`` - if ``True``, the target expression is also parsed
+ (the skipped text and target expression are returned as a 2-element
+ list) (default= ``False``).
+ - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and
+ comments) that might contain false matches to the target expression
+ - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be
+      included in the skipped text; if found before the target expression is found,
+ the :class:`SkipTo` is not a match
+
+ Example::
+
+ report = '''
+ Outstanding Issues Report - 1 Jan 2000
+
+ # | Severity | Description | Days Open
+ -----+----------+-------------------------------------------+-----------
+ 101 | Critical | Intermittent system crash | 6
+ 94 | Cosmetic | Spelling error on Login ('log|n') | 14
+ 79 | Minor | System slow when running too many reports | 47
+ '''
+ integer = Word(nums)
+ SEP = Suppress('|')
+ # use SkipTo to simply match everything up until the next SEP
+ # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+ # - parse action will call token.strip() for each matched token, i.e., the description body
+ string_data = SkipTo(SEP, ignore=quoted_string)
+ string_data.set_parse_action(token_map(str.strip))
+ ticket_expr = (integer("issue_num") + SEP
+ + string_data("sev") + SEP
+ + string_data("desc") + SEP
+ + integer("days_open"))
+
+ for tkt in ticket_expr.search_string(report):
+        print(tkt.dump())
+
+ prints::
+
+ ['101', 'Critical', 'Intermittent system crash', '6']
+ - days_open: '6'
+ - desc: 'Intermittent system crash'
+ - issue_num: '101'
+ - sev: 'Critical'
+ ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+ - days_open: '14'
+ - desc: "Spelling error on Login ('log|n')"
+ - issue_num: '94'
+ - sev: 'Cosmetic'
+ ['79', 'Minor', 'System slow when running too many reports', '47']
+ - days_open: '47'
+ - desc: 'System slow when running too many reports'
+ - issue_num: '79'
+ - sev: 'Minor'
+ """
+
+ def __init__(
+ self,
+ other: Union[ParserElement, str],
+ include: bool = False,
+        ignore: typing.Optional[ParserElement] = None,
+ fail_on: typing.Optional[Union[ParserElement, str]] = None,
+ *,
+        failOn: typing.Optional[Union[ParserElement, str]] = None,
+ ):
+ super().__init__(other)
+ failOn = failOn or fail_on
+ self.ignoreExpr = ignore
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.includeMatch = include
+ self.saveAsList = False
+ if isinstance(failOn, str_type):
+ self.failOn = self._literalStringClass(failOn)
+ else:
+ self.failOn = failOn
+ self.errmsg = "No match found for " + str(self.expr)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ startloc = loc
+ instrlen = len(instring)
+ self_expr_parse = self.expr._parse
+ self_failOn_canParseNext = (
+ self.failOn.canParseNext if self.failOn is not None else None
+ )
+ self_ignoreExpr_tryParse = (
+ self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+ )
+
+ tmploc = loc
+ while tmploc <= instrlen:
+ if self_failOn_canParseNext is not None:
+ # break if failOn expression matches
+ if self_failOn_canParseNext(instring, tmploc):
+ break
+
+ if self_ignoreExpr_tryParse is not None:
+ # advance past ignore expressions
+ while 1:
+ try:
+ tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+ except ParseBaseException:
+ break
+
+ try:
+ self_expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+ except (ParseException, IndexError):
+ # no match, advance loc in string
+ tmploc += 1
+ else:
+ # matched skipto expr, done
+ break
+
+ else:
+ # ran off the end of the input string without matching skipto expr, fail
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ # build up return values
+ loc = tmploc
+ skiptext = instring[startloc:loc]
+ skipresult = ParseResults(skiptext)
+
+ if self.includeMatch:
+ loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False)
+ skipresult += mat
+
+ return loc, skipresult
+
+
+class Forward(ParseElementEnhance):
+ """
+ Forward declaration of an expression to be defined later -
+ used for recursive grammars, such as algebraic infix notation.
+ When the expression is known, it is assigned to the ``Forward``
+ variable using the ``'<<'`` operator.
+
+ Note: take care when assigning to ``Forward`` not to overlook
+ precedence of operators.
+
+ Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that::
+
+ fwd_expr << a | b | c
+
+ will actually be evaluated as::
+
+ (fwd_expr << a) | b | c
+
+ thereby leaving b and c out as parseable alternatives. It is recommended that you
+ explicitly group the values inserted into the ``Forward``::
+
+ fwd_expr << (a | b | c)
+
+ Converting to use the ``'<<='`` operator instead will avoid this problem.
+
+ See :class:`ParseResults.pprint` for an example of a recursive
+ parser created using ``Forward``.
+ """
+
+ def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None):
+ self.caller_frame = traceback.extract_stack(limit=2)[0]
+ super().__init__(other, savelist=False)
+ self.lshift_line = None
+
+ def __lshift__(self, other):
+ if hasattr(self, "caller_frame"):
+ del self.caller_frame
+ if isinstance(other, str_type):
+ other = self._literalStringClass(other)
+ self.expr = other
+ self.mayIndexError = self.expr.mayIndexError
+ self.mayReturnEmpty = self.expr.mayReturnEmpty
+ self.set_whitespace_chars(
+ self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars
+ )
+ self.skipWhitespace = self.expr.skipWhitespace
+ self.saveAsList = self.expr.saveAsList
+ self.ignoreExprs.extend(self.expr.ignoreExprs)
+ self.lshift_line = traceback.extract_stack(limit=2)[-2]
+ return self
+
+ def __ilshift__(self, other):
+ return self << other
+
+ def __or__(self, other):
+ caller_line = traceback.extract_stack(limit=2)[-2]
+ if (
+ __diag__.warn_on_match_first_with_lshift_operator
+ and caller_line == self.lshift_line
+ and Diagnostics.warn_on_match_first_with_lshift_operator
+ not in self.suppress_warnings_
+ ):
+ warnings.warn(
+ "using '<<' operator with '|' is probably an error, use '<<='",
+ stacklevel=2,
+ )
+ ret = super().__or__(other)
+ return ret
+
+ def __del__(self):
+ # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<'
+ if (
+ self.expr is None
+ and __diag__.warn_on_assignment_to_Forward
+ and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_
+ ):
+ warnings.warn_explicit(
+ "Forward defined here but no expression attached later using '<<=' or '<<'",
+ UserWarning,
+ filename=self.caller_frame.filename,
+ lineno=self.caller_frame.lineno,
+ )
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if (
+ self.expr is None
+ and __diag__.warn_on_parse_using_empty_Forward
+ and Diagnostics.warn_on_parse_using_empty_Forward
+ not in self.suppress_warnings_
+ ):
+ # walk stack until parse_string, scan_string, search_string, or transform_string is found
+ parse_fns = [
+ "parse_string",
+ "scan_string",
+ "search_string",
+ "transform_string",
+ ]
+ tb = traceback.extract_stack(limit=200)
+ for i, frm in enumerate(reversed(tb), start=1):
+ if frm.name in parse_fns:
+ stacklevel = i + 1
+ break
+ else:
+ stacklevel = 2
+ warnings.warn(
+ "Forward expression was never assigned a value, will not parse any input",
+ stacklevel=stacklevel,
+ )
+ if not ParserElement._left_recursion_enabled:
+ return super().parseImpl(instring, loc, doActions)
+ # ## Bounded Recursion algorithm ##
+ # Recursion only needs to be processed at ``Forward`` elements, since they are
+ # the only ones that can actually refer to themselves. The general idea is
+ # to handle recursion stepwise: We start at no recursion, then recurse once,
+ # recurse twice, ..., until more recursion offers no benefit (we hit the bound).
+ #
+ # The "trick" here is that each ``Forward`` gets evaluated in two contexts
+ # - to *match* a specific recursion level, and
+ # - to *search* the bounded recursion level
+ # and the two run concurrently. The *search* must *match* each recursion level
+ # to find the best possible match. This is handled by a memo table, which
+ # provides the previous match to the next level match attempt.
+ #
+ # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al.
+ #
+ # There is a complication since we not only *parse* but also *transform* via
+ # actions: We do not want to run the actions too often while expanding. Thus,
+ # we expand using `doActions=False` and only run `doActions=True` if the next
+ # recursion level is acceptable.
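+        #
+        # Illustrative sketch (assumes ParserElement.enable_left_recursion()
+        # has been called to turn this algorithm on):
+        #
+        #     expr = Forward()
+        #     expr <<= expr + '+' + Word(nums) | Word(nums)
+        #
+        # Matching "1+2+3" memoizes the match for "1" first, then "1+2", then
+        # "1+2+3", stopping once a deeper expansion no longer improves things.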
+ with ParserElement.recursion_lock:
+ memo = ParserElement.recursion_memos
+ try:
+ # we are parsing at a specific recursion expansion - use it as-is
+ prev_loc, prev_result = memo[loc, self, doActions]
+ if isinstance(prev_result, Exception):
+ raise prev_result
+ return prev_loc, prev_result.copy()
+ except KeyError:
+ act_key = (loc, self, True)
+ peek_key = (loc, self, False)
+ # we are searching for the best recursion expansion - keep on improving
+ # both `doActions` cases must be tracked separately here!
+ prev_loc, prev_peek = memo[peek_key] = (
+ loc - 1,
+ ParseException(
+ instring, loc, "Forward recursion without base case", self
+ ),
+ )
+ if doActions:
+ memo[act_key] = memo[peek_key]
+ while True:
+ try:
+ new_loc, new_peek = super().parseImpl(instring, loc, False)
+ except ParseException:
+ # we failed before getting any match – do not hide the error
+ if isinstance(prev_peek, Exception):
+ raise
+ new_loc, new_peek = prev_loc, prev_peek
+ # the match did not get better: we are done
+ if new_loc <= prev_loc:
+ if doActions:
+ # replace the match for doActions=False as well,
+ # in case the action did backtrack
+ prev_loc, prev_result = memo[peek_key] = memo[act_key]
+ del memo[peek_key], memo[act_key]
+ return prev_loc, prev_result.copy()
+ del memo[peek_key]
+ return prev_loc, prev_peek.copy()
+ # the match did get better: see if we can improve further
+ else:
+ if doActions:
+ try:
+ memo[act_key] = super().parseImpl(instring, loc, True)
+ except ParseException as e:
+ memo[peek_key] = memo[act_key] = (new_loc, e)
+ raise
+ prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek
+
+ def leave_whitespace(self, recursive: bool = True) -> ParserElement:
+ self.skipWhitespace = False
+ return self
+
+ def ignore_whitespace(self, recursive: bool = True) -> ParserElement:
+ self.skipWhitespace = True
+ return self
+
+ def streamline(self) -> ParserElement:
+ if not self.streamlined:
+ self.streamlined = True
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def validate(self, validateTrace=None) -> None:
+ if validateTrace is None:
+ validateTrace = []
+
+ if self not in validateTrace:
+ tmp = validateTrace[:] + [self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self._checkRecursion([])
+
+ def _generateDefaultName(self):
+ # Avoid infinite recursion by setting a temporary _defaultName
+ self._defaultName = ": ..."
+
+ # Use the string representation of main expression.
+ retString = "..."
+ try:
+ if self.expr is not None:
+ retString = str(self.expr)[:1000]
+ else:
+ retString = "None"
+ finally:
+ return self.__class__.__name__ + ": " + retString
+
+ def copy(self) -> ParserElement:
+ if self.expr is not None:
+ return super().copy()
+ else:
+ ret = Forward()
+ ret <<= self
+ return ret
+
+ def _setResultsName(self, name, list_all_matches=False):
+ if (
+ __diag__.warn_name_set_on_empty_Forward
+ and Diagnostics.warn_name_set_on_empty_Forward
+ not in self.suppress_warnings_
+ ):
+ if self.expr is None:
+ warnings.warn(
+ "{}: setting results name {!r} on {} expression "
+ "that has no contained expression".format(
+ "warn_name_set_on_empty_Forward", name, type(self).__name__
+ ),
+ stacklevel=3,
+ )
+
+ return super()._setResultsName(name, list_all_matches)
+
+ ignoreWhitespace = ignore_whitespace
+ leaveWhitespace = leave_whitespace
+
+
+class TokenConverter(ParseElementEnhance):
+ """
+    Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
+ """
+
+ def __init__(self, expr: Union[ParserElement, str], savelist=False):
+ super().__init__(expr) # , savelist)
+ self.saveAsList = False
+
+
+class Combine(TokenConverter):
+ """Converter to concatenate all matching tokens to a single string.
+ By default, the matching patterns must also be contiguous in the
+ input string; this can be disabled by specifying
+ ``'adjacent=False'`` in the constructor.
+
+ Example::
+
+ real = Word(nums) + '.' + Word(nums)
+ print(real.parse_string('3.1416')) # -> ['3', '.', '1416']
+ # will also erroneously match the following
+ print(real.parse_string('3. 1416')) # -> ['3', '.', '1416']
+
+ real = Combine(Word(nums) + '.' + Word(nums))
+ print(real.parse_string('3.1416')) # -> ['3.1416']
+ # no match when there are internal spaces
+ print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...)
+ """
+
+ def __init__(
+ self,
+ expr: ParserElement,
+ join_string: str = "",
+ adjacent: bool = True,
+ *,
+ joinString: typing.Optional[str] = None,
+ ):
+ super().__init__(expr)
+ joinString = joinString if joinString is not None else join_string
+ # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+ if adjacent:
+ self.leave_whitespace()
+ self.adjacent = adjacent
+ self.skipWhitespace = True
+ self.joinString = joinString
+ self.callPreparse = True
+
+ def ignore(self, other) -> ParserElement:
+ if self.adjacent:
+ ParserElement.ignore(self, other)
+ else:
+ super().ignore(other)
+ return self
+
+ def postParse(self, instring, loc, tokenlist):
+ retToks = tokenlist.copy()
+ del retToks[:]
+ retToks += ParseResults(
+ ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults
+ )
+
+ if self.resultsName and retToks.haskeys():
+ return [retToks]
+ else:
+ return retToks
+
+
+class Group(TokenConverter):
+ """Converter to return the matched tokens as a list - useful for
+ returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
+
+    The optional ``aslist`` argument, when set to True, will return the
+ parsed tokens as a Python list instead of a pyparsing ParseResults.
+
+ Example::
+
+ ident = Word(alphas)
+ num = Word(nums)
+ term = ident | num
+ func = ident + Opt(delimited_list(term))
+ print(func.parse_string("fn a, b, 100"))
+ # -> ['fn', 'a', 'b', '100']
+
+ func = ident + Group(Opt(delimited_list(term)))
+ print(func.parse_string("fn a, b, 100"))
+ # -> ['fn', ['a', 'b', '100']]
+ """
+
+ def __init__(self, expr: ParserElement, aslist: bool = False):
+ super().__init__(expr)
+ self.saveAsList = True
+ self._asPythonList = aslist
+
+ def postParse(self, instring, loc, tokenlist):
+ if self._asPythonList:
+ return ParseResults.List(
+ tokenlist.asList()
+ if isinstance(tokenlist, ParseResults)
+ else list(tokenlist)
+ )
+ else:
+ return [tokenlist]
+
+
+class Dict(TokenConverter):
+ """Converter to return a repetitive expression as a list, but also
+ as a dictionary. Each element can also be referenced using the first
+ token in the expression as its key. Useful for tabular report
+    scraping when the first column can be used as an item key.
+
+    The optional ``asdict`` argument, when set to True, will return the
+ parsed tokens as a Python dict instead of a pyparsing ParseResults.
+
+ Example::
+
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
+
+ # print attributes as plain groups
+ print(attr_expr[1, ...].parse_string(text).dump())
+
+ # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names
+ result = Dict(Group(attr_expr)[1, ...]).parse_string(text)
+ print(result.dump())
+
+ # access named fields as dict entries, or output as dict
+ print(result['shape'])
+ print(result.as_dict())
+
+ prints::
+
+ ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: 'light blue'
+ - posn: 'upper left'
+ - shape: 'SQUARE'
+ - texture: 'burlap'
+ SQUARE
+ {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+
+ See more examples at :class:`ParseResults` of accessing fields by results name.
+ """
+
+ def __init__(self, expr: ParserElement, asdict: bool = False):
+ super().__init__(expr)
+ self.saveAsList = True
+ self._asPythonDict = asdict
+
+ def postParse(self, instring, loc, tokenlist):
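+        # each non-empty grouped token becomes a named entry in the results,
+        # keyed by its first element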
+ for i, tok in enumerate(tokenlist):
+ if len(tok) == 0:
+ continue
+
+ ikey = tok[0]
+ if isinstance(ikey, int):
+ ikey = str(ikey).strip()
+
+ if len(tok) == 1:
+ tokenlist[ikey] = _ParseResultsWithOffset("", i)
+
+ elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
+ tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
+
+ else:
+ try:
+ dictvalue = tok.copy() # ParseResults(i)
+ except Exception:
+ exc = TypeError(
+ "could not extract dict values from parsed results"
+ " - Dict expression must contain Grouped expressions"
+ )
+ raise exc from None
+
+ del dictvalue[0]
+
+ if len(dictvalue) != 1 or (
+ isinstance(dictvalue, ParseResults) and dictvalue.haskeys()
+ ):
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
+ else:
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
+
+ if self._asPythonDict:
+ return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict()
+ else:
+ return [tokenlist] if self.resultsName else tokenlist
+
+
+class Suppress(TokenConverter):
+ """Converter for ignoring the results of a parsed expression.
+
+ Example::
+
+ source = "a, b, c,d"
+ wd = Word(alphas)
+ wd_list1 = wd + (',' + wd)[...]
+ print(wd_list1.parse_string(source))
+
+ # often, delimiters that are useful during parsing are just in the
+ # way afterward - use Suppress to keep them out of the parsed output
+ wd_list2 = wd + (Suppress(',') + wd)[...]
+ print(wd_list2.parse_string(source))
+
+ # Skipped text (using '...') can be suppressed as well
+ source = "lead in START relevant text END trailing text"
+ start_marker = Keyword("START")
+ end_marker = Keyword("END")
+ find_body = Suppress(...) + start_marker + ... + end_marker
+        print(find_body.parse_string(source))
+
+ prints::
+
+ ['a', ',', 'b', ',', 'c', ',', 'd']
+ ['a', 'b', 'c', 'd']
+ ['START', 'relevant text ', 'END']
+
+ (See also :class:`delimited_list`.)
+ """
+
+ def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
+ if expr is ...:
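+            # Suppress(...) defers the skip: __add__/__sub__ below replace this
+            # placeholder with Suppress(SkipTo(other)) once the following
+            # expression is known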
+ expr = _PendingSkip(NoMatch())
+ super().__init__(expr)
+
+ def __add__(self, other) -> "ParserElement":
+ if isinstance(self.expr, _PendingSkip):
+ return Suppress(SkipTo(other)) + other
+ else:
+ return super().__add__(other)
+
+ def __sub__(self, other) -> "ParserElement":
+ if isinstance(self.expr, _PendingSkip):
+ return Suppress(SkipTo(other)) - other
+ else:
+ return super().__sub__(other)
+
+ def postParse(self, instring, loc, tokenlist):
+ return []
+
+ def suppress(self) -> ParserElement:
+ return self
+
+
+def trace_parse_action(f: ParseAction) -> ParseAction:
+ """Decorator for debugging parse actions.
+
+ When the parse action is called, this decorator will print
+ ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
+ When the parse action completes, the decorator will print
+ ``"<<"`` followed by the returned value, or any exception that the parse action raised.
+
+ Example::
+
+ wd = Word(alphas)
+
+ @trace_parse_action
+ def remove_duplicate_chars(tokens):
+ return ''.join(sorted(set(''.join(tokens))))
+
+ wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
+ print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
+
+ prints::
+
+ >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+ <<leaving remove_duplicate_chars (ret: 'dfjkls')
+ ['dfjkls']
+ """
+ f = _trim_arity(f)
+
+ def z(*paArgs):
+ thisFunc = f.__name__
+ s, l, t = paArgs[-3:]
+ if len(paArgs) > 3:
+ thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
+ sys.stderr.write(
+ ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
+ )
+ try:
+ ret = f(*paArgs)
+ except Exception as exc:
+ sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
+ raise
+ sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
+ return ret
+
+ z.__name__ = f.__name__
+ return z
+
+
+# convenience constants for positional expressions
+empty = Empty().set_name("empty")
+line_start = LineStart().set_name("line_start")
+line_end = LineEnd().set_name("line_end")
+string_start = StringStart().set_name("string_start")
+string_end = StringEnd().set_name("string_end")
+
+_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
+ lambda s, l, t: t[0][1]
+)
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
+ lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
+)
+_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
+ lambda s, l, t: chr(int(t[0][1:], 8))
+)
+_singleChar = (
+ _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
+)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = (
+ Literal("[")
+ + Opt("^").set_results_name("negate")
+ + Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
+ + "]"
+)
+
+
+def srange(s: str) -> str:
+ r"""Helper to easily define string ranges for use in :class:`Word`
+ construction. Borrows syntax from regexp ``'[]'`` string range
+ definitions::
+
+ srange("[0-9]") -> "0123456789"
+ srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
+ srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+
+ The input string must be enclosed in []'s, and the returned string
+ is the expanded character set joined into a single string. The
+ values enclosed in the []'s may be:
+
+ - a single character
+ - an escaped character with a leading backslash (such as ``\-``
+ or ``\]``)
+ - an escaped hex character with a leading ``'\x'``
+ (``\x21``, which is a ``'!'`` character) (``\0x##``
+ is also supported for backwards compatibility)
+ - an escaped octal character with a leading ``'\0'``
+ (``\041``, which is a ``'!'`` character)
+ - a range of any of the above, separated by a dash (``'a-z'``,
+ etc.)
+ - any combination of the above (``'aeiouy'``,
+ ``'a-zA-Z0-9_$'``, etc.)
+ """
+ _expanded = (
+ lambda p: p
+ if not isinstance(p, ParseResults)
+ else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
+ )
+ try:
+ return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body)
+ except Exception:
+ return ""
+
+
+def token_map(func, *args) -> ParseAction:
+ """Helper to define a parse action by mapping a function to all
+ elements of a :class:`ParseResults` list. If any additional args are passed,
+ they are forwarded to the given function as additional arguments
+ after the token, as in
+ ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``,
+ which will convert the parsed data to an integer using base 16.
+
+    Example (compare the last example to the one in :class:`ParserElement.transform_string`)::
+
+ hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16))
+ hex_ints.run_tests('''
+ 00 11 22 aa FF 0a 0d 1a
+ ''')
+
+ upperword = Word(alphas).set_parse_action(token_map(str.upper))
+ upperword[1, ...].run_tests('''
+ my kingdom for a horse
+ ''')
+
+ wd = Word(alphas).set_parse_action(token_map(str.title))
+ wd[1, ...].set_parse_action(' '.join).run_tests('''
+ now is the winter of our discontent made glorious summer by this sun of york
+ ''')
+
+ prints::
+
+ 00 11 22 aa FF 0a 0d 1a
+ [0, 17, 34, 170, 255, 10, 13, 26]
+
+ my kingdom for a horse
+ ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+ now is the winter of our discontent made glorious summer by this sun of york
+ ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+ """
+
+ def pa(s, l, t):
+ return [func(tokn, *args) for tokn in t]
+
+ func_name = getattr(func, "__name__", getattr(func, "__class__").__name__)
+ pa.__name__ = func_name
+
+ return pa
+
+
+def autoname_elements() -> None:
+ """
+ Utility to simplify mass-naming of parser elements, for
+    generating railroad diagrams with named subdiagrams.
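+
+    Example (illustrative)::
+
+        integer = Word(nums)
+        ident = Word(alphas)
+        autoname_elements()   # equivalent to integer.set_name("integer"), etc.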
+ """
+ for name, var in sys._getframe().f_back.f_locals.items():
+ if isinstance(var, ParserElement) and not var.customName:
+ var.set_name(name)
+
+
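+# The quoted-string regexes below match the body of a quoted string, allowing
+# doubled quote characters ("" or '') and backslash escapes (including \xHH
+# hex escapes); the closing quote is matched separately and Combine joins the
+# pieces into a single token.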
+dbl_quoted_string = Combine(
+ Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
+).set_name("string enclosed in double quotes")
+
+sgl_quoted_string = Combine(
+ Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
+).set_name("string enclosed in single quotes")
+
+quoted_string = Combine(
+ Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
+ | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'"
+).set_name("quotedString using single or double quotes")
+
+unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal")
+
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+# build list of built-in expressions, for future reference if a global default value
+# gets updated
+_builtin_exprs: List[ParserElement] = [
+ v for v in vars().values() if isinstance(v, ParserElement)
+]
+
+# backward compatibility names
+tokenMap = token_map
+conditionAsParseAction = condition_as_parse_action
+nullDebugAction = null_debug_action
+sglQuotedString = sgl_quoted_string
+dblQuotedString = dbl_quoted_string
+quotedString = quoted_string
+unicodeString = unicode_string
+lineStart = line_start
+lineEnd = line_end
+stringStart = string_start
+stringEnd = string_end
+traceParseAction = trace_parse_action
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/diagram/__init__.py b/third_party/python/pip/pip/_vendor/pyparsing/diagram/__init__.py
new file mode 100644
index 0000000000..1506d66bf4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/diagram/__init__.py
@@ -0,0 +1,642 @@
+import railroad
+from pip._vendor import pyparsing
+import typing
+from typing import (
+ List,
+ NamedTuple,
+ Generic,
+ TypeVar,
+ Dict,
+ Callable,
+ Set,
+ Iterable,
+)
+from jinja2 import Template
+from io import StringIO
+import inspect
+
+
+jinja2_template_source = """\
+<!DOCTYPE html>
+<html>
+<head>
+ {% if not head %}
+ <style type="text/css">
+ .railroad-heading {
+ font-family: monospace;
+ }
+ </style>
+ {% else %}
+ {{ head | safe }}
+ {% endif %}
+</head>
+<body>
+{{ body | safe }}
+{% for diagram in diagrams %}
+ <div class="railroad-group">
+ <h1 class="railroad-heading">{{ diagram.title }}</h1>
+ <div class="railroad-description">{{ diagram.text }}</div>
+ <div class="railroad-svg">
+ {{ diagram.svg }}
+ </div>
+ </div>
+{% endfor %}
+</body>
+</html>
+"""
+
+template = Template(jinja2_template_source)
+
+# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet
+NamedDiagram = NamedTuple(
+ "NamedDiagram",
+ [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)],
+)
+"""
+A simple structure for associating a name with a railroad diagram
+"""
+
+T = TypeVar("T")
+
+
+class EachItem(railroad.Group):
+ """
+ Custom railroad item to compose a:
+ - Group containing a
+ - OneOrMore containing a
+ - Choice of the elements in the Each
+ with the group label indicating that all must be matched
+ """
+
+ all_label = "[ALL]"
+
+ def __init__(self, *items):
+ choice_item = railroad.Choice(len(items) - 1, *items)
+ one_or_more_item = railroad.OneOrMore(item=choice_item)
+ super().__init__(one_or_more_item, label=self.all_label)
+
+
+class AnnotatedItem(railroad.Group):
+ """
+ Simple subclass of Group that creates an annotation label
+ """
+
+ def __init__(self, label: str, item):
+ super().__init__(item=item, label="[{}]".format(label) if label else label)
+
+
+class EditablePartial(Generic[T]):
+ """
+    Acts like a functools.partial, but can be edited. In other words, it
+    represents an object that hasn't yet been constructed.
+ """
+
+ # We need this here because the railroad constructors actually transform the data, so can't be called until the
+ # entire tree is assembled
+
+ def __init__(self, func: Callable[..., T], args: list, kwargs: dict):
+ self.func = func
+ self.args = args
+ self.kwargs = kwargs
+
+ @classmethod
+ def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]":
+ """
+ If you call this function in the same way that you would call the constructor, it will store the arguments
+ as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3)
+ """
+ return EditablePartial(func=func, args=list(args), kwargs=kwargs)
+
+ @property
+ def name(self):
+ return self.kwargs["name"]
+
+ def __call__(self) -> T:
+ """
+ Evaluate the partial and return the result
+ """
+ args = self.args.copy()
+ kwargs = self.kwargs.copy()
+
+ # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g.
+ # args=['list', 'of', 'things'])
+ arg_spec = inspect.getfullargspec(self.func)
+ if arg_spec.varargs in self.kwargs:
+ args += kwargs.pop(arg_spec.varargs)
+
+ return self.func(*args, **kwargs)
+
+
+def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str:
+ """
+ Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams
+ :params kwargs: kwargs to be passed in to the template
+ """
+ data = []
+ for diagram in diagrams:
+ if diagram.diagram is None:
+ continue
+ io = StringIO()
+ diagram.diagram.writeSvg(io.write)
+ title = diagram.name
+ if diagram.index == 0:
+ title += " (root)"
+ data.append({"title": title, "text": "", "svg": io.getvalue()})
+
+ return template.render(diagrams=data, **kwargs)
+
+
+def resolve_partial(partial: "EditablePartial[T]") -> T:
+ """
+ Recursively resolves a collection of Partials into whatever type they are
+ """
+ if isinstance(partial, EditablePartial):
+ partial.args = resolve_partial(partial.args)
+ partial.kwargs = resolve_partial(partial.kwargs)
+ return partial()
+ elif isinstance(partial, list):
+ return [resolve_partial(x) for x in partial]
+ elif isinstance(partial, dict):
+ return {key: resolve_partial(x) for key, x in partial.items()}
+ else:
+ return partial
+
+
+def to_railroad(
+ element: pyparsing.ParserElement,
+ diagram_kwargs: typing.Optional[dict] = None,
+ vertical: int = 3,
+ show_results_names: bool = False,
+ show_groups: bool = False,
+) -> List[NamedDiagram]:
+ """
+ Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram
+ creation if you want to access the Railroad tree before it is converted to HTML
+ :param element: base element of the parser being diagrammed
+ :param diagram_kwargs: kwargs to pass to the Diagram() constructor
+    :param vertical: (optional) int - limit at which number of alternatives should be
+        shown vertically instead of horizontally
+    :param show_results_names: bool to indicate whether results name annotations should be
+        included in the diagram
+    :param show_groups: bool to indicate whether groups should be highlighted with an unlabeled
+        surrounding box
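+
+    Example (illustrative; ``my_parser`` stands for any existing ParserElement)::
+
+        diagrams = to_railroad(my_parser)
+        html = railroad_to_html(diagrams)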
+ """
+ # Convert the whole tree underneath the root
+ lookup = ConverterState(diagram_kwargs=diagram_kwargs or {})
+ _to_diagram_element(
+ element,
+ lookup=lookup,
+ parent=None,
+ vertical=vertical,
+ show_results_names=show_results_names,
+ show_groups=show_groups,
+ )
+
+ root_id = id(element)
+ # Convert the root if it hasn't been already
+ if root_id in lookup:
+ if not element.customName:
+ lookup[root_id].name = ""
+ lookup[root_id].mark_for_extraction(root_id, lookup, force=True)
+
+ # Now that we're finished, we can convert from intermediate structures into Railroad elements
+ diags = list(lookup.diagrams.values())
+ if len(diags) > 1:
+ # collapse out duplicate diags with the same name
+ seen = set()
+ deduped_diags = []
+ for d in diags:
+ # don't extract SkipTo elements, they are uninformative as subdiagrams
+ if d.name == "...":
+ continue
+ if d.name is not None and d.name not in seen:
+ seen.add(d.name)
+ deduped_diags.append(d)
+ resolved = [resolve_partial(partial) for partial in deduped_diags]
+ else:
+ # special case - if just one diagram, always display it, even if
+ # it has no name
+ resolved = [resolve_partial(partial) for partial in diags]
+ return sorted(resolved, key=lambda diag: diag.index)
+
+
+def _should_vertical(
+ specification: int, exprs: Iterable[pyparsing.ParserElement]
+) -> bool:
+ """
+ Returns true if we should return a vertical list of elements
+ """
+ if specification is None:
+ return False
+ else:
+ return len(_visible_exprs(exprs)) >= specification
+
+
+class ElementState:
+ """
+ State recorded for an individual pyparsing Element
+ """
+
+ # Note: this should be a dataclass, but we have to support Python 3.5
+ def __init__(
+ self,
+ element: pyparsing.ParserElement,
+ converted: EditablePartial,
+ parent: EditablePartial,
+ number: int,
+ name: str = None,
+ parent_index: typing.Optional[int] = None,
+ ):
+ #: The pyparsing element that this represents
+ self.element: pyparsing.ParserElement = element
+ #: The name of the element
+ self.name: typing.Optional[str] = name
+ #: The output Railroad element in an unconverted state
+ self.converted: EditablePartial = converted
+ #: The parent Railroad element, which we store so that we can extract this if it's duplicated
+ self.parent: EditablePartial = parent
+ #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram
+ self.number: int = number
+ #: The index of this inside its parent
+ self.parent_index: typing.Optional[int] = parent_index
+ #: If true, we should extract this out into a subdiagram
+ self.extract: bool = False
+ #: If true, all of this element's children have been filled out
+ self.complete: bool = False
+
+ def mark_for_extraction(
+ self, el_id: int, state: "ConverterState", name: str = None, force: bool = False
+ ):
+ """
+ Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram
+ :param el_id: id of the element
+ :param state: element/diagram state tracker
+ :param name: name to use for this element's text
+ :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the
+ root element when we know we're finished
+ """
+ self.extract = True
+
+ # Set the name
+ if not self.name:
+ if name:
+ # Allow forcing a custom name
+ self.name = name
+ elif self.element.customName:
+ self.name = self.element.customName
+ else:
+ self.name = ""
+
+ # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children
+ # to be added
+ # Also, if this is just a string literal etc, don't bother extracting it
+ if force or (self.complete and _worth_extracting(self.element)):
+ state.extract_into_diagram(el_id)
+
+
+class ConverterState:
+ """
+ Stores some state that persists between recursions into the element tree
+ """
+
+ def __init__(self, diagram_kwargs: typing.Optional[dict] = None):
+ #: A dictionary mapping ParserElements to state relating to them
+ self._element_diagram_states: Dict[int, ElementState] = {}
+ #: A dictionary mapping ParserElement IDs to subdiagrams generated from them
+ self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {}
+ #: The index of the next unnamed element
+ self.unnamed_index: int = 1
+ #: The index of the next element. This is used for sorting
+ self.index: int = 0
+ #: Shared kwargs that are used to customize the construction of diagrams
+ self.diagram_kwargs: dict = diagram_kwargs or {}
+ self.extracted_diagram_names: Set[str] = set()
+
+ def __setitem__(self, key: int, value: ElementState):
+ self._element_diagram_states[key] = value
+
+ def __getitem__(self, key: int) -> ElementState:
+ return self._element_diagram_states[key]
+
+ def __delitem__(self, key: int):
+ del self._element_diagram_states[key]
+
+ def __contains__(self, key: int):
+ return key in self._element_diagram_states
+
+ def generate_unnamed(self) -> int:
+ """
+ Generate a number used in the name of an otherwise unnamed diagram
+ """
+ self.unnamed_index += 1
+ return self.unnamed_index
+
+ def generate_index(self) -> int:
+ """
+ Generate a number used to index a diagram
+ """
+ self.index += 1
+ return self.index
+
+ def extract_into_diagram(self, el_id: int):
+ """
+ Used when we encounter the same token twice in the same tree. When this
+ happens, we replace all instances of that token with a terminal, and
+ create a new subdiagram for the token
+ """
+ position = self[el_id]
+
+ # Replace the original definition of this element with a regular block
+ if position.parent:
+ ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name)
+ if "item" in position.parent.kwargs:
+ position.parent.kwargs["item"] = ret
+ elif "items" in position.parent.kwargs:
+ position.parent.kwargs["items"][position.parent_index] = ret
+
+ # If the element we're extracting is a group, skip to its content but keep the title
+ if position.converted.func == railroad.Group:
+ content = position.converted.kwargs["item"]
+ else:
+ content = position.converted
+
+ self.diagrams[el_id] = EditablePartial.from_call(
+ NamedDiagram,
+ name=position.name,
+ diagram=EditablePartial.from_call(
+ railroad.Diagram, content, **self.diagram_kwargs
+ ),
+ index=position.number,
+ )
+
+ del self[el_id]
+
+
+def _worth_extracting(element: pyparsing.ParserElement) -> bool:
+ """
+ Returns true if this element is worth having its own sub-diagram. Simply, if any of its children
+    themselves have children, then it's complex enough to extract
+ """
+ children = element.recurse()
+ return any(child.recurse() for child in children)
+
+
+def _apply_diagram_item_enhancements(fn):
+ """
+ decorator to ensure enhancements to a diagram item (such as results name annotations)
+ get applied on return from _to_diagram_element (we do this since there are several
+ returns in _to_diagram_element)
+ """
+
+ def _inner(
+ element: pyparsing.ParserElement,
+ parent: typing.Optional[EditablePartial],
+ lookup: ConverterState = None,
+ vertical: int = None,
+ index: int = 0,
+ name_hint: str = None,
+ show_results_names: bool = False,
+ show_groups: bool = False,
+ ) -> typing.Optional[EditablePartial]:
+
+ ret = fn(
+ element,
+ parent,
+ lookup,
+ vertical,
+ index,
+ name_hint,
+ show_results_names,
+ show_groups,
+ )
+
+ # apply annotation for results name, if present
+ if show_results_names and ret is not None:
+ element_results_name = element.resultsName
+ if element_results_name:
+ # add "*" to indicate if this is a "list all results" name
+ element_results_name += "" if element.modalResults else "*"
+ ret = EditablePartial.from_call(
+ railroad.Group, item=ret, label=element_results_name
+ )
+
+ return ret
+
+ return _inner
+
+
+def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]):
+ non_diagramming_exprs = (
+ pyparsing.ParseElementEnhance,
+ pyparsing.PositionToken,
+ pyparsing.And._ErrorStop,
+ )
+ return [
+ e
+ for e in exprs
+ if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs))
+ ]
+
+
+@_apply_diagram_item_enhancements
+def _to_diagram_element(
+ element: pyparsing.ParserElement,
+ parent: typing.Optional[EditablePartial],
+ lookup: ConverterState = None,
+ vertical: int = None,
+ index: int = 0,
+ name_hint: str = None,
+ show_results_names: bool = False,
+ show_groups: bool = False,
+) -> typing.Optional[EditablePartial]:
+ """
+ Recursively converts a PyParsing Element to a railroad Element
+    :param lookup: The shared converter state that keeps track of useful things
+    :param index: The index of this element within the parent
+    :param parent: The parent of this element in the output tree
+    :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default),
+        it sets the threshold of the number of items before we go vertical. If True, always go vertical; if False,
+        never do so
+    :param name_hint: If provided, this will override the generated name
+    :param show_results_names: bool flag indicating whether to add annotations for results names
+    :param show_groups: bool flag indicating whether to show groups using a bounding box
+    :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed
+ """
+ exprs = element.recurse()
+ name = name_hint or element.customName or element.__class__.__name__
+
+ # Python's id() is used to provide a unique identifier for elements
+ el_id = id(element)
+
+ element_results_name = element.resultsName
+
+ # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram
+ if not element.customName:
+ if isinstance(
+ element,
+ (
+ # pyparsing.TokenConverter,
+ # pyparsing.Forward,
+ pyparsing.Located,
+ ),
+ ):
+ # However, if this element has a useful custom name, and its child does not, we can pass it on to the child
+ if exprs:
+ if not exprs[0].customName:
+ propagated_name = name
+ else:
+ propagated_name = None
+
+ return _to_diagram_element(
+ element.expr,
+ parent=parent,
+ lookup=lookup,
+ vertical=vertical,
+ index=index,
+ name_hint=propagated_name,
+ show_results_names=show_results_names,
+ show_groups=show_groups,
+ )
+
+    # If the element isn't worth extracting, we always treat it as the first time we see it
+ if _worth_extracting(element):
+ if el_id in lookup:
+ # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate,
+ # so we have to extract it into a new diagram.
+ looked_up = lookup[el_id]
+ looked_up.mark_for_extraction(el_id, lookup, name=name_hint)
+ ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name)
+ return ret
+
+ elif el_id in lookup.diagrams:
+ # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we
+ # just put in a marker element that refers to the sub-diagram
+ ret = EditablePartial.from_call(
+ railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
+ )
+ return ret
+
+ # Recursively convert child elements
+ # Here we find the most relevant Railroad element for matching pyparsing Element
+ # We use ``items=[]`` here to hold the place for where the child elements will go once created
+ if isinstance(element, pyparsing.And):
+ # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat
+ # (all will have the same name, and resultsName)
+ if not exprs:
+ return None
+ if len(set((e.name, e.resultsName) for e in exprs)) == 1:
+ ret = EditablePartial.from_call(
+ railroad.OneOrMore, item="", repeat=str(len(exprs))
+ )
+ elif _should_vertical(vertical, exprs):
+ ret = EditablePartial.from_call(railroad.Stack, items=[])
+ else:
+ ret = EditablePartial.from_call(railroad.Sequence, items=[])
+ elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)):
+ if not exprs:
+ return None
+ if _should_vertical(vertical, exprs):
+ ret = EditablePartial.from_call(railroad.Choice, 0, items=[])
+ else:
+ ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[])
+ elif isinstance(element, pyparsing.Each):
+ if not exprs:
+ return None
+ ret = EditablePartial.from_call(EachItem, items=[])
+ elif isinstance(element, pyparsing.NotAny):
+ ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="")
+ elif isinstance(element, pyparsing.FollowedBy):
+ ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", item="")
+ elif isinstance(element, pyparsing.PrecededBy):
+ ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="")
+ elif isinstance(element, pyparsing.Group):
+ if show_groups:
+ ret = EditablePartial.from_call(AnnotatedItem, label="", item="")
+ else:
+ ret = EditablePartial.from_call(railroad.Group, label="", item="")
+ elif isinstance(element, pyparsing.TokenConverter):
+ ret = EditablePartial.from_call(
+ AnnotatedItem, label=type(element).__name__.lower(), item=""
+ )
+ elif isinstance(element, pyparsing.Opt):
+ ret = EditablePartial.from_call(railroad.Optional, item="")
+ elif isinstance(element, pyparsing.OneOrMore):
+ ret = EditablePartial.from_call(railroad.OneOrMore, item="")
+ elif isinstance(element, pyparsing.ZeroOrMore):
+ ret = EditablePartial.from_call(railroad.ZeroOrMore, item="")
+ elif isinstance(element, pyparsing.Group):
+ ret = EditablePartial.from_call(
+ railroad.Group, item=None, label=element_results_name
+ )
+ elif isinstance(element, pyparsing.Empty) and not element.customName:
+ # Skip unnamed "Empty" elements
+ ret = None
+ elif len(exprs) > 1:
+ ret = EditablePartial.from_call(railroad.Sequence, items=[])
+ elif len(exprs) > 0 and not element_results_name:
+ ret = EditablePartial.from_call(railroad.Group, item="", label=name)
+ else:
+ terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName)
+ ret = terminal
+
+ if ret is None:
+ return
+
+ # Indicate this element's position in the tree so we can extract it if necessary
+ lookup[el_id] = ElementState(
+ element=element,
+ converted=ret,
+ parent=parent,
+ parent_index=index,
+ number=lookup.generate_index(),
+ )
+ if element.customName:
+ lookup[el_id].mark_for_extraction(el_id, lookup, element.customName)
+
+ i = 0
+ for expr in exprs:
+ # Add a placeholder index in case we have to extract the child before we even add it to the parent
+ if "items" in ret.kwargs:
+ ret.kwargs["items"].insert(i, None)
+
+ item = _to_diagram_element(
+ expr,
+ parent=ret,
+ lookup=lookup,
+ vertical=vertical,
+ index=i,
+ show_results_names=show_results_names,
+ show_groups=show_groups,
+ )
+
+ # Some elements don't need to be shown in the diagram
+ if item is not None:
+ if "item" in ret.kwargs:
+ ret.kwargs["item"] = item
+ elif "items" in ret.kwargs:
+ # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal
+ ret.kwargs["items"][i] = item
+ i += 1
+ elif "items" in ret.kwargs:
+ # If we're supposed to skip this element, remove it from the parent
+ del ret.kwargs["items"][i]
+
+    # If all of this item's children are None, skip this item
+ if ret and (
+ ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0)
+ or ("item" in ret.kwargs and ret.kwargs["item"] is None)
+ ):
+ ret = EditablePartial.from_call(railroad.Terminal, name)
+
+ # Mark this element as "complete", ie it has all of its children
+ if el_id in lookup:
+ lookup[el_id].complete = True
+
+ if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete:
+ lookup.extract_into_diagram(el_id)
+ if ret is not None:
+ ret = EditablePartial.from_call(
+ railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"]
+ )
+
+ return ret
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/exceptions.py b/third_party/python/pip/pip/_vendor/pyparsing/exceptions.py
new file mode 100644
index 0000000000..a38447bb05
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/exceptions.py
@@ -0,0 +1,267 @@
+# exceptions.py
+
+import re
+import sys
+import typing
+
+from .util import col, line, lineno, _collapse_string_to_ranges
+from .unicode import pyparsing_unicode as ppu
+
+
+class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic):
+ pass
+
+
+_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums)
+_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
+
+
+class ParseBaseException(Exception):
+ """base exception class for all parsing runtime exceptions"""
+
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__(
+ self,
+ pstr: str,
+ loc: int = 0,
+ msg: typing.Optional[str] = None,
+ elem=None,
+ ):
+ self.loc = loc
+ if msg is None:
+ self.msg = pstr
+ self.pstr = ""
+ else:
+ self.msg = msg
+ self.pstr = pstr
+ self.parser_element = self.parserElement = elem
+ self.args = (pstr, loc, msg)
+
+ @staticmethod
+ def explain_exception(exc, depth=16):
+ """
+ Method to take an exception and translate the Python internal traceback into a list
+ of the pyparsing expressions that caused the exception to be raised.
+
+ Parameters:
+
+ - exc - exception raised during parsing (need not be a ParseException, in support
+ of Python exceptions that might be raised in a parse action)
+ - depth (default=16) - number of levels back in the stack trace to list expression
+ and function names; if None, the full stack trace names will be listed; if 0, only
+ the failing input line, marker, and exception string will be shown
+
+ Returns a multi-line string listing the ParserElements and/or function names in the
+ exception's stack trace.
+ """
+ import inspect
+ from .core import ParserElement
+
+ if depth is None:
+ depth = sys.getrecursionlimit()
+ ret = []
+ if isinstance(exc, ParseBaseException):
+ ret.append(exc.line)
+ ret.append(" " * (exc.column - 1) + "^")
+ ret.append("{}: {}".format(type(exc).__name__, exc))
+
+ if depth > 0:
+ callers = inspect.getinnerframes(exc.__traceback__, context=depth)
+ seen = set()
+ for i, ff in enumerate(callers[-depth:]):
+ frm = ff[0]
+
+ f_self = frm.f_locals.get("self", None)
+ if isinstance(f_self, ParserElement):
+ if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"):
+ continue
+ if id(f_self) in seen:
+ continue
+ seen.add(id(f_self))
+
+ self_type = type(f_self)
+ ret.append(
+ "{}.{} - {}".format(
+ self_type.__module__, self_type.__name__, f_self
+ )
+ )
+
+ elif f_self is not None:
+ self_type = type(f_self)
+ ret.append("{}.{}".format(self_type.__module__, self_type.__name__))
+
+ else:
+ code = frm.f_code
+ if code.co_name in ("wrapper", "<module>"):
+ continue
+
+ ret.append("{}".format(code.co_name))
+
+ depth -= 1
+ if not depth:
+ break
+
+ return "\n".join(ret)
+
+ @classmethod
+ def _from_exception(cls, pe):
+ """
+ internal factory method to simplify creating one type of ParseException
+ from another - avoids having __init__ signature conflicts among subclasses
+ """
+ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+ @property
+ def line(self) -> str:
+ """
+ Return the line of text where the exception occurred.
+ """
+ return line(self.loc, self.pstr)
+
+ @property
+ def lineno(self) -> int:
+ """
+ Return the 1-based line number of text where the exception occurred.
+ """
+ return lineno(self.loc, self.pstr)
+
+ @property
+ def col(self) -> int:
+ """
+ Return the 1-based column on the line of text where the exception occurred.
+ """
+ return col(self.loc, self.pstr)
+
+ @property
+ def column(self) -> int:
+ """
+ Return the 1-based column on the line of text where the exception occurred.
+ """
+ return col(self.loc, self.pstr)
+
+ def __str__(self) -> str:
+ if self.pstr:
+ if self.loc >= len(self.pstr):
+ foundstr = ", found end of text"
+ else:
+ # pull out next word at error location
+ found_match = _exception_word_extractor.match(self.pstr, self.loc)
+ if found_match is not None:
+ found = found_match.group(0)
+ else:
+ found = self.pstr[self.loc : self.loc + 1]
+ foundstr = (", found %r" % found).replace(r"\\", "\\")
+ else:
+ foundstr = ""
+ return "{}{} (at char {}), (line:{}, col:{})".format(
+ self.msg, foundstr, self.loc, self.lineno, self.column
+ )
+
+ def __repr__(self):
+ return str(self)
+
+ def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str:
+ """
+ Extracts the exception line from the input string, and marks
+ the location of the exception with a special symbol.
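+
+        Example (illustrative)::
+
+            try:
+                Word(nums).parse_string("abc")
+            except ParseException as pe:
+                print(pe.mark_input_line())  # -> '>!<abc'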
+ """
+ markerString = marker_string if marker_string is not None else markerString
+ line_str = self.line
+ line_column = self.column - 1
+ if markerString:
+ line_str = "".join(
+ (line_str[:line_column], markerString, line_str[line_column:])
+ )
+ return line_str.strip()
+
+ def explain(self, depth=16) -> str:
+ """
+ Method to translate the Python internal traceback into a list
+ of the pyparsing expressions that caused the exception to be raised.
+
+ Parameters:
+
+ - depth (default=16) - number of levels back in the stack trace to list expression
+ and function names; if None, the full stack trace names will be listed; if 0, only
+ the failing input line, marker, and exception string will be shown
+
+ Returns a multi-line string listing the ParserElements and/or function names in the
+ exception's stack trace.
+
+ Example::
+
+ expr = pp.Word(pp.nums) * 3
+ try:
+ expr.parse_string("123 456 A789")
+ except pp.ParseException as pe:
+ print(pe.explain(depth=0))
+
+ prints::
+
+ 123 456 A789
+ ^
+ ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)
+
+ Note: the diagnostic output will include string representations of the expressions
+ that failed to parse. These representations will be more helpful if you use `set_name` to
+ give identifiable names to your expressions. Otherwise they will use the default string
+ forms, which may be cryptic to read.
+
+ Note: pyparsing's default truncation of exception tracebacks may also truncate the
+ stack of expressions that are displayed in the ``explain`` output. To get the full listing
+ of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
+ """
+ return self.explain_exception(self, depth)
+
+ markInputline = mark_input_line
+
+
+class ParseException(ParseBaseException):
+ """
+ Exception thrown when a parse expression doesn't match the input string
+
+ Example::
+
+ try:
+ Word(nums).set_name("integer").parse_string("ABC")
+ except ParseException as pe:
+ print(pe)
+ print("column: {}".format(pe.column))
+
+ prints::
+
+ Expected integer (at char 0), (line:1, col:1)
+ column: 1
+
+ """
+
+
+class ParseFatalException(ParseBaseException):
+ """
+ User-throwable exception thrown when inconsistent parse content
+ is found; stops all parsing immediately
+ """
+
+
+class ParseSyntaxException(ParseFatalException):
+ """
+ Just like :class:`ParseFatalException`, but thrown internally
+ when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
+ that parsing is to stop immediately because an unbacktrackable
+ syntax error has been found.
+ """
+
+
+class RecursiveGrammarException(Exception):
+ """
+ Exception thrown by :class:`ParserElement.validate` if the
+ grammar could be left-recursive; parser may need to enable
+ left recursion using :class:`ParserElement.enable_left_recursion<ParserElement.enable_left_recursion>`
+ """
+
+ def __init__(self, parseElementList):
+ self.parseElementTrace = parseElementList
+
+ def __str__(self) -> str:
+ return "RecursiveGrammarException: {}".format(self.parseElementTrace)
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/helpers.py b/third_party/python/pip/pip/_vendor/pyparsing/helpers.py
new file mode 100644
index 0000000000..9588b3b780
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/helpers.py
@@ -0,0 +1,1088 @@
+# helpers.py
+import html.entities
+import re
+import typing
+
+from . import __diag__
+from .core import *
+from .util import _bslash, _flatten, _escape_regex_range_chars
+
+
+#
+# global helpers
+#
+def delimited_list(
+ expr: Union[str, ParserElement],
+ delim: Union[str, ParserElement] = ",",
+ combine: bool = False,
+ min: typing.Optional[int] = None,
+ max: typing.Optional[int] = None,
+ *,
+ allow_trailing_delim: bool = False,
+) -> ParserElement:
+ """Helper to define a delimited list of expressions - the delimiter
+ defaults to ','. By default, the list elements and delimiters can
+    have intervening whitespace and comments, but this can be
+ overridden by passing ``combine=True`` in the constructor. If
+ ``combine`` is set to ``True``, the matching tokens are
+ returned as a single token string, with the delimiters included;
+ otherwise, the matching tokens are returned as a list of tokens,
+ with the delimiters suppressed.
+
+ If ``allow_trailing_delim`` is set to True, then the list may end with
+ a delimiter.
+
+ Example::
+
+ delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+ delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+ """
+ if isinstance(expr, str_type):
+ expr = ParserElement._literalStringClass(expr)
+
+ dlName = "{expr} [{delim} {expr}]...{end}".format(
+ expr=str(expr.copy().streamline()),
+ delim=str(delim),
+ end=" [{}]".format(str(delim)) if allow_trailing_delim else "",
+ )
+
+ if not combine:
+ delim = Suppress(delim)
+
+ if min is not None:
+ if min < 1:
+ raise ValueError("min must be greater than 0")
+ min -= 1
+ if max is not None:
+ if min is not None and max <= min:
+ raise ValueError("max must be greater than, or equal to min")
+ max -= 1
+ delimited_list_expr = expr + (delim + expr)[min, max]
+
+ if allow_trailing_delim:
+ delimited_list_expr += Opt(delim)
+
+ if combine:
+ return Combine(delimited_list_expr).set_name(dlName)
+ else:
+ return delimited_list_expr.set_name(dlName)
+
+
+def counted_array(
+ expr: ParserElement,
+ int_expr: typing.Optional[ParserElement] = None,
+ *,
+ intExpr: typing.Optional[ParserElement] = None,
+) -> ParserElement:
+ """Helper to define a counted list of expressions.
+
+ This helper defines a pattern of the form::
+
+ integer expr expr expr...
+
+ where the leading integer tells how many expr expressions follow.
+    The matched tokens are returned as a list of ``expr`` tokens; the
+    leading count token is suppressed.
+
+ If ``int_expr`` is specified, it should be a pyparsing expression
+ that produces an integer value.
+
+ Example::
+
+ counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd']
+
+ # in this parser, the leading integer value is given in binary,
+ # '10' indicating that 2 values are in the array
+ binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
+ counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd']
+
+ # if other fields must be parsed after the count but before the
+ # list items, give the fields results names and they will
+ # be preserved in the returned ParseResults:
+ count_with_metadata = integer + Word(alphas)("type")
+ typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
+ result = typed_array.parse_string("3 bool True True False")
+ print(result.dump())
+
+ # prints
+ # ['True', 'True', 'False']
+ # - items: ['True', 'True', 'False']
+ # - type: 'bool'
+ """
+ intExpr = intExpr or int_expr
+ array_expr = Forward()
+
+ def count_field_parse_action(s, l, t):
+ nonlocal array_expr
+ n = t[0]
+ array_expr <<= (expr * n) if n else Empty()
+ # clear list contents, but keep any named results
+ del t[:]
+
+ if intExpr is None:
+ intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
+ else:
+ intExpr = intExpr.copy()
+ intExpr.set_name("arrayLen")
+ intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
+ return (intExpr + array_expr).set_name("(len) " + str(expr) + "...")
+
+
+def match_previous_literal(expr: ParserElement) -> ParserElement:
+ """Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks for
+ a 'repeat' of a previous expression. For example::
+
+ first = Word(nums)
+ second = match_previous_literal(first)
+ match_expr = first + ":" + second
+
+ will match ``"1:1"``, but not ``"1:2"``. Because this
+    matches a previous literal, it will also match the leading
+ ``"1:1"`` in ``"1:10"``. If this is not desired, use
+ :class:`match_previous_expr`. Do *not* use with packrat parsing
+ enabled.
+ """
+ rep = Forward()
+
+ def copy_token_to_repeater(s, l, t):
+ if t:
+ if len(t) == 1:
+ rep << t[0]
+ else:
+ # flatten t tokens
+ tflat = _flatten(t.as_list())
+ rep << And(Literal(tt) for tt in tflat)
+ else:
+ rep << Empty()
+
+ expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
+ rep.set_name("(prev) " + str(expr))
+ return rep
+
+
+def match_previous_expr(expr: ParserElement) -> ParserElement:
+ """Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks for
+ a 'repeat' of a previous expression. For example::
+
+ first = Word(nums)
+ second = match_previous_expr(first)
+ match_expr = first + ":" + second
+
+ will match ``"1:1"``, but not ``"1:2"``. Because this
+    matches by expressions, it will *not* match the leading ``"1:1"``
+ in ``"1:10"``; the expressions are evaluated first, and then
+ compared, so ``"1"`` is compared with ``"10"``. Do *not* use
+ with packrat parsing enabled.
+ """
+ rep = Forward()
+ e2 = expr.copy()
+ rep <<= e2
+
+ def copy_token_to_repeater(s, l, t):
+ matchTokens = _flatten(t.as_list())
+
+ def must_match_these_tokens(s, l, t):
+ theseTokens = _flatten(t.as_list())
+ if theseTokens != matchTokens:
+ raise ParseException(
+ s, l, "Expected {}, found{}".format(matchTokens, theseTokens)
+ )
+
+ rep.set_parse_action(must_match_these_tokens, callDuringTry=True)
+
+ expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
+ rep.set_name("(prev) " + str(expr))
+ return rep
+
+
+def one_of(
+ strs: Union[typing.Iterable[str], str],
+ caseless: bool = False,
+ use_regex: bool = True,
+ as_keyword: bool = False,
+ *,
+ useRegex: bool = True,
+ asKeyword: bool = False,
+) -> ParserElement:
+ """Helper to quickly define a set of alternative :class:`Literal` s,
+ and makes sure to do longest-first testing when there is a conflict,
+ regardless of the input order, but returns
+ a :class:`MatchFirst` for best performance.
+
+ Parameters:
+
+ - ``strs`` - a string of space-delimited literals, or a collection of
+ string literals
+ - ``caseless`` - treat all literals as caseless - (default= ``False``)
+ - ``use_regex`` - as an optimization, will
+ generate a :class:`Regex` object; otherwise, will generate
+ a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
+ creating a :class:`Regex` raises an exception) - (default= ``True``)
+ - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
+ generated expressions - (default= ``False``)
+ - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
+ but will be removed in a future release
+
+ Example::
+
+ comp_oper = one_of("< = > <= >= !=")
+ var = Word(alphas)
+ number = Word(nums)
+ term = var | number
+ comparison_expr = term + comp_oper + term
+ print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12"))
+
+ prints::
+
+ [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+ """
+ asKeyword = asKeyword or as_keyword
+ useRegex = useRegex and use_regex
+
+ if (
+ isinstance(caseless, str_type)
+ and __diag__.warn_on_multiple_string_args_to_oneof
+ ):
+ warnings.warn(
+ "More than one string argument passed to one_of, pass"
+ " choices as a list or space-delimited string",
+ stacklevel=2,
+ )
+
+ if caseless:
+ isequal = lambda a, b: a.upper() == b.upper()
+ masks = lambda a, b: b.upper().startswith(a.upper())
+ parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
+ else:
+ isequal = lambda a, b: a == b
+ masks = lambda a, b: b.startswith(a)
+ parseElementClass = Keyword if asKeyword else Literal
+
+ symbols: List[str] = []
+ if isinstance(strs, str_type):
+ symbols = strs.split()
+ elif isinstance(strs, Iterable):
+ symbols = list(strs)
+ else:
+ raise TypeError("Invalid argument to one_of, expected string or iterable")
+ if not symbols:
+ return NoMatch()
+
+    # reorder given symbols to avoid masking longer choices with shorter ones
+ # (but only if the given symbols are not just single characters)
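+    # e.g. for one_of("< <= >"), "<=" must be tested before "<", or "<=" would
+    # never match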
+ if any(len(sym) > 1 for sym in symbols):
+ i = 0
+ while i < len(symbols) - 1:
+ cur = symbols[i]
+ for j, other in enumerate(symbols[i + 1 :]):
+ if isequal(other, cur):
+ del symbols[i + j + 1]
+ break
+ elif masks(cur, other):
+ del symbols[i + j + 1]
+ symbols.insert(i, other)
+ break
+ else:
+ i += 1
+
+ if useRegex:
+ re_flags: int = re.IGNORECASE if caseless else 0
+
+ try:
+ if all(len(sym) == 1 for sym in symbols):
+ # symbols are just single characters, create range regex pattern
+ patt = "[{}]".format(
+ "".join(_escape_regex_range_chars(sym) for sym in symbols)
+ )
+ else:
+ patt = "|".join(re.escape(sym) for sym in symbols)
+
+ # wrap with \b word break markers if defining as keywords
+ if asKeyword:
+ patt = r"\b(?:{})\b".format(patt)
+
+ ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))
+
+ if caseless:
+ # add parse action to return symbols as specified, not in random
+ # casing as found in input string
+ symbol_map = {sym.lower(): sym for sym in symbols}
+ ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])
+
+ return ret
+
+ except re.error:
+ warnings.warn(
+ "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
+ )
+
+ # last resort, just use MatchFirst
+ return MatchFirst(parseElementClass(sym) for sym in symbols).set_name(
+ " | ".join(symbols)
+ )
+
+
+def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
+ """Helper to easily and clearly define a dictionary by specifying
+ the respective patterns for the key and value. Takes care of
+ defining the :class:`Dict`, :class:`ZeroOrMore`, and
+ :class:`Group` tokens in the proper order. The key pattern
+ can include delimiting markers or punctuation, as long as they are
+ suppressed, thereby leaving the significant key text. The value
+ pattern can include named results, so that the :class:`Dict` results
+ can include named token fields.
+
+ Example::
+
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join))
+ print(attr_expr[1, ...].parse_string(text).dump())
+
+ attr_label = label
+ attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)
+
+ # similar to Dict, but simpler call format
+ result = dict_of(attr_label, attr_value).parse_string(text)
+ print(result.dump())
+ print(result['shape'])
+ print(result.shape) # object attribute access works too
+ print(result.as_dict())
+
+ prints::
+
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: 'light blue'
+ - posn: 'upper left'
+ - shape: 'SQUARE'
+ - texture: 'burlap'
+ SQUARE
+ SQUARE
+ {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+ """
+ return Dict(OneOrMore(Group(key + value)))
+
+
+def original_text_for(
+ expr: ParserElement, as_string: bool = True, *, asString: bool = True
+) -> ParserElement:
+ """Helper to return the original, untokenized text for a given
+ expression. Useful to restore the parsed fields of an HTML start
+ tag into the raw tag text itself, or to revert separate tokens with
+ intervening whitespace back to the original matching input text. By
+    default, returns a string containing the original parsed text.
+
+ If the optional ``as_string`` argument is passed as
+ ``False``, then the return value is
+ a :class:`ParseResults` containing any results names that
+ were originally matched, and a single token containing the original
+ matched text from the input string. So if the expression passed to
+ :class:`original_text_for` contains expressions with defined
+ results names, you must set ``as_string`` to ``False`` if you
+ want to preserve those results name values.
+
+ The ``asString`` pre-PEP8 argument is retained for compatibility,
+ but will be removed in a future release.
+
+ Example::
+
+ src = "this is test <b> bold <i>text</i> </b> normal text "
+ for tag in ("b", "i"):
+ opener, closer = make_html_tags(tag)
+ patt = original_text_for(opener + SkipTo(closer) + closer)
+ print(patt.search_string(src)[0])
+
+ prints::
+
+ ['<b> bold <i>text</i> </b>']
+ ['<i>text</i>']
+ """
+ asString = asString and as_string
+
+ locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
+ endlocMarker = locMarker.copy()
+ endlocMarker.callPreparse = False
+ matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+ if asString:
+ extractText = lambda s, l, t: s[t._original_start : t._original_end]
+ else:
+
+ def extractText(s, l, t):
+ t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]
+
+ matchExpr.set_parse_action(extractText)
+ matchExpr.ignoreExprs = expr.ignoreExprs
+ matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
+ return matchExpr
+
+
+def ungroup(expr: ParserElement) -> ParserElement:
+ """Helper to undo pyparsing's default grouping of And expressions,
+ even if all but one are non-empty.
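+
+    Example (a sketch)::
+
+        grouped = Group(Word(alphas) + Word(nums))
+        print(grouped.parse_string("abc 123"))           # -> [['abc', '123']]
+        print(ungroup(grouped).parse_string("abc 123"))  # -> ['abc', '123']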
+ """
+ return TokenConverter(expr).add_parse_action(lambda t: t[0])
+
+
+def locatedExpr(expr: ParserElement) -> ParserElement:
+ """
+ (DEPRECATED - future code should use the Located class)
+ Helper to decorate a returned token with its starting and ending
+ locations in the input string.
+
+ This helper adds the following results names:
+
+ - ``locn_start`` - location where matched expression begins
+ - ``locn_end`` - location where matched expression ends
+ - ``value`` - the actual parsed results
+
+    Be careful if the input text contains ``<TAB>`` characters; you
+    may want to call :class:`ParserElement.parseWithTabs`.
+
+ Example::
+
+ wd = Word(alphas)
+ for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+ print(match)
+
+ prints::
+
+ [[0, 'ljsdf', 5]]
+ [[8, 'lksdjjf', 15]]
+ [[18, 'lkkjj', 23]]
+ """
+ locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
+ return Group(
+ locator("locn_start")
+ + expr("value")
+ + locator.copy().leaveWhitespace()("locn_end")
+ )
+
+
+def nested_expr(
+ opener: Union[str, ParserElement] = "(",
+ closer: Union[str, ParserElement] = ")",
+ content: typing.Optional[ParserElement] = None,
+ ignore_expr: ParserElement = quoted_string(),
+ *,
+ ignoreExpr: ParserElement = quoted_string(),
+) -> ParserElement:
+ """Helper method for defining nested lists enclosed in opening and
+ closing delimiters (``"("`` and ``")"`` are the default).
+
+ Parameters:
+ - ``opener`` - opening character for a nested list
+ (default= ``"("``); can also be a pyparsing expression
+ - ``closer`` - closing character for a nested list
+ (default= ``")"``); can also be a pyparsing expression
+ - ``content`` - expression for items within the nested lists
+ (default= ``None``)
+ - ``ignore_expr`` - expression for ignoring opening and closing delimiters
+ (default= :class:`quoted_string`)
+ - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
+ but will be removed in a future release
+
+ If an expression is not provided for the content argument, the
+ nested expression will capture all whitespace-delimited content
+ between delimiters as a list of separate values.
+
+ Use the ``ignore_expr`` argument to define expressions that may
+ contain opening or closing characters that should not be treated as
+ opening or closing characters for nesting, such as quoted_string or
+ a comment expression. Specify multiple expressions using an
+ :class:`Or` or :class:`MatchFirst`. The default is
+ :class:`quoted_string`, but if no expressions are to be ignored, then
+ pass ``None`` for this argument.
+
+ Example::
+
+ data_type = one_of("void int short long char float double")
+ decl_data_type = Combine(data_type + Opt(Word('*')))
+ ident = Word(alphas+'_', alphanums+'_')
+ number = pyparsing_common.number
+ arg = Group(decl_data_type + ident)
+ LPAR, RPAR = map(Suppress, "()")
+
+ code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))
+
+ c_function = (decl_data_type("type")
+ + ident("name")
+ + LPAR + Opt(delimited_list(arg), [])("args") + RPAR
+ + code_body("body"))
+ c_function.ignore(c_style_comment)
+
+ source_code = '''
+ int is_odd(int x) {
+ return (x%2);
+ }
+
+ int dec_to_hex(char hchar) {
+ if (hchar >= '0' && hchar <= '9') {
+ return (ord(hchar)-ord('0'));
+ } else {
+ return (10+ord(hchar)-ord('A'));
+ }
+ }
+ '''
+ for func in c_function.search_string(source_code):
+ print("%(name)s (%(type)s) args: %(args)s" % func)
+
+
+ prints::
+
+ is_odd (int) args: [['int', 'x']]
+ dec_to_hex (int) args: [['char', 'hchar']]
+ """
+ if ignoreExpr != ignore_expr:
+ ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
+ if opener == closer:
+ raise ValueError("opening and closing strings cannot be the same")
+ if content is None:
+ if isinstance(opener, str_type) and isinstance(closer, str_type):
+ if len(opener) == 1 and len(closer) == 1:
+ if ignoreExpr is not None:
+ content = Combine(
+ OneOrMore(
+ ~ignoreExpr
+ + CharsNotIn(
+ opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
+ exact=1,
+ )
+ )
+ ).set_parse_action(lambda t: t[0].strip())
+ else:
+ content = empty.copy() + CharsNotIn(
+ opener + closer + ParserElement.DEFAULT_WHITE_CHARS
+ ).set_parse_action(lambda t: t[0].strip())
+ else:
+ if ignoreExpr is not None:
+ content = Combine(
+ OneOrMore(
+ ~ignoreExpr
+ + ~Literal(opener)
+ + ~Literal(closer)
+ + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
+ )
+ ).set_parse_action(lambda t: t[0].strip())
+ else:
+ content = Combine(
+ OneOrMore(
+ ~Literal(opener)
+ + ~Literal(closer)
+ + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
+ )
+ ).set_parse_action(lambda t: t[0].strip())
+ else:
+ raise ValueError(
+ "opening and closing arguments must be strings if no content expression is given"
+ )
+ ret = Forward()
+ if ignoreExpr is not None:
+ ret <<= Group(
+ Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
+ )
+ else:
+ ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
+ ret.set_name("nested %s%s expression" % (opener, closer))
+ return ret
+
+
+def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
+ """Internal helper to construct opening and closing tag expressions, given a tag name"""
+ if isinstance(tagStr, str_type):
+ resname = tagStr
+ tagStr = Keyword(tagStr, caseless=not xml)
+ else:
+ resname = tagStr.name
+
+ tagAttrName = Word(alphas, alphanums + "_-:")
+ if xml:
+ tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
+ openTag = (
+ suppress_LT
+ + tagStr("tag")
+ + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+ + Opt("/", default=[False])("empty").set_parse_action(
+ lambda s, l, t: t[0] == "/"
+ )
+ + suppress_GT
+ )
+ else:
+ tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
+ printables, exclude_chars=">"
+ )
+ openTag = (
+ suppress_LT
+ + tagStr("tag")
+ + Dict(
+ ZeroOrMore(
+ Group(
+ tagAttrName.set_parse_action(lambda t: t[0].lower())
+ + Opt(Suppress("=") + tagAttrValue)
+ )
+ )
+ )
+ + Opt("/", default=[False])("empty").set_parse_action(
+ lambda s, l, t: t[0] == "/"
+ )
+ + suppress_GT
+ )
+ closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)
+
+ openTag.set_name("<%s>" % resname)
+ # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
+ openTag.add_parse_action(
+ lambda t: t.__setitem__(
+ "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
+ )
+ )
+ closeTag = closeTag(
+ "end" + "".join(resname.replace(":", " ").title().split())
+ ).set_name("</%s>" % resname)
+ openTag.tag = resname
+ closeTag.tag = resname
+ openTag.tag_body = SkipTo(closeTag())
+ return openTag, closeTag
+
+
+def make_html_tags(
+ tag_str: Union[str, ParserElement]
+) -> Tuple[ParserElement, ParserElement]:
+ """Helper to construct opening and closing tag expressions for HTML,
+ given a tag name. Matches tags in either upper or lower case,
+ attributes with namespaces and with quoted or unquoted values.
+
+ Example::
+
+ text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+ # make_html_tags returns pyparsing expressions for the opening and
+ # closing tags as a 2-tuple
+ a, a_end = make_html_tags("A")
+ link_expr = a + SkipTo(a_end)("link_text") + a_end
+
+ for link in link_expr.search_string(text):
+ # attributes in the <A> tag (like "href" shown here) are
+ # also accessible as named results
+ print(link.link_text, '->', link.href)
+
+ prints::
+
+ pyparsing -> https://github.com/pyparsing/pyparsing/wiki
+ """
+ return _makeTags(tag_str, False)
+
+
+def make_xml_tags(
+ tag_str: Union[str, ParserElement]
+) -> Tuple[ParserElement, ParserElement]:
+ """Helper to construct opening and closing tag expressions for XML,
+ given a tag name. Matches tags only in the given upper/lower case.
+
+ Example: similar to :class:`make_html_tags`
+ """
+ return _makeTags(tag_str, True)
+
+
+any_open_tag: ParserElement
+any_close_tag: ParserElement
+any_open_tag, any_close_tag = make_html_tags(
+ Word(alphas, alphanums + "_:").set_name("any tag")
+)
+
+_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
+common_html_entity = Regex("&(?P<entity>" + "|".join(_htmlEntityMap) + ");").set_name(
+ "common HTML entity"
+)
+
+
+def replace_html_entity(t):
+ """Helper parser action to replace common HTML entities with their special characters"""
+ return _htmlEntityMap.get(t.entity)
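+
+
+# Typical use (a sketch): attach ``replace_html_entity`` as a parse action on a
+# copy of ``common_html_entity`` so matched entities are replaced by their
+# literal characters:
+#
+#   decoded = common_html_entity.copy().set_parse_action(replace_html_entity)
+#   print(decoded.transform_string("a &lt; b"))  # prints 'a < b'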
+
+
+class OpAssoc(Enum):
+ LEFT = 1
+ RIGHT = 2
+
+
+InfixNotationOperatorArgType = Union[
+ ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]]
+]
+InfixNotationOperatorSpec = Union[
+ Tuple[
+ InfixNotationOperatorArgType,
+ int,
+ OpAssoc,
+ typing.Optional[ParseAction],
+ ],
+ Tuple[
+ InfixNotationOperatorArgType,
+ int,
+ OpAssoc,
+ ],
+]
+
+
+def infix_notation(
+ base_expr: ParserElement,
+ op_list: List[InfixNotationOperatorSpec],
+ lpar: Union[str, ParserElement] = Suppress("("),
+ rpar: Union[str, ParserElement] = Suppress(")"),
+) -> ParserElement:
+ """Helper method for constructing grammars of expressions made up of
+ operators working in a precedence hierarchy. Operators may be unary
+ or binary, left- or right-associative. Parse actions can also be
+ attached to operator expressions. The generated parser will also
+ recognize the use of parentheses to override operator precedences
+ (see example below).
+
+ Note: if you define a deep operator list, you may see performance
+ issues when using infix_notation. See
+ :class:`ParserElement.enable_packrat` for a mechanism to potentially
+ improve your parser performance.
+
+ Parameters:
+ - ``base_expr`` - expression representing the most basic operand to
+ be used in the expression
+ - ``op_list`` - list of tuples, one for each operator precedence level
+ in the expression grammar; each tuple is of the form ``(op_expr,
+ num_operands, right_left_assoc, (optional)parse_action)``, where:
+
+ - ``op_expr`` is the pyparsing expression for the operator; may also
+ be a string, which will be converted to a Literal; if ``num_operands``
+ is 3, ``op_expr`` is a tuple of two expressions, for the two
+ operators separating the 3 terms
+ - ``num_operands`` is the number of terms for this operator (must be 1,
+ 2, or 3)
+ - ``right_left_assoc`` is the indicator whether the operator is right
+ or left associative, using the pyparsing-defined constants
+ ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
+ - ``parse_action`` is the parse action to be associated with
+ expressions matching this operator expression (the parse action
+ tuple member may be omitted); if the parse action is passed
+ a tuple or list of functions, this is equivalent to calling
+ ``set_parse_action(*fn)``
+ (:class:`ParserElement.set_parse_action`)
+ - ``lpar`` - expression for matching left-parentheses; if passed as a
+ str, then will be parsed as Suppress(lpar). If lpar is passed as
+ an expression (such as ``Literal('(')``), then it will be kept in
+ the parsed results, and grouped with them. (default= ``Suppress('(')``)
+ - ``rpar`` - expression for matching right-parentheses; if passed as a
+ str, then will be parsed as Suppress(rpar). If rpar is passed as
+ an expression (such as ``Literal(')')``), then it will be kept in
+ the parsed results, and grouped with them. (default= ``Suppress(')')``)
+
+ Example::
+
+ # simple example of four-function arithmetic with ints and
+ # variable names
+ integer = pyparsing_common.signed_integer
+ varname = pyparsing_common.identifier
+
+ arith_expr = infix_notation(integer | varname,
+ [
+ ('-', 1, OpAssoc.RIGHT),
+ (one_of('* /'), 2, OpAssoc.LEFT),
+ (one_of('+ -'), 2, OpAssoc.LEFT),
+ ])
+
+ arith_expr.run_tests('''
+ 5+3*6
+ (5+3)*6
+ -2--11
+ ''', full_dump=False)
+
+ prints::
+
+ 5+3*6
+ [[5, '+', [3, '*', 6]]]
+
+ (5+3)*6
+ [[[5, '+', 3], '*', 6]]
+
+ -2--11
+ [[['-', 2], '-', ['-', 11]]]
+ """
+ # captive version of FollowedBy that does not do parse actions or capture results names
+ class _FB(FollowedBy):
+ def parseImpl(self, instring, loc, doActions=True):
+ self.expr.try_parse(instring, loc)
+ return loc, []
+
+ _FB.__name__ = "FollowedBy>"
+
+ ret = Forward()
+ if isinstance(lpar, str):
+ lpar = Suppress(lpar)
+ if isinstance(rpar, str):
+ rpar = Suppress(rpar)
+
+ # if lpar and rpar are not suppressed, wrap in group
+    if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
+ lastExpr = base_expr | Group(lpar + ret + rpar)
+ else:
+ lastExpr = base_expr | (lpar + ret + rpar)
+
+ for i, operDef in enumerate(op_list):
+ opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]
+ if isinstance(opExpr, str_type):
+ opExpr = ParserElement._literalStringClass(opExpr)
+ if arity == 3:
+ if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
+ raise ValueError(
+ "if numterms=3, opExpr must be a tuple or list of two expressions"
+ )
+ opExpr1, opExpr2 = opExpr
+ term_name = "{}{} term".format(opExpr1, opExpr2)
+ else:
+ term_name = "{} term".format(opExpr)
+
+ if not 1 <= arity <= 3:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+
+ if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
+ raise ValueError("operator must indicate right or left associativity")
+
+ thisExpr: Forward = Forward().set_name(term_name)
+ if rightLeftAssoc is OpAssoc.LEFT:
+ if arity == 1:
+ matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
+ lastExpr + (opExpr + lastExpr)[1, ...]
+ )
+ else:
+ matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
+ elif arity == 3:
+ matchExpr = _FB(
+ lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
+ ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
+ elif rightLeftAssoc is OpAssoc.RIGHT:
+ if arity == 1:
+ # try to avoid LR with this extra test
+ if not isinstance(opExpr, Opt):
+ opExpr = Opt(opExpr)
+ matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
+ lastExpr + (opExpr + thisExpr)[1, ...]
+ )
+ else:
+ matchExpr = _FB(lastExpr + thisExpr) + Group(
+ lastExpr + thisExpr[1, ...]
+ )
+ elif arity == 3:
+ matchExpr = _FB(
+ lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
+ ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
+ if pa:
+ if isinstance(pa, (tuple, list)):
+ matchExpr.set_parse_action(*pa)
+ else:
+ matchExpr.set_parse_action(pa)
+        thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
+ lastExpr = thisExpr
+ ret <<= lastExpr
+ return ret
+
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
+ """
+ (DEPRECATED - use IndentedBlock class instead)
+ Helper method for defining space-delimited indentation blocks,
+ such as those used to define block statements in Python source code.
+
+ Parameters:
+
+ - ``blockStatementExpr`` - expression defining syntax of statement that
+ is repeated within the indented block
+ - ``indentStack`` - list created by caller to manage indentation stack
+ (multiple ``statementWithIndentedBlock`` expressions within a single
+ grammar should share a common ``indentStack``)
+ - ``indent`` - boolean indicating whether block must be indented beyond
+ the current level; set to ``False`` for block of left-most statements
+ (default= ``True``)
+
+ A valid block must contain at least one ``blockStatement``.
+
+ (Note that indentedBlock uses internal parse actions which make it
+ incompatible with packrat parsing.)
+
+ Example::
+
+ data = '''
+ def A(z):
+ A1
+ B = 100
+ G = A2
+ A2
+ A3
+ B
+ def BB(a,b,c):
+ BB1
+ def BBA():
+ bba1
+ bba2
+ bba3
+ C
+ D
+ def spam(x,y):
+ def eggs(z):
+ pass
+ '''
+
+
+ indentStack = [1]
+ stmt = Forward()
+
+ identifier = Word(alphas, alphanums)
+ funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
+ func_body = indentedBlock(stmt, indentStack)
+ funcDef = Group(funcDecl + func_body)
+
+ rvalue = Forward()
+ funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
+ rvalue << (funcCall | identifier | Word(nums))
+ assignment = Group(identifier + "=" + rvalue)
+ stmt << (funcDef | assignment | identifier)
+
+ module_body = stmt[1, ...]
+
+ parseTree = module_body.parseString(data)
+ parseTree.pprint()
+
+ prints::
+
+ [['def',
+ 'A',
+ ['(', 'z', ')'],
+ ':',
+ [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+ 'B',
+ ['def',
+ 'BB',
+ ['(', 'a', 'b', 'c', ')'],
+ ':',
+ [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+ 'C',
+ 'D',
+ ['def',
+ 'spam',
+ ['(', 'x', 'y', ')'],
+ ':',
+ [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
+ """
+ backup_stacks.append(indentStack[:])
+
+ def reset_stack():
+ indentStack[:] = backup_stacks[-1]
+
+ def checkPeerIndent(s, l, t):
+ if l >= len(s):
+ return
+ curCol = col(l, s)
+ if curCol != indentStack[-1]:
+ if curCol > indentStack[-1]:
+ raise ParseException(s, l, "illegal nesting")
+ raise ParseException(s, l, "not a peer entry")
+
+ def checkSubIndent(s, l, t):
+ curCol = col(l, s)
+ if curCol > indentStack[-1]:
+ indentStack.append(curCol)
+ else:
+ raise ParseException(s, l, "not a subentry")
+
+ def checkUnindent(s, l, t):
+ if l >= len(s):
+ return
+ curCol = col(l, s)
+ if not (indentStack and curCol in indentStack):
+ raise ParseException(s, l, "not an unindent")
+ if curCol < indentStack[-1]:
+ indentStack.pop()
+
+ NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
+ INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
+ PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
+ UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
+ if indent:
+ smExpr = Group(
+ Opt(NL)
+ + INDENT
+ + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+ + UNDENT
+ )
+ else:
+ smExpr = Group(
+ Opt(NL)
+ + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+ + Opt(UNDENT)
+ )
+
+ # add a parse action to remove backup_stack from list of backups
+ smExpr.add_parse_action(
+ lambda: backup_stacks.pop(-1) and None if backup_stacks else None
+ )
+ smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
+ blockStatementExpr.ignore(_bslash + LineEnd())
+ return smExpr.set_name("indented block")
+
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
+ "C style comment"
+)
+"Comment of the form ``/* ... */``"
+
+html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
+"Comment of the form ``<!-- ... -->``"
+
+rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
+dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
+"Comment of the form ``// ... (to end of line)``"
+
+cpp_style_comment = Combine(
+ Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
+).set_name("C++ style comment")
+"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
+
+java_style_comment = cpp_style_comment
+"Same as :class:`cpp_style_comment`"
+
+python_style_comment = Regex(r"#.*").set_name("Python style comment")
+"Comment of the form ``# ... (to end of line)``"
+
+
+# build list of built-in expressions, for future reference if a global default value
+# gets updated
+_builtin_exprs: List[ParserElement] = [
+ v for v in vars().values() if isinstance(v, ParserElement)
+]
+
+
+# pre-PEP8 compatible names
+delimitedList = delimited_list
+countedArray = counted_array
+matchPreviousLiteral = match_previous_literal
+matchPreviousExpr = match_previous_expr
+oneOf = one_of
+dictOf = dict_of
+originalTextFor = original_text_for
+nestedExpr = nested_expr
+makeHTMLTags = make_html_tags
+makeXMLTags = make_xml_tags
+anyOpenTag, anyCloseTag = any_open_tag, any_close_tag
+commonHTMLEntity = common_html_entity
+replaceHTMLEntity = replace_html_entity
+opAssoc = OpAssoc
+infixNotation = infix_notation
+cStyleComment = c_style_comment
+htmlComment = html_comment
+restOfLine = rest_of_line
+dblSlashComment = dbl_slash_comment
+cppStyleComment = cpp_style_comment
+javaStyleComment = java_style_comment
+pythonStyleComment = python_style_comment
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/results.py b/third_party/python/pip/pip/_vendor/pyparsing/results.py
new file mode 100644
index 0000000000..00c9421d3b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/results.py
@@ -0,0 +1,760 @@
+# results.py
+from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator
+import pprint
+from weakref import ref as wkref
+from typing import Tuple, Any
+
+str_type: Tuple[type, ...] = (str, bytes)
+_generator_type = type((_ for _ in ()))
+
+
+class _ParseResultsWithOffset:
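+    # lightweight (value, position) pair used in ParseResults._tokdict to track
+    # where each named result occurred in the underlying token list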
+ __slots__ = ["tup"]
+
+ def __init__(self, p1, p2):
+ self.tup = (p1, p2)
+
+ def __getitem__(self, i):
+ return self.tup[i]
+
+ def __getstate__(self):
+ return self.tup
+
+ def __setstate__(self, *args):
+ self.tup = args[0]
+
+
+class ParseResults:
+ """Structured parse results, to provide multiple means of access to
+ the parsed data:
+
+ - as a list (``len(results)``)
+ - by list index (``results[0], results[1]``, etc.)
+ - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)
+
+ Example::
+
+ integer = Word(nums)
+ date_str = (integer.set_results_name("year") + '/'
+ + integer.set_results_name("month") + '/'
+ + integer.set_results_name("day"))
+ # equivalent form:
+ # date_str = (integer("year") + '/'
+ # + integer("month") + '/'
+ # + integer("day"))
+
+ # parse_string returns a ParseResults object
+ result = date_str.parse_string("1999/12/31")
+
+ def test(s, fn=repr):
+ print("{} -> {}".format(s, fn(eval(s))))
+ test("list(result)")
+ test("result[0]")
+ test("result['month']")
+ test("result.day")
+ test("'month' in result")
+ test("'minutes' in result")
+ test("result.dump()", str)
+
+ prints::
+
+ list(result) -> ['1999', '/', '12', '/', '31']
+ result[0] -> '1999'
+ result['month'] -> '12'
+ result.day -> '31'
+ 'month' in result -> True
+ 'minutes' in result -> False
+ result.dump() -> ['1999', '/', '12', '/', '31']
+ - day: '31'
+ - month: '12'
+ - year: '1999'
+ """
+
+ _null_values: Tuple[Any, ...] = (None, [], "", ())
+
+ __slots__ = [
+ "_name",
+ "_parent",
+ "_all_names",
+ "_modal",
+ "_toklist",
+ "_tokdict",
+ "__weakref__",
+ ]
+
+ class List(list):
+ """
+ Simple wrapper class to distinguish parsed list results that should be preserved
+ as actual Python lists, instead of being converted to :class:`ParseResults`:
+
+ LBRACK, RBRACK = map(pp.Suppress, "[]")
+ element = pp.Forward()
+ item = ppc.integer
+ element_list = LBRACK + pp.delimited_list(element) + RBRACK
+
+ # add parse actions to convert from ParseResults to actual Python collection types
+ def as_python_list(t):
+ return pp.ParseResults.List(t.as_list())
+ element_list.add_parse_action(as_python_list)
+
+ element <<= item | element_list
+
+ element.run_tests('''
+ 100
+ [2,3,4]
+ [[2, 1],3,4]
+ [(2, 1),3,4]
+ (2,3,4)
+ ''', post_parse=lambda s, r: (r[0], type(r[0])))
+
+ prints:
+
+ 100
+ (100, <class 'int'>)
+
+ [2,3,4]
+ ([2, 3, 4], <class 'list'>)
+
+ [[2, 1],3,4]
+ ([[2, 1], 3, 4], <class 'list'>)
+
+ (Used internally by :class:`Group` when `aslist=True`.)
+ """
+
+ def __new__(cls, contained=None):
+ if contained is None:
+ contained = []
+
+ if not isinstance(contained, list):
+ raise TypeError(
+ "{} may only be constructed with a list,"
+ " not {}".format(cls.__name__, type(contained).__name__)
+ )
+
+ return list.__new__(cls)
+
+ def __new__(cls, toklist=None, name=None, **kwargs):
+ if isinstance(toklist, ParseResults):
+ return toklist
+ self = object.__new__(cls)
+ self._name = None
+ self._parent = None
+ self._all_names = set()
+
+ if toklist is None:
+ self._toklist = []
+ elif isinstance(toklist, (list, _generator_type)):
+ self._toklist = (
+ [toklist[:]]
+ if isinstance(toklist, ParseResults.List)
+ else list(toklist)
+ )
+ else:
+ self._toklist = [toklist]
+ self._tokdict = dict()
+ return self
+
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__(
+ self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
+ ):
+ self._modal = modal
+ if name is not None and name != "":
+ if isinstance(name, int):
+ name = str(name)
+ if not modal:
+ self._all_names = {name}
+ self._name = name
+ if toklist not in self._null_values:
+ if isinstance(toklist, (str_type, type)):
+ toklist = [toklist]
+ if asList:
+ if isinstance(toklist, ParseResults):
+ self[name] = _ParseResultsWithOffset(
+ ParseResults(toklist._toklist), 0
+ )
+ else:
+ self[name] = _ParseResultsWithOffset(
+ ParseResults(toklist[0]), 0
+ )
+ self[name]._name = name
+ else:
+ try:
+ self[name] = toklist[0]
+ except (KeyError, TypeError, IndexError):
+ if toklist is not self:
+ self[name] = toklist
+ else:
+ self._name = name
+
+ def __getitem__(self, i):
+ if isinstance(i, (int, slice)):
+ return self._toklist[i]
+ else:
+ if i not in self._all_names:
+ return self._tokdict[i][-1][0]
+ else:
+ return ParseResults([v[0] for v in self._tokdict[i]])
+
+ def __setitem__(self, k, v, isinstance=isinstance):
+ if isinstance(v, _ParseResultsWithOffset):
+ self._tokdict[k] = self._tokdict.get(k, list()) + [v]
+ sub = v[0]
+ elif isinstance(k, (int, slice)):
+ self._toklist[k] = v
+ sub = v
+ else:
+ self._tokdict[k] = self._tokdict.get(k, list()) + [
+ _ParseResultsWithOffset(v, 0)
+ ]
+ sub = v
+ if isinstance(sub, ParseResults):
+ sub._parent = wkref(self)
+
+ def __delitem__(self, i):
+ if isinstance(i, (int, slice)):
+ mylen = len(self._toklist)
+ del self._toklist[i]
+
+ # convert int to slice
+ if isinstance(i, int):
+ if i < 0:
+ i += mylen
+ i = slice(i, i + 1)
+ # get removed indices
+ removed = list(range(*i.indices(mylen)))
+ removed.reverse()
+ # fixup indices in token dictionary
+ for name, occurrences in self._tokdict.items():
+ for j in removed:
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(
+ value, position - (position > j)
+ )
+ else:
+ del self._tokdict[i]
+
+ def __contains__(self, k) -> bool:
+ return k in self._tokdict
+
+ def __len__(self) -> int:
+ return len(self._toklist)
+
+ def __bool__(self) -> bool:
+        return bool(self._toklist or self._tokdict)
+
+ def __iter__(self) -> Iterator:
+ return iter(self._toklist)
+
+ def __reversed__(self) -> Iterator:
+ return iter(self._toklist[::-1])
+
+ def keys(self):
+ return iter(self._tokdict)
+
+ def values(self):
+ return (self[k] for k in self.keys())
+
+ def items(self):
+ return ((k, self[k]) for k in self.keys())
+
+ def haskeys(self) -> bool:
+ """
+ Since ``keys()`` returns an iterator, this method is helpful in bypassing
+ code that looks for the existence of any defined results names."""
+ return bool(self._tokdict)
+
+ def pop(self, *args, **kwargs):
+ """
+ Removes and returns item at specified index (default= ``last``).
+ Supports both ``list`` and ``dict`` semantics for ``pop()``. If
+ passed no argument or an integer argument, it will use ``list``
+ semantics and pop tokens from the list of parsed tokens. If passed
+ a non-integer argument (most likely a string), it will use ``dict``
+ semantics and pop the corresponding value from any defined results
+ names. A second default return value argument is supported, just as in
+ ``dict.pop()``.
+
+ Example::
+
+ numlist = Word(nums)[...]
+ print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
+
+ def remove_first(tokens):
+ tokens.pop(0)
+ numlist.add_parse_action(remove_first)
+ print(numlist.parse_string("0 123 321")) # -> ['123', '321']
+
+ label = Word(alphas)
+ patt = label("LABEL") + Word(nums)[1, ...]
+ print(patt.parse_string("AAB 123 321").dump())
+
+ # Use pop() in a parse action to remove named result (note that corresponding value is not
+ # removed from list form of results)
+ def remove_LABEL(tokens):
+ tokens.pop("LABEL")
+ return tokens
+ patt.add_parse_action(remove_LABEL)
+ print(patt.parse_string("AAB 123 321").dump())
+
+ prints::
+
+ ['AAB', '123', '321']
+ - LABEL: 'AAB'
+
+ ['AAB', '123', '321']
+ """
+ if not args:
+ args = [-1]
+ for k, v in kwargs.items():
+ if k == "default":
+ args = (args[0], v)
+ else:
+ raise TypeError(
+ "pop() got an unexpected keyword argument {!r}".format(k)
+ )
+ if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
+ index = args[0]
+ ret = self[index]
+ del self[index]
+ return ret
+ else:
+ defaultvalue = args[1]
+ return defaultvalue
+
+ def get(self, key, default_value=None):
+ """
+ Returns named result matching the given key, or if there is no
+ such name, then returns the given ``default_value`` or ``None`` if no
+ ``default_value`` is specified.
+
+ Similar to ``dict.get()``.
+
+ Example::
+
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parse_string("1999/12/31")
+ print(result.get("year")) # -> '1999'
+ print(result.get("hour", "not specified")) # -> 'not specified'
+ print(result.get("hour")) # -> None
+ """
+ if key in self:
+ return self[key]
+ else:
+ return default_value
+
+ def insert(self, index, ins_string):
+ """
+ Inserts new element at location index in the list of parsed tokens.
+
+ Similar to ``list.insert()``.
+
+ Example::
+
+ numlist = Word(nums)[...]
+ print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to insert the parse location in the front of the parsed results
+ def insert_locn(locn, tokens):
+ tokens.insert(0, locn)
+ numlist.add_parse_action(insert_locn)
+ print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
+ """
+ self._toklist.insert(index, ins_string)
+ # fixup indices in token dictionary
+ for name, occurrences in self._tokdict.items():
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(
+ value, position + (position > index)
+ )
+
+ def append(self, item):
+ """
+ Add single element to end of ``ParseResults`` list of elements.
+
+ Example::
+
+ numlist = Word(nums)[...]
+ print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to compute the sum of the parsed integers, and add it to the end
+ def append_sum(tokens):
+ tokens.append(sum(map(int, tokens)))
+ numlist.add_parse_action(append_sum)
+ print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
+ """
+ self._toklist.append(item)
+
+ def extend(self, itemseq):
+ """
+ Add sequence of elements to end of ``ParseResults`` list of elements.
+
+ Example::
+
+ patt = Word(alphas)[1, ...]
+
+ # use a parse action to append the reverse of the matched strings, to make a palindrome
+ def make_palindrome(tokens):
+ tokens.extend(reversed([t[::-1] for t in tokens]))
+ return ''.join(tokens)
+ patt.add_parse_action(make_palindrome)
+ print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+ """
+ if isinstance(itemseq, ParseResults):
+ self.__iadd__(itemseq)
+ else:
+ self._toklist.extend(itemseq)
+
+ def clear(self):
+ """
+ Clear all elements and results names.
+ """
+ del self._toklist[:]
+ self._tokdict.clear()
+
+ def __getattr__(self, name):
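+        # undefined results names return "" rather than raising; dunder lookups
+        # still raise AttributeError so that protocols such as copy/pickle work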
+ try:
+ return self[name]
+ except KeyError:
+ if name.startswith("__"):
+ raise AttributeError(name)
+ return ""
+
+ def __add__(self, other) -> "ParseResults":
+ ret = self.copy()
+ ret += other
+ return ret
+
+ def __iadd__(self, other) -> "ParseResults":
+ if other._tokdict:
+ offset = len(self._toklist)
+ addoffset = lambda a: offset if a < 0 else a + offset
+ otheritems = other._tokdict.items()
+ otherdictitems = [
+ (k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
+ for k, vlist in otheritems
+ for v in vlist
+ ]
+ for k, v in otherdictitems:
+ self[k] = v
+ if isinstance(v[0], ParseResults):
+ v[0]._parent = wkref(self)
+
+ self._toklist += other._toklist
+ self._all_names |= other._all_names
+ return self
+
+ def __radd__(self, other) -> "ParseResults":
+ if isinstance(other, int) and other == 0:
+ # useful for merging many ParseResults using sum() builtin
+ return self.copy()
+ else:
+ # this may raise a TypeError - so be it
+ return other + self
+
+ def __repr__(self) -> str:
+ return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict())
+
+ def __str__(self) -> str:
+ return (
+ "["
+ + ", ".join(
+ [
+ str(i) if isinstance(i, ParseResults) else repr(i)
+ for i in self._toklist
+ ]
+ )
+ + "]"
+ )
+
+ def _asStringList(self, sep=""):
+ out = []
+ for item in self._toklist:
+ if out and sep:
+ out.append(sep)
+ if isinstance(item, ParseResults):
+ out += item._asStringList()
+ else:
+ out.append(str(item))
+ return out
+
+ def as_list(self) -> list:
+ """
+ Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+ Example::
+
+ patt = Word(alphas)[1, ...]
+ result = patt.parse_string("sldkj lsdkj sldkj")
+ # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+ print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+
+ # Use as_list() to create an actual list
+ result_list = result.as_list()
+ print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+ """
+ return [
+ res.as_list() if isinstance(res, ParseResults) else res
+ for res in self._toklist
+ ]
+
+ def as_dict(self) -> dict:
+ """
+ Returns the named parse results as a nested dictionary.
+
+ Example::
+
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parse_string('12/31/1999')
+ print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+
+ result_dict = result.as_dict()
+ print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+            # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+ import json
+ print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+ print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
+ """
+
+ def to_item(obj):
+ if isinstance(obj, ParseResults):
+ return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
+ else:
+ return obj
+
+ return dict((k, to_item(v)) for k, v in self.items())
+
+ def copy(self) -> "ParseResults":
+ """
+ Returns a new copy of a :class:`ParseResults` object.
+ """
+ ret = ParseResults(self._toklist)
+ ret._tokdict = self._tokdict.copy()
+ ret._parent = self._parent
+ ret._all_names |= self._all_names
+ ret._name = self._name
+ return ret
+
+ def get_name(self):
+ r"""
+ Returns the results name for this token expression. Useful when several
+ different expressions might match at a particular location.
+
+ Example::
+
+ integer = Word(nums)
+ ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+ house_number_expr = Suppress('#') + Word(nums, alphanums)
+ user_data = (Group(house_number_expr)("house_number")
+ | Group(ssn_expr)("ssn")
+ | Group(integer)("age"))
+ user_info = user_data[1, ...]
+
+ result = user_info.parse_string("22 111-22-3333 #221B")
+ for item in result:
+ print(item.get_name(), ':', item[0])
+
+ prints::
+
+ age : 22
+ ssn : 111-22-3333
+ house_number : 221B
+ """
+ if self._name:
+ return self._name
+ elif self._parent:
+ par = self._parent()
+
+ def find_in_parent(sub):
+ return next(
+ (
+ k
+ for k, vlist in par._tokdict.items()
+ for v, loc in vlist
+ if sub is v
+ ),
+ None,
+ )
+
+ return find_in_parent(self) if par else None
+ elif (
+ len(self) == 1
+ and len(self._tokdict) == 1
+ and next(iter(self._tokdict.values()))[0][1] in (0, -1)
+ ):
+ return next(iter(self._tokdict.keys()))
+ else:
+ return None
+
+ def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
+ """
+ Diagnostic method for listing out the contents of
+ a :class:`ParseResults`. Accepts an optional ``indent`` argument so
+ that this string can be embedded in a nested display of other data.
+
+ Example::
+
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parse_string('1999/12/31')
+ print(result.dump())
+
+ prints::
+
+ ['1999', '/', '12', '/', '31']
+ - day: '31'
+ - month: '12'
+ - year: '1999'
+ """
+ out = []
+ NL = "\n"
+ out.append(indent + str(self.as_list()) if include_list else "")
+
+ if full:
+ if self.haskeys():
+ items = sorted((str(k), v) for k, v in self.items())
+ for k, v in items:
+ if out:
+ out.append(NL)
+ out.append("{}{}- {}: ".format(indent, (" " * _depth), k))
+ if isinstance(v, ParseResults):
+ if v:
+ out.append(
+ v.dump(
+ indent=indent,
+ full=full,
+ include_list=include_list,
+ _depth=_depth + 1,
+ )
+ )
+ else:
+ out.append(str(v))
+ else:
+ out.append(repr(v))
+ if any(isinstance(vv, ParseResults) for vv in self):
+ v = self
+ for i, vv in enumerate(v):
+ if isinstance(vv, ParseResults):
+ out.append(
+ "\n{}{}[{}]:\n{}{}{}".format(
+ indent,
+ (" " * (_depth)),
+ i,
+ indent,
+ (" " * (_depth + 1)),
+ vv.dump(
+ indent=indent,
+ full=full,
+ include_list=include_list,
+ _depth=_depth + 1,
+ ),
+ )
+ )
+ else:
+ out.append(
+ "\n%s%s[%d]:\n%s%s%s"
+ % (
+ indent,
+ (" " * (_depth)),
+ i,
+ indent,
+ (" " * (_depth + 1)),
+ str(vv),
+ )
+ )
+
+ return "".join(out)
+
+ def pprint(self, *args, **kwargs):
+ """
+ Pretty-printer for parsed results as a list, using the
+ `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
+ Accepts additional positional or keyword args as defined for
+ `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
+
+ Example::
+
+ ident = Word(alphas, alphanums)
+ num = Word(nums)
+ func = Forward()
+ term = ident | num | Group('(' + func + ')')
+ func <<= ident + Group(Optional(delimited_list(term)))
+ result = func.parse_string("fna a,b,(fnb c,d,200),100")
+ result.pprint(width=40)
+
+ prints::
+
+ ['fna',
+ ['a',
+ 'b',
+ ['(', 'fnb', ['c', 'd', '200'], ')'],
+ '100']]
+ """
+ pprint.pprint(self.as_list(), *args, **kwargs)
+
+ # add support for pickle protocol
+ def __getstate__(self):
+ return (
+ self._toklist,
+ (
+ self._tokdict.copy(),
+                self._parent() if self._parent is not None else None,
+ self._all_names,
+ self._name,
+ ),
+ )
+
+ def __setstate__(self, state):
+ self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
+ self._all_names = set(inAccumNames)
+ if par is not None:
+ self._parent = wkref(par)
+ else:
+ self._parent = None
+
+ def __getnewargs__(self):
+ return self._toklist, self._name
+
+ def __dir__(self):
+ return dir(type(self)) + list(self.keys())
+
+ @classmethod
+ def from_dict(cls, other, name=None) -> "ParseResults":
+ """
+ Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
+ name-value relations as results names. If an optional ``name`` argument is
+ given, a nested ``ParseResults`` will be returned.
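+
+        Example (a sketch)::
+
+            result = ParseResults.from_dict({"name": "Alice", "age": "30"})
+            print(result.dump())
+
+        prints::
+
+            ['Alice', '30']
+            - age: '30'
+            - name: 'Alice'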
+ """
+
+ def is_iterable(obj):
+ try:
+ iter(obj)
+ except Exception:
+ return False
+ else:
+ return not isinstance(obj, str_type)
+
+ ret = cls([])
+ for k, v in other.items():
+ if isinstance(v, Mapping):
+ ret += cls.from_dict(v, name=k)
+ else:
+ ret += cls([v], name=k, asList=is_iterable(v))
+ if name is not None:
+ ret = cls([ret], name=name)
+ return ret
+
+ asList = as_list
+ asDict = as_dict
+ getName = get_name
+
+
+MutableMapping.register(ParseResults)
+MutableSequence.register(ParseResults)
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/testing.py b/third_party/python/pip/pip/_vendor/pyparsing/testing.py
new file mode 100644
index 0000000000..84a0ef1707
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/testing.py
@@ -0,0 +1,331 @@
+# testing.py
+
+from contextlib import contextmanager
+import typing
+
+from .core import (
+ ParserElement,
+ ParseException,
+ Keyword,
+ __diag__,
+ __compat__,
+)
+
+
+class pyparsing_test:
+ """
+ namespace class for classes useful in writing unit tests
+ """
+
+ class reset_pyparsing_context:
+ """
+ Context manager to be used when writing unit tests that modify pyparsing config values:
+ - packrat parsing
+ - bounded recursion parsing
+        - default whitespace characters
+ - default keyword characters
+ - literal string auto-conversion class
+ - __diag__ settings
+
+ Example::
+
+ with reset_pyparsing_context():
+ # test that literals used to construct a grammar are automatically suppressed
+ ParserElement.inlineLiteralsUsing(Suppress)
+
+ term = Word(alphas) | Word(nums)
+ group = Group('(' + term[...] + ')')
+
+ # assert that the '()' characters are not included in the parsed tokens
+ self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
+
+ # after exiting context manager, literals are converted to Literal expressions again
+ """
+
+ def __init__(self):
+ self._save_context = {}
+
+ def save(self):
+ self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
+ self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
+
+ self._save_context[
+ "literal_string_class"
+ ] = ParserElement._literalStringClass
+
+ self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
+
+ self._save_context["packrat_enabled"] = ParserElement._packratEnabled
+ if ParserElement._packratEnabled:
+ self._save_context[
+ "packrat_cache_size"
+ ] = ParserElement.packrat_cache.size
+ else:
+ self._save_context["packrat_cache_size"] = None
+ self._save_context["packrat_parse"] = ParserElement._parse
+ self._save_context[
+ "recursion_enabled"
+ ] = ParserElement._left_recursion_enabled
+
+ self._save_context["__diag__"] = {
+ name: getattr(__diag__, name) for name in __diag__._all_names
+ }
+
+ self._save_context["__compat__"] = {
+ "collect_all_And_tokens": __compat__.collect_all_And_tokens
+ }
+
+ return self
+
+ def restore(self):
+ # reset pyparsing global state
+ if (
+ ParserElement.DEFAULT_WHITE_CHARS
+ != self._save_context["default_whitespace"]
+ ):
+ ParserElement.set_default_whitespace_chars(
+ self._save_context["default_whitespace"]
+ )
+
+ ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
+
+ Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
+ ParserElement.inlineLiteralsUsing(
+ self._save_context["literal_string_class"]
+ )
+
+ for name, value in self._save_context["__diag__"].items():
+ (__diag__.enable if value else __diag__.disable)(name)
+
+ ParserElement._packratEnabled = False
+ if self._save_context["packrat_enabled"]:
+ ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
+ else:
+ ParserElement._parse = self._save_context["packrat_parse"]
+ ParserElement._left_recursion_enabled = self._save_context[
+ "recursion_enabled"
+ ]
+
+            __compat__.collect_all_And_tokens = self._save_context["__compat__"][
+                "collect_all_And_tokens"
+            ]
+
+ return self
+
+ def copy(self):
+ ret = type(self)()
+ ret._save_context.update(self._save_context)
+ return ret
+
+ def __enter__(self):
+ return self.save()
+
+ def __exit__(self, *args):
+ self.restore()
+
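+    # Editorial note: a hedged usage sketch for reset_pyparsing_context (not
+    # part of the vendored upstream file). Globals modified inside the block
+    # are restored on exit:
+    #
+    #     with pyparsing_test.reset_pyparsing_context():
+    #         ParserElement.set_default_whitespace_chars(" \t")
+    #         ...  # run tests against the modified parser globals
+    #     # previous whitespace/packrat/__diag__ settings are back here
+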
+ class TestParseResultsAsserts:
+ """
+ A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
+ """
+
+ def assertParseResultsEquals(
+ self, result, expected_list=None, expected_dict=None, msg=None
+ ):
+ """
+ Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
+ and compare any defined results names with an optional ``expected_dict``.
+ """
+ if expected_list is not None:
+ self.assertEqual(expected_list, result.as_list(), msg=msg)
+ if expected_dict is not None:
+ self.assertEqual(expected_dict, result.as_dict(), msg=msg)
+
+ def assertParseAndCheckList(
+ self, expr, test_string, expected_list, msg=None, verbose=True
+ ):
+ """
+ Convenience wrapper assert to test a parser element and input string, and assert that
+ the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
+ """
+ result = expr.parse_string(test_string, parse_all=True)
+ if verbose:
+ print(result.dump())
+ else:
+ print(result.as_list())
+ self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
+
+ def assertParseAndCheckDict(
+ self, expr, test_string, expected_dict, msg=None, verbose=True
+ ):
+ """
+ Convenience wrapper assert to test a parser element and input string, and assert that
+ the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
+ """
+            result = expr.parse_string(test_string, parse_all=True)
+ if verbose:
+ print(result.dump())
+ else:
+ print(result.as_list())
+ self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
+
+ def assertRunTestResults(
+ self, run_tests_report, expected_parse_results=None, msg=None
+ ):
+ """
+ Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
+ list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
+ with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
+ Finally, asserts that the overall ``runTests()`` success value is ``True``.
+
+ :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
+ :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
+ """
+ run_test_success, run_test_results = run_tests_report
+
+ if expected_parse_results is not None:
+ merged = [
+ (*rpt, expected)
+ for rpt, expected in zip(run_test_results, expected_parse_results)
+ ]
+ for test_string, result, expected in merged:
+ # expected should be a tuple containing a list and/or a dict or an exception,
+ # and optional failure message string
+ # an empty tuple will skip any result validation
+ fail_msg = next(
+ (exp for exp in expected if isinstance(exp, str)), None
+ )
+ expected_exception = next(
+ (
+ exp
+ for exp in expected
+ if isinstance(exp, type) and issubclass(exp, Exception)
+ ),
+ None,
+ )
+ if expected_exception is not None:
+ with self.assertRaises(
+ expected_exception=expected_exception, msg=fail_msg or msg
+ ):
+ if isinstance(result, Exception):
+ raise result
+ else:
+ expected_list = next(
+ (exp for exp in expected if isinstance(exp, list)), None
+ )
+ expected_dict = next(
+ (exp for exp in expected if isinstance(exp, dict)), None
+ )
+ if (expected_list, expected_dict) != (None, None):
+ self.assertParseResultsEquals(
+ result,
+ expected_list=expected_list,
+ expected_dict=expected_dict,
+ msg=fail_msg or msg,
+ )
+ else:
+ # warning here maybe?
+ print("no validation for {!r}".format(test_string))
+
+ # do this last, in case some specific test results can be reported instead
+ self.assertTrue(
+ run_test_success, msg=msg if msg is not None else "failed runTests"
+ )
+
+ @contextmanager
+ def assertRaisesParseException(self, exc_type=ParseException, msg=None):
+ with self.assertRaises(exc_type, msg=msg):
+ yield
+
+ @staticmethod
+ def with_line_numbers(
+ s: str,
+ start_line: typing.Optional[int] = None,
+ end_line: typing.Optional[int] = None,
+ expand_tabs: bool = True,
+ eol_mark: str = "|",
+ mark_spaces: typing.Optional[str] = None,
+ mark_control: typing.Optional[str] = None,
+ ) -> str:
+ """
+        Helpful method for debugging a parser - returns a string with line and column numbers.
+        (Line and column numbers are 1-based.)
+
+        :param s: str - string to be printed with line and column numbers
+ :param start_line: int - (optional) starting line number in s to print (default=1)
+ :param end_line: int - (optional) ending line number in s to print (default=len(s))
+ :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
+ :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
+ :param mark_spaces: str - (optional) special character to display in place of spaces
+        :param mark_control: str - (optional) convert non-printing control characters to a placeholder
+ character; valid values:
+ - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
+ - any single character string - replace control characters with given string
+ - None (default) - string is displayed as-is
+
+ :return: str - input string with leading line numbers and column number headers
+ """
+ if expand_tabs:
+ s = s.expandtabs()
+ if mark_control is not None:
+ if mark_control == "unicode":
+ tbl = str.maketrans(
+ {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))}
+ | {127: 0x2421}
+ )
+ eol_mark = ""
+ else:
+ tbl = str.maketrans(
+ {c: mark_control for c in list(range(0, 32)) + [127]}
+ )
+ s = s.translate(tbl)
+ if mark_spaces is not None and mark_spaces != " ":
+ if mark_spaces == "unicode":
+ tbl = str.maketrans({9: 0x2409, 32: 0x2423})
+ s = s.translate(tbl)
+ else:
+ s = s.replace(" ", mark_spaces)
+ if start_line is None:
+ start_line = 1
+ if end_line is None:
+ end_line = len(s)
+ end_line = min(end_line, len(s))
+ start_line = min(max(1, start_line), end_line)
+
+ if mark_control != "unicode":
+ s_lines = s.splitlines()[start_line - 1 : end_line]
+ else:
+ s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]]
+ if not s_lines:
+ return ""
+
+ lineno_width = len(str(end_line))
+ max_line_len = max(len(line) for line in s_lines)
+ lead = " " * (lineno_width + 1)
+ if max_line_len >= 99:
+ header0 = (
+ lead
+ + "".join(
+ "{}{}".format(" " * 99, (i + 1) % 100)
+ for i in range(max(max_line_len // 100, 1))
+ )
+ + "\n"
+ )
+ else:
+ header0 = ""
+ header1 = (
+ header0
+ + lead
+ + "".join(
+ " {}".format((i + 1) % 10)
+ for i in range(-(-max_line_len // 10))
+ )
+ + "\n"
+ )
+ header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n"
+ return (
+ header1
+ + header2
+ + "\n".join(
+ "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark)
+ for i, line in enumerate(s_lines, start=start_line)
+ )
+ + "\n"
+ )
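+
+# Editorial note: a hedged usage sketch for with_line_numbers (illustrative
+# only, not part of the vendored upstream file):
+#
+#     print(pyparsing_test.with_line_numbers("abc\tdef\nghi", eol_mark="|"))
+#
+# This renders column-number header rows followed by each line prefixed with
+# its 1-based line number, e.g. "1:abc     def|" after tab expansion.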
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/unicode.py b/third_party/python/pip/pip/_vendor/pyparsing/unicode.py
new file mode 100644
index 0000000000..0652620391
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/unicode.py
@@ -0,0 +1,352 @@
+# unicode.py
+
+import sys
+from itertools import filterfalse
+from typing import List, Tuple, Union
+
+
+class _lazyclassproperty:
+ def __init__(self, fn):
+ self.fn = fn
+ self.__doc__ = fn.__doc__
+ self.__name__ = fn.__name__
+
+ def __get__(self, obj, cls):
+ if cls is None:
+ cls = type(obj)
+ if not hasattr(cls, "_intern") or any(
+ cls._intern is getattr(superclass, "_intern", [])
+ for superclass in cls.__mro__[1:]
+ ):
+ cls._intern = {}
+ attrname = self.fn.__name__
+ if attrname not in cls._intern:
+ cls._intern[attrname] = self.fn(cls)
+ return cls._intern[attrname]
+
+
+UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]]
+
+
+class unicode_set:
+ """
+ A set of Unicode characters, for language-specific strings for
+ ``alphas``, ``nums``, ``alphanums``, and ``printables``.
+ A unicode_set is defined by a list of ranges in the Unicode character
+ set, in a class attribute ``_ranges``. Ranges can be specified using
+ 2-tuples or a 1-tuple, such as::
+
+ _ranges = [
+ (0x0020, 0x007e),
+ (0x00a0, 0x00ff),
+ (0x0100,),
+ ]
+
+ Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
+
+ A unicode set can also be defined using multiple inheritance of other unicode sets::
+
+ class CJK(Chinese, Japanese, Korean):
+ pass
+ """
+
+ _ranges: UnicodeRangeList = []
+
+ @_lazyclassproperty
+ def _chars_for_ranges(cls):
+ ret = []
+ for cc in cls.__mro__:
+ if cc is unicode_set:
+ break
+ for rr in getattr(cc, "_ranges", ()):
+ ret.extend(range(rr[0], rr[-1] + 1))
+ return [chr(c) for c in sorted(set(ret))]
+
+ @_lazyclassproperty
+ def printables(cls):
+ "all non-whitespace characters in this range"
+ return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
+
+ @_lazyclassproperty
+ def alphas(cls):
+ "all alphabetic characters in this range"
+ return "".join(filter(str.isalpha, cls._chars_for_ranges))
+
+ @_lazyclassproperty
+ def nums(cls):
+ "all numeric digit characters in this range"
+ return "".join(filter(str.isdigit, cls._chars_for_ranges))
+
+ @_lazyclassproperty
+ def alphanums(cls):
+ "all alphanumeric characters in this range"
+ return cls.alphas + cls.nums
+
+ @_lazyclassproperty
+ def identchars(cls):
+ "all characters in this range that are valid identifier characters, plus underscore '_'"
+ return "".join(
+ sorted(
+ set(
+ "".join(filter(str.isidentifier, cls._chars_for_ranges))
+ + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
+ + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
+ + "_"
+ )
+ )
+ )
+
+ @_lazyclassproperty
+ def identbodychars(cls):
+ """
+ all characters in this range that are valid identifier body characters,
+ plus the digits 0-9
+ """
+ return "".join(
+ sorted(
+ set(
+ cls.identchars
+ + "0123456789"
+ + "".join(
+ [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()]
+ )
+ )
+ )
+ )
+
+
+class pyparsing_unicode(unicode_set):
+ """
+ A namespace class for defining common language unicode_sets.
+ """
+
+ # fmt: off
+
+ # define ranges in language character sets
+ _ranges: UnicodeRangeList = [
+ (0x0020, sys.maxunicode),
+ ]
+
+ class BasicMultilingualPlane(unicode_set):
+ "Unicode set for the Basic Multilingual Plane"
+ _ranges: UnicodeRangeList = [
+ (0x0020, 0xFFFF),
+ ]
+
+ class Latin1(unicode_set):
+ "Unicode set for Latin-1 Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0020, 0x007E),
+ (0x00A0, 0x00FF),
+ ]
+
+ class LatinA(unicode_set):
+ "Unicode set for Latin-A Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0100, 0x017F),
+ ]
+
+ class LatinB(unicode_set):
+ "Unicode set for Latin-B Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0180, 0x024F),
+ ]
+
+ class Greek(unicode_set):
+ "Unicode set for Greek Unicode Character Ranges"
+ _ranges: UnicodeRangeList = [
+ (0x0342, 0x0345),
+ (0x0370, 0x0377),
+ (0x037A, 0x037F),
+ (0x0384, 0x038A),
+ (0x038C,),
+ (0x038E, 0x03A1),
+ (0x03A3, 0x03E1),
+ (0x03F0, 0x03FF),
+ (0x1D26, 0x1D2A),
+ (0x1D5E,),
+ (0x1D60,),
+ (0x1D66, 0x1D6A),
+ (0x1F00, 0x1F15),
+ (0x1F18, 0x1F1D),
+ (0x1F20, 0x1F45),
+ (0x1F48, 0x1F4D),
+ (0x1F50, 0x1F57),
+ (0x1F59,),
+ (0x1F5B,),
+ (0x1F5D,),
+ (0x1F5F, 0x1F7D),
+ (0x1F80, 0x1FB4),
+ (0x1FB6, 0x1FC4),
+ (0x1FC6, 0x1FD3),
+ (0x1FD6, 0x1FDB),
+ (0x1FDD, 0x1FEF),
+ (0x1FF2, 0x1FF4),
+ (0x1FF6, 0x1FFE),
+ (0x2129,),
+ (0x2719, 0x271A),
+ (0xAB65,),
+ (0x10140, 0x1018D),
+ (0x101A0,),
+ (0x1D200, 0x1D245),
+ (0x1F7A1, 0x1F7A7),
+ ]
+
+ class Cyrillic(unicode_set):
+ "Unicode set for Cyrillic Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0400, 0x052F),
+ (0x1C80, 0x1C88),
+ (0x1D2B,),
+ (0x1D78,),
+ (0x2DE0, 0x2DFF),
+ (0xA640, 0xA672),
+ (0xA674, 0xA69F),
+ (0xFE2E, 0xFE2F),
+ ]
+
+ class Chinese(unicode_set):
+ "Unicode set for Chinese Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x2E80, 0x2E99),
+ (0x2E9B, 0x2EF3),
+ (0x31C0, 0x31E3),
+ (0x3400, 0x4DB5),
+ (0x4E00, 0x9FEF),
+ (0xA700, 0xA707),
+ (0xF900, 0xFA6D),
+ (0xFA70, 0xFAD9),
+ (0x16FE2, 0x16FE3),
+ (0x1F210, 0x1F212),
+ (0x1F214, 0x1F23B),
+ (0x1F240, 0x1F248),
+ (0x20000, 0x2A6D6),
+ (0x2A700, 0x2B734),
+ (0x2B740, 0x2B81D),
+ (0x2B820, 0x2CEA1),
+ (0x2CEB0, 0x2EBE0),
+ (0x2F800, 0x2FA1D),
+ ]
+
+ class Japanese(unicode_set):
+ "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
+ _ranges: UnicodeRangeList = []
+
+ class Kanji(unicode_set):
+ "Unicode set for Kanji Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x4E00, 0x9FBF),
+ (0x3000, 0x303F),
+ ]
+
+ class Hiragana(unicode_set):
+ "Unicode set for Hiragana Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x3041, 0x3096),
+ (0x3099, 0x30A0),
+ (0x30FC,),
+ (0xFF70,),
+ (0x1B001,),
+ (0x1B150, 0x1B152),
+ (0x1F200,),
+ ]
+
+ class Katakana(unicode_set):
+ "Unicode set for Katakana Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x3099, 0x309C),
+ (0x30A0, 0x30FF),
+ (0x31F0, 0x31FF),
+ (0x32D0, 0x32FE),
+ (0xFF65, 0xFF9F),
+ (0x1B000,),
+ (0x1B164, 0x1B167),
+ (0x1F201, 0x1F202),
+ (0x1F213,),
+ ]
+
+ class Hangul(unicode_set):
+ "Unicode set for Hangul (Korean) Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x1100, 0x11FF),
+ (0x302E, 0x302F),
+ (0x3131, 0x318E),
+ (0x3200, 0x321C),
+ (0x3260, 0x327B),
+ (0x327E,),
+ (0xA960, 0xA97C),
+ (0xAC00, 0xD7A3),
+ (0xD7B0, 0xD7C6),
+ (0xD7CB, 0xD7FB),
+ (0xFFA0, 0xFFBE),
+ (0xFFC2, 0xFFC7),
+ (0xFFCA, 0xFFCF),
+ (0xFFD2, 0xFFD7),
+ (0xFFDA, 0xFFDC),
+ ]
+
+ Korean = Hangul
+
+ class CJK(Chinese, Japanese, Hangul):
+ "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
+
+ class Thai(unicode_set):
+ "Unicode set for Thai Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0E01, 0x0E3A),
+ (0x0E3F, 0x0E5B)
+ ]
+
+ class Arabic(unicode_set):
+ "Unicode set for Arabic Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0600, 0x061B),
+ (0x061E, 0x06FF),
+ (0x0700, 0x077F),
+ ]
+
+ class Hebrew(unicode_set):
+ "Unicode set for Hebrew Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0591, 0x05C7),
+ (0x05D0, 0x05EA),
+ (0x05EF, 0x05F4),
+ (0xFB1D, 0xFB36),
+ (0xFB38, 0xFB3C),
+ (0xFB3E,),
+ (0xFB40, 0xFB41),
+ (0xFB43, 0xFB44),
+ (0xFB46, 0xFB4F),
+ ]
+
+ class Devanagari(unicode_set):
+ "Unicode set for Devanagari Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x0900, 0x097F),
+ (0xA8E0, 0xA8FF)
+ ]
+
+ # fmt: on
+
+
+pyparsing_unicode.Japanese._ranges = (
+ pyparsing_unicode.Japanese.Kanji._ranges
+ + pyparsing_unicode.Japanese.Hiragana._ranges
+ + pyparsing_unicode.Japanese.Katakana._ranges
+)
+
+pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane
+
+# add language identifiers using language Unicode
+pyparsing_unicode.العربية = pyparsing_unicode.Arabic
+pyparsing_unicode.中文 = pyparsing_unicode.Chinese
+pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic
+pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek
+pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew
+pyparsing_unicode.日本語 = pyparsing_unicode.Japanese
+pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji
+pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana
+pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana
+pyparsing_unicode.한국어 = pyparsing_unicode.Korean
+pyparsing_unicode.ไทย = pyparsing_unicode.Thai
+pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari
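+
+# Editorial note: a hedged usage sketch (illustrative only, not part of the
+# vendored upstream file), assuming the pyparsing 3.x Word class:
+#
+#     from pip._vendor.pyparsing import Word
+#     greek_word = Word(pyparsing_unicode.Greek.alphas)
+#     greek_word.parse_string("αβγ")   # -> ParseResults(['αβγ'])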
diff --git a/third_party/python/pip/pip/_vendor/pyparsing/util.py b/third_party/python/pip/pip/_vendor/pyparsing/util.py
new file mode 100644
index 0000000000..34ce092c6d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyparsing/util.py
@@ -0,0 +1,235 @@
+# util.py
+import warnings
+import types
+import collections
+import itertools
+from functools import lru_cache
+from typing import List, Union, Iterable
+
+_bslash = chr(92)
+
+
+class __config_flags:
+ """Internal class for defining compatibility and debugging flags"""
+
+ _all_names: List[str] = []
+ _fixed_names: List[str] = []
+ _type_desc = "configuration"
+
+ @classmethod
+ def _set(cls, dname, value):
+ if dname in cls._fixed_names:
+ warnings.warn(
+ "{}.{} {} is {} and cannot be overridden".format(
+ cls.__name__,
+ dname,
+ cls._type_desc,
+ str(getattr(cls, dname)).upper(),
+ )
+ )
+ return
+ if dname in cls._all_names:
+ setattr(cls, dname, value)
+ else:
+ raise ValueError("no such {} {!r}".format(cls._type_desc, dname))
+
+ enable = classmethod(lambda cls, name: cls._set(name, True))
+ disable = classmethod(lambda cls, name: cls._set(name, False))
+
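+# Editorial note: a hedged sketch of how a flags namespace can be built on
+# __config_flags (the flag name below is illustrative, not upstream API):
+#
+#     class _example_flags(__config_flags):
+#         warn_on_foo = False
+#         _all_names = ["warn_on_foo"]
+#
+#     _example_flags.enable("warn_on_foo")    # sets the class attribute True
+#     _example_flags.disable("nonexistent")   # raises ValueError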
+
+@lru_cache(maxsize=128)
+def col(loc: int, strg: str) -> int:
+ """
+ Returns current column within a string, counting newlines as line separators.
+ The first column is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See
+ :class:`ParserElement.parseString` for more
+ information on parsing strings containing ``<TAB>`` s, and suggested
+ methods to maintain a consistent view of the parsed string, the parse
+ location, and line and column positions within the parsed string.
+ """
+ s = strg
+ return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc)
+
+
+@lru_cache(maxsize=128)
+def lineno(loc: int, strg: str) -> int:
+ """Returns current line number within a string, counting newlines as line separators.
+ The first line is number 1.
+
+ Note - the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See :class:`ParserElement.parseString`
+ for more information on parsing strings containing ``<TAB>`` s, and
+ suggested methods to maintain a consistent view of the parsed string, the
+ parse location, and line and column positions within the parsed string.
+ """
+ return strg.count("\n", 0, loc) + 1
+
+
+@lru_cache(maxsize=128)
+def line(loc: int, strg: str) -> str:
+ """
+ Returns the line of text containing loc within a string, counting newlines as line separators.
+ """
+ last_cr = strg.rfind("\n", 0, loc)
+ next_cr = strg.find("\n", loc)
+ return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :]
+
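+# Editorial note: hedged examples for col()/lineno()/line() (illustrative
+# only, not part of the vendored upstream file). For s = "abc\ndef", loc 5
+# points at the 'e':
+#
+#     lineno(5, "abc\ndef")   # -> 2
+#     col(5, "abc\ndef")      # -> 2
+#     line(5, "abc\ndef")     # -> 'def'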
+
+class _UnboundedCache:
+ def __init__(self):
+ cache = {}
+ cache_get = cache.get
+ self.not_in_cache = not_in_cache = object()
+
+ def get(_, key):
+ return cache_get(key, not_in_cache)
+
+ def set_(_, key, value):
+ cache[key] = value
+
+ def clear(_):
+ cache.clear()
+
+ self.size = None
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set_, self)
+ self.clear = types.MethodType(clear, self)
+
+
+class _FifoCache:
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+ cache = collections.OrderedDict()
+ cache_get = cache.get
+
+ def get(_, key):
+ return cache_get(key, not_in_cache)
+
+ def set_(_, key, value):
+ cache[key] = value
+ while len(cache) > size:
+ cache.popitem(last=False)
+
+ def clear(_):
+ cache.clear()
+
+ self.size = size
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set_, self)
+ self.clear = types.MethodType(clear, self)
+
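+# Editorial note: a hedged sketch of _FifoCache eviction (illustrative only,
+# not part of the vendored upstream file):
+#
+#     cache = _FifoCache(size=2)
+#     cache.set("a", 1); cache.set("b", 2)
+#     cache.set("c", 3)                        # evicts "a", the oldest entry
+#     cache.get("a") is cache.not_in_cache     # -> True
+#     cache.get("c")                           # -> 3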
+
+class LRUMemo:
+ """
+ A memoizing mapping that retains `capacity` deleted items
+
+ The memo tracks retained items by their access order; once `capacity` items
+ are retained, the least recently used item is discarded.
+ """
+
+ def __init__(self, capacity):
+ self._capacity = capacity
+ self._active = {}
+ self._memory = collections.OrderedDict()
+
+ def __getitem__(self, key):
+ try:
+ return self._active[key]
+ except KeyError:
+ self._memory.move_to_end(key)
+ return self._memory[key]
+
+ def __setitem__(self, key, value):
+ self._memory.pop(key, None)
+ self._active[key] = value
+
+ def __delitem__(self, key):
+ try:
+ value = self._active.pop(key)
+ except KeyError:
+ pass
+ else:
+ while len(self._memory) >= self._capacity:
+ self._memory.popitem(last=False)
+ self._memory[key] = value
+
+ def clear(self):
+ self._active.clear()
+ self._memory.clear()
+
+
+class UnboundedMemo(dict):
+ """
+ A memoizing mapping that retains all deleted items
+ """
+
+ def __delitem__(self, key):
+ pass
+
+
+def _escape_regex_range_chars(s: str) -> str:
+ # escape these chars: ^-[]
+ for c in r"\^-[]":
+ s = s.replace(c, _bslash + c)
+ s = s.replace("\n", r"\n")
+ s = s.replace("\t", r"\t")
+ return str(s)
+
+
+def _collapse_string_to_ranges(
+ s: Union[str, Iterable[str]], re_escape: bool = True
+) -> str:
+ def is_consecutive(c):
+ c_int = ord(c)
+ is_consecutive.prev, prev = c_int, is_consecutive.prev
+ if c_int - prev > 1:
+ is_consecutive.value = next(is_consecutive.counter)
+ return is_consecutive.value
+
+ is_consecutive.prev = 0
+ is_consecutive.counter = itertools.count()
+ is_consecutive.value = -1
+
+ def escape_re_range_char(c):
+ return "\\" + c if c in r"\^-][" else c
+
+ def no_escape_re_range_char(c):
+ return c
+
+ if not re_escape:
+ escape_re_range_char = no_escape_re_range_char
+
+ ret = []
+ s = "".join(sorted(set(s)))
+ if len(s) > 3:
+ for _, chars in itertools.groupby(s, key=is_consecutive):
+ first = last = next(chars)
+ last = collections.deque(
+ itertools.chain(iter([last]), chars), maxlen=1
+ ).pop()
+ if first == last:
+ ret.append(escape_re_range_char(first))
+ else:
+ sep = "" if ord(last) == ord(first) + 1 else "-"
+ ret.append(
+ "{}{}{}".format(
+ escape_re_range_char(first), sep, escape_re_range_char(last)
+ )
+ )
+ else:
+ ret = [escape_re_range_char(c) for c in s]
+
+ return "".join(ret)
+
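+# Editorial note: hedged examples for _collapse_string_to_ranges (illustrative
+# only, not part of the vendored upstream file):
+#
+#     _collapse_string_to_ranges("abcdefxyz")   # -> 'a-fx-z'
+#     _collapse_string_to_ranges("ab")          # -> 'ab' (<= 3 chars kept as-is)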
+
+def _flatten(ll: list) -> list:
+ ret = []
+ for i in ll:
+ if isinstance(i, list):
+ ret.extend(_flatten(i))
+ else:
+ ret.append(i)
+ return ret
diff --git a/third_party/python/pip/pip/_vendor/pyproject_hooks/__init__.py b/third_party/python/pip/pip/_vendor/pyproject_hooks/__init__.py
new file mode 100644
index 0000000000..ddfcf7f72f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyproject_hooks/__init__.py
@@ -0,0 +1,23 @@
+"""Wrappers to call pyproject.toml-based build backend hooks.
+"""
+
+from ._impl import (
+ BackendInvalid,
+ BackendUnavailable,
+ BuildBackendHookCaller,
+ HookMissing,
+ UnsupportedOperation,
+ default_subprocess_runner,
+ quiet_subprocess_runner,
+)
+
+__version__ = '1.0.0'
+__all__ = [
+ 'BackendUnavailable',
+ 'BackendInvalid',
+ 'HookMissing',
+ 'UnsupportedOperation',
+ 'default_subprocess_runner',
+ 'quiet_subprocess_runner',
+ 'BuildBackendHookCaller',
+]
diff --git a/third_party/python/pip/pip/_vendor/pyproject_hooks/_compat.py b/third_party/python/pip/pip/_vendor/pyproject_hooks/_compat.py
new file mode 100644
index 0000000000..95e509c014
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyproject_hooks/_compat.py
@@ -0,0 +1,8 @@
+__all__ = ("tomllib",)
+
+import sys
+
+if sys.version_info >= (3, 11):
+ import tomllib
+else:
+ from pip._vendor import tomli as tomllib
diff --git a/third_party/python/pip/pip/_vendor/pyproject_hooks/_impl.py b/third_party/python/pip/pip/_vendor/pyproject_hooks/_impl.py
new file mode 100644
index 0000000000..37b0e6531f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyproject_hooks/_impl.py
@@ -0,0 +1,330 @@
+import json
+import os
+import sys
+import tempfile
+from contextlib import contextmanager
+from os.path import abspath
+from os.path import join as pjoin
+from subprocess import STDOUT, check_call, check_output
+
+from ._in_process import _in_proc_script_path
+
+
+def write_json(obj, path, **kwargs):
+ with open(path, 'w', encoding='utf-8') as f:
+ json.dump(obj, f, **kwargs)
+
+
+def read_json(path):
+ with open(path, encoding='utf-8') as f:
+ return json.load(f)
+
+
+class BackendUnavailable(Exception):
+ """Will be raised if the backend cannot be imported in the hook process."""
+ def __init__(self, traceback):
+ self.traceback = traceback
+
+
+class BackendInvalid(Exception):
+ """Will be raised if the backend is invalid."""
+ def __init__(self, backend_name, backend_path, message):
+ super().__init__(message)
+ self.backend_name = backend_name
+ self.backend_path = backend_path
+
+
+class HookMissing(Exception):
+ """Will be raised on missing hooks (if a fallback can't be used)."""
+ def __init__(self, hook_name):
+ super().__init__(hook_name)
+ self.hook_name = hook_name
+
+
+class UnsupportedOperation(Exception):
+ """May be raised by build_sdist if the backend indicates that it can't."""
+ def __init__(self, traceback):
+ self.traceback = traceback
+
+
+def default_subprocess_runner(cmd, cwd=None, extra_environ=None):
+ """The default method of calling the wrapper subprocess.
+
+ This uses :func:`subprocess.check_call` under the hood.
+ """
+ env = os.environ.copy()
+ if extra_environ:
+ env.update(extra_environ)
+
+ check_call(cmd, cwd=cwd, env=env)
+
+
+def quiet_subprocess_runner(cmd, cwd=None, extra_environ=None):
+ """Call the subprocess while suppressing output.
+
+ This uses :func:`subprocess.check_output` under the hood.
+ """
+ env = os.environ.copy()
+ if extra_environ:
+ env.update(extra_environ)
+
+ check_output(cmd, cwd=cwd, env=env, stderr=STDOUT)
+
+
+def norm_and_check(source_tree, requested):
+ """Normalise and check a backend path.
+
+ Ensure that the requested backend path is specified as a relative path,
+ and resolves to a location under the given source tree.
+
+ Return an absolute version of the requested path.
+ """
+ if os.path.isabs(requested):
+ raise ValueError("paths must be relative")
+
+ abs_source = os.path.abspath(source_tree)
+ abs_requested = os.path.normpath(os.path.join(abs_source, requested))
+ # We have to use commonprefix for Python 2.7 compatibility. So we
+ # normalise case to avoid problems because commonprefix is a character
+ # based comparison :-(
+ norm_source = os.path.normcase(abs_source)
+ norm_requested = os.path.normcase(abs_requested)
+ if os.path.commonprefix([norm_source, norm_requested]) != norm_source:
+ raise ValueError("paths must be inside source tree")
+
+ return abs_requested
+
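+# Editorial note: hedged POSIX-path examples for norm_and_check (illustrative
+# only, not part of the vendored upstream file):
+#
+#     norm_and_check("/src/proj", "backend")    # -> '/src/proj/backend'
+#     norm_and_check("/src/proj", "../other")   # ValueError: paths must be inside source tree
+#     norm_and_check("/src/proj", "/abs/dir")   # ValueError: paths must be relative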
+
+class BuildBackendHookCaller:
+ """A wrapper to call the build backend hooks for a source directory.
+ """
+
+ def __init__(
+ self,
+ source_dir,
+ build_backend,
+ backend_path=None,
+ runner=None,
+ python_executable=None,
+ ):
+ """
+ :param source_dir: The source directory to invoke the build backend for
+ :param build_backend: The build backend spec
+ :param backend_path: Additional path entries for the build backend spec
+ :param runner: The :ref:`subprocess runner <Subprocess Runners>` to use
+ :param python_executable:
+ The Python executable used to invoke the build backend
+ """
+ if runner is None:
+ runner = default_subprocess_runner
+
+ self.source_dir = abspath(source_dir)
+ self.build_backend = build_backend
+ if backend_path:
+ backend_path = [
+ norm_and_check(self.source_dir, p) for p in backend_path
+ ]
+ self.backend_path = backend_path
+ self._subprocess_runner = runner
+ if not python_executable:
+ python_executable = sys.executable
+ self.python_executable = python_executable
+
+ @contextmanager
+ def subprocess_runner(self, runner):
+ """A context manager for temporarily overriding the default
+ :ref:`subprocess runner <Subprocess Runners>`.
+
+ .. code-block:: python
+
+ hook_caller = BuildBackendHookCaller(...)
+ with hook_caller.subprocess_runner(quiet_subprocess_runner):
+ ...
+ """
+ prev = self._subprocess_runner
+ self._subprocess_runner = runner
+ try:
+ yield
+ finally:
+ self._subprocess_runner = prev
+
+ def _supported_features(self):
+ """Return the list of optional features supported by the backend."""
+ return self._call_hook('_supported_features', {})
+
+ def get_requires_for_build_wheel(self, config_settings=None):
+ """Get additional dependencies required for building a wheel.
+
+ :returns: A list of :pep:`dependency specifiers <508>`.
+ :rtype: list[str]
+
+ .. admonition:: Fallback
+
+            If the build backend does not define a hook with this name, an
+            empty list will be returned.
+ """
+ return self._call_hook('get_requires_for_build_wheel', {
+ 'config_settings': config_settings
+ })
+
+ def prepare_metadata_for_build_wheel(
+ self, metadata_directory, config_settings=None,
+ _allow_fallback=True):
+ """Prepare a ``*.dist-info`` folder with metadata for this project.
+
+ :returns: Name of the newly created subfolder within
+ ``metadata_directory``, containing the metadata.
+ :rtype: str
+
+ .. admonition:: Fallback
+
+ If the build backend does not define a hook with this name and
+ ``_allow_fallback`` is truthy, the backend will be asked to build a
+ wheel via the ``build_wheel`` hook and the dist-info extracted from
+ that will be returned.
+ """
+ return self._call_hook('prepare_metadata_for_build_wheel', {
+ 'metadata_directory': abspath(metadata_directory),
+ 'config_settings': config_settings,
+ '_allow_fallback': _allow_fallback,
+ })
+
+ def build_wheel(
+ self, wheel_directory, config_settings=None,
+ metadata_directory=None):
+ """Build a wheel from this project.
+
+ :returns:
+ The name of the newly created wheel within ``wheel_directory``.
+
+ .. admonition:: Interaction with fallback
+
+ If the ``build_wheel`` hook was called in the fallback for
+ :meth:`prepare_metadata_for_build_wheel`, the build backend would
+ not be invoked. Instead, the previously built wheel will be copied
+ to ``wheel_directory`` and the name of that file will be returned.
+ """
+ if metadata_directory is not None:
+ metadata_directory = abspath(metadata_directory)
+ return self._call_hook('build_wheel', {
+ 'wheel_directory': abspath(wheel_directory),
+ 'config_settings': config_settings,
+ 'metadata_directory': metadata_directory,
+ })
+
+ def get_requires_for_build_editable(self, config_settings=None):
+ """Get additional dependencies required for building an editable wheel.
+
+ :returns: A list of :pep:`dependency specifiers <508>`.
+ :rtype: list[str]
+
+ .. admonition:: Fallback
+
+            If the build backend does not define a hook with this name, an
+            empty list will be returned.
+ """
+ return self._call_hook('get_requires_for_build_editable', {
+ 'config_settings': config_settings
+ })
+
+ def prepare_metadata_for_build_editable(
+ self, metadata_directory, config_settings=None,
+ _allow_fallback=True):
+ """Prepare a ``*.dist-info`` folder with metadata for this project.
+
+ :returns: Name of the newly created subfolder within
+ ``metadata_directory``, containing the metadata.
+ :rtype: str
+
+ .. admonition:: Fallback
+
+ If the build backend does not define a hook with this name and
+ ``_allow_fallback`` is truthy, the backend will be asked to build a
+ wheel via the ``build_editable`` hook and the dist-info
+ extracted from that will be returned.
+ """
+ return self._call_hook('prepare_metadata_for_build_editable', {
+ 'metadata_directory': abspath(metadata_directory),
+ 'config_settings': config_settings,
+ '_allow_fallback': _allow_fallback,
+ })
+
+ def build_editable(
+ self, wheel_directory, config_settings=None,
+ metadata_directory=None):
+ """Build an editable wheel from this project.
+
+ :returns:
+ The name of the newly created wheel within ``wheel_directory``.
+
+ .. admonition:: Interaction with fallback
+
+ If the ``build_editable`` hook was called in the fallback for
+ :meth:`prepare_metadata_for_build_editable`, the build backend
+ would not be invoked. Instead, the previously built wheel will be
+ copied to ``wheel_directory`` and the name of that file will be
+ returned.
+ """
+ if metadata_directory is not None:
+ metadata_directory = abspath(metadata_directory)
+ return self._call_hook('build_editable', {
+ 'wheel_directory': abspath(wheel_directory),
+ 'config_settings': config_settings,
+ 'metadata_directory': metadata_directory,
+ })
+
+ def get_requires_for_build_sdist(self, config_settings=None):
+ """Get additional dependencies required for building an sdist.
+
+ :returns: A list of :pep:`dependency specifiers <508>`.
+ :rtype: list[str]
+ """
+ return self._call_hook('get_requires_for_build_sdist', {
+ 'config_settings': config_settings
+ })
+
+ def build_sdist(self, sdist_directory, config_settings=None):
+ """Build an sdist from this project.
+
+ :returns:
+            The name of the newly created sdist within ``sdist_directory``.
+ """
+ return self._call_hook('build_sdist', {
+ 'sdist_directory': abspath(sdist_directory),
+ 'config_settings': config_settings,
+ })
+
+ def _call_hook(self, hook_name, kwargs):
+ extra_environ = {'PEP517_BUILD_BACKEND': self.build_backend}
+
+ if self.backend_path:
+ backend_path = os.pathsep.join(self.backend_path)
+ extra_environ['PEP517_BACKEND_PATH'] = backend_path
+
+ with tempfile.TemporaryDirectory() as td:
+ hook_input = {'kwargs': kwargs}
+ write_json(hook_input, pjoin(td, 'input.json'), indent=2)
+
+ # Run the hook in a subprocess
+ with _in_proc_script_path() as script:
+ python = self.python_executable
+ self._subprocess_runner(
+ [python, abspath(str(script)), hook_name, td],
+ cwd=self.source_dir,
+ extra_environ=extra_environ
+ )
+
+ data = read_json(pjoin(td, 'output.json'))
+ if data.get('unsupported'):
+ raise UnsupportedOperation(data.get('traceback', ''))
+ if data.get('no_backend'):
+ raise BackendUnavailable(data.get('traceback', ''))
+ if data.get('backend_invalid'):
+ raise BackendInvalid(
+ backend_name=self.build_backend,
+ backend_path=self.backend_path,
+ message=data.get('backend_error', '')
+ )
+ if data.get('hook_missing'):
+ raise HookMissing(data.get('missing_hook_name') or hook_name)
+ return data['return_val']
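+
+# Editorial note: a hedged usage sketch for BuildBackendHookCaller (the
+# directory and backend below are illustrative, not part of the upstream file):
+#
+#     hooks = BuildBackendHookCaller("path/to/project", "setuptools.build_meta")
+#     with hooks.subprocess_runner(quiet_subprocess_runner):
+#         requires = hooks.get_requires_for_build_wheel()
+#         whl_name = hooks.build_wheel("path/to/dist")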
diff --git a/third_party/python/pip/pip/_vendor/pyproject_hooks/_in_process/__init__.py b/third_party/python/pip/pip/_vendor/pyproject_hooks/_in_process/__init__.py
new file mode 100644
index 0000000000..917fa065b3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyproject_hooks/_in_process/__init__.py
@@ -0,0 +1,18 @@
+"""This is a subpackage because the directory is on sys.path for _in_process.py
+
+The subpackage should stay as empty as possible to avoid shadowing modules that
+the backend might import.
+"""
+
+import importlib.resources as resources
+
+try:
+ resources.files
+except AttributeError:
+ # Python 3.8 compatibility
+ def _in_proc_script_path():
+ return resources.path(__package__, '_in_process.py')
+else:
+ def _in_proc_script_path():
+ return resources.as_file(
+ resources.files(__package__).joinpath('_in_process.py'))
diff --git a/third_party/python/pip/pip/_vendor/pyproject_hooks/_in_process/_in_process.py b/third_party/python/pip/pip/_vendor/pyproject_hooks/_in_process/_in_process.py
new file mode 100644
index 0000000000..ee511ff20d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/pyproject_hooks/_in_process/_in_process.py
@@ -0,0 +1,353 @@
+"""This is invoked in a subprocess to call the build backend hooks.
+
+It expects:
+- Command line args: hook_name, control_dir
+- Environment variables:
+ PEP517_BUILD_BACKEND=entry.point:spec
+ PEP517_BACKEND_PATH=paths (separated with os.pathsep)
+- control_dir/input.json:
+ - {"kwargs": {...}}
+
+Results:
+- control_dir/output.json
+ - {"return_val": ...}
+"""
+import json
+import os
+import os.path
+import re
+import shutil
+import sys
+import traceback
+from glob import glob
+from importlib import import_module
+from os.path import join as pjoin
+
+# This file is run as a script, and importing helpers from the package is not
+# zip-safe, so write_json() and read_json() are duplicated here from _impl.py.
+
+
+def write_json(obj, path, **kwargs):
+ with open(path, 'w', encoding='utf-8') as f:
+ json.dump(obj, f, **kwargs)
+
+
+def read_json(path):
+ with open(path, encoding='utf-8') as f:
+ return json.load(f)
+
+
+class BackendUnavailable(Exception):
+ """Raised if we cannot import the backend"""
+ def __init__(self, traceback):
+ self.traceback = traceback
+
+
+class BackendInvalid(Exception):
+ """Raised if the backend is invalid"""
+ def __init__(self, message):
+ self.message = message
+
+
+class HookMissing(Exception):
+ """Raised if a hook is missing and we are not executing the fallback"""
+ def __init__(self, hook_name=None):
+ super().__init__(hook_name)
+ self.hook_name = hook_name
+
+
+def contained_in(filename, directory):
+ """Test if a file is located within the given directory."""
+ filename = os.path.normcase(os.path.abspath(filename))
+ directory = os.path.normcase(os.path.abspath(directory))
+ return os.path.commonprefix([filename, directory]) == directory
+
+
+def _build_backend():
+ """Find and load the build backend"""
+ # Add in-tree backend directories to the front of sys.path.
+ backend_path = os.environ.get('PEP517_BACKEND_PATH')
+ if backend_path:
+ extra_pathitems = backend_path.split(os.pathsep)
+ sys.path[:0] = extra_pathitems
+
+ ep = os.environ['PEP517_BUILD_BACKEND']
+ mod_path, _, obj_path = ep.partition(':')
+ try:
+ obj = import_module(mod_path)
+ except ImportError:
+ raise BackendUnavailable(traceback.format_exc())
+
+ if backend_path:
+ if not any(
+ contained_in(obj.__file__, path)
+ for path in extra_pathitems
+ ):
+ raise BackendInvalid("Backend was not loaded from backend-path")
+
+ if obj_path:
+ for path_part in obj_path.split('.'):
+ obj = getattr(obj, path_part)
+ return obj
+
+
+def _supported_features():
+    """Return the list of optional features supported by the backend.
+
+ Returns a list of strings.
+ The only possible value is 'build_editable'.
+ """
+ backend = _build_backend()
+ features = []
+ if hasattr(backend, "build_editable"):
+ features.append("build_editable")
+ return features
+
+
+def get_requires_for_build_wheel(config_settings):
+ """Invoke the optional get_requires_for_build_wheel hook
+
+ Returns [] if the hook is not defined.
+ """
+ backend = _build_backend()
+ try:
+ hook = backend.get_requires_for_build_wheel
+ except AttributeError:
+ return []
+ else:
+ return hook(config_settings)
+
+
+def get_requires_for_build_editable(config_settings):
+ """Invoke the optional get_requires_for_build_editable hook
+
+ Returns [] if the hook is not defined.
+ """
+ backend = _build_backend()
+ try:
+ hook = backend.get_requires_for_build_editable
+ except AttributeError:
+ return []
+ else:
+ return hook(config_settings)
+
+
+def prepare_metadata_for_build_wheel(
+ metadata_directory, config_settings, _allow_fallback):
+ """Invoke optional prepare_metadata_for_build_wheel
+
+ Implements a fallback by building a wheel if the hook isn't defined,
+ unless _allow_fallback is False in which case HookMissing is raised.
+ """
+ backend = _build_backend()
+ try:
+ hook = backend.prepare_metadata_for_build_wheel
+ except AttributeError:
+ if not _allow_fallback:
+ raise HookMissing()
+ else:
+ return hook(metadata_directory, config_settings)
+ # fallback to build_wheel outside the try block to avoid exception chaining
+ # which can be confusing to users and is not relevant
+ whl_basename = backend.build_wheel(metadata_directory, config_settings)
+ return _get_wheel_metadata_from_wheel(whl_basename, metadata_directory,
+ config_settings)
+
+
+def prepare_metadata_for_build_editable(
+ metadata_directory, config_settings, _allow_fallback):
+ """Invoke optional prepare_metadata_for_build_editable
+
+ Implements a fallback by building an editable wheel if the hook isn't
+ defined, unless _allow_fallback is False in which case HookMissing is
+ raised.
+ """
+ backend = _build_backend()
+ try:
+ hook = backend.prepare_metadata_for_build_editable
+ except AttributeError:
+ if not _allow_fallback:
+ raise HookMissing()
+ try:
+ build_hook = backend.build_editable
+ except AttributeError:
+ raise HookMissing(hook_name='build_editable')
+ else:
+ whl_basename = build_hook(metadata_directory, config_settings)
+ return _get_wheel_metadata_from_wheel(whl_basename,
+ metadata_directory,
+ config_settings)
+ else:
+ return hook(metadata_directory, config_settings)
+
+
+WHEEL_BUILT_MARKER = 'PEP517_ALREADY_BUILT_WHEEL'
+
+
+def _dist_info_files(whl_zip):
+ """Identify the .dist-info folder inside a wheel ZipFile."""
+ res = []
+ for path in whl_zip.namelist():
+ m = re.match(r'[^/\\]+-[^/\\]+\.dist-info/', path)
+ if m:
+ res.append(path)
+ if res:
+ return res
+ raise Exception("No .dist-info folder found in wheel")
+
+
+def _get_wheel_metadata_from_wheel(
+ whl_basename, metadata_directory, config_settings):
+ """Extract the metadata from a wheel.
+
+ Fallback for when the build backend does not
+ define the 'get_wheel_metadata' hook.
+ """
+ from zipfile import ZipFile
+ with open(os.path.join(metadata_directory, WHEEL_BUILT_MARKER), 'wb'):
+ pass # Touch marker file
+
+ whl_file = os.path.join(metadata_directory, whl_basename)
+ with ZipFile(whl_file) as zipf:
+ dist_info = _dist_info_files(zipf)
+ zipf.extractall(path=metadata_directory, members=dist_info)
+ return dist_info[0].split('/')[0]
+
+
+def _find_already_built_wheel(metadata_directory):
+ """Check for a wheel already built during the get_wheel_metadata hook.
+ """
+ if not metadata_directory:
+ return None
+ metadata_parent = os.path.dirname(metadata_directory)
+ if not os.path.isfile(pjoin(metadata_parent, WHEEL_BUILT_MARKER)):
+ return None
+
+ whl_files = glob(os.path.join(metadata_parent, '*.whl'))
+ if not whl_files:
+ print('Found wheel built marker, but no .whl files')
+ return None
+ if len(whl_files) > 1:
+ print('Found multiple .whl files; unspecified behaviour. '
+ 'Will call build_wheel.')
+ return None
+
+ # Exactly one .whl file
+ return whl_files[0]
+
+
+def build_wheel(wheel_directory, config_settings, metadata_directory=None):
+ """Invoke the mandatory build_wheel hook.
+
+ If a wheel was already built in the
+ prepare_metadata_for_build_wheel fallback, this
+ will copy it rather than rebuilding the wheel.
+ """
+ prebuilt_whl = _find_already_built_wheel(metadata_directory)
+ if prebuilt_whl:
+ shutil.copy2(prebuilt_whl, wheel_directory)
+ return os.path.basename(prebuilt_whl)
+
+ return _build_backend().build_wheel(wheel_directory, config_settings,
+ metadata_directory)
+
+
+def build_editable(wheel_directory, config_settings, metadata_directory=None):
+ """Invoke the optional build_editable hook.
+
+ If a wheel was already built in the
+ prepare_metadata_for_build_editable fallback, this
+ will copy it rather than rebuilding the wheel.
+ """
+ backend = _build_backend()
+ try:
+ hook = backend.build_editable
+ except AttributeError:
+ raise HookMissing()
+ else:
+ prebuilt_whl = _find_already_built_wheel(metadata_directory)
+ if prebuilt_whl:
+ shutil.copy2(prebuilt_whl, wheel_directory)
+ return os.path.basename(prebuilt_whl)
+
+ return hook(wheel_directory, config_settings, metadata_directory)
+
+
+def get_requires_for_build_sdist(config_settings):
+    """Invoke the optional get_requires_for_build_sdist hook
+
+ Returns [] if the hook is not defined.
+ """
+ backend = _build_backend()
+ try:
+ hook = backend.get_requires_for_build_sdist
+ except AttributeError:
+ return []
+ else:
+ return hook(config_settings)
+
+
+class _DummyException(Exception):
+ """Nothing should ever raise this exception"""
+
+
+class GotUnsupportedOperation(Exception):
+ """For internal use when backend raises UnsupportedOperation"""
+ def __init__(self, traceback):
+ self.traceback = traceback
+
+
+def build_sdist(sdist_directory, config_settings):
+ """Invoke the mandatory build_sdist hook."""
+ backend = _build_backend()
+ try:
+ return backend.build_sdist(sdist_directory, config_settings)
+ except getattr(backend, 'UnsupportedOperation', _DummyException):
+ raise GotUnsupportedOperation(traceback.format_exc())
+
+
+HOOK_NAMES = {
+ 'get_requires_for_build_wheel',
+ 'prepare_metadata_for_build_wheel',
+ 'build_wheel',
+ 'get_requires_for_build_editable',
+ 'prepare_metadata_for_build_editable',
+ 'build_editable',
+ 'get_requires_for_build_sdist',
+ 'build_sdist',
+ '_supported_features',
+}
+
+
+def main():
+ if len(sys.argv) < 3:
+ sys.exit("Needs args: hook_name, control_dir")
+ hook_name = sys.argv[1]
+ control_dir = sys.argv[2]
+ if hook_name not in HOOK_NAMES:
+ sys.exit("Unknown hook: %s" % hook_name)
+ hook = globals()[hook_name]
+
+ hook_input = read_json(pjoin(control_dir, 'input.json'))
+
+ json_out = {'unsupported': False, 'return_val': None}
+ try:
+ json_out['return_val'] = hook(**hook_input['kwargs'])
+ except BackendUnavailable as e:
+ json_out['no_backend'] = True
+ json_out['traceback'] = e.traceback
+ except BackendInvalid as e:
+ json_out['backend_invalid'] = True
+ json_out['backend_error'] = e.message
+ except GotUnsupportedOperation as e:
+ json_out['unsupported'] = True
+ json_out['traceback'] = e.traceback
+ except HookMissing as e:
+ json_out['hook_missing'] = True
+ json_out['missing_hook_name'] = e.hook_name or hook_name
+
+ write_json(json_out, pjoin(control_dir, 'output.json'), indent=2)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/third_party/python/pip/pip/_vendor/requests/__init__.py b/third_party/python/pip/pip/_vendor/requests/__init__.py
new file mode 100644
index 0000000000..a477624803
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/__init__.py
@@ -0,0 +1,182 @@
+# __
+# /__) _ _ _ _ _/ _
+# / ( (- (/ (/ (- _) / _)
+# /
+
+"""
+Requests HTTP Library
+~~~~~~~~~~~~~~~~~~~~~
+
+Requests is an HTTP library, written in Python, for human beings.
+Basic GET usage:
+
+ >>> import requests
+ >>> r = requests.get('https://www.python.org')
+ >>> r.status_code
+ 200
+ >>> b'Python is a programming language' in r.content
+ True
+
+... or POST:
+
+ >>> payload = dict(key1='value1', key2='value2')
+ >>> r = requests.post('https://httpbin.org/post', data=payload)
+ >>> print(r.text)
+ {
+ ...
+ "form": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+ ...
+ }
+
+The other HTTP methods are supported - see `requests.api`. Full documentation
+is at <https://requests.readthedocs.io>.
+
+:copyright: (c) 2017 by Kenneth Reitz.
+:license: Apache 2.0, see LICENSE for more details.
+"""
+
+import warnings
+
+from pip._vendor import urllib3
+
+from .exceptions import RequestsDependencyWarning
+
+charset_normalizer_version = None
+
+try:
+ from pip._vendor.chardet import __version__ as chardet_version
+except ImportError:
+ chardet_version = None
+
+
+def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
+ urllib3_version = urllib3_version.split(".")
+ assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git.
+
+    # Sometimes, urllib3 only reports a two-component version, e.g. "1.26".
+ if len(urllib3_version) == 2:
+ urllib3_version.append("0")
+
+ # Check urllib3 for compatibility.
+ major, minor, patch = urllib3_version # noqa: F811
+ major, minor, patch = int(major), int(minor), int(patch)
+ # urllib3 >= 1.21.1, <= 1.26
+ assert major == 1
+ assert minor >= 21
+ assert minor <= 26
+
+ # Check charset_normalizer for compatibility.
+ if chardet_version:
+ major, minor, patch = chardet_version.split(".")[:3]
+ major, minor, patch = int(major), int(minor), int(patch)
+ # chardet_version >= 3.0.2, < 6.0.0
+ assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0)
+ elif charset_normalizer_version:
+ major, minor, patch = charset_normalizer_version.split(".")[:3]
+ major, minor, patch = int(major), int(minor), int(patch)
+ # charset_normalizer >= 2.0.0 < 4.0.0
+ assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0)
+ else:
+ raise Exception("You need either charset_normalizer or chardet installed")
+
+
+def _check_cryptography(cryptography_version):
+ # cryptography < 1.3.4
+ try:
+ cryptography_version = list(map(int, cryptography_version.split(".")))
+ except ValueError:
+ return
+
+ if cryptography_version < [1, 3, 4]:
+ warning = "Old version of cryptography ({}) may cause slowdown.".format(
+ cryptography_version
+ )
+ warnings.warn(warning, RequestsDependencyWarning)
+
+
+# Check imported dependencies for compatibility.
+try:
+ check_compatibility(
+ urllib3.__version__, chardet_version, charset_normalizer_version
+ )
+except (AssertionError, ValueError):
+ warnings.warn(
+ "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
+ "version!".format(
+ urllib3.__version__, chardet_version, charset_normalizer_version
+ ),
+ RequestsDependencyWarning,
+ )
+
+# Attempt to enable urllib3's fallback for SNI support
+# if the standard library doesn't support SNI or the
+# 'ssl' library isn't available.
+try:
+ # Note: This logic prevents upgrading cryptography on Windows, if imported
+ # as part of pip.
+ from pip._internal.utils.compat import WINDOWS
+ if not WINDOWS:
+ raise ImportError("pip internals: don't import cryptography on Windows")
+ try:
+ import ssl
+ except ImportError:
+ ssl = None
+
+ if not getattr(ssl, "HAS_SNI", False):
+ from pip._vendor.urllib3.contrib import pyopenssl
+
+ pyopenssl.inject_into_urllib3()
+
+ # Check cryptography version
+ from cryptography import __version__ as cryptography_version
+
+ _check_cryptography(cryptography_version)
+except ImportError:
+ pass
+
+# urllib3's DependencyWarnings should be silenced.
+from pip._vendor.urllib3.exceptions import DependencyWarning
+
+warnings.simplefilter("ignore", DependencyWarning)
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+from logging import NullHandler
+
+from . import packages, utils
+from .__version__ import (
+ __author__,
+ __author_email__,
+ __build__,
+ __cake__,
+ __copyright__,
+ __description__,
+ __license__,
+ __title__,
+ __url__,
+ __version__,
+)
+from .api import delete, get, head, options, patch, post, put, request
+from .exceptions import (
+ ConnectionError,
+ ConnectTimeout,
+ FileModeWarning,
+ HTTPError,
+ JSONDecodeError,
+ ReadTimeout,
+ RequestException,
+ Timeout,
+ TooManyRedirects,
+ URLRequired,
+)
+from .models import PreparedRequest, Request, Response
+from .sessions import Session, session
+from .status_codes import codes
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+# FileModeWarnings go off per the default.
+warnings.simplefilter("default", FileModeWarning, append=True)
diff --git a/third_party/python/pip/pip/_vendor/requests/__version__.py b/third_party/python/pip/pip/_vendor/requests/__version__.py
new file mode 100644
index 0000000000..69be3dec74
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/__version__.py
@@ -0,0 +1,14 @@
+# .-. .-. .-. . . .-. .-. .-. .-.
+# |( |- |.| | | |- `-. | `-.
+# ' ' `-' `-`.`-' `-' `-' ' `-'
+
+__title__ = "requests"
+__description__ = "Python HTTP for Humans."
+__url__ = "https://requests.readthedocs.io"
+__version__ = "2.28.2"
+__build__ = 0x022802
+__author__ = "Kenneth Reitz"
+__author_email__ = "me@kennethreitz.org"
+__license__ = "Apache 2.0"
+__copyright__ = "Copyright Kenneth Reitz"
+__cake__ = "\u2728 \U0001f370 \u2728"
diff --git a/third_party/python/pip/pip/_vendor/requests/_internal_utils.py b/third_party/python/pip/pip/_vendor/requests/_internal_utils.py
new file mode 100644
index 0000000000..7dc9bc5336
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/_internal_utils.py
@@ -0,0 +1,48 @@
+"""
+requests._internal_utils
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Provides utility functions that are consumed internally by Requests
+which depend on extremely few external helpers (such as compat)
+"""
+import re
+
+from .compat import builtin_str
+
+_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$")
+_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$")
+_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$")
+_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$")
+
+HEADER_VALIDATORS = {
+ bytes: (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE),
+ str: (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR),
+}
+
+
+def to_native_string(string, encoding="ascii"):
+ """Given a string object, regardless of type, returns a representation of
+ that string in the native string type, encoding and decoding where
+ necessary. This assumes ASCII unless told otherwise.
+ """
+ if isinstance(string, builtin_str):
+ out = string
+ else:
+ out = string.decode(encoding)
+
+ return out
+
+
+def unicode_is_ascii(u_string):
+ """Determine if unicode string only contains ASCII characters.
+
+ :param str u_string: unicode string to check. Must be unicode
+ and not Python 2 `str`.
+ :rtype: bool
+ """
+ assert isinstance(u_string, str)
+ try:
+ u_string.encode("ascii")
+ return True
+ except UnicodeEncodeError:
+ return False
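+
+# Editorial note: hedged examples (illustrative only, not part of the vendored
+# upstream file):
+#
+#     to_native_string(b"Content-Type")   # -> 'Content-Type'
+#     unicode_is_ascii("hello")           # -> True
+#     unicode_is_ascii("héllo")           # -> False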
diff --git a/third_party/python/pip/pip/_vendor/requests/adapters.py b/third_party/python/pip/pip/_vendor/requests/adapters.py
new file mode 100644
index 0000000000..f68f7d4675
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/adapters.py
@@ -0,0 +1,584 @@
+"""
+requests.adapters
+~~~~~~~~~~~~~~~~~
+
+This module contains the transport adapters that Requests uses to define
+and maintain connections.
+"""
+
+import os.path
+import socket # noqa: F401
+
+from pip._vendor.urllib3.exceptions import ClosedPoolError, ConnectTimeoutError
+from pip._vendor.urllib3.exceptions import HTTPError as _HTTPError
+from pip._vendor.urllib3.exceptions import InvalidHeader as _InvalidHeader
+from pip._vendor.urllib3.exceptions import (
+ LocationValueError,
+ MaxRetryError,
+ NewConnectionError,
+ ProtocolError,
+)
+from pip._vendor.urllib3.exceptions import ProxyError as _ProxyError
+from pip._vendor.urllib3.exceptions import ReadTimeoutError, ResponseError
+from pip._vendor.urllib3.exceptions import SSLError as _SSLError
+from pip._vendor.urllib3.poolmanager import PoolManager, proxy_from_url
+from pip._vendor.urllib3.response import HTTPResponse
+from pip._vendor.urllib3.util import Timeout as TimeoutSauce
+from pip._vendor.urllib3.util import parse_url
+from pip._vendor.urllib3.util.retry import Retry
+
+from .auth import _basic_auth_str
+from .compat import basestring, urlparse
+from .cookies import extract_cookies_to_jar
+from .exceptions import (
+ ConnectionError,
+ ConnectTimeout,
+ InvalidHeader,
+ InvalidProxyURL,
+ InvalidSchema,
+ InvalidURL,
+ ProxyError,
+ ReadTimeout,
+ RetryError,
+ SSLError,
+)
+from .models import Response
+from .structures import CaseInsensitiveDict
+from .utils import (
+ DEFAULT_CA_BUNDLE_PATH,
+ extract_zipped_paths,
+ get_auth_from_url,
+ get_encoding_from_headers,
+ prepend_scheme_if_needed,
+ select_proxy,
+ urldefragauth,
+)
+
+try:
+ from pip._vendor.urllib3.contrib.socks import SOCKSProxyManager
+except ImportError:
+
+ def SOCKSProxyManager(*args, **kwargs):
+ raise InvalidSchema("Missing dependencies for SOCKS support.")
+
+
+DEFAULT_POOLBLOCK = False
+DEFAULT_POOLSIZE = 10
+DEFAULT_RETRIES = 0
+DEFAULT_POOL_TIMEOUT = None
+
+
+class BaseAdapter:
+ """The Base Transport Adapter"""
+
+ def __init__(self):
+ super().__init__()
+
+ def send(
+ self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
+ ):
+ """Sends PreparedRequest object. Returns Response object.
+
+ :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+ :param stream: (optional) Whether to stream the request content.
+ :param timeout: (optional) How long to wait for the server to send
+ data before giving up, as a float, or a :ref:`(connect timeout,
+ read timeout) <timeouts>` tuple.
+ :type timeout: float or tuple
+ :param verify: (optional) Either a boolean, in which case it controls whether we verify
+ the server's TLS certificate, or a string, in which case it must be a path
+ to a CA bundle to use
+ :param cert: (optional) Any user-provided SSL certificate to be trusted.
+ :param proxies: (optional) The proxies dictionary to apply to the request.
+ """
+ raise NotImplementedError
+
+ def close(self):
+ """Cleans up adapter specific items."""
+ raise NotImplementedError
+
+
+class HTTPAdapter(BaseAdapter):
+ """The built-in HTTP Adapter for urllib3.
+
+ Provides a general-case interface for Requests sessions to contact HTTP and
+ HTTPS urls by implementing the Transport Adapter interface. This class will
+ usually be created by the :class:`Session <Session>` class under the
+ covers.
+
+ :param pool_connections: The number of urllib3 connection pools to cache.
+ :param pool_maxsize: The maximum number of connections to save in the pool.
+ :param max_retries: The maximum number of retries each connection
+ should attempt. Note, this applies only to failed DNS lookups, socket
+ connections and connection timeouts, never to requests where data has
+ made it to the server. By default, Requests does not retry failed
+ connections. If you need granular control over the conditions under
+ which we retry a request, import urllib3's ``Retry`` class and pass
+ that instead.
+ :param pool_block: Whether the connection pool should block for connections.
+
+ Usage::
+
+ >>> import requests
+ >>> s = requests.Session()
+ >>> a = requests.adapters.HTTPAdapter(max_retries=3)
+ >>> s.mount('http://', a)
+ """
+
+ __attrs__ = [
+ "max_retries",
+ "config",
+ "_pool_connections",
+ "_pool_maxsize",
+ "_pool_block",
+ ]
+
+ def __init__(
+ self,
+ pool_connections=DEFAULT_POOLSIZE,
+ pool_maxsize=DEFAULT_POOLSIZE,
+ max_retries=DEFAULT_RETRIES,
+ pool_block=DEFAULT_POOLBLOCK,
+ ):
+ if max_retries == DEFAULT_RETRIES:
+ self.max_retries = Retry(0, read=False)
+ else:
+ self.max_retries = Retry.from_int(max_retries)
+ self.config = {}
+ self.proxy_manager = {}
+
+ super().__init__()
+
+ self._pool_connections = pool_connections
+ self._pool_maxsize = pool_maxsize
+ self._pool_block = pool_block
+
+ self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
+
+ def __getstate__(self):
+ return {attr: getattr(self, attr, None) for attr in self.__attrs__}
+
+ def __setstate__(self, state):
+        # Can't handle this by adding 'proxy_manager' to self.__attrs__ because
+ # self.poolmanager uses a lambda function, which isn't pickleable.
+ self.proxy_manager = {}
+ self.config = {}
+
+ for attr, value in state.items():
+ setattr(self, attr, value)
+
+ self.init_poolmanager(
+ self._pool_connections, self._pool_maxsize, block=self._pool_block
+ )
+
+ def init_poolmanager(
+ self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs
+ ):
+ """Initializes a urllib3 PoolManager.
+
+ This method should not be called from user code, and is only
+ exposed for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param connections: The number of urllib3 connection pools to cache.
+ :param maxsize: The maximum number of connections to save in the pool.
+ :param block: Block when no free connections are available.
+ :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
+ """
+ # save these values for pickling
+ self._pool_connections = connections
+ self._pool_maxsize = maxsize
+ self._pool_block = block
+
+ self.poolmanager = PoolManager(
+ num_pools=connections,
+ maxsize=maxsize,
+ block=block,
+ strict=True,
+ **pool_kwargs,
+ )
+
+ def proxy_manager_for(self, proxy, **proxy_kwargs):
+ """Return urllib3 ProxyManager for the given proxy.
+
+ This method should not be called from user code, and is only
+ exposed for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param proxy: The proxy to return a urllib3 ProxyManager for.
+ :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
+ :returns: ProxyManager
+ :rtype: urllib3.ProxyManager
+ """
+ if proxy in self.proxy_manager:
+ manager = self.proxy_manager[proxy]
+ elif proxy.lower().startswith("socks"):
+ username, password = get_auth_from_url(proxy)
+ manager = self.proxy_manager[proxy] = SOCKSProxyManager(
+ proxy,
+ username=username,
+ password=password,
+ num_pools=self._pool_connections,
+ maxsize=self._pool_maxsize,
+ block=self._pool_block,
+ **proxy_kwargs,
+ )
+ else:
+ proxy_headers = self.proxy_headers(proxy)
+ manager = self.proxy_manager[proxy] = proxy_from_url(
+ proxy,
+ proxy_headers=proxy_headers,
+ num_pools=self._pool_connections,
+ maxsize=self._pool_maxsize,
+ block=self._pool_block,
+ **proxy_kwargs,
+ )
+
+ return manager
+
+ def cert_verify(self, conn, url, verify, cert):
+ """Verify a SSL certificate. This method should not be called from user
+ code, and is only exposed for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param conn: The urllib3 connection object associated with the cert.
+ :param url: The requested URL.
+ :param verify: Either a boolean, in which case it controls whether we verify
+ the server's TLS certificate, or a string, in which case it must be a path
+ to a CA bundle to use
+ :param cert: The SSL certificate to verify.
+ """
+        if url.lower().startswith("https") and verify:
+            cert_loc = None
+
+ # Allow self-specified cert location.
+ if verify is not True:
+ cert_loc = verify
+
+ if not cert_loc:
+ cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
+
+ if not cert_loc or not os.path.exists(cert_loc):
+ raise OSError(
+ f"Could not find a suitable TLS CA certificate bundle, "
+ f"invalid path: {cert_loc}"
+ )
+
+ conn.cert_reqs = "CERT_REQUIRED"
+
+ if not os.path.isdir(cert_loc):
+ conn.ca_certs = cert_loc
+ else:
+ conn.ca_cert_dir = cert_loc
+ else:
+ conn.cert_reqs = "CERT_NONE"
+ conn.ca_certs = None
+ conn.ca_cert_dir = None
+
+ if cert:
+ if not isinstance(cert, basestring):
+ conn.cert_file = cert[0]
+ conn.key_file = cert[1]
+ else:
+ conn.cert_file = cert
+ conn.key_file = None
+ if conn.cert_file and not os.path.exists(conn.cert_file):
+ raise OSError(
+ f"Could not find the TLS certificate file, "
+ f"invalid path: {conn.cert_file}"
+ )
+ if conn.key_file and not os.path.exists(conn.key_file):
+ raise OSError(
+ f"Could not find the TLS key file, invalid path: {conn.key_file}"
+ )
+
+ def build_response(self, req, resp):
+ """Builds a :class:`Response <requests.Response>` object from a urllib3
+ response. This should not be called from user code, and is only exposed
+ for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
+
+ :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
+ :param resp: The urllib3 response object.
+ :rtype: requests.Response
+ """
+ response = Response()
+
+        # Fall back to None if there's no status_code, for whatever reason.
+ response.status_code = getattr(resp, "status", None)
+
+ # Make headers case-insensitive.
+ response.headers = CaseInsensitiveDict(getattr(resp, "headers", {}))
+
+ # Set encoding.
+ response.encoding = get_encoding_from_headers(response.headers)
+ response.raw = resp
+ response.reason = response.raw.reason
+
+ if isinstance(req.url, bytes):
+ response.url = req.url.decode("utf-8")
+ else:
+ response.url = req.url
+
+ # Add new cookies from the server.
+ extract_cookies_to_jar(response.cookies, req, resp)
+
+ # Give the Response some context.
+ response.request = req
+ response.connection = self
+
+ return response
+
+ def get_connection(self, url, proxies=None):
+ """Returns a urllib3 connection for the given URL. This should not be
+ called from user code, and is only exposed for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param url: The URL to connect to.
+ :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
+ :rtype: urllib3.ConnectionPool
+ """
+ proxy = select_proxy(url, proxies)
+
+ if proxy:
+ proxy = prepend_scheme_if_needed(proxy, "http")
+ proxy_url = parse_url(proxy)
+ if not proxy_url.host:
+ raise InvalidProxyURL(
+ "Please check proxy URL. It is malformed "
+ "and could be missing the host."
+ )
+ proxy_manager = self.proxy_manager_for(proxy)
+ conn = proxy_manager.connection_from_url(url)
+ else:
+            # Only the scheme should be lower-cased; round-tripping through
+            # urlparse() normalizes it while leaving the rest of the URL intact.
+ parsed = urlparse(url)
+ url = parsed.geturl()
+ conn = self.poolmanager.connection_from_url(url)
+
+ return conn
+
+ def close(self):
+ """Disposes of any internal state.
+
+ Currently, this closes the PoolManager and any active ProxyManager,
+ which closes any pooled connections.
+ """
+ self.poolmanager.clear()
+ for proxy in self.proxy_manager.values():
+ proxy.clear()
+
+ def request_url(self, request, proxies):
+ """Obtain the url to use when making the final request.
+
+        If the message is being sent through an HTTP proxy, the full URL has to
+ be used. Otherwise, we should only use the path portion of the URL.
+
+ This should not be called from user code, and is only exposed for use
+ when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+ :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
+ :rtype: str
+ """
+ proxy = select_proxy(request.url, proxies)
+ scheme = urlparse(request.url).scheme
+
+ is_proxied_http_request = proxy and scheme != "https"
+ using_socks_proxy = False
+ if proxy:
+ proxy_scheme = urlparse(proxy).scheme.lower()
+ using_socks_proxy = proxy_scheme.startswith("socks")
+
+ url = request.path_url
+ if is_proxied_http_request and not using_socks_proxy:
+ url = urldefragauth(request.url)
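+            # e.g. "http://user@example.com/a#frag" -> "http://example.com/a"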
+
+ return url
+
+ def add_headers(self, request, **kwargs):
+ """Add any headers needed by the connection. As of v2.0 this does
+ nothing by default, but is left for overriding by users that subclass
+ the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ This should not be called from user code, and is only exposed for use
+ when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
+ :param kwargs: The keyword arguments from the call to send().
+ """
+ pass
+
+ def proxy_headers(self, proxy):
+ """Returns a dictionary of the headers to add to any request sent
+ through a proxy. This works with urllib3 magic to ensure that they are
+ correctly sent to the proxy, rather than in a tunnelled request if
+ CONNECT is being used.
+
+ This should not be called from user code, and is only exposed for use
+ when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param proxy: The url of the proxy being used for this request.
+ :rtype: dict
+ """
+ headers = {}
+ username, password = get_auth_from_url(proxy)
+
+ if username:
+ headers["Proxy-Authorization"] = _basic_auth_str(username, password)
+
+ return headers
+
+ def send(
+ self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
+ ):
+ """Sends PreparedRequest object. Returns Response object.
+
+ :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+ :param stream: (optional) Whether to stream the request content.
+ :param timeout: (optional) How long to wait for the server to send
+ data before giving up, as a float, or a :ref:`(connect timeout,
+ read timeout) <timeouts>` tuple.
+ :type timeout: float or tuple or urllib3 Timeout object
+ :param verify: (optional) Either a boolean, in which case it controls whether
+ we verify the server's TLS certificate, or a string, in which case it
+ must be a path to a CA bundle to use
+ :param cert: (optional) Any user-provided SSL certificate to be trusted.
+ :param proxies: (optional) The proxies dictionary to apply to the request.
+ :rtype: requests.Response
+ """
+
+ try:
+ conn = self.get_connection(request.url, proxies)
+ except LocationValueError as e:
+ raise InvalidURL(e, request=request)
+
+ self.cert_verify(conn, request.url, verify, cert)
+ url = self.request_url(request, proxies)
+ self.add_headers(
+ request,
+ stream=stream,
+ timeout=timeout,
+ verify=verify,
+ cert=cert,
+ proxies=proxies,
+ )
+
+ chunked = not (request.body is None or "Content-Length" in request.headers)
+
+ if isinstance(timeout, tuple):
+ try:
+ connect, read = timeout
+ timeout = TimeoutSauce(connect=connect, read=read)
+ except ValueError:
+ raise ValueError(
+ f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, "
+ f"or a single float to set both timeouts to the same value."
+ )
+ elif isinstance(timeout, TimeoutSauce):
+ pass
+ else:
+ timeout = TimeoutSauce(connect=timeout, read=timeout)
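+        # e.g. timeout=5 becomes TimeoutSauce(connect=5, read=5), while
+        # timeout=(3.05, 27) becomes TimeoutSauce(connect=3.05, read=27).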
+
+ try:
+ if not chunked:
+ resp = conn.urlopen(
+ method=request.method,
+ url=url,
+ body=request.body,
+ headers=request.headers,
+ redirect=False,
+ assert_same_host=False,
+ preload_content=False,
+ decode_content=False,
+ retries=self.max_retries,
+ timeout=timeout,
+ )
+
+            else:
+                # Send the request using chunked transfer encoding.
+                if hasattr(conn, "proxy_pool"):
+                    conn = conn.proxy_pool
+
+ low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
+
+ try:
+ skip_host = "Host" in request.headers
+ low_conn.putrequest(
+ request.method,
+ url,
+ skip_accept_encoding=True,
+ skip_host=skip_host,
+ )
+
+ for header, value in request.headers.items():
+ low_conn.putheader(header, value)
+
+ low_conn.endheaders()
+
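+                    # Write the body as an HTTP/1.1 chunked stream: each chunk
+                    # is its size in hex, CRLF, the data, CRLF; a zero-length
+                    # chunk ("0\r\n\r\n") terminates the body.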
+ for i in request.body:
+ low_conn.send(hex(len(i))[2:].encode("utf-8"))
+ low_conn.send(b"\r\n")
+ low_conn.send(i)
+ low_conn.send(b"\r\n")
+ low_conn.send(b"0\r\n\r\n")
+
+ # Receive the response from the server
+ r = low_conn.getresponse()
+
+ resp = HTTPResponse.from_httplib(
+ r,
+ pool=conn,
+ connection=low_conn,
+ preload_content=False,
+ decode_content=False,
+ )
+ except Exception:
+ # If we hit any problems here, clean up the connection.
+ # Then, raise so that we can handle the actual exception.
+ low_conn.close()
+ raise
+
+ except (ProtocolError, OSError) as err:
+ raise ConnectionError(err, request=request)
+
+ except MaxRetryError as e:
+ if isinstance(e.reason, ConnectTimeoutError):
+ # TODO: Remove this in 3.0.0: see #2811
+ if not isinstance(e.reason, NewConnectionError):
+ raise ConnectTimeout(e, request=request)
+
+ if isinstance(e.reason, ResponseError):
+ raise RetryError(e, request=request)
+
+ if isinstance(e.reason, _ProxyError):
+ raise ProxyError(e, request=request)
+
+ if isinstance(e.reason, _SSLError):
+ # This branch is for urllib3 v1.22 and later.
+ raise SSLError(e, request=request)
+
+ raise ConnectionError(e, request=request)
+
+ except ClosedPoolError as e:
+ raise ConnectionError(e, request=request)
+
+ except _ProxyError as e:
+ raise ProxyError(e)
+
+ except (_SSLError, _HTTPError) as e:
+ if isinstance(e, _SSLError):
+ # This branch is for urllib3 versions earlier than v1.22
+ raise SSLError(e, request=request)
+ elif isinstance(e, ReadTimeoutError):
+ raise ReadTimeout(e, request=request)
+ elif isinstance(e, _InvalidHeader):
+ raise InvalidHeader(e, request=request)
+ else:
+ raise
+
+ return self.build_response(request, resp)
diff --git a/third_party/python/pip/pip/_vendor/requests/api.py b/third_party/python/pip/pip/_vendor/requests/api.py
new file mode 100644
index 0000000000..2f71aaed1a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/api.py
@@ -0,0 +1,157 @@
+"""
+requests.api
+~~~~~~~~~~~~
+
+This module implements the Requests API.
+
+:copyright: (c) 2012 by Kenneth Reitz.
+:license: Apache2, see LICENSE for more details.
+"""
+
+from . import sessions
+
+
+def request(method, url, **kwargs):
+ """Constructs and sends a :class:`Request <Request>`.
+
+ :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
+ :param url: URL for the new :class:`Request` object.
+ :param params: (optional) Dictionary, list of tuples or bytes to send
+ in the query string for the :class:`Request`.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+ :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
+ :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
+ :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
+ ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
+        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content_type'`` is a string
+ defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
+ to add for the file.
+ :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
+ :param timeout: (optional) How many seconds to wait for the server to send data
+ before giving up, as a float, or a :ref:`(connect timeout, read
+ timeout) <timeouts>` tuple.
+ :type timeout: float or tuple
+ :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
+ :type allow_redirects: bool
+ :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
+ :param verify: (optional) Either a boolean, in which case it controls whether we verify
+ the server's TLS certificate, or a string, in which case it must be a path
+ to a CA bundle to use. Defaults to ``True``.
+ :param stream: (optional) if ``False``, the response content will be immediately downloaded.
+    :param cert: (optional) if a string, path to an SSL client cert file (.pem). If a tuple, a ('cert', 'key') pair.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+
+ Usage::
+
+ >>> import requests
+ >>> req = requests.request('GET', 'https://httpbin.org/get')
+ >>> req
+ <Response [200]>
+ """
+
+ # By using the 'with' statement we are sure the session is closed, thus we
+ # avoid leaving sockets open which can trigger a ResourceWarning in some
+ # cases, and look like a memory leak in others.
+ with sessions.Session() as session:
+ return session.request(method=method, url=url, **kwargs)
+
+
+def get(url, params=None, **kwargs):
+ r"""Sends a GET request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param params: (optional) Dictionary, list of tuples or bytes to send
+ in the query string for the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request("get", url, params=params, **kwargs)
+
+
+def options(url, **kwargs):
+ r"""Sends an OPTIONS request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request("options", url, **kwargs)
+
+
+def head(url, **kwargs):
+ r"""Sends a HEAD request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes. If
+ `allow_redirects` is not provided, it will be set to `False` (as
+ opposed to the default :meth:`request` behavior).
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault("allow_redirects", False)
+ return request("head", url, **kwargs)
+
+
+def post(url, data=None, json=None, **kwargs):
+ r"""Sends a POST request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+    :param json: (optional) JSON data to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request("post", url, data=data, json=json, **kwargs)
+
+
+def put(url, data=None, **kwargs):
+ r"""Sends a PUT request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+    :param json: (optional) JSON data to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request("put", url, data=data, **kwargs)
+
+
+def patch(url, data=None, **kwargs):
+ r"""Sends a PATCH request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+    :param json: (optional) JSON data to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request("patch", url, data=data, **kwargs)
+
+
+def delete(url, **kwargs):
+ r"""Sends a DELETE request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request("delete", url, **kwargs)
diff --git a/third_party/python/pip/pip/_vendor/requests/auth.py b/third_party/python/pip/pip/_vendor/requests/auth.py
new file mode 100644
index 0000000000..9733686ddb
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/auth.py
@@ -0,0 +1,315 @@
+"""
+requests.auth
+~~~~~~~~~~~~~
+
+This module contains the authentication handlers for Requests.
+"""
+
+import hashlib
+import os
+import re
+import threading
+import time
+import warnings
+from base64 import b64encode
+
+from ._internal_utils import to_native_string
+from .compat import basestring, str, urlparse
+from .cookies import extract_cookies_to_jar
+from .utils import parse_dict_header
+
+CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
+CONTENT_TYPE_MULTI_PART = "multipart/form-data"
+
+
+def _basic_auth_str(username, password):
+ """Returns a Basic Auth string."""
+
+ # "I want us to put a big-ol' comment on top of it that
+ # says that this behaviour is dumb but we need to preserve
+ # it because people are relying on it."
+ # - Lukasa
+ #
+ # These are here solely to maintain backwards compatibility
+ # for things like ints. This will be removed in 3.0.0.
+ if not isinstance(username, basestring):
+ warnings.warn(
+ "Non-string usernames will no longer be supported in Requests "
+ "3.0.0. Please convert the object you've passed in ({!r}) to "
+ "a string or bytes object in the near future to avoid "
+ "problems.".format(username),
+ category=DeprecationWarning,
+ )
+ username = str(username)
+
+ if not isinstance(password, basestring):
+ warnings.warn(
+ "Non-string passwords will no longer be supported in Requests "
+ "3.0.0. Please convert the object you've passed in ({!r}) to "
+ "a string or bytes object in the near future to avoid "
+ "problems.".format(type(password)),
+ category=DeprecationWarning,
+ )
+ password = str(password)
+ # -- End Removal --
+
+ if isinstance(username, str):
+ username = username.encode("latin1")
+
+ if isinstance(password, str):
+ password = password.encode("latin1")
+
+ authstr = "Basic " + to_native_string(
+ b64encode(b":".join((username, password))).strip()
+ )
+
+ return authstr
+
+
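+# For example (illustrative sketch): _basic_auth_str("user", "pass") returns
+# 'Basic dXNlcjpwYXNz', i.e. "Basic " plus the base64 of b"user:pass".
+
+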
+class AuthBase:
+ """Base class that all auth implementations derive from"""
+
+ def __call__(self, r):
+ raise NotImplementedError("Auth hooks must be callable.")
+
+
+class HTTPBasicAuth(AuthBase):
+ """Attaches HTTP Basic Authentication to the given Request object."""
+
+ def __init__(self, username, password):
+ self.username = username
+ self.password = password
+
+ def __eq__(self, other):
+ return all(
+ [
+ self.username == getattr(other, "username", None),
+ self.password == getattr(other, "password", None),
+ ]
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __call__(self, r):
+ r.headers["Authorization"] = _basic_auth_str(self.username, self.password)
+ return r
+
+
+class HTTPProxyAuth(HTTPBasicAuth):
+ """Attaches HTTP Proxy Authentication to a given Request object."""
+
+ def __call__(self, r):
+ r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password)
+ return r
+
+
+class HTTPDigestAuth(AuthBase):
+ """Attaches HTTP Digest Authentication to the given Request object."""
+
+ def __init__(self, username, password):
+ self.username = username
+ self.password = password
+ # Keep state in per-thread local storage
+ self._thread_local = threading.local()
+
+ def init_per_thread_state(self):
+ # Ensure state is initialized just once per-thread
+ if not hasattr(self._thread_local, "init"):
+ self._thread_local.init = True
+ self._thread_local.last_nonce = ""
+ self._thread_local.nonce_count = 0
+ self._thread_local.chal = {}
+ self._thread_local.pos = None
+ self._thread_local.num_401_calls = None
+
+ def build_digest_header(self, method, url):
+ """
+ :rtype: str
+ """
+
+ realm = self._thread_local.chal["realm"]
+ nonce = self._thread_local.chal["nonce"]
+ qop = self._thread_local.chal.get("qop")
+ algorithm = self._thread_local.chal.get("algorithm")
+ opaque = self._thread_local.chal.get("opaque")
+ hash_utf8 = None
+
+ if algorithm is None:
+ _algorithm = "MD5"
+ else:
+ _algorithm = algorithm.upper()
+ # lambdas assume digest modules are imported at the top level
+ if _algorithm == "MD5" or _algorithm == "MD5-SESS":
+
+ def md5_utf8(x):
+ if isinstance(x, str):
+ x = x.encode("utf-8")
+ return hashlib.md5(x).hexdigest()
+
+ hash_utf8 = md5_utf8
+ elif _algorithm == "SHA":
+
+ def sha_utf8(x):
+ if isinstance(x, str):
+ x = x.encode("utf-8")
+ return hashlib.sha1(x).hexdigest()
+
+ hash_utf8 = sha_utf8
+ elif _algorithm == "SHA-256":
+
+ def sha256_utf8(x):
+ if isinstance(x, str):
+ x = x.encode("utf-8")
+ return hashlib.sha256(x).hexdigest()
+
+ hash_utf8 = sha256_utf8
+ elif _algorithm == "SHA-512":
+
+ def sha512_utf8(x):
+ if isinstance(x, str):
+ x = x.encode("utf-8")
+ return hashlib.sha512(x).hexdigest()
+
+ hash_utf8 = sha512_utf8
+
+ KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731
+
+ if hash_utf8 is None:
+ return None
+
+ # XXX not implemented yet
+ entdig = None
+ p_parsed = urlparse(url)
+        #: path is the request-uri defined in RFC 2616, which should not be empty
+ path = p_parsed.path or "/"
+ if p_parsed.query:
+ path += f"?{p_parsed.query}"
+
+ A1 = f"{self.username}:{realm}:{self.password}"
+ A2 = f"{method}:{path}"
+
+ HA1 = hash_utf8(A1)
+ HA2 = hash_utf8(A2)
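+        # Per RFC 2617, HA1 = H(username:realm:password), HA2 = H(method:uri);
+        # with qop="auth", the response digest computed below is
+        # KD(HA1, nonce:nc:cnonce:auth:HA2).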
+
+ if nonce == self._thread_local.last_nonce:
+ self._thread_local.nonce_count += 1
+ else:
+ self._thread_local.nonce_count = 1
+ ncvalue = f"{self._thread_local.nonce_count:08x}"
+ s = str(self._thread_local.nonce_count).encode("utf-8")
+ s += nonce.encode("utf-8")
+ s += time.ctime().encode("utf-8")
+ s += os.urandom(8)
+
+ cnonce = hashlib.sha1(s).hexdigest()[:16]
+ if _algorithm == "MD5-SESS":
+ HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}")
+
+ if not qop:
+ respdig = KD(HA1, f"{nonce}:{HA2}")
+ elif qop == "auth" or "auth" in qop.split(","):
+ noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}"
+ respdig = KD(HA1, noncebit)
+ else:
+ # XXX handle auth-int.
+ return None
+
+ self._thread_local.last_nonce = nonce
+
+ # XXX should the partial digests be encoded too?
+ base = (
+ f'username="{self.username}", realm="{realm}", nonce="{nonce}", '
+ f'uri="{path}", response="{respdig}"'
+ )
+ if opaque:
+ base += f', opaque="{opaque}"'
+ if algorithm:
+ base += f', algorithm="{algorithm}"'
+ if entdig:
+ base += f', digest="{entdig}"'
+ if qop:
+ base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"'
+
+ return f"Digest {base}"
+
+ def handle_redirect(self, r, **kwargs):
+ """Reset num_401_calls counter on redirects."""
+ if r.is_redirect:
+ self._thread_local.num_401_calls = 1
+
+ def handle_401(self, r, **kwargs):
+ """
+ Takes the given response and tries digest-auth, if needed.
+
+ :rtype: requests.Response
+ """
+
+ # If response is not 4xx, do not auth
+ # See https://github.com/psf/requests/issues/3772
+ if not 400 <= r.status_code < 500:
+ self._thread_local.num_401_calls = 1
+ return r
+
+ if self._thread_local.pos is not None:
+ # Rewind the file position indicator of the body to where
+ # it was to resend the request.
+ r.request.body.seek(self._thread_local.pos)
+ s_auth = r.headers.get("www-authenticate", "")
+
+ if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2:
+
+ self._thread_local.num_401_calls += 1
+ pat = re.compile(r"digest ", flags=re.IGNORECASE)
+ self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1))
+
+ # Consume content and release the original connection
+ # to allow our new request to reuse the same one.
+ r.content
+ r.close()
+ prep = r.request.copy()
+ extract_cookies_to_jar(prep._cookies, r.request, r.raw)
+ prep.prepare_cookies(prep._cookies)
+
+ prep.headers["Authorization"] = self.build_digest_header(
+ prep.method, prep.url
+ )
+ _r = r.connection.send(prep, **kwargs)
+ _r.history.append(r)
+ _r.request = prep
+
+ return _r
+
+ self._thread_local.num_401_calls = 1
+ return r
+
+ def __call__(self, r):
+ # Initialize per-thread state, if needed
+ self.init_per_thread_state()
+ # If we have a saved nonce, skip the 401
+ if self._thread_local.last_nonce:
+ r.headers["Authorization"] = self.build_digest_header(r.method, r.url)
+ try:
+ self._thread_local.pos = r.body.tell()
+ except AttributeError:
+ # In the case of HTTPDigestAuth being reused and the body of
+ # the previous request was a file-like object, pos has the
+ # file position of the previous body. Ensure it's set to
+ # None.
+ self._thread_local.pos = None
+ r.register_hook("response", self.handle_401)
+ r.register_hook("response", self.handle_redirect)
+ self._thread_local.num_401_calls = 1
+
+ return r
+
+ def __eq__(self, other):
+ return all(
+ [
+ self.username == getattr(other, "username", None),
+ self.password == getattr(other, "password", None),
+ ]
+ )
+
+ def __ne__(self, other):
+ return not self == other
diff --git a/third_party/python/pip/pip/_vendor/requests/certs.py b/third_party/python/pip/pip/_vendor/requests/certs.py
new file mode 100644
index 0000000000..38696a1fb3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/certs.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+
+"""
+requests.certs
+~~~~~~~~~~~~~~
+
+This module returns the preferred default CA certificate bundle. There is
+only one — the one from the certifi package.
+
+If you are packaging Requests, e.g., for a Linux distribution or a managed
+environment, you can change the definition of where() to return a separately
+packaged CA bundle.
+"""
+
+import os
+
+if "_PIP_STANDALONE_CERT" not in os.environ:
+ from pip._vendor.certifi import where
+else:
+ def where():
+ return os.environ["_PIP_STANDALONE_CERT"]
+
+if __name__ == "__main__":
+ print(where())
diff --git a/third_party/python/pip/pip/_vendor/requests/compat.py b/third_party/python/pip/pip/_vendor/requests/compat.py
new file mode 100644
index 0000000000..9ab2bb4865
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/compat.py
@@ -0,0 +1,67 @@
+"""
+requests.compat
+~~~~~~~~~~~~~~~
+
+This module previously handled import compatibility issues
+between Python 2 and Python 3. It remains for backwards
+compatibility until the next major version.
+"""
+
+from pip._vendor import chardet
+
+import sys
+
+# -------
+# Pythons
+# -------
+
+# Syntax sugar.
+_ver = sys.version_info
+
+#: Python 2.x?
+is_py2 = _ver[0] == 2
+
+#: Python 3.x?
+is_py3 = _ver[0] == 3
+
+# Note: We've patched out simplejson support in pip because it prevents
+# upgrading simplejson on Windows.
+import json
+from json import JSONDecodeError
+
+# Keep OrderedDict for backwards compatibility.
+from collections import OrderedDict
+from collections.abc import Callable, Mapping, MutableMapping
+from http import cookiejar as cookielib
+from http.cookies import Morsel
+from io import StringIO
+
+# --------------
+# Legacy Imports
+# --------------
+from urllib.parse import (
+ quote,
+ quote_plus,
+ unquote,
+ unquote_plus,
+ urldefrag,
+ urlencode,
+ urljoin,
+ urlparse,
+ urlsplit,
+ urlunparse,
+)
+from urllib.request import (
+ getproxies,
+ getproxies_environment,
+ parse_http_list,
+ proxy_bypass,
+ proxy_bypass_environment,
+)
+
+builtin_str = str
+str = str
+bytes = bytes
+basestring = (str, bytes)
+numeric_types = (int, float)
+integer_types = (int,)
diff --git a/third_party/python/pip/pip/_vendor/requests/cookies.py b/third_party/python/pip/pip/_vendor/requests/cookies.py
new file mode 100644
index 0000000000..bf54ab237e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/cookies.py
@@ -0,0 +1,561 @@
+"""
+requests.cookies
+~~~~~~~~~~~~~~~~
+
+Compatibility code to be able to use `cookielib.CookieJar` with requests.
+
+requests.utils imports from here, so be careful with imports.
+"""
+
+import calendar
+import copy
+import time
+
+from ._internal_utils import to_native_string
+from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+
+class MockRequest:
+ """Wraps a `requests.Request` to mimic a `urllib2.Request`.
+
+ The code in `cookielib.CookieJar` expects this interface in order to correctly
+ manage cookie policies, i.e., determine whether a cookie can be set, given the
+ domains of the request and the cookie.
+
+ The original request object is read-only. The client is responsible for collecting
+ the new headers via `get_new_headers()` and interpreting them appropriately. You
+ probably want `get_cookie_header`, defined below.
+ """
+
+ def __init__(self, request):
+ self._r = request
+ self._new_headers = {}
+ self.type = urlparse(self._r.url).scheme
+
+ def get_type(self):
+ return self.type
+
+ def get_host(self):
+ return urlparse(self._r.url).netloc
+
+ def get_origin_req_host(self):
+ return self.get_host()
+
+ def get_full_url(self):
+        # Only return the request's URL if the user didn't set the Host
+        # header.
+ if not self._r.headers.get("Host"):
+ return self._r.url
+ # If they did set it, retrieve it and reconstruct the expected domain
+ host = to_native_string(self._r.headers["Host"], encoding="utf-8")
+ parsed = urlparse(self._r.url)
+ # Reconstruct the URL as we expect it
+ return urlunparse(
+ [
+ parsed.scheme,
+ host,
+ parsed.path,
+ parsed.params,
+ parsed.query,
+ parsed.fragment,
+ ]
+ )
+
+ def is_unverifiable(self):
+ return True
+
+ def has_header(self, name):
+ return name in self._r.headers or name in self._new_headers
+
+ def get_header(self, name, default=None):
+ return self._r.headers.get(name, self._new_headers.get(name, default))
+
+ def add_header(self, key, val):
+ """cookielib has no legitimate use for this method; add it back if you find one."""
+ raise NotImplementedError(
+ "Cookie headers should be added with add_unredirected_header()"
+ )
+
+ def add_unredirected_header(self, name, value):
+ self._new_headers[name] = value
+
+ def get_new_headers(self):
+ return self._new_headers
+
+ @property
+ def unverifiable(self):
+ return self.is_unverifiable()
+
+ @property
+ def origin_req_host(self):
+ return self.get_origin_req_host()
+
+ @property
+ def host(self):
+ return self.get_host()
+
+
+class MockResponse:
+ """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
+
+ ...what? Basically, expose the parsed HTTP headers from the server response
+ the way `cookielib` expects to see them.
+ """
+
+ def __init__(self, headers):
+ """Make a MockResponse for `cookielib` to read.
+
+ :param headers: a httplib.HTTPMessage or analogous carrying the headers
+ """
+ self._headers = headers
+
+ def info(self):
+ return self._headers
+
+    def getheaders(self, name):
+        return self._headers.getheaders(name)
+
+
+def extract_cookies_to_jar(jar, request, response):
+ """Extract the cookies from the response into a CookieJar.
+
+ :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
+ :param request: our own requests.Request object
+ :param response: urllib3.HTTPResponse object
+ """
+ if not (hasattr(response, "_original_response") and response._original_response):
+ return
+    # The _original_response field is the wrapped httplib.HTTPResponse object.
+ req = MockRequest(request)
+ # pull out the HTTPMessage with the headers and put it in the mock:
+ res = MockResponse(response._original_response.msg)
+ jar.extract_cookies(res, req)
+
+
+def get_cookie_header(jar, request):
+ """
+ Produce an appropriate Cookie header string to be sent with `request`, or None.
+
+ :rtype: str
+ """
+ r = MockRequest(request)
+ jar.add_cookie_header(r)
+ return r.get_new_headers().get("Cookie")
+
+
+def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
+ """Unsets a cookie by name, by default over all domains and paths.
+
+    Wraps CookieJar.clear(); the operation is O(n).
+ """
+ clearables = []
+ for cookie in cookiejar:
+ if cookie.name != name:
+ continue
+ if domain is not None and domain != cookie.domain:
+ continue
+ if path is not None and path != cookie.path:
+ continue
+ clearables.append((cookie.domain, cookie.path, cookie.name))
+
+ for domain, path, name in clearables:
+ cookiejar.clear(domain, path, name)
+
+
+class CookieConflictError(RuntimeError):
+ """There are two cookies that meet the criteria specified in the cookie jar.
+ Use .get and .set and include domain and path args in order to be more specific.
+ """
+
+
+class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
+ """Compatibility class; is a cookielib.CookieJar, but exposes a dict
+ interface.
+
+ This is the CookieJar we create by default for requests and sessions that
+ don't specify one, since some clients may expect response.cookies and
+ session.cookies to support dict operations.
+
+ Requests does not use the dict interface internally; it's just for
+ compatibility with external client code. All requests code should work
+ out of the box with externally provided instances of ``CookieJar``, e.g.
+ ``LWPCookieJar`` and ``FileCookieJar``.
+
+ Unlike a regular CookieJar, this class is pickleable.
+
+ .. warning:: dictionary operations that are normally O(1) may be O(n).
+ """
+
+ def get(self, name, default=None, domain=None, path=None):
+ """Dict-like get() that also supports optional domain and path args in
+ order to resolve naming collisions from using one cookie jar over
+ multiple domains.
+
+ .. warning:: operation is O(n), not O(1).
+ """
+ try:
+ return self._find_no_duplicates(name, domain, path)
+ except KeyError:
+ return default
+
+ def set(self, name, value, **kwargs):
+ """Dict-like set() that also supports optional domain and path args in
+ order to resolve naming collisions from using one cookie jar over
+ multiple domains.
+ """
+ # support client code that unsets cookies by assignment of a None value:
+ if value is None:
+ remove_cookie_by_name(
+ self, name, domain=kwargs.get("domain"), path=kwargs.get("path")
+ )
+ return
+
+ if isinstance(value, Morsel):
+ c = morsel_to_cookie(value)
+ else:
+ c = create_cookie(name, value, **kwargs)
+ self.set_cookie(c)
+ return c
+
+ def iterkeys(self):
+ """Dict-like iterkeys() that returns an iterator of names of cookies
+ from the jar.
+
+ .. seealso:: itervalues() and iteritems().
+ """
+ for cookie in iter(self):
+ yield cookie.name
+
+ def keys(self):
+ """Dict-like keys() that returns a list of names of cookies from the
+ jar.
+
+ .. seealso:: values() and items().
+ """
+ return list(self.iterkeys())
+
+ def itervalues(self):
+ """Dict-like itervalues() that returns an iterator of values of cookies
+ from the jar.
+
+ .. seealso:: iterkeys() and iteritems().
+ """
+ for cookie in iter(self):
+ yield cookie.value
+
+ def values(self):
+ """Dict-like values() that returns a list of values of cookies from the
+ jar.
+
+ .. seealso:: keys() and items().
+ """
+ return list(self.itervalues())
+
+ def iteritems(self):
+ """Dict-like iteritems() that returns an iterator of name-value tuples
+ from the jar.
+
+ .. seealso:: iterkeys() and itervalues().
+ """
+ for cookie in iter(self):
+ yield cookie.name, cookie.value
+
+ def items(self):
+ """Dict-like items() that returns a list of name-value tuples from the
+        jar. Allows client code to call ``dict(RequestsCookieJar)`` and get a
+        vanilla Python dict of key-value pairs.
+
+ .. seealso:: keys() and values().
+ """
+ return list(self.iteritems())
+
+ def list_domains(self):
+ """Utility method to list all the domains in the jar."""
+ domains = []
+ for cookie in iter(self):
+ if cookie.domain not in domains:
+ domains.append(cookie.domain)
+ return domains
+
+ def list_paths(self):
+ """Utility method to list all the paths in the jar."""
+ paths = []
+ for cookie in iter(self):
+ if cookie.path not in paths:
+ paths.append(cookie.path)
+ return paths
+
+ def multiple_domains(self):
+ """Returns True if there are multiple domains in the jar.
+ Returns False otherwise.
+
+ :rtype: bool
+ """
+ domains = []
+ for cookie in iter(self):
+ if cookie.domain is not None and cookie.domain in domains:
+ return True
+ domains.append(cookie.domain)
+ return False # there is only one domain in jar
+
+ def get_dict(self, domain=None, path=None):
+ """Takes as an argument an optional domain and path and returns a plain
+ old Python dict of name-value pairs of cookies that meet the
+ requirements.
+
+ :rtype: dict
+ """
+ dictionary = {}
+ for cookie in iter(self):
+ if (domain is None or cookie.domain == domain) and (
+ path is None or cookie.path == path
+ ):
+ dictionary[cookie.name] = cookie.value
+ return dictionary
+
+ def __contains__(self, name):
+ try:
+ return super().__contains__(name)
+ except CookieConflictError:
+ return True
+
+ def __getitem__(self, name):
+ """Dict-like __getitem__() for compatibility with client code. Throws
+ exception if there are more than one cookie with name. In that case,
+ use the more explicit get() method instead.
+
+ .. warning:: operation is O(n), not O(1).
+ """
+ return self._find_no_duplicates(name)
+
+ def __setitem__(self, name, value):
+ """Dict-like __setitem__ for compatibility with client code. Throws
+ exception if there is already a cookie of that name in the jar. In that
+ case, use the more explicit set() method instead.
+ """
+ self.set(name, value)
+
+ def __delitem__(self, name):
+ """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
+ ``remove_cookie_by_name()``.
+ """
+ remove_cookie_by_name(self, name)
+
+ def set_cookie(self, cookie, *args, **kwargs):
+ if (
+ hasattr(cookie.value, "startswith")
+ and cookie.value.startswith('"')
+ and cookie.value.endswith('"')
+ ):
+ cookie.value = cookie.value.replace('\\"', "")
+ return super().set_cookie(cookie, *args, **kwargs)
+
+ def update(self, other):
+ """Updates this jar with cookies from another CookieJar or dict-like"""
+ if isinstance(other, cookielib.CookieJar):
+ for cookie in other:
+ self.set_cookie(copy.copy(cookie))
+ else:
+ super().update(other)
+
+ def _find(self, name, domain=None, path=None):
+ """Requests uses this method internally to get cookie values.
+
+ If there are conflicting cookies, _find arbitrarily chooses one.
+ See _find_no_duplicates if you want an exception thrown if there are
+ conflicting cookies.
+
+ :param name: a string containing name of cookie
+ :param domain: (optional) string containing domain of cookie
+ :param path: (optional) string containing path of cookie
+ :return: cookie.value
+ """
+ for cookie in iter(self):
+ if cookie.name == name:
+ if domain is None or cookie.domain == domain:
+ if path is None or cookie.path == path:
+ return cookie.value
+
+ raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
+
+ def _find_no_duplicates(self, name, domain=None, path=None):
+ """Both ``__get_item__`` and ``get`` call this function: it's never
+ used elsewhere in Requests.
+
+ :param name: a string containing name of cookie
+ :param domain: (optional) string containing domain of cookie
+ :param path: (optional) string containing path of cookie
+ :raises KeyError: if cookie is not found
+ :raises CookieConflictError: if there are multiple cookies
+ that match name and optionally domain and path
+ :return: cookie.value
+ """
+ toReturn = None
+ for cookie in iter(self):
+ if cookie.name == name:
+ if domain is None or cookie.domain == domain:
+ if path is None or cookie.path == path:
+ if toReturn is not None:
+                            # if there are multiple cookies that meet the passed-in criteria
+                            raise CookieConflictError(
+                                f"There are multiple cookies with name {name!r}"
+ )
+ # we will eventually return this as long as no cookie conflict
+ toReturn = cookie.value
+
+ if toReturn:
+ return toReturn
+ raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")
+
+ def __getstate__(self):
+ """Unlike a normal CookieJar, this class is pickleable."""
+ state = self.__dict__.copy()
+ # remove the unpickleable RLock object
+ state.pop("_cookies_lock")
+ return state
+
+ def __setstate__(self, state):
+ """Unlike a normal CookieJar, this class is pickleable."""
+ self.__dict__.update(state)
+ if "_cookies_lock" not in self.__dict__:
+ self._cookies_lock = threading.RLock()
+
+ def copy(self):
+ """Return a copy of this RequestsCookieJar."""
+ new_cj = RequestsCookieJar()
+ new_cj.set_policy(self.get_policy())
+ new_cj.update(self)
+ return new_cj
+
+ def get_policy(self):
+ """Return the CookiePolicy instance used."""
+ return self._policy
+
+
+def _copy_cookie_jar(jar):
+ if jar is None:
+ return None
+
+ if hasattr(jar, "copy"):
+ # We're dealing with an instance of RequestsCookieJar
+ return jar.copy()
+ # We're dealing with a generic CookieJar instance
+ new_jar = copy.copy(jar)
+ new_jar.clear()
+ for cookie in jar:
+ new_jar.set_cookie(copy.copy(cookie))
+ return new_jar
+
+
+def create_cookie(name, value, **kwargs):
+ """Make a cookie from underspecified parameters.
+
+ By default, the pair of `name` and `value` will be set for the domain ''
+ and sent on every request (this is sometimes called a "supercookie").
+ """
+ result = {
+ "version": 0,
+ "name": name,
+ "value": value,
+ "port": None,
+ "domain": "",
+ "path": "/",
+ "secure": False,
+ "expires": None,
+ "discard": True,
+ "comment": None,
+ "comment_url": None,
+ "rest": {"HttpOnly": None},
+ "rfc2109": False,
+ }
+
+ badargs = set(kwargs) - set(result)
+ if badargs:
+ raise TypeError(
+ f"create_cookie() got unexpected keyword arguments: {list(badargs)}"
+ )
+
+ result.update(kwargs)
+ result["port_specified"] = bool(result["port"])
+ result["domain_specified"] = bool(result["domain"])
+ result["domain_initial_dot"] = result["domain"].startswith(".")
+ result["path_specified"] = bool(result["path"])
+
+ return cookielib.Cookie(**result)
+
+
+def morsel_to_cookie(morsel):
+ """Convert a Morsel object into a Cookie containing the one k/v pair."""
+
+ expires = None
+ if morsel["max-age"]:
+ try:
+ expires = int(time.time() + int(morsel["max-age"]))
+ except ValueError:
+ raise TypeError(f"max-age: {morsel['max-age']} must be integer")
+ elif morsel["expires"]:
+ time_template = "%a, %d-%b-%Y %H:%M:%S GMT"
+ expires = calendar.timegm(time.strptime(morsel["expires"], time_template))
+ return create_cookie(
+ comment=morsel["comment"],
+ comment_url=bool(morsel["comment"]),
+ discard=False,
+ domain=morsel["domain"],
+ expires=expires,
+ name=morsel.key,
+ path=morsel["path"],
+ port=None,
+ rest={"HttpOnly": morsel["httponly"]},
+ rfc2109=False,
+ secure=bool(morsel["secure"]),
+ value=morsel.value,
+ version=morsel["version"] or 0,
+ )
+
+
+def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
+ """Returns a CookieJar from a key/value dictionary.
+
+ :param cookie_dict: Dict of key/values to insert into CookieJar.
+ :param cookiejar: (optional) A cookiejar to add the cookies to.
+ :param overwrite: (optional) If False, will not replace cookies
+ already in the jar with new ones.
+ :rtype: CookieJar
+ """
+ if cookiejar is None:
+ cookiejar = RequestsCookieJar()
+
+ if cookie_dict is not None:
+ names_from_jar = [cookie.name for cookie in cookiejar]
+ for name in cookie_dict:
+ if overwrite or (name not in names_from_jar):
+ cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
+
+ return cookiejar
+
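+
+# For example (sketch): cookiejar_from_dict({"k": "v"}) returns a
+# RequestsCookieJar holding one cookie named "k" with value "v", built with
+# create_cookie()'s defaults (domain "", path "/").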
+
+def merge_cookies(cookiejar, cookies):
+ """Add cookies to cookiejar and returns a merged CookieJar.
+
+ :param cookiejar: CookieJar object to add the cookies to.
+ :param cookies: Dictionary or CookieJar object to be added.
+ :rtype: CookieJar
+ """
+ if not isinstance(cookiejar, cookielib.CookieJar):
+ raise ValueError("You can only merge into CookieJar")
+
+ if isinstance(cookies, dict):
+ cookiejar = cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False)
+ elif isinstance(cookies, cookielib.CookieJar):
+ try:
+ cookiejar.update(cookies)
+ except AttributeError:
+ for cookie_in_jar in cookies:
+ cookiejar.set_cookie(cookie_in_jar)
+
+ return cookiejar
diff --git a/third_party/python/pip/pip/_vendor/requests/exceptions.py b/third_party/python/pip/pip/_vendor/requests/exceptions.py
new file mode 100644
index 0000000000..168d07390d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/exceptions.py
@@ -0,0 +1,141 @@
+"""
+requests.exceptions
+~~~~~~~~~~~~~~~~~~~
+
+This module contains the set of Requests' exceptions.
+"""
+from pip._vendor.urllib3.exceptions import HTTPError as BaseHTTPError
+
+from .compat import JSONDecodeError as CompatJSONDecodeError
+
+
+class RequestException(IOError):
+ """There was an ambiguous exception that occurred while handling your
+ request.
+ """
+
+ def __init__(self, *args, **kwargs):
+ """Initialize RequestException with `request` and `response` objects."""
+ response = kwargs.pop("response", None)
+ self.response = response
+ self.request = kwargs.pop("request", None)
+ if response is not None and not self.request and hasattr(response, "request"):
+ self.request = self.response.request
+ super().__init__(*args, **kwargs)
+
+
+class InvalidJSONError(RequestException):
+ """A JSON error occurred."""
+
+
+class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError):
+ """Couldn't decode the text into json"""
+
+ def __init__(self, *args, **kwargs):
+ """
+ Construct the JSONDecodeError instance first with all
+        args. Then use its args to construct the IOError so that
+        the JSON-specific args aren't used as IOError-specific args
+ and the error message from JSONDecodeError is preserved.
+ """
+ CompatJSONDecodeError.__init__(self, *args)
+ InvalidJSONError.__init__(self, *self.args, **kwargs)
+
+
+class HTTPError(RequestException):
+ """An HTTP error occurred."""
+
+
+class ConnectionError(RequestException):
+ """A Connection error occurred."""
+
+
+class ProxyError(ConnectionError):
+ """A proxy error occurred."""
+
+
+class SSLError(ConnectionError):
+ """An SSL error occurred."""
+
+
+class Timeout(RequestException):
+ """The request timed out.
+
+ Catching this error will catch both
+ :exc:`~requests.exceptions.ConnectTimeout` and
+ :exc:`~requests.exceptions.ReadTimeout` errors.
+ """
+
+
+class ConnectTimeout(ConnectionError, Timeout):
+ """The request timed out while trying to connect to the remote server.
+
+ Requests that produced this error are safe to retry.
+ """
+
+
+class ReadTimeout(Timeout):
+ """The server did not send any data in the allotted amount of time."""
+
+
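+# Usage sketch (illustrative): catching Timeout covers both of its subclasses:
+#
+#     try:
+#         requests.get("https://example.org/", timeout=(3.05, 27))
+#     except Timeout:
+#         ...  # raised for either a connect or a read timeout
+
+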
+class URLRequired(RequestException):
+ """A valid URL is required to make a request."""
+
+
+class TooManyRedirects(RequestException):
+ """Too many redirects."""
+
+
+class MissingSchema(RequestException, ValueError):
+ """The URL scheme (e.g. http or https) is missing."""
+
+
+class InvalidSchema(RequestException, ValueError):
+ """The URL scheme provided is either invalid or unsupported."""
+
+
+class InvalidURL(RequestException, ValueError):
+ """The URL provided was somehow invalid."""
+
+
+class InvalidHeader(RequestException, ValueError):
+ """The header value provided was somehow invalid."""
+
+
+class InvalidProxyURL(InvalidURL):
+ """The proxy URL provided is invalid."""
+
+
+class ChunkedEncodingError(RequestException):
+ """The server declared chunked encoding but sent an invalid chunk."""
+
+
+class ContentDecodingError(RequestException, BaseHTTPError):
+ """Failed to decode response content."""
+
+
+class StreamConsumedError(RequestException, TypeError):
+ """The content for this response was already consumed."""
+
+
+class RetryError(RequestException):
+ """Custom retries logic failed"""
+
+
+class UnrewindableBodyError(RequestException):
+ """Requests encountered an error when trying to rewind a body."""
+
+
+# Warnings
+
+
+class RequestsWarning(Warning):
+ """Base warning for Requests."""
+
+
+class FileModeWarning(RequestsWarning, DeprecationWarning):
+ """A file was opened in text mode, but Requests determined its binary length."""
+
+
+class RequestsDependencyWarning(RequestsWarning):
+ """An imported dependency doesn't match the expected version range."""
diff --git a/third_party/python/pip/pip/_vendor/requests/help.py b/third_party/python/pip/pip/_vendor/requests/help.py
new file mode 100644
index 0000000000..2d292c2f06
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/help.py
@@ -0,0 +1,131 @@
+"""Module containing bug report helper(s)."""
+
+import json
+import platform
+import ssl
+import sys
+
+from pip._vendor import idna
+from pip._vendor import urllib3
+
+from . import __version__ as requests_version
+
+charset_normalizer = None
+
+try:
+ from pip._vendor import chardet
+except ImportError:
+ chardet = None
+
+try:
+ from pip._vendor.urllib3.contrib import pyopenssl
+except ImportError:
+ pyopenssl = None
+ OpenSSL = None
+ cryptography = None
+else:
+ import cryptography
+ import OpenSSL
+
+
+def _implementation():
+ """Return a dict with the Python implementation and version.
+
+ Provide both the name and the version of the Python implementation
+ currently running. For example, on CPython 3.10.3 it will return
+ {'name': 'CPython', 'version': '3.10.3'}.
+
+ This function works best on CPython and PyPy: in particular, it probably
+ doesn't work for Jython or IronPython. Future investigation should be done
+ to work out the correct shape of the code for those platforms.
+ """
+ implementation = platform.python_implementation()
+
+ if implementation == "CPython":
+ implementation_version = platform.python_version()
+ elif implementation == "PyPy":
+ implementation_version = "{}.{}.{}".format(
+ sys.pypy_version_info.major,
+ sys.pypy_version_info.minor,
+ sys.pypy_version_info.micro,
+ )
+ if sys.pypy_version_info.releaselevel != "final":
+ implementation_version = "".join(
+ [implementation_version, sys.pypy_version_info.releaselevel]
+ )
+ elif implementation == "Jython":
+ implementation_version = platform.python_version() # Complete Guess
+ elif implementation == "IronPython":
+ implementation_version = platform.python_version() # Complete Guess
+ else:
+ implementation_version = "Unknown"
+
+ return {"name": implementation, "version": implementation_version}
+
+
+def info():
+ """Generate information for a bug report."""
+ try:
+ platform_info = {
+ "system": platform.system(),
+ "release": platform.release(),
+ }
+ except OSError:
+ platform_info = {
+ "system": "Unknown",
+ "release": "Unknown",
+ }
+
+ implementation_info = _implementation()
+ urllib3_info = {"version": urllib3.__version__}
+ charset_normalizer_info = {"version": None}
+ chardet_info = {"version": None}
+ if charset_normalizer:
+ charset_normalizer_info = {"version": charset_normalizer.__version__}
+ if chardet:
+ chardet_info = {"version": chardet.__version__}
+
+ pyopenssl_info = {
+ "version": None,
+ "openssl_version": "",
+ }
+ if OpenSSL:
+ pyopenssl_info = {
+ "version": OpenSSL.__version__,
+ "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}",
+ }
+ cryptography_info = {
+ "version": getattr(cryptography, "__version__", ""),
+ }
+ idna_info = {
+ "version": getattr(idna, "__version__", ""),
+ }
+
+ system_ssl = ssl.OPENSSL_VERSION_NUMBER
+ system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""}
+
+ return {
+ "platform": platform_info,
+ "implementation": implementation_info,
+ "system_ssl": system_ssl_info,
+ "using_pyopenssl": pyopenssl is not None,
+ "using_charset_normalizer": chardet is None,
+ "pyOpenSSL": pyopenssl_info,
+ "urllib3": urllib3_info,
+ "chardet": chardet_info,
+ "charset_normalizer": charset_normalizer_info,
+ "cryptography": cryptography_info,
+ "idna": idna_info,
+ "requests": {
+ "version": requests_version,
+ },
+ }
+
+
+def main():
+ """Pretty-print the bug information as JSON."""
+ print(json.dumps(info(), sort_keys=True, indent=2))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/third_party/python/pip/pip/_vendor/requests/hooks.py b/third_party/python/pip/pip/_vendor/requests/hooks.py
new file mode 100644
index 0000000000..d181ba2ec2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/hooks.py
@@ -0,0 +1,33 @@
+"""
+requests.hooks
+~~~~~~~~~~~~~~
+
+This module provides the capabilities for the Requests hooks system.
+
+Available hooks:
+
+``response``:
+ The response generated from a Request.
+"""
+HOOKS = ["response"]
+
+
+def default_hooks():
+ return {event: [] for event in HOOKS}
+
+
+# TODO: response is the only one
+
+
+def dispatch_hook(key, hooks, hook_data, **kwargs):
+ """Dispatches a hook dictionary on a given piece of data."""
+ hooks = hooks or {}
+ hooks = hooks.get(key)
+ if hooks:
+        if callable(hooks):
+ hooks = [hooks]
+ for hook in hooks:
+ _hook_data = hook(hook_data, **kwargs)
+ if _hook_data is not None:
+ hook_data = _hook_data
+ return hook_data
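+
+
+# Sketch: hooks may be a single callable or a list of callables. For example,
+# dispatch_hook("response", {"response": my_hook}, resp) wraps the bare
+# callable in a list and calls it; resp is replaced only when a hook returns
+# a non-None value. (my_hook is a hypothetical hook function.)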
diff --git a/third_party/python/pip/pip/_vendor/requests/models.py b/third_party/python/pip/pip/_vendor/requests/models.py
new file mode 100644
index 0000000000..76e6f199c0
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/models.py
@@ -0,0 +1,1034 @@
+"""
+requests.models
+~~~~~~~~~~~~~~~
+
+This module contains the primary objects that power Requests.
+"""
+
+import datetime
+
+# Import encoding now, to avoid implicit import later.
+# Implicit import within threads may cause LookupError when the standard library is in a ZIP,
+# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
+import encodings.idna # noqa: F401
+from io import UnsupportedOperation
+
+from pip._vendor.urllib3.exceptions import (
+ DecodeError,
+ LocationParseError,
+ ProtocolError,
+ ReadTimeoutError,
+ SSLError,
+)
+from pip._vendor.urllib3.fields import RequestField
+from pip._vendor.urllib3.filepost import encode_multipart_formdata
+from pip._vendor.urllib3.util import parse_url
+
+from ._internal_utils import to_native_string, unicode_is_ascii
+from .auth import HTTPBasicAuth
+from .compat import (
+ Callable,
+ JSONDecodeError,
+ Mapping,
+ basestring,
+ builtin_str,
+ chardet,
+ cookielib,
+)
+from .compat import json as complexjson
+from .compat import urlencode, urlsplit, urlunparse
+from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header
+from .exceptions import (
+ ChunkedEncodingError,
+ ConnectionError,
+ ContentDecodingError,
+ HTTPError,
+ InvalidJSONError,
+ InvalidURL,
+)
+from .exceptions import JSONDecodeError as RequestsJSONDecodeError
+from .exceptions import MissingSchema
+from .exceptions import SSLError as RequestsSSLError
+from .exceptions import StreamConsumedError
+from .hooks import default_hooks
+from .status_codes import codes
+from .structures import CaseInsensitiveDict
+from .utils import (
+ check_header_validity,
+ get_auth_from_url,
+ guess_filename,
+ guess_json_utf,
+ iter_slices,
+ parse_header_links,
+ requote_uri,
+ stream_decode_response_unicode,
+ super_len,
+ to_key_val_list,
+)
+
+#: The set of HTTP status codes that indicate an automatically
+#: processable redirect.
+REDIRECT_STATI = (
+ codes.moved, # 301
+ codes.found, # 302
+ codes.other, # 303
+ codes.temporary_redirect, # 307
+ codes.permanent_redirect, # 308
+)
+
+DEFAULT_REDIRECT_LIMIT = 30
+CONTENT_CHUNK_SIZE = 10 * 1024
+ITER_CHUNK_SIZE = 512
+
+
+class RequestEncodingMixin:
+ @property
+ def path_url(self):
+ """Build the path URL to use."""
+
+ url = []
+
+ p = urlsplit(self.url)
+
+ path = p.path
+ if not path:
+ path = "/"
+
+ url.append(path)
+
+ query = p.query
+ if query:
+ url.append("?")
+ url.append(query)
+
+ return "".join(url)
+
+ @staticmethod
+ def _encode_params(data):
+ """Encode parameters in a piece of data.
+
+ Will successfully encode parameters when passed as a dict or a list of
+ 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
+ if parameters are supplied as a dict.
+ """
+
+ if isinstance(data, (str, bytes)):
+ return data
+ elif hasattr(data, "read"):
+ return data
+ elif hasattr(data, "__iter__"):
+ result = []
+ for k, vs in to_key_val_list(data):
+ if isinstance(vs, basestring) or not hasattr(vs, "__iter__"):
+ vs = [vs]
+ for v in vs:
+ if v is not None:
+ result.append(
+ (
+ k.encode("utf-8") if isinstance(k, str) else k,
+ v.encode("utf-8") if isinstance(v, str) else v,
+ )
+ )
+ return urlencode(result, doseq=True)
+ else:
+ return data
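+
+    # Illustrative sketch (values are hypothetical): pair order is kept and
+    # iterable values expand into repeated keys.
+    #
+    #     >>> encode = RequestEncodingMixin._encode_params
+    #     >>> encode([("a", "1"), ("b", ["2", "3"])])
+    #     'a=1&b=2&b=3'
+    #     >>> encode("already=encoded")
+    #     'already=encoded'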
+
+ @staticmethod
+ def _encode_files(files, data):
+ """Build the body for a multipart/form-data request.
+
+ Will successfully encode files when passed as a dict or a list of
+ tuples. Order is retained if data is a list of tuples but arbitrary
+ if parameters are supplied as a dict.
+        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
+        or 4-tuples (filename, fileobj, content_type, custom_headers).
+ """
+ if not files:
+ raise ValueError("Files must be provided.")
+ elif isinstance(data, basestring):
+ raise ValueError("Data must not be a string.")
+
+ new_fields = []
+ fields = to_key_val_list(data or {})
+ files = to_key_val_list(files or {})
+
+ for field, val in fields:
+ if isinstance(val, basestring) or not hasattr(val, "__iter__"):
+ val = [val]
+ for v in val:
+ if v is not None:
+ # Don't call str() on bytestrings: in Py3 it all goes wrong.
+ if not isinstance(v, bytes):
+ v = str(v)
+
+ new_fields.append(
+ (
+ field.decode("utf-8")
+ if isinstance(field, bytes)
+ else field,
+ v.encode("utf-8") if isinstance(v, str) else v,
+ )
+ )
+
+ for (k, v) in files:
+ # support for explicit filename
+ ft = None
+ fh = None
+ if isinstance(v, (tuple, list)):
+ if len(v) == 2:
+ fn, fp = v
+ elif len(v) == 3:
+ fn, fp, ft = v
+ else:
+ fn, fp, ft, fh = v
+ else:
+ fn = guess_filename(v) or k
+ fp = v
+
+ if isinstance(fp, (str, bytes, bytearray)):
+ fdata = fp
+ elif hasattr(fp, "read"):
+ fdata = fp.read()
+ elif fp is None:
+ continue
+ else:
+ fdata = fp
+
+ rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
+ rf.make_multipart(content_type=ft)
+ new_fields.append(rf)
+
+ body, content_type = encode_multipart_formdata(new_fields)
+
+ return body, content_type
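+
+    # Illustrative sketch (field names are hypothetical): a 3-tuple supplies
+    # an explicit filename and content type alongside the payload.
+    #
+    #     >>> files = {"report": ("report.csv", b"a,b\n1,2\n", "text/csv")}
+    #     >>> body, ctype = RequestEncodingMixin._encode_files(
+    #     ...     files, data={"kind": "daily"})
+    #     >>> ctype.startswith("multipart/form-data; boundary=")
+    #     True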
+
+
+class RequestHooksMixin:
+ def register_hook(self, event, hook):
+ """Properly register a hook."""
+
+ if event not in self.hooks:
+ raise ValueError(f'Unsupported event specified, with event name "{event}"')
+
+ if isinstance(hook, Callable):
+ self.hooks[event].append(hook)
+ elif hasattr(hook, "__iter__"):
+ self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
+
+ def deregister_hook(self, event, hook):
+ """Deregister a previously registered hook.
+ Returns True if the hook existed, False if not.
+ """
+
+ try:
+ self.hooks[event].remove(hook)
+ return True
+ except ValueError:
+ return False
+
+
+class Request(RequestHooksMixin):
+ """A user-created :class:`Request <Request>` object.
+
+ Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
+
+ :param method: HTTP method to use.
+ :param url: URL to send.
+ :param headers: dictionary of headers to send.
+ :param files: dictionary of {filename: fileobject} files to multipart upload.
+ :param data: the body to attach to the request. If a dictionary or
+ list of tuples ``[(key, value)]`` is provided, form-encoding will
+ take place.
+ :param json: json for the body to attach to the request (if files or data is not specified).
+ :param params: URL parameters to append to the URL. If a dictionary or
+ list of tuples ``[(key, value)]`` is provided, form-encoding will
+ take place.
+ :param auth: Auth handler or (user, pass) tuple.
+ :param cookies: dictionary or CookieJar of cookies to attach to this request.
+ :param hooks: dictionary of callback hooks, for internal usage.
+
+ Usage::
+
+ >>> import requests
+ >>> req = requests.Request('GET', 'https://httpbin.org/get')
+ >>> req.prepare()
+ <PreparedRequest [GET]>
+ """
+
+ def __init__(
+ self,
+ method=None,
+ url=None,
+ headers=None,
+ files=None,
+ data=None,
+ params=None,
+ auth=None,
+ cookies=None,
+ hooks=None,
+ json=None,
+ ):
+
+ # Default empty dicts for dict params.
+ data = [] if data is None else data
+ files = [] if files is None else files
+ headers = {} if headers is None else headers
+ params = {} if params is None else params
+ hooks = {} if hooks is None else hooks
+
+ self.hooks = default_hooks()
+ for (k, v) in list(hooks.items()):
+ self.register_hook(event=k, hook=v)
+
+ self.method = method
+ self.url = url
+ self.headers = headers
+ self.files = files
+ self.data = data
+ self.json = json
+ self.params = params
+ self.auth = auth
+ self.cookies = cookies
+
+ def __repr__(self):
+ return f"<Request [{self.method}]>"
+
+ def prepare(self):
+ """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
+ p = PreparedRequest()
+ p.prepare(
+ method=self.method,
+ url=self.url,
+ headers=self.headers,
+ files=self.files,
+ data=self.data,
+ json=self.json,
+ params=self.params,
+ auth=self.auth,
+ cookies=self.cookies,
+ hooks=self.hooks,
+ )
+ return p
+
+
+class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
+ """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
+ containing the exact bytes that will be sent to the server.
+
+ Instances are generated from a :class:`Request <Request>` object, and
+ should not be instantiated manually; doing so may produce undesirable
+ effects.
+
+ Usage::
+
+ >>> import requests
+ >>> req = requests.Request('GET', 'https://httpbin.org/get')
+ >>> r = req.prepare()
+ >>> r
+ <PreparedRequest [GET]>
+
+ >>> s = requests.Session()
+ >>> s.send(r)
+ <Response [200]>
+ """
+
+ def __init__(self):
+ #: HTTP verb to send to the server.
+ self.method = None
+ #: HTTP URL to send the request to.
+ self.url = None
+ #: dictionary of HTTP headers.
+ self.headers = None
+ # The `CookieJar` used to create the Cookie header will be stored here
+ # after prepare_cookies is called
+ self._cookies = None
+ #: request body to send to the server.
+ self.body = None
+ #: dictionary of callback hooks, for internal usage.
+ self.hooks = default_hooks()
+ #: integer denoting starting position of a readable file-like body.
+ self._body_position = None
+
+ def prepare(
+ self,
+ method=None,
+ url=None,
+ headers=None,
+ files=None,
+ data=None,
+ params=None,
+ auth=None,
+ cookies=None,
+ hooks=None,
+ json=None,
+ ):
+ """Prepares the entire request with the given parameters."""
+
+ self.prepare_method(method)
+ self.prepare_url(url, params)
+ self.prepare_headers(headers)
+ self.prepare_cookies(cookies)
+ self.prepare_body(data, files, json)
+ self.prepare_auth(auth, url)
+
+ # Note that prepare_auth must be last to enable authentication schemes
+ # such as OAuth to work on a fully prepared request.
+
+ # This MUST go after prepare_auth. Authenticators could add a hook
+ self.prepare_hooks(hooks)
+
+ def __repr__(self):
+ return f"<PreparedRequest [{self.method}]>"
+
+ def copy(self):
+ p = PreparedRequest()
+ p.method = self.method
+ p.url = self.url
+ p.headers = self.headers.copy() if self.headers is not None else None
+ p._cookies = _copy_cookie_jar(self._cookies)
+ p.body = self.body
+ p.hooks = self.hooks
+ p._body_position = self._body_position
+ return p
+
+ def prepare_method(self, method):
+ """Prepares the given HTTP method."""
+ self.method = method
+ if self.method is not None:
+ self.method = to_native_string(self.method.upper())
+
+ @staticmethod
+ def _get_idna_encoded_host(host):
+ from pip._vendor import idna
+
+ try:
+ host = idna.encode(host, uts46=True).decode("utf-8")
+ except idna.IDNAError:
+ raise UnicodeError
+ return host
+
+ def prepare_url(self, url, params):
+ """Prepares the given HTTP URL."""
+ #: Accept objects that have string representations.
+ #: We're unable to blindly call unicode/str functions
+ #: as this will include the bytestring indicator (b'')
+ #: on python 3.x.
+ #: https://github.com/psf/requests/pull/2238
+ if isinstance(url, bytes):
+ url = url.decode("utf8")
+ else:
+ url = str(url)
+
+ # Remove leading whitespaces from url
+ url = url.lstrip()
+
+        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
+        # `data`, etc. to work around exceptions from `parse_url`, which
+        # handles RFC 3986 only.
+ if ":" in url and not url.lower().startswith("http"):
+ self.url = url
+ return
+
+ # Support for unicode domain names and paths.
+ try:
+ scheme, auth, host, port, path, query, fragment = parse_url(url)
+ except LocationParseError as e:
+ raise InvalidURL(*e.args)
+
+ if not scheme:
+ raise MissingSchema(
+ f"Invalid URL {url!r}: No scheme supplied. "
+ f"Perhaps you meant https://{url}?"
+ )
+
+ if not host:
+ raise InvalidURL(f"Invalid URL {url!r}: No host supplied")
+
+ # In general, we want to try IDNA encoding the hostname if the string contains
+ # non-ASCII characters. This allows users to automatically get the correct IDNA
+ # behaviour. For strings containing only ASCII characters, we need to also verify
+ # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
+ if not unicode_is_ascii(host):
+ try:
+ host = self._get_idna_encoded_host(host)
+ except UnicodeError:
+ raise InvalidURL("URL has an invalid label.")
+ elif host.startswith(("*", ".")):
+ raise InvalidURL("URL has an invalid label.")
+
+ # Carefully reconstruct the network location
+ netloc = auth or ""
+ if netloc:
+ netloc += "@"
+ netloc += host
+ if port:
+ netloc += f":{port}"
+
+ # Bare domains aren't valid URLs.
+ if not path:
+ path = "/"
+
+ if isinstance(params, (str, bytes)):
+ params = to_native_string(params)
+
+ enc_params = self._encode_params(params)
+ if enc_params:
+ if query:
+ query = f"{query}&{enc_params}"
+ else:
+ query = enc_params
+
+ url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
+ self.url = url
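+
+    # Illustrative sketch (URL is hypothetical): params are form-encoded into
+    # the query string and the final URL is requoted.
+    #
+    #     >>> p = PreparedRequest()
+    #     >>> p.prepare_url("https://example.com/search", params={"q": "hello world"})
+    #     >>> p.url
+    #     'https://example.com/search?q=hello+world'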
+
+ def prepare_headers(self, headers):
+ """Prepares the given HTTP headers."""
+
+ self.headers = CaseInsensitiveDict()
+ if headers:
+ for header in headers.items():
+ # Raise exception on invalid header value.
+ check_header_validity(header)
+ name, value = header
+ self.headers[to_native_string(name)] = value
+
+ def prepare_body(self, data, files, json=None):
+ """Prepares the given HTTP body data."""
+
+ # Check if file, fo, generator, iterator.
+ # If not, run through normal process.
+
+ # Nottin' on you.
+ body = None
+ content_type = None
+
+ if not data and json is not None:
+ # urllib3 requires a bytes-like body. Python 2's json.dumps
+ # provides this natively, but Python 3 gives a Unicode string.
+ content_type = "application/json"
+
+ try:
+ body = complexjson.dumps(json, allow_nan=False)
+ except ValueError as ve:
+ raise InvalidJSONError(ve, request=self)
+
+ if not isinstance(body, bytes):
+ body = body.encode("utf-8")
+
+ is_stream = all(
+ [
+ hasattr(data, "__iter__"),
+ not isinstance(data, (basestring, list, tuple, Mapping)),
+ ]
+ )
+
+ if is_stream:
+ try:
+ length = super_len(data)
+ except (TypeError, AttributeError, UnsupportedOperation):
+ length = None
+
+ body = data
+
+ if getattr(body, "tell", None) is not None:
+ # Record the current file position before reading.
+ # This will allow us to rewind a file in the event
+ # of a redirect.
+ try:
+ self._body_position = body.tell()
+ except OSError:
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body
+ self._body_position = object()
+
+ if files:
+ raise NotImplementedError(
+ "Streamed bodies and files are mutually exclusive."
+ )
+
+ if length:
+ self.headers["Content-Length"] = builtin_str(length)
+ else:
+ self.headers["Transfer-Encoding"] = "chunked"
+ else:
+ # Multi-part file uploads.
+ if files:
+ (body, content_type) = self._encode_files(files, data)
+ else:
+ if data:
+ body = self._encode_params(data)
+ if isinstance(data, basestring) or hasattr(data, "read"):
+ content_type = None
+ else:
+ content_type = "application/x-www-form-urlencoded"
+
+ self.prepare_content_length(body)
+
+ # Add content-type if it wasn't explicitly provided.
+ if content_type and ("content-type" not in self.headers):
+ self.headers["Content-Type"] = content_type
+
+ self.body = body
+
+ def prepare_content_length(self, body):
+        """Prepare the Content-Length header based on request method and body."""
+ if body is not None:
+ length = super_len(body)
+ if length:
+ # If length exists, set it. Otherwise, we fallback
+ # to Transfer-Encoding: chunked.
+ self.headers["Content-Length"] = builtin_str(length)
+ elif (
+ self.method not in ("GET", "HEAD")
+ and self.headers.get("Content-Length") is None
+ ):
+ # Set Content-Length to 0 for methods that can have a body
+ # but don't provide one. (i.e. not GET or HEAD)
+ self.headers["Content-Length"] = "0"
+
+ def prepare_auth(self, auth, url=""):
+ """Prepares the given HTTP auth data."""
+
+ # If no Auth is explicitly provided, extract it from the URL first.
+ if auth is None:
+ url_auth = get_auth_from_url(self.url)
+ auth = url_auth if any(url_auth) else None
+
+ if auth:
+ if isinstance(auth, tuple) and len(auth) == 2:
+ # special-case basic HTTP auth
+ auth = HTTPBasicAuth(*auth)
+
+ # Allow auth to make its changes.
+ r = auth(self)
+
+ # Update self to reflect the auth changes.
+ self.__dict__.update(r.__dict__)
+
+ # Recompute Content-Length
+ self.prepare_content_length(self.body)
+
+ def prepare_cookies(self, cookies):
+ """Prepares the given HTTP cookie data.
+
+ This function eventually generates a ``Cookie`` header from the
+ given cookies using cookielib. Due to cookielib's design, the header
+ will not be regenerated if it already exists, meaning this function
+ can only be called once for the life of the
+ :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
+ to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
+ header is removed beforehand.
+ """
+ if isinstance(cookies, cookielib.CookieJar):
+ self._cookies = cookies
+ else:
+ self._cookies = cookiejar_from_dict(cookies)
+
+ cookie_header = get_cookie_header(self._cookies, self)
+ if cookie_header is not None:
+ self.headers["Cookie"] = cookie_header
+
+ def prepare_hooks(self, hooks):
+ """Prepares the given hooks."""
+ # hooks can be passed as None to the prepare method and to this
+ # method. To prevent iterating over None, simply use an empty list
+        # if hooks is falsy.
+ hooks = hooks or []
+ for event in hooks:
+ self.register_hook(event, hooks[event])
+
+
+class Response:
+ """The :class:`Response <Response>` object, which contains a
+ server's response to an HTTP request.
+ """
+
+ __attrs__ = [
+ "_content",
+ "status_code",
+ "headers",
+ "url",
+ "history",
+ "encoding",
+ "reason",
+ "cookies",
+ "elapsed",
+ "request",
+ ]
+
+ def __init__(self):
+ self._content = False
+ self._content_consumed = False
+ self._next = None
+
+ #: Integer Code of responded HTTP Status, e.g. 404 or 200.
+ self.status_code = None
+
+ #: Case-insensitive Dictionary of Response Headers.
+ #: For example, ``headers['content-encoding']`` will return the
+ #: value of a ``'Content-Encoding'`` response header.
+ self.headers = CaseInsensitiveDict()
+
+ #: File-like object representation of response (for advanced usage).
+ #: Use of ``raw`` requires that ``stream=True`` be set on the request.
+ #: This requirement does not apply for use internally to Requests.
+ self.raw = None
+
+ #: Final URL location of Response.
+ self.url = None
+
+ #: Encoding to decode with when accessing r.text.
+ self.encoding = None
+
+ #: A list of :class:`Response <Response>` objects from
+ #: the history of the Request. Any redirect responses will end
+ #: up here. The list is sorted from the oldest to the most recent request.
+ self.history = []
+
+ #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
+ self.reason = None
+
+ #: A CookieJar of Cookies the server sent back.
+ self.cookies = cookiejar_from_dict({})
+
+ #: The amount of time elapsed between sending the request
+ #: and the arrival of the response (as a timedelta).
+ #: This property specifically measures the time taken between sending
+ #: the first byte of the request and finishing parsing the headers. It
+ #: is therefore unaffected by consuming the response content or the
+ #: value of the ``stream`` keyword argument.
+ self.elapsed = datetime.timedelta(0)
+
+ #: The :class:`PreparedRequest <PreparedRequest>` object to which this
+ #: is a response.
+ self.request = None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+ def __getstate__(self):
+ # Consume everything; accessing the content attribute makes
+ # sure the content has been fully read.
+ if not self._content_consumed:
+ self.content
+
+ return {attr: getattr(self, attr, None) for attr in self.__attrs__}
+
+ def __setstate__(self, state):
+ for name, value in state.items():
+ setattr(self, name, value)
+
+ # pickled objects do not have .raw
+ setattr(self, "_content_consumed", True)
+ setattr(self, "raw", None)
+
+ def __repr__(self):
+ return f"<Response [{self.status_code}]>"
+
+ def __bool__(self):
+ """Returns True if :attr:`status_code` is less than 400.
+
+ This attribute checks if the status code of the response is between
+ 400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+ is **not** a check to see if the response code is ``200 OK``.
+ """
+ return self.ok
+
+ def __nonzero__(self):
+ """Returns True if :attr:`status_code` is less than 400.
+
+ This attribute checks if the status code of the response is between
+ 400 and 600 to see if there was a client error or a server error. If
+        the status code is between 200 and 400, this will return True. This
+ is **not** a check to see if the response code is ``200 OK``.
+ """
+ return self.ok
+
+ def __iter__(self):
+ """Allows you to use a response as an iterator."""
+ return self.iter_content(128)
+
+ @property
+ def ok(self):
+ """Returns True if :attr:`status_code` is less than 400, False if not.
+
+ This attribute checks if the status code of the response is between
+ 400 and 600 to see if there was a client error or a server error. If
+ the status code is between 200 and 400, this will return True. This
+ is **not** a check to see if the response code is ``200 OK``.
+ """
+ try:
+ self.raise_for_status()
+ except HTTPError:
+ return False
+ return True
+
+ @property
+ def is_redirect(self):
+ """True if this Response is a well-formed HTTP redirect that could have
+ been processed automatically (by :meth:`Session.resolve_redirects`).
+ """
+ return "location" in self.headers and self.status_code in REDIRECT_STATI
+
+ @property
+ def is_permanent_redirect(self):
+        """True if this Response is one of the permanent versions of redirect."""
+ return "location" in self.headers and self.status_code in (
+ codes.moved_permanently,
+ codes.permanent_redirect,
+ )
+
+ @property
+ def next(self):
+ """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
+ return self._next
+
+ @property
+ def apparent_encoding(self):
+ """The apparent encoding, provided by the charset_normalizer or chardet libraries."""
+ return chardet.detect(self.content)["encoding"]
+
+ def iter_content(self, chunk_size=1, decode_unicode=False):
+ """Iterates over the response data. When stream=True is set on the
+ request, this avoids reading the content at once into memory for
+ large responses. The chunk size is the number of bytes it should
+ read into memory. This is not necessarily the length of each item
+ returned as decoding can take place.
+
+ chunk_size must be of type int or None. A value of None will
+ function differently depending on the value of `stream`.
+ stream=True will read data as it arrives in whatever size the
+ chunks are received. If stream=False, data is returned as
+ a single chunk.
+
+ If decode_unicode is True, content will be decoded using the best
+ available encoding based on the response.
+ """
+
+ def generate():
+ # Special case for urllib3.
+ if hasattr(self.raw, "stream"):
+ try:
+ yield from self.raw.stream(chunk_size, decode_content=True)
+ except ProtocolError as e:
+ raise ChunkedEncodingError(e)
+ except DecodeError as e:
+ raise ContentDecodingError(e)
+ except ReadTimeoutError as e:
+ raise ConnectionError(e)
+ except SSLError as e:
+ raise RequestsSSLError(e)
+ else:
+ # Standard file-like object.
+ while True:
+ chunk = self.raw.read(chunk_size)
+ if not chunk:
+ break
+ yield chunk
+
+ self._content_consumed = True
+
+ if self._content_consumed and isinstance(self._content, bool):
+ raise StreamConsumedError()
+ elif chunk_size is not None and not isinstance(chunk_size, int):
+ raise TypeError(
+ f"chunk_size must be an int, it is instead a {type(chunk_size)}."
+ )
+ # simulate reading small chunks of the content
+ reused_chunks = iter_slices(self._content, chunk_size)
+
+ stream_chunks = generate()
+
+ chunks = reused_chunks if self._content_consumed else stream_chunks
+
+ if decode_unicode:
+ chunks = stream_decode_response_unicode(chunks, self)
+
+ return chunks
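+
+    # Illustrative sketch (URL is hypothetical): with stream=True on the
+    # request, the body can be written out in fixed-size chunks without ever
+    # being held in memory at once.
+    #
+    #     >>> r = requests.get("https://example.com/big.bin", stream=True)
+    #     >>> with open("big.bin", "wb") as fh:
+    #     ...     for chunk in r.iter_content(chunk_size=8192):
+    #     ...         _ = fh.write(chunk)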
+
+ def iter_lines(
+ self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
+ ):
+ """Iterates over the response data, one line at a time. When
+ stream=True is set on the request, this avoids reading the
+ content at once into memory for large responses.
+
+        .. note:: This method is not reentrant-safe.
+ """
+
+ pending = None
+
+ for chunk in self.iter_content(
+ chunk_size=chunk_size, decode_unicode=decode_unicode
+ ):
+
+ if pending is not None:
+ chunk = pending + chunk
+
+ if delimiter:
+ lines = chunk.split(delimiter)
+ else:
+ lines = chunk.splitlines()
+
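+            # If the chunk did not end exactly on a delimiter, its last
+            # element is an incomplete line; hold it back for the next chunk.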
+ if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
+ pending = lines.pop()
+ else:
+ pending = None
+
+ yield from lines
+
+ if pending is not None:
+ yield pending
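+
+    # Illustrative sketch (URL is hypothetical): iterate a streamed body line
+    # by line, skipping the empty keep-alive lines some servers send.
+    #
+    #     >>> r = requests.get("https://example.com/events", stream=True)
+    #     >>> for line in r.iter_lines(decode_unicode=True):
+    #     ...     if line:
+    #     ...         print(line)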
+
+ @property
+ def content(self):
+ """Content of the response, in bytes."""
+
+ if self._content is False:
+ # Read the contents.
+ if self._content_consumed:
+ raise RuntimeError("The content for this response was already consumed")
+
+ if self.status_code == 0 or self.raw is None:
+ self._content = None
+ else:
+ self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b""
+
+ self._content_consumed = True
+ # don't need to release the connection; that's been handled by urllib3
+ # since we exhausted the data.
+ return self._content
+
+ @property
+ def text(self):
+ """Content of the response, in unicode.
+
+ If Response.encoding is None, encoding will be guessed using
+ ``charset_normalizer`` or ``chardet``.
+
+ The encoding of the response content is determined based solely on HTTP
+ headers, following RFC 2616 to the letter. If you can take advantage of
+ non-HTTP knowledge to make a better guess at the encoding, you should
+ set ``r.encoding`` appropriately before accessing this property.
+ """
+
+ # Try charset from content-type
+ content = None
+ encoding = self.encoding
+
+ if not self.content:
+ return ""
+
+ # Fallback to auto-detected encoding.
+ if self.encoding is None:
+ encoding = self.apparent_encoding
+
+ # Decode unicode from given encoding.
+ try:
+ content = str(self.content, encoding, errors="replace")
+ except (LookupError, TypeError):
+ # A LookupError is raised if the encoding was not found which could
+ # indicate a misspelling or similar mistake.
+ #
+ # A TypeError can be raised if encoding is None
+ #
+ # So we try blindly encoding.
+ content = str(self.content, errors="replace")
+
+ return content
+
+ def json(self, **kwargs):
+ r"""Returns the json-encoded content of a response, if any.
+
+ :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
+ :raises requests.exceptions.JSONDecodeError: If the response body does not
+ contain valid json.
+ """
+
+ if not self.encoding and self.content and len(self.content) > 3:
+ # No encoding set. JSON RFC 4627 section 3 states we should expect
+ # UTF-8, -16 or -32. Detect which one to use; If the detection or
+ # decoding fails, fall back to `self.text` (using charset_normalizer to make
+ # a best guess).
+ encoding = guess_json_utf(self.content)
+ if encoding is not None:
+ try:
+ return complexjson.loads(self.content.decode(encoding), **kwargs)
+ except UnicodeDecodeError:
+ # Wrong UTF codec detected; usually because it's not UTF-8
+ # but some other 8-bit codec. This is an RFC violation,
+ # and the server didn't bother to tell us what codec *was*
+ # used.
+ pass
+ except JSONDecodeError as e:
+ raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
+
+ try:
+ return complexjson.loads(self.text, **kwargs)
+ except JSONDecodeError as e:
+ # Catch JSON-related errors and raise as requests.JSONDecodeError
+ # This aliases json.JSONDecodeError and simplejson.JSONDecodeError
+ raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)
+
+ @property
+ def links(self):
+ """Returns the parsed header links of the response, if any."""
+
+ header = self.headers.get("link")
+
+ resolved_links = {}
+
+ if header:
+ links = parse_header_links(header)
+
+ for link in links:
+ key = link.get("rel") or link.get("url")
+ resolved_links[key] = link
+
+ return resolved_links
+
+ def raise_for_status(self):
+ """Raises :class:`HTTPError`, if one occurred."""
+
+ http_error_msg = ""
+ if isinstance(self.reason, bytes):
+ # We attempt to decode utf-8 first because some servers
+ # choose to localize their reason strings. If the string
+ # isn't utf-8, we fall back to iso-8859-1 for all other
+ # encodings. (See PR #3538)
+ try:
+ reason = self.reason.decode("utf-8")
+ except UnicodeDecodeError:
+ reason = self.reason.decode("iso-8859-1")
+ else:
+ reason = self.reason
+
+ if 400 <= self.status_code < 500:
+ http_error_msg = (
+ f"{self.status_code} Client Error: {reason} for url: {self.url}"
+ )
+
+ elif 500 <= self.status_code < 600:
+ http_error_msg = (
+ f"{self.status_code} Server Error: {reason} for url: {self.url}"
+ )
+
+ if http_error_msg:
+ raise HTTPError(http_error_msg, response=self)
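+
+    # Illustrative sketch (URL and status are hypothetical): the idiomatic
+    # call site turns 4xx/5xx responses into exceptions near the request.
+    #
+    #     >>> r = requests.get("https://example.com/missing")
+    #     >>> try:
+    #     ...     r.raise_for_status()
+    #     ... except HTTPError as exc:
+    #     ...     print(exc)
+    #     404 Client Error: Not Found for url: https://example.com/missing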
+
+ def close(self):
+ """Releases the connection back to the pool. Once this method has been
+ called the underlying ``raw`` object must not be accessed again.
+
+ *Note: Should not normally need to be called explicitly.*
+ """
+ if not self._content_consumed:
+ self.raw.close()
+
+ release_conn = getattr(self.raw, "release_conn", None)
+ if release_conn is not None:
+ release_conn()
diff --git a/third_party/python/pip/pip/_vendor/requests/packages.py b/third_party/python/pip/pip/_vendor/requests/packages.py
new file mode 100644
index 0000000000..9582fa730f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/packages.py
@@ -0,0 +1,16 @@
+import sys
+
+# This code exists for backwards compatibility reasons.
+# I don't like it either. Just look the other way. :)
+
+for package in ('urllib3', 'idna', 'chardet'):
+ vendored_package = "pip._vendor." + package
+ locals()[package] = __import__(vendored_package)
+ # This traversal is apparently necessary such that the identities are
+ # preserved (requests.packages.urllib3.* is urllib3.*)
+ for mod in list(sys.modules):
+ if mod == vendored_package or mod.startswith(vendored_package + '.'):
+ unprefixed_mod = mod[len("pip._vendor."):]
+ sys.modules['pip._vendor.requests.packages.' + unprefixed_mod] = sys.modules[mod]
+
+# Kinda cool, though, right?
diff --git a/third_party/python/pip/pip/_vendor/requests/sessions.py b/third_party/python/pip/pip/_vendor/requests/sessions.py
new file mode 100644
index 0000000000..6cb3b4dae3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/sessions.py
@@ -0,0 +1,831 @@
+"""
+requests.sessions
+~~~~~~~~~~~~~~~~~
+
+This module provides a Session object to manage and persist settings across
+requests (cookies, auth, proxies).
+"""
+import os
+import sys
+import time
+from collections import OrderedDict
+from datetime import timedelta
+
+from ._internal_utils import to_native_string
+from .adapters import HTTPAdapter
+from .auth import _basic_auth_str
+from .compat import Mapping, cookielib, urljoin, urlparse
+from .cookies import (
+ RequestsCookieJar,
+ cookiejar_from_dict,
+ extract_cookies_to_jar,
+ merge_cookies,
+)
+from .exceptions import (
+ ChunkedEncodingError,
+ ContentDecodingError,
+ InvalidSchema,
+ TooManyRedirects,
+)
+from .hooks import default_hooks, dispatch_hook
+
+# formerly defined here, reexposed here for backward compatibility
+from .models import ( # noqa: F401
+ DEFAULT_REDIRECT_LIMIT,
+ REDIRECT_STATI,
+ PreparedRequest,
+ Request,
+)
+from .status_codes import codes
+from .structures import CaseInsensitiveDict
+from .utils import ( # noqa: F401
+ DEFAULT_PORTS,
+ default_headers,
+ get_auth_from_url,
+ get_environ_proxies,
+ get_netrc_auth,
+ requote_uri,
+ resolve_proxies,
+ rewind_body,
+ should_bypass_proxies,
+ to_key_val_list,
+)
+
+# Preferred clock, based on which one is more accurate on a given system.
+if sys.platform == "win32":
+ preferred_clock = time.perf_counter
+else:
+ preferred_clock = time.time
+
+
+def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
+ """Determines appropriate setting for a given request, taking into account
+ the explicit setting on that request, and the setting in the session. If a
+ setting is a dictionary, they will be merged together using `dict_class`
+ """
+
+ if session_setting is None:
+ return request_setting
+
+ if request_setting is None:
+ return session_setting
+
+ # Bypass if not a dictionary (e.g. verify)
+ if not (
+ isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping)
+ ):
+ return request_setting
+
+ merged_setting = dict_class(to_key_val_list(session_setting))
+ merged_setting.update(to_key_val_list(request_setting))
+
+ # Remove keys that are set to None. Extract keys first to avoid altering
+ # the dictionary during iteration.
+ none_keys = [k for (k, v) in merged_setting.items() if v is None]
+ for key in none_keys:
+ del merged_setting[key]
+
+ return merged_setting
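+
+# Illustrative sketch (header names are hypothetical): request values win
+# over session values, and an explicit None removes a session-level key.
+#
+#     >>> dict(merge_setting(
+#     ...     {"X-Trace": None, "Accept-Encoding": "gzip"},
+#     ...     {"Accept": "*/*", "X-Trace": "abc"}))
+#     {'Accept': '*/*', 'Accept-Encoding': 'gzip'}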
+
+
+def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
+ """Properly merges both requests and session hooks.
+
+ This is necessary because when request_hooks == {'response': []}, the
+ merge breaks Session hooks entirely.
+ """
+ if session_hooks is None or session_hooks.get("response") == []:
+ return request_hooks
+
+ if request_hooks is None or request_hooks.get("response") == []:
+ return session_hooks
+
+ return merge_setting(request_hooks, session_hooks, dict_class)
+
+
+class SessionRedirectMixin:
+ def get_redirect_target(self, resp):
+ """Receives a Response. Returns a redirect URI or ``None``"""
+ # Due to the nature of how requests processes redirects this method will
+ # be called at least once upon the original response and at least twice
+ # on each subsequent redirect response (if any).
+ # If a custom mixin is used to handle this logic, it may be advantageous
+ # to cache the redirect location onto the response object as a private
+ # attribute.
+ if resp.is_redirect:
+ location = resp.headers["location"]
+            # Currently the underlying http module on py3 decodes headers
+            # in latin1, but empirical evidence suggests that latin1 is very
+            # rarely used with non-ASCII characters in HTTP headers.
+            # A UTF-8 header is far more likely than a latin1 one, which
+            # causes incorrect handling of UTF-8 encoded location headers.
+            # To solve this, we re-encode the location in latin1.
+ location = location.encode("latin1")
+ return to_native_string(location, "utf8")
+ return None
+
+ def should_strip_auth(self, old_url, new_url):
+ """Decide whether Authorization header should be removed when redirecting"""
+ old_parsed = urlparse(old_url)
+ new_parsed = urlparse(new_url)
+ if old_parsed.hostname != new_parsed.hostname:
+ return True
+ # Special case: allow http -> https redirect when using the standard
+ # ports. This isn't specified by RFC 7235, but is kept to avoid
+ # breaking backwards compatibility with older versions of requests
+ # that allowed any redirects on the same host.
+ if (
+ old_parsed.scheme == "http"
+ and old_parsed.port in (80, None)
+ and new_parsed.scheme == "https"
+ and new_parsed.port in (443, None)
+ ):
+ return False
+
+ # Handle default port usage corresponding to scheme.
+ changed_port = old_parsed.port != new_parsed.port
+ changed_scheme = old_parsed.scheme != new_parsed.scheme
+ default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
+ if (
+ not changed_scheme
+ and old_parsed.port in default_port
+ and new_parsed.port in default_port
+ ):
+ return False
+
+ # Standard case: root URI must match
+ return changed_port or changed_scheme
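+
+    # Illustrative sketch (hosts are hypothetical): auth survives an
+    # http -> https upgrade on default ports but not a change of host.
+    #
+    #     >>> s = Session()
+    #     >>> s.should_strip_auth("http://example.com/", "https://example.com/")
+    #     False
+    #     >>> s.should_strip_auth("https://example.com/", "https://other.example/")
+    #     True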
+
+ def resolve_redirects(
+ self,
+ resp,
+ req,
+ stream=False,
+ timeout=None,
+ verify=True,
+ cert=None,
+ proxies=None,
+ yield_requests=False,
+ **adapter_kwargs,
+ ):
+ """Receives a Response. Returns a generator of Responses or Requests."""
+
+ hist = [] # keep track of history
+
+ url = self.get_redirect_target(resp)
+ previous_fragment = urlparse(req.url).fragment
+ while url:
+ prepared_request = req.copy()
+
+ # Update history and keep track of redirects.
+ # resp.history must ignore the original request in this loop
+ hist.append(resp)
+ resp.history = hist[1:]
+
+ try:
+ resp.content # Consume socket so it can be released
+ except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
+ resp.raw.read(decode_content=False)
+
+ if len(resp.history) >= self.max_redirects:
+ raise TooManyRedirects(
+ f"Exceeded {self.max_redirects} redirects.", response=resp
+ )
+
+ # Release the connection back into the pool.
+ resp.close()
+
+ # Handle redirection without scheme (see: RFC 1808 Section 4)
+ if url.startswith("//"):
+ parsed_rurl = urlparse(resp.url)
+ url = ":".join([to_native_string(parsed_rurl.scheme), url])
+
+ # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
+ parsed = urlparse(url)
+ if parsed.fragment == "" and previous_fragment:
+ parsed = parsed._replace(fragment=previous_fragment)
+ elif parsed.fragment:
+ previous_fragment = parsed.fragment
+ url = parsed.geturl()
+
+ # Facilitate relative 'location' headers, as allowed by RFC 7231.
+ # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
+            # In compliance with RFC 3986, we percent-encode the URL.
+ if not parsed.netloc:
+ url = urljoin(resp.url, requote_uri(url))
+ else:
+ url = requote_uri(url)
+
+ prepared_request.url = to_native_string(url)
+
+ self.rebuild_method(prepared_request, resp)
+
+ # https://github.com/psf/requests/issues/1084
+ if resp.status_code not in (
+ codes.temporary_redirect,
+ codes.permanent_redirect,
+ ):
+ # https://github.com/psf/requests/issues/3490
+ purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding")
+ for header in purged_headers:
+ prepared_request.headers.pop(header, None)
+ prepared_request.body = None
+
+ headers = prepared_request.headers
+ headers.pop("Cookie", None)
+
+ # Extract any cookies sent on the response to the cookiejar
+ # in the new request. Because we've mutated our copied prepared
+ # request, use the old one that we haven't yet touched.
+ extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
+ merge_cookies(prepared_request._cookies, self.cookies)
+ prepared_request.prepare_cookies(prepared_request._cookies)
+
+ # Rebuild auth and proxy information.
+ proxies = self.rebuild_proxies(prepared_request, proxies)
+ self.rebuild_auth(prepared_request, resp)
+
+ # A failed tell() sets `_body_position` to `object()`. This non-None
+ # value ensures `rewindable` will be True, allowing us to raise an
+ # UnrewindableBodyError, instead of hanging the connection.
+ rewindable = prepared_request._body_position is not None and (
+ "Content-Length" in headers or "Transfer-Encoding" in headers
+ )
+
+ # Attempt to rewind consumed file-like object.
+ if rewindable:
+ rewind_body(prepared_request)
+
+ # Override the original request.
+ req = prepared_request
+
+ if yield_requests:
+ yield req
+ else:
+
+ resp = self.send(
+ req,
+ stream=stream,
+ timeout=timeout,
+ verify=verify,
+ cert=cert,
+ proxies=proxies,
+ allow_redirects=False,
+ **adapter_kwargs,
+ )
+
+ extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
+
+ # extract redirect url, if any, for the next loop
+ url = self.get_redirect_target(resp)
+ yield resp
+
+ def rebuild_auth(self, prepared_request, response):
+ """When being redirected we may want to strip authentication from the
+ request to avoid leaking credentials. This method intelligently removes
+ and reapplies authentication where possible to avoid credential loss.
+ """
+ headers = prepared_request.headers
+ url = prepared_request.url
+
+ if "Authorization" in headers and self.should_strip_auth(
+ response.request.url, url
+ ):
+ # If we get redirected to a new host, we should strip out any
+ # authentication headers.
+ del headers["Authorization"]
+
+ # .netrc might have more auth for us on our new host.
+ new_auth = get_netrc_auth(url) if self.trust_env else None
+ if new_auth is not None:
+ prepared_request.prepare_auth(new_auth)
+
+ def rebuild_proxies(self, prepared_request, proxies):
+ """This method re-evaluates the proxy configuration by considering the
+ environment variables. If we are redirected to a URL covered by
+ NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
+ proxy keys for this URL (in case they were stripped by a previous
+ redirect).
+
+ This method also replaces the Proxy-Authorization header where
+ necessary.
+
+ :rtype: dict
+ """
+ headers = prepared_request.headers
+ scheme = urlparse(prepared_request.url).scheme
+ new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)
+
+ if "Proxy-Authorization" in headers:
+ del headers["Proxy-Authorization"]
+
+ try:
+ username, password = get_auth_from_url(new_proxies[scheme])
+ except KeyError:
+ username, password = None, None
+
+ if username and password:
+ headers["Proxy-Authorization"] = _basic_auth_str(username, password)
+
+ return new_proxies
+
+ def rebuild_method(self, prepared_request, response):
+ """When being redirected we may want to change the method of the request
+ based on certain specs or browser behavior.
+ """
+ method = prepared_request.method
+
+ # https://tools.ietf.org/html/rfc7231#section-6.4.4
+ if response.status_code == codes.see_other and method != "HEAD":
+ method = "GET"
+
+ # Do what the browsers do, despite standards...
+ # First, turn 302s into GETs.
+ if response.status_code == codes.found and method != "HEAD":
+ method = "GET"
+
+ # Second, if a POST is responded to with a 301, turn it into a GET.
+ # This bizarre behaviour is explained in Issue 1704.
+ if response.status_code == codes.moved and method == "POST":
+ method = "GET"
+
+ prepared_request.method = method
+
+
+class Session(SessionRedirectMixin):
+ """A Requests session.
+
+ Provides cookie persistence, connection-pooling, and configuration.
+
+ Basic Usage::
+
+ >>> import requests
+ >>> s = requests.Session()
+ >>> s.get('https://httpbin.org/get')
+ <Response [200]>
+
+ Or as a context manager::
+
+ >>> with requests.Session() as s:
+ ... s.get('https://httpbin.org/get')
+ <Response [200]>
+ """
+
+ __attrs__ = [
+ "headers",
+ "cookies",
+ "auth",
+ "proxies",
+ "hooks",
+ "params",
+ "verify",
+ "cert",
+ "adapters",
+ "stream",
+ "trust_env",
+ "max_redirects",
+ ]
+
+ def __init__(self):
+
+ #: A case-insensitive dictionary of headers to be sent on each
+ #: :class:`Request <Request>` sent from this
+ #: :class:`Session <Session>`.
+ self.headers = default_headers()
+
+ #: Default Authentication tuple or object to attach to
+ #: :class:`Request <Request>`.
+ self.auth = None
+
+ #: Dictionary mapping protocol or protocol and host to the URL of the proxy
+ #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
+ #: be used on each :class:`Request <Request>`.
+ self.proxies = {}
+
+ #: Event-handling hooks.
+ self.hooks = default_hooks()
+
+ #: Dictionary of querystring data to attach to each
+ #: :class:`Request <Request>`. The dictionary values may be lists for
+ #: representing multivalued query parameters.
+ self.params = {}
+
+ #: Stream response content default.
+ self.stream = False
+
+ #: SSL Verification default.
+ #: Defaults to `True`, requiring requests to verify the TLS certificate at the
+ #: remote end.
+ #: If verify is set to `False`, requests will accept any TLS certificate
+ #: presented by the server, and will ignore hostname mismatches and/or
+ #: expired certificates, which will make your application vulnerable to
+ #: man-in-the-middle (MitM) attacks.
+ #: Only set this to `False` for testing.
+ self.verify = True
+
+ #: SSL client certificate default, if String, path to ssl client
+ #: cert file (.pem). If Tuple, ('cert', 'key') pair.
+ self.cert = None
+
+ #: Maximum number of redirects allowed. If the request exceeds this
+ #: limit, a :class:`TooManyRedirects` exception is raised.
+ #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
+ #: 30.
+ self.max_redirects = DEFAULT_REDIRECT_LIMIT
+
+ #: Trust environment settings for proxy configuration, default
+ #: authentication and similar.
+ self.trust_env = True
+
+ #: A CookieJar containing all currently outstanding cookies set on this
+ #: session. By default it is a
+ #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
+ #: may be any other ``cookielib.CookieJar`` compatible object.
+ self.cookies = cookiejar_from_dict({})
+
+ # Default connection adapters.
+ self.adapters = OrderedDict()
+ self.mount("https://", HTTPAdapter())
+ self.mount("http://", HTTPAdapter())
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+ def prepare_request(self, request):
+ """Constructs a :class:`PreparedRequest <PreparedRequest>` for
+ transmission and returns it. The :class:`PreparedRequest` has settings
+ merged from the :class:`Request <Request>` instance and those of the
+ :class:`Session`.
+
+ :param request: :class:`Request` instance to prepare with this
+ session's settings.
+ :rtype: requests.PreparedRequest
+ """
+ cookies = request.cookies or {}
+
+ # Bootstrap CookieJar.
+ if not isinstance(cookies, cookielib.CookieJar):
+ cookies = cookiejar_from_dict(cookies)
+
+ # Merge with session cookies
+ merged_cookies = merge_cookies(
+ merge_cookies(RequestsCookieJar(), self.cookies), cookies
+ )
+
+ # Set environment's basic authentication if not explicitly set.
+ auth = request.auth
+ if self.trust_env and not auth and not self.auth:
+ auth = get_netrc_auth(request.url)
+
+ p = PreparedRequest()
+ p.prepare(
+ method=request.method.upper(),
+ url=request.url,
+ files=request.files,
+ data=request.data,
+ json=request.json,
+ headers=merge_setting(
+ request.headers, self.headers, dict_class=CaseInsensitiveDict
+ ),
+ params=merge_setting(request.params, self.params),
+ auth=merge_setting(auth, self.auth),
+ cookies=merged_cookies,
+ hooks=merge_hooks(request.hooks, self.hooks),
+ )
+ return p
+
+ def request(
+ self,
+ method,
+ url,
+ params=None,
+ data=None,
+ headers=None,
+ cookies=None,
+ files=None,
+ auth=None,
+ timeout=None,
+ allow_redirects=True,
+ proxies=None,
+ hooks=None,
+ stream=None,
+ verify=None,
+ cert=None,
+ json=None,
+ ):
+ """Constructs a :class:`Request <Request>`, prepares it and sends it.
+ Returns :class:`Response <Response>` object.
+
+ :param method: method for the new :class:`Request` object.
+ :param url: URL for the new :class:`Request` object.
+ :param params: (optional) Dictionary or bytes to be sent in the query
+ string for the :class:`Request`.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) json to send in the body of the
+ :class:`Request`.
+ :param headers: (optional) Dictionary of HTTP Headers to send with the
+ :class:`Request`.
+ :param cookies: (optional) Dict or CookieJar object to send with the
+ :class:`Request`.
+ :param files: (optional) Dictionary of ``'filename': file-like-objects``
+ for multipart encoding upload.
+ :param auth: (optional) Auth tuple or callable to enable
+ Basic/Digest/Custom HTTP Auth.
+ :param timeout: (optional) How long to wait for the server to send
+ data before giving up, as a float, or a :ref:`(connect timeout,
+ read timeout) <timeouts>` tuple.
+ :type timeout: float or tuple
+ :param allow_redirects: (optional) Set to True by default.
+ :type allow_redirects: bool
+ :param proxies: (optional) Dictionary mapping protocol or protocol and
+ hostname to the URL of the proxy.
+ :param stream: (optional) whether to immediately download the response
+ content. Defaults to ``False``.
+ :param verify: (optional) Either a boolean, in which case it controls whether we verify
+ the server's TLS certificate, or a string, in which case it must be a path
+ to a CA bundle to use. Defaults to ``True``. When set to
+ ``False``, requests will accept any TLS certificate presented by
+ the server, and will ignore hostname mismatches and/or expired
+ certificates, which will make your application vulnerable to
+ man-in-the-middle (MitM) attacks. Setting verify to ``False``
+ may be useful during local development or testing.
+ :param cert: (optional) if String, path to ssl client cert file (.pem).
+ If Tuple, ('cert', 'key') pair.
+ :rtype: requests.Response
+ """
+ # Create the Request.
+ req = Request(
+ method=method.upper(),
+ url=url,
+ headers=headers,
+ files=files,
+ data=data or {},
+ json=json,
+ params=params or {},
+ auth=auth,
+ cookies=cookies,
+ hooks=hooks,
+ )
+ prep = self.prepare_request(req)
+
+ proxies = proxies or {}
+
+ settings = self.merge_environment_settings(
+ prep.url, proxies, stream, verify, cert
+ )
+
+ # Send the request.
+ send_kwargs = {
+ "timeout": timeout,
+ "allow_redirects": allow_redirects,
+ }
+ send_kwargs.update(settings)
+ resp = self.send(prep, **send_kwargs)
+
+ return resp
+
+ def get(self, url, **kwargs):
+ r"""Sends a GET request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault("allow_redirects", True)
+ return self.request("GET", url, **kwargs)
+
+ def options(self, url, **kwargs):
+        r"""Sends an OPTIONS request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault("allow_redirects", True)
+ return self.request("OPTIONS", url, **kwargs)
+
+ def head(self, url, **kwargs):
+ r"""Sends a HEAD request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault("allow_redirects", False)
+ return self.request("HEAD", url, **kwargs)
+
+ def post(self, url, data=None, json=None, **kwargs):
+ r"""Sends a POST request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) json to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ return self.request("POST", url, data=data, json=json, **kwargs)
+
+ def put(self, url, data=None, **kwargs):
+ r"""Sends a PUT request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ return self.request("PUT", url, data=data, **kwargs)
+
+ def patch(self, url, data=None, **kwargs):
+ r"""Sends a PATCH request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ return self.request("PATCH", url, data=data, **kwargs)
+
+ def delete(self, url, **kwargs):
+ r"""Sends a DELETE request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ return self.request("DELETE", url, **kwargs)
+
+ def send(self, request, **kwargs):
+ """Send a given PreparedRequest.
+
+ :rtype: requests.Response
+ """
+ # Set defaults that the hooks can utilize to ensure they always have
+ # the correct parameters to reproduce the previous request.
+ kwargs.setdefault("stream", self.stream)
+ kwargs.setdefault("verify", self.verify)
+ kwargs.setdefault("cert", self.cert)
+ if "proxies" not in kwargs:
+ kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env)
+
+ # It's possible that users might accidentally send a Request object.
+ # Guard against that specific failure case.
+ if isinstance(request, Request):
+ raise ValueError("You can only send PreparedRequests.")
+
+ # Set up variables needed for resolve_redirects and dispatching of hooks
+ allow_redirects = kwargs.pop("allow_redirects", True)
+ stream = kwargs.get("stream")
+ hooks = request.hooks
+
+ # Get the appropriate adapter to use
+ adapter = self.get_adapter(url=request.url)
+
+ # Start time (approximately) of the request
+ start = preferred_clock()
+
+ # Send the request
+ r = adapter.send(request, **kwargs)
+
+ # Total elapsed time of the request (approximately)
+ elapsed = preferred_clock() - start
+ r.elapsed = timedelta(seconds=elapsed)
+
+ # Response manipulation hooks
+ r = dispatch_hook("response", hooks, r, **kwargs)
+
+ # Persist cookies
+ if r.history:
+
+ # If the hooks create history then we want those cookies too
+ for resp in r.history:
+ extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
+
+ extract_cookies_to_jar(self.cookies, request, r.raw)
+
+ # Resolve redirects if allowed.
+ if allow_redirects:
+ # Redirect resolving generator.
+ gen = self.resolve_redirects(r, request, **kwargs)
+ history = [resp for resp in gen]
+ else:
+ history = []
+
+ # Shuffle things around if there's history.
+ if history:
+ # Insert the first (original) request at the start
+ history.insert(0, r)
+ # Get the last request made
+ r = history.pop()
+ r.history = history
+
+        # If redirects aren't being followed, stash the next request of the
+        # chain on the response so that Response.next can return it.
+ if not allow_redirects:
+ try:
+ r._next = next(
+ self.resolve_redirects(r, request, yield_requests=True, **kwargs)
+ )
+ except StopIteration:
+ pass
+
+ if not stream:
+ r.content
+
+ return r
+
+ def merge_environment_settings(self, url, proxies, stream, verify, cert):
+ """
+ Check the environment and merge it with some settings.
+
+ :rtype: dict
+ """
+ # Gather clues from the surrounding environment.
+ if self.trust_env:
+ # Set environment's proxies.
+ no_proxy = proxies.get("no_proxy") if proxies is not None else None
+ env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
+ for (k, v) in env_proxies.items():
+ proxies.setdefault(k, v)
+
+ # Look for requests environment configuration
+ # and be compatible with cURL.
+ if verify is True or verify is None:
+ verify = (
+ os.environ.get("REQUESTS_CA_BUNDLE")
+ or os.environ.get("CURL_CA_BUNDLE")
+ or verify
+ )
+
+ # Merge all the kwargs.
+ proxies = merge_setting(proxies, self.proxies)
+ stream = merge_setting(stream, self.stream)
+ verify = merge_setting(verify, self.verify)
+ cert = merge_setting(cert, self.cert)
+
+ return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert}
+
+ def get_adapter(self, url):
+ """
+ Returns the appropriate connection adapter for the given URL.
+
+ :rtype: requests.adapters.BaseAdapter
+ """
+ for (prefix, adapter) in self.adapters.items():
+
+ if url.lower().startswith(prefix.lower()):
+ return adapter
+
+ # Nothing matches :-/
+ raise InvalidSchema(f"No connection adapters were found for {url!r}")
+
+ def close(self):
+ """Closes all adapters and as such the session"""
+ for v in self.adapters.values():
+ v.close()
+
+ def mount(self, prefix, adapter):
+ """Registers a connection adapter to a prefix.
+
+ Adapters are sorted in descending order by prefix length.
+ """
+ self.adapters[prefix] = adapter
+ keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
+
+ for key in keys_to_move:
+ self.adapters[key] = self.adapters.pop(key)
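+
+    # Illustrative sketch (host is hypothetical): a longer, more specific
+    # prefix is consulted before the default "https://" adapter.
+    #
+    #     >>> s = Session()
+    #     >>> retrying = HTTPAdapter(max_retries=3)
+    #     >>> s.mount("https://internal.example/", retrying)
+    #     >>> s.get_adapter("https://internal.example/api") is retrying
+    #     True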
+
+ def __getstate__(self):
+ state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
+ return state
+
+ def __setstate__(self, state):
+ for attr, value in state.items():
+ setattr(self, attr, value)
+
+
+def session():
+ """
+ Returns a :class:`Session` for context-management.
+
+ .. deprecated:: 1.0.0
+
+ This method has been deprecated since version 1.0.0 and is only kept for
+ backwards compatibility. New code should use :class:`~requests.sessions.Session`
+ to create a session. This may be removed at a future date.
+
+ :rtype: Session
+ """
+ return Session()
diff --git a/third_party/python/pip/pip/_vendor/requests/status_codes.py b/third_party/python/pip/pip/_vendor/requests/status_codes.py
new file mode 100644
index 0000000000..4bd072be97
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/status_codes.py
@@ -0,0 +1,128 @@
+r"""
+The ``codes`` object defines a mapping from common names for HTTP statuses
+to their numerical codes, accessible either as attributes or as dictionary
+items.
+
+Example::
+
+ >>> import requests
+ >>> requests.codes['temporary_redirect']
+ 307
+ >>> requests.codes.teapot
+ 418
+ >>> requests.codes['\o/']
+ 200
+
+Some codes have multiple names, and both upper- and lower-case versions of
+the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
+``codes.okay`` all correspond to the HTTP status code 200.
+"""
+
+from .structures import LookupDict
+
+_codes = {
+ # Informational.
+ 100: ("continue",),
+ 101: ("switching_protocols",),
+ 102: ("processing",),
+ 103: ("checkpoint",),
+ 122: ("uri_too_long", "request_uri_too_long"),
+ 200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
+ 201: ("created",),
+ 202: ("accepted",),
+ 203: ("non_authoritative_info", "non_authoritative_information"),
+ 204: ("no_content",),
+ 205: ("reset_content", "reset"),
+ 206: ("partial_content", "partial"),
+ 207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
+ 208: ("already_reported",),
+ 226: ("im_used",),
+ # Redirection.
+ 300: ("multiple_choices",),
+ 301: ("moved_permanently", "moved", "\\o-"),
+ 302: ("found",),
+ 303: ("see_other", "other"),
+ 304: ("not_modified",),
+ 305: ("use_proxy",),
+ 306: ("switch_proxy",),
+ 307: ("temporary_redirect", "temporary_moved", "temporary"),
+ 308: (
+ "permanent_redirect",
+ "resume_incomplete",
+ "resume",
+ ), # "resume" and "resume_incomplete" to be removed in 3.0
+ # Client Error.
+ 400: ("bad_request", "bad"),
+ 401: ("unauthorized",),
+ 402: ("payment_required", "payment"),
+ 403: ("forbidden",),
+ 404: ("not_found", "-o-"),
+ 405: ("method_not_allowed", "not_allowed"),
+ 406: ("not_acceptable",),
+ 407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
+ 408: ("request_timeout", "timeout"),
+ 409: ("conflict",),
+ 410: ("gone",),
+ 411: ("length_required",),
+ 412: ("precondition_failed", "precondition"),
+ 413: ("request_entity_too_large",),
+ 414: ("request_uri_too_large",),
+ 415: ("unsupported_media_type", "unsupported_media", "media_type"),
+ 416: (
+ "requested_range_not_satisfiable",
+ "requested_range",
+ "range_not_satisfiable",
+ ),
+ 417: ("expectation_failed",),
+ 418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
+ 421: ("misdirected_request",),
+ 422: ("unprocessable_entity", "unprocessable"),
+ 423: ("locked",),
+ 424: ("failed_dependency", "dependency"),
+ 425: ("unordered_collection", "unordered"),
+ 426: ("upgrade_required", "upgrade"),
+ 428: ("precondition_required", "precondition"),
+ 429: ("too_many_requests", "too_many"),
+ 431: ("header_fields_too_large", "fields_too_large"),
+ 444: ("no_response", "none"),
+ 449: ("retry_with", "retry"),
+ 450: ("blocked_by_windows_parental_controls", "parental_controls"),
+ 451: ("unavailable_for_legal_reasons", "legal_reasons"),
+ 499: ("client_closed_request",),
+ # Server Error.
+ 500: ("internal_server_error", "server_error", "/o\\", "✗"),
+ 501: ("not_implemented",),
+ 502: ("bad_gateway",),
+ 503: ("service_unavailable", "unavailable"),
+ 504: ("gateway_timeout",),
+ 505: ("http_version_not_supported", "http_version"),
+ 506: ("variant_also_negotiates",),
+ 507: ("insufficient_storage",),
+ 509: ("bandwidth_limit_exceeded", "bandwidth"),
+ 510: ("not_extended",),
+ 511: ("network_authentication_required", "network_auth", "network_authentication"),
+}
+
+codes = LookupDict(name="status_codes")
+
+
+def _init():
+ for code, titles in _codes.items():
+ for title in titles:
+ setattr(codes, title, code)
+ if not title.startswith(("\\", "/")):
+ setattr(codes, title.upper(), code)
+
+ def doc(code):
+ names = ", ".join(f"``{n}``" for n in _codes[code])
+ return "* %d: %s" % (code, names)
+
+ global __doc__
+ __doc__ = (
+ __doc__ + "\n" + "\n".join(doc(code) for code in sorted(_codes))
+ if __doc__ is not None
+ else None
+ )
+
+
+_init()
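+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# requests): both attribute and item access resolve through LookupDict,
+# and upper-case aliases exist for every name not starting with \ or /.
+def _demo_codes():
+    assert codes.ok == codes.OK == codes["okay"] == 200
+    assert codes.teapot == 418
+    return codes.not_found  # 404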
diff --git a/third_party/python/pip/pip/_vendor/requests/structures.py b/third_party/python/pip/pip/_vendor/requests/structures.py
new file mode 100644
index 0000000000..188e13e482
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/structures.py
@@ -0,0 +1,99 @@
+"""
+requests.structures
+~~~~~~~~~~~~~~~~~~~
+
+Data structures that power Requests.
+"""
+
+from collections import OrderedDict
+
+from .compat import Mapping, MutableMapping
+
+
+class CaseInsensitiveDict(MutableMapping):
+ """A case-insensitive ``dict``-like object.
+
+ Implements all methods and operations of
+ ``MutableMapping`` as well as dict's ``copy``. Also
+ provides ``lower_items``.
+
+ All keys are expected to be strings. The structure remembers the
+ case of the last key to be set, and ``iter(instance)``,
+ ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
+    will contain case-sensitive keys. However, querying and membership
+    testing are case-insensitive::
+
+ cid = CaseInsensitiveDict()
+ cid['Accept'] = 'application/json'
+ cid['aCCEPT'] == 'application/json' # True
+ list(cid) == ['Accept'] # True
+
+ For example, ``headers['content-encoding']`` will return the
+ value of a ``'Content-Encoding'`` response header, regardless
+ of how the header name was originally stored.
+
+ If the constructor, ``.update``, or equality comparison
+ operations are given keys that have equal ``.lower()``s, the
+ behavior is undefined.
+ """
+
+ def __init__(self, data=None, **kwargs):
+ self._store = OrderedDict()
+ if data is None:
+ data = {}
+ self.update(data, **kwargs)
+
+ def __setitem__(self, key, value):
+ # Use the lowercased key for lookups, but store the actual
+ # key alongside the value.
+ self._store[key.lower()] = (key, value)
+
+ def __getitem__(self, key):
+ return self._store[key.lower()][1]
+
+ def __delitem__(self, key):
+ del self._store[key.lower()]
+
+ def __iter__(self):
+ return (casedkey for casedkey, mappedvalue in self._store.values())
+
+ def __len__(self):
+ return len(self._store)
+
+ def lower_items(self):
+ """Like iteritems(), but with all lowercase keys."""
+ return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())
+
+ def __eq__(self, other):
+ if isinstance(other, Mapping):
+ other = CaseInsensitiveDict(other)
+ else:
+ return NotImplemented
+ # Compare insensitively
+ return dict(self.lower_items()) == dict(other.lower_items())
+
+    # Copy is required so the result is a CaseInsensitiveDict, not a plain dict
+ def copy(self):
+ return CaseInsensitiveDict(self._store.values())
+
+ def __repr__(self):
+ return str(dict(self.items()))
+
+
+class LookupDict(dict):
+ """Dictionary lookup object."""
+
+ def __init__(self, name=None):
+ self.name = name
+ super().__init__()
+
+ def __repr__(self):
+ return f"<lookup '{self.name}'>"
+
+ def __getitem__(self, key):
+ # We allow fall-through here, so values default to None
+
+ return self.__dict__.get(key, None)
+
+ def get(self, key, default=None):
+ return self.__dict__.get(key, default)
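+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# requests): CaseInsensitiveDict keeps the casing of the last key set,
+# while lookups, deletion, and equality are case-insensitive.
+def _demo_case_insensitive_dict():
+    cid = CaseInsensitiveDict()
+    cid["Accept"] = "application/json"
+    assert cid["aCCEPT"] == "application/json"
+    cid["ACCEPT"] = "text/html"  # overwrites; remembers the "ACCEPT" casing
+    assert list(cid) == ["ACCEPT"]
+    return dict(cid.lower_items())  # {"accept": "text/html"}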
diff --git a/third_party/python/pip/pip/_vendor/requests/utils.py b/third_party/python/pip/pip/_vendor/requests/utils.py
new file mode 100644
index 0000000000..33f394d265
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/requests/utils.py
@@ -0,0 +1,1086 @@
+"""
+requests.utils
+~~~~~~~~~~~~~~
+
+This module provides utility functions that are used within Requests
+that are also useful for external consumption.
+"""
+
+import codecs
+import contextlib
+import io
+import os
+import re
+import socket
+import struct
+import sys
+import tempfile
+import warnings
+import zipfile
+from collections import OrderedDict
+
+from pip._vendor.urllib3.util import make_headers, parse_url
+
+from . import certs
+from .__version__ import __version__
+
+# to_native_string is unused in this module, but re-exported here for backwards compatibility
+from ._internal_utils import HEADER_VALIDATORS, to_native_string # noqa: F401
+from .compat import (
+ Mapping,
+ basestring,
+ bytes,
+ getproxies,
+ getproxies_environment,
+ integer_types,
+)
+from .compat import parse_http_list as _parse_list_header
+from .compat import (
+ proxy_bypass,
+ proxy_bypass_environment,
+ quote,
+ str,
+ unquote,
+ urlparse,
+ urlunparse,
+)
+from .cookies import cookiejar_from_dict
+from .exceptions import (
+ FileModeWarning,
+ InvalidHeader,
+ InvalidURL,
+ UnrewindableBodyError,
+)
+from .structures import CaseInsensitiveDict
+
+NETRC_FILES = (".netrc", "_netrc")
+
+DEFAULT_CA_BUNDLE_PATH = certs.where()
+
+DEFAULT_PORTS = {"http": 80, "https": 443}
+
+# Ensure that ', ' is used to preserve previous delimiter behavior.
+DEFAULT_ACCEPT_ENCODING = ", ".join(
+ re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
+)
+
+
+if sys.platform == "win32":
+ # provide a proxy_bypass version on Windows without DNS lookups
+
+ def proxy_bypass_registry(host):
+ try:
+ import winreg
+ except ImportError:
+ return False
+
+ try:
+ internetSettings = winreg.OpenKey(
+ winreg.HKEY_CURRENT_USER,
+ r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
+ )
+            # ProxyEnable could be REG_SZ or REG_DWORD, so normalize it to an int
+ proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
+ # ProxyOverride is almost always a string
+ proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
+ except (OSError, ValueError):
+ return False
+ if not proxyEnable or not proxyOverride:
+ return False
+
+ # make a check value list from the registry entry: replace the
+ # '<local>' string by the localhost entry and the corresponding
+ # canonical entry.
+ proxyOverride = proxyOverride.split(";")
+ # now check if we match one of the registry values.
+ for test in proxyOverride:
+ if test == "<local>":
+ if "." not in host:
+ return True
+ test = test.replace(".", r"\.") # mask dots
+ test = test.replace("*", r".*") # change glob sequence
+ test = test.replace("?", r".") # change glob char
+ if re.match(test, host, re.I):
+ return True
+ return False
+
+ def proxy_bypass(host): # noqa
+ """Return True, if the host should be bypassed.
+
+ Checks proxy settings gathered from the environment, if specified,
+ or the registry.
+ """
+ if getproxies_environment():
+ return proxy_bypass_environment(host)
+ else:
+ return proxy_bypass_registry(host)
+
+
+def dict_to_sequence(d):
+ """Returns an internal sequence dictionary update."""
+
+ if hasattr(d, "items"):
+ d = d.items()
+
+ return d
+
+
+def super_len(o):
+ total_length = None
+ current_position = 0
+
+ if hasattr(o, "__len__"):
+ total_length = len(o)
+
+ elif hasattr(o, "len"):
+ total_length = o.len
+
+ elif hasattr(o, "fileno"):
+ try:
+ fileno = o.fileno()
+ except (io.UnsupportedOperation, AttributeError):
+ # AttributeError is a surprising exception, seeing as how we've just checked
+ # that `hasattr(o, 'fileno')`. It happens for objects obtained via
+ # `Tarfile.extractfile()`, per issue 5229.
+ pass
+ else:
+ total_length = os.fstat(fileno).st_size
+
+ # Having used fstat to determine the file length, we need to
+ # confirm that this file was opened up in binary mode.
+ if "b" not in o.mode:
+ warnings.warn(
+ (
+ "Requests has determined the content-length for this "
+ "request using the binary size of the file: however, the "
+ "file has been opened in text mode (i.e. without the 'b' "
+ "flag in the mode). This may lead to an incorrect "
+ "content-length. In Requests 3.0, support will be removed "
+ "for files in text mode."
+ ),
+ FileModeWarning,
+ )
+
+ if hasattr(o, "tell"):
+ try:
+ current_position = o.tell()
+ except OSError:
+ # This can happen in some weird situations, such as when the file
+ # is actually a special file descriptor like stdin. In this
+ # instance, we don't know what the length is, so set it to zero and
+ # let requests chunk it instead.
+ if total_length is not None:
+ current_position = total_length
+ else:
+ if hasattr(o, "seek") and total_length is None:
+ # StringIO and BytesIO have seek but no usable fileno
+ try:
+ # seek to end of file
+ o.seek(0, 2)
+ total_length = o.tell()
+
+ # seek back to current position to support
+ # partially read file-like objects
+ o.seek(current_position or 0)
+ except OSError:
+ total_length = 0
+
+ if total_length is None:
+ total_length = 0
+
+ return max(0, total_length - current_position)
+
+
+def get_netrc_auth(url, raise_errors=False):
+ """Returns the Requests tuple auth for a given url from netrc."""
+
+ netrc_file = os.environ.get("NETRC")
+ if netrc_file is not None:
+ netrc_locations = (netrc_file,)
+ else:
+ netrc_locations = (f"~/{f}" for f in NETRC_FILES)
+
+ try:
+ from netrc import NetrcParseError, netrc
+
+ netrc_path = None
+
+ for f in netrc_locations:
+ try:
+ loc = os.path.expanduser(f)
+ except KeyError:
+ # os.path.expanduser can fail when $HOME is undefined and
+ # getpwuid fails. See https://bugs.python.org/issue20164 &
+ # https://github.com/psf/requests/issues/1846
+ return
+
+ if os.path.exists(loc):
+ netrc_path = loc
+ break
+
+ # Abort early if there isn't one.
+ if netrc_path is None:
+ return
+
+ ri = urlparse(url)
+
+        # Strip port numbers from netloc. This weird ``if...encode`` dance is
+        # used for Python 3.2, which doesn't support unicode literals.
+ splitstr = b":"
+ if isinstance(url, str):
+ splitstr = splitstr.decode("ascii")
+ host = ri.netloc.split(splitstr)[0]
+
+ try:
+ _netrc = netrc(netrc_path).authenticators(host)
+ if _netrc:
+ # Return with login / password
+ login_i = 0 if _netrc[0] else 1
+ return (_netrc[login_i], _netrc[2])
+ except (NetrcParseError, OSError):
+ # If there was a parsing error or a permissions issue reading the file,
+ # we'll just skip netrc auth unless explicitly asked to raise errors.
+ if raise_errors:
+ raise
+
+ # App Engine hackiness.
+ except (ImportError, AttributeError):
+ pass
+
+
+def guess_filename(obj):
+ """Tries to guess the filename of the given object."""
+ name = getattr(obj, "name", None)
+ if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
+ return os.path.basename(name)
+
+
+def extract_zipped_paths(path):
+ """Replace nonexistent paths that look like they refer to a member of a zip
+ archive with the location of an extracted copy of the target, or else
+ just return the provided path unchanged.
+ """
+ if os.path.exists(path):
+ # this is already a valid path, no need to do anything further
+ return path
+
+ # find the first valid part of the provided path and treat that as a zip archive
+ # assume the rest of the path is the name of a member in the archive
+ archive, member = os.path.split(path)
+ while archive and not os.path.exists(archive):
+ archive, prefix = os.path.split(archive)
+ if not prefix:
+ # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
+ # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
+ break
+ member = "/".join([prefix, member])
+
+ if not zipfile.is_zipfile(archive):
+ return path
+
+ zip_file = zipfile.ZipFile(archive)
+ if member not in zip_file.namelist():
+ return path
+
+ # we have a valid zip archive and a valid member of that archive
+ tmp = tempfile.gettempdir()
+ extracted_path = os.path.join(tmp, member.split("/")[-1])
+ if not os.path.exists(extracted_path):
+        # use read + write to avoid creating nested folders; we only want the file, and this avoids an mkdir race condition
+ with atomic_open(extracted_path) as file_handler:
+ file_handler.write(zip_file.read(member))
+ return extracted_path
+
+
+@contextlib.contextmanager
+def atomic_open(filename):
+ """Write a file to the disk in an atomic fashion"""
+ tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
+ try:
+ with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
+ yield tmp_handler
+ os.replace(tmp_name, filename)
+ except BaseException:
+ os.remove(tmp_name)
+ raise
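+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# requests): atomic_open writes to a sibling temp file and os.replace()s
+# it into place, so readers never observe a partially written file. The
+# path used here is hypothetical; defined for illustration, never called.
+def _demo_atomic_open(path="/tmp/example.bin"):
+    with atomic_open(path) as fh:
+        fh.write(b"payload")  # becomes visible at `path` only on success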
+
+
+def from_key_val_list(value):
+ """Take an object and test to see if it can be represented as a
+    dictionary. If it can be, return an
+ OrderedDict, e.g.,
+
+ ::
+
+ >>> from_key_val_list([('key', 'val')])
+ OrderedDict([('key', 'val')])
+ >>> from_key_val_list('string')
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot encode objects that are not 2-tuples
+ >>> from_key_val_list({'key': 'val'})
+ OrderedDict([('key', 'val')])
+
+ :rtype: OrderedDict
+ """
+ if value is None:
+ return None
+
+ if isinstance(value, (str, bytes, bool, int)):
+ raise ValueError("cannot encode objects that are not 2-tuples")
+
+ return OrderedDict(value)
+
+
+def to_key_val_list(value):
+ """Take an object and test to see if it can be represented as a
+ dictionary. If it can be, return a list of tuples, e.g.,
+
+ ::
+
+ >>> to_key_val_list([('key', 'val')])
+ [('key', 'val')]
+ >>> to_key_val_list({'key': 'val'})
+ [('key', 'val')]
+ >>> to_key_val_list('string')
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot encode objects that are not 2-tuples
+
+ :rtype: list
+ """
+ if value is None:
+ return None
+
+ if isinstance(value, (str, bytes, bool, int)):
+ raise ValueError("cannot encode objects that are not 2-tuples")
+
+ if isinstance(value, Mapping):
+ value = value.items()
+
+ return list(value)
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def parse_list_header(value):
+ """Parse lists as described by RFC 2068 Section 2.
+
+ In particular, parse comma-separated lists where the elements of
+ the list may include quoted-strings. A quoted-string could
+ contain a comma. A non-quoted string could have quotes in the
+ middle. Quotes are removed automatically after parsing.
+
+    It basically works like :func:`parse_set_header`, except that items
+ may appear multiple times and case sensitivity is preserved.
+
+ The return value is a standard :class:`list`:
+
+ >>> parse_list_header('token, "quoted value"')
+ ['token', 'quoted value']
+
+ To create a header from the :class:`list` again, use the
+ :func:`dump_header` function.
+
+ :param value: a string with a list header.
+ :return: :class:`list`
+ :rtype: list
+ """
+ result = []
+ for item in _parse_list_header(value):
+ if item[:1] == item[-1:] == '"':
+ item = unquote_header_value(item[1:-1])
+ result.append(item)
+ return result
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def parse_dict_header(value):
+ """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
+ convert them into a python dict:
+
+ >>> d = parse_dict_header('foo="is a fish", bar="as well"')
+ >>> type(d) is dict
+ True
+ >>> sorted(d.items())
+ [('bar', 'as well'), ('foo', 'is a fish')]
+
+ If there is no value for a key it will be `None`:
+
+ >>> parse_dict_header('key_without_value')
+ {'key_without_value': None}
+
+ To create a header from the :class:`dict` again, use the
+ :func:`dump_header` function.
+
+ :param value: a string with a dict header.
+ :return: :class:`dict`
+ :rtype: dict
+ """
+ result = {}
+ for item in _parse_list_header(value):
+ if "=" not in item:
+ result[item] = None
+ continue
+ name, value = item.split("=", 1)
+ if value[:1] == value[-1:] == '"':
+ value = unquote_header_value(value[1:-1])
+ result[name] = value
+ return result
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def unquote_header_value(value, is_filename=False):
+ r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
+ This does not use the real unquoting but what browsers are actually
+ using for quoting.
+
+ :param value: the header value to unquote.
+ :rtype: str
+ """
+ if value and value[0] == value[-1] == '"':
+ # this is not the real unquoting, but fixing this so that the
+ # RFC is met will result in bugs with internet explorer and
+ # probably some other browsers as well. IE for example is
+ # uploading files with "C:\foo\bar.txt" as filename
+ value = value[1:-1]
+
+ # if this is a filename and the starting characters look like
+ # a UNC path, then just return the value without quotes. Using the
+ # replace sequence below on a UNC path has the effect of turning
+ # the leading double slash into a single slash and then
+ # _fix_ie_filename() doesn't work correctly. See #458.
+ if not is_filename or value[:2] != "\\\\":
+ return value.replace("\\\\", "\\").replace('\\"', '"')
+ return value
+
+
+def dict_from_cookiejar(cj):
+ """Returns a key/value dictionary from a CookieJar.
+
+ :param cj: CookieJar object to extract cookies from.
+ :rtype: dict
+ """
+
+ cookie_dict = {}
+
+ for cookie in cj:
+ cookie_dict[cookie.name] = cookie.value
+
+ return cookie_dict
+
+
+def add_dict_to_cookiejar(cj, cookie_dict):
+ """Returns a CookieJar from a key/value dictionary.
+
+ :param cj: CookieJar to insert cookies into.
+ :param cookie_dict: Dict of key/values to insert into CookieJar.
+ :rtype: CookieJar
+ """
+
+ return cookiejar_from_dict(cookie_dict, cj)
+
+
+def get_encodings_from_content(content):
+ """Returns encodings from given content string.
+
+ :param content: bytestring to extract encodings from.
+ """
+ warnings.warn(
+ (
+ "In requests 3.0, get_encodings_from_content will be removed. For "
+ "more information, please see the discussion on issue #2266. (This"
+ " warning should only appear once.)"
+ ),
+ DeprecationWarning,
+ )
+
+ charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
+ pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
+ xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
+
+ return (
+ charset_re.findall(content)
+ + pragma_re.findall(content)
+ + xml_re.findall(content)
+ )
+
+
+def _parse_content_type_header(header):
+ """Returns content type and parameters from given header
+
+ :param header: string
+ :return: tuple containing content type and dictionary of
+ parameters
+ """
+
+ tokens = header.split(";")
+ content_type, params = tokens[0].strip(), tokens[1:]
+ params_dict = {}
+ items_to_strip = "\"' "
+
+ for param in params:
+ param = param.strip()
+ if param:
+ key, value = param, True
+ index_of_equals = param.find("=")
+ if index_of_equals != -1:
+ key = param[:index_of_equals].strip(items_to_strip)
+ value = param[index_of_equals + 1 :].strip(items_to_strip)
+ params_dict[key.lower()] = value
+ return content_type, params_dict
+
+
+def get_encoding_from_headers(headers):
+ """Returns encodings from given HTTP Header Dict.
+
+ :param headers: dictionary to extract encoding from.
+ :rtype: str
+ """
+
+ content_type = headers.get("content-type")
+
+ if not content_type:
+ return None
+
+ content_type, params = _parse_content_type_header(content_type)
+
+ if "charset" in params:
+ return params["charset"].strip("'\"")
+
+ if "text" in content_type:
+ return "ISO-8859-1"
+
+ if "application/json" in content_type:
+ # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
+ return "utf-8"
+
+
+def stream_decode_response_unicode(iterator, r):
+ """Stream decodes an iterator."""
+
+ if r.encoding is None:
+ yield from iterator
+ return
+
+ decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace")
+ for chunk in iterator:
+ rv = decoder.decode(chunk)
+ if rv:
+ yield rv
+ rv = decoder.decode(b"", final=True)
+ if rv:
+ yield rv
+
+
+def iter_slices(string, slice_length):
+ """Iterate over slices of a string."""
+ pos = 0
+ if slice_length is None or slice_length <= 0:
+ slice_length = len(string)
+ while pos < len(string):
+ yield string[pos : pos + slice_length]
+ pos += slice_length
+
+
+def get_unicode_from_response(r):
+ """Returns the requested content back in unicode.
+
+ :param r: Response object to get unicode content from.
+
+ Tried:
+
+ 1. charset from content-type
+ 2. fall back and replace all unicode characters
+
+ :rtype: str
+ """
+ warnings.warn(
+ (
+ "In requests 3.0, get_unicode_from_response will be removed. For "
+ "more information, please see the discussion on issue #2266. (This"
+ " warning should only appear once.)"
+ ),
+ DeprecationWarning,
+ )
+
+ tried_encodings = []
+
+ # Try charset from content-type
+ encoding = get_encoding_from_headers(r.headers)
+
+ if encoding:
+ try:
+ return str(r.content, encoding)
+ except UnicodeError:
+ tried_encodings.append(encoding)
+
+ # Fall back:
+ try:
+ return str(r.content, encoding, errors="replace")
+ except TypeError:
+ return r.content
+
+
+# The unreserved URI characters (RFC 3986)
+UNRESERVED_SET = frozenset(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~"
+)
+
+
+def unquote_unreserved(uri):
+ """Un-escape any percent-escape sequences in a URI that are unreserved
+ characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
+
+ :rtype: str
+ """
+ parts = uri.split("%")
+ for i in range(1, len(parts)):
+ h = parts[i][0:2]
+ if len(h) == 2 and h.isalnum():
+ try:
+ c = chr(int(h, 16))
+ except ValueError:
+ raise InvalidURL(f"Invalid percent-escape sequence: '{h}'")
+
+ if c in UNRESERVED_SET:
+ parts[i] = c + parts[i][2:]
+ else:
+ parts[i] = f"%{parts[i]}"
+ else:
+ parts[i] = f"%{parts[i]}"
+ return "".join(parts)
+
+
+def requote_uri(uri):
+ """Re-quote the given URI.
+
+ This function passes the given URI through an unquote/quote cycle to
+ ensure that it is fully and consistently quoted.
+
+ :rtype: str
+ """
+ safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
+ safe_without_percent = "!#$&'()*+,/:;=?@[]~"
+ try:
+ # Unquote only the unreserved characters
+ # Then quote only illegal characters (do not quote reserved,
+ # unreserved, or '%')
+ return quote(unquote_unreserved(uri), safe=safe_with_percent)
+ except InvalidURL:
+ # We couldn't unquote the given URI, so let's try quoting it, but
+ # there may be unquoted '%'s in the URI. We need to make sure they're
+ # properly quoted so they do not cause issues elsewhere.
+ return quote(uri, safe=safe_without_percent)
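+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# requests): requote_uri preserves already-encoded sequences while
+# percent-encoding raw illegal characters such as spaces.
+def _demo_requote_uri():
+    assert requote_uri("http://example.com/a%20b?x=1 2") == (
+        "http://example.com/a%20b?x=1%202"
+    )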
+
+
+def address_in_network(ip, net):
+ """This function allows you to check if an IP belongs to a network subnet
+
+ Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
+ returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
+
+ :rtype: bool
+ """
+ ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0]
+ netaddr, bits = net.split("/")
+ netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
+ network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask
+ return (ipaddr & netmask) == (network & netmask)
+
+
+def dotted_netmask(mask):
+ """Converts mask from /xx format to xxx.xxx.xxx.xxx
+
+ Example: if mask is 24 function returns 255.255.255.0
+
+ :rtype: str
+ """
+ bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1
+ return socket.inet_ntoa(struct.pack(">I", bits))
+
+
+def is_ipv4_address(string_ip):
+ """
+ :rtype: bool
+ """
+ try:
+ socket.inet_aton(string_ip)
+ except OSError:
+ return False
+ return True
+
+
+def is_valid_cidr(string_network):
+ """
+    Very simple check of the CIDR format in the no_proxy variable.
+
+ :rtype: bool
+ """
+ if string_network.count("/") == 1:
+ try:
+ mask = int(string_network.split("/")[1])
+ except ValueError:
+ return False
+
+ if mask < 1 or mask > 32:
+ return False
+
+ try:
+ socket.inet_aton(string_network.split("/")[0])
+ except OSError:
+ return False
+ else:
+ return False
+ return True
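+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# requests): how the netmask helpers above fit together when matching
+# no_proxy entries given in CIDR notation.
+def _demo_cidr_helpers():
+    assert dotted_netmask(24) == "255.255.255.0"
+    assert is_valid_cidr("192.168.1.0/24")
+    assert address_in_network("192.168.1.1", "192.168.1.0/24")
+    assert not address_in_network("10.0.0.1", "192.168.1.0/24")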
+
+
+@contextlib.contextmanager
+def set_environ(env_name, value):
+ """Set the environment variable 'env_name' to 'value'
+
+ Save previous value, yield, and then restore the previous value stored in
+ the environment variable 'env_name'.
+
+ If 'value' is None, do nothing"""
+ value_changed = value is not None
+ if value_changed:
+ old_value = os.environ.get(env_name)
+ os.environ[env_name] = value
+ try:
+ yield
+ finally:
+ if value_changed:
+ if old_value is None:
+ del os.environ[env_name]
+ else:
+ os.environ[env_name] = old_value
+
+
+def should_bypass_proxies(url, no_proxy):
+ """
+ Returns whether we should bypass proxies or not.
+
+ :rtype: bool
+ """
+ # Prioritize lowercase environment variables over uppercase
+ # to keep a consistent behaviour with other http projects (curl, wget).
+ def get_proxy(key):
+ return os.environ.get(key) or os.environ.get(key.upper())
+
+ # First check whether no_proxy is defined. If it is, check that the URL
+ # we're getting isn't in the no_proxy list.
+ no_proxy_arg = no_proxy
+ if no_proxy is None:
+ no_proxy = get_proxy("no_proxy")
+ parsed = urlparse(url)
+
+ if parsed.hostname is None:
+ # URLs don't always have hostnames, e.g. file:/// urls.
+ return True
+
+ if no_proxy:
+ # We need to check whether we match here. We need to see if we match
+ # the end of the hostname, both with and without the port.
+ no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)
+
+ if is_ipv4_address(parsed.hostname):
+ for proxy_ip in no_proxy:
+ if is_valid_cidr(proxy_ip):
+ if address_in_network(parsed.hostname, proxy_ip):
+ return True
+ elif parsed.hostname == proxy_ip:
+                # The no_proxy entry was defined in plain IP notation instead of
+                # CIDR notation and matches the IP of the URL host exactly
+ return True
+ else:
+ host_with_port = parsed.hostname
+ if parsed.port:
+ host_with_port += f":{parsed.port}"
+
+ for host in no_proxy:
+ if parsed.hostname.endswith(host) or host_with_port.endswith(host):
+ # The URL does match something in no_proxy, so we don't want
+ # to apply the proxies on this URL.
+ return True
+
+ with set_environ("no_proxy", no_proxy_arg):
+ # parsed.hostname can be `None` in cases such as a file URI.
+ try:
+ bypass = proxy_bypass(parsed.hostname)
+ except (TypeError, socket.gaierror):
+ bypass = False
+
+ if bypass:
+ return True
+
+ return False
+
+
+def get_environ_proxies(url, no_proxy=None):
+ """
+ Return a dict of environment proxies.
+
+ :rtype: dict
+ """
+ if should_bypass_proxies(url, no_proxy=no_proxy):
+ return {}
+ else:
+ return getproxies()
+
+
+def select_proxy(url, proxies):
+ """Select a proxy for the url, if applicable.
+
+    :param url: The URL of the request
+ :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
+ """
+ proxies = proxies or {}
+ urlparts = urlparse(url)
+ if urlparts.hostname is None:
+ return proxies.get(urlparts.scheme, proxies.get("all"))
+
+ proxy_keys = [
+ urlparts.scheme + "://" + urlparts.hostname,
+ urlparts.scheme,
+ "all://" + urlparts.hostname,
+ "all",
+ ]
+ proxy = None
+ for proxy_key in proxy_keys:
+ if proxy_key in proxies:
+ proxy = proxies[proxy_key]
+ break
+
+ return proxy
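+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# requests): select_proxy prefers the most specific key, trying
+# scheme://host, then scheme, then all://host, then "all".
+def _demo_select_proxy():
+    proxies = {
+        "https://example.com": "http://proxy-a:8080",  # hypothetical proxies
+        "https": "http://proxy-b:8080",
+    }
+    assert select_proxy("https://example.com/x", proxies) == "http://proxy-a:8080"
+    assert select_proxy("https://other.org/x", proxies) == "http://proxy-b:8080"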
+
+
+def resolve_proxies(request, proxies, trust_env=True):
+ """This method takes proxy information from a request and configuration
+ input to resolve a mapping of target proxies. This will consider settings
+    such as NO_PROXY to strip proxy configurations.
+
+ :param request: Request or PreparedRequest
+ :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
+ :param trust_env: Boolean declaring whether to trust environment configs
+
+ :rtype: dict
+ """
+ proxies = proxies if proxies is not None else {}
+ url = request.url
+ scheme = urlparse(url).scheme
+ no_proxy = proxies.get("no_proxy")
+ new_proxies = proxies.copy()
+
+ if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
+ environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
+
+ proxy = environ_proxies.get(scheme, environ_proxies.get("all"))
+
+ if proxy:
+ new_proxies.setdefault(scheme, proxy)
+ return new_proxies
+
+
+def default_user_agent(name="python-requests"):
+ """
+ Return a string representing the default user agent.
+
+ :rtype: str
+ """
+ return f"{name}/{__version__}"
+
+
+def default_headers():
+ """
+ :rtype: requests.structures.CaseInsensitiveDict
+ """
+ return CaseInsensitiveDict(
+ {
+ "User-Agent": default_user_agent(),
+ "Accept-Encoding": DEFAULT_ACCEPT_ENCODING,
+ "Accept": "*/*",
+ "Connection": "keep-alive",
+ }
+ )
+
+
+def parse_header_links(value):
+ """Return a list of parsed link headers proxies.
+
+ i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
+
+ :rtype: list
+ """
+
+ links = []
+
+ replace_chars = " '\""
+
+ value = value.strip(replace_chars)
+ if not value:
+ return links
+
+ for val in re.split(", *<", value):
+ try:
+ url, params = val.split(";", 1)
+ except ValueError:
+ url, params = val, ""
+
+ link = {"url": url.strip("<> '\"")}
+
+ for param in params.split(";"):
+ try:
+ key, value = param.split("=")
+ except ValueError:
+ break
+
+ link[key.strip(replace_chars)] = value.strip(replace_chars)
+
+ links.append(link)
+
+ return links
+
+
+# Null bytes; no need to recreate these on each call to guess_json_utf
+_null = "\x00".encode("ascii") # encoding to ASCII for Python 3
+_null2 = _null * 2
+_null3 = _null * 3
+
+
+def guess_json_utf(data):
+ """
+ :rtype: str
+ """
+ # JSON always starts with two ASCII characters, so detection is as
+ # easy as counting the nulls and from their location and count
+ # determine the encoding. Also detect a BOM, if present.
+ sample = data[:4]
+ if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
+ return "utf-32" # BOM included
+ if sample[:3] == codecs.BOM_UTF8:
+ return "utf-8-sig" # BOM included, MS style (discouraged)
+ if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
+ return "utf-16" # BOM included
+ nullcount = sample.count(_null)
+ if nullcount == 0:
+ return "utf-8"
+ if nullcount == 2:
+ if sample[::2] == _null2: # 1st and 3rd are null
+ return "utf-16-be"
+ if sample[1::2] == _null2: # 2nd and 4th are null
+ return "utf-16-le"
+ # Did not detect 2 valid UTF-16 ascii-range characters
+ if nullcount == 3:
+ if sample[:3] == _null3:
+ return "utf-32-be"
+ if sample[1:] == _null3:
+ return "utf-32-le"
+ # Did not detect a valid UTF-32 ascii-range character
+ return None
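+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# requests): the null-byte pattern of the first four bytes is enough to
+# distinguish the UTF flavours JSON may legally use.
+def _demo_guess_json_utf():
+    assert guess_json_utf(b'{"a": 1}') == "utf-8"
+    assert guess_json_utf('{"a": 1}'.encode("utf-16-le")) == "utf-16-le"
+    assert guess_json_utf('{"a": 1}'.encode("utf-16")) == "utf-16"  # has a BOM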
+
+
+def prepend_scheme_if_needed(url, new_scheme):
+ """Given a URL that may or may not have a scheme, prepend the given scheme.
+ Does not replace a present scheme with the one provided as an argument.
+
+ :rtype: str
+ """
+ parsed = parse_url(url)
+ scheme, auth, host, port, path, query, fragment = parsed
+
+ # A defect in urlparse determines that there isn't a netloc present in some
+ # urls. We previously assumed parsing was overly cautious, and swapped the
+ # netloc and path. Due to a lack of tests on the original defect, this is
+ # maintained with parse_url for backwards compatibility.
+ netloc = parsed.netloc
+ if not netloc:
+ netloc, path = path, netloc
+
+ if auth:
+ # parse_url doesn't provide the netloc with auth
+ # so we'll add it ourselves.
+ netloc = "@".join([auth, netloc])
+ if scheme is None:
+ scheme = new_scheme
+ if path is None:
+ path = ""
+
+ return urlunparse((scheme, netloc, path, "", query, fragment))
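+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# requests): a missing scheme is prepended, an existing one is kept.
+def _demo_prepend_scheme():
+    assert prepend_scheme_if_needed("example.com/pub", "https") == (
+        "https://example.com/pub"
+    )
+    assert prepend_scheme_if_needed("http://example.com", "https") == (
+        "http://example.com"
+    )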
+
+
+def get_auth_from_url(url):
+ """Given a url with authentication components, extract them into a tuple of
+ username,password.
+
+ :rtype: (str,str)
+ """
+ parsed = urlparse(url)
+
+ try:
+ auth = (unquote(parsed.username), unquote(parsed.password))
+ except (AttributeError, TypeError):
+ auth = ("", "")
+
+ return auth
+
+
+def check_header_validity(header):
+ """Verifies that header parts don't contain leading whitespace
+ reserved characters, or return characters.
+
+ :param header: tuple, in the format (name, value).
+ """
+ name, value = header
+
+ for part in header:
+ if type(part) not in HEADER_VALIDATORS:
+ raise InvalidHeader(
+ f"Header part ({part!r}) from {{{name!r}: {value!r}}} must be "
+ f"of type str or bytes, not {type(part)}"
+ )
+
+ _validate_header_part(name, "name", HEADER_VALIDATORS[type(name)][0])
+ _validate_header_part(value, "value", HEADER_VALIDATORS[type(value)][1])
+
+
+def _validate_header_part(header_part, header_kind, validator):
+ if not validator.match(header_part):
+ raise InvalidHeader(
+ f"Invalid leading whitespace, reserved character(s), or return"
+ f"character(s) in header {header_kind}: {header_part!r}"
+ )
+
+
+def urldefragauth(url):
+ """
+ Given a url remove the fragment and the authentication part.
+
+ :rtype: str
+ """
+ scheme, netloc, path, params, query, fragment = urlparse(url)
+
+ # see func:`prepend_scheme_if_needed`
+ if not netloc:
+ netloc, path = path, netloc
+
+ netloc = netloc.rsplit("@", 1)[-1]
+
+ return urlunparse((scheme, netloc, path, params, query, ""))
+
+
+def rewind_body(prepared_request):
+ """Move file pointer back to its recorded starting position
+ so it can be read again on redirect.
+ """
+ body_seek = getattr(prepared_request.body, "seek", None)
+ if body_seek is not None and isinstance(
+ prepared_request._body_position, integer_types
+ ):
+ try:
+ body_seek(prepared_request._body_position)
+ except OSError:
+ raise UnrewindableBodyError(
+ "An error occurred when rewinding request body for redirect."
+ )
+ else:
+ raise UnrewindableBodyError("Unable to rewind request body for redirect.")
diff --git a/third_party/python/pip/pip/_vendor/resolvelib/__init__.py b/third_party/python/pip/pip/_vendor/resolvelib/__init__.py
new file mode 100644
index 0000000000..ce05fd3027
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/resolvelib/__init__.py
@@ -0,0 +1,26 @@
+__all__ = [
+ "__version__",
+ "AbstractProvider",
+ "AbstractResolver",
+ "BaseReporter",
+ "InconsistentCandidate",
+ "Resolver",
+ "RequirementsConflicted",
+ "ResolutionError",
+ "ResolutionImpossible",
+ "ResolutionTooDeep",
+]
+
+__version__ = "0.8.1"
+
+
+from .providers import AbstractProvider, AbstractResolver
+from .reporters import BaseReporter
+from .resolvers import (
+ InconsistentCandidate,
+ RequirementsConflicted,
+ ResolutionError,
+ ResolutionImpossible,
+ ResolutionTooDeep,
+ Resolver,
+)
diff --git a/third_party/python/pip/pip/_vendor/resolvelib/compat/__init__.py b/third_party/python/pip/pip/_vendor/resolvelib/compat/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/resolvelib/compat/__init__.py
diff --git a/third_party/python/pip/pip/_vendor/resolvelib/compat/collections_abc.py b/third_party/python/pip/pip/_vendor/resolvelib/compat/collections_abc.py
new file mode 100644
index 0000000000..1becc5093c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/resolvelib/compat/collections_abc.py
@@ -0,0 +1,6 @@
+__all__ = ["Mapping", "Sequence"]
+
+try:
+ from collections.abc import Mapping, Sequence
+except ImportError:
+ from collections import Mapping, Sequence
diff --git a/third_party/python/pip/pip/_vendor/resolvelib/providers.py b/third_party/python/pip/pip/_vendor/resolvelib/providers.py
new file mode 100644
index 0000000000..7d0a9c22a4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/resolvelib/providers.py
@@ -0,0 +1,133 @@
+class AbstractProvider(object):
+ """Delegate class to provide requirement interface for the resolver."""
+
+ def identify(self, requirement_or_candidate):
+ """Given a requirement, return an identifier for it.
+
+ This is used to identify a requirement, e.g. whether two requirements
+ should have their specifier parts merged.
+ """
+ raise NotImplementedError
+
+ def get_preference(
+ self,
+ identifier,
+ resolutions,
+ candidates,
+ information,
+ backtrack_causes,
+ ):
+ """Produce a sort key for given requirement based on preference.
+
+ The preference is defined as "I think this requirement should be
+ resolved first". The lower the return value is, the more preferred
+ this group of arguments is.
+
+ :param identifier: An identifier as returned by ``identify()``. This
+            identifies the dependency whose preference is being computed.
+ :param resolutions: Mapping of candidates currently pinned by the
+ resolver. Each key is an identifier, and the value a candidate.
+ The candidate may conflict with requirements from ``information``.
+ :param candidates: Mapping of each dependency's possible candidates.
+ Each value is an iterator of candidates.
+ :param information: Mapping of requirement information of each package.
+ Each value is an iterator of *requirement information*.
+ :param backtrack_causes: Sequence of requirement information that were
+ the requirements that caused the resolver to most recently backtrack.
+
+ A *requirement information* instance is a named tuple with two members:
+
+ * ``requirement`` specifies a requirement contributing to the current
+ list of candidates.
+        * ``parent`` specifies the candidate that depends on (introduced) the
+          requirement, or ``None`` to indicate a root requirement.
+
+        The preference could depend on various factors, including (not
+ necessarily in this order):
+
+ * Is this package pinned in the current resolution result?
+ * How relaxed is the requirement? Stricter ones should probably be
+ worked on first? (I don't know, actually.)
+ * How many possibilities are there to satisfy this requirement? Those
+ with few left should likely be worked on first, I guess?
+ * Are there any known conflicts for this requirement? We should
+ probably work on those with the most known conflicts.
+
+ A sortable value should be returned (this will be used as the ``key``
+ parameter of the built-in sorting function). The smaller the value is,
+ the more preferred this requirement is (i.e. the sorting function
+ is called with ``reverse=False``).
+ """
+ raise NotImplementedError
+
+ def find_matches(self, identifier, requirements, incompatibilities):
+ """Find all possible candidates that satisfy given constraints.
+
+ :param identifier: An identifier as returned by ``identify()``. This
+            identifies the dependency whose matches should be returned.
+ :param requirements: A mapping of requirements that all returned
+ candidates must satisfy. Each key is an identifier, and the value
+ an iterator of requirements for that dependency.
+ :param incompatibilities: A mapping of known incompatibilities of
+ each dependency. Each key is an identifier, and the value an
+ iterator of incompatibilities known to the resolver. All
+ incompatibilities *must* be excluded from the return value.
+
+ This should try to get candidates based on the requirements' types.
+ For VCS, local, and archive requirements, the one-and-only match is
+ returned, and for a "named" requirement, the index(es) should be
+ consulted to find concrete candidates for this requirement.
+
+ The return value should produce candidates ordered by preference; the
+ most preferred candidate should come first. The return type may be one
+ of the following:
+
+ * A callable that returns an iterator that yields candidates.
+        * A collection of candidates.
+ * An iterable of candidates. This will be consumed immediately into a
+ list of candidates.
+ """
+ raise NotImplementedError
+
+ def is_satisfied_by(self, requirement, candidate):
+ """Whether the given requirement can be satisfied by a candidate.
+
+        The candidate is guaranteed to have been generated from the
+ requirement.
+
+ A boolean should be returned to indicate whether ``candidate`` is a
+ viable solution to the requirement.
+ """
+ raise NotImplementedError
+
+ def get_dependencies(self, candidate):
+ """Get dependencies of a candidate.
+
+ This should return a collection of requirements that `candidate`
+ specifies as its dependencies.
+ """
+ raise NotImplementedError
+
+
+class AbstractResolver(object):
+ """The thing that performs the actual resolution work."""
+
+ base_exception = Exception
+
+ def __init__(self, provider, reporter):
+ self.provider = provider
+ self.reporter = reporter
+
+ def resolve(self, requirements, **kwargs):
+ """Take a collection of constraints, spit out the resolution result.
+
+ This returns a representation of the final resolution state, with one
+        guaranteed attribute ``mapping`` that contains resolved candidates as
+ values. The keys are their respective identifiers.
+
+ :param requirements: A collection of constraints.
+ :param kwargs: Additional keyword arguments that subclasses may accept.
+
+ :raises: ``self.base_exception`` or its subclass.
+ """
+ raise NotImplementedError
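+
+
+# --- Illustrative sketch (editorial addition, not part of upstream
+# resolvelib): the smallest useful provider, resolving over a static
+# dependency table where requirements and candidates are plain names and
+# a candidate satisfies exactly the requirement with the same name.
+class _StaticProvider(AbstractProvider):
+    def __init__(self, graph):
+        self._graph = graph  # e.g. {"a": ["b"], "b": []}
+
+    def identify(self, requirement_or_candidate):
+        return requirement_or_candidate
+
+    def get_preference(self, identifier, resolutions, candidates, information, backtrack_causes):
+        return identifier  # arbitrary but stable ordering
+
+    def find_matches(self, identifier, requirements, incompatibilities):
+        bad = set(incompatibilities[identifier])
+        return [c for c in (identifier,) if c not in bad]
+
+    def is_satisfied_by(self, requirement, candidate):
+        return requirement == candidate
+
+    def get_dependencies(self, candidate):
+        return self._graph.get(candidate, [])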
diff --git a/third_party/python/pip/pip/_vendor/resolvelib/reporters.py b/third_party/python/pip/pip/_vendor/resolvelib/reporters.py
new file mode 100644
index 0000000000..6695480fff
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/resolvelib/reporters.py
@@ -0,0 +1,43 @@
+class BaseReporter(object):
+ """Delegate class to provider progress reporting for the resolver."""
+
+ def starting(self):
+ """Called before the resolution actually starts."""
+
+ def starting_round(self, index):
+ """Called before each round of resolution starts.
+
+ The index is zero-based.
+ """
+
+ def ending_round(self, index, state):
+ """Called before each round of resolution ends.
+
+ This is NOT called if the resolution ends at this round. Use `ending`
+ if you want to report finalization. The index is zero-based.
+ """
+
+ def ending(self, state):
+ """Called before the resolution ends successfully."""
+
+ def adding_requirement(self, requirement, parent):
+ """Called when adding a new requirement into the resolve criteria.
+
+ :param requirement: The additional requirement to be applied to filter
+            the available candidates.
+ :param parent: The candidate that requires ``requirement`` as a
+ dependency, or None if ``requirement`` is one of the root
+ requirements passed in from ``Resolver.resolve()``.
+ """
+
+ def resolving_conflicts(self, causes):
+ """Called when starting to attempt requirement conflict resolution.
+
+ :param causes: The information on the collision that caused the backtracking.
+ """
+
+ def backtracking(self, candidate):
+ """Called when rejecting a candidate during backtracking."""
+
+ def pinning(self, candidate):
+ """Called when adding a candidate to the potential solution."""
diff --git a/third_party/python/pip/pip/_vendor/resolvelib/resolvers.py b/third_party/python/pip/pip/_vendor/resolvelib/resolvers.py
new file mode 100644
index 0000000000..787681b03e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/resolvelib/resolvers.py
@@ -0,0 +1,482 @@
+import collections
+import operator
+
+from .providers import AbstractResolver
+from .structs import DirectedGraph, IteratorMapping, build_iter_view
+
+RequirementInformation = collections.namedtuple(
+ "RequirementInformation", ["requirement", "parent"]
+)
+
+
+class ResolverException(Exception):
+ """A base class for all exceptions raised by this module.
+
+    Exceptions derived from this class should all be handled in this module. Any
+    that bubble past the resolver should be treated as a bug.
+ """
+
+
+class RequirementsConflicted(ResolverException):
+ def __init__(self, criterion):
+ super(RequirementsConflicted, self).__init__(criterion)
+ self.criterion = criterion
+
+ def __str__(self):
+ return "Requirements conflict: {}".format(
+ ", ".join(repr(r) for r in self.criterion.iter_requirement()),
+ )
+
+
+class InconsistentCandidate(ResolverException):
+ def __init__(self, candidate, criterion):
+ super(InconsistentCandidate, self).__init__(candidate, criterion)
+ self.candidate = candidate
+ self.criterion = criterion
+
+ def __str__(self):
+ return "Provided candidate {!r} does not satisfy {}".format(
+ self.candidate,
+ ", ".join(repr(r) for r in self.criterion.iter_requirement()),
+ )
+
+
+class Criterion(object):
+ """Representation of possible resolution results of a package.
+
+ This holds three attributes:
+
+ * `information` is a collection of `RequirementInformation` pairs.
+ Each pair is a requirement contributing to this criterion, and the
+ candidate that provides the requirement.
+ * `incompatibilities` is a collection of all known not-to-work candidates
+ to exclude from consideration.
+    * `candidates` is a collection containing all possible candidates deduced
+ from the union of contributing requirements and known incompatibilities.
+ It should never be empty, except when the criterion is an attribute of a
+ raised `RequirementsConflicted` (in which case it is always empty).
+
+ .. note::
+ This class is intended to be externally immutable. **Do not** mutate
+ any of its attribute containers.
+ """
+
+ def __init__(self, candidates, information, incompatibilities):
+ self.candidates = candidates
+ self.information = information
+ self.incompatibilities = incompatibilities
+
+ def __repr__(self):
+ requirements = ", ".join(
+ "({!r}, via={!r})".format(req, parent)
+ for req, parent in self.information
+ )
+ return "Criterion({})".format(requirements)
+
+ def iter_requirement(self):
+ return (i.requirement for i in self.information)
+
+ def iter_parent(self):
+ return (i.parent for i in self.information)
+
+
+class ResolutionError(ResolverException):
+ pass
+
+
+class ResolutionImpossible(ResolutionError):
+ def __init__(self, causes):
+ super(ResolutionImpossible, self).__init__(causes)
+ # causes is a list of RequirementInformation objects
+ self.causes = causes
+
+
+class ResolutionTooDeep(ResolutionError):
+ def __init__(self, round_count):
+ super(ResolutionTooDeep, self).__init__(round_count)
+ self.round_count = round_count
+
+
+# Resolution state in a round.
+State = collections.namedtuple("State", "mapping criteria backtrack_causes")
+
+
+class Resolution(object):
+ """Stateful resolution object.
+
+ This is designed as a one-off object that holds information to kick start
+ the resolution process, and holds the results afterwards.
+ """
+
+ def __init__(self, provider, reporter):
+ self._p = provider
+ self._r = reporter
+ self._states = []
+
+ @property
+ def state(self):
+ try:
+ return self._states[-1]
+ except IndexError:
+ raise AttributeError("state")
+
+ def _push_new_state(self):
+ """Push a new state into history.
+
+        This new state will be used to hold resolution results of the
+        upcoming round.
+ """
+ base = self._states[-1]
+ state = State(
+ mapping=base.mapping.copy(),
+ criteria=base.criteria.copy(),
+ backtrack_causes=base.backtrack_causes[:],
+ )
+ self._states.append(state)
+
+ def _add_to_criteria(self, criteria, requirement, parent):
+ self._r.adding_requirement(requirement=requirement, parent=parent)
+
+ identifier = self._p.identify(requirement_or_candidate=requirement)
+ criterion = criteria.get(identifier)
+ if criterion:
+ incompatibilities = list(criterion.incompatibilities)
+ else:
+ incompatibilities = []
+
+ matches = self._p.find_matches(
+ identifier=identifier,
+ requirements=IteratorMapping(
+ criteria,
+ operator.methodcaller("iter_requirement"),
+ {identifier: [requirement]},
+ ),
+ incompatibilities=IteratorMapping(
+ criteria,
+ operator.attrgetter("incompatibilities"),
+ {identifier: incompatibilities},
+ ),
+ )
+
+ if criterion:
+ information = list(criterion.information)
+ information.append(RequirementInformation(requirement, parent))
+ else:
+ information = [RequirementInformation(requirement, parent)]
+
+ criterion = Criterion(
+ candidates=build_iter_view(matches),
+ information=information,
+ incompatibilities=incompatibilities,
+ )
+ if not criterion.candidates:
+ raise RequirementsConflicted(criterion)
+ criteria[identifier] = criterion
+
+ def _get_preference(self, name):
+ return self._p.get_preference(
+ identifier=name,
+ resolutions=self.state.mapping,
+ candidates=IteratorMapping(
+ self.state.criteria,
+ operator.attrgetter("candidates"),
+ ),
+ information=IteratorMapping(
+ self.state.criteria,
+ operator.attrgetter("information"),
+ ),
+ backtrack_causes=self.state.backtrack_causes,
+ )
+
+ def _is_current_pin_satisfying(self, name, criterion):
+ try:
+ current_pin = self.state.mapping[name]
+ except KeyError:
+ return False
+ return all(
+ self._p.is_satisfied_by(requirement=r, candidate=current_pin)
+ for r in criterion.iter_requirement()
+ )
+
+ def _get_updated_criteria(self, candidate):
+ criteria = self.state.criteria.copy()
+ for requirement in self._p.get_dependencies(candidate=candidate):
+ self._add_to_criteria(criteria, requirement, parent=candidate)
+ return criteria
+
+ def _attempt_to_pin_criterion(self, name):
+ criterion = self.state.criteria[name]
+
+ causes = []
+ for candidate in criterion.candidates:
+ try:
+ criteria = self._get_updated_criteria(candidate)
+ except RequirementsConflicted as e:
+ causes.append(e.criterion)
+ continue
+
+ # Check the newly-pinned candidate actually works. This should
+ # always pass under normal circumstances, but in the case of a
+ # faulty provider, we will raise an error to notify the implementer
+ # to fix find_matches() and/or is_satisfied_by().
+ satisfied = all(
+ self._p.is_satisfied_by(requirement=r, candidate=candidate)
+ for r in criterion.iter_requirement()
+ )
+ if not satisfied:
+ raise InconsistentCandidate(candidate, criterion)
+
+ self._r.pinning(candidate=candidate)
+ self.state.criteria.update(criteria)
+
+ # Put newly-pinned candidate at the end. This is essential because
+ # backtracking looks at this mapping to get the last pin.
+ self.state.mapping.pop(name, None)
+ self.state.mapping[name] = candidate
+
+ return []
+
+ # All candidates tried, nothing works. This criterion is a dead
+ # end, signal for backtracking.
+ return causes
+
+ def _backtrack(self):
+ """Perform backtracking.
+
+ When we enter here, the stack is like this::
+
+ [ state Z ]
+ [ state Y ]
+ [ state X ]
+ .... earlier states are irrelevant.
+
+ 1. No pins worked for Z, so it does not have a pin.
+ 2. We want to reset state Y to unpinned, and pin another candidate.
+ 3. State X holds what state Y was before the pin, but does not
+ have the incompatibility information gathered in state Y.
+
+ Each iteration of the loop will:
+
+ 1. Discard Z.
+ 2. Discard Y but remember its incompatibility information gathered
+ previously, and the failure we're dealing with right now.
+ 3. Push a new state Y' based on X, and apply the incompatibility
+ information from Y to Y'.
+ 4a. If this causes Y' to conflict, we need to backtrack again. Make Y'
+ the new Z and go back to step 2.
+ 4b. If the incompatibilities apply cleanly, end backtracking.
+ """
+ while len(self._states) >= 3:
+ # Remove the state that triggered backtracking.
+ del self._states[-1]
+
+ # Retrieve the last candidate pin and known incompatibilities.
+ broken_state = self._states.pop()
+ name, candidate = broken_state.mapping.popitem()
+ incompatibilities_from_broken = [
+ (k, list(v.incompatibilities))
+ for k, v in broken_state.criteria.items()
+ ]
+
+ # Also mark the newly known incompatibility.
+ incompatibilities_from_broken.append((name, [candidate]))
+
+ self._r.backtracking(candidate=candidate)
+
+ # Create a new state from the last known-to-work one, and apply
+ # the previously gathered incompatibility information.
+ def _patch_criteria():
+ for k, incompatibilities in incompatibilities_from_broken:
+ if not incompatibilities:
+ continue
+ try:
+ criterion = self.state.criteria[k]
+ except KeyError:
+ continue
+ matches = self._p.find_matches(
+ identifier=k,
+ requirements=IteratorMapping(
+ self.state.criteria,
+ operator.methodcaller("iter_requirement"),
+ ),
+ incompatibilities=IteratorMapping(
+ self.state.criteria,
+ operator.attrgetter("incompatibilities"),
+ {k: incompatibilities},
+ ),
+ )
+ candidates = build_iter_view(matches)
+ if not candidates:
+ return False
+ incompatibilities.extend(criterion.incompatibilities)
+ self.state.criteria[k] = Criterion(
+ candidates=candidates,
+ information=list(criterion.information),
+ incompatibilities=incompatibilities,
+ )
+ return True
+
+ self._push_new_state()
+ success = _patch_criteria()
+
+ # It works! Let's work on this new state.
+ if success:
+ return True
+
+ # State does not work after applying known incompatibilities.
+ # Try the still previous state.
+
+ # No way to backtrack anymore.
+ return False
+
+ def resolve(self, requirements, max_rounds):
+ if self._states:
+ raise RuntimeError("already resolved")
+
+ self._r.starting()
+
+ # Initialize the root state.
+ self._states = [
+ State(
+ mapping=collections.OrderedDict(),
+ criteria={},
+ backtrack_causes=[],
+ )
+ ]
+ for r in requirements:
+ try:
+ self._add_to_criteria(self.state.criteria, r, parent=None)
+ except RequirementsConflicted as e:
+ raise ResolutionImpossible(e.criterion.information)
+
+ # The root state is saved as a sentinel so the first ever pin can have
+ # something to backtrack to if it fails. The root state is basically
+ # pinning the virtual "root" package in the graph.
+ self._push_new_state()
+
+ for round_index in range(max_rounds):
+ self._r.starting_round(index=round_index)
+
+ unsatisfied_names = [
+ key
+ for key, criterion in self.state.criteria.items()
+ if not self._is_current_pin_satisfying(key, criterion)
+ ]
+
+ # All criteria are accounted for. Nothing more to pin, we are done!
+ if not unsatisfied_names:
+ self._r.ending(state=self.state)
+ return self.state
+
+ # Choose the most preferred unpinned criterion to try.
+ name = min(unsatisfied_names, key=self._get_preference)
+ failure_causes = self._attempt_to_pin_criterion(name)
+
+ if failure_causes:
+ causes = [i for c in failure_causes for i in c.information]
+ # Backtrack if pinning fails. The backtrack process puts us in
+ # an unpinned state, so we can work on it in the next round.
+ self._r.resolving_conflicts(causes=causes)
+ success = self._backtrack()
+ self.state.backtrack_causes[:] = causes
+
+ # Dead ends everywhere. Give up.
+ if not success:
+ raise ResolutionImpossible(self.state.backtrack_causes)
+ else:
+ # Pinning was successful. Push a new state to do another pin.
+ self._push_new_state()
+
+ self._r.ending_round(index=round_index, state=self.state)
+
+ raise ResolutionTooDeep(max_rounds)
+
+
+def _has_route_to_root(criteria, key, all_keys, connected):
+ if key in connected:
+ return True
+ if key not in criteria:
+ return False
+ for p in criteria[key].iter_parent():
+ try:
+ pkey = all_keys[id(p)]
+ except KeyError:
+ continue
+ if pkey in connected:
+ connected.add(key)
+ return True
+ if _has_route_to_root(criteria, pkey, all_keys, connected):
+ connected.add(key)
+ return True
+ return False
+
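+ # For example: if "b" was only ever required by a candidate of "a" that
+ # backtracking has since removed from the mapping, no parent chain from
+ # "b" reaches the root sentinel (None), and _build_result() below prunes
+ # "b" from the returned mapping and graph.
+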
+
+Result = collections.namedtuple("Result", "mapping graph criteria")
+
+
+def _build_result(state):
+ mapping = state.mapping
+ all_keys = {id(v): k for k, v in mapping.items()}
+ all_keys[id(None)] = None
+
+ graph = DirectedGraph()
+ graph.add(None) # Sentinel as root dependencies' parent.
+
+ connected = {None}
+ for key, criterion in state.criteria.items():
+ if not _has_route_to_root(state.criteria, key, all_keys, connected):
+ continue
+ if key not in graph:
+ graph.add(key)
+ for p in criterion.iter_parent():
+ try:
+ pkey = all_keys[id(p)]
+ except KeyError:
+ continue
+ if pkey not in graph:
+ graph.add(pkey)
+ graph.connect(pkey, key)
+
+ return Result(
+ mapping={k: v for k, v in mapping.items() if k in connected},
+ graph=graph,
+ criteria=state.criteria,
+ )
+
+
+class Resolver(AbstractResolver):
+ """The thing that performs the actual resolution work."""
+
+ base_exception = ResolverException
+
+ def resolve(self, requirements, max_rounds=100):
+ """Take a collection of constraints, spit out the resolution result.
+
+ The return value is a representation of the final resolution result. It
+ is a tuple subclass with three public members:
+
+ * `mapping`: A dict of resolved candidates. Each key is an identifier
+ of a requirement (as returned by the provider's `identify` method),
+ and the value is the resolved candidate.
+ * `graph`: A `DirectedGraph` instance representing the dependency tree.
+ The vertices are keys of `mapping`, and each edge represents *why*
+ a particular package is included. A special vertex `None` is
+ included to represent parents of user-supplied requirements.
+ * `criteria`: A dict of "criteria" that hold detailed information on
+ how edges in the graph are derived. Each key is an identifier of a
+ requirement, and the value is a `Criterion` instance.
+
+ The following exceptions may be raised if a resolution cannot be found:
+
+ * `ResolutionImpossible`: A resolution cannot be found for the given
+ combination of requirements. The `causes` attribute of the
+ exception is a list of (requirement, parent), giving the
+ requirements that could not be satisfied.
+ * `ResolutionTooDeep`: The dependency tree is too deeply nested and
+ the resolver gave up. This is usually caused by a circular
+ dependency, but you can try to resolve this by increasing the
+ `max_rounds` argument.
+ """
+ resolution = Resolution(self.provider, self.reporter)
+ state = resolution.resolve(requirements, max_rounds=max_rounds)
+ return _build_result(state)
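The `Resolution` machinery above is driven entirely through the provider interface — the `self._p.identify`, `get_preference`, `find_matches`, `is_satisfied_by` and `get_dependencies` calls seen in this file. A minimal, self-contained sketch of that interface against a toy two-package index (the `ToyProvider`, the tuple requirements/candidates, and the version data below are all invented for illustration):

    from pip._vendor.resolvelib import AbstractProvider, BaseReporter, Resolver

    # Invented index: package name -> {version: [dependency names]}.
    UNIVERSE = {"a": {1: ["b"]}, "b": {1: [], 2: []}}

    class ToyProvider(AbstractProvider):
        def identify(self, requirement_or_candidate):
            return requirement_or_candidate[0]  # ("a",) and ("a", 1) -> "a"

        def get_preference(self, identifier, resolutions, candidates,
                           information, backtrack_causes):
            return identifier  # no real heuristic; resolve in name order

        def find_matches(self, identifier, requirements, incompatibilities):
            bad = set(incompatibilities[identifier])
            return [
                (identifier, version)
                for version in sorted(UNIVERSE[identifier], reverse=True)
                if (identifier, version) not in bad
                and all(self.is_satisfied_by(r, (identifier, version))
                        for r in requirements[identifier])
            ]

        def is_satisfied_by(self, requirement, candidate):
            return requirement[0] == candidate[0]  # name-only requirements

        def get_dependencies(self, candidate):
            name, version = candidate
            return [(dep,) for dep in UNIVERSE[name][version]]

    result = Resolver(ToyProvider(), BaseReporter()).resolve([("a",)])
    print(result.mapping)  # {"a": ("a", 1), "b": ("b", 2)}

The resolver treats requirement and candidate objects as opaque; every domain-specific decision happens in the provider.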
diff --git a/third_party/python/pip/pip/_vendor/resolvelib/structs.py b/third_party/python/pip/pip/_vendor/resolvelib/structs.py
new file mode 100644
index 0000000000..93d1568bd4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/resolvelib/structs.py
@@ -0,0 +1,165 @@
+import itertools
+
+from .compat import collections_abc
+
+
+class DirectedGraph(object):
+ """A graph structure with directed edges."""
+
+ def __init__(self):
+ self._vertices = set()
+ self._forwards = {} # <key> -> Set[<key>]
+ self._backwards = {} # <key> -> Set[<key>]
+
+ def __iter__(self):
+ return iter(self._vertices)
+
+ def __len__(self):
+ return len(self._vertices)
+
+ def __contains__(self, key):
+ return key in self._vertices
+
+ def copy(self):
+ """Return a shallow copy of this graph."""
+ other = DirectedGraph()
+ other._vertices = set(self._vertices)
+ other._forwards = {k: set(v) for k, v in self._forwards.items()}
+ other._backwards = {k: set(v) for k, v in self._backwards.items()}
+ return other
+
+ def add(self, key):
+ """Add a new vertex to the graph."""
+ if key in self._vertices:
+ raise ValueError("vertex exists")
+ self._vertices.add(key)
+ self._forwards[key] = set()
+ self._backwards[key] = set()
+
+ def remove(self, key):
+ """Remove a vertex from the graph, disconnecting all edges from/to it."""
+ self._vertices.remove(key)
+ for f in self._forwards.pop(key):
+ self._backwards[f].remove(key)
+ for t in self._backwards.pop(key):
+ self._forwards[t].remove(key)
+
+ def connected(self, f, t):
+ return f in self._backwards[t] and t in self._forwards[f]
+
+ def connect(self, f, t):
+ """Connect two existing vertices.
+
+ Nothing happens if the vertices are already connected.
+ """
+ if t not in self._vertices:
+ raise KeyError(t)
+ self._forwards[f].add(t)
+ self._backwards[t].add(f)
+
+ def iter_edges(self):
+ for f, children in self._forwards.items():
+ for t in children:
+ yield f, t
+
+ def iter_children(self, key):
+ return iter(self._forwards[key])
+
+ def iter_parents(self, key):
+ return iter(self._backwards[key])
+
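+ # Example usage (illustrative): an edge points from a dependent to its
+ # dependency, recording why a package is kept.
+ #
+ #     graph = DirectedGraph()
+ #     graph.add("a")
+ #     graph.add("b")
+ #     graph.connect("a", "b")          # "a" requires "b"
+ #     list(graph.iter_children("a"))   # -> ["b"]
+ #     list(graph.iter_parents("b"))    # -> ["a"]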
+
+class IteratorMapping(collections_abc.Mapping):
+ def __init__(self, mapping, accessor, appends=None):
+ self._mapping = mapping
+ self._accessor = accessor
+ self._appends = appends or {}
+
+ def __repr__(self):
+ return "IteratorMapping({!r}, {!r}, {!r})".format(
+ self._mapping,
+ self._accessor,
+ self._appends,
+ )
+
+ def __bool__(self):
+ return bool(self._mapping or self._appends)
+
+ __nonzero__ = __bool__ # XXX: Python 2.
+
+ def __contains__(self, key):
+ return key in self._mapping or key in self._appends
+
+ def __getitem__(self, k):
+ try:
+ v = self._mapping[k]
+ except KeyError:
+ return iter(self._appends[k])
+ return itertools.chain(self._accessor(v), self._appends.get(k, ()))
+
+ def __iter__(self):
+ more = (k for k in self._appends if k not in self._mapping)
+ return itertools.chain(self._mapping, more)
+
+ def __len__(self):
+ more = sum(1 for k in self._appends if k not in self._mapping)
+ return len(self._mapping) + more
+
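+ # Example (illustrative): the accessor is applied lazily per key, and
+ # extra values are chained in without copying the underlying mapping.
+ #
+ #     m = IteratorMapping({"a": [1, 2]}, iter, {"a": [3], "b": [4]})
+ #     list(m["a"])   # -> [1, 2, 3]  (accessor output, then appends)
+ #     list(m["b"])   # -> [4]        (key only in the appends)
+ #     sorted(m)      # -> ["a", "b"]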
+
+class _FactoryIterableView(object):
+ """Wrap an iterator factory returned by `find_matches()`.
+
+ Calling `iter()` on this class invokes the underlying iterator
+ factory, making it a "collection with ordering" that can be iterated
+ through multiple times, but lacks the random access methods present
+ in built-in Python sequence types.
+ """
+
+ def __init__(self, factory):
+ self._factory = factory
+
+ def __repr__(self):
+ return "{}({})".format(type(self).__name__, list(self._factory()))
+
+ def __bool__(self):
+ try:
+ next(self._factory())
+ except StopIteration:
+ return False
+ return True
+
+ __nonzero__ = __bool__ # XXX: Python 2.
+
+ def __iter__(self):
+ return self._factory()
+
+
+class _SequenceIterableView(object):
+ """Wrap an iterable returned by find_matches().
+
+ This is essentially just a proxy to the underlying sequence that provides
+ the same interface as `_FactoryIterableView`.
+ """
+
+ def __init__(self, sequence):
+ self._sequence = sequence
+
+ def __repr__(self):
+ return "{}({})".format(type(self).__name__, self._sequence)
+
+ def __bool__(self):
+ return bool(self._sequence)
+
+ __nonzero__ = __bool__ # XXX: Python 2.
+
+ def __iter__(self):
+ return iter(self._sequence)
+
+
+def build_iter_view(matches):
+ """Build an iterable view from the value returned by `find_matches()`."""
+ if callable(matches):
+ return _FactoryIterableView(matches)
+ if not isinstance(matches, collections_abc.Sequence):
+ matches = list(matches)
+ return _SequenceIterableView(matches)
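The two wrapper classes exist because `find_matches()` may return either a concrete sequence or a zero-argument callable producing a fresh iterator, and the resolver walks the candidates more than once; a bare generator would be exhausted after the first pass. A small sketch (the candidate strings are made up):

    def make_matches():
        # A fresh iterator per call, so the view can be traversed repeatedly.
        return iter(["cand-2.0", "cand-1.0"])

    view = build_iter_view(make_matches)       # -> _FactoryIterableView
    assert list(view) == list(view)            # re-iterable, unlike a generator
    assert bool(view)                          # truthiness advances a fresh iterator once

    seq_view = build_iter_view(["cand-1.0"])   # -> _SequenceIterableView
    assert list(seq_view) == ["cand-1.0"]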
diff --git a/third_party/python/pip/pip/_vendor/rich/__init__.py b/third_party/python/pip/pip/_vendor/rich/__init__.py
new file mode 100644
index 0000000000..73f58d7740
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/__init__.py
@@ -0,0 +1,177 @@
+"""Rich text and beautiful formatting in the terminal."""
+
+import os
+from typing import IO, TYPE_CHECKING, Any, Callable, Optional, Union
+
+from ._extension import load_ipython_extension # noqa: F401
+
+__all__ = ["get_console", "reconfigure", "print", "inspect", "print_json"]
+
+if TYPE_CHECKING:
+ from .console import Console
+
+# Global console used by alternative print
+_console: Optional["Console"] = None
+
+try:
+ _IMPORT_CWD = os.path.abspath(os.getcwd())
+except FileNotFoundError:
+ # Can happen if the cwd has been deleted
+ _IMPORT_CWD = ""
+
+
+def get_console() -> "Console":
+ """Get a global :class:`~rich.console.Console` instance. This function is used when Rich requires a Console,
+ and hasn't been explicitly given one.
+
+ Returns:
+ Console: A console instance.
+ """
+ global _console
+ if _console is None:
+ from .console import Console
+
+ _console = Console()
+
+ return _console
+
+
+def reconfigure(*args: Any, **kwargs: Any) -> None:
+ """Reconfigures the global console by replacing it with another.
+
+ Args:
+ *args (Any): Positional arguments for the replacement :class:`~rich.console.Console`.
+ **kwargs (Any): Keyword arguments for the replacement :class:`~rich.console.Console`.
+ """
+ from pip._vendor.rich.console import Console
+
+ new_console = Console(*args, **kwargs)
+ _console = get_console()
+ _console.__dict__ = new_console.__dict__
+
+
+def print(
+ *objects: Any,
+ sep: str = " ",
+ end: str = "\n",
+ file: Optional[IO[str]] = None,
+ flush: bool = False,
+) -> None:
+ r"""Print object(s) supplied via positional arguments.
+ This function has an identical signature to the built-in print.
+ For more advanced features, see the :class:`~rich.console.Console` class.
+
+ Args:
+ sep (str, optional): Separator between printed objects. Defaults to " ".
+ end (str, optional): Character to write at end of output. Defaults to "\\n".
+ file (IO[str], optional): File to write to, or None for stdout. Defaults to None.
+ flush (bool, optional): Has no effect as Rich always flushes output. Defaults to False.
+
+ """
+ from .console import Console
+
+ write_console = get_console() if file is None else Console(file=file)
+ return write_console.print(*objects, sep=sep, end=end)
+
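+ # For instance (illustrative), the replacement print understands console
+ # markup on top of the built-in signature:
+ #
+ #     from pip._vendor.rich import print
+ #     print("[bold red]Alert![/bold red]", "details", sep=" -- ")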
+
+def print_json(
+ json: Optional[str] = None,
+ *,
+ data: Any = None,
+ indent: Union[None, int, str] = 2,
+ highlight: bool = True,
+ skip_keys: bool = False,
+ ensure_ascii: bool = False,
+ check_circular: bool = True,
+ allow_nan: bool = True,
+ default: Optional[Callable[[Any], Any]] = None,
+ sort_keys: bool = False,
+) -> None:
+ """Pretty prints JSON. Output will be valid JSON.
+
+ Args:
+ json (str): A string containing JSON.
+ data (Any): If json is not supplied, then encode this data.
+ indent (int, optional): Number of spaces to indent. Defaults to 2.
+ highlight (bool, optional): Enable highlighting of output. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+ default (Callable, optional): A callable that converts values that cannot be encoded
+ into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+ """
+
+ get_console().print_json(
+ json,
+ data=data,
+ indent=indent,
+ highlight=highlight,
+ skip_keys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+
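+ # For instance (illustrative), both calls below render colorized,
+ # pretty-printed JSON to the global console:
+ #
+ #     print_json('{"name": "pip", "size": 100}')
+ #     print_json(data={"name": "pip"}, indent=4)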
+
+def inspect(
+ obj: Any,
+ *,
+ console: Optional["Console"] = None,
+ title: Optional[str] = None,
+ help: bool = False,
+ methods: bool = False,
+ docs: bool = True,
+ private: bool = False,
+ dunder: bool = False,
+ sort: bool = True,
+ all: bool = False,
+ value: bool = True,
+) -> None:
+ """Inspect any Python object.
+
+ * inspect(<OBJECT>) to see summarized info.
+ * inspect(<OBJECT>, methods=True) to see methods.
+ * inspect(<OBJECT>, help=True) to see full (non-abbreviated) help.
+ * inspect(<OBJECT>, private=True) to see private attributes (single underscore).
+ * inspect(<OBJECT>, dunder=True) to see attributes beginning with double underscore.
+ * inspect(<OBJECT>, all=True) to see all attributes.
+
+ Args:
+ obj (Any): An object to inspect.
+ title (str, optional): Title to display over inspect result, or None to use the type. Defaults to None.
+ help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
+ methods (bool, optional): Enable inspection of callables. Defaults to False.
+ docs (bool, optional): Also render doc strings. Defaults to True.
+ private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
+ dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
+ sort (bool, optional): Sort attributes alphabetically. Defaults to True.
+ all (bool, optional): Show all attributes. Defaults to False.
+ value (bool, optional): Pretty print value. Defaults to True.
+ """
+ _console = console or get_console()
+ from pip._vendor.rich._inspect import Inspect
+
+ # Special case for inspect(inspect)
+ is_inspect = obj is inspect
+
+ _inspect = Inspect(
+ obj,
+ title=title,
+ help=is_inspect or help,
+ methods=is_inspect or methods,
+ docs=is_inspect or docs,
+ private=private,
+ dunder=dunder,
+ sort=sort,
+ all=all,
+ value=value,
+ )
+ _console.print(_inspect)
+
+
+if __name__ == "__main__": # pragma: no cover
+ print("Hello, **World**")
diff --git a/third_party/python/pip/pip/_vendor/rich/__main__.py b/third_party/python/pip/pip/_vendor/rich/__main__.py
new file mode 100644
index 0000000000..270629fd80
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/__main__.py
@@ -0,0 +1,274 @@
+import colorsys
+import io
+from time import process_time
+
+from pip._vendor.rich import box
+from pip._vendor.rich.color import Color
+from pip._vendor.rich.console import Console, ConsoleOptions, Group, RenderableType, RenderResult
+from pip._vendor.rich.markdown import Markdown
+from pip._vendor.rich.measure import Measurement
+from pip._vendor.rich.pretty import Pretty
+from pip._vendor.rich.segment import Segment
+from pip._vendor.rich.style import Style
+from pip._vendor.rich.syntax import Syntax
+from pip._vendor.rich.table import Table
+from pip._vendor.rich.text import Text
+
+
+class ColorBox:
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ for y in range(0, 5):
+ for x in range(options.max_width):
+ h = x / options.max_width
+ lightness = 0.1 + ((y / 5) * 0.7)
+ r1, g1, b1 = colorsys.hls_to_rgb(h, lightness, 1.0)
+ r2, g2, b2 = colorsys.hls_to_rgb(h, lightness + 0.7 / 10, 1.0)
+ bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
+ color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
+ yield Segment("▄", Style(color=color, bgcolor=bgcolor))
+ yield Segment.line()
+
+ def __rich_measure__(
+ self, console: "Console", options: ConsoleOptions
+ ) -> Measurement:
+ return Measurement(1, options.max_width)
+
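+ # ColorBox is a custom "renderable": any object that implements
+ # __rich_console__ (and optionally __rich_measure__) can be handed
+ # straight to Console.print, e.g. (illustrative):
+ #
+ #     Console(force_terminal=True).print(ColorBox())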
+
+def make_test_card() -> Table:
+ """Get a renderable that demonstrates a number of features."""
+ table = Table.grid(padding=1, pad_edge=True)
+ table.title = "Rich features"
+ table.add_column("Feature", no_wrap=True, justify="center", style="bold red")
+ table.add_column("Demonstration")
+
+ color_table = Table(
+ box=None,
+ expand=False,
+ show_header=False,
+ show_edge=False,
+ pad_edge=False,
+ )
+ color_table.add_row(
+ (
+ "✓ [bold green]4-bit color[/]\n"
+ "✓ [bold blue]8-bit color[/]\n"
+ "✓ [bold magenta]Truecolor (16.7 million)[/]\n"
+ "✓ [bold yellow]Dumb terminals[/]\n"
+ "✓ [bold cyan]Automatic color conversion"
+ ),
+ ColorBox(),
+ )
+
+ table.add_row("Colors", color_table)
+
+ table.add_row(
+ "Styles",
+ "All ansi styles: [bold]bold[/], [dim]dim[/], [italic]italic[/italic], [underline]underline[/], [strike]strikethrough[/], [reverse]reverse[/], and even [blink]blink[/].",
+ )
+
+ lorem = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Quisque in metus sed sapien ultricies pretium a at justo. Maecenas luctus velit et auctor maximus."
+ lorem_table = Table.grid(padding=1, collapse_padding=True)
+ lorem_table.pad_edge = False
+ lorem_table.add_row(
+ Text(lorem, justify="left", style="green"),
+ Text(lorem, justify="center", style="yellow"),
+ Text(lorem, justify="right", style="blue"),
+ Text(lorem, justify="full", style="red"),
+ )
+ table.add_row(
+ "Text",
+ Group(
+ Text.from_markup(
+ """Word wrap text. Justify [green]left[/], [yellow]center[/], [blue]right[/] or [red]full[/].\n"""
+ ),
+ lorem_table,
+ ),
+ )
+
+ def comparison(renderable1: RenderableType, renderable2: RenderableType) -> Table:
+ table = Table(show_header=False, pad_edge=False, box=None, expand=True)
+ table.add_column("1", ratio=1)
+ table.add_column("2", ratio=1)
+ table.add_row(renderable1, renderable2)
+ return table
+
+ table.add_row(
+ "Asian\nlanguage\nsupport",
+ ":flag_for_china: 该库支持中文,日文和韩文文本!\n:flag_for_japan: ライブラリは中国語、日本語、韓国語のテキストをサポートしています\n:flag_for_south_korea: 이 라이브러리는 중국어, 일본어 및 한국어 텍스트를 지원합니다",
+ )
+
+ markup_example = (
+ "[bold magenta]Rich[/] supports a simple [i]bbcode[/i]-like [b]markup[/b] for [yellow]color[/], [underline]style[/], and emoji! "
+ ":+1: :apple: :ant: :bear: :baguette_bread: :bus: "
+ )
+ table.add_row("Markup", markup_example)
+
+ example_table = Table(
+ show_edge=False,
+ show_header=True,
+ expand=False,
+ row_styles=["none", "dim"],
+ box=box.SIMPLE,
+ )
+ example_table.add_column("[green]Date", style="green", no_wrap=True)
+ example_table.add_column("[blue]Title", style="blue")
+ example_table.add_column(
+ "[cyan]Production Budget",
+ style="cyan",
+ justify="right",
+ no_wrap=True,
+ )
+ example_table.add_column(
+ "[magenta]Box Office",
+ style="magenta",
+ justify="right",
+ no_wrap=True,
+ )
+ example_table.add_row(
+ "Dec 20, 2019",
+ "Star Wars: The Rise of Skywalker",
+ "$275,000,000",
+ "$375,126,118",
+ )
+ example_table.add_row(
+ "May 25, 2018",
+ "[b]Solo[/]: A Star Wars Story",
+ "$275,000,000",
+ "$393,151,347",
+ )
+ example_table.add_row(
+ "Dec 15, 2017",
+ "Star Wars Ep. VIII: The Last Jedi",
+ "$262,000,000",
+ "[bold]$1,332,539,889[/bold]",
+ )
+ example_table.add_row(
+ "May 19, 1999",
+ "Star Wars Ep. [b]I[/b]: [i]The phantom Menace",
+ "$115,000,000",
+ "$1,027,044,677",
+ )
+
+ table.add_row("Tables", example_table)
+
+ code = '''\
+def iter_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for last value."""
+ iter_values = iter(values)
+ try:
+ previous_value = next(iter_values)
+ except StopIteration:
+ return
+ for value in iter_values:
+ yield False, previous_value
+ previous_value = value
+ yield True, previous_value'''
+
+ pretty_data = {
+ "foo": [
+ 3.1427,
+ (
+ "Paul Atreides",
+ "Vladimir Harkonnen",
+ "Thufir Hawat",
+ ),
+ ],
+ "atomic": (False, True, None),
+ }
+ table.add_row(
+ "Syntax\nhighlighting\n&\npretty\nprinting",
+ comparison(
+ Syntax(code, "python3", line_numbers=True, indent_guides=True),
+ Pretty(pretty_data, indent_guides=True),
+ ),
+ )
+
+ markdown_example = """\
+# Markdown
+
+Supports much of the *markdown* __syntax__!
+
+- Headers
+- Basic formatting: **bold**, *italic*, `code`
+- Block quotes
+- Lists, and more...
+ """
+ table.add_row(
+ "Markdown", comparison("[cyan]" + markdown_example, Markdown(markdown_example))
+ )
+
+ table.add_row(
+ "+more!",
+ """Progress bars, columns, styled logging handler, tracebacks, etc...""",
+ )
+ return table
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ console = Console(
+ file=io.StringIO(),
+ force_terminal=True,
+ )
+ test_card = make_test_card()
+
+ # Print once to warm cache
+ start = process_time()
+ console.print(test_card)
+ pre_cache_taken = round((process_time() - start) * 1000.0, 1)
+
+ console.file = io.StringIO()
+
+ start = process_time()
+ console.print(test_card)
+ taken = round((process_time() - start) * 1000.0, 1)
+
+ c = Console(record=True)
+ c.print(test_card)
+
+ print(f"rendered in {pre_cache_taken}ms (cold cache)")
+ print(f"rendered in {taken}ms (warm cache)")
+
+ from pip._vendor.rich.panel import Panel
+
+ console = Console()
+
+ sponsor_message = Table.grid(padding=1)
+ sponsor_message.add_column(style="green", justify="right")
+ sponsor_message.add_column(no_wrap=True)
+
+ sponsor_message.add_row(
+ "Textualize",
+ "[u blue link=https://github.com/textualize]https://github.com/textualize",
+ )
+ sponsor_message.add_row(
+ "Twitter",
+ "[u blue link=https://twitter.com/willmcgugan]https://twitter.com/willmcgugan",
+ )
+
+ intro_message = Text.from_markup(
+ """\
+We hope you enjoy using Rich!
+
+Rich is maintained with [red]:heart:[/] by [link=https://www.textualize.io]Textualize.io[/]
+
+- Will McGugan"""
+ )
+
+ message = Table.grid(padding=2)
+ message.add_column()
+ message.add_column(no_wrap=True)
+ message.add_row(intro_message, sponsor_message)
+
+ console.print(
+ Panel.fit(
+ message,
+ box=box.ROUNDED,
+ padding=(1, 2),
+ title="[b red]Thanks for trying out Rich!",
+ border_style="bright_blue",
+ ),
+ justify="center",
+ )
diff --git a/third_party/python/pip/pip/_vendor/rich/_cell_widths.py b/third_party/python/pip/pip/_vendor/rich/_cell_widths.py
new file mode 100644
index 0000000000..36286df379
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_cell_widths.py
@@ -0,0 +1,451 @@
+# Auto generated by make_terminal_widths.py
+
+CELL_WIDTHS = [
+ (0, 0, 0),
+ (1, 31, -1),
+ (127, 159, -1),
+ (768, 879, 0),
+ (1155, 1161, 0),
+ (1425, 1469, 0),
+ (1471, 1471, 0),
+ (1473, 1474, 0),
+ (1476, 1477, 0),
+ (1479, 1479, 0),
+ (1552, 1562, 0),
+ (1611, 1631, 0),
+ (1648, 1648, 0),
+ (1750, 1756, 0),
+ (1759, 1764, 0),
+ (1767, 1768, 0),
+ (1770, 1773, 0),
+ (1809, 1809, 0),
+ (1840, 1866, 0),
+ (1958, 1968, 0),
+ (2027, 2035, 0),
+ (2045, 2045, 0),
+ (2070, 2073, 0),
+ (2075, 2083, 0),
+ (2085, 2087, 0),
+ (2089, 2093, 0),
+ (2137, 2139, 0),
+ (2259, 2273, 0),
+ (2275, 2306, 0),
+ (2362, 2362, 0),
+ (2364, 2364, 0),
+ (2369, 2376, 0),
+ (2381, 2381, 0),
+ (2385, 2391, 0),
+ (2402, 2403, 0),
+ (2433, 2433, 0),
+ (2492, 2492, 0),
+ (2497, 2500, 0),
+ (2509, 2509, 0),
+ (2530, 2531, 0),
+ (2558, 2558, 0),
+ (2561, 2562, 0),
+ (2620, 2620, 0),
+ (2625, 2626, 0),
+ (2631, 2632, 0),
+ (2635, 2637, 0),
+ (2641, 2641, 0),
+ (2672, 2673, 0),
+ (2677, 2677, 0),
+ (2689, 2690, 0),
+ (2748, 2748, 0),
+ (2753, 2757, 0),
+ (2759, 2760, 0),
+ (2765, 2765, 0),
+ (2786, 2787, 0),
+ (2810, 2815, 0),
+ (2817, 2817, 0),
+ (2876, 2876, 0),
+ (2879, 2879, 0),
+ (2881, 2884, 0),
+ (2893, 2893, 0),
+ (2901, 2902, 0),
+ (2914, 2915, 0),
+ (2946, 2946, 0),
+ (3008, 3008, 0),
+ (3021, 3021, 0),
+ (3072, 3072, 0),
+ (3076, 3076, 0),
+ (3134, 3136, 0),
+ (3142, 3144, 0),
+ (3146, 3149, 0),
+ (3157, 3158, 0),
+ (3170, 3171, 0),
+ (3201, 3201, 0),
+ (3260, 3260, 0),
+ (3263, 3263, 0),
+ (3270, 3270, 0),
+ (3276, 3277, 0),
+ (3298, 3299, 0),
+ (3328, 3329, 0),
+ (3387, 3388, 0),
+ (3393, 3396, 0),
+ (3405, 3405, 0),
+ (3426, 3427, 0),
+ (3457, 3457, 0),
+ (3530, 3530, 0),
+ (3538, 3540, 0),
+ (3542, 3542, 0),
+ (3633, 3633, 0),
+ (3636, 3642, 0),
+ (3655, 3662, 0),
+ (3761, 3761, 0),
+ (3764, 3772, 0),
+ (3784, 3789, 0),
+ (3864, 3865, 0),
+ (3893, 3893, 0),
+ (3895, 3895, 0),
+ (3897, 3897, 0),
+ (3953, 3966, 0),
+ (3968, 3972, 0),
+ (3974, 3975, 0),
+ (3981, 3991, 0),
+ (3993, 4028, 0),
+ (4038, 4038, 0),
+ (4141, 4144, 0),
+ (4146, 4151, 0),
+ (4153, 4154, 0),
+ (4157, 4158, 0),
+ (4184, 4185, 0),
+ (4190, 4192, 0),
+ (4209, 4212, 0),
+ (4226, 4226, 0),
+ (4229, 4230, 0),
+ (4237, 4237, 0),
+ (4253, 4253, 0),
+ (4352, 4447, 2),
+ (4957, 4959, 0),
+ (5906, 5908, 0),
+ (5938, 5940, 0),
+ (5970, 5971, 0),
+ (6002, 6003, 0),
+ (6068, 6069, 0),
+ (6071, 6077, 0),
+ (6086, 6086, 0),
+ (6089, 6099, 0),
+ (6109, 6109, 0),
+ (6155, 6157, 0),
+ (6277, 6278, 0),
+ (6313, 6313, 0),
+ (6432, 6434, 0),
+ (6439, 6440, 0),
+ (6450, 6450, 0),
+ (6457, 6459, 0),
+ (6679, 6680, 0),
+ (6683, 6683, 0),
+ (6742, 6742, 0),
+ (6744, 6750, 0),
+ (6752, 6752, 0),
+ (6754, 6754, 0),
+ (6757, 6764, 0),
+ (6771, 6780, 0),
+ (6783, 6783, 0),
+ (6832, 6848, 0),
+ (6912, 6915, 0),
+ (6964, 6964, 0),
+ (6966, 6970, 0),
+ (6972, 6972, 0),
+ (6978, 6978, 0),
+ (7019, 7027, 0),
+ (7040, 7041, 0),
+ (7074, 7077, 0),
+ (7080, 7081, 0),
+ (7083, 7085, 0),
+ (7142, 7142, 0),
+ (7144, 7145, 0),
+ (7149, 7149, 0),
+ (7151, 7153, 0),
+ (7212, 7219, 0),
+ (7222, 7223, 0),
+ (7376, 7378, 0),
+ (7380, 7392, 0),
+ (7394, 7400, 0),
+ (7405, 7405, 0),
+ (7412, 7412, 0),
+ (7416, 7417, 0),
+ (7616, 7673, 0),
+ (7675, 7679, 0),
+ (8203, 8207, 0),
+ (8232, 8238, 0),
+ (8288, 8291, 0),
+ (8400, 8432, 0),
+ (8986, 8987, 2),
+ (9001, 9002, 2),
+ (9193, 9196, 2),
+ (9200, 9200, 2),
+ (9203, 9203, 2),
+ (9725, 9726, 2),
+ (9748, 9749, 2),
+ (9800, 9811, 2),
+ (9855, 9855, 2),
+ (9875, 9875, 2),
+ (9889, 9889, 2),
+ (9898, 9899, 2),
+ (9917, 9918, 2),
+ (9924, 9925, 2),
+ (9934, 9934, 2),
+ (9940, 9940, 2),
+ (9962, 9962, 2),
+ (9970, 9971, 2),
+ (9973, 9973, 2),
+ (9978, 9978, 2),
+ (9981, 9981, 2),
+ (9989, 9989, 2),
+ (9994, 9995, 2),
+ (10024, 10024, 2),
+ (10060, 10060, 2),
+ (10062, 10062, 2),
+ (10067, 10069, 2),
+ (10071, 10071, 2),
+ (10133, 10135, 2),
+ (10160, 10160, 2),
+ (10175, 10175, 2),
+ (11035, 11036, 2),
+ (11088, 11088, 2),
+ (11093, 11093, 2),
+ (11503, 11505, 0),
+ (11647, 11647, 0),
+ (11744, 11775, 0),
+ (11904, 11929, 2),
+ (11931, 12019, 2),
+ (12032, 12245, 2),
+ (12272, 12283, 2),
+ (12288, 12329, 2),
+ (12330, 12333, 0),
+ (12334, 12350, 2),
+ (12353, 12438, 2),
+ (12441, 12442, 0),
+ (12443, 12543, 2),
+ (12549, 12591, 2),
+ (12593, 12686, 2),
+ (12688, 12771, 2),
+ (12784, 12830, 2),
+ (12832, 12871, 2),
+ (12880, 19903, 2),
+ (19968, 42124, 2),
+ (42128, 42182, 2),
+ (42607, 42610, 0),
+ (42612, 42621, 0),
+ (42654, 42655, 0),
+ (42736, 42737, 0),
+ (43010, 43010, 0),
+ (43014, 43014, 0),
+ (43019, 43019, 0),
+ (43045, 43046, 0),
+ (43052, 43052, 0),
+ (43204, 43205, 0),
+ (43232, 43249, 0),
+ (43263, 43263, 0),
+ (43302, 43309, 0),
+ (43335, 43345, 0),
+ (43360, 43388, 2),
+ (43392, 43394, 0),
+ (43443, 43443, 0),
+ (43446, 43449, 0),
+ (43452, 43453, 0),
+ (43493, 43493, 0),
+ (43561, 43566, 0),
+ (43569, 43570, 0),
+ (43573, 43574, 0),
+ (43587, 43587, 0),
+ (43596, 43596, 0),
+ (43644, 43644, 0),
+ (43696, 43696, 0),
+ (43698, 43700, 0),
+ (43703, 43704, 0),
+ (43710, 43711, 0),
+ (43713, 43713, 0),
+ (43756, 43757, 0),
+ (43766, 43766, 0),
+ (44005, 44005, 0),
+ (44008, 44008, 0),
+ (44013, 44013, 0),
+ (44032, 55203, 2),
+ (63744, 64255, 2),
+ (64286, 64286, 0),
+ (65024, 65039, 0),
+ (65040, 65049, 2),
+ (65056, 65071, 0),
+ (65072, 65106, 2),
+ (65108, 65126, 2),
+ (65128, 65131, 2),
+ (65281, 65376, 2),
+ (65504, 65510, 2),
+ (66045, 66045, 0),
+ (66272, 66272, 0),
+ (66422, 66426, 0),
+ (68097, 68099, 0),
+ (68101, 68102, 0),
+ (68108, 68111, 0),
+ (68152, 68154, 0),
+ (68159, 68159, 0),
+ (68325, 68326, 0),
+ (68900, 68903, 0),
+ (69291, 69292, 0),
+ (69446, 69456, 0),
+ (69633, 69633, 0),
+ (69688, 69702, 0),
+ (69759, 69761, 0),
+ (69811, 69814, 0),
+ (69817, 69818, 0),
+ (69888, 69890, 0),
+ (69927, 69931, 0),
+ (69933, 69940, 0),
+ (70003, 70003, 0),
+ (70016, 70017, 0),
+ (70070, 70078, 0),
+ (70089, 70092, 0),
+ (70095, 70095, 0),
+ (70191, 70193, 0),
+ (70196, 70196, 0),
+ (70198, 70199, 0),
+ (70206, 70206, 0),
+ (70367, 70367, 0),
+ (70371, 70378, 0),
+ (70400, 70401, 0),
+ (70459, 70460, 0),
+ (70464, 70464, 0),
+ (70502, 70508, 0),
+ (70512, 70516, 0),
+ (70712, 70719, 0),
+ (70722, 70724, 0),
+ (70726, 70726, 0),
+ (70750, 70750, 0),
+ (70835, 70840, 0),
+ (70842, 70842, 0),
+ (70847, 70848, 0),
+ (70850, 70851, 0),
+ (71090, 71093, 0),
+ (71100, 71101, 0),
+ (71103, 71104, 0),
+ (71132, 71133, 0),
+ (71219, 71226, 0),
+ (71229, 71229, 0),
+ (71231, 71232, 0),
+ (71339, 71339, 0),
+ (71341, 71341, 0),
+ (71344, 71349, 0),
+ (71351, 71351, 0),
+ (71453, 71455, 0),
+ (71458, 71461, 0),
+ (71463, 71467, 0),
+ (71727, 71735, 0),
+ (71737, 71738, 0),
+ (71995, 71996, 0),
+ (71998, 71998, 0),
+ (72003, 72003, 0),
+ (72148, 72151, 0),
+ (72154, 72155, 0),
+ (72160, 72160, 0),
+ (72193, 72202, 0),
+ (72243, 72248, 0),
+ (72251, 72254, 0),
+ (72263, 72263, 0),
+ (72273, 72278, 0),
+ (72281, 72283, 0),
+ (72330, 72342, 0),
+ (72344, 72345, 0),
+ (72752, 72758, 0),
+ (72760, 72765, 0),
+ (72767, 72767, 0),
+ (72850, 72871, 0),
+ (72874, 72880, 0),
+ (72882, 72883, 0),
+ (72885, 72886, 0),
+ (73009, 73014, 0),
+ (73018, 73018, 0),
+ (73020, 73021, 0),
+ (73023, 73029, 0),
+ (73031, 73031, 0),
+ (73104, 73105, 0),
+ (73109, 73109, 0),
+ (73111, 73111, 0),
+ (73459, 73460, 0),
+ (92912, 92916, 0),
+ (92976, 92982, 0),
+ (94031, 94031, 0),
+ (94095, 94098, 0),
+ (94176, 94179, 2),
+ (94180, 94180, 0),
+ (94192, 94193, 2),
+ (94208, 100343, 2),
+ (100352, 101589, 2),
+ (101632, 101640, 2),
+ (110592, 110878, 2),
+ (110928, 110930, 2),
+ (110948, 110951, 2),
+ (110960, 111355, 2),
+ (113821, 113822, 0),
+ (119143, 119145, 0),
+ (119163, 119170, 0),
+ (119173, 119179, 0),
+ (119210, 119213, 0),
+ (119362, 119364, 0),
+ (121344, 121398, 0),
+ (121403, 121452, 0),
+ (121461, 121461, 0),
+ (121476, 121476, 0),
+ (121499, 121503, 0),
+ (121505, 121519, 0),
+ (122880, 122886, 0),
+ (122888, 122904, 0),
+ (122907, 122913, 0),
+ (122915, 122916, 0),
+ (122918, 122922, 0),
+ (123184, 123190, 0),
+ (123628, 123631, 0),
+ (125136, 125142, 0),
+ (125252, 125258, 0),
+ (126980, 126980, 2),
+ (127183, 127183, 2),
+ (127374, 127374, 2),
+ (127377, 127386, 2),
+ (127488, 127490, 2),
+ (127504, 127547, 2),
+ (127552, 127560, 2),
+ (127568, 127569, 2),
+ (127584, 127589, 2),
+ (127744, 127776, 2),
+ (127789, 127797, 2),
+ (127799, 127868, 2),
+ (127870, 127891, 2),
+ (127904, 127946, 2),
+ (127951, 127955, 2),
+ (127968, 127984, 2),
+ (127988, 127988, 2),
+ (127992, 128062, 2),
+ (128064, 128064, 2),
+ (128066, 128252, 2),
+ (128255, 128317, 2),
+ (128331, 128334, 2),
+ (128336, 128359, 2),
+ (128378, 128378, 2),
+ (128405, 128406, 2),
+ (128420, 128420, 2),
+ (128507, 128591, 2),
+ (128640, 128709, 2),
+ (128716, 128716, 2),
+ (128720, 128722, 2),
+ (128725, 128727, 2),
+ (128747, 128748, 2),
+ (128756, 128764, 2),
+ (128992, 129003, 2),
+ (129292, 129338, 2),
+ (129340, 129349, 2),
+ (129351, 129400, 2),
+ (129402, 129483, 2),
+ (129485, 129535, 2),
+ (129648, 129652, 2),
+ (129656, 129658, 2),
+ (129664, 129670, 2),
+ (129680, 129704, 2),
+ (129712, 129718, 2),
+ (129728, 129730, 2),
+ (129744, 129750, 2),
+ (131072, 196605, 2),
+ (196608, 262141, 2),
+ (917760, 917999, 0),
+]
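Each triple above is an inclusive codepoint range plus the terminal cell width of the characters in it: -1 marks control characters, 0 combining marks, and 2 wide (East Asian and emoji) characters; codepoints not listed occupy one cell. The ranges are sorted and disjoint, so a lookup reduces to a binary search. A minimal sketch of such a lookup, assuming this module's CELL_WIDTHS (Rich's own helper lives elsewhere in the package and may differ in detail):

    from bisect import bisect

    def cell_width(codepoint: int, table=CELL_WIDTHS) -> int:
        """Terminal cell width of a codepoint, via binary search."""
        # Rightmost range whose start is <= codepoint.
        index = bisect(table, (codepoint + 1,)) - 1
        if index >= 0:
            start, end, width = table[index]
            if start <= codepoint <= end:
                return width
        return 1  # not covered by the table: a single cell

    cell_width(ord("A"))   # 1 -- ASCII letters are not in the table
    cell_width(0x6F22)     # 2 -- CJK ideographs are double width
    cell_width(0x0301)     # 0 -- combining acute accent takes no cell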
diff --git a/third_party/python/pip/pip/_vendor/rich/_emoji_codes.py b/third_party/python/pip/pip/_vendor/rich/_emoji_codes.py
new file mode 100644
index 0000000000..1f2877bb2b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_emoji_codes.py
@@ -0,0 +1,3610 @@
+EMOJI = {
+ "1st_place_medal": "🥇",
+ "2nd_place_medal": "🥈",
+ "3rd_place_medal": "🥉",
+ "ab_button_(blood_type)": "🆎",
+ "atm_sign": "🏧",
+ "a_button_(blood_type)": "🅰",
+ "afghanistan": "🇦🇫",
+ "albania": "🇦🇱",
+ "algeria": "🇩🇿",
+ "american_samoa": "🇦🇸",
+ "andorra": "🇦🇩",
+ "angola": "🇦🇴",
+ "anguilla": "🇦🇮",
+ "antarctica": "🇦🇶",
+ "antigua_&_barbuda": "🇦🇬",
+ "aquarius": "♒",
+ "argentina": "🇦🇷",
+ "aries": "♈",
+ "armenia": "🇦🇲",
+ "aruba": "🇦🇼",
+ "ascension_island": "🇦🇨",
+ "australia": "🇦🇺",
+ "austria": "🇦🇹",
+ "azerbaijan": "🇦🇿",
+ "back_arrow": "🔙",
+ "b_button_(blood_type)": "🅱",
+ "bahamas": "🇧🇸",
+ "bahrain": "🇧🇭",
+ "bangladesh": "🇧🇩",
+ "barbados": "🇧🇧",
+ "belarus": "🇧🇾",
+ "belgium": "🇧🇪",
+ "belize": "🇧🇿",
+ "benin": "🇧🇯",
+ "bermuda": "🇧🇲",
+ "bhutan": "🇧🇹",
+ "bolivia": "🇧🇴",
+ "bosnia_&_herzegovina": "🇧🇦",
+ "botswana": "🇧🇼",
+ "bouvet_island": "🇧🇻",
+ "brazil": "🇧🇷",
+ "british_indian_ocean_territory": "🇮🇴",
+ "british_virgin_islands": "🇻🇬",
+ "brunei": "🇧🇳",
+ "bulgaria": "🇧🇬",
+ "burkina_faso": "🇧🇫",
+ "burundi": "🇧🇮",
+ "cl_button": "🆑",
+ "cool_button": "🆒",
+ "cambodia": "🇰🇭",
+ "cameroon": "🇨🇲",
+ "canada": "🇨🇦",
+ "canary_islands": "🇮🇨",
+ "cancer": "♋",
+ "cape_verde": "🇨🇻",
+ "capricorn": "♑",
+ "caribbean_netherlands": "🇧🇶",
+ "cayman_islands": "🇰🇾",
+ "central_african_republic": "🇨🇫",
+ "ceuta_&_melilla": "🇪🇦",
+ "chad": "🇹🇩",
+ "chile": "🇨🇱",
+ "china": "🇨🇳",
+ "christmas_island": "🇨🇽",
+ "christmas_tree": "🎄",
+ "clipperton_island": "🇨🇵",
+ "cocos_(keeling)_islands": "🇨🇨",
+ "colombia": "🇨🇴",
+ "comoros": "🇰🇲",
+ "congo_-_brazzaville": "🇨🇬",
+ "congo_-_kinshasa": "🇨🇩",
+ "cook_islands": "🇨🇰",
+ "costa_rica": "🇨🇷",
+ "croatia": "🇭🇷",
+ "cuba": "🇨🇺",
+ "curaçao": "🇨🇼",
+ "cyprus": "🇨🇾",
+ "czechia": "🇨🇿",
+ "côte_d’ivoire": "🇨🇮",
+ "denmark": "🇩🇰",
+ "diego_garcia": "🇩🇬",
+ "djibouti": "🇩🇯",
+ "dominica": "🇩🇲",
+ "dominican_republic": "🇩🇴",
+ "end_arrow": "🔚",
+ "ecuador": "🇪🇨",
+ "egypt": "🇪🇬",
+ "el_salvador": "🇸🇻",
+ "england": "🏴\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f",
+ "equatorial_guinea": "🇬🇶",
+ "eritrea": "🇪🇷",
+ "estonia": "🇪🇪",
+ "ethiopia": "🇪🇹",
+ "european_union": "🇪🇺",
+ "free_button": "🆓",
+ "falkland_islands": "🇫🇰",
+ "faroe_islands": "🇫🇴",
+ "fiji": "🇫🇯",
+ "finland": "🇫🇮",
+ "france": "🇫🇷",
+ "french_guiana": "🇬🇫",
+ "french_polynesia": "🇵🇫",
+ "french_southern_territories": "🇹🇫",
+ "gabon": "🇬🇦",
+ "gambia": "🇬🇲",
+ "gemini": "♊",
+ "georgia": "🇬🇪",
+ "germany": "🇩🇪",
+ "ghana": "🇬🇭",
+ "gibraltar": "🇬🇮",
+ "greece": "🇬🇷",
+ "greenland": "🇬🇱",
+ "grenada": "🇬🇩",
+ "guadeloupe": "🇬🇵",
+ "guam": "🇬🇺",
+ "guatemala": "🇬🇹",
+ "guernsey": "🇬🇬",
+ "guinea": "🇬🇳",
+ "guinea-bissau": "🇬🇼",
+ "guyana": "🇬🇾",
+ "haiti": "🇭🇹",
+ "heard_&_mcdonald_islands": "🇭🇲",
+ "honduras": "🇭🇳",
+ "hong_kong_sar_china": "🇭🇰",
+ "hungary": "🇭🇺",
+ "id_button": "🆔",
+ "iceland": "🇮🇸",
+ "india": "🇮🇳",
+ "indonesia": "🇮🇩",
+ "iran": "🇮🇷",
+ "iraq": "🇮🇶",
+ "ireland": "🇮🇪",
+ "isle_of_man": "🇮🇲",
+ "israel": "🇮🇱",
+ "italy": "🇮🇹",
+ "jamaica": "🇯🇲",
+ "japan": "🗾",
+ "japanese_acceptable_button": "🉑",
+ "japanese_application_button": "🈸",
+ "japanese_bargain_button": "🉐",
+ "japanese_castle": "🏯",
+ "japanese_congratulations_button": "㊗",
+ "japanese_discount_button": "🈹",
+ "japanese_dolls": "🎎",
+ "japanese_free_of_charge_button": "🈚",
+ "japanese_here_button": "🈁",
+ "japanese_monthly_amount_button": "🈷",
+ "japanese_no_vacancy_button": "🈵",
+ "japanese_not_free_of_charge_button": "🈶",
+ "japanese_open_for_business_button": "🈺",
+ "japanese_passing_grade_button": "🈴",
+ "japanese_post_office": "🏣",
+ "japanese_prohibited_button": "🈲",
+ "japanese_reserved_button": "🈯",
+ "japanese_secret_button": "㊙",
+ "japanese_service_charge_button": "🈂",
+ "japanese_symbol_for_beginner": "🔰",
+ "japanese_vacancy_button": "🈳",
+ "jersey": "🇯🇪",
+ "jordan": "🇯🇴",
+ "kazakhstan": "🇰🇿",
+ "kenya": "🇰🇪",
+ "kiribati": "🇰🇮",
+ "kosovo": "🇽🇰",
+ "kuwait": "🇰🇼",
+ "kyrgyzstan": "🇰🇬",
+ "laos": "🇱🇦",
+ "latvia": "🇱🇻",
+ "lebanon": "🇱🇧",
+ "leo": "♌",
+ "lesotho": "🇱🇸",
+ "liberia": "🇱🇷",
+ "libra": "♎",
+ "libya": "🇱🇾",
+ "liechtenstein": "🇱🇮",
+ "lithuania": "🇱🇹",
+ "luxembourg": "🇱🇺",
+ "macau_sar_china": "🇲🇴",
+ "macedonia": "🇲🇰",
+ "madagascar": "🇲🇬",
+ "malawi": "🇲🇼",
+ "malaysia": "🇲🇾",
+ "maldives": "🇲🇻",
+ "mali": "🇲🇱",
+ "malta": "🇲🇹",
+ "marshall_islands": "🇲🇭",
+ "martinique": "🇲🇶",
+ "mauritania": "🇲🇷",
+ "mauritius": "🇲🇺",
+ "mayotte": "🇾🇹",
+ "mexico": "🇲🇽",
+ "micronesia": "🇫🇲",
+ "moldova": "🇲🇩",
+ "monaco": "🇲🇨",
+ "mongolia": "🇲🇳",
+ "montenegro": "🇲🇪",
+ "montserrat": "🇲🇸",
+ "morocco": "🇲🇦",
+ "mozambique": "🇲🇿",
+ "mrs._claus": "🤶",
+ "mrs._claus_dark_skin_tone": "🤶🏿",
+ "mrs._claus_light_skin_tone": "🤶🏻",
+ "mrs._claus_medium-dark_skin_tone": "🤶🏾",
+ "mrs._claus_medium-light_skin_tone": "🤶🏼",
+ "mrs._claus_medium_skin_tone": "🤶🏽",
+ "myanmar_(burma)": "🇲🇲",
+ "new_button": "🆕",
+ "ng_button": "🆖",
+ "namibia": "🇳🇦",
+ "nauru": "🇳🇷",
+ "nepal": "🇳🇵",
+ "netherlands": "🇳🇱",
+ "new_caledonia": "🇳🇨",
+ "new_zealand": "🇳🇿",
+ "nicaragua": "🇳🇮",
+ "niger": "🇳🇪",
+ "nigeria": "🇳🇬",
+ "niue": "🇳🇺",
+ "norfolk_island": "🇳🇫",
+ "north_korea": "🇰🇵",
+ "northern_mariana_islands": "🇲🇵",
+ "norway": "🇳🇴",
+ "ok_button": "🆗",
+ "ok_hand": "👌",
+ "ok_hand_dark_skin_tone": "👌🏿",
+ "ok_hand_light_skin_tone": "👌🏻",
+ "ok_hand_medium-dark_skin_tone": "👌🏾",
+ "ok_hand_medium-light_skin_tone": "👌🏼",
+ "ok_hand_medium_skin_tone": "👌🏽",
+ "on!_arrow": "🔛",
+ "o_button_(blood_type)": "🅾",
+ "oman": "🇴🇲",
+ "ophiuchus": "⛎",
+ "p_button": "🅿",
+ "pakistan": "🇵🇰",
+ "palau": "🇵🇼",
+ "palestinian_territories": "🇵🇸",
+ "panama": "🇵🇦",
+ "papua_new_guinea": "🇵🇬",
+ "paraguay": "🇵🇾",
+ "peru": "🇵🇪",
+ "philippines": "🇵🇭",
+ "pisces": "♓",
+ "pitcairn_islands": "🇵🇳",
+ "poland": "🇵🇱",
+ "portugal": "🇵🇹",
+ "puerto_rico": "🇵🇷",
+ "qatar": "🇶🇦",
+ "romania": "🇷🇴",
+ "russia": "🇷🇺",
+ "rwanda": "🇷🇼",
+ "réunion": "🇷🇪",
+ "soon_arrow": "🔜",
+ "sos_button": "🆘",
+ "sagittarius": "♐",
+ "samoa": "🇼🇸",
+ "san_marino": "🇸🇲",
+ "santa_claus": "🎅",
+ "santa_claus_dark_skin_tone": "🎅🏿",
+ "santa_claus_light_skin_tone": "🎅🏻",
+ "santa_claus_medium-dark_skin_tone": "🎅🏾",
+ "santa_claus_medium-light_skin_tone": "🎅🏼",
+ "santa_claus_medium_skin_tone": "🎅🏽",
+ "saudi_arabia": "🇸🇦",
+ "scorpio": "♏",
+ "scotland": "🏴\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f",
+ "senegal": "🇸🇳",
+ "serbia": "🇷🇸",
+ "seychelles": "🇸🇨",
+ "sierra_leone": "🇸🇱",
+ "singapore": "🇸🇬",
+ "sint_maarten": "🇸🇽",
+ "slovakia": "🇸🇰",
+ "slovenia": "🇸🇮",
+ "solomon_islands": "🇸🇧",
+ "somalia": "🇸🇴",
+ "south_africa": "🇿🇦",
+ "south_georgia_&_south_sandwich_islands": "🇬🇸",
+ "south_korea": "🇰🇷",
+ "south_sudan": "🇸🇸",
+ "spain": "🇪🇸",
+ "sri_lanka": "🇱🇰",
+ "st._barthélemy": "🇧🇱",
+ "st._helena": "🇸🇭",
+ "st._kitts_&_nevis": "🇰🇳",
+ "st._lucia": "🇱🇨",
+ "st._martin": "🇲🇫",
+ "st._pierre_&_miquelon": "🇵🇲",
+ "st._vincent_&_grenadines": "🇻🇨",
+ "statue_of_liberty": "🗽",
+ "sudan": "🇸🇩",
+ "suriname": "🇸🇷",
+ "svalbard_&_jan_mayen": "🇸🇯",
+ "swaziland": "🇸🇿",
+ "sweden": "🇸🇪",
+ "switzerland": "🇨🇭",
+ "syria": "🇸🇾",
+ "são_tomé_&_príncipe": "🇸🇹",
+ "t-rex": "🦖",
+ "top_arrow": "🔝",
+ "taiwan": "🇹🇼",
+ "tajikistan": "🇹🇯",
+ "tanzania": "🇹🇿",
+ "taurus": "♉",
+ "thailand": "🇹🇭",
+ "timor-leste": "🇹🇱",
+ "togo": "🇹🇬",
+ "tokelau": "🇹🇰",
+ "tokyo_tower": "🗼",
+ "tonga": "🇹🇴",
+ "trinidad_&_tobago": "🇹🇹",
+ "tristan_da_cunha": "🇹🇦",
+ "tunisia": "🇹🇳",
+ "turkey": "🦃",
+ "turkmenistan": "🇹🇲",
+ "turks_&_caicos_islands": "🇹🇨",
+ "tuvalu": "🇹🇻",
+ "u.s._outlying_islands": "🇺🇲",
+ "u.s._virgin_islands": "🇻🇮",
+ "up!_button": "🆙",
+ "uganda": "🇺🇬",
+ "ukraine": "🇺🇦",
+ "united_arab_emirates": "🇦🇪",
+ "united_kingdom": "🇬🇧",
+ "united_nations": "🇺🇳",
+ "united_states": "🇺🇸",
+ "uruguay": "🇺🇾",
+ "uzbekistan": "🇺🇿",
+ "vs_button": "🆚",
+ "vanuatu": "🇻🇺",
+ "vatican_city": "🇻🇦",
+ "venezuela": "🇻🇪",
+ "vietnam": "🇻🇳",
+ "virgo": "♍",
+ "wales": "🏴\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f",
+ "wallis_&_futuna": "🇼🇫",
+ "western_sahara": "🇪🇭",
+ "yemen": "🇾🇪",
+ "zambia": "🇿🇲",
+ "zimbabwe": "🇿🇼",
+ "abacus": "🧮",
+ "adhesive_bandage": "🩹",
+ "admission_tickets": "🎟",
+ "adult": "🧑",
+ "adult_dark_skin_tone": "🧑🏿",
+ "adult_light_skin_tone": "🧑🏻",
+ "adult_medium-dark_skin_tone": "🧑🏾",
+ "adult_medium-light_skin_tone": "🧑🏼",
+ "adult_medium_skin_tone": "🧑🏽",
+ "aerial_tramway": "🚡",
+ "airplane": "✈",
+ "airplane_arrival": "🛬",
+ "airplane_departure": "🛫",
+ "alarm_clock": "⏰",
+ "alembic": "⚗",
+ "alien": "👽",
+ "alien_monster": "👾",
+ "ambulance": "🚑",
+ "american_football": "🏈",
+ "amphora": "🏺",
+ "anchor": "⚓",
+ "anger_symbol": "💢",
+ "angry_face": "😠",
+ "angry_face_with_horns": "👿",
+ "anguished_face": "😧",
+ "ant": "🐜",
+ "antenna_bars": "📶",
+ "anxious_face_with_sweat": "😰",
+ "articulated_lorry": "🚛",
+ "artist_palette": "🎨",
+ "astonished_face": "😲",
+ "atom_symbol": "⚛",
+ "auto_rickshaw": "🛺",
+ "automobile": "🚗",
+ "avocado": "🥑",
+ "axe": "🪓",
+ "baby": "👶",
+ "baby_angel": "👼",
+ "baby_angel_dark_skin_tone": "👼🏿",
+ "baby_angel_light_skin_tone": "👼🏻",
+ "baby_angel_medium-dark_skin_tone": "👼🏾",
+ "baby_angel_medium-light_skin_tone": "👼🏼",
+ "baby_angel_medium_skin_tone": "👼🏽",
+ "baby_bottle": "🍼",
+ "baby_chick": "🐤",
+ "baby_dark_skin_tone": "👶🏿",
+ "baby_light_skin_tone": "👶🏻",
+ "baby_medium-dark_skin_tone": "👶🏾",
+ "baby_medium-light_skin_tone": "👶🏼",
+ "baby_medium_skin_tone": "👶🏽",
+ "baby_symbol": "🚼",
+ "backhand_index_pointing_down": "👇",
+ "backhand_index_pointing_down_dark_skin_tone": "👇🏿",
+ "backhand_index_pointing_down_light_skin_tone": "👇🏻",
+ "backhand_index_pointing_down_medium-dark_skin_tone": "👇🏾",
+ "backhand_index_pointing_down_medium-light_skin_tone": "👇🏼",
+ "backhand_index_pointing_down_medium_skin_tone": "👇🏽",
+ "backhand_index_pointing_left": "👈",
+ "backhand_index_pointing_left_dark_skin_tone": "👈🏿",
+ "backhand_index_pointing_left_light_skin_tone": "👈🏻",
+ "backhand_index_pointing_left_medium-dark_skin_tone": "👈🏾",
+ "backhand_index_pointing_left_medium-light_skin_tone": "👈🏼",
+ "backhand_index_pointing_left_medium_skin_tone": "👈🏽",
+ "backhand_index_pointing_right": "👉",
+ "backhand_index_pointing_right_dark_skin_tone": "👉🏿",
+ "backhand_index_pointing_right_light_skin_tone": "👉🏻",
+ "backhand_index_pointing_right_medium-dark_skin_tone": "👉🏾",
+ "backhand_index_pointing_right_medium-light_skin_tone": "👉🏼",
+ "backhand_index_pointing_right_medium_skin_tone": "👉🏽",
+ "backhand_index_pointing_up": "👆",
+ "backhand_index_pointing_up_dark_skin_tone": "👆🏿",
+ "backhand_index_pointing_up_light_skin_tone": "👆🏻",
+ "backhand_index_pointing_up_medium-dark_skin_tone": "👆🏾",
+ "backhand_index_pointing_up_medium-light_skin_tone": "👆🏼",
+ "backhand_index_pointing_up_medium_skin_tone": "👆🏽",
+ "bacon": "🥓",
+ "badger": "🦡",
+ "badminton": "🏸",
+ "bagel": "🥯",
+ "baggage_claim": "🛄",
+ "baguette_bread": "🥖",
+ "balance_scale": "⚖",
+ "bald": "🦲",
+ "bald_man": "👨\u200d🦲",
+ "bald_woman": "👩\u200d🦲",
+ "ballet_shoes": "🩰",
+ "balloon": "🎈",
+ "ballot_box_with_ballot": "🗳",
+ "ballot_box_with_check": "☑",
+ "banana": "🍌",
+ "banjo": "🪕",
+ "bank": "🏦",
+ "bar_chart": "📊",
+ "barber_pole": "💈",
+ "baseball": "⚾",
+ "basket": "🧺",
+ "basketball": "🏀",
+ "bat": "🦇",
+ "bathtub": "🛁",
+ "battery": "🔋",
+ "beach_with_umbrella": "🏖",
+ "beaming_face_with_smiling_eyes": "😁",
+ "bear_face": "🐻",
+ "bearded_person": "🧔",
+ "bearded_person_dark_skin_tone": "🧔🏿",
+ "bearded_person_light_skin_tone": "🧔🏻",
+ "bearded_person_medium-dark_skin_tone": "🧔🏾",
+ "bearded_person_medium-light_skin_tone": "🧔🏼",
+ "bearded_person_medium_skin_tone": "🧔🏽",
+ "beating_heart": "💓",
+ "bed": "🛏",
+ "beer_mug": "🍺",
+ "bell": "🔔",
+ "bell_with_slash": "🔕",
+ "bellhop_bell": "🛎",
+ "bento_box": "🍱",
+ "beverage_box": "🧃",
+ "bicycle": "🚲",
+ "bikini": "👙",
+ "billed_cap": "🧢",
+ "biohazard": "☣",
+ "bird": "🐦",
+ "birthday_cake": "🎂",
+ "black_circle": "⚫",
+ "black_flag": "🏴",
+ "black_heart": "🖤",
+ "black_large_square": "⬛",
+ "black_medium-small_square": "◾",
+ "black_medium_square": "◼",
+ "black_nib": "✒",
+ "black_small_square": "▪",
+ "black_square_button": "🔲",
+ "blond-haired_man": "👱\u200d♂️",
+ "blond-haired_man_dark_skin_tone": "👱🏿\u200d♂️",
+ "blond-haired_man_light_skin_tone": "👱🏻\u200d♂️",
+ "blond-haired_man_medium-dark_skin_tone": "👱🏾\u200d♂️",
+ "blond-haired_man_medium-light_skin_tone": "👱🏼\u200d♂️",
+ "blond-haired_man_medium_skin_tone": "👱🏽\u200d♂️",
+ "blond-haired_person": "👱",
+ "blond-haired_person_dark_skin_tone": "👱🏿",
+ "blond-haired_person_light_skin_tone": "👱🏻",
+ "blond-haired_person_medium-dark_skin_tone": "👱🏾",
+ "blond-haired_person_medium-light_skin_tone": "👱🏼",
+ "blond-haired_person_medium_skin_tone": "👱🏽",
+ "blond-haired_woman": "👱\u200d♀️",
+ "blond-haired_woman_dark_skin_tone": "👱🏿\u200d♀️",
+ "blond-haired_woman_light_skin_tone": "👱🏻\u200d♀️",
+ "blond-haired_woman_medium-dark_skin_tone": "👱🏾\u200d♀️",
+ "blond-haired_woman_medium-light_skin_tone": "👱🏼\u200d♀️",
+ "blond-haired_woman_medium_skin_tone": "👱🏽\u200d♀️",
+ "blossom": "🌼",
+ "blowfish": "🐡",
+ "blue_book": "📘",
+ "blue_circle": "🔵",
+ "blue_heart": "💙",
+ "blue_square": "🟦",
+ "boar": "🐗",
+ "bomb": "💣",
+ "bone": "🦴",
+ "bookmark": "🔖",
+ "bookmark_tabs": "📑",
+ "books": "📚",
+ "bottle_with_popping_cork": "🍾",
+ "bouquet": "💐",
+ "bow_and_arrow": "🏹",
+ "bowl_with_spoon": "🥣",
+ "bowling": "🎳",
+ "boxing_glove": "🥊",
+ "boy": "👦",
+ "boy_dark_skin_tone": "👦🏿",
+ "boy_light_skin_tone": "👦🏻",
+ "boy_medium-dark_skin_tone": "👦🏾",
+ "boy_medium-light_skin_tone": "👦🏼",
+ "boy_medium_skin_tone": "👦🏽",
+ "brain": "🧠",
+ "bread": "🍞",
+ "breast-feeding": "🤱",
+ "breast-feeding_dark_skin_tone": "🤱🏿",
+ "breast-feeding_light_skin_tone": "🤱🏻",
+ "breast-feeding_medium-dark_skin_tone": "🤱🏾",
+ "breast-feeding_medium-light_skin_tone": "🤱🏼",
+ "breast-feeding_medium_skin_tone": "🤱🏽",
+ "brick": "🧱",
+ "bride_with_veil": "👰",
+ "bride_with_veil_dark_skin_tone": "👰🏿",
+ "bride_with_veil_light_skin_tone": "👰🏻",
+ "bride_with_veil_medium-dark_skin_tone": "👰🏾",
+ "bride_with_veil_medium-light_skin_tone": "👰🏼",
+ "bride_with_veil_medium_skin_tone": "👰🏽",
+ "bridge_at_night": "🌉",
+ "briefcase": "💼",
+ "briefs": "🩲",
+ "bright_button": "🔆",
+ "broccoli": "🥦",
+ "broken_heart": "💔",
+ "broom": "🧹",
+ "brown_circle": "🟤",
+ "brown_heart": "🤎",
+ "brown_square": "🟫",
+ "bug": "🐛",
+ "building_construction": "🏗",
+ "bullet_train": "🚅",
+ "burrito": "🌯",
+ "bus": "🚌",
+ "bus_stop": "🚏",
+ "bust_in_silhouette": "👤",
+ "busts_in_silhouette": "👥",
+ "butter": "🧈",
+ "butterfly": "🦋",
+ "cactus": "🌵",
+ "calendar": "📆",
+ "call_me_hand": "🤙",
+ "call_me_hand_dark_skin_tone": "🤙🏿",
+ "call_me_hand_light_skin_tone": "🤙🏻",
+ "call_me_hand_medium-dark_skin_tone": "🤙🏾",
+ "call_me_hand_medium-light_skin_tone": "🤙🏼",
+ "call_me_hand_medium_skin_tone": "🤙🏽",
+ "camel": "🐫",
+ "camera": "📷",
+ "camera_with_flash": "📸",
+ "camping": "🏕",
+ "candle": "🕯",
+ "candy": "🍬",
+ "canned_food": "🥫",
+ "canoe": "🛶",
+ "card_file_box": "🗃",
+ "card_index": "📇",
+ "card_index_dividers": "🗂",
+ "carousel_horse": "🎠",
+ "carp_streamer": "🎏",
+ "carrot": "🥕",
+ "castle": "🏰",
+ "cat": "🐱",
+ "cat_face": "🐱",
+ "cat_face_with_tears_of_joy": "😹",
+ "cat_face_with_wry_smile": "😼",
+ "chains": "⛓",
+ "chair": "🪑",
+ "chart_decreasing": "📉",
+ "chart_increasing": "📈",
+ "chart_increasing_with_yen": "💹",
+ "cheese_wedge": "🧀",
+ "chequered_flag": "🏁",
+ "cherries": "🍒",
+ "cherry_blossom": "🌸",
+ "chess_pawn": "♟",
+ "chestnut": "🌰",
+ "chicken": "🐔",
+ "child": "🧒",
+ "child_dark_skin_tone": "🧒🏿",
+ "child_light_skin_tone": "🧒🏻",
+ "child_medium-dark_skin_tone": "🧒🏾",
+ "child_medium-light_skin_tone": "🧒🏼",
+ "child_medium_skin_tone": "🧒🏽",
+ "children_crossing": "🚸",
+ "chipmunk": "🐿",
+ "chocolate_bar": "🍫",
+ "chopsticks": "🥢",
+ "church": "⛪",
+ "cigarette": "🚬",
+ "cinema": "🎦",
+ "circled_m": "Ⓜ",
+ "circus_tent": "🎪",
+ "cityscape": "🏙",
+ "cityscape_at_dusk": "🌆",
+ "clamp": "🗜",
+ "clapper_board": "🎬",
+ "clapping_hands": "👏",
+ "clapping_hands_dark_skin_tone": "👏🏿",
+ "clapping_hands_light_skin_tone": "👏🏻",
+ "clapping_hands_medium-dark_skin_tone": "👏🏾",
+ "clapping_hands_medium-light_skin_tone": "👏🏼",
+ "clapping_hands_medium_skin_tone": "👏🏽",
+ "classical_building": "🏛",
+ "clinking_beer_mugs": "🍻",
+ "clinking_glasses": "🥂",
+ "clipboard": "📋",
+ "clockwise_vertical_arrows": "🔃",
+ "closed_book": "📕",
+ "closed_mailbox_with_lowered_flag": "📪",
+ "closed_mailbox_with_raised_flag": "📫",
+ "closed_umbrella": "🌂",
+ "cloud": "☁",
+ "cloud_with_lightning": "🌩",
+ "cloud_with_lightning_and_rain": "⛈",
+ "cloud_with_rain": "🌧",
+ "cloud_with_snow": "🌨",
+ "clown_face": "🤡",
+ "club_suit": "♣",
+ "clutch_bag": "👝",
+ "coat": "🧥",
+ "cocktail_glass": "🍸",
+ "coconut": "🥥",
+ "coffin": "⚰",
+ "cold_face": "🥶",
+ "collision": "💥",
+ "comet": "☄",
+ "compass": "🧭",
+ "computer_disk": "💽",
+ "computer_mouse": "🖱",
+ "confetti_ball": "🎊",
+ "confounded_face": "😖",
+ "confused_face": "😕",
+ "construction": "🚧",
+ "construction_worker": "👷",
+ "construction_worker_dark_skin_tone": "👷🏿",
+ "construction_worker_light_skin_tone": "👷🏻",
+ "construction_worker_medium-dark_skin_tone": "👷🏾",
+ "construction_worker_medium-light_skin_tone": "👷🏼",
+ "construction_worker_medium_skin_tone": "👷🏽",
+ "control_knobs": "🎛",
+ "convenience_store": "🏪",
+ "cooked_rice": "🍚",
+ "cookie": "🍪",
+ "cooking": "🍳",
+ "copyright": "©",
+ "couch_and_lamp": "🛋",
+ "counterclockwise_arrows_button": "🔄",
+ "couple_with_heart": "💑",
+ "couple_with_heart_man_man": "👨\u200d❤️\u200d👨",
+ "couple_with_heart_woman_man": "👩\u200d❤️\u200d👨",
+ "couple_with_heart_woman_woman": "👩\u200d❤️\u200d👩",
+ "cow": "🐮",
+ "cow_face": "🐮",
+ "cowboy_hat_face": "🤠",
+ "crab": "🦀",
+ "crayon": "🖍",
+ "credit_card": "💳",
+ "crescent_moon": "🌙",
+ "cricket": "🦗",
+ "cricket_game": "🏏",
+ "crocodile": "🐊",
+ "croissant": "🥐",
+ "cross_mark": "❌",
+ "cross_mark_button": "❎",
+ "crossed_fingers": "🤞",
+ "crossed_fingers_dark_skin_tone": "🤞🏿",
+ "crossed_fingers_light_skin_tone": "🤞🏻",
+ "crossed_fingers_medium-dark_skin_tone": "🤞🏾",
+ "crossed_fingers_medium-light_skin_tone": "🤞🏼",
+ "crossed_fingers_medium_skin_tone": "🤞🏽",
+ "crossed_flags": "🎌",
+ "crossed_swords": "⚔",
+ "crown": "👑",
+ "crying_cat_face": "😿",
+ "crying_face": "😢",
+ "crystal_ball": "🔮",
+ "cucumber": "🥒",
+ "cupcake": "🧁",
+ "cup_with_straw": "🥤",
+ "curling_stone": "🥌",
+ "curly_hair": "🦱",
+ "curly-haired_man": "👨\u200d🦱",
+ "curly-haired_woman": "👩\u200d🦱",
+ "curly_loop": "➰",
+ "currency_exchange": "💱",
+ "curry_rice": "🍛",
+ "custard": "🍮",
+ "customs": "🛃",
+ "cut_of_meat": "🥩",
+ "cyclone": "🌀",
+ "dagger": "🗡",
+ "dango": "🍡",
+ "dashing_away": "💨",
+ "deaf_person": "🧏",
+ "deciduous_tree": "🌳",
+ "deer": "🦌",
+ "delivery_truck": "🚚",
+ "department_store": "🏬",
+ "derelict_house": "🏚",
+ "desert": "🏜",
+ "desert_island": "🏝",
+ "desktop_computer": "🖥",
+ "detective": "🕵",
+ "detective_dark_skin_tone": "🕵🏿",
+ "detective_light_skin_tone": "🕵🏻",
+ "detective_medium-dark_skin_tone": "🕵🏾",
+ "detective_medium-light_skin_tone": "🕵🏼",
+ "detective_medium_skin_tone": "🕵🏽",
+ "diamond_suit": "♦",
+ "diamond_with_a_dot": "💠",
+ "dim_button": "🔅",
+ "direct_hit": "🎯",
+ "disappointed_face": "😞",
+ "diving_mask": "🤿",
+ "diya_lamp": "🪔",
+ "dizzy": "💫",
+ "dizzy_face": "😵",
+ "dna": "🧬",
+ "dog": "🐶",
+ "dog_face": "🐶",
+ "dollar_banknote": "💵",
+ "dolphin": "🐬",
+ "door": "🚪",
+ "dotted_six-pointed_star": "🔯",
+ "double_curly_loop": "➿",
+ "double_exclamation_mark": "‼",
+ "doughnut": "🍩",
+ "dove": "🕊",
+ "down-left_arrow": "↙",
+ "down-right_arrow": "↘",
+ "down_arrow": "⬇",
+ "downcast_face_with_sweat": "😓",
+ "downwards_button": "🔽",
+ "dragon": "🐉",
+ "dragon_face": "🐲",
+ "dress": "👗",
+ "drooling_face": "🤤",
+ "drop_of_blood": "🩸",
+ "droplet": "💧",
+ "drum": "🥁",
+ "duck": "🦆",
+ "dumpling": "🥟",
+ "dvd": "📀",
+ "e-mail": "📧",
+ "eagle": "🦅",
+ "ear": "👂",
+ "ear_dark_skin_tone": "👂🏿",
+ "ear_light_skin_tone": "👂🏻",
+ "ear_medium-dark_skin_tone": "👂🏾",
+ "ear_medium-light_skin_tone": "👂🏼",
+ "ear_medium_skin_tone": "👂🏽",
+ "ear_of_corn": "🌽",
+ "ear_with_hearing_aid": "🦻",
+ "egg": "🍳",
+ "eggplant": "🍆",
+ "eight-pointed_star": "✴",
+ "eight-spoked_asterisk": "✳",
+ "eight-thirty": "🕣",
+ "eight_o’clock": "🕗",
+ "eject_button": "⏏",
+ "electric_plug": "🔌",
+ "elephant": "🐘",
+ "eleven-thirty": "🕦",
+ "eleven_o’clock": "🕚",
+ "elf": "🧝",
+ "elf_dark_skin_tone": "🧝🏿",
+ "elf_light_skin_tone": "🧝🏻",
+ "elf_medium-dark_skin_tone": "🧝🏾",
+ "elf_medium-light_skin_tone": "🧝🏼",
+ "elf_medium_skin_tone": "🧝🏽",
+ "envelope": "✉",
+ "envelope_with_arrow": "📩",
+ "euro_banknote": "💶",
+ "evergreen_tree": "🌲",
+ "ewe": "🐑",
+ "exclamation_mark": "❗",
+ "exclamation_question_mark": "⁉",
+ "exploding_head": "🤯",
+ "expressionless_face": "😑",
+ "eye": "👁",
+ "eye_in_speech_bubble": "👁️\u200d🗨️",
+ "eyes": "👀",
+ "face_blowing_a_kiss": "😘",
+ "face_savoring_food": "😋",
+ "face_screaming_in_fear": "😱",
+ "face_vomiting": "🤮",
+ "face_with_hand_over_mouth": "🤭",
+ "face_with_head-bandage": "🤕",
+ "face_with_medical_mask": "😷",
+ "face_with_monocle": "🧐",
+ "face_with_open_mouth": "😮",
+ "face_with_raised_eyebrow": "🤨",
+ "face_with_rolling_eyes": "🙄",
+ "face_with_steam_from_nose": "😤",
+ "face_with_symbols_on_mouth": "🤬",
+ "face_with_tears_of_joy": "😂",
+ "face_with_thermometer": "🤒",
+ "face_with_tongue": "😛",
+ "face_without_mouth": "😶",
+ "factory": "🏭",
+ "fairy": "🧚",
+ "fairy_dark_skin_tone": "🧚🏿",
+ "fairy_light_skin_tone": "🧚🏻",
+ "fairy_medium-dark_skin_tone": "🧚🏾",
+ "fairy_medium-light_skin_tone": "🧚🏼",
+ "fairy_medium_skin_tone": "🧚🏽",
+ "falafel": "🧆",
+ "fallen_leaf": "🍂",
+ "family": "👪",
+ "family_man_boy": "👨\u200d👦",
+ "family_man_boy_boy": "👨\u200d👦\u200d👦",
+ "family_man_girl": "👨\u200d👧",
+ "family_man_girl_boy": "👨\u200d👧\u200d👦",
+ "family_man_girl_girl": "👨\u200d👧\u200d👧",
+ "family_man_man_boy": "👨\u200d👨\u200d👦",
+ "family_man_man_boy_boy": "👨\u200d👨\u200d👦\u200d👦",
+ "family_man_man_girl": "👨\u200d👨\u200d👧",
+ "family_man_man_girl_boy": "👨\u200d👨\u200d👧\u200d👦",
+ "family_man_man_girl_girl": "👨\u200d👨\u200d👧\u200d👧",
+ "family_man_woman_boy": "👨\u200d👩\u200d👦",
+ "family_man_woman_boy_boy": "👨\u200d👩\u200d👦\u200d👦",
+ "family_man_woman_girl": "👨\u200d👩\u200d👧",
+ "family_man_woman_girl_boy": "👨\u200d👩\u200d👧\u200d👦",
+ "family_man_woman_girl_girl": "👨\u200d👩\u200d👧\u200d👧",
+ "family_woman_boy": "👩\u200d👦",
+ "family_woman_boy_boy": "👩\u200d👦\u200d👦",
+ "family_woman_girl": "👩\u200d👧",
+ "family_woman_girl_boy": "👩\u200d👧\u200d👦",
+ "family_woman_girl_girl": "👩\u200d👧\u200d👧",
+ "family_woman_woman_boy": "👩\u200d👩\u200d👦",
+ "family_woman_woman_boy_boy": "👩\u200d👩\u200d👦\u200d👦",
+ "family_woman_woman_girl": "👩\u200d👩\u200d👧",
+ "family_woman_woman_girl_boy": "👩\u200d👩\u200d👧\u200d👦",
+ "family_woman_woman_girl_girl": "👩\u200d👩\u200d👧\u200d👧",
+ "fast-forward_button": "⏩",
+ "fast_down_button": "⏬",
+ "fast_reverse_button": "⏪",
+ "fast_up_button": "⏫",
+ "fax_machine": "📠",
+ "fearful_face": "😨",
+ "female_sign": "♀",
+ "ferris_wheel": "🎡",
+ "ferry": "⛴",
+ "field_hockey": "🏑",
+ "file_cabinet": "🗄",
+ "file_folder": "📁",
+ "film_frames": "🎞",
+ "film_projector": "📽",
+ "fire": "🔥",
+ "fire_extinguisher": "🧯",
+ "firecracker": "🧨",
+ "fire_engine": "🚒",
+ "fireworks": "🎆",
+ "first_quarter_moon": "🌓",
+ "first_quarter_moon_face": "🌛",
+ "fish": "🐟",
+ "fish_cake_with_swirl": "🍥",
+ "fishing_pole": "🎣",
+ "five-thirty": "🕠",
+ "five_o’clock": "🕔",
+ "flag_in_hole": "⛳",
+ "flamingo": "🦩",
+ "flashlight": "🔦",
+ "flat_shoe": "🥿",
+ "fleur-de-lis": "⚜",
+ "flexed_biceps": "💪",
+ "flexed_biceps_dark_skin_tone": "💪🏿",
+ "flexed_biceps_light_skin_tone": "💪🏻",
+ "flexed_biceps_medium-dark_skin_tone": "💪🏾",
+ "flexed_biceps_medium-light_skin_tone": "💪🏼",
+ "flexed_biceps_medium_skin_tone": "💪🏽",
+ "floppy_disk": "💾",
+ "flower_playing_cards": "🎴",
+ "flushed_face": "😳",
+ "flying_disc": "🥏",
+ "flying_saucer": "🛸",
+ "fog": "🌫",
+ "foggy": "🌁",
+ "folded_hands": "🙏",
+ "folded_hands_dark_skin_tone": "🙏🏿",
+ "folded_hands_light_skin_tone": "🙏🏻",
+ "folded_hands_medium-dark_skin_tone": "🙏🏾",
+ "folded_hands_medium-light_skin_tone": "🙏🏼",
+ "folded_hands_medium_skin_tone": "🙏🏽",
+ "foot": "🦶",
+ "footprints": "👣",
+ "fork_and_knife": "🍴",
+ "fork_and_knife_with_plate": "🍽",
+ "fortune_cookie": "🥠",
+ "fountain": "⛲",
+ "fountain_pen": "🖋",
+ "four-thirty": "🕟",
+ "four_leaf_clover": "🍀",
+ "four_o’clock": "🕓",
+ "fox_face": "🦊",
+ "framed_picture": "🖼",
+ "french_fries": "🍟",
+ "fried_shrimp": "🍤",
+ "frog_face": "🐸",
+ "front-facing_baby_chick": "🐥",
+ "frowning_face": "☹",
+ "frowning_face_with_open_mouth": "😦",
+ "fuel_pump": "⛽",
+ "full_moon": "🌕",
+ "full_moon_face": "🌝",
+ "funeral_urn": "⚱",
+ "game_die": "🎲",
+ "garlic": "🧄",
+ "gear": "⚙",
+ "gem_stone": "💎",
+ "genie": "🧞",
+ "ghost": "👻",
+ "giraffe": "🦒",
+ "girl": "👧",
+ "girl_dark_skin_tone": "👧🏿",
+ "girl_light_skin_tone": "👧🏻",
+ "girl_medium-dark_skin_tone": "👧🏾",
+ "girl_medium-light_skin_tone": "👧🏼",
+ "girl_medium_skin_tone": "👧🏽",
+ "glass_of_milk": "🥛",
+ "glasses": "👓",
+ "globe_showing_americas": "🌎",
+ "globe_showing_asia-australia": "🌏",
+ "globe_showing_europe-africa": "🌍",
+ "globe_with_meridians": "🌐",
+ "gloves": "🧤",
+ "glowing_star": "🌟",
+ "goal_net": "🥅",
+ "goat": "🐐",
+ "goblin": "👺",
+ "goggles": "🥽",
+ "gorilla": "🦍",
+ "graduation_cap": "🎓",
+ "grapes": "🍇",
+ "green_apple": "🍏",
+ "green_book": "📗",
+ "green_circle": "🟢",
+ "green_heart": "💚",
+ "green_salad": "🥗",
+ "green_square": "🟩",
+ "grimacing_face": "😬",
+ "grinning_cat_face": "😺",
+ "grinning_cat_face_with_smiling_eyes": "😸",
+ "grinning_face": "😀",
+ "grinning_face_with_big_eyes": "😃",
+ "grinning_face_with_smiling_eyes": "😄",
+ "grinning_face_with_sweat": "😅",
+ "grinning_squinting_face": "😆",
+ "growing_heart": "💗",
+ "guard": "💂",
+ "guard_dark_skin_tone": "💂🏿",
+ "guard_light_skin_tone": "💂🏻",
+ "guard_medium-dark_skin_tone": "💂🏾",
+ "guard_medium-light_skin_tone": "💂🏼",
+ "guard_medium_skin_tone": "💂🏽",
+ "guide_dog": "🦮",
+ "guitar": "🎸",
+ "hamburger": "🍔",
+ "hammer": "🔨",
+ "hammer_and_pick": "⚒",
+ "hammer_and_wrench": "🛠",
+ "hamster_face": "🐹",
+ "hand_with_fingers_splayed": "🖐",
+ "hand_with_fingers_splayed_dark_skin_tone": "🖐🏿",
+ "hand_with_fingers_splayed_light_skin_tone": "🖐🏻",
+ "hand_with_fingers_splayed_medium-dark_skin_tone": "🖐🏾",
+ "hand_with_fingers_splayed_medium-light_skin_tone": "🖐🏼",
+ "hand_with_fingers_splayed_medium_skin_tone": "🖐🏽",
+ "handbag": "👜",
+ "handshake": "🤝",
+ "hatching_chick": "🐣",
+ "headphone": "🎧",
+ "hear-no-evil_monkey": "🙉",
+ "heart_decoration": "💟",
+ "heart_suit": "♥",
+ "heart_with_arrow": "💘",
+ "heart_with_ribbon": "💝",
+ "heavy_check_mark": "✔",
+ "heavy_division_sign": "➗",
+ "heavy_dollar_sign": "💲",
+ "heavy_heart_exclamation": "❣",
+ "heavy_large_circle": "⭕",
+ "heavy_minus_sign": "➖",
+ "heavy_multiplication_x": "✖",
+ "heavy_plus_sign": "➕",
+ "hedgehog": "🦔",
+ "helicopter": "🚁",
+ "herb": "🌿",
+ "hibiscus": "🌺",
+ "high-heeled_shoe": "👠",
+ "high-speed_train": "🚄",
+ "high_voltage": "⚡",
+ "hiking_boot": "🥾",
+ "hindu_temple": "🛕",
+ "hippopotamus": "🦛",
+ "hole": "🕳",
+ "honey_pot": "🍯",
+ "honeybee": "🐝",
+ "horizontal_traffic_light": "🚥",
+ "horse": "🐴",
+ "horse_face": "🐴",
+ "horse_racing": "🏇",
+ "horse_racing_dark_skin_tone": "🏇🏿",
+ "horse_racing_light_skin_tone": "🏇🏻",
+ "horse_racing_medium-dark_skin_tone": "🏇🏾",
+ "horse_racing_medium-light_skin_tone": "🏇🏼",
+ "horse_racing_medium_skin_tone": "🏇🏽",
+ "hospital": "🏥",
+ "hot_beverage": "☕",
+ "hot_dog": "🌭",
+ "hot_face": "🥵",
+ "hot_pepper": "🌶",
+ "hot_springs": "♨",
+ "hotel": "🏨",
+ "hourglass_done": "⌛",
+ "hourglass_not_done": "⏳",
+ "house": "🏠",
+ "house_with_garden": "🏡",
+ "houses": "🏘",
+ "hugging_face": "🤗",
+ "hundred_points": "💯",
+ "hushed_face": "😯",
+ "ice": "🧊",
+ "ice_cream": "🍨",
+ "ice_hockey": "🏒",
+ "ice_skate": "⛸",
+ "inbox_tray": "📥",
+ "incoming_envelope": "📨",
+ "index_pointing_up": "☝",
+ "index_pointing_up_dark_skin_tone": "☝🏿",
+ "index_pointing_up_light_skin_tone": "☝🏻",
+ "index_pointing_up_medium-dark_skin_tone": "☝🏾",
+ "index_pointing_up_medium-light_skin_tone": "☝🏼",
+ "index_pointing_up_medium_skin_tone": "☝🏽",
+ "infinity": "♾",
+ "information": "ℹ",
+ "input_latin_letters": "🔤",
+ "input_latin_lowercase": "🔡",
+ "input_latin_uppercase": "🔠",
+ "input_numbers": "🔢",
+ "input_symbols": "🔣",
+ "jack-o-lantern": "🎃",
+ "jeans": "👖",
+ "jigsaw": "🧩",
+ "joker": "🃏",
+ "joystick": "🕹",
+ "kaaba": "🕋",
+ "kangaroo": "🦘",
+ "key": "🔑",
+ "keyboard": "⌨",
+ "keycap_#": "#️⃣",
+ "keycap_*": "*️⃣",
+ "keycap_0": "0️⃣",
+ "keycap_1": "1️⃣",
+ "keycap_10": "🔟",
+ "keycap_2": "2️⃣",
+ "keycap_3": "3️⃣",
+ "keycap_4": "4️⃣",
+ "keycap_5": "5️⃣",
+ "keycap_6": "6️⃣",
+ "keycap_7": "7️⃣",
+ "keycap_8": "8️⃣",
+ "keycap_9": "9️⃣",
+ "kick_scooter": "🛴",
+ "kimono": "👘",
+ "kiss": "💋",
+ "kiss_man_man": "👨\u200d❤️\u200d💋\u200d👨",
+ "kiss_mark": "💋",
+ "kiss_woman_man": "👩\u200d❤️\u200d💋\u200d👨",
+ "kiss_woman_woman": "👩\u200d❤️\u200d💋\u200d👩",
+ "kissing_cat_face": "😽",
+ "kissing_face": "😗",
+ "kissing_face_with_closed_eyes": "😚",
+ "kissing_face_with_smiling_eyes": "😙",
+ "kitchen_knife": "🔪",
+ "kite": "🪁",
+ "kiwi_fruit": "🥝",
+ "koala": "🐨",
+ "lab_coat": "🥼",
+ "label": "🏷",
+ "lacrosse": "🥍",
+ "lady_beetle": "🐞",
+ "laptop_computer": "💻",
+ "large_blue_diamond": "🔷",
+ "large_orange_diamond": "🔶",
+ "last_quarter_moon": "🌗",
+ "last_quarter_moon_face": "🌜",
+ "last_track_button": "⏮",
+ "latin_cross": "✝",
+ "leaf_fluttering_in_wind": "🍃",
+ "leafy_green": "🥬",
+ "ledger": "📒",
+ "left-facing_fist": "🤛",
+ "left-facing_fist_dark_skin_tone": "🤛🏿",
+ "left-facing_fist_light_skin_tone": "🤛🏻",
+ "left-facing_fist_medium-dark_skin_tone": "🤛🏾",
+ "left-facing_fist_medium-light_skin_tone": "🤛🏼",
+ "left-facing_fist_medium_skin_tone": "🤛🏽",
+ "left-right_arrow": "↔",
+ "left_arrow": "⬅",
+ "left_arrow_curving_right": "↪",
+ "left_luggage": "🛅",
+ "left_speech_bubble": "🗨",
+ "leg": "🦵",
+ "lemon": "🍋",
+ "leopard": "🐆",
+ "level_slider": "🎚",
+ "light_bulb": "💡",
+ "light_rail": "🚈",
+ "link": "🔗",
+ "linked_paperclips": "🖇",
+ "lion_face": "🦁",
+ "lipstick": "💄",
+ "litter_in_bin_sign": "🚮",
+ "lizard": "🦎",
+ "llama": "🦙",
+ "lobster": "🦞",
+ "locked": "🔒",
+ "locked_with_key": "🔐",
+ "locked_with_pen": "🔏",
+ "locomotive": "🚂",
+ "lollipop": "🍭",
+ "lotion_bottle": "🧴",
+ "loudly_crying_face": "😭",
+ "loudspeaker": "📢",
+ "love-you_gesture": "🤟",
+ "love-you_gesture_dark_skin_tone": "🤟🏿",
+ "love-you_gesture_light_skin_tone": "🤟🏻",
+ "love-you_gesture_medium-dark_skin_tone": "🤟🏾",
+ "love-you_gesture_medium-light_skin_tone": "🤟🏼",
+ "love-you_gesture_medium_skin_tone": "🤟🏽",
+ "love_hotel": "🏩",
+ "love_letter": "💌",
+ "luggage": "🧳",
+ "lying_face": "🤥",
+ "mage": "🧙",
+ "mage_dark_skin_tone": "🧙🏿",
+ "mage_light_skin_tone": "🧙🏻",
+ "mage_medium-dark_skin_tone": "🧙🏾",
+ "mage_medium-light_skin_tone": "🧙🏼",
+ "mage_medium_skin_tone": "🧙🏽",
+ "magnet": "🧲",
+ "magnifying_glass_tilted_left": "🔍",
+ "magnifying_glass_tilted_right": "🔎",
+ "mahjong_red_dragon": "🀄",
+ "male_sign": "♂",
+ "man": "👨",
+ "man_and_woman_holding_hands": "👫",
+ "man_artist": "👨\u200d🎨",
+ "man_artist_dark_skin_tone": "👨🏿\u200d🎨",
+ "man_artist_light_skin_tone": "👨🏻\u200d🎨",
+ "man_artist_medium-dark_skin_tone": "👨🏾\u200d🎨",
+ "man_artist_medium-light_skin_tone": "👨🏼\u200d🎨",
+ "man_artist_medium_skin_tone": "👨🏽\u200d🎨",
+ "man_astronaut": "👨\u200d🚀",
+ "man_astronaut_dark_skin_tone": "👨🏿\u200d🚀",
+ "man_astronaut_light_skin_tone": "👨🏻\u200d🚀",
+ "man_astronaut_medium-dark_skin_tone": "👨🏾\u200d🚀",
+ "man_astronaut_medium-light_skin_tone": "👨🏼\u200d🚀",
+ "man_astronaut_medium_skin_tone": "👨🏽\u200d🚀",
+ "man_biking": "🚴\u200d♂️",
+ "man_biking_dark_skin_tone": "🚴🏿\u200d♂️",
+ "man_biking_light_skin_tone": "🚴🏻\u200d♂️",
+ "man_biking_medium-dark_skin_tone": "🚴🏾\u200d♂️",
+ "man_biking_medium-light_skin_tone": "🚴🏼\u200d♂️",
+ "man_biking_medium_skin_tone": "🚴🏽\u200d♂️",
+ "man_bouncing_ball": "⛹️\u200d♂️",
+ "man_bouncing_ball_dark_skin_tone": "⛹🏿\u200d♂️",
+ "man_bouncing_ball_light_skin_tone": "⛹🏻\u200d♂️",
+ "man_bouncing_ball_medium-dark_skin_tone": "⛹🏾\u200d♂️",
+ "man_bouncing_ball_medium-light_skin_tone": "⛹🏼\u200d♂️",
+ "man_bouncing_ball_medium_skin_tone": "⛹🏽\u200d♂️",
+ "man_bowing": "🙇\u200d♂️",
+ "man_bowing_dark_skin_tone": "🙇🏿\u200d♂️",
+ "man_bowing_light_skin_tone": "🙇🏻\u200d♂️",
+ "man_bowing_medium-dark_skin_tone": "🙇🏾\u200d♂️",
+ "man_bowing_medium-light_skin_tone": "🙇🏼\u200d♂️",
+ "man_bowing_medium_skin_tone": "🙇🏽\u200d♂️",
+ "man_cartwheeling": "🤸\u200d♂️",
+ "man_cartwheeling_dark_skin_tone": "🤸🏿\u200d♂️",
+ "man_cartwheeling_light_skin_tone": "🤸🏻\u200d♂️",
+ "man_cartwheeling_medium-dark_skin_tone": "🤸🏾\u200d♂️",
+ "man_cartwheeling_medium-light_skin_tone": "🤸🏼\u200d♂️",
+ "man_cartwheeling_medium_skin_tone": "🤸🏽\u200d♂️",
+ "man_climbing": "🧗\u200d♂️",
+ "man_climbing_dark_skin_tone": "🧗🏿\u200d♂️",
+ "man_climbing_light_skin_tone": "🧗🏻\u200d♂️",
+ "man_climbing_medium-dark_skin_tone": "🧗🏾\u200d♂️",
+ "man_climbing_medium-light_skin_tone": "🧗🏼\u200d♂️",
+ "man_climbing_medium_skin_tone": "🧗🏽\u200d♂️",
+ "man_construction_worker": "👷\u200d♂️",
+ "man_construction_worker_dark_skin_tone": "👷🏿\u200d♂️",
+ "man_construction_worker_light_skin_tone": "👷🏻\u200d♂️",
+ "man_construction_worker_medium-dark_skin_tone": "👷🏾\u200d♂️",
+ "man_construction_worker_medium-light_skin_tone": "👷🏼\u200d♂️",
+ "man_construction_worker_medium_skin_tone": "👷🏽\u200d♂️",
+ "man_cook": "👨\u200d🍳",
+ "man_cook_dark_skin_tone": "👨🏿\u200d🍳",
+ "man_cook_light_skin_tone": "👨🏻\u200d🍳",
+ "man_cook_medium-dark_skin_tone": "👨🏾\u200d🍳",
+ "man_cook_medium-light_skin_tone": "👨🏼\u200d🍳",
+ "man_cook_medium_skin_tone": "👨🏽\u200d🍳",
+ "man_dancing": "🕺",
+ "man_dancing_dark_skin_tone": "🕺🏿",
+ "man_dancing_light_skin_tone": "🕺🏻",
+ "man_dancing_medium-dark_skin_tone": "🕺🏾",
+ "man_dancing_medium-light_skin_tone": "🕺🏼",
+ "man_dancing_medium_skin_tone": "🕺🏽",
+ "man_dark_skin_tone": "👨🏿",
+ "man_detective": "🕵️\u200d♂️",
+ "man_detective_dark_skin_tone": "🕵🏿\u200d♂️",
+ "man_detective_light_skin_tone": "🕵🏻\u200d♂️",
+ "man_detective_medium-dark_skin_tone": "🕵🏾\u200d♂️",
+ "man_detective_medium-light_skin_tone": "🕵🏼\u200d♂️",
+ "man_detective_medium_skin_tone": "🕵🏽\u200d♂️",
+ "man_elf": "🧝\u200d♂️",
+ "man_elf_dark_skin_tone": "🧝🏿\u200d♂️",
+ "man_elf_light_skin_tone": "🧝🏻\u200d♂️",
+ "man_elf_medium-dark_skin_tone": "🧝🏾\u200d♂️",
+ "man_elf_medium-light_skin_tone": "🧝🏼\u200d♂️",
+ "man_elf_medium_skin_tone": "🧝🏽\u200d♂️",
+ "man_facepalming": "🤦\u200d♂️",
+ "man_facepalming_dark_skin_tone": "🤦🏿\u200d♂️",
+ "man_facepalming_light_skin_tone": "🤦🏻\u200d♂️",
+ "man_facepalming_medium-dark_skin_tone": "🤦🏾\u200d♂️",
+ "man_facepalming_medium-light_skin_tone": "🤦🏼\u200d♂️",
+ "man_facepalming_medium_skin_tone": "🤦🏽\u200d♂️",
+ "man_factory_worker": "👨\u200d🏭",
+ "man_factory_worker_dark_skin_tone": "👨🏿\u200d🏭",
+ "man_factory_worker_light_skin_tone": "👨🏻\u200d🏭",
+ "man_factory_worker_medium-dark_skin_tone": "👨🏾\u200d🏭",
+ "man_factory_worker_medium-light_skin_tone": "👨🏼\u200d🏭",
+ "man_factory_worker_medium_skin_tone": "👨🏽\u200d🏭",
+ "man_fairy": "🧚\u200d♂️",
+ "man_fairy_dark_skin_tone": "🧚🏿\u200d♂️",
+ "man_fairy_light_skin_tone": "🧚🏻\u200d♂️",
+ "man_fairy_medium-dark_skin_tone": "🧚🏾\u200d♂️",
+ "man_fairy_medium-light_skin_tone": "🧚🏼\u200d♂️",
+ "man_fairy_medium_skin_tone": "🧚🏽\u200d♂️",
+ "man_farmer": "👨\u200d🌾",
+ "man_farmer_dark_skin_tone": "👨🏿\u200d🌾",
+ "man_farmer_light_skin_tone": "👨🏻\u200d🌾",
+ "man_farmer_medium-dark_skin_tone": "👨🏾\u200d🌾",
+ "man_farmer_medium-light_skin_tone": "👨🏼\u200d🌾",
+ "man_farmer_medium_skin_tone": "👨🏽\u200d🌾",
+ "man_firefighter": "👨\u200d🚒",
+ "man_firefighter_dark_skin_tone": "👨🏿\u200d🚒",
+ "man_firefighter_light_skin_tone": "👨🏻\u200d🚒",
+ "man_firefighter_medium-dark_skin_tone": "👨🏾\u200d🚒",
+ "man_firefighter_medium-light_skin_tone": "👨🏼\u200d🚒",
+ "man_firefighter_medium_skin_tone": "👨🏽\u200d🚒",
+ "man_frowning": "🙍\u200d♂️",
+ "man_frowning_dark_skin_tone": "🙍🏿\u200d♂️",
+ "man_frowning_light_skin_tone": "🙍🏻\u200d♂️",
+ "man_frowning_medium-dark_skin_tone": "🙍🏾\u200d♂️",
+ "man_frowning_medium-light_skin_tone": "🙍🏼\u200d♂️",
+ "man_frowning_medium_skin_tone": "🙍🏽\u200d♂️",
+ "man_genie": "🧞\u200d♂️",
+ "man_gesturing_no": "🙅\u200d♂️",
+ "man_gesturing_no_dark_skin_tone": "🙅🏿\u200d♂️",
+ "man_gesturing_no_light_skin_tone": "🙅🏻\u200d♂️",
+ "man_gesturing_no_medium-dark_skin_tone": "🙅🏾\u200d♂️",
+ "man_gesturing_no_medium-light_skin_tone": "🙅🏼\u200d♂️",
+ "man_gesturing_no_medium_skin_tone": "🙅🏽\u200d♂️",
+ "man_gesturing_ok": "🙆\u200d♂️",
+ "man_gesturing_ok_dark_skin_tone": "🙆🏿\u200d♂️",
+ "man_gesturing_ok_light_skin_tone": "🙆🏻\u200d♂️",
+ "man_gesturing_ok_medium-dark_skin_tone": "🙆🏾\u200d♂️",
+ "man_gesturing_ok_medium-light_skin_tone": "🙆🏼\u200d♂️",
+ "man_gesturing_ok_medium_skin_tone": "🙆🏽\u200d♂️",
+ "man_getting_haircut": "💇\u200d♂️",
+ "man_getting_haircut_dark_skin_tone": "💇🏿\u200d♂️",
+ "man_getting_haircut_light_skin_tone": "💇🏻\u200d♂️",
+ "man_getting_haircut_medium-dark_skin_tone": "💇🏾\u200d♂️",
+ "man_getting_haircut_medium-light_skin_tone": "💇🏼\u200d♂️",
+ "man_getting_haircut_medium_skin_tone": "💇🏽\u200d♂️",
+ "man_getting_massage": "💆\u200d♂️",
+ "man_getting_massage_dark_skin_tone": "💆🏿\u200d♂️",
+ "man_getting_massage_light_skin_tone": "💆🏻\u200d♂️",
+ "man_getting_massage_medium-dark_skin_tone": "💆🏾\u200d♂️",
+ "man_getting_massage_medium-light_skin_tone": "💆🏼\u200d♂️",
+ "man_getting_massage_medium_skin_tone": "💆🏽\u200d♂️",
+ "man_golfing": "🏌️\u200d♂️",
+ "man_golfing_dark_skin_tone": "🏌🏿\u200d♂️",
+ "man_golfing_light_skin_tone": "🏌🏻\u200d♂️",
+ "man_golfing_medium-dark_skin_tone": "🏌🏾\u200d♂️",
+ "man_golfing_medium-light_skin_tone": "🏌🏼\u200d♂️",
+ "man_golfing_medium_skin_tone": "🏌🏽\u200d♂️",
+ "man_guard": "💂\u200d♂️",
+ "man_guard_dark_skin_tone": "💂🏿\u200d♂️",
+ "man_guard_light_skin_tone": "💂🏻\u200d♂️",
+ "man_guard_medium-dark_skin_tone": "💂🏾\u200d♂️",
+ "man_guard_medium-light_skin_tone": "💂🏼\u200d♂️",
+ "man_guard_medium_skin_tone": "💂🏽\u200d♂️",
+ "man_health_worker": "👨\u200d⚕️",
+ "man_health_worker_dark_skin_tone": "👨🏿\u200d⚕️",
+ "man_health_worker_light_skin_tone": "👨🏻\u200d⚕️",
+ "man_health_worker_medium-dark_skin_tone": "👨🏾\u200d⚕️",
+ "man_health_worker_medium-light_skin_tone": "👨🏼\u200d⚕️",
+ "man_health_worker_medium_skin_tone": "👨🏽\u200d⚕️",
+ "man_in_lotus_position": "🧘\u200d♂️",
+ "man_in_lotus_position_dark_skin_tone": "🧘🏿\u200d♂️",
+ "man_in_lotus_position_light_skin_tone": "🧘🏻\u200d♂️",
+ "man_in_lotus_position_medium-dark_skin_tone": "🧘🏾\u200d♂️",
+ "man_in_lotus_position_medium-light_skin_tone": "🧘🏼\u200d♂️",
+ "man_in_lotus_position_medium_skin_tone": "🧘🏽\u200d♂️",
+ "man_in_manual_wheelchair": "👨\u200d🦽",
+ "man_in_motorized_wheelchair": "👨\u200d🦼",
+ "man_in_steamy_room": "🧖\u200d♂️",
+ "man_in_steamy_room_dark_skin_tone": "🧖🏿\u200d♂️",
+ "man_in_steamy_room_light_skin_tone": "🧖🏻\u200d♂️",
+ "man_in_steamy_room_medium-dark_skin_tone": "🧖🏾\u200d♂️",
+ "man_in_steamy_room_medium-light_skin_tone": "🧖🏼\u200d♂️",
+ "man_in_steamy_room_medium_skin_tone": "🧖🏽\u200d♂️",
+ "man_in_suit_levitating": "🕴",
+ "man_in_suit_levitating_dark_skin_tone": "🕴🏿",
+ "man_in_suit_levitating_light_skin_tone": "🕴🏻",
+ "man_in_suit_levitating_medium-dark_skin_tone": "🕴🏾",
+ "man_in_suit_levitating_medium-light_skin_tone": "🕴🏼",
+ "man_in_suit_levitating_medium_skin_tone": "🕴🏽",
+ "man_in_tuxedo": "🤵",
+ "man_in_tuxedo_dark_skin_tone": "🤵🏿",
+ "man_in_tuxedo_light_skin_tone": "🤵🏻",
+ "man_in_tuxedo_medium-dark_skin_tone": "🤵🏾",
+ "man_in_tuxedo_medium-light_skin_tone": "🤵🏼",
+ "man_in_tuxedo_medium_skin_tone": "🤵🏽",
+ "man_judge": "👨\u200d⚖️",
+ "man_judge_dark_skin_tone": "👨🏿\u200d⚖️",
+ "man_judge_light_skin_tone": "👨🏻\u200d⚖️",
+ "man_judge_medium-dark_skin_tone": "👨🏾\u200d⚖️",
+ "man_judge_medium-light_skin_tone": "👨🏼\u200d⚖️",
+ "man_judge_medium_skin_tone": "👨🏽\u200d⚖️",
+ "man_juggling": "🤹\u200d♂️",
+ "man_juggling_dark_skin_tone": "🤹🏿\u200d♂️",
+ "man_juggling_light_skin_tone": "🤹🏻\u200d♂️",
+ "man_juggling_medium-dark_skin_tone": "🤹🏾\u200d♂️",
+ "man_juggling_medium-light_skin_tone": "🤹🏼\u200d♂️",
+ "man_juggling_medium_skin_tone": "🤹🏽\u200d♂️",
+ "man_lifting_weights": "🏋️\u200d♂️",
+ "man_lifting_weights_dark_skin_tone": "🏋🏿\u200d♂️",
+ "man_lifting_weights_light_skin_tone": "🏋🏻\u200d♂️",
+ "man_lifting_weights_medium-dark_skin_tone": "🏋🏾\u200d♂️",
+ "man_lifting_weights_medium-light_skin_tone": "🏋🏼\u200d♂️",
+ "man_lifting_weights_medium_skin_tone": "🏋🏽\u200d♂️",
+ "man_light_skin_tone": "👨🏻",
+ "man_mage": "🧙\u200d♂️",
+ "man_mage_dark_skin_tone": "🧙🏿\u200d♂️",
+ "man_mage_light_skin_tone": "🧙🏻\u200d♂️",
+ "man_mage_medium-dark_skin_tone": "🧙🏾\u200d♂️",
+ "man_mage_medium-light_skin_tone": "🧙🏼\u200d♂️",
+ "man_mage_medium_skin_tone": "🧙🏽\u200d♂️",
+ "man_mechanic": "👨\u200d🔧",
+ "man_mechanic_dark_skin_tone": "👨🏿\u200d🔧",
+ "man_mechanic_light_skin_tone": "👨🏻\u200d🔧",
+ "man_mechanic_medium-dark_skin_tone": "👨🏾\u200d🔧",
+ "man_mechanic_medium-light_skin_tone": "👨🏼\u200d🔧",
+ "man_mechanic_medium_skin_tone": "👨🏽\u200d🔧",
+ "man_medium-dark_skin_tone": "👨🏾",
+ "man_medium-light_skin_tone": "👨🏼",
+ "man_medium_skin_tone": "👨🏽",
+ "man_mountain_biking": "🚵\u200d♂️",
+ "man_mountain_biking_dark_skin_tone": "🚵🏿\u200d♂️",
+ "man_mountain_biking_light_skin_tone": "🚵🏻\u200d♂️",
+ "man_mountain_biking_medium-dark_skin_tone": "🚵🏾\u200d♂️",
+ "man_mountain_biking_medium-light_skin_tone": "🚵🏼\u200d♂️",
+ "man_mountain_biking_medium_skin_tone": "🚵🏽\u200d♂️",
+ "man_office_worker": "👨\u200d💼",
+ "man_office_worker_dark_skin_tone": "👨🏿\u200d💼",
+ "man_office_worker_light_skin_tone": "👨🏻\u200d💼",
+ "man_office_worker_medium-dark_skin_tone": "👨🏾\u200d💼",
+ "man_office_worker_medium-light_skin_tone": "👨🏼\u200d💼",
+ "man_office_worker_medium_skin_tone": "👨🏽\u200d💼",
+ "man_pilot": "👨\u200d✈️",
+ "man_pilot_dark_skin_tone": "👨🏿\u200d✈️",
+ "man_pilot_light_skin_tone": "👨🏻\u200d✈️",
+ "man_pilot_medium-dark_skin_tone": "👨🏾\u200d✈️",
+ "man_pilot_medium-light_skin_tone": "👨🏼\u200d✈️",
+ "man_pilot_medium_skin_tone": "👨🏽\u200d✈️",
+ "man_playing_handball": "🤾\u200d♂️",
+ "man_playing_handball_dark_skin_tone": "🤾🏿\u200d♂️",
+ "man_playing_handball_light_skin_tone": "🤾🏻\u200d♂️",
+ "man_playing_handball_medium-dark_skin_tone": "🤾🏾\u200d♂️",
+ "man_playing_handball_medium-light_skin_tone": "🤾🏼\u200d♂️",
+ "man_playing_handball_medium_skin_tone": "🤾🏽\u200d♂️",
+ "man_playing_water_polo": "🤽\u200d♂️",
+ "man_playing_water_polo_dark_skin_tone": "🤽🏿\u200d♂️",
+ "man_playing_water_polo_light_skin_tone": "🤽🏻\u200d♂️",
+ "man_playing_water_polo_medium-dark_skin_tone": "🤽🏾\u200d♂️",
+ "man_playing_water_polo_medium-light_skin_tone": "🤽🏼\u200d♂️",
+ "man_playing_water_polo_medium_skin_tone": "🤽🏽\u200d♂️",
+ "man_police_officer": "👮\u200d♂️",
+ "man_police_officer_dark_skin_tone": "👮🏿\u200d♂️",
+ "man_police_officer_light_skin_tone": "👮🏻\u200d♂️",
+ "man_police_officer_medium-dark_skin_tone": "👮🏾\u200d♂️",
+ "man_police_officer_medium-light_skin_tone": "👮🏼\u200d♂️",
+ "man_police_officer_medium_skin_tone": "👮🏽\u200d♂️",
+ "man_pouting": "🙎\u200d♂️",
+ "man_pouting_dark_skin_tone": "🙎🏿\u200d♂️",
+ "man_pouting_light_skin_tone": "🙎🏻\u200d♂️",
+ "man_pouting_medium-dark_skin_tone": "🙎🏾\u200d♂️",
+ "man_pouting_medium-light_skin_tone": "🙎🏼\u200d♂️",
+ "man_pouting_medium_skin_tone": "🙎🏽\u200d♂️",
+ "man_raising_hand": "🙋\u200d♂️",
+ "man_raising_hand_dark_skin_tone": "🙋🏿\u200d♂️",
+ "man_raising_hand_light_skin_tone": "🙋🏻\u200d♂️",
+ "man_raising_hand_medium-dark_skin_tone": "🙋🏾\u200d♂️",
+ "man_raising_hand_medium-light_skin_tone": "🙋🏼\u200d♂️",
+ "man_raising_hand_medium_skin_tone": "🙋🏽\u200d♂️",
+ "man_rowing_boat": "🚣\u200d♂️",
+ "man_rowing_boat_dark_skin_tone": "🚣🏿\u200d♂️",
+ "man_rowing_boat_light_skin_tone": "🚣🏻\u200d♂️",
+ "man_rowing_boat_medium-dark_skin_tone": "🚣🏾\u200d♂️",
+ "man_rowing_boat_medium-light_skin_tone": "🚣🏼\u200d♂️",
+ "man_rowing_boat_medium_skin_tone": "🚣🏽\u200d♂️",
+ "man_running": "🏃\u200d♂️",
+ "man_running_dark_skin_tone": "🏃🏿\u200d♂️",
+ "man_running_light_skin_tone": "🏃🏻\u200d♂️",
+ "man_running_medium-dark_skin_tone": "🏃🏾\u200d♂️",
+ "man_running_medium-light_skin_tone": "🏃🏼\u200d♂️",
+ "man_running_medium_skin_tone": "🏃🏽\u200d♂️",
+ "man_scientist": "👨\u200d🔬",
+ "man_scientist_dark_skin_tone": "👨🏿\u200d🔬",
+ "man_scientist_light_skin_tone": "👨🏻\u200d🔬",
+ "man_scientist_medium-dark_skin_tone": "👨🏾\u200d🔬",
+ "man_scientist_medium-light_skin_tone": "👨🏼\u200d🔬",
+ "man_scientist_medium_skin_tone": "👨🏽\u200d🔬",
+ "man_shrugging": "🤷\u200d♂️",
+ "man_shrugging_dark_skin_tone": "🤷🏿\u200d♂️",
+ "man_shrugging_light_skin_tone": "🤷🏻\u200d♂️",
+ "man_shrugging_medium-dark_skin_tone": "🤷🏾\u200d♂️",
+ "man_shrugging_medium-light_skin_tone": "🤷🏼\u200d♂️",
+ "man_shrugging_medium_skin_tone": "🤷🏽\u200d♂️",
+ "man_singer": "👨\u200d🎤",
+ "man_singer_dark_skin_tone": "👨🏿\u200d🎤",
+ "man_singer_light_skin_tone": "👨🏻\u200d🎤",
+ "man_singer_medium-dark_skin_tone": "👨🏾\u200d🎤",
+ "man_singer_medium-light_skin_tone": "👨🏼\u200d🎤",
+ "man_singer_medium_skin_tone": "👨🏽\u200d🎤",
+ "man_student": "👨\u200d🎓",
+ "man_student_dark_skin_tone": "👨🏿\u200d🎓",
+ "man_student_light_skin_tone": "👨🏻\u200d🎓",
+ "man_student_medium-dark_skin_tone": "👨🏾\u200d🎓",
+ "man_student_medium-light_skin_tone": "👨🏼\u200d🎓",
+ "man_student_medium_skin_tone": "👨🏽\u200d🎓",
+ "man_surfing": "🏄\u200d♂️",
+ "man_surfing_dark_skin_tone": "🏄🏿\u200d♂️",
+ "man_surfing_light_skin_tone": "🏄🏻\u200d♂️",
+ "man_surfing_medium-dark_skin_tone": "🏄🏾\u200d♂️",
+ "man_surfing_medium-light_skin_tone": "🏄🏼\u200d♂️",
+ "man_surfing_medium_skin_tone": "🏄🏽\u200d♂️",
+ "man_swimming": "🏊\u200d♂️",
+ "man_swimming_dark_skin_tone": "🏊🏿\u200d♂️",
+ "man_swimming_light_skin_tone": "🏊🏻\u200d♂️",
+ "man_swimming_medium-dark_skin_tone": "🏊🏾\u200d♂️",
+ "man_swimming_medium-light_skin_tone": "🏊🏼\u200d♂️",
+ "man_swimming_medium_skin_tone": "🏊🏽\u200d♂️",
+ "man_teacher": "👨\u200d🏫",
+ "man_teacher_dark_skin_tone": "👨🏿\u200d🏫",
+ "man_teacher_light_skin_tone": "👨🏻\u200d🏫",
+ "man_teacher_medium-dark_skin_tone": "👨🏾\u200d🏫",
+ "man_teacher_medium-light_skin_tone": "👨🏼\u200d🏫",
+ "man_teacher_medium_skin_tone": "👨🏽\u200d🏫",
+ "man_technologist": "👨\u200d💻",
+ "man_technologist_dark_skin_tone": "👨🏿\u200d💻",
+ "man_technologist_light_skin_tone": "👨🏻\u200d💻",
+ "man_technologist_medium-dark_skin_tone": "👨🏾\u200d💻",
+ "man_technologist_medium-light_skin_tone": "👨🏼\u200d💻",
+ "man_technologist_medium_skin_tone": "👨🏽\u200d💻",
+ "man_tipping_hand": "💁\u200d♂️",
+ "man_tipping_hand_dark_skin_tone": "💁🏿\u200d♂️",
+ "man_tipping_hand_light_skin_tone": "💁🏻\u200d♂️",
+ "man_tipping_hand_medium-dark_skin_tone": "💁🏾\u200d♂️",
+ "man_tipping_hand_medium-light_skin_tone": "💁🏼\u200d♂️",
+ "man_tipping_hand_medium_skin_tone": "💁🏽\u200d♂️",
+ "man_vampire": "🧛\u200d♂️",
+ "man_vampire_dark_skin_tone": "🧛🏿\u200d♂️",
+ "man_vampire_light_skin_tone": "🧛🏻\u200d♂️",
+ "man_vampire_medium-dark_skin_tone": "🧛🏾\u200d♂️",
+ "man_vampire_medium-light_skin_tone": "🧛🏼\u200d♂️",
+ "man_vampire_medium_skin_tone": "🧛🏽\u200d♂️",
+ "man_walking": "🚶\u200d♂️",
+ "man_walking_dark_skin_tone": "🚶🏿\u200d♂️",
+ "man_walking_light_skin_tone": "🚶🏻\u200d♂️",
+ "man_walking_medium-dark_skin_tone": "🚶🏾\u200d♂️",
+ "man_walking_medium-light_skin_tone": "🚶🏼\u200d♂️",
+ "man_walking_medium_skin_tone": "🚶🏽\u200d♂️",
+ "man_wearing_turban": "👳\u200d♂️",
+ "man_wearing_turban_dark_skin_tone": "👳🏿\u200d♂️",
+ "man_wearing_turban_light_skin_tone": "👳🏻\u200d♂️",
+ "man_wearing_turban_medium-dark_skin_tone": "👳🏾\u200d♂️",
+ "man_wearing_turban_medium-light_skin_tone": "👳🏼\u200d♂️",
+ "man_wearing_turban_medium_skin_tone": "👳🏽\u200d♂️",
+ "man_with_probing_cane": "👨\u200d🦯",
+ "man_with_chinese_cap": "👲",
+ "man_with_chinese_cap_dark_skin_tone": "👲🏿",
+ "man_with_chinese_cap_light_skin_tone": "👲🏻",
+ "man_with_chinese_cap_medium-dark_skin_tone": "👲🏾",
+ "man_with_chinese_cap_medium-light_skin_tone": "👲🏼",
+ "man_with_chinese_cap_medium_skin_tone": "👲🏽",
+ "man_zombie": "🧟\u200d♂️",
+ "mango": "🥭",
+ "mantelpiece_clock": "🕰",
+ "manual_wheelchair": "🦽",
+ "man’s_shoe": "👞",
+ "map_of_japan": "🗾",
+ "maple_leaf": "🍁",
+ "martial_arts_uniform": "🥋",
+ "mate": "🧉",
+ "meat_on_bone": "🍖",
+ "mechanical_arm": "🦾",
+ "mechanical_leg": "🦿",
+ "medical_symbol": "⚕",
+ "megaphone": "📣",
+ "melon": "🍈",
+ "memo": "📝",
+ "men_with_bunny_ears": "👯\u200d♂️",
+ "men_wrestling": "🤼\u200d♂️",
+ "menorah": "🕎",
+ "men’s_room": "🚹",
+ "mermaid": "🧜\u200d♀️",
+ "mermaid_dark_skin_tone": "🧜🏿\u200d♀️",
+ "mermaid_light_skin_tone": "🧜🏻\u200d♀️",
+ "mermaid_medium-dark_skin_tone": "🧜🏾\u200d♀️",
+ "mermaid_medium-light_skin_tone": "🧜🏼\u200d♀️",
+ "mermaid_medium_skin_tone": "🧜🏽\u200d♀️",
+ "merman": "🧜\u200d♂️",
+ "merman_dark_skin_tone": "🧜🏿\u200d♂️",
+ "merman_light_skin_tone": "🧜🏻\u200d♂️",
+ "merman_medium-dark_skin_tone": "🧜🏾\u200d♂️",
+ "merman_medium-light_skin_tone": "🧜🏼\u200d♂️",
+ "merman_medium_skin_tone": "🧜🏽\u200d♂️",
+ "merperson": "🧜",
+ "merperson_dark_skin_tone": "🧜🏿",
+ "merperson_light_skin_tone": "🧜🏻",
+ "merperson_medium-dark_skin_tone": "🧜🏾",
+ "merperson_medium-light_skin_tone": "🧜🏼",
+ "merperson_medium_skin_tone": "🧜🏽",
+ "metro": "🚇",
+ "microbe": "🦠",
+ "microphone": "🎤",
+ "microscope": "🔬",
+ "middle_finger": "🖕",
+ "middle_finger_dark_skin_tone": "🖕🏿",
+ "middle_finger_light_skin_tone": "🖕🏻",
+ "middle_finger_medium-dark_skin_tone": "🖕🏾",
+ "middle_finger_medium-light_skin_tone": "🖕🏼",
+ "middle_finger_medium_skin_tone": "🖕🏽",
+ "military_medal": "🎖",
+ "milky_way": "🌌",
+ "minibus": "🚐",
+ "moai": "🗿",
+ "mobile_phone": "📱",
+ "mobile_phone_off": "📴",
+ "mobile_phone_with_arrow": "📲",
+ "money-mouth_face": "🤑",
+ "money_bag": "💰",
+ "money_with_wings": "💸",
+ "monkey": "🐒",
+ "monkey_face": "🐵",
+ "monorail": "🚝",
+ "moon_cake": "🥮",
+ "moon_viewing_ceremony": "🎑",
+ "mosque": "🕌",
+ "mosquito": "🦟",
+ "motor_boat": "🛥",
+ "motor_scooter": "🛵",
+ "motorcycle": "🏍",
+ "motorized_wheelchair": "🦼",
+ "motorway": "🛣",
+ "mount_fuji": "🗻",
+ "mountain": "⛰",
+ "mountain_cableway": "🚠",
+ "mountain_railway": "🚞",
+ "mouse": "🐭",
+ "mouse_face": "🐭",
+ "mouth": "👄",
+ "movie_camera": "🎥",
+ "mushroom": "🍄",
+ "musical_keyboard": "🎹",
+ "musical_note": "🎵",
+ "musical_notes": "🎶",
+ "musical_score": "🎼",
+ "muted_speaker": "🔇",
+ "nail_polish": "💅",
+ "nail_polish_dark_skin_tone": "💅🏿",
+ "nail_polish_light_skin_tone": "💅🏻",
+ "nail_polish_medium-dark_skin_tone": "💅🏾",
+ "nail_polish_medium-light_skin_tone": "💅🏼",
+ "nail_polish_medium_skin_tone": "💅🏽",
+ "name_badge": "📛",
+ "national_park": "🏞",
+ "nauseated_face": "🤢",
+ "nazar_amulet": "🧿",
+ "necktie": "👔",
+ "nerd_face": "🤓",
+ "neutral_face": "😐",
+ "new_moon": "🌑",
+ "new_moon_face": "🌚",
+ "newspaper": "📰",
+ "next_track_button": "⏭",
+ "night_with_stars": "🌃",
+ "nine-thirty": "🕤",
+ "nine_o’clock": "🕘",
+ "no_bicycles": "🚳",
+ "no_entry": "⛔",
+ "no_littering": "🚯",
+ "no_mobile_phones": "📵",
+ "no_one_under_eighteen": "🔞",
+ "no_pedestrians": "🚷",
+ "no_smoking": "🚭",
+ "non-potable_water": "🚱",
+ "nose": "👃",
+ "nose_dark_skin_tone": "👃🏿",
+ "nose_light_skin_tone": "👃🏻",
+ "nose_medium-dark_skin_tone": "👃🏾",
+ "nose_medium-light_skin_tone": "👃🏼",
+ "nose_medium_skin_tone": "👃🏽",
+ "notebook": "📓",
+ "notebook_with_decorative_cover": "📔",
+ "nut_and_bolt": "🔩",
+ "octopus": "🐙",
+ "oden": "🍢",
+ "office_building": "🏢",
+ "ogre": "👹",
+ "oil_drum": "🛢",
+ "old_key": "🗝",
+ "old_man": "👴",
+ "old_man_dark_skin_tone": "👴🏿",
+ "old_man_light_skin_tone": "👴🏻",
+ "old_man_medium-dark_skin_tone": "👴🏾",
+ "old_man_medium-light_skin_tone": "👴🏼",
+ "old_man_medium_skin_tone": "👴🏽",
+ "old_woman": "👵",
+ "old_woman_dark_skin_tone": "👵🏿",
+ "old_woman_light_skin_tone": "👵🏻",
+ "old_woman_medium-dark_skin_tone": "👵🏾",
+ "old_woman_medium-light_skin_tone": "👵🏼",
+ "old_woman_medium_skin_tone": "👵🏽",
+ "older_adult": "🧓",
+ "older_adult_dark_skin_tone": "🧓🏿",
+ "older_adult_light_skin_tone": "🧓🏻",
+ "older_adult_medium-dark_skin_tone": "🧓🏾",
+ "older_adult_medium-light_skin_tone": "🧓🏼",
+ "older_adult_medium_skin_tone": "🧓🏽",
+ "om": "🕉",
+ "oncoming_automobile": "🚘",
+ "oncoming_bus": "🚍",
+ "oncoming_fist": "👊",
+ "oncoming_fist_dark_skin_tone": "👊🏿",
+ "oncoming_fist_light_skin_tone": "👊🏻",
+ "oncoming_fist_medium-dark_skin_tone": "👊🏾",
+ "oncoming_fist_medium-light_skin_tone": "👊🏼",
+ "oncoming_fist_medium_skin_tone": "👊🏽",
+ "oncoming_police_car": "🚔",
+ "oncoming_taxi": "🚖",
+ "one-piece_swimsuit": "🩱",
+ "one-thirty": "🕜",
+ "one_o’clock": "🕐",
+ "onion": "🧅",
+ "open_book": "📖",
+ "open_file_folder": "📂",
+ "open_hands": "👐",
+ "open_hands_dark_skin_tone": "👐🏿",
+ "open_hands_light_skin_tone": "👐🏻",
+ "open_hands_medium-dark_skin_tone": "👐🏾",
+ "open_hands_medium-light_skin_tone": "👐🏼",
+ "open_hands_medium_skin_tone": "👐🏽",
+ "open_mailbox_with_lowered_flag": "📭",
+ "open_mailbox_with_raised_flag": "📬",
+ "optical_disk": "💿",
+ "orange_book": "📙",
+ "orange_circle": "🟠",
+ "orange_heart": "🧡",
+ "orange_square": "🟧",
+ "orangutan": "🦧",
+ "orthodox_cross": "☦",
+ "otter": "🦦",
+ "outbox_tray": "📤",
+ "owl": "🦉",
+ "ox": "🐂",
+ "oyster": "🦪",
+ "package": "📦",
+ "page_facing_up": "📄",
+ "page_with_curl": "📃",
+ "pager": "📟",
+ "paintbrush": "🖌",
+ "palm_tree": "🌴",
+ "palms_up_together": "🤲",
+ "palms_up_together_dark_skin_tone": "🤲🏿",
+ "palms_up_together_light_skin_tone": "🤲🏻",
+ "palms_up_together_medium-dark_skin_tone": "🤲🏾",
+ "palms_up_together_medium-light_skin_tone": "🤲🏼",
+ "palms_up_together_medium_skin_tone": "🤲🏽",
+ "pancakes": "🥞",
+ "panda_face": "🐼",
+ "paperclip": "📎",
+ "parrot": "🦜",
+ "part_alternation_mark": "〽",
+ "party_popper": "🎉",
+ "partying_face": "🥳",
+ "passenger_ship": "🛳",
+ "passport_control": "🛂",
+ "pause_button": "⏸",
+ "paw_prints": "🐾",
+ "peace_symbol": "☮",
+ "peach": "🍑",
+ "peacock": "🦚",
+ "peanuts": "🥜",
+ "pear": "🍐",
+ "pen": "🖊",
+ "pencil": "📝",
+ "penguin": "🐧",
+ "pensive_face": "😔",
+ "people_holding_hands": "🧑\u200d🤝\u200d🧑",
+ "people_with_bunny_ears": "👯",
+ "people_wrestling": "🤼",
+ "performing_arts": "🎭",
+ "persevering_face": "😣",
+ "person_biking": "🚴",
+ "person_biking_dark_skin_tone": "🚴🏿",
+ "person_biking_light_skin_tone": "🚴🏻",
+ "person_biking_medium-dark_skin_tone": "🚴🏾",
+ "person_biking_medium-light_skin_tone": "🚴🏼",
+ "person_biking_medium_skin_tone": "🚴🏽",
+ "person_bouncing_ball": "⛹",
+ "person_bouncing_ball_dark_skin_tone": "⛹🏿",
+ "person_bouncing_ball_light_skin_tone": "⛹🏻",
+ "person_bouncing_ball_medium-dark_skin_tone": "⛹🏾",
+ "person_bouncing_ball_medium-light_skin_tone": "⛹🏼",
+ "person_bouncing_ball_medium_skin_tone": "⛹🏽",
+ "person_bowing": "🙇",
+ "person_bowing_dark_skin_tone": "🙇🏿",
+ "person_bowing_light_skin_tone": "🙇🏻",
+ "person_bowing_medium-dark_skin_tone": "🙇🏾",
+ "person_bowing_medium-light_skin_tone": "🙇🏼",
+ "person_bowing_medium_skin_tone": "🙇🏽",
+ "person_cartwheeling": "🤸",
+ "person_cartwheeling_dark_skin_tone": "🤸🏿",
+ "person_cartwheeling_light_skin_tone": "🤸🏻",
+ "person_cartwheeling_medium-dark_skin_tone": "🤸🏾",
+ "person_cartwheeling_medium-light_skin_tone": "🤸🏼",
+ "person_cartwheeling_medium_skin_tone": "🤸🏽",
+ "person_climbing": "🧗",
+ "person_climbing_dark_skin_tone": "🧗🏿",
+ "person_climbing_light_skin_tone": "🧗🏻",
+ "person_climbing_medium-dark_skin_tone": "🧗🏾",
+ "person_climbing_medium-light_skin_tone": "🧗🏼",
+ "person_climbing_medium_skin_tone": "🧗🏽",
+ "person_facepalming": "🤦",
+ "person_facepalming_dark_skin_tone": "🤦🏿",
+ "person_facepalming_light_skin_tone": "🤦🏻",
+ "person_facepalming_medium-dark_skin_tone": "🤦🏾",
+ "person_facepalming_medium-light_skin_tone": "🤦🏼",
+ "person_facepalming_medium_skin_tone": "🤦🏽",
+ "person_fencing": "🤺",
+ "person_frowning": "🙍",
+ "person_frowning_dark_skin_tone": "🙍🏿",
+ "person_frowning_light_skin_tone": "🙍🏻",
+ "person_frowning_medium-dark_skin_tone": "🙍🏾",
+ "person_frowning_medium-light_skin_tone": "🙍🏼",
+ "person_frowning_medium_skin_tone": "🙍🏽",
+ "person_gesturing_no": "🙅",
+ "person_gesturing_no_dark_skin_tone": "🙅🏿",
+ "person_gesturing_no_light_skin_tone": "🙅🏻",
+ "person_gesturing_no_medium-dark_skin_tone": "🙅🏾",
+ "person_gesturing_no_medium-light_skin_tone": "🙅🏼",
+ "person_gesturing_no_medium_skin_tone": "🙅🏽",
+ "person_gesturing_ok": "🙆",
+ "person_gesturing_ok_dark_skin_tone": "🙆🏿",
+ "person_gesturing_ok_light_skin_tone": "🙆🏻",
+ "person_gesturing_ok_medium-dark_skin_tone": "🙆🏾",
+ "person_gesturing_ok_medium-light_skin_tone": "🙆🏼",
+ "person_gesturing_ok_medium_skin_tone": "🙆🏽",
+ "person_getting_haircut": "💇",
+ "person_getting_haircut_dark_skin_tone": "💇🏿",
+ "person_getting_haircut_light_skin_tone": "💇🏻",
+ "person_getting_haircut_medium-dark_skin_tone": "💇🏾",
+ "person_getting_haircut_medium-light_skin_tone": "💇🏼",
+ "person_getting_haircut_medium_skin_tone": "💇🏽",
+ "person_getting_massage": "💆",
+ "person_getting_massage_dark_skin_tone": "💆🏿",
+ "person_getting_massage_light_skin_tone": "💆🏻",
+ "person_getting_massage_medium-dark_skin_tone": "💆🏾",
+ "person_getting_massage_medium-light_skin_tone": "💆🏼",
+ "person_getting_massage_medium_skin_tone": "💆🏽",
+ "person_golfing": "🏌",
+ "person_golfing_dark_skin_tone": "🏌🏿",
+ "person_golfing_light_skin_tone": "🏌🏻",
+ "person_golfing_medium-dark_skin_tone": "🏌🏾",
+ "person_golfing_medium-light_skin_tone": "🏌🏼",
+ "person_golfing_medium_skin_tone": "🏌🏽",
+ "person_in_bed": "🛌",
+ "person_in_bed_dark_skin_tone": "🛌🏿",
+ "person_in_bed_light_skin_tone": "🛌🏻",
+ "person_in_bed_medium-dark_skin_tone": "🛌🏾",
+ "person_in_bed_medium-light_skin_tone": "🛌🏼",
+ "person_in_bed_medium_skin_tone": "🛌🏽",
+ "person_in_lotus_position": "🧘",
+ "person_in_lotus_position_dark_skin_tone": "🧘🏿",
+ "person_in_lotus_position_light_skin_tone": "🧘🏻",
+ "person_in_lotus_position_medium-dark_skin_tone": "🧘🏾",
+ "person_in_lotus_position_medium-light_skin_tone": "🧘🏼",
+ "person_in_lotus_position_medium_skin_tone": "🧘🏽",
+ "person_in_steamy_room": "🧖",
+ "person_in_steamy_room_dark_skin_tone": "🧖🏿",
+ "person_in_steamy_room_light_skin_tone": "🧖🏻",
+ "person_in_steamy_room_medium-dark_skin_tone": "🧖🏾",
+ "person_in_steamy_room_medium-light_skin_tone": "🧖🏼",
+ "person_in_steamy_room_medium_skin_tone": "🧖🏽",
+ "person_juggling": "🤹",
+ "person_juggling_dark_skin_tone": "🤹🏿",
+ "person_juggling_light_skin_tone": "🤹🏻",
+ "person_juggling_medium-dark_skin_tone": "🤹🏾",
+ "person_juggling_medium-light_skin_tone": "🤹🏼",
+ "person_juggling_medium_skin_tone": "🤹🏽",
+ "person_kneeling": "🧎",
+ "person_lifting_weights": "🏋",
+ "person_lifting_weights_dark_skin_tone": "🏋🏿",
+ "person_lifting_weights_light_skin_tone": "🏋🏻",
+ "person_lifting_weights_medium-dark_skin_tone": "🏋🏾",
+ "person_lifting_weights_medium-light_skin_tone": "🏋🏼",
+ "person_lifting_weights_medium_skin_tone": "🏋🏽",
+ "person_mountain_biking": "🚵",
+ "person_mountain_biking_dark_skin_tone": "🚵🏿",
+ "person_mountain_biking_light_skin_tone": "🚵🏻",
+ "person_mountain_biking_medium-dark_skin_tone": "🚵🏾",
+ "person_mountain_biking_medium-light_skin_tone": "🚵🏼",
+ "person_mountain_biking_medium_skin_tone": "🚵🏽",
+ "person_playing_handball": "🤾",
+ "person_playing_handball_dark_skin_tone": "🤾🏿",
+ "person_playing_handball_light_skin_tone": "🤾🏻",
+ "person_playing_handball_medium-dark_skin_tone": "🤾🏾",
+ "person_playing_handball_medium-light_skin_tone": "🤾🏼",
+ "person_playing_handball_medium_skin_tone": "🤾🏽",
+ "person_playing_water_polo": "🤽",
+ "person_playing_water_polo_dark_skin_tone": "🤽🏿",
+ "person_playing_water_polo_light_skin_tone": "🤽🏻",
+ "person_playing_water_polo_medium-dark_skin_tone": "🤽🏾",
+ "person_playing_water_polo_medium-light_skin_tone": "🤽🏼",
+ "person_playing_water_polo_medium_skin_tone": "🤽🏽",
+ "person_pouting": "🙎",
+ "person_pouting_dark_skin_tone": "🙎🏿",
+ "person_pouting_light_skin_tone": "🙎🏻",
+ "person_pouting_medium-dark_skin_tone": "🙎🏾",
+ "person_pouting_medium-light_skin_tone": "🙎🏼",
+ "person_pouting_medium_skin_tone": "🙎🏽",
+ "person_raising_hand": "🙋",
+ "person_raising_hand_dark_skin_tone": "🙋🏿",
+ "person_raising_hand_light_skin_tone": "🙋🏻",
+ "person_raising_hand_medium-dark_skin_tone": "🙋🏾",
+ "person_raising_hand_medium-light_skin_tone": "🙋🏼",
+ "person_raising_hand_medium_skin_tone": "🙋🏽",
+ "person_rowing_boat": "🚣",
+ "person_rowing_boat_dark_skin_tone": "🚣🏿",
+ "person_rowing_boat_light_skin_tone": "🚣🏻",
+ "person_rowing_boat_medium-dark_skin_tone": "🚣🏾",
+ "person_rowing_boat_medium-light_skin_tone": "🚣🏼",
+ "person_rowing_boat_medium_skin_tone": "🚣🏽",
+ "person_running": "🏃",
+ "person_running_dark_skin_tone": "🏃🏿",
+ "person_running_light_skin_tone": "🏃🏻",
+ "person_running_medium-dark_skin_tone": "🏃🏾",
+ "person_running_medium-light_skin_tone": "🏃🏼",
+ "person_running_medium_skin_tone": "🏃🏽",
+ "person_shrugging": "🤷",
+ "person_shrugging_dark_skin_tone": "🤷🏿",
+ "person_shrugging_light_skin_tone": "🤷🏻",
+ "person_shrugging_medium-dark_skin_tone": "🤷🏾",
+ "person_shrugging_medium-light_skin_tone": "🤷🏼",
+ "person_shrugging_medium_skin_tone": "🤷🏽",
+ "person_standing": "🧍",
+ "person_surfing": "🏄",
+ "person_surfing_dark_skin_tone": "🏄🏿",
+ "person_surfing_light_skin_tone": "🏄🏻",
+ "person_surfing_medium-dark_skin_tone": "🏄🏾",
+ "person_surfing_medium-light_skin_tone": "🏄🏼",
+ "person_surfing_medium_skin_tone": "🏄🏽",
+ "person_swimming": "🏊",
+ "person_swimming_dark_skin_tone": "🏊🏿",
+ "person_swimming_light_skin_tone": "🏊🏻",
+ "person_swimming_medium-dark_skin_tone": "🏊🏾",
+ "person_swimming_medium-light_skin_tone": "🏊🏼",
+ "person_swimming_medium_skin_tone": "🏊🏽",
+ "person_taking_bath": "🛀",
+ "person_taking_bath_dark_skin_tone": "🛀🏿",
+ "person_taking_bath_light_skin_tone": "🛀🏻",
+ "person_taking_bath_medium-dark_skin_tone": "🛀🏾",
+ "person_taking_bath_medium-light_skin_tone": "🛀🏼",
+ "person_taking_bath_medium_skin_tone": "🛀🏽",
+ "person_tipping_hand": "💁",
+ "person_tipping_hand_dark_skin_tone": "💁🏿",
+ "person_tipping_hand_light_skin_tone": "💁🏻",
+ "person_tipping_hand_medium-dark_skin_tone": "💁🏾",
+ "person_tipping_hand_medium-light_skin_tone": "💁🏼",
+ "person_tipping_hand_medium_skin_tone": "💁🏽",
+ "person_walking": "🚶",
+ "person_walking_dark_skin_tone": "🚶🏿",
+ "person_walking_light_skin_tone": "🚶🏻",
+ "person_walking_medium-dark_skin_tone": "🚶🏾",
+ "person_walking_medium-light_skin_tone": "🚶🏼",
+ "person_walking_medium_skin_tone": "🚶🏽",
+ "person_wearing_turban": "👳",
+ "person_wearing_turban_dark_skin_tone": "👳🏿",
+ "person_wearing_turban_light_skin_tone": "👳🏻",
+ "person_wearing_turban_medium-dark_skin_tone": "👳🏾",
+ "person_wearing_turban_medium-light_skin_tone": "👳🏼",
+ "person_wearing_turban_medium_skin_tone": "👳🏽",
+ "petri_dish": "🧫",
+ "pick": "⛏",
+ "pie": "🥧",
+ "pig": "🐷",
+ "pig_face": "🐷",
+ "pig_nose": "🐽",
+ "pile_of_poo": "💩",
+ "pill": "💊",
+ "pinching_hand": "🤏",
+ "pine_decoration": "🎍",
+ "pineapple": "🍍",
+ "ping_pong": "🏓",
+ "pirate_flag": "🏴\u200d☠️",
+ "pistol": "🔫",
+ "pizza": "🍕",
+ "place_of_worship": "🛐",
+ "play_button": "▶",
+ "play_or_pause_button": "⏯",
+ "pleading_face": "🥺",
+ "police_car": "🚓",
+ "police_car_light": "🚨",
+ "police_officer": "👮",
+ "police_officer_dark_skin_tone": "👮🏿",
+ "police_officer_light_skin_tone": "👮🏻",
+ "police_officer_medium-dark_skin_tone": "👮🏾",
+ "police_officer_medium-light_skin_tone": "👮🏼",
+ "police_officer_medium_skin_tone": "👮🏽",
+ "poodle": "🐩",
+ "pool_8_ball": "🎱",
+ "popcorn": "🍿",
+ "post_office": "🏣",
+ "postal_horn": "📯",
+ "postbox": "📮",
+ "pot_of_food": "🍲",
+ "potable_water": "🚰",
+ "potato": "🥔",
+ "poultry_leg": "🍗",
+ "pound_banknote": "💷",
+ "pouting_cat_face": "😾",
+ "pouting_face": "😡",
+ "prayer_beads": "📿",
+ "pregnant_woman": "🤰",
+ "pregnant_woman_dark_skin_tone": "🤰🏿",
+ "pregnant_woman_light_skin_tone": "🤰🏻",
+ "pregnant_woman_medium-dark_skin_tone": "🤰🏾",
+ "pregnant_woman_medium-light_skin_tone": "🤰🏼",
+ "pregnant_woman_medium_skin_tone": "🤰🏽",
+ "pretzel": "🥨",
+ "probing_cane": "🦯",
+ "prince": "🤴",
+ "prince_dark_skin_tone": "🤴🏿",
+ "prince_light_skin_tone": "🤴🏻",
+ "prince_medium-dark_skin_tone": "🤴🏾",
+ "prince_medium-light_skin_tone": "🤴🏼",
+ "prince_medium_skin_tone": "🤴🏽",
+ "princess": "👸",
+ "princess_dark_skin_tone": "👸🏿",
+ "princess_light_skin_tone": "👸🏻",
+ "princess_medium-dark_skin_tone": "👸🏾",
+ "princess_medium-light_skin_tone": "👸🏼",
+ "princess_medium_skin_tone": "👸🏽",
+ "printer": "🖨",
+ "prohibited": "🚫",
+ "purple_circle": "🟣",
+ "purple_heart": "💜",
+ "purple_square": "🟪",
+ "purse": "👛",
+ "pushpin": "📌",
+ "question_mark": "❓",
+ "rabbit": "🐰",
+ "rabbit_face": "🐰",
+ "raccoon": "🦝",
+ "racing_car": "🏎",
+ "radio": "📻",
+ "radio_button": "🔘",
+ "radioactive": "☢",
+ "railway_car": "🚃",
+ "railway_track": "🛤",
+ "rainbow": "🌈",
+ "rainbow_flag": "🏳️\u200d🌈",
+ "raised_back_of_hand": "🤚",
+ "raised_back_of_hand_dark_skin_tone": "🤚🏿",
+ "raised_back_of_hand_light_skin_tone": "🤚🏻",
+ "raised_back_of_hand_medium-dark_skin_tone": "🤚🏾",
+ "raised_back_of_hand_medium-light_skin_tone": "🤚🏼",
+ "raised_back_of_hand_medium_skin_tone": "🤚🏽",
+ "raised_fist": "✊",
+ "raised_fist_dark_skin_tone": "✊🏿",
+ "raised_fist_light_skin_tone": "✊🏻",
+ "raised_fist_medium-dark_skin_tone": "✊🏾",
+ "raised_fist_medium-light_skin_tone": "✊🏼",
+ "raised_fist_medium_skin_tone": "✊🏽",
+ "raised_hand": "✋",
+ "raised_hand_dark_skin_tone": "✋🏿",
+ "raised_hand_light_skin_tone": "✋🏻",
+ "raised_hand_medium-dark_skin_tone": "✋🏾",
+ "raised_hand_medium-light_skin_tone": "✋🏼",
+ "raised_hand_medium_skin_tone": "✋🏽",
+ "raising_hands": "🙌",
+ "raising_hands_dark_skin_tone": "🙌🏿",
+ "raising_hands_light_skin_tone": "🙌🏻",
+ "raising_hands_medium-dark_skin_tone": "🙌🏾",
+ "raising_hands_medium-light_skin_tone": "🙌🏼",
+ "raising_hands_medium_skin_tone": "🙌🏽",
+ "ram": "🐏",
+ "rat": "🐀",
+ "razor": "🪒",
+ "ringed_planet": "🪐",
+ "receipt": "🧾",
+ "record_button": "⏺",
+ "recycling_symbol": "♻",
+ "red_apple": "🍎",
+ "red_circle": "🔴",
+ "red_envelope": "🧧",
+ "red_hair": "🦰",
+ "red-haired_man": "👨\u200d🦰",
+ "red-haired_woman": "👩\u200d🦰",
+ "red_heart": "❤",
+ "red_paper_lantern": "🏮",
+ "red_square": "🟥",
+ "red_triangle_pointed_down": "🔻",
+ "red_triangle_pointed_up": "🔺",
+ "registered": "®",
+ "relieved_face": "😌",
+ "reminder_ribbon": "🎗",
+ "repeat_button": "🔁",
+ "repeat_single_button": "🔂",
+ "rescue_worker’s_helmet": "⛑",
+ "restroom": "🚻",
+ "reverse_button": "◀",
+ "revolving_hearts": "💞",
+ "rhinoceros": "🦏",
+ "ribbon": "🎀",
+ "rice_ball": "🍙",
+ "rice_cracker": "🍘",
+ "right-facing_fist": "🤜",
+ "right-facing_fist_dark_skin_tone": "🤜🏿",
+ "right-facing_fist_light_skin_tone": "🤜🏻",
+ "right-facing_fist_medium-dark_skin_tone": "🤜🏾",
+ "right-facing_fist_medium-light_skin_tone": "🤜🏼",
+ "right-facing_fist_medium_skin_tone": "🤜🏽",
+ "right_anger_bubble": "🗯",
+ "right_arrow": "➡",
+ "right_arrow_curving_down": "⤵",
+ "right_arrow_curving_left": "↩",
+ "right_arrow_curving_up": "⤴",
+ "ring": "💍",
+ "roasted_sweet_potato": "🍠",
+ "robot_face": "🤖",
+ "rocket": "🚀",
+ "roll_of_paper": "🧻",
+ "rolled-up_newspaper": "🗞",
+ "roller_coaster": "🎢",
+ "rolling_on_the_floor_laughing": "🤣",
+ "rooster": "🐓",
+ "rose": "🌹",
+ "rosette": "🏵",
+ "round_pushpin": "📍",
+ "rugby_football": "🏉",
+ "running_shirt": "🎽",
+ "running_shoe": "👟",
+ "sad_but_relieved_face": "😥",
+ "safety_pin": "🧷",
+ "safety_vest": "🦺",
+ "salt": "🧂",
+ "sailboat": "⛵",
+ "sake": "🍶",
+ "sandwich": "🥪",
+ "sari": "🥻",
+ "satellite": "📡",
+ "satellite_antenna": "📡",
+ "sauropod": "🦕",
+ "saxophone": "🎷",
+ "scarf": "🧣",
+ "school": "🏫",
+ "school_backpack": "🎒",
+ "scissors": "✂",
+ "scorpion": "🦂",
+ "scroll": "📜",
+ "seat": "💺",
+ "see-no-evil_monkey": "🙈",
+ "seedling": "🌱",
+ "selfie": "🤳",
+ "selfie_dark_skin_tone": "🤳🏿",
+ "selfie_light_skin_tone": "🤳🏻",
+ "selfie_medium-dark_skin_tone": "🤳🏾",
+ "selfie_medium-light_skin_tone": "🤳🏼",
+ "selfie_medium_skin_tone": "🤳🏽",
+ "service_dog": "🐕\u200d🦺",
+ "seven-thirty": "🕢",
+ "seven_o’clock": "🕖",
+ "shallow_pan_of_food": "🥘",
+ "shamrock": "☘",
+ "shark": "🦈",
+ "shaved_ice": "🍧",
+ "sheaf_of_rice": "🌾",
+ "shield": "🛡",
+ "shinto_shrine": "⛩",
+ "ship": "🚢",
+ "shooting_star": "🌠",
+ "shopping_bags": "🛍",
+ "shopping_cart": "🛒",
+ "shortcake": "🍰",
+ "shorts": "🩳",
+ "shower": "🚿",
+ "shrimp": "🦐",
+ "shuffle_tracks_button": "🔀",
+ "shushing_face": "🤫",
+ "sign_of_the_horns": "🤘",
+ "sign_of_the_horns_dark_skin_tone": "🤘🏿",
+ "sign_of_the_horns_light_skin_tone": "🤘🏻",
+ "sign_of_the_horns_medium-dark_skin_tone": "🤘🏾",
+ "sign_of_the_horns_medium-light_skin_tone": "🤘🏼",
+ "sign_of_the_horns_medium_skin_tone": "🤘🏽",
+ "six-thirty": "🕡",
+ "six_o’clock": "🕕",
+ "skateboard": "🛹",
+ "skier": "⛷",
+ "skis": "🎿",
+ "skull": "💀",
+ "skull_and_crossbones": "☠",
+ "skunk": "🦨",
+ "sled": "🛷",
+ "sleeping_face": "😴",
+ "sleepy_face": "😪",
+ "slightly_frowning_face": "🙁",
+ "slightly_smiling_face": "🙂",
+ "slot_machine": "🎰",
+ "sloth": "🦥",
+ "small_airplane": "🛩",
+ "small_blue_diamond": "🔹",
+ "small_orange_diamond": "🔸",
+ "smiling_cat_face_with_heart-eyes": "😻",
+ "smiling_face": "☺",
+ "smiling_face_with_halo": "😇",
+ "smiling_face_with_3_hearts": "🥰",
+ "smiling_face_with_heart-eyes": "😍",
+ "smiling_face_with_horns": "😈",
+ "smiling_face_with_smiling_eyes": "😊",
+ "smiling_face_with_sunglasses": "😎",
+ "smirking_face": "😏",
+ "snail": "🐌",
+ "snake": "🐍",
+ "sneezing_face": "🤧",
+ "snow-capped_mountain": "🏔",
+ "snowboarder": "🏂",
+ "snowboarder_dark_skin_tone": "🏂🏿",
+ "snowboarder_light_skin_tone": "🏂🏻",
+ "snowboarder_medium-dark_skin_tone": "🏂🏾",
+ "snowboarder_medium-light_skin_tone": "🏂🏼",
+ "snowboarder_medium_skin_tone": "🏂🏽",
+ "snowflake": "❄",
+ "snowman": "☃",
+ "snowman_without_snow": "⛄",
+ "soap": "🧼",
+ "soccer_ball": "⚽",
+ "socks": "🧦",
+ "softball": "🥎",
+ "soft_ice_cream": "🍦",
+ "spade_suit": "♠",
+ "spaghetti": "🍝",
+ "sparkle": "❇",
+ "sparkler": "🎇",
+ "sparkles": "✨",
+ "sparkling_heart": "💖",
+ "speak-no-evil_monkey": "🙊",
+ "speaker_high_volume": "🔊",
+ "speaker_low_volume": "🔈",
+ "speaker_medium_volume": "🔉",
+ "speaking_head": "🗣",
+ "speech_balloon": "💬",
+ "speedboat": "🚤",
+ "spider": "🕷",
+ "spider_web": "🕸",
+ "spiral_calendar": "🗓",
+ "spiral_notepad": "🗒",
+ "spiral_shell": "🐚",
+ "spoon": "🥄",
+ "sponge": "🧽",
+ "sport_utility_vehicle": "🚙",
+ "sports_medal": "🏅",
+ "spouting_whale": "🐳",
+ "squid": "🦑",
+ "squinting_face_with_tongue": "😝",
+ "stadium": "🏟",
+ "star-struck": "🤩",
+ "star_and_crescent": "☪",
+ "star_of_david": "✡",
+ "station": "🚉",
+ "steaming_bowl": "🍜",
+ "stethoscope": "🩺",
+ "stop_button": "⏹",
+ "stop_sign": "🛑",
+ "stopwatch": "⏱",
+ "straight_ruler": "📏",
+ "strawberry": "🍓",
+ "studio_microphone": "🎙",
+ "stuffed_flatbread": "🥙",
+ "sun": "☀",
+ "sun_behind_cloud": "⛅",
+ "sun_behind_large_cloud": "🌥",
+ "sun_behind_rain_cloud": "🌦",
+ "sun_behind_small_cloud": "🌤",
+ "sun_with_face": "🌞",
+ "sunflower": "🌻",
+ "sunglasses": "😎",
+ "sunrise": "🌅",
+ "sunrise_over_mountains": "🌄",
+ "sunset": "🌇",
+ "superhero": "🦸",
+ "supervillain": "🦹",
+ "sushi": "🍣",
+ "suspension_railway": "🚟",
+ "swan": "🦢",
+ "sweat_droplets": "💦",
+ "synagogue": "🕍",
+ "syringe": "💉",
+ "t-shirt": "👕",
+ "taco": "🌮",
+ "takeout_box": "🥡",
+ "tanabata_tree": "🎋",
+ "tangerine": "🍊",
+ "taxi": "🚕",
+ "teacup_without_handle": "🍵",
+ "tear-off_calendar": "📆",
+ "teddy_bear": "🧸",
+ "telephone": "☎",
+ "telephone_receiver": "📞",
+ "telescope": "🔭",
+ "television": "📺",
+ "ten-thirty": "🕥",
+ "ten_o’clock": "🕙",
+ "tennis": "🎾",
+ "tent": "⛺",
+ "test_tube": "🧪",
+ "thermometer": "🌡",
+ "thinking_face": "🤔",
+ "thought_balloon": "💭",
+ "thread": "🧵",
+ "three-thirty": "🕞",
+ "three_o’clock": "🕒",
+ "thumbs_down": "👎",
+ "thumbs_down_dark_skin_tone": "👎🏿",
+ "thumbs_down_light_skin_tone": "👎🏻",
+ "thumbs_down_medium-dark_skin_tone": "👎🏾",
+ "thumbs_down_medium-light_skin_tone": "👎🏼",
+ "thumbs_down_medium_skin_tone": "👎🏽",
+ "thumbs_up": "👍",
+ "thumbs_up_dark_skin_tone": "👍🏿",
+ "thumbs_up_light_skin_tone": "👍🏻",
+ "thumbs_up_medium-dark_skin_tone": "👍🏾",
+ "thumbs_up_medium-light_skin_tone": "👍🏼",
+ "thumbs_up_medium_skin_tone": "👍🏽",
+ "ticket": "🎫",
+ "tiger": "🐯",
+ "tiger_face": "🐯",
+ "timer_clock": "⏲",
+ "tired_face": "😫",
+ "toolbox": "🧰",
+ "toilet": "🚽",
+ "tomato": "🍅",
+ "tongue": "👅",
+ "tooth": "🦷",
+ "top_hat": "🎩",
+ "tornado": "🌪",
+ "trackball": "🖲",
+ "tractor": "🚜",
+ "trade_mark": "™",
+ "train": "🚋",
+ "tram": "🚊",
+ "tram_car": "🚋",
+ "triangular_flag": "🚩",
+ "triangular_ruler": "📐",
+ "trident_emblem": "🔱",
+ "trolleybus": "🚎",
+ "trophy": "🏆",
+ "tropical_drink": "🍹",
+ "tropical_fish": "🐠",
+ "trumpet": "🎺",
+ "tulip": "🌷",
+ "tumbler_glass": "🥃",
+ "turtle": "🐢",
+ "twelve-thirty": "🕧",
+ "twelve_o’clock": "🕛",
+ "two-hump_camel": "🐫",
+ "two-thirty": "🕝",
+ "two_hearts": "💕",
+ "two_men_holding_hands": "👬",
+ "two_o’clock": "🕑",
+ "two_women_holding_hands": "👭",
+ "umbrella": "☂",
+ "umbrella_on_ground": "⛱",
+ "umbrella_with_rain_drops": "☔",
+ "unamused_face": "😒",
+ "unicorn_face": "🦄",
+ "unlocked": "🔓",
+ "up-down_arrow": "↕",
+ "up-left_arrow": "↖",
+ "up-right_arrow": "↗",
+ "up_arrow": "⬆",
+ "upside-down_face": "🙃",
+ "upwards_button": "🔼",
+ "vampire": "🧛",
+ "vampire_dark_skin_tone": "🧛🏿",
+ "vampire_light_skin_tone": "🧛🏻",
+ "vampire_medium-dark_skin_tone": "🧛🏾",
+ "vampire_medium-light_skin_tone": "🧛🏼",
+ "vampire_medium_skin_tone": "🧛🏽",
+ "vertical_traffic_light": "🚦",
+ "vibration_mode": "📳",
+ "victory_hand": "✌",
+ "victory_hand_dark_skin_tone": "✌🏿",
+ "victory_hand_light_skin_tone": "✌🏻",
+ "victory_hand_medium-dark_skin_tone": "✌🏾",
+ "victory_hand_medium-light_skin_tone": "✌🏼",
+ "victory_hand_medium_skin_tone": "✌🏽",
+ "video_camera": "📹",
+ "video_game": "🎮",
+ "videocassette": "📼",
+ "violin": "🎻",
+ "volcano": "🌋",
+ "volleyball": "🏐",
+ "vulcan_salute": "🖖",
+ "vulcan_salute_dark_skin_tone": "🖖🏿",
+ "vulcan_salute_light_skin_tone": "🖖🏻",
+ "vulcan_salute_medium-dark_skin_tone": "🖖🏾",
+ "vulcan_salute_medium-light_skin_tone": "🖖🏼",
+ "vulcan_salute_medium_skin_tone": "🖖🏽",
+ "waffle": "🧇",
+ "waning_crescent_moon": "🌘",
+ "waning_gibbous_moon": "🌖",
+ "warning": "⚠",
+ "wastebasket": "🗑",
+ "watch": "⌚",
+ "water_buffalo": "🐃",
+ "water_closet": "🚾",
+ "water_wave": "🌊",
+ "watermelon": "🍉",
+ "waving_hand": "👋",
+ "waving_hand_dark_skin_tone": "👋🏿",
+ "waving_hand_light_skin_tone": "👋🏻",
+ "waving_hand_medium-dark_skin_tone": "👋🏾",
+ "waving_hand_medium-light_skin_tone": "👋🏼",
+ "waving_hand_medium_skin_tone": "👋🏽",
+ "wavy_dash": "〰",
+ "waxing_crescent_moon": "🌒",
+ "waxing_gibbous_moon": "🌔",
+ "weary_cat_face": "🙀",
+ "weary_face": "😩",
+ "wedding": "💒",
+ "whale": "🐳",
+ "wheel_of_dharma": "☸",
+ "wheelchair_symbol": "♿",
+ "white_circle": "⚪",
+ "white_exclamation_mark": "❕",
+ "white_flag": "🏳",
+ "white_flower": "💮",
+ "white_hair": "🦳",
+ "white-haired_man": "👨\u200d🦳",
+ "white-haired_woman": "👩\u200d🦳",
+ "white_heart": "🤍",
+ "white_heavy_check_mark": "✅",
+ "white_large_square": "⬜",
+ "white_medium-small_square": "◽",
+ "white_medium_square": "◻",
+ "white_medium_star": "⭐",
+ "white_question_mark": "❔",
+ "white_small_square": "▫",
+ "white_square_button": "🔳",
+ "wilted_flower": "🥀",
+ "wind_chime": "🎐",
+ "wind_face": "🌬",
+ "wine_glass": "🍷",
+ "winking_face": "😉",
+ "winking_face_with_tongue": "😜",
+ "wolf_face": "🐺",
+ "woman": "👩",
+ "woman_artist": "👩\u200d🎨",
+ "woman_artist_dark_skin_tone": "👩🏿\u200d🎨",
+ "woman_artist_light_skin_tone": "👩🏻\u200d🎨",
+ "woman_artist_medium-dark_skin_tone": "👩🏾\u200d🎨",
+ "woman_artist_medium-light_skin_tone": "👩🏼\u200d🎨",
+ "woman_artist_medium_skin_tone": "👩🏽\u200d🎨",
+ "woman_astronaut": "👩\u200d🚀",
+ "woman_astronaut_dark_skin_tone": "👩🏿\u200d🚀",
+ "woman_astronaut_light_skin_tone": "👩🏻\u200d🚀",
+ "woman_astronaut_medium-dark_skin_tone": "👩🏾\u200d🚀",
+ "woman_astronaut_medium-light_skin_tone": "👩🏼\u200d🚀",
+ "woman_astronaut_medium_skin_tone": "👩🏽\u200d🚀",
+ "woman_biking": "🚴\u200d♀️",
+ "woman_biking_dark_skin_tone": "🚴🏿\u200d♀️",
+ "woman_biking_light_skin_tone": "🚴🏻\u200d♀️",
+ "woman_biking_medium-dark_skin_tone": "🚴🏾\u200d♀️",
+ "woman_biking_medium-light_skin_tone": "🚴🏼\u200d♀️",
+ "woman_biking_medium_skin_tone": "🚴🏽\u200d♀️",
+ "woman_bouncing_ball": "⛹️\u200d♀️",
+ "woman_bouncing_ball_dark_skin_tone": "⛹🏿\u200d♀️",
+ "woman_bouncing_ball_light_skin_tone": "⛹🏻\u200d♀️",
+ "woman_bouncing_ball_medium-dark_skin_tone": "⛹🏾\u200d♀️",
+ "woman_bouncing_ball_medium-light_skin_tone": "⛹🏼\u200d♀️",
+ "woman_bouncing_ball_medium_skin_tone": "⛹🏽\u200d♀️",
+ "woman_bowing": "🙇\u200d♀️",
+ "woman_bowing_dark_skin_tone": "🙇🏿\u200d♀️",
+ "woman_bowing_light_skin_tone": "🙇🏻\u200d♀️",
+ "woman_bowing_medium-dark_skin_tone": "🙇🏾\u200d♀️",
+ "woman_bowing_medium-light_skin_tone": "🙇🏼\u200d♀️",
+ "woman_bowing_medium_skin_tone": "🙇🏽\u200d♀️",
+ "woman_cartwheeling": "🤸\u200d♀️",
+ "woman_cartwheeling_dark_skin_tone": "🤸🏿\u200d♀️",
+ "woman_cartwheeling_light_skin_tone": "🤸🏻\u200d♀️",
+ "woman_cartwheeling_medium-dark_skin_tone": "🤸🏾\u200d♀️",
+ "woman_cartwheeling_medium-light_skin_tone": "🤸🏼\u200d♀️",
+ "woman_cartwheeling_medium_skin_tone": "🤸🏽\u200d♀️",
+ "woman_climbing": "🧗\u200d♀️",
+ "woman_climbing_dark_skin_tone": "🧗🏿\u200d♀️",
+ "woman_climbing_light_skin_tone": "🧗🏻\u200d♀️",
+ "woman_climbing_medium-dark_skin_tone": "🧗🏾\u200d♀️",
+ "woman_climbing_medium-light_skin_tone": "🧗🏼\u200d♀️",
+ "woman_climbing_medium_skin_tone": "🧗🏽\u200d♀️",
+ "woman_construction_worker": "👷\u200d♀️",
+ "woman_construction_worker_dark_skin_tone": "👷🏿\u200d♀️",
+ "woman_construction_worker_light_skin_tone": "👷🏻\u200d♀️",
+ "woman_construction_worker_medium-dark_skin_tone": "👷🏾\u200d♀️",
+ "woman_construction_worker_medium-light_skin_tone": "👷🏼\u200d♀️",
+ "woman_construction_worker_medium_skin_tone": "👷🏽\u200d♀️",
+ "woman_cook": "👩\u200d🍳",
+ "woman_cook_dark_skin_tone": "👩🏿\u200d🍳",
+ "woman_cook_light_skin_tone": "👩🏻\u200d🍳",
+ "woman_cook_medium-dark_skin_tone": "👩🏾\u200d🍳",
+ "woman_cook_medium-light_skin_tone": "👩🏼\u200d🍳",
+ "woman_cook_medium_skin_tone": "👩🏽\u200d🍳",
+ "woman_dancing": "💃",
+ "woman_dancing_dark_skin_tone": "💃🏿",
+ "woman_dancing_light_skin_tone": "💃🏻",
+ "woman_dancing_medium-dark_skin_tone": "💃🏾",
+ "woman_dancing_medium-light_skin_tone": "💃🏼",
+ "woman_dancing_medium_skin_tone": "💃🏽",
+ "woman_dark_skin_tone": "👩🏿",
+ "woman_detective": "🕵️\u200d♀️",
+ "woman_detective_dark_skin_tone": "🕵🏿\u200d♀️",
+ "woman_detective_light_skin_tone": "🕵🏻\u200d♀️",
+ "woman_detective_medium-dark_skin_tone": "🕵🏾\u200d♀️",
+ "woman_detective_medium-light_skin_tone": "🕵🏼\u200d♀️",
+ "woman_detective_medium_skin_tone": "🕵🏽\u200d♀️",
+ "woman_elf": "🧝\u200d♀️",
+ "woman_elf_dark_skin_tone": "🧝🏿\u200d♀️",
+ "woman_elf_light_skin_tone": "🧝🏻\u200d♀️",
+ "woman_elf_medium-dark_skin_tone": "🧝🏾\u200d♀️",
+ "woman_elf_medium-light_skin_tone": "🧝🏼\u200d♀️",
+ "woman_elf_medium_skin_tone": "🧝🏽\u200d♀️",
+ "woman_facepalming": "🤦\u200d♀️",
+ "woman_facepalming_dark_skin_tone": "🤦🏿\u200d♀️",
+ "woman_facepalming_light_skin_tone": "🤦🏻\u200d♀️",
+ "woman_facepalming_medium-dark_skin_tone": "🤦🏾\u200d♀️",
+ "woman_facepalming_medium-light_skin_tone": "🤦🏼\u200d♀️",
+ "woman_facepalming_medium_skin_tone": "🤦🏽\u200d♀️",
+ "woman_factory_worker": "👩\u200d🏭",
+ "woman_factory_worker_dark_skin_tone": "👩🏿\u200d🏭",
+ "woman_factory_worker_light_skin_tone": "👩🏻\u200d🏭",
+ "woman_factory_worker_medium-dark_skin_tone": "👩🏾\u200d🏭",
+ "woman_factory_worker_medium-light_skin_tone": "👩🏼\u200d🏭",
+ "woman_factory_worker_medium_skin_tone": "👩🏽\u200d🏭",
+ "woman_fairy": "🧚\u200d♀️",
+ "woman_fairy_dark_skin_tone": "🧚🏿\u200d♀️",
+ "woman_fairy_light_skin_tone": "🧚🏻\u200d♀️",
+ "woman_fairy_medium-dark_skin_tone": "🧚🏾\u200d♀️",
+ "woman_fairy_medium-light_skin_tone": "🧚🏼\u200d♀️",
+ "woman_fairy_medium_skin_tone": "🧚🏽\u200d♀️",
+ "woman_farmer": "👩\u200d🌾",
+ "woman_farmer_dark_skin_tone": "👩🏿\u200d🌾",
+ "woman_farmer_light_skin_tone": "👩🏻\u200d🌾",
+ "woman_farmer_medium-dark_skin_tone": "👩🏾\u200d🌾",
+ "woman_farmer_medium-light_skin_tone": "👩🏼\u200d🌾",
+ "woman_farmer_medium_skin_tone": "👩🏽\u200d🌾",
+ "woman_firefighter": "👩\u200d🚒",
+ "woman_firefighter_dark_skin_tone": "👩🏿\u200d🚒",
+ "woman_firefighter_light_skin_tone": "👩🏻\u200d🚒",
+ "woman_firefighter_medium-dark_skin_tone": "👩🏾\u200d🚒",
+ "woman_firefighter_medium-light_skin_tone": "👩🏼\u200d🚒",
+ "woman_firefighter_medium_skin_tone": "👩🏽\u200d🚒",
+ "woman_frowning": "🙍\u200d♀️",
+ "woman_frowning_dark_skin_tone": "🙍🏿\u200d♀️",
+ "woman_frowning_light_skin_tone": "🙍🏻\u200d♀️",
+ "woman_frowning_medium-dark_skin_tone": "🙍🏾\u200d♀️",
+ "woman_frowning_medium-light_skin_tone": "🙍🏼\u200d♀️",
+ "woman_frowning_medium_skin_tone": "🙍🏽\u200d♀️",
+ "woman_genie": "🧞\u200d♀️",
+ "woman_gesturing_no": "🙅\u200d♀️",
+ "woman_gesturing_no_dark_skin_tone": "🙅🏿\u200d♀️",
+ "woman_gesturing_no_light_skin_tone": "🙅🏻\u200d♀️",
+ "woman_gesturing_no_medium-dark_skin_tone": "🙅🏾\u200d♀️",
+ "woman_gesturing_no_medium-light_skin_tone": "🙅🏼\u200d♀️",
+ "woman_gesturing_no_medium_skin_tone": "🙅🏽\u200d♀️",
+ "woman_gesturing_ok": "🙆\u200d♀️",
+ "woman_gesturing_ok_dark_skin_tone": "🙆🏿\u200d♀️",
+ "woman_gesturing_ok_light_skin_tone": "🙆🏻\u200d♀️",
+ "woman_gesturing_ok_medium-dark_skin_tone": "🙆🏾\u200d♀️",
+ "woman_gesturing_ok_medium-light_skin_tone": "🙆🏼\u200d♀️",
+ "woman_gesturing_ok_medium_skin_tone": "🙆🏽\u200d♀️",
+ "woman_getting_haircut": "💇\u200d♀️",
+ "woman_getting_haircut_dark_skin_tone": "💇🏿\u200d♀️",
+ "woman_getting_haircut_light_skin_tone": "💇🏻\u200d♀️",
+ "woman_getting_haircut_medium-dark_skin_tone": "💇🏾\u200d♀️",
+ "woman_getting_haircut_medium-light_skin_tone": "💇🏼\u200d♀️",
+ "woman_getting_haircut_medium_skin_tone": "💇🏽\u200d♀️",
+ "woman_getting_massage": "💆\u200d♀️",
+ "woman_getting_massage_dark_skin_tone": "💆🏿\u200d♀️",
+ "woman_getting_massage_light_skin_tone": "💆🏻\u200d♀️",
+ "woman_getting_massage_medium-dark_skin_tone": "💆🏾\u200d♀️",
+ "woman_getting_massage_medium-light_skin_tone": "💆🏼\u200d♀️",
+ "woman_getting_massage_medium_skin_tone": "💆🏽\u200d♀️",
+ "woman_golfing": "🏌️\u200d♀️",
+ "woman_golfing_dark_skin_tone": "🏌🏿\u200d♀️",
+ "woman_golfing_light_skin_tone": "🏌🏻\u200d♀️",
+ "woman_golfing_medium-dark_skin_tone": "🏌🏾\u200d♀️",
+ "woman_golfing_medium-light_skin_tone": "🏌🏼\u200d♀️",
+ "woman_golfing_medium_skin_tone": "🏌🏽\u200d♀️",
+ "woman_guard": "💂\u200d♀️",
+ "woman_guard_dark_skin_tone": "💂🏿\u200d♀️",
+ "woman_guard_light_skin_tone": "💂🏻\u200d♀️",
+ "woman_guard_medium-dark_skin_tone": "💂🏾\u200d♀️",
+ "woman_guard_medium-light_skin_tone": "💂🏼\u200d♀️",
+ "woman_guard_medium_skin_tone": "💂🏽\u200d♀️",
+ "woman_health_worker": "👩\u200d⚕️",
+ "woman_health_worker_dark_skin_tone": "👩🏿\u200d⚕️",
+ "woman_health_worker_light_skin_tone": "👩🏻\u200d⚕️",
+ "woman_health_worker_medium-dark_skin_tone": "👩🏾\u200d⚕️",
+ "woman_health_worker_medium-light_skin_tone": "👩🏼\u200d⚕️",
+ "woman_health_worker_medium_skin_tone": "👩🏽\u200d⚕️",
+ "woman_in_lotus_position": "🧘\u200d♀️",
+ "woman_in_lotus_position_dark_skin_tone": "🧘🏿\u200d♀️",
+ "woman_in_lotus_position_light_skin_tone": "🧘🏻\u200d♀️",
+ "woman_in_lotus_position_medium-dark_skin_tone": "🧘🏾\u200d♀️",
+ "woman_in_lotus_position_medium-light_skin_tone": "🧘🏼\u200d♀️",
+ "woman_in_lotus_position_medium_skin_tone": "🧘🏽\u200d♀️",
+ "woman_in_manual_wheelchair": "👩\u200d🦽",
+ "woman_in_motorized_wheelchair": "👩\u200d🦼",
+ "woman_in_steamy_room": "🧖\u200d♀️",
+ "woman_in_steamy_room_dark_skin_tone": "🧖🏿\u200d♀️",
+ "woman_in_steamy_room_light_skin_tone": "🧖🏻\u200d♀️",
+ "woman_in_steamy_room_medium-dark_skin_tone": "🧖🏾\u200d♀️",
+ "woman_in_steamy_room_medium-light_skin_tone": "🧖🏼\u200d♀️",
+ "woman_in_steamy_room_medium_skin_tone": "🧖🏽\u200d♀️",
+ "woman_judge": "👩\u200d⚖️",
+ "woman_judge_dark_skin_tone": "👩🏿\u200d⚖️",
+ "woman_judge_light_skin_tone": "👩🏻\u200d⚖️",
+ "woman_judge_medium-dark_skin_tone": "👩🏾\u200d⚖️",
+ "woman_judge_medium-light_skin_tone": "👩🏼\u200d⚖️",
+ "woman_judge_medium_skin_tone": "👩🏽\u200d⚖️",
+ "woman_juggling": "🤹\u200d♀️",
+ "woman_juggling_dark_skin_tone": "🤹🏿\u200d♀️",
+ "woman_juggling_light_skin_tone": "🤹🏻\u200d♀️",
+ "woman_juggling_medium-dark_skin_tone": "🤹🏾\u200d♀️",
+ "woman_juggling_medium-light_skin_tone": "🤹🏼\u200d♀️",
+ "woman_juggling_medium_skin_tone": "🤹🏽\u200d♀️",
+ "woman_lifting_weights": "🏋️\u200d♀️",
+ "woman_lifting_weights_dark_skin_tone": "🏋🏿\u200d♀️",
+ "woman_lifting_weights_light_skin_tone": "🏋🏻\u200d♀️",
+ "woman_lifting_weights_medium-dark_skin_tone": "🏋🏾\u200d♀️",
+ "woman_lifting_weights_medium-light_skin_tone": "🏋🏼\u200d♀️",
+ "woman_lifting_weights_medium_skin_tone": "🏋🏽\u200d♀️",
+ "woman_light_skin_tone": "👩🏻",
+ "woman_mage": "🧙\u200d♀️",
+ "woman_mage_dark_skin_tone": "🧙🏿\u200d♀️",
+ "woman_mage_light_skin_tone": "🧙🏻\u200d♀️",
+ "woman_mage_medium-dark_skin_tone": "🧙🏾\u200d♀️",
+ "woman_mage_medium-light_skin_tone": "🧙🏼\u200d♀️",
+ "woman_mage_medium_skin_tone": "🧙🏽\u200d♀️",
+ "woman_mechanic": "👩\u200d🔧",
+ "woman_mechanic_dark_skin_tone": "👩🏿\u200d🔧",
+ "woman_mechanic_light_skin_tone": "👩🏻\u200d🔧",
+ "woman_mechanic_medium-dark_skin_tone": "👩🏾\u200d🔧",
+ "woman_mechanic_medium-light_skin_tone": "👩🏼\u200d🔧",
+ "woman_mechanic_medium_skin_tone": "👩🏽\u200d🔧",
+ "woman_medium-dark_skin_tone": "👩🏾",
+ "woman_medium-light_skin_tone": "👩🏼",
+ "woman_medium_skin_tone": "👩🏽",
+ "woman_mountain_biking": "🚵\u200d♀️",
+ "woman_mountain_biking_dark_skin_tone": "🚵🏿\u200d♀️",
+ "woman_mountain_biking_light_skin_tone": "🚵🏻\u200d♀️",
+ "woman_mountain_biking_medium-dark_skin_tone": "🚵🏾\u200d♀️",
+ "woman_mountain_biking_medium-light_skin_tone": "🚵🏼\u200d♀️",
+ "woman_mountain_biking_medium_skin_tone": "🚵🏽\u200d♀️",
+ "woman_office_worker": "👩\u200d💼",
+ "woman_office_worker_dark_skin_tone": "👩🏿\u200d💼",
+ "woman_office_worker_light_skin_tone": "👩🏻\u200d💼",
+ "woman_office_worker_medium-dark_skin_tone": "👩🏾\u200d💼",
+ "woman_office_worker_medium-light_skin_tone": "👩🏼\u200d💼",
+ "woman_office_worker_medium_skin_tone": "👩🏽\u200d💼",
+ "woman_pilot": "👩\u200d✈️",
+ "woman_pilot_dark_skin_tone": "👩🏿\u200d✈️",
+ "woman_pilot_light_skin_tone": "👩🏻\u200d✈️",
+ "woman_pilot_medium-dark_skin_tone": "👩🏾\u200d✈️",
+ "woman_pilot_medium-light_skin_tone": "👩🏼\u200d✈️",
+ "woman_pilot_medium_skin_tone": "👩🏽\u200d✈️",
+ "woman_playing_handball": "🤾\u200d♀️",
+ "woman_playing_handball_dark_skin_tone": "🤾🏿\u200d♀️",
+ "woman_playing_handball_light_skin_tone": "🤾🏻\u200d♀️",
+ "woman_playing_handball_medium-dark_skin_tone": "🤾🏾\u200d♀️",
+ "woman_playing_handball_medium-light_skin_tone": "🤾🏼\u200d♀️",
+ "woman_playing_handball_medium_skin_tone": "🤾🏽\u200d♀️",
+ "woman_playing_water_polo": "🤽\u200d♀️",
+ "woman_playing_water_polo_dark_skin_tone": "🤽🏿\u200d♀️",
+ "woman_playing_water_polo_light_skin_tone": "🤽🏻\u200d♀️",
+ "woman_playing_water_polo_medium-dark_skin_tone": "🤽🏾\u200d♀️",
+ "woman_playing_water_polo_medium-light_skin_tone": "🤽🏼\u200d♀️",
+ "woman_playing_water_polo_medium_skin_tone": "🤽🏽\u200d♀️",
+ "woman_police_officer": "👮\u200d♀️",
+ "woman_police_officer_dark_skin_tone": "👮🏿\u200d♀️",
+ "woman_police_officer_light_skin_tone": "👮🏻\u200d♀️",
+ "woman_police_officer_medium-dark_skin_tone": "👮🏾\u200d♀️",
+ "woman_police_officer_medium-light_skin_tone": "👮🏼\u200d♀️",
+ "woman_police_officer_medium_skin_tone": "👮🏽\u200d♀️",
+ "woman_pouting": "🙎\u200d♀️",
+ "woman_pouting_dark_skin_tone": "🙎🏿\u200d♀️",
+ "woman_pouting_light_skin_tone": "🙎🏻\u200d♀️",
+ "woman_pouting_medium-dark_skin_tone": "🙎🏾\u200d♀️",
+ "woman_pouting_medium-light_skin_tone": "🙎🏼\u200d♀️",
+ "woman_pouting_medium_skin_tone": "🙎🏽\u200d♀️",
+ "woman_raising_hand": "🙋\u200d♀️",
+ "woman_raising_hand_dark_skin_tone": "🙋🏿\u200d♀️",
+ "woman_raising_hand_light_skin_tone": "🙋🏻\u200d♀️",
+ "woman_raising_hand_medium-dark_skin_tone": "🙋🏾\u200d♀️",
+ "woman_raising_hand_medium-light_skin_tone": "🙋🏼\u200d♀️",
+ "woman_raising_hand_medium_skin_tone": "🙋🏽\u200d♀️",
+ "woman_rowing_boat": "🚣\u200d♀️",
+ "woman_rowing_boat_dark_skin_tone": "🚣🏿\u200d♀️",
+ "woman_rowing_boat_light_skin_tone": "🚣🏻\u200d♀️",
+ "woman_rowing_boat_medium-dark_skin_tone": "🚣🏾\u200d♀️",
+ "woman_rowing_boat_medium-light_skin_tone": "🚣🏼\u200d♀️",
+ "woman_rowing_boat_medium_skin_tone": "🚣🏽\u200d♀️",
+ "woman_running": "🏃\u200d♀️",
+ "woman_running_dark_skin_tone": "🏃🏿\u200d♀️",
+ "woman_running_light_skin_tone": "🏃🏻\u200d♀️",
+ "woman_running_medium-dark_skin_tone": "🏃🏾\u200d♀️",
+ "woman_running_medium-light_skin_tone": "🏃🏼\u200d♀️",
+ "woman_running_medium_skin_tone": "🏃🏽\u200d♀️",
+ "woman_scientist": "👩\u200d🔬",
+ "woman_scientist_dark_skin_tone": "👩🏿\u200d🔬",
+ "woman_scientist_light_skin_tone": "👩🏻\u200d🔬",
+ "woman_scientist_medium-dark_skin_tone": "👩🏾\u200d🔬",
+ "woman_scientist_medium-light_skin_tone": "👩🏼\u200d🔬",
+ "woman_scientist_medium_skin_tone": "👩🏽\u200d🔬",
+ "woman_shrugging": "🤷\u200d♀️",
+ "woman_shrugging_dark_skin_tone": "🤷🏿\u200d♀️",
+ "woman_shrugging_light_skin_tone": "🤷🏻\u200d♀️",
+ "woman_shrugging_medium-dark_skin_tone": "🤷🏾\u200d♀️",
+ "woman_shrugging_medium-light_skin_tone": "🤷🏼\u200d♀️",
+ "woman_shrugging_medium_skin_tone": "🤷🏽\u200d♀️",
+ "woman_singer": "👩\u200d🎤",
+ "woman_singer_dark_skin_tone": "👩🏿\u200d🎤",
+ "woman_singer_light_skin_tone": "👩🏻\u200d🎤",
+ "woman_singer_medium-dark_skin_tone": "👩🏾\u200d🎤",
+ "woman_singer_medium-light_skin_tone": "👩🏼\u200d🎤",
+ "woman_singer_medium_skin_tone": "👩🏽\u200d🎤",
+ "woman_student": "👩\u200d🎓",
+ "woman_student_dark_skin_tone": "👩🏿\u200d🎓",
+ "woman_student_light_skin_tone": "👩🏻\u200d🎓",
+ "woman_student_medium-dark_skin_tone": "👩🏾\u200d🎓",
+ "woman_student_medium-light_skin_tone": "👩🏼\u200d🎓",
+ "woman_student_medium_skin_tone": "👩🏽\u200d🎓",
+ "woman_surfing": "🏄\u200d♀️",
+ "woman_surfing_dark_skin_tone": "🏄🏿\u200d♀️",
+ "woman_surfing_light_skin_tone": "🏄🏻\u200d♀️",
+ "woman_surfing_medium-dark_skin_tone": "🏄🏾\u200d♀️",
+ "woman_surfing_medium-light_skin_tone": "🏄🏼\u200d♀️",
+ "woman_surfing_medium_skin_tone": "🏄🏽\u200d♀️",
+ "woman_swimming": "🏊\u200d♀️",
+ "woman_swimming_dark_skin_tone": "🏊🏿\u200d♀️",
+ "woman_swimming_light_skin_tone": "🏊🏻\u200d♀️",
+ "woman_swimming_medium-dark_skin_tone": "🏊🏾\u200d♀️",
+ "woman_swimming_medium-light_skin_tone": "🏊🏼\u200d♀️",
+ "woman_swimming_medium_skin_tone": "🏊🏽\u200d♀️",
+ "woman_teacher": "👩\u200d🏫",
+ "woman_teacher_dark_skin_tone": "👩🏿\u200d🏫",
+ "woman_teacher_light_skin_tone": "👩🏻\u200d🏫",
+ "woman_teacher_medium-dark_skin_tone": "👩🏾\u200d🏫",
+ "woman_teacher_medium-light_skin_tone": "👩🏼\u200d🏫",
+ "woman_teacher_medium_skin_tone": "👩🏽\u200d🏫",
+ "woman_technologist": "👩\u200d💻",
+ "woman_technologist_dark_skin_tone": "👩🏿\u200d💻",
+ "woman_technologist_light_skin_tone": "👩🏻\u200d💻",
+ "woman_technologist_medium-dark_skin_tone": "👩🏾\u200d💻",
+ "woman_technologist_medium-light_skin_tone": "👩🏼\u200d💻",
+ "woman_technologist_medium_skin_tone": "👩🏽\u200d💻",
+ "woman_tipping_hand": "💁\u200d♀️",
+ "woman_tipping_hand_dark_skin_tone": "💁🏿\u200d♀️",
+ "woman_tipping_hand_light_skin_tone": "💁🏻\u200d♀️",
+ "woman_tipping_hand_medium-dark_skin_tone": "💁🏾\u200d♀️",
+ "woman_tipping_hand_medium-light_skin_tone": "💁🏼\u200d♀️",
+ "woman_tipping_hand_medium_skin_tone": "💁🏽\u200d♀️",
+ "woman_vampire": "🧛\u200d♀️",
+ "woman_vampire_dark_skin_tone": "🧛🏿\u200d♀️",
+ "woman_vampire_light_skin_tone": "🧛🏻\u200d♀️",
+ "woman_vampire_medium-dark_skin_tone": "🧛🏾\u200d♀️",
+ "woman_vampire_medium-light_skin_tone": "🧛🏼\u200d♀️",
+ "woman_vampire_medium_skin_tone": "🧛🏽\u200d♀️",
+ "woman_walking": "🚶\u200d♀️",
+ "woman_walking_dark_skin_tone": "🚶🏿\u200d♀️",
+ "woman_walking_light_skin_tone": "🚶🏻\u200d♀️",
+ "woman_walking_medium-dark_skin_tone": "🚶🏾\u200d♀️",
+ "woman_walking_medium-light_skin_tone": "🚶🏼\u200d♀️",
+ "woman_walking_medium_skin_tone": "🚶🏽\u200d♀️",
+ "woman_wearing_turban": "👳\u200d♀️",
+ "woman_wearing_turban_dark_skin_tone": "👳🏿\u200d♀️",
+ "woman_wearing_turban_light_skin_tone": "👳🏻\u200d♀️",
+ "woman_wearing_turban_medium-dark_skin_tone": "👳🏾\u200d♀️",
+ "woman_wearing_turban_medium-light_skin_tone": "👳🏼\u200d♀️",
+ "woman_wearing_turban_medium_skin_tone": "👳🏽\u200d♀️",
+ "woman_with_headscarf": "🧕",
+ "woman_with_headscarf_dark_skin_tone": "🧕🏿",
+ "woman_with_headscarf_light_skin_tone": "🧕🏻",
+ "woman_with_headscarf_medium-dark_skin_tone": "🧕🏾",
+ "woman_with_headscarf_medium-light_skin_tone": "🧕🏼",
+ "woman_with_headscarf_medium_skin_tone": "🧕🏽",
+ "woman_with_probing_cane": "👩\u200d🦯",
+ "woman_zombie": "🧟\u200d♀️",
+ "woman’s_boot": "👢",
+ "woman’s_clothes": "👚",
+ "woman’s_hat": "👒",
+ "woman’s_sandal": "👡",
+ "women_with_bunny_ears": "👯\u200d♀️",
+ "women_wrestling": "🤼\u200d♀️",
+ "women’s_room": "🚺",
+ "woozy_face": "🥴",
+ "world_map": "🗺",
+ "worried_face": "😟",
+ "wrapped_gift": "🎁",
+ "wrench": "🔧",
+ "writing_hand": "✍",
+ "writing_hand_dark_skin_tone": "✍🏿",
+ "writing_hand_light_skin_tone": "✍🏻",
+ "writing_hand_medium-dark_skin_tone": "✍🏾",
+ "writing_hand_medium-light_skin_tone": "✍🏼",
+ "writing_hand_medium_skin_tone": "✍🏽",
+ "yarn": "🧶",
+ "yawning_face": "🥱",
+ "yellow_circle": "🟡",
+ "yellow_heart": "💛",
+ "yellow_square": "🟨",
+ "yen_banknote": "💴",
+ "yo-yo": "🪀",
+ "yin_yang": "☯",
+ "zany_face": "🤪",
+ "zebra": "🦓",
+ "zipper-mouth_face": "🤐",
+ "zombie": "🧟",
+ "zzz": "💤",
+ "åland_islands": "🇦🇽",
+ "keycap_asterisk": "*⃣",
+ "keycap_digit_eight": "8⃣",
+ "keycap_digit_five": "5⃣",
+ "keycap_digit_four": "4⃣",
+ "keycap_digit_nine": "9⃣",
+ "keycap_digit_one": "1⃣",
+ "keycap_digit_seven": "7⃣",
+ "keycap_digit_six": "6⃣",
+ "keycap_digit_three": "3⃣",
+ "keycap_digit_two": "2⃣",
+ "keycap_digit_zero": "0⃣",
+ "keycap_number_sign": "#⃣",
+ "light_skin_tone": "🏻",
+ "medium_light_skin_tone": "🏼",
+ "medium_skin_tone": "🏽",
+ "medium_dark_skin_tone": "🏾",
+ "dark_skin_tone": "🏿",
+ "regional_indicator_symbol_letter_a": "🇦",
+ "regional_indicator_symbol_letter_b": "🇧",
+ "regional_indicator_symbol_letter_c": "🇨",
+ "regional_indicator_symbol_letter_d": "🇩",
+ "regional_indicator_symbol_letter_e": "🇪",
+ "regional_indicator_symbol_letter_f": "🇫",
+ "regional_indicator_symbol_letter_g": "🇬",
+ "regional_indicator_symbol_letter_h": "🇭",
+ "regional_indicator_symbol_letter_i": "🇮",
+ "regional_indicator_symbol_letter_j": "🇯",
+ "regional_indicator_symbol_letter_k": "🇰",
+ "regional_indicator_symbol_letter_l": "🇱",
+ "regional_indicator_symbol_letter_m": "🇲",
+ "regional_indicator_symbol_letter_n": "🇳",
+ "regional_indicator_symbol_letter_o": "🇴",
+ "regional_indicator_symbol_letter_p": "🇵",
+ "regional_indicator_symbol_letter_q": "🇶",
+ "regional_indicator_symbol_letter_r": "🇷",
+ "regional_indicator_symbol_letter_s": "🇸",
+ "regional_indicator_symbol_letter_t": "🇹",
+ "regional_indicator_symbol_letter_u": "🇺",
+ "regional_indicator_symbol_letter_v": "🇻",
+ "regional_indicator_symbol_letter_w": "🇼",
+ "regional_indicator_symbol_letter_x": "🇽",
+ "regional_indicator_symbol_letter_y": "🇾",
+ "regional_indicator_symbol_letter_z": "🇿",
+ "airplane_arriving": "🛬",
+ "space_invader": "👾",
+ "football": "🏈",
+ "anger": "💢",
+ "angry": "😠",
+ "anguished": "😧",
+ "signal_strength": "📶",
+ "arrows_counterclockwise": "🔄",
+ "arrow_heading_down": "⤵",
+ "arrow_heading_up": "⤴",
+ "art": "🎨",
+ "astonished": "😲",
+ "athletic_shoe": "👟",
+ "atm": "🏧",
+ "car": "🚗",
+ "red_car": "🚗",
+ "angel": "👼",
+ "back": "🔙",
+ "badminton_racquet_and_shuttlecock": "🏸",
+ "dollar": "💵",
+ "euro": "💶",
+ "pound": "💷",
+ "yen": "💴",
+ "barber": "💈",
+ "bath": "🛀",
+ "bear": "🐻",
+ "heartbeat": "💓",
+ "beer": "🍺",
+ "no_bell": "🔕",
+ "bento": "🍱",
+ "bike": "🚲",
+ "bicyclist": "🚴",
+ "8ball": "🎱",
+ "biohazard_sign": "☣",
+ "birthday": "🎂",
+ "black_circle_for_record": "⏺",
+ "clubs": "♣",
+ "diamonds": "♦",
+ "arrow_double_down": "⏬",
+ "hearts": "♥",
+ "rewind": "⏪",
+ "black_left__pointing_double_triangle_with_vertical_bar": "⏮",
+ "arrow_backward": "◀",
+ "black_medium_small_square": "◾",
+ "question": "❓",
+ "fast_forward": "⏩",
+ "black_right__pointing_double_triangle_with_vertical_bar": "⏭",
+ "arrow_forward": "▶",
+ "black_right__pointing_triangle_with_double_vertical_bar": "⏯",
+ "arrow_right": "➡",
+ "spades": "♠",
+ "black_square_for_stop": "⏹",
+ "sunny": "☀",
+ "phone": "☎",
+ "recycle": "♻",
+ "arrow_double_up": "⏫",
+ "busstop": "🚏",
+ "date": "📅",
+ "flags": "🎏",
+ "cat2": "🐈",
+ "joy_cat": "😹",
+ "smirk_cat": "😼",
+ "chart_with_downwards_trend": "📉",
+ "chart_with_upwards_trend": "📈",
+ "chart": "💹",
+ "mega": "📣",
+ "checkered_flag": "🏁",
+ "accept": "🉑",
+ "ideograph_advantage": "🉐",
+ "congratulations": "㊗",
+ "secret": "㊙",
+ "m": "Ⓜ",
+ "city_sunset": "🌆",
+ "clapper": "🎬",
+ "clap": "👏",
+ "beers": "🍻",
+ "clock830": "🕣",
+ "clock8": "🕗",
+ "clock1130": "🕦",
+ "clock11": "🕚",
+ "clock530": "🕠",
+ "clock5": "🕔",
+ "clock430": "🕟",
+ "clock4": "🕓",
+ "clock930": "🕤",
+ "clock9": "🕘",
+ "clock130": "🕜",
+ "clock1": "🕐",
+ "clock730": "🕢",
+ "clock7": "🕖",
+ "clock630": "🕡",
+ "clock6": "🕕",
+ "clock1030": "🕥",
+ "clock10": "🕙",
+ "clock330": "🕞",
+ "clock3": "🕒",
+ "clock1230": "🕧",
+ "clock12": "🕛",
+ "clock230": "🕝",
+ "clock2": "🕑",
+ "arrows_clockwise": "🔃",
+ "repeat": "🔁",
+ "repeat_one": "🔂",
+ "closed_lock_with_key": "🔐",
+ "mailbox_closed": "📪",
+ "mailbox": "📫",
+ "cloud_with_tornado": "🌪",
+ "cocktail": "🍸",
+ "boom": "💥",
+ "compression": "🗜",
+ "confounded": "😖",
+ "confused": "😕",
+ "rice": "🍚",
+ "cow2": "🐄",
+ "cricket_bat_and_ball": "🏏",
+ "x": "❌",
+ "cry": "😢",
+ "curry": "🍛",
+ "dagger_knife": "🗡",
+ "dancer": "💃",
+ "dark_sunglasses": "🕶",
+ "dash": "💨",
+ "truck": "🚚",
+ "derelict_house_building": "🏚",
+ "diamond_shape_with_a_dot_inside": "💠",
+ "dart": "🎯",
+ "disappointed_relieved": "😥",
+ "disappointed": "😞",
+ "do_not_litter": "🚯",
+ "dog2": "🐕",
+ "flipper": "🐬",
+ "loop": "➿",
+ "bangbang": "‼",
+ "double_vertical_bar": "⏸",
+ "dove_of_peace": "🕊",
+ "small_red_triangle_down": "🔻",
+ "arrow_down_small": "🔽",
+ "arrow_down": "⬇",
+ "dromedary_camel": "🐪",
+ "e__mail": "📧",
+ "corn": "🌽",
+ "ear_of_rice": "🌾",
+ "earth_americas": "🌎",
+ "earth_asia": "🌏",
+ "earth_africa": "🌍",
+ "eight_pointed_black_star": "✴",
+ "eight_spoked_asterisk": "✳",
+ "eject_symbol": "⏏",
+ "bulb": "💡",
+ "emoji_modifier_fitzpatrick_type__1__2": "🏻",
+ "emoji_modifier_fitzpatrick_type__3": "🏼",
+ "emoji_modifier_fitzpatrick_type__4": "🏽",
+ "emoji_modifier_fitzpatrick_type__5": "🏾",
+ "emoji_modifier_fitzpatrick_type__6": "🏿",
+ "end": "🔚",
+ "email": "✉",
+ "european_castle": "🏰",
+ "european_post_office": "🏤",
+ "interrobang": "⁉",
+ "expressionless": "😑",
+ "eyeglasses": "👓",
+ "massage": "💆",
+ "yum": "😋",
+ "scream": "😱",
+ "kissing_heart": "😘",
+ "sweat": "😓",
+ "face_with_head__bandage": "🤕",
+ "triumph": "😤",
+ "mask": "😷",
+ "no_good": "🙅",
+ "ok_woman": "🙆",
+ "open_mouth": "😮",
+ "cold_sweat": "😰",
+ "stuck_out_tongue": "😛",
+ "stuck_out_tongue_closed_eyes": "😝",
+ "stuck_out_tongue_winking_eye": "😜",
+ "joy": "😂",
+ "no_mouth": "😶",
+ "santa": "🎅",
+ "fax": "📠",
+ "fearful": "😨",
+ "field_hockey_stick_and_ball": "🏑",
+ "first_quarter_moon_with_face": "🌛",
+ "fish_cake": "🍥",
+ "fishing_pole_and_fish": "🎣",
+ "facepunch": "👊",
+ "punch": "👊",
+ "flag_for_afghanistan": "🇦🇫",
+ "flag_for_albania": "🇦🇱",
+ "flag_for_algeria": "🇩🇿",
+ "flag_for_american_samoa": "🇦🇸",
+ "flag_for_andorra": "🇦🇩",
+ "flag_for_angola": "🇦🇴",
+ "flag_for_anguilla": "🇦🇮",
+ "flag_for_antarctica": "🇦🇶",
+ "flag_for_antigua_&_barbuda": "🇦🇬",
+ "flag_for_argentina": "🇦🇷",
+ "flag_for_armenia": "🇦🇲",
+ "flag_for_aruba": "🇦🇼",
+ "flag_for_ascension_island": "🇦🇨",
+ "flag_for_australia": "🇦🇺",
+ "flag_for_austria": "🇦🇹",
+ "flag_for_azerbaijan": "🇦🇿",
+ "flag_for_bahamas": "🇧🇸",
+ "flag_for_bahrain": "🇧🇭",
+ "flag_for_bangladesh": "🇧🇩",
+ "flag_for_barbados": "🇧🇧",
+ "flag_for_belarus": "🇧🇾",
+ "flag_for_belgium": "🇧🇪",
+ "flag_for_belize": "🇧🇿",
+ "flag_for_benin": "🇧🇯",
+ "flag_for_bermuda": "🇧🇲",
+ "flag_for_bhutan": "🇧🇹",
+ "flag_for_bolivia": "🇧🇴",
+ "flag_for_bosnia_&_herzegovina": "🇧🇦",
+ "flag_for_botswana": "🇧🇼",
+ "flag_for_bouvet_island": "🇧🇻",
+ "flag_for_brazil": "🇧🇷",
+ "flag_for_british_indian_ocean_territory": "🇮🇴",
+ "flag_for_british_virgin_islands": "🇻🇬",
+ "flag_for_brunei": "🇧🇳",
+ "flag_for_bulgaria": "🇧🇬",
+ "flag_for_burkina_faso": "🇧🇫",
+ "flag_for_burundi": "🇧🇮",
+ "flag_for_cambodia": "🇰🇭",
+ "flag_for_cameroon": "🇨🇲",
+ "flag_for_canada": "🇨🇦",
+ "flag_for_canary_islands": "🇮🇨",
+ "flag_for_cape_verde": "🇨🇻",
+ "flag_for_caribbean_netherlands": "🇧🇶",
+ "flag_for_cayman_islands": "🇰🇾",
+ "flag_for_central_african_republic": "🇨🇫",
+ "flag_for_ceuta_&_melilla": "🇪🇦",
+ "flag_for_chad": "🇹🇩",
+ "flag_for_chile": "🇨🇱",
+ "flag_for_china": "🇨🇳",
+ "flag_for_christmas_island": "🇨🇽",
+ "flag_for_clipperton_island": "🇨🇵",
+ "flag_for_cocos__islands": "🇨🇨",
+ "flag_for_colombia": "🇨🇴",
+ "flag_for_comoros": "🇰🇲",
+ "flag_for_congo____brazzaville": "🇨🇬",
+ "flag_for_congo____kinshasa": "🇨🇩",
+ "flag_for_cook_islands": "🇨🇰",
+ "flag_for_costa_rica": "🇨🇷",
+ "flag_for_croatia": "🇭🇷",
+ "flag_for_cuba": "🇨🇺",
+ "flag_for_curaçao": "🇨🇼",
+ "flag_for_cyprus": "🇨🇾",
+ "flag_for_czech_republic": "🇨🇿",
+ "flag_for_côte_d’ivoire": "🇨🇮",
+ "flag_for_denmark": "🇩🇰",
+ "flag_for_diego_garcia": "🇩🇬",
+ "flag_for_djibouti": "🇩🇯",
+ "flag_for_dominica": "🇩🇲",
+ "flag_for_dominican_republic": "🇩🇴",
+ "flag_for_ecuador": "🇪🇨",
+ "flag_for_egypt": "🇪🇬",
+ "flag_for_el_salvador": "🇸🇻",
+ "flag_for_equatorial_guinea": "🇬🇶",
+ "flag_for_eritrea": "🇪🇷",
+ "flag_for_estonia": "🇪🇪",
+ "flag_for_ethiopia": "🇪🇹",
+ "flag_for_european_union": "🇪🇺",
+ "flag_for_falkland_islands": "🇫🇰",
+ "flag_for_faroe_islands": "🇫🇴",
+ "flag_for_fiji": "🇫🇯",
+ "flag_for_finland": "🇫🇮",
+ "flag_for_france": "🇫🇷",
+ "flag_for_french_guiana": "🇬🇫",
+ "flag_for_french_polynesia": "🇵🇫",
+ "flag_for_french_southern_territories": "🇹🇫",
+ "flag_for_gabon": "🇬🇦",
+ "flag_for_gambia": "🇬🇲",
+ "flag_for_georgia": "🇬🇪",
+ "flag_for_germany": "🇩🇪",
+ "flag_for_ghana": "🇬🇭",
+ "flag_for_gibraltar": "🇬🇮",
+ "flag_for_greece": "🇬🇷",
+ "flag_for_greenland": "🇬🇱",
+ "flag_for_grenada": "🇬🇩",
+ "flag_for_guadeloupe": "🇬🇵",
+ "flag_for_guam": "🇬🇺",
+ "flag_for_guatemala": "🇬🇹",
+ "flag_for_guernsey": "🇬🇬",
+ "flag_for_guinea": "🇬🇳",
+ "flag_for_guinea__bissau": "🇬🇼",
+ "flag_for_guyana": "🇬🇾",
+ "flag_for_haiti": "🇭🇹",
+ "flag_for_heard_&_mcdonald_islands": "🇭🇲",
+ "flag_for_honduras": "🇭🇳",
+ "flag_for_hong_kong": "🇭🇰",
+ "flag_for_hungary": "🇭🇺",
+ "flag_for_iceland": "🇮🇸",
+ "flag_for_india": "🇮🇳",
+ "flag_for_indonesia": "🇮🇩",
+ "flag_for_iran": "🇮🇷",
+ "flag_for_iraq": "🇮🇶",
+ "flag_for_ireland": "🇮🇪",
+ "flag_for_isle_of_man": "🇮🇲",
+ "flag_for_israel": "🇮🇱",
+ "flag_for_italy": "🇮🇹",
+ "flag_for_jamaica": "🇯🇲",
+ "flag_for_japan": "🇯🇵",
+ "flag_for_jersey": "🇯🇪",
+ "flag_for_jordan": "🇯🇴",
+ "flag_for_kazakhstan": "🇰🇿",
+ "flag_for_kenya": "🇰🇪",
+ "flag_for_kiribati": "🇰🇮",
+ "flag_for_kosovo": "🇽🇰",
+ "flag_for_kuwait": "🇰🇼",
+ "flag_for_kyrgyzstan": "🇰🇬",
+ "flag_for_laos": "🇱🇦",
+ "flag_for_latvia": "🇱🇻",
+ "flag_for_lebanon": "🇱🇧",
+ "flag_for_lesotho": "🇱🇸",
+ "flag_for_liberia": "🇱🇷",
+ "flag_for_libya": "🇱🇾",
+ "flag_for_liechtenstein": "🇱🇮",
+ "flag_for_lithuania": "🇱🇹",
+ "flag_for_luxembourg": "🇱🇺",
+ "flag_for_macau": "🇲🇴",
+ "flag_for_macedonia": "🇲🇰",
+ "flag_for_madagascar": "🇲🇬",
+ "flag_for_malawi": "🇲🇼",
+ "flag_for_malaysia": "🇲🇾",
+ "flag_for_maldives": "🇲🇻",
+ "flag_for_mali": "🇲🇱",
+ "flag_for_malta": "🇲🇹",
+ "flag_for_marshall_islands": "🇲🇭",
+ "flag_for_martinique": "🇲🇶",
+ "flag_for_mauritania": "🇲🇷",
+ "flag_for_mauritius": "🇲🇺",
+ "flag_for_mayotte": "🇾🇹",
+ "flag_for_mexico": "🇲🇽",
+ "flag_for_micronesia": "🇫🇲",
+ "flag_for_moldova": "🇲🇩",
+ "flag_for_monaco": "🇲🇨",
+ "flag_for_mongolia": "🇲🇳",
+ "flag_for_montenegro": "🇲🇪",
+ "flag_for_montserrat": "🇲🇸",
+ "flag_for_morocco": "🇲🇦",
+ "flag_for_mozambique": "🇲🇿",
+ "flag_for_myanmar": "🇲🇲",
+ "flag_for_namibia": "🇳🇦",
+ "flag_for_nauru": "🇳🇷",
+ "flag_for_nepal": "🇳🇵",
+ "flag_for_netherlands": "🇳🇱",
+ "flag_for_new_caledonia": "🇳🇨",
+ "flag_for_new_zealand": "🇳🇿",
+ "flag_for_nicaragua": "🇳🇮",
+ "flag_for_niger": "🇳🇪",
+ "flag_for_nigeria": "🇳🇬",
+ "flag_for_niue": "🇳🇺",
+ "flag_for_norfolk_island": "🇳🇫",
+ "flag_for_north_korea": "🇰🇵",
+ "flag_for_northern_mariana_islands": "🇲🇵",
+ "flag_for_norway": "🇳🇴",
+ "flag_for_oman": "🇴🇲",
+ "flag_for_pakistan": "🇵🇰",
+ "flag_for_palau": "🇵🇼",
+ "flag_for_palestinian_territories": "🇵🇸",
+ "flag_for_panama": "🇵🇦",
+ "flag_for_papua_new_guinea": "🇵🇬",
+ "flag_for_paraguay": "🇵🇾",
+ "flag_for_peru": "🇵🇪",
+ "flag_for_philippines": "🇵🇭",
+ "flag_for_pitcairn_islands": "🇵🇳",
+ "flag_for_poland": "🇵🇱",
+ "flag_for_portugal": "🇵🇹",
+ "flag_for_puerto_rico": "🇵🇷",
+ "flag_for_qatar": "🇶🇦",
+ "flag_for_romania": "🇷🇴",
+ "flag_for_russia": "🇷🇺",
+ "flag_for_rwanda": "🇷🇼",
+ "flag_for_réunion": "🇷🇪",
+ "flag_for_samoa": "🇼🇸",
+ "flag_for_san_marino": "🇸🇲",
+ "flag_for_saudi_arabia": "🇸🇦",
+ "flag_for_senegal": "🇸🇳",
+ "flag_for_serbia": "🇷🇸",
+ "flag_for_seychelles": "🇸🇨",
+ "flag_for_sierra_leone": "🇸🇱",
+ "flag_for_singapore": "🇸🇬",
+ "flag_for_sint_maarten": "🇸🇽",
+ "flag_for_slovakia": "🇸🇰",
+ "flag_for_slovenia": "🇸🇮",
+ "flag_for_solomon_islands": "🇸🇧",
+ "flag_for_somalia": "🇸🇴",
+ "flag_for_south_africa": "🇿🇦",
+ "flag_for_south_georgia_&_south_sandwich_islands": "🇬🇸",
+ "flag_for_south_korea": "🇰🇷",
+ "flag_for_south_sudan": "🇸🇸",
+ "flag_for_spain": "🇪🇸",
+ "flag_for_sri_lanka": "🇱🇰",
+ "flag_for_st._barthélemy": "🇧🇱",
+ "flag_for_st._helena": "🇸🇭",
+ "flag_for_st._kitts_&_nevis": "🇰🇳",
+ "flag_for_st._lucia": "🇱🇨",
+ "flag_for_st._martin": "🇲🇫",
+ "flag_for_st._pierre_&_miquelon": "🇵🇲",
+ "flag_for_st._vincent_&_grenadines": "🇻🇨",
+ "flag_for_sudan": "🇸🇩",
+ "flag_for_suriname": "🇸🇷",
+ "flag_for_svalbard_&_jan_mayen": "🇸🇯",
+ "flag_for_swaziland": "🇸🇿",
+ "flag_for_sweden": "🇸🇪",
+ "flag_for_switzerland": "🇨🇭",
+ "flag_for_syria": "🇸🇾",
+ "flag_for_são_tomé_&_príncipe": "🇸🇹",
+ "flag_for_taiwan": "🇹🇼",
+ "flag_for_tajikistan": "🇹🇯",
+ "flag_for_tanzania": "🇹🇿",
+ "flag_for_thailand": "🇹🇭",
+ "flag_for_timor__leste": "🇹🇱",
+ "flag_for_togo": "🇹🇬",
+ "flag_for_tokelau": "🇹🇰",
+ "flag_for_tonga": "🇹🇴",
+ "flag_for_trinidad_&_tobago": "🇹🇹",
+ "flag_for_tristan_da_cunha": "🇹🇦",
+ "flag_for_tunisia": "🇹🇳",
+ "flag_for_turkey": "🇹🇷",
+ "flag_for_turkmenistan": "🇹🇲",
+ "flag_for_turks_&_caicos_islands": "🇹🇨",
+ "flag_for_tuvalu": "🇹🇻",
+ "flag_for_u.s._outlying_islands": "🇺🇲",
+ "flag_for_u.s._virgin_islands": "🇻🇮",
+ "flag_for_uganda": "🇺🇬",
+ "flag_for_ukraine": "🇺🇦",
+ "flag_for_united_arab_emirates": "🇦🇪",
+ "flag_for_united_kingdom": "🇬🇧",
+ "flag_for_united_states": "🇺🇸",
+ "flag_for_uruguay": "🇺🇾",
+ "flag_for_uzbekistan": "🇺🇿",
+ "flag_for_vanuatu": "🇻🇺",
+ "flag_for_vatican_city": "🇻🇦",
+ "flag_for_venezuela": "🇻🇪",
+ "flag_for_vietnam": "🇻🇳",
+ "flag_for_wallis_&_futuna": "🇼🇫",
+ "flag_for_western_sahara": "🇪🇭",
+ "flag_for_yemen": "🇾🇪",
+ "flag_for_zambia": "🇿🇲",
+ "flag_for_zimbabwe": "🇿🇼",
+ "flag_for_åland_islands": "🇦🇽",
+ "golf": "⛳",
+ "fleur__de__lis": "⚜",
+ "muscle": "💪",
+ "flushed": "😳",
+ "frame_with_picture": "🖼",
+ "fries": "🍟",
+ "frog": "🐸",
+ "hatched_chick": "🐥",
+ "frowning": "😦",
+ "fuelpump": "⛽",
+ "full_moon_with_face": "🌝",
+ "gem": "💎",
+ "star2": "🌟",
+ "golfer": "🏌",
+ "mortar_board": "🎓",
+ "grimacing": "😬",
+ "smile_cat": "😸",
+ "grinning": "😀",
+ "grin": "😁",
+ "heartpulse": "💗",
+ "guardsman": "💂",
+ "haircut": "💇",
+ "hamster": "🐹",
+ "raising_hand": "🙋",
+ "headphones": "🎧",
+ "hear_no_evil": "🙉",
+ "cupid": "💘",
+ "gift_heart": "💝",
+ "heart": "❤",
+ "exclamation": "❗",
+ "heavy_exclamation_mark": "❗",
+ "heavy_heart_exclamation_mark_ornament": "❣",
+ "o": "⭕",
+ "helm_symbol": "⎈",
+ "helmet_with_white_cross": "⛑",
+ "high_heel": "👠",
+ "bullettrain_side": "🚄",
+ "bullettrain_front": "🚅",
+ "high_brightness": "🔆",
+ "zap": "⚡",
+ "hocho": "🔪",
+ "knife": "🔪",
+ "bee": "🐝",
+ "traffic_light": "🚥",
+ "racehorse": "🐎",
+ "coffee": "☕",
+ "hotsprings": "♨",
+ "hourglass": "⌛",
+ "hourglass_flowing_sand": "⏳",
+ "house_buildings": "🏘",
+ "100": "💯",
+ "hushed": "😯",
+ "ice_hockey_stick_and_puck": "🏒",
+ "imp": "👿",
+ "information_desk_person": "💁",
+ "information_source": "ℹ",
+ "capital_abcd": "🔠",
+ "abc": "🔤",
+ "abcd": "🔡",
+ "1234": "🔢",
+ "symbols": "🔣",
+ "izakaya_lantern": "🏮",
+ "lantern": "🏮",
+ "jack_o_lantern": "🎃",
+ "dolls": "🎎",
+ "japanese_goblin": "👺",
+ "japanese_ogre": "👹",
+ "beginner": "🔰",
+ "zero": "0️⃣",
+ "one": "1️⃣",
+ "ten": "🔟",
+ "two": "2️⃣",
+ "three": "3️⃣",
+ "four": "4️⃣",
+ "five": "5️⃣",
+ "six": "6️⃣",
+ "seven": "7️⃣",
+ "eight": "8️⃣",
+ "nine": "9️⃣",
+ "couplekiss": "💏",
+ "kissing_cat": "😽",
+ "kissing": "😗",
+ "kissing_closed_eyes": "😚",
+ "kissing_smiling_eyes": "😙",
+ "beetle": "🐞",
+ "large_blue_circle": "🔵",
+ "last_quarter_moon_with_face": "🌜",
+ "leaves": "🍃",
+ "mag": "🔍",
+ "left_right_arrow": "↔",
+ "leftwards_arrow_with_hook": "↩",
+ "arrow_left": "⬅",
+ "lock": "🔒",
+ "lock_with_ink_pen": "🔏",
+ "sob": "😭",
+ "low_brightness": "🔅",
+ "lower_left_ballpoint_pen": "🖊",
+ "lower_left_crayon": "🖍",
+ "lower_left_fountain_pen": "🖋",
+ "lower_left_paintbrush": "🖌",
+ "mahjong": "🀄",
+ "couple": "👫",
+ "man_in_business_suit_levitating": "🕴",
+ "man_with_gua_pi_mao": "👲",
+ "man_with_turban": "👳",
+ "mans_shoe": "👞",
+ "shoe": "👞",
+ "menorah_with_nine_branches": "🕎",
+ "mens": "🚹",
+ "minidisc": "💽",
+ "iphone": "📱",
+ "calling": "📲",
+ "money__mouth_face": "🤑",
+ "moneybag": "💰",
+ "rice_scene": "🎑",
+ "mountain_bicyclist": "🚵",
+ "mouse2": "🐁",
+ "lips": "👄",
+ "moyai": "🗿",
+ "notes": "🎶",
+ "nail_care": "💅",
+ "ab": "🆎",
+ "negative_squared_cross_mark": "❎",
+ "a": "🅰",
+ "b": "🅱",
+ "o2": "🅾",
+ "parking": "🅿",
+ "new_moon_with_face": "🌚",
+ "no_entry_sign": "🚫",
+ "underage": "🔞",
+ "non__potable_water": "🚱",
+ "arrow_upper_right": "↗",
+ "arrow_upper_left": "↖",
+ "office": "🏢",
+ "older_man": "👴",
+ "older_woman": "👵",
+ "om_symbol": "🕉",
+ "on": "🔛",
+ "book": "📖",
+ "unlock": "🔓",
+ "mailbox_with_no_mail": "📭",
+ "mailbox_with_mail": "📬",
+ "cd": "💿",
+ "tada": "🎉",
+ "feet": "🐾",
+ "walking": "🚶",
+ "pencil2": "✏",
+ "pensive": "😔",
+ "persevere": "😣",
+ "bow": "🙇",
+ "raised_hands": "🙌",
+ "person_with_ball": "⛹",
+ "person_with_blond_hair": "👱",
+ "pray": "🙏",
+ "person_with_pouting_face": "🙎",
+ "computer": "💻",
+ "pig2": "🐖",
+ "hankey": "💩",
+ "poop": "💩",
+ "shit": "💩",
+ "bamboo": "🎍",
+ "gun": "🔫",
+ "black_joker": "🃏",
+ "rotating_light": "🚨",
+ "cop": "👮",
+ "stew": "🍲",
+ "pouch": "👝",
+ "pouting_cat": "😾",
+ "rage": "😡",
+ "put_litter_in_its_place": "🚮",
+ "rabbit2": "🐇",
+ "racing_motorcycle": "🏍",
+ "radioactive_sign": "☢",
+ "fist": "✊",
+ "hand": "✋",
+ "raised_hand_with_fingers_splayed": "🖐",
+ "raised_hand_with_part_between_middle_and_ring_fingers": "🖖",
+ "blue_car": "🚙",
+ "apple": "🍎",
+ "relieved": "😌",
+ "reversed_hand_with_middle_finger_extended": "🖕",
+ "mag_right": "🔎",
+ "arrow_right_hook": "↪",
+ "sweet_potato": "🍠",
+ "robot": "🤖",
+ "rolled__up_newspaper": "🗞",
+ "rowboat": "🚣",
+ "runner": "🏃",
+ "running": "🏃",
+ "running_shirt_with_sash": "🎽",
+ "boat": "⛵",
+ "scales": "⚖",
+ "school_satchel": "🎒",
+ "scorpius": "♏",
+ "see_no_evil": "🙈",
+ "sheep": "🐑",
+ "stars": "🌠",
+ "cake": "🍰",
+ "six_pointed_star": "🔯",
+ "ski": "🎿",
+ "sleeping_accommodation": "🛌",
+ "sleeping": "😴",
+ "sleepy": "😪",
+ "sleuth_or_spy": "🕵",
+ "heart_eyes_cat": "😻",
+ "smiley_cat": "😺",
+ "innocent": "😇",
+ "heart_eyes": "😍",
+ "smiling_imp": "😈",
+ "smiley": "😃",
+ "sweat_smile": "😅",
+ "smile": "😄",
+ "laughing": "😆",
+ "satisfied": "😆",
+ "blush": "😊",
+ "smirk": "😏",
+ "smoking": "🚬",
+ "snow_capped_mountain": "🏔",
+ "soccer": "⚽",
+ "icecream": "🍦",
+ "soon": "🔜",
+ "arrow_lower_right": "↘",
+ "arrow_lower_left": "↙",
+ "speak_no_evil": "🙊",
+ "speaker": "🔈",
+ "mute": "🔇",
+ "sound": "🔉",
+ "loud_sound": "🔊",
+ "speaking_head_in_silhouette": "🗣",
+ "spiral_calendar_pad": "🗓",
+ "spiral_note_pad": "🗒",
+ "shell": "🐚",
+ "sweat_drops": "💦",
+ "u5272": "🈹",
+ "u5408": "🈴",
+ "u55b6": "🈺",
+ "u6307": "🈯",
+ "u6708": "🈷",
+ "u6709": "🈶",
+ "u6e80": "🈵",
+ "u7121": "🈚",
+ "u7533": "🈸",
+ "u7981": "🈲",
+ "u7a7a": "🈳",
+ "cl": "🆑",
+ "cool": "🆒",
+ "free": "🆓",
+ "id": "🆔",
+ "koko": "🈁",
+ "sa": "🈂",
+ "new": "🆕",
+ "ng": "🆖",
+ "ok": "🆗",
+ "sos": "🆘",
+ "up": "🆙",
+ "vs": "🆚",
+ "steam_locomotive": "🚂",
+ "ramen": "🍜",
+ "partly_sunny": "⛅",
+ "city_sunrise": "🌇",
+ "surfer": "🏄",
+ "swimmer": "🏊",
+ "shirt": "👕",
+ "tshirt": "👕",
+ "table_tennis_paddle_and_ball": "🏓",
+ "tea": "🍵",
+ "tv": "📺",
+ "three_button_mouse": "🖱",
+ "+1": "👍",
+ "thumbsup": "👍",
+ "__1": "👎",
+ "-1": "👎",
+ "thumbsdown": "👎",
+ "thunder_cloud_and_rain": "⛈",
+ "tiger2": "🐅",
+ "tophat": "🎩",
+ "top": "🔝",
+ "tm": "™",
+ "train2": "🚆",
+ "triangular_flag_on_post": "🚩",
+ "trident": "🔱",
+ "twisted_rightwards_arrows": "🔀",
+ "unamused": "😒",
+ "small_red_triangle": "🔺",
+ "arrow_up_small": "🔼",
+ "arrow_up_down": "↕",
+ "upside__down_face": "🙃",
+ "arrow_up": "⬆",
+ "v": "✌",
+ "vhs": "📼",
+ "wc": "🚾",
+ "ocean": "🌊",
+ "waving_black_flag": "🏴",
+ "wave": "👋",
+ "waving_white_flag": "🏳",
+ "moon": "🌔",
+ "scream_cat": "🙀",
+ "weary": "😩",
+ "weight_lifter": "🏋",
+ "whale2": "🐋",
+ "wheelchair": "♿",
+ "point_down": "👇",
+ "grey_exclamation": "❕",
+ "white_frowning_face": "☹",
+ "white_check_mark": "✅",
+ "point_left": "👈",
+ "white_medium_small_square": "◽",
+ "star": "⭐",
+ "grey_question": "❔",
+ "point_right": "👉",
+ "relaxed": "☺",
+ "white_sun_behind_cloud": "🌥",
+ "white_sun_behind_cloud_with_rain": "🌦",
+ "white_sun_with_small_cloud": "🌤",
+ "point_up_2": "👆",
+ "point_up": "☝",
+ "wind_blowing_face": "🌬",
+ "wink": "😉",
+ "wolf": "🐺",
+ "dancers": "👯",
+ "boot": "👢",
+ "womans_clothes": "👚",
+ "womans_hat": "👒",
+ "sandal": "👡",
+ "womens": "🚺",
+ "worried": "😟",
+ "gift": "🎁",
+ "zipper__mouth_face": "🤐",
+ "regional_indicator_a": "🇦",
+ "regional_indicator_b": "🇧",
+ "regional_indicator_c": "🇨",
+ "regional_indicator_d": "🇩",
+ "regional_indicator_e": "🇪",
+ "regional_indicator_f": "🇫",
+ "regional_indicator_g": "🇬",
+ "regional_indicator_h": "🇭",
+ "regional_indicator_i": "🇮",
+ "regional_indicator_j": "🇯",
+ "regional_indicator_k": "🇰",
+ "regional_indicator_l": "🇱",
+ "regional_indicator_m": "🇲",
+ "regional_indicator_n": "🇳",
+ "regional_indicator_o": "🇴",
+ "regional_indicator_p": "🇵",
+ "regional_indicator_q": "🇶",
+ "regional_indicator_r": "🇷",
+ "regional_indicator_s": "🇸",
+ "regional_indicator_t": "🇹",
+ "regional_indicator_u": "🇺",
+ "regional_indicator_v": "🇻",
+ "regional_indicator_w": "🇼",
+ "regional_indicator_x": "🇽",
+ "regional_indicator_y": "🇾",
+ "regional_indicator_z": "🇿",
+}
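
The table above maps snake_case names to fully composed emoji sequences; gendered and skin-tone variants are separate keys whose values embed U+200D (zero-width joiner) and a Fitzpatrick modifier rather than being derived at runtime. A minimal lookup sketch, assuming pip's vendored package is importable:

    from pip._vendor.rich._emoji_codes import EMOJI

    print(EMOJI["thumbsup"])  # 👍
    # Skin-tone variants are precomposed, distinct keys:
    print(EMOJI["woman_technologist_medium_skin_tone"])  # 👩🏽‍💻
    assert "\u200d" in EMOJI["woman_technologist"]  # ZWJ sequence
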
diff --git a/third_party/python/pip/pip/_vendor/rich/_emoji_replace.py b/third_party/python/pip/pip/_vendor/rich/_emoji_replace.py
new file mode 100644
index 0000000000..bb2cafa180
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_emoji_replace.py
@@ -0,0 +1,32 @@
+from typing import Callable, Match, Optional
+import re
+
+from ._emoji_codes import EMOJI
+
+
+_ReStringMatch = Match[str] # regex match object
+_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
+_EmojiSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
+
+
+def _emoji_replace(
+ text: str,
+ default_variant: Optional[str] = None,
+ _emoji_sub: _EmojiSubMethod = re.compile(r"(:(\S*?)(?:(?:\-)(emoji|text))?:)").sub,
+) -> str:
+ """Replace emoji code in text."""
+ get_emoji = EMOJI.__getitem__
+ variants = {"text": "\uFE0E", "emoji": "\uFE0F"}
+ get_variant = variants.get
+ default_variant_code = variants.get(default_variant, "") if default_variant else ""
+
+ def do_replace(match: Match[str]) -> str:
+ emoji_code, emoji_name, variant = match.groups()
+ try:
+ return get_emoji(emoji_name.lower()) + get_variant(
+ variant, default_variant_code
+ )
+ except KeyError:
+ return emoji_code
+
+ return _emoji_sub(do_replace, text)
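
The compiled pattern matches ":name:" codes, with an optional "-emoji" or "-text" suffix selecting a presentation variant (U+FE0F or U+FE0E); unknown names fall into the KeyError branch and are returned verbatim. A usage sketch, assuming the vendored path and that "rocket" is a key in the (elided) earlier part of the EMOJI table:

    from pip._vendor.rich._emoji_replace import _emoji_replace

    print(_emoji_replace("deploy finished :rocket:"))  # code replaced with the emoji
    print(_emoji_replace(":no_such_code:"))            # unknown code returned unchanged
    print(_emoji_replace(":rocket-text:"))             # U+FE0E appended to force text presentation
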
diff --git a/third_party/python/pip/pip/_vendor/rich/_export_format.py b/third_party/python/pip/pip/_vendor/rich/_export_format.py
new file mode 100644
index 0000000000..b79c13069b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_export_format.py
@@ -0,0 +1,78 @@
+CONSOLE_HTML_FORMAT = """\
+<!DOCTYPE html>
+<html>
+<head>
+<meta charset="UTF-8">
+<style>
+{stylesheet}
+body {{
+ color: {foreground};
+ background-color: {background};
+}}
+</style>
+</head>
+<body>
+ <code>
+ <pre style="font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
+ </code>
+</body>
+</html>
+"""
+
+CONSOLE_SVG_FORMAT = """\
+<svg class="rich-terminal" viewBox="0 0 {width} {height}" xmlns="http://www.w3.org/2000/svg">
+ <!-- Generated with Rich https://www.textualize.io -->
+ <style>
+
+ @font-face {{
+ font-family: "Fira Code";
+ src: local("FiraCode-Regular"),
+ url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Regular.woff2") format("woff2"),
+ url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Regular.woff") format("woff");
+ font-style: normal;
+ font-weight: 400;
+ }}
+ @font-face {{
+ font-family: "Fira Code";
+ src: local("FiraCode-Bold"),
+ url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff2/FiraCode-Bold.woff2") format("woff2"),
+ url("https://cdnjs.cloudflare.com/ajax/libs/firacode/6.2.0/woff/FiraCode-Bold.woff") format("woff");
+        font-style: normal;
+ font-weight: 700;
+ }}
+
+ .{unique_id}-matrix {{
+ font-family: Fira Code, monospace;
+ font-size: {char_height}px;
+ line-height: {line_height}px;
+ font-variant-east-asian: full-width;
+ }}
+
+ .{unique_id}-title {{
+ font-size: 18px;
+ font-weight: bold;
+ font-family: arial;
+ }}
+
+ {styles}
+ </style>
+
+ <defs>
+ <clipPath id="{unique_id}-clip-terminal">
+ <rect x="0" y="0" width="{terminal_width}" height="{terminal_height}" />
+ </clipPath>
+ {lines}
+ </defs>
+
+ {chrome}
+ <g transform="translate({terminal_x}, {terminal_y})" clip-path="url(#{unique_id}-clip-terminal)">
+ {backgrounds}
+ <g class="{unique_id}-matrix">
+ {matrix}
+ </g>
+ </g>
+</svg>
+"""
+
+_SVG_FONT_FAMILY = "Rich Fira Code"
+_SVG_CLASSES_PREFIX = "rich-svg"
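
Both templates are filled with str.format, so every literal CSS or SVG brace is doubled ("{{", "}}") while substitution fields keep single braces. A sketch of rendering the HTML template, with hypothetical colors:

    from pip._vendor.rich._export_format import CONSOLE_HTML_FORMAT

    html = CONSOLE_HTML_FORMAT.format(
        stylesheet="",         # extra <style> rules, if any
        foreground="#c5c8c6",  # hypothetical foreground color
        background="#1d1f21",  # hypothetical background color
        code="hello, world",
    )
    assert "body {" in html    # the doubled braces survive as literal CSS
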
diff --git a/third_party/python/pip/pip/_vendor/rich/_extension.py b/third_party/python/pip/pip/_vendor/rich/_extension.py
new file mode 100644
index 0000000000..cbd6da9be4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_extension.py
@@ -0,0 +1,10 @@
+from typing import Any
+
+
+def load_ipython_extension(ip: Any) -> None: # pragma: no cover
+ # prevent circular import
+ from pip._vendor.rich.pretty import install
+ from pip._vendor.rich.traceback import install as tr_install
+
+ install()
+ tr_install()
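
load_ipython_extension is the hook IPython invokes when this module is loaded as an extension; it installs rich's pretty printer and traceback renderer. The equivalent manual setup, as a sketch using the same vendored imports:

    from pip._vendor.rich.pretty import install
    from pip._vendor.rich.traceback import install as tr_install

    install()     # pretty-print evaluation results
    tr_install()  # render uncaught exceptions as rich tracebacks
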
diff --git a/third_party/python/pip/pip/_vendor/rich/_inspect.py b/third_party/python/pip/pip/_vendor/rich/_inspect.py
new file mode 100644
index 0000000000..30446ceb3f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_inspect.py
@@ -0,0 +1,270 @@
+from __future__ import absolute_import
+
+import inspect
+from inspect import cleandoc, getdoc, getfile, isclass, ismodule, signature
+from typing import Any, Collection, Iterable, Optional, Tuple, Type, Union
+
+from .console import Group, RenderableType
+from .control import escape_control_codes
+from .highlighter import ReprHighlighter
+from .jupyter import JupyterMixin
+from .panel import Panel
+from .pretty import Pretty
+from .table import Table
+from .text import Text, TextType
+
+
+def _first_paragraph(doc: str) -> str:
+ """Get the first paragraph from a docstring."""
+ paragraph, _, _ = doc.partition("\n\n")
+ return paragraph
+
+
+class Inspect(JupyterMixin):
+ """A renderable to inspect any Python Object.
+
+ Args:
+ obj (Any): An object to inspect.
+        title (str, optional): Title to display over inspect result, or None to use the type name. Defaults to None.
+ help (bool, optional): Show full help text rather than just first paragraph. Defaults to False.
+ methods (bool, optional): Enable inspection of callables. Defaults to False.
+ docs (bool, optional): Also render doc strings. Defaults to True.
+ private (bool, optional): Show private attributes (beginning with underscore). Defaults to False.
+ dunder (bool, optional): Show attributes starting with double underscore. Defaults to False.
+ sort (bool, optional): Sort attributes alphabetically. Defaults to True.
+        all (bool, optional): Show all attributes (implies methods, private, and dunder). Defaults to True.
+ value (bool, optional): Pretty print value of object. Defaults to True.
+ """
+
+ def __init__(
+ self,
+ obj: Any,
+ *,
+ title: Optional[TextType] = None,
+ help: bool = False,
+ methods: bool = False,
+ docs: bool = True,
+ private: bool = False,
+ dunder: bool = False,
+ sort: bool = True,
+ all: bool = True,
+ value: bool = True,
+ ) -> None:
+ self.highlighter = ReprHighlighter()
+ self.obj = obj
+ self.title = title or self._make_title(obj)
+ if all:
+ methods = private = dunder = True
+ self.help = help
+ self.methods = methods
+ self.docs = docs or help
+ self.private = private or dunder
+ self.dunder = dunder
+ self.sort = sort
+ self.value = value
+
+ def _make_title(self, obj: Any) -> Text:
+ """Make a default title."""
+ title_str = (
+ str(obj)
+ if (isclass(obj) or callable(obj) or ismodule(obj))
+ else str(type(obj))
+ )
+ title_text = self.highlighter(title_str)
+ return title_text
+
+ def __rich__(self) -> Panel:
+ return Panel.fit(
+ Group(*self._render()),
+ title=self.title,
+ border_style="scope.border",
+ padding=(0, 1),
+ )
+
+ def _get_signature(self, name: str, obj: Any) -> Optional[Text]:
+ """Get a signature for a callable."""
+ try:
+ _signature = str(signature(obj)) + ":"
+ except ValueError:
+ _signature = "(...)"
+ except TypeError:
+ return None
+
+ source_filename: Optional[str] = None
+ try:
+ source_filename = getfile(obj)
+ except (OSError, TypeError):
+ # OSError is raised if obj has no source file, e.g. when defined in REPL.
+ pass
+
+ callable_name = Text(name, style="inspect.callable")
+ if source_filename:
+ callable_name.stylize(f"link file://{source_filename}")
+ signature_text = self.highlighter(_signature)
+
+ qualname = name or getattr(obj, "__qualname__", name)
+
+ # If obj is a module, there may be classes (which are callable) to display
+ if inspect.isclass(obj):
+ prefix = "class"
+ elif inspect.iscoroutinefunction(obj):
+ prefix = "async def"
+ else:
+ prefix = "def"
+
+ qual_signature = Text.assemble(
+ (f"{prefix} ", f"inspect.{prefix.replace(' ', '_')}"),
+ (qualname, "inspect.callable"),
+ signature_text,
+ )
+
+ return qual_signature
+
+ def _render(self) -> Iterable[RenderableType]:
+ """Render object."""
+
+ def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
+ key, (_error, value) = item
+ return (callable(value), key.strip("_").lower())
+
+ def safe_getattr(attr_name: str) -> Tuple[Any, Any]:
+ """Get attribute or any exception."""
+ try:
+ return (None, getattr(obj, attr_name))
+ except Exception as error:
+ return (error, None)
+
+ obj = self.obj
+ keys = dir(obj)
+ total_items = len(keys)
+ if not self.dunder:
+ keys = [key for key in keys if not key.startswith("__")]
+ if not self.private:
+ keys = [key for key in keys if not key.startswith("_")]
+ not_shown_count = total_items - len(keys)
+ items = [(key, safe_getattr(key)) for key in keys]
+ if self.sort:
+ items.sort(key=sort_items)
+
+ items_table = Table.grid(padding=(0, 1), expand=False)
+ items_table.add_column(justify="right")
+ add_row = items_table.add_row
+ highlighter = self.highlighter
+
+ if callable(obj):
+ signature = self._get_signature("", obj)
+ if signature is not None:
+ yield signature
+ yield ""
+
+ if self.docs:
+ _doc = self._get_formatted_doc(obj)
+ if _doc is not None:
+ doc_text = Text(_doc, style="inspect.help")
+ doc_text = highlighter(doc_text)
+ yield doc_text
+ yield ""
+
+ if self.value and not (isclass(obj) or callable(obj) or ismodule(obj)):
+ yield Panel(
+ Pretty(obj, indent_guides=True, max_length=10, max_string=60),
+ border_style="inspect.value.border",
+ )
+ yield ""
+
+ for key, (error, value) in items:
+ key_text = Text.assemble(
+ (
+ key,
+ "inspect.attr.dunder" if key.startswith("__") else "inspect.attr",
+ ),
+ (" =", "inspect.equals"),
+ )
+ if error is not None:
+ warning = key_text.copy()
+ warning.stylize("inspect.error")
+ add_row(warning, highlighter(repr(error)))
+ continue
+
+ if callable(value):
+ if not self.methods:
+ continue
+
+ _signature_text = self._get_signature(key, value)
+ if _signature_text is None:
+ add_row(key_text, Pretty(value, highlighter=highlighter))
+ else:
+ if self.docs:
+ docs = self._get_formatted_doc(value)
+ if docs is not None:
+ _signature_text.append("\n" if "\n" in docs else " ")
+ doc = highlighter(docs)
+ doc.stylize("inspect.doc")
+ _signature_text.append(doc)
+
+ add_row(key_text, _signature_text)
+ else:
+ add_row(key_text, Pretty(value, highlighter=highlighter))
+ if items_table.row_count:
+ yield items_table
+ elif not_shown_count:
+ yield Text.from_markup(
+ f"[b cyan]{not_shown_count}[/][i] attribute(s) not shown.[/i] "
+ f"Run [b][magenta]inspect[/]([not b]inspect[/])[/b] for options."
+ )
+
+ def _get_formatted_doc(self, object_: Any) -> Optional[str]:
+ """
+        Extract the docstring of an object, process it, and return it.
+        The processing consists of cleaning up the docstring's indentation,
+        taking only its first paragraph if `self.help` is not True,
+        and escaping its control codes.
+
+ Args:
+ object_ (Any): the object to get the docstring from.
+
+ Returns:
+ Optional[str]: the processed docstring, or None if no docstring was found.
+ """
+ docs = getdoc(object_)
+ if docs is None:
+ return None
+ docs = cleandoc(docs).strip()
+ if not self.help:
+ docs = _first_paragraph(docs)
+ return escape_control_codes(docs)
+
+
+def get_object_types_mro(obj: Union[object, Type[Any]]) -> Tuple[type, ...]:
+ """Returns the MRO of an object's class, or of the object itself if it's a class."""
+ if not hasattr(obj, "__mro__"):
+ # N.B. we cannot use `if type(obj) is type` here because it doesn't work with
+ # some types of classes, such as the ones that use abc.ABCMeta.
+ obj = type(obj)
+ return getattr(obj, "__mro__", ())
+
+
+def get_object_types_mro_as_strings(obj: object) -> Collection[str]:
+ """
+    Returns the MRO of an object's class as fully qualified names, or of the object itself if it's a class.
+
+ Examples:
+        `get_object_types_mro_as_strings(JSONDecoder)` will return `['json.decoder.JSONDecoder', 'builtins.object']`
+ """
+ return [
+ f'{getattr(type_, "__module__", "")}.{getattr(type_, "__qualname__", "")}'
+ for type_ in get_object_types_mro(obj)
+ ]
+
+
+def is_object_one_of_types(
+ obj: object, fully_qualified_types_names: Collection[str]
+) -> bool:
+ """
+ Returns `True` if the given object's class (or the object itself, if it's a class) has one of the
+ fully qualified names in its MRO.
+ """
+ for type_name in get_object_types_mro_as_strings(obj):
+ if type_name in fully_qualified_types_names:
+ return True
+ return False
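
Inspect is a renderable, so it is drawn by printing it to a Console, and the MRO helpers let callers test an object's ancestry by fully qualified name without importing the classes involved. A short sketch, assuming the vendored paths:

    from pip._vendor.rich.console import Console
    from pip._vendor.rich._inspect import Inspect, is_object_one_of_types

    Console().print(Inspect([], all=False, methods=True))  # inspect a list

    # ValueError's MRO includes builtins.Exception, so this holds:
    assert is_object_one_of_types(ValueError("boom"), {"builtins.Exception"})
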
diff --git a/third_party/python/pip/pip/_vendor/rich/_log_render.py b/third_party/python/pip/pip/_vendor/rich/_log_render.py
new file mode 100644
index 0000000000..fc16c84437
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_log_render.py
@@ -0,0 +1,94 @@
+from datetime import datetime
+from typing import Iterable, List, Optional, TYPE_CHECKING, Union, Callable
+
+
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleRenderable, RenderableType
+ from .table import Table
+
+FormatTimeCallable = Callable[[datetime], Text]
+
+
+class LogRender:
+ def __init__(
+ self,
+ show_time: bool = True,
+ show_level: bool = False,
+ show_path: bool = True,
+ time_format: Union[str, FormatTimeCallable] = "[%x %X]",
+ omit_repeated_times: bool = True,
+ level_width: Optional[int] = 8,
+ ) -> None:
+ self.show_time = show_time
+ self.show_level = show_level
+ self.show_path = show_path
+ self.time_format = time_format
+ self.omit_repeated_times = omit_repeated_times
+ self.level_width = level_width
+ self._last_time: Optional[Text] = None
+
+ def __call__(
+ self,
+ console: "Console",
+ renderables: Iterable["ConsoleRenderable"],
+ log_time: Optional[datetime] = None,
+ time_format: Optional[Union[str, FormatTimeCallable]] = None,
+ level: TextType = "",
+ path: Optional[str] = None,
+ line_no: Optional[int] = None,
+ link_path: Optional[str] = None,
+ ) -> "Table":
+ from .containers import Renderables
+ from .table import Table
+
+ output = Table.grid(padding=(0, 1))
+ output.expand = True
+ if self.show_time:
+ output.add_column(style="log.time")
+ if self.show_level:
+ output.add_column(style="log.level", width=self.level_width)
+ output.add_column(ratio=1, style="log.message", overflow="fold")
+ if self.show_path and path:
+ output.add_column(style="log.path")
+ row: List["RenderableType"] = []
+ if self.show_time:
+ log_time = log_time or console.get_datetime()
+ time_format = time_format or self.time_format
+ if callable(time_format):
+ log_time_display = time_format(log_time)
+ else:
+ log_time_display = Text(log_time.strftime(time_format))
+ if log_time_display == self._last_time and self.omit_repeated_times:
+ row.append(Text(" " * len(log_time_display)))
+ else:
+ row.append(log_time_display)
+ self._last_time = log_time_display
+ if self.show_level:
+ row.append(level)
+
+ row.append(Renderables(renderables))
+ if self.show_path and path:
+ path_text = Text()
+ path_text.append(
+ path, style=f"link file://{link_path}" if link_path else ""
+ )
+ if line_no:
+ path_text.append(":")
+ path_text.append(
+ f"{line_no}",
+ style=f"link file://{link_path}#{line_no}" if link_path else "",
+ )
+ row.append(path_text)
+
+ output.add_row(*row)
+ return output
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+
+ c = Console()
+ c.print("[on blue]Hello", justify="right")
+ c.log("[on blue]hello", justify="right")
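
LogRender assembles one table row per log call from the constructor flags, blanking repeated timestamps when omit_repeated_times is set. Console.log drives it internally; a direct-use sketch with hypothetical message and path values:

    from datetime import datetime
    from pip._vendor.rich.console import Console
    from pip._vendor.rich._log_render import LogRender
    from pip._vendor.rich.text import Text

    console = Console()
    render = LogRender(show_time=True, show_level=True)
    row_table = render(
        console,
        [Text("cache miss; fetching upstream")],  # hypothetical message
        log_time=datetime(2024, 1, 1, 12, 0, 0),
        level=Text("INFO"),
        path="fetch.py",                          # hypothetical path
        line_no=42,
    )
    console.print(row_table)
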
diff --git a/third_party/python/pip/pip/_vendor/rich/_loop.py b/third_party/python/pip/pip/_vendor/rich/_loop.py
new file mode 100644
index 0000000000..01c6cafbe5
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_loop.py
@@ -0,0 +1,43 @@
+from typing import Iterable, Tuple, TypeVar
+
+T = TypeVar("T")
+
+
+def loop_first(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for first value."""
+ iter_values = iter(values)
+ try:
+ value = next(iter_values)
+ except StopIteration:
+ return
+ yield True, value
+ for value in iter_values:
+ yield False, value
+
+
+def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for last value."""
+ iter_values = iter(values)
+ try:
+ previous_value = next(iter_values)
+ except StopIteration:
+ return
+ for value in iter_values:
+ yield False, previous_value
+ previous_value = value
+ yield True, previous_value
+
+
+def loop_first_last(values: Iterable[T]) -> Iterable[Tuple[bool, bool, T]]:
+ """Iterate and generate a tuple with a flag for first and last value."""
+ iter_values = iter(values)
+ try:
+ previous_value = next(iter_values)
+ except StopIteration:
+ return
+ first = True
+ for value in iter_values:
+ yield first, False, previous_value
+ first = False
+ previous_value = value
+ yield first, True, previous_value
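
All three helpers flag boundary items while iterating, which works even for one-shot iterators where len() and indexing are unavailable. For example:

    from pip._vendor.rich._loop import loop_first, loop_first_last, loop_last

    for last, word in loop_last(["ham", "spam", "eggs"]):
        print(word, end="\n" if last else ", ")  # -> ham, spam, eggs

    assert list(loop_first("ab")) == [(True, "a"), (False, "b")]
    assert list(loop_first_last("abc"))[1] == (False, False, "b")
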
diff --git a/third_party/python/pip/pip/_vendor/rich/_null_file.py b/third_party/python/pip/pip/_vendor/rich/_null_file.py
new file mode 100644
index 0000000000..49038bfcbe
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_null_file.py
@@ -0,0 +1,84 @@
+from types import TracebackType
+from typing import IO, Iterable, Iterator, List, Optional, Type
+
+
+class NullFile(IO[str]):
+
+ # TODO: "mode", "name" and "closed" are only required for Python 3.6.
+
+ @property
+ def mode(self) -> str:
+ return ""
+
+ @property
+ def name(self) -> str:
+ return "NullFile"
+
+    @property
+    def closed(self) -> bool:
+        return False
+
+ def close(self) -> None:
+ pass
+
+ def isatty(self) -> bool:
+ return False
+
+ def read(self, __n: int = 1) -> str:
+ return ""
+
+ def readable(self) -> bool:
+ return False
+
+ def readline(self, __limit: int = 1) -> str:
+ return ""
+
+ def readlines(self, __hint: int = 1) -> List[str]:
+ return []
+
+ def seek(self, __offset: int, __whence: int = 1) -> int:
+ return 0
+
+ def seekable(self) -> bool:
+ return False
+
+ def tell(self) -> int:
+ return 0
+
+ def truncate(self, __size: Optional[int] = 1) -> int:
+ return 0
+
+ def writable(self) -> bool:
+ return False
+
+ def writelines(self, __lines: Iterable[str]) -> None:
+ pass
+
+ def __next__(self) -> str:
+ return ""
+
+ def __iter__(self) -> Iterator[str]:
+ return iter([""])
+
+ def __enter__(self) -> IO[str]:
+        return self
+
+ def __exit__(
+ self,
+ __t: Optional[Type[BaseException]],
+ __value: Optional[BaseException],
+ __traceback: Optional[TracebackType],
+ ) -> None:
+ pass
+
+ def write(self, text: str) -> int:
+ return 0
+
+ def flush(self) -> None:
+ pass
+
+ def fileno(self) -> int:
+ return -1
+
+
+NULL_FILE = NullFile()
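
NULL_FILE is a shared, do-nothing IO[str]: writes report zero bytes written, reads yield empty strings, and flush/close are no-ops, so it can stand in wherever a real stream is required but all output should be discarded. A sketch:

    from pip._vendor.rich._null_file import NULL_FILE

    assert NULL_FILE.write("discarded\n") == 0
    NULL_FILE.flush()
    assert NULL_FILE.read() == "" and not NULL_FILE.isatty()
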
diff --git a/third_party/python/pip/pip/_vendor/rich/_palettes.py b/third_party/python/pip/pip/_vendor/rich/_palettes.py
new file mode 100644
index 0000000000..3c748d33e4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_palettes.py
@@ -0,0 +1,309 @@
+from .palette import Palette
+
+
+# Taken from https://en.wikipedia.org/wiki/ANSI_escape_code (Windows 10 column)
+WINDOWS_PALETTE = Palette(
+ [
+ (12, 12, 12),
+ (197, 15, 31),
+ (19, 161, 14),
+ (193, 156, 0),
+ (0, 55, 218),
+ (136, 23, 152),
+ (58, 150, 221),
+ (204, 204, 204),
+ (118, 118, 118),
+ (231, 72, 86),
+ (22, 198, 12),
+ (249, 241, 165),
+ (59, 120, 255),
+ (180, 0, 158),
+ (97, 214, 214),
+ (242, 242, 242),
+ ]
+)
+
+# The standard ANSI colors (including bright variants)
+STANDARD_PALETTE = Palette(
+ [
+ (0, 0, 0),
+ (170, 0, 0),
+ (0, 170, 0),
+ (170, 85, 0),
+ (0, 0, 170),
+ (170, 0, 170),
+ (0, 170, 170),
+ (170, 170, 170),
+ (85, 85, 85),
+ (255, 85, 85),
+ (85, 255, 85),
+ (255, 255, 85),
+ (85, 85, 255),
+ (255, 85, 255),
+ (85, 255, 255),
+ (255, 255, 255),
+ ]
+)
+
+
+# The 256 color palette
+EIGHT_BIT_PALETTE = Palette(
+ [
+ (0, 0, 0),
+ (128, 0, 0),
+ (0, 128, 0),
+ (128, 128, 0),
+ (0, 0, 128),
+ (128, 0, 128),
+ (0, 128, 128),
+ (192, 192, 192),
+ (128, 128, 128),
+ (255, 0, 0),
+ (0, 255, 0),
+ (255, 255, 0),
+ (0, 0, 255),
+ (255, 0, 255),
+ (0, 255, 255),
+ (255, 255, 255),
+ (0, 0, 0),
+ (0, 0, 95),
+ (0, 0, 135),
+ (0, 0, 175),
+ (0, 0, 215),
+ (0, 0, 255),
+ (0, 95, 0),
+ (0, 95, 95),
+ (0, 95, 135),
+ (0, 95, 175),
+ (0, 95, 215),
+ (0, 95, 255),
+ (0, 135, 0),
+ (0, 135, 95),
+ (0, 135, 135),
+ (0, 135, 175),
+ (0, 135, 215),
+ (0, 135, 255),
+ (0, 175, 0),
+ (0, 175, 95),
+ (0, 175, 135),
+ (0, 175, 175),
+ (0, 175, 215),
+ (0, 175, 255),
+ (0, 215, 0),
+ (0, 215, 95),
+ (0, 215, 135),
+ (0, 215, 175),
+ (0, 215, 215),
+ (0, 215, 255),
+ (0, 255, 0),
+ (0, 255, 95),
+ (0, 255, 135),
+ (0, 255, 175),
+ (0, 255, 215),
+ (0, 255, 255),
+ (95, 0, 0),
+ (95, 0, 95),
+ (95, 0, 135),
+ (95, 0, 175),
+ (95, 0, 215),
+ (95, 0, 255),
+ (95, 95, 0),
+ (95, 95, 95),
+ (95, 95, 135),
+ (95, 95, 175),
+ (95, 95, 215),
+ (95, 95, 255),
+ (95, 135, 0),
+ (95, 135, 95),
+ (95, 135, 135),
+ (95, 135, 175),
+ (95, 135, 215),
+ (95, 135, 255),
+ (95, 175, 0),
+ (95, 175, 95),
+ (95, 175, 135),
+ (95, 175, 175),
+ (95, 175, 215),
+ (95, 175, 255),
+ (95, 215, 0),
+ (95, 215, 95),
+ (95, 215, 135),
+ (95, 215, 175),
+ (95, 215, 215),
+ (95, 215, 255),
+ (95, 255, 0),
+ (95, 255, 95),
+ (95, 255, 135),
+ (95, 255, 175),
+ (95, 255, 215),
+ (95, 255, 255),
+ (135, 0, 0),
+ (135, 0, 95),
+ (135, 0, 135),
+ (135, 0, 175),
+ (135, 0, 215),
+ (135, 0, 255),
+ (135, 95, 0),
+ (135, 95, 95),
+ (135, 95, 135),
+ (135, 95, 175),
+ (135, 95, 215),
+ (135, 95, 255),
+ (135, 135, 0),
+ (135, 135, 95),
+ (135, 135, 135),
+ (135, 135, 175),
+ (135, 135, 215),
+ (135, 135, 255),
+ (135, 175, 0),
+ (135, 175, 95),
+ (135, 175, 135),
+ (135, 175, 175),
+ (135, 175, 215),
+ (135, 175, 255),
+ (135, 215, 0),
+ (135, 215, 95),
+ (135, 215, 135),
+ (135, 215, 175),
+ (135, 215, 215),
+ (135, 215, 255),
+ (135, 255, 0),
+ (135, 255, 95),
+ (135, 255, 135),
+ (135, 255, 175),
+ (135, 255, 215),
+ (135, 255, 255),
+ (175, 0, 0),
+ (175, 0, 95),
+ (175, 0, 135),
+ (175, 0, 175),
+ (175, 0, 215),
+ (175, 0, 255),
+ (175, 95, 0),
+ (175, 95, 95),
+ (175, 95, 135),
+ (175, 95, 175),
+ (175, 95, 215),
+ (175, 95, 255),
+ (175, 135, 0),
+ (175, 135, 95),
+ (175, 135, 135),
+ (175, 135, 175),
+ (175, 135, 215),
+ (175, 135, 255),
+ (175, 175, 0),
+ (175, 175, 95),
+ (175, 175, 135),
+ (175, 175, 175),
+ (175, 175, 215),
+ (175, 175, 255),
+ (175, 215, 0),
+ (175, 215, 95),
+ (175, 215, 135),
+ (175, 215, 175),
+ (175, 215, 215),
+ (175, 215, 255),
+ (175, 255, 0),
+ (175, 255, 95),
+ (175, 255, 135),
+ (175, 255, 175),
+ (175, 255, 215),
+ (175, 255, 255),
+ (215, 0, 0),
+ (215, 0, 95),
+ (215, 0, 135),
+ (215, 0, 175),
+ (215, 0, 215),
+ (215, 0, 255),
+ (215, 95, 0),
+ (215, 95, 95),
+ (215, 95, 135),
+ (215, 95, 175),
+ (215, 95, 215),
+ (215, 95, 255),
+ (215, 135, 0),
+ (215, 135, 95),
+ (215, 135, 135),
+ (215, 135, 175),
+ (215, 135, 215),
+ (215, 135, 255),
+ (215, 175, 0),
+ (215, 175, 95),
+ (215, 175, 135),
+ (215, 175, 175),
+ (215, 175, 215),
+ (215, 175, 255),
+ (215, 215, 0),
+ (215, 215, 95),
+ (215, 215, 135),
+ (215, 215, 175),
+ (215, 215, 215),
+ (215, 215, 255),
+ (215, 255, 0),
+ (215, 255, 95),
+ (215, 255, 135),
+ (215, 255, 175),
+ (215, 255, 215),
+ (215, 255, 255),
+ (255, 0, 0),
+ (255, 0, 95),
+ (255, 0, 135),
+ (255, 0, 175),
+ (255, 0, 215),
+ (255, 0, 255),
+ (255, 95, 0),
+ (255, 95, 95),
+ (255, 95, 135),
+ (255, 95, 175),
+ (255, 95, 215),
+ (255, 95, 255),
+ (255, 135, 0),
+ (255, 135, 95),
+ (255, 135, 135),
+ (255, 135, 175),
+ (255, 135, 215),
+ (255, 135, 255),
+ (255, 175, 0),
+ (255, 175, 95),
+ (255, 175, 135),
+ (255, 175, 175),
+ (255, 175, 215),
+ (255, 175, 255),
+ (255, 215, 0),
+ (255, 215, 95),
+ (255, 215, 135),
+ (255, 215, 175),
+ (255, 215, 215),
+ (255, 215, 255),
+ (255, 255, 0),
+ (255, 255, 95),
+ (255, 255, 135),
+ (255, 255, 175),
+ (255, 255, 215),
+ (255, 255, 255),
+ (8, 8, 8),
+ (18, 18, 18),
+ (28, 28, 28),
+ (38, 38, 38),
+ (48, 48, 48),
+ (58, 58, 58),
+ (68, 68, 68),
+ (78, 78, 78),
+ (88, 88, 88),
+ (98, 98, 98),
+ (108, 108, 108),
+ (118, 118, 118),
+ (128, 128, 128),
+ (138, 138, 138),
+ (148, 148, 148),
+ (158, 158, 158),
+ (168, 168, 168),
+ (178, 178, 178),
+ (188, 188, 188),
+ (198, 198, 198),
+ (208, 208, 208),
+ (218, 218, 218),
+ (228, 228, 228),
+ (238, 238, 238),
+ ]
+)
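
Each palette is an ordered list of RGB triplets, so downsampling a truecolor value means finding the index with the smallest color distance. A simplified sketch using an unweighted squared Euclidean distance and assuming Palette supports integer indexing (the vendored Palette.match may weight channels differently):

    from pip._vendor.rich._palettes import EIGHT_BIT_PALETTE

    def nearest_index(red: int, green: int, blue: int) -> int:
        """Index of the 8-bit palette entry closest to (red, green, blue)."""
        def distance(index: int) -> int:
            r, g, b = EIGHT_BIT_PALETTE[index]
            return (r - red) ** 2 + (g - green) ** 2 + (b - blue) ** 2
        return min(range(256), key=distance)

    print(nearest_index(250, 128, 114))  # closest 8-bit entry to salmon
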
diff --git a/third_party/python/pip/pip/_vendor/rich/_pick.py b/third_party/python/pip/pip/_vendor/rich/_pick.py
new file mode 100644
index 0000000000..4f6d8b2d79
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_pick.py
@@ -0,0 +1,17 @@
+from typing import Optional
+
+
+def pick_bool(*values: Optional[bool]) -> bool:
+ """Pick the first non-none bool or return the last value.
+
+ Args:
+ *values (bool): Any number of boolean or None values.
+
+ Returns:
+ bool: First non-none boolean.
+ """
+ assert values, "1 or more values required"
+ for value in values:
+ if value is not None:
+ return value
+ return bool(value)
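
pick_bool resolves an override chain, such as a per-call option beating an instance default, which in turn beats a hard-coded fallback. Some illustrative calls:

    from pip._vendor.rich._pick import pick_bool

    assert pick_bool(None, None, True) is True
    assert pick_bool(None, False, True) is False  # first non-None value wins
    assert pick_bool(None, None, None) is False   # falls back to bool(last value)
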
diff --git a/third_party/python/pip/pip/_vendor/rich/_ratio.py b/third_party/python/pip/pip/_vendor/rich/_ratio.py
new file mode 100644
index 0000000000..e8a3a674e0
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_ratio.py
@@ -0,0 +1,160 @@
+import sys
+from fractions import Fraction
+from math import ceil
+from typing import cast, List, Optional, Sequence
+
+if sys.version_info >= (3, 8):
+ from typing import Protocol
+else:
+ from pip._vendor.typing_extensions import Protocol # pragma: no cover
+
+
+class Edge(Protocol):
+ """Any object that defines an edge (such as Layout)."""
+
+ size: Optional[int] = None
+ ratio: int = 1
+ minimum_size: int = 1
+
+
+def ratio_resolve(total: int, edges: Sequence[Edge]) -> List[int]:
+ """Divide total space to satisfy size, ratio, and minimum_size, constraints.
+
+    The returned list of integers should add up to total in most cases, unless it is
+    impossible to satisfy all the constraints. For instance, if there are two edges
+    with a minimum size of 20 each and `total` is 30, then the sum of the returned
+    list will be greater than total. In practice, this would mean that a Layout
+    object would clip the rows that would overflow the screen height.
+
+ Args:
+ total (int): Total number of characters.
+ edges (List[Edge]): Edges within total space.
+
+ Returns:
+ List[int]: Number of characters for each edge.
+ """
+ # Size of edge or None for yet to be determined
+ sizes = [(edge.size or None) for edge in edges]
+
+ _Fraction = Fraction
+
+ # While any edges haven't been calculated
+ while None in sizes:
+ # Get flexible edges and index to map these back on to sizes list
+ flexible_edges = [
+ (index, edge)
+ for index, (size, edge) in enumerate(zip(sizes, edges))
+ if size is None
+ ]
+ # Remaining space in total
+ remaining = total - sum(size or 0 for size in sizes)
+ if remaining <= 0:
+ # No room for flexible edges
+ return [
+ ((edge.minimum_size or 1) if size is None else size)
+ for size, edge in zip(sizes, edges)
+ ]
+ # Calculate number of characters in a ratio portion
+ portion = _Fraction(
+ remaining, sum((edge.ratio or 1) for _, edge in flexible_edges)
+ )
+
+ # If any edges will be less than their minimum, replace size with the minimum
+ for index, edge in flexible_edges:
+ if portion * edge.ratio <= edge.minimum_size:
+ sizes[index] = edge.minimum_size
+ # New fixed size will invalidate calculations, so we need to repeat the process
+ break
+ else:
+            # Distribute flexible space and compensate for rounding error
+            # Since edge sizes can only be integers we need to carry the
+            # remainder over to the next edge
+ remainder = _Fraction(0)
+ for index, edge in flexible_edges:
+ size, remainder = divmod(portion * edge.ratio + remainder, 1)
+ sizes[index] = size
+ break
+ # Sizes now contains integers only
+ return cast(List[int], sizes)
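+
+
+# Example (illustrative), using the ``E`` dataclass from the ``__main__`` block
+# below: three flexible edges with ratios 2:1:1 over 100 cells resolve to
+# [50, 25, 25]; fixed sizes and minimum sizes are honoured first.
+#
+#     ratio_resolve(100, [E(None, 2, 1), E(None, 1, 1), E(None, 1, 1)])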
+
+
+def ratio_reduce(
+ total: int, ratios: List[int], maximums: List[int], values: List[int]
+) -> List[int]:
+ """Divide an integer total in to parts based on ratios.
+
+ Args:
+ total (int): The total to divide.
+ ratios (List[int]): A list of integer ratios.
+        maximums (List[int]): List of maximum values for each slot.
+        values (List[int]): List of values to reduce.
+
+ Returns:
+        List[int]: A list of values, each reduced in proportion to its ratio.
+ """
+ ratios = [ratio if _max else 0 for ratio, _max in zip(ratios, maximums)]
+ total_ratio = sum(ratios)
+ if not total_ratio:
+ return values[:]
+ total_remaining = total
+ result: List[int] = []
+ append = result.append
+ for ratio, maximum, value in zip(ratios, maximums, values):
+ if ratio and total_ratio > 0:
+ distributed = min(maximum, round(ratio * total_remaining / total_ratio))
+ append(value - distributed)
+ total_remaining -= distributed
+ total_ratio -= ratio
+ else:
+ append(value)
+ return result
+
+
+def ratio_distribute(
+ total: int, ratios: List[int], minimums: Optional[List[int]] = None
+) -> List[int]:
+ """Distribute an integer total in to parts based on ratios.
+
+ Args:
+ total (int): The total to divide.
+ ratios (List[int]): A list of integer ratios.
+ minimums (List[int]): List of minimum values for each slot.
+
+ Returns:
+ List[int]: A list of integers guaranteed to sum to total.
+ """
+ if minimums:
+ ratios = [ratio if _min else 0 for ratio, _min in zip(ratios, minimums)]
+ total_ratio = sum(ratios)
+ assert total_ratio > 0, "Sum of ratios must be > 0"
+
+ total_remaining = total
+ distributed_total: List[int] = []
+ append = distributed_total.append
+ if minimums is None:
+ _minimums = [0] * len(ratios)
+ else:
+ _minimums = minimums
+ for ratio, minimum in zip(ratios, _minimums):
+ if total_ratio > 0:
+ distributed = max(minimum, ceil(ratio * total_remaining / total_ratio))
+ else:
+ distributed = total_remaining
+ append(distributed)
+ total_ratio -= ratio
+ total_remaining -= distributed
+ return distributed_total
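+
+
+# Example (illustrative):
+#
+#     ratio_distribute(10, [1, 2, 2])                      # -> [2, 4, 4]
+#     ratio_distribute(10, [1, 2, 2], minimums=[4, 1, 1])  # -> [4, 3, 3]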
+
+
+if __name__ == "__main__":
+ from dataclasses import dataclass
+
+ @dataclass
+ class E:
+ size: Optional[int] = None
+ ratio: int = 1
+ minimum_size: int = 1
+
+ resolved = ratio_resolve(110, [E(None, 1, 1), E(None, 1, 1), E(None, 1, 1)])
+ print(sum(resolved))
diff --git a/third_party/python/pip/pip/_vendor/rich/_spinners.py b/third_party/python/pip/pip/_vendor/rich/_spinners.py
new file mode 100644
index 0000000000..d0bb1fe751
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_spinners.py
@@ -0,0 +1,482 @@
+"""
+Spinners are from:
+* cli-spinners:
+ MIT License
+ Copyright (c) Sindre Sorhus <sindresorhus@gmail.com> (sindresorhus.com)
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights to
+ use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+ the Software, and to permit persons to whom the Software is furnished to do so,
+ subject to the following conditions:
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
+ INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
+ FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ IN THE SOFTWARE.
+"""
+
+SPINNERS = {
+ "dots": {
+ "interval": 80,
+ "frames": "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏",
+ },
+ "dots2": {"interval": 80, "frames": "⣾⣽⣻⢿⡿⣟⣯⣷"},
+ "dots3": {
+ "interval": 80,
+ "frames": "⠋⠙⠚⠞⠖⠦⠴⠲⠳⠓",
+ },
+ "dots4": {
+ "interval": 80,
+ "frames": "⠄⠆⠇⠋⠙⠸⠰⠠⠰⠸⠙⠋⠇⠆",
+ },
+ "dots5": {
+ "interval": 80,
+ "frames": "⠋⠙⠚⠒⠂⠂⠒⠲⠴⠦⠖⠒⠐⠐⠒⠓⠋",
+ },
+ "dots6": {
+ "interval": 80,
+ "frames": "⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠴⠲⠒⠂⠂⠒⠚⠙⠉⠁",
+ },
+ "dots7": {
+ "interval": 80,
+ "frames": "⠈⠉⠋⠓⠒⠐⠐⠒⠖⠦⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈",
+ },
+ "dots8": {
+ "interval": 80,
+ "frames": "⠁⠁⠉⠙⠚⠒⠂⠂⠒⠲⠴⠤⠄⠄⠤⠠⠠⠤⠦⠖⠒⠐⠐⠒⠓⠋⠉⠈⠈",
+ },
+ "dots9": {"interval": 80, "frames": "⢹⢺⢼⣸⣇⡧⡗⡏"},
+ "dots10": {"interval": 80, "frames": "⢄⢂⢁⡁⡈⡐⡠"},
+ "dots11": {"interval": 100, "frames": "⠁⠂⠄⡀⢀⠠⠐⠈"},
+ "dots12": {
+ "interval": 80,
+ "frames": [
+ "⢀⠀",
+ "⡀⠀",
+ "⠄⠀",
+ "⢂⠀",
+ "⡂⠀",
+ "⠅⠀",
+ "⢃⠀",
+ "⡃⠀",
+ "⠍⠀",
+ "⢋⠀",
+ "⡋⠀",
+ "⠍⠁",
+ "⢋⠁",
+ "⡋⠁",
+ "⠍⠉",
+ "⠋⠉",
+ "⠋⠉",
+ "⠉⠙",
+ "⠉⠙",
+ "⠉⠩",
+ "⠈⢙",
+ "⠈⡙",
+ "⢈⠩",
+ "⡀⢙",
+ "⠄⡙",
+ "⢂⠩",
+ "⡂⢘",
+ "⠅⡘",
+ "⢃⠨",
+ "⡃⢐",
+ "⠍⡐",
+ "⢋⠠",
+ "⡋⢀",
+ "⠍⡁",
+ "⢋⠁",
+ "⡋⠁",
+ "⠍⠉",
+ "⠋⠉",
+ "⠋⠉",
+ "⠉⠙",
+ "⠉⠙",
+ "⠉⠩",
+ "⠈⢙",
+ "⠈⡙",
+ "⠈⠩",
+ "⠀⢙",
+ "⠀⡙",
+ "⠀⠩",
+ "⠀⢘",
+ "⠀⡘",
+ "⠀⠨",
+ "⠀⢐",
+ "⠀⡐",
+ "⠀⠠",
+ "⠀⢀",
+ "⠀⡀",
+ ],
+ },
+ "dots8Bit": {
+ "interval": 80,
+ "frames": "⠀⠁⠂⠃⠄⠅⠆⠇⡀⡁⡂⡃⡄⡅⡆⡇⠈⠉⠊⠋⠌⠍⠎⠏⡈⡉⡊⡋⡌⡍⡎⡏⠐⠑⠒⠓⠔⠕⠖⠗⡐⡑⡒⡓⡔⡕⡖⡗⠘⠙⠚⠛⠜⠝⠞⠟⡘⡙"
+ "⡚⡛⡜⡝⡞⡟⠠⠡⠢⠣⠤⠥⠦⠧⡠⡡⡢⡣⡤⡥⡦⡧⠨⠩⠪⠫⠬⠭⠮⠯⡨⡩⡪⡫⡬⡭⡮⡯⠰⠱⠲⠳⠴⠵⠶⠷⡰⡱⡲⡳⡴⡵⡶⡷⠸⠹⠺⠻"
+ "⠼⠽⠾⠿⡸⡹⡺⡻⡼⡽⡾⡿⢀⢁⢂⢃⢄⢅⢆⢇⣀⣁⣂⣃⣄⣅⣆⣇⢈⢉⢊⢋⢌⢍⢎⢏⣈⣉⣊⣋⣌⣍⣎⣏⢐⢑⢒⢓⢔⢕⢖⢗⣐⣑⣒⣓⣔⣕"
+ "⣖⣗⢘⢙⢚⢛⢜⢝⢞⢟⣘⣙⣚⣛⣜⣝⣞⣟⢠⢡⢢⢣⢤⢥⢦⢧⣠⣡⣢⣣⣤⣥⣦⣧⢨⢩⢪⢫⢬⢭⢮⢯⣨⣩⣪⣫⣬⣭⣮⣯⢰⢱⢲⢳⢴⢵⢶⢷"
+ "⣰⣱⣲⣳⣴⣵⣶⣷⢸⢹⢺⢻⢼⢽⢾⢿⣸⣹⣺⣻⣼⣽⣾⣿",
+ },
+ "line": {"interval": 130, "frames": ["-", "\\", "|", "/"]},
+ "line2": {"interval": 100, "frames": "⠂-–—–-"},
+ "pipe": {"interval": 100, "frames": "┤┘┴└├┌┬┐"},
+ "simpleDots": {"interval": 400, "frames": [". ", ".. ", "...", " "]},
+ "simpleDotsScrolling": {
+ "interval": 200,
+ "frames": [". ", ".. ", "...", " ..", " .", " "],
+ },
+ "star": {"interval": 70, "frames": "✶✸✹✺✹✷"},
+ "star2": {"interval": 80, "frames": "+x*"},
+ "flip": {
+ "interval": 70,
+ "frames": "___-``'´-___",
+ },
+ "hamburger": {"interval": 100, "frames": "☱☲☴"},
+ "growVertical": {
+ "interval": 120,
+ "frames": "▁▃▄▅▆▇▆▅▄▃",
+ },
+ "growHorizontal": {
+ "interval": 120,
+ "frames": "▏▎▍▌▋▊▉▊▋▌▍▎",
+ },
+ "balloon": {"interval": 140, "frames": " .oO@* "},
+ "balloon2": {"interval": 120, "frames": ".oO°Oo."},
+ "noise": {"interval": 100, "frames": "▓▒░"},
+ "bounce": {"interval": 120, "frames": "⠁⠂⠄⠂"},
+ "boxBounce": {"interval": 120, "frames": "▖▘▝▗"},
+ "boxBounce2": {"interval": 100, "frames": "▌▀▐▄"},
+ "triangle": {"interval": 50, "frames": "◢◣◤◥"},
+ "arc": {"interval": 100, "frames": "◜◠◝◞◡◟"},
+ "circle": {"interval": 120, "frames": "◡⊙◠"},
+ "squareCorners": {"interval": 180, "frames": "◰◳◲◱"},
+ "circleQuarters": {"interval": 120, "frames": "◴◷◶◵"},
+ "circleHalves": {"interval": 50, "frames": "◐◓◑◒"},
+ "squish": {"interval": 100, "frames": "╫╪"},
+ "toggle": {"interval": 250, "frames": "⊶⊷"},
+ "toggle2": {"interval": 80, "frames": "▫▪"},
+ "toggle3": {"interval": 120, "frames": "□■"},
+ "toggle4": {"interval": 100, "frames": "■□▪▫"},
+ "toggle5": {"interval": 100, "frames": "▮▯"},
+ "toggle6": {"interval": 300, "frames": "ဝ၀"},
+ "toggle7": {"interval": 80, "frames": "⦾⦿"},
+ "toggle8": {"interval": 100, "frames": "◍◌"},
+ "toggle9": {"interval": 100, "frames": "◉◎"},
+ "toggle10": {"interval": 100, "frames": "㊂㊀㊁"},
+ "toggle11": {"interval": 50, "frames": "⧇⧆"},
+ "toggle12": {"interval": 120, "frames": "☗☖"},
+ "toggle13": {"interval": 80, "frames": "=*-"},
+ "arrow": {"interval": 100, "frames": "←↖↑↗→↘↓↙"},
+ "arrow2": {
+ "interval": 80,
+ "frames": ["⬆️ ", "↗️ ", "➡️ ", "↘️ ", "⬇️ ", "↙️ ", "⬅️ ", "↖️ "],
+ },
+ "arrow3": {
+ "interval": 120,
+ "frames": ["▹▹▹▹▹", "▸▹▹▹▹", "▹▸▹▹▹", "▹▹▸▹▹", "▹▹▹▸▹", "▹▹▹▹▸"],
+ },
+ "bouncingBar": {
+ "interval": 80,
+ "frames": [
+ "[ ]",
+ "[= ]",
+ "[== ]",
+ "[=== ]",
+ "[ ===]",
+ "[ ==]",
+ "[ =]",
+ "[ ]",
+ "[ =]",
+ "[ ==]",
+ "[ ===]",
+ "[====]",
+ "[=== ]",
+ "[== ]",
+ "[= ]",
+ ],
+ },
+ "bouncingBall": {
+ "interval": 80,
+ "frames": [
+ "( ● )",
+ "( ● )",
+ "( ● )",
+ "( ● )",
+ "( ●)",
+ "( ● )",
+ "( ● )",
+ "( ● )",
+ "( ● )",
+ "(● )",
+ ],
+ },
+ "smiley": {"interval": 200, "frames": ["😄 ", "😝 "]},
+ "monkey": {"interval": 300, "frames": ["🙈 ", "🙈 ", "🙉 ", "🙊 "]},
+ "hearts": {"interval": 100, "frames": ["💛 ", "💙 ", "💜 ", "💚 ", "❤️ "]},
+ "clock": {
+ "interval": 100,
+ "frames": [
+ "🕛 ",
+ "🕐 ",
+ "🕑 ",
+ "🕒 ",
+ "🕓 ",
+ "🕔 ",
+ "🕕 ",
+ "🕖 ",
+ "🕗 ",
+ "🕘 ",
+ "🕙 ",
+ "🕚 ",
+ ],
+ },
+ "earth": {"interval": 180, "frames": ["🌍 ", "🌎 ", "🌏 "]},
+ "material": {
+ "interval": 17,
+ "frames": [
+ "█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "███▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "████▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "██████▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "███████▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "████████▁▁▁▁▁▁▁▁▁▁▁▁",
+ "█████████▁▁▁▁▁▁▁▁▁▁▁",
+ "█████████▁▁▁▁▁▁▁▁▁▁▁",
+ "██████████▁▁▁▁▁▁▁▁▁▁",
+ "███████████▁▁▁▁▁▁▁▁▁",
+ "█████████████▁▁▁▁▁▁▁",
+ "██████████████▁▁▁▁▁▁",
+ "██████████████▁▁▁▁▁▁",
+ "▁██████████████▁▁▁▁▁",
+ "▁██████████████▁▁▁▁▁",
+ "▁██████████████▁▁▁▁▁",
+ "▁▁██████████████▁▁▁▁",
+ "▁▁▁██████████████▁▁▁",
+ "▁▁▁▁█████████████▁▁▁",
+ "▁▁▁▁██████████████▁▁",
+ "▁▁▁▁██████████████▁▁",
+ "▁▁▁▁▁██████████████▁",
+ "▁▁▁▁▁██████████████▁",
+ "▁▁▁▁▁██████████████▁",
+ "▁▁▁▁▁▁██████████████",
+ "▁▁▁▁▁▁██████████████",
+ "▁▁▁▁▁▁▁█████████████",
+ "▁▁▁▁▁▁▁█████████████",
+ "▁▁▁▁▁▁▁▁████████████",
+ "▁▁▁▁▁▁▁▁████████████",
+ "▁▁▁▁▁▁▁▁▁███████████",
+ "▁▁▁▁▁▁▁▁▁███████████",
+ "▁▁▁▁▁▁▁▁▁▁██████████",
+ "▁▁▁▁▁▁▁▁▁▁██████████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁████████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁██████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
+ "█▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
+ "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
+ "██▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
+ "███▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
+ "████▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
+ "█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
+ "█████▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
+ "██████▁▁▁▁▁▁▁▁▁▁▁▁▁█",
+ "████████▁▁▁▁▁▁▁▁▁▁▁▁",
+ "█████████▁▁▁▁▁▁▁▁▁▁▁",
+ "█████████▁▁▁▁▁▁▁▁▁▁▁",
+ "█████████▁▁▁▁▁▁▁▁▁▁▁",
+ "█████████▁▁▁▁▁▁▁▁▁▁▁",
+ "███████████▁▁▁▁▁▁▁▁▁",
+ "████████████▁▁▁▁▁▁▁▁",
+ "████████████▁▁▁▁▁▁▁▁",
+ "██████████████▁▁▁▁▁▁",
+ "██████████████▁▁▁▁▁▁",
+ "▁██████████████▁▁▁▁▁",
+ "▁██████████████▁▁▁▁▁",
+ "▁▁▁█████████████▁▁▁▁",
+ "▁▁▁▁▁████████████▁▁▁",
+ "▁▁▁▁▁████████████▁▁▁",
+ "▁▁▁▁▁▁███████████▁▁▁",
+ "▁▁▁▁▁▁▁▁█████████▁▁▁",
+ "▁▁▁▁▁▁▁▁█████████▁▁▁",
+ "▁▁▁▁▁▁▁▁▁█████████▁▁",
+ "▁▁▁▁▁▁▁▁▁█████████▁▁",
+ "▁▁▁▁▁▁▁▁▁▁█████████▁",
+ "▁▁▁▁▁▁▁▁▁▁▁████████▁",
+ "▁▁▁▁▁▁▁▁▁▁▁████████▁",
+ "▁▁▁▁▁▁▁▁▁▁▁▁███████▁",
+ "▁▁▁▁▁▁▁▁▁▁▁▁███████▁",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁███████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁████",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁███",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁██",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁█",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ "▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁▁",
+ ],
+ },
+ "moon": {
+ "interval": 80,
+ "frames": ["🌑 ", "🌒 ", "🌓 ", "🌔 ", "🌕 ", "🌖 ", "🌗 ", "🌘 "],
+ },
+ "runner": {"interval": 140, "frames": ["🚶 ", "🏃 "]},
+ "pong": {
+ "interval": 80,
+ "frames": [
+ "▐⠂ ▌",
+ "▐⠈ ▌",
+ "▐ ⠂ ▌",
+ "▐ ⠠ ▌",
+ "▐ ⡀ ▌",
+ "▐ ⠠ ▌",
+ "▐ ⠂ ▌",
+ "▐ ⠈ ▌",
+ "▐ ⠂ ▌",
+ "▐ ⠠ ▌",
+ "▐ ⡀ ▌",
+ "▐ ⠠ ▌",
+ "▐ ⠂ ▌",
+ "▐ ⠈ ▌",
+ "▐ ⠂▌",
+ "▐ ⠠▌",
+ "▐ ⡀▌",
+ "▐ ⠠ ▌",
+ "▐ ⠂ ▌",
+ "▐ ⠈ ▌",
+ "▐ ⠂ ▌",
+ "▐ ⠠ ▌",
+ "▐ ⡀ ▌",
+ "▐ ⠠ ▌",
+ "▐ ⠂ ▌",
+ "▐ ⠈ ▌",
+ "▐ ⠂ ▌",
+ "▐ ⠠ ▌",
+ "▐ ⡀ ▌",
+ "▐⠠ ▌",
+ ],
+ },
+ "shark": {
+ "interval": 120,
+ "frames": [
+ "▐|\\____________▌",
+ "▐_|\\___________▌",
+ "▐__|\\__________▌",
+ "▐___|\\_________▌",
+ "▐____|\\________▌",
+ "▐_____|\\_______▌",
+ "▐______|\\______▌",
+ "▐_______|\\_____▌",
+ "▐________|\\____▌",
+ "▐_________|\\___▌",
+ "▐__________|\\__▌",
+ "▐___________|\\_▌",
+ "▐____________|\\▌",
+ "▐____________/|▌",
+ "▐___________/|_▌",
+ "▐__________/|__▌",
+ "▐_________/|___▌",
+ "▐________/|____▌",
+ "▐_______/|_____▌",
+ "▐______/|______▌",
+ "▐_____/|_______▌",
+ "▐____/|________▌",
+ "▐___/|_________▌",
+ "▐__/|__________▌",
+ "▐_/|___________▌",
+ "▐/|____________▌",
+ ],
+ },
+ "dqpb": {"interval": 100, "frames": "dqpb"},
+ "weather": {
+ "interval": 100,
+ "frames": [
+ "☀️ ",
+ "☀️ ",
+ "☀️ ",
+ "🌤 ",
+ "⛅️ ",
+ "🌥 ",
+ "☁️ ",
+ "🌧 ",
+ "🌨 ",
+ "🌧 ",
+ "🌨 ",
+ "🌧 ",
+ "🌨 ",
+ "⛈ ",
+ "🌨 ",
+ "🌧 ",
+ "🌨 ",
+ "☁️ ",
+ "🌥 ",
+ "⛅️ ",
+ "🌤 ",
+ "☀️ ",
+ "☀️ ",
+ ],
+ },
+ "christmas": {"interval": 400, "frames": "🌲🎄"},
+ "grenade": {
+ "interval": 80,
+ "frames": [
+ "، ",
+ "′ ",
+ " ´ ",
+ " ‾ ",
+ " ⸌",
+ " ⸊",
+ " |",
+ " ⁎",
+ " ⁕",
+ " ෴ ",
+ " ⁓",
+ " ",
+ " ",
+ " ",
+ ],
+ },
+ "point": {"interval": 125, "frames": ["∙∙∙", "●∙∙", "∙●∙", "∙∙●", "∙∙∙"]},
+ "layer": {"interval": 150, "frames": "-=≡"},
+ "betaWave": {
+ "interval": 80,
+ "frames": [
+ "ρββββββ",
+ "βρβββββ",
+ "ββρββββ",
+ "βββρβββ",
+ "ββββρββ",
+ "βββββρβ",
+ "ββββββρ",
+ ],
+ },
+ "aesthetic": {
+ "interval": 80,
+ "frames": [
+ "▰▱▱▱▱▱▱",
+ "▰▰▱▱▱▱▱",
+ "▰▰▰▱▱▱▱",
+ "▰▰▰▰▱▱▱",
+ "▰▰▰▰▰▱▱",
+ "▰▰▰▰▰▰▱",
+ "▰▰▰▰▰▰▰",
+ "▰▱▱▱▱▱▱",
+ ],
+ },
+}
diff --git a/third_party/python/pip/pip/_vendor/rich/_stack.py b/third_party/python/pip/pip/_vendor/rich/_stack.py
new file mode 100644
index 0000000000..194564e761
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_stack.py
@@ -0,0 +1,16 @@
+from typing import List, TypeVar
+
+T = TypeVar("T")
+
+
+class Stack(List[T]):
+ """A small shim over builtin list."""
+
+ @property
+ def top(self) -> T:
+ """Get top of stack."""
+ return self[-1]
+
+ def push(self, item: T) -> None:
+ """Push an item on to the stack (append in stack nomenclature)."""
+ self.append(item)
diff --git a/third_party/python/pip/pip/_vendor/rich/_timer.py b/third_party/python/pip/pip/_vendor/rich/_timer.py
new file mode 100644
index 0000000000..a2ca6be03c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_timer.py
@@ -0,0 +1,19 @@
+"""
+Timer context manager, only used in debug.
+
+"""
+
+from time import time
+
+import contextlib
+from typing import Generator
+
+
+@contextlib.contextmanager
+def timer(subject: str = "time") -> Generator[None, None, None]:
+ """print the elapsed time. (only used in debugging)"""
+ start = time()
+ yield
+ elapsed = time() - start
+ elapsed_ms = elapsed * 1000
+ print(f"{subject} elapsed {elapsed_ms:.1f}ms")
diff --git a/third_party/python/pip/pip/_vendor/rich/_win32_console.py b/third_party/python/pip/pip/_vendor/rich/_win32_console.py
new file mode 100644
index 0000000000..81b1082905
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_win32_console.py
@@ -0,0 +1,662 @@
+"""Light wrapper around the Win32 Console API - this module should only be imported on Windows
+
+The API that this module wraps is documented at https://docs.microsoft.com/en-us/windows/console/console-functions
+"""
+import ctypes
+import sys
+from typing import Any
+
+windll: Any = None
+if sys.platform == "win32":
+ windll = ctypes.LibraryLoader(ctypes.WinDLL)
+else:
+ raise ImportError(f"{__name__} can only be imported on Windows")
+
+import time
+from ctypes import Structure, byref, wintypes
+from typing import IO, NamedTuple, Type, cast
+
+from pip._vendor.rich.color import ColorSystem
+from pip._vendor.rich.style import Style
+
+STDOUT = -11
+ENABLE_VIRTUAL_TERMINAL_PROCESSING = 4
+
+COORD = wintypes._COORD
+
+
+class LegacyWindowsError(Exception):
+ pass
+
+
+class WindowsCoordinates(NamedTuple):
+ """Coordinates in the Windows Console API are (y, x), not (x, y).
+ This class is intended to prevent that confusion.
+ Rows and columns are indexed from 0.
+ This class can be used in place of wintypes._COORD in arguments and argtypes.
+ """
+
+ row: int
+ col: int
+
+ @classmethod
+ def from_param(cls, value: "WindowsCoordinates") -> COORD:
+ """Converts a WindowsCoordinates into a wintypes _COORD structure.
+ This classmethod is internally called by ctypes to perform the conversion.
+
+ Args:
+ value (WindowsCoordinates): The input coordinates to convert.
+
+ Returns:
+ wintypes._COORD: The converted coordinates struct.
+ """
+ return COORD(value.col, value.row)
+
+
+class CONSOLE_SCREEN_BUFFER_INFO(Structure):
+ _fields_ = [
+ ("dwSize", COORD),
+ ("dwCursorPosition", COORD),
+ ("wAttributes", wintypes.WORD),
+ ("srWindow", wintypes.SMALL_RECT),
+ ("dwMaximumWindowSize", COORD),
+ ]
+
+
+class CONSOLE_CURSOR_INFO(ctypes.Structure):
+ _fields_ = [("dwSize", wintypes.DWORD), ("bVisible", wintypes.BOOL)]
+
+
+_GetStdHandle = windll.kernel32.GetStdHandle
+_GetStdHandle.argtypes = [
+ wintypes.DWORD,
+]
+_GetStdHandle.restype = wintypes.HANDLE
+
+
+def GetStdHandle(handle: int = STDOUT) -> wintypes.HANDLE:
+ """Retrieves a handle to the specified standard device (standard input, standard output, or standard error).
+
+ Args:
+ handle (int): Integer identifier for the handle. Defaults to -11 (stdout).
+
+ Returns:
+ wintypes.HANDLE: The handle
+ """
+ return cast(wintypes.HANDLE, _GetStdHandle(handle))
+
+
+_GetConsoleMode = windll.kernel32.GetConsoleMode
+_GetConsoleMode.argtypes = [wintypes.HANDLE, wintypes.LPDWORD]
+_GetConsoleMode.restype = wintypes.BOOL
+
+
+def GetConsoleMode(std_handle: wintypes.HANDLE) -> int:
+ """Retrieves the current input mode of a console's input buffer
+ or the current output mode of a console screen buffer.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+
+ Raises:
+ LegacyWindowsError: If any error occurs while calling the Windows console API.
+
+ Returns:
+ int: Value representing the current console mode as documented at
+ https://docs.microsoft.com/en-us/windows/console/getconsolemode#parameters
+ """
+
+ console_mode = wintypes.DWORD()
+ success = bool(_GetConsoleMode(std_handle, console_mode))
+ if not success:
+ raise LegacyWindowsError("Unable to get legacy Windows Console Mode")
+ return console_mode.value
+
+
+_FillConsoleOutputCharacterW = windll.kernel32.FillConsoleOutputCharacterW
+_FillConsoleOutputCharacterW.argtypes = [
+ wintypes.HANDLE,
+ ctypes.c_char,
+ wintypes.DWORD,
+ cast(Type[COORD], WindowsCoordinates),
+ ctypes.POINTER(wintypes.DWORD),
+]
+_FillConsoleOutputCharacterW.restype = wintypes.BOOL
+
+
+def FillConsoleOutputCharacter(
+ std_handle: wintypes.HANDLE,
+ char: str,
+ length: int,
+ start: WindowsCoordinates,
+) -> int:
+ """Writes a character to the console screen buffer a specified number of times, beginning at the specified coordinates.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ char (str): The character to write. Must be a string of length 1.
+ length (int): The number of times to write the character.
+ start (WindowsCoordinates): The coordinates to start writing at.
+
+ Returns:
+ int: The number of characters written.
+ """
+ character = ctypes.c_char(char.encode())
+ num_characters = wintypes.DWORD(length)
+ num_written = wintypes.DWORD(0)
+ _FillConsoleOutputCharacterW(
+ std_handle,
+ character,
+ num_characters,
+ start,
+ byref(num_written),
+ )
+ return num_written.value
+
+
+_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
+_FillConsoleOutputAttribute.argtypes = [
+ wintypes.HANDLE,
+ wintypes.WORD,
+ wintypes.DWORD,
+ cast(Type[COORD], WindowsCoordinates),
+ ctypes.POINTER(wintypes.DWORD),
+]
+_FillConsoleOutputAttribute.restype = wintypes.BOOL
+
+
+def FillConsoleOutputAttribute(
+ std_handle: wintypes.HANDLE,
+ attributes: int,
+ length: int,
+ start: WindowsCoordinates,
+) -> int:
+ """Sets the character attributes for a specified number of character cells,
+ beginning at the specified coordinates in a screen buffer.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ attributes (int): Integer value representing the foreground and background colours of the cells.
+ length (int): The number of cells to set the output attribute of.
+ start (WindowsCoordinates): The coordinates of the first cell whose attributes are to be set.
+
+ Returns:
+ int: The number of cells whose attributes were actually set.
+ """
+ num_cells = wintypes.DWORD(length)
+ style_attrs = wintypes.WORD(attributes)
+ num_written = wintypes.DWORD(0)
+ _FillConsoleOutputAttribute(
+ std_handle, style_attrs, num_cells, start, byref(num_written)
+ )
+ return num_written.value
+
+
+_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
+_SetConsoleTextAttribute.argtypes = [
+ wintypes.HANDLE,
+ wintypes.WORD,
+]
+_SetConsoleTextAttribute.restype = wintypes.BOOL
+
+
+def SetConsoleTextAttribute(
+ std_handle: wintypes.HANDLE, attributes: wintypes.WORD
+) -> bool:
+ """Set the colour attributes for all text written after this function is called.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ attributes (int): Integer value representing the foreground and background colours.
+
+
+ Returns:
+ bool: True if the attribute was set successfully, otherwise False.
+ """
+ return bool(_SetConsoleTextAttribute(std_handle, attributes))
+
+
+_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
+_GetConsoleScreenBufferInfo.argtypes = [
+ wintypes.HANDLE,
+ ctypes.POINTER(CONSOLE_SCREEN_BUFFER_INFO),
+]
+_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
+
+
+def GetConsoleScreenBufferInfo(
+ std_handle: wintypes.HANDLE,
+) -> CONSOLE_SCREEN_BUFFER_INFO:
+ """Retrieves information about the specified console screen buffer.
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+
+ Returns:
+        CONSOLE_SCREEN_BUFFER_INFO: A CONSOLE_SCREEN_BUFFER_INFO ctype struct containing
+            information about screen size, cursor position, colour attributes, and more."""
+ console_screen_buffer_info = CONSOLE_SCREEN_BUFFER_INFO()
+ _GetConsoleScreenBufferInfo(std_handle, byref(console_screen_buffer_info))
+ return console_screen_buffer_info
+
+
+_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
+_SetConsoleCursorPosition.argtypes = [
+ wintypes.HANDLE,
+ cast(Type[COORD], WindowsCoordinates),
+]
+_SetConsoleCursorPosition.restype = wintypes.BOOL
+
+
+def SetConsoleCursorPosition(
+ std_handle: wintypes.HANDLE, coords: WindowsCoordinates
+) -> bool:
+ """Set the position of the cursor in the console screen
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ coords (WindowsCoordinates): The coordinates to move the cursor to.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
+ return bool(_SetConsoleCursorPosition(std_handle, coords))
+
+
+_GetConsoleCursorInfo = windll.kernel32.GetConsoleCursorInfo
+_GetConsoleCursorInfo.argtypes = [
+ wintypes.HANDLE,
+ ctypes.POINTER(CONSOLE_CURSOR_INFO),
+]
+_GetConsoleCursorInfo.restype = wintypes.BOOL
+
+
+def GetConsoleCursorInfo(
+ std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
+) -> bool:
+ """Get the cursor info - used to get cursor visibility and width
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct that receives information
+ about the console's cursor.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
+ return bool(_GetConsoleCursorInfo(std_handle, byref(cursor_info)))
+
+
+_SetConsoleCursorInfo = windll.kernel32.SetConsoleCursorInfo
+_SetConsoleCursorInfo.argtypes = [
+ wintypes.HANDLE,
+ ctypes.POINTER(CONSOLE_CURSOR_INFO),
+]
+_SetConsoleCursorInfo.restype = wintypes.BOOL
+
+
+def SetConsoleCursorInfo(
+ std_handle: wintypes.HANDLE, cursor_info: CONSOLE_CURSOR_INFO
+) -> bool:
+ """Set the cursor info - used for adjusting cursor visibility and width
+
+ Args:
+ std_handle (wintypes.HANDLE): A handle to the console input buffer or the console screen buffer.
+ cursor_info (CONSOLE_CURSOR_INFO): CONSOLE_CURSOR_INFO ctype struct containing the new cursor info.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
+ return bool(_SetConsoleCursorInfo(std_handle, byref(cursor_info)))
+
+
+_SetConsoleTitle = windll.kernel32.SetConsoleTitleW
+_SetConsoleTitle.argtypes = [wintypes.LPCWSTR]
+_SetConsoleTitle.restype = wintypes.BOOL
+
+
+def SetConsoleTitle(title: str) -> bool:
+ """Sets the title of the current console window
+
+ Args:
+ title (str): The new title of the console window.
+
+ Returns:
+ bool: True if the function succeeds, otherwise False.
+ """
+ return bool(_SetConsoleTitle(title))
+
+
+class LegacyWindowsTerm:
+ """This class allows interaction with the legacy Windows Console API. It should only be used in the context
+ of environments where virtual terminal processing is not available. However, if it is used in a Windows environment,
+ the entire API should work.
+
+ Args:
+ file (IO[str]): The file which the Windows Console API HANDLE is retrieved from, defaults to sys.stdout.
+ """
+
+ BRIGHT_BIT = 8
+
+ # Indices are ANSI color numbers, values are the corresponding Windows Console API color numbers
+ ANSI_TO_WINDOWS = [
+ 0, # black The Windows colours are defined in wincon.h as follows:
+ 4, # red define FOREGROUND_BLUE 0x0001 -- 0000 0001
+ 2, # green define FOREGROUND_GREEN 0x0002 -- 0000 0010
+ 6, # yellow define FOREGROUND_RED 0x0004 -- 0000 0100
+ 1, # blue define FOREGROUND_INTENSITY 0x0008 -- 0000 1000
+ 5, # magenta define BACKGROUND_BLUE 0x0010 -- 0001 0000
+ 3, # cyan define BACKGROUND_GREEN 0x0020 -- 0010 0000
+ 7, # white define BACKGROUND_RED 0x0040 -- 0100 0000
+ 8, # bright black (grey) define BACKGROUND_INTENSITY 0x0080 -- 1000 0000
+ 12, # bright red
+ 10, # bright green
+ 14, # bright yellow
+ 9, # bright blue
+ 13, # bright magenta
+ 11, # bright cyan
+ 15, # bright white
+ ]
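+
+    # Example (illustrative): bright red (ANSI 9) on blue (ANSI 4) packs into a
+    # single attribute word as ANSI_TO_WINDOWS[9] | (ANSI_TO_WINDOWS[4] << 4),
+    # i.e. 12 | (1 << 4) == 0x1C, the same packing used by write_styled below.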
+
+ def __init__(self, file: "IO[str]") -> None:
+ handle = GetStdHandle(STDOUT)
+ self._handle = handle
+ default_text = GetConsoleScreenBufferInfo(handle).wAttributes
+ self._default_text = default_text
+
+ self._default_fore = default_text & 7
+ self._default_back = (default_text >> 4) & 7
+ self._default_attrs = self._default_fore | (self._default_back << 4)
+
+ self._file = file
+ self.write = file.write
+ self.flush = file.flush
+
+ @property
+ def cursor_position(self) -> WindowsCoordinates:
+ """Returns the current position of the cursor (0-based)
+
+ Returns:
+ WindowsCoordinates: The current cursor position.
+ """
+ coord: COORD = GetConsoleScreenBufferInfo(self._handle).dwCursorPosition
+ return WindowsCoordinates(row=cast(int, coord.Y), col=cast(int, coord.X))
+
+ @property
+ def screen_size(self) -> WindowsCoordinates:
+ """Returns the current size of the console screen buffer, in character columns and rows
+
+ Returns:
+ WindowsCoordinates: The width and height of the screen as WindowsCoordinates.
+ """
+ screen_size: COORD = GetConsoleScreenBufferInfo(self._handle).dwSize
+ return WindowsCoordinates(
+ row=cast(int, screen_size.Y), col=cast(int, screen_size.X)
+ )
+
+ def write_text(self, text: str) -> None:
+ """Write text directly to the terminal without any modification of styles
+
+ Args:
+ text (str): The text to write to the console
+ """
+ self.write(text)
+ self.flush()
+
+ def write_styled(self, text: str, style: Style) -> None:
+ """Write styled text to the terminal.
+
+ Args:
+ text (str): The text to write
+ style (Style): The style of the text
+ """
+ color = style.color
+ bgcolor = style.bgcolor
+ if style.reverse:
+ color, bgcolor = bgcolor, color
+
+ if color:
+ fore = color.downgrade(ColorSystem.WINDOWS).number
+ fore = fore if fore is not None else 7 # Default to ANSI 7: White
+ if style.bold:
+ fore = fore | self.BRIGHT_BIT
+ if style.dim:
+ fore = fore & ~self.BRIGHT_BIT
+ fore = self.ANSI_TO_WINDOWS[fore]
+ else:
+ fore = self._default_fore
+
+ if bgcolor:
+ back = bgcolor.downgrade(ColorSystem.WINDOWS).number
+ back = back if back is not None else 0 # Default to ANSI 0: Black
+ back = self.ANSI_TO_WINDOWS[back]
+ else:
+ back = self._default_back
+
+ assert fore is not None
+ assert back is not None
+
+ SetConsoleTextAttribute(
+ self._handle, attributes=ctypes.c_ushort(fore | (back << 4))
+ )
+ self.write_text(text)
+ SetConsoleTextAttribute(self._handle, attributes=self._default_text)
+
+ def move_cursor_to(self, new_position: WindowsCoordinates) -> None:
+ """Set the position of the cursor
+
+ Args:
+ new_position (WindowsCoordinates): The WindowsCoordinates representing the new position of the cursor.
+ """
+ if new_position.col < 0 or new_position.row < 0:
+ return
+ SetConsoleCursorPosition(self._handle, coords=new_position)
+
+ def erase_line(self) -> None:
+ """Erase all content on the line the cursor is currently located at"""
+ screen_size = self.screen_size
+ cursor_position = self.cursor_position
+ cells_to_erase = screen_size.col
+ start_coordinates = WindowsCoordinates(row=cursor_position.row, col=0)
+ FillConsoleOutputCharacter(
+ self._handle, " ", length=cells_to_erase, start=start_coordinates
+ )
+ FillConsoleOutputAttribute(
+ self._handle,
+ self._default_attrs,
+ length=cells_to_erase,
+ start=start_coordinates,
+ )
+
+ def erase_end_of_line(self) -> None:
+ """Erase all content from the cursor position to the end of that line"""
+ cursor_position = self.cursor_position
+ cells_to_erase = self.screen_size.col - cursor_position.col
+ FillConsoleOutputCharacter(
+ self._handle, " ", length=cells_to_erase, start=cursor_position
+ )
+ FillConsoleOutputAttribute(
+ self._handle,
+ self._default_attrs,
+ length=cells_to_erase,
+ start=cursor_position,
+ )
+
+ def erase_start_of_line(self) -> None:
+ """Erase all content from the cursor position to the start of that line"""
+ row, col = self.cursor_position
+ start = WindowsCoordinates(row, 0)
+ FillConsoleOutputCharacter(self._handle, " ", length=col, start=start)
+ FillConsoleOutputAttribute(
+ self._handle, self._default_attrs, length=col, start=start
+ )
+
+ def move_cursor_up(self) -> None:
+ """Move the cursor up a single cell"""
+ cursor_position = self.cursor_position
+ SetConsoleCursorPosition(
+ self._handle,
+ coords=WindowsCoordinates(
+ row=cursor_position.row - 1, col=cursor_position.col
+ ),
+ )
+
+ def move_cursor_down(self) -> None:
+ """Move the cursor down a single cell"""
+ cursor_position = self.cursor_position
+ SetConsoleCursorPosition(
+ self._handle,
+ coords=WindowsCoordinates(
+ row=cursor_position.row + 1,
+ col=cursor_position.col,
+ ),
+ )
+
+ def move_cursor_forward(self) -> None:
+ """Move the cursor forward a single cell. Wrap to the next line if required."""
+ row, col = self.cursor_position
+ if col == self.screen_size.col - 1:
+ row += 1
+ col = 0
+ else:
+ col += 1
+ SetConsoleCursorPosition(
+ self._handle, coords=WindowsCoordinates(row=row, col=col)
+ )
+
+ def move_cursor_to_column(self, column: int) -> None:
+ """Move cursor to the column specified by the zero-based column index, staying on the same row
+
+ Args:
+ column (int): The zero-based column index to move the cursor to.
+ """
+ row, _ = self.cursor_position
+ SetConsoleCursorPosition(self._handle, coords=WindowsCoordinates(row, column))
+
+ def move_cursor_backward(self) -> None:
+ """Move the cursor backward a single cell. Wrap to the previous line if required."""
+ row, col = self.cursor_position
+ if col == 0:
+ row -= 1
+ col = self.screen_size.col - 1
+ else:
+ col -= 1
+ SetConsoleCursorPosition(
+ self._handle, coords=WindowsCoordinates(row=row, col=col)
+ )
+
+ def hide_cursor(self) -> None:
+ """Hide the cursor"""
+ current_cursor_size = self._get_cursor_size()
+ invisible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=0)
+ SetConsoleCursorInfo(self._handle, cursor_info=invisible_cursor)
+
+ def show_cursor(self) -> None:
+ """Show the cursor"""
+ current_cursor_size = self._get_cursor_size()
+ visible_cursor = CONSOLE_CURSOR_INFO(dwSize=current_cursor_size, bVisible=1)
+ SetConsoleCursorInfo(self._handle, cursor_info=visible_cursor)
+
+ def set_title(self, title: str) -> None:
+ """Set the title of the terminal window
+
+ Args:
+ title (str): The new title of the console window
+ """
+ assert len(title) < 255, "Console title must be less than 255 characters"
+ SetConsoleTitle(title)
+
+ def _get_cursor_size(self) -> int:
+ """Get the percentage of the character cell that is filled by the cursor"""
+ cursor_info = CONSOLE_CURSOR_INFO()
+ GetConsoleCursorInfo(self._handle, cursor_info=cursor_info)
+ return int(cursor_info.dwSize)
+
+
+if __name__ == "__main__":
+ handle = GetStdHandle()
+
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+
+ term = LegacyWindowsTerm(sys.stdout)
+ term.set_title("Win32 Console Examples")
+
+ style = Style(color="black", bgcolor="red")
+
+ heading = Style.parse("black on green")
+
+ # Check colour output
+ console.rule("Checking colour output")
+ console.print("[on red]on red!")
+ console.print("[blue]blue!")
+ console.print("[yellow]yellow!")
+ console.print("[bold yellow]bold yellow!")
+ console.print("[bright_yellow]bright_yellow!")
+ console.print("[dim bright_yellow]dim bright_yellow!")
+ console.print("[italic cyan]italic cyan!")
+ console.print("[bold white on blue]bold white on blue!")
+ console.print("[reverse bold white on blue]reverse bold white on blue!")
+ console.print("[bold black on cyan]bold black on cyan!")
+ console.print("[black on green]black on green!")
+ console.print("[blue on green]blue on green!")
+ console.print("[white on black]white on black!")
+ console.print("[black on white]black on white!")
+ console.print("[#1BB152 on #DA812D]#1BB152 on #DA812D!")
+
+ # Check cursor movement
+ console.rule("Checking cursor movement")
+ console.print()
+ term.move_cursor_backward()
+ term.move_cursor_backward()
+ term.write_text("went back and wrapped to prev line")
+ time.sleep(1)
+ term.move_cursor_up()
+ term.write_text("we go up")
+ time.sleep(1)
+ term.move_cursor_down()
+ term.write_text("and down")
+ time.sleep(1)
+ term.move_cursor_up()
+ term.move_cursor_backward()
+ term.move_cursor_backward()
+ term.write_text("we went up and back 2")
+ time.sleep(1)
+ term.move_cursor_down()
+ term.move_cursor_backward()
+ term.move_cursor_backward()
+ term.write_text("we went down and back 2")
+ time.sleep(1)
+
+ # Check erasing of lines
+ term.hide_cursor()
+ console.print()
+ console.rule("Checking line erasing")
+ console.print("\n...Deleting to the start of the line...")
+ term.write_text("The red arrow shows the cursor location, and direction of erase")
+ time.sleep(1)
+ term.move_cursor_to_column(16)
+ term.write_styled("<", Style.parse("black on red"))
+ term.move_cursor_backward()
+ time.sleep(1)
+ term.erase_start_of_line()
+ time.sleep(1)
+
+ console.print("\n\n...And to the end of the line...")
+ term.write_text("The red arrow shows the cursor location, and direction of erase")
+ time.sleep(1)
+
+ term.move_cursor_to_column(16)
+ term.write_styled(">", Style.parse("black on red"))
+ time.sleep(1)
+ term.erase_end_of_line()
+ time.sleep(1)
+
+ console.print("\n\n...Now the whole line will be erased...")
+ term.write_styled("I'm going to disappear!", style=Style.parse("black on cyan"))
+ time.sleep(1)
+ term.erase_line()
+
+ term.show_cursor()
+ print("\n")
diff --git a/third_party/python/pip/pip/_vendor/rich/_windows.py b/third_party/python/pip/pip/_vendor/rich/_windows.py
new file mode 100644
index 0000000000..10fc0d7e9f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_windows.py
@@ -0,0 +1,72 @@
+import sys
+from dataclasses import dataclass
+
+
+@dataclass
+class WindowsConsoleFeatures:
+ """Windows features available."""
+
+ vt: bool = False
+ """The console supports VT codes."""
+ truecolor: bool = False
+ """The console supports truecolor."""
+
+
+try:
+ import ctypes
+ from ctypes import LibraryLoader
+
+ if sys.platform == "win32":
+ windll = LibraryLoader(ctypes.WinDLL)
+ else:
+ windll = None
+        raise ImportError("Not Windows")
+
+ from pip._vendor.rich._win32_console import (
+ ENABLE_VIRTUAL_TERMINAL_PROCESSING,
+ GetConsoleMode,
+ GetStdHandle,
+ LegacyWindowsError,
+ )
+
+except (AttributeError, ImportError, ValueError):
+
+ # Fallback if we can't load the Windows DLL
+    def get_windows_console_features() -> WindowsConsoleFeatures:
+        """Get Windows console features (stub for platforms where the DLL is unavailable)."""
+        features = WindowsConsoleFeatures()
+        return features
+
+else:
+
+ def get_windows_console_features() -> WindowsConsoleFeatures:
+ """Get windows console features.
+
+ Returns:
+ WindowsConsoleFeatures: An instance of WindowsConsoleFeatures.
+ """
+ handle = GetStdHandle()
+ try:
+ console_mode = GetConsoleMode(handle)
+ success = True
+ except LegacyWindowsError:
+ console_mode = 0
+ success = False
+ vt = bool(success and console_mode & ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+ truecolor = False
+ if vt:
+ win_version = sys.getwindowsversion()
+ truecolor = win_version.major > 10 or (
+ win_version.major == 10 and win_version.build >= 15063
+ )
+ features = WindowsConsoleFeatures(vt=vt, truecolor=truecolor)
+ return features
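+
+    # Example (illustrative): on Windows 10 build 15063 or later with virtual
+    # terminal processing enabled, this returns
+    # WindowsConsoleFeatures(vt=True, truecolor=True); on a legacy console both
+    # flags are False.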
+
+
+if __name__ == "__main__":
+ import platform
+
+ features = get_windows_console_features()
+ from pip._vendor.rich import print
+
+ print(f'platform="{platform.system()}"')
+ print(repr(features))
diff --git a/third_party/python/pip/pip/_vendor/rich/_windows_renderer.py b/third_party/python/pip/pip/_vendor/rich/_windows_renderer.py
new file mode 100644
index 0000000000..5ece05649e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_windows_renderer.py
@@ -0,0 +1,56 @@
+from typing import Iterable, Sequence, Tuple, cast
+
+from pip._vendor.rich._win32_console import LegacyWindowsTerm, WindowsCoordinates
+from pip._vendor.rich.segment import ControlCode, ControlType, Segment
+
+
+def legacy_windows_render(buffer: Iterable[Segment], term: LegacyWindowsTerm) -> None:
+ """Makes appropriate Windows Console API calls based on the segments in the buffer.
+
+ Args:
+ buffer (Iterable[Segment]): Iterable of Segments to convert to Win32 API calls.
+ term (LegacyWindowsTerm): Used to call the Windows Console API.
+ """
+ for text, style, control in buffer:
+ if not control:
+ if style:
+ term.write_styled(text, style)
+ else:
+ term.write_text(text)
+ else:
+ control_codes: Sequence[ControlCode] = control
+ for control_code in control_codes:
+ control_type = control_code[0]
+ if control_type == ControlType.CURSOR_MOVE_TO:
+ _, x, y = cast(Tuple[ControlType, int, int], control_code)
+ term.move_cursor_to(WindowsCoordinates(row=y - 1, col=x - 1))
+ elif control_type == ControlType.CARRIAGE_RETURN:
+ term.write_text("\r")
+ elif control_type == ControlType.HOME:
+ term.move_cursor_to(WindowsCoordinates(0, 0))
+ elif control_type == ControlType.CURSOR_UP:
+ term.move_cursor_up()
+ elif control_type == ControlType.CURSOR_DOWN:
+ term.move_cursor_down()
+ elif control_type == ControlType.CURSOR_FORWARD:
+ term.move_cursor_forward()
+ elif control_type == ControlType.CURSOR_BACKWARD:
+ term.move_cursor_backward()
+ elif control_type == ControlType.CURSOR_MOVE_TO_COLUMN:
+ _, column = cast(Tuple[ControlType, int], control_code)
+ term.move_cursor_to_column(column - 1)
+ elif control_type == ControlType.HIDE_CURSOR:
+ term.hide_cursor()
+ elif control_type == ControlType.SHOW_CURSOR:
+ term.show_cursor()
+ elif control_type == ControlType.ERASE_IN_LINE:
+ _, mode = cast(Tuple[ControlType, int], control_code)
+ if mode == 0:
+ term.erase_end_of_line()
+ elif mode == 1:
+ term.erase_start_of_line()
+ elif mode == 2:
+ term.erase_line()
+ elif control_type == ControlType.SET_WINDOW_TITLE:
+ _, title = cast(Tuple[ControlType, str], control_code)
+ term.set_title(title)
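+
+
+# Example (illustrative sketch):
+#
+#     term = LegacyWindowsTerm(sys.stdout)
+#     buffer = [Segment("plain "), Segment("styled", Style(color="red"))]
+#     legacy_windows_render(buffer, term)  # writes plain then styled text
+#
+# (``Style`` would need to be imported from pip._vendor.rich.style.)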
diff --git a/third_party/python/pip/pip/_vendor/rich/_wrap.py b/third_party/python/pip/pip/_vendor/rich/_wrap.py
new file mode 100644
index 0000000000..c45f193f74
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/_wrap.py
@@ -0,0 +1,56 @@
+import re
+from typing import Iterable, List, Tuple
+
+from ._loop import loop_last
+from .cells import cell_len, chop_cells
+
+re_word = re.compile(r"\s*\S+\s*")
+
+
+def words(text: str) -> Iterable[Tuple[int, int, str]]:
+    """Yield (start, end, word) tuples for each whitespace-delimited word in ``text``."""
+ position = 0
+ word_match = re_word.match(text, position)
+ while word_match is not None:
+ start, end = word_match.span()
+ word = word_match.group(0)
+ yield start, end, word
+ word_match = re_word.match(text, end)
+
+
+def divide_line(text: str, width: int, fold: bool = True) -> List[int]:
+    """Return the character offsets at which ``text`` should be broken so that no
+    line exceeds ``width`` cells, folding words longer than the width when
+    ``fold`` is True."""
+ divides: List[int] = []
+ append = divides.append
+ line_position = 0
+ _cell_len = cell_len
+ for start, _end, word in words(text):
+ word_length = _cell_len(word.rstrip())
+ if line_position + word_length > width:
+ if word_length > width:
+ if fold:
+ chopped_words = chop_cells(word, max_size=width, position=0)
+ for last, line in loop_last(chopped_words):
+ if start:
+ append(start)
+
+ if last:
+ line_position = _cell_len(line)
+ else:
+ start += len(line)
+ else:
+ if start:
+ append(start)
+ line_position = _cell_len(word)
+ elif line_position and start:
+ append(start)
+ line_position = _cell_len(word)
+ else:
+ line_position += _cell_len(word)
+ return divides
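+
+
+# Example (illustrative): divide_line("aaa bb cc", 4) returns [4, 7] -- the
+# character offsets at which the second and third lines should begin.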
+
+
+if __name__ == "__main__": # pragma: no cover
+ from .console import Console
+
+ console = Console(width=10)
+ console.print("12345 abcdefghijklmnopqrstuvwyxzABCDEFGHIJKLMNOPQRSTUVWXYZ 12345")
+ print(chop_cells("abcdefghijklmnopqrstuvwxyz", 10, position=2))
diff --git a/third_party/python/pip/pip/_vendor/rich/abc.py b/third_party/python/pip/pip/_vendor/rich/abc.py
new file mode 100644
index 0000000000..e6e498efab
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/abc.py
@@ -0,0 +1,33 @@
+from abc import ABC
+
+
+class RichRenderable(ABC):
+ """An abstract base class for Rich renderables.
+
+    Note that there is no need to extend this class; the intended use is to check if an
+ object supports the Rich renderable protocol. For example::
+
+ if isinstance(my_object, RichRenderable):
+ console.print(my_object)
+
+ """
+
+ @classmethod
+ def __subclasshook__(cls, other: type) -> bool:
+ """Check if this class supports the rich render protocol."""
+ return hasattr(other, "__rich_console__") or hasattr(other, "__rich__")
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.text import Text
+
+ t = Text()
+ print(isinstance(Text, RichRenderable))
+ print(isinstance(t, RichRenderable))
+
+ class Foo:
+ pass
+
+ f = Foo()
+ print(isinstance(f, RichRenderable))
+ print(isinstance("", RichRenderable))
diff --git a/third_party/python/pip/pip/_vendor/rich/align.py b/third_party/python/pip/pip/_vendor/rich/align.py
new file mode 100644
index 0000000000..d5abb59473
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/align.py
@@ -0,0 +1,311 @@
+import sys
+from itertools import chain
+from typing import TYPE_CHECKING, Iterable, Optional
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from pip._vendor.typing_extensions import Literal # pragma: no cover
+
+from .constrain import Constrain
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import StyleType
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderableType, RenderResult
+
+AlignMethod = Literal["left", "center", "right"]
+VerticalAlignMethod = Literal["top", "middle", "bottom"]
+
+
+class Align(JupyterMixin):
+ """Align a renderable by adding spaces if necessary.
+
+ Args:
+ renderable (RenderableType): A console renderable.
+        align (AlignMethod): One of "left", "center", or "right".
+        style (StyleType, optional): An optional style to apply to the background.
+        vertical (Optional[VerticalAlignMethod], optional): Optional vertical align, one of "top", "middle", or "bottom". Defaults to None.
+ pad (bool, optional): Pad the right with spaces. Defaults to True.
+ width (int, optional): Restrict contents to given width, or None to use default width. Defaults to None.
+ height (int, optional): Set height of align renderable, or None to fit to contents. Defaults to None.
+
+ Raises:
+ ValueError: if ``align`` is not one of the expected values.
+ """
+
+ def __init__(
+ self,
+ renderable: "RenderableType",
+ align: AlignMethod = "left",
+ style: Optional[StyleType] = None,
+ *,
+ vertical: Optional[VerticalAlignMethod] = None,
+ pad: bool = True,
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ ) -> None:
+ if align not in ("left", "center", "right"):
+ raise ValueError(
+ f'invalid value for align, expected "left", "center", or "right" (not {align!r})'
+ )
+ if vertical is not None and vertical not in ("top", "middle", "bottom"):
+ raise ValueError(
+ f'invalid value for vertical, expected "top", "middle", or "bottom" (not {vertical!r})'
+ )
+ self.renderable = renderable
+ self.align = align
+ self.style = style
+ self.vertical = vertical
+ self.pad = pad
+ self.width = width
+ self.height = height
+
+ def __repr__(self) -> str:
+ return f"Align({self.renderable!r}, {self.align!r})"
+
+ @classmethod
+ def left(
+ cls,
+ renderable: "RenderableType",
+ style: Optional[StyleType] = None,
+ *,
+ vertical: Optional[VerticalAlignMethod] = None,
+ pad: bool = True,
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ ) -> "Align":
+ """Align a renderable to the left."""
+ return cls(
+ renderable,
+ "left",
+ style=style,
+ vertical=vertical,
+ pad=pad,
+ width=width,
+ height=height,
+ )
+
+ @classmethod
+ def center(
+ cls,
+ renderable: "RenderableType",
+ style: Optional[StyleType] = None,
+ *,
+ vertical: Optional[VerticalAlignMethod] = None,
+ pad: bool = True,
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ ) -> "Align":
+ """Align a renderable to the center."""
+ return cls(
+ renderable,
+ "center",
+ style=style,
+ vertical=vertical,
+ pad=pad,
+ width=width,
+ height=height,
+ )
+
+ @classmethod
+ def right(
+ cls,
+ renderable: "RenderableType",
+ style: Optional[StyleType] = None,
+ *,
+ vertical: Optional[VerticalAlignMethod] = None,
+ pad: bool = True,
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ ) -> "Align":
+ """Align a renderable to the right."""
+ return cls(
+ renderable,
+ "right",
+ style=style,
+ vertical=vertical,
+ pad=pad,
+ width=width,
+ height=height,
+ )
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ align = self.align
+ width = console.measure(self.renderable, options=options).maximum
+ rendered = console.render(
+ Constrain(
+ self.renderable, width if self.width is None else min(width, self.width)
+ ),
+ options.update(height=None),
+ )
+ lines = list(Segment.split_lines(rendered))
+ width, height = Segment.get_shape(lines)
+ lines = Segment.set_shape(lines, width, height)
+ new_line = Segment.line()
+ excess_space = options.max_width - width
+ style = console.get_style(self.style) if self.style is not None else None
+
+ def generate_segments() -> Iterable[Segment]:
+ if excess_space <= 0:
+ # Exact fit
+ for line in lines:
+ yield from line
+ yield new_line
+
+ elif align == "left":
+ # Pad on the right
+ pad = Segment(" " * excess_space, style) if self.pad else None
+ for line in lines:
+ yield from line
+ if pad:
+ yield pad
+ yield new_line
+
+ elif align == "center":
+ # Pad left and right
+ left = excess_space // 2
+ pad = Segment(" " * left, style)
+ pad_right = (
+ Segment(" " * (excess_space - left), style) if self.pad else None
+ )
+ for line in lines:
+ if left:
+ yield pad
+ yield from line
+ if pad_right:
+ yield pad_right
+ yield new_line
+
+ elif align == "right":
+ # Padding on left
+ pad = Segment(" " * excess_space, style)
+ for line in lines:
+ yield pad
+ yield from line
+ yield new_line
+
+ blank_line = (
+ Segment(f"{' ' * (self.width or options.max_width)}\n", style)
+ if self.pad
+ else Segment("\n")
+ )
+
+ def blank_lines(count: int) -> Iterable[Segment]:
+ if count > 0:
+ for _ in range(count):
+ yield blank_line
+
+ vertical_height = self.height or options.height
+ iter_segments: Iterable[Segment]
+ if self.vertical and vertical_height is not None:
+ if self.vertical == "top":
+ bottom_space = vertical_height - height
+ iter_segments = chain(generate_segments(), blank_lines(bottom_space))
+ elif self.vertical == "middle":
+ top_space = (vertical_height - height) // 2
+ bottom_space = vertical_height - top_space - height
+ iter_segments = chain(
+ blank_lines(top_space),
+ generate_segments(),
+ blank_lines(bottom_space),
+ )
+ else: # self.vertical == "bottom":
+ top_space = vertical_height - height
+ iter_segments = chain(blank_lines(top_space), generate_segments())
+ else:
+ iter_segments = generate_segments()
+ if self.style:
+ style = console.get_style(self.style)
+ iter_segments = Segment.apply_style(iter_segments, style)
+ yield from iter_segments
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ measurement = Measurement.get(console, options, self.renderable)
+ return measurement
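+
+
+# Example (illustrative):
+#
+#     from pip._vendor.rich.console import Console
+#     Console().print(Align.center("hello", vertical="middle", height=5))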
+
+
+class VerticalCenter(JupyterMixin):
+ """Vertically aligns a renderable.
+
+ Warn:
+ This class is deprecated and may be removed in a future version. Use Align class with
+ `vertical="middle"`.
+
+ Args:
+ renderable (RenderableType): A renderable object.
+ """
+
+ def __init__(
+ self,
+ renderable: "RenderableType",
+ style: Optional[StyleType] = None,
+ ) -> None:
+ self.renderable = renderable
+ self.style = style
+
+ def __repr__(self) -> str:
+ return f"VerticalCenter({self.renderable!r})"
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ style = console.get_style(self.style) if self.style is not None else None
+ lines = console.render_lines(
+ self.renderable, options.update(height=None), pad=False
+ )
+ width, _height = Segment.get_shape(lines)
+ new_line = Segment.line()
+ height = options.height or options.size.height
+ top_space = (height - len(lines)) // 2
+ bottom_space = height - top_space - len(lines)
+ blank_line = Segment(f"{' ' * width}", style)
+
+ def blank_lines(count: int) -> Iterable[Segment]:
+ for _ in range(count):
+ yield blank_line
+ yield new_line
+
+ if top_space > 0:
+ yield from blank_lines(top_space)
+ for line in lines:
+ yield from line
+ yield new_line
+ if bottom_space > 0:
+ yield from blank_lines(bottom_space)
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ measurement = Measurement.get(console, options, self.renderable)
+ return measurement
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console, Group
+ from pip._vendor.rich.highlighter import ReprHighlighter
+ from pip._vendor.rich.panel import Panel
+
+ highlighter = ReprHighlighter()
+ console = Console()
+
+ panel = Panel(
+ Group(
+ Align.left(highlighter("align='left'")),
+ Align.center(highlighter("align='center'")),
+ Align.right(highlighter("align='right'")),
+ ),
+ width=60,
+ style="on dark_blue",
+ title="Algin",
+ )
+
+ console.print(
+ Align.center(panel, vertical="middle", style="on red", height=console.height)
+ )
diff --git a/third_party/python/pip/pip/_vendor/rich/ansi.py b/third_party/python/pip/pip/_vendor/rich/ansi.py
new file mode 100644
index 0000000000..92ef519411
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/ansi.py
@@ -0,0 +1,237 @@
+import re
+import sys
+from contextlib import suppress
+from typing import Iterable, NamedTuple, Optional
+
+from .color import Color
+from .style import Style
+from .text import Text
+
+re_ansi = re.compile(
+ r"""
+(?:\x1b\](.*?)\x1b\\)|
+(?:\x1b([(@-Z\\-_]|\[[0-?]*[ -/]*[@-~]))
+""",
+ re.VERBOSE,
+)
+
+
+class _AnsiToken(NamedTuple):
+ """Result of ansi tokenized string."""
+
+ plain: str = ""
+ sgr: Optional[str] = ""
+ osc: Optional[str] = ""
+
+
+def _ansi_tokenize(ansi_text: str) -> Iterable[_AnsiToken]:
+ """Tokenize a string in to plain text and ANSI codes.
+
+ Args:
+        ansi_text (str): A string containing ANSI codes.
+
+ Yields:
+        _AnsiToken: A named tuple of (plain, sgr, osc).
+ """
+
+ position = 0
+ sgr: Optional[str]
+ osc: Optional[str]
+ for match in re_ansi.finditer(ansi_text):
+ start, end = match.span(0)
+ osc, sgr = match.groups()
+ if start > position:
+ yield _AnsiToken(ansi_text[position:start])
+ if sgr:
+ if sgr.endswith("m"):
+ yield _AnsiToken("", sgr[1:-1], osc)
+ else:
+ yield _AnsiToken("", sgr, osc)
+ position = end
+ if position < len(ansi_text):
+ yield _AnsiToken(ansi_text[position:])
+
+
+SGR_STYLE_MAP = {
+ 1: "bold",
+ 2: "dim",
+ 3: "italic",
+ 4: "underline",
+ 5: "blink",
+ 6: "blink2",
+ 7: "reverse",
+ 8: "conceal",
+ 9: "strike",
+ 21: "underline2",
+ 22: "not dim not bold",
+ 23: "not italic",
+ 24: "not underline",
+ 25: "not blink",
+ 26: "not blink2",
+ 27: "not reverse",
+ 28: "not conceal",
+ 29: "not strike",
+ 30: "color(0)",
+ 31: "color(1)",
+ 32: "color(2)",
+ 33: "color(3)",
+ 34: "color(4)",
+ 35: "color(5)",
+ 36: "color(6)",
+ 37: "color(7)",
+ 39: "default",
+ 40: "on color(0)",
+ 41: "on color(1)",
+ 42: "on color(2)",
+ 43: "on color(3)",
+ 44: "on color(4)",
+ 45: "on color(5)",
+ 46: "on color(6)",
+ 47: "on color(7)",
+ 49: "on default",
+ 51: "frame",
+ 52: "encircle",
+ 53: "overline",
+ 54: "not frame not encircle",
+ 55: "not overline",
+ 90: "color(8)",
+ 91: "color(9)",
+ 92: "color(10)",
+ 93: "color(11)",
+ 94: "color(12)",
+ 95: "color(13)",
+ 96: "color(14)",
+ 97: "color(15)",
+ 100: "on color(8)",
+ 101: "on color(9)",
+ 102: "on color(10)",
+ 103: "on color(11)",
+ 104: "on color(12)",
+ 105: "on color(13)",
+ 106: "on color(14)",
+ 107: "on color(15)",
+}
+
+
+class AnsiDecoder:
+ """Translate ANSI code in to styled Text."""
+
+ def __init__(self) -> None:
+ self.style = Style.null()
+
+ def decode(self, terminal_text: str) -> Iterable[Text]:
+ """Decode ANSI codes in an iterable of lines.
+
+ Args:
+ lines (Iterable[str]): An iterable of lines of terminal output.
+
+ Yields:
+ Text: Marked up Text.
+ """
+ for line in terminal_text.splitlines():
+ yield self.decode_line(line)
+
+ def decode_line(self, line: str) -> Text:
+ """Decode a line containing ansi codes.
+
+ Args:
+ line (str): A line of terminal output.
+
+ Returns:
+ Text: A Text instance marked up according to ansi codes.
+ """
+ from_ansi = Color.from_ansi
+ from_rgb = Color.from_rgb
+ _Style = Style
+ text = Text()
+ append = text.append
+ line = line.rsplit("\r", 1)[-1]
+ for plain_text, sgr, osc in _ansi_tokenize(line):
+ if plain_text:
+ append(plain_text, self.style or None)
+ elif osc is not None:
+ if osc.startswith("8;"):
+ _params, semicolon, link = osc[2:].partition(";")
+ if semicolon:
+ self.style = self.style.update_link(link or None)
+ elif sgr is not None:
+                # Split into semicolon-separated integer codes
+                # Ignore invalid codes, because we want to be lenient
+ codes = [
+ min(255, int(_code) if _code else 0)
+ for _code in sgr.split(";")
+ if _code.isdigit() or _code == ""
+ ]
+ iter_codes = iter(codes)
+ for code in iter_codes:
+ if code == 0:
+ # reset
+ self.style = _Style.null()
+ elif code in SGR_STYLE_MAP:
+ # styles
+ self.style += _Style.parse(SGR_STYLE_MAP[code])
+ elif code == 38:
+ # Foreground
+ with suppress(StopIteration):
+ color_type = next(iter_codes)
+ if color_type == 5:
+ self.style += _Style.from_color(
+ from_ansi(next(iter_codes))
+ )
+ elif color_type == 2:
+ self.style += _Style.from_color(
+ from_rgb(
+ next(iter_codes),
+ next(iter_codes),
+ next(iter_codes),
+ )
+ )
+ elif code == 48:
+ # Background
+ with suppress(StopIteration):
+ color_type = next(iter_codes)
+ if color_type == 5:
+ self.style += _Style.from_color(
+ None, from_ansi(next(iter_codes))
+ )
+ elif color_type == 2:
+ self.style += _Style.from_color(
+ None,
+ from_rgb(
+ next(iter_codes),
+ next(iter_codes),
+ next(iter_codes),
+ ),
+ )
+
+ return text
+
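+# Editor's sketch (not part of upstream Rich): decoding a single line of
+# colored terminal output into a styled Text instance:
+#
+#     decoder = AnsiDecoder()
+#     text = decoder.decode_line("\x1b[1;31mError:\x1b[0m disk full")
+#     # "Error:" carries bold red; the remainder is unstyled.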
+
+if sys.platform != "win32" and __name__ == "__main__": # pragma: no cover
+ import io
+ import os
+ import pty
+ import sys
+
+ decoder = AnsiDecoder()
+
+ stdout = io.BytesIO()
+
+ def read(fd: int) -> bytes:
+ data = os.read(fd, 1024)
+ stdout.write(data)
+ return data
+
+ pty.spawn(sys.argv[1:], read)
+
+ from .console import Console
+
+ console = Console(record=True)
+
+ stdout_result = stdout.getvalue().decode("utf-8")
+ print(stdout_result)
+
+ for line in decoder.decode(stdout_result):
+ console.print(line)
+
+ console.save_html("stdout.html")
diff --git a/third_party/python/pip/pip/_vendor/rich/bar.py b/third_party/python/pip/pip/_vendor/rich/bar.py
new file mode 100644
index 0000000000..ed86a552d1
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/bar.py
@@ -0,0 +1,94 @@
+from typing import Optional, Union
+
+from .color import Color
+from .console import Console, ConsoleOptions, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style
+
+# There are left-aligned characters for 1/8 to 7/8, but
+# the right-aligned characters exist only for 1/8 and 4/8.
+BEGIN_BLOCK_ELEMENTS = ["█", "█", "█", "▐", "▐", "▐", "▕", "▕"]
+END_BLOCK_ELEMENTS = [" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉"]
+FULL_BLOCK = "█"
+
+
+class Bar(JupyterMixin):
+ """Renders a solid block bar.
+
+ Args:
+ size (float): Value for the end of the bar.
+ begin (float): Begin point (between 0 and size, inclusive).
+ end (float): End point (between 0 and size, inclusive).
+ width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
+ color (Union[Color, str], optional): Color of the bar. Defaults to "default".
+ bgcolor (Union[Color, str], optional): Color of bar background. Defaults to "default".
+ """
+
+ def __init__(
+ self,
+ size: float,
+ begin: float,
+ end: float,
+ *,
+ width: Optional[int] = None,
+ color: Union[Color, str] = "default",
+ bgcolor: Union[Color, str] = "default",
+ ):
+ self.size = size
+ self.begin = max(begin, 0)
+ self.end = min(end, size)
+ self.width = width
+ self.style = Style(color=color, bgcolor=bgcolor)
+
+ def __repr__(self) -> str:
+ return f"Bar({self.size}, {self.begin}, {self.end})"
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+
+ width = min(
+ self.width if self.width is not None else options.max_width,
+ options.max_width,
+ )
+
+ if self.begin >= self.end:
+ yield Segment(" " * width, self.style)
+ yield Segment.line()
+ return
+
+ prefix_complete_eights = int(width * 8 * self.begin / self.size)
+ prefix_bar_count = prefix_complete_eights // 8
+ prefix_eights_count = prefix_complete_eights % 8
+
+ body_complete_eights = int(width * 8 * self.end / self.size)
+ body_bar_count = body_complete_eights // 8
+ body_eights_count = body_complete_eights % 8
+
+ # When start and end fall into the same cell, we ideally should render
+ # a symbol that's "center-aligned", but there is no good symbol in Unicode.
+ # In this case, we fall back to a right-aligned block symbol for simplicity.
+
+ prefix = " " * prefix_bar_count
+ if prefix_eights_count:
+ prefix += BEGIN_BLOCK_ELEMENTS[prefix_eights_count]
+
+ body = FULL_BLOCK * body_bar_count
+ if body_eights_count:
+ body += END_BLOCK_ELEMENTS[body_eights_count]
+
+ suffix = " " * (width - len(body))
+
+ yield Segment(prefix + body[len(prefix) :] + suffix, self.style)
+ yield Segment.line()
+
+ def __rich_measure__(
+ self, console: Console, options: ConsoleOptions
+ ) -> Measurement:
+ return (
+ Measurement(self.width, self.width)
+ if self.width is not None
+ else Measurement(4, options.max_width)
+ )
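+
+
+if __name__ == "__main__": # pragma: no cover
+ # Editor's sketch (not part of upstream Rich): render a few bars using
+ # only the Bar API defined above; the values are arbitrary.
+ from .console import Console
+
+ console = Console()
+ console.print(Bar(size=100, begin=0, end=100)) # full bar
+ console.print(Bar(size=100, begin=25, end=75)) # partial span
+ console.print(Bar(size=100, begin=10, end=10)) # empty (begin >= end)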
diff --git a/third_party/python/pip/pip/_vendor/rich/box.py b/third_party/python/pip/pip/_vendor/rich/box.py
new file mode 100644
index 0000000000..97d2a94445
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/box.py
@@ -0,0 +1,517 @@
+import sys
+from typing import TYPE_CHECKING, Iterable, List
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from pip._vendor.typing_extensions import Literal # pragma: no cover
+
+
+from ._loop import loop_last
+
+if TYPE_CHECKING:
+ from pip._vendor.rich.console import ConsoleOptions
+
+
+class Box:
+ """Defines characters to render boxes.
+
+ ┌─┬┐ top
+ │ ││ head
+ ├─┼┤ head_row
+ │ ││ mid
+ ├─┼┤ row
+ ├─┼┤ foot_row
+ │ ││ foot
+ └─┴┘ bottom
+
+ Args:
+ box (str): Characters making up box.
+ ascii (bool, optional): True if this box uses ASCII characters only. Default is False.
+ """
+
+ def __init__(self, box: str, *, ascii: bool = False) -> None:
+ self._box = box
+ self.ascii = ascii
+ line1, line2, line3, line4, line5, line6, line7, line8 = box.splitlines()
+ # top
+ self.top_left, self.top, self.top_divider, self.top_right = iter(line1)
+ # head
+ self.head_left, _, self.head_vertical, self.head_right = iter(line2)
+ # head_row
+ (
+ self.head_row_left,
+ self.head_row_horizontal,
+ self.head_row_cross,
+ self.head_row_right,
+ ) = iter(line3)
+
+ # mid
+ self.mid_left, _, self.mid_vertical, self.mid_right = iter(line4)
+ # row
+ self.row_left, self.row_horizontal, self.row_cross, self.row_right = iter(line5)
+ # foot_row
+ (
+ self.foot_row_left,
+ self.foot_row_horizontal,
+ self.foot_row_cross,
+ self.foot_row_right,
+ ) = iter(line6)
+ # foot
+ self.foot_left, _, self.foot_vertical, self.foot_right = iter(line7)
+ # bottom
+ self.bottom_left, self.bottom, self.bottom_divider, self.bottom_right = iter(
+ line8
+ )
+
+ def __repr__(self) -> str:
+ return "Box(...)"
+
+ def __str__(self) -> str:
+ return self._box
+
+ def substitute(self, options: "ConsoleOptions", safe: bool = True) -> "Box":
+ """Substitute this box for another if it won't render due to platform issues.
+
+ Args:
+ options (ConsoleOptions): Console options used in rendering.
+ safe (bool, optional): Substitute this for another Box if there are known problems
+ displaying on the platform (currently only relevant on Windows). Default is True.
+
+ Returns:
+ Box: A different Box or the same Box.
+ """
+ box = self
+ if options.legacy_windows and safe:
+ box = LEGACY_WINDOWS_SUBSTITUTIONS.get(box, box)
+ if options.ascii_only and not box.ascii:
+ box = ASCII
+ return box
+
+ def get_plain_headed_box(self) -> "Box":
+ """If this box uses special characters for the borders of the header, then
+ return the equivalent box that does not.
+
+ Returns:
+ Box: The most similar Box that doesn't use header-specific box characters.
+ If the current Box already satisfies this criterion, then it's returned.
+ """
+ return PLAIN_HEADED_SUBSTITUTIONS.get(self, self)
+
+ def get_top(self, widths: Iterable[int]) -> str:
+ """Get the top of a simple box.
+
+ Args:
+ widths (List[int]): Widths of columns.
+
+ Returns:
+ str: A string of box characters.
+ """
+
+ parts: List[str] = []
+ append = parts.append
+ append(self.top_left)
+ for last, width in loop_last(widths):
+ append(self.top * width)
+ if not last:
+ append(self.top_divider)
+ append(self.top_right)
+ return "".join(parts)
+
+ def get_row(
+ self,
+ widths: Iterable[int],
+ level: Literal["head", "row", "foot", "mid"] = "row",
+ edge: bool = True,
+ ) -> str:
+ """Get the top of a simple box.
+
+ Args:
+ width (List[int]): Widths of columns.
+
+ Returns:
+ str: A string of box characters.
+ """
+ if level == "head":
+ left = self.head_row_left
+ horizontal = self.head_row_horizontal
+ cross = self.head_row_cross
+ right = self.head_row_right
+ elif level == "row":
+ left = self.row_left
+ horizontal = self.row_horizontal
+ cross = self.row_cross
+ right = self.row_right
+ elif level == "mid":
+ left = self.mid_left
+ horizontal = " "
+ cross = self.mid_vertical
+ right = self.mid_right
+ elif level == "foot":
+ left = self.foot_row_left
+ horizontal = self.foot_row_horizontal
+ cross = self.foot_row_cross
+ right = self.foot_row_right
+ else:
+ raise ValueError("level must be 'head', 'row' or 'foot'")
+
+ parts: List[str] = []
+ append = parts.append
+ if edge:
+ append(left)
+ for last, width in loop_last(widths):
+ append(horizontal * width)
+ if not last:
+ append(cross)
+ if edge:
+ append(right)
+ return "".join(parts)
+
+ def get_bottom(self, widths: Iterable[int]) -> str:
+ """Get the bottom of a simple box.
+
+ Args:
+ widths (List[int]): Widths of columns.
+
+ Returns:
+ str: A string of box characters.
+ """
+
+ parts: List[str] = []
+ append = parts.append
+ append(self.bottom_left)
+ for last, width in loop_last(widths):
+ append(self.bottom * width)
+ if not last:
+ append(self.bottom_divider)
+ append(self.bottom_right)
+ return "".join(parts)
+
+
+ASCII: Box = Box(
+ """\
++--+
+| ||
+|-+|
+| ||
+|-+|
+|-+|
+| ||
++--+
+""",
+ ascii=True,
+)
+
+ASCII2: Box = Box(
+ """\
++-++
+| ||
++-++
+| ||
++-++
++-++
+| ||
++-++
+""",
+ ascii=True,
+)
+
+ASCII_DOUBLE_HEAD: Box = Box(
+ """\
++-++
+| ||
++=++
+| ||
++-++
++-++
+| ||
++-++
+""",
+ ascii=True,
+)
+
+SQUARE: Box = Box(
+ """\
+┌─┬┐
+│ ││
+├─┼┤
+│ ││
+├─┼┤
+├─┼┤
+│ ││
+└─┴┘
+"""
+)
+
+SQUARE_DOUBLE_HEAD: Box = Box(
+ """\
+┌─┬┐
+│ ││
+╞═╪╡
+│ ││
+├─┼┤
+├─┼┤
+│ ││
+└─┴┘
+"""
+)
+
+MINIMAL: Box = Box(
+ """\
+ ╷
+ │
+╶─┼╴
+ │
+╶─┼╴
+╶─┼╴
+ │
+ ╵
+"""
+)
+
+
+MINIMAL_HEAVY_HEAD: Box = Box(
+ """\
+ ╷
+ │
+╺━┿╸
+ │
+╶─┼╴
+╶─┼╴
+ │
+ ╵
+"""
+)
+
+MINIMAL_DOUBLE_HEAD: Box = Box(
+ """\
+ ╷
+ │
+ ═╪
+ │
+ ─┼
+ ─┼
+ │
+ ╵
+"""
+)
+
+
+SIMPLE: Box = Box(
+ """\
+
+
+ ──
+
+
+ ──
+
+
+"""
+)
+
+SIMPLE_HEAD: Box = Box(
+ """\
+
+
+ ──
+
+
+
+
+
+"""
+)
+
+
+SIMPLE_HEAVY: Box = Box(
+ """\
+
+
+ ━━
+
+
+ ━━
+
+
+"""
+)
+
+
+HORIZONTALS: Box = Box(
+ """\
+ ──
+
+ ──
+
+ ──
+ ──
+
+ ──
+"""
+)
+
+ROUNDED: Box = Box(
+ """\
+╭─┬╮
+│ ││
+├─┼┤
+│ ││
+├─┼┤
+├─┼┤
+│ ││
+╰─┴╯
+"""
+)
+
+HEAVY: Box = Box(
+ """\
+┏━┳┓
+┃ ┃┃
+┣━╋┫
+┃ ┃┃
+┣━╋┫
+┣━╋┫
+┃ ┃┃
+┗━┻┛
+"""
+)
+
+HEAVY_EDGE: Box = Box(
+ """\
+┏━┯┓
+┃ │┃
+┠─┼┨
+┃ │┃
+┠─┼┨
+┠─┼┨
+┃ │┃
+┗━┷┛
+"""
+)
+
+HEAVY_HEAD: Box = Box(
+ """\
+┏━┳┓
+┃ ┃┃
+┡━╇┩
+│ ││
+├─┼┤
+├─┼┤
+│ ││
+└─┴┘
+"""
+)
+
+DOUBLE: Box = Box(
+ """\
+╔═╦╗
+║ ║║
+╠═╬╣
+║ ║║
+╠═╬╣
+╠═╬╣
+║ ║║
+╚═╩╝
+"""
+)
+
+DOUBLE_EDGE: Box = Box(
+ """\
+╔═╤╗
+║ │║
+╟─┼╢
+║ │║
+╟─┼╢
+╟─┼╢
+║ │║
+╚═╧╝
+"""
+)
+
+MARKDOWN: Box = Box(
+ """\
+
+| ||
+|-||
+| ||
+|-||
+|-||
+| ||
+
+""",
+ ascii=True,
+)
+
+# Map Boxes that don't render with raster fonts on to equivalent that do
+LEGACY_WINDOWS_SUBSTITUTIONS = {
+ ROUNDED: SQUARE,
+ MINIMAL_HEAVY_HEAD: MINIMAL,
+ SIMPLE_HEAVY: SIMPLE,
+ HEAVY: SQUARE,
+ HEAVY_EDGE: SQUARE,
+ HEAVY_HEAD: SQUARE,
+}
+
+# Map headed boxes to their headerless equivalents
+PLAIN_HEADED_SUBSTITUTIONS = {
+ HEAVY_HEAD: SQUARE,
+ SQUARE_DOUBLE_HEAD: SQUARE,
+ MINIMAL_DOUBLE_HEAD: MINIMAL,
+ MINIMAL_HEAVY_HEAD: MINIMAL,
+ ASCII_DOUBLE_HEAD: ASCII2,
+}
+
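+# Editor's sketch (not part of upstream Rich): drawing a small two-column
+# frame by hand with the Box API defined above, using the SQUARE constant:
+#
+#     >>> print(SQUARE.get_top([5, 3]))
+#     ┌─────┬───┐
+#     >>> print(SQUARE.get_row([5, 3], level="head"))
+#     ├─────┼───┤
+#     >>> print(SQUARE.get_bottom([5, 3]))
+#     └─────┴───┘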
+
+if __name__ == "__main__": # pragma: no cover
+
+ from pip._vendor.rich.columns import Columns
+ from pip._vendor.rich.panel import Panel
+
+ from . import box as box
+ from .console import Console
+ from .table import Table
+ from .text import Text
+
+ console = Console(record=True)
+
+ BOXES = [
+ "ASCII",
+ "ASCII2",
+ "ASCII_DOUBLE_HEAD",
+ "SQUARE",
+ "SQUARE_DOUBLE_HEAD",
+ "MINIMAL",
+ "MINIMAL_HEAVY_HEAD",
+ "MINIMAL_DOUBLE_HEAD",
+ "SIMPLE",
+ "SIMPLE_HEAD",
+ "SIMPLE_HEAVY",
+ "HORIZONTALS",
+ "ROUNDED",
+ "HEAVY",
+ "HEAVY_EDGE",
+ "HEAVY_HEAD",
+ "DOUBLE",
+ "DOUBLE_EDGE",
+ "MARKDOWN",
+ ]
+
+ console.print(Panel("[bold green]Box Constants", style="green"), justify="center")
+ console.print()
+
+ columns = Columns(expand=True, padding=2)
+ for box_name in sorted(BOXES):
+ table = Table(
+ show_footer=True, style="dim", border_style="not dim", expand=True
+ )
+ table.add_column("Header 1", "Footer 1")
+ table.add_column("Header 2", "Footer 2")
+ table.add_row("Cell", "Cell")
+ table.add_row("Cell", "Cell")
+ table.box = getattr(box, box_name)
+ table.title = Text(f"box.{box_name}", style="magenta")
+ columns.add_renderable(table)
+ console.print(columns)
+
+ # console.save_svg("box.svg")
diff --git a/third_party/python/pip/pip/_vendor/rich/cells.py b/third_party/python/pip/pip/_vendor/rich/cells.py
new file mode 100644
index 0000000000..139b949f7f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/cells.py
@@ -0,0 +1,154 @@
+import re
+from functools import lru_cache
+from typing import Callable, List
+
+from ._cell_widths import CELL_WIDTHS
+
+# Regex to match sequence of the most common character ranges
+_is_single_cell_widths = re.compile("^[\u0020-\u006f\u00a0-\u02ff\u0370-\u0482]*$").match
+
+
+@lru_cache(4096)
+def cached_cell_len(text: str) -> int:
+ """Get the number of cells required to display text.
+
+ This method always caches, which may use up a lot of memory. It is recommended to use
+ `cell_len` over this method.
+
+ Args:
+ text (str): Text to display.
+
+ Returns:
+ int: The number of cells required to display text.
+ """
+ _get_size = get_character_cell_size
+ total_size = sum(_get_size(character) for character in text)
+ return total_size
+
+
+def cell_len(text: str, _cell_len: Callable[[str], int] = cached_cell_len) -> int:
+ """Get the number of cells required to display text.
+
+ Args:
+ text (str): Text to display.
+
+ Returns:
+ int: The number of cells required to display text.
+ """
+ if len(text) < 512:
+ return _cell_len(text)
+ _get_size = get_character_cell_size
+ total_size = sum(_get_size(character) for character in text)
+ return total_size
+
+
+@lru_cache(maxsize=4096)
+def get_character_cell_size(character: str) -> int:
+ """Get the cell size of a character.
+
+ Args:
+ character (str): A single character.
+
+ Returns:
+ int: Number of cells (0, 1 or 2) occupied by that character.
+ """
+ return _get_codepoint_cell_size(ord(character))
+
+
+@lru_cache(maxsize=4096)
+def _get_codepoint_cell_size(codepoint: int) -> int:
+ """Get the cell size of a character.
+
+ Args:
+ character (str): A single character.
+
+ Returns:
+ int: Number of cells (0, 1 or 2) occupied by that character.
+ """
+
+ _table = CELL_WIDTHS
+ lower_bound = 0
+ upper_bound = len(_table) - 1
+ index = (lower_bound + upper_bound) // 2
+ while True:
+ start, end, width = _table[index]
+ if codepoint < start:
+ upper_bound = index - 1
+ elif codepoint > end:
+ lower_bound = index + 1
+ else:
+ return 0 if width == -1 else width
+ if upper_bound < lower_bound:
+ break
+ index = (lower_bound + upper_bound) // 2
+ return 1
+
+
+def set_cell_size(text: str, total: int) -> str:
+ """Set the length of a string to fit within given number of cells."""
+
+ if _is_single_cell_widths(text):
+ size = len(text)
+ if size < total:
+ return text + " " * (total - size)
+ return text[:total]
+
+ if total <= 0:
+ return ""
+ cell_size = cell_len(text)
+ if cell_size == total:
+ return text
+ if cell_size < total:
+ return text + " " * (total - cell_size)
+
+ start = 0
+ end = len(text)
+
+ # Binary search until we find the right size
+ while True:
+ pos = (start + end) // 2
+ before = text[: pos + 1]
+ before_len = cell_len(before)
+ if before_len == total + 1 and cell_len(before[-1]) == 2:
+ return before[:-1] + " "
+ if before_len == total:
+ return before
+ if before_len > total:
+ end = pos
+ else:
+ start = pos
+
+
+# TODO: This is inefficient
+# TODO: This might not work with CWJ type characters
+def chop_cells(text: str, max_size: int, position: int = 0) -> List[str]:
+ """Break text in to equal (cell) length strings, returning the characters in reverse
+ order"""
+ _get_character_cell_size = get_character_cell_size
+ characters = [
+ (character, _get_character_cell_size(character)) for character in text
+ ]
+ total_size = position
+ lines: List[List[str]] = [[]]
+ append = lines[-1].append
+
+ for character, size in reversed(characters):
+ if total_size + size > max_size:
+ lines.append([character])
+ append = lines[-1].append
+ total_size = size
+ else:
+ total_size += size
+ append(character)
+
+ return ["".join(line) for line in lines]
+
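+# Editor's sketch (not part of upstream Rich): cell_len counts terminal
+# cells rather than characters, and set_cell_size pads or truncates text
+# to an exact cell count. CJK characters occupy two cells each:
+#
+#     >>> cell_len("你好")
+#     4
+#     >>> set_cell_size("你好", 3)
+#     '你 '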
+
+if __name__ == "__main__": # pragma: no cover
+
+ print(get_character_cell_size("😽"))
+ for line in chop_cells("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", 8):
+ print(line)
+ for n in range(80, 1, -1):
+ print(set_cell_size("""这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑。""", n) + "|")
+ print("x" * n)
diff --git a/third_party/python/pip/pip/_vendor/rich/color.py b/third_party/python/pip/pip/_vendor/rich/color.py
new file mode 100644
index 0000000000..ef2e895d7c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/color.py
@@ -0,0 +1,618 @@
+import platform
+import re
+from colorsys import rgb_to_hls
+from enum import IntEnum
+from functools import lru_cache
+from typing import TYPE_CHECKING, NamedTuple, Optional, Tuple
+
+from ._palettes import EIGHT_BIT_PALETTE, STANDARD_PALETTE, WINDOWS_PALETTE
+from .color_triplet import ColorTriplet
+from .repr import Result, rich_repr
+from .terminal_theme import DEFAULT_TERMINAL_THEME
+
+if TYPE_CHECKING: # pragma: no cover
+ from .terminal_theme import TerminalTheme
+ from .text import Text
+
+
+WINDOWS = platform.system() == "Windows"
+
+
+class ColorSystem(IntEnum):
+ """One of the 3 color system supported by terminals."""
+
+ STANDARD = 1
+ EIGHT_BIT = 2
+ TRUECOLOR = 3
+ WINDOWS = 4
+
+ def __repr__(self) -> str:
+ return f"ColorSystem.{self.name}"
+
+ def __str__(self) -> str:
+ return repr(self)
+
+
+class ColorType(IntEnum):
+ """Type of color stored in Color class."""
+
+ DEFAULT = 0
+ STANDARD = 1
+ EIGHT_BIT = 2
+ TRUECOLOR = 3
+ WINDOWS = 4
+
+ def __repr__(self) -> str:
+ return f"ColorType.{self.name}"
+
+
+ANSI_COLOR_NAMES = {
+ "black": 0,
+ "red": 1,
+ "green": 2,
+ "yellow": 3,
+ "blue": 4,
+ "magenta": 5,
+ "cyan": 6,
+ "white": 7,
+ "bright_black": 8,
+ "bright_red": 9,
+ "bright_green": 10,
+ "bright_yellow": 11,
+ "bright_blue": 12,
+ "bright_magenta": 13,
+ "bright_cyan": 14,
+ "bright_white": 15,
+ "grey0": 16,
+ "gray0": 16,
+ "navy_blue": 17,
+ "dark_blue": 18,
+ "blue3": 20,
+ "blue1": 21,
+ "dark_green": 22,
+ "deep_sky_blue4": 25,
+ "dodger_blue3": 26,
+ "dodger_blue2": 27,
+ "green4": 28,
+ "spring_green4": 29,
+ "turquoise4": 30,
+ "deep_sky_blue3": 32,
+ "dodger_blue1": 33,
+ "green3": 40,
+ "spring_green3": 41,
+ "dark_cyan": 36,
+ "light_sea_green": 37,
+ "deep_sky_blue2": 38,
+ "deep_sky_blue1": 39,
+ "spring_green2": 47,
+ "cyan3": 43,
+ "dark_turquoise": 44,
+ "turquoise2": 45,
+ "green1": 46,
+ "spring_green1": 48,
+ "medium_spring_green": 49,
+ "cyan2": 50,
+ "cyan1": 51,
+ "dark_red": 88,
+ "deep_pink4": 125,
+ "purple4": 55,
+ "purple3": 56,
+ "blue_violet": 57,
+ "orange4": 94,
+ "grey37": 59,
+ "gray37": 59,
+ "medium_purple4": 60,
+ "slate_blue3": 62,
+ "royal_blue1": 63,
+ "chartreuse4": 64,
+ "dark_sea_green4": 71,
+ "pale_turquoise4": 66,
+ "steel_blue": 67,
+ "steel_blue3": 68,
+ "cornflower_blue": 69,
+ "chartreuse3": 76,
+ "cadet_blue": 73,
+ "sky_blue3": 74,
+ "steel_blue1": 81,
+ "pale_green3": 114,
+ "sea_green3": 78,
+ "aquamarine3": 79,
+ "medium_turquoise": 80,
+ "chartreuse2": 112,
+ "sea_green2": 83,
+ "sea_green1": 85,
+ "aquamarine1": 122,
+ "dark_slate_gray2": 87,
+ "dark_magenta": 91,
+ "dark_violet": 128,
+ "purple": 129,
+ "light_pink4": 95,
+ "plum4": 96,
+ "medium_purple3": 98,
+ "slate_blue1": 99,
+ "yellow4": 106,
+ "wheat4": 101,
+ "grey53": 102,
+ "gray53": 102,
+ "light_slate_grey": 103,
+ "light_slate_gray": 103,
+ "medium_purple": 104,
+ "light_slate_blue": 105,
+ "dark_olive_green3": 149,
+ "dark_sea_green": 108,
+ "light_sky_blue3": 110,
+ "sky_blue2": 111,
+ "dark_sea_green3": 150,
+ "dark_slate_gray3": 116,
+ "sky_blue1": 117,
+ "chartreuse1": 118,
+ "light_green": 120,
+ "pale_green1": 156,
+ "dark_slate_gray1": 123,
+ "red3": 160,
+ "medium_violet_red": 126,
+ "magenta3": 164,
+ "dark_orange3": 166,
+ "indian_red": 167,
+ "hot_pink3": 168,
+ "medium_orchid3": 133,
+ "medium_orchid": 134,
+ "medium_purple2": 140,
+ "dark_goldenrod": 136,
+ "light_salmon3": 173,
+ "rosy_brown": 138,
+ "grey63": 139,
+ "gray63": 139,
+ "medium_purple1": 141,
+ "gold3": 178,
+ "dark_khaki": 143,
+ "navajo_white3": 144,
+ "grey69": 145,
+ "gray69": 145,
+ "light_steel_blue3": 146,
+ "light_steel_blue": 147,
+ "yellow3": 184,
+ "dark_sea_green2": 157,
+ "light_cyan3": 152,
+ "light_sky_blue1": 153,
+ "green_yellow": 154,
+ "dark_olive_green2": 155,
+ "dark_sea_green1": 193,
+ "pale_turquoise1": 159,
+ "deep_pink3": 162,
+ "magenta2": 200,
+ "hot_pink2": 169,
+ "orchid": 170,
+ "medium_orchid1": 207,
+ "orange3": 172,
+ "light_pink3": 174,
+ "pink3": 175,
+ "plum3": 176,
+ "violet": 177,
+ "light_goldenrod3": 179,
+ "tan": 180,
+ "misty_rose3": 181,
+ "thistle3": 182,
+ "plum2": 183,
+ "khaki3": 185,
+ "light_goldenrod2": 222,
+ "light_yellow3": 187,
+ "grey84": 188,
+ "gray84": 188,
+ "light_steel_blue1": 189,
+ "yellow2": 190,
+ "dark_olive_green1": 192,
+ "honeydew2": 194,
+ "light_cyan1": 195,
+ "red1": 196,
+ "deep_pink2": 197,
+ "deep_pink1": 199,
+ "magenta1": 201,
+ "orange_red1": 202,
+ "indian_red1": 204,
+ "hot_pink": 206,
+ "dark_orange": 208,
+ "salmon1": 209,
+ "light_coral": 210,
+ "pale_violet_red1": 211,
+ "orchid2": 212,
+ "orchid1": 213,
+ "orange1": 214,
+ "sandy_brown": 215,
+ "light_salmon1": 216,
+ "light_pink1": 217,
+ "pink1": 218,
+ "plum1": 219,
+ "gold1": 220,
+ "navajo_white1": 223,
+ "misty_rose1": 224,
+ "thistle1": 225,
+ "yellow1": 226,
+ "light_goldenrod1": 227,
+ "khaki1": 228,
+ "wheat1": 229,
+ "cornsilk1": 230,
+ "grey100": 231,
+ "gray100": 231,
+ "grey3": 232,
+ "gray3": 232,
+ "grey7": 233,
+ "gray7": 233,
+ "grey11": 234,
+ "gray11": 234,
+ "grey15": 235,
+ "gray15": 235,
+ "grey19": 236,
+ "gray19": 236,
+ "grey23": 237,
+ "gray23": 237,
+ "grey27": 238,
+ "gray27": 238,
+ "grey30": 239,
+ "gray30": 239,
+ "grey35": 240,
+ "gray35": 240,
+ "grey39": 241,
+ "gray39": 241,
+ "grey42": 242,
+ "gray42": 242,
+ "grey46": 243,
+ "gray46": 243,
+ "grey50": 244,
+ "gray50": 244,
+ "grey54": 245,
+ "gray54": 245,
+ "grey58": 246,
+ "gray58": 246,
+ "grey62": 247,
+ "gray62": 247,
+ "grey66": 248,
+ "gray66": 248,
+ "grey70": 249,
+ "gray70": 249,
+ "grey74": 250,
+ "gray74": 250,
+ "grey78": 251,
+ "gray78": 251,
+ "grey82": 252,
+ "gray82": 252,
+ "grey85": 253,
+ "gray85": 253,
+ "grey89": 254,
+ "gray89": 254,
+ "grey93": 255,
+ "gray93": 255,
+}
+
+
+class ColorParseError(Exception):
+ """The color could not be parsed."""
+
+
+RE_COLOR = re.compile(
+ r"""^
+\#([0-9a-f]{6})$|
+color\(([0-9]{1,3})\)$|
+rgb\(([\d\s,]+)\)$
+""",
+ re.VERBOSE,
+)
+
+
+@rich_repr
+class Color(NamedTuple):
+ """Terminal color definition."""
+
+ name: str
+ """The name of the color (typically the input to Color.parse)."""
+ type: ColorType
+ """The type of the color."""
+ number: Optional[int] = None
+ """The color number, if a standard color, or None."""
+ triplet: Optional[ColorTriplet] = None
+ """A triplet of color components, if an RGB color."""
+
+ def __rich__(self) -> "Text":
+ """Displays the actual color if Rich printed."""
+ from .style import Style
+ from .text import Text
+
+ return Text.assemble(
+ f"<color {self.name!r} ({self.type.name.lower()})",
+ ("⬤", Style(color=self)),
+ " >",
+ )
+
+ def __rich_repr__(self) -> Result:
+ yield self.name
+ yield self.type
+ yield "number", self.number, None
+ yield "triplet", self.triplet, None
+
+ @property
+ def system(self) -> ColorSystem:
+ """Get the native color system for this color."""
+ if self.type == ColorType.DEFAULT:
+ return ColorSystem.STANDARD
+ return ColorSystem(int(self.type))
+
+ @property
+ def is_system_defined(self) -> bool:
+ """Check if the color is ultimately defined by the system."""
+ return self.system not in (ColorSystem.EIGHT_BIT, ColorSystem.TRUECOLOR)
+
+ @property
+ def is_default(self) -> bool:
+ """Check if the color is a default color."""
+ return self.type == ColorType.DEFAULT
+
+ def get_truecolor(
+ self, theme: Optional["TerminalTheme"] = None, foreground: bool = True
+ ) -> ColorTriplet:
+ """Get an equivalent color triplet for this color.
+
+ Args:
+ theme (TerminalTheme, optional): Optional terminal theme, or None to use default. Defaults to None.
+ foreground (bool, optional): True for a foreground color, or False for background. Defaults to True.
+
+ Returns:
+ ColorTriplet: A color triplet containing RGB components.
+ """
+
+ if theme is None:
+ theme = DEFAULT_TERMINAL_THEME
+ if self.type == ColorType.TRUECOLOR:
+ assert self.triplet is not None
+ return self.triplet
+ elif self.type == ColorType.EIGHT_BIT:
+ assert self.number is not None
+ return EIGHT_BIT_PALETTE[self.number]
+ elif self.type == ColorType.STANDARD:
+ assert self.number is not None
+ return theme.ansi_colors[self.number]
+ elif self.type == ColorType.WINDOWS:
+ assert self.number is not None
+ return WINDOWS_PALETTE[self.number]
+ else: # self.type == ColorType.DEFAULT:
+ assert self.number is None
+ return theme.foreground_color if foreground else theme.background_color
+
+ @classmethod
+ def from_ansi(cls, number: int) -> "Color":
+ """Create a Color number from it's 8-bit ansi number.
+
+ Args:
+ number (int): A number between 0-255 inclusive.
+
+ Returns:
+ Color: A new Color instance.
+ """
+ return cls(
+ name=f"color({number})",
+ type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
+ number=number,
+ )
+
+ @classmethod
+ def from_triplet(cls, triplet: "ColorTriplet") -> "Color":
+ """Create a truecolor RGB color from a triplet of values.
+
+ Args:
+ triplet (ColorTriplet): A color triplet containing red, green and blue components.
+
+ Returns:
+ Color: A new color object.
+ """
+ return cls(name=triplet.hex, type=ColorType.TRUECOLOR, triplet=triplet)
+
+ @classmethod
+ def from_rgb(cls, red: float, green: float, blue: float) -> "Color":
+ """Create a truecolor from three color components in the range(0->255).
+
+ Args:
+ red (float): Red component in range 0-255.
+ green (float): Green component in range 0-255.
+ blue (float): Blue component in range 0-255.
+
+ Returns:
+ Color: A new color object.
+ """
+ return cls.from_triplet(ColorTriplet(int(red), int(green), int(blue)))
+
+ @classmethod
+ def default(cls) -> "Color":
+ """Get a Color instance representing the default color.
+
+ Returns:
+ Color: Default color.
+ """
+ return cls(name="default", type=ColorType.DEFAULT)
+
+ @classmethod
+ @lru_cache(maxsize=1024)
+ def parse(cls, color: str) -> "Color":
+ """Parse a color definition."""
+ original_color = color
+ color = color.lower().strip()
+
+ if color == "default":
+ return cls(color, type=ColorType.DEFAULT)
+
+ color_number = ANSI_COLOR_NAMES.get(color)
+ if color_number is not None:
+ return cls(
+ color,
+ type=(ColorType.STANDARD if color_number < 16 else ColorType.EIGHT_BIT),
+ number=color_number,
+ )
+
+ color_match = RE_COLOR.match(color)
+ if color_match is None:
+ raise ColorParseError(f"{original_color!r} is not a valid color")
+
+ color_24, color_8, color_rgb = color_match.groups()
+ if color_24:
+ triplet = ColorTriplet(
+ int(color_24[0:2], 16), int(color_24[2:4], 16), int(color_24[4:6], 16)
+ )
+ return cls(color, ColorType.TRUECOLOR, triplet=triplet)
+
+ elif color_8:
+ number = int(color_8)
+ if number > 255:
+ raise ColorParseError(f"color number must be <= 255 in {color!r}")
+ return cls(
+ color,
+ type=(ColorType.STANDARD if number < 16 else ColorType.EIGHT_BIT),
+ number=number,
+ )
+
+ else: # color_rgb:
+ components = color_rgb.split(",")
+ if len(components) != 3:
+ raise ColorParseError(
+ f"expected three components in {original_color!r}"
+ )
+ red, green, blue = components
+ triplet = ColorTriplet(int(red), int(green), int(blue))
+ if not all(component <= 255 for component in triplet):
+ raise ColorParseError(
+ f"color components must be <= 255 in {original_color!r}"
+ )
+ return cls(color, ColorType.TRUECOLOR, triplet=triplet)
+
+ @lru_cache(maxsize=1024)
+ def get_ansi_codes(self, foreground: bool = True) -> Tuple[str, ...]:
+ """Get the ANSI escape codes for this color."""
+ _type = self.type
+ if _type == ColorType.DEFAULT:
+ return ("39" if foreground else "49",)
+
+ elif _type == ColorType.WINDOWS:
+ number = self.number
+ assert number is not None
+ fore, back = (30, 40) if number < 8 else (82, 92)
+ return (str(fore + number if foreground else back + number),)
+
+ elif _type == ColorType.STANDARD:
+ number = self.number
+ assert number is not None
+ fore, back = (30, 40) if number < 8 else (82, 92)
+ return (str(fore + number if foreground else back + number),)
+
+ elif _type == ColorType.EIGHT_BIT:
+ assert self.number is not None
+ return ("38" if foreground else "48", "5", str(self.number))
+
+ else: # self.standard == ColorStandard.TRUECOLOR:
+ assert self.triplet is not None
+ red, green, blue = self.triplet
+ return ("38" if foreground else "48", "2", str(red), str(green), str(blue))
+
+ @lru_cache(maxsize=1024)
+ def downgrade(self, system: ColorSystem) -> "Color":
+ """Downgrade a color system to a system with fewer colors."""
+
+ if self.type in [ColorType.DEFAULT, system]:
+ return self
+ # Convert to 8-bit color from truecolor color
+ if system == ColorSystem.EIGHT_BIT and self.system == ColorSystem.TRUECOLOR:
+ assert self.triplet is not None
+ red, green, blue = self.triplet.normalized
+ _h, l, s = rgb_to_hls(red, green, blue)
+ # If saturation is under 10% assume it is grayscale
+ if s < 0.1:
+ gray = round(l * 25.0)
+ if gray == 0:
+ color_number = 16
+ elif gray == 25:
+ color_number = 231
+ else:
+ color_number = 231 + gray
+ return Color(self.name, ColorType.EIGHT_BIT, number=color_number)
+
+ color_number = (
+ 16 + 36 * round(red * 5.0) + 6 * round(green * 5.0) + round(blue * 5.0)
+ )
+ return Color(self.name, ColorType.EIGHT_BIT, number=color_number)
+
+ # Convert to standard from truecolor or 8-bit
+ elif system == ColorSystem.STANDARD:
+ if self.system == ColorSystem.TRUECOLOR:
+ assert self.triplet is not None
+ triplet = self.triplet
+ else: # self.system == ColorSystem.EIGHT_BIT
+ assert self.number is not None
+ triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number])
+
+ color_number = STANDARD_PALETTE.match(triplet)
+ return Color(self.name, ColorType.STANDARD, number=color_number)
+
+ elif system == ColorSystem.WINDOWS:
+ if self.system == ColorSystem.TRUECOLOR:
+ assert self.triplet is not None
+ triplet = self.triplet
+ else: # self.system == ColorSystem.EIGHT_BIT
+ assert self.number is not None
+ if self.number < 16:
+ return Color(self.name, ColorType.WINDOWS, number=self.number)
+ triplet = ColorTriplet(*EIGHT_BIT_PALETTE[self.number])
+
+ color_number = WINDOWS_PALETTE.match(triplet)
+ return Color(self.name, ColorType.WINDOWS, number=color_number)
+
+ return self
+
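+# Editor's sketch (not part of upstream Rich): downgrading a truecolor
+# value for an 8-bit terminal picks the nearest entry in the 6x6x6 color
+# cube (or the grayscale ramp when saturation is below 10%):
+#
+#     Color.parse("#ff0000").downgrade(ColorSystem.EIGHT_BIT)
+#     # -> Color('#ff0000', ColorType.EIGHT_BIT, number=196)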
+
+def parse_rgb_hex(hex_color: str) -> ColorTriplet:
+ """Parse six hex characters in to RGB triplet."""
+ assert len(hex_color) == 6, "must be 6 characters"
+ color = ColorTriplet(
+ int(hex_color[0:2], 16), int(hex_color[2:4], 16), int(hex_color[4:6], 16)
+ )
+ return color
+
+
+def blend_rgb(
+ color1: ColorTriplet, color2: ColorTriplet, cross_fade: float = 0.5
+) -> ColorTriplet:
+ """Blend one RGB color in to another."""
+ r1, g1, b1 = color1
+ r2, g2, b2 = color2
+ new_color = ColorTriplet(
+ int(r1 + (r2 - r1) * cross_fade),
+ int(g1 + (g2 - g1) * cross_fade),
+ int(b1 + (b2 - b1) * cross_fade),
+ )
+ return new_color
+
+
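+# Editor's sketch (not part of upstream Rich): a 50% cross-fade lands
+# midway between the inputs (components truncated toward zero by int()):
+#
+#     blend_rgb(ColorTriplet(0, 0, 0), ColorTriplet(255, 255, 255))
+#     # -> ColorTriplet(red=127, green=127, blue=127)
+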
+if __name__ == "__main__": # pragma: no cover
+
+ from .console import Console
+ from .table import Table
+ from .text import Text
+
+ console = Console()
+
+ table = Table(show_footer=False, show_edge=True)
+ table.add_column("Color", width=10, overflow="ellipsis")
+ table.add_column("Number", justify="right", style="yellow")
+ table.add_column("Name", style="green")
+ table.add_column("Hex", style="blue")
+ table.add_column("RGB", style="magenta")
+
+ colors = sorted((v, k) for k, v in ANSI_COLOR_NAMES.items())
+ for color_number, name in colors:
+ if "grey" in name:
+ continue
+ color_cell = Text(" " * 10, style=f"on {name}")
+ if color_number < 16:
+ table.add_row(color_cell, f"{color_number}", Text(f'"{name}"'))
+ else:
+ color = EIGHT_BIT_PALETTE[color_number] # type: ignore[has-type]
+ table.add_row(
+ color_cell, str(color_number), Text(f'"{name}"'), color.hex, color.rgb
+ )
+
+ console.print(table)
diff --git a/third_party/python/pip/pip/_vendor/rich/color_triplet.py b/third_party/python/pip/pip/_vendor/rich/color_triplet.py
new file mode 100644
index 0000000000..02cab32825
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/color_triplet.py
@@ -0,0 +1,38 @@
+from typing import NamedTuple, Tuple
+
+
+class ColorTriplet(NamedTuple):
+ """The red, green, and blue components of a color."""
+
+ red: int
+ """Red component in 0 to 255 range."""
+ green: int
+ """Green component in 0 to 255 range."""
+ blue: int
+ """Blue component in 0 to 255 range."""
+
+ @property
+ def hex(self) -> str:
+ """get the color triplet in CSS style."""
+ red, green, blue = self
+ return f"#{red:02x}{green:02x}{blue:02x}"
+
+ @property
+ def rgb(self) -> str:
+ """The color in RGB format.
+
+ Returns:
+ str: An rgb color, e.g. ``"rgb(100,23,255)"``.
+ """
+ red, green, blue = self
+ return f"rgb({red},{green},{blue})"
+
+ @property
+ def normalized(self) -> Tuple[float, float, float]:
+ """Convert components into floats between 0 and 1.
+
+ Returns:
+ Tuple[float, float, float]: A tuple of three normalized colour components.
+ """
+ red, green, blue = self
+ return red / 255.0, green / 255.0, blue / 255.0
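+
+
+# Editor's sketch (not part of upstream Rich): the three views of a triplet.
+#
+#     >>> t = ColorTriplet(255, 136, 0)
+#     >>> t.hex
+#     '#ff8800'
+#     >>> t.rgb
+#     'rgb(255,136,0)'
+#     >>> t.normalized
+#     (1.0, 0.5333333333333333, 0.0)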
diff --git a/third_party/python/pip/pip/_vendor/rich/columns.py b/third_party/python/pip/pip/_vendor/rich/columns.py
new file mode 100644
index 0000000000..669a3a7074
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/columns.py
@@ -0,0 +1,187 @@
+from collections import defaultdict
+from itertools import chain
+from operator import itemgetter
+from typing import Dict, Iterable, List, Optional, Tuple
+
+from .align import Align, AlignMethod
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .constrain import Constrain
+from .measure import Measurement
+from .padding import Padding, PaddingDimensions
+from .table import Table
+from .text import TextType
+from .jupyter import JupyterMixin
+
+
+class Columns(JupyterMixin):
+ """Display renderables in neat columns.
+
+ Args:
+ renderables (Iterable[RenderableType]): Any number of Rich renderables (including str).
+ width (int, optional): The desired width of the columns, or None to auto detect. Defaults to None.
+ padding (PaddingDimensions, optional): Optional padding around cells. Defaults to (0, 1).
+ expand (bool, optional): Expand columns to full width. Defaults to False.
+ equal (bool, optional): Arrange in to equal sized columns. Defaults to False.
+ column_first (bool, optional): Align items from top to bottom (rather than left to right). Defaults to False.
+ right_to_left (bool, optional): Start column from right hand side. Defaults to False.
+ align (str, optional): Align value ("left", "right", or "center") or None for default. Defaults to None.
+ title (TextType, optional): Optional title for Columns.
+ """
+
+ def __init__(
+ self,
+ renderables: Optional[Iterable[RenderableType]] = None,
+ padding: PaddingDimensions = (0, 1),
+ *,
+ width: Optional[int] = None,
+ expand: bool = False,
+ equal: bool = False,
+ column_first: bool = False,
+ right_to_left: bool = False,
+ align: Optional[AlignMethod] = None,
+ title: Optional[TextType] = None,
+ ) -> None:
+ self.renderables = list(renderables or [])
+ self.width = width
+ self.padding = padding
+ self.expand = expand
+ self.equal = equal
+ self.column_first = column_first
+ self.right_to_left = right_to_left
+ self.align: Optional[AlignMethod] = align
+ self.title = title
+
+ def add_renderable(self, renderable: RenderableType) -> None:
+ """Add a renderable to the columns.
+
+ Args:
+ renderable (RenderableType): Any renderable object.
+ """
+ self.renderables.append(renderable)
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ render_str = console.render_str
+ renderables = [
+ render_str(renderable) if isinstance(renderable, str) else renderable
+ for renderable in self.renderables
+ ]
+ if not renderables:
+ return
+ _top, right, _bottom, left = Padding.unpack(self.padding)
+ width_padding = max(left, right)
+ max_width = options.max_width
+ widths: Dict[int, int] = defaultdict(int)
+ column_count = len(renderables)
+
+ get_measurement = Measurement.get
+ renderable_widths = [
+ get_measurement(console, options, renderable).maximum
+ for renderable in renderables
+ ]
+ if self.equal:
+ renderable_widths = [max(renderable_widths)] * len(renderable_widths)
+
+ def iter_renderables(
+ column_count: int,
+ ) -> Iterable[Tuple[int, Optional[RenderableType]]]:
+ item_count = len(renderables)
+ if self.column_first:
+ width_renderables = list(zip(renderable_widths, renderables))
+
+ column_lengths: List[int] = [item_count // column_count] * column_count
+ for col_no in range(item_count % column_count):
+ column_lengths[col_no] += 1
+
+ row_count = (item_count + column_count - 1) // column_count
+ cells = [[-1] * column_count for _ in range(row_count)]
+ row = col = 0
+ for index in range(item_count):
+ cells[row][col] = index
+ column_lengths[col] -= 1
+ if column_lengths[col]:
+ row += 1
+ else:
+ col += 1
+ row = 0
+ for index in chain.from_iterable(cells):
+ if index == -1:
+ break
+ yield width_renderables[index]
+ else:
+ yield from zip(renderable_widths, renderables)
+ # Pad odd elements with spaces
+ if item_count % column_count:
+ for _ in range(column_count - (item_count % column_count)):
+ yield 0, None
+
+ table = Table.grid(padding=self.padding, collapse_padding=True, pad_edge=False)
+ table.expand = self.expand
+ table.title = self.title
+
+ if self.width is not None:
+ column_count = (max_width) // (self.width + width_padding)
+ for _ in range(column_count):
+ table.add_column(width=self.width)
+ else:
+ while column_count > 1:
+ widths.clear()
+ column_no = 0
+ for renderable_width, _ in iter_renderables(column_count):
+ widths[column_no] = max(widths[column_no], renderable_width)
+ total_width = sum(widths.values()) + width_padding * (
+ len(widths) - 1
+ )
+ if total_width > max_width:
+ column_count = len(widths) - 1
+ break
+ else:
+ column_no = (column_no + 1) % column_count
+ else:
+ break
+
+ get_renderable = itemgetter(1)
+ _renderables = [
+ get_renderable(_renderable)
+ for _renderable in iter_renderables(column_count)
+ ]
+ if self.equal:
+ _renderables = [
+ None
+ if renderable is None
+ else Constrain(renderable, renderable_widths[0])
+ for renderable in _renderables
+ ]
+ if self.align:
+ align = self.align
+ _Align = Align
+ _renderables = [
+ None if renderable is None else _Align(renderable, align)
+ for renderable in _renderables
+ ]
+
+ right_to_left = self.right_to_left
+ add_row = table.add_row
+ for start in range(0, len(_renderables), column_count):
+ row = _renderables[start : start + column_count]
+ if right_to_left:
+ row = row[::-1]
+ add_row(*row)
+ yield table
+
+
+if __name__ == "__main__": # pragma: no cover
+ import os
+
+ console = Console()
+
+ files = [f"{i} {s}" for i, s in enumerate(sorted(os.listdir()))]
+ columns = Columns(files, padding=(0, 1), expand=False, equal=False)
+ console.print(columns)
+ console.rule()
+ columns.column_first = True
+ console.print(columns)
+ columns.right_to_left = True
+ console.rule()
+ console.print(columns)
diff --git a/third_party/python/pip/pip/_vendor/rich/console.py b/third_party/python/pip/pip/_vendor/rich/console.py
new file mode 100644
index 0000000000..f805f2dea7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/console.py
@@ -0,0 +1,2612 @@
+import inspect
+import io
+import os
+import platform
+import sys
+import threading
+import zlib
+from abc import ABC, abstractmethod
+from dataclasses import dataclass, field
+from datetime import datetime
+from functools import wraps
+from getpass import getpass
+from html import escape
+from inspect import isclass
+from itertools import islice
+from math import ceil
+from time import monotonic
+from types import FrameType, ModuleType, TracebackType
+from typing import (
+ IO,
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ Mapping,
+ NamedTuple,
+ Optional,
+ TextIO,
+ Tuple,
+ Type,
+ Union,
+ cast,
+)
+
+from pip._vendor.rich._null_file import NULL_FILE
+
+if sys.version_info >= (3, 8):
+ from typing import Literal, Protocol, runtime_checkable
+else:
+ from pip._vendor.typing_extensions import (
+ Literal,
+ Protocol,
+ runtime_checkable,
+ ) # pragma: no cover
+
+from . import errors, themes
+from ._emoji_replace import _emoji_replace
+from ._export_format import CONSOLE_HTML_FORMAT, CONSOLE_SVG_FORMAT
+from ._log_render import FormatTimeCallable, LogRender
+from .align import Align, AlignMethod
+from .color import ColorSystem, blend_rgb
+from .control import Control
+from .emoji import EmojiVariant
+from .highlighter import NullHighlighter, ReprHighlighter
+from .markup import render as render_markup
+from .measure import Measurement, measure_renderables
+from .pager import Pager, SystemPager
+from .pretty import Pretty, is_expandable
+from .protocol import rich_cast
+from .region import Region
+from .scope import render_scope
+from .screen import Screen
+from .segment import Segment
+from .style import Style, StyleType
+from .styled import Styled
+from .terminal_theme import DEFAULT_TERMINAL_THEME, SVG_EXPORT_THEME, TerminalTheme
+from .text import Text, TextType
+from .theme import Theme, ThemeStack
+
+if TYPE_CHECKING:
+ from ._windows import WindowsConsoleFeatures
+ from .live import Live
+ from .status import Status
+
+JUPYTER_DEFAULT_COLUMNS = 115
+JUPYTER_DEFAULT_LINES = 100
+WINDOWS = platform.system() == "Windows"
+
+HighlighterType = Callable[[Union[str, "Text"]], "Text"]
+JustifyMethod = Literal["default", "left", "center", "right", "full"]
+OverflowMethod = Literal["fold", "crop", "ellipsis", "ignore"]
+
+
+class NoChange:
+ pass
+
+
+NO_CHANGE = NoChange()
+
+try:
+ _STDIN_FILENO = sys.__stdin__.fileno()
+except Exception:
+ _STDIN_FILENO = 0
+try:
+ _STDOUT_FILENO = sys.__stdout__.fileno()
+except Exception:
+ _STDOUT_FILENO = 1
+try:
+ _STDERR_FILENO = sys.__stderr__.fileno()
+except Exception:
+ _STDERR_FILENO = 2
+
+_STD_STREAMS = (_STDIN_FILENO, _STDOUT_FILENO, _STDERR_FILENO)
+_STD_STREAMS_OUTPUT = (_STDOUT_FILENO, _STDERR_FILENO)
+
+
+_TERM_COLORS = {
+ "kitty": ColorSystem.EIGHT_BIT,
+ "256color": ColorSystem.EIGHT_BIT,
+ "16color": ColorSystem.STANDARD,
+}
+
+
+class ConsoleDimensions(NamedTuple):
+ """Size of the terminal."""
+
+ width: int
+ """The width of the console in 'cells'."""
+ height: int
+ """The height of the console in lines."""
+
+
+@dataclass
+class ConsoleOptions:
+ """Options for __rich_console__ method."""
+
+ size: ConsoleDimensions
+ """Size of console."""
+ legacy_windows: bool
+ """legacy_windows: flag for legacy windows."""
+ min_width: int
+ """Minimum width of renderable."""
+ max_width: int
+ """Maximum width of renderable."""
+ is_terminal: bool
+ """True if the target is a terminal, otherwise False."""
+ encoding: str
+ """Encoding of terminal."""
+ max_height: int
+ """Height of container (starts as terminal)"""
+ justify: Optional[JustifyMethod] = None
+ """Justify value override for renderable."""
+ overflow: Optional[OverflowMethod] = None
+ """Overflow value override for renderable."""
+ no_wrap: Optional[bool] = False
+ """Disable wrapping for text."""
+ highlight: Optional[bool] = None
+ """Highlight override for render_str."""
+ markup: Optional[bool] = None
+ """Enable markup when rendering strings."""
+ height: Optional[int] = None
+
+ @property
+ def ascii_only(self) -> bool:
+ """Check if renderables should use ascii only."""
+ return not self.encoding.startswith("utf")
+
+ def copy(self) -> "ConsoleOptions":
+ """Return a copy of the options.
+
+ Returns:
+ ConsoleOptions: a copy of self.
+ """
+ options: ConsoleOptions = ConsoleOptions.__new__(ConsoleOptions)
+ options.__dict__ = self.__dict__.copy()
+ return options
+
+ def update(
+ self,
+ *,
+ width: Union[int, NoChange] = NO_CHANGE,
+ min_width: Union[int, NoChange] = NO_CHANGE,
+ max_width: Union[int, NoChange] = NO_CHANGE,
+ justify: Union[Optional[JustifyMethod], NoChange] = NO_CHANGE,
+ overflow: Union[Optional[OverflowMethod], NoChange] = NO_CHANGE,
+ no_wrap: Union[Optional[bool], NoChange] = NO_CHANGE,
+ highlight: Union[Optional[bool], NoChange] = NO_CHANGE,
+ markup: Union[Optional[bool], NoChange] = NO_CHANGE,
+ height: Union[Optional[int], NoChange] = NO_CHANGE,
+ ) -> "ConsoleOptions":
+ """Update values, return a copy."""
+ options = self.copy()
+ if not isinstance(width, NoChange):
+ options.min_width = options.max_width = max(0, width)
+ if not isinstance(min_width, NoChange):
+ options.min_width = min_width
+ if not isinstance(max_width, NoChange):
+ options.max_width = max_width
+ if not isinstance(justify, NoChange):
+ options.justify = justify
+ if not isinstance(overflow, NoChange):
+ options.overflow = overflow
+ if not isinstance(no_wrap, NoChange):
+ options.no_wrap = no_wrap
+ if not isinstance(highlight, NoChange):
+ options.highlight = highlight
+ if not isinstance(markup, NoChange):
+ options.markup = markup
+ if not isinstance(height, NoChange):
+ if height is not None:
+ options.max_height = height
+ options.height = None if height is None else max(0, height)
+ return options
+
+ def update_width(self, width: int) -> "ConsoleOptions":
+ """Update just the width, return a copy.
+
+ Args:
+ width (int): New width (sets both min_width and max_width)
+
+ Returns:
+ ~ConsoleOptions: New console options instance.
+ """
+ options = self.copy()
+ options.min_width = options.max_width = max(0, width)
+ return options
+
+ def update_height(self, height: int) -> "ConsoleOptions":
+ """Update the height, and return a copy.
+
+ Args:
+ height (int): New height
+
+ Returns:
+ ~ConsoleOptions: New Console options instance.
+ """
+ options = self.copy()
+ options.max_height = options.height = height
+ return options
+
+ def reset_height(self) -> "ConsoleOptions":
+ """Return a copy of the options with height set to ``None``.
+
+ Returns:
+ ~ConsoleOptions: New console options instance.
+ """
+ options = self.copy()
+ options.height = None
+ return options
+
+ def update_dimensions(self, width: int, height: int) -> "ConsoleOptions":
+ """Update the width and height, and return a copy.
+
+ Args:
+ width (int): New width (sets both min_width and max_width).
+ height (int): New height.
+
+ Returns:
+ ~ConsoleOptions: New console options instance.
+ """
+ options = self.copy()
+ options.min_width = options.max_width = max(0, width)
+ options.height = options.max_height = height
+ return options
+
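+# Editor's sketch (not part of upstream Rich): every update returns a copy,
+# so a renderable can narrow the options it passes down without mutating
+# the caller's options:
+#
+#     narrow = options.update_width(40)  # min_width == max_width == 40
+#     fixed = options.update(height=10)  # height == max_height == 10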
+
+@runtime_checkable
+class RichCast(Protocol):
+ """An object that may be 'cast' to a console renderable."""
+
+ def __rich__(
+ self,
+ ) -> Union["ConsoleRenderable", "RichCast", str]: # pragma: no cover
+ ...
+
+
+@runtime_checkable
+class ConsoleRenderable(Protocol):
+ """An object that supports the console protocol."""
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult": # pragma: no cover
+ ...
+
+
+# A type that may be rendered by Console.
+RenderableType = Union[ConsoleRenderable, RichCast, str]
+
+# The result of calling a __rich_console__ method.
+RenderResult = Iterable[Union[RenderableType, Segment]]
+
+_null_highlighter = NullHighlighter()
+
+
+class CaptureError(Exception):
+ """An error in the Capture context manager."""
+
+
+class NewLine:
+ """A renderable to generate new line(s)"""
+
+ def __init__(self, count: int = 1) -> None:
+ self.count = count
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Iterable[Segment]:
+ yield Segment("\n" * self.count)
+
+
+class ScreenUpdate:
+ """Render a list of lines at a given offset."""
+
+ def __init__(self, lines: List[List[Segment]], x: int, y: int) -> None:
+ self._lines = lines
+ self.x = x
+ self.y = y
+
+ def __rich_console__(
+ self, console: "Console", options: ConsoleOptions
+ ) -> RenderResult:
+ x = self.x
+ move_to = Control.move_to
+ for offset, line in enumerate(self._lines, self.y):
+ yield move_to(x, offset)
+ yield from line
+
+
+class Capture:
+ """Context manager to capture the result of printing to the console.
+ See :meth:`~rich.console.Console.capture` for how to use.
+
+ Args:
+ console (Console): A console instance to capture output.
+ """
+
+ def __init__(self, console: "Console") -> None:
+ self._console = console
+ self._result: Optional[str] = None
+
+ def __enter__(self) -> "Capture":
+ self._console.begin_capture()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self._result = self._console.end_capture()
+
+ def get(self) -> str:
+ """Get the result of the capture."""
+ if self._result is None:
+ raise CaptureError(
+ "Capture result is not available until context manager exits."
+ )
+ return self._result
+
+
+class ThemeContext:
+ """A context manager to use a temporary theme. See :meth:`~rich.console.Console.use_theme` for usage."""
+
+ def __init__(self, console: "Console", theme: Theme, inherit: bool = True) -> None:
+ self.console = console
+ self.theme = theme
+ self.inherit = inherit
+
+ def __enter__(self) -> "ThemeContext":
+ self.console.push_theme(self.theme)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.console.pop_theme()
+
+
+class PagerContext:
+ """A context manager that 'pages' content. See :meth:`~rich.console.Console.pager` for usage."""
+
+ def __init__(
+ self,
+ console: "Console",
+ pager: Optional[Pager] = None,
+ styles: bool = False,
+ links: bool = False,
+ ) -> None:
+ self._console = console
+ self.pager = SystemPager() if pager is None else pager
+ self.styles = styles
+ self.links = links
+
+ def __enter__(self) -> "PagerContext":
+ self._console._enter_buffer()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ if exc_type is None:
+ with self._console._lock:
+ buffer: List[Segment] = self._console._buffer[:]
+ del self._console._buffer[:]
+ segments: Iterable[Segment] = buffer
+ if not self.styles:
+ segments = Segment.strip_styles(segments)
+ elif not self.links:
+ segments = Segment.strip_links(segments)
+ content = self._console._render_buffer(segments)
+ self.pager.show(content)
+ self._console._exit_buffer()
+
+
+class ScreenContext:
+ """A context manager that enables an alternative screen. See :meth:`~rich.console.Console.screen` for usage."""
+
+ def __init__(
+ self, console: "Console", hide_cursor: bool, style: StyleType = ""
+ ) -> None:
+ self.console = console
+ self.hide_cursor = hide_cursor
+ self.screen = Screen(style=style)
+ self._changed = False
+
+ def update(
+ self, *renderables: RenderableType, style: Optional[StyleType] = None
+ ) -> None:
+ """Update the screen.
+
+ Args:
+ *renderables (RenderableType): Zero or more renderables to replace the
+ current renderable, or none for no change.
+ style (StyleType, optional): Replacement style, or None for no change. Defaults to None.
+ """
+ if renderables:
+ self.screen.renderable = (
+ Group(*renderables) if len(renderables) > 1 else renderables[0]
+ )
+ if style is not None:
+ self.screen.style = style
+ self.console.print(self.screen, end="")
+
+ def __enter__(self) -> "ScreenContext":
+ self._changed = self.console.set_alt_screen(True)
+ if self._changed and self.hide_cursor:
+ self.console.show_cursor(False)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ if self._changed:
+ self.console.set_alt_screen(False)
+ if self.hide_cursor:
+ self.console.show_cursor(True)
+
+
+class Group:
+ """Takes a group of renderables and returns a renderable object that renders the group.
+
+ Args:
+ renderables (Iterable[RenderableType]): An iterable of renderable objects.
+ fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
+ """
+
+ def __init__(self, *renderables: "RenderableType", fit: bool = True) -> None:
+ self._renderables = renderables
+ self.fit = fit
+ self._render: Optional[List[RenderableType]] = None
+
+ @property
+ def renderables(self) -> List["RenderableType"]:
+ if self._render is None:
+ self._render = list(self._renderables)
+ return self._render
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ if self.fit:
+ return measure_renderables(console, options, self.renderables)
+ else:
+ return Measurement(options.max_width, options.max_width)
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> RenderResult:
+ yield from self.renderables
+
+
+def group(fit: bool = True) -> Callable[..., Callable[..., Group]]:
+ """A decorator that turns an iterable of renderables in to a group.
+
+ Args:
+ fit (bool, optional): Fit dimension of group to contents, or fill available space. Defaults to True.
+ """
+
+ def decorator(
+ method: Callable[..., Iterable[RenderableType]]
+ ) -> Callable[..., Group]:
+ """Convert a method that returns an iterable of renderables in to a Group."""
+
+ @wraps(method)
+ def _replace(*args: Any, **kwargs: Any) -> Group:
+ renderables = method(*args, **kwargs)
+ return Group(*renderables, fit=fit)
+
+ return _replace
+
+ return decorator
+
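+# Editor's sketch (not part of upstream Rich): typical use of the group()
+# decorator, turning a generator of renderables into a single renderable:
+#
+#     @group()
+#     def get_lines():
+#         yield "Hello"
+#         yield "World"
+#
+#     console.print(get_lines())  # renders both lines as one Group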
+
+def _is_jupyter() -> bool: # pragma: no cover
+ """Check if we're running in a Jupyter notebook."""
+ try:
+ get_ipython # type: ignore[name-defined]
+ except NameError:
+ return False
+ ipython = get_ipython() # type: ignore[name-defined]
+ shell = ipython.__class__.__name__
+ if (
+ "google.colab" in str(ipython.__class__)
+ or os.getenv("DATABRICKS_RUNTIME_VERSION")
+ or shell == "ZMQInteractiveShell"
+ ):
+ return True # Jupyter notebook or qtconsole
+ elif shell == "TerminalInteractiveShell":
+ return False # Terminal running IPython
+ else:
+ return False # Other type (?)
+
+
+COLOR_SYSTEMS = {
+ "standard": ColorSystem.STANDARD,
+ "256": ColorSystem.EIGHT_BIT,
+ "truecolor": ColorSystem.TRUECOLOR,
+ "windows": ColorSystem.WINDOWS,
+}
+
+_COLOR_SYSTEMS_NAMES = {system: name for name, system in COLOR_SYSTEMS.items()}
+
+
+@dataclass
+class ConsoleThreadLocals(threading.local):
+ """Thread local values for Console context."""
+
+ theme_stack: ThemeStack
+ buffer: List[Segment] = field(default_factory=list)
+ buffer_index: int = 0
+
+
+class RenderHook(ABC):
+ """Provides hooks in to the render process."""
+
+ @abstractmethod
+ def process_renderables(
+ self, renderables: List[ConsoleRenderable]
+ ) -> List[ConsoleRenderable]:
+ """Called with a list of objects to render.
+
+ This method can return a new list of renderables, or modify and return the same list.
+
+ Args:
+ renderables (List[ConsoleRenderable]): A number of renderable objects.
+
+ Returns:
+ List[ConsoleRenderable]: A replacement list of renderables.
+ """
+
+
+_windows_console_features: Optional["WindowsConsoleFeatures"] = None
+
+
+def get_windows_console_features() -> "WindowsConsoleFeatures": # pragma: no cover
+ global _windows_console_features
+ if _windows_console_features is not None:
+ return _windows_console_features
+ from ._windows import get_windows_console_features
+
+ _windows_console_features = get_windows_console_features()
+ return _windows_console_features
+
+
+def detect_legacy_windows() -> bool:
+ """Detect legacy Windows."""
+ return WINDOWS and not get_windows_console_features().vt
+
+
+class Console:
+ """A high level console interface.
+
+ Args:
+ color_system (str, optional): The color system supported by your terminal,
+ either ``"standard"``, ``"256"`` or ``"truecolor"``. Leave as ``"auto"`` to autodetect.
+ force_terminal (Optional[bool], optional): Enable/disable terminal control codes, or None to auto-detect terminal. Defaults to None.
+ force_jupyter (Optional[bool], optional): Enable/disable Jupyter rendering, or None to auto-detect Jupyter. Defaults to None.
+ force_interactive (Optional[bool], optional): Enable/disable interactive mode, or None to auto detect. Defaults to None.
+ soft_wrap (Optional[bool], optional): Set soft wrap default on print method. Defaults to False.
+ theme (Theme, optional): An optional style theme object, or ``None`` for default theme.
+ stderr (bool, optional): Use stderr rather than stdout if ``file`` is not specified. Defaults to False.
+ file (IO, optional): A file object where the console should write to. Defaults to stdout.
+ quiet (bool, optional): Boolean to suppress all output. Defaults to False.
+ width (int, optional): The width of the terminal. Leave as default to auto-detect width.
+ height (int, optional): The height of the terminal. Leave as default to auto-detect height.
+ style (StyleType, optional): Style to apply to all output, or None for no style. Defaults to None.
+ no_color (Optional[bool], optional): Enable no color mode, or None to auto detect. Defaults to None.
+ tab_size (int, optional): Number of spaces used to replace a tab character. Defaults to 8.
+ record (bool, optional): Boolean to enable recording of terminal output,
+ required to call :meth:`export_html`, :meth:`export_svg`, and :meth:`export_text`. Defaults to False.
+ markup (bool, optional): Boolean to enable :ref:`console_markup`. Defaults to True.
+ emoji (bool, optional): Enable emoji code. Defaults to True.
+ emoji_variant (str, optional): Optional emoji variant, either "text" or "emoji". Defaults to None.
+ highlight (bool, optional): Enable automatic highlighting. Defaults to True.
+ log_time (bool, optional): Boolean to enable logging of time by :meth:`log` methods. Defaults to True.
+ log_path (bool, optional): Boolean to enable the logging of the caller by :meth:`log`. Defaults to True.
+ log_time_format (Union[str, FormatTimeCallable], optional): If ``log_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%X]".
+ highlighter (HighlighterType, optional): Default highlighter.
+ legacy_windows (bool, optional): Enable legacy Windows mode, or ``None`` to auto detect. Defaults to ``None``.
+ safe_box (bool, optional): Restrict box options that don't render on legacy Windows.
+ get_datetime (Callable[[], datetime], optional): Callable that gets the current time as a datetime.datetime object (used by Console.log),
+ or None for datetime.now.
+ get_time (Callable[[], time], optional): Callable that gets the current time in seconds, default uses time.monotonic.
+ """
+
+ _environ: Mapping[str, str] = os.environ
+
+ def __init__(
+ self,
+ *,
+ color_system: Optional[
+ Literal["auto", "standard", "256", "truecolor", "windows"]
+ ] = "auto",
+ force_terminal: Optional[bool] = None,
+ force_jupyter: Optional[bool] = None,
+ force_interactive: Optional[bool] = None,
+ soft_wrap: bool = False,
+ theme: Optional[Theme] = None,
+ stderr: bool = False,
+ file: Optional[IO[str]] = None,
+ quiet: bool = False,
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ style: Optional[StyleType] = None,
+ no_color: Optional[bool] = None,
+ tab_size: int = 8,
+ record: bool = False,
+ markup: bool = True,
+ emoji: bool = True,
+ emoji_variant: Optional[EmojiVariant] = None,
+ highlight: bool = True,
+ log_time: bool = True,
+ log_path: bool = True,
+ log_time_format: Union[str, FormatTimeCallable] = "[%X]",
+ highlighter: Optional["HighlighterType"] = ReprHighlighter(),
+ legacy_windows: Optional[bool] = None,
+ safe_box: bool = True,
+ get_datetime: Optional[Callable[[], datetime]] = None,
+ get_time: Optional[Callable[[], float]] = None,
+ _environ: Optional[Mapping[str, str]] = None,
+ ):
+ # Copy of os.environ allows us to replace it for testing
+ if _environ is not None:
+ self._environ = _environ
+
+ self.is_jupyter = _is_jupyter() if force_jupyter is None else force_jupyter
+ if self.is_jupyter:
+ if width is None:
+ jupyter_columns = self._environ.get("JUPYTER_COLUMNS")
+ if jupyter_columns is not None and jupyter_columns.isdigit():
+ width = int(jupyter_columns)
+ else:
+ width = JUPYTER_DEFAULT_COLUMNS
+ if height is None:
+ jupyter_lines = self._environ.get("JUPYTER_LINES")
+ if jupyter_lines is not None and jupyter_lines.isdigit():
+ height = int(jupyter_lines)
+ else:
+ height = JUPYTER_DEFAULT_LINES
+
+ self.tab_size = tab_size
+ self.record = record
+ self._markup = markup
+ self._emoji = emoji
+ self._emoji_variant: Optional[EmojiVariant] = emoji_variant
+ self._highlight = highlight
+ self.legacy_windows: bool = (
+ (detect_legacy_windows() and not self.is_jupyter)
+ if legacy_windows is None
+ else legacy_windows
+ )
+
+ if width is None:
+ columns = self._environ.get("COLUMNS")
+ if columns is not None and columns.isdigit():
+ width = int(columns) - self.legacy_windows
+ if height is None:
+ lines = self._environ.get("LINES")
+ if lines is not None and lines.isdigit():
+ height = int(lines)
+
+ self.soft_wrap = soft_wrap
+ self._width = width
+ self._height = height
+
+ self._color_system: Optional[ColorSystem]
+
+ self._force_terminal = None
+ if force_terminal is not None:
+ self._force_terminal = force_terminal
+ else:
+ # If FORCE_COLOR env var has any value at all, we force terminal.
+ force_color = self._environ.get("FORCE_COLOR")
+ if force_color is not None:
+ self._force_terminal = True
+
+ self._file = file
+ self.quiet = quiet
+ self.stderr = stderr
+
+ if color_system is None:
+ self._color_system = None
+ elif color_system == "auto":
+ self._color_system = self._detect_color_system()
+ else:
+ self._color_system = COLOR_SYSTEMS[color_system]
+
+ self._lock = threading.RLock()
+ self._log_render = LogRender(
+ show_time=log_time,
+ show_path=log_path,
+ time_format=log_time_format,
+ )
+ self.highlighter: HighlighterType = highlighter or _null_highlighter
+ self.safe_box = safe_box
+ self.get_datetime = get_datetime or datetime.now
+ self.get_time = get_time or monotonic
+ self.style = style
+ self.no_color = (
+ no_color if no_color is not None else "NO_COLOR" in self._environ
+ )
+ self.is_interactive = (
+ (self.is_terminal and not self.is_dumb_terminal)
+ if force_interactive is None
+ else force_interactive
+ )
+
+ self._record_buffer_lock = threading.RLock()
+ self._thread_locals = ConsoleThreadLocals(
+ theme_stack=ThemeStack(themes.DEFAULT if theme is None else theme)
+ )
+ self._record_buffer: List[Segment] = []
+ self._render_hooks: List[RenderHook] = []
+ self._live: Optional["Live"] = None
+ self._is_alt_screen = False
+
+ def __repr__(self) -> str:
+ return f"<console width={self.width} {str(self._color_system)}>"
+
+ @property
+ def file(self) -> IO[str]:
+ """Get the file object to write to."""
+ file = self._file or (sys.stderr if self.stderr else sys.stdout)
+ file = getattr(file, "rich_proxied_file", file)
+ if file is None:
+ file = NULL_FILE
+ return file
+
+ @file.setter
+ def file(self, new_file: IO[str]) -> None:
+ """Set a new file object."""
+ self._file = new_file
+
+ @property
+ def _buffer(self) -> List[Segment]:
+ """Get a thread local buffer."""
+ return self._thread_locals.buffer
+
+ @property
+ def _buffer_index(self) -> int:
+ """Get a thread local buffer."""
+ return self._thread_locals.buffer_index
+
+ @_buffer_index.setter
+ def _buffer_index(self, value: int) -> None:
+ self._thread_locals.buffer_index = value
+
+ @property
+ def _theme_stack(self) -> ThemeStack:
+ """Get the thread local theme stack."""
+ return self._thread_locals.theme_stack
+
+ def _detect_color_system(self) -> Optional[ColorSystem]:
+ """Detect color system from env vars."""
+ if self.is_jupyter:
+ return ColorSystem.TRUECOLOR
+ if not self.is_terminal or self.is_dumb_terminal:
+ return None
+ if WINDOWS: # pragma: no cover
+ if self.legacy_windows: # pragma: no cover
+ return ColorSystem.WINDOWS
+ windows_console_features = get_windows_console_features()
+ return (
+ ColorSystem.TRUECOLOR
+ if windows_console_features.truecolor
+ else ColorSystem.EIGHT_BIT
+ )
+ else:
+ color_term = self._environ.get("COLORTERM", "").strip().lower()
+ if color_term in ("truecolor", "24bit"):
+ return ColorSystem.TRUECOLOR
+ term = self._environ.get("TERM", "").strip().lower()
+ _term_name, _hyphen, colors = term.rpartition("-")
+ color_system = _TERM_COLORS.get(colors, ColorSystem.STANDARD)
+ return color_system
+
+ def _enter_buffer(self) -> None:
+ """Enter in to a buffer context, and buffer all output."""
+ self._buffer_index += 1
+
+ def _exit_buffer(self) -> None:
+ """Leave buffer context, and render content if required."""
+ self._buffer_index -= 1
+ self._check_buffer()
+
+ def set_live(self, live: "Live") -> None:
+ """Set Live instance. Used by Live context manager.
+
+ Args:
+ live (Live): Live instance using this Console.
+
+ Raises:
+ errors.LiveError: If this Console has a Live context currently active.
+ """
+ with self._lock:
+ if self._live is not None:
+ raise errors.LiveError("Only one live display may be active at once")
+ self._live = live
+
+ def clear_live(self) -> None:
+ """Clear the Live instance."""
+ with self._lock:
+ self._live = None
+
+ def push_render_hook(self, hook: RenderHook) -> None:
+ """Add a new render hook to the stack.
+
+ Args:
+ hook (RenderHook): Render hook instance.
+ """
+ with self._lock:
+ self._render_hooks.append(hook)
+
+ def pop_render_hook(self) -> None:
+ """Pop the last renderhook from the stack."""
+ with self._lock:
+ self._render_hooks.pop()
+
+ def __enter__(self) -> "Console":
+ """Own context manager to enter buffer context."""
+ self._enter_buffer()
+ return self
+
+ def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
+ """Exit buffer context."""
+ self._exit_buffer()
+
+ def begin_capture(self) -> None:
+ """Begin capturing console output. Call :meth:`end_capture` to exit capture mode and return output."""
+ self._enter_buffer()
+
+ def end_capture(self) -> str:
+ """End capture mode and return captured string.
+
+ Returns:
+ str: Console output.
+ """
+ render_result = self._render_buffer(self._buffer)
+ del self._buffer[:]
+ self._exit_buffer()
+ return render_result
+
+ def push_theme(self, theme: Theme, *, inherit: bool = True) -> None:
+ """Push a new theme on to the top of the stack, replacing the styles from the previous theme.
+ Generally speaking, you should call :meth:`~rich.console.Console.use_theme` to get a context manager, rather
+ than calling this method directly.
+
+ Args:
+ theme (Theme): A theme instance.
+ inherit (bool, optional): Inherit existing styles. Defaults to True.
+ """
+ self._theme_stack.push_theme(theme, inherit=inherit)
+
+ def pop_theme(self) -> None:
+ """Remove theme from top of stack, restoring previous theme."""
+ self._theme_stack.pop_theme()
+
+ def use_theme(self, theme: Theme, *, inherit: bool = True) -> ThemeContext:
+ """Use a different theme for the duration of the context manager.
+
+ Args:
+ theme (Theme): Theme instance to use.
+ inherit (bool, optional): Inherit existing console styles. Defaults to True.
+
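+ Example:
+ >>> from rich.theme import Theme
+ >>> with console.use_theme(Theme({"warning": "bold red"})):
+ ... console.print("Look out!", style="warning")
+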
+ Returns:
+ ThemeContext: A context manager that pushes the theme on enter and pops it on exit.
+ """
+ return ThemeContext(self, theme, inherit)
+
+ @property
+ def color_system(self) -> Optional[str]:
+ """Get color system string.
+
+ Returns:
+ Optional[str]: "standard", "256" or "truecolor".
+ """
+
+ if self._color_system is not None:
+ return _COLOR_SYSTEMS_NAMES[self._color_system]
+ else:
+ return None
+
+ @property
+ def encoding(self) -> str:
+ """Get the encoding of the console file, e.g. ``"utf-8"``.
+
+ Returns:
+ str: A standard encoding string.
+ """
+ return (getattr(self.file, "encoding", "utf-8") or "utf-8").lower()
+
+ @property
+ def is_terminal(self) -> bool:
+ """Check if the console is writing to a terminal.
+
+ Returns:
+ bool: True if the console is writing to a device capable of
+ understanding terminal codes, otherwise False.
+ """
+ if self._force_terminal is not None:
+ return self._force_terminal
+
+ if hasattr(sys.stdin, "__module__") and sys.stdin.__module__.startswith(
+ "idlelib"
+ ):
+ # Return False for Idle which claims to be a tty but can't handle ansi codes
+ return False
+
+ isatty: Optional[Callable[[], bool]] = getattr(self.file, "isatty", None)
+ try:
+ return False if isatty is None else isatty()
+ except ValueError:
+ # in some situation (at the end of a pytest run for example) isatty() can raise
+ # ValueError: I/O operation on closed file
+ # return False because we aren't in a terminal anymore
+ return False
+
+ @property
+ def is_dumb_terminal(self) -> bool:
+ """Detect dumb terminal.
+
+ Returns:
+ bool: True if writing to a dumb terminal, otherwise False.
+
+ """
+ _term = self._environ.get("TERM", "")
+ is_dumb = _term.lower() in ("dumb", "unknown")
+ return self.is_terminal and is_dumb
+
+ @property
+ def options(self) -> ConsoleOptions:
+ """Get default console options."""
+ return ConsoleOptions(
+ max_height=self.size.height,
+ size=self.size,
+ legacy_windows=self.legacy_windows,
+ min_width=1,
+ max_width=self.width,
+ encoding=self.encoding,
+ is_terminal=self.is_terminal,
+ )
+
+ @property
+ def size(self) -> ConsoleDimensions:
+ """Get the size of the console.
+
+ Returns:
+ ConsoleDimensions: A named tuple containing the dimensions.
+ """
+
+ if self._width is not None and self._height is not None:
+ return ConsoleDimensions(self._width - self.legacy_windows, self._height)
+
+ if self.is_dumb_terminal:
+ return ConsoleDimensions(80, 25)
+
+ width: Optional[int] = None
+ height: Optional[int] = None
+
+ if WINDOWS: # pragma: no cover
+ try:
+ width, height = os.get_terminal_size()
+ except (AttributeError, ValueError, OSError): # Probably not a terminal
+ pass
+ else:
+ for file_descriptor in _STD_STREAMS:
+ try:
+ width, height = os.get_terminal_size(file_descriptor)
+ except (AttributeError, ValueError, OSError):
+ pass
+ else:
+ break
+
+ columns = self._environ.get("COLUMNS")
+ if columns is not None and columns.isdigit():
+ width = int(columns)
+ lines = self._environ.get("LINES")
+ if lines is not None and lines.isdigit():
+ height = int(lines)
+
+ # get_terminal_size can report 0, 0 if run from pseudo-terminal
+ width = width or 80
+ height = height or 25
+ return ConsoleDimensions(
+ width - self.legacy_windows if self._width is None else self._width,
+ height if self._height is None else self._height,
+ )
+
+ @size.setter
+ def size(self, new_size: Tuple[int, int]) -> None:
+ """Set a new size for the terminal.
+
+ Args:
+ new_size (Tuple[int, int]): New width and height.
+ """
+ width, height = new_size
+ self._width = width
+ self._height = height
+
+ @property
+ def width(self) -> int:
+ """Get the width of the console.
+
+ Returns:
+ int: The width (in characters) of the console.
+ """
+ return self.size.width
+
+ @width.setter
+ def width(self, width: int) -> None:
+ """Set width.
+
+ Args:
+ width (int): New width.
+ """
+ self._width = width
+
+ @property
+ def height(self) -> int:
+ """Get the height of the console.
+
+ Returns:
+ int: The height (in lines) of the console.
+ """
+ return self.size.height
+
+ @height.setter
+ def height(self, height: int) -> None:
+ """Set height.
+
+ Args:
+ height (int): new height.
+ """
+ self._height = height
+
+ def bell(self) -> None:
+ """Play a 'bell' sound (if supported by the terminal)."""
+ self.control(Control.bell())
+
+ def capture(self) -> Capture:
+ """A context manager to *capture* the result of print() or log() in a string,
+ rather than writing it to the console.
+
+ Example:
+ >>> from rich.console import Console
+ >>> console = Console()
+ >>> with console.capture() as capture:
+ ... console.print("[bold magenta]Hello World[/]")
+ >>> print(capture.get())
+
+ Returns:
+ Capture: Context manager that disables writing to the terminal.
+ """
+ capture = Capture(self)
+ return capture
+
+ def pager(
+ self, pager: Optional[Pager] = None, styles: bool = False, links: bool = False
+ ) -> PagerContext:
+ """A context manager to display anything printed within a "pager". The pager application
+ is defined by the system and will typically support at least pressing a key to scroll.
+
+ Args:
+ pager (Pager, optional): A pager object, or None to use :class:`~rich.pager.SystemPager`. Defaults to None.
+ styles (bool, optional): Show styles in pager. Defaults to False.
+ links (bool, optional): Show links in pager. Defaults to False.
+
+ Example:
+ >>> from rich.console import Console
+ >>> from rich.__main__ import make_test_card
+ >>> console = Console()
+ >>> with console.pager():
+ console.print(make_test_card())
+
+ Returns:
+ PagerContext: A context manager.
+ """
+ return PagerContext(self, pager=pager, styles=styles, links=links)
+
+ def line(self, count: int = 1) -> None:
+ """Write new line(s).
+
+ Args:
+ count (int, optional): Number of new lines. Defaults to 1.
+ """
+
+ assert count >= 0, "count must be >= 0"
+ self.print(NewLine(count))
+
+ def clear(self, home: bool = True) -> None:
+ """Clear the screen.
+
+ Args:
+ home (bool, optional): Also move the cursor to 'home' position. Defaults to True.
+ """
+ if home:
+ self.control(Control.clear(), Control.home())
+ else:
+ self.control(Control.clear())
+
+ def status(
+ self,
+ status: RenderableType,
+ *,
+ spinner: str = "dots",
+ spinner_style: str = "status.spinner",
+ speed: float = 1.0,
+ refresh_per_second: float = 12.5,
+ ) -> "Status":
+ """Display a status and spinner.
+
+ Args:
+ status (RenderableType): A status renderable (str or Text typically).
+ spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
+ spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
+ speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
+ refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
+
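+ Example:
+ >>> import time
+ >>> console = Console()
+ >>> with console.status("Working..."):
+ ... time.sleep(1)
+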
+ Returns:
+ Status: A Status object that may be used as a context manager.
+ """
+ from .status import Status
+
+ status_renderable = Status(
+ status,
+ console=self,
+ spinner=spinner,
+ spinner_style=spinner_style,
+ speed=speed,
+ refresh_per_second=refresh_per_second,
+ )
+ return status_renderable
+
+ def show_cursor(self, show: bool = True) -> bool:
+ """Show or hide the cursor.
+
+ Args:
+ show (bool, optional): Set visibility of the cursor.
+ """
+ if self.is_terminal:
+ self.control(Control.show_cursor(show))
+ return True
+ return False
+
+ def set_alt_screen(self, enable: bool = True) -> bool:
+ """Enables alternative screen mode.
+
+ Note, if you enable this mode, you should ensure that it is disabled before
+ the application exits. See :meth:`~rich.Console.screen` for a context manager
+ that handles this for you.
+
+ Args:
+ enable (bool, optional): Enable (True) or disable (False) alternate screen. Defaults to True.
+
+ Returns:
+ bool: True if the control codes were written.
+
+ """
+ changed = False
+ if self.is_terminal and not self.legacy_windows:
+ self.control(Control.alt_screen(enable))
+ changed = True
+ self._is_alt_screen = enable
+ return changed
+
+ @property
+ def is_alt_screen(self) -> bool:
+ """Check if the alt screen was enabled.
+
+ Returns:
+ bool: True if the alt screen was enabled, otherwise False.
+ """
+ return self._is_alt_screen
+
+ def set_window_title(self, title: str) -> bool:
+ """Set the title of the console terminal window.
+
+ Warning: There is no means within Rich of "resetting" the window title to its
+ previous value, meaning the title you set will persist even after your application
+ exits.
+
+ ``fish`` shell resets the window title before and after each command by default,
+ negating this issue. Windows Terminal and command prompt will also reset the title for you.
+ Most other shells and terminals, however, do not do this.
+
+ Some terminals may require configuration changes before you can set the title.
+ Some terminals may not support setting the title at all.
+
+ Other software (including the terminal itself, the shell, custom prompts, plugins, etc.)
+ may also set the terminal window title. This could result in whatever value you write
+ using this method being overwritten.
+
+ Args:
+ title (str): The new title of the terminal window.
+
+ Returns:
+ bool: True if the control code to change the terminal title was
+ written, otherwise False. Note that a return value of True
+ does not guarantee that the window title has actually changed,
+ since the feature may be unsupported/disabled in some terminals.
+ """
+ if self.is_terminal:
+ self.control(Control.title(title))
+ return True
+ return False
+
+ def screen(
+ self, hide_cursor: bool = True, style: Optional[StyleType] = None
+ ) -> "ScreenContext":
+ """Context manager to enable and disable 'alternative screen' mode.
+
+ Args:
+ hide_cursor (bool, optional): Also hide the cursor. Defaults to True.
+ style (Style, optional): Optional style for screen. Defaults to None.
+
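+ Example:
+ >>> with console.screen():
+ ... console.print("[bold]Alternate screen[/]")
+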
+ Returns:
+ ~ScreenContext: Context which enables alternate screen on enter, and disables it on exit.
+ """
+ return ScreenContext(self, hide_cursor=hide_cursor, style=style or "")
+
+ def measure(
+ self, renderable: RenderableType, *, options: Optional[ConsoleOptions] = None
+ ) -> Measurement:
+ """Measure a renderable. Returns a :class:`~rich.measure.Measurement` object which contains
+ information regarding the number of characters required to print the renderable.
+
+ Args:
+ renderable (RenderableType): Any renderable or string.
+ options (Optional[ConsoleOptions], optional): Options to use when measuring, or None
+ to use default options. Defaults to None.
+
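+ Example:
+ >>> console = Console()
+ >>> console.measure("Hello World")
+ Measurement(minimum=5, maximum=11)
+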
+ Returns:
+ Measurement: A measurement of the renderable.
+ """
+ measurement = Measurement.get(self, options or self.options, renderable)
+ return measurement
+
+ def render(
+ self, renderable: RenderableType, options: Optional[ConsoleOptions] = None
+ ) -> Iterable[Segment]:
+ """Render an object in to an iterable of `Segment` instances.
+
+ This method contains the logic for rendering objects with the console protocol.
+ You are unlikely to need to use it directly, unless you are extending the library.
+
+ Args:
+ renderable (RenderableType): An object supporting the console protocol, or
+ an object that may be converted to a string.
+ options (ConsoleOptions, optional): An options object, or None to use self.options. Defaults to None.
+
+ Returns:
+ Iterable[Segment]: An iterable of segments that may be rendered.
+ """
+
+ _options = options or self.options
+ if _options.max_width < 1:
+ # No space to render anything. This prevents potential recursion errors.
+ return
+ render_iterable: RenderResult
+
+ renderable = rich_cast(renderable)
+ if hasattr(renderable, "__rich_console__") and not isclass(renderable):
+ render_iterable = renderable.__rich_console__(self, _options) # type: ignore[union-attr]
+ elif isinstance(renderable, str):
+ text_renderable = self.render_str(
+ renderable, highlight=_options.highlight, markup=_options.markup
+ )
+ render_iterable = text_renderable.__rich_console__(self, _options)
+ else:
+ raise errors.NotRenderableError(
+ f"Unable to render {renderable!r}; "
+ "A str, Segment or object with __rich_console__ method is required"
+ )
+
+ try:
+ iter_render = iter(render_iterable)
+ except TypeError:
+ raise errors.NotRenderableError(
+ f"object {render_iterable!r} is not renderable"
+ )
+ _Segment = Segment
+ _options = _options.reset_height()
+ for render_output in iter_render:
+ if isinstance(render_output, _Segment):
+ yield render_output
+ else:
+ yield from self.render(render_output, _options)
+
+ def render_lines(
+ self,
+ renderable: RenderableType,
+ options: Optional[ConsoleOptions] = None,
+ *,
+ style: Optional[Style] = None,
+ pad: bool = True,
+ new_lines: bool = False,
+ ) -> List[List[Segment]]:
+ """Render objects in to a list of lines.
+
+ The output of render_lines is useful when further formatting of rendered console text
+ is required, such as the Panel class which draws a border around any renderable object.
+
+ Args:
+ renderable (RenderableType): Any object renderable in the console.
+ options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Defaults to ``None``.
+ style (Style, optional): Optional style to apply to renderables. Defaults to ``None``.
+ pad (bool, optional): Pad lines shorter than render width. Defaults to ``True``.
+ new_lines (bool, optional): Include "\n" characters at end of lines. Defaults to ``False``.
+
+ Returns:
+ List[List[Segment]]: A list of lines, where a line is a list of Segment objects.
+ """
+ with self._lock:
+ render_options = options or self.options
+ _rendered = self.render(renderable, render_options)
+ if style:
+ _rendered = Segment.apply_style(_rendered, style)
+
+ render_height = render_options.height
+ if render_height is not None:
+ render_height = max(0, render_height)
+
+ lines = list(
+ islice(
+ Segment.split_and_crop_lines(
+ _rendered,
+ render_options.max_width,
+ include_new_lines=new_lines,
+ pad=pad,
+ style=style,
+ ),
+ None,
+ render_height,
+ )
+ )
+ if render_options.height is not None:
+ extra_lines = render_options.height - len(lines)
+ if extra_lines > 0:
+ pad_line = [
+ [Segment(" " * render_options.max_width, style), Segment("\n")]
+ if new_lines
+ else [Segment(" " * render_options.max_width, style)]
+ ]
+ lines.extend(pad_line * extra_lines)
+
+ return lines
+
+ def render_str(
+ self,
+ text: str,
+ *,
+ style: Union[str, Style] = "",
+ justify: Optional[JustifyMethod] = None,
+ overflow: Optional[OverflowMethod] = None,
+ emoji: Optional[bool] = None,
+ markup: Optional[bool] = None,
+ highlight: Optional[bool] = None,
+ highlighter: Optional[HighlighterType] = None,
+ ) -> "Text":
+ """Convert a string to a Text instance. This is called automatically if
+ you print or log a string.
+
+ Args:
+ text (str): Text to render.
+ style (Union[str, Style], optional): Style to apply to rendered text.
+ justify (str, optional): Justify method: "default", "left", "center", "full", or "right". Defaults to ``None``.
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to ``None``.
+ emoji (Optional[bool], optional): Enable emoji, or ``None`` to use Console default.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use Console default.
+ highlight (Optional[bool], optional): Enable highlighting, or ``None`` to use Console default.
+ highlighter (HighlighterType, optional): Optional highlighter to apply.
+ Returns:
+ Text: Renderable text object.
+
+ """
+ emoji_enabled = emoji or (emoji is None and self._emoji)
+ markup_enabled = markup or (markup is None and self._markup)
+ highlight_enabled = highlight or (highlight is None and self._highlight)
+
+ if markup_enabled:
+ rich_text = render_markup(
+ text,
+ style=style,
+ emoji=emoji_enabled,
+ emoji_variant=self._emoji_variant,
+ )
+ rich_text.justify = justify
+ rich_text.overflow = overflow
+ else:
+ rich_text = Text(
+ _emoji_replace(text, default_variant=self._emoji_variant)
+ if emoji_enabled
+ else text,
+ justify=justify,
+ overflow=overflow,
+ style=style,
+ )
+
+ _highlighter = (highlighter or self.highlighter) if highlight_enabled else None
+ if _highlighter is not None:
+ highlight_text = _highlighter(str(rich_text))
+ highlight_text.copy_styles(rich_text)
+ return highlight_text
+
+ return rich_text
+
+ def get_style(
+ self, name: Union[str, Style], *, default: Optional[Union[Style, str]] = None
+ ) -> Style:
+ """Get a Style instance by its theme name or parse a definition.
+
+ Args:
+ name (str): The name of a style or a style definition.
+
+ Returns:
+ Style: A Style object.
+
+ Raises:
+ MissingStyle: If no style could be parsed from name.
+
+ """
+ if isinstance(name, Style):
+ return name
+
+ try:
+ style = self._theme_stack.get(name)
+ if style is None:
+ style = Style.parse(name)
+ return style.copy() if style.link else style
+ except errors.StyleSyntaxError as error:
+ if default is not None:
+ return self.get_style(default)
+ raise errors.MissingStyle(
+ f"Failed to get style {name!r}; {error}"
+ ) from None
+
+ def _collect_renderables(
+ self,
+ objects: Iterable[Any],
+ sep: str,
+ end: str,
+ *,
+ justify: Optional[JustifyMethod] = None,
+ emoji: Optional[bool] = None,
+ markup: Optional[bool] = None,
+ highlight: Optional[bool] = None,
+ ) -> List[ConsoleRenderable]:
+ """Combine a number of renderables and text into one renderable.
+
+ Args:
+ objects (Iterable[Any]): Anything that Rich can render.
+ sep (str): String to write between print data.
+ end (str): String to write at end of print data.
+ justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default.
+
+ Returns:
+ List[ConsoleRenderable]: A list of things to render.
+ """
+ renderables: List[ConsoleRenderable] = []
+ _append = renderables.append
+ text: List[Text] = []
+ append_text = text.append
+
+ append = _append
+ if justify in ("left", "center", "right"):
+
+ def align_append(renderable: RenderableType) -> None:
+ _append(Align(renderable, cast(AlignMethod, justify)))
+
+ append = align_append
+
+ _highlighter: HighlighterType = _null_highlighter
+ if highlight or (highlight is None and self._highlight):
+ _highlighter = self.highlighter
+
+ def check_text() -> None:
+ if text:
+ sep_text = Text(sep, justify=justify, end=end)
+ append(sep_text.join(text))
+ del text[:]
+
+ for renderable in objects:
+ renderable = rich_cast(renderable)
+ if isinstance(renderable, str):
+ append_text(
+ self.render_str(
+ renderable, emoji=emoji, markup=markup, highlighter=_highlighter
+ )
+ )
+ elif isinstance(renderable, Text):
+ append_text(renderable)
+ elif isinstance(renderable, ConsoleRenderable):
+ check_text()
+ append(renderable)
+ elif is_expandable(renderable):
+ check_text()
+ append(Pretty(renderable, highlighter=_highlighter))
+ else:
+ append_text(_highlighter(str(renderable)))
+
+ check_text()
+
+ if self.style is not None:
+ style = self.get_style(self.style)
+ renderables = [Styled(renderable, style) for renderable in renderables]
+
+ return renderables
+
+ def rule(
+ self,
+ title: TextType = "",
+ *,
+ characters: str = "─",
+ style: Union[str, Style] = "rule.line",
+ align: AlignMethod = "center",
+ ) -> None:
+ """Draw a line with optional centered title.
+
+ Args:
+ title (str, optional): Text to render over the rule. Defaults to "".
+ characters (str, optional): Character(s) to form the line. Defaults to "─".
+ style (str, optional): Style of line. Defaults to "rule.line".
+ align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
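+
+ Example:
+ >>> console.rule("[bold red]Chapter One")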
+ """
+ from .rule import Rule
+
+ rule = Rule(title=title, characters=characters, style=style, align=align)
+ self.print(rule)
+
+ def control(self, *control: Control) -> None:
+ """Insert non-printing control codes.
+
+ Args:
+ control_codes (str): Control codes, such as those that may move the cursor.
+ """
+ if not self.is_dumb_terminal:
+ with self:
+ self._buffer.extend(_control.segment for _control in control)
+
+ def out(
+ self,
+ *objects: Any,
+ sep: str = " ",
+ end: str = "\n",
+ style: Optional[Union[str, Style]] = None,
+ highlight: Optional[bool] = None,
+ ) -> None:
+ """Output to the terminal. This is a low-level way of writing to the terminal which unlike
+ :meth:`~rich.console.Console.print` won't pretty print, wrap text, or apply markup, but will
+ optionally apply highlighting and a basic style.
+
+ Args:
+ objects (positional args): Objects to write to the terminal.
+ sep (str, optional): String to write between print data. Defaults to " ".
+ end (str, optional): String to write at end of print data. Defaults to "\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use
+ console default. Defaults to ``None``.
+ """
+ raw_output: str = sep.join(str(_object) for _object in objects)
+ self.print(
+ raw_output,
+ style=style,
+ highlight=highlight,
+ emoji=False,
+ markup=False,
+ no_wrap=True,
+ overflow="ignore",
+ crop=False,
+ end=end,
+ )
+
+ def print(
+ self,
+ *objects: Any,
+ sep: str = " ",
+ end: str = "\n",
+ style: Optional[Union[str, Style]] = None,
+ justify: Optional[JustifyMethod] = None,
+ overflow: Optional[OverflowMethod] = None,
+ no_wrap: Optional[bool] = None,
+ emoji: Optional[bool] = None,
+ markup: Optional[bool] = None,
+ highlight: Optional[bool] = None,
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ crop: bool = True,
+ soft_wrap: Optional[bool] = None,
+ new_line_start: bool = False,
+ ) -> None:
+ """Print to the console.
+
+ Args:
+ objects (positional args): Objects to print to the terminal.
+ sep (str, optional): String to write between print data. Defaults to " ".
+ end (str, optional): String to write at end of print data. Defaults to "\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ justify (str, optional): Justify method: "default", "left", "right", "center", or "full". Defaults to ``None``.
+ overflow (str, optional): Overflow method: "ignore", "crop", "fold", or "ellipsis". Defaults to None.
+ no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to None.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to ``None``.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to ``None``.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to ``None``.
+ width (Optional[int], optional): Width of output, or ``None`` to auto-detect. Defaults to ``None``.
+ height (Optional[int], optional): Height of output, or ``None`` to auto-detect. Defaults to ``None``.
+ crop (bool, optional): Crop output to width of terminal. Defaults to True.
+ soft_wrap (bool, optional): Enable soft wrap mode which disables word wrapping and cropping of text or ``None`` for
+ Console default. Defaults to ``None``.
+ new_line_start (bool, optional): Insert a new line at the start if the output contains more than one line. Defaults to ``False``.
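+
+ Example:
+ >>> console = Console()
+ >>> console.print("Hello, [bold magenta]World[/]!")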
+ """
+ if not objects:
+ objects = (NewLine(),)
+
+ if soft_wrap is None:
+ soft_wrap = self.soft_wrap
+ if soft_wrap:
+ if no_wrap is None:
+ no_wrap = True
+ if overflow is None:
+ overflow = "ignore"
+ crop = False
+ render_hooks = self._render_hooks[:]
+ with self:
+ renderables = self._collect_renderables(
+ objects,
+ sep,
+ end,
+ justify=justify,
+ emoji=emoji,
+ markup=markup,
+ highlight=highlight,
+ )
+ for hook in render_hooks:
+ renderables = hook.process_renderables(renderables)
+ render_options = self.options.update(
+ justify=justify,
+ overflow=overflow,
+ width=min(width, self.width) if width is not None else NO_CHANGE,
+ height=height,
+ no_wrap=no_wrap,
+ markup=markup,
+ highlight=highlight,
+ )
+
+ new_segments: List[Segment] = []
+ extend = new_segments.extend
+ render = self.render
+ if style is None:
+ for renderable in renderables:
+ extend(render(renderable, render_options))
+ else:
+ for renderable in renderables:
+ extend(
+ Segment.apply_style(
+ render(renderable, render_options), self.get_style(style)
+ )
+ )
+ if new_line_start:
+ if (
+ len("".join(segment.text for segment in new_segments).splitlines())
+ > 1
+ ):
+ new_segments.insert(0, Segment.line())
+ if crop:
+ buffer_extend = self._buffer.extend
+ for line in Segment.split_and_crop_lines(
+ new_segments, self.width, pad=False
+ ):
+ buffer_extend(line)
+ else:
+ self._buffer.extend(new_segments)
+
+ def print_json(
+ self,
+ json: Optional[str] = None,
+ *,
+ data: Any = None,
+ indent: Union[None, int, str] = 2,
+ highlight: bool = True,
+ skip_keys: bool = False,
+ ensure_ascii: bool = False,
+ check_circular: bool = True,
+ allow_nan: bool = True,
+ default: Optional[Callable[[Any], Any]] = None,
+ sort_keys: bool = False,
+ ) -> None:
+ """Pretty prints JSON. Output will be valid JSON.
+
+ Args:
+ json (Optional[str]): A string containing JSON.
+ data (Any): If json is not supplied, then encode this data.
+ indent (Union[None, int, str], optional): Number of spaces to indent. Defaults to 2.
+ highlight (bool, optional): Enable highlighting of output. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+ default (Callable, optional): A callable that converts values that can not be encoded
+ into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
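+
+ Example:
+ >>> console = Console()
+ >>> console.print_json('{"name": "apple", "count": 1}')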
+ """
+ from pip._vendor.rich.json import JSON
+
+ if json is None:
+ json_renderable = JSON.from_data(
+ data,
+ indent=indent,
+ highlight=highlight,
+ skip_keys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+ else:
+ if not isinstance(json, str):
+ raise TypeError(
+ f"json must be str. Did you mean print_json(data={json!r}) ?"
+ )
+ json_renderable = JSON(
+ json,
+ indent=indent,
+ highlight=highlight,
+ skip_keys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+ self.print(json_renderable, soft_wrap=True)
+
+ def update_screen(
+ self,
+ renderable: RenderableType,
+ *,
+ region: Optional[Region] = None,
+ options: Optional[ConsoleOptions] = None,
+ ) -> None:
+ """Update the screen at a given offset.
+
+ Args:
+ renderable (RenderableType): A Rich renderable.
+ region (Region, optional): Region of screen to update, or None for entire screen. Defaults to None.
+ options (Optional[ConsoleOptions], optional): Console options, or None to use self.options. Defaults to None.
+
+ Raises:
+ errors.NoAltScreen: If the Console isn't in alt screen mode.
+
+ """
+ if not self.is_alt_screen:
+ raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
+ render_options = options or self.options
+ if region is None:
+ x = y = 0
+ render_options = render_options.update_dimensions(
+ render_options.max_width, render_options.height or self.height
+ )
+ else:
+ x, y, width, height = region
+ render_options = render_options.update_dimensions(width, height)
+
+ lines = self.render_lines(renderable, options=render_options)
+ self.update_screen_lines(lines, x, y)
+
+ def update_screen_lines(
+ self, lines: List[List[Segment]], x: int = 0, y: int = 0
+ ) -> None:
+ """Update lines of the screen at a given offset.
+
+ Args:
+ lines (List[List[Segment]]): Rendered lines (as produced by :meth:`~rich.Console.render_lines`).
+ x (int, optional): x offset (column no). Defaults to 0.
+ y (int, optional): y offset (row no). Defaults to 0.
+
+ Raises:
+ errors.NoAltScreen: If the Console isn't in alt screen mode.
+ """
+ if not self.is_alt_screen:
+ raise errors.NoAltScreen("Alt screen must be enabled to call update_screen")
+ screen_update = ScreenUpdate(lines, x, y)
+ segments = self.render(screen_update)
+ self._buffer.extend(segments)
+ self._check_buffer()
+
+ def print_exception(
+ self,
+ *,
+ width: Optional[int] = 100,
+ extra_lines: int = 3,
+ theme: Optional[str] = None,
+ word_wrap: bool = False,
+ show_locals: bool = False,
+ suppress: Iterable[Union[str, ModuleType]] = (),
+ max_frames: int = 100,
+ ) -> None:
+ """Prints a rich render of the last exception and traceback.
+
+ Args:
+ width (Optional[int], optional): Number of characters used to render code. Defaults to 100.
+ extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+ theme (str, optional): Override pygments theme used in traceback.
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+ max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
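+
+ Example:
+ >>> try:
+ ... 1 / 0
+ ... except ZeroDivisionError:
+ ... console.print_exception(show_locals=True)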
+ """
+ from .traceback import Traceback
+
+ traceback = Traceback(
+ width=width,
+ extra_lines=extra_lines,
+ theme=theme,
+ word_wrap=word_wrap,
+ show_locals=show_locals,
+ suppress=suppress,
+ max_frames=max_frames,
+ )
+ self.print(traceback)
+
+ @staticmethod
+ def _caller_frame_info(
+ offset: int,
+ currentframe: Callable[[], Optional[FrameType]] = inspect.currentframe,
+ ) -> Tuple[str, int, Dict[str, Any]]:
+ """Get caller frame information.
+
+ Args:
+ offset (int): the caller offset within the current frame stack.
+ currentframe (Callable[[], Optional[FrameType]], optional): the callable to use to
+ retrieve the current frame. Defaults to ``inspect.currentframe``.
+
+ Returns:
+ Tuple[str, int, Dict[str, Any]]: A tuple containing the filename, the line number and
+ the dictionary of local variables associated with the caller frame.
+
+ Raises:
+ RuntimeError: If the stack offset is invalid.
+ """
+ # Ignore the frame of this local helper
+ offset += 1
+
+ frame = currentframe()
+ if frame is not None:
+ # Use the faster currentframe where implemented
+ while offset and frame is not None:
+ frame = frame.f_back
+ offset -= 1
+ assert frame is not None
+ return frame.f_code.co_filename, frame.f_lineno, frame.f_locals
+ else:
+ # Fallback to the slower stack
+ frame_info = inspect.stack()[offset]
+ return frame_info.filename, frame_info.lineno, frame_info.frame.f_locals
+
+ def log(
+ self,
+ *objects: Any,
+ sep: str = " ",
+ end: str = "\n",
+ style: Optional[Union[str, Style]] = None,
+ justify: Optional[JustifyMethod] = None,
+ emoji: Optional[bool] = None,
+ markup: Optional[bool] = None,
+ highlight: Optional[bool] = None,
+ log_locals: bool = False,
+ _stack_offset: int = 1,
+ ) -> None:
+ """Log rich content to the terminal.
+
+ Args:
+ objects (positional args): Objects to log to the terminal.
+ sep (str, optional): String to write between print data. Defaults to " ".
+ end (str, optional): String to write at end of print data. Defaults to "\\n".
+ style (Union[str, Style], optional): A style to apply to output. Defaults to None.
+ justify (str, optional): One of "left", "right", "center", or "full". Defaults to ``None``.
+ emoji (Optional[bool], optional): Enable emoji code, or ``None`` to use console default. Defaults to None.
+ markup (Optional[bool], optional): Enable markup, or ``None`` to use console default. Defaults to None.
+ highlight (Optional[bool], optional): Enable automatic highlighting, or ``None`` to use console default. Defaults to None.
+ log_locals (bool, optional): Boolean to enable logging of locals where ``log()``
+ was called. Defaults to False.
+ _stack_offset (int, optional): Offset of caller from end of call stack. Defaults to 1.
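+
+ Example:
+ >>> console = Console()
+ >>> console.log("Hello, World!", log_locals=True)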
+ """
+ if not objects:
+ objects = (NewLine(),)
+
+ render_hooks = self._render_hooks[:]
+
+ with self:
+ renderables = self._collect_renderables(
+ objects,
+ sep,
+ end,
+ justify=justify,
+ emoji=emoji,
+ markup=markup,
+ highlight=highlight,
+ )
+ if style is not None:
+ renderables = [Styled(renderable, style) for renderable in renderables]
+
+ filename, line_no, locals = self._caller_frame_info(_stack_offset)
+ link_path = None if filename.startswith("<") else os.path.abspath(filename)
+ path = filename.rpartition(os.sep)[-1]
+ if log_locals:
+ locals_map = {
+ key: value
+ for key, value in locals.items()
+ if not key.startswith("__")
+ }
+ renderables.append(render_scope(locals_map, title="[i]locals"))
+
+ renderables = [
+ self._log_render(
+ self,
+ renderables,
+ log_time=self.get_datetime(),
+ path=path,
+ line_no=line_no,
+ link_path=link_path,
+ )
+ ]
+ for hook in render_hooks:
+ renderables = hook.process_renderables(renderables)
+ new_segments: List[Segment] = []
+ extend = new_segments.extend
+ render = self.render
+ render_options = self.options
+ for renderable in renderables:
+ extend(render(renderable, render_options))
+ buffer_extend = self._buffer.extend
+ for line in Segment.split_and_crop_lines(
+ new_segments, self.width, pad=False
+ ):
+ buffer_extend(line)
+
+ def _check_buffer(self) -> None:
+ """Check if the buffer may be rendered. Render it if it can (e.g. Console.quiet is False)
+ Rendering is supported on Windows, Unix and Jupyter environments. For
+ legacy Windows consoles, the win32 API is called directly.
+ This method will also record what it renders if recording is enabled via Console.record.
+ """
+ if self.quiet:
+ del self._buffer[:]
+ return
+ with self._lock:
+ if self.record:
+ with self._record_buffer_lock:
+ self._record_buffer.extend(self._buffer[:])
+
+ if self._buffer_index == 0:
+
+ if self.is_jupyter: # pragma: no cover
+ from .jupyter import display
+
+ display(self._buffer, self._render_buffer(self._buffer[:]))
+ del self._buffer[:]
+ else:
+ if WINDOWS:
+ use_legacy_windows_render = False
+ if self.legacy_windows:
+ try:
+ use_legacy_windows_render = (
+ self.file.fileno() in _STD_STREAMS_OUTPUT
+ )
+ except (ValueError, io.UnsupportedOperation):
+ pass
+
+ if use_legacy_windows_render:
+ from pip._vendor.rich._win32_console import LegacyWindowsTerm
+ from pip._vendor.rich._windows_renderer import legacy_windows_render
+
+ buffer = self._buffer[:]
+ if self.no_color and self._color_system:
+ buffer = list(Segment.remove_color(buffer))
+
+ legacy_windows_render(buffer, LegacyWindowsTerm(self.file))
+ else:
+ # Either a non-std stream on legacy Windows, or modern Windows.
+ text = self._render_buffer(self._buffer[:])
+ # https://bugs.python.org/issue37871
+ write = self.file.write
+ for line in text.splitlines(True):
+ try:
+ write(line)
+ except UnicodeEncodeError as error:
+ error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
+ raise
+ else:
+ text = self._render_buffer(self._buffer[:])
+ try:
+ self.file.write(text)
+ except UnicodeEncodeError as error:
+ error.reason = f"{error.reason}\n*** You may need to add PYTHONIOENCODING=utf-8 to your environment ***"
+ raise
+
+ self.file.flush()
+ del self._buffer[:]
+
+ def _render_buffer(self, buffer: Iterable[Segment]) -> str:
+ """Render buffered output, and clear buffer."""
+ output: List[str] = []
+ append = output.append
+ color_system = self._color_system
+ legacy_windows = self.legacy_windows
+ not_terminal = not self.is_terminal
+ if self.no_color and color_system:
+ buffer = Segment.remove_color(buffer)
+ for text, style, control in buffer:
+ if style:
+ append(
+ style.render(
+ text,
+ color_system=color_system,
+ legacy_windows=legacy_windows,
+ )
+ )
+ elif not (not_terminal and control):
+ append(text)
+
+ rendered = "".join(output)
+ return rendered
+
+ def input(
+ self,
+ prompt: TextType = "",
+ *,
+ markup: bool = True,
+ emoji: bool = True,
+ password: bool = False,
+ stream: Optional[TextIO] = None,
+ ) -> str:
+ """Displays a prompt and waits for input from the user. The prompt may contain color / style.
+
+ It works in the same way as Python's builtin :func:`input` function and provides elaborate line editing and history features if Python's builtin :mod:`readline` module is previously loaded.
+
+ Args:
+ prompt (Union[str, Text]): Text to render in the prompt.
+ markup (bool, optional): Enable console markup (requires a str prompt). Defaults to True.
+ emoji (bool, optional): Enable emoji (requires a str prompt). Defaults to True.
+ password (bool, optional): Hide typed text. Defaults to False.
+ stream (TextIO, optional): Optional file to read input from (rather than stdin). Defaults to None.
+
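+ Example:
+ >>> name = console.input("What is [i]your[/i] [b]name[/b]? ")
+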
+ Returns:
+ str: Text read from stdin.
+ """
+ if prompt:
+ self.print(prompt, markup=markup, emoji=emoji, end="")
+ if password:
+ result = getpass("", stream=stream)
+ else:
+ if stream:
+ result = stream.readline()
+ else:
+ result = input()
+ return result
+
+ def export_text(self, *, clear: bool = True, styles: bool = False) -> str:
+ """Generate text from console contents (requires record=True argument in constructor).
+
+ Args:
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ styles (bool, optional): If ``True``, ansi escape codes will be included. ``False`` for plain text.
+ Defaults to ``False``.
+
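+ Example:
+ >>> console = Console(record=True)
+ >>> console.print("Hello")
+ >>> text = console.export_text()
+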
+ Returns:
+ str: String containing console contents.
+
+ """
+ assert (
+ self.record
+ ), "To export console contents set record=True in the constructor or instance"
+
+ with self._record_buffer_lock:
+ if styles:
+ text = "".join(
+ (style.render(text) if style else text)
+ for text, style, _ in self._record_buffer
+ )
+ else:
+ text = "".join(
+ segment.text
+ for segment in self._record_buffer
+ if not segment.control
+ )
+ if clear:
+ del self._record_buffer[:]
+ return text
+
+ def save_text(self, path: str, *, clear: bool = True, styles: bool = False) -> None:
+ """Generate text from console and save to a given location (requires record=True argument in constructor).
+
+ Args:
+ path (str): Path to write text files.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ styles (bool, optional): If ``True``, ansi style codes will be included. ``False`` for plain text.
+ Defaults to ``False``.
+
+ """
+ text = self.export_text(clear=clear, styles=styles)
+ with open(path, "wt", encoding="utf-8") as write_file:
+ write_file.write(text)
+
+ def export_html(
+ self,
+ *,
+ theme: Optional[TerminalTheme] = None,
+ clear: bool = True,
+ code_format: Optional[str] = None,
+ inline_styles: bool = False,
+ ) -> str:
+ """Generate HTML from console contents (requires record=True argument in constructor).
+
+ Args:
+ theme (TerminalTheme, optional): TerminalTheme object containing console colors.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
+ '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
+ inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
+ larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
+ Defaults to False.
+
+ Returns:
+ str: String containing console contents as HTML.
+ """
+ assert (
+ self.record
+ ), "To export console contents set record=True in the constructor or instance"
+ fragments: List[str] = []
+ append = fragments.append
+ _theme = theme or DEFAULT_TERMINAL_THEME
+ stylesheet = ""
+
+ render_code_format = CONSOLE_HTML_FORMAT if code_format is None else code_format
+
+ with self._record_buffer_lock:
+ if inline_styles:
+ for text, style, _ in Segment.filter_control(
+ Segment.simplify(self._record_buffer)
+ ):
+ text = escape(text)
+ if style:
+ rule = style.get_html_style(_theme)
+ if style.link:
+ text = f'<a href="{style.link}">{text}</a>'
+ text = f'<span style="{rule}">{text}</span>' if rule else text
+ append(text)
+ else:
+ styles: Dict[str, int] = {}
+ for text, style, _ in Segment.filter_control(
+ Segment.simplify(self._record_buffer)
+ ):
+ text = escape(text)
+ if style:
+ rule = style.get_html_style(_theme)
+ style_number = styles.setdefault(rule, len(styles) + 1)
+ if style.link:
+ text = f'<a class="r{style_number}" href="{style.link}">{text}</a>'
+ else:
+ text = f'<span class="r{style_number}">{text}</span>'
+ append(text)
+ stylesheet_rules: List[str] = []
+ stylesheet_append = stylesheet_rules.append
+ for style_rule, style_number in styles.items():
+ if style_rule:
+ stylesheet_append(f".r{style_number} {{{style_rule}}}")
+ stylesheet = "\n".join(stylesheet_rules)
+
+ rendered_code = render_code_format.format(
+ code="".join(fragments),
+ stylesheet=stylesheet,
+ foreground=_theme.foreground_color.hex,
+ background=_theme.background_color.hex,
+ )
+ if clear:
+ del self._record_buffer[:]
+ return rendered_code
+
+ def save_html(
+ self,
+ path: str,
+ *,
+ theme: Optional[TerminalTheme] = None,
+ clear: bool = True,
+ code_format: str = CONSOLE_HTML_FORMAT,
+ inline_styles: bool = False,
+ ) -> None:
+ """Generate HTML from console contents and write to a file (requires record=True argument in constructor).
+
+ Args:
+ path (str): Path to write html file.
+ theme (TerminalTheme, optional): TerminalTheme object containing console colors.
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``.
+ code_format (str, optional): Format string to render HTML. In addition to '{foreground}',
+ '{background}', and '{code}', should contain '{stylesheet}' if inline_styles is ``False``.
+ inline_styles (bool, optional): If ``True`` styles will be inlined into spans, which makes files
+ larger but easier to cut and paste markup. If ``False``, styles will be embedded in a style tag.
+ Defaults to False.
+
+ """
+ html = self.export_html(
+ theme=theme,
+ clear=clear,
+ code_format=code_format,
+ inline_styles=inline_styles,
+ )
+ with open(path, "wt", encoding="utf-8") as write_file:
+ write_file.write(html)
+
+ def export_svg(
+ self,
+ *,
+ title: str = "Rich",
+ theme: Optional[TerminalTheme] = None,
+ clear: bool = True,
+ code_format: str = CONSOLE_SVG_FORMAT,
+ font_aspect_ratio: float = 0.61,
+ unique_id: Optional[str] = None,
+ ) -> str:
+ """
+ Generate an SVG from the console contents (requires record=True in Console constructor).
+
+ Args:
+ title (str, optional): The title of the tab in the output image
+ theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
+ code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
+ into the string in order to form the final SVG output. The default template used and the variables
+ injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
+ font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
+ string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
+ If you aren't specifying a different font inside ``code_format``, you probably don't need this.
+ unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node
+ ids). If not set, this defaults to a computed value based on the recorded content.
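+
+ Example:
+ >>> console = Console(record=True)
+ >>> console.print("Hello")
+ >>> svg = console.export_svg(title="Demo")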
+ """
+
+ from pip._vendor.rich.cells import cell_len
+
+ style_cache: Dict[Style, str] = {}
+
+ def get_svg_style(style: Style) -> str:
+ """Convert a Style to CSS rules for SVG."""
+ if style in style_cache:
+ return style_cache[style]
+ css_rules = []
+ color = (
+ _theme.foreground_color
+ if (style.color is None or style.color.is_default)
+ else style.color.get_truecolor(_theme)
+ )
+ bgcolor = (
+ _theme.background_color
+ if (style.bgcolor is None or style.bgcolor.is_default)
+ else style.bgcolor.get_truecolor(_theme)
+ )
+ if style.reverse:
+ color, bgcolor = bgcolor, color
+ if style.dim:
+ color = blend_rgb(color, bgcolor, 0.4)
+ css_rules.append(f"fill: {color.hex}")
+ if style.bold:
+ css_rules.append("font-weight: bold")
+ if style.italic:
+ css_rules.append("font-style: italic")
+ if style.underline:
+ css_rules.append("text-decoration: underline")
+ if style.strike:
+ css_rules.append("text-decoration: line-through")
+
+ css = ";".join(css_rules)
+ style_cache[style] = css
+ return css
+
+ _theme = theme or SVG_EXPORT_THEME
+
+ width = self.width
+ char_height = 20
+ char_width = char_height * font_aspect_ratio
+ line_height = char_height * 1.22
+
+ margin_top = 1
+ margin_right = 1
+ margin_bottom = 1
+ margin_left = 1
+
+ padding_top = 40
+ padding_right = 8
+ padding_bottom = 8
+ padding_left = 8
+
+ padding_width = padding_left + padding_right
+ padding_height = padding_top + padding_bottom
+ margin_width = margin_left + margin_right
+ margin_height = margin_top + margin_bottom
+
+ text_backgrounds: List[str] = []
+ text_group: List[str] = []
+ classes: Dict[str, int] = {}
+ style_no = 1
+
+ def escape_text(text: str) -> str:
+ """HTML escape text and replace spaces with nbsp."""
+ return escape(text).replace(" ", "&#160;")
+
+ def make_tag(
+ name: str, content: Optional[str] = None, **attribs: object
+ ) -> str:
+ """Make a tag from name, content, and attributes."""
+
+ def stringify(value: object) -> str:
+ if isinstance(value, float):
+ return format(value, "g")
+ return str(value)
+
+ tag_attribs = " ".join(
+ f'{k.lstrip("_").replace("_", "-")}="{stringify(v)}"'
+ for k, v in attribs.items()
+ )
+ return (
+ f"<{name} {tag_attribs}>{content}</{name}>"
+ if content
+ else f"<{name} {tag_attribs}/>"
+ )
+
+ with self._record_buffer_lock:
+ segments = list(Segment.filter_control(self._record_buffer))
+ if clear:
+ self._record_buffer.clear()
+
+ if unique_id is None:
+ unique_id = "terminal-" + str(
+ zlib.adler32(
+ ("".join(repr(segment) for segment in segments)).encode(
+ "utf-8",
+ "ignore",
+ )
+ + title.encode("utf-8", "ignore")
+ )
+ )
+ y = 0
+ for y, line in enumerate(Segment.split_and_crop_lines(segments, length=width)):
+ x = 0
+ for text, style, _control in line:
+ style = style or Style()
+ rules = get_svg_style(style)
+ if rules not in classes:
+ classes[rules] = style_no
+ style_no += 1
+ class_name = f"r{classes[rules]}"
+
+ if style.reverse:
+ has_background = True
+ background = (
+ _theme.foreground_color.hex
+ if style.color is None
+ else style.color.get_truecolor(_theme).hex
+ )
+ else:
+ bgcolor = style.bgcolor
+ has_background = bgcolor is not None and not bgcolor.is_default
+ background = (
+ _theme.background_color.hex
+ if style.bgcolor is None
+ else style.bgcolor.get_truecolor(_theme).hex
+ )
+
+ text_length = cell_len(text)
+ if has_background:
+ text_backgrounds.append(
+ make_tag(
+ "rect",
+ fill=background,
+ x=x * char_width,
+ y=y * line_height + 1.5,
+ width=char_width * text_length,
+ height=line_height + 0.25,
+ shape_rendering="crispEdges",
+ )
+ )
+
+ if text != " " * len(text):
+ text_group.append(
+ make_tag(
+ "text",
+ escape_text(text),
+ _class=f"{unique_id}-{class_name}",
+ x=x * char_width,
+ y=y * line_height + char_height,
+ textLength=char_width * len(text),
+ clip_path=f"url(#{unique_id}-line-{y})",
+ )
+ )
+ x += cell_len(text)
+
+ line_offsets = [line_no * line_height + 1.5 for line_no in range(y)]
+ lines = "\n".join(
+ f"""<clipPath id="{unique_id}-line-{line_no}">
+ {make_tag("rect", x=0, y=offset, width=char_width * width, height=line_height + 0.25)}
+ </clipPath>"""
+ for line_no, offset in enumerate(line_offsets)
+ )
+
+ styles = "\n".join(
+ f".{unique_id}-r{rule_no} {{ {css} }}" for css, rule_no in classes.items()
+ )
+ backgrounds = "".join(text_backgrounds)
+ matrix = "".join(text_group)
+
+ terminal_width = ceil(width * char_width + padding_width)
+ terminal_height = (y + 1) * line_height + padding_height
+ chrome = make_tag(
+ "rect",
+ fill=_theme.background_color.hex,
+ stroke="rgba(255,255,255,0.35)",
+ stroke_width="1",
+ x=margin_left,
+ y=margin_top,
+ width=terminal_width,
+ height=terminal_height,
+ rx=8,
+ )
+
+ title_color = _theme.foreground_color.hex
+ if title:
+ chrome += make_tag(
+ "text",
+ escape_text(title),
+ _class=f"{unique_id}-title",
+ fill=title_color,
+ text_anchor="middle",
+ x=terminal_width // 2,
+ y=margin_top + char_height + 6,
+ )
+ chrome += f"""
+ <g transform="translate(26,22)">
+ <circle cx="0" cy="0" r="7" fill="#ff5f57"/>
+ <circle cx="22" cy="0" r="7" fill="#febc2e"/>
+ <circle cx="44" cy="0" r="7" fill="#28c840"/>
+ </g>
+ """
+
+ svg = code_format.format(
+ unique_id=unique_id,
+ char_width=char_width,
+ char_height=char_height,
+ line_height=line_height,
+ terminal_width=char_width * width - 1,
+ terminal_height=(y + 1) * line_height - 1,
+ width=terminal_width + margin_width,
+ height=terminal_height + margin_height,
+ terminal_x=margin_left + padding_left,
+ terminal_y=margin_top + padding_top,
+ styles=styles,
+ chrome=chrome,
+ backgrounds=backgrounds,
+ matrix=matrix,
+ lines=lines,
+ )
+ return svg
+
+ def save_svg(
+ self,
+ path: str,
+ *,
+ title: str = "Rich",
+ theme: Optional[TerminalTheme] = None,
+ clear: bool = True,
+ code_format: str = CONSOLE_SVG_FORMAT,
+ font_aspect_ratio: float = 0.61,
+ unique_id: Optional[str] = None,
+ ) -> None:
+ """Generate an SVG file from the console contents (requires record=True in Console constructor).
+
+ Args:
+ path (str): The path to write the SVG to.
+ title (str, optional): The title of the tab in the output image
+ theme (TerminalTheme, optional): The ``TerminalTheme`` object to use to style the terminal
+ clear (bool, optional): Clear record buffer after exporting. Defaults to ``True``
+ code_format (str, optional): Format string used to generate the SVG. Rich will inject a number of variables
+ into the string in order to form the final SVG output. The default template used and the variables
+ injected by Rich can be found by inspecting the ``console.CONSOLE_SVG_FORMAT`` variable.
+ font_aspect_ratio (float, optional): The width to height ratio of the font used in the ``code_format``
+ string. Defaults to 0.61, which is the width to height ratio of Fira Code (the default font).
+ If you aren't specifying a different font inside ``code_format``, you probably don't need this.
+ unique_id (str, optional): unique id that is used as the prefix for various elements (CSS styles, node
+ ids). If not set, this defaults to a computed value based on the recorded content.
+ """
+ svg = self.export_svg(
+ title=title,
+ theme=theme,
+ clear=clear,
+ code_format=code_format,
+ font_aspect_ratio=font_aspect_ratio,
+ unique_id=unique_id,
+ )
+ with open(path, "wt", encoding="utf-8") as write_file:
+ write_file.write(svg)
+
+
+def _svg_hash(svg_main_code: str) -> str:
+ """Returns a unique hash for the given SVG main code.
+
+ Args:
+ svg_main_code (str): The content we're going to inject in the SVG envelope.
+
+ Returns:
+ str: a hash of the given content
+ """
+ return str(zlib.adler32(svg_main_code.encode()))
+
+
+if __name__ == "__main__": # pragma: no cover
+ console = Console(record=True)
+
+ console.log(
+ "JSONRPC [i]request[/i]",
+ 5,
+ 1.3,
+ True,
+ False,
+ None,
+ {
+ "jsonrpc": "2.0",
+ "method": "subtract",
+ "params": {"minuend": 42, "subtrahend": 23},
+ "id": 3,
+ },
+ )
+
+ console.log("Hello, World!", "{'a': 1}", repr(console))
+
+ console.print(
+ {
+ "name": None,
+ "empty": [],
+ "quiz": {
+ "sport": {
+ "answered": True,
+ "q1": {
+ "question": "Which one is correct team name in NBA?",
+ "options": [
+ "New York Bulls",
+ "Los Angeles Kings",
+ "Golden State Warriors",
+ "Huston Rocket",
+ ],
+ "answer": "Huston Rocket",
+ },
+ },
+ "maths": {
+ "answered": False,
+ "q1": {
+ "question": "5 + 7 = ?",
+ "options": [10, 11, 12, 13],
+ "answer": 12,
+ },
+ "q2": {
+ "question": "12 - 8 = ?",
+ "options": [1, 2, 3, 4],
+ "answer": 4,
+ },
+ },
+ },
+ }
+ )
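+
+    # Illustrative sketch (not in the vendored file): with record=True above,
+    # everything printed so far can be exported. clear=False keeps the record
+    # buffer intact so the save_svg() call sees the same content; the file
+    # name "example.svg" is hypothetical.
+    svg_markup = console.export_svg(title="Rich demo", clear=False)
+    console.save_svg("example.svg", title="Rich demo")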
diff --git a/third_party/python/pip/pip/_vendor/rich/constrain.py b/third_party/python/pip/pip/_vendor/rich/constrain.py
new file mode 100644
index 0000000000..65fdf56342
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/constrain.py
@@ -0,0 +1,37 @@
+from typing import Optional, TYPE_CHECKING
+
+from .jupyter import JupyterMixin
+from .measure import Measurement
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderableType, RenderResult
+
+
+class Constrain(JupyterMixin):
+ """Constrain the width of a renderable to a given number of characters.
+
+ Args:
+ renderable (RenderableType): A renderable object.
+ width (int, optional): The maximum width (in characters) to render. Defaults to 80.
+ """
+
+ def __init__(self, renderable: "RenderableType", width: Optional[int] = 80) -> None:
+ self.renderable = renderable
+ self.width = width
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ if self.width is None:
+ yield self.renderable
+ else:
+ child_options = options.update_width(min(self.width, options.max_width))
+ yield from console.render(self.renderable, child_options)
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ if self.width is not None:
+ options = options.update_width(self.width)
+ measurement = Measurement.get(console, options, self.renderable)
+ return measurement
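+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Usage sketch (not part of the vendored file): cap a Panel's render
+    # width at 30 cells, even on a wider terminal.
+    from pip._vendor.rich.console import Console
+    from pip._vendor.rich.panel import Panel
+
+    console = Console()
+    console.print(Constrain(Panel("Hello, World!"), width=30))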
diff --git a/third_party/python/pip/pip/_vendor/rich/containers.py b/third_party/python/pip/pip/_vendor/rich/containers.py
new file mode 100644
index 0000000000..e29cf36899
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/containers.py
@@ -0,0 +1,167 @@
+from itertools import zip_longest
+from typing import (
+ Iterator,
+ Iterable,
+ List,
+ Optional,
+ Union,
+ overload,
+ TypeVar,
+ TYPE_CHECKING,
+)
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ JustifyMethod,
+ OverflowMethod,
+ RenderResult,
+ RenderableType,
+ )
+ from .text import Text
+
+from .cells import cell_len
+from .measure import Measurement
+
+T = TypeVar("T")
+
+
+class Renderables:
+    """A container of renderables which renders its contents to the console."""
+
+ def __init__(
+ self, renderables: Optional[Iterable["RenderableType"]] = None
+ ) -> None:
+ self._renderables: List["RenderableType"] = (
+ list(renderables) if renderables is not None else []
+ )
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ """Console render method to insert line-breaks."""
+ yield from self._renderables
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ dimensions = [
+ Measurement.get(console, options, renderable)
+ for renderable in self._renderables
+ ]
+ if not dimensions:
+ return Measurement(1, 1)
+ _min = max(dimension.minimum for dimension in dimensions)
+ _max = max(dimension.maximum for dimension in dimensions)
+ return Measurement(_min, _max)
+
+ def append(self, renderable: "RenderableType") -> None:
+ self._renderables.append(renderable)
+
+    def __iter__(self) -> Iterator["RenderableType"]:
+ return iter(self._renderables)
+
+
+class Lines:
+    """A container of lines of Text which can render to the console."""
+
+ def __init__(self, lines: Iterable["Text"] = ()) -> None:
+ self._lines: List["Text"] = list(lines)
+
+ def __repr__(self) -> str:
+ return f"Lines({self._lines!r})"
+
+ def __iter__(self) -> Iterator["Text"]:
+ return iter(self._lines)
+
+ @overload
+ def __getitem__(self, index: int) -> "Text":
+ ...
+
+ @overload
+ def __getitem__(self, index: slice) -> List["Text"]:
+ ...
+
+ def __getitem__(self, index: Union[slice, int]) -> Union["Text", List["Text"]]:
+ return self._lines[index]
+
+ def __setitem__(self, index: int, value: "Text") -> "Lines":
+ self._lines[index] = value
+ return self
+
+ def __len__(self) -> int:
+ return self._lines.__len__()
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ """Console render method to insert line-breaks."""
+ yield from self._lines
+
+ def append(self, line: "Text") -> None:
+ self._lines.append(line)
+
+ def extend(self, lines: Iterable["Text"]) -> None:
+ self._lines.extend(lines)
+
+ def pop(self, index: int = -1) -> "Text":
+ return self._lines.pop(index)
+
+ def justify(
+ self,
+ console: "Console",
+ width: int,
+ justify: "JustifyMethod" = "left",
+ overflow: "OverflowMethod" = "fold",
+ ) -> None:
+ """Justify and overflow text to a given width.
+
+ Args:
+ console (Console): Console instance.
+ width (int): Number of characters per line.
+ justify (str, optional): Default justify method for text: "left", "center", "full" or "right". Defaults to "left".
+ overflow (str, optional): Default overflow for text: "crop", "fold", or "ellipsis". Defaults to "fold".
+
+ """
+ from .text import Text
+
+ if justify == "left":
+ for line in self._lines:
+ line.truncate(width, overflow=overflow, pad=True)
+ elif justify == "center":
+ for line in self._lines:
+ line.rstrip()
+ line.truncate(width, overflow=overflow)
+ line.pad_left((width - cell_len(line.plain)) // 2)
+ line.pad_right(width - cell_len(line.plain))
+ elif justify == "right":
+ for line in self._lines:
+ line.rstrip()
+ line.truncate(width, overflow=overflow)
+ line.pad_left(width - cell_len(line.plain))
+ elif justify == "full":
+ for line_index, line in enumerate(self._lines):
+ if line_index == len(self._lines) - 1:
+ break
+ words = line.split(" ")
+ words_size = sum(cell_len(word.plain) for word in words)
+ num_spaces = len(words) - 1
+ spaces = [1 for _ in range(num_spaces)]
+ index = 0
+ if spaces:
+ while words_size + num_spaces < width:
+ spaces[len(spaces) - index - 1] += 1
+ num_spaces += 1
+ index = (index + 1) % len(spaces)
+ tokens: List[Text] = []
+ for index, (word, next_word) in enumerate(
+ zip_longest(words, words[1:])
+ ):
+ tokens.append(word)
+ if index < len(spaces):
+ style = word.get_style_at_offset(console, -1)
+ next_style = next_word.get_style_at_offset(console, 0)
+ space_style = style if style == next_style else line.style
+ tokens.append(Text(" " * spaces[index], style=space_style))
+ self[line_index] = Text("").join(tokens)
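+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Usage sketch (not part of the vendored file): build a Lines container
+    # and full-justify it to 30 cells, mirroring what Text does internally
+    # when wrapping a paragraph.
+    from pip._vendor.rich.console import Console
+    from pip._vendor.rich.text import Text
+
+    console = Console()
+    lines = Lines([Text("the quick brown"), Text("fox jumps over the lazy dog")])
+    lines.justify(console, width=30, justify="full")
+    for line in lines:
+        console.print(line)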
diff --git a/third_party/python/pip/pip/_vendor/rich/control.py b/third_party/python/pip/pip/_vendor/rich/control.py
new file mode 100644
index 0000000000..88fcb92951
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/control.py
@@ -0,0 +1,225 @@
+import sys
+import time
+from typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Union
+
+if sys.version_info >= (3, 8):
+ from typing import Final
+else:
+ from pip._vendor.typing_extensions import Final # pragma: no cover
+
+from .segment import ControlCode, ControlType, Segment
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult
+
+STRIP_CONTROL_CODES: Final = [
+ 7, # Bell
+ 8, # Backspace
+ 11, # Vertical tab
+ 12, # Form feed
+ 13, # Carriage return
+]
+_CONTROL_STRIP_TRANSLATE: Final = {
+ _codepoint: None for _codepoint in STRIP_CONTROL_CODES
+}
+
+CONTROL_ESCAPE: Final = {
+ 7: "\\a",
+ 8: "\\b",
+ 11: "\\v",
+ 12: "\\f",
+ 13: "\\r",
+}
+
+CONTROL_CODES_FORMAT: Dict[int, Callable[..., str]] = {
+ ControlType.BELL: lambda: "\x07",
+ ControlType.CARRIAGE_RETURN: lambda: "\r",
+ ControlType.HOME: lambda: "\x1b[H",
+ ControlType.CLEAR: lambda: "\x1b[2J",
+ ControlType.ENABLE_ALT_SCREEN: lambda: "\x1b[?1049h",
+ ControlType.DISABLE_ALT_SCREEN: lambda: "\x1b[?1049l",
+ ControlType.SHOW_CURSOR: lambda: "\x1b[?25h",
+ ControlType.HIDE_CURSOR: lambda: "\x1b[?25l",
+ ControlType.CURSOR_UP: lambda param: f"\x1b[{param}A",
+ ControlType.CURSOR_DOWN: lambda param: f"\x1b[{param}B",
+ ControlType.CURSOR_FORWARD: lambda param: f"\x1b[{param}C",
+ ControlType.CURSOR_BACKWARD: lambda param: f"\x1b[{param}D",
+ ControlType.CURSOR_MOVE_TO_COLUMN: lambda param: f"\x1b[{param+1}G",
+ ControlType.ERASE_IN_LINE: lambda param: f"\x1b[{param}K",
+ ControlType.CURSOR_MOVE_TO: lambda x, y: f"\x1b[{y+1};{x+1}H",
+ ControlType.SET_WINDOW_TITLE: lambda title: f"\x1b]0;{title}\x07",
+}
+
+
+class Control:
+    """A renderable that inserts control codes (non-printable, but they may move the cursor).
+
+    Args:
+        *codes (Union[ControlType, ControlCode]): Positional arguments are either a
+            :class:`~rich.segment.ControlType` enum or a tuple of a ControlType and
+            an integer parameter.
+    """
+
+ __slots__ = ["segment"]
+
+ def __init__(self, *codes: Union[ControlType, ControlCode]) -> None:
+ control_codes: List[ControlCode] = [
+ (code,) if isinstance(code, ControlType) else code for code in codes
+ ]
+ _format_map = CONTROL_CODES_FORMAT
+ rendered_codes = "".join(
+ _format_map[code](*parameters) for code, *parameters in control_codes
+ )
+ self.segment = Segment(rendered_codes, None, control_codes)
+
+ @classmethod
+ def bell(cls) -> "Control":
+ """Ring the 'bell'."""
+ return cls(ControlType.BELL)
+
+ @classmethod
+ def home(cls) -> "Control":
+ """Move cursor to 'home' position."""
+ return cls(ControlType.HOME)
+
+ @classmethod
+ def move(cls, x: int = 0, y: int = 0) -> "Control":
+ """Move cursor relative to current position.
+
+ Args:
+ x (int): X offset.
+ y (int): Y offset.
+
+ Returns:
+ ~Control: Control object.
+
+ """
+
+ def get_codes() -> Iterable[ControlCode]:
+ control = ControlType
+ if x:
+ yield (
+ control.CURSOR_FORWARD if x > 0 else control.CURSOR_BACKWARD,
+ abs(x),
+ )
+ if y:
+ yield (
+ control.CURSOR_DOWN if y > 0 else control.CURSOR_UP,
+ abs(y),
+ )
+
+ control = cls(*get_codes())
+ return control
+
+ @classmethod
+ def move_to_column(cls, x: int, y: int = 0) -> "Control":
+ """Move to the given column, optionally add offset to row.
+
+        Args:
+            x (int): Absolute x (column).
+            y (int): Optional y offset (row).
+
+ Returns:
+ ~Control: Control object.
+ """
+
+ return (
+ cls(
+ (ControlType.CURSOR_MOVE_TO_COLUMN, x),
+ (
+ ControlType.CURSOR_DOWN if y > 0 else ControlType.CURSOR_UP,
+ abs(y),
+ ),
+ )
+ if y
+ else cls((ControlType.CURSOR_MOVE_TO_COLUMN, x))
+ )
+
+ @classmethod
+ def move_to(cls, x: int, y: int) -> "Control":
+ """Move cursor to absolute position.
+
+ Args:
+ x (int): x offset (column)
+ y (int): y offset (row)
+
+ Returns:
+ ~Control: Control object.
+ """
+ return cls((ControlType.CURSOR_MOVE_TO, x, y))
+
+ @classmethod
+ def clear(cls) -> "Control":
+ """Clear the screen."""
+ return cls(ControlType.CLEAR)
+
+ @classmethod
+ def show_cursor(cls, show: bool) -> "Control":
+ """Show or hide the cursor."""
+ return cls(ControlType.SHOW_CURSOR if show else ControlType.HIDE_CURSOR)
+
+ @classmethod
+ def alt_screen(cls, enable: bool) -> "Control":
+ """Enable or disable alt screen."""
+ if enable:
+ return cls(ControlType.ENABLE_ALT_SCREEN, ControlType.HOME)
+ else:
+ return cls(ControlType.DISABLE_ALT_SCREEN)
+
+ @classmethod
+    def title(cls, title: str) -> "Control":
+        """Set the terminal window title.
+
+        Args:
+            title (str): The new terminal window title.
+        """
+ return cls((ControlType.SET_WINDOW_TITLE, title))
+
+ def __str__(self) -> str:
+ return self.segment.text
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ if self.segment.text:
+ yield self.segment
+
+
+def strip_control_codes(
+ text: str, _translate_table: Dict[int, None] = _CONTROL_STRIP_TRANSLATE
+) -> str:
+ """Remove control codes from text.
+
+ Args:
+        text (str): A string possibly containing control codes.
+
+ Returns:
+ str: String with control codes removed.
+ """
+ return text.translate(_translate_table)
+
+
+def escape_control_codes(
+ text: str,
+ _translate_table: Dict[int, str] = CONTROL_ESCAPE,
+) -> str:
+ """Replace control codes with their "escaped" equivalent in the given text.
+ (e.g. "\b" becomes "\\b")
+
+ Args:
+ text (str): A string possibly containing control codes.
+
+ Returns:
+ str: String with control codes replaced with their escaped version.
+ """
+ return text.translate(_translate_table)
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+ console.print("Look at the title of your terminal window ^")
+ # console.print(Control((ControlType.SET_WINDOW_TITLE, "Hello, world!")))
+ for i in range(10):
+ console.set_window_title("🚀 Loading" + "." * i)
+ time.sleep(0.5)
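+
+    # Sketch (not in the vendored file): the module-level helpers strip or
+    # escape the codes listed in STRIP_CONTROL_CODES / CONTROL_ESCAPE.
+    console.print(repr(strip_control_codes("progress\rdone")))
+    console.print(repr(escape_control_codes("progress\rdone")))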
diff --git a/third_party/python/pip/pip/_vendor/rich/default_styles.py b/third_party/python/pip/pip/_vendor/rich/default_styles.py
new file mode 100644
index 0000000000..46e9ea52c5
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/default_styles.py
@@ -0,0 +1,188 @@
+from typing import Dict
+
+from .style import Style
+
+DEFAULT_STYLES: Dict[str, Style] = {
+ "none": Style.null(),
+ "reset": Style(
+ color="default",
+ bgcolor="default",
+ dim=False,
+ bold=False,
+ italic=False,
+ underline=False,
+ blink=False,
+ blink2=False,
+ reverse=False,
+ conceal=False,
+ strike=False,
+ ),
+ "dim": Style(dim=True),
+ "bright": Style(dim=False),
+ "bold": Style(bold=True),
+ "strong": Style(bold=True),
+ "code": Style(reverse=True, bold=True),
+ "italic": Style(italic=True),
+ "emphasize": Style(italic=True),
+ "underline": Style(underline=True),
+ "blink": Style(blink=True),
+ "blink2": Style(blink2=True),
+ "reverse": Style(reverse=True),
+ "strike": Style(strike=True),
+ "black": Style(color="black"),
+ "red": Style(color="red"),
+ "green": Style(color="green"),
+ "yellow": Style(color="yellow"),
+ "magenta": Style(color="magenta"),
+ "cyan": Style(color="cyan"),
+ "white": Style(color="white"),
+ "inspect.attr": Style(color="yellow", italic=True),
+ "inspect.attr.dunder": Style(color="yellow", italic=True, dim=True),
+ "inspect.callable": Style(bold=True, color="red"),
+ "inspect.async_def": Style(italic=True, color="bright_cyan"),
+ "inspect.def": Style(italic=True, color="bright_cyan"),
+ "inspect.class": Style(italic=True, color="bright_cyan"),
+ "inspect.error": Style(bold=True, color="red"),
+ "inspect.equals": Style(),
+ "inspect.help": Style(color="cyan"),
+ "inspect.doc": Style(dim=True),
+ "inspect.value.border": Style(color="green"),
+ "live.ellipsis": Style(bold=True, color="red"),
+ "layout.tree.row": Style(dim=False, color="red"),
+ "layout.tree.column": Style(dim=False, color="blue"),
+ "logging.keyword": Style(bold=True, color="yellow"),
+ "logging.level.notset": Style(dim=True),
+ "logging.level.debug": Style(color="green"),
+ "logging.level.info": Style(color="blue"),
+ "logging.level.warning": Style(color="red"),
+ "logging.level.error": Style(color="red", bold=True),
+ "logging.level.critical": Style(color="red", bold=True, reverse=True),
+ "log.level": Style.null(),
+ "log.time": Style(color="cyan", dim=True),
+ "log.message": Style.null(),
+ "log.path": Style(dim=True),
+ "repr.ellipsis": Style(color="yellow"),
+ "repr.indent": Style(color="green", dim=True),
+ "repr.error": Style(color="red", bold=True),
+ "repr.str": Style(color="green", italic=False, bold=False),
+ "repr.brace": Style(bold=True),
+ "repr.comma": Style(bold=True),
+ "repr.ipv4": Style(bold=True, color="bright_green"),
+ "repr.ipv6": Style(bold=True, color="bright_green"),
+ "repr.eui48": Style(bold=True, color="bright_green"),
+ "repr.eui64": Style(bold=True, color="bright_green"),
+ "repr.tag_start": Style(bold=True),
+ "repr.tag_name": Style(color="bright_magenta", bold=True),
+ "repr.tag_contents": Style(color="default"),
+ "repr.tag_end": Style(bold=True),
+ "repr.attrib_name": Style(color="yellow", italic=False),
+ "repr.attrib_equal": Style(bold=True),
+ "repr.attrib_value": Style(color="magenta", italic=False),
+ "repr.number": Style(color="cyan", bold=True, italic=False),
+ "repr.number_complex": Style(color="cyan", bold=True, italic=False), # same
+ "repr.bool_true": Style(color="bright_green", italic=True),
+ "repr.bool_false": Style(color="bright_red", italic=True),
+ "repr.none": Style(color="magenta", italic=True),
+ "repr.url": Style(underline=True, color="bright_blue", italic=False, bold=False),
+ "repr.uuid": Style(color="bright_yellow", bold=False),
+ "repr.call": Style(color="magenta", bold=True),
+ "repr.path": Style(color="magenta"),
+ "repr.filename": Style(color="bright_magenta"),
+ "rule.line": Style(color="bright_green"),
+ "rule.text": Style.null(),
+ "json.brace": Style(bold=True),
+ "json.bool_true": Style(color="bright_green", italic=True),
+ "json.bool_false": Style(color="bright_red", italic=True),
+ "json.null": Style(color="magenta", italic=True),
+ "json.number": Style(color="cyan", bold=True, italic=False),
+ "json.str": Style(color="green", italic=False, bold=False),
+ "json.key": Style(color="blue", bold=True),
+ "prompt": Style.null(),
+ "prompt.choices": Style(color="magenta", bold=True),
+ "prompt.default": Style(color="cyan", bold=True),
+ "prompt.invalid": Style(color="red"),
+ "prompt.invalid.choice": Style(color="red"),
+ "pretty": Style.null(),
+ "scope.border": Style(color="blue"),
+ "scope.key": Style(color="yellow", italic=True),
+ "scope.key.special": Style(color="yellow", italic=True, dim=True),
+ "scope.equals": Style(color="red"),
+ "table.header": Style(bold=True),
+ "table.footer": Style(bold=True),
+ "table.cell": Style.null(),
+ "table.title": Style(italic=True),
+ "table.caption": Style(italic=True, dim=True),
+ "traceback.error": Style(color="red", italic=True),
+ "traceback.border.syntax_error": Style(color="bright_red"),
+ "traceback.border": Style(color="red"),
+ "traceback.text": Style.null(),
+ "traceback.title": Style(color="red", bold=True),
+ "traceback.exc_type": Style(color="bright_red", bold=True),
+ "traceback.exc_value": Style.null(),
+ "traceback.offset": Style(color="bright_red", bold=True),
+ "bar.back": Style(color="grey23"),
+ "bar.complete": Style(color="rgb(249,38,114)"),
+ "bar.finished": Style(color="rgb(114,156,31)"),
+ "bar.pulse": Style(color="rgb(249,38,114)"),
+ "progress.description": Style.null(),
+ "progress.filesize": Style(color="green"),
+ "progress.filesize.total": Style(color="green"),
+ "progress.download": Style(color="green"),
+ "progress.elapsed": Style(color="yellow"),
+ "progress.percentage": Style(color="magenta"),
+ "progress.remaining": Style(color="cyan"),
+ "progress.data.speed": Style(color="red"),
+ "progress.spinner": Style(color="green"),
+ "status.spinner": Style(color="green"),
+ "tree": Style(),
+ "tree.line": Style(),
+ "markdown.paragraph": Style(),
+ "markdown.text": Style(),
+ "markdown.emph": Style(italic=True),
+ "markdown.strong": Style(bold=True),
+ "markdown.code": Style(bgcolor="black", color="bright_white"),
+ "markdown.code_block": Style(dim=True, color="cyan", bgcolor="black"),
+ "markdown.block_quote": Style(color="magenta"),
+ "markdown.list": Style(color="cyan"),
+ "markdown.item": Style(),
+ "markdown.item.bullet": Style(color="yellow", bold=True),
+ "markdown.item.number": Style(color="yellow", bold=True),
+ "markdown.hr": Style(color="yellow"),
+ "markdown.h1.border": Style(),
+ "markdown.h1": Style(bold=True),
+ "markdown.h2": Style(bold=True, underline=True),
+ "markdown.h3": Style(bold=True),
+ "markdown.h4": Style(bold=True, dim=True),
+ "markdown.h5": Style(underline=True),
+ "markdown.h6": Style(italic=True),
+ "markdown.h7": Style(italic=True, dim=True),
+ "markdown.link": Style(color="bright_blue"),
+ "markdown.link_url": Style(color="blue"),
+ "iso8601.date": Style(color="blue"),
+ "iso8601.time": Style(color="magenta"),
+ "iso8601.timezone": Style(color="yellow"),
+}
+
+
+if __name__ == "__main__": # pragma: no cover
+ import argparse
+ import io
+
+ from pip._vendor.rich.console import Console
+ from pip._vendor.rich.table import Table
+ from pip._vendor.rich.text import Text
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--html", action="store_true", help="Export as HTML table")
+ args = parser.parse_args()
+ html: bool = args.html
+ console = Console(record=True, width=70, file=io.StringIO()) if html else Console()
+
+ table = Table("Name", "Styling")
+
+ for style_name, style in DEFAULT_STYLES.items():
+ table.add_row(Text(style_name, style=style), str(style))
+
+ console.print(table)
+ if html:
+ print(console.export_html(inline_styles=True))
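+
+    # Sketch (not in the vendored file): entries are plain Style objects,
+    # so they can be combined with "+" like any other Style.
+    emphasis = DEFAULT_STYLES["bold"] + DEFAULT_STYLES["red"]
+    console.print("bold + red =", emphasis)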
diff --git a/third_party/python/pip/pip/_vendor/rich/diagnose.py b/third_party/python/pip/pip/_vendor/rich/diagnose.py
new file mode 100644
index 0000000000..ad36183898
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/diagnose.py
@@ -0,0 +1,37 @@
+import os
+import platform
+
+from pip._vendor.rich import inspect
+from pip._vendor.rich.console import Console, get_windows_console_features
+from pip._vendor.rich.panel import Panel
+from pip._vendor.rich.pretty import Pretty
+
+
+def report() -> None: # pragma: no cover
+ """Print a report to the terminal with debugging information"""
+ console = Console()
+ inspect(console)
+ features = get_windows_console_features()
+ inspect(features)
+
+ env_names = (
+ "TERM",
+ "COLORTERM",
+ "CLICOLOR",
+ "NO_COLOR",
+ "TERM_PROGRAM",
+ "COLUMNS",
+ "LINES",
+ "JUPYTER_COLUMNS",
+ "JUPYTER_LINES",
+ "JPY_PARENT_PID",
+ "VSCODE_VERBOSE_LOGGING",
+ )
+ env = {name: os.getenv(name) for name in env_names}
+ console.print(Panel.fit((Pretty(env)), title="[b]Environment Variables"))
+
+ console.print(f'platform="{platform.system()}"')
+
+
+if __name__ == "__main__": # pragma: no cover
+ report()
diff --git a/third_party/python/pip/pip/_vendor/rich/emoji.py b/third_party/python/pip/pip/_vendor/rich/emoji.py
new file mode 100644
index 0000000000..791f0465de
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/emoji.py
@@ -0,0 +1,96 @@
+import sys
+from typing import TYPE_CHECKING, Optional, Union
+
+from .jupyter import JupyterMixin
+from .segment import Segment
+from .style import Style
+from ._emoji_codes import EMOJI
+from ._emoji_replace import _emoji_replace
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from pip._vendor.typing_extensions import Literal # pragma: no cover
+
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult
+
+
+EmojiVariant = Literal["emoji", "text"]
+
+
+class NoEmoji(Exception):
+ """No emoji by that name."""
+
+
+class Emoji(JupyterMixin):
+ __slots__ = ["name", "style", "_char", "variant"]
+
+ VARIANTS = {"text": "\uFE0E", "emoji": "\uFE0F"}
+
+ def __init__(
+ self,
+ name: str,
+ style: Union[str, Style] = "none",
+ variant: Optional[EmojiVariant] = None,
+ ) -> None:
+ """A single emoji character.
+
+        Args:
+            name (str): Name of emoji.
+            style (Union[str, Style], optional): Optional style. Defaults to "none".
+            variant (EmojiVariant, optional): Emoji variant, "text" or "emoji". Defaults to None.
+
+ Raises:
+ NoEmoji: If the emoji doesn't exist.
+ """
+ self.name = name
+ self.style = style
+ self.variant = variant
+ try:
+ self._char = EMOJI[name]
+ except KeyError:
+ raise NoEmoji(f"No emoji called {name!r}")
+ if variant is not None:
+ self._char += self.VARIANTS.get(variant, "")
+
+ @classmethod
+ def replace(cls, text: str) -> str:
+ """Replace emoji markup with corresponding unicode characters.
+
+ Args:
+            text (str): A string with emoji codes, e.g. "Hello :smiley:!"
+
+        Returns:
+            str: A string with emoji codes replaced with actual emoji.
+ """
+ return _emoji_replace(text)
+
+ def __repr__(self) -> str:
+ return f"<emoji {self.name!r}>"
+
+ def __str__(self) -> str:
+ return self._char
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ yield Segment(self._char, console.get_style(self.style))
+
+
+if __name__ == "__main__": # pragma: no cover
+ import sys
+
+ from pip._vendor.rich.columns import Columns
+ from pip._vendor.rich.console import Console
+
+ console = Console(record=True)
+
+ columns = Columns(
+ (f":{name}: {name}" for name in sorted(EMOJI.keys()) if "\u200D" not in name),
+ column_first=True,
+ )
+
+ console.print(columns)
+ if len(sys.argv) > 1:
+ console.save_html(sys.argv[1])
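+
+    # Sketch (not in the vendored file): Emoji instances render directly;
+    # the "text" variant appends U+FE0E to request monochrome presentation.
+    # Assumes the name "heart" exists in the EMOJI table.
+    console.print(Emoji("heart"), Emoji("heart", variant="text"))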
diff --git a/third_party/python/pip/pip/_vendor/rich/errors.py b/third_party/python/pip/pip/_vendor/rich/errors.py
new file mode 100644
index 0000000000..0bcbe53ef5
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/errors.py
@@ -0,0 +1,34 @@
+class ConsoleError(Exception):
+ """An error in console operation."""
+
+
+class StyleError(Exception):
+ """An error in styles."""
+
+
+class StyleSyntaxError(ConsoleError):
+ """Style was badly formatted."""
+
+
+class MissingStyle(StyleError):
+ """No such style."""
+
+
+class StyleStackError(ConsoleError):
+ """Style stack is invalid."""
+
+
+class NotRenderableError(ConsoleError):
+ """Object is not renderable."""
+
+
+class MarkupError(ConsoleError):
+ """Markup was badly formatted."""
+
+
+class LiveError(ConsoleError):
+ """Error related to Live display."""
+
+
+class NoAltScreen(ConsoleError):
+ """Alt screen mode was required."""
diff --git a/third_party/python/pip/pip/_vendor/rich/file_proxy.py b/third_party/python/pip/pip/_vendor/rich/file_proxy.py
new file mode 100644
index 0000000000..cc69f22f3c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/file_proxy.py
@@ -0,0 +1,54 @@
+import io
+from typing import IO, TYPE_CHECKING, Any, List
+
+from .ansi import AnsiDecoder
+from .text import Text
+
+if TYPE_CHECKING:
+ from .console import Console
+
+
+class FileProxy(io.TextIOBase):
+ """Wraps a file (e.g. sys.stdout) and redirects writes to a console."""
+
+ def __init__(self, console: "Console", file: IO[str]) -> None:
+ self.__console = console
+ self.__file = file
+ self.__buffer: List[str] = []
+ self.__ansi_decoder = AnsiDecoder()
+
+ @property
+ def rich_proxied_file(self) -> IO[str]:
+ """Get proxied file."""
+ return self.__file
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self.__file, name)
+
+ def write(self, text: str) -> int:
+ if not isinstance(text, str):
+ raise TypeError(f"write() argument must be str, not {type(text).__name__}")
+ buffer = self.__buffer
+ lines: List[str] = []
+ while text:
+ line, new_line, text = text.partition("\n")
+ if new_line:
+ lines.append("".join(buffer) + line)
+ del buffer[:]
+ else:
+ buffer.append(line)
+ break
+ if lines:
+ console = self.__console
+ with console:
+ output = Text("\n").join(
+ self.__ansi_decoder.decode_line(line) for line in lines
+ )
+ console.print(output)
+ return len(text)
+
+ def flush(self) -> None:
+ output = "".join(self.__buffer)
+ if output:
+ self.__console.print(output)
+ del self.__buffer[:]
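+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Usage sketch (not part of the vendored file): writes sent to the proxy
+    # are buffered until a newline, then decoded and re-printed through the
+    # Console. sys.__stdout__ is used to avoid recursing through the proxy.
+    import sys
+
+    from pip._vendor.rich.console import Console
+
+    console = Console(file=sys.__stdout__)
+    proxy = FileProxy(console, sys.__stdout__)
+    proxy.write("\x1b[1mbold via FileProxy\x1b[0m\n")
+    proxy.flush()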
diff --git a/third_party/python/pip/pip/_vendor/rich/filesize.py b/third_party/python/pip/pip/_vendor/rich/filesize.py
new file mode 100644
index 0000000000..99f118e201
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/filesize.py
@@ -0,0 +1,89 @@
+# coding: utf-8
+"""Functions for reporting filesizes. Borrowed from https://github.com/PyFilesystem/pyfilesystem2
+
+The functions declared in this module should cover the different
+use cases needed to generate a string representation of a file size
+using several different units. Since there are many standards regarding
+file size units, several variants exist upstream; this vendored copy
+exposes only the decimal (SI) function.
+
+See Also:
+ * `Wikipedia: Binary prefix <https://en.wikipedia.org/wiki/Binary_prefix>`_
+
+"""
+
+__all__ = ["decimal"]
+
+from typing import Iterable, List, Optional, Tuple
+
+
+def _to_str(
+ size: int,
+ suffixes: Iterable[str],
+ base: int,
+ *,
+ precision: Optional[int] = 1,
+ separator: Optional[str] = " ",
+) -> str:
+ if size == 1:
+ return "1 byte"
+ elif size < base:
+ return "{:,} bytes".format(size)
+
+ for i, suffix in enumerate(suffixes, 2): # noqa: B007
+ unit = base**i
+ if size < unit:
+ break
+ return "{:,.{precision}f}{separator}{}".format(
+ (base * size / unit),
+ suffix,
+ precision=precision,
+ separator=separator,
+ )
+
+
+def pick_unit_and_suffix(size: int, suffixes: List[str], base: int) -> Tuple[int, str]:
+ """Pick a suffix and base for the given size."""
+ for i, suffix in enumerate(suffixes):
+ unit = base**i
+ if size < unit * base:
+ break
+ return unit, suffix
+
+
+def decimal(
+    size: int,
+    *,
+    precision: Optional[int] = 1,
+    separator: Optional[str] = " ",
+) -> str:
+    """Convert a filesize into a string (powers of 1000, SI prefixes).
+
+ In this convention, ``1000 B = 1 kB``.
+
+ This is typically the format used to advertise the storage
+ capacity of USB flash drives and the like (*256 MB* meaning
+ actually a storage capacity of more than *256 000 000 B*),
+ or used by **Mac OS X** since v10.6 to report file sizes.
+
+    Arguments:
+        size (int): A file size.
+        precision (int): The number of decimal places to include (default = 1).
+        separator (str): The string to separate the value from the units (default = " ").
+
+    Returns:
+        `str`: A string containing an abbreviated file size and units.
+
+ Example:
+ >>> filesize.decimal(30000)
+ '30.0 kB'
+ >>> filesize.decimal(30000, precision=2, separator="")
+ '30.00kB'
+
+ """
+ return _to_str(
+ size,
+ ("kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"),
+ 1000,
+ precision=precision,
+ separator=separator,
+ )
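+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Quick check of the doctest examples in decimal() above.
+    print(decimal(30000))  # 30.0 kB
+    print(decimal(30000, precision=2, separator=""))  # 30.00kB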
diff --git a/third_party/python/pip/pip/_vendor/rich/highlighter.py b/third_party/python/pip/pip/_vendor/rich/highlighter.py
new file mode 100644
index 0000000000..82293dffc4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/highlighter.py
@@ -0,0 +1,232 @@
+import re
+from abc import ABC, abstractmethod
+from typing import List, Union
+
+from .text import Span, Text
+
+
+def _combine_regex(*regexes: str) -> str:
+    """Combine a number of regexes into a single regex.
+
+ Returns:
+ str: New regex with all regexes ORed together.
+ """
+ return "|".join(regexes)
+
+
+class Highlighter(ABC):
+ """Abstract base class for highlighters."""
+
+ def __call__(self, text: Union[str, Text]) -> Text:
+ """Highlight a str or Text instance.
+
+ Args:
+ text (Union[str, ~Text]): Text to highlight.
+
+ Raises:
+ TypeError: If not called with text or str.
+
+ Returns:
+            Text: A Text instance with highlighting applied.
+ """
+ if isinstance(text, str):
+ highlight_text = Text(text)
+ elif isinstance(text, Text):
+ highlight_text = text.copy()
+ else:
+ raise TypeError(f"str or Text instance required, not {text!r}")
+ self.highlight(highlight_text)
+ return highlight_text
+
+ @abstractmethod
+ def highlight(self, text: Text) -> None:
+ """Apply highlighting in place to text.
+
+ Args:
+            text (~Text): A Text object to highlight.
+ """
+
+
+class NullHighlighter(Highlighter):
+ """A highlighter object that doesn't highlight.
+
+ May be used to disable highlighting entirely.
+
+ """
+
+ def highlight(self, text: Text) -> None:
+ """Nothing to do"""
+
+
+class RegexHighlighter(Highlighter):
+ """Applies highlighting from a list of regular expressions."""
+
+ highlights: List[str] = []
+ base_style: str = ""
+
+ def highlight(self, text: Text) -> None:
+ """Highlight :class:`rich.text.Text` using regular expressions.
+
+ Args:
+            text (~Text): Text to be highlighted.
+
+ """
+
+ highlight_regex = text.highlight_regex
+ for re_highlight in self.highlights:
+ highlight_regex(re_highlight, style_prefix=self.base_style)
+
+
+class ReprHighlighter(RegexHighlighter):
+ """Highlights the text typically produced from ``__repr__`` methods."""
+
+ base_style = "repr."
+ highlights = [
+ r"(?P<tag_start><)(?P<tag_name>[-\w.:|]*)(?P<tag_contents>[\w\W]*?)(?P<tag_end>>)",
+ r'(?P<attrib_name>[\w_]{1,50})=(?P<attrib_value>"?[\w_]+"?)?',
+ r"(?P<brace>[][{}()])",
+ _combine_regex(
+ r"(?P<ipv4>[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})",
+ r"(?P<ipv6>([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4})",
+ r"(?P<eui64>(?:[0-9A-Fa-f]{1,2}-){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){7}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){3}[0-9A-Fa-f]{4})",
+ r"(?P<eui48>(?:[0-9A-Fa-f]{1,2}-){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{1,2}:){5}[0-9A-Fa-f]{1,2}|(?:[0-9A-Fa-f]{4}\.){2}[0-9A-Fa-f]{4})",
+ r"(?P<uuid>[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12})",
+ r"(?P<call>[\w.]*?)\(",
+ r"\b(?P<bool_true>True)\b|\b(?P<bool_false>False)\b|\b(?P<none>None)\b",
+ r"(?P<ellipsis>\.\.\.)",
+ r"(?P<number_complex>(?<!\w)(?:\-?[0-9]+\.?[0-9]*(?:e[-+]?\d+?)?)(?:[-+](?:[0-9]+\.?[0-9]*(?:e[-+]?\d+)?))?j)",
+ r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[-+]?\d+?)?\b|0x[0-9a-fA-F]*)",
+ r"(?P<path>\B(/[-\w._+]+)*\/)(?P<filename>[-\w._+]*)?",
+ r"(?<![\\\w])(?P<str>b?'''.*?(?<!\\)'''|b?'.*?(?<!\\)'|b?\"\"\".*?(?<!\\)\"\"\"|b?\".*?(?<!\\)\")",
+ r"(?P<url>(file|https|http|ws|wss)://[-0-9a-zA-Z$_+!`(),.?/;:&=%#]*)",
+ ),
+ ]
+
+
+class JSONHighlighter(RegexHighlighter):
+ """Highlights JSON"""
+
+ # Captures the start and end of JSON strings, handling escaped quotes
+ JSON_STR = r"(?<![\\\w])(?P<str>b?\".*?(?<!\\)\")"
+ JSON_WHITESPACE = {" ", "\n", "\r", "\t"}
+
+ base_style = "json."
+ highlights = [
+ _combine_regex(
+ r"(?P<brace>[\{\[\(\)\]\}])",
+ r"\b(?P<bool_true>true)\b|\b(?P<bool_false>false)\b|\b(?P<null>null)\b",
+ r"(?P<number>(?<!\w)\-?[0-9]+\.?[0-9]*(e[\-\+]?\d+?)?\b|0x[0-9a-fA-F]*)",
+ JSON_STR,
+ ),
+ ]
+
+ def highlight(self, text: Text) -> None:
+ super().highlight(text)
+
+ # Additional work to handle highlighting JSON keys
+ plain = text.plain
+ append = text.spans.append
+ whitespace = self.JSON_WHITESPACE
+ for match in re.finditer(self.JSON_STR, plain):
+ start, end = match.span()
+ cursor = end
+ while cursor < len(plain):
+ char = plain[cursor]
+ cursor += 1
+ if char == ":":
+ append(Span(start, end, "json.key"))
+ elif char in whitespace:
+ continue
+ break
+
+
+class ISO8601Highlighter(RegexHighlighter):
+ """Highlights the ISO8601 date time strings.
+ Regex reference: https://www.oreilly.com/library/view/regular-expressions-cookbook/9781449327453/ch04s07.html
+ """
+
+ base_style = "iso8601."
+ highlights = [
+ #
+ # Dates
+ #
+ # Calendar month (e.g. 2008-08). The hyphen is required
+ r"^(?P<year>[0-9]{4})-(?P<month>1[0-2]|0[1-9])$",
+ # Calendar date w/o hyphens (e.g. 20080830)
+ r"^(?P<date>(?P<year>[0-9]{4})(?P<month>1[0-2]|0[1-9])(?P<day>3[01]|0[1-9]|[12][0-9]))$",
+ # Ordinal date (e.g. 2008-243). The hyphen is optional
+ r"^(?P<date>(?P<year>[0-9]{4})-?(?P<day>36[0-6]|3[0-5][0-9]|[12][0-9]{2}|0[1-9][0-9]|00[1-9]))$",
+ #
+ # Weeks
+ #
+ # Week of the year (e.g., 2008-W35). The hyphen is optional
+ r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9]))$",
+ # Week date (e.g., 2008-W35-6). The hyphens are optional
+ r"^(?P<date>(?P<year>[0-9]{4})-?W(?P<week>5[0-3]|[1-4][0-9]|0[1-9])-?(?P<day>[1-7]))$",
+ #
+ # Times
+ #
+ # Hours and minutes (e.g., 17:21). The colon is optional
+ r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):?(?P<minute>[0-5][0-9]))$",
+ # Hours, minutes, and seconds w/o colons (e.g., 172159)
+ r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))$",
+ # Time zone designator (e.g., Z, +07 or +07:00). The colons and the minutes are optional
+ r"^(?P<timezone>(Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?))$",
+ # Hours, minutes, and seconds with time zone designator (e.g., 17:21:59+07:00).
+ # All the colons are optional. The minutes in the time zone designator are also optional
+ r"^(?P<time>(?P<hour>2[0-3]|[01][0-9])(?P<minute>[0-5][0-9])(?P<second>[0-5][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9])(?::?(?:[0-5][0-9]))?)$",
+ #
+ # Date and Time
+ #
+ # Calendar date with hours, minutes, and seconds (e.g., 2008-08-30 17:21:59 or 20080830 172159).
+ # A space is required between the date and the time. The hyphens and colons are optional.
+ # This regex matches dates and times that specify some hyphens or colons but omit others.
+ # This does not follow ISO 8601
+ r"^(?P<date>(?P<year>[0-9]{4})(?P<hyphen>-)?(?P<month>1[0-2]|0[1-9])(?(hyphen)-)(?P<day>3[01]|0[1-9]|[12][0-9])) (?P<time>(?P<hour>2[0-3]|[01][0-9])(?(hyphen):)(?P<minute>[0-5][0-9])(?(hyphen):)(?P<second>[0-5][0-9]))$",
+ #
+ # XML Schema dates and times
+ #
+ # Date, with optional time zone (e.g., 2008-08-30 or 2008-08-30+07:00).
+ # Hyphens are required. This is the XML Schema 'date' type
+ r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
+ # Time, with optional fractional seconds and time zone (e.g., 01:45:36 or 01:45:36.123+07:00).
+ # There is no limit on the number of digits for the fractional seconds. This is the XML Schema 'time' type
+ r"^(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<frac>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
+ # Date and time, with optional fractional seconds and time zone (e.g., 2008-08-30T01:45:36 or 2008-08-30T01:45:36.123Z).
+ # This is the XML Schema 'dateTime' type
+ r"^(?P<date>(?P<year>-?(?:[1-9][0-9]*)?[0-9]{4})-(?P<month>1[0-2]|0[1-9])-(?P<day>3[01]|0[1-9]|[12][0-9]))T(?P<time>(?P<hour>2[0-3]|[01][0-9]):(?P<minute>[0-5][0-9]):(?P<second>[0-5][0-9])(?P<ms>\.[0-9]+)?)(?P<timezone>Z|[+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$",
+ ]
+
+
+if __name__ == "__main__": # pragma: no cover
+ from .console import Console
+
+ console = Console()
+ console.print("[bold green]hello world![/bold green]")
+ console.print("'[bold green]hello world![/bold green]'")
+
+ console.print(" /foo")
+ console.print("/foo/")
+ console.print("/foo/bar")
+ console.print("foo/bar/baz")
+
+ console.print("/foo/bar/baz?foo=bar+egg&egg=baz")
+ console.print("/foo/bar/baz/")
+ console.print("/foo/bar/baz/egg")
+ console.print("/foo/bar/baz/egg.py")
+ console.print("/foo/bar/baz/egg.py word")
+ console.print(" /foo/bar/baz/egg.py word")
+ console.print("foo /foo/bar/baz/egg.py word")
+ console.print("foo /foo/bar/ba._++z/egg+.py word")
+ console.print("https://example.org?foo=bar#header")
+
+ console.print(1234567.34)
+ console.print(1 / 2)
+ console.print(-1 / 123123123123)
+
+ console.print(
+ "127.0.1.1 bar 192.168.1.4 2001:0db8:85a3:0000:0000:8a2e:0370:7334 foo"
+ )
+ import json
+
+ console.print_json(json.dumps(obj={"name": "apple", "count": 1}), indent=None)
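+
+    # Sketch of subclassing RegexHighlighter. EmailHighlighter and the
+    # "example." style names are illustrative, not part of this module.
+    from pip._vendor.rich.theme import Theme
+
+    class EmailHighlighter(RegexHighlighter):
+        base_style = "example."
+        highlights = [r"(?P<email>[\w.+-]+@[\w-]+\.[\w.-]+)"]
+
+    email_console = Console(
+        highlighter=EmailHighlighter(),
+        theme=Theme({"example.email": "bold magenta"}),
+    )
+    email_console.print("Send bug reports to bugs@example.org")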
diff --git a/third_party/python/pip/pip/_vendor/rich/json.py b/third_party/python/pip/pip/_vendor/rich/json.py
new file mode 100644
index 0000000000..21b642ab8e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/json.py
@@ -0,0 +1,140 @@
+from json import loads, dumps
+from typing import Any, Callable, Optional, Union
+
+from .text import Text
+from .highlighter import JSONHighlighter, NullHighlighter
+
+
+class JSON:
+ """A renderable which pretty prints JSON.
+
+ Args:
+ json (str): JSON encoded data.
+ indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
+ highlight (bool, optional): Enable highlighting. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+        default (Callable, optional): A callable that converts values that cannot be encoded
+            into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+ """
+
+ def __init__(
+ self,
+ json: str,
+ indent: Union[None, int, str] = 2,
+ highlight: bool = True,
+ skip_keys: bool = False,
+ ensure_ascii: bool = False,
+ check_circular: bool = True,
+ allow_nan: bool = True,
+ default: Optional[Callable[[Any], Any]] = None,
+ sort_keys: bool = False,
+ ) -> None:
+ data = loads(json)
+ json = dumps(
+ data,
+ indent=indent,
+ skipkeys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+ highlighter = JSONHighlighter() if highlight else NullHighlighter()
+ self.text = highlighter(json)
+ self.text.no_wrap = True
+ self.text.overflow = None
+
+ @classmethod
+ def from_data(
+ cls,
+ data: Any,
+ indent: Union[None, int, str] = 2,
+ highlight: bool = True,
+ skip_keys: bool = False,
+ ensure_ascii: bool = False,
+ check_circular: bool = True,
+ allow_nan: bool = True,
+ default: Optional[Callable[[Any], Any]] = None,
+ sort_keys: bool = False,
+    ) -> "JSON":
+        """Encodes arbitrary data as JSON and returns a renderable.
+
+ Args:
+            data (Any): An object that may be encoded into JSON.
+ indent (Union[None, int, str], optional): Number of characters to indent by. Defaults to 2.
+ highlight (bool, optional): Enable highlighting. Defaults to True.
+ skip_keys (bool, optional): Skip keys not of a basic type. Defaults to False.
+ ensure_ascii (bool, optional): Escape all non-ascii characters. Defaults to False.
+ check_circular (bool, optional): Check for circular references. Defaults to True.
+ allow_nan (bool, optional): Allow NaN and Infinity values. Defaults to True.
+            default (Callable, optional): A callable that converts values that cannot be encoded
+                into something that can be JSON encoded. Defaults to None.
+ sort_keys (bool, optional): Sort dictionary keys. Defaults to False.
+
+ Returns:
+ JSON: New JSON object from the given data.
+ """
+ json_instance: "JSON" = cls.__new__(cls)
+ json = dumps(
+ data,
+ indent=indent,
+ skipkeys=skip_keys,
+ ensure_ascii=ensure_ascii,
+ check_circular=check_circular,
+ allow_nan=allow_nan,
+ default=default,
+ sort_keys=sort_keys,
+ )
+ highlighter = JSONHighlighter() if highlight else NullHighlighter()
+ json_instance.text = highlighter(json)
+ json_instance.text.no_wrap = True
+ json_instance.text.overflow = None
+ return json_instance
+
+ def __rich__(self) -> Text:
+ return self.text
+
+
+if __name__ == "__main__":  # pragma: no cover
+
+ import argparse
+ import sys
+
+ parser = argparse.ArgumentParser(description="Pretty print json")
+ parser.add_argument(
+ "path",
+ metavar="PATH",
+ help="path to file, or - for stdin",
+ )
+ parser.add_argument(
+ "-i",
+ "--indent",
+ metavar="SPACES",
+ type=int,
+ help="Number of spaces in an indent",
+ default=2,
+ )
+ args = parser.parse_args()
+
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+ error_console = Console(stderr=True)
+
+ try:
+ if args.path == "-":
+ json_data = sys.stdin.read()
+ else:
+ with open(args.path, "rt") as json_file:
+ json_data = json_file.read()
+ except Exception as error:
+ error_console.print(f"Unable to read {args.path!r}; {error}")
+ sys.exit(-1)
+
+ console.print(JSON(json_data, indent=args.indent), soft_wrap=True)
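+
+    # Sketch (not in the vendored file): JSON.from_data() skips the loads()
+    # step when you already hold a Python object rather than a JSON string.
+    console.print(JSON.from_data({"name": "apple", "count": 1}), soft_wrap=True)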
diff --git a/third_party/python/pip/pip/_vendor/rich/jupyter.py b/third_party/python/pip/pip/_vendor/rich/jupyter.py
new file mode 100644
index 0000000000..22f4d716ac
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/jupyter.py
@@ -0,0 +1,101 @@
+from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Sequence
+
+if TYPE_CHECKING:
+ from pip._vendor.rich.console import ConsoleRenderable
+
+from . import get_console
+from .segment import Segment
+from .terminal_theme import DEFAULT_TERMINAL_THEME
+
+
+JUPYTER_HTML_FORMAT = """\
+<pre style="white-space:pre;overflow-x:auto;line-height:normal;font-family:Menlo,'DejaVu Sans Mono',consolas,'Courier New',monospace">{code}</pre>
+"""
+
+
+class JupyterRenderable:
+ """A shim to write html to Jupyter notebook."""
+
+ def __init__(self, html: str, text: str) -> None:
+ self.html = html
+ self.text = text
+
+ def _repr_mimebundle_(
+ self, include: Sequence[str], exclude: Sequence[str], **kwargs: Any
+ ) -> Dict[str, str]:
+ data = {"text/plain": self.text, "text/html": self.html}
+ if include:
+ data = {k: v for (k, v) in data.items() if k in include}
+ if exclude:
+ data = {k: v for (k, v) in data.items() if k not in exclude}
+ return data
+
+
+class JupyterMixin:
+    """Add to a Rich renderable to make it render in Jupyter notebook."""
+
+ __slots__ = ()
+
+ def _repr_mimebundle_(
+ self: "ConsoleRenderable",
+ include: Sequence[str],
+ exclude: Sequence[str],
+ **kwargs: Any,
+ ) -> Dict[str, str]:
+ console = get_console()
+ segments = list(console.render(self, console.options))
+ html = _render_segments(segments)
+ text = console._render_buffer(segments)
+ data = {"text/plain": text, "text/html": html}
+ if include:
+ data = {k: v for (k, v) in data.items() if k in include}
+ if exclude:
+ data = {k: v for (k, v) in data.items() if k not in exclude}
+ return data
+
+
+def _render_segments(segments: Iterable[Segment]) -> str:
+ def escape(text: str) -> str:
+ """Escape html."""
+ return text.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;")
+
+ fragments: List[str] = []
+ append_fragment = fragments.append
+ theme = DEFAULT_TERMINAL_THEME
+ for text, style, control in Segment.simplify(segments):
+ if control:
+ continue
+ text = escape(text)
+ if style:
+ rule = style.get_html_style(theme)
+ text = f'<span style="{rule}">{text}</span>' if rule else text
+ if style.link:
+ text = f'<a href="{style.link}" target="_blank">{text}</a>'
+ append_fragment(text)
+
+ code = "".join(fragments)
+ html = JUPYTER_HTML_FORMAT.format(code=code)
+
+ return html
+
+
+def display(segments: Iterable[Segment], text: str) -> None:
+ """Render segments to Jupyter."""
+ html = _render_segments(segments)
+ jupyter_renderable = JupyterRenderable(html, text)
+ try:
+ from IPython.display import display as ipython_display
+
+ ipython_display(jupyter_renderable)
+ except ModuleNotFoundError:
+ # Handle the case where the Console has force_jupyter=True,
+ # but IPython is not installed.
+ pass
+
+
+def print(*args: Any, **kwargs: Any) -> None:
+ """Proxy for Console print."""
+ console = get_console()
+ return console.print(*args, **kwargs)
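+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Usage sketch (not part of the vendored file): render segments through
+    # the Jupyter shim. Outside a notebook this is a no-op, since display()
+    # swallows the missing-IPython case.
+    from pip._vendor.rich.text import Text
+
+    console = get_console()
+    message = "hello from the jupyter shim"
+    display(list(console.render(Text(message))), message)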
diff --git a/third_party/python/pip/pip/_vendor/rich/layout.py b/third_party/python/pip/pip/_vendor/rich/layout.py
new file mode 100644
index 0000000000..849356ea9a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/layout.py
@@ -0,0 +1,443 @@
+from abc import ABC, abstractmethod
+from itertools import islice
+from operator import itemgetter
+from threading import RLock
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
+
+from ._ratio import ratio_resolve
+from .align import Align
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .highlighter import ReprHighlighter
+from .panel import Panel
+from .pretty import Pretty
+from .region import Region
+from .repr import Result, rich_repr
+from .segment import Segment
+from .style import StyleType
+
+if TYPE_CHECKING:
+ from pip._vendor.rich.tree import Tree
+
+
+class LayoutRender(NamedTuple):
+ """An individual layout render."""
+
+ region: Region
+ render: List[List[Segment]]
+
+
+RegionMap = Dict["Layout", Region]
+RenderMap = Dict["Layout", LayoutRender]
+
+
+class LayoutError(Exception):
+ """Layout related error."""
+
+
+class NoSplitter(LayoutError):
+ """Requested splitter does not exist."""
+
+
+class _Placeholder:
+ """An internal renderable used as a Layout placeholder."""
+
+ highlighter = ReprHighlighter()
+
+ def __init__(self, layout: "Layout", style: StyleType = "") -> None:
+ self.layout = layout
+ self.style = style
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ width = options.max_width
+ height = options.height or options.size.height
+ layout = self.layout
+ title = (
+ f"{layout.name!r} ({width} x {height})"
+ if layout.name
+ else f"({width} x {height})"
+ )
+ yield Panel(
+ Align.center(Pretty(layout), vertical="middle"),
+ style=self.style,
+ title=self.highlighter(title),
+ border_style="blue",
+ height=height,
+ )
+
+
+class Splitter(ABC):
+ """Base class for a splitter."""
+
+ name: str = ""
+
+ @abstractmethod
+ def get_tree_icon(self) -> str:
+ """Get the icon (emoji) used in layout.tree"""
+
+ @abstractmethod
+ def divide(
+ self, children: Sequence["Layout"], region: Region
+ ) -> Iterable[Tuple["Layout", Region]]:
+ """Divide a region amongst several child layouts.
+
+ Args:
+ children (Sequence(Layout)): A number of child layouts.
+ region (Region): A rectangular region to divide.
+ """
+
+
+class RowSplitter(Splitter):
+    """Split a layout region into rows."""
+
+ name = "row"
+
+ def get_tree_icon(self) -> str:
+ return "[layout.tree.row]⬌"
+
+ def divide(
+ self, children: Sequence["Layout"], region: Region
+ ) -> Iterable[Tuple["Layout", Region]]:
+ x, y, width, height = region
+ render_widths = ratio_resolve(width, children)
+ offset = 0
+ _Region = Region
+ for child, child_width in zip(children, render_widths):
+ yield child, _Region(x + offset, y, child_width, height)
+ offset += child_width
+
+
+class ColumnSplitter(Splitter):
+    """Split a layout region into columns."""
+
+ name = "column"
+
+ def get_tree_icon(self) -> str:
+ return "[layout.tree.column]⬍"
+
+ def divide(
+ self, children: Sequence["Layout"], region: Region
+ ) -> Iterable[Tuple["Layout", Region]]:
+ x, y, width, height = region
+ render_heights = ratio_resolve(height, children)
+ offset = 0
+ _Region = Region
+ for child, child_height in zip(children, render_heights):
+ yield child, _Region(x, y + offset, width, child_height)
+ offset += child_height
+
+
+@rich_repr
+class Layout:
+    """A renderable to divide a fixed height into rows or columns.
+
+ Args:
+ renderable (RenderableType, optional): Renderable content, or None for placeholder. Defaults to None.
+ name (str, optional): Optional identifier for Layout. Defaults to None.
+ size (int, optional): Optional fixed size of layout. Defaults to None.
+ minimum_size (int, optional): Minimum size of layout. Defaults to 1.
+ ratio (int, optional): Optional ratio for flexible layout. Defaults to 1.
+ visible (bool, optional): Visibility of layout. Defaults to True.
+ """
+
+ splitters = {"row": RowSplitter, "column": ColumnSplitter}
+
+ def __init__(
+ self,
+ renderable: Optional[RenderableType] = None,
+ *,
+ name: Optional[str] = None,
+ size: Optional[int] = None,
+ minimum_size: int = 1,
+ ratio: int = 1,
+ visible: bool = True,
+ ) -> None:
+ self._renderable = renderable or _Placeholder(self)
+ self.size = size
+ self.minimum_size = minimum_size
+ self.ratio = ratio
+ self.name = name
+ self.visible = visible
+ self.splitter: Splitter = self.splitters["column"]()
+ self._children: List[Layout] = []
+ self._render_map: RenderMap = {}
+ self._lock = RLock()
+
+ def __rich_repr__(self) -> Result:
+ yield "name", self.name, None
+ yield "size", self.size, None
+ yield "minimum_size", self.minimum_size, 1
+ yield "ratio", self.ratio, 1
+
+ @property
+ def renderable(self) -> RenderableType:
+ """Layout renderable."""
+ return self if self._children else self._renderable
+
+ @property
+ def children(self) -> List["Layout"]:
+ """Gets (visible) layout children."""
+ return [child for child in self._children if child.visible]
+
+ @property
+ def map(self) -> RenderMap:
+ """Get a map of the last render."""
+ return self._render_map
+
+ def get(self, name: str) -> Optional["Layout"]:
+ """Get a named layout, or None if it doesn't exist.
+
+ Args:
+ name (str): Name of layout.
+
+ Returns:
+ Optional[Layout]: Layout instance or None if no layout was found.
+ """
+ if self.name == name:
+ return self
+ else:
+ for child in self._children:
+ named_layout = child.get(name)
+ if named_layout is not None:
+ return named_layout
+ return None
+
+ def __getitem__(self, name: str) -> "Layout":
+ layout = self.get(name)
+ if layout is None:
+ raise KeyError(f"No layout with name {name!r}")
+ return layout
+
+ @property
+ def tree(self) -> "Tree":
+ """Get a tree renderable to show layout structure."""
+ from pip._vendor.rich.styled import Styled
+ from pip._vendor.rich.table import Table
+ from pip._vendor.rich.tree import Tree
+
+ def summary(layout: "Layout") -> Table:
+
+ icon = layout.splitter.get_tree_icon()
+
+ table = Table.grid(padding=(0, 1, 0, 0))
+
+ text: RenderableType = (
+ Pretty(layout) if layout.visible else Styled(Pretty(layout), "dim")
+ )
+ table.add_row(icon, text)
+ _summary = table
+ return _summary
+
+ layout = self
+ tree = Tree(
+ summary(layout),
+ guide_style=f"layout.tree.{layout.splitter.name}",
+ highlight=True,
+ )
+
+ def recurse(tree: "Tree", layout: "Layout") -> None:
+ for child in layout._children:
+ recurse(
+ tree.add(
+ summary(child),
+ guide_style=f"layout.tree.{child.splitter.name}",
+ ),
+ child,
+ )
+
+ recurse(tree, self)
+ return tree
+
+ def split(
+ self,
+ *layouts: Union["Layout", RenderableType],
+ splitter: Union[Splitter, str] = "column",
+ ) -> None:
+ """Split the layout in to multiple sub-layouts.
+
+ Args:
+ *layouts (Layout): Positional arguments should be (sub) Layout instances.
+ splitter (Union[Splitter, str]): Splitter instance or name of splitter.
+ """
+ _layouts = [
+ layout if isinstance(layout, Layout) else Layout(layout)
+ for layout in layouts
+ ]
+ try:
+ self.splitter = (
+ splitter
+ if isinstance(splitter, Splitter)
+ else self.splitters[splitter]()
+ )
+ except KeyError:
+ raise NoSplitter(f"No splitter called {splitter!r}")
+ self._children[:] = _layouts
+
+ def add_split(self, *layouts: Union["Layout", RenderableType]) -> None:
+ """Add a new layout(s) to existing split.
+
+ Args:
+ *layouts (Union[Layout, RenderableType]): Positional arguments should be renderables or (sub) Layout instances.
+
+ """
+ _layouts = (
+ layout if isinstance(layout, Layout) else Layout(layout)
+ for layout in layouts
+ )
+ self._children.extend(_layouts)
+
+ def split_row(self, *layouts: Union["Layout", RenderableType]) -> None:
+ """Split the layout in to a row (layouts side by side).
+
+ Args:
+ *layouts (Layout): Positional arguments should be (sub) Layout instances.
+ """
+ self.split(*layouts, splitter="row")
+
+ def split_column(self, *layouts: Union["Layout", RenderableType]) -> None:
+ """Split the layout in to a column (layouts stacked on top of each other).
+
+ Args:
+ *layouts (Layout): Positional arguments should be (sub) Layout instances.
+ """
+ self.split(*layouts, splitter="column")
+
+ def unsplit(self) -> None:
+ """Reset splits to initial state."""
+ del self._children[:]
+
+ def update(self, renderable: RenderableType) -> None:
+ """Update renderable.
+
+ Args:
+ renderable (RenderableType): New renderable object.
+ """
+ with self._lock:
+ self._renderable = renderable
+
+ def refresh_screen(self, console: "Console", layout_name: str) -> None:
+ """Refresh a sub-layout.
+
+ Args:
+ console (Console): Console instance where Layout is to be rendered.
+ layout_name (str): Name of layout.
+ """
+ with self._lock:
+ layout = self[layout_name]
+ region, _lines = self._render_map[layout]
+ (x, y, width, height) = region
+ lines = console.render_lines(
+ layout, console.options.update_dimensions(width, height)
+ )
+ self._render_map[layout] = LayoutRender(region, lines)
+ console.update_screen_lines(lines, x, y)
+
+ def _make_region_map(self, width: int, height: int) -> RegionMap:
+ """Create a dict that maps layout on to Region."""
+ stack: List[Tuple[Layout, Region]] = [(self, Region(0, 0, width, height))]
+ push = stack.append
+ pop = stack.pop
+ layout_regions: List[Tuple[Layout, Region]] = []
+ append_layout_region = layout_regions.append
+ while stack:
+ append_layout_region(pop())
+ layout, region = layout_regions[-1]
+ children = layout.children
+ if children:
+ for child_and_region in layout.splitter.divide(children, region):
+ push(child_and_region)
+
+ region_map = {
+ layout: region
+ for layout, region in sorted(layout_regions, key=itemgetter(1))
+ }
+ return region_map
+
+ def render(self, console: Console, options: ConsoleOptions) -> RenderMap:
+ """Render the sub_layouts.
+
+ Args:
+ console (Console): Console instance.
+ options (ConsoleOptions): Console options.
+
+ Returns:
+            RenderMap: A dict that maps Layout onto a tuple of (Region, lines).
+ """
+ render_width = options.max_width
+ render_height = options.height or console.height
+ region_map = self._make_region_map(render_width, render_height)
+ layout_regions = [
+ (layout, region)
+ for layout, region in region_map.items()
+ if not layout.children
+ ]
+ render_map: Dict["Layout", "LayoutRender"] = {}
+ render_lines = console.render_lines
+ update_dimensions = options.update_dimensions
+
+ for layout, region in layout_regions:
+ lines = render_lines(
+ layout.renderable, update_dimensions(region.width, region.height)
+ )
+ render_map[layout] = LayoutRender(region, lines)
+ return render_map
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ with self._lock:
+ width = options.max_width or console.width
+ height = options.height or console.height
+ render_map = self.render(console, options.update_dimensions(width, height))
+ self._render_map = render_map
+ layout_lines: List[List[Segment]] = [[] for _ in range(height)]
+ _islice = islice
+ for (region, lines) in render_map.values():
+ _x, y, _layout_width, layout_height = region
+ for row, line in zip(
+ _islice(layout_lines, y, y + layout_height), lines
+ ):
+ row.extend(line)
+
+ new_line = Segment.line()
+ for layout_row in layout_lines:
+ yield from layout_row
+ yield new_line
+
+
+if __name__ == "__main__":
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+ layout = Layout()
+
+ layout.split_column(
+ Layout(name="header", size=3),
+ Layout(ratio=1, name="main"),
+ Layout(size=10, name="footer"),
+ )
+
+ layout["main"].split_row(Layout(name="side"), Layout(name="body", ratio=2))
+
+ layout["body"].split_row(Layout(name="content", ratio=2), Layout(name="s2"))
+
+ layout["s2"].split_column(
+ Layout(name="top"), Layout(name="middle"), Layout(name="bottom")
+ )
+
+ layout["side"].split_column(Layout(layout.tree, name="left1"), Layout(name="left2"))
+
+ layout["content"].update("foo")
+
+ console.print(layout)
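+
+    # A minimal sketch of the named-lookup API used above: get() returns
+    # None for unknown names, while indexing a missing name raises KeyError.
+    assert layout.get("no-such-layout") is None
+    layout["footer"].update(Panel("footer content", title="footer"))
+    console.print(layout)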
diff --git a/third_party/python/pip/pip/_vendor/rich/live.py b/third_party/python/pip/pip/_vendor/rich/live.py
new file mode 100644
index 0000000000..e635fe5c97
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/live.py
@@ -0,0 +1,373 @@
+import sys
+from threading import Event, RLock, Thread
+from types import TracebackType
+from typing import IO, Any, Callable, List, Optional, TextIO, Type, cast
+
+from . import get_console
+from .console import Console, ConsoleRenderable, RenderableType, RenderHook
+from .control import Control
+from .file_proxy import FileProxy
+from .jupyter import JupyterMixin
+from .live_render import LiveRender, VerticalOverflowMethod
+from .screen import Screen
+from .text import Text
+
+
+class _RefreshThread(Thread):
+ """A thread that calls refresh() at regular intervals."""
+
+ def __init__(self, live: "Live", refresh_per_second: float) -> None:
+ self.live = live
+ self.refresh_per_second = refresh_per_second
+ self.done = Event()
+ super().__init__(daemon=True)
+
+ def stop(self) -> None:
+ self.done.set()
+
+ def run(self) -> None:
+ while not self.done.wait(1 / self.refresh_per_second):
+ with self.live._lock:
+ if not self.done.is_set():
+ self.live.refresh()
+
+
+class Live(JupyterMixin, RenderHook):
+ """Renders an auto-updating live display of any given renderable.
+
+ Args:
+ renderable (RenderableType, optional): The renderable to live display. Defaults to displaying nothing.
+        console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
+        screen (bool, optional): Enable alternate screen mode. Defaults to False.
+        auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()` or `update()` with refresh flag. Defaults to True.
+ refresh_per_second (float, optional): Number of times per second to refresh the live display. Defaults to 4.
+ transient (bool, optional): Clear the renderable on exit (has no effect when screen=True). Defaults to False.
+ redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
+ redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True.
+ vertical_overflow (VerticalOverflowMethod, optional): How to handle renderable when it is too tall for the console. Defaults to "ellipsis".
+ get_renderable (Callable[[], RenderableType], optional): Optional callable to get renderable. Defaults to None.
+ """
+
+ def __init__(
+ self,
+ renderable: Optional[RenderableType] = None,
+ *,
+ console: Optional[Console] = None,
+ screen: bool = False,
+ auto_refresh: bool = True,
+ refresh_per_second: float = 4,
+ transient: bool = False,
+ redirect_stdout: bool = True,
+ redirect_stderr: bool = True,
+ vertical_overflow: VerticalOverflowMethod = "ellipsis",
+ get_renderable: Optional[Callable[[], RenderableType]] = None,
+ ) -> None:
+ assert refresh_per_second > 0, "refresh_per_second must be > 0"
+ self._renderable = renderable
+ self.console = console if console is not None else get_console()
+ self._screen = screen
+ self._alt_screen = False
+
+ self._redirect_stdout = redirect_stdout
+ self._redirect_stderr = redirect_stderr
+ self._restore_stdout: Optional[IO[str]] = None
+ self._restore_stderr: Optional[IO[str]] = None
+
+ self._lock = RLock()
+ self.ipy_widget: Optional[Any] = None
+ self.auto_refresh = auto_refresh
+ self._started: bool = False
+ self.transient = True if screen else transient
+
+ self._refresh_thread: Optional[_RefreshThread] = None
+ self.refresh_per_second = refresh_per_second
+
+ self.vertical_overflow = vertical_overflow
+ self._get_renderable = get_renderable
+ self._live_render = LiveRender(
+ self.get_renderable(), vertical_overflow=vertical_overflow
+ )
+
+ @property
+ def is_started(self) -> bool:
+ """Check if live display has been started."""
+ return self._started
+
+ def get_renderable(self) -> RenderableType:
+ renderable = (
+ self._get_renderable()
+ if self._get_renderable is not None
+ else self._renderable
+ )
+ return renderable or ""
+
+ def start(self, refresh: bool = False) -> None:
+ """Start live rendering display.
+
+ Args:
+ refresh (bool, optional): Also refresh. Defaults to False.
+ """
+ with self._lock:
+ if self._started:
+ return
+ self.console.set_live(self)
+ self._started = True
+ if self._screen:
+ self._alt_screen = self.console.set_alt_screen(True)
+ self.console.show_cursor(False)
+ self._enable_redirect_io()
+ self.console.push_render_hook(self)
+ if refresh:
+ try:
+ self.refresh()
+ except Exception:
+ # If refresh fails, we want to stop the redirection of sys.stderr,
+ # so the error stacktrace is properly displayed in the terminal.
+ # (or, if the code that calls Rich captures the exception and wants to display something,
+ # let this be displayed in the terminal).
+ self.stop()
+ raise
+ if self.auto_refresh:
+ self._refresh_thread = _RefreshThread(self, self.refresh_per_second)
+ self._refresh_thread.start()
+
+ def stop(self) -> None:
+ """Stop live rendering display."""
+ with self._lock:
+ if not self._started:
+ return
+ self.console.clear_live()
+ self._started = False
+
+ if self.auto_refresh and self._refresh_thread is not None:
+ self._refresh_thread.stop()
+ self._refresh_thread = None
+            # allow the final frame to render fully, even if it would overflow
+ self.vertical_overflow = "visible"
+ with self.console:
+ try:
+ if not self._alt_screen and not self.console.is_jupyter:
+ self.refresh()
+ finally:
+ self._disable_redirect_io()
+ self.console.pop_render_hook()
+ if not self._alt_screen and self.console.is_terminal:
+ self.console.line()
+ self.console.show_cursor(True)
+ if self._alt_screen:
+ self.console.set_alt_screen(False)
+
+ if self.transient and not self._alt_screen:
+ self.console.control(self._live_render.restore_cursor())
+ if self.ipy_widget is not None and self.transient:
+ self.ipy_widget.close() # pragma: no cover
+
+ def __enter__(self) -> "Live":
+ self.start(refresh=self._renderable is not None)
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.stop()
+
+ def _enable_redirect_io(self) -> None:
+ """Enable redirecting of stdout / stderr."""
+ if self.console.is_terminal or self.console.is_jupyter:
+ if self._redirect_stdout and not isinstance(sys.stdout, FileProxy):
+ self._restore_stdout = sys.stdout
+ sys.stdout = cast("TextIO", FileProxy(self.console, sys.stdout))
+ if self._redirect_stderr and not isinstance(sys.stderr, FileProxy):
+ self._restore_stderr = sys.stderr
+ sys.stderr = cast("TextIO", FileProxy(self.console, sys.stderr))
+
+ def _disable_redirect_io(self) -> None:
+ """Disable redirecting of stdout / stderr."""
+ if self._restore_stdout:
+ sys.stdout = cast("TextIO", self._restore_stdout)
+ self._restore_stdout = None
+ if self._restore_stderr:
+ sys.stderr = cast("TextIO", self._restore_stderr)
+ self._restore_stderr = None
+
+ @property
+ def renderable(self) -> RenderableType:
+ """Get the renderable that is being displayed
+
+ Returns:
+ RenderableType: Displayed renderable.
+ """
+ renderable = self.get_renderable()
+ return Screen(renderable) if self._alt_screen else renderable
+
+ def update(self, renderable: RenderableType, *, refresh: bool = False) -> None:
+ """Update the renderable that is being displayed
+
+ Args:
+ renderable (RenderableType): New renderable to use.
+ refresh (bool, optional): Refresh the display. Defaults to False.
+ """
+ with self._lock:
+ self._renderable = renderable
+ if refresh:
+ self.refresh()
+
+ def refresh(self) -> None:
+ """Update the display of the Live Render."""
+ with self._lock:
+ self._live_render.set_renderable(self.renderable)
+ if self.console.is_jupyter: # pragma: no cover
+ try:
+ from IPython.display import display
+ from ipywidgets import Output
+ except ImportError:
+ import warnings
+
+ warnings.warn('install "ipywidgets" for Jupyter support')
+ else:
+ if self.ipy_widget is None:
+ self.ipy_widget = Output()
+ display(self.ipy_widget)
+
+ with self.ipy_widget:
+ self.ipy_widget.clear_output(wait=True)
+ self.console.print(self._live_render.renderable)
+ elif self.console.is_terminal and not self.console.is_dumb_terminal:
+ with self.console:
+ self.console.print(Control())
+ elif (
+ not self._started and not self.transient
+ ): # if it is finished allow files or dumb-terminals to see final result
+ with self.console:
+ self.console.print(Control())
+
+ def process_renderables(
+ self, renderables: List[ConsoleRenderable]
+ ) -> List[ConsoleRenderable]:
+ """Process renderables to restore cursor and display progress."""
+ self._live_render.vertical_overflow = self.vertical_overflow
+ if self.console.is_interactive:
+ # lock needs acquiring as user can modify live_render renderable at any time unlike in Progress.
+ with self._lock:
+ reset = (
+ Control.home()
+ if self._alt_screen
+ else self._live_render.position_cursor()
+ )
+ renderables = [reset, *renderables, self._live_render]
+ elif (
+ not self._started and not self.transient
+        ):  # if it is finished, render the final output for files or dumb terminals
+ renderables = [*renderables, self._live_render]
+
+ return renderables
+
+
+if __name__ == "__main__": # pragma: no cover
+ import random
+ import time
+ from itertools import cycle
+ from typing import Dict, List, Tuple
+
+ from .align import Align
+ from .console import Console
+ from .live import Live as Live
+ from .panel import Panel
+ from .rule import Rule
+ from .syntax import Syntax
+ from .table import Table
+
+ console = Console()
+
+ syntax = Syntax(
+ '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for last value."""
+ iter_values = iter(values)
+ try:
+ previous_value = next(iter_values)
+ except StopIteration:
+ return
+ for value in iter_values:
+ yield False, previous_value
+ previous_value = value
+ yield True, previous_value''',
+ "python",
+ line_numbers=True,
+ )
+
+ table = Table("foo", "bar", "baz")
+ table.add_row("1", "2", "3")
+
+ progress_renderables = [
+ "You can make the terminal shorter and taller to see the live table hide"
+ "Text may be printed while the progress bars are rendering.",
+ Panel("In fact, [i]any[/i] renderable will work"),
+ "Such as [magenta]tables[/]...",
+ table,
+ "Pretty printed structures...",
+ {"type": "example", "text": "Pretty printed"},
+ "Syntax...",
+ syntax,
+ Rule("Give it a try!"),
+ ]
+
+ examples = cycle(progress_renderables)
+
+ exchanges = [
+ "SGD",
+ "MYR",
+ "EUR",
+ "USD",
+ "AUD",
+ "JPY",
+ "CNH",
+ "HKD",
+ "CAD",
+ "INR",
+ "DKK",
+ "GBP",
+ "RUB",
+ "NZD",
+ "MXN",
+ "IDR",
+ "TWD",
+ "THB",
+ "VND",
+ ]
+ with Live(console=console) as live_table:
+ exchange_rate_dict: Dict[Tuple[str, str], float] = {}
+
+ for index in range(100):
+ select_exchange = exchanges[index % len(exchanges)]
+
+ for exchange in exchanges:
+ if exchange == select_exchange:
+ continue
+ time.sleep(0.4)
+ if random.randint(0, 10) < 1:
+ console.log(next(examples))
+ exchange_rate_dict[(select_exchange, exchange)] = 200 / (
+ (random.random() * 320) + 1
+ )
+ if len(exchange_rate_dict) > len(exchanges) - 1:
+ exchange_rate_dict.pop(list(exchange_rate_dict.keys())[0])
+ table = Table(title="Exchange Rates")
+
+ table.add_column("Source Currency")
+ table.add_column("Destination Currency")
+ table.add_column("Exchange Rate")
+
+ for ((source, dest), exchange_rate) in exchange_rate_dict.items():
+ table.add_row(
+ source,
+ dest,
+ Text(
+ f"{exchange_rate:.4f}",
+ style="red" if exchange_rate < 1.0 else "green",
+ ),
+ )
+
+ live_table.update(Align.center(table))
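+
+    # A minimal sketch of the get_renderable callback: Live calls it on
+    # every refresh, so a closure can drive the display without explicit
+    # update() calls.
+    counter = {"ticks": 0}
+
+    def tick_status() -> str:
+        return f"refreshed {counter['ticks']} times"
+
+    with Live(get_renderable=tick_status, refresh_per_second=8):
+        for _ in range(20):
+            counter["ticks"] += 1
+            time.sleep(0.1)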
diff --git a/third_party/python/pip/pip/_vendor/rich/live_render.py b/third_party/python/pip/pip/_vendor/rich/live_render.py
new file mode 100644
index 0000000000..b90fbf7f35
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/live_render.py
@@ -0,0 +1,113 @@
+import sys
+from typing import Optional, Tuple
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from pip._vendor.typing_extensions import Literal # pragma: no cover
+
+
+from ._loop import loop_last
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .control import Control
+from .segment import ControlType, Segment
+from .style import StyleType
+from .text import Text
+
+VerticalOverflowMethod = Literal["crop", "ellipsis", "visible"]
+
+
+class LiveRender:
+ """Creates a renderable that may be updated.
+
+ Args:
+ renderable (RenderableType): Any renderable object.
+ style (StyleType, optional): An optional style to apply to the renderable. Defaults to "".
+ """
+
+ def __init__(
+ self,
+ renderable: RenderableType,
+ style: StyleType = "",
+ vertical_overflow: VerticalOverflowMethod = "ellipsis",
+ ) -> None:
+ self.renderable = renderable
+ self.style = style
+ self.vertical_overflow = vertical_overflow
+ self._shape: Optional[Tuple[int, int]] = None
+
+ def set_renderable(self, renderable: RenderableType) -> None:
+ """Set a new renderable.
+
+ Args:
+ renderable (RenderableType): Any renderable object, including str.
+ """
+ self.renderable = renderable
+
+ def position_cursor(self) -> Control:
+ """Get control codes to move cursor to beginning of live render.
+
+ Returns:
+ Control: A control instance that may be printed.
+ """
+ if self._shape is not None:
+ _, height = self._shape
+ return Control(
+ ControlType.CARRIAGE_RETURN,
+ (ControlType.ERASE_IN_LINE, 2),
+ *(
+ (
+ (ControlType.CURSOR_UP, 1),
+ (ControlType.ERASE_IN_LINE, 2),
+ )
+ * (height - 1)
+ )
+ )
+ return Control()
+
+ def restore_cursor(self) -> Control:
+ """Get control codes to clear the render and restore the cursor to its previous position.
+
+ Returns:
+ Control: A Control instance that may be printed.
+ """
+ if self._shape is not None:
+ _, height = self._shape
+ return Control(
+ ControlType.CARRIAGE_RETURN,
+ *((ControlType.CURSOR_UP, 1), (ControlType.ERASE_IN_LINE, 2)) * height
+ )
+ return Control()
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+
+ renderable = self.renderable
+ style = console.get_style(self.style)
+ lines = console.render_lines(renderable, options, style=style, pad=False)
+ shape = Segment.get_shape(lines)
+
+ _, height = shape
+ if height > options.size.height:
+ if self.vertical_overflow == "crop":
+ lines = lines[: options.size.height]
+ shape = Segment.get_shape(lines)
+ elif self.vertical_overflow == "ellipsis":
+ lines = lines[: (options.size.height - 1)]
+ overflow_text = Text(
+ "...",
+ overflow="crop",
+ justify="center",
+ end="",
+ style="live.ellipsis",
+ )
+ lines.append(list(console.render(overflow_text)))
+ shape = Segment.get_shape(lines)
+ self._shape = shape
+
+ new_line = Segment.line()
+ for last, line in loop_last(lines):
+ yield from line
+ if not last:
+ yield new_line
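+
+
+if __name__ == "__main__":  # pragma: no cover
+    # A minimal sketch of the control codes above: draw a frame, move the
+    # cursor back with position_cursor(), then draw a replacement in place.
+    console = Console()
+    live_render = LiveRender("frame one\nsecond line")
+    console.print(live_render)
+    console.control(live_render.position_cursor())
+    live_render.set_renderable("frame two\nsecond line")
+    console.print(live_render)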
diff --git a/third_party/python/pip/pip/_vendor/rich/logging.py b/third_party/python/pip/pip/_vendor/rich/logging.py
new file mode 100644
index 0000000000..91368dda78
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/logging.py
@@ -0,0 +1,289 @@
+import logging
+from datetime import datetime
+from logging import Handler, LogRecord
+from pathlib import Path
+from types import ModuleType
+from typing import ClassVar, Iterable, List, Optional, Type, Union
+
+from pip._vendor.rich._null_file import NullFile
+
+from . import get_console
+from ._log_render import FormatTimeCallable, LogRender
+from .console import Console, ConsoleRenderable
+from .highlighter import Highlighter, ReprHighlighter
+from .text import Text
+from .traceback import Traceback
+
+
+class RichHandler(Handler):
+ """A logging handler that renders output with Rich. The time / level / message and file are displayed in columns.
+ The level is color coded, and the message is syntax highlighted.
+
+ Note:
+ Be careful when enabling console markup in log messages if you have configured logging for libraries not
+ under your control. If a dependency writes messages containing square brackets, it may not produce the intended output.
+
+ Args:
+ level (Union[int, str], optional): Log level. Defaults to logging.NOTSET.
+ console (:class:`~rich.console.Console`, optional): Optional console instance to write logs.
+ Default will use a global console instance writing to stdout.
+ show_time (bool, optional): Show a column for the time. Defaults to True.
+ omit_repeated_times (bool, optional): Omit repetition of the same time. Defaults to True.
+ show_level (bool, optional): Show a column for the level. Defaults to True.
+ show_path (bool, optional): Show the path to the original log call. Defaults to True.
+ enable_link_path (bool, optional): Enable terminal link of path column to file. Defaults to True.
+ highlighter (Highlighter, optional): Highlighter to style log messages, or None to use ReprHighlighter. Defaults to None.
+ markup (bool, optional): Enable console markup in log messages. Defaults to False.
+ rich_tracebacks (bool, optional): Enable rich tracebacks with syntax highlighting and formatting. Defaults to False.
+ tracebacks_width (Optional[int], optional): Number of characters used to render tracebacks, or None for full width. Defaults to None.
+        tracebacks_extra_lines (int, optional): Additional lines of code to render tracebacks. Defaults to 3.
+        tracebacks_theme (str, optional): Override pygments theme used in traceback.
+        tracebacks_word_wrap (bool, optional): Enable word wrapping of long traceback lines. Defaults to True.
+ tracebacks_show_locals (bool, optional): Enable display of locals in tracebacks. Defaults to False.
+ tracebacks_suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to 10.
+ locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+        log_time_format (Union[str, FormatTimeCallable], optional): If ``show_time`` is enabled, either string for strftime or callable that formats the time. Defaults to "[%x %X]".
+ keywords (List[str], optional): List of words to highlight instead of ``RichHandler.KEYWORDS``.
+ """
+
+ KEYWORDS: ClassVar[Optional[List[str]]] = [
+ "GET",
+ "POST",
+ "HEAD",
+ "PUT",
+ "DELETE",
+ "OPTIONS",
+ "TRACE",
+ "PATCH",
+ ]
+ HIGHLIGHTER_CLASS: ClassVar[Type[Highlighter]] = ReprHighlighter
+
+ def __init__(
+ self,
+ level: Union[int, str] = logging.NOTSET,
+ console: Optional[Console] = None,
+ *,
+ show_time: bool = True,
+ omit_repeated_times: bool = True,
+ show_level: bool = True,
+ show_path: bool = True,
+ enable_link_path: bool = True,
+ highlighter: Optional[Highlighter] = None,
+ markup: bool = False,
+ rich_tracebacks: bool = False,
+ tracebacks_width: Optional[int] = None,
+ tracebacks_extra_lines: int = 3,
+ tracebacks_theme: Optional[str] = None,
+ tracebacks_word_wrap: bool = True,
+ tracebacks_show_locals: bool = False,
+ tracebacks_suppress: Iterable[Union[str, ModuleType]] = (),
+ locals_max_length: int = 10,
+ locals_max_string: int = 80,
+ log_time_format: Union[str, FormatTimeCallable] = "[%x %X]",
+ keywords: Optional[List[str]] = None,
+ ) -> None:
+ super().__init__(level=level)
+ self.console = console or get_console()
+ self.highlighter = highlighter or self.HIGHLIGHTER_CLASS()
+ self._log_render = LogRender(
+ show_time=show_time,
+ show_level=show_level,
+ show_path=show_path,
+ time_format=log_time_format,
+ omit_repeated_times=omit_repeated_times,
+ level_width=None,
+ )
+ self.enable_link_path = enable_link_path
+ self.markup = markup
+ self.rich_tracebacks = rich_tracebacks
+ self.tracebacks_width = tracebacks_width
+ self.tracebacks_extra_lines = tracebacks_extra_lines
+ self.tracebacks_theme = tracebacks_theme
+ self.tracebacks_word_wrap = tracebacks_word_wrap
+ self.tracebacks_show_locals = tracebacks_show_locals
+ self.tracebacks_suppress = tracebacks_suppress
+ self.locals_max_length = locals_max_length
+ self.locals_max_string = locals_max_string
+ self.keywords = keywords
+
+ def get_level_text(self, record: LogRecord) -> Text:
+ """Get the level name from the record.
+
+ Args:
+ record (LogRecord): LogRecord instance.
+
+ Returns:
+            Text: A Text instance containing the styled level name.
+ """
+ level_name = record.levelname
+ level_text = Text.styled(
+ level_name.ljust(8), f"logging.level.{level_name.lower()}"
+ )
+ return level_text
+
+ def emit(self, record: LogRecord) -> None:
+ """Invoked by logging."""
+ message = self.format(record)
+ traceback = None
+ if (
+ self.rich_tracebacks
+ and record.exc_info
+ and record.exc_info != (None, None, None)
+ ):
+ exc_type, exc_value, exc_traceback = record.exc_info
+ assert exc_type is not None
+ assert exc_value is not None
+ traceback = Traceback.from_exception(
+ exc_type,
+ exc_value,
+ exc_traceback,
+ width=self.tracebacks_width,
+ extra_lines=self.tracebacks_extra_lines,
+ theme=self.tracebacks_theme,
+ word_wrap=self.tracebacks_word_wrap,
+ show_locals=self.tracebacks_show_locals,
+ locals_max_length=self.locals_max_length,
+ locals_max_string=self.locals_max_string,
+ suppress=self.tracebacks_suppress,
+ )
+ message = record.getMessage()
+ if self.formatter:
+ record.message = record.getMessage()
+ formatter = self.formatter
+ if hasattr(formatter, "usesTime") and formatter.usesTime():
+ record.asctime = formatter.formatTime(record, formatter.datefmt)
+ message = formatter.formatMessage(record)
+
+ message_renderable = self.render_message(record, message)
+ log_renderable = self.render(
+ record=record, traceback=traceback, message_renderable=message_renderable
+ )
+ if isinstance(self.console.file, NullFile):
+ # Handles pythonw, where stdout/stderr are null, and we return NullFile
+ # instance from Console.file. In this case, we still want to make a log record
+ # even though we won't be writing anything to a file.
+ self.handleError(record)
+ else:
+ try:
+ self.console.print(log_renderable)
+ except Exception:
+ self.handleError(record)
+
+ def render_message(self, record: LogRecord, message: str) -> "ConsoleRenderable":
+ """Render message text in to Text.
+
+ Args:
+ record (LogRecord): logging Record.
+ message (str): String containing log message.
+
+ Returns:
+ ConsoleRenderable: Renderable to display log message.
+ """
+ use_markup = getattr(record, "markup", self.markup)
+ message_text = Text.from_markup(message) if use_markup else Text(message)
+
+ highlighter = getattr(record, "highlighter", self.highlighter)
+ if highlighter:
+ message_text = highlighter(message_text)
+
+ if self.keywords is None:
+ self.keywords = self.KEYWORDS
+
+ if self.keywords:
+ message_text.highlight_words(self.keywords, "logging.keyword")
+
+ return message_text
+
+ def render(
+ self,
+ *,
+ record: LogRecord,
+ traceback: Optional[Traceback],
+ message_renderable: "ConsoleRenderable",
+ ) -> "ConsoleRenderable":
+ """Render log for display.
+
+ Args:
+ record (LogRecord): logging Record.
+ traceback (Optional[Traceback]): Traceback instance or None for no Traceback.
+ message_renderable (ConsoleRenderable): Renderable (typically Text) containing log message contents.
+
+ Returns:
+ ConsoleRenderable: Renderable to display log.
+ """
+ path = Path(record.pathname).name
+ level = self.get_level_text(record)
+ time_format = None if self.formatter is None else self.formatter.datefmt
+ log_time = datetime.fromtimestamp(record.created)
+
+ log_renderable = self._log_render(
+ self.console,
+ [message_renderable] if not traceback else [message_renderable, traceback],
+ log_time=log_time,
+ time_format=time_format,
+ level=level,
+ path=path,
+ line_no=record.lineno,
+ link_path=record.pathname if self.enable_link_path else None,
+ )
+ return log_renderable
+
+
+if __name__ == "__main__": # pragma: no cover
+ from time import sleep
+
+ FORMAT = "%(message)s"
+ # FORMAT = "%(asctime)-15s - %(levelname)s - %(message)s"
+ logging.basicConfig(
+ level="NOTSET",
+ format=FORMAT,
+ datefmt="[%X]",
+ handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)],
+ )
+ log = logging.getLogger("rich")
+
+ log.info("Server starting...")
+ log.info("Listening on http://127.0.0.1:8080")
+ sleep(1)
+
+ log.info("GET /index.html 200 1298")
+ log.info("GET /imgs/backgrounds/back1.jpg 200 54386")
+ log.info("GET /css/styles.css 200 54386")
+ log.warning("GET /favicon.ico 404 242")
+ sleep(1)
+
+ log.debug(
+ "JSONRPC request\n--> %r\n<-- %r",
+ {
+ "version": "1.1",
+ "method": "confirmFruitPurchase",
+ "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
+ "id": "194521489",
+ },
+ {"version": "1.1", "result": True, "error": None, "id": "194521489"},
+ )
+ log.debug(
+ "Loading configuration file /adasd/asdasd/qeqwe/qwrqwrqwr/sdgsdgsdg/werwerwer/dfgerert/ertertert/ertetert/werwerwer"
+ )
+ log.error("Unable to find 'pomelo' in database!")
+ log.info("POST /jsonrpc/ 200 65532")
+ log.info("POST /admin/ 401 42234")
+ log.warning("password was rejected for admin site.")
+
+ def divide() -> None:
+ number = 1
+ divisor = 0
+ foos = ["foo"] * 100
+ log.debug("in divide")
+ try:
+ number / divisor
+ except:
+ log.exception("An error of some kind occurred!")
+
+ divide()
+ sleep(1)
+ log.critical("Out of memory!")
+ log.info("Server exited with code=-1")
+ log.info("[bold]EXITING...[/bold]", extra=dict(markup=True))
diff --git a/third_party/python/pip/pip/_vendor/rich/markup.py b/third_party/python/pip/pip/_vendor/rich/markup.py
new file mode 100644
index 0000000000..fd80d8c112
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/markup.py
@@ -0,0 +1,246 @@
+import re
+from ast import literal_eval
+from operator import attrgetter
+from typing import Callable, Iterable, List, Match, NamedTuple, Optional, Tuple, Union
+
+from ._emoji_replace import _emoji_replace
+from .emoji import EmojiVariant
+from .errors import MarkupError
+from .style import Style
+from .text import Span, Text
+
+RE_TAGS = re.compile(
+ r"""((\\*)\[([a-z#/@][^[]*?)])""",
+ re.VERBOSE,
+)
+
+RE_HANDLER = re.compile(r"^([\w.]*?)(\(.*?\))?$")
+
+
+class Tag(NamedTuple):
+ """A tag in console markup."""
+
+ name: str
+ """The tag name. e.g. 'bold'."""
+ parameters: Optional[str]
+ """Any additional parameters after the name."""
+
+ def __str__(self) -> str:
+ return (
+ self.name if self.parameters is None else f"{self.name} {self.parameters}"
+ )
+
+ @property
+ def markup(self) -> str:
+ """Get the string representation of this tag."""
+ return (
+ f"[{self.name}]"
+ if self.parameters is None
+ else f"[{self.name}={self.parameters}]"
+ )
+
+
+_ReStringMatch = Match[str] # regex match object
+_ReSubCallable = Callable[[_ReStringMatch], str] # Callable invoked by re.sub
+_EscapeSubMethod = Callable[[_ReSubCallable, str], str] # Sub method of a compiled re
+
+
+def escape(
+ markup: str,
+ _escape: _EscapeSubMethod = re.compile(r"(\\*)(\[[a-z#/@][^[]*?])").sub,
+) -> str:
+ """Escapes text so that it won't be interpreted as markup.
+
+ Args:
+ markup (str): Content to be inserted in to markup.
+
+ Returns:
+ str: Markup with square brackets escaped.
+ """
+
+ def escape_backslashes(match: Match[str]) -> str:
+ """Called by re.sub replace matches."""
+ backslashes, text = match.groups()
+ return f"{backslashes}{backslashes}\\{text}"
+
+ markup = _escape(escape_backslashes, markup)
+ return markup
+
+
+def _parse(markup: str) -> Iterable[Tuple[int, Optional[str], Optional[Tag]]]:
+ """Parse markup in to an iterable of tuples of (position, text, tag).
+
+ Args:
+ markup (str): A string containing console markup
+
+ """
+ position = 0
+ _divmod = divmod
+ _Tag = Tag
+ for match in RE_TAGS.finditer(markup):
+ full_text, escapes, tag_text = match.groups()
+ start, end = match.span()
+ if start > position:
+ yield start, markup[position:start], None
+ if escapes:
+ backslashes, escaped = _divmod(len(escapes), 2)
+ if backslashes:
+ # Literal backslashes
+ yield start, "\\" * backslashes, None
+ start += backslashes * 2
+ if escaped:
+ # Escape of tag
+ yield start, full_text[len(escapes) :], None
+ position = end
+ continue
+ text, equals, parameters = tag_text.partition("=")
+ yield start, None, _Tag(text, parameters if equals else None)
+ position = end
+ if position < len(markup):
+ yield position, markup[position:], None
+
+
+def render(
+ markup: str,
+ style: Union[str, Style] = "",
+ emoji: bool = True,
+ emoji_variant: Optional[EmojiVariant] = None,
+) -> Text:
+ """Render console markup in to a Text instance.
+
+ Args:
+ markup (str): A string containing console markup.
+ emoji (bool, optional): Also render emoji code. Defaults to True.
+
+ Raises:
+ MarkupError: If there is a syntax error in the markup.
+
+ Returns:
+        Text: A Text instance.
+ """
+ emoji_replace = _emoji_replace
+ if "[" not in markup:
+ return Text(
+ emoji_replace(markup, default_variant=emoji_variant) if emoji else markup,
+ style=style,
+ )
+ text = Text(style=style)
+ append = text.append
+ normalize = Style.normalize
+
+ style_stack: List[Tuple[int, Tag]] = []
+ pop = style_stack.pop
+
+ spans: List[Span] = []
+ append_span = spans.append
+
+ _Span = Span
+ _Tag = Tag
+
+ def pop_style(style_name: str) -> Tuple[int, Tag]:
+ """Pop tag matching given style name."""
+ for index, (_, tag) in enumerate(reversed(style_stack), 1):
+ if tag.name == style_name:
+ return pop(-index)
+ raise KeyError(style_name)
+
+ for position, plain_text, tag in _parse(markup):
+ if plain_text is not None:
+ # Handle open brace escapes, where the brace is not part of a tag.
+ plain_text = plain_text.replace("\\[", "[")
+ append(emoji_replace(plain_text) if emoji else plain_text)
+ elif tag is not None:
+ if tag.name.startswith("/"): # Closing tag
+ style_name = tag.name[1:].strip()
+
+ if style_name: # explicit close
+ style_name = normalize(style_name)
+ try:
+ start, open_tag = pop_style(style_name)
+ except KeyError:
+ raise MarkupError(
+ f"closing tag '{tag.markup}' at position {position} doesn't match any open tag"
+ ) from None
+ else: # implicit close
+ try:
+ start, open_tag = pop()
+ except IndexError:
+ raise MarkupError(
+ f"closing tag '[/]' at position {position} has nothing to close"
+ ) from None
+
+ if open_tag.name.startswith("@"):
+ if open_tag.parameters:
+ handler_name = ""
+ parameters = open_tag.parameters.strip()
+ handler_match = RE_HANDLER.match(parameters)
+ if handler_match is not None:
+ handler_name, match_parameters = handler_match.groups()
+ parameters = (
+ "()" if match_parameters is None else match_parameters
+ )
+
+ try:
+ meta_params = literal_eval(parameters)
+ except SyntaxError as error:
+ raise MarkupError(
+ f"error parsing {parameters!r} in {open_tag.parameters!r}; {error.msg}"
+ )
+ except Exception as error:
+ raise MarkupError(
+ f"error parsing {open_tag.parameters!r}; {error}"
+ ) from None
+
+ if handler_name:
+ meta_params = (
+ handler_name,
+ meta_params
+ if isinstance(meta_params, tuple)
+ else (meta_params,),
+ )
+
+ else:
+ meta_params = ()
+
+ append_span(
+ _Span(
+ start, len(text), Style(meta={open_tag.name: meta_params})
+ )
+ )
+ else:
+ append_span(_Span(start, len(text), str(open_tag)))
+
+ else: # Opening tag
+ normalized_tag = _Tag(normalize(tag.name), tag.parameters)
+ style_stack.append((len(text), normalized_tag))
+
+ text_length = len(text)
+ while style_stack:
+ start, tag = style_stack.pop()
+ style = str(tag)
+ if style:
+ append_span(_Span(start, text_length, style))
+
+ text.spans = sorted(spans[::-1], key=attrgetter("start"))
+ return text
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ MARKUP = [
+ "[red]Hello World[/red]",
+ "[magenta]Hello [b]World[/b]",
+ "[bold]Bold[italic] bold and italic [/bold]italic[/italic]",
+ "Click [link=https://www.willmcgugan.com]here[/link] to visit my Blog",
+ ":warning-emoji: [bold red blink] DANGER![/]",
+ ]
+
+ from pip._vendor.rich import print
+ from pip._vendor.rich.table import Table
+
+ grid = Table("Markup", "Result", padding=(0, 1))
+
+ for markup in MARKUP:
+ grid.add_row(Text(markup), markup)
+
+ print(grid)
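+
+    # Sketch: escape() backslash-escapes anything that would parse as a tag,
+    # so the bracketed text below prints literally.
+    print(escape("[bold]looks like a tag, prints literally[/bold]"))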
diff --git a/third_party/python/pip/pip/_vendor/rich/measure.py b/third_party/python/pip/pip/_vendor/rich/measure.py
new file mode 100644
index 0000000000..a508ffa80b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/measure.py
@@ -0,0 +1,151 @@
+from operator import itemgetter
+from typing import TYPE_CHECKING, Callable, NamedTuple, Optional, Sequence
+
+from . import errors
+from .protocol import is_renderable, rich_cast
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderableType
+
+
+class Measurement(NamedTuple):
+ """Stores the minimum and maximum widths (in characters) required to render an object."""
+
+ minimum: int
+ """Minimum number of cells required to render."""
+ maximum: int
+ """Maximum number of cells required to render."""
+
+ @property
+ def span(self) -> int:
+ """Get difference between maximum and minimum."""
+ return self.maximum - self.minimum
+
+ def normalize(self) -> "Measurement":
+ """Get measurement that ensures that minimum <= maximum and minimum >= 0
+
+ Returns:
+ Measurement: A normalized measurement.
+ """
+ minimum, maximum = self
+ minimum = min(max(0, minimum), maximum)
+ return Measurement(max(0, minimum), max(0, max(minimum, maximum)))
+
+ def with_maximum(self, width: int) -> "Measurement":
+ """Get a RenderableWith where the widths are <= width.
+
+ Args:
+ width (int): Maximum desired width.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
+ minimum, maximum = self
+ return Measurement(min(minimum, width), min(maximum, width))
+
+ def with_minimum(self, width: int) -> "Measurement":
+ """Get a RenderableWith where the widths are >= width.
+
+ Args:
+ width (int): Minimum desired width.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
+ minimum, maximum = self
+ width = max(0, width)
+ return Measurement(max(minimum, width), max(maximum, width))
+
+ def clamp(
+ self, min_width: Optional[int] = None, max_width: Optional[int] = None
+ ) -> "Measurement":
+ """Clamp a measurement within the specified range.
+
+ Args:
+ min_width (int): Minimum desired width, or ``None`` for no minimum. Defaults to None.
+ max_width (int): Maximum desired width, or ``None`` for no maximum. Defaults to None.
+
+ Returns:
+ Measurement: New Measurement object.
+ """
+ measurement = self
+ if min_width is not None:
+ measurement = measurement.with_minimum(min_width)
+ if max_width is not None:
+ measurement = measurement.with_maximum(max_width)
+ return measurement
+
+ @classmethod
+ def get(
+ cls, console: "Console", options: "ConsoleOptions", renderable: "RenderableType"
+ ) -> "Measurement":
+ """Get a measurement for a renderable.
+
+ Args:
+ console (~rich.console.Console): Console instance.
+ options (~rich.console.ConsoleOptions): Console options.
+ renderable (RenderableType): An object that may be rendered with Rich.
+
+ Raises:
+ errors.NotRenderableError: If the object is not renderable.
+
+ Returns:
+ Measurement: Measurement object containing range of character widths required to render the object.
+ """
+ _max_width = options.max_width
+ if _max_width < 1:
+ return Measurement(0, 0)
+ if isinstance(renderable, str):
+ renderable = console.render_str(
+ renderable, markup=options.markup, highlight=False
+ )
+ renderable = rich_cast(renderable)
+ if is_renderable(renderable):
+ get_console_width: Optional[
+ Callable[["Console", "ConsoleOptions"], "Measurement"]
+ ] = getattr(renderable, "__rich_measure__", None)
+ if get_console_width is not None:
+ render_width = (
+ get_console_width(console, options)
+ .normalize()
+ .with_maximum(_max_width)
+ )
+ if render_width.maximum < 1:
+ return Measurement(0, 0)
+ return render_width.normalize()
+ else:
+ return Measurement(0, _max_width)
+ else:
+ raise errors.NotRenderableError(
+ f"Unable to get render width for {renderable!r}; "
+ "a str, Segment, or object with __rich_console__ method is required"
+ )
+
+
+def measure_renderables(
+ console: "Console",
+ options: "ConsoleOptions",
+ renderables: Sequence["RenderableType"],
+) -> "Measurement":
+ """Get a measurement that would fit a number of renderables.
+
+ Args:
+ console (~rich.console.Console): Console instance.
+ options (~rich.console.ConsoleOptions): Console options.
+ renderables (Iterable[RenderableType]): One or more renderable objects.
+
+ Returns:
+ Measurement: Measurement object containing range of character widths required to
+ contain all given renderables.
+ """
+ if not renderables:
+ return Measurement(0, 0)
+ get_measurement = Measurement.get
+ measurements = [
+ get_measurement(console, options, renderable) for renderable in renderables
+ ]
+ measured_width = Measurement(
+ max(measurements, key=itemgetter(0)).minimum,
+ max(measurements, key=itemgetter(1)).maximum,
+ )
+ return measured_width
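+
+
+if __name__ == "__main__":  # pragma: no cover
+    # A minimal sketch: Measurement.get() reports the narrowest and widest
+    # widths an object can render in; a plain str is cast to Text internally.
+    from pip._vendor.rich.console import Console
+
+    console = Console()
+    console.print(Measurement.get(console, console.options, "Hello, World!"))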
diff --git a/third_party/python/pip/pip/_vendor/rich/padding.py b/third_party/python/pip/pip/_vendor/rich/padding.py
new file mode 100644
index 0000000000..1b2204f59f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/padding.py
@@ -0,0 +1,141 @@
+from typing import cast, List, Optional, Tuple, TYPE_CHECKING, Union
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ RenderableType,
+ RenderResult,
+ )
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .style import Style
+from .segment import Segment
+
+
+PaddingDimensions = Union[int, Tuple[int], Tuple[int, int], Tuple[int, int, int, int]]
+
+
+class Padding(JupyterMixin):
+ """Draw space around content.
+
+ Example:
+ >>> print(Padding("Hello", (2, 4), style="on blue"))
+
+ Args:
+ renderable (RenderableType): String or other renderable.
+ pad (Union[int, Tuple[int]]): Padding for top, right, bottom, and left borders.
+ May be specified with 1, 2, or 4 integers (CSS style).
+ style (Union[str, Style], optional): Style for padding characters. Defaults to "none".
+ expand (bool, optional): Expand padding to fit available width. Defaults to True.
+ """
+
+ def __init__(
+ self,
+ renderable: "RenderableType",
+ pad: "PaddingDimensions" = (0, 0, 0, 0),
+ *,
+ style: Union[str, Style] = "none",
+ expand: bool = True,
+ ):
+ self.renderable = renderable
+ self.top, self.right, self.bottom, self.left = self.unpack(pad)
+ self.style = style
+ self.expand = expand
+
+ @classmethod
+ def indent(cls, renderable: "RenderableType", level: int) -> "Padding":
+ """Make padding instance to render an indent.
+
+ Args:
+ renderable (RenderableType): String or other renderable.
+ level (int): Number of characters to indent.
+
+ Returns:
+ Padding: A Padding instance.
+ """
+
+ return Padding(renderable, pad=(0, 0, 0, level), expand=False)
+
+ @staticmethod
+ def unpack(pad: "PaddingDimensions") -> Tuple[int, int, int, int]:
+ """Unpack padding specified in CSS style."""
+ if isinstance(pad, int):
+ return (pad, pad, pad, pad)
+ if len(pad) == 1:
+ _pad = pad[0]
+ return (_pad, _pad, _pad, _pad)
+ if len(pad) == 2:
+ pad_top, pad_right = cast(Tuple[int, int], pad)
+ return (pad_top, pad_right, pad_top, pad_right)
+ if len(pad) == 4:
+ top, right, bottom, left = cast(Tuple[int, int, int, int], pad)
+ return (top, right, bottom, left)
+ raise ValueError(f"1, 2 or 4 integers required for padding; {len(pad)} given")
+
+ def __repr__(self) -> str:
+ return f"Padding({self.renderable!r}, ({self.top},{self.right},{self.bottom},{self.left}))"
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ style = console.get_style(self.style)
+ if self.expand:
+ width = options.max_width
+ else:
+ width = min(
+ Measurement.get(console, options, self.renderable).maximum
+ + self.left
+ + self.right,
+ options.max_width,
+ )
+ render_options = options.update_width(width - self.left - self.right)
+ if render_options.height is not None:
+ render_options = render_options.update_height(
+ height=render_options.height - self.top - self.bottom
+ )
+ lines = console.render_lines(
+ self.renderable, render_options, style=style, pad=True
+ )
+ _Segment = Segment
+
+ left = _Segment(" " * self.left, style) if self.left else None
+ right = (
+ [_Segment(f'{" " * self.right}', style), _Segment.line()]
+ if self.right
+ else [_Segment.line()]
+ )
+ blank_line: Optional[List[Segment]] = None
+ if self.top:
+ blank_line = [_Segment(f'{" " * width}\n', style)]
+ yield from blank_line * self.top
+ if left:
+ for line in lines:
+ yield left
+ yield from line
+ yield from right
+ else:
+ for line in lines:
+ yield from line
+ yield from right
+ if self.bottom:
+ blank_line = blank_line or [_Segment(f'{" " * width}\n', style)]
+ yield from blank_line * self.bottom
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ max_width = options.max_width
+ extra_width = self.left + self.right
+ if max_width - extra_width < 1:
+ return Measurement(max_width, max_width)
+ measure_min, measure_max = Measurement.get(console, options, self.renderable)
+ measurement = Measurement(measure_min + extra_width, measure_max + extra_width)
+ measurement = measurement.with_maximum(max_width)
+ return measurement
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich import print
+
+ print(Padding("Hello, World", (2, 4), style="on blue"))
diff --git a/third_party/python/pip/pip/_vendor/rich/pager.py b/third_party/python/pip/pip/_vendor/rich/pager.py
new file mode 100644
index 0000000000..a3f7aa62af
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/pager.py
@@ -0,0 +1,34 @@
+from abc import ABC, abstractmethod
+from typing import Any
+
+
+class Pager(ABC):
+ """Base class for a pager."""
+
+ @abstractmethod
+ def show(self, content: str) -> None:
+ """Show content in pager.
+
+ Args:
+ content (str): Content to be displayed.
+ """
+
+
+class SystemPager(Pager):
+ """Uses the pager installed on the system."""
+
+    def _pager(self, content: str) -> Any:  # pragma: no cover
+ return __import__("pydoc").pager(content)
+
+ def show(self, content: str) -> None:
+ """Use the same pager used by pydoc."""
+ self._pager(content)
+
+
+if __name__ == "__main__": # pragma: no cover
+ from .__main__ import make_test_card
+ from .console import Console
+
+ console = Console()
+ with console.pager(styles=True):
+ console.print(make_test_card())
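+
+    # Sketch: a custom pager only needs show(); this trivial subclass writes
+    # straight through instead of paging.
+    class EchoPager(Pager):
+        def show(self, content: str) -> None:
+            print(content)
+
+    EchoPager().show("plain passthrough")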
diff --git a/third_party/python/pip/pip/_vendor/rich/palette.py b/third_party/python/pip/pip/_vendor/rich/palette.py
new file mode 100644
index 0000000000..fa0c4dd403
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/palette.py
@@ -0,0 +1,100 @@
+from math import sqrt
+from functools import lru_cache
+from typing import Sequence, Tuple, TYPE_CHECKING
+
+from .color_triplet import ColorTriplet
+
+if TYPE_CHECKING:
+ from pip._vendor.rich.table import Table
+
+
+class Palette:
+ """A palette of available colors."""
+
+ def __init__(self, colors: Sequence[Tuple[int, int, int]]):
+ self._colors = colors
+
+ def __getitem__(self, number: int) -> ColorTriplet:
+ return ColorTriplet(*self._colors[number])
+
+ def __rich__(self) -> "Table":
+ from pip._vendor.rich.color import Color
+ from pip._vendor.rich.style import Style
+ from pip._vendor.rich.text import Text
+ from pip._vendor.rich.table import Table
+
+ table = Table(
+ "index",
+ "RGB",
+ "Color",
+ title="Palette",
+ caption=f"{len(self._colors)} colors",
+ highlight=True,
+ caption_justify="right",
+ )
+ for index, color in enumerate(self._colors):
+ table.add_row(
+ str(index),
+ repr(color),
+ Text(" " * 16, style=Style(bgcolor=Color.from_rgb(*color))),
+ )
+ return table
+
+    # Nearest-color matching is relatively expensive, hence the cache.
+ @lru_cache(maxsize=1024)
+ def match(self, color: Tuple[int, int, int]) -> int:
+ """Find a color from a palette that most closely matches a given color.
+
+ Args:
+            color (Tuple[int, int, int]): RGB components in range 0 to 255.
+
+        Returns:
+            int: Index of closest matching color.
+ """
+ red1, green1, blue1 = color
+ _sqrt = sqrt
+ get_color = self._colors.__getitem__
+
+ def get_color_distance(index: int) -> float:
+ """Get the distance to a color."""
+ red2, green2, blue2 = get_color(index)
+ red_mean = (red1 + red2) // 2
+ red = red1 - red2
+ green = green1 - green2
+ blue = blue1 - blue2
+ return _sqrt(
+ (((512 + red_mean) * red * red) >> 8)
+ + 4 * green * green
+ + (((767 - red_mean) * blue * blue) >> 8)
+ )
+
+ min_index = min(range(len(self._colors)), key=get_color_distance)
+ return min_index
+
+
+if __name__ == "__main__": # pragma: no cover
+ import colorsys
+ from typing import Iterable
+ from pip._vendor.rich.color import Color
+ from pip._vendor.rich.console import Console, ConsoleOptions
+ from pip._vendor.rich.segment import Segment
+ from pip._vendor.rich.style import Style
+
+ class ColorBox:
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> Iterable[Segment]:
+ height = console.size.height - 3
+ for y in range(0, height):
+ for x in range(options.max_width):
+ h = x / options.max_width
+ l = y / (height + 1)
+ r1, g1, b1 = colorsys.hls_to_rgb(h, l, 1.0)
+ r2, g2, b2 = colorsys.hls_to_rgb(h, l + (1 / height / 2), 1.0)
+ bgcolor = Color.from_rgb(r1 * 255, g1 * 255, b1 * 255)
+ color = Color.from_rgb(r2 * 255, g2 * 255, b2 * 255)
+ yield Segment("▄", Style(color=color, bgcolor=bgcolor))
+ yield Segment.line()
+
+ console = Console()
+ console.print(ColorBox())
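+
+    # Sketch: match() applies the red-mean weighted distance above, so a
+    # dull red resolves to the red entry of this tiny palette.
+    palette = Palette([(0, 0, 0), (255, 0, 0), (255, 255, 255)])
+    console.print(palette[palette.match((200, 30, 30))])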
diff --git a/third_party/python/pip/pip/_vendor/rich/panel.py b/third_party/python/pip/pip/_vendor/rich/panel.py
new file mode 100644
index 0000000000..d522d80b51
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/panel.py
@@ -0,0 +1,308 @@
+from typing import TYPE_CHECKING, Optional
+
+from .align import AlignMethod
+from .box import ROUNDED, Box
+from .cells import cell_len
+from .jupyter import JupyterMixin
+from .measure import Measurement, measure_renderables
+from .padding import Padding, PaddingDimensions
+from .segment import Segment
+from .style import Style, StyleType
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderableType, RenderResult
+
+
+class Panel(JupyterMixin):
+ """A console renderable that draws a border around its contents.
+
+ Example:
+ >>> console.print(Panel("Hello, World!"))
+
+ Args:
+ renderable (RenderableType): A console renderable object.
+        box (Box, optional): A Box instance that defines the look of the border (see :ref:`appendix_box`).
+            Defaults to box.ROUNDED.
+        safe_box (bool, optional): Disable box characters that don't display on Windows legacy terminals with *raster* fonts. Defaults to True.
+ expand (bool, optional): If True the panel will stretch to fill the console
+ width, otherwise it will be sized to fit the contents. Defaults to True.
+ style (str, optional): The style of the panel (border and contents). Defaults to "none".
+ border_style (str, optional): The style of the border. Defaults to "none".
+ width (Optional[int], optional): Optional width of panel. Defaults to None to auto-detect.
+ height (Optional[int], optional): Optional height of panel. Defaults to None to auto-detect.
+ padding (Optional[PaddingDimensions]): Optional padding around renderable. Defaults to 0.
+ highlight (bool, optional): Enable automatic highlighting of panel title (if str). Defaults to False.
+ """
+
+ def __init__(
+ self,
+ renderable: "RenderableType",
+ box: Box = ROUNDED,
+ *,
+ title: Optional[TextType] = None,
+ title_align: AlignMethod = "center",
+ subtitle: Optional[TextType] = None,
+ subtitle_align: AlignMethod = "center",
+ safe_box: Optional[bool] = None,
+ expand: bool = True,
+ style: StyleType = "none",
+ border_style: StyleType = "none",
+ width: Optional[int] = None,
+ height: Optional[int] = None,
+ padding: PaddingDimensions = (0, 1),
+ highlight: bool = False,
+ ) -> None:
+ self.renderable = renderable
+ self.box = box
+ self.title = title
+ self.title_align: AlignMethod = title_align
+ self.subtitle = subtitle
+ self.subtitle_align = subtitle_align
+ self.safe_box = safe_box
+ self.expand = expand
+ self.style = style
+ self.border_style = border_style
+ self.width = width
+ self.height = height
+ self.padding = padding
+ self.highlight = highlight
+
+ @classmethod
+ def fit(
+ cls,
+ renderable: "RenderableType",
+ box: Box = ROUNDED,
+ *,
+ title: Optional[TextType] = None,
+ title_align: AlignMethod = "center",
+ subtitle: Optional[TextType] = None,
+ subtitle_align: AlignMethod = "center",
+ safe_box: Optional[bool] = None,
+ style: StyleType = "none",
+ border_style: StyleType = "none",
+ width: Optional[int] = None,
+ padding: PaddingDimensions = (0, 1),
+ ) -> "Panel":
+ """An alternative constructor that sets expand=False."""
+ return cls(
+ renderable,
+ box,
+ title=title,
+ title_align=title_align,
+ subtitle=subtitle,
+ subtitle_align=subtitle_align,
+ safe_box=safe_box,
+ style=style,
+ border_style=border_style,
+ width=width,
+ padding=padding,
+ expand=False,
+ )
+
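+    # A short sketch of fit(): the border hugs the content rather than
+    # expanding, e.g. console.print(Panel.fit("Hello", title="hi")).
+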
+ @property
+ def _title(self) -> Optional[Text]:
+ if self.title:
+ title_text = (
+ Text.from_markup(self.title)
+ if isinstance(self.title, str)
+ else self.title.copy()
+ )
+ title_text.end = ""
+ title_text.plain = title_text.plain.replace("\n", " ")
+ title_text.no_wrap = True
+ title_text.expand_tabs()
+ title_text.pad(1)
+ return title_text
+ return None
+
+ @property
+ def _subtitle(self) -> Optional[Text]:
+ if self.subtitle:
+ subtitle_text = (
+ Text.from_markup(self.subtitle)
+ if isinstance(self.subtitle, str)
+ else self.subtitle.copy()
+ )
+ subtitle_text.end = ""
+ subtitle_text.plain = subtitle_text.plain.replace("\n", " ")
+ subtitle_text.no_wrap = True
+ subtitle_text.expand_tabs()
+ subtitle_text.pad(1)
+ return subtitle_text
+ return None
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ _padding = Padding.unpack(self.padding)
+ renderable = (
+ Padding(self.renderable, _padding) if any(_padding) else self.renderable
+ )
+ style = console.get_style(self.style)
+ border_style = style + console.get_style(self.border_style)
+ width = (
+ options.max_width
+ if self.width is None
+ else min(options.max_width, self.width)
+ )
+
+ safe_box: bool = console.safe_box if self.safe_box is None else self.safe_box
+ box = self.box.substitute(options, safe=safe_box)
+
+ def align_text(
+ text: Text, width: int, align: str, character: str, style: Style
+ ) -> Text:
+ """Gets new aligned text.
+
+ Args:
+ text (Text): Title or subtitle text.
+ width (int): Desired width.
+ align (str): Alignment.
+ character (str): Character for alignment.
+ style (Style): Border style.
+
+ Returns:
+ Text: New text instance.
+ """
+ text = text.copy()
+ text.truncate(width)
+ excess_space = width - cell_len(text.plain)
+ if excess_space:
+ if align == "left":
+ return Text.assemble(
+ text,
+ (character * excess_space, style),
+ no_wrap=True,
+ end="",
+ )
+ elif align == "center":
+ left = excess_space // 2
+ return Text.assemble(
+ (character * left, style),
+ text,
+ (character * (excess_space - left), style),
+ no_wrap=True,
+ end="",
+ )
+ else:
+ return Text.assemble(
+ (character * excess_space, style),
+ text,
+ no_wrap=True,
+ end="",
+ )
+ return text
+
+ title_text = self._title
+ if title_text is not None:
+ title_text.stylize_before(border_style)
+
+ child_width = (
+ width - 2
+ if self.expand
+ else console.measure(
+ renderable, options=options.update_width(width - 2)
+ ).maximum
+ )
+ child_height = self.height or options.height or None
+ if child_height:
+ child_height -= 2
+ if title_text is not None:
+ child_width = min(
+ options.max_width - 2, max(child_width, title_text.cell_len + 2)
+ )
+
+ width = child_width + 2
+ child_options = options.update(
+ width=child_width, height=child_height, highlight=self.highlight
+ )
+ lines = console.render_lines(renderable, child_options, style=style)
+
+ line_start = Segment(box.mid_left, border_style)
+ line_end = Segment(f"{box.mid_right}", border_style)
+ new_line = Segment.line()
+ if title_text is None or width <= 4:
+ yield Segment(box.get_top([width - 2]), border_style)
+ else:
+ title_text = align_text(
+ title_text,
+ width - 4,
+ self.title_align,
+ box.top,
+ border_style,
+ )
+ yield Segment(box.top_left + box.top, border_style)
+ yield from console.render(title_text, child_options.update_width(width - 4))
+ yield Segment(box.top + box.top_right, border_style)
+
+ yield new_line
+ for line in lines:
+ yield line_start
+ yield from line
+ yield line_end
+ yield new_line
+
+ subtitle_text = self._subtitle
+ if subtitle_text is not None:
+ subtitle_text.stylize_before(border_style)
+
+ if subtitle_text is None or width <= 4:
+ yield Segment(box.get_bottom([width - 2]), border_style)
+ else:
+ subtitle_text = align_text(
+ subtitle_text,
+ width - 4,
+ self.subtitle_align,
+ box.bottom,
+ border_style,
+ )
+ yield Segment(box.bottom_left + box.bottom, border_style)
+ yield from console.render(
+ subtitle_text, child_options.update_width(width - 4)
+ )
+ yield Segment(box.bottom + box.bottom_right, border_style)
+
+ yield new_line
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ _title = self._title
+ _, right, _, left = Padding.unpack(self.padding)
+ padding = left + right
+ renderables = [self.renderable, _title] if _title else [self.renderable]
+
+ if self.width is None:
+ width = (
+ measure_renderables(
+ console,
+ options.update_width(options.max_width - padding - 2),
+ renderables,
+ ).maximum
+ + padding
+ + 2
+ )
+ else:
+ width = self.width
+ return Measurement(width, width)
+
+
+if __name__ == "__main__": # pragma: no cover
+ from .console import Console
+
+ c = Console()
+
+ from .box import DOUBLE, ROUNDED
+ from .padding import Padding
+
+ p = Panel(
+ "Hello, World!",
+ title="rich.Panel",
+ style="white on blue",
+ box=DOUBLE,
+ padding=1,
+ )
+
+ c.print()
+ c.print(p)
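+
+ # Illustrative addition (not part of upstream rich): Panel.fit() is the
+ # alternative constructor with expand=False, so the border hugs the content
+ # instead of stretching to the full console width.
+ c.print(Panel.fit("Hello, World!", title="Panel.fit"))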
diff --git a/third_party/python/pip/pip/_vendor/rich/pretty.py b/third_party/python/pip/pip/_vendor/rich/pretty.py
new file mode 100644
index 0000000000..847b558c9c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/pretty.py
@@ -0,0 +1,1029 @@
+import builtins
+import collections
+import dataclasses
+import inspect
+import os
+import sys
+from array import array
+from collections import Counter, UserDict, UserList, defaultdict, deque
+from dataclasses import dataclass, fields, is_dataclass
+from inspect import isclass
+from itertools import islice
+from types import MappingProxyType
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ DefaultDict,
+ Dict,
+ Iterable,
+ List,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Union,
+)
+
+from pip._vendor.rich.repr import RichReprResult
+
+try:
+ import attr as _attr_module
+
+ _has_attrs = True
+except ImportError: # pragma: no cover
+ _has_attrs = False
+
+from . import get_console
+from ._loop import loop_last
+from ._pick import pick_bool
+from .abc import RichRenderable
+from .cells import cell_len
+from .highlighter import ReprHighlighter
+from .jupyter import JupyterMixin, JupyterRenderable
+from .measure import Measurement
+from .text import Text
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ HighlighterType,
+ JustifyMethod,
+ OverflowMethod,
+ RenderResult,
+ )
+
+
+JUPYTER_CLASSES_TO_NOT_RENDER = {
+ # Matplotlib "Artists" manage their own rendering in a Jupyter notebook, and we should not try to render them too.
+ # "Typically, all [Matplotlib] visible elements in a figure are subclasses of Artist."
+ "matplotlib.artist.Artist",
+}
+
+
+def _is_attr_object(obj: Any) -> bool:
+ """Check if an object was created with attrs module."""
+ return _has_attrs and _attr_module.has(type(obj))
+
+
+def _get_attr_fields(obj: Any) -> Sequence["_attr_module.Attribute[Any]"]:
+ """Get fields for an attrs object."""
+ return _attr_module.fields(type(obj)) if _has_attrs else []
+
+
+def _is_dataclass_repr(obj: object) -> bool:
+ """Check if an instance of a dataclass contains the default repr.
+
+ Args:
+ obj (object): A dataclass instance.
+
+ Returns:
+ bool: True if the default repr is used, False if there is a custom repr.
+ """
+ # Digging in to a lot of internals here
+ # Catching all exceptions in case something is missing on a non CPython implementation
+ try:
+ return obj.__repr__.__code__.co_filename == dataclasses.__file__
+ except Exception: # pragma: no cover
+ return False
+
+
+_dummy_namedtuple = collections.namedtuple("_dummy_namedtuple", [])
+
+
+def _has_default_namedtuple_repr(obj: object) -> bool:
+ """Check if an instance of namedtuple contains the default repr
+
+ Args:
+ obj (object): A namedtuple
+
+ Returns:
+ bool: True if the default repr is used, False if there's a custom repr.
+ """
+ obj_file = None
+ try:
+ obj_file = inspect.getfile(obj.__repr__)
+ except (OSError, TypeError):
+ # OSError handles the case where the object is defined in the __main__ scope, e.g. a REPL - no filename available.
+ # TypeError is trapped defensively, in case an object without a filename slips through.
+ pass
+ default_repr_file = inspect.getfile(_dummy_namedtuple.__repr__)
+ return obj_file == default_repr_file
+
+
+def _ipy_display_hook(
+ value: Any,
+ console: Optional["Console"] = None,
+ overflow: "OverflowMethod" = "ignore",
+ crop: bool = False,
+ indent_guides: bool = False,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ expand_all: bool = False,
+) -> None:
+ # needed here to prevent circular import:
+ from ._inspect import is_object_one_of_types
+ from .console import ConsoleRenderable
+
+ # always skip rich generated jupyter renderables or None values
+ if _safe_isinstance(value, JupyterRenderable) or value is None:
+ return
+
+ console = console or get_console()
+ if console.is_jupyter:
+ # Delegate rendering to IPython if the object (and IPython) supports it
+ # https://ipython.readthedocs.io/en/stable/config/integrating.html#rich-display
+ ipython_repr_methods = [
+ "_repr_html_",
+ "_repr_markdown_",
+ "_repr_json_",
+ "_repr_latex_",
+ "_repr_jpeg_",
+ "_repr_png_",
+ "_repr_svg_",
+ "_repr_mimebundle_",
+ ]
+ for repr_method in ipython_repr_methods:
+ method = getattr(value, repr_method, None)
+ if inspect.ismethod(method):
+ # Calling the method ourselves isn't ideal. The interface for the `_repr_*_` methods
+ # specifies that if they return None, then they should not be rendered
+ # by the notebook.
+ try:
+ repr_result = method()
+ except Exception:
+ continue # If the method raises, treat it as if it doesn't exist, try any others
+ if repr_result is not None:
+ return # Delegate rendering to IPython
+
+ # When in a Jupyter notebook let's avoid the display of some specific classes,
+ # as they result in the rendering of useless and noisy lines such as `<Figure size 432x288 with 1 Axes>`.
+ # What does this do?
+ # --> if the class has "matplotlib.artist.Artist" in its hierarchy for example, we don't render it.
+ if is_object_one_of_types(value, JUPYTER_CLASSES_TO_NOT_RENDER):
+ return
+
+ # certain renderables should start on a new line
+ if _safe_isinstance(value, ConsoleRenderable):
+ console.line()
+
+ console.print(
+ value
+ if _safe_isinstance(value, RichRenderable)
+ else Pretty(
+ value,
+ overflow=overflow,
+ indent_guides=indent_guides,
+ max_length=max_length,
+ max_string=max_string,
+ max_depth=max_depth,
+ expand_all=expand_all,
+ margin=12,
+ ),
+ crop=crop,
+ new_line_start=True,
+ )
+
+
+def _safe_isinstance(
+ obj: object, class_or_tuple: Union[type, Tuple[type, ...]]
+) -> bool:
+ """isinstance can fail in rare cases, for example types with no __class__"""
+ try:
+ return isinstance(obj, class_or_tuple)
+ except Exception:
+ return False
+
+
+def install(
+ console: Optional["Console"] = None,
+ overflow: "OverflowMethod" = "ignore",
+ crop: bool = False,
+ indent_guides: bool = False,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ expand_all: bool = False,
+) -> None:
+ """Install automatic pretty printing in the Python REPL.
+
+ Args:
+ console (Console, optional): Console instance or ``None`` to use global console. Defaults to None.
+ overflow (Optional[OverflowMethod], optional): Overflow method. Defaults to "ignore".
+ crop (Optional[bool], optional): Enable cropping of long lines. Defaults to False.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+ """
+ from pip._vendor.rich import get_console
+
+ console = console or get_console()
+ assert console is not None
+
+ def display_hook(value: Any) -> None:
+ """Replacement sys.displayhook which prettifies objects with Rich."""
+ if value is not None:
+ assert console is not None
+ builtins._ = None # type: ignore[attr-defined]
+ console.print(
+ value
+ if _safe_isinstance(value, RichRenderable)
+ else Pretty(
+ value,
+ overflow=overflow,
+ indent_guides=indent_guides,
+ max_length=max_length,
+ max_string=max_string,
+ max_depth=max_depth,
+ expand_all=expand_all,
+ ),
+ crop=crop,
+ )
+ builtins._ = value # type: ignore[attr-defined]
+
+ try: # pragma: no cover
+ ip = get_ipython() # type: ignore[name-defined]
+ from IPython.core.formatters import BaseFormatter
+
+ class RichFormatter(BaseFormatter): # type: ignore[misc]
+ pprint: bool = True
+
+ def __call__(self, value: Any) -> Any:
+ if self.pprint:
+ return _ipy_display_hook(
+ value,
+ console=get_console(),
+ overflow=overflow,
+ indent_guides=indent_guides,
+ max_length=max_length,
+ max_string=max_string,
+ max_depth=max_depth,
+ expand_all=expand_all,
+ )
+ else:
+ return repr(value)
+
+ # replace plain text formatter with rich formatter
+ rich_formatter = RichFormatter()
+ ip.display_formatter.formatters["text/plain"] = rich_formatter
+ except Exception:
+ sys.displayhook = display_hook
+
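+# Usage sketch (illustrative, not upstream code): call install() from a REPL
+# or a PYTHONSTARTUP file; evaluated expressions are then pretty printed via
+# sys.displayhook (or IPython's "text/plain" formatter when available):
+#     from pip._vendor.rich.pretty import install
+#     install(max_length=10, max_string=60)
+#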
+
+class Pretty(JupyterMixin):
+ """A rich renderable that pretty prints an object.
+
+ Args:
+ _object (Any): An object to pretty print.
+ highlighter (HighlighterType, optional): Highlighter object to apply to result, or None for ReprHighlighter. Defaults to None.
+ indent_size (int, optional): Number of spaces in indent. Defaults to 4.
+ justify (JustifyMethod, optional): Justify method, or None for default. Defaults to None.
+ overflow (OverflowMethod, optional): Overflow method, or None for default. Defaults to None.
+ no_wrap (Optional[bool], optional): Disable word wrapping. Defaults to False.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth of nested data structures, or None for no maximum. Defaults to None.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+ margin (int, optional): Subtract a margin from width to force containers to expand earlier. Defaults to 0.
+ insert_line (bool, optional): Insert a blank line if the output spans multiple lines. Defaults to False.
+ """
+
+ def __init__(
+ self,
+ _object: Any,
+ highlighter: Optional["HighlighterType"] = None,
+ *,
+ indent_size: int = 4,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = False,
+ indent_guides: bool = False,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ expand_all: bool = False,
+ margin: int = 0,
+ insert_line: bool = False,
+ ) -> None:
+ self._object = _object
+ self.highlighter = highlighter or ReprHighlighter()
+ self.indent_size = indent_size
+ self.justify: Optional["JustifyMethod"] = justify
+ self.overflow: Optional["OverflowMethod"] = overflow
+ self.no_wrap = no_wrap
+ self.indent_guides = indent_guides
+ self.max_length = max_length
+ self.max_string = max_string
+ self.max_depth = max_depth
+ self.expand_all = expand_all
+ self.margin = margin
+ self.insert_line = insert_line
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ pretty_str = pretty_repr(
+ self._object,
+ max_width=options.max_width - self.margin,
+ indent_size=self.indent_size,
+ max_length=self.max_length,
+ max_string=self.max_string,
+ max_depth=self.max_depth,
+ expand_all=self.expand_all,
+ )
+ pretty_text = Text.from_ansi(
+ pretty_str,
+ justify=self.justify or options.justify,
+ overflow=self.overflow or options.overflow,
+ no_wrap=pick_bool(self.no_wrap, options.no_wrap),
+ style="pretty",
+ )
+ pretty_text = (
+ self.highlighter(pretty_text)
+ if pretty_text
+ else Text(
+ f"{type(self._object)}.__repr__ returned empty string",
+ style="dim italic",
+ )
+ )
+ if self.indent_guides and not options.ascii_only:
+ pretty_text = pretty_text.with_indent_guides(
+ self.indent_size, style="repr.indent"
+ )
+ if self.insert_line and "\n" in pretty_text:
+ yield ""
+ yield pretty_text
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ pretty_str = pretty_repr(
+ self._object,
+ max_width=options.max_width,
+ indent_size=self.indent_size,
+ max_length=self.max_length,
+ max_string=self.max_string,
+ expand_all=self.expand_all,
+ )
+ text_width = (
+ max(cell_len(line) for line in pretty_str.splitlines()) if pretty_str else 0
+ )
+ return Measurement(text_width, text_width)
+
+
+def _get_braces_for_defaultdict(_object: DefaultDict[Any, Any]) -> Tuple[str, str, str]:
+ return (
+ f"defaultdict({_object.default_factory!r}, {{",
+ "})",
+ f"defaultdict({_object.default_factory!r}, {{}})",
+ )
+
+
+def _get_braces_for_array(_object: "array[Any]") -> Tuple[str, str, str]:
+ return (f"array({_object.typecode!r}, [", "])", f"array({_object.typecode!r})")
+
+
+_BRACES: Dict[type, Callable[[Any], Tuple[str, str, str]]] = {
+ os._Environ: lambda _object: ("environ({", "})", "environ({})"),
+ array: _get_braces_for_array,
+ defaultdict: _get_braces_for_defaultdict,
+ Counter: lambda _object: ("Counter({", "})", "Counter()"),
+ deque: lambda _object: ("deque([", "])", "deque()"),
+ dict: lambda _object: ("{", "}", "{}"),
+ UserDict: lambda _object: ("{", "}", "{}"),
+ frozenset: lambda _object: ("frozenset({", "})", "frozenset()"),
+ list: lambda _object: ("[", "]", "[]"),
+ UserList: lambda _object: ("[", "]", "[]"),
+ set: lambda _object: ("{", "}", "set()"),
+ tuple: lambda _object: ("(", ")", "()"),
+ MappingProxyType: lambda _object: ("mappingproxy({", "})", "mappingproxy({})"),
+}
+_CONTAINERS = tuple(_BRACES.keys())
+_MAPPING_CONTAINERS = (dict, os._Environ, MappingProxyType, UserDict)
+
+
+def is_expandable(obj: Any) -> bool:
+ """Check if an object may be expanded by pretty print."""
+ return (
+ _safe_isinstance(obj, _CONTAINERS)
+ or (is_dataclass(obj))
+ or (hasattr(obj, "__rich_repr__"))
+ or _is_attr_object(obj)
+ ) and not isclass(obj)
+
+
+@dataclass
+class Node:
+ """A node in a repr tree. May be atomic or a container."""
+
+ key_repr: str = ""
+ value_repr: str = ""
+ open_brace: str = ""
+ close_brace: str = ""
+ empty: str = ""
+ last: bool = False
+ is_tuple: bool = False
+ is_namedtuple: bool = False
+ children: Optional[List["Node"]] = None
+ key_separator = ": "
+ separator: str = ", "
+
+ def iter_tokens(self) -> Iterable[str]:
+ """Generate tokens for this node."""
+ if self.key_repr:
+ yield self.key_repr
+ yield self.key_separator
+ if self.value_repr:
+ yield self.value_repr
+ elif self.children is not None:
+ if self.children:
+ yield self.open_brace
+ if self.is_tuple and not self.is_namedtuple and len(self.children) == 1:
+ yield from self.children[0].iter_tokens()
+ yield ","
+ else:
+ for child in self.children:
+ yield from child.iter_tokens()
+ if not child.last:
+ yield self.separator
+ yield self.close_brace
+ else:
+ yield self.empty
+
+ def check_length(self, start_length: int, max_length: int) -> bool:
+ """Check the length fits within a limit.
+
+ Args:
+ start_length (int): Starting length of the line (indent, prefix, suffix).
+ max_length (int): Maximum length.
+
+ Returns:
+ bool: True if the node can be rendered within max length, otherwise False.
+ """
+ total_length = start_length
+ for token in self.iter_tokens():
+ total_length += cell_len(token)
+ if total_length > max_length:
+ return False
+ return True
+
+ def __str__(self) -> str:
+ repr_text = "".join(self.iter_tokens())
+ return repr_text
+
+ def render(
+ self, max_width: int = 80, indent_size: int = 4, expand_all: bool = False
+ ) -> str:
+ """Render the node to a pretty repr.
+
+ Args:
+ max_width (int, optional): Maximum width of the repr. Defaults to 80.
+ indent_size (int, optional): Size of indents. Defaults to 4.
+ expand_all (bool, optional): Expand all levels. Defaults to False.
+
+ Returns:
+ str: A repr string of the original object.
+ """
+ lines = [_Line(node=self, is_root=True)]
+ line_no = 0
+ while line_no < len(lines):
+ line = lines[line_no]
+ if line.expandable and not line.expanded:
+ if expand_all or not line.check_length(max_width):
+ lines[line_no : line_no + 1] = line.expand(indent_size)
+ line_no += 1
+
+ repr_str = "\n".join(str(line) for line in lines)
+ return repr_str
+
+
+@dataclass
+class _Line:
+ """A line in repr output."""
+
+ parent: Optional["_Line"] = None
+ is_root: bool = False
+ node: Optional[Node] = None
+ text: str = ""
+ suffix: str = ""
+ whitespace: str = ""
+ expanded: bool = False
+ last: bool = False
+
+ @property
+ def expandable(self) -> bool:
+ """Check if the line may be expanded."""
+ return bool(self.node is not None and self.node.children)
+
+ def check_length(self, max_length: int) -> bool:
+ """Check this line fits within a given number of cells."""
+ start_length = (
+ len(self.whitespace) + cell_len(self.text) + cell_len(self.suffix)
+ )
+ assert self.node is not None
+ return self.node.check_length(start_length, max_length)
+
+ def expand(self, indent_size: int) -> Iterable["_Line"]:
+ """Expand this line by adding children on their own line."""
+ node = self.node
+ assert node is not None
+ whitespace = self.whitespace
+ assert node.children
+ if node.key_repr:
+ new_line = yield _Line(
+ text=f"{node.key_repr}{node.key_separator}{node.open_brace}",
+ whitespace=whitespace,
+ )
+ else:
+ new_line = yield _Line(text=node.open_brace, whitespace=whitespace)
+ child_whitespace = self.whitespace + " " * indent_size
+ tuple_of_one = node.is_tuple and len(node.children) == 1
+ for last, child in loop_last(node.children):
+ separator = "," if tuple_of_one else node.separator
+ line = _Line(
+ parent=new_line,
+ node=child,
+ whitespace=child_whitespace,
+ suffix=separator,
+ last=last and not tuple_of_one,
+ )
+ yield line
+
+ yield _Line(
+ text=node.close_brace,
+ whitespace=whitespace,
+ suffix=self.suffix,
+ last=self.last,
+ )
+
+ def __str__(self) -> str:
+ if self.last:
+ return f"{self.whitespace}{self.text}{self.node or ''}"
+ else:
+ return (
+ f"{self.whitespace}{self.text}{self.node or ''}{self.suffix.rstrip()}"
+ )
+
+
+def _is_namedtuple(obj: Any) -> bool:
+ """Checks if an object is most likely a namedtuple. It is possible
+ to craft an object that passes this check and isn't a namedtuple, but
+ there is only a minuscule chance of this happening unintentionally.
+
+ Args:
+ obj (Any): The object to test
+
+ Returns:
+ bool: True if the object is a namedtuple. False otherwise.
+ """
+ try:
+ fields = getattr(obj, "_fields", None)
+ except Exception:
+ # Being very defensive - if we cannot get the attr then it's not a namedtuple
+ return False
+ return isinstance(obj, tuple) and isinstance(fields, tuple)
+
+
+def traverse(
+ _object: Any,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+) -> Node:
+ """Traverse object and generate a tree.
+
+ Args:
+ _object (Any): Object to be traversed.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
+ Defaults to None.
+ max_depth (int, optional): Maximum depth of data structures, or None for no maximum.
+ Defaults to None.
+
+ Returns:
+ Node: The root of a tree structure which can be used to render a pretty repr.
+ """
+
+ def to_repr(obj: Any) -> str:
+ """Get repr string for an object, but catch errors."""
+ if (
+ max_string is not None
+ and _safe_isinstance(obj, (bytes, str))
+ and len(obj) > max_string
+ ):
+ truncated = len(obj) - max_string
+ obj_repr = f"{obj[:max_string]!r}+{truncated}"
+ else:
+ try:
+ obj_repr = repr(obj)
+ except Exception as error:
+ obj_repr = f"<repr-error {str(error)!r}>"
+ return obj_repr
+
+ visited_ids: Set[int] = set()
+ push_visited = visited_ids.add
+ pop_visited = visited_ids.remove
+
+ def _traverse(obj: Any, root: bool = False, depth: int = 0) -> Node:
+ """Walk the object depth first."""
+
+ obj_id = id(obj)
+ if obj_id in visited_ids:
+ # Recursion detected
+ return Node(value_repr="...")
+
+ obj_type = type(obj)
+ py_version = (sys.version_info.major, sys.version_info.minor)
+ children: List[Node]
+ reached_max_depth = max_depth is not None and depth >= max_depth
+
+ def iter_rich_args(rich_args: Any) -> Iterable[Union[Any, Tuple[str, Any]]]:
+ for arg in rich_args:
+ if _safe_isinstance(arg, tuple):
+ if len(arg) == 3:
+ key, child, default = arg
+ if default == child:
+ continue
+ yield key, child
+ elif len(arg) == 2:
+ key, child = arg
+ yield key, child
+ elif len(arg) == 1:
+ yield arg[0]
+ else:
+ yield arg
+
+ try:
+ fake_attributes = hasattr(
+ obj, "awehoi234_wdfjwljet234_234wdfoijsdfmmnxpi492"
+ )
+ except Exception:
+ fake_attributes = False
+
+ rich_repr_result: Optional[RichReprResult] = None
+ if not fake_attributes:
+ try:
+ if hasattr(obj, "__rich_repr__") and not isclass(obj):
+ rich_repr_result = obj.__rich_repr__()
+ except Exception:
+ pass
+
+ if rich_repr_result is not None:
+ push_visited(obj_id)
+ angular = getattr(obj.__rich_repr__, "angular", False)
+ args = list(iter_rich_args(rich_repr_result))
+ class_name = obj.__class__.__name__
+
+ if args:
+ children = []
+ append = children.append
+
+ if reached_max_depth:
+ if angular:
+ node = Node(value_repr=f"<{class_name}...>")
+ else:
+ node = Node(value_repr=f"{class_name}(...)")
+ else:
+ if angular:
+ node = Node(
+ open_brace=f"<{class_name} ",
+ close_brace=">",
+ children=children,
+ last=root,
+ separator=" ",
+ )
+ else:
+ node = Node(
+ open_brace=f"{class_name}(",
+ close_brace=")",
+ children=children,
+ last=root,
+ )
+ for last, arg in loop_last(args):
+ if _safe_isinstance(arg, tuple):
+ key, child = arg
+ child_node = _traverse(child, depth=depth + 1)
+ child_node.last = last
+ child_node.key_repr = key
+ child_node.key_separator = "="
+ append(child_node)
+ else:
+ child_node = _traverse(arg, depth=depth + 1)
+ child_node.last = last
+ append(child_node)
+ else:
+ node = Node(
+ value_repr=f"<{class_name}>" if angular else f"{class_name}()",
+ children=[],
+ last=root,
+ )
+ pop_visited(obj_id)
+ elif _is_attr_object(obj) and not fake_attributes:
+ push_visited(obj_id)
+ children = []
+ append = children.append
+
+ attr_fields = _get_attr_fields(obj)
+ if attr_fields:
+ if reached_max_depth:
+ node = Node(value_repr=f"{obj.__class__.__name__}(...)")
+ else:
+ node = Node(
+ open_brace=f"{obj.__class__.__name__}(",
+ close_brace=")",
+ children=children,
+ last=root,
+ )
+
+ def iter_attrs() -> Iterable[
+ Tuple[str, Any, Optional[Callable[[Any], str]]]
+ ]:
+ """Iterate over attr fields and values."""
+ for attr in attr_fields:
+ if attr.repr:
+ try:
+ value = getattr(obj, attr.name)
+ except Exception as error:
+ # Can happen, albeit rarely
+ yield (attr.name, error, None)
+ else:
+ yield (
+ attr.name,
+ value,
+ attr.repr if callable(attr.repr) else None,
+ )
+
+ for last, (name, value, repr_callable) in loop_last(iter_attrs()):
+ if repr_callable:
+ child_node = Node(value_repr=str(repr_callable(value)))
+ else:
+ child_node = _traverse(value, depth=depth + 1)
+ child_node.last = last
+ child_node.key_repr = name
+ child_node.key_separator = "="
+ append(child_node)
+ else:
+ node = Node(
+ value_repr=f"{obj.__class__.__name__}()", children=[], last=root
+ )
+ pop_visited(obj_id)
+ elif (
+ is_dataclass(obj)
+ and not _safe_isinstance(obj, type)
+ and not fake_attributes
+ and (_is_dataclass_repr(obj) or py_version == (3, 6))
+ ):
+ push_visited(obj_id)
+ children = []
+ append = children.append
+ if reached_max_depth:
+ node = Node(value_repr=f"{obj.__class__.__name__}(...)")
+ else:
+ node = Node(
+ open_brace=f"{obj.__class__.__name__}(",
+ close_brace=")",
+ children=children,
+ last=root,
+ )
+
+ for last, field in loop_last(
+ field for field in fields(obj) if field.repr
+ ):
+ child_node = _traverse(getattr(obj, field.name), depth=depth + 1)
+ child_node.key_repr = field.name
+ child_node.last = last
+ child_node.key_separator = "="
+ append(child_node)
+
+ pop_visited(obj_id)
+ elif _is_namedtuple(obj) and _has_default_namedtuple_repr(obj):
+ push_visited(obj_id)
+ class_name = obj.__class__.__name__
+ if reached_max_depth:
+ # If we've reached the max depth, we still show the class name, but not its contents
+ node = Node(
+ value_repr=f"{class_name}(...)",
+ )
+ else:
+ children = []
+ append = children.append
+ node = Node(
+ open_brace=f"{class_name}(",
+ close_brace=")",
+ children=children,
+ empty=f"{class_name}()",
+ )
+ for last, (key, value) in loop_last(obj._asdict().items()):
+ child_node = _traverse(value, depth=depth + 1)
+ child_node.key_repr = key
+ child_node.last = last
+ child_node.key_separator = "="
+ append(child_node)
+ pop_visited(obj_id)
+ elif _safe_isinstance(obj, _CONTAINERS):
+ for container_type in _CONTAINERS:
+ if _safe_isinstance(obj, container_type):
+ obj_type = container_type
+ break
+
+ push_visited(obj_id)
+
+ open_brace, close_brace, empty = _BRACES[obj_type](obj)
+
+ if reached_max_depth:
+ node = Node(value_repr=f"{open_brace}...{close_brace}")
+ elif obj_type.__repr__ != type(obj).__repr__:
+ node = Node(value_repr=to_repr(obj), last=root)
+ elif obj:
+ children = []
+ node = Node(
+ open_brace=open_brace,
+ close_brace=close_brace,
+ children=children,
+ last=root,
+ )
+ append = children.append
+ num_items = len(obj)
+ last_item_index = num_items - 1
+
+ if _safe_isinstance(obj, _MAPPING_CONTAINERS):
+ iter_items = iter(obj.items())
+ if max_length is not None:
+ iter_items = islice(iter_items, max_length)
+ for index, (key, child) in enumerate(iter_items):
+ child_node = _traverse(child, depth=depth + 1)
+ child_node.key_repr = to_repr(key)
+ child_node.last = index == last_item_index
+ append(child_node)
+ else:
+ iter_values = iter(obj)
+ if max_length is not None:
+ iter_values = islice(iter_values, max_length)
+ for index, child in enumerate(iter_values):
+ child_node = _traverse(child, depth=depth + 1)
+ child_node.last = index == last_item_index
+ append(child_node)
+ if max_length is not None and num_items > max_length:
+ append(Node(value_repr=f"... +{num_items - max_length}", last=True))
+ else:
+ node = Node(empty=empty, children=[], last=root)
+
+ pop_visited(obj_id)
+ else:
+ node = Node(value_repr=to_repr(obj), last=root)
+ node.is_tuple = _safe_isinstance(obj, tuple)
+ node.is_namedtuple = _is_namedtuple(obj)
+ return node
+
+ node = _traverse(_object, root=True)
+ return node
+
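+# Usage sketch (illustrative, not upstream code): traverse() builds a Node
+# tree that Node.render() flattens back into a repr string, expanding any
+# container that does not fit within max_width:
+#     node = traverse({"a": [1, 2, 3]}, max_length=2)
+#     print(node.render(max_width=20))
+#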
+
+def pretty_repr(
+ _object: Any,
+ *,
+ max_width: int = 80,
+ indent_size: int = 4,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ expand_all: bool = False,
+) -> str:
+ """Prettify repr string by expanding on to new lines to fit within a given width.
+
+ Args:
+ _object (Any): Object to repr.
+ max_width (int, optional): Desired maximum width of repr string. Defaults to 80.
+ indent_size (int, optional): Number of spaces to indent. Defaults to 4.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable truncating.
+ Defaults to None.
+ max_depth (int, optional): Maximum depth of nested data structure, or None for no depth.
+ Defaults to None.
+ expand_all (bool, optional): Expand all containers regardless of available width. Defaults to False.
+
+ Returns:
+ str: A possibly multi-line representation of the object.
+ """
+
+ if _safe_isinstance(_object, Node):
+ node = _object
+ else:
+ node = traverse(
+ _object, max_length=max_length, max_string=max_string, max_depth=max_depth
+ )
+ repr_str: str = node.render(
+ max_width=max_width, indent_size=indent_size, expand_all=expand_all
+ )
+ return repr_str
+
+
+def pprint(
+ _object: Any,
+ *,
+ console: Optional["Console"] = None,
+ indent_guides: bool = True,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+ max_depth: Optional[int] = None,
+ expand_all: bool = False,
+) -> None:
+ """A convenience function for pretty printing.
+
+ Args:
+ _object (Any): Object to pretty print.
+ console (Console, optional): Console instance, or None to use default. Defaults to None.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of strings before truncating, or None to disable. Defaults to None.
+ max_depth (int, optional): Maximum depth for nested data structures, or None for unlimited depth. Defaults to None.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to True.
+ expand_all (bool, optional): Expand all containers. Defaults to False.
+ """
+ _console = get_console() if console is None else console
+ _console.print(
+ Pretty(
+ _object,
+ max_length=max_length,
+ max_string=max_string,
+ max_depth=max_depth,
+ indent_guides=indent_guides,
+ expand_all=expand_all,
+ overflow="ignore",
+ ),
+ soft_wrap=True,
+ )
+
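+# Usage sketch (illustrative, not upstream code): pretty print an object to
+# the global console, abbreviating long containers and strings:
+#     pprint(list(range(100)), max_length=10)
+#     pprint("x" * 1000, max_string=40)
+#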
+
+if __name__ == "__main__": # pragma: no cover
+
+ class BrokenRepr:
+ def __repr__(self) -> str:
+ 1 / 0
+ return "this will fail"
+
+ from typing import NamedTuple
+
+ class StockKeepingUnit(NamedTuple):
+ name: str
+ description: str
+ price: float
+ category: str
+ reviews: List[str]
+
+ d = defaultdict(int)
+ d["foo"] = 5
+ data = {
+ "foo": [
+ 1,
+ "Hello World!",
+ 100.123,
+ 323.232,
+ 432324.0,
+ {5, 6, 7, (1, 2, 3, 4), 8},
+ ],
+ "bar": frozenset({1, 2, 3}),
+ "defaultdict": defaultdict(
+ list, {"crumble": ["apple", "rhubarb", "butter", "sugar", "flour"]}
+ ),
+ "counter": Counter(
+ [
+ "apple",
+ "orange",
+ "pear",
+ "kumquat",
+ "kumquat",
+ "durian" * 100,
+ ]
+ ),
+ "atomic": (False, True, None),
+ "namedtuple": StockKeepingUnit(
+ "Sparkling British Spring Water",
+ "Carbonated spring water",
+ 0.9,
+ "water",
+ ["its amazing!", "its terrible!"],
+ ),
+ "Broken": BrokenRepr(),
+ }
+ data["foo"].append(data) # type: ignore[attr-defined]
+
+ from pip._vendor.rich import print
+
+ # print(Pretty(data, indent_guides=True, max_string=20))
+
+ class Thing:
+ def __repr__(self) -> str:
+ return "Hello\x1b[38;5;239m World!"
+
+ print(Pretty(Thing()))
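+
+ # Illustrative addition (not part of upstream rich): the demo data above,
+ # abbreviated via the max_length / max_string options documented on Pretty.
+ pprint(data, max_length=4, max_string=20)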
diff --git a/third_party/python/pip/pip/_vendor/rich/progress.py b/third_party/python/pip/pip/_vendor/rich/progress.py
new file mode 100644
index 0000000000..e7d163c137
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/progress.py
@@ -0,0 +1,1707 @@
+import io
+import sys
+import typing
+import warnings
+from abc import ABC, abstractmethod
+from collections import deque
+from collections.abc import Sized
+from dataclasses import dataclass, field
+from datetime import timedelta
+from io import RawIOBase, UnsupportedOperation
+from math import ceil
+from mmap import mmap
+from os import PathLike, stat
+from threading import Event, RLock, Thread
+from types import TracebackType
+from typing import (
+ Any,
+ BinaryIO,
+ Callable,
+ ContextManager,
+ Deque,
+ Dict,
+ Generic,
+ Iterable,
+ List,
+ NamedTuple,
+ NewType,
+ Optional,
+ Sequence,
+ TextIO,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+)
+
+if sys.version_info >= (3, 8):
+ from typing import Literal
+else:
+ from pip._vendor.typing_extensions import Literal # pragma: no cover
+
+from . import filesize, get_console
+from .console import Console, Group, JustifyMethod, RenderableType
+from .highlighter import Highlighter
+from .jupyter import JupyterMixin
+from .live import Live
+from .progress_bar import ProgressBar
+from .spinner import Spinner
+from .style import StyleType
+from .table import Column, Table
+from .text import Text, TextType
+
+TaskID = NewType("TaskID", int)
+
+ProgressType = TypeVar("ProgressType")
+
+GetTimeCallable = Callable[[], float]
+
+
+_I = typing.TypeVar("_I", TextIO, BinaryIO)
+
+
+class _TrackThread(Thread):
+ """A thread to periodically update progress."""
+
+ def __init__(self, progress: "Progress", task_id: "TaskID", update_period: float):
+ self.progress = progress
+ self.task_id = task_id
+ self.update_period = update_period
+ self.done = Event()
+
+ self.completed = 0
+ super().__init__()
+
+ def run(self) -> None:
+ task_id = self.task_id
+ advance = self.progress.advance
+ update_period = self.update_period
+ last_completed = 0
+ wait = self.done.wait
+ while not wait(update_period):
+ completed = self.completed
+ if last_completed != completed:
+ advance(task_id, completed - last_completed)
+ last_completed = completed
+
+ self.progress.update(self.task_id, completed=self.completed, refresh=True)
+
+ def __enter__(self) -> "_TrackThread":
+ self.start()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.done.set()
+ self.join()
+
+
+def track(
+ sequence: Union[Sequence[ProgressType], Iterable[ProgressType]],
+ description: str = "Working...",
+ total: Optional[float] = None,
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ update_period: float = 0.1,
+ disable: bool = False,
+ show_speed: bool = True,
+) -> Iterable[ProgressType]:
+ """Track progress by iterating over a sequence.
+
+ Args:
+ sequence (Iterable[ProgressType]): A sequence (must support "len") you wish to iterate over.
+ description (str, optional): Description of the task shown next to the progress bar. Defaults to "Working...".
+ total (float, optional): Total number of steps. Default is len(sequence).
+ auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+ transient (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
+ disable (bool, optional): Disable display of progress. Defaults to False.
+ show_speed (bool, optional): Show speed if total isn't known. Defaults to True.
+ Returns:
+ Iterable[ProgressType]: An iterable of the values in the sequence.
+
+ """
+
+ columns: List["ProgressColumn"] = (
+ [TextColumn("[progress.description]{task.description}")] if description else []
+ )
+ columns.extend(
+ (
+ BarColumn(
+ style=style,
+ complete_style=complete_style,
+ finished_style=finished_style,
+ pulse_style=pulse_style,
+ ),
+ TaskProgressColumn(show_speed=show_speed),
+ TimeRemainingColumn(),
+ )
+ )
+ progress = Progress(
+ *columns,
+ auto_refresh=auto_refresh,
+ console=console,
+ transient=transient,
+ get_time=get_time,
+ refresh_per_second=refresh_per_second or 10,
+ disable=disable,
+ )
+
+ with progress:
+ yield from progress.track(
+ sequence, total=total, description=description, update_period=update_period
+ )
+
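+# Usage sketch (illustrative, not upstream code): track() wraps iteration in a
+# live progress display; `process` is a hypothetical callable:
+#     for item in track(range(100), description="Processing..."):
+#         process(item)
+#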
+
+class _Reader(RawIOBase, BinaryIO):
+ """A reader that tracks progress while it's being read from."""
+
+ def __init__(
+ self,
+ handle: BinaryIO,
+ progress: "Progress",
+ task: TaskID,
+ close_handle: bool = True,
+ ) -> None:
+ self.handle = handle
+ self.progress = progress
+ self.task = task
+ self.close_handle = close_handle
+ self._closed = False
+
+ def __enter__(self) -> "_Reader":
+ self.handle.__enter__()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.close()
+
+ def __iter__(self) -> BinaryIO:
+ return self
+
+ def __next__(self) -> bytes:
+ line = next(self.handle)
+ self.progress.advance(self.task, advance=len(line))
+ return line
+
+ @property
+ def closed(self) -> bool:
+ return self._closed
+
+ def fileno(self) -> int:
+ return self.handle.fileno()
+
+ def isatty(self) -> bool:
+ return self.handle.isatty()
+
+ @property
+ def mode(self) -> str:
+ return self.handle.mode
+
+ @property
+ def name(self) -> str:
+ return self.handle.name
+
+ def readable(self) -> bool:
+ return self.handle.readable()
+
+ def seekable(self) -> bool:
+ return self.handle.seekable()
+
+ def writable(self) -> bool:
+ return False
+
+ def read(self, size: int = -1) -> bytes:
+ block = self.handle.read(size)
+ self.progress.advance(self.task, advance=len(block))
+ return block
+
+ def readinto(self, b: Union[bytearray, memoryview, mmap]): # type: ignore[no-untyped-def, override]
+ n = self.handle.readinto(b) # type: ignore[attr-defined]
+ self.progress.advance(self.task, advance=n)
+ return n
+
+ def readline(self, size: int = -1) -> bytes: # type: ignore[override]
+ line = self.handle.readline(size)
+ self.progress.advance(self.task, advance=len(line))
+ return line
+
+ def readlines(self, hint: int = -1) -> List[bytes]:
+ lines = self.handle.readlines(hint)
+ self.progress.advance(self.task, advance=sum(map(len, lines)))
+ return lines
+
+ def close(self) -> None:
+ if self.close_handle:
+ self.handle.close()
+ self._closed = True
+
+ def seek(self, offset: int, whence: int = 0) -> int:
+ pos = self.handle.seek(offset, whence)
+ self.progress.update(self.task, completed=pos)
+ return pos
+
+ def tell(self) -> int:
+ return self.handle.tell()
+
+ def write(self, s: Any) -> int:
+ raise UnsupportedOperation("write")
+
+
+class _ReadContext(ContextManager[_I], Generic[_I]):
+ """A utility class to handle a context for both a reader and a progress."""
+
+ def __init__(self, progress: "Progress", reader: _I) -> None:
+ self.progress = progress
+ self.reader: _I = reader
+
+ def __enter__(self) -> _I:
+ self.progress.start()
+ return self.reader.__enter__()
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.progress.stop()
+ self.reader.__exit__(exc_type, exc_val, exc_tb)
+
+
+def wrap_file(
+ file: BinaryIO,
+ total: int,
+ *,
+ description: str = "Reading...",
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ disable: bool = False,
+) -> ContextManager[BinaryIO]:
+ """Read bytes from a file while tracking progress.
+
+ Args:
+ file (BinaryIO): A file-like object opened in binary mode.
+ total (int): Total number of bytes to read.
+ description (str, optional): Description of the task shown next to the progress bar. Defaults to "Reading...".
+ auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+ transient (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ disable (bool, optional): Disable display of progress. Defaults to False.
+ Returns:
+ ContextManager[BinaryIO]: A context manager yielding a progress reader.
+
+ """
+
+ columns: List["ProgressColumn"] = (
+ [TextColumn("[progress.description]{task.description}")] if description else []
+ )
+ columns.extend(
+ (
+ BarColumn(
+ style=style,
+ complete_style=complete_style,
+ finished_style=finished_style,
+ pulse_style=pulse_style,
+ ),
+ DownloadColumn(),
+ TimeRemainingColumn(),
+ )
+ )
+ progress = Progress(
+ *columns,
+ auto_refresh=auto_refresh,
+ console=console,
+ transient=transient,
+ get_time=get_time,
+ refresh_per_second=refresh_per_second or 10,
+ disable=disable,
+ )
+
+ reader = progress.wrap_file(file, total=total, description=description)
+ return _ReadContext(progress, reader)
+
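+# Usage sketch (illustrative, not upstream code): wrap an already-open binary
+# handle of known size (builtins.open is spelled out because this module
+# defines its own open() below):
+#     import builtins, os
+#     with builtins.open("data.bin", "rb") as handle:  # hypothetical file
+#         with wrap_file(handle, total=os.stat("data.bin").st_size) as file:
+#             while file.read(16384):
+#                 pass
+#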
+
+@typing.overload
+def open(
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Union[Literal["rt"], Literal["r"]],
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ description: str = "Reading...",
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ disable: bool = False,
+) -> ContextManager[TextIO]:
+ pass
+
+
+@typing.overload
+def open(
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Literal["rb"],
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ description: str = "Reading...",
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ disable: bool = False,
+) -> ContextManager[BinaryIO]:
+ pass
+
+
+def open(
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ description: str = "Reading...",
+ auto_refresh: bool = True,
+ console: Optional[Console] = None,
+ transient: bool = False,
+ get_time: Optional[Callable[[], float]] = None,
+ refresh_per_second: float = 10,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ disable: bool = False,
+) -> Union[ContextManager[BinaryIO], ContextManager[TextIO]]:
+ """Read bytes from a file while tracking progress.
+
+ Args:
+ file (Union[str, PathLike[str], bytes]): The path to the file to read.
+ mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
+ buffering (int): The buffering strategy to use, see :func:`io.open`.
+ encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
+ errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
+ newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
+ total (int, optional): Total number of bytes to read. Defaults to os.stat(file).st_size.
+ description (str, optional): Description of the task shown next to the progress bar. Defaults to "Reading...".
+ auto_refresh (bool, optional): Automatic refresh, disable to force a refresh after each iteration. Default is True.
+ transient (bool, optional): Clear the progress on exit. Defaults to False.
+ console (Console, optional): Console to write to. Default creates internal Console instance.
+ refresh_per_second (float): Number of times per second to refresh the progress information. Defaults to 10.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ disable (bool, optional): Disable display of progress. Defaults to False.
+
+ Returns:
+ Union[ContextManager[BinaryIO], ContextManager[TextIO]]: A context manager yielding a progress reader.
+
+ """
+
+ columns: List["ProgressColumn"] = (
+ [TextColumn("[progress.description]{task.description}")] if description else []
+ )
+ columns.extend(
+ (
+ BarColumn(
+ style=style,
+ complete_style=complete_style,
+ finished_style=finished_style,
+ pulse_style=pulse_style,
+ ),
+ DownloadColumn(),
+ TimeRemainingColumn(),
+ )
+ )
+ progress = Progress(
+ *columns,
+ auto_refresh=auto_refresh,
+ console=console,
+ transient=transient,
+ get_time=get_time,
+ refresh_per_second=refresh_per_second or 10,
+ disable=disable,
+ )
+
+ reader = progress.open(
+ file,
+ mode=mode,
+ buffering=buffering,
+ encoding=encoding,
+ errors=errors,
+ newline=newline,
+ total=total,
+ description=description,
+ )
+ return _ReadContext(progress, reader) # type: ignore[return-value, type-var]
+
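+# Usage sketch (illustrative, not upstream code): open a file by path with a
+# progress bar; total defaults to os.stat(file).st_size for a path:
+#     with open("report.csv", "rb", description="Reading...") as file:  # hypothetical file
+#         for line in file:
+#             ...
+#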
+
+class ProgressColumn(ABC):
+ """Base class for a widget to use in progress display."""
+
+ max_refresh: Optional[float] = None
+
+ def __init__(self, table_column: Optional[Column] = None) -> None:
+ self._table_column = table_column
+ self._renderable_cache: Dict[TaskID, Tuple[float, RenderableType]] = {}
+ self._update_time: Optional[float] = None
+
+ def get_table_column(self) -> Column:
+ """Get a table column, used to build tasks table."""
+ return self._table_column or Column()
+
+ def __call__(self, task: "Task") -> RenderableType:
+ """Called by the Progress object to return a renderable for the given task.
+
+ Args:
+ task (Task): An object containing information regarding the task.
+
+ Returns:
+ RenderableType: Anything renderable (including str).
+ """
+ current_time = task.get_time()
+ if self.max_refresh is not None and not task.completed:
+ try:
+ timestamp, renderable = self._renderable_cache[task.id]
+ except KeyError:
+ pass
+ else:
+ if timestamp + self.max_refresh > current_time:
+ return renderable
+
+ renderable = self.render(task)
+ self._renderable_cache[task.id] = (current_time, renderable)
+ return renderable
+
+ @abstractmethod
+ def render(self, task: "Task") -> RenderableType:
+ """Should return a renderable object."""
+
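+# Sketch of a custom column (illustrative, not part of this module): subclass
+# ProgressColumn and implement render() to control what is shown per task:
+#     class CompletedColumn(ProgressColumn):
+#         def render(self, task: "Task") -> Text:
+#             return Text(str(int(task.completed)))
+#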
+
+class RenderableColumn(ProgressColumn):
+ """A column to insert an arbitrary column.
+
+ Args:
+ renderable (RenderableType, optional): Any renderable. Defaults to empty string.
+ """
+
+ def __init__(
+ self, renderable: RenderableType = "", *, table_column: Optional[Column] = None
+ ):
+ self.renderable = renderable
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> RenderableType:
+ return self.renderable
+
+
+class SpinnerColumn(ProgressColumn):
+ """A column with a 'spinner' animation.
+
+ Args:
+ spinner_name (str, optional): Name of spinner animation. Defaults to "dots".
+ style (StyleType, optional): Style of spinner. Defaults to "progress.spinner".
+ speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+ finished_text (TextType, optional): Text used when task is finished. Defaults to " ".
+ """
+
+ def __init__(
+ self,
+ spinner_name: str = "dots",
+ style: Optional[StyleType] = "progress.spinner",
+ speed: float = 1.0,
+ finished_text: TextType = " ",
+ table_column: Optional[Column] = None,
+ ):
+ self.spinner = Spinner(spinner_name, style=style, speed=speed)
+ self.finished_text = (
+ Text.from_markup(finished_text)
+ if isinstance(finished_text, str)
+ else finished_text
+ )
+ super().__init__(table_column=table_column)
+
+ def set_spinner(
+ self,
+ spinner_name: str,
+ spinner_style: Optional[StyleType] = "progress.spinner",
+ speed: float = 1.0,
+ ) -> None:
+ """Set a new spinner.
+
+ Args:
+ spinner_name (str): Spinner name, see python -m rich.spinner.
+ spinner_style (Optional[StyleType], optional): Spinner style. Defaults to "progress.spinner".
+ speed (float, optional): Speed factor of spinner. Defaults to 1.0.
+ """
+ self.spinner = Spinner(spinner_name, style=spinner_style, speed=speed)
+
+ def render(self, task: "Task") -> RenderableType:
+ text = (
+ self.finished_text
+ if task.finished
+ else self.spinner.render(task.get_time())
+ )
+ return text
+
+
+class TextColumn(ProgressColumn):
+ """A column containing text."""
+
+ def __init__(
+ self,
+ text_format: str,
+ style: StyleType = "none",
+ justify: JustifyMethod = "left",
+ markup: bool = True,
+ highlighter: Optional[Highlighter] = None,
+ table_column: Optional[Column] = None,
+ ) -> None:
+ self.text_format = text_format
+ self.justify: JustifyMethod = justify
+ self.style = style
+ self.markup = markup
+ self.highlighter = highlighter
+ super().__init__(table_column=table_column or Column(no_wrap=True))
+
+ def render(self, task: "Task") -> Text:
+ _text = self.text_format.format(task=task)
+ if self.markup:
+ text = Text.from_markup(_text, style=self.style, justify=self.justify)
+ else:
+ text = Text(_text, style=self.style, justify=self.justify)
+ if self.highlighter:
+ self.highlighter.highlight(text)
+ return text
+
+
+class BarColumn(ProgressColumn):
+ """Renders a visual progress bar.
+
+ Args:
+ bar_width (Optional[int], optional): Width of bar or None for full width. Defaults to 40.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ """
+
+ def __init__(
+ self,
+ bar_width: Optional[int] = 40,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ table_column: Optional[Column] = None,
+ ) -> None:
+ self.bar_width = bar_width
+ self.style = style
+ self.complete_style = complete_style
+ self.finished_style = finished_style
+ self.pulse_style = pulse_style
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> ProgressBar:
+ """Gets a progress bar widget for a task."""
+ return ProgressBar(
+ total=max(0, task.total) if task.total is not None else None,
+ completed=max(0, task.completed),
+ width=None if self.bar_width is None else max(1, self.bar_width),
+ pulse=not task.started,
+ animation_time=task.get_time(),
+ style=self.style,
+ complete_style=self.complete_style,
+ finished_style=self.finished_style,
+ pulse_style=self.pulse_style,
+ )
+
+
+class TimeElapsedColumn(ProgressColumn):
+ """Renders time elapsed."""
+
+ def render(self, task: "Task") -> Text:
+ """Show time remaining."""
+ elapsed = task.finished_time if task.finished else task.elapsed
+ if elapsed is None:
+ return Text("-:--:--", style="progress.elapsed")
+ delta = timedelta(seconds=int(elapsed))
+ return Text(str(delta), style="progress.elapsed")
+
+
+class TaskProgressColumn(TextColumn):
+ """Show task progress as a percentage.
+
+ Args:
+ text_format (str, optional): Format for percentage display. Defaults to "[progress.percentage]{task.percentage:>3.0f}%".
+ text_format_no_percentage (str, optional): Format if percentage is unknown. Defaults to "".
+ style (StyleType, optional): Style of output. Defaults to "none".
+ justify (JustifyMethod, optional): Text justification. Defaults to "left".
+ markup (bool, optional): Enable markup. Defaults to True.
+ highlighter (Optional[Highlighter], optional): Highlighter to apply to output. Defaults to None.
+ table_column (Optional[Column], optional): Table Column to use. Defaults to None.
+ show_speed (bool, optional): Show speed if total is unknown. Defaults to False.
+ """
+
+ def __init__(
+ self,
+ text_format: str = "[progress.percentage]{task.percentage:>3.0f}%",
+ text_format_no_percentage: str = "",
+ style: StyleType = "none",
+ justify: JustifyMethod = "left",
+ markup: bool = True,
+ highlighter: Optional[Highlighter] = None,
+ table_column: Optional[Column] = None,
+ show_speed: bool = False,
+ ) -> None:
+
+ self.text_format_no_percentage = text_format_no_percentage
+ self.show_speed = show_speed
+ super().__init__(
+ text_format=text_format,
+ style=style,
+ justify=justify,
+ markup=markup,
+ highlighter=highlighter,
+ table_column=table_column,
+ )
+
+ @classmethod
+ def render_speed(cls, speed: Optional[float]) -> Text:
+ """Render the speed in iterations per second.
+
+ Args:
+            speed (Optional[float]): Speed in steps per second, or None if unknown.
+
+ Returns:
+ Text: Text object containing the task speed.
+ """
+ if speed is None:
+ return Text("", style="progress.percentage")
+ unit, suffix = filesize.pick_unit_and_suffix(
+ int(speed),
+ ["", "×10³", "×10⁶", "×10⁹", "×10¹²"],
+ 1000,
+ )
+ data_speed = speed / unit
+ return Text(f"{data_speed:.1f}{suffix} it/s", style="progress.percentage")
+
+ def render(self, task: "Task") -> Text:
+ if task.total is None and self.show_speed:
+ return self.render_speed(task.finished_speed or task.speed)
+ text_format = (
+ self.text_format_no_percentage if task.total is None else self.text_format
+ )
+ _text = text_format.format(task=task)
+ if self.markup:
+ text = Text.from_markup(_text, style=self.style, justify=self.justify)
+ else:
+ text = Text(_text, style=self.style, justify=self.justify)
+ if self.highlighter:
+ self.highlighter.highlight(text)
+ return text
+
+
+class TimeRemainingColumn(ProgressColumn):
+ """Renders estimated time remaining.
+
+ Args:
+ compact (bool, optional): Render MM:SS when time remaining is less than an hour. Defaults to False.
+ elapsed_when_finished (bool, optional): Render time elapsed when the task is finished. Defaults to False.
+ """
+
+ # Only refresh twice a second to prevent jitter
+ max_refresh = 0.5
+
+ def __init__(
+ self,
+ compact: bool = False,
+ elapsed_when_finished: bool = False,
+ table_column: Optional[Column] = None,
+ ):
+ self.compact = compact
+ self.elapsed_when_finished = elapsed_when_finished
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> Text:
+ """Show time remaining."""
+ if self.elapsed_when_finished and task.finished:
+ task_time = task.finished_time
+ style = "progress.elapsed"
+ else:
+ task_time = task.time_remaining
+ style = "progress.remaining"
+
+ if task.total is None:
+ return Text("", style=style)
+
+ if task_time is None:
+ return Text("--:--" if self.compact else "-:--:--", style=style)
+
+ # Based on https://github.com/tqdm/tqdm/blob/master/tqdm/std.py
+ minutes, seconds = divmod(int(task_time), 60)
+ hours, minutes = divmod(minutes, 60)
+
+ if self.compact and not hours:
+ formatted = f"{minutes:02d}:{seconds:02d}"
+ else:
+ formatted = f"{hours:d}:{minutes:02d}:{seconds:02d}"
+
+ return Text(formatted, style=style)
+
+
+class FileSizeColumn(ProgressColumn):
+ """Renders completed filesize."""
+
+ def render(self, task: "Task") -> Text:
+ """Show data completed."""
+ data_size = filesize.decimal(int(task.completed))
+ return Text(data_size, style="progress.filesize")
+
+
+class TotalFileSizeColumn(ProgressColumn):
+ """Renders total filesize."""
+
+ def render(self, task: "Task") -> Text:
+ """Show data completed."""
+ data_size = filesize.decimal(int(task.total)) if task.total is not None else ""
+ return Text(data_size, style="progress.filesize.total")
+
+
+class MofNCompleteColumn(ProgressColumn):
+ """Renders completed count/total, e.g. ' 10/1000'.
+
+ Best for bounded tasks with int quantities.
+
+    Space-pads the completed count so that the rendered width does not change
+    as the task progresses past powers of 10.
+
+ Args:
+ separator (str, optional): Text to separate completed and total values. Defaults to "/".
+ """
+
+ def __init__(self, separator: str = "/", table_column: Optional[Column] = None):
+ self.separator = separator
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> Text:
+ """Show completed/total."""
+ completed = int(task.completed)
+ total = int(task.total) if task.total is not None else "?"
+ total_width = len(str(total))
+ return Text(
+ f"{completed:{total_width}d}{self.separator}{total}",
+ style="progress.download",
+ )
+
+
+class DownloadColumn(ProgressColumn):
+ """Renders file size downloaded and total, e.g. '0.5/2.3 GB'.
+
+ Args:
+ binary_units (bool, optional): Use binary units, KiB, MiB etc. Defaults to False.
+ """
+
+ def __init__(
+ self, binary_units: bool = False, table_column: Optional[Column] = None
+ ) -> None:
+ self.binary_units = binary_units
+ super().__init__(table_column=table_column)
+
+ def render(self, task: "Task") -> Text:
+ """Calculate common unit for completed and total."""
+ completed = int(task.completed)
+
+ unit_and_suffix_calculation_base = (
+ int(task.total) if task.total is not None else completed
+ )
+ if self.binary_units:
+ unit, suffix = filesize.pick_unit_and_suffix(
+ unit_and_suffix_calculation_base,
+ ["bytes", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"],
+ 1024,
+ )
+ else:
+ unit, suffix = filesize.pick_unit_and_suffix(
+ unit_and_suffix_calculation_base,
+ ["bytes", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"],
+ 1000,
+ )
+ precision = 0 if unit == 1 else 1
+
+ completed_ratio = completed / unit
+ completed_str = f"{completed_ratio:,.{precision}f}"
+
+ if task.total is not None:
+ total = int(task.total)
+ total_ratio = total / unit
+ total_str = f"{total_ratio:,.{precision}f}"
+ else:
+ total_str = "?"
+
+ download_status = f"{completed_str}/{total_str} {suffix}"
+ download_text = Text(download_status, style="progress.download")
+ return download_text
+
+
+class TransferSpeedColumn(ProgressColumn):
+ """Renders human readable transfer speed."""
+
+ def render(self, task: "Task") -> Text:
+ """Show data transfer speed."""
+ speed = task.finished_speed or task.speed
+ if speed is None:
+ return Text("?", style="progress.data.speed")
+ data_speed = filesize.decimal(int(speed))
+ return Text(f"{data_speed}/s", style="progress.data.speed")
+
+
+class ProgressSample(NamedTuple):
+ """Sample of progress for a given time."""
+
+ timestamp: float
+ """Timestamp of sample."""
+ completed: float
+ """Number of steps completed."""
+
+
+@dataclass
+class Task:
+ """Information regarding a progress task.
+
+ This object should be considered read-only outside of the :class:`~Progress` class.
+
+ """
+
+ id: TaskID
+ """Task ID associated with this task (used in Progress methods)."""
+
+ description: str
+ """str: Description of the task."""
+
+ total: Optional[float]
+ """Optional[float]: Total number of steps in this task."""
+
+ completed: float
+ """float: Number of steps completed"""
+
+ _get_time: GetTimeCallable
+ """Callable to get the current time."""
+
+ finished_time: Optional[float] = None
+ """float: Time task was finished."""
+
+ visible: bool = True
+ """bool: Indicates if this task is visible in the progress display."""
+
+ fields: Dict[str, Any] = field(default_factory=dict)
+ """dict: Arbitrary fields passed in via Progress.update."""
+
+ start_time: Optional[float] = field(default=None, init=False, repr=False)
+ """Optional[float]: Time this task was started, or None if not started."""
+
+ stop_time: Optional[float] = field(default=None, init=False, repr=False)
+ """Optional[float]: Time this task was stopped, or None if not stopped."""
+
+ finished_speed: Optional[float] = None
+ """Optional[float]: The last speed for a finished task."""
+
+ _progress: Deque[ProgressSample] = field(
+ default_factory=lambda: deque(maxlen=1000), init=False, repr=False
+ )
+
+ _lock: RLock = field(repr=False, default_factory=RLock)
+ """Thread lock."""
+
+ def get_time(self) -> float:
+ """float: Get the current time, in seconds."""
+ return self._get_time()
+
+ @property
+ def started(self) -> bool:
+ """bool: Check if the task as started."""
+ return self.start_time is not None
+
+ @property
+ def remaining(self) -> Optional[float]:
+ """Optional[float]: Get the number of steps remaining, if a non-None total was set."""
+ if self.total is None:
+ return None
+ return self.total - self.completed
+
+ @property
+ def elapsed(self) -> Optional[float]:
+ """Optional[float]: Time elapsed since task was started, or ``None`` if the task hasn't started."""
+ if self.start_time is None:
+ return None
+ if self.stop_time is not None:
+ return self.stop_time - self.start_time
+ return self.get_time() - self.start_time
+
+ @property
+ def finished(self) -> bool:
+ """Check if the task has finished."""
+ return self.finished_time is not None
+
+ @property
+ def percentage(self) -> float:
+ """float: Get progress of task as a percentage. If a None total was set, returns 0"""
+ if not self.total:
+ return 0.0
+ completed = (self.completed / self.total) * 100.0
+ completed = min(100.0, max(0.0, completed))
+ return completed
+
+ @property
+ def speed(self) -> Optional[float]:
+ """Optional[float]: Get the estimated speed in steps per second."""
+ if self.start_time is None:
+ return None
+ with self._lock:
+ progress = self._progress
+ if not progress:
+ return None
+ total_time = progress[-1].timestamp - progress[0].timestamp
+ if total_time == 0:
+ return None
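+            # Skip the first sample: its completed delta accrued before the
+            # start of the measured window, so counting it would inflate speed.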
+ iter_progress = iter(progress)
+ next(iter_progress)
+ total_completed = sum(sample.completed for sample in iter_progress)
+ speed = total_completed / total_time
+ return speed
+
+ @property
+ def time_remaining(self) -> Optional[float]:
+ """Optional[float]: Get estimated time to completion, or ``None`` if no data."""
+ if self.finished:
+ return 0.0
+ speed = self.speed
+ if not speed:
+ return None
+ remaining = self.remaining
+ if remaining is None:
+ return None
+ estimate = ceil(remaining / speed)
+ return estimate
+
+ def _reset(self) -> None:
+ """Reset progress."""
+ self._progress.clear()
+ self.finished_time = None
+ self.finished_speed = None
+
+
+class Progress(JupyterMixin):
+ """Renders an auto-updating progress bar(s).
+
+ Args:
+        console (Console, optional): Optional Console instance. Defaults to an internal Console instance writing to stdout.
+        auto_refresh (bool, optional): Enable auto refresh. If disabled, you will need to call `refresh()`. Defaults to True.
+        refresh_per_second (float, optional): Number of times per second to refresh the progress information. Defaults to 10.
+        speed_estimate_period (float, optional): Period (in seconds) used to calculate the speed estimate. Defaults to 30.
+        transient (bool, optional): Clear the progress on exit. Defaults to False.
+        redirect_stdout (bool, optional): Enable redirection of stdout, so ``print`` may be used. Defaults to True.
+        redirect_stderr (bool, optional): Enable redirection of stderr. Defaults to True.
+        get_time (Callable, optional): A callable that gets the current time, or None to use Console.get_time. Defaults to None.
+        disable (bool, optional): Disable progress display. Defaults to False.
+ expand (bool, optional): Expand tasks table to fit width. Defaults to False.
+ """
+
+ def __init__(
+ self,
+ *columns: Union[str, ProgressColumn],
+ console: Optional[Console] = None,
+ auto_refresh: bool = True,
+ refresh_per_second: float = 10,
+ speed_estimate_period: float = 30.0,
+ transient: bool = False,
+ redirect_stdout: bool = True,
+ redirect_stderr: bool = True,
+ get_time: Optional[GetTimeCallable] = None,
+ disable: bool = False,
+ expand: bool = False,
+ ) -> None:
+ assert refresh_per_second > 0, "refresh_per_second must be > 0"
+ self._lock = RLock()
+ self.columns = columns or self.get_default_columns()
+ self.speed_estimate_period = speed_estimate_period
+
+ self.disable = disable
+ self.expand = expand
+ self._tasks: Dict[TaskID, Task] = {}
+ self._task_index: TaskID = TaskID(0)
+ self.live = Live(
+ console=console or get_console(),
+ auto_refresh=auto_refresh,
+ refresh_per_second=refresh_per_second,
+ transient=transient,
+ redirect_stdout=redirect_stdout,
+ redirect_stderr=redirect_stderr,
+ get_renderable=self.get_renderable,
+ )
+ self.get_time = get_time or self.console.get_time
+ self.print = self.console.print
+ self.log = self.console.log
+
+ @classmethod
+ def get_default_columns(cls) -> Tuple[ProgressColumn, ...]:
+ """Get the default columns used for a new Progress instance:
+ - a text column for the description (TextColumn)
+ - the bar itself (BarColumn)
+ - a text column showing completion percentage (TextColumn)
+ - an estimated-time-remaining column (TimeRemainingColumn)
+ If the Progress instance is created without passing a columns argument,
+ the default columns defined here will be used.
+
+ You can also create a Progress instance using custom columns before
+ and/or after the defaults, as in this example:
+
+ progress = Progress(
+ SpinnerColumn(),
+            *Progress.get_default_columns(),
+ "Elapsed:",
+ TimeElapsedColumn(),
+ )
+
+ This code shows the creation of a Progress display, containing
+ a spinner to the left, the default columns, and a labeled elapsed
+ time column.
+ """
+ return (
+ TextColumn("[progress.description]{task.description}"),
+ BarColumn(),
+ TaskProgressColumn(),
+ TimeRemainingColumn(),
+ )
+
+ @property
+ def console(self) -> Console:
+ return self.live.console
+
+ @property
+ def tasks(self) -> List[Task]:
+ """Get a list of Task instances."""
+ with self._lock:
+ return list(self._tasks.values())
+
+ @property
+ def task_ids(self) -> List[TaskID]:
+ """A list of task IDs."""
+ with self._lock:
+ return list(self._tasks.keys())
+
+ @property
+ def finished(self) -> bool:
+ """Check if all tasks have been completed."""
+ with self._lock:
+ if not self._tasks:
+ return True
+ return all(task.finished for task in self._tasks.values())
+
+ def start(self) -> None:
+ """Start the progress display."""
+ if not self.disable:
+ self.live.start(refresh=True)
+
+ def stop(self) -> None:
+ """Stop the progress display."""
+ self.live.stop()
+ if not self.console.is_interactive:
+ self.console.print()
+
+ def __enter__(self) -> "Progress":
+ self.start()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.stop()
+
+ def track(
+ self,
+ sequence: Union[Iterable[ProgressType], Sequence[ProgressType]],
+ total: Optional[float] = None,
+ task_id: Optional[TaskID] = None,
+ description: str = "Working...",
+ update_period: float = 0.1,
+ ) -> Iterable[ProgressType]:
+ """Track progress by iterating over a sequence.
+
+ Args:
+            sequence (Sequence[ProgressType]): A sequence of values to iterate over while tracking progress.
+            total (float, optional): Total number of steps. Default is len(sequence).
+            task_id (TaskID, optional): Task to track. Default is a new task.
+            description (str, optional): Description of task, if a new task is created.
+ update_period (float, optional): Minimum time (in seconds) between calls to update(). Defaults to 0.1.
+
+ Returns:
+ Iterable[ProgressType]: An iterable of values taken from the provided sequence.
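+
+        Example (illustrative; ``rows`` and ``process`` are hypothetical):
+            >>> for row in progress.track(rows, description="Working..."):
+            ...     process(row)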
+ """
+
+ task_total: Optional[float] = None
+ if total is None:
+ if isinstance(sequence, Sized):
+ task_total = float(len(sequence))
+ else:
+ task_total = total
+
+ if task_id is None:
+ task_id = self.add_task(description, total=task_total)
+ else:
+ self.update(task_id, total=task_total)
+
+ if self.live.auto_refresh:
+ with _TrackThread(self, task_id, update_period) as track_thread:
+ for value in sequence:
+ yield value
+ track_thread.completed += 1
+ else:
+ advance = self.advance
+ refresh = self.refresh
+ for value in sequence:
+ yield value
+ advance(task_id, 1)
+ refresh()
+
+ def wrap_file(
+ self,
+ file: BinaryIO,
+ total: Optional[int] = None,
+ *,
+ task_id: Optional[TaskID] = None,
+ description: str = "Reading...",
+ ) -> BinaryIO:
+ """Track progress file reading from a binary file.
+
+ Args:
+ file (BinaryIO): A file-like object opened in binary mode.
+ total (int, optional): Total number of bytes to read. This must be provided unless a task with a total is also given.
+ task_id (TaskID): Task to track. Default is new task.
+ description (str, optional): Description of task, if new task is created.
+
+ Returns:
+ BinaryIO: A readable file-like object in binary mode.
+
+ Raises:
+ ValueError: When no total value can be extracted from the arguments or the task.
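+
+        Example (a sketch; the handle and total are hypothetical):
+            >>> with open("archive.bin", "rb") as handle:
+            ...     wrapped = progress.wrap_file(handle, total=4096)
+            ...     data = wrapped.read()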
+ """
+ # attempt to recover the total from the task
+ total_bytes: Optional[float] = None
+ if total is not None:
+ total_bytes = total
+ elif task_id is not None:
+ with self._lock:
+ total_bytes = self._tasks[task_id].total
+ if total_bytes is None:
+ raise ValueError(
+ f"unable to get the total number of bytes, please specify 'total'"
+ )
+
+ # update total of task or create new task
+ if task_id is None:
+ task_id = self.add_task(description, total=total_bytes)
+ else:
+ self.update(task_id, total=total_bytes)
+
+ return _Reader(file, self, task_id, close_handle=False)
+
+ @typing.overload
+ def open(
+ self,
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Literal["rb"],
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ task_id: Optional[TaskID] = None,
+ description: str = "Reading...",
+ ) -> BinaryIO:
+ pass
+
+ @typing.overload
+ def open(
+ self,
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Union[Literal["r"], Literal["rt"]],
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ task_id: Optional[TaskID] = None,
+ description: str = "Reading...",
+ ) -> TextIO:
+ pass
+
+ def open(
+ self,
+ file: Union[str, "PathLike[str]", bytes],
+ mode: Union[Literal["rb"], Literal["rt"], Literal["r"]] = "r",
+ buffering: int = -1,
+ encoding: Optional[str] = None,
+ errors: Optional[str] = None,
+ newline: Optional[str] = None,
+ *,
+ total: Optional[int] = None,
+ task_id: Optional[TaskID] = None,
+ description: str = "Reading...",
+ ) -> Union[BinaryIO, TextIO]:
+ """Track progress while reading from a binary file.
+
+ Args:
+            file (Union[str, PathLike[str], bytes]): The path to the file to read.
+ mode (str): The mode to use to open the file. Only supports "r", "rb" or "rt".
+ buffering (int): The buffering strategy to use, see :func:`io.open`.
+ encoding (str, optional): The encoding to use when reading in text mode, see :func:`io.open`.
+ errors (str, optional): The error handling strategy for decoding errors, see :func:`io.open`.
+ newline (str, optional): The strategy for handling newlines in text mode, see :func:`io.open`.
+ total (int, optional): Total number of bytes to read. If none given, os.stat(path).st_size is used.
+ task_id (TaskID): Task to track. Default is new task.
+ description (str, optional): Description of task, if new task is created.
+
+        Returns:
+            Union[BinaryIO, TextIO]: A readable file-like object, binary or text depending on the mode.
+
+ Raises:
+ ValueError: When an invalid mode is given.
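+
+        Example (a sketch; the filename is hypothetical):
+            >>> with progress.open("data.csv", "r") as file:
+            ...     for line in file:
+            ...         pass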
+ """
+        # normalize the mode by sorting its characters ("rb" -> "br", "rt", "r")
+        _mode = "".join(sorted(mode))
+ if _mode not in ("br", "rt", "r"):
+ raise ValueError("invalid mode {!r}".format(mode))
+
+ # patch buffering to provide the same behaviour as the builtin `open`
+ line_buffering = buffering == 1
+ if _mode == "br" and buffering == 1:
+ warnings.warn(
+ "line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used",
+ RuntimeWarning,
+ )
+ buffering = -1
+ elif _mode == "rt" or _mode == "r":
+ if buffering == 0:
+ raise ValueError("can't have unbuffered text I/O")
+ elif buffering == 1:
+ buffering = -1
+
+ # attempt to get the total with `os.stat`
+ if total is None:
+ total = stat(file).st_size
+
+ # update total of task or create new task
+ if task_id is None:
+ task_id = self.add_task(description, total=total)
+ else:
+ self.update(task_id, total=total)
+
+        # open the file in binary mode
+ handle = io.open(file, "rb", buffering=buffering)
+ reader = _Reader(handle, self, task_id, close_handle=True)
+
+ # wrap the reader in a `TextIOWrapper` if text mode
+ if mode == "r" or mode == "rt":
+ return io.TextIOWrapper(
+ reader,
+ encoding=encoding,
+ errors=errors,
+ newline=newline,
+ line_buffering=line_buffering,
+ )
+
+ return reader
+
+ def start_task(self, task_id: TaskID) -> None:
+ """Start a task.
+
+        Starts a task (used when calculating elapsed time). You may need to call this manually
+        if you called ``add_task`` with ``start=False``.
+
+ Args:
+ task_id (TaskID): ID of task.
+ """
+ with self._lock:
+ task = self._tasks[task_id]
+ if task.start_time is None:
+ task.start_time = self.get_time()
+
+ def stop_task(self, task_id: TaskID) -> None:
+ """Stop a task.
+
+ This will freeze the elapsed time on the task.
+
+ Args:
+ task_id (TaskID): ID of task.
+ """
+ with self._lock:
+ task = self._tasks[task_id]
+ current_time = self.get_time()
+ if task.start_time is None:
+ task.start_time = current_time
+ task.stop_time = current_time
+
+ def update(
+ self,
+ task_id: TaskID,
+ *,
+ total: Optional[float] = None,
+ completed: Optional[float] = None,
+ advance: Optional[float] = None,
+ description: Optional[str] = None,
+ visible: Optional[bool] = None,
+ refresh: bool = False,
+ **fields: Any,
+ ) -> None:
+ """Update information associated with a task.
+
+ Args:
+ task_id (TaskID): Task id (returned by add_task).
+ total (float, optional): Updates task.total if not None.
+ completed (float, optional): Updates task.completed if not None.
+ advance (float, optional): Add a value to task.completed if not None.
+ description (str, optional): Change task description if not None.
+ visible (bool, optional): Set visible flag if not None.
+ refresh (bool): Force a refresh of progress information. Default is False.
+ **fields (Any): Additional data fields required for rendering.
+ """
+ with self._lock:
+ task = self._tasks[task_id]
+ completed_start = task.completed
+
+ if total is not None and total != task.total:
+ task.total = total
+ task._reset()
+ if advance is not None:
+ task.completed += advance
+ if completed is not None:
+ task.completed = completed
+ if description is not None:
+ task.description = description
+ if visible is not None:
+ task.visible = visible
+ task.fields.update(fields)
+ update_completed = task.completed - completed_start
+
+ current_time = self.get_time()
+ old_sample_time = current_time - self.speed_estimate_period
+ _progress = task._progress
+
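+            # Discard samples older than the speed-estimate window so the
+            # speed reflects only recent progress.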
+ popleft = _progress.popleft
+ while _progress and _progress[0].timestamp < old_sample_time:
+ popleft()
+ if update_completed > 0:
+ _progress.append(ProgressSample(current_time, update_completed))
+ if (
+ task.total is not None
+ and task.completed >= task.total
+ and task.finished_time is None
+ ):
+ task.finished_time = task.elapsed
+
+ if refresh:
+ self.refresh()
+
+ def reset(
+ self,
+ task_id: TaskID,
+ *,
+ start: bool = True,
+ total: Optional[float] = None,
+ completed: int = 0,
+ visible: Optional[bool] = None,
+ description: Optional[str] = None,
+ **fields: Any,
+ ) -> None:
+ """Reset a task so completed is 0 and the clock is reset.
+
+ Args:
+ task_id (TaskID): ID of task.
+ start (bool, optional): Start the task after reset. Defaults to True.
+ total (float, optional): New total steps in task, or None to use current total. Defaults to None.
+ completed (int, optional): Number of steps completed. Defaults to 0.
+            visible (bool, optional): Enable display of the task, or None to leave unchanged. Defaults to None.
+ description (str, optional): Change task description if not None. Defaults to None.
+ **fields (str): Additional data fields required for rendering.
+ """
+ current_time = self.get_time()
+ with self._lock:
+ task = self._tasks[task_id]
+ task._reset()
+ task.start_time = current_time if start else None
+ if total is not None:
+ task.total = total
+ task.completed = completed
+ if visible is not None:
+ task.visible = visible
+ if fields:
+ task.fields = fields
+ if description is not None:
+ task.description = description
+ task.finished_time = None
+ self.refresh()
+
+ def advance(self, task_id: TaskID, advance: float = 1) -> None:
+ """Advance task by a number of steps.
+
+ Args:
+ task_id (TaskID): ID of task.
+ advance (float): Number of steps to advance. Default is 1.
+ """
+ current_time = self.get_time()
+ with self._lock:
+ task = self._tasks[task_id]
+ completed_start = task.completed
+ task.completed += advance
+ update_completed = task.completed - completed_start
+ old_sample_time = current_time - self.speed_estimate_period
+ _progress = task._progress
+
+ popleft = _progress.popleft
+ while _progress and _progress[0].timestamp < old_sample_time:
+ popleft()
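+            # Defensive size cap; the samples deque is created with
+            # maxlen=1000 in Task, so this rarely triggers.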
+ while len(_progress) > 1000:
+ popleft()
+ _progress.append(ProgressSample(current_time, update_completed))
+ if (
+ task.total is not None
+ and task.completed >= task.total
+ and task.finished_time is None
+ ):
+ task.finished_time = task.elapsed
+ task.finished_speed = task.speed
+
+ def refresh(self) -> None:
+ """Refresh (render) the progress information."""
+ if not self.disable and self.live.is_started:
+ self.live.refresh()
+
+ def get_renderable(self) -> RenderableType:
+ """Get a renderable for the progress display."""
+ renderable = Group(*self.get_renderables())
+ return renderable
+
+ def get_renderables(self) -> Iterable[RenderableType]:
+ """Get a number of renderables for the progress display."""
+ table = self.make_tasks_table(self.tasks)
+ yield table
+
+ def make_tasks_table(self, tasks: Iterable[Task]) -> Table:
+ """Get a table to render the Progress display.
+
+ Args:
+ tasks (Iterable[Task]): An iterable of Task instances, one per row of the table.
+
+ Returns:
+ Table: A table instance.
+ """
+ table_columns = (
+ (
+ Column(no_wrap=True)
+ if isinstance(_column, str)
+ else _column.get_table_column().copy()
+ )
+ for _column in self.columns
+ )
+ table = Table.grid(*table_columns, padding=(0, 1), expand=self.expand)
+
+ for task in tasks:
+ if task.visible:
+ table.add_row(
+ *(
+ (
+ column.format(task=task)
+ if isinstance(column, str)
+ else column(task)
+ )
+ for column in self.columns
+ )
+ )
+ return table
+
+ def __rich__(self) -> RenderableType:
+ """Makes the Progress class itself renderable."""
+ with self._lock:
+ return self.get_renderable()
+
+ def add_task(
+ self,
+ description: str,
+ start: bool = True,
+ total: Optional[float] = 100.0,
+ completed: int = 0,
+ visible: bool = True,
+ **fields: Any,
+ ) -> TaskID:
+ """Add a new 'task' to the Progress display.
+
+ Args:
+ description (str): A description of the task.
+ start (bool, optional): Start the task immediately (to calculate elapsed time). If set to False,
+                you will need to call `start_task` manually. Defaults to True.
+ total (float, optional): Number of total steps in the progress if known.
+ Set to None to render a pulsing animation. Defaults to 100.
+ completed (int, optional): Number of steps completed so far. Defaults to 0.
+ visible (bool, optional): Enable display of the task. Defaults to True.
+ **fields (str): Additional data fields required for rendering.
+
+ Returns:
+ TaskID: An ID you can use when calling `update`.
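+
+        Example:
+            >>> task_id = progress.add_task("[cyan]Downloading...", total=512)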
+ """
+ with self._lock:
+ task = Task(
+ self._task_index,
+ description,
+ total,
+ completed,
+ visible=visible,
+ fields=fields,
+ _get_time=self.get_time,
+ _lock=self._lock,
+ )
+ self._tasks[self._task_index] = task
+ if start:
+ self.start_task(self._task_index)
+ new_task_index = self._task_index
+ self._task_index = TaskID(int(self._task_index) + 1)
+ self.refresh()
+ return new_task_index
+
+ def remove_task(self, task_id: TaskID) -> None:
+ """Delete a task if it exists.
+
+ Args:
+ task_id (TaskID): A task ID.
+
+ """
+ with self._lock:
+ del self._tasks[task_id]
+
+
+if __name__ == "__main__": # pragma: no coverage
+
+ import random
+ import time
+
+ from .panel import Panel
+ from .rule import Rule
+ from .syntax import Syntax
+ from .table import Table
+
+ syntax = Syntax(
+ '''def loop_last(values: Iterable[T]) -> Iterable[Tuple[bool, T]]:
+ """Iterate and generate a tuple with a flag for last value."""
+ iter_values = iter(values)
+ try:
+ previous_value = next(iter_values)
+ except StopIteration:
+ return
+ for value in iter_values:
+ yield False, previous_value
+ previous_value = value
+ yield True, previous_value''',
+ "python",
+ line_numbers=True,
+ )
+
+ table = Table("foo", "bar", "baz")
+ table.add_row("1", "2", "3")
+
+ progress_renderables = [
+ "Text may be printed while the progress bars are rendering.",
+ Panel("In fact, [i]any[/i] renderable will work"),
+ "Such as [magenta]tables[/]...",
+ table,
+ "Pretty printed structures...",
+ {"type": "example", "text": "Pretty printed"},
+ "Syntax...",
+ syntax,
+ Rule("Give it a try!"),
+ ]
+
+ from itertools import cycle
+
+ examples = cycle(progress_renderables)
+
+ console = Console(record=True)
+
+ with Progress(
+ SpinnerColumn(),
+ *Progress.get_default_columns(),
+ TimeElapsedColumn(),
+ console=console,
+ transient=False,
+ ) as progress:
+
+ task1 = progress.add_task("[red]Downloading", total=1000)
+ task2 = progress.add_task("[green]Processing", total=1000)
+ task3 = progress.add_task("[yellow]Thinking", total=None)
+
+ while not progress.finished:
+ progress.update(task1, advance=0.5)
+ progress.update(task2, advance=0.3)
+ time.sleep(0.01)
+ if random.randint(0, 100) < 1:
+ progress.log(next(examples))
diff --git a/third_party/python/pip/pip/_vendor/rich/progress_bar.py b/third_party/python/pip/pip/_vendor/rich/progress_bar.py
new file mode 100644
index 0000000000..67361df2e4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/progress_bar.py
@@ -0,0 +1,224 @@
+import math
+from functools import lru_cache
+from time import monotonic
+from typing import Iterable, List, Optional
+
+from .color import Color, blend_rgb
+from .color_triplet import ColorTriplet
+from .console import Console, ConsoleOptions, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleType
+
+# Number of characters before 'pulse' animation repeats
+PULSE_SIZE = 20
+
+
+class ProgressBar(JupyterMixin):
+ """Renders a (progress) bar. Used by rich.progress.
+
+ Args:
+ total (float, optional): Number of steps in the bar. Defaults to 100. Set to None to render a pulsing animation.
+ completed (float, optional): Number of steps completed. Defaults to 0.
+ width (int, optional): Width of the bar, or ``None`` for maximum width. Defaults to None.
+ pulse (bool, optional): Enable pulse effect. Defaults to False. Will pulse if a None total was passed.
+ style (StyleType, optional): Style for the bar background. Defaults to "bar.back".
+ complete_style (StyleType, optional): Style for the completed bar. Defaults to "bar.complete".
+ finished_style (StyleType, optional): Style for a finished bar. Defaults to "bar.finished".
+ pulse_style (StyleType, optional): Style for pulsing bars. Defaults to "bar.pulse".
+ animation_time (Optional[float], optional): Time in seconds to use for animation, or None to use system time.
+ """
+
+ def __init__(
+ self,
+ total: Optional[float] = 100.0,
+ completed: float = 0,
+ width: Optional[int] = None,
+ pulse: bool = False,
+ style: StyleType = "bar.back",
+ complete_style: StyleType = "bar.complete",
+ finished_style: StyleType = "bar.finished",
+ pulse_style: StyleType = "bar.pulse",
+ animation_time: Optional[float] = None,
+ ):
+ self.total = total
+ self.completed = completed
+ self.width = width
+ self.pulse = pulse
+ self.style = style
+ self.complete_style = complete_style
+ self.finished_style = finished_style
+ self.pulse_style = pulse_style
+ self.animation_time = animation_time
+
+ self._pulse_segments: Optional[List[Segment]] = None
+
+ def __repr__(self) -> str:
+ return f"<Bar {self.completed!r} of {self.total!r}>"
+
+ @property
+ def percentage_completed(self) -> Optional[float]:
+ """Calculate percentage complete."""
+ if self.total is None:
+ return None
+ completed = (self.completed / self.total) * 100.0
+ completed = min(100, max(0.0, completed))
+ return completed
+
+ @lru_cache(maxsize=16)
+ def _get_pulse_segments(
+ self,
+ fore_style: Style,
+ back_style: Style,
+ color_system: str,
+ no_color: bool,
+ ascii: bool = False,
+ ) -> List[Segment]:
+ """Get a list of segments to render a pulse animation.
+
+ Returns:
+ List[Segment]: A list of segments, one segment per character.
+ """
+ bar = "-" if ascii else "━"
+ segments: List[Segment] = []
+ if color_system not in ("standard", "eight_bit", "truecolor") or no_color:
+ segments += [Segment(bar, fore_style)] * (PULSE_SIZE // 2)
+ segments += [Segment(" " if no_color else bar, back_style)] * (
+ PULSE_SIZE - (PULSE_SIZE // 2)
+ )
+ return segments
+
+ append = segments.append
+ fore_color = (
+ fore_style.color.get_truecolor()
+ if fore_style.color
+ else ColorTriplet(255, 0, 255)
+ )
+ back_color = (
+ back_style.color.get_truecolor()
+ if back_style.color
+ else ColorTriplet(0, 0, 0)
+ )
+ cos = math.cos
+ pi = math.pi
+ _Segment = Segment
+ _Style = Style
+ from_triplet = Color.from_triplet
+
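+        # One cosine cycle across PULSE_SIZE cells: fade runs from 1.0 at the
+        # ends (back style) to 0.0 mid-cycle (pulse color), blending smoothly
+        # between the two.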
+ for index in range(PULSE_SIZE):
+ position = index / PULSE_SIZE
+ fade = 0.5 + cos((position * pi * 2)) / 2.0
+ color = blend_rgb(fore_color, back_color, cross_fade=fade)
+ append(_Segment(bar, _Style(color=from_triplet(color))))
+ return segments
+
+ def update(self, completed: float, total: Optional[float] = None) -> None:
+ """Update progress with new values.
+
+ Args:
+ completed (float): Number of steps completed.
+ total (float, optional): Total number of steps, or ``None`` to not change. Defaults to None.
+ """
+ self.completed = completed
+ self.total = total if total is not None else self.total
+
+ def _render_pulse(
+ self, console: Console, width: int, ascii: bool = False
+ ) -> Iterable[Segment]:
+ """Renders the pulse animation.
+
+ Args:
+ console (Console): Console instance.
+ width (int): Width in characters of pulse animation.
+
+        Yields:
+            Iterator[Segment]: Segments to render the pulse animation.
+ """
+ fore_style = console.get_style(self.pulse_style, default="white")
+ back_style = console.get_style(self.style, default="black")
+
+ pulse_segments = self._get_pulse_segments(
+ fore_style, back_style, console.color_system, console.no_color, ascii=ascii
+ )
+ segment_count = len(pulse_segments)
+ current_time = (
+ monotonic() if self.animation_time is None else self.animation_time
+ )
+ segments = pulse_segments * (int(width / segment_count) + 2)
+ offset = int(-current_time * 15) % segment_count
+ segments = segments[offset : offset + width]
+ yield from segments
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+
+ width = min(self.width or options.max_width, options.max_width)
+ ascii = options.legacy_windows or options.ascii_only
+ should_pulse = self.pulse or self.total is None
+ if should_pulse:
+ yield from self._render_pulse(console, width, ascii=ascii)
+ return
+
+ completed: Optional[float] = (
+ min(self.total, max(0, self.completed)) if self.total is not None else None
+ )
+
+ bar = "-" if ascii else "━"
+ half_bar_right = " " if ascii else "╸"
+ half_bar_left = " " if ascii else "╺"
+ complete_halves = (
+ int(width * 2 * completed / self.total)
+ if self.total and completed is not None
+ else width * 2
+ )
+ bar_count = complete_halves // 2
+ half_bar_count = complete_halves % 2
+ style = console.get_style(self.style)
+ is_finished = self.total is None or self.completed >= self.total
+ complete_style = console.get_style(
+ self.finished_style if is_finished else self.complete_style
+ )
+ _Segment = Segment
+ if bar_count:
+ yield _Segment(bar * bar_count, complete_style)
+ if half_bar_count:
+ yield _Segment(half_bar_right * half_bar_count, complete_style)
+
+ if not console.no_color:
+ remaining_bars = width - bar_count - half_bar_count
+ if remaining_bars and console.color_system is not None:
+ if not half_bar_count and bar_count:
+ yield _Segment(half_bar_left, style)
+ remaining_bars -= 1
+ if remaining_bars:
+ yield _Segment(bar * remaining_bars, style)
+
+ def __rich_measure__(
+ self, console: Console, options: ConsoleOptions
+ ) -> Measurement:
+ return (
+ Measurement(self.width, self.width)
+ if self.width is not None
+ else Measurement(4, options.max_width)
+ )
+
+
+if __name__ == "__main__": # pragma: no cover
+ console = Console()
+ bar = ProgressBar(width=50, total=100)
+
+ import time
+
+ console.show_cursor(False)
+ for n in range(0, 101, 1):
+ bar.update(n)
+ console.print(bar)
+ console.file.write("\r")
+ time.sleep(0.05)
+ console.show_cursor(True)
+ console.print()
diff --git a/third_party/python/pip/pip/_vendor/rich/prompt.py b/third_party/python/pip/pip/_vendor/rich/prompt.py
new file mode 100644
index 0000000000..2bd0a7724f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/prompt.py
@@ -0,0 +1,376 @@
+from typing import Any, Generic, List, Optional, TextIO, TypeVar, Union, overload
+
+from . import get_console
+from .console import Console
+from .text import Text, TextType
+
+PromptType = TypeVar("PromptType")
+DefaultType = TypeVar("DefaultType")
+
+
+class PromptError(Exception):
+ """Exception base class for prompt related errors."""
+
+
+class InvalidResponse(PromptError):
+ """Exception to indicate a response was invalid. Raise this within process_response() to indicate an error
+ and provide an error message.
+
+ Args:
+ message (Union[str, Text]): Error message.
+ """
+
+ def __init__(self, message: TextType) -> None:
+ self.message = message
+
+ def __rich__(self) -> TextType:
+ return self.message
+
+
+class PromptBase(Generic[PromptType]):
+ """Ask the user for input until a valid response is received. This is the base class, see one of
+ the concrete classes for examples.
+
+ Args:
+ prompt (TextType, optional): Prompt text. Defaults to "".
+ console (Console, optional): A Console instance or None to use global console. Defaults to None.
+ password (bool, optional): Enable password input. Defaults to False.
+ choices (List[str], optional): A list of valid choices. Defaults to None.
+ show_default (bool, optional): Show default in prompt. Defaults to True.
+ show_choices (bool, optional): Show choices in prompt. Defaults to True.
+ """
+
+ response_type: type = str
+
+ validate_error_message = "[prompt.invalid]Please enter a valid value"
+ illegal_choice_message = (
+ "[prompt.invalid.choice]Please select one of the available options"
+ )
+ prompt_suffix = ": "
+
+ choices: Optional[List[str]] = None
+
+ def __init__(
+ self,
+ prompt: TextType = "",
+ *,
+ console: Optional[Console] = None,
+ password: bool = False,
+ choices: Optional[List[str]] = None,
+ show_default: bool = True,
+ show_choices: bool = True,
+ ) -> None:
+ self.console = console or get_console()
+ self.prompt = (
+ Text.from_markup(prompt, style="prompt")
+ if isinstance(prompt, str)
+ else prompt
+ )
+ self.password = password
+ if choices is not None:
+ self.choices = choices
+ self.show_default = show_default
+ self.show_choices = show_choices
+
+ @classmethod
+ @overload
+ def ask(
+ cls,
+ prompt: TextType = "",
+ *,
+ console: Optional[Console] = None,
+ password: bool = False,
+ choices: Optional[List[str]] = None,
+ show_default: bool = True,
+ show_choices: bool = True,
+ default: DefaultType,
+ stream: Optional[TextIO] = None,
+ ) -> Union[DefaultType, PromptType]:
+ ...
+
+ @classmethod
+ @overload
+ def ask(
+ cls,
+ prompt: TextType = "",
+ *,
+ console: Optional[Console] = None,
+ password: bool = False,
+ choices: Optional[List[str]] = None,
+ show_default: bool = True,
+ show_choices: bool = True,
+ stream: Optional[TextIO] = None,
+ ) -> PromptType:
+ ...
+
+ @classmethod
+ def ask(
+ cls,
+ prompt: TextType = "",
+ *,
+ console: Optional[Console] = None,
+ password: bool = False,
+ choices: Optional[List[str]] = None,
+ show_default: bool = True,
+ show_choices: bool = True,
+ default: Any = ...,
+ stream: Optional[TextIO] = None,
+ ) -> Any:
+ """Shortcut to construct and run a prompt loop and return the result.
+
+ Example:
+ >>> filename = Prompt.ask("Enter a filename")
+
+ Args:
+ prompt (TextType, optional): Prompt text. Defaults to "".
+ console (Console, optional): A Console instance or None to use global console. Defaults to None.
+ password (bool, optional): Enable password input. Defaults to False.
+ choices (List[str], optional): A list of valid choices. Defaults to None.
+ show_default (bool, optional): Show default in prompt. Defaults to True.
+ show_choices (bool, optional): Show choices in prompt. Defaults to True.
+ stream (TextIO, optional): Optional text file open for reading to get input. Defaults to None.
+ """
+ _prompt = cls(
+ prompt,
+ console=console,
+ password=password,
+ choices=choices,
+ show_default=show_default,
+ show_choices=show_choices,
+ )
+ return _prompt(default=default, stream=stream)
+
+ def render_default(self, default: DefaultType) -> Text:
+ """Turn the supplied default in to a Text instance.
+
+ Args:
+ default (DefaultType): Default value.
+
+ Returns:
+ Text: Text containing rendering of default value.
+ """
+ return Text(f"({default})", "prompt.default")
+
+ def make_prompt(self, default: DefaultType) -> Text:
+ """Make prompt text.
+
+ Args:
+ default (DefaultType): Default value.
+
+ Returns:
+ Text: Text to display in prompt.
+ """
+ prompt = self.prompt.copy()
+ prompt.end = ""
+
+ if self.show_choices and self.choices:
+ _choices = "/".join(self.choices)
+ choices = f"[{_choices}]"
+ prompt.append(" ")
+ prompt.append(choices, "prompt.choices")
+
+ if (
+ default != ...
+ and self.show_default
+ and isinstance(default, (str, self.response_type))
+ ):
+ prompt.append(" ")
+ _default = self.render_default(default)
+ prompt.append(_default)
+
+ prompt.append(self.prompt_suffix)
+
+ return prompt
+
+ @classmethod
+ def get_input(
+ cls,
+ console: Console,
+ prompt: TextType,
+ password: bool,
+ stream: Optional[TextIO] = None,
+ ) -> str:
+ """Get input from user.
+
+ Args:
+ console (Console): Console instance.
+ prompt (TextType): Prompt text.
+ password (bool): Enable password entry.
+
+ Returns:
+ str: String from user.
+ """
+ return console.input(prompt, password=password, stream=stream)
+
+ def check_choice(self, value: str) -> bool:
+ """Check value is in the list of valid choices.
+
+ Args:
+ value (str): Value entered by user.
+
+ Returns:
+ bool: True if choice was valid, otherwise False.
+ """
+ assert self.choices is not None
+ return value.strip() in self.choices
+
+ def process_response(self, value: str) -> PromptType:
+ """Process response from user, convert to prompt type.
+
+ Args:
+ value (str): String typed by user.
+
+ Raises:
+ InvalidResponse: If ``value`` is invalid.
+
+ Returns:
+ PromptType: The value to be returned from ask method.
+ """
+ value = value.strip()
+ try:
+ return_value: PromptType = self.response_type(value)
+ except ValueError:
+ raise InvalidResponse(self.validate_error_message)
+
+ if self.choices is not None and not self.check_choice(value):
+ raise InvalidResponse(self.illegal_choice_message)
+
+ return return_value
+
+ def on_validate_error(self, value: str, error: InvalidResponse) -> None:
+ """Called to handle validation error.
+
+ Args:
+ value (str): String entered by user.
+            error (InvalidResponse): Exception instance that initiated the error.
+ """
+ self.console.print(error)
+
+ def pre_prompt(self) -> None:
+ """Hook to display something before the prompt."""
+
+ @overload
+ def __call__(self, *, stream: Optional[TextIO] = None) -> PromptType:
+ ...
+
+ @overload
+ def __call__(
+ self, *, default: DefaultType, stream: Optional[TextIO] = None
+ ) -> Union[PromptType, DefaultType]:
+ ...
+
+ def __call__(self, *, default: Any = ..., stream: Optional[TextIO] = None) -> Any:
+ """Run the prompt loop.
+
+ Args:
+ default (Any, optional): Optional default value.
+
+ Returns:
+ PromptType: Processed value.
+ """
+ while True:
+ self.pre_prompt()
+ prompt = self.make_prompt(default)
+ value = self.get_input(self.console, prompt, self.password, stream=stream)
+ if value == "" and default != ...:
+ return default
+ try:
+ return_value = self.process_response(value)
+ except InvalidResponse as error:
+ self.on_validate_error(value, error)
+ continue
+ else:
+ return return_value
+
+
+class Prompt(PromptBase[str]):
+ """A prompt that returns a str.
+
+ Example:
+ >>> name = Prompt.ask("Enter your name")
+
+
+ """
+
+ response_type = str
+
+
+class IntPrompt(PromptBase[int]):
+ """A prompt that returns an integer.
+
+ Example:
+ >>> burrito_count = IntPrompt.ask("How many burritos do you want to order")
+
+ """
+
+ response_type = int
+ validate_error_message = "[prompt.invalid]Please enter a valid integer number"
+
+
+class FloatPrompt(PromptBase[float]):
+ """A prompt that returns a float.
+
+ Example:
+ >>> temperature = FloatPrompt.ask("Enter desired temperature")
+
+ """
+
+ response_type = float
+ validate_error_message = "[prompt.invalid]Please enter a number"
+
+
+class Confirm(PromptBase[bool]):
+ """A yes / no confirmation prompt.
+
+ Example:
+ >>> if Confirm.ask("Continue"):
+ run_job()
+
+ """
+
+ response_type = bool
+ validate_error_message = "[prompt.invalid]Please enter Y or N"
+ choices: List[str] = ["y", "n"]
+
+ def render_default(self, default: DefaultType) -> Text:
+ """Render the default as (y) or (n) rather than True/False."""
+ yes, no = self.choices
+ return Text(f"({yes})" if default else f"({no})", style="prompt.default")
+
+ def process_response(self, value: str) -> bool:
+ """Convert choices to a bool."""
+ value = value.strip().lower()
+ if value not in self.choices:
+ raise InvalidResponse(self.validate_error_message)
+ return value == self.choices[0]
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ from pip._vendor.rich import print
+
+ if Confirm.ask("Run [i]prompt[/i] tests?", default=True):
+ while True:
+ result = IntPrompt.ask(
+ ":rocket: Enter a number between [b]1[/b] and [b]10[/b]", default=5
+ )
+ if result >= 1 and result <= 10:
+ break
+ print(":pile_of_poo: [prompt.invalid]Number must be between 1 and 10")
+ print(f"number={result}")
+
+ while True:
+ password = Prompt.ask(
+ "Please enter a password [cyan](must be at least 5 characters)",
+ password=True,
+ )
+ if len(password) >= 5:
+ break
+ print("[prompt.invalid]password too short")
+ print(f"password={password!r}")
+
+ fruit = Prompt.ask("Enter a fruit", choices=["apple", "orange", "pear"])
+ print(f"fruit={fruit!r}")
+
+ else:
+ print("[b]OK :loudly_crying_face:")
diff --git a/third_party/python/pip/pip/_vendor/rich/protocol.py b/third_party/python/pip/pip/_vendor/rich/protocol.py
new file mode 100644
index 0000000000..12ab23713a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/protocol.py
@@ -0,0 +1,42 @@
+from typing import Any, cast, Set, TYPE_CHECKING
+from inspect import isclass
+
+if TYPE_CHECKING:
+ from pip._vendor.rich.console import RenderableType
+
+_GIBBERISH = """aihwerij235234ljsdnp34ksodfipwoe234234jlskjdf"""
+
+
+def is_renderable(check_object: Any) -> bool:
+ """Check if an object may be rendered by Rich."""
+ return (
+ isinstance(check_object, str)
+ or hasattr(check_object, "__rich__")
+ or hasattr(check_object, "__rich_console__")
+ )
+
+
+def rich_cast(renderable: object) -> "RenderableType":
+ """Cast an object to a renderable by calling __rich__ if present.
+
+ Args:
+ renderable (object): A potentially renderable object
+
+ Returns:
+ object: The result of recursively calling __rich__.
+ """
+ from pip._vendor.rich.console import RenderableType
+
+ rich_visited_set: Set[type] = set() # Prevent potential infinite loop
+ while hasattr(renderable, "__rich__") and not isclass(renderable):
+        # Detect objects which claim to have all attributes
+ if hasattr(renderable, _GIBBERISH):
+ return repr(renderable)
+ cast_method = getattr(renderable, "__rich__")
+ renderable = cast_method()
+ renderable_type = type(renderable)
+ if renderable_type in rich_visited_set:
+ break
+ rich_visited_set.add(renderable_type)
+
+ return cast(RenderableType, renderable)
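+
+
+# Example (illustrative): any object exposing a __rich__ method is resolved
+# by rich_cast, e.g.
+#
+#     class Greeting:
+#         def __rich__(self) -> str:
+#             return "[bold]Hello[/bold]"
+#
+#     rich_cast(Greeting())  # -> "[bold]Hello[/bold]"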
diff --git a/third_party/python/pip/pip/_vendor/rich/region.py b/third_party/python/pip/pip/_vendor/rich/region.py
new file mode 100644
index 0000000000..75b3631c38
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/region.py
@@ -0,0 +1,10 @@
+from typing import NamedTuple
+
+
+class Region(NamedTuple):
+ """Defines a rectangular region of the screen."""
+
+ x: int
+ y: int
+ width: int
+ height: int
diff --git a/third_party/python/pip/pip/_vendor/rich/repr.py b/third_party/python/pip/pip/_vendor/rich/repr.py
new file mode 100644
index 0000000000..72d1a7e30b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/repr.py
@@ -0,0 +1,149 @@
+import inspect
+from functools import partial
+from typing import (
+ Any,
+ Callable,
+ Iterable,
+ List,
+ Optional,
+ Tuple,
+ Type,
+ TypeVar,
+ Union,
+ overload,
+)
+
+T = TypeVar("T")
+
+
+Result = Iterable[Union[Any, Tuple[Any], Tuple[str, Any], Tuple[str, Any, Any]]]
+RichReprResult = Result
+
+
+class ReprError(Exception):
+ """An error occurred when attempting to build a repr."""
+
+
+@overload
+def auto(cls: Optional[Type[T]]) -> Type[T]:
+ ...
+
+
+@overload
+def auto(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
+ ...
+
+
+def auto(
+ cls: Optional[Type[T]] = None, *, angular: Optional[bool] = None
+) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
+ """Class decorator to create __repr__ from __rich_repr__"""
+
+ def do_replace(cls: Type[T], angular: Optional[bool] = None) -> Type[T]:
+ def auto_repr(self: T) -> str:
+ """Create repr string from __rich_repr__"""
+ repr_str: List[str] = []
+ append = repr_str.append
+
+ angular: bool = getattr(self.__rich_repr__, "angular", False) # type: ignore[attr-defined]
+ for arg in self.__rich_repr__(): # type: ignore[attr-defined]
+ if isinstance(arg, tuple):
+ if len(arg) == 1:
+ append(repr(arg[0]))
+ else:
+ key, value, *default = arg
+ if key is None:
+ append(repr(value))
+ else:
+ if len(default) and default[0] == value:
+ continue
+ append(f"{key}={value!r}")
+ else:
+ append(repr(arg))
+ if angular:
+ return f"<{self.__class__.__name__} {' '.join(repr_str)}>"
+ else:
+ return f"{self.__class__.__name__}({', '.join(repr_str)})"
+
+ def auto_rich_repr(self: Type[T]) -> Result:
+ """Auto generate __rich_rep__ from signature of __init__"""
+ try:
+ signature = inspect.signature(self.__init__)
+ for name, param in signature.parameters.items():
+ if param.kind == param.POSITIONAL_ONLY:
+ yield getattr(self, name)
+ elif param.kind in (
+ param.POSITIONAL_OR_KEYWORD,
+ param.KEYWORD_ONLY,
+ ):
+ if param.default == param.empty:
+ yield getattr(self, param.name)
+ else:
+ yield param.name, getattr(self, param.name), param.default
+ except Exception as error:
+ raise ReprError(
+ f"Failed to auto generate __rich_repr__; {error}"
+ ) from None
+
+ if not hasattr(cls, "__rich_repr__"):
+ auto_rich_repr.__doc__ = "Build a rich repr"
+ cls.__rich_repr__ = auto_rich_repr # type: ignore[attr-defined]
+
+ auto_repr.__doc__ = "Return repr(self)"
+ cls.__repr__ = auto_repr # type: ignore[assignment]
+ if angular is not None:
+ cls.__rich_repr__.angular = angular # type: ignore[attr-defined]
+ return cls
+
+ if cls is None:
+ return partial(do_replace, angular=angular)
+ else:
+ return do_replace(cls, angular=angular)
+
+
+@overload
+def rich_repr(cls: Optional[Type[T]]) -> Type[T]:
+ ...
+
+
+@overload
+def rich_repr(*, angular: bool = False) -> Callable[[Type[T]], Type[T]]:
+ ...
+
+
+def rich_repr(
+ cls: Optional[Type[T]] = None, *, angular: bool = False
+) -> Union[Type[T], Callable[[Type[T]], Type[T]]]:
+ if cls is None:
+ return auto(angular=angular)
+ else:
+ return auto(cls)
+
+
+if __name__ == "__main__":
+
+ @auto
+ class Foo:
+ def __rich_repr__(self) -> Result:
+ yield "foo"
+ yield "bar", {"shopping": ["eggs", "ham", "pineapple"]}
+ yield "buy", "hand sanitizer"
+
+ foo = Foo()
+ from pip._vendor.rich.console import Console
+
+ console = Console()
+
+ console.rule("Standard repr")
+ console.print(foo)
+
+ console.print(foo, width=60)
+ console.print(foo, width=30)
+
+ console.rule("Angular repr")
+ Foo.__rich_repr__.angular = True # type: ignore[attr-defined]
+
+ console.print(foo)
+
+ console.print(foo, width=60)
+ console.print(foo, width=30)
diff --git a/third_party/python/pip/pip/_vendor/rich/rule.py b/third_party/python/pip/pip/_vendor/rich/rule.py
new file mode 100644
index 0000000000..0b78f7a4ec
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/rule.py
@@ -0,0 +1,134 @@
+from typing import Union
+
+from .align import AlignMethod
+from .cells import cell_len, set_cell_size
+from .console import Console, ConsoleOptions, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .style import Style
+from .text import Text
+
+
+class Rule(JupyterMixin):
+ """A console renderable to draw a horizontal rule (line).
+
+ Args:
+ title (Union[str, Text], optional): Text to render in the rule. Defaults to "".
+ characters (str, optional): Character(s) used to draw the line. Defaults to "─".
+ style (StyleType, optional): Style of Rule. Defaults to "rule.line".
+        end (str, optional): Character at end of Rule. Defaults to "\\n".
+ align (str, optional): How to align the title, one of "left", "center", or "right". Defaults to "center".
+ """
+
+ def __init__(
+ self,
+ title: Union[str, Text] = "",
+ *,
+ characters: str = "─",
+ style: Union[str, Style] = "rule.line",
+ end: str = "\n",
+ align: AlignMethod = "center",
+ ) -> None:
+ if cell_len(characters) < 1:
+ raise ValueError(
+ "'characters' argument must have a cell width of at least 1"
+ )
+ if align not in ("left", "center", "right"):
+ raise ValueError(
+ f'invalid value for align, expected "left", "center", "right" (not {align!r})'
+ )
+ self.title = title
+ self.characters = characters
+ self.style = style
+ self.end = end
+ self.align = align
+
+ def __repr__(self) -> str:
+ return f"Rule({self.title!r}, {self.characters!r})"
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ width = options.max_width
+
+ # Python3.6 doesn't have an isascii method on str
+ isascii = getattr(str, "isascii", None) or (
+ lambda s: all(ord(c) < 128 for c in s)
+ )
+ characters = (
+ "-"
+ if (options.ascii_only and not isascii(self.characters))
+ else self.characters
+ )
+
+ chars_len = cell_len(characters)
+ if not self.title:
+ yield self._rule_line(chars_len, width)
+ return
+
+ if isinstance(self.title, Text):
+ title_text = self.title
+ else:
+ title_text = console.render_str(self.title, style="rule.text")
+
+ title_text.plain = title_text.plain.replace("\n", " ")
+ title_text.expand_tabs()
+
+ required_space = 4 if self.align == "center" else 2
+ truncate_width = max(0, width - required_space)
+ if not truncate_width:
+ yield self._rule_line(chars_len, width)
+ return
+
+ rule_text = Text(end=self.end)
+ if self.align == "center":
+ title_text.truncate(truncate_width, overflow="ellipsis")
+ side_width = (width - cell_len(title_text.plain)) // 2
+ left = Text(characters * (side_width // chars_len + 1))
+ left.truncate(side_width - 1)
+ right_length = width - cell_len(left.plain) - cell_len(title_text.plain)
+ right = Text(characters * (side_width // chars_len + 1))
+ right.truncate(right_length)
+ rule_text.append(left.plain + " ", self.style)
+ rule_text.append(title_text)
+ rule_text.append(" " + right.plain, self.style)
+ elif self.align == "left":
+ title_text.truncate(truncate_width, overflow="ellipsis")
+ rule_text.append(title_text)
+ rule_text.append(" ")
+ rule_text.append(characters * (width - rule_text.cell_len), self.style)
+ elif self.align == "right":
+ title_text.truncate(truncate_width, overflow="ellipsis")
+ rule_text.append(characters * (width - title_text.cell_len - 1), self.style)
+ rule_text.append(" ")
+ rule_text.append(title_text)
+
+ rule_text.plain = set_cell_size(rule_text.plain, width)
+ yield rule_text
+
+ def _rule_line(self, chars_len: int, width: int) -> Text:
+ rule_text = Text(self.characters * ((width // chars_len) + 1), self.style)
+ rule_text.truncate(width)
+ rule_text.plain = set_cell_size(rule_text.plain, width)
+ return rule_text
+
+ def __rich_measure__(
+ self, console: Console, options: ConsoleOptions
+ ) -> Measurement:
+ return Measurement(1, 1)
+
+
+if __name__ == "__main__": # pragma: no cover
+ import sys
+
+ from pip._vendor.rich.console import Console
+
+ try:
+ text = sys.argv[1]
+ except IndexError:
+ text = "Hello, World"
+ console = Console()
+ console.print(Rule(title=text))
+
+ console = Console()
+ console.print(Rule("foo"), width=4)
diff --git a/third_party/python/pip/pip/_vendor/rich/scope.py b/third_party/python/pip/pip/_vendor/rich/scope.py
new file mode 100644
index 0000000000..c9d134cc3c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/scope.py
@@ -0,0 +1,86 @@
+from collections.abc import Mapping
+from typing import TYPE_CHECKING, Any, Optional, Tuple
+
+from .highlighter import ReprHighlighter
+from .panel import Panel
+from .pretty import Pretty
+from .table import Table
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+ from .console import ConsoleRenderable
+
+
+def render_scope(
+ scope: "Mapping[str, Any]",
+ *,
+ title: Optional[TextType] = None,
+ sort_keys: bool = True,
+ indent_guides: bool = False,
+ max_length: Optional[int] = None,
+ max_string: Optional[int] = None,
+) -> "ConsoleRenderable":
+    """Render Python variables in a given scope.
+
+ Args:
+ scope (Mapping): A mapping containing variable names and values.
+ title (str, optional): Optional title. Defaults to None.
+ sort_keys (bool, optional): Enable sorting of items. Defaults to True.
+ indent_guides (bool, optional): Enable indentation guides. Defaults to False.
+ max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to None.
+ max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to None.
+
+ Returns:
+ ConsoleRenderable: A renderable object.
+ """
+ highlighter = ReprHighlighter()
+ items_table = Table.grid(padding=(0, 1), expand=False)
+ items_table.add_column(justify="right")
+
+ def sort_items(item: Tuple[str, Any]) -> Tuple[bool, str]:
+ """Sort special variables first, then alphabetically."""
+ key, _ = item
+ return (not key.startswith("__"), key.lower())
+
+ items = sorted(scope.items(), key=sort_items) if sort_keys else scope.items()
+ for key, value in items:
+ key_text = Text.assemble(
+ (key, "scope.key.special" if key.startswith("__") else "scope.key"),
+ (" =", "scope.equals"),
+ )
+ items_table.add_row(
+ key_text,
+ Pretty(
+ value,
+ highlighter=highlighter,
+ indent_guides=indent_guides,
+ max_length=max_length,
+ max_string=max_string,
+ ),
+ )
+ return Panel.fit(
+ items_table,
+ title=title,
+ border_style="scope.border",
+ padding=(0, 1),
+ )
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich import print
+
+ print()
+
+ def test(foo: float, bar: float) -> None:
+ list_of_things = [1, 2, 3, None, 4, True, False, "Hello World"]
+ dict_of_things = {
+ "version": "1.1",
+ "method": "confirmFruitPurchase",
+ "params": [["apple", "orange", "mangoes", "pomelo"], 1.123],
+ "id": "194521489",
+ }
+ print(render_scope(locals(), title="[i]locals", sort_keys=False))
+
+ test(20.3423, 3.1427)
+ print()
diff --git a/third_party/python/pip/pip/_vendor/rich/screen.py b/third_party/python/pip/pip/_vendor/rich/screen.py
new file mode 100644
index 0000000000..7f416e1e79
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/screen.py
@@ -0,0 +1,54 @@
+from typing import Optional, TYPE_CHECKING
+
+from .segment import Segment
+from .style import StyleType
+from ._loop import loop_last
+
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ RenderResult,
+ RenderableType,
+ Group,
+ )
+
+
+class Screen:
+ """A renderable that fills the terminal screen and crops excess.
+
+ Args:
+        renderables (RenderableType): Child renderables (combined into a group).
+        style (StyleType, optional): Optional background style. Defaults to None.
+        application_mode (bool, optional): Use "\n\r" line endings (for terminal application mode). Defaults to False.
+ """
+
+ renderable: "RenderableType"
+
+ def __init__(
+ self,
+ *renderables: "RenderableType",
+ style: Optional[StyleType] = None,
+ application_mode: bool = False,
+ ) -> None:
+ from pip._vendor.rich.console import Group
+
+ self.renderable = Group(*renderables)
+ self.style = style
+ self.application_mode = application_mode
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ width, height = options.size
+ style = console.get_style(self.style) if self.style else None
+ render_options = options.update(width=width, height=height)
+ lines = console.render_lines(
+ self.renderable or "", render_options, style=style, pad=True
+ )
+ lines = Segment.set_shape(lines, width, height, style=style)
+ new_line = Segment("\n\r") if self.application_mode else Segment.line()
+ for last, line in loop_last(lines):
+ yield from line
+ if not last:
+ yield new_line
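+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Editor's sketch, not from upstream rich: show a full-screen renderable
+    # inside the alternate screen buffer for a few seconds, then restore the
+    # terminal. The text and style below are illustrative.
+    from time import sleep
+
+    from pip._vendor.rich.console import Console
+
+    console = Console()
+    with console.screen():
+        console.print(Screen("Hello, World!", style="on blue"))
+        sleep(3)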
diff --git a/third_party/python/pip/pip/_vendor/rich/segment.py b/third_party/python/pip/pip/_vendor/rich/segment.py
new file mode 100644
index 0000000000..1ea5435adc
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/segment.py
@@ -0,0 +1,739 @@
+from enum import IntEnum
+from functools import lru_cache
+from itertools import filterfalse
+from logging import getLogger
+from operator import attrgetter
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ Type,
+ Union,
+)
+
+from .cells import (
+ _is_single_cell_widths,
+ cached_cell_len,
+ cell_len,
+ get_character_cell_size,
+ set_cell_size,
+)
+from .repr import Result, rich_repr
+from .style import Style
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult
+
+log = getLogger("rich")
+
+
+class ControlType(IntEnum):
+ """Non-printable control codes which typically translate to ANSI codes."""
+
+ BELL = 1
+ CARRIAGE_RETURN = 2
+ HOME = 3
+ CLEAR = 4
+ SHOW_CURSOR = 5
+ HIDE_CURSOR = 6
+ ENABLE_ALT_SCREEN = 7
+ DISABLE_ALT_SCREEN = 8
+ CURSOR_UP = 9
+ CURSOR_DOWN = 10
+ CURSOR_FORWARD = 11
+ CURSOR_BACKWARD = 12
+ CURSOR_MOVE_TO_COLUMN = 13
+ CURSOR_MOVE_TO = 14
+ ERASE_IN_LINE = 15
+ SET_WINDOW_TITLE = 16
+
+
+ControlCode = Union[
+ Tuple[ControlType],
+ Tuple[ControlType, Union[int, str]],
+ Tuple[ControlType, int, int],
+]
+
+
+@rich_repr()
+class Segment(NamedTuple):
+    """A piece of text with associated style. Segments are produced by the Console render process and
+    are ultimately converted into strings to be written to the terminal.
+
+ Args:
+ text (str): A piece of text.
+ style (:class:`~rich.style.Style`, optional): An optional style to apply to the text.
+ control (Tuple[ControlCode], optional): Optional sequence of control codes.
+
+ Attributes:
+ cell_length (int): The cell length of this Segment.
+ """
+
+ text: str
+ style: Optional[Style] = None
+ control: Optional[Sequence[ControlCode]] = None
+
+ @property
+ def cell_length(self) -> int:
+ """The number of terminal cells required to display self.text.
+
+ Returns:
+ int: A number of cells.
+ """
+ text, _style, control = self
+ return 0 if control else cell_len(text)
+
+ def __rich_repr__(self) -> Result:
+ yield self.text
+ if self.control is None:
+ if self.style is not None:
+ yield self.style
+ else:
+ yield self.style
+ yield self.control
+
+ def __bool__(self) -> bool:
+ """Check if the segment contains text."""
+ return bool(self.text)
+
+ @property
+ def is_control(self) -> bool:
+ """Check if the segment contains control codes."""
+ return self.control is not None
+
+ @classmethod
+ @lru_cache(1024 * 16)
+ def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]:
+
+ text, style, control = segment
+ _Segment = Segment
+
+ cell_length = segment.cell_length
+ if cut >= cell_length:
+ return segment, _Segment("", style, control)
+
+ cell_size = get_character_cell_size
+
+ pos = int((cut / cell_length) * len(text))
+
+ before = text[:pos]
+ cell_pos = cell_len(before)
+ if cell_pos == cut:
+ return (
+ _Segment(before, style, control),
+ _Segment(text[pos:], style, control),
+ )
+ while pos < len(text):
+ char = text[pos]
+ pos += 1
+ cell_pos += cell_size(char)
+ before = text[:pos]
+ if cell_pos == cut:
+ return (
+ _Segment(before, style, control),
+ _Segment(text[pos:], style, control),
+ )
+ if cell_pos > cut:
+ return (
+ _Segment(before[: pos - 1] + " ", style, control),
+ _Segment(" " + text[pos:], style, control),
+ )
+
+ raise AssertionError("Will never reach here")
+
+ def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
+        """Split segment into two segments at the specified column.
+
+ If the cut point falls in the middle of a 2-cell wide character then it is replaced
+ by two spaces, to preserve the display width of the parent segment.
+
+ Returns:
+ Tuple[Segment, Segment]: Two segments.
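+
+        Example (illustrative; behaviour follows from ``_split_cells`` above):
+            ``Segment("漢").split_cells(1)`` returns ``(Segment(" "), Segment(" "))``
+            because the cut falls inside the double-width character.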
+ """
+ text, style, control = self
+
+ if _is_single_cell_widths(text):
+ # Fast path with all 1 cell characters
+ if cut >= len(text):
+ return self, Segment("", style, control)
+ return (
+ Segment(text[:cut], style, control),
+ Segment(text[cut:], style, control),
+ )
+
+ return self._split_cells(self, cut)
+
+ @classmethod
+ def line(cls) -> "Segment":
+ """Make a new line segment."""
+ return cls("\n")
+
+ @classmethod
+ def apply_style(
+ cls,
+ segments: Iterable["Segment"],
+ style: Optional[Style] = None,
+ post_style: Optional[Style] = None,
+ ) -> Iterable["Segment"]:
+ """Apply style(s) to an iterable of segments.
+
+ Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``.
+
+ Args:
+ segments (Iterable[Segment]): Segments to process.
+ style (Style, optional): Base style. Defaults to None.
+ post_style (Style, optional): Style to apply on top of segment style. Defaults to None.
+
+ Returns:
+            Iterable[Segment]: A new iterable of segments (possibly the same iterable).
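+
+        Example (illustrative; ``red`` and ``bold`` stand for parsed Styles):
+            ``Segment.apply_style([Segment("x", bold)], style=red)`` yields
+            ``[Segment("x", red + bold)]``, i.e. the base style sits underneath.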
+ """
+ result_segments = segments
+ if style:
+ apply = style.__add__
+ result_segments = (
+ cls(text, None if control else apply(_style), control)
+ for text, _style, control in result_segments
+ )
+ if post_style:
+ result_segments = (
+ cls(
+ text,
+ (
+ None
+ if control
+ else (_style + post_style if _style else post_style)
+ ),
+ control,
+ )
+ for text, _style, control in result_segments
+ )
+ return result_segments
+
+ @classmethod
+ def filter_control(
+ cls, segments: Iterable["Segment"], is_control: bool = False
+ ) -> Iterable["Segment"]:
+ """Filter segments by ``is_control`` attribute.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of Segment instances.
+            is_control (bool, optional): Match segments where ``is_control`` equals this value. Defaults to False.
+
+        Returns:
+            Iterable[Segment]: An iterable of Segment instances.
+ """
+ if is_control:
+ return filter(attrgetter("control"), segments)
+ else:
+ return filterfalse(attrgetter("control"), segments)
+
+ @classmethod
+ def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
+        """Split a sequence of segments into a list of lines.
+
+ Args:
+ segments (Iterable[Segment]): Segments potentially containing line feeds.
+
+ Yields:
+ Iterable[List[Segment]]: Iterable of segment lists, one per line.
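+
+        Example (illustrative):
+            ``list(Segment.split_lines([Segment("foo\\nbar")]))`` gives
+            ``[[Segment("foo")], [Segment("bar")]]``.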
+ """
+ line: List[Segment] = []
+ append = line.append
+
+ for segment in segments:
+ if "\n" in segment.text and not segment.control:
+ text, style, _ = segment
+ while text:
+ _text, new_line, text = text.partition("\n")
+ if _text:
+ append(cls(_text, style))
+ if new_line:
+ yield line
+ line = []
+ append = line.append
+ else:
+ append(segment)
+ if line:
+ yield line
+
+ @classmethod
+ def split_and_crop_lines(
+ cls,
+ segments: Iterable["Segment"],
+ length: int,
+ style: Optional[Style] = None,
+ pad: bool = True,
+ include_new_lines: bool = True,
+ ) -> Iterable[List["Segment"]]:
+        """Split segments into lines, and crop lines longer than a given length.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments, probably
+ generated from console.render.
+ length (int): Desired line length.
+ style (Style, optional): Style to use for any padding.
+            pad (bool): Enable padding of lines that are less than `length`.
+            include_new_lines (bool): Append a newline segment to each line. Defaults to True.
+
+ Returns:
+ Iterable[List[Segment]]: An iterable of lines of segments.
+ """
+ line: List[Segment] = []
+ append = line.append
+
+ adjust_line_length = cls.adjust_line_length
+ new_line_segment = cls("\n")
+
+ for segment in segments:
+ if "\n" in segment.text and not segment.control:
+ text, segment_style, _ = segment
+ while text:
+ _text, new_line, text = text.partition("\n")
+ if _text:
+ append(cls(_text, segment_style))
+ if new_line:
+ cropped_line = adjust_line_length(
+ line, length, style=style, pad=pad
+ )
+ if include_new_lines:
+ cropped_line.append(new_line_segment)
+ yield cropped_line
+ del line[:]
+ else:
+ append(segment)
+ if line:
+ yield adjust_line_length(line, length, style=style, pad=pad)
+
+ @classmethod
+ def adjust_line_length(
+ cls,
+ line: List["Segment"],
+ length: int,
+ style: Optional[Style] = None,
+ pad: bool = True,
+ ) -> List["Segment"]:
+ """Adjust a line to a given width (cropping or padding as required).
+
+ Args:
+ segments (Iterable[Segment]): A list of segments in a single line.
+ length (int): The desired width of the line.
+ style (Style, optional): The style of padding if used (space on the end). Defaults to None.
+ pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True.
+
+ Returns:
+            List[Segment]: A line of segments with the desired length.
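+
+        Example (illustrative):
+            ``Segment.adjust_line_length([Segment("Hi")], 4)`` returns
+            ``[Segment("Hi"), Segment("  ")]`` (padded with two spaces).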
+ """
+ line_length = sum(segment.cell_length for segment in line)
+ new_line: List[Segment]
+
+ if line_length < length:
+ if pad:
+ new_line = line + [cls(" " * (length - line_length), style)]
+ else:
+ new_line = line[:]
+ elif line_length > length:
+ new_line = []
+ append = new_line.append
+ line_length = 0
+ for segment in line:
+ segment_length = segment.cell_length
+ if line_length + segment_length < length or segment.control:
+ append(segment)
+ line_length += segment_length
+ else:
+ text, segment_style, _ = segment
+ text = set_cell_size(text, length - line_length)
+ append(cls(text, segment_style))
+ break
+ else:
+ new_line = line[:]
+ return new_line
+
+ @classmethod
+ def get_line_length(cls, line: List["Segment"]) -> int:
+ """Get the length of list of segments.
+
+ Args:
+            line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters).
+
+ Returns:
+ int: The length of the line.
+ """
+ _cell_len = cell_len
+ return sum(_cell_len(segment.text) for segment in line)
+
+ @classmethod
+ def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
+ """Get the shape (enclosing rectangle) of a list of lines.
+
+ Args:
+ lines (List[List[Segment]]): A list of lines (no '\\\\n' characters).
+
+ Returns:
+ Tuple[int, int]: Width and height in characters.
+ """
+ get_line_length = cls.get_line_length
+ max_width = max(get_line_length(line) for line in lines) if lines else 0
+ return (max_width, len(lines))
+
+ @classmethod
+ def set_shape(
+ cls,
+ lines: List[List["Segment"]],
+ width: int,
+ height: Optional[int] = None,
+ style: Optional[Style] = None,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Set the shape of a list of lines (enclosing rectangle).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+ height (int, optional): Desired height or None for no change.
+ style (Style, optional): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ _height = height or len(lines)
+
+ blank = (
+ [cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
+ )
+
+ adjust_line_length = cls.adjust_line_length
+ shaped_lines = lines[:_height]
+ shaped_lines[:] = [
+            adjust_line_length(line, width, style=style) for line in shaped_lines
+ ]
+ if len(shaped_lines) < _height:
+ shaped_lines.extend([blank] * (_height - len(shaped_lines)))
+ return shaped_lines
+
+ @classmethod
+ def align_top(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns lines to top (adds extra lines to bottom as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+            height (int): Desired height.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ lines = lines + [[blank]] * extra_lines
+ return lines
+
+ @classmethod
+ def align_bottom(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns render to bottom (adds extra lines above as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+            height (int): Desired height.
+            style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ lines = [[blank]] * extra_lines + lines
+ return lines
+
+ @classmethod
+ def align_middle(
+ cls: Type["Segment"],
+ lines: List[List["Segment"]],
+ width: int,
+ height: int,
+ style: Style,
+ new_lines: bool = False,
+ ) -> List[List["Segment"]]:
+ """Aligns lines to middle (adds extra lines to above and below as required).
+
+ Args:
+ lines (List[List[Segment]]): A list of lines.
+ width (int): Desired width.
+            height (int): Desired height.
+ style (Style): Style of any padding added.
+ new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
+
+ Returns:
+ List[List[Segment]]: New list of lines.
+ """
+ extra_lines = height - len(lines)
+ if not extra_lines:
+ return lines[:]
+ lines = lines[:height]
+ blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
+ top_lines = extra_lines // 2
+ bottom_lines = extra_lines - top_lines
+ lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
+ return lines
+
+ @classmethod
+ def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Simplify an iterable of segments by combining contiguous segments with the same style.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+
+ Returns:
+ Iterable[Segment]: A possibly smaller iterable of segments that will render the same way.
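+
+        Example (illustrative):
+            ``list(Segment.simplify([Segment("foo"), Segment("bar")]))`` gives
+            ``[Segment("foobar")]``, since both segments share the same style.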
+ """
+ iter_segments = iter(segments)
+ try:
+ last_segment = next(iter_segments)
+ except StopIteration:
+ return
+
+ _Segment = Segment
+ for segment in iter_segments:
+ if last_segment.style == segment.style and not segment.control:
+ last_segment = _Segment(
+ last_segment.text + segment.text, last_segment.style
+ )
+ else:
+ yield last_segment
+ last_segment = segment
+ yield last_segment
+
+ @classmethod
+ def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+        """Remove all links from an iterable of segments.
+
+        Args:
+            segments (Iterable[Segment]): An iterable of segments.
+
+        Yields:
+            Segment: Segments with links removed.
+ """
+ for segment in segments:
+ if segment.control or segment.style is None:
+ yield segment
+ else:
+ text, style, _control = segment
+ yield cls(text, style.update_link(None) if style else None)
+
+ @classmethod
+ def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Remove all styles from an iterable of segments.
+
+ Args:
+            segments (Iterable[Segment]): An iterable of segments.
+
+        Yields:
+            Segment: Segments with styles replaced with None.
+ """
+ for text, _style, control in segments:
+ yield cls(text, None, control)
+
+ @classmethod
+ def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
+ """Remove all color from an iterable of segments.
+
+ Args:
+            segments (Iterable[Segment]): An iterable of segments.
+
+ Yields:
+ Segment: Segments with colorless style.
+ """
+
+ cache: Dict[Style, Style] = {}
+ for text, style, control in segments:
+ if style:
+ colorless_style = cache.get(style)
+ if colorless_style is None:
+ colorless_style = style.without_color
+ cache[style] = colorless_style
+ yield cls(text, colorless_style, control)
+ else:
+ yield cls(text, None, control)
+
+ @classmethod
+ def divide(
+ cls, segments: Iterable["Segment"], cuts: Iterable[int]
+ ) -> Iterable[List["Segment"]]:
+        """Divide an iterable of segments into portions.
+
+        Args:
+            segments (Iterable[Segment]): Segments to divide.
+            cuts (Iterable[int]): Cell positions at which to divide.
+
+        Yields:
+            List[Segment]: A list of segments for each portion.
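+
+        Example (illustrative):
+            ``list(Segment.divide([Segment("Hello")], [2, 5]))`` gives
+            ``[[Segment("He")], [Segment("llo")]]``.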
+ """
+ split_segments: List["Segment"] = []
+ add_segment = split_segments.append
+
+ iter_cuts = iter(cuts)
+
+ while True:
+ cut = next(iter_cuts, -1)
+ if cut == -1:
+ return []
+ if cut != 0:
+ break
+ yield []
+ pos = 0
+
+ segments_clear = split_segments.clear
+ segments_copy = split_segments.copy
+
+ _cell_len = cached_cell_len
+ for segment in segments:
+ text, _style, control = segment
+ while text:
+ end_pos = pos if control else pos + _cell_len(text)
+ if end_pos < cut:
+ add_segment(segment)
+ pos = end_pos
+ break
+
+ if end_pos == cut:
+ add_segment(segment)
+ yield segments_copy()
+ segments_clear()
+ pos = end_pos
+
+ cut = next(iter_cuts, -1)
+ if cut == -1:
+ if split_segments:
+ yield segments_copy()
+ return
+
+ break
+
+ else:
+ before, segment = segment.split_cells(cut - pos)
+ text, _style, control = segment
+ add_segment(before)
+ yield segments_copy()
+ segments_clear()
+ pos = cut
+
+ cut = next(iter_cuts, -1)
+ if cut == -1:
+ if split_segments:
+ yield segments_copy()
+ return
+
+ yield segments_copy()
+
+
+class Segments:
+ """A simple renderable to render an iterable of segments. This class may be useful if
+ you want to print segments outside of a __rich_console__ method.
+
+ Args:
+ segments (Iterable[Segment]): An iterable of segments.
+ new_lines (bool, optional): Add new lines between segments. Defaults to False.
+ """
+
+ def __init__(self, segments: Iterable[Segment], new_lines: bool = False) -> None:
+ self.segments = list(segments)
+ self.new_lines = new_lines
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ if self.new_lines:
+ line = Segment.line()
+ for segment in self.segments:
+ yield segment
+ yield line
+ else:
+ yield from self.segments
+
+
+class SegmentLines:
+ def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
+        """A simple renderable containing a number of lines of segments. May be used as an intermediate
+        step in the rendering process.
+
+ Args:
+ lines (Iterable[List[Segment]]): Lists of segments forming lines.
+ new_lines (bool, optional): Insert new lines after each line. Defaults to False.
+ """
+ self.lines = list(lines)
+ self.new_lines = new_lines
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ if self.new_lines:
+ new_line = Segment.line()
+ for line in self.lines:
+ yield from line
+ yield new_line
+ else:
+ for line in self.lines:
+ yield from line
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+ from pip._vendor.rich.syntax import Syntax
+ from pip._vendor.rich.text import Text
+
+ code = """from rich.console import Console
+console = Console()
+text = Text.from_markup("Hello, [bold magenta]World[/]!")
+console.print(text)"""
+
+ text = Text.from_markup("Hello, [bold magenta]World[/]!")
+
+ console = Console()
+
+ console.rule("rich.Segment")
+ console.print(
+ "A Segment is the last step in the Rich render process before generating text with ANSI codes."
+ )
+ console.print("\nConsider the following code:\n")
+ console.print(Syntax(code, "python", line_numbers=True))
+ console.print()
+ console.print(
+        "When you call [b]print()[/b], Rich [i]renders[/i] the object into the following:\n"
+ )
+ fragments = list(console.render(text))
+ console.print(fragments)
+ console.print()
+ console.print("The Segments are then processed to produce the following output:\n")
+ console.print(text)
+ console.print(
+ "\nYou will only need to know this if you are implementing your own Rich renderables."
+ )
diff --git a/third_party/python/pip/pip/_vendor/rich/spinner.py b/third_party/python/pip/pip/_vendor/rich/spinner.py
new file mode 100644
index 0000000000..0879088e14
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/spinner.py
@@ -0,0 +1,136 @@
+from typing import cast, List, Optional, TYPE_CHECKING, Union
+
+from ._spinners import SPINNERS
+from .measure import Measurement
+from .table import Table
+from .text import Text
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult, RenderableType
+ from .style import StyleType
+
+
+class Spinner:
+ def __init__(
+ self,
+ name: str,
+ text: "RenderableType" = "",
+ *,
+ style: Optional["StyleType"] = None,
+ speed: float = 1.0,
+ ) -> None:
+ """A spinner animation.
+
+ Args:
+ name (str): Name of spinner (run python -m rich.spinner).
+ text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
+ style (StyleType, optional): Style for spinner animation. Defaults to None.
+ speed (float, optional): Speed factor for animation. Defaults to 1.0.
+
+ Raises:
+ KeyError: If name isn't one of the supported spinner animations.
+ """
+ try:
+ spinner = SPINNERS[name]
+ except KeyError:
+ raise KeyError(f"no spinner called {name!r}")
+ self.text: "Union[RenderableType, Text]" = (
+ Text.from_markup(text) if isinstance(text, str) else text
+ )
+ self.frames = cast(List[str], spinner["frames"])[:]
+ self.interval = cast(float, spinner["interval"])
+ self.start_time: Optional[float] = None
+ self.style = style
+ self.speed = speed
+ self.frame_no_offset: float = 0.0
+ self._update_speed = 0.0
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ yield self.render(console.get_time())
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ text = self.render(0)
+ return Measurement.get(console, options, text)
+
+ def render(self, time: float) -> "RenderableType":
+ """Render the spinner for a given time.
+
+ Args:
+ time (float): Time in seconds.
+
+ Returns:
+ RenderableType: A renderable containing animation frame.
+ """
+ if self.start_time is None:
+ self.start_time = time
+
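+        # SPINNERS intervals are in milliseconds, so elapsed seconds are
+        # divided by (interval / 1000) to get a fractional frame index.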
+ frame_no = ((time - self.start_time) * self.speed) / (
+ self.interval / 1000.0
+ ) + self.frame_no_offset
+ frame = Text(
+ self.frames[int(frame_no) % len(self.frames)], style=self.style or ""
+ )
+
+ if self._update_speed:
+ self.frame_no_offset = frame_no
+ self.start_time = time
+ self.speed = self._update_speed
+ self._update_speed = 0.0
+
+ if not self.text:
+ return frame
+ elif isinstance(self.text, (str, Text)):
+ return Text.assemble(frame, " ", self.text)
+ else:
+ table = Table.grid(padding=1)
+ table.add_row(frame, self.text)
+ return table
+
+ def update(
+ self,
+ *,
+ text: "RenderableType" = "",
+ style: Optional["StyleType"] = None,
+ speed: Optional[float] = None,
+ ) -> None:
+ """Updates attributes of a spinner after it has been started.
+
+ Args:
+ text (RenderableType, optional): A renderable to display at the right of the spinner (str or Text typically). Defaults to "".
+ style (StyleType, optional): Style for spinner animation. Defaults to None.
+ speed (float, optional): Speed factor for animation. Defaults to None.
+ """
+ if text:
+ self.text = Text.from_markup(text) if isinstance(text, str) else text
+ if style:
+ self.style = style
+ if speed:
+ self._update_speed = speed
+
+
+if __name__ == "__main__": # pragma: no cover
+ from time import sleep
+
+ from .columns import Columns
+ from .panel import Panel
+ from .live import Live
+
+ all_spinners = Columns(
+ [
+ Spinner(spinner_name, text=Text(repr(spinner_name), style="green"))
+ for spinner_name in sorted(SPINNERS.keys())
+ ],
+ column_first=True,
+ expand=True,
+ )
+
+ with Live(
+ Panel(all_spinners, title="Spinners", border_style="blue"),
+ refresh_per_second=20,
+ ) as live:
+ while True:
+ sleep(0.1)
diff --git a/third_party/python/pip/pip/_vendor/rich/status.py b/third_party/python/pip/pip/_vendor/rich/status.py
new file mode 100644
index 0000000000..09eff405ec
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/status.py
@@ -0,0 +1,132 @@
+from types import TracebackType
+from typing import Optional, Type
+
+from .console import Console, RenderableType
+from .jupyter import JupyterMixin
+from .live import Live
+from .spinner import Spinner
+from .style import StyleType
+
+
+class Status(JupyterMixin):
+ """Displays a status indicator with a 'spinner' animation.
+
+ Args:
+ status (RenderableType): A status renderable (str or Text typically).
+ console (Console, optional): Console instance to use, or None for global console. Defaults to None.
+ spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots".
+ spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner".
+ speed (float, optional): Speed factor for spinner animation. Defaults to 1.0.
+ refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5.
+ """
+
+ def __init__(
+ self,
+ status: RenderableType,
+ *,
+ console: Optional[Console] = None,
+ spinner: str = "dots",
+ spinner_style: StyleType = "status.spinner",
+ speed: float = 1.0,
+ refresh_per_second: float = 12.5,
+ ):
+ self.status = status
+ self.spinner_style = spinner_style
+ self.speed = speed
+ self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed)
+ self._live = Live(
+ self.renderable,
+ console=console,
+ refresh_per_second=refresh_per_second,
+ transient=True,
+ )
+
+ @property
+ def renderable(self) -> Spinner:
+ return self._spinner
+
+ @property
+ def console(self) -> "Console":
+        """Get the Console used by the Status object."""
+ return self._live.console
+
+ def update(
+ self,
+ status: Optional[RenderableType] = None,
+ *,
+ spinner: Optional[str] = None,
+ spinner_style: Optional[StyleType] = None,
+ speed: Optional[float] = None,
+ ) -> None:
+ """Update status.
+
+ Args:
+ status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None.
+ spinner (Optional[str], optional): New spinner or None for no change. Defaults to None.
+ spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None.
+ speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None.
+ """
+ if status is not None:
+ self.status = status
+ if spinner_style is not None:
+ self.spinner_style = spinner_style
+ if speed is not None:
+ self.speed = speed
+ if spinner is not None:
+ self._spinner = Spinner(
+ spinner, text=self.status, style=self.spinner_style, speed=self.speed
+ )
+ self._live.update(self.renderable, refresh=True)
+ else:
+ self._spinner.update(
+ text=self.status, style=self.spinner_style, speed=self.speed
+ )
+
+ def start(self) -> None:
+ """Start the status animation."""
+ self._live.start()
+
+ def stop(self) -> None:
+ """Stop the spinner animation."""
+ self._live.stop()
+
+ def __rich__(self) -> RenderableType:
+ return self.renderable
+
+ def __enter__(self) -> "Status":
+ self.start()
+ return self
+
+ def __exit__(
+ self,
+ exc_type: Optional[Type[BaseException]],
+ exc_val: Optional[BaseException],
+ exc_tb: Optional[TracebackType],
+ ) -> None:
+ self.stop()
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ from time import sleep
+
+ from .console import Console
+
+ console = Console()
+ with console.status("[magenta]Covid detector booting up") as status:
+ sleep(3)
+ console.log("Importing advanced AI")
+ sleep(3)
+ console.log("Advanced Covid AI Ready")
+ sleep(3)
+ status.update(status="[bold blue] Scanning for Covid", spinner="earth")
+ sleep(3)
+ console.log("Found 10,000,000,000 copies of Covid32.exe")
+ sleep(3)
+ status.update(
+ status="[bold red]Moving Covid32.exe to Trash",
+ spinner="bouncingBall",
+ spinner_style="yellow",
+ )
+ sleep(5)
+ console.print("[bold green]Covid deleted successfully")
diff --git a/third_party/python/pip/pip/_vendor/rich/style.py b/third_party/python/pip/pip/_vendor/rich/style.py
new file mode 100644
index 0000000000..ad388aadb0
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/style.py
@@ -0,0 +1,773 @@
+import sys
+from functools import lru_cache
+from marshal import dumps, loads
+from random import randint
+from typing import Any, Dict, Iterable, List, Optional, Type, Union, cast
+
+from . import errors
+from .color import Color, ColorParseError, ColorSystem, blend_rgb
+from .repr import Result, rich_repr
+from .terminal_theme import DEFAULT_TERMINAL_THEME, TerminalTheme
+
+# Style instances and style definitions are often interchangeable
+StyleType = Union[str, "Style"]
+
+
+class _Bit:
+ """A descriptor to get/set a style attribute bit."""
+
+ __slots__ = ["bit"]
+
+ def __init__(self, bit_no: int) -> None:
+ self.bit = 1 << bit_no
+
+ def __get__(self, obj: "Style", objtype: Type["Style"]) -> Optional[bool]:
+ if obj._set_attributes & self.bit:
+ return obj._attributes & self.bit != 0
+ return None
+
+
+@rich_repr
+class Style:
+ """A terminal style.
+
+ A terminal style consists of a color (`color`), a background color (`bgcolor`), and a number of attributes, such
+ as bold, italic etc. The attributes have 3 states: they can either be on
+ (``True``), off (``False``), or not set (``None``).
+
+ Args:
+ color (Union[Color, str], optional): Color of terminal text. Defaults to None.
+ bgcolor (Union[Color, str], optional): Color of terminal background. Defaults to None.
+ bold (bool, optional): Enable bold text. Defaults to None.
+ dim (bool, optional): Enable dim text. Defaults to None.
+ italic (bool, optional): Enable italic text. Defaults to None.
+ underline (bool, optional): Enable underlined text. Defaults to None.
+        blink (bool, optional): Enable blinking text. Defaults to None.
+        blink2 (bool, optional): Enable fast blinking text. Defaults to None.
+        reverse (bool, optional): Enable reverse text. Defaults to None.
+        conceal (bool, optional): Enable concealed text. Defaults to None.
+        strike (bool, optional): Enable strikethrough text. Defaults to None.
+        underline2 (bool, optional): Enable doubly underlined text. Defaults to None.
+        frame (bool, optional): Enable framed text. Defaults to None.
+        encircle (bool, optional): Enable encircled text. Defaults to None.
+        overline (bool, optional): Enable overlined text. Defaults to None.
+        link (str, optional): Link URL. Defaults to None.
+        meta (Dict[str, Any], optional): Meta data to attach to the style. Defaults to None.
+
+ """
+
+ _color: Optional[Color]
+ _bgcolor: Optional[Color]
+ _attributes: int
+ _set_attributes: int
+ _hash: Optional[int]
+ _null: bool
+ _meta: Optional[bytes]
+
+ __slots__ = [
+ "_color",
+ "_bgcolor",
+ "_attributes",
+ "_set_attributes",
+ "_link",
+ "_link_id",
+ "_ansi",
+ "_style_definition",
+ "_hash",
+ "_null",
+ "_meta",
+ ]
+
+ # maps bits on to SGR parameter
+ _style_map = {
+ 0: "1",
+ 1: "2",
+ 2: "3",
+ 3: "4",
+ 4: "5",
+ 5: "6",
+ 6: "7",
+ 7: "8",
+ 8: "9",
+ 9: "21",
+ 10: "51",
+ 11: "52",
+ 12: "53",
+ }
+
+ STYLE_ATTRIBUTES = {
+ "dim": "dim",
+ "d": "dim",
+ "bold": "bold",
+ "b": "bold",
+ "italic": "italic",
+ "i": "italic",
+ "underline": "underline",
+ "u": "underline",
+ "blink": "blink",
+ "blink2": "blink2",
+ "reverse": "reverse",
+ "r": "reverse",
+ "conceal": "conceal",
+ "c": "conceal",
+ "strike": "strike",
+ "s": "strike",
+ "underline2": "underline2",
+ "uu": "underline2",
+ "frame": "frame",
+ "encircle": "encircle",
+ "overline": "overline",
+ "o": "overline",
+ }
+
+ def __init__(
+ self,
+ *,
+ color: Optional[Union[Color, str]] = None,
+ bgcolor: Optional[Union[Color, str]] = None,
+ bold: Optional[bool] = None,
+ dim: Optional[bool] = None,
+ italic: Optional[bool] = None,
+ underline: Optional[bool] = None,
+ blink: Optional[bool] = None,
+ blink2: Optional[bool] = None,
+ reverse: Optional[bool] = None,
+ conceal: Optional[bool] = None,
+ strike: Optional[bool] = None,
+ underline2: Optional[bool] = None,
+ frame: Optional[bool] = None,
+ encircle: Optional[bool] = None,
+ overline: Optional[bool] = None,
+ link: Optional[str] = None,
+ meta: Optional[Dict[str, Any]] = None,
+ ):
+ self._ansi: Optional[str] = None
+ self._style_definition: Optional[str] = None
+
+ def _make_color(color: Union[Color, str]) -> Color:
+ return color if isinstance(color, Color) else Color.parse(color)
+
+ self._color = None if color is None else _make_color(color)
+ self._bgcolor = None if bgcolor is None else _make_color(bgcolor)
+ self._set_attributes = sum(
+ (
+ bold is not None,
+ dim is not None and 2,
+ italic is not None and 4,
+ underline is not None and 8,
+ blink is not None and 16,
+ blink2 is not None and 32,
+ reverse is not None and 64,
+ conceal is not None and 128,
+ strike is not None and 256,
+ underline2 is not None and 512,
+ frame is not None and 1024,
+ encircle is not None and 2048,
+ overline is not None and 4096,
+ )
+ )
+ self._attributes = (
+ sum(
+ (
+ bold and 1 or 0,
+ dim and 2 or 0,
+ italic and 4 or 0,
+ underline and 8 or 0,
+ blink and 16 or 0,
+ blink2 and 32 or 0,
+ reverse and 64 or 0,
+ conceal and 128 or 0,
+ strike and 256 or 0,
+ underline2 and 512 or 0,
+ frame and 1024 or 0,
+ encircle and 2048 or 0,
+ overline and 4096 or 0,
+ )
+ )
+ if self._set_attributes
+ else 0
+ )
+
+ self._link = link
+ self._meta = None if meta is None else dumps(meta)
+ self._link_id = (
+ f"{randint(0, 999999)}{hash(self._meta)}" if (link or meta) else ""
+ )
+ self._hash: Optional[int] = None
+ self._null = not (self._set_attributes or color or bgcolor or link or meta)
+
+ @classmethod
+ def null(cls) -> "Style":
+        """Create a 'null' style, equivalent to Style(), but more performant."""
+ return NULL_STYLE
+
+ @classmethod
+ def from_color(
+ cls, color: Optional[Color] = None, bgcolor: Optional[Color] = None
+ ) -> "Style":
+        """Create a new style with colors and no attributes.
+
+        Args:
+            color (Optional[Color]): A (foreground) color, or None for no color. Defaults to None.
+            bgcolor (Optional[Color]): A (background) color, or None for no color. Defaults to None.
+
+        Returns:
+            Style: A new style with the given colors.
+        """
+ style: Style = cls.__new__(Style)
+ style._ansi = None
+ style._style_definition = None
+ style._color = color
+ style._bgcolor = bgcolor
+ style._set_attributes = 0
+ style._attributes = 0
+ style._link = None
+ style._link_id = ""
+ style._meta = None
+ style._null = not (color or bgcolor)
+ style._hash = None
+ return style
+
+ @classmethod
+ def from_meta(cls, meta: Optional[Dict[str, Any]]) -> "Style":
+        """Create a new style with meta data.
+
+        Args:
+            meta (Optional[Dict[str, Any]]): A dictionary of meta data. Defaults to None.
+
+        Returns:
+            Style: A new style with the given meta data.
+        """
+ style: Style = cls.__new__(Style)
+ style._ansi = None
+ style._style_definition = None
+ style._color = None
+ style._bgcolor = None
+ style._set_attributes = 0
+ style._attributes = 0
+ style._link = None
+ style._meta = dumps(meta)
+ style._link_id = f"{randint(0, 999999)}{hash(style._meta)}"
+ style._hash = None
+ style._null = not (meta)
+ return style
+
+ @classmethod
+ def on(cls, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Style":
+ """Create a blank style with meta information.
+
+ Example:
+ style = Style.on(click=self.on_click)
+
+ Args:
+ meta (Optional[Dict[str, Any]], optional): An optional dict of meta information.
+            **handlers (Any): Keyword arguments are translated into handlers.
+
+ Returns:
+ Style: A Style with meta information attached.
+ """
+ meta = {} if meta is None else meta
+ meta.update({f"@{key}": value for key, value in handlers.items()})
+ return cls.from_meta(meta)
+
+ bold = _Bit(0)
+ dim = _Bit(1)
+ italic = _Bit(2)
+ underline = _Bit(3)
+ blink = _Bit(4)
+ blink2 = _Bit(5)
+ reverse = _Bit(6)
+ conceal = _Bit(7)
+ strike = _Bit(8)
+ underline2 = _Bit(9)
+ frame = _Bit(10)
+ encircle = _Bit(11)
+ overline = _Bit(12)
+
+ @property
+ def link_id(self) -> str:
+ """Get a link id, used in ansi code for links."""
+ return self._link_id
+
+ def __str__(self) -> str:
+ """Re-generate style definition from attributes."""
+ if self._style_definition is None:
+ attributes: List[str] = []
+ append = attributes.append
+ bits = self._set_attributes
+ if bits & 0b0000000001111:
+ if bits & 1:
+ append("bold" if self.bold else "not bold")
+ if bits & (1 << 1):
+ append("dim" if self.dim else "not dim")
+ if bits & (1 << 2):
+ append("italic" if self.italic else "not italic")
+ if bits & (1 << 3):
+ append("underline" if self.underline else "not underline")
+ if bits & 0b0000111110000:
+ if bits & (1 << 4):
+ append("blink" if self.blink else "not blink")
+ if bits & (1 << 5):
+ append("blink2" if self.blink2 else "not blink2")
+ if bits & (1 << 6):
+ append("reverse" if self.reverse else "not reverse")
+ if bits & (1 << 7):
+ append("conceal" if self.conceal else "not conceal")
+ if bits & (1 << 8):
+ append("strike" if self.strike else "not strike")
+ if bits & 0b1111000000000:
+ if bits & (1 << 9):
+ append("underline2" if self.underline2 else "not underline2")
+ if bits & (1 << 10):
+ append("frame" if self.frame else "not frame")
+ if bits & (1 << 11):
+ append("encircle" if self.encircle else "not encircle")
+ if bits & (1 << 12):
+ append("overline" if self.overline else "not overline")
+ if self._color is not None:
+ append(self._color.name)
+ if self._bgcolor is not None:
+ append("on")
+ append(self._bgcolor.name)
+ if self._link:
+ append("link")
+ append(self._link)
+ self._style_definition = " ".join(attributes) or "none"
+ return self._style_definition
+
+ def __bool__(self) -> bool:
+ """A Style is false if it has no attributes, colors, or links."""
+ return not self._null
+
+ def _make_ansi_codes(self, color_system: ColorSystem) -> str:
+ """Generate ANSI codes for this style.
+
+ Args:
+ color_system (ColorSystem): Color system.
+
+ Returns:
+ str: String containing codes.
+ """
+
+ if self._ansi is None:
+ sgr: List[str] = []
+ append = sgr.append
+ _style_map = self._style_map
+ attributes = self._attributes & self._set_attributes
+ if attributes:
+ if attributes & 1:
+ append(_style_map[0])
+ if attributes & 2:
+ append(_style_map[1])
+ if attributes & 4:
+ append(_style_map[2])
+ if attributes & 8:
+ append(_style_map[3])
+ if attributes & 0b0000111110000:
+ for bit in range(4, 9):
+ if attributes & (1 << bit):
+ append(_style_map[bit])
+ if attributes & 0b1111000000000:
+ for bit in range(9, 13):
+ if attributes & (1 << bit):
+ append(_style_map[bit])
+ if self._color is not None:
+ sgr.extend(self._color.downgrade(color_system).get_ansi_codes())
+ if self._bgcolor is not None:
+ sgr.extend(
+ self._bgcolor.downgrade(color_system).get_ansi_codes(
+ foreground=False
+ )
+ )
+ self._ansi = ";".join(sgr)
+ return self._ansi
+
+ @classmethod
+ @lru_cache(maxsize=1024)
+ def normalize(cls, style: str) -> str:
+ """Normalize a style definition so that styles with the same effect have the same string
+ representation.
+
+ Args:
+ style (str): A style definition.
+
+ Returns:
+ str: Normal form of style definition.
+ """
+ try:
+ return str(cls.parse(style))
+ except errors.StyleSyntaxError:
+ return style.strip().lower()
+
+ @classmethod
+ def pick_first(cls, *values: Optional[StyleType]) -> StyleType:
+ """Pick first non-None style."""
+ for value in values:
+ if value is not None:
+ return value
+ raise ValueError("expected at least one non-None style")
+
+ def __rich_repr__(self) -> Result:
+ yield "color", self.color, None
+ yield "bgcolor", self.bgcolor, None
+        yield "bold", self.bold, None
+        yield "dim", self.dim, None
+        yield "italic", self.italic, None
+        yield "underline", self.underline, None
+ yield "blink", self.blink, None
+ yield "blink2", self.blink2, None
+ yield "reverse", self.reverse, None
+ yield "conceal", self.conceal, None
+ yield "strike", self.strike, None
+ yield "underline2", self.underline2, None
+ yield "frame", self.frame, None
+ yield "encircle", self.encircle, None
+ yield "link", self.link, None
+ if self._meta:
+ yield "meta", self.meta
+
+ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, Style):
+ return NotImplemented
+ return self.__hash__() == other.__hash__()
+
+ def __ne__(self, other: Any) -> bool:
+ if not isinstance(other, Style):
+ return NotImplemented
+ return self.__hash__() != other.__hash__()
+
+ def __hash__(self) -> int:
+ if self._hash is not None:
+ return self._hash
+ self._hash = hash(
+ (
+ self._color,
+ self._bgcolor,
+ self._attributes,
+ self._set_attributes,
+ self._link,
+ self._meta,
+ )
+ )
+ return self._hash
+
+ @property
+ def color(self) -> Optional[Color]:
+ """The foreground color or None if it is not set."""
+ return self._color
+
+ @property
+ def bgcolor(self) -> Optional[Color]:
+ """The background color or None if it is not set."""
+ return self._bgcolor
+
+ @property
+ def link(self) -> Optional[str]:
+ """Link text, if set."""
+ return self._link
+
+ @property
+ def transparent_background(self) -> bool:
+ """Check if the style specified a transparent background."""
+ return self.bgcolor is None or self.bgcolor.is_default
+
+ @property
+ def background_style(self) -> "Style":
+ """A Style with background only."""
+ return Style(bgcolor=self.bgcolor)
+
+ @property
+ def meta(self) -> Dict[str, Any]:
+        """Get meta information (cannot be changed after construction)."""
+ return {} if self._meta is None else cast(Dict[str, Any], loads(self._meta))
+
+ @property
+ def without_color(self) -> "Style":
+ """Get a copy of the style with color removed."""
+ if self._null:
+ return NULL_STYLE
+ style: Style = self.__new__(Style)
+ style._ansi = None
+ style._style_definition = None
+ style._color = None
+ style._bgcolor = None
+ style._attributes = self._attributes
+ style._set_attributes = self._set_attributes
+ style._link = self._link
+ style._link_id = f"{randint(0, 999999)}" if self._link else ""
+ style._null = False
+ style._meta = None
+ style._hash = None
+ return style
+
+ @classmethod
+ @lru_cache(maxsize=4096)
+ def parse(cls, style_definition: str) -> "Style":
+ """Parse a style definition.
+
+ Args:
+ style_definition (str): A string containing a style.
+
+ Raises:
+ errors.StyleSyntaxError: If the style definition syntax is invalid.
+
+ Returns:
+ `Style`: A Style instance.
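+
+        Example (illustrative):
+            ``Style.parse("bold red on black")`` is equivalent to
+            ``Style(bold=True, color="red", bgcolor="black")``.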
+ """
+ if style_definition.strip() == "none" or not style_definition:
+ return cls.null()
+
+ STYLE_ATTRIBUTES = cls.STYLE_ATTRIBUTES
+ color: Optional[str] = None
+ bgcolor: Optional[str] = None
+ attributes: Dict[str, Optional[Any]] = {}
+ link: Optional[str] = None
+
+ words = iter(style_definition.split())
+ for original_word in words:
+ word = original_word.lower()
+ if word == "on":
+ word = next(words, "")
+ if not word:
+ raise errors.StyleSyntaxError("color expected after 'on'")
+ try:
+                    Color.parse(word)
+ except ColorParseError as error:
+ raise errors.StyleSyntaxError(
+ f"unable to parse {word!r} as background color; {error}"
+ ) from None
+ bgcolor = word
+
+ elif word == "not":
+ word = next(words, "")
+ attribute = STYLE_ATTRIBUTES.get(word)
+ if attribute is None:
+ raise errors.StyleSyntaxError(
+ f"expected style attribute after 'not', found {word!r}"
+ )
+ attributes[attribute] = False
+
+ elif word == "link":
+ word = next(words, "")
+ if not word:
+ raise errors.StyleSyntaxError("URL expected after 'link'")
+ link = word
+
+ elif word in STYLE_ATTRIBUTES:
+ attributes[STYLE_ATTRIBUTES[word]] = True
+
+ else:
+ try:
+ Color.parse(word)
+ except ColorParseError as error:
+ raise errors.StyleSyntaxError(
+ f"unable to parse {word!r} as color; {error}"
+ ) from None
+ color = word
+ style = Style(color=color, bgcolor=bgcolor, link=link, **attributes)
+ return style
+
+ @lru_cache(maxsize=1024)
+ def get_html_style(self, theme: Optional[TerminalTheme] = None) -> str:
+ """Get a CSS style rule."""
+ theme = theme or DEFAULT_TERMINAL_THEME
+ css: List[str] = []
+ append = css.append
+
+ color = self.color
+ bgcolor = self.bgcolor
+ if self.reverse:
+ color, bgcolor = bgcolor, color
+ if self.dim:
+ foreground_color = (
+ theme.foreground_color if color is None else color.get_truecolor(theme)
+ )
+ color = Color.from_triplet(
+ blend_rgb(foreground_color, theme.background_color, 0.5)
+ )
+ if color is not None:
+ theme_color = color.get_truecolor(theme)
+ append(f"color: {theme_color.hex}")
+ append(f"text-decoration-color: {theme_color.hex}")
+ if bgcolor is not None:
+ theme_color = bgcolor.get_truecolor(theme, foreground=False)
+ append(f"background-color: {theme_color.hex}")
+ if self.bold:
+ append("font-weight: bold")
+ if self.italic:
+ append("font-style: italic")
+ if self.underline:
+ append("text-decoration: underline")
+ if self.strike:
+ append("text-decoration: line-through")
+ if self.overline:
+ append("text-decoration: overline")
+ return "; ".join(css)
+
+ @classmethod
+ def combine(cls, styles: Iterable["Style"]) -> "Style":
+ """Combine styles and get result.
+
+ Args:
+ styles (Iterable[Style]): Styles to combine.
+
+ Returns:
+ Style: A new style instance.
+ """
+ iter_styles = iter(styles)
+ return sum(iter_styles, next(iter_styles))
+
+ @classmethod
+ def chain(cls, *styles: "Style") -> "Style":
+        """Combine styles from positional arguments into a single style.
+
+ Args:
+ *styles (Iterable[Style]): Styles to combine.
+
+ Returns:
+ Style: A new style instance.
+ """
+ iter_styles = iter(styles)
+ return sum(iter_styles, next(iter_styles))
+
+ def copy(self) -> "Style":
+ """Get a copy of this style.
+
+ Returns:
+ Style: A new Style instance with identical attributes.
+ """
+ if self._null:
+ return NULL_STYLE
+ style: Style = self.__new__(Style)
+ style._ansi = self._ansi
+ style._style_definition = self._style_definition
+ style._color = self._color
+ style._bgcolor = self._bgcolor
+ style._attributes = self._attributes
+ style._set_attributes = self._set_attributes
+ style._link = self._link
+ style._link_id = f"{randint(0, 999999)}" if self._link else ""
+ style._hash = self._hash
+ style._null = False
+ style._meta = self._meta
+ return style
+
+ def update_link(self, link: Optional[str] = None) -> "Style":
+ """Get a copy with a different value for link.
+
+ Args:
+ link (str, optional): New value for link. Defaults to None.
+
+ Returns:
+ Style: A new Style instance.
+ """
+ style: Style = self.__new__(Style)
+ style._ansi = self._ansi
+ style._style_definition = self._style_definition
+ style._color = self._color
+ style._bgcolor = self._bgcolor
+ style._attributes = self._attributes
+ style._set_attributes = self._set_attributes
+ style._link = link
+ style._link_id = f"{randint(0, 999999)}" if link else ""
+ style._hash = None
+ style._null = False
+ style._meta = self._meta
+ return style
+
+ def render(
+ self,
+ text: str = "",
+ *,
+ color_system: Optional[ColorSystem] = ColorSystem.TRUECOLOR,
+ legacy_windows: bool = False,
+ ) -> str:
+ """Render the ANSI codes for the style.
+
+ Args:
+ text (str, optional): A string to style. Defaults to "".
+            color_system (Optional[ColorSystem], optional): Color system to render to. Defaults to ColorSystem.TRUECOLOR.
+            legacy_windows (bool, optional): Skip emitting link escape codes (for legacy Windows terminals). Defaults to False.
+
+ Returns:
+ str: A string containing ANSI style codes.
+ """
+ if not text or color_system is None:
+ return text
+ attrs = self._ansi or self._make_ansi_codes(color_system)
+ rendered = f"\x1b[{attrs}m{text}\x1b[0m" if attrs else text
+ if self._link and not legacy_windows:
+ rendered = (
+ f"\x1b]8;id={self._link_id};{self._link}\x1b\\{rendered}\x1b]8;;\x1b\\"
+ )
+ return rendered
+
+ def test(self, text: Optional[str] = None) -> None:
+ """Write text with style directly to terminal.
+
+ This method is for testing purposes only.
+
+ Args:
+ text (Optional[str], optional): Text to style or None for style name.
+
+ """
+ text = text or str(self)
+ sys.stdout.write(f"{self.render(text)}\n")
+
+ @lru_cache(maxsize=1024)
+ def _add(self, style: Optional["Style"]) -> "Style":
+ if style is None or style._null:
+ return self
+ if self._null:
+ return style
+ new_style: Style = self.__new__(Style)
+ new_style._ansi = None
+ new_style._style_definition = None
+ new_style._color = style._color or self._color
+ new_style._bgcolor = style._bgcolor or self._bgcolor
+ new_style._attributes = (self._attributes & ~style._set_attributes) | (
+ style._attributes & style._set_attributes
+ )
+ new_style._set_attributes = self._set_attributes | style._set_attributes
+ new_style._link = style._link or self._link
+ new_style._link_id = style._link_id or self._link_id
+ new_style._null = style._null
+ if self._meta and style._meta:
+ new_style._meta = dumps({**self.meta, **style.meta})
+ else:
+ new_style._meta = self._meta or style._meta
+ new_style._hash = None
+ return new_style
+
+ def __add__(self, style: Optional["Style"]) -> "Style":
+ combined_style = self._add(style)
+ return combined_style.copy() if combined_style.link else combined_style
+
+
+NULL_STYLE = Style()
+
+
+class StyleStack:
+ """A stack of styles."""
+
+ __slots__ = ["_stack"]
+
+ def __init__(self, default_style: "Style") -> None:
+ self._stack: List[Style] = [default_style]
+
+ def __repr__(self) -> str:
+ return f"<stylestack {self._stack!r}>"
+
+ @property
+ def current(self) -> Style:
+ """Get the Style at the top of the stack."""
+ return self._stack[-1]
+
+ def push(self, style: Style) -> None:
+ """Push a new style on to the stack.
+
+ Args:
+ style (Style): New style to combine with current style.
+ """
+ self._stack.append(self._stack[-1] + style)
+
+ def pop(self) -> Style:
+        """Pop the last style and discard it.
+
+        Returns:
+            Style: The new current style (also available as stack.current).
+ """
+ self._stack.pop()
+ return self._stack[-1]
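+
+
+if __name__ == "__main__":  # pragma: no cover
+    # Editor's sketch, not from upstream rich: combining styles directly and
+    # via a StyleStack. The style definitions below are illustrative.
+    base = Style.parse("bold")
+    stack = StyleStack(base)
+    stack.push(Style.parse("red"))
+    print(str(stack.current))  # -> "bold red"
+    stack.pop()
+    print(str(stack.current))  # -> "bold"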
diff --git a/third_party/python/pip/pip/_vendor/rich/styled.py b/third_party/python/pip/pip/_vendor/rich/styled.py
new file mode 100644
index 0000000000..91cd0db31c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/styled.py
@@ -0,0 +1,42 @@
+from typing import TYPE_CHECKING
+
+from .measure import Measurement
+from .segment import Segment
+from .style import StyleType
+
+if TYPE_CHECKING:
+ from .console import Console, ConsoleOptions, RenderResult, RenderableType
+
+
+class Styled:
+ """Apply a style to a renderable.
+
+ Args:
+ renderable (RenderableType): Any renderable.
+ style (StyleType): A style to apply across the entire renderable.
+ """
+
+ def __init__(self, renderable: "RenderableType", style: "StyleType") -> None:
+ self.renderable = renderable
+ self.style = style
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+ style = console.get_style(self.style)
+ rendered_segments = console.render(self.renderable, options)
+ segments = Segment.apply_style(rendered_segments, style)
+ return segments
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ return Measurement.get(console, options, self.renderable)
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich import print
+ from pip._vendor.rich.panel import Panel
+
+ panel = Styled(Panel("hello"), "on blue")
+ print(panel)
diff --git a/third_party/python/pip/pip/_vendor/rich/syntax.py b/third_party/python/pip/pip/_vendor/rich/syntax.py
new file mode 100644
index 0000000000..01bdd04398
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/syntax.py
@@ -0,0 +1,945 @@
+import os.path
+import platform
+import re
+import sys
+import textwrap
+from abc import ABC, abstractmethod
+from typing import (
+ Any,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Set,
+ Tuple,
+ Type,
+ Union,
+)
+
+from pip._vendor.pygments.lexer import Lexer
+from pip._vendor.pygments.lexers import get_lexer_by_name, guess_lexer_for_filename
+from pip._vendor.pygments.style import Style as PygmentsStyle
+from pip._vendor.pygments.styles import get_style_by_name
+from pip._vendor.pygments.token import (
+ Comment,
+ Error,
+ Generic,
+ Keyword,
+ Name,
+ Number,
+ Operator,
+ String,
+ Token,
+ Whitespace,
+)
+from pip._vendor.pygments.util import ClassNotFound
+
+from pip._vendor.rich.containers import Lines
+from pip._vendor.rich.padding import Padding, PaddingDimensions
+
+from ._loop import loop_first
+from .cells import cell_len
+from .color import Color, blend_rgb
+from .console import Console, ConsoleOptions, JustifyMethod, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment, Segments
+from .style import Style, StyleType
+from .text import Text
+
+TokenType = Tuple[str, ...]
+
+WINDOWS = platform.system() == "Windows"
+DEFAULT_THEME = "monokai"
+
+# The following styles are based on https://github.com/pygments/pygments/blob/master/pygments/formatters/terminal.py
+# A few modifications were made
+
+ANSI_LIGHT: Dict[TokenType, Style] = {
+ Token: Style(),
+ Whitespace: Style(color="white"),
+ Comment: Style(dim=True),
+ Comment.Preproc: Style(color="cyan"),
+ Keyword: Style(color="blue"),
+ Keyword.Type: Style(color="cyan"),
+ Operator.Word: Style(color="magenta"),
+ Name.Builtin: Style(color="cyan"),
+ Name.Function: Style(color="green"),
+ Name.Namespace: Style(color="cyan", underline=True),
+ Name.Class: Style(color="green", underline=True),
+ Name.Exception: Style(color="cyan"),
+ Name.Decorator: Style(color="magenta", bold=True),
+ Name.Variable: Style(color="red"),
+ Name.Constant: Style(color="red"),
+ Name.Attribute: Style(color="cyan"),
+ Name.Tag: Style(color="bright_blue"),
+ String: Style(color="yellow"),
+ Number: Style(color="blue"),
+ Generic.Deleted: Style(color="bright_red"),
+ Generic.Inserted: Style(color="green"),
+ Generic.Heading: Style(bold=True),
+ Generic.Subheading: Style(color="magenta", bold=True),
+ Generic.Prompt: Style(bold=True),
+ Generic.Error: Style(color="bright_red"),
+ Error: Style(color="red", underline=True),
+}
+
+ANSI_DARK: Dict[TokenType, Style] = {
+ Token: Style(),
+ Whitespace: Style(color="bright_black"),
+ Comment: Style(dim=True),
+ Comment.Preproc: Style(color="bright_cyan"),
+ Keyword: Style(color="bright_blue"),
+ Keyword.Type: Style(color="bright_cyan"),
+ Operator.Word: Style(color="bright_magenta"),
+ Name.Builtin: Style(color="bright_cyan"),
+ Name.Function: Style(color="bright_green"),
+ Name.Namespace: Style(color="bright_cyan", underline=True),
+ Name.Class: Style(color="bright_green", underline=True),
+ Name.Exception: Style(color="bright_cyan"),
+ Name.Decorator: Style(color="bright_magenta", bold=True),
+ Name.Variable: Style(color="bright_red"),
+ Name.Constant: Style(color="bright_red"),
+ Name.Attribute: Style(color="bright_cyan"),
+ Name.Tag: Style(color="bright_blue"),
+ String: Style(color="yellow"),
+ Number: Style(color="bright_blue"),
+ Generic.Deleted: Style(color="bright_red"),
+ Generic.Inserted: Style(color="bright_green"),
+ Generic.Heading: Style(bold=True),
+ Generic.Subheading: Style(color="bright_magenta", bold=True),
+ Generic.Prompt: Style(bold=True),
+ Generic.Error: Style(color="bright_red"),
+ Error: Style(color="red", underline=True),
+}
+
+RICH_SYNTAX_THEMES = {"ansi_light": ANSI_LIGHT, "ansi_dark": ANSI_DARK}
+NUMBERS_COLUMN_DEFAULT_PADDING = 2
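+
+# A minimal sketch (assuming the Syntax class defined below): passing one of
+# the RICH_SYNTAX_THEMES keys as ``theme`` selects an ANSISyntaxTheme rather
+# than a Pygments style, e.g.:
+#
+#     syntax = Syntax('print("hello")', "python", theme="ansi_dark")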
+
+
+class SyntaxTheme(ABC):
+ """Base class for a syntax theme."""
+
+ @abstractmethod
+ def get_style_for_token(self, token_type: TokenType) -> Style:
+ """Get a style for a given Pygments token."""
+ raise NotImplementedError # pragma: no cover
+
+ @abstractmethod
+ def get_background_style(self) -> Style:
+ """Get the background color."""
+ raise NotImplementedError # pragma: no cover
+
+
+class PygmentsSyntaxTheme(SyntaxTheme):
+ """Syntax theme that delegates to Pygments theme."""
+
+ def __init__(self, theme: Union[str, Type[PygmentsStyle]]) -> None:
+ self._style_cache: Dict[TokenType, Style] = {}
+ if isinstance(theme, str):
+ try:
+ self._pygments_style_class = get_style_by_name(theme)
+ except ClassNotFound:
+ self._pygments_style_class = get_style_by_name("default")
+ else:
+ self._pygments_style_class = theme
+
+ self._background_color = self._pygments_style_class.background_color
+ self._background_style = Style(bgcolor=self._background_color)
+
+ def get_style_for_token(self, token_type: TokenType) -> Style:
+ """Get a style from a Pygments class."""
+ try:
+ return self._style_cache[token_type]
+ except KeyError:
+ try:
+ pygments_style = self._pygments_style_class.style_for_token(token_type)
+ except KeyError:
+ style = Style.null()
+ else:
+ color = pygments_style["color"]
+ bgcolor = pygments_style["bgcolor"]
+ style = Style(
+ color="#" + color if color else "#000000",
+ bgcolor="#" + bgcolor if bgcolor else self._background_color,
+ bold=pygments_style["bold"],
+ italic=pygments_style["italic"],
+ underline=pygments_style["underline"],
+ )
+ self._style_cache[token_type] = style
+ return style
+
+ def get_background_style(self) -> Style:
+ return self._background_style
+
+
+class ANSISyntaxTheme(SyntaxTheme):
+ """Syntax theme to use standard colors."""
+
+ def __init__(self, style_map: Dict[TokenType, Style]) -> None:
+ self.style_map = style_map
+ self._missing_style = Style.null()
+ self._background_style = Style.null()
+ self._style_cache: Dict[TokenType, Style] = {}
+
+ def get_style_for_token(self, token_type: TokenType) -> Style:
+ """Look up style in the style map."""
+ try:
+ return self._style_cache[token_type]
+ except KeyError:
+ # Styles form a hierarchy
+ # We need to go from most to least specific
+ # e.g. ("foo", "bar", "baz") to ("foo", "bar") to ("foo",)
+ get_style = self.style_map.get
+ token = tuple(token_type)
+ style = self._missing_style
+ while token:
+ _style = get_style(token)
+ if _style is not None:
+ style = _style
+ break
+ token = token[:-1]
+ self._style_cache[token_type] = style
+ return style
+
+ def get_background_style(self) -> Style:
+ return self._background_style
+
+
+SyntaxPosition = Tuple[int, int]
+
+
+class _SyntaxHighlightRange(NamedTuple):
+ """
+ A range to highlight in a Syntax object.
+    `start` and `end` are two-integer tuples, where the first integer is the line number
+ (starting from 1) and the second integer is the column index (starting from 0).
+ """
+
+ style: StyleType
+ start: SyntaxPosition
+ end: SyntaxPosition
+
+
+class Syntax(JupyterMixin):
+ """Construct a Syntax object to render syntax highlighted code.
+
+ Args:
+ code (str): Code to highlight.
+ lexer (Lexer | str): Lexer to use (see https://pygments.org/docs/lexers/)
+ theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai".
+ dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False.
+ line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
+ start_line (int, optional): Starting number for line numbers. Defaults to 1.
+ line_range (Tuple[int | None, int | None], optional): If given should be a tuple of the start and end line to render.
+ A value of None in the tuple indicates the range is open in that direction.
+ highlight_lines (Set[int]): A set of line numbers to highlight.
+        code_width (Optional[int], optional): Width of code to render (not including line numbers), or ``None`` to use all available width.
+ tab_size (int, optional): Size of tabs. Defaults to 4.
+ word_wrap (bool, optional): Enable word wrapping.
+ background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
+ indent_guides (bool, optional): Show indent guides. Defaults to False.
+ padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding).
+ """
+
+ _pygments_style_class: Type[PygmentsStyle]
+ _theme: SyntaxTheme
+
+ @classmethod
+ def get_theme(cls, name: Union[str, SyntaxTheme]) -> SyntaxTheme:
+ """Get a syntax theme instance."""
+ if isinstance(name, SyntaxTheme):
+ return name
+ theme: SyntaxTheme
+ if name in RICH_SYNTAX_THEMES:
+ theme = ANSISyntaxTheme(RICH_SYNTAX_THEMES[name])
+ else:
+ theme = PygmentsSyntaxTheme(name)
+ return theme
+
+ def __init__(
+ self,
+ code: str,
+ lexer: Union[Lexer, str],
+ *,
+ theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
+ dedent: bool = False,
+ line_numbers: bool = False,
+ start_line: int = 1,
+ line_range: Optional[Tuple[Optional[int], Optional[int]]] = None,
+ highlight_lines: Optional[Set[int]] = None,
+ code_width: Optional[int] = None,
+ tab_size: int = 4,
+ word_wrap: bool = False,
+ background_color: Optional[str] = None,
+ indent_guides: bool = False,
+ padding: PaddingDimensions = 0,
+ ) -> None:
+ self.code = code
+ self._lexer = lexer
+ self.dedent = dedent
+ self.line_numbers = line_numbers
+ self.start_line = start_line
+ self.line_range = line_range
+ self.highlight_lines = highlight_lines or set()
+ self.code_width = code_width
+ self.tab_size = tab_size
+ self.word_wrap = word_wrap
+ self.background_color = background_color
+ self.background_style = (
+ Style(bgcolor=background_color) if background_color else Style()
+ )
+ self.indent_guides = indent_guides
+ self.padding = padding
+
+ self._theme = self.get_theme(theme)
+ self._stylized_ranges: List[_SyntaxHighlightRange] = []
+
+ @classmethod
+ def from_path(
+ cls,
+ path: str,
+ encoding: str = "utf-8",
+ lexer: Optional[Union[Lexer, str]] = None,
+ theme: Union[str, SyntaxTheme] = DEFAULT_THEME,
+ dedent: bool = False,
+ line_numbers: bool = False,
+ line_range: Optional[Tuple[int, int]] = None,
+ start_line: int = 1,
+ highlight_lines: Optional[Set[int]] = None,
+ code_width: Optional[int] = None,
+ tab_size: int = 4,
+ word_wrap: bool = False,
+ background_color: Optional[str] = None,
+ indent_guides: bool = False,
+ padding: PaddingDimensions = 0,
+ ) -> "Syntax":
+ """Construct a Syntax object from a file.
+
+ Args:
+ path (str): Path to file to highlight.
+ encoding (str): Encoding of file.
+ lexer (str | Lexer, optional): Lexer to use. If None, lexer will be auto-detected from path/file content.
+            theme (str, optional): Color theme, aka Pygments style (see https://pygments.org/docs/styles/#getting-a-list-of-available-styles). Defaults to "monokai".
+            dedent (bool, optional): Enable stripping of initial whitespace. Defaults to False.
+ line_numbers (bool, optional): Enable rendering of line numbers. Defaults to False.
+ start_line (int, optional): Starting number for line numbers. Defaults to 1.
+ line_range (Tuple[int, int], optional): If given should be a tuple of the start and end line to render.
+ highlight_lines (Set[int]): A set of line numbers to highlight.
+            code_width (Optional[int], optional): Width of code to render (not including line numbers), or ``None`` to use all available width.
+ tab_size (int, optional): Size of tabs. Defaults to 4.
+ word_wrap (bool, optional): Enable word wrapping of code.
+ background_color (str, optional): Optional background color, or None to use theme color. Defaults to None.
+ indent_guides (bool, optional): Show indent guides. Defaults to False.
+ padding (PaddingDimensions): Padding to apply around the syntax. Defaults to 0 (no padding).
+
+ Returns:
+            Syntax: A Syntax object that may be printed to the console.
+ """
+ with open(path, "rt", encoding=encoding) as code_file:
+ code = code_file.read()
+
+ if not lexer:
+ lexer = cls.guess_lexer(path, code=code)
+
+ return cls(
+ code,
+ lexer,
+ theme=theme,
+ dedent=dedent,
+ line_numbers=line_numbers,
+ line_range=line_range,
+ start_line=start_line,
+ highlight_lines=highlight_lines,
+ code_width=code_width,
+ tab_size=tab_size,
+ word_wrap=word_wrap,
+ background_color=background_color,
+ indent_guides=indent_guides,
+ padding=padding,
+ )
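+
+    # Hedged example of ``from_path`` ("script.py" is a hypothetical path, not
+    # a file shipped with pip); the lexer is guessed from path and contents:
+    #
+    #     syntax = Syntax.from_path("script.py", line_numbers=True)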
+
+ @classmethod
+ def guess_lexer(cls, path: str, code: Optional[str] = None) -> str:
+ """Guess the alias of the Pygments lexer to use based on a path and an optional string of code.
+ If code is supplied, it will use a combination of the code and the filename to determine the
+ best lexer to use. For example, if the file is ``index.html`` and the file contains Django
+ templating syntax, then "html+django" will be returned. If the file is ``index.html``, and no
+        templating language is used, the "html" lexer will be used. If no string of code
+        is supplied, the lexer will be chosen based on the file extension.
+
+ Args:
+ path (AnyStr): The path to the file containing the code you wish to know the lexer for.
+            code (str, optional): Optional string of code. If supplied, it is combined with the
+                filename to pick the best lexer; the file extension alone is used as a fallback.
+
+ Returns:
+ str: The name of the Pygments lexer that best matches the supplied path/code.
+ """
+ lexer: Optional[Lexer] = None
+ lexer_name = "default"
+ if code:
+ try:
+ lexer = guess_lexer_for_filename(path, code)
+ except ClassNotFound:
+ pass
+
+ if not lexer:
+ try:
+ _, ext = os.path.splitext(path)
+ if ext:
+ extension = ext.lstrip(".").lower()
+ lexer = get_lexer_by_name(extension)
+ except ClassNotFound:
+ pass
+
+ if lexer:
+ if lexer.aliases:
+ lexer_name = lexer.aliases[0]
+ else:
+ lexer_name = lexer.name
+
+ return lexer_name
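+
+    # Sketch of the behaviour documented above (results assume the matching
+    # Pygments lexers are available):
+    #
+    #     Syntax.guess_lexer("index.html")                     # -> "html"
+    #     Syntax.guess_lexer("index.html", code="{% block %}") # -> "html+django"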
+
+ def _get_base_style(self) -> Style:
+ """Get the base style."""
+ default_style = self._theme.get_background_style() + self.background_style
+ return default_style
+
+ def _get_token_color(self, token_type: TokenType) -> Optional[Color]:
+ """Get a color (if any) for the given token.
+
+ Args:
+ token_type (TokenType): A token type tuple from Pygments.
+
+ Returns:
+ Optional[Color]: Color from theme, or None for no color.
+ """
+ style = self._theme.get_style_for_token(token_type)
+ return style.color
+
+ @property
+ def lexer(self) -> Optional[Lexer]:
+ """The lexer for this syntax, or None if no lexer was found.
+
+ Tries to find the lexer by name if a string was passed to the constructor.
+ """
+
+ if isinstance(self._lexer, Lexer):
+ return self._lexer
+ try:
+ return get_lexer_by_name(
+ self._lexer,
+ stripnl=False,
+ ensurenl=True,
+ tabsize=self.tab_size,
+ )
+ except ClassNotFound:
+ return None
+
+ def highlight(
+ self,
+ code: str,
+ line_range: Optional[Tuple[Optional[int], Optional[int]]] = None,
+ ) -> Text:
+ """Highlight code and return a Text instance.
+
+ Args:
+ code (str): Code to highlight.
+            line_range (Tuple[int, int], optional): Optional line range to highlight.
+
+ Returns:
+ Text: A text instance containing highlighted syntax.
+ """
+
+ base_style = self._get_base_style()
+ justify: JustifyMethod = (
+ "default" if base_style.transparent_background else "left"
+ )
+
+ text = Text(
+ justify=justify,
+ style=base_style,
+ tab_size=self.tab_size,
+ no_wrap=not self.word_wrap,
+ )
+ _get_theme_style = self._theme.get_style_for_token
+
+ lexer = self.lexer
+
+ if lexer is None:
+ text.append(code)
+ else:
+ if line_range:
+ # More complicated path to only stylize a portion of the code
+                # This speeds up further operations as there are fewer spans to process
+ line_start, line_end = line_range
+
+ def line_tokenize() -> Iterable[Tuple[Any, str]]:
+ """Split tokens to one per line."""
+ assert lexer # required to make MyPy happy - we know lexer is not None at this point
+
+ for token_type, token in lexer.get_tokens(code):
+ while token:
+ line_token, new_line, token = token.partition("\n")
+ yield token_type, line_token + new_line
+
+ def tokens_to_spans() -> Iterable[Tuple[str, Optional[Style]]]:
+ """Convert tokens to spans."""
+ tokens = iter(line_tokenize())
+ line_no = 0
+ _line_start = line_start - 1 if line_start else 0
+
+ # Skip over tokens until line start
+ while line_no < _line_start:
+ _token_type, token = next(tokens)
+ yield (token, None)
+ if token.endswith("\n"):
+ line_no += 1
+ # Generate spans until line end
+ for token_type, token in tokens:
+ yield (token, _get_theme_style(token_type))
+ if token.endswith("\n"):
+ line_no += 1
+ if line_end and line_no >= line_end:
+ break
+
+ text.append_tokens(tokens_to_spans())
+
+ else:
+ text.append_tokens(
+ (token, _get_theme_style(token_type))
+ for token_type, token in lexer.get_tokens(code)
+ )
+ if self.background_color is not None:
+ text.stylize(f"on {self.background_color}")
+
+ if self._stylized_ranges:
+ self._apply_stylized_ranges(text)
+
+ return text
+
+ def stylize_range(
+ self, style: StyleType, start: SyntaxPosition, end: SyntaxPosition
+ ) -> None:
+ """
+        Adds a custom style to a part of the code, which will be applied to the syntax display when it's rendered.
+        Line numbers are 1-based, while column indexes are 0-based.
+
+ Args:
+ style (StyleType): The style to apply.
+ start (Tuple[int, int]): The start of the range, in the form `[line number, column index]`.
+ end (Tuple[int, int]): The end of the range, in the form `[line number, column index]`.
+ """
+ self._stylized_ranges.append(_SyntaxHighlightRange(style, start, end))
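+
+    # Minimal sketch (assuming ``syntax`` is a Syntax instance): style the
+    # first five columns of line 1; lines are 1-based, columns 0-based.
+    #
+    #     syntax.stylize_range("bold red", start=(1, 0), end=(1, 5))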
+
+ def _get_line_numbers_color(self, blend: float = 0.3) -> Color:
+ background_style = self._theme.get_background_style() + self.background_style
+ background_color = background_style.bgcolor
+ if background_color is None or background_color.is_system_defined:
+ return Color.default()
+ foreground_color = self._get_token_color(Token.Text)
+ if foreground_color is None or foreground_color.is_system_defined:
+ return foreground_color or Color.default()
+ new_color = blend_rgb(
+ background_color.get_truecolor(),
+ foreground_color.get_truecolor(),
+ cross_fade=blend,
+ )
+ return Color.from_triplet(new_color)
+
+ @property
+ def _numbers_column_width(self) -> int:
+ """Get the number of characters used to render the numbers column."""
+ column_width = 0
+ if self.line_numbers:
+ column_width = (
+ len(str(self.start_line + self.code.count("\n")))
+ + NUMBERS_COLUMN_DEFAULT_PADDING
+ )
+ return column_width
+
+ def _get_number_styles(self, console: Console) -> Tuple[Style, Style, Style]:
+ """Get background, number, and highlight styles for line numbers."""
+ background_style = self._get_base_style()
+ if background_style.transparent_background:
+ return Style.null(), Style(dim=True), Style.null()
+ if console.color_system in ("256", "truecolor"):
+ number_style = Style.chain(
+ background_style,
+ self._theme.get_style_for_token(Token.Text),
+ Style(color=self._get_line_numbers_color()),
+ self.background_style,
+ )
+ highlight_number_style = Style.chain(
+ background_style,
+ self._theme.get_style_for_token(Token.Text),
+ Style(bold=True, color=self._get_line_numbers_color(0.9)),
+ self.background_style,
+ )
+ else:
+ number_style = background_style + Style(dim=True)
+ highlight_number_style = background_style + Style(dim=False)
+ return background_style, number_style, highlight_number_style
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+
+ _, right, _, left = Padding.unpack(self.padding)
+ padding = left + right
+ if self.code_width is not None:
+ width = self.code_width + self._numbers_column_width + padding + 1
+ return Measurement(self._numbers_column_width, width)
+ lines = self.code.splitlines()
+ width = (
+ self._numbers_column_width
+ + padding
+ + (max(cell_len(line) for line in lines) if lines else 0)
+ )
+ if self.line_numbers:
+ width += 1
+ return Measurement(self._numbers_column_width, width)
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ segments = Segments(self._get_syntax(console, options))
+ if self.padding:
+ yield Padding(
+ segments, style=self._theme.get_background_style(), pad=self.padding
+ )
+ else:
+ yield segments
+
+ def _get_syntax(
+ self,
+ console: Console,
+ options: ConsoleOptions,
+ ) -> Iterable[Segment]:
+ """
+ Get the Segments for the Syntax object, excluding any vertical/horizontal padding
+ """
+ transparent_background = self._get_base_style().transparent_background
+ code_width = (
+ (
+ (options.max_width - self._numbers_column_width - 1)
+ if self.line_numbers
+ else options.max_width
+ )
+ if self.code_width is None
+ else self.code_width
+ )
+
+ ends_on_nl, processed_code = self._process_code(self.code)
+ text = self.highlight(processed_code, self.line_range)
+
+ if not self.line_numbers and not self.word_wrap and not self.line_range:
+ if not ends_on_nl:
+ text.remove_suffix("\n")
+ # Simple case of just rendering text
+ style = (
+ self._get_base_style()
+ + self._theme.get_style_for_token(Comment)
+ + Style(dim=True)
+ + self.background_style
+ )
+ if self.indent_guides and not options.ascii_only:
+ text = text.with_indent_guides(self.tab_size, style=style)
+ text.overflow = "crop"
+ if style.transparent_background:
+ yield from console.render(
+ text, options=options.update(width=code_width)
+ )
+ else:
+ syntax_lines = console.render_lines(
+ text,
+ options.update(width=code_width, height=None, justify="left"),
+ style=self.background_style,
+ pad=True,
+ new_lines=True,
+ )
+ for syntax_line in syntax_lines:
+ yield from syntax_line
+ return
+
+ start_line, end_line = self.line_range or (None, None)
+ line_offset = 0
+ if start_line:
+ line_offset = max(0, start_line - 1)
+ lines: Union[List[Text], Lines] = text.split("\n", allow_blank=ends_on_nl)
+ if self.line_range:
+ lines = lines[line_offset:end_line]
+
+ if self.indent_guides and not options.ascii_only:
+ style = (
+ self._get_base_style()
+ + self._theme.get_style_for_token(Comment)
+ + Style(dim=True)
+ + self.background_style
+ )
+ lines = (
+ Text("\n")
+ .join(lines)
+ .with_indent_guides(self.tab_size, style=style)
+ .split("\n", allow_blank=True)
+ )
+
+ numbers_column_width = self._numbers_column_width
+ render_options = options.update(width=code_width)
+
+ highlight_line = self.highlight_lines.__contains__
+ _Segment = Segment
+ new_line = _Segment("\n")
+
+ line_pointer = "> " if options.legacy_windows else "❱ "
+
+ (
+ background_style,
+ number_style,
+ highlight_number_style,
+ ) = self._get_number_styles(console)
+
+ for line_no, line in enumerate(lines, self.start_line + line_offset):
+ if self.word_wrap:
+ wrapped_lines = console.render_lines(
+ line,
+ render_options.update(height=None, justify="left"),
+ style=background_style,
+ pad=not transparent_background,
+ )
+ else:
+ segments = list(line.render(console, end=""))
+ if options.no_wrap:
+ wrapped_lines = [segments]
+ else:
+ wrapped_lines = [
+ _Segment.adjust_line_length(
+ segments,
+ render_options.max_width,
+ style=background_style,
+ pad=not transparent_background,
+ )
+ ]
+
+ if self.line_numbers:
+ wrapped_line_left_pad = _Segment(
+ " " * numbers_column_width + " ", background_style
+ )
+ for first, wrapped_line in loop_first(wrapped_lines):
+ if first:
+ line_column = str(line_no).rjust(numbers_column_width - 2) + " "
+ if highlight_line(line_no):
+ yield _Segment(line_pointer, Style(color="red"))
+ yield _Segment(line_column, highlight_number_style)
+ else:
+ yield _Segment(" ", highlight_number_style)
+ yield _Segment(line_column, number_style)
+ else:
+ yield wrapped_line_left_pad
+ yield from wrapped_line
+ yield new_line
+ else:
+ for wrapped_line in wrapped_lines:
+ yield from wrapped_line
+ yield new_line
+
+ def _apply_stylized_ranges(self, text: Text) -> None:
+ """
+        Apply stylized ranges to a text instance,
+        using its plain text to determine the right portions to apply the styles to.
+
+ Args:
+ text (Text): Text instance to apply the style to.
+ """
+ code = text.plain
+ newlines_offsets = [
+ # Let's add outer boundaries at each side of the list:
+ 0,
+ # N.B. using "\n" here is much faster than using metacharacters such as "^" or "\Z":
+ *[
+ match.start() + 1
+ for match in re.finditer("\n", code, flags=re.MULTILINE)
+ ],
+ len(code) + 1,
+ ]
+
+ for stylized_range in self._stylized_ranges:
+ start = _get_code_index_for_syntax_position(
+ newlines_offsets, stylized_range.start
+ )
+ end = _get_code_index_for_syntax_position(
+ newlines_offsets, stylized_range.end
+ )
+ if start is not None and end is not None:
+ text.stylize(stylized_range.style, start, end)
+
+ def _process_code(self, code: str) -> Tuple[bool, str]:
+ """
+ Applies various processing to a raw code string
+ (normalises it so it always ends with a line return, dedents it if necessary, etc.)
+
+ Args:
+ code (str): The raw code string to process
+
+ Returns:
+ Tuple[bool, str]: the boolean indicates whether the raw code ends with a line return,
+ while the string is the processed code.
+ """
+ ends_on_nl = code.endswith("\n")
+ processed_code = code if ends_on_nl else code + "\n"
+ processed_code = (
+ textwrap.dedent(processed_code) if self.dedent else processed_code
+ )
+ processed_code = processed_code.expandtabs(self.tab_size)
+ return ends_on_nl, processed_code
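+
+    # Worked example (tab_size=4, dedent=False): _process_code("a\tb") returns
+    # (False, "a   b\n"); a trailing newline is appended and the tab expanded
+    # to the next multiple of four columns.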
+
+
+def _get_code_index_for_syntax_position(
+ newlines_offsets: Sequence[int], position: SyntaxPosition
+) -> Optional[int]:
+ """
+ Returns the index of the code string for the given positions.
+
+ Args:
+ newlines_offsets (Sequence[int]): The offset of each newline character found in the code snippet.
+ position (SyntaxPosition): The position to search for.
+
+ Returns:
+ Optional[int]: The index of the code string for this position, or `None`
+        if the given position's line number is out of range. If the column is out of range,
+        its value is silently clamped so that it reaches the end of the line.
+ """
+ lines_count = len(newlines_offsets)
+
+ line_number, column_index = position
+ if line_number > lines_count or len(newlines_offsets) < (line_number + 1):
+ return None # `line_number` is out of range
+ line_index = line_number - 1
+ line_length = newlines_offsets[line_index + 1] - newlines_offsets[line_index] - 1
+ # If `column_index` is out of range: let's silently clamp it:
+ column_index = min(line_length, column_index)
+ return newlines_offsets[line_index] + column_index
+
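+# Worked example for the function above (values computed by hand): for
+# code = "ab\ncd\n" the offsets list is [0, 3, 6, 7], so position (2, 1),
+# i.e. line 2, column 1, resolves to newlines_offsets[1] + 1 == 4, the index
+# of "d" in the code string.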
+
+if __name__ == "__main__": # pragma: no cover
+
+ import argparse
+ import sys
+
+ parser = argparse.ArgumentParser(
+ description="Render syntax to the console with Rich"
+ )
+ parser.add_argument(
+ "path",
+ metavar="PATH",
+ help="path to file, or - for stdin",
+ )
+ parser.add_argument(
+ "-c",
+ "--force-color",
+ dest="force_color",
+ action="store_true",
+ default=None,
+ help="force color for non-terminals",
+ )
+ parser.add_argument(
+ "-i",
+ "--indent-guides",
+ dest="indent_guides",
+ action="store_true",
+ default=False,
+ help="display indent guides",
+ )
+ parser.add_argument(
+ "-l",
+ "--line-numbers",
+ dest="line_numbers",
+ action="store_true",
+ help="render line numbers",
+ )
+ parser.add_argument(
+ "-w",
+ "--width",
+ type=int,
+ dest="width",
+ default=None,
+ help="width of output (default will auto-detect)",
+ )
+ parser.add_argument(
+ "-r",
+ "--wrap",
+ dest="word_wrap",
+ action="store_true",
+ default=False,
+ help="word wrap long lines",
+ )
+ parser.add_argument(
+ "-s",
+ "--soft-wrap",
+ action="store_true",
+ dest="soft_wrap",
+ default=False,
+ help="enable soft wrapping mode",
+ )
+ parser.add_argument(
+ "-t", "--theme", dest="theme", default="monokai", help="pygments theme"
+ )
+ parser.add_argument(
+ "-b",
+ "--background-color",
+ dest="background_color",
+ default=None,
+ help="Override background color",
+ )
+ parser.add_argument(
+ "-x",
+ "--lexer",
+ default=None,
+ dest="lexer_name",
+ help="Lexer name",
+ )
+ parser.add_argument(
+ "-p", "--padding", type=int, default=0, dest="padding", help="Padding"
+ )
+ parser.add_argument(
+ "--highlight-line",
+ type=int,
+ default=None,
+ dest="highlight_line",
+ help="The line number (not index!) to highlight",
+ )
+ args = parser.parse_args()
+
+ from pip._vendor.rich.console import Console
+
+ console = Console(force_terminal=args.force_color, width=args.width)
+
+ if args.path == "-":
+ code = sys.stdin.read()
+ syntax = Syntax(
+ code=code,
+ lexer=args.lexer_name,
+ line_numbers=args.line_numbers,
+ word_wrap=args.word_wrap,
+ theme=args.theme,
+ background_color=args.background_color,
+ indent_guides=args.indent_guides,
+ padding=args.padding,
+ highlight_lines={args.highlight_line},
+ )
+ else:
+ syntax = Syntax.from_path(
+ args.path,
+ lexer=args.lexer_name,
+ line_numbers=args.line_numbers,
+ word_wrap=args.word_wrap,
+ theme=args.theme,
+ background_color=args.background_color,
+ indent_guides=args.indent_guides,
+ padding=args.padding,
+ highlight_lines={args.highlight_line},
+ )
+ console.print(syntax, soft_wrap=args.soft_wrap)
diff --git a/third_party/python/pip/pip/_vendor/rich/table.py b/third_party/python/pip/pip/_vendor/rich/table.py
new file mode 100644
index 0000000000..17409f2ee8
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/table.py
@@ -0,0 +1,1002 @@
+from dataclasses import dataclass, field, replace
+from typing import (
+ TYPE_CHECKING,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+)
+
+from . import box, errors
+from ._loop import loop_first_last, loop_last
+from ._pick import pick_bool
+from ._ratio import ratio_distribute, ratio_reduce
+from .align import VerticalAlignMethod
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .padding import Padding, PaddingDimensions
+from .protocol import is_renderable
+from .segment import Segment
+from .style import Style, StyleType
+from .text import Text, TextType
+
+if TYPE_CHECKING:
+ from .console import (
+ Console,
+ ConsoleOptions,
+ JustifyMethod,
+ OverflowMethod,
+ RenderableType,
+ RenderResult,
+ )
+
+
+@dataclass
+class Column:
+ """Defines a column within a ~Table.
+
+    Args:
+        header (RenderableType, optional): Renderable for the header (typically a string). Defaults to "".
+        footer (RenderableType, optional): Renderable for the footer (typically a string). Defaults to "".
+        header_style (StyleType, optional): The style of the header. Defaults to "".
+        footer_style (StyleType, optional): The style of the footer. Defaults to "".
+        style (StyleType, optional): The style of the column. Defaults to "".
+        justify (JustifyMethod, optional): How to justify text within the column ("left", "center", "right", or "full"). Defaults to "left".
+        vertical (VerticalAlignMethod, optional): How to vertically align content ("top", "middle", or "bottom"). Defaults to "top".
+        overflow (OverflowMethod, optional): Overflow method. Defaults to "ellipsis".
+        width (Optional[int], optional): Width of the column, or ``None`` (default) to auto calculate width.
+        min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None.
+        max_width (Optional[int], optional): Maximum width of column, or ``None`` for no maximum. Defaults to None.
+        ratio (Optional[int], optional): Ratio to use when calculating column width, or ``None`` (default) to adapt to column contents.
+        no_wrap (bool, optional): Prevent wrapping of text within the column. Defaults to ``False``.
+    """
+
+ header: "RenderableType" = ""
+ """RenderableType: Renderable for the header (typically a string)"""
+
+ footer: "RenderableType" = ""
+ """RenderableType: Renderable for the footer (typically a string)"""
+
+ header_style: StyleType = ""
+ """StyleType: The style of the header."""
+
+ footer_style: StyleType = ""
+ """StyleType: The style of the footer."""
+
+ style: StyleType = ""
+ """StyleType: The style of the column."""
+
+ justify: "JustifyMethod" = "left"
+ """str: How to justify text within the column ("left", "center", "right", or "full")"""
+
+ vertical: "VerticalAlignMethod" = "top"
+ """str: How to vertically align content ("top", "middle", or "bottom")"""
+
+ overflow: "OverflowMethod" = "ellipsis"
+ """str: Overflow method."""
+
+ width: Optional[int] = None
+ """Optional[int]: Width of the column, or ``None`` (default) to auto calculate width."""
+
+ min_width: Optional[int] = None
+ """Optional[int]: Minimum width of column, or ``None`` for no minimum. Defaults to None."""
+
+ max_width: Optional[int] = None
+ """Optional[int]: Maximum width of column, or ``None`` for no maximum. Defaults to None."""
+
+ ratio: Optional[int] = None
+ """Optional[int]: Ratio to use when calculating column width, or ``None`` (default) to adapt to column contents."""
+
+ no_wrap: bool = False
+ """bool: Prevent wrapping of text within the column. Defaults to ``False``."""
+
+ _index: int = 0
+ """Index of column."""
+
+ _cells: List["RenderableType"] = field(default_factory=list)
+
+ def copy(self) -> "Column":
+ """Return a copy of this Column."""
+ return replace(self, _cells=[])
+
+ @property
+ def cells(self) -> Iterable["RenderableType"]:
+ """Get all cells in the column, not including header."""
+ yield from self._cells
+
+ @property
+ def flexible(self) -> bool:
+ """Check if this column is flexible."""
+ return self.ratio is not None
+
+
+@dataclass
+class Row:
+ """Information regarding a row."""
+
+ style: Optional[StyleType] = None
+ """Style to apply to row."""
+
+ end_section: bool = False
+ """Indicated end of section, which will force a line beneath the row."""
+
+
+class _Cell(NamedTuple):
+ """A single cell in a table."""
+
+ style: StyleType
+ """Style to apply to cell."""
+ renderable: "RenderableType"
+ """Cell renderable."""
+ vertical: VerticalAlignMethod
+ """Cell vertical alignment."""
+
+
+class Table(JupyterMixin):
+ """A console renderable to draw a table.
+
+ Args:
+ *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
+ title (Union[str, Text], optional): The title of the table rendered at the top. Defaults to None.
+ caption (Union[str, Text], optional): The table caption rendered below. Defaults to None.
+ width (int, optional): The width in characters of the table, or ``None`` to automatically fit. Defaults to None.
+ min_width (Optional[int], optional): The minimum width of the table, or ``None`` for no minimum. Defaults to None.
+ box (box.Box, optional): One of the constants in box.py used to draw the edges (see :ref:`appendix_box`), or ``None`` for no box lines. Defaults to box.HEAVY_HEAD.
+        safe_box (Optional[bool], optional): Disable box characters that don't display on Windows legacy terminal with *raster* fonts. Defaults to True.
+ padding (PaddingDimensions, optional): Padding for cells (top, right, bottom, left). Defaults to (0, 1).
+ collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to False.
+ pad_edge (bool, optional): Enable padding of edge cells. Defaults to True.
+ expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
+ show_header (bool, optional): Show a header row. Defaults to True.
+ show_footer (bool, optional): Show a footer row. Defaults to False.
+ show_edge (bool, optional): Draw a box around the outside of the table. Defaults to True.
+ show_lines (bool, optional): Draw lines between every row. Defaults to False.
+        leading (int, optional): Number of blank lines between rows (precludes ``show_lines``). Defaults to 0.
+ style (Union[str, Style], optional): Default style for the table. Defaults to "none".
+        row_styles (Iterable[StyleType], optional): Optional list of row styles; if more than one style is given, the styles will alternate. Defaults to None.
+ header_style (Union[str, Style], optional): Style of the header. Defaults to "table.header".
+ footer_style (Union[str, Style], optional): Style of the footer. Defaults to "table.footer".
+ border_style (Union[str, Style], optional): Style of the border. Defaults to None.
+ title_style (Union[str, Style], optional): Style of the title. Defaults to None.
+ caption_style (Union[str, Style], optional): Style of the caption. Defaults to None.
+ title_justify (str, optional): Justify method for title. Defaults to "center".
+ caption_justify (str, optional): Justify method for caption. Defaults to "center".
+ highlight (bool, optional): Highlight cell contents (if str). Defaults to False.
+ """
+
+ columns: List[Column]
+ rows: List[Row]
+
+ def __init__(
+ self,
+ *headers: Union[Column, str],
+ title: Optional[TextType] = None,
+ caption: Optional[TextType] = None,
+ width: Optional[int] = None,
+ min_width: Optional[int] = None,
+ box: Optional[box.Box] = box.HEAVY_HEAD,
+ safe_box: Optional[bool] = None,
+ padding: PaddingDimensions = (0, 1),
+ collapse_padding: bool = False,
+ pad_edge: bool = True,
+ expand: bool = False,
+ show_header: bool = True,
+ show_footer: bool = False,
+ show_edge: bool = True,
+ show_lines: bool = False,
+ leading: int = 0,
+ style: StyleType = "none",
+ row_styles: Optional[Iterable[StyleType]] = None,
+ header_style: Optional[StyleType] = "table.header",
+ footer_style: Optional[StyleType] = "table.footer",
+ border_style: Optional[StyleType] = None,
+ title_style: Optional[StyleType] = None,
+ caption_style: Optional[StyleType] = None,
+ title_justify: "JustifyMethod" = "center",
+ caption_justify: "JustifyMethod" = "center",
+ highlight: bool = False,
+ ) -> None:
+
+ self.columns: List[Column] = []
+ self.rows: List[Row] = []
+ self.title = title
+ self.caption = caption
+ self.width = width
+ self.min_width = min_width
+ self.box = box
+ self.safe_box = safe_box
+ self._padding = Padding.unpack(padding)
+ self.pad_edge = pad_edge
+ self._expand = expand
+ self.show_header = show_header
+ self.show_footer = show_footer
+ self.show_edge = show_edge
+ self.show_lines = show_lines
+ self.leading = leading
+ self.collapse_padding = collapse_padding
+ self.style = style
+ self.header_style = header_style or ""
+ self.footer_style = footer_style or ""
+ self.border_style = border_style
+ self.title_style = title_style
+ self.caption_style = caption_style
+ self.title_justify: "JustifyMethod" = title_justify
+ self.caption_justify: "JustifyMethod" = caption_justify
+ self.highlight = highlight
+ self.row_styles: Sequence[StyleType] = list(row_styles or [])
+ append_column = self.columns.append
+ for header in headers:
+ if isinstance(header, str):
+ self.add_column(header=header)
+ else:
+ header._index = len(self.columns)
+ append_column(header)
+
+ @classmethod
+ def grid(
+ cls,
+ *headers: Union[Column, str],
+ padding: PaddingDimensions = 0,
+ collapse_padding: bool = True,
+ pad_edge: bool = False,
+ expand: bool = False,
+ ) -> "Table":
+ """Get a table with no lines, headers, or footer.
+
+ Args:
+ *headers (Union[Column, str]): Column headers, either as a string, or :class:`~rich.table.Column` instance.
+            padding (PaddingDimensions, optional): Padding around cells. Defaults to 0.
+ collapse_padding (bool, optional): Enable collapsing of padding around cells. Defaults to True.
+ pad_edge (bool, optional): Enable padding around edges of table. Defaults to False.
+ expand (bool, optional): Expand the table to fit the available space if ``True``, otherwise the table width will be auto-calculated. Defaults to False.
+
+ Returns:
+ Table: A table instance.
+ """
+ return cls(
+ *headers,
+ box=None,
+ padding=padding,
+ collapse_padding=collapse_padding,
+ show_header=False,
+ show_footer=False,
+ show_edge=False,
+ pad_edge=pad_edge,
+ expand=expand,
+ )
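+
+    # A minimal sketch of ``grid`` (cell values are placeholders): grids give
+    # column layout without any table chrome.
+    #
+    #     grid = Table.grid(padding=(0, 1))
+    #     grid.add_column(justify="left")
+    #     grid.add_column(justify="right")
+    #     grid.add_row("pip", "23.0")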
+
+ @property
+ def expand(self) -> bool:
+ """Setting a non-None self.width implies expand."""
+ return self._expand or self.width is not None
+
+ @expand.setter
+ def expand(self, expand: bool) -> None:
+ """Set expand."""
+ self._expand = expand
+
+ @property
+ def _extra_width(self) -> int:
+ """Get extra width to add to cell content."""
+ width = 0
+ if self.box and self.show_edge:
+ width += 2
+ if self.box:
+ width += len(self.columns) - 1
+ return width
+
+ @property
+ def row_count(self) -> int:
+ """Get the current number of rows."""
+ return len(self.rows)
+
+ def get_row_style(self, console: "Console", index: int) -> StyleType:
+ """Get the current row style."""
+ style = Style.null()
+ if self.row_styles:
+ style += console.get_style(self.row_styles[index % len(self.row_styles)])
+ row_style = self.rows[index].style
+ if row_style is not None:
+ style += console.get_style(row_style)
+ return style
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ max_width = options.max_width
+ if self.width is not None:
+ max_width = self.width
+ if max_width < 0:
+ return Measurement(0, 0)
+
+ extra_width = self._extra_width
+ max_width = sum(
+ self._calculate_column_widths(
+ console, options.update_width(max_width - extra_width)
+ )
+ )
+ _measure_column = self._measure_column
+
+ measurements = [
+ _measure_column(console, options.update_width(max_width), column)
+ for column in self.columns
+ ]
+ minimum_width = (
+ sum(measurement.minimum for measurement in measurements) + extra_width
+ )
+ maximum_width = (
+ sum(measurement.maximum for measurement in measurements) + extra_width
+ if (self.width is None)
+ else self.width
+ )
+ measurement = Measurement(minimum_width, maximum_width)
+ measurement = measurement.clamp(self.min_width)
+ return measurement
+
+ @property
+ def padding(self) -> Tuple[int, int, int, int]:
+ """Get cell padding."""
+ return self._padding
+
+ @padding.setter
+ def padding(self, padding: PaddingDimensions) -> "Table":
+ """Set cell padding."""
+ self._padding = Padding.unpack(padding)
+ return self
+
+ def add_column(
+ self,
+ header: "RenderableType" = "",
+ footer: "RenderableType" = "",
+ *,
+ header_style: Optional[StyleType] = None,
+ footer_style: Optional[StyleType] = None,
+ style: Optional[StyleType] = None,
+ justify: "JustifyMethod" = "left",
+ vertical: "VerticalAlignMethod" = "top",
+ overflow: "OverflowMethod" = "ellipsis",
+ width: Optional[int] = None,
+ min_width: Optional[int] = None,
+ max_width: Optional[int] = None,
+ ratio: Optional[int] = None,
+ no_wrap: bool = False,
+ ) -> None:
+ """Add a column to the table.
+
+ Args:
+ header (RenderableType, optional): Text or renderable for the header.
+ Defaults to "".
+ footer (RenderableType, optional): Text or renderable for the footer.
+ Defaults to "".
+ header_style (Union[str, Style], optional): Style for the header, or None for default. Defaults to None.
+ footer_style (Union[str, Style], optional): Style for the footer, or None for default. Defaults to None.
+ style (Union[str, Style], optional): Style for the column cells, or None for default. Defaults to None.
+ justify (JustifyMethod, optional): Alignment for cells. Defaults to "left".
+ vertical (VerticalAlignMethod, optional): Vertical alignment, one of "top", "middle", or "bottom". Defaults to "top".
+ overflow (OverflowMethod): Overflow method: "crop", "fold", "ellipsis". Defaults to "ellipsis".
+ width (int, optional): Desired width of column in characters, or None to fit to contents. Defaults to None.
+ min_width (Optional[int], optional): Minimum width of column, or ``None`` for no minimum. Defaults to None.
+ max_width (Optional[int], optional): Maximum width of column, or ``None`` for no maximum. Defaults to None.
+ ratio (int, optional): Flexible ratio for the column (requires ``Table.expand`` or ``Table.width``). Defaults to None.
+ no_wrap (bool, optional): Set to ``True`` to disable wrapping of this column.
+ """
+
+ column = Column(
+ _index=len(self.columns),
+ header=header,
+ footer=footer,
+ header_style=header_style or "",
+ footer_style=footer_style or "",
+ style=style or "",
+ justify=justify,
+ vertical=vertical,
+ overflow=overflow,
+ width=width,
+ min_width=min_width,
+ max_width=max_width,
+ ratio=ratio,
+ no_wrap=no_wrap,
+ )
+ self.columns.append(column)
+
+ def add_row(
+ self,
+ *renderables: Optional["RenderableType"],
+ style: Optional[StyleType] = None,
+ end_section: bool = False,
+ ) -> None:
+ """Add a row of renderables.
+
+ Args:
+ *renderables (None or renderable): Each cell in a row must be a renderable object (including str),
+ or ``None`` for a blank cell.
+ style (StyleType, optional): An optional style to apply to the entire row. Defaults to None.
+ end_section (bool, optional): End a section and draw a line. Defaults to False.
+
+ Raises:
+ errors.NotRenderableError: If you add something that can't be rendered.
+ """
+
+ def add_cell(column: Column, renderable: "RenderableType") -> None:
+ column._cells.append(renderable)
+
+ cell_renderables: List[Optional["RenderableType"]] = list(renderables)
+
+ columns = self.columns
+ if len(cell_renderables) < len(columns):
+ cell_renderables = [
+ *cell_renderables,
+ *[None] * (len(columns) - len(cell_renderables)),
+ ]
+ for index, renderable in enumerate(cell_renderables):
+ if index == len(columns):
+ column = Column(_index=index)
+ for _ in self.rows:
+ add_cell(column, Text(""))
+ self.columns.append(column)
+ else:
+ column = columns[index]
+ if renderable is None:
+ add_cell(column, "")
+ elif is_renderable(renderable):
+ add_cell(column, renderable)
+ else:
+ raise errors.NotRenderableError(
+ f"unable to render {type(renderable).__name__}; a string or other renderable object is required"
+ )
+ self.rows.append(Row(style=style, end_section=end_section))
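+
+    # Illustrative sketch combining add_column and add_row (cell values are
+    # placeholders); None renders as a blank cell:
+    #
+    #     table = Table("Package", "Version")
+    #     table.add_row("pip", "23.0")
+    #     table.add_row("rich", None)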
+
+ def add_section(self) -> None:
+ """Add a new section (draw a line after current row)."""
+
+ if self.rows:
+ self.rows[-1].end_section = True
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+
+ if not self.columns:
+ yield Segment("\n")
+ return
+
+ max_width = options.max_width
+ if self.width is not None:
+ max_width = self.width
+
+ extra_width = self._extra_width
+ widths = self._calculate_column_widths(
+ console, options.update_width(max_width - extra_width)
+ )
+ table_width = sum(widths) + extra_width
+
+ render_options = options.update(
+ width=table_width, highlight=self.highlight, height=None
+ )
+
+ def render_annotation(
+ text: TextType, style: StyleType, justify: "JustifyMethod" = "center"
+ ) -> "RenderResult":
+ render_text = (
+ console.render_str(text, style=style, highlight=False)
+ if isinstance(text, str)
+ else text
+ )
+ return console.render(
+ render_text, options=render_options.update(justify=justify)
+ )
+
+ if self.title:
+ yield from render_annotation(
+ self.title,
+ style=Style.pick_first(self.title_style, "table.title"),
+ justify=self.title_justify,
+ )
+ yield from self._render(console, render_options, widths)
+ if self.caption:
+ yield from render_annotation(
+ self.caption,
+ style=Style.pick_first(self.caption_style, "table.caption"),
+ justify=self.caption_justify,
+ )
+
+ def _calculate_column_widths(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> List[int]:
+ """Calculate the widths of each column, including padding, not including borders."""
+ max_width = options.max_width
+ columns = self.columns
+ width_ranges = [
+ self._measure_column(console, options, column) for column in columns
+ ]
+ widths = [_range.maximum or 1 for _range in width_ranges]
+ get_padding_width = self._get_padding_width
+ extra_width = self._extra_width
+ if self.expand:
+ ratios = [col.ratio or 0 for col in columns if col.flexible]
+ if any(ratios):
+ fixed_widths = [
+ 0 if column.flexible else _range.maximum
+ for _range, column in zip(width_ranges, columns)
+ ]
+ flex_minimum = [
+ (column.width or 1) + get_padding_width(column._index)
+ for column in columns
+ if column.flexible
+ ]
+ flexible_width = max_width - sum(fixed_widths)
+ flex_widths = ratio_distribute(flexible_width, ratios, flex_minimum)
+ iter_flex_widths = iter(flex_widths)
+ for index, column in enumerate(columns):
+ if column.flexible:
+ widths[index] = fixed_widths[index] + next(iter_flex_widths)
+ table_width = sum(widths)
+
+ if table_width > max_width:
+ widths = self._collapse_widths(
+ widths,
+ [(column.width is None and not column.no_wrap) for column in columns],
+ max_width,
+ )
+ table_width = sum(widths)
+ # last resort, reduce columns evenly
+ if table_width > max_width:
+ excess_width = table_width - max_width
+ widths = ratio_reduce(excess_width, [1] * len(widths), widths, widths)
+ table_width = sum(widths)
+
+ width_ranges = [
+ self._measure_column(console, options.update_width(width), column)
+ for width, column in zip(widths, columns)
+ ]
+ widths = [_range.maximum or 0 for _range in width_ranges]
+
+ if (table_width < max_width and self.expand) or (
+ self.min_width is not None and table_width < (self.min_width - extra_width)
+ ):
+ _max_width = (
+ max_width
+ if self.min_width is None
+ else min(self.min_width - extra_width, max_width)
+ )
+ pad_widths = ratio_distribute(_max_width - table_width, widths)
+ widths = [_width + pad for _width, pad in zip(widths, pad_widths)]
+
+ return widths
+
+ @classmethod
+ def _collapse_widths(
+ cls, widths: List[int], wrapable: List[bool], max_width: int
+ ) -> List[int]:
+ """Reduce widths so that the total is under max_width.
+
+ Args:
+ widths (List[int]): List of widths.
+ wrapable (List[bool]): List of booleans that indicate if a column may shrink.
+ max_width (int): Maximum width to reduce to.
+
+ Returns:
+ List[int]: A new list of widths.
+ """
+ total_width = sum(widths)
+ excess_width = total_width - max_width
+ if any(wrapable):
+ while total_width and excess_width > 0:
+ max_column = max(
+ width for width, allow_wrap in zip(widths, wrapable) if allow_wrap
+ )
+ second_max_column = max(
+ width if allow_wrap and width != max_column else 0
+ for width, allow_wrap in zip(widths, wrapable)
+ )
+ column_difference = max_column - second_max_column
+ ratios = [
+ (1 if (width == max_column and allow_wrap) else 0)
+ for width, allow_wrap in zip(widths, wrapable)
+ ]
+ if not any(ratios) or not column_difference:
+ break
+ max_reduce = [min(excess_width, column_difference)] * len(widths)
+ widths = ratio_reduce(excess_width, ratios, max_reduce, widths)
+
+ total_width = sum(widths)
+ excess_width = total_width - max_width
+ return widths
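+
+    # Worked example (assuming ratio_reduce assigns the reduction to the
+    # flagged columns): widths [10, 20, 30] with wrapable [True, True, True]
+    # and max_width 50 leave 10 excess cells; only the widest column (30) is
+    # flagged on the first pass, so it shrinks by 10, giving [10, 20, 20].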
+
+ def _get_cells(
+ self, console: "Console", column_index: int, column: Column
+ ) -> Iterable[_Cell]:
+ """Get all the cells with padding and optional header."""
+
+ collapse_padding = self.collapse_padding
+ pad_edge = self.pad_edge
+ padding = self.padding
+ any_padding = any(padding)
+
+ first_column = column_index == 0
+ last_column = column_index == len(self.columns) - 1
+
+ _padding_cache: Dict[Tuple[bool, bool], Tuple[int, int, int, int]] = {}
+
+ def get_padding(first_row: bool, last_row: bool) -> Tuple[int, int, int, int]:
+ cached = _padding_cache.get((first_row, last_row))
+ if cached:
+ return cached
+ top, right, bottom, left = padding
+
+ if collapse_padding:
+ if not first_column:
+ left = max(0, left - right)
+ if not last_row:
+ bottom = max(0, top - bottom)
+
+ if not pad_edge:
+ if first_column:
+ left = 0
+ if last_column:
+ right = 0
+ if first_row:
+ top = 0
+ if last_row:
+ bottom = 0
+ _padding = (top, right, bottom, left)
+ _padding_cache[(first_row, last_row)] = _padding
+ return _padding
+
+ raw_cells: List[Tuple[StyleType, "RenderableType"]] = []
+ _append = raw_cells.append
+ get_style = console.get_style
+ if self.show_header:
+ header_style = get_style(self.header_style or "") + get_style(
+ column.header_style
+ )
+ _append((header_style, column.header))
+ cell_style = get_style(column.style or "")
+ for cell in column.cells:
+ _append((cell_style, cell))
+ if self.show_footer:
+ footer_style = get_style(self.footer_style or "") + get_style(
+ column.footer_style
+ )
+ _append((footer_style, column.footer))
+
+ if any_padding:
+ _Padding = Padding
+ for first, last, (style, renderable) in loop_first_last(raw_cells):
+ yield _Cell(
+ style,
+ _Padding(renderable, get_padding(first, last)),
+ getattr(renderable, "vertical", None) or column.vertical,
+ )
+ else:
+ for (style, renderable) in raw_cells:
+ yield _Cell(
+ style,
+ renderable,
+ getattr(renderable, "vertical", None) or column.vertical,
+ )
+
+ def _get_padding_width(self, column_index: int) -> int:
+ """Get extra width from padding."""
+ _, pad_right, _, pad_left = self.padding
+ if self.collapse_padding:
+ if column_index > 0:
+ pad_left = max(0, pad_left - pad_right)
+ return pad_left + pad_right
+
+ def _measure_column(
+ self,
+ console: "Console",
+ options: "ConsoleOptions",
+ column: Column,
+ ) -> Measurement:
+ """Get the minimum and maximum width of the column."""
+
+ max_width = options.max_width
+ if max_width < 1:
+ return Measurement(0, 0)
+
+ padding_width = self._get_padding_width(column._index)
+
+ if column.width is not None:
+ # Fixed width column
+ return Measurement(
+ column.width + padding_width, column.width + padding_width
+ ).with_maximum(max_width)
+ # Flexible column, we need to measure contents
+ min_widths: List[int] = []
+ max_widths: List[int] = []
+ append_min = min_widths.append
+ append_max = max_widths.append
+ get_render_width = Measurement.get
+ for cell in self._get_cells(console, column._index, column):
+ _min, _max = get_render_width(console, options, cell.renderable)
+ append_min(_min)
+ append_max(_max)
+
+ measurement = Measurement(
+ max(min_widths) if min_widths else 1,
+ max(max_widths) if max_widths else max_width,
+ ).with_maximum(max_width)
+ measurement = measurement.clamp(
+ None if column.min_width is None else column.min_width + padding_width,
+ None if column.max_width is None else column.max_width + padding_width,
+ )
+ return measurement
+
+ def _render(
+ self, console: "Console", options: "ConsoleOptions", widths: List[int]
+ ) -> "RenderResult":
+ table_style = console.get_style(self.style or "")
+
+ border_style = table_style + console.get_style(self.border_style or "")
+ _column_cells = (
+ self._get_cells(console, column_index, column)
+ for column_index, column in enumerate(self.columns)
+ )
+ row_cells: List[Tuple[_Cell, ...]] = list(zip(*_column_cells))
+ _box = (
+ self.box.substitute(
+ options, safe=pick_bool(self.safe_box, console.safe_box)
+ )
+ if self.box
+ else None
+ )
+ _box = _box.get_plain_headed_box() if _box and not self.show_header else _box
+
+ new_line = Segment.line()
+
+ columns = self.columns
+ show_header = self.show_header
+ show_footer = self.show_footer
+ show_edge = self.show_edge
+ show_lines = self.show_lines
+ leading = self.leading
+
+ _Segment = Segment
+ if _box:
+ box_segments = [
+ (
+ _Segment(_box.head_left, border_style),
+ _Segment(_box.head_right, border_style),
+ _Segment(_box.head_vertical, border_style),
+ ),
+ (
+ _Segment(_box.foot_left, border_style),
+ _Segment(_box.foot_right, border_style),
+ _Segment(_box.foot_vertical, border_style),
+ ),
+ (
+ _Segment(_box.mid_left, border_style),
+ _Segment(_box.mid_right, border_style),
+ _Segment(_box.mid_vertical, border_style),
+ ),
+ ]
+ if show_edge:
+ yield _Segment(_box.get_top(widths), border_style)
+ yield new_line
+ else:
+ box_segments = []
+
+ get_row_style = self.get_row_style
+ get_style = console.get_style
+
+ for index, (first, last, row_cell) in enumerate(loop_first_last(row_cells)):
+ header_row = first and show_header
+ footer_row = last and show_footer
+ row = (
+ self.rows[index - show_header]
+ if (not header_row and not footer_row)
+ else None
+ )
+ max_height = 1
+ cells: List[List[List[Segment]]] = []
+ if header_row or footer_row:
+ row_style = Style.null()
+ else:
+ row_style = get_style(
+ get_row_style(console, index - 1 if show_header else index)
+ )
+ for width, cell, column in zip(widths, row_cell, columns):
+ render_options = options.update(
+ width=width,
+ justify=column.justify,
+ no_wrap=column.no_wrap,
+ overflow=column.overflow,
+ height=None,
+ )
+ lines = console.render_lines(
+ cell.renderable,
+ render_options,
+ style=get_style(cell.style) + row_style,
+ )
+ max_height = max(max_height, len(lines))
+ cells.append(lines)
+
+ row_height = max(len(cell) for cell in cells)
+
+ def align_cell(
+ cell: List[List[Segment]],
+ vertical: "VerticalAlignMethod",
+ width: int,
+ style: Style,
+ ) -> List[List[Segment]]:
+ if header_row:
+ vertical = "bottom"
+ elif footer_row:
+ vertical = "top"
+
+ if vertical == "top":
+ return _Segment.align_top(cell, width, row_height, style)
+ elif vertical == "middle":
+ return _Segment.align_middle(cell, width, row_height, style)
+ return _Segment.align_bottom(cell, width, row_height, style)
+
+ cells[:] = [
+ _Segment.set_shape(
+ align_cell(
+ cell,
+ _cell.vertical,
+ width,
+ get_style(_cell.style) + row_style,
+ ),
+ width,
+ max_height,
+ )
+ for width, _cell, cell, column in zip(widths, row_cell, cells, columns)
+ ]
+
+ if _box:
+ if last and show_footer:
+ yield _Segment(
+ _box.get_row(widths, "foot", edge=show_edge), border_style
+ )
+ yield new_line
+ left, right, _divider = box_segments[0 if first else (2 if last else 1)]
+
+ # If the column divider is whitespace also style it with the row background
+ divider = (
+ _divider
+ if _divider.text.strip()
+ else _Segment(
+ _divider.text, row_style.background_style + _divider.style
+ )
+ )
+ for line_no in range(max_height):
+ if show_edge:
+ yield left
+ for last_cell, rendered_cell in loop_last(cells):
+ yield from rendered_cell[line_no]
+ if not last_cell:
+ yield divider
+ if show_edge:
+ yield right
+ yield new_line
+ else:
+ for line_no in range(max_height):
+ for rendered_cell in cells:
+ yield from rendered_cell[line_no]
+ yield new_line
+ if _box and first and show_header:
+ yield _Segment(
+ _box.get_row(widths, "head", edge=show_edge), border_style
+ )
+ yield new_line
+ end_section = row and row.end_section
+ if _box and (show_lines or leading or end_section):
+ if (
+ not last
+ and not (show_footer and index >= len(row_cells) - 2)
+ and not (show_header and header_row)
+ ):
+ if leading:
+ yield _Segment(
+ _box.get_row(widths, "mid", edge=show_edge) * leading,
+ border_style,
+ )
+ else:
+ yield _Segment(
+ _box.get_row(widths, "row", edge=show_edge), border_style
+ )
+ yield new_line
+
+ if _box and show_edge:
+ yield _Segment(_box.get_bottom(widths), border_style)
+ yield new_line
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+ from pip._vendor.rich.highlighter import ReprHighlighter
+ from pip._vendor.rich.table import Table as Table
+
+ from ._timer import timer
+
+ with timer("Table render"):
+ table = Table(
+ title="Star Wars Movies",
+ caption="Rich example table",
+ caption_justify="right",
+ )
+
+ table.add_column(
+ "Released", header_style="bright_cyan", style="cyan", no_wrap=True
+ )
+ table.add_column("Title", style="magenta")
+ table.add_column("Box Office", justify="right", style="green")
+
+ table.add_row(
+ "Dec 20, 2019",
+ "Star Wars: The Rise of Skywalker",
+ "$952,110,690",
+ )
+ table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
+ table.add_row(
+ "Dec 15, 2017",
+ "Star Wars Ep. V111: The Last Jedi",
+ "$1,332,539,889",
+ style="on black",
+ end_section=True,
+ )
+ table.add_row(
+ "Dec 16, 2016",
+ "Rogue One: A Star Wars Story",
+ "$1,332,439,889",
+ )
+
+ def header(text: str) -> None:
+ console.print()
+ console.rule(highlight(text))
+ console.print()
+
+ console = Console()
+ highlight = ReprHighlighter()
+ header("Example Table")
+ console.print(table, justify="center")
+
+ table.expand = True
+ header("expand=True")
+ console.print(table)
+
+ table.width = 50
+ header("width=50")
+
+ console.print(table, justify="center")
+
+ table.width = None
+ table.expand = False
+ table.row_styles = ["dim", "none"]
+ header("row_styles=['dim', 'none']")
+
+ console.print(table, justify="center")
+
+ table.width = None
+ table.expand = False
+ table.row_styles = ["dim", "none"]
+ table.leading = 1
+ header("leading=1, row_styles=['dim', 'none']")
+ console.print(table, justify="center")
+
+ table.width = None
+ table.expand = False
+ table.row_styles = ["dim", "none"]
+ table.show_lines = True
+ table.leading = 0
+ header("show_lines=True, row_styles=['dim', 'none']")
+ console.print(table, justify="center")
diff --git a/third_party/python/pip/pip/_vendor/rich/terminal_theme.py b/third_party/python/pip/pip/_vendor/rich/terminal_theme.py
new file mode 100644
index 0000000000..565e9d960f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/terminal_theme.py
@@ -0,0 +1,153 @@
+from typing import List, Optional, Tuple
+
+from .color_triplet import ColorTriplet
+from .palette import Palette
+
+_ColorTuple = Tuple[int, int, int]
+
+
+class TerminalTheme:
+ """A color theme used when exporting console content.
+
+ Args:
+ background (Tuple[int, int, int]): The background color.
+ foreground (Tuple[int, int, int]): The foreground (text) color.
+ normal (List[Tuple[int, int, int]]): A list of 8 normal intensity colors.
+ bright (List[Tuple[int, int, int]], optional): A list of 8 bright colors, or None
+ to repeat normal intensity. Defaults to None.
+ """
+
+ def __init__(
+ self,
+ background: _ColorTuple,
+ foreground: _ColorTuple,
+ normal: List[_ColorTuple],
+ bright: Optional[List[_ColorTuple]] = None,
+ ) -> None:
+ self.background_color = ColorTriplet(*background)
+ self.foreground_color = ColorTriplet(*foreground)
+ self.ansi_colors = Palette(normal + (bright or normal))
+
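+# Illustrative sketch (editor's addition, not part of the vendored file): a
+# TerminalTheme is two color triplets plus a 16-color ANSI palette; omitting
+# ``bright`` reuses the normal colors for palette indices 8-15.
+#
+# >>> greys = [(i * 32, i * 32, i * 32) for i in range(8)]
+# >>> mono = TerminalTheme((0, 0, 0), (255, 255, 255), greys)
+# >>> mono.ansi_colors[8] == mono.ansi_colors[0]
+# True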
+
+DEFAULT_TERMINAL_THEME = TerminalTheme(
+ (255, 255, 255),
+ (0, 0, 0),
+ [
+ (0, 0, 0),
+ (128, 0, 0),
+ (0, 128, 0),
+ (128, 128, 0),
+ (0, 0, 128),
+ (128, 0, 128),
+ (0, 128, 128),
+ (192, 192, 192),
+ ],
+ [
+ (128, 128, 128),
+ (255, 0, 0),
+ (0, 255, 0),
+ (255, 255, 0),
+ (0, 0, 255),
+ (255, 0, 255),
+ (0, 255, 255),
+ (255, 255, 255),
+ ],
+)
+
+MONOKAI = TerminalTheme(
+ (12, 12, 12),
+ (217, 217, 217),
+ [
+ (26, 26, 26),
+ (244, 0, 95),
+ (152, 224, 36),
+ (253, 151, 31),
+ (157, 101, 255),
+ (244, 0, 95),
+ (88, 209, 235),
+ (196, 197, 181),
+ (98, 94, 76),
+ ],
+ [
+ (244, 0, 95),
+ (152, 224, 36),
+ (224, 213, 97),
+ (157, 101, 255),
+ (244, 0, 95),
+ (88, 209, 235),
+ (246, 246, 239),
+ ],
+)
+DIMMED_MONOKAI = TerminalTheme(
+ (25, 25, 25),
+ (185, 188, 186),
+ [
+ (58, 61, 67),
+ (190, 63, 72),
+ (135, 154, 59),
+ (197, 166, 53),
+ (79, 118, 161),
+ (133, 92, 141),
+ (87, 143, 164),
+ (185, 188, 186),
+ (136, 137, 135),
+ ],
+ [
+ (251, 0, 31),
+ (15, 114, 47),
+ (196, 112, 51),
+ (24, 109, 227),
+ (251, 0, 103),
+ (46, 112, 109),
+ (253, 255, 185),
+ ],
+)
+NIGHT_OWLISH = TerminalTheme(
+ (255, 255, 255),
+ (64, 63, 83),
+ [
+ (1, 22, 39),
+ (211, 66, 62),
+ (42, 162, 152),
+ (218, 170, 1),
+ (72, 118, 214),
+ (64, 63, 83),
+ (8, 145, 106),
+ (122, 129, 129),
+ (122, 129, 129),
+ ],
+ [
+ (247, 110, 110),
+ (73, 208, 197),
+ (218, 194, 107),
+ (92, 167, 228),
+ (105, 112, 152),
+ (0, 201, 144),
+ (152, 159, 177),
+ ],
+)
+
+SVG_EXPORT_THEME = TerminalTheme(
+ (41, 41, 41),
+ (197, 200, 198),
+ [
+ (75, 78, 85),
+ (204, 85, 90),
+ (152, 168, 75),
+ (208, 179, 68),
+ (96, 138, 177),
+ (152, 114, 159),
+ (104, 160, 179),
+ (197, 200, 198),
+ (154, 155, 153),
+ ],
+ [
+ (255, 38, 39),
+ (0, 130, 61),
+ (208, 132, 66),
+ (25, 132, 233),
+ (255, 44, 122),
+ (57, 130, 128),
+ (253, 253, 197),
+ ],
+)
diff --git a/third_party/python/pip/pip/_vendor/rich/text.py b/third_party/python/pip/pip/_vendor/rich/text.py
new file mode 100644
index 0000000000..b14055aa7b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/text.py
@@ -0,0 +1,1311 @@
+import re
+from functools import partial, reduce
+from math import gcd
+from operator import itemgetter
+from typing import (
+ TYPE_CHECKING,
+ Any,
+ Callable,
+ Dict,
+ Iterable,
+ List,
+ NamedTuple,
+ Optional,
+ Tuple,
+ Union,
+)
+
+from ._loop import loop_last
+from ._pick import pick_bool
+from ._wrap import divide_line
+from .align import AlignMethod
+from .cells import cell_len, set_cell_size
+from .containers import Lines
+from .control import strip_control_codes
+from .emoji import EmojiVariant
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleType
+
+if TYPE_CHECKING: # pragma: no cover
+ from .console import Console, ConsoleOptions, JustifyMethod, OverflowMethod
+
+DEFAULT_JUSTIFY: "JustifyMethod" = "default"
+DEFAULT_OVERFLOW: "OverflowMethod" = "fold"
+
+
+_re_whitespace = re.compile(r"\s+$")
+
+TextType = Union[str, "Text"]
+
+GetStyleCallable = Callable[[str], Optional[StyleType]]
+
+
+class Span(NamedTuple):
+ """A marked up region in some text."""
+
+ start: int
+ """Span start index."""
+ end: int
+ """Span end index."""
+ style: Union[str, Style]
+ """Style associated with the span."""
+
+ def __repr__(self) -> str:
+ return (
+ f"Span({self.start}, {self.end}, {self.style!r})"
+ if (isinstance(self.style, Style) and self.style._meta)
+ else f"Span({self.start}, {self.end}, {repr(self.style)})"
+ )
+
+ def __bool__(self) -> bool:
+ return self.end > self.start
+
+ def split(self, offset: int) -> Tuple["Span", Optional["Span"]]:
+ """Split a span in to 2 from a given offset."""
+
+ if offset < self.start:
+ return self, None
+ if offset >= self.end:
+ return self, None
+
+ start, end, style = self
+ span1 = Span(start, min(end, offset), style)
+ span2 = Span(span1.end, end, style)
+ return span1, span2
+
+ def move(self, offset: int) -> "Span":
+ """Move start and end by a given offset.
+
+ Args:
+ offset (int): Number of characters to add to start and end.
+
+ Returns:
+ Span: A new Span with adjusted position.
+ """
+ start, end, style = self
+ return Span(start + offset, end + offset, style)
+
+ def right_crop(self, offset: int) -> "Span":
+ """Crop the span at the given offset.
+
+ Args:
+ offset (int): A value between start and end.
+
+ Returns:
+ Span: A new (possibly smaller) span.
+ """
+ start, end, style = self
+ if offset >= end:
+ return self
+ return Span(start, min(offset, end), style)
+
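+# Illustrative sketch (editor's addition): Span arithmetic as used throughout
+# this module. Offsets outside the span leave it unchanged.
+#
+# >>> span = Span(0, 10, "bold")
+# >>> span.split(4)
+# (Span(0, 4, 'bold'), Span(4, 10, 'bold'))
+# >>> span.move(5)
+# Span(5, 15, 'bold')
+# >>> span.right_crop(7)
+# Span(0, 7, 'bold')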
+
+class Text(JupyterMixin):
+ """Text with color / style.
+
+ Args:
+ text (str, optional): Default unstyled text. Defaults to "".
+ style (Union[str, Style], optional): Base style for text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+ no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
+ end (str, optional): Character to end text with. Defaults to "\\\\n".
+ tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
+ spans (List[Span], optional): A list of predefined style spans. Defaults to None.
+ """
+
+ __slots__ = [
+ "_text",
+ "style",
+ "justify",
+ "overflow",
+ "no_wrap",
+ "end",
+ "tab_size",
+ "_spans",
+ "_length",
+ ]
+
+ def __init__(
+ self,
+ text: str = "",
+ style: Union[str, Style] = "",
+ *,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = None,
+ end: str = "\n",
+ tab_size: Optional[int] = 8,
+ spans: Optional[List[Span]] = None,
+ ) -> None:
+ sanitized_text = strip_control_codes(text)
+ self._text = [sanitized_text]
+ self.style = style
+ self.justify: Optional["JustifyMethod"] = justify
+ self.overflow: Optional["OverflowMethod"] = overflow
+ self.no_wrap = no_wrap
+ self.end = end
+ self.tab_size = tab_size
+ self._spans: List[Span] = spans or []
+ self._length: int = len(sanitized_text)
+
+ def __len__(self) -> int:
+ return self._length
+
+ def __bool__(self) -> bool:
+ return bool(self._length)
+
+ def __str__(self) -> str:
+ return self.plain
+
+ def __repr__(self) -> str:
+ return f"<text {self.plain!r} {self._spans!r}>"
+
+ def __add__(self, other: Any) -> "Text":
+ if isinstance(other, (str, Text)):
+ result = self.copy()
+ result.append(other)
+ return result
+ return NotImplemented
+
+ def __eq__(self, other: object) -> bool:
+ if not isinstance(other, Text):
+ return NotImplemented
+ return self.plain == other.plain and self._spans == other._spans
+
+ def __contains__(self, other: object) -> bool:
+ if isinstance(other, str):
+ return other in self.plain
+ elif isinstance(other, Text):
+ return other.plain in self.plain
+ return False
+
+ def __getitem__(self, slice: Union[int, slice]) -> "Text":
+ def get_text_at(offset: int) -> "Text":
+ _Span = Span
+ text = Text(
+ self.plain[offset],
+ spans=[
+ _Span(0, 1, style)
+ for start, end, style in self._spans
+ if end > offset >= start
+ ],
+ end="",
+ )
+ return text
+
+ if isinstance(slice, int):
+ return get_text_at(slice)
+ else:
+ start, stop, step = slice.indices(len(self.plain))
+ if step == 1:
+ lines = self.divide([start, stop])
+ return lines[1]
+ else:
+ # This would be a bit of work to implement efficiently
+ # For now, it's not required
+ raise TypeError("slices with step!=1 are not supported")
+
+ @property
+ def cell_len(self) -> int:
+ """Get the number of cells required to render this text."""
+ return cell_len(self.plain)
+
+ @property
+ def markup(self) -> str:
+ """Get console markup to render this Text.
+
+ Returns:
+ str: A string with console markup tags.
+ """
+ from .markup import escape
+
+ output: List[str] = []
+
+ plain = self.plain
+ markup_spans = [
+ (0, False, self.style),
+ *((span.start, False, span.style) for span in self._spans),
+ *((span.end, True, span.style) for span in self._spans),
+ (len(plain), True, self.style),
+ ]
+ markup_spans.sort(key=itemgetter(0, 1))
+ position = 0
+ append = output.append
+ for offset, closing, style in markup_spans:
+ if offset > position:
+ append(escape(plain[position:offset]))
+ position = offset
+ if style:
+ append(f"[/{style}]" if closing else f"[{style}]")
+ markup = "".join(output)
+ return markup
+
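+ # Illustrative sketch (editor's addition): ``markup`` reverses markup
+ # parsing, emitting open/close tags at span boundaries.
+ #
+ # >>> text = Text("hello")
+ # >>> text.stylize("bold", 0, 2)
+ # >>> text.markup
+ # '[bold]he[/bold]llo'
+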
+ @classmethod
+ def from_markup(
+ cls,
+ text: str,
+ *,
+ style: Union[str, Style] = "",
+ emoji: bool = True,
+ emoji_variant: Optional[EmojiVariant] = None,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ end: str = "\n",
+ ) -> "Text":
+ """Create Text instance from markup.
+
+ Args:
+ text (str): A string containing console markup.
+ emoji (bool, optional): Also render emoji code. Defaults to True.
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+ end (str, optional): Character to end text with. Defaults to "\\\\n".
+
+ Returns:
+ Text: A Text instance with markup rendered.
+ """
+ from .markup import render
+
+ rendered_text = render(text, style, emoji=emoji, emoji_variant=emoji_variant)
+ rendered_text.justify = justify
+ rendered_text.overflow = overflow
+ rendered_text.end = end
+ return rendered_text
+
+ @classmethod
+ def from_ansi(
+ cls,
+ text: str,
+ *,
+ style: Union[str, Style] = "",
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = None,
+ end: str = "\n",
+ tab_size: Optional[int] = 8,
+ ) -> "Text":
+ """Create a Text object from a string containing ANSI escape codes.
+
+ Args:
+ text (str): A string containing escape codes.
+ style (Union[str, Style], optional): Base style for text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+ no_wrap (bool, optional): Disable text wrapping, or None for default. Defaults to None.
+ end (str, optional): Character to end text with. Defaults to "\\\\n".
+ tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
+ """
+ from .ansi import AnsiDecoder
+
+ joiner = Text(
+ "\n",
+ justify=justify,
+ overflow=overflow,
+ no_wrap=no_wrap,
+ end=end,
+ tab_size=tab_size,
+ style=style,
+ )
+ decoder = AnsiDecoder()
+ result = joiner.join(line for line in decoder.decode(text))
+ return result
+
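+ # Illustrative sketch (editor's addition): ``from_ansi`` strips the escape
+ # sequences and records their styling as spans.
+ #
+ # >>> text = Text.from_ansi("\x1b[1mHello\x1b[0m, World!")
+ # >>> text.plain
+ # 'Hello, World!'
+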
+ @classmethod
+ def styled(
+ cls,
+ text: str,
+ style: StyleType = "",
+ *,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ ) -> "Text":
+ """Construct a Text instance with a pre-applied styled. A style applied in this way won't be used
+ to pad the text when it is justified.
+
+ Args:
+ text (str): A string containing console markup.
+ style (Union[str, Style]): Style to apply to the text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+
+ Returns:
+ Text: A text instance with a style applied to the entire string.
+ """
+ styled_text = cls(text, justify=justify, overflow=overflow)
+ styled_text.stylize(style)
+ return styled_text
+
+ @classmethod
+ def assemble(
+ cls,
+ *parts: Union[str, "Text", Tuple[str, StyleType]],
+ style: Union[str, Style] = "",
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ no_wrap: Optional[bool] = None,
+ end: str = "\n",
+ tab_size: int = 8,
+ meta: Optional[Dict[str, Any]] = None,
+ ) -> "Text":
+ """Construct a text instance by combining a sequence of strings with optional styles.
+ The positional arguments should be either strings, or a tuple of string + style.
+
+ Args:
+ style (Union[str, Style], optional): Base style for text. Defaults to "".
+ justify (str, optional): Justify method: "left", "center", "full", "right". Defaults to None.
+ overflow (str, optional): Overflow method: "crop", "fold", "ellipsis". Defaults to None.
+ end (str, optional): Character to end text with. Defaults to "\\\\n".
+ tab_size (int): Number of spaces per tab, or ``None`` to use ``console.tab_size``. Defaults to 8.
+ meta (Dict[str, Any], optional): Meta data to apply to text, or None for no meta data. Defaults to None.
+
+ Returns:
+ Text: A new text instance.
+ """
+ text = cls(
+ style=style,
+ justify=justify,
+ overflow=overflow,
+ no_wrap=no_wrap,
+ end=end,
+ tab_size=tab_size,
+ )
+ append = text.append
+ _Text = Text
+ for part in parts:
+ if isinstance(part, (_Text, str)):
+ append(part)
+ else:
+ append(*part)
+ if meta:
+ text.apply_meta(meta)
+ return text
+
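+ # Illustrative sketch (editor's addition): plain strings and (text, style)
+ # tuples may be mixed freely in ``assemble``.
+ #
+ # >>> text = Text.assemble("Hello, ", ("World", "bold red"), "!")
+ # >>> text.spans
+ # [Span(7, 12, 'bold red')]
+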
+ @property
+ def plain(self) -> str:
+ """Get the text as a single string."""
+ if len(self._text) != 1:
+ self._text[:] = ["".join(self._text)]
+ return self._text[0]
+
+ @plain.setter
+ def plain(self, new_text: str) -> None:
+ """Set the text to a new value."""
+ if new_text != self.plain:
+ sanitized_text = strip_control_codes(new_text)
+ self._text[:] = [sanitized_text]
+ old_length = self._length
+ self._length = len(sanitized_text)
+ if old_length > self._length:
+ self._trim_spans()
+
+ @property
+ def spans(self) -> List[Span]:
+ """Get a reference to the internal list of spans."""
+ return self._spans
+
+ @spans.setter
+ def spans(self, spans: List[Span]) -> None:
+ """Set spans."""
+ self._spans = spans[:]
+
+ def blank_copy(self, plain: str = "") -> "Text":
+ """Return a new Text instance with copied meta data (but not the string or spans)."""
+ copy_self = Text(
+ plain,
+ style=self.style,
+ justify=self.justify,
+ overflow=self.overflow,
+ no_wrap=self.no_wrap,
+ end=self.end,
+ tab_size=self.tab_size,
+ )
+ return copy_self
+
+ def copy(self) -> "Text":
+ """Return a copy of this instance."""
+ copy_self = Text(
+ self.plain,
+ style=self.style,
+ justify=self.justify,
+ overflow=self.overflow,
+ no_wrap=self.no_wrap,
+ end=self.end,
+ tab_size=self.tab_size,
+ )
+ copy_self._spans[:] = self._spans
+ return copy_self
+
+ def stylize(
+ self,
+ style: Union[str, Style],
+ start: int = 0,
+ end: Optional[int] = None,
+ ) -> None:
+ """Apply a style to the text, or a portion of the text.
+
+ Args:
+ style (Union[str, Style]): Style instance or style definition to apply.
+ start (int): Start offset (negative indexing is supported). Defaults to 0.
+ end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
+ """
+ if style:
+ length = len(self)
+ if start < 0:
+ start = length + start
+ if end is None:
+ end = length
+ if end < 0:
+ end = length + end
+ if start >= length or end <= start:
+ # Span not in text or not valid
+ return
+ self._spans.append(Span(start, min(length, end), style))
+
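+ # Illustrative sketch (editor's addition): negative offsets count back from
+ # the end of the text, mirroring str indexing.
+ #
+ # >>> text = Text("Hello, World!")
+ # >>> text.stylize("bold", 0, 5)
+ # >>> text.stylize("italic", -6)
+ # >>> text.spans
+ # [Span(0, 5, 'bold'), Span(7, 13, 'italic')]
+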
+ def stylize_before(
+ self,
+ style: Union[str, Style],
+ start: int = 0,
+ end: Optional[int] = None,
+ ) -> None:
+ """Apply a style to the text, or a portion of the text. Styles will be applied before other styles already present.
+
+ Args:
+ style (Union[str, Style]): Style instance or style definition to apply.
+ start (int): Start offset (negative indexing is supported). Defaults to 0.
+ end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
+ """
+ if style:
+ length = len(self)
+ if start < 0:
+ start = length + start
+ if end is None:
+ end = length
+ if end < 0:
+ end = length + end
+ if start >= length or end <= start:
+ # Span not in text or not valid
+ return
+ self._spans.insert(0, Span(start, min(length, end), style))
+
+ def apply_meta(
+ self, meta: Dict[str, Any], start: int = 0, end: Optional[int] = None
+ ) -> None:
+ """Apply meta data to the text, or a portion of the text.
+
+ Args:
+ meta (Dict[str, Any]): A dict of meta information.
+ start (int): Start offset (negative indexing is supported). Defaults to 0.
+ end (Optional[int], optional): End offset (negative indexing is supported), or None for end of text. Defaults to None.
+
+ """
+ style = Style.from_meta(meta)
+ self.stylize(style, start=start, end=end)
+
+ def on(self, meta: Optional[Dict[str, Any]] = None, **handlers: Any) -> "Text":
+ """Apply event handlers (used by Textual project).
+
+ Example:
+ >>> from rich.text import Text
+ >>> text = Text("hello world")
+ >>> text.on(click="view.toggle('world')")
+
+ Args:
+ meta (Dict[str, Any]): Mapping of meta information.
+ **handlers: Keyword args are prefixed with "@" to define handlers.
+
+ Returns:
+ Text: Self is returned so that the method may be chained.
+ """
+ meta = {} if meta is None else meta
+ meta.update({f"@{key}": value for key, value in handlers.items()})
+ self.stylize(Style.from_meta(meta))
+ return self
+
+ def remove_suffix(self, suffix: str) -> None:
+ """Remove a suffix if it exists.
+
+ Args:
+ suffix (str): Suffix to remove.
+ """
+ if self.plain.endswith(suffix):
+ self.right_crop(len(suffix))
+
+ def get_style_at_offset(self, console: "Console", offset: int) -> Style:
+ """Get the style of a character at give offset.
+
+ Args:
+ console (~Console): Console where text will be rendered.
+ offset (int): Offset into text (negative indexing supported).
+
+ Returns:
+ Style: A Style instance.
+ """
+ # TODO: This is a little inefficient, it is only used by full justify
+ if offset < 0:
+ offset = len(self) + offset
+ get_style = console.get_style
+ style = get_style(self.style).copy()
+ for start, end, span_style in self._spans:
+ if end > offset >= start:
+ style += get_style(span_style, default="")
+ return style
+
+ def highlight_regex(
+ self,
+ re_highlight: str,
+ style: Optional[Union[GetStyleCallable, StyleType]] = None,
+ *,
+ style_prefix: str = "",
+ ) -> int:
+ """Highlight text with a regular expression, where group names are
+ translated to styles.
+
+ Args:
+ re_highlight (str): A regular expression.
+ style (Union[GetStyleCallable, StyleType]): Optional style to apply to whole match, or a callable
+ which accepts the matched text and returns a style. Defaults to None.
+ style_prefix (str, optional): Optional prefix to add to style group names.
+
+ Returns:
+ int: Number of regex matches
+ """
+ count = 0
+ append_span = self._spans.append
+ _Span = Span
+ plain = self.plain
+ for match in re.finditer(re_highlight, plain):
+ get_span = match.span
+ if style:
+ start, end = get_span()
+ match_style = style(plain[start:end]) if callable(style) else style
+ if match_style is not None and end > start:
+ append_span(_Span(start, end, match_style))
+
+ count += 1
+ for name in match.groupdict().keys():
+ start, end = get_span(name)
+ if start != -1 and end > start:
+ append_span(_Span(start, end, f"{style_prefix}{name}"))
+ return count
+
+ def highlight_words(
+ self,
+ words: Iterable[str],
+ style: Union[str, Style],
+ *,
+ case_sensitive: bool = True,
+ ) -> int:
+ """Highlight words with a style.
+
+ Args:
+ words (Iterable[str]): Words to highlight.
+ style (Union[str, Style]): Style to apply.
+ case_sensitive (bool, optional): Enable case sensitive matching. Defaults to True.
+
+ Returns:
+ int: Number of words highlighted.
+ """
+ re_words = "|".join(re.escape(word) for word in words)
+ add_span = self._spans.append
+ count = 0
+ _Span = Span
+ for match in re.finditer(
+ re_words, self.plain, flags=0 if case_sensitive else re.IGNORECASE
+ ):
+ start, end = match.span(0)
+ add_span(_Span(start, end, style))
+ count += 1
+ return count
+
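+ # Illustrative sketch (editor's addition): the words are OR-ed into one
+ # regular expression, so each occurrence matches at most once.
+ #
+ # >>> text = Text("ERROR error Error")
+ # >>> text.highlight_words(["error"], "bold red", case_sensitive=False)
+ # 3
+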
+ def rstrip(self) -> None:
+ """Strip whitespace from end of text."""
+ self.plain = self.plain.rstrip()
+
+ def rstrip_end(self, size: int) -> None:
+ """Remove whitespace beyond a certain width at the end of the text.
+
+ Args:
+ size (int): The desired size of the text.
+ """
+ text_length = len(self)
+ if text_length > size:
+ excess = text_length - size
+ whitespace_match = _re_whitespace.search(self.plain)
+ if whitespace_match is not None:
+ whitespace_count = len(whitespace_match.group(0))
+ self.right_crop(min(whitespace_count, excess))
+
+ def set_length(self, new_length: int) -> None:
+ """Set new length of the text, clipping or padding is required."""
+ length = len(self)
+ if length != new_length:
+ if length < new_length:
+ self.pad_right(new_length - length)
+ else:
+ self.right_crop(length - new_length)
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Iterable[Segment]:
+ tab_size: int = console.tab_size or self.tab_size or 8
+ justify = self.justify or options.justify or DEFAULT_JUSTIFY
+
+ overflow = self.overflow or options.overflow or DEFAULT_OVERFLOW
+
+ lines = self.wrap(
+ console,
+ options.max_width,
+ justify=justify,
+ overflow=overflow,
+ tab_size=tab_size or 8,
+ no_wrap=pick_bool(self.no_wrap, options.no_wrap, False),
+ )
+ all_lines = Text("\n").join(lines)
+ yield from all_lines.render(console, end=self.end)
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> Measurement:
+ text = self.plain
+ lines = text.splitlines()
+ max_text_width = max(cell_len(line) for line in lines) if lines else 0
+ words = text.split()
+ min_text_width = (
+ max(cell_len(word) for word in words) if words else max_text_width
+ )
+ return Measurement(min_text_width, max_text_width)
+
+ def render(self, console: "Console", end: str = "") -> Iterable["Segment"]:
+ """Render the text as Segments.
+
+ Args:
+ console (Console): Console instance.
+ end (Optional[str], optional): Optional end character.
+
+ Returns:
+ Iterable[Segment]: Result of render that may be written to the console.
+ """
+ _Segment = Segment
+ text = self.plain
+ if not self._spans:
+ yield Segment(text)
+ if end:
+ yield _Segment(end)
+ return
+ get_style = partial(console.get_style, default=Style.null())
+
+ enumerated_spans = list(enumerate(self._spans, 1))
+ style_map = {index: get_style(span.style) for index, span in enumerated_spans}
+ style_map[0] = get_style(self.style)
+
+ spans = [
+ (0, False, 0),
+ *((span.start, False, index) for index, span in enumerated_spans),
+ *((span.end, True, index) for index, span in enumerated_spans),
+ (len(text), True, 0),
+ ]
+ spans.sort(key=itemgetter(0, 1))
+
+ stack: List[int] = []
+ stack_append = stack.append
+ stack_pop = stack.remove
+
+ style_cache: Dict[Tuple[Style, ...], Style] = {}
+ style_cache_get = style_cache.get
+ combine = Style.combine
+
+ def get_current_style() -> Style:
+ """Construct current style from stack."""
+ styles = tuple(style_map[_style_id] for _style_id in sorted(stack))
+ cached_style = style_cache_get(styles)
+ if cached_style is not None:
+ return cached_style
+ current_style = combine(styles)
+ style_cache[styles] = current_style
+ return current_style
+
+ for (offset, leaving, style_id), (next_offset, _, _) in zip(spans, spans[1:]):
+ if leaving:
+ stack_pop(style_id)
+ else:
+ stack_append(style_id)
+ if next_offset > offset:
+ yield _Segment(text[offset:next_offset], get_current_style())
+ if end:
+ yield _Segment(end)
+
+ def join(self, lines: Iterable["Text"]) -> "Text":
+ """Join text together with this instance as the separator.
+
+ Args:
+ lines (Iterable[Text]): An iterable of Text instances to join.
+
+ Returns:
+ Text: A new text instance containing the joined text.
+ """
+
+ new_text = self.blank_copy()
+
+ def iter_text() -> Iterable["Text"]:
+ if self.plain:
+ for last, line in loop_last(lines):
+ yield line
+ if not last:
+ yield self
+ else:
+ yield from lines
+
+ extend_text = new_text._text.extend
+ append_span = new_text._spans.append
+ extend_spans = new_text._spans.extend
+ offset = 0
+ _Span = Span
+
+ for text in iter_text():
+ extend_text(text._text)
+ if text.style:
+ append_span(_Span(offset, offset + len(text), text.style))
+ extend_spans(
+ _Span(offset + start, offset + end, style)
+ for start, end, style in text._spans
+ )
+ offset += len(text)
+ new_text._length = offset
+ return new_text
+
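+ # Illustrative sketch (editor's addition): ``join`` mirrors str.join while
+ # preserving the separator's and each part's styles as spans.
+ #
+ # >>> parts = [Text("foo", style="bold"), Text("bar")]
+ # >>> Text(", ").join(parts).plain
+ # 'foo, bar'
+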
+ def expand_tabs(self, tab_size: Optional[int] = None) -> None:
+ """Converts tabs to spaces.
+
+ Args:
+ tab_size (int, optional): Size of tabs. Defaults to 8.
+
+ """
+ if "\t" not in self.plain:
+ return
+ pos = 0
+ if tab_size is None:
+ tab_size = self.tab_size
+ assert tab_size is not None
+ result = self.blank_copy()
+ append = result.append
+
+ _style = self.style
+ for line in self.split("\n", include_separator=True):
+ parts = line.split("\t", include_separator=True)
+ for part in parts:
+ if part.plain.endswith("\t"):
+ part._text = [part.plain[:-1] + " "]
+ append(part)
+ pos += len(part)
+ spaces = tab_size - ((pos - 1) % tab_size) - 1
+ if spaces:
+ append(" " * spaces, _style)
+ pos += spaces
+ else:
+ append(part)
+ self._text = [result.plain]
+ self._length = len(self.plain)
+ self._spans[:] = result._spans
+
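+ # Illustrative sketch (editor's addition): tabs expand to the next multiple
+ # of ``tab_size``, measured from the start of the line.
+ #
+ # >>> text = Text("a\tb", tab_size=4)
+ # >>> text.expand_tabs()
+ # >>> text.plain
+ # 'a   b'
+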
+ def truncate(
+ self,
+ max_width: int,
+ *,
+ overflow: Optional["OverflowMethod"] = None,
+ pad: bool = False,
+ ) -> None:
+ """Truncate text if it is longer that a given width.
+
+ Args:
+ max_width (int): Maximum number of characters in text.
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None, to use self.overflow.
+ pad (bool, optional): Pad with spaces if the length is less than max_width. Defaults to False.
+ """
+ _overflow = overflow or self.overflow or DEFAULT_OVERFLOW
+ if _overflow != "ignore":
+ length = cell_len(self.plain)
+ if length > max_width:
+ if _overflow == "ellipsis":
+ self.plain = set_cell_size(self.plain, max_width - 1) + "…"
+ else:
+ self.plain = set_cell_size(self.plain, max_width)
+ if pad and length < max_width:
+ spaces = max_width - length
+ self._text = [f"{self.plain}{' ' * spaces}"]
+ self._length = len(self.plain)
+
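+ # Illustrative sketch (editor's addition): with "ellipsis" overflow the text
+ # is cropped to ``max_width - 1`` cells and an ellipsis is appended.
+ #
+ # >>> text = Text("Hello, World!")
+ # >>> text.truncate(8, overflow="ellipsis")
+ # >>> text.plain
+ # 'Hello, …'
+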
+ def _trim_spans(self) -> None:
+ """Remove or modify any spans that are over the end of the text."""
+ max_offset = len(self.plain)
+ _Span = Span
+ self._spans[:] = [
+ (
+ span
+ if span.end < max_offset
+ else _Span(span.start, min(max_offset, span.end), span.style)
+ )
+ for span in self._spans
+ if span.start < max_offset
+ ]
+
+ def pad(self, count: int, character: str = " ") -> None:
+ """Pad left and right with a given number of characters.
+
+ Args:
+ count (int): Width of padding.
+ character (str, optional): Character to pad with. Defaults to " ".
+ """
+ assert len(character) == 1, "Character must be a string of length 1"
+ if count:
+ pad_characters = character * count
+ self.plain = f"{pad_characters}{self.plain}{pad_characters}"
+ _Span = Span
+ self._spans[:] = [
+ _Span(start + count, end + count, style)
+ for start, end, style in self._spans
+ ]
+
+ def pad_left(self, count: int, character: str = " ") -> None:
+ """Pad the left with a given character.
+
+ Args:
+ count (int): Number of characters to pad.
+ character (str, optional): Character to pad with. Defaults to " ".
+ """
+ assert len(character) == 1, "Character must be a string of length 1"
+ if count:
+ self.plain = f"{character * count}{self.plain}"
+ _Span = Span
+ self._spans[:] = [
+ _Span(start + count, end + count, style)
+ for start, end, style in self._spans
+ ]
+
+ def pad_right(self, count: int, character: str = " ") -> None:
+ """Pad the right with a given character.
+
+ Args:
+ count (int): Number of characters to pad.
+ character (str, optional): Character to pad with. Defaults to " ".
+ """
+ assert len(character) == 1, "Character must be a string of length 1"
+ if count:
+ self.plain = f"{self.plain}{character * count}"
+
+ def align(self, align: AlignMethod, width: int, character: str = " ") -> None:
+ """Align text to a given width.
+
+ Args:
+ align (AlignMethod): One of "left", "center", or "right".
+ width (int): Desired width.
+ character (str, optional): Character to pad with. Defaults to " ".
+ """
+ self.truncate(width)
+ excess_space = width - cell_len(self.plain)
+ if excess_space:
+ if align == "left":
+ self.pad_right(excess_space, character)
+ elif align == "center":
+ left = excess_space // 2
+ self.pad_left(left, character)
+ self.pad_right(excess_space - left, character)
+ else:
+ self.pad_left(excess_space, character)
+
+ def append(
+ self, text: Union["Text", str], style: Optional[Union[str, "Style"]] = None
+ ) -> "Text":
+ """Add text with an optional style.
+
+ Args:
+ text (Union[Text, str]): A str or Text to append.
+ style (str, optional): A style name. Defaults to None.
+
+ Returns:
+ Text: Returns self for chaining.
+ """
+
+ if not isinstance(text, (str, Text)):
+ raise TypeError("Only str or Text can be appended to Text")
+
+ if len(text):
+ if isinstance(text, str):
+ sanitized_text = strip_control_codes(text)
+ self._text.append(sanitized_text)
+ offset = len(self)
+ text_length = len(sanitized_text)
+ if style is not None:
+ self._spans.append(Span(offset, offset + text_length, style))
+ self._length += text_length
+ elif isinstance(text, Text):
+ _Span = Span
+ if style is not None:
+ raise ValueError(
+ "style must not be set when appending Text instance"
+ )
+ text_length = self._length
+ if text.style is not None:
+ self._spans.append(
+ _Span(text_length, text_length + len(text), text.style)
+ )
+ self._text.append(text.plain)
+ self._spans.extend(
+ _Span(start + text_length, end + text_length, style)
+ for start, end, style in text._spans
+ )
+ self._length += len(text)
+ return self
+
+ def append_text(self, text: "Text") -> "Text":
+ """Append another Text instance. This method is more performant that Text.append, but
+ only works for Text.
+
+ Returns:
+ Text: Returns self for chaining.
+ """
+ _Span = Span
+ text_length = self._length
+ if text.style is not None:
+ self._spans.append(_Span(text_length, text_length + len(text), text.style))
+ self._text.append(text.plain)
+ self._spans.extend(
+ _Span(start + text_length, end + text_length, style)
+ for start, end, style in text._spans
+ )
+ self._length += len(text)
+ return self
+
+ def append_tokens(
+ self, tokens: Iterable[Tuple[str, Optional[StyleType]]]
+ ) -> "Text":
+ """Append iterable of str and style. Style may be a Style instance or a str style definition.
+
+ Args:
+ tokens (Iterable[Tuple[str, Optional[StyleType]]]): An iterable of tuples containing str content and style.
+
+ Returns:
+ Text: Returns self for chaining.
+ """
+ append_text = self._text.append
+ append_span = self._spans.append
+ _Span = Span
+ offset = len(self)
+ for content, style in tokens:
+ append_text(content)
+ if style is not None:
+ append_span(_Span(offset, offset + len(content), style))
+ offset += len(content)
+ self._length = offset
+ return self
+
+ def copy_styles(self, text: "Text") -> None:
+ """Copy styles from another Text instance.
+
+ Args:
+ text (Text): A Text instance to copy styles from, must be the same length.
+ """
+ self._spans.extend(text._spans)
+
+ def split(
+ self,
+ separator: str = "\n",
+ *,
+ include_separator: bool = False,
+ allow_blank: bool = False,
+ ) -> Lines:
+ """Split rich text in to lines, preserving styles.
+
+ Args:
+ separator (str, optional): String to split on. Defaults to "\\\\n".
+ include_separator (bool, optional): Include the separator in the lines. Defaults to False.
+ allow_blank (bool, optional): Return a blank line if the text ends with a separator. Defaults to False.
+
+ Returns:
+ Lines: A list of Text instances, one per line of the original.
+ """
+ assert separator, "separator must not be empty"
+
+ text = self.plain
+ if separator not in text:
+ return Lines([self.copy()])
+
+ if include_separator:
+ lines = self.divide(
+ match.end() for match in re.finditer(re.escape(separator), text)
+ )
+ else:
+
+ def flatten_spans() -> Iterable[int]:
+ for match in re.finditer(re.escape(separator), text):
+ start, end = match.span()
+ yield start
+ yield end
+
+ lines = Lines(
+ line for line in self.divide(flatten_spans()) if line.plain != separator
+ )
+
+ if not allow_blank and text.endswith(separator):
+ lines.pop()
+
+ return lines
+
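+ # Illustrative sketch (editor's addition): a trailing separator does not
+ # produce a blank line unless ``allow_blank`` is set.
+ #
+ # >>> lines = Text("foo\nbar\n").split("\n")
+ # >>> [line.plain for line in lines]
+ # ['foo', 'bar']
+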
+ def divide(self, offsets: Iterable[int]) -> Lines:
+ """Divide text in to a number of lines at given offsets.
+
+ Args:
+ offsets (Iterable[int]): Offsets used to divide text.
+
+ Returns:
+ Lines: New Text instances between offsets.
+ """
+ _offsets = list(offsets)
+
+ if not _offsets:
+ return Lines([self.copy()])
+
+ text = self.plain
+ text_length = len(text)
+ divide_offsets = [0, *_offsets, text_length]
+ line_ranges = list(zip(divide_offsets, divide_offsets[1:]))
+
+ style = self.style
+ justify = self.justify
+ overflow = self.overflow
+ _Text = Text
+ new_lines = Lines(
+ _Text(
+ text[start:end],
+ style=style,
+ justify=justify,
+ overflow=overflow,
+ )
+ for start, end in line_ranges
+ )
+ if not self._spans:
+ return new_lines
+
+ _line_appends = [line._spans.append for line in new_lines._lines]
+ line_count = len(line_ranges)
+ _Span = Span
+
+ for span_start, span_end, style in self._spans:
+
+ lower_bound = 0
+ upper_bound = line_count
+ start_line_no = (lower_bound + upper_bound) // 2
+
+ while True:
+ line_start, line_end = line_ranges[start_line_no]
+ if span_start < line_start:
+ upper_bound = start_line_no - 1
+ elif span_start > line_end:
+ lower_bound = start_line_no + 1
+ else:
+ break
+ start_line_no = (lower_bound + upper_bound) // 2
+
+ if span_end < line_end:
+ end_line_no = start_line_no
+ else:
+ end_line_no = lower_bound = start_line_no
+ upper_bound = line_count
+
+ while True:
+ line_start, line_end = line_ranges[end_line_no]
+ if span_end < line_start:
+ upper_bound = end_line_no - 1
+ elif span_end > line_end:
+ lower_bound = end_line_no + 1
+ else:
+ break
+ end_line_no = (lower_bound + upper_bound) // 2
+
+ for line_no in range(start_line_no, end_line_no + 1):
+ line_start, line_end = line_ranges[line_no]
+ new_start = max(0, span_start - line_start)
+ new_end = min(span_end - line_start, line_end - line_start)
+ if new_end > new_start:
+ _line_appends[line_no](_Span(new_start, new_end, style))
+
+ return new_lines
+
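+ # Illustrative sketch (editor's addition): offsets are cut points, so n
+ # offsets yield n + 1 pieces, each with its spans re-based (see above).
+ #
+ # >>> pieces = Text("Hello, World!").divide([5, 7])
+ # >>> [piece.plain for piece in pieces]
+ # ['Hello', ', ', 'World!']
+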
+ def right_crop(self, amount: int = 1) -> None:
+ """Remove a number of characters from the end of the text."""
+ max_offset = len(self.plain) - amount
+ _Span = Span
+ self._spans[:] = [
+ (
+ span
+ if span.end < max_offset
+ else _Span(span.start, min(max_offset, span.end), span.style)
+ )
+ for span in self._spans
+ if span.start < max_offset
+ ]
+ self._text = [self.plain[:-amount]]
+ self._length -= amount
+
+ def wrap(
+ self,
+ console: "Console",
+ width: int,
+ *,
+ justify: Optional["JustifyMethod"] = None,
+ overflow: Optional["OverflowMethod"] = None,
+ tab_size: int = 8,
+ no_wrap: Optional[bool] = None,
+ ) -> Lines:
+ """Word wrap the text.
+
+ Args:
+ console (Console): Console instance.
+ width (int): Number of characters per line.
+ justify (str, optional): Justify method: "default", "left", "center", "full", "right". Defaults to "default".
+ overflow (str, optional): Overflow method: "crop", "fold", or "ellipsis". Defaults to None.
+ tab_size (int, optional): Default tab size. Defaults to 8.
+ no_wrap (bool, optional): Disable wrapping. Defaults to False.
+
+ Returns:
+ Lines: The wrapped lines.
+ """
+ wrap_justify = justify or self.justify or DEFAULT_JUSTIFY
+ wrap_overflow = overflow or self.overflow or DEFAULT_OVERFLOW
+
+ no_wrap = pick_bool(no_wrap, self.no_wrap, False) or overflow == "ignore"
+
+ lines = Lines()
+ for line in self.split(allow_blank=True):
+ if "\t" in line:
+ line.expand_tabs(tab_size)
+ if no_wrap:
+ new_lines = Lines([line])
+ else:
+ offsets = divide_line(str(line), width, fold=wrap_overflow == "fold")
+ new_lines = line.divide(offsets)
+ for line in new_lines:
+ line.rstrip_end(width)
+ if wrap_justify:
+ new_lines.justify(
+ console, width, justify=wrap_justify, overflow=wrap_overflow
+ )
+ for line in new_lines:
+ line.truncate(width, overflow=wrap_overflow)
+ lines.extend(new_lines)
+ return lines
+
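+ # Illustrative sketch (editor's addition, assuming the vendored import
+ # path): wrapping needs a Console for measurement and style resolution.
+ #
+ # >>> from pip._vendor.rich.console import Console
+ # >>> lines = Text("The quick brown fox").wrap(Console(), 10)
+ # >>> len(lines)
+ # 2
+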
+ def fit(self, width: int) -> Lines:
+ """Fit the text in to given width by chopping in to lines.
+
+ Args:
+ width (int): Maximum characters in a line.
+
+ Returns:
+ Lines: List of lines.
+ """
+ lines: Lines = Lines()
+ append = lines.append
+ for line in self.split():
+ line.set_length(width)
+ append(line)
+ return lines
+
+ def detect_indentation(self) -> int:
+ """Auto-detect indentation of code.
+
+ Returns:
+ int: Number of spaces used to indent code.
+ """
+
+ _indentations = {
+ len(match.group(1))
+ for match in re.finditer(r"^( *)(.*)$", self.plain, flags=re.MULTILINE)
+ }
+
+ try:
+ indentation = (
+ reduce(gcd, [indent for indent in _indentations if not indent % 2]) or 1
+ )
+ except TypeError:
+ indentation = 1
+
+ return indentation
+
+ def with_indent_guides(
+ self,
+ indent_size: Optional[int] = None,
+ *,
+ character: str = "│",
+ style: StyleType = "dim green",
+ ) -> "Text":
+ """Adds indent guide lines to text.
+
+ Args:
+ indent_size (Optional[int]): Size of indentation, or None to auto detect. Defaults to None.
+ character (str, optional): Character to use for indentation. Defaults to "│".
+ style (Union[Style, str], optional): Style of indent guides.
+
+ Returns:
+ Text: New text with indentation guides.
+ """
+
+ _indent_size = self.detect_indentation() if indent_size is None else indent_size
+
+ text = self.copy()
+ text.expand_tabs()
+ indent_line = f"{character}{' ' * (_indent_size - 1)}"
+
+ re_indent = re.compile(r"^( *)(.*)$")
+ new_lines: List[Text] = []
+ add_line = new_lines.append
+ blank_lines = 0
+ for line in text.split(allow_blank=True):
+ match = re_indent.match(line.plain)
+ if not match or not match.group(2):
+ blank_lines += 1
+ continue
+ indent = match.group(1)
+ full_indents, remaining_space = divmod(len(indent), _indent_size)
+ new_indent = f"{indent_line * full_indents}{' ' * remaining_space}"
+ line.plain = new_indent + line.plain[len(new_indent) :]
+ line.stylize(style, 0, len(new_indent))
+ if blank_lines:
+ new_lines.extend([Text(new_indent, style=style)] * blank_lines)
+ blank_lines = 0
+ add_line(line)
+ if blank_lines:
+ new_lines.extend([Text("", style=style)] * blank_lines)
+
+ new_text = text.blank_copy("\n").join(new_lines)
+ return new_text
+
+
+if __name__ == "__main__": # pragma: no cover
+ from pip._vendor.rich.console import Console
+
+ text = Text(
+ """\nLorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.\n"""
+ )
+ text.highlight_words(["Lorem"], "bold")
+ text.highlight_words(["ipsum"], "italic")
+
+ console = Console()
+
+ console.rule("justify='left'")
+ console.print(text, style="red")
+ console.print()
+
+ console.rule("justify='center'")
+ console.print(text, style="green", justify="center")
+ console.print()
+
+ console.rule("justify='right'")
+ console.print(text, style="blue", justify="right")
+ console.print()
+
+ console.rule("justify='full'")
+ console.print(text, style="magenta", justify="full")
+ console.print()
diff --git a/third_party/python/pip/pip/_vendor/rich/theme.py b/third_party/python/pip/pip/_vendor/rich/theme.py
new file mode 100644
index 0000000000..bfb3c7f821
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/theme.py
@@ -0,0 +1,112 @@
+import configparser
+from typing import Dict, List, IO, Mapping, Optional
+
+from .default_styles import DEFAULT_STYLES
+from .style import Style, StyleType
+
+
+class Theme:
+ """A container for style information, used by :class:`~rich.console.Console`.
+
+ Args:
+ styles (Dict[str, Style], optional): A mapping of style names onto styles. Defaults to None for a theme with no styles.
+ inherit (bool, optional): Inherit default styles. Defaults to True.
+ """
+
+ styles: Dict[str, Style]
+
+ def __init__(
+ self, styles: Optional[Mapping[str, StyleType]] = None, inherit: bool = True
+ ):
+ self.styles = DEFAULT_STYLES.copy() if inherit else {}
+ if styles is not None:
+ self.styles.update(
+ {
+ name: style if isinstance(style, Style) else Style.parse(style)
+ for name, style in styles.items()
+ }
+ )
+
+ @property
+ def config(self) -> str:
+ """Get contents of a config file for this theme."""
+ config = "[styles]\n" + "\n".join(
+ f"{name} = {style}" for name, style in sorted(self.styles.items())
+ )
+ return config
+
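+ # Illustrative sketch (editor's addition): ``config`` round-trips with
+ # ``from_file``/``read`` via the configparser format.
+ #
+ # >>> theme = Theme({"warning": "bold yellow"}, inherit=False)
+ # >>> print(theme.config)
+ # [styles]
+ # warning = bold yellow
+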
+ @classmethod
+ def from_file(
+ cls, config_file: IO[str], source: Optional[str] = None, inherit: bool = True
+ ) -> "Theme":
+ """Load a theme from a text mode file.
+
+ Args:
+ config_file (IO[str]): An open config file.
+ source (str, optional): The filename of the open file. Defaults to None.
+ inherit (bool, optional): Inherit default styles. Defaults to True.
+
+ Returns:
+ Theme: A new theme instance.
+ """
+ config = configparser.ConfigParser()
+ config.read_file(config_file, source=source)
+ styles = {name: Style.parse(value) for name, value in config.items("styles")}
+ theme = Theme(styles, inherit=inherit)
+ return theme
+
+ @classmethod
+ def read(cls, path: str, inherit: bool = True) -> "Theme":
+ """Read a theme from a path.
+
+ Args:
+ path (str): Path to a config file readable by Python configparser module.
+ inherit (bool, optional): Inherit default styles. Defaults to True.
+
+ Returns:
+ Theme: A new theme instance.
+ """
+ with open(path, "rt") as config_file:
+ return cls.from_file(config_file, source=path, inherit=inherit)
+
+
+class ThemeStackError(Exception):
+ """Base exception for errors related to the theme stack."""
+
+
+class ThemeStack:
+ """A stack of themes.
+
+ Args:
+ theme (Theme): A theme instance
+ """
+
+ def __init__(self, theme: Theme) -> None:
+ self._entries: List[Dict[str, Style]] = [theme.styles]
+ self.get = self._entries[-1].get
+
+ def push_theme(self, theme: Theme, inherit: bool = True) -> None:
+ """Push a theme on the top of the stack.
+
+ Args:
+ theme (Theme): A Theme instance.
+ inherit (bool, optional): Inherit styles from the current top of the stack. Defaults to True.
+ """
+ styles: Dict[str, Style]
+ styles = (
+ {**self._entries[-1], **theme.styles} if inherit else theme.styles.copy()
+ )
+ self._entries.append(styles)
+ self.get = self._entries[-1].get
+
+ def pop_theme(self) -> None:
+ """Pop (and discard) the top-most theme."""
+ if len(self._entries) == 1:
+ raise ThemeStackError("Unable to pop base theme")
+ self._entries.pop()
+ self.get = self._entries[-1].get
+
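+# Illustrative sketch (editor's addition): pushed themes shadow earlier
+# styles; popping restores the previous lookup table.
+#
+# >>> stack = ThemeStack(Theme({"info": "dim"}, inherit=False))
+# >>> stack.push_theme(Theme({"info": "bold"}, inherit=False))
+# >>> str(stack.get("info"))
+# 'bold'
+# >>> stack.pop_theme()
+# >>> str(stack.get("info"))
+# 'dim'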
+
+if __name__ == "__main__": # pragma: no cover
+ theme = Theme()
+ print(theme.config)
diff --git a/third_party/python/pip/pip/_vendor/rich/themes.py b/third_party/python/pip/pip/_vendor/rich/themes.py
new file mode 100644
index 0000000000..bf6db104a2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/themes.py
@@ -0,0 +1,5 @@
+from .default_styles import DEFAULT_STYLES
+from .theme import Theme
+
+
+DEFAULT = Theme(DEFAULT_STYLES)
diff --git a/third_party/python/pip/pip/_vendor/rich/traceback.py b/third_party/python/pip/pip/_vendor/rich/traceback.py
new file mode 100644
index 0000000000..1f481298f6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/traceback.py
@@ -0,0 +1,677 @@
+from __future__ import absolute_import
+
+import os
+import platform
+import sys
+from dataclasses import dataclass, field
+from traceback import walk_tb
+from types import ModuleType, TracebackType
+from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Type, Union
+
+from pip._vendor.pygments.lexers import guess_lexer_for_filename
+from pip._vendor.pygments.token import Comment, Keyword, Name, Number, Operator, String
+from pip._vendor.pygments.token import Text as TextToken
+from pip._vendor.pygments.token import Token
+from pip._vendor.pygments.util import ClassNotFound
+
+from . import pretty
+from ._loop import loop_last
+from .columns import Columns
+from .console import Console, ConsoleOptions, ConsoleRenderable, RenderResult, group
+from .constrain import Constrain
+from .highlighter import RegexHighlighter, ReprHighlighter
+from .panel import Panel
+from .scope import render_scope
+from .style import Style
+from .syntax import Syntax
+from .text import Text
+from .theme import Theme
+
+WINDOWS = platform.system() == "Windows"
+
+LOCALS_MAX_LENGTH = 10
+LOCALS_MAX_STRING = 80
+
+
+def install(
+ *,
+ console: Optional[Console] = None,
+ width: Optional[int] = 100,
+ extra_lines: int = 3,
+ theme: Optional[str] = None,
+ word_wrap: bool = False,
+ show_locals: bool = False,
+ indent_guides: bool = True,
+ suppress: Iterable[Union[str, ModuleType]] = (),
+ max_frames: int = 100,
+) -> Callable[[Type[BaseException], BaseException, Optional[TracebackType]], Any]:
+ """Install a rich traceback handler.
+
+ Once installed, any tracebacks will be printed with syntax highlighting and rich formatting.
+
+
+ Args:
+ console (Optional[Console], optional): Console to write exception to. Default uses internal Console instance.
+ width (Optional[int], optional): Width (in characters) of traceback. Defaults to 100.
+ extra_lines (int, optional): Extra lines of code. Defaults to 3.
+ theme (Optional[str], optional): Pygments theme to use in traceback. Defaults to ``None`` which will pick
+ a theme appropriate for the platform.
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+ suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+
+ Returns:
+ Callable: The previous exception handler that was replaced.
+
+ """
+ traceback_console = Console(file=sys.stderr) if console is None else console
+
+ def excepthook(
+ type_: Type[BaseException],
+ value: BaseException,
+ traceback: Optional[TracebackType],
+ ) -> None:
+ traceback_console.print(
+ Traceback.from_exception(
+ type_,
+ value,
+ traceback,
+ width=width,
+ extra_lines=extra_lines,
+ theme=theme,
+ word_wrap=word_wrap,
+ show_locals=show_locals,
+ indent_guides=indent_guides,
+ suppress=suppress,
+ max_frames=max_frames,
+ )
+ )
+
+ def ipy_excepthook_closure(ip: Any) -> None: # pragma: no cover
+ tb_data = {} # store information about showtraceback call
+ default_showtraceback = ip.showtraceback # keep reference of default traceback
+
+ def ipy_show_traceback(*args: Any, **kwargs: Any) -> None:
+ """wrap the default ip.showtraceback to store info for ip._showtraceback"""
+ nonlocal tb_data
+ tb_data = kwargs
+ default_showtraceback(*args, **kwargs)
+
+ def ipy_display_traceback(
+ *args: Any, is_syntax: bool = False, **kwargs: Any
+ ) -> None:
+ """Internally called traceback from ip._showtraceback"""
+ nonlocal tb_data
+ exc_tuple = ip._get_exc_info()
+
+ # do not display trace on syntax error
+ tb: Optional[TracebackType] = None if is_syntax else exc_tuple[2]
+
+ # determine correct tb_offset
+ compiled = tb_data.get("running_compiled_code", False)
+ tb_offset = tb_data.get("tb_offset", 1 if compiled else 0)
+ # remove ipython internal frames from trace with tb_offset
+ for _ in range(tb_offset):
+ if tb is None:
+ break
+ tb = tb.tb_next
+
+ excepthook(exc_tuple[0], exc_tuple[1], tb)
+ tb_data = {} # clear data upon usage
+
+ # replace _showtraceback instead of showtraceback to allow ipython features such as debugging to work
+ # this is also what the ipython docs recommends to modify when subclassing InteractiveShell
+ ip._showtraceback = ipy_display_traceback
+ # add wrapper to capture tb_data
+ ip.showtraceback = ipy_show_traceback
+ ip.showsyntaxerror = lambda *args, **kwargs: ipy_display_traceback(
+ *args, is_syntax=True, **kwargs
+ )
+
+ try: # pragma: no cover
+ # if within ipython, use customized traceback
+ ip = get_ipython() # type: ignore[name-defined]
+ ipy_excepthook_closure(ip)
+ return sys.excepthook
+ except Exception:
+ # otherwise use default system hook
+ old_excepthook = sys.excepthook
+ sys.excepthook = excepthook
+ return old_excepthook
+
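+# Usage sketch (editor's addition, assuming the vendored import path): call
+# ``install`` once at startup; uncaught exceptions are then rendered by rich.
+#
+# from pip._vendor.rich.traceback import install
+# install(show_locals=True)
+# 1 / 0  # this ZeroDivisionError would now be richly formatted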
+
+@dataclass
+class Frame:
+ filename: str
+ lineno: int
+ name: str
+ line: str = ""
+ locals: Optional[Dict[str, pretty.Node]] = None
+
+
+@dataclass
+class _SyntaxError:
+ offset: int
+ filename: str
+ line: str
+ lineno: int
+ msg: str
+
+
+@dataclass
+class Stack:
+ exc_type: str
+ exc_value: str
+ syntax_error: Optional[_SyntaxError] = None
+ is_cause: bool = False
+ frames: List[Frame] = field(default_factory=list)
+
+
+@dataclass
+class Trace:
+ stacks: List[Stack]
+
+
+class PathHighlighter(RegexHighlighter):
+ highlights = [r"(?P<dim>.*/)(?P<bold>.+)"]
+
+
+class Traceback:
+ """A Console renderable that renders a traceback.
+
+ Args:
+ trace (Trace, optional): A `Trace` object produced from `extract`. Defaults to None, which uses
+ the last exception.
+ width (Optional[int], optional): Number of characters used to render the traceback. Defaults to 100.
+ extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+ theme (str, optional): Override pygments theme used in traceback.
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to 10.
+ locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+ suppress (Sequence[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+ max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+
+ """
+
+ LEXERS = {
+ "": "text",
+ ".py": "python",
+ ".pxd": "cython",
+ ".pyx": "cython",
+ ".pxi": "pyrex",
+ }
+
+ def __init__(
+ self,
+ trace: Optional[Trace] = None,
+ width: Optional[int] = 100,
+ extra_lines: int = 3,
+ theme: Optional[str] = None,
+ word_wrap: bool = False,
+ show_locals: bool = False,
+ indent_guides: bool = True,
+ locals_max_length: int = LOCALS_MAX_LENGTH,
+ locals_max_string: int = LOCALS_MAX_STRING,
+ suppress: Iterable[Union[str, ModuleType]] = (),
+ max_frames: int = 100,
+ ):
+ if trace is None:
+ exc_type, exc_value, traceback = sys.exc_info()
+ if exc_type is None or exc_value is None or traceback is None:
+ raise ValueError(
+ "Value for 'trace' required if not called in except: block"
+ )
+ trace = self.extract(
+ exc_type, exc_value, traceback, show_locals=show_locals
+ )
+ self.trace = trace
+ self.width = width
+ self.extra_lines = extra_lines
+ self.theme = Syntax.get_theme(theme or "ansi_dark")
+ self.word_wrap = word_wrap
+ self.show_locals = show_locals
+ self.indent_guides = indent_guides
+ self.locals_max_length = locals_max_length
+ self.locals_max_string = locals_max_string
+
+ self.suppress: Sequence[str] = []
+ for suppress_entity in suppress:
+ if not isinstance(suppress_entity, str):
+ assert (
+ suppress_entity.__file__ is not None
+ ), f"{suppress_entity!r} must be a module with '__file__' attribute"
+ path = os.path.dirname(suppress_entity.__file__)
+ else:
+ path = suppress_entity
+ path = os.path.normpath(os.path.abspath(path))
+ self.suppress.append(path)
+ self.max_frames = max(4, max_frames) if max_frames > 0 else 0
+
+ @classmethod
+ def from_exception(
+ cls,
+ exc_type: Type[Any],
+ exc_value: BaseException,
+ traceback: Optional[TracebackType],
+ width: Optional[int] = 100,
+ extra_lines: int = 3,
+ theme: Optional[str] = None,
+ word_wrap: bool = False,
+ show_locals: bool = False,
+ indent_guides: bool = True,
+ locals_max_length: int = LOCALS_MAX_LENGTH,
+ locals_max_string: int = LOCALS_MAX_STRING,
+ suppress: Iterable[Union[str, ModuleType]] = (),
+ max_frames: int = 100,
+ ) -> "Traceback":
+ """Create a traceback from exception info
+
+ Args:
+ exc_type (Type[BaseException]): Exception type.
+ exc_value (BaseException): Exception value.
+ traceback (TracebackType): Python Traceback object.
+ width (Optional[int], optional): Number of characters used to render the traceback. Defaults to 100.
+ extra_lines (int, optional): Additional lines of code to render. Defaults to 3.
+ theme (str, optional): Override pygments theme used in traceback.
+ word_wrap (bool, optional): Enable word wrapping of long lines. Defaults to False.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ indent_guides (bool, optional): Enable indent guides in code and locals. Defaults to True.
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to 10.
+ locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+ suppress (Iterable[Union[str, ModuleType]]): Optional sequence of modules or paths to exclude from traceback.
+ max_frames (int): Maximum number of frames to show in a traceback, 0 for no maximum. Defaults to 100.
+
+ Returns:
+ Traceback: A Traceback instance that may be printed.
+ """
+ rich_traceback = cls.extract(
+ exc_type, exc_value, traceback, show_locals=show_locals
+ )
+ return cls(
+ rich_traceback,
+ width=width,
+ extra_lines=extra_lines,
+ theme=theme,
+ word_wrap=word_wrap,
+ show_locals=show_locals,
+ indent_guides=indent_guides,
+ locals_max_length=locals_max_length,
+ locals_max_string=locals_max_string,
+ suppress=suppress,
+ max_frames=max_frames,
+ )
+
+ @classmethod
+ def extract(
+ cls,
+ exc_type: Type[BaseException],
+ exc_value: BaseException,
+ traceback: Optional[TracebackType],
+ show_locals: bool = False,
+ locals_max_length: int = LOCALS_MAX_LENGTH,
+ locals_max_string: int = LOCALS_MAX_STRING,
+ ) -> Trace:
+ """Extract traceback information.
+
+ Args:
+ exc_type (Type[BaseException]): Exception type.
+ exc_value (BaseException): Exception value.
+ traceback (TracebackType): Python Traceback object.
+ show_locals (bool, optional): Enable display of local variables. Defaults to False.
+ locals_max_length (int, optional): Maximum length of containers before abbreviating, or None for no abbreviation.
+ Defaults to 10.
+ locals_max_string (int, optional): Maximum length of string before truncating, or None to disable. Defaults to 80.
+
+ Returns:
+ Trace: A Trace instance which you can use to construct a `Traceback`.
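+
+        Example (illustrative; this mirrors how ``from_exception`` uses the
+        method internally)::
+
+            trace = Traceback.extract(exc_type, exc_value, tb)
+            rich_traceback = Traceback(trace)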
+ """
+
+ stacks: List[Stack] = []
+ is_cause = False
+
+ from pip._vendor.rich import _IMPORT_CWD
+
+ def safe_str(_object: Any) -> str:
+ """Don't allow exceptions from __str__ to propagate."""
+ try:
+ return str(_object)
+ except Exception:
+ return "<exception str() failed>"
+
+ while True:
+ stack = Stack(
+ exc_type=safe_str(exc_type.__name__),
+ exc_value=safe_str(exc_value),
+ is_cause=is_cause,
+ )
+
+ if isinstance(exc_value, SyntaxError):
+ stack.syntax_error = _SyntaxError(
+ offset=exc_value.offset or 0,
+ filename=exc_value.filename or "?",
+ lineno=exc_value.lineno or 0,
+ line=exc_value.text or "",
+ msg=exc_value.msg,
+ )
+
+ stacks.append(stack)
+ append = stack.frames.append
+
+ for frame_summary, line_no in walk_tb(traceback):
+ filename = frame_summary.f_code.co_filename
+ if filename and not filename.startswith("<"):
+ if not os.path.isabs(filename):
+ filename = os.path.join(_IMPORT_CWD, filename)
+ if frame_summary.f_locals.get("_rich_traceback_omit", False):
+ continue
+ frame = Frame(
+ filename=filename or "?",
+ lineno=line_no,
+ name=frame_summary.f_code.co_name,
+ locals={
+ key: pretty.traverse(
+ value,
+ max_length=locals_max_length,
+ max_string=locals_max_string,
+ )
+ for key, value in frame_summary.f_locals.items()
+ }
+ if show_locals
+ else None,
+ )
+ append(frame)
+ if frame_summary.f_locals.get("_rich_traceback_guard", False):
+ del stack.frames[:]
+
+ cause = getattr(exc_value, "__cause__", None)
+ if cause:
+ exc_type = cause.__class__
+ exc_value = cause
+ # __traceback__ can be None, e.g. for exceptions raised by the
+ # 'multiprocessing' module
+ traceback = cause.__traceback__
+ is_cause = True
+ continue
+
+ cause = exc_value.__context__
+ if cause and not getattr(exc_value, "__suppress_context__", False):
+ exc_type = cause.__class__
+ exc_value = cause
+ traceback = cause.__traceback__
+ is_cause = False
+ continue
+ # No cover, code is reached but coverage doesn't recognize it.
+ break # pragma: no cover
+
+ trace = Trace(stacks=stacks)
+ return trace
+
+ def __rich_console__(
+ self, console: Console, options: ConsoleOptions
+ ) -> RenderResult:
+ theme = self.theme
+ background_style = theme.get_background_style()
+ token_style = theme.get_style_for_token
+
+ traceback_theme = Theme(
+ {
+ "pretty": token_style(TextToken),
+ "pygments.text": token_style(Token),
+ "pygments.string": token_style(String),
+ "pygments.function": token_style(Name.Function),
+ "pygments.number": token_style(Number),
+ "repr.indent": token_style(Comment) + Style(dim=True),
+ "repr.str": token_style(String),
+ "repr.brace": token_style(TextToken) + Style(bold=True),
+ "repr.number": token_style(Number),
+ "repr.bool_true": token_style(Keyword.Constant),
+ "repr.bool_false": token_style(Keyword.Constant),
+ "repr.none": token_style(Keyword.Constant),
+ "scope.border": token_style(String.Delimiter),
+ "scope.equals": token_style(Operator),
+ "scope.key": token_style(Name),
+ "scope.key.special": token_style(Name.Constant) + Style(dim=True),
+ },
+ inherit=False,
+ )
+
+ highlighter = ReprHighlighter()
+ for last, stack in loop_last(reversed(self.trace.stacks)):
+ if stack.frames:
+ stack_renderable: ConsoleRenderable = Panel(
+ self._render_stack(stack),
+ title="[traceback.title]Traceback [dim](most recent call last)",
+ style=background_style,
+ border_style="traceback.border",
+ expand=True,
+ padding=(0, 1),
+ )
+ stack_renderable = Constrain(stack_renderable, self.width)
+ with console.use_theme(traceback_theme):
+ yield stack_renderable
+ if stack.syntax_error is not None:
+ with console.use_theme(traceback_theme):
+ yield Constrain(
+ Panel(
+ self._render_syntax_error(stack.syntax_error),
+ style=background_style,
+ border_style="traceback.border.syntax_error",
+ expand=True,
+ padding=(0, 1),
+ width=self.width,
+ ),
+ self.width,
+ )
+ yield Text.assemble(
+ (f"{stack.exc_type}: ", "traceback.exc_type"),
+ highlighter(stack.syntax_error.msg),
+ )
+ elif stack.exc_value:
+ yield Text.assemble(
+ (f"{stack.exc_type}: ", "traceback.exc_type"),
+ highlighter(stack.exc_value),
+ )
+ else:
+ yield Text.assemble((f"{stack.exc_type}", "traceback.exc_type"))
+
+ if not last:
+ if stack.is_cause:
+ yield Text.from_markup(
+ "\n[i]The above exception was the direct cause of the following exception:\n",
+ )
+ else:
+ yield Text.from_markup(
+ "\n[i]During handling of the above exception, another exception occurred:\n",
+ )
+
+ @group()
+ def _render_syntax_error(self, syntax_error: _SyntaxError) -> RenderResult:
+ highlighter = ReprHighlighter()
+ path_highlighter = PathHighlighter()
+ if syntax_error.filename != "<stdin>":
+ text = Text.assemble(
+ (f" {syntax_error.filename}", "pygments.string"),
+ (":", "pygments.text"),
+ (str(syntax_error.lineno), "pygments.number"),
+ style="pygments.text",
+ )
+ yield path_highlighter(text)
+ syntax_error_text = highlighter(syntax_error.line.rstrip())
+ syntax_error_text.no_wrap = True
+ offset = min(syntax_error.offset - 1, len(syntax_error_text))
+        syntax_error_text.stylize("bold underline", offset, offset + 1)  # style the offending character
+ syntax_error_text += Text.from_markup(
+ "\n" + " " * offset + "[traceback.offset]▲[/]",
+ style="pygments.text",
+ )
+ yield syntax_error_text
+
+ @classmethod
+ def _guess_lexer(cls, filename: str, code: str) -> str:
+ ext = os.path.splitext(filename)[-1]
+ if not ext:
+ # No extension, look at first line to see if it is a hashbang
+ # Note, this is an educated guess and not a guarantee
+ # If it fails, the only downside is that the code is highlighted strangely
+            new_line_index = code.find("\n")  # find() returns -1 when there is no newline
+ first_line = code[:new_line_index] if new_line_index != -1 else code
+ if first_line.startswith("#!") and "python" in first_line.lower():
+ return "python"
+ try:
+ return cls.LEXERS.get(ext) or guess_lexer_for_filename(filename, code).name
+ except ClassNotFound:
+ return "text"
+
+ @group()
+ def _render_stack(self, stack: Stack) -> RenderResult:
+ path_highlighter = PathHighlighter()
+ theme = self.theme
+ code_cache: Dict[str, str] = {}
+
+ def read_code(filename: str) -> str:
+ """Read files, and cache results on filename.
+
+ Args:
+ filename (str): Filename to read
+
+ Returns:
+ str: Contents of file
+ """
+ code = code_cache.get(filename)
+ if code is None:
+ with open(
+ filename, "rt", encoding="utf-8", errors="replace"
+ ) as code_file:
+ code = code_file.read()
+ code_cache[filename] = code
+ return code
+
+ def render_locals(frame: Frame) -> Iterable[ConsoleRenderable]:
+ if frame.locals:
+ yield render_scope(
+ frame.locals,
+ title="locals",
+ indent_guides=self.indent_guides,
+ max_length=self.locals_max_length,
+ max_string=self.locals_max_string,
+ )
+
+ exclude_frames: Optional[range] = None
+ if self.max_frames != 0:
+ exclude_frames = range(
+ self.max_frames // 2,
+ len(stack.frames) - self.max_frames // 2,
+ )
+
+ excluded = False
+ for frame_index, frame in enumerate(stack.frames):
+
+ if exclude_frames and frame_index in exclude_frames:
+ excluded = True
+ continue
+
+ if excluded:
+ assert exclude_frames is not None
+ yield Text(
+ f"\n... {len(exclude_frames)} frames hidden ...",
+ justify="center",
+ style="traceback.error",
+ )
+ excluded = False
+
+ first = frame_index == 0
+ frame_filename = frame.filename
+ suppressed = any(frame_filename.startswith(path) for path in self.suppress)
+
+ text = Text.assemble(
+ path_highlighter(Text(frame.filename, style="pygments.string")),
+ (":", "pygments.text"),
+ (str(frame.lineno), "pygments.number"),
+ " in ",
+ (frame.name, "pygments.function"),
+ style="pygments.text",
+ )
+ if not frame.filename.startswith("<") and not first:
+ yield ""
+ yield text
+ if frame.filename.startswith("<"):
+ yield from render_locals(frame)
+ continue
+ if not suppressed:
+ try:
+ code = read_code(frame.filename)
+ lexer_name = self._guess_lexer(frame.filename, code)
+ syntax = Syntax(
+ code,
+ lexer_name,
+ theme=theme,
+ line_numbers=True,
+ line_range=(
+ frame.lineno - self.extra_lines,
+ frame.lineno + self.extra_lines,
+ ),
+ highlight_lines={frame.lineno},
+ word_wrap=self.word_wrap,
+ code_width=88,
+ indent_guides=self.indent_guides,
+ dedent=False,
+ )
+ yield ""
+ except Exception as error:
+ yield Text.assemble(
+ (f"\n{error}", "traceback.error"),
+ )
+ else:
+ yield (
+ Columns(
+ [
+ syntax,
+ *render_locals(frame),
+ ],
+ padding=1,
+ )
+ if frame.locals
+ else syntax
+ )
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ from .console import Console
+
+ console = Console()
+ import sys
+
+    def bar(a: Any) -> None:  # 这是对亚洲语言支持的测试。面对模棱两可的想法,拒绝猜测的诱惑
+        # (The Chinese comment above is kept as a CJK rendering test; it reads:
+        # "This is a test of Asian language support. In the face of ambiguity,
+        # refuse the temptation to guess.")
+ one = 1
+ print(one / a)
+
+ def foo(a: Any) -> None:
+ _rich_traceback_guard = True
+ zed = {
+ "characters": {
+ "Paul Atreides",
+ "Vladimir Harkonnen",
+ "Thufir Hawat",
+ "Duncan Idaho",
+ },
+ "atomic_types": (None, False, True),
+ }
+ bar(a)
+
+ def error() -> None:
+
+ try:
+ try:
+ foo(0)
+ except:
+ slfkjsldkfj # type: ignore[name-defined]
+ except:
+ console.print_exception(show_locals=True)
+
+ error()
diff --git a/third_party/python/pip/pip/_vendor/rich/tree.py b/third_party/python/pip/pip/_vendor/rich/tree.py
new file mode 100644
index 0000000000..afe8da1a4a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/rich/tree.py
@@ -0,0 +1,251 @@
+from typing import Iterator, List, Optional, Tuple
+
+from ._loop import loop_first, loop_last
+from .console import Console, ConsoleOptions, RenderableType, RenderResult
+from .jupyter import JupyterMixin
+from .measure import Measurement
+from .segment import Segment
+from .style import Style, StyleStack, StyleType
+from .styled import Styled
+
+
+class Tree(JupyterMixin):
+ """A renderable for a tree structure.
+
+ Args:
+ label (RenderableType): The renderable or str for the tree label.
+ style (StyleType, optional): Style of this tree. Defaults to "tree".
+ guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
+ expanded (bool, optional): Also display children. Defaults to True.
+ highlight (bool, optional): Highlight renderable (if str). Defaults to False.
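+
+    Example (an illustrative sketch)::
+
+        tree = Tree("root")
+        branch = tree.add("branch")
+        branch.add("leaf")
+        # printing the tree on a Console renders the guide lines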
+ """
+
+ def __init__(
+ self,
+ label: RenderableType,
+ *,
+ style: StyleType = "tree",
+ guide_style: StyleType = "tree.line",
+ expanded: bool = True,
+ highlight: bool = False,
+ hide_root: bool = False,
+ ) -> None:
+ self.label = label
+ self.style = style
+ self.guide_style = guide_style
+ self.children: List[Tree] = []
+ self.expanded = expanded
+ self.highlight = highlight
+ self.hide_root = hide_root
+
+ def add(
+ self,
+ label: RenderableType,
+ *,
+ style: Optional[StyleType] = None,
+ guide_style: Optional[StyleType] = None,
+ expanded: bool = True,
+ highlight: Optional[bool] = False,
+ ) -> "Tree":
+ """Add a child tree.
+
+ Args:
+ label (RenderableType): The renderable or str for the tree label.
+ style (StyleType, optional): Style of this tree. Defaults to "tree".
+ guide_style (StyleType, optional): Style of the guide lines. Defaults to "tree.line".
+ expanded (bool, optional): Also display children. Defaults to True.
+ highlight (Optional[bool], optional): Highlight renderable (if str). Defaults to False.
+
+ Returns:
+ Tree: A new child Tree, which may be further modified.
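+
+        Example (illustrative; the returned node lets you build nested
+        levels)::
+
+            child = tree.add("child")
+            child.add("grandchild")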
+ """
+ node = Tree(
+ label,
+ style=self.style if style is None else style,
+ guide_style=self.guide_style if guide_style is None else guide_style,
+ expanded=expanded,
+ highlight=self.highlight if highlight is None else highlight,
+ )
+ self.children.append(node)
+ return node
+
+ def __rich_console__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "RenderResult":
+
+ stack: List[Iterator[Tuple[bool, Tree]]] = []
+ pop = stack.pop
+ push = stack.append
+ new_line = Segment.line()
+
+ get_style = console.get_style
+ null_style = Style.null()
+ guide_style = get_style(self.guide_style, default="") or null_style
+ SPACE, CONTINUE, FORK, END = range(4)
+
+ ASCII_GUIDES = (" ", "| ", "+-- ", "`-- ")
+ TREE_GUIDES = [
+ (" ", "│ ", "├── ", "└── "),
+ (" ", "┃ ", "┣━━ ", "┗━━ "),
+ (" ", "║ ", "╠══ ", "╚══ "),
+ ]
+ _Segment = Segment
+
+ def make_guide(index: int, style: Style) -> Segment:
+ """Make a Segment for a level of the guide lines."""
+ if options.ascii_only:
+ line = ASCII_GUIDES[index]
+ else:
+ guide = 1 if style.bold else (2 if style.underline2 else 0)
+ line = TREE_GUIDES[0 if options.legacy_windows else guide][index]
+ return _Segment(line, style)
+
+ levels: List[Segment] = [make_guide(CONTINUE, guide_style)]
+ push(iter(loop_last([self])))
+
+ guide_style_stack = StyleStack(get_style(self.guide_style))
+ style_stack = StyleStack(get_style(self.style))
+ remove_guide_styles = Style(bold=False, underline2=False)
+
+ depth = 0
+
+ while stack:
+ stack_node = pop()
+ try:
+ last, node = next(stack_node)
+ except StopIteration:
+ levels.pop()
+ if levels:
+ guide_style = levels[-1].style or null_style
+ levels[-1] = make_guide(FORK, guide_style)
+ guide_style_stack.pop()
+ style_stack.pop()
+ continue
+ push(stack_node)
+ if last:
+ levels[-1] = make_guide(END, levels[-1].style or null_style)
+
+ guide_style = guide_style_stack.current + get_style(node.guide_style)
+ style = style_stack.current + get_style(node.style)
+ prefix = levels[(2 if self.hide_root else 1) :]
+ renderable_lines = console.render_lines(
+ Styled(node.label, style),
+ options.update(
+ width=options.max_width
+ - sum(level.cell_length for level in prefix),
+ highlight=self.highlight,
+ height=None,
+ ),
+ pad=options.justify is not None,
+ )
+
+ if not (depth == 0 and self.hide_root):
+ for first, line in loop_first(renderable_lines):
+ if prefix:
+ yield from _Segment.apply_style(
+ prefix,
+ style.background_style,
+ post_style=remove_guide_styles,
+ )
+ yield from line
+ yield new_line
+ if first and prefix:
+ prefix[-1] = make_guide(
+ SPACE if last else CONTINUE, prefix[-1].style or null_style
+ )
+
+ if node.expanded and node.children:
+ levels[-1] = make_guide(
+ SPACE if last else CONTINUE, levels[-1].style or null_style
+ )
+ levels.append(
+ make_guide(END if len(node.children) == 1 else FORK, guide_style)
+ )
+ style_stack.push(get_style(node.style))
+ guide_style_stack.push(get_style(node.guide_style))
+ push(iter(loop_last(node.children)))
+ depth += 1
+
+ def __rich_measure__(
+ self, console: "Console", options: "ConsoleOptions"
+ ) -> "Measurement":
+ stack: List[Iterator[Tree]] = [iter([self])]
+ pop = stack.pop
+ push = stack.append
+ minimum = 0
+ maximum = 0
+ measure = Measurement.get
+ level = 0
+ while stack:
+ iter_tree = pop()
+ try:
+ tree = next(iter_tree)
+ except StopIteration:
+ level -= 1
+ continue
+ push(iter_tree)
+ min_measure, max_measure = measure(console, options, tree.label)
+ indent = level * 4
+ minimum = max(min_measure + indent, minimum)
+ maximum = max(max_measure + indent, maximum)
+ if tree.expanded and tree.children:
+ push(iter(tree.children))
+ level += 1
+ return Measurement(minimum, maximum)
+
+
+if __name__ == "__main__": # pragma: no cover
+
+ from pip._vendor.rich.console import Group
+ from pip._vendor.rich.markdown import Markdown
+ from pip._vendor.rich.panel import Panel
+ from pip._vendor.rich.syntax import Syntax
+ from pip._vendor.rich.table import Table
+
+ table = Table(row_styles=["", "dim"])
+
+ table.add_column("Released", style="cyan", no_wrap=True)
+ table.add_column("Title", style="magenta")
+ table.add_column("Box Office", justify="right", style="green")
+
+ table.add_row("Dec 20, 2019", "Star Wars: The Rise of Skywalker", "$952,110,690")
+ table.add_row("May 25, 2018", "Solo: A Star Wars Story", "$393,151,347")
+ table.add_row("Dec 15, 2017", "Star Wars Ep. V111: The Last Jedi", "$1,332,539,889")
+ table.add_row("Dec 16, 2016", "Rogue One: A Star Wars Story", "$1,332,439,889")
+
+ code = """\
+class Segment(NamedTuple):
+ text: str = ""
+ style: Optional[Style] = None
+ is_control: bool = False
+"""
+ syntax = Syntax(code, "python", theme="monokai", line_numbers=True)
+
+ markdown = Markdown(
+ """\
+### example.md
+> Hello, World!
+>
+> Markdown _all_ the things
+"""
+ )
+
+ root = Tree("🌲 [b green]Rich Tree", highlight=True, hide_root=True)
+
+ node = root.add(":file_folder: Renderables", guide_style="red")
+ simple_node = node.add(":file_folder: [bold yellow]Atomic", guide_style="uu green")
+ simple_node.add(Group("📄 Syntax", syntax))
+ simple_node.add(Group("📄 Markdown", Panel(markdown, border_style="green")))
+
+ containers_node = node.add(
+ ":file_folder: [bold magenta]Containers", guide_style="bold magenta"
+ )
+ containers_node.expanded = True
+ panel = Panel.fit("Just a panel", border_style="red")
+ containers_node.add(Group("📄 Panels", panel))
+
+ containers_node.add(Group("📄 [b magenta]Table", table))
+
+ console = Console()
+
+ console.print(root)
diff --git a/third_party/python/pip/pip/_vendor/six.py b/third_party/python/pip/pip/_vendor/six.py
new file mode 100644
index 0000000000..4e15675d8b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/six.py
@@ -0,0 +1,998 @@
+# Copyright (c) 2010-2020 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.16.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+if PY34:
+ from importlib.util import spec_from_loader
+else:
+ spec_from_loader = None
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP 302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.known_modules:
+ return spec_from_loader(fullname, self)
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+        Return true if the named module is a package.
+
+        We need this method to get correct spec objects with
+        Python 3.4 (see PEP 451).
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+        Required if is_package is implemented."""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+ def create_module(self, spec):
+ return self.load_module(spec.name)
+
+ def exec_module(self, module):
+ pass
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] > (3,):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    # This does exactly what the :func:`py3:functools.update_wrapper`
+    # function does on Python versions after 3.2. It sets the ``__wrapped__``
+    # attribute on the ``wrapper`` object and it doesn't raise an error if any
+    # of the attributes mentioned in ``assigned`` and ``updated`` are missing
+    # on the ``wrapped`` object.
+ def _update_wrapper(wrapper, wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ return functools.partial(_update_wrapper, wrapped=wrapped,
+ assigned=assigned, updated=updated)
+ wraps.__doc__ = functools.wraps.__doc__
+
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
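+    #
+    # Illustrative usage (a sketch; ``Meta`` stands in for any metaclass):
+    #
+    #     class Base(with_metaclass(Meta, object)):
+    #         pass
+    #
+    # ``Base`` then has ``Meta`` as its metaclass and ``object`` as its base
+    # on both Python 2 and Python 3.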
+ class metaclass(type):
+
+ def __new__(cls, name, this_bases, d):
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
+
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, binary_type):
+ return s
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
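+
+    Illustrative examples (assuming a Python 3 runtime)::
+
+        ensure_str(b"hello")  # -> 'hello' (bytes are decoded)
+        ensure_str(u"hello")  # -> 'hello' (returned unchanged)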
+ """
+ # Optimization: Fast return for the common case.
+ if type(s) is str:
+ return s
+ if PY2 and isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
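+
+    Illustrative usage::
+
+        @python_2_unicode_compatible
+        class MyClass(object):
+            def __str__(self):
+                return u'text'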
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/third_party/python/pip/pip/_vendor/tenacity/__init__.py b/third_party/python/pip/pip/_vendor/tenacity/__init__.py
new file mode 100644
index 0000000000..ab3be3bf63
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/__init__.py
@@ -0,0 +1,519 @@
+# Copyright 2016-2018 Julien Danjou
+# Copyright 2017 Elisey Zanko
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import sys
+import threading
+import time
+import typing as t
+import warnings
+from abc import ABC, abstractmethod
+from concurrent import futures
+from inspect import iscoroutinefunction
+
+# Import all built-in retry strategies for easier usage.
+from .retry import retry_base # noqa
+from .retry import retry_all # noqa
+from .retry import retry_always # noqa
+from .retry import retry_any # noqa
+from .retry import retry_if_exception # noqa
+from .retry import retry_if_exception_type # noqa
+from .retry import retry_if_exception_cause_type # noqa
+from .retry import retry_if_not_exception_type # noqa
+from .retry import retry_if_not_result # noqa
+from .retry import retry_if_result # noqa
+from .retry import retry_never # noqa
+from .retry import retry_unless_exception_type # noqa
+from .retry import retry_if_exception_message # noqa
+from .retry import retry_if_not_exception_message # noqa
+
+# Import all nap strategies for easier usage.
+from .nap import sleep # noqa
+from .nap import sleep_using_event # noqa
+
+# Import all built-in stop strategies for easier usage.
+from .stop import stop_after_attempt # noqa
+from .stop import stop_after_delay # noqa
+from .stop import stop_all # noqa
+from .stop import stop_any # noqa
+from .stop import stop_never # noqa
+from .stop import stop_when_event_set # noqa
+
+# Import all built-in wait strategies for easier usage.
+from .wait import wait_chain # noqa
+from .wait import wait_combine # noqa
+from .wait import wait_exponential # noqa
+from .wait import wait_fixed # noqa
+from .wait import wait_incrementing # noqa
+from .wait import wait_none # noqa
+from .wait import wait_random # noqa
+from .wait import wait_random_exponential # noqa
+from .wait import wait_random_exponential as wait_full_jitter # noqa
+from .wait import wait_exponential_jitter # noqa
+
+# Import all built-in before strategies for easier usage.
+from .before import before_log # noqa
+from .before import before_nothing # noqa
+
+# Import all built-in after strategies for easier usage.
+from .after import after_log # noqa
+from .after import after_nothing # noqa
+
+# Import all built-in before-sleep strategies for easier usage.
+from .before_sleep import before_sleep_log # noqa
+from .before_sleep import before_sleep_nothing # noqa
+
+# Replace a conditional import with a hard-coded None so that pip does
+# not attempt to use tornado even if it is present in the environment.
+# If tornado is non-None, tenacity will attempt to execute some code
+# that is sensitive to the version of tornado, which could break pip
+# if an old version is found.
+tornado = None # type: ignore
+
+if t.TYPE_CHECKING:
+ import types
+
+ from .wait import wait_base
+ from .stop import stop_base
+
+
+WrappedFn = t.TypeVar("WrappedFn", bound=t.Callable)
+_RetValT = t.TypeVar("_RetValT")
+
+
+@t.overload
+def retry(fn: WrappedFn) -> WrappedFn:
+ pass
+
+
+@t.overload
+def retry(*dargs: t.Any, **dkw: t.Any) -> t.Callable[[WrappedFn], WrappedFn]: # noqa
+ pass
+
+
+def retry(*dargs: t.Any, **dkw: t.Any) -> t.Union[WrappedFn, t.Callable[[WrappedFn], WrappedFn]]: # noqa
+ """Wrap a function with a new `Retrying` object.
+
+    :param dargs: positional arguments passed to the Retrying object
+ :param dkw: keyword arguments passed to the Retrying object
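+
+    Illustrative usage (a sketch; ``stop_after_attempt`` and ``wait_fixed``
+    are imported at the top of this module)::
+
+        @retry(stop=stop_after_attempt(3), wait=wait_fixed(0.5))
+        def flaky_call():
+            ...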
+ """
+ # support both @retry and @retry() as valid syntax
+ if len(dargs) == 1 and callable(dargs[0]):
+ return retry()(dargs[0])
+ else:
+
+ def wrap(f: WrappedFn) -> WrappedFn:
+ if isinstance(f, retry_base):
+ warnings.warn(
+ f"Got retry_base instance ({f.__class__.__name__}) as callable argument, "
+ f"this will probably hang indefinitely (did you mean retry={f.__class__.__name__}(...)?)"
+ )
+ if iscoroutinefunction(f):
+ r: "BaseRetrying" = AsyncRetrying(*dargs, **dkw)
+ elif tornado and hasattr(tornado.gen, "is_coroutine_function") and tornado.gen.is_coroutine_function(f):
+ r = TornadoRetrying(*dargs, **dkw)
+ else:
+ r = Retrying(*dargs, **dkw)
+
+ return r.wraps(f)
+
+ return wrap
+
+
+class TryAgain(Exception):
+ """Always retry the executed function when raised."""
+
+
+NO_RESULT = object()
+
+
+class DoAttempt:
+ pass
+
+
+class DoSleep(float):
+ pass
+
+
+class BaseAction:
+ """Base class for representing actions to take by retry object.
+
+ Concrete implementations must define:
+ - __init__: to initialize all necessary fields
+ - REPR_FIELDS: class variable specifying attributes to include in repr(self)
+ - NAME: for identification in retry object methods and callbacks
+ """
+
+ REPR_FIELDS: t.Sequence[str] = ()
+ NAME: t.Optional[str] = None
+
+ def __repr__(self) -> str:
+ state_str = ", ".join(f"{field}={getattr(self, field)!r}" for field in self.REPR_FIELDS)
+ return f"{self.__class__.__name__}({state_str})"
+
+ def __str__(self) -> str:
+ return repr(self)
+
+
+class RetryAction(BaseAction):
+ REPR_FIELDS = ("sleep",)
+ NAME = "retry"
+
+ def __init__(self, sleep: t.SupportsFloat) -> None:
+ self.sleep = float(sleep)
+
+
+_unset = object()
+
+
+def _first_set(first: t.Union[t.Any, object], second: t.Any) -> t.Any:
+ return second if first is _unset else first
+
+
+class RetryError(Exception):
+ """Encapsulates the last attempt instance right before giving up."""
+
+ def __init__(self, last_attempt: "Future") -> None:
+ self.last_attempt = last_attempt
+ super().__init__(last_attempt)
+
+ def reraise(self) -> "t.NoReturn":
+ if self.last_attempt.failed:
+ raise self.last_attempt.result()
+ raise self
+
+ def __str__(self) -> str:
+ return f"{self.__class__.__name__}[{self.last_attempt}]"
+
+
+class AttemptManager:
+ """Manage attempt context."""
+
+ def __init__(self, retry_state: "RetryCallState"):
+ self.retry_state = retry_state
+
+ def __enter__(self) -> None:
+ pass
+
+ def __exit__(
+ self,
+ exc_type: t.Optional[t.Type[BaseException]],
+ exc_value: t.Optional[BaseException],
+ traceback: t.Optional["types.TracebackType"],
+ ) -> t.Optional[bool]:
+ if isinstance(exc_value, BaseException):
+ self.retry_state.set_exception((exc_type, exc_value, traceback))
+ return True # Swallow exception.
+ else:
+ # We don't have the result, actually.
+ self.retry_state.set_result(None)
+ return None
+
+
+class BaseRetrying(ABC):
+ def __init__(
+ self,
+ sleep: t.Callable[[t.Union[int, float]], None] = sleep,
+ stop: "stop_base" = stop_never,
+ wait: "wait_base" = wait_none(),
+ retry: retry_base = retry_if_exception_type(),
+ before: t.Callable[["RetryCallState"], None] = before_nothing,
+ after: t.Callable[["RetryCallState"], None] = after_nothing,
+ before_sleep: t.Optional[t.Callable[["RetryCallState"], None]] = None,
+ reraise: bool = False,
+ retry_error_cls: t.Type[RetryError] = RetryError,
+ retry_error_callback: t.Optional[t.Callable[["RetryCallState"], t.Any]] = None,
+ ):
+ self.sleep = sleep
+ self.stop = stop
+ self.wait = wait
+ self.retry = retry
+ self.before = before
+ self.after = after
+ self.before_sleep = before_sleep
+ self.reraise = reraise
+ self._local = threading.local()
+ self.retry_error_cls = retry_error_cls
+ self.retry_error_callback = retry_error_callback
+
+ def copy(
+ self,
+ sleep: t.Union[t.Callable[[t.Union[int, float]], None], object] = _unset,
+ stop: t.Union["stop_base", object] = _unset,
+ wait: t.Union["wait_base", object] = _unset,
+ retry: t.Union[retry_base, object] = _unset,
+ before: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
+ after: t.Union[t.Callable[["RetryCallState"], None], object] = _unset,
+ before_sleep: t.Union[t.Optional[t.Callable[["RetryCallState"], None]], object] = _unset,
+ reraise: t.Union[bool, object] = _unset,
+ retry_error_cls: t.Union[t.Type[RetryError], object] = _unset,
+ retry_error_callback: t.Union[t.Optional[t.Callable[["RetryCallState"], t.Any]], object] = _unset,
+ ) -> "BaseRetrying":
+ """Copy this object with some parameters changed if needed."""
+ return self.__class__(
+ sleep=_first_set(sleep, self.sleep),
+ stop=_first_set(stop, self.stop),
+ wait=_first_set(wait, self.wait),
+ retry=_first_set(retry, self.retry),
+ before=_first_set(before, self.before),
+ after=_first_set(after, self.after),
+ before_sleep=_first_set(before_sleep, self.before_sleep),
+ reraise=_first_set(reraise, self.reraise),
+ retry_error_cls=_first_set(retry_error_cls, self.retry_error_cls),
+ retry_error_callback=_first_set(retry_error_callback, self.retry_error_callback),
+ )
+
+ def __repr__(self) -> str:
+ return (
+ f"<{self.__class__.__name__} object at 0x{id(self):x} ("
+ f"stop={self.stop}, "
+ f"wait={self.wait}, "
+ f"sleep={self.sleep}, "
+ f"retry={self.retry}, "
+ f"before={self.before}, "
+ f"after={self.after})>"
+ )
+
+ @property
+ def statistics(self) -> t.Dict[str, t.Any]:
+        """Return a dictionary of runtime statistics.
+
+        This dictionary will be empty when the controller has never been
+        run. While it is running, or after it has run, it should (but may
+        not) contain useful and/or informational keys and values.
+
+        .. warning:: The keys in this dictionary **should** be somewhat
+                     stable (not changing), but their existence **may**
+                     change between major releases as new statistics are
+                     gathered or removed, so before accessing keys ensure
+                     that they actually exist and handle the case when
+                     they do not.
+
+        .. note:: The values in this dictionary are local to the thread
+                  running the call, so if multiple threads share the same
+                  retrying object (either directly or indirectly) they will
+                  each have their own view of the statistics they have
+                  collected (in the future we may provide a way to
+                  aggregate the various statistics from each thread).
+        """
+ try:
+ return self._local.statistics
+ except AttributeError:
+ self._local.statistics = {}
+ return self._local.statistics
+
+ def wraps(self, f: WrappedFn) -> WrappedFn:
+ """Wrap a function for retrying.
+
+        :param f: A function to wrap for retrying.
+ """
+
+ @functools.wraps(f)
+ def wrapped_f(*args: t.Any, **kw: t.Any) -> t.Any:
+ return self(f, *args, **kw)
+
+ def retry_with(*args: t.Any, **kwargs: t.Any) -> WrappedFn:
+ return self.copy(*args, **kwargs).wraps(f)
+
+ wrapped_f.retry = self
+ wrapped_f.retry_with = retry_with
+
+ return wrapped_f
+
+ def begin(self) -> None:
+ self.statistics.clear()
+ self.statistics["start_time"] = time.monotonic()
+ self.statistics["attempt_number"] = 1
+ self.statistics["idle_for"] = 0
+
+ def iter(self, retry_state: "RetryCallState") -> t.Union[DoAttempt, DoSleep, t.Any]: # noqa
+ fut = retry_state.outcome
+ if fut is None:
+ if self.before is not None:
+ self.before(retry_state)
+ return DoAttempt()
+
+ is_explicit_retry = retry_state.outcome.failed and isinstance(retry_state.outcome.exception(), TryAgain)
+ if not (is_explicit_retry or self.retry(retry_state=retry_state)):
+ return fut.result()
+
+ if self.after is not None:
+ self.after(retry_state)
+
+ self.statistics["delay_since_first_attempt"] = retry_state.seconds_since_start
+ if self.stop(retry_state=retry_state):
+ if self.retry_error_callback:
+ return self.retry_error_callback(retry_state)
+ retry_exc = self.retry_error_cls(fut)
+ if self.reraise:
+ raise retry_exc.reraise()
+ raise retry_exc from fut.exception()
+
+ if self.wait:
+ sleep = self.wait(retry_state=retry_state)
+ else:
+ sleep = 0.0
+ retry_state.next_action = RetryAction(sleep)
+ retry_state.idle_for += sleep
+ self.statistics["idle_for"] += sleep
+ self.statistics["attempt_number"] += 1
+
+ if self.before_sleep is not None:
+ self.before_sleep(retry_state)
+
+ return DoSleep(sleep)
+
+ def __iter__(self) -> t.Generator[AttemptManager, None, None]:
+ self.begin()
+
+ retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ yield AttemptManager(retry_state=retry_state)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ self.sleep(do)
+ else:
+ break
+
+ @abstractmethod
+ def __call__(self, fn: t.Callable[..., _RetValT], *args: t.Any, **kwargs: t.Any) -> _RetValT:
+ pass
+
+
+class Retrying(BaseRetrying):
+ """Retrying controller."""
+
+ def __call__(self, fn: t.Callable[..., _RetValT], *args: t.Any, **kwargs: t.Any) -> _RetValT:
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info())
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ self.sleep(do)
+ else:
+ return do
+
+
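+# Editor's note: a hedged sketch of two ways to drive the Retrying
+# controller defined above; `stop_after_attempt` is assumed to be the
+# strategy imported at the top of this module.
+def _retrying_example() -> int:
+    # 1) Call a function through the controller.
+    value = Retrying(stop=stop_after_attempt(3), reraise=True)(lambda: 21)
+
+    # 2) Drive attempts with the generator protocol; the AttemptManager
+    #    swallows exceptions, and a successful attempt ends the loop.
+    for attempt in Retrying(stop=stop_after_attempt(3)):
+        with attempt:
+            value += 21  # raising here would trigger a retry instead
+    return value  # 42
+
+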
+class Future(futures.Future):
+ """Encapsulates a (future or past) attempted call to a target function."""
+
+ def __init__(self, attempt_number: int) -> None:
+ super().__init__()
+ self.attempt_number = attempt_number
+
+ @property
+ def failed(self) -> bool:
+        """Return whether an exception is being held in this future."""
+ return self.exception() is not None
+
+ @classmethod
+ def construct(cls, attempt_number: int, value: t.Any, has_exception: bool) -> "Future":
+ """Construct a new Future object."""
+ fut = cls(attempt_number)
+ if has_exception:
+ fut.set_exception(value)
+ else:
+ fut.set_result(value)
+ return fut
+
+
+class RetryCallState:
+ """State related to a single call wrapped with Retrying."""
+
+ def __init__(
+ self,
+ retry_object: BaseRetrying,
+ fn: t.Optional[WrappedFn],
+ args: t.Any,
+ kwargs: t.Any,
+ ) -> None:
+ #: Retry call start timestamp
+ self.start_time = time.monotonic()
+ #: Retry manager object
+ self.retry_object = retry_object
+ #: Function wrapped by this retry call
+ self.fn = fn
+ #: Arguments of the function wrapped by this retry call
+ self.args = args
+ #: Keyword arguments of the function wrapped by this retry call
+ self.kwargs = kwargs
+
+ #: The number of the current attempt
+ self.attempt_number: int = 1
+ #: Last outcome (result or exception) produced by the function
+ self.outcome: t.Optional[Future] = None
+ #: Timestamp of the last outcome
+ self.outcome_timestamp: t.Optional[float] = None
+ #: Time spent sleeping in retries
+ self.idle_for: float = 0.0
+ #: Next action as decided by the retry manager
+ self.next_action: t.Optional[RetryAction] = None
+
+ @property
+ def seconds_since_start(self) -> t.Optional[float]:
+ if self.outcome_timestamp is None:
+ return None
+ return self.outcome_timestamp - self.start_time
+
+ def prepare_for_next_attempt(self) -> None:
+ self.outcome = None
+ self.outcome_timestamp = None
+ self.attempt_number += 1
+ self.next_action = None
+
+ def set_result(self, val: t.Any) -> None:
+ ts = time.monotonic()
+ fut = Future(self.attempt_number)
+ fut.set_result(val)
+ self.outcome, self.outcome_timestamp = fut, ts
+
+ def set_exception(self, exc_info: t.Tuple[t.Type[BaseException], BaseException, "types.TracebackType"]) -> None:
+ ts = time.monotonic()
+ fut = Future(self.attempt_number)
+ fut.set_exception(exc_info[1])
+ self.outcome, self.outcome_timestamp = fut, ts
+
+ def __repr__(self):
+ if self.outcome is None:
+ result = "none yet"
+ elif self.outcome.failed:
+ exception = self.outcome.exception()
+ result = f"failed ({exception.__class__.__name__} {exception})"
+ else:
+ result = f"returned {self.outcome.result()}"
+
+ slept = float(round(self.idle_for, 2))
+ clsname = self.__class__.__name__
+ return f"<{clsname} {id(self)}: attempt #{self.attempt_number}; slept for {slept}; last result: {result}>"
+
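+
+# Editor's note: a hedged example of a hook consuming RetryCallState; any
+# before/after/before_sleep callback receives this object.
+def _log_state_example(retry_state: "RetryCallState") -> None:
+    # next_action is populated by BaseRetrying.iter() before sleeping.
+    print(
+        f"attempt {retry_state.attempt_number} ended with {retry_state.outcome}; "
+        f"sleeping {retry_state.next_action.sleep}s"
+    )
+
+
+# Usage sketch: Retrying(before_sleep=_log_state_example, ...)
+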
+
+from pip._vendor.tenacity._asyncio import AsyncRetrying # noqa:E402,I100
+
+if tornado:
+ from pip._vendor.tenacity.tornadoweb import TornadoRetrying
diff --git a/third_party/python/pip/pip/_vendor/tenacity/_asyncio.py b/third_party/python/pip/pip/_vendor/tenacity/_asyncio.py
new file mode 100644
index 0000000000..0f32b5f620
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/_asyncio.py
@@ -0,0 +1,92 @@
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import sys
+import typing
+from asyncio import sleep
+
+from pip._vendor.tenacity import AttemptManager
+from pip._vendor.tenacity import BaseRetrying
+from pip._vendor.tenacity import DoAttempt
+from pip._vendor.tenacity import DoSleep
+from pip._vendor.tenacity import RetryCallState
+
+WrappedFn = typing.TypeVar("WrappedFn", bound=typing.Callable)
+_RetValT = typing.TypeVar("_RetValT")
+
+
+class AsyncRetrying(BaseRetrying):
+ def __init__(self, sleep: typing.Callable[[float], typing.Awaitable] = sleep, **kwargs: typing.Any) -> None:
+ super().__init__(**kwargs)
+ self.sleep = sleep
+
+ async def __call__( # type: ignore # Change signature from supertype
+ self,
+ fn: typing.Callable[..., typing.Awaitable[_RetValT]],
+ *args: typing.Any,
+ **kwargs: typing.Any,
+ ) -> _RetValT:
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = await fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info())
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ await self.sleep(do)
+ else:
+ return do
+
+ def __aiter__(self) -> "AsyncRetrying":
+ self.begin()
+ self._retry_state = RetryCallState(self, fn=None, args=(), kwargs={})
+ return self
+
+ async def __anext__(self) -> typing.Union[AttemptManager, typing.Any]:
+ while True:
+ do = self.iter(retry_state=self._retry_state)
+ if do is None:
+ raise StopAsyncIteration
+ elif isinstance(do, DoAttempt):
+ return AttemptManager(retry_state=self._retry_state)
+ elif isinstance(do, DoSleep):
+ self._retry_state.prepare_for_next_attempt()
+ await self.sleep(do)
+ else:
+ return do
+
+ def wraps(self, fn: WrappedFn) -> WrappedFn:
+ fn = super().wraps(fn)
+ # Ensure wrapper is recognized as a coroutine function.
+
+ @functools.wraps(fn)
+ async def async_wrapped(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
+ return await fn(*args, **kwargs)
+
+ # Preserve attributes
+ async_wrapped.retry = fn.retry
+ async_wrapped.retry_with = fn.retry_with
+
+ return async_wrapped
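+
+
+# Editor's note: a hedged sketch of the async-iterator protocol implemented
+# above. An event loop must be running, e.g. asyncio.run(_async_example()).
+async def _async_example() -> str:
+    result = ""
+    async for attempt in AsyncRetrying():
+        with attempt:
+            result = "done"  # raising here would schedule an awaited sleep
+    return result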
diff --git a/third_party/python/pip/pip/_vendor/tenacity/_utils.py b/third_party/python/pip/pip/_vendor/tenacity/_utils.py
new file mode 100644
index 0000000000..d5c4c9de59
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/_utils.py
@@ -0,0 +1,68 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import typing
+
+
+# sys.maxsize:
+# An integer giving the maximum value a variable of type Py_ssize_t can take.
+MAX_WAIT = sys.maxsize / 2
+
+
+def find_ordinal(pos_num: int) -> str:
+    # See: https://en.wikipedia.org/wiki/English_numerals#Ordinal_numbers
+    if pos_num == 0:
+        return "th"
+    elif pos_num == 1:
+        return "st"
+    elif pos_num == 2:
+        return "nd"
+    elif pos_num == 3:
+        return "rd"
+    elif 4 <= pos_num <= 20:
+        return "th"
+    elif pos_num % 100 in (11, 12, 13):
+        # 111th, 212th, ...: the teens keep "th" regardless of last digit
+        return "th"
+    else:
+        return find_ordinal(pos_num % 10)
+
+
+def to_ordinal(pos_num: int) -> str:
+ return f"{pos_num}{find_ordinal(pos_num)}"
+
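+
+def _ordinal_examples() -> None:
+    # Editor's hedged sanity checks for the helpers above.
+    assert to_ordinal(1) == "1st"
+    assert to_ordinal(22) == "22nd"
+    assert to_ordinal(111) == "111th"  # the teens branch handles 111, 112, 113
+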
+
+def get_callback_name(cb: typing.Callable[..., typing.Any]) -> str:
+ """Get a callback fully-qualified name.
+
+ If no name can be produced ``repr(cb)`` is called and returned.
+ """
+ segments = []
+ try:
+ segments.append(cb.__qualname__)
+ except AttributeError:
+ try:
+ segments.append(cb.__name__)
+ except AttributeError:
+ pass
+ if not segments:
+ return repr(cb)
+ else:
+ try:
+            # When running under Sphinx, __module__ can apparently be None.
+ if cb.__module__:
+ segments.insert(0, cb.__module__)
+ except AttributeError:
+ pass
+ return ".".join(segments)
diff --git a/third_party/python/pip/pip/_vendor/tenacity/after.py b/third_party/python/pip/pip/_vendor/tenacity/after.py
new file mode 100644
index 0000000000..c056700f9f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/after.py
@@ -0,0 +1,46 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from pip._vendor.tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from pip._vendor.tenacity import RetryCallState
+
+
+def after_nothing(retry_state: "RetryCallState") -> None:
+ """After call strategy that does nothing."""
+
+
+def after_log(
+ logger: "logging.Logger",
+ log_level: int,
+ sec_format: str = "%0.3f",
+) -> typing.Callable[["RetryCallState"], None]:
+ """After call strategy that logs to some logger the finished attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ logger.log(
+ log_level,
+ f"Finished call to '{_utils.get_callback_name(retry_state.fn)}' "
+ f"after {sec_format % retry_state.seconds_since_start}(s), "
+ f"this was the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
+ )
+
+ return log_it
diff --git a/third_party/python/pip/pip/_vendor/tenacity/before.py b/third_party/python/pip/pip/_vendor/tenacity/before.py
new file mode 100644
index 0000000000..a72c2c5f70
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/before.py
@@ -0,0 +1,41 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from pip._vendor.tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from pip._vendor.tenacity import RetryCallState
+
+
+def before_nothing(retry_state: "RetryCallState") -> None:
+ """Before call strategy that does nothing."""
+
+
+def before_log(logger: "logging.Logger", log_level: int) -> typing.Callable[["RetryCallState"], None]:
+ """Before call strategy that logs to some logger the attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ logger.log(
+ log_level,
+ f"Starting call to '{_utils.get_callback_name(retry_state.fn)}', "
+ f"this is the {_utils.to_ordinal(retry_state.attempt_number)} time calling it.",
+ )
+
+ return log_it
diff --git a/third_party/python/pip/pip/_vendor/tenacity/before_sleep.py b/third_party/python/pip/pip/_vendor/tenacity/before_sleep.py
new file mode 100644
index 0000000000..b35564fbad
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/before_sleep.py
@@ -0,0 +1,58 @@
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import typing
+
+from pip._vendor.tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ import logging
+
+ from pip._vendor.tenacity import RetryCallState
+
+
+def before_sleep_nothing(retry_state: "RetryCallState") -> None:
+ """Before call strategy that does nothing."""
+
+
+def before_sleep_log(
+ logger: "logging.Logger",
+ log_level: int,
+ exc_info: bool = False,
+) -> typing.Callable[["RetryCallState"], None]:
+ """Before call strategy that logs to some logger the attempt."""
+
+ def log_it(retry_state: "RetryCallState") -> None:
+ if retry_state.outcome.failed:
+ ex = retry_state.outcome.exception()
+ verb, value = "raised", f"{ex.__class__.__name__}: {ex}"
+
+ if exc_info:
+ local_exc_info = retry_state.outcome.exception()
+ else:
+ local_exc_info = False
+ else:
+ verb, value = "returned", retry_state.outcome.result()
+ local_exc_info = False # exc_info does not apply when no exception
+
+ logger.log(
+ log_level,
+ f"Retrying {_utils.get_callback_name(retry_state.fn)} "
+ f"in {retry_state.next_action.sleep} seconds as it {verb} {value}.",
+ exc_info=local_exc_info,
+ )
+
+ return log_it
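+
+
+# Editor's note: a hedged sketch wiring the logging hook above into a
+# controller. The lazy imports avoid a circular import at module load time.
+def _logging_hooks_example() -> None:
+    import logging
+
+    from pip._vendor.tenacity import Retrying, stop_after_attempt
+
+    logger = logging.getLogger("tenacity.example")
+    retryer = Retrying(
+        stop=stop_after_attempt(3),
+        before_sleep=before_sleep_log(logger, logging.WARNING, exc_info=True),
+    )
+    retryer(lambda: "ok")  # logs only if an attempt fails before success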
diff --git a/third_party/python/pip/pip/_vendor/tenacity/nap.py b/third_party/python/pip/pip/_vendor/tenacity/nap.py
new file mode 100644
index 0000000000..72aa5bfd4b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/nap.py
@@ -0,0 +1,43 @@
+# Copyright 2016 Étienne Bersac
+# Copyright 2016 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import time
+import typing
+
+if typing.TYPE_CHECKING:
+ import threading
+
+
+def sleep(seconds: float) -> None:
+ """
+ Sleep strategy that delays execution for a given number of seconds.
+
+ This is the default strategy, and may be mocked out for unit testing.
+ """
+ time.sleep(seconds)
+
+
+class sleep_using_event:
+ """Sleep strategy that waits on an event to be set."""
+
+ def __init__(self, event: "threading.Event") -> None:
+ self.event = event
+
+ def __call__(self, timeout: typing.Optional[float]) -> None:
+        # NOTE(harlowja): this may *not* actually wait for the full timeout
+        # if the event is set earlier (i.e. it may return early).
+ self.event.wait(timeout=timeout)
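+
+
+# Editor's note: a hedged sketch of the event-based strategy above. Setting
+# the event from another thread wakes a retryer configured with it early.
+def _event_sleep_example() -> None:
+    import threading
+
+    shutdown = threading.Event()
+    napper = sleep_using_event(shutdown)
+    shutdown.set()      # simulate a shutdown request
+    napper(timeout=60)  # returns immediately because the event is set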
diff --git a/third_party/python/pip/pip/_vendor/tenacity/retry.py b/third_party/python/pip/pip/_vendor/tenacity/retry.py
new file mode 100644
index 0000000000..9ebeb62d5c
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/retry.py
@@ -0,0 +1,240 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import re
+import typing
+
+if typing.TYPE_CHECKING:
+ from pip._vendor.tenacity import RetryCallState
+
+
+class retry_base(abc.ABC):
+ """Abstract base class for retry strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ pass
+
+ def __and__(self, other: "retry_base") -> "retry_all":
+ return retry_all(self, other)
+
+ def __or__(self, other: "retry_base") -> "retry_any":
+ return retry_any(self, other)
+
+
+class _retry_never(retry_base):
+ """Retry strategy that never rejects any result."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return False
+
+
+retry_never = _retry_never()
+
+
+class _retry_always(retry_base):
+ """Retry strategy that always rejects any result."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return True
+
+
+retry_always = _retry_always()
+
+
+class retry_if_exception(retry_base):
+    """Retry strategy that retries if an exception satisfies a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[BaseException], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome.failed:
+ return self.predicate(retry_state.outcome.exception())
+ else:
+ return False
+
+
+class retry_if_exception_type(retry_if_exception):
+    """Retries if an exception of one or more given types has been raised."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: isinstance(e, exception_types))
+
+
+class retry_if_not_exception_type(retry_if_exception):
+    """Retries unless an exception of one or more given types has been raised."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: not isinstance(e, exception_types))
+
+
+class retry_unless_exception_type(retry_if_exception):
+    """Retries until an exception of one or more given types is raised."""
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_types = exception_types
+ super().__init__(lambda e: not isinstance(e, exception_types))
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ # always retry if no exception was raised
+ if not retry_state.outcome.failed:
+ return True
+ return self.predicate(retry_state.outcome.exception())
+
+
+class retry_if_exception_cause_type(retry_base):
+ """Retries if any of the causes of the raised exception is of one or more types.
+
+    The check on the type of the cause is done recursively, walking the
+    `__cause__` chain until an exception with no cause is found.
+ """
+
+ def __init__(
+ self,
+ exception_types: typing.Union[
+ typing.Type[BaseException],
+ typing.Tuple[typing.Type[BaseException], ...],
+ ] = Exception,
+ ) -> None:
+ self.exception_cause_types = exception_types
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if retry_state.outcome.failed:
+ exc = retry_state.outcome.exception()
+ while exc is not None:
+ if isinstance(exc.__cause__, self.exception_cause_types):
+ return True
+ exc = exc.__cause__
+
+ return False
+
+
+class retry_if_result(retry_base):
+    """Retries if the result satisfies a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if not retry_state.outcome.failed:
+ return self.predicate(retry_state.outcome.result())
+ else:
+ return False
+
+
+class retry_if_not_result(retry_base):
+    """Retries if the result does not satisfy a predicate."""
+
+ def __init__(self, predicate: typing.Callable[[typing.Any], bool]) -> None:
+ self.predicate = predicate
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if not retry_state.outcome.failed:
+ return not self.predicate(retry_state.outcome.result())
+ else:
+ return False
+
+
+class retry_if_exception_message(retry_if_exception):
+ """Retries if an exception message equals or matches."""
+
+ def __init__(
+ self,
+ message: typing.Optional[str] = None,
+ match: typing.Optional[str] = None,
+ ) -> None:
+ if message and match:
+ raise TypeError(f"{self.__class__.__name__}() takes either 'message' or 'match', not both")
+
+ # set predicate
+ if message:
+
+ def message_fnc(exception: BaseException) -> bool:
+ return message == str(exception)
+
+ predicate = message_fnc
+ elif match:
+ prog = re.compile(match)
+
+ def match_fnc(exception: BaseException) -> bool:
+ return bool(prog.match(str(exception)))
+
+ predicate = match_fnc
+ else:
+ raise TypeError(f"{self.__class__.__name__}() missing 1 required argument 'message' or 'match'")
+
+ super().__init__(predicate)
+
+
+class retry_if_not_exception_message(retry_if_exception_message):
+ """Retries until an exception message equals or matches."""
+
+ def __init__(
+ self,
+ message: typing.Optional[str] = None,
+ match: typing.Optional[str] = None,
+ ) -> None:
+ super().__init__(message, match)
+ # invert predicate
+ if_predicate = self.predicate
+ self.predicate = lambda *args_, **kwargs_: not if_predicate(*args_, **kwargs_)
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ if not retry_state.outcome.failed:
+ return True
+ return self.predicate(retry_state.outcome.exception())
+
+
+class retry_any(retry_base):
+    """Retries if any of the given retry conditions holds."""
+
+ def __init__(self, *retries: retry_base) -> None:
+ self.retries = retries
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return any(r(retry_state) for r in self.retries)
+
+
+class retry_all(retry_base):
+    """Retries if all of the given retry conditions hold."""
+
+ def __init__(self, *retries: retry_base) -> None:
+ self.retries = retries
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return all(r(retry_state) for r in self.retries)
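+
+
+# Editor's note: a hedged sketch of composing strategies with the & and |
+# operators defined on retry_base, which build retry_all / retry_any.
+def _combined_retry_example() -> retry_base:
+    # Retry on ValueError, or whenever the call returned None.
+    return retry_if_exception_type(ValueError) | retry_if_result(lambda r: r is None)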
diff --git a/third_party/python/pip/pip/_vendor/tenacity/stop.py b/third_party/python/pip/pip/_vendor/tenacity/stop.py
new file mode 100644
index 0000000000..faaae9a8dd
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/stop.py
@@ -0,0 +1,96 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import abc
+import typing
+
+if typing.TYPE_CHECKING:
+ import threading
+
+ from pip._vendor.tenacity import RetryCallState
+
+
+class stop_base(abc.ABC):
+ """Abstract base class for stop strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ pass
+
+ def __and__(self, other: "stop_base") -> "stop_all":
+ return stop_all(self, other)
+
+ def __or__(self, other: "stop_base") -> "stop_any":
+ return stop_any(self, other)
+
+
+class stop_any(stop_base):
+    """Stop if any of the given stop conditions holds."""
+
+ def __init__(self, *stops: stop_base) -> None:
+ self.stops = stops
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return any(x(retry_state) for x in self.stops)
+
+
+class stop_all(stop_base):
+    """Stop if all of the given stop conditions hold."""
+
+ def __init__(self, *stops: stop_base) -> None:
+ self.stops = stops
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return all(x(retry_state) for x in self.stops)
+
+
+class _stop_never(stop_base):
+ """Never stop."""
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return False
+
+
+stop_never = _stop_never()
+
+
+class stop_when_event_set(stop_base):
+ """Stop when the given event is set."""
+
+ def __init__(self, event: "threading.Event") -> None:
+ self.event = event
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return self.event.is_set()
+
+
+class stop_after_attempt(stop_base):
+    """Stop once the number of attempts made reaches max_attempt_number."""
+
+ def __init__(self, max_attempt_number: int) -> None:
+ self.max_attempt_number = max_attempt_number
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return retry_state.attempt_number >= self.max_attempt_number
+
+
+class stop_after_delay(stop_base):
+    """Stop once max_delay seconds have elapsed since the first attempt."""
+
+ def __init__(self, max_delay: float) -> None:
+ self.max_delay = max_delay
+
+ def __call__(self, retry_state: "RetryCallState") -> bool:
+ return retry_state.seconds_since_start >= self.max_delay
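+
+
+# Editor's note: a hedged sketch of composing stop conditions via | (stop_any).
+def _combined_stop_example() -> stop_base:
+    # Give up after 5 attempts or 30 seconds, whichever comes first.
+    return stop_after_attempt(5) | stop_after_delay(30)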
diff --git a/third_party/python/pip/pip/_vendor/tenacity/tornadoweb.py b/third_party/python/pip/pip/_vendor/tenacity/tornadoweb.py
new file mode 100644
index 0000000000..8f7731af0e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/tornadoweb.py
@@ -0,0 +1,59 @@
+# Copyright 2017 Elisey Zanko
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sys
+import typing
+
+from pip._vendor.tenacity import BaseRetrying
+from pip._vendor.tenacity import DoAttempt
+from pip._vendor.tenacity import DoSleep
+from pip._vendor.tenacity import RetryCallState
+
+from tornado import gen
+
+if typing.TYPE_CHECKING:
+ from tornado.concurrent import Future
+
+_RetValT = typing.TypeVar("_RetValT")
+
+
+class TornadoRetrying(BaseRetrying):
+ def __init__(self, sleep: "typing.Callable[[float], Future[None]]" = gen.sleep, **kwargs: typing.Any) -> None:
+ super().__init__(**kwargs)
+ self.sleep = sleep
+
+ @gen.coroutine
+ def __call__( # type: ignore # Change signature from supertype
+ self,
+ fn: "typing.Callable[..., typing.Union[typing.Generator[typing.Any, typing.Any, _RetValT], Future[_RetValT]]]",
+ *args: typing.Any,
+ **kwargs: typing.Any,
+ ) -> "typing.Generator[typing.Any, typing.Any, _RetValT]":
+ self.begin()
+
+ retry_state = RetryCallState(retry_object=self, fn=fn, args=args, kwargs=kwargs)
+ while True:
+ do = self.iter(retry_state=retry_state)
+ if isinstance(do, DoAttempt):
+ try:
+ result = yield fn(*args, **kwargs)
+ except BaseException: # noqa: B902
+ retry_state.set_exception(sys.exc_info())
+ else:
+ retry_state.set_result(result)
+ elif isinstance(do, DoSleep):
+ retry_state.prepare_for_next_attempt()
+ yield self.sleep(do)
+ else:
+ raise gen.Return(do)
diff --git a/third_party/python/pip/pip/_vendor/tenacity/wait.py b/third_party/python/pip/pip/_vendor/tenacity/wait.py
new file mode 100644
index 0000000000..8fdfc8f9d4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tenacity/wait.py
@@ -0,0 +1,232 @@
+# Copyright 2016–2021 Julien Danjou
+# Copyright 2016 Joshua Harlow
+# Copyright 2013-2014 Ray Holder
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import abc
+import random
+import typing
+from datetime import timedelta
+
+from pip._vendor.tenacity import _utils
+
+if typing.TYPE_CHECKING:
+ from pip._vendor.tenacity import RetryCallState
+
+wait_unit_type = typing.Union[int, float, timedelta]
+
+
+def to_seconds(wait_unit: wait_unit_type) -> float:
+ return float(wait_unit.total_seconds() if isinstance(wait_unit, timedelta) else wait_unit)
+
+
+class wait_base(abc.ABC):
+ """Abstract base class for wait strategies."""
+
+ @abc.abstractmethod
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ pass
+
+ def __add__(self, other: "wait_base") -> "wait_combine":
+ return wait_combine(self, other)
+
+ def __radd__(self, other: "wait_base") -> typing.Union["wait_combine", "wait_base"]:
+ # make it possible to use multiple waits with the built-in sum function
+ if other == 0:
+ return self
+ return self.__add__(other)
+
+
+class wait_fixed(wait_base):
+ """Wait strategy that waits a fixed amount of time between each retry."""
+
+ def __init__(self, wait: wait_unit_type) -> None:
+ self.wait_fixed = to_seconds(wait)
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return self.wait_fixed
+
+
+class wait_none(wait_fixed):
+ """Wait strategy that doesn't wait at all before retrying."""
+
+ def __init__(self) -> None:
+ super().__init__(0)
+
+
+class wait_random(wait_base):
+ """Wait strategy that waits a random amount of time between min/max."""
+
+ def __init__(self, min: wait_unit_type = 0, max: wait_unit_type = 1) -> None: # noqa
+ self.wait_random_min = to_seconds(min)
+ self.wait_random_max = to_seconds(max)
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return self.wait_random_min + (random.random() * (self.wait_random_max - self.wait_random_min))
+
+
+class wait_combine(wait_base):
+ """Combine several waiting strategies."""
+
+ def __init__(self, *strategies: wait_base) -> None:
+ self.wait_funcs = strategies
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ return sum(x(retry_state=retry_state) for x in self.wait_funcs)
+
+
+class wait_chain(wait_base):
+ """Chain two or more waiting strategies.
+
+ If all strategies are exhausted, the very last strategy is used
+ thereafter.
+
+ For example::
+
+        @retry(wait=wait_chain(*[wait_fixed(1) for i in range(3)] +
+                               [wait_fixed(2) for j in range(5)] +
+                               [wait_fixed(5) for k in range(4)]))
+        def wait_chained():
+            print("Wait 1s for 3 attempts, 2s for 5 attempts "
+                  "and 5s thereafter.")
+ """
+
+ def __init__(self, *strategies: wait_base) -> None:
+ self.strategies = strategies
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ wait_func_no = min(max(retry_state.attempt_number, 1), len(self.strategies))
+ wait_func = self.strategies[wait_func_no - 1]
+ return wait_func(retry_state=retry_state)
+
+
+class wait_incrementing(wait_base):
+    """Wait an incrementally increasing amount of time after each attempt.
+
+    The wait starts at `start`, grows by `increment` on every attempt, and
+    is capped at the maximum value `max`.
+    """
+
+ def __init__(
+ self,
+ start: wait_unit_type = 0,
+ increment: wait_unit_type = 100,
+ max: wait_unit_type = _utils.MAX_WAIT, # noqa
+ ) -> None:
+ self.start = to_seconds(start)
+ self.increment = to_seconds(increment)
+ self.max = to_seconds(max)
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ result = self.start + (self.increment * (retry_state.attempt_number - 1))
+ return max(0, min(result, self.max))
+
+
+class wait_exponential(wait_base):
+ """Wait strategy that applies exponential backoff.
+
+ It allows for a customized multiplier and an ability to restrict the
+ upper and lower limits to some maximum and minimum value.
+
+ The intervals are fixed (i.e. there is no jitter), so this strategy is
+ suitable for balancing retries against latency when a required resource is
+ unavailable for an unknown duration, but *not* suitable for resolving
+ contention between multiple processes for a shared resource. Use
+ wait_random_exponential for the latter case.
+ """
+
+ def __init__(
+ self,
+ multiplier: typing.Union[int, float] = 1,
+ max: wait_unit_type = _utils.MAX_WAIT, # noqa
+ exp_base: typing.Union[int, float] = 2,
+ min: wait_unit_type = 0, # noqa
+ ) -> None:
+ self.multiplier = multiplier
+ self.min = to_seconds(min)
+ self.max = to_seconds(max)
+ self.exp_base = exp_base
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ try:
+ exp = self.exp_base ** (retry_state.attempt_number - 1)
+ result = self.multiplier * exp
+ except OverflowError:
+ return self.max
+ return max(max(0, self.min), min(result, self.max))
+
+
+class wait_random_exponential(wait_exponential):
+ """Random wait with exponentially widening window.
+
+ An exponential backoff strategy used to mediate contention between multiple
+ uncoordinated processes for a shared resource in distributed systems. This
+ is the sense in which "exponential backoff" is meant in e.g. Ethernet
+ networking, and corresponds to the "Full Jitter" algorithm described in
+ this blog post:
+
+ https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
+
+ Each retry occurs at a random time in a geometrically expanding interval.
+ It allows for a custom multiplier and an ability to restrict the upper
+ limit of the random interval to some maximum value.
+
+ Example::
+
+ wait_random_exponential(multiplier=0.5, # initial window 0.5s
+ max=60) # max 60s timeout
+
+    When waiting for an unavailable resource to become available again, as
+    opposed to trying to resolve contention for a shared resource, the
+    wait_exponential strategy (which has no jitter) may be preferable.
+
+ """
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ high = super().__call__(retry_state=retry_state)
+ return random.uniform(0, high)
+
+
+class wait_exponential_jitter(wait_base):
+ """Wait strategy that applies exponential backoff and jitter.
+
+ It allows for a customized initial wait, maximum wait and jitter.
+
+ This implements the strategy described here:
+ https://cloud.google.com/storage/docs/retry-strategy
+
+ The wait time is min(initial * (2**n + random.uniform(0, jitter)), maximum)
+ where n is the retry count.
+ """
+
+ def __init__(
+ self,
+ initial: float = 1,
+ max: float = _utils.MAX_WAIT, # noqa
+ exp_base: float = 2,
+ jitter: float = 1,
+ ) -> None:
+ self.initial = initial
+ self.max = max
+ self.exp_base = exp_base
+ self.jitter = jitter
+
+ def __call__(self, retry_state: "RetryCallState") -> float:
+ jitter = random.uniform(0, self.jitter)
+ try:
+ exp = self.exp_base ** (retry_state.attempt_number - 1)
+ result = self.initial * exp + jitter
+ except OverflowError:
+ result = self.max
+ return max(0, min(result, self.max))
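+
+
+# Editor's note: a hedged sketch of the wait_exponential arithmetic above:
+# multiplier * exp_base ** (attempt - 1), clamped to [min, max]. The lazy
+# import avoids a circular import at module load time.
+def _exponential_wait_example() -> typing.List[float]:
+    from pip._vendor.tenacity import Retrying, RetryCallState
+
+    strategy = wait_exponential(multiplier=1, min=2, max=10)
+    state = RetryCallState(Retrying(), fn=None, args=(), kwargs={})
+    waits = []
+    for _ in range(5):
+        waits.append(strategy(retry_state=state))
+        state.attempt_number += 1
+    return waits  # [2.0, 2.0, 4.0, 8.0, 10.0]: doubling, clamped to [2, 10]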
diff --git a/third_party/python/pip/pip/_vendor/tomli/__init__.py b/third_party/python/pip/pip/_vendor/tomli/__init__.py
new file mode 100644
index 0000000000..4c6ec97ec6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tomli/__init__.py
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+# Licensed to PSF under a Contributor Agreement.
+
+__all__ = ("loads", "load", "TOMLDecodeError")
+__version__ = "2.0.1" # DO NOT EDIT THIS LINE MANUALLY. LET bump2version UTILITY DO IT
+
+from ._parser import TOMLDecodeError, load, loads
+
+# Pretend this exception was created here.
+TOMLDecodeError.__module__ = __name__
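+
+
+# Editor's note: a hedged usage sketch of the two entry points exported
+# above; `load` requires a file object opened in binary mode.
+def _tomli_example() -> dict:
+    doc = 'title = "example"\n\n[owner]\nname = "editor"\n'
+    data = loads(doc)
+    assert data["owner"]["name"] == "editor"
+    return data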
diff --git a/third_party/python/pip/pip/_vendor/tomli/_parser.py b/third_party/python/pip/pip/_vendor/tomli/_parser.py
new file mode 100644
index 0000000000..f1bb0aa19a
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tomli/_parser.py
@@ -0,0 +1,691 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+# Licensed to PSF under a Contributor Agreement.
+
+from __future__ import annotations
+
+from collections.abc import Iterable
+import string
+from types import MappingProxyType
+from typing import Any, BinaryIO, NamedTuple
+
+from ._re import (
+ RE_DATETIME,
+ RE_LOCALTIME,
+ RE_NUMBER,
+ match_to_datetime,
+ match_to_localtime,
+ match_to_number,
+)
+from ._types import Key, ParseFloat, Pos
+
+ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127))
+
+# Neither of these sets includes the quotation mark or the backslash. They are
+# currently handled as separate cases in the parser functions.
+ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t")
+ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n")
+
+ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS
+ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS
+
+ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS
+
+TOML_WS = frozenset(" \t")
+TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n")
+BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_")
+KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'")
+HEXDIGIT_CHARS = frozenset(string.hexdigits)
+
+BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType(
+ {
+ "\\b": "\u0008", # backspace
+ "\\t": "\u0009", # tab
+ "\\n": "\u000A", # linefeed
+ "\\f": "\u000C", # form feed
+ "\\r": "\u000D", # carriage return
+ '\\"': "\u0022", # quote
+ "\\\\": "\u005C", # backslash
+ }
+)
+
+
+class TOMLDecodeError(ValueError):
+ """An error raised if a document is not valid TOML."""
+
+
+def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]:
+ """Parse TOML from a binary file object."""
+ b = __fp.read()
+ try:
+ s = b.decode()
+ except AttributeError:
+ raise TypeError(
+ "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
+ ) from None
+ return loads(s, parse_float=parse_float)
+
+
+def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
+ """Parse TOML from a string."""
+
+ # The spec allows converting "\r\n" to "\n", even in string
+ # literals. Let's do so to simplify parsing.
+ src = __s.replace("\r\n", "\n")
+ pos = 0
+ out = Output(NestedDict(), Flags())
+ header: Key = ()
+ parse_float = make_safe_parse_float(parse_float)
+
+ # Parse one statement at a time
+ # (typically means one line in TOML source)
+ while True:
+ # 1. Skip line leading whitespace
+ pos = skip_chars(src, pos, TOML_WS)
+
+ # 2. Parse rules. Expect one of the following:
+ # - end of file
+ # - end of line
+ # - comment
+ # - key/value pair
+ # - append dict to list (and move to its namespace)
+ # - create dict (and move to its namespace)
+ # Skip trailing whitespace when applicable.
+ try:
+ char = src[pos]
+ except IndexError:
+ break
+ if char == "\n":
+ pos += 1
+ continue
+ if char in KEY_INITIAL_CHARS:
+ pos = key_value_rule(src, pos, out, header, parse_float)
+ pos = skip_chars(src, pos, TOML_WS)
+ elif char == "[":
+ try:
+ second_char: str | None = src[pos + 1]
+ except IndexError:
+ second_char = None
+ out.flags.finalize_pending()
+ if second_char == "[":
+ pos, header = create_list_rule(src, pos, out)
+ else:
+ pos, header = create_dict_rule(src, pos, out)
+ pos = skip_chars(src, pos, TOML_WS)
+ elif char != "#":
+ raise suffixed_err(src, pos, "Invalid statement")
+
+ # 3. Skip comment
+ pos = skip_comment(src, pos)
+
+ # 4. Expect end of line or end of file
+ try:
+ char = src[pos]
+ except IndexError:
+ break
+ if char != "\n":
+ raise suffixed_err(
+ src, pos, "Expected newline or end of document after a statement"
+ )
+ pos += 1
+
+ return out.data.dict
+
+
+class Flags:
+ """Flags that map to parsed keys/namespaces."""
+
+ # Marks an immutable namespace (inline array or inline table).
+ FROZEN = 0
+ # Marks a nest that has been explicitly created and can no longer
+ # be opened using the "[table]" syntax.
+ EXPLICIT_NEST = 1
+
+ def __init__(self) -> None:
+ self._flags: dict[str, dict] = {}
+ self._pending_flags: set[tuple[Key, int]] = set()
+
+ def add_pending(self, key: Key, flag: int) -> None:
+ self._pending_flags.add((key, flag))
+
+ def finalize_pending(self) -> None:
+ for key, flag in self._pending_flags:
+ self.set(key, flag, recursive=False)
+ self._pending_flags.clear()
+
+ def unset_all(self, key: Key) -> None:
+ cont = self._flags
+ for k in key[:-1]:
+ if k not in cont:
+ return
+ cont = cont[k]["nested"]
+ cont.pop(key[-1], None)
+
+ def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003
+ cont = self._flags
+ key_parent, key_stem = key[:-1], key[-1]
+ for k in key_parent:
+ if k not in cont:
+ cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+ cont = cont[k]["nested"]
+ if key_stem not in cont:
+ cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}}
+ cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag)
+
+ def is_(self, key: Key, flag: int) -> bool:
+ if not key:
+ return False # document root has no flags
+ cont = self._flags
+ for k in key[:-1]:
+ if k not in cont:
+ return False
+ inner_cont = cont[k]
+ if flag in inner_cont["recursive_flags"]:
+ return True
+ cont = inner_cont["nested"]
+ key_stem = key[-1]
+ if key_stem in cont:
+ cont = cont[key_stem]
+ return flag in cont["flags"] or flag in cont["recursive_flags"]
+ return False
+
+
+class NestedDict:
+ def __init__(self) -> None:
+ # The parsed content of the TOML document
+ self.dict: dict[str, Any] = {}
+
+ def get_or_create_nest(
+ self,
+ key: Key,
+ *,
+ access_lists: bool = True,
+ ) -> dict:
+ cont: Any = self.dict
+ for k in key:
+ if k not in cont:
+ cont[k] = {}
+ cont = cont[k]
+ if access_lists and isinstance(cont, list):
+ cont = cont[-1]
+ if not isinstance(cont, dict):
+ raise KeyError("There is no nest behind this key")
+ return cont
+
+ def append_nest_to_list(self, key: Key) -> None:
+ cont = self.get_or_create_nest(key[:-1])
+ last_key = key[-1]
+ if last_key in cont:
+ list_ = cont[last_key]
+ if not isinstance(list_, list):
+ raise KeyError("An object other than list found behind this key")
+ list_.append({})
+ else:
+ cont[last_key] = [{}]
+
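+
+# Editor's note: a hedged sketch of how the parser builds nested tables.
+# get_or_create_nest() walks or creates a dict per key part, and
+# append_nest_to_list() backs a [[table]] header with a list of dicts.
+def _nested_dict_example() -> dict:
+    nd = NestedDict()
+    nd.get_or_create_nest(("tool", "example"))["answer"] = 42
+    nd.append_nest_to_list(("tool", "points"))
+    return nd.dict  # {"tool": {"example": {"answer": 42}, "points": [{}]}}
+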
+
+class Output(NamedTuple):
+ data: NestedDict
+ flags: Flags
+
+
+def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos:
+ try:
+ while src[pos] in chars:
+ pos += 1
+ except IndexError:
+ pass
+ return pos
+
+
+def skip_until(
+ src: str,
+ pos: Pos,
+ expect: str,
+ *,
+ error_on: frozenset[str],
+ error_on_eof: bool,
+) -> Pos:
+ try:
+ new_pos = src.index(expect, pos)
+ except ValueError:
+ new_pos = len(src)
+ if error_on_eof:
+ raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None
+
+ if not error_on.isdisjoint(src[pos:new_pos]):
+ while src[pos] not in error_on:
+ pos += 1
+ raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}")
+ return new_pos
+
+
+def skip_comment(src: str, pos: Pos) -> Pos:
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+ if char == "#":
+ return skip_until(
+ src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False
+ )
+ return pos
+
+
+def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos:
+ while True:
+ pos_before_skip = pos
+ pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
+ pos = skip_comment(src, pos)
+ if pos == pos_before_skip:
+ return pos
+
+
+def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
+ pos += 1 # Skip "["
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key = parse_key(src, pos)
+
+ if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Cannot declare {key} twice")
+ out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
+ try:
+ out.data.get_or_create_nest(key)
+ except KeyError:
+ raise suffixed_err(src, pos, "Cannot overwrite a value") from None
+
+ if not src.startswith("]", pos):
+ raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration")
+ return pos + 1, key
+
+
+def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]:
+ pos += 2 # Skip "[["
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key = parse_key(src, pos)
+
+ if out.flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
+ # Free the namespace now that it points to another empty list item...
+ out.flags.unset_all(key)
+ # ...but this key precisely is still prohibited from table declaration
+ out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False)
+ try:
+ out.data.append_nest_to_list(key)
+ except KeyError:
+ raise suffixed_err(src, pos, "Cannot overwrite a value") from None
+
+ if not src.startswith("]]", pos):
+ raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration")
+ return pos + 2, key
+
+
+def key_value_rule(
+ src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat
+) -> Pos:
+ pos, key, value = parse_key_value_pair(src, pos, parse_float)
+ key_parent, key_stem = key[:-1], key[-1]
+ abs_key_parent = header + key_parent
+
+ relative_path_cont_keys = (header + key[:i] for i in range(1, len(key)))
+ for cont_key in relative_path_cont_keys:
+ # Check that dotted key syntax does not redefine an existing table
+ if out.flags.is_(cont_key, Flags.EXPLICIT_NEST):
+ raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}")
+ # Containers in the relative path can't be opened with the table syntax or
+ # dotted key/value syntax in following table sections.
+ out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST)
+
+ if out.flags.is_(abs_key_parent, Flags.FROZEN):
+ raise suffixed_err(
+ src, pos, f"Cannot mutate immutable namespace {abs_key_parent}"
+ )
+
+ try:
+ nest = out.data.get_or_create_nest(abs_key_parent)
+ except KeyError:
+ raise suffixed_err(src, pos, "Cannot overwrite a value") from None
+ if key_stem in nest:
+ raise suffixed_err(src, pos, "Cannot overwrite a value")
+ # Mark inline table and array namespaces recursively immutable
+ if isinstance(value, (dict, list)):
+ out.flags.set(header + key, Flags.FROZEN, recursive=True)
+ nest[key_stem] = value
+ return pos
+
+
+def parse_key_value_pair(
+ src: str, pos: Pos, parse_float: ParseFloat
+) -> tuple[Pos, Key, Any]:
+ pos, key = parse_key(src, pos)
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+ if char != "=":
+ raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair")
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, value = parse_value(src, pos, parse_float)
+ return pos, key, value
+
+
+def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]:
+ pos, key_part = parse_key_part(src, pos)
+ key: Key = (key_part,)
+ pos = skip_chars(src, pos, TOML_WS)
+ while True:
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+ if char != ".":
+ return pos, key
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+ pos, key_part = parse_key_part(src, pos)
+ key += (key_part,)
+ pos = skip_chars(src, pos, TOML_WS)
+
+
+def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]:
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+ if char in BARE_KEY_CHARS:
+ start_pos = pos
+ pos = skip_chars(src, pos, BARE_KEY_CHARS)
+ return pos, src[start_pos:pos]
+ if char == "'":
+ return parse_literal_str(src, pos)
+ if char == '"':
+ return parse_one_line_basic_str(src, pos)
+ raise suffixed_err(src, pos, "Invalid initial character for a key part")
+
+
+def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]:
+ pos += 1
+ return parse_basic_str(src, pos, multiline=False)
+
+
+def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]:
+ pos += 1
+ array: list = []
+
+ pos = skip_comments_and_array_ws(src, pos)
+ if src.startswith("]", pos):
+ return pos + 1, array
+ while True:
+ pos, val = parse_value(src, pos, parse_float)
+ array.append(val)
+ pos = skip_comments_and_array_ws(src, pos)
+
+ c = src[pos : pos + 1]
+ if c == "]":
+ return pos + 1, array
+ if c != ",":
+ raise suffixed_err(src, pos, "Unclosed array")
+ pos += 1
+
+ pos = skip_comments_and_array_ws(src, pos)
+ if src.startswith("]", pos):
+ return pos + 1, array
+
+
+def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]:
+ pos += 1
+ nested_dict = NestedDict()
+ flags = Flags()
+
+ pos = skip_chars(src, pos, TOML_WS)
+ if src.startswith("}", pos):
+ return pos + 1, nested_dict.dict
+ while True:
+ pos, key, value = parse_key_value_pair(src, pos, parse_float)
+ key_parent, key_stem = key[:-1], key[-1]
+ if flags.is_(key, Flags.FROZEN):
+ raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}")
+ try:
+ nest = nested_dict.get_or_create_nest(key_parent, access_lists=False)
+ except KeyError:
+ raise suffixed_err(src, pos, "Cannot overwrite a value") from None
+ if key_stem in nest:
+ raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}")
+ nest[key_stem] = value
+ pos = skip_chars(src, pos, TOML_WS)
+ c = src[pos : pos + 1]
+ if c == "}":
+ return pos + 1, nested_dict.dict
+ if c != ",":
+ raise suffixed_err(src, pos, "Unclosed inline table")
+ if isinstance(value, (dict, list)):
+ flags.set(key, Flags.FROZEN, recursive=True)
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS)
+
+
+def parse_basic_str_escape(
+ src: str, pos: Pos, *, multiline: bool = False
+) -> tuple[Pos, str]:
+ escape_id = src[pos : pos + 2]
+ pos += 2
+ if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}:
+ # Skip whitespace until next non-whitespace character or end of
+ # the doc. Error if non-whitespace is found before newline.
+ if escape_id != "\\\n":
+ pos = skip_chars(src, pos, TOML_WS)
+ try:
+ char = src[pos]
+ except IndexError:
+ return pos, ""
+ if char != "\n":
+ raise suffixed_err(src, pos, "Unescaped '\\' in a string")
+ pos += 1
+ pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE)
+ return pos, ""
+ if escape_id == "\\u":
+ return parse_hex_char(src, pos, 4)
+ if escape_id == "\\U":
+ return parse_hex_char(src, pos, 8)
+ try:
+ return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id]
+ except KeyError:
+ raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None
+
+
+def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]:
+ return parse_basic_str_escape(src, pos, multiline=True)
+
+
+def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]:
+ hex_str = src[pos : pos + hex_len]
+ if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str):
+ raise suffixed_err(src, pos, "Invalid hex value")
+ pos += hex_len
+ hex_int = int(hex_str, 16)
+ if not is_unicode_scalar_value(hex_int):
+ raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value")
+ return pos, chr(hex_int)
+
+
+def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]:
+ pos += 1 # Skip starting apostrophe
+ start_pos = pos
+ pos = skip_until(
+ src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True
+ )
+ return pos + 1, src[start_pos:pos] # Skip ending apostrophe
+
+
+def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]:
+ pos += 3
+ if src.startswith("\n", pos):
+ pos += 1
+
+ if literal:
+ delim = "'"
+ end_pos = skip_until(
+ src,
+ pos,
+ "'''",
+ error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS,
+ error_on_eof=True,
+ )
+ result = src[pos:end_pos]
+ pos = end_pos + 3
+ else:
+ delim = '"'
+ pos, result = parse_basic_str(src, pos, multiline=True)
+
+ # Add at maximum two extra apostrophes/quotes if the end sequence
+ # is 4 or 5 chars long instead of just 3.
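+    # E.g. `'''a''''` ends in a four-quote run, so the value is "a'" (one
+    # apostrophe kept in the content); `'''a'''''` keeps two: "a''".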
+ if not src.startswith(delim, pos):
+ return pos, result
+ pos += 1
+ if not src.startswith(delim, pos):
+ return pos, result + delim
+ pos += 1
+ return pos, result + (delim * 2)
+
+
+def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]:
+ if multiline:
+ error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS
+ parse_escapes = parse_basic_str_escape_multiline
+ else:
+ error_on = ILLEGAL_BASIC_STR_CHARS
+ parse_escapes = parse_basic_str_escape
+ result = ""
+ start_pos = pos
+ while True:
+ try:
+ char = src[pos]
+ except IndexError:
+ raise suffixed_err(src, pos, "Unterminated string") from None
+ if char == '"':
+ if not multiline:
+ return pos + 1, result + src[start_pos:pos]
+ if src.startswith('"""', pos):
+ return pos + 3, result + src[start_pos:pos]
+ pos += 1
+ continue
+ if char == "\\":
+ result += src[start_pos:pos]
+ pos, parsed_escape = parse_escapes(src, pos)
+ result += parsed_escape
+ start_pos = pos
+ continue
+ if char in error_on:
+ raise suffixed_err(src, pos, f"Illegal character {char!r}")
+ pos += 1
+
+
+def parse_value( # noqa: C901
+ src: str, pos: Pos, parse_float: ParseFloat
+) -> tuple[Pos, Any]:
+ try:
+ char: str | None = src[pos]
+ except IndexError:
+ char = None
+
+ # IMPORTANT: order conditions based on speed of checking and likelihood
+
+ # Basic strings
+ if char == '"':
+ if src.startswith('"""', pos):
+ return parse_multiline_str(src, pos, literal=False)
+ return parse_one_line_basic_str(src, pos)
+
+ # Literal strings
+ if char == "'":
+ if src.startswith("'''", pos):
+ return parse_multiline_str(src, pos, literal=True)
+ return parse_literal_str(src, pos)
+
+ # Booleans
+ if char == "t":
+ if src.startswith("true", pos):
+ return pos + 4, True
+ if char == "f":
+ if src.startswith("false", pos):
+ return pos + 5, False
+
+ # Arrays
+ if char == "[":
+ return parse_array(src, pos, parse_float)
+
+ # Inline tables
+ if char == "{":
+ return parse_inline_table(src, pos, parse_float)
+
+ # Dates and times
+ datetime_match = RE_DATETIME.match(src, pos)
+ if datetime_match:
+ try:
+ datetime_obj = match_to_datetime(datetime_match)
+ except ValueError as e:
+ raise suffixed_err(src, pos, "Invalid date or datetime") from e
+ return datetime_match.end(), datetime_obj
+ localtime_match = RE_LOCALTIME.match(src, pos)
+ if localtime_match:
+ return localtime_match.end(), match_to_localtime(localtime_match)
+
+ # Integers and "normal" floats.
+ # The regex will greedily match any type starting with a decimal
+ # char, so needs to be located after handling of dates and times.
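+    # (E.g. given "1979-05-27", the number regex would match "1979" if tried
+    # first, leaving "-05-27" behind as a parse error.)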
+ number_match = RE_NUMBER.match(src, pos)
+ if number_match:
+ return number_match.end(), match_to_number(number_match, parse_float)
+
+ # Special floats
+ first_three = src[pos : pos + 3]
+ if first_three in {"inf", "nan"}:
+ return pos + 3, parse_float(first_three)
+ first_four = src[pos : pos + 4]
+ if first_four in {"-inf", "+inf", "-nan", "+nan"}:
+ return pos + 4, parse_float(first_four)
+
+ raise suffixed_err(src, pos, "Invalid value")
+
+
+def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError:
+ """Return a `TOMLDecodeError` where error message is suffixed with
+ coordinates in source."""
+
+ def coord_repr(src: str, pos: Pos) -> str:
+ if pos >= len(src):
+ return "end of document"
+ line = src.count("\n", 0, pos) + 1
+ if line == 1:
+ column = pos + 1
+ else:
+ column = pos - src.rindex("\n", 0, pos)
+ return f"line {line}, column {column}"
+
+ return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})")
+
+
+def is_unicode_scalar_value(codepoint: int) -> bool:
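+    # 55295 == 0xD7FF and 57344 == 0xE000: everything except the UTF-16
+    # surrogate range D800-DFFF is a scalar value (0x10FFFF is the maximum).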
+ return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111)
+
+
+def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat:
+ """A decorator to make `parse_float` safe.
+
+ `parse_float` must not return dicts or lists, because these types
+ would be mixed with parsed TOML tables and arrays, thus confusing
+ the parser. The returned decorated callable raises `ValueError`
+ instead of returning illegal types.
+ """
+ # The default `float` callable never returns illegal types. Optimize it.
+ if parse_float is float: # type: ignore[comparison-overlap]
+ return float
+
+ def safe_parse_float(float_str: str) -> Any:
+ float_value = parse_float(float_str)
+ if isinstance(float_value, (dict, list)):
+ raise ValueError("parse_float must not return dicts or lists")
+ return float_value
+
+ return safe_parse_float
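+
+# A minimal usage sketch (assuming `loads`, defined earlier in this module):
+#
+#     from decimal import Decimal
+#     loads("pi = 3.14", parse_float=Decimal)  # {'pi': Decimal('3.14')}
+#
+# With the wrapper above, a parse_float returning a dict or list raises
+# ValueError instead of corrupting the parsed document structure.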
diff --git a/third_party/python/pip/pip/_vendor/tomli/_re.py b/third_party/python/pip/pip/_vendor/tomli/_re.py
new file mode 100644
index 0000000000..994bb7493f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tomli/_re.py
@@ -0,0 +1,107 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+# Licensed to PSF under a Contributor Agreement.
+
+from __future__ import annotations
+
+from datetime import date, datetime, time, timedelta, timezone, tzinfo
+from functools import lru_cache
+import re
+from typing import Any
+
+from ._types import ParseFloat
+
+# E.g.
+# - 00:32:00.999999
+# - 00:32:00
+_TIME_RE_STR = r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9])(?:\.([0-9]{1,6})[0-9]*)?"
+
+RE_NUMBER = re.compile(
+ r"""
+0
+(?:
+ x[0-9A-Fa-f](?:_?[0-9A-Fa-f])* # hex
+ |
+ b[01](?:_?[01])* # bin
+ |
+ o[0-7](?:_?[0-7])* # oct
+)
+|
+[+-]?(?:0|[1-9](?:_?[0-9])*) # dec, integer part
+(?P<floatpart>
+ (?:\.[0-9](?:_?[0-9])*)? # optional fractional part
+ (?:[eE][+-]?[0-9](?:_?[0-9])*)? # optional exponent part
+)
+""",
+ flags=re.VERBOSE,
+)
+RE_LOCALTIME = re.compile(_TIME_RE_STR)
+RE_DATETIME = re.compile(
+ rf"""
+([0-9]{{4}})-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01]) # date, e.g. 1988-10-27
+(?:
+ [Tt ]
+ {_TIME_RE_STR}
+ (?:([Zz])|([+-])([01][0-9]|2[0-3]):([0-5][0-9]))? # optional time offset
+)?
+""",
+ flags=re.VERBOSE,
+)
+
+
+def match_to_datetime(match: re.Match) -> datetime | date:
+ """Convert a `RE_DATETIME` match to `datetime.datetime` or `datetime.date`.
+
+ Raises ValueError if the match does not correspond to a valid date
+ or datetime.
+ """
+ (
+ year_str,
+ month_str,
+ day_str,
+ hour_str,
+ minute_str,
+ sec_str,
+ micros_str,
+ zulu_time,
+ offset_sign_str,
+ offset_hour_str,
+ offset_minute_str,
+ ) = match.groups()
+ year, month, day = int(year_str), int(month_str), int(day_str)
+ if hour_str is None:
+ return date(year, month, day)
+ hour, minute, sec = int(hour_str), int(minute_str), int(sec_str)
+ micros = int(micros_str.ljust(6, "0")) if micros_str else 0
+ if offset_sign_str:
+ tz: tzinfo | None = cached_tz(
+ offset_hour_str, offset_minute_str, offset_sign_str
+ )
+ elif zulu_time:
+ tz = timezone.utc
+ else: # local date-time
+ tz = None
+ return datetime(year, month, day, hour, minute, sec, micros, tzinfo=tz)
+
+
+@lru_cache(maxsize=None)
+def cached_tz(hour_str: str, minute_str: str, sign_str: str) -> timezone:
+ sign = 1 if sign_str == "+" else -1
+ return timezone(
+ timedelta(
+ hours=sign * int(hour_str),
+ minutes=sign * int(minute_str),
+ )
+ )
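+
+# (Memoized: offsets repeat throughout a document, and the argument space is
+# bounded -- the datetime regex only admits hours "00".."23" and minutes
+# "00".."59", plus a sign.)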
+
+
+def match_to_localtime(match: re.Match) -> time:
+ hour_str, minute_str, sec_str, micros_str = match.groups()
+ micros = int(micros_str.ljust(6, "0")) if micros_str else 0
+ return time(int(hour_str), int(minute_str), int(sec_str), micros)
+
+
+def match_to_number(match: re.Match, parse_float: ParseFloat) -> Any:
+ if match.group("floatpart"):
+ return parse_float(match.group())
+ return int(match.group(), 0)
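+
+# (`int(s, 0)` infers the base from the literal's prefix, so "0x10", "0o17",
+# "0b11" and plain decimals all parse; underscores such as "1_000" are
+# accepted by `int` directly.)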
diff --git a/third_party/python/pip/pip/_vendor/tomli/_types.py b/third_party/python/pip/pip/_vendor/tomli/_types.py
new file mode 100644
index 0000000000..d949412e03
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/tomli/_types.py
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: MIT
+# SPDX-FileCopyrightText: 2021 Taneli Hukkinen
+# Licensed to PSF under a Contributor Agreement.
+
+from typing import Any, Callable, Tuple
+
+# Type annotations
+ParseFloat = Callable[[str], Any]
+Key = Tuple[str, ...]
+Pos = int
diff --git a/third_party/python/pip/pip/_vendor/typing_extensions.py b/third_party/python/pip/pip/_vendor/typing_extensions.py
new file mode 100644
index 0000000000..34199c2a98
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/typing_extensions.py
@@ -0,0 +1,2209 @@
+import abc
+import collections
+import collections.abc
+import functools
+import operator
+import sys
+import types as _types
+import typing
+
+
+__all__ = [
+ # Super-special typing primitives.
+ 'Any',
+ 'ClassVar',
+ 'Concatenate',
+ 'Final',
+ 'LiteralString',
+ 'ParamSpec',
+ 'ParamSpecArgs',
+ 'ParamSpecKwargs',
+ 'Self',
+ 'Type',
+ 'TypeVar',
+ 'TypeVarTuple',
+ 'Unpack',
+
+ # ABCs (from collections.abc).
+ 'Awaitable',
+ 'AsyncIterator',
+ 'AsyncIterable',
+ 'Coroutine',
+ 'AsyncGenerator',
+ 'AsyncContextManager',
+ 'ChainMap',
+
+ # Concrete collection types.
+ 'ContextManager',
+ 'Counter',
+ 'Deque',
+ 'DefaultDict',
+ 'NamedTuple',
+ 'OrderedDict',
+ 'TypedDict',
+
+ # Structural checks, a.k.a. protocols.
+ 'SupportsIndex',
+
+ # One-off things.
+ 'Annotated',
+ 'assert_never',
+ 'assert_type',
+ 'clear_overloads',
+ 'dataclass_transform',
+ 'get_overloads',
+ 'final',
+ 'get_args',
+ 'get_origin',
+ 'get_type_hints',
+ 'IntVar',
+ 'is_typeddict',
+ 'Literal',
+ 'NewType',
+ 'overload',
+ 'override',
+ 'Protocol',
+ 'reveal_type',
+ 'runtime',
+ 'runtime_checkable',
+ 'Text',
+ 'TypeAlias',
+ 'TypeGuard',
+ 'TYPE_CHECKING',
+ 'Never',
+ 'NoReturn',
+ 'Required',
+ 'NotRequired',
+]
+
+# for backward compatibility
+PEP_560 = True
+GenericMeta = type
+
+# The functions below are modified copies of typing internal helpers.
+# They are needed by _ProtocolMeta and they provide support for PEP 646.
+
+_marker = object()
+
+
+def _check_generic(cls, parameters, elen=_marker):
+ """Check correct count for parameters of a generic cls (internal helper).
+ This gives a nice error message in case of count mismatch.
+ """
+ if not elen:
+ raise TypeError(f"{cls} is not a generic class")
+ if elen is _marker:
+ if not hasattr(cls, "__parameters__") or not cls.__parameters__:
+ raise TypeError(f"{cls} is not a generic class")
+ elen = len(cls.__parameters__)
+ alen = len(parameters)
+ if alen != elen:
+ if hasattr(cls, "__parameters__"):
+ parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
+ num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
+ if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
+ return
+ raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
+ f" actual {alen}, expected {elen}")
+
+
+if sys.version_info >= (3, 10):
+ def _should_collect_from_parameters(t):
+ return isinstance(
+ t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
+ )
+elif sys.version_info >= (3, 9):
+ def _should_collect_from_parameters(t):
+ return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
+else:
+ def _should_collect_from_parameters(t):
+ return isinstance(t, typing._GenericAlias) and not t._special
+
+
+def _collect_type_vars(types, typevar_types=None):
+ """Collect all type variable contained in types in order of
+ first appearance (lexicographic order). For example::
+
+ _collect_type_vars((T, List[S, T])) == (T, S)
+ """
+ if typevar_types is None:
+ typevar_types = typing.TypeVar
+ tvars = []
+ for t in types:
+ if (
+ isinstance(t, typevar_types) and
+ t not in tvars and
+ not _is_unpack(t)
+ ):
+ tvars.append(t)
+ if _should_collect_from_parameters(t):
+ tvars.extend([t for t in t.__parameters__ if t not in tvars])
+ return tuple(tvars)
+
+
+NoReturn = typing.NoReturn
+
+# Some unconstrained type variables. These are used by the container types.
+# (These are not for export.)
+T = typing.TypeVar('T') # Any type.
+KT = typing.TypeVar('KT') # Key type.
+VT = typing.TypeVar('VT') # Value type.
+T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
+T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
+
+
+if sys.version_info >= (3, 11):
+ from typing import Any
+else:
+
+ class _AnyMeta(type):
+ def __instancecheck__(self, obj):
+ if self is Any:
+ raise TypeError("typing_extensions.Any cannot be used with isinstance()")
+ return super().__instancecheck__(obj)
+
+ def __repr__(self):
+ if self is Any:
+ return "typing_extensions.Any"
+ return super().__repr__()
+
+ class Any(metaclass=_AnyMeta):
+ """Special type indicating an unconstrained type.
+ - Any is compatible with every type.
+        - Any is assumed to have all methods.
+        - All values are assumed to be instances of Any.
+ Note that all the above statements are true from the point of view of
+ static type checkers. At runtime, Any should not be used with instance
+ checks.
+ """
+ def __new__(cls, *args, **kwargs):
+ if cls is Any:
+ raise TypeError("Any cannot be instantiated")
+ return super().__new__(cls, *args, **kwargs)
+
+
+ClassVar = typing.ClassVar
+
+# On older versions of typing there is an internal class named "Final".
+# 3.8+
+if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
+ Final = typing.Final
+# 3.7
+else:
+ class _FinalForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+ Final = _FinalForm('Final',
+ doc="""A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.""")
+
+if sys.version_info >= (3, 11):
+ final = typing.final
+else:
+ # @final exists in 3.8+, but we backport it for all versions
+ # before 3.11 to keep support for the __final__ attribute.
+ # See https://bugs.python.org/issue46342
+ def final(f):
+ """This decorator can be used to indicate to type checkers that
+        the decorated method cannot be overridden, and the decorated class
+ cannot be subclassed. For example:
+
+ class Base:
+ @final
+ def done(self) -> None:
+ ...
+ class Sub(Base):
+ def done(self) -> None: # Error reported by type checker
+ ...
+ @final
+ class Leaf:
+ ...
+ class Other(Leaf): # Error reported by type checker
+ ...
+
+ There is no runtime checking of these properties. The decorator
+ sets the ``__final__`` attribute to ``True`` on the decorated object
+ to allow runtime introspection.
+ """
+ try:
+ f.__final__ = True
+ except (AttributeError, TypeError):
+ # Skip the attribute silently if it is not writable.
+ # AttributeError happens if the object has __slots__ or a
+ # read-only property, TypeError if it's a builtin class.
+ pass
+ return f
+
+
+def IntVar(name):
+ return typing.TypeVar(name)
+
+
+# 3.8+:
+if hasattr(typing, 'Literal'):
+ Literal = typing.Literal
+# 3.7:
+else:
+ class _LiteralForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ return typing._GenericAlias(self, parameters)
+
+ Literal = _LiteralForm('Literal',
+ doc="""A type that can be used to indicate to type checkers
+ that the corresponding value has a value literally equivalent
+ to the provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to
+ the value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime
+ checking verifying that the parameter is actually a value
+ instead of a type.""")
+
+
+_overload_dummy = typing._overload_dummy # noqa
+
+
+if hasattr(typing, "get_overloads"): # 3.11+
+ overload = typing.overload
+ get_overloads = typing.get_overloads
+ clear_overloads = typing.clear_overloads
+else:
+ # {module: {qualname: {firstlineno: func}}}
+ _overload_registry = collections.defaultdict(
+ functools.partial(collections.defaultdict, dict)
+ )
+
+ def overload(func):
+ """Decorator for overloaded functions/methods.
+
+ In a stub file, place two or more stub definitions for the same
+ function in a row, each decorated with @overload. For example:
+
+ @overload
+ def utf8(value: None) -> None: ...
+ @overload
+ def utf8(value: bytes) -> bytes: ...
+ @overload
+ def utf8(value: str) -> bytes: ...
+
+ In a non-stub file (i.e. a regular .py file), do the same but
+ follow it with an implementation. The implementation should *not*
+ be decorated with @overload. For example:
+
+ @overload
+ def utf8(value: None) -> None: ...
+ @overload
+ def utf8(value: bytes) -> bytes: ...
+ @overload
+ def utf8(value: str) -> bytes: ...
+ def utf8(value):
+ # implementation goes here
+
+ The overloads for a function can be retrieved at runtime using the
+ get_overloads() function.
+ """
+ # classmethod and staticmethod
+ f = getattr(func, "__func__", func)
+ try:
+ _overload_registry[f.__module__][f.__qualname__][
+ f.__code__.co_firstlineno
+ ] = func
+ except AttributeError:
+ # Not a normal function; ignore.
+ pass
+ return _overload_dummy
+
+ def get_overloads(func):
+ """Return all defined overloads for *func* as a sequence."""
+ # classmethod and staticmethod
+ f = getattr(func, "__func__", func)
+ if f.__module__ not in _overload_registry:
+ return []
+ mod_dict = _overload_registry[f.__module__]
+ if f.__qualname__ not in mod_dict:
+ return []
+ return list(mod_dict[f.__qualname__].values())
+
+ def clear_overloads():
+ """Clear all overloads in the registry."""
+ _overload_registry.clear()
+
+
+# This is not a real generic class. Don't use outside annotations.
+Type = typing.Type
+
+# Various ABCs mimicking those in collections.abc.
+# A few are simply re-exported for completeness.
+
+
+Awaitable = typing.Awaitable
+Coroutine = typing.Coroutine
+AsyncIterable = typing.AsyncIterable
+AsyncIterator = typing.AsyncIterator
+Deque = typing.Deque
+ContextManager = typing.ContextManager
+AsyncContextManager = typing.AsyncContextManager
+DefaultDict = typing.DefaultDict
+
+# 3.7.2+
+if hasattr(typing, 'OrderedDict'):
+ OrderedDict = typing.OrderedDict
+# 3.7.0-3.7.2
+else:
+ OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
+
+Counter = typing.Counter
+ChainMap = typing.ChainMap
+AsyncGenerator = typing.AsyncGenerator
+NewType = typing.NewType
+Text = typing.Text
+TYPE_CHECKING = typing.TYPE_CHECKING
+
+
+_PROTO_WHITELIST = ['Callable', 'Awaitable',
+ 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
+ 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
+ 'ContextManager', 'AsyncContextManager']
+
+
+def _get_protocol_attrs(cls):
+ attrs = set()
+ for base in cls.__mro__[:-1]: # without object
+ if base.__name__ in ('Protocol', 'Generic'):
+ continue
+ annotations = getattr(base, '__annotations__', {})
+ for attr in list(base.__dict__.keys()) + list(annotations.keys()):
+ if (not attr.startswith('_abc_') and attr not in (
+ '__abstractmethods__', '__annotations__', '__weakref__',
+ '_is_protocol', '_is_runtime_protocol', '__dict__',
+ '__args__', '__slots__',
+ '__next_in_mro__', '__parameters__', '__origin__',
+ '__orig_bases__', '__extra__', '__tree_hash__',
+ '__doc__', '__subclasshook__', '__init__', '__new__',
+ '__module__', '_MutableMapping__marker', '_gorg')):
+ attrs.add(attr)
+ return attrs
+
+
+def _is_callable_members_only(cls):
+ return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
+
+
+def _maybe_adjust_parameters(cls):
+ """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__.
+
+ The contents of this function are very similar
+ to logic found in typing.Generic.__init_subclass__
+ on the CPython main branch.
+ """
+ tvars = []
+ if '__orig_bases__' in cls.__dict__:
+ tvars = typing._collect_type_vars(cls.__orig_bases__)
+ # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
+ # If found, tvars must be a subset of it.
+ # If not found, tvars is it.
+ # Also check for and reject plain Generic,
+ # and reject multiple Generic[...] and/or Protocol[...].
+ gvars = None
+ for base in cls.__orig_bases__:
+ if (isinstance(base, typing._GenericAlias) and
+ base.__origin__ in (typing.Generic, Protocol)):
+ # for error messages
+ the_base = base.__origin__.__name__
+ if gvars is not None:
+ raise TypeError(
+ "Cannot inherit from Generic[...]"
+ " and/or Protocol[...] multiple types.")
+ gvars = base.__parameters__
+ if gvars is None:
+ gvars = tvars
+ else:
+ tvarset = set(tvars)
+ gvarset = set(gvars)
+ if not tvarset <= gvarset:
+ s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
+ s_args = ', '.join(str(g) for g in gvars)
+ raise TypeError(f"Some type variables ({s_vars}) are"
+ f" not listed in {the_base}[{s_args}]")
+ tvars = gvars
+ cls.__parameters__ = tuple(tvars)
+
+
+# 3.8+
+if hasattr(typing, 'Protocol'):
+ Protocol = typing.Protocol
+# 3.7
+else:
+
+ def _no_init(self, *args, **kwargs):
+ if type(self)._is_protocol:
+ raise TypeError('Protocols cannot be instantiated')
+
+ class _ProtocolMeta(abc.ABCMeta): # noqa: B024
+ # This metaclass is a bit unfortunate and exists only because of the lack
+ # of __instancehook__.
+ def __instancecheck__(cls, instance):
+ # We need this method for situations where attributes are
+ # assigned in __init__.
+ if ((not getattr(cls, '_is_protocol', False) or
+ _is_callable_members_only(cls)) and
+ issubclass(instance.__class__, cls)):
+ return True
+ if cls._is_protocol:
+ if all(hasattr(instance, attr) and
+ (not callable(getattr(cls, attr, None)) or
+ getattr(instance, attr) is not None)
+ for attr in _get_protocol_attrs(cls)):
+ return True
+ return super().__instancecheck__(instance)
+
+ class Protocol(metaclass=_ProtocolMeta):
+ # There is quite a lot of overlapping code with typing.Generic.
+ # Unfortunately it is hard to avoid this while these live in two different
+ # modules. The duplicated code will be removed when Protocol is moved to typing.
+ """Base class for protocol classes. Protocol classes are defined as::
+
+ class Proto(Protocol):
+ def meth(self) -> int:
+ ...
+
+ Such classes are primarily used with static type checkers that recognize
+ structural subtyping (static duck-typing), for example::
+
+ class C:
+ def meth(self) -> int:
+ return 0
+
+ def func(x: Proto) -> int:
+ return x.meth()
+
+ func(C()) # Passes static type check
+
+ See PEP 544 for details. Protocol classes decorated with
+        @typing_extensions.runtime act as simple-minded runtime protocols that
+        check only the presence of given attributes, ignoring their type signatures.
+
+        Protocol classes can be generic; they are defined as::
+
+ class GenProto(Protocol[T]):
+ def meth(self) -> T:
+ ...
+ """
+ __slots__ = ()
+ _is_protocol = True
+
+ def __new__(cls, *args, **kwds):
+ if cls is Protocol:
+ raise TypeError("Type Protocol cannot be instantiated; "
+ "it can only be used as a base class")
+ return super().__new__(cls)
+
+ @typing._tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple):
+ params = (params,)
+ if not params and cls is not typing.Tuple:
+ raise TypeError(
+ f"Parameter list to {cls.__qualname__}[...] cannot be empty")
+ msg = "Parameters to generic types must be types."
+ params = tuple(typing._type_check(p, msg) for p in params) # noqa
+ if cls is Protocol:
+ # Generic can only be subscripted with unique type variables.
+ if not all(isinstance(p, typing.TypeVar) for p in params):
+ i = 0
+ while isinstance(params[i], typing.TypeVar):
+ i += 1
+ raise TypeError(
+ "Parameters to Protocol[...] must all be type variables."
+ f" Parameter {i + 1} is {params[i]}")
+ if len(set(params)) != len(params):
+ raise TypeError(
+ "Parameters to Protocol[...] must all be unique")
+ else:
+ # Subscripting a regular Generic subclass.
+ _check_generic(cls, params, len(cls.__parameters__))
+ return typing._GenericAlias(cls, params)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ if '__orig_bases__' in cls.__dict__:
+ error = typing.Generic in cls.__orig_bases__
+ else:
+ error = typing.Generic in cls.__bases__
+ if error:
+ raise TypeError("Cannot inherit from plain Generic")
+ _maybe_adjust_parameters(cls)
+
+ # Determine if this is a protocol or a concrete subclass.
+ if not cls.__dict__.get('_is_protocol', None):
+ cls._is_protocol = any(b is Protocol for b in cls.__bases__)
+
+ # Set (or override) the protocol subclass hook.
+ def _proto_hook(other):
+ if not cls.__dict__.get('_is_protocol', None):
+ return NotImplemented
+ if not getattr(cls, '_is_runtime_protocol', False):
+ if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
+ return NotImplemented
+ raise TypeError("Instance and class checks can only be used with"
+ " @runtime protocols")
+ if not _is_callable_members_only(cls):
+ if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
+ return NotImplemented
+ raise TypeError("Protocols with non-method members"
+ " don't support issubclass()")
+ if not isinstance(other, type):
+ # Same error as for issubclass(1, int)
+ raise TypeError('issubclass() arg 1 must be a class')
+ for attr in _get_protocol_attrs(cls):
+ for base in other.__mro__:
+ if attr in base.__dict__:
+ if base.__dict__[attr] is None:
+ return NotImplemented
+ break
+ annotations = getattr(base, '__annotations__', {})
+ if (isinstance(annotations, typing.Mapping) and
+ attr in annotations and
+ isinstance(other, _ProtocolMeta) and
+ other._is_protocol):
+ break
+ else:
+ return NotImplemented
+ return True
+ if '__subclasshook__' not in cls.__dict__:
+ cls.__subclasshook__ = _proto_hook
+
+ # We have nothing more to do for non-protocols.
+ if not cls._is_protocol:
+ return
+
+ # Check consistency of bases.
+ for base in cls.__bases__:
+ if not (base in (object, typing.Generic) or
+ base.__module__ == 'collections.abc' and
+ base.__name__ in _PROTO_WHITELIST or
+ isinstance(base, _ProtocolMeta) and base._is_protocol):
+ raise TypeError('Protocols can only inherit from other'
+ f' protocols, got {repr(base)}')
+ cls.__init__ = _no_init
+
+
+# 3.8+
+if hasattr(typing, 'runtime_checkable'):
+ runtime_checkable = typing.runtime_checkable
+# 3.7
+else:
+ def runtime_checkable(cls):
+ """Mark a protocol class as a runtime protocol, so that it
+ can be used with isinstance() and issubclass(). Raise TypeError
+ if applied to a non-protocol class.
+
+ This allows a simple-minded structural check very similar to the
+ one-offs in collections.abc such as Hashable.
+ """
+ if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
+ raise TypeError('@runtime_checkable can be only applied to protocol classes,'
+ f' got {cls!r}')
+ cls._is_runtime_protocol = True
+ return cls
+
+
+# Exists for backwards compatibility.
+runtime = runtime_checkable
+
+
+# 3.8+
+if hasattr(typing, 'SupportsIndex'):
+ SupportsIndex = typing.SupportsIndex
+# 3.7
+else:
+ @runtime_checkable
+ class SupportsIndex(Protocol):
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __index__(self) -> int:
+ pass
+
+
+if hasattr(typing, "Required"):
+ # The standard library TypedDict in Python 3.8 does not store runtime information
+ # about which (if any) keys are optional. See https://bugs.python.org/issue38834
+ # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
+ # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
+ # The standard library TypedDict below Python 3.11 does not store runtime
+ # information about optional and required keys when using Required or NotRequired.
+ # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
+ TypedDict = typing.TypedDict
+ _TypedDictMeta = typing._TypedDictMeta
+ is_typeddict = typing.is_typeddict
+else:
+ def _check_fails(cls, other):
+ try:
+ if sys._getframe(1).f_globals['__name__'] not in ['abc',
+ 'functools',
+ 'typing']:
+ # Typed dicts are only for static structural subtyping.
+ raise TypeError('TypedDict does not support instance and class checks')
+ except (AttributeError, ValueError):
+ pass
+ return False
+
+ def _dict_new(*args, **kwargs):
+ if not args:
+ raise TypeError('TypedDict.__new__(): not enough arguments')
+        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
+ return dict(*args, **kwargs)
+
+ _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
+
+ def _typeddict_new(*args, total=True, **kwargs):
+ if not args:
+ raise TypeError('TypedDict.__new__(): not enough arguments')
+        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
+        if args:
+            typename, args = args[0], args[1:]  # allow the "_typename" keyword to be passed
+ elif '_typename' in kwargs:
+ typename = kwargs.pop('_typename')
+ import warnings
+ warnings.warn("Passing '_typename' as keyword argument is deprecated",
+ DeprecationWarning, stacklevel=2)
+ else:
+ raise TypeError("TypedDict.__new__() missing 1 required positional "
+ "argument: '_typename'")
+ if args:
+ try:
+                fields, = args  # allow the "_fields" keyword to be passed
+ except ValueError:
+ raise TypeError('TypedDict.__new__() takes from 2 to 3 '
+ f'positional arguments but {len(args) + 2} '
+ 'were given')
+ elif '_fields' in kwargs and len(kwargs) == 1:
+ fields = kwargs.pop('_fields')
+ import warnings
+ warnings.warn("Passing '_fields' as keyword argument is deprecated",
+ DeprecationWarning, stacklevel=2)
+ else:
+ fields = None
+
+ if fields is None:
+ fields = kwargs
+ elif kwargs:
+ raise TypeError("TypedDict takes either a dict or keyword arguments,"
+ " but not both")
+
+ ns = {'__annotations__': dict(fields)}
+ try:
+ # Setting correct module is necessary to make typed dict classes pickleable.
+ ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ pass
+
+ return _TypedDictMeta(typename, (), ns, total=total)
+
+ _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
+ ' /, *, total=True, **kwargs)')
+
+ class _TypedDictMeta(type):
+ def __init__(cls, name, bases, ns, total=True):
+ super().__init__(name, bases, ns)
+
+ def __new__(cls, name, bases, ns, total=True):
+ # Create new typed dict class object.
+ # This method is called directly when TypedDict is subclassed,
+ # or via _typeddict_new when TypedDict is instantiated. This way
+ # TypedDict supports all three syntaxes described in its docstring.
+ # Subclasses and instances of TypedDict return actual dictionaries
+ # via _dict_new.
+ ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
+ # Don't insert typing.Generic into __bases__ here,
+ # or Generic.__init_subclass__ will raise TypeError
+ # in the super().__new__() call.
+ # Instead, monkey-patch __bases__ onto the class after it's been created.
+ tp_dict = super().__new__(cls, name, (dict,), ns)
+
+ if any(issubclass(base, typing.Generic) for base in bases):
+ tp_dict.__bases__ = (typing.Generic, dict)
+ _maybe_adjust_parameters(tp_dict)
+
+ annotations = {}
+ own_annotations = ns.get('__annotations__', {})
+ msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
+ own_annotations = {
+ n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
+ }
+ required_keys = set()
+ optional_keys = set()
+
+ for base in bases:
+ annotations.update(base.__dict__.get('__annotations__', {}))
+ required_keys.update(base.__dict__.get('__required_keys__', ()))
+ optional_keys.update(base.__dict__.get('__optional_keys__', ()))
+
+ annotations.update(own_annotations)
+ for annotation_key, annotation_type in own_annotations.items():
+ annotation_origin = get_origin(annotation_type)
+ if annotation_origin is Annotated:
+ annotation_args = get_args(annotation_type)
+ if annotation_args:
+ annotation_type = annotation_args[0]
+ annotation_origin = get_origin(annotation_type)
+
+ if annotation_origin is Required:
+ required_keys.add(annotation_key)
+ elif annotation_origin is NotRequired:
+ optional_keys.add(annotation_key)
+ elif total:
+ required_keys.add(annotation_key)
+ else:
+ optional_keys.add(annotation_key)
+
+ tp_dict.__annotations__ = annotations
+ tp_dict.__required_keys__ = frozenset(required_keys)
+ tp_dict.__optional_keys__ = frozenset(optional_keys)
+ if not hasattr(tp_dict, '__total__'):
+ tp_dict.__total__ = total
+ return tp_dict
+
+ __instancecheck__ = __subclasscheck__ = _check_fails
+
+ TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
+ TypedDict.__module__ = __name__
+ TypedDict.__doc__ = \
+ """A simple typed name space. At runtime it is equivalent to a plain dict.
+
+ TypedDict creates a dictionary type that expects all of its
+ instances to have a certain set of keys, with each key
+ associated with a value of a consistent type. This expectation
+ is not checked at runtime but is only enforced by type checkers.
+ Usage::
+
+ class Point2D(TypedDict):
+ x: int
+ y: int
+ label: str
+
+ a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
+ b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
+
+ assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+ The type info can be accessed via the Point2D.__annotations__ dict, and
+ the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+ TypedDict supports two additional equivalent forms::
+
+ Point2D = TypedDict('Point2D', x=int, y=int, label=str)
+ Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+ The class syntax is only supported in Python 3.6+, while two other
+        syntax forms work for Python 2.7 and 3.2+.
+ """
+
+ if hasattr(typing, "_TypedDictMeta"):
+ _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
+ else:
+ _TYPEDDICT_TYPES = (_TypedDictMeta,)
+
+ def is_typeddict(tp):
+ """Check if an annotation is a TypedDict class
+
+ For example::
+ class Film(TypedDict):
+ title: str
+ year: int
+
+ is_typeddict(Film) # => True
+ is_typeddict(Union[list, str]) # => False
+ """
+ return isinstance(tp, tuple(_TYPEDDICT_TYPES))
+
+
+if hasattr(typing, "assert_type"):
+ assert_type = typing.assert_type
+
+else:
+ def assert_type(__val, __typ):
+ """Assert (to the type checker) that the value is of the given type.
+
+ When the type checker encounters a call to assert_type(), it
+ emits an error if the value is not of the specified type::
+
+ def greet(name: str) -> None:
+ assert_type(name, str) # ok
+ assert_type(name, int) # type checker error
+
+ At runtime this returns the first argument unchanged and otherwise
+ does nothing.
+ """
+ return __val
+
+
+if hasattr(typing, "Required"):
+ get_type_hints = typing.get_type_hints
+else:
+ import functools
+ import types
+
+ # replaces _strip_annotations()
+ def _strip_extras(t):
+ """Strips Annotated, Required and NotRequired from a given type."""
+ if isinstance(t, _AnnotatedAlias):
+ return _strip_extras(t.__origin__)
+ if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
+ return _strip_extras(t.__args__[0])
+ if isinstance(t, typing._GenericAlias):
+ stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ return t.copy_with(stripped_args)
+ if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias):
+ stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ return types.GenericAlias(t.__origin__, stripped_args)
+ if hasattr(types, "UnionType") and isinstance(t, types.UnionType):
+ stripped_args = tuple(_strip_extras(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ return functools.reduce(operator.or_, stripped_args)
+
+ return t
+
+ def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
+ """Return type hints for an object.
+
+ This is often the same as obj.__annotations__, but it handles
+ forward references encoded as string literals, adds Optional[t] if a
+ default value equal to None is set and recursively replaces all
+ 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
+ (unless 'include_extras=True').
+
+ The argument may be a module, class, method, or function. The annotations
+ are returned as a dictionary. For classes, annotations include also
+ inherited members.
+
+ TypeError is raised if the argument is not of a type that can contain
+ annotations, and an empty dictionary is returned if no annotations are
+ present.
+
+ BEWARE -- the behavior of globalns and localns is counterintuitive
+ (unless you are familiar with how eval() and exec() work). The
+ search order is locals first, then globals.
+
+ - If no dict arguments are passed, an attempt is made to use the
+ globals from obj (or the respective module's globals for classes),
+ and these are also used as the locals. If the object does not appear
+ to have globals, an empty dictionary is used.
+
+ - If one dict argument is passed, it is used for both globals and
+ locals.
+
+ - If two dict arguments are passed, they specify globals and
+ locals, respectively.
+ """
+ if hasattr(typing, "Annotated"):
+ hint = typing.get_type_hints(
+ obj, globalns=globalns, localns=localns, include_extras=True
+ )
+ else:
+ hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
+ if include_extras:
+ return hint
+ return {k: _strip_extras(t) for k, t in hint.items()}
+
+
+# Python 3.9+ has PEP 593 (Annotated)
+if hasattr(typing, 'Annotated'):
+ Annotated = typing.Annotated
+ # Not exported and not a public API, but needed for get_origin() and get_args()
+ # to work.
+ _AnnotatedAlias = typing._AnnotatedAlias
+# 3.7-3.8
+else:
+ class _AnnotatedAlias(typing._GenericAlias, _root=True):
+ """Runtime representation of an annotated type.
+
+ At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
+        with extra annotations. The alias behaves like a normal typing alias:
+        instantiating it is the same as instantiating the underlying type, and
+        binding it to types is also the same.
+ """
+ def __init__(self, origin, metadata):
+ if isinstance(origin, _AnnotatedAlias):
+ metadata = origin.__metadata__ + metadata
+ origin = origin.__origin__
+ super().__init__(origin, origin)
+ self.__metadata__ = metadata
+
+ def copy_with(self, params):
+ assert len(params) == 1
+ new_type = params[0]
+ return _AnnotatedAlias(new_type, self.__metadata__)
+
+ def __repr__(self):
+ return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
+ f"{', '.join(repr(a) for a in self.__metadata__)}]")
+
+ def __reduce__(self):
+ return operator.getitem, (
+ Annotated, (self.__origin__,) + self.__metadata__
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, _AnnotatedAlias):
+ return NotImplemented
+ if self.__origin__ != other.__origin__:
+ return False
+ return self.__metadata__ == other.__metadata__
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__metadata__))
+
+ class Annotated:
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type (and will be in
+ the __origin__ field), the remaining arguments are kept as a tuple in
+        the __metadata__ field.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise TypeError("Type Annotated cannot be instantiated.")
+
+ @typing._tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError("Annotated[...] should be used "
+ "with at least two arguments (a type and an "
+ "annotation).")
+ allowed_special_forms = (ClassVar, Final)
+ if get_origin(params[0]) in allowed_special_forms:
+ origin = params[0]
+ else:
+ msg = "Annotated[t, ...]: t must be a type."
+ origin = typing._type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return _AnnotatedAlias(origin, metadata)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ raise TypeError(
+ f"Cannot subclass {cls.__module__}.Annotated"
+ )
+
+# Python 3.8 has get_origin() and get_args() but those implementations aren't
+# Annotated-aware, so we can't use those. Python 3.9's versions don't support
+# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
+if sys.version_info[:2] >= (3, 10):
+ get_origin = typing.get_origin
+ get_args = typing.get_args
+# 3.7-3.9
+else:
+ try:
+ # 3.9+
+ from typing import _BaseGenericAlias
+ except ImportError:
+ _BaseGenericAlias = typing._GenericAlias
+ try:
+ # 3.9+
+ from typing import GenericAlias as _typing_GenericAlias
+ except ImportError:
+ _typing_GenericAlias = typing._GenericAlias
+
+ def get_origin(tp):
+ """Get the unsubscripted version of a type.
+
+ This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
+ and Annotated. Return None for unsupported types. Examples::
+
+ get_origin(Literal[42]) is Literal
+ get_origin(int) is None
+ get_origin(ClassVar[int]) is ClassVar
+ get_origin(Generic) is Generic
+ get_origin(Generic[T]) is Generic
+ get_origin(Union[T, int]) is Union
+ get_origin(List[Tuple[T, T]][int]) == list
+ get_origin(P.args) is P
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return Annotated
+ if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
+ ParamSpecArgs, ParamSpecKwargs)):
+ return tp.__origin__
+ if tp is typing.Generic:
+ return typing.Generic
+ return None
+
+ def get_args(tp):
+ """Get type arguments with all substitutions performed.
+
+ For unions, basic simplifications used by Union constructor are performed.
+ Examples::
+ get_args(Dict[str, int]) == (str, int)
+ get_args(int) == ()
+ get_args(Union[int, Union[T, int], str][int]) == (int, str)
+ get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
+ get_args(Callable[[], T][int]) == ([], int)
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return (tp.__origin__,) + tp.__metadata__
+ if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
+ if getattr(tp, "_special", False):
+ return ()
+ res = tp.__args__
+ if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
+ res = (list(res[:-1]), res[-1])
+ return res
+ return ()
+
+
+# 3.10+
+if hasattr(typing, 'TypeAlias'):
+ TypeAlias = typing.TypeAlias
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ class _TypeAliasForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ @_TypeAliasForm
+ def TypeAlias(self, parameters):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+ raise TypeError(f"{self} is not subscriptable")
+# 3.7-3.8
+else:
+ class _TypeAliasForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ TypeAlias = _TypeAliasForm('TypeAlias',
+ doc="""Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example
+ above.""")
+
+
+class _DefaultMixin:
+ """Mixin for TypeVarLike defaults."""
+
+ __slots__ = ()
+
+ def __init__(self, default):
+ if isinstance(default, (tuple, list)):
+ self.__default__ = tuple((typing._type_check(d, "Default must be a type")
+ for d in default))
+ elif default:
+ self.__default__ = typing._type_check(default, "Default must be a type")
+ else:
+ self.__default__ = None
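+
+# E.g. (a sketch of the PEP 696 ``default`` parameter wired up below):
+#     T = TypeVar("T", default=int)
+#     assert T.__default__ is int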
+
+
+# Add default and infer_variance parameters from PEP 696 and 695
+class TypeVar(typing.TypeVar, _DefaultMixin, _root=True):
+ """Type variable."""
+
+ __module__ = 'typing'
+
+ def __init__(self, name, *constraints, bound=None,
+ covariant=False, contravariant=False,
+ default=None, infer_variance=False):
+ super().__init__(name, *constraints, bound=bound, covariant=covariant,
+ contravariant=contravariant)
+ _DefaultMixin.__init__(self, default)
+ self.__infer_variance__ = infer_variance
+
+ # for pickling:
+ try:
+ def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ def_mod = None
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+
+# Python 3.10+ has PEP 612
+if hasattr(typing, 'ParamSpecArgs'):
+ ParamSpecArgs = typing.ParamSpecArgs
+ ParamSpecKwargs = typing.ParamSpecKwargs
+# 3.7-3.9
+else:
+ class _Immutable:
+ """Mixin to indicate that object should not be copied."""
+ __slots__ = ()
+
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
+ class ParamSpecArgs(_Immutable):
+ """The args for a ParamSpec object.
+
+ Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
+
+ ParamSpecArgs objects have a reference back to their ParamSpec:
+
+ P.args.__origin__ is P
+
+ This type is meant for runtime introspection and has no special meaning to
+ static type checkers.
+ """
+ def __init__(self, origin):
+ self.__origin__ = origin
+
+ def __repr__(self):
+ return f"{self.__origin__.__name__}.args"
+
+ def __eq__(self, other):
+ if not isinstance(other, ParamSpecArgs):
+ return NotImplemented
+ return self.__origin__ == other.__origin__
+
+ class ParamSpecKwargs(_Immutable):
+ """The kwargs for a ParamSpec object.
+
+ Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
+
+ ParamSpecKwargs objects have a reference back to their ParamSpec:
+
+ P.kwargs.__origin__ is P
+
+ This type is meant for runtime introspection and has no special meaning to
+ static type checkers.
+ """
+ def __init__(self, origin):
+ self.__origin__ = origin
+
+ def __repr__(self):
+ return f"{self.__origin__.__name__}.kwargs"
+
+ def __eq__(self, other):
+ if not isinstance(other, ParamSpecKwargs):
+ return NotImplemented
+ return self.__origin__ == other.__origin__
+
+# 3.10+
+if hasattr(typing, 'ParamSpec'):
+
+ # Add default Parameter - PEP 696
+ class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True):
+ """Parameter specification variable."""
+
+ __module__ = 'typing'
+
+ def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
+ default=None):
+ super().__init__(name, bound=bound, covariant=covariant,
+ contravariant=contravariant)
+ _DefaultMixin.__init__(self, default)
+
+ # for pickling:
+ try:
+ def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ def_mod = None
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+# 3.7-3.9
+else:
+
+ # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+ class ParamSpec(list, _DefaultMixin):
+ """Parameter specification variable.
+
+ Usage::
+
+ P = ParamSpec('P')
+
+ Parameter specification variables exist primarily for the benefit of static
+ type checkers. They are used to forward the parameter types of one
+ callable to another callable, a pattern commonly found in higher order
+ functions and decorators. They are only valid when used in ``Concatenate``,
+        or as the first argument to ``Callable``. In Python 3.10 and higher,
+ they are also supported in user-defined Generics at runtime.
+ See class Generic for more information on generic types. An
+ example for annotating a decorator::
+
+ T = TypeVar('T')
+ P = ParamSpec('P')
+
+ def add_logging(f: Callable[P, T]) -> Callable[P, T]:
+ '''A type-safe decorator to add logging to a function.'''
+ def inner(*args: P.args, **kwargs: P.kwargs) -> T:
+ logging.info(f'{f.__name__} was called')
+ return f(*args, **kwargs)
+ return inner
+
+ @add_logging
+ def add_two(x: float, y: float) -> float:
+ '''Add two numbers together.'''
+ return x + y
+
+ Parameter specification variables defined with covariant=True or
+ contravariant=True can be used to declare covariant or contravariant
+ generic types. These keyword arguments are valid, but their actual semantics
+ are yet to be decided. See PEP 612 for details.
+
+        Parameter specification variables can be introspected, e.g.:
+
+           P.__name__ == 'P'
+ P.__bound__ == None
+ P.__covariant__ == False
+ P.__contravariant__ == False
+
+ Note that only parameter specification variables defined in global scope can
+ be pickled.
+ """
+
+        # Trick Generic into treating this like a TypeVar for __parameters__.
+ __class__ = typing.TypeVar
+
+ @property
+ def args(self):
+ return ParamSpecArgs(self)
+
+ @property
+ def kwargs(self):
+ return ParamSpecKwargs(self)
+
+ def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
+ default=None):
+ super().__init__([self])
+ self.__name__ = name
+ self.__covariant__ = bool(covariant)
+ self.__contravariant__ = bool(contravariant)
+ if bound:
+ self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
+ else:
+ self.__bound__ = None
+ _DefaultMixin.__init__(self, default)
+
+ # for pickling:
+ try:
+ def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ def_mod = None
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+ def __repr__(self):
+ if self.__covariant__:
+ prefix = '+'
+ elif self.__contravariant__:
+ prefix = '-'
+ else:
+ prefix = '~'
+ return prefix + self.__name__
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ def __eq__(self, other):
+ return self is other
+
+ def __reduce__(self):
+ return self.__name__
+
+ # Hack to get typing._type_check to pass.
+ def __call__(self, *args, **kwargs):
+ pass
+
+
+# 3.7-3.9
+if not hasattr(typing, 'Concatenate'):
+ # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+ class _ConcatenateGenericAlias(list):
+
+ # Trick Generic into looking into this for __parameters__.
+ __class__ = typing._GenericAlias
+
+ # Flag in 3.8.
+ _special = False
+
+ def __init__(self, origin, args):
+ super().__init__(args)
+ self.__origin__ = origin
+ self.__args__ = args
+
+ def __repr__(self):
+ _type_repr = typing._type_repr
+ return (f'{_type_repr(self.__origin__)}'
+ f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__args__))
+
+ # Hack to get typing._type_check to pass in Generic.
+ def __call__(self, *args, **kwargs):
+ pass
+
+ @property
+ def __parameters__(self):
+ return tuple(
+ tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
+ )
+
+
+# 3.7-3.9
+@typing._tp_cache
+def _concatenate_getitem(self, parameters):
+ if parameters == ():
+ raise TypeError("Cannot take a Concatenate of no types.")
+ if not isinstance(parameters, tuple):
+ parameters = (parameters,)
+ if not isinstance(parameters[-1], ParamSpec):
+ raise TypeError("The last parameter to Concatenate should be a "
+ "ParamSpec variable.")
+ msg = "Concatenate[arg, ...]: each arg must be a type."
+ parameters = tuple(typing._type_check(p, msg) for p in parameters)
+ return _ConcatenateGenericAlias(self, parameters)
+
+
+# 3.10+
+if hasattr(typing, 'Concatenate'):
+ Concatenate = typing.Concatenate
+ _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ @_TypeAliasForm
+ def Concatenate(self, parameters):
+ """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """
+ return _concatenate_getitem(self, parameters)
+# 3.7-8
+else:
+ class _ConcatenateForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ return _concatenate_getitem(self, parameters)
+
+ Concatenate = _ConcatenateForm(
+ 'Concatenate',
+ doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """)
+
+# 3.10+
+if hasattr(typing, 'TypeGuard'):
+ TypeGuard = typing.TypeGuard
+# 3.9
+elif sys.version_info[:2] >= (3, 9):
+ class _TypeGuardForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ @_TypeGuardForm
+ def TypeGuard(self, parameters):
+ """Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """
+ item = typing._type_check(parameters, f'{self} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+# 3.7-3.8
+else:
+ class _TypeGuardForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type')
+ return typing._GenericAlias(self, (item,))
+
+ TypeGuard = _TypeGuardForm(
+ 'TypeGuard',
+ doc="""Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+        Strict type narrowing is not enforced -- ``TypeB`` (the type inside
+        ``TypeGuard``) need not be a narrower form of ``TypeA`` (the declared
+        type of the argument); it can even be a wider form, and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """)
+
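+
+# Editor's illustrative sketch (not part of upstream typing_extensions):
+# a user-defined type guard in the PEP 647 sense. A static checker narrows
+# ``val`` to ``List[str]`` wherever ``_example_is_str_list(val)`` is true;
+# at runtime this is an ordinary function returning a bool.
+def _example_is_str_list(val: typing.List[object]) -> "TypeGuard[typing.List[str]]":
+    return all(isinstance(x, str) for x in val)
+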
+
+# Vendored from cpython typing._SpecialForm
+class _SpecialForm(typing._Final, _root=True):
+ __slots__ = ('_name', '__doc__', '_getitem')
+
+ def __init__(self, getitem):
+ self._getitem = getitem
+ self._name = getitem.__name__
+ self.__doc__ = getitem.__doc__
+
+ def __getattr__(self, item):
+ if item in {'__name__', '__qualname__'}:
+ return self._name
+
+ raise AttributeError(item)
+
+ def __mro_entries__(self, bases):
+ raise TypeError(f"Cannot subclass {self!r}")
+
+ def __repr__(self):
+ return f'typing_extensions.{self._name}'
+
+ def __reduce__(self):
+ return self._name
+
+ def __call__(self, *args, **kwds):
+ raise TypeError(f"Cannot instantiate {self!r}")
+
+ def __or__(self, other):
+ return typing.Union[self, other]
+
+ def __ror__(self, other):
+ return typing.Union[other, self]
+
+ def __instancecheck__(self, obj):
+ raise TypeError(f"{self} cannot be used with isinstance()")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError(f"{self} cannot be used with issubclass()")
+
+ @typing._tp_cache
+ def __getitem__(self, parameters):
+ return self._getitem(self, parameters)
+
+
+if hasattr(typing, "LiteralString"):
+ LiteralString = typing.LiteralString
+else:
+ @_SpecialForm
+ def LiteralString(self, params):
+ """Represents an arbitrary literal string.
+
+ Example::
+
+ from pip._vendor.typing_extensions import LiteralString
+
+ def query(sql: LiteralString) -> ...:
+ ...
+
+ query("SELECT * FROM table") # ok
+ query(f"SELECT * FROM {input()}") # not ok
+
+ See PEP 675 for details.
+
+ """
+ raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, "Self"):
+ Self = typing.Self
+else:
+ @_SpecialForm
+ def Self(self, params):
+ """Used to spell the type of "self" in classes.
+
+ Example::
+
+ from typing import Self
+
+ class ReturnsSelf:
+ def parse(self, data: bytes) -> Self:
+ ...
+ return self
+
+ """
+
+ raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, "Never"):
+ Never = typing.Never
+else:
+ @_SpecialForm
+ def Never(self, params):
+ """The bottom type, a type that has no members.
+
+ This can be used to define a function that should never be
+ called, or a function that never returns::
+
+ from pip._vendor.typing_extensions import Never
+
+ def never_call_me(arg: Never) -> None:
+ pass
+
+ def int_or_str(arg: int | str) -> None:
+ never_call_me(arg) # type checker error
+ match arg:
+ case int():
+ print("It's an int")
+ case str():
+ print("It's a str")
+ case _:
+ never_call_me(arg) # ok, arg is of type Never
+
+ """
+
+ raise TypeError(f"{self} is not subscriptable")
+
+
+if hasattr(typing, 'Required'):
+ Required = typing.Required
+ NotRequired = typing.NotRequired
+elif sys.version_info[:2] >= (3, 9):
+ class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ @_ExtensionsSpecialForm
+ def Required(self, parameters):
+ """A special typing construct to mark a key of a total=False TypedDict
+ as required. For example:
+
+ class Movie(TypedDict, total=False):
+ title: Required[str]
+ year: int
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+
+ There is no runtime checking that a required key is actually provided
+ when instantiating a related TypedDict.
+ """
+ item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+ @_ExtensionsSpecialForm
+ def NotRequired(self, parameters):
+ """A special typing construct to mark a key of a TypedDict as
+ potentially missing. For example:
+
+ class Movie(TypedDict):
+ title: str
+ year: NotRequired[int]
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+ """
+ item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+else:
+ class _RequiredForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type.')
+ return typing._GenericAlias(self, (item,))
+
+ Required = _RequiredForm(
+ 'Required',
+ doc="""A special typing construct to mark a key of a total=False TypedDict
+ as required. For example:
+
+ class Movie(TypedDict, total=False):
+ title: Required[str]
+ year: int
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+
+ There is no runtime checking that a required key is actually provided
+ when instantiating a related TypedDict.
+ """)
+ NotRequired = _RequiredForm(
+ 'NotRequired',
+ doc="""A special typing construct to mark a key of a TypedDict as
+ potentially missing. For example:
+
+ class Movie(TypedDict):
+ title: str
+ year: NotRequired[int]
+
+ m = Movie(
+ title='The Matrix', # typechecker error if key is omitted
+ year=1999,
+ )
+ """)
+
+
+if hasattr(typing, "Unpack"): # 3.11+
+ Unpack = typing.Unpack
+elif sys.version_info[:2] >= (3, 9):
+ class _UnpackSpecialForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ class _UnpackAlias(typing._GenericAlias, _root=True):
+ __class__ = typing.TypeVar
+
+ @_UnpackSpecialForm
+ def Unpack(self, parameters):
+ """A special typing construct to unpack a variadic type. For example:
+
+ Shape = TypeVarTuple('Shape')
+ Batch = NewType('Batch', int)
+
+ def add_batch_axis(
+ x: Array[Unpack[Shape]]
+ ) -> Array[Batch, Unpack[Shape]]: ...
+
+ """
+ item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
+ return _UnpackAlias(self, (item,))
+
+ def _is_unpack(obj):
+ return isinstance(obj, _UnpackAlias)
+
+else:
+ class _UnpackAlias(typing._GenericAlias, _root=True):
+ __class__ = typing.TypeVar
+
+ class _UnpackForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ f'{self._name} accepts only a single type.')
+ return _UnpackAlias(self, (item,))
+
+ Unpack = _UnpackForm(
+ 'Unpack',
+ doc="""A special typing construct to unpack a variadic type. For example:
+
+ Shape = TypeVarTuple('Shape')
+ Batch = NewType('Batch', int)
+
+ def add_batch_axis(
+ x: Array[Unpack[Shape]]
+ ) -> Array[Batch, Unpack[Shape]]: ...
+
+ """)
+
+ def _is_unpack(obj):
+ return isinstance(obj, _UnpackAlias)
+
+
+if hasattr(typing, "TypeVarTuple"): # 3.11+
+
+ # Add default Parameter - PEP 696
+ class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True):
+ """Type variable tuple."""
+
+ def __init__(self, name, *, default=None):
+ super().__init__(name)
+ _DefaultMixin.__init__(self, default)
+
+ # for pickling:
+ try:
+ def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ def_mod = None
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+else:
+ class TypeVarTuple(_DefaultMixin):
+ """Type variable tuple.
+
+ Usage::
+
+ Ts = TypeVarTuple('Ts')
+
+ In the same way that a normal type variable is a stand-in for a single
+ type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
+ type such as ``Tuple[int, str]``.
+
+ Type variable tuples can be used in ``Generic`` declarations.
+ Consider the following example::
+
+ class Array(Generic[*Ts]): ...
+
+ The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
+ where ``T1`` and ``T2`` are type variables. To use these type variables
+ as type parameters of ``Array``, we must *unpack* the type variable tuple using
+ the star operator: ``*Ts``. The signature of ``Array`` then behaves
+ as if we had simply written ``class Array(Generic[T1, T2]): ...``.
+        In contrast to ``Generic[T1, T2]``, however, ``Generic[*Ts]`` allows
+ us to parameterise the class with an *arbitrary* number of type parameters.
+
+ Type variable tuples can be used anywhere a normal ``TypeVar`` can.
+ This includes class definitions, as shown above, as well as function
+ signatures and variable annotations::
+
+ class Array(Generic[*Ts]):
+
+ def __init__(self, shape: Tuple[*Ts]):
+ self._shape: Tuple[*Ts] = shape
+
+ def get_shape(self) -> Tuple[*Ts]:
+ return self._shape
+
+ shape = (Height(480), Width(640))
+ x: Array[Height, Width] = Array(shape)
+ y = abs(x) # Inferred type is Array[Height, Width]
+ z = x + x # ... is Array[Height, Width]
+ x.get_shape() # ... is tuple[Height, Width]
+
+ """
+
+ # Trick Generic __parameters__.
+ __class__ = typing.TypeVar
+
+ def __iter__(self):
+ yield self.__unpacked__
+
+ def __init__(self, name, *, default=None):
+ self.__name__ = name
+ _DefaultMixin.__init__(self, default)
+
+ # for pickling:
+ try:
+ def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ def_mod = None
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+ self.__unpacked__ = Unpack[self]
+
+ def __repr__(self):
+ return self.__name__
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ def __eq__(self, other):
+ return self is other
+
+ def __reduce__(self):
+ return self.__name__
+
+ def __init_subclass__(self, *args, **kwds):
+ if '_root' not in kwds:
+ raise TypeError("Cannot subclass special typing classes")
+
+
+if hasattr(typing, "reveal_type"):
+ reveal_type = typing.reveal_type
+else:
+ def reveal_type(__obj: T) -> T:
+ """Reveal the inferred type of a variable.
+
+ When a static type checker encounters a call to ``reveal_type()``,
+ it will emit the inferred type of the argument::
+
+ x: int = 1
+ reveal_type(x)
+
+ Running a static type checker (e.g., ``mypy``) on this example
+ will produce output similar to 'Revealed type is "builtins.int"'.
+
+ At runtime, the function prints the runtime type of the
+ argument and returns it unchanged.
+
+ """
+ print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr)
+ return __obj
+
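+
+# Editor's illustrative sketch (not part of upstream typing_extensions):
+# at runtime ``reveal_type`` only prints the argument's runtime type to
+# stderr and returns it unchanged, so it can be left inline in an
+# expression while debugging.
+def _example_reveal_type():
+    x = reveal_type(1 + 1)  # stderr: Runtime type is 'int'
+    return x  # still 2
+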
+
+if hasattr(typing, "assert_never"):
+ assert_never = typing.assert_never
+else:
+ def assert_never(__arg: Never) -> Never:
+ """Assert to the type checker that a line of code is unreachable.
+
+ Example::
+
+ def int_or_str(arg: int | str) -> None:
+ match arg:
+ case int():
+ print("It's an int")
+ case str():
+ print("It's a str")
+ case _:
+ assert_never(arg)
+
+ If a type checker finds that a call to assert_never() is
+ reachable, it will emit an error.
+
+ At runtime, this throws an exception when called.
+
+ """
+ raise AssertionError("Expected code to be unreachable")
+
+
+if hasattr(typing, 'dataclass_transform'):
+ dataclass_transform = typing.dataclass_transform
+else:
+ def dataclass_transform(
+ *,
+ eq_default: bool = True,
+ order_default: bool = False,
+ kw_only_default: bool = False,
+ field_specifiers: typing.Tuple[
+ typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
+ ...
+ ] = (),
+ **kwargs: typing.Any,
+ ) -> typing.Callable[[T], T]:
+ """Decorator that marks a function, class, or metaclass as providing
+ dataclass-like behavior.
+
+ Example:
+
+ from pip._vendor.typing_extensions import dataclass_transform
+
+ _T = TypeVar("_T")
+
+ # Used on a decorator function
+ @dataclass_transform()
+ def create_model(cls: type[_T]) -> type[_T]:
+ ...
+ return cls
+
+ @create_model
+ class CustomerModel:
+ id: int
+ name: str
+
+ # Used on a base class
+ @dataclass_transform()
+ class ModelBase: ...
+
+ class CustomerModel(ModelBase):
+ id: int
+ name: str
+
+ # Used on a metaclass
+ @dataclass_transform()
+ class ModelMeta(type): ...
+
+ class ModelBase(metaclass=ModelMeta): ...
+
+ class CustomerModel(ModelBase):
+ id: int
+ name: str
+
+ Each of the ``CustomerModel`` classes defined in this example will now
+ behave similarly to a dataclass created with the ``@dataclasses.dataclass``
+ decorator. For example, the type checker will synthesize an ``__init__``
+ method.
+
+ The arguments to this decorator can be used to customize this behavior:
+ - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
+ True or False if it is omitted by the caller.
+ - ``order_default`` indicates whether the ``order`` parameter is
+ assumed to be True or False if it is omitted by the caller.
+ - ``kw_only_default`` indicates whether the ``kw_only`` parameter is
+ assumed to be True or False if it is omitted by the caller.
+ - ``field_specifiers`` specifies a static list of supported classes
+ or functions that describe fields, similar to ``dataclasses.field()``.
+
+ At runtime, this decorator records its arguments in the
+ ``__dataclass_transform__`` attribute on the decorated object.
+
+ See PEP 681 for details.
+
+ """
+ def decorator(cls_or_fn):
+ cls_or_fn.__dataclass_transform__ = {
+ "eq_default": eq_default,
+ "order_default": order_default,
+ "kw_only_default": kw_only_default,
+ "field_specifiers": field_specifiers,
+ "kwargs": kwargs,
+ }
+ return cls_or_fn
+ return decorator
+
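+
+# Editor's illustrative sketch (not part of upstream typing_extensions):
+# the decorator's only runtime effect is to record its arguments on the
+# decorated object, where introspection tools can find them.
+def _example_dataclass_transform():
+    @dataclass_transform(kw_only_default=True)
+    def create_model(cls):
+        return cls
+
+    return create_model.__dataclass_transform__["kw_only_default"]  # True
+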
+
+if hasattr(typing, "override"):
+ override = typing.override
+else:
+ _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])
+
+ def override(__arg: _F) -> _F:
+ """Indicate that a method is intended to override a method in a base class.
+
+ Usage:
+
+ class Base:
+ def method(self) -> None: ...
+
+ class Child(Base):
+ @override
+ def method(self) -> None:
+ super().method()
+
+ When this decorator is applied to a method, the type checker will
+ validate that it overrides a method with the same name on a base class.
+ This helps prevent bugs that may occur when a base class is changed
+ without an equivalent change to a child class.
+
+ See PEP 698 for details.
+
+ """
+ return __arg
+
+
+# We have to do some monkey patching to deal with the dual nature of
+# Unpack/TypeVarTuple:
+# - We want Unpack to be a kind of TypeVar so it gets accepted in
+# Generic[Unpack[Ts]]
+# - We want it to *not* be treated as a TypeVar for the purposes of
+# counting generic parameters, so that when we subscript a generic,
+# the runtime doesn't try to substitute the Unpack with the subscripted type.
+if not hasattr(typing, "TypeVarTuple"):
+ typing._collect_type_vars = _collect_type_vars
+ typing._check_generic = _check_generic
+
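+
+# Editor's illustrative sketch (not part of upstream typing_extensions):
+# the patching above is what lets ``Unpack[Ts]`` appear inside
+# ``Generic[...]`` without being counted as an ordinary type variable, so
+# a variadic generic can be declared and subscripted with any number of
+# type arguments (names hypothetical; nothing runs at import time).
+def _example_variadic_generic():
+    Ts = TypeVarTuple("Ts")
+
+    class Array(typing.Generic[Unpack[Ts]]):
+        pass
+
+    return Array[int, str]
+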
+
+# Backport typing.NamedTuple as it exists in Python 3.11.
+# In 3.11, the ability to define generic `NamedTuple`s was supported.
+# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
+if sys.version_info >= (3, 11):
+ NamedTuple = typing.NamedTuple
+else:
+ def _caller():
+ try:
+ return sys._getframe(2).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError): # For platforms without _getframe()
+ return None
+
+ def _make_nmtuple(name, types, module, defaults=()):
+ fields = [n for n, t in types]
+ annotations = {n: typing._type_check(t, f"field {n} annotation must be a type")
+ for n, t in types}
+ nm_tpl = collections.namedtuple(name, fields,
+ defaults=defaults, module=module)
+ nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
+ # The `_field_types` attribute was removed in 3.9;
+ # in earlier versions, it is the same as the `__annotations__` attribute
+ if sys.version_info < (3, 9):
+ nm_tpl._field_types = annotations
+ return nm_tpl
+
+ _prohibited_namedtuple_fields = typing._prohibited
+ _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
+
+ class _NamedTupleMeta(type):
+ def __new__(cls, typename, bases, ns):
+ assert _NamedTuple in bases
+ for base in bases:
+ if base is not _NamedTuple and base is not typing.Generic:
+ raise TypeError(
+ 'can only inherit from a NamedTuple type and Generic')
+ bases = tuple(tuple if base is _NamedTuple else base for base in bases)
+ types = ns.get('__annotations__', {})
+ default_names = []
+ for field_name in types:
+ if field_name in ns:
+ default_names.append(field_name)
+ elif default_names:
+ raise TypeError(f"Non-default namedtuple field {field_name} "
+ f"cannot follow default field"
+ f"{'s' if len(default_names) > 1 else ''} "
+ f"{', '.join(default_names)}")
+ nm_tpl = _make_nmtuple(
+ typename, types.items(),
+ defaults=[ns[n] for n in default_names],
+ module=ns['__module__']
+ )
+ nm_tpl.__bases__ = bases
+ if typing.Generic in bases:
+ class_getitem = typing.Generic.__class_getitem__.__func__
+ nm_tpl.__class_getitem__ = classmethod(class_getitem)
+ # update from user namespace without overriding special namedtuple attributes
+ for key in ns:
+ if key in _prohibited_namedtuple_fields:
+ raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
+ elif key not in _special_namedtuple_fields and key not in nm_tpl._fields:
+ setattr(nm_tpl, key, ns[key])
+ if typing.Generic in bases:
+ nm_tpl.__init_subclass__()
+ return nm_tpl
+
+ def NamedTuple(__typename, __fields=None, **kwargs):
+ if __fields is None:
+ __fields = kwargs.items()
+ elif kwargs:
+ raise TypeError("Either list of fields or keywords"
+ " can be provided to NamedTuple, not both")
+ return _make_nmtuple(__typename, __fields, module=_caller())
+
+ NamedTuple.__doc__ = typing.NamedTuple.__doc__
+ _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})
+
+ # On 3.8+, alter the signature so that it matches typing.NamedTuple.
+ # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7,
+ # so just leave the signature as it is on 3.7.
+ if sys.version_info >= (3, 8):
+ NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)'
+
+ def _namedtuple_mro_entries(bases):
+ assert NamedTuple in bases
+ return (_NamedTuple,)
+
+ NamedTuple.__mro_entries__ = _namedtuple_mro_entries
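+
+
+# Editor's illustrative sketch (not part of upstream typing_extensions):
+# the backport above exists so that *generic* NamedTuples can be defined
+# on Python < 3.11 as well (names hypothetical).
+def _example_generic_namedtuple():
+    T = typing.TypeVar("T")
+
+    class Pair(NamedTuple, typing.Generic[T]):
+        first: T
+        second: T
+
+    return Pair(first=1, second=2)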
diff --git a/third_party/python/pip/pip/_vendor/urllib3/__init__.py b/third_party/python/pip/pip/_vendor/urllib3/__init__.py
new file mode 100644
index 0000000000..c6fa38212f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/__init__.py
@@ -0,0 +1,102 @@
+"""
+Python HTTP library with thread-safe connection pooling, file post support, a user-friendly API, and more
+"""
+from __future__ import absolute_import
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+import warnings
+from logging import NullHandler
+
+from . import exceptions
+from ._version import __version__
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host
+
+# === NOTE TO REPACKAGERS AND VENDORS ===
+# Please delete this block, this logic is only
+# for urllib3 being distributed via PyPI.
+# See: https://github.com/urllib3/urllib3/issues/2680
+try:
+ import urllib3_secure_extra # type: ignore # noqa: F401
+except ImportError:
+ pass
+else:
+ warnings.warn(
+ "'urllib3[secure]' extra is deprecated and will be removed "
+ "in a future release of urllib3 2.x. Read more in this issue: "
+ "https://github.com/urllib3/urllib3/issues/2680",
+ category=DeprecationWarning,
+ stacklevel=2,
+ )
+
+__author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
+__license__ = "MIT"
+__version__ = __version__
+
+__all__ = (
+ "HTTPConnectionPool",
+ "HTTPSConnectionPool",
+ "PoolManager",
+ "ProxyManager",
+ "HTTPResponse",
+ "Retry",
+ "Timeout",
+ "add_stderr_logger",
+ "connection_from_url",
+ "disable_warnings",
+ "encode_multipart_formdata",
+ "get_host",
+ "make_headers",
+ "proxy_from_url",
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+ """
+ Helper for quickly adding a StreamHandler to the logger. Useful for
+ debugging.
+
+ Returns the handler after adding it.
+ """
+ # This method needs to be in this __init__.py to get the __name__ correct
+ # even if urllib3 is vendored within another package.
+ logger = logging.getLogger(__name__)
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
+ logger.addHandler(handler)
+ logger.setLevel(level)
+ logger.debug("Added a stderr logging handler to logger: %s", __name__)
+ return handler
+
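+
+# Editor's illustrative sketch (not part of upstream urllib3): typical
+# debugging use is a one-liner; the returned handler can be removed again
+# once you are done watching the log output.
+def _example_add_stderr_logger():
+    handler = add_stderr_logger(logging.INFO)
+    try:
+        pass  # ... make some requests and watch the log output ...
+    finally:
+        logging.getLogger(__name__).removeHandler(handler)
+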
+
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarnings always go off by default.
+warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarnings should go off once per host.
+warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarnings don't vary between requests, so we keep them default.
+warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+ """
+ Helper for quickly disabling all urllib3 warnings.
+ """
+ warnings.simplefilter("ignore", category)
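+
+
+# Editor's illustrative sketch (not part of upstream urllib3): silencing a
+# single warning category, rather than every HTTPWarning subclass.
+def _example_disable_insecure_request_warnings():
+    disable_warnings(exceptions.InsecureRequestWarning)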
diff --git a/third_party/python/pip/pip/_vendor/urllib3/_collections.py b/third_party/python/pip/pip/_vendor/urllib3/_collections.py
new file mode 100644
index 0000000000..da9857e986
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/_collections.py
@@ -0,0 +1,337 @@
+from __future__ import absolute_import
+
+try:
+ from collections.abc import Mapping, MutableMapping
+except ImportError:
+ from collections import Mapping, MutableMapping
+try:
+ from threading import RLock
+except ImportError: # Platform-specific: No threads available
+
+ class RLock:
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+from collections import OrderedDict
+
+from .exceptions import InvalidHeader
+from .packages import six
+from .packages.six import iterkeys, itervalues
+
+__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+ """
+ Provides a thread-safe dict-like container which maintains up to
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
+ ``maxsize``.
+
+ :param maxsize:
+ Maximum number of recent elements to retain.
+
+ :param dispose_func:
+        Callback invoked every time an item is evicted from the container:
+        ``dispose_func(value)`` is called with the evicted value.
+ """
+
+ ContainerCls = OrderedDict
+
+ def __init__(self, maxsize=10, dispose_func=None):
+ self._maxsize = maxsize
+ self.dispose_func = dispose_func
+
+ self._container = self.ContainerCls()
+ self.lock = RLock()
+
+ def __getitem__(self, key):
+ # Re-insert the item, moving it to the end of the eviction line.
+ with self.lock:
+ item = self._container.pop(key)
+ self._container[key] = item
+ return item
+
+ def __setitem__(self, key, value):
+ evicted_value = _Null
+ with self.lock:
+ # Possibly evict the existing value of 'key'
+ evicted_value = self._container.get(key, _Null)
+ self._container[key] = value
+
+ # If we didn't evict an existing value, we might have to evict the
+ # least recently used item from the beginning of the container.
+ if len(self._container) > self._maxsize:
+ _key, evicted_value = self._container.popitem(last=False)
+
+ if self.dispose_func and evicted_value is not _Null:
+ self.dispose_func(evicted_value)
+
+ def __delitem__(self, key):
+ with self.lock:
+ value = self._container.pop(key)
+
+ if self.dispose_func:
+ self.dispose_func(value)
+
+ def __len__(self):
+ with self.lock:
+ return len(self._container)
+
+ def __iter__(self):
+ raise NotImplementedError(
+ "Iteration over this class is unlikely to be threadsafe."
+ )
+
+ def clear(self):
+ with self.lock:
+ # Copy pointers to all values, then wipe the mapping
+ values = list(itervalues(self._container))
+ self._container.clear()
+
+ if self.dispose_func:
+ for value in values:
+ self.dispose_func(value)
+
+ def keys(self):
+ with self.lock:
+ return list(iterkeys(self._container))
+
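+
+# Editor's illustrative sketch (not part of upstream urllib3): eviction
+# order and the ``dispose_func`` callback. With ``maxsize=2``, inserting a
+# third key evicts the least recently used entry and disposes its value.
+def _example_recently_used_container():
+    closed = []
+    lru = RecentlyUsedContainer(maxsize=2, dispose_func=closed.append)
+    lru["a"] = 1
+    lru["b"] = 2
+    _ = lru["a"]  # touching "a" makes "b" the least recently used key
+    lru["c"] = 3  # evicts "b"; dispose_func receives its value
+    return closed  # [2]
+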
+
+class HTTPHeaderDict(MutableMapping):
+ """
+ :param headers:
+ An iterable of field-value pairs. Must not contain multiple field names
+ when compared case-insensitively.
+
+ :param kwargs:
+ Additional field-value pairs to pass in to ``dict.update``.
+
+    A ``dict``-like container for storing HTTP headers.
+
+ Field names are stored and compared case-insensitively in compliance with
+ RFC 7230. Iteration provides the first case-sensitive key seen for each
+ case-insensitive pair.
+
+    Using ``__setitem__`` syntax overwrites fields that compare equal
+    case-insensitively in order to maintain ``dict``'s API. To preserve
+    multiple values for fields that compare equal, create a new
+    ``HTTPHeaderDict`` and use ``.add`` in a loop instead.
+
+ If multiple fields that are equal case-insensitively are passed to the
+ constructor or ``.update``, the behavior is undefined and some will be
+ lost.
+
+ >>> headers = HTTPHeaderDict()
+ >>> headers.add('Set-Cookie', 'foo=bar')
+ >>> headers.add('set-cookie', 'baz=quxx')
+ >>> headers['content-length'] = '7'
+ >>> headers['SET-cookie']
+ 'foo=bar, baz=quxx'
+ >>> headers['Content-Length']
+ '7'
+ """
+
+ def __init__(self, headers=None, **kwargs):
+ super(HTTPHeaderDict, self).__init__()
+ self._container = OrderedDict()
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
+
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = [key, val]
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ", ".join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, "keys"):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return dict((k.lower(), v) for k, v in self.itermerged()) == dict(
+ (k.lower(), v) for k, v in other.itermerged()
+ )
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if six.PY2: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ """D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ """
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
+ """Adds a (name, value) pair, doesn't overwrite the value if it already
+ exists.
+
+ >>> headers = HTTPHeaderDict(foo='bar')
+ >>> headers.add('Foo', 'baz')
+ >>> headers['foo']
+ 'bar, baz'
+ """
+ key_lower = key.lower()
+ new_vals = [key, val]
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ vals.append(val)
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError(
+                "extend() takes at most 1 positional "
+                "argument ({0} given)".format(len(args))
+ )
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
+
+ def getlist(self, key, default=__marker):
+ """Returns a list of all the values for the named field. Returns an
+ empty list if the key doesn't exist."""
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ if default is self.__marker:
+ return []
+ return default
+ else:
+ return vals[1:]
+
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
+
+ # Backwards compatibility for http.cookiejar
+ get_all = getlist
+
+ def __repr__(self):
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ", ".join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ obs_fold_continued_leaders = (" ", "\t")
+ headers = []
+
+ for line in message.headers:
+ if line.startswith(obs_fold_continued_leaders):
+ if not headers:
+ # We received a header line that starts with OWS as described
+ # in RFC-7230 S3.2.4. This indicates a multiline header, but
+ # there exists no previous header to which we can attach it.
+ raise InvalidHeader(
+ "Header continuation with no previous header: %s" % line
+ )
+ else:
+ key, value = headers[-1]
+ headers[-1] = (key, value + " " + line.strip())
+ continue
+
+ key, value = line.split(":", 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
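+
+
+# Editor's illustrative sketch (not part of upstream urllib3): the
+# difference between ``iteritems`` (one pair per header line) and
+# ``itermerged`` (duplicate fields joined with ", ").
+def _example_headerdict_iteration():
+    h = HTTPHeaderDict()
+    h.add("Set-Cookie", "foo=bar")
+    h.add("Set-Cookie", "baz=quxx")
+    assert list(h.iteritems()) == [("Set-Cookie", "foo=bar"),
+                                   ("Set-Cookie", "baz=quxx")]
+    assert list(h.itermerged()) == [("Set-Cookie", "foo=bar, baz=quxx")]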
diff --git a/third_party/python/pip/pip/_vendor/urllib3/_version.py b/third_party/python/pip/pip/_vendor/urllib3/_version.py
new file mode 100644
index 0000000000..7c031661ba
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/_version.py
@@ -0,0 +1,2 @@
+# This file is protected via CODEOWNERS
+__version__ = "1.26.14"
diff --git a/third_party/python/pip/pip/_vendor/urllib3/connection.py b/third_party/python/pip/pip/_vendor/urllib3/connection.py
new file mode 100644
index 0000000000..10fb36c4e3
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/connection.py
@@ -0,0 +1,567 @@
+from __future__ import absolute_import
+
+import datetime
+import logging
+import os
+import re
+import socket
+import warnings
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException # noqa: F401
+from .util.proxy import create_proxy_ssl_context
+
+try: # Compiled with SSL?
+ import ssl
+
+ BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError): # Platform-specific: No SSL.
+ ssl = None
+
+ class BaseSSLError(BaseException):
+ pass
+
+
+try:
+ # Python 3: not a no-op, we're adding this to the namespace so it can be imported.
+ ConnectionError = ConnectionError
+except NameError:
+ # Python 2
+ class ConnectionError(Exception):
+ pass
+
+
+try: # Python 3:
+ # Not a no-op, we're adding this to the namespace so it can be imported.
+ BrokenPipeError = BrokenPipeError
+except NameError: # Python 2:
+
+ class BrokenPipeError(Exception):
+ pass
+
+
+from ._collections import HTTPHeaderDict # noqa (historical, removed in v2)
+from ._version import __version__
+from .exceptions import (
+ ConnectTimeoutError,
+ NewConnectionError,
+ SubjectAltNameWarning,
+ SystemTimeWarning,
+)
+from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection
+from .util.ssl_ import (
+ assert_fingerprint,
+ create_urllib3_context,
+ is_ipaddress,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .util.ssl_match_hostname import CertificateError, match_hostname
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {"http": 80, "https": 443}
+
+# When it comes time to update this value as a part of regular maintenance
+# (i.e. test_recent_date is failing), update it to ~6 months before the current date.
+RECENT_DATE = datetime.date(2022, 1, 1)
+
+_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")
+
+
+class HTTPConnection(_HTTPConnection, object):
+ """
+ Based on :class:`http.client.HTTPConnection` but provides an extra constructor
+ backwards-compatibility layer between older and newer Pythons.
+
+ Additional keyword parameters are used to configure attributes of the connection.
+ Accepted parameters include:
+
+ - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+ - ``source_address``: Set the source address for the current connection.
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+ you might pass:
+
+ .. code-block:: python
+
+ HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ]
+
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+ """
+
+ default_port = port_by_scheme["http"]
+
+ #: Disable Nagle's algorithm by default.
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+ default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+ #: Whether this connection verifies the host's certificate.
+ is_verified = False
+
+ #: Whether this proxy connection (if used) verifies the proxy host's
+ #: certificate.
+ proxy_is_verified = None
+
+ def __init__(self, *args, **kw):
+ if not six.PY2:
+ kw.pop("strict", None)
+
+ # Pre-set source_address.
+ self.source_address = kw.get("source_address")
+
+ #: The socket options provided by the user. If no options are
+ #: provided, we use the default options.
+ self.socket_options = kw.pop("socket_options", self.default_socket_options)
+
+ # Proxy options provided by the user.
+ self.proxy = kw.pop("proxy", None)
+ self.proxy_config = kw.pop("proxy_config", None)
+
+ _HTTPConnection.__init__(self, *args, **kw)
+
+ @property
+ def host(self):
+ """
+ Getter method to remove any trailing dots that indicate the hostname is an FQDN.
+
+ In general, SSL certificates don't include the trailing dot indicating a
+ fully-qualified domain name, and thus, they don't validate properly when
+ checked against a domain name that includes the dot. In addition, some
+ servers may not expect to receive the trailing dot when provided.
+
+        However, the hostname with the trailing dot is critical to DNS resolution; a
+        lookup with the trailing dot resolves only the given FQDN,
+        whereas a lookup without the trailing dot also searches the system's search domain
+ list. Thus, it's important to keep the original host around for use only in
+ those cases where it's appropriate (i.e., when doing DNS lookup to establish the
+ actual TCP connection across which we're going to send HTTP requests).
+ """
+ return self._dns_host.rstrip(".")
+
+ @host.setter
+ def host(self, value):
+ """
+ Setter for the `host` property.
+
+ We assume that only urllib3 uses the _dns_host attribute; httplib itself
+ only uses `host`, and it seems reasonable that other libraries follow suit.
+ """
+ self._dns_host = value
+
+ def _new_conn(self):
+ """Establish a socket connection and set nodelay settings on it.
+
+ :return: New socket connection.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw["source_address"] = self.source_address
+
+ if self.socket_options:
+ extra_kw["socket_options"] = self.socket_options
+
+ try:
+ conn = connection.create_connection(
+ (self._dns_host, self.port), self.timeout, **extra_kw
+ )
+
+ except SocketTimeout:
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ return conn
+
+ def _is_using_tunnel(self):
+ # Google App Engine's httplib does not define _tunnel_host
+ return getattr(self, "_tunnel_host", None)
+
+ def _prepare_conn(self, conn):
+ self.sock = conn
+ if self._is_using_tunnel():
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ def putrequest(self, method, url, *args, **kwargs):
+ """ """
+ # Empty docstring because the indentation of CPython's implementation
+ # is broken but we don't want this method in our documentation.
+ match = _CONTAINS_CONTROL_CHAR_RE.search(method)
+ if match:
+ raise ValueError(
+ "Method cannot contain non-token characters %r (found at least %r)"
+ % (method, match.group())
+ )
+
+ return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)
+
+ def putheader(self, header, *values):
+ """ """
+ if not any(isinstance(v, str) and v == SKIP_HEADER for v in values):
+ _HTTPConnection.putheader(self, header, *values)
+ elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS:
+ raise ValueError(
+ "urllib3.util.SKIP_HEADER only supports '%s'"
+ % ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),)
+ )
+
+ def request(self, method, url, body=None, headers=None):
+ if headers is None:
+ headers = {}
+ else:
+ # Avoid modifying the headers passed into .request()
+ headers = headers.copy()
+ if "user-agent" not in (six.ensure_str(k.lower()) for k in headers):
+ headers["User-Agent"] = _get_default_user_agent()
+ super(HTTPConnection, self).request(method, url, body=body, headers=headers)
+
+ def request_chunked(self, method, url, body=None, headers=None):
+ """
+        Alternative to the common request method: sends the
+        body with chunked encoding rather than as one block.
+ """
+ headers = headers or {}
+ header_keys = set([six.ensure_str(k.lower()) for k in headers])
+ skip_accept_encoding = "accept-encoding" in header_keys
+ skip_host = "host" in header_keys
+ self.putrequest(
+ method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
+ )
+ if "user-agent" not in header_keys:
+ self.putheader("User-Agent", _get_default_user_agent())
+ for header, value in headers.items():
+ self.putheader(header, value)
+ if "transfer-encoding" not in header_keys:
+ self.putheader("Transfer-Encoding", "chunked")
+ self.endheaders()
+
+ if body is not None:
+ stringish_types = six.string_types + (bytes,)
+ if isinstance(body, stringish_types):
+ body = (body,)
+ for chunk in body:
+ if not chunk:
+ continue
+ if not isinstance(chunk, bytes):
+ chunk = chunk.encode("utf8")
+ len_str = hex(len(chunk))[2:]
+ to_send = bytearray(len_str.encode())
+ to_send += b"\r\n"
+ to_send += chunk
+ to_send += b"\r\n"
+ self.send(to_send)
+
+ # After the if clause, to always have a closed body
+ self.send(b"0\r\n\r\n")
+
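+
+# Editor's illustrative sketch (not part of upstream urllib3): streaming a
+# request body with ``request_chunked``. The host is a placeholder and the
+# helper is never called at import time; each chunk is framed as
+# "<hex length>\r\n<chunk>\r\n" and terminated with "0\r\n\r\n", as above.
+def _example_request_chunked():
+    conn = HTTPConnection("example.invalid", 80)
+
+    def body():
+        yield b"hello, "
+        yield b"world"
+
+    conn.request_chunked("POST", "/upload", body=body())
+    return conn.getresponse()
+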
+
+class HTTPSConnection(HTTPConnection):
+ """
+ Many of the parameters to this constructor are passed to the underlying SSL
+ socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.
+ """
+
+ default_port = port_by_scheme["https"]
+
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ca_cert_data = None
+ ssl_version = None
+ assert_fingerprint = None
+ tls_in_tls_required = False
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ key_file=None,
+ cert_file=None,
+ key_password=None,
+ strict=None,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ ssl_context=None,
+ server_hostname=None,
+ **kw
+ ):
+
+ HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.key_password = key_password
+ self.ssl_context = ssl_context
+ self.server_hostname = server_hostname
+
+ # Required property for Google AppEngine 1.9.0 which otherwise causes
+ # HTTPS requests to go out as HTTP. (See Issue #356)
+ self._protocol = "https"
+
+ def set_cert(
+ self,
+ key_file=None,
+ cert_file=None,
+ cert_reqs=None,
+ key_password=None,
+ ca_certs=None,
+ assert_hostname=None,
+ assert_fingerprint=None,
+ ca_cert_dir=None,
+ ca_cert_data=None,
+ ):
+ """
+ This method should only be called once, before the connection is used.
+ """
+ # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
+ # have an SSLContext object in which case we'll use its verify_mode.
+ if cert_reqs is None:
+ if self.ssl_context is not None:
+ cert_reqs = self.ssl_context.verify_mode
+ else:
+ cert_reqs = resolve_cert_reqs(None)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.key_password = key_password
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+ self.ca_cert_data = ca_cert_data
+
+ def connect(self):
+ # Add certificate verification
+ self.sock = conn = self._new_conn()
+ hostname = self.host
+ tls_in_tls = False
+
+ if self._is_using_tunnel():
+ if self.tls_in_tls_required:
+ self.sock = conn = self._connect_tls_proxy(hostname, conn)
+ tls_in_tls = True
+
+ # Calls self._set_hostport(), so self.host is
+ # self._tunnel_host below.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ # Override the host with the one we're requesting data from.
+ hostname = self._tunnel_host
+
+ server_hostname = hostname
+ if self.server_hostname is not None:
+ server_hostname = self.server_hostname
+
+ is_time_off = datetime.date.today() < RECENT_DATE
+ if is_time_off:
+ warnings.warn(
+ (
+ "System time is way off (before {0}). This will probably "
+ "lead to SSL verification errors"
+ ).format(RECENT_DATE),
+ SystemTimeWarning,
+ )
+
+ # Wrap socket using verification with the root certs in
+ # trusted_root_certs
+ default_ssl_context = False
+ if self.ssl_context is None:
+ default_ssl_context = True
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(self.ssl_version),
+ cert_reqs=resolve_cert_reqs(self.cert_reqs),
+ )
+
+ context = self.ssl_context
+ context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+
+ # Try to load OS default certs if none are given.
+ # Works well on Windows (requires Python3.4+)
+ if (
+ not self.ca_certs
+ and not self.ca_cert_dir
+ and not self.ca_cert_data
+ and default_ssl_context
+ and hasattr(context, "load_default_certs")
+ ):
+ context.load_default_certs()
+
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ key_password=self.key_password,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ ca_cert_data=self.ca_cert_data,
+ server_hostname=server_hostname,
+ ssl_context=context,
+ tls_in_tls=tls_in_tls,
+ )
+
+ # If we're using all defaults and the connection
+ # is TLSv1 or TLSv1.1 we throw a DeprecationWarning
+ # for the host.
+ if (
+ default_ssl_context
+ and self.ssl_version is None
+ and hasattr(self.sock, "version")
+ and self.sock.version() in {"TLSv1", "TLSv1.1"}
+ ):
+ warnings.warn(
+ "Negotiating TLSv1/TLSv1.1 by default is deprecated "
+ "and will be disabled in urllib3 v2.0.0. Connecting to "
+ "'%s' with '%s' can be enabled by explicitly opting-in "
+ "with 'ssl_version'" % (self.host, self.sock.version()),
+ DeprecationWarning,
+ )
+
+ if self.assert_fingerprint:
+ assert_fingerprint(
+ self.sock.getpeercert(binary_form=True), self.assert_fingerprint
+ )
+ elif (
+ context.verify_mode != ssl.CERT_NONE
+ and not getattr(context, "check_hostname", False)
+ and self.assert_hostname is not False
+ ):
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS Library still thinks it's matching hostnames.
+ cert = self.sock.getpeercert()
+ if not cert.get("subjectAltName", ()):
+ warnings.warn(
+ (
+ "Certificate for {0} has no `subjectAltName`, falling back to check for a "
+ "`commonName` for now. This feature is being removed by major browsers and "
+ "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
+ "for details.)".format(hostname)
+ ),
+ SubjectAltNameWarning,
+ )
+ _match_hostname(cert, self.assert_hostname or server_hostname)
+
+ self.is_verified = (
+ context.verify_mode == ssl.CERT_REQUIRED
+ or self.assert_fingerprint is not None
+ )
+
+ def _connect_tls_proxy(self, hostname, conn):
+ """
+ Establish a TLS connection to the proxy using the provided SSL context.
+ """
+ proxy_config = self.proxy_config
+ ssl_context = proxy_config.ssl_context
+ if ssl_context:
+ # If the user provided a proxy context, we assume CA and client
+ # certificates have already been set
+ return ssl_wrap_socket(
+ sock=conn,
+ server_hostname=hostname,
+ ssl_context=ssl_context,
+ )
+
+ ssl_context = create_proxy_ssl_context(
+ self.ssl_version,
+ self.cert_reqs,
+ self.ca_certs,
+ self.ca_cert_dir,
+ self.ca_cert_data,
+ )
+
+ # If no cert was provided, use only the default options for server
+ # certificate validation
+ socket = ssl_wrap_socket(
+ sock=conn,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ ca_cert_data=self.ca_cert_data,
+ server_hostname=hostname,
+ ssl_context=ssl_context,
+ )
+
+ if ssl_context.verify_mode != ssl.CERT_NONE and not getattr(
+ ssl_context, "check_hostname", False
+ ):
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS Library still thinks it's matching hostnames.
+ cert = socket.getpeercert()
+ if not cert.get("subjectAltName", ()):
+ warnings.warn(
+ (
+ "Certificate for {0} has no `subjectAltName`, falling back to check for a "
+ "`commonName` for now. This feature is being removed by major browsers and "
+ "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
+ "for details.)".format(hostname)
+ ),
+ SubjectAltNameWarning,
+ )
+ _match_hostname(cert, hostname)
+
+ self.proxy_is_verified = ssl_context.verify_mode == ssl.CERT_REQUIRED
+ return socket
+
+
+def _match_hostname(cert, asserted_hostname):
+    # Our upstream implementation of ssl.match_hostname() only applies
+    # this normalization to IP addresses, not to DNS SANs, so we do the
+    # same thing!
+ stripped_hostname = asserted_hostname.strip("u[]")
+ if is_ipaddress(stripped_hostname):
+ asserted_hostname = stripped_hostname
+
+ try:
+ match_hostname(cert, asserted_hostname)
+ except CertificateError as e:
+ log.warning(
+ "Certificate did not match expected hostname: %s. Certificate: %s",
+ asserted_hostname,
+ cert,
+ )
+ # Add cert to exception and reraise so client code can inspect
+ # the cert when catching the exception, if they want to
+ e._peer_cert = cert
+ raise
+
+
+def _get_default_user_agent():
+ return "python-urllib3/%s" % __version__
+
+
+class DummyConnection(object):
+ """Used to detect a failed ConnectionCls import."""
+
+ pass
+
+
+if not ssl:
+ HTTPSConnection = DummyConnection # noqa: F811
+
+
+VerifiedHTTPSConnection = HTTPSConnection
diff --git a/third_party/python/pip/pip/_vendor/urllib3/connectionpool.py b/third_party/python/pip/pip/_vendor/urllib3/connectionpool.py
new file mode 100644
index 0000000000..7087392792
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/connectionpool.py
@@ -0,0 +1,1110 @@
+from __future__ import absolute_import
+
+import errno
+import logging
+import re
+import socket
+import sys
+import warnings
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from .connection import (
+ BaseSSLError,
+ BrokenPipeError,
+ DummyConnection,
+ HTTPConnection,
+ HTTPException,
+ HTTPSConnection,
+ VerifiedHTTPSConnection,
+ port_by_scheme,
+)
+from .exceptions import (
+ ClosedPoolError,
+ EmptyPoolError,
+ HeaderParsingError,
+ HostChangedError,
+ InsecureRequestWarning,
+ LocationValueError,
+ MaxRetryError,
+ NewConnectionError,
+ ProtocolError,
+ ProxyError,
+ ReadTimeoutError,
+ SSLError,
+ TimeoutError,
+)
+from .packages import six
+from .packages.six.moves import queue
+from .request import RequestMethods
+from .response import HTTPResponse
+from .util.connection import is_connection_dropped
+from .util.proxy import connection_requires_http_tunnel
+from .util.queue import LifoQueue
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.ssl_match_hostname import CertificateError
+from .util.timeout import Timeout
+from .util.url import Url, _encode_target
+from .util.url import _normalize_host as normalize_host
+from .util.url import get_host, parse_url
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+ """
+ Base class for all connection pools, such as
+ :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+
+ .. note::
+       ConnectionPool.urlopen() does not normalize or percent-encode target URIs,
+ which is useful if your target server doesn't support percent-encoded
+ target URIs.
+ """
+
+ scheme = None
+ QueueCls = LifoQueue
+
+ def __init__(self, host, port=None):
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ self.host = _normalize_host(host, scheme=self.scheme)
+ self._proxy_host = host.lower()
+ self.port = port
+
+ def __str__(self):
+ return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
+
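+# Editor's illustrative sketch (not part of upstream urllib3): every pool
+# is a context manager that closes its pooled connections on exit.
+# ``HTTPConnectionPool`` is defined further below; the host here is a
+# placeholder and the helper is never called at import time.
+def _example_pool_usage():
+    with HTTPConnectionPool("example.invalid", maxsize=2, block=True) as pool:
+        return pool.urlopen("GET", "/")
+
+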
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+ """
+ Thread-safe connection pool for one host.
+
+ :param host:
+ Host used for this HTTP Connection (e.g. "localhost"), passed into
+ :class:`http.client.HTTPConnection`.
+
+ :param port:
+ Port used for this HTTP Connection (None is equivalent to 80), passed
+ into :class:`http.client.HTTPConnection`.
+
+ :param strict:
+ Causes BadStatusLine to be raised if the status line can't be parsed
+ as a valid HTTP/1.0 or 1.1 status line, passed into
+ :class:`http.client.HTTPConnection`.
+
+ .. note::
+ Only works in Python 2. This parameter is ignored in Python 3.
+
+ :param timeout:
+ Socket timeout in seconds for each individual connection. This can
+ be a float or integer, which sets the timeout for the HTTP request,
+ or an instance of :class:`urllib3.util.Timeout` which gives you more
+        fine-grained control over request timeouts. After the constructor
+        runs, this attribute is always a :class:`urllib3.util.Timeout` object.
+
+ :param maxsize:
+ Number of connections to save that can be reused. More than 1 is useful
+ in multithreaded situations. If ``block`` is set to False, more
+ connections will be created but they will not be saved once they've
+ been used.
+
+ :param block:
+ If set to True, no more than ``maxsize`` connections will be used at
+ a time. When no free connections are available, the call will block
+ until a connection has been released. This is a useful side effect for
+ particular multithreaded situations where one does not want to use more
+ than maxsize connections per host to prevent flooding.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param retries:
+ Retry configuration to use by default with requests in this pool.
+
+ :param _proxy:
+ Parsed proxy URL, should not be used directly, instead, see
+ :class:`urllib3.ProxyManager`
+
+ :param _proxy_headers:
+ A dictionary with proxy headers, should not be used directly,
+ instead, see :class:`urllib3.ProxyManager`
+
+ :param \\**conn_kw:
+ Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+ :class:`urllib3.connection.HTTPSConnection` instances.
+ """
+
+ scheme = "http"
+ ConnectionCls = HTTPConnection
+ ResponseCls = HTTPResponse
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ maxsize=1,
+ block=False,
+ headers=None,
+ retries=None,
+ _proxy=None,
+ _proxy_headers=None,
+ _proxy_config=None,
+ **conn_kw
+ ):
+ ConnectionPool.__init__(self, host, port)
+ RequestMethods.__init__(self, headers)
+
+ self.strict = strict
+
+ if not isinstance(timeout, Timeout):
+ timeout = Timeout.from_float(timeout)
+
+ if retries is None:
+ retries = Retry.DEFAULT
+
+ self.timeout = timeout
+ self.retries = retries
+
+ self.pool = self.QueueCls(maxsize)
+ self.block = block
+
+ self.proxy = _proxy
+ self.proxy_headers = _proxy_headers or {}
+ self.proxy_config = _proxy_config
+
+ # Fill the queue up so that doing get() on it will block properly
+ for _ in xrange(maxsize):
+ self.pool.put(None)
+
+ # These are mostly for testing and debugging purposes.
+ self.num_connections = 0
+ self.num_requests = 0
+ self.conn_kw = conn_kw
+
+ if self.proxy:
+ # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+ # We cannot know if the user has added default socket options, so we cannot replace the
+ # list.
+ self.conn_kw.setdefault("socket_options", [])
+
+ self.conn_kw["proxy"] = self.proxy
+ self.conn_kw["proxy_config"] = self.proxy_config
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`HTTPConnection`.
+ """
+ self.num_connections += 1
+ log.debug(
+ "Starting new HTTP connection (%d): %s:%s",
+ self.num_connections,
+ self.host,
+ self.port or "80",
+ )
+
+ conn = self.ConnectionCls(
+ host=self.host,
+ port=self.port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict,
+ **self.conn_kw
+ )
+ return conn
+
+ def _get_conn(self, timeout=None):
+ """
+ Get a connection. Will return a pooled connection if one is available.
+
+ If no connections are available and :prop:`.block` is ``False``, then a
+ fresh connection is returned.
+
+ :param timeout:
+ Seconds to wait before giving up and raising
+ :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+ :prop:`.block` is ``True``.
+ """
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more connections are allowed.",
+ )
+ pass # Oh well, we'll create a new connection then
+
+ # If this is a persistent connection, check if it got disconnected
+ if conn and is_connection_dropped(conn):
+ log.debug("Resetting dropped connection: %s", self.host)
+ conn.close()
+ if getattr(conn, "auto_open", 1) == 0:
+ # This is a proxied connection that has been mutated by
+ # http.client._tunnel() and cannot be reused (since it would
+ # attempt to bypass the proxy)
+ conn = None
+
+ return conn or self._new_conn()
+
+ def _put_conn(self, conn):
+ """
+ Put a connection back into the pool.
+
+ :param conn:
+ Connection object for the current host and port as returned by
+ :meth:`._new_conn` or :meth:`._get_conn`.
+
+ If the pool is already full, the connection is closed and discarded
+ because we exceeded maxsize. If connections are discarded frequently,
+ then maxsize should be increased.
+
+ If the pool is closed, then the connection will be closed and discarded.
+ """
+ try:
+ self.pool.put(conn, block=False)
+ return # Everything is dandy, done.
+ except AttributeError:
+ # self.pool is None.
+ pass
+ except queue.Full:
+ # This should never happen if self.block == True
+ log.warning(
+ "Connection pool is full, discarding connection: %s. Connection pool size: %s",
+ self.host,
+ self.pool.qsize(),
+ )
+ # Connection never got put back into the pool, close it.
+ if conn:
+ conn.close()
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ pass
+
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
+ def _get_timeout(self, timeout):
+ """Helper that always returns a :class:`urllib3.util.Timeout`"""
+ if timeout is _Default:
+ return self.timeout.clone()
+
+ if isinstance(timeout, Timeout):
+ return timeout.clone()
+ else:
+ # User passed us an int/float. This is for backwards compatibility,
+ # can be removed later
+ return Timeout.from_float(timeout)
+
+ def _raise_timeout(self, err, url, timeout_value):
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+ if isinstance(err, SocketTimeout):
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ # See the comment about EAGAIN in Python 3 in _make_request below. In
+ # Python 2 we have to specifically catch it and throw the timeout error.
+ if hasattr(err, "errno") and err.errno in _blocking_errnos:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ # Catch possible read timeouts thrown as SSL errors. If not the
+ # case, rethrow the original. We need to do this because of:
+ # http://bugs.python.org/issue10272
+ if "timed out" in str(err) or "did not complete (read)" in str(
+ err
+ ): # Python < 2.7.4
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ def _make_request(
+ self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
+ ):
+ """
+ Perform a request on a given httplib connection object taken from our
+ pool.
+
+ :param conn:
+ a connection from one of our connection pools
+
+ :param timeout:
+ Socket timeout in seconds for the request. This can be a
+ float or integer, which will set the same timeout value for
+ the socket connect and the socket read, or an instance of
+ :class:`urllib3.util.Timeout`, which gives you more fine-grained
+ control over your timeouts.
+ """
+ self.num_requests += 1
+
+ timeout_obj = self._get_timeout(timeout)
+ timeout_obj.start_connect()
+ conn.timeout = timeout_obj.connect_timeout
+
+ # Trigger any extra validation we need to do.
+ try:
+ self._validate_conn(conn)
+ except (SocketTimeout, BaseSSLError) as e:
+ # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+ self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+ raise
+
+ # conn.request() calls http.client.*.request, not the method in
+ # urllib3.request. It also calls makefile (recv) on the socket.
+ try:
+ if chunked:
+ conn.request_chunked(method, url, **httplib_request_kw)
+ else:
+ conn.request(method, url, **httplib_request_kw)
+
+ # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
+ # legitimately able to close the connection after sending a valid response.
+ # With this behaviour, the received response is still readable.
+ except BrokenPipeError:
+ # Python 3
+ pass
+ except IOError as e:
+ # Python 2 and macOS/Linux
+ # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
+ # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
+ if e.errno not in {
+ errno.EPIPE,
+ errno.ESHUTDOWN,
+ errno.EPROTOTYPE,
+ }:
+ raise
+
+ # Reset the timeout for the recv() on the socket
+ read_timeout = timeout_obj.read_timeout
+
+ # App Engine doesn't have a sock attr
+ if getattr(conn, "sock", None):
+ # In Python 3 socket.py will catch EAGAIN and return None when you
+ # try and read into the file pointer created by http.client, which
+ # instead raises a BadStatusLine exception. Instead of catching
+ # the exception and assuming all BadStatusLine exceptions are read
+ # timeouts, check for a zero timeout before making the request.
+ if read_timeout == 0:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % read_timeout
+ )
+ if read_timeout is Timeout.DEFAULT_TIMEOUT:
+ conn.sock.settimeout(socket.getdefaulttimeout())
+ else: # None or a value
+ conn.sock.settimeout(read_timeout)
+
+ # Receive the response from the server
+ try:
+ try:
+ # Python 2.7, use buffering of HTTP responses
+ httplib_response = conn.getresponse(buffering=True)
+ except TypeError:
+ # Python 3
+ try:
+ httplib_response = conn.getresponse()
+ except BaseException as e:
+ # Remove the TypeError from the exception chain in
+ # Python 3 (including for exceptions like SystemExit).
+ # Otherwise it looks like a bug in the code.
+ six.raise_from(e, None)
+ except (SocketTimeout, BaseSSLError, SocketError) as e:
+ self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+ raise
+
+ # AppEngine doesn't have a version attr.
+ http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
+ log.debug(
+ '%s://%s:%s "%s %s %s" %s %s',
+ self.scheme,
+ self.host,
+ self.port,
+ method,
+ url,
+ http_version,
+ httplib_response.status,
+ httplib_response.length,
+ )
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
+ log.warning(
+ "Failed to parse headers (url=%s): %s",
+ self._absolute_url(url),
+ hpe,
+ exc_info=True,
+ )
+
+ return httplib_response
+
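+ # A sketch of fine-grained per-request timeouts (``Timeout`` is imported at
+ # the top of this module; the vendored import path below is an assumption):
+ #
+ #     from pip._vendor.urllib3.util.timeout import Timeout
+ #     pool.urlopen("GET", "/", timeout=Timeout(connect=2.0, read=7.0))
+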
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ if self.pool is None:
+ return
+ # Disable access to the pool
+ old_pool, self.pool = self.pool, None
+
+ try:
+ while True:
+ conn = old_pool.get(block=False)
+ if conn:
+ conn.close()
+
+ except queue.Empty:
+ pass # Done.
+
+ def is_same_host(self, url):
+ """
+ Check if the given ``url`` is a member of the same host as this
+ connection pool.
+ """
+ if url.startswith("/"):
+ return True
+
+ # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, host, port = get_host(url)
+ if host is not None:
+ host = _normalize_host(host, scheme=scheme)
+
+ # Use explicit default port for comparison when none is given
+ if self.port and not port:
+ port = port_by_scheme.get(scheme)
+ elif not self.port and port == port_by_scheme.get(scheme):
+ port = None
+
+ return (scheme, host, port) == (self.scheme, self.host, self.port)
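+
+ # Worked example of the port normalization above, assuming a pool built as
+ # ``HTTPConnectionPool("example.com")`` (implicit port):
+ #
+ #     pool.is_same_host("http://example.com/")       # True  (default port)
+ #     pool.is_same_host("http://example.com:80/")    # True  (explicit 80)
+ #     pool.is_same_host("http://example.com:8080/")  # False (port differs)
+ #     pool.is_same_host("/relative/path")            # True  (path on this pool)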
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=None,
+ redirect=True,
+ assert_same_host=True,
+ timeout=_Default,
+ pool_timeout=None,
+ release_conn=None,
+ chunked=False,
+ body_pos=None,
+ **response_kw
+ ):
+ """
+ Get a connection from the pool and perform an HTTP request. This is the
+ lowest level call for making a request, so you'll need to specify all
+ the raw details.
+
+ .. note::
+
+ More commonly, it's appropriate to use a convenience method provided
+ by :class:`.RequestMethods`, such as :meth:`request`.
+
+ .. note::
+
+ `release_conn` will only behave as expected if
+ `preload_content=False` because we want to make
+ `preload_content=False` the default behaviour someday soon without
+ breaking backwards compatibility.
+
+ :param method:
+ HTTP request method (such as GET, POST, PUT, etc.)
+
+ :param url:
+ The URL to perform the request on.
+
+ :param body:
+ Data to send in the request body, either :class:`str`, :class:`bytes`,
+ an iterable of :class:`str`/:class:`bytes`, or a file-like object.
+
+ :param headers:
+ Dictionary of custom headers to send, such as User-Agent,
+ If-None-Match, etc. If None, pool headers are used. If provided,
+ these headers completely replace any pool-specific headers.
+
+ :param retries:
+ Configure the number of retries to allow before raising a
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+ Pass ``None`` to retry until you receive a response. Pass a
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
+ over different types of retries.
+ Pass an integer number to retry connection errors that many times,
+ but no other types of errors. Pass zero to never retry.
+
+ If ``False``, then retries are disabled and any exception is raised
+ immediately. Also, instead of raising a MaxRetryError on redirects,
+ the redirect response will be returned.
+
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+ :param redirect:
+ If True, automatically handle redirects (status codes 301, 302,
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
+ will disable redirect, too.
+
+ :param assert_same_host:
+ If ``True``, ensures that the requested host matches the pool's host,
+ and raises HostChangedError otherwise. When ``False``, you can
+ use the pool on an HTTP proxy and request foreign hosts.
+
+ :param timeout:
+ If specified, overrides the default timeout for this one
+ request. It may be a float (in seconds) or an instance of
+ :class:`urllib3.util.Timeout`.
+
+ :param pool_timeout:
+ If set and the pool is set to block=True, then this method will
+ block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+ connection is available within the time period.
+
+ :param release_conn:
+ If False, then the urlopen call will not release the connection
+ back into the pool once a response is received (but will release if
+ you read the entire contents of the response such as when
+ `preload_content=True`). This is useful if you're not preloading
+ the response's content immediately. You will need to call
+ ``r.release_conn()`` on the response ``r`` to return the connection
+ back into the pool. If None, it takes the value of
+ ``response_kw.get('preload_content', True)``.
+
+ :param chunked:
+ If True, urllib3 will send the body using chunked transfer
+ encoding. Otherwise, urllib3 will send the body using the standard
+ content-length form. Defaults to False.
+
+ :param int body_pos:
+ Position to seek to in file-like body in the event of a retry or
+ redirect. Typically this won't need to be set because urllib3 will
+ auto-populate the value when needed.
+
+ :param \\**response_kw:
+ Additional parameters are passed to
+ :meth:`urllib3.response.HTTPResponse.from_httplib`
+ """
+
+ parsed_url = parse_url(url)
+ destination_scheme = parsed_url.scheme
+
+ if headers is None:
+ headers = self.headers
+
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if release_conn is None:
+ release_conn = response_kw.get("preload_content", True)
+
+ # Check host
+ if assert_same_host and not self.is_same_host(url):
+ raise HostChangedError(self, url, retries)
+
+ # Ensure that the URL we're connecting to is properly encoded
+ if url.startswith("/"):
+ url = six.ensure_str(_encode_target(url))
+ else:
+ url = six.ensure_str(parsed_url.url)
+
+ conn = None
+
+ # Track whether `conn` needs to be released before
+ # returning/raising/recursing. Update this variable if necessary, and
+ # leave `release_conn` constant throughout the function. That way, if
+ # the function recurses, the original value of `release_conn` will be
+ # passed down into the recursive call, and its value will be respected.
+ #
+ # See issue #651 [1] for details.
+ #
+ # [1] <https://github.com/urllib3/urllib3/issues/651>
+ release_this_conn = release_conn
+
+ http_tunnel_required = connection_requires_http_tunnel(
+ self.proxy, self.proxy_config, destination_scheme
+ )
+
+ # Merge the proxy headers. Only done when not using HTTP CONNECT. We
+ # have to copy the headers dict so we can safely change it without those
+ # changes being reflected in anyone else's copy.
+ if not http_tunnel_required:
+ headers = headers.copy()
+ headers.update(self.proxy_headers)
+
+ # Must keep the exception bound to a separate variable or else Python 3
+ # complains about UnboundLocalError.
+ err = None
+
+ # Keep track of whether we cleanly exited the except block. This
+ # ensures we do proper cleanup in finally.
+ clean_exit = False
+
+ # Rewind body position, if needed. Record current position
+ # for future rewinds in the event of a redirect/retry.
+ body_pos = set_file_position(body, body_pos)
+
+ try:
+ # Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
+ conn = self._get_conn(timeout=pool_timeout)
+
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(
+ conn, "sock", None
+ )
+ if is_new_proxy_conn and http_tunnel_required:
+ self._prepare_proxy(conn)
+
+ # Make the request on the httplib connection object.
+ httplib_response = self._make_request(
+ conn,
+ method,
+ url,
+ timeout=timeout_obj,
+ body=body,
+ headers=headers,
+ chunked=chunked,
+ )
+
+ # If we're going to release the connection in ``finally:``, then
+ # the response doesn't need to know about the connection. Otherwise
+ # it will also try to release it and we'll have a double-release
+ # mess.
+ response_conn = conn if not release_conn else None
+
+ # Pass method to Response for length checking
+ response_kw["request_method"] = method
+
+ # Import httplib's response into our own wrapper object
+ response = self.ResponseCls.from_httplib(
+ httplib_response,
+ pool=self,
+ connection=response_conn,
+ retries=retries,
+ **response_kw
+ )
+
+ # Everything went great!
+ clean_exit = True
+
+ except EmptyPoolError:
+ # Didn't get a connection from the pool, no need to clean up
+ clean_exit = True
+ release_this_conn = False
+ raise
+
+ except (
+ TimeoutError,
+ HTTPException,
+ SocketError,
+ ProtocolError,
+ BaseSSLError,
+ SSLError,
+ CertificateError,
+ ) as e:
+ # Discard the connection for these exceptions. It will be
+ # replaced during the next _get_conn() call.
+ clean_exit = False
+
+ def _is_ssl_error_message_from_http_proxy(ssl_error):
+ # We're trying to detect the message 'WRONG_VERSION_NUMBER' but
+ # SSLErrors are kinda all over the place when it comes to the message,
+ # so we try to cover our bases here!
+ message = " ".join(re.split("[^a-z]", str(ssl_error).lower()))
+ return (
+ "wrong version number" in message or "unknown protocol" in message
+ )
+
+ # Try to detect a common user error with proxies, which is to
+ # set an HTTP proxy to be HTTPS when it should be 'http://'
+ # (i.e. {'http': 'http://proxy', 'https': 'https://proxy'}).
+ # Instead we add a nice error message and point to a URL.
+ if (
+ isinstance(e, BaseSSLError)
+ and self.proxy
+ and _is_ssl_error_message_from_http_proxy(e)
+ and conn.proxy
+ and conn.proxy.scheme == "https"
+ ):
+ e = ProxyError(
+ "Your proxy appears to only use HTTP and not HTTPS, "
+ "try changing your proxy URL to be HTTP. See: "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#https-proxy-error-http-proxy",
+ SSLError(e),
+ )
+ elif isinstance(e, (BaseSSLError, CertificateError)):
+ e = SSLError(e)
+ elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+ e = ProxyError("Cannot connect to proxy.", e)
+ elif isinstance(e, (SocketError, HTTPException)):
+ e = ProtocolError("Connection aborted.", e)
+
+ retries = retries.increment(
+ method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
+ )
+ retries.sleep()
+
+ # Keep track of the error for the retry warning.
+ err = e
+
+ finally:
+ if not clean_exit:
+ # We hit some kind of exception, handled or otherwise. We need
+ # to throw the connection away unless explicitly told not to.
+ # Close the connection, set the variable to None, and make sure
+ # we put the None back in the pool to avoid leaking it.
+ conn = conn and conn.close()
+ release_this_conn = True
+
+ if release_this_conn:
+ # Put the connection back to be reused. If the connection is
+ # expired then it will be None, which will get replaced with a
+ # fresh connection during _get_conn.
+ self._put_conn(conn)
+
+ if not conn:
+ # Try again
+ log.warning(
+ "Retrying (%r) after connection broken by '%r': %s", retries, err, url
+ )
+ return self.urlopen(
+ method,
+ url,
+ body,
+ headers,
+ retries,
+ redirect,
+ assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ # Handle redirect?
+ redirect_location = redirect and response.get_redirect_location()
+ if redirect_location:
+ if response.status == 303:
+ method = "GET"
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ response.drain_conn()
+ raise
+ return response
+
+ response.drain_conn()
+ retries.sleep_for_retry(response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(
+ method,
+ redirect_location,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(response.headers.get("Retry-After"))
+ if retries.is_retry(method, response.status, has_retry_after):
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_status:
+ response.drain_conn()
+ raise
+ return response
+
+ response.drain_conn()
+ retries.sleep(response)
+ log.debug("Retry: %s", url)
+ return self.urlopen(
+ method,
+ url,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ return response
+
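+# An end-to-end sketch of :meth:`HTTPConnectionPool.urlopen` (illustrative;
+# ``Retry`` is imported at the top of this module):
+#
+#     from pip._vendor.urllib3.util.retry import Retry
+#
+#     pool = HTTPConnectionPool("example.com")
+#     r = pool.urlopen("GET", "/", retries=Retry(total=3, redirect=2))
+#     print(r.status, len(r.data))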
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+ """
+ Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+ :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
+ ``assert_hostname`` and ``host`` in this order to verify connections.
+ If ``assert_hostname`` is False, no verification is done.
+
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, ``ssl_version``, and ``key_password`` parameters are only
+ used if :mod:`ssl` is available and are fed into
+ :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into
+ an SSL socket.
+ """
+
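+ # A minimal verified-TLS sketch (the CA bundle path is illustrative; any
+ # PEM bundle, e.g. one from certifi, works):
+ #
+ #     pool = HTTPSConnectionPool(
+ #         "example.com",
+ #         port=443,
+ #         cert_reqs="CERT_REQUIRED",
+ #         ca_certs="/path/to/cacert.pem",
+ #     )
+ #     r = pool.request("GET", "/")
+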
+ scheme = "https"
+ ConnectionCls = HTTPSConnection
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ maxsize=1,
+ block=False,
+ headers=None,
+ retries=None,
+ _proxy=None,
+ _proxy_headers=None,
+ key_file=None,
+ cert_file=None,
+ cert_reqs=None,
+ key_password=None,
+ ca_certs=None,
+ ssl_version=None,
+ assert_hostname=None,
+ assert_fingerprint=None,
+ ca_cert_dir=None,
+ **conn_kw
+ ):
+
+ HTTPConnectionPool.__init__(
+ self,
+ host,
+ port,
+ strict,
+ timeout,
+ maxsize,
+ block,
+ headers,
+ retries,
+ _proxy,
+ _proxy_headers,
+ **conn_kw
+ )
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.key_password = key_password
+ self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ def _prepare_conn(self, conn):
+ """
+ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+ by configuring its certificate settings (tunnel establishment is
+ handled separately in :meth:`_prepare_proxy`).
+ """
+
+ if isinstance(conn, VerifiedHTTPSConnection):
+ conn.set_cert(
+ key_file=self.key_file,
+ key_password=self.key_password,
+ cert_file=self.cert_file,
+ cert_reqs=self.cert_reqs,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint,
+ )
+ conn.ssl_version = self.ssl_version
+ return conn
+
+ def _prepare_proxy(self, conn):
+ """
+ Establishes a tunnel connection through HTTP CONNECT.
+
+ The tunnel connection is established early because otherwise httplib
+ would improperly set the Host: header to the proxy's IP:port.
+ """
+
+ conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
+
+ if self.proxy.scheme == "https":
+ conn.tls_in_tls_required = True
+
+ conn.connect()
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`http.client.HTTPSConnection`.
+ """
+ self.num_connections += 1
+ log.debug(
+ "Starting new HTTPS connection (%d): %s:%s",
+ self.num_connections,
+ self.host,
+ self.port or "443",
+ )
+
+ if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+ raise SSLError(
+ "Can't connect to HTTPS URL because the SSL module is not available."
+ )
+
+ actual_host = self.host
+ actual_port = self.port
+ if self.proxy is not None:
+ actual_host = self.proxy.host
+ actual_port = self.proxy.port
+
+ conn = self.ConnectionCls(
+ host=actual_host,
+ port=actual_port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict,
+ cert_file=self.cert_file,
+ key_file=self.key_file,
+ key_password=self.key_password,
+ **self.conn_kw
+ )
+
+ return self._prepare_conn(conn)
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+ # Force connect early to allow us to validate the connection.
+ if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
+ conn.connect()
+
+ if not conn.is_verified:
+ warnings.warn(
+ (
+ "Unverified HTTPS request is being made to host '%s'. "
+ "Adding certificate verification is strongly advised. See: "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#ssl-warnings" % conn.host
+ ),
+ InsecureRequestWarning,
+ )
+
+ if getattr(conn, "proxy_is_verified", None) is False:
+ warnings.warn(
+ (
+ "Unverified HTTPS connection done to an HTTPS proxy. "
+ "Adding certificate verification is strongly advised. See: "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#ssl-warnings"
+ ),
+ InsecureRequestWarning,
+ )
+
+
+def connection_from_url(url, **kw):
+ """
+ Given a url, return a :class:`.ConnectionPool` instance for its host.
+
+ This is a shortcut for not having to parse out the scheme, host, and port
+ of the url before creating a :class:`.ConnectionPool` instance.
+
+ :param url:
+ Absolute URL string that must include the scheme. Port is optional.
+
+ :param \\**kw:
+ Passes additional parameters to the constructor of the appropriate
+ :class:`.ConnectionPool`. Useful for specifying things like
+ timeout, maxsize, headers, etc.
+
+ Example::
+
+ >>> conn = connection_from_url('http://google.com/')
+ >>> r = conn.request('GET', '/')
+ """
+ scheme, host, port = get_host(url)
+ port = port or port_by_scheme.get(scheme, 80)
+ if scheme == "https":
+ return HTTPSConnectionPool(host, port=port, **kw)
+ else:
+ return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _normalize_host(host, scheme):
+ """
+ Normalize hosts for comparisons and use with sockets.
+ """
+
+ host = normalize_host(host, scheme)
+
+ # httplib doesn't like it when we include brackets in IPv6 addresses
+ # Specifically, if we include brackets but also pass the port then
+ # httplib crazily doubles up the square brackets on the Host header.
+ # Instead, we need to make sure we never pass ``None`` as the port.
+ # However, for backward compatibility reasons we can't actually
+ # *assert* that. See http://bugs.python.org/issue28539
+ if host.startswith("[") and host.endswith("]"):
+ host = host[1:-1]
+ return host
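+
+# For instance, the bracket handling above gives:
+#
+#     _normalize_host("[2001:db8::1]", scheme="http")  # -> "2001:db8::1"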
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/__init__.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/__init__.py
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/_appengine_environ.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/_appengine_environ.py
new file mode 100644
index 0000000000..8765b907d7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/_appengine_environ.py
@@ -0,0 +1,36 @@
+"""
+This module provides means to detect the App Engine environment.
+"""
+
+import os
+
+
+def is_appengine():
+ return is_local_appengine() or is_prod_appengine()
+
+
+def is_appengine_sandbox():
+ """Reports if the app is running in the first generation sandbox.
+
+ The second generation runtimes are technically still in a sandbox, but it
+ is much less restrictive, so generally you shouldn't need to check for it.
+ See https://cloud.google.com/appengine/docs/standard/runtimes
+ """
+ return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
+
+
+def is_local_appengine():
+ return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
+ "SERVER_SOFTWARE", ""
+ ).startswith("Development/")
+
+
+def is_prod_appengine():
+ return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
+ "SERVER_SOFTWARE", ""
+ ).startswith("Google App Engine/")
+
+
+def is_prod_appengine_mvms():
+ """Deprecated."""
+ return False
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/__init__.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/__init__.py
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/bindings.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 0000000000..264d564dbd
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,519 @@
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes import (
+ CDLL,
+ CFUNCTYPE,
+ POINTER,
+ c_bool,
+ c_byte,
+ c_char_p,
+ c_int32,
+ c_long,
+ c_size_t,
+ c_uint32,
+ c_ulong,
+ c_void_p,
+)
+from ctypes.util import find_library
+
+from ...packages.six import raise_from
+
+if platform.system() != "Darwin":
+ raise ImportError("Only macOS is supported")
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split(".")))
+if version_info < (10, 8):
+ raise OSError(
+ "Only OS X 10.8 and newer are supported, not %s.%s"
+ % (version_info[0], version_info[1])
+ )
+
+
+def load_cdll(name, macos10_16_path):
+ """Loads a CDLL by name, falling back to known path on 10.16+"""
+ try:
+ # Big Sur is technically 11 but we use 10.16 due to the Big Sur
+ # beta being labeled as 10.16.
+ if version_info >= (10, 16):
+ path = macos10_16_path
+ else:
+ path = find_library(name)
+ if not path:
+ raise OSError # Caught and reraised as 'ImportError'
+ return CDLL(path, use_errno=True)
+ except OSError:
+ raise_from(ImportError("The library %s failed to load" % name), None)
+
+
+Security = load_cdll(
+ "Security", "/System/Library/Frameworks/Security.framework/Security"
+)
+CoreFoundation = load_cdll(
+ "CoreFoundation",
+ "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
+)
+
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+ Security.SecItemImport.argtypes = [
+ CFDataRef,
+ CFStringRef,
+ POINTER(SecExternalFormat),
+ POINTER(SecExternalItemType),
+ SecItemImportExportFlags,
+ POINTER(SecItemImportExportKeyParameters),
+ SecKeychainRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecItemImport.restype = OSStatus
+
+ Security.SecCertificateGetTypeID.argtypes = []
+ Security.SecCertificateGetTypeID.restype = CFTypeID
+
+ Security.SecIdentityGetTypeID.argtypes = []
+ Security.SecIdentityGetTypeID.restype = CFTypeID
+
+ Security.SecKeyGetTypeID.argtypes = []
+ Security.SecKeyGetTypeID.restype = CFTypeID
+
+ Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
+ Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+ Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
+ Security.SecCertificateCopyData.restype = CFDataRef
+
+ Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SecIdentityCreateWithCertificate.argtypes = [
+ CFTypeRef,
+ SecCertificateRef,
+ POINTER(SecIdentityRef),
+ ]
+ Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+ Security.SecKeychainCreate.argtypes = [
+ c_char_p,
+ c_uint32,
+ c_void_p,
+ Boolean,
+ c_void_p,
+ POINTER(SecKeychainRef),
+ ]
+ Security.SecKeychainCreate.restype = OSStatus
+
+ Security.SecKeychainDelete.argtypes = [SecKeychainRef]
+ Security.SecKeychainDelete.restype = OSStatus
+
+ Security.SecPKCS12Import.argtypes = [
+ CFDataRef,
+ CFDictionaryRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecPKCS12Import.restype = OSStatus
+
+ SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+ SSLWriteFunc = CFUNCTYPE(
+ OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
+ )
+
+ Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
+ Security.SSLSetIOFuncs.restype = OSStatus
+
+ Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
+ Security.SSLSetPeerID.restype = OSStatus
+
+ Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
+ Security.SSLSetCertificate.restype = OSStatus
+
+ Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
+ Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+ Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
+ Security.SSLSetConnection.restype = OSStatus
+
+ Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
+ Security.SSLSetPeerDomainName.restype = OSStatus
+
+ Security.SSLHandshake.argtypes = [SSLContextRef]
+ Security.SSLHandshake.restype = OSStatus
+
+ Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
+ Security.SSLRead.restype = OSStatus
+
+ Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
+ Security.SSLWrite.restype = OSStatus
+
+ Security.SSLClose.argtypes = [SSLContextRef]
+ Security.SSLClose.restype = OSStatus
+
+ Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
+ Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+ Security.SSLGetSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t),
+ ]
+ Security.SSLGetSupportedCiphers.restype = OSStatus
+
+ Security.SSLSetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ c_size_t,
+ ]
+ Security.SSLSetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNumberEnabledCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
+ Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t),
+ ]
+ Security.SSLGetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
+ Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+ Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+ SSLContextRef,
+ POINTER(SSLProtocol),
+ ]
+ Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+ Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
+ Security.SSLCopyPeerTrust.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
+ Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean]
+ Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+ Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
+ Security.SecTrustEvaluate.restype = OSStatus
+
+ Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
+ Security.SecTrustGetCertificateCount.restype = CFIndex
+
+ Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
+ Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+ Security.SSLCreateContext.argtypes = [
+ CFAllocatorRef,
+ SSLProtocolSide,
+ SSLConnectionType,
+ ]
+ Security.SSLCreateContext.restype = SSLContextRef
+
+ Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
+ Security.SSLSetSessionOption.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
+ Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
+ Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+ try:
+ Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef]
+ Security.SSLSetALPNProtocols.restype = OSStatus
+ except AttributeError:
+ # Supported only in 10.12+
+ pass
+
+ Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SSLReadFunc = SSLReadFunc
+ Security.SSLWriteFunc = SSLWriteFunc
+ Security.SSLContextRef = SSLContextRef
+ Security.SSLProtocol = SSLProtocol
+ Security.SSLCipherSuite = SSLCipherSuite
+ Security.SecIdentityRef = SecIdentityRef
+ Security.SecKeychainRef = SecKeychainRef
+ Security.SecTrustRef = SecTrustRef
+ Security.SecTrustResultType = SecTrustResultType
+ Security.SecExternalFormat = SecExternalFormat
+ Security.OSStatus = OSStatus
+
+ Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+ Security, "kSecImportExportPassphrase"
+ )
+ Security.kSecImportItemIdentity = CFStringRef.in_dll(
+ Security, "kSecImportItemIdentity"
+ )
+
+ # CoreFoundation time!
+ CoreFoundation.CFRetain.argtypes = [CFTypeRef]
+ CoreFoundation.CFRetain.restype = CFTypeRef
+
+ CoreFoundation.CFRelease.argtypes = [CFTypeRef]
+ CoreFoundation.CFRelease.restype = None
+
+ CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
+ CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+ CoreFoundation.CFStringCreateWithCString.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFStringEncoding,
+ ]
+ CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+ CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
+ CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+ CoreFoundation.CFStringGetCString.argtypes = [
+ CFStringRef,
+ c_char_p,
+ CFIndex,
+ CFStringEncoding,
+ ]
+ CoreFoundation.CFStringGetCString.restype = c_bool
+
+ CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
+ CoreFoundation.CFDataCreate.restype = CFDataRef
+
+ CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
+ CoreFoundation.CFDataGetLength.restype = CFIndex
+
+ CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
+ CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+ CoreFoundation.CFDictionaryCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFDictionaryKeyCallBacks,
+ CFDictionaryValueCallBacks,
+ ]
+ CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+ CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
+ CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+ CoreFoundation.CFArrayCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+ CoreFoundation.CFArrayCreateMutable.argtypes = [
+ CFAllocatorRef,
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+ CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
+ CoreFoundation.CFArrayAppendValue.restype = None
+
+ CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
+ CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+ CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
+ CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+ CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+ CoreFoundation, "kCFAllocatorDefault"
+ )
+ CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeArrayCallBacks"
+ )
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
+ )
+ CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeDictionaryValueCallBacks"
+ )
+
+ CoreFoundation.CFTypeRef = CFTypeRef
+ CoreFoundation.CFArrayRef = CFArrayRef
+ CoreFoundation.CFStringRef = CFStringRef
+ CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+ raise ImportError("Error initializing ctypes")
+
+
+class CFConst(object):
+ """
+ A class object that acts as essentially a namespace for CoreFoundation
+ constants.
+ """
+
+ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+ """
+ A class object that acts as essentially a namespace for Security constants.
+ """
+
+ kSSLSessionOptionBreakOnServerAuth = 0
+
+ kSSLProtocol2 = 1
+ kSSLProtocol3 = 2
+ kTLSProtocol1 = 4
+ kTLSProtocol11 = 7
+ kTLSProtocol12 = 8
+ # SecureTransport does not support TLS 1.3 even if there's a constant for it
+ kTLSProtocol13 = 10
+ kTLSProtocolMaxSupported = 999
+
+ kSSLClientSide = 1
+ kSSLStreamType = 0
+
+ kSecFormatPEMSequence = 10
+
+ kSecTrustResultInvalid = 0
+ kSecTrustResultProceed = 1
+ # This gap is present on purpose: this was kSecTrustResultConfirm, which
+ # is deprecated.
+ kSecTrustResultDeny = 3
+ kSecTrustResultUnspecified = 4
+ kSecTrustResultRecoverableTrustFailure = 5
+ kSecTrustResultFatalTrustFailure = 6
+ kSecTrustResultOtherError = 7
+
+ errSSLProtocol = -9800
+ errSSLWouldBlock = -9803
+ errSSLClosedGraceful = -9805
+ errSSLClosedNoNotify = -9816
+ errSSLClosedAbort = -9806
+
+ errSSLXCertChainInvalid = -9807
+ errSSLCrypto = -9809
+ errSSLInternal = -9810
+ errSSLCertExpired = -9814
+ errSSLCertNotYetValid = -9815
+ errSSLUnknownRootCert = -9812
+ errSSLNoRootCert = -9813
+ errSSLHostNameMismatch = -9843
+ errSSLPeerHandshakeFail = -9824
+ errSSLPeerUserCancelled = -9839
+ errSSLWeakPeerEphemeralDHKey = -9850
+ errSSLServerAuthCompleted = -9841
+ errSSLRecordOverflow = -9847
+
+ errSecVerifyFailed = -67808
+ errSecNoTrustSettings = -25263
+ errSecItemNotFound = -25300
+ errSecInvalidTrustSettings = -25262
+
+ # Cipher suites. We only pick the ones our default cipher string allows.
+ # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+ TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+ TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+ TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+ TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+ TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
+ TLS_AES_128_GCM_SHA256 = 0x1301
+ TLS_AES_256_GCM_SHA384 = 0x1302
+ TLS_AES_128_CCM_8_SHA256 = 0x1305
+ TLS_AES_128_CCM_SHA256 = 0x1304
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/low_level.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 0000000000..fa0b245d27
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,397 @@
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import os
+import re
+import ssl
+import struct
+import tempfile
+
+from .bindings import CFConst, CoreFoundation, Security
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+ b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+ """
+ Given a bytestring, create a CFData object from it. This CFData object must
+ be CFReleased by the caller.
+ """
+ return CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+ )
+
+
+def _cf_dictionary_from_tuples(tuples):
+ """
+ Given a list of Python tuples, create an associated CFDictionary.
+ """
+ dictionary_size = len(tuples)
+
+ # We need to get the dictionary keys and values out in the same order.
+ keys = (t[0] for t in tuples)
+ values = (t[1] for t in tuples)
+ cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+ cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+ return CoreFoundation.CFDictionaryCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ cf_keys,
+ cf_values,
+ dictionary_size,
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+ CoreFoundation.kCFTypeDictionaryValueCallBacks,
+ )
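+
+# For instance, SecPKCS12Import takes its options as a CFDictionary; a sketch
+# of building one (both the value and the dictionary must be CFReleased by
+# the caller):
+#
+#     passphrase = _cfstr(b"secret")
+#     options = _cf_dictionary_from_tuples(
+#         [(Security.kSecImportExportPassphrase, passphrase)]
+#     )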
+
+
+def _cfstr(py_bstr):
+ """
+ Given Python binary data, create a CFString.
+ The string must be CFReleased by the caller.
+ """
+ c_str = ctypes.c_char_p(py_bstr)
+ cf_str = CoreFoundation.CFStringCreateWithCString(
+ CoreFoundation.kCFAllocatorDefault,
+ c_str,
+ CFConst.kCFStringEncodingUTF8,
+ )
+ return cf_str
+
+
+def _create_cfstring_array(lst):
+ """
+ Given a list of Python binary data, create an associated CFMutableArray.
+ The array must be CFReleased by the caller.
+
+ Raises an ssl.SSLError on failure.
+ """
+ cf_arr = None
+ try:
+ cf_arr = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ if not cf_arr:
+ raise MemoryError("Unable to allocate memory!")
+ for item in lst:
+ cf_str = _cfstr(item)
+ if not cf_str:
+ raise MemoryError("Unable to allocate memory!")
+ try:
+ CoreFoundation.CFArrayAppendValue(cf_arr, cf_str)
+ finally:
+ CoreFoundation.CFRelease(cf_str)
+ except BaseException as e:
+ if cf_arr:
+ CoreFoundation.CFRelease(cf_arr)
+ raise ssl.SSLError("Unable to allocate array: %s" % (e,))
+ return cf_arr
+
+
+def _cf_string_to_unicode(value):
+ """
+ Creates a Unicode string from a CFString object. Used entirely for error
+ reporting.
+
+ Yes, it annoys me quite a lot that this function is this complex.
+ """
+ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+ string = CoreFoundation.CFStringGetCStringPtr(
+ value_as_void_p, CFConst.kCFStringEncodingUTF8
+ )
+ if string is None:
+ buffer = ctypes.create_string_buffer(1024)
+ result = CoreFoundation.CFStringGetCString(
+ value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
+ )
+ if not result:
+ raise OSError("Error copying C string from CFStringRef")
+ string = buffer.value
+ if string is not None:
+ string = string.decode("utf-8")
+ return string
+
+
+def _assert_no_error(error, exception_class=None):
+ """
+ Checks the return code and throws an exception if there is an error to
+ report.
+ """
+ if error == 0:
+ return
+
+ cf_error_string = Security.SecCopyErrorMessageString(error, None)
+ output = _cf_string_to_unicode(cf_error_string)
+ CoreFoundation.CFRelease(cf_error_string)
+
+ if output is None or output == u"":
+ output = u"OSStatus %s" % error
+
+ if exception_class is None:
+ exception_class = ssl.SSLError
+
+ raise exception_class(output)
+
+
+def _cert_array_from_pem(pem_bundle):
+ """
+ Given a bundle of certs in PEM format, turns them into a CFArray of certs
+ that can be used to validate a cert chain.
+ """
+ # Normalize the PEM bundle's line endings.
+ pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
+
+ der_certs = [
+ base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
+ ]
+ if not der_certs:
+ raise ssl.SSLError("No root certificates specified")
+
+ cert_array = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ if not cert_array:
+ raise ssl.SSLError("Unable to allocate memory!")
+
+ try:
+ for der_bytes in der_certs:
+ certdata = _cf_data_from_bytes(der_bytes)
+ if not certdata:
+ raise ssl.SSLError("Unable to allocate memory!")
+ cert = Security.SecCertificateCreateWithData(
+ CoreFoundation.kCFAllocatorDefault, certdata
+ )
+ CoreFoundation.CFRelease(certdata)
+ if not cert:
+ raise ssl.SSLError("Unable to build cert object!")
+
+ CoreFoundation.CFArrayAppendValue(cert_array, cert)
+ CoreFoundation.CFRelease(cert)
+ except Exception:
+ # We need to free the array before the exception bubbles further.
+ # We only want to do that if an error occurs: otherwise, the caller
+ # should free.
+ CoreFoundation.CFRelease(cert_array)
+ raise
+
+ return cert_array
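+
+# A usage sketch (the bundle path is illustrative; the returned array must be
+# CFReleased by the caller):
+#
+#     with open("/path/to/cacert.pem", "rb") as f:
+#         trust_anchors = _cert_array_from_pem(f.read())
+#     try:
+#         ...  # e.g. Security.SecTrustSetAnchorCertificates(trust, trust_anchors)
+#     finally:
+#         CoreFoundation.CFRelease(trust_anchors)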
+
+
+def _is_cert(item):
+ """
+ Returns True if a given CFTypeRef is a certificate.
+ """
+ expected = Security.SecCertificateGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+ """
+ Returns True if a given CFTypeRef is an identity.
+ """
+ expected = Security.SecIdentityGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+ """
+ This function creates a temporary Mac keychain that we can use to work with
+ credentials. This keychain uses a one-time password and a temporary file to
+ store the data. We expect to have one keychain per socket. The returned
+ SecKeychainRef must be freed by the caller, including calling
+ SecKeychainDelete.
+
+ Returns a tuple of the SecKeychainRef and the path to the temporary
+ directory that contains it.
+ """
+ # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+ # means we cannot use mkstemp to use a generic temporary file. Instead,
+ # we're going to create a temporary directory and a filename to use there.
+ # This filename will be 8 random bytes expanded into hex (base16). We also
+ # need some random bytes to password-protect the keychain we're creating,
+ # so we ask for 40 random bytes.
+ random_bytes = os.urandom(40)
+ filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
+ password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
+ tempdirectory = tempfile.mkdtemp()
+
+ keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
+
+ # We now want to create the keychain itself.
+ keychain = Security.SecKeychainRef()
+ status = Security.SecKeychainCreate(
+ keychain_path, len(password), password, False, None, ctypes.byref(keychain)
+ )
+ _assert_no_error(status)
+
+ # Having created the keychain, we want to pass it off to the caller.
+ return keychain, tempdirectory
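+
+# A sketch of the expected lifecycle (``shutil`` is not imported by this
+# module and is assumed here):
+#
+#     keychain, tempdir = _temporary_keychain()
+#     try:
+#         ...  # import identities/certs into ``keychain``
+#     finally:
+#         Security.SecKeychainDelete(keychain)
+#         CoreFoundation.CFRelease(keychain)
+#         shutil.rmtree(tempdir)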
+
+
+def _load_items_from_file(keychain, path):
+ """
+ Given a single file, loads all the trust objects from it into arrays and
+ the keychain.
+ Returns a tuple of lists: the first list is a list of identities, the
+ second a list of certs.
+ """
+ certificates = []
+ identities = []
+ result_array = None
+
+ with open(path, "rb") as f:
+ raw_filedata = f.read()
+
+ try:
+ filedata = CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
+ )
+ result_array = CoreFoundation.CFArrayRef()
+ result = Security.SecItemImport(
+ filedata, # cert data
+ None, # Filename, leaving it out for now
+ None, # What the type of the file is, we don't care
+ None, # what's in the file, we don't care
+ 0, # import flags
+ None, # key params, can include passphrase in the future
+ keychain, # The keychain to insert into
+ ctypes.byref(result_array), # Results
+ )
+ _assert_no_error(result)
+
+ # A CFArray is not very useful to us as an intermediary
+ # representation, so we are going to extract the objects we want
+ # and then free the array. We don't need to keep hold of keys: the
+ # keychain already has them!
+ result_count = CoreFoundation.CFArrayGetCount(result_array)
+ for index in range(result_count):
+ item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
+ item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+ if _is_cert(item):
+ CoreFoundation.CFRetain(item)
+ certificates.append(item)
+ elif _is_identity(item):
+ CoreFoundation.CFRetain(item)
+ identities.append(item)
+ finally:
+ if result_array:
+ CoreFoundation.CFRelease(result_array)
+
+ CoreFoundation.CFRelease(filedata)
+
+ return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+ """
+ Load certificates and maybe keys from a number of files. Has the end goal
+ of returning a CFArray containing one SecIdentityRef, and then zero or more
+ SecCertificateRef objects, suitable for use as a client certificate trust
+ chain.
+ """
+ # Ok, the strategy.
+ #
+ # This relies on knowing that macOS will not give you a SecIdentityRef
+ # unless you have imported a key into a keychain. This is a somewhat
+ # artificial limitation of macOS (for example, it doesn't necessarily
+ # affect iOS), but there is nothing inside Security.framework that lets you
+ # get a SecIdentityRef without having a key in a keychain.
+ #
+ # So the policy here is we take all the files and iterate them in order.
+ # Each one will use SecItemImport to have one or more objects loaded from
+ # it. We will also point at a keychain that macOS can use to work with the
+ # private key.
+ #
+ # Once we have all the objects, we'll check what we actually have. If we
+ # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+ # we'll take the first certificate (which we assume to be our leaf) and
+ # ask the keychain to give us a SecIdentityRef with that cert's associated
+ # key.
+ #
+ # We'll then return a CFArray containing the trust chain: one
+ # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+ # responsibility for freeing this CFArray will be with the caller. This
+ # CFArray must remain alive for the entire connection, so in practice it
+ # will be stored with a single SSLSocket, along with the reference to the
+ # keychain.
+ certificates = []
+ identities = []
+
+ # Filter out bad paths.
+ paths = (path for path in paths if path)
+
+ try:
+ for file_path in paths:
+ new_identities, new_certs = _load_items_from_file(keychain, file_path)
+ identities.extend(new_identities)
+ certificates.extend(new_certs)
+
+ # Ok, we have everything. The question is: do we have an identity? If
+ # not, we want to grab one from the first cert we have.
+ if not identities:
+ new_identity = Security.SecIdentityRef()
+ status = Security.SecIdentityCreateWithCertificate(
+ keychain, certificates[0], ctypes.byref(new_identity)
+ )
+ _assert_no_error(status)
+ identities.append(new_identity)
+
+ # We now want to release the original certificate, as we no longer
+ # need it.
+ CoreFoundation.CFRelease(certificates.pop(0))
+
+ # We now need to build a new CFArray that holds the trust chain.
+ trust_chain = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ for item in itertools.chain(identities, certificates):
+ # ArrayAppendValue does a CFRetain on the item. That's fine,
+ # because the finally block will release our other refs to them.
+ CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+ return trust_chain
+ finally:
+ for obj in itertools.chain(identities, certificates):
+ CoreFoundation.CFRelease(obj)
+
+
+TLS_PROTOCOL_VERSIONS = {
+ "SSLv2": (0, 2),
+ "SSLv3": (3, 0),
+ "TLSv1": (3, 1),
+ "TLSv1.1": (3, 2),
+ "TLSv1.2": (3, 3),
+}
+
+
+def _build_tls_unknown_ca_alert(version):
+ """
+ Builds a TLS alert record for an unknown CA.
+ """
+ ver_maj, ver_min = TLS_PROTOCOL_VERSIONS[version]
+ severity_fatal = 0x02
+ description_unknown_ca = 0x30
+ msg = struct.pack(">BB", severity_fatal, description_unknown_ca)
+ msg_len = len(msg)
+ record_type_alert = 0x15
+ record = struct.pack(">BBBH", record_type_alert, ver_maj, ver_min, msg_len) + msg
+ return record
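+
+# Worked example: for "TLSv1.2" this packs a two-byte fatal (0x02) unknown_ca
+# (0x30) alert body behind a record header of type alert (0x15), version 3.3:
+#
+#     _build_tls_unknown_ca_alert("TLSv1.2") == b"\x15\x03\x03\x00\x02\x02\x30"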
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/appengine.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/appengine.py
new file mode 100644
index 0000000000..1717ee22cd
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/appengine.py
@@ -0,0 +1,314 @@
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+Example usage::
+
+ from pip._vendor.urllib3 import PoolManager
+ from pip._vendor.urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+ if is_appengine_sandbox():
+ # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+ http = AppEngineManager()
+ else:
+ # PoolManager uses a socket-level API behind the scenes
+ http = PoolManager()
+
+ r = http.request('GET', 'https://google.com/')
+
+There are `limitations <https://cloud.google.com/appengine/docs/python/\
+urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+ cost-effective in many circumstances as long as your usage is within the
+ limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+ Sockets also have `limitations and restrictions
+ <https://cloud.google.com/appengine/docs/python/sockets/\
+ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
+ To use sockets, be sure to specify the following in your ``app.yaml``::
+
+ env_variables:
+           GAE_USE_SOCKETS_HTTPLIB: 'true'
+
+3. If you are using `App Engine Flexible
+   <https://cloud.google.com/appengine/docs/flexible/>`_, you can use the
+   standard :class:`PoolManager` without any configuration or special
+   environment variables.
+"""
+
+from __future__ import absolute_import
+
+import io
+import logging
+import warnings
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ SSLError,
+ TimeoutError,
+)
+from ..packages.six.moves.urllib.parse import urljoin
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.retry import Retry
+from ..util.timeout import Timeout
+from . import _appengine_environ
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation `here
+ <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+    Notably it will raise an :class:`AppEnginePlatformError` if:
+
+        * URLFetch is not available.
+        * You attempt to use this on App Engine Flexible, where full socket
+          support is available.
+        * A request is larger than 10 megabytes.
+        * A response is larger than 32 megabytes.
+        * You use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
+
+ def __init__(
+ self,
+ headers=None,
+ retries=None,
+ validate_certificate=True,
+ urlfetch_retries=True,
+ ):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment."
+ )
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.io/en/1.26.x/reference/urllib3.contrib.html.",
+ AppEnginePlatformWarning,
+ )
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+ self.urlfetch_retries = urlfetch_retries
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=None,
+ redirect=True,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw
+ ):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ follow_redirects = redirect and retries.redirect != 0 and retries.total
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=self.urlfetch_retries and follow_redirects,
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if "too large" in str(e):
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.",
+ e,
+ )
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if "Too many redirects" in str(e):
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+ "URLFetch response too large, URLFetch only supports"
+ "responses up to 32mb in size.",
+ e,
+ )
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e
+ )
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, retries=retries, **response_kw
+ )
+
+ # Handle redirect?
+ redirect_location = redirect and http_response.get_redirect_location()
+ if redirect_location:
+ # Check for redirect response
+ if self.urlfetch_retries and retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ else:
+ if http_response.status == 303:
+ method = "GET"
+
+ try:
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self
+ )
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ return http_response
+
+ retries.sleep_for_retry(http_response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ redirect_url = urljoin(url, redirect_location)
+ return self.urlopen(
+ method,
+ redirect_url,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ timeout=timeout,
+ **response_kw
+ )
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(http_response.headers.get("Retry-After"))
+ if retries.is_retry(method, http_response.status, has_retry_after):
+ retries = retries.increment(method, url, response=http_response, _pool=self)
+ log.debug("Retry: %s", url)
+ retries.sleep(http_response)
+ return self.urlopen(
+ method,
+ url,
+ body=body,
+ headers=headers,
+ retries=retries,
+ redirect=redirect,
+ timeout=timeout,
+ **response_kw
+ )
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get("content-encoding")
+
+ if content_encoding == "deflate":
+ del urlfetch_resp.headers["content-encoding"]
+
+ transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
+ # We have a full response's content,
+ # so let's make sure we don't report ourselves as chunked data.
+ if transfer_encoding == "chunked":
+ encodings = transfer_encoding.split(",")
+ encodings.remove("chunked")
+ urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
+
+ original_response = HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=io.BytesIO(urlfetch_resp.content),
+ msg=urlfetch_resp.header_msg,
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ return HTTPResponse(
+ body=io.BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ original_response=original_response,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
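+        # For example (illustrative): Timeout(connect=5, read=10, total=30)
+        # triggers the warning below and returns 30, while a plain number
+        # such as 15.0 is returned unchanged.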
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return None # Defer to URLFetch's default.
+ if isinstance(timeout, Timeout):
+ if timeout._read is not None or timeout._connect is not None:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total or default URLFetch timeout.",
+ AppEnginePlatformWarning,
+ )
+ return timeout.total
+ return timeout
+
+ def _get_retries(self, retries, redirect):
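+        # For example (illustrative): retries=3 is turned into Retry(total=3)
+        # by Retry.from_int below; a Retry with connect/read/redirect counts
+        # set triggers the warning, since URLFetch only honours a total count.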
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning,
+ )
+
+ return retries
+
+
+# Alias methods from _appengine_environ to maintain public API interface.
+
+is_appengine = _appengine_environ.is_appengine
+is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
+is_local_appengine = _appengine_environ.is_local_appengine
+is_prod_appengine = _appengine_environ.is_prod_appengine
+is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/ntlmpool.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/ntlmpool.py
new file mode 100644
index 0000000000..471665754e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,130 @@
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
+from __future__ import absolute_import
+
+import warnings
+from logging import getLogger
+
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+warnings.warn(
+ "The 'urllib3.contrib.ntlmpool' module is deprecated and will be removed "
+ "in urllib3 v2.0 release, urllib3 is not able to support it properly due "
+ "to reasons listed in issue: https://github.com/urllib3/urllib3/issues/2282. "
+ "If you are a user of this module please comment in the mentioned issue.",
+ DeprecationWarning,
+)
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+ """
+ Implements an NTLM authentication version of an urllib3 connection pool
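+
+    Example usage (hypothetical host and credentials; a sketch only)::
+
+        pool = NTLMConnectionPool(
+            "EXAMPLE\\user", "secret", "/ntlm-protected",
+            host="server.example.com",
+        )
+        r = pool.urlopen("GET", "/ntlm-protected")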
+ """
+
+ scheme = "https"
+
+ def __init__(self, user, pw, authurl, *args, **kwargs):
+ """
+        authurl is an arbitrary URL on the server that is protected by NTLM.
+ user is the Windows user, probably in the DOMAIN\\username format.
+ pw is the password for the user.
+ """
+ super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+ self.authurl = authurl
+ self.rawuser = user
+ user_parts = user.split("\\", 1)
+ self.domain = user_parts[0].upper()
+ self.user = user_parts[1]
+ self.pw = pw
+
+ def _new_conn(self):
+ # Performs the NTLM handshake that secures the connection. The socket
+ # must be kept open while requests are performed.
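+        #
+        # Message flow (sketch): we send "NTLM <negotiate>"; the server
+        # replies 401 with a "www-authenticate: NTLM <challenge>" header;
+        # we answer with "NTLM <authenticate>" on the same socket.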
+ self.num_connections += 1
+ log.debug(
+ "Starting NTLM HTTPS connection no. %d: https://%s%s",
+ self.num_connections,
+ self.host,
+ self.authurl,
+ )
+
+ headers = {"Connection": "Keep-Alive"}
+ req_header = "Authorization"
+ resp_header = "www-authenticate"
+
+ conn = HTTPSConnection(host=self.host, port=self.port)
+
+ # Send negotiation message
+ headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
+ self.rawuser
+ )
+ log.debug("Request headers: %s", headers)
+ conn.request("GET", self.authurl, None, headers)
+ res = conn.getresponse()
+ reshdr = dict(res.headers)
+ log.debug("Response status: %s %s", res.status, res.reason)
+ log.debug("Response headers: %s", reshdr)
+ log.debug("Response data: %s [...]", res.read(100))
+
+ # Remove the reference to the socket, so that it can not be closed by
+ # the response object (we want to keep the socket open)
+ res.fp = None
+
+ # Server should respond with a challenge message
+ auth_header_values = reshdr[resp_header].split(", ")
+ auth_header_value = None
+ for s in auth_header_values:
+ if s[:5] == "NTLM ":
+ auth_header_value = s[5:]
+ if auth_header_value is None:
+ raise Exception(
+ "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
+ )
+
+ # Send authentication message
+ ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
+ auth_header_value
+ )
+ auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
+ ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
+ )
+ headers[req_header] = "NTLM %s" % auth_msg
+ log.debug("Request headers: %s", headers)
+ conn.request("GET", self.authurl, None, headers)
+ res = conn.getresponse()
+ log.debug("Response status: %s %s", res.status, res.reason)
+ log.debug("Response headers: %s", dict(res.headers))
+ log.debug("Response data: %s [...]", res.read()[:100])
+ if res.status != 200:
+ if res.status == 401:
+ raise Exception("Server rejected request: wrong username or password")
+ raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
+
+ res.fp = None
+ log.debug("Connection established")
+ return conn
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=3,
+ redirect=True,
+ assert_same_host=True,
+ ):
+ if headers is None:
+ headers = {}
+ headers["Connection"] = "Keep-Alive"
+ return super(NTLMConnectionPool, self).urlopen(
+ method, url, body, headers, retries, redirect, assert_same_host
+ )
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/pyopenssl.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/pyopenssl.py
new file mode 100644
index 0000000000..19e4aa97cc
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,518 @@
+"""
+TLS with SNI_-support for Python 2. Follow these instructions if you would
+like to verify TLS certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* `pyOpenSSL`_ (tested with 16.0.0)
+* `cryptography`_ (minimum 1.3.4, from pyopenssl)
+* `idna`_ (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which in turn depends on idna, so
+although we use all three directly here, installing pyopenssl pulls in the rest.
+
+You can install them with the following command:
+
+.. code-block:: bash
+
+ $ python -m pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this:
+
+.. code-block:: python
+
+ try:
+ import pip._vendor.urllib3.contrib.pyopenssl as pyopenssl
+ pyopenssl.inject_into_urllib3()
+ except ImportError:
+ pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+.. _pyopenssl: https://www.pyopenssl.org
+.. _cryptography: https://cryptography.io
+.. _idna: https://github.com/kjd/idna
+"""
+from __future__ import absolute_import
+
+import OpenSSL.crypto
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+
+try:
+ from cryptography.x509 import UnsupportedExtension
+except ImportError:
+ # UnsupportedExtension is gone in cryptography >= 2.1.0
+ class UnsupportedExtension(Exception):
+ pass
+
+
+from io import BytesIO
+from socket import error as SocketError
+from socket import timeout
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+import sys
+import warnings
+
+from .. import util
+from ..packages import six
+from ..util.ssl_ import PROTOCOL_TLS_CLIENT
+
+warnings.warn(
+ "'urllib3.contrib.pyopenssl' module is deprecated and will be removed "
+ "in a future release of urllib3 2.x. Read more in this issue: "
+ "https://github.com/urllib3/urllib3/issues/2680",
+ category=DeprecationWarning,
+ stacklevel=2,
+)
+
+__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+ util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
+ PROTOCOL_TLS_CLIENT: OpenSSL.SSL.SSLv23_METHOD,
+ ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD
+
+if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+
+_stdlib_to_openssl_verify = {
+ ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+ ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+ ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items())
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+ "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support."
+
+ _validate_dependencies_met()
+
+ util.SSLContext = PyOpenSSLContext
+ util.ssl_.SSLContext = PyOpenSSLContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_PYOPENSSL = True
+ util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+ "Undo monkey-patching by :func:`inject_into_urllib3`."
+
+ util.SSLContext = orig_util_SSLContext
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_PYOPENSSL = False
+ util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+ """
+ Verifies that PyOpenSSL's package-level dependencies have been met.
+    Raises `ImportError` if they are not met.
+ """
+ # Method added in `cryptography==1.1`; not available in older versions
+ from cryptography.x509.extensions import Extensions
+
+ if getattr(Extensions, "get_extension_for_class", None) is None:
+ raise ImportError(
+ "'cryptography' module missing required functionality. "
+ "Try upgrading to v1.3.4 or newer."
+ )
+
+ # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+ # attribute is only present on those versions.
+ from OpenSSL.crypto import X509
+
+ x509 = X509()
+ if getattr(x509, "_x509", None) is None:
+ raise ImportError(
+ "'pyOpenSSL' module missing required functionality. "
+ "Try upgrading to v0.14 or newer."
+ )
+
+
+def _dnsname_to_stdlib(name):
+ """
+ Converts a dNSName SubjectAlternativeName field to the form used by the
+ standard library on the given Python version.
+
+ Cryptography produces a dNSName as a unicode string that was idna-decoded
+ from ASCII bytes. We need to idna-encode that string to get it back, and
+ then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+ uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+
+ If the name cannot be idna-encoded then we return None signalling that
+ the name given should be skipped.
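+
+    For example (illustrative): on Python 3, u"*.bücher.example" becomes
+    "*.xn--bcher-kva.example", u"*.example.com" comes back unchanged, and a
+    name that fails IDNA encoding yields None.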
+ """
+
+ def idna_encode(name):
+ """
+ Borrowed wholesale from the Python Cryptography Project. It turns out
+ that we can't just safely call `idna.encode`: it can explode for
+ wildcard names. This avoids that problem.
+ """
+ from pip._vendor import idna
+
+ try:
+ for prefix in [u"*.", u"."]:
+ if name.startswith(prefix):
+ name = name[len(prefix) :]
+ return prefix.encode("ascii") + idna.encode(name)
+ return idna.encode(name)
+ except idna.core.IDNAError:
+ return None
+
+ # Don't send IPv6 addresses through the IDNA encoder.
+ if ":" in name:
+ return name
+
+ name = idna_encode(name)
+ if name is None:
+ return None
+ elif sys.version_info >= (3, 0):
+ name = name.decode("utf-8")
+ return name
+
+
+def get_subj_alt_name(peer_cert):
+ """
+    Given a PyOpenSSL certificate, provides all the subject alternative names.
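+
+    Returns a list of ``(field_type, value)`` pairs, for example (a sketch):
+    ``[("DNS", "example.com"), ("IP Address", "93.184.216.34")]``.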
+ """
+ # Pass the cert to cryptography, which has much better APIs for this.
+ if hasattr(peer_cert, "to_cryptography"):
+ cert = peer_cert.to_cryptography()
+ else:
+ der = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, peer_cert)
+ cert = x509.load_der_x509_certificate(der, openssl_backend)
+
+ # We want to find the SAN extension. Ask Cryptography to locate it (it's
+ # faster than looping in Python)
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
+ except x509.ExtensionNotFound:
+ # No such extension, return the empty list.
+ return []
+ except (
+ x509.DuplicateExtension,
+ UnsupportedExtension,
+ x509.UnsupportedGeneralNameType,
+ UnicodeError,
+ ) as e:
+ # A problem has been found with the quality of the certificate. Assume
+ # no SAN field is present.
+ log.warning(
+ "A problem was encountered with the certificate that prevented "
+ "urllib3 from finding the SubjectAlternativeName field. This can "
+ "affect certificate validation. The error was %s",
+ e,
+ )
+ return []
+
+ # We want to return dNSName and iPAddress fields. We need to cast the IPs
+ # back to strings because the match_hostname function wants them as
+ # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+ # decoded. This is pretty frustrating, but that's what the standard library
+ # does with certificates, and so we need to attempt to do the same.
+ # We also want to skip over names which cannot be idna encoded.
+ names = [
+ ("DNS", name)
+ for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
+ if name is not None
+ ]
+ names.extend(
+ ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress)
+ )
+
+ return names
+
+
+class WrappedSocket(object):
+ """API-compatibility wrapper for Python OpenSSL's Connection-class.
+
+ Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+ collector of pypy.
+ """
+
+ def __init__(self, connection, socket, suppress_ragged_eofs=True):
+ self.connection = connection
+ self.socket = socket
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+ self._closed = False
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, *args, **kwargs):
+ try:
+ data = self.connection.recv(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
+ return b""
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b""
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(self.socket, self.socket.gettimeout()):
+ raise timeout("The read operation timed out")
+ else:
+ return self.recv(*args, **kwargs)
+
+ # TLS 1.3 post-handshake authentication
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("read error: %r" % e)
+ else:
+ return data
+
+ def recv_into(self, *args, **kwargs):
+ try:
+ return self.connection.recv_into(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
+ return 0
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return 0
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(self.socket, self.socket.gettimeout()):
+ raise timeout("The read operation timed out")
+ else:
+ return self.recv_into(*args, **kwargs)
+
+ # TLS 1.3 post-handshake authentication
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("read error: %r" % e)
+
+ def settimeout(self, timeout):
+ return self.socket.settimeout(timeout)
+
+ def _send_until_done(self, data):
+ while True:
+ try:
+ return self.connection.send(data)
+ except OpenSSL.SSL.WantWriteError:
+ if not util.wait_for_write(self.socket, self.socket.gettimeout()):
+ raise timeout()
+ continue
+ except OpenSSL.SSL.SysCallError as e:
+ raise SocketError(str(e))
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(
+ data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]
+ )
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
+
+ def close(self):
+ if self._makefile_refs < 1:
+ try:
+ self._closed = True
+ return self.connection.close()
+ except OpenSSL.SSL.Error:
+ return
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ x509 = self.connection.get_peer_certificate()
+
+ if not x509:
+ return x509
+
+ if binary_form:
+ return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
+
+ return {
+ "subject": ((("commonName", x509.get_subject().CN),),),
+ "subjectAltName": get_subj_alt_name(x509),
+ }
+
+ def version(self):
+ return self.connection.get_protocol_version_name()
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+
+else: # Platform-specific: Python 3
+ makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+ """
+ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+ for translating the interface of the standard library ``SSLContext`` object
+ to calls into PyOpenSSL.
+ """
+
+ def __init__(self, protocol):
+ self.protocol = _openssl_versions[protocol]
+ self._ctx = OpenSSL.SSL.Context(self.protocol)
+ self._options = 0
+ self.check_hostname = False
+
+ @property
+ def options(self):
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ self._options = value
+ self._ctx.set_options(value)
+
+ @property
+ def verify_mode(self):
+ return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback)
+
+ def set_default_verify_paths(self):
+ self._ctx.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ if isinstance(ciphers, six.text_type):
+ ciphers = ciphers.encode("utf-8")
+ self._ctx.set_cipher_list(ciphers)
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ if cafile is not None:
+ cafile = cafile.encode("utf-8")
+ if capath is not None:
+ capath = capath.encode("utf-8")
+ try:
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("unable to load trusted certificates: %r" % e)
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._ctx.use_certificate_chain_file(certfile)
+ if password is not None:
+ if not isinstance(password, six.binary_type):
+ password = password.encode("utf-8")
+ self._ctx.set_passwd_cb(lambda *_: password)
+ self._ctx.use_privatekey_file(keyfile or certfile)
+
+ def set_alpn_protocols(self, protocols):
+ protocols = [six.ensure_binary(p) for p in protocols]
+ return self._ctx.set_alpn_protos(protocols)
+
+ def wrap_socket(
+ self,
+ sock,
+ server_side=False,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True,
+ server_hostname=None,
+ ):
+ cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+ if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
+ server_hostname = server_hostname.encode("utf-8")
+
+ if server_hostname is not None:
+ cnx.set_tlsext_host_name(server_hostname)
+
+ cnx.set_connect_state()
+
+ while True:
+ try:
+ cnx.do_handshake()
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(sock, sock.gettimeout()):
+ raise timeout("select timed out")
+ continue
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("bad handshake: %r" % e)
+ break
+
+ return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+ return err_no == 0
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/securetransport.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/securetransport.py
new file mode 100644
index 0000000000..4a06bc69d5
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/securetransport.py
@@ -0,0 +1,921 @@
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+ import pip._vendor.urllib3.contrib.securetransport as securetransport
+ securetransport.inject_into_urllib3()
+
+Happy TLSing!
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+.. code-block::
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import struct
+import threading
+import weakref
+
+from pip._vendor import six
+
+from .. import util
+from ..util.ssl_ import PROTOCOL_TLS_CLIENT
+from ._securetransport.bindings import CoreFoundation, Security, SecurityConst
+from ._securetransport.low_level import (
+ _assert_no_error,
+ _build_tls_unknown_ca_alert,
+ _cert_array_from_pem,
+ _create_cfstring_array,
+ _load_client_cert_chain,
+ _temporary_keychain,
+)
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+# 1. It is not possible to call into the callbacks before the dictionary is
+# populated, so once in the callback the id must be in the dictionary.
+# 2. The callbacks don't mutate the dictionary, they only read from it, and
+# so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_AES_256_GCM_SHA384,
+ SecurityConst.TLS_AES_128_GCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_AES_128_CCM_8_SHA256,
+ SecurityConst.TLS_AES_128_CCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+# TLSv1 to 1.2 are supported on macOS 10.8+
+_protocol_to_min_max = {
+ util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+ PROTOCOL_TLS_CLIENT: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12),
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+ SecurityConst.kSSLProtocol2,
+ SecurityConst.kSSLProtocol2,
+ )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+ SecurityConst.kSSLProtocol3,
+ SecurityConst.kSSLProtocol3,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+ SecurityConst.kTLSProtocol1,
+ SecurityConst.kTLSProtocol1,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+ SecurityConst.kTLSProtocol11,
+ SecurityConst.kTLSProtocol11,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+ SecurityConst.kTLSProtocol12,
+ SecurityConst.kTLSProtocol12,
+ )
+
+
+def inject_into_urllib3():
+ """
+ Monkey-patch urllib3 with SecureTransport-backed SSL-support.
+ """
+ util.SSLContext = SecureTransportContext
+ util.ssl_.SSLContext = SecureTransportContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_SECURETRANSPORT = True
+ util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+ """
+ Undo monkey-patching by :func:`inject_into_urllib3`.
+ """
+ util.SSLContext = orig_util_SSLContext
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_SECURETRANSPORT = False
+ util.ssl_.IS_SECURETRANSPORT = False
+
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport read callback. This is called by ST to request that data
+ be returned from the socket.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ requested_length = data_length_pointer[0]
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ read_count = 0
+
+ try:
+ while read_count < requested_length:
+ if timeout is None or timeout >= 0:
+ if not util.wait_for_read(base_socket, timeout):
+ raise socket.error(errno.EAGAIN, "timed out")
+
+ remaining = requested_length - read_count
+ buffer = (ctypes.c_char * remaining).from_address(
+ data_buffer + read_count
+ )
+ chunk_size = base_socket.recv_into(buffer, remaining)
+ read_count += chunk_size
+ if not chunk_size:
+ if not read_count:
+ return SecurityConst.errSSLClosedGraceful
+ break
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ data_length_pointer[0] = read_count
+ if error == errno.ECONNRESET or error == errno.EPIPE:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = read_count
+
+ if read_count != requested_length:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport write callback. This is called by ST to request that data
+ actually be sent on the network.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ bytes_to_write = data_length_pointer[0]
+ data = ctypes.string_at(data_buffer, bytes_to_write)
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ sent = 0
+
+ try:
+ while sent < bytes_to_write:
+ if timeout is None or timeout >= 0:
+ if not util.wait_for_write(base_socket, timeout):
+ raise socket.error(errno.EAGAIN, "timed out")
+ chunk_sent = base_socket.send(data)
+ sent += chunk_sent
+
+ # This has some needless copying here, but I'm not sure there's
+ # much value in optimising this data path.
+ data = data[chunk_sent:]
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ data_length_pointer[0] = sent
+ if error == errno.ECONNRESET or error == errno.EPIPE:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = sent
+
+ if sent != bytes_to_write:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+# We need to keep references to these two objects alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+ """
+    API-compatibility wrapper for Python's ssl-wrapped socket object.
+
+ Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+ collector of PyPy.
+ """
+
+ def __init__(self, socket):
+ self.socket = socket
+ self.context = None
+ self._makefile_refs = 0
+ self._closed = False
+ self._exception = None
+ self._keychain = None
+ self._keychain_dir = None
+ self._client_cert_chain = None
+
+ # We save off the previously-configured timeout and then set it to
+ # zero. This is done because we use select and friends to handle the
+ # timeouts, but if we leave the timeout set on the lower socket then
+ # Python will "kindly" call select on that socket again for us. Avoid
+ # that by forcing the timeout to zero.
+ self._timeout = self.socket.gettimeout()
+ self.socket.settimeout(0)
+
+ @contextlib.contextmanager
+ def _raise_on_error(self):
+ """
+ A context manager that can be used to wrap calls that do I/O from
+ SecureTransport. If any of the I/O callbacks hit an exception, this
+ context manager will correctly propagate the exception after the fact.
+ This avoids silently swallowing those exceptions.
+
+ It also correctly forces the socket closed.
+ """
+ self._exception = None
+
+ # We explicitly don't catch around this yield because in the unlikely
+ # event that an exception was hit in the block we don't want to swallow
+ # it.
+ yield
+ if self._exception is not None:
+ exception, self._exception = self._exception, None
+ self.close()
+ raise exception
+
+ def _set_ciphers(self):
+ """
+ Sets up the allowed ciphers. By default this matches the set in
+ util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is done
+ custom and doesn't allow changing at this time, mostly because parsing
+ OpenSSL cipher strings is going to be a freaking nightmare.
+ """
+ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+ result = Security.SSLSetEnabledCiphers(
+ self.context, ciphers, len(CIPHER_SUITES)
+ )
+ _assert_no_error(result)
+
+ def _set_alpn_protocols(self, protocols):
+ """
+ Sets up the ALPN protocols on the context.
+ """
+ if not protocols:
+ return
+ protocols_arr = _create_cfstring_array(protocols)
+ try:
+ result = Security.SSLSetALPNProtocols(self.context, protocols_arr)
+ _assert_no_error(result)
+ finally:
+ CoreFoundation.CFRelease(protocols_arr)
+
+ def _custom_validate(self, verify, trust_bundle):
+ """
+ Called when we have set custom validation. We do this in two cases:
+ first, when cert validation is entirely disabled; and second, when
+ using a custom trust DB.
+ Raises an SSLError if the connection is not trusted.
+ """
+ # If we disabled cert validation, just say: cool.
+ if not verify:
+ return
+
+ successes = (
+ SecurityConst.kSecTrustResultUnspecified,
+ SecurityConst.kSecTrustResultProceed,
+ )
+ try:
+ trust_result = self._evaluate_trust(trust_bundle)
+ if trust_result in successes:
+ return
+ reason = "error code: %d" % (trust_result,)
+ except Exception as e:
+ # Do not trust on error
+ reason = "exception: %r" % (e,)
+
+        # SecureTransport neither sends an alert nor shuts down the
+        # connection itself, so we do both ourselves.
+ rec = _build_tls_unknown_ca_alert(self.version())
+ self.socket.sendall(rec)
+ # close the connection immediately
+ # l_onoff = 1, activate linger
+        # l_linger = 0, linger for 0 seconds
+ opts = struct.pack("ii", 1, 0)
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
+ self.close()
+ raise ssl.SSLError("certificate verify failed, %s" % reason)
+
+ def _evaluate_trust(self, trust_bundle):
+ # We want data in memory, so load it up.
+ if os.path.isfile(trust_bundle):
+ with open(trust_bundle, "rb") as f:
+ trust_bundle = f.read()
+
+ cert_array = None
+ trust = Security.SecTrustRef()
+
+ try:
+ # Get a CFArray that contains the certs we want.
+ cert_array = _cert_array_from_pem(trust_bundle)
+
+ # Ok, now the hard part. We want to get the SecTrustRef that ST has
+ # created for this connection, shove our CAs into it, tell ST to
+ # ignore everything else it knows, and then ask if it can build a
+ # chain. This is a buuuunch of code.
+ result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
+ _assert_no_error(result)
+ if not trust:
+ raise ssl.SSLError("Failed to copy trust reference")
+
+ result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+ _assert_no_error(result)
+
+ result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+ _assert_no_error(result)
+
+ trust_result = Security.SecTrustResultType()
+ result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
+ _assert_no_error(result)
+ finally:
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ if cert_array is not None:
+ CoreFoundation.CFRelease(cert_array)
+
+ return trust_result.value
+
+ def handshake(
+ self,
+ server_hostname,
+ verify,
+ trust_bundle,
+ min_version,
+ max_version,
+ client_cert,
+ client_key,
+ client_key_passphrase,
+ alpn_protocols,
+ ):
+ """
+        Actually performs the TLS handshake. This is run automatically by the
+        wrapped socket, and shouldn't be needed in user code.
+ """
+ # First, we do the initial bits of connection setup. We need to create
+ # a context, set its I/O funcs, and set the connection reference.
+ self.context = Security.SSLCreateContext(
+ None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+ )
+ result = Security.SSLSetIOFuncs(
+ self.context, _read_callback_pointer, _write_callback_pointer
+ )
+ _assert_no_error(result)
+
+ # Here we need to compute the handle to use. We do this by taking the
+ # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+ # just keep incrementing by one until we find a free space.
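+        # For example (illustrative): if id(self) % 2147483647 is 41 and 41 is
+        # already taken, we try 42, then 43, and so on.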
+ with _connection_ref_lock:
+ handle = id(self) % 2147483647
+ while handle in _connection_refs:
+ handle = (handle + 1) % 2147483647
+ _connection_refs[handle] = self
+
+ result = Security.SSLSetConnection(self.context, handle)
+ _assert_no_error(result)
+
+ # If we have a server hostname, we should set that too.
+ if server_hostname:
+ if not isinstance(server_hostname, bytes):
+ server_hostname = server_hostname.encode("utf-8")
+
+ result = Security.SSLSetPeerDomainName(
+ self.context, server_hostname, len(server_hostname)
+ )
+ _assert_no_error(result)
+
+ # Setup the ciphers.
+ self._set_ciphers()
+
+ # Setup the ALPN protocols.
+ self._set_alpn_protocols(alpn_protocols)
+
+ # Set the minimum and maximum TLS versions.
+ result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+ _assert_no_error(result)
+
+ result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+ _assert_no_error(result)
+
+ # If there's a trust DB, we need to use it. We do that by telling
+ # SecureTransport to break on server auth. We also do that if we don't
+ # want to validate the certs at all: we just won't actually do any
+ # authing in that case.
+ if not verify or trust_bundle is not None:
+ result = Security.SSLSetSessionOption(
+ self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
+ )
+ _assert_no_error(result)
+
+ # If there's a client cert, we need to use it.
+ if client_cert:
+ self._keychain, self._keychain_dir = _temporary_keychain()
+ self._client_cert_chain = _load_client_cert_chain(
+ self._keychain, client_cert, client_key
+ )
+ result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
+ _assert_no_error(result)
+
+ while True:
+ with self._raise_on_error():
+ result = Security.SSLHandshake(self.context)
+
+ if result == SecurityConst.errSSLWouldBlock:
+ raise socket.timeout("handshake timed out")
+ elif result == SecurityConst.errSSLServerAuthCompleted:
+ self._custom_validate(verify, trust_bundle)
+ continue
+ else:
+ _assert_no_error(result)
+ break
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, bufsiz):
+ buffer = ctypes.create_string_buffer(bufsiz)
+ bytes_read = self.recv_into(buffer, bufsiz)
+ data = buffer[:bytes_read]
+ return data
+
+ def recv_into(self, buffer, nbytes=None):
+ # Read short on EOF.
+ if self._closed:
+ return 0
+
+ if nbytes is None:
+ nbytes = len(buffer)
+
+ buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLRead(
+ self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+ )
+
+ # There are some result codes that we want to treat as "not always
+ # errors". Specifically, those are errSSLWouldBlock,
+ # errSSLClosedGraceful, and errSSLClosedNoNotify.
+ if result == SecurityConst.errSSLWouldBlock:
+            # If we didn't process any bytes, then this was just a timeout.
+ # However, we can get errSSLWouldBlock in situations when we *did*
+ # read some data, and in those cases we should just read "short"
+ # and return.
+ if processed_bytes.value == 0:
+ # Timed out, no data read.
+ raise socket.timeout("recv timed out")
+ elif result in (
+ SecurityConst.errSSLClosedGraceful,
+ SecurityConst.errSSLClosedNoNotify,
+ ):
+ # The remote peer has closed this connection. We should do so as
+ # well. Note that we don't actually return here because in
+ # principle this could actually be fired along with return data.
+ # It's unlikely though.
+ self.close()
+ else:
+ _assert_no_error(result)
+
+ # Ok, we read and probably succeeded. We should return whatever data
+ # was actually read.
+ return processed_bytes.value
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def send(self, data):
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLWrite(
+ self.context, data, len(data), ctypes.byref(processed_bytes)
+ )
+
+ if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+ # Timed out
+ raise socket.timeout("send timed out")
+ else:
+ _assert_no_error(result)
+
+ # We sent, and probably succeeded. Tell them how much we sent.
+ return processed_bytes.value
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ with self._raise_on_error():
+ Security.SSLClose(self.context)
+
+ def close(self):
+ # TODO: should I do clean shutdown here? Do I have to?
+ if self._makefile_refs < 1:
+ self._closed = True
+ if self.context:
+ CoreFoundation.CFRelease(self.context)
+ self.context = None
+ if self._client_cert_chain:
+ CoreFoundation.CFRelease(self._client_cert_chain)
+ self._client_cert_chain = None
+ if self._keychain:
+ Security.SecKeychainDelete(self._keychain)
+ CoreFoundation.CFRelease(self._keychain)
+ shutil.rmtree(self._keychain_dir)
+ self._keychain = self._keychain_dir = None
+ return self.socket.close()
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ # Urgh, annoying.
+ #
+ # Here's how we do this:
+ #
+ # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+ # connection.
+ # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+ # 3. To get the CN, call SecCertificateCopyCommonName and process that
+ # string so that it's of the appropriate type.
+ # 4. To get the SAN, we need to do something a bit more complex:
+ # a. Call SecCertificateCopyValues to get the data, requesting
+ # kSecOIDSubjectAltName.
+ # b. Mess about with this dictionary to try to get the SANs out.
+ #
+ # This is gross. Really gross. It's going to be a few hundred LoC extra
+ # just to repeat something that SecureTransport can *already do*. So my
+ # operating assumption at this time is that what we want to do is
+ # instead to just flag to urllib3 that it shouldn't do its own hostname
+ # validation when using SecureTransport.
+ if not binary_form:
+ raise ValueError("SecureTransport only supports dumping binary certs")
+ trust = Security.SecTrustRef()
+ certdata = None
+ der_bytes = None
+
+ try:
+ # Grab the trust store.
+ result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
+ _assert_no_error(result)
+ if not trust:
+ # Probably we haven't done the handshake yet. No biggie.
+ return None
+
+ cert_count = Security.SecTrustGetCertificateCount(trust)
+ if not cert_count:
+ # Also a case that might happen if we haven't handshaked.
+ # Handshook? Handshaken?
+ return None
+
+ leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+ assert leaf
+
+ # Ok, now we want the DER bytes.
+ certdata = Security.SecCertificateCopyData(leaf)
+ assert certdata
+
+ data_length = CoreFoundation.CFDataGetLength(certdata)
+ data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+ der_bytes = ctypes.string_at(data_buffer, data_length)
+ finally:
+ if certdata:
+ CoreFoundation.CFRelease(certdata)
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ return der_bytes
+
+ def version(self):
+ protocol = Security.SSLProtocol()
+ result = Security.SSLGetNegotiatedProtocolVersion(
+ self.context, ctypes.byref(protocol)
+ )
+ _assert_no_error(result)
+ if protocol.value == SecurityConst.kTLSProtocol13:
+ raise ssl.SSLError("SecureTransport does not support TLS 1.3")
+ elif protocol.value == SecurityConst.kTLSProtocol12:
+ return "TLSv1.2"
+ elif protocol.value == SecurityConst.kTLSProtocol11:
+ return "TLSv1.1"
+ elif protocol.value == SecurityConst.kTLSProtocol1:
+ return "TLSv1"
+ elif protocol.value == SecurityConst.kSSLProtocol3:
+ return "SSLv3"
+ elif protocol.value == SecurityConst.kSSLProtocol2:
+ return "SSLv2"
+ else:
+ raise ssl.SSLError("Unknown TLS version: %r" % protocol)
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+
+else: # Platform-specific: Python 3
+
+ def makefile(self, mode="r", buffering=None, *args, **kwargs):
+ # We disable buffering with SecureTransport because it conflicts with
+ # the buffering that ST does internally (see issue #1153 for more).
+ buffering = 0
+ return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+ """
+ I am a wrapper class for the SecureTransport library, to translate the
+ interface of the standard library ``SSLContext`` object to calls into
+ SecureTransport.
+ """
+
+ def __init__(self, protocol):
+ self._min_version, self._max_version = _protocol_to_min_max[protocol]
+ self._options = 0
+ self._verify = False
+ self._trust_bundle = None
+ self._client_cert = None
+ self._client_key = None
+ self._client_key_passphrase = None
+ self._alpn_protocols = None
+
+ @property
+ def check_hostname(self):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ return True
+
+ @check_hostname.setter
+ def check_hostname(self, value):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ pass
+
+ @property
+ def options(self):
+ # TODO: Well, crap.
+ #
+ # So this is the bit of the code that is the most likely to cause us
+ # trouble. Essentially we need to enumerate all of the SSL options that
+ # users might want to use and try to see if we can sensibly translate
+ # them, or whether we should just ignore them.
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ # TODO: Update in line with above.
+ self._options = value
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+        self._verify = value == ssl.CERT_REQUIRED
+
+ def set_default_verify_paths(self):
+ # So, this has to do something a bit weird. Specifically, what it does
+ # is nothing.
+ #
+        # This means that, if load_verify_locations was previously called,
+        # this does not undo that. That is deliberate: the rest of the
+        # urllib3 code will attempt to load the default verify paths if it
+        # hasn't been told about any paths, even if the context was given
+        # them sometime earlier. We resolve that by just ignoring it.
+ pass
+
+ def load_default_certs(self):
+ return self.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ # For now, we just require the default cipher string.
+ if ciphers != util.ssl_.DEFAULT_CIPHERS:
+ raise ValueError("SecureTransport doesn't support custom cipher strings")
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ # OK, we only really support cadata and cafile.
+ if capath is not None:
+ raise ValueError("SecureTransport does not support cert directories")
+
+ # Raise if cafile does not exist.
+ if cafile is not None:
+ with open(cafile):
+ pass
+
+ self._trust_bundle = cafile or cadata
+
+    def load_cert_chain(self, certfile, keyfile=None, password=None):
+        self._client_cert = certfile
+        self._client_key = keyfile
+        # Store the passphrase under the attribute that __init__ initialises
+        # and wrap_socket actually reads.
+        self._client_key_passphrase = password
+
+ def set_alpn_protocols(self, protocols):
+ """
+ Sets the ALPN protocols that will later be set on the context.
+
+ Raises a NotImplementedError if ALPN is not supported.
+ """
+ if not hasattr(Security, "SSLSetALPNProtocols"):
+ raise NotImplementedError(
+ "SecureTransport supports ALPN only in macOS 10.12+"
+ )
+ self._alpn_protocols = [six.ensure_binary(p) for p in protocols]
+
+ def wrap_socket(
+ self,
+ sock,
+ server_side=False,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True,
+ server_hostname=None,
+ ):
+ # So, what do we do here? Firstly, we assert some properties. This is a
+ # stripped down shim, so there is some functionality we don't support.
+ # See PEP 543 for the real deal.
+ assert not server_side
+ assert do_handshake_on_connect
+ assert suppress_ragged_eofs
+
+ # Ok, we're good to go. Now we want to create the wrapped socket object
+ # and store it in the appropriate place.
+ wrapped_socket = WrappedSocket(sock)
+
+ # Now we can handshake
+ wrapped_socket.handshake(
+ server_hostname,
+ self._verify,
+ self._trust_bundle,
+ self._min_version,
+ self._max_version,
+ self._client_cert,
+ self._client_key,
+ self._client_key_passphrase,
+ self._alpn_protocols,
+ )
+ return wrapped_socket
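+
+
+# A minimal client-side usage sketch, not part of the vendored module: the
+# host name and CA bundle path below are hypothetical. It shows how
+# SecureTransportContext stands in for ssl.SSLContext:
+#
+#     import socket
+#     import ssl
+#
+#     ctx = SecureTransportContext(ssl.PROTOCOL_TLS)
+#     ctx.verify_mode = ssl.CERT_REQUIRED
+#     ctx.load_verify_locations(cafile="/path/to/ca-bundle.pem")
+#     raw = socket.create_connection(("example.com", 443))
+#     tls = ctx.wrap_socket(raw, server_hostname="example.com")
+#     print(tls.version())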
diff --git a/third_party/python/pip/pip/_vendor/urllib3/contrib/socks.py b/third_party/python/pip/pip/_vendor/urllib3/contrib/socks.py
new file mode 100644
index 0000000000..c326e80dd1
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/contrib/socks.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
+SOCKS5. To enable its functionality, either install PySocks or install this
+module with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4A (``proxy_url='socks4a://...'``)
+- SOCKS4 (``proxy_url='socks4://...'``)
+- SOCKS5 with remote DNS (``proxy_url='socks5h://...'``)
+- SOCKS5 with local DNS (``proxy_url='socks5://...'``)
+- Usernames and passwords for the SOCKS proxy
+
+.. note::
+ It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
+ your ``proxy_url`` to ensure that DNS resolution is done from the remote
+ server instead of client-side when connecting to a domain name.
+
+SOCKS4 supports IPv4 addresses and, via the SOCKS4A extension, domain names.
+SOCKS5 supports IPv4, IPv6, and domain names.
+
+When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
+will be sent as the ``userid`` section of the SOCKS request:
+
+.. code-block:: python
+
+ proxy_url="socks4a://<userid>@proxy-host"
+
+When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
+of the ``proxy_url`` will be sent as the username/password to authenticate
+with the proxy:
+
+.. code-block:: python
+
+ proxy_url="socks5h://<username>:<password>@proxy-host"
+
+"""
+from __future__ import absolute_import
+
+try:
+ import socks
+except ImportError:
+ import warnings
+
+ from ..exceptions import DependencyWarning
+
+ warnings.warn(
+ (
+ "SOCKS support in urllib3 requires the installation of optional "
+ "dependencies: specifically, PySocks. For more information, see "
+ "https://urllib3.readthedocs.io/en/1.26.x/contrib.html#socks-proxies"
+ ),
+ DependencyWarning,
+ )
+ raise
+
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from ..connection import HTTPConnection, HTTPSConnection
+from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+ """
+ A plain-text HTTP connection that connects via a SOCKS proxy.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._socks_options = kwargs.pop("_socks_options")
+ super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+ def _new_conn(self):
+ """
+ Establish a new connection via the SOCKS proxy.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw["source_address"] = self.source_address
+
+ if self.socket_options:
+ extra_kw["socket_options"] = self.socket_options
+
+ try:
+ conn = socks.create_connection(
+ (self.host, self.port),
+ proxy_type=self._socks_options["socks_version"],
+ proxy_addr=self._socks_options["proxy_host"],
+ proxy_port=self._socks_options["proxy_port"],
+ proxy_username=self._socks_options["username"],
+ proxy_password=self._socks_options["password"],
+ proxy_rdns=self._socks_options["rdns"],
+ timeout=self.timeout,
+ **extra_kw
+ )
+
+ except SocketTimeout:
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+
+ except socks.ProxyError as e:
+ # This is fragile as hell, but it seems to be the only way to raise
+ # useful errors here.
+ if e.socket_err:
+ error = e.socket_err
+ if isinstance(error, SocketTimeout):
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+ else:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % error
+ )
+ else:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ except SocketError as e: # Defensive: PySocks should catch all these.
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+ pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+ """
+ A version of the urllib3 ProxyManager that routes connections via the
+ defined SOCKS proxy.
+ """
+
+ pool_classes_by_scheme = {
+ "http": SOCKSHTTPConnectionPool,
+ "https": SOCKSHTTPSConnectionPool,
+ }
+
+ def __init__(
+ self,
+ proxy_url,
+ username=None,
+ password=None,
+ num_pools=10,
+ headers=None,
+ **connection_pool_kw
+ ):
+ parsed = parse_url(proxy_url)
+
+ if username is None and password is None and parsed.auth is not None:
+ split = parsed.auth.split(":")
+ if len(split) == 2:
+ username, password = split
+ if parsed.scheme == "socks5":
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = False
+ elif parsed.scheme == "socks5h":
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = True
+ elif parsed.scheme == "socks4":
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = False
+ elif parsed.scheme == "socks4a":
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = True
+ else:
+ raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
+
+ self.proxy_url = proxy_url
+
+ socks_options = {
+ "socks_version": socks_version,
+ "proxy_host": parsed.host,
+ "proxy_port": parsed.port,
+ "username": username,
+ "password": password,
+ "rdns": rdns,
+ }
+ connection_pool_kw["_socks_options"] = socks_options
+
+ super(SOCKSProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw
+ )
+
+ self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
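+
+
+# A minimal usage sketch (assumes PySocks is installed and a SOCKS5 proxy is
+# listening at the hypothetical address localhost:1080). The socks5h scheme
+# keeps DNS resolution on the proxy side, as recommended above:
+#
+#     proxy = SOCKSProxyManager("socks5h://localhost:1080/")
+#     resp = proxy.request("GET", "https://example.com/")
+#     print(resp.status)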
diff --git a/third_party/python/pip/pip/_vendor/urllib3/exceptions.py b/third_party/python/pip/pip/_vendor/urllib3/exceptions.py
new file mode 100644
index 0000000000..cba6f3f560
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/exceptions.py
@@ -0,0 +1,323 @@
+from __future__ import absolute_import
+
+from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead
+
+# Base Exceptions
+
+
+class HTTPError(Exception):
+ """Base exception used by this module."""
+
+ pass
+
+
+class HTTPWarning(Warning):
+ """Base warning used by this module."""
+
+ pass
+
+
+class PoolError(HTTPError):
+ """Base exception for errors caused within a pool."""
+
+ def __init__(self, pool, message):
+ self.pool = pool
+ HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+ """Base exception for PoolErrors that have associated URLs."""
+
+ def __init__(self, pool, url, message):
+ self.url = url
+ PoolError.__init__(self, pool, message)
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+ """Raised when SSL certificate fails in an HTTPS connection."""
+
+ pass
+
+
+class ProxyError(HTTPError):
+ """Raised when the connection to a proxy fails."""
+
+ def __init__(self, message, error, *args):
+ super(ProxyError, self).__init__(message, error, *args)
+ self.original_error = error
+
+
+class DecodeError(HTTPError):
+ """Raised when automatic decoding based on Content-Type fails."""
+
+ pass
+
+
+class ProtocolError(HTTPError):
+ """Raised when something unexpected happens mid-request/response."""
+
+ pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+
+class MaxRetryError(RequestError):
+ """Raised when the maximum number of retries is exceeded.
+
+ :param pool: The connection pool
+ :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+    :param string url: The requested URL
+ :param exceptions.Exception reason: The underlying error
+
+ """
+
+ def __init__(self, pool, url, reason=None):
+ self.reason = reason
+
+ message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
+
+ RequestError.__init__(self, pool, url, message)
+
+
+class HostChangedError(RequestError):
+ """Raised when an existing pool gets a request for a foreign host."""
+
+ def __init__(self, pool, url, retries=3):
+ message = "Tried to open a foreign host with url: %s" % url
+ RequestError.__init__(self, pool, url, message)
+ self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+    """Raised when passing an invalid state to a timeout."""
+
+ pass
+
+
+class TimeoutError(HTTPError):
+ """Raised when a socket timeout error occurs.
+
+ Catching this error will catch both :exc:`ReadTimeoutErrors
+ <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+ """
+
+ pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+    """Raised when a socket timeout occurs while receiving data from a server."""
+
+ pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+    """Raised when a socket timeout occurs while connecting to a server."""
+
+ pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""
+
+ pass
+
+
+class EmptyPoolError(PoolError):
+ """Raised when a pool runs out of connections and no more are allowed."""
+
+ pass
+
+
+class ClosedPoolError(PoolError):
+ """Raised when a request enters a pool after the pool has been closed."""
+
+ pass
+
+
+class LocationValueError(ValueError, HTTPError):
+ """Raised when there is something wrong with a given URL input."""
+
+ pass
+
+
+class LocationParseError(LocationValueError):
+ """Raised when get_host or similar fails to parse the URL input."""
+
+ def __init__(self, location):
+ message = "Failed to parse: %s" % location
+ HTTPError.__init__(self, message)
+
+ self.location = location
+
+
+class URLSchemeUnknown(LocationValueError):
+ """Raised when a URL input has an unsupported scheme."""
+
+ def __init__(self, scheme):
+ message = "Not supported URL scheme %s" % scheme
+ super(URLSchemeUnknown, self).__init__(message)
+
+ self.scheme = scheme
+
+
+class ResponseError(HTTPError):
+ """Used as a container for an error reason supplied in a MaxRetryError."""
+
+ GENERIC_ERROR = "too many error responses"
+ SPECIFIC_ERROR = "too many {status_code} error responses"
+
+
+class SecurityWarning(HTTPWarning):
+    """Warned when performing security-reducing actions."""
+
+ pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+ """Warned when connecting to a host with a certificate missing a SAN."""
+
+ pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+ """Warned when making an unverified HTTPS request."""
+
+ pass
+
+
+class SystemTimeWarning(SecurityWarning):
+    """Warned when system time is suspected to be wrong."""
+
+ pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ """Warned when certain TLS/SSL configuration is not available on a platform."""
+
+ pass
+
+
+class SNIMissingWarning(HTTPWarning):
+    """Warned when making an HTTPS request without SNI available."""
+
+ pass
+
+
+class DependencyWarning(HTTPWarning):
+ """
+ Warned when an attempt is made to import a module with missing optional
+ dependencies.
+ """
+
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ """Response needs to be chunked in order to read it as chunks."""
+
+ pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+ """
+    Body should be :class:`http.client.HTTPResponse`-like
+    (i.e. have an ``fp`` attribute which returns raw chunks) for read_chunked().
+ """
+
+ pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+ """
+ Response length doesn't match expected Content-Length
+
+ Subclass of :class:`http.client.IncompleteRead` to allow int value
+ for ``partial`` to avoid creating large objects on streamed reads.
+ """
+
+ def __init__(self, partial, expected):
+ super(IncompleteRead, self).__init__(partial, expected)
+
+ def __repr__(self):
+ return "IncompleteRead(%i bytes read, %i more expected)" % (
+ self.partial,
+ self.expected,
+ )
+
+
+class InvalidChunkLength(HTTPError, httplib_IncompleteRead):
+ """Invalid chunk length in a chunked response."""
+
+ def __init__(self, response, length):
+ super(InvalidChunkLength, self).__init__(
+ response.tell(), response.length_remaining
+ )
+ self.response = response
+ self.length = length
+
+ def __repr__(self):
+ return "InvalidChunkLength(got length %r, %i bytes read)" % (
+ self.length,
+ self.partial,
+ )
+
+
+class InvalidHeader(HTTPError):
+ """The header provided was somehow invalid."""
+
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):
+    """ProxyManager does not support the supplied scheme."""
+
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ # 'localhost' is here because our URL parser parses
+ # localhost:8080 -> scheme=localhost, remove if we fix this.
+ if scheme == "localhost":
+ scheme = None
+ if scheme is None:
+ message = "Proxy URL had no scheme, should start with http:// or https://"
+ else:
+ message = (
+ "Proxy URL had unsupported scheme %s, should use http:// or https://"
+ % scheme
+ )
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class ProxySchemeUnsupported(ValueError):
+    """Fetching HTTPS resources through HTTPS proxies is unsupported."""
+
+ pass
+
+
+class HeaderParsingError(HTTPError):
+ """Raised by assert_header_parsing, but we convert it to a log.warning statement."""
+
+ def __init__(self, defects, unparsed_data):
+ message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+    """urllib3 encountered an error when trying to rewind a body."""
+
+ pass
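+
+
+# A minimal handling sketch (the pool manager and URL are hypothetical).
+# Because TimeoutError is a shared base class, catching it covers both
+# ConnectTimeoutError and ReadTimeoutError; MaxRetryError carries the
+# underlying failure in its ``reason`` attribute once retries are exhausted:
+#
+#     from pip._vendor import urllib3
+#
+#     http = urllib3.PoolManager()
+#     try:
+#         http.request("GET", "https://example.invalid/", retries=2)
+#     except MaxRetryError as e:
+#         print("gave up:", e.reason)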
diff --git a/third_party/python/pip/pip/_vendor/urllib3/fields.py b/third_party/python/pip/pip/_vendor/urllib3/fields.py
new file mode 100644
index 0000000000..9d630f491d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/fields.py
@@ -0,0 +1,274 @@
+from __future__ import absolute_import
+
+import email.utils
+import mimetypes
+import re
+
+from .packages import six
+
+
+def guess_content_type(filename, default="application/octet-stream"):
+ """
+ Guess the "Content-Type" of a file.
+
+ :param filename:
+ The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+ :param default:
+ If no "Content-Type" can be guessed, default to `default`.
+ """
+ if filename:
+ return mimetypes.guess_type(filename)[0] or default
+ return default
+
+
+def format_header_param_rfc2231(name, value):
+ """
+ Helper function to format and quote a single header parameter using the
+ strategy defined in RFC 2231.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows
+ `RFC 2388 Section 4.4 <https://tools.ietf.org/html/rfc2388#section-4.4>`_.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+        The value of the parameter, provided as ``bytes`` or ``str``.
+ :ret:
+ An RFC-2231-formatted unicode string.
+ """
+ if isinstance(value, six.binary_type):
+ value = value.decode("utf-8")
+
+ if not any(ch in value for ch in '"\\\r\n'):
+ result = u'%s="%s"' % (name, value)
+ try:
+ result.encode("ascii")
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ pass
+ else:
+ return result
+
+ if six.PY2: # Python 2:
+ value = value.encode("utf-8")
+
+ # encode_rfc2231 accepts an encoded string and returns an ascii-encoded
+ # string in Python 2 but accepts and returns unicode strings in Python 3
+ value = email.utils.encode_rfc2231(value, "utf-8")
+ value = "%s*=%s" % (name, value)
+
+ if six.PY2: # Python 2:
+ value = value.decode("utf-8")
+
+ return value
+
+
+_HTML5_REPLACEMENTS = {
+ u"\u0022": u"%22",
+ # Replace "\" with "\\".
+ u"\u005C": u"\u005C\u005C",
+}
+
+# All control characters from 0x00 to 0x1F *except* 0x1B.
+_HTML5_REPLACEMENTS.update(
+ {
+ six.unichr(cc): u"%{:02X}".format(cc)
+ for cc in range(0x00, 0x1F + 1)
+ if cc not in (0x1B,)
+ }
+)
+
+
+def _replace_multiple(value, needles_and_replacements):
+ def replacer(match):
+ return needles_and_replacements[match.group(0)]
+
+ pattern = re.compile(
+ r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()])
+ )
+
+ result = pattern.sub(replacer, value)
+
+ return result
+
+
+def format_header_param_html5(name, value):
+ """
+ Helper function to format and quote a single header parameter using the
+ HTML5 strategy.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows the `HTML5 Working Draft
+ Section 4.10.22.7`_ and matches the behavior of curl and modern browsers.
+
+ .. _HTML5 Working Draft Section 4.10.22.7:
+ https://w3c.github.io/html/sec-forms.html#multipart-form-data
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+        The value of the parameter, provided as ``bytes`` or ``str``.
+ :ret:
+ A unicode string, stripped of troublesome characters.
+ """
+ if isinstance(value, six.binary_type):
+ value = value.decode("utf-8")
+
+ value = _replace_multiple(value, _HTML5_REPLACEMENTS)
+
+ return u'%s="%s"' % (name, value)
+
+
+# For backwards-compatibility.
+format_header_param = format_header_param_html5
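+
+
+# Illustrative outputs (a sketch; the filenames are hypothetical, and the
+# escaping follows the two strategies defined above):
+#
+#     format_header_param_html5("filename", u'report "v2".txt')
+#     # -> u'filename="report %22v2%22.txt"'
+#
+#     format_header_param_rfc2231("filename", u"r\u00e9sum\u00e9.txt")
+#     # -> u"filename*=utf-8''r%C3%A9sum%C3%A9.txt"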
+
+
+class RequestField(object):
+ """
+ A data container for request body parameters.
+
+ :param name:
+ The name of this request field. Must be unicode.
+ :param data:
+ The data/value body.
+ :param filename:
+ An optional filename of the request field. Must be unicode.
+ :param headers:
+ An optional dict-like object of headers to initially use for the field.
+ :param header_formatter:
+ An optional callable that is used to encode and format the headers. By
+ default, this is :func:`format_header_param_html5`.
+ """
+
+ def __init__(
+ self,
+ name,
+ data,
+ filename=None,
+ headers=None,
+ header_formatter=format_header_param_html5,
+ ):
+ self._name = name
+ self._filename = filename
+ self.data = data
+ self.headers = {}
+ if headers:
+ self.headers = dict(headers)
+ self.header_formatter = header_formatter
+
+ @classmethod
+ def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
+ """
+ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+ Supports constructing :class:`~urllib3.fields.RequestField` from
+        parameters of key/value strings AND key/filetuples. A filetuple is a
+ (filename, data, MIME type) tuple where the MIME type is optional.
+ For example::
+
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+
+ Field names and filenames must be unicode.
+ """
+ if isinstance(value, tuple):
+ if len(value) == 3:
+ filename, data, content_type = value
+ else:
+ filename, data = value
+ content_type = guess_content_type(filename)
+ else:
+ filename = None
+ content_type = None
+ data = value
+
+ request_param = cls(
+ fieldname, data, filename=filename, header_formatter=header_formatter
+ )
+ request_param.make_multipart(content_type=content_type)
+
+ return request_param
+
+ def _render_part(self, name, value):
+ """
+ Overridable helper function to format a single header parameter. By
+ default, this calls ``self.header_formatter``.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+
+ return self.header_formatter(name, value)
+
+ def _render_parts(self, header_parts):
+ """
+ Helper function to format and quote a single header.
+
+ Useful for single headers that are composed of multiple items. E.g.,
+ 'Content-Disposition' fields.
+
+ :param header_parts:
+ A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+ as `k1="v1"; k2="v2"; ...`.
+ """
+ parts = []
+ iterable = header_parts
+ if isinstance(header_parts, dict):
+ iterable = header_parts.items()
+
+ for name, value in iterable:
+ if value is not None:
+ parts.append(self._render_part(name, value))
+
+ return u"; ".join(parts)
+
+ def render_headers(self):
+ """
+ Renders the headers for this request field.
+ """
+ lines = []
+
+ sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"]
+ for sort_key in sort_keys:
+ if self.headers.get(sort_key, False):
+ lines.append(u"%s: %s" % (sort_key, self.headers[sort_key]))
+
+ for header_name, header_value in self.headers.items():
+ if header_name not in sort_keys:
+ if header_value:
+ lines.append(u"%s: %s" % (header_name, header_value))
+
+ lines.append(u"\r\n")
+ return u"\r\n".join(lines)
+
+ def make_multipart(
+ self, content_disposition=None, content_type=None, content_location=None
+ ):
+ """
+ Makes this request field into a multipart request field.
+
+        This method overrides the "Content-Disposition", "Content-Type" and
+        "Content-Location" headers of the request parameter.
+
+        :param content_disposition:
+            The 'Content-Disposition' of the request body, defaulting to
+            'form-data'.
+        :param content_type:
+            The 'Content-Type' of the request body.
+        :param content_location:
+            The 'Content-Location' of the request body.
+
+ """
+ self.headers["Content-Disposition"] = content_disposition or u"form-data"
+ self.headers["Content-Disposition"] += u"; ".join(
+ [
+ u"",
+ self._render_parts(
+ ((u"name", self._name), (u"filename", self._filename))
+ ),
+ ]
+ )
+ self.headers["Content-Type"] = content_type
+ self.headers["Content-Location"] = content_location
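+
+
+# A minimal usage sketch (the field name and file contents are hypothetical):
+#
+#     rf = RequestField.from_tuples("upload", ("notes.txt", "hello"))
+#     rf.render_headers()
+#     # -> 'Content-Disposition: form-data; name="upload"; filename="notes.txt"\r\n'
+#     #    'Content-Type: text/plain\r\n\r\n'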
diff --git a/third_party/python/pip/pip/_vendor/urllib3/filepost.py b/third_party/python/pip/pip/_vendor/urllib3/filepost.py
new file mode 100644
index 0000000000..36c9252c64
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/filepost.py
@@ -0,0 +1,98 @@
+from __future__ import absolute_import
+
+import binascii
+import codecs
+import os
+from io import BytesIO
+
+from .fields import RequestField
+from .packages import six
+from .packages.six import b
+
+writer = codecs.lookup("utf-8")[3]
+
+
+def choose_boundary():
+ """
+ Our embarrassingly-simple replacement for mimetools.choose_boundary.
+ """
+ boundary = binascii.hexlify(os.urandom(16))
+ if not six.PY2:
+ boundary = boundary.decode("ascii")
+ return boundary
+
+
+def iter_field_objects(fields):
+ """
+ Iterate over fields.
+
+ Supports list of (k, v) tuples and dicts, and lists of
+ :class:`~urllib3.fields.RequestField`.
+
+ """
+ if isinstance(fields, dict):
+ i = six.iteritems(fields)
+ else:
+ i = iter(fields)
+
+ for field in i:
+ if isinstance(field, RequestField):
+ yield field
+ else:
+ yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+ """
+ .. deprecated:: 1.6
+
+ Iterate over fields.
+
+ The addition of :class:`~urllib3.fields.RequestField` makes this function
+ obsolete. Instead, use :func:`iter_field_objects`, which returns
+ :class:`~urllib3.fields.RequestField` objects.
+
+ Supports list of (k, v) tuples and dicts.
+ """
+ if isinstance(fields, dict):
+ return ((k, v) for k, v in six.iteritems(fields))
+
+ return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+ """
+ Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+ :param fields:
+ Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+ :param boundary:
+ If not specified, then a random boundary will be generated using
+ :func:`urllib3.filepost.choose_boundary`.
+ """
+ body = BytesIO()
+ if boundary is None:
+ boundary = choose_boundary()
+
+ for field in iter_field_objects(fields):
+ body.write(b("--%s\r\n" % (boundary)))
+
+ writer(body).write(field.render_headers())
+ data = field.data
+
+ if isinstance(data, int):
+ data = str(data) # Backwards compatibility
+
+ if isinstance(data, six.text_type):
+ writer(body).write(data)
+ else:
+ body.write(data)
+
+ body.write(b"\r\n")
+
+ body.write(b("--%s--\r\n" % (boundary)))
+
+ content_type = str("multipart/form-data; boundary=%s" % boundary)
+
+ return body.getvalue(), content_type
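+
+
+# A minimal usage sketch (field names and contents are hypothetical). The
+# returned body and Content-Type are ready to be used as an HTTP request
+# body and header:
+#
+#     fields = {
+#         "token": "abc123",
+#         "upload": ("report.txt", b"file body", "text/plain"),
+#     }
+#     body, content_type = encode_multipart_formdata(fields)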
diff --git a/third_party/python/pip/pip/_vendor/urllib3/packages/__init__.py b/third_party/python/pip/pip/_vendor/urllib3/packages/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/packages/__init__.py
diff --git a/third_party/python/pip/pip/_vendor/urllib3/packages/backports/__init__.py b/third_party/python/pip/pip/_vendor/urllib3/packages/backports/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/packages/backports/__init__.py
diff --git a/third_party/python/pip/pip/_vendor/urllib3/packages/backports/makefile.py b/third_party/python/pip/pip/_vendor/urllib3/packages/backports/makefile.py
new file mode 100644
index 0000000000..b8fb2154b6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/packages/backports/makefile.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+"""
+backports.makefile
+~~~~~~~~~~~~~~~~~~
+
+Backports the Python 3 ``socket.makefile`` method for use with anything that
+wants to create a "fake" socket object.
+"""
+import io
+from socket import SocketIO
+
+
+def backport_makefile(
+ self, mode="r", buffering=None, encoding=None, errors=None, newline=None
+):
+ """
+ Backport of ``socket.makefile`` from Python 3.5.
+ """
+ if not set(mode) <= {"r", "w", "b"}:
+ raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = SocketIO(self, rawmode)
+ self._makefile_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
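+
+
+# A minimal usage sketch (``wrapped_socket`` is hypothetical: any socket-like
+# object that carries a ``_makefile_refs`` counter, such as the
+# SecureTransport WrappedSocket elsewhere in this vendored package):
+#
+#     fp = backport_makefile(wrapped_socket, mode="rb", buffering=0)
+#     line = fp.readline()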
diff --git a/third_party/python/pip/pip/_vendor/urllib3/packages/six.py b/third_party/python/pip/pip/_vendor/urllib3/packages/six.py
new file mode 100644
index 0000000000..f099a3dcd2
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/packages/six.py
@@ -0,0 +1,1076 @@
+# Copyright (c) 2010-2020 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.16.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = (str,)
+ integer_types = (int,)
+ class_types = (type,)
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = (basestring,)
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+ def __len__(self):
+ return 1 << 31
+
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+if PY34:
+ from importlib.util import spec_from_loader
+else:
+ spec_from_loader = None
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP 302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def find_spec(self, fullname, path, target=None):
+ if fullname in self.known_modules:
+ return spec_from_loader(fullname, self)
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+        Return true if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+        """Return None.
+
+        Required if is_package is implemented."""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+
+ get_source = get_code # same as get_code
+
+ def create_module(self, spec):
+ return self.load_module(spec.name)
+
+ def exec_module(self, module):
+ pass
+
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute(
+ "filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"
+ ),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute(
+ "reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"
+ ),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute(
+ "zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"
+ ),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule(
+ "collections_abc",
+ "collections",
+ "collections.abc" if sys.version_info >= (3, 3) else "collections",
+ ),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule(
+ "_dummy_thread",
+ "dummy_thread",
+ "_dummy_thread" if sys.version_info < (3, 9) else "_thread",
+ ),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule(
+ "email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"
+ ),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add Windows-specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
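+
+
+# After registration, consumers can import renamed stdlib modules through the
+# ``moves`` namespace regardless of the Python major version (a sketch):
+#
+#     from pip._vendor.urllib3.packages.six.moves import http_client
+#     from pip._vendor.urllib3.packages.six.moves import queue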
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute(
+ "unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"
+ ),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(
+ Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse",
+ "moves.urllib.parse",
+)
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(
+ Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error",
+ "moves.urllib.error",
+)
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(
+ Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request",
+ "moves.urllib.request",
+)
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(
+ Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response",
+ "moves.urllib.response",
+)
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = (
+ _urllib_robotparser_moved_attributes
+)
+
+_importer._add_module(
+ Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser",
+ "moves.urllib.robotparser",
+)
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ["parse", "error", "request", "response", "robotparser"]
+
+
+_importer._add_module(
+ Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib"
+)
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+
+ def advance_iterator(it):
+ return it.next()
+
+
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(
+ get_unbound_function, """Get the function out of a possibly unbound function"""
+)
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(
+ iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary."
+)
+
+
+if PY3:
+
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+
+ unichr = chr
+ import struct
+
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+ _assertNotRegex = "assertNotRegex"
+else:
+
+ def b(s):
+ return s
+
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r"\\", r"\\\\"), "unicode_escape")
+
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ _assertNotRegex = "assertNotRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+def assertNotRegex(self, *args, **kwargs):
+ return getattr(self, _assertNotRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec ("""exec _code_ in _globs_, _locs_""")
+
+ exec_(
+ """def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+"""
+ )
+
+
+if sys.version_info[:2] > (3,):
+ exec_(
+ """def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+"""
+ )
+else:
+
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (
+ isinstance(fp, file)
+ and isinstance(data, unicode)
+ and fp.encoding is not None
+ ):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+
+
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+    # This does exactly what the :func:`py3:functools.update_wrapper`
+    # function does on Python versions after 3.2. It sets the ``__wrapped__``
+    # attribute on the ``wrapper`` object and it doesn't raise an error if any
+    # of the attributes mentioned in ``assigned`` and ``updated`` are missing
+    # on the ``wrapped`` object.
+ def _update_wrapper(
+ wrapper,
+ wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES,
+ ):
+ for attr in assigned:
+ try:
+ value = getattr(wrapped, attr)
+ except AttributeError:
+ continue
+ else:
+ setattr(wrapper, attr, value)
+ for attr in updated:
+ getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
+ wrapper.__wrapped__ = wrapped
+ return wrapper
+
+ _update_wrapper.__doc__ = functools.update_wrapper.__doc__
+
+ def wraps(
+ wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES,
+ ):
+ return functools.partial(
+ _update_wrapper, wrapped=wrapped, assigned=assigned, updated=updated
+ )
+
+ wraps.__doc__ = functools.wraps.__doc__
+
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(type):
+ def __new__(cls, name, this_bases, d):
+ if sys.version_info[:2] >= (3, 7):
+                # Python 3.7 introduced PEP 560, which requires a bit of
+                # extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d["__orig_bases__"] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+
+ return type.__new__(metaclass, "temporary_class", (), {})
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get("__slots__")
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop("__dict__", None)
+ orig_vars.pop("__weakref__", None)
+ if hasattr(cls, "__qualname__"):
+ orig_vars["__qualname__"] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+
+ return wrapper
+
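+# Illustration (not in upstream six; ``Meta`` is a made-up example
+# metaclass): the two helpers above are equivalent ways to attach a
+# metaclass that work on both Python 2 and 3.
+#
+#   >>> class Meta(type):
+#   ...     pass
+#   >>> class A(with_metaclass(Meta, object)):
+#   ...     pass
+#   >>> @add_metaclass(Meta)
+#   ... class B(object):
+#   ...     pass
+#   >>> type(A) is Meta and type(B) is Meta
+#   True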
+
+def ensure_binary(s, encoding="utf-8", errors="strict"):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, binary_type):
+ return s
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding="utf-8", errors="strict"):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ # Optimization: Fast return for the common case.
+ if type(s) is str:
+ return s
+ if PY2 and isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ return s
+
+
+def ensure_text(s, encoding="utf-8", errors="strict"):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
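+# Illustration (not in upstream six): expected behavior of the ensure_*
+# helpers on Python 3, where text_type is str and binary_type is bytes.
+#
+#   >>> ensure_binary(u"h\xe9")      # text is encoded
+#   b'h\xc3\xa9'
+#   >>> ensure_str(b"abc")           # bytes are decoded to str
+#   'abc'
+#   >>> ensure_text(b"abc")          # always returns text
+#   'abc'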
+
+def python_2_unicode_compatible(klass):
+ """
+ A class decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if "__str__" not in klass.__dict__:
+ raise ValueError(
+ "@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." % klass.__name__
+ )
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode("utf-8")
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+        # inserted an importer with a different class.
+ if (
+ type(importer).__name__ == "_SixMetaPathImporter"
+ and importer.name == __name__
+ ):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/third_party/python/pip/pip/_vendor/urllib3/poolmanager.py b/third_party/python/pip/pip/_vendor/urllib3/poolmanager.py
new file mode 100644
index 0000000000..ca4ec34118
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/poolmanager.py
@@ -0,0 +1,537 @@
+from __future__ import absolute_import
+
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
+from .exceptions import (
+ LocationValueError,
+ MaxRetryError,
+ ProxySchemeUnknown,
+ ProxySchemeUnsupported,
+ URLSchemeUnknown,
+)
+from .packages import six
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.proxy import connection_requires_http_tunnel
+from .util.retry import Retry
+from .util.url import parse_url
+
+__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = (
+ "key_file",
+ "cert_file",
+ "cert_reqs",
+ "ca_certs",
+ "ssl_version",
+ "ca_cert_dir",
+ "ssl_context",
+ "key_password",
+ "server_hostname",
+)
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+ "key_scheme", # str
+ "key_host", # str
+ "key_port", # int
+ "key_timeout", # int or float or Timeout
+ "key_retries", # int or Retry
+ "key_strict", # bool
+ "key_block", # bool
+ "key_source_address", # str
+ "key_key_file", # str
+ "key_key_password", # str
+ "key_cert_file", # str
+ "key_cert_reqs", # str
+ "key_ca_certs", # str
+ "key_ssl_version", # str
+ "key_ca_cert_dir", # str
+ "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+ "key_maxsize", # int
+ "key_headers", # dict
+ "key__proxy", # parsed proxy url
+ "key__proxy_headers", # dict
+ "key__proxy_config", # class
+ "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples
+ "key__socks_options", # dict
+ "key_assert_hostname", # bool or string
+ "key_assert_fingerprint", # str
+ "key_server_hostname", # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple("PoolKey", _key_fields)
+
+_proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
+ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+ """
+ Create a pool key out of a request context dictionary.
+
+ According to RFC 3986, both the scheme and host are case-insensitive.
+ Therefore, this function normalizes both before constructing the pool
+ key for an HTTPS request. If you wish to change this behaviour, provide
+ alternate callables to ``key_fn_by_scheme``.
+
+ :param key_class:
+ The class to use when constructing the key. This should be a namedtuple
+ with the ``scheme`` and ``host`` keys at a minimum.
+ :type key_class: namedtuple
+ :param request_context:
+        A dictionary-like object that contains the context for a request.
+ :type request_context: dict
+
+ :return: A namedtuple that can be used as a connection pool key.
+ :rtype: PoolKey
+ """
+ # Since we mutate the dictionary, make a copy first
+ context = request_context.copy()
+ context["scheme"] = context["scheme"].lower()
+ context["host"] = context["host"].lower()
+
+ # These are both dictionaries and need to be transformed into frozensets
+ for key in ("headers", "_proxy_headers", "_socks_options"):
+ if key in context and context[key] is not None:
+ context[key] = frozenset(context[key].items())
+
+ # The socket_options key may be a list and needs to be transformed into a
+ # tuple.
+ socket_opts = context.get("socket_options")
+ if socket_opts is not None:
+ context["socket_options"] = tuple(socket_opts)
+
+ # Map the kwargs to the names in the namedtuple - this is necessary since
+ # namedtuples can't have fields starting with '_'.
+ for key in list(context.keys()):
+ context["key_" + key] = context.pop(key)
+
+ # Default to ``None`` for keys missing from the context
+ for field in key_class._fields:
+ if field not in context:
+ context[field] = None
+
+ return key_class(**context)
+
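+# Illustration (not in upstream urllib3; the context values are made up):
+# scheme and host are lowercased, mutable values become hashable, and every
+# field is prefixed with "key_" to fit the PoolKey namedtuple.
+#
+#   >>> ctx = {"scheme": "HTTPS", "host": "Example.COM", "port": 443,
+#   ...        "headers": {"User-Agent": "demo"}}
+#   >>> key = _default_key_normalizer(PoolKey, ctx)
+#   >>> key.key_scheme, key.key_host, key.key_port
+#   ('https', 'example.com', 443)
+#   >>> key.key_headers
+#   frozenset({('User-Agent', 'demo')})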
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+ "http": functools.partial(_default_key_normalizer, PoolKey),
+ "https": functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
+
+
+class PoolManager(RequestMethods):
+ """
+ Allows for arbitrary requests while transparently keeping track of
+ necessary connection pools for you.
+
+ :param num_pools:
+ Number of connection pools to cache before discarding the least
+ recently used pool.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param \\**connection_pool_kw:
+ Additional parameters are used to create fresh
+ :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+ Example::
+
+ >>> manager = PoolManager(num_pools=2)
+ >>> r = manager.request('GET', 'http://google.com/')
+ >>> r = manager.request('GET', 'http://google.com/mail')
+ >>> r = manager.request('GET', 'http://yahoo.com/')
+ >>> len(manager.pools)
+ 2
+
+ """
+
+ proxy = None
+ proxy_config = None
+
+ def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+ RequestMethods.__init__(self, headers)
+ self.connection_pool_kw = connection_pool_kw
+ self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
+
+ # Locally set the pool classes and keys so other PoolManagers can
+ # override them.
+ self.pool_classes_by_scheme = pool_classes_by_scheme
+ self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def _new_pool(self, scheme, host, port, request_context=None):
+ """
+ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
+ any additional pool keyword arguments.
+
+        If ``request_context`` is provided, it is passed as keyword arguments
+        to the pool class used. This method is used to actually create the
+ connection pools handed out by :meth:`connection_from_url` and
+ companion methods. It is intended to be overridden for customization.
+ """
+ pool_cls = self.pool_classes_by_scheme[scheme]
+ if request_context is None:
+ request_context = self.connection_pool_kw.copy()
+
+ # Although the context has everything necessary to create the pool,
+ # this function has historically only used the scheme, host, and port
+ # in the positional args. When an API change is acceptable these can
+ # be removed.
+ for key in ("scheme", "host", "port"):
+ request_context.pop(key, None)
+
+ if scheme == "http":
+ for kw in SSL_KEYWORDS:
+ request_context.pop(kw, None)
+
+ return pool_cls(host, port, **request_context)
+
+ def clear(self):
+ """
+ Empty our store of pools and direct them all to close.
+
+ This will not affect in-flight connections, but they will not be
+ re-used after completion.
+ """
+ self.pools.clear()
+
+ def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
+
+ If ``port`` isn't given, it will be derived from the ``scheme`` using
+ ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+ provided, it is merged with the instance's ``connection_pool_kw``
+ variable and used to create the new connection pool, if one is
+ needed.
+ """
+
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ request_context = self._merge_pool_kwargs(pool_kwargs)
+ request_context["scheme"] = scheme or "http"
+ if not port:
+ port = port_by_scheme.get(request_context["scheme"].lower(), 80)
+ request_context["port"] = port
+ request_context["host"] = host
+
+ return self.connection_from_context(request_context)
+
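+    # Illustration (not in upstream urllib3; the host is made up): when no
+    # explicit port is given, the scheme decides via port_by_scheme, so
+    # these two calls resolve to the same pool.
+    #
+    #   >>> pm = PoolManager()
+    #   >>> a = pm.connection_from_host("example.com", scheme="https")
+    #   >>> b = pm.connection_from_host("example.com", 443, "https")
+    #   >>> a is b
+    #   True
+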
+ def connection_from_context(self, request_context):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
+
+ ``request_context`` must at least contain the ``scheme`` key and its
+ value must be a key in ``key_fn_by_scheme`` instance variable.
+ """
+ scheme = request_context["scheme"].lower()
+ pool_key_constructor = self.key_fn_by_scheme.get(scheme)
+ if not pool_key_constructor:
+ raise URLSchemeUnknown(scheme)
+ pool_key = pool_key_constructor(request_context)
+
+ return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+ def connection_from_pool_key(self, pool_key, request_context=None):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
+
+ ``pool_key`` should be a namedtuple that only contains immutable
+ objects. At a minimum it must have the ``scheme``, ``host``, and
+ ``port`` fields.
+ """
+ with self.pools.lock:
+ # If the scheme, host, or port doesn't match existing open
+ # connections, open a new ConnectionPool.
+ pool = self.pools.get(pool_key)
+ if pool:
+ return pool
+
+ # Make a fresh ConnectionPool of the desired type
+ scheme = request_context["scheme"]
+ host = request_context["host"]
+ port = request_context["port"]
+ pool = self._new_pool(scheme, host, port, request_context=request_context)
+ self.pools[pool_key] = pool
+
+ return pool
+
+ def connection_from_url(self, url, pool_kwargs=None):
+ """
+ Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+ If ``pool_kwargs`` is not provided and a new pool needs to be
+ constructed, ``self.connection_pool_kw`` is used to initialize
+ the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+ is provided, it is used instead. Note that if a new pool does not
+ need to be created for the request, the provided ``pool_kwargs`` are
+ not used.
+ """
+ u = parse_url(url)
+ return self.connection_from_host(
+ u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
+ )
+
+ def _merge_pool_kwargs(self, override):
+ """
+ Merge a dictionary of override values for self.connection_pool_kw.
+
+ This does not modify self.connection_pool_kw and returns a new dict.
+ Any keys in the override dictionary with a value of ``None`` are
+ removed from the merged dictionary.
+ """
+ base_pool_kwargs = self.connection_pool_kw.copy()
+ if override:
+ for key, value in override.items():
+ if value is None:
+ try:
+ del base_pool_kwargs[key]
+ except KeyError:
+ pass
+ else:
+ base_pool_kwargs[key] = value
+ return base_pool_kwargs
+
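+    # Illustration (not in upstream urllib3; the kwargs are made up): a
+    # ``None`` override removes a key from the merged kwargs instead of
+    # setting it.
+    #
+    #   >>> pm = PoolManager(timeout=3.0, retries=2)
+    #   >>> pm._merge_pool_kwargs({"retries": None, "maxsize": 4})
+    #   {'timeout': 3.0, 'maxsize': 4}
+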
+ def _proxy_requires_url_absolute_form(self, parsed_url):
+ """
+ Indicates if the proxy requires the complete destination URL in the
+ request. Normally this is only needed when not using an HTTP CONNECT
+ tunnel.
+ """
+ if self.proxy is None:
+ return False
+
+ return not connection_requires_http_tunnel(
+ self.proxy, self.proxy_config, parsed_url.scheme
+ )
+
+ def _validate_proxy_scheme_url_selection(self, url_scheme):
+ """
+        Validates that we're not attempting to do TLS-in-TLS connections on
+        Python 2 or with unsupported SSL implementations.
+ """
+ if self.proxy is None or url_scheme != "https":
+ return
+
+ if self.proxy.scheme != "https":
+ return
+
+ if six.PY2 and not self.proxy_config.use_forwarding_for_https:
+ raise ProxySchemeUnsupported(
+ "Contacting HTTPS destinations through HTTPS proxies "
+ "'via CONNECT tunnels' is not supported in Python 2"
+ )
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ """
+        Same as :meth:`urllib3.HTTPConnectionPool.urlopen`, but with custom
+        cross-host redirect logic; only the request-uri portion of the
+        ``url`` is sent.
+
+ The given ``url`` parameter must be absolute, such that an appropriate
+ :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+ """
+ u = parse_url(url)
+ self._validate_proxy_scheme_url_selection(u.scheme)
+
+ conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+ kw["assert_same_host"] = False
+ kw["redirect"] = False
+
+ if "headers" not in kw:
+ kw["headers"] = self.headers.copy()
+
+ if self._proxy_requires_url_absolute_form(u):
+ response = conn.urlopen(method, url, **kw)
+ else:
+ response = conn.urlopen(method, u.request_uri, **kw)
+
+ redirect_location = redirect and response.get_redirect_location()
+ if not redirect_location:
+ return response
+
+ # Support relative URLs for redirecting.
+ redirect_location = urljoin(url, redirect_location)
+
+ # RFC 7231, Section 6.4.4
+ if response.status == 303:
+ method = "GET"
+
+ retries = kw.get("retries")
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect)
+
+ # Strip headers marked as unsafe to forward to the redirected location.
+ # Check remove_headers_on_redirect to avoid a potential network call within
+ # conn.is_same_host() which may use socket.gethostbyname() in the future.
+ if retries.remove_headers_on_redirect and not conn.is_same_host(
+ redirect_location
+ ):
+ headers = list(six.iterkeys(kw["headers"]))
+ for header in headers:
+ if header.lower() in retries.remove_headers_on_redirect:
+ kw["headers"].pop(header, None)
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ response.drain_conn()
+ raise
+ return response
+
+ kw["retries"] = retries
+ kw["redirect"] = redirect
+
+ log.info("Redirecting %s -> %s", url, redirect_location)
+
+ response.drain_conn()
+ return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+ """
+ Behaves just like :class:`PoolManager`, but sends all requests through
+ the defined proxy, using the CONNECT method for HTTPS URLs.
+
+ :param proxy_url:
+ The URL of the proxy to be used.
+
+ :param proxy_headers:
+        A dictionary containing headers that will be sent to the proxy. For
+        HTTP they are sent with each request, while in the HTTPS/CONNECT case
+        they are sent only once. Can be used for proxy authentication.
+
+ :param proxy_ssl_context:
+ The proxy SSL context is used to establish the TLS connection to the
+ proxy when using HTTPS proxies.
+
+ :param use_forwarding_for_https:
+        (Defaults to False) If set to True, requests to the HTTPS proxy will
+        be forwarded on behalf of the client instead of creating a TLS tunnel
+        via the CONNECT method. **Enabling this flag means that request
+ and response headers and content will be visible from the HTTPS proxy**
+ whereas tunneling keeps request and response headers and content
+ private. IP address, target hostname, SNI, and port are always visible
+ to an HTTPS proxy even when this flag is disabled.
+
+ Example:
+ >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+ >>> r1 = proxy.request('GET', 'http://google.com/')
+ >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+ >>> len(proxy.pools)
+ 1
+ >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+ >>> r4 = proxy.request('GET', 'https://twitter.com/')
+ >>> len(proxy.pools)
+ 3
+
+ """
+
+ def __init__(
+ self,
+ proxy_url,
+ num_pools=10,
+ headers=None,
+ proxy_headers=None,
+ proxy_ssl_context=None,
+ use_forwarding_for_https=False,
+ **connection_pool_kw
+ ):
+
+ if isinstance(proxy_url, HTTPConnectionPool):
+ proxy_url = "%s://%s:%i" % (
+ proxy_url.scheme,
+ proxy_url.host,
+ proxy_url.port,
+ )
+ proxy = parse_url(proxy_url)
+
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
+
+ if not proxy.port:
+ port = port_by_scheme.get(proxy.scheme, 80)
+ proxy = proxy._replace(port=port)
+
+ self.proxy = proxy
+ self.proxy_headers = proxy_headers or {}
+ self.proxy_ssl_context = proxy_ssl_context
+ self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https)
+
+ connection_pool_kw["_proxy"] = self.proxy
+ connection_pool_kw["_proxy_headers"] = self.proxy_headers
+ connection_pool_kw["_proxy_config"] = self.proxy_config
+
+ super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
+
+ def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
+ if scheme == "https":
+ return super(ProxyManager, self).connection_from_host(
+ host, port, scheme, pool_kwargs=pool_kwargs
+ )
+
+ return super(ProxyManager, self).connection_from_host(
+ self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
+ )
+
+ def _set_proxy_headers(self, url, headers=None):
+ """
+ Sets headers needed by proxies: specifically, the Accept and Host
+ headers. Only sets headers not provided by the user.
+ """
+ headers_ = {"Accept": "*/*"}
+
+ netloc = parse_url(url).netloc
+ if netloc:
+ headers_["Host"] = netloc
+
+ if headers:
+ headers_.update(headers)
+ return headers_
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ "Same as HTTP(S)ConnectionPool.urlopen, ``url`` must be absolute."
+ u = parse_url(url)
+ if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
+ # For connections using HTTP CONNECT, httplib sets the necessary
+ # headers on the CONNECT to the proxy. If we're not using CONNECT,
+ # we'll definitely need to set 'Host' at the very least.
+ headers = kw.get("headers", self.headers)
+ kw["headers"] = self._set_proxy_headers(url, headers)
+
+ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+ return ProxyManager(proxy_url=url, **kw)
diff --git a/third_party/python/pip/pip/_vendor/urllib3/request.py b/third_party/python/pip/pip/_vendor/urllib3/request.py
new file mode 100644
index 0000000000..398386a5b9
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/request.py
@@ -0,0 +1,170 @@
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+__all__ = ["RequestMethods"]
+
+
+class RequestMethods(object):
+ """
+    Convenience mixin for classes that implement a :meth:`urlopen` method, such
+ as :class:`urllib3.HTTPConnectionPool` and
+ :class:`urllib3.PoolManager`.
+
+ Provides behavior for making common types of HTTP request methods and
+ decides which type of request field encoding to use.
+
+ Specifically,
+
+ :meth:`.request_encode_url` is for sending requests whose fields are
+ encoded in the URL (such as GET, HEAD, DELETE).
+
+ :meth:`.request_encode_body` is for sending requests whose fields are
+ encoded in the *body* of the request using multipart or www-form-urlencoded
+ (such as for POST, PUT, PATCH).
+
+    :meth:`.request` is for making any kind of request; it will look up the
+ appropriate encoding format and use one of the above two methods to make
+ the request.
+
+ Initializer parameters:
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+ """
+
+ _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"}
+
+ def __init__(self, headers=None):
+ self.headers = headers or {}
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ encode_multipart=True,
+ multipart_boundary=None,
+ **kw
+ ): # Abstract
+ raise NotImplementedError(
+ "Classes extending RequestMethods must implement "
+ "their own ``urlopen`` method."
+ )
+
+ def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the appropriate encoding of
+ ``fields`` based on the ``method`` used.
+
+ This is a convenience method that requires the least amount of manual
+ effort. It can be used in most situations, while still having the
+ option to drop down to more specific methods when necessary, such as
+ :meth:`request_encode_url`, :meth:`request_encode_body`,
+ or even the lowest level :meth:`urlopen`.
+ """
+ method = method.upper()
+
+ urlopen_kw["request_url"] = url
+
+ if method in self._encode_url_methods:
+ return self.request_encode_url(
+ method, url, fields=fields, headers=headers, **urlopen_kw
+ )
+ else:
+ return self.request_encode_body(
+ method, url, fields=fields, headers=headers, **urlopen_kw
+ )
+
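+    # Illustration (not in upstream urllib3; ``pm`` and the URL are made
+    # up, with PoolManager standing in for any RequestMethods subclass):
+    # the verb decides which encoding path request() takes.
+    #
+    #   pm = PoolManager()
+    #   # GET is in _encode_url_methods -> fields go into the query string:
+    #   pm.request("GET", "http://example.com/", fields={"q": "x"})
+    #   # POST is not -> fields are encoded into the request body:
+    #   pm.request("POST", "http://example.com/", fields={"q": "x"})
+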
+ def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the url. This is useful for request methods like GET, HEAD, DELETE, etc.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {"headers": headers}
+ extra_kw.update(urlopen_kw)
+
+ if fields:
+ url += "?" + urlencode(fields)
+
+ return self.urlopen(method, url, **extra_kw)
+
+ def request_encode_body(
+ self,
+ method,
+ url,
+ fields=None,
+ headers=None,
+ encode_multipart=True,
+ multipart_boundary=None,
+ **urlopen_kw
+ ):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+ the body. This is useful for request methods like POST, PUT, PATCH, etc.
+
+ When ``encode_multipart=True`` (default), then
+ :func:`urllib3.encode_multipart_formdata` is used to encode
+ the payload with the appropriate content type. Otherwise
+ :func:`urllib.parse.urlencode` is used with the
+ 'application/x-www-form-urlencoded' content type.
+
+ Multipart encoding must be used when posting files, and it's reasonably
+        safe to use it at other times too. However, it may break request
+ signing, such as with OAuth.
+
+ Supports an optional ``fields`` parameter of key/value strings AND
+ key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+ the MIME type is optional. For example::
+
+ fields = {
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(),
+ 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+ }
+
+ When uploading a file, providing a filename (the first parameter of the
+        tuple) is optional but recommended to best mimic the behavior of browsers.
+
+ Note that if ``headers`` are supplied, the 'Content-Type' header will
+ be overwritten because it depends on the dynamic random boundary string
+ which is used to compose the body of the request. The random boundary
+ string can be explicitly set with the ``multipart_boundary`` parameter.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {"headers": {}}
+
+ if fields:
+ if "body" in urlopen_kw:
+ raise TypeError(
+ "request got values for both 'fields' and 'body', can only specify one."
+ )
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(
+ fields, boundary=multipart_boundary
+ )
+ else:
+ body, content_type = (
+ urlencode(fields),
+ "application/x-www-form-urlencoded",
+ )
+
+ extra_kw["body"] = body
+ extra_kw["headers"] = {"Content-Type": content_type}
+
+ extra_kw["headers"].update(headers)
+ extra_kw.update(urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
diff --git a/third_party/python/pip/pip/_vendor/urllib3/response.py b/third_party/python/pip/pip/_vendor/urllib3/response.py
new file mode 100644
index 0000000000..8909f8454e
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/response.py
@@ -0,0 +1,879 @@
+from __future__ import absolute_import
+
+import io
+import logging
+import sys
+import warnings
+import zlib
+from contextlib import contextmanager
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+# Brotli support is disabled in this vendored copy; upstream urllib3
+# attempts ``import brotli`` here and falls back to None on ImportError.
+brotli = None
+
+from . import util
+from ._collections import HTTPHeaderDict
+from .connection import BaseSSLError, HTTPException
+from .exceptions import (
+ BodyNotHttplibCompatible,
+ DecodeError,
+ HTTPError,
+ IncompleteRead,
+ InvalidChunkLength,
+ InvalidHeader,
+ ProtocolError,
+ ReadTimeoutError,
+ ResponseNotChunked,
+ SSLError,
+)
+from .packages import six
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+ def __init__(self):
+ self._first_try = True
+ self._data = b""
+ self._obj = zlib.decompressobj()
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+
+ if not self._first_try:
+ return self._obj.decompress(data)
+
+ self._data += data
+ try:
+ decompressed = self._obj.decompress(data)
+ if decompressed:
+ self._first_try = False
+ self._data = None
+ return decompressed
+ except zlib.error:
+ self._first_try = False
+ self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+ try:
+ return self.decompress(self._data)
+ finally:
+ self._data = None
+
+
+class GzipDecoderState(object):
+
+ FIRST_MEMBER = 0
+ OTHER_MEMBERS = 1
+ SWALLOW_DATA = 2
+
+
+class GzipDecoder(object):
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+ self._state = GzipDecoderState.FIRST_MEMBER
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ ret = bytearray()
+ if self._state == GzipDecoderState.SWALLOW_DATA or not data:
+ return bytes(ret)
+ while True:
+ try:
+ ret += self._obj.decompress(data)
+ except zlib.error:
+ previous_state = self._state
+ # Ignore data after the first error
+ self._state = GzipDecoderState.SWALLOW_DATA
+ if previous_state == GzipDecoderState.OTHER_MEMBERS:
+ # Allow trailing garbage acceptable in other gzip clients
+ return bytes(ret)
+ raise
+ data = self._obj.unused_data
+ if not data:
+ return bytes(ret)
+ self._state = GzipDecoderState.OTHER_MEMBERS
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+
+if brotli is not None:
+
+ class BrotliDecoder(object):
+ # Supports both 'brotlipy' and 'Brotli' packages
+ # since they share an import name. The top branches
+ # are for 'brotlipy' and bottom branches for 'Brotli'
+ def __init__(self):
+ self._obj = brotli.Decompressor()
+ if hasattr(self._obj, "decompress"):
+ self.decompress = self._obj.decompress
+ else:
+ self.decompress = self._obj.process
+
+ def flush(self):
+ if hasattr(self._obj, "flush"):
+ return self._obj.flush()
+ return b""
+
+
+class MultiDecoder(object):
+ """
+ From RFC7231:
+ If one or more encodings have been applied to a representation, the
+ sender that applied the encodings MUST generate a Content-Encoding
+ header field that lists the content codings in the order in which
+ they were applied.
+ """
+
+ def __init__(self, modes):
+ self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
+
+ def flush(self):
+ return self._decoders[0].flush()
+
+ def decompress(self, data):
+ for d in reversed(self._decoders):
+ data = d.decompress(data)
+ return data
+
+
+def _get_decoder(mode):
+ if "," in mode:
+ return MultiDecoder(mode)
+
+ if mode == "gzip":
+ return GzipDecoder()
+
+ if brotli is not None and mode == "br":
+ return BrotliDecoder()
+
+ return DeflateDecoder()
+
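+# Illustration (not in upstream urllib3; ``wire_bytes`` is assumed to be a
+# doubly-encoded payload): for "Content-Encoding: deflate, gzip" (deflate
+# applied first, then gzip), MultiDecoder undoes the codings in reverse.
+#
+#   d = _get_decoder("deflate, gzip")
+#   plain = d.decompress(wire_bytes)   # gunzips first, then inflates
+#   plain += d.flush()                 # delegates to the first-listed coder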
+
+class HTTPResponse(io.IOBase):
+ """
+ HTTP Response container.
+
+ Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
+ loaded and decoded on-demand when the ``data`` property is accessed. This
+ class is also compatible with the Python standard library's :mod:`io`
+ module, and can hence be treated as a readable object in the context of that
+ framework.
+
+ Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
+
+ :param preload_content:
+ If True, the response's body will be preloaded during construction.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param original_response:
+ When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
+ object, it's convenient to include the original for debug purposes. It's
+ otherwise unused.
+
+ :param retries:
+ The retries contains the last :class:`~urllib3.util.retry.Retry` that
+ was used during the request.
+
+ :param enforce_content_length:
+ Enforce content length checking. Body returned by server must match
+ value of Content-Length header, if present. Otherwise, raise error.
+ """
+
+ CONTENT_DECODERS = ["gzip", "deflate"]
+ if brotli is not None:
+ CONTENT_DECODERS += ["br"]
+ REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+ def __init__(
+ self,
+ body="",
+ headers=None,
+ status=0,
+ version=0,
+ reason=None,
+ strict=0,
+ preload_content=True,
+ decode_content=True,
+ original_response=None,
+ pool=None,
+ connection=None,
+ msg=None,
+ retries=None,
+ enforce_content_length=False,
+ request_method=None,
+ request_url=None,
+ auto_close=True,
+ ):
+
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
+ self.status = status
+ self.version = version
+ self.reason = reason
+ self.strict = strict
+ self.decode_content = decode_content
+ self.retries = retries
+ self.enforce_content_length = enforce_content_length
+ self.auto_close = auto_close
+
+ self._decoder = None
+ self._body = None
+ self._fp = None
+ self._original_response = original_response
+ self._fp_bytes_read = 0
+ self.msg = msg
+ self._request_url = request_url
+
+ if body and isinstance(body, (six.string_types, bytes)):
+ self._body = body
+
+ self._pool = pool
+ self._connection = connection
+
+ if hasattr(body, "read"):
+ self._fp = body
+
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get("transfer-encoding", "").lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # Determine length of response
+ self.length_remaining = self._init_length(request_method)
+
+ # If requested, preload the body.
+ if preload_content and not self._body:
+ self._body = self.read(decode_content=decode_content)
+
+ def get_redirect_location(self):
+ """
+ Should we redirect and where to?
+
+ :returns: Truthy redirect location string if we got a redirect status
+ code and valid location. ``None`` if redirect status and no
+ location. ``False`` if not a redirect status code.
+ """
+ if self.status in self.REDIRECT_STATUSES:
+ return self.headers.get("location")
+
+ return False
+
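+    # Illustration (not in upstream urllib3) of the three-way return above:
+    #
+    #   resp.status == 302 with a Location header -> the location (truthy)
+    #   resp.status == 302 without a Location     -> None
+    #   resp.status == 200                        -> False
+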
+ def release_conn(self):
+ if not self._pool or not self._connection:
+ return
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ def drain_conn(self):
+ """
+ Read and discard any remaining HTTP response data in the response connection.
+
+ Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
+ """
+ try:
+ self.read()
+ except (HTTPError, SocketError, BaseSSLError, HTTPException):
+ pass
+
+ @property
+ def data(self):
+        # For backwards-compat with urllib3 0.4 and earlier.
+ if self._body:
+ return self._body
+
+ if self._fp:
+ return self.read(cache_content=True)
+
+ @property
+ def connection(self):
+ return self._connection
+
+ def isclosed(self):
+ return is_fp_closed(self._fp)
+
+ def tell(self):
+ """
+        Obtain the number of bytes pulled over the wire so far. May differ
+        from the amount of content returned by
+        :meth:`urllib3.response.HTTPResponse.read` if bytes are encoded on
+        the wire (e.g., compressed).
+ """
+ return self._fp_bytes_read
+
+ def _init_length(self, request_method):
+ """
+ Set initial length value for Response content if available.
+ """
+ length = self.headers.get("content-length")
+
+ if length is not None:
+ if self.chunked:
+ # This Response will fail with an IncompleteRead if it can't be
+ # received as chunked. This method falls back to attempt reading
+ # the response before raising an exception.
+ log.warning(
+ "Received response with both Content-Length and "
+ "Transfer-Encoding set. This is expressly forbidden "
+ "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+ "attempting to process response as Transfer-Encoding: "
+ "chunked."
+ )
+ return None
+
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = set([int(val) for val in length.split(",")])
+ if len(lengths) > 1:
+ raise InvalidHeader(
+ "Content-Length contained multiple "
+ "unmatching values (%s)" % length
+ )
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ # Convert status to int for comparison
+ # In some cases, httplib returns a status of "_UNKNOWN"
+ try:
+ status = int(self.status)
+ except ValueError:
+ status = 0
+
+ # Check for responses that shouldn't include a body
+ if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
+ length = 0
+
+ return length
+
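+    # Illustration (not in upstream urllib3) of the Content-Length parsing
+    # above, assuming no chunked transfer-encoding:
+    #
+    #   "Content-Length: 42, 42"  -> 42 (duplicates collapse via the set)
+    #   "Content-Length: 42, 13"  -> raises InvalidHeader
+    #   "Content-Length: abc"     -> None (unparseable, length unknown)
+    #   "Content-Length: -5"      -> None (negative values are discarded)
+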
+ def _init_decoder(self):
+ """
+ Set-up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get("content-encoding", "").lower()
+ if self._decoder is None:
+ if content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+ elif "," in content_encoding:
+ encodings = [
+ e.strip()
+ for e in content_encoding.split(",")
+ if e.strip() in self.CONTENT_DECODERS
+ ]
+ if len(encodings):
+ self._decoder = _get_decoder(content_encoding)
+
+ DECODER_ERROR_CLASSES = (IOError, zlib.error)
+ if brotli is not None:
+ DECODER_ERROR_CLASSES += (brotli.error,)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ if not decode_content:
+ return data
+
+ try:
+ if self._decoder:
+ data = self._decoder.decompress(data)
+ except self.DECODER_ERROR_CLASSES as e:
+ content_encoding = self.headers.get("content-encoding", "").lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding,
+ e,
+ )
+ if flush_decoder:
+ data += self._flush_decoder()
+
+ return data
+
+ def _flush_decoder(self):
+ """
+ Flushes the decoder. Should only be called if the decoder is actually
+ being used.
+ """
+ if self._decoder:
+ buf = self._decoder.decompress(b"")
+ return buf + self._decoder.flush()
+
+ return b""
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+        Catch low-level Python exceptions, instead re-raising urllib3
+        variants, so that low-level exceptions are not leaked in the
+        high-level API.
+
+ On exit, release the connection back to the pool.
+ """
+ clean_exit = False
+
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+ # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, "Read timed out.")
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if "read operation timed out" not in str(e):
+ # SSL errors related to framing/MAC get wrapped and reraised here
+ raise SSLError(e)
+
+ raise ReadTimeoutError(self._pool, None, "Read timed out.")
+
+ except (HTTPException, SocketError) as e:
+ # This includes IncompleteRead.
+ raise ProtocolError("Connection broken: %r" % e, e)
+
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now to ensure that the connection is
+ # released back to the pool.
+ if self._original_response:
+ self._original_response.close()
+
+ # Closing the response may not actually be sufficient to close
+ # everything, so if we have a hold of the connection close that
+ # too.
+ if self._connection:
+ self._connection.close()
+
+ # If we hold the original response but it's closed now, we should
+ # return the connection back to the pool.
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
+ def _fp_read(self, amt):
+ """
+        Read a response, working around a known problem: reading more bytes
+        than fit in a 32-bit int at a time via SSL can, in some known cases,
+        lead to an overflow error, so chunked reads are used whenever `amt`
+        or `self.length_remaining` indicate that the problem may occur.
+
+ The known cases:
+ * 3.8 <= CPython < 3.9.7 because of a bug
+ https://github.com/urllib3/urllib3/issues/2513#issuecomment-1152559900.
+ * urllib3 injected with pyOpenSSL-backed SSL-support.
+ * CPython < 3.10 only when `amt` does not fit 32-bit int.
+ """
+ assert self._fp
+ c_int_max = 2 ** 31 - 1
+ if (
+ (
+ (amt and amt > c_int_max)
+ or (self.length_remaining and self.length_remaining > c_int_max)
+ )
+ and not util.IS_SECURETRANSPORT
+ and (util.IS_PYOPENSSL or sys.version_info < (3, 10))
+ ):
+ buffer = io.BytesIO()
+ # Besides `max_chunk_amt` being a maximum chunk size, it
+ # affects memory overhead of reading a response by this
+ # method in CPython.
+ # `c_int_max` equal to 2 GiB - 1 byte is the actual maximum
+ # chunk size that does not lead to an overflow error, but
+ # 256 MiB is a compromise.
+ max_chunk_amt = 2 ** 28
+ while amt is None or amt != 0:
+ if amt is not None:
+ chunk_amt = min(amt, max_chunk_amt)
+ amt -= chunk_amt
+ else:
+ chunk_amt = max_chunk_amt
+ data = self._fp.read(chunk_amt)
+ if not data:
+ break
+ buffer.write(data)
+ del data # to reduce peak memory usage by `max_chunk_amt`.
+ return buffer.getvalue()
+ else:
+ # StringIO doesn't like amt=None
+ return self._fp.read(amt) if amt is not None else self._fp.read()
+
+ def read(self, amt=None, decode_content=None, cache_content=False):
+ """
+ Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
+ parameters: ``decode_content`` and ``cache_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param cache_content:
+ If True, will save the returned data such that the same result is
+            returned regardless of the state of the underlying file object. This
+ is useful if you want the ``.data`` property to continue working
+ after having ``.read()`` the file object. (Overridden if ``amt`` is
+ set.)
+ """
+ self._init_decoder()
+ if decode_content is None:
+ decode_content = self.decode_content
+
+ if self._fp is None:
+ return
+
+ flush_decoder = False
+ fp_closed = getattr(self._fp, "closed", False)
+
+ with self._error_catcher():
+ data = self._fp_read(amt) if not fp_closed else b""
+ if amt is None:
+ flush_decoder = True
+ else:
+ cache_content = False
+ if (
+ amt != 0 and not data
+ ): # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
+ flush_decoder = True
+ if self.enforce_content_length and self.length_remaining not in (
+ 0,
+ None,
+ ):
+ # This is an edge case that httplib failed to cover due
+ # to concerns of backward compatibility. We're
+ # addressing it here to make sure IncompleteRead is
+ # raised during streaming, so all calls with incorrect
+ # Content-Length are caught.
+ raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+ if data:
+ self._fp_bytes_read += len(data)
+ if self.length_remaining is not None:
+ self.length_remaining -= len(data)
+
+ data = self._decode(data, decode_content, flush_decoder)
+
+ if cache_content:
+ self._body = data
+
+ return data
+
+ def stream(self, amt=2 ** 16, decode_content=None):
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+            How much of the content to read. The generator will return up to
+            that much data per iteration, but may return less. This is
+            particularly likely when using compressed data. However, the
+            empty string will never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ if self.chunked and self.supports_chunked_reads():
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
+
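+    # Illustration (not in upstream urllib3; ``pool`` and the paths are
+    # made up): streaming a large body to disk without preloading it.
+    #
+    #   resp = pool.urlopen("GET", "/big", preload_content=False)
+    #   with open("big.bin", "wb") as out:
+    #       for chunk in resp.stream(2 ** 16):
+    #           out.write(chunk)
+    #   resp.release_conn()
+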
+ @classmethod
+ def from_httplib(ResponseCls, r, **response_kw):
+ """
+ Given an :class:`http.client.HTTPResponse` instance ``r``, return a
+ corresponding :class:`urllib3.response.HTTPResponse` object.
+
+ Remaining parameters are passed to the HTTPResponse constructor, along
+ with ``original_response=r``.
+ """
+ headers = r.msg
+
+ if not isinstance(headers, HTTPHeaderDict):
+ if six.PY2:
+ # Python 2.7
+ headers = HTTPHeaderDict.from_httplib(headers)
+ else:
+ headers = HTTPHeaderDict(headers.items())
+
+ # HTTPResponse objects in Python 3 don't have a .strict attribute
+ strict = getattr(r, "strict", 0)
+ resp = ResponseCls(
+ body=r,
+ headers=headers,
+ status=r.status,
+ version=r.version,
+ reason=r.reason,
+ strict=strict,
+ original_response=r,
+ **response_kw
+ )
+ return resp
+
+ # Backwards-compatibility methods for http.client.HTTPResponse
+ def getheaders(self):
+ warnings.warn(
+ "HTTPResponse.getheaders() is deprecated and will be removed "
+ "in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.",
+ category=DeprecationWarning,
+ stacklevel=2,
+ )
+ return self.headers
+
+ def getheader(self, name, default=None):
+ warnings.warn(
+ "HTTPResponse.getheader() is deprecated and will be removed "
+ "in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).",
+ category=DeprecationWarning,
+ stacklevel=2,
+ )
+ return self.headers.get(name, default)
+
+ # Backwards compatibility for http.cookiejar
+ def info(self):
+ return self.headers
+
+ # Overrides from io.IOBase
+ def close(self):
+ if not self.closed:
+ self._fp.close()
+
+ if self._connection:
+ self._connection.close()
+
+ if not self.auto_close:
+ io.IOBase.close(self)
+
+ @property
+ def closed(self):
+ if not self.auto_close:
+ return io.IOBase.closed.__get__(self)
+ elif self._fp is None:
+ return True
+ elif hasattr(self._fp, "isclosed"):
+ return self._fp.isclosed()
+ elif hasattr(self._fp, "closed"):
+ return self._fp.closed
+ else:
+ return True
+
+ def fileno(self):
+ if self._fp is None:
+ raise IOError("HTTPResponse has no file to get a fileno from")
+ elif hasattr(self._fp, "fileno"):
+ return self._fp.fileno()
+ else:
+ raise IOError(
+ "The file-like object this HTTPResponse is wrapped "
+ "around has no file descriptor"
+ )
+
+ def flush(self):
+ if (
+ self._fp is not None
+ and hasattr(self._fp, "flush")
+ and not getattr(self._fp, "closed", False)
+ ):
+ return self._fp.flush()
+
+ def readable(self):
+ # This method is required for `io` module compatibility.
+ return True
+
+ def readinto(self, b):
+ # This method is required for `io` module compatibility.
+ temp = self.read(len(b))
+ if len(temp) == 0:
+ return 0
+ else:
+ b[: len(temp)] = temp
+ return len(temp)
+
+ def supports_chunked_reads(self):
+ """
+ Checks if the underlying file-like object looks like a
+ :class:`http.client.HTTPResponse` object. We do this by testing for
+        the fp attribute. If it is present, we assume it returns raw chunks as
+ processed by read_chunked().
+ """
+ return hasattr(self._fp, "fp")
+
+ def _update_chunk_length(self):
+        # First, we'll figure out the length of a chunk and then
+        # we'll try to read it from the socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b";", 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise InvalidChunkLength(self, line)
+
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with a better structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked(
+ "Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing."
+ )
+ if not self.supports_chunked_reads():
+ raise BodyNotHttplibCompatible(
+ "Body should be http.client.HTTPResponse like. "
+ "It should have have an fp attribute which returns raw chunks."
+ )
+
+ with self._error_catcher():
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ # If a response is already read and closed
+ # then return immediately.
+ if self._fp.fp is None:
+ return
+
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ decoded = self._decode(
+ chunk, decode_content=decode_content, flush_decoder=False
+ )
+ if decoded:
+ yield decoded
+
+ if decode_content:
+ # On CPython and PyPy, we should never need to flush the
+ # decoder. However, on Jython we *might* need to, so
+                # let's defensively do it anyway.
+ decoded = self._flush_decoder()
+ if decoded: # Platform-specific: Jython.
+ yield decoded
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b"\r\n":
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
+
+ def geturl(self):
+ """
+ Returns the URL that was the source of this response.
+ If the request that generated this response redirected, this method
+ will return the final redirect location.
+ """
+ if self.retries is not None and len(self.retries.history):
+ return self.retries.history[-1].redirect_location
+ else:
+ return self._request_url
+
+ def __iter__(self):
+ buffer = []
+ for chunk in self.stream(decode_content=True):
+ if b"\n" in chunk:
+ chunk = chunk.split(b"\n")
+ yield b"".join(buffer) + chunk[0] + b"\n"
+ for x in chunk[1:-1]:
+ yield x + b"\n"
+ if chunk[-1]:
+ buffer = [chunk[-1]]
+ else:
+ buffer = []
+ else:
+ buffer.append(chunk)
+ if buffer:
+ yield b"".join(buffer)
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/__init__.py b/third_party/python/pip/pip/_vendor/urllib3/util/__init__.py
new file mode 100644
index 0000000000..4547fc522b
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/__init__.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
+from .response import is_fp_closed
+from .retry import Retry
+from .ssl_ import (
+ ALPN_PROTOCOLS,
+ HAS_SNI,
+ IS_PYOPENSSL,
+ IS_SECURETRANSPORT,
+ PROTOCOL_TLS,
+ SSLContext,
+ assert_fingerprint,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .timeout import Timeout, current_time
+from .url import Url, get_host, parse_url, split_first
+from .wait import wait_for_read, wait_for_write
+
+__all__ = (
+ "HAS_SNI",
+ "IS_PYOPENSSL",
+ "IS_SECURETRANSPORT",
+ "SSLContext",
+ "PROTOCOL_TLS",
+ "ALPN_PROTOCOLS",
+ "Retry",
+ "Timeout",
+ "Url",
+ "assert_fingerprint",
+ "current_time",
+ "is_connection_dropped",
+ "is_fp_closed",
+ "get_host",
+ "parse_url",
+ "make_headers",
+ "resolve_cert_reqs",
+ "resolve_ssl_version",
+ "split_first",
+ "ssl_wrap_socket",
+ "wait_for_read",
+ "wait_for_write",
+ "SKIP_HEADER",
+ "SKIPPABLE_HEADERS",
+)
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/connection.py b/third_party/python/pip/pip/_vendor/urllib3/util/connection.py
new file mode 100644
index 0000000000..6af1138f26
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/connection.py
@@ -0,0 +1,149 @@
+from __future__ import absolute_import
+
+import socket
+
+from ..contrib import _appengine_environ
+from ..exceptions import LocationParseError
+from ..packages import six
+from .wait import NoWayToWaitForSocketError, wait_for_read
+
+
+def is_connection_dropped(conn): # Platform-specific
+ """
+ Returns True if the connection is dropped and should be closed.
+
+ :param conn:
+ :class:`http.client.HTTPConnection` object.
+
+ Note: For platforms like AppEngine, this will always return ``False`` to
+ let the platform handle connection recycling transparently for us.
+ """
+ sock = getattr(conn, "sock", False)
+ if sock is False: # Platform-specific: AppEngine
+ return False
+ if sock is None: # Connection already closed (such as by httplib).
+ return True
+ try:
+ # Returns True if readable, which here means it's been dropped
+ return wait_for_read(sock, timeout=0.0)
+ except NoWayToWaitForSocketError: # Platform-specific: AppEngine
+ return False
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. The only addition to its signature is
+# `socket_options`. One additional modification is that we avoid binding
+# to IPv6 servers discovered in DNS if the system doesn't have IPv6
+# functionality.
+def create_connection(
+ address,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None,
+ socket_options=None,
+):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`socket.getdefaulttimeout`
+ is used. If *source_address* is set it must be a tuple of (host, port)
+ for the socket to bind as a source address before making the connection.
+    A host of '' or port 0 tells the OS to use the default.
+ """
+
+ host, port = address
+ if host.startswith("["):
+ host = host.strip("[]")
+ err = None
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ try:
+ host.encode("idna")
+ except UnicodeError:
+ return six.raise_from(
+ LocationParseError(u"'%s', label empty or too long" % host), None
+ )
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+
+ # If provided, set socket level options before connecting.
+ _set_socket_options(sock, socket_options)
+
+ if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+ sock = None
+
+ if err is not None:
+ raise err
+
+ raise socket.error("getaddrinfo returned an empty list")
+
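+# A hedged usage sketch (not part of the vendored module); the host and
+# option values are illustrative assumptions:
+#
+#     import socket
+#     sock = create_connection(
+#         ("example.com", 80),
+#         timeout=5.0,
+#         socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
+#     )
+#     sock.close()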
+
+def _set_socket_options(sock, options):
+ if options is None:
+ return
+
+ for opt in options:
+ sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """Returns True if the system can bind an IPv6 address."""
+ sock = None
+ has_ipv6 = False
+
+ # App Engine doesn't support IPV6 sockets and actually has a quota on the
+ # number of sockets that can be used, so just early out here instead of
+ # creating a socket needlessly.
+ # See https://github.com/urllib3/urllib3/issues/1446
+ if _appengine_environ.is_appengine_sandbox():
+ return False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/urllib3/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6("::1")
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/proxy.py b/third_party/python/pip/pip/_vendor/urllib3/util/proxy.py
new file mode 100644
index 0000000000..2199cc7b7f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/proxy.py
@@ -0,0 +1,57 @@
+from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
+
+
+def connection_requires_http_tunnel(
+ proxy_url=None, proxy_config=None, destination_scheme=None
+):
+ """
+ Returns True if the connection requires an HTTP CONNECT through the proxy.
+
+ :param URL proxy_url:
+ URL of the proxy.
+ :param ProxyConfig proxy_config:
+ Proxy configuration from poolmanager.py
+ :param str destination_scheme:
+ The scheme of the destination (e.g. https, http, etc.).
+ """
+ # If we're not using a proxy, no way to use a tunnel.
+ if proxy_url is None:
+ return False
+
+ # HTTP destinations never require tunneling, we always forward.
+ if destination_scheme == "http":
+ return False
+
+ # Support for forwarding with HTTPS proxies and HTTPS destinations.
+ if (
+ proxy_url.scheme == "https"
+ and proxy_config
+ and proxy_config.use_forwarding_for_https
+ ):
+ return False
+
+ # Otherwise always use a tunnel.
+ return True
+
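+# An illustrative sketch (not part of the vendored module): with an http://
+# proxy, only https:// destinations need a CONNECT tunnel, while plain http
+# requests are forwarded. The proxy URL below is hypothetical; parse_url
+# comes from the sibling .url module.
+#
+#     from pip._vendor.urllib3.util.url import parse_url
+#     proxy = parse_url("http://proxy.internal:3128")
+#     connection_requires_http_tunnel(proxy, None, "https")  # -> True
+#     connection_requires_http_tunnel(proxy, None, "http")   # -> False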
+
+def create_proxy_ssl_context(
+ ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
+):
+ """
+ Generates a default proxy ssl context if one hasn't been provided by the
+ user.
+ """
+ ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(ssl_version),
+ cert_reqs=resolve_cert_reqs(cert_reqs),
+ )
+
+ if (
+ not ca_certs
+ and not ca_cert_dir
+ and not ca_cert_data
+ and hasattr(ssl_context, "load_default_certs")
+ ):
+ ssl_context.load_default_certs()
+
+ return ssl_context
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/queue.py b/third_party/python/pip/pip/_vendor/urllib3/util/queue.py
new file mode 100644
index 0000000000..41784104ee
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/queue.py
@@ -0,0 +1,22 @@
+import collections
+
+from ..packages import six
+from ..packages.six.moves import queue
+
+if six.PY2:
+ # Queue is imported for side effects on MS Windows. See issue #229.
+ import Queue as _unused_module_Queue # noqa: F401
+
+
+class LifoQueue(queue.Queue):
+ def _init(self, _):
+ self.queue = collections.deque()
+
+ def _qsize(self, len=len):
+ return len(self.queue)
+
+ def _put(self, item):
+ self.queue.append(item)
+
+ def _get(self):
+ return self.queue.pop()
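+
+
+# An illustrative sketch (not part of the vendored module): items come back
+# in LIFO order, so the most recently returned connection is reused first.
+#
+#     q = LifoQueue()
+#     q.put("conn-1")
+#     q.put("conn-2")
+#     q.get()  # -> "conn-2"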
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/request.py b/third_party/python/pip/pip/_vendor/urllib3/util/request.py
new file mode 100644
index 0000000000..330766ef4f
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/request.py
@@ -0,0 +1,137 @@
+from __future__ import absolute_import
+
+from base64 import b64encode
+
+from ..exceptions import UnrewindableBodyError
+from ..packages.six import b, integer_types
+
+# Pass as a value within ``headers`` to skip
+# emitting some HTTP headers that are added automatically.
+# The only headers that are supported are ``Accept-Encoding``,
+# ``Host``, and ``User-Agent``.
+SKIP_HEADER = "@@@SKIP_HEADER@@@"
+SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
+
+ACCEPT_ENCODING = "gzip,deflate"
+
+_FAILEDTELL = object()
+
+
+def make_headers(
+ keep_alive=None,
+ accept_encoding=None,
+ user_agent=None,
+ basic_auth=None,
+ proxy_basic_auth=None,
+ disable_cache=None,
+):
+ """
+ Shortcuts for generating request headers.
+
+ :param keep_alive:
+ If ``True``, adds 'connection: keep-alive' header.
+
+ :param accept_encoding:
+ Can be a boolean, list, or string.
+ ``True`` translates to 'gzip,deflate'.
+ List will get joined by comma.
+ String will be used as provided.
+
+ :param user_agent:
+ String representing the user-agent you want, such as
+ "python-urllib3/0.6"
+
+ :param basic_auth:
+ Colon-separated username:password string for 'authorization: basic ...'
+ auth header.
+
+ :param proxy_basic_auth:
+ Colon-separated username:password string for 'proxy-authorization: basic ...'
+ auth header.
+
+ :param disable_cache:
+ If ``True``, adds 'cache-control: no-cache' header.
+
+ Example::
+
+ >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+ {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+ >>> make_headers(accept_encoding=True)
+ {'accept-encoding': 'gzip,deflate'}
+ """
+ headers = {}
+ if accept_encoding:
+ if isinstance(accept_encoding, str):
+ pass
+ elif isinstance(accept_encoding, list):
+ accept_encoding = ",".join(accept_encoding)
+ else:
+ accept_encoding = ACCEPT_ENCODING
+ headers["accept-encoding"] = accept_encoding
+
+ if user_agent:
+ headers["user-agent"] = user_agent
+
+ if keep_alive:
+ headers["connection"] = "keep-alive"
+
+ if basic_auth:
+ headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8")
+
+ if proxy_basic_auth:
+ headers["proxy-authorization"] = "Basic " + b64encode(
+ b(proxy_basic_auth)
+ ).decode("utf-8")
+
+ if disable_cache:
+ headers["cache-control"] = "no-cache"
+
+ return headers
+
+
+def set_file_position(body, pos):
+ """
+ If a position is provided, move file to that point.
+ Otherwise, we'll attempt to record a position for future use.
+ """
+ if pos is not None:
+ rewind_body(body, pos)
+ elif getattr(body, "tell", None) is not None:
+ try:
+ pos = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body.
+ pos = _FAILEDTELL
+
+ return pos
+
+
+def rewind_body(body, body_pos):
+ """
+ Attempt to rewind body to a certain position.
+ Primarily used for request redirects and retries.
+
+ :param body:
+ File-like object that supports seek.
+
+ :param int body_pos:
+ Position to seek to in file.
+ """
+ body_seek = getattr(body, "seek", None)
+ if body_seek is not None and isinstance(body_pos, integer_types):
+ try:
+ body_seek(body_pos)
+ except (IOError, OSError):
+ raise UnrewindableBodyError(
+ "An error occurred when rewinding request body for redirect/retry."
+ )
+ elif body_pos is _FAILEDTELL:
+ raise UnrewindableBodyError(
+ "Unable to record file position for rewinding "
+ "request body during a redirect/retry."
+ )
+ else:
+ raise ValueError(
+ "body_pos must be of type integer, instead it was %s." % type(body_pos)
+ )
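+
+
+# An illustrative round trip (not part of the vendored module): record the
+# body position before the first send, then rewind it when a redirect or
+# retry needs to resend the body.
+#
+#     import io
+#     body = io.BytesIO(b"payload")
+#     pos = set_file_position(body, None)  # records 0 via body.tell()
+#     body.read()                          # the first attempt consumes it
+#     rewind_body(body, pos)               # seek back to 0 for the retry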
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/response.py b/third_party/python/pip/pip/_vendor/urllib3/util/response.py
new file mode 100644
index 0000000000..5ea609cced
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/response.py
@@ -0,0 +1,107 @@
+from __future__ import absolute_import
+
+from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
+
+from ..exceptions import HeaderParsingError
+from ..packages.six.moves import http_client as httplib
+
+
+def is_fp_closed(obj):
+ """
+ Checks whether a given file-like object is closed.
+
+ :param obj:
+ The file-like object to check.
+ """
+
+ try:
+ # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+ # GH Issue #928
+ return obj.isclosed()
+ except AttributeError:
+ pass
+
+ try:
+ # Check via the official file-like-object way.
+ return obj.closed
+ except AttributeError:
+ pass
+
+ try:
+ # Check if the object is a container for another file-like object that
+ # gets released on exhaustion (e.g. HTTPResponse).
+ return obj.fp is None
+ except AttributeError:
+ pass
+
+ raise ValueError("Unable to determine whether fp is closed.")
+
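+# An illustrative sketch (not part of the vendored module):
+#
+#     import io
+#     fp = io.BytesIO(b"data")
+#     is_fp_closed(fp)  # -> False, via the standard `closed` attribute
+#     fp.close()
+#     is_fp_closed(fp)  # -> True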
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param http.client.HTTPMessage headers: Headers to verify.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+ raise TypeError("expected httplib.HTTPMessage, got {0}.".format(type(headers)))
+
+ defects = getattr(headers, "defects", None)
+ get_payload = getattr(headers, "get_payload", None)
+
+ unparsed_data = None
+ if get_payload:
+ # get_payload is actually email.message.Message.get_payload;
+ # we're only interested in the result if it's not a multipart message
+ if not headers.is_multipart():
+ payload = get_payload()
+
+ if isinstance(payload, (bytes, str)):
+ unparsed_data = payload
+ if defects:
+ # httplib is assuming a response body is available
+ # when parsing headers even when httplib only sends
+ # header data to parse_headers(). This results in
+ # defects on multipart responses in particular.
+ # See: https://github.com/urllib3/urllib3/issues/800
+
+ # So we ignore the following defects:
+ # - StartBoundaryNotFoundDefect:
+ # The claimed start boundary was never found.
+ # - MultipartInvariantViolationDefect:
+ # A message claimed to be a multipart but no subparts were found.
+ defects = [
+ defect
+ for defect in defects
+ if not isinstance(
+ defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
+ )
+ ]
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+ Checks whether the request that produced a response was a HEAD request.
+ Handles the quirks of AppEngine.
+
+ :param http.client.HTTPResponse response:
+ Response to check if the originating request
+ used 'HEAD' as a method.
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == "HEAD"
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/retry.py b/third_party/python/pip/pip/_vendor/urllib3/util/retry.py
new file mode 100644
index 0000000000..2490d5e5b6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/retry.py
@@ -0,0 +1,620 @@
+from __future__ import absolute_import
+
+import email
+import logging
+import re
+import time
+import warnings
+from collections import namedtuple
+from itertools import takewhile
+
+from ..exceptions import (
+ ConnectTimeoutError,
+ InvalidHeader,
+ MaxRetryError,
+ ProtocolError,
+ ProxyError,
+ ReadTimeoutError,
+ ResponseError,
+)
+from ..packages import six
+
+log = logging.getLogger(__name__)
+
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple(
+ "RequestHistory", ["method", "url", "error", "status", "redirect_location"]
+)
+
+
+# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
+_Default = object()
+
+
+class _RetryMeta(type):
+ @property
+ def DEFAULT_METHOD_WHITELIST(cls):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
+ DeprecationWarning,
+ )
+ return cls.DEFAULT_ALLOWED_METHODS
+
+ @DEFAULT_METHOD_WHITELIST.setter
+ def DEFAULT_METHOD_WHITELIST(cls, value):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
+ DeprecationWarning,
+ )
+ cls.DEFAULT_ALLOWED_METHODS = value
+
+ @property
+ def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
+ DeprecationWarning,
+ )
+ return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+
+ @DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
+ def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
+ DeprecationWarning,
+ )
+ cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
+
+ @property
+ def BACKOFF_MAX(cls):
+ warnings.warn(
+ "Using 'Retry.BACKOFF_MAX' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
+ DeprecationWarning,
+ )
+ return cls.DEFAULT_BACKOFF_MAX
+
+ @BACKOFF_MAX.setter
+ def BACKOFF_MAX(cls, value):
+ warnings.warn(
+ "Using 'Retry.BACKOFF_MAX' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_BACKOFF_MAX' instead",
+ DeprecationWarning,
+ )
+ cls.DEFAULT_BACKOFF_MAX = value
+
+
+@six.add_metaclass(_RetryMeta)
+class Retry(object):
+ """Retry configuration.
+
+ Each retry attempt will create a new Retry object with updated values, so
+ they can be safely reused.
+
+ Retries can be defined as a default for a pool::
+
+ retries = Retry(connect=5, read=2, redirect=5)
+ http = PoolManager(retries=retries)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+ Retries can be disabled by passing ``False``::
+
+ response = http.request('GET', 'http://example.com/', retries=False)
+
+ Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+ retries are disabled, in which case the causing exception will be raised.
+
+ :param int total:
+ Total number of retries to allow. Takes precedence over other counts.
+
+ Set to ``None`` to remove this constraint and fall back on other
+ counts.
+
+ Set to ``0`` to fail on the first retry.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int connect:
+ How many connection-related errors to retry on.
+
+ These are errors raised before the request is sent to the remote server,
+ which we assume has not triggered the server to process the request.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int read:
+ How many times to retry on read errors.
+
+ These errors are raised after the request was sent to the server, so the
+ request may have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int redirect:
+ How many redirects to perform. Limit this to avoid infinite redirect
+ loops.
+
+ A redirect is an HTTP response with a status code 301, 302, 303, 307 or
+ 308.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int status:
+ How many times to retry on bad status codes.
+
+ These are retries made on responses, where status code matches
+ ``status_forcelist``.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int other:
+ How many times to retry on other errors.
+
+ Other errors are errors that are not connect, read, redirect or status errors.
+ These errors might be raised after the request was sent to the server, so the
+ request might have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ If ``total`` is not set, it's a good idea to set this to 0 to account
+ for unexpected edge cases and avoid infinite retry loops.
+
+ :param iterable allowed_methods:
+ Set of uppercased HTTP method verbs that we should retry on.
+
+ By default, we only retry on methods which are considered to be
+ idempotent (multiple requests with the same parameters end with the
+ same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
+
+ Set to a ``False`` value to retry on any verb.
+
+ .. warning::
+
+ Previously this parameter was named ``method_whitelist``; that
+ usage is deprecated in v1.26.0 and will be removed in v2.0.
+
+ :param iterable status_forcelist:
+ A set of integer HTTP status codes that we should force a retry on.
+ A retry is initiated if the request method is in ``allowed_methods``
+ and the response status code is in ``status_forcelist``.
+
+ By default, this is disabled with ``None``.
+
+ :param float backoff_factor:
+ A backoff factor to apply between attempts after the second try
+ (most errors are resolved immediately by a second try without a
+ delay). urllib3 will sleep for::
+
+ {backoff factor} * (2 ** ({number of total retries} - 1))
+
+ seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+ for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+ than :attr:`Retry.DEFAULT_BACKOFF_MAX`.
+
+ By default, backoff is disabled (set to 0).
+
+ :param bool raise_on_redirect: Whether, if the number of redirects is
+ exhausted, to raise a MaxRetryError, or to return a response with a
+ response code in the 3xx range.
+
+ :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+ whether we should raise an exception, or return a response,
+ if status falls in ``status_forcelist`` range and retries have
+ been exhausted.
+
+ :param tuple history: The history of the request encountered during
+ each call to :meth:`~Retry.increment`. The list is in the order
+ the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+ :param bool respect_retry_after_header:
+ Whether to respect Retry-After header on status codes defined as
+ :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+ :param iterable remove_headers_on_redirect:
+ Sequence of headers to remove from the request when a response
+ indicating a redirect is returned before firing off the redirected
+ request.
+ """
+
+ #: Default methods to be used for ``allowed_methods``
+ DEFAULT_ALLOWED_METHODS = frozenset(
+ ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
+ )
+
+ #: Default status codes to be used for ``status_forcelist``
+ RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+ #: Default headers to be used for ``remove_headers_on_redirect``
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"])
+
+ #: Maximum backoff time.
+ DEFAULT_BACKOFF_MAX = 120
+
+ def __init__(
+ self,
+ total=10,
+ connect=None,
+ read=None,
+ redirect=None,
+ status=None,
+ other=None,
+ allowed_methods=_Default,
+ status_forcelist=None,
+ backoff_factor=0,
+ raise_on_redirect=True,
+ raise_on_status=True,
+ history=None,
+ respect_retry_after_header=True,
+ remove_headers_on_redirect=_Default,
+ # TODO: Deprecated, remove in v2.0
+ method_whitelist=_Default,
+ ):
+
+ if method_whitelist is not _Default:
+ if allowed_methods is not _Default:
+ raise ValueError(
+ "Using both 'allowed_methods' and "
+ "'method_whitelist' together is not allowed. "
+ "Instead only use 'allowed_methods'"
+ )
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ stacklevel=2,
+ )
+ allowed_methods = method_whitelist
+ if allowed_methods is _Default:
+ allowed_methods = self.DEFAULT_ALLOWED_METHODS
+ if remove_headers_on_redirect is _Default:
+ remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+
+ self.total = total
+ self.connect = connect
+ self.read = read
+ self.status = status
+ self.other = other
+
+ if redirect is False or total is False:
+ redirect = 0
+ raise_on_redirect = False
+
+ self.redirect = redirect
+ self.status_forcelist = status_forcelist or set()
+ self.allowed_methods = allowed_methods
+ self.backoff_factor = backoff_factor
+ self.raise_on_redirect = raise_on_redirect
+ self.raise_on_status = raise_on_status
+ self.history = history or tuple()
+ self.respect_retry_after_header = respect_retry_after_header
+ self.remove_headers_on_redirect = frozenset(
+ [h.lower() for h in remove_headers_on_redirect]
+ )
+
+ def new(self, **kw):
+ params = dict(
+ total=self.total,
+ connect=self.connect,
+ read=self.read,
+ redirect=self.redirect,
+ status=self.status,
+ other=self.other,
+ status_forcelist=self.status_forcelist,
+ backoff_factor=self.backoff_factor,
+ raise_on_redirect=self.raise_on_redirect,
+ raise_on_status=self.raise_on_status,
+ history=self.history,
+ remove_headers_on_redirect=self.remove_headers_on_redirect,
+ respect_retry_after_header=self.respect_retry_after_header,
+ )
+
+ # TODO: If already given in **kw we use what's given to us
+ # If not given we need to figure out what to pass. We decide
+ # based on whether our class has the 'method_whitelist' property
+ # and if so we pass the deprecated 'method_whitelist' otherwise
+ # we use 'allowed_methods'. Remove in v2.0
+ if "method_whitelist" not in kw and "allowed_methods" not in kw:
+ if "method_whitelist" in self.__dict__:
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ params["method_whitelist"] = self.allowed_methods
+ else:
+ params["allowed_methods"] = self.allowed_methods
+
+ params.update(kw)
+ return type(self)(**params)
+
+ @classmethod
+ def from_int(cls, retries, redirect=True, default=None):
+ """Backwards-compatibility for the old retries format."""
+ if retries is None:
+ retries = default if default is not None else cls.DEFAULT
+
+ if isinstance(retries, Retry):
+ return retries
+
+ redirect = bool(redirect) and None
+ new_retries = cls(retries, redirect=redirect)
+ log.debug("Converted retries value: %r -> %r", retries, new_retries)
+ return new_retries
+
+ def get_backoff_time(self):
+ """Formula for computing the current backoff
+
+ :rtype: float
+ """
+ # We want to consider only the last consecutive errors sequence (Ignore redirects).
+ consecutive_errors_len = len(
+ list(
+ takewhile(lambda x: x.redirect_location is None, reversed(self.history))
+ )
+ )
+ if consecutive_errors_len <= 1:
+ return 0
+
+ backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+ return min(self.DEFAULT_BACKOFF_MAX, backoff_value)
+
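+ # A worked example (illustrative, not part of the vendored module): with
+ # backoff_factor=0.5 and three consecutive errors in the history, the next
+ # sleep is 0.5 * 2 ** (3 - 1) = 2.0 seconds, capped at DEFAULT_BACKOFF_MAX
+ # (120 seconds).
+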
+ def parse_retry_after(self, retry_after):
+ # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ if re.match(r"^\s*[0-9]+\s*$", retry_after):
+ seconds = int(retry_after)
+ else:
+ retry_date_tuple = email.utils.parsedate_tz(retry_after)
+ if retry_date_tuple is None:
+ raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+ if retry_date_tuple[9] is None: # Python 2
+ # Assume UTC if no timezone was specified
+ # On Python 2.7, parsedate_tz returns None for a timezone offset
+ # instead of 0 if no timezone is given, whereas mktime_tz treats
+ # a None timezone offset as local time.
+ retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
+
+ retry_date = email.utils.mktime_tz(retry_date_tuple)
+ seconds = retry_date - time.time()
+
+ if seconds < 0:
+ seconds = 0
+
+ return seconds
+
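+ # An illustrative sketch (not part of the vendored module): both RFC 7231
+ # forms of Retry-After are accepted, delta-seconds or an HTTP-date. Past
+ # dates clamp to 0; future dates yield the remaining seconds.
+ #
+ #     retry = Retry()
+ #     retry.parse_retry_after("120")  # -> 120
+ #     retry.parse_retry_after("Fri, 31 Dec 1999 23:59:59 GMT")  # -> 0
+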
+ def get_retry_after(self, response):
+ """Get the value of Retry-After in seconds."""
+
+ retry_after = response.headers.get("Retry-After")
+
+ if retry_after is None:
+ return None
+
+ return self.parse_retry_after(retry_after)
+
+ def sleep_for_retry(self, response=None):
+ retry_after = self.get_retry_after(response)
+ if retry_after:
+ time.sleep(retry_after)
+ return True
+
+ return False
+
+ def _sleep_backoff(self):
+ backoff = self.get_backoff_time()
+ if backoff <= 0:
+ return
+ time.sleep(backoff)
+
+ def sleep(self, response=None):
+ """Sleep between retry attempts.
+
+ This method will respect a server's ``Retry-After`` response header
+ and sleep the duration of the time requested. If that is not present, it
+ will use an exponential backoff. By default, the backoff factor is 0 and
+ this method will return immediately.
+ """
+
+ if self.respect_retry_after_header and response:
+ slept = self.sleep_for_retry(response)
+ if slept:
+ return
+
+ self._sleep_backoff()
+
+ def _is_connection_error(self, err):
+ """Errors when we're fairly sure that the server did not receive the
+ request, so it should be safe to retry.
+ """
+ if isinstance(err, ProxyError):
+ err = err.original_error
+ return isinstance(err, ConnectTimeoutError)
+
+ def _is_read_error(self, err):
+ """Errors that occur after the request has been started, so we should
+ assume that the server began processing it.
+ """
+ return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+ def _is_method_retryable(self, method):
+ """Checks if a given HTTP method should be retried, depending on
+ whether it is included in ``allowed_methods``.
+ """
+ # TODO: For now favor if the Retry implementation sets its own method_whitelist
+ # property outside of our constructor to avoid breaking custom implementations.
+ if "method_whitelist" in self.__dict__:
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ allowed_methods = self.method_whitelist
+ else:
+ allowed_methods = self.allowed_methods
+
+ if allowed_methods and method.upper() not in allowed_methods:
+ return False
+ return True
+
+ def is_retry(self, method, status_code, has_retry_after=False):
+ """Is this method/status code retryable? (Based on allowlists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+ be retried on when that header is present)
+ """
+ if not self._is_method_retryable(method):
+ return False
+
+ if self.status_forcelist and status_code in self.status_forcelist:
+ return True
+
+ return (
+ self.total
+ and self.respect_retry_after_header
+ and has_retry_after
+ and (status_code in self.RETRY_AFTER_STATUS_CODES)
+ )
+
+ def is_exhausted(self):
+ """Are we out of retries?"""
+ retry_counts = (
+ self.total,
+ self.connect,
+ self.read,
+ self.redirect,
+ self.status,
+ self.other,
+ )
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+
+ return min(retry_counts) < 0
+
+ def increment(
+ self,
+ method=None,
+ url=None,
+ response=None,
+ error=None,
+ _pool=None,
+ _stacktrace=None,
+ ):
+ """Return a new Retry object with incremented retry counters.
+
+ :param response: A response object, or None, if the server did not
+ return a response.
+ :type response: :class:`~urllib3.response.HTTPResponse`
+ :param Exception error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: A new ``Retry`` object.
+ """
+ if self.total is False and error:
+ # Disabled, indicate to re-raise the error.
+ raise six.reraise(type(error), error, _stacktrace)
+
+ total = self.total
+ if total is not None:
+ total -= 1
+
+ connect = self.connect
+ read = self.read
+ redirect = self.redirect
+ status_count = self.status
+ other = self.other
+ cause = "unknown"
+ status = None
+ redirect_location = None
+
+ if error and self._is_connection_error(error):
+ # Connect retry?
+ if connect is False:
+ raise six.reraise(type(error), error, _stacktrace)
+ elif connect is not None:
+ connect -= 1
+
+ elif error and self._is_read_error(error):
+ # Read retry?
+ if read is False or not self._is_method_retryable(method):
+ raise six.reraise(type(error), error, _stacktrace)
+ elif read is not None:
+ read -= 1
+
+ elif error:
+ # Other retry?
+ if other is not None:
+ other -= 1
+
+ elif response and response.get_redirect_location():
+ # Redirect retry?
+ if redirect is not None:
+ redirect -= 1
+ cause = "too many redirects"
+ redirect_location = response.get_redirect_location()
+ status = response.status
+
+ else:
+ # Incrementing because of a server error like a 500 in
+ # status_forcelist and the given method is in the allowed_methods
+ cause = ResponseError.GENERIC_ERROR
+ if response and response.status:
+ if status_count is not None:
+ status_count -= 1
+ cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
+ status = response.status
+
+ history = self.history + (
+ RequestHistory(method, url, error, status, redirect_location),
+ )
+
+ new_retry = self.new(
+ total=total,
+ connect=connect,
+ read=read,
+ redirect=redirect,
+ status=status_count,
+ other=other,
+ history=history,
+ )
+
+ if new_retry.is_exhausted():
+ raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+ log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+ return new_retry
+
+ def __repr__(self):
+ return (
+ "{cls.__name__}(total={self.total}, connect={self.connect}, "
+ "read={self.read}, redirect={self.redirect}, status={self.status})"
+ ).format(cls=type(self), self=self)
+
+ def __getattr__(self, item):
+ if item == "method_whitelist":
+ # TODO: Remove this deprecated alias in v2.0
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ return self.allowed_methods
+ try:
+ return getattr(super(Retry, self), item)
+ except AttributeError:
+ return getattr(Retry, item)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/ssl_.py b/third_party/python/pip/pip/_vendor/urllib3/util/ssl_.py
new file mode 100644
index 0000000000..2b45d391d4
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/ssl_.py
@@ -0,0 +1,495 @@
+from __future__ import absolute_import
+
+import hmac
+import os
+import sys
+import warnings
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import (
+ InsecurePlatformWarning,
+ ProxySchemeUnsupported,
+ SNIMissingWarning,
+ SSLError,
+)
+from ..packages import six
+from .url import BRACELESS_IPV6_ADDRZ_RE, IPV4_RE
+
+SSLContext = None
+SSLTransport = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+ALPN_PROTOCOLS = ["http/1.1"]
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}
+
+
+def _const_compare_digest_backport(a, b):
+ """
+ Compare two digests of equal length in constant time.
+
+ The digests must be of type str/bytes.
+ Returns True if the digests match, and False otherwise.
+ """
+ result = abs(len(a) - len(b))
+ for left, right in zip(bytearray(a), bytearray(b)):
+ result |= left ^ right
+ return result == 0
+
+
+_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport)
+
+try: # Test for SSL features
+ import ssl
+ from ssl import CERT_REQUIRED, wrap_socket
+except ImportError:
+ pass
+
+try:
+ from ssl import HAS_SNI # Has SNI?
+except ImportError:
+ pass
+
+try:
+ from .ssltransport import SSLTransport
+except ImportError:
+ pass
+
+
+try: # Platform-specific: Python 3.6
+ from ssl import PROTOCOL_TLS
+
+ PROTOCOL_SSLv23 = PROTOCOL_TLS
+except ImportError:
+ try:
+ from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
+
+ PROTOCOL_SSLv23 = PROTOCOL_TLS
+ except ImportError:
+ PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
+
+try:
+ from ssl import PROTOCOL_TLS_CLIENT
+except ImportError:
+ PROTOCOL_TLS_CLIENT = PROTOCOL_TLS
+
+
+try:
+ from ssl import OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3
+except ImportError:
+ OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+ OP_NO_COMPRESSION = 0x20000
+
+
+try: # OP_NO_TICKET was added in Python 3.6
+ from ssl import OP_NO_TICKET
+except ImportError:
+ OP_NO_TICKET = 0x4000
+
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs, DSS, and other
+# insecure ciphers for security reasons.
+# - NOTE: TLS 1.3 cipher suites are managed through a different interface
+# not exposed by CPython (yet!) and are enabled by default if they're available.
+DEFAULT_CIPHERS = ":".join(
+ [
+ "ECDHE+AESGCM",
+ "ECDHE+CHACHA20",
+ "DHE+AESGCM",
+ "DHE+CHACHA20",
+ "ECDH+AESGCM",
+ "DH+AESGCM",
+ "ECDH+AES",
+ "DH+AES",
+ "RSA+AESGCM",
+ "RSA+AES",
+ "!aNULL",
+ "!eNULL",
+ "!MD5",
+ "!DSS",
+ ]
+)
+
+try:
+ from ssl import SSLContext # Modern SSL?
+except ImportError:
+
+ class SSLContext(object): # Platform-specific: Python 2
+ def __init__(self, protocol_version):
+ self.protocol = protocol_version
+ # Use default values from a real SSLContext
+ self.check_hostname = False
+ self.verify_mode = ssl.CERT_NONE
+ self.ca_certs = None
+ self.options = 0
+ self.certfile = None
+ self.keyfile = None
+ self.ciphers = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
+
+ if cadata is not None:
+ raise SSLError("CA data not supported in older Pythons")
+
+ def set_ciphers(self, cipher_suite):
+ self.ciphers = cipher_suite
+
+ def wrap_socket(self, socket, server_hostname=None, server_side=False):
+ warnings.warn(
+ "A true SSLContext object is not available. This prevents "
+ "urllib3 from configuring SSL appropriately and may cause "
+ "certain SSL connections to fail. You can upgrade to a newer "
+ "version of Python to solve this. For more information, see "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#ssl-warnings",
+ InsecurePlatformWarning,
+ )
+ kwargs = {
+ "keyfile": self.keyfile,
+ "certfile": self.certfile,
+ "ca_certs": self.ca_certs,
+ "cert_reqs": self.verify_mode,
+ "ssl_version": self.protocol,
+ "server_side": server_side,
+ }
+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+ """
+ Checks if given fingerprint matches the supplied certificate.
+
+ :param cert:
+ Certificate as bytes object.
+ :param fingerprint:
+ Fingerprint as string of hexdigits, can be interspersed by colons.
+ """
+
+ fingerprint = fingerprint.replace(":", "").lower()
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint))
+
+ # We need encode() here for py32; works on py2 and py3.
+ fingerprint_bytes = unhexlify(fingerprint.encode())
+
+ cert_digest = hashfunc(cert).digest()
+
+ if not _const_compare_digest(cert_digest, fingerprint_bytes):
+ raise SSLError(
+ 'Fingerprints did not match. Expected "{0}", got "{1}".'.format(
+ fingerprint, hexlify(cert_digest)
+ )
+ )
+
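+# An illustrative sketch (not part of the vendored module): the digest
+# length selects the hash (32 hex chars -> md5, 40 -> sha1, 64 -> sha256),
+# and colons in the fingerprint are ignored. `sock` is an assumed SSLSocket.
+#
+#     import hashlib
+#     cert = sock.getpeercert(binary_form=True)  # DER-encoded bytes
+#     expected = hashlib.sha256(cert).hexdigest()
+#     assert_fingerprint(cert, expected)  # raises SSLError on mismatch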
+
+def resolve_cert_reqs(candidate):
+ """
+ Resolves the argument to a numeric constant, which can be passed to
+ the wrap_socket function/method from the ssl module.
+ Defaults to :data:`ssl.CERT_REQUIRED`.
+ If given a string it is assumed to be the name of the constant in the
+ :mod:`ssl` module or its abbreviation.
+ (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+ If it's neither `None` nor a string we assume it is already the numeric
+ constant which can directly be passed to wrap_socket.
+ """
+ if candidate is None:
+ return CERT_REQUIRED
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, "CERT_" + candidate)
+ return res
+
+ return candidate
+
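+# An illustrative sketch (not part of the vendored module): all of these
+# resolve to the same ssl module constant.
+#
+#     import ssl
+#     resolve_cert_reqs(None)             # -> ssl.CERT_REQUIRED
+#     resolve_cert_reqs("REQUIRED")       # -> ssl.CERT_REQUIRED
+#     resolve_cert_reqs("CERT_REQUIRED")  # -> ssl.CERT_REQUIRED
+#     resolve_cert_reqs(ssl.CERT_NONE)    # passed through unchanged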
+
+def resolve_ssl_version(candidate):
+ """
+ Like :func:`resolve_cert_reqs`, but resolves an SSL protocol version instead.
+ """
+ if candidate is None:
+ return PROTOCOL_TLS
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, "PROTOCOL_" + candidate)
+ return res
+
+ return candidate
+
+
+def create_urllib3_context(
+ ssl_version=None, cert_reqs=None, options=None, ciphers=None
+):
+ """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+ By default, this function does a lot of the same work that
+ ``ssl.create_default_context`` does on Python 3.4+. It:
+
+ - Disables SSLv2, SSLv3, and compression
+ - Sets a restricted set of server ciphers
+
+ If you wish to enable SSLv3, you can do::
+
+ from pip._vendor.urllib3.util import ssl_
+ context = ssl_.create_urllib3_context()
+ context.options &= ~ssl_.OP_NO_SSLv3
+
+ You can do the same to enable compression (substituting ``COMPRESSION``
+ for ``SSLv3`` in the last line above).
+
+ :param ssl_version:
+ The desired protocol version to use. This will default to
+ PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+ the server and your installation of OpenSSL support.
+ :param cert_reqs:
+ Whether to require the certificate verification. This defaults to
+ ``ssl.CERT_REQUIRED``.
+ :param options:
+ Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+ ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.
+ :param ciphers:
+ Which cipher suites to allow the server to select.
+ :returns:
+ Constructed SSLContext object with specified options
+ :rtype: SSLContext
+ """
+ # PROTOCOL_TLS is deprecated in Python 3.10
+ if not ssl_version or ssl_version == PROTOCOL_TLS:
+ ssl_version = PROTOCOL_TLS_CLIENT
+
+ context = SSLContext(ssl_version)
+
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+ if options is None:
+ options = 0
+ # SSLv2 is easily broken and is considered harmful and dangerous
+ options |= OP_NO_SSLv2
+ # SSLv3 has several problems and is now dangerous
+ options |= OP_NO_SSLv3
+ # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+ # (issue #309)
+ options |= OP_NO_COMPRESSION
+ # TLSv1.2 only. Unless set explicitly, do not request tickets.
+ # This may save some bandwidth on the wire, and although the ticket is encrypted,
+ # there is a risk associated with it being on the wire
+ # if the server is not rotating its ticketing keys properly.
+ options |= OP_NO_TICKET
+
+ context.options |= options
+
+ # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
+ # necessary for conditional client cert authentication with TLS 1.3.
+ # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
+ # versions of Python. We only enable on Python 3.7.4+ or if certificate
+ # verification is enabled to work around Python issue #37428
+ # See: https://bugs.python.org/issue37428
+ if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
+ context, "post_handshake_auth", None
+ ) is not None:
+ context.post_handshake_auth = True
+
+ def disable_check_hostname():
+ if (
+ getattr(context, "check_hostname", None) is not None
+ ): # Platform-specific: Python 3.2
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
+
+ # The order of the below lines setting verify_mode and check_hostname
+ # matter due to safe-guards SSLContext has to prevent an SSLContext with
+ # check_hostname=True, verify_mode=NONE/OPTIONAL. This is made even more
+ # complex because we don't know whether PROTOCOL_TLS_CLIENT will be used
+ # or not so we don't know the initial state of the freshly created SSLContext.
+ if cert_reqs == ssl.CERT_REQUIRED:
+ context.verify_mode = cert_reqs
+ disable_check_hostname()
+ else:
+ disable_check_hostname()
+ context.verify_mode = cert_reqs
+
+ # Enable logging of TLS session keys via defacto standard environment variable
+ # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.
+ if hasattr(context, "keylog_filename"):
+ sslkeylogfile = os.environ.get("SSLKEYLOGFILE")
+ if sslkeylogfile:
+ context.keylog_filename = sslkeylogfile
+
+ return context
+
+
+def ssl_wrap_socket(
+ sock,
+ keyfile=None,
+ certfile=None,
+ cert_reqs=None,
+ ca_certs=None,
+ server_hostname=None,
+ ssl_version=None,
+ ciphers=None,
+ ssl_context=None,
+ ca_cert_dir=None,
+ key_password=None,
+ ca_cert_data=None,
+ tls_in_tls=False,
+):
+ """
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+ :param server_hostname:
+ When SNI is supported, the expected hostname of the certificate
+ :param ssl_context:
+ A pre-made :class:`SSLContext` object. If none is provided, one will
+ be created using :func:`create_urllib3_context`.
+ :param ciphers:
+ A string of ciphers we wish the client to support.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
+ :param key_password:
+ Optional password if the keyfile is encrypted.
+ :param ca_cert_data:
+ Optional string containing CA certificates in PEM format suitable for
+ passing as the cadata parameter to SSLContext.load_verify_locations()
+ :param tls_in_tls:
+ Use SSLTransport to wrap the existing socket.
+ """
+ context = ssl_context
+ if context is None:
+ # Note: This branch of code and all the variables in it are no longer
+ # used by urllib3 itself. We should consider deprecating and removing
+ # this code.
+ context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
+
+ if ca_certs or ca_cert_dir or ca_cert_data:
+ try:
+ context.load_verify_locations(ca_certs, ca_cert_dir, ca_cert_data)
+ except (IOError, OSError) as e:
+ raise SSLError(e)
+
+ elif ssl_context is None and hasattr(context, "load_default_certs"):
+ # try to load OS default certs; works well on Windows (requires Python 3.4+)
+ context.load_default_certs()
+
+ # Attempt to detect if we get the goofy behavior of the
+ # keyfile being encrypted and OpenSSL asking for the
+ # passphrase via the terminal and instead error out.
+ if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
+ raise SSLError("Client private key is encrypted, password is required")
+
+ if certfile:
+ if key_password is None:
+ context.load_cert_chain(certfile, keyfile)
+ else:
+ context.load_cert_chain(certfile, keyfile, key_password)
+
+ try:
+ if hasattr(context, "set_alpn_protocols"):
+ context.set_alpn_protocols(ALPN_PROTOCOLS)
+ except NotImplementedError: # Defensive: in CI, we always have set_alpn_protocols
+ pass
+
+ # If we detect server_hostname is an IP address then the SNI
+ # extension should not be used according to RFC3546 Section 3.1
+ use_sni_hostname = server_hostname and not is_ipaddress(server_hostname)
+ # SecureTransport uses server_hostname in certificate verification.
+ send_sni = (use_sni_hostname and HAS_SNI) or (
+ IS_SECURETRANSPORT and server_hostname
+ )
+ # Do not warn the user if server_hostname is an invalid SNI hostname.
+ if not HAS_SNI and use_sni_hostname:
+ warnings.warn(
+ "An HTTPS request has been made, but the SNI (Server Name "
+ "Indication) extension to TLS is not available on this platform. "
+ "This may cause the server to present an incorrect TLS "
+ "certificate, which can cause validation failures. You can upgrade to "
+ "a newer version of Python to solve this. For more information, see "
+ "https://urllib3.readthedocs.io/en/1.26.x/advanced-usage.html"
+ "#ssl-warnings",
+ SNIMissingWarning,
+ )
+
+ if send_sni:
+ ssl_sock = _ssl_wrap_socket_impl(
+ sock, context, tls_in_tls, server_hostname=server_hostname
+ )
+ else:
+ ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls)
+ return ssl_sock
+
+
+def is_ipaddress(hostname):
+ """Detects whether the hostname given is an IPv4 or IPv6 address.
+ Also detects IPv6 addresses with Zone IDs.
+
+ :param str hostname: Hostname to examine.
+ :return: True if the hostname is an IP address, False otherwise.
+ """
+ if not six.PY2 and isinstance(hostname, bytes):
+ # IDN A-label bytes are ASCII compatible.
+ hostname = hostname.decode("ascii")
+ return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname))
+
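+# An illustrative sketch (not part of the vendored module):
+#
+#     is_ipaddress("10.0.0.1")     # -> True
+#     is_ipaddress("::1")          # -> True (braceless IPv6)
+#     is_ipaddress("example.com")  # -> False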
+
+def _is_key_file_encrypted(key_file):
+ """Detects if a key file is encrypted or not."""
+ with open(key_file, "r") as f:
+ for line in f:
+ # Look for Proc-Type: 4,ENCRYPTED
+ if "ENCRYPTED" in line:
+ return True
+
+ return False
+
+
+def _ssl_wrap_socket_impl(sock, ssl_context, tls_in_tls, server_hostname=None):
+ if tls_in_tls:
+ if not SSLTransport:
+ # Import error, ssl is not available.
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires support for the 'ssl' module"
+ )
+
+ SSLTransport._validate_ssl_context_for_tls_in_tls(ssl_context)
+ return SSLTransport(sock, ssl_context, server_hostname)
+
+ if server_hostname:
+ return ssl_context.wrap_socket(sock, server_hostname=server_hostname)
+ else:
+ return ssl_context.wrap_socket(sock)
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/ssl_match_hostname.py b/third_party/python/pip/pip/_vendor/urllib3/util/ssl_match_hostname.py
new file mode 100644
index 0000000000..1dd950c489
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/ssl_match_hostname.py
@@ -0,0 +1,159 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# Note: This file is under the PSF license as the code comes from the python
+# stdlib. http://docs.python.org/3/license.html
+
+import re
+import sys
+
+# ipaddress has been backported to 2.6+ on PyPI. If it is installed on the
+# system, use it to handle IPAddress ServerAltnames (this was added in
+# Python 3.5); otherwise, only do DNS matching. This allows
+# util.ssl_match_hostname to continue to be used in Python 2.7.
+try:
+ import ipaddress
+except ImportError:
+ ipaddress = None
+
+__version__ = "3.5.0.1"
+
+
+class CertificateError(ValueError):
+ pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r".")
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count("*")
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn)
+ )
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == "*":
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append("[^.]+")
+ elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
+ return pat.match(hostname)
+
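+# An illustrative sketch (not part of the vendored module): a wildcard only
+# matches a single left-most label, per RFC 6125.
+#
+#     bool(_dnsname_match("*.example.com", "www.example.com"))  # -> True
+#     bool(_dnsname_match("*.example.com", "a.b.example.com"))  # -> False
+#     bool(_dnsname_match("example.com", "example.com"))        # -> True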
+
+def _to_unicode(obj):
+ if isinstance(obj, str) and sys.version_info < (3,):
+ # ignored flake8 # F821 to support python 2.7 function
+ obj = unicode(obj, encoding="ascii", errors="strict") # noqa: F821
+ return obj
+
+
+def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ # Divergence from upstream: ipaddress can't handle byte str
+ ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+ return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError(
+ "empty or no certificate, match_hostname needs a "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED"
+ )
+ try:
+ # Divergence from upstream: ipaddress can't handle byte str
+ host_ip = ipaddress.ip_address(_to_unicode(hostname))
+ except (UnicodeError, ValueError):
+ # ValueError: Not an IP address (common case)
+ # UnicodeError: Divergence from upstream: Have to deal with ipaddress not taking
+ # byte strings. addresses should be all ascii, so we consider it not
+ # an ipaddress in this case
+ host_ip = None
+ except AttributeError:
+ # Divergence from upstream: Make ipaddress library optional
+ if ipaddress is None:
+ host_ip = None
+ else: # Defensive
+ raise
+ dnsnames = []
+ san = cert.get("subjectAltName", ())
+ for key, value in san:
+ if key == "DNS":
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == "IP Address":
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get("subject", ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == "commonName":
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError(
+ "hostname %r "
+ "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
+ )
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError(
+ "no appropriate commonName or subjectAltName fields were found"
+ )
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/ssltransport.py b/third_party/python/pip/pip/_vendor/urllib3/util/ssltransport.py
new file mode 100644
index 0000000000..4a7105d179
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/ssltransport.py
@@ -0,0 +1,221 @@
+import io
+import socket
+import ssl
+
+from ..exceptions import ProxySchemeUnsupported
+from ..packages import six
+
+SSL_BLOCKSIZE = 16384
+
+
+class SSLTransport:
+ """
+ The SSLTransport wraps an existing socket and establishes an SSL connection.
+
+ Unlike Python's SSLSocket implementation, it allows you to chain
+ multiple TLS connections together. It's particularly useful if you need to
+ implement TLS within TLS.
+
+ The class supports most of the socket API operations.
+ """
+
+ @staticmethod
+ def _validate_ssl_context_for_tls_in_tls(ssl_context):
+ """
+ Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
+ for TLS in TLS.
+
+ The only requirement is that the ssl_context provides the 'wrap_bio'
+ method.
+ """
+
+ if not hasattr(ssl_context, "wrap_bio"):
+ if six.PY2:
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires SSLContext.wrap_bio() which isn't "
+ "supported on Python 2"
+ )
+ else:
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires SSLContext.wrap_bio() which isn't "
+ "available on non-native SSLContext"
+ )
+
+ def __init__(
+ self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
+ ):
+ """
+ Create an SSLTransport around socket using the provided ssl_context.
+ """
+ self.incoming = ssl.MemoryBIO()
+ self.outgoing = ssl.MemoryBIO()
+
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self.socket = socket
+
+ self.sslobj = ssl_context.wrap_bio(
+ self.incoming, self.outgoing, server_hostname=server_hostname
+ )
+
+ # Perform initial handshake.
+ self._ssl_io_loop(self.sslobj.do_handshake)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *_):
+ self.close()
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ def read(self, len=1024, buffer=None):
+ return self._wrap_ssl_read(len, buffer)
+
+ def recv(self, len=1024, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to recv")
+ return self._wrap_ssl_read(len)
+
+ def recv_into(self, buffer, nbytes=None, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to recv_into")
+ if buffer and (nbytes is None):
+ nbytes = len(buffer)
+ elif nbytes is None:
+ nbytes = 1024
+ return self.read(nbytes, buffer)
+
+ def sendall(self, data, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to sendall")
+ count = 0
+ with memoryview(data) as view, view.cast("B") as byte_view:
+ amount = len(byte_view)
+ while count < amount:
+ v = self.send(byte_view[count:])
+ count += v
+
+ def send(self, data, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to send")
+ response = self._ssl_io_loop(self.sslobj.write, data)
+ return response
+
+ def makefile(
+ self, mode="r", buffering=None, encoding=None, errors=None, newline=None
+ ):
+ """
+ Python's http.client uses makefile and buffered io when reading HTTP
+ messages and we need to support it.
+
+ This is unfortunately a copy and paste of socket.py makefile with small
+ changes to point to the socket directly.
+ """
+ if not set(mode) <= {"r", "w", "b"}:
+ raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
+
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = socket.SocketIO(self, rawmode)
+ self.socket._io_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
+
+ def unwrap(self):
+ self._ssl_io_loop(self.sslobj.unwrap)
+
+ def close(self):
+ self.socket.close()
+
+ def getpeercert(self, binary_form=False):
+ return self.sslobj.getpeercert(binary_form)
+
+ def version(self):
+ return self.sslobj.version()
+
+ def cipher(self):
+ return self.sslobj.cipher()
+
+ def selected_alpn_protocol(self):
+ return self.sslobj.selected_alpn_protocol()
+
+ def selected_npn_protocol(self):
+ return self.sslobj.selected_npn_protocol()
+
+ def shared_ciphers(self):
+ return self.sslobj.shared_ciphers()
+
+ def compression(self):
+ return self.sslobj.compression()
+
+ def settimeout(self, value):
+ self.socket.settimeout(value)
+
+ def gettimeout(self):
+ return self.socket.gettimeout()
+
+ def _decref_socketios(self):
+ self.socket._decref_socketios()
+
+ def _wrap_ssl_read(self, len, buffer=None):
+ try:
+ return self._ssl_io_loop(self.sslobj.read, len, buffer)
+ except ssl.SSLError as e:
+ if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
+ return 0  # EOF
+ else:
+ raise
+
+ def _ssl_io_loop(self, func, *args):
+ """Performs an I/O loop between incoming/outgoing and the socket."""
+ should_loop = True
+ ret = None
+
+ while should_loop:
+ errno = None
+ try:
+ ret = func(*args)
+ except ssl.SSLError as e:
+ if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
+ # WANT_READ, and WANT_WRITE are expected, others are not.
+ raise e
+ errno = e.errno
+
+ buf = self.outgoing.read()
+ self.socket.sendall(buf)
+
+ if errno is None:
+ should_loop = False
+ elif errno == ssl.SSL_ERROR_WANT_READ:
+ buf = self.socket.recv(SSL_BLOCKSIZE)
+ if buf:
+ self.incoming.write(buf)
+ else:
+ self.incoming.write_eof()
+ return ret
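
The _ssl_io_loop above is the core of this TLS-in-TLS transport: each SSLObject operation is retried while ciphertext is shuttled between the two memory BIOs and the underlying socket. A minimal standalone sketch of the same pattern using only the stdlib ssl module (it assumes outbound network access to example.com:443 and uses a 16 KiB read size, matching the module's SSL_BLOCKSIZE)::

    import socket
    import ssl

    def ssl_io_loop(sock, sslobj, incoming, outgoing, func, *args):
        # Same shape as SSLTransport._ssl_io_loop: run the SSLObject
        # operation, flush whatever it wrote to the outgoing BIO onto the
        # wire, and feed received bytes into the incoming BIO until the
        # operation stops raising WANT_READ/WANT_WRITE.
        while True:
            errno = None
            try:
                ret = func(*args)
            except ssl.SSLError as e:
                if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
                    raise
                errno = e.errno
            sock.sendall(outgoing.read())
            if errno is None:
                return ret
            if errno == ssl.SSL_ERROR_WANT_READ:
                buf = sock.recv(16384)
                if buf:
                    incoming.write(buf)
                else:
                    incoming.write_eof()

    ctx = ssl.create_default_context()
    incoming, outgoing = ssl.MemoryBIO(), ssl.MemoryBIO()
    with socket.create_connection(("example.com", 443)) as sock:
        sslobj = ctx.wrap_bio(incoming, outgoing, server_hostname="example.com")
        ssl_io_loop(sock, sslobj, incoming, outgoing, sslobj.do_handshake)
        print(sslobj.version())  # e.g. TLSv1.3
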
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/timeout.py b/third_party/python/pip/pip/_vendor/urllib3/util/timeout.py
new file mode 100644
index 0000000000..ff69593b05
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/timeout.py
@@ -0,0 +1,268 @@
+from __future__ import absolute_import
+
+import time
+
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+ """Timeout configuration.
+
+ Timeouts can be defined as a default for a pool:
+
+ .. code-block:: python
+
+ timeout = Timeout(connect=2.0, read=7.0)
+ http = PoolManager(timeout=timeout)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool):
+
+ .. code-block:: python
+
+ response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+ Timeouts can be disabled by setting all the parameters to ``None``:
+
+ .. code-block:: python
+
+ no_timeout = Timeout(connect=None, read=None)
+ response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+ :param total:
+ This combines the connect and read timeouts into one; the read timeout
+ will be set to the time leftover from the connect attempt. In the
+ event that both a connect timeout and a total are specified, or a read
+ timeout and a total are specified, the shorter timeout will be applied.
+
+ Defaults to None.
+
+ :type total: int, float, or None
+
+ :param connect:
+ The maximum amount of time (in seconds) to wait for a connection
+ attempt to a server to succeed. Omitting the parameter will default the
+ connect timeout to the system default, probably `the global default
+ timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout for connection attempts.
+
+ :type connect: int, float, or None
+
+ :param read:
+ The maximum amount of time (in seconds) to wait between consecutive
+ read operations for a response from the server. Omitting the parameter
+ will default the read timeout to the system default, probably `the
+ global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout.
+
+ :type read: int, float, or None
+
+ .. note::
+
+ Many factors can affect the total amount of time for urllib3 to return
+ an HTTP response.
+
+ For example, Python's DNS resolver does not obey the timeout specified
+ on the socket. Other factors that can affect total request time include
+ high CPU load, high swap, the program running at a low priority level,
+ or other behaviors.
+
+ In addition, the read and total timeouts only measure the time between
+ read operations on the socket connecting the client and the server,
+ not the total amount of time for the request to return a complete
+ response. For most requests, the timeout is raised because the server
+ has not sent the first byte in the specified time. This is not always
+ the case; if a server streams one byte every fifteen seconds, a timeout
+ of 20 seconds will not trigger, even though the request will take
+ several minutes to complete.
+
+ If your goal is to cut off any request after a set amount of wall clock
+ time, consider having a second "watcher" thread to cut off a slow
+ request.
+ """
+
+ #: A sentinel object representing the default timeout value
+ DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+ def __init__(self, total=None, connect=_Default, read=_Default):
+ self._connect = self._validate_timeout(connect, "connect")
+ self._read = self._validate_timeout(read, "read")
+ self.total = self._validate_timeout(total, "total")
+ self._start_connect = None
+
+ def __repr__(self):
+ return "%s(connect=%r, read=%r, total=%r)" % (
+ type(self).__name__,
+ self._connect,
+ self._read,
+ self.total,
+ )
+
+ # __str__ provided for backwards compatibility
+ __str__ = __repr__
+
+ @classmethod
+ def _validate_timeout(cls, value, name):
+ """Check that a timeout attribute is valid.
+
+ :param value: The timeout value to validate
+ :param name: The name of the timeout attribute to validate. This is
+ used to specify in error messages.
+ :return: The validated and casted version of the given value.
+ :raises ValueError: If it is a numeric value less than or equal to
+ zero, or the type is not an integer, float, or None.
+ """
+ if value is _Default:
+ return cls.DEFAULT_TIMEOUT
+
+ if value is None or value is cls.DEFAULT_TIMEOUT:
+ return value
+
+ if isinstance(value, bool):
+ raise ValueError(
+ "Timeout cannot be a boolean value. It must "
+ "be an int, float or None."
+ )
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ raise ValueError(
+ "Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value)
+ )
+
+ try:
+ if value <= 0:
+ raise ValueError(
+ "Attempted to set %s timeout to %s, but the "
+ "timeout cannot be set to a value less "
+ "than or equal to 0." % (name, value)
+ )
+ except TypeError:
+ # Python 3
+ raise ValueError(
+ "Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value)
+ )
+
+ return value
+
+ @classmethod
+ def from_float(cls, timeout):
+ """Create a new Timeout from a legacy timeout value.
+
+ The timeout value used by httplib.py sets the same timeout on the
+ connect() and recv() socket requests. This creates a :class:`Timeout`
+ object that sets the individual timeouts to the ``timeout`` value
+ passed to this function.
+
+ :param timeout: The legacy timeout value.
+ :type timeout: integer, float, sentinel default object, or None
+ :return: Timeout object
+ :rtype: :class:`Timeout`
+ """
+ return Timeout(read=timeout, connect=timeout)
+
+ def clone(self):
+ """Create a copy of the timeout object
+
+ Timeout properties are stored per-pool but each request needs a fresh
+ Timeout object to ensure each one has its own start/stop configured.
+
+ :return: a copy of the timeout object
+ :rtype: :class:`Timeout`
+ """
+ # We can't use copy.deepcopy because that will also create a new object
+ # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+ # detect the user default.
+ return Timeout(connect=self._connect, read=self._read, total=self.total)
+
+ def start_connect(self):
+ """Start the timeout clock, used during a connect() attempt
+
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to start a timer that has been started already.
+ """
+ if self._start_connect is not None:
+ raise TimeoutStateError("Timeout timer has already been started.")
+ self._start_connect = current_time()
+ return self._start_connect
+
+ def get_connect_duration(self):
+ """Gets the time elapsed since the call to :meth:`start_connect`.
+
+ :return: Elapsed time in seconds.
+ :rtype: float
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to get duration for a timer that hasn't been started.
+ """
+ if self._start_connect is None:
+ raise TimeoutStateError(
+ "Can't get connect duration for timer that has not started."
+ )
+ return current_time() - self._start_connect
+
+ @property
+ def connect_timeout(self):
+ """Get the value to use when setting a connection timeout.
+
+ This will be a positive float or integer, the value None
+ (never timeout), or the default system timeout.
+
+ :return: Connect timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ """
+ if self.total is None:
+ return self._connect
+
+ if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+ return self.total
+
+ return min(self._connect, self.total)
+
+ @property
+ def read_timeout(self):
+ """Get the value for the read timeout.
+
+ This assumes some time has elapsed in the connection timeout and
+ computes the read timeout appropriately.
+
+ If self.total is set, the read timeout is dependent on the amount of
+ time taken by the connect timeout. If the connection time has not been
+ established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+ raised.
+
+ :return: Value to use for the read timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+ has not yet been called on this object.
+ """
+ if (
+ self.total is not None
+ and self.total is not self.DEFAULT_TIMEOUT
+ and self._read is not None
+ and self._read is not self.DEFAULT_TIMEOUT
+ ):
+ # In case the connect timeout has not yet been established.
+ if self._start_connect is None:
+ return self._read
+ return max(0, min(self.total - self.get_connect_duration(), self._read))
+ elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+ return max(0, self.total - self.get_connect_duration())
+ else:
+ return self._read
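
A short usage sketch of the interaction between connect, read, and total described above (it assumes the module is importable as urllib3.util.timeout; the numbers are illustrative)::

    import time
    from urllib3.util.timeout import Timeout

    defaults = Timeout(connect=2.0, read=7.0, total=5.0)
    t = defaults.clone()         # one fresh Timeout per request: start/stop
                                 # state lives on the object itself
    print(t.connect_timeout)     # 2.0 -- min(connect=2.0, total=5.0)

    t.start_connect()            # start the clock for the connect phase
    time.sleep(0.1)              # pretend the connect took ~100 ms
    print(t.read_timeout)        # ~4.9 -- min(total - elapsed, read)
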
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/url.py b/third_party/python/pip/pip/_vendor/urllib3/util/url.py
new file mode 100644
index 0000000000..d6d0bbcea6
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/url.py
@@ -0,0 +1,435 @@
+from __future__ import absolute_import
+
+import re
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+from ..packages import six
+
+url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ("http", "https", None)
+
+# Almost all of these patterns were derived from the
+# 'rfc3986' module: https://github.com/python-hyper/rfc3986
+PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
+SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
+URI_RE = re.compile(
+ r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
+ r"(?://([^\\/?#]*))?"
+ r"([^?#]*)"
+ r"(?:\?([^#]*))?"
+ r"(?:#(.*))?$",
+ re.UNICODE | re.DOTALL,
+)
+
+IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
+HEX_PAT = "[0-9A-Fa-f]{1,4}"
+LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
+_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
+_variations = [
+ # 6( h16 ":" ) ls32
+ "(?:%(hex)s:){6}%(ls32)s",
+ # "::" 5( h16 ":" ) ls32
+ "::(?:%(hex)s:){5}%(ls32)s",
+ # [ h16 ] "::" 4( h16 ":" ) ls32
+ "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
+ # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
+ "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
+ # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
+ "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
+ # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
+ "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
+ # [ *4( h16 ":" ) h16 ] "::" ls32
+ "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
+ # [ *5( h16 ":" ) h16 ] "::" h16
+ "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
+ # [ *6( h16 ":" ) h16 ] "::"
+ "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
+]
+
+UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
+IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
+ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
+IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
+REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
+TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
+
+IPV4_RE = re.compile("^" + IPV4_PAT + "$")
+IPV6_RE = re.compile("^" + IPV6_PAT + "$")
+IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
+BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
+ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
+
+_HOST_PORT_PAT = ("^(%s|%s|%s)(?::0*?(|0|[1-9][0-9]{0,4}))?$") % (
+ REG_NAME_PAT,
+ IPV4_PAT,
+ IPV6_ADDRZ_PAT,
+)
+_HOST_PORT_RE = re.compile(_HOST_PORT_PAT, re.UNICODE | re.DOTALL)
+
+UNRESERVED_CHARS = set(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
+)
+SUB_DELIM_CHARS = set("!$&'()*+,;=")
+USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
+PATH_CHARS = USERINFO_CHARS | {"@", "/"}
+QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
+
+
+class Url(namedtuple("Url", url_attrs)):
+ """
+ Data structure for representing an HTTP URL. Used as a return value for
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
+ """
+
+ __slots__ = ()
+
+ def __new__(
+ cls,
+ scheme=None,
+ auth=None,
+ host=None,
+ port=None,
+ path=None,
+ query=None,
+ fragment=None,
+ ):
+ if path and not path.startswith("/"):
+ path = "/" + path
+ if scheme is not None:
+ scheme = scheme.lower()
+ return super(Url, cls).__new__(
+ cls, scheme, auth, host, port, path, query, fragment
+ )
+
+ @property
+ def hostname(self):
+ """For backwards-compatibility with urlparse. We're nice like that."""
+ return self.host
+
+ @property
+ def request_uri(self):
+ """Absolute path including the query string."""
+ uri = self.path or "/"
+
+ if self.query is not None:
+ uri += "?" + self.query
+
+ return uri
+
+ @property
+ def netloc(self):
+ """Network location including host and port"""
+ if self.port:
+ return "%s:%d" % (self.host, self.port)
+ return self.host
+
+ @property
+ def url(self):
+ """
+ Convert self into a url
+
+ This function should more or less round-trip with :func:`.parse_url`. The
+ returned url may not be exactly the same as the url inputted to
+ :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+ with a blank port will have : removed).
+
+ Example::
+
+ >>> U = parse_url('http://google.com/mail/')
+ >>> U.url
+ 'http://google.com/mail/'
+ >>> Url('http', 'username:password', 'host.com', 80,
+ ... '/path', 'query', 'fragment').url
+ 'http://username:password@host.com:80/path?query#fragment'
+ """
+ scheme, auth, host, port, path, query, fragment = self
+ url = u""
+
+ # We use "is not None" we want things to happen with empty strings (or 0 port)
+ if scheme is not None:
+ url += scheme + u"://"
+ if auth is not None:
+ url += auth + u"@"
+ if host is not None:
+ url += host
+ if port is not None:
+ url += u":" + str(port)
+ if path is not None:
+ url += path
+ if query is not None:
+ url += u"?" + query
+ if fragment is not None:
+ url += u"#" + fragment
+
+ return url
+
+ def __str__(self):
+ return self.url
+
+
+def split_first(s, delims):
+ """
+ .. deprecated:: 1.25
+
+ Given a string and an iterable of delimiters, split on the first found
+ delimiter. Return two split parts and the matched delimiter.
+
+ If not found, then the first part is the full input string.
+
+ Example::
+
+ >>> split_first('foo/bar?baz', '?/=')
+ ('foo', 'bar?baz', '/')
+ >>> split_first('foo/bar?baz', '123')
+ ('foo/bar?baz', '', None)
+
+ Scales linearly with number of delims. Not ideal for large number of delims.
+ """
+ min_idx = None
+ min_delim = None
+ for d in delims:
+ idx = s.find(d)
+ if idx < 0:
+ continue
+
+ if min_idx is None or idx < min_idx:
+ min_idx = idx
+ min_delim = d
+
+ if min_idx is None or min_idx < 0:
+ return s, "", None
+
+ return s[:min_idx], s[min_idx + 1 :], min_delim
+
+
+def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
+ """Percent-encodes a URI component without reapplying
+ onto an already percent-encoded component.
+ """
+ if component is None:
+ return component
+
+ component = six.ensure_text(component)
+
+ # Normalize existing percent-encoded bytes.
+ # Try to see if the component we're encoding is already percent-encoded
+ # so we can skip all '%' characters but still encode all others.
+ component, percent_encodings = PERCENT_RE.subn(
+ lambda match: match.group(0).upper(), component
+ )
+
+ uri_bytes = component.encode("utf-8", "surrogatepass")
+ is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
+ encoded_component = bytearray()
+
+ for i in range(0, len(uri_bytes)):
+ # Will return a single character bytestring on both Python 2 & 3
+ byte = uri_bytes[i : i + 1]
+ byte_ord = ord(byte)
+ if (is_percent_encoded and byte == b"%") or (
+ byte_ord < 128 and byte.decode() in allowed_chars
+ ):
+ encoded_component += byte
+ continue
+ encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
+
+ return encoded_component.decode(encoding)
+
+
+def _remove_path_dot_segments(path):
+ # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
+ segments = path.split("/") # Turn the path into a list of segments
+ output = [] # Initialize the variable to use to store output
+
+ for segment in segments:
+ # '.' is the current directory, so ignore it, it is superfluous
+ if segment == ".":
+ continue
+ # Anything other than '..', should be appended to the output
+ elif segment != "..":
+ output.append(segment)
+ # In this case segment == '..', if we can, we should pop the last
+ # element
+ elif output:
+ output.pop()
+
+ # If the path starts with '/' and the output is empty or the first string
+ # is non-empty
+ if path.startswith("/") and (not output or output[0]):
+ output.insert(0, "")
+
+ # If the path starts with '/.' or '/..' ensure we add one more empty
+ # string to add a trailing '/'
+ if path.endswith(("/.", "/..")):
+ output.append("")
+
+ return "/".join(output)
+
+
+def _normalize_host(host, scheme):
+ if host:
+ if isinstance(host, six.binary_type):
+ host = six.ensure_str(host)
+
+ if scheme in NORMALIZABLE_SCHEMES:
+ is_ipv6 = IPV6_ADDRZ_RE.match(host)
+ if is_ipv6:
+ # IPv6 hosts of the form 'a::b%zone' are encoded in a URL as
+ # such per RFC 6874: 'a::b%25zone'. Unquote the ZoneID
+ # separator as necessary to return a valid RFC 4007 scoped IP.
+ match = ZONE_ID_RE.search(host)
+ if match:
+ start, end = match.span(1)
+ zone_id = host[start:end]
+
+ if zone_id.startswith("%25") and zone_id != "%25":
+ zone_id = zone_id[3:]
+ else:
+ zone_id = zone_id[1:]
+ zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
+ return host[:start].lower() + zone_id + host[end:]
+ else:
+ return host.lower()
+ elif not IPV4_RE.match(host):
+ return six.ensure_str(
+ b".".join([_idna_encode(label) for label in host.split(".")])
+ )
+ return host
+
+
+def _idna_encode(name):
+ if name and any([ord(x) > 128 for x in name]):
+ try:
+ from pip._vendor import idna
+ except ImportError:
+ six.raise_from(
+ LocationParseError("Unable to parse URL without the 'idna' module"),
+ None,
+ )
+ try:
+ return idna.encode(name.lower(), strict=True, std3_rules=True)
+ except idna.IDNAError:
+ six.raise_from(
+ LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
+ )
+ return name.lower().encode("ascii")
+
+
+def _encode_target(target):
+ """Percent-encodes a request target so that there are no invalid characters"""
+ path, query = TARGET_RE.match(target).groups()
+ target = _encode_invalid_chars(path, PATH_CHARS)
+ query = _encode_invalid_chars(query, QUERY_CHARS)
+ if query is not None:
+ target += "?" + query
+ return target
+
+
+def parse_url(url):
+ """
+ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+ performed to parse incomplete urls. Fields not provided will be None.
+ This parser is RFC 3986 and RFC 6874 compliant.
+
+ The parser logic and helper functions are based heavily on
+ work done in the ``rfc3986`` module.
+
+ :param str url: URL to parse into a :class:`.Url` namedtuple.
+
+ Partly backwards-compatible with :mod:`urlparse`.
+
+ Example::
+
+ >>> parse_url('http://google.com/mail/')
+ Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+ >>> parse_url('google.com:80')
+ Url(scheme=None, host='google.com', port=80, path=None, ...)
+ >>> parse_url('/foo?bar')
+ Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+ """
+ if not url:
+ # Empty
+ return Url()
+
+ source_url = url
+ if not SCHEME_RE.search(url):
+ url = "//" + url
+
+ try:
+ scheme, authority, path, query, fragment = URI_RE.match(url).groups()
+ normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
+
+ if scheme:
+ scheme = scheme.lower()
+
+ if authority:
+ auth, _, host_port = authority.rpartition("@")
+ auth = auth or None
+ host, port = _HOST_PORT_RE.match(host_port).groups()
+ if auth and normalize_uri:
+ auth = _encode_invalid_chars(auth, USERINFO_CHARS)
+ if port == "":
+ port = None
+ else:
+ auth, host, port = None, None, None
+
+ if port is not None:
+ port = int(port)
+ if not (0 <= port <= 65535):
+ raise LocationParseError(url)
+
+ host = _normalize_host(host, scheme)
+
+ if normalize_uri and path:
+ path = _remove_path_dot_segments(path)
+ path = _encode_invalid_chars(path, PATH_CHARS)
+ if normalize_uri and query:
+ query = _encode_invalid_chars(query, QUERY_CHARS)
+ if normalize_uri and fragment:
+ fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
+
+ except (ValueError, AttributeError):
+ return six.raise_from(LocationParseError(source_url), None)
+
+ # For the sake of backwards compatibility we put empty
+ # string values for path if there are any defined values
+ # beyond the path in the URL.
+ # TODO: Remove this when we break backwards compatibility.
+ if not path:
+ if query is not None or fragment is not None:
+ path = ""
+ else:
+ path = None
+
+ # Ensure that each part of the URL is a `str` for
+ # backwards compatibility.
+ if isinstance(url, six.text_type):
+ ensure_func = six.ensure_text
+ else:
+ ensure_func = six.ensure_str
+
+ def ensure_type(x):
+ return x if x is None else ensure_func(x)
+
+ return Url(
+ scheme=ensure_type(scheme),
+ auth=ensure_type(auth),
+ host=ensure_type(host),
+ port=port,
+ path=ensure_type(path),
+ query=ensure_type(query),
+ fragment=ensure_type(fragment),
+ )
+
+
+def get_host(url):
+ """
+ Deprecated. Use :func:`parse_url` instead.
+ """
+ p = parse_url(url)
+ return p.scheme or "http", p.hostname, p.port
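
To illustrate the normalization rules implemented above (assuming the module is importable as urllib3.util.url)::

    from urllib3.util.url import Url, parse_url

    u = parse_url("HTTP://user@Example.COM:80/a/../b?q=1#frag")
    print(u.scheme, u.host)    # http example.com -- both lowercased
    print(u.path)              # /b -- dot segments removed for http(s)
    print(u.request_uri)       # /b?q=1

    print(parse_url("/path with space").path)   # /path%20with%20space
    print(Url(scheme="https", host="example.com", path="x").url)
    # https://example.com/x -- a leading '/' is added to the path
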
diff --git a/third_party/python/pip/pip/_vendor/urllib3/util/wait.py b/third_party/python/pip/pip/_vendor/urllib3/util/wait.py
new file mode 100644
index 0000000000..21b4590b3d
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/urllib3/util/wait.py
@@ -0,0 +1,152 @@
+import errno
+import select
+import sys
+from functools import partial
+
+try:
+ from time import monotonic
+except ImportError:
+ from time import time as monotonic
+
+__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
+
+
+class NoWayToWaitForSocketError(Exception):
+ pass
+
+
+# How should we wait on sockets?
+#
+# There are two types of APIs you can use for waiting on sockets: the fancy
+# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
+ # select/poll. The stateful APIs are more efficient when you have a lot of
+# sockets to keep track of, because you can set them up once and then use them
+# lots of times. But we only ever want to wait on a single socket at a time
+# and don't want to keep track of state, so the stateless APIs are actually
+# more efficient. So we want to use select() or poll().
+#
+# Now, how do we choose between select() and poll()? On traditional Unixes,
+# select() has a strange calling convention that makes it slow, or fail
+# altogether, for high-numbered file descriptors. The point of poll() is to fix
+# that, so on Unixes, we prefer poll().
+#
+# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
+# for it), but that's OK, because on Windows, select() doesn't have this
+# strange calling convention; plain select() works fine.
+#
+# So: on Windows we use select(), and everywhere else we use poll(). We also
+# fall back to select() in case poll() is somehow broken or missing.
+
+if sys.version_info >= (3, 5):
+ # Modern Python, that retries syscalls by default
+ def _retry_on_intr(fn, timeout):
+ return fn(timeout)
+
+else:
+ # Old and broken Pythons.
+ def _retry_on_intr(fn, timeout):
+ if timeout is None:
+ deadline = float("inf")
+ else:
+ deadline = monotonic() + timeout
+
+ while True:
+ try:
+ return fn(timeout)
+ # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
+ except (OSError, select.error) as e:
+ # 'e.args[0]' incantation works for both OSError and select.error
+ if e.args[0] != errno.EINTR:
+ raise
+ else:
+ timeout = deadline - monotonic()
+ if timeout < 0:
+ timeout = 0
+ if timeout == float("inf"):
+ timeout = None
+ continue
+
+
+def select_wait_for_socket(sock, read=False, write=False, timeout=None):
+ if not read and not write:
+ raise RuntimeError("must specify at least one of read=True, write=True")
+ rcheck = []
+ wcheck = []
+ if read:
+ rcheck.append(sock)
+ if write:
+ wcheck.append(sock)
+ # When doing a non-blocking connect, most systems signal success by
+ # marking the socket writable. Windows, though, signals success by marking
+ # it as "exceptional". We paper over the difference by checking the write
+ # sockets for both conditions. (The stdlib selectors module does the same
+ # thing.)
+ fn = partial(select.select, rcheck, wcheck, wcheck)
+ rready, wready, xready = _retry_on_intr(fn, timeout)
+ return bool(rready or wready or xready)
+
+
+def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
+ if not read and not write:
+ raise RuntimeError("must specify at least one of read=True, write=True")
+ mask = 0
+ if read:
+ mask |= select.POLLIN
+ if write:
+ mask |= select.POLLOUT
+ poll_obj = select.poll()
+ poll_obj.register(sock, mask)
+
+ # For some reason, poll() takes timeout in milliseconds
+ def do_poll(t):
+ if t is not None:
+ t *= 1000
+ return poll_obj.poll(t)
+
+ return bool(_retry_on_intr(do_poll, timeout))
+
+
+def null_wait_for_socket(*args, **kwargs):
+ raise NoWayToWaitForSocketError("no select-equivalent available")
+
+
+def _have_working_poll():
+ # Apparently some systems have a select.poll that fails as soon as you try
+ # to use it, either due to strange configuration or broken monkeypatching
+ # from libraries like eventlet/greenlet.
+ try:
+ poll_obj = select.poll()
+ _retry_on_intr(poll_obj.poll, 0)
+ except (AttributeError, OSError):
+ return False
+ else:
+ return True
+
+
+def wait_for_socket(*args, **kwargs):
+ # We delay choosing which implementation to use until the first time we're
+ # called. We could do it at import time, but then we might make the wrong
+ # decision if someone goes wild with monkeypatching select.poll after
+ # we're imported.
+ global wait_for_socket
+ if _have_working_poll():
+ wait_for_socket = poll_wait_for_socket
+ elif hasattr(select, "select"):
+ wait_for_socket = select_wait_for_socket
+ else: # Platform-specific: Appengine.
+ wait_for_socket = null_wait_for_socket
+ return wait_for_socket(*args, **kwargs)
+
+
+def wait_for_read(sock, timeout=None):
+ """Waits for reading to be available on a given socket.
+ Returns True if the socket is readable, or False if the timeout expired.
+ """
+ return wait_for_socket(sock, read=True, timeout=timeout)
+
+
+def wait_for_write(sock, timeout=None):
+ """Waits for writing to be available on a given socket.
+ Returns True if the socket is writable, or False if the timeout expired.
+ """
+ return wait_for_socket(sock, write=True, timeout=timeout)
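
Since both helpers poll a single socket statelessly, they are easy to exercise with a plain socketpair; a self-contained sketch (assuming the module is importable as urllib3.util.wait)::

    import socket
    from urllib3.util.wait import wait_for_read, wait_for_write

    a, b = socket.socketpair()
    try:
        print(wait_for_read(a, timeout=0.1))    # False: nothing buffered yet
        b.sendall(b"ping")
        print(wait_for_read(a, timeout=0.1))    # True: data is waiting
        print(wait_for_write(a, timeout=0.1))   # True: send buffer has room
    finally:
        a.close()
        b.close()
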
diff --git a/third_party/python/pip/pip/_vendor/vendor.txt b/third_party/python/pip/pip/_vendor/vendor.txt
new file mode 100644
index 0000000000..67452d89fc
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/vendor.txt
@@ -0,0 +1,23 @@
+CacheControl==0.12.11 # Make sure to update the license in pyproject.toml for this.
+colorama==0.4.6
+distlib==0.3.6
+distro==1.8.0
+msgpack==1.0.4
+packaging==21.3
+platformdirs==2.6.2
+pyparsing==3.0.9
+pyproject-hooks==1.0.0
+requests==2.28.2
+ certifi==2022.12.7
+ chardet==5.1.0
+ idna==3.4
+ urllib3==1.26.14
+rich==12.6.0
+ pygments==2.13.0
+ typing_extensions==4.4.0
+resolvelib==0.8.1
+setuptools==44.0.0
+six==1.16.0
+tenacity==8.1.0
+tomli==2.0.1
+webencodings==0.5.1
diff --git a/third_party/python/pip/pip/_vendor/webencodings/__init__.py b/third_party/python/pip/pip/_vendor/webencodings/__init__.py
new file mode 100644
index 0000000000..d21d697c88
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/webencodings/__init__.py
@@ -0,0 +1,342 @@
+# coding: utf-8
+"""
+
+ webencodings
+ ~~~~~~~~~~~~
+
+ This is a Python implementation of the `WHATWG Encoding standard
+ <http://encoding.spec.whatwg.org/>`. See README for details.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+import codecs
+
+from .labels import LABELS
+
+
+VERSION = '0.5.1'
+
+
+# Some names in Encoding are not valid Python aliases. Remap these.
+PYTHON_NAMES = {
+ 'iso-8859-8-i': 'iso-8859-8',
+ 'x-mac-cyrillic': 'mac-cyrillic',
+ 'macintosh': 'mac-roman',
+ 'windows-874': 'cp874'}
+
+CACHE = {}
+
+
+def ascii_lower(string):
+ r"""Transform (only) ASCII letters to lower case: A-Z is mapped to a-z.
+
+ :param string: A Unicode string.
+ :returns: A new Unicode string.
+
+ This is used for `ASCII case-insensitive
+ <http://encoding.spec.whatwg.org/#ascii-case-insensitive>`_
+ matching of encoding labels.
+ The same matching is also used, among other things,
+ for `CSS keywords <http://dev.w3.org/csswg/css-values/#keywords>`_.
+
+ This is different from the :meth:`~py:str.lower` method of Unicode strings
+ which also affects non-ASCII characters,
+ sometimes mapping them into the ASCII range:
+
+ >>> keyword = u'Bac\N{KELVIN SIGN}ground'
+ >>> assert keyword.lower() == u'background'
+ >>> assert ascii_lower(keyword) != keyword.lower()
+ >>> assert ascii_lower(keyword) == u'bac\N{KELVIN SIGN}ground'
+
+ """
+ # This turns out to be faster than unicode.translate()
+ return string.encode('utf8').lower().decode('utf8')
+
+
+def lookup(label):
+ """
+ Look for an encoding by its label.
+ This is the spec’s `get an encoding
+ <http://encoding.spec.whatwg.org/#concept-encoding-get>`_ algorithm.
+ Supported labels are listed there.
+
+ :param label: A string.
+ :returns:
+ An :class:`Encoding` object, or :obj:`None` for an unknown label.
+
+ """
+ # Only strip ASCII whitespace: U+0009, U+000A, U+000C, U+000D, and U+0020.
+ label = ascii_lower(label.strip('\t\n\f\r '))
+ name = LABELS.get(label)
+ if name is None:
+ return None
+ encoding = CACHE.get(name)
+ if encoding is None:
+ if name == 'x-user-defined':
+ from .x_user_defined import codec_info
+ else:
+ python_name = PYTHON_NAMES.get(name, name)
+ # Any python_name value that gets to here should be valid.
+ codec_info = codecs.lookup(python_name)
+ encoding = Encoding(name, codec_info)
+ CACHE[name] = encoding
+ return encoding
+
+
+def _get_encoding(encoding_or_label):
+ """
+ Accept either an encoding object or label.
+
+ :param encoding: An :class:`Encoding` object or a label string.
+ :returns: An :class:`Encoding` object.
+ :raises: :exc:`~exceptions.LookupError` for an unknown label.
+
+ """
+ if hasattr(encoding_or_label, 'codec_info'):
+ return encoding_or_label
+
+ encoding = lookup(encoding_or_label)
+ if encoding is None:
+ raise LookupError('Unknown encoding label: %r' % encoding_or_label)
+ return encoding
+
+
+class Encoding(object):
+ """Reresents a character encoding such as UTF-8,
+ that can be used for decoding or encoding.
+
+ .. attribute:: name
+
+ Canonical name of the encoding
+
+ .. attribute:: codec_info
+
+ The actual implementation of the encoding,
+ a stdlib :class:`~codecs.CodecInfo` object.
+ See :func:`codecs.register`.
+
+ """
+ def __init__(self, name, codec_info):
+ self.name = name
+ self.codec_info = codec_info
+
+ def __repr__(self):
+ return '<Encoding %s>' % self.name
+
+
+#: The UTF-8 encoding. Should be used for new content and formats.
+UTF8 = lookup('utf-8')
+
+_UTF16LE = lookup('utf-16le')
+_UTF16BE = lookup('utf-16be')
+
+
+def decode(input, fallback_encoding, errors='replace'):
+ """
+ Decode a single string.
+
+ :param input: A byte string
+ :param fallback_encoding:
+ An :class:`Encoding` object or a label string.
+ The encoding to use if :obj:`input` does not have a BOM.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+ :return:
+ A ``(output, encoding)`` tuple of a Unicode string
+ and an :obj:`Encoding`.
+
+ """
+ # Fail early if `encoding` is an invalid label.
+ fallback_encoding = _get_encoding(fallback_encoding)
+ bom_encoding, input = _detect_bom(input)
+ encoding = bom_encoding or fallback_encoding
+ return encoding.codec_info.decode(input, errors)[0], encoding
+
+
+def _detect_bom(input):
+ """Return (bom_encoding, input), with any BOM removed from the input."""
+ if input.startswith(b'\xFF\xFE'):
+ return _UTF16LE, input[2:]
+ if input.startswith(b'\xFE\xFF'):
+ return _UTF16BE, input[2:]
+ if input.startswith(b'\xEF\xBB\xBF'):
+ return UTF8, input[3:]
+ return None, input
+
+
+def encode(input, encoding=UTF8, errors='strict'):
+ """
+ Encode a single string.
+
+ :param input: A Unicode string.
+ :param encoding: An :class:`Encoding` object or a label string.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+ :return: A byte string.
+
+ """
+ return _get_encoding(encoding).codec_info.encode(input, errors)[0]
+
+
+def iter_decode(input, fallback_encoding, errors='replace'):
+ """
+ "Pull"-based decoder.
+
+ :param input:
+ An iterable of byte strings.
+
+ The input is first consumed just enough to determine the encoding
+ based on the presence of a BOM,
+ then consumed on demand as the return value is iterated.
+ :param fallback_encoding:
+ An :class:`Encoding` object or a label string.
+ The encoding to use if :obj:`input` does not have a BOM.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+ :returns:
+ An ``(output, encoding)`` tuple.
+ :obj:`output` is an iterable of Unicode strings,
+ :obj:`encoding` is the :obj:`Encoding` that is being used.
+
+ """
+
+ decoder = IncrementalDecoder(fallback_encoding, errors)
+ generator = _iter_decode_generator(input, decoder)
+ encoding = next(generator)
+ return generator, encoding
+
+
+def _iter_decode_generator(input, decoder):
+ """Return a generator that first yields the :obj:`Encoding`,
+ then yields output chunks as Unicode strings.
+
+ """
+ decode = decoder.decode
+ input = iter(input)
+ for chunk in input:
+ output = decode(chunk)
+ if output:
+ assert decoder.encoding is not None
+ yield decoder.encoding
+ yield output
+ break
+ else:
+ # Input exhausted without determining the encoding
+ output = decode(b'', final=True)
+ assert decoder.encoding is not None
+ yield decoder.encoding
+ if output:
+ yield output
+ return
+
+ for chunk in input:
+ output = decode(chunk)
+ if output:
+ yield output
+ output = decode(b'', final=True)
+ if output:
+ yield output
+
+
+def iter_encode(input, encoding=UTF8, errors='strict'):
+ """
+ “Pull”-based encoder.
+
+ :param input: An iterable of Unicode strings.
+ :param encoding: An :class:`Encoding` object or a label string.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+ :returns: An iterable of byte strings.
+
+ """
+ # Fail early if `encoding` is an invalid label.
+ encode = IncrementalEncoder(encoding, errors).encode
+ return _iter_encode_generator(input, encode)
+
+
+def _iter_encode_generator(input, encode):
+ for chunk in input:
+ output = encode(chunk)
+ if output:
+ yield output
+ output = encode('', final=True)
+ if output:
+ yield output
+
+
+class IncrementalDecoder(object):
+ """
+ “Push”-based decoder.
+
+ :param fallback_encoding:
+ An :class:`Encoding` object or a label string.
+ The encoding to use if :obj:`input` does not have a BOM.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+
+ """
+ def __init__(self, fallback_encoding, errors='replace'):
+ # Fail early if `encoding` is an invalid label.
+ self._fallback_encoding = _get_encoding(fallback_encoding)
+ self._errors = errors
+ self._buffer = b''
+ self._decoder = None
+ #: The actual :class:`Encoding` that is being used,
+ #: or :obj:`None` if that is not determined yet.
+ #: (I.e. if there is not enough input yet to determine
+ #: if there is a BOM.)
+ self.encoding = None # Not known yet.
+
+ def decode(self, input, final=False):
+ """Decode one chunk of the input.
+
+ :param input: A byte string.
+ :param final:
+ Indicate that no more input is available.
+ Must be :obj:`True` if this is the last call.
+ :returns: A Unicode string.
+
+ """
+ decoder = self._decoder
+ if decoder is not None:
+ return decoder(input, final)
+
+ input = self._buffer + input
+ encoding, input = _detect_bom(input)
+ if encoding is None:
+ if len(input) < 3 and not final: # Not enough data yet.
+ self._buffer = input
+ return ''
+ else: # No BOM
+ encoding = self._fallback_encoding
+ decoder = encoding.codec_info.incrementaldecoder(self._errors).decode
+ self._decoder = decoder
+ self.encoding = encoding
+ return decoder(input, final)
+
+
+class IncrementalEncoder(object):
+ """
+ “Push”-based encoder.
+
+ :param encoding: An :class:`Encoding` object or a label string.
+ :param errors: Type of error handling. See :func:`codecs.register`.
+ :raises: :exc:`~exceptions.LookupError` for an unknown encoding label.
+
+ .. method:: encode(input, final=False)
+
+ :param input: A Unicode string.
+ :param final:
+ Indicate that no more input is available.
+ Must be :obj:`True` if this is the last call.
+ :returns: A byte string.
+
+ """
+ def __init__(self, encoding=UTF8, errors='strict'):
+ encoding = _get_encoding(encoding)
+ self.encode = encoding.codec_info.incrementalencoder(errors).encode
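
End to end, the module behaves as follows (assuming it is importable as webencodings; note that per the WHATWG label table, 'ascii' and 'latin1' both name windows-1252)::

    from webencodings import decode, encode, lookup

    print(lookup("ascii").name)     # windows-1252
    print(lookup("utf8").name)      # utf-8

    # A UTF-8 BOM takes precedence over the fallback encoding:
    text, enc = decode(b"\xef\xbb\xbfcaf\xc3\xa9", "latin1")
    print(text, enc.name)           # café utf-8

    print(encode("café"))           # b'caf\xc3\xa9' (UTF-8 by default)
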
diff --git a/third_party/python/pip/pip/_vendor/webencodings/labels.py b/third_party/python/pip/pip/_vendor/webencodings/labels.py
new file mode 100644
index 0000000000..29cbf91ef7
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/webencodings/labels.py
@@ -0,0 +1,231 @@
+"""
+
+ webencodings.labels
+ ~~~~~~~~~~~~~~~~~~~
+
+ Map encoding labels to their name.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+# XXX Do not edit!
+# This file is automatically generated by mklabels.py
+
+LABELS = {
+ 'unicode-1-1-utf-8': 'utf-8',
+ 'utf-8': 'utf-8',
+ 'utf8': 'utf-8',
+ '866': 'ibm866',
+ 'cp866': 'ibm866',
+ 'csibm866': 'ibm866',
+ 'ibm866': 'ibm866',
+ 'csisolatin2': 'iso-8859-2',
+ 'iso-8859-2': 'iso-8859-2',
+ 'iso-ir-101': 'iso-8859-2',
+ 'iso8859-2': 'iso-8859-2',
+ 'iso88592': 'iso-8859-2',
+ 'iso_8859-2': 'iso-8859-2',
+ 'iso_8859-2:1987': 'iso-8859-2',
+ 'l2': 'iso-8859-2',
+ 'latin2': 'iso-8859-2',
+ 'csisolatin3': 'iso-8859-3',
+ 'iso-8859-3': 'iso-8859-3',
+ 'iso-ir-109': 'iso-8859-3',
+ 'iso8859-3': 'iso-8859-3',
+ 'iso88593': 'iso-8859-3',
+ 'iso_8859-3': 'iso-8859-3',
+ 'iso_8859-3:1988': 'iso-8859-3',
+ 'l3': 'iso-8859-3',
+ 'latin3': 'iso-8859-3',
+ 'csisolatin4': 'iso-8859-4',
+ 'iso-8859-4': 'iso-8859-4',
+ 'iso-ir-110': 'iso-8859-4',
+ 'iso8859-4': 'iso-8859-4',
+ 'iso88594': 'iso-8859-4',
+ 'iso_8859-4': 'iso-8859-4',
+ 'iso_8859-4:1988': 'iso-8859-4',
+ 'l4': 'iso-8859-4',
+ 'latin4': 'iso-8859-4',
+ 'csisolatincyrillic': 'iso-8859-5',
+ 'cyrillic': 'iso-8859-5',
+ 'iso-8859-5': 'iso-8859-5',
+ 'iso-ir-144': 'iso-8859-5',
+ 'iso8859-5': 'iso-8859-5',
+ 'iso88595': 'iso-8859-5',
+ 'iso_8859-5': 'iso-8859-5',
+ 'iso_8859-5:1988': 'iso-8859-5',
+ 'arabic': 'iso-8859-6',
+ 'asmo-708': 'iso-8859-6',
+ 'csiso88596e': 'iso-8859-6',
+ 'csiso88596i': 'iso-8859-6',
+ 'csisolatinarabic': 'iso-8859-6',
+ 'ecma-114': 'iso-8859-6',
+ 'iso-8859-6': 'iso-8859-6',
+ 'iso-8859-6-e': 'iso-8859-6',
+ 'iso-8859-6-i': 'iso-8859-6',
+ 'iso-ir-127': 'iso-8859-6',
+ 'iso8859-6': 'iso-8859-6',
+ 'iso88596': 'iso-8859-6',
+ 'iso_8859-6': 'iso-8859-6',
+ 'iso_8859-6:1987': 'iso-8859-6',
+ 'csisolatingreek': 'iso-8859-7',
+ 'ecma-118': 'iso-8859-7',
+ 'elot_928': 'iso-8859-7',
+ 'greek': 'iso-8859-7',
+ 'greek8': 'iso-8859-7',
+ 'iso-8859-7': 'iso-8859-7',
+ 'iso-ir-126': 'iso-8859-7',
+ 'iso8859-7': 'iso-8859-7',
+ 'iso88597': 'iso-8859-7',
+ 'iso_8859-7': 'iso-8859-7',
+ 'iso_8859-7:1987': 'iso-8859-7',
+ 'sun_eu_greek': 'iso-8859-7',
+ 'csiso88598e': 'iso-8859-8',
+ 'csisolatinhebrew': 'iso-8859-8',
+ 'hebrew': 'iso-8859-8',
+ 'iso-8859-8': 'iso-8859-8',
+ 'iso-8859-8-e': 'iso-8859-8',
+ 'iso-ir-138': 'iso-8859-8',
+ 'iso8859-8': 'iso-8859-8',
+ 'iso88598': 'iso-8859-8',
+ 'iso_8859-8': 'iso-8859-8',
+ 'iso_8859-8:1988': 'iso-8859-8',
+ 'visual': 'iso-8859-8',
+ 'csiso88598i': 'iso-8859-8-i',
+ 'iso-8859-8-i': 'iso-8859-8-i',
+ 'logical': 'iso-8859-8-i',
+ 'csisolatin6': 'iso-8859-10',
+ 'iso-8859-10': 'iso-8859-10',
+ 'iso-ir-157': 'iso-8859-10',
+ 'iso8859-10': 'iso-8859-10',
+ 'iso885910': 'iso-8859-10',
+ 'l6': 'iso-8859-10',
+ 'latin6': 'iso-8859-10',
+ 'iso-8859-13': 'iso-8859-13',
+ 'iso8859-13': 'iso-8859-13',
+ 'iso885913': 'iso-8859-13',
+ 'iso-8859-14': 'iso-8859-14',
+ 'iso8859-14': 'iso-8859-14',
+ 'iso885914': 'iso-8859-14',
+ 'csisolatin9': 'iso-8859-15',
+ 'iso-8859-15': 'iso-8859-15',
+ 'iso8859-15': 'iso-8859-15',
+ 'iso885915': 'iso-8859-15',
+ 'iso_8859-15': 'iso-8859-15',
+ 'l9': 'iso-8859-15',
+ 'iso-8859-16': 'iso-8859-16',
+ 'cskoi8r': 'koi8-r',
+ 'koi': 'koi8-r',
+ 'koi8': 'koi8-r',
+ 'koi8-r': 'koi8-r',
+ 'koi8_r': 'koi8-r',
+ 'koi8-u': 'koi8-u',
+ 'csmacintosh': 'macintosh',
+ 'mac': 'macintosh',
+ 'macintosh': 'macintosh',
+ 'x-mac-roman': 'macintosh',
+ 'dos-874': 'windows-874',
+ 'iso-8859-11': 'windows-874',
+ 'iso8859-11': 'windows-874',
+ 'iso885911': 'windows-874',
+ 'tis-620': 'windows-874',
+ 'windows-874': 'windows-874',
+ 'cp1250': 'windows-1250',
+ 'windows-1250': 'windows-1250',
+ 'x-cp1250': 'windows-1250',
+ 'cp1251': 'windows-1251',
+ 'windows-1251': 'windows-1251',
+ 'x-cp1251': 'windows-1251',
+ 'ansi_x3.4-1968': 'windows-1252',
+ 'ascii': 'windows-1252',
+ 'cp1252': 'windows-1252',
+ 'cp819': 'windows-1252',
+ 'csisolatin1': 'windows-1252',
+ 'ibm819': 'windows-1252',
+ 'iso-8859-1': 'windows-1252',
+ 'iso-ir-100': 'windows-1252',
+ 'iso8859-1': 'windows-1252',
+ 'iso88591': 'windows-1252',
+ 'iso_8859-1': 'windows-1252',
+ 'iso_8859-1:1987': 'windows-1252',
+ 'l1': 'windows-1252',
+ 'latin1': 'windows-1252',
+ 'us-ascii': 'windows-1252',
+ 'windows-1252': 'windows-1252',
+ 'x-cp1252': 'windows-1252',
+ 'cp1253': 'windows-1253',
+ 'windows-1253': 'windows-1253',
+ 'x-cp1253': 'windows-1253',
+ 'cp1254': 'windows-1254',
+ 'csisolatin5': 'windows-1254',
+ 'iso-8859-9': 'windows-1254',
+ 'iso-ir-148': 'windows-1254',
+ 'iso8859-9': 'windows-1254',
+ 'iso88599': 'windows-1254',
+ 'iso_8859-9': 'windows-1254',
+ 'iso_8859-9:1989': 'windows-1254',
+ 'l5': 'windows-1254',
+ 'latin5': 'windows-1254',
+ 'windows-1254': 'windows-1254',
+ 'x-cp1254': 'windows-1254',
+ 'cp1255': 'windows-1255',
+ 'windows-1255': 'windows-1255',
+ 'x-cp1255': 'windows-1255',
+ 'cp1256': 'windows-1256',
+ 'windows-1256': 'windows-1256',
+ 'x-cp1256': 'windows-1256',
+ 'cp1257': 'windows-1257',
+ 'windows-1257': 'windows-1257',
+ 'x-cp1257': 'windows-1257',
+ 'cp1258': 'windows-1258',
+ 'windows-1258': 'windows-1258',
+ 'x-cp1258': 'windows-1258',
+ 'x-mac-cyrillic': 'x-mac-cyrillic',
+ 'x-mac-ukrainian': 'x-mac-cyrillic',
+ 'chinese': 'gbk',
+ 'csgb2312': 'gbk',
+ 'csiso58gb231280': 'gbk',
+ 'gb2312': 'gbk',
+ 'gb_2312': 'gbk',
+ 'gb_2312-80': 'gbk',
+ 'gbk': 'gbk',
+ 'iso-ir-58': 'gbk',
+ 'x-gbk': 'gbk',
+ 'gb18030': 'gb18030',
+ 'hz-gb-2312': 'hz-gb-2312',
+ 'big5': 'big5',
+ 'big5-hkscs': 'big5',
+ 'cn-big5': 'big5',
+ 'csbig5': 'big5',
+ 'x-x-big5': 'big5',
+ 'cseucpkdfmtjapanese': 'euc-jp',
+ 'euc-jp': 'euc-jp',
+ 'x-euc-jp': 'euc-jp',
+ 'csiso2022jp': 'iso-2022-jp',
+ 'iso-2022-jp': 'iso-2022-jp',
+ 'csshiftjis': 'shift_jis',
+ 'ms_kanji': 'shift_jis',
+ 'shift-jis': 'shift_jis',
+ 'shift_jis': 'shift_jis',
+ 'sjis': 'shift_jis',
+ 'windows-31j': 'shift_jis',
+ 'x-sjis': 'shift_jis',
+ 'cseuckr': 'euc-kr',
+ 'csksc56011987': 'euc-kr',
+ 'euc-kr': 'euc-kr',
+ 'iso-ir-149': 'euc-kr',
+ 'korean': 'euc-kr',
+ 'ks_c_5601-1987': 'euc-kr',
+ 'ks_c_5601-1989': 'euc-kr',
+ 'ksc5601': 'euc-kr',
+ 'ksc_5601': 'euc-kr',
+ 'windows-949': 'euc-kr',
+ 'csiso2022kr': 'iso-2022-kr',
+ 'iso-2022-kr': 'iso-2022-kr',
+ 'utf-16be': 'utf-16be',
+ 'utf-16': 'utf-16le',
+ 'utf-16le': 'utf-16le',
+ 'x-user-defined': 'x-user-defined',
+}
diff --git a/third_party/python/pip/pip/_vendor/webencodings/mklabels.py b/third_party/python/pip/pip/_vendor/webencodings/mklabels.py
new file mode 100644
index 0000000000..295dc928ba
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/webencodings/mklabels.py
@@ -0,0 +1,59 @@
+"""
+
+ webencodings.mklabels
+ ~~~~~~~~~~~~~~~~~~~~~
+
+ Regenerate the webencodings.labels module.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+import json
+try:
+ from urllib import urlopen
+except ImportError:
+ from urllib.request import urlopen
+
+
+def assert_lower(string):
+ assert string == string.lower()
+ return string
+
+
+def generate(url):
+ parts = ['''\
+"""
+
+ webencodings.labels
+ ~~~~~~~~~~~~~~~~~~~
+
+ Map encoding labels to their name.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+# XXX Do not edit!
+# This file is automatically generated by mklabels.py
+
+LABELS = {
+''']
+ labels = [
+ (repr(assert_lower(label)).lstrip('u'),
+ repr(encoding['name']).lstrip('u'))
+ for category in json.loads(urlopen(url).read().decode('ascii'))
+ for encoding in category['encodings']
+ for label in encoding['labels']]
+ max_len = max(len(label) for label, name in labels)
+ parts.extend(
+ ' %s:%s %s,\n' % (label, ' ' * (max_len - len(label)), name)
+ for label, name in labels)
+ parts.append('}')
+ return ''.join(parts)
+
+
+if __name__ == '__main__':
+ print(generate('http://encoding.spec.whatwg.org/encodings.json'))
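
generate() downloads the WHATWG encodings.json and renders it as the aligned dict literal seen in labels.py. The core formatting step can be sketched on a tiny inline sample instead of the network::

    import json

    sample = json.loads(
        '[{"encodings": [{"name": "utf-8",'
        ' "labels": ["unicode-1-1-utf-8", "utf-8", "utf8"]}]}]'
    )
    labels = [
        (repr(label), repr(encoding["name"]))
        for category in sample
        for encoding in category["encodings"]
        for label in encoding["labels"]
    ]
    max_len = max(len(label) for label, _ in labels)
    for label, name in labels:
        # Pad each key to the widest label, as generate() does.
        print("    %s:%s %s," % (label, " " * (max_len - len(label)), name))
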
diff --git a/third_party/python/pip/pip/_vendor/webencodings/tests.py b/third_party/python/pip/pip/_vendor/webencodings/tests.py
new file mode 100644
index 0000000000..e12c10d033
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/webencodings/tests.py
@@ -0,0 +1,153 @@
+# coding: utf-8
+"""
+
+ webencodings.tests
+ ~~~~~~~~~~~~~~~~~~
+
+ A basic test suite for Encoding.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+from . import (lookup, LABELS, decode, encode, iter_decode, iter_encode,
+ IncrementalDecoder, IncrementalEncoder, UTF8)
+
+
+def assert_raises(exception, function, *args, **kwargs):
+ try:
+ function(*args, **kwargs)
+ except exception:
+ return
+ else: # pragma: no cover
+ raise AssertionError('Did not raise %s.' % exception)
+
+
+def test_labels():
+ assert lookup('utf-8').name == 'utf-8'
+ assert lookup('Utf-8').name == 'utf-8'
+ assert lookup('UTF-8').name == 'utf-8'
+ assert lookup('utf8').name == 'utf-8'
+ assert lookup('utf8').name == 'utf-8'
+ assert lookup('utf8 ').name == 'utf-8'
+ assert lookup(' \r\nutf8\t').name == 'utf-8'
+ assert lookup('u8') is None # Python label.
+ assert lookup('utf-8 ') is None # Non-ASCII white space.
+
+ assert lookup('US-ASCII').name == 'windows-1252'
+ assert lookup('iso-8859-1').name == 'windows-1252'
+ assert lookup('latin1').name == 'windows-1252'
+ assert lookup('LATIN1').name == 'windows-1252'
+ assert lookup('latin-1') is None
+ assert lookup('LATİN1') is None # ASCII-only case insensitivity.
+
+
+def test_all_labels():
+ for label in LABELS:
+ assert decode(b'', label) == ('', lookup(label))
+ assert encode('', label) == b''
+ for repeat in [0, 1, 12]:
+ output, _ = iter_decode([b''] * repeat, label)
+ assert list(output) == []
+ assert list(iter_encode([''] * repeat, label)) == []
+ decoder = IncrementalDecoder(label)
+ assert decoder.decode(b'') == ''
+ assert decoder.decode(b'', final=True) == ''
+ encoder = IncrementalEncoder(label)
+ assert encoder.encode('') == b''
+ assert encoder.encode('', final=True) == b''
+ # All encoding names are valid labels too:
+ for name in set(LABELS.values()):
+ assert lookup(name).name == name
+
+
+def test_invalid_label():
+ assert_raises(LookupError, decode, b'\xEF\xBB\xBF\xc3\xa9', 'invalid')
+ assert_raises(LookupError, encode, 'é', 'invalid')
+ assert_raises(LookupError, iter_decode, [], 'invalid')
+ assert_raises(LookupError, iter_encode, [], 'invalid')
+ assert_raises(LookupError, IncrementalDecoder, 'invalid')
+ assert_raises(LookupError, IncrementalEncoder, 'invalid')
+
+
+def test_decode():
+ assert decode(b'\x80', 'latin1') == ('€', lookup('latin1'))
+ assert decode(b'\x80', lookup('latin1')) == ('€', lookup('latin1'))
+ assert decode(b'\xc3\xa9', 'utf8') == ('é', lookup('utf8'))
+ assert decode(b'\xc3\xa9', UTF8) == ('é', lookup('utf8'))
+ assert decode(b'\xc3\xa9', 'ascii') == ('é', lookup('ascii'))
+ assert decode(b'\xEF\xBB\xBF\xc3\xa9', 'ascii') == ('é', lookup('utf8')) # UTF-8 with BOM
+
+ assert decode(b'\xFE\xFF\x00\xe9', 'ascii') == ('é', lookup('utf-16be')) # UTF-16-BE with BOM
+ assert decode(b'\xFF\xFE\xe9\x00', 'ascii') == ('é', lookup('utf-16le')) # UTF-16-LE with BOM
+ assert decode(b'\xFE\xFF\xe9\x00', 'ascii') == ('\ue900', lookup('utf-16be'))
+ assert decode(b'\xFF\xFE\x00\xe9', 'ascii') == ('\ue900', lookup('utf-16le'))
+
+ assert decode(b'\x00\xe9', 'UTF-16BE') == ('é', lookup('utf-16be'))
+ assert decode(b'\xe9\x00', 'UTF-16LE') == ('é', lookup('utf-16le'))
+ assert decode(b'\xe9\x00', 'UTF-16') == ('é', lookup('utf-16le'))
+
+ assert decode(b'\xe9\x00', 'UTF-16BE') == ('\ue900', lookup('utf-16be'))
+ assert decode(b'\x00\xe9', 'UTF-16LE') == ('\ue900', lookup('utf-16le'))
+ assert decode(b'\x00\xe9', 'UTF-16') == ('\ue900', lookup('utf-16le'))
+
+
+def test_encode():
+ assert encode('é', 'latin1') == b'\xe9'
+ assert encode('é', 'utf8') == b'\xc3\xa9'
+ assert encode('é', 'utf8') == b'\xc3\xa9'
+ assert encode('é', 'utf-16') == b'\xe9\x00'
+ assert encode('é', 'utf-16le') == b'\xe9\x00'
+ assert encode('é', 'utf-16be') == b'\x00\xe9'
+
+
+def test_iter_decode():
+ def iter_decode_to_string(input, fallback_encoding):
+ output, _encoding = iter_decode(input, fallback_encoding)
+ return ''.join(output)
+ assert iter_decode_to_string([], 'latin1') == ''
+ assert iter_decode_to_string([b''], 'latin1') == ''
+ assert iter_decode_to_string([b'\xe9'], 'latin1') == 'é'
+ assert iter_decode_to_string([b'hello'], 'latin1') == 'hello'
+ assert iter_decode_to_string([b'he', b'llo'], 'latin1') == 'hello'
+ assert iter_decode_to_string([b'hell', b'o'], 'latin1') == 'hello'
+ assert iter_decode_to_string([b'\xc3\xa9'], 'latin1') == 'é'
+ assert iter_decode_to_string([b'\xEF\xBB\xBF\xc3\xa9'], 'latin1') == 'é'
+ assert iter_decode_to_string([
+ b'\xEF\xBB\xBF', b'\xc3', b'\xa9'], 'latin1') == 'é'
+ assert iter_decode_to_string([
+ b'\xEF\xBB\xBF', b'a', b'\xc3'], 'latin1') == 'a\uFFFD'
+ assert iter_decode_to_string([
+ b'', b'\xEF', b'', b'', b'\xBB\xBF\xc3', b'\xa9'], 'latin1') == 'é'
+ assert iter_decode_to_string([b'\xEF\xBB\xBF'], 'latin1') == ''
+ assert iter_decode_to_string([b'\xEF\xBB'], 'latin1') == 'ï»'
+ assert iter_decode_to_string([b'\xFE\xFF\x00\xe9'], 'latin1') == 'é'
+ assert iter_decode_to_string([b'\xFF\xFE\xe9\x00'], 'latin1') == 'é'
+ assert iter_decode_to_string([
+ b'', b'\xFF', b'', b'', b'\xFE\xe9', b'\x00'], 'latin1') == 'é'
+ assert iter_decode_to_string([
+ b'', b'h\xe9', b'llo'], 'x-user-defined') == 'h\uF7E9llo'
+
+
+def test_iter_encode():
+ assert b''.join(iter_encode([], 'latin1')) == b''
+ assert b''.join(iter_encode([''], 'latin1')) == b''
+ assert b''.join(iter_encode(['é'], 'latin1')) == b'\xe9'
+ assert b''.join(iter_encode(['', 'é', '', ''], 'latin1')) == b'\xe9'
+ assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16')) == b'\xe9\x00'
+ assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16le')) == b'\xe9\x00'
+ assert b''.join(iter_encode(['', 'é', '', ''], 'utf-16be')) == b'\x00\xe9'
+ assert b''.join(iter_encode([
+ '', 'h\uF7E9', '', 'llo'], 'x-user-defined')) == b'h\xe9llo'
+
+
+def test_x_user_defined():
+ encoded = b'2,\x0c\x0b\x1aO\xd9#\xcb\x0f\xc9\xbbt\xcf\xa8\xca'
+ decoded = '2,\x0c\x0b\x1aO\uf7d9#\uf7cb\x0f\uf7c9\uf7bbt\uf7cf\uf7a8\uf7ca'
+ assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
+ assert encode(decoded, 'x-user-defined') == encoded
+ encoded = b'aa'
+ decoded = 'aa'
+ assert decode(encoded, 'x-user-defined') == (decoded, lookup('x-user-defined'))
+ assert encode(decoded, 'x-user-defined') == encoded
diff --git a/third_party/python/pip/pip/_vendor/webencodings/x_user_defined.py b/third_party/python/pip/pip/_vendor/webencodings/x_user_defined.py
new file mode 100644
index 0000000000..d16e326024
--- /dev/null
+++ b/third_party/python/pip/pip/_vendor/webencodings/x_user_defined.py
@@ -0,0 +1,325 @@
+# coding: utf-8
+"""
+
+ webencodings.x_user_defined
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ An implementation of the x-user-defined encoding.
+
+ :copyright: Copyright 2012 by Simon Sapin
+ :license: BSD, see LICENSE for details.
+
+"""
+
+from __future__ import unicode_literals
+
+import codecs
+
+
+### Codec APIs
+
+class Codec(codecs.Codec):
+
+ def encode(self, input, errors='strict'):
+ return codecs.charmap_encode(input, errors, encoding_table)
+
+ def decode(self, input, errors='strict'):
+ return codecs.charmap_decode(input, errors, decoding_table)
+
+
+class IncrementalEncoder(codecs.IncrementalEncoder):
+ def encode(self, input, final=False):
+ return codecs.charmap_encode(input, self.errors, encoding_table)[0]
+
+
+class IncrementalDecoder(codecs.IncrementalDecoder):
+ def decode(self, input, final=False):
+ return codecs.charmap_decode(input, self.errors, decoding_table)[0]
+
+
+class StreamWriter(Codec, codecs.StreamWriter):
+ pass
+
+
+class StreamReader(Codec, codecs.StreamReader):
+ pass
+
+
+### encodings module API
+
+codec_info = codecs.CodecInfo(
+ name='x-user-defined',
+ encode=Codec().encode,
+ decode=Codec().decode,
+ incrementalencoder=IncrementalEncoder,
+ incrementaldecoder=IncrementalDecoder,
+ streamreader=StreamReader,
+ streamwriter=StreamWriter,
+)
+
+
+### Decoding Table
+
+# Python 3:
+# for c in range(256): print(' %r' % chr(c if c < 128 else c + 0xF700))
+decoding_table = (
+ '\x00'
+ '\x01'
+ '\x02'
+ '\x03'
+ '\x04'
+ '\x05'
+ '\x06'
+ '\x07'
+ '\x08'
+ '\t'
+ '\n'
+ '\x0b'
+ '\x0c'
+ '\r'
+ '\x0e'
+ '\x0f'
+ '\x10'
+ '\x11'
+ '\x12'
+ '\x13'
+ '\x14'
+ '\x15'
+ '\x16'
+ '\x17'
+ '\x18'
+ '\x19'
+ '\x1a'
+ '\x1b'
+ '\x1c'
+ '\x1d'
+ '\x1e'
+ '\x1f'
+ ' '
+ '!'
+ '"'
+ '#'
+ '$'
+ '%'
+ '&'
+ "'"
+ '('
+ ')'
+ '*'
+ '+'
+ ','
+ '-'
+ '.'
+ '/'
+ '0'
+ '1'
+ '2'
+ '3'
+ '4'
+ '5'
+ '6'
+ '7'
+ '8'
+ '9'
+ ':'
+ ';'
+ '<'
+ '='
+ '>'
+ '?'
+ '@'
+ 'A'
+ 'B'
+ 'C'
+ 'D'
+ 'E'
+ 'F'
+ 'G'
+ 'H'
+ 'I'
+ 'J'
+ 'K'
+ 'L'
+ 'M'
+ 'N'
+ 'O'
+ 'P'
+ 'Q'
+ 'R'
+ 'S'
+ 'T'
+ 'U'
+ 'V'
+ 'W'
+ 'X'
+ 'Y'
+ 'Z'
+ '['
+ '\\'
+ ']'
+ '^'
+ '_'
+ '`'
+ 'a'
+ 'b'
+ 'c'
+ 'd'
+ 'e'
+ 'f'
+ 'g'
+ 'h'
+ 'i'
+ 'j'
+ 'k'
+ 'l'
+ 'm'
+ 'n'
+ 'o'
+ 'p'
+ 'q'
+ 'r'
+ 's'
+ 't'
+ 'u'
+ 'v'
+ 'w'
+ 'x'
+ 'y'
+ 'z'
+ '{'
+ '|'
+ '}'
+ '~'
+ '\x7f'
+ '\uf780'
+ '\uf781'
+ '\uf782'
+ '\uf783'
+ '\uf784'
+ '\uf785'
+ '\uf786'
+ '\uf787'
+ '\uf788'
+ '\uf789'
+ '\uf78a'
+ '\uf78b'
+ '\uf78c'
+ '\uf78d'
+ '\uf78e'
+ '\uf78f'
+ '\uf790'
+ '\uf791'
+ '\uf792'
+ '\uf793'
+ '\uf794'
+ '\uf795'
+ '\uf796'
+ '\uf797'
+ '\uf798'
+ '\uf799'
+ '\uf79a'
+ '\uf79b'
+ '\uf79c'
+ '\uf79d'
+ '\uf79e'
+ '\uf79f'
+ '\uf7a0'
+ '\uf7a1'
+ '\uf7a2'
+ '\uf7a3'
+ '\uf7a4'
+ '\uf7a5'
+ '\uf7a6'
+ '\uf7a7'
+ '\uf7a8'
+ '\uf7a9'
+ '\uf7aa'
+ '\uf7ab'
+ '\uf7ac'
+ '\uf7ad'
+ '\uf7ae'
+ '\uf7af'
+ '\uf7b0'
+ '\uf7b1'
+ '\uf7b2'
+ '\uf7b3'
+ '\uf7b4'
+ '\uf7b5'
+ '\uf7b6'
+ '\uf7b7'
+ '\uf7b8'
+ '\uf7b9'
+ '\uf7ba'
+ '\uf7bb'
+ '\uf7bc'
+ '\uf7bd'
+ '\uf7be'
+ '\uf7bf'
+ '\uf7c0'
+ '\uf7c1'
+ '\uf7c2'
+ '\uf7c3'
+ '\uf7c4'
+ '\uf7c5'
+ '\uf7c6'
+ '\uf7c7'
+ '\uf7c8'
+ '\uf7c9'
+ '\uf7ca'
+ '\uf7cb'
+ '\uf7cc'
+ '\uf7cd'
+ '\uf7ce'
+ '\uf7cf'
+ '\uf7d0'
+ '\uf7d1'
+ '\uf7d2'
+ '\uf7d3'
+ '\uf7d4'
+ '\uf7d5'
+ '\uf7d6'
+ '\uf7d7'
+ '\uf7d8'
+ '\uf7d9'
+ '\uf7da'
+ '\uf7db'
+ '\uf7dc'
+ '\uf7dd'
+ '\uf7de'
+ '\uf7df'
+ '\uf7e0'
+ '\uf7e1'
+ '\uf7e2'
+ '\uf7e3'
+ '\uf7e4'
+ '\uf7e5'
+ '\uf7e6'
+ '\uf7e7'
+ '\uf7e8'
+ '\uf7e9'
+ '\uf7ea'
+ '\uf7eb'
+ '\uf7ec'
+ '\uf7ed'
+ '\uf7ee'
+ '\uf7ef'
+ '\uf7f0'
+ '\uf7f1'
+ '\uf7f2'
+ '\uf7f3'
+ '\uf7f4'
+ '\uf7f5'
+ '\uf7f6'
+ '\uf7f7'
+ '\uf7f8'
+ '\uf7f9'
+ '\uf7fa'
+ '\uf7fb'
+ '\uf7fc'
+ '\uf7fd'
+ '\uf7fe'
+ '\uf7ff'
+)
+
+### Encoding table
+encoding_table = codecs.charmap_build(decoding_table)
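+
+# Minimal usage sketch (not part of the upstream module): bytes below 0x80
+# decode as ASCII, while bytes 0x80-0xFF map into the U+F780-U+F7FF Private
+# Use Area, so the original byte stays recoverable as codepoint - 0xF700.
+#
+#     decoded, length = codec_info.decode(b'a\x80\xff')
+#     assert decoded == 'a\uf780\uf7ff'
+#     assert codec_info.encode(decoded)[0] == b'a\x80\xff'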
diff --git a/third_party/python/pip/pip/py.typed b/third_party/python/pip/pip/py.typed
new file mode 100644
index 0000000000..493b53e4e7
--- /dev/null
+++ b/third_party/python/pip/pip/py.typed
@@ -0,0 +1,4 @@
+pip is a command line program. While it is implemented in Python, and so is
+available for import, you must not use pip's internal APIs in this way. Typing
+information is provided as a convenience only and is not a guarantee. Expect
+unannounced changes to the API and types in releases.
diff --git a/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/LICENSE b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/LICENSE
new file mode 100644
index 0000000000..64719ca9f5
--- /dev/null
+++ b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c). All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of pip-tools nor the names of its contributors may be
+ used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/METADATA b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/METADATA
new file mode 100644
index 0000000000..48f18ca73c
--- /dev/null
+++ b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/METADATA
@@ -0,0 +1,535 @@
+Metadata-Version: 2.1
+Name: pip-tools
+Version: 5.5.0
+Summary: pip-tools keeps your pinned dependencies fresh.
+Home-page: https://github.com/jazzband/pip-tools/
+Author: Vincent Driessen
+Author-email: me@nvie.com
+License: BSD
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: System :: Systems Administration
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7
+Requires-Dist: click (>=7)
+Requires-Dist: pip (>=20.1)
+Provides-Extra: coverage
+Requires-Dist: pytest-cov ; extra == 'coverage'
+Provides-Extra: testing
+Requires-Dist: mock ; extra == 'testing'
+Requires-Dist: pytest ; extra == 'testing'
+Requires-Dist: pytest-rerunfailures ; extra == 'testing'
+
+|jazzband| |pypi| |pyversions| |buildstatus-gha| |codecov|
+
+==================================
+pip-tools = pip-compile + pip-sync
+==================================
+
+A set of command line tools to help you keep your ``pip``-based packages fresh,
+even when you've pinned them. `You do pin them, right?`_ (In building your Python application and its dependencies for production, you want to make sure that your builds are predictable and deterministic.)
+
+.. image:: https://github.com/jazzband/pip-tools/raw/master/img/pip-tools-overview.png
+ :alt: pip-tools overview for phase II
+
+.. |buildstatus-gha| image:: https://github.com/jazzband/pip-tools/workflows/CI/badge.svg
+ :alt: GitHub Actions build status
+ :target: https://github.com/jazzband/pip-tools/actions?query=workflow%3ACI
+.. |codecov| image:: https://codecov.io/gh/jazzband/pip-tools/branch/master/graph/badge.svg
+ :alt: Coverage
+ :target: https://codecov.io/gh/jazzband/pip-tools
+.. |jazzband| image:: https://jazzband.co/static/img/badge.svg
+ :alt: Jazzband
+ :target: https://jazzband.co/
+.. |pypi| image:: https://img.shields.io/pypi/v/pip-tools.svg
+ :alt: PyPI version
+ :target: https://pypi.org/project/pip-tools/
+.. |pyversions| image:: https://img.shields.io/pypi/pyversions/pip-tools.svg
+ :alt: Supported Python versions
+ :target: https://pypi.org/project/pip-tools/
+.. _You do pin them, right?: http://nvie.com/posts/pin-your-packages/
+
+
+Installation
+============
+
+Similar to ``pip``, ``pip-tools`` must be installed in each of your project's
+`virtual environments`_:
+
+.. code-block:: bash
+
+ $ source /path/to/venv/bin/activate
+ (venv)$ python -m pip install pip-tools
+
+**Note**: all of the remaining example commands assume you've activated your
+project's virtual environment.
+
+.. _virtual environments: https://packaging.python.org/tutorials/installing-packages/#creating-virtual-environments
+
+Example usage for ``pip-compile``
+=================================
+
+The ``pip-compile`` command lets you compile a ``requirements.txt`` file from
+your dependencies, specified in either ``setup.py`` or ``requirements.in``.
+
+Run it with ``pip-compile`` or ``python -m piptools compile``. If you use
+multiple Python versions, you can run ``pip-compile`` as ``py -X.Y -m piptools
+compile`` on Windows and ``pythonX.Y -m piptools compile`` on other systems.
+
+``pip-compile`` should be run from the same virtual environment as your
+project so conditional dependencies that require a specific Python version,
+or other environment markers, resolve relative to your project's
+environment.
+
+**Note**: ensure you don't have an existing ``requirements.txt`` when you
+compile ``setup.py`` or ``requirements.in`` from scratch; otherwise, it
+might interfere.
+
+Requirements from ``setup.py``
+------------------------------
+
+Suppose you have a Django project, and want to pin it for production.
+If you have a ``setup.py`` with ``install_requires=['django']``, then run
+``pip-compile`` without any arguments:
+
+.. code-block:: bash
+
+ $ pip-compile
+ #
+ # This file is autogenerated by pip-compile
+ # To update, run:
+ #
+ # pip-compile
+ #
+ asgiref==3.2.3
+ # via django
+ django==3.0.3
+ # via my_django_project (setup.py)
+ pytz==2019.3
+ # via django
+ sqlparse==0.3.0
+ # via django
+
+``pip-compile`` will produce your ``requirements.txt``, with all the Django
+dependencies (and all underlying dependencies) pinned.
+
+Without ``setup.py``
+--------------------
+
+If you don't use ``setup.py`` (`it's easy to write one`_), you can create a
+``requirements.in`` file to declare the Django dependency:
+
+.. code-block:: ini
+
+ # requirements.in
+ django
+
+Now, run ``pip-compile requirements.in``:
+
+.. code-block:: bash
+
+ $ pip-compile requirements.in
+ #
+ # This file is autogenerated by pip-compile
+ # To update, run:
+ #
+ # pip-compile requirements.in
+ #
+ asgiref==3.2.3
+ # via django
+ django==3.0.3
+ # via -r requirements.in
+ pytz==2019.3
+ # via django
+ sqlparse==0.3.0
+ # via django
+
+And it will produce your ``requirements.txt``, with all the Django dependencies
+(and all underlying dependencies) pinned.
+
+.. _it's easy to write one: https://packaging.python.org/guides/distributing-packages-using-setuptools/#configuring-your-project
+
+Using hashes
+------------
+
+If you would like to use *Hash-Checking Mode* available in ``pip`` since
+version 8.0, ``pip-compile`` offers ``--generate-hashes`` flag:
+
+.. code-block:: bash
+
+ $ pip-compile --generate-hashes requirements.in
+ #
+ # This file is autogenerated by pip-compile
+ # To update, run:
+ #
+ # pip-compile --generate-hashes requirements.in
+ #
+ asgiref==3.2.3 \
+ --hash=sha256:7e06d934a7718bf3975acbf87780ba678957b87c7adc056f13b6215d610695a0 \
+ --hash=sha256:ea448f92fc35a0ef4b1508f53a04c4670255a3f33d22a81c8fc9c872036adbe5 \
+ # via django
+ django==3.0.3 \
+ --hash=sha256:2f1ba1db8648484dd5c238fb62504777b7ad090c81c5f1fd8d5eb5ec21b5f283 \
+ --hash=sha256:c91c91a7ad6ef67a874a4f76f58ba534f9208412692a840e1d125eb5c279cb0a \
+ # via -r requirements.in
+ pytz==2019.3 \
+ --hash=sha256:1c557d7d0e871de1f5ccd5833f60fb2550652da6be2693c1e02300743d21500d \
+ --hash=sha256:b02c06db6cf09c12dd25137e563b31700d3b80fcc4ad23abb7a315f2789819be \
+ # via django
+ sqlparse==0.3.0 \
+ --hash=sha256:40afe6b8d4b1117e7dff5504d7a8ce07d9a1b15aeeade8a2d10f130a834f8177 \
+ --hash=sha256:7c3dca29c022744e95b547e867cee89f4fce4373f3549ccd8797d8eb52cdb873 \
+ # via django
+
+Updating requirements
+---------------------
+
+To update all packages, periodically re-run ``pip-compile --upgrade``.
+
+To update a specific package to the latest or a specific version use the
+``--upgrade-package`` or ``-P`` flag:
+
+.. code-block:: bash
+
+ # only update the django package
+ $ pip-compile --upgrade-package django
+
+ # update both the django and requests packages
+ $ pip-compile --upgrade-package django --upgrade-package requests
+
+ # update the django package to the latest, and requests to v2.0.0
+ $ pip-compile --upgrade-package django --upgrade-package requests==2.0.0
+
+You can combine ``--upgrade`` and ``--upgrade-package`` in one command to
+provide constraints on the allowed upgrades. For example, to upgrade all
+packages whilst constraining ``requests`` to the latest version less than 3.0:
+
+.. code-block:: bash
+
+ $ pip-compile --upgrade --upgrade-package 'requests<3.0'
+
+Output File
+-----------
+
+To output the pinned requirements in a filename other than
+``requirements.txt``, use ``--output-file``. This might be useful for compiling
+multiple files, for example with different constraints on django to test a
+library with both versions using `tox <https://tox.readthedocs.io/en/latest/>`__:
+
+.. code-block:: bash
+
+ $ pip-compile --upgrade-package 'django<1.0' --output-file requirements-django0x.txt
+ $ pip-compile --upgrade-package 'django<2.0' --output-file requirements-django1x.txt
+
+Or to output to standard output, use ``--output-file=-``:
+
+.. code-block:: bash
+
+ $ pip-compile --output-file=- > requirements.txt
+ $ pip-compile - --output-file=- < requirements.in > requirements.txt
+
+Forwarding options to ``pip``
+-----------------------------
+
+Any valid ``pip`` flags or arguments may be passed on with ``pip-compile``'s
+``--pip-args`` option, e.g.
+
+.. code-block:: bash
+
+ $ pip-compile requirements.in --pip-args '--retries 10 --timeout 30'
+
+Configuration
+-------------
+
+You might be wrapping the ``pip-compile`` command in another script. To avoid
+confusing consumers of your custom script, you can override the update command
+generated at the top of requirements files by setting the
+``CUSTOM_COMPILE_COMMAND`` environment variable.
+
+.. code-block:: bash
+
+ $ CUSTOM_COMPILE_COMMAND="./pipcompilewrapper" pip-compile requirements.in
+ #
+ # This file is autogenerated by pip-compile
+ # To update, run:
+ #
+ # ./pipcompilewrapper
+ #
+ asgiref==3.2.3
+ # via django
+ django==3.0.3
+ # via -r requirements.in
+ pytz==2019.3
+ # via django
+ sqlparse==0.3.0
+ # via django
+
+Workflow for layered requirements
+---------------------------------
+
+If you have different environments that you need to install different but
+compatible packages for, then you can create layered requirements files and use
+one layer to constrain the other.
+
+For example, if you have a Django project where you want the newest ``2.1``
+release in production, and while developing you want to use the Django debug
+toolbar, then you can create two ``*.in`` files, one for each layer:
+
+.. code-block:: ini
+
+ # requirements.in
+ django<2.2
+
+At the top of the development requirements file ``dev-requirements.in``, use
+``-c requirements.txt`` to constrain the dev requirements to packages already
+selected for production in ``requirements.txt``.
+
+.. code-block:: ini
+
+ # dev-requirements.in
+ -c requirements.txt
+ django-debug-toolbar
+
+First, compile ``requirements.txt`` as usual:
+
+.. code-block:: bash
+
+ $ pip-compile
+ #
+ # This file is autogenerated by pip-compile
+ # To update, run:
+ #
+ # pip-compile
+ #
+ django==2.1.15
+ # via -r requirements.in
+ pytz==2019.3
+ # via django
+
+
+Now compile the dev requirements; the ``requirements.txt`` file is used as
+a constraint:
+
+.. code-block:: bash
+
+ $ pip-compile dev-requirements.in
+ #
+ # This file is autogenerated by pip-compile
+ # To update, run:
+ #
+ # pip-compile dev-requirements.in
+ #
+ django-debug-toolbar==2.2
+ # via -r dev-requirements.in
+ django==2.1.15
+ # via
+ # -c requirements.txt
+ # django-debug-toolbar
+ pytz==2019.3
+ # via
+ # -c requirements.txt
+ # django
+ sqlparse==0.3.0
+ # via django-debug-toolbar
+
+As you can see above, even though a ``2.2`` release of Django is available, the
+dev requirements only include a ``2.1`` version of Django because they were
+constrained. Now both compiled requirements files can be installed safely in
+the dev environment.
+
+To install requirements in the production stage, use:
+
+.. code-block:: bash
+
+ $ pip-sync
+
+To install requirements in the development stage, run:
+
+.. code-block:: bash
+
+ $ pip-sync requirements.txt dev-requirements.txt
+
+
+Version control integration
+---------------------------
+
+You might use ``pip-compile`` as a hook for the `pre-commit <https://github.com/pre-commit/pre-commit>`_.
+See `pre-commit docs <https://pre-commit.com/>`_ for instructions.
+Sample ``.pre-commit-config.yaml``:
+
+.. code-block:: yaml
+
+ repos:
+ - repo: https://github.com/jazzband/pip-tools
+ rev: 5.0.0
+ hooks:
+ - id: pip-compile
+
+You might want to customize ``pip-compile`` args by configuring ``args`` and/or ``files``, for example:
+
+.. code-block:: yaml
+
+ repos:
+ - repo: https://github.com/jazzband/pip-tools
+ rev: 5.0.0
+ hooks:
+ - id: pip-compile
+ files: ^requirements/production\.(in|txt)$
+ args: [--index-url=https://example.com, requirements/production.in]
+
+
+Example usage for ``pip-sync``
+==============================
+
+Now that you have a ``requirements.txt``, you can use ``pip-sync`` to update
+your virtual environment to reflect exactly what's in there. This will
+install/upgrade/uninstall everything necessary to match the
+``requirements.txt`` contents.
+
+Run it with ``pip-sync`` or ``python -m piptools sync``. If you use multiple
+Python versions, you can also run ``py -X.Y -m piptools sync`` on Windows and
+``pythonX.Y -m piptools sync`` on other systems.
+
+``pip-sync`` must be installed into and run from the same virtual
+environment as your project to identify which packages to install
+or upgrade.
+
+**Be careful**: ``pip-sync`` is meant to be used only with a
+``requirements.txt`` generated by ``pip-compile``.
+
+.. code-block:: bash
+
+ $ pip-sync
+ Uninstalling flake8-2.4.1:
+ Successfully uninstalled flake8-2.4.1
+ Collecting click==4.1
+ Downloading click-4.1-py2.py3-none-any.whl (62kB)
+ 100% |................................| 65kB 1.8MB/s
+ Found existing installation: click 4.0
+ Uninstalling click-4.0:
+ Successfully uninstalled click-4.0
+ Successfully installed click-4.1
+
+To sync multiple ``*.txt`` dependency lists, just pass them in via command
+line arguments, e.g.
+
+.. code-block:: bash
+
+ $ pip-sync dev-requirements.txt requirements.txt
+
+If no files are passed in, ``pip-sync`` defaults to ``requirements.txt``.
+
+Any valid ``pip install`` flags or arguments may be passed with ``pip-sync``'s
+``--pip-args`` option, e.g.
+
+.. code-block:: bash
+
+ $ pip-sync requirements.txt --pip-args '--no-cache-dir --no-deps'
+
+**Note**: ``pip-sync`` will not upgrade or uninstall packaging tools like
+``setuptools``, ``pip``, or ``pip-tools`` itself. Use ``python -m pip install --upgrade``
+to upgrade those packages.
+
+Should I commit ``requirements.in`` and ``requirements.txt`` to source control?
+===============================================================================
+
+Generally, yes. If you want a reproducible environment installation available from your source control,
+then yes, you should commit both ``requirements.in`` and ``requirements.txt`` to source control.
+
+Note that if you are deploying on multiple Python environments (read the section below),
+then you must commit a separate output file for each Python environment.
+We suggest using the ``{env}-requirements.txt`` format
+(ex: ``win32-py2.7-requirements.txt``, ``macos-py3.6-requirements.txt``, etc.).
+
+
+Cross-environment usage of ``requirements.in``/``requirements.txt`` and ``pip-compile``
+=======================================================================================
+
+The dependencies of a package can change depending on the Python environment in which it
+is installed. Here, we define a Python environment as the combination of Operating
+System, Python version (2.7, 3.6, etc.), and Python implementation (CPython, PyPy,
+etc.). For an exact definition, refer to the possible combinations of `PEP 508
+environment markers`_.
+
+As the resulting ``requirements.txt`` can differ per environment, users must
+execute ``pip-compile`` **on each Python environment separately** to generate a
+``requirements.txt`` valid for that environment. The same ``requirements.in`` can
+be used as the source file for all environments, using `PEP 508 environment markers`_ as
+needed, the same way it would be done for regular ``pip`` cross-environment usage.
+
+If the generated ``requirements.txt`` remains exactly the same for all Python
+environments, then it can be used across Python environments safely. **But** users
+should be careful, as any package update can introduce environment-dependent
+dependencies, making any newly generated ``requirements.txt`` environment-dependent too.
+As a general rule, users should still execute ``pip-compile``
+on each targeted Python environment to avoid issues.
+
+.. _PEP 508 environment markers: https://www.python.org/dev/peps/pep-0508/#environment-markers
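+
+To see why compiled output can differ, note that an environment marker is just
+a boolean expression evaluated against the running interpreter. A minimal
+sketch using the third-party ``packaging`` library (which ``pip`` vendors;
+this snippet is illustrative and not part of ``pip-tools`` itself):
+
+.. code-block:: python
+
+    # Sketch: evaluate a PEP 508 environment marker for this interpreter.
+    from packaging.markers import Marker
+
+    marker = Marker('python_version < "3.0"')
+    # True on Python 2.7, False on Python 3.x -- so a requirement guarded by
+    # this marker appears in some compiled requirements.txt files but not others.
+    print(marker.evaluate())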
+
+Other useful tools
+==================
+
+- `pipdeptree`_ to print the dependency tree of the installed packages.
+- ``requirements.in``/``requirements.txt`` syntax highlighting:
+
+ * `requirements.txt.vim`_ for Vim.
+ * `Python extension for VS Code`_ for VS Code.
+ * `pip-requirements.el`_ for Emacs.
+
+.. _pipdeptree: https://github.com/naiquevin/pipdeptree
+.. _requirements.txt.vim: https://github.com/raimon49/requirements.txt.vim
+.. _Python extension for VS Code: https://marketplace.visualstudio.com/items?itemName=ms-python.python
+.. _pip-requirements.el: https://github.com/Wilfred/pip-requirements.el
+
+
+Deprecations
+============
+
+This section lists ``pip-tools`` features that are currently deprecated.
+
+- ``--index/--no-index`` command-line options; use
+  ``--emit-index-url/--no-emit-index-url`` instead (since 5.2.0).
+
+- In future versions, the ``--allow-unsafe`` behavior will be enabled by
+  default. Use ``--no-allow-unsafe`` to keep the old behavior. It is
+  recommended to pass ``--allow-unsafe`` now to adapt to the upcoming
+  change.
+
+Versions and compatibility
+==========================
+
+The table below summarizes the latest ``pip-tools`` versions with the required
+``pip`` and Python versions. Generally, ``pip-tools`` supports the same Python
+versions as the required ``pip`` versions.
+
++---------------+----------------+----------------+
+| pip-tools | pip | Python |
++===============+================+================+
+| 4.5.* | 8.1.3 - 20.0.2 | 2.7, 3.5 - 3.8 |
++---------------+----------------+----------------+
+| 5.0.0 - 5.3.0 | 20.0 - 20.1.1 | 2.7, 3.5 - 3.8 |
++---------------+----------------+----------------+
+| 5.4.0 | 20.1 - 20.3.* | 2.7, 3.5 - 3.8 |
++---------------+----------------+----------------+
+| >= 5.5.0 | 20.1 - 20.3.* | 2.7, 3.5 - 3.9 |
++---------------+----------------+----------------+
+
+
diff --git a/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/RECORD b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/RECORD
new file mode 100644
index 0000000000..35941ede11
--- /dev/null
+++ b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/RECORD
@@ -0,0 +1,28 @@
+piptools/__init__.py,sha256=8thn-dOvLOIMk8yl333hqovhg76G31sLazxfZ01E3NQ,379
+piptools/__main__.py,sha256=AyR6tAMb4vvaRLpmGIOAraTVt-XRdy3J3A2TcU_D_tM,267
+piptools/cache.py,sha256=ChWcRnWmG7TDhdTTNI4Hq_hAj5emFBXzT56cMh3sMJY,5694
+piptools/click.py,sha256=6G25l-dJ7Bx7KqI9EOr86rHfN_r-1x_Ums1uMY7BM-8,128
+piptools/exceptions.py,sha256=IxtKvKycggxF6ilRRS-dusqIYa8fQp0czqiZ-OSqoP4,2074
+piptools/locations.py,sha256=tQNE273jbYQN2jSIbPO171VAdT44aWHCIz-sl352Zxg,776
+piptools/logging.py,sha256=CCQlSuo5NbjxusFef8tM3IGkS2upYU88zFnSlYEXCAk,1492
+piptools/resolver.py,sha256=WjIl95AMr3nCcS9Tit07EpohSnhzkUUPBb9cmlbT9sY,16172
+piptools/sync.py,sha256=co7EGsnrE6pZ6Sco5vZgprvZ2m-rXM6zwX188bNSN9o,7048
+piptools/utils.py,sha256=gVvsb5OqI4DHvSU5XMrIppIjtBAnGimFGBnRVWyLjzg,11679
+piptools/writer.py,sha256=-2D7aqLHe64DOJ9c5iOhEYTKLtrIe2pFmsDTUgtpsNo,8205
+piptools/_compat/__init__.py,sha256=3T3-yQL4Wjej3xmRq716nnOwOXOsaVfNYl4BXjegPaA,606
+piptools/_compat/contextlib.py,sha256=4K3xvf7Ujd_KfUVkXRVLxvyQ2L36072t02Vifc7c6TU,602
+piptools/_compat/pip_compat.py,sha256=RfjQn6ezQqHOLpo1uMka_pI9MYN55OHnrdy3XaKKkTk,701
+piptools/_compat/tempfile.py,sha256=m5d22eGPL2vFwUi0kp33_Tr9UQg1QBQnQ1LJ04HlAzI,2862
+piptools/repositories/__init__.py,sha256=AuVddJd2QqAuTgmmF4JEm1JZ8O7BS9oFMnRmzVOSBxc,95
+piptools/repositories/base.py,sha256=YQdb5XUo-24fjUpS8PXMCWMQY57KGV8byonRr3ndgiE,1887
+piptools/repositories/local.py,sha256=cPRdAGCNFHguAuk2k47e7_XUas9Z_Ct-rbLd9WY59YI,3230
+piptools/repositories/pypi.py,sha256=FEehUjtegCl4A0UGfEvVaMnx6XoaX6YzExlG4WAlkCg,20182
+piptools/scripts/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+piptools/scripts/compile.py,sha256=wrMT0IDmlfK9Q2BY6zwiDoWMEaGond86NEMIclKPcBc,15385
+piptools/scripts/sync.py,sha256=bySckxKFNPfWxbZtFA5vkgfAHjAS7Sqdhi1DuG2PVq4,6383
+pip_tools-5.5.0.dist-info/LICENSE,sha256=3a_52MBuKjuNIlTVi9c-oEzjQZfYDMMrCXlIXgZmX5o,1500
+pip_tools-5.5.0.dist-info/METADATA,sha256=s5H4BeCtxh9SskntX3JnSJP8IuPycf2780T0DMpBdPY,18712
+pip_tools-5.5.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+pip_tools-5.5.0.dist-info/entry_points.txt,sha256=kxgyoCOYYMl9wcS2pOl6ruVZTy1h9Sbk8SUJ1gifJ8s,99
+pip_tools-5.5.0.dist-info/top_level.txt,sha256=_16nvfow-EENN1JQ_WjY7bFroJvb5IYH5xLIHc3kd7U,9
+pip_tools-5.5.0.dist-info/RECORD,,
diff --git a/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/WHEEL b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/WHEEL
new file mode 100644
index 0000000000..01b8fc7d4a
--- /dev/null
+++ b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/entry_points.txt b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000..383fce0bf0
--- /dev/null
+++ b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/entry_points.txt
@@ -0,0 +1,4 @@
+[console_scripts]
+pip-compile = piptools.scripts.compile:cli
+pip-sync = piptools.scripts.sync:cli
+
diff --git a/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/top_level.txt b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..9f14c19c7f
--- /dev/null
+++ b/third_party/python/pip_tools/pip_tools-5.5.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+piptools
diff --git a/third_party/python/pip_tools/piptools/__init__.py b/third_party/python/pip_tools/piptools/__init__.py
new file mode 100644
index 0000000000..9f0c95aa56
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/__init__.py
@@ -0,0 +1,11 @@
+import locale
+
+from piptools.click import secho
+
+# Needed for locale.getpreferredencoding(False) to work
+# in pip._internal.utils.encoding.auto_decode
+try:
+ locale.setlocale(locale.LC_ALL, "")
+except locale.Error as e: # pragma: no cover
+ # setlocale can apparently crash if locale are uninitialized
+ secho("Ignoring error when setting locale: {}".format(e), fg="red")
diff --git a/third_party/python/pip_tools/piptools/__main__.py b/third_party/python/pip_tools/piptools/__main__.py
new file mode 100644
index 0000000000..2d8b75e85d
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/__main__.py
@@ -0,0 +1,17 @@
+import click
+
+from piptools.scripts import compile, sync
+
+
+@click.group()
+def cli():
+ pass
+
+
+cli.add_command(compile.cli, "compile")
+cli.add_command(sync.cli, "sync")
+
+
+# Enable ``python -m piptools ...``.
+if __name__ == "__main__": # pragma: no branch
+ cli()
diff --git a/third_party/python/pip_tools/piptools/_compat/__init__.py b/third_party/python/pip_tools/piptools/_compat/__init__.py
new file mode 100644
index 0000000000..de28628db2
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/_compat/__init__.py
@@ -0,0 +1,26 @@
+# coding: utf-8
+# flake8: noqa
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import errno
+import os
+
+from pip._vendor import six
+
+from .pip_compat import PIP_VERSION, parse_requirements
+
+if six.PY2:
+ from .tempfile import TemporaryDirectory
+else:
+ from tempfile import TemporaryDirectory
+
+
+def makedirs(name, mode=0o777, exist_ok=False):
+ if six.PY2:
+ try:
+ os.makedirs(name, mode)
+ except OSError as e:
+ if not exist_ok or e.errno != errno.EEXIST:
+ raise
+ else:
+ os.makedirs(name, mode, exist_ok)
diff --git a/third_party/python/pip_tools/piptools/_compat/contextlib.py b/third_party/python/pip_tools/piptools/_compat/contextlib.py
new file mode 100644
index 0000000000..04039ccb01
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/_compat/contextlib.py
@@ -0,0 +1,18 @@
+# Ported from python 3.7 contextlib.py
+class nullcontext(object):
+ """Context manager that does no additional processing.
+ Used as a stand-in for a normal context manager, when a particular
+ block of code is only sometimes used with a normal context manager:
+ cm = optional_cm if condition else nullcontext()
+ with cm:
+ # Perform operation, using optional_cm if condition is True
+ """
+
+ def __init__(self, enter_result=None):
+ self.enter_result = enter_result
+
+ def __enter__(self):
+ return self.enter_result
+
+ def __exit__(self, *excinfo):
+ pass
diff --git a/third_party/python/pip_tools/piptools/_compat/pip_compat.py b/third_party/python/pip_tools/piptools/_compat/pip_compat.py
new file mode 100644
index 0000000000..6cd24a0ff9
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/_compat/pip_compat.py
@@ -0,0 +1,18 @@
+# -*- coding=utf-8 -*-
+from __future__ import absolute_import
+
+import pip
+from pip._internal.req import parse_requirements as _parse_requirements
+from pip._internal.req.constructors import install_req_from_parsed_requirement
+from pip._vendor.packaging.version import parse as parse_version
+
+PIP_VERSION = tuple(map(int, parse_version(pip.__version__).base_version.split(".")))
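+# Illustrative: pip.__version__ == "20.3.4" gives PIP_VERSION == (20, 3, 4),
+# which callers compare against tuples such as (20, 2).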
+
+
+def parse_requirements(
+ filename, session, finder=None, options=None, constraint=False, isolated=False
+):
+ for parsed_req in _parse_requirements(
+ filename, session, finder=finder, options=options, constraint=constraint
+ ):
+ yield install_req_from_parsed_requirement(parsed_req, isolated=isolated)
diff --git a/third_party/python/pip_tools/piptools/_compat/tempfile.py b/third_party/python/pip_tools/piptools/_compat/tempfile.py
new file mode 100644
index 0000000000..dc7e9ef997
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/_compat/tempfile.py
@@ -0,0 +1,88 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function
+
+import os as _os
+import sys as _sys
+import warnings as _warnings
+from tempfile import mkdtemp
+
+
+class TemporaryDirectory(object):
+ """Create and return a temporary directory. This has the same
+ behavior as mkdtemp but can be used as a context manager. For
+ example:
+
+ with TemporaryDirectory() as tmpdir:
+ ...
+
+ Upon exiting the context, the directory and everything contained
+ in it are removed.
+ """
+
+ def __init__(self, suffix="", prefix="tmp", dir=None):
+ self._closed = False
+ self.name = None # Handle mkdtemp raising an exception
+ self.name = mkdtemp(suffix, prefix, dir)
+
+ def __repr__(self):
+ return "<{} {!r}>".format(self.__class__.__name__, self.name)
+
+ def __enter__(self):
+ return self.name
+
+ def cleanup(self):
+ if self.name and not self._closed:
+ try:
+ self._rmtree(self.name)
+ except (TypeError, AttributeError) as ex:
+ # Issue #10188: Emit a warning on stderr
+ # if the directory could not be cleaned
+ # up due to missing globals
+ if "None" not in str(ex):
+ raise
+ print(
+ "ERROR: {!r} while cleaning up {!r}".format(ex, self),
+ file=_sys.stderr,
+ )
+ return
+ self._closed = True
+
+ def __exit__(self, exc, value, tb):
+ self.cleanup()
+
+ def __del__(self):
+ # Issue a ResourceWarning if implicit cleanup needed
+ self.cleanup()
+
+ # XXX (ncoghlan): The following code attempts to make
+ # this class tolerant of the module nulling out process
+ # that happens during CPython interpreter shutdown
+ # Alas, it doesn't actually manage it. See issue #10188
+ _listdir = staticmethod(_os.listdir)
+ _path_join = staticmethod(_os.path.join)
+ _isdir = staticmethod(_os.path.isdir)
+ _islink = staticmethod(_os.path.islink)
+ _remove = staticmethod(_os.remove)
+ _rmdir = staticmethod(_os.rmdir)
+ _warn = _warnings.warn
+
+ def _rmtree(self, path):
+ # Essentially a stripped down version of shutil.rmtree. We can't
+ # use globals because they may be None'ed out at shutdown.
+ for name in self._listdir(path):
+ fullname = self._path_join(path, name)
+ try:
+ isdir = self._isdir(fullname) and not self._islink(fullname)
+ except OSError:
+ isdir = False
+ if isdir:
+ self._rmtree(fullname)
+ else:
+ try:
+ self._remove(fullname)
+ except OSError:
+ pass
+ try:
+ self._rmdir(path)
+ except OSError:
+ pass
diff --git a/third_party/python/pip_tools/piptools/cache.py b/third_party/python/pip_tools/piptools/cache.py
new file mode 100644
index 0000000000..301d38bd52
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/cache.py
@@ -0,0 +1,173 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import errno
+import json
+import os
+import platform
+import sys
+
+from pip._vendor.packaging.requirements import Requirement
+
+from ._compat import makedirs
+from .exceptions import PipToolsError
+from .utils import as_tuple, key_from_req, lookup_table
+
+_PEP425_PY_TAGS = {"cpython": "cp", "pypy": "pp", "ironpython": "ip", "jython": "jy"}
+
+
+def _implementation_name():
+ """similar to PEP 425, however the minor version is separated from the
+ major to differentation "3.10" and "31.0".
+ """
+ implementation_name = platform.python_implementation().lower()
+ implementation = _PEP425_PY_TAGS.get(implementation_name, "??")
+ return "{}{}.{}".format(implementation, *sys.version_info)
+
+
+class CorruptCacheError(PipToolsError):
+ def __init__(self, path):
+ self.path = path
+
+ def __str__(self):
+ lines = [
+ "The dependency cache seems to have been corrupted.",
+ "Inspect, or delete, the following file:",
+ " {}".format(self.path),
+ ]
+ return os.linesep.join(lines)
+
+
+def read_cache_file(cache_file_path):
+ with open(cache_file_path, "r") as cache_file:
+ try:
+ doc = json.load(cache_file)
+ except ValueError:
+ raise CorruptCacheError(cache_file_path)
+
+ # Check version and load the contents
+ if doc["__format__"] != 1:
+ raise ValueError("Unknown cache file format")
+ return doc["dependencies"]
+
+
+class DependencyCache(object):
+ """
+ Creates a new persistent dependency cache for the current Python version.
+ The cache file is written to the appropriate user cache dir for the
+ current platform, i.e.
+
+ ~/.cache/pip-tools/depcache-pyX.Y.json
+
+ Where py indicates the Python implementation.
+ Where X.Y indicates the Python version.
+ """
+
+ def __init__(self, cache_dir):
+ makedirs(cache_dir, exist_ok=True)
+ cache_filename = "depcache-{}.json".format(_implementation_name())
+
+ self._cache_file = os.path.join(cache_dir, cache_filename)
+ self._cache = None
+
+ @property
+ def cache(self):
+ """
+ The dictionary that is the actual in-memory cache. This property
+ lazily loads the cache from disk.
+ """
+ if self._cache is None:
+ self.read_cache()
+ return self._cache
+
+ def as_cache_key(self, ireq):
+ """
+ Given a requirement, return its cache key. This behavior is a little weird
+ in order to allow backwards compatibility with cache files. For a requirement
+ without extras, this will return, for example:
+
+ ("ipython", "2.1.0")
+
+ For a requirement with extras, the extras will be comma-separated and appended
+ to the version, inside brackets, like so:
+
+ ("ipython", "2.1.0[nbconvert,notebook]")
+ """
+ name, version, extras = as_tuple(ireq)
+ if not extras:
+ extras_string = ""
+ else:
+ extras_string = "[{}]".format(",".join(extras))
+ return name, "{}{}".format(version, extras_string)
+
+ def read_cache(self):
+ """Reads the cached contents into memory."""
+ try:
+ self._cache = read_cache_file(self._cache_file)
+ except IOError as e:
+ if e.errno != errno.ENOENT:
+ raise
+ self._cache = {}
+
+ def write_cache(self):
+ """Writes the cache to disk as JSON."""
+ doc = {"__format__": 1, "dependencies": self._cache}
+ with open(self._cache_file, "w") as f:
+ json.dump(doc, f, sort_keys=True)
+
+ def clear(self):
+ self._cache = {}
+ self.write_cache()
+
+ def __contains__(self, ireq):
+ pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
+ return pkgversion_and_extras in self.cache.get(pkgname, {})
+
+ def __getitem__(self, ireq):
+ pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
+ return self.cache[pkgname][pkgversion_and_extras]
+
+ def __setitem__(self, ireq, values):
+ pkgname, pkgversion_and_extras = self.as_cache_key(ireq)
+ self.cache.setdefault(pkgname, {})
+ self.cache[pkgname][pkgversion_and_extras] = values
+ self.write_cache()
+
+ def reverse_dependencies(self, ireqs):
+ """
+ Returns a lookup table of reverse dependencies for all the given ireqs.
+
+ Since this is all static, it only works if the dependency cache
+        contains the complete data; otherwise you end up with a partial view.
+ This is typically no problem if you use this function after the entire
+ dependency tree is resolved.
+ """
+ ireqs_as_cache_values = [self.as_cache_key(ireq) for ireq in ireqs]
+ return self._reverse_dependencies(ireqs_as_cache_values)
+
+ def _reverse_dependencies(self, cache_keys):
+ """
+ Returns a lookup table of reverse dependencies for all the given cache keys.
+
+ Example input:
+
+ [('pep8', '1.5.7'),
+ ('flake8', '2.4.0'),
+ ('mccabe', '0.3'),
+ ('pyflakes', '0.8.1')]
+
+ Example output:
+
+ {'pep8': ['flake8'],
+ 'flake8': [],
+ 'mccabe': ['flake8'],
+ 'pyflakes': ['flake8']}
+
+ """
+ # First, collect all the dependencies into a sequence of (parent, child)
+ # tuples, like [('flake8', 'pep8'), ('flake8', 'mccabe'), ...]
+ return lookup_table(
+ (key_from_req(Requirement(dep_name)), name)
+ for name, version_and_extras in cache_keys
+ for dep_name in self.cache[name][version_and_extras]
+ )
diff --git a/third_party/python/pip_tools/piptools/click.py b/third_party/python/pip_tools/piptools/click.py
new file mode 100644
index 0000000000..86f1612c6a
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/click.py
@@ -0,0 +1,6 @@
+from __future__ import absolute_import
+
+import click
+from click import * # noqa
+
+click.disable_unicode_literals_warning = True
diff --git a/third_party/python/pip_tools/piptools/exceptions.py b/third_party/python/pip_tools/piptools/exceptions.py
new file mode 100644
index 0000000000..5278972741
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/exceptions.py
@@ -0,0 +1,66 @@
+from pip._internal.utils.misc import redact_auth_from_url
+
+
+class PipToolsError(Exception):
+ pass
+
+
+class NoCandidateFound(PipToolsError):
+ def __init__(self, ireq, candidates_tried, finder):
+ self.ireq = ireq
+ self.candidates_tried = candidates_tried
+ self.finder = finder
+
+ def __str__(self):
+ versions = []
+ pre_versions = []
+
+ for candidate in sorted(self.candidates_tried):
+ version = str(candidate.version)
+ if candidate.version.is_prerelease:
+ pre_versions.append(version)
+ else:
+ versions.append(version)
+
+ lines = ["Could not find a version that matches {}".format(self.ireq)]
+
+ if versions:
+ lines.append("Tried: {}".format(", ".join(versions)))
+
+ if pre_versions:
+ if self.finder.allow_all_prereleases:
+ line = "Tried"
+ else:
+ line = "Skipped"
+
+ line += " pre-versions: {}".format(", ".join(pre_versions))
+ lines.append(line)
+
+ if versions or pre_versions:
+ lines.append(
+ "There are incompatible versions in the resolved dependencies:"
+ )
+ source_ireqs = getattr(self.ireq, "_source_ireqs", [])
+ lines.extend(" {}".format(ireq) for ireq in source_ireqs)
+ else:
+ redacted_urls = tuple(
+ redact_auth_from_url(url) for url in self.finder.index_urls
+ )
+ lines.append("No versions found")
+ lines.append(
+ "{} {} reachable?".format(
+ "Were" if len(redacted_urls) > 1 else "Was",
+ " or ".join(redacted_urls),
+ )
+ )
+ return "\n".join(lines)
+
+
+class IncompatibleRequirements(PipToolsError):
+ def __init__(self, ireq_a, ireq_b):
+ self.ireq_a = ireq_a
+ self.ireq_b = ireq_b
+
+ def __str__(self):
+ message = "Incompatible requirements found: {} and {}"
+ return message.format(self.ireq_a, self.ireq_b)
diff --git a/third_party/python/pip_tools/piptools/locations.py b/third_party/python/pip_tools/piptools/locations.py
new file mode 100644
index 0000000000..9ca0ffe436
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/locations.py
@@ -0,0 +1,25 @@
+import os
+from shutil import rmtree
+
+from pip._internal.utils.appdirs import user_cache_dir
+
+from .click import secho
+
+# The user_cache_dir helper comes straight from pip itself
+CACHE_DIR = user_cache_dir("pip-tools")
+
+# NOTE
+# We used to store the cache dir under ~/.pip-tools, which is not the
+# preferred place to store caches for any platform. This has been addressed
+# in pip-tools==1.0.5, but to be good citizens, we point this out explicitly
+# to the user when this directory is still found.
+LEGACY_CACHE_DIR = os.path.expanduser("~/.pip-tools")
+
+if os.path.exists(LEGACY_CACHE_DIR):
+ secho(
+ "Removing old cache dir {} (new cache dir is {})".format(
+ LEGACY_CACHE_DIR, CACHE_DIR
+ ),
+ fg="yellow",
+ )
+ rmtree(LEGACY_CACHE_DIR)
diff --git a/third_party/python/pip_tools/piptools/logging.py b/third_party/python/pip_tools/piptools/logging.py
new file mode 100644
index 0000000000..dcf068f7a2
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/logging.py
@@ -0,0 +1,62 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import contextlib
+import logging
+import sys
+
+from . import click
+
+# Initialise the builtin logging module for other components using it.
+# Ex: pip
+logging.basicConfig()
+
+
+class LogContext(object):
+ stream = sys.stderr
+
+ def __init__(self, verbosity=0, indent_width=2):
+ self.verbosity = verbosity
+ self.current_indent = 0
+ self._indent_width = indent_width
+
+ def log(self, message, *args, **kwargs):
+ kwargs.setdefault("err", True)
+ prefix = " " * self.current_indent
+ click.secho(prefix + message, *args, **kwargs)
+
+ def debug(self, *args, **kwargs):
+ if self.verbosity >= 1:
+ self.log(*args, **kwargs)
+
+ def info(self, *args, **kwargs):
+ if self.verbosity >= 0:
+ self.log(*args, **kwargs)
+
+ def warning(self, *args, **kwargs):
+ kwargs.setdefault("fg", "yellow")
+ self.log(*args, **kwargs)
+
+ def error(self, *args, **kwargs):
+ kwargs.setdefault("fg", "red")
+ self.log(*args, **kwargs)
+
+ def _indent(self):
+ self.current_indent += self._indent_width
+
+ def _dedent(self):
+ self.current_indent -= self._indent_width
+
+ @contextlib.contextmanager
+ def indentation(self):
+ """
+ Increase indentation.
+ """
+ self._indent()
+ try:
+ yield
+ finally:
+ self._dedent()
+
+
+log = LogContext()
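+
+# Usage sketch (assumed, not upstream): messages logged inside an
+# ``indentation()`` block gain one extra level (two spaces) of prefix:
+#
+#     log.info("resolving")
+#     with log.indentation():
+#         log.info("django==3.0.3")  # rendered as "  django==3.0.3"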
diff --git a/third_party/python/pip_tools/piptools/repositories/__init__.py b/third_party/python/pip_tools/piptools/repositories/__init__.py
new file mode 100644
index 0000000000..ce5142e8c6
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/repositories/__init__.py
@@ -0,0 +1,3 @@
+# flake8: noqa
+from .local import LocalRequirementsRepository
+from .pypi import PyPIRepository
diff --git a/third_party/python/pip_tools/piptools/repositories/base.py b/third_party/python/pip_tools/piptools/repositories/base.py
new file mode 100644
index 0000000000..54849cb7f8
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/repositories/base.py
@@ -0,0 +1,57 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+from abc import ABCMeta, abstractmethod
+from contextlib import contextmanager
+
+from pip._vendor.six import add_metaclass
+
+
+@add_metaclass(ABCMeta)
+class BaseRepository(object):
+ def clear_caches(self):
+ """Should clear any caches used by the implementation."""
+
+ @abstractmethod
+ @contextmanager
+ def freshen_build_caches(self):
+ """Should start with fresh build/source caches."""
+
+ @abstractmethod
+ def find_best_match(self, ireq):
+ """
+ Return a Version object that indicates the best match for the given
+ InstallRequirement according to the repository.
+ """
+
+ @abstractmethod
+ def get_dependencies(self, ireq):
+ """
+ Given a pinned, URL, or editable InstallRequirement, returns a set of
+ dependencies (also InstallRequirements, but not necessarily pinned).
+ They indicate the secondary dependencies for the given requirement.
+ """
+
+ @abstractmethod
+ def get_hashes(self, ireq):
+ """
+        Given a pinned InstallRequirement, returns a set of hashes that represent
+ all of the files for a given requirement. It is not acceptable for an
+ editable or unpinned requirement to be passed to this function.
+ """
+
+ @abstractmethod
+ @contextmanager
+ def allow_all_wheels(self):
+ """
+ Monkey patches pip.Wheel to allow wheels from all platforms and Python versions.
+ """
+
+ @abstractmethod
+ def copy_ireq_dependencies(self, source, dest):
+ """
+ Notifies the repository that `dest` is a copy of `source`, and so it
+ has the same dependencies. Otherwise, once we prepare an ireq to assign
+ it its name, we would lose track of those dependencies on combining
+ that ireq with others.
+ """
diff --git a/third_party/python/pip_tools/piptools/repositories/local.py b/third_party/python/pip_tools/piptools/repositories/local.py
new file mode 100644
index 0000000000..f185f35c3c
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/repositories/local.py
@@ -0,0 +1,97 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+from contextlib import contextmanager
+
+from pip._internal.utils.hashes import FAVORITE_HASH
+
+from piptools.utils import as_tuple, key_from_ireq, make_install_requirement
+
+from .base import BaseRepository
+
+
+def ireq_satisfied_by_existing_pin(ireq, existing_pin):
+ """
+ Return True if the given InstallationRequirement is satisfied by the
+ previously encountered version pin.
+ """
+ version = next(iter(existing_pin.req.specifier)).version
+ return ireq.req.specifier.contains(
+ version, prereleases=existing_pin.req.specifier.prereleases
+ )
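+
+# Illustrative: an existing pin ``django==3.0.3`` satisfies an incoming
+# requirement ``django>=2.2`` but not one specifying ``django<3.0``.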
+
+
+class LocalRequirementsRepository(BaseRepository):
+ """
+    The LocalRequirementsRepository proxies the _real_ repository by first
+ checking if a requirement can be satisfied by existing pins (i.e. the
+ result of a previous compile step).
+
+ In effect, if a requirement can be satisfied with a version pinned in the
+ requirements file, we prefer that version over the best match found in
+ PyPI. This keeps updates to the requirements.txt down to a minimum.
+ """
+
+ def __init__(self, existing_pins, proxied_repository, reuse_hashes=True):
+ self._reuse_hashes = reuse_hashes
+ self.repository = proxied_repository
+ self.existing_pins = existing_pins
+
+ @property
+ def options(self):
+ return self.repository.options
+
+ @property
+ def finder(self):
+ return self.repository.finder
+
+ @property
+ def session(self):
+ return self.repository.session
+
+ @property
+ def DEFAULT_INDEX_URL(self):
+ return self.repository.DEFAULT_INDEX_URL
+
+ def clear_caches(self):
+ self.repository.clear_caches()
+
+ @contextmanager
+ def freshen_build_caches(self):
+ with self.repository.freshen_build_caches():
+ yield
+
+ def find_best_match(self, ireq, prereleases=None):
+ key = key_from_ireq(ireq)
+ existing_pin = self.existing_pins.get(key)
+ if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin):
+ project, version, _ = as_tuple(existing_pin)
+ return make_install_requirement(
+ project, version, ireq.extras, constraint=ireq.constraint
+ )
+ else:
+ return self.repository.find_best_match(ireq, prereleases)
+
+ def get_dependencies(self, ireq):
+ return self.repository.get_dependencies(ireq)
+
+ def get_hashes(self, ireq):
+ existing_pin = self._reuse_hashes and self.existing_pins.get(
+ key_from_ireq(ireq)
+ )
+ if existing_pin and ireq_satisfied_by_existing_pin(ireq, existing_pin):
+ hashes = existing_pin.hash_options
+ hexdigests = hashes.get(FAVORITE_HASH)
+ if hexdigests:
+ return {
+ ":".join([FAVORITE_HASH, hexdigest]) for hexdigest in hexdigests
+ }
+ return self.repository.get_hashes(ireq)
+
+ @contextmanager
+ def allow_all_wheels(self):
+ with self.repository.allow_all_wheels():
+ yield
+
+ def copy_ireq_dependencies(self, source, dest):
+ self.repository.copy_ireq_dependencies(source, dest)
diff --git a/third_party/python/pip_tools/piptools/repositories/pypi.py b/third_party/python/pip_tools/piptools/repositories/pypi.py
new file mode 100644
index 0000000000..7a988bfc1f
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/repositories/pypi.py
@@ -0,0 +1,531 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import collections
+import hashlib
+import itertools
+import logging
+import os
+from contextlib import contextmanager
+from shutil import rmtree
+
+from pip._internal.cache import WheelCache
+from pip._internal.cli.progress_bars import BAR_TYPES
+from pip._internal.commands import create_command
+from pip._internal.models.index import PackageIndex, PyPI
+from pip._internal.models.link import Link
+from pip._internal.models.wheel import Wheel
+from pip._internal.req import RequirementSet
+from pip._internal.req.req_tracker import get_requirement_tracker
+from pip._internal.utils.hashes import FAVORITE_HASH
+from pip._internal.utils.logging import indent_log, setup_logging
+from pip._internal.utils.misc import normalize_path
+from pip._internal.utils.temp_dir import TempDirectory, global_tempdir_manager
+from pip._internal.utils.urls import path_to_url, url_to_path
+from pip._vendor.requests import RequestException
+
+from .._compat import PIP_VERSION, TemporaryDirectory, contextlib, makedirs
+from ..click import progressbar
+from ..exceptions import NoCandidateFound
+from ..logging import log
+from ..utils import (
+ as_tuple,
+ fs_str,
+ is_pinned_requirement,
+ is_url_requirement,
+ lookup_table,
+ make_install_requirement,
+)
+from .base import BaseRepository
+
+FILE_CHUNK_SIZE = 4096
+FileStream = collections.namedtuple("FileStream", "stream size")
+
+
+class PyPIRepository(BaseRepository):
+ DEFAULT_INDEX_URL = PyPI.simple_url
+ HASHABLE_PACKAGE_TYPES = {"bdist_wheel", "sdist"}
+
+ """
+    The PyPIRepository will use the provided Finder instance to look up
+ packages. Typically, it looks up packages on PyPI (the default implicit
+ config), but any other PyPI mirror can be used if index_urls is
+ changed/configured on the Finder.
+ """
+
+ def __init__(self, pip_args, cache_dir):
+ # Use pip's parser for pip.conf management and defaults.
+ # General options (find_links, index_url, extra_index_url, trusted_host,
+ # and pre) are deferred to pip.
+ self.command = create_command("install")
+ extra_pip_args = (
+ []
+ if PIP_VERSION[:2] <= (20, 2)
+ else ["--use-deprecated", "legacy-resolver"]
+ )
+ self.options, _ = self.command.parse_args(pip_args + extra_pip_args)
+ if self.options.cache_dir:
+ self.options.cache_dir = normalize_path(self.options.cache_dir)
+
+ self.options.require_hashes = False
+ self.options.ignore_dependencies = False
+
+ self.session = self.command._build_session(self.options)
+ self.finder = self.command._build_package_finder(
+ options=self.options, session=self.session
+ )
+
+ # Caches
+ # stores project_name => InstallationCandidate mappings for all
+ # versions reported by PyPI, so we only have to ask once for each
+ # project
+ self._available_candidates_cache = {}
+
+ # stores InstallRequirement => list(InstallRequirement) mappings
+ # of all secondary dependencies for the given requirement, so we
+ # only have to go to disk once for each requirement
+ self._dependencies_cache = {}
+
+ # Setup file paths
+ self._build_dir = None
+ self._source_dir = None
+ self._cache_dir = normalize_path(cache_dir)
+ self._download_dir = fs_str(os.path.join(self._cache_dir, "pkgs"))
+ if PIP_VERSION[:2] <= (20, 2):
+ self._wheel_download_dir = fs_str(os.path.join(self._cache_dir, "wheels"))
+
+ self._setup_logging()
+
+ @contextmanager
+ def freshen_build_caches(self):
+ """
+ Start with fresh build/source caches. Will remove any old build
+ caches from disk automatically.
+ """
+ self._build_dir = TemporaryDirectory(fs_str("build"))
+ self._source_dir = TemporaryDirectory(fs_str("source"))
+ try:
+ yield
+ finally:
+ self._build_dir.cleanup()
+ self._build_dir = None
+ self._source_dir.cleanup()
+ self._source_dir = None
+
+ @property
+ def build_dir(self):
+ return self._build_dir.name if self._build_dir else None
+
+ @property
+ def source_dir(self):
+ return self._source_dir.name if self._source_dir else None
+
+ def clear_caches(self):
+ rmtree(self._download_dir, ignore_errors=True)
+ if PIP_VERSION[:2] <= (20, 2):
+ rmtree(self._wheel_download_dir, ignore_errors=True)
+
+ def find_all_candidates(self, req_name):
+ if req_name not in self._available_candidates_cache:
+ candidates = self.finder.find_all_candidates(req_name)
+ self._available_candidates_cache[req_name] = candidates
+ return self._available_candidates_cache[req_name]
+
+ def find_best_match(self, ireq, prereleases=None):
+ """
+ Returns a Version object that indicates the best match for the given
+ InstallRequirement according to the external repository.
+ """
+ if ireq.editable or is_url_requirement(ireq):
+ return ireq # return itself as the best match
+
+ all_candidates = self.find_all_candidates(ireq.name)
+ candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version)
+ matching_versions = ireq.specifier.filter(
+ (candidate.version for candidate in all_candidates), prereleases=prereleases
+ )
+
+ matching_candidates = list(
+ itertools.chain.from_iterable(
+ candidates_by_version[ver] for ver in matching_versions
+ )
+ )
+ if not matching_candidates:
+ raise NoCandidateFound(ireq, all_candidates, self.finder)
+
+ evaluator = self.finder.make_candidate_evaluator(ireq.name)
+ best_candidate_result = evaluator.compute_best_candidate(matching_candidates)
+ best_candidate = best_candidate_result.best_candidate
+
+ # Turn the candidate into a pinned InstallRequirement
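+        # (illustrative: ``django>=2.2`` whose best candidate is 3.0.3 comes
+        # back as a pinned ireq equivalent to ``django==3.0.3``)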
+ return make_install_requirement(
+ best_candidate.name,
+ best_candidate.version,
+ ireq.extras,
+ constraint=ireq.constraint,
+ )
+
+ def resolve_reqs(self, download_dir, ireq, wheel_cache):
+ with get_requirement_tracker() as req_tracker, TempDirectory(
+ kind="resolver"
+ ) as temp_dir, indent_log():
+ preparer_kwargs = dict(
+ temp_build_dir=temp_dir,
+ options=self.options,
+ req_tracker=req_tracker,
+ session=self.session,
+ finder=self.finder,
+ use_user_site=False,
+ download_dir=download_dir,
+ )
+ if PIP_VERSION[:2] <= (20, 2):
+ preparer_kwargs["wheel_download_dir"] = self._wheel_download_dir
+ preparer = self.command.make_requirement_preparer(**preparer_kwargs)
+
+ reqset = RequirementSet()
+ if PIP_VERSION[:2] <= (20, 1):
+ ireq.is_direct = True
+ else:
+ ireq.user_supplied = True
+ reqset.add_requirement(ireq)
+
+ resolver = self.command.make_resolver(
+ preparer=preparer,
+ finder=self.finder,
+ options=self.options,
+ wheel_cache=wheel_cache,
+ use_user_site=False,
+ ignore_installed=True,
+ ignore_requires_python=False,
+ force_reinstall=False,
+ upgrade_strategy="to-satisfy-only",
+ )
+ results = resolver._resolve_one(reqset, ireq)
+ if not ireq.prepared:
+ # If still not prepared, e.g. a constraint, do enough to assign
+ # the ireq a name:
+ if PIP_VERSION[:2] <= (20, 2):
+ resolver._get_abstract_dist_for(ireq)
+ else:
+ resolver._get_dist_for(ireq)
+
+ return set(results)
+
+ def get_dependencies(self, ireq):
+ """
+ Given a pinned, URL, or editable InstallRequirement, returns a set of
+ dependencies (also InstallRequirements, but not necessarily pinned).
+ They indicate the secondary dependencies for the given requirement.
+ """
+ if not (
+ ireq.editable or is_url_requirement(ireq) or is_pinned_requirement(ireq)
+ ):
+ raise TypeError(
+ "Expected url, pinned or editable InstallRequirement, got {}".format(
+ ireq
+ )
+ )
+
+ if ireq not in self._dependencies_cache:
+ if ireq.editable and (ireq.source_dir and os.path.exists(ireq.source_dir)):
+ # No download_dir for locally available editable requirements.
+ # If a download_dir is passed, pip will unnecessarily archive
+ # the entire source directory
+ download_dir = None
+ elif ireq.link and ireq.link.is_vcs:
+ # No download_dir for VCS sources. This also works around pip
+ # using git-checkout-index, which gets rid of the .git dir.
+ download_dir = None
+ else:
+ download_dir = self._get_download_path(ireq)
+ makedirs(download_dir, exist_ok=True)
+ if PIP_VERSION[:2] <= (20, 2):
+ makedirs(self._wheel_download_dir, exist_ok=True)
+
+ with global_tempdir_manager():
+ wheel_cache = WheelCache(self._cache_dir, self.options.format_control)
+ self._dependencies_cache[ireq] = self.resolve_reqs(
+ download_dir, ireq, wheel_cache
+ )
+
+ return self._dependencies_cache[ireq]
+
+ def copy_ireq_dependencies(self, source, dest):
+ try:
+ self._dependencies_cache[dest] = self._dependencies_cache[source]
+ except KeyError:
+ # `source` may not be in cache yet.
+ pass
+
+ def _get_project(self, ireq):
+ """
+ Return a dict of a project info from PyPI JSON API for a given
+ InstallRequirement. Return None on HTTP/JSON error or if a package
+ is not found on PyPI server.
+
+ API reference: https://warehouse.readthedocs.io/api-reference/json/
+ """
+ package_indexes = (
+ PackageIndex(url=index_url, file_storage_domain="")
+ for index_url in self.finder.search_scope.index_urls
+ )
+ for package_index in package_indexes:
+ url = "{url}/{name}/json".format(url=package_index.pypi_url, name=ireq.name)
+ try:
+ response = self.session.get(url)
+ except RequestException as e:
+ log.debug(
+ "Fetch package info from PyPI failed: {url}: {e}".format(
+ url=url, e=e
+ )
+ )
+ continue
+
+ # Skip this PyPI server: either the package does not exist there,
+ # or the JSON API may not be supported
+ if response.status_code == 404:
+ continue
+
+ try:
+ data = response.json()
+ except ValueError as e:
+ log.debug(
+ "Cannot parse JSON response from PyPI: {url}: {e}".format(
+ url=url, e=e
+ )
+ )
+ continue
+ return data
+ return None
+
+ def _get_download_path(self, ireq):
+ """
+ Determine the download dir location in a way which avoids name
+ collisions.
+ """
+ if ireq.link:
+ salt = hashlib.sha224(ireq.link.url_without_fragment.encode()).hexdigest()
+ # Nest directories to avoid running out of top level dirs on some FS
+ # (see pypi _get_cache_path_parts, which inspired this)
+ salt = [salt[:2], salt[2:4], salt[4:6], salt[6:]]
+ return os.path.join(self._download_dir, *salt)
+ else:
+ return self._download_dir
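+
+ # Illustrative example (assumed link, not part of the upstream code): for a
+ # URL such as "https://example.test/pkg-1.0.tar.gz", the 56-character sha224
+ # hex digest is split 2/2/2/50, so the returned path looks like
+ # <download_dir>/ab/cd/ef/<remaining 50 hex chars>.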
+
+ def get_hashes(self, ireq):
+ """
+ Given an InstallRequirement, return a set of hashes that represent all
+ of the files for a given requirement. Unhashable requirements return an
+ empty set. Unpinned requirements raise a TypeError.
+ """
+
+ if ireq.link:
+ link = ireq.link
+
+ if link.is_vcs or (link.is_file and link.is_existing_dir()):
+ # Return empty set for unhashable requirements.
+ # Unhashable logic modeled on pip's
+ # RequirementPreparer.prepare_linked_requirement
+ return set()
+
+ if is_url_requirement(ireq):
+ # Directly hash URL requirements.
+ # URL requirements may have been previously downloaded and cached
+ # locally by self.resolve_reqs()
+ cached_path = os.path.join(self._get_download_path(ireq), link.filename)
+ if os.path.exists(cached_path):
+ cached_link = Link(path_to_url(cached_path))
+ else:
+ cached_link = link
+ return {self._get_file_hash(cached_link)}
+
+ if not is_pinned_requirement(ireq):
+ raise TypeError("Expected pinned requirement, got {}".format(ireq))
+
+ log.debug(ireq.name)
+
+ with log.indentation():
+ hashes = self._get_hashes_from_pypi(ireq)
+ if hashes is None:
+ log.log("Couldn't get hashes from PyPI, fallback to hashing files")
+ return self._get_hashes_from_files(ireq)
+
+ return hashes
+
+ def _get_hashes_from_pypi(self, ireq):
+ """
+ Return a set of hashes from the PyPI JSON API for a given InstallRequirement.
+ Return None if fetching the data failed or the digests are missing.
+ """
+ project = self._get_project(ireq)
+ if project is None:
+ return None
+
+ _, version, _ = as_tuple(ireq)
+
+ try:
+ release_files = project["releases"][version]
+ except KeyError:
+ log.debug("Missing release files on PyPI")
+ return None
+
+ try:
+ hashes = {
+ "{algo}:{digest}".format(
+ algo=FAVORITE_HASH, digest=file_["digests"][FAVORITE_HASH]
+ )
+ for file_ in release_files
+ if file_["packagetype"] in self.HASHABLE_PACKAGE_TYPES
+ }
+ except KeyError:
+ log.debug("Missing digests of release files on PyPI")
+ return None
+
+ return hashes
+
+ def _get_hashes_from_files(self, ireq):
+ """
+ Return a set of hashes for all release files of a given InstallRequirement.
+ """
+ # We need to get all of the candidates that match our current version
+ # pin, these will represent all of the files that could possibly
+ # satisfy this constraint.
+ all_candidates = self.find_all_candidates(ireq.name)
+ candidates_by_version = lookup_table(all_candidates, key=lambda c: c.version)
+ matching_versions = list(
+ ireq.specifier.filter((candidate.version for candidate in all_candidates))
+ )
+ matching_candidates = candidates_by_version[matching_versions[0]]
+
+ return {
+ self._get_file_hash(candidate.link) for candidate in matching_candidates
+ }
+
+ def _get_file_hash(self, link):
+ log.debug("Hashing {}".format(link.show_url))
+ h = hashlib.new(FAVORITE_HASH)
+ with open_local_or_remote_file(link, self.session) as f:
+ # Chunks to iterate
+ chunks = iter(lambda: f.stream.read(FILE_CHUNK_SIZE), b"")
+
+ # Choose a context manager depending on verbosity
+ if log.verbosity >= 1:
+ iter_length = f.size / FILE_CHUNK_SIZE if f.size else None
+ bar_template = "{prefix} |%(bar)s| %(info)s".format(
+ prefix=" " * log.current_indent
+ )
+ context_manager = progressbar(
+ chunks,
+ length=iter_length,
+ # Make it look like default pip progress bar
+ fill_char="█",
+ empty_char=" ",
+ bar_template=bar_template,
+ width=32,
+ )
+ else:
+ context_manager = contextlib.nullcontext(chunks)
+
+ # Iterate over the chosen context manager
+ with context_manager as bar:
+ for chunk in bar:
+ h.update(chunk)
+ return ":".join([FAVORITE_HASH, h.hexdigest()])
+
+ @contextmanager
+ def allow_all_wheels(self):
+ """
+ Monkey-patches pip.Wheel to allow wheels from all platforms and Python versions.
+
+ This also saves the candidate cache and sets a new one, or else the results
+ from previous non-patched calls would interfere.
+ """
+
+ def _wheel_supported(self, tags=None):
+ # Ignore current platform. Support everything.
+ return True
+
+ def _wheel_support_index_min(self, tags=None):
+ # All wheels are equal priority for sorting.
+ return 0
+
+ original_wheel_supported = Wheel.supported
+ original_support_index_min = Wheel.support_index_min
+ original_cache = self._available_candidates_cache
+
+ Wheel.supported = _wheel_supported
+ Wheel.support_index_min = _wheel_support_index_min
+ self._available_candidates_cache = {}
+
+ try:
+ yield
+ finally:
+ Wheel.supported = original_wheel_supported
+ Wheel.support_index_min = original_support_index_min
+ self._available_candidates_cache = original_cache
+
+ def _setup_logging(self):
+ """
+ Set up pip's logger. Ensure pip's verbosity matches that of pip-tools and
+ sync pip's log stream with LogContext.stream.
+ """
+ # pip's default logger is noisy, so decrease its verbosity
+ setup_logging(
+ verbosity=log.verbosity - 1,
+ no_color=self.options.no_color,
+ user_log_file=self.options.log,
+ )
+
+ # Sync pip's console handler stream with LogContext.stream
+ logger = logging.getLogger()
+ for handler in logger.handlers:
+ if handler.name == "console": # pragma: no branch
+ handler.stream = log.stream
+ break
+ else: # pragma: no cover
+ # There is always a console handler. This warning would be a signal that
+ # this block should be removed/revisited, because pip may have
+ # refactored out its logging config.
+ log.warning("Couldn't find a 'console' logging handler")
+
+ # Sync pip's progress bars stream with LogContext.stream
+ for bar_cls in itertools.chain(*BAR_TYPES.values()):
+ bar_cls.file = log.stream
+
+
+@contextmanager
+def open_local_or_remote_file(link, session):
+ """
+ Open local or remote file for reading.
+
+ :type link: pip.index.Link
+ :type session: requests.Session
+ :raises ValueError: If link points to a local directory.
+ :return: a context manager yielding a FileStream with the opened file-like object
+ """
+ url = link.url_without_fragment
+
+ if link.is_file:
+ # Local URL
+ local_path = url_to_path(url)
+ if os.path.isdir(local_path):
+ raise ValueError("Cannot open directory for read: {}".format(url))
+ else:
+ st = os.stat(local_path)
+ with open(local_path, "rb") as local_file:
+ yield FileStream(stream=local_file, size=st.st_size)
+ else:
+ # Remote URL
+ headers = {"Accept-Encoding": "identity"}
+ response = session.get(url, headers=headers, stream=True)
+
+ # Content length must be int or None
+ try:
+ content_length = int(response.headers["content-length"])
+ except (ValueError, KeyError, TypeError):
+ content_length = None
+
+ try:
+ yield FileStream(stream=response.raw, size=content_length)
+ finally:
+ response.close()
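+
+# A minimal usage sketch (illustrative; assumes `link` is a pip Link and
+# `session` is a requests.Session):
+#
+#     with open_local_or_remote_file(link, session) as f:
+#         first_chunk = f.stream.read(8192)
+#         total_size = f.size  # may be None when the size is unknown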
diff --git a/third_party/python/pip_tools/piptools/resolver.py b/third_party/python/pip_tools/piptools/resolver.py
new file mode 100644
index 0000000000..d46a04a9e3
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/resolver.py
@@ -0,0 +1,405 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import copy
+from functools import partial
+from itertools import chain, count, groupby
+
+from pip._internal.req.constructors import install_req_from_line
+from pip._internal.req.req_tracker import update_env_context_manager
+
+from . import click
+from .logging import log
+from .utils import (
+ UNSAFE_PACKAGES,
+ format_requirement,
+ format_specifier,
+ is_pinned_requirement,
+ is_url_requirement,
+ key_from_ireq,
+)
+
+green = partial(click.style, fg="green")
+magenta = partial(click.style, fg="magenta")
+
+
+class RequirementSummary(object):
+ """
+ Summary of a requirement's properties for comparison purposes.
+ """
+
+ def __init__(self, ireq):
+ self.req = ireq.req
+ self.key = key_from_ireq(ireq)
+ self.extras = frozenset(ireq.extras)
+ self.specifier = ireq.specifier
+
+ def __eq__(self, other):
+ return (
+ self.key == other.key
+ and self.specifier == other.specifier
+ and self.extras == other.extras
+ )
+
+ def __hash__(self):
+ return hash((self.key, self.specifier, self.extras))
+
+ def __str__(self):
+ return repr((self.key, str(self.specifier), sorted(self.extras)))
+
+
+def combine_install_requirements(repository, ireqs):
+ """
+ Return a single install requirement that reflects a combination of
+ all the inputs.
+ """
+ # We will store the source ireqs in a _source_ireqs attribute;
+ # if any of the inputs have this, then use those sources directly.
+ source_ireqs = []
+ for ireq in ireqs:
+ source_ireqs.extend(getattr(ireq, "_source_ireqs", [ireq]))
+
+ # Optimization. Don't bother with combination logic.
+ if len(source_ireqs) == 1:
+ return source_ireqs[0]
+
+ # deepcopy the accumulator so as to not modify the inputs
+ combined_ireq = copy.deepcopy(source_ireqs[0])
+ repository.copy_ireq_dependencies(source_ireqs[0], combined_ireq)
+
+ for ireq in source_ireqs[1:]:
+ # NOTE we may be losing some info on dropped reqs here
+ combined_ireq.req.specifier &= ireq.req.specifier
+ if combined_ireq.constraint:
+ # We don't find dependencies for constraint ireqs, so copy them
+ # from non-constraints:
+ repository.copy_ireq_dependencies(ireq, combined_ireq)
+ combined_ireq.constraint &= ireq.constraint
+ # Store a sorted, de-duped tuple of the combined extras
+ combined_ireq.extras = tuple(
+ sorted(set(tuple(combined_ireq.extras) + tuple(ireq.extras)))
+ )
+
+ # InstallRequirements objects are assumed to come from only one source, and
+ # so they support only a single comes_from entry. This function breaks this
+ # model. As a workaround, we deterministically choose a single source for
+ # the comes_from entry, and add an extra _source_ireqs attribute to keep
+ # track of multiple sources for use within pip-tools.
+ if len(source_ireqs) > 1:
+ if any(ireq.comes_from is None for ireq in source_ireqs):
+ # None indicates package was directly specified.
+ combined_ireq.comes_from = None
+ else:
+ # Populate the comes_from field from one of the sources.
+ # Requirement input order is not stable, so we need to sort:
+ # We choose the shortest entry in order to keep the printed
+ # representation as concise as possible.
+ combined_ireq.comes_from = min(
+ (ireq.comes_from for ireq in source_ireqs),
+ key=lambda x: (len(str(x)), str(x)),
+ )
+ combined_ireq._source_ireqs = source_ireqs
+ return combined_ireq
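+
+# Illustrative example (assumed inputs): combining ireqs parsed from
+# "Django>=1.4.2,<1.9" and "django[bcrypt]~=1.5" yields a single ireq whose
+# specifier is "<1.9,>=1.4.2,~=1.5" and whose extras are ("bcrypt",).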
+
+
+class Resolver(object):
+ def __init__(
+ self,
+ constraints,
+ repository,
+ cache,
+ prereleases=False,
+ clear_caches=False,
+ allow_unsafe=False,
+ ):
+ """
+ This class resolves a given set of constraints (a collection of
+ InstallRequirement objects) by consulting the given Repository and the
+ DependencyCache.
+ """
+ self.our_constraints = set(constraints)
+ self.their_constraints = set()
+ self.repository = repository
+ self.dependency_cache = cache
+ self.prereleases = prereleases
+ self.clear_caches = clear_caches
+ self.allow_unsafe = allow_unsafe
+ self.unsafe_constraints = set()
+
+ @property
+ def constraints(self):
+ return set(
+ self._group_constraints(chain(self.our_constraints, self.their_constraints))
+ )
+
+ def resolve_hashes(self, ireqs):
+ """
+ Finds acceptable hashes for all of the given InstallRequirements.
+ """
+ log.debug("")
+ log.debug("Generating hashes:")
+ with self.repository.allow_all_wheels(), log.indentation():
+ return {ireq: self.repository.get_hashes(ireq) for ireq in ireqs}
+
+ def resolve(self, max_rounds=10):
+ """
+ Finds concrete package versions for all the given InstallRequirements
+ and their recursive dependencies. The end result is a flat list of
+ (name, version) tuples. (Or an editable package.)
+
+ Resolves constraints one round at a time, until they stop changing.
+ Protects against infinite loops by breaking out after a maximum number
+ of rounds.
+ """
+ if self.clear_caches:
+ self.dependency_cache.clear()
+ self.repository.clear_caches()
+
+ # Ignore existing packages
+ # NOTE: str() wrapping necessary for Python 2/3 compat
+ with update_env_context_manager(PIP_EXISTS_ACTION=str("i")):
+ for current_round in count(start=1): # pragma: no branch
+ if current_round > max_rounds:
+ raise RuntimeError(
+ "No stable configuration of concrete packages "
+ "could be found for the given constraints after "
+ "{max_rounds} rounds of resolving.\n"
+ "This is likely a bug.".format(max_rounds=max_rounds)
+ )
+
+ log.debug("")
+ log.debug(magenta("{:^60}".format("ROUND {}".format(current_round))))
+ # If a package version (foo==2.0) was built in a previous round,
+ # and in this round a different version of foo needs to be built
+ # (i.e. foo==1.0), the directory will exist already, which will
+ # cause a pip build failure. The trick is to start with a new
+ # build cache dir for every round, so this can never happen.
+ with self.repository.freshen_build_caches():
+ has_changed, best_matches = self._resolve_one_round()
+ log.debug("-" * 60)
+ log.debug(
+ "Result of round {}: {}".format(
+ current_round,
+ "not stable" if has_changed else "stable, done",
+ )
+ )
+ if not has_changed:
+ break
+
+ # Only include hard requirements and not pip constraints
+ results = {req for req in best_matches if not req.constraint}
+
+ # Filter out unsafe requirements.
+ self.unsafe_constraints = set()
+ if not self.allow_unsafe:
+ # reverse_dependencies is used to filter out packages that are only
+ # required by unsafe packages. This logic is incomplete, as it would
+ # fail to filter sub-sub-dependencies of unsafe packages. None of the
+ # UNSAFE_PACKAGES currently have any dependencies at all (which makes
+ # sense for installation tools) so this seems sufficient.
+ reverse_dependencies = self.reverse_dependencies(results)
+ for req in results.copy():
+ required_by = reverse_dependencies.get(req.name.lower(), [])
+ if req.name in UNSAFE_PACKAGES or (
+ required_by and all(name in UNSAFE_PACKAGES for name in required_by)
+ ):
+ self.unsafe_constraints.add(req)
+ results.remove(req)
+
+ return results
+
+ def _group_constraints(self, constraints):
+ """
+ Groups constraints (remember, InstallRequirements!) by their key name,
+ and combining their SpecifierSets into a single InstallRequirement per
+ package. For example, given the following constraints:
+
+ Django<1.9,>=1.4.2
+ django~=1.5
+ Flask~=0.7
+
+ This will be combined into a single entry per package:
+
+ django~=1.5,<1.9,>=1.4.2
+ flask~=0.7
+
+ """
+ constraints = list(constraints)
+ for ireq in constraints:
+ if ireq.name is None:
+ # get_dependencies has the side effect of assigning a name to the ireq
+ # (so we can group by the name below).
+ self.repository.get_dependencies(ireq)
+
+ # Sort first by name, i.e. the groupby key. Then within each group,
+ # sort editables first.
+ # This way, we don't bother with combining editables, since the first
+ # ireq will be editable, if one exists.
+ for _, ireqs in groupby(
+ sorted(constraints, key=(lambda x: (key_from_ireq(x), not x.editable))),
+ key=key_from_ireq,
+ ):
+ yield combine_install_requirements(self.repository, ireqs)
+
+ def _resolve_one_round(self):
+ """
+ Resolves one level of the current constraints, by finding the best
+ match for each package in the repository and adding all requirements
+ for those best package versions. Some of these constraints may be new
+ or updated.
+
+ Returns whether new constraints appeared in this round. If no
+ constraints were added or changed, this indicates a stable
+ configuration.
+ """
+ # Sort this list for readability of terminal output
+ constraints = sorted(self.constraints, key=key_from_ireq)
+
+ log.debug("Current constraints:")
+ with log.indentation():
+ for constraint in constraints:
+ log.debug(str(constraint))
+
+ log.debug("")
+ log.debug("Finding the best candidates:")
+ with log.indentation():
+ best_matches = {self.get_best_match(ireq) for ireq in constraints}
+
+ # Find the new set of secondary dependencies
+ log.debug("")
+ log.debug("Finding secondary dependencies:")
+
+ their_constraints = []
+ with log.indentation():
+ for best_match in best_matches:
+ their_constraints.extend(self._iter_dependencies(best_match))
+ # Group constraints to make a clean diff between rounds
+ theirs = set(self._group_constraints(their_constraints))
+
+ # NOTE: We need to compare RequirementSummary objects, since
+ # InstallRequirement does not define equality
+ diff = {RequirementSummary(t) for t in theirs} - {
+ RequirementSummary(t) for t in self.their_constraints
+ }
+ removed = {RequirementSummary(t) for t in self.their_constraints} - {
+ RequirementSummary(t) for t in theirs
+ }
+
+ has_changed = len(diff) > 0 or len(removed) > 0
+ if has_changed:
+ log.debug("")
+ log.debug("New dependencies found in this round:")
+ with log.indentation():
+ for new_dependency in sorted(diff, key=key_from_ireq):
+ log.debug("adding {}".format(new_dependency))
+ log.debug("Removed dependencies in this round:")
+ with log.indentation():
+ for removed_dependency in sorted(removed, key=key_from_ireq):
+ log.debug("removing {}".format(removed_dependency))
+
+ # Store the last round's results in their_constraints
+ self.their_constraints = theirs
+ return has_changed, best_matches
+
+ def get_best_match(self, ireq):
+ """
+ Returns a (pinned or editable) InstallRequirement that is the best
+ match for the given InstallRequirement.
+
+ Example:
+ Given the constraint Flask>=0.10, may return Flask==0.10.1 at
+ a certain moment in time.
+
+ Pinned requirements will always return themselves, i.e.
+
+ Flask==0.10.1 => Flask==0.10.1
+
+ """
+ if ireq.editable or is_url_requirement(ireq):
+ # NOTE: it's much quicker to immediately return instead of
+ # hitting the index server
+ best_match = ireq
+ elif is_pinned_requirement(ireq):
+ # NOTE: it's much quicker to immediately return instead of
+ # hitting the index server
+ best_match = ireq
+ elif ireq.constraint:
+ # NOTE: This is not a requirement (yet) and does not need
+ # to be resolved
+ best_match = ireq
+ else:
+ best_match = self.repository.find_best_match(
+ ireq, prereleases=self.prereleases
+ )
+
+ # Format the best match
+ log.debug(
+ "found candidate {} (constraint was {})".format(
+ format_requirement(best_match), format_specifier(ireq)
+ )
+ )
+ best_match.comes_from = ireq.comes_from
+ if hasattr(ireq, "_source_ireqs"):
+ best_match._source_ireqs = ireq._source_ireqs
+ return best_match
+
+ def _iter_dependencies(self, ireq):
+ """
+ Given a pinned, URL, or editable InstallRequirement, collects all of
+ its secondary dependencies, either by looking them up in a local
+ cache, or by reaching out to the repository.
+
+ Editable requirements will never be looked up, as they may have
+ changed at any time.
+ """
+ # Pip does not resolve dependencies of constraints. We skip handling
+ # constraints here as well to prevent the cache from being polluted.
+ # Constraints that are later determined to be dependencies will be
+ # marked as non-constraints in later rounds by
+ # `combine_install_requirements`, and will be properly resolved.
+ # See https://github.com/pypa/pip/
+ # blob/6896dfcd831330c13e076a74624d95fa55ff53f4/src/pip/_internal/
+ # legacy_resolve.py#L325
+ if ireq.constraint:
+ return
+
+ if ireq.editable or is_url_requirement(ireq):
+ for dependency in self.repository.get_dependencies(ireq):
+ yield dependency
+ return
+ elif not is_pinned_requirement(ireq):
+ raise TypeError(
+ "Expected pinned or editable requirement, got {}".format(ireq)
+ )
+
+ # Now, either get the dependencies from the dependency cache (for
+ # speed), or reach out to the external repository to
+ # download and inspect the package version and get dependencies
+ # from there
+ if ireq not in self.dependency_cache:
+ log.debug(
+ "{} not in cache, need to check index".format(format_requirement(ireq)),
+ fg="yellow",
+ )
+ dependencies = self.repository.get_dependencies(ireq)
+ self.dependency_cache[ireq] = sorted(str(ireq.req) for ireq in dependencies)
+
+ # Example: ['Werkzeug>=0.9', 'Jinja2>=2.4']
+ dependency_strings = self.dependency_cache[ireq]
+ log.debug(
+ "{:25} requires {}".format(
+ format_requirement(ireq),
+ ", ".join(sorted(dependency_strings, key=lambda s: s.lower())) or "-",
+ )
+ )
+ for dependency_string in dependency_strings:
+ yield install_req_from_line(
+ dependency_string, constraint=ireq.constraint, comes_from=ireq
+ )
+
+ def reverse_dependencies(self, ireqs):
+ non_editable = [
+ ireq for ireq in ireqs if not (ireq.editable or is_url_requirement(ireq))
+ ]
+ return self.dependency_cache.reverse_dependencies(non_editable)
diff --git a/third_party/python/pip_tools/piptools/scripts/__init__.py b/third_party/python/pip_tools/piptools/scripts/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/scripts/__init__.py
diff --git a/third_party/python/pip_tools/piptools/scripts/compile.py b/third_party/python/pip_tools/piptools/scripts/compile.py
new file mode 100644
index 0000000000..ca650e4913
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/scripts/compile.py
@@ -0,0 +1,495 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import os
+import shlex
+import sys
+import tempfile
+import warnings
+
+from click import Command
+from click.utils import safecall
+from pip._internal.commands import create_command
+from pip._internal.req.constructors import install_req_from_line
+from pip._internal.utils.misc import redact_auth_from_url
+
+from .. import click
+from .._compat import parse_requirements
+from ..cache import DependencyCache
+from ..exceptions import PipToolsError
+from ..locations import CACHE_DIR
+from ..logging import log
+from ..repositories import LocalRequirementsRepository, PyPIRepository
+from ..resolver import Resolver
+from ..utils import UNSAFE_PACKAGES, dedup, is_pinned_requirement, key_from_ireq
+from ..writer import OutputWriter
+
+DEFAULT_REQUIREMENTS_FILE = "requirements.in"
+DEFAULT_REQUIREMENTS_OUTPUT_FILE = "requirements.txt"
+
+
+def _get_default_option(option_name):
+ """
+ Get the default value of a pip option (including options from pip.conf)
+ for a given option name.
+ """
+ install_command = create_command("install")
+ default_values = install_command.parser.get_default_values()
+ return getattr(default_values, option_name)
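+
+# For example (illustrative), _get_default_option("index_url") returns pip's
+# configured default index URL (typically "https://pypi.org/simple"),
+# honoring any override from pip.conf.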
+
+
+class BaseCommand(Command):
+ _os_args = None
+
+ def parse_args(self, ctx, args):
+ """
+ Override base `parse_args` to store the argument part of `sys.argv`.
+ """
+ self._os_args = set(args)
+ return super(BaseCommand, self).parse_args(ctx, args)
+
+ def has_arg(self, arg_name):
+ """
+ Detect whether a given arg name (including negative counterparts
+ to the arg, e.g. --no-arg) is present in the argument part of `sys.argv`.
+ """
+ command_options = {option.name: option for option in self.params}
+ option = command_options[arg_name]
+ args = set(option.opts + option.secondary_opts)
+ return bool(self._os_args & args)
+
+
+@click.command(
+ cls=BaseCommand, context_settings={"help_option_names": ("-h", "--help")}
+)
+@click.version_option()
+@click.pass_context
+@click.option("-v", "--verbose", count=True, help="Show more output")
+@click.option("-q", "--quiet", count=True, help="Give less output")
+@click.option(
+ "-n",
+ "--dry-run",
+ is_flag=True,
+ help="Only show what would happen, don't change anything",
+)
+@click.option(
+ "-p",
+ "--pre",
+ is_flag=True,
+ default=None,
+ help="Allow resolving to prereleases (default is not)",
+)
+@click.option(
+ "-r",
+ "--rebuild",
+ is_flag=True,
+ help="Clear any caches upfront, rebuild from scratch",
+)
+@click.option(
+ "-f",
+ "--find-links",
+ multiple=True,
+ help="Look for archives in this directory or on this HTML page",
+)
+@click.option(
+ "-i",
+ "--index-url",
+ help="Change index URL (defaults to {index_url})".format(
+ index_url=redact_auth_from_url(_get_default_option("index_url"))
+ ),
+)
+@click.option(
+ "--extra-index-url", multiple=True, help="Add additional index URL to search"
+)
+@click.option("--cert", help="Path to alternate CA bundle.")
+@click.option(
+ "--client-cert",
+ help="Path to SSL client certificate, a single file containing "
+ "the private key and the certificate in PEM format.",
+)
+@click.option(
+ "--trusted-host",
+ multiple=True,
+ help="Mark this host as trusted, even though it does not have "
+ "valid or any HTTPS.",
+)
+@click.option(
+ "--header/--no-header",
+ is_flag=True,
+ default=True,
+ help="Add header to generated file",
+)
+@click.option(
+ "--index/--no-index",
+ is_flag=True,
+ default=True,
+ help="DEPRECATED: Add index URL to generated file",
+)
+@click.option(
+ "--emit-trusted-host/--no-emit-trusted-host",
+ is_flag=True,
+ default=True,
+ help="Add trusted host option to generated file",
+)
+@click.option(
+ "--annotate/--no-annotate",
+ is_flag=True,
+ default=True,
+ help="Annotate results, indicating where dependencies come from",
+)
+@click.option(
+ "-U",
+ "--upgrade",
+ is_flag=True,
+ default=False,
+ help="Try to upgrade all dependencies to their latest versions",
+)
+@click.option(
+ "-P",
+ "--upgrade-package",
+ "upgrade_packages",
+ nargs=1,
+ multiple=True,
+ help="Specify particular packages to upgrade.",
+)
+@click.option(
+ "-o",
+ "--output-file",
+ nargs=1,
+ default=None,
+ type=click.File("w+b", atomic=True, lazy=True),
+ help=(
+ "Output file name. Required if more than one input file is given. "
+ "Will be derived from input file otherwise."
+ ),
+)
+@click.option(
+ "--allow-unsafe/--no-allow-unsafe",
+ is_flag=True,
+ default=False,
+ help=(
+ "Pin packages considered unsafe: {}.\n\n"
+ "WARNING: Future versions of pip-tools will enable this behavior by default. "
+ "Use --no-allow-unsafe to keep the old behavior. It is recommended to pass the "
+ "--allow-unsafe now to adapt to the upcoming change.".format(
+ ", ".join(sorted(UNSAFE_PACKAGES))
+ )
+ ),
+)
+@click.option(
+ "--generate-hashes",
+ is_flag=True,
+ default=False,
+ help="Generate pip 8 style hashes in the resulting requirements file.",
+)
+@click.option(
+ "--reuse-hashes/--no-reuse-hashes",
+ is_flag=True,
+ default=True,
+ help=(
+ "Improve the speed of --generate-hashes by reusing the hashes from an "
+ "existing output file."
+ ),
+)
+@click.option(
+ "--max-rounds",
+ default=10,
+ help="Maximum number of rounds before resolving the requirements aborts.",
+)
+@click.argument("src_files", nargs=-1, type=click.Path(exists=True, allow_dash=True))
+@click.option(
+ "--build-isolation/--no-build-isolation",
+ is_flag=True,
+ default=True,
+ help="Enable isolation when building a modern source distribution. "
+ "Build dependencies specified by PEP 518 must be already installed "
+ "if build isolation is disabled.",
+)
+@click.option(
+ "--emit-find-links/--no-emit-find-links",
+ is_flag=True,
+ default=True,
+ help="Add the find-links option to generated file",
+)
+@click.option(
+ "--cache-dir",
+ help="Store the cache data in DIRECTORY.",
+ default=CACHE_DIR,
+ show_default=True,
+ type=click.Path(file_okay=False, writable=True),
+)
+@click.option("--pip-args", help="Arguments to pass directly to the pip command.")
+@click.option(
+ "--emit-index-url/--no-emit-index-url",
+ is_flag=True,
+ default=True,
+ help="Add index URL to generated file",
+)
+def cli(
+ ctx,
+ verbose,
+ quiet,
+ dry_run,
+ pre,
+ rebuild,
+ find_links,
+ index_url,
+ extra_index_url,
+ cert,
+ client_cert,
+ trusted_host,
+ header,
+ index,
+ emit_trusted_host,
+ annotate,
+ upgrade,
+ upgrade_packages,
+ output_file,
+ allow_unsafe,
+ generate_hashes,
+ reuse_hashes,
+ src_files,
+ max_rounds,
+ build_isolation,
+ emit_find_links,
+ cache_dir,
+ pip_args,
+ emit_index_url,
+):
+ """Compiles requirements.txt from requirements.in specs."""
+ log.verbosity = verbose - quiet
+
+ if len(src_files) == 0:
+ if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
+ src_files = (DEFAULT_REQUIREMENTS_FILE,)
+ elif os.path.exists("setup.py"):
+ src_files = ("setup.py",)
+ else:
+ raise click.BadParameter(
+ (
+ "If you do not specify an input file, "
+ "the default is {} or setup.py"
+ ).format(DEFAULT_REQUIREMENTS_FILE)
+ )
+
+ if not output_file:
+ # An output file must be provided for stdin
+ if src_files == ("-",):
+ raise click.BadParameter("--output-file is required if input is from stdin")
+ # Use the default requirements output file if setup.py is the source file
+ elif src_files == ("setup.py",):
+ file_name = DEFAULT_REQUIREMENTS_OUTPUT_FILE
+ # An output file must be provided if there are multiple source files
+ elif len(src_files) > 1:
+ raise click.BadParameter(
+ "--output-file is required if two or more input files are given."
+ )
+ # Otherwise derive the output file from the source file
+ else:
+ base_name = src_files[0].rsplit(".", 1)[0]
+ file_name = base_name + ".txt"
+
+ output_file = click.open_file(file_name, "w+b", atomic=True, lazy=True)
+
+ # Close the file at the end of the context execution
+ ctx.call_on_close(safecall(output_file.close_intelligently))
+
+ if cli.has_arg("index") and cli.has_arg("emit_index_url"):
+ raise click.BadParameter(
+ "--index/--no-index and --emit-index-url/--no-emit-index-url "
+ "are mutually exclusive."
+ )
+ elif cli.has_arg("index"):
+ warnings.warn(
+ "--index and --no-index are deprecated and will be removed "
+ "in future versions. Use --emit-index-url/--no-emit-index-url instead.",
+ category=FutureWarning,
+ )
+ emit_index_url = index
+
+ ###
+ # Setup
+ ###
+
+ right_args = shlex.split(pip_args or "")
+ pip_args = []
+ for link in find_links:
+ pip_args.extend(["-f", link])
+ if index_url:
+ pip_args.extend(["-i", index_url])
+ for extra_index in extra_index_url:
+ pip_args.extend(["--extra-index-url", extra_index])
+ if cert:
+ pip_args.extend(["--cert", cert])
+ if client_cert:
+ pip_args.extend(["--client-cert", client_cert])
+ if pre:
+ pip_args.extend(["--pre"])
+ for host in trusted_host:
+ pip_args.extend(["--trusted-host", host])
+
+ if not build_isolation:
+ pip_args.append("--no-build-isolation")
+ pip_args.extend(right_args)
+
+ repository = PyPIRepository(pip_args, cache_dir=cache_dir)
+
+ # Parse all constraints coming from --upgrade-package/-P
+ upgrade_reqs_gen = (install_req_from_line(pkg) for pkg in upgrade_packages)
+ upgrade_install_reqs = {
+ key_from_ireq(install_req): install_req for install_req in upgrade_reqs_gen
+ }
+
+ existing_pins_to_upgrade = set()
+
+ # Proxy with a LocalRequirementsRepository if --upgrade is not specified
+ # (= default invocation)
+ if not upgrade and os.path.exists(output_file.name):
+ # Use a temporary repository to ensure outdated (removed) options from
+ # the existing requirements.txt don't leak into the current repository.
+ tmp_repository = PyPIRepository(pip_args, cache_dir=cache_dir)
+ ireqs = parse_requirements(
+ output_file.name,
+ finder=tmp_repository.finder,
+ session=tmp_repository.session,
+ options=tmp_repository.options,
+ )
+
+ # Exclude packages from --upgrade-package/-P from the existing
+ # constraints, and separately gather pins to be upgraded
+ existing_pins = {}
+ for ireq in filter(is_pinned_requirement, ireqs):
+ key = key_from_ireq(ireq)
+ if key in upgrade_install_reqs:
+ existing_pins_to_upgrade.add(key)
+ else:
+ existing_pins[key] = ireq
+ repository = LocalRequirementsRepository(
+ existing_pins, repository, reuse_hashes=reuse_hashes
+ )
+
+ ###
+ # Parsing/collecting initial requirements
+ ###
+
+ constraints = []
+ for src_file in src_files:
+ is_setup_file = os.path.basename(src_file) == "setup.py"
+ if is_setup_file or src_file == "-":
+ # pip requires filenames and not files. Since we want to support
+ # piping from stdin, we need to briefly save the input from stdin
+ # to a temporary file and have pip read that. This is also used
+ # for reading requirements from install_requires in setup.py.
+ tmpfile = tempfile.NamedTemporaryFile(mode="wt", delete=False)
+ if is_setup_file:
+ from distutils.core import run_setup
+
+ dist = run_setup(src_file)
+ tmpfile.write("\n".join(dist.install_requires))
+ comes_from = "{name} ({filename})".format(
+ name=dist.get_name(), filename=src_file
+ )
+ else:
+ tmpfile.write(sys.stdin.read())
+ comes_from = "-r -"
+ tmpfile.flush()
+ reqs = list(
+ parse_requirements(
+ tmpfile.name,
+ finder=repository.finder,
+ session=repository.session,
+ options=repository.options,
+ )
+ )
+ for req in reqs:
+ req.comes_from = comes_from
+ constraints.extend(reqs)
+ else:
+ constraints.extend(
+ parse_requirements(
+ src_file,
+ finder=repository.finder,
+ session=repository.session,
+ options=repository.options,
+ )
+ )
+
+ primary_packages = {
+ key_from_ireq(ireq) for ireq in constraints if not ireq.constraint
+ }
+
+ allowed_upgrades = primary_packages | existing_pins_to_upgrade
+ constraints.extend(
+ ireq for key, ireq in upgrade_install_reqs.items() if key in allowed_upgrades
+ )
+
+ # Filter out pip environment markers which do not match (PEP 496)
+ constraints = [
+ req for req in constraints if req.markers is None or req.markers.evaluate()
+ ]
+
+ log.debug("Using indexes:")
+ with log.indentation():
+ for index_url in dedup(repository.finder.index_urls):
+ log.debug(redact_auth_from_url(index_url))
+
+ if repository.finder.find_links:
+ log.debug("")
+ log.debug("Using links:")
+ with log.indentation():
+ for find_link in dedup(repository.finder.find_links):
+ log.debug(redact_auth_from_url(find_link))
+
+ try:
+ resolver = Resolver(
+ constraints,
+ repository,
+ prereleases=repository.finder.allow_all_prereleases or pre,
+ cache=DependencyCache(cache_dir),
+ clear_caches=rebuild,
+ allow_unsafe=allow_unsafe,
+ )
+ results = resolver.resolve(max_rounds=max_rounds)
+ if generate_hashes:
+ hashes = resolver.resolve_hashes(results)
+ else:
+ hashes = None
+ except PipToolsError as e:
+ log.error(str(e))
+ sys.exit(2)
+
+ log.debug("")
+
+ ##
+ # Output
+ ##
+
+ writer = OutputWriter(
+ src_files,
+ output_file,
+ click_ctx=ctx,
+ dry_run=dry_run,
+ emit_header=header,
+ emit_index_url=emit_index_url,
+ emit_trusted_host=emit_trusted_host,
+ annotate=annotate,
+ generate_hashes=generate_hashes,
+ default_index_url=repository.DEFAULT_INDEX_URL,
+ index_urls=repository.finder.index_urls,
+ trusted_hosts=repository.finder.trusted_hosts,
+ format_control=repository.finder.format_control,
+ allow_unsafe=allow_unsafe,
+ find_links=repository.finder.find_links,
+ emit_find_links=emit_find_links,
+ )
+ writer.write(
+ results=results,
+ unsafe_requirements=resolver.unsafe_constraints,
+ markers={
+ key_from_ireq(ireq): ireq.markers for ireq in constraints if ireq.markers
+ },
+ hashes=hashes,
+ )
+
+ if dry_run:
+ log.info("Dry-run, so nothing updated.")
diff --git a/third_party/python/pip_tools/piptools/scripts/sync.py b/third_party/python/pip_tools/piptools/scripts/sync.py
new file mode 100644
index 0000000000..9759b302f0
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/scripts/sync.py
@@ -0,0 +1,214 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import itertools
+import os
+import shlex
+import sys
+
+from pip._internal.commands import create_command
+from pip._internal.utils.misc import get_installed_distributions
+
+from .. import click, sync
+from .._compat import parse_requirements
+from ..exceptions import PipToolsError
+from ..logging import log
+from ..repositories import PyPIRepository
+from ..utils import flat_map
+
+DEFAULT_REQUIREMENTS_FILE = "requirements.txt"
+
+
+@click.command(context_settings={"help_option_names": ("-h", "--help")})
+@click.version_option()
+@click.option(
+ "-a",
+ "--ask",
+ is_flag=True,
+ help="Show what would happen, then ask whether to continue",
+)
+@click.option(
+ "-n",
+ "--dry-run",
+ is_flag=True,
+ help="Only show what would happen, don't change anything",
+)
+@click.option("--force", is_flag=True, help="Proceed even if conflicts are found")
+@click.option(
+ "-f",
+ "--find-links",
+ multiple=True,
+ help="Look for archives in this directory or on this HTML page",
+)
+@click.option("-i", "--index-url", help="Change index URL (defaults to PyPI)")
+@click.option(
+ "--extra-index-url", multiple=True, help="Add additional index URL to search"
+)
+@click.option(
+ "--trusted-host",
+ multiple=True,
+ help="Mark this host as trusted, even though it does not have valid or any HTTPS.",
+)
+@click.option(
+ "--no-index",
+ is_flag=True,
+ help="Ignore package index (only looking at --find-links URLs instead)",
+)
+@click.option("-v", "--verbose", count=True, help="Show more output")
+@click.option("-q", "--quiet", count=True, help="Give less output")
+@click.option(
+ "--user", "user_only", is_flag=True, help="Restrict attention to user directory"
+)
+@click.option("--cert", help="Path to alternate CA bundle.")
+@click.option(
+ "--client-cert",
+ help="Path to SSL client certificate, a single file containing "
+ "the private key and the certificate in PEM format.",
+)
+@click.argument("src_files", required=False, type=click.Path(exists=True), nargs=-1)
+@click.option("--pip-args", help="Arguments to pass directly to pip install.")
+def cli(
+ ask,
+ dry_run,
+ force,
+ find_links,
+ index_url,
+ extra_index_url,
+ trusted_host,
+ no_index,
+ verbose,
+ quiet,
+ user_only,
+ cert,
+ client_cert,
+ src_files,
+ pip_args,
+):
+ """Synchronize virtual environment with requirements.txt."""
+ log.verbosity = verbose - quiet
+
+ if not src_files:
+ if os.path.exists(DEFAULT_REQUIREMENTS_FILE):
+ src_files = (DEFAULT_REQUIREMENTS_FILE,)
+ else:
+ msg = "No requirement files given and no {} found in the current directory"
+ log.error(msg.format(DEFAULT_REQUIREMENTS_FILE))
+ sys.exit(2)
+
+ if any(src_file.endswith(".in") for src_file in src_files):
+ msg = (
+ "Some input files have the .in extension, which is most likely an error "
+ "and can cause weird behaviour. You probably meant to use "
+ "the corresponding *.txt file?"
+ )
+ if force:
+ log.warning("WARNING: " + msg)
+ else:
+ log.error("ERROR: " + msg)
+ sys.exit(2)
+
+ install_command = create_command("install")
+ options, _ = install_command.parse_args([])
+ session = install_command._build_session(options)
+ finder = install_command._build_package_finder(options=options, session=session)
+
+ # Parse the requirements file. Note: all options inside the requirements
+ # file will be collected by the finder.
+ requirements = flat_map(
+ lambda src: parse_requirements(src, finder=finder, session=session), src_files
+ )
+
+ try:
+ requirements = sync.merge(requirements, ignore_conflicts=force)
+ except PipToolsError as e:
+ log.error(str(e))
+ sys.exit(2)
+
+ installed_dists = get_installed_distributions(skip=[], user_only=user_only)
+ to_install, to_uninstall = sync.diff(requirements, installed_dists)
+
+ install_flags = (
+ _compose_install_flags(
+ finder,
+ no_index=no_index,
+ index_url=index_url,
+ extra_index_url=extra_index_url,
+ trusted_host=trusted_host,
+ find_links=find_links,
+ user_only=user_only,
+ cert=cert,
+ client_cert=client_cert,
+ )
+ + shlex.split(pip_args or "")
+ )
+ sys.exit(
+ sync.sync(
+ to_install,
+ to_uninstall,
+ dry_run=dry_run,
+ install_flags=install_flags,
+ ask=ask,
+ )
+ )
+
+
+def _compose_install_flags(
+ finder,
+ no_index=False,
+ index_url=None,
+ extra_index_url=None,
+ trusted_host=None,
+ find_links=None,
+ user_only=False,
+ cert=None,
+ client_cert=None,
+):
+ """
+ Compose install flags with the given finder and CLI options.
+ """
+ result = []
+
+ # Build --index-url/--extra-index-url/--no-index
+ if no_index:
+ result.append("--no-index")
+ elif index_url:
+ result.extend(["--index-url", index_url])
+ elif finder.index_urls:
+ finder_index_url = finder.index_urls[0]
+ if finder_index_url != PyPIRepository.DEFAULT_INDEX_URL:
+ result.extend(["--index-url", finder_index_url])
+ for extra_index in finder.index_urls[1:]:
+ result.extend(["--extra-index-url", extra_index])
+ else:
+ result.append("--no-index")
+
+ for extra_index in extra_index_url:
+ result.extend(["--extra-index-url", extra_index])
+
+ # Build --trusted-host flags
+ for host in itertools.chain(trusted_host, finder.trusted_hosts):
+ result.extend(["--trusted-host", host])
+
+ # Build --find-links
+ for link in itertools.chain(find_links, finder.find_links):
+ result.extend(["--find-links", link])
+
+ # Build format controls --no-binary/--only-binary
+ for format_control in ("no_binary", "only_binary"):
+ formats = getattr(finder.format_control, format_control)
+ if not formats:
+ continue
+ result.extend(
+ ["--" + format_control.replace("_", "-"), ",".join(sorted(formats))]
+ )
+
+ if user_only:
+ result.append("--user")
+
+ if cert:
+ result.extend(["--cert", cert])
+
+ if client_cert:
+ result.extend(["--client-cert", client_cert])
+
+ return result
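+
+# Illustrative result (assumed finder state): with a non-default index URL
+# and one trusted host configured, _compose_install_flags might return
+# ["--index-url", "https://pypi.example.test/simple",
+#  "--trusted-host", "pypi.example.test"].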
diff --git a/third_party/python/pip_tools/piptools/sync.py b/third_party/python/pip_tools/piptools/sync.py
new file mode 100644
index 0000000000..9967682c7d
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/sync.py
@@ -0,0 +1,216 @@
+import collections
+import os
+import sys
+import tempfile
+from subprocess import check_call # nosec
+
+from pip._internal.commands.freeze import DEV_PKGS
+from pip._internal.utils.compat import stdlib_pkgs
+
+from . import click
+from .exceptions import IncompatibleRequirements
+from .logging import log
+from .utils import (
+ flat_map,
+ format_requirement,
+ get_hashes_from_ireq,
+ is_url_requirement,
+ key_from_ireq,
+ key_from_req,
+)
+
+PACKAGES_TO_IGNORE = (
+ ["-markerlib", "pip", "pip-tools", "pip-review", "pkg-resources"]
+ + list(stdlib_pkgs)
+ + list(DEV_PKGS)
+)
+
+
+def dependency_tree(installed_keys, root_key):
+ """
+ Calculate the dependency tree for the package `root_key` and return
+ a collection of all its dependencies. Uses a BFS traversal (the queue
+ is processed first-in, first-out).
+
+ `installed_keys` should be a {key: requirement} mapping, e.g.
+ {'django': from_line('django==1.8')}
+ `root_key` should be the key to return the dependency tree for.
+ """
+ dependencies = set()
+ queue = collections.deque()
+
+ if root_key in installed_keys:
+ dep = installed_keys[root_key]
+ queue.append(dep)
+
+ while queue:
+ v = queue.popleft()
+ key = key_from_req(v)
+ if key in dependencies:
+ continue
+
+ dependencies.add(key)
+
+ for dep_specifier in v.requires():
+ dep_name = key_from_req(dep_specifier)
+ if dep_name in installed_keys:
+ dep = installed_keys[dep_name]
+
+ if dep_specifier.specifier.contains(dep.version):
+ queue.append(dep)
+
+ return dependencies
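+
+# Illustrative example (assumed installed set): if installed_keys maps
+# "flask", "werkzeug" and "jinja2" to their installed distributions, then
+# dependency_tree(installed_keys, "flask") returns {"flask"} plus those of
+# its transitive dependencies whose installed versions satisfy the
+# corresponding specifiers.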
+
+
+def get_dists_to_ignore(installed):
+ """
+ Returns a collection of package names to ignore when performing pip-sync,
+ based on the currently installed environment. For example, when pip-tools
+ is installed in the local environment, it should be ignored, including all
+ of its dependencies (e.g. click). When pip-tools is not installed
+ locally, click should also be installed/uninstalled depending on the given
+ requirements.
+ """
+ installed_keys = {key_from_req(r): r for r in installed}
+ return list(
+ flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)
+ )
+
+
+def merge(requirements, ignore_conflicts):
+ by_key = {}
+
+ for ireq in requirements:
+ # Limitation: URL requirements are merged by precise string match, so
+ # "file:///example.zip#egg=example", "file:///example.zip", and
+ # "example==1.0" will not merge with each other
+ if ireq.match_markers():
+ key = key_from_ireq(ireq)
+
+ if not ignore_conflicts:
+ existing_ireq = by_key.get(key)
+ if existing_ireq:
+ # NOTE: We check equality here since we can assume that the
+ # requirements are all pinned
+ if ireq.specifier != existing_ireq.specifier:
+ raise IncompatibleRequirements(ireq, existing_ireq)
+
+ # TODO: Always pick the largest specifier in case of a conflict
+ by_key[key] = ireq
+ return by_key.values()
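+
+# A minimal usage sketch (illustrative; assumes `requirements` is an iterable
+# of pinned InstallRequirements):
+#
+#     merged = merge(requirements, ignore_conflicts=False)
+#     # raises IncompatibleRequirements if two pins for the same key disagree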
+
+
+def diff_key_from_ireq(ireq):
+ """
+ Calculate a key for comparing a compiled requirement with installed modules.
+ For URL requirements, only provide a useful key if the URL includes
+ #egg=name==version, which will set ireq.req.name and ireq.specifier.
+ Otherwise return ireq.link so the key will not match and the package will
+ be reinstalled. Reinstalling is necessary to pick up changes when the URL
+ changes but the version does not.
+ """
+ if is_url_requirement(ireq):
+ if (
+ ireq.req
+ and (getattr(ireq.req, "key", None) or getattr(ireq.req, "name", None))
+ and ireq.specifier
+ ):
+ return key_from_ireq(ireq)
+ return str(ireq.link)
+ return key_from_ireq(ireq)
+
+
+def diff(compiled_requirements, installed_dists):
+ """
+ Calculate which packages should be installed or uninstalled, given a set
+ of compiled requirements and a list of currently installed modules.
+ """
+ requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}
+
+ satisfied = set() # holds keys
+ to_install = set() # holds InstallRequirement objects
+ to_uninstall = set() # holds keys
+
+ pkgs_to_ignore = get_dists_to_ignore(installed_dists)
+ for dist in installed_dists:
+ key = key_from_req(dist)
+ if key not in requirements_lut or not requirements_lut[key].match_markers():
+ to_uninstall.add(key)
+ elif requirements_lut[key].specifier.contains(dist.version):
+ satisfied.add(key)
+
+ for key, requirement in requirements_lut.items():
+ if key not in satisfied and requirement.match_markers():
+ to_install.add(requirement)
+
+ # Make sure to not uninstall any packages that should be ignored
+ to_uninstall -= set(pkgs_to_ignore)
+
+ return (to_install, to_uninstall)
+
+
+def sync(to_install, to_uninstall, dry_run=False, install_flags=None, ask=False):
+ """
+ Installs and uninstalls the given sets of modules.
+ """
+ exit_code = 0
+
+ if not to_uninstall and not to_install:
+ log.info("Everything up-to-date", err=False)
+ return exit_code
+
+ pip_flags = []
+ if log.verbosity < 0:
+ pip_flags += ["-q"]
+
+ if ask:
+ dry_run = True
+
+ if dry_run:
+ if to_uninstall:
+ click.echo("Would uninstall:")
+ for pkg in sorted(to_uninstall):
+ click.echo(" {}".format(pkg))
+
+ if to_install:
+ click.echo("Would install:")
+ for ireq in sorted(to_install, key=key_from_ireq):
+ click.echo(" {}".format(format_requirement(ireq)))
+
+ exit_code = 1
+
+ if ask and click.confirm("Would you like to proceed with these changes?"):
+ dry_run = False
+ exit_code = 0
+
+ if not dry_run:
+ if to_uninstall:
+ check_call( # nosec
+ [sys.executable, "-m", "pip", "uninstall", "-y"]
+ + pip_flags
+ + sorted(to_uninstall)
+ )
+
+ if to_install:
+ if install_flags is None:
+ install_flags = []
+ # prepare requirement lines
+ req_lines = []
+ for ireq in sorted(to_install, key=key_from_ireq):
+ ireq_hashes = get_hashes_from_ireq(ireq)
+ req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
+
+ # save requirement lines to a temporary file
+ tmp_req_file = tempfile.NamedTemporaryFile(mode="wt", delete=False)
+ tmp_req_file.write("\n".join(req_lines))
+ tmp_req_file.close()
+
+ try:
+ check_call( # nosec
+ [sys.executable, "-m", "pip", "install", "-r", tmp_req_file.name]
+ + pip_flags
+ + install_flags
+ )
+ finally:
+ os.unlink(tmp_req_file.name)
+
+ return exit_code
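+
+# A typical flow (illustrative sketch; assumes parsed requirements and the
+# currently installed distributions):
+#
+#     to_install, to_uninstall = diff(requirements, installed_dists)
+#     exit_code = sync(to_install, to_uninstall, dry_run=True)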
diff --git a/third_party/python/pip_tools/piptools/utils.py b/third_party/python/pip_tools/piptools/utils.py
new file mode 100644
index 0000000000..4b20ba6e38
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/utils.py
@@ -0,0 +1,384 @@
+# coding: utf-8
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import sys
+from collections import OrderedDict
+from itertools import chain
+
+from click.utils import LazyFile
+from pip._internal.req.constructors import install_req_from_line
+from pip._internal.utils.misc import redact_auth_from_url
+from pip._internal.vcs import is_url
+from pip._vendor import six
+from pip._vendor.six.moves import shlex_quote
+
+from .click import style
+
+UNSAFE_PACKAGES = {"setuptools", "distribute", "pip"}
+COMPILE_EXCLUDE_OPTIONS = {
+ "--dry-run",
+ "--quiet",
+ "--rebuild",
+ "--upgrade",
+ "--upgrade-package",
+ "--verbose",
+ "--cache-dir",
+ "--no-reuse-hashes",
+}
+
+
+def key_from_ireq(ireq):
+ """Get a standardized key for an InstallRequirement."""
+ if ireq.req is None and ireq.link is not None:
+ return str(ireq.link)
+ else:
+ return key_from_req(ireq.req)
+
+
+def key_from_req(req):
+ """Get an all-lowercase version of the requirement's name."""
+ if hasattr(req, "key"):
+ # from pkg_resources, such as installed dists for pip-sync
+ key = req.key
+ else:
+ # from packaging, such as install requirements from requirements.txt
+ key = req.name
+
+ key = key.replace("_", "-").lower()
+ return key
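+
+# For example (illustrative): a requirement named "My_Package" yields the
+# key "my-package".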
+
+
+def comment(text):
+ return style(text, fg="green")
+
+
+def make_install_requirement(name, version, extras, constraint=False):
+ # If no extras are specified, the extras string is blank
+ extras_string = ""
+ if extras:
+ # Sort extras for stability
+ extras_string = "[{}]".format(",".join(sorted(extras)))
+
+ return install_req_from_line(
+ str("{}{}=={}".format(name, extras_string, version)), constraint=constraint
+ )
+
+
+def is_url_requirement(ireq):
+ """
+ Return True if the requirement was specified as a path or URL.
+ ireq.original_link will have been set by InstallRequirement.__init__.
+ """
+ return bool(ireq.original_link)
+
+
+def format_requirement(ireq, marker=None, hashes=None):
+ """
+ Generic formatter for pretty printing InstallRequirements to the terminal
+ in a less verbose way than using its `__str__` method.
+ """
+ if ireq.editable:
+ line = "-e {}".format(ireq.link.url)
+ elif is_url_requirement(ireq):
+ line = ireq.link.url
+ else:
+ line = str(ireq.req).lower()
+
+ if marker:
+ line = "{} ; {}".format(line, marker)
+
+ if hashes:
+ for hash_ in sorted(hashes):
+ line += " \\\n --hash={}".format(hash_)
+
+ return line
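+
+# Illustrative output (assumed pinned ireq with a single hash):
+#
+#     django==1.8 \
+#         --hash=sha256:<64 hex chars>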
+
+
+def format_specifier(ireq):
+ """
+ Generic formatter for pretty printing the specifier part of
+ InstallRequirements to the terminal.
+ """
+ # TODO: Ideally, this is carried over to the pip library itself
+ specs = ireq.specifier if ireq.req is not None else []
+ specs = sorted(specs, key=lambda x: x.version)
+ return ",".join(str(s) for s in specs) or "<any>"
+
+
+def is_pinned_requirement(ireq):
+ """
+ Returns whether an InstallRequirement is a "pinned" requirement.
+
+ An InstallRequirement is considered pinned if:
+
+ - It is not editable
+ - It has exactly one specifier
+ - That specifier is "=="
+ - The version does not contain a wildcard
+
+ Examples:
+ django==1.8 # pinned
+ django>1.8 # NOT pinned
+ django~=1.8 # NOT pinned
+ django==1.* # NOT pinned
+ """
+ if ireq.editable:
+ return False
+
+ if ireq.req is None or len(ireq.specifier) != 1:
+ return False
+
+ spec = next(iter(ireq.specifier))
+ return spec.operator in {"==", "==="} and not spec.version.endswith(".*")
+
+
+def as_tuple(ireq):
+ """
+ Pulls out the (name: str, version: str, extras: Tuple[str, ...]) tuple
+ from the pinned InstallRequirement.
+ """
+ if not is_pinned_requirement(ireq):
+ raise TypeError("Expected a pinned InstallRequirement, got {}".format(ireq))
+
+ name = key_from_ireq(ireq)
+ version = next(iter(ireq.specifier)).version
+ extras = tuple(sorted(ireq.extras))
+ return name, version, extras
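+
+# Illustrative example: for an ireq parsed from "Django[bcrypt]==1.8",
+# as_tuple(ireq) returns ("django", "1.8", ("bcrypt",)).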
+
+
+def flat_map(fn, collection):
+ """Map a function over a collection and flatten the result by one-level"""
+ return chain.from_iterable(map(fn, collection))
+
+
+def lookup_table(values, key=None, keyval=None, unique=False, use_lists=False):
+ """
+ Builds a dict-based lookup table (index) elegantly.
+
+ Supports building normal and unique lookup tables. For example:
+
+ >>> assert lookup_table(
+ ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0]) == {
+ ... 'b': {'bar', 'baz'},
+ ... 'f': {'foo'},
+ ... 'q': {'quux', 'qux'}
+ ... }
+
+ For key functions that uniquely identify values, set unique=True:
+
+ >>> assert lookup_table(
+ ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0],
+ ... unique=True) == {
+ ... 'b': 'baz',
+ ... 'f': 'foo',
+ ... 'q': 'quux'
+ ... }
+
+ To have the values represented as lists, set use_lists=True:
+
+ >>> assert lookup_table(
+ ... ['foo', 'bar', 'baz', 'qux', 'quux'], lambda s: s[0],
+ ... use_lists=True) == {
+ ... 'b': ['bar', 'baz'],
+ ... 'f': ['foo'],
+ ... 'q': ['qux', 'quux']
+ ... }
+
+ The values of the resulting lookup table will be lists, not sets.
+
+ For extra power, you can even change the values while building up the LUT.
+ To do so, use the `keyval` function instead of the `key` arg:
+
+ >>> assert lookup_table(
+ ... ['foo', 'bar', 'baz', 'qux', 'quux'],
+ ... keyval=lambda s: (s[0], s[1:])) == {
+ ... 'b': {'ar', 'az'},
+ ... 'f': {'oo'},
+ ... 'q': {'uux', 'ux'}
+ ... }
+
+ """
+ if keyval is None:
+ if key is None:
+
+ def keyval(v):
+ return v
+
+ else:
+
+ def keyval(v):
+ return (key(v), v)
+
+ if unique:
+ return dict(keyval(v) for v in values)
+
+ lut = {}
+ for value in values:
+ k, v = keyval(value)
+ try:
+ s = lut[k]
+ except KeyError:
+ if use_lists:
+ s = lut[k] = list()
+ else:
+ s = lut[k] = set()
+ if use_lists:
+ s.append(v)
+ else:
+ s.add(v)
+ return dict(lut)
+
+
+def dedup(iterable):
+ """Deduplicate an iterable object like iter(set(iterable)) but
+ order-preserved.
+ """
+ return iter(OrderedDict.fromkeys(iterable))
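+
+# For example (illustrative), first-seen order is preserved:
+#
+#     >>> list(dedup(["b", "a", "b", "c"]))
+#     ['b', 'a', 'c']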
+
+
+def name_from_req(req):
+ """Get the name of the requirement"""
+ if hasattr(req, "project_name"):
+ # from pkg_resources, such as installed dists for pip-sync
+ return req.project_name
+ else:
+ # from packaging, such as install requirements from requirements.txt
+ return req.name
+
+
+def fs_str(string):
+ """
+ Convert given string to a correctly encoded filesystem string.
+
+ On Python 2, if the input string is unicode, converts it to bytes
+ encoded with the filesystem encoding.
+
+ On Python 3 returns the string as is, since Python 3 uses unicode
+ paths and the input string shouldn't be bytes.
+
+ :type string: str|unicode
+ :rtype: str
+ """
+ if isinstance(string, str):
+ return string
+ if isinstance(string, bytes):
+ raise TypeError("fs_str() argument must not be bytes")
+ return string.encode(_fs_encoding)
+
+
+_fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
+
+
+def get_hashes_from_ireq(ireq):
+ """
+ Given an InstallRequirement, return a list of string hashes in
+ the format "{algorithm}:{hash}". Return an empty list if there are no hashes
+ in the requirement options.
+ """
+ result = []
+ for algorithm, hexdigests in ireq.hash_options.items():
+ for hash_ in hexdigests:
+ result.append("{}:{}".format(algorithm, hash_))
+ return result
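+
+# For example (illustrative; assumes the ireq was parsed from a line carrying
+# a --hash option), get_hashes_from_ireq(ireq) might return
+# ["sha256:2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"].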
+
+
+def force_text(s):
+ """
+ Return a string representing `s`.
+ """
+ if s is None:
+ return ""
+ if not isinstance(s, six.string_types):
+ return six.text_type(s)
+ return s
+
+
+def get_compile_command(click_ctx):
+ """
+ Returns a normalized compile command depending on the CLI context.
+
+ The command will be normalized by:
+ - expanding short options to long ones
+ - removing values that are already default
+ - sorting the arguments
+ - removing one-off arguments like '--upgrade'
+ - removing arguments that don't change build behaviour like '--verbose'
+ """
+ from piptools.scripts.compile import cli
+
+ # Map of the compile cli options (option name -> click.Option)
+ compile_options = {option.name: option for option in cli.params}
+
+ left_args = []
+ right_args = []
+
+ for option_name, value in click_ctx.params.items():
+ option = compile_options[option_name]
+
+ # Collect variadic args separately; they will be added
+ # at the end of the command later
+ if option.nargs < 0:
+ # These will necessarily be src_files
+ # Re-add click-stripped '--' if any start with '-'
+ if any(val.startswith("-") and val != "-" for val in value):
+ right_args.append("--")
+ right_args.extend([shlex_quote(force_text(val)) for val in value])
+ continue
+
+ # Get the latest option name (usually it'll be a long name)
+ option_long_name = option.opts[-1]
+
+ # Exclude one-off options (--upgrade/--upgrade-package/--rebuild/...)
+ # or options that don't change compile behaviour (--verbose/--dry-run/...)
+ if option_long_name in COMPILE_EXCLUDE_OPTIONS:
+ continue
+
+ # Skip options without a value
+ if option.default is None and not value:
+ continue
+
+ # Skip options with a default value
+ if option.default == value:
+ continue
+
+ # Use a file name for file-like objects
+ if isinstance(value, LazyFile):
+ value = value.name
+
+ # Convert value to the list
+ if not isinstance(value, (tuple, list)):
+ value = [value]
+
+ for val in value:
+ # Flags have no value, so append the true or false option's long name
+ if option.is_flag:
+ # If there are false-options, choose the option name depending on the value
+ if option.secondary_opts:
+ # Get the latest false-option
+ secondary_option_long_name = option.secondary_opts[-1]
+ arg = option_long_name if val else secondary_option_long_name
+ # There are no false-options, use true-option
+ else:
+ arg = option_long_name
+ left_args.append(shlex_quote(arg))
+ # Append to args the option with a value
+ else:
+ if isinstance(val, six.string_types) and is_url(val):
+ val = redact_auth_from_url(val)
+ if option.name == "pip_args":
+ # shlex_quote would produce functional but noisily quoted results,
+ # e.g. --pip-args='--cache-dir='"'"'/tmp/with spaces'"'"''
+ # Instead, we try to get more legible quoting via repr:
+ left_args.append(
+ "{option}={value}".format(
+ option=option_long_name, value=repr(fs_str(force_text(val)))
+ )
+ )
+ else:
+ left_args.append(
+ "{option}={value}".format(
+ option=option_long_name, value=shlex_quote(force_text(val))
+ )
+ )
+
+ return " ".join(["pip-compile"] + sorted(left_args) + sorted(right_args))
diff --git a/third_party/python/pip_tools/piptools/writer.py b/third_party/python/pip_tools/piptools/writer.py
new file mode 100644
index 0000000000..515df198eb
--- /dev/null
+++ b/third_party/python/pip_tools/piptools/writer.py
@@ -0,0 +1,243 @@
+from __future__ import unicode_literals
+
+import os
+import re
+from itertools import chain
+
+from pip._vendor import six
+
+from .click import unstyle
+from .logging import log
+from .utils import (
+ UNSAFE_PACKAGES,
+ comment,
+ dedup,
+ format_requirement,
+ get_compile_command,
+ key_from_ireq,
+)
+
+MESSAGE_UNHASHED_PACKAGE = comment(
+ "# WARNING: pip install will require the following package to be hashed."
+ "\n# Consider using a hashable URL like "
+ "https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip"
+)
+
+MESSAGE_UNSAFE_PACKAGES_UNPINNED = comment(
+ "# WARNING: The following packages were not pinned, but pip requires them to be"
+ "\n# pinned when the requirements file includes hashes. "
+ "Consider using the --allow-unsafe flag."
+)
+
+MESSAGE_UNSAFE_PACKAGES = comment(
+ "# The following packages are considered to be unsafe in a requirements file:"
+)
+
+MESSAGE_UNINSTALLABLE = (
+ "The generated requirements file may be rejected by pip install. "
+ "See # WARNING lines for details."
+)
+
+
+strip_comes_from_line_re = re.compile(r" \(line \d+\)$")
+
+
+def _comes_from_as_string(ireq):
+ if isinstance(ireq.comes_from, six.string_types):
+ return strip_comes_from_line_re.sub("", ireq.comes_from)
+ return key_from_ireq(ireq.comes_from)
+
+
+class OutputWriter(object):
+ def __init__(
+ self,
+ src_files,
+ dst_file,
+ click_ctx,
+ dry_run,
+ emit_header,
+ emit_index_url,
+ emit_trusted_host,
+ annotate,
+ generate_hashes,
+ default_index_url,
+ index_urls,
+ trusted_hosts,
+ format_control,
+ allow_unsafe,
+ find_links,
+ emit_find_links,
+ ):
+ self.src_files = src_files
+ self.dst_file = dst_file
+ self.click_ctx = click_ctx
+ self.dry_run = dry_run
+ self.emit_header = emit_header
+ self.emit_index_url = emit_index_url
+ self.emit_trusted_host = emit_trusted_host
+ self.annotate = annotate
+ self.generate_hashes = generate_hashes
+ self.default_index_url = default_index_url
+ self.index_urls = index_urls
+ self.trusted_hosts = trusted_hosts
+ self.format_control = format_control
+ self.allow_unsafe = allow_unsafe
+ self.find_links = find_links
+ self.emit_find_links = emit_find_links
+
+ def _sort_key(self, ireq):
+ return (not ireq.editable, str(ireq.req).lower())
+
+ def write_header(self):
+ if self.emit_header:
+ yield comment("#")
+ yield comment("# This file is autogenerated by pip-compile")
+ yield comment("# To update, run:")
+ yield comment("#")
+ compile_command = os.environ.get(
+ "CUSTOM_COMPILE_COMMAND"
+ ) or get_compile_command(self.click_ctx)
+ yield comment("# {}".format(compile_command))
+ yield comment("#")
+
+ def write_index_options(self):
+ if self.emit_index_url:
+ for index, index_url in enumerate(dedup(self.index_urls)):
+ if index_url.rstrip("/") == self.default_index_url:
+ continue
+ flag = "--index-url" if index == 0 else "--extra-index-url"
+ yield "{} {}".format(flag, index_url)
+
+ def write_trusted_hosts(self):
+ if self.emit_trusted_host:
+ for trusted_host in dedup(self.trusted_hosts):
+ yield "--trusted-host {}".format(trusted_host)
+
+ def write_format_controls(self):
+ for nb in dedup(sorted(self.format_control.no_binary)):
+ yield "--no-binary {}".format(nb)
+ for ob in dedup(sorted(self.format_control.only_binary)):
+ yield "--only-binary {}".format(ob)
+
+ def write_find_links(self):
+ if self.emit_find_links:
+ for find_link in dedup(self.find_links):
+ yield "--find-links {}".format(find_link)
+
+ def write_flags(self):
+ emitted = False
+ for line in chain(
+ self.write_index_options(),
+ self.write_find_links(),
+ self.write_trusted_hosts(),
+ self.write_format_controls(),
+ ):
+ emitted = True
+ yield line
+ if emitted:
+ yield ""
+
+ def _iter_lines(self, results, unsafe_requirements=None, markers=None, hashes=None):
+ # default values
+ unsafe_requirements = unsafe_requirements or []
+ markers = markers or {}
+ hashes = hashes or {}
+
+ # Check for unhashed or unpinned packages if at least one package does have
+ # hashes, which will trigger pip install's --require-hashes mode.
+ warn_uninstallable = False
+ has_hashes = hashes and any(hash for hash in hashes.values())
+
+ yielded = False
+
+ for line in self.write_header():
+ yield line
+ yielded = True
+ for line in self.write_flags():
+ yield line
+ yielded = True
+
+ unsafe_requirements = (
+ {r for r in results if r.name in UNSAFE_PACKAGES}
+ if not unsafe_requirements
+ else unsafe_requirements
+ )
+ packages = {r for r in results if r.name not in UNSAFE_PACKAGES}
+
+ if packages:
+ packages = sorted(packages, key=self._sort_key)
+ for ireq in packages:
+ if has_hashes and not hashes.get(ireq):
+ yield MESSAGE_UNHASHED_PACKAGE
+ warn_uninstallable = True
+ line = self._format_requirement(
+ ireq, markers.get(key_from_ireq(ireq)), hashes=hashes
+ )
+ yield line
+ yielded = True
+
+ if unsafe_requirements:
+ unsafe_requirements = sorted(unsafe_requirements, key=self._sort_key)
+ yield ""
+ yielded = True
+ if has_hashes and not self.allow_unsafe:
+ yield MESSAGE_UNSAFE_PACKAGES_UNPINNED
+ warn_uninstallable = True
+ else:
+ yield MESSAGE_UNSAFE_PACKAGES
+
+ for ireq in unsafe_requirements:
+ ireq_key = key_from_ireq(ireq)
+ if not self.allow_unsafe:
+ yield comment("# {}".format(ireq_key))
+ else:
+ line = self._format_requirement(
+ ireq, marker=markers.get(ireq_key), hashes=hashes
+ )
+ yield line
+
+ # Yield even when there's no real content, so that blank files are written
+ if not yielded:
+ yield ""
+
+ if warn_uninstallable:
+ log.warning(MESSAGE_UNINSTALLABLE)
+
+ def write(self, results, unsafe_requirements, markers, hashes):
+ for line in self._iter_lines(results, unsafe_requirements, markers, hashes):
+ log.info(line)
+ if not self.dry_run:
+ self.dst_file.write(unstyle(line).encode("utf-8"))
+ self.dst_file.write(os.linesep.encode("utf-8"))
+
+ def _format_requirement(self, ireq, marker=None, hashes=None):
+ ireq_hashes = (hashes if hashes is not None else {}).get(ireq)
+
+ line = format_requirement(ireq, marker=marker, hashes=ireq_hashes)
+
+ if not self.annotate:
+ return line
+
+ # Annotate which packages or requirements-in files require this package
+ required_by = set()
+ if hasattr(ireq, "_source_ireqs"):
+ required_by |= {
+ _comes_from_as_string(src_ireq)
+ for src_ireq in ireq._source_ireqs
+ if src_ireq.comes_from
+ }
+ elif ireq.comes_from:
+ required_by.add(_comes_from_as_string(ireq))
+ if required_by:
+ required_by = sorted(required_by)
+ if len(required_by) == 1:
+ source = required_by[0]
+ annotation = " # via " + source
+ else:
+ annotation_lines = [" # via"]
+ for source in required_by:
+ annotation_lines.append(" # " + source)
+ annotation = "\n".join(annotation_lines)
+ line = "{}\n{}".format(line, comment(annotation))
+ return line
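+
+# Illustrative annotation output (editorial note; the package names are
+# made up): with annotate enabled, a pinned requirement is rendered as
+#
+#     six==1.16.0
+#         # via pip-tools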
diff --git a/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/LICENSE b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/LICENSE
new file mode 100644
index 0000000000..b76b412e28
--- /dev/null
+++ b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/LICENSE
@@ -0,0 +1,75 @@
+The MIT License (MIT)
+
+Copyright (c) 2020 Thomas Grainger.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+Portions of this pkgutil-resolve-name Software may utilize the following copyrighted material, the use of which is hereby acknowledged.
+
+cpython: https://github.com/python/cpython/tree/1ed61617a4a6632905ad6a0b440cd2cafb8b6414
+
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
+All Rights Reserved" are retained in Python alone or in any derivative version
+prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
diff --git a/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/METADATA b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/METADATA
new file mode 100644
index 0000000000..0d08c6f369
--- /dev/null
+++ b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/METADATA
@@ -0,0 +1,19 @@
+Metadata-Version: 2.1
+Name: pkgutil_resolve_name
+Version: 1.3.10
+Summary: Resolve a name to an object.
+Home-page: https://github.com/graingert/pkgutil-resolve-name
+Author: Vinay Sajip
+Author-email: vinay_sajip@yahoo.co.uk
+Maintainer: Thomas Grainger
+Maintainer-email: pkgutil-resolve-name@graingert.co.uk
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
+Classifier: License :: OSI Approved :: MIT License
+
+pkgutil-resolve-name
+====================
+
+A backport of Python 3.9's ``pkgutil.resolve_name``.
+See the `Python 3.9 documentation <https://docs.python.org/3.9/library/pkgutil.html#pkgutil.resolve_name>`__.
+
diff --git a/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/RECORD b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/RECORD
new file mode 100644
index 0000000000..775ec2bd67
--- /dev/null
+++ b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/RECORD
@@ -0,0 +1,5 @@
+pkgutil_resolve_name.py,sha256=vH1SKMtmjviGJ2AIUvZVyYQ11fsvfRdhWhh5rQYKYbw,4391
+pkgutil_resolve_name-1.3.10.dist-info/LICENSE,sha256=v4kmc0kkUzDmazjSCeKSyu_oypRpN3oVPKcL1Fmhfng,3790
+pkgutil_resolve_name-1.3.10.dist-info/WHEEL,sha256=o-Q2E8s7BKkCJ1EC2uQdaymZVVu2aUt1e8uTTFpzHVs,81
+pkgutil_resolve_name-1.3.10.dist-info/METADATA,sha256=D8-pgw1aFU9HTIkjloHLKncKWsCGNYFYhzmn6qmjDe0,624
+pkgutil_resolve_name-1.3.10.dist-info/RECORD,,
diff --git a/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/WHEEL b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/WHEEL
new file mode 100644
index 0000000000..c71ae61dfb
--- /dev/null
+++ b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name-1.3.10.dist-info/WHEEL
@@ -0,0 +1,4 @@
+Wheel-Version: 1.0
+Generator: flit 3.2.0
+Root-Is-Purelib: true
+Tag: py3-none-any
diff --git a/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name.py b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name.py
new file mode 100644
index 0000000000..10bb99c362
--- /dev/null
+++ b/third_party/python/pkgutil_resolve_name/pkgutil_resolve_name.py
@@ -0,0 +1,112 @@
+"""
+Resolve a name to an object.
+
+It is expected that `name` will be a string in one of the following
+formats, where W is shorthand for a valid Python identifier and dot stands
+for a literal period in these pseudo-regexes:
+
+W(.W)*
+W(.W)*:(W(.W)*)?
+
+The first form is intended for backward compatibility only. It assumes that
+some part of the dotted name is a package, and the rest is an object
+somewhere within that package, possibly nested inside other objects.
+Because the place where the package stops and the object hierarchy starts
+can't be inferred by inspection, repeated attempts to import must be done
+with this form.
+
+In the second form, the caller makes the division point clear through the
+provision of a single colon: the dotted name to the left of the colon is a
+package to be imported, and the dotted name to the right is the object
+hierarchy within that package. Only one import is needed in this form. If
+it ends with the colon, then a module object is returned.
+
+The function will return an object (which might be a module), or raise one
+of the following exceptions:
+
+ValueError - if `name` isn't in a recognised format
+ImportError - if an import failed when it shouldn't have
+AttributeError - if a failure occurred when traversing the object hierarchy
+ within the imported package to get to the desired object
+"""
+
+import importlib
+
+__version__ = "1.3.10"
+
+
+_NAME_PATTERN = None
+
+def resolve_name(name):
+ """
+ Resolve a name to an object.
+
+ It is expected that `name` will be a string in one of the following
+ formats, where W is shorthand for a valid Python identifier and dot stands
+ for a literal period in these pseudo-regexes:
+
+ W(.W)*
+ W(.W)*:(W(.W)*)?
+
+ The first form is intended for backward compatibility only. It assumes that
+ some part of the dotted name is a package, and the rest is an object
+ somewhere within that package, possibly nested inside other objects.
+ Because the place where the package stops and the object hierarchy starts
+ can't be inferred by inspection, repeated attempts to import must be done
+ with this form.
+
+ In the second form, the caller makes the division point clear through the
+ provision of a single colon: the dotted name to the left of the colon is a
+ package to be imported, and the dotted name to the right is the object
+ hierarchy within that package. Only one import is needed in this form. If
+ it ends with the colon, then a module object is returned.
+
+ The function will return an object (which might be a module), or raise one
+ of the following exceptions:
+
+ ValueError - if `name` isn't in a recognised format
+ ImportError - if an import failed when it shouldn't have
+ AttributeError - if a failure occurred when traversing the object hierarchy
+ within the imported package to get to the desired object
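+
+ Illustrative examples (editorial addition, not part of the upstream
+ docstring):
+
+     resolve_name('collections:OrderedDict')  # colon form, single import
+     resolve_name('os.path.join')             # legacy dotted form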
+ """
+ global _NAME_PATTERN
+ if _NAME_PATTERN is None:
+ # Lazy import to speed up Python startup time
+ import re
+ dotted_words = r'(?!\d)(\w+)(\.(?!\d)(\w+))*'
+ _NAME_PATTERN = re.compile(f'^(?P<pkg>{dotted_words})'
+ f'(?P<cln>:(?P<obj>{dotted_words})?)?$',
+ re.UNICODE)
+
+ m = _NAME_PATTERN.match(name)
+ if not m:
+ raise ValueError(f'invalid format: {name!r}')
+ gd = m.groupdict()
+ if gd.get('cln'):
+ # there is a colon - a one-step import is all that's needed
+ mod = importlib.import_module(gd['pkg'])
+ parts = gd.get('obj')
+ parts = parts.split('.') if parts else []
+ else:
+ # no colon - have to iterate to find the package boundary
+ parts = name.split('.')
+ modname = parts.pop(0)
+ # first part *must* be a module/package.
+ mod = importlib.import_module(modname)
+ while parts:
+ p = parts[0]
+ s = f'{modname}.{p}'
+ try:
+ mod = importlib.import_module(s)
+ parts.pop(0)
+ modname = s
+ except ImportError:
+ break
+ # if we reach this point, mod is the module, already imported, and
+ # parts is the list of parts in the object hierarchy to be traversed, or
+ # an empty list if just the module is wanted.
+ result = mod
+ for p in parts:
+ result = getattr(result, p)
+ return result
diff --git a/third_party/python/ply/ANNOUNCE b/third_party/python/ply/ANNOUNCE
new file mode 100644
index 0000000000..c430051cf4
--- /dev/null
+++ b/third_party/python/ply/ANNOUNCE
@@ -0,0 +1,40 @@
+January 31, 2017
+
+ Announcing : PLY-3.10 (Python Lex-Yacc)
+
+ http://www.dabeaz.com/ply
+
+I'm pleased to announce PLY-3.10--a pure Python implementation of the
+common parsing tools lex and yacc. PLY-3.10 is a minor bug fix
+release. It supports both Python 2 and Python 3.
+
+If you are new to PLY, here are a few highlights:
+
+- PLY is closely modeled after traditional lex/yacc. If you know how
+ to use these or similar tools in other languages, you will find
+ PLY to be comparable.
+
+- PLY provides very extensive error reporting and diagnostic
+ information to assist in parser construction. The original
+ implementation was developed for instructional purposes. As
+ a result, the system tries to identify the most common types
+ of errors made by novice users.
+
+- PLY provides full support for empty productions, error recovery,
+ precedence rules, and ambiguous grammars.
+
+- Parsing is based on LR-parsing which is fast, memory efficient,
+ better suited to large grammars, and which has a number of nice
+ properties when dealing with syntax errors and other parsing
+ problems. Currently, PLY can build its parsing tables using
+ either SLR or LALR(1) algorithms.
+
+More information about PLY can be obtained on the PLY webpage at:
+
+ http://www.dabeaz.com/ply
+
+PLY is freely available.
+
+Cheers,
+
+David Beazley (http://www.dabeaz.com) \ No newline at end of file
diff --git a/third_party/python/ply/CHANGES b/third_party/python/ply/CHANGES
new file mode 100644
index 0000000000..815c23184e
--- /dev/null
+++ b/third_party/python/ply/CHANGES
@@ -0,0 +1,1394 @@
+Version 3.10
+---------------------
+01/31/17: beazley
+ Changed grammar signature computation to not involve hashing
+ functions. Parts are just combined into a big string.
+
+10/07/16: beazley
+ Fixed Issue #101: Incorrect shift-reduce conflict resolution with
+ precedence specifier.
+
+ PLY was incorrectly resolving shift-reduce conflicts in certain
+ cases. For example, in the example/calc/calc.py example, you
+ could trigger it doing this:
+
+ calc > -3 - 4
+ 1 (correct answer should be -7)
+ calc >
+
+ Issue and suggested patch contributed by https://github.com/RomaVis
+
+Version 3.9
+---------------------
+08/30/16: beazley
+ Exposed the parser state number as the parser.state attribute
+ in productions and error functions. For example:
+
+ def p_somerule(p):
+ '''
+ rule : A B C
+ '''
+ print('State:', p.parser.state)
+
+ May address issue #65 (publish current state in error callback).
+
+08/30/16: beazley
+ Fixed Issue #88. Python3 compatibility with ply/cpp.
+
+08/30/16: beazley
+ Fixed Issue #93. Ply can crash if SyntaxError is raised inside
+ a production. Not actually sure if the original implementation
+ worked as documented at all. Yacc has been modified to follow
+ the spec as outlined in the CHANGES noted for 11/27/07 below.
+
+08/30/16: beazley
+ Fixed Issue #97. Failure with code validation when the original
+ source files aren't present. Validation step now ignores
+ the missing file.
+
+08/30/16: beazley
+ Minor fixes to version numbers.
+
+Version 3.8
+---------------------
+10/02/15: beazley
+ Fixed issues related to Python 3.5. Patch contributed by Barry Warsaw.
+
+Version 3.7
+---------------------
+08/25/15: beazley
+ Fixed problems when reading table files from pickled data.
+
+05/07/15: beazley
+ Fixed regression in handling of table modules if specified as module
+ objects. See https://github.com/dabeaz/ply/issues/63
+
+Version 3.6
+---------------------
+04/25/15: beazley
+ If PLY is unable to create the 'parser.out' or 'parsetab.py' files due
+ to permission issues, it now just issues a warning message and
+ continues to operate. This could happen if a module using PLY
+ is installed in a funny way where tables have to be regenerated, but
+ for whatever reason, the user doesn't have write permission on
+ the directory where PLY wants to put them.
+
+04/24/15: beazley
+ Fixed some issues related to use of packages and table file
+ modules. Just to emphasize, PLY now generates its special
+ files such as 'parsetab.py' and 'lextab.py' in the *SAME*
+ directory as the source file that uses lex() and yacc().
+
+ If for some reason, you want to change the name of the table
+ module, use the tabmodule and lextab options:
+
+ lexer = lex.lex(lextab='spamlextab')
+ parser = yacc.yacc(tabmodule='spamparsetab')
+
+ If you specify a simple name as shown, the module will still be
+ created in the same directory as the file invoking lex() or yacc().
+ If you want the table files to be placed into a different package,
+ then give a fully qualified package name. For example:
+
+ lexer = lex.lex(lextab='pkgname.files.lextab')
+ parser = yacc.yacc(tabmodule='pkgname.files.parsetab')
+
+ For this to work, 'pkgname.files' must already exist as a valid
+ Python package (i.e., the directories must already exist and be
+ set up with the proper __init__.py files, etc.).
+
+Version 3.5
+---------------------
+04/21/15: beazley
+ Added support for defaulted_states in the parser. A
+ defaulted_state is a state where the only legal action is a
+ reduction of a single grammar rule across all valid input
+ tokens. For such states, the rule is reduced and the
+ reading of the next lookahead token is delayed until it is
+ actually needed at a later point in time.
+
+ This delay in consuming the next lookahead token is a
+ potentially important feature in advanced parsing
+ applications that require tight interaction between the
+ lexer and the parser. For example, a grammar rule can
+ modify the lexer state upon reduction and have such changes
+ take effect before the next input token is read.
+
+ *** POTENTIAL INCOMPATIBILITY ***
+ One potential danger of defaulted_states is that syntax
+ errors might be deferred to a later point of processing
+ than where they were detected in past versions of PLY.
+ Thus, it's possible that your error handling could change
+ slightly on the same inputs. defaulted_states do not change
+ the overall parsing of the input (i.e., the same grammar is
+ accepted).
+
+ If for some reason, you need to disable defaulted states,
+ you can do this:
+
+ parser = yacc.yacc()
+ parser.defaulted_states = {}
+
+04/21/15: beazley
+ Fixed debug logging in the parser. It wasn't properly reporting goto states
+ on grammar rule reductions.
+
+04/20/15: beazley
+ Added the ability for actions to be defined for character literals (Issue #32). For example:
+
+ literals = [ '{', '}' ]
+
+ def t_lbrace(t):
+ r'\{'
+ # Some action
+ t.type = '{'
+ return t
+
+ def t_rbrace(t):
+ r'\}'
+ # Some action
+ t.type = '}'
+ return t
+
+04/19/15: beazley
+ Import of the 'parsetab.py' file is now constrained to only consider the
+ directory specified by the outputdir argument to yacc(). If not supplied,
+ the import will only consider the directory in which the grammar is defined.
+ This should greatly reduce problems with the wrong parsetab.py file being
+ imported by mistake. For example, if it's found somewhere else on the path
+ by accident.
+
+ *** POTENTIAL INCOMPATIBILITY *** It's possible that this might break some
+ packaging/deployment setup if PLY was instructed to place its parsetab.py
+ in a different location. You'll have to specify a proper outputdir= argument
+ to yacc() to fix this if needed.
+
+04/19/15: beazley
+ Changed default output directory to be the same as that in which the
+ yacc grammar is defined. If your grammar is in a file 'calc.py',
+ then the parsetab.py and parser.out files should be generated in the
+ same directory as that file. The destination directory can be changed
+ using the outputdir= argument to yacc().
+
+04/19/15: beazley
+ Changed the parsetab.py file signature slightly so that the parsetab won't
+ regenerate if created on a different major version of Python (i.e., a
+ parsetab created on Python 2 will work with Python 3).
+
+04/16/15: beazley
+ Fixed Issue #44 call_errorfunc() should return the result of errorfunc()
+
+04/16/15: beazley
+ Support for versions of Python <2.7 is officially dropped. PLY may work, but
+ the unit tests requires Python 2.7 or newer.
+
+04/16/15: beazley
+ Fixed bug related to calling yacc(start=...). PLY wasn't regenerating the
+ table file correctly for this case.
+
+04/16/15: beazley
+ Added skipped tests for PyPy and Java. Related to use of Python's -O option.
+
+05/29/13: beazley
+ Added filter to make unit tests pass under 'python -3'.
+ Reported by Neil Muller.
+
+05/29/13: beazley
+ Fixed CPP_INTEGER regex in ply/cpp.py (Issue 21).
+ Reported by @vbraun.
+
+05/29/13: beazley
+ Fixed yacc validation bugs when from __future__ import unicode_literals
+ is being used. Reported by Kenn Knowles.
+
+05/29/13: beazley
+ Added support for Travis-CI. Contributed by Kenn Knowles.
+
+05/29/13: beazley
+ Added a .gitignore file. Suggested by Kenn Knowles.
+
+05/29/13: beazley
+ Fixed validation problems for source files that include a
+ different source code encoding specifier. Fix relies on
+ the inspect module. Should work on Python 2.6 and newer.
+ Not sure about older versions of Python.
+ Contributed by Michael Droettboom
+
+05/21/13: beazley
+ Fixed unit tests for yacc to eliminate random failures due to dict hash value
+ randomization in Python 3.3
+ Reported by Arfrever
+
+10/15/12: beazley
+ Fixed comment whitespace processing bugs in ply/cpp.py.
+ Reported by Alexei Pososin.
+
+10/15/12: beazley
+ Fixed token names in ply/ctokens.py to match rule names.
+ Reported by Alexei Pososin.
+
+04/26/12: beazley
+ Changes to functions available in panic mode error recovery. In previous versions
+ of PLY, the following global functions were available for use in the p_error() rule:
+
+ yacc.errok() # Reset error state
+ yacc.token() # Get the next token
+ yacc.restart() # Reset the parsing stack
+
+ The use of global variables was problematic for code involving multiple parsers
+ and frankly was a poor design overall. These functions have been moved to methods
+ of the parser instance created by the yacc() function. You should write code like
+ this:
+
+ def p_error(p):
+ ...
+ parser.errok()
+
+ parser = yacc.yacc()
+
+ *** POTENTIAL INCOMPATIBILITY *** The original global functions now issue a
+ DeprecationWarning.
+
+04/19/12: beazley
+ Fixed some problems with line and position tracking and the use of error
+ symbols. If you have a grammar rule involving an error rule like this:
+
+ def p_assignment_bad(p):
+ '''assignment : location EQUALS error SEMI'''
+ ...
+
+ You can now do line and position tracking on the error token. For example:
+
+ def p_assignment_bad(p):
+ '''assignment : location EQUALS error SEMI'''
+ start_line = p.lineno(3)
+ start_pos = p.lexpos(3)
+
+ If the tracking=True option is supplied to parse(), you can additionally get
+ spans:
+
+ def p_assignment_bad(p):
+ '''assignment : location EQUALS error SEMI'''
+ start_line, end_line = p.linespan(3)
+ start_pos, end_pos = p.lexspan(3)
+
+ Note that error handling is still a hairy thing in PLY. This won't work
+ unless your lexer is providing accurate information. Please report bugs.
+ Suggested by a bug reported by Davis Herring.
+
+04/18/12: beazley
+ Change to doc string handling in lex module. Regex patterns are now first
+ pulled from a function's .regex attribute. If that doesn't exist, then
+ .__doc__ is checked as a fallback. The @TOKEN decorator now sets the .regex
+ attribute of a function instead of its doc string.
+ Change suggested by Kristoffer Ellersgaard Koch.
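+
+ For example (editorial sketch, not part of the original changelog):
+
+     from ply.lex import TOKEN
+
+     @TOKEN(r'\d+')
+     def t_NUMBER(t):
+         return t
+
+     assert t_NUMBER.regex == r'\d+'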
+
+04/18/12: beazley
+ Fixed issue #1: Fixed _tabversion. It should use __tabversion__ instead of __version__
+ Reported by Daniele Tricoli
+
+04/18/12: beazley
+ Fixed issue #8: Literals empty list causes IndexError
+ Reported by Walter Nissen.
+
+04/18/12: beazley
+ Fixed issue #12: Typo in code snippet in documentation
+ Reported by florianschanda.
+
+04/18/12: beazley
+ Fixed issue #10: Correctly escape t_XOREQUAL pattern.
+ Reported by Andy Kittner.
+
+Version 3.4
+---------------------
+02/17/11: beazley
+ Minor patch to make cpp.py compatible with Python 3. Note: This
+ is an experimental file not currently used by the rest of PLY.
+
+02/17/11: beazley
+ Fixed setup.py trove classifiers to properly list PLY as
+ Python 3 compatible.
+
+01/02/11: beazley
+ Migration of repository to github.
+
+Version 3.3
+-----------------------------
+08/25/09: beazley
+ Fixed issue 15 related to the set_lineno() method in yacc. Reported by
+ mdsherry.
+
+08/25/09: beazley
+ Fixed a bug related to regular expression compilation flags not being
+ properly stored in lextab.py files created by the lexer when running
+ in optimize mode. Reported by Bruce Frederiksen.
+
+
+Version 3.2
+-----------------------------
+03/24/09: beazley
+ Added an extra check to not print duplicated warning messages
+ about reduce/reduce conflicts.
+
+03/24/09: beazley
+ Switched PLY over to a BSD-license.
+
+03/23/09: beazley
+ Performance optimization. Discovered a few places to make
+ speedups in LR table generation.
+
+03/23/09: beazley
+ New warning message. PLY now warns about rules never
+ reduced due to reduce/reduce conflicts. Suggested by
+ Bruce Frederiksen.
+
+03/23/09: beazley
+ Some clean-up of warning messages related to reduce/reduce errors.
+
+03/23/09: beazley
+ Added a new picklefile option to yacc() to write the parsing
+ tables to a filename using the pickle module. Here is how
+ it works:
+
+ yacc(picklefile="parsetab.p")
+
+ This option can be used if the normal parsetab.py file is
+ extremely large. For example, on jython, it is impossible
+ to read parsing tables if the parsetab.py exceeds a certain
+ threshold.
+
+ The filename supplied to the picklefile option is opened
+ relative to the current working directory of the Python
+ interpreter. If you need to refer to the file elsewhere,
+ you will need to supply an absolute or relative path.
+
+ For maximum portability, the pickle file is written
+ using protocol 0.
+
+03/13/09: beazley
+ Fixed a bug in parser.out generation where the rule numbers
+ were off by one.
+
+03/13/09: beazley
+ Fixed a string formatting bug with one of the error messages.
+ Reported by Richard Reitmeyer
+
+Version 3.1
+-----------------------------
+02/28/09: beazley
+ Fixed broken start argument to yacc(). PLY-3.0 broke this
+ feature by accident.
+
+02/28/09: beazley
+ Fixed debugging output. yacc() no longer reports shift/reduce
+ or reduce/reduce conflicts if debugging is turned off. This
+ restores similar behavior in PLY-2.5. Reported by Andrew Waters.
+
+Version 3.0
+-----------------------------
+02/03/09: beazley
+ Fixed missing lexer attribute on certain tokens when
+ invoking the parser p_error() function. Reported by
+ Bart Whiteley.
+
+02/02/09: beazley
+ The lex() command now does all error-reporting and diagnostics
+ using the logging module interface. Pass in a Logger object
+ using the errorlog parameter to specify a different logger.
+
+02/02/09: beazley
+ Refactored ply.lex to use a more object-oriented and organized
+ approach to collecting lexer information.
+
+02/01/09: beazley
+ Removed the nowarn option from lex(). All output is controlled
+ by passing in a logger object. Just pass in a logger with a high
+ level setting to suppress output. This argument was never
+ documented to begin with so hopefully no one was relying upon it.
+
+02/01/09: beazley
+ Discovered and removed a dead if-statement in the lexer. This
+ resulted in a 6-7% speedup in lexing when I tested it.
+
+01/13/09: beazley
+ Minor change to the procedure for signalling a syntax error in a
+ production rule. A normal SyntaxError exception should be raised
+ instead of yacc.SyntaxError.
+
+01/13/09: beazley
+ Added a new method p.set_lineno(n,lineno) that can be used to set the
+ line number of symbol n in grammar rules. This simplifies manual
+ tracking of line numbers.
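+
+ For example (editorial sketch, not part of the original changelog):
+
+     def p_expr(p):
+         'expr : expr PLUS expr'
+         # Propagate the line number of the left operand to the result
+         p.set_lineno(0, p.lineno(1))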
+
+01/11/09: beazley
+ Vastly improved debugging support for yacc.parse(). Instead of passing
+ debug as an integer, you can supply a Logging object (see the logging
+ module). Messages will be generated at the ERROR, INFO, and DEBUG
+ logging levels, each level providing progressively more information.
+ The debugging trace also shows states, grammar rule, values passed
+ into grammar rules, and the result of each reduction.
+
+01/09/09: beazley
+ The yacc() command now does all error-reporting and diagnostics using
+ the interface of the logging module. Use the errorlog parameter to
+ specify a logging object for error messages. Use the debuglog parameter
+ to specify a logging object for the 'parser.out' output.
+
+01/09/09: beazley
+ *HUGE* refactoring of the ply.yacc() implementation. The high-level
+ user interface is backwards compatible, but the internals are completely
+ reorganized into classes. No more global variables. The internals
+ are also more extensible. For example, you can use the classes to
+ construct a LALR(1) parser in an entirely different manner than
+ what is currently the case. Documentation is forthcoming.
+
+01/07/09: beazley
+ Various cleanup and refactoring of yacc internals.
+
+01/06/09: beazley
+ Fixed a bug with precedence assignment. yacc was assigning the precedence
+ of each rule based on the left-most token, when in fact, it should have been
+ using the right-most token. Reported by Bruce Frederiksen.
+
+11/27/08: beazley
+ Numerous changes to support Python 3.0 including removal of deprecated
+ statements (e.g., has_key) and the addition of compatibility code
+ to emulate features from Python 2 that have been removed, but which
+ are needed. Fixed the unit testing suite to work with Python 3.0.
+ The code should be backwards compatible with Python 2.
+
+11/26/08: beazley
+ Loosened the rules on what kind of objects can be passed in as the
+ "module" parameter to lex() and yacc(). Previously, you could only use
+ a module or an instance. Now, PLY just uses dir() to get a list of
+ symbols on whatever the object is without regard for its type.
+
+11/26/08: beazley
+ Changed all except: statements to be compatible with Python2.x/3.x syntax.
+
+11/26/08: beazley
+ Changed all raise Exception, value statements to raise Exception(value) for
+ forward compatibility.
+
+11/26/08: beazley
+ Removed all print statements from lex and yacc, using sys.stdout and sys.stderr
+ directly. Preparation for Python 3.0 support.
+
+11/04/08: beazley
+ Fixed a bug with referring to symbols on the parsing stack using negative
+ indices.
+
+05/29/08: beazley
+ Completely revamped the testing system to use the unittest module for everything.
+ Added additional tests to cover new errors/warnings.
+
+Version 2.5
+-----------------------------
+05/28/08: beazley
+ Fixed a bug with writing lex-tables in optimized mode and start states.
+ Reported by Kevin Henry.
+
+Version 2.4
+-----------------------------
+05/04/08: beazley
+ A version number is now embedded in the table file signature so that
+ yacc can more gracefully accommodate changes to the output format
+ in the future.
+
+05/04/08: beazley
+ Removed undocumented .pushback() method on grammar productions. I'm
+ not sure this ever worked and can't recall ever using it. Might have
+ been an abandoned idea that never really got fleshed out. This
+ feature was never described or tested so removing it is hopefully
+ harmless.
+
+05/04/08: beazley
+ Added extra error checking to yacc() to detect precedence rules defined
+ for undefined terminal symbols. This allows yacc() to detect a potential
+ problem that can be really tricky to debug if no warning message or error
+ message is generated about it.
+
+05/04/08: beazley
+ lex() now has an outputdir argument that can specify the output directory for
+ tables when running in optimize mode. For example:
+
+ lexer = lex.lex(optimize=True, lextab="ltab", outputdir="foo/bar")
+
+ The behavior of specifying a table module and output directory are
+ more aligned with the behavior of yacc().
+
+05/04/08: beazley
+ [Issue 9]
+ Fixed filename bug when specifying the modulename in lex() and yacc().
+ If you specified options such as the following:
+
+ parser = yacc.yacc(tabmodule="foo.bar.parsetab",outputdir="foo/bar")
+
+ yacc would create a file "foo.bar.parsetab.py" in the given directory.
+ Now, it simply generates a file "parsetab.py" in that directory.
+ Bug reported by cptbinho.
+
+05/04/08: beazley
+ Slight modification to lex() and yacc() to allow their table files
+ to be loaded from a previously loaded module. This might make
+ it easier to load the parsing tables from a complicated package
+ structure. For example:
+
+ import foo.bar.spam.parsetab as parsetab
+ parser = yacc.yacc(tabmodule=parsetab)
+
+ Note: lex and yacc will never regenerate the table file if used
+ in this form---you will get a warning message instead.
+ This idea suggested by Brian Clapper.
+
+
+04/28/08: beazley
+ Fixed a bug with p_error() functions not being picked up correctly
+ when running in yacc(optimize=1) mode. Patch contributed by
+ Bart Whiteley.
+
+02/28/08: beazley
+ Fixed a bug with 'nonassoc' precedence rules. Basically the
+ 'nonassoc' precedence was being ignored and not producing the correct
+ run-time behavior in the parser.
+
+02/16/08: beazley
+ Slight relaxation of what the input() method to a lexer will
+ accept as a string. Instead of testing the input to see
+ if the input is a string or unicode string, it checks to see
+ if the input object looks like it contains string data.
+ This change makes it possible to pass string-like objects
+ in as input. For example, the object returned by mmap.
+
+ import mmap, os
+ data = mmap.mmap(os.open(filename,os.O_RDONLY),
+ os.path.getsize(filename),
+ access=mmap.ACCESS_READ)
+ lexer.input(data)
+
+
+11/29/07: beazley
+ Modification of ply.lex to allow token functions to be aliased.
+ This is subtle, but it makes it easier to create libraries and
+ to reuse token specifications. For example, suppose you defined
+ a function like this:
+
+ def number(t):
+ r'\d+'
+ t.value = int(t.value)
+ return t
+
+ This change would allow you to define a token rule as follows:
+
+ t_NUMBER = number
+
+ In this case, the token type will be set to 'NUMBER' and use
+ the associated number() function to process tokens.
+
+11/28/07: beazley
+ Slight modification to lex and yacc to grab symbols from both
+ the local and global dictionaries of the caller. This
+ modification allows lexers and parsers to be defined using
+ inner functions and closures.
+
+11/28/07: beazley
+ Performance optimization: The lexer.lexmatch and t.lexer
+ attributes are no longer set for lexer tokens that are not
+ defined by functions. The only normal use of these attributes
+ would be in lexer rules that need to perform some kind of
+ special processing. Thus, it doesn't make any sense to set
+ them on every token.
+
+ *** POTENTIAL INCOMPATIBILITY *** This might break code
+ that is mucking around with internal lexer state in some
+ sort of magical way.
+
+11/27/07: beazley
+ Added the ability to put the parser into error-handling mode
+ from within a normal production. To do this, simply raise
+ a yacc.SyntaxError exception like this:
+
+ def p_some_production(p):
+ 'some_production : prod1 prod2'
+ ...
+ raise yacc.SyntaxError # Signal an error
+
+ A number of things happen after this occurs:
+
+ - The last symbol shifted onto the symbol stack is discarded
+ and parser state backed up to what it was before the
+ rule reduction.
+
+ - The current lookahead symbol is saved and replaced by
+ the 'error' symbol.
+
+ - The parser enters error recovery mode where it tries
+ to either reduce the 'error' rule or it starts
+ discarding items off of the stack until the parser
+ resets.
+
+ When an error is manually set, the parser does *not* call
+ the p_error() function (if any is defined).
+ *** NEW FEATURE *** Suggested on the mailing list
+
+11/27/07: beazley
+ Fixed structure bug in examples/ansic. Reported by Dion Blazakis.
+
+11/27/07: beazley
+ Fixed a bug in the lexer related to start conditions and ignored
+ token rules. If a rule was defined that changed state, but
+ returned no token, the lexer could be left in an inconsistent
+ state. Reported by
+
+11/27/07: beazley
+ Modified setup.py to support Python Eggs. Patch contributed by
+ Simon Cross.
+
+11/09/07: beazley
+ Fixed a bug in error handling in yacc. If a syntax error occurred and the
+ parser rolled the entire parse stack back, the parser would be left in an
+ inconsistent state that would cause it to trigger incorrect actions on
+ subsequent input. Reported by Ton Biegstraaten, Justin King, and others.
+
+11/09/07: beazley
+ Fixed a bug when passing empty input strings to yacc.parse(). This
+ would result in an error message about "No input given". Reported
+ by Andrew Dalke.
+
+Version 2.3
+-----------------------------
+02/20/07: beazley
+ Fixed a bug with character literals if the literal '.' appeared as the
+ last symbol of a grammar rule. Reported by Ales Smrcka.
+
+02/19/07: beazley
+ Warning messages are now redirected to stderr instead of being printed
+ to standard output.
+
+02/19/07: beazley
+ Added a warning message to lex.py if it detects a literal backslash
+ character inside the t_ignore declaration. This is to help
+ problems that might occur if someone accidentally defines t_ignore
+ as a Python raw string. For example:
+
+ t_ignore = r' \t'
+
+ The idea for this is from an email I received from David Cimimi who
+ reported bizarre behavior in lexing as a result of defining t_ignore
+ as a raw string by accident.
+
+02/18/07: beazley
+ Performance improvements. Made some changes to the internal
+ table organization and LR parser to improve parsing performance.
+
+02/18/07: beazley
+ Automatic tracking of line number and position information must now be
+ enabled by a special flag to parse(). For example:
+
+ yacc.parse(data,tracking=True)
+
+ In many applications, it's just not that important to have the
+ parser automatically track all line numbers. By making this an
+ optional feature, it allows the parser to run significantly faster
+ (more than a 20% speed increase in many cases). Note: positional
+ information is always available for raw tokens---this change only
+ applies to positional information associated with nonterminal
+ grammar symbols.
+ *** POTENTIAL INCOMPATIBILITY ***
+
+02/18/07: beazley
+ Yacc no longer supports extended slices of grammar productions.
+ However, it does support regular slices. For example:
+
+ def p_foo(p):
+ '''foo: a b c d e'''
+ p[0] = p[1:3]
+
+ This change is a performance improvement to the parser--it streamlines
+ normal access to the grammar values since slices are now handled in
+ a __getslice__() method as opposed to __getitem__().
+
+02/12/07: beazley
+ Fixed a bug in the handling of token names when combined with
+ start conditions. Bug reported by Todd O'Bryan.
+
+Version 2.2
+------------------------------
+11/01/06: beazley
+ Added lexpos() and lexspan() methods to grammar symbols. These
+ mirror the same functionality of lineno() and linespan(). For
+ example:
+
+ def p_expr(p):
+ 'expr : expr PLUS expr'
+ p.lexpos(1) # Lexing position of left-hand-expression
+ p.lexpos(2) # Lexing position of PLUS
+ start,end = p.lexspan(3) # Lexing range of right hand expression
+
+11/01/06: beazley
+ Minor change to error handling. The recommended way to skip characters
+ in the input is to use t.lexer.skip() as shown here:
+
+ def t_error(t):
+ print "Illegal character '%s'" % t.value[0]
+ t.lexer.skip(1)
+
+ The old approach of just using t.skip(1) will still work, but won't
+ be documented.
+
+10/31/06: beazley
+ Discarded tokens can now be specified as simple strings instead of
+ functions. To do this, simply include the text "ignore_" in the
+ token declaration. For example:
+
+ t_ignore_cppcomment = r'//.*'
+
+ Previously, this had to be done with a function. For example:
+
+ def t_ignore_cppcomment(t):
+ r'//.*'
+ pass
+
+ If start conditions/states are being used, state names should appear
+ before the "ignore_" text.
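+
+ For example (editorial sketch, not part of the original changelog), a
+ discarded token active only in the 'foo' state would be written:
+
+     t_foo_ignore_cppcomment = r'//.*'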
+
+10/19/06: beazley
+ The Lex module now provides support for flex-style start conditions
+ as described at http://www.gnu.org/software/flex/manual/html_chapter/flex_11.html.
+ Please refer to this document to understand this change note. Refer to
+ the PLY documentation for PLY-specific explanation of how this works.
+
+ To use start conditions, you first need to declare a set of states in
+ your lexer file:
+
+ states = (
+ ('foo','exclusive'),
+ ('bar','inclusive')
+ )
+
+ This serves the same role as the %s and %x specifiers in flex.
+
+ Once a state has been declared, tokens for that state can be
+ declared by defining rules of the form t_state_TOK. For example:
+
+ t_PLUS = '\+' # Rule defined in INITIAL state
+ t_foo_NUM = '\d+' # Rule defined in foo state
+ t_bar_NUM = '\d+' # Rule defined in bar state
+
+ t_foo_bar_NUM = '\d+' # Rule defined in both foo and bar
+ t_ANY_NUM = '\d+' # Rule defined in all states
+
+ In addition to defining tokens for each state, the t_ignore and t_error
+ specifications can be customized for specific states. For example:
+
+ t_foo_ignore = " " # Ignored characters for foo state
+ def t_bar_error(t):
+ # Handle errors in bar state
+
+ With token rules, the following methods can be used to change states
+
+ def t_TOKNAME(t):
+ t.lexer.begin('foo') # Begin state 'foo'
+ t.lexer.push_state('foo') # Begin state 'foo', push old state
+ # onto a stack
+ t.lexer.pop_state() # Restore previous state
+ t.lexer.current_state() # Returns name of current state
+
+ These methods mirror the BEGIN(), yy_push_state(), yy_pop_state(), and
+ yy_top_state() functions in flex.
+
+ The use of start states can be used as one way to write sub-lexers.
+ For example, the lexer or parser might instruct the lexer to start
+ generating a different set of tokens depending on the context.
+
+ example/yply/ylex.py shows the use of start states to grab C/C++
+ code fragments out of traditional yacc specification files.
+
+ *** NEW FEATURE *** Suggested by Daniel Larraz with whom I also
+ discussed various aspects of the design.
+
+10/19/06: beazley
+ Minor change to the way in which yacc.py was reporting shift/reduce
+ conflicts. Although the underlying LALR(1) algorithm was correct,
+ PLY was under-reporting the number of conflicts compared to yacc/bison
+ when precedence rules were in effect. This change should make PLY
+ report the same number of conflicts as yacc.
+
+10/19/06: beazley
+ Modified yacc so that grammar rules could also include the '-'
+ character. For example:
+
+ def p_expr_list(p):
+ 'expression-list : expression-list expression'
+
+ Suggested by Oldrich Jedlicka.
+
+10/18/06: beazley
+ Attribute lexer.lexmatch added so that token rules can access the re
+ match object that was generated. For example:
+
+ def t_FOO(t):
+ r'some regex'
+ m = t.lexer.lexmatch
+ # Do something with m
+
+
+ This may be useful if you want to access named groups specified within
+ the regex for a specific token. Suggested by Oldrich Jedlicka.
+
+10/16/06: beazley
+ Changed the error message that results if an illegal character
+ is encountered and no default error function is defined in lex.
+ The exception is now more informative about the actual cause of
+ the error.
+
+Version 2.1
+------------------------------
+10/02/06: beazley
+ The last Lexer object built by lex() can be found in lex.lexer.
+ The last Parser object built by yacc() can be found in yacc.parser.
+
+10/02/06: beazley
+ New example added: examples/yply
+
+ This example uses PLY to convert Unix-yacc specification files to
+ PLY programs with the same grammar. This may be useful if you
+ want to convert a grammar from bison/yacc to use with PLY.
+
+10/02/06: beazley
+ Added support for a start symbol to be specified in the yacc
+ input file itself. Just do this:
+
+ start = 'name'
+
+ where 'name' matches some grammar rule. For example:
+
+ def p_name(p):
+ 'name : A B C'
+ ...
+
+ This mirrors the functionality of the yacc %start specifier.
+
+09/30/06: beazley
+ Some new examples added:
+
+ examples/GardenSnake : A simple indentation based language similar
+ to Python. Shows how you might handle
+ whitespace. Contributed by Andrew Dalke.
+
+ examples/BASIC : An implementation of 1964 Dartmouth BASIC.
+ Contributed by Dave against his better
+ judgement.
+
+09/28/06: beazley
+ Minor patch to allow named groups to be used in lex regular
+ expression rules. For example:
+
+ t_QSTRING = r'''(?P<quote>['"]).*?(?P=quote)'''
+
+ Patch submitted by Adam Ring.
+
+09/28/06: beazley
+ LALR(1) is now the default parsing method. To use SLR, use
+ yacc.yacc(method="SLR"). Note: there is no performance impact
+ on parsing when using LALR(1) instead of SLR. However, constructing
+ the parsing tables will take a little longer.
+
+09/26/06: beazley
+ Change to line number tracking. To modify line numbers, modify
+ the line number of the lexer itself. For example:
+
+ def t_NEWLINE(t):
+ r'\n'
+ t.lexer.lineno += 1
+
+ This modification is both cleanup and a performance optimization.
+ In past versions, lex was monitoring every token for changes in
+ the line number. This extra processing is unnecessary for a vast
+ majority of tokens. Thus, this new approach cleans it up a bit.
+
+ *** POTENTIAL INCOMPATIBILITY ***
+ You will need to change code in your lexer that updates the line
+ number. For example, "t.lineno += 1" becomes "t.lexer.lineno += 1"
+
+09/26/06: beazley
+ Added the lexing position to tokens as an attribute lexpos. This
+ is the raw index into the input text at which a token appears.
+ This information can be used to compute column numbers and other
+ details (e.g., scan backwards from lexpos to the first newline
+ to get a column position).
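+
+ One possible helper (editorial sketch, not part of PLY itself):
+
+     def find_column(input, token):
+         # Scan backwards from lexpos to the most recent newline
+         last_newline = input.rfind('\n', 0, token.lexpos)
+         return token.lexpos - last_newline   # 1-based column number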
+
+09/25/06: beazley
+ Changed the name of the __copy__() method on the Lexer class
+ to clone(). This is used to clone a Lexer object (e.g., if
+ you're running different lexers at the same time).
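+
+ For example (editorial sketch; 'data' and 'other_data' are hypothetical):
+
+     lexer2 = lexer.clone()   # independent lexer with the same rules
+     lexer.input(data)
+     lexer2.input(other_data)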
+
+09/21/06: beazley
+ Limitations related to the use of the re module have been eliminated.
+ Several users reported problems with regular expressions exceeding
+ more than 100 named groups. To solve this, lex.py is now capable
+ of automatically splitting its master regular expression into
+ smaller expressions as needed. This should, in theory, make it
+ possible to specify an arbitrarily large number of tokens.
+
+09/21/06: beazley
+ Improved error checking in lex.py. Rules that match the empty string
+ are now rejected (otherwise they cause the lexer to enter an infinite
+ loop). An extra check for rules containing '#' has also been added.
+ Since lex compiles regular expressions in verbose mode, '#' is interpreted
+ as a regex comment, so it is critical to use '\#' instead.
+
+09/18/06: beazley
+ Added a @TOKEN decorator function to lex.py that can be used to
+ define token rules where the documentation string might be computed
+ in some way.
+
+ digit = r'([0-9])'
+ nondigit = r'([_A-Za-z])'
+ identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
+
+ from ply.lex import TOKEN
+
+ @TOKEN(identifier)
+ def t_ID(t):
+ # Do whatever
+
+ The @TOKEN decorator merely sets the documentation string of the
+ associated token function as needed for lex to work.
+
+ Note: An alternative solution is the following:
+
+ def t_ID(t):
+ # Do whatever
+
+ t_ID.__doc__ = identifier
+
+ Note: Decorators require the use of Python 2.4 or later. If compatibility
+ with old versions is needed, use the latter solution.
+
+ The need for this feature was suggested by Cem Karan.
+
+09/14/06: beazley
+ Support for single-character literal tokens has been added to yacc.
+ These literals must be enclosed in quotes. For example:
+
+ def p_expr(p):
+ "expr : expr '+' expr"
+ ...
+
+ def p_expr(p):
+ 'expr : expr "-" expr'
+ ...
+
+ In addition to this, it is necessary to tell the lexer module about
+ literal characters. This is done by defining the variable 'literals'
+ as a list of characters. This should be defined in the module that
+ invokes the lex.lex() function. For example:
+
+ literals = ['+','-','*','/','(',')','=']
+
+ or simply
+
+ literals = '+=*/()='
+
+ It is important to note that literals can only be a single character.
+ When the lexer fails to match a token using its normal regular expression
+ rules, it will check the current character against the literal list.
+ If found, it will be returned with a token type set to match the literal
+ character. Otherwise, an illegal character will be signalled.
+
+
+09/14/06: beazley
+ Modified PLY to install itself as a proper Python package called 'ply'.
+ This will make it a little more friendly to other modules. This
+ changes the usage of PLY only slightly. Just do this to import the
+ modules
+
+ import ply.lex as lex
+ import ply.yacc as yacc
+
+ Alternatively, you can do this:
+
+ from ply import *
+
+ Which imports both the lex and yacc modules.
+ Change suggested by Lee June.
+
+09/13/06: beazley
+ Changed the handling of negative indices when used in production rules.
+ A negative production index now accesses already parsed symbols on the
+ parsing stack. For example,
+
+ def p_foo(p):
+ "foo: A B C D"
+ print p[1] # Value of 'A' symbol
+ print p[2] # Value of 'B' symbol
+ print p[-1] # Value of whatever symbol appears before A
+ # on the parsing stack.
+
+ p[0] = some_val # Sets the value of the 'foo' grammar symbol
+
+ This behavior makes it easier to work with embedded actions within the
+ parsing rules. For example, in C-yacc, it is possible to write code like
+ this:
+
+ bar: A { printf("seen an A = %d\n", $1); } B { do_stuff; }
+
+ In this example, the printf() code executes immediately after A has been
+ parsed. Within the embedded action code, $1 refers to the A symbol on
+ the stack.
+
+ To perform this equivalent action in PLY, you need to write a pair
+ of rules like this:
+
+ def p_bar(p):
+ "bar : A seen_A B"
+ do_stuff
+
+ def p_seen_A(p):
+ "seen_A :"
+ print "seen an A =", p[-1]
+
+ The second rule "seen_A" is merely an empty production which should be
+ reduced as soon as A is parsed in the "bar" rule above. The
+ negative index p[-1] is used to access whatever symbol appeared
+ before the seen_A symbol.
+
+ This feature also makes it possible to support inherited attributes.
+ For example:
+
+ def p_decl(p):
+ "decl : scope name"
+
+ def p_scope(p):
+ """scope : GLOBAL
+ | LOCAL"""
+ p[0] = p[1]
+
+ def p_name(p):
+ "name : ID"
+ if p[-1] == "GLOBAL":
+ # ...
+ elif p[-1] == "LOCAL":
+ #...
+
+ In this case, the name rule is inheriting an attribute from the
+ scope declaration that precedes it.
+
+ *** POTENTIAL INCOMPATIBILITY ***
+ If you are currently using negative indices within existing grammar rules,
+ your code will break. This should be extremely rare, if not non-existent,
+ in most cases. The argument to various grammar rules is not usually
+ processed in the same way as a list of items.
+
+Version 2.0
+------------------------------
+09/07/06: beazley
+ Major cleanup and refactoring of the LR table generation code. Both SLR
+ and LALR(1) table generation is now performed by the same code base with
+ only minor extensions for extra LALR(1) processing.
+
+09/07/06: beazley
+ Completely reimplemented the entire LALR(1) parsing engine to use the
+ DeRemer and Pennello algorithm for calculating lookahead sets. This
+ significantly improves the performance of generating LALR(1) tables
+ and has the added feature of actually working correctly! If you
+ experienced weird behavior with LALR(1) in prior releases, this should
+ hopefully resolve all of those problems. Many thanks to
+ Andrew Waters and Markus Schoepflin for submitting bug reports
+ and helping me test out the revised LALR(1) support.
+
+Version 1.8
+------------------------------
+08/02/06: beazley
+ Fixed a problem related to the handling of default actions in LALR(1)
+ parsing. If you experienced subtle and/or bizarre behavior when trying
+ to use the LALR(1) engine, this may correct those problems. Patch
+ contributed by Russ Cox. Note: This patch has been superseded by
+ revisions for LALR(1) parsing in Ply-2.0.
+
+08/02/06: beazley
+ Added support for slicing of productions in yacc.
+ Patch contributed by Patrick Mezard.
+
+Version 1.7
+------------------------------
+03/02/06: beazley
+ Fixed an infinite recursion problem in the ReduceToTerminals() function that
+ would sometimes come up in LALR(1) table generation. Reported by
+ Markus Schoepflin.
+
+03/01/06: beazley
+ Added "reflags" argument to lex(). For example:
+
+ lex.lex(reflags=re.UNICODE)
+
+ This can be used to specify optional flags to the re.compile() function
+ used inside the lexer. This may be necessary for special situations such
+ as processing Unicode (e.g., if you want escapes like \w and \b to consult
+ the Unicode character property database). The need for this was
+ suggested by Andreas Jung.
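+
+ Since the value is passed through to re.compile(), multiple flags
+ can presumably be combined with | in the usual way:
+
+     lex.lex(reflags=re.UNICODE | re.IGNORECASE)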
+
+03/01/06: beazley
+ Fixed a bug with an uninitialized variable on repeated instantiations of parser
+ objects when the write_tables=0 argument was used. Reported by Michael Brown.
+
+03/01/06: beazley
+ Modified lex.py to accept Unicode strings both as the regular expressions for
+ tokens and as input. Hopefully this is the only change needed for Unicode support.
+ Patch contributed by Johan Dahl.
+
+03/01/06: beazley
+ Modified the class-based interface to work with new-style or old-style classes.
+ Patch contributed by Michael Brown (although I tweaked it slightly so it would work
+ with older versions of Python).
+
+Version 1.6
+------------------------------
+05/27/05: beazley
+ Incorporated patch contributed by Christopher Stawarz to fix an extremely
+ devious bug in LALR(1) parser generation. This patch should fix problems
+ numerous people reported with LALR parsing.
+
+05/27/05: beazley
+ Fixed problem with lex.py copy constructor. Reported by Dave Aitel, Aaron Lav,
+ and Thad Austin.
+
+05/27/05: beazley
+ Added outputdir option to yacc() to control output directory. Contributed
+ by Christopher Stawarz.
+
+05/27/05: beazley
+ Added rununit.py test script to run tests using the Python unittest module.
+ Contributed by Miki Tebeka.
+
+Version 1.5
+------------------------------
+05/26/04: beazley
+ Major enhancement. LALR(1) parsing support is now working.
+ This feature was implemented by Elias Ioup (ezioup@alumni.uchicago.edu)
+ and optimized by David Beazley. To use LALR(1) parsing do
+ the following:
+
+ yacc.yacc(method="LALR")
+
+ Computing LALR(1) parsing tables takes about twice as long as
+ the default SLR method. However, LALR(1) allows you to handle
+ more complex grammars. For example, the ANSI C grammar
+ (in example/ansic) has 13 shift-reduce conflicts with SLR, but
+ only has 1 shift-reduce conflict with LALR(1).
+
+05/20/04: beazley
+ Added a __len__ method to parser production lists. Can
+ be used in parser rules like this:
+
+ def p_somerule(p):
+ """a : B C D
+ | E F"""
+ if (len(p) == 4):
+ # Must have been first rule
+ elif (len(p) == 3):
+ # Must be second rule
+
+ Suggested by Joshua Gerth and others.
+
+Version 1.4
+------------------------------
+04/23/04: beazley
+ Incorporated a variety of patches contributed by Eric Raymond.
+ These include:
+
+ 0. Cleans up some comments so they don't wrap on an 80-column display.
+ 1. Directs compiler errors to stderr where they belong.
+ 2. Implements and documents automatic line counting when \n is ignored.
+ 3. Changes the way progress messages are dumped when debugging is on.
+ The new format is both less verbose and conveys more information than
+ the old, including shift and reduce actions.
+
+04/23/04: beazley
+ Added a Python setup.py file to simplify installation. Contributed
+ by Adam Kerrison.
+
+04/23/04: beazley
+ Added patches contributed by Adam Kerrison.
+
+ - Some output is now only shown when debugging is enabled. This
+ means that PLY will be completely silent when not in debugging mode.
+
+ - An optional parameter "write_tables" can be passed to yacc() to
+ control whether or not parsing tables are written. By default,
+ it is true, but it can be turned off if you don't want the yacc
+ table file. Note: disabling this will cause yacc() to regenerate
+ the parsing table each time.
+
+04/23/04: beazley
+ Added patches contributed by David McNab. This patch adds two
+ features:
+
+ - The parser can be supplied as a class instead of a module.
+ For an example of this, see the example/classcalc directory.
+
+ - Debugging output can be directed to a filename of the user's
+ choice. Use
+
+ yacc(debugfile="somefile.out")
+
+
+Version 1.3
+------------------------------
+12/10/02: jmdyck
+ Various minor adjustments to the code that Dave checked in today.
+ Updated test/yacc_{inf,unused}.exp to reflect today's changes.
+
+12/10/02: beazley
+ Incorporated a variety of minor bug fixes to empty production
+ handling and infinite recursion checking. Contributed by
+ Michael Dyck.
+
+12/10/02: beazley
+ Removed bogus recover() method call in yacc.restart()
+
+Version 1.2
+------------------------------
+11/27/02: beazley
+ Lexer and parser objects are now available as an attribute
+ of tokens and slices respectively. For example:
+
+ def t_NUMBER(t):
+ r'\d+'
+ print t.lexer
+
+ def p_expr_plus(t):
+ 'expr: expr PLUS expr'
+ print t.lexer
+ print t.parser
+
+ This can be used for state management (if needed).
+
+10/31/02: beazley
+ Modified yacc.py to work with Python optimize mode. To make
+ this work, you need to use
+
+ yacc.yacc(optimize=1)
+
+ Furthermore, you need to first run Python in normal mode
+ to generate the necessary parsetab.py files. After that,
+ you can use python -O or python -OO.
+
+ Note: optimized mode turns off a lot of error checking.
+ Only use when you are sure that your grammar is working.
+ Make sure parsetab.py is up to date!
+
+10/30/02: beazley
+ Added cloning of Lexer objects. For example:
+
+ import copy
+ l = lex.lex()
+ lc = copy.copy(l)
+
+ l.input("Some text")
+ lc.input("Some other text")
+ ...
+
+ This might be useful if the same "lexer" is meant to
+ be used in different contexts---or if multiple lexers
+ are running concurrently.
+
+10/30/02: beazley
+ Fixed subtle bug with first set computation and empty productions.
+ Patch submitted by Michael Dyck.
+
+10/30/02: beazley
+ Fixed error messages to use "filename:line: message" instead
+ of "filename:line. message". This makes error reporting more
+ friendly to emacs. Patch submitted by François Pinard.
+
+10/30/02: beazley
+ Improvements to parser.out file. Terminals and nonterminals
+ are sorted instead of being printed in random order.
+ Patch submitted by François Pinard.
+
+10/30/02: beazley
+ Improvements to parser.out file output. Rules are now printed
+ in a way that's easier to understand. Contributed by Russ Cox.
+
+10/30/02: beazley
+ Added 'nonassoc' associativity support. This can be used
+ to disable the chaining of operators like a < b < c.
+ To use, simply specify 'nonassoc' in the precedence table
+
+ precedence = (
+ ('nonassoc', 'LESSTHAN', 'GREATERTHAN'), # Nonassociative operators
+ ('left', 'PLUS', 'MINUS'),
+ ('left', 'TIMES', 'DIVIDE'),
+ ('right', 'UMINUS'), # Unary minus operator
+ )
+
+ Patch contributed by Russ Cox.
+
+10/30/02: beazley
+ Modified the lexer to provide optional support for Python -O and -OO
+ modes. To make this work, Python *first* needs to be run in
+ unoptimized mode. This reads the lexing information and creates a
+ file "lextab.py". Then, run lex like this:
+
+ # module foo.py
+ ...
+ ...
+ lex.lex(optimize=1)
+
+ Once the lextab file has been created, subsequent calls to
+ lex.lex() will read data from the lextab file instead of using
+ introspection. In optimized mode (-O, -OO) everything should
+ work normally despite the loss of doc strings.
+
+ To change the name of the file 'lextab.py' use the following:
+
+ lex.lex(lextab="footab")
+
+ (this creates a file footab.py)
+
+
+Version 1.1 October 25, 2001
+------------------------------
+
+10/25/01: beazley
+ Modified the table generator to produce much more compact data.
+ This should greatly reduce the size of the parsetab.py[c] file.
+ Caveat: the tables still need to be constructed so a little more
+ work is done in parsetab on import.
+
+10/25/01: beazley
+ There may be a bug in the cycle detector that reports errors
+ about infinite recursion. I'm having a little trouble tracking it
+ down, but if you get this problem, you can disable the cycle
+ detector as follows:
+
+ yacc.yacc(check_recursion = 0)
+
+10/25/01: beazley
+ Fixed a bug in lex.py that sometimes caused illegal characters to be
+ reported incorrectly. Reported by Sverre Jørgensen.
+
+7/8/01 : beazley
+ Added a reference to the underlying lexer object when tokens are handled by
+ functions. The lexer is available as the 'lexer' attribute. This
+ was added to provide better lexing support for languages such as Fortran
+ where certain types of tokens can't be conveniently expressed as regular
+ expressions (and where the tokenizing function may want to perform a
+ little backtracking). Suggested by Pearu Peterson.
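+
+ For instance, a Fortran-style Hollerith constant such as 5HHELLO
+ could be scanned with a rule along these lines (a rough sketch; the
+ token name is made up):
+
+     def t_HOLLERITH(t):
+         r'\d+H'
+         n = int(t.value[:-1])                 # declared length
+         start = t.lexer.lexpos
+         t.value = t.lexer.lexdata[start:start + n]
+         t.lexer.skip(n)                       # consume the raw text
+         return t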
+
+6/20/01 : beazley
+ Modified yacc() function so that an optional starting symbol can be specified.
+ For example:
+
+ yacc.yacc(start="statement")
+
+ Normally yacc always treats the first production rule as the starting symbol.
+ However, if you are debugging your grammar it may be useful to specify
+ an alternative starting symbol. Idea suggested by Rich Salz.
+
+Version 1.0 June 18, 2001
+--------------------------
+Initial public offering
+
diff --git a/third_party/python/ply/MANIFEST.in b/third_party/python/ply/MANIFEST.in
new file mode 100644
index 0000000000..0d37431b0b
--- /dev/null
+++ b/third_party/python/ply/MANIFEST.in
@@ -0,0 +1,8 @@
+recursive-include example *
+recursive-include doc *
+recursive-include test *
+include ANNOUNCE
+include README.md
+include CHANGES
+include TODO
+global-exclude *.pyc
diff --git a/third_party/python/ply/PKG-INFO b/third_party/python/ply/PKG-INFO
new file mode 100644
index 0000000000..6eedf42595
--- /dev/null
+++ b/third_party/python/ply/PKG-INFO
@@ -0,0 +1,22 @@
+Metadata-Version: 1.1
+Name: ply
+Version: 3.10
+Summary: Python Lex & Yacc
+Home-page: http://www.dabeaz.com/ply/
+Author: David Beazley
+Author-email: dave@dabeaz.com
+License: BSD
+Description:
+ PLY is yet another implementation of lex and yacc for Python. Some notable
+ features include the fact that it's implemented entirely in Python and it
+ uses LALR(1) parsing which is efficient and well suited for larger grammars.
+
+ PLY provides most of the standard lex/yacc features including support for empty
+ productions, precedence rules, error recovery, and support for ambiguous grammars.
+
+ PLY is extremely easy to use and provides very extensive error checking.
+ It is compatible with both Python 2 and Python 3.
+
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 2
diff --git a/third_party/python/ply/README.md b/third_party/python/ply/README.md
new file mode 100644
index 0000000000..e428f1b14a
--- /dev/null
+++ b/third_party/python/ply/README.md
@@ -0,0 +1,273 @@
+PLY (Python Lex-Yacc) Version 3.10
+
+Copyright (C) 2001-2017
+David M. Beazley (Dabeaz LLC)
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+* Neither the name of the David Beazley or Dabeaz LLC may be used to
+ endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Introduction
+============
+
+PLY is a 100% Python implementation of the common parsing tools lex
+and yacc. Here are a few highlights:
+
+ - PLY is very closely modeled after traditional lex/yacc.
+ If you know how to use these tools in C, you will find PLY
+ to be similar.
+
+ - PLY provides *very* extensive error reporting and diagnostic
+ information to assist in parser construction. The original
+ implementation was developed for instructional purposes. As
+ a result, the system tries to identify the most common types
+ of errors made by novice users.
+
+ - PLY provides full support for empty productions, error recovery,
+ precedence specifiers, and moderately ambiguous grammars.
+
+ - Parsing is based on LR-parsing which is fast, memory efficient,
+ better suited to large grammars, and which has a number of nice
+ properties when dealing with syntax errors and other parsing problems.
+ Currently, PLY builds its parsing tables using the LALR(1)
+ algorithm used in yacc.
+
+ - PLY uses Python introspection features to build lexers and parsers.
+ This greatly simplifies the task of parser construction since it reduces
+ the number of files and eliminates the need to run a separate lex/yacc
+ tool before running your program.
+
+ - PLY can be used to build parsers for "real" programming languages.
+ Although it is not ultra-fast due to its Python implementation,
+ PLY can be used to parse grammars consisting of several hundred
+ rules (as might be found for a language like C). The lexer and LR
+ parser are also reasonably efficient when parsing typically
+ sized programs. People have used PLY to build parsers for
+ C, C++, ADA, and other real programming languages.
+
+How to Use
+==========
+
+PLY consists of two files: lex.py and yacc.py. These are contained
+within the 'ply' directory which may also be used as a Python package.
+To use PLY, simply copy the 'ply' directory to your project and import
+lex and yacc from the associated 'ply' package. For example:
+
+ import ply.lex as lex
+ import ply.yacc as yacc
+
+Alternatively, you can copy just the files lex.py and yacc.py
+individually and use them as modules. For example:
+
+ import lex
+ import yacc
+
+The file setup.py can be used to install ply using distutils.
+
+The file doc/ply.html contains complete documentation on how to use
+the system.
+
+The example directory contains several different examples including a
+PLY specification for ANSI C as given in K&R 2nd Ed.
+
+A simple example is found at the end of this document.
+
+Requirements
+============
+PLY requires the use of Python 2.6 or greater. However, you should
+use the latest Python release if possible. It should work on just
+about any platform. PLY has been tested with both CPython and Jython.
+It also seems to work with IronPython.
+
+Resources
+=========
+More information about PLY can be obtained on the PLY webpage at:
+
+ http://www.dabeaz.com/ply
+
+For a detailed overview of parsing theory, consult the excellent
+book "Compilers : Principles, Techniques, and Tools" by Aho, Sethi, and
+Ullman. The topics found in "Lex & Yacc" by Levine, Mason, and Brown
+may also be useful.
+
+The GitHub page for PLY can be found at:
+
+ https://github.com/dabeaz/ply
+
+An old and relatively inactive discussion group for PLY is found at:
+
+ http://groups.google.com/group/ply-hack
+
+Acknowledgments
+===============
+A special thanks is in order for all of the students in CS326 who
+suffered through about 25 different versions of these tools :-).
+
+The CHANGES file acknowledges those who have contributed patches.
+
+Elias Ioup did the first implementation of LALR(1) parsing in PLY-1.x.
+Andrew Waters and Markus Schoepflin were instrumental in reporting bugs
+and testing a revised LALR(1) implementation for PLY-2.0.
+
+Special Note for PLY-3.0
+========================
+PLY-3.0 is the first PLY release to support Python 3. However, backwards
+compatibility with Python 2.6 is still preserved. PLY provides dual
+Python 2/3 compatibility by restricting its implementation to a common
+subset of basic language features. You should not convert PLY using
+2to3--it is not necessary and may in fact break the implementation.
+
+Example
+=======
+
+Here is a simple example showing a PLY implementation of a calculator
+with variables.
+
+ # -----------------------------------------------------------------------------
+ # calc.py
+ #
+ # A simple calculator with variables.
+ # -----------------------------------------------------------------------------
+
+ tokens = (
+ 'NAME','NUMBER',
+ 'PLUS','MINUS','TIMES','DIVIDE','EQUALS',
+ 'LPAREN','RPAREN',
+ )
+
+ # Tokens
+
+ t_PLUS = r'\+'
+ t_MINUS = r'-'
+ t_TIMES = r'\*'
+ t_DIVIDE = r'/'
+ t_EQUALS = r'='
+ t_LPAREN = r'\('
+ t_RPAREN = r'\)'
+ t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+ def t_NUMBER(t):
+ r'\d+'
+ t.value = int(t.value)
+ return t
+
+ # Ignored characters
+ t_ignore = " \t"
+
+ def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+ def t_error(t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+ # Build the lexer
+ import ply.lex as lex
+ lex.lex()
+
+ # Precedence rules for the arithmetic operators
+ precedence = (
+ ('left','PLUS','MINUS'),
+ ('left','TIMES','DIVIDE'),
+ ('right','UMINUS'),
+ )
+
+ # dictionary of names (for storing variables)
+ names = { }
+
+ def p_statement_assign(p):
+ 'statement : NAME EQUALS expression'
+ names[p[1]] = p[3]
+
+ def p_statement_expr(p):
+ 'statement : expression'
+ print(p[1])
+
+ def p_expression_binop(p):
+ '''expression : expression PLUS expression
+ | expression MINUS expression
+ | expression TIMES expression
+ | expression DIVIDE expression'''
+ if p[2] == '+' : p[0] = p[1] + p[3]
+ elif p[2] == '-': p[0] = p[1] - p[3]
+ elif p[2] == '*': p[0] = p[1] * p[3]
+ elif p[2] == '/': p[0] = p[1] / p[3]
+
+ def p_expression_uminus(p):
+ 'expression : MINUS expression %prec UMINUS'
+ p[0] = -p[2]
+
+ def p_expression_group(p):
+ 'expression : LPAREN expression RPAREN'
+ p[0] = p[2]
+
+ def p_expression_number(p):
+ 'expression : NUMBER'
+ p[0] = p[1]
+
+ def p_expression_name(p):
+ 'expression : NAME'
+ try:
+ p[0] = names[p[1]]
+ except LookupError:
+ print("Undefined name '%s'" % p[1])
+ p[0] = 0
+
+ def p_error(p):
+ print("Syntax error at '%s'" % p.value)
+
+ import ply.yacc as yacc
+ yacc.yacc()
+
+ while True:
+ try:
+ s = raw_input('calc > ') # use input() on Python 3
+ except EOFError:
+ break
+ yacc.parse(s)
+
+
+Bug Reports and Patches
+=======================
+My goal with PLY is to simply have a decent lex/yacc implementation
+for Python. As a general rule, I don't spend huge amounts of time
+working on it unless I receive very specific bug reports and/or
+patches to fix problems. I also try to incorporate submitted feature
+requests and enhancements into each new version. Please visit the PLY
+github page at https://github.com/dabeaz/ply to submit issues and pull
+requests. To contact me about bugs and/or new features, please send
+email to dave@dabeaz.com.
+
+-- Dave
+
diff --git a/third_party/python/ply/TODO b/third_party/python/ply/TODO
new file mode 100644
index 0000000000..f4800aacf4
--- /dev/null
+++ b/third_party/python/ply/TODO
@@ -0,0 +1,16 @@
+The PLY to-do list:
+
+1. Finish writing the C Preprocessor module. Started in the
+ file ply/cpp.py
+
+2. Create and document libraries of useful tokens.
+
+3. Expand the examples/yply tool that parses bison/yacc
+ files.
+
+4. Think of various diabolical things to do with the
+ new yacc internals. For example, it is now possible
+ to specify grammars using completely different schemes
+ than the reflection approach used by PLY.
+
+
diff --git a/third_party/python/ply/example/BASIC/README b/third_party/python/ply/example/BASIC/README
new file mode 100644
index 0000000000..be24a3005e
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/README
@@ -0,0 +1,79 @@
+Inspired by a September 14, 2006 Salon article "Why Johnny Can't Code" by
+David Brin (http://www.salon.com/tech/feature/2006/09/14/basic/index.html),
+I thought that a fully working BASIC interpreter might be an interesting,
+if not questionable, PLY example. Uh, okay, so maybe it's just a bad idea,
+but in any case, here it is.
+
+In this example, you'll find a rough implementation of 1964 Dartmouth BASIC
+as described in the manual at:
+
+ http://www.bitsavers.org/pdf/dartmouth/BASIC_Oct64.pdf
+
+See also:
+
+ http://en.wikipedia.org/wiki/Dartmouth_BASIC
+
+This dialect is downright primitive---there are no string variables
+and no facilities for interactive input. Moreover, subroutines and functions
+are brain-dead even more than they usually are for BASIC. Of course,
+the GOTO statement is provided.
+
+Nevertheless, there are a few interesting aspects of this example:
+
+ - It illustrates a fully working interpreter including lexing, parsing,
+ and interpretation of instructions.
+
+ - The parser shows how to catch and report various kinds of parsing
+ errors in a more graceful way.
+
+ - The example parses both files (supplied on the command line) and
+ interactive input entered line by line.
+
+ - It shows how you might represent parsed information. In this case,
+ each BASIC statement is encoded into a Python tuple containing the
+ statement type and parameters. These tuples are then stored in
+ a dictionary indexed by program line numbers (a sketch follows
+ this list).
+
+ - Even though it's just BASIC, the parser contains more than 80
+ rules and 150 parsing states. Thus, it's a little more meaty than
+ the calculator example.
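+
+As a concrete illustration (the exact tuple shapes are defined in
+basparse.py), the two-line program
+
+    10 PRINT "HELLO WORLD"
+    20 END
+
+parses to a dictionary along the lines of:
+
+    {10: ('PRINT', [('HELLO WORLD', None)], None),
+     20: ('END',)}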
+
+To use the example, run it as follows:
+
+ % python basic.py hello.bas
+ HELLO WORLD
+ %
+
+or use it interactively:
+
+ % python basic.py
+ [BASIC] 10 PRINT "HELLO WORLD"
+ [BASIC] 20 END
+ [BASIC] RUN
+ HELLO WORLD
+ [BASIC]
+
+The following files are defined:
+
+ basic.py - High level script that controls everything
+ basiclex.py - BASIC tokenizer
+ basparse.py - BASIC parser
+ basinterp.py - BASIC interpreter that runs parsed programs.
+
+In addition, a number of sample BASIC programs (.bas suffix) are
+provided. These were taken out of the Dartmouth manual.
+
+Disclaimer: I haven't spent a ton of time testing this and it's likely that
+I've skimped here and there on a few finer details (e.g., strictly enforcing
+variable naming rules). However, the interpreter seems to be able to run
+the examples in the BASIC manual.
+
+Have fun!
+
+-Dave
+
diff --git a/third_party/python/ply/example/BASIC/basic.py b/third_party/python/ply/example/BASIC/basic.py
new file mode 100644
index 0000000000..70ac9e7c74
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/basic.py
@@ -0,0 +1,65 @@
+# An implementation of Dartmouth BASIC (1964)
+#
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+import basiclex
+import basparse
+import basinterp
+
+# If a filename has been specified, we try to run it.
+# If a runtime error occurs, we bail out and enter
+# interactive mode below
+if len(sys.argv) == 2:
+ data = open(sys.argv[1]).read()
+ prog = basparse.parse(data)
+ if not prog:
+ raise SystemExit
+ b = basinterp.BasicInterpreter(prog)
+ try:
+ b.run()
+ raise SystemExit
+ except RuntimeError:
+ pass
+
+else:
+ b = basinterp.BasicInterpreter({})
+
+# Interactive mode. This incrementally adds/deletes statements
+# from the program stored in the BasicInterpreter object. In
+# addition, special commands 'NEW','LIST',and 'RUN' are added.
+# Specifying a line number with no code deletes that line from
+# the program.
+
+while 1:
+ try:
+ line = raw_input("[BASIC] ")
+ except EOFError:
+ raise SystemExit
+ if not line:
+ continue
+ line += "\n"
+ prog = basparse.parse(line)
+ if not prog:
+ continue
+
+ keys = list(prog)
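+    # The parser keys interactive commands (RUN/LIST/NEW) and blank
+    # line-number deletions on line 0; real statements carry their own
+    # positive line numbers.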
+ if keys[0] > 0:
+ b.add_statements(prog)
+ else:
+ stat = prog[keys[0]]
+ if stat[0] == 'RUN':
+ try:
+ b.run()
+ except RuntimeError:
+ pass
+ elif stat[0] == 'LIST':
+ b.list()
+ elif stat[0] == 'BLANK':
+ b.del_line(stat[1])
+ elif stat[0] == 'NEW':
+ b.new()
diff --git a/third_party/python/ply/example/BASIC/basiclex.py b/third_party/python/ply/example/BASIC/basiclex.py
new file mode 100644
index 0000000000..4151f4c34f
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/basiclex.py
@@ -0,0 +1,61 @@
+# An implementation of Dartmouth BASIC (1964)
+
+from ply import *
+
+keywords = (
+ 'LET', 'READ', 'DATA', 'PRINT', 'GOTO', 'IF', 'THEN', 'FOR', 'NEXT', 'TO', 'STEP',
+ 'END', 'STOP', 'DEF', 'GOSUB', 'DIM', 'REM', 'RETURN', 'RUN', 'LIST', 'NEW',
+)
+
+tokens = keywords + (
+ 'EQUALS', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'POWER',
+ 'LPAREN', 'RPAREN', 'LT', 'LE', 'GT', 'GE', 'NE',
+ 'COMMA', 'SEMI', 'INTEGER', 'FLOAT', 'STRING',
+ 'ID', 'NEWLINE'
+)
+
+t_ignore = ' \t'
+
+
+def t_REM(t):
+ r'REM .*'
+ return t
+
+
+def t_ID(t):
+ r'[A-Z][A-Z0-9]*'
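+    # Identifiers and keywords match the same pattern; reclassify
+    # keywords so the parser sees their own token type (e.g. PRINT)
+    # instead of ID.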
+ if t.value in keywords:
+ t.type = t.value
+ return t
+
+t_EQUALS = r'='
+t_PLUS = r'\+'
+t_MINUS = r'-'
+t_TIMES = r'\*'
+t_POWER = r'\^'
+t_DIVIDE = r'/'
+t_LPAREN = r'\('
+t_RPAREN = r'\)'
+t_LT = r'<'
+t_LE = r'<='
+t_GT = r'>'
+t_GE = r'>='
+t_NE = r'<>'
+t_COMMA = r'\,'
+t_SEMI = r';'
+t_INTEGER = r'\d+'
+t_FLOAT = r'((\d*\.\d+)(E[\+-]?\d+)?|([1-9]\d*E[\+-]?\d+))'
+t_STRING = r'\".*?\"'
+
+
+def t_NEWLINE(t):
+ r'\n'
+ t.lexer.lineno += 1
+ return t
+
+
+def t_error(t):
+ print("Illegal character %s" % t.value[0])
+ t.lexer.skip(1)
+
+lex.lex(debug=0)
diff --git a/third_party/python/ply/example/BASIC/basiclog.py b/third_party/python/ply/example/BASIC/basiclog.py
new file mode 100644
index 0000000000..9dcc7feda6
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/basiclog.py
@@ -0,0 +1,73 @@
+# An implementation of Dartmouth BASIC (1964)
+#
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+import logging
+logging.basicConfig(
+ level=logging.INFO,
+ filename="parselog.txt",
+ filemode="w"
+)
+log = logging.getLogger()
+
+import basiclex
+import basparse
+import basinterp
+
+# If a filename has been specified, we try to run it.
+# If a runtime error occurs, we bail out and enter
+# interactive mode below
+if len(sys.argv) == 2:
+ data = open(sys.argv[1]).read()
+ prog = basparse.parse(data, debug=log)
+ if not prog:
+ raise SystemExit
+ b = basinterp.BasicInterpreter(prog)
+ try:
+ b.run()
+ raise SystemExit
+ except RuntimeError:
+ pass
+
+else:
+ b = basinterp.BasicInterpreter({})
+
+# Interactive mode. This incrementally adds/deletes statements
+# from the program stored in the BasicInterpreter object. In
+# addition, special commands 'NEW','LIST',and 'RUN' are added.
+# Specifying a line number with no code deletes that line from
+# the program.
+
+while 1:
+ try:
+ line = raw_input("[BASIC] ")
+ except EOFError:
+ raise SystemExit
+ if not line:
+ continue
+ line += "\n"
+ prog = basparse.parse(line, debug=log)
+ if not prog:
+ continue
+
+ keys = list(prog)
+ if keys[0] > 0:
+ b.add_statements(prog)
+ else:
+ stat = prog[keys[0]]
+ if stat[0] == 'RUN':
+ try:
+ b.run()
+ except RuntimeError:
+ pass
+ elif stat[0] == 'LIST':
+ b.list()
+ elif stat[0] == 'BLANK':
+ b.del_line(stat[1])
+ elif stat[0] == 'NEW':
+ b.new()
diff --git a/third_party/python/ply/example/BASIC/basinterp.py b/third_party/python/ply/example/BASIC/basinterp.py
new file mode 100644
index 0000000000..67762c797b
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/basinterp.py
@@ -0,0 +1,496 @@
+# This file provides the runtime support for running a basic program
+# Assumes the program has been parsed using basparse.py
+
+import sys
+import math
+import random
+
+
+class BasicInterpreter:
+
+ # Initialize the interpreter. prog is a dictionary
+ # containing (line,statement) mappings
+ def __init__(self, prog):
+ self.prog = prog
+
+ self.functions = { # Built-in function table
+ 'SIN': lambda z: math.sin(self.eval(z)),
+ 'COS': lambda z: math.cos(self.eval(z)),
+ 'TAN': lambda z: math.tan(self.eval(z)),
+ 'ATN': lambda z: math.atan(self.eval(z)),
+ 'EXP': lambda z: math.exp(self.eval(z)),
+ 'ABS': lambda z: abs(self.eval(z)),
+ 'LOG': lambda z: math.log(self.eval(z)),
+ 'SQR': lambda z: math.sqrt(self.eval(z)),
+ 'INT': lambda z: int(self.eval(z)),
+ 'RND': lambda z: random.random()
+ }
+
+ # Collect all data statements
+ def collect_data(self):
+ self.data = []
+ for lineno in self.stat:
+ if self.prog[lineno][0] == 'DATA':
+ self.data = self.data + self.prog[lineno][1]
+ self.dc = 0 # Initialize the data counter
+
+ # Check for end statements
+ def check_end(self):
+ has_end = 0
+ for lineno in self.stat:
+ if self.prog[lineno][0] == 'END' and not has_end:
+ has_end = lineno
+ if not has_end:
+ print("NO END INSTRUCTION")
+ self.error = 1
+ return
+ if has_end != lineno:
+ print("END IS NOT LAST")
+ self.error = 1
+
+ # Check loops
+ def check_loops(self):
+ for pc in range(len(self.stat)):
+ lineno = self.stat[pc]
+ if self.prog[lineno][0] == 'FOR':
+ forinst = self.prog[lineno]
+ loopvar = forinst[1]
+ for i in range(pc + 1, len(self.stat)):
+ if self.prog[self.stat[i]][0] == 'NEXT':
+ nextvar = self.prog[self.stat[i]][1]
+ if nextvar != loopvar:
+ continue
+ self.loopend[pc] = i
+ break
+ else:
+ print("FOR WITHOUT NEXT AT LINE %s" % self.stat[pc])
+ self.error = 1
+
+ # Evaluate an expression
+ def eval(self, expr):
+ etype = expr[0]
+ if etype == 'NUM':
+ return expr[1]
+ elif etype == 'GROUP':
+ return self.eval(expr[1])
+ elif etype == 'UNARY':
+ if expr[1] == '-':
+ return -self.eval(expr[2])
+ elif etype == 'BINOP':
+ if expr[1] == '+':
+ return self.eval(expr[2]) + self.eval(expr[3])
+ elif expr[1] == '-':
+ return self.eval(expr[2]) - self.eval(expr[3])
+ elif expr[1] == '*':
+ return self.eval(expr[2]) * self.eval(expr[3])
+ elif expr[1] == '/':
+ return float(self.eval(expr[2])) / self.eval(expr[3])
+ elif expr[1] == '^':
+ return abs(self.eval(expr[2]))**self.eval(expr[3])
+ elif etype == 'VAR':
+ var, dim1, dim2 = expr[1]
+ if not dim1 and not dim2:
+ if var in self.vars:
+ return self.vars[var]
+ else:
+ print("UNDEFINED VARIABLE %s AT LINE %s" %
+ (var, self.stat[self.pc]))
+ raise RuntimeError
+ # May be a list lookup or a function evaluation
+ if dim1 and not dim2:
+ if var in self.functions:
+ # A function
+ return self.functions[var](dim1)
+ else:
+ # A list evaluation
+ if var in self.lists:
+ dim1val = self.eval(dim1)
+ if dim1val < 1 or dim1val > len(self.lists[var]):
+ print("LIST INDEX OUT OF BOUNDS AT LINE %s" %
+ self.stat[self.pc])
+ raise RuntimeError
+ return self.lists[var][dim1val - 1]
+ if dim1 and dim2:
+ if var in self.tables:
+ dim1val = self.eval(dim1)
+ dim2val = self.eval(dim2)
+ if dim1val < 1 or dim1val > len(self.tables[var]) or dim2val < 1 or dim2val > len(self.tables[var][0]):
+ print("TABLE INDEX OUT OUT BOUNDS AT LINE %s" %
+ self.stat[self.pc])
+ raise RuntimeError
+ return self.tables[var][dim1val - 1][dim2val - 1]
+ print("UNDEFINED VARIABLE %s AT LINE %s" %
+ (var, self.stat[self.pc]))
+ raise RuntimeError
+
+ # Evaluate a relational expression
+ def releval(self, expr):
+ etype = expr[1]
+ lhs = self.eval(expr[2])
+ rhs = self.eval(expr[3])
+ if etype == '<':
+ if lhs < rhs:
+ return 1
+ else:
+ return 0
+
+ elif etype == '<=':
+ if lhs <= rhs:
+ return 1
+ else:
+ return 0
+
+ elif etype == '>':
+ if lhs > rhs:
+ return 1
+ else:
+ return 0
+
+ elif etype == '>=':
+ if lhs >= rhs:
+ return 1
+ else:
+ return 0
+
+ elif etype == '=':
+ if lhs == rhs:
+ return 1
+ else:
+ return 0
+
+ elif etype == '<>':
+ if lhs != rhs:
+ return 1
+ else:
+ return 0
+
+ # Assignment
+ def assign(self, target, value):
+ var, dim1, dim2 = target
+ if not dim1 and not dim2:
+ self.vars[var] = self.eval(value)
+ elif dim1 and not dim2:
+ # List assignment
+ dim1val = self.eval(dim1)
+ if not var in self.lists:
+ self.lists[var] = [0] * 10
+
+ if dim1val > len(self.lists[var]):
+ print ("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
+ raise RuntimeError
+ self.lists[var][dim1val - 1] = self.eval(value)
+ elif dim1 and dim2:
+ dim1val = self.eval(dim1)
+ dim2val = self.eval(dim2)
+ if not var in self.tables:
+ temp = [0] * 10
+ v = []
+ for i in range(10):
+ v.append(temp[:])
+ self.tables[var] = v
+ # Variable already exists
+ if dim1val > len(self.tables[var]) or dim2val > len(self.tables[var][0]):
+ print("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
+ raise RuntimeError
+ self.tables[var][dim1val - 1][dim2val - 1] = self.eval(value)
+
+ # Change the current line number
+ def goto(self, linenum):
+ if not linenum in self.prog:
+ print("UNDEFINED LINE NUMBER %d AT LINE %d" %
+ (linenum, self.stat[self.pc]))
+ raise RuntimeError
+ self.pc = self.stat.index(linenum)
+
+ # Run it
+ def run(self):
+ self.vars = {} # All variables
+ self.lists = {} # List variables
+ self.tables = {} # Tables
+ self.loops = [] # Currently active loops
+ self.loopend = {} # Mapping saying where loops end
+ self.gosub = None # Gosub return point (if any)
+ self.error = 0 # Indicates program error
+
+ self.stat = list(self.prog) # Ordered list of all line numbers
+ self.stat.sort()
+ self.pc = 0 # Current program counter
+
+ # Processing prior to running
+
+ self.collect_data() # Collect all of the data statements
+ self.check_end()
+ self.check_loops()
+
+ if self.error:
+ raise RuntimeError
+
+ while 1:
+ line = self.stat[self.pc]
+ instr = self.prog[line]
+
+ op = instr[0]
+
+ # END and STOP statements
+ if op == 'END' or op == 'STOP':
+ break # We're done
+
+ # GOTO statement
+ elif op == 'GOTO':
+ newline = instr[1]
+ self.goto(newline)
+ continue
+
+ # PRINT statement
+ elif op == 'PRINT':
+ plist = instr[1]
+ out = ""
+ for label, val in plist:
+ if out:
+ out += ' ' * (15 - (len(out) % 15))
+ out += label
+ if val:
+ if label:
+ out += " "
+ eval = self.eval(val)
+ out += str(eval)
+ sys.stdout.write(out)
+ end = instr[2]
+ if not (end == ',' or end == ';'):
+ sys.stdout.write("\n")
+ if end == ',':
+ sys.stdout.write(" " * (15 - (len(out) % 15)))
+ if end == ';':
+ sys.stdout.write(" " * (3 - (len(out) % 3)))
+
+ # LET statement
+ elif op == 'LET':
+ target = instr[1]
+ value = instr[2]
+ self.assign(target, value)
+
+ # READ statement
+ elif op == 'READ':
+ for target in instr[1]:
+ if self.dc < len(self.data):
+ value = ('NUM', self.data[self.dc])
+ self.assign(target, value)
+ self.dc += 1
+ else:
+ # No more data. Program ends
+ return
+ elif op == 'IF':
+ relop = instr[1]
+ newline = instr[2]
+ if (self.releval(relop)):
+ self.goto(newline)
+ continue
+
+ elif op == 'FOR':
+ loopvar = instr[1]
+ initval = instr[2]
+ finval = instr[3]
+ stepval = instr[4]
+
+ # Check to see if this is a new loop
+ if not self.loops or self.loops[-1][0] != self.pc:
+ # Looks like a new loop. Make the initial assignment
+ newvalue = initval
+ self.assign((loopvar, None, None), initval)
+ if not stepval:
+ stepval = ('NUM', 1)
+ stepval = self.eval(stepval) # Evaluate step here
+ self.loops.append((self.pc, stepval))
+ else:
+ # It's a repeat of the previous loop
+ # Update the value of the loop variable according to the
+ # step
+ stepval = ('NUM', self.loops[-1][1])
+ newvalue = (
+ 'BINOP', '+', ('VAR', (loopvar, None, None)), stepval)
+
+ if self.loops[-1][1] < 0:
+ relop = '>='
+ else:
+ relop = '<='
+ if not self.releval(('RELOP', relop, newvalue, finval)):
+ # Loop is done. Jump to the NEXT
+ self.pc = self.loopend[self.pc]
+ self.loops.pop()
+ else:
+ self.assign((loopvar, None, None), newvalue)
+
+ elif op == 'NEXT':
+ if not self.loops:
+ print("NEXT WITHOUT FOR AT LINE %s" % line)
+ return
+
+ nextvar = instr[1]
+ self.pc = self.loops[-1][0]
+ loopinst = self.prog[self.stat[self.pc]]
+ forvar = loopinst[1]
+ if nextvar != forvar:
+ print("NEXT DOESN'T MATCH FOR AT LINE %s" % line)
+ return
+ continue
+ elif op == 'GOSUB':
+ newline = instr[1]
+ if self.gosub:
+ print("ALREADY IN A SUBROUTINE AT LINE %s" % line)
+ return
+ self.gosub = self.stat[self.pc]
+ self.goto(newline)
+ continue
+
+ elif op == 'RETURN':
+ if not self.gosub:
+ print("RETURN WITHOUT A GOSUB AT LINE %s" % line)
+ return
+ self.goto(self.gosub)
+ self.gosub = None
+
+ elif op == 'FUNC':
+ fname = instr[1]
+ pname = instr[2]
+ expr = instr[3]
+
+ def eval_func(pvalue, name=pname, self=self, expr=expr):
+ self.assign((pname, None, None), pvalue)
+ return self.eval(expr)
+ self.functions[fname] = eval_func
+
+ elif op == 'DIM':
+ for vname, x, y in instr[1]:
+ if y == 0:
+ # Single dimension variable
+ self.lists[vname] = [0] * x
+ else:
+ # Double dimension variable
+ temp = [0] * y
+ v = []
+ for i in range(x):
+ v.append(temp[:])
+ self.tables[vname] = v
+
+ self.pc += 1
+
+ # Utility functions for program listing
+ def expr_str(self, expr):
+ etype = expr[0]
+ if etype == 'NUM':
+ return str(expr[1])
+ elif etype == 'GROUP':
+ return "(%s)" % self.expr_str(expr[1])
+ elif etype == 'UNARY':
+ if expr[1] == '-':
+ return "-" + str(expr[2])
+ elif etype == 'BINOP':
+ return "%s %s %s" % (self.expr_str(expr[2]), expr[1], self.expr_str(expr[3]))
+ elif etype == 'VAR':
+ return self.var_str(expr[1])
+
+ def relexpr_str(self, expr):
+ return "%s %s %s" % (self.expr_str(expr[2]), expr[1], self.expr_str(expr[3]))
+
+ def var_str(self, var):
+ varname, dim1, dim2 = var
+ if not dim1 and not dim2:
+ return varname
+ if dim1 and not dim2:
+ return "%s(%s)" % (varname, self.expr_str(dim1))
+ return "%s(%s,%s)" % (varname, self.expr_str(dim1), self.expr_str(dim2))
+
+ # Create a program listing
+ def list(self):
+ stat = list(self.prog) # Ordered list of all line numbers
+ stat.sort()
+ for line in stat:
+ instr = self.prog[line]
+ op = instr[0]
+ if op in ['END', 'STOP', 'RETURN']:
+ print("%s %s" % (line, op))
+ continue
+ elif op == 'REM':
+ print("%s %s" % (line, instr[1]))
+ elif op == 'PRINT':
+ _out = "%s %s " % (line, op)
+ first = 1
+ for p in instr[1]:
+ if not first:
+ _out += ", "
+ if p[0] and p[1]:
+ _out += '"%s"%s' % (p[0], self.expr_str(p[1]))
+ elif p[1]:
+ _out += self.expr_str(p[1])
+ else:
+ _out += '"%s"' % (p[0],)
+ first = 0
+ if instr[2]:
+ _out += instr[2]
+ print(_out)
+ elif op == 'LET':
+ print("%s LET %s = %s" %
+ (line, self.var_str(instr[1]), self.expr_str(instr[2])))
+ elif op == 'READ':
+ _out = "%s READ " % line
+ first = 1
+ for r in instr[1]:
+ if not first:
+ _out += ","
+ _out += self.var_str(r)
+ first = 0
+ print(_out)
+ elif op == 'IF':
+ print("%s IF %s THEN %d" %
+ (line, self.relexpr_str(instr[1]), instr[2]))
+ elif op == 'GOTO' or op == 'GOSUB':
+ print("%s %s %s" % (line, op, instr[1]))
+ elif op == 'FOR':
+ _out = "%s FOR %s = %s TO %s" % (
+ line, instr[1], self.expr_str(instr[2]), self.expr_str(instr[3]))
+ if instr[4]:
+ _out += " STEP %s" % (self.expr_str(instr[4]))
+ print(_out)
+ elif op == 'NEXT':
+ print("%s NEXT %s" % (line, instr[1]))
+ elif op == 'FUNC':
+ print("%s DEF %s(%s) = %s" %
+ (line, instr[1], instr[2], self.expr_str(instr[3])))
+ elif op == 'DIM':
+ _out = "%s DIM " % line
+ first = 1
+ for vname, x, y in instr[1]:
+ if not first:
+ _out += ","
+ first = 0
+ if y == 0:
+ _out += "%s(%d)" % (vname, x)
+ else:
+ _out += "%s(%d,%d)" % (vname, x, y)
+
+ print(_out)
+ elif op == 'DATA':
+ _out = "%s DATA " % line
+ first = 1
+ for v in instr[1]:
+ if not first:
+ _out += ","
+ first = 0
+ _out += v
+ print(_out)
+
+ # Erase the current program
+ def new(self):
+ self.prog = {}
+
+ # Insert statements
+ def add_statements(self, prog):
+ for line, stat in prog.items():
+ self.prog[line] = stat
+
+ # Delete a statement
+ def del_line(self, lineno):
+ try:
+ del self.prog[lineno]
+ except KeyError:
+ pass
diff --git a/third_party/python/ply/example/BASIC/basparse.py b/third_party/python/ply/example/BASIC/basparse.py
new file mode 100644
index 0000000000..d610c7d909
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/basparse.py
@@ -0,0 +1,474 @@
+# An implementation of Dartmouth BASIC (1964)
+#
+
+from ply import *
+import basiclex
+
+tokens = basiclex.tokens
+
+precedence = (
+ ('left', 'PLUS', 'MINUS'),
+ ('left', 'TIMES', 'DIVIDE'),
+ ('left', 'POWER'),
+ ('right', 'UMINUS')
+)
+
+# A BASIC program is a series of statements. We represent the program as a
+# dictionary of tuples indexed by line number.
+
+
+def p_program(p):
+ '''program : program statement
+ | statement'''
+
+ if len(p) == 2 and p[1]:
+ p[0] = {}
+ line, stat = p[1]
+ p[0][line] = stat
+ elif len(p) == 3:
+ p[0] = p[1]
+ if not p[0]:
+ p[0] = {}
+ if p[2]:
+ line, stat = p[2]
+ p[0][line] = stat
+
+# This catch-all rule is used for any catastrophic errors. In this case,
+# we simply return nothing
+
+
+def p_program_error(p):
+ '''program : error'''
+ p[0] = None
+ p.parser.error = 1
+
+# Format of all BASIC statements.
+
+
+def p_statement(p):
+ '''statement : INTEGER command NEWLINE'''
+ if isinstance(p[2], str):
+ print("%s %s %s" % (p[2], "AT LINE", p[1]))
+ p[0] = None
+ p.parser.error = 1
+ else:
+ lineno = int(p[1])
+ p[0] = (lineno, p[2])
+
+# Interactive statements.
+
+
+def p_statement_interactive(p):
+ '''statement : RUN NEWLINE
+ | LIST NEWLINE
+ | NEW NEWLINE'''
+ p[0] = (0, (p[1], 0))
+
+# Blank line number
+
+
+def p_statement_blank(p):
+ '''statement : INTEGER NEWLINE'''
+ p[0] = (0, ('BLANK', int(p[1])))
+
+# Error handling for malformed statements
+
+
+def p_statement_bad(p):
+ '''statement : INTEGER error NEWLINE'''
+ print("MALFORMED STATEMENT AT LINE %s" % p[1])
+ p[0] = None
+ p.parser.error = 1
+
+# Blank line
+
+
+def p_statement_newline(p):
+ '''statement : NEWLINE'''
+ p[0] = None
+
+# LET statement
+
+
+def p_command_let(p):
+ '''command : LET variable EQUALS expr'''
+ p[0] = ('LET', p[2], p[4])
+
+
+def p_command_let_bad(p):
+ '''command : LET variable EQUALS error'''
+ p[0] = "BAD EXPRESSION IN LET"
+
+# READ statement
+
+
+def p_command_read(p):
+ '''command : READ varlist'''
+ p[0] = ('READ', p[2])
+
+
+def p_command_read_bad(p):
+ '''command : READ error'''
+ p[0] = "MALFORMED VARIABLE LIST IN READ"
+
+# DATA statement
+
+
+def p_command_data(p):
+ '''command : DATA numlist'''
+ p[0] = ('DATA', p[2])
+
+
+def p_command_data_bad(p):
+ '''command : DATA error'''
+ p[0] = "MALFORMED NUMBER LIST IN DATA"
+
+# PRINT statement
+
+
+def p_command_print(p):
+ '''command : PRINT plist optend'''
+ p[0] = ('PRINT', p[2], p[3])
+
+
+def p_command_print_bad(p):
+ '''command : PRINT error'''
+ p[0] = "MALFORMED PRINT STATEMENT"
+
+# Optional ending on PRINT. Either a comma (,) or semicolon (;)
+
+
+def p_optend(p):
+ '''optend : COMMA
+ | SEMI
+ |'''
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ p[0] = None
+
+# PRINT statement with no arguments
+
+
+def p_command_print_empty(p):
+ '''command : PRINT'''
+ p[0] = ('PRINT', [], None)
+
+# GOTO statement
+
+
+def p_command_goto(p):
+ '''command : GOTO INTEGER'''
+ p[0] = ('GOTO', int(p[2]))
+
+
+def p_command_goto_bad(p):
+ '''command : GOTO error'''
+ p[0] = "INVALID LINE NUMBER IN GOTO"
+
+# IF-THEN statement
+
+
+def p_command_if(p):
+ '''command : IF relexpr THEN INTEGER'''
+ p[0] = ('IF', p[2], int(p[4]))
+
+
+def p_command_if_bad(p):
+ '''command : IF error THEN INTEGER'''
+ p[0] = "BAD RELATIONAL EXPRESSION"
+
+
+def p_command_if_bad2(p):
+ '''command : IF relexpr THEN error'''
+ p[0] = "INVALID LINE NUMBER IN THEN"
+
+# FOR statement
+
+
+def p_command_for(p):
+ '''command : FOR ID EQUALS expr TO expr optstep'''
+ p[0] = ('FOR', p[2], p[4], p[6], p[7])
+
+
+def p_command_for_bad_initial(p):
+ '''command : FOR ID EQUALS error TO expr optstep'''
+ p[0] = "BAD INITIAL VALUE IN FOR STATEMENT"
+
+
+def p_command_for_bad_final(p):
+ '''command : FOR ID EQUALS expr TO error optstep'''
+ p[0] = "BAD FINAL VALUE IN FOR STATEMENT"
+
+
+def p_command_for_bad_step(p):
+ '''command : FOR ID EQUALS expr TO expr STEP error'''
+ p[0] = "MALFORMED STEP IN FOR STATEMENT"
+
+# Optional STEP qualifier on FOR statement
+
+
+def p_optstep(p):
+ '''optstep : STEP expr
+ | empty'''
+ if len(p) == 3:
+ p[0] = p[2]
+ else:
+ p[0] = None
+
+# NEXT statement
+
+
+def p_command_next(p):
+ '''command : NEXT ID'''
+
+ p[0] = ('NEXT', p[2])
+
+
+def p_command_next_bad(p):
+ '''command : NEXT error'''
+ p[0] = "MALFORMED NEXT"
+
+# END statement
+
+
+def p_command_end(p):
+ '''command : END'''
+ p[0] = ('END',)
+
+# REM statement
+
+
+def p_command_rem(p):
+ '''command : REM'''
+ p[0] = ('REM', p[1])
+
+# STOP statement
+
+
+def p_command_stop(p):
+ '''command : STOP'''
+ p[0] = ('STOP',)
+
+# DEF statement
+
+
+def p_command_def(p):
+ '''command : DEF ID LPAREN ID RPAREN EQUALS expr'''
+ p[0] = ('FUNC', p[2], p[4], p[7])
+
+
+def p_command_def_bad_rhs(p):
+ '''command : DEF ID LPAREN ID RPAREN EQUALS error'''
+ p[0] = "BAD EXPRESSION IN DEF STATEMENT"
+
+
+def p_command_def_bad_arg(p):
+ '''command : DEF ID LPAREN error RPAREN EQUALS expr'''
+ p[0] = "BAD ARGUMENT IN DEF STATEMENT"
+
+# GOSUB statement
+
+
+def p_command_gosub(p):
+ '''command : GOSUB INTEGER'''
+ p[0] = ('GOSUB', int(p[2]))
+
+
+def p_command_gosub_bad(p):
+ '''command : GOSUB error'''
+ p[0] = "INVALID LINE NUMBER IN GOSUB"
+
+# RETURN statement
+
+
+def p_command_return(p):
+ '''command : RETURN'''
+ p[0] = ('RETURN',)
+
+# DIM statement
+
+
+def p_command_dim(p):
+ '''command : DIM dimlist'''
+ p[0] = ('DIM', p[2])
+
+
+def p_command_dim_bad(p):
+ '''command : DIM error'''
+ p[0] = "MALFORMED VARIABLE LIST IN DIM"
+
+# List of variables supplied to DIM statement
+
+
+def p_dimlist(p):
+ '''dimlist : dimlist COMMA dimitem
+ | dimitem'''
+ if len(p) == 4:
+ p[0] = p[1]
+ p[0].append(p[3])
+ else:
+ p[0] = [p[1]]
+
+# DIM items
+
+
+def p_dimitem_single(p):
+ '''dimitem : ID LPAREN INTEGER RPAREN'''
+ p[0] = (p[1], eval(p[3]), 0)
+
+
+def p_dimitem_double(p):
+ '''dimitem : ID LPAREN INTEGER COMMA INTEGER RPAREN'''
+ p[0] = (p[1], eval(p[3]), eval(p[5]))
+
+# Arithmetic expressions
+
+
+def p_expr_binary(p):
+ '''expr : expr PLUS expr
+ | expr MINUS expr
+ | expr TIMES expr
+ | expr DIVIDE expr
+ | expr POWER expr'''
+
+ p[0] = ('BINOP', p[2], p[1], p[3])
+
+
+def p_expr_number(p):
+ '''expr : INTEGER
+ | FLOAT'''
+ p[0] = ('NUM', eval(p[1]))
+
+
+def p_expr_variable(p):
+ '''expr : variable'''
+ p[0] = ('VAR', p[1])
+
+
+def p_expr_group(p):
+ '''expr : LPAREN expr RPAREN'''
+ p[0] = ('GROUP', p[2])
+
+
+def p_expr_unary(p):
+ '''expr : MINUS expr %prec UMINUS'''
+ p[0] = ('UNARY', '-', p[2])
+
+# Relational expressions
+
+
+def p_relexpr(p):
+ '''relexpr : expr LT expr
+ | expr LE expr
+ | expr GT expr
+ | expr GE expr
+ | expr EQUALS expr
+ | expr NE expr'''
+ p[0] = ('RELOP', p[2], p[1], p[3])
+
+# Variables
+
+
+def p_variable(p):
+ '''variable : ID
+ | ID LPAREN expr RPAREN
+ | ID LPAREN expr COMMA expr RPAREN'''
+ if len(p) == 2:
+ p[0] = (p[1], None, None)
+ elif len(p) == 5:
+ p[0] = (p[1], p[3], None)
+ else:
+ p[0] = (p[1], p[3], p[5])
+
+# Builds a list of variable targets as a Python list
+
+
+def p_varlist(p):
+ '''varlist : varlist COMMA variable
+ | variable'''
+ if len(p) > 2:
+ p[0] = p[1]
+ p[0].append(p[3])
+ else:
+ p[0] = [p[1]]
+
+
+# Builds a list of numbers as a Python list
+
+def p_numlist(p):
+ '''numlist : numlist COMMA number
+ | number'''
+
+ if len(p) > 2:
+ p[0] = p[1]
+ p[0].append(p[3])
+ else:
+ p[0] = [p[1]]
+
+# A number. May be an integer or a float
+
+
+def p_number(p):
+ '''number : INTEGER
+ | FLOAT'''
+ p[0] = eval(p[1])
+
+# A signed number.
+
+
+def p_number_signed(p):
+ '''number : MINUS INTEGER
+ | MINUS FLOAT'''
+ p[0] = eval("-" + p[2])
+
+# List of targets for a print statement
+# Returns a list of tuples (label,expr)
+
+
+def p_plist(p):
+ '''plist : plist COMMA pitem
+ | pitem'''
+ if len(p) > 3:
+ p[0] = p[1]
+ p[0].append(p[3])
+ else:
+ p[0] = [p[1]]
+
+
+def p_item_string(p):
+ '''pitem : STRING'''
+ p[0] = (p[1][1:-1], None)
+
+
+def p_item_string_expr(p):
+ '''pitem : STRING expr'''
+ p[0] = (p[1][1:-1], p[2])
+
+
+def p_item_expr(p):
+ '''pitem : expr'''
+ p[0] = ("", p[1])
+
+# Empty
+
+
+def p_empty(p):
+ '''empty : '''
+
+# Catastrophic error handler
+
+
+def p_error(p):
+ if not p:
+ print("SYNTAX ERROR AT EOF")
+
+bparser = yacc.yacc()
+
+
+def parse(data, debug=0):
+ bparser.error = 0
+ p = bparser.parse(data, debug=debug)
+ if bparser.error:
+ return None
+ return p
diff --git a/third_party/python/ply/example/BASIC/dim.bas b/third_party/python/ply/example/BASIC/dim.bas
new file mode 100644
index 0000000000..87bd95b32e
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/dim.bas
@@ -0,0 +1,14 @@
+5 DIM A(50,15)
+10 FOR I = 1 TO 50
+20 FOR J = 1 TO 15
+30 LET A(I,J) = I + J
+35 REM PRINT I,J, A(I,J)
+40 NEXT J
+50 NEXT I
+100 FOR I = 1 TO 50
+110 FOR J = 1 TO 15
+120 PRINT A(I,J),
+130 NEXT J
+140 PRINT
+150 NEXT I
+999 END
diff --git a/third_party/python/ply/example/BASIC/func.bas b/third_party/python/ply/example/BASIC/func.bas
new file mode 100644
index 0000000000..447ee16a92
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/func.bas
@@ -0,0 +1,5 @@
+10 DEF FDX(X) = 2*X
+20 FOR I = 0 TO 100
+30 PRINT FDX(I)
+40 NEXT I
+50 END
diff --git a/third_party/python/ply/example/BASIC/gcd.bas b/third_party/python/ply/example/BASIC/gcd.bas
new file mode 100644
index 0000000000..d0b7746089
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/gcd.bas
@@ -0,0 +1,22 @@
+10 PRINT "A","B","C","GCD"
+20 READ A,B,C
+30 LET X = A
+40 LET Y = B
+50 GOSUB 200
+60 LET X = G
+70 LET Y = C
+80 GOSUB 200
+90 PRINT A, B, C, G
+100 GOTO 20
+110 DATA 60, 90, 120
+120 DATA 38456, 64872, 98765
+130 DATA 32, 384, 72
+200 LET Q = INT(X/Y)
+210 LET R = X - Q*Y
+220 IF R = 0 THEN 300
+230 LET X = Y
+240 LET Y = R
+250 GOTO 200
+300 LET G = Y
+310 RETURN
+999 END
diff --git a/third_party/python/ply/example/BASIC/gosub.bas b/third_party/python/ply/example/BASIC/gosub.bas
new file mode 100644
index 0000000000..99737b16f1
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/gosub.bas
@@ -0,0 +1,13 @@
+100 LET X = 3
+110 GOSUB 400
+120 PRINT U, V, W
+200 LET X = 5
+210 GOSUB 400
+220 LET Z = U + 2*V + 3*W
+230 PRINT Z
+240 GOTO 999
+400 LET U = X*X
+410 LET V = X*X*X
+420 LET W = X*X*X*X + X*X*X + X*X + X
+430 RETURN
+999 END
diff --git a/third_party/python/ply/example/BASIC/hello.bas b/third_party/python/ply/example/BASIC/hello.bas
new file mode 100644
index 0000000000..cc6f0b0b51
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/hello.bas
@@ -0,0 +1,4 @@
+5 REM HELLO WORLD PROGRAM
+10 PRINT "HELLO WORLD"
+99 END
+
diff --git a/third_party/python/ply/example/BASIC/linear.bas b/third_party/python/ply/example/BASIC/linear.bas
new file mode 100644
index 0000000000..56c08220b3
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/linear.bas
@@ -0,0 +1,17 @@
+1 REM ::: SOLVE A SYSTEM OF LINEAR EQUATIONS
+2 REM ::: A1*X1 + A2*X2 = B1
+3 REM ::: A3*X1 + A4*X2 = B2
+4 REM --------------------------------------
+10 READ A1, A2, A3, A4
+15 LET D = A1 * A4 - A3 * A2
+20 IF D = 0 THEN 65
+30 READ B1, B2
+37 LET X1 = (B1*A4 - B2*A2) / D
+42 LET X2 = (A1*B2 - A3*B1) / D
+55 PRINT X1, X2
+60 GOTO 30
+65 PRINT "NO UNIQUE SOLUTION"
+70 DATA 1, 2, 4
+80 DATA 2, -7, 5
+85 DATA 1, 3, 4, -7
+90 END
diff --git a/third_party/python/ply/example/BASIC/maxsin.bas b/third_party/python/ply/example/BASIC/maxsin.bas
new file mode 100644
index 0000000000..b96901530c
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/maxsin.bas
@@ -0,0 +1,12 @@
+5 PRINT "X VALUE", "SINE", "RESOLUTION"
+10 READ D
+20 LET M = -1
+30 FOR X = 0 TO 3 STEP D
+40 IF SIN(X) <= M THEN 80
+50 LET X0 = X
+60 LET M = SIN(X)
+80 NEXT X
+85 PRINT X0, M, D
+90 GOTO 10
+100 DATA .1, .01, .001
+110 END
diff --git a/third_party/python/ply/example/BASIC/powers.bas b/third_party/python/ply/example/BASIC/powers.bas
new file mode 100644
index 0000000000..a454dc3e21
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/powers.bas
@@ -0,0 +1,13 @@
+5 PRINT "THIS PROGRAM COMPUTES AND PRINTS THE NTH POWERS"
+6 PRINT "OF THE NUMBERS LESS THAN OR EQUAL TO N FOR VARIOUS"
+7 PRINT "N FROM 1 THROUGH 7"
+8 PRINT
+10 FOR N = 1 TO 7
+15 PRINT "N = "N
+20 FOR I = 1 TO N
+30 PRINT I^N,
+40 NEXT I
+50 PRINT
+60 PRINT
+70 NEXT N
+80 END
diff --git a/third_party/python/ply/example/BASIC/rand.bas b/third_party/python/ply/example/BASIC/rand.bas
new file mode 100644
index 0000000000..4ff7a14670
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/rand.bas
@@ -0,0 +1,4 @@
+10 FOR I = 1 TO 20
+20 PRINT INT(10*RND(0))
+30 NEXT I
+40 END
diff --git a/third_party/python/ply/example/BASIC/sales.bas b/third_party/python/ply/example/BASIC/sales.bas
new file mode 100644
index 0000000000..a39aefb762
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/sales.bas
@@ -0,0 +1,20 @@
+10 FOR I = 1 TO 3
+20 READ P(I)
+30 NEXT I
+40 FOR I = 1 TO 3
+50 FOR J = 1 TO 5
+60 READ S(I,J)
+70 NEXT J
+80 NEXT I
+90 FOR J = 1 TO 5
+100 LET S = 0
+110 FOR I = 1 TO 3
+120 LET S = S + P(I) * S(I,J)
+130 NEXT I
+140 PRINT "TOTAL SALES FOR SALESMAN"J, "$"S
+150 NEXT J
+200 DATA 1.25, 4.30, 2.50
+210 DATA 40, 20, 37, 29, 42
+220 DATA 10, 16, 3, 21, 8
+230 DATA 35, 47, 29, 16, 33
+300 END
diff --git a/third_party/python/ply/example/BASIC/sears.bas b/third_party/python/ply/example/BASIC/sears.bas
new file mode 100644
index 0000000000..5ced3974e2
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/sears.bas
@@ -0,0 +1,18 @@
+1 REM :: THIS PROGRAM COMPUTES HOW MANY TIMES YOU HAVE TO FOLD
+2 REM :: A PIECE OF PAPER SO THAT IT IS TALLER THAN THE
+3 REM :: SEARS TOWER.
+4 REM :: S = HEIGHT OF TOWER (METERS)
+5 REM :: T = THICKNESS OF PAPER (MILLIMETERS)
+10 LET S = 442
+20 LET T = 0.1
+30 REM CONVERT T TO METERS
+40 LET T = T * .001
+50 LET F = 1
+60 LET H = T
+100 IF H > S THEN 200
+120 LET H = 2 * H
+125 LET F = F + 1
+130 GOTO 100
+200 PRINT "NUMBER OF FOLDS ="F
+220 PRINT "FINAL HEIGHT ="H
+999 END
diff --git a/third_party/python/ply/example/BASIC/sqrt1.bas b/third_party/python/ply/example/BASIC/sqrt1.bas
new file mode 100644
index 0000000000..6673a91524
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/sqrt1.bas
@@ -0,0 +1,5 @@
+10 LET X = 0
+20 LET X = X + 1
+30 PRINT X, SQR(X)
+40 IF X < 100 THEN 20
+50 END
diff --git a/third_party/python/ply/example/BASIC/sqrt2.bas b/third_party/python/ply/example/BASIC/sqrt2.bas
new file mode 100644
index 0000000000..862d85ef26
--- /dev/null
+++ b/third_party/python/ply/example/BASIC/sqrt2.bas
@@ -0,0 +1,4 @@
+10 FOR X = 1 TO 100
+20 PRINT X, SQR(X)
+30 NEXT X
+40 END
diff --git a/third_party/python/ply/example/GardenSnake/GardenSnake.py b/third_party/python/ply/example/GardenSnake/GardenSnake.py
new file mode 100644
index 0000000000..8b493b40dc
--- /dev/null
+++ b/third_party/python/ply/example/GardenSnake/GardenSnake.py
@@ -0,0 +1,777 @@
+# GardenSnake - a parser generator demonstration program
+#
+# This implements a modified version of a subset of Python:
+# - only 'def', 'return' and 'if' statements
+# - 'if' only has a 'then' clause (no elif or else)
+# - single-quoted strings only, content in raw format
+# - numbers are decimal.Decimal instances (not integers or floats)
+# - no print statement; use the built-in 'print' function
+# - only < > == + - / * implemented (and unary + -)
+# - assignment and tuple assignment work
+# - no generators of any sort
+# - no ... well, quite a lot is missing
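+#
+# For example, this subset accepts a tiny program such as:
+#
+#     def add1(a):
+#         return a + 1
+#     print(add1(4))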
+
+# Why? I'm thinking about a new indentation-based configuration
+# language for a project and wanted to figure out how to do it. Once
+# I got that working I needed a way to test it out. My original AST
+# was dumb so I decided to target Python's AST and compile it into
+# Python code. Plus, it's pretty cool that it only took a day or so
+# from sitting down with Ply to having working code.
+
+# This uses David Beazley's Ply from http://www.dabeaz.com/ply/
+
+# This work is hereby released into the Public Domain. To view a copy of
+# the public domain dedication, visit
+# http://creativecommons.org/licenses/publicdomain/ or send a letter to
+# Creative Commons, 543 Howard Street, 5th Floor, San Francisco,
+# California, 94105, USA.
+#
+# Portions of this work are derived from Python's Grammar definition
+# and may be covered under the Python copyright and license
+#
+# Andrew Dalke / Dalke Scientific Software, LLC
+# 30 August 2006 / Cape Town, South Africa
+
+# Changelog:
+# 30 August - added link to CC license; removed the "swapcase" encoding
+
+# Modifications for inclusion in PLY distribution
+import sys
+sys.path.insert(0, "../..")
+from ply import *
+
+##### Lexer ######
+#import lex
+import decimal
+
+tokens = (
+ 'DEF',
+ 'IF',
+ 'NAME',
+ 'NUMBER', # Python decimals
+ 'STRING', # single quoted strings only; syntax of raw strings
+ 'LPAR',
+ 'RPAR',
+ 'COLON',
+ 'EQ',
+ 'ASSIGN',
+ 'LT',
+ 'GT',
+ 'PLUS',
+ 'MINUS',
+ 'MULT',
+ 'DIV',
+ 'RETURN',
+ 'WS',
+ 'NEWLINE',
+ 'COMMA',
+ 'SEMICOLON',
+ 'INDENT',
+ 'DEDENT',
+ 'ENDMARKER',
+)
+
+#t_NUMBER = r'\d+'
+# taken from decimal.py but without the leading sign
+
+
+def t_NUMBER(t):
+ r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?"""
+ t.value = decimal.Decimal(t.value)
+ return t
+
+
+def t_STRING(t):
+ r"'([^\\']+|\\'|\\\\)*'" # I think this is right ...
+ t.value = t.value[1:-1].decode("string-escape") # .swapcase() # for fun
+ return t
+
+t_COLON = r':'
+t_EQ = r'=='
+t_ASSIGN = r'='
+t_LT = r'<'
+t_GT = r'>'
+t_PLUS = r'\+'
+t_MINUS = r'-'
+t_MULT = r'\*'
+t_DIV = r'/'
+t_COMMA = r','
+t_SEMICOLON = r';'
+
+# Ply nicely documented how to do this.
+
+RESERVED = {
+ "def": "DEF",
+ "if": "IF",
+ "return": "RETURN",
+}
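+# e.g. the input "def" lexes as a DEF token (see t_NAME below), not as a NAME.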
+
+
+def t_NAME(t):
+ r'[a-zA-Z_][a-zA-Z0-9_]*'
+ t.type = RESERVED.get(t.value, "NAME")
+ return t
+
+# Putting this before t_WS lets it consume lines containing only comments,
+# so the later code never sees the WS part.  It does not consume the
+# newline, which is needed for "if 1: #comment".
+
+
+def t_comment(t):
+ r"[ ]*\043[^\n]*" # \043 is '#'
+ pass
+
+
+# Whitespace
+def t_WS(t):
+ r' [ ]+ '
+ if t.lexer.at_line_start and t.lexer.paren_count == 0:
+ return t
+
+# Don't generate newline tokens when inside parentheses, e.g.
+# a = (1,
+# 2, 3)
+
+
+def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += len(t.value)
+ t.type = "NEWLINE"
+ if t.lexer.paren_count == 0:
+ return t
+
+
+def t_LPAR(t):
+ r'\('
+ t.lexer.paren_count += 1
+ return t
+
+
+def t_RPAR(t):
+ r'\)'
+ # check for underflow? should be the job of the parser
+ t.lexer.paren_count -= 1
+ return t
+
+
+def t_error(t):
+ raise SyntaxError("Unknown symbol %r" % (t.value[0],))
+ print "Skipping", repr(t.value[0])
+ t.lexer.skip(1)
+
+# I implemented INDENT / DEDENT generation as a post-processing filter
+
+# The original lex token stream contains WS and NEWLINE characters.
+# WS will only occur before any other tokens on a line.
+
+# I have three filters. One tags tokens by adding two attributes.
+# "must_indent" is True if the token must be indented from the
+# previous code. The other is "at_line_start", which is True for WS
+# and the first non-WS/non-NEWLINE token on a line. It flags the check
+# to see if the new line has changed indentation level.
+
+# Python's syntax has three INDENT states
+# 0) no colon hence no need to indent
+# 1) "if 1: go()" - simple statements have a COLON but no need for an indent
+# 2) "if 1:\n go()" - complex statements have a COLON NEWLINE and must indent
+NO_INDENT = 0
+MAY_INDENT = 1
+MUST_INDENT = 2
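+
+# For example, the source
+#
+#     if 1:
+#         x = 2
+#
+# lexes to IF NUMBER COLON NEWLINE WS NAME ASSIGN NUMBER NEWLINE, and the
+# filters below rewrite the WS bookkeeping into
+# IF NUMBER COLON NEWLINE INDENT NAME ASSIGN NUMBER NEWLINE DEDENT.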
+
+# only care about whitespace at the start of a line
+
+
+def track_tokens_filter(lexer, tokens):
+ lexer.at_line_start = at_line_start = True
+ indent = NO_INDENT
+ saw_colon = False
+ for token in tokens:
+ token.at_line_start = at_line_start
+
+ if token.type == "COLON":
+ at_line_start = False
+ indent = MAY_INDENT
+ token.must_indent = False
+
+ elif token.type == "NEWLINE":
+ at_line_start = True
+ if indent == MAY_INDENT:
+ indent = MUST_INDENT
+ token.must_indent = False
+
+ elif token.type == "WS":
+ assert token.at_line_start == True
+ at_line_start = True
+ token.must_indent = False
+
+ else:
+ # A real token; only indent after COLON NEWLINE
+ if indent == MUST_INDENT:
+ token.must_indent = True
+ else:
+ token.must_indent = False
+ at_line_start = False
+ indent = NO_INDENT
+
+ yield token
+ lexer.at_line_start = at_line_start
+
+
+def _new_token(type, lineno):
+ tok = lex.LexToken()
+ tok.type = type
+ tok.value = None
+ tok.lineno = lineno
+ return tok
+
+# Synthesize a DEDENT tag
+
+
+def DEDENT(lineno):
+ return _new_token("DEDENT", lineno)
+
+# Synthesize an INDENT tag
+
+
+def INDENT(lineno):
+ return _new_token("INDENT", lineno)
+
+
+# Track the indentation level and emit the right INDENT / DEDENT events.
+def indentation_filter(tokens):
+ # A stack of indentation levels; will never pop item 0
+ levels = [0]
+ token = None
+ depth = 0
+ prev_was_ws = False
+ for token in tokens:
+ # if 1:
+ # print "Process", token,
+ # if token.at_line_start:
+ # print "at_line_start",
+ # if token.must_indent:
+ # print "must_indent",
+ # print
+
+ # WS only occurs at the start of the line
+ # There may be WS followed by NEWLINE so
+ # only track the depth here. Don't indent/dedent
+ # until there's something real.
+ if token.type == "WS":
+ assert depth == 0
+ depth = len(token.value)
+ prev_was_ws = True
+ # WS tokens are never passed to the parser
+ continue
+
+ if token.type == "NEWLINE":
+ depth = 0
+ if prev_was_ws or token.at_line_start:
+ # ignore blank lines
+ continue
+ # pass the other cases on through
+ yield token
+ continue
+
+ # then it must be a real token (not WS, not NEWLINE)
+ # which can affect the indentation level
+
+ prev_was_ws = False
+ if token.must_indent:
+ # The current depth must be larger than the previous level
+ if not (depth > levels[-1]):
+ raise IndentationError("expected an indented block")
+
+ levels.append(depth)
+ yield INDENT(token.lineno)
+
+ elif token.at_line_start:
+ # Must be on the same level or one of the previous levels
+ if depth == levels[-1]:
+ # At the same level
+ pass
+ elif depth > levels[-1]:
+                raise IndentationError(
+                    "indentation increased but not in a new block")
+ else:
+ # Back up; but only if it matches a previous level
+ try:
+ i = levels.index(depth)
+ except ValueError:
+ raise IndentationError("inconsistent indentation")
+ for _ in range(i + 1, len(levels)):
+ yield DEDENT(token.lineno)
+ levels.pop()
+
+ yield token
+
+ ### Finished processing ###
+
+ # Must dedent any remaining levels
+ if len(levels) > 1:
+ assert token is not None
+ for _ in range(1, len(levels)):
+ yield DEDENT(token.lineno)
+
+
+# The top-level filter adds an ENDMARKER, if requested.
+# Python's grammar uses it.
+def filter(lexer, add_endmarker=True):
+ token = None
+ tokens = iter(lexer.token, None)
+ tokens = track_tokens_filter(lexer, tokens)
+ for token in indentation_filter(tokens):
+ yield token
+
+ if add_endmarker:
+ lineno = 1
+ if token is not None:
+ lineno = token.lineno
+ yield _new_token("ENDMARKER", lineno)
+
+# Combine Ply and my filters into a new lexer
+
+
+class IndentLexer(object):
+
+ def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0):
+ self.lexer = lex.lex(debug=debug, optimize=optimize,
+ lextab=lextab, reflags=reflags)
+ self.token_stream = None
+
+ def input(self, s, add_endmarker=True):
+ self.lexer.paren_count = 0
+ self.lexer.input(s)
+ self.token_stream = filter(self.lexer, add_endmarker)
+
+ def token(self):
+ try:
+ return self.token_stream.next()
+ except StopIteration:
+ return None
+
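+# A minimal usage sketch (illustrative only; assumes the token rules above):
+#
+#     lexer = IndentLexer()
+#     lexer.input("if 1:\n    x = 2\n")
+#     tok = lexer.token()
+#     while tok is not None:
+#         print tok          # Python 2 print, as in the rest of this file
+#         tok = lexer.token()
+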
+########## Parser (tokens -> AST) ######
+
+# also part of Ply
+#import yacc
+
+# I use the Python AST
+from compiler import ast
+
+# Helper function
+
+
+def Assign(left, right):
+ names = []
+ if isinstance(left, ast.Name):
+ # Single assignment on left
+ return ast.Assign([ast.AssName(left.name, 'OP_ASSIGN')], right)
+ elif isinstance(left, ast.Tuple):
+ # List of things - make sure they are Name nodes
+ names = []
+ for child in left.getChildren():
+ if not isinstance(child, ast.Name):
+ raise SyntaxError("that assignment not supported")
+ names.append(child.name)
+ ass_list = [ast.AssName(name, 'OP_ASSIGN') for name in names]
+ return ast.Assign([ast.AssTuple(ass_list)], right)
+ else:
+ raise SyntaxError("Can't do that yet")
+
+
+# The grammar comments come from Python's Grammar/Grammar file
+
+# NB: compound_stmt in single_input is followed by extra NEWLINE!
+# file_input: (NEWLINE | stmt)* ENDMARKER
+def p_file_input_end(p):
+ """file_input_end : file_input ENDMARKER"""
+ p[0] = ast.Stmt(p[1])
+
+
+def p_file_input(p):
+ """file_input : file_input NEWLINE
+ | file_input stmt
+ | NEWLINE
+ | stmt"""
+ if isinstance(p[len(p) - 1], basestring):
+ if len(p) == 3:
+ p[0] = p[1]
+ else:
+            p[0] = []  # len(p) == 2 --> only a blank line
+ else:
+ if len(p) == 3:
+ p[0] = p[1] + p[2]
+ else:
+ p[0] = p[1]
+
+
+# funcdef: [decorators] 'def' NAME parameters ':' suite
+# ignoring decorators
+def p_funcdef(p):
+ "funcdef : DEF NAME parameters COLON suite"
+ p[0] = ast.Function(None, p[2], tuple(p[3]), (), 0, None, p[5])
+
+# parameters: '(' [varargslist] ')'
+
+
+def p_parameters(p):
+ """parameters : LPAR RPAR
+ | LPAR varargslist RPAR"""
+ if len(p) == 3:
+ p[0] = []
+ else:
+ p[0] = p[2]
+
+
+# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) |
+# highly simplified
+def p_varargslist(p):
+ """varargslist : varargslist COMMA NAME
+ | NAME"""
+ if len(p) == 4:
+ p[0] = p[1] + p[3]
+ else:
+ p[0] = [p[1]]
+
+# stmt: simple_stmt | compound_stmt
+
+
+def p_stmt_simple(p):
+ """stmt : simple_stmt"""
+ # simple_stmt is a list
+ p[0] = p[1]
+
+
+def p_stmt_compound(p):
+ """stmt : compound_stmt"""
+ p[0] = [p[1]]
+
+# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
+
+
+def p_simple_stmt(p):
+ """simple_stmt : small_stmts NEWLINE
+ | small_stmts SEMICOLON NEWLINE"""
+ p[0] = p[1]
+
+
+def p_small_stmts(p):
+ """small_stmts : small_stmts SEMICOLON small_stmt
+ | small_stmt"""
+ if len(p) == 4:
+ p[0] = p[1] + [p[3]]
+ else:
+ p[0] = [p[1]]
+
+# small_stmt: expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
+# import_stmt | global_stmt | exec_stmt | assert_stmt
+
+
+def p_small_stmt(p):
+ """small_stmt : flow_stmt
+ | expr_stmt"""
+ p[0] = p[1]
+
+# expr_stmt: testlist (augassign (yield_expr|testlist) |
+# ('=' (yield_expr|testlist))*)
+# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' |
+# '<<=' | '>>=' | '**=' | '//=')
+
+
+def p_expr_stmt(p):
+ """expr_stmt : testlist ASSIGN testlist
+ | testlist """
+ if len(p) == 2:
+ # a list of expressions
+ p[0] = ast.Discard(p[1])
+ else:
+ p[0] = Assign(p[1], p[3])
+
+
+def p_flow_stmt(p):
+ "flow_stmt : return_stmt"
+ p[0] = p[1]
+
+# return_stmt: 'return' [testlist]
+
+
+def p_return_stmt(p):
+ "return_stmt : RETURN testlist"
+ p[0] = ast.Return(p[2])
+
+
+def p_compound_stmt(p):
+ """compound_stmt : if_stmt
+ | funcdef"""
+ p[0] = p[1]
+
+
+def p_if_stmt(p):
+ 'if_stmt : IF test COLON suite'
+ p[0] = ast.If([(p[2], p[4])], None)
+
+
+def p_suite(p):
+ """suite : simple_stmt
+ | NEWLINE INDENT stmts DEDENT"""
+ if len(p) == 2:
+ p[0] = ast.Stmt(p[1])
+ else:
+ p[0] = ast.Stmt(p[3])
+
+
+def p_stmts(p):
+ """stmts : stmts stmt
+ | stmt"""
+ if len(p) == 3:
+ p[0] = p[1] + p[2]
+ else:
+ p[0] = p[1]
+
+# Not using Python's approach because Ply supports precedence
+
+# comparison: expr (comp_op expr)*
+# arith_expr: term (('+'|'-') term)*
+# term: factor (('*'|'/'|'%'|'//') factor)*
+# factor: ('+'|'-'|'~') factor | power
+# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
+
+
+def make_lt_compare((left, right)):
+ return ast.Compare(left, [('<', right), ])
+
+
+def make_gt_compare((left, right)):
+ return ast.Compare(left, [('>', right), ])
+
+
+def make_eq_compare((left, right)):
+ return ast.Compare(left, [('==', right), ])
+
+
+binary_ops = {
+ "+": ast.Add,
+ "-": ast.Sub,
+ "*": ast.Mul,
+ "/": ast.Div,
+ "<": make_lt_compare,
+ ">": make_gt_compare,
+ "==": make_eq_compare,
+}
+unary_ops = {
+ "+": ast.UnaryAdd,
+ "-": ast.UnarySub,
+}
+precedence = (
+ ("left", "EQ", "GT", "LT"),
+ ("left", "PLUS", "MINUS"),
+ ("left", "MULT", "DIV"),
+)
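+# The deliberately ambiguous 'comparison' rule below relies on this table:
+# e.g. 1 + 2 * 3 parses as 1 + (2 * 3) because MULT binds tighter than PLUS.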
+
+
+def p_comparison(p):
+ """comparison : comparison PLUS comparison
+ | comparison MINUS comparison
+ | comparison MULT comparison
+ | comparison DIV comparison
+ | comparison LT comparison
+ | comparison EQ comparison
+ | comparison GT comparison
+ | PLUS comparison
+ | MINUS comparison
+ | power"""
+ if len(p) == 4:
+ p[0] = binary_ops[p[2]]((p[1], p[3]))
+ elif len(p) == 3:
+ p[0] = unary_ops[p[1]](p[2])
+ else:
+ p[0] = p[1]
+
+# power: atom trailer* ['**' factor]
+# 'trailer' enables function calls. I only allow one level of calls,
+# so this is 'trailer' rather than 'trailer*'
+
+
+def p_power(p):
+ """power : atom
+ | atom trailer"""
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ if p[2][0] == "CALL":
+ p[0] = ast.CallFunc(p[1], p[2][1], None, None)
+ else:
+ raise AssertionError("not implemented")
+
+
+def p_atom_name(p):
+ """atom : NAME"""
+ p[0] = ast.Name(p[1])
+
+
+def p_atom_number(p):
+ """atom : NUMBER
+ | STRING"""
+ p[0] = ast.Const(p[1])
+
+
+def p_atom_tuple(p):
+ """atom : LPAR testlist RPAR"""
+ p[0] = p[2]
+
+# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
+
+
+def p_trailer(p):
+ "trailer : LPAR arglist RPAR"
+ p[0] = ("CALL", p[2])
+
+# testlist: test (',' test)* [',']
+# Contains shift/reduce error
+
+
+def p_testlist(p):
+ """testlist : testlist_multi COMMA
+ | testlist_multi """
+ if len(p) == 2:
+ p[0] = p[1]
+ else:
+ # May need to promote singleton to tuple
+ if isinstance(p[1], list):
+ p[0] = p[1]
+ else:
+ p[0] = [p[1]]
+ # Convert into a tuple?
+ if isinstance(p[0], list):
+ p[0] = ast.Tuple(p[0])
+
+
+def p_testlist_multi(p):
+ """testlist_multi : testlist_multi COMMA test
+ | test"""
+ if len(p) == 2:
+ # singleton
+ p[0] = p[1]
+ else:
+ if isinstance(p[1], list):
+ p[0] = p[1] + [p[3]]
+ else:
+ # singleton -> tuple
+ p[0] = [p[1], p[3]]
+
+
+# test: or_test ['if' or_test 'else' test] | lambdef
+# as I don't support 'and', 'or', and 'not' this works down to 'comparison'
+def p_test(p):
+ "test : comparison"
+ p[0] = p[1]
+
+
+# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test)
+# XXX INCOMPLETE: this doesn't allow the trailing comma
+def p_arglist(p):
+ """arglist : arglist COMMA argument
+ | argument"""
+ if len(p) == 4:
+ p[0] = p[1] + [p[3]]
+ else:
+ p[0] = [p[1]]
+
+# argument: test [gen_for] | test '=' test # Really [keyword '='] test
+
+
+def p_argument(p):
+ "argument : test"
+ p[0] = p[1]
+
+
+def p_error(p):
+ # print "Error!", repr(p)
+ raise SyntaxError(p)
+
+
+class GardenSnakeParser(object):
+
+ def __init__(self, lexer=None):
+ if lexer is None:
+ lexer = IndentLexer()
+ self.lexer = lexer
+ self.parser = yacc.yacc(start="file_input_end")
+
+ def parse(self, code):
+ self.lexer.input(code)
+ result = self.parser.parse(lexer=self.lexer)
+ return ast.Module(None, result)
+
+
+###### Code generation ######
+
+from compiler import misc, syntax, pycodegen
+
+
+class GardenSnakeCompiler(object):
+
+ def __init__(self):
+ self.parser = GardenSnakeParser()
+
+ def compile(self, code, filename="<string>"):
+ tree = self.parser.parse(code)
+ # print tree
+ misc.set_filename(filename, tree)
+ syntax.check(tree)
+ gen = pycodegen.ModuleCodeGenerator(tree)
+ code = gen.getCode()
+ return code
+
+####### Test code #######
+
+compile = GardenSnakeCompiler().compile
+
+code = r"""
+
+print('LET\'S TRY THIS \\OUT')
+
+#Comment here
+def x(a):
+ print('called with',a)
+ if a == 1:
+ return 2
+ if a*2 > 10: return 999 / 4
+ # Another comment here
+
+ return a+2*3
+
+ints = (1, 2,
+ 3, 4,
+5)
+print('multiline-expression', ints)
+
+t = 4+1/3*2+6*(9-5+1)
+print('precedence test; should be 34+2/3:', t, t==(34+2/3))
+
+print('numbers', 1,2,3,4,5)
+if 1:
+ 8
+ a=9
+ print(x(a))
+
+print(x(1))
+print(x(2))
+print(x(8),'3')
+print('this is decimal', 1/5)
+print('BIG DECIMAL', 1.234567891234567e12345)
+
+"""
+
+# Set up the GardenSnake run-time environment
+
+
+def print_(*args):
+ print "-->", " ".join(map(str, args))
+
+globals()["print"] = print_
+
+compiled_code = compile(code)
+
+exec compiled_code in globals()
+print "Done"
diff --git a/third_party/python/ply/example/GardenSnake/README b/third_party/python/ply/example/GardenSnake/README
new file mode 100644
index 0000000000..4d8be2db05
--- /dev/null
+++ b/third_party/python/ply/example/GardenSnake/README
@@ -0,0 +1,5 @@
+This example is Andrew Dalke's GardenSnake language. It shows how to process an
+indentation-based language like Python. Further details can be found here:
+
+http://dalkescientific.com/writings/diary/archive/2006/08/30/gardensnake_language.html
+
diff --git a/third_party/python/ply/example/README b/third_party/python/ply/example/README
new file mode 100644
index 0000000000..63519b557f
--- /dev/null
+++ b/third_party/python/ply/example/README
@@ -0,0 +1,10 @@
+Simple examples:
+ calc - Simple calculator
+   classcalc       - Simple calculator defined as a class
+
+Complex examples:
+ ansic - ANSI C grammar from K&R
+ BASIC - A small BASIC interpreter
+   GardenSnake     - A simple Python-like language
+ yply - Converts Unix yacc files to PLY programs.
+
diff --git a/third_party/python/ply/example/ansic/README b/third_party/python/ply/example/ansic/README
new file mode 100644
index 0000000000..e049d3b4e4
--- /dev/null
+++ b/third_party/python/ply/example/ansic/README
@@ -0,0 +1,2 @@
+This example is incomplete. It was going to specify an ANSI C parser;
+this is part of it.
diff --git a/third_party/python/ply/example/ansic/clex.py b/third_party/python/ply/example/ansic/clex.py
new file mode 100644
index 0000000000..4bde1d730b
--- /dev/null
+++ b/third_party/python/ply/example/ansic/clex.py
@@ -0,0 +1,168 @@
+# ----------------------------------------------------------------------
+# clex.py
+#
+# A lexer for ANSI C.
+# ----------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+import ply.lex as lex
+
+# Reserved words
+reserved = (
+ 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE',
+ 'ELSE', 'ENUM', 'EXTERN', 'FLOAT', 'FOR', 'GOTO', 'IF', 'INT', 'LONG', 'REGISTER',
+ 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'SWITCH', 'TYPEDEF',
+ 'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WHILE',
+)
+
+tokens = reserved + (
+ # Literals (identifier, integer constant, float constant, string constant,
+ # char const)
+ 'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST',
+
+ # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
+ 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
+ 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
+ 'LOR', 'LAND', 'LNOT',
+ 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
+
+ # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
+ 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
+ 'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
+
+ # Increment/decrement (++,--)
+ 'PLUSPLUS', 'MINUSMINUS',
+
+ # Structure dereference (->)
+ 'ARROW',
+
+ # Conditional operator (?)
+ 'CONDOP',
+
+    # Delimiters ( ) [ ] { } , . ; :
+ 'LPAREN', 'RPAREN',
+ 'LBRACKET', 'RBRACKET',
+ 'LBRACE', 'RBRACE',
+ 'COMMA', 'PERIOD', 'SEMI', 'COLON',
+
+ # Ellipsis (...)
+ 'ELLIPSIS',
+)
+
+# Completely ignored characters
+t_ignore = ' \t\x0c'
+
+# Newlines
+
+
+def t_NEWLINE(t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+# Operators
+t_PLUS = r'\+'
+t_MINUS = r'-'
+t_TIMES = r'\*'
+t_DIVIDE = r'/'
+t_MOD = r'%'
+t_OR = r'\|'
+t_AND = r'&'
+t_NOT = r'~'
+t_XOR = r'\^'
+t_LSHIFT = r'<<'
+t_RSHIFT = r'>>'
+t_LOR = r'\|\|'
+t_LAND = r'&&'
+t_LNOT = r'!'
+t_LT = r'<'
+t_GT = r'>'
+t_LE = r'<='
+t_GE = r'>='
+t_EQ = r'=='
+t_NE = r'!='
+
+# Assignment operators
+
+t_EQUALS = r'='
+t_TIMESEQUAL = r'\*='
+t_DIVEQUAL = r'/='
+t_MODEQUAL = r'%='
+t_PLUSEQUAL = r'\+='
+t_MINUSEQUAL = r'-='
+t_LSHIFTEQUAL = r'<<='
+t_RSHIFTEQUAL = r'>>='
+t_ANDEQUAL = r'&='
+t_OREQUAL = r'\|='
+t_XOREQUAL = r'\^='
+
+# Increment/decrement
+t_PLUSPLUS = r'\+\+'
+t_MINUSMINUS = r'--'
+
+# ->
+t_ARROW = r'->'
+
+# ?
+t_CONDOP = r'\?'
+
+# Delimiters
+t_LPAREN = r'\('
+t_RPAREN = r'\)'
+t_LBRACKET = r'\['
+t_RBRACKET = r'\]'
+t_LBRACE = r'\{'
+t_RBRACE = r'\}'
+t_COMMA = r','
+t_PERIOD = r'\.'
+t_SEMI = r';'
+t_COLON = r':'
+t_ELLIPSIS = r'\.\.\.'
+
+# Identifiers and reserved words
+
+reserved_map = {}
+for r in reserved:
+ reserved_map[r.lower()] = r
+
+
+def t_ID(t):
+ r'[A-Za-z_][\w_]*'
+ t.type = reserved_map.get(t.value, "ID")
+ return t
+
+# Integer literal
+t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
+
+# Floating literal
+t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+t_SCONST = r'\"([^\\\n]|(\\.))*?\"'
+
+# Character constant 'c' or L'c'
+t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\''
+
+# Comments
+
+
+def t_comment(t):
+ r'/\*(.|\n)*?\*/'
+ t.lexer.lineno += t.value.count('\n')
+
+# Preprocessor directive (ignored)
+
+
+def t_preprocessor(t):
+ r'\#(.)*?\n'
+ t.lexer.lineno += 1
+
+
+def t_error(t):
+ print("Illegal character %s" % repr(t.value[0]))
+ t.lexer.skip(1)
+
+lexer = lex.lex()
+if __name__ == "__main__":
+ lex.runmain(lexer)
diff --git a/third_party/python/ply/example/ansic/cparse.py b/third_party/python/ply/example/ansic/cparse.py
new file mode 100644
index 0000000000..5fe9bce042
--- /dev/null
+++ b/third_party/python/ply/example/ansic/cparse.py
@@ -0,0 +1,1048 @@
+# -----------------------------------------------------------------------------
+# cparse.py
+#
+# Simple parser for ANSI C. Based on the grammar in K&R, 2nd Ed.
+# -----------------------------------------------------------------------------
+
+import sys
+import clex
+import ply.yacc as yacc
+
+# Get the token map
+tokens = clex.tokens
+
+# translation-unit:
+
+
+def p_translation_unit_1(t):
+ 'translation_unit : external_declaration'
+ pass
+
+
+def p_translation_unit_2(t):
+ 'translation_unit : translation_unit external_declaration'
+ pass
+
+# external-declaration:
+
+
+def p_external_declaration_1(t):
+ 'external_declaration : function_definition'
+ pass
+
+
+def p_external_declaration_2(t):
+ 'external_declaration : declaration'
+ pass
+
+# function-definition:
+
+
+def p_function_definition_1(t):
+ 'function_definition : declaration_specifiers declarator declaration_list compound_statement'
+ pass
+
+
+def p_function_definition_2(t):
+ 'function_definition : declarator declaration_list compound_statement'
+ pass
+
+
+def p_function_definition_3(t):
+ 'function_definition : declarator compound_statement'
+ pass
+
+
+def p_function_definition_4(t):
+ 'function_definition : declaration_specifiers declarator compound_statement'
+ pass
+
+# declaration:
+
+
+def p_declaration_1(t):
+ 'declaration : declaration_specifiers init_declarator_list SEMI'
+ pass
+
+
+def p_declaration_2(t):
+ 'declaration : declaration_specifiers SEMI'
+ pass
+
+# declaration-list:
+
+
+def p_declaration_list_1(t):
+ 'declaration_list : declaration'
+ pass
+
+
+def p_declaration_list_2(t):
+ 'declaration_list : declaration_list declaration '
+ pass
+
+# declaration-specifiers
+
+
+def p_declaration_specifiers_1(t):
+ 'declaration_specifiers : storage_class_specifier declaration_specifiers'
+ pass
+
+
+def p_declaration_specifiers_2(t):
+ 'declaration_specifiers : type_specifier declaration_specifiers'
+ pass
+
+
+def p_declaration_specifiers_3(t):
+ 'declaration_specifiers : type_qualifier declaration_specifiers'
+ pass
+
+
+def p_declaration_specifiers_4(t):
+ 'declaration_specifiers : storage_class_specifier'
+ pass
+
+
+def p_declaration_specifiers_5(t):
+ 'declaration_specifiers : type_specifier'
+ pass
+
+
+def p_declaration_specifiers_6(t):
+ 'declaration_specifiers : type_qualifier'
+ pass
+
+# storage-class-specifier
+
+
+def p_storage_class_specifier(t):
+ '''storage_class_specifier : AUTO
+ | REGISTER
+ | STATIC
+ | EXTERN
+ | TYPEDEF
+ '''
+ pass
+
+# type-specifier:
+
+
+def p_type_specifier(t):
+ '''type_specifier : VOID
+ | CHAR
+ | SHORT
+ | INT
+ | LONG
+ | FLOAT
+ | DOUBLE
+ | SIGNED
+ | UNSIGNED
+ | struct_or_union_specifier
+ | enum_specifier
+ | TYPEID
+ '''
+ pass
+
+# type-qualifier:
+
+
+def p_type_qualifier(t):
+ '''type_qualifier : CONST
+ | VOLATILE'''
+ pass
+
+# struct-or-union-specifier
+
+
+def p_struct_or_union_specifier_1(t):
+ 'struct_or_union_specifier : struct_or_union ID LBRACE struct_declaration_list RBRACE'
+ pass
+
+
+def p_struct_or_union_specifier_2(t):
+ 'struct_or_union_specifier : struct_or_union LBRACE struct_declaration_list RBRACE'
+ pass
+
+
+def p_struct_or_union_specifier_3(t):
+ 'struct_or_union_specifier : struct_or_union ID'
+ pass
+
+# struct-or-union:
+
+
+def p_struct_or_union(t):
+ '''struct_or_union : STRUCT
+ | UNION
+ '''
+ pass
+
+# struct-declaration-list:
+
+
+def p_struct_declaration_list_1(t):
+ 'struct_declaration_list : struct_declaration'
+ pass
+
+
+def p_struct_declaration_list_2(t):
+ 'struct_declaration_list : struct_declaration_list struct_declaration'
+ pass
+
+# init-declarator-list:
+
+
+def p_init_declarator_list_1(t):
+ 'init_declarator_list : init_declarator'
+ pass
+
+
+def p_init_declarator_list_2(t):
+ 'init_declarator_list : init_declarator_list COMMA init_declarator'
+ pass
+
+# init-declarator
+
+
+def p_init_declarator_1(t):
+ 'init_declarator : declarator'
+ pass
+
+
+def p_init_declarator_2(t):
+ 'init_declarator : declarator EQUALS initializer'
+ pass
+
+# struct-declaration:
+
+
+def p_struct_declaration(t):
+ 'struct_declaration : specifier_qualifier_list struct_declarator_list SEMI'
+ pass
+
+# specifier-qualifier-list:
+
+
+def p_specifier_qualifier_list_1(t):
+ 'specifier_qualifier_list : type_specifier specifier_qualifier_list'
+ pass
+
+
+def p_specifier_qualifier_list_2(t):
+ 'specifier_qualifier_list : type_specifier'
+ pass
+
+
+def p_specifier_qualifier_list_3(t):
+ 'specifier_qualifier_list : type_qualifier specifier_qualifier_list'
+ pass
+
+
+def p_specifier_qualifier_list_4(t):
+ 'specifier_qualifier_list : type_qualifier'
+ pass
+
+# struct-declarator-list:
+
+
+def p_struct_declarator_list_1(t):
+ 'struct_declarator_list : struct_declarator'
+ pass
+
+
+def p_struct_declarator_list_2(t):
+ 'struct_declarator_list : struct_declarator_list COMMA struct_declarator'
+ pass
+
+# struct-declarator:
+
+
+def p_struct_declarator_1(t):
+ 'struct_declarator : declarator'
+ pass
+
+
+def p_struct_declarator_2(t):
+ 'struct_declarator : declarator COLON constant_expression'
+ pass
+
+
+def p_struct_declarator_3(t):
+ 'struct_declarator : COLON constant_expression'
+ pass
+
+# enum-specifier:
+
+
+def p_enum_specifier_1(t):
+ 'enum_specifier : ENUM ID LBRACE enumerator_list RBRACE'
+ pass
+
+
+def p_enum_specifier_2(t):
+ 'enum_specifier : ENUM LBRACE enumerator_list RBRACE'
+ pass
+
+
+def p_enum_specifier_3(t):
+ 'enum_specifier : ENUM ID'
+ pass
+
+# enumerator_list:
+
+
+def p_enumerator_list_1(t):
+ 'enumerator_list : enumerator'
+ pass
+
+
+def p_enumerator_list_2(t):
+ 'enumerator_list : enumerator_list COMMA enumerator'
+ pass
+
+# enumerator:
+
+
+def p_enumerator_1(t):
+ 'enumerator : ID'
+ pass
+
+
+def p_enumerator_2(t):
+ 'enumerator : ID EQUALS constant_expression'
+ pass
+
+# declarator:
+
+
+def p_declarator_1(t):
+ 'declarator : pointer direct_declarator'
+ pass
+
+
+def p_declarator_2(t):
+ 'declarator : direct_declarator'
+ pass
+
+# direct-declarator:
+
+
+def p_direct_declarator_1(t):
+ 'direct_declarator : ID'
+ pass
+
+
+def p_direct_declarator_2(t):
+ 'direct_declarator : LPAREN declarator RPAREN'
+ pass
+
+
+def p_direct_declarator_3(t):
+ 'direct_declarator : direct_declarator LBRACKET constant_expression_opt RBRACKET'
+ pass
+
+
+def p_direct_declarator_4(t):
+ 'direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN '
+ pass
+
+
+def p_direct_declarator_5(t):
+ 'direct_declarator : direct_declarator LPAREN identifier_list RPAREN '
+ pass
+
+
+def p_direct_declarator_6(t):
+ 'direct_declarator : direct_declarator LPAREN RPAREN '
+ pass
+
+# pointer:
+
+
+def p_pointer_1(t):
+ 'pointer : TIMES type_qualifier_list'
+ pass
+
+
+def p_pointer_2(t):
+ 'pointer : TIMES'
+ pass
+
+
+def p_pointer_3(t):
+ 'pointer : TIMES type_qualifier_list pointer'
+ pass
+
+
+def p_pointer_4(t):
+ 'pointer : TIMES pointer'
+ pass
+
+# type-qualifier-list:
+
+
+def p_type_qualifier_list_1(t):
+ 'type_qualifier_list : type_qualifier'
+ pass
+
+
+def p_type_qualifier_list_2(t):
+ 'type_qualifier_list : type_qualifier_list type_qualifier'
+ pass
+
+# parameter-type-list:
+
+
+def p_parameter_type_list_1(t):
+ 'parameter_type_list : parameter_list'
+ pass
+
+
+def p_parameter_type_list_2(t):
+ 'parameter_type_list : parameter_list COMMA ELLIPSIS'
+ pass
+
+# parameter-list:
+
+
+def p_parameter_list_1(t):
+ 'parameter_list : parameter_declaration'
+ pass
+
+
+def p_parameter_list_2(t):
+ 'parameter_list : parameter_list COMMA parameter_declaration'
+ pass
+
+# parameter-declaration:
+
+
+def p_parameter_declaration_1(t):
+ 'parameter_declaration : declaration_specifiers declarator'
+ pass
+
+
+def p_parameter_declaration_2(t):
+ 'parameter_declaration : declaration_specifiers abstract_declarator_opt'
+ pass
+
+# identifier-list:
+
+
+def p_identifier_list_1(t):
+ 'identifier_list : ID'
+ pass
+
+
+def p_identifier_list_2(t):
+ 'identifier_list : identifier_list COMMA ID'
+ pass
+
+# initializer:
+
+
+def p_initializer_1(t):
+ 'initializer : assignment_expression'
+ pass
+
+
+def p_initializer_2(t):
+ '''initializer : LBRACE initializer_list RBRACE
+ | LBRACE initializer_list COMMA RBRACE'''
+ pass
+
+# initializer-list:
+
+
+def p_initializer_list_1(t):
+ 'initializer_list : initializer'
+ pass
+
+
+def p_initializer_list_2(t):
+ 'initializer_list : initializer_list COMMA initializer'
+ pass
+
+# type-name:
+
+
+def p_type_name(t):
+ 'type_name : specifier_qualifier_list abstract_declarator_opt'
+ pass
+
+
+def p_abstract_declarator_opt_1(t):
+ 'abstract_declarator_opt : empty'
+ pass
+
+
+def p_abstract_declarator_opt_2(t):
+ 'abstract_declarator_opt : abstract_declarator'
+ pass
+
+# abstract-declarator:
+
+
+def p_abstract_declarator_1(t):
+ 'abstract_declarator : pointer '
+ pass
+
+
+def p_abstract_declarator_2(t):
+ 'abstract_declarator : pointer direct_abstract_declarator'
+ pass
+
+
+def p_abstract_declarator_3(t):
+ 'abstract_declarator : direct_abstract_declarator'
+ pass
+
+# direct-abstract-declarator:
+
+
+def p_direct_abstract_declarator_1(t):
+ 'direct_abstract_declarator : LPAREN abstract_declarator RPAREN'
+ pass
+
+
+def p_direct_abstract_declarator_2(t):
+ 'direct_abstract_declarator : direct_abstract_declarator LBRACKET constant_expression_opt RBRACKET'
+ pass
+
+
+def p_direct_abstract_declarator_3(t):
+ 'direct_abstract_declarator : LBRACKET constant_expression_opt RBRACKET'
+ pass
+
+
+def p_direct_abstract_declarator_4(t):
+ 'direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN'
+ pass
+
+
+def p_direct_abstract_declarator_5(t):
+ 'direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN'
+ pass
+
+# Optional fields in abstract declarators
+
+
+def p_constant_expression_opt_1(t):
+ 'constant_expression_opt : empty'
+ pass
+
+
+def p_constant_expression_opt_2(t):
+ 'constant_expression_opt : constant_expression'
+ pass
+
+
+def p_parameter_type_list_opt_1(t):
+ 'parameter_type_list_opt : empty'
+ pass
+
+
+def p_parameter_type_list_opt_2(t):
+ 'parameter_type_list_opt : parameter_type_list'
+ pass
+
+# statement:
+
+
+def p_statement(t):
+ '''
+ statement : labeled_statement
+ | expression_statement
+ | compound_statement
+ | selection_statement
+ | iteration_statement
+ | jump_statement
+ '''
+ pass
+
+# labeled-statement:
+
+
+def p_labeled_statement_1(t):
+ 'labeled_statement : ID COLON statement'
+ pass
+
+
+def p_labeled_statement_2(t):
+ 'labeled_statement : CASE constant_expression COLON statement'
+ pass
+
+
+def p_labeled_statement_3(t):
+ 'labeled_statement : DEFAULT COLON statement'
+ pass
+
+# expression-statement:
+
+
+def p_expression_statement(t):
+ 'expression_statement : expression_opt SEMI'
+ pass
+
+# compound-statement:
+
+
+def p_compound_statement_1(t):
+ 'compound_statement : LBRACE declaration_list statement_list RBRACE'
+ pass
+
+
+def p_compound_statement_2(t):
+ 'compound_statement : LBRACE statement_list RBRACE'
+ pass
+
+
+def p_compound_statement_3(t):
+ 'compound_statement : LBRACE declaration_list RBRACE'
+ pass
+
+
+def p_compound_statement_4(t):
+ 'compound_statement : LBRACE RBRACE'
+ pass
+
+# statement-list:
+
+
+def p_statement_list_1(t):
+ 'statement_list : statement'
+ pass
+
+
+def p_statement_list_2(t):
+ 'statement_list : statement_list statement'
+ pass
+
+# selection-statement
+
+
+def p_selection_statement_1(t):
+ 'selection_statement : IF LPAREN expression RPAREN statement'
+ pass
+
+
+def p_selection_statement_2(t):
+ 'selection_statement : IF LPAREN expression RPAREN statement ELSE statement '
+ pass
+
+
+def p_selection_statement_3(t):
+ 'selection_statement : SWITCH LPAREN expression RPAREN statement '
+ pass
+
+# iteration_statement:
+
+
+def p_iteration_statement_1(t):
+ 'iteration_statement : WHILE LPAREN expression RPAREN statement'
+ pass
+
+
+def p_iteration_statement_2(t):
+ 'iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement '
+ pass
+
+
+def p_iteration_statement_3(t):
+ 'iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI'
+ pass
+
+# jump_statement:
+
+
+def p_jump_statement_1(t):
+ 'jump_statement : GOTO ID SEMI'
+ pass
+
+
+def p_jump_statement_2(t):
+ 'jump_statement : CONTINUE SEMI'
+ pass
+
+
+def p_jump_statement_3(t):
+ 'jump_statement : BREAK SEMI'
+ pass
+
+
+def p_jump_statement_4(t):
+ 'jump_statement : RETURN expression_opt SEMI'
+ pass
+
+
+def p_expression_opt_1(t):
+ 'expression_opt : empty'
+ pass
+
+
+def p_expression_opt_2(t):
+ 'expression_opt : expression'
+ pass
+
+# expression:
+
+
+def p_expression_1(t):
+ 'expression : assignment_expression'
+ pass
+
+
+def p_expression_2(t):
+ 'expression : expression COMMA assignment_expression'
+ pass
+
+# assignment_expression:
+
+
+def p_assignment_expression_1(t):
+ 'assignment_expression : conditional_expression'
+ pass
+
+
+def p_assignment_expression_2(t):
+ 'assignment_expression : unary_expression assignment_operator assignment_expression'
+ pass
+
+# assignment_operator:
+
+
+def p_assignment_operator(t):
+ '''
+ assignment_operator : EQUALS
+ | TIMESEQUAL
+ | DIVEQUAL
+ | MODEQUAL
+ | PLUSEQUAL
+ | MINUSEQUAL
+ | LSHIFTEQUAL
+ | RSHIFTEQUAL
+ | ANDEQUAL
+ | OREQUAL
+ | XOREQUAL
+ '''
+ pass
+
+# conditional-expression
+
+
+def p_conditional_expression_1(t):
+ 'conditional_expression : logical_or_expression'
+ pass
+
+
+def p_conditional_expression_2(t):
+ 'conditional_expression : logical_or_expression CONDOP expression COLON conditional_expression '
+ pass
+
+# constant-expression
+
+
+def p_constant_expression(t):
+ 'constant_expression : conditional_expression'
+ pass
+
+# logical-or-expression
+
+
+def p_logical_or_expression_1(t):
+ 'logical_or_expression : logical_and_expression'
+ pass
+
+
+def p_logical_or_expression_2(t):
+ 'logical_or_expression : logical_or_expression LOR logical_and_expression'
+ pass
+
+# logical-and-expression
+
+
+def p_logical_and_expression_1(t):
+ 'logical_and_expression : inclusive_or_expression'
+ pass
+
+
+def p_logical_and_expression_2(t):
+ 'logical_and_expression : logical_and_expression LAND inclusive_or_expression'
+ pass
+
+# inclusive-or-expression:
+
+
+def p_inclusive_or_expression_1(t):
+ 'inclusive_or_expression : exclusive_or_expression'
+ pass
+
+
+def p_inclusive_or_expression_2(t):
+ 'inclusive_or_expression : inclusive_or_expression OR exclusive_or_expression'
+ pass
+
+# exclusive-or-expression:
+
+
+def p_exclusive_or_expression_1(t):
+ 'exclusive_or_expression : and_expression'
+ pass
+
+
+def p_exclusive_or_expression_2(t):
+ 'exclusive_or_expression : exclusive_or_expression XOR and_expression'
+ pass
+
+# AND-expression
+
+
+def p_and_expression_1(t):
+ 'and_expression : equality_expression'
+ pass
+
+
+def p_and_expression_2(t):
+ 'and_expression : and_expression AND equality_expression'
+ pass
+
+
+# equality-expression:
+def p_equality_expression_1(t):
+ 'equality_expression : relational_expression'
+ pass
+
+
+def p_equality_expression_2(t):
+ 'equality_expression : equality_expression EQ relational_expression'
+ pass
+
+
+def p_equality_expression_3(t):
+ 'equality_expression : equality_expression NE relational_expression'
+ pass
+
+
+# relational-expression:
+def p_relational_expression_1(t):
+ 'relational_expression : shift_expression'
+ pass
+
+
+def p_relational_expression_2(t):
+ 'relational_expression : relational_expression LT shift_expression'
+ pass
+
+
+def p_relational_expression_3(t):
+ 'relational_expression : relational_expression GT shift_expression'
+ pass
+
+
+def p_relational_expression_4(t):
+ 'relational_expression : relational_expression LE shift_expression'
+ pass
+
+
+def p_relational_expression_5(t):
+ 'relational_expression : relational_expression GE shift_expression'
+ pass
+
+# shift-expression
+
+
+def p_shift_expression_1(t):
+ 'shift_expression : additive_expression'
+ pass
+
+
+def p_shift_expression_2(t):
+ 'shift_expression : shift_expression LSHIFT additive_expression'
+ pass
+
+
+def p_shift_expression_3(t):
+ 'shift_expression : shift_expression RSHIFT additive_expression'
+ pass
+
+# additive-expression
+
+
+def p_additive_expression_1(t):
+ 'additive_expression : multiplicative_expression'
+ pass
+
+
+def p_additive_expression_2(t):
+ 'additive_expression : additive_expression PLUS multiplicative_expression'
+ pass
+
+
+def p_additive_expression_3(t):
+ 'additive_expression : additive_expression MINUS multiplicative_expression'
+ pass
+
+# multiplicative-expression
+
+
+def p_multiplicative_expression_1(t):
+ 'multiplicative_expression : cast_expression'
+ pass
+
+
+def p_multiplicative_expression_2(t):
+ 'multiplicative_expression : multiplicative_expression TIMES cast_expression'
+ pass
+
+
+def p_multiplicative_expression_3(t):
+ 'multiplicative_expression : multiplicative_expression DIVIDE cast_expression'
+ pass
+
+
+def p_multiplicative_expression_4(t):
+ 'multiplicative_expression : multiplicative_expression MOD cast_expression'
+ pass
+
+# cast-expression:
+
+
+def p_cast_expression_1(t):
+ 'cast_expression : unary_expression'
+ pass
+
+
+def p_cast_expression_2(t):
+ 'cast_expression : LPAREN type_name RPAREN cast_expression'
+ pass
+
+# unary-expression:
+
+
+def p_unary_expression_1(t):
+ 'unary_expression : postfix_expression'
+ pass
+
+
+def p_unary_expression_2(t):
+ 'unary_expression : PLUSPLUS unary_expression'
+ pass
+
+
+def p_unary_expression_3(t):
+ 'unary_expression : MINUSMINUS unary_expression'
+ pass
+
+
+def p_unary_expression_4(t):
+ 'unary_expression : unary_operator cast_expression'
+ pass
+
+
+def p_unary_expression_5(t):
+ 'unary_expression : SIZEOF unary_expression'
+ pass
+
+
+def p_unary_expression_6(t):
+ 'unary_expression : SIZEOF LPAREN type_name RPAREN'
+ pass
+
+# unary-operator
+
+
+def p_unary_operator(t):
+ '''unary_operator : AND
+ | TIMES
+ | PLUS
+ | MINUS
+ | NOT
+ | LNOT '''
+ pass
+
+# postfix-expression:
+
+
+def p_postfix_expression_1(t):
+ 'postfix_expression : primary_expression'
+ pass
+
+
+def p_postfix_expression_2(t):
+ 'postfix_expression : postfix_expression LBRACKET expression RBRACKET'
+ pass
+
+
+def p_postfix_expression_3(t):
+ 'postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN'
+ pass
+
+
+def p_postfix_expression_4(t):
+ 'postfix_expression : postfix_expression LPAREN RPAREN'
+ pass
+
+
+def p_postfix_expression_5(t):
+ 'postfix_expression : postfix_expression PERIOD ID'
+ pass
+
+
+def p_postfix_expression_6(t):
+ 'postfix_expression : postfix_expression ARROW ID'
+ pass
+
+
+def p_postfix_expression_7(t):
+ 'postfix_expression : postfix_expression PLUSPLUS'
+ pass
+
+
+def p_postfix_expression_8(t):
+ 'postfix_expression : postfix_expression MINUSMINUS'
+ pass
+
+# primary-expression:
+
+
+def p_primary_expression(t):
+ '''primary_expression : ID
+ | constant
+ | SCONST
+ | LPAREN expression RPAREN'''
+ pass
+
+# argument-expression-list:
+
+
+def p_argument_expression_list(t):
+ '''argument_expression_list : assignment_expression
+ | argument_expression_list COMMA assignment_expression'''
+ pass
+
+# constant:
+
+
+def p_constant(t):
+ '''constant : ICONST
+ | FCONST
+ | CCONST'''
+ pass
+
+
+def p_empty(t):
+ 'empty : '
+ pass
+
+
+def p_error(t):
+ print("Whoa. We're hosed")
+
+# import profile   # only needed by the commented-out profiling call below
+# Build the grammar
+
+yacc.yacc()
+#yacc.yacc(method='LALR',write_tables=False,debug=False)
+
+#profile.run("yacc.yacc(method='LALR')")
diff --git a/third_party/python/ply/example/calc/calc.py b/third_party/python/ply/example/calc/calc.py
new file mode 100644
index 0000000000..824c3d7d0a
--- /dev/null
+++ b/third_party/python/ply/example/calc/calc.py
@@ -0,0 +1,123 @@
+# -----------------------------------------------------------------------------
+# calc.py
+#
+# A simple calculator with variables. This is from O'Reilly's
+# "Lex and Yacc", p. 63.
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+tokens = (
+ 'NAME', 'NUMBER',
+)
+
+literals = ['=', '+', '-', '*', '/', '(', ')']
+
+# Tokens
+
+t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+
+def t_NUMBER(t):
+ r'\d+'
+ t.value = int(t.value)
+ return t
+
+t_ignore = " \t"
+
+
+def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+
+def t_error(t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+# Build the lexer
+import ply.lex as lex
+lex.lex()
+
+# Parsing rules
+
+precedence = (
+ ('left', '+', '-'),
+ ('left', '*', '/'),
+ ('right', 'UMINUS'),
+)
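+# 'UMINUS' is not a real token; it only names a precedence level, which the
+# unary-minus rule below selects via %prec UMINUS so -x * y parses as (-x) * y.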
+
+# dictionary of names
+names = {}
+
+
+def p_statement_assign(p):
+ 'statement : NAME "=" expression'
+ names[p[1]] = p[3]
+
+
+def p_statement_expr(p):
+ 'statement : expression'
+ print(p[1])
+
+
+def p_expression_binop(p):
+ '''expression : expression '+' expression
+ | expression '-' expression
+ | expression '*' expression
+ | expression '/' expression'''
+ if p[2] == '+':
+ p[0] = p[1] + p[3]
+ elif p[2] == '-':
+ p[0] = p[1] - p[3]
+ elif p[2] == '*':
+ p[0] = p[1] * p[3]
+ elif p[2] == '/':
+ p[0] = p[1] / p[3]
+
+
+def p_expression_uminus(p):
+ "expression : '-' expression %prec UMINUS"
+ p[0] = -p[2]
+
+
+def p_expression_group(p):
+ "expression : '(' expression ')'"
+ p[0] = p[2]
+
+
+def p_expression_number(p):
+ "expression : NUMBER"
+ p[0] = p[1]
+
+
+def p_expression_name(p):
+ "expression : NAME"
+ try:
+ p[0] = names[p[1]]
+ except LookupError:
+ print("Undefined name '%s'" % p[1])
+ p[0] = 0
+
+
+def p_error(p):
+ if p:
+ print("Syntax error at '%s'" % p.value)
+ else:
+ print("Syntax error at EOF")
+
+import ply.yacc as yacc
+yacc.yacc()
+
+while 1:
+ try:
+ s = raw_input('calc > ')
+ except EOFError:
+ break
+ if not s:
+ continue
+ yacc.parse(s)
diff --git a/third_party/python/ply/example/calcdebug/calc.py b/third_party/python/ply/example/calcdebug/calc.py
new file mode 100644
index 0000000000..06831e2ca5
--- /dev/null
+++ b/third_party/python/ply/example/calcdebug/calc.py
@@ -0,0 +1,129 @@
+# -----------------------------------------------------------------------------
+# calc.py
+#
+# This example shows how to run the parser in a debugging mode
+# with output routed to a logging object.
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+tokens = (
+ 'NAME', 'NUMBER',
+)
+
+literals = ['=', '+', '-', '*', '/', '(', ')']
+
+# Tokens
+
+t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+
+def t_NUMBER(t):
+ r'\d+'
+ t.value = int(t.value)
+ return t
+
+t_ignore = " \t"
+
+
+def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+
+def t_error(t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+# Build the lexer
+import ply.lex as lex
+lex.lex()
+
+# Parsing rules
+
+precedence = (
+ ('left', '+', '-'),
+ ('left', '*', '/'),
+ ('right', 'UMINUS'),
+)
+
+# dictionary of names
+names = {}
+
+
+def p_statement_assign(p):
+ 'statement : NAME "=" expression'
+ names[p[1]] = p[3]
+
+
+def p_statement_expr(p):
+ 'statement : expression'
+ print(p[1])
+
+
+def p_expression_binop(p):
+ '''expression : expression '+' expression
+ | expression '-' expression
+ | expression '*' expression
+ | expression '/' expression'''
+ if p[2] == '+':
+ p[0] = p[1] + p[3]
+ elif p[2] == '-':
+ p[0] = p[1] - p[3]
+ elif p[2] == '*':
+ p[0] = p[1] * p[3]
+ elif p[2] == '/':
+ p[0] = p[1] / p[3]
+
+
+def p_expression_uminus(p):
+ "expression : '-' expression %prec UMINUS"
+ p[0] = -p[2]
+
+
+def p_expression_group(p):
+ "expression : '(' expression ')'"
+ p[0] = p[2]
+
+
+def p_expression_number(p):
+ "expression : NUMBER"
+ p[0] = p[1]
+
+
+def p_expression_name(p):
+ "expression : NAME"
+ try:
+ p[0] = names[p[1]]
+ except LookupError:
+ print("Undefined name '%s'" % p[1])
+ p[0] = 0
+
+
+def p_error(p):
+ if p:
+ print("Syntax error at '%s'" % p.value)
+ else:
+ print("Syntax error at EOF")
+
+import ply.yacc as yacc
+yacc.yacc()
+
+import logging
+logging.basicConfig(
+ level=logging.INFO,
+ filename="parselog.txt"
+)
+
+while 1:
+ try:
+ s = raw_input('calc > ')
+ except EOFError:
+ break
+ if not s:
+ continue
+ yacc.parse(s, debug=logging.getLogger())
diff --git a/third_party/python/ply/example/calceof/calc.py b/third_party/python/ply/example/calceof/calc.py
new file mode 100644
index 0000000000..22b39a41a8
--- /dev/null
+++ b/third_party/python/ply/example/calceof/calc.py
@@ -0,0 +1,132 @@
+# -----------------------------------------------------------------------------
+# calc.py
+#
+# A simple calculator with variables. Asks the user for more input and
+# demonstrates the use of the t_eof() rule.
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+tokens = (
+ 'NAME', 'NUMBER',
+)
+
+literals = ['=', '+', '-', '*', '/', '(', ')']
+
+# Tokens
+
+t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+
+def t_NUMBER(t):
+ r'\d+'
+ t.value = int(t.value)
+ return t
+
+t_ignore = " \t"
+
+
+def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+
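+# PLY calls t_eof() when the lexer runs out of input.  Returning a token
+# resumes lexing (here, with a fresh line read from the user); returning
+# None signals end-of-input to the parser.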
+def t_eof(t):
+ more = raw_input('... ')
+ if more:
+ t.lexer.input(more + '\n')
+ return t.lexer.token()
+ else:
+ return None
+
+
+def t_error(t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+# Build the lexer
+import ply.lex as lex
+lex.lex()
+
+# Parsing rules
+
+precedence = (
+ ('left', '+', '-'),
+ ('left', '*', '/'),
+ ('right', 'UMINUS'),
+)
+
+# dictionary of names
+names = {}
+
+
+def p_statement_assign(p):
+ 'statement : NAME "=" expression'
+ names[p[1]] = p[3]
+
+
+def p_statement_expr(p):
+ 'statement : expression'
+ print(p[1])
+
+
+def p_expression_binop(p):
+ '''expression : expression '+' expression
+ | expression '-' expression
+ | expression '*' expression
+ | expression '/' expression'''
+ if p[2] == '+':
+ p[0] = p[1] + p[3]
+ elif p[2] == '-':
+ p[0] = p[1] - p[3]
+ elif p[2] == '*':
+ p[0] = p[1] * p[3]
+ elif p[2] == '/':
+ p[0] = p[1] / p[3]
+
+
+def p_expression_uminus(p):
+ "expression : '-' expression %prec UMINUS"
+ p[0] = -p[2]
+
+
+def p_expression_group(p):
+ "expression : '(' expression ')'"
+ p[0] = p[2]
+
+
+def p_expression_number(p):
+ "expression : NUMBER"
+ p[0] = p[1]
+
+
+def p_expression_name(p):
+ "expression : NAME"
+ try:
+ p[0] = names[p[1]]
+ except LookupError:
+ print("Undefined name '%s'" % p[1])
+ p[0] = 0
+
+
+def p_error(p):
+ if p:
+ print("Syntax error at '%s'" % p.value)
+ else:
+ print("Syntax error at EOF")
+
+import ply.yacc as yacc
+yacc.yacc()
+
+while 1:
+ try:
+ s = raw_input('calc > ')
+ except EOFError:
+ break
+ if not s:
+ continue
+ yacc.parse(s + '\n')
diff --git a/third_party/python/ply/example/classcalc/calc.py b/third_party/python/ply/example/classcalc/calc.py
new file mode 100755
index 0000000000..ada4afd426
--- /dev/null
+++ b/third_party/python/ply/example/classcalc/calc.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+
+# -----------------------------------------------------------------------------
+# calc.py
+#
+# A simple calculator with variables. This is from O'Reilly's
+# "Lex and Yacc", p. 63.
+#
+# Class-based example contributed to PLY by David McNab
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+import ply.lex as lex
+import ply.yacc as yacc
+import os
+
+
+class Parser:
+ """
+ Base class for a lexer/parser that has the rules defined as methods
+ """
+ tokens = ()
+ precedence = ()
+
+ def __init__(self, **kw):
+ self.debug = kw.get('debug', 0)
+ self.names = {}
+ try:
+ modname = os.path.split(os.path.splitext(__file__)[0])[
+ 1] + "_" + self.__class__.__name__
+        except Exception:
+ modname = "parser" + "_" + self.__class__.__name__
+ self.debugfile = modname + ".dbg"
+ self.tabmodule = modname + "_" + "parsetab"
+ # print self.debugfile, self.tabmodule
+
+ # Build the lexer and parser
+ lex.lex(module=self, debug=self.debug)
+ yacc.yacc(module=self,
+ debug=self.debug,
+ debugfile=self.debugfile,
+ tabmodule=self.tabmodule)
+
+ def run(self):
+ while 1:
+ try:
+ s = raw_input('calc > ')
+ except EOFError:
+ break
+ if not s:
+ continue
+ yacc.parse(s)
+
+
+class Calc(Parser):
+
+ tokens = (
+ 'NAME', 'NUMBER',
+ 'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS',
+ 'LPAREN', 'RPAREN',
+ )
+
+ # Tokens
+
+ t_PLUS = r'\+'
+ t_MINUS = r'-'
+ t_EXP = r'\*\*'
+ t_TIMES = r'\*'
+ t_DIVIDE = r'/'
+ t_EQUALS = r'='
+ t_LPAREN = r'\('
+ t_RPAREN = r'\)'
+ t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+ def t_NUMBER(self, t):
+ r'\d+'
+ try:
+ t.value = int(t.value)
+ except ValueError:
+ print("Integer value too large %s" % t.value)
+ t.value = 0
+ # print "parsed number %s" % repr(t.value)
+ return t
+
+ t_ignore = " \t"
+
+ def t_newline(self, t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+ def t_error(self, t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+ # Parsing rules
+
+ precedence = (
+ ('left', 'PLUS', 'MINUS'),
+ ('left', 'TIMES', 'DIVIDE'),
+ ('left', 'EXP'),
+ ('right', 'UMINUS'),
+ )
+
+ def p_statement_assign(self, p):
+ 'statement : NAME EQUALS expression'
+ self.names[p[1]] = p[3]
+
+ def p_statement_expr(self, p):
+ 'statement : expression'
+ print(p[1])
+
+ def p_expression_binop(self, p):
+ """
+ expression : expression PLUS expression
+ | expression MINUS expression
+ | expression TIMES expression
+ | expression DIVIDE expression
+ | expression EXP expression
+ """
+ # print [repr(p[i]) for i in range(0,4)]
+ if p[2] == '+':
+ p[0] = p[1] + p[3]
+ elif p[2] == '-':
+ p[0] = p[1] - p[3]
+ elif p[2] == '*':
+ p[0] = p[1] * p[3]
+ elif p[2] == '/':
+ p[0] = p[1] / p[3]
+ elif p[2] == '**':
+ p[0] = p[1] ** p[3]
+
+ def p_expression_uminus(self, p):
+ 'expression : MINUS expression %prec UMINUS'
+ p[0] = -p[2]
+
+ def p_expression_group(self, p):
+ 'expression : LPAREN expression RPAREN'
+ p[0] = p[2]
+
+ def p_expression_number(self, p):
+ 'expression : NUMBER'
+ p[0] = p[1]
+
+ def p_expression_name(self, p):
+ 'expression : NAME'
+ try:
+ p[0] = self.names[p[1]]
+ except LookupError:
+ print("Undefined name '%s'" % p[1])
+ p[0] = 0
+
+ def p_error(self, p):
+ if p:
+ print("Syntax error at '%s'" % p.value)
+ else:
+ print("Syntax error at EOF")
+
+if __name__ == '__main__':
+ calc = Calc()
+ calc.run()
diff --git a/third_party/python/ply/example/cleanup.sh b/third_party/python/ply/example/cleanup.sh
new file mode 100755
index 0000000000..3e115f41c4
--- /dev/null
+++ b/third_party/python/ply/example/cleanup.sh
@@ -0,0 +1,2 @@
+#!/bin/sh
+rm -f */*.pyc */parsetab.py */parser.out */*~ */*.class
diff --git a/third_party/python/ply/example/closurecalc/calc.py b/third_party/python/ply/example/closurecalc/calc.py
new file mode 100644
index 0000000000..6031b05813
--- /dev/null
+++ b/third_party/python/ply/example/closurecalc/calc.py
@@ -0,0 +1,132 @@
+# -----------------------------------------------------------------------------
+# calc.py
+#
+# A calculator parser that makes use of closures. The function make_calculator()
+# returns a function that accepts an input string and returns a result. All
+# lexing rules, parsing rules, and internal state are held inside the function.
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+# Make a calculator function
+
+
+def make_calculator():
+ import ply.lex as lex
+ import ply.yacc as yacc
+
+ # ------- Internal calculator state
+
+ variables = {} # Dictionary of stored variables
+
+ # ------- Calculator tokenizing rules
+
+ tokens = (
+ 'NAME', 'NUMBER',
+ )
+
+ literals = ['=', '+', '-', '*', '/', '(', ')']
+
+ t_ignore = " \t"
+
+ t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+ def t_NUMBER(t):
+ r'\d+'
+ t.value = int(t.value)
+ return t
+
+ def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+ def t_error(t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+ # Build the lexer
+ lexer = lex.lex()
+
+ # ------- Calculator parsing rules
+
+ precedence = (
+ ('left', '+', '-'),
+ ('left', '*', '/'),
+ ('right', 'UMINUS'),
+ )
+
+ def p_statement_assign(p):
+ 'statement : NAME "=" expression'
+ variables[p[1]] = p[3]
+ p[0] = None
+
+ def p_statement_expr(p):
+ 'statement : expression'
+ p[0] = p[1]
+
+ def p_expression_binop(p):
+ '''expression : expression '+' expression
+ | expression '-' expression
+ | expression '*' expression
+ | expression '/' expression'''
+ if p[2] == '+':
+ p[0] = p[1] + p[3]
+ elif p[2] == '-':
+ p[0] = p[1] - p[3]
+ elif p[2] == '*':
+ p[0] = p[1] * p[3]
+ elif p[2] == '/':
+ p[0] = p[1] / p[3]
+
+ def p_expression_uminus(p):
+ "expression : '-' expression %prec UMINUS"
+ p[0] = -p[2]
+
+ def p_expression_group(p):
+ "expression : '(' expression ')'"
+ p[0] = p[2]
+
+ def p_expression_number(p):
+ "expression : NUMBER"
+ p[0] = p[1]
+
+ def p_expression_name(p):
+ "expression : NAME"
+ try:
+ p[0] = variables[p[1]]
+ except LookupError:
+ print("Undefined name '%s'" % p[1])
+ p[0] = 0
+
+ def p_error(p):
+ if p:
+ print("Syntax error at '%s'" % p.value)
+ else:
+ print("Syntax error at EOF")
+
+ # Build the parser
+ parser = yacc.yacc()
+
+ # ------- Input function
+
+ def input(text):
+ result = parser.parse(text, lexer=lexer)
+ return result
+
+ return input
+
+# Make a calculator object and use it
+calc = make_calculator()
+
+while True:
+ try:
+ s = raw_input("calc > ")
+ except EOFError:
+ break
+ r = calc(s)
+ if r:
+ print(r)
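+
+# A minimal sketch (beyond the loop above): because the rules close
+# over 'variables', state persists across calls to the returned
+# function:
+#
+#     calc("x = 10")          # assignment; returns None
+#     print(calc("x * 2"))    # -> 20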
diff --git a/third_party/python/ply/example/hedit/hedit.py b/third_party/python/ply/example/hedit/hedit.py
new file mode 100644
index 0000000000..32da745677
--- /dev/null
+++ b/third_party/python/ply/example/hedit/hedit.py
@@ -0,0 +1,48 @@
+# -----------------------------------------------------------------------------
+# hedit.py
+#
+# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
+#
+# These tokens can't be easily tokenized because they are of the following
+# form:
+#
+# nHc1...cn
+#
+# where n is a positive integer and c1 ... cn are characters.
+#
+# This example shows how to modify the state of the lexer to parse
+# such tokens.
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+
+tokens = (
+ 'H_EDIT_DESCRIPTOR',
+)
+
+# Tokens
+t_ignore = " \t\n"
+
+
+def t_H_EDIT_DESCRIPTOR(t):
+ r"\d+H.*" # This grabs all of the remaining text
+ i = t.value.index('H')
+ n = int(t.value[:i])  # the regex guarantees digits; int() avoids eval()
+
+ # Adjust the tokenizing position
+ t.lexer.lexpos -= len(t.value) - (i + 1 + n)
+
+ t.value = t.value[i + 1:i + 1 + n]
+ return t
+
+
+def t_error(t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+# Build the lexer
+import ply.lex as lex
+lex.lex()
+lex.runmain()
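+
+# A minimal sketch of the rule above in action:
+#
+#     lexer = lex.lex()
+#     lexer.input("3Habc 4Hwxyz")
+#     print([tok.value for tok in lexer])   # -> ['abc', 'wxyz']
+#
+# For "3Habc...", i points at 'H' and n == 3, so lexpos is moved back
+# to just past "abc" and everything after the counted text is rescanned.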
diff --git a/third_party/python/ply/example/newclasscalc/calc.py b/third_party/python/ply/example/newclasscalc/calc.py
new file mode 100755
index 0000000000..43c9506a8a
--- /dev/null
+++ b/third_party/python/ply/example/newclasscalc/calc.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python
+
+# -----------------------------------------------------------------------------
+# calc.py
+#
+# A simple calculator with variables. This is from O'Reilly's
+# "Lex and Yacc", p. 63.
+#
+# Class-based example contributed to PLY by David McNab.
+#
+# Modified to use new-style classes. Test case.
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+import ply.lex as lex
+import ply.yacc as yacc
+import os
+
+
+class Parser(object):
+ """
+ Base class for a lexer/parser that has the rules defined as methods
+ """
+ tokens = ()
+ precedence = ()
+
+ def __init__(self, **kw):
+ self.debug = kw.get('debug', 0)
+ self.names = {}
+ try:
+ modname = os.path.split(os.path.splitext(__file__)[0])[
+ 1] + "_" + self.__class__.__name__
+ except Exception:  # e.g. __file__ may be undefined in some environments
+ modname = "parser" + "_" + self.__class__.__name__
+ self.debugfile = modname + ".dbg"
+ self.tabmodule = modname + "_" + "parsetab"
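+ # Use per-class file names so that several Parser subclasses in one
+ # program don't overwrite each other's debug and parsetab files.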
+ # print self.debugfile, self.tabmodule
+
+ # Build the lexer and parser
+ lex.lex(module=self, debug=self.debug)
+ yacc.yacc(module=self,
+ debug=self.debug,
+ debugfile=self.debugfile,
+ tabmodule=self.tabmodule)
+
+ def run(self):
+ while 1:
+ try:
+ s = raw_input('calc > ')
+ except EOFError:
+ break
+ if not s:
+ continue
+ yacc.parse(s)
+
+
+class Calc(Parser):
+
+ tokens = (
+ 'NAME', 'NUMBER',
+ 'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS',
+ 'LPAREN', 'RPAREN',
+ )
+
+ # Tokens
+
+ t_PLUS = r'\+'
+ t_MINUS = r'-'
+ t_EXP = r'\*\*'
+ t_TIMES = r'\*'
+ t_DIVIDE = r'/'
+ t_EQUALS = r'='
+ t_LPAREN = r'\('
+ t_RPAREN = r'\)'
+ t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+ def t_NUMBER(self, t):
+ r'\d+'
+ try:
+ t.value = int(t.value)
+ except ValueError:
+ print("Integer value too large %s" % t.value)
+ t.value = 0
+ # print "parsed number %s" % repr(t.value)
+ return t
+
+ t_ignore = " \t"
+
+ def t_newline(self, t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+ def t_error(self, t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+ # Parsing rules
+
+ precedence = (
+ ('left', 'PLUS', 'MINUS'),
+ ('left', 'TIMES', 'DIVIDE'),
+ ('left', 'EXP'),
+ ('right', 'UMINUS'),
+ )
+
+ def p_statement_assign(self, p):
+ 'statement : NAME EQUALS expression'
+ self.names[p[1]] = p[3]
+
+ def p_statement_expr(self, p):
+ 'statement : expression'
+ print(p[1])
+
+ def p_expression_binop(self, p):
+ """
+ expression : expression PLUS expression
+ | expression MINUS expression
+ | expression TIMES expression
+ | expression DIVIDE expression
+ | expression EXP expression
+ """
+ # print [repr(p[i]) for i in range(0,4)]
+ if p[2] == '+':
+ p[0] = p[1] + p[3]
+ elif p[2] == '-':
+ p[0] = p[1] - p[3]
+ elif p[2] == '*':
+ p[0] = p[1] * p[3]
+ elif p[2] == '/':
+ p[0] = p[1] / p[3]
+ elif p[2] == '**':
+ p[0] = p[1] ** p[3]
+
+ def p_expression_uminus(self, p):
+ 'expression : MINUS expression %prec UMINUS'
+ p[0] = -p[2]
+
+ def p_expression_group(self, p):
+ 'expression : LPAREN expression RPAREN'
+ p[0] = p[2]
+
+ def p_expression_number(self, p):
+ 'expression : NUMBER'
+ p[0] = p[1]
+
+ def p_expression_name(self, p):
+ 'expression : NAME'
+ try:
+ p[0] = self.names[p[1]]
+ except LookupError:
+ print("Undefined name '%s'" % p[1])
+ p[0] = 0
+
+ def p_error(self, p):
+ if p:
+ print("Syntax error at '%s'" % p.value)
+ else:
+ print("Syntax error at EOF")
+
+if __name__ == '__main__':
+ calc = Calc()
+ calc.run()
diff --git a/third_party/python/ply/example/optcalc/README b/third_party/python/ply/example/optcalc/README
new file mode 100644
index 0000000000..53dd5fcd55
--- /dev/null
+++ b/third_party/python/ply/example/optcalc/README
@@ -0,0 +1,9 @@
+An example showing how to use Python optimized mode.
+To run:
+
+ - First run 'python calc.py'
+
+ - Then run 'python -OO calc.py'
+
+If working correctly, the second version should run the same way,
+even though -OO strips the docstrings that normally carry the token
+and grammar rules.
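+
+A sketch of what happens (assuming PLY's default table file names):
+the first run builds the tables from the rule docstrings and caches
+them in lextab.py and parsetab.py; the -OO run strips docstrings, so
+with optimize=1 PLY loads the cached tables instead of rebuilding them.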
diff --git a/third_party/python/ply/example/optcalc/calc.py b/third_party/python/ply/example/optcalc/calc.py
new file mode 100644
index 0000000000..0c223e5994
--- /dev/null
+++ b/third_party/python/ply/example/optcalc/calc.py
@@ -0,0 +1,134 @@
+# -----------------------------------------------------------------------------
+# calc.py
+#
+# A simple calculator with variables. This is from O'Reilly's
+# "Lex and Yacc", p. 63.
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+if sys.version_info[0] >= 3:
+ raw_input = input
+
+tokens = (
+ 'NAME', 'NUMBER',
+ 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',
+ 'LPAREN', 'RPAREN',
+)
+
+# Tokens
+
+t_PLUS = r'\+'
+t_MINUS = r'-'
+t_TIMES = r'\*'
+t_DIVIDE = r'/'
+t_EQUALS = r'='
+t_LPAREN = r'\('
+t_RPAREN = r'\)'
+t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
+
+
+def t_NUMBER(t):
+ r'\d+'
+ try:
+ t.value = int(t.value)
+ except ValueError:
+ print("Integer value too large %s" % t.value)
+ t.value = 0
+ return t
+
+t_ignore = " \t"
+
+
+def t_newline(t):
+ r'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+
+def t_error(t):
+ print("Illegal character '%s'" % t.value[0])
+ t.lexer.skip(1)
+
+# Build the lexer
+import ply.lex as lex
+lex.lex(optimize=1)
+
+# Parsing rules
+
+precedence = (
+ ('left', 'PLUS', 'MINUS'),
+ ('left', 'TIMES', 'DIVIDE'),
+ ('right', 'UMINUS'),
+)
+
+# dictionary of names
+names = {}
+
+
+def p_statement_assign(t):
+ 'statement : NAME EQUALS expression'
+ names[t[1]] = t[3]
+
+
+def p_statement_expr(t):
+ 'statement : expression'
+ print(t[1])
+
+
+def p_expression_binop(t):
+ '''expression : expression PLUS expression
+ | expression MINUS expression
+ | expression TIMES expression
+ | expression DIVIDE expression'''
+ if t[2] == '+':
+ t[0] = t[1] + t[3]
+ elif t[2] == '-':
+ t[0] = t[1] - t[3]
+ elif t[2] == '*':
+ t[0] = t[1] * t[3]
+ elif t[2] == '/':
+ t[0] = t[1] / t[3]
+ elif t[2] == '<':  # unreachable: the grammar defines no '<' operator
+ t[0] = t[1] < t[3]
+
+
+def p_expression_uminus(t):
+ 'expression : MINUS expression %prec UMINUS'
+ t[0] = -t[2]
+
+
+def p_expression_group(t):
+ 'expression : LPAREN expression RPAREN'
+ t[0] = t[2]
+
+
+def p_expression_number(t):
+ 'expression : NUMBER'
+ t[0] = t[1]
+
+
+def p_expression_name(t):
+ 'expression : NAME'
+ try:
+ t[0] = names[t[1]]
+ except LookupError:
+ print("Undefined name '%s'" % t[1])
+ t[0] = 0
+
+
+def p_error(t):
+ if t:
+ print("Syntax error at '%s'" % t.value)
+ else:
+ print("Syntax error at EOF")
+
+import ply.yacc as yacc
+yacc.yacc(optimize=1)
+
+while 1:
+ try:
+ s = raw_input('calc > ')
+ except EOFError:
+ break
+ yacc.parse(s)
diff --git a/third_party/python/ply/example/unicalc/calc.py b/third_party/python/ply/example/unicalc/calc.py
new file mode 100644
index 0000000000..901c4b9d76
--- /dev/null
+++ b/third_party/python/ply/example/unicalc/calc.py
@@ -0,0 +1,133 @@
+# -----------------------------------------------------------------------------
+# calc.py
+#
+# A simple calculator with variables. This is from O'Reilly's
+# "Lex and Yacc", p. 63.
+#
+# This example uses unicode strings for tokens, docstrings, and input
+# (Python 2 only: the ur'...' literals and print statements below are
+# not valid Python 3 syntax).
+# -----------------------------------------------------------------------------
+
+import sys
+sys.path.insert(0, "../..")
+
+tokens = (
+ 'NAME', 'NUMBER',
+ 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS',
+ 'LPAREN', 'RPAREN',
+)
+
+# Tokens
+
+t_PLUS = ur'\+'
+t_MINUS = ur'-'
+t_TIMES = ur'\*'
+t_DIVIDE = ur'/'
+t_EQUALS = ur'='
+t_LPAREN = ur'\('
+t_RPAREN = ur'\)'
+t_NAME = ur'[a-zA-Z_][a-zA-Z0-9_]*'
+
+
+def t_NUMBER(t):
+ ur'\d+'
+ try:
+ t.value = int(t.value)
+ except ValueError:
+ print "Integer value too large", t.value
+ t.value = 0
+ return t
+
+t_ignore = u" \t"
+
+
+def t_newline(t):
+ ur'\n+'
+ t.lexer.lineno += t.value.count("\n")
+
+
+def t_error(t):
+ print "Illegal character '%s'" % t.value[0]
+ t.lexer.skip(1)
+
+# Build the lexer
+import ply.lex as lex
+lex.lex()
+
+# Parsing rules
+
+precedence = (
+ ('left', 'PLUS', 'MINUS'),
+ ('left', 'TIMES', 'DIVIDE'),
+ ('right', 'UMINUS'),
+)
+
+# dictionary of names
+names = {}
+
+
+def p_statement_assign(p):
+ 'statement : NAME EQUALS expression'
+ names[p[1]] = p[3]
+
+
+def p_statement_expr(p):
+ 'statement : expression'
+ print p[1]
+
+
+def p_expression_binop(p):
+ '''expression : expression PLUS expression
+ | expression MINUS expression
+ | expression TIMES expression
+ | expression DIVIDE expression'''
+ if p[2] == u'+':
+ p[0] = p[1] + p[3]
+ elif p[2] == u'-':
+ p[0] = p[1] - p[3]
+ elif p[2] == u'*':
+ p[0] = p[1] * p[3]
+ elif p[2] == u'/':
+ p[0] = p[1] / p[3]
+
+
+def p_expression_uminus(p):
+ 'expression : MINUS expression %prec UMINUS'
+ p[0] = -p[2]
+
+
+def p_expression_group(p):
+ 'expression : LPAREN expression RPAREN'
+ p[0] = p[2]
+
+
+def p_expression_number(p):
+ 'expression : NUMBER'
+ p[0] = p[1]
+
+
+def p_expression_name(p):
+ 'expression : NAME'
+ try:
+ p[0] = names[p[1]]
+ except LookupError:
+ print "Undefined name '%s'" % p[1]
+ p[0] = 0
+
+
+def p_error(p):
+ if p:
+ print "Syntax error at '%s'" % p.value
+ else:
+ print "Syntax error at EOF"
+
+import ply.yacc as yacc
+yacc.yacc()
+
+while 1:
+ try:
+ s = raw_input('calc > ')
+ except EOFError:
+ break
+ if not s:
+ continue
+ yacc.parse(unicode(s))
diff --git a/third_party/python/ply/example/yply/README b/third_party/python/ply/example/yply/README
new file mode 100644
index 0000000000..bfadf36436
--- /dev/null
+++ b/third_party/python/ply/example/yply/README
@@ -0,0 +1,41 @@
+yply.py
+
+This example implements a program yply.py that converts a UNIX-yacc
+specification file into a PLY-compatible program. To use, simply
+run it like this:
+
+ % python yply.py [-nocode] inputfile.y >myparser.py
+
+The output of this program is Python code. In the output,
+any C code in the original file is included, but is commented out.
+If you use the -nocode option, then all of the C code in the
+original file is just discarded.
+
+To use the resulting grammar with PLY, you'll need to edit the
+myparser.py file. Within this file, some stub code is included that
+can be used to test the construction of the parsing tables (comment 4
+below sketches it). However, you'll need to do more editing to make a
+workable parser.
+
+Disclaimer: This is just an example I threw together in an afternoon.
+It might have some bugs. However, it worked when I tried it on
+a yacc-specified C++ parser containing 442 rules and 855 parsing
+states.
+
+Comments:
+
+1. This example does not parse specification files meant for lex/flex.
+ You'll need to specify the tokenizer on your own.
+
+2. This example shows a number of interesting PLY features including
+
+ - Parsing of literal text delimited by nested parentheses
+ - Some interaction between the parser and the lexer
+ - Use of literals in the grammar specification
+ - One-pass compilation. The program just emits the result;
+ there is no intermediate parse tree.
+
+3. This program could probably be cleaned up and enhanced a lot.
+ It would be great if someone wanted to work on this (hint).
+
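+4. The test stub emitted at the end of the generated file looks
+ like this (see yply.py):
+
+ if __name__ == '__main__':
+ from ply import *
+ yacc.yacc()
+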
+-Dave
+
diff --git a/third_party/python/ply/example/yply/ylex.py b/third_party/python/ply/example/yply/ylex.py
new file mode 100644
index 0000000000..16410e250e
--- /dev/null
+++ b/third_party/python/ply/example/yply/ylex.py
@@ -0,0 +1,119 @@
+# lexer for yacc-grammars
+#
+# Author: David Beazley (dave@dabeaz.com)
+# Date : October 2, 2006
+
+import sys
+sys.path.append("../..")
+
+from ply import *
+
+tokens = (
+ 'LITERAL', 'SECTION', 'TOKEN', 'LEFT', 'RIGHT', 'PREC', 'START', 'TYPE', 'NONASSOC', 'UNION', 'CODE',
+ 'ID', 'QLITERAL', 'NUMBER',
+)
+
+states = (('code', 'exclusive'),)
+
+literals = [';', ',', '<', '>', '|', ':']
+t_ignore = ' \t'
+
+t_TOKEN = r'%token'
+t_LEFT = r'%left'
+t_RIGHT = r'%right'
+t_NONASSOC = r'%nonassoc'
+t_PREC = r'%prec'
+t_START = r'%start'
+t_TYPE = r'%type'
+t_UNION = r'%union'
+t_ID = r'[a-zA-Z_][a-zA-Z_0-9]*'
+t_QLITERAL = r'''(?P<quote>['"]).*?(?P=quote)'''
+t_NUMBER = r'\d+'
+
+
+def t_SECTION(t):
+ r'%%'
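+ # The parser (yparse.py) sets lexer.lastsection = 1 once the
+ # definitions section has been parsed; the next '%%' then consumes
+ # the rest of the input as the trailing code section.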
+ if getattr(t.lexer, "lastsection", 0):
+ t.value = t.lexer.lexdata[t.lexpos + 2:]
+ t.lexer.lexpos = len(t.lexer.lexdata)
+ else:
+ t.lexer.lastsection = 0
+ return t
+
+# Comments
+
+
+def t_ccomment(t):
+ r'/\*(.|\n)*?\*/'
+ t.lexer.lineno += t.value.count('\n')
+
+t_ignore_cppcomment = r'//.*'
+
+
+def t_LITERAL(t):
+ r'%\{(.|\n)*?%\}'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+
+def t_NEWLINE(t):
+ r'\n'
+ t.lexer.lineno += 1
+
+
+def t_code(t):
+ r'\{'
+ t.lexer.codestart = t.lexpos
+ t.lexer.level = 1
+ t.lexer.begin('code')
+
+
+def t_code_ignore_string(t):
+ r'\"([^\\\n]|(\\.))*?\"'
+
+
+def t_code_ignore_char(t):
+ r'\'([^\\\n]|(\\.))*?\''
+
+
+def t_code_ignore_comment(t):
+ r'/\*(.|\n)*?\*/'
+
+
+def t_code_ignore_cppcom(t):
+ r'//.*'
+
+
+def t_code_lbrace(t):
+ r'\{'
+ t.lexer.level += 1
+
+
+def t_code_rbrace(t):
+ r'\}'
+ t.lexer.level -= 1
+ if t.lexer.level == 0:
+ t.type = 'CODE'
+ t.value = t.lexer.lexdata[t.lexer.codestart:t.lexpos + 1]
+ t.lexer.begin('INITIAL')
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+t_code_ignore_nonspace = r'[^\s\}\'\"\{]+'
+t_code_ignore_whitespace = r'\s+'
+t_code_ignore = ""
+
+
+def t_code_error(t):
+ raise RuntimeError
+
+
+def t_error(t):
+ print("%d: Illegal character '%s'" % (t.lexer.lineno, t.value[0]))
+ print(t.value)
+ t.lexer.skip(1)
+
+lex.lex()
+
+if __name__ == '__main__':
+ lex.runmain()
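+
+# A sketch of the exclusive 'code' state above: for input like
+#     { if (a) { b; } }
+# t_code enters the state at the first '{', t_code_lbrace and
+# t_code_rbrace track the nesting level, and when the count reaches
+# zero a single CODE token is returned whose value is the whole
+# braced block, outer braces included.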
diff --git a/third_party/python/ply/example/yply/yparse.py b/third_party/python/ply/example/yply/yparse.py
new file mode 100644
index 0000000000..1f2e8d0922
--- /dev/null
+++ b/third_party/python/ply/example/yply/yparse.py
@@ -0,0 +1,244 @@
+# parser for Unix yacc-based grammars
+#
+# Author: David Beazley (dave@dabeaz.com)
+# Date : October 2, 2006
+
+import ylex
+tokens = ylex.tokens
+
+from ply import *
+
+tokenlist = []
+preclist = []
+
+emit_code = 1
+
+
+def p_yacc(p):
+ '''yacc : defsection rulesection'''
+
+
+def p_defsection(p):
+ '''defsection : definitions SECTION
+ | SECTION'''
+ p.lexer.lastsection = 1
+ print("tokens = ", repr(tokenlist))
+ print()
+ print("precedence = ", repr(preclist))
+ print()
+ print("# -------------- RULES ----------------")
+ print()
+
+
+def p_rulesection(p):
+ '''rulesection : rules SECTION'''
+
+ print("# -------------- RULES END ----------------")
+ print_code(p[2], 0)
+
+
+def p_definitions(p):
+ '''definitions : definitions definition
+ | definition'''
+
+
+def p_definition_literal(p):
+ '''definition : LITERAL'''
+ print_code(p[1], 0)
+
+
+def p_definition_start(p):
+ '''definition : START ID'''
+ print("start = '%s'" % p[2])
+
+
+def p_definition_token(p):
+ '''definition : toktype opttype idlist optsemi '''
+ for i in p[3]:
+ if i[0] not in "'\"":
+ tokenlist.append(i)
+ if p[1] == '%left':
+ preclist.append(('left',) + tuple(p[3]))
+ elif p[1] == '%right':
+ preclist.append(('right',) + tuple(p[3]))
+ elif p[1] == '%nonassoc':
+ preclist.append(('nonassoc',) + tuple(p[3]))
+
+
+def p_toktype(p):
+ '''toktype : TOKEN
+ | LEFT
+ | RIGHT
+ | NONASSOC'''
+ p[0] = p[1]
+
+
+def p_opttype(p):
+ '''opttype : '<' ID '>'
+ | empty'''
+
+
+def p_idlist(p):
+ '''idlist : idlist optcomma tokenid
+ | tokenid'''
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1]
+ p[1].append(p[3])
+
+
+def p_tokenid(p):
+ '''tokenid : ID
+ | ID NUMBER
+ | QLITERAL
+ | QLITERAL NUMBER'''
+ p[0] = p[1]
+
+
+def p_optsemi(p):
+ '''optsemi : ';'
+ | empty'''
+
+
+def p_optcomma(p):
+ '''optcomma : ','
+ | empty'''
+
+
+def p_definition_type(p):
+ '''definition : TYPE '<' ID '>' namelist optsemi'''
+ # type declarations are ignored
+
+
+def p_namelist(p):
+ '''namelist : namelist optcomma ID
+ | ID'''
+
+
+def p_definition_union(p):
+ '''definition : UNION CODE optsemi'''
+ # Union declarations are ignored
+
+
+def p_rules(p):
+ '''rules : rules rule
+ | rule'''
+ if len(p) == 2:
+ rule = p[1]
+ else:
+ rule = p[2]
+
+ # Print out a Python equivalent of this rule
+
+ embedded = [] # Embedded actions (a mess)
+ embed_count = 0
+
+ rulename = rule[0]
+ rulecount = 1
+ for r in rule[1]:
+ # r contains one of the rule possibilities
+ print("def p_%s_%d(p):" % (rulename, rulecount))
+ prod = []
+ prodcode = ""
+ for i in range(len(r)):
+ item = r[i]
+ if item[0] == '{': # A code block
+ if i == len(r) - 1:
+ prodcode = item
+ break
+ else:
+ # an embedded action
+ embed_name = "_embed%d_%s" % (embed_count, rulename)
+ prod.append(embed_name)
+ embedded.append((embed_name, item))
+ embed_count += 1
+ else:
+ prod.append(item)
+ print(" '''%s : %s'''" % (rulename, " ".join(prod)))
+ # Emit code
+ print_code(prodcode, 4)
+ print()
+ rulecount += 1
+
+ for e, code in embedded:
+ print("def p_%s(p):" % e)
+ print(" '''%s : '''" % e)
+ print_code(code, 4)
+ print()
+
+
+def p_rule(p):
+ '''rule : ID ':' rulelist ';' '''
+ p[0] = (p[1], [p[3]])
+
+
+def p_rule2(p):
+ '''rule : ID ':' rulelist morerules ';' '''
+ p[4].insert(0, p[3])
+ p[0] = (p[1], p[4])
+
+
+def p_rule_empty(p):
+ '''rule : ID ':' ';' '''
+ p[0] = (p[1], [[]])
+
+
+def p_rule_empty2(p):
+ '''rule : ID ':' morerules ';' '''
+
+ p[3].insert(0, [])
+ p[0] = (p[1], p[3])
+
+
+def p_morerules(p):
+ '''morerules : morerules '|' rulelist
+ | '|' rulelist
+ | '|' '''
+
+ if len(p) == 2:
+ p[0] = [[]]
+ elif len(p) == 3:
+ p[0] = [p[2]]
+ else:
+ p[0] = p[1]
+ p[0].append(p[3])
+
+# print("morerules", len(p), p[0])
+
+
+def p_rulelist(p):
+ '''rulelist : rulelist ruleitem
+ | ruleitem'''
+
+ if len(p) == 2:
+ p[0] = [p[1]]
+ else:
+ p[0] = p[1]
+ p[1].append(p[2])
+
+
+def p_ruleitem(p):
+ '''ruleitem : ID
+ | QLITERAL
+ | CODE
+ | PREC'''
+ p[0] = p[1]
+
+
+def p_empty(p):
+ '''empty : '''
+
+
+def p_error(p):
+ pass
+
+yacc.yacc(debug=0)
+
+
+def print_code(code, indent):
+ if not emit_code:
+ return
+ codelines = code.splitlines()
+ for c in codelines:
+ print("%s# %s" % (" " * indent, c))
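+
+# A sketch of the translation above: a yacc rule such as
+#
+#     expr : expr '+' expr { $$ = $1 + $3; }
+#
+# is emitted as a PLY rule with the C action commented out:
+#
+#     def p_expr_1(p):
+#         '''expr : expr '+' expr'''
+#         # { $$ = $1 + $3; }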
diff --git a/third_party/python/ply/example/yply/yply.py b/third_party/python/ply/example/yply/yply.py
new file mode 100755
index 0000000000..e24616c831
--- /dev/null
+++ b/third_party/python/ply/example/yply/yply.py
@@ -0,0 +1,51 @@
+#!/usr/local/bin/python
+# yply.py
+#
+# Author: David Beazley (dave@dabeaz.com)
+# Date : October 2, 2006
+#
+# Converts a UNIX-yacc specification file into a PLY-compatible
+# specification. To use, simply do this:
+#
+# % python yply.py [-nocode] inputfile.y >myparser.py
+#
+# The output of this program is Python code. In the output,
+# any C code in the original file is included, but is commented out.
+# If you use the -nocode option, then all of the C code in the
+# original file is discarded.
+#
+# Disclaimer: This is just an example I threw together in an afternoon.
+# It might have some bugs. However, it worked when I tried it on
+# a yacc-specified C++ parser containing 442 rules and 855 parsing
+# states.
+#
+
+import sys
+sys.path.insert(0, "../..")
+
+import ylex
+import yparse
+
+from ply import *
+
+if len(sys.argv) == 1:
+ print("usage : yply.py [-nocode] inputfile")
+ raise SystemExit
+
+if len(sys.argv) == 3:
+ if sys.argv[1] == '-nocode':
+ yparse.emit_code = 0
+ else:
+ print("Unknown option '%s'" % sys.argv[1])
+ raise SystemExit
+ filename = sys.argv[2]
+else:
+ filename = sys.argv[1]
+
+yacc.parse(open(filename).read())
+
+print("""
+if __name__ == '__main__':
+ from ply import *
+ yacc.yacc()
+""")
diff --git a/third_party/python/ply/ply.egg-info/PKG-INFO b/third_party/python/ply/ply.egg-info/PKG-INFO
new file mode 100644
index 0000000000..6eedf42595
--- /dev/null
+++ b/third_party/python/ply/ply.egg-info/PKG-INFO
@@ -0,0 +1,22 @@
+Metadata-Version: 1.1
+Name: ply
+Version: 3.10
+Summary: Python Lex & Yacc
+Home-page: http://www.dabeaz.com/ply/
+Author: David Beazley
+Author-email: dave@dabeaz.com
+License: BSD
+Description:
+ PLY is yet another implementation of lex and yacc for Python. Some notable
+ features include the fact that it's implemented entirely in Python and it
+ uses LALR(1) parsing, which is efficient and well suited for larger grammars.
+
+ PLY provides most of the standard lex/yacc features including support for empty
+ productions, precedence rules, error recovery, and support for ambiguous grammars.
+
+ PLY is extremely easy to use and provides very extensive error checking.
+ It is compatible with both Python 2 and Python 3.
+
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 2
diff --git a/third_party/python/ply/ply.egg-info/SOURCES.txt b/third_party/python/ply/ply.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..2dff7dd29b
--- /dev/null
+++ b/third_party/python/ply/ply.egg-info/SOURCES.txt
@@ -0,0 +1,172 @@
+ANNOUNCE
+CHANGES
+MANIFEST.in
+README.md
+TODO
+setup.cfg
+setup.py
+doc/internal.html
+doc/makedoc.py
+doc/ply.html
+example/README
+example/cleanup.sh
+example/BASIC/README
+example/BASIC/basic.py
+example/BASIC/basiclex.py
+example/BASIC/basiclog.py
+example/BASIC/basinterp.py
+example/BASIC/basparse.py
+example/BASIC/dim.bas
+example/BASIC/func.bas
+example/BASIC/gcd.bas
+example/BASIC/gosub.bas
+example/BASIC/hello.bas
+example/BASIC/linear.bas
+example/BASIC/maxsin.bas
+example/BASIC/powers.bas
+example/BASIC/rand.bas
+example/BASIC/sales.bas
+example/BASIC/sears.bas
+example/BASIC/sqrt1.bas
+example/BASIC/sqrt2.bas
+example/GardenSnake/GardenSnake.py
+example/GardenSnake/README
+example/ansic/README
+example/ansic/clex.py
+example/ansic/cparse.py
+example/calc/calc.py
+example/calcdebug/calc.py
+example/calceof/calc.py
+example/classcalc/calc.py
+example/closurecalc/calc.py
+example/hedit/hedit.py
+example/newclasscalc/calc.py
+example/optcalc/README
+example/optcalc/calc.py
+example/unicalc/calc.py
+example/yply/README
+example/yply/ylex.py
+example/yply/yparse.py
+example/yply/yply.py
+ply/__init__.py
+ply/cpp.py
+ply/ctokens.py
+ply/lex.py
+ply/yacc.py
+ply/ygen.py
+ply.egg-info/PKG-INFO
+ply.egg-info/SOURCES.txt
+ply.egg-info/dependency_links.txt
+ply.egg-info/top_level.txt
+test/README
+test/calclex.py
+test/cleanup.sh
+test/lex_closure.py
+test/lex_doc1.py
+test/lex_dup1.py
+test/lex_dup2.py
+test/lex_dup3.py
+test/lex_empty.py
+test/lex_error1.py
+test/lex_error2.py
+test/lex_error3.py
+test/lex_error4.py
+test/lex_hedit.py
+test/lex_ignore.py
+test/lex_ignore2.py
+test/lex_literal1.py
+test/lex_literal2.py
+test/lex_literal3.py
+test/lex_many_tokens.py
+test/lex_module.py
+test/lex_module_import.py
+test/lex_object.py
+test/lex_opt_alias.py
+test/lex_optimize.py
+test/lex_optimize2.py
+test/lex_optimize3.py
+test/lex_re1.py
+test/lex_re2.py
+test/lex_re3.py
+test/lex_rule1.py
+test/lex_rule2.py
+test/lex_rule3.py
+test/lex_state1.py
+test/lex_state2.py
+test/lex_state3.py
+test/lex_state4.py
+test/lex_state5.py
+test/lex_state_noerror.py
+test/lex_state_norule.py
+test/lex_state_try.py
+test/lex_token1.py
+test/lex_token2.py
+test/lex_token3.py
+test/lex_token4.py
+test/lex_token5.py
+test/lex_token_dup.py
+test/testlex.py
+test/testyacc.py
+test/yacc_badargs.py
+test/yacc_badid.py
+test/yacc_badprec.py
+test/yacc_badprec2.py
+test/yacc_badprec3.py
+test/yacc_badrule.py
+test/yacc_badtok.py
+test/yacc_dup.py
+test/yacc_error1.py
+test/yacc_error2.py
+test/yacc_error3.py
+test/yacc_error4.py
+test/yacc_error5.py
+test/yacc_error6.py
+test/yacc_error7.py
+test/yacc_inf.py
+test/yacc_literal.py
+test/yacc_misplaced.py
+test/yacc_missing1.py
+test/yacc_nested.py
+test/yacc_nodoc.py
+test/yacc_noerror.py
+test/yacc_nop.py
+test/yacc_notfunc.py
+test/yacc_notok.py
+test/yacc_prec1.py
+test/yacc_rr.py
+test/yacc_rr_unused.py
+test/yacc_simple.py
+test/yacc_sr.py
+test/yacc_term1.py
+test/yacc_unicode_literals.py
+test/yacc_unused.py
+test/yacc_unused_rule.py
+test/yacc_uprec.py
+test/yacc_uprec2.py
+test/pkg_test1/__init__.py
+test/pkg_test1/parsing/__init__.py
+test/pkg_test1/parsing/calclex.py
+test/pkg_test1/parsing/calcparse.py
+test/pkg_test2/__init__.py
+test/pkg_test2/parsing/__init__.py
+test/pkg_test2/parsing/calclex.py
+test/pkg_test2/parsing/calcparse.py
+test/pkg_test3/__init__.py
+test/pkg_test3/generated/__init__.py
+test/pkg_test3/parsing/__init__.py
+test/pkg_test3/parsing/calclex.py
+test/pkg_test3/parsing/calcparse.py
+test/pkg_test4/__init__.py
+test/pkg_test4/parsing/__init__.py
+test/pkg_test4/parsing/calclex.py
+test/pkg_test4/parsing/calcparse.py
+test/pkg_test5/__init__.py
+test/pkg_test5/parsing/__init__.py
+test/pkg_test5/parsing/calclex.py
+test/pkg_test5/parsing/calcparse.py
+test/pkg_test6/__init__.py
+test/pkg_test6/parsing/__init__.py
+test/pkg_test6/parsing/calclex.py
+test/pkg_test6/parsing/calcparse.py
+test/pkg_test6/parsing/expression.py
+test/pkg_test6/parsing/statement.py \ No newline at end of file
diff --git a/third_party/python/ply/ply.egg-info/dependency_links.txt b/third_party/python/ply/ply.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/ply/ply.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/ply/ply.egg-info/top_level.txt b/third_party/python/ply/ply.egg-info/top_level.txt
new file mode 100644
index 0000000000..90412f0683
--- /dev/null
+++ b/third_party/python/ply/ply.egg-info/top_level.txt
@@ -0,0 +1 @@
+ply
diff --git a/third_party/python/ply/ply/__init__.py b/third_party/python/ply/ply/__init__.py
new file mode 100644
index 0000000000..6e53cddcf6
--- /dev/null
+++ b/third_party/python/ply/ply/__init__.py
@@ -0,0 +1,5 @@
+# PLY package
+# Author: David Beazley (dave@dabeaz.com)
+
+__version__ = '3.9'
+__all__ = ['lex','yacc']
diff --git a/third_party/python/ply/ply/cpp.py b/third_party/python/ply/ply/cpp.py
new file mode 100644
index 0000000000..b6bfc69614
--- /dev/null
+++ b/third_party/python/ply/ply/cpp.py
@@ -0,0 +1,918 @@
+# -----------------------------------------------------------------------------
+# cpp.py
+#
+# Author: David Beazley (http://www.dabeaz.com)
+# Copyright (C) 2007
+# All rights reserved
+#
+# This module implements an ANSI-C style lexical preprocessor for PLY.
+# -----------------------------------------------------------------------------
+from __future__ import generators
+
+import sys
+
+# Some Python 3 compatibility shims
+if sys.version_info.major < 3:
+ STRING_TYPES = (str, unicode)
+else:
+ STRING_TYPES = str
+ xrange = range
+
+# -----------------------------------------------------------------------------
+# Default preprocessor lexer definitions. These tokens are enough to get
+# a basic preprocessor working. Other modules may import these if they want
+# -----------------------------------------------------------------------------
+
+tokens = (
+ 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND'
+)
+
+literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\""
+
+# Whitespace
+def t_CPP_WS(t):
+ r'\s+'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+t_CPP_POUND = r'\#'
+t_CPP_DPOUND = r'\#\#'
+
+# Identifier
+t_CPP_ID = r'[A-Za-z_][\w_]*'
+
+# Integer literal
+def CPP_INTEGER(t):
+ r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)'
+ return t
+
+t_CPP_INTEGER = CPP_INTEGER
+
+# Floating literal
+t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+def t_CPP_STRING(t):
+ r'\"([^\\\n]|(\\(.|\n)))*?\"'
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+# Character constant 'c' or L'c'
+def t_CPP_CHAR(t):
+ r'(L)?\'([^\\\n]|(\\(.|\n)))*?\''
+ t.lexer.lineno += t.value.count("\n")
+ return t
+
+# Comment
+def t_CPP_COMMENT1(t):
+ r'(/\*(.|\n)*?\*/)'
+ ncr = t.value.count("\n")
+ t.lexer.lineno += ncr
+ # replace with one space or a number of '\n'
+ t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' '
+ return t
+
+# Line comment
+def t_CPP_COMMENT2(t):
+ r'(//.*?(\n|$))'
+ # replace with '\n'
+ t.type = 'CPP_WS'; t.value = '\n'
+ return t
+
+def t_error(t):
+ t.type = t.value[0]
+ t.value = t.value[0]
+ t.lexer.skip(1)
+ return t
+
+import re
+import copy
+import time
+import os.path
+import ply.lex as lex   # used by Preprocessor() when no lexer is supplied
+
+# -----------------------------------------------------------------------------
+# trigraph()
+#
+# Given an input string, this function replaces all trigraph sequences.
+# The following mapping is used:
+#
+# ??= #
+# ??/ \
+# ??' ^
+# ??( [
+# ??) ]
+# ??! |
+# ??< {
+# ??> }
+# ??- ~
+# -----------------------------------------------------------------------------
+
+_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''')
+_trigraph_rep = {
+ '=':'#',
+ '/':'\\',
+ "'":'^',
+ '(':'[',
+ ')':']',
+ '!':'|',
+ '<':'{',
+ '>':'}',
+ '-':'~'
+}
+
+def trigraph(input):
+ return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input)
+
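+# For example, trigraph("??=define ARR(x) x??(0??)") returns
+# "#define ARR(x) x[0]".
+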
+# ------------------------------------------------------------------
+# Macro object
+#
+# This object holds information about preprocessor macros
+#
+# .name - Macro name (string)
+# .value - Macro value (a list of tokens)
+# .arglist - List of argument names
+# .variadic - Boolean indicating whether or not variadic macro
+# .vararg - Name of the variadic parameter
+#
+# When a macro is created, the macro replacement token sequence is
+# pre-scanned and used to create patch lists that are later used
+# during macro expansion
+# ------------------------------------------------------------------
+
+class Macro(object):
+ def __init__(self,name,value,arglist=None,variadic=False):
+ self.name = name
+ self.value = value
+ self.arglist = arglist
+ self.variadic = variadic
+ if variadic:
+ self.vararg = arglist[-1]
+ self.source = None
+
+# ------------------------------------------------------------------
+# Preprocessor object
+#
+# Object representing a preprocessor. Contains macro definitions,
+# include directories, and other information
+# ------------------------------------------------------------------
+
+class Preprocessor(object):
+ def __init__(self,lexer=None):
+ if lexer is None:
+ lexer = lex.lexer
+ self.lexer = lexer
+ self.macros = { }
+ self.path = []
+ self.temp_path = []
+
+ # Probe the lexer for selected tokens
+ self.lexprobe()
+
+ tm = time.localtime()
+ self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm))
+ self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm))
+ self.parser = None
+
+ # -----------------------------------------------------------------------------
+ # tokenize()
+ #
+ # Utility function. Given a string of text, tokenize into a list of tokens
+ # -----------------------------------------------------------------------------
+
+ def tokenize(self,text):
+ tokens = []
+ self.lexer.input(text)
+ while True:
+ tok = self.lexer.token()
+ if not tok: break
+ tokens.append(tok)
+ return tokens
+
+ # ---------------------------------------------------------------------
+ # error()
+ #
+ # Report a preprocessor error/warning of some kind
+ # ----------------------------------------------------------------------
+
+ def error(self,file,line,msg):
+ print("%s:%d %s" % (file,line,msg))
+
+ # ----------------------------------------------------------------------
+ # lexprobe()
+ #
+ # This method probes the preprocessor lexer object to discover
+ # the token types of symbols that are important to the preprocessor.
+ # If this works right, the preprocessor will simply "work"
+ # with any suitable lexer regardless of how tokens have been named.
+ # ----------------------------------------------------------------------
+
+ def lexprobe(self):
+
+ # Determine the token type for identifiers
+ self.lexer.input("identifier")
+ tok = self.lexer.token()
+ if not tok or tok.value != "identifier":
+ print("Couldn't determine identifier type")
+ else:
+ self.t_ID = tok.type
+
+ # Determine the token type for integers
+ self.lexer.input("12345")
+ tok = self.lexer.token()
+ if not tok or int(tok.value) != 12345:
+ print("Couldn't determine integer type")
+ else:
+ self.t_INTEGER = tok.type
+ self.t_INTEGER_TYPE = type(tok.value)
+
+ # Determine the token type for strings enclosed in double quotes
+ self.lexer.input("\"filename\"")
+ tok = self.lexer.token()
+ if not tok or tok.value != "\"filename\"":
+ print("Couldn't determine string type")
+ else:
+ self.t_STRING = tok.type
+
+ # Determine the token type for whitespace--if any
+ self.lexer.input(" ")
+ tok = self.lexer.token()
+ if not tok or tok.value != " ":
+ self.t_SPACE = None
+ else:
+ self.t_SPACE = tok.type
+
+ # Determine the token type for newlines
+ self.lexer.input("\n")
+ tok = self.lexer.token()
+ if not tok or tok.value != "\n":
+ self.t_NEWLINE = None
+ print("Couldn't determine token for newlines")
+ else:
+ self.t_NEWLINE = tok.type
+
+ self.t_WS = (self.t_SPACE, self.t_NEWLINE)
+
+ # Check for other characters used by the preprocessor
+ chars = [ '<','>','#','##','\\','(',')',',','.']
+ for c in chars:
+ self.lexer.input(c)
+ tok = self.lexer.token()
+ if not tok or tok.value != c:
+ print("Unable to lex '%s' required for preprocessor" % c)
+
+ # ----------------------------------------------------------------------
+ # add_path()
+ #
+ # Adds a search path to the preprocessor.
+ # ----------------------------------------------------------------------
+
+ def add_path(self,path):
+ self.path.append(path)
+
+ # ----------------------------------------------------------------------
+ # group_lines()
+ #
+ # Given an input string, this function splits it into lines. Trailing whitespace
+ # is removed. Any line ending with \ is grouped with the next line. This
+ # function forms the lowest level of the preprocessor---grouping text into
+ # a line-by-line format.
+ # ----------------------------------------------------------------------
+
+ def group_lines(self,input):
+ lex = self.lexer.clone()
+ lines = [x.rstrip() for x in input.splitlines()]
+ for i in xrange(len(lines)):
+ j = i+1
+ while lines[i].endswith('\\') and (j < len(lines)):
+ lines[i] = lines[i][:-1]+lines[j]
+ lines[j] = ""
+ j += 1
+
+ input = "\n".join(lines)
+ lex.input(input)
+ lex.lineno = 1
+
+ current_line = []
+ while True:
+ tok = lex.token()
+ if not tok:
+ break
+ current_line.append(tok)
+ if tok.type in self.t_WS and '\n' in tok.value:
+ yield current_line
+ current_line = []
+
+ if current_line:
+ yield current_line
+
+ # ----------------------------------------------------------------------
+ # tokenstrip()
+ #
+ # Remove leading/trailing whitespace tokens from a token list
+ # ----------------------------------------------------------------------
+
+ def tokenstrip(self,tokens):
+ i = 0
+ while i < len(tokens) and tokens[i].type in self.t_WS:
+ i += 1
+ del tokens[:i]
+ i = len(tokens)-1
+ while i >= 0 and tokens[i].type in self.t_WS:
+ i -= 1
+ del tokens[i+1:]
+ return tokens
+
+
+ # ----------------------------------------------------------------------
+ # collect_args()
+ #
+ # Collects comma separated arguments from a list of tokens. The arguments
+ # must be enclosed in parentheses. Returns a tuple (tokencount,args,positions)
+ # where tokencount is the number of tokens consumed, args is a list of arguments,
+ # and positions is a list of integers containing the starting index of each
+ # argument. Each argument is represented by a list of tokens.
+ #
+ # When collecting arguments, leading and trailing whitespace is removed
+ # from each argument.
+ #
+ # This function properly handles nested parentheses and commas---these do not
+ # define new arguments.
+ # ----------------------------------------------------------------------
+
+ def collect_args(self,tokenlist):
+ args = []
+ positions = []
+ current_arg = []
+ nesting = 1
+ tokenlen = len(tokenlist)
+
+ # Search for the opening '('.
+ i = 0
+ while (i < tokenlen) and (tokenlist[i].type in self.t_WS):
+ i += 1
+
+ if (i < tokenlen) and (tokenlist[i].value == '('):
+ positions.append(i+1)
+ else:
+ self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments")
+ return 0, [], []
+
+ i += 1
+
+ while i < tokenlen:
+ t = tokenlist[i]
+ if t.value == '(':
+ current_arg.append(t)
+ nesting += 1
+ elif t.value == ')':
+ nesting -= 1
+ if nesting == 0:
+ if current_arg:
+ args.append(self.tokenstrip(current_arg))
+ positions.append(i)
+ return i+1,args,positions
+ current_arg.append(t)
+ elif t.value == ',' and nesting == 1:
+ args.append(self.tokenstrip(current_arg))
+ positions.append(i+1)
+ current_arg = []
+ else:
+ current_arg.append(t)
+ i += 1
+
+ # Missing end argument
+ self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments")
+ return 0, [],[]
+
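+ # For example, given the tokens of "(a, (b, c), d)", collect_args()
+ # returns the number of tokens consumed, the argument token lists
+ # [[a], [( b , c )], [d]] (the nested parentheses stay inside the
+ # middle argument), and the starting positions of the arguments.
+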
+ # ----------------------------------------------------------------------
+ # macro_prescan()
+ #
+ # Examine the macro value (token sequence) and identify patch points
+ # This is used to speed up macro expansion later on---we'll know
+ # right away where to apply patches to the value to form the expansion
+ # ----------------------------------------------------------------------
+
+ def macro_prescan(self,macro):
+ macro.patch = [] # Standard macro arguments
+ macro.str_patch = [] # String conversion expansion
+ macro.var_comma_patch = [] # Variadic macro comma patch
+ i = 0
+ while i < len(macro.value):
+ if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist:
+ argnum = macro.arglist.index(macro.value[i].value)
+ # Conversion of argument to a string
+ if i > 0 and macro.value[i-1].value == '#':
+ macro.value[i] = copy.copy(macro.value[i])
+ macro.value[i].type = self.t_STRING
+ del macro.value[i-1]
+ macro.str_patch.append((argnum,i-1))
+ continue
+ # Concatenation
+ elif (i > 0 and macro.value[i-1].value == '##'):
+ macro.patch.append(('c',argnum,i-1))
+ del macro.value[i-1]
+ continue
+ elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'):
+ macro.patch.append(('c',argnum,i))
+ i += 1
+ continue
+ # Standard expansion
+ else:
+ macro.patch.append(('e',argnum,i))
+ elif macro.value[i].value == '##':
+ if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \
+ ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \
+ (macro.value[i+1].value == macro.vararg):
+ macro.var_comma_patch.append(i-1)
+ i += 1
+ macro.patch.sort(key=lambda x: x[2],reverse=True)
+
+ # ----------------------------------------------------------------------
+ # macro_expand_args()
+ #
+ # Given a Macro and list of arguments (each a token list), this method
+ # returns an expanded version of a macro. The return value is a token sequence
+ # representing the replacement macro tokens
+ # ----------------------------------------------------------------------
+
+ def macro_expand_args(self,macro,args):
+ # Make a copy of the macro token sequence
+ rep = [copy.copy(_x) for _x in macro.value]
+
+ # Make string expansion patches. These do not alter the length of the replacement sequence
+
+ str_expansion = {}
+ for argnum, i in macro.str_patch:
+ if argnum not in str_expansion:
+ str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\")
+ rep[i] = copy.copy(rep[i])
+ rep[i].value = str_expansion[argnum]
+
+ # Make the variadic macro comma patch. If the variadic macro argument
+ # is empty, we get rid of the comma that precedes it in the replacement.
+ comma_patch = False
+ if macro.variadic and not args[-1]:
+ for i in macro.var_comma_patch:
+ rep[i] = None
+ comma_patch = True
+
+ # Make all other patches. The order of these matters. It is assumed that the patch list
+ # has been sorted in reverse order of patch location since replacements will cause the
+ # size of the replacement sequence to expand from the patch point.
+
+ expanded = { }
+ for ptype, argnum, i in macro.patch:
+ # Concatenation. Argument is left unexpanded
+ if ptype == 'c':
+ rep[i:i+1] = args[argnum]
+ # Normal expansion. Argument is macro expanded first
+ elif ptype == 'e':
+ if argnum not in expanded:
+ expanded[argnum] = self.expand_macros(args[argnum])
+ rep[i:i+1] = expanded[argnum]
+
+ # Get rid of removed comma if necessary
+ if comma_patch:
+ rep = [_i for _i in rep if _i]
+
+ return rep
+
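+ # A sketch of the three patch kinds, assuming the definitions
+ # #define STR(x) #x and #define CAT(a,b) a ## b:
+ #   - a str_patch turns STR(hi) into the string literal "hi"
+ #   - a 'c' patch pastes CAT(foo,1) into foo1 without expanding args
+ #   - an 'e' patch macro-expands the argument tokens, then splices them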
+
+ # ----------------------------------------------------------------------
+ # expand_macros()
+ #
+ # Given a list of tokens, this function performs macro expansion.
+ # The expanded argument is a dictionary that contains macros already
+ # expanded. This is used to prevent infinite recursion.
+ # ----------------------------------------------------------------------
+
+ def expand_macros(self,tokens,expanded=None):
+ if expanded is None:
+ expanded = {}
+ i = 0
+ while i < len(tokens):
+ t = tokens[i]
+ if t.type == self.t_ID:
+ if t.value in self.macros and t.value not in expanded:
+ # Yes, we found a macro match
+ expanded[t.value] = True
+
+ m = self.macros[t.value]
+ if not m.arglist:
+ # A simple macro
+ ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded)
+ for e in ex:
+ e.lineno = t.lineno
+ tokens[i:i+1] = ex
+ i += len(ex)
+ else:
+ # A macro with arguments
+ j = i + 1
+ while j < len(tokens) and tokens[j].type in self.t_WS:
+ j += 1
+ if tokens[j].value == '(':
+ tokcount,args,positions = self.collect_args(tokens[j:])
+ if not m.variadic and len(args) != len(m.arglist):
+ self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist)))
+ i = j + tokcount
+ elif m.variadic and len(args) < len(m.arglist)-1:
+ if len(m.arglist) > 2:
+ self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1))
+ else:
+ self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1))
+ i = j + tokcount
+ else:
+ if m.variadic:
+ if len(args) == len(m.arglist)-1:
+ args.append([])
+ else:
+ args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1]
+ del args[len(m.arglist):]
+
+ # Get macro replacement text
+ rep = self.macro_expand_args(m,args)
+ rep = self.expand_macros(rep,expanded)
+ for r in rep:
+ r.lineno = t.lineno
+ tokens[i:j+tokcount] = rep
+ i += len(rep)
+ del expanded[t.value]
+ continue
+ elif t.value == '__LINE__':
+ t.type = self.t_INTEGER
+ t.value = self.t_INTEGER_TYPE(t.lineno)
+
+ i += 1
+ return tokens
+
+ # ----------------------------------------------------------------------
+ # evalexpr()
+ #
+ # Evaluate an expression token sequence as an integral constant
+ # expression (as used by #if / #elif).
+ # ----------------------------------------------------------------------
+
+ def evalexpr(self,tokens):
+ # tokens = tokenize(line)
+ # Search for defined macros
+ i = 0
+ while i < len(tokens):
+ if tokens[i].type == self.t_ID and tokens[i].value == 'defined':
+ j = i + 1
+ needparen = False
+ result = "0L"
+ while j < len(tokens):
+ if tokens[j].type in self.t_WS:
+ j += 1
+ continue
+ elif tokens[j].type == self.t_ID:
+ if tokens[j].value in self.macros:
+ result = "1L"
+ else:
+ result = "0L"
+ if not needparen: break
+ elif tokens[j].value == '(':
+ needparen = True
+ elif tokens[j].value == ')':
+ break
+ else:
+ self.error(self.source,tokens[i].lineno,"Malformed defined()")
+ j += 1
+ tokens[i].type = self.t_INTEGER
+ tokens[i].value = self.t_INTEGER_TYPE(result)
+ del tokens[i+1:j+1]
+ i += 1
+ tokens = self.expand_macros(tokens)
+ for i,t in enumerate(tokens):
+ if t.type == self.t_ID:
+ tokens[i] = copy.copy(t)
+ tokens[i].type = self.t_INTEGER
+ tokens[i].value = self.t_INTEGER_TYPE("0L")
+ elif t.type == self.t_INTEGER:
+ tokens[i] = copy.copy(t)
+ # Strip off any trailing suffixes
+ tokens[i].value = str(tokens[i].value)
+ while tokens[i].value[-1] not in "0123456789abcdefABCDEF":
+ tokens[i].value = tokens[i].value[:-1]
+
+ expr = "".join([str(x.value) for x in tokens])
+ expr = expr.replace("&&"," and ")
+ expr = expr.replace("||"," or ")
+ expr = expr.replace("!"," not ")
+ try:
+ result = eval(expr)
+ except Exception:
+ self.error(self.source,tokens[0].lineno,"Couldn't evaluate expression")
+ result = 0
+ return result
+
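+ # For example, '#if defined(FOO) && !BAR' (with FOO defined and BAR
+ # not a macro) reaches eval() as the string '1L and not 0L'; the
+ # 'L'-suffixed values come from the defined() rewriting above.
+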
+ # ----------------------------------------------------------------------
+ # parsegen()
+ #
+ # Parse an input string.
+ # ----------------------------------------------------------------------
+ def parsegen(self,input,source=None):
+
+ # Replace trigraph sequences
+ t = trigraph(input)
+ lines = self.group_lines(t)
+
+ if not source:
+ source = ""
+
+ self.define("__FILE__ \"%s\"" % source)
+
+ self.source = source
+ chunk = []
+ enable = True
+ iftrigger = False
+ ifstack = []
+
+ for x in lines:
+ for i,tok in enumerate(x):
+ if tok.type not in self.t_WS: break
+ if tok.value == '#':
+ # Preprocessor directive
+
+ # insert necessary whitespace instead of eaten tokens
+ for tok in x:
+ if tok.type in self.t_WS and '\n' in tok.value:
+ chunk.append(tok)
+
+ dirtokens = self.tokenstrip(x[i+1:])
+ if dirtokens:
+ name = dirtokens[0].value
+ args = self.tokenstrip(dirtokens[1:])
+ else:
+ name = ""
+ args = []
+
+ if name == 'define':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ self.define(args)
+ elif name == 'include':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ oldfile = self.macros['__FILE__']
+ for tok in self.include(args):
+ yield tok
+ self.macros['__FILE__'] = oldfile
+ self.source = source
+ elif name == 'undef':
+ if enable:
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+ self.undef(args)
+ elif name == 'ifdef':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ if not args[0].value in self.macros:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'ifndef':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ if args[0].value in self.macros:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'if':
+ ifstack.append((enable,iftrigger))
+ if enable:
+ result = self.evalexpr(args)
+ if not result:
+ enable = False
+ iftrigger = False
+ else:
+ iftrigger = True
+ elif name == 'elif':
+ if ifstack:
+ if ifstack[-1][0]: # We only pay attention if outer "if" allows this
+ if enable: # If already true, we flip enable False
+ enable = False
+ elif not iftrigger: # If False, but not triggered yet, we'll check expression
+ result = self.evalexpr(args)
+ if result:
+ enable = True
+ iftrigger = True
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #elif")
+
+ elif name == 'else':
+ if ifstack:
+ if ifstack[-1][0]:
+ if enable:
+ enable = False
+ elif not iftrigger:
+ enable = True
+ iftrigger = True
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #else")
+
+ elif name == 'endif':
+ if ifstack:
+ enable,iftrigger = ifstack.pop()
+ else:
+ self.error(self.source,dirtokens[0].lineno,"Misplaced #endif")
+ else:
+ # Unknown preprocessor directive
+ pass
+
+ else:
+ # Normal text
+ if enable:
+ chunk.extend(x)
+
+ for tok in self.expand_macros(chunk):
+ yield tok
+ chunk = []
+
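+ # A sketch of the conditional state: for '#if 0 / #elif 1 / #else',
+ # 'enable' is False in the #if 0 arm, flips True at #elif 1 (which
+ # also sets 'iftrigger'), and 'iftrigger' then keeps the #else arm
+ # disabled; '#endif' pops the saved (enable, iftrigger) pair.
+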
+ # ----------------------------------------------------------------------
+ # include()
+ #
+ # Implementation of file-inclusion
+ # ----------------------------------------------------------------------
+
+ def include(self,tokens):
+ # Try to extract the filename and then process an include file
+ if not tokens:
+ return
+ if tokens:
+ if tokens[0].value != '<' and tokens[0].type != self.t_STRING:
+ tokens = self.expand_macros(tokens)
+
+ if tokens[0].value == '<':
+ # Include <...>
+ i = 1
+ while i < len(tokens):
+ if tokens[i].value == '>':
+ break
+ i += 1
+ else:
+ print("Malformed #include <...>")
+ return
+ filename = "".join([x.value for x in tokens[1:i]])
+ path = self.path + [""] + self.temp_path
+ elif tokens[0].type == self.t_STRING:
+ filename = tokens[0].value[1:-1]
+ path = self.temp_path + [""] + self.path
+ else:
+ print("Malformed #include statement")
+ return
+ for p in path:
+ iname = os.path.join(p,filename)
+ try:
+ data = open(iname,"r").read()
+ dname = os.path.dirname(iname)
+ if dname:
+ self.temp_path.insert(0,dname)
+ for tok in self.parsegen(data,filename):
+ yield tok
+ if dname:
+ del self.temp_path[0]
+ break
+ except IOError:
+ pass
+ else:
+ print("Couldn't find '%s'" % filename)
+
+ # ----------------------------------------------------------------------
+ # define()
+ #
+ # Define a new macro
+ # ----------------------------------------------------------------------
+
+ def define(self,tokens):
+ if isinstance(tokens,STRING_TYPES):
+ tokens = self.tokenize(tokens)
+
+ linetok = tokens
+ try:
+ name = linetok[0]
+ if len(linetok) > 1:
+ mtype = linetok[1]
+ else:
+ mtype = None
+ if not mtype:
+ m = Macro(name.value,[])
+ self.macros[name.value] = m
+ elif mtype.type in self.t_WS:
+ # A normal macro
+ m = Macro(name.value,self.tokenstrip(linetok[2:]))
+ self.macros[name.value] = m
+ elif mtype.value == '(':
+ # A macro with arguments
+ tokcount, args, positions = self.collect_args(linetok[1:])
+ variadic = False
+ for a in args:
+ if variadic:
+ print("No more arguments may follow a variadic argument")
+ break
+ astr = "".join([str(_i.value) for _i in a])
+ if astr == "...":
+ variadic = True
+ a[0].type = self.t_ID
+ a[0].value = '__VA_ARGS__'
+ variadic = True
+ del a[1:]
+ continue
+ elif astr[-3:] == "..." and a[0].type == self.t_ID:
+ variadic = True
+ del a[1:]
+ # If, for some reason, "." is part of the identifier, strip off the name for the purposes
+ # of macro expansion
+ if a[0].value[-3:] == '...':
+ a[0].value = a[0].value[:-3]
+ continue
+ if len(a) > 1 or a[0].type != self.t_ID:
+ print("Invalid macro argument")
+ break
+ else:
+ mvalue = self.tokenstrip(linetok[1+tokcount:])
+ i = 0
+ while i < len(mvalue):
+ if i+1 < len(mvalue):
+ if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##':
+ del mvalue[i]
+ continue
+ elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS:
+ del mvalue[i+1]
+ i += 1
+ m = Macro(name.value,mvalue,[x[0].value for x in args],variadic)
+ self.macro_prescan(m)
+ self.macros[name.value] = m
+ else:
+ print("Bad macro definition")
+ except LookupError:
+ print("Bad macro definition")
+
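+ # Variadic sketch: for '#define DBG(fmt, ...) log(fmt, ##__VA_ARGS__)'
+ # the trailing '...' argument is renamed __VA_ARGS__ above and the
+ # macro is flagged variadic, so the ',##' comma patch can drop the
+ # comma when no extra arguments are supplied.
+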
+ # ----------------------------------------------------------------------
+ # undef()
+ #
+ # Undefine a macro
+ # ----------------------------------------------------------------------
+
+ def undef(self,tokens):
+ id = tokens[0].value
+ try:
+ del self.macros[id]
+ except LookupError:
+ pass
+
+ # ----------------------------------------------------------------------
+ # parse()
+ #
+ # Parse input text.
+ # ----------------------------------------------------------------------
+ def parse(self,input,source=None,ignore={}):
+ self.ignore = ignore
+ self.parser = self.parsegen(input,source)
+
+ # ----------------------------------------------------------------------
+ # token()
+ #
+ # Method to return individual tokens
+ # ----------------------------------------------------------------------
+ def token(self):
+ try:
+ while True:
+ tok = next(self.parser)
+ if tok.type not in self.ignore: return tok
+ except StopIteration:
+ self.parser = None
+ return None
+
+if __name__ == '__main__':
+ import ply.lex as lex
+ lexer = lex.lex()
+
+ # Run a preprocessor
+ import sys
+ f = open(sys.argv[1])
+ input = f.read()
+
+ p = Preprocessor(lexer)
+ p.parse(input,sys.argv[1])
+ while True:
+ tok = p.token()
+ if not tok: break
+ print(p.source, tok)
+
diff --git a/third_party/python/ply/ply/ctokens.py b/third_party/python/ply/ply/ctokens.py
new file mode 100644
index 0000000000..f6f6952d60
--- /dev/null
+++ b/third_party/python/ply/ply/ctokens.py
@@ -0,0 +1,133 @@
+# ----------------------------------------------------------------------
+# ctokens.py
+#
+# Token specifications for symbols in ANSI C and C++. This file is
+# meant to be used as a library in other tokenizers.
+# ----------------------------------------------------------------------
+
+# Reserved words
+
+tokens = [
+ # Literals (identifier, integer constant, float constant, string constant, char const)
+ 'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER',
+
+ # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=)
+ 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO',
+ 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
+ 'LOR', 'LAND', 'LNOT',
+ 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
+
+ # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=)
+ 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL',
+ 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL',
+
+ # Increment/decrement (++,--)
+ 'INCREMENT', 'DECREMENT',
+
+ # Structure dereference (->)
+ 'ARROW',
+
+ # Ternary operator (?)
+ 'TERNARY',
+
+ # Delimiters ( ) [ ] { } , . ; :
+ 'LPAREN', 'RPAREN',
+ 'LBRACKET', 'RBRACKET',
+ 'LBRACE', 'RBRACE',
+ 'COMMA', 'PERIOD', 'SEMI', 'COLON',
+
+ # Ellipsis (...)
+ 'ELLIPSIS',
+]
+
+# Operators
+t_PLUS = r'\+'
+t_MINUS = r'-'
+t_TIMES = r'\*'
+t_DIVIDE = r'/'
+t_MODULO = r'%'
+t_OR = r'\|'
+t_AND = r'&'
+t_NOT = r'~'
+t_XOR = r'\^'
+t_LSHIFT = r'<<'
+t_RSHIFT = r'>>'
+t_LOR = r'\|\|'
+t_LAND = r'&&'
+t_LNOT = r'!'
+t_LT = r'<'
+t_GT = r'>'
+t_LE = r'<='
+t_GE = r'>='
+t_EQ = r'=='
+t_NE = r'!='
+
+# Assignment operators
+
+t_EQUALS = r'='
+t_TIMESEQUAL = r'\*='
+t_DIVEQUAL = r'/='
+t_MODEQUAL = r'%='
+t_PLUSEQUAL = r'\+='
+t_MINUSEQUAL = r'-='
+t_LSHIFTEQUAL = r'<<='
+t_RSHIFTEQUAL = r'>>='
+t_ANDEQUAL = r'&='
+t_OREQUAL = r'\|='
+t_XOREQUAL = r'\^='
+
+# Increment/decrement
+t_INCREMENT = r'\+\+'
+t_DECREMENT = r'--'
+
+# ->
+t_ARROW = r'->'
+
+# ?
+t_TERNARY = r'\?'
+
+# Delimiters
+t_LPAREN = r'\('
+t_RPAREN = r'\)'
+t_LBRACKET = r'\['
+t_RBRACKET = r'\]'
+t_LBRACE = r'\{'
+t_RBRACE = r'\}'
+t_COMMA = r','
+t_PERIOD = r'\.'
+t_SEMI = r';'
+t_COLON = r':'
+t_ELLIPSIS = r'\.\.\.'
+
+# Identifiers
+t_ID = r'[A-Za-z_][A-Za-z0-9_]*'
+
+# Integer literal
+t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?'
+
+# Floating literal
+t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?'
+
+# String literal
+t_STRING = r'\"([^\\\n]|(\\.))*?\"'
+
+# Character constant 'c' or L'c'
+t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\''
+
+# Comment (C-Style)
+def t_COMMENT(t):
+ r'/\*(.|\n)*?\*/'
+ t.lexer.lineno += t.value.count('\n')
+ return t
+
+# Comment (C++-Style)
+def t_CPPCOMMENT(t):
+ r'//.*\n'
+ t.lexer.lineno += 1
+ return t
+
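+# A minimal reuse sketch (assuming this module is importable as
+# ply.ctokens): star-import the specifications into your own lexer
+# module and supply whatever is missing, e.g.
+#
+#     import ply.lex as lex
+#     from ply.ctokens import *   # tokens list plus the t_* rules
+#     t_ignore = ' \t'
+#     def t_error(t):
+#         t.lexer.skip(1)
+#     lexer = lex.lex()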
+
diff --git a/third_party/python/ply/ply/lex.py b/third_party/python/ply/ply/lex.py
new file mode 100644
index 0000000000..3e240d1aa2
--- /dev/null
+++ b/third_party/python/ply/ply/lex.py
@@ -0,0 +1,1100 @@
+# -----------------------------------------------------------------------------
+# ply: lex.py
+#
+# Copyright (C) 2001-2017
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of the David Beazley or Dabeaz LLC may be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
+
+__version__ = '3.10'
+__tabversion__ = '3.10'
+
+import re
+import sys
+import types
+import copy
+import os
+import inspect
+
+# This tuple contains known string types
+try:
+ # Python 2.6
+ StringTypes = (types.StringType, types.UnicodeType)
+except AttributeError:
+ # Python 3.0
+ StringTypes = (str, bytes)
+
+# This regular expression is used to match valid token names
+_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
+
+# Exception thrown when invalid token encountered and no default error
+# handler is defined.
+class LexError(Exception):
+ def __init__(self, message, s):
+ self.args = (message,)
+ self.text = s
+
+
+# Token class. This class is used to represent the tokens produced.
+class LexToken(object):
+ def __str__(self):
+ return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos)
+
+ def __repr__(self):
+ return str(self)
+
+
+# This object is a stand-in for a logging object created by the
+# logging module.
+
+class PlyLogger(object):
+ def __init__(self, f):
+ self.f = f
+
+ def critical(self, msg, *args, **kwargs):
+ self.f.write((msg % args) + '\n')
+
+ def warning(self, msg, *args, **kwargs):
+ self.f.write('WARNING: ' + (msg % args) + '\n')
+
+ def error(self, msg, *args, **kwargs):
+ self.f.write('ERROR: ' + (msg % args) + '\n')
+
+ info = critical
+ debug = critical
+
+
+# Null logger is used when no output is generated. Does nothing.
+class NullLogger(object):
+ def __getattribute__(self, name):
+ return self
+
+ def __call__(self, *args, **kwargs):
+ return self
+
+
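+# Both helpers mimic the subset of the logging API that PLY calls, so a real
+# logger can be substituted.  A commented sketch (the file name is
+# illustrative):
+#
+#     import logging
+#     logging.basicConfig(level=logging.DEBUG, filename='lexlog.txt')
+#     lexer = lex.lex(debug=True, debuglog=logging.getLogger(__name__))
+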
+# -----------------------------------------------------------------------------
+# === Lexing Engine ===
+#
+# The following Lexer class implements the lexer runtime. There are only
+# a few public methods and attributes:
+#
+# input() - Store a new string in the lexer
+# token() - Get the next token
+# clone() - Clone the lexer
+#
+# lineno - Current line number
+# lexpos - Current position in the input string
+# -----------------------------------------------------------------------------
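+# A commented sketch of the interface described above (the rules module name
+# is an assumption for the example):
+#
+#     lexer = lex.lex(module=my_rules)   # build a lexer from a rules module
+#     lexer.input('if (x) y = 1;')       # store the string to tokenize
+#     while True:
+#         tok = lexer.token()            # fetch the next LexToken
+#         if tok is None:
+#             break                      # end of input
+#         print(tok.type, tok.value, tok.lineno, tok.lexpos)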
+
+class Lexer:
+ def __init__(self):
+ self.lexre = None # Master regular expression. This is a list of
+ # tuples (re, findex) where re is a compiled
+ # regular expression and findex is a list
+ # mapping regex group numbers to rules
+ self.lexretext = None # Current regular expression strings
+ self.lexstatere = {} # Dictionary mapping lexer states to master regexs
+ self.lexstateretext = {} # Dictionary mapping lexer states to regex strings
+ self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names
+ self.lexstate = 'INITIAL' # Current lexer state
+ self.lexstatestack = [] # Stack of lexer states
+ self.lexstateinfo = None # State information
+ self.lexstateignore = {} # Dictionary of ignored characters for each state
+ self.lexstateerrorf = {} # Dictionary of error functions for each state
+ self.lexstateeoff = {} # Dictionary of eof functions for each state
+ self.lexreflags = 0 # Optional re compile flags
+ self.lexdata = None # Actual input data (as a string)
+ self.lexpos = 0 # Current position in input text
+ self.lexlen = 0 # Length of the input text
+ self.lexerrorf = None # Error rule (if any)
+ self.lexeoff = None # EOF rule (if any)
+ self.lextokens = None # List of valid tokens
+ self.lexignore = '' # Ignored characters
+ self.lexliterals = '' # Literal characters that can be passed through
+ self.lexmodule = None # Module
+ self.lineno = 1 # Current line number
+ self.lexoptimize = False # Optimized mode
+
+ def clone(self, object=None):
+ c = copy.copy(self)
+
+ # If the object parameter has been supplied, it means we are attaching the
+ # lexer to a new object. In this case, we have to rebind all methods in
+ # the lexstatere and lexstateerrorf tables.
+
+ if object:
+ newtab = {}
+ for key, ritem in self.lexstatere.items():
+ newre = []
+ for cre, findex in ritem:
+ newfindex = []
+ for f in findex:
+ if not f or not f[0]:
+ newfindex.append(f)
+ continue
+ newfindex.append((getattr(object, f[0].__name__), f[1]))
+ newre.append((cre, newfindex))
+ newtab[key] = newre
+ c.lexstatere = newtab
+ c.lexstateerrorf = {}
+ for key, ef in self.lexstateerrorf.items():
+ c.lexstateerrorf[key] = getattr(object, ef.__name__)
+ c.lexmodule = object
+ return c
+
+ # ------------------------------------------------------------
+ # writetab() - Write lexer information to a table file
+ # ------------------------------------------------------------
+ def writetab(self, lextab, outputdir=''):
+ if isinstance(lextab, types.ModuleType):
+ raise IOError("Won't overwrite existing lextab module")
+ basetabmodule = lextab.split('.')[-1]
+ filename = os.path.join(outputdir, basetabmodule) + '.py'
+ with open(filename, 'w') as tf:
+ tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
+ tf.write('_tabversion = %s\n' % repr(__tabversion__))
+ tf.write('_lextokens = set(%s)\n' % repr(tuple(self.lextokens)))
+ tf.write('_lexreflags = %s\n' % repr(self.lexreflags))
+ tf.write('_lexliterals = %s\n' % repr(self.lexliterals))
+ tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
+
+ # Rewrite the lexstatere table, replacing function objects with function names
+ tabre = {}
+ for statename, lre in self.lexstatere.items():
+ titem = []
+ for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
+ titem.append((retext, _funcs_to_names(func, renames)))
+ tabre[statename] = titem
+
+ tf.write('_lexstatere = %s\n' % repr(tabre))
+ tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
+
+ taberr = {}
+ for statename, ef in self.lexstateerrorf.items():
+ taberr[statename] = ef.__name__ if ef else None
+ tf.write('_lexstateerrorf = %s\n' % repr(taberr))
+
+ tabeof = {}
+ for statename, ef in self.lexstateeoff.items():
+ tabeof[statename] = ef.__name__ if ef else None
+ tf.write('_lexstateeoff = %s\n' % repr(tabeof))
+
+ # ------------------------------------------------------------
+ # readtab() - Read lexer information from a tab file
+ # ------------------------------------------------------------
+ def readtab(self, tabfile, fdict):
+ if isinstance(tabfile, types.ModuleType):
+ lextab = tabfile
+ else:
+ exec('import %s' % tabfile)
+ lextab = sys.modules[tabfile]
+
+ if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
+ raise ImportError('Inconsistent PLY version')
+
+ self.lextokens = lextab._lextokens
+ self.lexreflags = lextab._lexreflags
+ self.lexliterals = lextab._lexliterals
+ self.lextokens_all = self.lextokens | set(self.lexliterals)
+ self.lexstateinfo = lextab._lexstateinfo
+ self.lexstateignore = lextab._lexstateignore
+ self.lexstatere = {}
+ self.lexstateretext = {}
+ for statename, lre in lextab._lexstatere.items():
+ titem = []
+ txtitem = []
+ for pat, func_name in lre:
+ titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
+
+ self.lexstatere[statename] = titem
+ self.lexstateretext[statename] = txtitem
+
+ self.lexstateerrorf = {}
+ for statename, ef in lextab._lexstateerrorf.items():
+ self.lexstateerrorf[statename] = fdict[ef]
+
+ self.lexstateeoff = {}
+ for statename, ef in lextab._lexstateeoff.items():
+ self.lexstateeoff[statename] = fdict[ef]
+
+ self.begin('INITIAL')
+
+ # ------------------------------------------------------------
+ # input() - Push a new string into the lexer
+ # ------------------------------------------------------------
+ def input(self, s):
+ # Pull off the first character to see if s looks like a string
+ c = s[:1]
+ if not isinstance(c, StringTypes):
+ raise ValueError('Expected a string')
+ self.lexdata = s
+ self.lexpos = 0
+ self.lexlen = len(s)
+
+ # ------------------------------------------------------------
+ # begin() - Changes the lexing state
+ # ------------------------------------------------------------
+ def begin(self, state):
+ if state not in self.lexstatere:
+ raise ValueError('Undefined state')
+ self.lexre = self.lexstatere[state]
+ self.lexretext = self.lexstateretext[state]
+ self.lexignore = self.lexstateignore.get(state, '')
+ self.lexerrorf = self.lexstateerrorf.get(state, None)
+ self.lexeoff = self.lexstateeoff.get(state, None)
+ self.lexstate = state
+
+ # ------------------------------------------------------------
+ # push_state() - Changes the lexing state and saves old on stack
+ # ------------------------------------------------------------
+ def push_state(self, state):
+ self.lexstatestack.append(self.lexstate)
+ self.begin(state)
+
+ # ------------------------------------------------------------
+ # pop_state() - Restores the previous state
+ # ------------------------------------------------------------
+ def pop_state(self):
+ self.begin(self.lexstatestack.pop())
+
+ # ------------------------------------------------------------
+ # current_state() - Returns the current lexing state
+ # ------------------------------------------------------------
+ def current_state(self):
+ return self.lexstate
+
+ # ------------------------------------------------------------
+ # skip() - Skip ahead n characters
+ # ------------------------------------------------------------
+ def skip(self, n):
+ self.lexpos += n
+
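+    # A commented sketch of how begin()/push_state()/pop_state() are driven
+    # from token rules (the 'comment' state and rule names are assumptions
+    # for the example; rules that consume the comment body are omitted):
+    #
+    #     states = (('comment', 'exclusive'),)
+    #
+    #     def t_COMMENTSTART(t):
+    #         r'/\*'
+    #         t.lexer.push_state('comment')   # remember INITIAL, enter 'comment'
+    #
+    #     def t_comment_END(t):
+    #         r'\*/'
+    #         t.lexer.pop_state()             # return to the remembered state
+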
+ # ------------------------------------------------------------
+    # token() - Return the next token from the Lexer
+ #
+ # Note: This function has been carefully implemented to be as fast
+ # as possible. Don't make changes unless you really know what
+ # you are doing
+ # ------------------------------------------------------------
+ def token(self):
+ # Make local copies of frequently referenced attributes
+ lexpos = self.lexpos
+ lexlen = self.lexlen
+ lexignore = self.lexignore
+ lexdata = self.lexdata
+
+ while lexpos < lexlen:
+            # Short-circuit handling for whitespace, tabs, and other ignored characters
+ if lexdata[lexpos] in lexignore:
+ lexpos += 1
+ continue
+
+ # Look for a regular expression match
+ for lexre, lexindexfunc in self.lexre:
+ m = lexre.match(lexdata, lexpos)
+ if not m:
+ continue
+
+ # Create a token for return
+ tok = LexToken()
+ tok.value = m.group()
+ tok.lineno = self.lineno
+ tok.lexpos = lexpos
+
+ i = m.lastindex
+ func, tok.type = lexindexfunc[i]
+
+ if not func:
+ # If no token type was set, it's an ignored token
+ if tok.type:
+ self.lexpos = m.end()
+ return tok
+ else:
+ lexpos = m.end()
+ break
+
+ lexpos = m.end()
+
+ # If token is processed by a function, call it
+
+ tok.lexer = self # Set additional attributes useful in token rules
+ self.lexmatch = m
+ self.lexpos = lexpos
+
+ newtok = func(tok)
+
+                # Every function must return a token; if it returns nothing, we just move on to the next token
+ if not newtok:
+ lexpos = self.lexpos # This is here in case user has updated lexpos.
+ lexignore = self.lexignore # This is here in case there was a state change
+ break
+
+ # Verify type of the token. If not in the token map, raise an error
+ if not self.lexoptimize:
+ if newtok.type not in self.lextokens_all:
+ raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
+ func.__code__.co_filename, func.__code__.co_firstlineno,
+ func.__name__, newtok.type), lexdata[lexpos:])
+
+ return newtok
+ else:
+ # No match, see if in literals
+ if lexdata[lexpos] in self.lexliterals:
+ tok = LexToken()
+ tok.value = lexdata[lexpos]
+ tok.lineno = self.lineno
+ tok.type = tok.value
+ tok.lexpos = lexpos
+ self.lexpos = lexpos + 1
+ return tok
+
+ # No match. Call t_error() if defined.
+ if self.lexerrorf:
+ tok = LexToken()
+ tok.value = self.lexdata[lexpos:]
+ tok.lineno = self.lineno
+ tok.type = 'error'
+ tok.lexer = self
+ tok.lexpos = lexpos
+ self.lexpos = lexpos
+ newtok = self.lexerrorf(tok)
+ if lexpos == self.lexpos:
+ # Error method didn't change text position at all. This is an error.
+ raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
+ lexpos = self.lexpos
+ if not newtok:
+ continue
+ return newtok
+
+ self.lexpos = lexpos
+ raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
+
+ if self.lexeoff:
+ tok = LexToken()
+ tok.type = 'eof'
+ tok.value = ''
+ tok.lineno = self.lineno
+ tok.lexpos = lexpos
+ tok.lexer = self
+ self.lexpos = lexpos
+ newtok = self.lexeoff(tok)
+ return newtok
+
+ self.lexpos = lexpos + 1
+ if self.lexdata is None:
+ raise RuntimeError('No input string given with input()')
+ return None
+
+ # Iterator interface
+ def __iter__(self):
+ return self
+
+ def next(self):
+ t = self.token()
+ if t is None:
+ raise StopIteration
+ return t
+
+ __next__ = next
+
+# -----------------------------------------------------------------------------
+# === Lex Builder ===
+#
+# The functions and classes below are used to collect lexing information
+# and build a Lexer object from it.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# _get_regex(func)
+#
+# Returns the regular expression assigned to a function either as a doc string
+# or as a .regex attribute attached by the @TOKEN decorator.
+# -----------------------------------------------------------------------------
+def _get_regex(func):
+ return getattr(func, 'regex', func.__doc__)
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the lex() call if none was provided.
+# -----------------------------------------------------------------------------
+def get_caller_module_dict(levels):
+ f = sys._getframe(levels)
+ ldict = f.f_globals.copy()
+ if f.f_globals != f.f_locals:
+ ldict.update(f.f_locals)
+ return ldict
+
+# -----------------------------------------------------------------------------
+# _funcs_to_names()
+#
+# Given a list of regular expression functions, this converts it to a list
+# suitable for output to a table file
+# -----------------------------------------------------------------------------
+def _funcs_to_names(funclist, namelist):
+ result = []
+ for f, name in zip(funclist, namelist):
+ if f and f[0]:
+ result.append((name, f[1]))
+ else:
+ result.append(f)
+ return result
+
+# -----------------------------------------------------------------------------
+# _names_to_funcs()
+#
+# Given a list of regular expression function names, this converts it back to
+# functions.
+# -----------------------------------------------------------------------------
+def _names_to_funcs(namelist, fdict):
+ result = []
+ for n in namelist:
+ if n and n[0]:
+ result.append((fdict[n[0]], n[1]))
+ else:
+ result.append(n)
+ return result
+
+# -----------------------------------------------------------------------------
+# _form_master_re()
+#
+# This function takes a list of all of the regex components and attempts to
+# form the master regular expression. Given limitations in the Python re
+# module, it may be necessary to break the master regex into separate expressions.
+# -----------------------------------------------------------------------------
+def _form_master_re(relist, reflags, ldict, toknames):
+ if not relist:
+ return []
+ regex = '|'.join(relist)
+ try:
+ lexre = re.compile(regex, reflags)
+
+ # Build the index to function map for the matching engine
+ lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
+ lexindexnames = lexindexfunc[:]
+
+ for f, i in lexre.groupindex.items():
+ handle = ldict.get(f, None)
+ if type(handle) in (types.FunctionType, types.MethodType):
+ lexindexfunc[i] = (handle, toknames[f])
+ lexindexnames[i] = f
+ elif handle is not None:
+ lexindexnames[i] = f
+ if f.find('ignore_') > 0:
+ lexindexfunc[i] = (None, None)
+ else:
+ lexindexfunc[i] = (None, toknames[f])
+
+ return [(lexre, lexindexfunc)], [regex], [lexindexnames]
+ except Exception:
+ m = int(len(relist)/2)
+ if m == 0:
+ m = 1
+ llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
+ rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
+ return (llist+rlist), (lre+rre), (lnames+rnames)
+
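+# The master regex is just an alternation of named groups, one per rule;
+# m.lastindex then identifies which rule fired.  A commented sketch of the
+# idea with two hypothetical rules:
+#
+#     >>> master = re.compile(r'(?P<t_NUMBER>\d+)|(?P<t_ID>[a-zA-Z_]\w*)')
+#     >>> master.match('42').lastindex    # 1 -> first group, i.e. t_NUMBER
+#     1
+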
+# -----------------------------------------------------------------------------
+# def _statetoken(s,names)
+#
+# Given a declaration name s of the form "t_<states>_<tokenname>" and a dictionary whose keys are
+# state names, this function returns a tuple (states,tokenname) where states
+# is a tuple of state names and tokenname is the name of the token. For example,
+# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
+# -----------------------------------------------------------------------------
+def _statetoken(s, names):
+ nonstate = 1
+ parts = s.split('_')
+ for i, part in enumerate(parts[1:], 1):
+ if part not in names and part != 'ANY':
+ break
+
+ if i > 1:
+ states = tuple(parts[1:i])
+ else:
+ states = ('INITIAL',)
+
+ if 'ANY' in states:
+ states = tuple(names)
+
+ tokenname = '_'.join(parts[i:])
+ return (states, tokenname)
+
+
+# -----------------------------------------------------------------------------
+# LexerReflect()
+#
+# This class represents information needed to build a lexer as extracted from a
+# user's input file.
+# -----------------------------------------------------------------------------
+class LexerReflect(object):
+ def __init__(self, ldict, log=None, reflags=0):
+ self.ldict = ldict
+ self.error_func = None
+ self.tokens = []
+ self.reflags = reflags
+ self.stateinfo = {'INITIAL': 'inclusive'}
+ self.modules = set()
+ self.error = False
+ self.log = PlyLogger(sys.stderr) if log is None else log
+
+ # Get all of the basic information
+ def get_all(self):
+ self.get_tokens()
+ self.get_literals()
+ self.get_states()
+ self.get_rules()
+
+ # Validate all of the information
+ def validate_all(self):
+ self.validate_tokens()
+ self.validate_literals()
+ self.validate_rules()
+ return self.error
+
+ # Get the tokens map
+ def get_tokens(self):
+ tokens = self.ldict.get('tokens', None)
+ if not tokens:
+ self.log.error('No token list is defined')
+ self.error = True
+ return
+
+ if not isinstance(tokens, (list, tuple)):
+ self.log.error('tokens must be a list or tuple')
+ self.error = True
+ return
+
+ if not tokens:
+ self.log.error('tokens is empty')
+ self.error = True
+ return
+
+ self.tokens = tokens
+
+ # Validate the tokens
+ def validate_tokens(self):
+ terminals = {}
+ for n in self.tokens:
+ if not _is_identifier.match(n):
+ self.log.error("Bad token name '%s'", n)
+ self.error = True
+ if n in terminals:
+ self.log.warning("Token '%s' multiply defined", n)
+ terminals[n] = 1
+
+ # Get the literals specifier
+ def get_literals(self):
+ self.literals = self.ldict.get('literals', '')
+ if not self.literals:
+ self.literals = ''
+
+ # Validate literals
+ def validate_literals(self):
+ try:
+ for c in self.literals:
+ if not isinstance(c, StringTypes) or len(c) > 1:
+ self.log.error('Invalid literal %s. Must be a single character', repr(c))
+ self.error = True
+
+ except TypeError:
+ self.log.error('Invalid literals specification. literals must be a sequence of characters')
+ self.error = True
+
+ def get_states(self):
+ self.states = self.ldict.get('states', None)
+ # Build statemap
+ if self.states:
+ if not isinstance(self.states, (tuple, list)):
+ self.log.error('states must be defined as a tuple or list')
+ self.error = True
+ else:
+ for s in self.states:
+ if not isinstance(s, tuple) or len(s) != 2:
+ self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
+ self.error = True
+ continue
+ name, statetype = s
+ if not isinstance(name, StringTypes):
+ self.log.error('State name %s must be a string', repr(name))
+ self.error = True
+ continue
+ if not (statetype == 'inclusive' or statetype == 'exclusive'):
+ self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
+ self.error = True
+ continue
+ if name in self.stateinfo:
+ self.log.error("State '%s' already defined", name)
+ self.error = True
+ continue
+ self.stateinfo[name] = statetype
+
+ # Get all of the symbols with a t_ prefix and sort them into various
+ # categories (functions, strings, error functions, and ignore characters)
+
+ def get_rules(self):
+ tsymbols = [f for f in self.ldict if f[:2] == 't_']
+
+ # Now build up a list of functions and a list of strings
+ self.toknames = {} # Mapping of symbols to token names
+ self.funcsym = {} # Symbols defined as functions
+ self.strsym = {} # Symbols defined as strings
+ self.ignore = {} # Ignore strings by state
+ self.errorf = {} # Error functions by state
+ self.eoff = {} # EOF functions by state
+
+ for s in self.stateinfo:
+ self.funcsym[s] = []
+ self.strsym[s] = []
+
+ if len(tsymbols) == 0:
+ self.log.error('No rules of the form t_rulename are defined')
+ self.error = True
+ return
+
+ for f in tsymbols:
+ t = self.ldict[f]
+ states, tokname = _statetoken(f, self.stateinfo)
+ self.toknames[f] = tokname
+
+ if hasattr(t, '__call__'):
+ if tokname == 'error':
+ for s in states:
+ self.errorf[s] = t
+ elif tokname == 'eof':
+ for s in states:
+ self.eoff[s] = t
+ elif tokname == 'ignore':
+ line = t.__code__.co_firstlineno
+ file = t.__code__.co_filename
+ self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
+ self.error = True
+ else:
+ for s in states:
+ self.funcsym[s].append((f, t))
+ elif isinstance(t, StringTypes):
+ if tokname == 'ignore':
+ for s in states:
+ self.ignore[s] = t
+ if '\\' in t:
+ self.log.warning("%s contains a literal backslash '\\'", f)
+
+ elif tokname == 'error':
+ self.log.error("Rule '%s' must be defined as a function", f)
+ self.error = True
+ else:
+ for s in states:
+ self.strsym[s].append((f, t))
+ else:
+ self.log.error('%s not defined as a function or string', f)
+ self.error = True
+
+ # Sort the functions by line number
+ for f in self.funcsym.values():
+ f.sort(key=lambda x: x[1].__code__.co_firstlineno)
+
+ # Sort the strings by regular expression length
+ for s in self.strsym.values():
+ s.sort(key=lambda x: len(x[1]), reverse=True)
+
+ # Validate all of the t_rules collected
+ def validate_rules(self):
+ for state in self.stateinfo:
+ # Validate all rules defined by functions
+
+ for fname, f in self.funcsym[state]:
+ line = f.__code__.co_firstlineno
+ file = f.__code__.co_filename
+ module = inspect.getmodule(f)
+ self.modules.add(module)
+
+ tokname = self.toknames[fname]
+ if isinstance(f, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ nargs = f.__code__.co_argcount
+ if nargs > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
+ self.error = True
+ continue
+
+ if nargs < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
+ self.error = True
+ continue
+
+ if not _get_regex(f):
+ self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
+ self.error = True
+ continue
+
+ try:
+ c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
+ if c.match(''):
+ self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
+ self.error = True
+ except re.error as e:
+ self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
+ if '#' in _get_regex(f):
+ self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
+ self.error = True
+
+ # Validate all rules defined by strings
+ for name, r in self.strsym[state]:
+ tokname = self.toknames[name]
+ if tokname == 'error':
+ self.log.error("Rule '%s' must be defined as a function", name)
+ self.error = True
+ continue
+
+ if tokname not in self.tokens and tokname.find('ignore_') < 0:
+ self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
+ self.error = True
+ continue
+
+ try:
+ c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
+                    if c.match(''):
+ self.log.error("Regular expression for rule '%s' matches empty string", name)
+ self.error = True
+ except re.error as e:
+ self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
+ if '#' in r:
+ self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
+ self.error = True
+
+ if not self.funcsym[state] and not self.strsym[state]:
+ self.log.error("No rules defined for state '%s'", state)
+ self.error = True
+
+ # Validate the error function
+ efunc = self.errorf.get(state, None)
+ if efunc:
+ f = efunc
+ line = f.__code__.co_firstlineno
+ file = f.__code__.co_filename
+ module = inspect.getmodule(f)
+ self.modules.add(module)
+
+ if isinstance(f, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ nargs = f.__code__.co_argcount
+ if nargs > reqargs:
+ self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
+ self.error = True
+
+ if nargs < reqargs:
+ self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
+ self.error = True
+
+ for module in self.modules:
+ self.validate_module(module)
+
+ # -----------------------------------------------------------------------------
+ # validate_module()
+ #
+ # This checks to see if there are duplicated t_rulename() functions or strings
+    # in the module that defines the lexer rules. This is done using a simple
+    # regular expression match on each line in the source code of the given module.
+ # -----------------------------------------------------------------------------
+
+ def validate_module(self, module):
+ try:
+ lines, linen = inspect.getsourcelines(module)
+ except IOError:
+ return
+
+ fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
+ sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
+
+ counthash = {}
+ linen += 1
+ for line in lines:
+ m = fre.match(line)
+ if not m:
+ m = sre.match(line)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ filename = inspect.getsourcefile(module)
+ self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
+ self.error = True
+ linen += 1
+
+# -----------------------------------------------------------------------------
+# lex(module)
+#
+# Build all of the regular expression rules from definitions in the supplied module
+# -----------------------------------------------------------------------------
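+# A commented sketch of typical calls (module and table names are
+# illustrative):
+#
+#     lexer = lex.lex()                              # rules from the caller's module
+#     lexer = lex.lex(module=my_rules, debug=True)   # rules from an explicit module
+#     lexer = lex.lex(optimize=1, lextab='mytab')    # cache the tables in mytab.py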
+def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
+ reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
+
+ if lextab is None:
+ lextab = 'lextab'
+
+ global lexer
+
+ ldict = None
+ stateinfo = {'INITIAL': 'inclusive'}
+ lexobj = Lexer()
+ lexobj.lexoptimize = optimize
+ global token, input
+
+ if errorlog is None:
+ errorlog = PlyLogger(sys.stderr)
+
+ if debug:
+ if debuglog is None:
+ debuglog = PlyLogger(sys.stderr)
+
+    # If an object instance was supplied, pull the rule definitions from it
+ if object:
+ module = object
+
+    # Get the module dictionary used for the lexer
+ if module:
+ _items = [(k, getattr(module, k)) for k in dir(module)]
+ ldict = dict(_items)
+ # If no __file__ attribute is available, try to obtain it from the __module__ instead
+ if '__file__' not in ldict:
+ ldict['__file__'] = sys.modules[ldict['__module__']].__file__
+ else:
+ ldict = get_caller_module_dict(2)
+
+    # Determine if the module is part of a package.
+ # If so, fix the tabmodule setting so that tables load correctly
+ pkg = ldict.get('__package__')
+ if pkg and isinstance(lextab, str):
+ if '.' not in lextab:
+ lextab = pkg + '.' + lextab
+
+    # Collect lexer information from the dictionary
+ linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
+ linfo.get_all()
+ if not optimize:
+ if linfo.validate_all():
+ raise SyntaxError("Can't build lexer")
+
+ if optimize and lextab:
+ try:
+ lexobj.readtab(lextab, ldict)
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+ return lexobj
+
+ except ImportError:
+ pass
+
+ # Dump some basic debugging information
+ if debug:
+ debuglog.info('lex: tokens = %r', linfo.tokens)
+ debuglog.info('lex: literals = %r', linfo.literals)
+ debuglog.info('lex: states = %r', linfo.stateinfo)
+
+ # Build a dictionary of valid token names
+ lexobj.lextokens = set()
+ for n in linfo.tokens:
+ lexobj.lextokens.add(n)
+
+ # Get literals specification
+ if isinstance(linfo.literals, (list, tuple)):
+ lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
+ else:
+ lexobj.lexliterals = linfo.literals
+
+ lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
+
+ # Get the stateinfo dictionary
+ stateinfo = linfo.stateinfo
+
+ regexs = {}
+ # Build the master regular expressions
+ for state in stateinfo:
+ regex_list = []
+
+ # Add rules defined by functions first
+ for fname, f in linfo.funcsym[state]:
+ line = f.__code__.co_firstlineno
+ file = f.__code__.co_filename
+ regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
+ if debug:
+ debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
+
+ # Now add all of the simple rules
+ for name, r in linfo.strsym[state]:
+ regex_list.append('(?P<%s>%s)' % (name, r))
+ if debug:
+ debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
+
+ regexs[state] = regex_list
+
+ # Build the master regular expressions
+
+ if debug:
+ debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
+
+ for state in regexs:
+ lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
+ lexobj.lexstatere[state] = lexre
+ lexobj.lexstateretext[state] = re_text
+ lexobj.lexstaterenames[state] = re_names
+ if debug:
+ for i, text in enumerate(re_text):
+ debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
+
+ # For inclusive states, we need to add the regular expressions from the INITIAL state
+ for state, stype in stateinfo.items():
+ if state != 'INITIAL' and stype == 'inclusive':
+ lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
+ lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
+ lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
+
+ lexobj.lexstateinfo = stateinfo
+ lexobj.lexre = lexobj.lexstatere['INITIAL']
+ lexobj.lexretext = lexobj.lexstateretext['INITIAL']
+ lexobj.lexreflags = reflags
+
+ # Set up ignore variables
+ lexobj.lexstateignore = linfo.ignore
+ lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
+
+ # Set up error functions
+ lexobj.lexstateerrorf = linfo.errorf
+ lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
+ if not lexobj.lexerrorf:
+ errorlog.warning('No t_error rule is defined')
+
+ # Set up eof functions
+ lexobj.lexstateeoff = linfo.eoff
+ lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
+
+ # Check state information for ignore and error rules
+ for s, stype in stateinfo.items():
+ if stype == 'exclusive':
+ if s not in linfo.errorf:
+ errorlog.warning("No error rule is defined for exclusive state '%s'", s)
+ if s not in linfo.ignore and lexobj.lexignore:
+ errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
+ elif stype == 'inclusive':
+ if s not in linfo.errorf:
+ linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
+ if s not in linfo.ignore:
+ linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
+
+ # Create global versions of the token() and input() functions
+ token = lexobj.token
+ input = lexobj.input
+ lexer = lexobj
+
+ # If in optimize mode, we write the lextab
+ if lextab and optimize:
+ if outputdir is None:
+ # If no output directory is set, the location of the output files
+ # is determined according to the following rules:
+ # - If lextab specifies a package, files go into that package directory
+ # - Otherwise, files go in the same directory as the specifying module
+ if isinstance(lextab, types.ModuleType):
+ srcfile = lextab.__file__
+ else:
+ if '.' not in lextab:
+ srcfile = ldict['__file__']
+ else:
+ parts = lextab.split('.')
+ pkgname = '.'.join(parts[:-1])
+ exec('import %s' % pkgname)
+ srcfile = getattr(sys.modules[pkgname], '__file__', '')
+ outputdir = os.path.dirname(srcfile)
+ try:
+ lexobj.writetab(lextab, outputdir)
+ except IOError as e:
+ errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
+
+ return lexobj
+
+# -----------------------------------------------------------------------------
+# runmain()
+#
+# This runs the lexer as a main program
+# -----------------------------------------------------------------------------
+
+def runmain(lexer=None, data=None):
+ if not data:
+ try:
+ filename = sys.argv[1]
+ f = open(filename)
+ data = f.read()
+ f.close()
+ except IndexError:
+ sys.stdout.write('Reading from standard input (type EOF to end):\n')
+ data = sys.stdin.read()
+
+ if lexer:
+ _input = lexer.input
+ else:
+ _input = input
+ _input(data)
+ if lexer:
+ _token = lexer.token
+ else:
+ _token = token
+
+ while True:
+ tok = _token()
+ if not tok:
+ break
+ sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
+
+# -----------------------------------------------------------------------------
+# @TOKEN(regex)
+#
+# This decorator function can be used to set the regular expression on a rule
+# function when the pattern needs to be supplied some way other than through
+# its docstring
+# -----------------------------------------------------------------------------
+
+def TOKEN(r):
+ def set_regex(f):
+ if hasattr(r, '__call__'):
+ f.regex = _get_regex(r)
+ else:
+ f.regex = r
+ return f
+ return set_regex
+
+# Alternative spelling of the TOKEN decorator
+Token = TOKEN
+
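+# A commented sketch of the decorator (the identifier pattern is
+# illustrative):
+#
+#     identifier = r'[a-zA-Z_][a-zA-Z0-9_]*'
+#
+#     @TOKEN(identifier)
+#     def t_ID(t):
+#         # the regex comes from the decorator rather than the docstring
+#         return t
+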
diff --git a/third_party/python/ply/ply/yacc.py b/third_party/python/ply/ply/yacc.py
new file mode 100644
index 0000000000..03bd86ee07
--- /dev/null
+++ b/third_party/python/ply/ply/yacc.py
@@ -0,0 +1,3494 @@
+# -----------------------------------------------------------------------------
+# ply: yacc.py
+#
+# Copyright (C) 2001-2017
+# David M. Beazley (Dabeaz LLC)
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of David Beazley nor Dabeaz LLC may be used to
+# endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------------------------
+#
+# This implements an LR parser that is constructed from grammar rules defined
+# as Python functions. The grammar is specified by supplying the BNF inside
+# Python documentation strings. The inspiration for this technique was borrowed
+# from John Aycock's Spark parsing system. PLY might be viewed as a cross between
+# Spark and the GNU bison utility.
+#
+# The current implementation is only somewhat object-oriented. The
+# LR parser itself is defined in terms of an object (which allows multiple
+# parsers to co-exist). However, most of the variables used during table
+# construction are defined in terms of global variables. Users shouldn't
+# notice unless they are trying to define multiple parsers at the same
+# time using threads (in which case they should have their head examined).
+#
+# This implementation supports both SLR and LALR(1) parsing. LALR(1)
+# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
+# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
+# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
+# by the more efficient DeRemer and Pennello algorithm.
+#
+# :::::::: WARNING :::::::
+#
+# Construction of LR parsing tables is fairly complicated and expensive.
+# To make this module run fast, a *LOT* of work has been put into
+# optimization---often at the expense of readability and what some might
+# consider to be good Python "coding style." Modify the code at your
+# own risk!
+# ----------------------------------------------------------------------------
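+# A commented sketch of the docstring convention described above (token and
+# rule names are illustrative):
+#
+#     def p_expression_plus(p):
+#         'expression : expression PLUS term'
+#         p[0] = p[1] + p[3]
+#
+#     parser = yacc.yacc()            # build the tables from all p_* rules
+#     result = parser.parse(data)     # run the LR engine over token input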
+
+import re
+import types
+import sys
+import os.path
+import inspect
+import base64
+import warnings
+
+__version__ = '3.10'
+__tabversion__ = '3.10'
+
+#-----------------------------------------------------------------------------
+# === User configurable parameters ===
+#
+# Change these to modify the default behavior of yacc (if you wish)
+#-----------------------------------------------------------------------------
+
+yaccdebug = True             # Debugging mode. If set, yacc generates a
+                             # 'parser.out' file in the current directory
+
+debug_file = 'parser.out' # Default name of the debugging file
+tab_module = 'parsetab' # Default name of the table module
+default_lr = 'LALR' # Default LR table generation method
+
+error_count = 3 # Number of symbols that must be shifted to leave recovery mode
+
+yaccdevel = False # Set to True if developing yacc. This turns off optimized
+ # implementations of certain functions.
+
+resultlimit = 40 # Size limit of results when running in debug mode.
+
+pickle_protocol = 0 # Protocol to use when writing pickle files
+
+# String type-checking compatibility
+if sys.version_info[0] < 3:
+ string_types = basestring
+else:
+ string_types = str
+
+MAXINT = sys.maxsize
+
+# This object is a stand-in for a logging object created by the
+# logging module. PLY will use this by default to create things
+# such as the parser.out file. If a user wants more detailed
+# information, they can create their own logging object and pass
+# it into PLY.
+
+class PlyLogger(object):
+ def __init__(self, f):
+ self.f = f
+
+ def debug(self, msg, *args, **kwargs):
+ self.f.write((msg % args) + '\n')
+
+ info = debug
+
+ def warning(self, msg, *args, **kwargs):
+ self.f.write('WARNING: ' + (msg % args) + '\n')
+
+ def error(self, msg, *args, **kwargs):
+ self.f.write('ERROR: ' + (msg % args) + '\n')
+
+ critical = debug
+
+# Null logger is used when no output is generated. Does nothing.
+                titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
+                txtitem.append(pat)   # retain the regex text so begin() and clone() keep working
+ def __getattribute__(self, name):
+ return self
+
+ def __call__(self, *args, **kwargs):
+ return self
+
+# Exception raised for yacc-related errors
+class YaccError(Exception):
+ pass
+
+# Format the result message that the parser produces when running in debug mode.
+def format_result(r):
+ repr_str = repr(r)
+ if '\n' in repr_str:
+ repr_str = repr(repr_str)
+ if len(repr_str) > resultlimit:
+ repr_str = repr_str[:resultlimit] + ' ...'
+ result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str)
+ return result
+
+# Format stack entries when the parser is running in debug mode
+def format_stack_entry(r):
+ repr_str = repr(r)
+ if '\n' in repr_str:
+ repr_str = repr(repr_str)
+ if len(repr_str) < 16:
+ return repr_str
+ else:
+ return '<%s @ 0x%x>' % (type(r).__name__, id(r))
+
+# Panic mode error recovery support. This feature is being reworked--much of the
+# code here is to offer a deprecation/backwards compatible transition
+
+_errok = None
+_token = None
+_restart = None
+_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
+Instead, invoke the methods on the associated parser instance:
+
+ def p_error(p):
+ ...
+ # Use parser.errok(), parser.token(), parser.restart()
+ ...
+
+ parser = yacc.yacc()
+'''
+
+def errok():
+ warnings.warn(_warnmsg)
+ return _errok()
+
+def restart():
+ warnings.warn(_warnmsg)
+ return _restart()
+
+def token():
+ warnings.warn(_warnmsg)
+ return _token()
+
+# Utility function to call the p_error() function with some deprecation hacks
+def call_errorfunc(errorfunc, token, parser):
+ global _errok, _token, _restart
+ _errok = parser.errok
+ _token = parser.token
+ _restart = parser.restart
+ r = errorfunc(token)
+ try:
+ del _errok, _token, _restart
+ except NameError:
+ pass
+ return r
+
+#-----------------------------------------------------------------------------
+# === LR Parsing Engine ===
+#
+# The following classes are used for the LR parser itself. These are not
+# used during table construction and are independent of the actual LR
+# table generation algorithm
+#-----------------------------------------------------------------------------
+
+# This class is used to hold non-terminal grammar symbols during parsing.
+# It normally has the following attributes set:
+# .type = Grammar symbol type
+# .value = Symbol value
+# .lineno = Starting line number
+# .endlineno = Ending line number (optional, set automatically)
+# .lexpos = Starting lex position
+# .endlexpos = Ending lex position (optional, set automatically)
+
+class YaccSymbol:
+ def __str__(self):
+ return self.type
+
+ def __repr__(self):
+ return str(self)
+
+# This class is a wrapper around the objects actually passed to each
+# grammar rule. Index lookup returns, and index assignment sets, the
+# .value attribute of the underlying YaccSymbol object.
+# The lineno() method returns the line number of a given
+# item (or 0 if not defined). The linespan() method returns
+# a tuple of (startline,endline) representing the range of lines
+# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
+# representing the range of positional information for a symbol.
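+#
+# A commented sketch of these accessors inside a rule (the grammar symbols
+# are illustrative):
+#
+#     def p_assign(p):
+#         'assign : ID EQUALS expression'
+#         p[0] = (p[1], p[3])      # reads/sets the underlying .value slots
+#         line = p.lineno(1)       # line number of the ID token
+#         span = p.lexspan(3)      # (start, end) lex positions of expression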
+
+class YaccProduction:
+ def __init__(self, s, stack=None):
+ self.slice = s
+ self.stack = stack
+ self.lexer = None
+ self.parser = None
+
+ def __getitem__(self, n):
+ if isinstance(n, slice):
+ return [s.value for s in self.slice[n]]
+ elif n >= 0:
+ return self.slice[n].value
+ else:
+ return self.stack[n].value
+
+ def __setitem__(self, n, v):
+ self.slice[n].value = v
+
+ def __getslice__(self, i, j):
+ return [s.value for s in self.slice[i:j]]
+
+ def __len__(self):
+ return len(self.slice)
+
+ def lineno(self, n):
+ return getattr(self.slice[n], 'lineno', 0)
+
+ def set_lineno(self, n, lineno):
+ self.slice[n].lineno = lineno
+
+ def linespan(self, n):
+ startline = getattr(self.slice[n], 'lineno', 0)
+ endline = getattr(self.slice[n], 'endlineno', startline)
+ return startline, endline
+
+ def lexpos(self, n):
+ return getattr(self.slice[n], 'lexpos', 0)
+
+ def lexspan(self, n):
+ startpos = getattr(self.slice[n], 'lexpos', 0)
+ endpos = getattr(self.slice[n], 'endlexpos', startpos)
+ return startpos, endpos
+
+ def error(self):
+ raise SyntaxError
+
+# -----------------------------------------------------------------------------
+# == LRParser ==
+#
+# The LR Parsing engine.
+# -----------------------------------------------------------------------------
+
+class LRParser:
+ def __init__(self, lrtab, errorf):
+ self.productions = lrtab.lr_productions
+ self.action = lrtab.lr_action
+ self.goto = lrtab.lr_goto
+ self.errorfunc = errorf
+ self.set_defaulted_states()
+ self.errorok = True
+
+ def errok(self):
+ self.errorok = True
+
+ def restart(self):
+ del self.statestack[:]
+ del self.symstack[:]
+ sym = YaccSymbol()
+ sym.type = '$end'
+ self.symstack.append(sym)
+ self.statestack.append(0)
+
+ # Defaulted state support.
+ # This method identifies parser states where there is only one possible reduction action.
+    # For such states, the parser can choose to make a rule reduction without consuming
+ # the next look-ahead token. This delayed invocation of the tokenizer can be useful in
+ # certain kinds of advanced parsing situations where the lexer and parser interact with
+ # each other or change states (i.e., manipulation of scope, lexer states, etc.).
+ #
+ # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
+ def set_defaulted_states(self):
+ self.defaulted_states = {}
+ for state, actions in self.action.items():
+ rules = list(actions.values())
+ if len(rules) == 1 and rules[0] < 0:
+ self.defaulted_states[state] = rules[0]
+
+ def disable_defaulted_states(self):
+ self.defaulted_states = {}
+
+ def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ if debug or yaccdevel:
+ if isinstance(debug, int):
+ debug = PlyLogger(sys.stderr)
+ return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
+ elif tracking:
+ return self.parseopt(input, lexer, debug, tracking, tokenfunc)
+ else:
+ return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
+
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parsedebug().
+ #
+ # This is the debugging enabled version of parse(). All changes made to the
+ # parsing engine should be made here. Optimized versions of this function
+ # are automatically created by the ply/ygen.py script. This script cuts out
+ # sections enclosed in markers such as this:
+ #
+ # #--! DEBUG
+ # statements
+ # #--! DEBUG
+ #
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ #--! parsedebug-start
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ defaulted_states = self.defaulted_states # Local reference to defaulted states
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+ #--! DEBUG
+ debug.info('PLY: PARSE DEBUG START')
+ #--! DEBUG
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ from . import lex
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+ # Set the parser() token method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [] # Stack of grammar symbols
+ self.symstack = symstack
+
+ pslice.stack = symstack # Put in the production
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while True:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+ #--! DEBUG
+ debug.debug('')
+ debug.debug('State : %s', state)
+ #--! DEBUG
+
+ if state not in defaulted_states:
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+ else:
+ t = defaulted_states[state]
+ #--! DEBUG
+ debug.debug('Defaulted state %s: Reduce using %d', state, -t)
+ #--! DEBUG
+
+ #--! DEBUG
+ debug.debug('Stack : %s',
+ ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ #--! DEBUG
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+ #--! DEBUG
+ debug.debug('Action : Shift and goto state %s', t)
+ #--! DEBUG
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount:
+ errorcount -= 1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+ #--! DEBUG
+ if plen:
+ debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
+ '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
+ goto[statestack[-1-plen]][pname])
+ else:
+ debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
+ goto[statestack[-1]][pname])
+
+ #--! DEBUG
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ #--! TRACKING
+ if tracking:
+ t1 = targ[1]
+ sym.lineno = t1.lineno
+ sym.lexpos = t1.lexpos
+ t1 = targ[-1]
+ sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
+ sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
+ #--! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ self.state = state
+ p.callable(pslice)
+ del statestack[-plen:]
+ #--! DEBUG
+ debug.info('Result : %s', format_result(pslice[0]))
+ #--! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ #--! TRACKING
+ if tracking:
+ sym.lineno = lexer.lineno
+ sym.lexpos = lexer.lexpos
+ #--! TRACKING
+
+ targ = [sym]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ self.state = state
+ p.callable(pslice)
+ #--! DEBUG
+ debug.info('Result : %s', format_result(pslice[0]))
+ #--! DEBUG
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n, 'value', None)
+ #--! DEBUG
+ debug.info('Done : Returning %s', format_result(result))
+ debug.info('PLY: PARSE DEBUG END')
+ #--! DEBUG
+ return result
+
+ if t is None:
+
+ #--! DEBUG
+ debug.error('Error : %s',
+ ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
+ #--! DEBUG
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+ # the tokenstack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+                # In addition to pushing the error token, we call
+ # the user defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = False
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken, 'lexer'):
+ errtoken.lexer = lexer
+ self.state = state
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken, 'lineno'):
+ lineno = lookahead.lineno
+ else:
+ lineno = 0
+ if lineno:
+ sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
+ else:
+                                sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
+ else:
+ sys.stderr.write('yacc: Parse error in input. EOF\n')
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ #--! TRACKING
+ if tracking:
+ sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
+ sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
+ #--! TRACKING
+ lookahead = None
+ continue
+
+ # Create the error symbol for the first time and make it the new lookahead symbol
+ t = YaccSymbol()
+ t.type = 'error'
+
+ if hasattr(lookahead, 'lineno'):
+ t.lineno = t.endlineno = lookahead.lineno
+ if hasattr(lookahead, 'lexpos'):
+ t.lexpos = t.endlexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ #--! TRACKING
+ if tracking:
+ lookahead.lineno = sym.lineno
+ lookahead.lexpos = sym.lexpos
+ #--! TRACKING
+ statestack.pop()
+ state = statestack[-1]
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError('yacc: internal parser error!!!\n')
+
+ #--! parsedebug-end
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt().
+ #
+ # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY!
+ # This code is automatically generated by the ply/ygen.py script. Make
+ # changes to the parsedebug() method instead.
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ #--! parseopt-start
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ defaulted_states = self.defaulted_states # Local reference to defaulted states
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ from . import lex
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+        # Set the parser's token() method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [] # Stack of grammar symbols
+ self.symstack = symstack
+
+        pslice.stack = symstack # Make the symbol stack available to grammar rules
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while True:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+
+ if state not in defaulted_states:
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+ else:
+ t = defaulted_states[state]
+
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount:
+ errorcount -= 1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+ #--! TRACKING
+ if tracking:
+ t1 = targ[1]
+ sym.lineno = t1.lineno
+ sym.lexpos = t1.lexpos
+ t1 = targ[-1]
+ sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
+ sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
+ #--! TRACKING
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ self.state = state
+ p.callable(pslice)
+ del statestack[-plen:]
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+ #--! TRACKING
+ if tracking:
+ sym.lineno = lexer.lineno
+ sym.lexpos = lexer.lexpos
+ #--! TRACKING
+
+ targ = [sym]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ self.state = state
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n, 'value', None)
+ return result
+
+ if t is None:
+
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+                # the lookahead stack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+                # In addition to pushing the error token, we call
+                # the user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = False
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken, 'lexer'):
+ errtoken.lexer = lexer
+ self.state = state
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken, 'lineno'):
+                                lineno = errtoken.lineno
+ else:
+ lineno = 0
+ if lineno:
+ sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
+ else:
+                                sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
+ else:
+ sys.stderr.write('yacc: Parse error in input. EOF\n')
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ #--! TRACKING
+ if tracking:
+ sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
+ sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
+ #--! TRACKING
+ lookahead = None
+ continue
+
+ # Create the error symbol for the first time and make it the new lookahead symbol
+ t = YaccSymbol()
+ t.type = 'error'
+
+ if hasattr(lookahead, 'lineno'):
+ t.lineno = t.endlineno = lookahead.lineno
+ if hasattr(lookahead, 'lexpos'):
+ t.lexpos = t.endlexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ #--! TRACKING
+ if tracking:
+ lookahead.lineno = sym.lineno
+ lookahead.lexpos = sym.lexpos
+ #--! TRACKING
+ statestack.pop()
+ state = statestack[-1]
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError('yacc: internal parser error!!!\n')
+
+ #--! parseopt-end
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # parseopt_notrack().
+ #
+ # Optimized version of parseopt() with line number tracking removed.
+ # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
+ # by the ply/ygen.py script. Make changes to the parsedebug() method instead.
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
+ #--! parseopt-notrack-start
+ lookahead = None # Current lookahead symbol
+ lookaheadstack = [] # Stack of lookahead symbols
+ actions = self.action # Local reference to action table (to avoid lookup on self.)
+ goto = self.goto # Local reference to goto table (to avoid lookup on self.)
+ prod = self.productions # Local reference to production list (to avoid lookup on self.)
+ defaulted_states = self.defaulted_states # Local reference to defaulted states
+ pslice = YaccProduction(None) # Production object passed to grammar rules
+ errorcount = 0 # Used during error recovery
+
+
+ # If no lexer was given, we will try to use the lex module
+ if not lexer:
+ from . import lex
+ lexer = lex.lexer
+
+ # Set up the lexer and parser objects on pslice
+ pslice.lexer = lexer
+ pslice.parser = self
+
+ # If input was supplied, pass to lexer
+ if input is not None:
+ lexer.input(input)
+
+ if tokenfunc is None:
+ # Tokenize function
+ get_token = lexer.token
+ else:
+ get_token = tokenfunc
+
+        # Set the parser's token() method (sometimes used in error recovery)
+ self.token = get_token
+
+ # Set up the state and symbol stacks
+
+ statestack = [] # Stack of parsing states
+ self.statestack = statestack
+ symstack = [] # Stack of grammar symbols
+ self.symstack = symstack
+
+        pslice.stack = symstack # Make the symbol stack available to grammar rules
+ errtoken = None # Err token
+
+ # The start state is assumed to be (0,$end)
+
+ statestack.append(0)
+ sym = YaccSymbol()
+ sym.type = '$end'
+ symstack.append(sym)
+ state = 0
+ while True:
+ # Get the next symbol on the input. If a lookahead symbol
+ # is already set, we just use that. Otherwise, we'll pull
+ # the next token off of the lookaheadstack or from the lexer
+
+
+ if state not in defaulted_states:
+ if not lookahead:
+ if not lookaheadstack:
+ lookahead = get_token() # Get the next token
+ else:
+ lookahead = lookaheadstack.pop()
+ if not lookahead:
+ lookahead = YaccSymbol()
+ lookahead.type = '$end'
+
+ # Check the action table
+ ltype = lookahead.type
+ t = actions[state].get(ltype)
+ else:
+ t = defaulted_states[state]
+
+
+ if t is not None:
+ if t > 0:
+ # shift a symbol on the stack
+ statestack.append(t)
+ state = t
+
+
+ symstack.append(lookahead)
+ lookahead = None
+
+ # Decrease error count on successful shift
+ if errorcount:
+ errorcount -= 1
+ continue
+
+ if t < 0:
+ # reduce a symbol on the stack, emit a production
+ p = prod[-t]
+ pname = p.name
+ plen = p.len
+
+ # Get production function
+ sym = YaccSymbol()
+ sym.type = pname # Production name
+ sym.value = None
+
+
+ if plen:
+ targ = symstack[-plen-1:]
+ targ[0] = sym
+
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # below as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ del symstack[-plen:]
+ self.state = state
+ p.callable(pslice)
+ del statestack[-plen:]
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ symstack.extend(targ[1:-1]) # Put the production slice back on the stack
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ else:
+
+
+ targ = [sym]
+
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ # The code enclosed in this section is duplicated
+ # above as a performance optimization. Make sure
+ # changes get made in both locations.
+
+ pslice.slice = targ
+
+ try:
+ # Call the grammar rule with our special slice object
+ self.state = state
+ p.callable(pslice)
+ symstack.append(sym)
+ state = goto[statestack[-1]][pname]
+ statestack.append(state)
+ except SyntaxError:
+ # If an error was set. Enter error recovery state
+ lookaheadstack.append(lookahead) # Save the current lookahead token
+ statestack.pop() # Pop back one state (before the reduce)
+ state = statestack[-1]
+ sym.type = 'error'
+ sym.value = 'error'
+ lookahead = sym
+ errorcount = error_count
+ self.errorok = False
+
+ continue
+ # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+
+ if t == 0:
+ n = symstack[-1]
+ result = getattr(n, 'value', None)
+ return result
+
+ if t is None:
+
+
+ # We have some kind of parsing error here. To handle
+ # this, we are going to push the current token onto
+                # the lookahead stack and replace it with an 'error' token.
+ # If there are any synchronization rules, they may
+ # catch it.
+ #
+                # In addition to pushing the error token, we call
+                # the user-defined p_error() function if this is the
+ # first syntax error. This function is only called if
+ # errorcount == 0.
+ if errorcount == 0 or self.errorok:
+ errorcount = error_count
+ self.errorok = False
+ errtoken = lookahead
+ if errtoken.type == '$end':
+ errtoken = None # End of file!
+ if self.errorfunc:
+ if errtoken and not hasattr(errtoken, 'lexer'):
+ errtoken.lexer = lexer
+ self.state = state
+ tok = call_errorfunc(self.errorfunc, errtoken, self)
+ if self.errorok:
+ # User must have done some kind of panic
+ # mode recovery on their own. The
+ # returned token is the next lookahead
+ lookahead = tok
+ errtoken = None
+ continue
+ else:
+ if errtoken:
+ if hasattr(errtoken, 'lineno'):
+                                lineno = errtoken.lineno
+ else:
+ lineno = 0
+ if lineno:
+ sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
+ else:
+                                sys.stderr.write('yacc: Syntax error, token=%s\n' % errtoken.type)
+ else:
+ sys.stderr.write('yacc: Parse error in input. EOF\n')
+ return
+
+ else:
+ errorcount = error_count
+
+ # case 1: the statestack only has 1 entry on it. If we're in this state, the
+ # entire parse has been rolled back and we're completely hosed. The token is
+ # discarded and we just keep going.
+
+ if len(statestack) <= 1 and lookahead.type != '$end':
+ lookahead = None
+ errtoken = None
+ state = 0
+ # Nuke the pushback stack
+ del lookaheadstack[:]
+ continue
+
+ # case 2: the statestack has a couple of entries on it, but we're
+ # at the end of the file. nuke the top entry and generate an error token
+
+ # Start nuking entries on the stack
+ if lookahead.type == '$end':
+ # Whoa. We're really hosed here. Bail out
+ return
+
+ if lookahead.type != 'error':
+ sym = symstack[-1]
+ if sym.type == 'error':
+ # Hmmm. Error is on top of stack, we'll just nuke input
+ # symbol and continue
+ lookahead = None
+ continue
+
+ # Create the error symbol for the first time and make it the new lookahead symbol
+ t = YaccSymbol()
+ t.type = 'error'
+
+ if hasattr(lookahead, 'lineno'):
+ t.lineno = t.endlineno = lookahead.lineno
+ if hasattr(lookahead, 'lexpos'):
+ t.lexpos = t.endlexpos = lookahead.lexpos
+ t.value = lookahead
+ lookaheadstack.append(lookahead)
+ lookahead = t
+ else:
+ sym = symstack.pop()
+ statestack.pop()
+ state = statestack[-1]
+
+ continue
+
+ # Call an error function here
+ raise RuntimeError('yacc: internal parser error!!!\n')
+
+ #--! parseopt-notrack-end
+
+# -----------------------------------------------------------------------------
+# === Grammar Representation ===
+#
+# The following functions, classes, and variables are used to represent and
+# manipulate the rules that make up a grammar.
+# -----------------------------------------------------------------------------
+
+# regex matching identifiers
+_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
+
+# -----------------------------------------------------------------------------
+# class Production:
+#
+# This class stores the raw information about a single production or grammar rule.
+# A grammar rule refers to a specification such as this:
+#
+# expr : expr PLUS term
+#
+# Here are the basic attributes defined on all productions
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','PLUS','term']
+# prec - Production precedence level
+# number - Production number.
+# func - Function that executes on reduce
+# file - File where production function is defined
+# lineno - Line number where production function is defined
+#
+# The following attributes are defined or optional.
+#
+# len - Length of the production (number of symbols on right hand side)
+# usyms - Set of unique symbols found in the production
+# -----------------------------------------------------------------------------
+
+class Production(object):
+ reduced = 0
+ def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
+ self.name = name
+ self.prod = tuple(prod)
+ self.number = number
+ self.func = func
+ self.callable = None
+ self.file = file
+ self.line = line
+ self.prec = precedence
+
+ # Internal settings used during table construction
+
+ self.len = len(self.prod) # Length of the production
+
+ # Create a list of unique production symbols used in the production
+ self.usyms = []
+ for s in self.prod:
+ if s not in self.usyms:
+ self.usyms.append(s)
+
+ # List of all LR items for the production
+ self.lr_items = []
+ self.lr_next = None
+
+ # Create a string representation
+ if self.prod:
+ self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
+ else:
+ self.str = '%s -> <empty>' % self.name
+
+ def __str__(self):
+ return self.str
+
+ def __repr__(self):
+ return 'Production(' + str(self) + ')'
+
+ def __len__(self):
+ return len(self.prod)
+
+ def __nonzero__(self):
+ return 1
+
+ def __getitem__(self, index):
+ return self.prod[index]
+
+ # Return the nth lr_item from the production (or None if at the end)
+ def lr_item(self, n):
+ if n > len(self.prod):
+ return None
+ p = LRItem(self, n)
+ # Precompute the list of productions immediately following.
+ try:
+ p.lr_after = Prodnames[p.prod[n+1]]
+ except (IndexError, KeyError):
+ p.lr_after = []
+ try:
+ p.lr_before = p.prod[n-1]
+ except IndexError:
+ p.lr_before = None
+ return p
+
+ # Bind the production function name to a callable
+ def bind(self, pdict):
+ if self.func:
+ self.callable = pdict[self.func]
+
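+# A minimal usage sketch (the symbols below are illustrative, not from any
+# real grammar):
+#
+#     p = Production(1, 'expr', ['expr', 'PLUS', 'term'])
+#     str(p)       # -> 'expr -> expr PLUS term'
+#     p.len        # -> 3
+#     p.usyms      # -> ['expr', 'PLUS', 'term']
+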
+# This class serves as a minimal stand-in for Production objects when
+# reading table data from files. It only contains information
+# actually used by the LR parsing engine, plus some additional
+# debugging information.
+class MiniProduction(object):
+ def __init__(self, str, name, len, func, file, line):
+ self.name = name
+ self.len = len
+ self.func = func
+ self.callable = None
+ self.file = file
+ self.line = line
+ self.str = str
+
+ def __str__(self):
+ return self.str
+
+ def __repr__(self):
+ return 'MiniProduction(%s)' % self.str
+
+ # Bind the production function name to a callable
+ def bind(self, pdict):
+ if self.func:
+ self.callable = pdict[self.func]
+
+
+# -----------------------------------------------------------------------------
+# class LRItem
+#
+# This class represents a specific stage of parsing a production rule. For
+# example:
+#
+# expr : expr . PLUS term
+#
+# In the above, the "." represents the current location of the parse. Here
+# are the basic attributes:
+#
+# name - Name of the production. For example 'expr'
+# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
+# number - Production number.
+#
+# lr_next - Next LR item. For example, if we are at 'expr -> expr . PLUS term'
+# then lr_next refers to 'expr -> expr PLUS . term'
+# lr_index - LR item index (location of the ".") in the prod list.
+# lookaheads - LALR lookahead symbols for this item
+# len - Length of the production (number of symbols on right hand side)
+# lr_after - List of all productions that immediately follow
+# lr_before - Grammar symbol immediately before
+# -----------------------------------------------------------------------------
+
+class LRItem(object):
+ def __init__(self, p, n):
+ self.name = p.name
+ self.prod = list(p.prod)
+ self.number = p.number
+ self.lr_index = n
+ self.lookaheads = {}
+ self.prod.insert(n, '.')
+ self.prod = tuple(self.prod)
+ self.len = len(self.prod)
+ self.usyms = p.usyms
+
+ def __str__(self):
+ if self.prod:
+ s = '%s -> %s' % (self.name, ' '.join(self.prod))
+ else:
+ s = '%s -> <empty>' % self.name
+ return s
+
+ def __repr__(self):
+ return 'LRItem(' + str(self) + ')'
+
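+# A sketch, assuming p is the Production for 'expr -> expr PLUS term':
+#
+#     str(LRItem(p, 1))    # -> 'expr -> expr . PLUS term'
+#
+# i.e. LRItem(p, n) inserts the dot at index n of the right-hand side.
+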
+# -----------------------------------------------------------------------------
+# rightmost_terminal()
+#
+# Return the rightmost terminal from a list of symbols. Used in add_production()
+# -----------------------------------------------------------------------------
+def rightmost_terminal(symbols, terminals):
+ i = len(symbols) - 1
+ while i >= 0:
+ if symbols[i] in terminals:
+ return symbols[i]
+ i -= 1
+ return None
+
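+# For example (hypothetical inputs, where only 'PLUS' is a terminal):
+#
+#     rightmost_terminal(['expr', 'PLUS', 'term'], {'PLUS': []})   # -> 'PLUS'
+#     rightmost_terminal(['expr', 'term'], {'PLUS': []})           # -> None
+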
+# -----------------------------------------------------------------------------
+# === GRAMMAR CLASS ===
+#
+# The following class represents the contents of the specified grammar along
+# with various computed properties such as first sets, follow sets, LR items, etc.
+# This data is used for critical parts of the table generation process later.
+# -----------------------------------------------------------------------------
+
+class GrammarError(YaccError):
+ pass
+
+class Grammar(object):
+ def __init__(self, terminals):
+ self.Productions = [None] # A list of all of the productions. The first
+ # entry is always reserved for the purpose of
+ # building an augmented grammar
+
+ self.Prodnames = {} # A dictionary mapping the names of nonterminals to a list of all
+ # productions of that nonterminal.
+
+ self.Prodmap = {} # A dictionary that is only used to detect duplicate
+ # productions.
+
+ self.Terminals = {} # A dictionary mapping the names of terminal symbols to a
+ # list of the rules where they are used.
+
+ for term in terminals:
+ self.Terminals[term] = []
+
+ self.Terminals['error'] = []
+
+ self.Nonterminals = {} # A dictionary mapping names of nonterminals to a list
+ # of rule numbers where they are used.
+
+ self.First = {} # A dictionary of precomputed FIRST(x) symbols
+
+ self.Follow = {} # A dictionary of precomputed FOLLOW(x) symbols
+
+ self.Precedence = {} # Precedence rules for each terminal. Contains tuples of the
+ # form ('right',level) or ('nonassoc', level) or ('left',level)
+
+        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
+ # This is only used to provide error checking and to generate
+ # a warning about unused precedence rules.
+
+ self.Start = None # Starting symbol for the grammar
+
+
+ def __len__(self):
+ return len(self.Productions)
+
+ def __getitem__(self, index):
+ return self.Productions[index]
+
+ # -----------------------------------------------------------------------------
+ # set_precedence()
+ #
+ # Sets the precedence for a given terminal. assoc is the associativity such as
+ # 'left','right', or 'nonassoc'. level is a numeric level.
+ #
+ # -----------------------------------------------------------------------------
+
+ def set_precedence(self, term, assoc, level):
+ assert self.Productions == [None], 'Must call set_precedence() before add_production()'
+ if term in self.Precedence:
+ raise GrammarError('Precedence already specified for terminal %r' % term)
+ if assoc not in ['left', 'right', 'nonassoc']:
+ raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
+ self.Precedence[term] = (assoc, level)
+
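+    # A usage sketch (hypothetical terminals); precedence must be declared
+    # before any productions are added:
+    #
+    #     g = Grammar(['PLUS', 'TIMES'])
+    #     g.set_precedence('PLUS', 'left', 1)
+    #     g.set_precedence('TIMES', 'left', 2)
+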
+ # -----------------------------------------------------------------------------
+ # add_production()
+ #
+ # Given an action function, this function assembles a production rule and
+ # computes its precedence level.
+ #
+ # The production rule is supplied as a list of symbols. For example,
+ # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
+ # symbols ['expr','PLUS','term'].
+ #
+    # Precedence is determined by the precedence of the rightmost terminal symbol
+ # or the precedence of a terminal specified by %prec.
+ #
+ # A variety of error checks are performed to make sure production symbols
+ # are valid and that %prec is used correctly.
+ # -----------------------------------------------------------------------------
+
+ def add_production(self, prodname, syms, func=None, file='', line=0):
+
+ if prodname in self.Terminals:
+ raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
+ if prodname == 'error':
+ raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
+ if not _is_identifier.match(prodname):
+ raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))
+
+ # Look for literal tokens
+ for n, s in enumerate(syms):
+ if s[0] in "'\"":
+ try:
+ c = eval(s)
+ if (len(c) > 1):
+ raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
+ (file, line, s, prodname))
+ if c not in self.Terminals:
+ self.Terminals[c] = []
+ syms[n] = c
+ continue
+ except SyntaxError:
+ pass
+ if not _is_identifier.match(s) and s != '%prec':
+ raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))
+
+ # Determine the precedence level
+ if '%prec' in syms:
+ if syms[-1] == '%prec':
+ raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
+ if syms[-2] != '%prec':
+ raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
+ (file, line))
+ precname = syms[-1]
+ prodprec = self.Precedence.get(precname)
+ if not prodprec:
+ raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
+ else:
+ self.UsedPrecedence.add(precname)
+ del syms[-2:] # Drop %prec from the rule
+ else:
+ # If no %prec, precedence is determined by the rightmost terminal symbol
+ precname = rightmost_terminal(syms, self.Terminals)
+ prodprec = self.Precedence.get(precname, ('right', 0))
+
+ # See if the rule is already in the rulemap
+ map = '%s -> %s' % (prodname, syms)
+ if map in self.Prodmap:
+ m = self.Prodmap[map]
+ raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
+ 'Previous definition at %s:%d' % (m.file, m.line))
+
+ # From this point on, everything is valid. Create a new Production instance
+ pnumber = len(self.Productions)
+ if prodname not in self.Nonterminals:
+ self.Nonterminals[prodname] = []
+
+ # Add the production number to Terminals and Nonterminals
+ for t in syms:
+ if t in self.Terminals:
+ self.Terminals[t].append(pnumber)
+ else:
+ if t not in self.Nonterminals:
+ self.Nonterminals[t] = []
+ self.Nonterminals[t].append(pnumber)
+
+ # Create a production and add it to the list of productions
+ p = Production(pnumber, prodname, syms, prodprec, func, file, line)
+ self.Productions.append(p)
+ self.Prodmap[map] = p
+
+ # Add to the global productions list
+ try:
+ self.Prodnames[prodname].append(p)
+ except KeyError:
+ self.Prodnames[prodname] = [p]
+
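+    # Continuing the sketch above (the rule itself is illustrative):
+    #
+    #     g.add_production('expr', ['expr', 'PLUS', 'expr'])
+    #
+    # registers 'expr -> expr PLUS expr' as production 1, taking the
+    # precedence of 'PLUS' since it is the rightmost terminal and no %prec
+    # was given.
+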
+ # -----------------------------------------------------------------------------
+ # set_start()
+ #
+ # Sets the starting symbol and creates the augmented grammar. Production
+ # rule 0 is S' -> start where start is the start symbol.
+ # -----------------------------------------------------------------------------
+
+ def set_start(self, start=None):
+ if not start:
+ start = self.Productions[1].name
+ if start not in self.Nonterminals:
+ raise GrammarError('start symbol %s undefined' % start)
+ self.Productions[0] = Production(0, "S'", [start])
+ self.Nonterminals[start].append(0)
+ self.Start = start
+
+ # -----------------------------------------------------------------------------
+ # find_unreachable()
+ #
+ # Find all of the nonterminal symbols that can't be reached from the starting
+ # symbol. Returns a list of nonterminals that can't be reached.
+ # -----------------------------------------------------------------------------
+
+ def find_unreachable(self):
+
+ # Mark all symbols that are reachable from a symbol s
+ def mark_reachable_from(s):
+ if s in reachable:
+ return
+ reachable.add(s)
+ for p in self.Prodnames.get(s, []):
+ for r in p.prod:
+ mark_reachable_from(r)
+
+ reachable = set()
+ mark_reachable_from(self.Productions[0].prod[0])
+ return [s for s in self.Nonterminals if s not in reachable]
+
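+    # For example (sketch): if the start symbol is 'stmt' and a rule
+    # 'junk : NAME' is never referenced from anything reachable from 'stmt',
+    # find_unreachable() returns ['junk'].
+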
+ # -----------------------------------------------------------------------------
+ # infinite_cycles()
+ #
+ # This function looks at the various parsing rules and tries to detect
+ # infinite recursion cycles (grammar rules where there is no possible way
+ # to derive a string of only terminals).
+ # -----------------------------------------------------------------------------
+
+ def infinite_cycles(self):
+ terminates = {}
+
+ # Terminals:
+ for t in self.Terminals:
+ terminates[t] = True
+
+ terminates['$end'] = True
+
+ # Nonterminals:
+
+ # Initialize to false:
+ for n in self.Nonterminals:
+ terminates[n] = False
+
+ # Then propagate termination until no change:
+ while True:
+ some_change = False
+ for (n, pl) in self.Prodnames.items():
+ # Nonterminal n terminates iff any of its productions terminates.
+ for p in pl:
+ # Production p terminates iff all of its rhs symbols terminate.
+ for s in p.prod:
+ if not terminates[s]:
+ # The symbol s does not terminate,
+ # so production p does not terminate.
+ p_terminates = False
+ break
+ else:
+ # didn't break from the loop,
+ # so every symbol s terminates
+ # so production p terminates.
+ p_terminates = True
+
+ if p_terminates:
+ # symbol n terminates!
+ if not terminates[n]:
+ terminates[n] = True
+ some_change = True
+ # Don't need to consider any more productions for this n.
+ break
+
+ if not some_change:
+ break
+
+ infinite = []
+ for (s, term) in terminates.items():
+ if not term:
+ if s not in self.Prodnames and s not in self.Terminals and s != 'error':
+ # s is used-but-not-defined, and we've already warned of that,
+ # so it would be overkill to say that it's also non-terminating.
+ pass
+ else:
+ infinite.append(s)
+
+ return infinite
+
+ # -----------------------------------------------------------------------------
+ # undefined_symbols()
+ #
+    # Find all symbols that were used in the grammar, but not defined as tokens or
+    # grammar rules. Returns a list of tuples (sym, prod) where sym is the symbol
+ # and prod is the production where the symbol was used.
+ # -----------------------------------------------------------------------------
+ def undefined_symbols(self):
+ result = []
+ for p in self.Productions:
+ if not p:
+ continue
+
+ for s in p.prod:
+ if s not in self.Prodnames and s not in self.Terminals and s != 'error':
+ result.append((s, p))
+ return result
+
+ # -----------------------------------------------------------------------------
+ # unused_terminals()
+ #
+ # Find all terminals that were defined, but not used by the grammar. Returns
+    # a list of the unused terminal names.
+ # -----------------------------------------------------------------------------
+ def unused_terminals(self):
+ unused_tok = []
+ for s, v in self.Terminals.items():
+ if s != 'error' and not v:
+ unused_tok.append(s)
+
+ return unused_tok
+
+ # ------------------------------------------------------------------------------
+ # unused_rules()
+ #
+ # Find all grammar rules that were defined, but not used (maybe not reachable)
+ # Returns a list of productions.
+ # ------------------------------------------------------------------------------
+
+ def unused_rules(self):
+ unused_prod = []
+ for s, v in self.Nonterminals.items():
+ if not v:
+ p = self.Prodnames[s][0]
+ unused_prod.append(p)
+ return unused_prod
+
+ # -----------------------------------------------------------------------------
+ # unused_precedence()
+ #
+ # Returns a list of tuples (term,precedence) corresponding to precedence
+ # rules that were never used by the grammar. term is the name of the terminal
+ # on which precedence was applied and precedence is a string such as 'left' or
+ # 'right' corresponding to the type of precedence.
+ # -----------------------------------------------------------------------------
+
+ def unused_precedence(self):
+ unused = []
+ for termname in self.Precedence:
+ if not (termname in self.Terminals or termname in self.UsedPrecedence):
+ unused.append((termname, self.Precedence[termname][0]))
+
+ return unused
+
+ # -------------------------------------------------------------------------
+ # _first()
+ #
+ # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
+ #
+ # During execution of compute_first1, the result may be incomplete.
+ # Afterward (e.g., when called from compute_follow()), it will be complete.
+ # -------------------------------------------------------------------------
+ def _first(self, beta):
+
+ # We are computing First(x1,x2,x3,...,xn)
+ result = []
+ for x in beta:
+ x_produces_empty = False
+
+ # Add all the non-<empty> symbols of First[x] to the result.
+ for f in self.First[x]:
+ if f == '<empty>':
+ x_produces_empty = True
+ else:
+ if f not in result:
+ result.append(f)
+
+ if x_produces_empty:
+ # We have to consider the next x in beta,
+ # i.e. stay in the loop.
+ pass
+ else:
+ # We don't have to consider any further symbols in beta.
+ break
+ else:
+ # There was no 'break' from the loop,
+ # so x_produces_empty was true for all x in beta,
+ # so beta produces empty as well.
+ result.append('<empty>')
+
+ return result
+
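+    # For example, a sketch: with First['opt'] == ['COMMA', '<empty>'] and
+    # First['RPAREN'] == ['RPAREN'], _first(('opt', 'RPAREN')) returns
+    # ['COMMA', 'RPAREN'] -- since 'opt' can derive empty, 'RPAREN' may also
+    # begin the sequence, but '<empty>' itself is not propagated.
+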
+ # -------------------------------------------------------------------------
+ # compute_first()
+ #
+ # Compute the value of FIRST1(X) for all symbols
+ # -------------------------------------------------------------------------
+ def compute_first(self):
+ if self.First:
+ return self.First
+
+ # Terminals:
+ for t in self.Terminals:
+ self.First[t] = [t]
+
+ self.First['$end'] = ['$end']
+
+ # Nonterminals:
+
+ # Initialize to the empty set:
+ for n in self.Nonterminals:
+ self.First[n] = []
+
+ # Then propagate symbols until no change:
+ while True:
+ some_change = False
+ for n in self.Nonterminals:
+ for p in self.Prodnames[n]:
+ for f in self._first(p.prod):
+ if f not in self.First[n]:
+ self.First[n].append(f)
+ some_change = True
+ if not some_change:
+ break
+
+ return self.First
+
+ # ---------------------------------------------------------------------
+ # compute_follow()
+ #
+ # Computes all of the follow sets for every non-terminal symbol. The
+ # follow set is the set of all symbols that might follow a given
+ # non-terminal. See the Dragon book, 2nd Ed. p. 189.
+ # ---------------------------------------------------------------------
+ def compute_follow(self, start=None):
+ # If already computed, return the result
+ if self.Follow:
+ return self.Follow
+
+ # If first sets not computed yet, do that first.
+ if not self.First:
+ self.compute_first()
+
+ # Add '$end' to the follow list of the start symbol
+ for k in self.Nonterminals:
+ self.Follow[k] = []
+
+ if not start:
+ start = self.Productions[1].name
+
+ self.Follow[start] = ['$end']
+
+ while True:
+ didadd = False
+ for p in self.Productions[1:]:
+ # Here is the production set
+ for i, B in enumerate(p.prod):
+ if B in self.Nonterminals:
+ # Okay. We got a non-terminal in a production
+ fst = self._first(p.prod[i+1:])
+ hasempty = False
+ for f in fst:
+ if f != '<empty>' and f not in self.Follow[B]:
+ self.Follow[B].append(f)
+ didadd = True
+ if f == '<empty>':
+ hasempty = True
+ if hasempty or i == (len(p.prod)-1):
+ # Add elements of follow(a) to follow(b)
+ for f in self.Follow[p.name]:
+ if f not in self.Follow[B]:
+ self.Follow[B].append(f)
+ didadd = True
+ if not didadd:
+ break
+ return self.Follow
+
+
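+    # A small worked sketch (hypothetical grammar): given
+    #
+    #     stmt : LPAREN expr RPAREN
+    #     expr : NAME
+    #
+    # 'expr' is followed by 'RPAREN' in the first rule, so after
+    # compute_follow(), Follow['expr'] == ['RPAREN'] and
+    # Follow['stmt'] == ['$end'].
+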
+ # -----------------------------------------------------------------------------
+ # build_lritems()
+ #
+ # This function walks the list of productions and builds a complete set of the
+ # LR items. The LR items are stored in two ways: First, they are uniquely
+ # numbered and placed in the list _lritems. Second, a linked list of LR items
+ # is built for each production. For example:
+ #
+ # E -> E PLUS E
+ #
+ # Creates the list
+ #
+ # [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
+ # -----------------------------------------------------------------------------
+
+ def build_lritems(self):
+ for p in self.Productions:
+ lastlri = p
+ i = 0
+ lr_items = []
+ while True:
+ if i > len(p):
+ lri = None
+ else:
+ lri = LRItem(p, i)
+ # Precompute the list of productions immediately following
+ try:
+ lri.lr_after = self.Prodnames[lri.prod[i+1]]
+ except (IndexError, KeyError):
+ lri.lr_after = []
+ try:
+ lri.lr_before = lri.prod[i-1]
+ except IndexError:
+ lri.lr_before = None
+
+ lastlri.lr_next = lri
+ if not lri:
+ break
+ lr_items.append(lri)
+ lastlri = lri
+ i += 1
+ p.lr_items = lr_items
+
+# -----------------------------------------------------------------------------
+# == Class LRTable ==
+#
+# This class represents a basic table of LR parsing information.
+# Methods for generating the tables are not defined here. They are defined
+# in the derived class LRGeneratedTable.
+# -----------------------------------------------------------------------------
+
+class VersionError(YaccError):
+ pass
+
+class LRTable(object):
+ def __init__(self):
+ self.lr_action = None
+ self.lr_goto = None
+ self.lr_productions = None
+ self.lr_method = None
+
+ def read_table(self, module):
+ if isinstance(module, types.ModuleType):
+ parsetab = module
+ else:
+ exec('import %s' % module)
+ parsetab = sys.modules[module]
+
+ if parsetab._tabversion != __tabversion__:
+ raise VersionError('yacc table file version is out of date')
+
+ self.lr_action = parsetab._lr_action
+ self.lr_goto = parsetab._lr_goto
+
+ self.lr_productions = []
+ for p in parsetab._lr_productions:
+ self.lr_productions.append(MiniProduction(*p))
+
+ self.lr_method = parsetab._lr_method
+ return parsetab._lr_signature
+
+ def read_pickle(self, filename):
+ try:
+ import cPickle as pickle
+ except ImportError:
+ import pickle
+
+ if not os.path.exists(filename):
+ raise ImportError
+
+ in_f = open(filename, 'rb')
+
+ tabversion = pickle.load(in_f)
+ if tabversion != __tabversion__:
+ raise VersionError('yacc table file version is out of date')
+ self.lr_method = pickle.load(in_f)
+ signature = pickle.load(in_f)
+ self.lr_action = pickle.load(in_f)
+ self.lr_goto = pickle.load(in_f)
+ productions = pickle.load(in_f)
+
+ self.lr_productions = []
+ for p in productions:
+ self.lr_productions.append(MiniProduction(*p))
+
+ in_f.close()
+ return signature
+
+ # Bind all production function names to callable objects in pdict
+ def bind_callables(self, pdict):
+ for p in self.lr_productions:
+ p.bind(pdict)
+
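+# A usage sketch (the 'parsetab' module name is an assumption, not fixed here):
+#
+#     lrtab = LRTable()
+#     signature = lrtab.read_table('parsetab')  # or lrtab.read_pickle(filename)
+#     lrtab.bind_callables(globals())           # resolve p_* names to functions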
+
+# -----------------------------------------------------------------------------
+# === LR Generator ===
+#
+# The following classes and functions are used to generate LR parsing tables on
+# a grammar.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# digraph()
+# traverse()
+#
+# The following two functions are used to compute set valued functions
+# of the form:
+#
+# F(x) = F'(x) U U{F(y) | x R y}
+#
+# This is used to compute the values of Read() sets as well as FOLLOW sets
+# in LALR(1) generation.
+#
+# Inputs: X - An input set
+# R - A relation
+# FP - Set-valued function
+# ------------------------------------------------------------------------------
+
+def digraph(X, R, FP):
+ N = {}
+ for x in X:
+ N[x] = 0
+ stack = []
+ F = {}
+ for x in X:
+ if N[x] == 0:
+ traverse(x, N, stack, F, X, R, FP)
+ return F
+
+def traverse(x, N, stack, F, X, R, FP):
+ stack.append(x)
+ d = len(stack)
+ N[x] = d
+ F[x] = FP(x) # F(X) <- F'(x)
+
+ rel = R(x) # Get y's related to x
+ for y in rel:
+ if N[y] == 0:
+ traverse(y, N, stack, F, X, R, FP)
+ N[x] = min(N[x], N[y])
+ for a in F.get(y, []):
+ if a not in F[x]:
+ F[x].append(a)
+ if N[x] == d:
+ N[stack[-1]] = MAXINT
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+ while element != x:
+ N[stack[-1]] = MAXINT
+ F[stack[-1]] = F[x]
+ element = stack.pop()
+
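+# A minimal sketch of digraph() on tiny inputs (all values hypothetical):
+#
+#     X = ['a', 'b']
+#     R = lambda x: ['b'] if x == 'a' else []   # the only relation: a R b
+#     FP = lambda x: [x.upper()]                # F'(x)
+#     digraph(X, R, FP)                         # -> {'a': ['A', 'B'], 'b': ['B']}
+#
+# i.e. F('a') = F'('a') U F('b'), exactly the equation above.
+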
+class LALRError(YaccError):
+ pass
+
+# -----------------------------------------------------------------------------
+# == LRGeneratedTable ==
+#
+# This class implements the LR table generation algorithm. There are no
+# public methods except for write()
+# -----------------------------------------------------------------------------
+
+class LRGeneratedTable(LRTable):
+ def __init__(self, grammar, method='LALR', log=None):
+ if method not in ['SLR', 'LALR']:
+ raise LALRError('Unsupported method %s' % method)
+
+ self.grammar = grammar
+ self.lr_method = method
+
+ # Set up the logger
+ if not log:
+ log = NullLogger()
+ self.log = log
+
+ # Internal attributes
+ self.lr_action = {} # Action table
+ self.lr_goto = {} # Goto table
+ self.lr_productions = grammar.Productions # Copy of grammar Production array
+ self.lr_goto_cache = {} # Cache of computed gotos
+ self.lr0_cidhash = {} # Cache of closures
+
+ self._add_count = 0 # Internal counter used to detect cycles
+
+        # Diagnostic information filled in by the table generator
+ self.sr_conflict = 0
+ self.rr_conflict = 0
+ self.conflicts = [] # List of conflicts
+
+ self.sr_conflicts = []
+ self.rr_conflicts = []
+
+ # Build the tables
+ self.grammar.build_lritems()
+ self.grammar.compute_first()
+ self.grammar.compute_follow()
+ self.lr_parse_table()
+
+ # Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
+
+ def lr0_closure(self, I):
+ self._add_count += 1
+
+ # Add everything in I to J
+ J = I[:]
+ didadd = True
+ while didadd:
+ didadd = False
+ for j in J:
+ for x in j.lr_after:
+ if getattr(x, 'lr0_added', 0) == self._add_count:
+ continue
+ # Add B --> .G to J
+ J.append(x.lr_next)
+ x.lr0_added = self._add_count
+ didadd = True
+
+ return J
+
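+    # For example (sketch): with the augmented grammar "S' -> expr" and
+    # "expr -> NAME", closing [S' -> . expr] adds the initial item of every
+    # production whose left-hand side appears just after a dot, giving
+    # [S' -> . expr, expr -> . NAME].
+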
+ # Compute the LR(0) goto function goto(I,X) where I is a set
+ # of LR(0) items and X is a grammar symbol. This function is written
+ # in a way that guarantees uniqueness of the generated goto sets
+ # (i.e. the same goto set will never be returned as two different Python
+ # objects). With uniqueness, we can later do fast set comparisons using
+ # id(obj) instead of element-wise comparison.
+
+ def lr0_goto(self, I, x):
+ # First we look for a previously cached entry
+ g = self.lr_goto_cache.get((id(I), x))
+ if g:
+ return g
+
+ # Now we generate the goto set in a way that guarantees uniqueness
+ # of the result
+
+ s = self.lr_goto_cache.get(x)
+ if not s:
+ s = {}
+ self.lr_goto_cache[x] = s
+
+ gs = []
+ for p in I:
+ n = p.lr_next
+ if n and n.lr_before == x:
+ s1 = s.get(id(n))
+ if not s1:
+ s1 = {}
+ s[id(n)] = s1
+ gs.append(n)
+ s = s1
+ g = s.get('$end')
+ if not g:
+ if gs:
+ g = self.lr0_closure(gs)
+ s['$end'] = g
+ else:
+ s['$end'] = gs
+ self.lr_goto_cache[(id(I), x)] = g
+ return g
+
+ # Compute the LR(0) sets of item function
+ def lr0_items(self):
+ C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
+ i = 0
+ for I in C:
+ self.lr0_cidhash[id(I)] = i
+ i += 1
+
+        # Loop over the items in C and each grammar symbol
+ i = 0
+ while i < len(C):
+ I = C[i]
+ i += 1
+
+ # Collect all of the symbols that could possibly be in the goto(I,X) sets
+ asyms = {}
+ for ii in I:
+ for s in ii.usyms:
+ asyms[s] = None
+
+ for x in asyms:
+ g = self.lr0_goto(I, x)
+ if not g or id(g) in self.lr0_cidhash:
+ continue
+ self.lr0_cidhash[id(g)] = len(C)
+ C.append(g)
+
+ return C
+
+ # -----------------------------------------------------------------------------
+ # ==== LALR(1) Parsing ====
+ #
+ # LALR(1) parsing is almost exactly the same as SLR except that instead of
+ # relying upon Follow() sets when performing reductions, a more selective
+ # lookahead set that incorporates the state of the LR(0) machine is utilized.
+ # Thus, we mainly just have to focus on calculating the lookahead sets.
+ #
+    # The method used here is due to DeRemer and Pennello (1982).
+    #
+    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
+ # Lookahead Sets", ACM Transactions on Programming Languages and Systems,
+ # Vol. 4, No. 4, Oct. 1982, pp. 615-649
+ #
+ # Further details can also be found in:
+ #
+ # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
+ # McGraw-Hill Book Company, (1985).
+ #
+ # -----------------------------------------------------------------------------
+
+ # -----------------------------------------------------------------------------
+ # compute_nullable_nonterminals()
+ #
+    # Creates a set containing all of the non-terminals that might produce
+    # an empty production.
+ # -----------------------------------------------------------------------------
+
+ def compute_nullable_nonterminals(self):
+ nullable = set()
+ num_nullable = 0
+ while True:
+ for p in self.grammar.Productions[1:]:
+ if p.len == 0:
+ nullable.add(p.name)
+ continue
+ for t in p.prod:
+ if t not in nullable:
+ break
+ else:
+ nullable.add(p.name)
+ if len(nullable) == num_nullable:
+ break
+ num_nullable = len(nullable)
+ return nullable
+
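+    # For example (hypothetical rules): with 'opt : <empty>' and 'args : opt',
+    # 'opt' is added because it has an empty production, and 'args' follows
+    # once every symbol on its right-hand side is known to be nullable.
+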
+ # -----------------------------------------------------------------------------
+ # find_nonterminal_trans(C)
+ #
+    # Given a set of LR(0) items, this function finds all of the non-terminal
+ # transitions. These are transitions in which a dot appears immediately before
+ # a non-terminal. Returns a list of tuples of the form (state,N) where state
+ # is the state number and N is the nonterminal symbol.
+ #
+ # The input C is the set of LR(0) items.
+ # -----------------------------------------------------------------------------
+
+ def find_nonterminal_transitions(self, C):
+ trans = []
+ for stateno, state in enumerate(C):
+ for p in state:
+ if p.lr_index < p.len - 1:
+ t = (stateno, p.prod[p.lr_index+1])
+ if t[1] in self.grammar.Nonterminals:
+ if t not in trans:
+ trans.append(t)
+ return trans
+
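+    # For example (sketch): if state 4 contains the item 'stmt -> IF . expr THEN',
+    # the dot sits immediately before the nonterminal 'expr', so the tuple
+    # (4, 'expr') appears in the result.
+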
+ # -----------------------------------------------------------------------------
+ # dr_relation()
+ #
+ # Computes the DR(p,A) relationships for non-terminal transitions. The input
+ # is a tuple (state,N) where state is a number and N is a nonterminal symbol.
+ #
+ # Returns a list of terminals.
+ # -----------------------------------------------------------------------------
+
+ def dr_relation(self, C, trans, nullable):
+ dr_set = {}
+ state, N = trans
+ terms = []
+
+ g = self.lr0_goto(C[state], N)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index+1]
+ if a in self.grammar.Terminals:
+ if a not in terms:
+ terms.append(a)
+
+ # This extra bit is to handle the start state
+ if state == 0 and N == self.grammar.Productions[0].prod[0]:
+ terms.append('$end')
+
+ return terms
+
+ # -----------------------------------------------------------------------------
+ # reads_relation()
+ #
+ # Computes the READS() relation (p,A) READS (t,C).
+ # -----------------------------------------------------------------------------
+
+ def reads_relation(self, C, trans, empty):
+ # Look for empty transitions
+ rel = []
+ state, N = trans
+
+ g = self.lr0_goto(C[state], N)
+ j = self.lr0_cidhash.get(id(g), -1)
+ for p in g:
+ if p.lr_index < p.len - 1:
+ a = p.prod[p.lr_index + 1]
+ if a in empty:
+ rel.append((j, a))
+
+ return rel
+
+ # -----------------------------------------------------------------------------
+ # compute_lookback_includes()
+ #
+ # Determines the lookback and includes relations
+ #
+ # LOOKBACK:
+ #
+ # This relation is determined by running the LR(0) state machine forward.
+ # For example, starting with a production "N : . A B C", we run it forward
+ # to obtain "N : A B C ." We then build a relationship between this final
+ # state and the starting state. These relationships are stored in a dictionary
+ # lookdict.
+ #
+ # INCLUDES:
+ #
+ # Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
+ #
+ # This relation is used to determine non-terminal transitions that occur
+ # inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
+ # if the following holds:
+ #
+ # B -> LAT, where T -> epsilon and p' -L-> p
+ #
+ # L is essentially a prefix (which may be empty), T is a suffix that must be
+ # able to derive an empty string. State p' must lead to state p with the string L.
+ #
+ # -----------------------------------------------------------------------------
+
+ def compute_lookback_includes(self, C, trans, nullable):
+ lookdict = {} # Dictionary of lookback relations
+ includedict = {} # Dictionary of include relations
+
+ # Make a dictionary of non-terminal transitions
+ dtrans = {}
+ for t in trans:
+ dtrans[t] = 1
+
+ # Loop over all transitions and compute lookbacks and includes
+ for state, N in trans:
+ lookb = []
+ includes = []
+ for p in C[state]:
+ if p.name != N:
+ continue
+
+ # Okay, we have a name match. We now follow the production all the way
+ # through the state machine until we get the . on the right hand side
+
+ lr_index = p.lr_index
+ j = state
+ while lr_index < p.len - 1:
+ lr_index = lr_index + 1
+ t = p.prod[lr_index]
+
+ # Check to see if this symbol and state are a non-terminal transition
+ if (j, t) in dtrans:
+ # Yes. Okay, there is some chance that this is an includes relation
+ # the only way to know for certain is whether the rest of the
+ # production derives empty
+
+ li = lr_index + 1
+ while li < p.len:
+ if p.prod[li] in self.grammar.Terminals:
+                            break # No, forget it
+ if p.prod[li] not in nullable:
+ break
+ li = li + 1
+ else:
+ # Appears to be a relation between (j,t) and (state,N)
+ includes.append((j, t))
+
+ g = self.lr0_goto(C[j], t) # Go to next set
+ j = self.lr0_cidhash.get(id(g), -1) # Go to next state
+
+ # When we get here, j is the final state, now we have to locate the production
+ for r in C[j]:
+ if r.name != p.name:
+ continue
+ if r.len != p.len:
+ continue
+ i = 0
+                # This loop is comparing a production ". A B C" with "A B C ."
+ while i < r.lr_index:
+ if r.prod[i] != p.prod[i+1]:
+ break
+ i = i + 1
+ else:
+ lookb.append((j, r))
+ for i in includes:
+ if i not in includedict:
+ includedict[i] = []
+ includedict[i].append((state, N))
+ lookdict[(state, N)] = lookb
+
+ return lookdict, includedict
+
+ # -----------------------------------------------------------------------------
+ # compute_read_sets()
+ #
+ # Given a set of LR(0) items, this function computes the read sets.
+ #
+ # Inputs: C = Set of LR(0) items
+ # ntrans = Set of nonterminal transitions
+ # nullable = Set of empty transitions
+ #
+ # Returns a set containing the read sets
+ # -----------------------------------------------------------------------------
+
+ def compute_read_sets(self, C, ntrans, nullable):
+ FP = lambda x: self.dr_relation(C, x, nullable)
+ R = lambda x: self.reads_relation(C, x, nullable)
+ F = digraph(ntrans, R, FP)
+ return F
+
+ # -----------------------------------------------------------------------------
+ # compute_follow_sets()
+ #
+ # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
+ # and an include set, this function computes the follow sets
+ #
+ # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
+ #
+ # Inputs:
+ # ntrans = Set of nonterminal transitions
+ # readsets = Readset (previously computed)
+ # inclsets = Include sets (previously computed)
+ #
+ # Returns a set containing the follow sets
+ # -----------------------------------------------------------------------------
+
+ def compute_follow_sets(self, ntrans, readsets, inclsets):
+ FP = lambda x: readsets[x]
+ R = lambda x: inclsets.get(x, [])
+ F = digraph(ntrans, R, FP)
+ return F
+
+ # -----------------------------------------------------------------------------
+ # add_lookaheads()
+ #
+ # Attaches the lookahead symbols to grammar rules.
+ #
+ # Inputs: lookbacks - Set of lookback relations
+ # followset - Computed follow set
+ #
+ # This function directly attaches the lookaheads to productions contained
+ # in the lookbacks set
+ # -----------------------------------------------------------------------------
+
+ def add_lookaheads(self, lookbacks, followset):
+ for trans, lb in lookbacks.items():
+ # Loop over productions in lookback
+ for state, p in lb:
+ if state not in p.lookaheads:
+ p.lookaheads[state] = []
+ f = followset.get(trans, [])
+ for a in f:
+ if a not in p.lookaheads[state]:
+ p.lookaheads[state].append(a)
+
+ # -----------------------------------------------------------------------------
+ # add_lalr_lookaheads()
+ #
+ # This function does all of the work of adding lookahead information for use
+ # with LALR parsing
+ # -----------------------------------------------------------------------------
+
+ def add_lalr_lookaheads(self, C):
+ # Determine all of the nullable nonterminals
+ nullable = self.compute_nullable_nonterminals()
+
+ # Find all non-terminal transitions
+ trans = self.find_nonterminal_transitions(C)
+
+ # Compute read sets
+ readsets = self.compute_read_sets(C, trans, nullable)
+
+ # Compute lookback/includes relations
+ lookd, included = self.compute_lookback_includes(C, trans, nullable)
+
+ # Compute LALR FOLLOW sets
+ followsets = self.compute_follow_sets(trans, readsets, included)
+
+ # Add all of the lookaheads
+ self.add_lookaheads(lookd, followsets)
+
+ # -----------------------------------------------------------------------------
+ # lr_parse_table()
+ #
+ # This function constructs the parse tables for SLR or LALR
+ # -----------------------------------------------------------------------------
+ def lr_parse_table(self):
+ Productions = self.grammar.Productions
+ Precedence = self.grammar.Precedence
+ goto = self.lr_goto # Goto array
+ action = self.lr_action # Action array
+ log = self.log # Logger for output
+
+ actionp = {} # Action production array (temporary)
+
+ log.info('Parsing method: %s', self.lr_method)
+
+ # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
+ # This determines the number of states
+
+ C = self.lr0_items()
+
+ if self.lr_method == 'LALR':
+ self.add_lalr_lookaheads(C)
+
+ # Build the parser table, state by state
+ st = 0
+ for I in C:
+ # Loop over each production in I
+ actlist = [] # List of actions
+ st_action = {}
+ st_actionp = {}
+ st_goto = {}
+ log.info('')
+ log.info('state %d', st)
+ log.info('')
+ for p in I:
+ log.info(' (%d) %s', p.number, p)
+ log.info('')
+
+ for p in I:
+ if p.len == p.lr_index + 1:
+ if p.name == "S'":
+ # Start symbol. Accept!
+ st_action['$end'] = 0
+ st_actionp['$end'] = p
+ else:
+ # We are at the end of a production. Reduce!
+ if self.lr_method == 'LALR':
+ laheads = p.lookaheads[st]
+ else:
+ laheads = self.grammar.Follow[p.name]
+ for a in laheads:
+ actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
+ r = st_action.get(a)
+ if r is not None:
+ # Whoa. Have a shift/reduce or reduce/reduce conflict
+ if r > 0:
+ # Need to decide on shift or reduce here
+ # By default we favor shifting. Need to add
+ # some precedence rules here.
+
+ # Shift precedence comes from the token
+ sprec, slevel = Precedence.get(a, ('right', 0))
+
+ # Reduce precedence comes from rule being reduced (p)
+ rprec, rlevel = Productions[p.number].prec
+
+ if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
+ # We really need to reduce here.
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ if not slevel and not rlevel:
+ log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
+ self.sr_conflicts.append((st, a, 'reduce'))
+ Productions[p.number].reduced += 1
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ st_action[a] = None
+ else:
+ # Hmmm. Guess we'll keep the shift
+ if not rlevel:
+ log.info(' ! shift/reduce conflict for %s resolved as shift', a)
+ self.sr_conflicts.append((st, a, 'shift'))
+ elif r < 0:
+ # Reduce/reduce conflict. In this case, we favor the rule
+ # that was defined first in the grammar file
+ oldp = Productions[-r]
+ pp = Productions[p.number]
+ if oldp.line > pp.line:
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ chosenp, rejectp = pp, oldp
+ Productions[p.number].reduced += 1
+ Productions[oldp.number].reduced -= 1
+ else:
+ chosenp, rejectp = oldp, pp
+ self.rr_conflicts.append((st, chosenp, rejectp))
+ log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
+ a, st_actionp[a].number, st_actionp[a])
+ else:
+ raise LALRError('Unknown conflict in state %d' % st)
+ else:
+ st_action[a] = -p.number
+ st_actionp[a] = p
+ Productions[p.number].reduced += 1
+ else:
+ i = p.lr_index
+ a = p.prod[i+1] # Get symbol right after the "."
+ if a in self.grammar.Terminals:
+ g = self.lr0_goto(I, a)
+ j = self.lr0_cidhash.get(id(g), -1)
+ if j >= 0:
+ # We are in a shift state
+ actlist.append((a, p, 'shift and go to state %d' % j))
+ r = st_action.get(a)
+ if r is not None:
+ # Whoa have a shift/reduce or shift/shift conflict
+ if r > 0:
+ if r != j:
+ raise LALRError('Shift/shift conflict in state %d' % st)
+ elif r < 0:
+ # Do a precedence check.
+ # - if precedence of reduce rule is higher, we reduce.
+ # - if precedence of reduce is same and left assoc, we reduce.
+ # - otherwise we shift
+
+ # Shift precedence comes from the token
+ sprec, slevel = Precedence.get(a, ('right', 0))
+
+ # Reduce precedence comes from the rule that could have been reduced
+ rprec, rlevel = Productions[st_actionp[a].number].prec
+
+ if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
+ # We decide to shift here... highest precedence to shift
+ Productions[st_actionp[a].number].reduced -= 1
+ st_action[a] = j
+ st_actionp[a] = p
+ if not rlevel:
+ log.info(' ! shift/reduce conflict for %s resolved as shift', a)
+ self.sr_conflicts.append((st, a, 'shift'))
+ elif (slevel == rlevel) and (rprec == 'nonassoc'):
+ st_action[a] = None
+ else:
+ # Hmmm. Guess we'll keep the reduce
+ if not slevel and not rlevel:
+ log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
+ self.sr_conflicts.append((st, a, 'reduce'))
+
+ else:
+ raise LALRError('Unknown conflict in state %d' % st)
+ else:
+ st_action[a] = j
+ st_actionp[a] = p
+
+ # Print the actions associated with each terminal
+ _actprint = {}
+ for a, p, m in actlist:
+ if a in st_action:
+ if p is st_actionp[a]:
+ log.info(' %-15s %s', a, m)
+ _actprint[(a, m)] = 1
+ log.info('')
+ # Print the actions that were not used. (debugging)
+ not_used = 0
+ for a, p, m in actlist:
+ if a in st_action:
+ if p is not st_actionp[a]:
+ if not (a, m) in _actprint:
+ log.debug(' ! %-15s [ %s ]', a, m)
+ not_used = 1
+ _actprint[(a, m)] = 1
+ if not_used:
+ log.debug('')
+
+ # Construct the goto table for this state
+
+ nkeys = {}
+ for ii in I:
+ for s in ii.usyms:
+ if s in self.grammar.Nonterminals:
+ nkeys[s] = None
+ for n in nkeys:
+ g = self.lr0_goto(I, n)
+ j = self.lr0_cidhash.get(id(g), -1)
+ if j >= 0:
+ st_goto[n] = j
+ log.info(' %-30s shift and go to state %d', n, j)
+
+ action[st] = st_action
+ actionp[st] = st_actionp
+ goto[st] = st_goto
+ st += 1
+
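+    # Illustrative sketch (not part of the original module): how the precedence
+    # comparisons above resolve a classic shift/reduce conflict.  Given the
+    # hypothetical declarations
+    #
+    #     precedence = (('left', 'PLUS'), ('left', 'TIMES'))
+    #
+    # and the ambiguous rule  expr : expr PLUS expr | expr TIMES expr,  the
+    # state containing  expr : expr PLUS expr .  with lookahead TIMES compares
+    # the shift precedence of TIMES (level 2) against the reduce precedence of
+    # the rule (level 1, from its rightmost terminal PLUS).  Since the shift
+    # level is higher, the table records a shift, so 1 + 2 * 3 parses as
+    # 1 + (2 * 3).  With equal levels and 'left' associativity the reduce wins
+    # instead, which is what makes the operator left-associative.
+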
+ # -----------------------------------------------------------------------------
+    # write_table()
+ #
+ # This function writes the LR parsing tables to a file
+ # -----------------------------------------------------------------------------
+
+ def write_table(self, tabmodule, outputdir='', signature=''):
+ if isinstance(tabmodule, types.ModuleType):
+ raise IOError("Won't overwrite existing tabmodule")
+
+ basemodulename = tabmodule.split('.')[-1]
+ filename = os.path.join(outputdir, basemodulename) + '.py'
+ try:
+ f = open(filename, 'w')
+
+ f.write('''
+# %s
+# This file is automatically generated. Do not edit.
+_tabversion = %r
+
+_lr_method = %r
+
+_lr_signature = %r
+ ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
+
+ # Change smaller to 0 to go back to original tables
+ smaller = 1
+
+        # Factor out names to try and make the tables smaller
+ if smaller:
+ items = {}
+
+ for s, nd in self.lr_action.items():
+ for name, v in nd.items():
+ i = items.get(name)
+ if not i:
+ i = ([], [])
+ items[name] = i
+ i[0].append(s)
+ i[1].append(v)
+
+ f.write('\n_lr_action_items = {')
+ for k, v in items.items():
+ f.write('%r:([' % k)
+ for i in v[0]:
+ f.write('%r,' % i)
+ f.write('],[')
+ for i in v[1]:
+ f.write('%r,' % i)
+
+ f.write(']),')
+ f.write('}\n')
+
+ f.write('''
+_lr_action = {}
+for _k, _v in _lr_action_items.items():
+ for _x,_y in zip(_v[0],_v[1]):
+ if not _x in _lr_action: _lr_action[_x] = {}
+ _lr_action[_x][_k] = _y
+del _lr_action_items
+''')
+
+ else:
+ f.write('\n_lr_action = { ')
+ for k, v in self.lr_action.items():
+ f.write('(%r,%r):%r,' % (k[0], k[1], v))
+ f.write('}\n')
+
+ if smaller:
+            # Factor out names to try and make the tables smaller
+ items = {}
+
+ for s, nd in self.lr_goto.items():
+ for name, v in nd.items():
+ i = items.get(name)
+ if not i:
+ i = ([], [])
+ items[name] = i
+ i[0].append(s)
+ i[1].append(v)
+
+ f.write('\n_lr_goto_items = {')
+ for k, v in items.items():
+ f.write('%r:([' % k)
+ for i in v[0]:
+ f.write('%r,' % i)
+ f.write('],[')
+ for i in v[1]:
+ f.write('%r,' % i)
+
+ f.write(']),')
+ f.write('}\n')
+
+ f.write('''
+_lr_goto = {}
+for _k, _v in _lr_goto_items.items():
+ for _x, _y in zip(_v[0], _v[1]):
+ if not _x in _lr_goto: _lr_goto[_x] = {}
+ _lr_goto[_x][_k] = _y
+del _lr_goto_items
+''')
+ else:
+ f.write('\n_lr_goto = { ')
+ for k, v in self.lr_goto.items():
+ f.write('(%r,%r):%r,' % (k[0], k[1], v))
+ f.write('}\n')
+
+ # Write production table
+ f.write('_lr_productions = [\n')
+ for p in self.lr_productions:
+ if p.func:
+ f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
+ p.func, os.path.basename(p.file), p.line))
+ else:
+ f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
+ f.write(']\n')
+ f.close()
+
+        except IOError:
+            raise
+
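+    # A sketch of the generated module (illustrative values only).  With the
+    # factored "smaller" encoding, the emitted file looks roughly like:
+    #
+    #     _tabversion = '3.10'
+    #     _lr_method = 'LALR'
+    #     _lr_signature = '...'
+    #     _lr_action_items = {'PLUS':([0,3,],[5,-2,]),}
+    #     _lr_action = {}
+    #     for _k, _v in _lr_action_items.items():
+    #         for _x, _y in zip(_v[0], _v[1]):
+    #             if not _x in _lr_action: _lr_action[_x] = {}
+    #             _lr_action[_x][_k] = _y
+    #
+    # i.e. each symbol maps to parallel lists of states and table entries,
+    # which the loop re-expands into _lr_action[state][symbol] at import time.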
+
+ # -----------------------------------------------------------------------------
+ # pickle_table()
+ #
+ # This function pickles the LR parsing tables to a supplied file object
+ # -----------------------------------------------------------------------------
+
+ def pickle_table(self, filename, signature=''):
+ try:
+ import cPickle as pickle
+ except ImportError:
+ import pickle
+ with open(filename, 'wb') as outf:
+ pickle.dump(__tabversion__, outf, pickle_protocol)
+ pickle.dump(self.lr_method, outf, pickle_protocol)
+ pickle.dump(signature, outf, pickle_protocol)
+ pickle.dump(self.lr_action, outf, pickle_protocol)
+ pickle.dump(self.lr_goto, outf, pickle_protocol)
+
+ outp = []
+ for p in self.lr_productions:
+ if p.func:
+ outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line))
+ else:
+ outp.append((str(p), p.name, p.len, None, None, None))
+ pickle.dump(outp, outf, pickle_protocol)
+
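+    # Reading the pickle back (a hedged sketch that simply mirrors the dump
+    # order above; this is what LRTable.read_pickle() is expected to do):
+    #
+    #     with open(filename, 'rb') as inf:
+    #         tabversion  = pickle.load(inf)
+    #         lr_method   = pickle.load(inf)
+    #         signature   = pickle.load(inf)
+    #         lr_action   = pickle.load(inf)
+    #         lr_goto     = pickle.load(inf)
+    #         productions = pickle.load(inf)
+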
+# -----------------------------------------------------------------------------
+# === INTROSPECTION ===
+#
+# The following functions and classes are used to implement the PLY
+# introspection features, followed by the yacc() function itself.
+# -----------------------------------------------------------------------------
+
+# -----------------------------------------------------------------------------
+# get_caller_module_dict()
+#
+# This function returns a dictionary containing all of the symbols defined within
+# a caller further down the call stack. This is used to get the environment
+# associated with the yacc() call if none was provided.
+# -----------------------------------------------------------------------------
+
+def get_caller_module_dict(levels):
+ f = sys._getframe(levels)
+ ldict = f.f_globals.copy()
+ if f.f_globals != f.f_locals:
+ ldict.update(f.f_locals)
+ return ldict
+
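+# Illustrative use (names are from this module): yacc() below calls
+# get_caller_module_dict(2) when no module argument is given, so the
+# globals/locals of whoever called yacc() are introspected:
+#
+#     pdict = get_caller_module_dict(2)
+#     tokens = pdict.get('tokens')       # grammar symbols picked up by name
+#
+# This is how `tokens`, `precedence`, and the p_* functions are discovered.
+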
+# -----------------------------------------------------------------------------
+# parse_grammar()
+#
+# This takes a raw grammar rule string and parses it into production data
+# -----------------------------------------------------------------------------
+def parse_grammar(doc, file, line):
+ grammar = []
+ # Split the doc string into lines
+ pstrings = doc.splitlines()
+ lastp = None
+ dline = line
+ for ps in pstrings:
+ dline += 1
+ p = ps.split()
+ if not p:
+ continue
+ try:
+ if p[0] == '|':
+ # This is a continuation of a previous rule
+ if not lastp:
+ raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
+ prodname = lastp
+ syms = p[1:]
+ else:
+ prodname = p[0]
+ lastp = prodname
+ syms = p[2:]
+ assign = p[1]
+ if assign != ':' and assign != '::=':
+ raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
+
+ grammar.append((file, dline, prodname, syms))
+ except SyntaxError:
+ raise
+ except Exception:
+ raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
+
+ return grammar
+
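+# Example (illustrative): for a rule docstring such as
+#
+#     expr : expr PLUS term
+#          | term
+#
+# a call parse_grammar(doc, 'calc.py', 10) yields
+#
+#     [('calc.py', 11, 'expr', ['expr', 'PLUS', 'term']),
+#      ('calc.py', 12, 'expr', ['term'])]
+#
+# where a leading '|' continues the most recent production name.
+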
+# -----------------------------------------------------------------------------
+# ParserReflect()
+#
+# This class represents information extracted for building a parser including
+# start symbol, error function, tokens, precedence list, action functions,
+# etc.
+# -----------------------------------------------------------------------------
+class ParserReflect(object):
+ def __init__(self, pdict, log=None):
+ self.pdict = pdict
+ self.start = None
+ self.error_func = None
+ self.tokens = None
+ self.modules = set()
+ self.grammar = []
+ self.error = False
+
+ if log is None:
+ self.log = PlyLogger(sys.stderr)
+ else:
+ self.log = log
+
+ # Get all of the basic information
+ def get_all(self):
+ self.get_start()
+ self.get_error_func()
+ self.get_tokens()
+ self.get_precedence()
+ self.get_pfunctions()
+
+ # Validate all of the information
+ def validate_all(self):
+ self.validate_start()
+ self.validate_error_func()
+ self.validate_tokens()
+ self.validate_precedence()
+ self.validate_pfunctions()
+ self.validate_modules()
+ return self.error
+
+ # Compute a signature over the grammar
+ def signature(self):
+ parts = []
+ try:
+ if self.start:
+ parts.append(self.start)
+ if self.prec:
+ parts.append(''.join([''.join(p) for p in self.prec]))
+ if self.tokens:
+ parts.append(' '.join(self.tokens))
+ for f in self.pfuncs:
+ if f[3]:
+ parts.append(f[3])
+ except (TypeError, ValueError):
+ pass
+ return ''.join(parts)
+
+ # -----------------------------------------------------------------------------
+ # validate_modules()
+ #
+ # This method checks to see if there are duplicated p_rulename() functions
+ # in the parser module file. Without this function, it is really easy for
+ # users to make mistakes by cutting and pasting code fragments (and it's a real
+ # bugger to try and figure out why the resulting parser doesn't work). Therefore,
+ # we just do a little regular expression pattern matching of def statements
+ # to try and detect duplicates.
+ # -----------------------------------------------------------------------------
+
+ def validate_modules(self):
+ # Match def p_funcname(
+ fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
+
+ for module in self.modules:
+ try:
+ lines, linen = inspect.getsourcelines(module)
+ except IOError:
+ continue
+
+ counthash = {}
+ for linen, line in enumerate(lines):
+ linen += 1
+ m = fre.match(line)
+ if m:
+ name = m.group(1)
+ prev = counthash.get(name)
+ if not prev:
+ counthash[name] = linen
+ else:
+ filename = inspect.getsourcefile(module)
+ self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
+ filename, linen, name, prev)
+
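+    # Illustrative case (hypothetical): two definitions of the same rule name
+    # in one module, e.g.
+    #
+    #     def p_statement(p): ...      # line 12
+    #     def p_statement(p): ...      # line 40
+    #
+    # cause the second match to hit the counthash entry for 'p_statement' and
+    # emit the "Function ... redefined" warning above.
+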
+ # Get the start symbol
+ def get_start(self):
+ self.start = self.pdict.get('start')
+
+ # Validate the start symbol
+ def validate_start(self):
+ if self.start is not None:
+ if not isinstance(self.start, string_types):
+ self.log.error("'start' must be a string")
+
+ # Look for error handler
+ def get_error_func(self):
+ self.error_func = self.pdict.get('p_error')
+
+ # Validate the error function
+ def validate_error_func(self):
+ if self.error_func:
+ if isinstance(self.error_func, types.FunctionType):
+ ismethod = 0
+ elif isinstance(self.error_func, types.MethodType):
+ ismethod = 1
+ else:
+ self.log.error("'p_error' defined, but is not a function or method")
+ self.error = True
+ return
+
+ eline = self.error_func.__code__.co_firstlineno
+ efile = self.error_func.__code__.co_filename
+ module = inspect.getmodule(self.error_func)
+ self.modules.add(module)
+
+ argcount = self.error_func.__code__.co_argcount - ismethod
+ if argcount != 1:
+ self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
+ self.error = True
+
+ # Get the tokens map
+ def get_tokens(self):
+ tokens = self.pdict.get('tokens')
+ if not tokens:
+ self.log.error('No token list is defined')
+ self.error = True
+ return
+
+ if not isinstance(tokens, (list, tuple)):
+ self.log.error('tokens must be a list or tuple')
+ self.error = True
+ return
+
+ if not tokens:
+ self.log.error('tokens is empty')
+ self.error = True
+ return
+
+ self.tokens = tokens
+
+ # Validate the tokens
+ def validate_tokens(self):
+ # Validate the tokens.
+ if 'error' in self.tokens:
+ self.log.error("Illegal token name 'error'. Is a reserved word")
+ self.error = True
+ return
+
+ terminals = set()
+ for n in self.tokens:
+ if n in terminals:
+ self.log.warning('Token %r multiply defined', n)
+ terminals.add(n)
+
+ # Get the precedence map (if any)
+ def get_precedence(self):
+ self.prec = self.pdict.get('precedence')
+
+ # Validate and parse the precedence map
+ def validate_precedence(self):
+ preclist = []
+ if self.prec:
+ if not isinstance(self.prec, (list, tuple)):
+ self.log.error('precedence must be a list or tuple')
+ self.error = True
+ return
+ for level, p in enumerate(self.prec):
+ if not isinstance(p, (list, tuple)):
+ self.log.error('Bad precedence table')
+ self.error = True
+ return
+
+ if len(p) < 2:
+ self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
+ self.error = True
+ return
+ assoc = p[0]
+ if not isinstance(assoc, string_types):
+ self.log.error('precedence associativity must be a string')
+ self.error = True
+ return
+ for term in p[1:]:
+ if not isinstance(term, string_types):
+ self.log.error('precedence items must be strings')
+ self.error = True
+ return
+ preclist.append((term, assoc, level+1))
+ self.preclist = preclist
+
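+    # Worked example (illustrative): a declaration such as
+    #
+    #     precedence = (('left', 'PLUS', 'MINUS'),
+    #                   ('left', 'TIMES', 'DIVIDE'),
+    #                   ('right', 'UMINUS'))
+    #
+    # produces the flattened preclist
+    #
+    #     [('PLUS', 'left', 1), ('MINUS', 'left', 1),
+    #      ('TIMES', 'left', 2), ('DIVIDE', 'left', 2),
+    #      ('UMINUS', 'right', 3)]
+    #
+    # with level numbers increasing from lowest to highest precedence.
+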
+ # Get all p_functions from the grammar
+ def get_pfunctions(self):
+ p_functions = []
+ for name, item in self.pdict.items():
+ if not name.startswith('p_') or name == 'p_error':
+ continue
+ if isinstance(item, (types.FunctionType, types.MethodType)):
+ line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
+ module = inspect.getmodule(item)
+ p_functions.append((line, module, name, item.__doc__))
+
+ # Sort all of the actions by line number; make sure to stringify
+ # modules to make them sortable, since `line` may not uniquely sort all
+ # p functions
+ p_functions.sort(key=lambda p_function: (
+ p_function[0],
+ str(p_function[1]),
+ p_function[2],
+ p_function[3]))
+ self.pfuncs = p_functions
+
+ # Validate all of the p_functions
+ def validate_pfunctions(self):
+ grammar = []
+ # Check for non-empty symbols
+ if len(self.pfuncs) == 0:
+ self.log.error('no rules of the form p_rulename are defined')
+ self.error = True
+ return
+
+ for line, module, name, doc in self.pfuncs:
+ file = inspect.getsourcefile(module)
+ func = self.pdict[name]
+ if isinstance(func, types.MethodType):
+ reqargs = 2
+ else:
+ reqargs = 1
+ if func.__code__.co_argcount > reqargs:
+ self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
+ self.error = True
+ elif func.__code__.co_argcount < reqargs:
+ self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
+ self.error = True
+ elif not func.__doc__:
+ self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
+ file, line, func.__name__)
+ else:
+ try:
+ parsed_g = parse_grammar(doc, file, line)
+ for g in parsed_g:
+ grammar.append((name, g))
+ except SyntaxError as e:
+ self.log.error(str(e))
+ self.error = True
+
+            # Looks like a valid grammar rule.
+            # Record the module in which it was defined.
+ self.modules.add(module)
+
+ # Secondary validation step that looks for p_ definitions that are not functions
+ # or functions that look like they might be grammar rules.
+
+ for n, v in self.pdict.items():
+ if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
+ continue
+ if n.startswith('t_'):
+ continue
+ if n.startswith('p_') and n != 'p_error':
+ self.log.warning('%r not defined as a function', n)
+ if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
+ (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
+ if v.__doc__:
+ try:
+ doc = v.__doc__.split(' ')
+ if doc[1] == ':':
+ self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
+ v.__code__.co_filename, v.__code__.co_firstlineno, n)
+ except IndexError:
+ pass
+
+ self.grammar = grammar
+
+# -----------------------------------------------------------------------------
+# yacc(module)
+#
+# Build a parser
+# -----------------------------------------------------------------------------
+
+def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
+ check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
+ outputdir=None, debuglog=None, errorlog=None, picklefile=None):
+
+ if tabmodule is None:
+ tabmodule = tab_module
+
+ # Reference to the parsing method of the last built parser
+ global parse
+
+ # If pickling is enabled, table files are not created
+ if picklefile:
+ write_tables = 0
+
+ if errorlog is None:
+ errorlog = PlyLogger(sys.stderr)
+
+ # Get the module dictionary used for the parser
+ if module:
+ _items = [(k, getattr(module, k)) for k in dir(module)]
+ pdict = dict(_items)
+ # If no __file__ attribute is available, try to obtain it from the __module__ instead
+ if '__file__' not in pdict:
+ pdict['__file__'] = sys.modules[pdict['__module__']].__file__
+ else:
+ pdict = get_caller_module_dict(2)
+
+ if outputdir is None:
+ # If no output directory is set, the location of the output files
+ # is determined according to the following rules:
+ # - If tabmodule specifies a package, files go into that package directory
+ # - Otherwise, files go in the same directory as the specifying module
+ if isinstance(tabmodule, types.ModuleType):
+ srcfile = tabmodule.__file__
+ else:
+ if '.' not in tabmodule:
+ srcfile = pdict['__file__']
+ else:
+ parts = tabmodule.split('.')
+ pkgname = '.'.join(parts[:-1])
+ exec('import %s' % pkgname)
+ srcfile = getattr(sys.modules[pkgname], '__file__', '')
+ outputdir = os.path.dirname(srcfile)
+
+    # Determine if the parser module is part of a package.
+    # If so, fix the tabmodule setting so that the tables load correctly
+ pkg = pdict.get('__package__')
+ if pkg and isinstance(tabmodule, str):
+ if '.' not in tabmodule:
+ tabmodule = pkg + '.' + tabmodule
+
+ # Set start symbol if it's specified directly using an argument
+ if start is not None:
+ pdict['start'] = start
+
+ # Collect parser information from the dictionary
+ pinfo = ParserReflect(pdict, log=errorlog)
+ pinfo.get_all()
+
+ if pinfo.error:
+ raise YaccError('Unable to build parser')
+
+ # Check signature against table files (if any)
+ signature = pinfo.signature()
+
+ # Read the tables
+ try:
+ lr = LRTable()
+ if picklefile:
+ read_signature = lr.read_pickle(picklefile)
+ else:
+ read_signature = lr.read_table(tabmodule)
+ if optimize or (read_signature == signature):
+ try:
+ lr.bind_callables(pinfo.pdict)
+ parser = LRParser(lr, pinfo.error_func)
+ parse = parser.parse
+ return parser
+ except Exception as e:
+ errorlog.warning('There was a problem loading the table file: %r', e)
+ except VersionError as e:
+ errorlog.warning(str(e))
+ except ImportError:
+ pass
+
+ if debuglog is None:
+ if debug:
+ try:
+ debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
+ except IOError as e:
+ errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
+ debuglog = NullLogger()
+ else:
+ debuglog = NullLogger()
+
+ debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
+
+ errors = False
+
+ # Validate the parser information
+ if pinfo.validate_all():
+ raise YaccError('Unable to build parser')
+
+ if not pinfo.error_func:
+ errorlog.warning('no p_error() function is defined')
+
+ # Create a grammar object
+ grammar = Grammar(pinfo.tokens)
+
+ # Set precedence level for terminals
+ for term, assoc, level in pinfo.preclist:
+ try:
+ grammar.set_precedence(term, assoc, level)
+ except GrammarError as e:
+ errorlog.warning('%s', e)
+
+ # Add productions to the grammar
+ for funcname, gram in pinfo.grammar:
+ file, line, prodname, syms = gram
+ try:
+ grammar.add_production(prodname, syms, funcname, file, line)
+ except GrammarError as e:
+ errorlog.error('%s', e)
+ errors = True
+
+ # Set the grammar start symbols
+ try:
+ if start is None:
+ grammar.set_start(pinfo.start)
+ else:
+ grammar.set_start(start)
+ except GrammarError as e:
+ errorlog.error(str(e))
+ errors = True
+
+ if errors:
+ raise YaccError('Unable to build parser')
+
+ # Verify the grammar structure
+ undefined_symbols = grammar.undefined_symbols()
+ for sym, prod in undefined_symbols:
+ errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
+ errors = True
+
+ unused_terminals = grammar.unused_terminals()
+ if unused_terminals:
+ debuglog.info('')
+ debuglog.info('Unused terminals:')
+ debuglog.info('')
+ for term in unused_terminals:
+ errorlog.warning('Token %r defined, but not used', term)
+ debuglog.info(' %s', term)
+
+ # Print out all productions to the debug log
+ if debug:
+ debuglog.info('')
+ debuglog.info('Grammar')
+ debuglog.info('')
+ for n, p in enumerate(grammar.Productions):
+ debuglog.info('Rule %-5d %s', n, p)
+
+ # Find unused non-terminals
+ unused_rules = grammar.unused_rules()
+ for prod in unused_rules:
+ errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
+
+ if len(unused_terminals) == 1:
+ errorlog.warning('There is 1 unused token')
+ if len(unused_terminals) > 1:
+ errorlog.warning('There are %d unused tokens', len(unused_terminals))
+
+ if len(unused_rules) == 1:
+ errorlog.warning('There is 1 unused rule')
+ if len(unused_rules) > 1:
+ errorlog.warning('There are %d unused rules', len(unused_rules))
+
+ if debug:
+ debuglog.info('')
+ debuglog.info('Terminals, with rules where they appear')
+ debuglog.info('')
+ terms = list(grammar.Terminals)
+ terms.sort()
+ for term in terms:
+ debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
+
+ debuglog.info('')
+ debuglog.info('Nonterminals, with rules where they appear')
+ debuglog.info('')
+ nonterms = list(grammar.Nonterminals)
+ nonterms.sort()
+ for nonterm in nonterms:
+ debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
+ debuglog.info('')
+
+ if check_recursion:
+ unreachable = grammar.find_unreachable()
+ for u in unreachable:
+ errorlog.warning('Symbol %r is unreachable', u)
+
+ infinite = grammar.infinite_cycles()
+ for inf in infinite:
+ errorlog.error('Infinite recursion detected for symbol %r', inf)
+ errors = True
+
+ unused_prec = grammar.unused_precedence()
+ for term, assoc in unused_prec:
+ errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
+ errors = True
+
+ if errors:
+ raise YaccError('Unable to build parser')
+
+ # Run the LRGeneratedTable on the grammar
+ if debug:
+ errorlog.debug('Generating %s tables', method)
+
+ lr = LRGeneratedTable(grammar, method, debuglog)
+
+ if debug:
+ num_sr = len(lr.sr_conflicts)
+
+ # Report shift/reduce and reduce/reduce conflicts
+ if num_sr == 1:
+ errorlog.warning('1 shift/reduce conflict')
+ elif num_sr > 1:
+ errorlog.warning('%d shift/reduce conflicts', num_sr)
+
+ num_rr = len(lr.rr_conflicts)
+ if num_rr == 1:
+ errorlog.warning('1 reduce/reduce conflict')
+ elif num_rr > 1:
+ errorlog.warning('%d reduce/reduce conflicts', num_rr)
+
+ # Write out conflicts to the output file
+ if debug and (lr.sr_conflicts or lr.rr_conflicts):
+ debuglog.warning('')
+ debuglog.warning('Conflicts:')
+ debuglog.warning('')
+
+ for state, tok, resolution in lr.sr_conflicts:
+ debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
+
+ already_reported = set()
+ for state, rule, rejected in lr.rr_conflicts:
+ if (state, id(rule), id(rejected)) in already_reported:
+ continue
+ debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
+ debuglog.warning('rejected rule (%s) in state %d', rejected, state)
+ errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
+ errorlog.warning('rejected rule (%s) in state %d', rejected, state)
+ already_reported.add((state, id(rule), id(rejected)))
+
+ warned_never = []
+ for state, rule, rejected in lr.rr_conflicts:
+ if not rejected.reduced and (rejected not in warned_never):
+ debuglog.warning('Rule (%s) is never reduced', rejected)
+ errorlog.warning('Rule (%s) is never reduced', rejected)
+ warned_never.append(rejected)
+
+ # Write the table file if requested
+ if write_tables:
+ try:
+ lr.write_table(tabmodule, outputdir, signature)
+ except IOError as e:
+ errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
+
+ # Write a pickled version of the tables
+ if picklefile:
+ try:
+ lr.pickle_table(picklefile, signature)
+ except IOError as e:
+ errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
+
+ # Build the parser
+ lr.bind_callables(pinfo.pdict)
+ parser = LRParser(lr, pinfo.error_func)
+
+ parse = parser.parse
+ return parser
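+
+# A minimal end-to-end sketch (hypothetical grammar module, for illustration
+# only; assumes a matching lexer built with ply.lex is available):
+#
+#     import ply.yacc as yacc
+#     from calclex import tokens          # hypothetical lexer module
+#
+#     precedence = (('left', 'PLUS'), ('left', 'TIMES'))
+#
+#     def p_expr_binop(p):
+#         '''expr : expr PLUS expr
+#                 | expr TIMES expr'''
+#         p[0] = (p[2], p[1], p[3])
+#
+#     def p_expr_num(p):
+#         'expr : NUMBER'
+#         p[0] = p[1]
+#
+#     def p_error(p):
+#         print('Syntax error at', p)
+#
+#     parser = yacc.yacc()     # builds tables from this module's namespace
+#     result = parser.parse('1 + 2 * 3')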
diff --git a/third_party/python/ply/ply/ygen.py b/third_party/python/ply/ply/ygen.py
new file mode 100644
index 0000000000..acf5ca1a37
--- /dev/null
+++ b/third_party/python/ply/ply/ygen.py
@@ -0,0 +1,74 @@
+# ply: ygen.py
+#
+# This is a support program that auto-generates different versions of the YACC parsing
+# function with different features removed for the purposes of performance.
+#
+# Users should edit the method LRParser.parsedebug() in yacc.py. The source code
+# for that method is then used to create the other methods. See the comments in
+# yacc.py for further details.
+
+import os.path
+import shutil
+
+def get_source_range(lines, tag):
+ srclines = enumerate(lines)
+ start_tag = '#--! %s-start' % tag
+ end_tag = '#--! %s-end' % tag
+
+ for start_index, line in srclines:
+ if line.strip().startswith(start_tag):
+ break
+
+ for end_index, line in srclines:
+ if line.strip().endswith(end_tag):
+ break
+
+ return (start_index + 1, end_index)
+
+def filter_section(lines, tag):
+ filtered_lines = []
+ include = True
+ tag_text = '#--! %s' % tag
+ for line in lines:
+ if line.strip().startswith(tag_text):
+ include = not include
+ elif include:
+ filtered_lines.append(line)
+ return filtered_lines
+
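+# Example (illustrative): filter_section() drops everything between a pair of
+# matching tag lines, including the tag lines themselves:
+#
+#     lines = ['a\n', '#--! DEBUG\n', 'dbg\n', '#--! DEBUG\n', 'b\n']
+#     filter_section(lines, 'DEBUG')      # -> ['a\n', 'b\n']
+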
+def main():
+ dirname = os.path.dirname(__file__)
+ shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak'))
+ with open(os.path.join(dirname, 'yacc.py'), 'r') as f:
+ lines = f.readlines()
+
+ parse_start, parse_end = get_source_range(lines, 'parsedebug')
+ parseopt_start, parseopt_end = get_source_range(lines, 'parseopt')
+ parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack')
+
+ # Get the original source
+ orig_lines = lines[parse_start:parse_end]
+
+ # Filter the DEBUG sections out
+ parseopt_lines = filter_section(orig_lines, 'DEBUG')
+
+ # Filter the TRACKING sections out
+ parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING')
+
+ # Replace the parser source sections with updated versions
+ lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines
+ lines[parseopt_start:parseopt_end] = parseopt_lines
+
+ lines = [line.rstrip()+'\n' for line in lines]
+ with open(os.path.join(dirname, 'yacc.py'), 'w') as f:
+ f.writelines(lines)
+
+ print('Updated yacc.py')
+
+if __name__ == '__main__':
+ main()
+
+
+
+
+
diff --git a/third_party/python/ply/setup.cfg b/third_party/python/ply/setup.cfg
new file mode 100644
index 0000000000..4ec8a167da
--- /dev/null
+++ b/third_party/python/ply/setup.cfg
@@ -0,0 +1,11 @@
+[bdist_wheel]
+universal = 1
+
+[metadata]
+description-file = README.md
+
+[egg_info]
+tag_build =
+tag_date = 0
+tag_svn_revision = 0
+
diff --git a/third_party/python/ply/setup.py b/third_party/python/ply/setup.py
new file mode 100644
index 0000000000..ee8ccd0ccf
--- /dev/null
+++ b/third_party/python/ply/setup.py
@@ -0,0 +1,31 @@
+try:
+ from setuptools import setup
+except ImportError:
+ from distutils.core import setup
+
+setup(name = "ply",
+ description="Python Lex & Yacc",
+ long_description = """
+PLY is yet another implementation of lex and yacc for Python. Some notable
+features include the fact that it's implemented entirely in Python and that it
+uses LALR(1) parsing, which is efficient and well suited for larger grammars.
+
+PLY provides most of the standard lex/yacc features including support for empty
+productions, precedence rules, error recovery, and support for ambiguous grammars.
+
+PLY is extremely easy to use and provides very extensive error checking.
+It is compatible with both Python 2 and Python 3.
+""",
+ license="""BSD""",
+ version = "3.10",
+ author = "David Beazley",
+ author_email = "dave@dabeaz.com",
+ maintainer = "David Beazley",
+ maintainer_email = "dave@dabeaz.com",
+ url = "http://www.dabeaz.com/ply/",
+ packages = ['ply'],
+ classifiers = [
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 2',
+ ]
+ )
diff --git a/third_party/python/poetry.lock b/third_party/python/poetry.lock
new file mode 100644
index 0000000000..c4d90d056e
--- /dev/null
+++ b/third_party/python/poetry.lock
@@ -0,0 +1,1282 @@
+# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+
+[[package]]
+name = "aiohttp"
+version = "3.7.4.post0"
+description = "Async http client/server framework (asyncio)"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-win32.whl", hash = "sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287"},
+ {file = "aiohttp-3.7.4.post0-cp36-cp36m-win_amd64.whl", hash = "sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-win32.whl", hash = "sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f"},
+ {file = "aiohttp-3.7.4.post0-cp37-cp37m-win_amd64.whl", hash = "sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-win32.whl", hash = "sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16"},
+ {file = "aiohttp-3.7.4.post0-cp38-cp38-win_amd64.whl", hash = "sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-win32.whl", hash = "sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9"},
+ {file = "aiohttp-3.7.4.post0-cp39-cp39-win_amd64.whl", hash = "sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe"},
+ {file = "aiohttp-3.7.4.post0.tar.gz", hash = "sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf"},
+]
+
+[package.dependencies]
+async-timeout = ">=3.0,<4.0"
+attrs = ">=17.3.0"
+chardet = ">=2.0,<5.0"
+multidict = ">=4.5,<7.0"
+typing-extensions = ">=3.6.5"
+yarl = ">=1.0,<2.0"
+
+[package.extras]
+speedups = ["aiodns", "brotlipy", "cchardet"]
+
+[[package]]
+name = "ansicon"
+version = "1.89.0"
+description = "Python wrapper for loading Jason Hood's ANSICON"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ansicon-1.89.0-py2.py3-none-any.whl", hash = "sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec"},
+ {file = "ansicon-1.89.0.tar.gz", hash = "sha256:e4d039def5768a47e4afec8e89e83ec3ae5a26bf00ad851f914d1240b444d2b1"},
+]
+
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
+ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
+]
+
+[[package]]
+name = "async-timeout"
+version = "3.0.1"
+description = "Timeout context manager for asyncio programs"
+category = "main"
+optional = false
+python-versions = ">=3.5.3"
+files = [
+ {file = "async-timeout-3.0.1.tar.gz", hash = "sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f"},
+ {file = "async_timeout-3.0.1-py3-none-any.whl", hash = "sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3"},
+]
+
+[[package]]
+name = "attrs"
+version = "23.1.0"
+description = "Classes Without Boilerplate"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+]
+
+[package.dependencies]
+importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+
+[package.extras]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+
+[[package]]
+name = "blessed"
+version = "1.19.1"
+description = "Easy, practical library for making terminal apps, by providing an elegant, well-documented interface to Colors, Keyboard input, and screen Positioning capabilities."
+category = "main"
+optional = false
+python-versions = ">=2.7"
+files = [
+ {file = "blessed-1.19.1-py2.py3-none-any.whl", hash = "sha256:63b8554ae2e0e7f43749b6715c734cc8f3883010a809bf16790102563e6cf25b"},
+ {file = "blessed-1.19.1.tar.gz", hash = "sha256:9a0d099695bf621d4680dd6c73f6ad547f6a3442fbdbe80c4b1daa1edbc492fc"},
+]
+
+[package.dependencies]
+jinxed = {version = ">=1.1.0", markers = "platform_system == \"Windows\""}
+six = ">=1.9.0"
+wcwidth = ">=0.1.4"
+
+[[package]]
+name = "cbor2"
+version = "4.0.1"
+description = "Pure Python CBOR (de)serializer with extensive tag support"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "cbor2-4.0.1-py2.py3-none-any.whl", hash = "sha256:b0eb916c9ea226aa81e9091607737475d5b0e5c314fe8d5a87179fba449cd190"},
+ {file = "cbor2-4.0.1.tar.gz", hash = "sha256:cee0d01e520563b5a73c72eace5c428bb68aefb1b3f7aee5d692d3af6a1e5172"},
+]
+
+[package.extras]
+testing = ["pytest", "pytest-cov"]
+
+[[package]]
+name = "certifi"
+version = "2022.12.7"
+description = "Python package for providing Mozilla's CA Bundle."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
+ {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
+]
+
+[[package]]
+name = "chardet"
+version = "4.0.0"
+description = "Universal encoding detector for Python 2 and 3"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+ {file = "chardet-4.0.0-py2.py3-none-any.whl", hash = "sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5"},
+ {file = "chardet-4.0.0.tar.gz", hash = "sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa"},
+]
+
+[[package]]
+name = "click"
+version = "7.1.2"
+description = "Composable command line interface toolkit"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+ {file = "click-7.1.2-py2.py3-none-any.whl", hash = "sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc"},
+ {file = "click-7.1.2.tar.gz", hash = "sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a"},
+]
+
+[[package]]
+name = "colorama"
+version = "0.4.5"
+description = "Cross-platform colored terminal text."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+ {file = "colorama-0.4.5-py2.py3-none-any.whl", hash = "sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da"},
+ {file = "colorama-0.4.5.tar.gz", hash = "sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4"},
+]
+
+[[package]]
+name = "compare-locales"
+version = "9.0.1"
+description = "Lint Mozilla localizations"
+category = "main"
+optional = false
+python-versions = ">=3.7, <4"
+files = [
+ {file = "compare-locales-9.0.1.tar.gz", hash = "sha256:eda953796841cbfab508ee35f7613a38ae7fbeed48bd26bf5cda9063bd638f06"},
+ {file = "compare_locales-9.0.1-py2.py3-none-any.whl", hash = "sha256:2de0f1d382749fffa6a482d462daff0d70bbc99d48520a0bf8459b22dc7fe9da"},
+]
+
+[package.dependencies]
+"fluent.syntax" = ">=0.18.0,<0.20"
+six = "*"
+toml = "*"
+
+[[package]]
+name = "cookies"
+version = "2.2.1"
+description = "Friendlier RFC 6265-compliant cookie parser/renderer"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "cookies-2.2.1-py2.py3-none-any.whl", hash = "sha256:15bee753002dff684987b8df8c235288eb8d45f8191ae056254812dfd42c81d3"},
+ {file = "cookies-2.2.1.tar.gz", hash = "sha256:d6b698788cae4cfa4e62ef8643a9ca332b79bd96cb314294b864ae8d7eb3ee8e"},
+]
+
+[[package]]
+name = "cram"
+version = "0.7"
+description = "A simple testing framework for command line applications"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "cram-0.7-py2.py3-none-any.whl", hash = "sha256:008e4e8b4d325cf040964b5f62460535b004a7bc816d54f8527a4d299edfe4a3"},
+ {file = "cram-0.7.tar.gz", hash = "sha256:7da7445af2ce15b90aad5ec4792f857cef5786d71f14377e9eb994d8b8337f2f"},
+]
+
+[[package]]
+name = "diskcache"
+version = "4.1.0"
+description = "Disk Cache -- Disk and file backed persistent cache."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "diskcache-4.1.0-py2.py3-none-any.whl", hash = "sha256:69b253a6ffe95bb4bafb483b97c24fca3c2c6c47b82e92b36486969a7e80d47d"},
+ {file = "diskcache-4.1.0.tar.gz", hash = "sha256:bcee5a59f9c264e2809e58d01be6569a3bbb1e36a1e0fb83f7ef9b2075f95ce0"},
+]
+
+[[package]]
+name = "distro"
+version = "1.4.0"
+description = "Distro - an OS platform information API"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "distro-1.4.0-py2.py3-none-any.whl", hash = "sha256:eedf82a470ebe7d010f1872c17237c79ab04097948800029994fa458e52fb4b4"},
+ {file = "distro-1.4.0.tar.gz", hash = "sha256:362dde65d846d23baee4b5c058c8586f219b5a54be1cf5fc6ff55c4578392f57"},
+]
+
+[[package]]
+name = "ecdsa"
+version = "0.15"
+description = "ECDSA cryptographic signature library (pure python)"
+category = "main"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "ecdsa-0.15-py2.py3-none-any.whl", hash = "sha256:867ec9cf6df0b03addc8ef66b56359643cb5d0c1dc329df76ba7ecfe256c8061"},
+ {file = "ecdsa-0.15.tar.gz", hash = "sha256:8f12ac317f8a1318efa75757ef0a651abe12e51fc1af8838fb91079445227277"},
+]
+
+[package.dependencies]
+six = ">=1.9.0"
+
+[package.extras]
+gmpy = ["gmpy"]
+gmpy2 = ["gmpy2"]
+
+[[package]]
+name = "esprima"
+version = "4.0.1"
+description = "ECMAScript parsing infrastructure for multipurpose analysis in Python"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "esprima-4.0.1.tar.gz", hash = "sha256:08db1a876d3c2910db9cfaeb83108193af5411fc3a3a66ebefacd390d21323ee"},
+]
+
+[[package]]
+name = "fluent-migrate"
+version = "0.12.0"
+description = "Toolchain to migrate legacy translation to Fluent."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "fluent.migrate-0.12.0-py2.py3-none-any.whl", hash = "sha256:e3564c92d1f53700e98792f1be1ff954488d431ff9f5ec290a4ab13b5de69487"},
+ {file = "fluent.migrate-0.12.0.tar.gz", hash = "sha256:926e69e94975521a974b206e242a479310c2cbca1865ca26bf40fa3c7a357338"},
+]
+
+[package.dependencies]
+compare-locales = ">=9.0.1,<10.0"
+"fluent.syntax" = ">=0.19.0,<0.20"
+
+[package.extras]
+hg = ["python-hglib"]
+
+[[package]]
+name = "fluent-syntax"
+version = "0.19.0"
+description = "Localization library for expressive translations."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "fluent.syntax-0.19.0-py2.py3-none-any.whl", hash = "sha256:b352b3475fac6c6ed5f06527921f432aac073d764445508ee5218aeccc7cc5c4"},
+ {file = "fluent.syntax-0.19.0.tar.gz", hash = "sha256:920326d7f46864b9758f0044e9968e3112198bc826acee16ddd8f11d359004fd"},
+]
+
+[package.dependencies]
+typing-extensions = ">=3.7,<5"
+
+[[package]]
+name = "giturlparse"
+version = "0.10.0"
+description = "A Git URL parsing module (supports parsing and rewriting)"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "giturlparse-0.10.0-py2.py3-none-any.whl", hash = "sha256:04ba1a3a099c3093fa8d24a422913c6a9b2c2cd22bcffc939cf72e3e98f672d7"},
+ {file = "giturlparse-0.10.0.tar.gz", hash = "sha256:2595ab291d30717cda8474b874c9fd509f1b9802ad7f6968c36a45e4b13eb337"},
+]
+
+[[package]]
+name = "glean-parser"
+version = "7.2.1"
+description = "Parser tools for Mozilla's Glean telemetry"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "glean_parser-7.2.1-py3-none-any.whl", hash = "sha256:651cfee34422ea1db90bbf1cb03732bd8c598773bf95daa289a62addeaf10295"},
+ {file = "glean_parser-7.2.1.tar.gz", hash = "sha256:11496ac004fe421b914c7fbdc9a1d620e4821d56e1d9f65523d3858cdb907bbd"},
+]
+
+[package.dependencies]
+appdirs = ">=1.4"
+Click = ">=7"
+diskcache = ">=4"
+Jinja2 = ">=2.10.1"
+jsonschema = ">=3.0.2"
+MarkupSafe = ">=1.1.1,<=2.0.1"
+PyYAML = ">=5.3.1"
+
+[[package]]
+name = "idna"
+version = "2.10"
+description = "Internationalized Domain Names in Applications (IDNA)"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "idna-2.10-py2.py3-none-any.whl", hash = "sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0"},
+ {file = "idna-2.10.tar.gz", hash = "sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6"},
+]
+
+[[package]]
+name = "importlib-metadata"
+version = "6.0.0"
+description = "Read metadata from Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "importlib_metadata-6.0.0-py3-none-any.whl", hash = "sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad"},
+ {file = "importlib_metadata-6.0.0.tar.gz", hash = "sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""}
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+perf = ["ipython"]
+testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"]
+
+[[package]]
+name = "importlib-resources"
+version = "5.12.0"
+description = "Read resources from Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"},
+ {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"},
+]
+
+[package.dependencies]
+zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"]
+
+[[package]]
+name = "jinja2"
+version = "2.11.3"
+description = "A very fast and expressive template engine."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+ {file = "Jinja2-2.11.3-py2.py3-none-any.whl", hash = "sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419"},
+ {file = "Jinja2-2.11.3.tar.gz", hash = "sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=0.23"
+
+[package.extras]
+i18n = ["Babel (>=0.8)"]
+
+[[package]]
+name = "jinxed"
+version = "1.2.0"
+description = "Jinxed Terminal Library"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "jinxed-1.2.0-py2.py3-none-any.whl", hash = "sha256:cfc2b2e4e3b4326954d546ba6d6b9a7a796ddcb0aef8d03161d005177eb0d48b"},
+ {file = "jinxed-1.2.0.tar.gz", hash = "sha256:032acda92d5c57cd216033cbbd53de731e6ed50deb63eb4781336ca55f72cda5"},
+]
+
+[package.dependencies]
+ansicon = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "jsmin"
+version = "3.0.0"
+description = "JavaScript minifier."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "jsmin-3.0.0.tar.gz", hash = "sha256:88fc1bd6033a47c5911dbcada7d279c7a8b7ad0841909590f6a742c20c4d2e08"},
+]
+
+[[package]]
+name = "json-e"
+version = "2.7.0"
+description = "A data-structure parameterization system written for embedding context in JSON objects"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "json-e-2.7.0.tar.gz", hash = "sha256:d8c1ec3f5bbc7728c3a504ebe58829f283c64eca230871e4eefe974b4cdaae4a"},
+]
+
+[package.extras]
+release = ["towncrier"]
+
+[[package]]
+name = "jsonschema"
+version = "4.17.3"
+description = "An implementation of JSON Schema validation for Python"
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"},
+ {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"},
+]
+
+[package.dependencies]
+attrs = ">=17.4.0"
+importlib-metadata = {version = "*", markers = "python_version < \"3.8\""}
+importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
+pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2"
+typing-extensions = {version = "*", markers = "python_version < \"3.8\""}
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+
+[[package]]
+name = "looseversion"
+version = "1.0.1"
+description = "Version numbering for anarchists and software realists"
+category = "main"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "looseversion-1.0.1-py3-none-any.whl", hash = "sha256:a205beabd0ffd40488edb9ccb3a39134510fc7c0c2847a25079f559e59c004ac"},
+ {file = "looseversion-1.0.1.tar.gz", hash = "sha256:b339dfde67680e9c5c2e96673e52bee9f94d2f0e1b8f4cbfd86d32311e86b952"},
+]
+
+[[package]]
+name = "markupsafe"
+version = "2.0.1"
+description = "Safely add untrusted strings to HTML/XML markup."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-win32.whl", hash = "sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28"},
+ {file = "MarkupSafe-2.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-win32.whl", hash = "sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d"},
+ {file = "MarkupSafe-2.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-win32.whl", hash = "sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415"},
+ {file = "MarkupSafe-2.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-win32.whl", hash = "sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64"},
+ {file = "MarkupSafe-2.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_i686.whl", hash = "sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-win32.whl", hash = "sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74"},
+ {file = "MarkupSafe-2.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8"},
+ {file = "MarkupSafe-2.0.1.tar.gz", hash = "sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a"},
+]
+
+[[package]]
+name = "mohawk"
+version = "0.3.4"
+description = "Library for Hawk HTTP authorization"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "mohawk-0.3.4-py2-none-any.whl", hash = "sha256:b3f85ffa93a5c7d2f9cc591246ef9f8ac4a9fa716bfd5bae0377699a2d89d78c"},
+ {file = "mohawk-0.3.4.tar.gz", hash = "sha256:e98b331d9fa9ece7b8be26094cbe2d57613ae882133cc755167268a984bc0ab3"},
+]
+
+[package.dependencies]
+six = "*"
+
+[[package]]
+name = "mozilla-repo-urls"
+version = "0.1.1"
+description = "Process Mozilla's repository URLs. The intent is to centralize URLs parsing."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "mozilla-repo-urls-0.1.1.tar.gz", hash = "sha256:7364da790751db2a060eb45adbf1d7db89a145ed279ba235f3425db9dd255915"},
+ {file = "mozilla_repo_urls-0.1.1-py3-none-any.whl", hash = "sha256:30510d3519479aa70211145d0ac9cf6e2fadcb8d30fa3b196bb957bd773502ba"},
+]
+
+[package.dependencies]
+giturlparse = "*"
+
+[[package]]
+name = "mozilla-version"
+version = "2.0.0"
+description = "Process Firefox versions numbers. Tells whether they are valid or not, whether they are nightlies or regular releases, whether this version precedes that other."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "mozilla-version-2.0.0.tar.gz", hash = "sha256:09697ddc5f55ad8d76521bf3e37aaec4d5bfd7fd4c9018a1cbb0e8cf6c536538"},
+ {file = "mozilla_version-2.0.0-py3-none-any.whl", hash = "sha256:50807a1f4000a7db6bfe95b0ffb1bade429cd8e56cbab70fd3eff5dd46ebb794"},
+]
+
+[package.dependencies]
+attrs = ">=19.2"
+
+[[package]]
+name = "multidict"
+version = "5.1.0"
+description = "multidict implementation"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "multidict-5.1.0-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da"},
+ {file = "multidict-5.1.0-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224"},
+ {file = "multidict-5.1.0-cp36-cp36m-win32.whl", hash = "sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26"},
+ {file = "multidict-5.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6"},
+ {file = "multidict-5.1.0-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9"},
+ {file = "multidict-5.1.0-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37"},
+ {file = "multidict-5.1.0-cp37-cp37m-win32.whl", hash = "sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5"},
+ {file = "multidict-5.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632"},
+ {file = "multidict-5.1.0-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux1_i686.whl", hash = "sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a"},
+ {file = "multidict-5.1.0-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea"},
+ {file = "multidict-5.1.0-cp38-cp38-win32.whl", hash = "sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656"},
+ {file = "multidict-5.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3"},
+ {file = "multidict-5.1.0-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux1_i686.whl", hash = "sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841"},
+ {file = "multidict-5.1.0-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda"},
+ {file = "multidict-5.1.0-cp39-cp39-win32.whl", hash = "sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80"},
+ {file = "multidict-5.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359"},
+ {file = "multidict-5.1.0.tar.gz", hash = "sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5"},
+]
+
+[[package]]
+name = "packaging"
+version = "21.3"
+description = "Core utilities for Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"},
+ {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"},
+]
+
+[package.dependencies]
+pyparsing = ">=2.0.2,<3.0.5 || >3.0.5"
+
+[[package]]
+name = "pathspec"
+version = "0.9.0"
+description = "Utility library for gitignore style pattern matching of file paths."
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+files = [
+ {file = "pathspec-0.9.0-py2.py3-none-any.whl", hash = "sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a"},
+ {file = "pathspec-0.9.0.tar.gz", hash = "sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1"},
+]
+
+[[package]]
+name = "pip"
+version = "23.0.1"
+description = "The PyPA recommended tool for installing Python packages."
+category = "main"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pip-23.0.1-py3-none-any.whl", hash = "sha256:236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f"},
+ {file = "pip-23.0.1.tar.gz", hash = "sha256:cd015ea1bfb0fcef59d8a286c1f8bebcb983f6317719d415dc5351efb7cd7024"},
+]
+
+[[package]]
+name = "pip-tools"
+version = "5.5.0"
+description = "pip-tools keeps your pinned dependencies fresh."
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+files = [
+ {file = "pip-tools-5.5.0.tar.gz", hash = "sha256:cb0108391366b3ef336185097b3c2c0f3fa115b15098dafbda5e78aef70ea114"},
+ {file = "pip_tools-5.5.0-py2.py3-none-any.whl", hash = "sha256:10841c1e56c234d610d0466447685b9ea4ee4a2c274f858c0ef3c33d9bd0d985"},
+]
+
+[package.dependencies]
+click = ">=7"
+pip = ">=20.1"
+
+[package.extras]
+coverage = ["pytest-cov"]
+testing = ["mock", "pytest", "pytest-rerunfailures"]
+
+[[package]]
+name = "pkgutil-resolve-name"
+version = "1.3.10"
+description = "Resolve a name to an object."
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
+ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
+]
+
+[[package]]
+name = "ply"
+version = "3.10"
+description = "Python Lex & Yacc"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ply-3.10.tar.gz", hash = "sha256:96e94af7dd7031d8d6dd6e2a8e0de593b511c211a86e28a9c9621c275ac8bacb"},
+]
+
+[[package]]
+name = "pyasn1"
+version = "0.4.8"
+description = "ASN.1 types and codecs"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pyasn1-0.4.8-py2.py3-none-any.whl", hash = "sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d"},
+ {file = "pyasn1-0.4.8.tar.gz", hash = "sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba"},
+]
+
+[[package]]
+name = "pyasn1-modules"
+version = "0.2.8"
+description = "A collection of ASN.1-based protocols modules."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pyasn1-modules-0.2.8.tar.gz", hash = "sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e"},
+ {file = "pyasn1_modules-0.2.8-py2.py3-none-any.whl", hash = "sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.4.6,<0.5.0"
+
+[[package]]
+name = "pylru"
+version = "1.0.9"
+description = "A least recently used (LRU) cache implementation"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pylru-1.0.9.tar.gz", hash = "sha256:71376192671f0ad1690b2a7427d39a29b1df994c8469a9b46b03ed7e28c0172c"},
+]
+
+[[package]]
+name = "pyparsing"
+version = "2.4.7"
+description = "Python parsing module"
+category = "main"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "pyparsing-2.4.7-py2.py3-none-any.whl", hash = "sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b"},
+ {file = "pyparsing-2.4.7.tar.gz", hash = "sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1"},
+]
+
+[[package]]
+name = "pyrsistent"
+version = "0.16.0"
+description = "Persistent/Functional/Immutable data structures"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "pyrsistent-0.16.0.tar.gz", hash = "sha256:28669905fe725965daa16184933676547c5bb40a5153055a8dee2a4bd7933ad3"},
+]
+
+[package.dependencies]
+six = "*"
+
+[[package]]
+name = "python-hglib"
+version = "2.4"
+description = "Mercurial Python library"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "python-hglib-2.4.tar.gz", hash = "sha256:693d6ed92a6566e78802c7a03c256cda33d08c63ad3f00fcfa11379b184b9462"},
+]
+
+[[package]]
+name = "pyyaml"
+version = "5.4.1"
+description = "YAML parser and emitter for Python"
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+files = [
+ {file = "PyYAML-5.4.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922"},
+ {file = "PyYAML-5.4.1-cp27-cp27m-win32.whl", hash = "sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393"},
+ {file = "PyYAML-5.4.1-cp27-cp27m-win_amd64.whl", hash = "sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8"},
+ {file = "PyYAML-5.4.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-win32.whl", hash = "sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5"},
+ {file = "PyYAML-5.4.1-cp36-cp36m-win_amd64.whl", hash = "sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-win32.whl", hash = "sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b"},
+ {file = "PyYAML-5.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf"},
+ {file = "PyYAML-5.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46"},
+ {file = "PyYAML-5.4.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb"},
+ {file = "PyYAML-5.4.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247"},
+ {file = "PyYAML-5.4.1-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc"},
+ {file = "PyYAML-5.4.1-cp38-cp38-win32.whl", hash = "sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc"},
+ {file = "PyYAML-5.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696"},
+ {file = "PyYAML-5.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77"},
+ {file = "PyYAML-5.4.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183"},
+ {file = "PyYAML-5.4.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122"},
+ {file = "PyYAML-5.4.1-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6"},
+ {file = "PyYAML-5.4.1-cp39-cp39-win32.whl", hash = "sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10"},
+ {file = "PyYAML-5.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db"},
+ {file = "PyYAML-5.4.1.tar.gz", hash = "sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e"},
+]
+
+[[package]]
+name = "redo"
+version = "2.0.3"
+description = "Utilities to retry Python callables."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "redo-2.0.3-py2.py3-none-any.whl", hash = "sha256:36784bf8ae766e14f9db0e377ccfa02835d648321d2007b6ae0bf4fd612c0f94"},
+ {file = "redo-2.0.3.tar.gz", hash = "sha256:71161cb0e928d824092a5f16203939bbc0867ce4c4685db263cf22c3ae7634a8"},
+]
+
+[[package]]
+name = "requests"
+version = "2.25.1"
+description = "Python HTTP for Humans."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
+files = [
+ {file = "requests-2.25.1-py2.py3-none-any.whl", hash = "sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e"},
+ {file = "requests-2.25.1.tar.gz", hash = "sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+chardet = ">=3.0.2,<5"
+idna = ">=2.5,<3"
+urllib3 = ">=1.21.1,<1.27"
+
+[package.extras]
+security = ["cryptography (>=1.3.4)", "pyOpenSSL (>=0.14)"]
+socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"]
+
+[[package]]
+name = "requests-unixsocket"
+version = "0.2.0"
+description = "Use requests to talk HTTP via a UNIX domain socket"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "requests-unixsocket-0.2.0.tar.gz", hash = "sha256:9e5c1a20afc3cf786197ae59c79bcdb0e7565f218f27df5f891307ee8817c1ea"},
+ {file = "requests_unixsocket-0.2.0-py2.py3-none-any.whl", hash = "sha256:014d07bfb66dc805a011a8b4b306cf4ec96d2eddb589f6b2b5765e626f0dc0cc"},
+]
+
+[package.dependencies]
+requests = ">=1.1"
+urllib3 = ">=1.8"
+
+[[package]]
+name = "responses"
+version = "0.10.6"
+description = "A utility library for mocking out the `requests` Python library."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "responses-0.10.6-py2.py3-none-any.whl", hash = "sha256:97193c0183d63fba8cd3a041c75464e4b09ea0aff6328800d1546598567dde0b"},
+ {file = "responses-0.10.6.tar.gz", hash = "sha256:502d9c0c8008439cfcdef7e251f507fcfdd503b56e8c0c87c3c3e3393953f790"},
+]
+
+[package.dependencies]
+requests = ">=2.0"
+six = "*"
+
+[package.extras]
+tests = ["coverage (>=3.7.1,<5.0.0)", "flake8", "pytest", "pytest-cov", "pytest-localserver"]
+
+[[package]]
+name = "rsa"
+version = "3.1.4"
+description = "Pure-Python RSA implementation"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "rsa-3.1.4.tar.gz", hash = "sha256:e2b0b05936c276b1edd2e1525553233b666df9e29b5c3ba223eed738277c82a0"},
+]
+
+[package.dependencies]
+pyasn1 = ">=0.1.3"
+
+[[package]]
+name = "sentry-sdk"
+version = "0.14.3"
+description = "Python client for Sentry (https://getsentry.com)"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "sentry-sdk-0.14.3.tar.gz", hash = "sha256:bb90a4e19c7233a580715fc986cc44be2c48fc10b31e71580a2037e1c94b6950"},
+ {file = "sentry_sdk-0.14.3-py2.py3-none-any.whl", hash = "sha256:23808d571d2461a4ce3784ec12bbee5bdb8c026c143fe79d36cef8a6d653e71f"},
+]
+
+[package.dependencies]
+certifi = "*"
+urllib3 = ">=1.10.0"
+
+[package.extras]
+aiohttp = ["aiohttp (>=3.5)"]
+beam = ["beam (>=2.12)"]
+bottle = ["bottle (>=0.12.13)"]
+celery = ["celery (>=3)"]
+django = ["django (>=1.8)"]
+falcon = ["falcon (>=1.4)"]
+flask = ["blinker (>=1.1)", "flask (>=0.11)"]
+pyspark = ["pyspark (>=2.4.4)"]
+sanic = ["sanic (>=0.8)"]
+sqlalchemy = ["sqlalchemy (>=1.2)"]
+tornado = ["tornado (>=5)"]
+
+[[package]]
+name = "setuptools"
+version = "51.2.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "setuptools-51.2.0-py3-none-any.whl", hash = "sha256:56948bf25c682e166cf2bfe7c1ad63e5745849b50d1ae7b0f8bff5decdcf34f2"},
+ {file = "setuptools-51.2.0.tar.gz", hash = "sha256:7ef59b1790b3491f8d321f531eccc11517a07a4d7637e498465cd834d80d4c2c"},
+]
+
+[package.extras]
+certs = ["certifi (==2016.9.26)"]
+docs = ["jaraco.packaging (>=8.2)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx"]
+ssl = ["wincertstore (==0.2)"]
+testing = ["flake8-2020", "jaraco.envs", "jaraco.test (>=3.2.0)", "mock", "paver", "pip (>=19.1)", "pytest (>=3.5,!=3.7.3)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=1.2.3)", "pytest-cov", "pytest-flake8", "pytest-mypy", "pytest-virtualenv (>=1.2.7)", "virtualenv (>=13.0.0)", "wheel"]
+
+[[package]]
+name = "six"
+version = "1.13.0"
+description = "Python 2 and 3 compatibility utilities"
+category = "main"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*"
+files = [
+ {file = "six-1.13.0-py2.py3-none-any.whl", hash = "sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd"},
+ {file = "six-1.13.0.tar.gz", hash = "sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66"},
+]
+
+[[package]]
+name = "slugid"
+version = "2.0.0"
+description = "Base64 encoded uuid v4 slugs"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "slugid-2.0.0-py2.py3-none-any.whl", hash = "sha256:aec8b0e01c4ad32e38e12d609eab3ec912fd129aaf6b2ded0199b56a5f8fd67c"},
+ {file = "slugid-2.0.0.tar.gz", hash = "sha256:a950d98b72691178bdd4d6c52743c4a2aa039207cf7a97d71060a111ff9ba297"},
+]
+
+[[package]]
+name = "taskcluster"
+version = "44.2.2"
+description = "Python client for Taskcluster"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "taskcluster-44.2.2-py2-none-any.whl", hash = "sha256:c1b0e82be25b1ed17e07c90b24a382634b2bfce273fdf2682d94568abe10716c"},
+ {file = "taskcluster-44.2.2-py3-none-any.whl", hash = "sha256:846d73c597f0f47dd8525c85c8d9bc41111d5200b090690d3f16b2f57c56a2e1"},
+ {file = "taskcluster-44.2.2.tar.gz", hash = "sha256:0266a6a901e1a2ec838984a7f24e7adb6d58f9f2e221a7f613388f8f23f786fc"},
+]
+
+[package.dependencies]
+aiohttp = {version = ">=3.7.4", markers = "python_version >= \"3.6\""}
+async-timeout = {version = ">=2.0.0", markers = "python_version >= \"3.6\""}
+mohawk = ">=0.3.4"
+requests = ">=2.4.3"
+six = ">=1.10.0"
+slugid = ">=2"
+taskcluster-urls = ">=12.1.0"
+
+[package.extras]
+test = ["aiofiles", "coverage", "flake8", "httmock", "httptest", "hypothesis", "mock", "psutil", "pytest", "pytest-asyncio", "pytest-cov", "pytest-mock", "python-dateutil", "setuptools-lint", "subprocess32", "tox"]
+
+[[package]]
+name = "taskcluster-taskgraph"
+version = "3.5.2"
+description = "Build taskcluster taskgraphs"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "taskcluster-taskgraph-3.5.2.tar.gz", hash = "sha256:62f1a320d6b310f65151904a9992719a9b2c4c41ef8f57be810899fd3c5d2703"},
+ {file = "taskcluster_taskgraph-3.5.2-py3-none-any.whl", hash = "sha256:6a024ba2383f56e11b764500f92837afb825612a49d24bde9791dfa7aa7ddaec"},
+]
+
+[package.dependencies]
+appdirs = ">=1.4"
+attrs = ">=19.1.0"
+json-e = ">=2.7"
+mozilla-repo-urls = "*"
+PyYAML = ">=5.4"
+redo = ">=2.0"
+requests = ">=2.25"
+requests-unixsocket = ">=0.2"
+slugid = ">=2.0"
+taskcluster-urls = ">=11.0"
+voluptuous = ">=0.12.1"
+
+[package.extras]
+load-image = ["zstandard"]
+
+[[package]]
+name = "taskcluster-urls"
+version = "13.0.1"
+description = "Standardized url generator for taskcluster resources."
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "taskcluster-urls-13.0.1.tar.gz", hash = "sha256:b25e122ecec249c4299ac7b20b08db76e3e2025bdaeb699a9d444556de5fd367"},
+ {file = "taskcluster_urls-13.0.1-py2-none-any.whl", hash = "sha256:5e25e7e6818e8877178b175ff43d2e6548afad72694aa125f404a7329ece0973"},
+ {file = "taskcluster_urls-13.0.1-py3-none-any.whl", hash = "sha256:f66dcbd6572a6216ab65949f0fa0b91f2df647918028436c384e6af5cd12ae2b"},
+]
+
+[[package]]
+name = "toml"
+version = "0.10.2"
+description = "Python Library for Tom's Obvious, Minimal Language"
+category = "main"
+optional = false
+python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
+ {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
+]
+
+[[package]]
+name = "tqdm"
+version = "4.62.3"
+description = "Fast, Extensible Progress Meter"
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+files = [
+ {file = "tqdm-4.62.3-py2.py3-none-any.whl", hash = "sha256:8dd278a422499cd6b727e6ae4061c40b48fce8b76d1ccbf5d34fca9b7f925b0c"},
+ {file = "tqdm-4.62.3.tar.gz", hash = "sha256:d359de7217506c9851b7869f3708d8ee53ed70a1b8edbba4dbcb47442592920d"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["py-make (>=0.1.0)", "twine", "wheel"]
+notebook = ["ipywidgets (>=6)"]
+telegram = ["requests"]
+
+[[package]]
+name = "typing-extensions"
+version = "3.10.0.0"
+description = "Backported and Experimental Type Hints for Python 3.5+"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "typing_extensions-3.10.0.0-py2-none-any.whl", hash = "sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497"},
+ {file = "typing_extensions-3.10.0.0-py3-none-any.whl", hash = "sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84"},
+ {file = "typing_extensions-3.10.0.0.tar.gz", hash = "sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342"},
+]
+
+[[package]]
+name = "urllib3"
+version = "1.26.0"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
+files = [
+ {file = "urllib3-1.26.0-py2.py3-none-any.whl", hash = "sha256:bad31cb622ceee0ab46c4c884cf61957def0ff2e644de0a7a093678844c9ccac"},
+ {file = "urllib3-1.26.0.tar.gz", hash = "sha256:4849f132941d68144df0a3785ccc4fe423430ba5db0108d045c8cadbc90f517a"},
+]
+
+[package.extras]
+brotli = ["brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "voluptuous"
+version = "0.12.1"
+description = ""
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "voluptuous-0.12.1-py3-none-any.whl", hash = "sha256:8ace33fcf9e6b1f59406bfaf6b8ec7bcc44266a9f29080b4deb4fe6ff2492386"},
+ {file = "voluptuous-0.12.1.tar.gz", hash = "sha256:663572419281ddfaf4b4197fd4942d181630120fb39b333e3adad70aeb56444b"},
+]
+
+[[package]]
+name = "wcwidth"
+version = "0.2.5"
+description = "Measures the displayed width of unicode strings in a terminal"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "wcwidth-0.2.5-py2.py3-none-any.whl", hash = "sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784"},
+ {file = "wcwidth-0.2.5.tar.gz", hash = "sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83"},
+]
+
+[[package]]
+name = "wheel"
+version = "0.37.0"
+description = "A built-package format for Python"
+category = "main"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+files = [
+ {file = "wheel-0.37.0-py2.py3-none-any.whl", hash = "sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd"},
+ {file = "wheel-0.37.0.tar.gz", hash = "sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad"},
+]
+
+[package.extras]
+test = ["pytest (>=3.0.0)", "pytest-cov"]
+
+[[package]]
+name = "yamllint"
+version = "1.23.0"
+description = "A linter for YAML files."
+category = "main"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "yamllint-1.23.0-py2.py3-none-any.whl", hash = "sha256:0fa69bf8a86182b7fe14918bdd3a30354c869966bbc7cbfff176af71bda9c806"},
+ {file = "yamllint-1.23.0.tar.gz", hash = "sha256:59f3ff77f44e7f46be6aecdb985830f73a1c51e290b7082a7d38c2ae1940f4a9"},
+]
+
+[package.dependencies]
+pathspec = ">=0.5.3"
+pyyaml = "*"
+
+[[package]]
+name = "yarl"
+version = "1.6.3"
+description = "Yet another URL library"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "yarl-1.6.3-cp36-cp36m-macosx_10_14_x86_64.whl", hash = "sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_aarch64.whl", hash = "sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_ppc64le.whl", hash = "sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_s390x.whl", hash = "sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76"},
+ {file = "yarl-1.6.3-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366"},
+ {file = "yarl-1.6.3-cp36-cp36m-win32.whl", hash = "sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721"},
+ {file = "yarl-1.6.3-cp36-cp36m-win_amd64.whl", hash = "sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643"},
+ {file = "yarl-1.6.3-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_ppc64le.whl", hash = "sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_s390x.whl", hash = "sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f"},
+ {file = "yarl-1.6.3-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970"},
+ {file = "yarl-1.6.3-cp37-cp37m-win32.whl", hash = "sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e"},
+ {file = "yarl-1.6.3-cp37-cp37m-win_amd64.whl", hash = "sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50"},
+ {file = "yarl-1.6.3-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux1_i686.whl", hash = "sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_ppc64le.whl", hash = "sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_s390x.whl", hash = "sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2"},
+ {file = "yarl-1.6.3-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2"},
+ {file = "yarl-1.6.3-cp38-cp38-win32.whl", hash = "sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896"},
+ {file = "yarl-1.6.3-cp38-cp38-win_amd64.whl", hash = "sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a"},
+ {file = "yarl-1.6.3-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux1_i686.whl", hash = "sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_ppc64le.whl", hash = "sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_s390x.whl", hash = "sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0"},
+ {file = "yarl-1.6.3-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4"},
+ {file = "yarl-1.6.3-cp39-cp39-win32.whl", hash = "sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424"},
+ {file = "yarl-1.6.3-cp39-cp39-win_amd64.whl", hash = "sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6"},
+ {file = "yarl-1.6.3.tar.gz", hash = "sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10"},
+]
+
+[package.dependencies]
+idna = ">=2.0"
+multidict = ">=4.0"
+typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""}
+
+[[package]]
+name = "zipp"
+version = "3.4.1"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+category = "main"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "zipp-3.4.1-py3-none-any.whl", hash = "sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098"},
+ {file = "zipp-3.4.1.tar.gz", hash = "sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76"},
+]
+
+[package.extras]
+docs = ["jaraco.packaging (>=8.2)", "rst.linker (>=1.9)", "sphinx"]
+testing = ["func-timeout", "jaraco.itertools", "pytest (>=4.6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=1.2.3)", "pytest-cov", "pytest-enabler", "pytest-flake8", "pytest-mypy"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.7"
+content-hash = "73400b896922dda273e939258f76bbb8d752ec811ce33fd56ec735516b932c2b"
diff --git a/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/LICENSE.rst b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/LICENSE.rst
new file mode 100644
index 0000000000..ac630e821c
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/LICENSE.rst
@@ -0,0 +1,24 @@
+Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/METADATA b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/METADATA
new file mode 100644
index 0000000000..d68429de7e
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/METADATA
@@ -0,0 +1,38 @@
+Metadata-Version: 2.1
+Name: pyasn1
+Version: 0.4.8
+Summary: ASN.1 types and codecs
+Home-page: https://github.com/etingof/pyasn1
+Author: Ilya Etingof
+Author-email: etingof@gmail.com
+Maintainer: Ilya Etingof <etingof@gmail.com>
+License: BSD
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: Intended Audience :: Telecommunications Industry
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Communications
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+
+Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)
+
+
diff --git a/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/RECORD b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/RECORD
new file mode 100644
index 0000000000..54b2de2e86
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/RECORD
@@ -0,0 +1,42 @@
+pyasn1/__init__.py,sha256=1Rn8wrJioqfDz7ORFwMehoT15xHOVeiiQD5pZW37D8s,175
+pyasn1/debug.py,sha256=HWGbLlEPLoCNyHqBd1Vd_KK91TppEn3CA4YgUxktT2k,3726
+pyasn1/error.py,sha256=DIn2FWY3ACYNbk_42b3ny2bevkehpK2lOqfAsfdkvBE,2257
+pyasn1/codec/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/ber/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/ber/decoder.py,sha256=7-WINr38zVEa3KUkmshh8FjK6QnFaA8Y7j7XaTgYfRk,59708
+pyasn1/codec/ber/encoder.py,sha256=xHl01PCIAiHZXev4x01sjbCgAUKcsTT6SzaLI3nt-9E,27741
+pyasn1/codec/ber/eoo.py,sha256=eZ6lEyHdayMcMmNqtceDIyzf7u5lOeZoRK-WEUxVThI,626
+pyasn1/codec/cer/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/cer/decoder.py,sha256=ZYBqtDGNiYmKDpKDvioMDf-TYVWoJeZY3I8TEAKuk5s,3745
+pyasn1/codec/cer/encoder.py,sha256=PGtzcIelIHj5d5Yqc5FATMEIWCJybQYFlCaK1gy-NIA,9409
+pyasn1/codec/der/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/der/decoder.py,sha256=kinXcogMDPGlR3f7hmAxRv2YbQyeP-UhuKM0r8gkbeA,2722
+pyasn1/codec/der/encoder.py,sha256=ZfRRxSCefQyLg0DLNb4zllaYf5_AWGIv3SPzB83Ln2I,3073
+pyasn1/codec/native/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/codec/native/decoder.py,sha256=4Q29tdKyytK3Oz-m94MSWxxPi_GhcBKvUfvPNKQcL0Y,7671
+pyasn1/codec/native/encoder.py,sha256=0eMLWR49dwMA1X4si0XswR1kX1aDAWyCeUNTpEbChag,8002
+pyasn1/compat/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/compat/binary.py,sha256=mgWqHmr_SMEdB2WVVr6jyYMnodSbPP6IByE5qKccWLM,698
+pyasn1/compat/calling.py,sha256=uTk3nJtGrElqJi8t34SoO8-eWFBG0gwNhXrlo1YmFEE,379
+pyasn1/compat/dateandtime.py,sha256=zHvXXBp4t3XJ6teg_tz6qgNDevzd93qnrLoEbNxZQ_E,482
+pyasn1/compat/integer.py,sha256=k6tqyxXMC0zJoU-Rz4oUPPoUpTmWXE6Prnzu0tkmmks,2988
+pyasn1/compat/octets.py,sha256=ICe-DVLBIOHmNSz-sp3ioMh--smodJ4VW3Ju0ogJMWA,1359
+pyasn1/compat/string.py,sha256=exqXJmPM6vYj4MjzsjciQdpUcJprRdgrLma8I4UcYHA,505
+pyasn1/type/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59
+pyasn1/type/base.py,sha256=TX7qdOX3EPiY7-11MY4fwK2Hy6nQsrdQ_M41aUcApno,22386
+pyasn1/type/char.py,sha256=5HH8r1IqZMDCsfDlQHVCRphLlFuZ93bE2NW78CgeUTI,11397
+pyasn1/type/constraint.py,sha256=0Qsth_0JctnDMvOSe5R-vd9IosgjqkKZT_X9lBRXtuI,22132
+pyasn1/type/error.py,sha256=4_BHdjX-AL5WMTpU-tX1Nfo_P88c2z1sDvqPU-S9Bns,246
+pyasn1/type/namedtype.py,sha256=VIL3H3oPgA0zNrDSeAhKmi4CZGTb69uDBVNJzzRk3wM,16368
+pyasn1/type/namedval.py,sha256=dXYWiVTihvBy4RiebGY3AlIXsJvW78mJ1L7JSw-H7Qw,4886
+pyasn1/type/opentype.py,sha256=pUpnPqv8o4AFeIsmGHDTFfuxXAq7FvG3hrTEnoAgBO8,2848
+pyasn1/type/tag.py,sha256=nAK54C0_F_DL4_IaWRthIfIYBOTuXZoVVcbcbqgZiVA,9486
+pyasn1/type/tagmap.py,sha256=2bwm0hqxG2gvXYheOI_iasfl2Z_B93qU7y39EHteUvs,2998
+pyasn1/type/univ.py,sha256=FXc_VOStZfC-xIVTznpFO0qTq1aO4XyJFU0ayQWgPMY,108921
+pyasn1/type/useful.py,sha256=r_K6UhgcrJ0ej658X-s9522I9T7oYVdmEKcbXTkZMds,5368
+pyasn1-0.4.8.dist-info/LICENSE.rst,sha256=IsXMaSKrXWn7oy2MXuTN0UmBUIy1OvwOvYVZOEf9laU,1334
+pyasn1-0.4.8.dist-info/METADATA,sha256=Mx_DbLo2GA_t9nOIsqu-18vjHdTjMR1LtUzdcfLzE0Y,1521
+pyasn1-0.4.8.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+pyasn1-0.4.8.dist-info/top_level.txt,sha256=dnNEQt3nIDIO5mSCCOB5obQHrjDOUsRycdBujc2vrWE,7
+pyasn1-0.4.8.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+pyasn1-0.4.8.dist-info/RECORD,,
diff --git a/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/WHEEL b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/WHEEL
new file mode 100644
index 0000000000..8b701e93c2
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/top_level.txt b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/top_level.txt
new file mode 100644
index 0000000000..38fe414575
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyasn1
diff --git a/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/zip-safe b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/zip-safe
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1-0.4.8.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/third_party/python/pyasn1/pyasn1/__init__.py b/third_party/python/pyasn1/pyasn1/__init__.py
new file mode 100644
index 0000000000..5a56a707c8
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/__init__.py
@@ -0,0 +1,7 @@
+import sys
+
+# https://www.python.org/dev/peps/pep-0396/
+__version__ = '0.4.8'
+
+if sys.version_info[:2] < (2, 4):
+ raise RuntimeError('PyASN1 requires Python 2.4 or later')
diff --git a/third_party/python/pyasn1/pyasn1/codec/__init__.py b/third_party/python/pyasn1/pyasn1/codec/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/third_party/python/pyasn1/pyasn1/codec/ber/__init__.py b/third_party/python/pyasn1/pyasn1/codec/ber/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/ber/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/third_party/python/pyasn1/pyasn1/codec/ber/decoder.py b/third_party/python/pyasn1/pyasn1/codec/ber/decoder.py
new file mode 100644
index 0000000000..5ff485fbeb
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/ber/decoder.py
@@ -0,0 +1,1682 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.compat.integer import from_bytes
+from pyasn1.compat.octets import oct2int, octs2ints, ints2octs, null
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['decode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
+
+noValue = base.noValue
+
+
+class AbstractDecoder(object):
+ protoComponent = None
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ raise error.PyAsn1Error('Decoder not implemented for %s' % (tagSet,))
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ raise error.PyAsn1Error('Indefinite length mode decoder not implemented for %s' % (tagSet,))
+
+
+class AbstractSimpleDecoder(AbstractDecoder):
+ @staticmethod
+ def substrateCollector(asn1Object, substrate, length):
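+        # When passed as substrateFun, returns the raw content octets and the
+        # remaining substrate instead of decoding the value.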
+ return substrate[:length], substrate[length:]
+
+ def _createComponent(self, asn1Spec, tagSet, value, **options):
+ if options.get('native'):
+ return value
+ elif asn1Spec is None:
+ return self.protoComponent.clone(value, tagSet=tagSet)
+ elif value is noValue:
+ return asn1Spec
+ else:
+ return asn1Spec.clone(value)
+
+
+class ExplicitTagDecoder(AbstractSimpleDecoder):
+ protoComponent = univ.Any('')
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun:
+ return substrateFun(
+ self._createComponent(asn1Spec, tagSet, '', **options),
+ substrate, length
+ )
+
+ head, tail = substrate[:length], substrate[length:]
+
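+        # An explicitly tagged value wraps one complete inner TLV, so recurse once.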
+ value, _ = decodeFun(head, asn1Spec, tagSet, length, **options)
+
+ if LOG:
+ LOG('explicit tag container carries %d octets of trailing payload '
+ '(will be lost!): %s' % (len(_), debug.hexdump(_)))
+
+ return value, tail
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun:
+ return substrateFun(
+ self._createComponent(asn1Spec, tagSet, '', **options),
+ substrate, length
+ )
+
+ value, substrate = decodeFun(substrate, asn1Spec, tagSet, length, **options)
+
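+        # The indefinite-length wrapper must be terminated by an end-of-octets marker.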
+ eooMarker, substrate = decodeFun(substrate, allowEoo=True, **options)
+
+ if eooMarker is eoo.endOfOctets:
+ return value, substrate
+ else:
+ raise error.PyAsn1Error('Missing end-of-octets terminator')
+
+
+explicitTagDecoder = ExplicitTagDecoder()
+
+
+class IntegerDecoder(AbstractSimpleDecoder):
+ protoComponent = univ.Integer(0)
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ head, tail = substrate[:length], substrate[length:]
+
+ if not head:
+ return self._createComponent(asn1Spec, tagSet, 0, **options), tail
+
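+        # BER encodes INTEGER as big-endian two's complement content octets.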
+ value = from_bytes(head, signed=True)
+
+ return self._createComponent(asn1Spec, tagSet, value, **options), tail
+
+
+class BooleanDecoder(IntegerDecoder):
+ protoComponent = univ.Boolean(0)
+
+ def _createComponent(self, asn1Spec, tagSet, value, **options):
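+        # BER permits any nonzero content octet for TRUE; normalize to 1 or 0.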
+ return IntegerDecoder._createComponent(
+ self, asn1Spec, tagSet, value and 1 or 0, **options)
+
+
+class BitStringDecoder(AbstractSimpleDecoder):
+ protoComponent = univ.BitString(())
+ supportConstructedForm = True
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ head, tail = substrate[:length], substrate[length:]
+
+ if substrateFun:
+ return substrateFun(self._createComponent(
+ asn1Spec, tagSet, noValue, **options), substrate, length)
+
+ if not head:
+ raise error.PyAsn1Error('Empty BIT STRING substrate')
+
+ if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
+
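+            # The first content octet holds the number of unused (pad) bits, 0..7.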
+ trailingBits = oct2int(head[0])
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ value = self.protoComponent.fromOctetString(
+ head[1:], internalFormat=True, padding=trailingBits)
+
+ return self._createComponent(asn1Spec, tagSet, value, **options), tail
+
+ if not self.supportConstructedForm:
+ raise error.PyAsn1Error('Constructed encoding form prohibited '
+ 'at %s' % self.__class__.__name__)
+
+ if LOG:
+ LOG('assembling constructed serialization')
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
+
+ while head:
+ component, head = decodeFun(head, self.protoComponent,
+ substrateFun=substrateFun, **options)
+
+ trailingBits = oct2int(component[0])
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ bitString = self.protoComponent.fromOctetString(
+ component[1:], internalFormat=True,
+ prepend=bitString, padding=trailingBits
+ )
+
+ return self._createComponent(asn1Spec, tagSet, bitString, **options), tail
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if substrateFun:
+ return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options), substrate, length)
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ bitString = self.protoComponent.fromOctetString(null, internalFormat=True)
+
+ while substrate:
+ component, substrate = decodeFun(substrate, self.protoComponent,
+ substrateFun=substrateFun,
+ allowEoo=True, **options)
+ if component is eoo.endOfOctets:
+ break
+
+ trailingBits = oct2int(component[0])
+ if trailingBits > 7:
+ raise error.PyAsn1Error(
+ 'Trailing bits overflow %s' % trailingBits
+ )
+
+ bitString = self.protoComponent.fromOctetString(
+ component[1:], internalFormat=True,
+ prepend=bitString, padding=trailingBits
+ )
+
+ else:
+ raise error.SubstrateUnderrunError('No EOO seen before substrate ends')
+
+ return self._createComponent(asn1Spec, tagSet, bitString, **options), substrate
+
+
+class OctetStringDecoder(AbstractSimpleDecoder):
+ protoComponent = univ.OctetString('')
+ supportConstructedForm = True
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ head, tail = substrate[:length], substrate[length:]
+
+ if substrateFun:
+ return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options),
+ substrate, length)
+
+ if tagSet[0].tagFormat == tag.tagFormatSimple: # XXX what tag to check?
+ return self._createComponent(asn1Spec, tagSet, head, **options), tail
+
+ if not self.supportConstructedForm:
+ raise error.PyAsn1Error('Constructed encoding form prohibited at %s' % self.__class__.__name__)
+
+ if LOG:
+ LOG('assembling constructed serialization')
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ header = null
+
+ while head:
+ component, head = decodeFun(head, self.protoComponent,
+ substrateFun=substrateFun,
+ **options)
+ header += component
+
+ return self._createComponent(asn1Spec, tagSet, header, **options), tail
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if substrateFun and substrateFun is not self.substrateCollector:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+ return substrateFun(asn1Object, substrate, length)
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ header = null
+
+ while substrate:
+ component, substrate = decodeFun(substrate,
+ self.protoComponent,
+ substrateFun=substrateFun,
+ allowEoo=True, **options)
+ if component is eoo.endOfOctets:
+ break
+
+ header += component
+
+ else:
+ raise error.SubstrateUnderrunError(
+ 'No EOO seen before substrate ends'
+ )
+
+ return self._createComponent(asn1Spec, tagSet, header, **options), substrate
+
+
+class NullDecoder(AbstractSimpleDecoder):
+ protoComponent = univ.Null('')
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ head, tail = substrate[:length], substrate[length:]
+
+ component = self._createComponent(asn1Spec, tagSet, '', **options)
+
+ if head:
+ raise error.PyAsn1Error('Unexpected %d-octet substrate for Null' % length)
+
+ return component, tail
+
+
+class ObjectIdentifierDecoder(AbstractSimpleDecoder):
+ protoComponent = univ.ObjectIdentifier(())
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ head, tail = substrate[:length], substrate[length:]
+ if not head:
+ raise error.PyAsn1Error('Empty substrate')
+
+ head = octs2ints(head)
+
+ oid = ()
+ index = 0
+ substrateLen = len(head)
+ while index < substrateLen:
+ subId = head[index]
+ index += 1
+ if subId < 128:
+ oid += (subId,)
+ elif subId > 128:
+ # Construct subid from a number of octets
+ nextSubId = subId
+ subId = 0
+ while nextSubId >= 128:
+ subId = (subId << 7) + (nextSubId & 0x7F)
+ if index >= substrateLen:
+ raise error.SubstrateUnderrunError(
+ 'Short substrate for sub-OID past %s' % (oid,)
+ )
+ nextSubId = head[index]
+ index += 1
+ oid += ((subId << 7) + nextSubId,)
+ elif subId == 128:
+ # ASN.1 spec forbids leading zeros (0x80) in OID
+ # encoding, tolerating it opens a vulnerability. See
+ # https://www.esat.kuleuven.be/cosic/publications/article-1432.pdf
+ # page 7
+ raise error.PyAsn1Error('Invalid octet 0x80 in OID encoding')
+
+ # Decode two leading arcs
+ if 0 <= oid[0] <= 39:
+ oid = (0,) + oid
+ elif 40 <= oid[0] <= 79:
+ oid = (1, oid[0] - 40) + oid[1:]
+ elif oid[0] >= 80:
+ oid = (2, oid[0] - 80) + oid[1:]
+ else:
+ raise error.PyAsn1Error('Malformed first OID octet: %s' % head[0])
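+ # e.g. a first sub-ID of 43 falls in the 40..79 range and expands to
+ # the arc pair (1, 3), so octets 2B 06 01 decode to OID 1.3.6.1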
+
+ return self._createComponent(asn1Spec, tagSet, oid, **options), tail
+
+
+class RealDecoder(AbstractSimpleDecoder):
+ protoComponent = univ.Real()
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if tagSet[0].tagFormat != tag.tagFormatSimple:
+ raise error.PyAsn1Error('Simple tag format expected')
+
+ head, tail = substrate[:length], substrate[length:]
+
+ if not head:
+ return self._createComponent(asn1Spec, tagSet, 0.0, **options), tail
+
+ fo = oct2int(head[0])
+ head = head[1:]
+ if fo & 0x80: # binary encoding
+ if not head:
+ raise error.PyAsn1Error("Incomplete floating-point value")
+
+ if LOG:
+ LOG('decoding binary encoded REAL')
+
+ n = (fo & 0x03) + 1
+
+ if n == 4:
+ n = oct2int(head[0])
+ head = head[1:]
+
+ eo, head = head[:n], head[n:]
+
+ if not eo or not head:
+ raise error.PyAsn1Error('Incomplete REAL exponent or mantissa')
+
+ e = oct2int(eo[0]) & 0x80 and -1 or 0
+
+ while eo: # exponent
+ e <<= 8
+ e |= oct2int(eo[0])
+ eo = eo[1:]
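+ # two's complement with sign extension: exponent octet FE yields -2,
+ # while octets 00 FE yield +254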
+
+ b = fo >> 4 & 0x03 # base bits
+
+ if b > 2:
+ raise error.PyAsn1Error('Illegal Real base')
+
+ if b == 1: # encbase = 8
+ e *= 3
+
+ elif b == 2: # encbase = 16
+ e *= 4
+ p = 0
+
+ while head: # value
+ p <<= 8
+ p |= oct2int(head[0])
+ head = head[1:]
+
+ if fo & 0x40: # sign bit
+ p = -p
+
+ sf = fo >> 2 & 0x03 # scale bits
+ p *= 2 ** sf
+ value = (p, 2, e)
+
+ elif fo & 0x40: # infinite value
+ if LOG:
+ LOG('decoding infinite REAL')
+
+ value = fo & 0x01 and '-inf' or 'inf'
+
+ elif fo & 0xc0 == 0: # character encoding
+ if not head:
+ raise error.PyAsn1Error("Incomplete floating-point value")
+
+ if LOG:
+ LOG('decoding character encoded REAL')
+
+ try:
+ if fo & 0x3 == 0x1: # NR1
+ value = (int(head), 10, 0)
+
+ elif fo & 0x3 == 0x2: # NR2
+ value = float(head)
+
+ elif fo & 0x3 == 0x3: # NR3
+ value = float(head)
+
+ else:
+ raise error.SubstrateUnderrunError(
+ 'Unknown NR (tag %s)' % fo
+ )
+
+ except ValueError:
+ raise error.SubstrateUnderrunError(
+ 'Bad character Real syntax'
+ )
+
+ else:
+ raise error.SubstrateUnderrunError(
+ 'Unknown encoding (tag %s)' % fo
+ )
+
+ return self._createComponent(asn1Spec, tagSet, value, **options), tail
+
+
+class AbstractConstructedDecoder(AbstractDecoder):
+ protoComponent = None
+
+
+class UniversalConstructedTypeDecoder(AbstractConstructedDecoder):
+ protoRecordComponent = None
+ protoSequenceComponent = None
+
+ def _getComponentTagMap(self, asn1Object, idx):
+ raise NotImplementedError()
+
+ def _getComponentPositionByType(self, asn1Object, tagSet, idx):
+ raise NotImplementedError()
+
+ def _decodeComponents(self, substrate, tagSet=None, decodeFun=None, **options):
+ components = []
+ componentTypes = set()
+
+ while substrate:
+ component, substrate = decodeFun(substrate, **options)
+ if component is eoo.endOfOctets:
+ break
+
+ components.append(component)
+ componentTypes.add(component.tagSet)
+
+ # Now we have to guess whether it is SEQUENCE/SET or SEQUENCE OF/SET OF
+ # The heuristic is:
+ # * more than one distinct component type -> likely SEQUENCE/SET
+ # * otherwise -> likely SEQUENCE OF/SET OF
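+ # (illustrative: an INTEGER and an OCTET STRING component suggest
+ # SEQUENCE, while three INTEGER components suggest SEQUENCE OF)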
+ if len(componentTypes) > 1:
+ protoComponent = self.protoRecordComponent
+
+ else:
+ protoComponent = self.protoSequenceComponent
+
+ asn1Object = protoComponent.clone(
+ # construct tagSet from base tag from prototype ASN.1 object
+ # and additional tags recovered from the substrate
+ tagSet=tag.TagSet(protoComponent.tagSet.baseTag, *tagSet.superTags)
+ )
+
+ if LOG:
+ LOG('guessed %r container type (pass `asn1Spec` to guide the '
+ 'decoder)' % asn1Object)
+
+ for idx, component in enumerate(components):
+ asn1Object.setComponentByPosition(
+ idx, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False
+ )
+
+ return asn1Object, substrate
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if tagSet[0].tagFormat != tag.tagFormatConstructed:
+ raise error.PyAsn1Error('Constructed tag format expected')
+
+ head, tail = substrate[:length], substrate[length:]
+
+ if substrateFun is not None:
+ if asn1Spec is not None:
+ asn1Object = asn1Spec.clone()
+
+ elif self.protoComponent is not None:
+ asn1Object = self.protoComponent.clone(tagSet=tagSet)
+
+ else:
+ asn1Object = self.protoRecordComponent, self.protoSequenceComponent
+
+ return substrateFun(asn1Object, substrate, length)
+
+ if asn1Spec is None:
+ asn1Object, trailing = self._decodeComponents(
+ head, tagSet=tagSet, decodeFun=decodeFun, **options
+ )
+
+ if trailing:
+ if LOG:
+ LOG('Unused trailing %d octets encountered: %s' % (
+ len(trailing), debug.hexdump(trailing)))
+
+ return asn1Object, tail
+
+ asn1Object = asn1Spec.clone()
+ asn1Object.clear()
+
+ if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId):
+
+ namedTypes = asn1Spec.componentType
+
+ isSetType = asn1Spec.typeId == univ.Set.typeId
+ isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault
+
+ if LOG:
+ LOG('decoding %sdeterministic %s type %r chosen by type ID' % (
+ not isDeterministic and 'non-' or '', isSetType and 'SET' or '',
+ asn1Spec))
+
+ seenIndices = set()
+ idx = 0
+ while head:
+ if not namedTypes:
+ componentType = None
+
+ elif isSetType:
+ componentType = namedTypes.tagMapUnique
+
+ else:
+ try:
+ if isDeterministic:
+ componentType = namedTypes[idx].asn1Object
+
+ elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
+ componentType = namedTypes.getTagMapNearPosition(idx)
+
+ else:
+ componentType = namedTypes[idx].asn1Object
+
+ except IndexError:
+ raise error.PyAsn1Error(
+ 'Excessive components decoded at %r' % (asn1Spec,)
+ )
+
+ component, head = decodeFun(head, componentType, **options)
+
+ if not isDeterministic and namedTypes:
+ if isSetType:
+ idx = namedTypes.getPositionByType(component.effectiveTagSet)
+
+ elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
+ idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx)
+
+ asn1Object.setComponentByPosition(
+ idx, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False
+ )
+
+ seenIndices.add(idx)
+ idx += 1
+
+ if LOG:
+ LOG('seen component indices %s' % seenIndices)
+
+ if namedTypes:
+ if not namedTypes.requiredComponents.issubset(seenIndices):
+ raise error.PyAsn1Error(
+ 'ASN.1 object %s has uninitialized '
+ 'components' % asn1Object.__class__.__name__)
+
+ if namedTypes.hasOpenTypes:
+
+ openTypes = options.get('openTypes', {})
+
+ if LOG:
+ LOG('user-specified open types map:')
+
+ for k, v in openTypes.items():
+ LOG('%s -> %r' % (k, v))
+
+ if openTypes or options.get('decodeOpenTypes', False):
+
+ for idx, namedType in enumerate(namedTypes.namedTypes):
+ if not namedType.openType:
+ continue
+
+ if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue:
+ continue
+
+ governingValue = asn1Object.getComponentByName(
+ namedType.openType.name
+ )
+
+ try:
+ openType = openTypes[governingValue]
+
+ except KeyError:
+
+ if LOG:
+ LOG('default open types map of component '
+ '"%s.%s" governed by component "%s.%s"'
+ ':' % (asn1Object.__class__.__name__,
+ namedType.name,
+ asn1Object.__class__.__name__,
+ namedType.openType.name))
+
+ for k, v in namedType.openType.items():
+ LOG('%s -> %r' % (k, v))
+
+ try:
+ openType = namedType.openType[governingValue]
+
+ except KeyError:
+ if LOG:
+ LOG('failed to resolve open type by governing '
+ 'value %r' % (governingValue,))
+ continue
+
+ if LOG:
+ LOG('resolved open type %r by governing '
+ 'value %r' % (openType, governingValue))
+
+ containerValue = asn1Object.getComponentByPosition(idx)
+
+ if containerValue.typeId in (
+ univ.SetOf.typeId, univ.SequenceOf.typeId):
+
+ for pos, containerElement in enumerate(
+ containerValue):
+
+ component, rest = decodeFun(
+ containerValue[pos].asOctets(),
+ asn1Spec=openType, **options
+ )
+
+ containerValue[pos] = component
+
+ else:
+ component, rest = decodeFun(
+ asn1Object.getComponentByPosition(idx).asOctets(),
+ asn1Spec=openType, **options
+ )
+
+ asn1Object.setComponentByPosition(idx, component)
+
+ else:
+ inconsistency = asn1Object.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ else:
+ asn1Object = asn1Spec.clone()
+ asn1Object.clear()
+
+ componentType = asn1Spec.componentType
+
+ if LOG:
+ LOG('decoding type %r chosen by given `asn1Spec`' % componentType)
+
+ idx = 0
+
+ while head:
+ component, head = decodeFun(head, componentType, **options)
+ asn1Object.setComponentByPosition(
+ idx, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False
+ )
+
+ idx += 1
+
+ return asn1Object, tail
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if tagSet[0].tagFormat != tag.tagFormatConstructed:
+ raise error.PyAsn1Error('Constructed tag format expected')
+
+ if substrateFun is not None:
+ if asn1Spec is not None:
+ asn1Object = asn1Spec.clone()
+
+ elif self.protoComponent is not None:
+ asn1Object = self.protoComponent.clone(tagSet=tagSet)
+
+ else:
+ asn1Object = self.protoRecordComponent, self.protoSequenceComponent
+
+ return substrateFun(asn1Object, substrate, length)
+
+ if asn1Spec is None:
+ return self._decodeComponents(
+ substrate, tagSet=tagSet, decodeFun=decodeFun,
+ **dict(options, allowEoo=True)
+ )
+
+ asn1Object = asn1Spec.clone()
+ asn1Object.clear()
+
+ if asn1Spec.typeId in (univ.Sequence.typeId, univ.Set.typeId):
+
+ namedTypes = asn1Object.componentType
+
+ isSetType = asn1Object.typeId == univ.Set.typeId
+ isDeterministic = not isSetType and not namedTypes.hasOptionalOrDefault
+
+ if LOG:
+ LOG('decoding %sdeterministic %s type %r chosen by type ID' % (
+ not isDeterministic and 'non-' or '', isSetType and 'SET' or '',
+ asn1Spec))
+
+ seenIndices = set()
+ idx = 0
+ while substrate:
+ if len(namedTypes) <= idx:
+ asn1Spec = None
+
+ elif isSetType:
+ asn1Spec = namedTypes.tagMapUnique
+
+ else:
+ try:
+ if isDeterministic:
+ asn1Spec = namedTypes[idx].asn1Object
+
+ elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
+ asn1Spec = namedTypes.getTagMapNearPosition(idx)
+
+ else:
+ asn1Spec = namedTypes[idx].asn1Object
+
+ except IndexError:
+ raise error.PyAsn1Error(
+ 'Excessive components decoded at %r' % (asn1Object,)
+ )
+
+ component, substrate = decodeFun(substrate, asn1Spec, allowEoo=True, **options)
+ if component is eoo.endOfOctets:
+ break
+
+ if not isDeterministic and namedTypes:
+ if isSetType:
+ idx = namedTypes.getPositionByType(component.effectiveTagSet)
+ elif namedTypes[idx].isOptional or namedTypes[idx].isDefaulted:
+ idx = namedTypes.getPositionNearType(component.effectiveTagSet, idx)
+
+ asn1Object.setComponentByPosition(
+ idx, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False
+ )
+
+ seenIndices.add(idx)
+ idx += 1
+
+ else:
+ raise error.SubstrateUnderrunError(
+ 'No EOO seen before substrate ends'
+ )
+
+ if LOG:
+ LOG('seen component indices %s' % seenIndices)
+
+ if namedTypes:
+ if not namedTypes.requiredComponents.issubset(seenIndices):
+ raise error.PyAsn1Error('ASN.1 object %s has uninitialized components' % asn1Object.__class__.__name__)
+
+ if namedTypes.hasOpenTypes:
+
+ openTypes = options.get('openTypes', {})
+
+ if LOG:
+ LOG('user-specified open types map:')
+
+ for k, v in openTypes.items():
+ LOG('%s -> %r' % (k, v))
+
+ if openTypes or options.get('decodeOpenTypes', False):
+
+ for idx, namedType in enumerate(namedTypes.namedTypes):
+ if not namedType.openType:
+ continue
+
+ if namedType.isOptional and not asn1Object.getComponentByPosition(idx).isValue:
+ continue
+
+ governingValue = asn1Object.getComponentByName(
+ namedType.openType.name
+ )
+
+ try:
+ openType = openTypes[governingValue]
+
+ except KeyError:
+
+ if LOG:
+ LOG('default open types map of component '
+ '"%s.%s" governed by component "%s.%s"'
+ ':' % (asn1Object.__class__.__name__,
+ namedType.name,
+ asn1Object.__class__.__name__,
+ namedType.openType.name))
+
+ for k, v in namedType.openType.items():
+ LOG('%s -> %r' % (k, v))
+
+ try:
+ openType = namedType.openType[governingValue]
+
+ except KeyError:
+ if LOG:
+ LOG('failed to resolve open type by governing '
+ 'value %r' % (governingValue,))
+ continue
+
+ if LOG:
+ LOG('resolved open type %r by governing '
+ 'value %r' % (openType, governingValue))
+
+ containerValue = asn1Object.getComponentByPosition(idx)
+
+ if containerValue.typeId in (
+ univ.SetOf.typeId, univ.SequenceOf.typeId):
+
+ for pos, containerElement in enumerate(
+ containerValue):
+
+ component, rest = decodeFun(
+ containerValue[pos].asOctets(),
+ asn1Spec=openType, **dict(options, allowEoo=True)
+ )
+
+ containerValue[pos] = component
+
+ else:
+ component, rest = decodeFun(
+ asn1Object.getComponentByPosition(idx).asOctets(),
+ asn1Spec=openType, **dict(options, allowEoo=True)
+ )
+
+ if component is not eoo.endOfOctets:
+ asn1Object.setComponentByPosition(idx, component)
+
+ else:
+ inconsistency = asn1Object.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ else:
+ asn1Object = asn1Spec.clone()
+ asn1Object.clear()
+
+ componentType = asn1Spec.componentType
+
+ if LOG:
+ LOG('decoding type %r chosen by given `asn1Spec`' % componentType)
+
+ idx = 0
+
+ while substrate:
+ component, substrate = decodeFun(substrate, componentType, allowEoo=True, **options)
+
+ if component is eoo.endOfOctets:
+ break
+
+ asn1Object.setComponentByPosition(
+ idx, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False
+ )
+
+ idx += 1
+
+ else:
+ raise error.SubstrateUnderrunError(
+ 'No EOO seen before substrate ends'
+ )
+
+ return asn1Object, substrate
+
+
+class SequenceOrSequenceOfDecoder(UniversalConstructedTypeDecoder):
+ protoRecordComponent = univ.Sequence()
+ protoSequenceComponent = univ.SequenceOf()
+
+
+class SequenceDecoder(SequenceOrSequenceOfDecoder):
+ protoComponent = univ.Sequence()
+
+
+class SequenceOfDecoder(SequenceOrSequenceOfDecoder):
+ protoComponent = univ.SequenceOf()
+
+
+class SetOrSetOfDecoder(UniversalConstructedTypeDecoder):
+ protoRecordComponent = univ.Set()
+ protoSequenceComponent = univ.SetOf()
+
+
+class SetDecoder(SetOrSetOfDecoder):
+ protoComponent = univ.Set()
+
+
+class SetOfDecoder(SetOrSetOfDecoder):
+ protoComponent = univ.SetOf()
+
+
+class ChoiceDecoder(AbstractConstructedDecoder):
+ protoComponent = univ.Choice()
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ head, tail = substrate[:length], substrate[length:]
+
+ if asn1Spec is None:
+ asn1Object = self.protoComponent.clone(tagSet=tagSet)
+
+ else:
+ asn1Object = asn1Spec.clone()
+
+ if substrateFun:
+ return substrateFun(asn1Object, substrate, length)
+
+ if asn1Object.tagSet == tagSet:
+ if LOG:
+ LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,))
+
+ component, head = decodeFun(
+ head, asn1Object.componentTagMap, **options
+ )
+
+ else:
+ if LOG:
+ LOG('decoding %s as untagged CHOICE' % (tagSet,))
+
+ component, head = decodeFun(
+ head, asn1Object.componentTagMap,
+ tagSet, length, state, **options
+ )
+
+ effectiveTagSet = component.effectiveTagSet
+
+ if LOG:
+ LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet))
+
+ asn1Object.setComponentByType(
+ effectiveTagSet, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False,
+ innerFlag=False
+ )
+
+ return asn1Object, tail
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if asn1Spec is None:
+ asn1Object = self.protoComponent.clone(tagSet=tagSet)
+ else:
+ asn1Object = asn1Spec.clone()
+
+ if substrateFun:
+ return substrateFun(asn1Object, substrate, length)
+
+ if asn1Object.tagSet == tagSet:
+ if LOG:
+ LOG('decoding %s as explicitly tagged CHOICE' % (tagSet,))
+
+ component, substrate = decodeFun(
+ substrate, asn1Object.componentType.tagMapUnique, **options
+ )
+
+ # eat up EOO marker
+ eooMarker, substrate = decodeFun(
+ substrate, allowEoo=True, **options
+ )
+
+ if eooMarker is not eoo.endOfOctets:
+ raise error.PyAsn1Error('No EOO seen before substrate ends')
+
+ else:
+ if LOG:
+ LOG('decoding %s as untagged CHOICE' % (tagSet,))
+
+ component, substrate = decodeFun(
+ substrate, asn1Object.componentType.tagMapUnique,
+ tagSet, length, state, **options
+ )
+
+ effectiveTagSet = component.effectiveTagSet
+
+ if LOG:
+ LOG('decoded component %s, effective tag set %s' % (component, effectiveTagSet))
+
+ asn1Object.setComponentByType(
+ effectiveTagSet, component,
+ verifyConstraints=False,
+ matchTags=False, matchConstraints=False,
+ innerFlag=False
+ )
+
+ return asn1Object, substrate
+
+
+class AnyDecoder(AbstractSimpleDecoder):
+ protoComponent = univ.Any()
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if asn1Spec is None:
+ isUntagged = True
+
+ elif asn1Spec.__class__ is tagmap.TagMap:
+ isUntagged = tagSet not in asn1Spec.tagMap
+
+ else:
+ isUntagged = tagSet != asn1Spec.tagSet
+
+ if isUntagged:
+ fullSubstrate = options['fullSubstrate']
+
+ # untagged Any container, recover inner header substrate
+ length += len(fullSubstrate) - len(substrate)
+ substrate = fullSubstrate
+
+ if LOG:
+ LOG('decoding as untagged ANY, substrate %s' % debug.hexdump(substrate))
+
+ if substrateFun:
+ return substrateFun(self._createComponent(asn1Spec, tagSet, noValue, **options),
+ substrate, length)
+
+ head, tail = substrate[:length], substrate[length:]
+
+ return self._createComponent(asn1Spec, tagSet, head, **options), tail
+
+ def indefLenValueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ if asn1Spec is None:
+ isTagged = False
+
+ elif asn1Spec.__class__ is tagmap.TagMap:
+ isTagged = tagSet in asn1Spec.tagMap
+
+ else:
+ isTagged = tagSet == asn1Spec.tagSet
+
+ if isTagged:
+ # tagged Any type -- consume header substrate
+ header = null
+
+ if LOG:
+ LOG('decoding as tagged ANY')
+
+ else:
+ fullSubstrate = options['fullSubstrate']
+
+ # untagged Any, recover header substrate
+ header = fullSubstrate[:-len(substrate)]
+
+ if LOG:
+ LOG('decoding as untagged ANY, header substrate %s' % debug.hexdump(header))
+
+ # Any components do not inherit initial tag
+ asn1Spec = self.protoComponent
+
+ if substrateFun and substrateFun is not self.substrateCollector:
+ asn1Object = self._createComponent(asn1Spec, tagSet, noValue, **options)
+ return substrateFun(asn1Object, header + substrate, length + len(header))
+
+ if LOG:
+ LOG('assembling constructed serialization')
+
+ # All inner fragments are of the same type, treat them as octet string
+ substrateFun = self.substrateCollector
+
+ while substrate:
+ component, substrate = decodeFun(substrate, asn1Spec,
+ substrateFun=substrateFun,
+ allowEoo=True, **options)
+ if component is eoo.endOfOctets:
+ break
+
+ header += component
+
+ else:
+ raise error.SubstrateUnderrunError(
+ 'No EOO seen before substrate ends'
+ )
+
+ if substrateFun:
+ return header, substrate
+
+ else:
+ return self._createComponent(asn1Spec, tagSet, header, **options), substrate
+
+
+# character string types
+class UTF8StringDecoder(OctetStringDecoder):
+ protoComponent = char.UTF8String()
+
+
+class NumericStringDecoder(OctetStringDecoder):
+ protoComponent = char.NumericString()
+
+
+class PrintableStringDecoder(OctetStringDecoder):
+ protoComponent = char.PrintableString()
+
+
+class TeletexStringDecoder(OctetStringDecoder):
+ protoComponent = char.TeletexString()
+
+
+class VideotexStringDecoder(OctetStringDecoder):
+ protoComponent = char.VideotexString()
+
+
+class IA5StringDecoder(OctetStringDecoder):
+ protoComponent = char.IA5String()
+
+
+class GraphicStringDecoder(OctetStringDecoder):
+ protoComponent = char.GraphicString()
+
+
+class VisibleStringDecoder(OctetStringDecoder):
+ protoComponent = char.VisibleString()
+
+
+class GeneralStringDecoder(OctetStringDecoder):
+ protoComponent = char.GeneralString()
+
+
+class UniversalStringDecoder(OctetStringDecoder):
+ protoComponent = char.UniversalString()
+
+
+class BMPStringDecoder(OctetStringDecoder):
+ protoComponent = char.BMPString()
+
+
+# "useful" types
+class ObjectDescriptorDecoder(OctetStringDecoder):
+ protoComponent = useful.ObjectDescriptor()
+
+
+class GeneralizedTimeDecoder(OctetStringDecoder):
+ protoComponent = useful.GeneralizedTime()
+
+
+class UTCTimeDecoder(OctetStringDecoder):
+ protoComponent = useful.UTCTime()
+
+
+tagMap = {
+ univ.Integer.tagSet: IntegerDecoder(),
+ univ.Boolean.tagSet: BooleanDecoder(),
+ univ.BitString.tagSet: BitStringDecoder(),
+ univ.OctetString.tagSet: OctetStringDecoder(),
+ univ.Null.tagSet: NullDecoder(),
+ univ.ObjectIdentifier.tagSet: ObjectIdentifierDecoder(),
+ univ.Enumerated.tagSet: IntegerDecoder(),
+ univ.Real.tagSet: RealDecoder(),
+ univ.Sequence.tagSet: SequenceOrSequenceOfDecoder(), # conflicts with SequenceOf
+ univ.Set.tagSet: SetOrSetOfDecoder(), # conflicts with SetOf
+ univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any
+ # character string types
+ char.UTF8String.tagSet: UTF8StringDecoder(),
+ char.NumericString.tagSet: NumericStringDecoder(),
+ char.PrintableString.tagSet: PrintableStringDecoder(),
+ char.TeletexString.tagSet: TeletexStringDecoder(),
+ char.VideotexString.tagSet: VideotexStringDecoder(),
+ char.IA5String.tagSet: IA5StringDecoder(),
+ char.GraphicString.tagSet: GraphicStringDecoder(),
+ char.VisibleString.tagSet: VisibleStringDecoder(),
+ char.GeneralString.tagSet: GeneralStringDecoder(),
+ char.UniversalString.tagSet: UniversalStringDecoder(),
+ char.BMPString.tagSet: BMPStringDecoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: ObjectDescriptorDecoder(),
+ useful.GeneralizedTime.tagSet: GeneralizedTimeDecoder(),
+ useful.UTCTime.tagSet: UTCTimeDecoder()
+}
+
+# Type-to-codec map for ambiguous ASN.1 types
+typeMap = {
+ univ.Set.typeId: SetDecoder(),
+ univ.SetOf.typeId: SetOfDecoder(),
+ univ.Sequence.typeId: SequenceDecoder(),
+ univ.SequenceOf.typeId: SequenceOfDecoder(),
+ univ.Choice.typeId: ChoiceDecoder(),
+ univ.Any.typeId: AnyDecoder()
+}
+
+# Put in non-ambiguous types for faster codec lookup
+for typeDecoder in tagMap.values():
+ if typeDecoder.protoComponent is not None:
+ typeId = typeDecoder.protoComponent.__class__.typeId
+ if typeId is not None and typeId not in typeMap:
+ typeMap[typeId] = typeDecoder
+
+
+(stDecodeTag,
+ stDecodeLength,
+ stGetValueDecoder,
+ stGetValueDecoderByAsn1Spec,
+ stGetValueDecoderByTag,
+ stTryAsExplicitTag,
+ stDecodeValue,
+ stDumpRawValue,
+ stErrorCondition,
+ stStop) = [x for x in range(10)]
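+
+# Decoder state machine (informal sketch): decode the tag, then the length,
+# then pick a value decoder -- by tag when no asn1Spec is given, otherwise
+# by asn1Spec -- and decode the value; stTryAsExplicitTag and stDumpRawValue
+# are fallback states for serialization the decoder cannot match directly.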
+
+
+class Decoder(object):
+ defaultErrorState = stErrorCondition
+ #defaultErrorState = stDumpRawValue
+ defaultRawDecoder = AnyDecoder()
+ supportIndefLength = True
+
+ # noinspection PyDefaultArgument
+ def __init__(self, tagMap, typeMap={}):
+ self.__tagMap = tagMap
+ self.__typeMap = typeMap
+ # Tag & TagSet objects caches
+ self.__tagCache = {}
+ self.__tagSetCache = {}
+ self.__eooSentinel = ints2octs((0, 0))
+
+ def __call__(self, substrate, asn1Spec=None,
+ tagSet=None, length=None, state=stDecodeTag,
+ decodeFun=None, substrateFun=None,
+ **options):
+
+ if LOG:
+ LOG('decoder called at scope %s with state %d, working with up to %d octets of substrate: %s' % (debug.scope, state, len(substrate), debug.hexdump(substrate)))
+
+ allowEoo = options.pop('allowEoo', False)
+
+ # Look for end-of-octets sentinel
+ if allowEoo and self.supportIndefLength:
+ if substrate[:2] == self.__eooSentinel:
+ if LOG:
+ LOG('end-of-octets sentinel found')
+ return eoo.endOfOctets, substrate[2:]
+
+ value = noValue
+
+ tagMap = self.__tagMap
+ typeMap = self.__typeMap
+ tagCache = self.__tagCache
+ tagSetCache = self.__tagSetCache
+
+ fullSubstrate = substrate
+
+ while state is not stStop:
+
+ if state is stDecodeTag:
+ if not substrate:
+ raise error.SubstrateUnderrunError(
+ 'Short octet stream on tag decoding'
+ )
+
+ # Decode tag
+ isShortTag = True
+ firstOctet = substrate[0]
+ substrate = substrate[1:]
+
+ try:
+ lastTag = tagCache[firstOctet]
+
+ except KeyError:
+ integerTag = oct2int(firstOctet)
+ tagClass = integerTag & 0xC0
+ tagFormat = integerTag & 0x20
+ tagId = integerTag & 0x1F
+
+ if tagId == 0x1F:
+ isShortTag = False
+ lengthOctetIdx = 0
+ tagId = 0
+
+ try:
+ while True:
+ integerTag = oct2int(substrate[lengthOctetIdx])
+ lengthOctetIdx += 1
+ tagId <<= 7
+ tagId |= (integerTag & 0x7F)
+ if not integerTag & 0x80:
+ break
+
+ substrate = substrate[lengthOctetIdx:]
+
+ except IndexError:
+ raise error.SubstrateUnderrunError(
+ 'Short octet stream on long tag decoding'
+ )
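+ # e.g. the leader octet 0x1F followed by 85 22 decodes to
+ # tagId = (0x05 << 7) | 0x22 = 674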
+
+ lastTag = tag.Tag(
+ tagClass=tagClass, tagFormat=tagFormat, tagId=tagId
+ )
+
+ if isShortTag:
+ # cache short tags
+ tagCache[firstOctet] = lastTag
+
+ if tagSet is None:
+ if isShortTag:
+ try:
+ tagSet = tagSetCache[firstOctet]
+
+ except KeyError:
+ # base tag not recovered
+ tagSet = tag.TagSet((), lastTag)
+ tagSetCache[firstOctet] = tagSet
+ else:
+ tagSet = tag.TagSet((), lastTag)
+
+ else:
+ tagSet = lastTag + tagSet
+
+ state = stDecodeLength
+
+ if LOG:
+ LOG('tag decoded into %s, decoding length' % tagSet)
+
+ if state is stDecodeLength:
+ # Decode length
+ if not substrate:
+ raise error.SubstrateUnderrunError(
+ 'Short octet stream on length decoding'
+ )
+
+ firstOctet = oct2int(substrate[0])
+
+ if firstOctet < 128:
+ size = 1
+ length = firstOctet
+
+ elif firstOctet > 128:
+ size = firstOctet & 0x7F
+ # encoded in size bytes
+ encodedLength = octs2ints(substrate[1:size + 1])
+ # no check against a maximum size here, which shouldn't be a
+ # problem since Python integers can hold lengths larger than any
+ # practical substrate
+ if len(encodedLength) != size:
+ raise error.SubstrateUnderrunError(
+ '%s<%s at %s' % (size, len(encodedLength), tagSet)
+ )
+
+ length = 0
+ for lengthOctet in encodedLength:
+ length <<= 8
+ length |= lengthOctet
+ size += 1
+
+ else:
+ size = 1
+ length = -1
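+ # worked example: octets 82 01 00 are the long form (size 2),
+ # giving length = (0x01 << 8) | 0x00 = 256; a lone 80 octet
+ # selects the indefinite form, flagged here as length == -1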
+
+ substrate = substrate[size:]
+
+ if length == -1:
+ if not self.supportIndefLength:
+ raise error.PyAsn1Error('Indefinite length encoding not supported by this codec')
+
+ else:
+ if len(substrate) < length:
+ raise error.SubstrateUnderrunError('%d-octet short' % (length - len(substrate)))
+
+ state = stGetValueDecoder
+
+ if LOG:
+ LOG('value length decoded into %d, payload substrate is: %s' % (length, debug.hexdump(length == -1 and substrate or substrate[:length])))
+
+ if state is stGetValueDecoder:
+ if asn1Spec is None:
+ state = stGetValueDecoderByTag
+
+ else:
+ state = stGetValueDecoderByAsn1Spec
+ #
+ # There are two ways of creating subtypes in ASN.1, and they affect
+ # decoder operation differently:
+ # 1) Either base types are used as-is, or no IMPLICIT tagging has been
+ # applied on subtyping.
+ # 2) Subtype syntax drops base type information (by means of
+ # IMPLICIT tagging).
+ # The first case allows for complete tag recovery from the substrate,
+ # while the second one requires the original ASN.1 type spec for
+ # decoding.
+ #
+ # In either case a set of tags (tagSet) comes from the substrate
+ # in an incremental, tag-by-tag fashion (this is the case of
+ # EXPLICIT tagging, which is the most basic). The outermost tag comes
+ # first from the wire.
+ #
+ if state is stGetValueDecoderByTag:
+ try:
+ concreteDecoder = tagMap[tagSet]
+
+ except KeyError:
+ concreteDecoder = None
+
+ if concreteDecoder:
+ state = stDecodeValue
+
+ else:
+ try:
+ concreteDecoder = tagMap[tagSet[:1]]
+
+ except KeyError:
+ concreteDecoder = None
+
+ if concreteDecoder:
+ state = stDecodeValue
+ else:
+ state = stTryAsExplicitTag
+
+ if LOG:
+ LOG('codec %s chosen by a built-in type, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag'))
+ debug.scope.push(concreteDecoder is None and '?' or concreteDecoder.protoComponent.__class__.__name__)
+
+ if state is stGetValueDecoderByAsn1Spec:
+
+ if asn1Spec.__class__ is tagmap.TagMap:
+ try:
+ chosenSpec = asn1Spec[tagSet]
+
+ except KeyError:
+ chosenSpec = None
+
+ if LOG:
+ LOG('candidate ASN.1 spec is a map of:')
+
+ for firstOctet, v in asn1Spec.presentTypes.items():
+ LOG(' %s -> %s' % (firstOctet, v.__class__.__name__))
+
+ if asn1Spec.skipTypes:
+ LOG('but neither of: ')
+ for firstOctet, v in asn1Spec.skipTypes.items():
+ LOG(' %s -> %s' % (firstOctet, v.__class__.__name__))
+ LOG('new candidate ASN.1 spec is %s, chosen by %s' % (chosenSpec is None and '<none>' or chosenSpec.prettyPrintType(), tagSet))
+
+ elif tagSet == asn1Spec.tagSet or tagSet in asn1Spec.tagMap:
+ chosenSpec = asn1Spec
+ if LOG:
+ LOG('candidate ASN.1 spec is %s' % asn1Spec.__class__.__name__)
+
+ else:
+ chosenSpec = None
+
+ if chosenSpec is not None:
+ try:
+ # ambiguous type or just faster codec lookup
+ concreteDecoder = typeMap[chosenSpec.typeId]
+
+ if LOG:
+ LOG('value decoder chosen for an ambiguous type by type ID %s' % (chosenSpec.typeId,))
+
+ except KeyError:
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(chosenSpec.tagSet.baseTag, chosenSpec.tagSet.baseTag)
+ try:
+ # base type or tagged subtype
+ concreteDecoder = tagMap[baseTagSet]
+
+ if LOG:
+ LOG('value decoder chosen by base %s' % (baseTagSet,))
+
+ except KeyError:
+ concreteDecoder = None
+
+ if concreteDecoder:
+ asn1Spec = chosenSpec
+ state = stDecodeValue
+
+ else:
+ state = stTryAsExplicitTag
+
+ else:
+ concreteDecoder = None
+ state = stTryAsExplicitTag
+
+ if LOG:
+ LOG('codec %s chosen by ASN.1 spec, decoding %s' % (state is stDecodeValue and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as explicit tag'))
+ debug.scope.push(chosenSpec is None and '?' or chosenSpec.__class__.__name__)
+
+ if state is stDecodeValue:
+ if not options.get('recursiveFlag', True) and not substrateFun: # deprecate this
+ substrateFun = lambda a, b, c: (a, b[:c])
+
+ options.update(fullSubstrate=fullSubstrate)
+
+ if length == -1: # indef length
+ value, substrate = concreteDecoder.indefLenValueDecoder(
+ substrate, asn1Spec,
+ tagSet, length, stGetValueDecoder,
+ self, substrateFun,
+ **options
+ )
+
+ else:
+ value, substrate = concreteDecoder.valueDecoder(
+ substrate, asn1Spec,
+ tagSet, length, stGetValueDecoder,
+ self, substrateFun,
+ **options
+ )
+
+ if LOG:
+ LOG('codec %s yields type %s, value:\n%s\n...remaining substrate is: %s' % (concreteDecoder.__class__.__name__, value.__class__.__name__, isinstance(value, base.Asn1Item) and value.prettyPrint() or value, substrate and debug.hexdump(substrate) or '<none>'))
+
+ state = stStop
+ break
+
+ if state is stTryAsExplicitTag:
+ if (tagSet and
+ tagSet[0].tagFormat == tag.tagFormatConstructed and
+ tagSet[0].tagClass != tag.tagClassUniversal):
+ # Assume explicit tagging
+ concreteDecoder = explicitTagDecoder
+ state = stDecodeValue
+
+ else:
+ concreteDecoder = None
+ state = self.defaultErrorState
+
+ if LOG:
+ LOG('codec %s chosen, decoding %s' % (concreteDecoder and concreteDecoder.__class__.__name__ or "<none>", state is stDecodeValue and 'value' or 'as failure'))
+
+ if state is stDumpRawValue:
+ concreteDecoder = self.defaultRawDecoder
+
+ if LOG:
+ LOG('codec %s chosen, decoding value' % concreteDecoder.__class__.__name__)
+
+ state = stDecodeValue
+
+ if state is stErrorCondition:
+ raise error.PyAsn1Error(
+ '%s not in asn1Spec: %r' % (tagSet, asn1Spec)
+ )
+
+ if LOG:
+ debug.scope.pop()
+ LOG('decoder left scope %s, call completed' % debug.scope)
+
+ return value, substrate
+
+
+#: Turns BER octet stream into an ASN.1 object.
+#:
+#: Takes a BER octet stream and decodes it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: BER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#: being decoded, *asn1Spec* may or may not be required. The most common
+#: reason for it to be required is that the ASN.1 structure is encoded with
+#: *IMPLICIT* tagging.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from BER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode BER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode BER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder(tagMap, typeMap)
+
+# XXX
+# non-recursive decoding; return position rather than substrate
diff --git a/third_party/python/pyasn1/pyasn1/codec/ber/encoder.py b/third_party/python/pyasn1/pyasn1/codec/ber/encoder.py
new file mode 100644
index 0000000000..778aa86706
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/ber/encoder.py
@@ -0,0 +1,890 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.compat.integer import to_bytes
+from pyasn1.compat.octets import (int2oct, oct2int, ints2octs, null,
+ str2octs, isOctetsType)
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['encode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_ENCODER)
+
+
+class AbstractItemEncoder(object):
+ supportIndefLenMode = True
+
+ # The outcome of an otherwise legitimate call to `encodeFun(eoo.endOfOctets)`
+ eooIntegerSubstrate = (0, 0)
+ eooOctetsSubstrate = ints2octs(eooIntegerSubstrate)
+
+ # noinspection PyMethodMayBeStatic
+ def encodeTag(self, singleTag, isConstructed):
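+ # e.g. a primitive context-class tag [5] encodes to the single octet
+ # 0x85; tag numbers of 31 and above use the 0x1F leader followed by
+ # base-128 octets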
+ tagClass, tagFormat, tagId = singleTag
+ encodedTag = tagClass | tagFormat
+ if isConstructed:
+ encodedTag |= tag.tagFormatConstructed
+
+ if tagId < 31:
+ return encodedTag | tagId,
+
+ else:
+ substrate = tagId & 0x7f,
+
+ tagId >>= 7
+
+ while tagId:
+ substrate = (0x80 | (tagId & 0x7f),) + substrate
+ tagId >>= 7
+
+ return (encodedTag | 0x1F,) + substrate
+
+ def encodeLength(self, length, defMode):
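+ # BER length forms produced below: a single octet for lengths under
+ # 0x80, the long form (0x80 | octet count, then big-endian length
+ # octets) otherwise, and a bare 0x80 for indefinite-length mode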
+ if not defMode and self.supportIndefLenMode:
+ return (0x80,)
+
+ if length < 0x80:
+ return length,
+
+ else:
+ substrate = ()
+ while length:
+ substrate = (length & 0xff,) + substrate
+ length >>= 8
+
+ substrateLen = len(substrate)
+
+ if substrateLen > 126:
+ raise error.PyAsn1Error('Length octets overflow (%d)' % substrateLen)
+
+ return (0x80 | substrateLen,) + substrate
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ raise error.PyAsn1Error('Not implemented')
+
+ def encode(self, value, asn1Spec=None, encodeFun=None, **options):
+
+ if asn1Spec is None:
+ tagSet = value.tagSet
+ else:
+ tagSet = asn1Spec.tagSet
+
+ # untagged item?
+ if not tagSet:
+ substrate, isConstructed, isOctets = self.encodeValue(
+ value, asn1Spec, encodeFun, **options
+ )
+ return substrate
+
+ defMode = options.get('defMode', True)
+
+ substrate = null
+
+ for idx, singleTag in enumerate(tagSet.superTags):
+
+ defModeOverride = defMode
+
+ # base tag?
+ if not idx:
+ try:
+ substrate, isConstructed, isOctets = self.encodeValue(
+ value, asn1Spec, encodeFun, **options
+ )
+
+ except error.PyAsn1Error:
+ exc = sys.exc_info()
+ raise error.PyAsn1Error(
+ 'Error encoding %r: %s' % (value, exc[1]))
+
+ if LOG:
+ LOG('encoded %svalue %s into %s' % (
+ isConstructed and 'constructed ' or '', value, substrate
+ ))
+
+ if not substrate and isConstructed and options.get('ifNotEmpty', False):
+ return substrate
+
+ if not isConstructed:
+ defModeOverride = True
+
+ if LOG:
+ LOG('encoding mode overridden to definite length for primitive type')
+
+ header = self.encodeTag(singleTag, isConstructed)
+
+ if LOG:
+ LOG('encoded %stag %s into %s' % (
+ isConstructed and 'constructed ' or '',
+ singleTag, debug.hexdump(ints2octs(header))))
+
+ header += self.encodeLength(len(substrate), defModeOverride)
+
+ if LOG:
+ LOG('encoded %s octets (tag + payload) into %s' % (
+ len(substrate), debug.hexdump(ints2octs(header))))
+
+ if isOctets:
+ substrate = ints2octs(header) + substrate
+
+ if not defModeOverride:
+ substrate += self.eooOctetsSubstrate
+
+ else:
+ substrate = header + substrate
+
+ if not defModeOverride:
+ substrate += self.eooIntegerSubstrate
+
+ if not isOctets:
+ substrate = ints2octs(substrate)
+
+ return substrate
+
+
+class EndOfOctetsEncoder(AbstractItemEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ return null, False, True
+
+
+class BooleanEncoder(AbstractItemEncoder):
+ supportIndefLenMode = False
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ return value and (1,) or (0,), False, False
+
+
+class IntegerEncoder(AbstractItemEncoder):
+ supportIndefLenMode = False
+ supportCompactZero = False
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if value == 0:
+ if LOG:
+ LOG('encoding %spayload for zero INTEGER' % (
+ self.supportCompactZero and 'no ' or ''
+ ))
+
+ # de-facto way to encode zero
+ if self.supportCompactZero:
+ return (), False, False
+ else:
+ return (0,), False, False
+
+ return to_bytes(int(value), signed=True), False, True
+
+
+class BitStringEncoder(AbstractItemEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if asn1Spec is not None:
+ # TODO: try to avoid ASN.1 schema instantiation
+ value = asn1Spec.clone(value)
+
+ valueLength = len(value)
+ if valueLength % 8:
+ alignedValue = value << (8 - valueLength % 8)
+ else:
+ alignedValue = value
+
+ maxChunkSize = options.get('maxChunkSize', 0)
+ if not maxChunkSize or len(alignedValue) <= maxChunkSize * 8:
+ substrate = alignedValue.asOctets()
+ return int2oct(len(substrate) * 8 - valueLength) + substrate, False, True
+
+ if LOG:
+ LOG('encoding into up to %s-octet chunks' % maxChunkSize)
+
+ baseTag = value.tagSet.baseTag
+
+ # strip off explicit tags
+ if baseTag:
+ tagSet = tag.TagSet(baseTag, baseTag)
+
+ else:
+ tagSet = tag.TagSet()
+
+ alignedValue = alignedValue.clone(tagSet=tagSet)
+
+ stop = 0
+ substrate = null
+ while stop < valueLength:
+ start = stop
+ stop = min(start + maxChunkSize * 8, valueLength)
+ substrate += encodeFun(alignedValue[start:stop], asn1Spec, **options)
+
+ return substrate, True, True
+
+
+class OctetStringEncoder(AbstractItemEncoder):
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+
+ if asn1Spec is None:
+ substrate = value.asOctets()
+
+ elif not isOctetsType(value):
+ substrate = asn1Spec.clone(value).asOctets()
+
+ else:
+ substrate = value
+
+ maxChunkSize = options.get('maxChunkSize', 0)
+
+ if not maxChunkSize or len(substrate) <= maxChunkSize:
+ return substrate, False, True
+
+ if LOG:
+ LOG('encoding into up to %s-octet chunks' % maxChunkSize)
+
+ # strip off explicit tags for inner chunks
+
+ if asn1Spec is None:
+ baseTag = value.tagSet.baseTag
+
+ # strip off explicit tags
+ if baseTag:
+ tagSet = tag.TagSet(baseTag, baseTag)
+
+ else:
+ tagSet = tag.TagSet()
+
+ asn1Spec = value.clone(tagSet=tagSet)
+
+ elif not isOctetsType(value):
+ baseTag = asn1Spec.tagSet.baseTag
+
+ # strip off explicit tags
+ if baseTag:
+ tagSet = tag.TagSet(baseTag, baseTag)
+
+ else:
+ tagSet = tag.TagSet()
+
+ asn1Spec = asn1Spec.clone(tagSet=tagSet)
+
+ pos = 0
+ substrate = null
+
+ while True:
+ chunk = value[pos:pos + maxChunkSize]
+ if not chunk:
+ break
+
+ substrate += encodeFun(chunk, asn1Spec, **options)
+ pos += maxChunkSize
+
+ return substrate, True, True
+
+
+class NullEncoder(AbstractItemEncoder):
+ supportIndefLenMode = False
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ return null, False, True
+
+
+class ObjectIdentifierEncoder(AbstractItemEncoder):
+ supportIndefLenMode = False
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if asn1Spec is not None:
+ value = asn1Spec.clone(value)
+
+ oid = value.asTuple()
+
+ # Build the first pair
+ try:
+ first = oid[0]
+ second = oid[1]
+
+ except IndexError:
+ raise error.PyAsn1Error('Short OID %s' % (value,))
+
+ if 0 <= second <= 39:
+ if first == 1:
+ oid = (second + 40,) + oid[2:]
+ elif first == 0:
+ oid = (second,) + oid[2:]
+ elif first == 2:
+ oid = (second + 80,) + oid[2:]
+ else:
+ raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))
+
+ elif first == 2:
+ oid = (second + 80,) + oid[2:]
+
+ else:
+ raise error.PyAsn1Error('Impossible first/second arcs at %s' % (value,))
+
+ octets = ()
+
+ # Cycle through subIds
+ for subOid in oid:
+ if 0 <= subOid <= 127:
+ # Optimize for the common case
+ octets += (subOid,)
+
+ elif subOid > 127:
+ # Pack large Sub-Object IDs
+ res = (subOid & 0x7f,)
+ subOid >>= 7
+
+ while subOid:
+ res = (0x80 | (subOid & 0x7f),) + res
+ subOid >>= 7
+
+ # Add packed Sub-Object ID to resulted Object ID
+ octets += res
+
+ else:
+ raise error.PyAsn1Error('Negative OID arc %s at %s' % (subOid, value))
+
+ return octets, False, False
+
+
+class RealEncoder(AbstractItemEncoder):
+ supportIndefLenMode = 0
+ binEncBase = 2 # set to None to choose encoding base automatically
+
+ @staticmethod
+ def _dropFloatingPoint(m, encbase, e):
+ ms, es = 1, 1
+ if m < 0:
+ ms = -1 # mantissa sign
+
+ if e < 0:
+ es = -1 # exponent sign
+
+ m *= ms
+
+ if encbase == 8:
+ m *= 2 ** (abs(e) % 3 * es)
+ e = abs(e) // 3 * es
+
+ elif encbase == 16:
+ m *= 2 ** (abs(e) % 4 * es)
+ e = abs(e) // 4 * es
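+ # e.g. m=1, e=10 in base 2 becomes m=4, e=2 in base 16,
+ # since 2 ** 10 == 4 * 16 ** 2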
+
+ while True:
+ if int(m) != m:
+ m *= encbase
+ e -= 1
+ continue
+ break
+
+ return ms, int(m), encbase, e
+
+ def _chooseEncBase(self, value):
+ m, b, e = value
+ encBase = [2, 8, 16]
+ if value.binEncBase in encBase:
+ return self._dropFloatingPoint(m, value.binEncBase, e)
+
+ elif self.binEncBase in encBase:
+ return self._dropFloatingPoint(m, self.binEncBase, e)
+
+ # auto choosing base 2/8/16
+ mantissa = [m, m, m]
+ exponent = [e, e, e]
+ sign = 1
+ encbase = 2
+ e = float('inf')
+
+ for i in range(3):
+ (sign,
+ mantissa[i],
+ encBase[i],
+ exponent[i]) = self._dropFloatingPoint(mantissa[i], encBase[i], exponent[i])
+
+ if abs(exponent[i]) < abs(e) or (abs(exponent[i]) == abs(e) and mantissa[i] < m):
+ e = exponent[i]
+ m = int(mantissa[i])
+ encbase = encBase[i]
+
+ if LOG:
+ LOG('automatically chosen REAL encoding base %s, sign %s, mantissa %s, '
+ 'exponent %s' % (encbase, sign, m, e))
+
+ return sign, m, encbase, e
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if asn1Spec is not None:
+ value = asn1Spec.clone(value)
+
+ if value.isPlusInf:
+ return (0x40,), False, False
+
+ if value.isMinusInf:
+ return (0x41,), False, False
+
+ m, b, e = value
+
+ if not m:
+ return null, False, True
+
+ if b == 10:
+ if LOG:
+ LOG('encoding REAL into character form')
+
+ return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), False, True
+
+ elif b == 2:
+ fo = 0x80 # binary encoding
+ ms, m, encbase, e = self._chooseEncBase(value)
+
+ if ms < 0: # mantissa sign
+ fo |= 0x40 # sign bit
+
+ # exponent & mantissa normalization
+ if encbase == 2:
+ while m & 0x1 == 0:
+ m >>= 1
+ e += 1
+
+ elif encbase == 8:
+ while m & 0x7 == 0:
+ m >>= 3
+ e += 1
+ fo |= 0x10
+
+ else: # encbase = 16
+ while m & 0xf == 0:
+ m >>= 4
+ e += 1
+ fo |= 0x20
+
+ sf = 0 # scale factor
+
+ while m & 0x1 == 0:
+ m >>= 1
+ sf += 1
+
+ if sf > 3:
+ raise error.PyAsn1Error('Scale factor overflow') # bug if raised
+
+ fo |= sf << 2
+ eo = null
+ if e == 0 or e == -1:
+ eo = int2oct(e & 0xff)
+
+ else:
+ while e not in (0, -1):
+ eo = int2oct(e & 0xff) + eo
+ e >>= 8
+
+ if e == 0 and eo and oct2int(eo[0]) & 0x80:
+ eo = int2oct(0) + eo
+
+ if e == -1 and eo and not (oct2int(eo[0]) & 0x80):
+ eo = int2oct(0xff) + eo
+
+ n = len(eo)
+ if n > 0xff:
+ raise error.PyAsn1Error('Real exponent overflow')
+
+ if n == 1:
+ pass
+
+ elif n == 2:
+ fo |= 1
+
+ elif n == 3:
+ fo |= 2
+
+ else:
+ fo |= 3
+ eo = int2oct(n & 0xff) + eo
+
+ po = null
+
+ while m:
+ po = int2oct(m & 0xff) + po
+ m >>= 8
+
+ substrate = int2oct(fo) + eo + po
+
+ return substrate, False, True
+
+ else:
+ raise error.PyAsn1Error('Prohibited Real base %s' % b)
+
+
+class SequenceEncoder(AbstractItemEncoder):
+ omitEmptyOptionals = False
+
+ # TODO: handling three flavors of input is too much -- split over codecs
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+
+ substrate = null
+
+ omitEmptyOptionals = options.get(
+ 'omitEmptyOptionals', self.omitEmptyOptionals)
+
+ if LOG:
+ LOG('%sencoding empty OPTIONAL components' % (
+ omitEmptyOptionals and 'not ' or ''))
+
+ if asn1Spec is None:
+ # instance of ASN.1 schema
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ namedTypes = value.componentType
+
+ for idx, component in enumerate(value.values()):
+ if namedTypes:
+ namedType = namedTypes[idx]
+
+ if namedType.isOptional and not component.isValue:
+ if LOG:
+ LOG('not encoding OPTIONAL component %r' % (namedType,))
+ continue
+
+ if namedType.isDefaulted and component == namedType.asn1Object:
+ if LOG:
+ LOG('not encoding DEFAULT component %r' % (namedType,))
+ continue
+
+ if omitEmptyOptionals:
+ options.update(ifNotEmpty=namedType.isOptional)
+
+ # wrap open type blob if needed
+ if namedTypes and namedType.openType:
+
+ wrapType = namedType.asn1Object
+
+ if wrapType.typeId in (
+ univ.SetOf.typeId, univ.SequenceOf.typeId):
+
+ substrate += encodeFun(
+ component, asn1Spec,
+ **dict(options, wrapType=wrapType.componentType))
+
+ else:
+ chunk = encodeFun(component, asn1Spec, **options)
+
+ if wrapType.isSameTypeWith(component):
+ substrate += chunk
+
+ else:
+ substrate += encodeFun(chunk, wrapType, **options)
+
+ if LOG:
+ LOG('wrapped with wrap type %r' % (wrapType,))
+
+ else:
+ substrate += encodeFun(component, asn1Spec, **options)
+
+ else:
+ # bare Python value + ASN.1 schema
+ for idx, namedType in enumerate(asn1Spec.componentType.namedTypes):
+
+ try:
+ component = value[namedType.name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Component name "%s" not found in %r' % (
+ namedType.name, value))
+
+ if namedType.isOptional and namedType.name not in value:
+ if LOG:
+ LOG('not encoding OPTIONAL component %r' % (namedType,))
+ continue
+
+ if namedType.isDefaulted and component == namedType.asn1Object:
+ if LOG:
+ LOG('not encoding DEFAULT component %r' % (namedType,))
+ continue
+
+ if omitEmptyOptionals:
+ options.update(ifNotEmpty=namedType.isOptional)
+
+ componentSpec = namedType.asn1Object
+
+ # wrap open type blob if needed
+ if namedType.openType:
+
+ if componentSpec.typeId in (
+ univ.SetOf.typeId, univ.SequenceOf.typeId):
+
+ substrate += encodeFun(
+ component, componentSpec,
+ **dict(options, wrapType=componentSpec.componentType))
+
+ else:
+ chunk = encodeFun(component, componentSpec, **options)
+
+ if componentSpec.isSameTypeWith(component):
+ substrate += chunk
+
+ else:
+ substrate += encodeFun(chunk, componentSpec, **options)
+
+ if LOG:
+ LOG('wrapped with wrap type %r' % (componentSpec,))
+
+ else:
+ substrate += encodeFun(component, componentSpec, **options)
+
+ return substrate, True, True
+
+
+class SequenceOfEncoder(AbstractItemEncoder):
+ def _encodeComponents(self, value, asn1Spec, encodeFun, **options):
+
+ if asn1Spec is None:
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ else:
+ asn1Spec = asn1Spec.componentType
+
+ chunks = []
+
+ wrapType = options.pop('wrapType', None)
+
+ for idx, component in enumerate(value):
+ chunk = encodeFun(component, asn1Spec, **options)
+
+ if (wrapType is not None and
+ not wrapType.isSameTypeWith(component)):
+ # wrap encoded value with wrapper container (e.g. ANY)
+ chunk = encodeFun(chunk, wrapType, **options)
+
+ if LOG:
+ LOG('wrapped with wrap type %r' % (wrapType,))
+
+ chunks.append(chunk)
+
+ return chunks
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ chunks = self._encodeComponents(
+ value, asn1Spec, encodeFun, **options)
+
+ return null.join(chunks), True, True
+
+
+class ChoiceEncoder(AbstractItemEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if asn1Spec is None:
+ component = value.getComponent()
+ else:
+ names = [namedType.name for namedType in asn1Spec.componentType.namedTypes
+ if namedType.name in value]
+ if len(names) != 1:
+ raise error.PyAsn1Error('%s components for Choice at %r' % (len(names) and 'Multiple ' or 'None ', value))
+
+ name = names[0]
+
+ component = value[name]
+ asn1Spec = asn1Spec[name]
+
+ return encodeFun(component, asn1Spec, **options), True, True
+
+
+class AnyEncoder(OctetStringEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if asn1Spec is None:
+ value = value.asOctets()
+ elif not isOctetsType(value):
+ value = asn1Spec.clone(value).asOctets()
+
+ return value, not options.get('defMode', True), True
+
+
+tagMap = {
+ eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
+ univ.Boolean.tagSet: BooleanEncoder(),
+ univ.Integer.tagSet: IntegerEncoder(),
+ univ.BitString.tagSet: BitStringEncoder(),
+ univ.OctetString.tagSet: OctetStringEncoder(),
+ univ.Null.tagSet: NullEncoder(),
+ univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
+ univ.Enumerated.tagSet: IntegerEncoder(),
+ univ.Real.tagSet: RealEncoder(),
+ # Sequence & Set have the same tags as SequenceOf & SetOf
+ univ.SequenceOf.tagSet: SequenceOfEncoder(),
+ univ.SetOf.tagSet: SequenceOfEncoder(),
+ univ.Choice.tagSet: ChoiceEncoder(),
+ # character string types
+ char.UTF8String.tagSet: OctetStringEncoder(),
+ char.NumericString.tagSet: OctetStringEncoder(),
+ char.PrintableString.tagSet: OctetStringEncoder(),
+ char.TeletexString.tagSet: OctetStringEncoder(),
+ char.VideotexString.tagSet: OctetStringEncoder(),
+ char.IA5String.tagSet: OctetStringEncoder(),
+ char.GraphicString.tagSet: OctetStringEncoder(),
+ char.VisibleString.tagSet: OctetStringEncoder(),
+ char.GeneralString.tagSet: OctetStringEncoder(),
+ char.UniversalString.tagSet: OctetStringEncoder(),
+ char.BMPString.tagSet: OctetStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
+ useful.GeneralizedTime.tagSet: OctetStringEncoder(),
+ useful.UTCTime.tagSet: OctetStringEncoder()
+}
+
+# Put in ambiguous & non-ambiguous types for faster codec lookup
+typeMap = {
+ univ.Boolean.typeId: BooleanEncoder(),
+ univ.Integer.typeId: IntegerEncoder(),
+ univ.BitString.typeId: BitStringEncoder(),
+ univ.OctetString.typeId: OctetStringEncoder(),
+ univ.Null.typeId: NullEncoder(),
+ univ.ObjectIdentifier.typeId: ObjectIdentifierEncoder(),
+ univ.Enumerated.typeId: IntegerEncoder(),
+ univ.Real.typeId: RealEncoder(),
+ # Sequence & Set have the same tags as SequenceOf & SetOf
+ univ.Set.typeId: SequenceEncoder(),
+ univ.SetOf.typeId: SequenceOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder(),
+ univ.SequenceOf.typeId: SequenceOfEncoder(),
+ univ.Choice.typeId: ChoiceEncoder(),
+ univ.Any.typeId: AnyEncoder(),
+ # character string types
+ char.UTF8String.typeId: OctetStringEncoder(),
+ char.NumericString.typeId: OctetStringEncoder(),
+ char.PrintableString.typeId: OctetStringEncoder(),
+ char.TeletexString.typeId: OctetStringEncoder(),
+ char.VideotexString.typeId: OctetStringEncoder(),
+ char.IA5String.typeId: OctetStringEncoder(),
+ char.GraphicString.typeId: OctetStringEncoder(),
+ char.VisibleString.typeId: OctetStringEncoder(),
+ char.GeneralString.typeId: OctetStringEncoder(),
+ char.UniversalString.typeId: OctetStringEncoder(),
+ char.BMPString.typeId: OctetStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.typeId: OctetStringEncoder(),
+ useful.GeneralizedTime.typeId: OctetStringEncoder(),
+ useful.UTCTime.typeId: OctetStringEncoder()
+}
+
+
+class Encoder(object):
+ fixedDefLengthMode = None
+ fixedChunkSize = None
+
+ # noinspection PyDefaultArgument
+ def __init__(self, tagMap, typeMap={}):
+ self.__tagMap = tagMap
+ self.__typeMap = typeMap
+
+ def __call__(self, value, asn1Spec=None, **options):
+ try:
+ if asn1Spec is None:
+ typeId = value.typeId
+ else:
+ typeId = asn1Spec.typeId
+
+ except AttributeError:
+ raise error.PyAsn1Error('Value %r is not ASN.1 type instance '
+ 'and "asn1Spec" not given' % (value,))
+
+ if LOG:
+ LOG('encoder called in %sdef mode, chunk size %s for '
+ 'type %s, value:\n%s' % (not options.get('defMode', True) and 'in' or '', options.get('maxChunkSize', 0), asn1Spec is None and value.prettyPrintType() or asn1Spec.prettyPrintType(), value))
+
+ if self.fixedDefLengthMode is not None:
+ options.update(defMode=self.fixedDefLengthMode)
+
+ if self.fixedChunkSize is not None:
+ options.update(maxChunkSize=self.fixedChunkSize)
+
+ try:
+ concreteEncoder = self.__typeMap[typeId]
+
+ if LOG:
+ LOG('using value codec %s chosen by type ID %s' % (concreteEncoder.__class__.__name__, typeId))
+
+ except KeyError:
+ if asn1Spec is None:
+ tagSet = value.tagSet
+ else:
+ tagSet = asn1Spec.tagSet
+
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(tagSet.baseTag, tagSet.baseTag)
+
+ try:
+ concreteEncoder = self.__tagMap[baseTagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('No encoder for %r (%s)' % (value, tagSet))
+
+ if LOG:
+ LOG('using value codec %s chosen by tagSet %s' % (concreteEncoder.__class__.__name__, tagSet))
+
+ substrate = concreteEncoder.encode(value, asn1Spec, self, **options)
+
+ if LOG:
+ LOG('codec %s built %s octets of substrate: %s\nencoder completed' % (concreteEncoder, len(substrate), debug.hexdump(substrate)))
+
+ return substrate
+
+#: Turns ASN.1 object into BER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative),
+#: walks all its components recursively and produces a BER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#:     A Python or pyasn1 object to encode. If a Python object is given, the `asn1Spec`
+#:     parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: defMode: :py:class:`bool`
+#: If :obj:`False`, produces indefinite length encoding
+#:
+#: maxChunkSize: :py:class:`int`
+#: Maximum chunk size in chunked encoding mode (0 denotes unlimited chunk size)
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#:         Given ASN.1 object encoded into BER octet stream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into BER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+#: Encode ASN.1 value object into BER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+encode = Encoder(tagMap, typeMap)
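
A minimal usage sketch of the ``defMode`` and ``maxChunkSize`` keywords
documented above (assumed inputs; the resulting encodings are described in
comments rather than asserted):

.. code-block:: python

   from pyasn1.codec.ber import encoder
   from pyasn1.type import univ

   # definite-length, primitive encoding -- the default
   encoder.encode(univ.OctetString('Quick brown fox'))

   # indefinite-length encoding: 0x80 length octet plus a trailing
   # end-of-octets marker
   encoder.encode(univ.OctetString('Quick brown fox'), defMode=False)

   # chunked encoding: constructed form carrying fragments of at most
   # four octets each
   encoder.encode(univ.OctetString('Quick brown fox'),
                  defMode=False, maxChunkSize=4)
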
diff --git a/third_party/python/pyasn1/pyasn1/codec/ber/eoo.py b/third_party/python/pyasn1/pyasn1/codec/ber/eoo.py
new file mode 100644
index 0000000000..48eb859e97
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/ber/eoo.py
@@ -0,0 +1,28 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1.type import base
+from pyasn1.type import tag
+
+__all__ = ['endOfOctets']
+
+
+class EndOfOctets(base.SimpleAsn1Type):
+ defaultValue = 0
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x00)
+ )
+
+ _instance = None
+
+ def __new__(cls, *args, **kwargs):
+ if cls._instance is None:
+            # object.__new__ does not accept extra arguments on Python 3
+            cls._instance = object.__new__(cls)
+
+ return cls._instance
+
+
+endOfOctets = EndOfOctets()
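
The singleton above serialises as two zero octets; it is what terminates
indefinite-length encodings, as this sketch illustrates:

.. code-block:: python

   from pyasn1.codec.ber import encoder
   from pyasn1.type import univ

   seq = univ.SequenceOf(componentType=univ.Integer())
   seq.extend([1, 2, 3])

   substrate = encoder.encode(seq, defMode=False)
   assert substrate.startswith(b'0\x80')    # indefinite-length framing
   assert substrate.endswith(b'\x00\x00')   # end-of-octets terminator
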
diff --git a/third_party/python/pyasn1/pyasn1/codec/cer/__init__.py b/third_party/python/pyasn1/pyasn1/codec/cer/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/cer/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/third_party/python/pyasn1/pyasn1/codec/cer/decoder.py b/third_party/python/pyasn1/pyasn1/codec/cer/decoder.py
new file mode 100644
index 0000000000..3e86fd0bc1
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/cer/decoder.py
@@ -0,0 +1,114 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.ber import decoder
+from pyasn1.compat.octets import oct2int
+from pyasn1.type import univ
+
+__all__ = ['decode']
+
+
+class BooleanDecoder(decoder.AbstractSimpleDecoder):
+ protoComponent = univ.Boolean(0)
+
+ def valueDecoder(self, substrate, asn1Spec,
+ tagSet=None, length=None, state=None,
+ decodeFun=None, substrateFun=None,
+ **options):
+ head, tail = substrate[:length], substrate[length:]
+ if not head or length != 1:
+ raise error.PyAsn1Error('Not single-octet Boolean payload')
+ byte = oct2int(head[0])
+ # CER/DER specifies encoding of TRUE as 0xFF and FALSE as 0x0, while
+ # BER allows any non-zero value as TRUE; cf. sections 8.2.2. and 11.1
+ # in https://www.itu.int/ITU-T/studygroups/com17/languages/X.690-0207.pdf
+ if byte == 0xff:
+ value = 1
+ elif byte == 0x00:
+ value = 0
+ else:
+ raise error.PyAsn1Error('Unexpected Boolean payload: %s' % byte)
+ return self._createComponent(asn1Spec, tagSet, value, **options), tail
+
+# TODO: prohibit non-canonical encoding
+BitStringDecoder = decoder.BitStringDecoder
+OctetStringDecoder = decoder.OctetStringDecoder
+RealDecoder = decoder.RealDecoder
+
+tagMap = decoder.tagMap.copy()
+tagMap.update(
+ {univ.Boolean.tagSet: BooleanDecoder(),
+ univ.BitString.tagSet: BitStringDecoder(),
+ univ.OctetString.tagSet: OctetStringDecoder(),
+ univ.Real.tagSet: RealDecoder()}
+)
+
+typeMap = decoder.typeMap.copy()
+
+# Put in non-ambiguous types for faster codec lookup
+for typeDecoder in tagMap.values():
+ if typeDecoder.protoComponent is not None:
+ typeId = typeDecoder.protoComponent.__class__.typeId
+ if typeId is not None and typeId not in typeMap:
+ typeMap[typeId] = typeDecoder
+
+
+class Decoder(decoder.Decoder):
+ pass
+
+
+#: Turns CER octet stream into an ASN.1 object.
+#:
+#: Takes a CER octet stream and decodes it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: CER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#:     being decoded, *asn1Spec* may or may not be required. The most common reason
+#:     for it to be required is that the ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from CER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode CER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode CER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder(tagMap, decoder.typeMap)
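
The stricter ``BooleanDecoder`` above is observable on a non-canonical TRUE
payload; a sketch (the exception text is illustrative):

.. code-block:: python

   from pyasn1.codec.ber import decoder as ber_decoder
   from pyasn1.codec.cer import decoder as cer_decoder
   from pyasn1 import error

   # BER accepts any non-zero payload octet as TRUE ...
   ber_decoder.decode(b'\x01\x01\x01')

   # ... while CER insists on the canonical 0xFF / 0x00 payloads
   try:
       cer_decoder.decode(b'\x01\x01\x01')
   except error.PyAsn1Error as exc:
       print(exc)   # e.g. "Unexpected Boolean payload: 1"
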
diff --git a/third_party/python/pyasn1/pyasn1/codec/cer/encoder.py b/third_party/python/pyasn1/pyasn1/codec/cer/encoder.py
new file mode 100644
index 0000000000..935b696561
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/cer/encoder.py
@@ -0,0 +1,313 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.ber import encoder
+from pyasn1.compat.octets import str2octs, null
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['encode']
+
+
+class BooleanEncoder(encoder.IntegerEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ if value == 0:
+ substrate = (0,)
+ else:
+ substrate = (255,)
+ return substrate, False, False
+
+
+class RealEncoder(encoder.RealEncoder):
+ def _chooseEncBase(self, value):
+ m, b, e = value
+ return self._dropFloatingPoint(m, b, e)
+
+
+# specialized GeneralStringEncoder here
+
+class TimeEncoderMixIn(object):
+ Z_CHAR = ord('Z')
+ PLUS_CHAR = ord('+')
+ MINUS_CHAR = ord('-')
+ COMMA_CHAR = ord(',')
+ DOT_CHAR = ord('.')
+ ZERO_CHAR = ord('0')
+
+ MIN_LENGTH = 12
+ MAX_LENGTH = 19
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ # CER encoding constraints:
+ # - minutes are mandatory, seconds are optional
+ # - sub-seconds must NOT be zero / no meaningless zeros
+ # - no hanging fraction dot
+ # - time in UTC (Z)
+ # - only dot is allowed for fractions
+
+ if asn1Spec is not None:
+ value = asn1Spec.clone(value)
+
+ numbers = value.asNumbers()
+
+ if self.PLUS_CHAR in numbers or self.MINUS_CHAR in numbers:
+ raise error.PyAsn1Error('Must be UTC time: %r' % value)
+
+ if numbers[-1] != self.Z_CHAR:
+ raise error.PyAsn1Error('Missing "Z" time zone specifier: %r' % value)
+
+ if self.COMMA_CHAR in numbers:
+ raise error.PyAsn1Error('Comma in fractions disallowed: %r' % value)
+
+ if self.DOT_CHAR in numbers:
+
+ isModified = False
+
+ numbers = list(numbers)
+
+ searchIndex = min(numbers.index(self.DOT_CHAR) + 4, len(numbers) - 1)
+
+ while numbers[searchIndex] != self.DOT_CHAR:
+ if numbers[searchIndex] == self.ZERO_CHAR:
+ del numbers[searchIndex]
+ isModified = True
+
+ searchIndex -= 1
+
+ searchIndex += 1
+
+ if searchIndex < len(numbers):
+ if numbers[searchIndex] == self.Z_CHAR:
+                    # drop hanging fraction dot
+ del numbers[searchIndex - 1]
+ isModified = True
+
+ if isModified:
+ value = value.clone(numbers)
+
+ if not self.MIN_LENGTH < len(numbers) < self.MAX_LENGTH:
+ raise error.PyAsn1Error('Length constraint violated: %r' % value)
+
+ options.update(maxChunkSize=1000)
+
+ return encoder.OctetStringEncoder.encodeValue(
+ self, value, asn1Spec, encodeFun, **options
+ )
+
+
+class GeneralizedTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder):
+ MIN_LENGTH = 12
+ MAX_LENGTH = 20
+
+
+class UTCTimeEncoder(TimeEncoderMixIn, encoder.OctetStringEncoder):
+ MIN_LENGTH = 10
+ MAX_LENGTH = 14
+
+
+class SetOfEncoder(encoder.SequenceOfEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+ chunks = self._encodeComponents(
+ value, asn1Spec, encodeFun, **options)
+
+ # sort by serialised and padded components
+ if len(chunks) > 1:
+ zero = str2octs('\x00')
+ maxLen = max(map(len, chunks))
+ paddedChunks = [
+ (x.ljust(maxLen, zero), x) for x in chunks
+ ]
+ paddedChunks.sort(key=lambda x: x[0])
+
+ chunks = [x[1] for x in paddedChunks]
+
+ return null.join(chunks), True, True
+
+
+class SequenceOfEncoder(encoder.SequenceOfEncoder):
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+
+ if options.get('ifNotEmpty', False) and not len(value):
+ return null, True, True
+
+ chunks = self._encodeComponents(
+ value, asn1Spec, encodeFun, **options)
+
+ return null.join(chunks), True, True
+
+
+class SetEncoder(encoder.SequenceEncoder):
+ @staticmethod
+ def _componentSortKey(componentAndType):
+ """Sort SET components by tag
+
+ Sort regardless of the Choice value (static sort)
+ """
+ component, asn1Spec = componentAndType
+
+ if asn1Spec is None:
+ asn1Spec = component
+
+        if asn1Spec.typeId == univ.Choice.typeId and not asn1Spec.tagSet:
+            # an untagged Choice carries no tags of its own; sort it by
+            # the smallest tag among its alternatives
+            return asn1Spec.componentType.minTagSet
+        else:
+            return asn1Spec.tagSet
+
+ def encodeValue(self, value, asn1Spec, encodeFun, **options):
+
+ substrate = null
+
+ comps = []
+ compsMap = {}
+
+ if asn1Spec is None:
+ # instance of ASN.1 schema
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ namedTypes = value.componentType
+
+ for idx, component in enumerate(value.values()):
+ if namedTypes:
+ namedType = namedTypes[idx]
+
+ if namedType.isOptional and not component.isValue:
+ continue
+
+ if namedType.isDefaulted and component == namedType.asn1Object:
+ continue
+
+ compsMap[id(component)] = namedType
+
+ else:
+ compsMap[id(component)] = None
+
+ comps.append((component, asn1Spec))
+
+ else:
+ # bare Python value + ASN.1 schema
+ for idx, namedType in enumerate(asn1Spec.componentType.namedTypes):
+
+ try:
+ component = value[namedType.name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Component name "%s" not found in %r' % (namedType.name, value))
+
+ if namedType.isOptional and namedType.name not in value:
+ continue
+
+ if namedType.isDefaulted and component == namedType.asn1Object:
+ continue
+
+ compsMap[id(component)] = namedType
+ comps.append((component, asn1Spec[idx]))
+
+ for comp, compType in sorted(comps, key=self._componentSortKey):
+ namedType = compsMap[id(comp)]
+
+ if namedType:
+ options.update(ifNotEmpty=namedType.isOptional)
+
+ chunk = encodeFun(comp, compType, **options)
+
+ # wrap open type blob if needed
+ if namedType and namedType.openType:
+ wrapType = namedType.asn1Object
+ if wrapType.tagSet and not wrapType.isSameTypeWith(comp):
+ chunk = encodeFun(chunk, wrapType, **options)
+
+ substrate += chunk
+
+ return substrate, True, True
+
+
+class SequenceEncoder(encoder.SequenceEncoder):
+ omitEmptyOptionals = True
+
+
+tagMap = encoder.tagMap.copy()
+tagMap.update({
+ univ.Boolean.tagSet: BooleanEncoder(),
+ univ.Real.tagSet: RealEncoder(),
+ useful.GeneralizedTime.tagSet: GeneralizedTimeEncoder(),
+ useful.UTCTime.tagSet: UTCTimeEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.SetOf.tagSet: SetOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder()
+})
+
+typeMap = encoder.typeMap.copy()
+typeMap.update({
+ univ.Boolean.typeId: BooleanEncoder(),
+ univ.Real.typeId: RealEncoder(),
+ useful.GeneralizedTime.typeId: GeneralizedTimeEncoder(),
+ useful.UTCTime.typeId: UTCTimeEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.Set.typeId: SetEncoder(),
+ univ.SetOf.typeId: SetOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder(),
+ univ.SequenceOf.typeId: SequenceOfEncoder()
+})
+
+
+class Encoder(encoder.Encoder):
+ fixedDefLengthMode = False
+ fixedChunkSize = 1000
+
+#: Turns ASN.1 object into CER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative),
+#: walks all its components recursively and produces a CER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#:     A Python or pyasn1 object to encode. If a Python object is given, the `asn1Spec`
+#:     parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#:         Given ASN.1 object encoded into CER octet stream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into CER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'
+#:
+#: Encode ASN.1 value object into CER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\x80\x02\x01\x01\x02\x01\x02\x02\x01\x03\x00\x00'
+#:
+encode = Encoder(tagMap, typeMap)
+
+# EncoderFactory queries class instance and builds a map of tags -> encoders
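
A sketch of the ``TimeEncoderMixIn`` constraints above (assumed time values;
outputs omitted):

.. code-block:: python

   from pyasn1.codec.cer import encoder
   from pyasn1.type import useful
   from pyasn1 import error

   # minutes and the trailing 'Z' are mandatory
   encoder.encode(useful.UTCTime('990801120112Z'))

   # meaningless trailing zeros in the fraction are trimmed (to '.5' here)
   encoder.encode(useful.GeneralizedTime('20170801120112.500Z'))

   # a local-time offset violates the UTC-only rule
   try:
       encoder.encode(useful.UTCTime('990801120112+0200'))
   except error.PyAsn1Error as exc:
       print(exc)   # e.g. "Must be UTC time: ..."
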
diff --git a/third_party/python/pyasn1/pyasn1/codec/der/__init__.py b/third_party/python/pyasn1/pyasn1/codec/der/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/der/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/third_party/python/pyasn1/pyasn1/codec/der/decoder.py b/third_party/python/pyasn1/pyasn1/codec/der/decoder.py
new file mode 100644
index 0000000000..1a13fdb5be
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/der/decoder.py
@@ -0,0 +1,94 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1.codec.cer import decoder
+from pyasn1.type import univ
+
+__all__ = ['decode']
+
+
+class BitStringDecoder(decoder.BitStringDecoder):
+ supportConstructedForm = False
+
+
+class OctetStringDecoder(decoder.OctetStringDecoder):
+ supportConstructedForm = False
+
+# TODO: prohibit non-canonical encoding
+RealDecoder = decoder.RealDecoder
+
+tagMap = decoder.tagMap.copy()
+tagMap.update(
+ {univ.BitString.tagSet: BitStringDecoder(),
+ univ.OctetString.tagSet: OctetStringDecoder(),
+ univ.Real.tagSet: RealDecoder()}
+)
+
+typeMap = decoder.typeMap.copy()
+
+# Put in non-ambiguous types for faster codec lookup
+for typeDecoder in tagMap.values():
+ if typeDecoder.protoComponent is not None:
+ typeId = typeDecoder.protoComponent.__class__.typeId
+ if typeId is not None and typeId not in typeMap:
+ typeMap[typeId] = typeDecoder
+
+
+class Decoder(decoder.Decoder):
+ supportIndefLength = False
+
+
+#: Turns DER octet stream into an ASN.1 object.
+#:
+#: Takes a DER octet stream and decodes it into an ASN.1 object
+#: (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: substrate: :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#: DER octet-stream
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A pyasn1 type object to act as a template guiding the decoder. Depending on the ASN.1 structure
+#:     being decoded, *asn1Spec* may or may not be required. The most common reason
+#:     for it to be required is that the ASN.1 structure is encoded in *IMPLICIT* tagging mode.
+#:
+#: Returns
+#: -------
+#: : :py:class:`tuple`
+#: A tuple of pyasn1 object recovered from DER substrate (:py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: and the unprocessed trailing portion of the *substrate* (may be empty)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error, ~pyasn1.error.SubstrateUnderrunError
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode DER serialisation without ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03')
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+#: Decode DER serialisation with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> s, _ = decode(b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03', asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder(tagMap, typeMap)
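
Because ``supportIndefLength`` is turned off above, this decoder rejects the
indefinite-length form that BER and CER accept; a sketch (exception text is
illustrative):

.. code-block:: python

   from pyasn1.codec.der import decoder
   from pyasn1 import error

   try:
       # indefinite-length SEQUENCE framing (0x80 ... 0x00 0x00)
       decoder.decode(b'0\x80\x02\x01\x01\x00\x00')
   except error.PyAsn1Error as exc:
       print(exc)   # e.g. "Indefinite length encoding not supported ..."
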
diff --git a/third_party/python/pyasn1/pyasn1/codec/der/encoder.py b/third_party/python/pyasn1/pyasn1/codec/der/encoder.py
new file mode 100644
index 0000000000..90e982daa4
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/der/encoder.py
@@ -0,0 +1,107 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1 import error
+from pyasn1.codec.cer import encoder
+from pyasn1.type import univ
+
+__all__ = ['encode']
+
+
+class SetEncoder(encoder.SetEncoder):
+ @staticmethod
+ def _componentSortKey(componentAndType):
+ """Sort SET components by tag
+
+ Sort depending on the actual Choice value (dynamic sort)
+ """
+ component, asn1Spec = componentAndType
+
+ if asn1Spec is None:
+ compType = component
+ else:
+ compType = asn1Spec
+
+ if compType.typeId == univ.Choice.typeId and not compType.tagSet:
+ if asn1Spec is None:
+ return component.getComponent().tagSet
+ else:
+ # TODO: move out of sorting key function
+ names = [namedType.name for namedType in asn1Spec.componentType.namedTypes
+ if namedType.name in component]
+ if len(names) != 1:
+ raise error.PyAsn1Error(
+                        '%s components for Choice at %r' % (len(names) and 'Multiple' or 'No', component))
+
+ # TODO: support nested CHOICE ordering
+ return asn1Spec[names[0]].tagSet
+
+ else:
+ return compType.tagSet
+
+tagMap = encoder.tagMap.copy()
+tagMap.update({
+ # Set & SetOf have same tags
+ univ.Set.tagSet: SetEncoder()
+})
+
+typeMap = encoder.typeMap.copy()
+typeMap.update({
+ # Set & SetOf have same tags
+ univ.Set.typeId: SetEncoder()
+})
+
+
+class Encoder(encoder.Encoder):
+ fixedDefLengthMode = True
+ fixedChunkSize = 0
+
+#: Turns ASN.1 object into DER octet stream.
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative),
+#: walks all its components recursively and produces a DER octet stream.
+#:
+#: Parameters
+#: ----------
+#: value: either a Python or pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#:     A Python or pyasn1 object to encode. If a Python object is given, the `asn1Spec`
+#:     parameter is required to guide the encoding process.
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec:
+#: Optional ASN.1 schema or value object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:
+#: Returns
+#: -------
+#: : :py:class:`bytes` (Python 3) or :py:class:`str` (Python 2)
+#:         Given ASN.1 object encoded into DER octet stream
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode Python value into DER with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> encode([1, 2, 3], asn1Spec=seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+#: Encode ASN.1 value object into DER
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: b'0\t\x02\x01\x01\x02\x01\x02\x02\x01\x03'
+#:
+encode = Encoder(tagMap, typeMap)
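
Since ``fixedDefLengthMode`` and ``fixedChunkSize`` above pin this codec to
definite lengths and unchunked values, conflicting keyword arguments are
silently overridden; a minimal sketch:

.. code-block:: python

   from pyasn1.codec.der import encoder
   from pyasn1.type import univ

   # the defMode=False request is overridden; output stays definite-length
   assert encoder.encode(univ.OctetString('abc'), defMode=False) == b'\x04\x03abc'
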
diff --git a/third_party/python/pyasn1/pyasn1/codec/native/__init__.py b/third_party/python/pyasn1/pyasn1/codec/native/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/native/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/third_party/python/pyasn1/pyasn1/codec/native/decoder.py b/third_party/python/pyasn1/pyasn1/codec/native/decoder.py
new file mode 100644
index 0000000000..104b92e6d3
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/native/decoder.py
@@ -0,0 +1,213 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['decode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_DECODER)
+
+
+class AbstractScalarDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ return asn1Spec.clone(pyObject)
+
+
+class BitStringDecoder(AbstractScalarDecoder):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ return asn1Spec.clone(univ.BitString.fromBinaryString(pyObject))
+
+
+class SequenceOrSetDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ componentsTypes = asn1Spec.componentType
+
+ for field in asn1Value:
+ if field in pyObject:
+ asn1Value[field] = decodeFun(pyObject[field], componentsTypes[field].asn1Object, **options)
+
+ return asn1Value
+
+
+class SequenceOfOrSetOfDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ for pyValue in pyObject:
+            asn1Value.append(decodeFun(pyValue, asn1Spec.componentType, **options))
+
+ return asn1Value
+
+
+class ChoiceDecoder(object):
+ def __call__(self, pyObject, asn1Spec, decodeFun=None, **options):
+ asn1Value = asn1Spec.clone()
+
+ componentsTypes = asn1Spec.componentType
+
+ for field in pyObject:
+ if field in componentsTypes:
+ asn1Value[field] = decodeFun(pyObject[field], componentsTypes[field].asn1Object, **options)
+ break
+
+ return asn1Value
+
+
+tagMap = {
+ univ.Integer.tagSet: AbstractScalarDecoder(),
+ univ.Boolean.tagSet: AbstractScalarDecoder(),
+ univ.BitString.tagSet: BitStringDecoder(),
+ univ.OctetString.tagSet: AbstractScalarDecoder(),
+ univ.Null.tagSet: AbstractScalarDecoder(),
+ univ.ObjectIdentifier.tagSet: AbstractScalarDecoder(),
+ univ.Enumerated.tagSet: AbstractScalarDecoder(),
+ univ.Real.tagSet: AbstractScalarDecoder(),
+ univ.Sequence.tagSet: SequenceOrSetDecoder(), # conflicts with SequenceOf
+ univ.Set.tagSet: SequenceOrSetDecoder(), # conflicts with SetOf
+ univ.Choice.tagSet: ChoiceDecoder(), # conflicts with Any
+ # character string types
+ char.UTF8String.tagSet: AbstractScalarDecoder(),
+ char.NumericString.tagSet: AbstractScalarDecoder(),
+ char.PrintableString.tagSet: AbstractScalarDecoder(),
+ char.TeletexString.tagSet: AbstractScalarDecoder(),
+ char.VideotexString.tagSet: AbstractScalarDecoder(),
+ char.IA5String.tagSet: AbstractScalarDecoder(),
+ char.GraphicString.tagSet: AbstractScalarDecoder(),
+ char.VisibleString.tagSet: AbstractScalarDecoder(),
+ char.GeneralString.tagSet: AbstractScalarDecoder(),
+ char.UniversalString.tagSet: AbstractScalarDecoder(),
+ char.BMPString.tagSet: AbstractScalarDecoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: AbstractScalarDecoder(),
+ useful.GeneralizedTime.tagSet: AbstractScalarDecoder(),
+ useful.UTCTime.tagSet: AbstractScalarDecoder()
+}
+
+# Put in ambiguous & non-ambiguous types for faster codec lookup
+typeMap = {
+ univ.Integer.typeId: AbstractScalarDecoder(),
+ univ.Boolean.typeId: AbstractScalarDecoder(),
+ univ.BitString.typeId: BitStringDecoder(),
+ univ.OctetString.typeId: AbstractScalarDecoder(),
+ univ.Null.typeId: AbstractScalarDecoder(),
+ univ.ObjectIdentifier.typeId: AbstractScalarDecoder(),
+ univ.Enumerated.typeId: AbstractScalarDecoder(),
+ univ.Real.typeId: AbstractScalarDecoder(),
+ # ambiguous base types
+ univ.Set.typeId: SequenceOrSetDecoder(),
+ univ.SetOf.typeId: SequenceOfOrSetOfDecoder(),
+ univ.Sequence.typeId: SequenceOrSetDecoder(),
+ univ.SequenceOf.typeId: SequenceOfOrSetOfDecoder(),
+ univ.Choice.typeId: ChoiceDecoder(),
+ univ.Any.typeId: AbstractScalarDecoder(),
+ # character string types
+ char.UTF8String.typeId: AbstractScalarDecoder(),
+ char.NumericString.typeId: AbstractScalarDecoder(),
+ char.PrintableString.typeId: AbstractScalarDecoder(),
+ char.TeletexString.typeId: AbstractScalarDecoder(),
+ char.VideotexString.typeId: AbstractScalarDecoder(),
+ char.IA5String.typeId: AbstractScalarDecoder(),
+ char.GraphicString.typeId: AbstractScalarDecoder(),
+ char.VisibleString.typeId: AbstractScalarDecoder(),
+ char.GeneralString.typeId: AbstractScalarDecoder(),
+ char.UniversalString.typeId: AbstractScalarDecoder(),
+ char.BMPString.typeId: AbstractScalarDecoder(),
+ # useful types
+ useful.ObjectDescriptor.typeId: AbstractScalarDecoder(),
+ useful.GeneralizedTime.typeId: AbstractScalarDecoder(),
+ useful.UTCTime.typeId: AbstractScalarDecoder()
+}
+
+
+class Decoder(object):
+
+ # noinspection PyDefaultArgument
+ def __init__(self, tagMap, typeMap):
+ self.__tagMap = tagMap
+ self.__typeMap = typeMap
+
+ def __call__(self, pyObject, asn1Spec, **options):
+
+ if LOG:
+ debug.scope.push(type(pyObject).__name__)
+ LOG('decoder called at scope %s, working with type %s' % (debug.scope, type(pyObject).__name__))
+
+ if asn1Spec is None or not isinstance(asn1Spec, base.Asn1Item):
+ raise error.PyAsn1Error('asn1Spec is not valid (should be an instance of an ASN.1 Item, not %s)' % asn1Spec.__class__.__name__)
+
+ try:
+ valueDecoder = self.__typeMap[asn1Spec.typeId]
+
+ except KeyError:
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(asn1Spec.tagSet.baseTag, asn1Spec.tagSet.baseTag)
+
+ try:
+ valueDecoder = self.__tagMap[baseTagSet]
+ except KeyError:
+ raise error.PyAsn1Error('Unknown ASN.1 tag %s' % asn1Spec.tagSet)
+
+ if LOG:
+ LOG('calling decoder %s on Python type %s <%s>' % (type(valueDecoder).__name__, type(pyObject).__name__, repr(pyObject)))
+
+ value = valueDecoder(pyObject, asn1Spec, self, **options)
+
+ if LOG:
+ LOG('decoder %s produced ASN.1 type %s <%s>' % (type(valueDecoder).__name__, type(value).__name__, repr(value)))
+ debug.scope.pop()
+
+ return value
+
+
+#: Turns Python objects of built-in types into ASN.1 objects.
+#:
+#: Takes Python objects of built-in types and turns them into a tree of
+#: ASN.1 objects (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative) which
+#: may be a scalar or an arbitrary nested structure.
+#:
+#: Parameters
+#: ----------
+#: pyObject: :py:class:`object`
+#:     A scalar Python object or a nested structure of them
+#:
+#: Keyword Args
+#: ------------
+#: asn1Spec: any pyasn1 type object e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#:     A pyasn1 type object to act as a template guiding the decoder. It is required
+#:     for mapping Python objects onto their intended ASN.1 representations.
+#:
+#: Returns
+#: -------
+#: : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+#: A scalar or constructed pyasn1 object
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On decoding errors
+#:
+#: Examples
+#: --------
+#: Decode native Python object into ASN.1 objects with ASN.1 schema
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#:    >>> s = decode([1, 2, 3], asn1Spec=seq)
+#: >>> str(s)
+#: SequenceOf:
+#: 1 2 3
+#:
+decode = Decoder(tagMap, typeMap)
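
A sketch of decoding a Python :py:class:`dict` against a constructed schema;
the ``Point`` type is hypothetical, defined here only for illustration:

.. code-block:: python

   from pyasn1.codec.native import decoder
   from pyasn1.type import namedtype, univ

   class Point(univ.Sequence):
       componentType = namedtype.NamedTypes(
           namedtype.NamedType('x', univ.Integer()),
           namedtype.NamedType('y', univ.Integer())
       )

   point = decoder.decode({'x': 1, 'y': 2}, asn1Spec=Point())
   print(point.prettyPrint())
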
diff --git a/third_party/python/pyasn1/pyasn1/codec/native/encoder.py b/third_party/python/pyasn1/pyasn1/codec/native/encoder.py
new file mode 100644
index 0000000000..4318abde6f
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/codec/native/encoder.py
@@ -0,0 +1,256 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+try:
+ from collections import OrderedDict
+
+except ImportError:
+    # Python < 2.7 fallback; component ordering may be lost
+    OrderedDict = dict
+
+from pyasn1 import debug
+from pyasn1 import error
+from pyasn1.type import base
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+__all__ = ['encode']
+
+LOG = debug.registerLoggee(__name__, flags=debug.DEBUG_ENCODER)
+
+
+class AbstractItemEncoder(object):
+ def encode(self, value, encodeFun, **options):
+ raise error.PyAsn1Error('Not implemented')
+
+
+class BooleanEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return bool(value)
+
+
+class IntegerEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return int(value)
+
+
+class BitStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class OctetStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return value.asOctets()
+
+
+class TextStringEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class NullEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return None
+
+
+class ObjectIdentifierEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return str(value)
+
+
+class RealEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return float(value)
+
+
+class SetEncoder(AbstractItemEncoder):
+ protoDict = dict
+
+ def encode(self, value, encodeFun, **options):
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+
+ namedTypes = value.componentType
+ substrate = self.protoDict()
+
+ for idx, (key, subValue) in enumerate(value.items()):
+ if namedTypes and namedTypes[idx].isOptional and not value[idx].isValue:
+ continue
+ substrate[key] = encodeFun(subValue, **options)
+ return substrate
+
+
+class SequenceEncoder(SetEncoder):
+ protoDict = OrderedDict
+
+
+class SequenceOfEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ inconsistency = value.isInconsistent
+ if inconsistency:
+ raise inconsistency
+ return [encodeFun(x, **options) for x in value]
+
+
+class ChoiceEncoder(SequenceEncoder):
+ pass
+
+
+class AnyEncoder(AbstractItemEncoder):
+ def encode(self, value, encodeFun, **options):
+ return value.asOctets()
+
+
+tagMap = {
+ univ.Boolean.tagSet: BooleanEncoder(),
+ univ.Integer.tagSet: IntegerEncoder(),
+ univ.BitString.tagSet: BitStringEncoder(),
+ univ.OctetString.tagSet: OctetStringEncoder(),
+ univ.Null.tagSet: NullEncoder(),
+ univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
+ univ.Enumerated.tagSet: IntegerEncoder(),
+ univ.Real.tagSet: RealEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.SequenceOf.tagSet: SequenceOfEncoder(),
+ univ.SetOf.tagSet: SequenceOfEncoder(),
+ univ.Choice.tagSet: ChoiceEncoder(),
+ # character string types
+ char.UTF8String.tagSet: TextStringEncoder(),
+ char.NumericString.tagSet: TextStringEncoder(),
+ char.PrintableString.tagSet: TextStringEncoder(),
+ char.TeletexString.tagSet: TextStringEncoder(),
+ char.VideotexString.tagSet: TextStringEncoder(),
+ char.IA5String.tagSet: TextStringEncoder(),
+ char.GraphicString.tagSet: TextStringEncoder(),
+ char.VisibleString.tagSet: TextStringEncoder(),
+ char.GeneralString.tagSet: TextStringEncoder(),
+ char.UniversalString.tagSet: TextStringEncoder(),
+ char.BMPString.tagSet: TextStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.tagSet: OctetStringEncoder(),
+ useful.GeneralizedTime.tagSet: OctetStringEncoder(),
+ useful.UTCTime.tagSet: OctetStringEncoder()
+}
+
+
+# Put in ambiguous & non-ambiguous types for faster codec lookup
+typeMap = {
+ univ.Boolean.typeId: BooleanEncoder(),
+ univ.Integer.typeId: IntegerEncoder(),
+ univ.BitString.typeId: BitStringEncoder(),
+ univ.OctetString.typeId: OctetStringEncoder(),
+ univ.Null.typeId: NullEncoder(),
+ univ.ObjectIdentifier.typeId: ObjectIdentifierEncoder(),
+ univ.Enumerated.typeId: IntegerEncoder(),
+ univ.Real.typeId: RealEncoder(),
+ # Sequence & Set have same tags as SequenceOf & SetOf
+ univ.Set.typeId: SetEncoder(),
+ univ.SetOf.typeId: SequenceOfEncoder(),
+ univ.Sequence.typeId: SequenceEncoder(),
+ univ.SequenceOf.typeId: SequenceOfEncoder(),
+ univ.Choice.typeId: ChoiceEncoder(),
+ univ.Any.typeId: AnyEncoder(),
+ # character string types
+    # (TextStringEncoder, matching tagMap above, so typed character
+    # string values also render as text rather than bytes)
+    char.UTF8String.typeId: TextStringEncoder(),
+    char.NumericString.typeId: TextStringEncoder(),
+    char.PrintableString.typeId: TextStringEncoder(),
+    char.TeletexString.typeId: TextStringEncoder(),
+    char.VideotexString.typeId: TextStringEncoder(),
+    char.IA5String.typeId: TextStringEncoder(),
+    char.GraphicString.typeId: TextStringEncoder(),
+    char.VisibleString.typeId: TextStringEncoder(),
+    char.GeneralString.typeId: TextStringEncoder(),
+    char.UniversalString.typeId: TextStringEncoder(),
+    char.BMPString.typeId: TextStringEncoder(),
+ # useful types
+ useful.ObjectDescriptor.typeId: OctetStringEncoder(),
+ useful.GeneralizedTime.typeId: OctetStringEncoder(),
+ useful.UTCTime.typeId: OctetStringEncoder()
+}
+
+
+class Encoder(object):
+
+ # noinspection PyDefaultArgument
+ def __init__(self, tagMap, typeMap={}):
+ self.__tagMap = tagMap
+ self.__typeMap = typeMap
+
+ def __call__(self, value, **options):
+ if not isinstance(value, base.Asn1Item):
+ raise error.PyAsn1Error('value is not valid (should be an instance of an ASN.1 Item)')
+
+ if LOG:
+ debug.scope.push(type(value).__name__)
+ LOG('encoder called for type %s <%s>' % (type(value).__name__, value.prettyPrint()))
+
+ tagSet = value.tagSet
+
+ try:
+ concreteEncoder = self.__typeMap[value.typeId]
+
+ except KeyError:
+ # use base type for codec lookup to recover untagged types
+ baseTagSet = tag.TagSet(value.tagSet.baseTag, value.tagSet.baseTag)
+
+ try:
+ concreteEncoder = self.__tagMap[baseTagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('No encoder for %s' % (value,))
+
+ if LOG:
+ LOG('using value codec %s chosen by %s' % (concreteEncoder.__class__.__name__, tagSet))
+
+ pyObject = concreteEncoder.encode(value, self, **options)
+
+ if LOG:
+ LOG('encoder %s produced: %s' % (type(concreteEncoder).__name__, repr(pyObject)))
+ debug.scope.pop()
+
+ return pyObject
+
+
+#: Turns ASN.1 object into a Python built-in type object(s).
+#:
+#: Takes any ASN.1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: walks all its components recursively and produces a Python built-in type or a tree
+#: of those.
+#:
+#: One exception is that, instead of :py:class:`dict`, an :py:class:`OrderedDict`
+#: is produced (whenever available) to preserve the ordering of components
+#: in an ASN.1 SEQUENCE.
+#:
+#: Parameters
+#: ----------
+#: asn1Value: any pyasn1 object (e.g. :py:class:`~pyasn1.type.base.PyAsn1Item` derivative)
+#: pyasn1 object to encode (or a tree of them)
+#:
+#: Returns
+#: -------
+#: : :py:class:`object`
+#: Python built-in type instance (or a tree of them)
+#:
+#: Raises
+#: ------
+#: ~pyasn1.error.PyAsn1Error
+#: On encoding errors
+#:
+#: Examples
+#: --------
+#: Encode ASN.1 value object into native Python types
+#:
+#: .. code-block:: pycon
+#:
+#: >>> seq = SequenceOf(componentType=Integer())
+#: >>> seq.extend([1, 2, 3])
+#: >>> encode(seq)
+#: [1, 2, 3]
+#:
+encode = Encoder(tagMap, typeMap)
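
A round-trip sketch: for built-in container types, this encoder is the
inverse of the native decoder above:

.. code-block:: python

   from pyasn1.codec.native import encoder
   from pyasn1.type import univ

   seq = univ.SequenceOf(componentType=univ.Integer())
   seq.extend([1, 2, 3])
   assert encoder.encode(seq) == [1, 2, 3]
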
diff --git a/third_party/python/pyasn1/pyasn1/compat/__init__.py b/third_party/python/pyasn1/pyasn1/compat/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/compat/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/third_party/python/pyasn1/pyasn1/compat/binary.py b/third_party/python/pyasn1/pyasn1/compat/binary.py
new file mode 100644
index 0000000000..addbdc9caa
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/compat/binary.py
@@ -0,0 +1,33 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from sys import version_info
+
+if version_info[0:2] < (2, 6):
+ def bin(value):
+ bitstring = []
+
+ if value > 0:
+ prefix = '0b'
+ elif value < 0:
+ prefix = '-0b'
+ value = abs(value)
+ else:
+ prefix = '0b0'
+
+ while value:
+ if value & 1 == 1:
+ bitstring.append('1')
+ else:
+ bitstring.append('0')
+
+ value >>= 1
+
+ bitstring.reverse()
+
+ return prefix + ''.join(bitstring)
+else:
+ bin = bin
diff --git a/third_party/python/pyasn1/pyasn1/compat/calling.py b/third_party/python/pyasn1/pyasn1/compat/calling.py
new file mode 100644
index 0000000000..778a3d15d0
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/compat/calling.py
@@ -0,0 +1,20 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from sys import version_info
+
+__all__ = ['callable']
+
+
+if (2, 7) < version_info[:2] < (3, 2):
+ import collections
+
+ def callable(x):
+ return isinstance(x, collections.Callable)
+
+else:
+
+ callable = callable
diff --git a/third_party/python/pyasn1/pyasn1/compat/dateandtime.py b/third_party/python/pyasn1/pyasn1/compat/dateandtime.py
new file mode 100644
index 0000000000..5e471bf761
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/compat/dateandtime.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import time
+from datetime import datetime
+from sys import version_info
+
+__all__ = ['strptime']
+
+
+if version_info[:2] <= (2, 4):
+
+ def strptime(text, dateFormat):
+ return datetime(*(time.strptime(text, dateFormat)[0:6]))
+
+else:
+
+ def strptime(text, dateFormat):
+ return datetime.strptime(text, dateFormat)
diff --git a/third_party/python/pyasn1/pyasn1/compat/integer.py b/third_party/python/pyasn1/pyasn1/compat/integer.py
new file mode 100644
index 0000000000..4b31791d5e
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/compat/integer.py
@@ -0,0 +1,110 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+
+try:
+ import platform
+
+ implementation = platform.python_implementation()
+
+except (ImportError, AttributeError):
+ implementation = 'CPython'
+
+from pyasn1.compat.octets import oct2int, null, ensureString
+
+if sys.version_info[0:2] < (3, 2) or implementation != 'CPython':
+ from binascii import a2b_hex, b2a_hex
+
+ if sys.version_info[0] > 2:
+ long = int
+
+ def from_bytes(octets, signed=False):
+ if not octets:
+ return 0
+
+ value = long(b2a_hex(ensureString(octets)), 16)
+
+ if signed and oct2int(octets[0]) & 0x80:
+ return value - (1 << len(octets) * 8)
+
+ return value
+
+ def to_bytes(value, signed=False, length=0):
+ if value < 0:
+ if signed:
+ bits = bitLength(value)
+
+ # two's complement form
+ maxValue = 1 << bits
+ valueToEncode = (value + maxValue) % maxValue
+
+ else:
+ raise OverflowError('can\'t convert negative int to unsigned')
+ elif value == 0 and length == 0:
+ return null
+ else:
+ bits = 0
+ valueToEncode = value
+
+ hexValue = hex(valueToEncode)[2:]
+ if hexValue.endswith('L'):
+ hexValue = hexValue[:-1]
+
+ if len(hexValue) & 1:
+ hexValue = '0' + hexValue
+
+ # padding may be needed for two's complement encoding
+ if value != valueToEncode or length:
+ hexLength = len(hexValue) * 4
+
+ padLength = max(length, bits)
+
+ if padLength > hexLength:
+ hexValue = '00' * ((padLength - hexLength - 1) // 8 + 1) + hexValue
+ elif length and hexLength - length > 7:
+ raise OverflowError('int too big to convert')
+
+ firstOctet = int(hexValue[:2], 16)
+
+ if signed:
+ if firstOctet & 0x80:
+ if value >= 0:
+ hexValue = '00' + hexValue
+ elif value < 0:
+ hexValue = 'ff' + hexValue
+
+ octets_value = a2b_hex(hexValue)
+
+ return octets_value
+
+ def bitLength(number):
+ # bits in unsigned number
+ hexValue = hex(abs(number))
+ bits = len(hexValue) - 2
+ if hexValue.endswith('L'):
+ bits -= 1
+ if bits & 1:
+ bits += 1
+ bits *= 4
+ # TODO: strip lhs zeros
+ return bits
+
+else:
+
+ def from_bytes(octets, signed=False):
+ return int.from_bytes(bytes(octets), 'big', signed=signed)
+
+ def to_bytes(value, signed=False, length=0):
+ length = max(value.bit_length(), length)
+
+ if signed and length % 8 == 0:
+ length += 1
+
+ return value.to_bytes(length // 8 + (length % 8 and 1 or 0), 'big', signed=signed)
+
+ def bitLength(number):
+ return int(number).bit_length()
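
Both branches implement the same big-endian, optionally two's complement
contract; a sketch of the expected behaviour:

.. code-block:: python

   from pyasn1.compat.integer import from_bytes, to_bytes

   assert to_bytes(300) == b'\x01\x2c'           # minimal big-endian
   assert to_bytes(-1, signed=True) == b'\xff'   # two's complement
   assert from_bytes(b'\xff', signed=True) == -1
   assert from_bytes(b'\x00\xff') == 255         # unsigned by default
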
diff --git a/third_party/python/pyasn1/pyasn1/compat/octets.py b/third_party/python/pyasn1/pyasn1/compat/octets.py
new file mode 100644
index 0000000000..99d23bb3f1
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/compat/octets.py
@@ -0,0 +1,46 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from sys import version_info
+
+if version_info[0] <= 2:
+ int2oct = chr
+ # noinspection PyPep8
+ ints2octs = lambda s: ''.join([int2oct(x) for x in s])
+ null = ''
+ oct2int = ord
+ # TODO: refactor to return a sequence of ints
+ # noinspection PyPep8
+ octs2ints = lambda s: [oct2int(x) for x in s]
+ # noinspection PyPep8
+ str2octs = lambda x: x
+ # noinspection PyPep8
+ octs2str = lambda x: x
+ # noinspection PyPep8
+ isOctetsType = lambda s: isinstance(s, str)
+ # noinspection PyPep8
+ isStringType = lambda s: isinstance(s, (str, unicode))
+ # noinspection PyPep8
+ ensureString = str
+else:
+ ints2octs = bytes
+ # noinspection PyPep8
+ int2oct = lambda x: ints2octs((x,))
+ null = ints2octs()
+ # noinspection PyPep8
+ oct2int = lambda x: x
+ # noinspection PyPep8
+ octs2ints = lambda x: x
+ # noinspection PyPep8
+ str2octs = lambda x: x.encode('iso-8859-1')
+ # noinspection PyPep8
+ octs2str = lambda x: x.decode('iso-8859-1')
+ # noinspection PyPep8
+ isOctetsType = lambda s: isinstance(s, bytes)
+ # noinspection PyPep8
+ isStringType = lambda s: isinstance(s, str)
+ # noinspection PyPep8
+ ensureString = bytes
diff --git a/third_party/python/pyasn1/pyasn1/compat/string.py b/third_party/python/pyasn1/pyasn1/compat/string.py
new file mode 100644
index 0000000000..b9bc8c3802
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/compat/string.py
@@ -0,0 +1,26 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from sys import version_info
+
+if version_info[:2] <= (2, 5):
+
+ def partition(string, sep):
+ try:
+ a, c = string.split(sep, 1)
+
+ except ValueError:
+ a, b, c = string, '', ''
+
+ else:
+ b = sep
+
+ return a, b, c
+
+else:
+
+ def partition(string, sep):
+ return string.partition(sep)
diff --git a/third_party/python/pyasn1/pyasn1/debug.py b/third_party/python/pyasn1/pyasn1/debug.py
new file mode 100644
index 0000000000..8707aa887e
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/debug.py
@@ -0,0 +1,157 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import logging
+import sys
+
+from pyasn1 import __version__
+from pyasn1 import error
+from pyasn1.compat.octets import octs2ints
+
+__all__ = ['Debug', 'setLogger', 'hexdump']
+
+DEBUG_NONE = 0x0000
+DEBUG_ENCODER = 0x0001
+DEBUG_DECODER = 0x0002
+DEBUG_ALL = 0xffff
+
+FLAG_MAP = {
+ 'none': DEBUG_NONE,
+ 'encoder': DEBUG_ENCODER,
+ 'decoder': DEBUG_DECODER,
+ 'all': DEBUG_ALL
+}
+
+LOGGEE_MAP = {}
+
+
+class Printer(object):
+ # noinspection PyShadowingNames
+ def __init__(self, logger=None, handler=None, formatter=None):
+ if logger is None:
+ logger = logging.getLogger('pyasn1')
+
+ logger.setLevel(logging.DEBUG)
+
+ if handler is None:
+ handler = logging.StreamHandler()
+
+ if formatter is None:
+ formatter = logging.Formatter('%(asctime)s %(name)s: %(message)s')
+
+ handler.setFormatter(formatter)
+ handler.setLevel(logging.DEBUG)
+ logger.addHandler(handler)
+
+ self.__logger = logger
+
+ def __call__(self, msg):
+ self.__logger.debug(msg)
+
+ def __str__(self):
+ return '<python logging>'
+
+
+if hasattr(logging, 'NullHandler'):
+ NullHandler = logging.NullHandler
+
+else:
+ # Python 2.6 and older
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+
+class Debug(object):
+ defaultPrinter = Printer()
+
+ def __init__(self, *flags, **options):
+ self._flags = DEBUG_NONE
+
+ if 'loggerName' in options:
+ # route our logs to parent logger
+ self._printer = Printer(
+ logger=logging.getLogger(options['loggerName']),
+ handler=NullHandler()
+ )
+
+ elif 'printer' in options:
+ self._printer = options.get('printer')
+
+ else:
+ self._printer = self.defaultPrinter
+
+ self._printer('running pyasn1 %s, debug flags %s' % (__version__, ', '.join(flags)))
+
+ for flag in flags:
+ inverse = flag and flag[0] in ('!', '~')
+ if inverse:
+ flag = flag[1:]
+ try:
+ if inverse:
+ self._flags &= ~FLAG_MAP[flag]
+ else:
+ self._flags |= FLAG_MAP[flag]
+ except KeyError:
+ raise error.PyAsn1Error('bad debug flag %s' % flag)
+
+ self._printer("debug category '%s' %s" % (flag, inverse and 'disabled' or 'enabled'))
+
+ def __str__(self):
+ return 'logger %s, flags %x' % (self._printer, self._flags)
+
+ def __call__(self, msg):
+ self._printer(msg)
+
+ def __and__(self, flag):
+ return self._flags & flag
+
+ def __rand__(self, flag):
+ return flag & self._flags
+
+_LOG = DEBUG_NONE
+
+
+def setLogger(userLogger):
+ global _LOG
+
+ if userLogger:
+ _LOG = userLogger
+ else:
+ _LOG = DEBUG_NONE
+
+ # Update registered logging clients
+ for module, (name, flags) in LOGGEE_MAP.items():
+ setattr(module, name, _LOG & flags and _LOG or DEBUG_NONE)
+
+
+def registerLoggee(module, name='LOG', flags=DEBUG_NONE):
+ LOGGEE_MAP[sys.modules[module]] = name, flags
+ setLogger(_LOG)
+ return _LOG
+
+
+def hexdump(octets):
+ return ' '.join(
+ ['%s%.2X' % (n % 16 == 0 and ('\n%.5d: ' % n) or '', x)
+ for n, x in zip(range(len(octets)), octs2ints(octets))]
+ )
+
+
+class Scope(object):
+ def __init__(self):
+ self._list = []
+
+    def __str__(self):
+        return '.'.join(self._list)
+
+ def push(self, token):
+ self._list.append(token)
+
+ def pop(self):
+ return self._list.pop()
+
+
+scope = Scope()
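
The loggee registry above is driven through :func:`setLogger`; a usage
sketch:

.. code-block:: python

   from pyasn1 import debug
   from pyasn1.codec.ber import decoder

   debug.setLogger(debug.Debug('decoder'))   # trace decoder activity
   decoder.decode(b'\x02\x01\x0c')           # emits debug records

   debug.setLogger(0)                        # silence tracing again
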
diff --git a/third_party/python/pyasn1/pyasn1/error.py b/third_party/python/pyasn1/pyasn1/error.py
new file mode 100644
index 0000000000..4f48db2516
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/error.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+
+class PyAsn1Error(Exception):
+ """Base pyasn1 exception
+
+ `PyAsn1Error` is the base exception class (based on
+ :class:`Exception`) that represents all possible ASN.1 related
+ errors.
+ """
+
+
+class ValueConstraintError(PyAsn1Error):
+ """ASN.1 type constraints violation exception
+
+ The `ValueConstraintError` exception indicates an ASN.1 value
+ constraint violation.
+
+ It might happen on value object instantiation (for scalar types) or on
+ serialization (for constructed types).
+ """
+
+
+class SubstrateUnderrunError(PyAsn1Error):
+ """ASN.1 data structure deserialization error
+
+ The `SubstrateUnderrunError` exception indicates insufficient serialised
+ data on input of a de-serialization codec.
+ """
+
+
+class PyAsn1UnicodeError(PyAsn1Error, UnicodeError):
+ """Unicode text processing error
+
+ The `PyAsn1UnicodeError` exception is a base class for errors relating to
+ unicode text de/serialization.
+
+ Apart from inheriting from :class:`PyAsn1Error`, it also inherits from
+ :class:`UnicodeError` to help the caller catching unicode-related errors.
+ """
+ def __init__(self, message, unicode_error=None):
+ if isinstance(unicode_error, UnicodeError):
+ UnicodeError.__init__(self, *unicode_error.args)
+ PyAsn1Error.__init__(self, message)
+
+
+class PyAsn1UnicodeDecodeError(PyAsn1UnicodeError, UnicodeDecodeError):
+ """Unicode text decoding error
+
+ The `PyAsn1UnicodeDecodeError` exception represents a failure to
+ deserialize unicode text.
+
+ Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
+ from :class:`UnicodeDecodeError` to help the caller catching unicode-related
+ errors.
+ """
+
+
+class PyAsn1UnicodeEncodeError(PyAsn1UnicodeError, UnicodeEncodeError):
+ """Unicode text encoding error
+
+ The `PyAsn1UnicodeEncodeError` exception represents a failure to
+ serialize unicode text.
+
+ Apart from inheriting from :class:`PyAsn1UnicodeError`, it also inherits
+ from :class:`UnicodeEncodeError` to help the caller catching
+ unicode-related errors.
+ """
+
+
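
The dual inheritance lets callers trap text-processing failures under either
exception family; a sketch:

.. code-block:: python

   from pyasn1 import error

   assert issubclass(error.PyAsn1UnicodeDecodeError, error.PyAsn1Error)
   assert issubclass(error.PyAsn1UnicodeDecodeError, UnicodeDecodeError)
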
diff --git a/third_party/python/pyasn1/pyasn1/type/__init__.py b/third_party/python/pyasn1/pyasn1/type/__init__.py
new file mode 100644
index 0000000000..8c3066b2e6
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/__init__.py
@@ -0,0 +1 @@
+# This file is necessary to make this directory a package.
diff --git a/third_party/python/pyasn1/pyasn1/type/base.py b/third_party/python/pyasn1/pyasn1/type/base.py
new file mode 100644
index 0000000000..994f1c99b3
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/base.py
@@ -0,0 +1,707 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.compat import calling
+from pyasn1.type import constraint
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+__all__ = ['Asn1Item', 'Asn1Type', 'SimpleAsn1Type',
+ 'ConstructedAsn1Type']
+
+
+class Asn1Item(object):
+ @classmethod
+ def getTypeId(cls, increment=1):
+ try:
+ Asn1Item._typeCounter += increment
+ except AttributeError:
+ Asn1Item._typeCounter = increment
+ return Asn1Item._typeCounter
+
+
+class Asn1Type(Asn1Item):
+ """Base class for all classes representing ASN.1 types.
+
+ In the user code, |ASN.1| class is normally used only for telling
+ ASN.1 objects from others.
+
+ Note
+ ----
+    As far as ASN.1 is concerned, the way to compare ASN.1 types
+    is to use the :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
+ """
+ #: Set or return a :py:class:`~pyasn1.type.tag.TagSet` object representing
+ #: ASN.1 tag(s) associated with |ASN.1| type.
+ tagSet = tag.TagSet()
+
+ #: Default :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ #: object imposing constraints on initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Disambiguation ASN.1 types identification
+ typeId = None
+
+ def __init__(self, **kwargs):
+ readOnly = {
+ 'tagSet': self.tagSet,
+ 'subtypeSpec': self.subtypeSpec
+ }
+
+ readOnly.update(kwargs)
+
+ self.__dict__.update(readOnly)
+
+ self._readOnly = readOnly
+
+ def __setattr__(self, name, value):
+ if name[0] != '_' and name in self._readOnly:
+ raise error.PyAsn1Error('read-only instance attribute "%s"' % name)
+
+ self.__dict__[name] = value
+
+ def __str__(self):
+ return self.prettyPrint()
+
+ @property
+ def readOnly(self):
+ return self._readOnly
+
+ @property
+ def effectiveTagSet(self):
+ """For |ASN.1| type is equivalent to *tagSet*
+ """
+ return self.tagSet # used by untagged types
+
+ @property
+ def tagMap(self):
+ """Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping ASN.1 tags to ASN.1 objects within callee object.
+ """
+ return tagmap.TagMap({self.tagSet: self})
+
+ def isSameTypeWith(self, other, matchTags=True, matchConstraints=True):
+ """Examine |ASN.1| type for equality with other ASN.1 type.
+
+ ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
+ (:py:mod:`~pyasn1.type.constraint`) are examined when carrying
+ out ASN.1 types comparison.
+
+ Python class inheritance relationship is NOT considered.
+
+ Parameters
+ ----------
+ other: a pyasn1 type object
+ Class instance representing ASN.1 type.
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if *other* is |ASN.1| type,
+ :obj:`False` otherwise.
+ """
+ return (self is other or
+ (not matchTags or self.tagSet == other.tagSet) and
+ (not matchConstraints or self.subtypeSpec == other.subtypeSpec))
+
+ def isSuperTypeOf(self, other, matchTags=True, matchConstraints=True):
+ """Examine |ASN.1| type for subtype relationship with other ASN.1 type.
+
+ ASN.1 tags (:py:mod:`~pyasn1.type.tag`) and constraints
+ (:py:mod:`~pyasn1.type.constraint`) are examined when carrying
+ out ASN.1 types comparison.
+
+ Python class inheritance relationship is NOT considered.
+
+ Parameters
+ ----------
+ other: a pyasn1 type object
+ Class instance representing ASN.1 type.
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if *other* is a subtype of |ASN.1| type,
+ :obj:`False` otherwise.
+ """
+        return ((not matchTags or
+                 self.tagSet.isSuperTagSetOf(other.tagSet)) and
+                (not matchConstraints or
+                 self.subtypeSpec.isSuperTypeOf(other.subtypeSpec)))
+
+ @staticmethod
+ def isNoValue(*values):
+ for value in values:
+ if value is not noValue:
+ return False
+ return True
+
+ def prettyPrint(self, scope=0):
+ raise NotImplementedError()
+
+ # backward compatibility
+
+ def getTagSet(self):
+ return self.tagSet
+
+ def getEffectiveTagSet(self):
+ return self.effectiveTagSet
+
+ def getTagMap(self):
+ return self.tagMap
+
+ def getSubtypeSpec(self):
+ return self.subtypeSpec
+
+ # backward compatibility
+ def hasValue(self):
+ return self.isValue
+
+# Backward compatibility
+Asn1ItemBase = Asn1Type
+
+
+class NoValue(object):
+ """Create a singleton instance of NoValue class.
+
+ The *NoValue* sentinel object represents an instance of ASN.1 schema
+ object as opposed to ASN.1 value object.
+
+ Only ASN.1 schema-related operations can be performed on ASN.1
+ schema objects.
+
+ Warning
+ -------
+ Any operation attempted on the *noValue* object will raise the
+ *PyAsn1Error* exception.
+ """
+ skipMethods = set(
+ ('__slots__',
+ # attributes
+ '__getattribute__',
+ '__getattr__',
+ '__setattr__',
+ '__delattr__',
+ # class instance
+ '__class__',
+ '__init__',
+ '__del__',
+ '__new__',
+ '__repr__',
+ '__qualname__',
+ '__objclass__',
+ 'im_class',
+ '__sizeof__',
+ # pickle protocol
+ '__reduce__',
+ '__reduce_ex__',
+ '__getnewargs__',
+ '__getinitargs__',
+ '__getstate__',
+ '__setstate__')
+ )
+
+ _instance = None
+
+ def __new__(cls):
+ if cls._instance is None:
+ def getPlug(name):
+ def plug(self, *args, **kw):
+ raise error.PyAsn1Error('Attempted "%s" operation on ASN.1 schema object' % name)
+ return plug
+
+ op_names = [name
+ for typ in (str, int, list, dict)
+ for name in dir(typ)
+ if (name not in cls.skipMethods and
+ name.startswith('__') and
+ name.endswith('__') and
+ calling.callable(getattr(typ, name)))]
+
+ for name in set(op_names):
+ setattr(cls, name, getPlug(name))
+
+ cls._instance = object.__new__(cls)
+
+ return cls._instance
+
+ def __getattr__(self, attr):
+ if attr in self.skipMethods:
+ raise AttributeError('Attribute %s not present' % attr)
+
+ raise error.PyAsn1Error('Attempted "%s" operation on ASN.1 schema object' % attr)
+
+ def __repr__(self):
+ return '<%s object>' % self.__class__.__name__
+
+
+noValue = NoValue()
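+
+# A minimal sketch of the noValue sentinel in action (illustrative only;
+# assumes the Integer type from pyasn1.type.univ):
+#
+#   from pyasn1.type import univ
+#
+#   schema = univ.Integer()    # no initializer -> schema object
+#   schema.isValue             # False
+#   schema + 1                 # raises PyAsn1Error: attempted "__add__" operation
+#
+#   value = univ.Integer(1)    # initializer given -> value object
+#   value + 1                  # Integer value 2, behaves like a Python int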
+
+
+class SimpleAsn1Type(Asn1Type):
+ """Base class for all simple classes representing ASN.1 types.
+
+ ASN.1 distinguishes types by their ability to hold other objects.
+ Scalar types are known as *simple* in ASN.1.
+
+ In the user code, |ASN.1| class is normally used only for telling
+ ASN.1 objects from others.
+
+ Note
+ ----
+    As far as ASN.1 is concerned, the way to compare ASN.1 types
+    is to use the :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
+ """
+ #: Default payload value
+ defaultValue = noValue
+
+ def __init__(self, value=noValue, **kwargs):
+ Asn1Type.__init__(self, **kwargs)
+ if value is noValue:
+ value = self.defaultValue
+ else:
+ value = self.prettyIn(value)
+ try:
+ self.subtypeSpec(value)
+
+ except error.PyAsn1Error:
+ exType, exValue, exTb = sys.exc_info()
+ raise exType('%s at %s' % (exValue, self.__class__.__name__))
+
+ self._value = value
+
+ def __repr__(self):
+ representation = '%s %s object' % (
+ self.__class__.__name__, self.isValue and 'value' or 'schema')
+
+ for attr, value in self.readOnly.items():
+ if value:
+ representation += ', %s %s' % (attr, value)
+
+ if self.isValue:
+ value = self.prettyPrint()
+ if len(value) > 32:
+ value = value[:16] + '...' + value[-16:]
+ representation += ', payload [%s]' % value
+
+ return '<%s>' % representation
+
+ def __eq__(self, other):
+        return self is other or self._value == other
+
+ def __ne__(self, other):
+ return self._value != other
+
+ def __lt__(self, other):
+ return self._value < other
+
+ def __le__(self, other):
+ return self._value <= other
+
+ def __gt__(self, other):
+ return self._value > other
+
+ def __ge__(self, other):
+ return self._value >= other
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return self._value and True or False
+ else:
+ def __bool__(self):
+ return self._value and True or False
+
+ def __hash__(self):
+ return hash(self._value)
+
+ @property
+ def isValue(self):
+ """Indicate that |ASN.1| object represents ASN.1 value.
+
+ If *isValue* is :obj:`False` then this object represents just
+ ASN.1 schema.
+
+ If *isValue* is :obj:`True` then, in addition to its ASN.1 schema
+ features, this object can also be used like a Python built-in object
+ (e.g. :class:`int`, :class:`str`, :class:`dict` etc.).
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`False` if object represents just ASN.1 schema.
+ :obj:`True` if object represents ASN.1 schema and can be used as a normal value.
+
+ Note
+ ----
+ There is an important distinction between PyASN1 schema and value objects.
+ The PyASN1 schema objects can only participate in ASN.1 schema-related
+        operations (e.g. defining or testing the structure of the data). The most
+        obvious use of ASN.1 schema is to guide serialisation codecs whilst
+        encoding/decoding serialised ASN.1 contents.
+
+ The PyASN1 value objects can **additionally** participate in many operations
+ involving regular Python objects (e.g. arithmetic, comprehension etc).
+ """
+ return self._value is not noValue
+
+ def clone(self, value=noValue, **kwargs):
+ """Create a modified version of |ASN.1| schema or value object.
+
+        The `clone()` method accepts the same set of arguments as |ASN.1|
+ class takes on instantiation except that all arguments
+ of the `clone()` method are optional.
+
+ Whatever arguments are supplied, they are used to create a copy
+ of `self` taking precedence over the ones used to instantiate `self`.
+
+ Note
+ ----
+ Due to the immutable nature of the |ASN.1| object, if no arguments
+ are supplied, no new |ASN.1| object will be created and `self` will
+ be returned instead.
+ """
+ if value is noValue:
+ if not kwargs:
+ return self
+
+ value = self._value
+
+ initializers = self.readOnly.copy()
+ initializers.update(kwargs)
+
+ return self.__class__(value, **initializers)
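+
+    # A minimal clone() sketch (illustrative only; assumes univ.Integer):
+    #
+    #   x = univ.Integer(1)
+    #   x.clone() is x    # True -- no arguments, the same immutable object
+    #   x.clone(2)        # a new Integer value object holding 2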
+
+ def subtype(self, value=noValue, **kwargs):
+ """Create a specialization of |ASN.1| schema or value object.
+
+ The subtype relationship between ASN.1 types has no correlation with
+ subtype relationship between Python types. ASN.1 type is mainly identified
+ by its tag(s) (:py:class:`~pyasn1.type.tag.TagSet`) and value range
+ constraints (:py:class:`~pyasn1.type.constraint.ConstraintsIntersection`).
+ These ASN.1 type properties are implemented as |ASN.1| attributes.
+
+        The `subtype()` method accepts the same set of arguments as |ASN.1|
+ class takes on instantiation except that all parameters
+ of the `subtype()` method are optional.
+
+        With the exception of the arguments described below, the rest of the
+        supplied arguments are used to create a copy of `self`, taking
+        precedence over the ones used to instantiate `self`.
+
+        The following arguments to `subtype()` create an ASN.1 subtype out of
+ |ASN.1| type:
+
+ Other Parameters
+ ----------------
+ implicitTag: :py:class:`~pyasn1.type.tag.Tag`
+ Implicitly apply given ASN.1 tag object to `self`'s
+ :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
+ new object's ASN.1 tag(s).
+
+ explicitTag: :py:class:`~pyasn1.type.tag.Tag`
+ Explicitly apply given ASN.1 tag object to `self`'s
+ :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
+ new object's ASN.1 tag(s).
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+            Add given ASN.1 constraints object to `self`'s constraints, then
+            use the result as the new object's ASN.1 constraints.
+
+ Returns
+ -------
+ :
+ new instance of |ASN.1| schema or value object
+
+ Note
+ ----
+ Due to the immutable nature of the |ASN.1| object, if no arguments
+ are supplied, no new |ASN.1| object will be created and `self` will
+ be returned instead.
+ """
+ if value is noValue:
+ if not kwargs:
+ return self
+
+ value = self._value
+
+ initializers = self.readOnly.copy()
+
+ implicitTag = kwargs.pop('implicitTag', None)
+ if implicitTag is not None:
+ initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
+
+ explicitTag = kwargs.pop('explicitTag', None)
+ if explicitTag is not None:
+ initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
+
+ for arg, option in kwargs.items():
+ initializers[arg] += option
+
+ return self.__class__(value, **initializers)
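+
+    # A minimal subtype() sketch (illustrative only; `Port` is a hypothetical
+    # name; assumes univ.Integer and pyasn1.type.constraint):
+    #
+    #   from pyasn1.type import constraint, univ
+    #
+    #   Port = univ.Integer().subtype(
+    #       subtypeSpec=constraint.ValueRangeConstraint(0, 65535))
+    #
+    #   Port.clone(80)       # satisfies the range constraint
+    #   Port.clone(99999)    # raises ValueConstraintError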
+
+ def prettyIn(self, value):
+ return value
+
+ def prettyOut(self, value):
+ return str(value)
+
+ def prettyPrint(self, scope=0):
+ return self.prettyOut(self._value)
+
+ def prettyPrintType(self, scope=0):
+ return '%s -> %s' % (self.tagSet, self.__class__.__name__)
+
+# Backward compatibility
+AbstractSimpleAsn1Item = SimpleAsn1Type
+
+#
+# Constructed types:
+# * There are five of them: Sequence, SequenceOf/SetOf, Set and Choice
+# * ASN1 types and values are represented by Python class instances
+# * Value initialization is made for defaulted components only
+# * Primary method of component addressing is by-position. Data model for base
+# type is Python sequence. Additional type-specific addressing methods
+# may be implemented for particular types.
+# * SequenceOf and SetOf types do not implement any additional methods
+# * Sequence, Set and Choice types also implement by-identifier addressing
+# * Sequence, Set and Choice types also implement by-asn1-type (tag) addressing
+# * Sequence and Set types may include optional and defaulted
+# components
+# * Constructed types hold a reference to component types used for value
+# verification and ordering.
+# * Component type is a scalar type for SequenceOf/SetOf types and a list
+# of types for Sequence/Set/Choice.
+#
+
+
+class ConstructedAsn1Type(Asn1Type):
+ """Base class for all constructed classes representing ASN.1 types.
+
+ ASN.1 distinguishes types by their ability to hold other objects.
+ Those "nesting" types are known as *constructed* in ASN.1.
+
+ In the user code, |ASN.1| class is normally used only for telling
+ ASN.1 objects from others.
+
+ Note
+ ----
+    As far as ASN.1 is concerned, the way to compare ASN.1 types
+    is to use the :meth:`isSameTypeWith` and :meth:`isSuperTypeOf` methods.
+ """
+
+ #: If :obj:`True`, requires exact component type matching,
+ #: otherwise subtype relation is only enforced
+ strictConstraints = False
+
+ componentType = None
+
+ # backward compatibility, unused
+ sizeSpec = constraint.ConstraintsIntersection()
+
+ def __init__(self, **kwargs):
+ readOnly = {
+ 'componentType': self.componentType,
+ # backward compatibility, unused
+ 'sizeSpec': self.sizeSpec
+ }
+
+ # backward compatibility: preserve legacy sizeSpec support
+ kwargs = self._moveSizeSpec(**kwargs)
+
+ readOnly.update(kwargs)
+
+ Asn1Type.__init__(self, **readOnly)
+
+ def _moveSizeSpec(self, **kwargs):
+ # backward compatibility, unused
+ sizeSpec = kwargs.pop('sizeSpec', self.sizeSpec)
+ if sizeSpec:
+ subtypeSpec = kwargs.pop('subtypeSpec', self.subtypeSpec)
+            if subtypeSpec:
+                subtypeSpec += sizeSpec
+
+            else:
+                subtypeSpec = sizeSpec
+
+ kwargs['subtypeSpec'] = subtypeSpec
+
+ return kwargs
+
+ def __repr__(self):
+ representation = '%s %s object' % (
+ self.__class__.__name__, self.isValue and 'value' or 'schema'
+ )
+
+ for attr, value in self.readOnly.items():
+ if value is not noValue:
+ representation += ', %s=%r' % (attr, value)
+
+ if self.isValue and self.components:
+ representation += ', payload [%s]' % ', '.join(
+ [repr(x) for x in self.components])
+
+ return '<%s>' % representation
+
+ def __eq__(self, other):
+ return self is other or self.components == other
+
+ def __ne__(self, other):
+ return self.components != other
+
+ def __lt__(self, other):
+ return self.components < other
+
+ def __le__(self, other):
+ return self.components <= other
+
+ def __gt__(self, other):
+ return self.components > other
+
+ def __ge__(self, other):
+ return self.components >= other
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return bool(self.components)
+ else:
+ def __bool__(self):
+ return bool(self.components)
+
+ @property
+ def components(self):
+ raise error.PyAsn1Error('Method not implemented')
+
+ def _cloneComponentValues(self, myClone, cloneValueFlag):
+ pass
+
+ def clone(self, **kwargs):
+ """Create a modified version of |ASN.1| schema object.
+
+        The `clone()` method accepts the same set of arguments as |ASN.1|
+ class takes on instantiation except that all arguments
+ of the `clone()` method are optional.
+
+ Whatever arguments are supplied, they are used to create a copy
+ of `self` taking precedence over the ones used to instantiate `self`.
+
+ Possible values of `self` are never copied over thus `clone()` can
+ only create a new schema object.
+
+ Returns
+ -------
+ :
+ new instance of |ASN.1| type/value
+
+ Note
+ ----
+ Due to the mutable nature of the |ASN.1| object, even if no arguments
+ are supplied, a new |ASN.1| object will be created and returned.
+ """
+ cloneValueFlag = kwargs.pop('cloneValueFlag', False)
+
+ initializers = self.readOnly.copy()
+ initializers.update(kwargs)
+
+ clone = self.__class__(**initializers)
+
+ if cloneValueFlag:
+ self._cloneComponentValues(clone, cloneValueFlag)
+
+ return clone
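+
+    # A minimal clone() sketch for constructed types (illustrative only;
+    # `record` stands for any Sequence/Set value object):
+    #
+    #   bare = record.clone()                     # fresh schema copy, no values
+    #   full = record.clone(cloneValueFlag=True)  # component values copied over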
+
+ def subtype(self, **kwargs):
+ """Create a specialization of |ASN.1| schema object.
+
+        The `subtype()` method accepts the same set of arguments as |ASN.1|
+ class takes on instantiation except that all parameters
+ of the `subtype()` method are optional.
+
+        With the exception of the arguments described below, the rest of the
+        supplied arguments are used to create a copy of `self`, taking
+        precedence over the ones used to instantiate `self`.
+
+        The following arguments to `subtype()` create an ASN.1 subtype out of
+ |ASN.1| type.
+
+ Other Parameters
+ ----------------
+ implicitTag: :py:class:`~pyasn1.type.tag.Tag`
+ Implicitly apply given ASN.1 tag object to `self`'s
+ :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
+ new object's ASN.1 tag(s).
+
+ explicitTag: :py:class:`~pyasn1.type.tag.Tag`
+ Explicitly apply given ASN.1 tag object to `self`'s
+ :py:class:`~pyasn1.type.tag.TagSet`, then use the result as
+ new object's ASN.1 tag(s).
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+            Add given ASN.1 constraints object to `self`'s constraints, then
+            use the result as the new object's ASN.1 constraints.
+
+
+ Returns
+ -------
+ :
+ new instance of |ASN.1| type/value
+
+ Note
+ ----
+ Due to the mutable nature of the |ASN.1| object, even if no arguments
+ are supplied, a new |ASN.1| object will be created and returned.
+ """
+
+ initializers = self.readOnly.copy()
+
+ cloneValueFlag = kwargs.pop('cloneValueFlag', False)
+
+ implicitTag = kwargs.pop('implicitTag', None)
+ if implicitTag is not None:
+ initializers['tagSet'] = self.tagSet.tagImplicitly(implicitTag)
+
+ explicitTag = kwargs.pop('explicitTag', None)
+ if explicitTag is not None:
+ initializers['tagSet'] = self.tagSet.tagExplicitly(explicitTag)
+
+ for arg, option in kwargs.items():
+ initializers[arg] += option
+
+ clone = self.__class__(**initializers)
+
+ if cloneValueFlag:
+ self._cloneComponentValues(clone, cloneValueFlag)
+
+ return clone
+
+ def getComponentByPosition(self, idx):
+ raise error.PyAsn1Error('Method not implemented')
+
+ def setComponentByPosition(self, idx, value, verifyConstraints=True):
+ raise error.PyAsn1Error('Method not implemented')
+
+ def setComponents(self, *args, **kwargs):
+ for idx, value in enumerate(args):
+ self[idx] = value
+ for k in kwargs:
+ self[k] = kwargs[k]
+ return self
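+
+    # A minimal setComponents() sketch (illustrative only; `Record` is a
+    # hypothetical Sequence subclass with components 'id' and 'name'):
+    #
+    #   record = Record().setComponents(1, 'John')          # by position
+    #   record = Record().setComponents(id=1, name='John')  # by field name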
+
+ # backward compatibility
+
+ def setDefaultComponents(self):
+ pass
+
+ def getComponentType(self):
+ return self.componentType
+
+ # backward compatibility, unused
+ def verifySizeSpec(self):
+ self.subtypeSpec(self)
+
+
+# Backward compatibility
+AbstractConstructedAsn1Item = ConstructedAsn1Type
diff --git a/third_party/python/pyasn1/pyasn1/type/char.py b/third_party/python/pyasn1/pyasn1/type/char.py
new file mode 100644
index 0000000000..06074da0f7
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/char.py
@@ -0,0 +1,335 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+__all__ = ['NumericString', 'PrintableString', 'TeletexString', 'T61String', 'VideotexString',
+ 'IA5String', 'GraphicString', 'VisibleString', 'ISO646String',
+ 'GeneralString', 'UniversalString', 'BMPString', 'UTF8String']
+
+NoValue = univ.NoValue
+noValue = univ.noValue
+
+
+class AbstractCharacterString(univ.OctetString):
+ """Creates |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`,
+ its objects are immutable and duck-type Python 2 :class:`str` or Python 3
+ :class:`bytes`. When used in octet-stream context, |ASN.1| type assumes
+ "|encoding|" encoding.
+
+ Keyword Args
+ ------------
+ value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
+ :class:`unicode` object (Python 2) or :class:`str` (Python 3),
+ alternatively :class:`str` (Python 2) or :class:`bytes` (Python 3)
+ representing octet-stream of serialised unicode string
+ (note `encoding` parameter) or |ASN.1| class instance.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ encoding: :py:class:`str`
+ Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
+ :class:`str` (Python 3) the payload when |ASN.1| object is used
+ in octet-stream context.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+ """
+
+ if sys.version_info[0] <= 2:
+ def __str__(self):
+ try:
+ # `str` is Py2 text representation
+ return self._value.encode(self.encoding)
+
+ except UnicodeEncodeError:
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeEncodeError(
+ "Can't encode string '%s' with codec "
+ "%s" % (self._value, self.encoding), exc
+ )
+
+ def __unicode__(self):
+ return unicode(self._value)
+
+ def prettyIn(self, value):
+ try:
+ if isinstance(value, unicode):
+ return value
+ elif isinstance(value, str):
+ return value.decode(self.encoding)
+ elif isinstance(value, (tuple, list)):
+ return self.prettyIn(''.join([chr(x) for x in value]))
+ elif isinstance(value, univ.OctetString):
+ return value.asOctets().decode(self.encoding)
+ else:
+ return unicode(value)
+
+ except (UnicodeDecodeError, LookupError):
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeDecodeError(
+ "Can't decode string '%s' with codec "
+ "%s" % (value, self.encoding), exc
+ )
+
+ def asOctets(self, padding=True):
+ return str(self)
+
+ def asNumbers(self, padding=True):
+ return tuple([ord(x) for x in str(self)])
+
+ else:
+ def __str__(self):
+ # `unicode` is Py3 text representation
+ return str(self._value)
+
+ def __bytes__(self):
+ try:
+ return self._value.encode(self.encoding)
+ except UnicodeEncodeError:
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeEncodeError(
+ "Can't encode string '%s' with codec "
+ "%s" % (self._value, self.encoding), exc
+ )
+
+ def prettyIn(self, value):
+ try:
+ if isinstance(value, str):
+ return value
+ elif isinstance(value, bytes):
+ return value.decode(self.encoding)
+ elif isinstance(value, (tuple, list)):
+ return self.prettyIn(bytes(value))
+ elif isinstance(value, univ.OctetString):
+ return value.asOctets().decode(self.encoding)
+ else:
+ return str(value)
+
+ except (UnicodeDecodeError, LookupError):
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeDecodeError(
+ "Can't decode string '%s' with codec "
+ "%s" % (value, self.encoding), exc
+ )
+
+ def asOctets(self, padding=True):
+ return bytes(self)
+
+ def asNumbers(self, padding=True):
+ return tuple(bytes(self))
+
+ #
+ # See OctetString.prettyPrint() for the explanation
+ #
+
+ def prettyOut(self, value):
+ return value
+
+ def prettyPrint(self, scope=0):
+ # first see if subclass has its own .prettyOut()
+ value = self.prettyOut(self._value)
+
+ if value is not self._value:
+ return value
+
+ return AbstractCharacterString.__str__(self)
+
+ def __reversed__(self):
+ return reversed(self._value)
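+
+    # A minimal usage sketch (illustrative only, Python 3 semantics; uses the
+    # UTF8String subclass defined below):
+    #
+    #   s = UTF8String('føø')
+    #   str(s)               # 'føø' -- text representation
+    #   bytes(s)             # b'f\xc3\xb8\xc3\xb8' -- per-class encoding applied
+    #   s.asNumbers()[:2]    # (102, 195) -- encoded octets as integers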
+
+
+class NumericString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 18)
+ )
+ encoding = 'us-ascii'
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class PrintableString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 19)
+ )
+ encoding = 'us-ascii'
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class TeletexString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 20)
+ )
+ encoding = 'iso-8859-1'
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class T61String(TeletexString):
+ __doc__ = TeletexString.__doc__
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class VideotexString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 21)
+ )
+ encoding = 'iso-8859-1'
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class IA5String(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 22)
+ )
+ encoding = 'us-ascii'
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class GraphicString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 25)
+ )
+ encoding = 'iso-8859-1'
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class VisibleString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 26)
+ )
+ encoding = 'us-ascii'
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class ISO646String(VisibleString):
+ __doc__ = VisibleString.__doc__
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class GeneralString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 27)
+ )
+ encoding = 'iso-8859-1'
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class UniversalString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 28)
+ )
+ encoding = "utf-32-be"
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class BMPString(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 30)
+ )
+ encoding = "utf-16-be"
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
+
+
+class UTF8String(AbstractCharacterString):
+ __doc__ = AbstractCharacterString.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = AbstractCharacterString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 12)
+ )
+ encoding = "utf-8"
+
+ # Optimization for faster codec lookup
+ typeId = AbstractCharacterString.getTypeId()
diff --git a/third_party/python/pyasn1/pyasn1/type/constraint.py b/third_party/python/pyasn1/pyasn1/type/constraint.py
new file mode 100644
index 0000000000..8f152e9e9c
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/constraint.py
@@ -0,0 +1,756 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Original concept and code by Mike C. Fletcher.
+#
+import sys
+
+from pyasn1.type import error
+
+__all__ = ['SingleValueConstraint', 'ContainedSubtypeConstraint',
+ 'ValueRangeConstraint', 'ValueSizeConstraint',
+ 'PermittedAlphabetConstraint', 'InnerTypeConstraint',
+ 'ConstraintsExclusion', 'ConstraintsIntersection',
+ 'ConstraintsUnion']
+
+
+class AbstractConstraint(object):
+
+ def __init__(self, *values):
+ self._valueMap = set()
+ self._setValues(values)
+ self.__hash = hash((self.__class__.__name__, self._values))
+
+ def __call__(self, value, idx=None):
+ if not self._values:
+ return
+
+ try:
+ self._testValue(value, idx)
+
+ except error.ValueConstraintError:
+ raise error.ValueConstraintError(
+ '%s failed at: %r' % (self, sys.exc_info()[1])
+ )
+
+ def __repr__(self):
+ representation = '%s object' % (self.__class__.__name__)
+
+ if self._values:
+ representation += ', consts %s' % ', '.join(
+ [repr(x) for x in self._values])
+
+ return '<%s>' % representation
+
+ def __eq__(self, other):
+        return self is other or self._values == other
+
+ def __ne__(self, other):
+ return self._values != other
+
+ def __lt__(self, other):
+ return self._values < other
+
+ def __le__(self, other):
+ return self._values <= other
+
+ def __gt__(self, other):
+ return self._values > other
+
+ def __ge__(self, other):
+ return self._values >= other
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return self._values and True or False
+ else:
+ def __bool__(self):
+ return self._values and True or False
+
+ def __hash__(self):
+ return self.__hash
+
+ def _setValues(self, values):
+ self._values = values
+
+ def _testValue(self, value, idx):
+ raise error.ValueConstraintError(value)
+
+ # Constraints derivation logic
+ def getValueMap(self):
+ return self._valueMap
+
+ def isSuperTypeOf(self, otherConstraint):
+ # TODO: fix possible comparison of set vs scalars here
+ return (otherConstraint is self or
+ not self._values or
+ otherConstraint == self or
+ self in otherConstraint.getValueMap())
+
+ def isSubTypeOf(self, otherConstraint):
+ return (otherConstraint is self or
+ not self or
+ otherConstraint == self or
+ otherConstraint in self._valueMap)
+
+
+class SingleValueConstraint(AbstractConstraint):
+ """Create a SingleValueConstraint object.
+
+ The SingleValueConstraint satisfies any value that
+ is present in the set of permitted values.
+
+ Objects of this type are iterable (emitting constraint values) and
+ can act as operands for some arithmetic operations e.g. addition
+ and subtraction. The latter can be used for combining multiple
+ SingleValueConstraint objects into one.
+
+ The SingleValueConstraint object can be applied to
+ any ASN.1 type.
+
+ Parameters
+ ----------
+ *values: :class:`int`
+ Full set of values permitted by this constraint object.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class DivisorOfSix(Integer):
+ '''
+ ASN.1 specification:
+
+ Divisor-Of-6 ::= INTEGER (1 | 2 | 3 | 6)
+ '''
+ subtypeSpec = SingleValueConstraint(1, 2, 3, 6)
+
+ # this will succeed
+ divisor_of_six = DivisorOfSix(1)
+
+ # this will raise ValueConstraintError
+ divisor_of_six = DivisorOfSix(7)
+ """
+ def _setValues(self, values):
+ self._values = values
+ self._set = set(values)
+
+ def _testValue(self, value, idx):
+ if value not in self._set:
+ raise error.ValueConstraintError(value)
+
+ # Constrains can be merged or reduced
+
+ def __contains__(self, item):
+ return item in self._set
+
+ def __iter__(self):
+ return iter(self._set)
+
+ def __sub__(self, constraint):
+ return self.__class__(*(self._set.difference(constraint)))
+
+ def __add__(self, constraint):
+ return self.__class__(*(self._set.union(constraint)))
+
+
+class ContainedSubtypeConstraint(AbstractConstraint):
+ """Create a ContainedSubtypeConstraint object.
+
+ The ContainedSubtypeConstraint satisfies any value that
+ is present in the set of permitted values and also
+ satisfies included constraints.
+
+ The ContainedSubtypeConstraint object can be applied to
+ any ASN.1 type.
+
+ Parameters
+ ----------
+ *values:
+ Full set of values and constraint objects permitted
+ by this constraint object.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class DivisorOfEighteen(Integer):
+ '''
+ ASN.1 specification:
+
+ Divisors-of-18 ::= INTEGER (INCLUDES Divisors-of-6 | 9 | 18)
+ '''
+ subtypeSpec = ContainedSubtypeConstraint(
+ SingleValueConstraint(1, 2, 3, 6), 9, 18
+ )
+
+ # this will succeed
+ divisor_of_eighteen = DivisorOfEighteen(9)
+
+ # this will raise ValueConstraintError
+ divisor_of_eighteen = DivisorOfEighteen(10)
+ """
+    def _setValues(self, values):
+        AbstractConstraint._setValues(self, values)
+        # plain (non-constraint) values, collected for membership testing
+        self._set = set(value for value in values
+                        if not isinstance(value, AbstractConstraint))
+
+    def _testValue(self, value, idx):
+        for constraint in self._values:
+            if isinstance(constraint, AbstractConstraint):
+                constraint(value, idx)
+            elif value not in self._set:
+                raise error.ValueConstraintError(value)
+
+
+class ValueRangeConstraint(AbstractConstraint):
+ """Create a ValueRangeConstraint object.
+
+ The ValueRangeConstraint satisfies any value that
+ falls in the range of permitted values.
+
+ The ValueRangeConstraint object can only be applied
+ to :class:`~pyasn1.type.univ.Integer` and
+ :class:`~pyasn1.type.univ.Real` types.
+
+ Parameters
+ ----------
+ start: :class:`int`
+ Minimum permitted value in the range (inclusive)
+
+ end: :class:`int`
+ Maximum permitted value in the range (inclusive)
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class TeenAgeYears(Integer):
+ '''
+ ASN.1 specification:
+
+ TeenAgeYears ::= INTEGER (13 .. 19)
+ '''
+ subtypeSpec = ValueRangeConstraint(13, 19)
+
+ # this will succeed
+ teen_year = TeenAgeYears(18)
+
+ # this will raise ValueConstraintError
+ teen_year = TeenAgeYears(20)
+ """
+ def _testValue(self, value, idx):
+ if value < self.start or value > self.stop:
+ raise error.ValueConstraintError(value)
+
+ def _setValues(self, values):
+ if len(values) != 2:
+ raise error.PyAsn1Error(
+ '%s: bad constraint values' % (self.__class__.__name__,)
+ )
+ self.start, self.stop = values
+ if self.start > self.stop:
+ raise error.PyAsn1Error(
+                '%s: invalid constraint values (start > stop): %s > %s' % (
+ self.__class__.__name__,
+ self.start, self.stop
+ )
+ )
+ AbstractConstraint._setValues(self, values)
+
+
+class ValueSizeConstraint(ValueRangeConstraint):
+ """Create a ValueSizeConstraint object.
+
+ The ValueSizeConstraint satisfies any value for
+ as long as its size falls within the range of
+ permitted sizes.
+
+ The ValueSizeConstraint object can be applied
+ to :class:`~pyasn1.type.univ.BitString`,
+ :class:`~pyasn1.type.univ.OctetString` (including
+ all :ref:`character ASN.1 types <type.char>`),
+ :class:`~pyasn1.type.univ.SequenceOf`
+ and :class:`~pyasn1.type.univ.SetOf` types.
+
+ Parameters
+ ----------
+ minimum: :class:`int`
+ Minimum permitted size of the value (inclusive)
+
+ maximum: :class:`int`
+ Maximum permitted size of the value (inclusive)
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class BaseballTeamRoster(SetOf):
+ '''
+ ASN.1 specification:
+
+ BaseballTeamRoster ::= SET SIZE (1..25) OF PlayerNames
+ '''
+ componentType = PlayerNames()
+ subtypeSpec = ValueSizeConstraint(1, 25)
+
+ # this will succeed
+ team = BaseballTeamRoster()
+ team.extend(['Jan', 'Matej'])
+ encode(team)
+
+ # this will raise ValueConstraintError
+ team = BaseballTeamRoster()
+ team.extend(['Jan'] * 26)
+ encode(team)
+
+ Note
+ ----
+ Whenever ValueSizeConstraint is applied to mutable types
+ (e.g. :class:`~pyasn1.type.univ.SequenceOf`,
+ :class:`~pyasn1.type.univ.SetOf`), constraint
+ validation only happens at the serialisation phase rather
+ than schema instantiation phase (as it is with immutable
+ types).
+ """
+ def _testValue(self, value, idx):
+ valueSize = len(value)
+ if valueSize < self.start or valueSize > self.stop:
+ raise error.ValueConstraintError(value)
+
+
+class PermittedAlphabetConstraint(SingleValueConstraint):
+ """Create a PermittedAlphabetConstraint object.
+
+ The PermittedAlphabetConstraint satisfies any character
+ string for as long as all its characters are present in
+ the set of permitted characters.
+
+ Objects of this type are iterable (emitting constraint values) and
+ can act as operands for some arithmetic operations e.g. addition
+ and subtraction.
+
+ The PermittedAlphabetConstraint object can only be applied
+ to the :ref:`character ASN.1 types <type.char>` such as
+ :class:`~pyasn1.type.char.IA5String`.
+
+ Parameters
+ ----------
+ *alphabet: :class:`str`
+ Full set of characters permitted by this constraint object.
+
+ Example
+ -------
+ .. code-block:: python
+
+ class BooleanValue(IA5String):
+ '''
+ ASN.1 specification:
+
+ BooleanValue ::= IA5String (FROM ('T' | 'F'))
+ '''
+ subtypeSpec = PermittedAlphabetConstraint('T', 'F')
+
+ # this will succeed
+ truth = BooleanValue('T')
+ truth = BooleanValue('TF')
+
+ # this will raise ValueConstraintError
+ garbage = BooleanValue('TAF')
+
+ ASN.1 `FROM ... EXCEPT ...` clause can be modelled by combining multiple
+ PermittedAlphabetConstraint objects into one:
+
+ Example
+ -------
+ .. code-block:: python
+
+ class Lipogramme(IA5String):
+ '''
+ ASN.1 specification:
+
+ Lipogramme ::=
+ IA5String (FROM (ALL EXCEPT ("e"|"E")))
+ '''
+ subtypeSpec = (
+ PermittedAlphabetConstraint(*string.printable) -
+ PermittedAlphabetConstraint('e', 'E')
+ )
+
+ # this will succeed
+ lipogramme = Lipogramme('A work of fiction?')
+
+ # this will raise ValueConstraintError
+ lipogramme = Lipogramme('Eel')
+
+ Note
+ ----
+ Although `ConstraintsExclusion` object could seemingly be used for this
+ purpose, practically, for it to work, it needs to represent its operand
+ constraints as sets and intersect one with the other. That would require
+ the insight into the constraint values (and their types) that are otherwise
+ hidden inside the constraint object.
+
+ Therefore it's more practical to model `EXCEPT` clause at
+ `PermittedAlphabetConstraint` level instead.
+ """
+ def _setValues(self, values):
+ self._values = values
+ self._set = set(values)
+
+ def _testValue(self, value, idx):
+ if not self._set.issuperset(value):
+ raise error.ValueConstraintError(value)
+
+
+class ComponentPresentConstraint(AbstractConstraint):
+ """Create a ComponentPresentConstraint object.
+
+ The ComponentPresentConstraint is only satisfied when the value
+ is not `None`.
+
+ The ComponentPresentConstraint object is typically used with
+ `WithComponentsConstraint`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ present = ComponentPresentConstraint()
+
+ # this will succeed
+ present('whatever')
+
+ # this will raise ValueConstraintError
+ present(None)
+ """
+ def _setValues(self, values):
+ self._values = ('<must be present>',)
+
+ if values:
+ raise error.PyAsn1Error('No arguments expected')
+
+ def _testValue(self, value, idx):
+ if value is None:
+ raise error.ValueConstraintError(
+                'Component is not present')
+
+
+class ComponentAbsentConstraint(AbstractConstraint):
+ """Create a ComponentAbsentConstraint object.
+
+ The ComponentAbsentConstraint is only satisfied when the value
+ is `None`.
+
+ The ComponentAbsentConstraint object is typically used with
+ `WithComponentsConstraint`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ absent = ComponentAbsentConstraint()
+
+ # this will succeed
+ absent(None)
+
+ # this will raise ValueConstraintError
+ absent('whatever')
+ """
+ def _setValues(self, values):
+ self._values = ('<must be absent>',)
+
+ if values:
+ raise error.PyAsn1Error('No arguments expected')
+
+ def _testValue(self, value, idx):
+ if value is not None:
+ raise error.ValueConstraintError(
+ 'Component is not absent: %r' % value)
+
+
+class WithComponentsConstraint(AbstractConstraint):
+ """Create a WithComponentsConstraint object.
+
+ The `WithComponentsConstraint` satisfies any mapping object that has
+    constrained fields present or absent, as indicated by
+ `ComponentPresentConstraint` and `ComponentAbsentConstraint`
+ objects respectively.
+
+ The `WithComponentsConstraint` object is typically applied
+ to :class:`~pyasn1.type.univ.Set` or
+ :class:`~pyasn1.type.univ.Sequence` types.
+
+ Parameters
+ ----------
+ *fields: :class:`tuple`
+ Zero or more tuples of (`field`, `constraint`) indicating constrained
+ fields.
+
+ Notes
+ -----
+ On top of the primary use of `WithComponentsConstraint` (ensuring presence
+ or absence of particular components of a :class:`~pyasn1.type.univ.Set` or
+ :class:`~pyasn1.type.univ.Sequence`), it is also possible to pass any other
+ constraint objects or their combinations. In case of scalar fields, these
+ constraints will be verified in addition to the constraints belonging to
+ scalar components themselves. However, formally, these additional
+ constraints do not change the type of these ASN.1 objects.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Item(Sequence): # Set is similar
+ '''
+ ASN.1 specification:
+
+ Item ::= SEQUENCE {
+ id INTEGER OPTIONAL,
+ name OCTET STRING OPTIONAL
+ } WITH COMPONENTS id PRESENT, name ABSENT | id ABSENT, name PRESENT
+ '''
+ componentType = NamedTypes(
+ OptionalNamedType('id', Integer()),
+ OptionalNamedType('name', OctetString())
+ )
+            subtypeSpec = ConstraintsUnion(
+ WithComponentsConstraint(
+ ('id', ComponentPresentConstraint()),
+ ('name', ComponentAbsentConstraint())
+ ),
+ WithComponentsConstraint(
+ ('id', ComponentAbsentConstraint()),
+ ('name', ComponentPresentConstraint())
+ )
+ )
+
+ item = Item()
+
+ # This will succeed
+ item['id'] = 1
+
+ # This will succeed
+ item.reset()
+ item['name'] = 'John'
+
+ # This will fail (on encoding)
+ item.reset()
+        item['id'] = 1
+        item['name'] = 'John'
+ """
+ def _testValue(self, value, idx):
+ for field, constraint in self._values:
+ constraint(value.get(field))
+
+ def _setValues(self, values):
+ AbstractConstraint._setValues(self, values)
+
+
+# This is a bit kludgy, meaning two op modes within a single constraint
+class InnerTypeConstraint(AbstractConstraint):
+ """Value must satisfy the type and presence constraints"""
+
+ def _testValue(self, value, idx):
+ if self.__singleTypeConstraint:
+ self.__singleTypeConstraint(value)
+ elif self.__multipleTypeConstraint:
+ if idx not in self.__multipleTypeConstraint:
+ raise error.ValueConstraintError(value)
+ constraint, status = self.__multipleTypeConstraint[idx]
+ if status == 'ABSENT': # XXX presence is not checked!
+ raise error.ValueConstraintError(value)
+ constraint(value)
+
+ def _setValues(self, values):
+ self.__multipleTypeConstraint = {}
+ self.__singleTypeConstraint = None
+ for v in values:
+ if isinstance(v, tuple):
+ self.__multipleTypeConstraint[v[0]] = v[1], v[2]
+ else:
+ self.__singleTypeConstraint = v
+ AbstractConstraint._setValues(self, values)
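+
+    # A minimal sketch of the two operating modes (illustrative only):
+    #
+    #   # single-type mode: one constraint applied to every component
+    #   InnerTypeConstraint(ValueRangeConstraint(0, 9))
+    #
+    #   # multiple-type mode: per-position (position, constraint, presence) entries
+    #   InnerTypeConstraint(
+    #       (0, ValueRangeConstraint(0, 9), 'PRESENT'),
+    #       (1, ValueRangeConstraint(10, 19), 'ABSENT'))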
+
+
+# Logic operations on constraints
+
+class ConstraintsExclusion(AbstractConstraint):
+ """Create a ConstraintsExclusion logic operator object.
+
+ The ConstraintsExclusion logic operator succeeds when the
+ value does *not* satisfy the operand constraint.
+
+ The ConstraintsExclusion object can be applied to
+ any constraint and logic operator object.
+
+ Parameters
+ ----------
+ *constraints:
+ Constraint or logic operator objects.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class LuckyNumber(Integer):
+ subtypeSpec = ConstraintsExclusion(
+ SingleValueConstraint(13)
+ )
+
+ # this will succeed
+ luckyNumber = LuckyNumber(12)
+
+ # this will raise ValueConstraintError
+ luckyNumber = LuckyNumber(13)
+
+ Note
+ ----
+ The `FROM ... EXCEPT ...` ASN.1 clause should be modeled by combining
+ constraint objects into one. See `PermittedAlphabetConstraint` for more
+ information.
+ """
+ def _testValue(self, value, idx):
+ for constraint in self._values:
+ try:
+ constraint(value, idx)
+
+ except error.ValueConstraintError:
+ continue
+
+ raise error.ValueConstraintError(value)
+
+ def _setValues(self, values):
+ AbstractConstraint._setValues(self, values)
+
+
+class AbstractConstraintSet(AbstractConstraint):
+
+ def __getitem__(self, idx):
+ return self._values[idx]
+
+ def __iter__(self):
+ return iter(self._values)
+
+ def __add__(self, value):
+ return self.__class__(*(self._values + (value,)))
+
+ def __radd__(self, value):
+ return self.__class__(*((value,) + self._values))
+
+ def __len__(self):
+ return len(self._values)
+
+ # Constraints inclusion in sets
+
+ def _setValues(self, values):
+ self._values = values
+ for constraint in values:
+ if constraint:
+ self._valueMap.add(constraint)
+ self._valueMap.update(constraint.getValueMap())
+
+
+class ConstraintsIntersection(AbstractConstraintSet):
+ """Create a ConstraintsIntersection logic operator object.
+
+ The ConstraintsIntersection logic operator only succeeds
+ if *all* its operands succeed.
+
+ The ConstraintsIntersection object can be applied to
+ any constraint and logic operator objects.
+
+ The ConstraintsIntersection object duck-types the immutable
+ container object like Python :py:class:`tuple`.
+
+ Parameters
+ ----------
+ *constraints:
+ Constraint or logic operator objects.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class CapitalAndSmall(IA5String):
+ '''
+ ASN.1 specification:
+
+            CapitalAndSmall ::=
+                IA5String (FROM ("A".."Z"|"a".."z") SIZE (1..8))
+            '''
+            subtypeSpec = ConstraintsIntersection(
+                PermittedAlphabetConstraint(
+                    *(string.ascii_uppercase + string.ascii_lowercase)),
+                ValueSizeConstraint(1, 8)
+            )
+
+        # this will succeed
+        capital_and_small = CapitalAndSmall('Hello')
+
+        # this will raise ValueConstraintError
+        capital_and_small = CapitalAndSmall('hello there, world')
+ """
+ def _testValue(self, value, idx):
+ for constraint in self._values:
+ constraint(value, idx)
+
+
+class ConstraintsUnion(AbstractConstraintSet):
+ """Create a ConstraintsUnion logic operator object.
+
+ The ConstraintsUnion logic operator succeeds if
+ *at least* a single operand succeeds.
+
+ The ConstraintsUnion object can be applied to
+ any constraint and logic operator objects.
+
+ The ConstraintsUnion object duck-types the immutable
+ container object like Python :py:class:`tuple`.
+
+ Parameters
+ ----------
+ *constraints:
+ Constraint or logic operator objects.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class CapitalOrSmall(IA5String):
+ '''
+ ASN.1 specification:
+
+ CapitalOrSmall ::=
+ IA5String (FROM ("A".."Z") | FROM ("a".."z"))
+ '''
+ subtypeSpec = ConstraintsUnion(
+                PermittedAlphabetConstraint(*string.ascii_uppercase),
+                PermittedAlphabetConstraint(*string.ascii_lowercase)
+ )
+
+ # this will succeed
+        capital_or_small = CapitalOrSmall('HELLO')
+
+ # this will raise ValueConstraintError
+ capital_or_small = CapitalOrSmall('hello!')
+ """
+ def _testValue(self, value, idx):
+ for constraint in self._values:
+ try:
+ constraint(value, idx)
+ except error.ValueConstraintError:
+ pass
+ else:
+ return
+
+ raise error.ValueConstraintError(
+ 'all of %s failed for "%s"' % (self._values, value)
+ )
+
+# TODO:
+# refactor InnerTypeConstraint
+# add tests for type check
+# implement other constraint types
+# make constraint validation easy to skip
diff --git a/third_party/python/pyasn1/pyasn1/type/error.py b/third_party/python/pyasn1/pyasn1/type/error.py
new file mode 100644
index 0000000000..80fcf3bdcd
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/error.py
@@ -0,0 +1,11 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1.error import PyAsn1Error
+
+
+class ValueConstraintError(PyAsn1Error):
+ pass
diff --git a/third_party/python/pyasn1/pyasn1/type/namedtype.py b/third_party/python/pyasn1/pyasn1/type/namedtype.py
new file mode 100644
index 0000000000..cbc14293e0
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/namedtype.py
@@ -0,0 +1,561 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import sys
+
+from pyasn1 import error
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+__all__ = ['NamedType', 'OptionalNamedType', 'DefaultedNamedType',
+ 'NamedTypes']
+
+try:
+ any
+
+except NameError:
+ any = lambda x: bool(filter(bool, x))
+
+
+class NamedType(object):
+ """Create named field object for a constructed ASN.1 type.
+
+ The |NamedType| object represents a single name and ASN.1 type of a constructed ASN.1 type.
+
+ |NamedType| objects are immutable and duck-type Python :class:`tuple` objects
+ holding *name* and *asn1Object* components.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ asn1Object:
+ ASN.1 type object
+ """
+ isOptional = False
+ isDefaulted = False
+
+ def __init__(self, name, asn1Object, openType=None):
+ self.__name = name
+ self.__type = asn1Object
+ self.__nameAndType = name, asn1Object
+ self.__openType = openType
+
+ def __repr__(self):
+ representation = '%s=%r' % (self.name, self.asn1Object)
+
+ if self.openType:
+ representation += ', open type %r' % self.openType
+
+ return '<%s object, type %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__nameAndType == other
+
+ def __ne__(self, other):
+ return self.__nameAndType != other
+
+ def __lt__(self, other):
+ return self.__nameAndType < other
+
+ def __le__(self, other):
+ return self.__nameAndType <= other
+
+ def __gt__(self, other):
+ return self.__nameAndType > other
+
+ def __ge__(self, other):
+ return self.__nameAndType >= other
+
+ def __hash__(self):
+ return hash(self.__nameAndType)
+
+ def __getitem__(self, idx):
+ return self.__nameAndType[idx]
+
+ def __iter__(self):
+ return iter(self.__nameAndType)
+
+ @property
+ def name(self):
+ return self.__name
+
+ @property
+ def asn1Object(self):
+ return self.__type
+
+ @property
+ def openType(self):
+ return self.__openType
+
+ # Backward compatibility
+
+ def getName(self):
+ return self.name
+
+ def getType(self):
+ return self.asn1Object
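+
+    # A minimal sketch (illustrative only; assumes Integer from
+    # pyasn1.type.univ):
+    #
+    #   nt = NamedType('age', Integer())
+    #   name, asn1Object = nt    # duck-types a (name, type) tuple
+    #   nt.name                  # 'age'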
+
+
+class OptionalNamedType(NamedType):
+ __doc__ = NamedType.__doc__
+
+ isOptional = True
+
+
+class DefaultedNamedType(NamedType):
+ __doc__ = NamedType.__doc__
+
+ isDefaulted = True
+
+
+class NamedTypes(object):
+ """Create a collection of named fields for a constructed ASN.1 type.
+
+ The NamedTypes object represents a collection of named fields of a constructed ASN.1 type.
+
+ *NamedTypes* objects are immutable and duck-type Python :class:`dict` objects
+ holding *name* as keys and ASN.1 type object as values.
+
+ Parameters
+ ----------
+ *namedTypes: :class:`~pyasn1.type.namedtype.NamedType`
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Description(Sequence):
+ '''
+ ASN.1 specification:
+
+ Description ::= SEQUENCE {
+ surname IA5String,
+ first-name IA5String OPTIONAL,
+ age INTEGER DEFAULT 40
+ }
+ '''
+ componentType = NamedTypes(
+ NamedType('surname', IA5String()),
+ OptionalNamedType('first-name', IA5String()),
+ DefaultedNamedType('age', Integer(40))
+ )
+
+ descr = Description()
+ descr['surname'] = 'Smith'
+ descr['first-name'] = 'John'
+ """
+ def __init__(self, *namedTypes, **kwargs):
+ self.__namedTypes = namedTypes
+ self.__namedTypesLen = len(self.__namedTypes)
+ self.__minTagSet = self.__computeMinTagSet()
+ self.__nameToPosMap = self.__computeNameToPosMap()
+ self.__tagToPosMap = self.__computeTagToPosMap()
+ self.__ambiguousTypes = 'terminal' not in kwargs and self.__computeAmbiguousTypes() or {}
+ self.__uniqueTagMap = self.__computeTagMaps(unique=True)
+ self.__nonUniqueTagMap = self.__computeTagMaps(unique=False)
+ self.__hasOptionalOrDefault = any([True for namedType in self.__namedTypes
+ if namedType.isDefaulted or namedType.isOptional])
+ self.__hasOpenTypes = any([True for namedType in self.__namedTypes
+ if namedType.openType])
+
+ self.__requiredComponents = frozenset(
+ [idx for idx, nt in enumerate(self.__namedTypes) if not nt.isOptional and not nt.isDefaulted]
+ )
+ self.__keys = frozenset([namedType.name for namedType in self.__namedTypes])
+ self.__values = tuple([namedType.asn1Object for namedType in self.__namedTypes])
+ self.__items = tuple([(namedType.name, namedType.asn1Object) for namedType in self.__namedTypes])
+
+ def __repr__(self):
+ representation = ', '.join(['%r' % x for x in self.__namedTypes])
+ return '<%s object, types %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__namedTypes == other
+
+ def __ne__(self, other):
+ return self.__namedTypes != other
+
+ def __lt__(self, other):
+ return self.__namedTypes < other
+
+ def __le__(self, other):
+ return self.__namedTypes <= other
+
+ def __gt__(self, other):
+ return self.__namedTypes > other
+
+ def __ge__(self, other):
+ return self.__namedTypes >= other
+
+ def __hash__(self):
+ return hash(self.__namedTypes)
+
+ def __getitem__(self, idx):
+ try:
+ return self.__namedTypes[idx]
+
+ except TypeError:
+ return self.__namedTypes[self.__nameToPosMap[idx]]
+
+ def __contains__(self, key):
+ return key in self.__nameToPosMap
+
+ def __iter__(self):
+ return (x[0] for x in self.__namedTypes)
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return self.__namedTypesLen > 0
+ else:
+ def __bool__(self):
+ return self.__namedTypesLen > 0
+
+ def __len__(self):
+ return self.__namedTypesLen
+
+ # Python dict protocol
+
+ def values(self):
+ return self.__values
+
+ def keys(self):
+ return self.__keys
+
+ def items(self):
+ return self.__items
+
+ def clone(self):
+ return self.__class__(*self.__namedTypes)
+
+ class PostponedError(object):
+ def __init__(self, errorMsg):
+ self.__errorMsg = errorMsg
+
+ def __getitem__(self, item):
+ raise error.PyAsn1Error(self.__errorMsg)
+
+ def __computeTagToPosMap(self):
+ tagToPosMap = {}
+ for idx, namedType in enumerate(self.__namedTypes):
+ tagMap = namedType.asn1Object.tagMap
+ if isinstance(tagMap, NamedTypes.PostponedError):
+ return tagMap
+ if not tagMap:
+ continue
+ for _tagSet in tagMap.presentTypes:
+ if _tagSet in tagToPosMap:
+ return NamedTypes.PostponedError('Duplicate component tag %s at %s' % (_tagSet, namedType))
+ tagToPosMap[_tagSet] = idx
+
+ return tagToPosMap
+
+ def __computeNameToPosMap(self):
+ nameToPosMap = {}
+ for idx, namedType in enumerate(self.__namedTypes):
+ if namedType.name in nameToPosMap:
+ return NamedTypes.PostponedError('Duplicate component name %s at %s' % (namedType.name, namedType))
+ nameToPosMap[namedType.name] = idx
+
+ return nameToPosMap
+
+ def __computeAmbiguousTypes(self):
+ ambiguousTypes = {}
+ partialAmbiguousTypes = ()
+ for idx, namedType in reversed(tuple(enumerate(self.__namedTypes))):
+ if namedType.isOptional or namedType.isDefaulted:
+ partialAmbiguousTypes = (namedType,) + partialAmbiguousTypes
+ else:
+ partialAmbiguousTypes = (namedType,)
+ if len(partialAmbiguousTypes) == len(self.__namedTypes):
+ ambiguousTypes[idx] = self
+ else:
+ ambiguousTypes[idx] = NamedTypes(*partialAmbiguousTypes, **dict(terminal=True))
+ return ambiguousTypes
+
+ def getTypeByPosition(self, idx):
+ """Return ASN.1 type object by its position in fields set.
+
+ Parameters
+ ----------
+ idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ :
+ ASN.1 type
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If given position is out of fields range
+ """
+ try:
+ return self.__namedTypes[idx].asn1Object
+
+ except IndexError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByType(self, tagSet):
+ """Return field position by its ASN.1 type.
+
+ Parameters
+ ----------
+        tagSet: :class:`~pyasn1.type.tag.TagSet`
+ ASN.1 tag set distinguishing one ASN.1 type from others.
+
+ Returns
+ -------
+ : :py:class:`int`
+ ASN.1 type position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *tagSet* is not present or ASN.1 types are not unique within callee *NamedTypes*
+ """
+ try:
+ return self.__tagToPosMap[tagSet]
+
+ except KeyError:
+ raise error.PyAsn1Error('Type %s not found' % (tagSet,))
+
+ def getNameByPosition(self, idx):
+ """Return field name by its position in fields set.
+
+ Parameters
+ ----------
+        idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ : :py:class:`str`
+ Field name
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+            If given position is out of fields range
+ """
+ try:
+ return self.__namedTypes[idx].name
+
+ except IndexError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByName(self, name):
+ """Return field position by filed name.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ Returns
+ -------
+ : :py:class:`int`
+ Field position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *name* is not present or not unique within callee *NamedTypes*
+ """
+ try:
+ return self.__nameToPosMap[name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ def getTagMapNearPosition(self, idx):
+ """Return ASN.1 types that are allowed at or past given field position.
+
+        Some ASN.1 serialisations allow for skipping optional and defaulted fields.
+ Some constructed ASN.1 types allow reordering of the fields. When recovering
+ such objects it may be important to know which types can possibly be
+ present at any given position in the field sets.
+
+ Parameters
+ ----------
+ idx: :py:class:`int`
+ Field index
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tagmap.TagMap`
+            Map of ASN.1 types allowed at given field position
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If given position is out of fields range
+ """
+ try:
+ return self.__ambiguousTypes[idx].tagMap
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionNearType(self, tagSet, idx):
+ """Return the closest field position where given ASN.1 type is allowed.
+
+        Some ASN.1 serialisations allow for skipping optional and defaulted fields.
+        Some constructed ASN.1 types allow reordering of the fields. When recovering
+        such objects it may be important to know the closest field position, at or
+        past *idx*, where the given *tagSet* is allowed.
+
+ Parameters
+ ----------
+ tagSet: :class:`~pyasn1.type.tag.TagSet`
+ ASN.1 type which field position to look up
+
+ idx: :py:class:`int`
+ Field position at or past which to perform ASN.1 type look up
+
+ Returns
+ -------
+ : :py:class:`int`
+ Field position in fields set
+
+ Raises
+ ------
+ ~pyasn1.error.PyAsn1Error
+ If *tagSet* is not present or not unique within callee *NamedTypes*
+ or *idx* is out of fields range
+ """
+ try:
+ return idx + self.__ambiguousTypes[idx].getPositionByType(tagSet)
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def __computeMinTagSet(self):
+ minTagSet = None
+ for namedType in self.__namedTypes:
+ asn1Object = namedType.asn1Object
+
+ try:
+ tagSet = asn1Object.minTagSet
+
+ except AttributeError:
+ tagSet = asn1Object.tagSet
+
+ if minTagSet is None or tagSet < minTagSet:
+ minTagSet = tagSet
+
+ return minTagSet or tag.TagSet()
+
+ @property
+ def minTagSet(self):
+ """Return the minimal TagSet among ASN.1 type in callee *NamedTypes*.
+
+ Some ASN.1 types/serialisation protocols require ASN.1 types to be
+ arranged based on their numerical tag value. The *minTagSet* property
+ returns that.
+
+ Returns
+ -------
+        : :class:`~pyasn1.type.tag.TagSet`
+ Minimal TagSet among ASN.1 types in callee *NamedTypes*
+ """
+ return self.__minTagSet
+
+ def __computeTagMaps(self, unique):
+ presentTypes = {}
+ skipTypes = {}
+ defaultType = None
+ for namedType in self.__namedTypes:
+ tagMap = namedType.asn1Object.tagMap
+ if isinstance(tagMap, NamedTypes.PostponedError):
+ return tagMap
+ for tagSet in tagMap:
+ if unique and tagSet in presentTypes:
+ return NamedTypes.PostponedError('Non-unique tagSet %s of %s at %s' % (tagSet, namedType, self))
+ presentTypes[tagSet] = namedType.asn1Object
+ skipTypes.update(tagMap.skipTypes)
+
+ if defaultType is None:
+ defaultType = tagMap.defaultType
+ elif tagMap.defaultType is not None:
+ return NamedTypes.PostponedError('Duplicate default ASN.1 type at %s' % (self,))
+
+ return tagmap.TagMap(presentTypes, skipTypes, defaultType)
+
+ @property
+ def tagMap(self):
+ """Return a *TagMap* object from tags and types recursively.
+
+ Return a :class:`~pyasn1.type.tagmap.TagMap` object by
+ combining tags from *TagMap* objects of children types and
+ associating them with their immediate child type.
+
+ Example
+ -------
+ .. code-block:: python
+
+ OuterType ::= CHOICE {
+ innerType INTEGER
+ }
+
+ Calling *.tagMap* on *OuterType* will yield a map like this:
+
+ .. code-block:: python
+
+ Integer.tagSet -> Choice
+ """
+ return self.__nonUniqueTagMap
+
+ @property
+ def tagMapUnique(self):
+ """Return a *TagMap* object from unique tags and types recursively.
+
+ Return a :class:`~pyasn1.type.tagmap.TagMap` object by
+ combining tags from *TagMap* objects of children types and
+ associating them with their immediate child type.
+
+ Example
+ -------
+ .. code-block:: python
+
+ OuterType ::= CHOICE {
+ innerType INTEGER
+ }
+
+ Calling *.tagMapUnique* on *OuterType* will yield a map like this:
+
+ .. code-block:: python
+
+ Integer.tagSet -> Choice
+
+ Note
+ ----
+
+ Duplicate *TagSet* objects found in the tree of children
+ types would cause an error.
+ """
+ return self.__uniqueTagMap
+
+ @property
+ def hasOptionalOrDefault(self):
+ return self.__hasOptionalOrDefault
+
+ @property
+ def hasOpenTypes(self):
+ return self.__hasOpenTypes
+
+ @property
+ def namedTypes(self):
+ return tuple(self.__namedTypes)
+
+ @property
+ def requiredComponents(self):
+ return self.__requiredComponents
diff --git a/third_party/python/pyasn1/pyasn1/type/namedval.py b/third_party/python/pyasn1/pyasn1/type/namedval.py
new file mode 100644
index 0000000000..424759784b
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/namedval.py
@@ -0,0 +1,192 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# ASN.1 named integers
+#
+from pyasn1 import error
+
+__all__ = ['NamedValues']
+
+
+class NamedValues(object):
+ """Create named values object.
+
+ The |NamedValues| object represents a collection of string names
+ associated with numeric IDs. These objects are used for giving
+ names to otherwise numerical values.
+
+ |NamedValues| objects are immutable and duck-type Python
+ :class:`dict` objects mapping ID to name and vice versa.
+
+ Parameters
+ ----------
+ *args: variable number of two-element :py:class:`tuple`
+
+ name: :py:class:`str`
+ Value label
+
+ value: :py:class:`int`
+ Numeric value
+
+ Keyword Args
+ ------------
+ name: :py:class:`str`
+ Value label
+
+ value: :py:class:`int`
+ Numeric value
+
+ Examples
+ --------
+
+ .. code-block:: pycon
+
+ >>> nv = NamedValues('a', 'b', ('c', 0), d=1)
+ >>> dict(nv)
+ {'c': 0, 'd': 1, 'a': 2, 'b': 3}
+ >>> nv[0]
+ 'c'
+ >>> nv['a']
+ 2
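+
+ Merging two |NamedValues| objects is also supported (a sketch):
+
+ .. code-block:: pycon
+
+ >>> (NamedValues(off=0) + NamedValues(on=1))['on']
+ 1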
+ """
+ def __init__(self, *args, **kwargs):
+ self.__names = {}
+ self.__numbers = {}
+
+ anonymousNames = []
+
+ for namedValue in args:
+ if isinstance(namedValue, (tuple, list)):
+ try:
+ name, number = namedValue
+
+ except ValueError:
+ raise error.PyAsn1Error('Not a proper attribute-value pair %r' % (namedValue,))
+
+ else:
+ anonymousNames.append(namedValue)
+ continue
+
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ if number in self.__numbers:
+ raise error.PyAsn1Error('Duplicate number %s=%s' % (name, number))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ for name, number in kwargs.items():
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ if number in self.__numbers:
+ raise error.PyAsn1Error('Duplicate number %s=%s' % (name, number))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ if anonymousNames:
+
+ number = self.__numbers and max(self.__numbers) + 1 or 0
+
+ for name in anonymousNames:
+
+ if name in self.__names:
+ raise error.PyAsn1Error('Duplicate name %s' % (name,))
+
+ self.__names[name] = number
+ self.__numbers[number] = name
+
+ number += 1
+
+ def __repr__(self):
+ representation = ', '.join(['%s=%d' % x for x in self.items()])
+
+ if len(representation) > 64:
+ representation = representation[:32] + '...' + representation[-32:]
+
+ return '<%s object, enums %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return dict(self) == other
+
+ def __ne__(self, other):
+ return dict(self) != other
+
+ def __lt__(self, other):
+ return dict(self) < other
+
+ def __le__(self, other):
+ return dict(self) <= other
+
+ def __gt__(self, other):
+ return dict(self) > other
+
+ def __ge__(self, other):
+ return dict(self) >= other
+
+ def __hash__(self):
+ # items() is a generator; hash its sorted contents so that equal
+ # NamedValues objects hash equally
+ return hash(tuple(sorted(self.items())))
+
+ # Python dict protocol (read-only)
+
+ def __getitem__(self, key):
+ try:
+ return self.__numbers[key]
+
+ except KeyError:
+ return self.__names[key]
+
+ def __len__(self):
+ return len(self.__names)
+
+ def __contains__(self, key):
+ return key in self.__names or key in self.__numbers
+
+ def __iter__(self):
+ return iter(self.__names)
+
+ def values(self):
+ return iter(self.__numbers)
+
+ def keys(self):
+ return iter(self.__names)
+
+ def items(self):
+ for name in self.__names:
+ yield name, self.__names[name]
+
+ # support merging
+
+ def __add__(self, namedValues):
+ return self.__class__(*tuple(self.items()) + tuple(namedValues.items()))
+
+ # XXX clone/subtype?
+
+ def clone(self, *args, **kwargs):
+ new = self.__class__(*args, **kwargs)
+ return self + new
+
+ # legacy protocol
+
+ def getName(self, value):
+ if value in self.__numbers:
+ return self.__numbers[value]
+
+ def getValue(self, name):
+ if name in self.__names:
+ return self.__names[name]
+
+ def getValues(self, *names):
+ try:
+ return [self.__names[name] for name in names]
+
+ except KeyError:
+ raise error.PyAsn1Error(
+ 'Unknown bit identifier(s): %s' % (set(names).difference(self.__names),)
+ )
diff --git a/third_party/python/pyasn1/pyasn1/type/opentype.py b/third_party/python/pyasn1/pyasn1/type/opentype.py
new file mode 100644
index 0000000000..29645f0f8d
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/opentype.py
@@ -0,0 +1,104 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+
+__all__ = ['OpenType']
+
+
+class OpenType(object):
+ """Create ASN.1 type map indexed by a value
+
+ The *OpenType* object models an untyped field of a constructed ASN.1
+ type. In ASN.1 syntax it is usually represented by the
+ `ANY DEFINED BY` clause for scalars, or by the `SET OF ANY DEFINED BY`
+ and `SEQUENCE OF ANY DEFINED BY` clauses for container types. Typically
+ used together with :class:`~pyasn1.type.univ.Any` object.
+
+ OpenType objects duck-type read-only Python :class:`dict` objects;
+ however, the passed `typeMap` is not copied, but stored by reference.
+ That means the user can manipulate `typeMap` at run time and have the
+ changes reflected in *OpenType* object behaviour.
+
+ Parameters
+ ----------
+ name: :py:class:`str`
+ Field name
+
+ typeMap: :py:class:`dict`
+ A map of value->ASN.1 type. It's stored by reference and can be
+ mutated later to register new mappings.
+
+ Examples
+ --------
+
+ For untyped scalars:
+
+ .. code-block:: python
+
+ openType = OpenType(
+ 'id', {1: Integer(),
+ 2: OctetString()}
+ )
+ Sequence(
+ componentType=NamedTypes(
+ NamedType('id', Integer()),
+ NamedType('blob', Any(), openType=openType)
+ )
+ )
+
+ For untyped `SET OF` or `SEQUENCE OF` vectors:
+
+ .. code-block:: python
+
+ openType = OpenType(
+ 'id', {1: Integer(),
+ 2: OctetString()}
+ )
+ Sequence(
+ componentType=NamedTypes(
+ NamedType('id', Integer()),
+ NamedType('blob', SetOf(componentType=Any()),
+ openType=openType)
+ )
+ )
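+
+ Because `typeMap` is held by reference, later registrations become
+ visible through the *OpenType* object (a minimal sketch):
+
+ .. code-block:: pycon
+
+ >>> typeMap = {1: Integer()}
+ >>> openType = OpenType('id', typeMap)
+ >>> 2 in openType
+ False
+ >>> typeMap[2] = OctetString()
+ >>> 2 in openType
+ True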
+ """
+
+ def __init__(self, name, typeMap=None):
+ self.__name = name
+ if typeMap is None:
+ self.__typeMap = {}
+ else:
+ self.__typeMap = typeMap
+
+ @property
+ def name(self):
+ return self.__name
+
+ # Python dict protocol
+
+ def values(self):
+ return self.__typeMap.values()
+
+ def keys(self):
+ return self.__typeMap.keys()
+
+ def items(self):
+ return self.__typeMap.items()
+
+ def __contains__(self, key):
+ return key in self.__typeMap
+
+ def __getitem__(self, key):
+ return self.__typeMap[key]
+
+ def __iter__(self):
+ return iter(self.__typeMap)
diff --git a/third_party/python/pyasn1/pyasn1/type/tag.py b/third_party/python/pyasn1/pyasn1/type/tag.py
new file mode 100644
index 0000000000..b88a73417a
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/tag.py
@@ -0,0 +1,335 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1 import error
+
+__all__ = ['tagClassUniversal', 'tagClassApplication', 'tagClassContext',
+ 'tagClassPrivate', 'tagFormatSimple', 'tagFormatConstructed',
+ 'tagCategoryImplicit', 'tagCategoryExplicit',
+ 'tagCategoryUntagged', 'Tag', 'TagSet']
+
+#: Identifier for ASN.1 class UNIVERSAL
+tagClassUniversal = 0x00
+
+#: Identifier for ASN.1 class APPLICATION
+tagClassApplication = 0x40
+
+#: Identifier for ASN.1 class context-specific
+tagClassContext = 0x80
+
+#: Identifier for ASN.1 class private
+tagClassPrivate = 0xC0
+
+#: Identifier for "simple" ASN.1 structure (e.g. scalar)
+tagFormatSimple = 0x00
+
+#: Identifier for "constructed" ASN.1 structure (e.g. may have inner components)
+tagFormatConstructed = 0x20
+
+tagCategoryImplicit = 0x01
+tagCategoryExplicit = 0x02
+tagCategoryUntagged = 0x04
+
+
+class Tag(object):
+ """Create ASN.1 tag
+
+ Represents an ASN.1 tag that can be attached to an ASN.1 type to make
+ types distinguishable from each other.
+
+ *Tag* objects are immutable and duck-type Python :class:`tuple` objects
+ holding three integer components of a tag.
+
+ Parameters
+ ----------
+ tagClass: :py:class:`int`
+ Tag *class* value
+
+ tagFormat: :py:class:`int`
+ Tag *format* value
+
+ tagId: :py:class:`int`
+ Tag ID value
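+
+ Examples
+ --------
+ A minimal sketch using the module-level tag constants defined above:
+
+ .. code-block:: pycon
+
+ >>> t = Tag(tagClassContext, tagFormatSimple, 2)
+ >>> t.tagId
+ 2
+ >>> tuple(t)
+ (128, 0, 2)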
+ """
+ def __init__(self, tagClass, tagFormat, tagId):
+ if tagId < 0:
+ raise error.PyAsn1Error('Negative tag ID (%s) not allowed' % tagId)
+ self.__tagClass = tagClass
+ self.__tagFormat = tagFormat
+ self.__tagId = tagId
+ self.__tagClassId = tagClass, tagId
+ self.__hash = hash(self.__tagClassId)
+
+ def __repr__(self):
+ representation = '[%s:%s:%s]' % (
+ self.__tagClass, self.__tagFormat, self.__tagId)
+ return '<%s object, tag %s>' % (
+ self.__class__.__name__, representation)
+
+ def __eq__(self, other):
+ return self.__tagClassId == other
+
+ def __ne__(self, other):
+ return self.__tagClassId != other
+
+ def __lt__(self, other):
+ return self.__tagClassId < other
+
+ def __le__(self, other):
+ return self.__tagClassId <= other
+
+ def __gt__(self, other):
+ return self.__tagClassId > other
+
+ def __ge__(self, other):
+ return self.__tagClassId >= other
+
+ def __hash__(self):
+ return self.__hash
+
+ def __getitem__(self, idx):
+ if idx == 0:
+ return self.__tagClass
+ elif idx == 1:
+ return self.__tagFormat
+ elif idx == 2:
+ return self.__tagId
+ else:
+ raise IndexError()
+
+ def __iter__(self):
+ yield self.__tagClass
+ yield self.__tagFormat
+ yield self.__tagId
+
+ def __and__(self, otherTag):
+ return self.__class__(self.__tagClass & otherTag.tagClass,
+ self.__tagFormat & otherTag.tagFormat,
+ self.__tagId & otherTag.tagId)
+
+ def __or__(self, otherTag):
+ return self.__class__(self.__tagClass | otherTag.tagClass,
+ self.__tagFormat | otherTag.tagFormat,
+ self.__tagId | otherTag.tagId)
+
+ @property
+ def tagClass(self):
+ """ASN.1 tag class
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag class
+ """
+ return self.__tagClass
+
+ @property
+ def tagFormat(self):
+ """ASN.1 tag format
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag format
+ """
+ return self.__tagFormat
+
+ @property
+ def tagId(self):
+ """ASN.1 tag ID
+
+ Returns
+ -------
+ : :py:class:`int`
+ Tag ID
+ """
+ return self.__tagId
+
+
+class TagSet(object):
+ """Create a collection of ASN.1 tags
+
+ Represents a combination of :class:`~pyasn1.type.tag.Tag` objects
+ that can be attached to an ASN.1 type to make types distinguishable
+ from each other.
+
+ *TagSet* objects are immutable and duck-type Python :class:`tuple` objects
+ holding an arbitrary number of :class:`~pyasn1.type.tag.Tag` objects.
+
+ Parameters
+ ----------
+ baseTag: :class:`~pyasn1.type.tag.Tag`
+ Base *Tag* object. This tag survives IMPLICIT tagging.
+
+ *superTags: :class:`~pyasn1.type.tag.Tag`
+ Additional *Tag* objects taking part in subtyping.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class OrderNumber(NumericString):
+ '''
+ ASN.1 specification
+
+ Order-number ::=
+ [APPLICATION 5] IMPLICIT NumericString
+ '''
+ tagSet = NumericString.tagSet.tagImplicitly(
+ Tag(tagClassApplication, tagFormatSimple, 5)
+ )
+
+ orderNumber = OrderNumber('1234')
+ """
+ def __init__(self, baseTag=(), *superTags):
+ self.__baseTag = baseTag
+ self.__superTags = superTags
+ self.__superTagsClassId = tuple(
+ [(superTag.tagClass, superTag.tagId) for superTag in superTags]
+ )
+ self.__lenOfSuperTags = len(superTags)
+ self.__hash = hash(self.__superTagsClassId)
+
+ def __repr__(self):
+ representation = '-'.join(['%s:%s:%s' % (x.tagClass, x.tagFormat, x.tagId)
+ for x in self.__superTags])
+ if representation:
+ representation = 'tags ' + representation
+ else:
+ representation = 'untagged'
+
+ return '<%s object, %s>' % (self.__class__.__name__, representation)
+
+ def __add__(self, superTag):
+ return self.__class__(self.__baseTag, *self.__superTags + (superTag,))
+
+ def __radd__(self, superTag):
+ return self.__class__(self.__baseTag, *(superTag,) + self.__superTags)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.__class__(self.__baseTag, *self.__superTags[i])
+ else:
+ return self.__superTags[i]
+
+ def __eq__(self, other):
+ return self.__superTagsClassId == other
+
+ def __ne__(self, other):
+ return self.__superTagsClassId != other
+
+ def __lt__(self, other):
+ return self.__superTagsClassId < other
+
+ def __le__(self, other):
+ return self.__superTagsClassId <= other
+
+ def __gt__(self, other):
+ return self.__superTagsClassId > other
+
+ def __ge__(self, other):
+ return self.__superTagsClassId >= other
+
+ def __hash__(self):
+ return self.__hash
+
+ def __len__(self):
+ return self.__lenOfSuperTags
+
+ @property
+ def baseTag(self):
+ """Return base ASN.1 tag
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.Tag`
+ Base tag of this *TagSet*
+ """
+ return self.__baseTag
+
+ @property
+ def superTags(self):
+ """Return ASN.1 tags
+
+ Returns
+ -------
+ : :py:class:`tuple`
+ Tuple of :class:`~pyasn1.type.tag.Tag` objects that this *TagSet* contains
+ """
+ return self.__superTags
+
+ def tagExplicitly(self, superTag):
+ """Return explicitly tagged *TagSet*
+
+ Create a new *TagSet* representing callee *TagSet* explicitly tagged
+ with passed tag(s). With explicit tagging mode, new tags are appended
+ to existing tag(s).
+
+ Parameters
+ ----------
+ superTag: :class:`~pyasn1.type.tag.Tag`
+ *Tag* object to tag this *TagSet*
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.TagSet`
+ New *TagSet* object
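+
+ Example (a sketch; explicit tagging appends a tag, so the tag count grows):
+
+ .. code-block:: pycon
+
+ >>> ts = initTagSet(Tag(tagClassUniversal, tagFormatSimple, 2))
+ >>> len(ts.tagExplicitly(Tag(tagClassContext, tagFormatSimple, 0)))
+ 2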
+ """
+ if superTag.tagClass == tagClassUniversal:
+ raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag")
+ if superTag.tagFormat != tagFormatConstructed:
+ superTag = Tag(superTag.tagClass, tagFormatConstructed, superTag.tagId)
+ return self + superTag
+
+ def tagImplicitly(self, superTag):
+ """Return implicitly tagged *TagSet*
+
+ Create a new *TagSet* representing callee *TagSet* implicitly tagged
+ with passed tag(s). With implicit tagging mode, new tag(s) replace the
+ last existing tag.
+
+ Parameters
+ ----------
+ superTag: :class:`~pyasn1.type.tag.Tag`
+ *Tag* object to tag this *TagSet*
+
+ Returns
+ -------
+ : :class:`~pyasn1.type.tag.TagSet`
+ New *TagSet* object
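+
+ Example (a sketch; implicit tagging replaces the last tag, so the
+ tag count is unchanged):
+
+ .. code-block:: pycon
+
+ >>> ts = initTagSet(Tag(tagClassUniversal, tagFormatSimple, 2))
+ >>> len(ts.tagImplicitly(Tag(tagClassContext, tagFormatSimple, 0)))
+ 1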
+ """
+ if self.__superTags:
+ superTag = Tag(superTag.tagClass, self.__superTags[-1].tagFormat, superTag.tagId)
+ return self[:-1] + superTag
+
+ def isSuperTagSetOf(self, tagSet):
+ """Test type relationship against given *TagSet*
+
+ The callee is considered to be a supertype of given *TagSet*
+ tag-wise if all tags in *TagSet* are present in the callee and
+ they are in the same order.
+
+ Parameters
+ ----------
+ tagSet: :class:`~pyasn1.type.tag.TagSet`
+ *TagSet* object to evaluate against the callee
+
+ Returns
+ -------
+ : :py:class:`bool`
+ :obj:`True` if callee is a supertype of *tagSet*
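+
+ Example (a sketch building on the tagging examples above):
+
+ .. code-block:: pycon
+
+ >>> ts = initTagSet(Tag(tagClassUniversal, tagFormatSimple, 2))
+ >>> ts.isSuperTagSetOf(ts.tagExplicitly(Tag(tagClassContext, tagFormatSimple, 0)))
+ True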
+ """
+ if len(tagSet) < self.__lenOfSuperTags:
+ return False
+ return self.__superTags == tagSet[:self.__lenOfSuperTags]
+
+ # Backward compatibility
+
+ def getBaseTag(self):
+ return self.__baseTag
+
+def initTagSet(tag):
+ return TagSet(tag, tag)
diff --git a/third_party/python/pyasn1/pyasn1/type/tagmap.py b/third_party/python/pyasn1/pyasn1/type/tagmap.py
new file mode 100644
index 0000000000..6f5163b4e8
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/tagmap.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+from pyasn1 import error
+
+__all__ = ['TagMap']
+
+
+class TagMap(object):
+ """Map *TagSet* objects to ASN.1 types
+
+ Create an object mapping *TagSet* object to ASN.1 type.
+
+ *TagMap* objects are immutable and duck-type read-only Python
+ :class:`dict` objects holding *TagSet* objects as keys and ASN.1
+ type objects as values.
+
+ Parameters
+ ----------
+ presentTypes: :py:class:`dict`
+ Map of :class:`~pyasn1.type.tag.TagSet` to ASN.1 objects considered
+ as being unconditionally present in the *TagMap*.
+
+ skipTypes: :py:class:`dict`
+ A collection of :class:`~pyasn1.type.tag.TagSet` objects considered
+ as absent in the *TagMap* even when *defaultType* is present.
+
+ defaultType: ASN.1 type object
+ An ASN.1 type object that callee *TagMap* returns for any *TagSet* key
+ not present in *presentTypes* (unless the key is present in *skipTypes*).
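+
+ Example
+ -------
+ A minimal sketch, assuming hypothetical *TagSet* objects ``tagSetA``,
+ ``tagSetB`` and ASN.1 type objects ``typeA``, ``anyType``:
+
+ .. code-block:: pycon
+
+ >>> tagMap = TagMap({tagSetA: typeA}, defaultType=anyType)
+ >>> tagMap[tagSetA] is typeA
+ True
+ >>> tagMap[tagSetB] is anyType
+ True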
+ """
+ def __init__(self, presentTypes=None, skipTypes=None, defaultType=None):
+ self.__presentTypes = presentTypes or {}
+ self.__skipTypes = skipTypes or {}
+ self.__defaultType = defaultType
+
+ def __contains__(self, tagSet):
+ return (tagSet in self.__presentTypes or
+ self.__defaultType is not None and tagSet not in self.__skipTypes)
+
+ def __getitem__(self, tagSet):
+ try:
+ return self.__presentTypes[tagSet]
+ except KeyError:
+ if self.__defaultType is None:
+ raise KeyError()
+ elif tagSet in self.__skipTypes:
+ raise error.PyAsn1Error('Key in negative map')
+ else:
+ return self.__defaultType
+
+ def __iter__(self):
+ return iter(self.__presentTypes)
+
+ def __repr__(self):
+ representation = '%s object' % self.__class__.__name__
+
+ if self.__presentTypes:
+ representation += ', present %s' % repr(self.__presentTypes)
+
+ if self.__skipTypes:
+ representation += ', skip %s' % repr(self.__skipTypes)
+
+ if self.__defaultType is not None:
+ representation += ', default %s' % repr(self.__defaultType)
+
+ return '<%s>' % representation
+
+ @property
+ def presentTypes(self):
+ """Return *TagSet* to ASN.1 type map present in callee *TagMap*"""
+ return self.__presentTypes
+
+ @property
+ def skipTypes(self):
+ """Return *TagSet* collection unconditionally absent in callee *TagMap*"""
+ return self.__skipTypes
+
+ @property
+ def defaultType(self):
+ """Return default ASN.1 type being returned for any missing *TagSet*"""
+ return self.__defaultType
+
+ # Backward compatibility
+
+ def getPosMap(self):
+ return self.presentTypes
+
+ def getNegMap(self):
+ return self.skipTypes
+
+ def getDef(self):
+ return self.defaultType
diff --git a/third_party/python/pyasn1/pyasn1/type/univ.py b/third_party/python/pyasn1/pyasn1/type/univ.py
new file mode 100644
index 0000000000..aa688b22af
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/univ.py
@@ -0,0 +1,3321 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import math
+import sys
+
+from pyasn1 import error
+from pyasn1.codec.ber import eoo
+from pyasn1.compat import binary
+from pyasn1.compat import integer
+from pyasn1.compat import octets
+from pyasn1.type import base
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import tagmap
+
+NoValue = base.NoValue
+noValue = NoValue()
+
+__all__ = ['Integer', 'Boolean', 'BitString', 'OctetString', 'Null',
+ 'ObjectIdentifier', 'Real', 'Enumerated',
+ 'SequenceOfAndSetOfBase', 'SequenceOf', 'SetOf',
+ 'SequenceAndSetBase', 'Sequence', 'Set', 'Choice', 'Any',
+ 'NoValue', 'noValue']
+
+# "Simple" ASN.1 types (yet incomplete)
+
+
+class Integer(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal or |ASN.1| class
+ instance. If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class ErrorCode(Integer):
+ '''
+ ASN.1 specification:
+
+ ErrorCode ::=
+ INTEGER { disk-full(1), no-disk(-1),
+ disk-not-formatted(2) }
+
+ error ErrorCode ::= disk-full
+ '''
+ namedValues = NamedValues(
+ ('disk-full', 1), ('no-disk', -1),
+ ('disk-not-formatted', 2)
+ )
+
+ error = ErrorCode('disk-full')
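+
+ Arithmetic returns new |ASN.1| objects and named values resolve to
+ numbers (a sketch continuing the example above):
+
+ .. code-block:: pycon
+
+ >>> int(ErrorCode('disk-full'))
+ 1
+ >>> int(Integer(1) + 2)
+ 3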
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ def __init__(self, value=noValue, **kwargs):
+ if 'namedValues' not in kwargs:
+ kwargs['namedValues'] = self.namedValues
+
+ base.SimpleAsn1Type.__init__(self, value, **kwargs)
+
+ def __and__(self, value):
+ return self.clone(self._value & value)
+
+ def __rand__(self, value):
+ return self.clone(value & self._value)
+
+ def __or__(self, value):
+ return self.clone(self._value | value)
+
+ def __ror__(self, value):
+ return self.clone(value | self._value)
+
+ def __xor__(self, value):
+ return self.clone(self._value ^ value)
+
+ def __rxor__(self, value):
+ return self.clone(value ^ self._value)
+
+ def __lshift__(self, value):
+ return self.clone(self._value << value)
+
+ def __rshift__(self, value):
+ return self.clone(self._value >> value)
+
+ def __add__(self, value):
+ return self.clone(self._value + value)
+
+ def __radd__(self, value):
+ return self.clone(value + self._value)
+
+ def __sub__(self, value):
+ return self.clone(self._value - value)
+
+ def __rsub__(self, value):
+ return self.clone(value - self._value)
+
+ def __mul__(self, value):
+ return self.clone(self._value * value)
+
+ def __rmul__(self, value):
+ return self.clone(value * self._value)
+
+ def __mod__(self, value):
+ return self.clone(self._value % value)
+
+ def __rmod__(self, value):
+ return self.clone(value % self._value)
+
+ def __pow__(self, value, modulo=None):
+ return self.clone(pow(self._value, value, modulo))
+
+ def __rpow__(self, value):
+ return self.clone(pow(value, self._value))
+
+ def __floordiv__(self, value):
+ return self.clone(self._value // value)
+
+ def __rfloordiv__(self, value):
+ return self.clone(value // self._value)
+
+ if sys.version_info[0] <= 2:
+ def __div__(self, value):
+ if isinstance(value, float):
+ return Real(self._value / value)
+ else:
+ return self.clone(self._value / value)
+
+ def __rdiv__(self, value):
+ if isinstance(value, float):
+ return Real(value / self._value)
+ else:
+ return self.clone(value / self._value)
+ else:
+ def __truediv__(self, value):
+ return Real(self._value / value)
+
+ def __rtruediv__(self, value):
+ return Real(value / self._value)
+
+ def __divmod__(self, value):
+ return self.clone(divmod(self._value, value))
+
+ def __rdivmod__(self, value):
+ return self.clone(divmod(value, self._value))
+
+ __hash__ = base.SimpleAsn1Type.__hash__
+
+ def __int__(self):
+ return int(self._value)
+
+ if sys.version_info[0] <= 2:
+ def __long__(self):
+ return long(self._value)
+
+ def __float__(self):
+ return float(self._value)
+
+ def __abs__(self):
+ return self.clone(abs(self._value))
+
+ def __index__(self):
+ return int(self._value)
+
+ def __pos__(self):
+ return self.clone(+self._value)
+
+ def __neg__(self):
+ return self.clone(-self._value)
+
+ def __invert__(self):
+ return self.clone(~self._value)
+
+ def __round__(self, n=0):
+ r = round(self._value, n)
+ if n:
+ return self.clone(r)
+ else:
+ return r
+
+ def __floor__(self):
+ return math.floor(self._value)
+
+ def __ceil__(self):
+ return math.ceil(self._value)
+
+ if sys.version_info[0:2] > (2, 5):
+ def __trunc__(self):
+ return self.clone(math.trunc(self._value))
+
+ def __lt__(self, value):
+ return self._value < value
+
+ def __le__(self, value):
+ return self._value <= value
+
+ def __eq__(self, value):
+ return self._value == value
+
+ def __ne__(self, value):
+ return self._value != value
+
+ def __gt__(self, value):
+ return self._value > value
+
+ def __ge__(self, value):
+ return self._value >= value
+
+ def prettyIn(self, value):
+ try:
+ return int(value)
+
+ except ValueError:
+ try:
+ return self.namedValues[value]
+
+ except KeyError:
+ raise error.PyAsn1Error(
+ 'Can\'t coerce %r into integer: %s' % (value, sys.exc_info()[1])
+ )
+
+ def prettyOut(self, value):
+ try:
+ return str(self.namedValues[value])
+
+ except KeyError:
+ return str(value)
+
+ # backward compatibility
+
+ def getNamedValues(self):
+ return self.namedValues
+
+
+class Boolean(Integer):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal or |ASN.1| class
+ instance. If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class RoundResult(Boolean):
+ '''
+ ASN.1 specification:
+
+ RoundResult ::= BOOLEAN
+
+ ok RoundResult ::= TRUE
+ ko RoundResult ::= FALSE
+ '''
+ ok = RoundResult(True)
+ ko = RoundResult(False)
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = Integer.subtypeSpec + constraint.SingleValueConstraint(0, 1)
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues(('False', 0), ('True', 1))
+
+ # Optimization for faster codec lookup
+ typeId = Integer.getTypeId()
+
+if sys.version_info[0] < 3:
+ SizedIntegerBase = long
+else:
+ SizedIntegerBase = int
+
+
+class SizedInteger(SizedIntegerBase):
+ bitLength = leadingZeroBits = None
+
+ def setBitLength(self, bitLength):
+ self.bitLength = bitLength
+ self.leadingZeroBits = max(bitLength - integer.bitLength(self), 0)
+ return self
+
+ def __len__(self):
+ if self.bitLength is None:
+ self.setBitLength(integer.bitLength(self))
+
+ return self.bitLength
+
+
+class BitString(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type both Python :class:`tuple` (as a tuple
+ of bits) and :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal representing binary
+ or hexadecimal number or sequence of integer bits or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ binValue: :py:class:`str`
+ Binary string initializer to use instead of the *value*.
+ Example: '10110011'.
+
+ hexValue: :py:class:`str`
+ Hexadecimal string initializer to use instead of the *value*.
+ Example: 'DEADBEEF'.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Rights(BitString):
+ '''
+ ASN.1 specification:
+
+ Rights ::= BIT STRING { user-read(0), user-write(1),
+ group-read(2), group-write(3),
+ other-read(4), other-write(5) }
+
+ group1 Rights ::= { group-read, group-write }
+ group2 Rights ::= '0011'B
+ group3 Rights ::= '3'H
+ '''
+ namedValues = NamedValues(
+ ('user-read', 0), ('user-write', 1),
+ ('group-read', 2), ('group-write', 3),
+ ('other-read', 4), ('other-write', 5)
+ )
+
+ group1 = Rights(('group-read', 'group-write'))
+ group2 = Rights('0011')
+ group3 = Rights(0x3)
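+
+ A few initializer forms accepted at value-object creation (a sketch):
+
+ .. code-block:: pycon
+
+ >>> BitString('0b101').asBinary()
+ '101'
+ >>> BitString(hexValue='FF').asNumbers()
+ (255,)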
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ defaultBinValue = defaultHexValue = noValue
+
+ def __init__(self, value=noValue, **kwargs):
+ if value is noValue:
+ if kwargs:
+ try:
+ value = self.fromBinaryString(kwargs.pop('binValue'), internalFormat=True)
+
+ except KeyError:
+ pass
+
+ try:
+ value = self.fromHexString(kwargs.pop('hexValue'), internalFormat=True)
+
+ except KeyError:
+ pass
+
+ if value is noValue:
+ if self.defaultBinValue is not noValue:
+ value = self.fromBinaryString(self.defaultBinValue, internalFormat=True)
+
+ elif self.defaultHexValue is not noValue:
+ value = self.fromHexString(self.defaultHexValue, internalFormat=True)
+
+ if 'namedValues' not in kwargs:
+ kwargs['namedValues'] = self.namedValues
+
+ base.SimpleAsn1Type.__init__(self, value, **kwargs)
+
+ def __str__(self):
+ return self.asBinary()
+
+ def __eq__(self, other):
+ other = self.prettyIn(other)
+ return self is other or self._value == other and len(self._value) == len(other)
+
+ def __ne__(self, other):
+ other = self.prettyIn(other)
+ return self._value != other or len(self._value) != len(other)
+
+ def __lt__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) < len(other) or len(self._value) == len(other) and self._value < other
+
+ def __le__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) <= len(other) or len(self._value) == len(other) and self._value <= other
+
+ def __gt__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) > len(other) or len(self._value) == len(other) and self._value > other
+
+ def __ge__(self, other):
+ other = self.prettyIn(other)
+ return len(self._value) >= len(other) or len(self._value) == len(other) and self._value >= other
+
+ # Immutable sequence object protocol
+
+ def __len__(self):
+ return len(self._value)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.clone([self[x] for x in range(*i.indices(len(self)))])
+ else:
+ length = len(self._value) - 1
+ if i > length or i < 0:
+ raise IndexError('bit index out of range')
+ return (self._value >> (length - i)) & 1
+
+ def __iter__(self):
+ length = len(self._value)
+ while length:
+ length -= 1
+ yield (self._value >> length) & 1
+
+ def __reversed__(self):
+ return reversed(tuple(self))
+
+ # arithmetic operators
+
+ def __add__(self, value):
+ value = self.prettyIn(value)
+ return self.clone(SizedInteger(self._value << len(value) | value).setBitLength(len(self._value) + len(value)))
+
+ def __radd__(self, value):
+ value = self.prettyIn(value)
+ return self.clone(SizedInteger(value << len(self._value) | self._value).setBitLength(len(self._value) + len(value)))
+
+ def __mul__(self, value):
+ bitString = self._value
+ while value > 1:
+ bitString <<= len(self._value)
+ bitString |= self._value
+ value -= 1
+ return self.clone(bitString)
+
+ def __rmul__(self, value):
+ return self * value
+
+ def __lshift__(self, count):
+ return self.clone(SizedInteger(self._value << count).setBitLength(len(self._value) + count))
+
+ def __rshift__(self, count):
+ return self.clone(SizedInteger(self._value >> count).setBitLength(max(0, len(self._value) - count)))
+
+ def __int__(self):
+ return self._value
+
+ def __float__(self):
+ return float(self._value)
+
+ if sys.version_info[0] < 3:
+ def __long__(self):
+ return self._value
+
+ def asNumbers(self):
+ """Get |ASN.1| value as a sequence of 8-bit integers.
+
+ If |ASN.1| object length is not a multiple of 8, result
+ will be left-padded with zeros.
+ """
+ return tuple(octets.octs2ints(self.asOctets()))
+
+ def asOctets(self):
+ """Get |ASN.1| value as a sequence of octets.
+
+ If |ASN.1| object length is not a multiple of 8, result
+ will be left-padded with zeros.
+ """
+ return integer.to_bytes(self._value, length=len(self))
+
+ def asInteger(self):
+ """Get |ASN.1| value as a single integer value.
+ """
+ return self._value
+
+ def asBinary(self):
+ """Get |ASN.1| value as a text string of bits.
+ """
+ binString = binary.bin(self._value)[2:]
+ return '0' * (len(self._value) - len(binString)) + binString
+
+ @classmethod
+ def fromHexString(cls, value, internalFormat=False, prepend=None):
+ """Create a |ASN.1| object initialized from the hex string.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like 'DEADBEEF'
+ """
+ try:
+ value = SizedInteger(value, 16).setBitLength(len(value) * 4)
+
+ except ValueError:
+ raise error.PyAsn1Error('%s.fromHexString() error: %s' % (cls.__name__, sys.exc_info()[1]))
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
+ @classmethod
+ def fromBinaryString(cls, value, internalFormat=False, prepend=None):
+ """Create a |ASN.1| object initialized from a string of '0' and '1'.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like '1010111'
+ """
+ try:
+ value = SizedInteger(value or '0', 2).setBitLength(len(value))
+
+ except ValueError:
+ raise error.PyAsn1Error('%s.fromBinaryString() error: %s' % (cls.__name__, sys.exc_info()[1]))
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
+ @classmethod
+ def fromOctetString(cls, value, internalFormat=False, prepend=None, padding=0):
+ """Create a |ASN.1| object initialized from a string.
+
+ Parameters
+ ----------
+ value: :class:`str` (Py2) or :class:`bytes` (Py3)
+ Text string like '\\\\x01\\\\xff' (Py2) or b'\\\\x01\\\\xff' (Py3)
+ """
+ value = SizedInteger(integer.from_bytes(value) >> padding).setBitLength(len(value) * 8 - padding)
+
+ if prepend is not None:
+ value = SizedInteger(
+ (SizedInteger(prepend) << len(value)) | value
+ ).setBitLength(len(prepend) + len(value))
+
+ if not internalFormat:
+ value = cls(value)
+
+ return value
+
+ def prettyIn(self, value):
+ if isinstance(value, SizedInteger):
+ return value
+ elif octets.isStringType(value):
+ if not value:
+ return SizedInteger(0).setBitLength(0)
+
+ elif value[0] == '\'': # "'1011'B" -- ASN.1 schema representation (deprecated)
+ if value[-2:] == '\'B':
+ return self.fromBinaryString(value[1:-2], internalFormat=True)
+ elif value[-2:] == '\'H':
+ return self.fromHexString(value[1:-2], internalFormat=True)
+ else:
+ raise error.PyAsn1Error(
+ 'Bad BIT STRING value notation %s' % (value,)
+ )
+
+ elif self.namedValues and not value.isdigit(): # named bits like 'Urgent, Active'
+ names = [x.strip() for x in value.split(',')]
+
+ try:
+
+ bitPositions = [self.namedValues[name] for name in names]
+
+ except KeyError:
+ raise error.PyAsn1Error('unknown bit name(s) in %r' % (names,))
+
+ rightmostPosition = max(bitPositions)
+
+ number = 0
+ for bitPosition in bitPositions:
+ number |= 1 << (rightmostPosition - bitPosition)
+
+ return SizedInteger(number).setBitLength(rightmostPosition + 1)
+
+ elif value.startswith('0x'):
+ return self.fromHexString(value[2:], internalFormat=True)
+
+ elif value.startswith('0b'):
+ return self.fromBinaryString(value[2:], internalFormat=True)
+
+ else: # assume plain binary string like '1011'
+ return self.fromBinaryString(value, internalFormat=True)
+
+ elif isinstance(value, (tuple, list)):
+ return self.fromBinaryString(''.join([b and '1' or '0' for b in value]), internalFormat=True)
+
+ elif isinstance(value, BitString):
+ return SizedInteger(value).setBitLength(len(value))
+
+ elif isinstance(value, intTypes):
+ return SizedInteger(value)
+
+ else:
+ raise error.PyAsn1Error(
+ 'Bad BitString initializer type \'%s\'' % (value,)
+ )
+
+
+try:
+ # noinspection PyStatementEffect
+ all
+
+except NameError: # Python 2.4
+ # noinspection PyShadowingBuiltins
+ def all(iterable):
+ for element in iterable:
+ if not element:
+ return False
+ return True
+
+
+class OctetString(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python 2 :class:`str` or
+ Python 3 :class:`bytes`. When used in Unicode context, |ASN.1| type
+ assumes "|encoding|" serialisation.
+
+ Keyword Args
+ ------------
+ value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
+ :class:`str` (Python 2) or :class:`bytes` (Python 3), alternatively
+ :class:`unicode` object (Python 2) or :class:`str` (Python 3)
+ representing character string to be serialised into octets
+ (note `encoding` parameter) or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ encoding: :py:class:`str`
+ Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
+ :class:`str` (Python 3) the payload when |ASN.1| object is used
+ in text string context.
+
+ binValue: :py:class:`str`
+ Binary string initializer to use instead of the *value*.
+ Example: '10110011'.
+
+ hexValue: :py:class:`str`
+ Hexadecimal string initializer to use instead of the *value*.
+ Example: 'DEADBEEF'.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Icon(OctetString):
+ '''
+ ASN.1 specification:
+
+ Icon ::= OCTET STRING
+
+ icon1 Icon ::= '001100010011001000110011'B
+ icon2 Icon ::= '313233'H
+ '''
+ icon1 = Icon.fromBinaryString('001100010011001000110011')
+ icon2 = Icon.fromHexString('313233')
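+
+ Octet and text views of the same payload (a sketch; Python 3 shown):
+
+ .. code-block:: pycon
+
+ >>> OctetString('abc').asNumbers()
+ (97, 98, 99)
+ >>> str(OctetString(hexValue='313233'))
+ '123'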
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ defaultBinValue = defaultHexValue = noValue
+ encoding = 'iso-8859-1'
+
+ def __init__(self, value=noValue, **kwargs):
+ if kwargs:
+ if value is noValue:
+ try:
+ value = self.fromBinaryString(kwargs.pop('binValue'))
+
+ except KeyError:
+ pass
+
+ try:
+ value = self.fromHexString(kwargs.pop('hexValue'))
+
+ except KeyError:
+ pass
+
+ if value is noValue:
+ if self.defaultBinValue is not noValue:
+ value = self.fromBinaryString(self.defaultBinValue)
+
+ elif self.defaultHexValue is not noValue:
+ value = self.fromHexString(self.defaultHexValue)
+
+ if 'encoding' not in kwargs:
+ kwargs['encoding'] = self.encoding
+
+ base.SimpleAsn1Type.__init__(self, value, **kwargs)
+
+ if sys.version_info[0] <= 2:
+ def prettyIn(self, value):
+ if isinstance(value, str):
+ return value
+
+ elif isinstance(value, unicode):
+ try:
+ return value.encode(self.encoding)
+
+ except (LookupError, UnicodeEncodeError):
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeEncodeError(
+ "Can't encode string '%s' with codec "
+ "%s" % (value, self.encoding), exc
+ )
+
+ elif isinstance(value, (tuple, list)):
+ try:
+ return ''.join([chr(x) for x in value])
+
+ except ValueError:
+ raise error.PyAsn1Error(
+ "Bad %s initializer '%s'" % (self.__class__.__name__, value)
+ )
+
+ else:
+ return str(value)
+
+ def __str__(self):
+ return str(self._value)
+
+ def __unicode__(self):
+ try:
+ return self._value.decode(self.encoding)
+
+ except UnicodeDecodeError:
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeDecodeError(
+ "Can't decode string '%s' with codec "
+ "%s" % (self._value, self.encoding), exc
+ )
+
+ def asOctets(self):
+ return str(self._value)
+
+ def asNumbers(self):
+ return tuple([ord(x) for x in self._value])
+
+ else:
+ def prettyIn(self, value):
+ if isinstance(value, bytes):
+ return value
+
+ elif isinstance(value, str):
+ try:
+ return value.encode(self.encoding)
+
+ except UnicodeEncodeError:
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeEncodeError(
+ "Can't encode string '%s' with '%s' "
+ "codec" % (value, self.encoding), exc
+ )
+ elif isinstance(value, OctetString): # a shortcut, bytes() would work the same way
+ return value.asOctets()
+
+ elif isinstance(value, base.SimpleAsn1Type): # this mostly targets Integer objects
+ return self.prettyIn(str(value))
+
+ elif isinstance(value, (tuple, list)):
+ return self.prettyIn(bytes(value))
+
+ else:
+ return bytes(value)
+
+ def __str__(self):
+ try:
+ return self._value.decode(self.encoding)
+
+ except UnicodeDecodeError:
+ exc = sys.exc_info()[1]
+ raise error.PyAsn1UnicodeDecodeError(
+ "Can't decode string '%s' with '%s' codec at "
+ "'%s'" % (self._value, self.encoding,
+ self.__class__.__name__), exc
+ )
+
+ def __bytes__(self):
+ return bytes(self._value)
+
+ def asOctets(self):
+ return bytes(self._value)
+
+ def asNumbers(self):
+ return tuple(self._value)
+
+ #
+ # Normally, `.prettyPrint()` is called from `__str__()`. Historically,
+ # OctetString.prettyPrint() used to return hexified payload
+ # representation in cases when non-printable content is present. At the
+ # same time `str()` used to produce either octet-stream (Py2) or
+ # text (Py3) representations.
+ #
+ # Therefore `OctetString.__str__()` -> `.prettyPrint()` call chain is
+ # reversed to preserve the original behaviour.
+ #
+ # Eventually we should deprecate `.prettyPrint()` / `.prettyOut()` harness
+ # and end up with just `__str__()` producing hexified representation while
+ # both text and octet-stream representation should only be requested via
+ # the `.asOctets()` method.
+ #
+ # Note: ASN.1 OCTET STRING is never meant to contain text!
+ #
+
+ def prettyOut(self, value):
+ return value
+
+ def prettyPrint(self, scope=0):
+ # first see if subclass has its own .prettyOut()
+ value = self.prettyOut(self._value)
+
+ if value is not self._value:
+ return value
+
+ numbers = self.asNumbers()
+
+ for x in numbers:
+ # hexify if needed
+ if x < 32 or x > 126:
+ return '0x' + ''.join(('%.2x' % x for x in numbers))
+ else:
+ # this prevents infinite recursion
+ return OctetString.__str__(self)
+
+ @staticmethod
+ def fromBinaryString(value):
+ """Create a |ASN.1| object initialized from a string of '0' and '1'.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like '1010111'
+ """
+ bitNo = 8
+ byte = 0
+ r = []
+ for v in value:
+ if bitNo:
+ bitNo -= 1
+ else:
+ bitNo = 7
+ r.append(byte)
+ byte = 0
+ if v in ('0', '1'):
+ v = int(v)
+ else:
+ raise error.PyAsn1Error(
+ 'Non-binary OCTET STRING initializer %s' % (v,)
+ )
+ byte |= v << bitNo
+
+ r.append(byte)
+
+ return octets.ints2octs(r)
+
+ @staticmethod
+ def fromHexString(value):
+ """Create a |ASN.1| object initialized from the hex string.
+
+ Parameters
+ ----------
+ value: :class:`str`
+ Text string like 'DEADBEEF'
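+
+ Note that this helper returns raw octets rather than an |ASN.1|
+ object (a sketch; Python 3 shown):
+
+ .. code-block:: pycon
+
+ >>> OctetString.fromHexString('313233')
+ b'123'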
+ """
+ r = []
+ p = []
+ for v in value:
+ if p:
+ r.append(int(p + v, 16))
+ p = None
+ else:
+ p = v
+ if p:
+ r.append(int(p + '0', 16))
+
+ return octets.ints2octs(r)
+
+ # Immutable sequence object protocol
+
+ def __len__(self):
+ return len(self._value)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.clone(self._value[i])
+ else:
+ return self._value[i]
+
+ def __iter__(self):
+ return iter(self._value)
+
+ def __contains__(self, value):
+ return value in self._value
+
+ def __add__(self, value):
+ return self.clone(self._value + self.prettyIn(value))
+
+ def __radd__(self, value):
+ return self.clone(self.prettyIn(value) + self._value)
+
+ def __mul__(self, value):
+ return self.clone(self._value * value)
+
+ def __rmul__(self, value):
+ return self * value
+
+ def __int__(self):
+ return int(self._value)
+
+ def __float__(self):
+ return float(self._value)
+
+ def __reversed__(self):
+ return reversed(self._value)
+
+
+class Null(OctetString):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`str` objects
+ (always empty).
+
+ Keyword Args
+ ------------
+ value: :class:`str` or |ASN.1| object
+ Python empty :class:`str` literal or any object that evaluates to :obj:`False`.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Ack(Null):
+ '''
+ ASN.1 specification:
+
+ Ack ::= NULL
+ '''
+ ack = Ack('')
+ """
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
+ )
+ subtypeSpec = OctetString.subtypeSpec + constraint.SingleValueConstraint(octets.str2octs(''))
+
+ # Optimization for faster codec lookup
+ typeId = OctetString.getTypeId()
+
+ def prettyIn(self, value):
+ if value:
+ return value
+
+ return octets.str2octs('')
+
+if sys.version_info[0] <= 2:
+ intTypes = (int, long)
+else:
+ intTypes = (int,)
+
+numericTypes = intTypes + (float,)
+
+
+class ObjectIdentifier(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`tuple` objects
+ (tuple of non-negative integers).
+
+ Keyword Args
+ ------------
+ value: :class:`tuple`, :class:`str` or |ASN.1| object
+ Python sequence of :class:`int` or :class:`str` literal or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class ID(ObjectIdentifier):
+ '''
+ ASN.1 specification:
+
+ ID ::= OBJECT IDENTIFIER
+
+ id-edims ID ::= { joint-iso-itu-t mhs-motif(6) edims(7) }
+ id-bp ID ::= { id-edims 11 }
+ '''
+ id_edims = ID('2.6.7')
+ id_bp = id_edims + (11,)
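+
+ Prefix testing (a sketch):
+
+ .. code-block:: pycon
+
+ >>> ObjectIdentifier('1.3.6').isPrefixOf(ObjectIdentifier('1.3.6.1.2'))
+ True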
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ def __add__(self, other):
+ return self.clone(self._value + other)
+
+ def __radd__(self, other):
+ return self.clone(other + self._value)
+
+ def asTuple(self):
+ return self._value
+
+ # Sequence object protocol
+
+ def __len__(self):
+ return len(self._value)
+
+ def __getitem__(self, i):
+ if i.__class__ is slice:
+ return self.clone(self._value[i])
+ else:
+ return self._value[i]
+
+ def __iter__(self):
+ return iter(self._value)
+
+ def __contains__(self, value):
+ return value in self._value
+
+ def index(self, suboid):
+ return self._value.index(suboid)
+
+ def isPrefixOf(self, other):
+ """Indicate if this |ASN.1| object is a prefix of other |ASN.1| object.
+
+ Parameters
+ ----------
+ other: |ASN.1| object
+ |ASN.1| object
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if this |ASN.1| object is a parent (i.e. a prefix) of the other |ASN.1| object
+ or :obj:`False` otherwise.
+ """
+ length = len(self)
+ if length <= len(other):
+ if self._value[:length] == other[:length]:
+ return True
+ return False
+
+ def prettyIn(self, value):
+ if isinstance(value, ObjectIdentifier):
+ return tuple(value)
+ elif octets.isStringType(value):
+ if '-' in value:
+ raise error.PyAsn1Error(
+ 'Malformed Object ID %s at %s' % (value, self.__class__.__name__)
+ )
+ try:
+ return tuple([int(subOid) for subOid in value.split('.') if subOid])
+ except ValueError:
+ raise error.PyAsn1Error(
+ 'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
+ )
+
+ try:
+ tupleOfInts = tuple([int(subOid) for subOid in value if subOid >= 0])
+
+ except (ValueError, TypeError):
+ raise error.PyAsn1Error(
+ 'Malformed Object ID %s at %s: %s' % (value, self.__class__.__name__, sys.exc_info()[1])
+ )
+
+ if len(tupleOfInts) == len(value):
+ return tupleOfInts
+
+ raise error.PyAsn1Error('Malformed Object ID %s at %s' % (value, self.__class__.__name__))
+
+ def prettyOut(self, value):
+ return '.'.join([str(x) for x in value])
+
+
+class Real(base.SimpleAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`float` objects.
+ Additionally, |ASN.1| objects behave like a :class:`tuple` whose
+ elements are mantissa, base and exponent.
+
+ Keyword Args
+ ------------
+ value: :class:`tuple`, :class:`float` or |ASN.1| object
+ Python sequence of :class:`int` (representing mantissa, base and
+ exponent) or :class:`float` instance or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Pi(Real):
+ '''
+ ASN.1 specification:
+
+ Pi ::= REAL
+
+ pi Pi ::= { mantissa 314159, base 10, exponent -5 }
+
+ '''
+ pi = Pi((314159, 10, -5))
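+
+ Conversion and infinity handling (a sketch):
+
+ .. code-block:: pycon
+
+ >>> float(Real((3, 10, 0)))
+ 3.0
+ >>> Real('inf').isPlusInf
+ True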
+ """
+ binEncBase = None # binEncBase = 16 is recommended for large numbers
+
+ try:
+ _plusInf = float('inf')
+ _minusInf = float('-inf')
+ _inf = _plusInf, _minusInf
+
+ except ValueError:
+ # Infinity support is platform and Python dependent
+ _plusInf = _minusInf = None
+ _inf = ()
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Optimization for faster codec lookup
+ typeId = base.SimpleAsn1Type.getTypeId()
+
+ @staticmethod
+ def __normalizeBase10(value):
+ m, b, e = value
+ while m and m % 10 == 0:
+ m //= 10  # floor division keeps integer mantissas integral on Python 3
+ e += 1
+ return m, b, e
+
+ def prettyIn(self, value):
+ if isinstance(value, tuple) and len(value) == 3:
+ if (not isinstance(value[0], numericTypes) or
+ not isinstance(value[1], intTypes) or
+ not isinstance(value[2], intTypes)):
+ raise error.PyAsn1Error('Lame Real value syntax: %s' % (value,))
+ if (isinstance(value[0], float) and
+ self._inf and value[0] in self._inf):
+ return value[0]
+ if value[1] not in (2, 10):
+ raise error.PyAsn1Error(
+ 'Prohibited base for Real value: %s' % (value[1],)
+ )
+ if value[1] == 10:
+ value = self.__normalizeBase10(value)
+ return value
+ elif isinstance(value, intTypes):
+ return self.__normalizeBase10((value, 10, 0))
+ elif isinstance(value, float) or octets.isStringType(value):
+ if octets.isStringType(value):
+ try:
+ value = float(value)
+ except ValueError:
+ raise error.PyAsn1Error(
+ 'Bad real value syntax: %s' % (value,)
+ )
+ if self._inf and value in self._inf:
+ return value
+ else:
+ e = 0
+ while int(value) != value:
+ value *= 10
+ e -= 1
+ return self.__normalizeBase10((int(value), 10, e))
+ elif isinstance(value, Real):
+ return tuple(value)
+ raise error.PyAsn1Error(
+ 'Bad real value syntax: %s' % (value,)
+ )
+
+ def prettyPrint(self, scope=0):
+ try:
+ return self.prettyOut(float(self))
+
+ except OverflowError:
+ return '<overflow>'
+
+ @property
+ def isPlusInf(self):
+ """Indicate PLUS-INFINITY object value
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if calling object represents plus infinity
+ or :obj:`False` otherwise.
+
+ """
+ return self._value == self._plusInf
+
+ @property
+ def isMinusInf(self):
+ """Indicate MINUS-INFINITY object value
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`True` if calling object represents minus infinity
+ or :obj:`False` otherwise.
+ """
+ return self._value == self._minusInf
+
+ @property
+ def isInf(self):
+ return self._value in self._inf
+
+ def __add__(self, value):
+ return self.clone(float(self) + value)
+
+ def __radd__(self, value):
+ return self + value
+
+ def __mul__(self, value):
+ return self.clone(float(self) * value)
+
+ def __rmul__(self, value):
+ return self * value
+
+ def __sub__(self, value):
+ return self.clone(float(self) - value)
+
+ def __rsub__(self, value):
+ return self.clone(value - float(self))
+
+ def __mod__(self, value):
+ return self.clone(float(self) % value)
+
+ def __rmod__(self, value):
+ return self.clone(value % float(self))
+
+ def __pow__(self, value, modulo=None):
+ return self.clone(pow(float(self), value, modulo))
+
+ def __rpow__(self, value):
+ return self.clone(pow(value, float(self)))
+
+ if sys.version_info[0] <= 2:
+ def __div__(self, value):
+ return self.clone(float(self) / value)
+
+ def __rdiv__(self, value):
+ return self.clone(value / float(self))
+ else:
+ def __truediv__(self, value):
+ return self.clone(float(self) / value)
+
+ def __rtruediv__(self, value):
+ return self.clone(value / float(self))
+
+    # NB: these return only the floor-division quotient, not the
+    # (quotient, remainder) pair that the divmod() protocol implies
+    def __divmod__(self, value):
+        return self.clone(float(self) // value)
+
+    def __rdivmod__(self, value):
+        return self.clone(value // float(self))
+
+ def __int__(self):
+ return int(float(self))
+
+ if sys.version_info[0] <= 2:
+ def __long__(self):
+ return long(float(self))
+
+ def __float__(self):
+ if self._value in self._inf:
+ return self._value
+ else:
+ return float(
+ self._value[0] * pow(self._value[1], self._value[2])
+ )
+
+ def __abs__(self):
+ return self.clone(abs(float(self)))
+
+ def __pos__(self):
+ return self.clone(+float(self))
+
+ def __neg__(self):
+ return self.clone(-float(self))
+
+ def __round__(self, n=0):
+ r = round(float(self), n)
+ if n:
+ return self.clone(r)
+ else:
+ return r
+
+ def __floor__(self):
+ return self.clone(math.floor(float(self)))
+
+ def __ceil__(self):
+ return self.clone(math.ceil(float(self)))
+
+ if sys.version_info[0:2] > (2, 5):
+ def __trunc__(self):
+ return self.clone(math.trunc(float(self)))
+
+ def __lt__(self, value):
+ return float(self) < value
+
+ def __le__(self, value):
+ return float(self) <= value
+
+ def __eq__(self, value):
+ return float(self) == value
+
+ def __ne__(self, value):
+ return float(self) != value
+
+ def __gt__(self, value):
+ return float(self) > value
+
+ def __ge__(self, value):
+ return float(self) >= value
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return bool(float(self))
+ else:
+ def __bool__(self):
+ return bool(float(self))
+
+ __hash__ = base.SimpleAsn1Type.__hash__
+
+ def __getitem__(self, idx):
+ if self._value in self._inf:
+ raise error.PyAsn1Error('Invalid infinite value operation')
+ else:
+ return self._value[idx]
+
+ # compatibility stubs
+
+ def isPlusInfinity(self):
+ return self.isPlusInf
+
+ def isMinusInfinity(self):
+ return self.isMinusInf
+
+ def isInfinity(self):
+ return self.isInf
+
+
+class Enumerated(Integer):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`, its
+ objects are immutable and duck-type Python :class:`int` objects.
+
+ Keyword Args
+ ------------
+ value: :class:`int`, :class:`str` or |ASN.1| object
+ Python :class:`int` or :class:`str` literal or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ namedValues: :py:class:`~pyasn1.type.namedval.NamedValues`
+ Object representing non-default symbolic aliases for numbers
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class RadioButton(Enumerated):
+ '''
+ ASN.1 specification:
+
+ RadioButton ::= ENUMERATED { button1(0), button2(1),
+ button3(2) }
+
+ selected-by-default RadioButton ::= button1
+ '''
+ namedValues = NamedValues(
+ ('button1', 0), ('button2', 1),
+ ('button3', 2)
+ )
+
+ selected_by_default = RadioButton('button1')
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Optimization for faster codec lookup
+ typeId = Integer.getTypeId()
+
+ #: Default :py:class:`~pyasn1.type.namedval.NamedValues` object
+ #: representing symbolic aliases for numbers
+ namedValues = namedval.NamedValues()
+
+
+# "Structured" ASN.1 types
+
+class SequenceOfAndSetOfBase(base.ConstructedAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
+ its objects are mutable and duck-type Python :class:`list` objects.
+
+ Keyword Args
+ ------------
+ componentType : :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A pyasn1 object representing ASN.1 type allowed within |ASN.1| type
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type can only occur on explicit
+ `.isInconsistent` call.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class LotteryDraw(SequenceOf): # SetOf is similar
+ '''
+ ASN.1 specification:
+
+ LotteryDraw ::= SEQUENCE OF INTEGER
+ '''
+ componentType = Integer()
+
+ lotteryDraw = LotteryDraw()
+ lotteryDraw.extend([123, 456, 789])
+ """
+ def __init__(self, *args, **kwargs):
+ # support positional params for backward compatibility
+ if args:
+ for key, value in zip(('componentType', 'tagSet',
+ 'subtypeSpec'), args):
+ if key in kwargs:
+ raise error.PyAsn1Error('Conflicting positional and keyword params!')
+                kwargs[key] = value
+
+ self._componentValues = noValue
+
+ base.ConstructedAsn1Type.__init__(self, **kwargs)
+
+ # Python list protocol
+
+ def __getitem__(self, idx):
+ try:
+ return self.getComponentByPosition(idx)
+
+ except error.PyAsn1Error:
+ raise IndexError(sys.exc_info()[1])
+
+ def __setitem__(self, idx, value):
+ try:
+ self.setComponentByPosition(idx, value)
+
+ except error.PyAsn1Error:
+ raise IndexError(sys.exc_info()[1])
+
+ def append(self, value):
+ if self._componentValues is noValue:
+ pos = 0
+
+ else:
+ pos = len(self._componentValues)
+
+ self[pos] = value
+
+ def count(self, value):
+ return list(self._componentValues.values()).count(value)
+
+ def extend(self, values):
+ for value in values:
+ self.append(value)
+
+ if self._componentValues is noValue:
+ self._componentValues = {}
+
+ def index(self, value, start=0, stop=None):
+ if stop is None:
+ stop = len(self)
+
+ indices, values = zip(*self._componentValues.items())
+
+ # TODO: remove when Py2.5 support is gone
+ values = list(values)
+
+ try:
+ return indices[values.index(value, start, stop)]
+
+ except error.PyAsn1Error:
+ raise ValueError(sys.exc_info()[1])
+
+    def reverse(self):
+        # dict-based storage has no reverse(); rebuild with values in reverse index order
+        self._componentValues = dict(enumerate(
+            [self._componentValues[idx] for idx in sorted(self._componentValues, reverse=True)]))
+
+ def sort(self, key=None, reverse=False):
+ self._componentValues = dict(
+ enumerate(sorted(self._componentValues.values(),
+ key=key, reverse=reverse)))
+
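+    # A minimal list-protocol sketch, reusing the LotteryDraw example from the
+    # class docstring (illustrative only):
+    #
+    #   lotteryDraw = LotteryDraw()
+    #   lotteryDraw.extend([123, 456, 789])
+    #   lotteryDraw.sort(reverse=True)
+    #   lotteryDraw.index(456)   # -> 1
+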
+ def __len__(self):
+ if self._componentValues is noValue or not self._componentValues:
+ return 0
+
+ return max(self._componentValues) + 1
+
+ def __iter__(self):
+ for idx in range(0, len(self)):
+ yield self.getComponentByPosition(idx)
+
+ def _cloneComponentValues(self, myClone, cloneValueFlag):
+ for idx, componentValue in self._componentValues.items():
+ if componentValue is not noValue:
+ if isinstance(componentValue, base.ConstructedAsn1Type):
+ myClone.setComponentByPosition(
+ idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
+ )
+ else:
+ myClone.setComponentByPosition(idx, componentValue.clone())
+
+ def getComponentByPosition(self, idx, default=noValue, instantiate=True):
+ """Return |ASN.1| type component value by position.
+
+ Equivalent to Python sequence subscription operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ idx : :class:`int`
+ Component index (zero-based). Must either refer to an existing
+ component or to N+1 component (if *componentType* is set). In the latter
+ case a new component type gets instantiated and appended to the |ASN.1|
+ sequence.
+
+ Keyword Args
+ ------------
+ default: :class:`object`
+ If set and requested component is a schema object, return the `default`
+ object instead of the requested component.
+
+ instantiate: :class:`bool`
+ If :obj:`True` (default), inner component will be automatically instantiated.
+ If :obj:`False` either existing component or the :class:`NoValue` object will be
+ returned.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ Instantiate |ASN.1| component type or return existing component value
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ # can also be SetOf
+ class MySequenceOf(SequenceOf):
+ componentType = OctetString()
+
+ s = MySequenceOf()
+
+ # returns component #0 with `.isValue` property False
+ s.getComponentByPosition(0)
+
+ # returns None
+ s.getComponentByPosition(0, default=None)
+
+ s.clear()
+
+ # returns noValue
+ s.getComponentByPosition(0, instantiate=False)
+
+ # sets component #0 to OctetString() ASN.1 schema
+ # object and returns it
+ s.getComponentByPosition(0, instantiate=True)
+
+ # sets component #0 to ASN.1 value object
+ s.setComponentByPosition(0, 'ABCD')
+
+ # returns OctetString('ABCD') value object
+ s.getComponentByPosition(0, instantiate=False)
+
+ s.clear()
+
+ # returns noValue
+ s.getComponentByPosition(0, instantiate=False)
+ """
+ if isinstance(idx, slice):
+ indices = tuple(range(len(self)))
+ return [self.getComponentByPosition(subidx, default, instantiate)
+ for subidx in indices[idx]]
+
+ if idx < 0:
+ idx = len(self) + idx
+ if idx < 0:
+ raise error.PyAsn1Error(
+ 'SequenceOf/SetOf index is out of range')
+
+ try:
+ componentValue = self._componentValues[idx]
+
+ except (KeyError, error.PyAsn1Error):
+ if not instantiate:
+ return default
+
+ self.setComponentByPosition(idx)
+
+ componentValue = self._componentValues[idx]
+
+ if default is noValue or componentValue.isValue:
+ return componentValue
+ else:
+ return default
+
+ def setComponentByPosition(self, idx, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True):
+ """Assign |ASN.1| type component by position.
+
+ Equivalent to Python sequence item assignment operation (e.g. `[]`)
+ or list.append() (when idx == len(self)).
+
+ Parameters
+ ----------
+ idx: :class:`int`
+ Component index (zero-based). Must either refer to existing
+ component or to N+1 component. In the latter case a new component
+ type gets instantiated (if *componentType* is set, or given ASN.1
+ object is taken otherwise) and appended to the |ASN.1| sequence.
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints: :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ Returns
+ -------
+ self
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer
+ IndexError
+ When idx > len(self)
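+
+        Examples
+        --------
+        A minimal sketch (`MySequenceOf` is an illustrative subclass, not part
+        of the library):
+
+        .. code-block:: python
+
+            class MySequenceOf(SequenceOf):
+                componentType = Integer()
+
+            s = MySequenceOf()
+
+            # assigning at idx == len(s) appends a new component
+            s.setComponentByPosition(0, 1)
+
+            # assigning at an existing idx replaces the component in place
+            s.setComponentByPosition(0, 2)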
+ """
+ if isinstance(idx, slice):
+ indices = tuple(range(len(self)))
+ startIdx = indices and indices[idx][0] or 0
+ for subIdx, subValue in enumerate(value):
+ self.setComponentByPosition(
+ startIdx + subIdx, subValue, verifyConstraints,
+ matchTags, matchConstraints)
+ return self
+
+ if idx < 0:
+ idx = len(self) + idx
+ if idx < 0:
+ raise error.PyAsn1Error(
+ 'SequenceOf/SetOf index is out of range')
+
+ componentType = self.componentType
+
+ if self._componentValues is noValue:
+ componentValues = {}
+
+ else:
+ componentValues = self._componentValues
+
+ currentValue = componentValues.get(idx, noValue)
+
+ if value is noValue:
+ if componentType is not None:
+ value = componentType.clone()
+
+ elif currentValue is noValue:
+ raise error.PyAsn1Error('Component type not defined')
+
+ elif not isinstance(value, base.Asn1Item):
+ if (componentType is not None and
+ isinstance(componentType, base.SimpleAsn1Type)):
+ value = componentType.clone(value=value)
+
+ elif (currentValue is not noValue and
+ isinstance(currentValue, base.SimpleAsn1Type)):
+ value = currentValue.clone(value=value)
+
+ else:
+ raise error.PyAsn1Error(
+ 'Non-ASN.1 value %r and undefined component'
+ ' type at %r' % (value, self))
+
+ elif componentType is not None and (matchTags or matchConstraints):
+ subtypeChecker = (
+ self.strictConstraints and
+ componentType.isSameTypeWith or
+ componentType.isSuperTypeOf)
+
+ if not subtypeChecker(value, verifyConstraints and matchTags,
+ verifyConstraints and matchConstraints):
+ # TODO: we should wrap componentType with UnnamedType to carry
+ # additional properties associated with componentType
+ if componentType.typeId != Any.typeId:
+ raise error.PyAsn1Error(
+ 'Component value is tag-incompatible: %r vs '
+ '%r' % (value, componentType))
+
+ componentValues[idx] = value
+
+ self._componentValues = componentValues
+
+ return self
+
+ @property
+ def componentTagMap(self):
+ if self.componentType is not None:
+ return self.componentType.tagMap
+
+ @property
+ def components(self):
+ return [self._componentValues[idx]
+ for idx in sorted(self._componentValues)]
+
+ def clear(self):
+ """Remove all components and become an empty |ASN.1| value object.
+
+ Has the same effect on |ASN.1| object as it does on :class:`list`
+ built-in.
+ """
+ self._componentValues = {}
+ return self
+
+ def reset(self):
+ """Remove all components and become a |ASN.1| schema object.
+
+ See :meth:`isValue` property for more information on the
+ distinction between value and schema objects.
+ """
+ self._componentValues = noValue
+ return self
+
+ def prettyPrint(self, scope=0):
+ scope += 1
+ representation = self.__class__.__name__ + ':\n'
+
+ if not self.isValue:
+ return representation
+
+ for idx, componentValue in enumerate(self):
+ representation += ' ' * scope
+ if (componentValue is noValue and
+ self.componentType is not None):
+ representation += '<empty>'
+ else:
+ representation += componentValue.prettyPrint(scope)
+
+ return representation
+
+ def prettyPrintType(self, scope=0):
+ scope += 1
+ representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
+ if self.componentType is not None:
+ representation += ' ' * scope
+ representation += self.componentType.prettyPrintType(scope)
+ return representation + '\n' + ' ' * (scope - 1) + '}'
+
+ @property
+ def isValue(self):
+ """Indicate that |ASN.1| object represents ASN.1 value.
+
+ If *isValue* is :obj:`False` then this object represents just ASN.1 schema.
+
+ If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
+ this object can also be used like a Python built-in object
+ (e.g. :class:`int`, :class:`str`, :class:`dict` etc.).
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`False` if object represents just ASN.1 schema.
+ :obj:`True` if object represents ASN.1 schema and can be used as a normal value.
+
+ Note
+ ----
+ There is an important distinction between PyASN1 schema and value objects.
+ The PyASN1 schema objects can only participate in ASN.1 schema-related
+        operations (e.g. defining or testing the structure of the data). The
+        most obvious use of an ASN.1 schema is to guide serialisation codecs
+        whilst encoding/decoding serialised ASN.1 content.
+
+ The PyASN1 value objects can **additionally** participate in many operations
+ involving regular Python objects (e.g. arithmetic, comprehension etc).
+ """
+ if self._componentValues is noValue:
+ return False
+
+ if len(self._componentValues) != len(self):
+ return False
+
+ for componentValue in self._componentValues.values():
+ if componentValue is noValue or not componentValue.isValue:
+ return False
+
+ return True
+
+ @property
+ def isInconsistent(self):
+ """Run necessary checks to ensure |ASN.1| object consistency.
+
+ Default action is to verify |ASN.1| object against constraints imposed
+ by `subtypeSpec`.
+
+ Raises
+ ------
+        :py:class:`~pyasn1.error.PyAsn1Error` on any inconsistencies found
+ """
+ if self.componentType is noValue or not self.subtypeSpec:
+ return False
+
+ if self._componentValues is noValue:
+ return True
+
+ mapping = {}
+
+ for idx, value in self._componentValues.items():
+ # Absent fields are not in the mapping
+ if value is noValue:
+ continue
+
+ mapping[idx] = value
+
+ try:
+ # Represent SequenceOf/SetOf as a bare dict to constraints chain
+ self.subtypeSpec(mapping)
+
+ except error.PyAsn1Error:
+ exc = sys.exc_info()[1]
+ return exc
+
+ return False
+
+
+class SequenceOf(SequenceOfAndSetOfBase):
+ __doc__ = SequenceOfAndSetOfBase.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
+ )
+
+ #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ #: object representing ASN.1 type allowed within |ASN.1| type
+ componentType = None
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Disambiguation ASN.1 types identification
+ typeId = SequenceOfAndSetOfBase.getTypeId()
+
+
+class SetOf(SequenceOfAndSetOfBase):
+ __doc__ = SequenceOfAndSetOfBase.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
+ )
+
+ #: Default :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ #: object representing ASN.1 type allowed within |ASN.1| type
+ componentType = None
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Disambiguation ASN.1 types identification
+ typeId = SequenceOfAndSetOfBase.getTypeId()
+
+
+class SequenceAndSetBase(base.ConstructedAsn1Type):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
+ its objects are mutable and duck-type Python :class:`dict` objects.
+
+ Keyword Args
+ ------------
+ componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
+ Object holding named ASN.1 types allowed within this collection
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type can only occur on explicit
+ `.isInconsistent` call.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Description(Sequence): # Set is similar
+ '''
+ ASN.1 specification:
+
+ Description ::= SEQUENCE {
+ surname IA5String,
+ first-name IA5String OPTIONAL,
+ age INTEGER DEFAULT 40
+ }
+ '''
+ componentType = NamedTypes(
+ NamedType('surname', IA5String()),
+ OptionalNamedType('first-name', IA5String()),
+ DefaultedNamedType('age', Integer(40))
+ )
+
+ descr = Description()
+ descr['surname'] = 'Smith'
+ descr['first-name'] = 'John'
+ """
+ #: Default :py:class:`~pyasn1.type.namedtype.NamedTypes`
+ #: object representing named ASN.1 types allowed within |ASN.1| type
+ componentType = namedtype.NamedTypes()
+
+
+ class DynamicNames(object):
+ """Fields names/positions mapping for component-less objects"""
+ def __init__(self):
+ self._keyToIdxMap = {}
+ self._idxToKeyMap = {}
+
+ def __len__(self):
+ return len(self._keyToIdxMap)
+
+ def __contains__(self, item):
+ return item in self._keyToIdxMap or item in self._idxToKeyMap
+
+ def __iter__(self):
+ return (self._idxToKeyMap[idx] for idx in range(len(self._idxToKeyMap)))
+
+ def __getitem__(self, item):
+ try:
+ return self._keyToIdxMap[item]
+
+ except KeyError:
+ return self._idxToKeyMap[item]
+
+ def getNameByPosition(self, idx):
+ try:
+ return self._idxToKeyMap[idx]
+
+ except KeyError:
+ raise error.PyAsn1Error('Type position out of range')
+
+ def getPositionByName(self, name):
+ try:
+ return self._keyToIdxMap[name]
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ def addField(self, idx):
+ self._keyToIdxMap['field-%d' % idx] = idx
+ self._idxToKeyMap[idx] = 'field-%d' % idx
+
+
+ def __init__(self, **kwargs):
+ base.ConstructedAsn1Type.__init__(self, **kwargs)
+ self._componentTypeLen = len(self.componentType)
+ if self._componentTypeLen:
+ self._componentValues = []
+ else:
+ self._componentValues = noValue
+ self._dynamicNames = self._componentTypeLen or self.DynamicNames()
+
+ def __getitem__(self, idx):
+ if octets.isStringType(idx):
+ try:
+ return self.getComponentByName(idx)
+
+ except error.PyAsn1Error:
+ # duck-typing dict
+ raise KeyError(sys.exc_info()[1])
+
+ else:
+ try:
+ return self.getComponentByPosition(idx)
+
+ except error.PyAsn1Error:
+ # duck-typing list
+ raise IndexError(sys.exc_info()[1])
+
+ def __setitem__(self, idx, value):
+ if octets.isStringType(idx):
+ try:
+ self.setComponentByName(idx, value)
+
+ except error.PyAsn1Error:
+ # duck-typing dict
+ raise KeyError(sys.exc_info()[1])
+
+ else:
+ try:
+ self.setComponentByPosition(idx, value)
+
+ except error.PyAsn1Error:
+ # duck-typing list
+ raise IndexError(sys.exc_info()[1])
+
+ def __contains__(self, key):
+ if self._componentTypeLen:
+ return key in self.componentType
+ else:
+ return key in self._dynamicNames
+
+ def __len__(self):
+ return len(self._componentValues)
+
+ def __iter__(self):
+ return iter(self.componentType or self._dynamicNames)
+
+ # Python dict protocol
+
+ def values(self):
+ for idx in range(self._componentTypeLen or len(self._dynamicNames)):
+ yield self[idx]
+
+ def keys(self):
+ return iter(self)
+
+ def items(self):
+ for idx in range(self._componentTypeLen or len(self._dynamicNames)):
+ if self._componentTypeLen:
+ yield self.componentType[idx].name, self[idx]
+ else:
+ yield self._dynamicNames[idx], self[idx]
+
+ def update(self, *iterValue, **mappingValue):
+ for k, v in iterValue:
+ self[k] = v
+ for k in mappingValue:
+ self[k] = mappingValue[k]
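+
+    # A minimal dict-protocol sketch, reusing the Description example from the
+    # class docstring (illustrative only):
+    #
+    #   descr = Description()
+    #   descr.update(surname='Smith', age=72)
+    #   list(descr.keys())    # -> ['surname', 'first-name', 'age']
+    #   descr['age']          # -> Integer(72)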
+
+ def clear(self):
+ """Remove all components and become an empty |ASN.1| value object.
+
+ Has the same effect on |ASN.1| object as it does on :class:`dict`
+ built-in.
+ """
+ self._componentValues = []
+ self._dynamicNames = self.DynamicNames()
+ return self
+
+ def reset(self):
+ """Remove all components and become a |ASN.1| schema object.
+
+ See :meth:`isValue` property for more information on the
+ distinction between value and schema objects.
+ """
+ self._componentValues = noValue
+ self._dynamicNames = self.DynamicNames()
+ return self
+
+ @property
+ def components(self):
+ return self._componentValues
+
+ def _cloneComponentValues(self, myClone, cloneValueFlag):
+ if self._componentValues is noValue:
+ return
+
+ for idx, componentValue in enumerate(self._componentValues):
+ if componentValue is not noValue:
+ if isinstance(componentValue, base.ConstructedAsn1Type):
+ myClone.setComponentByPosition(
+ idx, componentValue.clone(cloneValueFlag=cloneValueFlag)
+ )
+ else:
+ myClone.setComponentByPosition(idx, componentValue.clone())
+
+ def getComponentByName(self, name, default=noValue, instantiate=True):
+ """Returns |ASN.1| type component by name.
+
+ Equivalent to Python :class:`dict` subscription operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ name: :class:`str`
+ |ASN.1| type component name
+
+ Keyword Args
+ ------------
+ default: :class:`object`
+ If set and requested component is a schema object, return the `default`
+ object instead of the requested component.
+
+ instantiate: :class:`bool`
+ If :obj:`True` (default), inner component will be automatically
+ instantiated.
+ If :obj:`False` either existing component or the :class:`NoValue`
+ object will be returned.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ Instantiate |ASN.1| component type or return existing
+ component value
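+
+        Examples
+        --------
+        A minimal sketch (`descr` as in the class-level Description example):
+
+        .. code-block:: python
+
+            # name-based access, instantiating the component on first use
+            descr.getComponentByName('surname')
+
+            # equivalent dict-style spelling
+            descr['surname']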
+ """
+ if self._componentTypeLen:
+ idx = self.componentType.getPositionByName(name)
+ else:
+ try:
+ idx = self._dynamicNames.getPositionByName(name)
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ return self.getComponentByPosition(idx, default=default, instantiate=instantiate)
+
+ def setComponentByName(self, name, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True):
+ """Assign |ASN.1| type component by name.
+
+ Equivalent to Python :class:`dict` item assignment operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ name: :class:`str`
+ |ASN.1| type component name
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints: :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ Returns
+ -------
+ self
+ """
+ if self._componentTypeLen:
+ idx = self.componentType.getPositionByName(name)
+ else:
+ try:
+ idx = self._dynamicNames.getPositionByName(name)
+
+ except KeyError:
+ raise error.PyAsn1Error('Name %s not found' % (name,))
+
+ return self.setComponentByPosition(
+ idx, value, verifyConstraints, matchTags, matchConstraints
+ )
+
+ def getComponentByPosition(self, idx, default=noValue, instantiate=True):
+ """Returns |ASN.1| type component by index.
+
+ Equivalent to Python sequence subscription operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ idx: :class:`int`
+            Component index (zero-based). Must refer to an existing component;
+            if *componentType* is set, a missing component is instantiated as
+            a new ASN.1 schema object on access.
+
+ Keyword Args
+ ------------
+ default: :class:`object`
+ If set and requested component is a schema object, return the `default`
+ object instead of the requested component.
+
+ instantiate: :class:`bool`
+ If :obj:`True` (default), inner component will be automatically
+ instantiated.
+ If :obj:`False` either existing component or the :class:`NoValue`
+ object will be returned.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ a PyASN1 object
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ # can also be Set
+ class MySequence(Sequence):
+ componentType = NamedTypes(
+ NamedType('id', OctetString())
+ )
+
+ s = MySequence()
+
+ # returns component #0 with `.isValue` property False
+ s.getComponentByPosition(0)
+
+ # returns None
+ s.getComponentByPosition(0, default=None)
+
+ s.clear()
+
+ # returns noValue
+ s.getComponentByPosition(0, instantiate=False)
+
+ # sets component #0 to OctetString() ASN.1 schema
+ # object and returns it
+ s.getComponentByPosition(0, instantiate=True)
+
+ # sets component #0 to ASN.1 value object
+ s.setComponentByPosition(0, 'ABCD')
+
+ # returns OctetString('ABCD') value object
+ s.getComponentByPosition(0, instantiate=False)
+
+ s.clear()
+
+ # returns noValue
+ s.getComponentByPosition(0, instantiate=False)
+ """
+ try:
+ if self._componentValues is noValue:
+ componentValue = noValue
+
+ else:
+ componentValue = self._componentValues[idx]
+
+ except IndexError:
+ componentValue = noValue
+
+ if not instantiate:
+ if componentValue is noValue or not componentValue.isValue:
+ return default
+ else:
+ return componentValue
+
+ if componentValue is noValue:
+ self.setComponentByPosition(idx)
+
+ componentValue = self._componentValues[idx]
+
+ if default is noValue or componentValue.isValue:
+ return componentValue
+ else:
+ return default
+
+ def setComponentByPosition(self, idx, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True):
+ """Assign |ASN.1| type component by position.
+
+ Equivalent to Python sequence item assignment operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ idx : :class:`int`
+ Component index (zero-based). Must either refer to existing
+ component (if *componentType* is set) or to N+1 component
+ otherwise. In the latter case a new component of given ASN.1
+ type gets instantiated and appended to |ASN.1| sequence.
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints : :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ Returns
+ -------
+ self
+ """
+ componentType = self.componentType
+ componentTypeLen = self._componentTypeLen
+
+ if self._componentValues is noValue:
+ componentValues = []
+
+ else:
+ componentValues = self._componentValues
+
+ try:
+ currentValue = componentValues[idx]
+
+ except IndexError:
+ currentValue = noValue
+ if componentTypeLen:
+ if componentTypeLen < idx:
+ raise error.PyAsn1Error('component index out of range')
+
+ componentValues = [noValue] * componentTypeLen
+
+ if value is noValue:
+ if componentTypeLen:
+ value = componentType.getTypeByPosition(idx)
+ if isinstance(value, base.ConstructedAsn1Type):
+ value = value.clone(cloneValueFlag=componentType[idx].isDefaulted)
+
+ elif currentValue is noValue:
+ raise error.PyAsn1Error('Component type not defined')
+
+ elif not isinstance(value, base.Asn1Item):
+ if componentTypeLen:
+ subComponentType = componentType.getTypeByPosition(idx)
+ if isinstance(subComponentType, base.SimpleAsn1Type):
+ value = subComponentType.clone(value=value)
+
+ else:
+ raise error.PyAsn1Error('%s can cast only scalar values' % componentType.__class__.__name__)
+
+ elif currentValue is not noValue and isinstance(currentValue, base.SimpleAsn1Type):
+ value = currentValue.clone(value=value)
+
+ else:
+ raise error.PyAsn1Error('%s undefined component type' % componentType.__class__.__name__)
+
+ elif ((verifyConstraints or matchTags or matchConstraints) and
+ componentTypeLen):
+ subComponentType = componentType.getTypeByPosition(idx)
+ if subComponentType is not noValue:
+ subtypeChecker = (self.strictConstraints and
+ subComponentType.isSameTypeWith or
+ subComponentType.isSuperTypeOf)
+
+ if not subtypeChecker(value, verifyConstraints and matchTags,
+ verifyConstraints and matchConstraints):
+ if not componentType[idx].openType:
+ raise error.PyAsn1Error('Component value is tag-incompatible: %r vs %r' % (value, componentType))
+
+ if componentTypeLen or idx in self._dynamicNames:
+ componentValues[idx] = value
+
+ elif len(componentValues) == idx:
+ componentValues.append(value)
+ self._dynamicNames.addField(idx)
+
+ else:
+ raise error.PyAsn1Error('Component index out of range')
+
+ self._componentValues = componentValues
+
+ return self
+
+ @property
+ def isValue(self):
+ """Indicate that |ASN.1| object represents ASN.1 value.
+
+ If *isValue* is :obj:`False` then this object represents just ASN.1 schema.
+
+ If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
+ this object can also be used like a Python built-in object (e.g.
+ :class:`int`, :class:`str`, :class:`dict` etc.).
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`False` if object represents just ASN.1 schema.
+ :obj:`True` if object represents ASN.1 schema and can be used as a
+ normal value.
+
+ Note
+ ----
+ There is an important distinction between PyASN1 schema and value objects.
+ The PyASN1 schema objects can only participate in ASN.1 schema-related
+        operations (e.g. defining or testing the structure of the data). The
+        most obvious use of an ASN.1 schema is to guide serialisation codecs
+        whilst encoding/decoding serialised ASN.1 content.
+
+ The PyASN1 value objects can **additionally** participate in many operations
+ involving regular Python objects (e.g. arithmetic, comprehension etc).
+
+        It is sufficient for all non-optional and non-defaulted components of an
+        |ASN.1| object to be value objects for the whole object to be considered
+        a value object. In other words, even with one or more optional components
+        not turned into value objects, the |ASN.1| object is still considered a
+        value object. Defaulted components normally come as value objects.
+ """
+ if self._componentValues is noValue:
+ return False
+
+ componentType = self.componentType
+
+ if componentType:
+ for idx, subComponentType in enumerate(componentType.namedTypes):
+ if subComponentType.isDefaulted or subComponentType.isOptional:
+ continue
+
+ if not self._componentValues:
+ return False
+
+ componentValue = self._componentValues[idx]
+ if componentValue is noValue or not componentValue.isValue:
+ return False
+
+ else:
+ for componentValue in self._componentValues:
+ if componentValue is noValue or not componentValue.isValue:
+ return False
+
+ return True
+
+ @property
+ def isInconsistent(self):
+ """Run necessary checks to ensure |ASN.1| object consistency.
+
+ Default action is to verify |ASN.1| object against constraints imposed
+ by `subtypeSpec`.
+
+ Raises
+ ------
+        :py:class:`~pyasn1.error.PyAsn1Error` on any inconsistencies found
+ """
+ if self.componentType is noValue or not self.subtypeSpec:
+ return False
+
+ if self._componentValues is noValue:
+ return True
+
+ mapping = {}
+
+ for idx, value in enumerate(self._componentValues):
+ # Absent fields are not in the mapping
+ if value is noValue:
+ continue
+
+ name = self.componentType.getNameByPosition(idx)
+
+ mapping[name] = value
+
+ try:
+ # Represent Sequence/Set as a bare dict to constraints chain
+ self.subtypeSpec(mapping)
+
+ except error.PyAsn1Error:
+ exc = sys.exc_info()[1]
+ return exc
+
+ return False
+
+ def prettyPrint(self, scope=0):
+ """Return an object representation string.
+
+ Returns
+ -------
+ : :class:`str`
+ Human-friendly object representation.
+ """
+ scope += 1
+ representation = self.__class__.__name__ + ':\n'
+ for idx, componentValue in enumerate(self._componentValues):
+ if componentValue is not noValue and componentValue.isValue:
+ representation += ' ' * scope
+ if self.componentType:
+ representation += self.componentType.getNameByPosition(idx)
+ else:
+ representation += self._dynamicNames.getNameByPosition(idx)
+ representation = '%s=%s\n' % (
+ representation, componentValue.prettyPrint(scope)
+ )
+ return representation
+
+ def prettyPrintType(self, scope=0):
+ scope += 1
+ representation = '%s -> %s {\n' % (self.tagSet, self.__class__.__name__)
+ for idx, componentType in enumerate(self.componentType.values() or self._componentValues):
+ representation += ' ' * scope
+ if self.componentType:
+ representation += '"%s"' % self.componentType.getNameByPosition(idx)
+ else:
+ representation += '"%s"' % self._dynamicNames.getNameByPosition(idx)
+ representation = '%s = %s\n' % (
+ representation, componentType.prettyPrintType(scope)
+ )
+ return representation + '\n' + ' ' * (scope - 1) + '}'
+
+ # backward compatibility
+
+ def setDefaultComponents(self):
+ return self
+
+ def getComponentType(self):
+ if self._componentTypeLen:
+ return self.componentType
+
+ def getNameByPosition(self, idx):
+ if self._componentTypeLen:
+ return self.componentType[idx].name
+
+
+class Sequence(SequenceAndSetBase):
+ __doc__ = SequenceAndSetBase.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
+ )
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
+    #: object representing ASN.1 type allowed within |ASN.1| type
+ componentType = namedtype.NamedTypes()
+
+ # Disambiguation ASN.1 types identification
+ typeId = SequenceAndSetBase.getTypeId()
+
+ # backward compatibility
+
+ def getComponentTagMapNearPosition(self, idx):
+ if self.componentType:
+ return self.componentType.getTagMapNearPosition(idx)
+
+ def getComponentPositionNearType(self, tagSet, idx):
+ if self.componentType:
+ return self.componentType.getPositionNearType(tagSet, idx)
+ else:
+ return idx
+
+
+class Set(SequenceAndSetBase):
+ __doc__ = SequenceAndSetBase.__doc__
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.initTagSet(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
+ )
+
+ #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
+ #: object representing ASN.1 type allowed within |ASN.1| type
+ componentType = namedtype.NamedTypes()
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Disambiguation ASN.1 types identification
+ typeId = SequenceAndSetBase.getTypeId()
+
+ def getComponent(self, innerFlag=False):
+ return self
+
+ def getComponentByType(self, tagSet, default=noValue,
+ instantiate=True, innerFlag=False):
+ """Returns |ASN.1| type component by ASN.1 tag.
+
+ Parameters
+ ----------
+ tagSet : :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing ASN.1 tags to identify one of
+ |ASN.1| object component
+
+ Keyword Args
+ ------------
+ default: :class:`object`
+ If set and requested component is a schema object, return the `default`
+ object instead of the requested component.
+
+ instantiate: :class:`bool`
+ If :obj:`True` (default), inner component will be automatically
+ instantiated.
+ If :obj:`False` either existing component or the :class:`noValue`
+ object will be returned.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ a pyasn1 object
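+
+        Examples
+        --------
+        A minimal sketch (assuming a Set `s` with an Integer component):
+
+        .. code-block:: python
+
+            # look the component up by its ASN.1 tag rather than by name
+            s.getComponentByType(Integer.tagSet)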
+ """
+ componentValue = self.getComponentByPosition(
+ self.componentType.getPositionByType(tagSet),
+ default=default, instantiate=instantiate
+ )
+ if innerFlag and isinstance(componentValue, Set):
+ # get inner component by inner tagSet
+ return componentValue.getComponent(innerFlag=True)
+ else:
+ # get outer component by inner tagSet
+ return componentValue
+
+ def setComponentByType(self, tagSet, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True,
+ innerFlag=False):
+ """Assign |ASN.1| type component by ASN.1 tag.
+
+ Parameters
+ ----------
+ tagSet : :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing ASN.1 tags to identify one of
+ |ASN.1| object component
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints : :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ innerFlag: :class:`bool`
+ If :obj:`True`, search for matching *tagSet* recursively.
+
+ Returns
+ -------
+ self
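+
+        Examples
+        --------
+        A minimal sketch, mirroring :meth:`getComponentByType` (assuming a
+        Set `s` with an Integer component):
+
+        .. code-block:: python
+
+            # assign by ASN.1 tag rather than by name or position
+            s.setComponentByType(Integer.tagSet, 123)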
+ """
+ idx = self.componentType.getPositionByType(tagSet)
+
+ if innerFlag: # set inner component by inner tagSet
+ componentType = self.componentType.getTypeByPosition(idx)
+
+ if componentType.tagSet:
+ return self.setComponentByPosition(
+ idx, value, verifyConstraints, matchTags, matchConstraints
+ )
+ else:
+ componentType = self.getComponentByPosition(idx)
+ return componentType.setComponentByType(
+ tagSet, value, verifyConstraints, matchTags, matchConstraints, innerFlag=innerFlag
+ )
+ else: # set outer component by inner tagSet
+ return self.setComponentByPosition(
+ idx, value, verifyConstraints, matchTags, matchConstraints
+ )
+
+ @property
+ def componentTagMap(self):
+ if self.componentType:
+ return self.componentType.tagMapUnique
+
+
+class Choice(Set):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.ConstructedAsn1Type`,
+    its objects are mutable and duck-type Python :class:`dict` objects.
+
+ Keyword Args
+ ------------
+ componentType: :py:class:`~pyasn1.type.namedtype.NamedType`
+ Object holding named ASN.1 types allowed within this collection
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type can only occur on explicit
+ `.isInconsistent` call.
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ class Afters(Choice):
+ '''
+ ASN.1 specification:
+
+ Afters ::= CHOICE {
+ cheese [0] IA5String,
+ dessert [1] IA5String
+ }
+ '''
+            componentType = NamedTypes(
+                NamedType('cheese', IA5String().subtype(
+                    implicitTag=Tag(tagClassContext, tagFormatSimple, 0)
+                )),
+                NamedType('dessert', IA5String().subtype(
+                    implicitTag=Tag(tagClassContext, tagFormatSimple, 1)
+                ))
+            )
+
+ afters = Afters()
+ afters['cheese'] = 'Mascarpone'
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.TagSet() # untagged
+
+ #: Default collection of ASN.1 types of component (e.g. :py:class:`~pyasn1.type.namedtype.NamedType`)
+ #: object representing ASN.1 type allowed within |ASN.1| type
+ componentType = namedtype.NamedTypes()
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection(
+ constraint.ValueSizeConstraint(1, 1)
+ )
+
+ # Disambiguation ASN.1 types identification
+ typeId = Set.getTypeId()
+
+ _currentIdx = None
+
+ def __eq__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] == other
+ return NotImplemented
+
+ def __ne__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] != other
+ return NotImplemented
+
+ def __lt__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] < other
+ return NotImplemented
+
+ def __le__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] <= other
+ return NotImplemented
+
+ def __gt__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] > other
+ return NotImplemented
+
+ def __ge__(self, other):
+ if self._componentValues:
+ return self._componentValues[self._currentIdx] >= other
+ return NotImplemented
+
+ if sys.version_info[0] <= 2:
+ def __nonzero__(self):
+ return self._componentValues and True or False
+ else:
+ def __bool__(self):
+ return self._componentValues and True or False
+
+ def __len__(self):
+ return self._currentIdx is not None and 1 or 0
+
+ def __contains__(self, key):
+ if self._currentIdx is None:
+ return False
+ return key == self.componentType[self._currentIdx].getName()
+
+ def __iter__(self):
+ if self._currentIdx is None:
+            # PEP 479: raising StopIteration inside a generator becomes a
+            # RuntimeError on Python 3.7+; a bare return ends the generator
+            return
+ yield self.componentType[self._currentIdx].getName()
+
+ # Python dict protocol
+
+ def values(self):
+ if self._currentIdx is not None:
+ yield self._componentValues[self._currentIdx]
+
+ def keys(self):
+ if self._currentIdx is not None:
+ yield self.componentType[self._currentIdx].getName()
+
+ def items(self):
+ if self._currentIdx is not None:
+ yield self.componentType[self._currentIdx].getName(), self[self._currentIdx]
+
+ def checkConsistency(self):
+ if self._currentIdx is None:
+ raise error.PyAsn1Error('Component not chosen')
+
+ def _cloneComponentValues(self, myClone, cloneValueFlag):
+ try:
+ component = self.getComponent()
+ except error.PyAsn1Error:
+ pass
+ else:
+ if isinstance(component, Choice):
+ tagSet = component.effectiveTagSet
+ else:
+ tagSet = component.tagSet
+ if isinstance(component, base.ConstructedAsn1Type):
+ myClone.setComponentByType(
+ tagSet, component.clone(cloneValueFlag=cloneValueFlag)
+ )
+ else:
+ myClone.setComponentByType(tagSet, component.clone())
+
+ def getComponentByPosition(self, idx, default=noValue, instantiate=True):
+ __doc__ = Set.__doc__
+
+ if self._currentIdx is None or self._currentIdx != idx:
+ return Set.getComponentByPosition(self, idx, default=default,
+ instantiate=instantiate)
+
+ return self._componentValues[idx]
+
+ def setComponentByPosition(self, idx, value=noValue,
+ verifyConstraints=True,
+ matchTags=True,
+ matchConstraints=True):
+ """Assign |ASN.1| type component by position.
+
+ Equivalent to Python sequence item assignment operation (e.g. `[]`).
+
+ Parameters
+ ----------
+ idx: :class:`int`
+ Component index (zero-based). Must either refer to existing
+ component or to N+1 component. In the latter case a new component
+ type gets instantiated (if *componentType* is set, or given ASN.1
+ object is taken otherwise) and appended to the |ASN.1| sequence.
+
+ Keyword Args
+ ------------
+ value: :class:`object` or :py:class:`~pyasn1.type.base.PyAsn1Item` derivative
+ A Python value to initialize |ASN.1| component with (if *componentType* is set)
+ or ASN.1 value object to assign to |ASN.1| component. Once a new value is
+ set to *idx* component, previous value is dropped.
+ If `value` is not given, schema object will be set as a component.
+
+ verifyConstraints : :class:`bool`
+ If :obj:`False`, skip constraints validation
+
+ matchTags: :class:`bool`
+ If :obj:`False`, skip component tags matching
+
+ matchConstraints: :class:`bool`
+ If :obj:`False`, skip component constraints matching
+
+ Returns
+ -------
+ self
+ """
+ oldIdx = self._currentIdx
+ Set.setComponentByPosition(self, idx, value, verifyConstraints, matchTags, matchConstraints)
+ self._currentIdx = idx
+ if oldIdx is not None and oldIdx != idx:
+ self._componentValues[oldIdx] = noValue
+ return self
+
+ @property
+ def effectiveTagSet(self):
+ """Return a :class:`~pyasn1.type.tag.TagSet` object of the currently initialized component or self (if |ASN.1| is tagged)."""
+ if self.tagSet:
+ return self.tagSet
+ else:
+ component = self.getComponent()
+ return component.effectiveTagSet
+
+ @property
+ def tagMap(self):
+ """"Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
+ ASN.1 tags to ASN.1 objects contained within callee.
+ """
+ if self.tagSet:
+ return Set.tagMap.fget(self)
+ else:
+ return self.componentType.tagMapUnique
+
+ def getComponent(self, innerFlag=False):
+ """Return currently assigned component of the |ASN.1| object.
+
+ Returns
+ -------
+ : :py:class:`~pyasn1.type.base.PyAsn1Item`
+ a PyASN1 object
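+
+        Examples
+        --------
+        A minimal sketch (`afters` as in the class-level example):
+
+        .. code-block:: python
+
+            afters['dessert'] = 'Tiramisu'
+            afters.getComponent()   # -> IA5String('Tiramisu')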
+ """
+ if self._currentIdx is None:
+ raise error.PyAsn1Error('Component not chosen')
+ else:
+ c = self._componentValues[self._currentIdx]
+ if innerFlag and isinstance(c, Choice):
+ return c.getComponent(innerFlag)
+ else:
+ return c
+
+ def getName(self, innerFlag=False):
+ """Return the name of currently assigned component of the |ASN.1| object.
+
+ Returns
+ -------
+ : :py:class:`str`
+ |ASN.1| component name
+ """
+ if self._currentIdx is None:
+ raise error.PyAsn1Error('Component not chosen')
+ else:
+ if innerFlag:
+ c = self._componentValues[self._currentIdx]
+ if isinstance(c, Choice):
+ return c.getName(innerFlag)
+ return self.componentType.getNameByPosition(self._currentIdx)
+
+ @property
+ def isValue(self):
+ """Indicate that |ASN.1| object represents ASN.1 value.
+
+ If *isValue* is :obj:`False` then this object represents just ASN.1 schema.
+
+ If *isValue* is :obj:`True` then, in addition to its ASN.1 schema features,
+ this object can also be used like a Python built-in object (e.g.
+ :class:`int`, :class:`str`, :class:`dict` etc.).
+
+ Returns
+ -------
+ : :class:`bool`
+ :obj:`False` if object represents just ASN.1 schema.
+ :obj:`True` if object represents ASN.1 schema and can be used as a normal
+ value.
+
+ Note
+ ----
+ There is an important distinction between PyASN1 schema and value objects.
+ The PyASN1 schema objects can only participate in ASN.1 schema-related
+        operations (e.g. defining or testing the structure of the data). The
+        most obvious use of an ASN.1 schema is to guide serialisation codecs
+        whilst encoding/decoding serialised ASN.1 content.
+
+ The PyASN1 value objects can **additionally** participate in many operations
+ involving regular Python objects (e.g. arithmetic, comprehension etc).
+ """
+ if self._currentIdx is None:
+ return False
+
+ componentValue = self._componentValues[self._currentIdx]
+
+ return componentValue is not noValue and componentValue.isValue
+
+ def clear(self):
+ self._currentIdx = None
+ return Set.clear(self)
+
+ # compatibility stubs
+
+ def getMinTagSet(self):
+ return self.minTagSet
+
+
+class Any(OctetString):
+ """Create |ASN.1| schema or value object.
+
+ |ASN.1| class is based on :class:`~pyasn1.type.base.SimpleAsn1Type`,
+ its objects are immutable and duck-type Python 2 :class:`str` or Python 3
+ :class:`bytes`. When used in Unicode context, |ASN.1| type assumes
+ "|encoding|" serialisation.
+
+ Keyword Args
+ ------------
+ value: :class:`unicode`, :class:`str`, :class:`bytes` or |ASN.1| object
+ :class:`str` (Python 2) or :class:`bytes` (Python 3), alternatively
+ :class:`unicode` object (Python 2) or :class:`str` (Python 3)
+ representing character string to be serialised into octets (note
+ `encoding` parameter) or |ASN.1| object.
+ If `value` is not given, schema object will be created.
+
+ tagSet: :py:class:`~pyasn1.type.tag.TagSet`
+ Object representing non-default ASN.1 tag(s)
+
+ subtypeSpec: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection`
+ Object representing non-default ASN.1 subtype constraint(s). Constraints
+ verification for |ASN.1| type occurs automatically on object
+ instantiation.
+
+ encoding: :py:class:`str`
+ Unicode codec ID to encode/decode :class:`unicode` (Python 2) or
+ :class:`str` (Python 3) the payload when |ASN.1| object is used
+ in text string context.
+
+ binValue: :py:class:`str`
+ Binary string initializer to use instead of the *value*.
+ Example: '10110011'.
+
+ hexValue: :py:class:`str`
+ Hexadecimal string initializer to use instead of the *value*.
+ Example: 'DEADBEEF'.
+
+ Raises
+ ------
+ ~pyasn1.error.ValueConstraintError, ~pyasn1.error.PyAsn1Error
+ On constraint violation or bad initializer.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ class Error(Sequence):
+ '''
+ ASN.1 specification:
+
+ Error ::= SEQUENCE {
+ code INTEGER,
+ parameter ANY DEFINED BY code -- Either INTEGER or REAL
+ }
+ '''
+            componentType = NamedTypes(
+ NamedType('code', Integer()),
+ NamedType('parameter', Any(),
+ openType=OpenType('code', {1: Integer(),
+ 2: Real()}))
+ )
+
+ error = Error()
+ error['code'] = 1
+ error['parameter'] = Integer(1234)
+ """
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.tag.TagSet` object representing ASN.1 tag(s)
+ #: associated with |ASN.1| type.
+ tagSet = tag.TagSet() # untagged
+
+ #: Set (on class, not on instance) or return a
+ #: :py:class:`~pyasn1.type.constraint.ConstraintsIntersection` object
+ #: imposing constraints on |ASN.1| type initialization values.
+ subtypeSpec = constraint.ConstraintsIntersection()
+
+ # Disambiguation ASN.1 types identification
+ typeId = OctetString.getTypeId()
+
+ @property
+ def tagMap(self):
+ """"Return a :class:`~pyasn1.type.tagmap.TagMap` object mapping
+ ASN.1 tags to ASN.1 objects contained within callee.
+ """
+ try:
+ return self._tagMap
+
+ except AttributeError:
+ self._tagMap = tagmap.TagMap(
+ {self.tagSet: self},
+ {eoo.endOfOctets.tagSet: eoo.endOfOctets},
+ self
+ )
+
+ return self._tagMap
+
+# XXX
+# coercion rules?
diff --git a/third_party/python/pyasn1/pyasn1/type/useful.py b/third_party/python/pyasn1/pyasn1/type/useful.py
new file mode 100644
index 0000000000..7536b95cee
--- /dev/null
+++ b/third_party/python/pyasn1/pyasn1/type/useful.py
@@ -0,0 +1,191 @@
+#
+# This file is part of pyasn1 software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import datetime
+
+from pyasn1 import error
+from pyasn1.compat import dateandtime
+from pyasn1.compat import string
+from pyasn1.type import char
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+__all__ = ['ObjectDescriptor', 'GeneralizedTime', 'UTCTime']
+
+NoValue = univ.NoValue
+noValue = univ.noValue
+
+
+class ObjectDescriptor(char.GraphicString):
+ __doc__ = char.GraphicString.__doc__
+
+ #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
+ tagSet = char.GraphicString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 7)
+ )
+
+ # Optimization for faster codec lookup
+ typeId = char.GraphicString.getTypeId()
+
+
+class TimeMixIn(object):
+
+ _yearsDigits = 4
+ _hasSubsecond = False
+ _optionalMinutes = False
+ _shortTZ = False
+
+ class FixedOffset(datetime.tzinfo):
+ """Fixed offset in minutes east from UTC."""
+
+ # defaulted arguments required
+        # https://docs.python.org/2.3/lib/datetime-tzinfo.html
+ def __init__(self, offset=0, name='UTC'):
+ self.__offset = datetime.timedelta(minutes=offset)
+ self.__name = name
+
+ def utcoffset(self, dt):
+ return self.__offset
+
+ def tzname(self, dt):
+ return self.__name
+
+ def dst(self, dt):
+ return datetime.timedelta(0)
+
+ UTC = FixedOffset()
+
+ @property
+ def asDateTime(self):
+ """Create :py:class:`datetime.datetime` object from a |ASN.1| object.
+
+ Returns
+ -------
+ :
+ new instance of :py:class:`datetime.datetime` object
+ """
+ text = str(self)
+ if text.endswith('Z'):
+ tzinfo = TimeMixIn.UTC
+ text = text[:-1]
+
+ elif '-' in text or '+' in text:
+ if '+' in text:
+ text, plusminus, tz = string.partition(text, '+')
+ else:
+ text, plusminus, tz = string.partition(text, '-')
+
+ if self._shortTZ and len(tz) == 2:
+ tz += '00'
+
+ if len(tz) != 4:
+ raise error.PyAsn1Error('malformed time zone offset %s' % tz)
+
+ try:
+ minutes = int(tz[:2]) * 60 + int(tz[2:])
+ if plusminus == '-':
+ minutes *= -1
+
+ except ValueError:
+ raise error.PyAsn1Error('unknown time specification %s' % self)
+
+ tzinfo = TimeMixIn.FixedOffset(minutes, '?')
+
+ else:
+ tzinfo = None
+
+ if '.' in text or ',' in text:
+ if '.' in text:
+ text, _, ms = string.partition(text, '.')
+ else:
+ text, _, ms = string.partition(text, ',')
+
+ try:
+ # the fraction is a decimal fraction of a second; pad or truncate
+ # it to exactly six digits to obtain microseconds
+ ms = int(ms.ljust(6, '0')[:6])
+
+ except ValueError:
+ raise error.PyAsn1Error('bad sub-second time specification %s' % self)
+
+ else:
+ ms = 0
+
+ if self._optionalMinutes and len(text) - self._yearsDigits == 6:
+ text += '0000'
+ elif len(text) - self._yearsDigits == 8:
+ text += '00'
+
+ try:
+ dt = dateandtime.strptime(text, self._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
+
+ except ValueError:
+ raise error.PyAsn1Error('malformed datetime format %s' % self)
+
+ return dt.replace(microsecond=ms, tzinfo=tzinfo)
+
+ @classmethod
+ def fromDateTime(cls, dt):
+ """Create |ASN.1| object from a :py:class:`datetime.datetime` object.
+
+ Parameters
+ ----------
+ dt: :py:class:`datetime.datetime` object
+ The `datetime.datetime` object to initialize the |ASN.1| object
+ from
+
+ Returns
+ -------
+ :
+ new instance of |ASN.1| value
+ """
+ text = dt.strftime(cls._yearsDigits == 4 and '%Y%m%d%H%M%S' or '%y%m%d%H%M%S')
+ if cls._hasSubsecond:
+ # zero-pad to three digits so the fraction reads as a decimal
+ # fraction of a second (e.g. 5 ms -> '.005', not '.5')
+ text += '.%.3d' % (dt.microsecond // 1000)
+
+ if dt.utcoffset():
+ # timedelta.seconds is never negative (negative offsets are
+ # carried in the days field), so compute the signed total first,
+ # then emit hours and minutes separately
+ offset = dt.utcoffset()
+ seconds = offset.days * 86400 + offset.seconds
+ if seconds < 0:
+ text += '-'
+ seconds = -seconds
+ else:
+ text += '+'
+ text += '%.2d%.2d' % (seconds // 3600, seconds % 3600 // 60)
+ else:
+ text += 'Z'
+
+ return cls(text)
+
+
+class GeneralizedTime(char.VisibleString, TimeMixIn):
+ __doc__ = char.VisibleString.__doc__
+
+ #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
+ tagSet = char.VisibleString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 24)
+ )
+
+ # Optimization for faster codec lookup
+ typeId = char.VisibleString.getTypeId()
+
+ _yearsDigits = 4
+ _hasSubsecond = True
+ _optionalMinutes = True
+ _shortTZ = True
+
+
+class UTCTime(char.VisibleString, TimeMixIn):
+ __doc__ = char.VisibleString.__doc__
+
+ #: Default :py:class:`~pyasn1.type.tag.TagSet` object for |ASN.1| objects
+ tagSet = char.VisibleString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 23)
+ )
+
+ # Optimization for faster codec lookup
+ typeId = char.VisibleString.getTypeId()
+
+ _yearsDigits = 2
+ _hasSubsecond = False
+ _optionalMinutes = False
+ _shortTZ = False
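+
+
+# Usage sketch (illustrative, not part of upstream): round-tripping between
+# the time types above and datetime.datetime.
+#
+#     from datetime import datetime
+#
+#     # parse a GeneralizedTime string into an aware datetime object
+#     when = GeneralizedTime('20170916234254Z').asDateTime
+#
+#     # and go the other way; naive datetimes are rendered with a 'Z' suffix
+#     utc = UTCTime.fromDateTime(datetime(2017, 9, 16, 23, 42, 54))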
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/LICENSE.txt b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/LICENSE.txt
new file mode 100644
index 0000000000..ac630e821c
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/LICENSE.txt
@@ -0,0 +1,24 @@
+Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/METADATA b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/METADATA
new file mode 100644
index 0000000000..52a1623272
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/METADATA
@@ -0,0 +1,42 @@
+Metadata-Version: 2.1
+Name: pyasn1-modules
+Version: 0.2.8
+Summary: A collection of ASN.1-based protocol modules.
+Home-page: https://github.com/etingof/pyasn1-modules
+Author: Ilya Etingof
+Author-email: etingof@gmail.com
+Maintainer: Ilya Etingof <etingof@gmail.com>
+License: BSD-2-Clause
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: Intended Audience :: System Administrators
+Classifier: Intended Audience :: Telecommunications Industry
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.2
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Topic :: Communications
+Classifier: Topic :: System :: Monitoring
+Classifier: Topic :: System :: Networking :: Monitoring
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Dist: pyasn1 (<0.5.0,>=0.4.6)
+
+A collection of ASN.1 modules expressed in the form of pyasn1 classes. Includes protocol PDU definitions (SNMP, LDAP, etc.) and various data structures (X.509, PKCS, etc.).
+
+
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/RECORD b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/RECORD
new file mode 100644
index 0000000000..56851eb2b8
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/RECORD
@@ -0,0 +1,113 @@
+pyasn1_modules/__init__.py,sha256=dVxDhxuk2UnZm-vUYJxsjuoO1cQuyjimpP5F0Rk3B8M,65
+pyasn1_modules/pem.py,sha256=j3qNWa4Bbgq6NKs343cUGYrhyUzHUDAU-hC23aeCIog,2058
+pyasn1_modules/rfc1155.py,sha256=9xUfGI35hFQ3OF4UxGd3V_B0DMflGfnLqjjmlEngDqs,2683
+pyasn1_modules/rfc1157.py,sha256=GnLq_jcPLdUHREomh7HmsT9ZyNnPDw4NLEEFwppCyJQ,3554
+pyasn1_modules/rfc1901.py,sha256=Uq8zJ4HdCClnV0du14_hF3ggNdHAM-heaxuz23cwoOQ,646
+pyasn1_modules/rfc1902.py,sha256=JUYq7hBfik2w-_Ju17hpe_j00QKAZEguwe20BK8fC9I,3705
+pyasn1_modules/rfc1905.py,sha256=qTqjTF4L4Wz4svQADIjliqyqPs0mnD8GDqhgngJWdN0,4831
+pyasn1_modules/rfc2251.py,sha256=hBEhoVDvkNxSQY6zeX6WxpKL1i2vqrIPZ5Jmvl5KC7M,26931
+pyasn1_modules/rfc2314.py,sha256=r9tTFaflcmVtQVTDoDo_-OYk5W98gD4NglZX78j3a6M,1313
+pyasn1_modules/rfc2315.py,sha256=ZfyNEbwHz-q0y3twhBZlogIeWNrP_lcBUAzIfcXYGUo,9666
+pyasn1_modules/rfc2437.py,sha256=9l6YNwD0BUrdhmg5NAs_K3PqrwfJVDR-5N9_EjeRRk4,2623
+pyasn1_modules/rfc2459.py,sha256=TYZuSTbv868F5dXKi83H-ShqCwy7SQIyUAMBWVDgc2Q,50002
+pyasn1_modules/rfc2511.py,sha256=S6Bggb2UR45IRdSNVdWFVfedsa1Om2VoZILlY-oL6QU,10350
+pyasn1_modules/rfc2560.py,sha256=QfVWkw4GJXKVsjDUPh9ORF2kpi5XQTLlZdIB677qvv8,8406
+pyasn1_modules/rfc2631.py,sha256=Het4nHPVFj6oElpEANYkKQuincUa0ms5SOt94Ph8jhs,1219
+pyasn1_modules/rfc2634.py,sha256=7sTu3YysbHImknLk7CbdQIjJjt6cC849-XqkuEDgFPk,9425
+pyasn1_modules/rfc2985.py,sha256=8GL8jkWGpN1t7sVaEtyhVgfCM80XhlYOUEi9jhcAX0E,14359
+pyasn1_modules/rfc2986.py,sha256=sjlXnV2fnyaYqZjgepsneTqXiwk2N0mrdExEuEHp92I,1896
+pyasn1_modules/rfc3114.py,sha256=02eDCK2blUNybTaGX85vxGfCTnzHXXa9BP9IaVVocK8,1961
+pyasn1_modules/rfc3161.py,sha256=9kz_TvQ5_OpBPuHQDAh2WyqKeOThgxPq8E5iBB-sNp8,4260
+pyasn1_modules/rfc3274.py,sha256=ZULbMN3wksvv_fWvT_C1vskxuh_IzRCAD9QD1hdk-lo,1670
+pyasn1_modules/rfc3279.py,sha256=uRaWfvIw4WXBoJN9gcAhsW8MTDymGoa-FrrC2k033TI,6807
+pyasn1_modules/rfc3280.py,sha256=nra0JN8HEPg3XorP-ry8H1Wb7xiG81VBGSFmKFCEldU,46620
+pyasn1_modules/rfc3281.py,sha256=s0MV7DaVXhap8bIeKqCbjmrwrMytxBTFPFl2TD21g6Y,9866
+pyasn1_modules/rfc3412.py,sha256=_PQEwCmLcxlNlflAv-xQbfwTr_Fks7FvmBfCGQIF3ME,1956
+pyasn1_modules/rfc3414.py,sha256=lbn5t4ycmhbg6smNvpZwcX3L1VaU0ns3VYplyHCyVc0,1167
+pyasn1_modules/rfc3447.py,sha256=c5KidhoTIibl1nvqvEIbBSBFmbQcAns75GDpFwMHUhM,1605
+pyasn1_modules/rfc3560.py,sha256=3Ud7sY7OAV_4KGNn_hg5xZblEkxE_ILH1kP2TI-KbZw,1818
+pyasn1_modules/rfc3565.py,sha256=nRephcXY7ioG5I4iaT6mSQYGwaouRQXoMnp2kFQQOE0,1438
+pyasn1_modules/rfc3709.py,sha256=KAaG7SKTT9Ef-Kza5Zn_qXkZppul8Wt8MPSkzS4qs5o,6469
+pyasn1_modules/rfc3770.py,sha256=ue0Qaiys8J86M-8EtLNrcfuXm87Mr2GQ4f30lSs0vXE,1743
+pyasn1_modules/rfc3779.py,sha256=x8HYKGCaGO3BohCREHQUEa1oYGArWIC2J0PftxiPrjI,3260
+pyasn1_modules/rfc3852.py,sha256=Ekx1BOSu7Bsg1IFO96uDZ4iGCGzu-r5n0KPwvxT18BY,20101
+pyasn1_modules/rfc4043.py,sha256=OWPgVzfK3Hs5sNQJSqUBkInhgikv-x15-xLSg30xwNE,1067
+pyasn1_modules/rfc4055.py,sha256=f2rlyaBeNhl287b_qLLsNpjgwxYRVzBgbOH28UnJZwQ,10392
+pyasn1_modules/rfc4073.py,sha256=bHVssQE3yXwetes1TPWAT30UhOEinHj8vEBaYjWC24g,1636
+pyasn1_modules/rfc4108.py,sha256=-I63Z0crn_Elvr85nSa9BqAlRx7cIJfEb9ItPDkq8JY,10598
+pyasn1_modules/rfc4210.py,sha256=PmJyGAnQGbG3H0Jzo4G4MfIg4kk7Ebd4CTKA0jYGynw,28469
+pyasn1_modules/rfc4211.py,sha256=W2YVMJWUEsRNGvdEmf4Ktoav5mwHfDhJyaPsCso9hFA,12110
+pyasn1_modules/rfc4334.py,sha256=Q-fcYksrunAo1t07HE2jm5WlQgFAf5o39utpel0ZjcI,1586
+pyasn1_modules/rfc4985.py,sha256=oWCBG3tknFLUJOeG4aKF7JrkA4qMjPyJFGTnf7xmPd8,961
+pyasn1_modules/rfc5035.py,sha256=xgw9ztAM_bJKlIUCzni2zcE_z3ErEuXpWRPJpXI1KEw,4523
+pyasn1_modules/rfc5083.py,sha256=ENXIEL0CYrTqvf_iwpvAkBBJpi2pOFNBDFEYc37yqF8,1888
+pyasn1_modules/rfc5084.py,sha256=i9sFdUklbdTQodTya4BNFnpeFxGIB2uS1aNkfFdZpu4,2855
+pyasn1_modules/rfc5208.py,sha256=O2ZDYy-lqXeQcK_9gryuvm71TUnzIF7yL8j_LrIBEQc,1432
+pyasn1_modules/rfc5280.py,sha256=GweilWgd70v1Z0YovOSU3Bnu7vvu4rMscgE6WhksBkg,51236
+pyasn1_modules/rfc5480.py,sha256=GzBTgKQ68V-L-Qy0SBrCQMgqR5mGF7U73uXlBzfV2Jk,4834
+pyasn1_modules/rfc5649.py,sha256=3A--LQL7iw8DGXSDyiSUeh6wwFPKQQGyVY94mNzY0Ek,830
+pyasn1_modules/rfc5652.py,sha256=jmL6fOHqTAQvceW9mtkAZpOaxkemRjWDrdpL4pglQkk,21451
+pyasn1_modules/rfc5751.py,sha256=M8kTLARhdqh3UqmlZv_FWJfuJb-ph7P6MVGxSP7Q4wQ,3198
+pyasn1_modules/rfc5755.py,sha256=RZ28NeCnEAGr2pLRSNFw0BRb_b_eulmxag-lRTmUeTo,12081
+pyasn1_modules/rfc5913.py,sha256=OayMmpi29ZlQI1EszIxXaU8Mhwi41BrH5esoyS80efQ,1161
+pyasn1_modules/rfc5914.py,sha256=nXOb4SvESbEFYI8h0nEYkRArNZ9w5Zqxva_4uAdMXNY,3714
+pyasn1_modules/rfc5915.py,sha256=VqMRd_Ksm0LFvE5XX4_MO6BdFG7Ch7NdQcwT_DMWAK4,1056
+pyasn1_modules/rfc5916.py,sha256=gHrFO9lX21h6Wa3JnEqyjuqXQlcTE0loUIu913Sit0E,800
+pyasn1_modules/rfc5917.py,sha256=nM08rGm9D3O8uqSbmshvp7_fHl2dYaTdhUGVJQHe0xc,1511
+pyasn1_modules/rfc5924.py,sha256=_8TqEJ9Q7cFSd2u3Za6rzlNPqGLl7IA4oHtYVpoJhdA,425
+pyasn1_modules/rfc5934.py,sha256=77z96SeP4iM2R6Rl5-Vx7OaENA8ZQvzrfhDVZRy9lqk,23798
+pyasn1_modules/rfc5940.py,sha256=66rMmgyKBhay-RZsWaKz7PUGwp0bqEAVULPb4Edk1vk,1613
+pyasn1_modules/rfc5958.py,sha256=NZPx-7FvjzgErz2lTURiRq8m3XCZ7D9QbGDhtIF-zCE,2650
+pyasn1_modules/rfc5990.py,sha256=-b0St64ba3LVRGSeNmbGoMIbkU8c8FDpo4zFWF0PCFM,5505
+pyasn1_modules/rfc6010.py,sha256=F43AYVFUwu-2_xjJE2Wmw1Wdt0K7l3vg0_fCa_QHqBU,2347
+pyasn1_modules/rfc6019.py,sha256=vzj5tfG4694-ucpErpAtE1DVOE4-v0dkN894Zr9xm4o,1086
+pyasn1_modules/rfc6031.py,sha256=X2cjNyVnrX3G2zG7kD4Rq__kF6-ftmmnqHlCQJDCuMU,12137
+pyasn1_modules/rfc6032.py,sha256=uNAu5zLHg0b583xxzFNUZxCnJaCzMw1iobzREuejMoM,1950
+pyasn1_modules/rfc6120.py,sha256=JehGZD8Y0Bdhr_ojpMSjHgnRHEdUXauZxqLxRwns6Cc,818
+pyasn1_modules/rfc6170.py,sha256=sL2yPZzO--MI4ToeAwlFEP-x6I0-etuJxT2mgAPjEO4,409
+pyasn1_modules/rfc6187.py,sha256=jOMiIhw4HAUn7hj37gKImNU_hK8TamAfd0V0Jrwh_YU,489
+pyasn1_modules/rfc6210.py,sha256=wLifK_EShv1a4TOhGJ-k9zA1kVVYVDNjS-Rh0ohmCh0,1052
+pyasn1_modules/rfc6211.py,sha256=XotTBQVseK7y0nJB4Fx-npdhRHeH53IM84kGupWIprk,2257
+pyasn1_modules/rfc6402-1.py,sha256=F2t7dYFdqYQ_PiG9JoUlNMcRvIghrbJPoNgdjcKGSuc,17049
+pyasn1_modules/rfc6402.py,sha256=0ciItKf7voeSCTZl1kKYd6gyQ68IZzwMe1-fj16etKs,17148
+pyasn1_modules/rfc6482.py,sha256=10_Xyb2TaPFx72IUCZtu81aH5rmYihhdL0P-PVby1ys,2085
+pyasn1_modules/rfc6486.py,sha256=a3_5OJvkz2G7xWOC0dqbNqJQDsHQAOU62AWin107c4k,1916
+pyasn1_modules/rfc6487.py,sha256=gTUVkFYJyUcr1E4uoeN2cXPNaXyjYbixupbBKFQA4jQ,472
+pyasn1_modules/rfc6664.py,sha256=nq8F5wDeO49FoBGVQDx8ivvg_GsubdWa1bpZM_40Tms,4270
+pyasn1_modules/rfc6955.py,sha256=FBVb8LpHKMZjR3wOJtm-BPbi5EMiRoGuUWh41r1soCU,2814
+pyasn1_modules/rfc6960.py,sha256=BhEDCLLrae4RaCpMuKJc0kw1bGs56V0_F-NxiO9ctuw,7913
+pyasn1_modules/rfc7030.py,sha256=t-s2BDyX3Zk2sy_jMQl-P2I2NXFOn7huu0wFcM-2sqs,1441
+pyasn1_modules/rfc7191.py,sha256=uMsBzJ9167wxsiPYDQUnZQFVFNfgUxnCwRNeKnXxNGM,7062
+pyasn1_modules/rfc7229.py,sha256=GSiUz4QkYODfnIvLRXKiabyno9Gmd6CX0zWR7HoIpCk,743
+pyasn1_modules/rfc7292.py,sha256=wORjDGD_aqHoujB2wu6nNrEjYTw3VO_xDp-Qx0VWLbc,8478
+pyasn1_modules/rfc7296.py,sha256=eAZpZ2dgUhxbJrLLGtDff4UspauG7Tr5dj8WELYHnUM,885
+pyasn1_modules/rfc7508.py,sha256=ZmJFbQO934Fs8wxcpO0gg5fU0d8yEFlkkFD3KMUQbAE,2182
+pyasn1_modules/rfc7585.py,sha256=T0-sdzPJoop1jbB2RJ-wzUnf6t6CeD2eMMXpcz55JEg,1076
+pyasn1_modules/rfc7633.py,sha256=8P_fBWkoGk3rsk7SEAm6QZcPjoRGTRGQuasWMLOrLKY,841
+pyasn1_modules/rfc7773.py,sha256=6UGPWyVYuicKe6snZCnD1wuAu1MOVgzPoSALL2uvTrI,1315
+pyasn1_modules/rfc7894-1.py,sha256=gTmuu4C3BxGdhbZDuWPix84Cm2z0HqaounDjm2bBpXo,2792
+pyasn1_modules/rfc7894.py,sha256=HLaSBoOUB-_cSE5935TXAnuFBVpZBv6jBnLOPp_-LNk,2769
+pyasn1_modules/rfc7906.py,sha256=mDf1pWwVNlCcEQfswUhtQDStAnwS-5xbZtjMlfnWLdI,18921
+pyasn1_modules/rfc7914.py,sha256=JxWGnXV-V13xzOn7c7-_3vxDNpkPtdZIYU4KF2kFXR4,1493
+pyasn1_modules/rfc8017.py,sha256=pwPRSchvMtXuatcCLULHuvSL8kAPEqkC4aIJjd5vEAo,4178
+pyasn1_modules/rfc8018.py,sha256=8_49xA3vEOdlGUhasw2xTUv4TpHBvjRuoonMT_k1TTk,6166
+pyasn1_modules/rfc8103.py,sha256=pNYAFfKCNrg9ZmRKsNNwr2ooptEABF3gMaPbqCroRnQ,1017
+pyasn1_modules/rfc8209.py,sha256=9EQ077rjD9uoTZWIOGmeOaHLDDq0IRXh3Rt0eYB-Ysc,393
+pyasn1_modules/rfc8226.py,sha256=mudlVgrsJ6XeHnFmxBNW_NgcYcFsHUvK04_MTr3UkRM,4291
+pyasn1_modules/rfc8358.py,sha256=aiHaXQAaaP-q5c90x_uZHSpQRTB-yekwhe6V9-EtrFg,1136
+pyasn1_modules/rfc8360.py,sha256=T4sY6o2VLVPnZ9s4yJ8PzfVA8Y60ne-1KcVNtw5yt-s,1075
+pyasn1_modules/rfc8398.py,sha256=i3lwgf__9oJzOaaHJKWmDAx3d_deKNCCuvIDWqQWiJ4,1192
+pyasn1_modules/rfc8410.py,sha256=nteKyTKcIwVlgh1qUl-8kE63kKG-KgWtLrfF92TWyyQ,971
+pyasn1_modules/rfc8418.py,sha256=eTCPTOm6t-RyHd6PlowLogDzUO72lRddESYLiSiOpC0,1109
+pyasn1_modules/rfc8419.py,sha256=qcvBlXxqvsCvG_F6AKKjqBderqbWwBy8zjZOjAPdYU4,1704
+pyasn1_modules/rfc8479.py,sha256=rDKzrp-MmEF0t3E7lqKXhgwcggvx8NoWVbtJHGLxDYM,1142
+pyasn1_modules/rfc8494.py,sha256=GMht1RdAbjHLtSqHdJ2cLO8HXRz6SLIPE254T4oy0S4,2363
+pyasn1_modules/rfc8520.py,sha256=_o00lv2MYciOqo0UKjlZBQNY_MzzgQt1SV9VXCI0T9A,1496
+pyasn1_modules/rfc8619.py,sha256=qSYiBefLSFukLg6VIgR6dnhX-uBwJMItxqHjNXnBgM0,1136
+pyasn1_modules/rfc8649.py,sha256=oHCQK7g4vKs1B0IO9GgiidTyPOk4pz5bYkXSRmBOAHo,982
+pyasn1_modules-0.2.8.dist-info/LICENSE.txt,sha256=IsXMaSKrXWn7oy2MXuTN0UmBUIy1OvwOvYVZOEf9laU,1334
+pyasn1_modules-0.2.8.dist-info/METADATA,sha256=PEBnqiw2gqgX8JBjpbgffFX8MaJHjfU3fOzJvrjjaY8,1852
+pyasn1_modules-0.2.8.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+pyasn1_modules-0.2.8.dist-info/top_level.txt,sha256=e_AojfE1DNY4M8P9LAS7qh8Fx3eOmovobqkr7NEjlg4,15
+pyasn1_modules-0.2.8.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+pyasn1_modules-0.2.8.dist-info/RECORD,,
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/WHEEL b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/WHEEL
new file mode 100644
index 0000000000..8b701e93c2
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/top_level.txt b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/top_level.txt
new file mode 100644
index 0000000000..9dad8496ee
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyasn1_modules
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/zip-safe b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/zip-safe
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules-0.2.8.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/__init__.py b/third_party/python/pyasn1_modules/pyasn1_modules/__init__.py
new file mode 100644
index 0000000000..917ac12b1b
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/__init__.py
@@ -0,0 +1,2 @@
+# http://www.python.org/dev/peps/pep-0396/
+__version__ = '0.2.8'
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/pem.py b/third_party/python/pyasn1_modules/pyasn1_modules/pem.py
new file mode 100644
index 0000000000..a6090bdd21
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/pem.py
@@ -0,0 +1,65 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+import base64
+import sys
+
+stSpam, stHam, stDump = 0, 1, 2
+
+
+# The markers parameter takes the form ('start1', 'stop1'), ('start2', 'stop2'), ...
+# Returns a (marker-index, substrate) tuple
+def readPemBlocksFromFile(fileObj, *markers):
+ startMarkers = dict(map(lambda x: (x[1], x[0]),
+ enumerate(map(lambda y: y[0], markers))))
+ stopMarkers = dict(map(lambda x: (x[1], x[0]),
+ enumerate(map(lambda y: y[1], markers))))
+ idx = -1
+ substrate = ''
+ certLines = []
+ state = stSpam
+ while True:
+ certLine = fileObj.readline()
+ if not certLine:
+ break
+ certLine = certLine.strip()
+ if state == stSpam:
+ if certLine in startMarkers:
+ certLines = []
+ idx = startMarkers[certLine]
+ state = stHam
+ continue
+ if state == stHam:
+ if certLine in stopMarkers and stopMarkers[certLine] == idx:
+ state = stDump
+ else:
+ certLines.append(certLine)
+ if state == stDump:
+ if sys.version_info[0] <= 2:
+ substrate = ''.join([base64.b64decode(x) for x in certLines])
+ else:
+ substrate = ''.encode().join([base64.b64decode(x.encode()) for x in certLines])
+ break
+ return idx, substrate
+
+
+# Backward compatibility routine
+def readPemFromFile(fileObj,
+ startMarker='-----BEGIN CERTIFICATE-----',
+ endMarker='-----END CERTIFICATE-----'):
+ idx, substrate = readPemBlocksFromFile(fileObj, (startMarker, endMarker))
+ return substrate
+
+
+def readBase64fromText(text):
+ if sys.version_info[0] <= 2:
+ return base64.b64decode(text)
+ else:
+ return base64.b64decode(text.encode())
+
+
+def readBase64FromFile(fileObj):
+ return readBase64fromText(fileObj.read())
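+
+
+# Usage sketch (illustrative, not part of upstream; 'chain.pem' is a
+# hypothetical file holding one or more PEM-armored certificates):
+#
+#     markers = ('-----BEGIN CERTIFICATE-----', '-----END CERTIFICATE-----')
+#     certFile = open('chain.pem')
+#     while True:
+#         idx, substrate = readPemBlocksFromFile(certFile, markers)
+#         if not substrate:
+#             break
+#         # `substrate` now holds the DER bytes of one certificate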
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc1155.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1155.py
new file mode 100644
index 0000000000..611e97eb74
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1155.py
@@ -0,0 +1,96 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv1 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1155.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class ObjectName(univ.ObjectIdentifier):
+ pass
+
+
+class SimpleSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('number', univ.Integer()),
+ namedtype.NamedType('string', univ.OctetString()),
+ namedtype.NamedType('object', univ.ObjectIdentifier()),
+ namedtype.NamedType('empty', univ.Null())
+ )
+
+
+class IpAddress(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0)
+ )
+ subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
+ 4, 4
+ )
+
+
+class NetworkAddress(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('internet', IpAddress())
+ )
+
+
+class Counter(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 1)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Gauge(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class TimeTicks(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 3)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Opaque(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 4)
+ )
+
+
+class ApplicationSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('address', NetworkAddress()),
+ namedtype.NamedType('counter', Counter()),
+ namedtype.NamedType('gauge', Gauge()),
+ namedtype.NamedType('ticks', TimeTicks()),
+ namedtype.NamedType('arbitrary', Opaque())
+ )
+
+
+class ObjectSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('simple', SimpleSyntax()),
+ namedtype.NamedType('application-wide', ApplicationSyntax())
+ )
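+
+
+# Usage sketch (illustrative, not part of upstream): selecting alternatives
+# of the nested CHOICEs defined above.
+#
+#     syntax = SimpleSyntax()
+#     syntax['number'] = 42            # picks the INTEGER alternative
+#
+#     value = ObjectSyntax()
+#     value['simple'] = syntax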
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc1157.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1157.py
new file mode 100644
index 0000000000..b80d926a26
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1157.py
@@ -0,0 +1,126 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv1 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1157.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1155
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('version-1', 0)
+ )
+ defaultValue = 0
+
+
+class Community(univ.OctetString):
+ pass
+
+
+class RequestID(univ.Integer):
+ pass
+
+
+class ErrorStatus(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('noError', 0),
+ ('tooBig', 1),
+ ('noSuchName', 2),
+ ('badValue', 3),
+ ('readOnly', 4),
+ ('genErr', 5)
+ )
+
+
+class ErrorIndex(univ.Integer):
+ pass
+
+
+class VarBind(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', rfc1155.ObjectName()),
+ namedtype.NamedType('value', rfc1155.ObjectSyntax())
+ )
+
+
+class VarBindList(univ.SequenceOf):
+ componentType = VarBind()
+
+
+class _RequestBase(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request-id', RequestID()),
+ namedtype.NamedType('error-status', ErrorStatus()),
+ namedtype.NamedType('error-index', ErrorIndex()),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class GetRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+
+
+class GetNextRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+
+
+class GetResponsePDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+ )
+
+
+class SetRequestPDU(_RequestBase):
+ tagSet = _RequestBase.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+ )
+
+
+class TrapPDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enterprise', univ.ObjectIdentifier()),
+ namedtype.NamedType('agent-addr', rfc1155.NetworkAddress()),
+ namedtype.NamedType('generic-trap', univ.Integer().clone(
+ namedValues=namedval.NamedValues(('coldStart', 0), ('warmStart', 1), ('linkDown', 2), ('linkUp', 3),
+ ('authenticationFailure', 4), ('egpNeighborLoss', 5),
+ ('enterpriseSpecific', 6)))),
+ namedtype.NamedType('specific-trap', univ.Integer()),
+ namedtype.NamedType('time-stamp', rfc1155.TimeTicks()),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class Pdus(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('get-request', GetRequestPDU()),
+ namedtype.NamedType('get-next-request', GetNextRequestPDU()),
+ namedtype.NamedType('get-response', GetResponsePDU()),
+ namedtype.NamedType('set-request', SetRequestPDU()),
+ namedtype.NamedType('trap', TrapPDU())
+ )
+
+
+class Message(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('community', Community()),
+ namedtype.NamedType('data', Pdus())
+ )
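+
+
+# Usage sketch (illustrative, not part of upstream): building and BER-encoding
+# an SNMPv1 message skeleton.
+#
+#     from pyasn1.codec.ber import encoder
+#
+#     msg = Message()
+#     msg['version'] = 0
+#     msg['community'] = 'public'
+#     # populate msg['data'] with one of the Pdus alternatives (e.g. a
+#     # GetRequestPDU) before serializing, then:
+#     substrate = encoder.encode(msg)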
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc1901.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1901.py
new file mode 100644
index 0000000000..04533da0da
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1901.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1901.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+
+class Message(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('version-2c', 1)))),
+ namedtype.NamedType('community', univ.OctetString()),
+ namedtype.NamedType('data', univ.Any())
+ )
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc1902.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1902.py
new file mode 100644
index 0000000000..d1a1648978
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1902.py
@@ -0,0 +1,129 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1902.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class Integer(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ -2147483648, 2147483647
+ )
+
+
+class Integer32(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ -2147483648, 2147483647
+ )
+
+
+class OctetString(univ.OctetString):
+ subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
+ 0, 65535
+ )
+
+
+class IpAddress(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x00)
+ )
+ subtypeSpec = univ.OctetString.subtypeSpec + constraint.ValueSizeConstraint(
+ 4, 4
+ )
+
+
+class Counter32(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x01)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Gauge32(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Unsigned32(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x02)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class TimeTicks(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x03)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 4294967295
+ )
+
+
+class Opaque(univ.OctetString):
+ tagSet = univ.OctetString.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x04)
+ )
+
+
+class Counter64(univ.Integer):
+ tagSet = univ.Integer.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 0x06)
+ )
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, 18446744073709551615
+ )
+
+
+class Bits(univ.OctetString):
+ pass
+
+
+class ObjectName(univ.ObjectIdentifier):
+ pass
+
+
+class SimpleSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('integer-value', Integer()),
+ namedtype.NamedType('string-value', OctetString()),
+ namedtype.NamedType('objectID-value', univ.ObjectIdentifier())
+ )
+
+
+class ApplicationSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ipAddress-value', IpAddress()),
+ namedtype.NamedType('counter-value', Counter32()),
+ namedtype.NamedType('timeticks-value', TimeTicks()),
+ namedtype.NamedType('arbitrary-value', Opaque()),
+ namedtype.NamedType('big-counter-value', Counter64()),
+ # Not usable inside this CHOICE: Unsigned32 reuses Gauge32's [APPLICATION 2] tag
+ # namedtype.NamedType('unsigned-integer-value', Unsigned32()),
+ namedtype.NamedType('gauge32-value', Gauge32())
+ ) # BITS misplaced?
+
+
+class ObjectSyntax(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('simple', SimpleSyntax()),
+ namedtype.NamedType('application-wide', ApplicationSyntax())
+ )
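+
+
+# Usage sketch (illustrative, not part of upstream): the range constraints
+# above are enforced on instantiation.
+#
+#     Counter32(4294967295)        # fine, top of the allowed range
+#     Counter32(4294967296)        # raises pyasn1.error.ValueConstraintError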
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc1905.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1905.py
new file mode 100644
index 0000000000..72c44ed436
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc1905.py
@@ -0,0 +1,135 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv2c PDU syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc1905.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1902
+
+max_bindings = rfc1902.Integer(2147483647)
+
+
+class _BindValue(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('value', rfc1902.ObjectSyntax()),
+ namedtype.NamedType('unSpecified', univ.Null()),
+ namedtype.NamedType('noSuchObject',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('noSuchInstance',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('endOfMibView',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class VarBind(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', rfc1902.ObjectName()),
+ namedtype.NamedType('', _BindValue())
+ )
+
+
+class VarBindList(univ.SequenceOf):
+ componentType = VarBind()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(
+ 0, max_bindings
+ )
+
+
+class PDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request-id', rfc1902.Integer32()),
+ namedtype.NamedType('error-status', univ.Integer(
+ namedValues=namedval.NamedValues(('noError', 0), ('tooBig', 1), ('noSuchName', 2), ('badValue', 3),
+ ('readOnly', 4), ('genErr', 5), ('noAccess', 6), ('wrongType', 7),
+ ('wrongLength', 8), ('wrongEncoding', 9), ('wrongValue', 10),
+ ('noCreation', 11), ('inconsistentValue', 12), ('resourceUnavailable', 13),
+ ('commitFailed', 14), ('undoFailed', 15), ('authorizationError', 16),
+ ('notWritable', 17), ('inconsistentName', 18)))),
+ namedtype.NamedType('error-index',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class BulkPDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request-id', rfc1902.Integer32()),
+ namedtype.NamedType('non-repeaters',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+ namedtype.NamedType('max-repetitions',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, max_bindings))),
+ namedtype.NamedType('variable-bindings', VarBindList())
+ )
+
+
+class GetRequestPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+
+
+class GetNextRequestPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+
+
+class ResponsePDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+ )
+
+
+class SetRequestPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+ )
+
+
+class GetBulkRequestPDU(BulkPDU):
+ tagSet = BulkPDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
+ )
+
+
+class InformRequestPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
+ )
+
+
+class SNMPv2TrapPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
+ )
+
+
+class ReportPDU(PDU):
+ tagSet = PDU.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
+ )
+
+
+class PDUs(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('get-request', GetRequestPDU()),
+ namedtype.NamedType('get-next-request', GetNextRequestPDU()),
+ namedtype.NamedType('get-bulk-request', GetBulkRequestPDU()),
+ namedtype.NamedType('response', ResponsePDU()),
+ namedtype.NamedType('set-request', SetRequestPDU()),
+ namedtype.NamedType('inform-request', InformRequestPDU()),
+ namedtype.NamedType('snmpV2-trap', SNMPv2TrapPDU()),
+ namedtype.NamedType('report', ReportPDU())
+ )
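+
+
+# Usage sketch (illustrative, not part of upstream; `substrate` stands for
+# BER-encoded bytes of a single SNMPv2 PDU): the implicit context tags above
+# let the decoder pick the right alternative automatically.
+#
+#     from pyasn1.codec.ber import decoder
+#
+#     pdu, rest = decoder.decode(substrate, asn1Spec=PDUs())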
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2251.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2251.py
new file mode 100644
index 0000000000..84c3d87c23
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2251.py
@@ -0,0 +1,563 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# LDAP message syntax
+#
+# ASN.1 source from:
+# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/ldap.asn
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+maxInt = univ.Integer(2147483647)
+
+
+class LDAPString(univ.OctetString):
+ pass
+
+
+class LDAPOID(univ.OctetString):
+ pass
+
+
+class LDAPDN(LDAPString):
+ pass
+
+
+class RelativeLDAPDN(LDAPString):
+ pass
+
+
+class AttributeType(LDAPString):
+ pass
+
+
+class AttributeDescription(LDAPString):
+ pass
+
+
+class AttributeDescriptionList(univ.SequenceOf):
+ componentType = AttributeDescription()
+
+
+class AttributeValue(univ.OctetString):
+ pass
+
+
+class AssertionValue(univ.OctetString):
+ pass
+
+
+class AttributeValueAssertion(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attributeDesc', AttributeDescription()),
+ namedtype.NamedType('assertionValue', AssertionValue())
+ )
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class MatchingRuleId(LDAPString):
+ pass
+
+
+class Control(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlType', LDAPOID()),
+ namedtype.DefaultedNamedType('criticality', univ.Boolean('False')),
+ namedtype.OptionalNamedType('controlValue', univ.OctetString())
+ )
+
+
+class Controls(univ.SequenceOf):
+ componentType = Control()
+
+
+class LDAPURL(LDAPString):
+ pass
+
+
+class Referral(univ.SequenceOf):
+ componentType = LDAPURL()
+
+
+class SaslCredentials(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mechanism', LDAPString()),
+ namedtype.OptionalNamedType('credentials', univ.OctetString())
+ )
+
+
+class AuthenticationChoice(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('simple', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('reserved-1', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('reserved-2', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('sasl',
+ SaslCredentials().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+class BindRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 0)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 127))),
+ namedtype.NamedType('name', LDAPDN()),
+ namedtype.NamedType('authentication', AuthenticationChoice())
+ )
+
+
+class PartialAttributeList(univ.SequenceOf):
+ componentType = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+ )
+
+
+class SearchResultEntry(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 4)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('objectName', LDAPDN()),
+ namedtype.NamedType('attributes', PartialAttributeList())
+ )
+
+
+class MatchingRuleAssertion(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('matchingRule', MatchingRuleId().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('type', AttributeDescription().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('matchValue',
+ AssertionValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('dnAttributes', univ.Boolean('False').subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class SubstringFilter(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('substrings',
+ univ.SequenceOf(
+ componentType=univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'initial', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ ),
+ namedtype.NamedType(
+ 'any', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))
+ ),
+ namedtype.NamedType(
+ 'final', LDAPString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))
+ )
+ )
+ )
+ )
+ )
+ )
+
+
+# Ugly hack to handle recursive Filter reference (up to 3-levels deep).
+
+class Filter3(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('substrings', SubstringFilter().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.NamedType('present', AttributeDescription().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+ namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+class Filter2(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('and', univ.SetOf(componentType=Filter3()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('or', univ.SetOf(componentType=Filter3()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('not',
+ Filter3().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('substrings', SubstringFilter().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.NamedType('present', AttributeDescription().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+ namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+class Filter(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('and', univ.SetOf(componentType=Filter2()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('or', univ.SetOf(componentType=Filter2()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('not',
+ Filter2().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('equalityMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('substrings', SubstringFilter().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('greaterOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('lessOrEqual', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.NamedType('present', AttributeDescription().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('approxMatch', AttributeValueAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8))),
+ namedtype.NamedType('extensibleMatch', MatchingRuleAssertion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+# End of Filter hack
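+#
+# Illustrative note (not part of upstream): the Filter/Filter2/Filter3 stack
+# unrolls the recursive ASN.1 Filter into three concrete CHOICEs, so a filter
+# with two nested boolean operators, such as (&(|(cn=a)(cn=b))(!(uid=c))),
+# is representable, while a third nested operator has no alternative left in
+# Filter3 and cannot be decoded with this module.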
+
+class SearchRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 3)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('baseObject', LDAPDN()),
+ namedtype.NamedType('scope', univ.Enumerated(
+ namedValues=namedval.NamedValues(('baseObject', 0), ('singleLevel', 1), ('wholeSubtree', 2)))),
+ namedtype.NamedType('derefAliases', univ.Enumerated(
+ namedValues=namedval.NamedValues(('neverDerefAliases', 0), ('derefInSearching', 1),
+ ('derefFindingBaseObj', 2), ('derefAlways', 3)))),
+ namedtype.NamedType('sizeLimit',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
+ namedtype.NamedType('timeLimit',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, maxInt))),
+ namedtype.NamedType('typesOnly', univ.Boolean()),
+ namedtype.NamedType('filter', Filter()),
+ namedtype.NamedType('attributes', AttributeDescriptionList())
+ )
+
+
+class UnbindRequest(univ.Null):
+ tagSet = univ.Null.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatSimple, 2)
+ )
+
+
+class BindResponse(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('resultCode', univ.Enumerated(
+ namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+ ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+ ('compareTrue', 6), ('authMethodNotSupported', 7),
+ ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+ ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+ ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+ ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+ ('inappropriateMatching', 18), ('constraintViolation', 19),
+ ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+ ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+ ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+ ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+ ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+ ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+ ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+ ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+ ('objectClassModsProhibited', 69), ('reserved-70', 70),
+ ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+ ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+ ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+ ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+ namedtype.NamedType('matchedDN', LDAPDN()),
+ namedtype.NamedType('errorMessage', LDAPString()),
+ namedtype.OptionalNamedType('referral', Referral().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('serverSaslCreds', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)))
+ )
+
+
+class LDAPResult(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('resultCode', univ.Enumerated(
+ namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+ ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+ ('compareTrue', 6), ('authMethodNotSupported', 7),
+ ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+ ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+ ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+ ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+ ('inappropriateMatching', 18), ('constraintViolation', 19),
+ ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+ ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+ ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+ ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+ ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+ ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+ ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+ ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+ ('objectClassModsProhibited', 69), ('reserved-70', 70),
+ ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+ ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+ ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+ ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+ namedtype.NamedType('matchedDN', LDAPDN()),
+ namedtype.NamedType('errorMessage', LDAPString()),
+ namedtype.OptionalNamedType('referral', Referral().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+ )
+
+
+class SearchResultReference(univ.SequenceOf):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 19)
+ )
+ componentType = LDAPURL()
+
+
+class SearchResultDone(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 5)
+ )
+
+
+class AttributeTypeAndValues(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class ModifyRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 6)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('object', LDAPDN()),
+ namedtype.NamedType('modification',
+ univ.SequenceOf(
+ componentType=univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'operation', univ.Enumerated(namedValues=namedval.NamedValues(('add', 0), ('delete', 1), ('replace', 2)))
+ ),
+ namedtype.NamedType('modification', AttributeTypeAndValues())))
+ )
+ )
+ )
+
+
+class ModifyResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 7)
+ )
+
+
+class AttributeList(univ.SequenceOf):
+ componentType = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeDescription()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+ )
+
+
+class AddRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 8)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('entry', LDAPDN()),
+ namedtype.NamedType('attributes', AttributeList())
+ )
+
+
+class AddResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 9)
+ )
+
+
+class DelRequest(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 10)
+ )
+
+
+class DelResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 11)
+ )
+
+
+class ModifyDNRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 12)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('entry', LDAPDN()),
+ namedtype.NamedType('newrdn', RelativeLDAPDN()),
+ namedtype.NamedType('deleteoldrdn', univ.Boolean()),
+ namedtype.OptionalNamedType('newSuperior',
+ LDAPDN().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+
+ )
+
+
+class ModifyDNResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 13)
+ )
+
+
+class CompareRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 14)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('entry', LDAPDN()),
+ namedtype.NamedType('ava', AttributeValueAssertion())
+ )
+
+
+class CompareResponse(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 15)
+ )
+
+
+class AbandonRequest(LDAPResult):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 16)
+ )
+
+
+class ExtendedRequest(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 23)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('requestName',
+ LDAPOID().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('requestValue', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ExtendedResponse(univ.Sequence):
+ tagSet = univ.Sequence.tagSet.tagImplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 24)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('resultCode', univ.Enumerated(
+ namedValues=namedval.NamedValues(('success', 0), ('operationsError', 1), ('protocolError', 2),
+ ('timeLimitExceeded', 3), ('sizeLimitExceeded', 4), ('compareFalse', 5),
+ ('compareTrue', 6), ('authMethodNotSupported', 7),
+ ('strongAuthRequired', 8), ('reserved-9', 9), ('referral', 10),
+ ('adminLimitExceeded', 11), ('unavailableCriticalExtension', 12),
+ ('confidentialityRequired', 13), ('saslBindInProgress', 14),
+ ('noSuchAttribute', 16), ('undefinedAttributeType', 17),
+ ('inappropriateMatching', 18), ('constraintViolation', 19),
+ ('attributeOrValueExists', 20), ('invalidAttributeSyntax', 21),
+ ('noSuchObject', 32), ('aliasProblem', 33), ('invalidDNSyntax', 34),
+ ('reserved-35', 35), ('aliasDereferencingProblem', 36),
+ ('inappropriateAuthentication', 48), ('invalidCredentials', 49),
+ ('insufficientAccessRights', 50), ('busy', 51), ('unavailable', 52),
+ ('unwillingToPerform', 53), ('loopDetect', 54), ('namingViolation', 64),
+ ('objectClassViolation', 65), ('notAllowedOnNonLeaf', 66),
+ ('notAllowedOnRDN', 67), ('entryAlreadyExists', 68),
+ ('objectClassModsProhibited', 69), ('reserved-70', 70),
+ ('affectsMultipleDSAs', 71), ('other', 80), ('reserved-81', 81),
+ ('reserved-82', 82), ('reserved-83', 83), ('reserved-84', 84),
+ ('reserved-85', 85), ('reserved-86', 86), ('reserved-87', 87),
+ ('reserved-88', 88), ('reserved-89', 89), ('reserved-90', 90)))),
+ namedtype.NamedType('matchedDN', LDAPDN()),
+ namedtype.NamedType('errorMessage', LDAPString()),
+ namedtype.OptionalNamedType('referral', Referral().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+
+ namedtype.OptionalNamedType('responseName', LDAPOID().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 10))),
+ namedtype.OptionalNamedType('response', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 11)))
+ )
+
+
+class MessageID(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(
+ 0, maxInt
+ )
+
+
+class LDAPMessage(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('messageID', MessageID()),
+ namedtype.NamedType(
+ 'protocolOp', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('bindRequest', BindRequest()),
+ namedtype.NamedType('bindResponse', BindResponse()),
+ namedtype.NamedType('unbindRequest', UnbindRequest()),
+ namedtype.NamedType('searchRequest', SearchRequest()),
+ namedtype.NamedType('searchResEntry', SearchResultEntry()),
+ namedtype.NamedType('searchResDone', SearchResultDone()),
+ namedtype.NamedType('searchResRef', SearchResultReference()),
+ namedtype.NamedType('modifyRequest', ModifyRequest()),
+ namedtype.NamedType('modifyResponse', ModifyResponse()),
+ namedtype.NamedType('addRequest', AddRequest()),
+ namedtype.NamedType('addResponse', AddResponse()),
+ namedtype.NamedType('delRequest', DelRequest()),
+ namedtype.NamedType('delResponse', DelResponse()),
+ namedtype.NamedType('modDNRequest', ModifyDNRequest()),
+ namedtype.NamedType('modDNResponse', ModifyDNResponse()),
+ namedtype.NamedType('compareRequest', CompareRequest()),
+ namedtype.NamedType('compareResponse', CompareResponse()),
+ namedtype.NamedType('abandonRequest', AbandonRequest()),
+ namedtype.NamedType('extendedReq', ExtendedRequest()),
+ namedtype.NamedType('extendedResp', ExtendedResponse())
+ )
+ )
+ ),
+ namedtype.OptionalNamedType('controls', Controls().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
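+
+
+# A hedged usage sketch, not part of the upstream module: assuming ``substrate``
+# is a hypothetical bytes object holding one BER-encoded LDAP PDU read off the
+# wire, it could be decoded against this spec roughly like so:
+#
+#   from pyasn1.codec.ber import decoder
+#   ldap_msg, rest = decoder.decode(substrate, asn1Spec=LDAPMessage())
+#   print(ldap_msg['messageID'], ldap_msg['protocolOp'].getName())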
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2314.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2314.py
new file mode 100644
index 0000000000..a453217680
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2314.py
@@ -0,0 +1,48 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#10 syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc2314
+#
+# Sample captures can be obtained with the "openssl req" command
+#
+from pyasn1_modules.rfc2459 import *
+
+
+class Attributes(univ.SetOf):
+ componentType = Attribute()
+
+
+class Version(univ.Integer):
+ pass
+
+
+class CertificationRequestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.NamedType('attributes',
+ Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class CertificationRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+ )
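+
+
+# An illustrative decoding sketch, not upstream code: assuming ``substrate``
+# is a hypothetical bytes object holding a DER-encoded PKCS#10 request (the
+# base64 payload of a PEM "CERTIFICATE REQUEST" block), decoding could look like:
+#
+#   from pyasn1.codec.der import decoder
+#   csr, rest = decoder.decode(substrate, asn1Spec=CertificationRequest())
+#   print(csr['certificationRequestInfo']['subject'].prettyPrint())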
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2315.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2315.py
new file mode 100644
index 0000000000..a98c9a9e1f
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2315.py
@@ -0,0 +1,294 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#7 message syntax
+#
+# ASN.1 source from:
+# https://opensource.apple.com/source/Security/Security-55179.1/libsecurity_asn1/asn1/pkcs7.asn.auto.html
+#
+# Sample captures from:
+# openssl crl2pkcs7 -nocrl -certfile cert1.cer -out outfile.p7b
+#
+from pyasn1_modules.rfc2459 import *
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class AttributeValueAssertion(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attributeType', AttributeType()),
+ namedtype.NamedType('attributeValue', AttributeValue(),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+
+pkcs_7 = univ.ObjectIdentifier('1.2.840.113549.1.7')
+data = univ.ObjectIdentifier('1.2.840.113549.1.7.1')
+signedData = univ.ObjectIdentifier('1.2.840.113549.1.7.2')
+envelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.3')
+signedAndEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.4')
+digestedData = univ.ObjectIdentifier('1.2.840.113549.1.7.5')
+encryptedData = univ.ObjectIdentifier('1.2.840.113549.1.7.6')
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+contentTypeMap = {}
+
+
+class EncryptedContentInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType(
+ 'encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ ),
+ openType=opentype.OpenType('contentType', contentTypeMap)
+ )
+ )
+
+
+class Version(univ.Integer): # overrides x509.Version
+ pass
+
+
+class EncryptedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
+ )
+
+
+class DigestAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ componentType = DigestAlgorithmIdentifier()
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class ContentInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.OptionalNamedType(
+ 'content',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)),
+ openType=opentype.OpenType('contentType', contentTypeMap)
+ )
+ )
+
+
+class DigestedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('contentInfo', ContentInfo()),
+ namedtype.NamedType('digest', Digest())
+ )
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+ )
+
+
+class KeyEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class RecipientInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+ )
+
+
+class RecipientInfos(univ.SetOf):
+ componentType = RecipientInfo()
+
+
+class Attributes(univ.SetOf):
+ componentType = Attribute()
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('attributes', Attributes())
+ )
+
+
+class SignatureAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+ )
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class ExtendedCertificatesAndCertificates(univ.SetOf):
+ componentType = ExtendedCertificateOrCertificate()
+
+
+class SerialNumber(univ.Integer):
+ pass
+
+
+class CRLEntry(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', SerialNumber()),
+ namedtype.NamedType('revocationDate', useful.UTCTime())
+ )
+
+
+class TBSCertificateRevocationList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('lastUpdate', useful.UTCTime()),
+ namedtype.NamedType('nextUpdate', useful.UTCTime()),
+ namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=CRLEntry()))
+ )
+
+
+class CertificateRevocationList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificateRevocationList', TBSCertificateRevocationList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+
+class CertificateRevocationLists(univ.SetOf):
+ componentType = CertificateRevocationList()
+
+
+class DigestEncryptionAlgorithmIdentifier(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedDigest(univ.OctetString):
+ pass
+
+
+class SignerInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('authenticatedAttributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('digestEncryptionAlgorithm', DigestEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedDigest', EncryptedDigest()),
+ namedtype.OptionalNamedType('unauthenticatedAttributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class SignerInfos(univ.SetOf):
+ componentType = SignerInfo()
+
+
+class SignedAndEnvelopedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+ )
+
+
+class EnvelopedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
+ )
+
+
+class DigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('digest', Digest())
+ )
+
+
+class SignedData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.OptionalNamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('contentInfo', ContentInfo()),
+ namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('signerInfos', SignerInfos())
+ )
+
+
+class Data(univ.OctetString):
+    pass
+
+
+_contentTypeMapUpdate = {
+ data: Data(),
+ signedData: SignedData(),
+ envelopedData: EnvelopedData(),
+ signedAndEnvelopedData: SignedAndEnvelopedData(),
+ digestedData: DigestedData(),
+ encryptedData: EncryptedData()
+}
+
+contentTypeMap.update(_contentTypeMapUpdate)
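+
+
+# A hedged usage sketch, not part of the upstream module: assuming ``substrate``
+# is a hypothetical bytes object holding DER-encoded PKCS#7 data (e.g. from the
+# "openssl crl2pkcs7" command above), the outer ContentInfo can be decoded and
+# its payload resolved through contentTypeMap along these lines:
+#
+#   from pyasn1.codec.der import decoder
+#   info, rest = decoder.decode(substrate, asn1Spec=ContentInfo())
+#   inner, _ = decoder.decode(info['content'], asn1Spec=contentTypeMap[info['contentType']])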
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2437.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2437.py
new file mode 100644
index 0000000000..1139eb4bcc
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2437.py
@@ -0,0 +1,69 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#1 syntax
+#
+# ASN.1 source from:
+# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2.asn
+#
+# Sample captures can be obtained with the "openssl genrsa" command
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules.rfc2459 import AlgorithmIdentifier
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+md4WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.3')
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+rsaOAEPEncryptionSET = univ.ObjectIdentifier('1.2.840.113549.1.1.6')
+id_RSAES_OAEP = univ.ObjectIdentifier('1.2.840.113549.1.1.7')
+id_mgf1 = univ.ObjectIdentifier('1.2.840.113549.1.1.8')
+id_pSpecified = univ.ObjectIdentifier('1.2.840.113549.1.1.9')
+id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26')
+
+MAX = float('inf')
+
+
+class Version(univ.Integer):
+ pass
+
+
+class RSAPrivateKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer()),
+ namedtype.NamedType('privateExponent', univ.Integer()),
+ namedtype.NamedType('prime1', univ.Integer()),
+ namedtype.NamedType('prime2', univ.Integer()),
+ namedtype.NamedType('exponent1', univ.Integer()),
+ namedtype.NamedType('exponent2', univ.Integer()),
+ namedtype.NamedType('coefficient', univ.Integer())
+ )
+
+
+class RSAPublicKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+ )
+
+
+# XXX defaults not set
+class RSAES_OAEP_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('maskGenFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('pSourceFunc', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
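+
+
+# A non-upstream usage sketch: assuming ``substrate`` is a hypothetical bytes
+# object holding a DER-encoded key (the payload of a PEM "RSA PRIVATE KEY"
+# block emitted by "openssl genrsa"), decoding could look like:
+#
+#   from pyasn1.codec.der import decoder
+#   key, rest = decoder.decode(substrate, asn1Spec=RSAPrivateKey())
+#   modulus = int(key['modulus'])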
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2459.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2459.py
new file mode 100644
index 0000000000..9f3578797a
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2459.py
@@ -0,0 +1,1339 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Updated by Russ Housley to resolve the TODO regarding the Certificate
+# Policies Certificate Extension.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 message syntax
+#
+# ASN.1 source from:
+# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/x509.asn
+# http://www.ietf.org/rfc/rfc2459.txt
+#
+# Sample captures from:
+# http://wiki.wireshark.org/SampleCaptures/
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+#
+# PKIX1Explicit88
+#
+
+# Upper Bounds
+ub_name = univ.Integer(32768)
+ub_common_name = univ.Integer(64)
+ub_locality_name = univ.Integer(128)
+ub_state_name = univ.Integer(128)
+ub_organization_name = univ.Integer(64)
+ub_organizational_unit_name = univ.Integer(64)
+ub_title = univ.Integer(64)
+ub_match = univ.Integer(128)
+ub_emailaddress_length = univ.Integer(128)
+ub_common_name_length = univ.Integer(64)
+ub_country_name_alpha_length = univ.Integer(2)
+ub_country_name_numeric_length = univ.Integer(3)
+ub_domain_defined_attributes = univ.Integer(4)
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+ub_domain_name_length = univ.Integer(16)
+ub_extension_attributes = univ.Integer(256)
+ub_e163_4_number_length = univ.Integer(15)
+ub_e163_4_sub_address_length = univ.Integer(40)
+ub_generation_qualifier_length = univ.Integer(3)
+ub_given_name_length = univ.Integer(16)
+ub_initials_length = univ.Integer(5)
+ub_integer_options = univ.Integer(256)
+ub_numeric_user_id_length = univ.Integer(32)
+ub_organization_name_length = univ.Integer(64)
+ub_organizational_unit_name_length = univ.Integer(32)
+ub_organizational_units = univ.Integer(4)
+ub_pds_name_length = univ.Integer(16)
+ub_pds_parameter_length = univ.Integer(30)
+ub_pds_physical_address_lines = univ.Integer(6)
+ub_postal_code_length = univ.Integer(16)
+ub_surname_length = univ.Integer(40)
+ub_terminal_id_length = univ.Integer(24)
+ub_unformatted_address_length = univ.Integer(180)
+ub_x121_address_length = univ.Integer(16)
+
+
+class UniversalString(char.UniversalString):
+ pass
+
+
+class BMPString(char.BMPString):
+ pass
+
+
+class UTF8String(char.UTF8String):
+ pass
+
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+id_qt = univ.ObjectIdentifier('1.3.6.1.5.5.7.2')
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+id_ad = univ.ObjectIdentifier('1.3.6.1.5.5.7.48')
+
+id_qt_cps = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.1')
+id_qt_unotice = univ.ObjectIdentifier('1.3.6.1.5.5.7.2.2')
+
+id_ad_ocsp = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.1')
+id_ad_caIssuers = univ.ObjectIdentifier('1.3.6.1.5.5.7.48.2')
+
+
+id_at = univ.ObjectIdentifier('2.5.4')
+id_at_name = univ.ObjectIdentifier('2.5.4.41')
+# preserve misspelled variable for compatibility
+id_at_sutname = id_at_surname = univ.ObjectIdentifier('2.5.4.4')
+id_at_givenName = univ.ObjectIdentifier('2.5.4.42')
+id_at_initials = univ.ObjectIdentifier('2.5.4.43')
+id_at_generationQualifier = univ.ObjectIdentifier('2.5.4.44')
+
+
+class X520name(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+ )
+
+
+id_at_commonName = univ.ObjectIdentifier('2.5.4.3')
+
+
+class X520CommonName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+ )
+
+
+id_at_localityName = univ.ObjectIdentifier('2.5.4.7')
+
+
+class X520LocalityName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
+ )
+
+
+id_at_stateOrProvinceName = univ.ObjectIdentifier('2.5.4.8')
+
+
+class X520StateOrProvinceName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+ )
+
+
+id_at_organizationName = univ.ObjectIdentifier('2.5.4.10')
+
+
+class X520OrganizationName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
+ )
+
+
+id_at_organizationalUnitName = univ.ObjectIdentifier('2.5.4.11')
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+ )
+
+
+id_at_title = univ.ObjectIdentifier('2.5.4.12')
+
+
+class X520Title(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+ )
+
+
+id_at_dnQualifier = univ.ObjectIdentifier('2.5.4.46')
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+id_at_countryName = univ.ObjectIdentifier('2.5.4.6')
+
+
+class X520countryName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(2, 2)
+
+
+pkcs_9 = univ.ObjectIdentifier('1.2.840.113549.1.9')
+
+emailAddress = univ.ObjectIdentifier('1.2.840.113549.1.9.1')
+
+
+class Pkcs9email(char.IA5String):
+ subtypeSpec = char.IA5String.subtypeSpec + constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+
+# ----
+
+class DSAPrivateKey(univ.Sequence):
+ """PKIX compliant DSA private key structure"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 0)))),
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('public', univ.Integer()),
+ namedtype.NamedType('private', univ.Integer())
+ )
+
+
+# ----
+
+
+class DirectoryString(univ.Choice):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('teletexString',
+                            char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+        namedtype.NamedType('printableString',
+                            char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+        namedtype.NamedType('universalString',
+                            char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+        namedtype.NamedType('utf8String',
+                            char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+        namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+        namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+        # XXX: ia5String is not part of DirectoryString in RFC 2459; it is
+        # retained here nonetheless, presumably for interoperability.
+    )
+
+
+# certificate and CRL specific structures begin here
+
+class AlgorithmIdentifier(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+        namedtype.OptionalNamedType('parameters', univ.Any())
+    )
+
+
+# Algorithm OIDs and parameter structures
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
+
+
+class Dss_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
+
+
+class ValidationParms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seed', univ.BitString()),
+ namedtype.NamedType('pgenCounter', univ.Integer())
+ )
+
+
+class DomainParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('j', univ.Integer()),
+ namedtype.OptionalNamedType('validationParms', ValidationParms())
+ )
+
+
+id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
+
+
+class Dss_Parms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer())
+ )
+
+
+# x400 address syntax starts here
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+
+class TeletexDomainDefinedAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.TeletexString())
+ )
+
+
+class TeletexDomainDefinedAttributes(univ.SequenceOf):
+ componentType = TeletexDomainDefinedAttribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+terminal_type = univ.Integer(23)
+
+
+class TerminalType(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, ub_integer_options)
+ namedValues = namedval.NamedValues(
+ ('telex', 3),
+ ('teletelex', 4),
+ ('g3-facsimile', 5),
+ ('g4-facsimile', 6),
+ ('ia5-terminal', 7),
+ ('videotex', 8)
+ )
+
+
+class PresentationAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3),
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ )
+
+
+extended_network_address = univ.Integer(22)
+
+
+class E163_4_address(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('e163-4-address', E163_4_address()),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class PDSParameter(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
+ )
+
+
+local_postal_attributes = univ.Integer(21)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+unique_postal_name = univ.Integer(20)
+
+poste_restante_address = univ.Integer(19)
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+post_office_box_address = univ.Integer(18)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+street_address = univ.Integer(17)
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+class UnformattedPostalAddress(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_physical_address_lines)))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
+ )
+
+
+physical_delivery_office_name = univ.Integer(10)
+
+
+class PhysicalDeliveryOfficeName(PDSParameter):
+ pass
+
+
+physical_delivery_office_number = univ.Integer(11)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+extension_OR_address_components = univ.Integer(12)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+physical_delivery_personal_name = univ.Integer(13)
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+physical_delivery_organization_name = univ.Integer(14)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+unformatted_postal_address = univ.Integer(16)
+
+postal_code = univ.Integer(9)
+
+
+class PostalCode(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
+ namedtype.NamedType('printable-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
+ )
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
+ ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+ )
+
+
+class PDSName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+physical_delivery_country_name = univ.Integer(8)
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+pds_name = univ.Integer(7)
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ componentType = TeletexOrganizationalUnitName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+teletex_personal_name = univ.Integer(4)
+
+
+class TeletexPersonalName(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+teletex_organization_name = univ.Integer(3)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+teletex_common_name = univ.Integer(2)
+
+
+class TeletexCommonName(char.TeletexString):
+ subtypeSpec = char.TeletexString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+class CommonName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+common_name = univ.Integer(1)
+
+
+class ExtensionAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_extension_attributes),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ExtensionAttributes(univ.SetOf):
+ componentType = ExtensionAttribute()
+ sizeSpec = univ.SetOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+ )
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ componentType = BuiltInDomainDefinedAttribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ componentType = OrganizationalUnitName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class PersonalName(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+class NumericUserIdentifier(char.NumericString):
+ subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+class OrganizationName(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+class PrivateDomainName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+ )
+
+
+class TerminalIdentifier(char.PrintableString):
+ subtypeSpec = char.PrintableString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+
+class X121Address(char.NumericString):
+ subtypeSpec = char.NumericString.subtypeSpec + constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+class AdministrationDomainName(univ.Choice):
+ tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+ )
+
+
+class CountryName(univ.Choice):
+ tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1)
+ )
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length,
+ ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+ )
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+ )
+
+
+class ORAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+ )
+
+
+#
+# PKIX1Implicit88
+#
+
+id_ce_invalidityDate = univ.ObjectIdentifier('2.5.29.24')
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+id_holdinstruction_none = univ.ObjectIdentifier('2.2.840.10040.2.1')
+id_holdinstruction_callissuer = univ.ObjectIdentifier('2.2.840.10040.2.2')
+id_holdinstruction_reject = univ.ObjectIdentifier('2.2.840.10040.2.3')
+
+holdInstruction = univ.ObjectIdentifier('2.2.840.10040.2')
+
+id_ce_holdInstructionCode = univ.ObjectIdentifier('2.5.29.23')
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+id_ce_cRLReasons = univ.ObjectIdentifier('2.5.29.21')
+
+
+class CRLReason(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8)
+ )
+
+
+id_ce_cRLNumber = univ.ObjectIdentifier('2.5.29.20')
+
+
+class CRLNumber(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+id_kp_serverAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.1')
+id_kp_clientAuth = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.2')
+id_kp_codeSigning = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.3')
+id_kp_emailProtection = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.4')
+id_kp_ipsecEndSystem = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.5')
+id_kp_ipsecTunnel = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.6')
+id_kp_ipsecUser = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.7')
+id_kp_timeStamping = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.8')
+id_pe_authorityInfoAccess = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.1')
+id_ce_extKeyUsage = univ.ObjectIdentifier('2.5.29.37')
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ componentType = KeyPurposeId()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class ReasonFlags(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6)
+ )
+
+
+class SkipCerts(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueSizeConstraint(0, MAX)
+
+
+id_ce_policyConstraints = univ.ObjectIdentifier('2.5.29.36')
+
+
+class PolicyConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy', SkipCerts().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping', SkipCerts().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+id_ce_basicConstraints = univ.ObjectIdentifier('2.5.29.19')
+
+
+class BasicConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean(False)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+ )
+
+
+id_ce_subjectDirectoryAttributes = univ.ObjectIdentifier('2.5.29.9')
+
+
+class EDIPartyName(univ.Sequence):
+    componentType = namedtype.NamedTypes(
+        namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+            implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+        namedtype.NamedType('partyName',
+                            DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+    )
+
+
+id_ce_deltaCRLIndicator = univ.ObjectIdentifier('2.5.29.27')
+
+
+class BaseDistance(univ.Integer):
+ subtypeSpec = univ.Integer.subtypeSpec + constraint.ValueRangeConstraint(0, MAX)
+
+
+id_ce_cRLDistributionPoints = univ.ObjectIdentifier('2.5.29.31')
+
+
+id_ce_issuingDistributionPoint = univ.ObjectIdentifier('2.5.29.28')
+
+
+id_ce_nameConstraints = univ.ObjectIdentifier('2.5.29.30')
+
+
+class DisplayText(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+ )
+
+
+class NoticeReference(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+ )
+
+
+class UserNotice(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+ )
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ subtypeSpec = univ.ObjectIdentifier.subtypeSpec + constraint.SingleValueConstraint(id_qt_cps, id_qt_unotice)
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType('qualifier', univ.Any())
+ )
+
+
+id_ce_certificatePolicies = univ.ObjectIdentifier('2.5.29.32')
+
+
+class PolicyInformation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class CertificatePolicies(univ.SequenceOf):
+ componentType = PolicyInformation()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+id_ce_policyMappings = univ.ObjectIdentifier('2.5.29.33')
+
+
+class PolicyMapping(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+ )
+
+
+class PolicyMappings(univ.SequenceOf):
+ componentType = PolicyMapping()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+id_ce_privateKeyUsagePeriod = univ.ObjectIdentifier('2.5.29.16')
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+id_ce_keyUsage = univ.ObjectIdentifier('2.5.29.15')
+
+
+class KeyUsage(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+ )
+
+
+id_ce = univ.ObjectIdentifier('2.5.29')
+
+id_ce_authorityKeyIdentifier = univ.ObjectIdentifier('2.5.29.35')
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+id_ce_subjectKeyIdentifier = univ.ObjectIdentifier('2.5.29.14')
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_ce_certificateIssuer = univ.ObjectIdentifier('2.5.29.29')
+
+
+id_ce_subjectAltName = univ.ObjectIdentifier('2.5.29.17')
+
+
+id_ce_issuerAltName = univ.ObjectIdentifier('2.5.29.18')
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeType(univ.ObjectIdentifier):
+    pass
+
+
+certificateAttributesMap = {}
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('value', AttributeValue(),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('vals', univ.SetOf(componentType=AttributeValue()))
+ )
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ componentType = Attribute()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ componentType = AttributeTypeAndValue()
+
+
+class RDNSequence(univ.SequenceOf):
+ componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+    componentType = namedtype.NamedTypes(
+        namedtype.NamedType('', RDNSequence())
+    )
+
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+class AnotherName(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType('value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class GeneralName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+ )
+
+
+class GeneralNames(univ.SequenceOf):
+ componentType = GeneralName()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class AccessDescription(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+ )
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ componentType = AccessDescription()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class DistributionPointName(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class DistributionPoint(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class CRLDistPointsSyntax(univ.SequenceOf):
+ componentType = DistributionPoint()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('onlyContainsUserCerts', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('onlyContainsCACerts', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('indirectCRL', univ.Boolean(False).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+ )
+
+
+class GeneralSubtree(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance(0).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ componentType = GeneralSubtree()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+certificateExtensionsMap = {}
+
+
+class Extension(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean('False')),
+ namedtype.NamedType('extnValue', univ.OctetString(),
+ openType=opentype.OpenType('extnID', certificateExtensionsMap))
+ )
+
+
+class Extensions(univ.SequenceOf):
+ componentType = Extension()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectPublicKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+ )
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class Time(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+ )
+
+
+class Validity(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('v1', 0), ('v2', 1), ('v3', 2)
+ )
+
+
+class TBSCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )
+
+
+class Certificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+ )
+
+# CRL structures
+
+class RevokedCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ )
+
+
+class TBSCertList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=RevokedCertificate())),
+ namedtype.OptionalNamedType('crlExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class CertificateList(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+# map of AttributeType -> AttributeValue
+
+_certificateAttributesMapUpdate = {
+ id_at_name: X520name(),
+ id_at_surname: X520name(),
+ id_at_givenName: X520name(),
+ id_at_initials: X520name(),
+ id_at_generationQualifier: X520name(),
+ id_at_commonName: X520CommonName(),
+ id_at_localityName: X520LocalityName(),
+ id_at_stateOrProvinceName: X520StateOrProvinceName(),
+ id_at_organizationName: X520OrganizationName(),
+ id_at_organizationalUnitName: X520OrganizationalUnitName(),
+ id_at_title: X520Title(),
+ id_at_dnQualifier: X520dnQualifier(),
+ id_at_countryName: X520countryName(),
+ emailAddress: Pkcs9email(),
+}
+
+certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# map of Certificate Extension OIDs to Extensions
+
+_certificateExtensionsMapUpdate = {
+ id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
+ id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
+ id_ce_keyUsage: KeyUsage(),
+ id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
+ id_ce_certificatePolicies: CertificatePolicies(),
+ id_ce_policyMappings: PolicyMappings(),
+ id_ce_subjectAltName: SubjectAltName(),
+ id_ce_issuerAltName: IssuerAltName(),
+ id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
+ id_ce_basicConstraints: BasicConstraints(),
+ id_ce_nameConstraints: NameConstraints(),
+ id_ce_policyConstraints: PolicyConstraints(),
+ id_ce_extKeyUsage: ExtKeyUsageSyntax(),
+ id_ce_cRLDistributionPoints: CRLDistPointsSyntax(),
+ id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
+ id_ce_cRLNumber: univ.Integer(),
+ id_ce_deltaCRLIndicator: BaseCRLNumber(),
+ id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
+ id_ce_cRLReasons: CRLReason(),
+ id_ce_holdInstructionCode: univ.ObjectIdentifier(),
+ id_ce_invalidityDate: useful.GeneralizedTime(),
+ id_ce_certificateIssuer: GeneralNames(),
+}
+
+certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
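
Taken together, these definitions let pyasn1's DER codec parse a whole certificate. A minimal sketch, assuming `cert_der` holds a DER-encoded certificate (the variable is hypothetical):

    from pyasn1.codec.der.decoder import decode as der_decode
    from pyasn1_modules import rfc2459

    # cert_der: DER-encoded X.509 certificate (hypothetical input)
    cert, rest = der_decode(cert_der, asn1Spec=rfc2459.Certificate())
    tbs = cert['tbsCertificate']
    print(tbs['serialNumber'])
    print(tbs['validity']['notAfter'].prettyPrint())
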
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2511.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2511.py
new file mode 100644
index 0000000000..5dd6fc224a
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2511.py
@@ -0,0 +1,258 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 certificate Request Message Format (CRMF) syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc2511
+#
+# Sample captures could be obtained with OpenSSL
+#
+from pyasn1_modules import rfc2315
+from pyasn1_modules.rfc2459 import *
+
+MAX = float('inf')
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+id_pkip = univ.ObjectIdentifier('1.3.6.1.5.5.7.5')
+id_regCtrl = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1')
+id_regCtrl_regToken = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.1')
+id_regCtrl_authenticator = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.2')
+id_regCtrl_pkiPublicationInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.3')
+id_regCtrl_pkiArchiveOptions = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.4')
+id_regCtrl_oldCertID = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.5')
+id_regCtrl_protocolEncrKey = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.1.6')
+id_regInfo = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2')
+id_regInfo_utf8Pairs = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.1')
+id_regInfo_certReq = univ.ObjectIdentifier('1.3.6.1.5.5.7.5.2.2')
+
+
+# This should be in PKIX Certificate Extensions module
+
+class GeneralName(univ.OctetString):
+ pass
+
+
+# end of PKIX Certificate Extensions module
+
+class UTF8Pairs(char.UTF8String):
+ pass
+
+
+class ProtocolEncrKey(SubjectPublicKeyInfo):
+ pass
+
+
+class CertId(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+ )
+
+
+class OldCertId(CertId):
+ pass
+
+
+class KeyGenParameters(univ.OctetString):
+ pass
+
+
+class EncryptedValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('intendedAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('symmAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('keyAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('encValue', univ.BitString())
+ )
+
+
+class EncryptedKey(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedValue', EncryptedValue()),
+ namedtype.NamedType('envelopedData', rfc2315.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class PKIArchiveOptions(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedPrivKey', EncryptedKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyGenParameters', KeyGenParameters().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('archiveRemGenPrivKey',
+ univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class SinglePubInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pubMethod', univ.Integer(
+ namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))),
+ namedtype.OptionalNamedType('pubLocation', GeneralName())
+ )
+
+
+class PKIPublicationInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('action',
+ univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))),
+ namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class Authenticator(char.UTF8String):
+ pass
+
+
+class RegToken(char.UTF8String):
+ pass
+
+
+class SubsequentMessage(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('encrCert', 0),
+ ('challengeResp', 1)
+ )
+
+
+class POPOPrivKey(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('thisMessage',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subsequentMessage', SubsequentMessage().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dhMAC',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class PBMParameter(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString()),
+ namedtype.NamedType('owf', AlgorithmIdentifier()),
+ namedtype.NamedType('iterationCount', univ.Integer()),
+ namedtype.NamedType('mac', AlgorithmIdentifier())
+ )
+
+
+class PKMACValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algId', AlgorithmIdentifier()),
+ namedtype.NamedType('value', univ.BitString())
+ )
+
+
+class POPOSigningKeyInput(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'authInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'sender', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ ),
+ namedtype.NamedType('publicKeyMAC', PKMACValue())
+ )
+ )
+ ),
+ namedtype.NamedType('publicKey', SubjectPublicKeyInfo())
+ )
+
+
+class POPOSigningKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('algorithmIdentifier', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+ )
+
+
+class ProofOfPossession(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('raVerified',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signature', POPOSigningKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('keyEncipherment', POPOPrivKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('keyAgreement', POPOPrivKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+ )
+
+
+class Controls(univ.SequenceOf):
+ componentType = AttributeTypeAndValue()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class OptionalValidity(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore',
+ Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter',
+ Time().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class CertTemplate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('signingAlg', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('issuer', Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('validity', OptionalValidity().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('subject', Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('publicKey', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6))),
+ namedtype.OptionalNamedType('issuerUID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('subjectUID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)))
+ )
+
+
+class CertRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('certTemplate', CertTemplate()),
+ namedtype.OptionalNamedType('controls', Controls())
+ )
+
+
+class CertReq(CertRequest):
+ pass
+
+
+class CertReqMsg(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReq', CertRequest()),
+ namedtype.OptionalNamedType('pop', ProofOfPossession()),
+ namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class CertReqMessages(univ.SequenceOf):
+ componentType = CertReqMsg()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
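
These schema objects pair with pyasn1's codecs; a minimal sketch of decoding a CRMF message, assuming `substrate` holds DER-encoded CertReqMessages (hypothetical input):

    from pyasn1.codec.der.decoder import decode as der_decode
    from pyasn1_modules import rfc2511

    # substrate: DER-encoded CertReqMessages (hypothetical input)
    msgs, rest = der_decode(substrate, asn1Spec=rfc2511.CertReqMessages())

    for msg in msgs:
        template = msg['certReq']['certTemplate']
        if template['subject'].isValue:
            print(template['subject'].prettyPrint())
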
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2560.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2560.py
new file mode 100644
index 0000000000..c37e25b65e
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2560.py
@@ -0,0 +1,225 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# OCSP request/response syntax
+#
+# Derived from a minimal OCSP library (RFC2560) code written by
+# Bud P. Bruegger <bud@ancitel.it>
+# Copyright: Ancitel, S.p.a, Rome, Italy
+# License: BSD
+#
+
+#
+# current limitations:
+# * request and response work only for a single certificate
+# * only some values are parsed out of the response
+# * the request doesn't set a nonce or a signature
+# * there is no signature validation of the response
+# * dates are left as strings in GeneralizedTime format -- datetime.datetime
+# would be nicer
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc2459
+
+
+# Start of OCSP module definitions
+
+# This should be in directory Authentication Framework (X.509) module
+
+class CRLReason(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8),
+ ('privilegeWithdrawn', 9),
+ ('aACompromise', 10)
+ )
+
+
+# end of directory Authentication Framework (X.509) module
+
+# This should be in PKIX Certificate Extensions module
+
+class GeneralName(univ.OctetString):
+ pass
+
+
+# end of PKIX Certificate Extensions module
+
+id_kp_OCSPSigning = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 3, 9))
+id_pkix_ocsp = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1))
+id_pkix_ocsp_basic = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 1))
+id_pkix_ocsp_nonce = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 2))
+id_pkix_ocsp_crl = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 3))
+id_pkix_ocsp_response = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 4))
+id_pkix_ocsp_nocheck = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 5))
+id_pkix_ocsp_archive_cutoff = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 6))
+id_pkix_ocsp_service_locator = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, 48, 1, 7))
+
+
+class AcceptableResponses(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+
+class ArchiveCutoff(useful.GeneralizedTime):
+ pass
+
+
+class UnknownInfo(univ.Null):
+ pass
+
+
+class RevokedInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('revocationTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('revocationReason', CRLReason().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class CertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('issuerNameHash', univ.OctetString()),
+ namedtype.NamedType('issuerKeyHash', univ.OctetString()),
+ namedtype.NamedType('serialNumber', rfc2459.CertificateSerialNumber())
+ )
+
+
+class CertStatus(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('good',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('revoked',
+ RevokedInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('unknown',
+ UnknownInfo().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class SingleResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certID', CertID()),
+ namedtype.NamedType('certStatus', CertStatus()),
+ namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('singleExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class KeyHash(univ.OctetString):
+ pass
+
+
+class ResponderID(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('byName',
+ rfc2459.Name().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('byKey',
+ KeyHash().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0))
+
+
+class ResponseData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('responderID', ResponderID()),
+ namedtype.NamedType('producedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('responses', univ.SequenceOf(componentType=SingleResponse())),
+ namedtype.OptionalNamedType('responseExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class BasicOCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsResponseData', ResponseData()),
+ namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class ResponseBytes(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('responseType', univ.ObjectIdentifier()),
+ namedtype.NamedType('response', univ.OctetString())
+ )
+
+
+class OCSPResponseStatus(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('successful', 0),
+ ('malformedRequest', 1),
+ ('internalError', 2),
+ ('tryLater', 3),
+ ('undefinedStatus', 4), # should never occur
+ ('sigRequired', 5),
+ ('unauthorized', 6)
+ )
+
+
+class OCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('responseStatus', OCSPResponseStatus()),
+ namedtype.OptionalNamedType('responseBytes', ResponseBytes().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Request(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('reqCert', CertID()),
+ namedtype.OptionalNamedType('singleRequestExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Signature(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signatureAlgorithm', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(componentType=rfc2459.Certificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class TBSRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('requestList', univ.SequenceOf(componentType=Request())),
+ namedtype.OptionalNamedType('requestExtensions', rfc2459.Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class OCSPRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsRequest', TBSRequest()),
+ namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
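
As a usage sketch, an OCSPRequest for a single certificate can be assembled from these pieces and DER-encoded; the SHA-1 OID and the issuer/serial inputs below are assumptions for illustration:

    import hashlib

    from pyasn1.codec.der.encoder import encode as der_encode
    from pyasn1.type import univ
    from pyasn1_modules import rfc2560

    # issuer_name_der, issuer_key_bytes, serial: hypothetical inputs
    cert_id = rfc2560.CertID()
    cert_id['hashAlgorithm']['algorithm'] = univ.ObjectIdentifier('1.3.14.3.2.26')  # sha1 (assumed)
    cert_id['issuerNameHash'] = hashlib.sha1(issuer_name_der).digest()
    cert_id['issuerKeyHash'] = hashlib.sha1(issuer_key_bytes).digest()
    cert_id['serialNumber'] = serial

    request = rfc2560.Request()
    request['reqCert'] = cert_id

    ocsp_req = rfc2560.OCSPRequest()
    ocsp_req['tbsRequest']['requestList'][0] = request

    der_bytes = der_encode(ocsp_req)
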
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2631.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2631.py
new file mode 100644
index 0000000000..44e537101c
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2631.py
@@ -0,0 +1,37 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Diffie-Hellman Key Agreement
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2631.txt
+# https://www.rfc-editor.org/errata/eid5897
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+class KeySpecificInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.NamedType('counter', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(4, 4)))
+ )
+
+
+class OtherInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyInfo', KeySpecificInfo()),
+ namedtype.OptionalNamedType('partyAInfo', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('suppPubInfo', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
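
For illustration, the OtherInfo input to the RFC 2631 key derivation might be assembled like this; the key-wrap OID and the 192-bit suppPubInfo value are assumptions:

    from pyasn1.codec.der.encoder import encode as der_encode
    from pyasn1_modules import rfc2631

    info = rfc2631.OtherInfo()
    info['keyInfo']['algorithm'] = '1.2.840.113549.1.9.16.3.6'  # id-alg-CMS3DESwrap (assumed)
    info['keyInfo']['counter'] = b'\x00\x00\x00\x01'  # 32-bit big-endian counter, starts at 1
    info['suppPubInfo'] = b'\x00\x00\x00\xc0'  # desired key length in bits (192, assumed)

    kdf_input = der_encode(info)
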
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2634.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2634.py
new file mode 100644
index 0000000000..2099a4b206
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2634.py
@@ -0,0 +1,336 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Enhanced Security Services for S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2634.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedval
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+ContentType = rfc5652.ContentType
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+PolicyInformation = rfc5280.PolicyInformation
+
+GeneralNames = rfc5280.GeneralNames
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+
+# Signing Certificate Attribute
+# Warning: It is better to use SigningCertificateV2 from RFC 5035
+
+id_aa_signingCertificate = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.12')
+
+class Hash(univ.OctetString):
+ pass # SHA-1 hash of entire certificate; RFC 5035 supports other hash algorithms
+
+
+class IssuerSerial(univ.Sequence):
+ pass
+
+IssuerSerial.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+)
+
+
+class ESSCertID(univ.Sequence):
+ pass
+
+ESSCertID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certHash', Hash()),
+ namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
+)
+
+
+class SigningCertificate(univ.Sequence):
+ pass
+
+SigningCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certs', univ.SequenceOf(
+ componentType=ESSCertID())),
+ namedtype.OptionalNamedType('policies', univ.SequenceOf(
+ componentType=PolicyInformation()))
+)
+
+
+# Mail List Expansion History Attribute
+
+id_aa_mlExpandHistory = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.3')
+
+ub_ml_expansion_history = univ.Integer(64)
+
+
+class EntityIdentifier(univ.Choice):
+ pass
+
+EntityIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier())
+)
+
+
+class MLReceiptPolicy(univ.Choice):
+ pass
+
+MLReceiptPolicy.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('none', univ.Null().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('insteadOf', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('inAdditionTo', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class MLData(univ.Sequence):
+ pass
+
+MLData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mailListIdentifier', EntityIdentifier()),
+ namedtype.NamedType('expansionTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('mlReceiptPolicy', MLReceiptPolicy())
+)
+
+class MLExpansionHistory(univ.SequenceOf):
+ pass
+
+MLExpansionHistory.componentType = MLData()
+MLExpansionHistory.sizeSpec = constraint.ValueSizeConstraint(1, ub_ml_expansion_history)
+
+
+# ESS Security Label Attribute
+
+id_aa_securityLabel = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.2')
+
+ub_privacy_mark_length = univ.Integer(128)
+
+ub_security_categories = univ.Integer(64)
+
+ub_integer_options = univ.Integer(256)
+
+
+class ESSPrivacyMark(univ.Choice):
+ pass
+
+ESSPrivacyMark.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_privacy_mark_length))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class SecurityClassification(univ.Integer):
+ pass
+
+SecurityClassification.subtypeSpec = constraint.ValueRangeConstraint(0, ub_integer_options)
+
+SecurityClassification.namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('top-secret', 5)
+)
+
+
+class SecurityPolicyIdentifier(univ.ObjectIdentifier):
+ pass
+
+
+class SecurityCategory(univ.Sequence):
+ pass
+
+SecurityCategory.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SecurityCategories(univ.SetOf):
+ pass
+
+SecurityCategories.componentType = SecurityCategory()
+SecurityCategories.sizeSpec = constraint.ValueSizeConstraint(1, ub_security_categories)
+
+
+class ESSSecurityLabel(univ.Set):
+ pass
+
+ESSSecurityLabel.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('security-policy-identifier', SecurityPolicyIdentifier()),
+ namedtype.OptionalNamedType('security-classification', SecurityClassification()),
+ namedtype.OptionalNamedType('privacy-mark', ESSPrivacyMark()),
+ namedtype.OptionalNamedType('security-categories', SecurityCategories())
+)
+
+
+# Equivalent Labels Attribute
+
+id_aa_equivalentLabels = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.9')
+
+class EquivalentLabels(univ.SequenceOf):
+ pass
+
+EquivalentLabels.componentType = ESSSecurityLabel()
+
+
+# Content Identifier Attribute
+
+id_aa_contentIdentifier = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.7')
+
+class ContentIdentifier(univ.OctetString):
+ pass
+
+
+# Content Reference Attribute
+
+id_aa_contentReference = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.10')
+
+class ContentReference(univ.Sequence):
+ pass
+
+ContentReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+ namedtype.NamedType('originatorSignatureValue', univ.OctetString())
+)
+
+
+# Message Signature Digest Attribute
+
+id_aa_msgSigDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.5')
+
+class MsgSigDigest(univ.OctetString):
+ pass
+
+
+# Content Hints Attribute
+
+id_aa_contentHint = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.4')
+
+class ContentHints(univ.Sequence):
+ pass
+
+ContentHints.componentType = namedtype.NamedTypes(
+    namedtype.OptionalNamedType('contentDescription', char.UTF8String().subtype(
+        subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+    namedtype.NamedType('contentType', ContentType())
+)
+
+
+# Receipt Request Attribute
+
+class AllOrFirstTier(univ.Integer):
+ pass
+
+AllOrFirstTier.namedValues = namedval.NamedValues(
+ ('allReceipts', 0),
+ ('firstTierRecipients', 1)
+)
+
+
+class ReceiptsFrom(univ.Choice):
+ pass
+
+ReceiptsFrom.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('allOrFirstTier', AllOrFirstTier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receiptList', univ.SequenceOf(
+ componentType=GeneralNames()).subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+id_aa_receiptRequest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.1')
+
+ub_receiptsTo = univ.Integer(16)
+
+class ReceiptRequest(univ.Sequence):
+ pass
+
+ReceiptRequest.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+    namedtype.NamedType('receiptsFrom', ReceiptsFrom()),
+    namedtype.NamedType('receiptsTo', univ.SequenceOf(componentType=GeneralNames()).subtype(
+        sizeSpec=constraint.ValueSizeConstraint(1, ub_receiptsTo)))
+)
+
+# Receipt Content Type
+
+class ESSVersion(univ.Integer):
+ pass
+
+ESSVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_receipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.1')
+
+class Receipt(univ.Sequence):
+ pass
+
+Receipt.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', ESSVersion()),
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('signedContentIdentifier', ContentIdentifier()),
+ namedtype.NamedType('originatorSignatureValue', univ.OctetString())
+)
+
+
+# Map of Attribute Type to the Attribute structure is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_signingCertificate: SigningCertificate(),
+ id_aa_mlExpandHistory: MLExpansionHistory(),
+ id_aa_securityLabel: ESSSecurityLabel(),
+ id_aa_equivalentLabels: EquivalentLabels(),
+ id_aa_contentIdentifier: ContentIdentifier(),
+ id_aa_contentReference: ContentReference(),
+ id_aa_msgSigDigest: MsgSigDigest(),
+ id_aa_contentHint: ContentHints(),
+ id_aa_receiptRequest: ReceiptRequest(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_receipt: Receipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
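
The map update above is what lets callers resolve these attribute values generically; a minimal sketch, assuming `attr` is a decoded rfc5652.Attribute:

    from pyasn1.codec.der.decoder import decode as der_decode
    from pyasn1_modules import rfc5652

    # attr: a decoded rfc5652.Attribute (hypothetical input)
    if attr['attrType'] in rfc5652.cmsAttributesMap:
        spec = rfc5652.cmsAttributesMap[attr['attrType']]
        value, rest = der_decode(attr['attrValues'][0], asn1Spec=spec)
        print(value.prettyPrint())
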
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2985.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2985.py
new file mode 100644
index 0000000000..75bccf097d
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2985.py
@@ -0,0 +1,588 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#9: Selected Attribute Types (Version 2.0)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2985.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc7292
+from pyasn1_modules import rfc5958
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Attribute = rfc5280.Attribute
+
+EmailAddress = rfc5280.EmailAddress
+
+Extensions = rfc5280.Extensions
+
+Time = rfc5280.Time
+
+X520countryName = rfc5280.X520countryName
+
+X520SerialNumber = rfc5280.X520SerialNumber
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+ContentType = rfc5652.ContentType
+
+Countersignature = rfc5652.Countersignature
+
+MessageDigest = rfc5652.MessageDigest
+
+SignerInfo = rfc5652.SignerInfo
+
+SigningTime = rfc5652.SigningTime
+
+
+# Imports from RFC 5958
+
+EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo
+
+
+# Imports from RFC 7292
+
+PFX = rfc7292.PFX
+
+
+# TODO:
+# Need a place to import PKCS15Token; it does not yet appear in an RFC
+
+
+# SingleAttribute is the same as Attribute in RFC 5280, except that the
+# attrValues SET must have one and only one member
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+
+
+class SingleAttributeValues(univ.SetOf):
+ pass
+
+SingleAttributeValues.componentType = AttributeValue()
+
+
+class SingleAttribute(univ.Sequence):
+ pass
+
+SingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('type', rfc5280.certificateAttributesMap)
+ )
+)
+
+
+# CMSAttribute is the same as Attribute in RFC 5652, and CMSSingleAttribute
+# is the companion where the attrValues SET must have one and only one member
+
+CMSAttribute = rfc5652.Attribute
+
+
+class CMSSingleAttribute(univ.Sequence):
+ pass
+
+CMSSingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# DirectoryString is the same as RFC 5280, except the length is limited to 255
+
+class DirectoryString(univ.Choice):
+ pass
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
+)
+
+
+# PKCS9String is DirectoryString with an additional choice of IA5String,
+# and the SIZE is limited to 255
+
+class PKCS9String(univ.Choice):
+ pass
+
+PKCS9String.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('directoryString', DirectoryString())
+)
+
+
+# Upper Bounds
+
+pkcs_9_ub_pkcs9String = univ.Integer(255)
+
+pkcs_9_ub_challengePassword = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_emailAddress = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_match = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_signingDescription = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_unstructuredAddress = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_ub_unstructuredName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+
+ub_name = univ.Integer(32768)
+
+pkcs_9_ub_placeOfBirth = univ.Integer(ub_name)
+
+pkcs_9_ub_pseudonym = univ.Integer(ub_name)
+
+
+# Object Identifier Arcs
+
+ietf_at = _OID(1, 3, 6, 1, 5, 5, 7, 9)
+
+id_at = _OID(2, 5, 4)
+
+pkcs_9 = _OID(1, 2, 840, 113549, 1, 9)
+
+pkcs_9_mo = _OID(pkcs_9, 0)
+
+smime = _OID(pkcs_9, 16)
+
+certTypes = _OID(pkcs_9, 22)
+
+crlTypes = _OID(pkcs_9, 23)
+
+pkcs_9_oc = _OID(pkcs_9, 24)
+
+pkcs_9_at = _OID(pkcs_9, 25)
+
+pkcs_9_sx = _OID(pkcs_9, 26)
+
+pkcs_9_mr = _OID(pkcs_9, 27)
+
+
+# Object Identifiers for Syntaxes for use with LDAP-accessible directories
+
+pkcs_9_sx_pkcs9String = _OID(pkcs_9_sx, 1)
+
+pkcs_9_sx_signingTime = _OID(pkcs_9_sx, 2)
+
+
+# Object Identifiers for object classes
+
+pkcs_9_oc_pkcsEntity = _OID(pkcs_9_oc, 1)
+
+pkcs_9_oc_naturalPerson = _OID(pkcs_9_oc, 2)
+
+
+# Object Identifiers for matching rules
+
+pkcs_9_mr_caseIgnoreMatch = _OID(pkcs_9_mr, 1)
+
+pkcs_9_mr_signingTimeMatch = _OID(pkcs_9_mr, 2)
+
+
+# PKCS #7 PDU
+
+pkcs_9_at_pkcs7PDU = _OID(pkcs_9_at, 5)
+
+pKCS7PDU = Attribute()
+pKCS7PDU['type'] = pkcs_9_at_pkcs7PDU
+pKCS7PDU['values'][0] = ContentInfo()
+
+
+# PKCS #12 token
+
+pkcs_9_at_userPKCS12 = _OID(2, 16, 840, 1, 113730, 3, 1, 216)
+
+userPKCS12 = Attribute()
+userPKCS12['type'] = pkcs_9_at_userPKCS12
+userPKCS12['values'][0] = PFX()
+
+
+# PKCS #15 token
+
+pkcs_9_at_pkcs15Token = _OID(pkcs_9_at, 1)
+
+# TODO: Once PKCS15Token can be imported, this can be included
+#
+# pKCS15Token = Attribute()
+# userPKCS12['type'] = pkcs_9_at_pkcs15Token
+# userPKCS12['values'][0] = PKCS15Token()
+
+
+# PKCS #8 encrypted private key information
+
+pkcs_9_at_encryptedPrivateKeyInfo = _OID(pkcs_9_at, 2)
+
+encryptedPrivateKeyInfo = Attribute()
+encryptedPrivateKeyInfo['type'] = pkcs_9_at_encryptedPrivateKeyInfo
+encryptedPrivateKeyInfo['values'][0] = EncryptedPrivateKeyInfo()
+
+
+# Electronic-mail address
+
+pkcs_9_at_emailAddress = rfc5280.id_emailAddress
+
+emailAddress = Attribute()
+emailAddress['type'] = pkcs_9_at_emailAddress
+emailAddress['values'][0] = EmailAddress()
+
+
+# Unstructured name
+
+pkcs_9_at_unstructuredName = _OID(pkcs_9, 2)
+
+unstructuredName = Attribute()
+unstructuredName['type'] = pkcs_9_at_unstructuredName
+unstructuredName['values'][0] = PKCS9String()
+
+
+# Unstructured address
+
+pkcs_9_at_unstructuredAddress = _OID(pkcs_9, 8)
+
+unstructuredAddress = Attribute()
+unstructuredAddress['type'] = pkcs_9_at_unstructuredAddress
+unstructuredAddress['values'][0] = DirectoryString()
+
+
+# Date of birth
+
+pkcs_9_at_dateOfBirth = _OID(ietf_at, 1)
+
+dateOfBirth = SingleAttribute()
+dateOfBirth['type'] = pkcs_9_at_dateOfBirth
+dateOfBirth['values'][0] = useful.GeneralizedTime()
+
+
+# Place of birth
+
+pkcs_9_at_placeOfBirth = _OID(ietf_at, 2)
+
+placeOfBirth = SingleAttribute()
+placeOfBirth['type'] = pkcs_9_at_placeOfBirth
+placeOfBirth['values'][0] = DirectoryString()
+
+
+# Gender
+
+class GenderString(char.PrintableString):
+ pass
+
+# exactly one character, one of "M", "F", "m", "f"
+GenderString.subtypeSpec = constraint.ConstraintsIntersection(
+    constraint.ValueSizeConstraint(1, 1),
+    constraint.SingleValueConstraint("M", "F", "m", "f"))
+
+
+pkcs_9_at_gender = _OID(ietf_at, 3)
+
+gender = SingleAttribute()
+gender['type'] = pkcs_9_at_gender
+gender['values'][0] = GenderString()
+
+
+# Country of citizenship
+
+pkcs_9_at_countryOfCitizenship = _OID(ietf_at, 4)
+
+countryOfCitizenship = Attribute()
+countryOfCitizenship['type'] = pkcs_9_at_countryOfCitizenship
+countryOfCitizenship['values'][0] = X520countryName()
+
+
+# Country of residence
+
+pkcs_9_at_countryOfResidence = _OID(ietf_at, 5)
+
+countryOfResidence = Attribute()
+countryOfResidence['type'] = pkcs_9_at_countryOfResidence
+countryOfResidence['values'][0] = X520countryName()
+
+
+# Pseudonym
+
+id_at_pseudonym = _OID(2, 5, 4, 65)
+
+pseudonym = Attribute()
+pseudonym['type'] = id_at_pseudonym
+pseudonym['values'][0] = DirectoryString()
+
+
+# Serial number
+
+id_at_serialNumber = rfc5280.id_at_serialNumber
+
+serialNumber = Attribute()
+serialNumber['type'] = id_at_serialNumber
+serialNumber['values'][0] = X520SerialNumber()
+
+
+# Content type
+
+pkcs_9_at_contentType = rfc5652.id_contentType
+
+contentType = CMSSingleAttribute()
+contentType['attrType'] = pkcs_9_at_contentType
+contentType['attrValues'][0] = ContentType()
+
+
+# Message digest
+
+pkcs_9_at_messageDigest = rfc5652.id_messageDigest
+
+messageDigest = CMSSingleAttribute()
+messageDigest['attrType'] = pkcs_9_at_messageDigest
+messageDigest['attrValues'][0] = MessageDigest()
+
+
+# Signing time
+
+pkcs_9_at_signingTime = rfc5652.id_signingTime
+
+signingTime = CMSSingleAttribute()
+signingTime['attrType'] = pkcs_9_at_signingTime
+signingTime['attrValues'][0] = SigningTime()
+
+
+# Random nonce
+
+class RandomNonce(univ.OctetString):
+ pass
+
+RandomNonce.subtypeSpec = constraint.ValueSizeConstraint(4, MAX)
+
+
+pkcs_9_at_randomNonce = _OID(pkcs_9_at, 3)
+
+randomNonce = CMSSingleAttribute()
+randomNonce['attrType'] = pkcs_9_at_randomNonce
+randomNonce['attrValues'][0] = RandomNonce()
+
+
+# Sequence number
+
+class SequenceNumber(univ.Integer):
+ pass
+
+SequenceNumber.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
+
+
+pkcs_9_at_sequenceNumber = _OID(pkcs_9_at, 4)
+
+sequenceNumber = CMSSingleAttribute()
+sequenceNumber['attrType'] = pkcs_9_at_sequenceNumber
+sequenceNumber['attrValues'][0] = SequenceNumber()
+
+
+# Countersignature
+
+pkcs_9_at_counterSignature = rfc5652.id_countersignature
+
+counterSignature = CMSAttribute()
+counterSignature['attrType'] = pkcs_9_at_counterSignature
+counterSignature['attrValues'][0] = Countersignature()
+
+
+# Challenge password
+
+pkcs_9_at_challengePassword = _OID(pkcs_9, 7)
+
+challengePassword = SingleAttribute()
+challengePassword['type'] = pkcs_9_at_challengePassword
+challengePassword['values'][0] = DirectoryString()
+
+
+# Extension request
+
+class ExtensionRequest(Extensions):
+ pass
+
+
+pkcs_9_at_extensionRequest = _OID(pkcs_9, 14)
+
+extensionRequest = SingleAttribute()
+extensionRequest['type'] = pkcs_9_at_extensionRequest
+extensionRequest['values'][0] = ExtensionRequest()
+
+
+# Extended-certificate attributes (deprecated)
+
+class AttributeSet(univ.SetOf):
+ pass
+
+AttributeSet.componentType = Attribute()
+
+
+pkcs_9_at_extendedCertificateAttributes = _OID(pkcs_9, 9)
+
+extendedCertificateAttributes = SingleAttribute()
+extendedCertificateAttributes['type'] = pkcs_9_at_extendedCertificateAttributes
+extendedCertificateAttributes['values'][0] = AttributeSet()
+
+
+# Friendly name
+
+class FriendlyName(char.BMPString):
+ pass
+
+FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName)
+
+
+pkcs_9_at_friendlyName = _OID(pkcs_9, 20)
+
+friendlyName = SingleAttribute()
+friendlyName['type'] = pkcs_9_at_friendlyName
+friendlyName['values'][0] = FriendlyName()
+
+
+# Local key identifier
+
+pkcs_9_at_localKeyId = _OID(pkcs_9, 21)
+
+localKeyId = SingleAttribute()
+localKeyId['type'] = pkcs_9_at_localKeyId
+localKeyId['values'][0] = univ.OctetString()
+
+
+# Signing description
+
+pkcs_9_at_signingDescription = _OID(pkcs_9, 13)
+
+signingDescription = CMSSingleAttribute()
+signingDescription['attrType'] = pkcs_9_at_signingDescription
+signingDescription['attrValues'][0] = DirectoryString()
+
+
+# S/MIME capabilities
+
+class SMIMECapability(AlgorithmIdentifier):
+ pass
+
+
+class SMIMECapabilities(univ.SequenceOf):
+ pass
+
+SMIMECapabilities.componentType = SMIMECapability()
+
+
+pkcs_9_at_smimeCapabilities = _OID(pkcs_9, 15)
+
+smimeCapabilities = CMSSingleAttribute()
+smimeCapabilities['attrType'] = pkcs_9_at_smimeCapabilities
+smimeCapabilities['attrValues'][0] = SMIMECapabilities()
+
+
+# Certificate Attribute Map
+
+_certificateAttributesMapUpdate = {
+ # Attribute types for use with the "pkcsEntity" object class
+ pkcs_9_at_pkcs7PDU: ContentInfo(),
+ pkcs_9_at_userPKCS12: PFX(),
+ # TODO: Once PKCS15Token can be imported, this can be included
+ # pkcs_9_at_pkcs15Token: PKCS15Token(),
+ pkcs_9_at_encryptedPrivateKeyInfo: EncryptedPrivateKeyInfo(),
+ # Attribute types for use with the "naturalPerson" object class
+ pkcs_9_at_emailAddress: EmailAddress(),
+ pkcs_9_at_unstructuredName: PKCS9String(),
+ pkcs_9_at_unstructuredAddress: DirectoryString(),
+ pkcs_9_at_dateOfBirth: useful.GeneralizedTime(),
+ pkcs_9_at_placeOfBirth: DirectoryString(),
+ pkcs_9_at_gender: GenderString(),
+ pkcs_9_at_countryOfCitizenship: X520countryName(),
+ pkcs_9_at_countryOfResidence: X520countryName(),
+ id_at_pseudonym: DirectoryString(),
+ id_at_serialNumber: X520SerialNumber(),
+ # Attribute types for use with PKCS #10 certificate requests
+ pkcs_9_at_challengePassword: DirectoryString(),
+ pkcs_9_at_extensionRequest: ExtensionRequest(),
+ pkcs_9_at_extendedCertificateAttributes: AttributeSet(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# CMS Attribute Map
+
+# Note: pkcs_9_at_smimeCapabilities is not included in the map because
+# the definition in RFC 5751 is preferred, which produces the same
+# encoding, but it allows different parameters for SMIMECapability
+# and AlgorithmIdentifier.
+
+_cmsAttributesMapUpdate = {
+ # Attribute types for use in PKCS #7 data (a.k.a. CMS)
+ pkcs_9_at_contentType: ContentType(),
+ pkcs_9_at_messageDigest: MessageDigest(),
+ pkcs_9_at_signingTime: SigningTime(),
+ pkcs_9_at_randomNonce: RandomNonce(),
+ pkcs_9_at_sequenceNumber: SequenceNumber(),
+ pkcs_9_at_counterSignature: Countersignature(),
+ # Attributes for use in PKCS #12 "PFX" PDUs or PKCS #15 tokens
+ pkcs_9_at_friendlyName: FriendlyName(),
+ pkcs_9_at_localKeyId: univ.OctetString(),
+ pkcs_9_at_signingDescription: DirectoryString(),
+ # pkcs_9_at_smimeCapabilities: SMIMECapabilities(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
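
A sketch of building one of these attributes, here a challengePassword for a PKCS #10 request; the password is an illustrative value, and the open-type member is stored DER-encoded because the underlying component is an ANY:

    from pyasn1.codec.der.encoder import encode as der_encode
    from pyasn1_modules import rfc2985

    password = rfc2985.DirectoryString()
    password['utf8String'] = 'hunter2'  # illustrative value

    attr = rfc2985.SingleAttribute()
    attr['type'] = rfc2985.pkcs_9_at_challengePassword
    attr['values'][0] = der_encode(password)
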
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc2986.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2986.py
new file mode 100644
index 0000000000..34acbd58d0
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc2986.py
@@ -0,0 +1,75 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Joel Johnson with asn1ate tool.
+# Modified by Russ Housley to add support for opentypes by importing
+# definitions from rfc5280 so that the same maps are used.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #10: Certification Request Syntax Specification
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc2986.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+AttributeType = rfc5280.AttributeType
+
+AttributeValue = rfc5280.AttributeValue
+
+AttributeTypeAndValue = rfc5280.AttributeTypeAndValue
+
+Attribute = rfc5280.Attribute
+
+RelativeDistinguishedName = rfc5280.RelativeDistinguishedName
+
+RDNSequence = rfc5280.RDNSequence
+
+Name = rfc5280.Name
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+
+class Attributes(univ.SetOf):
+ pass
+
+
+Attributes.componentType = Attribute()
+
+
+class CertificationRequestInfo(univ.Sequence):
+ pass
+
+
+CertificationRequestInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPKInfo', SubjectPublicKeyInfo()),
+ namedtype.NamedType('attributes',
+ Attributes().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))
+ )
+)
+
+
+class CertificationRequest(univ.Sequence):
+ pass
+
+
+CertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificationRequestInfo', CertificationRequestInfo()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
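
A minimal sketch of parsing a DER-encoded PKCS #10 request with these definitions; `csr_der` is a hypothetical input (PEM armor would have to be stripped first):

    from pyasn1.codec.der.decoder import decode as der_decode
    from pyasn1_modules import rfc2986

    csr, rest = der_decode(csr_der, asn1Spec=rfc2986.CertificationRequest())
    info = csr['certificationRequestInfo']
    print(info['subject'].prettyPrint())
    print(info['subjectPKInfo']['algorithm']['algorithm'])  # public-key algorithm OID
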
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3114.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3114.py
new file mode 100644
index 0000000000..badcb1f214
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3114.py
@@ -0,0 +1,77 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# TEST Company Classification Policies
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3114.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5755
+
+
+id_smime = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, ))
+
+id_tsp = id_smime + (7, )
+
+id_tsp_TEST_Amoco = id_tsp + (1, )
+
+class Amoco_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('amoco-general', 6),
+ ('amoco-confidential', 7),
+ ('amoco-highly-confidential', 8)
+ )
+
+
+id_tsp_TEST_Caterpillar = id_tsp + (2, )
+
+class Caterpillar_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('caterpillar-public', 6),
+ ('caterpillar-green', 7),
+ ('caterpillar-yellow', 8),
+ ('caterpillar-red', 9)
+ )
+
+
+id_tsp_TEST_Whirlpool = id_tsp + (3, )
+
+class Whirlpool_SecurityClassification(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('whirlpool-public', 6),
+ ('whirlpool-internal', 7),
+ ('whirlpool-confidential', 8)
+ )
+
+
+id_tsp_TEST_Whirlpool_Categories = id_tsp + (4, )
+
+class SecurityCategoryValues(univ.SequenceOf):
+ componentType = char.UTF8String()
+
+# Example SecurityCategoryValues: "LAW DEPARTMENT USE ONLY"
+# Example SecurityCategoryValues: "HUMAN RESOURCES USE ONLY"
+
+
+# Also, the privacy mark in the security label can contain a string,
+# such as: "ATTORNEY-CLIENT PRIVILEGED INFORMATION"
+
+
+# Map of security category type OIDs to security categories is added
+# to the ones that are in rfc5755.py
+
+_securityCategoryMapUpdate = {
+ id_tsp_TEST_Whirlpool_Categories: SecurityCategoryValues(),
+}
+
+rfc5755.securityCategoryMap.update(_securityCategoryMapUpdate)
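
For illustration, a SecurityCategoryValues instance carrying one of the example strings above might be built like this:

    from pyasn1_modules import rfc3114

    vals = rfc3114.SecurityCategoryValues()
    vals[0] = 'LAW DEPARTMENT USE ONLY'
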
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3161.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3161.py
new file mode 100644
index 0000000000..0e1dcedb39
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3161.py
@@ -0,0 +1,142 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Time-Stamp Protocol (TSP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3161.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc4210
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+Extensions = rfc5280.Extensions
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+GeneralName = rfc5280.GeneralName
+
+ContentInfo = rfc5652.ContentInfo
+
+PKIFreeText = rfc4210.PKIFreeText
+
+
+id_ct_TSTInfo = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.4')
+
+
+class Accuracy(univ.Sequence):
+ pass
+
+Accuracy.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('seconds', univ.Integer()),
+ namedtype.OptionalNamedType('millis', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 999)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('micros', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 999)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class MessageImprint(univ.Sequence):
+ pass
+
+MessageImprint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('hashedMessage', univ.OctetString())
+)
+
+
+class PKIFailureInfo(univ.BitString):
+ pass
+
+PKIFailureInfo.namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badRequest', 2),
+ ('badDataFormat', 5),
+ ('timeNotAvailable', 14),
+ ('unacceptedPolicy', 15),
+ ('unacceptedExtension', 16),
+ ('addInfoNotAvailable', 17),
+ ('systemFailure', 25)
+)
+
+
+class PKIStatus(univ.Integer):
+ pass
+
+PKIStatus.namedValues = namedval.NamedValues(
+ ('granted', 0),
+ ('grantedWithMods', 1),
+ ('rejection', 2),
+ ('waiting', 3),
+ ('revocationWarning', 4),
+ ('revocationNotification', 5)
+)
+
+
+class PKIStatusInfo(univ.Sequence):
+ pass
+
+PKIStatusInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.OptionalNamedType('statusString', PKIFreeText()),
+ namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
+)
+
+
+class TSAPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class TSTInfo(univ.Sequence):
+ pass
+
+TSTInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))),
+ namedtype.NamedType('policy', TSAPolicyId()),
+ namedtype.NamedType('messageImprint', MessageImprint()),
+ namedtype.NamedType('serialNumber', univ.Integer()),
+ namedtype.NamedType('genTime', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('accuracy', Accuracy()),
+ namedtype.DefaultedNamedType('ordering', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('nonce', univ.Integer()),
+ namedtype.OptionalNamedType('tsa', GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class TimeStampReq(univ.Sequence):
+ pass
+
+TimeStampReq.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('v1', 1)))),
+ namedtype.NamedType('messageImprint', MessageImprint()),
+ namedtype.OptionalNamedType('reqPolicy', TSAPolicyId()),
+ namedtype.OptionalNamedType('nonce', univ.Integer()),
+ namedtype.DefaultedNamedType('certReq', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('extensions', Extensions().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class TimeStampToken(ContentInfo):
+ pass
+
+
+class TimeStampResp(univ.Sequence):
+ pass
+
+TimeStampResp.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType('timeStampToken', TimeStampToken())
+)
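+
+
+# Editorial sketch, not upstream code: one plausible way to build and
+# DER-encode a TimeStampReq with the definitions above. Assumes pyasn1 is
+# importable; the id-sha1 OID and the sample data are illustrative only.
+def _example_time_stamp_req(data=b'example'):
+    import hashlib
+
+    from pyasn1.codec.der import encoder
+
+    req = TimeStampReq()
+    req['version'] = 'v1'
+    req['messageImprint']['hashAlgorithm']['algorithm'] = (1, 3, 14, 3, 2, 26)
+    req['messageImprint']['hashedMessage'] = hashlib.sha1(data).digest()
+    req['certReq'] = True
+    return encoder.encode(req)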
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3274.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3274.py
new file mode 100644
index 0000000000..425e006f3d
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3274.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Compressed Data Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3274.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+class CompressionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+# The CMS Compressed Data Content Type
+
+id_ct_compressedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.9')
+
+class CompressedData(univ.Sequence):
+ pass
+
+CompressedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', rfc5652.CMSVersion()), # Always set to 0
+ namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', rfc5652.EncapsulatedContentInfo())
+)
+
+
+# Algorithm identifier for the zLib Compression Algorithm
+# This includes cpa_zlibCompress as defined in RFC 6268,
+# from https://www.rfc-editor.org/rfc/rfc6268.txt
+
+id_alg_zlibCompress = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.8')
+
+cpa_zlibCompress = rfc5280.AlgorithmIdentifier()
+cpa_zlibCompress['algorithm'] = id_alg_zlibCompress
+# cpa_zlibCompress['parameters'] is absent
+
+
+# Map of Content Type OIDs to Content Types, added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_compressedData: CompressedData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
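+
+
+# Editorial sketch, not upstream code: with the map update above, the CMS
+# decoder can resolve the compressed-data content type in one pass. The
+# `substrate` argument is assumed to hold a DER-encoded ContentInfo.
+def _example_decode_compressed(substrate):
+    from pyasn1.codec.der import decoder
+
+    content_info, _ = decoder.decode(
+        substrate, asn1Spec=rfc5652.ContentInfo(), decodeOpenTypes=True)
+    assert content_info['contentType'] == id_ct_compressedData
+    return content_info['content']  # decoded as CompressedData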
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3279.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3279.py
new file mode 100644
index 0000000000..f6e24deafc
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3279.py
@@ -0,0 +1,260 @@
+#
+# This file is part of pyasn1-modules.
+#
+# Copyright (c) 2017, Danielle Madeley <danielle@madeley.id.au>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Algorithms and Identifiers for Internet X.509 Certificates and CRLs
+#
+# Derived from RFC 3279:
+# https://www.rfc-editor.org/rfc/rfc3279.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
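+# Editorial illustration, not upstream code: _OID flattens integers and
+# existing ObjectIdentifier values into a single OID, which lets the child
+# arcs below be written relative to their parents.
+def _example_oid_composition():
+    base = _OID(1, 2, 840, 113549, 1, 1)  # pkcs-1
+    assert str(_OID(base, 1)) == '1.2.840.113549.1.1.1'  # rsaEncryption
+
+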
+md2 = _OID(1, 2, 840, 113549, 2, 2)
+md5 = _OID(1, 2, 840, 113549, 2, 5)
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+id_dsa = _OID(1, 2, 840, 10040, 4, 1)
+
+
+class DSAPublicKey(univ.Integer):
+ pass
+
+
+class Dss_Parms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer())
+ )
+
+
+id_dsa_with_sha1 = _OID(1, 2, 840, 10040, 4, 3)
+
+
+class Dss_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
+rsaEncryption = _OID(pkcs_1, 1)
+md2WithRSAEncryption = _OID(pkcs_1, 2)
+md5WithRSAEncryption = _OID(pkcs_1, 4)
+sha1WithRSAEncryption = _OID(pkcs_1, 5)
+
+
+class RSAPublicKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+ )
+
+
+dhpublicnumber = _OID(1, 2, 840, 10046, 2, 1)
+
+
+class DHPublicKey(univ.Integer):
+ pass
+
+
+class ValidationParms(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seed', univ.BitString()),
+ namedtype.NamedType('pgenCounter', univ.Integer())
+ )
+
+
+class DomainParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('p', univ.Integer()),
+ namedtype.NamedType('g', univ.Integer()),
+ namedtype.NamedType('q', univ.Integer()),
+ namedtype.OptionalNamedType('j', univ.Integer()),
+ namedtype.OptionalNamedType('validationParms', ValidationParms())
+ )
+
+
+id_keyExchangeAlgorithm = _OID(2, 16, 840, 1, 101, 2, 1, 1, 22)
+
+
+class KEA_Parms_Id(univ.OctetString):
+ pass
+
+
+ansi_X9_62 = _OID(1, 2, 840, 10045)
+
+
+class FieldID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fieldType', univ.ObjectIdentifier()),
+ namedtype.NamedType('parameters', univ.Any())
+ )
+
+
+id_ecSigType = _OID(ansi_X9_62, 4)
+ecdsa_with_SHA1 = _OID(id_ecSigType, 1)
+
+
+class ECDSA_Sig_Value(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('r', univ.Integer()),
+ namedtype.NamedType('s', univ.Integer())
+ )
+
+
+id_fieldType = _OID(ansi_X9_62, 1)
+prime_field = _OID(id_fieldType, 1)
+
+
+class Prime_p(univ.Integer):
+ pass
+
+
+characteristic_two_field = _OID(id_fieldType, 2)
+
+
+class Characteristic_two(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('m', univ.Integer()),
+ namedtype.NamedType('basis', univ.ObjectIdentifier()),
+ namedtype.NamedType('parameters', univ.Any())
+ )
+
+
+id_characteristic_two_basis = _OID(characteristic_two_field, 3)
+gnBasis = _OID(id_characteristic_two_basis, 1)
+tpBasis = _OID(id_characteristic_two_basis, 2)
+
+
+class Trinomial(univ.Integer):
+ pass
+
+
+ppBasis = _OID(id_characteristic_two_basis, 3)
+
+
+class Pentanomial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('k1', univ.Integer()),
+ namedtype.NamedType('k2', univ.Integer()),
+ namedtype.NamedType('k3', univ.Integer())
+ )
+
+
+class FieldElement(univ.OctetString):
+ pass
+
+
+class ECPoint(univ.OctetString):
+ pass
+
+
+class Curve(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('a', FieldElement()),
+ namedtype.NamedType('b', FieldElement()),
+ namedtype.OptionalNamedType('seed', univ.BitString())
+ )
+
+
+class ECPVer(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('ecpVer1', 1)
+ )
+
+
+class ECParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', ECPVer()),
+ namedtype.NamedType('fieldID', FieldID()),
+ namedtype.NamedType('curve', Curve()),
+ namedtype.NamedType('base', ECPoint()),
+ namedtype.NamedType('order', univ.Integer()),
+ namedtype.OptionalNamedType('cofactor', univ.Integer())
+ )
+
+
+class EcpkParameters(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ecParameters', ECParameters()),
+ namedtype.NamedType('namedCurve', univ.ObjectIdentifier()),
+ namedtype.NamedType('implicitlyCA', univ.Null())
+ )
+
+
+id_publicKeyType = _OID(ansi_X9_62, 2)
+id_ecPublicKey = _OID(id_publicKeyType, 1)
+
+ellipticCurve = _OID(ansi_X9_62, 3)
+
+c_TwoCurve = _OID(ellipticCurve, 0)
+c2pnb163v1 = _OID(c_TwoCurve, 1)
+c2pnb163v2 = _OID(c_TwoCurve, 2)
+c2pnb163v3 = _OID(c_TwoCurve, 3)
+c2pnb176w1 = _OID(c_TwoCurve, 4)
+c2tnb191v1 = _OID(c_TwoCurve, 5)
+c2tnb191v2 = _OID(c_TwoCurve, 6)
+c2tnb191v3 = _OID(c_TwoCurve, 7)
+c2onb191v4 = _OID(c_TwoCurve, 8)
+c2onb191v5 = _OID(c_TwoCurve, 9)
+c2pnb208w1 = _OID(c_TwoCurve, 10)
+c2tnb239v1 = _OID(c_TwoCurve, 11)
+c2tnb239v2 = _OID(c_TwoCurve, 12)
+c2tnb239v3 = _OID(c_TwoCurve, 13)
+c2onb239v4 = _OID(c_TwoCurve, 14)
+c2onb239v5 = _OID(c_TwoCurve, 15)
+c2pnb272w1 = _OID(c_TwoCurve, 16)
+c2pnb304w1 = _OID(c_TwoCurve, 17)
+c2tnb359v1 = _OID(c_TwoCurve, 18)
+c2pnb368w1 = _OID(c_TwoCurve, 19)
+c2tnb431r1 = _OID(c_TwoCurve, 20)
+
+primeCurve = _OID(ellipticCurve, 1)
+prime192v1 = _OID(primeCurve, 1)
+prime192v2 = _OID(primeCurve, 2)
+prime192v3 = _OID(primeCurve, 3)
+prime239v1 = _OID(primeCurve, 4)
+prime239v2 = _OID(primeCurve, 5)
+prime239v3 = _OID(primeCurve, 6)
+prime256v1 = _OID(primeCurve, 7)
+
+
+# Map of Algorithm Identifier OIDs to Parameters, added to the
+# ones in rfc5280.py. Do not add OIDs with absent parameters.
+
+_algorithmIdentifierMapUpdate = {
+ md2: univ.Null(""),
+ md5: univ.Null(""),
+ id_sha1: univ.Null(""),
+ id_dsa: Dss_Parms(),
+ rsaEncryption: univ.Null(""),
+ md2WithRSAEncryption: univ.Null(""),
+ md5WithRSAEncryption: univ.Null(""),
+ sha1WithRSAEncryption: univ.Null(""),
+ dhpublicnumber: DomainParameters(),
+ id_keyExchangeAlgorithm: KEA_Parms_Id(),
+ id_ecPublicKey: EcpkParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
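+
+
+# Editorial sketch, not upstream code: with the update above, the encoded
+# parameters of an AlgorithmIdentifier can be decoded generically. `algid`
+# is assumed to be a decoded rfc5280.AlgorithmIdentifier with parameters.
+def _example_decode_parameters(algid):
+    from pyasn1.codec.der import decoder
+
+    spec = rfc5280.algorithmIdentifierMap.get(algid['algorithm'])
+    if spec is None:
+        return None
+    params, _ = decoder.decode(algid['parameters'], asn1Spec=spec)
+    return params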
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3280.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3280.py
new file mode 100644
index 0000000000..e9dbc86847
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3280.py
@@ -0,0 +1,1543 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with the asn1ate tool.
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate and Certificate
+# Revocation List (CRL) Profile
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3280.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+unformatted_postal_address = univ.Integer(16)
+
+ub_organizational_units = univ.Integer(4)
+
+ub_organizational_unit_name_length = univ.Integer(32)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ pass
+
+
+OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+OrganizationalUnitNames.componentType = OrganizationalUnitName()
+OrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+id_at = _OID(2, 5, 4)
+
+id_at_name = _OID(id_at, 41)
+
+ub_pds_parameter_length = univ.Integer(30)
+
+
+class PDSParameter(univ.Set):
+ pass
+
+
+PDSParameter.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
+)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+ub_organization_name_length = univ.Integer(64)
+
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+
+
+class TeletexDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+id_pkix = _OID(1, 3, 6, 1, 5, 5, 7)
+
+id_qt = _OID(id_pkix, 2)
+
+
+class PresentationAddress(univ.Sequence):
+ pass
+
+
+PresentationAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class AlgorithmIdentifier(univ.Sequence):
+ pass
+
+
+AlgorithmIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any())
+)
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class Extension(univ.Sequence):
+ pass
+
+
+Extension.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('extnValue', univ.OctetString())
+)
+
+
+class Extensions(univ.SequenceOf):
+ pass
+
+
+Extensions.componentType = Extension()
+Extensions.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+class SubjectPublicKeyInfo(univ.Sequence):
+ pass
+
+
+SubjectPublicKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+)
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class Validity(univ.Sequence):
+ pass
+
+
+Validity.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+)
+
+
+class Version(univ.Integer):
+ pass
+
+
+Version.namedValues = namedval.NamedValues(
+ ('v1', 0),
+ ('v2', 1),
+ ('v3', 2)
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ pass
+
+
+AttributeTypeAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('value', AttributeValue())
+)
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ pass
+
+
+RelativeDistinguishedName.componentType = AttributeTypeAndValue()
+RelativeDistinguishedName.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class RDNSequence(univ.SequenceOf):
+ pass
+
+
+RDNSequence.componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+ pass
+
+
+Name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rdnSequence', RDNSequence())
+)
+
+
+class TBSCertificate(univ.Sequence):
+ pass
+
+
+TBSCertificate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ Version().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value="v1")),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions',
+ Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class Certificate(univ.Sequence):
+ pass
+
+
+Certificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
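+
+# Editorial sketch, not upstream code: the three definitions above suffice
+# to parse a DER-encoded certificate. `substrate` is assumed to hold the
+# raw DER bytes, e.g. read from disk or extracted from a PEM wrapper.
+def _example_parse_certificate(substrate):
+    from pyasn1.codec.der import decoder
+
+    cert, rest = decoder.decode(substrate, asn1Spec=Certificate())
+    assert not rest
+    return cert['tbsCertificate']['serialNumber']
+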
+ub_surname_length = univ.Integer(40)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_e163_4_sub_address_length = univ.Integer(40)
+
+teletex_common_name = univ.Integer(2)
+
+ub_country_name_alpha_length = univ.Integer(2)
+
+ub_country_name_numeric_length = univ.Integer(3)
+
+
+class CountryName(univ.Choice):
+ pass
+
+
+CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
+CountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+extension_OR_address_components = univ.Integer(12)
+
+id_at_dnQualifier = _OID(id_at, 46)
+
+ub_e163_4_number_length = univ.Integer(15)
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ pass
+
+
+ExtendedNetworkAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('e163-4-address', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))
+ ),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+terminal_type = univ.Integer(23)
+
+id_domainComponent = _OID(0, 9, 2342, 19200300, 100, 1, 25)
+
+ub_state_name = univ.Integer(128)
+
+
+class X520StateOrProvinceName(univ.Choice):
+ pass
+
+
+X520StateOrProvinceName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+)
+
+ub_organization_name = univ.Integer(64)
+
+
+class X520OrganizationName(univ.Choice):
+ pass
+
+
+X520OrganizationName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
+)
+
+ub_emailaddress_length = univ.Integer(128)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+id_at_surname = _OID(id_at, 4)
+
+ub_common_name_length = univ.Integer(64)
+
+id_ad = _OID(id_pkix, 48)
+
+ub_numeric_user_id_length = univ.Integer(32)
+
+
+class NumericUserIdentifier(char.NumericString):
+ pass
+
+
+NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+class OrganizationName(char.PrintableString):
+ pass
+
+
+OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_domain_name_length = univ.Integer(16)
+
+
+class AdministrationDomainName(univ.Choice):
+ pass
+
+
+AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
+AdministrationDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+)
+
+
+class PrivateDomainName(univ.Choice):
+ pass
+
+
+PrivateDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+)
+
+ub_generation_qualifier_length = univ.Integer(3)
+
+ub_given_name_length = univ.Integer(16)
+
+ub_initials_length = univ.Integer(5)
+
+
+class PersonalName(univ.Set):
+ pass
+
+
+PersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+ub_terminal_id_length = univ.Integer(24)
+
+
+class TerminalIdentifier(char.PrintableString):
+ pass
+
+
+TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+ub_x121_address_length = univ.Integer(16)
+
+
+class X121Address(char.NumericString):
+ pass
+
+
+X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ pass
+
+
+BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+)
+
+ub_domain_defined_attributes = univ.Integer(4)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
+BuiltInDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+ub_extension_attributes = univ.Integer(256)
+
+
+class ExtensionAttribute(univ.Sequence):
+ pass
+
+
+ExtensionAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extension-attribute-type', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ExtensionAttributes(univ.SetOf):
+ pass
+
+
+ExtensionAttributes.componentType = ExtensionAttribute()
+ExtensionAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+
+class ORAddress(univ.Sequence):
+ pass
+
+
+ORAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+)
+
+id_pe = _OID(id_pkix, 1)
+
+ub_title = univ.Integer(64)
+
+
+class X520Title(univ.Choice):
+ pass
+
+
+X520Title.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+)
+
+id_at_organizationalUnitName = _OID(id_at, 11)
+
+
+class EmailAddress(char.IA5String):
+ pass
+
+
+EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+physical_delivery_country_name = univ.Integer(8)
+
+id_at_givenName = _OID(id_at, 42)
+
+
+class TeletexCommonName(char.TeletexString):
+ pass
+
+
+TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+id_qt_cps = _OID(id_qt, 1)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+id_kp = _OID(id_pkix, 3)
+
+
+class DirectoryString(univ.Choice):
+ pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class DomainComponent(char.IA5String):
+ pass
+
+
+id_at_initials = _OID(id_at, 43)
+
+id_qt_unotice = _OID(id_qt, 2)
+
+ub_pds_name_length = univ.Integer(16)
+
+
+class PDSName(char.PrintableString):
+ pass
+
+
+PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+class DistinguishedName(RDNSequence):
+ pass
+
+
+class CommonName(char.PrintableString):
+ pass
+
+
+CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+ub_serial_number = univ.Integer(64)
+
+
+class X520SerialNumber(char.PrintableString):
+ pass
+
+
+X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number)
+
+id_at_generationQualifier = _OID(id_at, 44)
+
+ub_organizational_unit_name = univ.Integer(64)
+
+id_ad_ocsp = _OID(id_ad, 1)
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+
+class TeletexPersonalName(univ.Set):
+ pass
+
+
+TeletexPersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class TeletexDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute()
+TeletexDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+
+class TBSCertList(univ.Sequence):
+ pass
+
+
+TBSCertList.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType('revokedCertificates',
+ univ.SequenceOf(componentType=univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ ))
+ )),
+ namedtype.OptionalNamedType('crlExtensions',
+ Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
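+
+# Editorial sketch, not upstream code: walking the revokedCertificates list
+# of a decoded CRL. `tbs` is assumed to be a TBSCertList value in which the
+# optional revokedCertificates field is present.
+def _example_revoked_serials(tbs):
+    return [entry['userCertificate'] for entry in tbs['revokedCertificates']]
+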
+local_postal_attributes = univ.Integer(21)
+
+pkcs_9 = _OID(1, 2, 840, 113549, 1, 9)
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ pass
+
+
+PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+ub_name = univ.Integer(32768)
+
+
+class X520name(univ.Choice):
+ pass
+
+
+X520name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+)
+
+id_emailAddress = _OID(pkcs_9, 1)
+
+
+class TerminalType(univ.Integer):
+ pass
+
+
+TerminalType.namedValues = namedval.NamedValues(
+ ('telex', 3),
+ ('teletex', 4),
+ ('g3-facsimile', 5),
+ ('g4-facsimile', 6),
+ ('ia5-terminal', 7),
+ ('videotex', 8)
+)
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ pass
+
+
+X520OrganizationalUnitName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+)
+
+id_at_commonName = _OID(id_at, 3)
+
+pds_name = univ.Integer(7)
+
+post_office_box_address = univ.Integer(18)
+
+ub_locality_name = univ.Integer(128)
+
+
+class X520LocalityName(univ.Choice):
+ pass
+
+
+X520LocalityName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
+)
+
+id_ad_timeStamping = _OID(id_ad, 3)
+
+id_at_countryName = _OID(id_at, 6)
+
+physical_delivery_personal_name = univ.Integer(13)
+
+teletex_personal_name = univ.Integer(4)
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+ub_postal_code_length = univ.Integer(16)
+
+
+class PostalCode(univ.Choice):
+ pass
+
+
+PostalCode.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
+ namedtype.NamedType('printable-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
+)
+
+
+class X520countryName(char.PrintableString):
+ pass
+
+
+X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2)
+
+postal_code = univ.Integer(9)
+
+id_ad_caRepository = _OID(id_ad, 5)
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+class PhysicalDeliveryOfficeName(PDSParameter):
+ pass
+
+
+id_at_title = _OID(id_at, 12)
+
+id_at_serialNumber = _OID(id_at, 5)
+
+id_ad_caIssuers = _OID(id_ad, 2)
+
+ub_integer_options = univ.Integer(256)
+
+
+class CertificateList(univ.Sequence):
+ pass
+
+
+CertificateList.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName()
+TeletexOrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+physical_delivery_office_name = univ.Integer(10)
+
+ub_common_name = univ.Integer(64)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+ub_pseudonym = univ.Integer(128)
+
+poste_restante_address = univ.Integer(19)
+
+id_at_organizationName = _OID(id_at, 10)
+
+physical_delivery_office_number = univ.Integer(11)
+
+id_at_pseudonym = _OID(id_at, 65)
+
+
+class X520CommonName(univ.Choice):
+ pass
+
+
+X520CommonName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+)
+
+physical_delivery_organization_name = univ.Integer(14)
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+id_at_stateOrProvinceName = _OID(id_at, 8)
+
+common_name = univ.Integer(1)
+
+id_at_localityName = _OID(id_at, 7)
+
+ub_match = univ.Integer(128)
+
+ub_unformatted_address_length = univ.Integer(180)
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
+)
+
+extended_network_address = univ.Integer(22)
+
+unique_postal_name = univ.Integer(20)
+
+ub_pds_physical_address_lines = univ.Integer(6)
+
+
+class UnformattedPostalAddress(univ.Set):
+ pass
+
+
+UnformattedPostalAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
+)
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+class X520Pseudonym(univ.Choice):
+ pass
+
+
+X520Pseudonym.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym)))
+)
+
+teletex_organization_name = univ.Integer(3)
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+street_address = univ.Integer(17)
+
+id_kp_OCSPSigning = _OID(id_kp, 9)
+
+id_ce = _OID(2, 5, 29)
+
+id_ce_certificatePolicies = _OID(id_ce, 32)
+
+
+class EDIPartyName(univ.Sequence):
+ pass
+
+
+EDIPartyName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('partyName',
+ DirectoryString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class AnotherName(univ.Sequence):
+ pass
+
+
+AnotherName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType('value', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class GeneralName(univ.Choice):
+ pass
+
+
+GeneralName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress',
+ univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+)
+
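+
+# Editorial sketch, not upstream code: GeneralName is a CHOICE, so exactly
+# one alternative is set at a time; assigning a raw string lets pyasn1
+# apply the implicit tag from the schema, as in this hypothetical dNSName.
+def _example_general_name():
+    gn = GeneralName()
+    gn['dNSName'] = 'example.org'
+    return gn.getName()  # 'dNSName'
+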
+
+class GeneralNames(univ.SequenceOf):
+ pass
+
+
+GeneralNames.componentType = GeneralName()
+GeneralNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+id_ce_cRLDistributionPoints = _OID(id_ce, 31)
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyMappings(univ.SequenceOf):
+ pass
+
+
+PolicyMappings.componentType = univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+))
+
+PolicyMappings.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ pass
+
+
+holdInstruction = _OID(2, 2, 840, 10040, 2)
+
+id_ce_subjectDirectoryAttributes = _OID(id_ce, 9)
+
+id_holdinstruction_callissuer = _OID(holdInstruction, 2)
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ pass
+
+
+SubjectDirectoryAttributes.componentType = Attribute()
+SubjectDirectoryAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+anyPolicy = _OID(id_ce_certificatePolicies, 0)
+
+id_ce_subjectAltName = _OID(id_ce, 17)
+
+id_kp_emailProtection = _OID(id_kp, 4)
+
+
+class ReasonFlags(univ.BitString):
+ pass
+
+
+ReasonFlags.namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('privilegeWithdrawn', 7),
+ ('aACompromise', 8)
+)
+
+
+class DistributionPointName(univ.Choice):
+ pass
+
+
+DistributionPointName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName',
+ GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class DistributionPoint(univ.Sequence):
+ pass
+
+
+DistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_ce_keyUsage = _OID(id_ce, 15)
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ pass
+
+
+PolicyQualifierInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType('qualifier', univ.Any())
+)
+
+
+class PolicyInformation(univ.Sequence):
+ pass
+
+
+PolicyInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()))
+)
+
+
+class CertificatePolicies(univ.SequenceOf):
+ pass
+
+
+CertificatePolicies.componentType = PolicyInformation()
+CertificatePolicies.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_ce_basicConstraints = _OID(id_ce, 19)
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ pass
+
+
+ExtKeyUsageSyntax.componentType = KeyPurposeId()
+ExtKeyUsageSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+class BasicConstraints(univ.Sequence):
+ pass
+
+
+BasicConstraints.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+
+class SkipCerts(univ.Integer):
+ pass
+
+
+SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class InhibitAnyPolicy(SkipCerts):
+ pass
+
+
+class CRLNumber(univ.Integer):
+ pass
+
+
+CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ pass
+
+
+AuthorityKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_ce_nameConstraints = _OID(id_ce, 30)
+
+id_kp_serverAuth = _OID(id_kp, 1)
+
+id_ce_freshestCRL = _OID(id_ce, 46)
+
+id_ce_cRLReasons = _OID(id_ce, 21)
+
+
+class CRLDistributionPoints(univ.SequenceOf):
+ pass
+
+
+CRLDistributionPoints.componentType = DistributionPoint()
+CRLDistributionPoints.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class FreshestCRL(CRLDistributionPoints):
+ pass
+
+
+id_ce_inhibitAnyPolicy = _OID(id_ce, 54)
+
+
+class CRLReason(univ.Enumerated):
+ pass
+
+
+CRLReason.namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8),
+ ('privilegeWithdrawn', 9),
+ ('aACompromise', 10)
+)
+
+
+class BaseDistance(univ.Integer):
+ pass
+
+
+BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class GeneralSubtree(univ.Sequence):
+ pass
+
+
+GeneralSubtree.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ pass
+
+
+GeneralSubtrees.componentType = GeneralSubtree()
+GeneralSubtrees.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ pass
+
+
+NameConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_pe_authorityInfoAccess = _OID(id_pe, 1)
+
+id_pe_subjectInfoAccess = _OID(id_pe, 11)
+
+id_ce_certificateIssuer = _OID(id_ce, 29)
+
+id_ce_invalidityDate = _OID(id_ce, 24)
+
+
+# Note: this second DirectoryString definition rebinds the name defined
+# earlier in this module, replacing the CHOICE of string types with a
+# single ANY alternative.
+class DirectoryString(univ.Choice):
+    pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+    namedtype.NamedType('any', univ.Any())
+)
+
+id_ce_authorityKeyIdentifier = _OID(id_ce, 35)
+
+
+class AccessDescription(univ.Sequence):
+ pass
+
+
+AccessDescription.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+)
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+AuthorityInfoAccessSyntax.componentType = AccessDescription()
+AuthorityInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_ce_issuingDistributionPoint = _OID(id_ce, 28)
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+class DisplayText(univ.Choice):
+ pass
+
+
+DisplayText.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+)
+
+
+class NoticeReference(univ.Sequence):
+ pass
+
+
+NoticeReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+)
+
+
+class UserNotice(univ.Sequence):
+ pass
+
+
+UserNotice.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+)
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ pass
+
+
+PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_ce_subjectKeyIdentifier = _OID(id_ce, 14)
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+class SubjectInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+SubjectInfoAccessSyntax.componentType = AccessDescription()
+SubjectInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class KeyUsage(univ.BitString):
+ pass
+
+
+KeyUsage.namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+)
+
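+
+# Editorial note, not upstream code: KeyUsage is a named BIT STRING, and
+# pyasn1 should accept a comma-separated list of the bit names above; the
+# exact constructor behaviour is a pyasn1 detail and is hedged here.
+def _example_key_usage():
+    ku = KeyUsage('digitalSignature,keyCertSign')
+    return ku[0], ku[5]  # both named bits are set
+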
+id_ce_extKeyUsage = _OID(id_ce, 37)
+
+anyExtendedKeyUsage = _OID(id_ce_extKeyUsage, 0)
+
+id_ce_privateKeyUsagePeriod = _OID(id_ce, 16)
+
+id_ce_policyMappings = _OID(id_ce, 33)
+
+id_ce_cRLNumber = _OID(id_ce, 20)
+
+id_ce_policyConstraints = _OID(id_ce, 36)
+
+id_holdinstruction_none = _OID(holdInstruction, 1)
+
+id_holdinstruction_reject = _OID(holdInstruction, 3)
+
+id_kp_timeStamping = _OID(id_kp, 8)
+
+
+class PolicyConstraints(univ.Sequence):
+ pass
+
+
+PolicyConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_kp_clientAuth = _OID(id_kp, 2)
+
+id_ce_deltaCRLIndicator = _OID(id_ce, 27)
+
+id_ce_issuerAltName = _OID(id_ce, 18)
+
+id_kp_codeSigning = _OID(id_kp, 3)
+
+id_ce_holdInstructionCode = _OID(id_ce, 23)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ pass
+
+
+IssuingDistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0))
+)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3281.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3281.py
new file mode 100644
index 0000000000..39ce82427c
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3281.py
@@ -0,0 +1,331 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with the asn1ate tool.
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# An Internet Attribute Certificate Profile for Authorization
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3281.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3280
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class ObjectDigestInfo(univ.Sequence):
+ pass
+
+
+ObjectDigestInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestedObjectType', univ.Enumerated(
+ namedValues=namedval.NamedValues(('publicKey', 0), ('publicKeyCert', 1), ('otherObjectTypes', 2)))),
+ namedtype.OptionalNamedType('otherObjectTypeID', univ.ObjectIdentifier()),
+ namedtype.NamedType('digestAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('objectDigest', univ.BitString())
+)
+
+
+class IssuerSerial(univ.Sequence):
+ pass
+
+
+IssuerSerial.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.GeneralNames()),
+ namedtype.NamedType('serial', rfc3280.CertificateSerialNumber()),
+ namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier())
+)
+
+
+class TargetCert(univ.Sequence):
+ pass
+
+
+TargetCert.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetCertificate', IssuerSerial()),
+ namedtype.OptionalNamedType('targetName', rfc3280.GeneralName()),
+ namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
+)
+
+
+class Target(univ.Choice):
+ pass
+
+
+Target.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetName', rfc3280.GeneralName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('targetGroup', rfc3280.GeneralName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('targetCert',
+ TargetCert().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class Targets(univ.SequenceOf):
+ pass
+
+
+Targets.componentType = Target()
+
+
+class ProxyInfo(univ.SequenceOf):
+ pass
+
+
+ProxyInfo.componentType = Targets()
+
+id_at_role = _buildOid(rfc3280.id_at, 72)
+
+id_pe_aaControls = _buildOid(rfc3280.id_pe, 6)
+
+id_ce_targetInformation = _buildOid(rfc3280.id_ce, 55)
+
+id_pe_ac_auditIdentity = _buildOid(rfc3280.id_pe, 4)
+
+
+class ClassList(univ.BitString):
+ pass
+
+
+ClassList.namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('topSecret', 5)
+)
+
+
+class SecurityCategory(univ.Sequence):
+ pass
+
+
+SecurityCategory.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value', univ.Any().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class Clearance(univ.Sequence):
+ pass
+
+
+Clearance.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(
+ value="unclassified")),
+ namedtype.OptionalNamedType('securityCategories', univ.SetOf(componentType=SecurityCategory()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class AttCertVersion(univ.Integer):
+ pass
+
+
+AttCertVersion.namedValues = namedval.NamedValues(
+ ('v2', 1)
+)
+
+id_aca = _buildOid(rfc3280.id_pkix, 10)
+
+id_at_clearance = _buildOid(2, 5, 1, 5, 55)
+
+
+class AttrSpec(univ.SequenceOf):
+ pass
+
+
+AttrSpec.componentType = univ.ObjectIdentifier()
+
+
+class AAControls(univ.Sequence):
+ pass
+
+
+AAControls.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.OptionalNamedType('permittedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.DefaultedNamedType('permitUnSpecified', univ.Boolean().subtype(value=1))
+)
+
+
+class AttCertValidityPeriod(univ.Sequence):
+ pass
+
+
+AttCertValidityPeriod.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
+ namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
+)
+
+
+id_aca_authenticationInfo = _buildOid(id_aca, 1)
+
+
+class V2Form(univ.Sequence):
+ pass
+
+
+V2Form.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerName', rfc3280.GeneralNames()),
+ namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class AttCertIssuer(univ.Choice):
+ pass
+
+
+AttCertIssuer.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('v1Form', rfc3280.GeneralNames()),
+ namedtype.NamedType('v2Form',
+ V2Form().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class Holder(univ.Sequence):
+ pass
+
+
+Holder.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('baseCertificateID', IssuerSerial().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('entityName', rfc3280.GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('objectDigestInfo', ObjectDigestInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class AttributeCertificateInfo(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', AttCertVersion()),
+ namedtype.NamedType('holder', Holder()),
+ namedtype.NamedType('issuer', AttCertIssuer()),
+ namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
+ namedtype.NamedType('attrCertValidityPeriod', AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
+)
+
+
+class AttributeCertificate(univ.Sequence):
+ pass
+
+
+AttributeCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acinfo', AttributeCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+)
+
+id_mod = _buildOid(rfc3280.id_pkix, 0)
+
+id_mod_attribute_cert = _buildOid(id_mod, 12)
+
+id_aca_accessIdentity = _buildOid(id_aca, 2)
+
+
+class RoleSyntax(univ.Sequence):
+ pass
+
+
+RoleSyntax.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('roleAuthority', rfc3280.GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('roleName',
+ rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_aca_chargingIdentity = _buildOid(id_aca, 3)
+
+
+class ACClearAttrs(univ.Sequence):
+ pass
+
+
+ACClearAttrs.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acIssuer', rfc3280.GeneralName()),
+ namedtype.NamedType('acSerial', univ.Integer()),
+ namedtype.NamedType('attrs', univ.SequenceOf(componentType=rfc3280.Attribute()))
+)
+
+id_aca_group = _buildOid(id_aca, 4)
+
+id_pe_ac_proxying = _buildOid(rfc3280.id_pe, 10)
+
+
+class SvceAuthInfo(univ.Sequence):
+ pass
+
+
+SvceAuthInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('service', rfc3280.GeneralName()),
+ namedtype.NamedType('ident', rfc3280.GeneralName()),
+ namedtype.OptionalNamedType('authInfo', univ.OctetString())
+)
+
+
+class IetfAttrSyntax(univ.Sequence):
+ pass
+
+
+IetfAttrSyntax.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType(
+ 'policyAuthority', rfc3280.GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ ),
+ namedtype.NamedType(
+ 'values', univ.SequenceOf(
+ componentType=univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('octets', univ.OctetString()),
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('string', char.UTF8String())
+ )
+ )
+ )
+ )
+)
+
+id_aca_encAttrs = _buildOid(id_aca, 6)
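+
+
+# Illustrative sketch (not part of the upstream module): decoding a
+# DER-encoded attribute certificate with the structure defined above.
+# The file name is hypothetical.
+if __name__ == '__main__':
+    from pyasn1.codec.der import decoder
+
+    with open('acert.der', 'rb') as acert_file:
+        substrate = acert_file.read()
+
+    # decode() returns the parsed object plus any trailing octets
+    acert, rest = decoder.decode(substrate, asn1Spec=AttributeCertificate())
+    print(acert['acinfo']['serialNumber'])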
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3412.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3412.py
new file mode 100644
index 0000000000..59f84959d0
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3412.py
@@ -0,0 +1,53 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv3 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3412.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc1905
+
+
+class ScopedPDU(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contextEngineId', univ.OctetString()),
+ namedtype.NamedType('contextName', univ.OctetString()),
+ namedtype.NamedType('data', rfc1905.PDUs())
+ )
+
+
+class ScopedPduData(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('plaintext', ScopedPDU()),
+ namedtype.NamedType('encryptedPDU', univ.OctetString()),
+ )
+
+
+class HeaderData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('msgID',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
+ namedtype.NamedType('msgMaxSize',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(484, 2147483647))),
+ namedtype.NamedType('msgFlags', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 1))),
+ namedtype.NamedType('msgSecurityModel',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, 2147483647)))
+ )
+
+
+class SNMPv3Message(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('msgVersion',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
+ namedtype.NamedType('msgGlobalData', HeaderData()),
+ namedtype.NamedType('msgSecurityParameters', univ.OctetString()),
+ namedtype.NamedType('msgData', ScopedPduData())
+ )
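+
+
+# Illustrative sketch (not part of the upstream module): parsing the outer
+# SNMPv3 framing; the message bytes are assumed to arrive on stdin.
+if __name__ == '__main__':
+    import sys
+    from pyasn1.codec.ber import decoder
+
+    substrate = sys.stdin.buffer.read()
+    # SNMP uses BER on the wire, so the BER decoder is the right codec here
+    msg, rest = decoder.decode(substrate, asn1Spec=SNMPv3Message())
+    print(msg['msgGlobalData']['msgID'])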
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3414.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3414.py
new file mode 100644
index 0000000000..b9087cb579
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3414.py
@@ -0,0 +1,28 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# SNMPv3 message syntax
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3414.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+
+class UsmSecurityParameters(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('msgAuthoritativeEngineID', univ.OctetString()),
+ namedtype.NamedType('msgAuthoritativeEngineBoots',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
+ namedtype.NamedType('msgAuthoritativeEngineTime',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, 2147483647))),
+ namedtype.NamedType('msgUserName',
+ univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 32))),
+ namedtype.NamedType('msgAuthenticationParameters', univ.OctetString()),
+ namedtype.NamedType('msgPrivacyParameters', univ.OctetString())
+ )
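+
+
+# Illustrative sketch (not part of the upstream module): USM parameters
+# travel inside SNMPv3Message.msgSecurityParameters as an opaque OCTET
+# STRING, so they need a second decoding pass; the payload is assumed to
+# arrive on stdin.
+if __name__ == '__main__':
+    import sys
+    from pyasn1.codec.ber import decoder
+
+    params_octets = sys.stdin.buffer.read()
+    usm, rest = decoder.decode(params_octets, asn1Spec=UsmSecurityParameters())
+    print(usm['msgUserName'])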
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3447.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3447.py
new file mode 100644
index 0000000000..c3621a0c25
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3447.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#1 syntax
+#
+# ASN.1 source from:
+# ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-1/pkcs-1v2-1.asn
+#
+# Sample captures could be obtained with "openssl genrsa" command
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedval
+
+from pyasn1_modules.rfc2437 import *
+
+
+class OtherPrimeInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('prime', univ.Integer()),
+ namedtype.NamedType('exponent', univ.Integer()),
+ namedtype.NamedType('coefficient', univ.Integer())
+ )
+
+
+class OtherPrimeInfos(univ.SequenceOf):
+ componentType = OtherPrimeInfo()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class RSAPrivateKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(namedValues=namedval.NamedValues(('two-prime', 0), ('multi', 1)))),
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer()),
+ namedtype.NamedType('privateExponent', univ.Integer()),
+ namedtype.NamedType('prime1', univ.Integer()),
+ namedtype.NamedType('prime2', univ.Integer()),
+ namedtype.NamedType('exponent1', univ.Integer()),
+ namedtype.NamedType('exponent2', univ.Integer()),
+ namedtype.NamedType('coefficient', univ.Integer()),
+ namedtype.OptionalNamedType('otherPrimeInfos', OtherPrimeInfos())
+ )
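+
+
+# Illustrative sketch (not part of the upstream module): decoding a key
+# converted to DER first, e.g. with
+# "openssl genrsa 2048 | openssl rsa -outform DER -out key.der";
+# the file name is hypothetical.
+if __name__ == '__main__':
+    from pyasn1.codec.der import decoder
+
+    with open('key.der', 'rb') as key_file:
+        substrate = key_file.read()
+
+    key, rest = decoder.decode(substrate, asn1Spec=RSAPrivateKey())
+    print(key['modulus'])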
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3560.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3560.py
new file mode 100644
index 0000000000..8365436df5
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3560.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RSAES-OAEP Key Transport Algorithm in CMS
+#
+# Notice that all of the things needed in RFC 3560 are also defined
+# in RFC 4055. So, they are all pulled from the RFC 4055 module into
+# this one so that people looking at RFC 3560 can easily find them.
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3560.txt
+#
+
+from pyasn1_modules import rfc4055
+
+id_sha1 = rfc4055.id_sha1
+
+id_sha256 = rfc4055.id_sha256
+
+id_sha384 = rfc4055.id_sha384
+
+id_sha512 = rfc4055.id_sha512
+
+id_mgf1 = rfc4055.id_mgf1
+
+rsaEncryption = rfc4055.rsaEncryption
+
+id_RSAES_OAEP = rfc4055.id_RSAES_OAEP
+
+id_pSpecified = rfc4055.id_pSpecified
+
+sha1Identifier = rfc4055.sha1Identifier
+
+sha256Identifier = rfc4055.sha256Identifier
+
+sha384Identifier = rfc4055.sha384Identifier
+
+sha512Identifier = rfc4055.sha512Identifier
+
+mgf1SHA1Identifier = rfc4055.mgf1SHA1Identifier
+
+mgf1SHA256Identifier = rfc4055.mgf1SHA256Identifier
+
+mgf1SHA384Identifier = rfc4055.mgf1SHA384Identifier
+
+mgf1SHA512Identifier = rfc4055.mgf1SHA512Identifier
+
+pSpecifiedEmptyIdentifier = rfc4055.pSpecifiedEmptyIdentifier
+
+
+class RSAES_OAEP_params(rfc4055.RSAES_OAEP_params):
+ pass
+
+
+rSAES_OAEP_Default_Params = RSAES_OAEP_params()
+
+rSAES_OAEP_Default_Identifier = rfc4055.rSAES_OAEP_Default_Identifier
+
+rSAES_OAEP_SHA256_Params = rfc4055.rSAES_OAEP_SHA256_Params
+
+rSAES_OAEP_SHA256_Identifier = rfc4055.rSAES_OAEP_SHA256_Identifier
+
+rSAES_OAEP_SHA384_Params = rfc4055.rSAES_OAEP_SHA384_Params
+
+rSAES_OAEP_SHA384_Identifier = rfc4055.rSAES_OAEP_SHA384_Identifier
+
+rSAES_OAEP_SHA512_Params = rfc4055.rSAES_OAEP_SHA512_Params
+
+rSAES_OAEP_SHA512_Identifier = rfc4055.rSAES_OAEP_SHA512_Identifier
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3565.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3565.py
new file mode 100644
index 0000000000..ec75e23489
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3565.py
@@ -0,0 +1,57 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Use of the Advanced Encryption Standard (AES) Encryption
+# Algorithm in the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3565.txt
+
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class AlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class AES_IV(univ.OctetString):
+ pass
+
+AES_IV.subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
+id_aes128_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.2')
+
+id_aes192_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.22')
+
+id_aes256_CBC = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.42')
+
+
+id_aes128_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.5')
+
+id_aes192_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.25')
+
+id_aes256_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.45')
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ id_aes128_CBC: AES_IV(),
+ id_aes192_CBC: AES_IV(),
+ id_aes256_CBC: AES_IV(),
+ id_aes128_wrap: univ.Null(),
+ id_aes192_wrap: univ.Null(),
+ id_aes256_wrap: univ.Null(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
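+
+
+# Illustrative sketch (not part of the upstream module): with the map
+# update above, a pyasn1 decoder that supports decodeOpenTypes can expand
+# AES-CBC parameters into an AES_IV automatically; the DER-encoded
+# AlgorithmIdentifier is assumed to arrive on stdin.
+if __name__ == '__main__':
+    import sys
+    from pyasn1.codec.der import decoder
+
+    substrate = sys.stdin.buffer.read()
+    algid, rest = decoder.decode(
+        substrate, asn1Spec=rfc5280.AlgorithmIdentifier(), decodeOpenTypes=True)
+    if algid['algorithm'] == id_aes128_CBC:
+        print('IV:', algid['parameters'].prettyPrint())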
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3709.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3709.py
new file mode 100644
index 0000000000..aa1d5b6abf
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3709.py
@@ -0,0 +1,207 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Logotypes in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3709.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc6170
+
+MAX = float('inf')
+
+
+class HashAlgAndValue(univ.Sequence):
+ pass
+
+HashAlgAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('hashValue', univ.OctetString())
+)
+
+
+class LogotypeDetails(univ.Sequence):
+ pass
+
+LogotypeDetails.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mediaType', char.IA5String()),
+ namedtype.NamedType('logotypeHash', univ.SequenceOf(
+ componentType=HashAlgAndValue()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('logotypeURI', univ.SequenceOf(
+ componentType=char.IA5String()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class LogotypeAudioInfo(univ.Sequence):
+ pass
+
+LogotypeAudioInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fileSize', univ.Integer()),
+ namedtype.NamedType('playTime', univ.Integer()),
+ namedtype.NamedType('channels', univ.Integer()),
+ namedtype.OptionalNamedType('sampleRate', univ.Integer().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('language', char.IA5String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class LogotypeAudio(univ.Sequence):
+ pass
+
+LogotypeAudio.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('audioDetails', LogotypeDetails()),
+ namedtype.OptionalNamedType('audioInfo', LogotypeAudioInfo())
+)
+
+
+class LogotypeImageType(univ.Integer):
+ pass
+
+LogotypeImageType.namedValues = namedval.NamedValues(
+ ('grayScale', 0),
+ ('color', 1)
+)
+
+
+class LogotypeImageResolution(univ.Choice):
+ pass
+
+LogotypeImageResolution.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numBits',
+ univ.Integer().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('tableSize',
+ univ.Integer().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class LogotypeImageInfo(univ.Sequence):
+ pass
+
+LogotypeImageInfo.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('type', LogotypeImageType().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='color')),
+ namedtype.NamedType('fileSize', univ.Integer()),
+ namedtype.NamedType('xSize', univ.Integer()),
+ namedtype.NamedType('ySize', univ.Integer()),
+ namedtype.OptionalNamedType('resolution', LogotypeImageResolution()),
+ namedtype.OptionalNamedType('language', char.IA5String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class LogotypeImage(univ.Sequence):
+ pass
+
+LogotypeImage.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('imageDetails', LogotypeDetails()),
+ namedtype.OptionalNamedType('imageInfo', LogotypeImageInfo())
+)
+
+
+class LogotypeData(univ.Sequence):
+ pass
+
+LogotypeData.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('image', univ.SequenceOf(
+ componentType=LogotypeImage())),
+ namedtype.OptionalNamedType('audio', univ.SequenceOf(
+ componentType=LogotypeAudio()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)))
+)
+
+
+class LogotypeReference(univ.Sequence):
+ pass
+
+LogotypeReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('refStructHash', univ.SequenceOf(
+ componentType=HashAlgAndValue()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('refStructURI', univ.SequenceOf(
+ componentType=char.IA5String()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class LogotypeInfo(univ.Choice):
+ pass
+
+LogotypeInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('direct',
+ LogotypeData().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('indirect', LogotypeReference().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+# Other logotype type and associated object identifiers
+
+id_logo_background = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.2')
+
+id_logo_loyalty = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.1')
+
+id_logo_certImage = rfc6170.id_logo_certImage
+
+
+class OtherLogotypeInfo(univ.Sequence):
+ pass
+
+OtherLogotypeInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('logotypeType', univ.ObjectIdentifier()),
+ namedtype.NamedType('info', LogotypeInfo())
+)
+
+
+# Logotype Certificate Extension
+
+id_pe_logotype = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.12')
+
+
+class LogotypeExtn(univ.Sequence):
+ pass
+
+LogotypeExtn.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('communityLogos', univ.SequenceOf(
+ componentType=LogotypeInfo()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('issuerLogo', LogotypeInfo().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('subjectLogo', LogotypeInfo().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('otherLogos', univ.SequenceOf(
+ componentType=OtherLogotypeInfo()).subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_logotype: LogotypeExtn(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
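+
+
+# Illustrative sketch (not part of the upstream module): expanding a
+# decoded rfc5280.Extension whose extnID is id_pe_logotype, using the map
+# update above. `extn` is assumed to come from an already-decoded
+# certificate.
+def _example_decode_logotype(extn):
+    from pyasn1.codec.der import decoder
+
+    # extnValue carries the DER encoding of the LogotypeExtn payload
+    logotype, rest = decoder.decode(
+        extn['extnValue'],
+        asn1Spec=rfc5280.certificateExtensionsMap[extn['extnID']])
+    return logotype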
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3770.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3770.py
new file mode 100644
index 0000000000..3fefe1d90e
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3770.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extensions and Attributes Supporting Authentication
+# in PPP and Wireless LAN Networks
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3770.txt
+# https://www.rfc-editor.org/errata/eid234
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+MAX = float('inf')
+
+
+# Extended Key Usage Values
+
+id_kp_eapOverLAN = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.14')
+
+id_kp_eapOverPPP = univ.ObjectIdentifier('1.3.6.1.5.5.7.3.13')
+
+
+# Wireless LAN SSID Extension
+
+id_pe_wlanSSID = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.13')
+
+
+class SSID(univ.OctetString):
+ pass
+
+SSID.subtypeSpec = constraint.ValueSizeConstraint(1, 32)
+
+
+class SSIDList(univ.SequenceOf):
+ pass
+
+SSIDList.componentType = SSID()
+SSIDList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Wireless LAN SSID Attribute Certificate Attribute
+# Uses same syntax as the certificate extension: SSIDList
+# Correction for https://www.rfc-editor.org/errata/eid234
+
+id_aca_wlanSSID = univ.ObjectIdentifier('1.3.6.1.5.5.7.10.7')
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_aca_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
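+
+
+# Illustrative sketch (not part of the upstream module): building and
+# DER-encoding an SSIDList carrying one hypothetical network name.
+if __name__ == '__main__':
+    from pyasn1.codec.der import encoder
+
+    ssids = SSIDList()
+    ssids.append(SSID('example-net'))
+    print(encoder.encode(ssids).hex())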
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3779.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3779.py
new file mode 100644
index 0000000000..8e6eaa3e7b
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3779.py
@@ -0,0 +1,137 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Extensions for IP Addresses and AS Identifiers
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc3779.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# IP Address Delegation Extension
+
+id_pe_ipAddrBlocks = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.7')
+
+
+class IPAddress(univ.BitString):
+ pass
+
+
+class IPAddressRange(univ.Sequence):
+ pass
+
+IPAddressRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('min', IPAddress()),
+ namedtype.NamedType('max', IPAddress())
+)
+
+
+class IPAddressOrRange(univ.Choice):
+ pass
+
+IPAddressOrRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressPrefix', IPAddress()),
+ namedtype.NamedType('addressRange', IPAddressRange())
+)
+
+
+class IPAddressChoice(univ.Choice):
+ pass
+
+IPAddressChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('inherit', univ.Null()),
+ namedtype.NamedType('addressesOrRanges', univ.SequenceOf(
+ componentType=IPAddressOrRange())
+ )
+)
+
+
+class IPAddressFamily(univ.Sequence):
+ pass
+
+IPAddressFamily.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressFamily', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
+ namedtype.NamedType('ipAddressChoice', IPAddressChoice())
+)
+
+
+class IPAddrBlocks(univ.SequenceOf):
+ pass
+
+IPAddrBlocks.componentType = IPAddressFamily()
+
+
+# Autonomous System Identifier Delegation Extension
+
+id_pe_autonomousSysIds = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.8')
+
+
+class ASId(univ.Integer):
+ pass
+
+
+class ASRange(univ.Sequence):
+ pass
+
+ASRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('min', ASId()),
+ namedtype.NamedType('max', ASId())
+)
+
+
+class ASIdOrRange(univ.Choice):
+ pass
+
+ASIdOrRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', ASId()),
+ namedtype.NamedType('range', ASRange())
+)
+
+
+class ASIdentifierChoice(univ.Choice):
+ pass
+
+ASIdentifierChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('inherit', univ.Null()),
+ namedtype.NamedType('asIdsOrRanges', univ.SequenceOf(
+ componentType=ASIdOrRange())
+ )
+)
+
+
+class ASIdentifiers(univ.Sequence):
+ pass
+
+ASIdentifiers.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('asnum', ASIdentifierChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('rdi', ASIdentifierChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+# Map of Certificate Extension OIDs to Extensions is added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ipAddrBlocks: IPAddrBlocks(),
+ id_pe_autonomousSysIds: ASIdentifiers(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
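+
+
+# Illustrative sketch (not part of the upstream module): an IPv4 entry
+# (AFI 0001) that inherits its address set from the issuer via the
+# "inherit" alternative defined above.
+if __name__ == '__main__':
+    from pyasn1.codec.der import encoder
+
+    choice = IPAddressChoice()
+    choice['inherit'] = univ.Null('')
+    afi = IPAddressFamily()
+    afi['addressFamily'] = univ.OctetString(hexValue='0001')
+    afi['ipAddressChoice'] = choice
+    print(encoder.encode(afi).hex())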
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc3852.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3852.py
new file mode 100644
index 0000000000..d294c5b722
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc3852.py
@@ -0,0 +1,706 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with the asn1ate tool.
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc3852.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3280
+from pyasn1_modules import rfc3281
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()))
+)
+
+
+class SignedAttributes(univ.SetOf):
+ pass
+
+
+SignedAttributes.componentType = Attribute()
+SignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class OtherRevocationInfoFormat(univ.Sequence):
+ pass
+
+
+OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherRevInfo', univ.Any())
+)
+
+
+class RevocationInfoChoice(univ.Choice):
+ pass
+
+
+RevocationInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crl', rfc3280.CertificateList()),
+ namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class RevocationInfoChoices(univ.SetOf):
+ pass
+
+
+RevocationInfoChoices.componentType = RevocationInfoChoice()
+
+
+class OtherKeyAttribute(univ.Sequence):
+ pass
+
+
+OtherKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('keyAttr', univ.Any())
+)
+
+id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2)
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class CMSVersion(univ.Integer):
+ pass
+
+
+CMSVersion.namedValues = namedval.NamedValues(
+ ('v0', 0),
+ ('v1', 1),
+ ('v2', 2),
+ ('v3', 3),
+ ('v4', 4),
+ ('v5', 5)
+)
+
+
+class KEKIdentifier(univ.Sequence):
+ pass
+
+
+KEKIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyIdentifier', univ.OctetString()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KEKRecipientInfo(univ.Sequence):
+ pass
+
+
+KEKRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('kekid', KEKIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class KeyDerivationAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class PasswordRecipientInfo(univ.Sequence):
+ pass
+
+
+PasswordRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class OtherRecipientInfo(univ.Sequence):
+ pass
+
+
+OtherRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oriType', univ.ObjectIdentifier()),
+ namedtype.NamedType('oriValue', univ.Any())
+)
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ pass
+
+
+IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.Name()),
+ namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber())
+)
+
+
+class SubjectKeyIdentifier(univ.OctetString):
+ pass
+
+
+class RecipientKeyIdentifier(univ.Sequence):
+ pass
+
+
+RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KeyAgreeRecipientIdentifier(univ.Choice):
+ pass
+
+
+KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class RecipientEncryptedKey(univ.Sequence):
+ pass
+
+
+RecipientEncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientEncryptedKeys(univ.SequenceOf):
+ pass
+
+
+RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
+
+
+class UserKeyingMaterial(univ.OctetString):
+ pass
+
+
+class OriginatorPublicKey(univ.Sequence):
+ pass
+
+
+OriginatorPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('publicKey', univ.BitString())
+)
+
+
+class OriginatorIdentifierOrKey(univ.Choice):
+ pass
+
+
+OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class KeyAgreeRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
+)
+
+
+class RecipientIdentifier(univ.Choice):
+ pass
+
+
+RecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyTransRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('rid', RecipientIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientInfo(univ.Choice):
+ pass
+
+
+RecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ktri', KeyTransRecipientInfo()),
+ namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('kekri', KEKRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('ori', OtherRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class RecipientInfos(univ.SetOf):
+ pass
+
+
+RecipientInfos.componentType = RecipientInfo()
+RecipientInfos.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class DigestAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignerIdentifier(univ.Choice):
+ pass
+
+
+SignerIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnprotectedAttributes(univ.SetOf):
+ pass
+
+
+UnprotectedAttributes.componentType = Attribute()
+UnprotectedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContentInfo(univ.Sequence):
+ pass
+
+
+EncryptedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class EncryptedData(univ.Sequence):
+ pass
+
+
+EncryptedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3)
+
+id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1)
+
+id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4)
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ pass
+
+
+DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
+
+
+class EncapsulatedContentInfo(univ.Sequence):
+ pass
+
+
+EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('eContentType', ContentType()),
+ namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class DigestedData(univ.Sequence):
+ pass
+
+
+DigestedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.NamedType('digest', Digest())
+)
+
+
+class ContentInfo(univ.Sequence):
+ pass
+
+
+ContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnauthAttributes(univ.SetOf):
+ pass
+
+
+UnauthAttributes.componentType = Attribute()
+UnauthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ pass
+
+
+ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('attributes', UnauthAttributes())
+)
+
+
+class SignatureAlgorithmIdentifier(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ pass
+
+
+ExtendedCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+)
+
+
+class OtherCertificateFormat(univ.Sequence):
+ pass
+
+
+OtherCertificateFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherCert', univ.Any())
+)
+
+
+class AttributeCertificateV2(rfc3281.AttributeCertificate):
+ pass
+
+
+class AttCertVersionV1(univ.Integer):
+ pass
+
+
+AttCertVersionV1.namedValues = namedval.NamedValues(
+ ('v1', 0)
+)
+
+
+class AttributeCertificateInfoV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")),
+ namedtype.NamedType(
+ 'subject', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subjectName', rfc3280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('issuer', rfc3280.GeneralNames()),
+ namedtype.NamedType('signature', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', rfc3280.CertificateSerialNumber()),
+ namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc3280.Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', rfc3280.UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', rfc3280.Extensions())
+)
+
+
+class AttributeCertificateV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateV1.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
+ namedtype.NamedType('signatureAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class CertificateChoices(univ.Choice):
+ pass
+
+
+CertificateChoices.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('other', OtherCertificateFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class CertificateSet(univ.SetOf):
+ pass
+
+
+CertificateSet.componentType = CertificateChoices()
+
+
+class MessageAuthenticationCode(univ.OctetString):
+ pass
+
+
+class UnsignedAttributes(univ.SetOf):
+ pass
+
+
+UnsignedAttributes.componentType = Attribute()
+UnsignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SignatureValue(univ.OctetString):
+ pass
+
+
+class SignerInfo(univ.Sequence):
+ pass
+
+
+SignerInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('sid', SignerIdentifier()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', SignatureValue()),
+ namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SignerInfos(univ.SetOf):
+ pass
+
+
+SignerInfos.componentType = SignerInfo()
+
+
+class SignedData(univ.Sequence):
+ pass
+
+
+SignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('certificates', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+)
+
+
+class MessageAuthenticationCodeAlgorithm(rfc3280.AlgorithmIdentifier):
+ pass
+
+
+class MessageDigest(univ.OctetString):
+ pass
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class OriginatorInfo(univ.Sequence):
+ pass
+
+
+OriginatorInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('certs', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class AuthAttributes(univ.SetOf):
+ pass
+
+
+AuthAttributes.componentType = Attribute()
+AuthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class AuthenticatedData(univ.Sequence):
+ pass
+
+
+AuthenticatedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()),
+ namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('mac', MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6)
+
+id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3)
+
+
+class EnvelopedData(univ.Sequence):
+ pass
+
+
+EnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class Countersignature(SignerInfo):
+ pass
+
+
+id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5)
+
+id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5)
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ pass
+
+
+ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc3280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6)
+
+id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2)
+
+
+class SigningTime(Time):
+ pass
+
+
+id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6)
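+
+
+# Illustrative sketch (not part of the upstream module): CMS objects are
+# wrapped in ContentInfo, so SignedData takes a second decoding pass keyed
+# on contentType; the file name is hypothetical.
+if __name__ == '__main__':
+    from pyasn1.codec.der import decoder
+
+    with open('cms.der', 'rb') as cms_file:
+        substrate = cms_file.read()
+
+    content_info, rest = decoder.decode(substrate, asn1Spec=ContentInfo())
+    if content_info['contentType'] == id_signedData:
+        signed_data, rest = decoder.decode(
+            content_info['content'], asn1Spec=SignedData())
+        print(signed_data['version'])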
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc4043.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4043.py
new file mode 100644
index 0000000000..cf0a801419
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4043.py
@@ -0,0 +1,43 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Permanent Identifier
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4043.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+id_on = id_pkix + (8, )
+
+id_on_permanentIdentifier = id_on + (3, )
+
+
+class PermanentIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('identifierValue', char.UTF8String()),
+ namedtype.OptionalNamedType('assigner', univ.ObjectIdentifier())
+ )
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_permanentIdentifier: PermanentIdentifier(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
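+
+
+# Illustrative sketch (not part of the upstream module): building and
+# DER-encoding a PermanentIdentifier with a hypothetical value.
+if __name__ == '__main__':
+    from pyasn1.codec.der import encoder
+
+    perm_id = PermanentIdentifier()
+    perm_id['identifierValue'] = char.UTF8String('ABC-1234')
+    print(encoder.encode(perm_id).hex())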
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc4055.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4055.py
new file mode 100644
index 0000000000..bdc128632a
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4055.py
@@ -0,0 +1,258 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with a very small amount of assistance from
+# asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Additional Algorithms and Identifiers for RSA Cryptography
+# for use in Certificates and CRLs
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4055.txt
+#
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+
+id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
+
+id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
+
+id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
+
+id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
+
+rsaEncryption = _OID(1, 2, 840, 113549, 1, 1, 1)
+
+id_mgf1 = _OID(1, 2, 840, 113549, 1, 1, 8)
+
+id_RSAES_OAEP = _OID(1, 2, 840, 113549, 1, 1, 7)
+
+id_pSpecified = _OID(1, 2, 840, 113549, 1, 1, 9)
+
+id_RSASSA_PSS = _OID(1, 2, 840, 113549, 1, 1, 10)
+
+sha256WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 11)
+
+sha384WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 12)
+
+sha512WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 13)
+
+sha224WithRSAEncryption = _OID(1, 2, 840, 113549, 1, 1, 14)
+
+sha1Identifier = rfc5280.AlgorithmIdentifier()
+sha1Identifier['algorithm'] = id_sha1
+sha1Identifier['parameters'] = univ.Null("")
+
+sha224Identifier = rfc5280.AlgorithmIdentifier()
+sha224Identifier['algorithm'] = id_sha224
+sha224Identifier['parameters'] = univ.Null("")
+
+sha256Identifier = rfc5280.AlgorithmIdentifier()
+sha256Identifier['algorithm'] = id_sha256
+sha256Identifier['parameters'] = univ.Null("")
+
+sha384Identifier = rfc5280.AlgorithmIdentifier()
+sha384Identifier['algorithm'] = id_sha384
+sha384Identifier['parameters'] = univ.Null("")
+
+sha512Identifier = rfc5280.AlgorithmIdentifier()
+sha512Identifier['algorithm'] = id_sha512
+sha512Identifier['parameters'] = univ.Null("")
+
+mgf1SHA1Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA1Identifier['algorithm'] = id_mgf1
+mgf1SHA1Identifier['parameters'] = sha1Identifier
+
+mgf1SHA224Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA224Identifier['algorithm'] = id_mgf1
+mgf1SHA224Identifier['parameters'] = sha224Identifier
+
+mgf1SHA256Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA256Identifier['algorithm'] = id_mgf1
+mgf1SHA256Identifier['parameters'] = sha256Identifier
+
+mgf1SHA384Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA384Identifier['algorithm'] = id_mgf1
+mgf1SHA384Identifier['parameters'] = sha384Identifier
+
+mgf1SHA512Identifier = rfc5280.AlgorithmIdentifier()
+mgf1SHA512Identifier['algorithm'] = id_mgf1
+mgf1SHA512Identifier['parameters'] = sha512Identifier
+
+pSpecifiedEmptyIdentifier = rfc5280.AlgorithmIdentifier()
+pSpecifiedEmptyIdentifier['algorithm'] = id_pSpecified
+pSpecifiedEmptyIdentifier['parameters'] = univ.OctetString(value='')
+
+
+class RSAPublicKey(univ.Sequence):
+ pass
+
+RSAPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer())
+)
+
+
+class HashAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class MaskGenAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class RSAES_OAEP_params(univ.Sequence):
+ pass
+
+RSAES_OAEP_params.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('hashFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maskGenFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('pSourceFunc', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+rSAES_OAEP_Default_Params = RSAES_OAEP_params()
+
+rSAES_OAEP_Default_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_Default_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_Default_Identifier['parameters'] = rSAES_OAEP_Default_Params
+
+rSAES_OAEP_SHA224_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA224_Params['hashFunc'] = sha224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA224_Params['maskGenFunc'] = mgf1SHA224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA224_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA224_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA224_Identifier['parameters'] = rSAES_OAEP_SHA224_Params
+
+rSAES_OAEP_SHA256_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA256_Params['hashFunc'] = sha256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA256_Params['maskGenFunc'] = mgf1SHA256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA256_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA256_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA256_Identifier['parameters'] = rSAES_OAEP_SHA256_Params
+
+rSAES_OAEP_SHA384_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA384_Params['hashFunc'] = sha384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA384_Params['maskGenFunc'] = mgf1SHA384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA384_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA384_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA384_Identifier['parameters'] = rSAES_OAEP_SHA384_Params
+
+rSAES_OAEP_SHA512_Params = RSAES_OAEP_params()
+rSAES_OAEP_SHA512_Params['hashFunc'] = sha512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSAES_OAEP_SHA512_Params['maskGenFunc'] = mgf1SHA512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSAES_OAEP_SHA512_Identifier = rfc5280.AlgorithmIdentifier()
+rSAES_OAEP_SHA512_Identifier['algorithm'] = id_RSAES_OAEP
+rSAES_OAEP_SHA512_Identifier['parameters'] = rSAES_OAEP_SHA512_Params
+
+
+class RSASSA_PSS_params(univ.Sequence):
+ pass
+
+RSASSA_PSS_params.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('maskGenAlgorithm', rfc5280.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.DefaultedNamedType('saltLength', univ.Integer(value=20).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.DefaultedNamedType('trailerField', univ.Integer(value=1).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+rSASSA_PSS_Default_Params = RSASSA_PSS_params()
+
+rSASSA_PSS_Default_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_Default_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_Default_Identifier['parameters'] = rSASSA_PSS_Default_Params
+
+rSASSA_PSS_SHA224_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA224_Params['hashAlgorithm'] = sha224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA224_Params['maskGenAlgorithm'] = mgf1SHA224Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA224_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA224_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA224_Identifier['parameters'] = rSASSA_PSS_SHA224_Params
+
+rSASSA_PSS_SHA256_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA256_Params['hashAlgorithm'] = sha256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA256_Params['maskGenAlgorithm'] = mgf1SHA256Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA256_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA256_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA256_Identifier['parameters'] = rSASSA_PSS_SHA256_Params
+
+rSASSA_PSS_SHA384_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA384_Params['hashAlgorithm'] = sha384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA384_Params['maskGenAlgorithm'] = mgf1SHA384Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA384_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA384_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA384_Identifier['parameters'] = rSASSA_PSS_SHA384_Params
+
+rSASSA_PSS_SHA512_Params = RSASSA_PSS_params()
+rSASSA_PSS_SHA512_Params['hashAlgorithm'] = sha512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0), cloneValueFlag=True)
+rSASSA_PSS_SHA512_Params['maskGenAlgorithm'] = mgf1SHA512Identifier.subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1), cloneValueFlag=True)
+
+rSASSA_PSS_SHA512_Identifier = rfc5280.AlgorithmIdentifier()
+rSASSA_PSS_SHA512_Identifier['algorithm'] = id_RSASSA_PSS
+rSASSA_PSS_SHA512_Identifier['parameters'] = rSASSA_PSS_SHA512_Params
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_mgf1: rfc5280.AlgorithmIdentifier(),
+ id_pSpecified: univ.OctetString(),
+ id_RSAES_OAEP: RSAES_OAEP_params(),
+ id_RSASSA_PSS: RSASSA_PSS_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
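+
+
+# Illustrative sketch, not part of the upstream module: with the map
+# update above, pyasn1 can decode the parameters of an RSASSA-PSS
+# identifier back into a typed RSASSA_PSS_params object (assumes a
+# pyasn1 release that supports the decodeOpenTypes keyword).
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode as der_encode
+    from pyasn1.codec.der.decoder import decode as der_decode
+
+    substrate = der_encode(rSASSA_PSS_SHA256_Identifier)
+    decoded, rest = der_decode(substrate,
+                               asn1Spec=rfc5280.AlgorithmIdentifier(),
+                               decodeOpenTypes=True)
+    assert not rest and decoded['algorithm'] == id_RSASSA_PSS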
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc4073.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4073.py
new file mode 100644
index 0000000000..3f425b28ed
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4073.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Protecting Multiple Contents with the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4073.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# Content Collection Content Type and Object Identifier
+
+id_ct_contentCollection = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.19')
+
+class ContentCollection(univ.SequenceOf):
+ pass
+
+ContentCollection.componentType = rfc5652.ContentInfo()
+ContentCollection.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Content With Attributes Content Type and Object Identifier
+
+id_ct_contentWithAttrs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.20')
+
+class ContentWithAttributes(univ.Sequence):
+ pass
+
+ContentWithAttributes.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('content', rfc5652.ContentInfo()),
+ namedtype.NamedType('attrs', univ.SequenceOf(
+ componentType=rfc5652.Attribute()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_contentCollection: ContentCollection(),
+ id_ct_contentWithAttrs: ContentWithAttributes(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
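+
+
+# Illustrative sketch, not part of the upstream module: a minimal
+# ContentCollection holding a single id-data item. The payload bytes
+# are hypothetical; 'content' is an open type, so a pre-encoded value
+# is supplied.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode as der_encode
+
+    item = rfc5652.ContentInfo()
+    item['contentType'] = rfc5652.id_data
+    item['content'] = der_encode(univ.OctetString(b'example payload'))
+
+    collection = ContentCollection()
+    collection.append(item)
+    substrate = der_encode(collection)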
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc4108.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4108.py
new file mode 100644
index 0000000000..ecace9e3ee
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4108.py
@@ -0,0 +1,350 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add items from the verified errata.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Firmware Wrapper
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4108.txt
+# https://www.rfc-editor.org/errata_search.php?rfc=4108
+#
+
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+class HardwareSerialEntry(univ.Choice):
+ pass
+
+HardwareSerialEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('all', univ.Null()),
+ namedtype.NamedType('single', univ.OctetString()),
+ namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('low', univ.OctetString()),
+ namedtype.NamedType('high', univ.OctetString())
+ ))
+ )
+)
+
+
+class HardwareModules(univ.Sequence):
+ pass
+
+HardwareModules.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialEntries', univ.SequenceOf(componentType=HardwareSerialEntry()))
+)
+
+
+class CommunityIdentifier(univ.Choice):
+ pass
+
+CommunityIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('communityOID', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwModuleList', HardwareModules())
+)
+
+
+class PreferredPackageIdentifier(univ.Sequence):
+ pass
+
+PreferredPackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fwPkgID', univ.ObjectIdentifier()),
+ namedtype.NamedType('verNum', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+
+class PreferredOrLegacyPackageIdentifier(univ.Choice):
+ pass
+
+PreferredOrLegacyPackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('preferred', PreferredPackageIdentifier()),
+ namedtype.NamedType('legacy', univ.OctetString())
+)
+
+
+class CurrentFWConfig(univ.Sequence):
+ pass
+
+CurrentFWConfig.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('fwPkgType', univ.Integer()),
+ namedtype.NamedType('fwPkgName', PreferredOrLegacyPackageIdentifier())
+)
+
+
+class PreferredOrLegacyStalePackageIdentifier(univ.Choice):
+ pass
+
+PreferredOrLegacyStalePackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('preferredStaleVerNum', univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('legacyStaleVersion', univ.OctetString())
+)
+
+
+class FirmwarePackageLoadErrorCode(univ.Enumerated):
+ pass
+
+FirmwarePackageLoadErrorCode.namedValues = namedval.NamedValues(
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('signatureFailure', 15),
+ ('contentTypeMismatch', 16),
+ ('badEncryptedData', 17),
+ ('unprotectedAttrsPresent', 18),
+ ('badEncryptContent', 19),
+ ('badEncryptAlgorithm', 20),
+ ('missingCiphertext', 21),
+ ('noDecryptKey', 22),
+ ('decryptFailure', 23),
+ ('badCompressAlgorithm', 24),
+ ('missingCompressedContent', 25),
+ ('decompressFailure', 26),
+ ('wrongHardware', 27),
+ ('stalePackage', 28),
+ ('notInCommunity', 29),
+ ('unsupportedPackageType', 30),
+ ('missingDependency', 31),
+ ('wrongDependencyVersion', 32),
+ ('insufficientMemory', 33),
+ ('badFirmware', 34),
+ ('unsupportedParameters', 35),
+ ('breaksDependency', 36),
+ ('otherError', 99)
+)
+
+
+class VendorLoadErrorCode(univ.Integer):
+ pass
+
+
+# Wrapped Firmware Key Unsigned Attribute and Object Identifier
+
+id_aa_wrappedFirmwareKey = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.39')
+
+class WrappedFirmwareKey(rfc5652.EnvelopedData):
+ pass
+
+
+# Firmware Package Information Signed Attribute and Object Identifier
+
+id_aa_firmwarePackageInfo = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.42')
+
+class FirmwarePackageInfo(univ.Sequence):
+ pass
+
+FirmwarePackageInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('fwPkgType', univ.Integer()),
+ namedtype.OptionalNamedType('dependencies', univ.SequenceOf(componentType=PreferredOrLegacyPackageIdentifier()))
+)
+
+FirmwarePackageInfo.sizeSpec = univ.Sequence.sizeSpec + constraint.ValueSizeConstraint(1, 2)
+
+
+# Community Identifiers Signed Attribute and Object Identifier
+
+id_aa_communityIdentifiers = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.40')
+
+class CommunityIdentifiers(univ.SequenceOf):
+ pass
+
+CommunityIdentifiers.componentType = CommunityIdentifier()
+
+
+# Implemented Compression Algorithms Signed Attribute and Object Identifier
+
+id_aa_implCompressAlgs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.43')
+
+class ImplementedCompressAlgorithms(univ.SequenceOf):
+ pass
+
+ImplementedCompressAlgorithms.componentType = univ.ObjectIdentifier()
+
+
+# Implemented Cryptographic Algorithms Signed Attribute and Object Identifier
+
+id_aa_implCryptoAlgs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.38')
+
+class ImplementedCryptoAlgorithms(univ.SequenceOf):
+ pass
+
+ImplementedCryptoAlgorithms.componentType = univ.ObjectIdentifier()
+
+
+# Decrypt Key Identifier Signed Attribute and Object Identifier
+
+id_aa_decryptKeyID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.37')
+
+class DecryptKeyIdentifier(univ.OctetString):
+ pass
+
+
+# Target Hardware Identifier Signed Attribute and Object Identifier
+
+id_aa_targetHardwareIDs = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.36')
+
+class TargetHardwareIdentifiers(univ.SequenceOf):
+ pass
+
+TargetHardwareIdentifiers.componentType = univ.ObjectIdentifier()
+
+
+# Firmware Package Identifier Signed Attribute and Object Identifier
+
+id_aa_firmwarePackageID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.35')
+
+class FirmwarePackageIdentifier(univ.Sequence):
+ pass
+
+FirmwarePackageIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('name', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('stale', PreferredOrLegacyStalePackageIdentifier())
+)
+
+
+# Firmware Package Message Digest Signed Attribute and Object Identifier
+
+id_aa_fwPkgMessageDigest = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.41')
+
+class FirmwarePackageMessageDigest(univ.Sequence):
+ pass
+
+FirmwarePackageMessageDigest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('msgDigest', univ.OctetString())
+)
+
+
+# Firmware Package Load Error Report Content Type and Object Identifier
+
+class FWErrorVersion(univ.Integer):
+ pass
+
+FWErrorVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_firmwareLoadError = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.18')
+
+class FirmwarePackageLoadError(univ.Sequence):
+ pass
+
+FirmwarePackageLoadError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', FWErrorVersion().subtype(value='v1')),
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString()),
+ namedtype.NamedType('errorCode', FirmwarePackageLoadErrorCode()),
+ namedtype.OptionalNamedType('vendorErrorCode', VendorLoadErrorCode()),
+ namedtype.OptionalNamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('config', univ.SequenceOf(componentType=CurrentFWConfig()).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Firmware Package Load Receipt Content Type and Object Identifier
+
+class FWReceiptVersion(univ.Integer):
+ pass
+
+FWReceiptVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+id_ct_firmwareLoadReceipt = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.17')
+
+class FirmwarePackageLoadReceipt(univ.Sequence):
+ pass
+
+FirmwarePackageLoadReceipt.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', FWReceiptVersion().subtype(value='v1')),
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString()),
+ namedtype.NamedType('fwPkgName', PreferredOrLegacyPackageIdentifier()),
+ namedtype.OptionalNamedType('trustAnchorKeyID', univ.OctetString()),
+ namedtype.OptionalNamedType('decryptKeyID', univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Firmware Package Content Type and Object Identifier
+
+id_ct_firmwarePackage = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.16')
+
+class FirmwarePkgData(univ.OctetString):
+ pass
+
+
+# Other Name syntax for Hardware Module Name
+
+id_on_hardwareModuleName = univ.ObjectIdentifier('1.3.6.1.5.5.7.8.4')
+
+class HardwareModuleName(univ.Sequence):
+ pass
+
+HardwareModuleName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialNum', univ.OctetString())
+)
+
+
+# Map of Attribute Type OIDs to Attributes is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_wrappedFirmwareKey: WrappedFirmwareKey(),
+ id_aa_firmwarePackageInfo: FirmwarePackageInfo(),
+ id_aa_communityIdentifiers: CommunityIdentifiers(),
+ id_aa_implCompressAlgs: ImplementedCompressAlgorithms(),
+ id_aa_implCryptoAlgs: ImplementedCryptoAlgorithms(),
+ id_aa_decryptKeyID: DecryptKeyIdentifier(),
+ id_aa_targetHardwareIDs: TargetHardwareIdentifiers(),
+ id_aa_firmwarePackageID: FirmwarePackageIdentifier(),
+ id_aa_fwPkgMessageDigest: FirmwarePackageMessageDigest(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_firmwareLoadError: FirmwarePackageLoadError(),
+ id_ct_firmwareLoadReceipt: FirmwarePackageLoadReceipt(),
+ id_ct_firmwarePackage: FirmwarePkgData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_hardwareModuleName: HardwareModuleName(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
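+
+
+# Illustrative sketch, not part of the upstream module: a
+# HardwareModuleName suitable for use as an otherName value. The
+# hwType arc and serial number below are hypothetical.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode as der_encode
+
+    hwName = HardwareModuleName()
+    hwName['hwType'] = univ.ObjectIdentifier('1.3.6.1.4.1.99999.1')
+    hwName['hwSerialNum'] = univ.OctetString(hexValue='0102030405')
+    substrate = der_encode(hwName)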
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc4210.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4210.py
new file mode 100644
index 0000000000..4d01a337dd
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4210.py
@@ -0,0 +1,803 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Management Protocol structures as per RFC 4210
+#
+# Based on Alex Railean's work
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc2314
+from pyasn1_modules import rfc2459
+from pyasn1_modules import rfc2511
+
+MAX = float('inf')
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class CMPCertificate(rfc2459.Certificate):
+ pass
+
+
+class OOBCert(CMPCertificate):
+ pass
+
+
+class CertAnnContent(CMPCertificate):
+ pass
+
+
+class PKIFreeText(univ.SequenceOf):
+ """
+ PKIFreeText ::= SEQUENCE SIZE (1..MAX) OF UTF8String
+ """
+ componentType = char.UTF8String()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+class PollRepContent(univ.SequenceOf):
+ """
+ PollRepContent ::= SEQUENCE OF SEQUENCE {
+ certReqId INTEGER,
+ checkAfter INTEGER, -- time in seconds
+ reason PKIFreeText OPTIONAL
+ }
+ """
+
+ class CertReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('checkAfter', univ.Integer()),
+ namedtype.OptionalNamedType('reason', PKIFreeText())
+ )
+
+ componentType = CertReq()
+
+
+class PollReqContent(univ.SequenceOf):
+ """
+ PollReqContent ::= SEQUENCE OF SEQUENCE {
+ certReqId INTEGER
+ }
+
+ """
+
+ class CertReq(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer())
+ )
+
+ componentType = CertReq()
+
+
+class InfoTypeAndValue(univ.Sequence):
+ """
+ InfoTypeAndValue ::= SEQUENCE {
+ infoType OBJECT IDENTIFIER,
+ infoValue ANY DEFINED BY infoType OPTIONAL
+ }"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('infoType', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('infoValue', univ.Any())
+ )
+
+
+class GenRepContent(univ.SequenceOf):
+ componentType = InfoTypeAndValue()
+
+
+class GenMsgContent(univ.SequenceOf):
+ componentType = InfoTypeAndValue()
+
+
+class PKIConfirmContent(univ.Null):
+ pass
+
+
+class CRLAnnContent(univ.SequenceOf):
+ componentType = rfc2459.CertificateList()
+
+
+class CAKeyUpdAnnContent(univ.Sequence):
+ """
+ CAKeyUpdAnnContent ::= SEQUENCE {
+ oldWithNew CMPCertificate,
+ newWithOld CMPCertificate,
+ newWithNew CMPCertificate
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oldWithNew', CMPCertificate()),
+ namedtype.NamedType('newWithOld', CMPCertificate()),
+ namedtype.NamedType('newWithNew', CMPCertificate())
+ )
+
+
+class RevDetails(univ.Sequence):
+ """
+ RevDetails ::= SEQUENCE {
+ certDetails CertTemplate,
+ crlEntryDetails Extensions OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certDetails', rfc2511.CertTemplate()),
+ namedtype.OptionalNamedType('crlEntryDetails', rfc2459.Extensions())
+ )
+
+
+class RevReqContent(univ.SequenceOf):
+ componentType = RevDetails()
+
+
+class CertOrEncCert(univ.Choice):
+ """
+ CertOrEncCert ::= CHOICE {
+ certificate [0] CMPCertificate,
+ encryptedCert [1] EncryptedValue
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', CMPCertificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('encryptedCert', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class CertifiedKeyPair(univ.Sequence):
+ """
+ CertifiedKeyPair ::= SEQUENCE {
+ certOrEncCert CertOrEncCert,
+ privateKey [0] EncryptedValue OPTIONAL,
+ publicationInfo [1] PKIPublicationInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certOrEncCert', CertOrEncCert()),
+ namedtype.OptionalNamedType('privateKey', rfc2511.EncryptedValue().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('publicationInfo', rfc2511.PKIPublicationInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class POPODecKeyRespContent(univ.SequenceOf):
+ componentType = univ.Integer()
+
+
+class Challenge(univ.Sequence):
+ """
+ Challenge ::= SEQUENCE {
+ owf AlgorithmIdentifier OPTIONAL,
+ witness OCTET STRING,
+ challenge OCTET STRING
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString()),
+ namedtype.NamedType('challenge', univ.OctetString())
+ )
+
+
+class PKIStatus(univ.Integer):
+ """
+ PKIStatus ::= INTEGER {
+ accepted (0),
+ grantedWithMods (1),
+ rejection (2),
+ waiting (3),
+ revocationWarning (4),
+ revocationNotification (5),
+ keyUpdateWarning (6)
+ }
+ """
+ namedValues = namedval.NamedValues(
+ ('accepted', 0),
+ ('grantedWithMods', 1),
+ ('rejection', 2),
+ ('waiting', 3),
+ ('revocationWarning', 4),
+ ('revocationNotification', 5),
+ ('keyUpdateWarning', 6)
+ )
+
+
+class PKIFailureInfo(univ.BitString):
+ """
+ PKIFailureInfo ::= BIT STRING {
+ badAlg (0),
+ badMessageCheck (1),
+ badRequest (2),
+ badTime (3),
+ badCertId (4),
+ badDataFormat (5),
+ wrongAuthority (6),
+ incorrectData (7),
+ missingTimeStamp (8),
+ badPOP (9),
+ certRevoked (10),
+ certConfirmed (11),
+ wrongIntegrity (12),
+ badRecipientNonce (13),
+ timeNotAvailable (14),
+ unacceptedPolicy (15),
+ unacceptedExtension (16),
+ addInfoNotAvailable (17),
+ badSenderNonce (18),
+ badCertTemplate (19),
+ signerNotTrusted (20),
+ transactionIdInUse (21),
+ unsupportedVersion (22),
+ notAuthorized (23),
+ systemUnavail (24),
+ systemFailure (25),
+ duplicateCertReq (26)
+ """
+ namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badMessageCheck', 1),
+ ('badRequest', 2),
+ ('badTime', 3),
+ ('badCertId', 4),
+ ('badDataFormat', 5),
+ ('wrongAuthority', 6),
+ ('incorrectData', 7),
+ ('missingTimeStamp', 8),
+ ('badPOP', 9),
+ ('certRevoked', 10),
+ ('certConfirmed', 11),
+ ('wrongIntegrity', 12),
+ ('badRecipientNonce', 13),
+ ('timeNotAvailable', 14),
+ ('unacceptedPolicy', 15),
+ ('unacceptedExtension', 16),
+ ('addInfoNotAvailable', 17),
+ ('badSenderNonce', 18),
+ ('badCertTemplate', 19),
+ ('signerNotTrusted', 20),
+ ('transactionIdInUse', 21),
+ ('unsupportedVersion', 22),
+ ('notAuthorized', 23),
+ ('systemUnavail', 24),
+ ('systemFailure', 25),
+ ('duplicateCertReq', 26)
+ )
+
+
+class PKIStatusInfo(univ.Sequence):
+ """
+ PKIStatusInfo ::= SEQUENCE {
+ status PKIStatus,
+ statusString PKIFreeText OPTIONAL,
+ failInfo PKIFailureInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.OptionalNamedType('statusString', PKIFreeText()),
+ namedtype.OptionalNamedType('failInfo', PKIFailureInfo())
+ )
+
+
+class ErrorMsgContent(univ.Sequence):
+ """
+ ErrorMsgContent ::= SEQUENCE {
+ pKIStatusInfo PKIStatusInfo,
+ errorCode INTEGER OPTIONAL,
+ -- implementation-specific error codes
+ errorDetails PKIFreeText OPTIONAL
+ -- implementation-specific error details
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pKIStatusInfo', PKIStatusInfo()),
+ namedtype.OptionalNamedType('errorCode', univ.Integer()),
+ namedtype.OptionalNamedType('errorDetails', PKIFreeText())
+ )
+
+
+class CertStatus(univ.Sequence):
+ """
+ CertStatus ::= SEQUENCE {
+ certHash OCTET STRING,
+ certReqId INTEGER,
+ statusInfo PKIStatusInfo OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certHash', univ.OctetString()),
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.OptionalNamedType('statusInfo', PKIStatusInfo())
+ )
+
+
+class CertConfirmContent(univ.SequenceOf):
+ componentType = CertStatus()
+
+
+class RevAnnContent(univ.Sequence):
+ """
+ RevAnnContent ::= SEQUENCE {
+ status PKIStatus,
+ certId CertId,
+ willBeRevokedAt GeneralizedTime,
+ badSinceDate GeneralizedTime,
+ crlDetails Extensions OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatus()),
+ namedtype.NamedType('certId', rfc2511.CertId()),
+ namedtype.NamedType('willBeRevokedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('badSinceDate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('crlDetails', rfc2459.Extensions())
+ )
+
+
+class RevRepContent(univ.Sequence):
+ """
+ RevRepContent ::= SEQUENCE {
+ status SEQUENCE SIZE (1..MAX) OF PKIStatusInfo,
+ revCerts [0] SEQUENCE SIZE (1..MAX) OF CertId
+ OPTIONAL,
+ crls [1] SEQUENCE SIZE (1..MAX) OF CertificateList
+ OPTIONAL
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'status', univ.SequenceOf(
+ componentType=PKIStatusInfo(),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'revCerts', univ.SequenceOf(componentType=rfc2511.CertId()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'crls', univ.SequenceOf(componentType=rfc2459.CertificateList()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ )
+ )
+
+
+class KeyRecRepContent(univ.Sequence):
+ """
+ KeyRecRepContent ::= SEQUENCE {
+ status PKIStatusInfo,
+ newSigCert [0] CMPCertificate OPTIONAL,
+ caCerts [1] SEQUENCE SIZE (1..MAX) OF
+ CMPCertificate OPTIONAL,
+ keyPairHist [2] SEQUENCE SIZE (1..MAX) OF
+ CertifiedKeyPair OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType(
+ 'newSigCert', CMPCertificate().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'caCerts', univ.SequenceOf(componentType=CMPCertificate()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ),
+ namedtype.OptionalNamedType('keyPairHist', univ.SequenceOf(componentType=CertifiedKeyPair()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2),
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX))
+ )
+ )
+
+
+class CertResponse(univ.Sequence):
+ """
+ CertResponse ::= SEQUENCE {
+ certReqId INTEGER,
+ status PKIStatusInfo,
+ certifiedKeyPair CertifiedKeyPair OPTIONAL,
+ rspInfo OCTET STRING OPTIONAL
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('status', PKIStatusInfo()),
+ namedtype.OptionalNamedType('certifiedKeyPair', CertifiedKeyPair()),
+ namedtype.OptionalNamedType('rspInfo', univ.OctetString())
+ )
+
+
+class CertRepMessage(univ.Sequence):
+ """
+ CertRepMessage ::= SEQUENCE {
+ caPubs [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
+ OPTIONAL,
+ response SEQUENCE OF CertResponse
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType(
+ 'caPubs', univ.SequenceOf(
+ componentType=CMPCertificate()
+ ).subtype(sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
+ ),
+ namedtype.NamedType('response', univ.SequenceOf(componentType=CertResponse()))
+ )
+
+
+class POPODecKeyChallContent(univ.SequenceOf):
+ componentType = Challenge()
+
+
+class OOBCertHash(univ.Sequence):
+ """
+ OOBCertHash ::= SEQUENCE {
+ hashAlg [0] AlgorithmIdentifier OPTIONAL,
+ certId [1] CertId OPTIONAL,
+ hashVal BIT STRING
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType(
+ 'hashAlg', rfc2459.AlgorithmIdentifier().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
+ ),
+ namedtype.OptionalNamedType(
+ 'certId', rfc2511.CertId().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))
+ ),
+ namedtype.NamedType('hashVal', univ.BitString())
+ )
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+class NestedMessageContent(univ.SequenceOf):
+ """
+ NestedMessageContent ::= PKIMessages
+ """
+ componentType = univ.Any()
+
+
+class DHBMParameter(univ.Sequence):
+ """
+ DHBMParameter ::= SEQUENCE {
+ owf AlgorithmIdentifier,
+ -- AlgId for a One-Way Function (SHA-1 recommended)
+ mac AlgorithmIdentifier
+ -- the MAC AlgId (e.g., DES-MAC, Triple-DES-MAC [PKCS11],
+ } -- or HMAC [RFC2104, RFC2202])
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
+ )
+
+
+id_DHBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.30')
+
+
+class PBMParameter(univ.Sequence):
+ """
+ PBMParameter ::= SEQUENCE {
+ salt OCTET STRING,
+ owf AlgorithmIdentifier,
+ iterationCount INTEGER,
+ mac AlgorithmIdentifier
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'salt', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(0, 128))
+ ),
+ namedtype.NamedType('owf', rfc2459.AlgorithmIdentifier()),
+ namedtype.NamedType('iterationCount', univ.Integer()),
+ namedtype.NamedType('mac', rfc2459.AlgorithmIdentifier())
+ )
+
+
+id_PasswordBasedMac = univ.ObjectIdentifier('1.2.840.113533.7.66.13')
+
+
+class PKIProtection(univ.BitString):
+ pass
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+nestedMessageContent = NestedMessageContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 20))
+
+
+class PKIBody(univ.Choice):
+ """
+ PKIBody ::= CHOICE { -- message-specific body elements
+ ir [0] CertReqMessages, --Initialization Request
+ ip [1] CertRepMessage, --Initialization Response
+ cr [2] CertReqMessages, --Certification Request
+ cp [3] CertRepMessage, --Certification Response
+ p10cr [4] CertificationRequest, --imported from [PKCS10]
+ popdecc [5] POPODecKeyChallContent, --pop Challenge
+ popdecr [6] POPODecKeyRespContent, --pop Response
+ kur [7] CertReqMessages, --Key Update Request
+ kup [8] CertRepMessage, --Key Update Response
+ krr [9] CertReqMessages, --Key Recovery Request
+ krp [10] KeyRecRepContent, --Key Recovery Response
+ rr [11] RevReqContent, --Revocation Request
+ rp [12] RevRepContent, --Revocation Response
+ ccr [13] CertReqMessages, --Cross-Cert. Request
+ ccp [14] CertRepMessage, --Cross-Cert. Response
+ ckuann [15] CAKeyUpdAnnContent, --CA Key Update Ann.
+ cann [16] CertAnnContent, --Certificate Ann.
+ rann [17] RevAnnContent, --Revocation Ann.
+ crlann [18] CRLAnnContent, --CRL Announcement
+ pkiconf [19] PKIConfirmContent, --Confirmation
+ nested [20] NestedMessageContent, --Nested Message
+ genm [21] GenMsgContent, --General Message
+ genp [22] GenRepContent, --General Response
+ error [23] ErrorMsgContent, --Error Message
+ certConf [24] CertConfirmContent, --Certificate confirm
+ pollReq [25] PollReqContent, --Polling request
+ pollRep [26] PollRepContent --Polling response
+
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'ir', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)
+ )
+ ),
+ namedtype.NamedType(
+ 'ip', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ ),
+ namedtype.NamedType(
+ 'cr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)
+ )
+ ),
+ namedtype.NamedType(
+ 'cp', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)
+ )
+ ),
+ namedtype.NamedType(
+ 'p10cr', rfc2314.CertificationRequest().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)
+ )
+ ),
+ namedtype.NamedType(
+ 'popdecc', POPODecKeyChallContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5)
+ )
+ ),
+ namedtype.NamedType(
+ 'popdecr', POPODecKeyRespContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)
+ )
+ ),
+ namedtype.NamedType(
+ 'kur', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7)
+ )
+ ),
+ namedtype.NamedType(
+ 'kup', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)
+ )
+ ),
+ namedtype.NamedType(
+ 'krr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 9)
+ )
+ ),
+ namedtype.NamedType(
+ 'krp', KeyRecRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 10)
+ )
+ ),
+ namedtype.NamedType(
+ 'rr', RevReqContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 11)
+ )
+ ),
+ namedtype.NamedType(
+ 'rp', RevRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 12)
+ )
+ ),
+ namedtype.NamedType(
+ 'ccr', rfc2511.CertReqMessages().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 13)
+ )
+ ),
+ namedtype.NamedType(
+ 'ccp', CertRepMessage().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 14)
+ )
+ ),
+ namedtype.NamedType(
+ 'ckuann', CAKeyUpdAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 15)
+ )
+ ),
+ namedtype.NamedType(
+ 'cann', CertAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 16)
+ )
+ ),
+ namedtype.NamedType(
+ 'rann', RevAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 17)
+ )
+ ),
+ namedtype.NamedType(
+ 'crlann', CRLAnnContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 18)
+ )
+ ),
+ namedtype.NamedType(
+ 'pkiconf', PKIConfirmContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 19)
+ )
+ ),
+ namedtype.NamedType(
+ 'nested', nestedMessageContent
+ ),
+ # namedtype.NamedType('nested', NestedMessageContent().subtype(
+ # explicitTag=tag.Tag(tag.tagClassContext,tag.tagFormatConstructed,20)
+ # )
+ # ),
+ namedtype.NamedType(
+ 'genm', GenMsgContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 21)
+ )
+ ),
+ namedtype.NamedType(
+            'genp', GenRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 22)
+ )
+ ),
+ namedtype.NamedType(
+ 'error', ErrorMsgContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 23)
+ )
+ ),
+ namedtype.NamedType(
+ 'certConf', CertConfirmContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 24)
+ )
+ ),
+ namedtype.NamedType(
+ 'pollReq', PollReqContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 25)
+ )
+ ),
+ namedtype.NamedType(
+ 'pollRep', PollRepContent().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 26)
+ )
+ )
+ )
+
+
+class PKIHeader(univ.Sequence):
+ """
+ PKIHeader ::= SEQUENCE {
+ pvno INTEGER { cmp1999(1), cmp2000(2) },
+ sender GeneralName,
+ recipient GeneralName,
+ messageTime [0] GeneralizedTime OPTIONAL,
+ protectionAlg [1] AlgorithmIdentifier OPTIONAL,
+ senderKID [2] KeyIdentifier OPTIONAL,
+ recipKID [3] KeyIdentifier OPTIONAL,
+ transactionID [4] OCTET STRING OPTIONAL,
+ senderNonce [5] OCTET STRING OPTIONAL,
+ recipNonce [6] OCTET STRING OPTIONAL,
+ freeText [7] PKIFreeText OPTIONAL,
+ generalInfo [8] SEQUENCE SIZE (1..MAX) OF
+ InfoTypeAndValue OPTIONAL
+ }
+
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'pvno', univ.Integer(
+ namedValues=namedval.NamedValues(('cmp1999', 1), ('cmp2000', 2))
+ )
+ ),
+ namedtype.NamedType('sender', rfc2459.GeneralName()),
+ namedtype.NamedType('recipient', rfc2459.GeneralName()),
+ namedtype.OptionalNamedType('messageTime', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('protectionAlg', rfc2459.AlgorithmIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.OptionalNamedType('senderKID', rfc2459.KeyIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('recipKID', rfc2459.KeyIdentifier().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('transactionID', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('senderNonce', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.OptionalNamedType('recipNonce', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.OptionalNamedType('freeText', PKIFreeText().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 7))),
+ namedtype.OptionalNamedType('generalInfo',
+ univ.SequenceOf(
+ componentType=InfoTypeAndValue().subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)
+ )
+ ).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))
+ )
+ )
+
+
+class ProtectedPart(univ.Sequence):
+ """
+ ProtectedPart ::= SEQUENCE {
+ header PKIHeader,
+ body PKIBody
+ }
+ """
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('header', PKIHeader()),
+        namedtype.NamedType('body', PKIBody())
+ )
+
+
+class PKIMessage(univ.Sequence):
+ """
+ PKIMessage ::= SEQUENCE {
+ header PKIHeader,
+ body PKIBody,
+ protection [0] PKIProtection OPTIONAL,
+ extraCerts [1] SEQUENCE SIZE (1..MAX) OF CMPCertificate
+ OPTIONAL
+ }"""
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('header', PKIHeader()),
+ namedtype.NamedType('body', PKIBody()),
+ namedtype.OptionalNamedType('protection', PKIProtection().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('extraCerts',
+ univ.SequenceOf(
+ componentType=CMPCertificate()
+ ).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX),
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)
+ )
+ )
+ )
+
+
+class PKIMessages(univ.SequenceOf):
+ """
+ PKIMessages ::= SEQUENCE SIZE (1..MAX) OF PKIMessage
+ """
+ componentType = PKIMessage()
+ sizeSpec = univ.SequenceOf.sizeSpec + constraint.ValueSizeConstraint(1, MAX)
+
+
+# pyasn1 does not naturally handle recursive definitions, thus this hack:
+# NestedMessageContent ::= PKIMessages
+NestedMessageContent._componentType = PKIMessages()
+nestedMessageContent._componentType = PKIMessages()
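+
+
+# Illustrative sketch, not part of the upstream module: a minimal
+# PKIStatusInfo reporting a rejection with the badRequest failure bit.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode as der_encode
+
+    statusInfo = PKIStatusInfo()
+    statusInfo['status'] = PKIStatus('rejection')
+    statusInfo['failInfo'] = PKIFailureInfo('badRequest')
+    substrate = der_encode(statusInfo)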
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc4211.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4211.py
new file mode 100644
index 0000000000..9783058e2c
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4211.py
@@ -0,0 +1,396 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate Request
+# Message Format (CRMF)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc4211.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3280
+from pyasn1_modules import rfc3852
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_pkip = _buildOid(id_pkix, 5)
+
+id_regCtrl = _buildOid(id_pkip, 1)
+
+
+class SinglePubInfo(univ.Sequence):
+ pass
+
+
+SinglePubInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pubMethod', univ.Integer(
+ namedValues=namedval.NamedValues(('dontCare', 0), ('x500', 1), ('web', 2), ('ldap', 3)))),
+ namedtype.OptionalNamedType('pubLocation', rfc3280.GeneralName())
+)
+
+
+class UTF8Pairs(char.UTF8String):
+ pass
+
+
+class PKMACValue(univ.Sequence):
+ pass
+
+
+PKMACValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algId', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('value', univ.BitString())
+)
+
+
+class POPOSigningKeyInput(univ.Sequence):
+ pass
+
+
+POPOSigningKeyInput.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'authInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'sender', rfc3280.GeneralName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))
+ ),
+ namedtype.NamedType(
+ 'publicKeyMAC', PKMACValue()
+ )
+ )
+ )
+ ),
+ namedtype.NamedType('publicKey', rfc3280.SubjectPublicKeyInfo())
+)
+
+
+class POPOSigningKey(univ.Sequence):
+ pass
+
+
+POPOSigningKey.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('poposkInput', POPOSigningKeyInput().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('algorithmIdentifier', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class Attributes(univ.SetOf):
+ pass
+
+
+Attributes.componentType = rfc3280.Attribute()
+
+
+class PrivateKeyInfo(univ.Sequence):
+ pass
+
+
+PrivateKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('privateKeyAlgorithm', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('privateKey', univ.OctetString()),
+ namedtype.OptionalNamedType('attributes',
+ Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class EncryptedValue(univ.Sequence):
+ pass
+
+
+EncryptedValue.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('intendedAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('symmAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('encSymmKey', univ.BitString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('keyAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('valueHint', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('encValue', univ.BitString())
+)
+
+
+class EncryptedKey(univ.Choice):
+ pass
+
+
+EncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedValue', EncryptedValue()),
+ namedtype.NamedType('envelopedData', rfc3852.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyGenParameters(univ.OctetString):
+ pass
+
+
+class PKIArchiveOptions(univ.Choice):
+ pass
+
+
+PKIArchiveOptions.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptedPrivKey',
+ EncryptedKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyGenParameters',
+ KeyGenParameters().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('archiveRemGenPrivKey',
+ univ.Boolean().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+id_regCtrl_authenticator = _buildOid(id_regCtrl, 2)
+
+id_regInfo = _buildOid(id_pkip, 2)
+
+id_regInfo_certReq = _buildOid(id_regInfo, 2)
+
+
+class ProtocolEncrKey(rfc3280.SubjectPublicKeyInfo):
+ pass
+
+
+class Authenticator(char.UTF8String):
+ pass
+
+
+class SubsequentMessage(univ.Integer):
+ pass
+
+
+SubsequentMessage.namedValues = namedval.NamedValues(
+ ('encrCert', 0),
+ ('challengeResp', 1)
+)
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ pass
+
+
+AttributeTypeAndValue.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', univ.ObjectIdentifier()),
+ namedtype.NamedType('value', univ.Any())
+)
+
+
+class POPOPrivKey(univ.Choice):
+ pass
+
+
+POPOPrivKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('thisMessage',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subsequentMessage',
+ SubsequentMessage().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dhMAC',
+ univ.BitString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('agreeMAC',
+ PKMACValue().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('encryptedKey', rfc3852.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class ProofOfPossession(univ.Choice):
+ pass
+
+
+ProofOfPossession.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('raVerified',
+ univ.Null().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signature', POPOSigningKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('keyEncipherment',
+ POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('keyAgreement',
+ POPOPrivKey().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class OptionalValidity(univ.Sequence):
+ pass
+
+
+OptionalValidity.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', rfc3280.Time().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('notAfter', rfc3280.Time().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class CertTemplate(univ.Sequence):
+ pass
+
+
+CertTemplate.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', rfc3280.Version().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('serialNumber', univ.Integer().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('signingAlg', rfc3280.AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('issuer', rfc3280.Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.OptionalNamedType('validity', OptionalValidity().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.OptionalNamedType('subject', rfc3280.Name().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('publicKey', rfc3280.SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.OptionalNamedType('issuerUID', rfc3280.UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.OptionalNamedType('subjectUID', rfc3280.UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8))),
+ namedtype.OptionalNamedType('extensions', rfc3280.Extensions().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 9)))
+)
+
+
+class Controls(univ.SequenceOf):
+ pass
+
+
+Controls.componentType = AttributeTypeAndValue()
+Controls.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertRequest(univ.Sequence):
+ pass
+
+
+CertRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReqId', univ.Integer()),
+ namedtype.NamedType('certTemplate', CertTemplate()),
+ namedtype.OptionalNamedType('controls', Controls())
+)
+
+
+class CertReqMsg(univ.Sequence):
+ pass
+
+
+CertReqMsg.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certReq', CertRequest()),
+ namedtype.OptionalNamedType('popo', ProofOfPossession()),
+ namedtype.OptionalNamedType('regInfo', univ.SequenceOf(componentType=AttributeTypeAndValue()))
+)
+
+
+class CertReqMessages(univ.SequenceOf):
+ pass
+
+
+CertReqMessages.componentType = CertReqMsg()
+CertReqMessages.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class CertReq(CertRequest):
+ pass
+
+
+id_regCtrl_pkiPublicationInfo = _buildOid(id_regCtrl, 3)
+
+
+class CertId(univ.Sequence):
+ pass
+
+
+CertId.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc3280.GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+)
+
+
+class OldCertId(CertId):
+ pass
+
+
+class PKIPublicationInfo(univ.Sequence):
+ pass
+
+
+PKIPublicationInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('action',
+ univ.Integer(namedValues=namedval.NamedValues(('dontPublish', 0), ('pleasePublish', 1)))),
+ namedtype.OptionalNamedType('pubInfos', univ.SequenceOf(componentType=SinglePubInfo()))
+)
+
+
+class EncKeyWithID(univ.Sequence):
+ pass
+
+
+EncKeyWithID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('privateKey', PrivateKeyInfo()),
+ namedtype.OptionalNamedType(
+ 'identifier', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('string', char.UTF8String()),
+ namedtype.NamedType('generalName', rfc3280.GeneralName())
+ )
+ )
+ )
+)
+
+id_regCtrl_protocolEncrKey = _buildOid(id_regCtrl, 6)
+
+id_regCtrl_oldCertID = _buildOid(id_regCtrl, 5)
+
+id_smime = _buildOid(1, 2, 840, 113549, 1, 9, 16)
+
+
+class PBMParameter(univ.Sequence):
+ pass
+
+
+PBMParameter.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString()),
+ namedtype.NamedType('owf', rfc3280.AlgorithmIdentifier()),
+ namedtype.NamedType('iterationCount', univ.Integer()),
+ namedtype.NamedType('mac', rfc3280.AlgorithmIdentifier())
+)
+
+id_regCtrl_regToken = _buildOid(id_regCtrl, 1)
+
+id_regCtrl_pkiArchiveOptions = _buildOid(id_regCtrl, 4)
+
+id_regInfo_utf8Pairs = _buildOid(id_regInfo, 1)
+
+id_ct = _buildOid(id_smime, 1)
+
+id_ct_encKeyWithID = _buildOid(id_ct, 21)
+
+
+class RegToken(char.UTF8String):
+ pass
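+
+
+# Illustrative sketch, not part of the upstream module: a
+# PKIPublicationInfo asking the CA to publish the certificate on the
+# web.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode as der_encode
+
+    single = SinglePubInfo()
+    single['pubMethod'] = 'web'
+
+    pubInfo = PKIPublicationInfo()
+    pubInfo['action'] = 'pleasePublish'
+    pubInfo['pubInfos'].append(single)
+    substrate = der_encode(pubInfo)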
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc4334.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4334.py
new file mode 100644
index 0000000000..44cd31b166
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4334.py
@@ -0,0 +1,75 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extensions and Attributes Supporting Authentication
+# in PPP and Wireless LAN Networks
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4334.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# OID Arcs
+
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_aca = univ.ObjectIdentifier('1.3.6.1.5.5.7.10')
+
+
+# Extended Key Usage Values
+
+id_kp_eapOverPPP = id_kp + (13, )
+
+id_kp_eapOverLAN = id_kp + (14, )
+
+
+# Wireless LAN SSID Extension
+
+id_pe_wlanSSID = id_pe + (13, )
+
+class SSID(univ.OctetString):
+    subtypeSpec = constraint.ValueSizeConstraint(1, 32)
+
+
+class SSIDList(univ.SequenceOf):
+ componentType = SSID()
+    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Wireless LAN SSID Attribute Certificate Attribute
+
+id_aca_wlanSSID = id_aca + (7, )
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_aca_wlanSSID: SSIDList(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
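+
+
+# Illustrative sketch, not part of the upstream module: an SSIDList
+# carrying one (hypothetical) network name.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode as der_encode
+
+    ssids = SSIDList()
+    ssids.append(SSID('example-wlan'))
+    substrate = der_encode(ssids)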
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc4985.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4985.py
new file mode 100644
index 0000000000..318e412380
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc4985.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Expression of Service Names in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc4985.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# As specified in Appendix A.2 of RFC 4985
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_dnsSRV = id_on + (7, )
+
+
+class SRVName(char.IA5String):
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+srvName = rfc5280.AnotherName()
+srvName['type-id'] = id_on_dnsSRV
+srvName['value'] = SRVName()
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_dnsSRV: SRVName(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
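+
+
+# Illustrative sketch, not part of the upstream module: an AnotherName
+# carrying a (hypothetical) SRVName, with the open-type value supplied
+# in pre-encoded form.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode as der_encode
+
+    name = rfc5280.AnotherName()
+    name['type-id'] = id_on_dnsSRV
+    name['value'] = der_encode(SRVName('_ldap.example.com'))
+    substrate = der_encode(name)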
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5035.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5035.py
new file mode 100644
index 0000000000..1cec98249c
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5035.py
@@ -0,0 +1,199 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Update to Enhanced Security Services for S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5035.txt
+#
+
+from pyasn1.codec.der.encoder import encode as der_encode
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2634
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+
+ContentType = rfc5652.ContentType
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+PolicyInformation = rfc5280.PolicyInformation
+
+GeneralNames = rfc5280.GeneralNames
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+
+# Signing Certificate Attribute V1 and V2
+
+id_aa_signingCertificate = rfc2634.id_aa_signingCertificate
+
+id_aa_signingCertificateV2 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.47')
+
+Hash = rfc2634.Hash
+
+IssuerSerial = rfc2634.IssuerSerial
+
+ESSCertID = rfc2634.ESSCertID
+
+SigningCertificate = rfc2634.SigningCertificate
+
+
+sha256AlgId = AlgorithmIdentifier()
+sha256AlgId['algorithm'] = rfc4055.id_sha256
+# A non-schema object stands in for the absent sha256AlgId['parameters']
+sha256AlgId['parameters'] = der_encode(univ.OctetString(''))
+
+
+class ESSCertIDv2(univ.Sequence):
+ pass
+
+ESSCertIDv2.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('hashAlgorithm', sha256AlgId),
+ namedtype.NamedType('certHash', Hash()),
+ namedtype.OptionalNamedType('issuerSerial', IssuerSerial())
+)
+
+
+class SigningCertificateV2(univ.Sequence):
+ pass
+
+SigningCertificateV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certs', univ.SequenceOf(
+ componentType=ESSCertIDv2())),
+ namedtype.OptionalNamedType('policies', univ.SequenceOf(
+ componentType=PolicyInformation()))
+)
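+
+# A minimal construction sketch (illustrative only; "cert_der" stands for
+# the DER encoding of some certificate and is not defined here). The
+# hashAlgorithm component is left at its SHA-256 default.
+#
+#     import hashlib
+#
+#     certid = ESSCertIDv2()
+#     certid['certHash'] = hashlib.sha256(cert_der).digest()
+#     signing_cert_v2 = SigningCertificateV2()
+#     signing_cert_v2['certs'].append(certid)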
+
+
+# Mail List Expansion History Attribute
+
+id_aa_mlExpandHistory = rfc2634.id_aa_mlExpandHistory
+
+ub_ml_expansion_history = rfc2634.ub_ml_expansion_history
+
+EntityIdentifier = rfc2634.EntityIdentifier
+
+MLReceiptPolicy = rfc2634.MLReceiptPolicy
+
+MLData = rfc2634.MLData
+
+MLExpansionHistory = rfc2634.MLExpansionHistory
+
+
+# ESS Security Label Attribute
+
+id_aa_securityLabel = rfc2634.id_aa_securityLabel
+
+ub_privacy_mark_length = rfc2634.ub_privacy_mark_length
+
+ub_security_categories = rfc2634.ub_security_categories
+
+ub_integer_options = rfc2634.ub_integer_options
+
+ESSPrivacyMark = rfc2634.ESSPrivacyMark
+
+SecurityClassification = rfc2634.SecurityClassification
+
+SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
+
+SecurityCategory = rfc2634.SecurityCategory
+
+SecurityCategories = rfc2634.SecurityCategories
+
+ESSSecurityLabel = rfc2634.ESSSecurityLabel
+
+
+# Equivalent Labels Attribute
+
+id_aa_equivalentLabels = rfc2634.id_aa_equivalentLabels
+
+EquivalentLabels = rfc2634.EquivalentLabels
+
+
+# Content Identifier Attribute
+
+id_aa_contentIdentifier = rfc2634.id_aa_contentIdentifier
+
+ContentIdentifier = rfc2634.ContentIdentifier
+
+
+# Content Reference Attribute
+
+id_aa_contentReference = rfc2634.id_aa_contentReference
+
+ContentReference = rfc2634.ContentReference
+
+
+# Message Signature Digest Attribute
+
+id_aa_msgSigDigest = rfc2634.id_aa_msgSigDigest
+
+MsgSigDigest = rfc2634.MsgSigDigest
+
+
+# Content Hints Attribute
+
+id_aa_contentHint = rfc2634.id_aa_contentHint
+
+ContentHints = rfc2634.ContentHints
+
+
+# Receipt Request Attribute
+
+AllOrFirstTier = rfc2634.AllOrFirstTier
+
+ReceiptsFrom = rfc2634.ReceiptsFrom
+
+id_aa_receiptRequest = rfc2634.id_aa_receiptRequest
+
+ub_receiptsTo = rfc2634.ub_receiptsTo
+
+ReceiptRequest = rfc2634.ReceiptRequest
+
+
+# Receipt Content Type
+
+ESSVersion = rfc2634.ESSVersion
+
+id_ct_receipt = rfc2634.id_ct_receipt
+
+Receipt = rfc2634.Receipt
+
+
+# Map of Attribute Type to the Attribute structure is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_signingCertificateV2: SigningCertificateV2(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_receipt: Receipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
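+
+
+# A decoding sketch (illustrative only; "substrate" stands for the DER
+# encoding of a CMS Attribute). With decodeOpenTypes=True, the map
+# entries registered above let the decoder return a SigningCertificateV2
+# instead of a raw ANY value.
+#
+#     from pyasn1.codec.der.decoder import decode as der_decode
+#
+#     attr, _ = der_decode(substrate, asn1Spec=rfc5652.Attribute(),
+#                          decodeOpenTypes=True)
+#     if attr['attrType'] == id_aa_signingCertificateV2:
+#         signing_cert_v2 = attr['attrValues'][0]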
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5083.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5083.py
new file mode 100644
index 0000000000..26ef550c47
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5083.py
@@ -0,0 +1,52 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley without assistance from the asn1ate tool.
+# Modified by Russ Housley to add a map for use with opentypes and
+# simplify the code for the object identifier assignment.
+#
+# Copyright (c) 2018, 2019 Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authenticated-Enveloped-Data for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5083.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# CMS Authenticated-Enveloped-Data Content Type
+
+id_ct_authEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.23')
+
+class AuthEnvelopedData(univ.Sequence):
+ pass
+
+AuthEnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', rfc5652.CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', rfc5652.OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', rfc5652.RecipientInfos()),
+ namedtype.NamedType('authEncryptedContentInfo', rfc5652.EncryptedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', rfc5652.AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('mac', rfc5652.MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', rfc5652.UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_authEnvelopedData: AuthEnvelopedData(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
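+
+
+# A decoding sketch (illustrative only; "substrate" stands for the DER
+# encoding of a CMS ContentInfo). Registering AuthEnvelopedData above
+# lets ContentInfo decoding resolve the inner content automatically.
+#
+#     from pyasn1.codec.der.decoder import decode as der_decode
+#
+#     ci, _ = der_decode(substrate, asn1Spec=rfc5652.ContentInfo(),
+#                        decodeOpenTypes=True)
+#     if ci['contentType'] == id_ct_authEnvelopedData:
+#         auth_env_data = ci['content']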
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5084.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5084.py
new file mode 100644
index 0000000000..7686839561
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5084.py
@@ -0,0 +1,97 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool, with manual
+# changes to AES_CCM_ICVlen.subtypeSpec and added comments
+#
+# Copyright (c) 2018-2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# AES-CCM and AES-GCM Algorithms for use with the Authenticated-Enveloped-Data
+# protecting content type for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5084.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AES_CCM_ICVlen(univ.Integer):
+ pass
+
+
+class AES_GCM_ICVlen(univ.Integer):
+ pass
+
+
+AES_CCM_ICVlen.subtypeSpec = constraint.SingleValueConstraint(4, 6, 8, 10, 12, 14, 16)
+
+AES_GCM_ICVlen.subtypeSpec = constraint.ValueRangeConstraint(12, 16)
+
+
+class CCMParameters(univ.Sequence):
+ pass
+
+
+CCMParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('aes-nonce', univ.OctetString().subtype(subtypeSpec=constraint.ValueSizeConstraint(7, 13))),
+ # The aes-nonce parameter contains 15-L octets, where L is the size of the length field. L=8 is RECOMMENDED.
+ # Within the scope of any content-authenticated-encryption key, the nonce value MUST be unique.
+ namedtype.DefaultedNamedType('aes-ICVlen', AES_CCM_ICVlen().subtype(value=12))
+)
+
+
+class GCMParameters(univ.Sequence):
+ pass
+
+
+GCMParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('aes-nonce', univ.OctetString()),
+ # The aes-nonce may have any number of bits between 8 and 2^64, but it MUST be a multiple of 8 bits.
+ # Within the scope of any content-authenticated-encryption key, the nonce value MUST be unique.
+ # A nonce value of 12 octets can be processed more efficiently, so that length is RECOMMENDED.
+ namedtype.DefaultedNamedType('aes-ICVlen', AES_GCM_ICVlen().subtype(value=12))
+)
+
+aes = _OID(2, 16, 840, 1, 101, 3, 4, 1)
+
+id_aes128_CCM = _OID(aes, 7)
+
+id_aes128_GCM = _OID(aes, 6)
+
+id_aes192_CCM = _OID(aes, 27)
+
+id_aes192_GCM = _OID(aes, 26)
+
+id_aes256_CCM = _OID(aes, 47)
+
+id_aes256_GCM = _OID(aes, 46)
+
+
+# Map of Algorithm Identifier OIDs to Parameters is added to the
+# ones in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_aes128_CCM: CCMParameters(),
+ id_aes128_GCM: GCMParameters(),
+ id_aes192_CCM: CCMParameters(),
+ id_aes192_GCM: GCMParameters(),
+ id_aes256_CCM: CCMParameters(),
+ id_aes256_GCM: GCMParameters(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
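+
+
+# A construction sketch (illustrative only): an AlgorithmIdentifier for
+# AES-256-GCM with a fresh 12-octet nonce and the default 12-octet ICV.
+#
+#     import os
+#     from pyasn1.codec.der.encoder import encode as der_encode
+#
+#     params = GCMParameters()
+#     params['aes-nonce'] = os.urandom(12)
+#     algid = rfc5280.AlgorithmIdentifier()
+#     algid['algorithm'] = id_aes256_GCM
+#     algid['parameters'] = der_encode(params)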
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5208.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5208.py
new file mode 100644
index 0000000000..14082a89bd
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5208.py
@@ -0,0 +1,56 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS#8 syntax
+#
+# ASN.1 source from:
+# http://tools.ietf.org/html/rfc5208
+#
+# Sample captures could be obtained with the "openssl pkcs8 -topk8" command
+#
+from pyasn1_modules import rfc2251
+from pyasn1_modules.rfc2459 import *
+
+
+class KeyEncryptionAlgorithms(AlgorithmIdentifier):
+ pass
+
+
+class PrivateKeyAlgorithms(AlgorithmIdentifier):
+ pass
+
+
+class EncryptedData(univ.OctetString):
+ pass
+
+
+class EncryptedPrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('encryptedData', EncryptedData())
+ )
+
+
+class PrivateKey(univ.OctetString):
+ pass
+
+
+class Attributes(univ.SetOf):
+ componentType = rfc2251.Attribute()
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0), ('v2', 1))
+
+
+class PrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('privateKeyAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('privateKey', PrivateKey()),
+ namedtype.OptionalNamedType('attributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
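+
+
+# A decoding sketch (illustrative only; "der_bytes" stands for an
+# unencrypted PKCS#8 blob, e.g. the base64-decoded body of a
+# "BEGIN PRIVATE KEY" PEM block):
+#
+#     from pyasn1.codec.der.decoder import decode as der_decode
+#
+#     pki, rest = der_decode(der_bytes, asn1Spec=PrivateKeyInfo())
+#     assert not rest
+#     key_oid = pki['privateKeyAlgorithm']['algorithm']
+#     raw_key = pki['privateKey'].asOctets()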
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5280.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5280.py
new file mode 100644
index 0000000000..f2b52b25c2
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5280.py
@@ -0,0 +1,1658 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with the asn1ate tool.
+# Updated by Russ Housley for ORAddress Extension Attribute opentype support.
+# Updated by Russ Housley for AlgorithmIdentifier opentype support.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internet X.509 Public Key Infrastructure Certificate and Certificate
+# Revocation List (CRL) Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5280.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
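+
+
+# For example, _buildOid(2, 5, 29) and
+# _buildOid(univ.ObjectIdentifier('2.5'), 29) both yield the
+# ObjectIdentifier 2.5.29.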
+
+
+ub_e163_4_sub_address_length = univ.Integer(40)
+
+ub_e163_4_number_length = univ.Integer(15)
+
+unformatted_postal_address = univ.Integer(16)
+
+
+class TerminalType(univ.Integer):
+ pass
+
+
+TerminalType.namedValues = namedval.NamedValues(
+ ('telex', 3),
+ ('teletex', 4),
+ ('g3-facsimile', 5),
+ ('g4-facsimile', 6),
+ ('ia5-terminal', 7),
+ ('videotex', 8)
+)
+
+
+class Extension(univ.Sequence):
+ pass
+
+
+Extension.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extnID', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('critical', univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('extnValue', univ.OctetString())
+)
+
+
+class Extensions(univ.SequenceOf):
+ pass
+
+
+Extensions.componentType = Extension()
+Extensions.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+physical_delivery_personal_name = univ.Integer(13)
+
+ub_unformatted_address_length = univ.Integer(180)
+
+ub_pds_parameter_length = univ.Integer(30)
+
+ub_pds_physical_address_lines = univ.Integer(6)
+
+
+class UnformattedPostalAddress(univ.Set):
+ pass
+
+
+UnformattedPostalAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-address', univ.SequenceOf(componentType=char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_unformatted_address_length)))
+)
+
+ub_organization_name = univ.Integer(64)
+
+
+class X520OrganizationName(univ.Choice):
+ pass
+
+
+X520OrganizationName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_organization_name)))
+)
+
+ub_x121_address_length = univ.Integer(16)
+
+pds_name = univ.Integer(7)
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_kp = _buildOid(id_pkix, 3)
+
+ub_postal_code_length = univ.Integer(16)
+
+
+class PostalCode(univ.Choice):
+ pass
+
+
+PostalCode.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length))),
+ namedtype.NamedType('printable-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_postal_code_length)))
+)
+
+ub_generation_qualifier_length = univ.Integer(3)
+
+unique_postal_name = univ.Integer(20)
+
+
+class DomainComponent(char.IA5String):
+ pass
+
+
+ub_domain_defined_attribute_value_length = univ.Integer(128)
+
+ub_match = univ.Integer(128)
+
+id_at = _buildOid(2, 5, 4)
+
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+id_at_organizationalUnitName = _buildOid(id_at, 11)
+
+terminal_type = univ.Integer(23)
+
+
+class PDSParameter(univ.Set):
+ pass
+
+
+PDSParameter.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('printable-string', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length))),
+ namedtype.OptionalNamedType('teletex-string', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_pds_parameter_length)))
+)
+
+
+class PhysicalDeliveryPersonalName(PDSParameter):
+ pass
+
+
+ub_surname_length = univ.Integer(40)
+
+id_ad = _buildOid(id_pkix, 48)
+
+ub_domain_defined_attribute_type_length = univ.Integer(8)
+
+
+class TeletexDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+TeletexDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+ub_domain_defined_attributes = univ.Integer(4)
+
+
+class TeletexDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+TeletexDomainDefinedAttributes.componentType = TeletexDomainDefinedAttribute()
+TeletexDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+extended_network_address = univ.Integer(22)
+
+ub_locality_name = univ.Integer(128)
+
+
+class X520LocalityName(univ.Choice):
+ pass
+
+
+X520LocalityName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_locality_name)))
+)
+
+teletex_organization_name = univ.Integer(3)
+
+ub_given_name_length = univ.Integer(16)
+
+ub_initials_length = univ.Integer(5)
+
+
+class PersonalName(univ.Set):
+ pass
+
+
+PersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+ub_organizational_unit_name_length = univ.Integer(32)
+
+
+class OrganizationalUnitName(char.PrintableString):
+ pass
+
+
+OrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+id_at_generationQualifier = _buildOid(id_at, 44)
+
+
+class Version(univ.Integer):
+ pass
+
+
+Version.namedValues = namedval.NamedValues(
+ ('v1', 0),
+ ('v2', 1),
+ ('v3', 2)
+)
+
+
+class CertificateSerialNumber(univ.Integer):
+ pass
+
+
+algorithmIdentifierMap = {}
+
+
+class AlgorithmIdentifier(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any(),
+ openType=opentype.OpenType('algorithm', algorithmIdentifierMap)
+ )
+ )
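+
+
+# A decoding sketch (illustrative only; "substrate" stands for a DER
+# encoding of an AlgorithmIdentifier). With decodeOpenTypes=True the
+# 'parameters' ANY is decoded via algorithmIdentifierMap whenever the
+# algorithm OID has been registered there.
+#
+#     from pyasn1.codec.der.decoder import decode as der_decode
+#
+#     algid, _ = der_decode(substrate, asn1Spec=AlgorithmIdentifier(),
+#                           decodeOpenTypes=True)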
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+certificateAttributesMap = {}
+
+
+class AttributeTypeAndValue(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType(
+ 'value', AttributeValue(),
+ openType=opentype.OpenType('type', certificateAttributesMap)
+ )
+ )
+
+
+class RelativeDistinguishedName(univ.SetOf):
+ pass
+
+
+RelativeDistinguishedName.componentType = AttributeTypeAndValue()
+RelativeDistinguishedName.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class RDNSequence(univ.SequenceOf):
+ pass
+
+
+RDNSequence.componentType = RelativeDistinguishedName()
+
+
+class Name(univ.Choice):
+ pass
+
+
+Name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rdnSequence', RDNSequence())
+)
+
+
+class TBSCertList(univ.Sequence):
+ pass
+
+
+TBSCertList.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('version', Version()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('thisUpdate', Time()),
+ namedtype.OptionalNamedType('nextUpdate', Time()),
+ namedtype.OptionalNamedType(
+ 'revokedCertificates', univ.SequenceOf(
+ componentType=univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('userCertificate', CertificateSerialNumber()),
+ namedtype.NamedType('revocationDate', Time()),
+ namedtype.OptionalNamedType('crlEntryExtensions', Extensions())
+ )
+ )
+ )
+ ),
+ namedtype.OptionalNamedType(
+ 'crlExtensions', Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class CertificateList(univ.Sequence):
+ pass
+
+
+CertificateList.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertList', TBSCertList()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class PhysicalDeliveryOfficeName(PDSParameter):
+ pass
+
+
+ub_extension_attributes = univ.Integer(256)
+
+certificateExtensionsMap = {}
+
+oraddressExtensionAttributeMap = {}
+
+
+class ExtensionAttribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'extension-attribute-type',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, ub_extension_attributes)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType(
+ 'extension-attribute-value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)),
+ openType=opentype.OpenType('extension-attribute-type', oraddressExtensionAttributeMap))
+ )
+
+id_qt = _buildOid(id_pkix, 2)
+
+id_qt_cps = _buildOid(id_qt, 1)
+
+id_at_stateOrProvinceName = _buildOid(id_at, 8)
+
+id_at_title = _buildOid(id_at, 12)
+
+id_at_serialNumber = _buildOid(id_at, 5)
+
+
+class X520dnQualifier(char.PrintableString):
+ pass
+
+
+class PosteRestanteAddress(PDSParameter):
+ pass
+
+
+poste_restante_address = univ.Integer(19)
+
+
+class UniqueIdentifier(univ.BitString):
+ pass
+
+
+class Validity(univ.Sequence):
+ pass
+
+
+Validity.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBefore', Time()),
+ namedtype.NamedType('notAfter', Time())
+)
+
+
+class SubjectPublicKeyInfo(univ.Sequence):
+ pass
+
+
+SubjectPublicKeyInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+)
+
+
+class TBSCertificate(univ.Sequence):
+ pass
+
+
+TBSCertificate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ Version().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value="v1")),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.NamedType('signature', AlgorithmIdentifier()),
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('validity', Validity()),
+ namedtype.NamedType('subject', Name()),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('issuerUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('subjectUniqueID', UniqueIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('extensions',
+ Extensions().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+physical_delivery_office_name = univ.Integer(10)
+
+ub_name = univ.Integer(32768)
+
+
+class X520name(univ.Choice):
+ pass
+
+
+X520name.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_name)))
+)
+
+id_at_dnQualifier = _buildOid(id_at, 46)
+
+ub_serial_number = univ.Integer(64)
+
+ub_pseudonym = univ.Integer(128)
+
+pkcs_9 = _buildOid(1, 2, 840, 113549, 1, 9)
+
+
+class X121Address(char.NumericString):
+ pass
+
+
+X121Address.subtypeSpec = constraint.ValueSizeConstraint(1, ub_x121_address_length)
+
+
+class NetworkAddress(X121Address):
+ pass
+
+
+ub_integer_options = univ.Integer(256)
+
+id_at_commonName = _buildOid(id_at, 3)
+
+ub_organization_name_length = univ.Integer(64)
+
+id_ad_ocsp = _buildOid(id_ad, 1)
+
+ub_country_name_numeric_length = univ.Integer(3)
+
+ub_country_name_alpha_length = univ.Integer(2)
+
+
+class PhysicalDeliveryCountryName(univ.Choice):
+ pass
+
+
+PhysicalDeliveryCountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+id_emailAddress = _buildOid(pkcs_9, 1)
+
+common_name = univ.Integer(1)
+
+
+class X520Pseudonym(univ.Choice):
+ pass
+
+
+X520Pseudonym.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_pseudonym)))
+)
+
+ub_domain_name_length = univ.Integer(16)
+
+
+class AdministrationDomainName(univ.Choice):
+ pass
+
+
+AdministrationDomainName.tagSet = univ.Choice.tagSet.tagExplicitly(
+ tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 2))
+AdministrationDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, ub_domain_name_length)))
+)
+
+
+class PresentationAddress(univ.Sequence):
+ pass
+
+
+PresentationAddress.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tSelector', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('nAddresses', univ.SetOf(componentType=univ.OctetString()).subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+class ExtendedNetworkAddress(univ.Choice):
+ pass
+
+
+ExtendedNetworkAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'e163-4-address', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('number', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_number_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('sub-address', char.NumericString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_e163_4_sub_address_length)).subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('psap-address', PresentationAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class TeletexOrganizationName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+ub_terminal_id_length = univ.Integer(24)
+
+
+class TerminalIdentifier(char.PrintableString):
+ pass
+
+
+TerminalIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_terminal_id_length)
+
+id_ad_caIssuers = _buildOid(id_ad, 2)
+
+id_at_countryName = _buildOid(id_at, 6)
+
+
+class StreetAddress(PDSParameter):
+ pass
+
+
+postal_code = univ.Integer(9)
+
+id_at_givenName = _buildOid(id_at, 42)
+
+ub_title = univ.Integer(64)
+
+
+class ExtensionAttributes(univ.SetOf):
+ pass
+
+
+ExtensionAttributes.componentType = ExtensionAttribute()
+ExtensionAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_extension_attributes)
+
+ub_emailaddress_length = univ.Integer(255)
+
+id_ad_caRepository = _buildOid(id_ad, 5)
+
+
+class ExtensionORAddressComponents(PDSParameter):
+ pass
+
+
+ub_organizational_unit_name = univ.Integer(64)
+
+
+class X520OrganizationalUnitName(univ.Choice):
+ pass
+
+
+X520OrganizationalUnitName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_organizational_unit_name)))
+)
+
+
+class LocalPostalAttributes(PDSParameter):
+ pass
+
+
+teletex_organizational_unit_names = univ.Integer(5)
+
+
+class X520Title(univ.Choice):
+ pass
+
+
+X520Title.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_title)))
+)
+
+id_at_localityName = _buildOid(id_at, 7)
+
+id_at_initials = _buildOid(id_at, 43)
+
+ub_state_name = univ.Integer(128)
+
+
+class X520StateOrProvinceName(univ.Choice):
+ pass
+
+
+X520StateOrProvinceName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_state_name)))
+)
+
+physical_delivery_organization_name = univ.Integer(14)
+
+id_at_surname = _buildOid(id_at, 4)
+
+
+class X520countryName(char.PrintableString):
+ pass
+
+
+X520countryName.subtypeSpec = constraint.ValueSizeConstraint(2, 2)
+
+physical_delivery_office_number = univ.Integer(11)
+
+id_qt_unotice = _buildOid(id_qt, 2)
+
+
+class X520SerialNumber(char.PrintableString):
+ pass
+
+
+X520SerialNumber.subtypeSpec = constraint.ValueSizeConstraint(1, ub_serial_number)
+
+
+class Attribute(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', AttributeType()),
+ namedtype.NamedType('values',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('type', certificateAttributesMap))
+ )
+
+ub_common_name = univ.Integer(64)
+
+id_pe = _buildOid(id_pkix, 1)
+
+
+class ExtensionPhysicalDeliveryAddressComponents(PDSParameter):
+ pass
+
+
+class EmailAddress(char.IA5String):
+ pass
+
+
+EmailAddress.subtypeSpec = constraint.ValueSizeConstraint(1, ub_emailaddress_length)
+
+id_at_organizationName = _buildOid(id_at, 10)
+
+post_office_box_address = univ.Integer(18)
+
+
+class BuiltInDomainDefinedAttribute(univ.Sequence):
+ pass
+
+
+BuiltInDomainDefinedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_type_length))),
+ namedtype.NamedType('value', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_defined_attribute_value_length)))
+)
+
+
+class BuiltInDomainDefinedAttributes(univ.SequenceOf):
+ pass
+
+
+BuiltInDomainDefinedAttributes.componentType = BuiltInDomainDefinedAttribute()
+BuiltInDomainDefinedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, ub_domain_defined_attributes)
+
+id_at_pseudonym = _buildOid(id_at, 65)
+
+id_domainComponent = _buildOid(0, 9, 2342, 19200300, 100, 1, 25)
+
+
+class X520CommonName(univ.Choice):
+ pass
+
+
+X520CommonName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('utf8String',
+ char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name))),
+ namedtype.NamedType('bmpString',
+ char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, ub_common_name)))
+)
+
+extension_OR_address_components = univ.Integer(12)
+
+ub_organizational_units = univ.Integer(4)
+
+teletex_personal_name = univ.Integer(4)
+
+ub_numeric_user_id_length = univ.Integer(32)
+
+ub_common_name_length = univ.Integer(64)
+
+
+class TeletexCommonName(char.TeletexString):
+ pass
+
+
+TeletexCommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+class PhysicalDeliveryOrganizationName(PDSParameter):
+ pass
+
+
+extension_physical_delivery_address_components = univ.Integer(15)
+
+
+class NumericUserIdentifier(char.NumericString):
+ pass
+
+
+NumericUserIdentifier.subtypeSpec = constraint.ValueSizeConstraint(1, ub_numeric_user_id_length)
+
+
+class CountryName(univ.Choice):
+ pass
+
+
+CountryName.tagSet = univ.Choice.tagSet.tagExplicitly(tag.Tag(tag.tagClassApplication, tag.tagFormatConstructed, 1))
+CountryName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('x121-dcc-code', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_numeric_length, ub_country_name_numeric_length))),
+ namedtype.NamedType('iso-3166-alpha2-code', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(ub_country_name_alpha_length, ub_country_name_alpha_length)))
+)
+
+
+class OrganizationName(char.PrintableString):
+ pass
+
+
+OrganizationName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organization_name_length)
+
+
+class OrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+OrganizationalUnitNames.componentType = OrganizationalUnitName()
+OrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+
+class PrivateDomainName(univ.Choice):
+ pass
+
+
+PrivateDomainName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('numeric', char.NumericString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length))),
+ namedtype.NamedType('printable', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_domain_name_length)))
+)
+
+
+class BuiltInStandardAttributes(univ.Sequence):
+ pass
+
+
+BuiltInStandardAttributes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('country-name', CountryName()),
+ namedtype.OptionalNamedType('administration-domain-name', AdministrationDomainName()),
+ namedtype.OptionalNamedType('network-address', NetworkAddress().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('terminal-identifier', TerminalIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('private-domain-name', PrivateDomainName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.OptionalNamedType('organization-name', OrganizationName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('numeric-user-identifier', NumericUserIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('personal-name', PersonalName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.OptionalNamedType('organizational-unit-names', OrganizationalUnitNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6)))
+)
+
+
+class ORAddress(univ.Sequence):
+ pass
+
+
+ORAddress.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('built-in-standard-attributes', BuiltInStandardAttributes()),
+ namedtype.OptionalNamedType('built-in-domain-defined-attributes', BuiltInDomainDefinedAttributes()),
+ namedtype.OptionalNamedType('extension-attributes', ExtensionAttributes())
+)
+
+
+class DistinguishedName(RDNSequence):
+ pass
+
+
+id_ad_timeStamping = _buildOid(id_ad, 3)
+
+
+class PhysicalDeliveryOfficeNumber(PDSParameter):
+ pass
+
+
+teletex_domain_defined_attributes = univ.Integer(6)
+
+
+class UniquePostalName(PDSParameter):
+ pass
+
+
+physical_delivery_country_name = univ.Integer(8)
+
+ub_pds_name_length = univ.Integer(16)
+
+
+class PDSName(char.PrintableString):
+ pass
+
+
+PDSName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_pds_name_length)
+
+
+class TeletexPersonalName(univ.Set):
+ pass
+
+
+TeletexPersonalName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('surname', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_surname_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('given-name', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_given_name_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('initials', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_initials_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('generation-qualifier', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, ub_generation_qualifier_length)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+street_address = univ.Integer(17)
+
+
+class PostOfficeBoxAddress(PDSParameter):
+ pass
+
+
+local_postal_attributes = univ.Integer(21)
+
+
+class DirectoryString(univ.Choice):
+ pass
+
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString',
+ char.TeletexString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('printableString',
+ char.PrintableString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('universalString',
+ char.UniversalString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+teletex_common_name = univ.Integer(2)
+
+
+class CommonName(char.PrintableString):
+ pass
+
+
+CommonName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_common_name_length)
+
+
+class Certificate(univ.Sequence):
+ pass
+
+
+Certificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertificate', TBSCertificate()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
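+
+
+# A decoding sketch (illustrative only; "cert_der" stands for the DER
+# bytes of a certificate, e.g. the base64-decoded body of a PEM file):
+#
+#     from pyasn1.codec.der.decoder import decode as der_decode
+#
+#     cert, rest = der_decode(cert_der, asn1Spec=Certificate())
+#     assert not rest
+#     tbs = cert['tbsCertificate']
+#     serial = int(tbs['serialNumber'])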
+
+
+class TeletexOrganizationalUnitName(char.TeletexString):
+ pass
+
+
+TeletexOrganizationalUnitName.subtypeSpec = constraint.ValueSizeConstraint(1, ub_organizational_unit_name_length)
+
+id_at_name = _buildOid(id_at, 41)
+
+
+class TeletexOrganizationalUnitNames(univ.SequenceOf):
+ pass
+
+
+TeletexOrganizationalUnitNames.componentType = TeletexOrganizationalUnitName()
+TeletexOrganizationalUnitNames.sizeSpec = constraint.ValueSizeConstraint(1, ub_organizational_units)
+
+id_ce = _buildOid(2, 5, 29)
+
+id_ce_issuerAltName = _buildOid(id_ce, 18)
+
+
+class SkipCerts(univ.Integer):
+ pass
+
+
+SkipCerts.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class CRLReason(univ.Enumerated):
+ pass
+
+
+CRLReason.namedValues = namedval.NamedValues(
+ ('unspecified', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('removeFromCRL', 8),
+ ('privilegeWithdrawn', 9),
+ ('aACompromise', 10)
+)
+
+
+class PrivateKeyUsagePeriod(univ.Sequence):
+ pass
+
+
+PrivateKeyUsagePeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('notBefore', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('notAfter', useful.GeneralizedTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+anotherNameMap = {}
+
+
+class AnotherName(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type-id', univ.ObjectIdentifier()),
+ namedtype.NamedType(
+ 'value',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('type-id', anotherNameMap)
+ )
+ )
+
+
+class EDIPartyName(univ.Sequence):
+ pass
+
+
+EDIPartyName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('nameAssigner', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('partyName', DirectoryString().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class GeneralName(univ.Choice):
+ pass
+
+
+GeneralName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherName',
+ AnotherName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('rfc822Name',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('dNSName',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('x400Address',
+ ORAddress().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('directoryName',
+ Name().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4))),
+ namedtype.NamedType('ediPartyName',
+ EDIPartyName().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 5))),
+ namedtype.NamedType('uniformResourceIdentifier',
+ char.IA5String().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 6))),
+ namedtype.NamedType('iPAddress',
+ univ.OctetString().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('registeredID', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 8)))
+)
+
+
+class BaseDistance(univ.Integer):
+ pass
+
+
+BaseDistance.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class GeneralSubtree(univ.Sequence):
+ pass
+
+
+GeneralSubtree.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('base', GeneralName()),
+ namedtype.DefaultedNamedType('minimum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.OptionalNamedType('maximum', BaseDistance().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class GeneralNames(univ.SequenceOf):
+ pass
+
+
+GeneralNames.componentType = GeneralName()
+GeneralNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class DistributionPointName(univ.Choice):
+ pass
+
+
+DistributionPointName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('fullName',
+ GeneralNames().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('nameRelativeToCRLIssuer', RelativeDistinguishedName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ReasonFlags(univ.BitString):
+ pass
+
+
+ReasonFlags.namedValues = namedval.NamedValues(
+ ('unused', 0),
+ ('keyCompromise', 1),
+ ('cACompromise', 2),
+ ('affiliationChanged', 3),
+ ('superseded', 4),
+ ('cessationOfOperation', 5),
+ ('certificateHold', 6),
+ ('privilegeWithdrawn', 7),
+ ('aACompromise', 8)
+)
+
+
+class IssuingDistributionPoint(univ.Sequence):
+ pass
+
+
+IssuingDistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.DefaultedNamedType('onlyContainsUserCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsCACerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)).subtype(value=0)),
+ namedtype.OptionalNamedType('onlySomeReasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.DefaultedNamedType('indirectCRL', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)).subtype(value=0)),
+ namedtype.DefaultedNamedType('onlyContainsAttributeCerts', univ.Boolean().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)).subtype(value=0))
+)
+
+id_ce_certificatePolicies = _buildOid(id_ce, 32)
+
+id_kp_emailProtection = _buildOid(id_kp, 4)
+
+
+class AccessDescription(univ.Sequence):
+ pass
+
+
+AccessDescription.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('accessMethod', univ.ObjectIdentifier()),
+ namedtype.NamedType('accessLocation', GeneralName())
+)
+
+
+class IssuerAltName(GeneralNames):
+ pass
+
+
+id_ce_cRLDistributionPoints = _buildOid(id_ce, 31)
+
+holdInstruction = _buildOid(2, 2, 840, 10040, 2)
+
+id_holdinstruction_callissuer = _buildOid(holdInstruction, 2)
+
+id_ce_subjectDirectoryAttributes = _buildOid(id_ce, 9)
+
+id_ce_issuingDistributionPoint = _buildOid(id_ce, 28)
+
+
+class DistributionPoint(univ.Sequence):
+ pass
+
+
+DistributionPoint.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('distributionPoint', DistributionPointName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('reasons', ReasonFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('cRLIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class CRLDistributionPoints(univ.SequenceOf):
+ pass
+
+
+CRLDistributionPoints.componentType = DistributionPoint()
+CRLDistributionPoints.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class GeneralSubtrees(univ.SequenceOf):
+ pass
+
+
+GeneralSubtrees.componentType = GeneralSubtree()
+GeneralSubtrees.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class NameConstraints(univ.Sequence):
+ pass
+
+
+NameConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('permittedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedSubtrees', GeneralSubtrees().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SubjectDirectoryAttributes(univ.SequenceOf):
+ pass
+
+
+SubjectDirectoryAttributes.componentType = Attribute()
+SubjectDirectoryAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_kp_OCSPSigning = _buildOid(id_kp, 9)
+
+id_kp_timeStamping = _buildOid(id_kp, 8)
+
+
+class DisplayText(univ.Choice):
+ pass
+
+
+DisplayText.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ia5String', char.IA5String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('visibleString',
+ char.VisibleString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(subtypeSpec=constraint.ValueSizeConstraint(1, 200)))
+)
+
+
+class NoticeReference(univ.Sequence):
+ pass
+
+
+NoticeReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('organization', DisplayText()),
+ namedtype.NamedType('noticeNumbers', univ.SequenceOf(componentType=univ.Integer()))
+)
+
+
+class UserNotice(univ.Sequence):
+ pass
+
+
+UserNotice.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('noticeRef', NoticeReference()),
+ namedtype.OptionalNamedType('explicitText', DisplayText())
+)
+
+
+class PolicyQualifierId(univ.ObjectIdentifier):
+ pass
+
+
+policyQualifierInfoMap = {}
+
+
+class PolicyQualifierInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyQualifierId', PolicyQualifierId()),
+ namedtype.NamedType(
+ 'qualifier', univ.Any(),
+ openType=opentype.OpenType('policyQualifierId', policyQualifierInfoMap)
+ )
+ )
+
+
+class CertPolicyId(univ.ObjectIdentifier):
+ pass
+
+
+class PolicyInformation(univ.Sequence):
+ pass
+
+
+PolicyInformation.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyIdentifier', CertPolicyId()),
+ namedtype.OptionalNamedType('policyQualifiers', univ.SequenceOf(componentType=PolicyQualifierInfo()))
+)
+
+
+class CertificatePolicies(univ.SequenceOf):
+ pass
+
+
+CertificatePolicies.componentType = PolicyInformation()
+CertificatePolicies.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SubjectAltName(GeneralNames):
+ pass
+
+
+id_ce_basicConstraints = _buildOid(id_ce, 19)
+
+id_ce_authorityKeyIdentifier = _buildOid(id_ce, 35)
+
+id_kp_codeSigning = _buildOid(id_kp, 3)
+
+
+class BasicConstraints(univ.Sequence):
+ pass
+
+
+BasicConstraints.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('cA', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+id_ce_certificateIssuer = _buildOid(id_ce, 29)
+
+
+class PolicyMappings(univ.SequenceOf):
+ pass
+
+
+PolicyMappings.componentType = univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('issuerDomainPolicy', CertPolicyId()),
+ namedtype.NamedType('subjectDomainPolicy', CertPolicyId())
+ )
+)
+
+PolicyMappings.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class InhibitAnyPolicy(SkipCerts):
+ pass
+
+
+anyPolicy = _buildOid(id_ce_certificatePolicies, 0)
+
+
+class CRLNumber(univ.Integer):
+ pass
+
+
+CRLNumber.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class BaseCRLNumber(CRLNumber):
+ pass
+
+
+id_ce_nameConstraints = _buildOid(id_ce, 30)
+
+id_kp_serverAuth = _buildOid(id_kp, 1)
+
+id_ce_freshestCRL = _buildOid(id_ce, 46)
+
+id_ce_cRLReasons = _buildOid(id_ce, 21)
+
+id_ce_extKeyUsage = _buildOid(id_ce, 37)
+
+
+class KeyIdentifier(univ.OctetString):
+ pass
+
+
+class AuthorityKeyIdentifier(univ.Sequence):
+ pass
+
+
+AuthorityKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('keyIdentifier', KeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('authorityCertIssuer', GeneralNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('authorityCertSerialNumber', CertificateSerialNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class FreshestCRL(CRLDistributionPoints):
+ pass
+
+
+id_ce_policyConstraints = _buildOid(id_ce, 36)
+
+id_pe_authorityInfoAccess = _buildOid(id_pe, 1)
+
+
+class AuthorityInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+AuthorityInfoAccessSyntax.componentType = AccessDescription()
+AuthorityInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_holdinstruction_none = _buildOid(holdInstruction, 1)
+
+
+class CPSuri(char.IA5String):
+ pass
+
+
+id_pe_subjectInfoAccess = _buildOid(id_pe, 11)
+
+
+class SubjectKeyIdentifier(KeyIdentifier):
+ pass
+
+
+id_ce_subjectAltName = _buildOid(id_ce, 17)
+
+
+class KeyPurposeId(univ.ObjectIdentifier):
+ pass
+
+
+class ExtKeyUsageSyntax(univ.SequenceOf):
+ pass
+
+
+ExtKeyUsageSyntax.componentType = KeyPurposeId()
+ExtKeyUsageSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class HoldInstructionCode(univ.ObjectIdentifier):
+ pass
+
+
+id_ce_deltaCRLIndicator = _buildOid(id_ce, 27)
+
+id_ce_keyUsage = _buildOid(id_ce, 15)
+
+id_ce_holdInstructionCode = _buildOid(id_ce, 23)
+
+
+class SubjectInfoAccessSyntax(univ.SequenceOf):
+ pass
+
+
+SubjectInfoAccessSyntax.componentType = AccessDescription()
+SubjectInfoAccessSyntax.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class InvalidityDate(useful.GeneralizedTime):
+ pass
+
+
+class KeyUsage(univ.BitString):
+ pass
+
+
+KeyUsage.namedValues = namedval.NamedValues(
+ ('digitalSignature', 0),
+ ('nonRepudiation', 1),
+ ('keyEncipherment', 2),
+ ('dataEncipherment', 3),
+ ('keyAgreement', 4),
+ ('keyCertSign', 5),
+ ('cRLSign', 6),
+ ('encipherOnly', 7),
+ ('decipherOnly', 8)
+)
+
+id_ce_invalidityDate = _buildOid(id_ce, 24)
+
+id_ce_policyMappings = _buildOid(id_ce, 33)
+
+anyExtendedKeyUsage = _buildOid(id_ce_extKeyUsage, 0)
+
+id_ce_privateKeyUsagePeriod = _buildOid(id_ce, 16)
+
+id_ce_cRLNumber = _buildOid(id_ce, 20)
+
+
+class CertificateIssuer(GeneralNames):
+ pass
+
+
+id_holdinstruction_reject = _buildOid(holdInstruction, 3)
+
+
+class PolicyConstraints(univ.Sequence):
+ pass
+
+
+PolicyConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('requireExplicitPolicy',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('inhibitPolicyMapping',
+ SkipCerts().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_kp_clientAuth = _buildOid(id_kp, 2)
+
+id_ce_subjectKeyIdentifier = _buildOid(id_ce, 14)
+
+id_ce_inhibitAnyPolicy = _buildOid(id_ce, 54)
+
+# map of ORAddress ExtensionAttribute type to ExtensionAttribute value
+
+_oraddressExtensionAttributeMapUpdate = {
+ common_name: CommonName(),
+ teletex_common_name: TeletexCommonName(),
+ teletex_organization_name: TeletexOrganizationName(),
+ teletex_personal_name: TeletexPersonalName(),
+ teletex_organizational_unit_names: TeletexOrganizationalUnitNames(),
+ pds_name: PDSName(),
+ physical_delivery_country_name: PhysicalDeliveryCountryName(),
+ postal_code: PostalCode(),
+ physical_delivery_office_name: PhysicalDeliveryOfficeName(),
+ physical_delivery_office_number: PhysicalDeliveryOfficeNumber(),
+ extension_OR_address_components: ExtensionORAddressComponents(),
+ physical_delivery_personal_name: PhysicalDeliveryPersonalName(),
+ physical_delivery_organization_name: PhysicalDeliveryOrganizationName(),
+ extension_physical_delivery_address_components: ExtensionPhysicalDeliveryAddressComponents(),
+ unformatted_postal_address: UnformattedPostalAddress(),
+ street_address: StreetAddress(),
+ post_office_box_address: PostOfficeBoxAddress(),
+ poste_restante_address: PosteRestanteAddress(),
+ unique_postal_name: UniquePostalName(),
+ local_postal_attributes: LocalPostalAttributes(),
+ extended_network_address: ExtendedNetworkAddress(),
+ terminal_type: TerminalType(),
+ teletex_domain_defined_attributes: TeletexDomainDefinedAttributes(),
+}
+
+oraddressExtensionAttributeMap.update(_oraddressExtensionAttributeMapUpdate)
+
+
+# map of AttributeType -> AttributeValue
+
+_certificateAttributesMapUpdate = {
+ id_at_name: X520name(),
+ id_at_surname: X520name(),
+ id_at_givenName: X520name(),
+ id_at_initials: X520name(),
+ id_at_generationQualifier: X520name(),
+ id_at_commonName: X520CommonName(),
+ id_at_localityName: X520LocalityName(),
+ id_at_stateOrProvinceName: X520StateOrProvinceName(),
+ id_at_organizationName: X520OrganizationName(),
+ id_at_organizationalUnitName: X520OrganizationalUnitName(),
+ id_at_title: X520Title(),
+ id_at_dnQualifier: X520dnQualifier(),
+ id_at_countryName: X520countryName(),
+ id_at_serialNumber: X520SerialNumber(),
+ id_at_pseudonym: X520Pseudonym(),
+ id_domainComponent: DomainComponent(),
+ id_emailAddress: EmailAddress(),
+}
+
+certificateAttributesMap.update(_certificateAttributesMapUpdate)
+
+
+# map of Certificate Extension OIDs to Extensions
+
+_certificateExtensionsMap = {
+ id_ce_authorityKeyIdentifier: AuthorityKeyIdentifier(),
+ id_ce_subjectKeyIdentifier: SubjectKeyIdentifier(),
+ id_ce_keyUsage: KeyUsage(),
+ id_ce_privateKeyUsagePeriod: PrivateKeyUsagePeriod(),
+ id_ce_certificatePolicies: CertificatePolicies(),
+ id_ce_policyMappings: PolicyMappings(),
+ id_ce_subjectAltName: SubjectAltName(),
+ id_ce_issuerAltName: IssuerAltName(),
+ id_ce_subjectDirectoryAttributes: SubjectDirectoryAttributes(),
+ id_ce_basicConstraints: BasicConstraints(),
+ id_ce_nameConstraints: NameConstraints(),
+ id_ce_policyConstraints: PolicyConstraints(),
+ id_ce_extKeyUsage: ExtKeyUsageSyntax(),
+ id_ce_cRLDistributionPoints: CRLDistributionPoints(),
+ id_pe_authorityInfoAccess: AuthorityInfoAccessSyntax(),
+ id_ce_cRLNumber: univ.Integer(),
+ id_ce_deltaCRLIndicator: BaseCRLNumber(),
+ id_ce_issuingDistributionPoint: IssuingDistributionPoint(),
+ id_ce_cRLReasons: CRLReason(),
+ id_ce_holdInstructionCode: univ.ObjectIdentifier(),
+ id_ce_invalidityDate: useful.GeneralizedTime(),
+ id_ce_certificateIssuer: GeneralNames(),
+}
+
+certificateExtensionsMap.update(_certificateExtensionsMap)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5480.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5480.py
new file mode 100644
index 0000000000..84c0c11b88
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5480.py
@@ -0,0 +1,190 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Cryptography Subject Public Key Information
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5480.txt
+
+
+# What can be imported from rfc4055.py?
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+
+
+# These structures are the same as RFC 3279.
+
+DHPublicKey = rfc3279.DHPublicKey
+
+DSAPublicKey = rfc3279.DSAPublicKey
+
+ValidationParms = rfc3279.ValidationParms
+
+DomainParameters = rfc3279.DomainParameters
+
+ECDSA_Sig_Value = rfc3279.ECDSA_Sig_Value
+
+ECPoint = rfc3279.ECPoint
+
+KEA_Parms_Id = rfc3279.KEA_Parms_Id
+
+RSAPublicKey = rfc3279.RSAPublicKey
+
+
+# RFC 5480 changed the names of these structures from RFC 3279.
+
+DSS_Parms = rfc3279.Dss_Parms
+
+DSA_Sig_Value = rfc3279.Dss_Sig_Value
+
+
+# RFC 3279 defines a more complex alternative for ECParameters.
+# RFC 5480 narrows the definition to a single CHOICE: namedCurve.
+
+class ECParameters(univ.Choice):
+ pass
+
+ECParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('namedCurve', univ.ObjectIdentifier())
+)
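+
+# A hypothetical usage sketch, not part of the upstream module: building
+# the ECParameters value that names the P-256 curve. The OID literal is
+# secp256r1, which is also defined further down in this module.
+
+def _example_p256_parameters():
+    params = ECParameters()
+    params['namedCurve'] = univ.ObjectIdentifier('1.2.840.10045.3.1.7')
+    return params  # as a CHOICE, this DER-encodes as the bare curve OID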
+
+
+# OIDs for Message Digest Algorithms
+
+id_md2 = univ.ObjectIdentifier('1.2.840.113549.2.2')
+
+id_md5 = univ.ObjectIdentifier('1.2.840.113549.2.5')
+
+id_sha1 = univ.ObjectIdentifier('1.3.14.3.2.26')
+
+id_sha224 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.4')
+
+id_sha256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1')
+
+id_sha384 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.2')
+
+id_sha512 = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.3')
+
+
+# OID for RSA PK Algorithm and Key
+
+rsaEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+
+
+# OID for DSA PK Algorithm, Key, and Parameters
+
+id_dsa = univ.ObjectIdentifier('1.2.840.10040.4.1')
+
+
+# OID for Diffie-Hellman PK Algorithm, Key, and Parameters
+
+dhpublicnumber = univ.ObjectIdentifier('1.2.840.10046.2.1')
+
+# OID for KEA PK Algorithm and Parameters
+
+id_keyExchangeAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.1.22')
+
+
+# OIDs for Elliptic Curve Algorithm ID, Key, and Parameters
+# Note that ECDSA keys always use this OID
+
+id_ecPublicKey = univ.ObjectIdentifier('1.2.840.10045.2.1')
+
+id_ecDH = univ.ObjectIdentifier('1.3.132.1.12')
+
+id_ecMQV = univ.ObjectIdentifier('1.3.132.1.13')
+
+
+# OIDs for RSA Signature Algorithms
+
+md2WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.2')
+
+md5WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.4')
+
+sha1WithRSAEncryption = univ.ObjectIdentifier('1.2.840.113549.1.1.5')
+
+
+# OIDs for DSA Signature Algorithms
+
+id_dsa_with_sha1 = univ.ObjectIdentifier('1.2.840.10040.4.3')
+
+id_dsa_with_sha224 = univ.ObjectIdentifier('2.16.840.1.101.3.4.3.1')
+
+id_dsa_with_sha256 = univ.ObjectIdentifier('2.16.840.1.101.3.4.3.2')
+
+
+# OIDs for ECDSA Signature Algorithms
+
+ecdsa_with_SHA1 = univ.ObjectIdentifier('1.2.840.10045.4.1')
+
+ecdsa_with_SHA224 = univ.ObjectIdentifier('1.2.840.10045.4.3.1')
+
+ecdsa_with_SHA256 = univ.ObjectIdentifier('1.2.840.10045.4.3.2')
+
+ecdsa_with_SHA384 = univ.ObjectIdentifier('1.2.840.10045.4.3.3')
+
+ecdsa_with_SHA512 = univ.ObjectIdentifier('1.2.840.10045.4.3.4')
+
+
+# OIDs for Named Elliptic Curves
+
+secp192r1 = univ.ObjectIdentifier('1.2.840.10045.3.1.1')
+
+sect163k1 = univ.ObjectIdentifier('1.3.132.0.1')
+
+sect163r2 = univ.ObjectIdentifier('1.3.132.0.15')
+
+secp224r1 = univ.ObjectIdentifier('1.3.132.0.33')
+
+sect233k1 = univ.ObjectIdentifier('1.3.132.0.26')
+
+sect233r1 = univ.ObjectIdentifier('1.3.132.0.27')
+
+secp256r1 = univ.ObjectIdentifier('1.2.840.10045.3.1.7')
+
+sect283k1 = univ.ObjectIdentifier('1.3.132.0.16')
+
+sect283r1 = univ.ObjectIdentifier('1.3.132.0.17')
+
+secp384r1 = univ.ObjectIdentifier('1.3.132.0.34')
+
+sect409k1 = univ.ObjectIdentifier('1.3.132.0.36')
+
+sect409r1 = univ.ObjectIdentifier('1.3.132.0.37')
+
+secp521r1 = univ.ObjectIdentifier('1.3.132.0.35')
+
+sect571k1 = univ.ObjectIdentifier('1.3.132.0.38')
+
+sect571r1 = univ.ObjectIdentifier('1.3.132.0.39')
+
+
+# Map of Algorithm Identifier OIDs to Parameters
+# The algorithm is not included if the parameters MUST be absent
+
+_algorithmIdentifierMapUpdate = {
+ rsaEncryption: univ.Null(),
+ md2WithRSAEncryption: univ.Null(),
+ md5WithRSAEncryption: univ.Null(),
+ sha1WithRSAEncryption: univ.Null(),
+ id_dsa: DSS_Parms(),
+ dhpublicnumber: DomainParameters(),
+ id_keyExchangeAlgorithm: KEA_Parms_Id(),
+ id_ecPublicKey: ECParameters(),
+ id_ecDH: ECParameters(),
+ id_ecMQV: ECParameters(),
+}
+
+
+# Add these Algorithm Identifier map entries to the ones in rfc5280.py
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
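+
+
+# A hypothetical decoding sketch, not part of the upstream module: with
+# decodeOpenTypes=True the DER decoder consults algorithmIdentifierMap to
+# decode the 'parameters' ANY, e.g. into an ECParameters CHOICE. The
+# der_bytes argument is assumed to hold a DER-encoded AlgorithmIdentifier.
+
+def _example_decode_algorithm_identifier(der_bytes):
+    from pyasn1.codec.der.decoder import decode
+    algid, _ = decode(der_bytes, asn1Spec=rfc5280.AlgorithmIdentifier(),
+                      decodeOpenTypes=True)
+    if algid['algorithm'] == id_ecPublicKey:
+        return algid['parameters']['namedCurve']  # the curve OID
+    return algid['parameters']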
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5649.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5649.py
new file mode 100644
index 0000000000..84809eeb18
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5649.py
@@ -0,0 +1,33 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# AES Key Wrap with Padding
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5649.txt
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class AlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+id_aes128_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.5')
+
+id_aes192_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.25')
+
+id_aes256_wrap = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.45')
+
+
+id_aes128_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.8')
+
+id_aes192_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.28')
+
+id_aes256_wrap_pad = univ.ObjectIdentifier('2.16.840.1.101.3.4.1.48')
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5652.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5652.py
new file mode 100644
index 0000000000..2e48962dd3
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5652.py
@@ -0,0 +1,761 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Modified by Russ Housley to add support for opentypes.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# http://www.ietf.org/rfc/rfc5652.txt
+#
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc3281
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
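+
+# For example, _buildOid(1, 2, 840, 113549) and
+# _buildOid(univ.ObjectIdentifier('1.2.840'), 113549) both yield the
+# ObjectIdentifier 1.2.840.113549.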
+
+
+cmsContentTypesMap = { }
+
+cmsAttributesMap = { }
+
+otherKeyAttributesMap = { }
+
+otherCertFormatMap = { }
+
+otherRevInfoFormatMap = { }
+
+otherRecipientInfoMap = { }
+
+
+class AttCertVersionV1(univ.Integer):
+ pass
+
+
+AttCertVersionV1.namedValues = namedval.NamedValues(
+ ('v1', 0)
+)
+
+
+class AttributeCertificateInfoV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateInfoV1.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', AttCertVersionV1().subtype(value="v1")),
+ namedtype.NamedType(
+ 'subject', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('baseCertificateID', rfc3281.IssuerSerial().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('subjectName', rfc5280.GeneralNames().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+ )
+ ),
+ namedtype.NamedType('issuer', rfc5280.GeneralNames()),
+ namedtype.NamedType('signature', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber()),
+ namedtype.NamedType('attCertValidityPeriod', rfc3281.AttCertValidityPeriod()),
+ namedtype.NamedType('attributes', univ.SequenceOf(componentType=rfc5280.Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID', rfc5280.UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions', rfc5280.Extensions())
+)
+
+
+class AttributeCertificateV1(univ.Sequence):
+ pass
+
+
+AttributeCertificateV1.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acInfo', AttributeCertificateInfoV1()),
+ namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class Attribute(univ.Sequence):
+ pass
+
+
+Attribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', cmsAttributesMap)
+ )
+)
+
+
+class SignedAttributes(univ.SetOf):
+ pass
+
+
+SignedAttributes.componentType = Attribute()
+SignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class AttributeCertificateV2(rfc3281.AttributeCertificate):
+ pass
+
+
+class OtherKeyAttribute(univ.Sequence):
+ pass
+
+
+OtherKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAttrId', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('keyAttr', univ.Any(),
+ openType=opentype.OpenType('keyAttrId', otherKeyAttributesMap)
+ )
+)
+
+
+class UnauthAttributes(univ.SetOf):
+ pass
+
+
+UnauthAttributes.componentType = Attribute()
+UnauthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_encryptedData = _buildOid(1, 2, 840, 113549, 1, 7, 6)
+
+
+class SignatureValue(univ.OctetString):
+ pass
+
+
+class IssuerAndSerialNumber(univ.Sequence):
+ pass
+
+
+IssuerAndSerialNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', rfc5280.Name()),
+ namedtype.NamedType('serialNumber', rfc5280.CertificateSerialNumber())
+)
+
+
+class SubjectKeyIdentifier(univ.OctetString):
+ pass
+
+
+class RecipientKeyIdentifier(univ.Sequence):
+ pass
+
+
+RecipientKeyIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KeyAgreeRecipientIdentifier(univ.Choice):
+ pass
+
+
+KeyAgreeRecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('rKeyId', RecipientKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class EncryptedKey(univ.OctetString):
+ pass
+
+
+class RecipientEncryptedKey(univ.Sequence):
+ pass
+
+
+RecipientEncryptedKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('rid', KeyAgreeRecipientIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientEncryptedKeys(univ.SequenceOf):
+ pass
+
+
+RecipientEncryptedKeys.componentType = RecipientEncryptedKey()
+
+
+class MessageAuthenticationCode(univ.OctetString):
+ pass
+
+
+class CMSVersion(univ.Integer):
+ pass
+
+
+CMSVersion.namedValues = namedval.NamedValues(
+ ('v0', 0),
+ ('v1', 1),
+ ('v2', 2),
+ ('v3', 3),
+ ('v4', 4),
+ ('v5', 5)
+)
+
+
+class OtherCertificateFormat(univ.Sequence):
+ pass
+
+
+OtherCertificateFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherCertFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherCert', univ.Any(),
+ openType=opentype.OpenType('otherCertFormat', otherCertFormatMap)
+ )
+)
+
+
+class ExtendedCertificateInfo(univ.Sequence):
+ pass
+
+
+ExtendedCertificateInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('attributes', UnauthAttributes())
+)
+
+
+class Signature(univ.BitString):
+ pass
+
+
+class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class ExtendedCertificate(univ.Sequence):
+ pass
+
+
+ExtendedCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', Signature())
+)
+
+
+class CertificateChoices(univ.Choice):
+ pass
+
+
+CertificateChoices.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('v1AttrCert', AttributeCertificateV1().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('v2AttrCert', AttributeCertificateV2().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('other', OtherCertificateFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class CertificateSet(univ.SetOf):
+ pass
+
+
+CertificateSet.componentType = CertificateChoices()
+
+
+class OtherRevocationInfoFormat(univ.Sequence):
+ pass
+
+
+OtherRevocationInfoFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('otherRevInfoFormat', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherRevInfo', univ.Any(),
+ openType=opentype.OpenType('otherRevInfoFormat', otherRevInfoFormatMap)
+ )
+)
+
+
+class RevocationInfoChoice(univ.Choice):
+ pass
+
+
+RevocationInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crl', rfc5280.CertificateList()),
+ namedtype.NamedType('other', OtherRevocationInfoFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class RevocationInfoChoices(univ.SetOf):
+ pass
+
+
+RevocationInfoChoices.componentType = RevocationInfoChoice()
+
+
+class OriginatorInfo(univ.Sequence):
+ pass
+
+
+OriginatorInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('certs', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class ContentType(univ.ObjectIdentifier):
+ pass
+
+
+class EncryptedContent(univ.OctetString):
+ pass
+
+
+class ContentEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedContentInfo(univ.Sequence):
+ pass
+
+
+EncryptedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class UnprotectedAttributes(univ.SetOf):
+ pass
+
+
+UnprotectedAttributes.componentType = Attribute()
+UnprotectedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KEKIdentifier(univ.Sequence):
+ pass
+
+
+KEKIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyIdentifier', univ.OctetString()),
+ namedtype.OptionalNamedType('date', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('other', OtherKeyAttribute())
+)
+
+
+class KEKRecipientInfo(univ.Sequence):
+ pass
+
+
+KEKRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('kekid', KEKIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class KeyDerivationAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class PasswordRecipientInfo(univ.Sequence):
+ pass
+
+
+PasswordRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('keyDerivationAlgorithm', KeyDerivationAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class RecipientIdentifier(univ.Choice):
+ pass
+
+
+RecipientIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class KeyTransRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyTransRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('rid', RecipientIdentifier()),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedKey', EncryptedKey())
+)
+
+
+class UserKeyingMaterial(univ.OctetString):
+ pass
+
+
+class OriginatorPublicKey(univ.Sequence):
+ pass
+
+
+OriginatorPublicKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('publicKey', univ.BitString())
+)
+
+
+class OriginatorIdentifierOrKey(univ.Choice):
+ pass
+
+
+OriginatorIdentifierOrKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('originatorKey', OriginatorPublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class KeyAgreeRecipientInfo(univ.Sequence):
+ pass
+
+
+KeyAgreeRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('originator', OriginatorIdentifierOrKey().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('ukm', UserKeyingMaterial().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('recipientEncryptedKeys', RecipientEncryptedKeys())
+)
+
+
+class OtherRecipientInfo(univ.Sequence):
+ pass
+
+
+OtherRecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oriType', univ.ObjectIdentifier()),
+ namedtype.NamedType('oriValue', univ.Any(),
+ openType=opentype.OpenType('oriType', otherRecipientInfoMap)
+ )
+)
+
+
+class RecipientInfo(univ.Choice):
+ pass
+
+
+RecipientInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('ktri', KeyTransRecipientInfo()),
+ namedtype.NamedType('kari', KeyAgreeRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('kekri', KEKRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2))),
+ namedtype.NamedType('pwri', PasswordRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3))),
+ namedtype.NamedType('ori', OtherRecipientInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+)
+
+
+class RecipientInfos(univ.SetOf):
+ pass
+
+
+RecipientInfos.componentType = RecipientInfo()
+RecipientInfos.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class EnvelopedData(univ.Sequence):
+ pass
+
+
+EnvelopedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class DigestAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+id_ct_contentInfo = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 6)
+
+id_digestedData = _buildOid(1, 2, 840, 113549, 1, 7, 5)
+
+
+class EncryptedData(univ.Sequence):
+ pass
+
+
+EncryptedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
+ namedtype.OptionalNamedType('unprotectedAttrs', UnprotectedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+id_messageDigest = _buildOid(1, 2, 840, 113549, 1, 9, 4)
+
+id_signedData = _buildOid(1, 2, 840, 113549, 1, 7, 2)
+
+
+class MessageAuthenticationCodeAlgorithm(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class UnsignedAttributes(univ.SetOf):
+ pass
+
+
+UnsignedAttributes.componentType = Attribute()
+UnsignedAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SignerIdentifier(univ.Choice):
+ pass
+
+
+SignerIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
+ namedtype.NamedType('subjectKeyIdentifier', SubjectKeyIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class SignerInfo(univ.Sequence):
+ pass
+
+
+SignerInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('sid', SignerIdentifier()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signedAttrs', SignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
+ namedtype.NamedType('signature', SignatureValue()),
+ namedtype.OptionalNamedType('unsignedAttrs', UnsignedAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class SignerInfos(univ.SetOf):
+ pass
+
+
+SignerInfos.componentType = SignerInfo()
+
+
+class Countersignature(SignerInfo):
+ pass
+
+
+class ContentInfo(univ.Sequence):
+ pass
+
+
+ContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', ContentType()),
+ namedtype.NamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('contentType', cmsContentTypesMap)
+ )
+)
+
+
+class EncapsulatedContentInfo(univ.Sequence):
+ pass
+
+
+EncapsulatedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('eContentType', ContentType()),
+ namedtype.OptionalNamedType('eContent', univ.OctetString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+id_countersignature = _buildOid(1, 2, 840, 113549, 1, 9, 6)
+
+id_data = _buildOid(1, 2, 840, 113549, 1, 7, 1)
+
+
+class MessageDigest(univ.OctetString):
+ pass
+
+
+class AuthAttributes(univ.SetOf):
+ pass
+
+
+AuthAttributes.componentType = Attribute()
+AuthAttributes.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class Time(univ.Choice):
+ pass
+
+
+Time.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utcTime', useful.UTCTime()),
+ namedtype.NamedType('generalTime', useful.GeneralizedTime())
+)
+
+
+class AuthenticatedData(univ.Sequence):
+ pass
+
+
+AuthenticatedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.OptionalNamedType('originatorInfo', OriginatorInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('recipientInfos', RecipientInfos()),
+ namedtype.NamedType('macAlgorithm', MessageAuthenticationCodeAlgorithm()),
+ namedtype.OptionalNamedType('digestAlgorithm', DigestAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('authAttrs', AuthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('mac', MessageAuthenticationCode()),
+ namedtype.OptionalNamedType('unauthAttrs', UnauthAttributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+id_contentType = _buildOid(1, 2, 840, 113549, 1, 9, 3)
+
+
+class ExtendedCertificateOrCertificate(univ.Choice):
+ pass
+
+
+ExtendedCertificateOrCertificate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', rfc5280.Certificate()),
+ namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
+)
+
+
+class Digest(univ.OctetString):
+ pass
+
+
+class DigestedData(univ.Sequence):
+ pass
+
+
+DigestedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.NamedType('digest', Digest())
+)
+
+id_envelopedData = _buildOid(1, 2, 840, 113549, 1, 7, 3)
+
+
+class DigestAlgorithmIdentifiers(univ.SetOf):
+ pass
+
+
+DigestAlgorithmIdentifiers.componentType = DigestAlgorithmIdentifier()
+
+
+class SignedData(univ.Sequence):
+ pass
+
+
+SignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', CMSVersion()),
+ namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
+ namedtype.NamedType('encapContentInfo', EncapsulatedContentInfo()),
+ namedtype.OptionalNamedType('certificates', CertificateSet().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crls', RevocationInfoChoices().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('signerInfos', SignerInfos())
+)
+
+id_signingTime = _buildOid(1, 2, 840, 113549, 1, 9, 5)
+
+
+class SigningTime(Time):
+ pass
+
+
+id_ct_authData = _buildOid(1, 2, 840, 113549, 1, 9, 16, 1, 2)
+
+
+# CMS Content Type Map
+
+_cmsContentTypesMapUpdate = {
+ id_ct_contentInfo: ContentInfo(),
+ id_data: univ.OctetString(),
+ id_signedData: SignedData(),
+ id_envelopedData: EnvelopedData(),
+ id_digestedData: DigestedData(),
+ id_encryptedData: EncryptedData(),
+ id_ct_authData: AuthenticatedData(),
+}
+
+cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
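+
+
+# A hypothetical decoding sketch, not part of the upstream module: with
+# decodeOpenTypes=True the DER decoder uses cmsContentTypesMap to decode
+# the 'content' ANY of a ContentInfo into the matching CMS structure. The
+# cms_der argument is assumed to hold a DER-encoded ContentInfo.
+
+def _example_decode_content_info(cms_der):
+    from pyasn1.codec.der.decoder import decode
+    ci, _ = decode(cms_der, asn1Spec=ContentInfo(), decodeOpenTypes=True)
+    # For id_signedData the 'content' component is already a SignedData
+    # instance, for id_envelopedData an EnvelopedData, and so on.
+    return ci['contentType'], ci['content']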
+
+
+# CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ id_contentType: ContentType(),
+ id_messageDigest: MessageDigest(),
+ id_signingTime: SigningTime(),
+ id_countersignature: Countersignature(),
+}
+
+cmsAttributesMap.update(_cmsAttributesMapUpdate)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5751.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5751.py
new file mode 100644
index 0000000000..7e200012c6
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5751.py
@@ -0,0 +1,124 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# S/MIME Version 3.2 Message Specification
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5751.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc8018
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 5652 and RFC 8018
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+RecipientKeyIdentifier = rfc5652.RecipientKeyIdentifier
+
+SubjectKeyIdentifier = rfc5652.SubjectKeyIdentifier
+
+rc2CBC = rfc8018.rc2CBC
+
+
+# S/MIME Capabilities Attribute
+
+smimeCapabilities = univ.ObjectIdentifier('1.2.840.113549.1.9.15')
+
+
+smimeCapabilityMap = { }
+
+
+class SMIMECapability(univ.Sequence):
+ pass
+
+SMIMECapability.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('capabilityID', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('parameters', univ.Any(),
+ openType=opentype.OpenType('capabilityID', smimeCapabilityMap))
+)
+
+
+class SMIMECapabilities(univ.SequenceOf):
+ pass
+
+SMIMECapabilities.componentType = SMIMECapability()
+
+
+class SMIMECapabilitiesParametersForRC2CBC(univ.Integer):
+    # Carries the RC2 key length (number of bits).
+ pass
+
+
+# S/MIME Encryption Key Preference Attribute
+
+id_smime = univ.ObjectIdentifier('1.2.840.113549.1.9.16')
+
+id_aa = _OID(id_smime, 2)
+
+id_aa_encrypKeyPref = _OID(id_aa, 11)
+
+
+class SMIMEEncryptionKeyPreference(univ.Choice):
+ pass
+
+SMIMEEncryptionKeyPreference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerAndSerialNumber',
+ IssuerAndSerialNumber().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receipentKeyId',
+ # Yes, 'receipentKeyId' is spelled incorrectly, but kept
+ # this way for alignment with the ASN.1 module in the RFC.
+ RecipientKeyIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('subjectAltKeyIdentifier',
+ SubjectKeyIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+# The Prefer Binary Inside SMIMECapabilities attribute
+
+id_cap = _OID(id_smime, 11)
+
+id_cap_preferBinaryInside = _OID(id_cap, 1)
+
+
+# CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ smimeCapabilities: SMIMECapabilities(),
+ id_aa_encrypKeyPref: SMIMEEncryptionKeyPreference(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# SMIMECapabilities Attribute Map
+#
+# Do not include OIDs in the dictionary when the parameters are absent.
+
+_smimeCapabilityMapUpdate = {
+ rc2CBC: SMIMECapabilitiesParametersForRC2CBC(),
+}
+
+smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
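+
+
+# A hypothetical construction sketch, not part of the upstream module:
+# advertising RC2/CBC with a 128-bit key as an SMIMECapability. The
+# 'parameters' ANY carries the DER encoding of the key-length INTEGER.
+
+def _example_rc2_capability():
+    from pyasn1.codec.der.encoder import encode
+    cap = SMIMECapability()
+    cap['capabilityID'] = rc2CBC
+    cap['parameters'] = encode(SMIMECapabilitiesParametersForRC2CBC(128))
+    return cap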
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5755.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5755.py
new file mode 100644
index 0000000000..14f56fc600
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5755.py
@@ -0,0 +1,398 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# An Internet Attribute Certificate Profile for Authorization
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5755.txt
+# https://www.rfc-editor.org/rfc/rfc5912.txt (see Section 13)
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+# Map for Security Category type to value
+
+securityCategoryMap = { }
+
+
+# Imports from RFC 5652
+
+ContentInfo = rfc5652.ContentInfo
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Attribute = rfc5280.Attribute
+
+AuthorityInfoAccessSyntax = rfc5280.AuthorityInfoAccessSyntax
+
+AuthorityKeyIdentifier = rfc5280.AuthorityKeyIdentifier
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+CRLDistributionPoints = rfc5280.CRLDistributionPoints
+
+Extensions = rfc5280.Extensions
+
+Extension = rfc5280.Extension
+
+GeneralNames = rfc5280.GeneralNames
+
+GeneralName = rfc5280.GeneralName
+
+UniqueIdentifier = rfc5280.UniqueIdentifier
+
+
+# Object Identifier arcs
+
+id_pkix = univ.ObjectIdentifier((1, 3, 6, 1, 5, 5, 7, ))
+
+id_pe = id_pkix + (1, )
+
+id_kp = id_pkix + (3, )
+
+id_aca = id_pkix + (10, )
+
+id_ad = id_pkix + (48, )
+
+id_at = univ.ObjectIdentifier((2, 5, 4, ))
+
+id_ce = univ.ObjectIdentifier((2, 5, 29, ))
+
+
+# Attribute Certificate
+
+class AttCertVersion(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('v2', 1)
+ )
+
+
+class IssuerSerial(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', GeneralNames()),
+ namedtype.NamedType('serial', CertificateSerialNumber()),
+ namedtype.OptionalNamedType('issuerUID', UniqueIdentifier())
+ )
+
+
+class ObjectDigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestedObjectType',
+ univ.Enumerated(namedValues=namedval.NamedValues(
+ ('publicKey', 0),
+ ('publicKeyCert', 1),
+ ('otherObjectTypes', 2)))),
+ namedtype.OptionalNamedType('otherObjectTypeID',
+ univ.ObjectIdentifier()),
+ namedtype.NamedType('digestAlgorithm',
+ AlgorithmIdentifier()),
+ namedtype.NamedType('objectDigest',
+ univ.BitString())
+ )
+
+
+class Holder(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('baseCertificateID',
+ IssuerSerial().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('entityName',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('objectDigestInfo',
+ ObjectDigestInfo().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+class V2Form(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerName',
+ GeneralNames()),
+ namedtype.OptionalNamedType('baseCertificateID',
+ IssuerSerial().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('objectDigestInfo',
+ ObjectDigestInfo().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class AttCertIssuer(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('v1Form', GeneralNames()),
+ namedtype.NamedType('v2Form', V2Form().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 0)))
+ )
+
+
+class AttCertValidityPeriod(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('notBeforeTime', useful.GeneralizedTime()),
+ namedtype.NamedType('notAfterTime', useful.GeneralizedTime())
+ )
+
+
+class AttributeCertificateInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ AttCertVersion()),
+ namedtype.NamedType('holder',
+ Holder()),
+ namedtype.NamedType('issuer',
+ AttCertIssuer()),
+ namedtype.NamedType('signature',
+ AlgorithmIdentifier()),
+ namedtype.NamedType('serialNumber',
+ CertificateSerialNumber()),
+ namedtype.NamedType('attrCertValidityPeriod',
+ AttCertValidityPeriod()),
+ namedtype.NamedType('attributes',
+ univ.SequenceOf(componentType=Attribute())),
+ namedtype.OptionalNamedType('issuerUniqueID',
+ UniqueIdentifier()),
+ namedtype.OptionalNamedType('extensions',
+ Extensions())
+ )
+
+
+class AttributeCertificate(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acinfo', AttributeCertificateInfo()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signatureValue', univ.BitString())
+ )
+
+
+# Attribute Certificate Extensions
+
+id_pe_ac_auditIdentity = id_pe + (4, )
+
+id_ce_noRevAvail = id_ce + (56, )
+
+id_ce_targetInformation = id_ce + (55, )
+
+
+class TargetCert(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetCertificate', IssuerSerial()),
+ namedtype.OptionalNamedType('targetName', GeneralName()),
+ namedtype.OptionalNamedType('certDigestInfo', ObjectDigestInfo())
+ )
+
+
+class Target(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('targetName',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('targetGroup',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('targetCert',
+ TargetCert().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ )
+
+
+class Targets(univ.SequenceOf):
+ componentType = Target()
+
+
+id_pe_ac_proxying = id_pe + (10, )
+
+
+class ProxyInfo(univ.SequenceOf):
+ componentType = Targets()
+
+
+id_pe_aaControls = id_pe + (6, )
+
+
+class AttrSpec(univ.SequenceOf):
+ componentType = univ.ObjectIdentifier()
+
+
+class AAControls(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pathLenConstraint',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.OptionalNamedType('permittedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('excludedAttrs',
+ AttrSpec().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.DefaultedNamedType('permitUnSpecified',
+ univ.Boolean().subtype(value=1))
+ )
+
+
+# Attribute Certificate Attributes
+
+id_aca_authenticationInfo = id_aca + (1, )
+
+
+id_aca_accessIdentity = id_aca + (2, )
+
+
+class SvceAuthInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('service', GeneralName()),
+ namedtype.NamedType('ident', GeneralName()),
+ namedtype.OptionalNamedType('authInfo', univ.OctetString())
+ )
+
+
+id_aca_chargingIdentity = id_aca + (3, )
+
+
+id_aca_group = id_aca + (4, )
+
+
+class IetfAttrSyntax(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('policyAuthority',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('values', univ.SequenceOf(
+ componentType=univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('octets', univ.OctetString()),
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('string', char.UTF8String())
+ ))
+ ))
+ )
+
+
+id_at_role = id_at + (72,)
+
+
+class RoleSyntax(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('roleAuthority',
+ GeneralNames().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('roleName',
+ GeneralName().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ClassList(univ.BitString):
+ namedValues = namedval.NamedValues(
+ ('unmarked', 0),
+ ('unclassified', 1),
+ ('restricted', 2),
+ ('confidential', 3),
+ ('secret', 4),
+ ('topSecret', 5)
+ )
+
+
+class SecurityCategory(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('type',
+ univ.ObjectIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('value',
+ univ.Any().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)),
+ openType=opentype.OpenType('type', securityCategoryMap))
+ )
+
+
+id_at_clearance = univ.ObjectIdentifier((2, 5, 4, 55, ))
+
+
+class Clearance(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId',
+ univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(value='unclassified')),
+ namedtype.OptionalNamedType('securityCategories',
+ univ.SetOf(componentType=SecurityCategory()))
+ )
+
+
+id_at_clearance_rfc3281 = univ.ObjectIdentifier((2, 5, 1, 5, 55, ))
+
+
+class Clearance_rfc3281(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('policyId',
+ univ.ObjectIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.DefaultedNamedType('classList',
+ ClassList().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)).subtype(
+ value='unclassified')),
+ namedtype.OptionalNamedType('securityCategories',
+ univ.SetOf(componentType=SecurityCategory()).subtype(
+ implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+id_aca_encAttrs = id_aca + (6, )
+
+
+class ACClearAttrs(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('acIssuer', GeneralName()),
+ namedtype.NamedType('acSerial', univ.Integer()),
+ namedtype.NamedType('attrs', univ.SequenceOf(componentType=Attribute()))
+ )
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ac_auditIdentity: univ.OctetString(),
+ id_ce_noRevAvail: univ.Null(),
+ id_ce_targetInformation: Targets(),
+ id_pe_ac_proxying: ProxyInfo(),
+ id_pe_aaControls: AAControls(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
+
+# Map of AttributeType OIDs to AttributeValue added to the
+# ones that are in rfc5280.py
+
+_certificateAttributesMapUpdate = {
+ id_aca_authenticationInfo: SvceAuthInfo(),
+ id_aca_accessIdentity: SvceAuthInfo(),
+ id_aca_chargingIdentity: IetfAttrSyntax(),
+ id_aca_group: IetfAttrSyntax(),
+ id_at_role: RoleSyntax(),
+ id_at_clearance: Clearance(),
+ id_at_clearance_rfc3281: Clearance_rfc3281(),
+ id_aca_encAttrs: ContentInfo(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5913.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5913.py
new file mode 100644
index 0000000000..0bd065330d
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5913.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authority Clearance Constraints Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5913.txt
+# https://www.rfc-editor.org/errata/eid5890
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5755
+
+MAX = float('inf')
+
+
+# Authority Clearance Constraints Certificate Extension
+
+id_pe_clearanceConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.21')
+
+id_pe_authorityClearanceConstraints = id_pe_clearanceConstraints
+
+
+class AuthorityClearanceConstraints(univ.SequenceOf):
+ componentType = rfc5755.Clearance()
+    subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_clearanceConstraints: AuthorityClearanceConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5914.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5914.py
new file mode 100644
index 0000000000..d125ea2a65
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5914.py
@@ -0,0 +1,119 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Trust Anchor Format
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5914.txt
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+MAX = float('inf')
+
+Certificate = rfc5280.Certificate
+
+Name = rfc5280.Name
+
+Extensions = rfc5280.Extensions
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+TBSCertificate = rfc5280.TBSCertificate
+
+CertificatePolicies = rfc5280.CertificatePolicies
+
+KeyIdentifier = rfc5280.KeyIdentifier
+
+NameConstraints = rfc5280.NameConstraints
+
+
+class CertPolicyFlags(univ.BitString):
+ pass
+
+CertPolicyFlags.namedValues = namedval.NamedValues(
+ ('inhibitPolicyMapping', 0),
+ ('requireExplicitPolicy', 1),
+ ('inhibitAnyPolicy', 2)
+)
+
+
+class CertPathControls(univ.Sequence):
+ pass
+
+CertPathControls.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taName', Name()),
+ namedtype.OptionalNamedType('certificate', Certificate().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('policySet', CertificatePolicies().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('policyFlags', CertPolicyFlags().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('nameConstr', NameConstraints().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('pathLenConstraint', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4)))
+)
+
+
+class TrustAnchorTitle(char.UTF8String):
+ pass
+
+TrustAnchorTitle.subtypeSpec = constraint.ValueSizeConstraint(1, 64)
+
+
+class TrustAnchorInfoVersion(univ.Integer):
+ pass
+
+TrustAnchorInfoVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+class TrustAnchorInfo(univ.Sequence):
+ pass
+
+TrustAnchorInfo.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TrustAnchorInfoVersion().subtype(value='v1')),
+ namedtype.NamedType('pubKey', SubjectPublicKeyInfo()),
+ namedtype.NamedType('keyId', KeyIdentifier()),
+ namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()),
+ namedtype.OptionalNamedType('certPath', CertPathControls()),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('taTitleLangTag', char.UTF8String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class TrustAnchorChoice(univ.Choice):
+ pass
+
+TrustAnchorChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certificate', Certificate()),
+ namedtype.NamedType('tbsCert', TBSCertificate().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('taInfo', TrustAnchorInfo().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+
+id_ct_trustAnchorList = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.34')
+
+class TrustAnchorList(univ.SequenceOf):
+ pass
+
+TrustAnchorList.componentType = TrustAnchorChoice()
+TrustAnchorList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
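+
+
+# A hypothetical decoding sketch, not part of the upstream module: listing
+# which TrustAnchorChoice alternative each entry of a DER-encoded
+# TrustAnchorList carries. The tal_der argument is an assumption.
+
+def _example_anchor_kinds(tal_der):
+    from pyasn1.codec.der.decoder import decode
+    tal, _ = decode(tal_der, asn1Spec=TrustAnchorList())
+    # Each name is one of 'certificate', 'tbsCert' or 'taInfo'.
+    return [anchor.getName() for anchor in tal]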
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5915.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5915.py
new file mode 100644
index 0000000000..82ff4a338b
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5915.py
@@ -0,0 +1,32 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Private Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5915.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5480
+
+
+class ECPrivateKey(univ.Sequence):
+ pass
+
+ECPrivateKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer(
+ namedValues=namedval.NamedValues(('ecPrivkeyVer1', 1)))),
+ namedtype.NamedType('privateKey', univ.OctetString()),
+ namedtype.OptionalNamedType('parameters', rfc5480.ECParameters().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('publicKey', univ.BitString().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
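+
+
+# A hypothetical decoding sketch, not part of the upstream module:
+# extracting the raw private scalar from a DER-encoded ECPrivateKey (for
+# example, the base64-decoded body of a PEM "EC PRIVATE KEY" block). The
+# der_bytes argument is an assumption.
+
+def _example_private_scalar(der_bytes):
+    from pyasn1.codec.der.decoder import decode
+    key, _ = decode(der_bytes, asn1Spec=ECPrivateKey())
+    return key['privateKey'].asOctets()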
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5916.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5916.py
new file mode 100644
index 0000000000..ac23c86b79
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5916.py
@@ -0,0 +1,35 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Device Owner Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5916.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Device Owner Attribute
+
+id_deviceOwner = univ.ObjectIdentifier((2, 16, 840, 1, 101, 2, 1, 5, 69))
+
+at_deviceOwner = rfc5280.Attribute()
+at_deviceOwner['type'] = id_deviceOwner
+at_deviceOwner['values'][0] = univ.ObjectIdentifier()
+
+
+# Add to the map of Attribute Type OIDs to Attributes in rfc5280.py.
+
+_certificateAttributesMapUpdate = {
+ id_deviceOwner: univ.ObjectIdentifier(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5917.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5917.py
new file mode 100644
index 0000000000..ed9af987db
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5917.py
@@ -0,0 +1,55 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Clearance Sponsor Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5917.txt
+# https://www.rfc-editor.org/errata/eid4558
+# https://www.rfc-editor.org/errata/eid5883
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# DirectoryString is the same as RFC 5280, except for two things:
+# 1. the length is limited to 64;
+# 2. only the 'utf8String' choice remains because the ASN.1
+# specification says: ( WITH COMPONENTS { utf8String PRESENT } )
+
+class DirectoryString(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 64))),
+ )
+
+
+# Clearance Sponsor Attribute
+
+id_clearanceSponsor = univ.ObjectIdentifier((2, 16, 840, 1, 101, 2, 1, 5, 68))
+
+ub_clearance_sponsor = univ.Integer(64)
+
+
+at_clearanceSponsor = rfc5280.Attribute()
+at_clearanceSponsor['type'] = id_clearanceSponsor
+at_clearanceSponsor['values'][0] = DirectoryString()
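+
+
+# A hypothetical sketch, not part of the upstream module: a concrete
+# clearance sponsor value ("Example Sponsor" is illustrative only).
+
+def _example_clearance_sponsor():
+    value = DirectoryString()
+    value['utf8String'] = 'Example Sponsor'
+    return value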
+
+
+# Add to the map of Attribute Type OIDs to Attributes in rfc5280.py.
+
+_certificateAttributesMapUpdate = {
+ id_clearanceSponsor: DirectoryString(),
+}
+
+rfc5280.certificateAttributesMap.update(_certificateAttributesMapUpdate)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5924.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5924.py
new file mode 100644
index 0000000000..4358e4f529
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5924.py
@@ -0,0 +1,19 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Extended Key Usage (EKU) for Session Initiation Protocol (SIP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5924.txt
+#
+
+from pyasn1.type import univ
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_kp_sipDomain = id_kp + (20, )
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5934.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5934.py
new file mode 100644
index 0000000000..e3ad247aa0
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5934.py
@@ -0,0 +1,786 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Trust Anchor Format
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5934.txt
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc2985
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5914
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 2985
+
+SingleAttribute = rfc2985.SingleAttribute
+
+
+# Imports from RFC5914
+
+CertPathControls = rfc5914.CertPathControls
+
+TrustAnchorChoice = rfc5914.TrustAnchorChoice
+
+TrustAnchorTitle = rfc5914.TrustAnchorTitle
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+AnotherName = rfc5280.AnotherName
+
+Attribute = rfc5280.Attribute
+
+Certificate = rfc5280.Certificate
+
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+
+Extension = rfc5280.Extension
+
+Extensions = rfc5280.Extensions
+
+KeyIdentifier = rfc5280.KeyIdentifier
+
+Name = rfc5280.Name
+
+SubjectPublicKeyInfo = rfc5280.SubjectPublicKeyInfo
+
+TBSCertificate = rfc5280.TBSCertificate
+
+Validity = rfc5280.Validity
+
+
+# Object Identifier Arc for TAMP Message Content Types
+
+id_tamp = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.77')
+
+
+# TAMP Status Query Message
+
+id_ct_TAMP_statusQuery = _OID(id_tamp, 1)
+
+
+class TAMPVersion(univ.Integer):
+ pass
+
+TAMPVersion.namedValues = namedval.NamedValues(
+ ('v1', 1),
+ ('v2', 2)
+)
+
+
+class TerseOrVerbose(univ.Enumerated):
+ pass
+
+TerseOrVerbose.namedValues = namedval.NamedValues(
+ ('terse', 1),
+ ('verbose', 2)
+)
+
+
+class HardwareSerialEntry(univ.Choice):
+ pass
+
+HardwareSerialEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('all', univ.Null()),
+ namedtype.NamedType('single', univ.OctetString()),
+ namedtype.NamedType('block', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('low', univ.OctetString()),
+ namedtype.NamedType('high', univ.OctetString())
+ ))
+ )
+)
+
+
+class HardwareModules(univ.Sequence):
+ pass
+
+HardwareModules.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwType', univ.ObjectIdentifier()),
+ namedtype.NamedType('hwSerialEntries', univ.SequenceOf(
+ componentType=HardwareSerialEntry()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class HardwareModuleIdentifierList(univ.SequenceOf):
+ pass
+
+HardwareModuleIdentifierList.componentType = HardwareModules()
+HardwareModuleIdentifierList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class Community(univ.ObjectIdentifier):
+ pass
+
+
+class CommunityIdentifierList(univ.SequenceOf):
+ pass
+
+CommunityIdentifierList.componentType = Community()
+CommunityIdentifierList.subtypeSpec = constraint.ValueSizeConstraint(0, MAX)
+
+
+class TargetIdentifier(univ.Choice):
+ pass
+
+TargetIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hwModules', HardwareModuleIdentifierList().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('communities', CommunityIdentifierList().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('allModules', univ.Null().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('uri', char.IA5String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.NamedType('otherName', AnotherName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+
+class SeqNumber(univ.Integer):
+ pass
+
+SeqNumber.subtypeSpec = constraint.ValueRangeConstraint(0, 9223372036854775807)
+
+
+class TAMPMsgRef(univ.Sequence):
+ pass
+
+TAMPMsgRef.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('target', TargetIdentifier()),
+ namedtype.NamedType('seqNum', SeqNumber())
+)
+
+
+class TAMPStatusQuery(univ.Sequence):
+ pass
+
+TAMPStatusQuery.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse', TerseOrVerbose().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('query', TAMPMsgRef())
+)
+
+
+tamp_status_query = rfc5652.ContentInfo()
+tamp_status_query['contentType'] = id_ct_TAMP_statusQuery
+tamp_status_query['content'] = TAMPStatusQuery()
+
+
+# TAMP Status Response Message
+
+id_ct_TAMP_statusResponse = _OID(id_tamp, 2)
+
+
+class KeyIdentifiers(univ.SequenceOf):
+ pass
+
+KeyIdentifiers.componentType = KeyIdentifier()
+KeyIdentifiers.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class TrustAnchorChoiceList(univ.SequenceOf):
+ pass
+
+TrustAnchorChoiceList.componentType = TrustAnchorChoice()
+TrustAnchorChoiceList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class TAMPSequenceNumber(univ.Sequence):
+ pass
+
+TAMPSequenceNumber.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyId', KeyIdentifier()),
+ namedtype.NamedType('seqNumber', SeqNumber())
+)
+
+
+class TAMPSequenceNumbers(univ.SequenceOf):
+ pass
+
+TAMPSequenceNumbers.componentType = TAMPSequenceNumber()
+TAMPSequenceNumbers.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class TerseStatusResponse(univ.Sequence):
+ pass
+
+TerseStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taKeyIds', KeyIdentifiers()),
+ namedtype.OptionalNamedType('communities', CommunityIdentifierList())
+)
+
+
+class VerboseStatusResponse(univ.Sequence):
+ pass
+
+VerboseStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('continPubKeyDecryptAlg',
+ AlgorithmIdentifier().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('communities',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+class StatusResponse(univ.Choice):
+ pass
+
+StatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseResponse', TerseStatusResponse().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('verboseResponse', VerboseStatusResponse().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPStatusResponse(univ.Sequence):
+ pass
+
+TAMPStatusResponse.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('query', TAMPMsgRef()),
+ namedtype.NamedType('response', StatusResponse()),
+ namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1))
+)
+
+
+tamp_status_response = rfc5652.ContentInfo()
+tamp_status_response['contentType'] = id_ct_TAMP_statusResponse
+tamp_status_response['content'] = TAMPStatusResponse()
+
+
+# Trust Anchor Update Message
+
+id_ct_TAMP_update = _OID(id_tamp, 3)
+
+
+class TBSCertificateChangeInfo(univ.Sequence):
+ pass
+
+TBSCertificateChangeInfo.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('serialNumber', CertificateSerialNumber()),
+ namedtype.OptionalNamedType('signature', AlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('issuer', Name().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('validity', Validity().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('subject', Name().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('subjectPublicKeyInfo', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+
+class TrustAnchorChangeInfo(univ.Sequence):
+ pass
+
+TrustAnchorChangeInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pubKey', SubjectPublicKeyInfo()),
+ namedtype.OptionalNamedType('keyId', KeyIdentifier()),
+ namedtype.OptionalNamedType('taTitle', TrustAnchorTitle()),
+ namedtype.OptionalNamedType('certPath', CertPathControls()),
+ namedtype.OptionalNamedType('exts', Extensions().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class TrustAnchorChangeInfoChoice(univ.Choice):
+ pass
+
+TrustAnchorChangeInfoChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsCertChange', TBSCertificateChangeInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('taChange', TrustAnchorChangeInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TrustAnchorUpdate(univ.Choice):
+ pass
+
+TrustAnchorUpdate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('add', TrustAnchorChoice().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('remove', SubjectPublicKeyInfo().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('change', TrustAnchorChangeInfoChoice().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 3)))
+)
+
+
+class TAMPUpdate(univ.Sequence):
+ pass
+
+TAMPUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('updates',
+ univ.SequenceOf(componentType=TrustAnchorUpdate()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+tamp_update = rfc5652.ContentInfo()
+tamp_update['contentType'] = id_ct_TAMP_update
+tamp_update['content'] = TAMPUpdate()
+
+
+# Trust Anchor Update Confirm Message
+
+id_ct_TAMP_updateConfirm = _OID(id_tamp, 4)
+
+
+class StatusCode(univ.Enumerated):
+ pass
+
+StatusCode.namedValues = namedval.NamedValues(
+ ('success', 0),
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('unsupportedParameters', 15),
+ ('signatureFailure', 16),
+ ('insufficientMemory', 17),
+ ('unsupportedTAMPMsgType', 18),
+ ('apexTAMPAnchor', 19),
+ ('improperTAAddition', 20),
+ ('seqNumFailure', 21),
+ ('contingencyPublicKeyDecrypt', 22),
+ ('incorrectTarget', 23),
+ ('communityUpdateFailed', 24),
+ ('trustAnchorNotFound', 25),
+ ('unsupportedTAAlgorithm', 26),
+ ('unsupportedTAKeySize', 27),
+ ('unsupportedContinPubKeyDecryptAlg', 28),
+ ('missingSignature', 29),
+ ('resourcesBusy', 30),
+ ('versionNumberMismatch', 31),
+ ('missingPolicySet', 32),
+ ('revokedCertificate', 33),
+ ('unsupportedTrustAnchorFormat', 34),
+ ('improperTAChange', 35),
+ ('malformed', 36),
+ ('cmsError', 37),
+ ('unsupportedTargetIdentifier', 38),
+ ('other', 127)
+)
+
+
+class StatusCodeList(univ.SequenceOf):
+ pass
+
+StatusCodeList.componentType = StatusCode()
+StatusCodeList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class TerseUpdateConfirm(StatusCodeList):
+ pass
+
+
+class VerboseUpdateConfirm(univ.Sequence):
+ pass
+
+VerboseUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCodeList()),
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('tampSeqNumbers', TAMPSequenceNumbers()),
+ namedtype.DefaultedNamedType('usesApex', univ.Boolean().subtype(value=1))
+)
+
+
+class UpdateConfirm(univ.Choice):
+ pass
+
+UpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseConfirm', TerseUpdateConfirm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseConfirm', VerboseUpdateConfirm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', TAMPVersion().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('update', TAMPMsgRef()),
+ namedtype.NamedType('confirm', UpdateConfirm())
+)
+
+
+tamp_update_confirm = rfc5652.ContentInfo()
+tamp_update_confirm['contentType'] = id_ct_TAMP_updateConfirm
+tamp_update_confirm['content'] = TAMPUpdateConfirm()
+
+
+# Apex Trust Anchor Update Message
+
+id_ct_TAMP_apexUpdate = _OID(id_tamp, 5)
+
+
+class TAMPApexUpdate(univ.Sequence):
+ pass
+
+TAMPApexUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('clearTrustAnchors', univ.Boolean()),
+ namedtype.NamedType('clearCommunities', univ.Boolean()),
+ namedtype.OptionalNamedType('seqNumber', SeqNumber()),
+ namedtype.NamedType('apexTA', TrustAnchorChoice())
+)
+
+
+tamp_apex_update = rfc5652.ContentInfo()
+tamp_apex_update['contentType'] = id_ct_TAMP_apexUpdate
+tamp_apex_update['content'] = TAMPApexUpdate()
+
+
+# Apex Trust Anchor Update Confirm Message
+
+id_ct_TAMP_apexUpdateConfirm = _OID(id_tamp, 6)
+
+
+class TerseApexUpdateConfirm(StatusCode):
+ pass
+
+
+class VerboseApexUpdateConfirm(univ.Sequence):
+ pass
+
+VerboseApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.NamedType('taInfo', TrustAnchorChoiceList()),
+ namedtype.OptionalNamedType('communities',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('tampSeqNumbers',
+ TAMPSequenceNumbers().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)))
+)
+
+
+class ApexUpdateConfirm(univ.Choice):
+ pass
+
+ApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseApexConfirm',
+ TerseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseApexConfirm',
+ VerboseApexUpdateConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPApexUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPApexUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('apexReplace', TAMPMsgRef()),
+ namedtype.NamedType('apexConfirm', ApexUpdateConfirm())
+)
+
+
+tamp_apex_update_confirm = rfc5652.ContentInfo()
+tamp_apex_update_confirm['contentType'] = id_ct_TAMP_apexUpdateConfirm
+tamp_apex_update_confirm['content'] = TAMPApexUpdateConfirm()
+
+
+# Community Update Message
+
+id_ct_TAMP_communityUpdate = _OID(id_tamp, 7)
+
+
+class CommunityUpdates(univ.Sequence):
+ pass
+
+CommunityUpdates.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('remove',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('add',
+ CommunityIdentifierList().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 2)))
+)
+
+
+class TAMPCommunityUpdate(univ.Sequence):
+ pass
+
+TAMPCommunityUpdate.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.DefaultedNamedType('terse',
+ TerseOrVerbose().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)).subtype(value='verbose')),
+ namedtype.NamedType('msgRef', TAMPMsgRef()),
+ namedtype.NamedType('updates', CommunityUpdates())
+)
+
+
+tamp_community_update = rfc5652.ContentInfo()
+tamp_community_update['contentType'] = id_ct_TAMP_communityUpdate
+tamp_community_update['content'] = TAMPCommunityUpdate()
+
+
+# Community Update Confirm Message
+
+id_ct_TAMP_communityUpdateConfirm = _OID(id_tamp, 8)
+
+
+class TerseCommunityConfirm(StatusCode):
+ pass
+
+
+class VerboseCommunityConfirm(univ.Sequence):
+ pass
+
+VerboseCommunityConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.OptionalNamedType('communities', CommunityIdentifierList())
+)
+
+
+class CommunityConfirm(univ.Choice):
+ pass
+
+CommunityConfirm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('terseCommConfirm',
+ TerseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('verboseCommConfirm',
+ VerboseCommunityConfirm().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1)))
+)
+
+
+class TAMPCommunityUpdateConfirm(univ.Sequence):
+ pass
+
+TAMPCommunityUpdateConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('update', TAMPMsgRef()),
+ namedtype.NamedType('commConfirm', CommunityConfirm())
+)
+
+
+tamp_community_update_confirm = rfc5652.ContentInfo()
+tamp_community_update_confirm['contentType'] = id_ct_TAMP_communityUpdateConfirm
+tamp_community_update_confirm['content'] = TAMPCommunityUpdateConfirm()
+
+
+# Sequence Number Adjust Message
+
+id_ct_TAMP_seqNumAdjust = _OID(id_tamp, 10)
+
+
+class SequenceNumberAdjust(univ.Sequence):
+ pass
+
+SequenceNumberAdjust.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('msgRef', TAMPMsgRef())
+)
+
+
+tamp_sequence_number_adjust = rfc5652.ContentInfo()
+tamp_sequence_number_adjust['contentType'] = id_ct_TAMP_seqNumAdjust
+tamp_sequence_number_adjust['content'] = SequenceNumberAdjust()
+
+
+# Sequence Number Adjust Confirm Message
+
+id_ct_TAMP_seqNumAdjustConfirm = _OID(id_tamp, 11)
+
+
+class SequenceNumberAdjustConfirm(univ.Sequence):
+ pass
+
+SequenceNumberAdjustConfirm.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('adjust', TAMPMsgRef()),
+ namedtype.NamedType('status', StatusCode())
+)
+
+
+tamp_sequence_number_adjust_confirm = rfc5652.ContentInfo()
+tamp_sequence_number_adjust_confirm['contentType'] = id_ct_TAMP_seqNumAdjustConfirm
+tamp_sequence_number_adjust_confirm['content'] = SequenceNumberAdjustConfirm()
+
+
+# TAMP Error Message
+
+id_ct_TAMP_error = _OID(id_tamp, 9)
+
+
+class TAMPError(univ.Sequence):
+ pass
+
+TAMPError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ TAMPVersion().subtype(implicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0)).subtype(value='v2')),
+ namedtype.NamedType('msgType', univ.ObjectIdentifier()),
+ namedtype.NamedType('status', StatusCode()),
+ namedtype.OptionalNamedType('msgRef', TAMPMsgRef())
+)
+
+
+tamp_error = rfc5652.ContentInfo()
+tamp_error['contentType'] = id_ct_TAMP_error
+tamp_error['content'] = TAMPError()
+
+
+# Object Identifier Arc for Attributes
+
+id_attributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.5')
+
+
+# contingency-public-key-decrypt-key unsigned attribute
+
+id_aa_TAMP_contingencyPublicKeyDecryptKey = _OID(id_attributes, 63)
+
+
+class PlaintextSymmetricKey(univ.OctetString):
+ pass
+
+
+contingency_public_key_decrypt_key = Attribute()
+contingency_public_key_decrypt_key['type'] = id_aa_TAMP_contingencyPublicKeyDecryptKey
+contingency_public_key_decrypt_key['values'][0] = PlaintextSymmetricKey()
+
+
+# id-pe-wrappedApexContinKey extension
+
+id_pe_wrappedApexContinKey = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.20')
+
+
+class ApexContingencyKey(univ.Sequence):
+ pass
+
+ApexContingencyKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('wrapAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('wrappedContinPubKey', univ.OctetString())
+)
+
+
+wrappedApexContinKey = Extension()
+wrappedApexContinKey['extnID'] = id_pe_wrappedApexContinKey
+wrappedApexContinKey['critical'] = 0
+wrappedApexContinKey['extnValue'] = univ.OctetString()
+
+
+# Add to the map of CMS Content Type OIDs to Content Types in
+# rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_TAMP_statusQuery: TAMPStatusQuery(),
+ id_ct_TAMP_statusResponse: TAMPStatusResponse(),
+ id_ct_TAMP_update: TAMPUpdate(),
+ id_ct_TAMP_updateConfirm: TAMPUpdateConfirm(),
+ id_ct_TAMP_apexUpdate: TAMPApexUpdate(),
+ id_ct_TAMP_apexUpdateConfirm: TAMPApexUpdateConfirm(),
+ id_ct_TAMP_communityUpdate: TAMPCommunityUpdate(),
+ id_ct_TAMP_communityUpdateConfirm: TAMPCommunityUpdateConfirm(),
+ id_ct_TAMP_seqNumAdjust: SequenceNumberAdjust(),
+ id_ct_TAMP_seqNumAdjustConfirm: SequenceNumberAdjustConfirm(),
+ id_ct_TAMP_error: TAMPError(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
+
+# Add to the map of CMS Attribute OIDs to Attribute Values in
+# rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_TAMP_contingencyPublicKeyDecryptKey: PlaintextSymmetricKey(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Add to the map of Certificate Extension OIDs to Extensions in
+# rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_wrappedApexContinKey: ApexContingencyKey(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
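+
+
+# A minimal usage sketch (editorial, not from the RFC): build a TAMP
+# Status Query addressed to all modules, carry it in a CMS ContentInfo,
+# and DER-encode it. The sequence number is arbitrary; a real message
+# would be conveyed inside CMS SignedData.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    target = TargetIdentifier()
+    target['allModules'] = univ.Null("").subtype(
+        implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))
+
+    msgRef = TAMPMsgRef()
+    msgRef['target'] = target
+    msgRef['seqNum'] = 7
+
+    statusQuery = TAMPStatusQuery()
+    statusQuery['query'] = msgRef
+
+    ci = rfc5652.ContentInfo()
+    ci['contentType'] = id_ct_TAMP_statusQuery
+    ci['content'] = encode(statusQuery)  # pre-encoded value in the ANY
+    print(encode(ci).hex())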
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5940.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5940.py
new file mode 100644
index 0000000000..e105923358
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5940.py
@@ -0,0 +1,59 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Additional CMS Revocation Information Choices
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5940.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2560
+from pyasn1_modules import rfc5652
+
+
+# RevocationInfoChoice for OCSP response:
+# The OID is included in otherRevInfoFormat, and the
+# signed OCSPResponse is included in otherRevInfo
+
+id_ri_ocsp_response = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.2')
+
+OCSPResponse = rfc2560.OCSPResponse
+
+
+# RevocationInfoChoice for SCVP request/response:
+# The OID is included in otherRevInfoFormat, and
+# SCVPReqRes is included in otherRevInfo
+
+id_ri_scvp = univ.ObjectIdentifier('1.3.6.1.5.5.7.16.4')
+
+ContentInfo = rfc5652.ContentInfo
+
+class SCVPReqRes(univ.Sequence):
+ pass
+
+SCVPReqRes.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('request',
+ ContentInfo().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('response', ContentInfo())
+)
+
+
+# Map of Revocation Info Format OIDs to Revocation Info Formats
+# is added to the ones that are in rfc5652.py
+
+_otherRevInfoFormatMapUpdate = {
+ id_ri_ocsp_response: OCSPResponse(),
+ id_ri_scvp: SCVPReqRes(),
+}
+
+rfc5652.otherRevInfoFormatMap.update(_otherRevInfoFormatMapUpdate)
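+
+
+# A minimal usage sketch (editorial, not from the RFC): wrap a
+# DER-encoded OCSPResponse in RFC 5652's OtherRevocationInfoFormat.
+# The response below is the smallest valid OCSPResponse: responseStatus
+# successful(0) with no responseBytes.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    ocsp_response_der = bytes.fromhex('30030a0100')
+    orif = rfc5652.OtherRevocationInfoFormat()
+    orif['otherRevInfoFormat'] = id_ri_ocsp_response
+    orif['otherRevInfo'] = ocsp_response_der
+    print(encode(orif).hex())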
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5958.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5958.py
new file mode 100644
index 0000000000..1aaa9286ad
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5958.py
@@ -0,0 +1,98 @@
+#
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Asymmetric Key Package, which is essentially version 2 of
+# the PrivateKeyInfo structure from PKCS #8 (RFC 5208)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5958.txt
+
+from pyasn1.type import univ, constraint, namedtype, namedval, tag
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+MAX = float('inf')
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class PrivateKeyAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class EncryptedData(univ.OctetString):
+ pass
+
+
+class EncryptedPrivateKeyInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
+ namedtype.NamedType('encryptedData', EncryptedData())
+ )
+
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(('v1', 0), ('v2', 1))
+
+
+class PrivateKey(univ.OctetString):
+ pass
+
+
+class Attributes(univ.SetOf):
+ componentType = rfc5652.Attribute()
+
+
+class PublicKey(univ.BitString):
+ pass
+
+
+# OneAsymmetricKey is essentially version 2 of PrivateKeyInfo.
+# If publicKey is present, then the version must be v2;
+# otherwise, the version should be v1.
+
+class OneAsymmetricKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', Version()),
+ namedtype.NamedType('privateKeyAlgorithm', PrivateKeyAlgorithmIdentifier()),
+ namedtype.NamedType('privateKey', PrivateKey()),
+ namedtype.OptionalNamedType('attributes', Attributes().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.OptionalNamedType('publicKey', PublicKey().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+class PrivateKeyInfo(OneAsymmetricKey):
+ pass
+
+
+# The CMS AsymmetricKeyPackage Content Type
+
+id_ct_KP_aKeyPackage = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.5')
+
+class AsymmetricKeyPackage(univ.SequenceOf):
+ pass
+
+AsymmetricKeyPackage.componentType = OneAsymmetricKey()
+AsymmetricKeyPackage.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_aKeyPackage: AsymmetricKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
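+
+
+# A minimal usage sketch (editorial, not from the RFC): assemble a v1
+# OneAsymmetricKey (no publicKey, so v1 applies per the comment above)
+# and DER-encode it. The algorithm is rsaEncryption; the privateKey
+# octets are a placeholder, not a real key.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    alg = PrivateKeyAlgorithmIdentifier()
+    alg['algorithm'] = univ.ObjectIdentifier('1.2.840.113549.1.1.1')
+    alg['parameters'] = encode(univ.Null(''))  # rsaEncryption takes NULL
+
+    oak = OneAsymmetricKey()
+    oak['version'] = 'v1'
+    oak['privateKeyAlgorithm'] = alg
+    oak['privateKey'] = PrivateKey(b'placeholder')
+    print(encode(oak).hex())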
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc5990.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5990.py
new file mode 100644
index 0000000000..281316fb81
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc5990.py
@@ -0,0 +1,237 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Use of the RSA-KEM Key Transport Algorithm in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc5990.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Useful types and definitions
+
+class NullParms(univ.Null):
+ pass
+
+
+# Object identifier arcs
+
+is18033_2 = _OID(1, 0, 18033, 2)
+
+nistAlgorithm = _OID(2, 16, 840, 1, 101, 3, 4)
+
+pkcs_1 = _OID(1, 2, 840, 113549, 1, 1)
+
+x9_44 = _OID(1, 3, 133, 16, 840, 9, 44)
+
+x9_44_components = _OID(x9_44, 1)
+
+
+# Types for algorithm identifiers
+
+class Camellia_KeyWrappingScheme(AlgorithmIdentifier):
+ pass
+
+class DataEncapsulationMechanism(AlgorithmIdentifier):
+ pass
+
+class KDF2_HashFunction(AlgorithmIdentifier):
+ pass
+
+class KDF3_HashFunction(AlgorithmIdentifier):
+ pass
+
+class KeyDerivationFunction(AlgorithmIdentifier):
+ pass
+
+class KeyEncapsulationMechanism(AlgorithmIdentifier):
+ pass
+
+class X9_SymmetricKeyWrappingScheme(AlgorithmIdentifier):
+ pass
+
+
+# RSA-KEM Key Transport Algorithm
+
+id_rsa_kem = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 14)
+
+
+class GenericHybridParameters(univ.Sequence):
+ pass
+
+GenericHybridParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('kem', KeyEncapsulationMechanism()),
+ namedtype.NamedType('dem', DataEncapsulationMechanism())
+)
+
+
+rsa_kem = AlgorithmIdentifier()
+rsa_kem['algorithm'] = id_rsa_kem
+rsa_kem['parameters'] = GenericHybridParameters()
+
+
+# KEM-RSA Key Encapsulation Mechanism
+
+id_kem_rsa = _OID(is18033_2, 2, 4)
+
+
+class KeyLength(univ.Integer):
+ pass
+
+KeyLength.subtypeSpec = constraint.ValueRangeConstraint(1, MAX)
+
+
+class RsaKemParameters(univ.Sequence):
+ pass
+
+RsaKemParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunction', KeyDerivationFunction()),
+ namedtype.NamedType('keyLength', KeyLength())
+)
+
+
+kem_rsa = AlgorithmIdentifier()
+kem_rsa['algorithm'] = id_kem_rsa
+kem_rsa['parameters'] = RsaKemParameters()
+
+
+# Key Derivation Functions
+
+id_kdf_kdf2 = _OID(x9_44_components, 1)
+
+id_kdf_kdf3 = _OID(x9_44_components, 2)
+
+
+kdf2 = AlgorithmIdentifier()
+kdf2['algorithm'] = id_kdf_kdf2
+kdf2['parameters'] = KDF2_HashFunction()
+
+kdf3 = AlgorithmIdentifier()
+kdf3['algorithm'] = id_kdf_kdf3
+kdf3['parameters'] = KDF3_HashFunction()
+
+
+# Hash Functions
+
+id_sha1 = _OID(1, 3, 14, 3, 2, 26)
+
+id_sha224 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 4)
+
+id_sha256 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 1)
+
+id_sha384 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 2)
+
+id_sha512 = _OID(2, 16, 840, 1, 101, 3, 4, 2, 3)
+
+
+sha1 = AlgorithmIdentifier()
+sha1['algorithm'] = id_sha1
+sha1['parameters'] = univ.Null("")
+
+sha224 = AlgorithmIdentifier()
+sha224['algorithm'] = id_sha224
+sha224['parameters'] = univ.Null("")
+
+sha256 = AlgorithmIdentifier()
+sha256['algorithm'] = id_sha256
+sha256['parameters'] = univ.Null("")
+
+sha384 = AlgorithmIdentifier()
+sha384['algorithm'] = id_sha384
+sha384['parameters'] = univ.Null("")
+
+sha512 = AlgorithmIdentifier()
+sha512['algorithm'] = id_sha512
+sha512['parameters'] = univ.Null("")
+
+
+# Symmetric Key-Wrapping Schemes
+
+id_aes128_Wrap = _OID(nistAlgorithm, 1, 5)
+
+id_aes192_Wrap = _OID(nistAlgorithm, 1, 25)
+
+id_aes256_Wrap = _OID(nistAlgorithm, 1, 45)
+
+id_alg_CMS3DESwrap = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 6)
+
+id_camellia128_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 2)
+
+id_camellia192_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 3)
+
+id_camellia256_Wrap = _OID(1, 2, 392, 200011, 61, 1, 1, 3, 4)
+
+
+aes128_Wrap = AlgorithmIdentifier()
+aes128_Wrap['algorithm'] = id_aes128_Wrap
+# aes128_Wrap['parameters'] are absent
+
+aes192_Wrap = AlgorithmIdentifier()
+aes192_Wrap['algorithm'] = id_aes192_Wrap
+# aes192_Wrap['parameters'] are absent
+
+aes256_Wrap = AlgorithmIdentifier()
+aes256_Wrap['algorithm'] = id_aes256_Wrap
+# aes256_Wrap['parameters'] are absent
+
+tdes_Wrap = AlgorithmIdentifier()
+tdes_Wrap['algorithm'] = id_alg_CMS3DESwrap
+tdes_Wrap['parameters'] = univ.Null("")
+
+camellia128_Wrap = AlgorithmIdentifier()
+camellia128_Wrap['algorithm'] = id_camellia128_Wrap
+# camellia128_Wrap['parameters'] are absent
+
+camellia192_Wrap = AlgorithmIdentifier()
+camellia192_Wrap['algorithm'] = id_camellia192_Wrap
+# camellia192_Wrap['parameters'] are absent
+
+camellia256_Wrap = AlgorithmIdentifier()
+camellia256_Wrap['algorithm'] = id_camellia256_Wrap
+# camellia256_Wrap['parameters'] are absent
+
+
+# Update the Algorithm Identifier map in rfc5280.py.
+# Note that the ones that must not have parameters are not added to the map.
+
+_algorithmIdentifierMapUpdate = {
+ id_rsa_kem: GenericHybridParameters(),
+ id_kem_rsa: RsaKemParameters(),
+ id_kdf_kdf2: KDF2_HashFunction(),
+ id_kdf_kdf3: KDF3_HashFunction(),
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_alg_CMS3DESwrap: univ.Null(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
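+
+
+# A minimal usage sketch (editorial, not from the RFC): a concrete
+# id-kem-rsa AlgorithmIdentifier using KDF3 with SHA-256 and a 16-octet
+# key-encryption key, built from the schema objects above.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    kdf = KeyDerivationFunction()
+    kdf['algorithm'] = id_kdf_kdf3
+    kdf['parameters'] = encode(sha256)
+
+    params = RsaKemParameters()
+    params['keyDerivationFunction'] = kdf
+    params['keyLength'] = 16
+
+    alg = AlgorithmIdentifier()
+    alg['algorithm'] = id_kem_rsa
+    alg['parameters'] = encode(params)
+    print(encode(alg).hex())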
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6010.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6010.py
new file mode 100644
index 0000000000..250e207ba4
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6010.py
@@ -0,0 +1,88 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Extension for CMS Content Constraints (CCC)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6010.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+AttributeType = rfc5280.AttributeType
+
+AttributeValue = rfc5280.AttributeValue
+
+
+id_ct_anyContentType = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.0')
+
+
+class AttrConstraint(univ.Sequence):
+ pass
+
+AttrConstraint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues', univ.SetOf(
+ componentType=AttributeValue()).subtype(subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class AttrConstraintList(univ.SequenceOf):
+ pass
+
+AttrConstraintList.componentType = AttrConstraint()
+AttrConstraintList.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class ContentTypeGeneration(univ.Enumerated):
+ pass
+
+ContentTypeGeneration.namedValues = namedval.NamedValues(
+ ('canSource', 0),
+ ('cannotSource', 1)
+)
+
+
+class ContentTypeConstraint(univ.Sequence):
+ pass
+
+ContentTypeConstraint.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contentType', univ.ObjectIdentifier()),
+ namedtype.DefaultedNamedType('canSource', ContentTypeGeneration().subtype(value='canSource')),
+ namedtype.OptionalNamedType('attrConstraints', AttrConstraintList())
+)
+
+
+# CMS Content Constraints (CCC) Extension and Object Identifier
+
+id_pe_cmsContentConstraints = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.18')
+
+class CMSContentConstraints(univ.SequenceOf):
+ pass
+
+CMSContentConstraints.componentType = ContentTypeConstraint()
+CMSContentConstraints.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions
+# To be added to the ones that are in rfc5280.py
+
+_certificateExtensionsMap = {
+ id_pe_cmsContentConstraints: CMSContentConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMap)
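+
+
+# A minimal usage sketch (editorial, not from the RFC): a one-entry CMS
+# Content Constraints extension value that permits sourcing any content
+# type (canSource is the default, so it is omitted in DER).
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    ctc = ContentTypeConstraint()
+    ctc['contentType'] = id_ct_anyContentType
+    ccc = CMSContentConstraints()
+    ccc.append(ctc)
+    print(encode(ccc).hex())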
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6019.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6019.py
new file mode 100644
index 0000000000..c6872c7669
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6019.py
@@ -0,0 +1,45 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+# Modified by Russ Housley to add a map for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# BinaryTime: An Alternate Format for Representing Date and Time
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6019.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# BinaryTime: Represent date and time as an integer
+
+class BinaryTime(univ.Integer):
+ pass
+
+BinaryTime.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+# CMS Attribute for representing signing time in BinaryTime
+
+id_aa_binarySigningTime = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.46')
+
+class BinarySigningTime(BinaryTime):
+ pass
+
+
+# Map of Attribute Type OIDs to Attributes is added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_binarySigningTime: BinarySigningTime(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
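+
+
+# A minimal usage sketch (editorial, not from the RFC): a
+# binary-signing-time attribute. BinaryTime counts seconds since
+# 1970-01-01T00:00:00Z; the value below is 2019-01-01T00:00:00Z.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    attr = rfc5652.Attribute()
+    attr['attrType'] = id_aa_binarySigningTime
+    attr['attrValues'][0] = BinarySigningTime(1546300800)
+    print(encode(attr).hex())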
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6031.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6031.py
new file mode 100644
index 0000000000..6e1bb2261d
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6031.py
@@ -0,0 +1,469 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Symmetric Key Package Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6031.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6019
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+ return univ.ObjectIdentifier(output)
+
+
+MAX = float('inf')
+
+id_pskc = univ.ObjectIdentifier('1.2.840.113549.1.9.16.12')
+
+
+# Symmetric Key Package Attributes
+
+id_pskc_manufacturer = _OID(id_pskc, 1)
+
+class at_pskc_manufacturer(char.UTF8String):
+ pass
+
+
+id_pskc_serialNo = _OID(id_pskc, 2)
+
+class at_pskc_serialNo(char.UTF8String):
+ pass
+
+
+id_pskc_model = _OID(id_pskc, 3)
+
+class at_pskc_model(char.UTF8String):
+ pass
+
+
+id_pskc_issueNo = _OID(id_pskc, 4)
+
+class at_pskc_issueNo(char.UTF8String):
+ pass
+
+
+id_pskc_deviceBinding = _OID(id_pskc, 5)
+
+class at_pskc_deviceBinding(char.UTF8String):
+ pass
+
+
+id_pskc_deviceStartDate = _OID(id_pskc, 6)
+
+class at_pskc_deviceStartDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_deviceExpiryDate = _OID(id_pskc, 7)
+
+class at_pskc_deviceExpiryDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_moduleId = _OID(id_pskc, 8)
+
+class at_pskc_moduleId(char.UTF8String):
+ pass
+
+
+id_pskc_deviceUserId = _OID(id_pskc, 26)
+
+class at_pskc_deviceUserId(char.UTF8String):
+ pass
+
+
+# Symmetric Key Attributes
+
+id_pskc_keyId = _OID(id_pskc, 9)
+
+class at_pskc_keyId(char.UTF8String):
+ pass
+
+
+id_pskc_algorithm = _OID(id_pskc, 10)
+
+class at_pskc_algorithm(char.UTF8String):
+ pass
+
+
+id_pskc_issuer = _OID(id_pskc, 11)
+
+class at_pskc_issuer(char.UTF8String):
+ pass
+
+
+id_pskc_keyProfileId = _OID(id_pskc, 12)
+
+class at_pskc_keyProfileId(char.UTF8String):
+ pass
+
+
+id_pskc_keyReference = _OID(id_pskc, 13)
+
+class at_pskc_keyReference(char.UTF8String):
+ pass
+
+
+id_pskc_friendlyName = _OID(id_pskc, 14)
+
+class FriendlyName(univ.Sequence):
+ pass
+
+FriendlyName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('friendlyName', char.UTF8String()),
+ namedtype.OptionalNamedType('friendlyNameLangTag', char.UTF8String())
+)
+
+class at_pskc_friendlyName(FriendlyName):
+ pass
+
+
+id_pskc_algorithmParameters = _OID(id_pskc, 15)
+
+class Encoding(char.UTF8String):
+ pass
+
+Encoding.namedValues = namedval.NamedValues(
+ ('dec', "DECIMAL"),
+ ('hex', "HEXADECIMAL"),
+ ('alpha', "ALPHANUMERIC"),
+ ('b64', "BASE64"),
+ ('bin', "BINARY")
+)
+
+Encoding.subtypeSpec = constraint.SingleValueConstraint(
+ "DECIMAL", "HEXADECIMAL", "ALPHANUMERIC", "BASE64", "BINARY" )
+
+class ChallengeFormat(univ.Sequence):
+ pass
+
+ChallengeFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encoding', Encoding()),
+ namedtype.DefaultedNamedType('checkDigit',
+ univ.Boolean().subtype(value=0)),
+ namedtype.NamedType('min', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('max', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)))
+)
+
+class ResponseFormat(univ.Sequence):
+ pass
+
+ResponseFormat.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encoding', Encoding()),
+ namedtype.NamedType('length', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.DefaultedNamedType('checkDigit',
+ univ.Boolean().subtype(value=0))
+)
+
+class PSKCAlgorithmParameters(univ.Choice):
+ pass
+
+PSKCAlgorithmParameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('suite', char.UTF8String()),
+ namedtype.NamedType('challengeFormat', ChallengeFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('responseFormat', ResponseFormat().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
+)
+
+class at_pskc_algorithmParameters(PSKCAlgorithmParameters):
+ pass
+
+
+id_pskc_counter = _OID(id_pskc, 16)
+
+class at_pskc_counter(univ.Integer):
+ pass
+
+at_pskc_counter.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_time = _OID(id_pskc, 17)
+
+class at_pskc_time(rfc6019.BinaryTime):
+ pass
+
+
+id_pskc_timeInterval = _OID(id_pskc, 18)
+
+class at_pskc_timeInterval(univ.Integer):
+ pass
+
+at_pskc_timeInterval.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_timeDrift = _OID(id_pskc, 19)
+
+class at_pskc_timeDrift(univ.Integer):
+ pass
+
+at_pskc_timeDrift.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_valueMAC = _OID(id_pskc, 20)
+
+class ValueMac(univ.Sequence):
+ pass
+
+ValueMac.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('macAlgorithm', char.UTF8String()),
+ namedtype.NamedType('mac', char.UTF8String())
+)
+
+class at_pskc_valueMAC(ValueMac):
+ pass
+
+
+id_pskc_keyUserId = _OID(id_pskc, 27)
+
+class at_pskc_keyUserId(char.UTF8String):
+ pass
+
+
+id_pskc_keyStartDate = _OID(id_pskc, 21)
+
+class at_pskc_keyStartDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_keyExpiryDate = _OID(id_pskc, 22)
+
+class at_pskc_keyExpiryDate(useful.GeneralizedTime):
+ pass
+
+
+id_pskc_numberOfTransactions = _OID(id_pskc, 23)
+
+class at_pskc_numberOfTransactions(univ.Integer):
+ pass
+
+at_pskc_numberOfTransactions.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+id_pskc_keyUsages = _OID(id_pskc, 24)
+
+class PSKCKeyUsage(char.UTF8String):
+ pass
+
+PSKCKeyUsage.namedValues = namedval.NamedValues(
+ ('otp', "OTP"),
+ ('cr', "CR"),
+ ('encrypt', "Encrypt"),
+ ('integrity', "Integrity"),
+ ('verify', "Verify"),
+ ('unlock', "Unlock"),
+ ('decrypt', "Decrypt"),
+ ('keywrap', "KeyWrap"),
+ ('unwrap', "Unwrap"),
+ ('derive', "Derive"),
+ ('generate', "Generate")
+)
+
+PSKCKeyUsage.subtypeSpec = constraint.SingleValueConstraint(
+ "OTP", "CR", "Encrypt", "Integrity", "Verify", "Unlock",
+ "Decrypt", "KeyWrap", "Unwrap", "Derive", "Generate" )
+
+class PSKCKeyUsages(univ.SequenceOf):
+ pass
+
+PSKCKeyUsages.componentType = PSKCKeyUsage()
+
+class at_pskc_keyUsage(PSKCKeyUsages):
+ pass
+
+
+id_pskc_pinPolicy = _OID(id_pskc, 25)
+
+class PINUsageMode(char.UTF8String):
+ pass
+
+PINUsageMode.namedValues = namedval.NamedValues(
+ ("local", "Local"),
+ ("prepend", "Prepend"),
+ ("append", "Append"),
+ ("algorithmic", "Algorithmic")
+)
+
+PINUsageMode.subtypeSpec = constraint.SingleValueConstraint(
+ "Local", "Prepend", "Append", "Algorithmic" )
+
+class PINPolicy(univ.Sequence):
+ pass
+
+PINPolicy.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('pinKeyId', char.UTF8String().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('pinUsageMode', PINUsageMode().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('maxFailedAttempts', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('minLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.OptionalNamedType('maxLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 4))),
+ namedtype.OptionalNamedType('pinEncoding', Encoding().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5)))
+)
+
+class at_pskc_pinPolicy(PINPolicy):
+ pass
+
+
+# Map of Symmetric Key Package Attribute OIDs to Attributes
+
+sKeyPkgAttributesMap = {
+ id_pskc_manufacturer: at_pskc_manufacturer(),
+ id_pskc_serialNo: at_pskc_serialNo(),
+ id_pskc_model: at_pskc_model(),
+ id_pskc_issueNo: at_pskc_issueNo(),
+ id_pskc_deviceBinding: at_pskc_deviceBinding(),
+ id_pskc_deviceStartDate: at_pskc_deviceStartDate(),
+ id_pskc_deviceExpiryDate: at_pskc_deviceExpiryDate(),
+ id_pskc_moduleId: at_pskc_moduleId(),
+ id_pskc_deviceUserId: at_pskc_deviceUserId(),
+}
+
+
+# Map of Symmetric Key Attribute OIDs to Attributes
+
+sKeyAttributesMap = {
+ id_pskc_keyId: at_pskc_keyId(),
+ id_pskc_algorithm: at_pskc_algorithm(),
+ id_pskc_issuer: at_pskc_issuer(),
+ id_pskc_keyProfileId: at_pskc_keyProfileId(),
+ id_pskc_keyReference: at_pskc_keyReference(),
+ id_pskc_friendlyName: at_pskc_friendlyName(),
+ id_pskc_algorithmParameters: at_pskc_algorithmParameters(),
+ id_pskc_counter: at_pskc_counter(),
+ id_pskc_time: at_pskc_time(),
+ id_pskc_timeInterval: at_pskc_timeInterval(),
+ id_pskc_timeDrift: at_pskc_timeDrift(),
+ id_pskc_valueMAC: at_pskc_valueMAC(),
+ id_pskc_keyUserId: at_pskc_keyUserId(),
+ id_pskc_keyStartDate: at_pskc_keyStartDate(),
+ id_pskc_keyExpiryDate: at_pskc_keyExpiryDate(),
+ id_pskc_numberOfTransactions: at_pskc_numberOfTransactions(),
+ id_pskc_keyUsages: at_pskc_keyUsage(),
+ id_pskc_pinPolicy: at_pskc_pinPolicy(),
+}
+
+
+# This definition replaces Attribute() from rfc5652.py; it is the same except
+# that opentype is added with sKeyPkgAttributesMap and sKeyAttributesMap
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class SKeyAttribute(univ.Sequence):
+ pass
+
+SKeyAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', sKeyAttributesMap)
+ )
+)
+
+
+class SKeyPkgAttribute(univ.Sequence):
+ pass
+
+SKeyPkgAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', sKeyPkgAttributesMap)
+ )
+)
+
+
+# Symmetric Key Package Content Type
+
+id_ct_KP_sKeyPackage = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.25')
+
+
+class KeyPkgVersion(univ.Integer):
+ pass
+
+KeyPkgVersion.namedValues = namedval.NamedValues(
+ ('v1', 1)
+)
+
+
+class OneSymmetricKey(univ.Sequence):
+ pass
+
+OneSymmetricKey.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('sKeyAttrs',
+ univ.SequenceOf(componentType=SKeyAttribute()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('sKey', univ.OctetString())
+)
+
+OneSymmetricKey.sizeSpec = univ.Sequence.sizeSpec + constraint.ValueSizeConstraint(1, 2)
+
+
+class SymmetricKeys(univ.SequenceOf):
+ pass
+
+SymmetricKeys.componentType = OneSymmetricKey()
+SymmetricKeys.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SymmetricKeyPackage(univ.Sequence):
+ pass
+
+SymmetricKeyPackage.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v1')),
+ namedtype.OptionalNamedType('sKeyPkgAttrs',
+ univ.SequenceOf(componentType=SKeyPkgAttribute()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX),
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('sKeys', SymmetricKeys())
+)
+
+
+# Map of Content Type OIDs to Content Types is
+# added to the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_sKeyPackage: SymmetricKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
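+
+
+# A minimal usage sketch (editorial, not from the RFC): a symmetric key
+# package holding a single 16-octet key and no attributes. The key
+# bytes are a placeholder, not real key material.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    osk = OneSymmetricKey()
+    osk['sKey'] = univ.OctetString(b'\x00' * 16)
+
+    keys = SymmetricKeys()
+    keys.append(osk)
+
+    pkg = SymmetricKeyPackage()
+    pkg['sKeys'] = keys
+    print(encode(pkg).hex())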
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6032.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6032.py
new file mode 100644
index 0000000000..563639a8d6
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6032.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Encrypted Key Package Content Type
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6032.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5083
+
+
+# Content Decryption Key Identifier attribute
+
+id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66')
+
+class ContentDecryptKeyID(univ.OctetString):
+ pass
+
+aa_content_decrypt_key_identifier = rfc5652.Attribute()
+aa_content_decrypt_key_identifier['attrType'] = id_aa_KP_contentDecryptKeyID
+aa_content_decrypt_key_identifier['attrValues'][0] = ContentDecryptKeyID()
+
+
+# Encrypted Key Package Content Type
+
+id_ct_KP_encryptedKeyPkg = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.2')
+
+class EncryptedKeyPackage(univ.Choice):
+ pass
+
+EncryptedKeyPackage.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('encrypted', rfc5652.EncryptedData()),
+ namedtype.NamedType('enveloped', rfc5652.EnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('authEnveloped', rfc5083.AuthEnvelopedData().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+# Map of Attribute Type OIDs to Attributes is
+# added to the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types is
+# added to the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_encryptedKeyPkg: EncryptedKeyPackage(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
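+
+
+# A minimal usage sketch (editorial, not from the RFC): a
+# content-decrypt-key-identifier attribute; the identifier bytes are an
+# illustrative placeholder.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    attr = rfc5652.Attribute()
+    attr['attrType'] = id_aa_KP_contentDecryptKeyID
+    attr['attrValues'][0] = ContentDecryptKeyID(b'example-key-id')
+    print(encode(attr).hex())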
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6120.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6120.py
new file mode 100644
index 0000000000..ab256203a0
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6120.py
@@ -0,0 +1,43 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Extensible Messaging and Presence Protocol (XMPP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6120.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# XmppAddr Identifier Type as specified in Section 13.7.1.4. of RFC 6120
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_xmppAddr = id_on + (5, )
+
+
+class XmppAddr(char.UTF8String):
+ pass
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_xmppAddr: XmppAddr(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
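+
+
+# A minimal usage sketch (editorial, not from the RFC): an otherName
+# of type id-on-xmppAddr, using the example JID from RFC 6120. The
+# XmppAddr is pre-encoded because AnotherName carries it in an ANY.
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    on = rfc5280.AnotherName()
+    on['type-id'] = id_on_xmppAddr
+    on['value'] = encode(XmppAddr('juliet@im.example.com'))
+    print(encode(on).hex())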
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6170.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6170.py
new file mode 100644
index 0000000000..e2876167b7
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6170.py
@@ -0,0 +1,17 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Image in the Internet X.509 Public Key Infrastructure
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6170.txt
+#
+
+from pyasn1.type import univ
+
+id_logo_certImage = univ.ObjectIdentifier('1.3.6.1.5.5.7.20.3')
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6187.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6187.py
new file mode 100644
index 0000000000..4be0054716
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6187.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509v3 Certificates for Secure Shell Authentication
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6187.txt
+#
+
+from pyasn1.type import univ
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_kp = id_pkix + (3, )
+
+id_kp_secureShellClient = id_kp + (21, )
+id_kp_secureShellServer = id_kp + (22, )
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6210.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6210.py
new file mode 100644
index 0000000000..28587b9e70
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6210.py
@@ -0,0 +1,42 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Experiment for Hash Functions with Parameters in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6210.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_alg_MD5_XOR_EXPERIMENT = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.13')
+
+
+class MD5_XOR_EXPERIMENT(univ.OctetString):
+ pass
+
+MD5_XOR_EXPERIMENT.subtypeSpec = constraint.ValueSizeConstraint(64, 64)
+
+
+mda_xor_md5_EXPERIMENT = rfc5280.AlgorithmIdentifier()
+mda_xor_md5_EXPERIMENT['algorithm'] = id_alg_MD5_XOR_EXPERIMENT
+mda_xor_md5_EXPERIMENT['parameters'] = MD5_XOR_EXPERIMENT()
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones that are in rfc5280.py.
+
+_algorithmIdentifierMapUpdate = {
+ id_alg_MD5_XOR_EXPERIMENT: MD5_XOR_EXPERIMENT(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
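+
+
+# A minimal usage sketch (editorial, not from the RFC): the
+# experimental algorithm identifier with its mandatory 64-octet
+# parameter block (all zero here purely for illustration).
+
+if __name__ == '__main__':
+    from pyasn1.codec.der.encoder import encode
+
+    alg = rfc5280.AlgorithmIdentifier()
+    alg['algorithm'] = id_alg_MD5_XOR_EXPERIMENT
+    alg['parameters'] = encode(MD5_XOR_EXPERIMENT(b'\x00' * 64))
+    print(encode(alg).hex())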
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6211.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6211.py
new file mode 100644
index 0000000000..abd7a8688d
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6211.py
@@ -0,0 +1,72 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Algorithm Identifier Protection Attribute
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6211.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+DigestAlgorithmIdentifier = rfc5652.DigestAlgorithmIdentifier
+
+MessageAuthenticationCodeAlgorithm = rfc5652.MessageAuthenticationCodeAlgorithm
+
+SignatureAlgorithmIdentifier = rfc5652.SignatureAlgorithmIdentifier
+
+
+# CMS Algorithm Protection attribute
+
+id_aa_cmsAlgorithmProtect = univ.ObjectIdentifier('1.2.840.113549.1.9.52')
+
+
+class CMSAlgorithmProtection(univ.Sequence):
+ pass
+
+CMSAlgorithmProtection.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
+ namedtype.OptionalNamedType('signatureAlgorithm',
+ SignatureAlgorithmIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('macAlgorithm',
+ MessageAuthenticationCodeAlgorithm().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+CMSAlgorithmProtection.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.WithComponentsConstraint(
+ ('signatureAlgorithm', constraint.ComponentPresentConstraint()),
+ ('macAlgorithm', constraint.ComponentAbsentConstraint())),
+ constraint.WithComponentsConstraint(
+ ('signatureAlgorithm', constraint.ComponentAbsentConstraint()),
+ ('macAlgorithm', constraint.ComponentPresentConstraint()))
+)
+
+
+aa_cmsAlgorithmProtection = rfc5652.Attribute()
+aa_cmsAlgorithmProtection['attrType'] = id_aa_cmsAlgorithmProtect
+aa_cmsAlgorithmProtection['attrValues'][0] = CMSAlgorithmProtection()
+
+
+# Map of Attribute Type OIDs to Attributes, added
+# to the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_cmsAlgorithmProtect: CMSAlgorithmProtection(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
\ No newline at end of file
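
Because the module updates the shared rfc5652.cmsAttributesMap in place, simply importing it is enough for a CMS decoder to resolve the protection attribute. A two-line sketch of the lookup the open-type machinery performs:

    from pyasn1_modules import rfc5652, rfc6211

    spec = rfc5652.cmsAttributesMap[rfc6211.id_aa_cmsAlgorithmProtect]
    assert isinstance(spec, rfc6211.CMSAlgorithmProtection)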
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6402-1.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6402-1.py
new file mode 100644
index 0000000000..322e35e0c7
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6402-1.py
@@ -0,0 +1,627 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Modified by Russ Housley to add maps for CMC Control Attributes
+# and CMC Content Types for use with opentypes.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Management over CMS (CMC) Updates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6402.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc4211
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+cmcControlAttributesMap = { }
+
+
+class ChangeSubjectName(univ.Sequence):
+ pass
+
+
+ChangeSubjectName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('subject', rfc5280.Name()),
+ namedtype.OptionalNamedType('subjectAlt', rfc5280.GeneralNames())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class CMCStatus(univ.Integer):
+ pass
+
+
+CMCStatus.namedValues = namedval.NamedValues(
+ ('success', 0),
+ ('failed', 2),
+ ('pending', 3),
+ ('noSupport', 4),
+ ('confirmRequired', 5),
+ ('popRequired', 6),
+ ('partial', 7)
+)
+
+
+class PendInfo(univ.Sequence):
+ pass
+
+
+PendInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pendToken', univ.OctetString()),
+ namedtype.NamedType('pendTime', useful.GeneralizedTime())
+)
+
+bodyIdMax = univ.Integer(4294967295)
+
+
+class BodyPartID(univ.Integer):
+ pass
+
+
+BodyPartID.subtypeSpec = constraint.ValueRangeConstraint(0, bodyIdMax)
+
+
+class BodyPartPath(univ.SequenceOf):
+ pass
+
+
+BodyPartPath.componentType = BodyPartID()
+BodyPartPath.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class BodyPartReference(univ.Choice):
+ pass
+
+
+BodyPartReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('bodyPartPath', BodyPartPath())
+)
+
+
+class CMCFailInfo(univ.Integer):
+ pass
+
+
+CMCFailInfo.namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badMessageCheck', 1),
+ ('badRequest', 2),
+ ('badTime', 3),
+ ('badCertId', 4),
+ ('unsupportedExt', 5),
+ ('mustArchiveKeys', 6),
+ ('badIdentity', 7),
+ ('popRequired', 8),
+ ('popFailed', 9),
+ ('noKeyReuse', 10),
+ ('internalCAError', 11),
+ ('tryLater', 12),
+ ('authDataFail', 13)
+)
+
+
+class CMCStatusInfoV2(univ.Sequence):
+ pass
+
+
+CMCStatusInfoV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cMCStatus', CMCStatus()),
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference())),
+ namedtype.OptionalNamedType('statusString', char.UTF8String()),
+ namedtype.OptionalNamedType(
+ 'otherInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfo', CMCFailInfo()),
+ namedtype.NamedType('pendInfo', PendInfo()),
+ namedtype.NamedType(
+ 'extendedFailInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfoOID', univ.ObjectIdentifier()),
+ namedtype.NamedType('failInfoValue', AttributeValue()))
+ )
+ )
+ )
+ )
+ )
+)
+
+
+class GetCRL(univ.Sequence):
+ pass
+
+
+GetCRL.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.Name()),
+ namedtype.OptionalNamedType('cRLName', rfc5280.GeneralName()),
+ namedtype.OptionalNamedType('time', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('reasons', rfc5280.ReasonFlags())
+)
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_cmc = _buildOid(id_pkix, 7)
+
+id_cmc_batchResponses = _buildOid(id_cmc, 29)
+
+id_cmc_popLinkWitness = _buildOid(id_cmc, 23)
+
+
+class PopLinkWitnessV2(univ.Sequence):
+ pass
+
+
+PopLinkWitnessV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyGenAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('macAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+id_cmc_popLinkWitnessV2 = _buildOid(id_cmc, 33)
+
+id_cmc_identityProofV2 = _buildOid(id_cmc, 34)
+
+id_cmc_revokeRequest = _buildOid(id_cmc, 17)
+
+id_cmc_recipientNonce = _buildOid(id_cmc, 7)
+
+
+class ControlsProcessed(univ.Sequence):
+ pass
+
+
+ControlsProcessed.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference()))
+)
+
+
+class CertificationRequest(univ.Sequence):
+ pass
+
+
+CertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'certificationRequestInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('subject', rfc5280.Name()),
+ namedtype.NamedType(
+ 'subjectPublicKeyInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+ )
+ )
+ ),
+ namedtype.NamedType(
+ 'attributes', univ.SetOf(
+ componentType=rfc5652.Attribute()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ )
+ )
+ )
+ ),
+ namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class TaggedCertificationRequest(univ.Sequence):
+ pass
+
+
+TaggedCertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('certificationRequest', CertificationRequest())
+)
+
+
+class TaggedRequest(univ.Choice):
+ pass
+
+
+TaggedRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tcr', TaggedCertificationRequest().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('crm',
+ rfc4211.CertReqMsg().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('orm', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('requestMessageType', univ.ObjectIdentifier()),
+ namedtype.NamedType('requestMessageValue', univ.Any())
+ ))
+ .subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+id_cmc_popLinkRandom = _buildOid(id_cmc, 22)
+
+id_cmc_statusInfo = _buildOid(id_cmc, 1)
+
+id_cmc_trustedAnchors = _buildOid(id_cmc, 26)
+
+id_cmc_transactionId = _buildOid(id_cmc, 5)
+
+id_cmc_encryptedPOP = _buildOid(id_cmc, 9)
+
+
+class PublishTrustAnchors(univ.Sequence):
+ pass
+
+
+PublishTrustAnchors.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seqNumber', univ.Integer()),
+ namedtype.NamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('anchorHashes', univ.SequenceOf(componentType=univ.OctetString()))
+)
+
+
+class RevokeRequest(univ.Sequence):
+ pass
+
+
+RevokeRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.Name()),
+ namedtype.NamedType('serialNumber', univ.Integer()),
+ namedtype.NamedType('reason', rfc5280.CRLReason()),
+ namedtype.OptionalNamedType('invalidityDate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('passphrase', univ.OctetString()),
+ namedtype.OptionalNamedType('comment', char.UTF8String())
+)
+
+id_cmc_senderNonce = _buildOid(id_cmc, 6)
+
+id_cmc_authData = _buildOid(id_cmc, 27)
+
+
+class TaggedContentInfo(univ.Sequence):
+ pass
+
+
+TaggedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('contentInfo', rfc5652.ContentInfo())
+)
+
+
+class IdentifyProofV2(univ.Sequence):
+ pass
+
+
+IdentifyProofV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('proofAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('macAlgId', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+
+class CMCPublicationInfo(univ.Sequence):
+ pass
+
+
+CMCPublicationInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('certHashes', univ.SequenceOf(componentType=univ.OctetString())),
+ namedtype.NamedType('pubInfo', rfc4211.PKIPublicationInfo())
+)
+
+id_kp_cmcCA = _buildOid(rfc5280.id_kp, 27)
+
+id_cmc_confirmCertAcceptance = _buildOid(id_cmc, 24)
+
+id_cmc_raIdentityWitness = _buildOid(id_cmc, 35)
+
+id_ExtensionReq = _buildOid(1, 2, 840, 113549, 1, 9, 14)
+
+id_cct = _buildOid(id_pkix, 12)
+
+id_cct_PKIData = _buildOid(id_cct, 2)
+
+id_kp_cmcRA = _buildOid(rfc5280.id_kp, 28)
+
+
+class CMCStatusInfo(univ.Sequence):
+ pass
+
+
+CMCStatusInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cMCStatus', CMCStatus()),
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartID())),
+ namedtype.OptionalNamedType('statusString', char.UTF8String()),
+ namedtype.OptionalNamedType(
+ 'otherInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfo', CMCFailInfo()),
+ namedtype.NamedType('pendInfo', PendInfo())
+ )
+ )
+ )
+)
+
+
+class DecryptedPOP(univ.Sequence):
+ pass
+
+
+DecryptedPOP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('thePOP', univ.OctetString())
+)
+
+id_cmc_addExtensions = _buildOid(id_cmc, 8)
+
+id_cmc_modCertTemplate = _buildOid(id_cmc, 31)
+
+
+class TaggedAttribute(univ.Sequence):
+ pass
+
+
+TaggedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', cmcControlAttributesMap)
+ )
+)
+
+
+class OtherMsg(univ.Sequence):
+ pass
+
+
+OtherMsg.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('otherMsgType', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherMsgValue', univ.Any())
+)
+
+
+class PKIData(univ.Sequence):
+ pass
+
+
+PKIData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+ namedtype.NamedType('reqSequence', univ.SequenceOf(componentType=TaggedRequest())),
+ namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+ namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class BodyPartList(univ.SequenceOf):
+ pass
+
+
+BodyPartList.componentType = BodyPartID()
+BodyPartList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_cmc_responseBody = _buildOid(id_cmc, 37)
+
+
+class AuthPublish(BodyPartID):
+ pass
+
+
+class CMCUnsignedData(univ.Sequence):
+ pass
+
+
+CMCUnsignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartPath', BodyPartPath()),
+ namedtype.NamedType('identifier', univ.ObjectIdentifier()),
+ namedtype.NamedType('content', univ.Any())
+)
+
+
+class CMCCertId(rfc5652.IssuerAndSerialNumber):
+ pass
+
+
+class PKIResponse(univ.Sequence):
+ pass
+
+
+PKIResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+ namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+ namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class ResponseBody(PKIResponse):
+ pass
+
+
+id_cmc_statusInfoV2 = _buildOid(id_cmc, 25)
+
+id_cmc_lraPOPWitness = _buildOid(id_cmc, 11)
+
+
+class ModCertTemplate(univ.Sequence):
+ pass
+
+
+ModCertTemplate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataReference', BodyPartPath()),
+ namedtype.NamedType('certReferences', BodyPartList()),
+ namedtype.DefaultedNamedType('replace', univ.Boolean().subtype(value=1)),
+ namedtype.NamedType('certTemplate', rfc4211.CertTemplate())
+)
+
+id_cmc_regInfo = _buildOid(id_cmc, 18)
+
+id_cmc_identityProof = _buildOid(id_cmc, 3)
+
+
+class ExtensionReq(univ.SequenceOf):
+ pass
+
+
+ExtensionReq.componentType = rfc5280.Extension()
+ExtensionReq.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_kp_cmcArchive = _buildOid(rfc5280.id_kp, 28)
+
+id_cmc_publishCert = _buildOid(id_cmc, 30)
+
+id_cmc_dataReturn = _buildOid(id_cmc, 4)
+
+
+class LraPopWitness(univ.Sequence):
+ pass
+
+
+LraPopWitness.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataBodyid', BodyPartID()),
+ namedtype.NamedType('bodyIds', univ.SequenceOf(componentType=BodyPartID()))
+)
+
+id_aa = _buildOid(1, 2, 840, 113549, 1, 9, 16, 2)
+
+id_aa_cmc_unsignedData = _buildOid(id_aa, 34)
+
+id_cmc_getCert = _buildOid(id_cmc, 15)
+
+id_cmc_batchRequests = _buildOid(id_cmc, 28)
+
+id_cmc_decryptedPOP = _buildOid(id_cmc, 10)
+
+id_cmc_responseInfo = _buildOid(id_cmc, 19)
+
+id_cmc_changeSubjectName = _buildOid(id_cmc, 36)
+
+
+class GetCert(univ.Sequence):
+ pass
+
+
+GetCert.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+)
+
+id_cmc_identification = _buildOid(id_cmc, 2)
+
+id_cmc_queryPending = _buildOid(id_cmc, 21)
+
+
+class AddExtensions(univ.Sequence):
+ pass
+
+
+AddExtensions.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataReference', BodyPartID()),
+ namedtype.NamedType('certReferences', univ.SequenceOf(componentType=BodyPartID())),
+ namedtype.NamedType('extensions', univ.SequenceOf(componentType=rfc5280.Extension()))
+)
+
+
+class EncryptedPOP(univ.Sequence):
+ pass
+
+
+EncryptedPOP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request', TaggedRequest()),
+ namedtype.NamedType('cms', rfc5652.ContentInfo()),
+ namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witnessAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+id_cmc_getCRL = _buildOid(id_cmc, 16)
+
+id_cct_PKIResponse = _buildOid(id_cct, 3)
+
+id_cmc_controlProcessed = _buildOid(id_cmc, 32)
+
+
+class NoSignatureValue(univ.OctetString):
+ pass
+
+
+id_ad_cmc = _buildOid(rfc5280.id_ad, 12)
+
+id_alg_noSignature = _buildOid(id_pkix, 6, 2)
+
+
+# Map of CMC Control OIDs to CMC Control Attributes
+
+_cmcControlAttributesMapUpdate = {
+ id_cmc_statusInfo: CMCStatusInfo(),
+ id_cmc_statusInfoV2: CMCStatusInfoV2(),
+ id_cmc_identification: char.UTF8String(),
+ id_cmc_identityProof: univ.OctetString(),
+ id_cmc_identityProofV2: IdentifyProofV2(),
+ id_cmc_dataReturn: univ.OctetString(),
+ id_cmc_transactionId: univ.Integer(),
+ id_cmc_senderNonce: univ.OctetString(),
+ id_cmc_recipientNonce: univ.OctetString(),
+ id_cmc_addExtensions: AddExtensions(),
+ id_cmc_encryptedPOP: EncryptedPOP(),
+ id_cmc_decryptedPOP: DecryptedPOP(),
+ id_cmc_lraPOPWitness: LraPopWitness(),
+ id_cmc_getCert: GetCert(),
+ id_cmc_getCRL: GetCRL(),
+ id_cmc_revokeRequest: RevokeRequest(),
+ id_cmc_regInfo: univ.OctetString(),
+ id_cmc_responseInfo: univ.OctetString(),
+ id_cmc_queryPending: univ.OctetString(),
+ id_cmc_popLinkRandom: univ.OctetString(),
+ id_cmc_popLinkWitness: univ.OctetString(),
+ id_cmc_popLinkWitnessV2: PopLinkWitnessV2(),
+ id_cmc_confirmCertAcceptance: CMCCertId(),
+ id_cmc_trustedAnchors: PublishTrustAnchors(),
+ id_cmc_authData: AuthPublish(),
+ id_cmc_batchRequests: BodyPartList(),
+ id_cmc_batchResponses: BodyPartList(),
+ id_cmc_publishCert: CMCPublicationInfo(),
+ id_cmc_modCertTemplate: ModCertTemplate(),
+ id_cmc_controlProcessed: ControlsProcessed(),
+ id_ExtensionReq: ExtensionReq(),
+}
+
+cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate)
+
+
+# Map of CMC Content Type OIDs to CMC Content Types, added to
+# the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_cct_PKIData: PKIData(),
+ id_cct_PKIResponse: PKIResponse(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6402.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6402.py
new file mode 100644
index 0000000000..b5f0d48fa4
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6402.py
@@ -0,0 +1,628 @@
+# coding: utf-8
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Stanisław Pitucha with asn1ate tool.
+# Modified by Russ Housley to add maps for CMC Control Attributes
+# and CMC Content Types for use with opentypes.
+#
+# Copyright (c) 2005-2019, Ilya Etingof <etingof@gmail.com>
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Certificate Management over CMS (CMC) Updates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6402.txt
+#
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+from pyasn1.type import useful
+
+from pyasn1_modules import rfc4211
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+def _buildOid(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Since CMS Attributes and CMC Controls both use 'attrType', one map is used
+cmcControlAttributesMap = rfc5652.cmsAttributesMap
+
+
+class ChangeSubjectName(univ.Sequence):
+ pass
+
+
+ChangeSubjectName.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('subject', rfc5280.Name()),
+ namedtype.OptionalNamedType('subjectAlt', rfc5280.GeneralNames())
+)
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class CMCStatus(univ.Integer):
+ pass
+
+
+CMCStatus.namedValues = namedval.NamedValues(
+ ('success', 0),
+ ('failed', 2),
+ ('pending', 3),
+ ('noSupport', 4),
+ ('confirmRequired', 5),
+ ('popRequired', 6),
+ ('partial', 7)
+)
+
+
+class PendInfo(univ.Sequence):
+ pass
+
+
+PendInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pendToken', univ.OctetString()),
+ namedtype.NamedType('pendTime', useful.GeneralizedTime())
+)
+
+bodyIdMax = univ.Integer(4294967295)
+
+
+class BodyPartID(univ.Integer):
+ pass
+
+
+BodyPartID.subtypeSpec = constraint.ValueRangeConstraint(0, bodyIdMax)
+
+
+class BodyPartPath(univ.SequenceOf):
+ pass
+
+
+BodyPartPath.componentType = BodyPartID()
+BodyPartPath.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class BodyPartReference(univ.Choice):
+ pass
+
+
+BodyPartReference.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('bodyPartPath', BodyPartPath())
+)
+
+
+class CMCFailInfo(univ.Integer):
+ pass
+
+
+CMCFailInfo.namedValues = namedval.NamedValues(
+ ('badAlg', 0),
+ ('badMessageCheck', 1),
+ ('badRequest', 2),
+ ('badTime', 3),
+ ('badCertId', 4),
+ ('unsupportedExt', 5),
+ ('mustArchiveKeys', 6),
+ ('badIdentity', 7),
+ ('popRequired', 8),
+ ('popFailed', 9),
+ ('noKeyReuse', 10),
+ ('internalCAError', 11),
+ ('tryLater', 12),
+ ('authDataFail', 13)
+)
+
+
+class CMCStatusInfoV2(univ.Sequence):
+ pass
+
+
+CMCStatusInfoV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cMCStatus', CMCStatus()),
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference())),
+ namedtype.OptionalNamedType('statusString', char.UTF8String()),
+ namedtype.OptionalNamedType(
+ 'otherInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfo', CMCFailInfo()),
+ namedtype.NamedType('pendInfo', PendInfo()),
+ namedtype.NamedType(
+ 'extendedFailInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfoOID', univ.ObjectIdentifier()),
+ namedtype.NamedType('failInfoValue', AttributeValue()))
+ )
+ )
+ )
+ )
+ )
+)
+
+
+class GetCRL(univ.Sequence):
+ pass
+
+
+GetCRL.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.Name()),
+ namedtype.OptionalNamedType('cRLName', rfc5280.GeneralName()),
+ namedtype.OptionalNamedType('time', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('reasons', rfc5280.ReasonFlags())
+)
+
+id_pkix = _buildOid(1, 3, 6, 1, 5, 5, 7)
+
+id_cmc = _buildOid(id_pkix, 7)
+
+id_cmc_batchResponses = _buildOid(id_cmc, 29)
+
+id_cmc_popLinkWitness = _buildOid(id_cmc, 23)
+
+
+class PopLinkWitnessV2(univ.Sequence):
+ pass
+
+
+PopLinkWitnessV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyGenAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('macAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+id_cmc_popLinkWitnessV2 = _buildOid(id_cmc, 33)
+
+id_cmc_identityProofV2 = _buildOid(id_cmc, 34)
+
+id_cmc_revokeRequest = _buildOid(id_cmc, 17)
+
+id_cmc_recipientNonce = _buildOid(id_cmc, 7)
+
+
+class ControlsProcessed(univ.Sequence):
+ pass
+
+
+ControlsProcessed.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartReference()))
+)
+
+
+class CertificationRequest(univ.Sequence):
+ pass
+
+
+CertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType(
+ 'certificationRequestInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('subject', rfc5280.Name()),
+ namedtype.NamedType(
+ 'subjectPublicKeyInfo', univ.Sequence(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('algorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('subjectPublicKey', univ.BitString())
+ )
+ )
+ ),
+ namedtype.NamedType(
+ 'attributes', univ.SetOf(
+ componentType=rfc5652.Attribute()).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))
+ )
+ )
+ )
+ ),
+ namedtype.NamedType('signatureAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString())
+)
+
+
+class TaggedCertificationRequest(univ.Sequence):
+ pass
+
+
+TaggedCertificationRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('certificationRequest', CertificationRequest())
+)
+
+
+class TaggedRequest(univ.Choice):
+ pass
+
+
+TaggedRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tcr', TaggedCertificationRequest().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('crm',
+ rfc4211.CertReqMsg().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('orm', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('requestMessageType', univ.ObjectIdentifier()),
+ namedtype.NamedType('requestMessageValue', univ.Any())
+ ))
+ .subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+)
+
+id_cmc_popLinkRandom = _buildOid(id_cmc, 22)
+
+id_cmc_statusInfo = _buildOid(id_cmc, 1)
+
+id_cmc_trustedAnchors = _buildOid(id_cmc, 26)
+
+id_cmc_transactionId = _buildOid(id_cmc, 5)
+
+id_cmc_encryptedPOP = _buildOid(id_cmc, 9)
+
+
+class PublishTrustAnchors(univ.Sequence):
+ pass
+
+
+PublishTrustAnchors.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('seqNumber', univ.Integer()),
+ namedtype.NamedType('hashAlgorithm', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('anchorHashes', univ.SequenceOf(componentType=univ.OctetString()))
+)
+
+
+class RevokeRequest(univ.Sequence):
+ pass
+
+
+RevokeRequest.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.Name()),
+ namedtype.NamedType('serialNumber', univ.Integer()),
+ namedtype.NamedType('reason', rfc5280.CRLReason()),
+ namedtype.OptionalNamedType('invalidityDate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('passphrase', univ.OctetString()),
+ namedtype.OptionalNamedType('comment', char.UTF8String())
+)
+
+id_cmc_senderNonce = _buildOid(id_cmc, 6)
+
+id_cmc_authData = _buildOid(id_cmc, 27)
+
+
+class TaggedContentInfo(univ.Sequence):
+ pass
+
+
+TaggedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('contentInfo', rfc5652.ContentInfo())
+)
+
+
+class IdentifyProofV2(univ.Sequence):
+ pass
+
+
+IdentifyProofV2.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('proofAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('macAlgId', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+
+class CMCPublicationInfo(univ.Sequence):
+ pass
+
+
+CMCPublicationInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('certHashes', univ.SequenceOf(componentType=univ.OctetString())),
+ namedtype.NamedType('pubInfo', rfc4211.PKIPublicationInfo())
+)
+
+id_kp_cmcCA = _buildOid(rfc5280.id_kp, 27)
+
+id_cmc_confirmCertAcceptance = _buildOid(id_cmc, 24)
+
+id_cmc_raIdentityWitness = _buildOid(id_cmc, 35)
+
+id_ExtensionReq = _buildOid(1, 2, 840, 113549, 1, 9, 14)
+
+id_cct = _buildOid(id_pkix, 12)
+
+id_cct_PKIData = _buildOid(id_cct, 2)
+
+id_kp_cmcRA = _buildOid(rfc5280.id_kp, 28)
+
+
+class CMCStatusInfo(univ.Sequence):
+ pass
+
+
+CMCStatusInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cMCStatus', CMCStatus()),
+ namedtype.NamedType('bodyList', univ.SequenceOf(componentType=BodyPartID())),
+ namedtype.OptionalNamedType('statusString', char.UTF8String()),
+ namedtype.OptionalNamedType(
+ 'otherInfo', univ.Choice(
+ componentType=namedtype.NamedTypes(
+ namedtype.NamedType('failInfo', CMCFailInfo()),
+ namedtype.NamedType('pendInfo', PendInfo())
+ )
+ )
+ )
+)
+
+
+class DecryptedPOP(univ.Sequence):
+ pass
+
+
+DecryptedPOP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('thePOP', univ.OctetString())
+)
+
+id_cmc_addExtensions = _buildOid(id_cmc, 8)
+
+id_cmc_modCertTemplate = _buildOid(id_cmc, 31)
+
+
+class TaggedAttribute(univ.Sequence):
+ pass
+
+
+TaggedAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', univ.SetOf(componentType=AttributeValue()),
+ openType=opentype.OpenType('attrType', cmcControlAttributesMap)
+ )
+)
+
+
+class OtherMsg(univ.Sequence):
+ pass
+
+
+OtherMsg.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartID', BodyPartID()),
+ namedtype.NamedType('otherMsgType', univ.ObjectIdentifier()),
+ namedtype.NamedType('otherMsgValue', univ.Any())
+)
+
+
+class PKIData(univ.Sequence):
+ pass
+
+
+PKIData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+ namedtype.NamedType('reqSequence', univ.SequenceOf(componentType=TaggedRequest())),
+ namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+ namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class BodyPartList(univ.SequenceOf):
+ pass
+
+
+BodyPartList.componentType = BodyPartID()
+BodyPartList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_cmc_responseBody = _buildOid(id_cmc, 37)
+
+
+class AuthPublish(BodyPartID):
+ pass
+
+
+class CMCUnsignedData(univ.Sequence):
+ pass
+
+
+CMCUnsignedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bodyPartPath', BodyPartPath()),
+ namedtype.NamedType('identifier', univ.ObjectIdentifier()),
+ namedtype.NamedType('content', univ.Any())
+)
+
+
+class CMCCertId(rfc5652.IssuerAndSerialNumber):
+ pass
+
+
+class PKIResponse(univ.Sequence):
+ pass
+
+
+PKIResponse.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('controlSequence', univ.SequenceOf(componentType=TaggedAttribute())),
+ namedtype.NamedType('cmsSequence', univ.SequenceOf(componentType=TaggedContentInfo())),
+ namedtype.NamedType('otherMsgSequence', univ.SequenceOf(componentType=OtherMsg()))
+)
+
+
+class ResponseBody(PKIResponse):
+ pass
+
+
+id_cmc_statusInfoV2 = _buildOid(id_cmc, 25)
+
+id_cmc_lraPOPWitness = _buildOid(id_cmc, 11)
+
+
+class ModCertTemplate(univ.Sequence):
+ pass
+
+
+ModCertTemplate.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataReference', BodyPartPath()),
+ namedtype.NamedType('certReferences', BodyPartList()),
+ namedtype.DefaultedNamedType('replace', univ.Boolean().subtype(value=1)),
+ namedtype.NamedType('certTemplate', rfc4211.CertTemplate())
+)
+
+id_cmc_regInfo = _buildOid(id_cmc, 18)
+
+id_cmc_identityProof = _buildOid(id_cmc, 3)
+
+
+class ExtensionReq(univ.SequenceOf):
+ pass
+
+
+ExtensionReq.componentType = rfc5280.Extension()
+ExtensionReq.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_kp_cmcArchive = _buildOid(rfc5280.id_kp, 28)
+
+id_cmc_publishCert = _buildOid(id_cmc, 30)
+
+id_cmc_dataReturn = _buildOid(id_cmc, 4)
+
+
+class LraPopWitness(univ.Sequence):
+ pass
+
+
+LraPopWitness.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataBodyid', BodyPartID()),
+ namedtype.NamedType('bodyIds', univ.SequenceOf(componentType=BodyPartID()))
+)
+
+id_aa = _buildOid(1, 2, 840, 113549, 1, 9, 16, 2)
+
+id_aa_cmc_unsignedData = _buildOid(id_aa, 34)
+
+id_cmc_getCert = _buildOid(id_cmc, 15)
+
+id_cmc_batchRequests = _buildOid(id_cmc, 28)
+
+id_cmc_decryptedPOP = _buildOid(id_cmc, 10)
+
+id_cmc_responseInfo = _buildOid(id_cmc, 19)
+
+id_cmc_changeSubjectName = _buildOid(id_cmc, 36)
+
+
+class GetCert(univ.Sequence):
+ pass
+
+
+GetCert.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuerName', rfc5280.GeneralName()),
+ namedtype.NamedType('serialNumber', univ.Integer())
+)
+
+id_cmc_identification = _buildOid(id_cmc, 2)
+
+id_cmc_queryPending = _buildOid(id_cmc, 21)
+
+
+class AddExtensions(univ.Sequence):
+ pass
+
+
+AddExtensions.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkiDataReference', BodyPartID()),
+ namedtype.NamedType('certReferences', univ.SequenceOf(componentType=BodyPartID())),
+ namedtype.NamedType('extensions', univ.SequenceOf(componentType=rfc5280.Extension()))
+)
+
+
+class EncryptedPOP(univ.Sequence):
+ pass
+
+
+EncryptedPOP.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('request', TaggedRequest()),
+ namedtype.NamedType('cms', rfc5652.ContentInfo()),
+ namedtype.NamedType('thePOPAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witnessAlgID', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('witness', univ.OctetString())
+)
+
+id_cmc_getCRL = _buildOid(id_cmc, 16)
+
+id_cct_PKIResponse = _buildOid(id_cct, 3)
+
+id_cmc_controlProcessed = _buildOid(id_cmc, 32)
+
+
+class NoSignatureValue(univ.OctetString):
+ pass
+
+
+id_ad_cmc = _buildOid(rfc5280.id_ad, 12)
+
+id_alg_noSignature = _buildOid(id_pkix, 6, 2)
+
+
+# Map of CMC Control OIDs to CMC Control Attributes
+
+_cmcControlAttributesMapUpdate = {
+ id_cmc_statusInfo: CMCStatusInfo(),
+ id_cmc_statusInfoV2: CMCStatusInfoV2(),
+ id_cmc_identification: char.UTF8String(),
+ id_cmc_identityProof: univ.OctetString(),
+ id_cmc_identityProofV2: IdentifyProofV2(),
+ id_cmc_dataReturn: univ.OctetString(),
+ id_cmc_transactionId: univ.Integer(),
+ id_cmc_senderNonce: univ.OctetString(),
+ id_cmc_recipientNonce: univ.OctetString(),
+ id_cmc_addExtensions: AddExtensions(),
+ id_cmc_encryptedPOP: EncryptedPOP(),
+ id_cmc_decryptedPOP: DecryptedPOP(),
+ id_cmc_lraPOPWitness: LraPopWitness(),
+ id_cmc_getCert: GetCert(),
+ id_cmc_getCRL: GetCRL(),
+ id_cmc_revokeRequest: RevokeRequest(),
+ id_cmc_regInfo: univ.OctetString(),
+ id_cmc_responseInfo: univ.OctetString(),
+ id_cmc_queryPending: univ.OctetString(),
+ id_cmc_popLinkRandom: univ.OctetString(),
+ id_cmc_popLinkWitness: univ.OctetString(),
+ id_cmc_popLinkWitnessV2: PopLinkWitnessV2(),
+ id_cmc_confirmCertAcceptance: CMCCertId(),
+ id_cmc_trustedAnchors: PublishTrustAnchors(),
+ id_cmc_authData: AuthPublish(),
+ id_cmc_batchRequests: BodyPartList(),
+ id_cmc_batchResponses: BodyPartList(),
+ id_cmc_publishCert: CMCPublicationInfo(),
+ id_cmc_modCertTemplate: ModCertTemplate(),
+ id_cmc_controlProcessed: ControlsProcessed(),
+ id_ExtensionReq: ExtensionReq(),
+}
+
+cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate)
+
+
+# Map of CMC Content Type OIDs to CMC Content Types, added to
+# the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_cct_PKIData: PKIData(),
+ id_cct_PKIResponse: PKIResponse(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
+
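
A quick check of what the _buildOid helper produces (this applies equally to the rfc6402-1.py copy above): it flattens any mix of ObjectIdentifier prefixes and integer arcs into a single OID. A sketch, not part of the module:

    from pyasn1.type import univ
    from pyasn1_modules import rfc6402

    # id_cmc is id-pkix arc 7, so the statusInfo control lands at
    # 1.3.6.1.5.5.7.7.1; id_cct_PKIData chains id-pkix arc 12 arc 2.
    assert str(rfc6402.id_cmc_statusInfo) == '1.3.6.1.5.5.7.7.1'
    assert rfc6402.id_cct_PKIData == univ.ObjectIdentifier('1.3.6.1.5.5.7.12.2')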
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6482.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6482.py
new file mode 100644
index 0000000000..d213a46f8d
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6482.py
@@ -0,0 +1,74 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RPKI Route Origin Authorizations (ROAs)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6482.txt
+# https://www.rfc-editor.org/errata/eid5881
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+id_ct_routeOriginAuthz = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.24')
+
+
+class ASID(univ.Integer):
+ pass
+
+
+class IPAddress(univ.BitString):
+ pass
+
+
+class ROAIPAddress(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('address', IPAddress()),
+ namedtype.OptionalNamedType('maxLength', univ.Integer())
+ )
+
+
+class ROAIPAddressFamily(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('addressFamily',
+ univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(2, 3))),
+ namedtype.NamedType('addresses',
+ univ.SequenceOf(componentType=ROAIPAddress()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+class RouteOriginAttestation(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.NamedType('asID', ASID()),
+ namedtype.NamedType('ipAddrBlocks',
+ univ.SequenceOf(componentType=ROAIPAddressFamily()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, MAX)))
+ )
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_routeOriginAuthz: RouteOriginAttestation(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
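
A self-contained encode/decode round trip of a one-prefix ROA; the AS number and the 192.0.2.0/24 prefix are documentation placeholders:

    from pyasn1.codec.der.decoder import decode as der_decode
    from pyasn1.codec.der.encoder import encode as der_encode
    from pyasn1.type import univ
    from pyasn1_modules import rfc6482

    roa = rfc6482.RouteOriginAttestation()
    roa['asID'] = 64512                       # private-use AS number
    addr = rfc6482.ROAIPAddress()
    addr['address'] = univ.BitString(binValue='110000000000000000000010')  # 192.0.2/24
    fam = rfc6482.ROAIPAddressFamily()
    fam['addressFamily'] = b'\x00\x01'        # AFI 1 = IPv4
    fam['addresses'].append(addr)
    roa['ipAddrBlocks'].append(fam)

    decoded, rest = der_decode(der_encode(roa),
                               asn1Spec=rfc6482.RouteOriginAttestation())
    assert not rest and decoded['asID'] == 64512   # DEFAULT version omitted in DER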
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6486.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6486.py
new file mode 100644
index 0000000000..31c936a4f2
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6486.py
@@ -0,0 +1,68 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# RPKI Manifests
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6486.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import useful
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+id_smime = univ.ObjectIdentifier('1.2.840.113549.1.9.16')
+
+id_ct = id_smime + (1, )
+
+id_ct_rpkiManifest = id_ct + (26, )
+
+
+class FileAndHash(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('file', char.IA5String()),
+ namedtype.NamedType('hash', univ.BitString())
+ )
+
+
+class Manifest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)).subtype(value=0)),
+ namedtype.NamedType('manifestNumber',
+ univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(0, MAX))),
+ namedtype.NamedType('thisUpdate',
+ useful.GeneralizedTime()),
+ namedtype.NamedType('nextUpdate',
+ useful.GeneralizedTime()),
+ namedtype.NamedType('fileHashAlg',
+ univ.ObjectIdentifier()),
+ namedtype.NamedType('fileList',
+ univ.SequenceOf(componentType=FileAndHash()).subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(0, MAX)))
+ )
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_rpkiManifest: Manifest(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
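
Building a minimal manifest value that DER-encodes cleanly; the timestamps and digest below are placeholders (a real manifest hashes actual repository files):

    from pyasn1.codec.der.encoder import encode as der_encode
    from pyasn1.type import univ, useful
    from pyasn1_modules import rfc6486

    mft = rfc6486.Manifest()
    mft['manifestNumber'] = 42
    mft['thisUpdate'] = useful.GeneralizedTime('20240101000000Z')
    mft['nextUpdate'] = useful.GeneralizedTime('20240102000000Z')
    mft['fileHashAlg'] = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1')  # sha-256

    entry = rfc6486.FileAndHash()
    entry['file'] = 'example.cer'
    entry['hash'] = univ.BitString(hexValue='deadbeef')  # placeholder digest
    mft['fileList'].append(entry)

    der = der_encode(mft)   # becomes the eContent of the signed manifest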
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6487.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6487.py
new file mode 100644
index 0000000000..d8c2f87423
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6487.py
@@ -0,0 +1,22 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Profile for X.509 PKIX Resource Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6487.txt
+#
+
+from pyasn1.type import univ
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_ad = id_pkix + (48, )
+
+id_ad_rpkiManifest = id_ad + (10, )
+id_ad_signedObject = id_ad + (11, )
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6664.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6664.py
new file mode 100644
index 0000000000..41629d8d7f
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6664.py
@@ -0,0 +1,147 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# S/MIME Capabilities for Public Key Definitions
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6664.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5751
+from pyasn1_modules import rfc5480
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc3279
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Imports from RFC 3279
+
+dhpublicnumber = rfc3279.dhpublicnumber
+
+Dss_Parms = rfc3279.Dss_Parms
+
+id_dsa = rfc3279.id_dsa
+
+id_ecPublicKey = rfc3279.id_ecPublicKey
+
+rsaEncryption = rfc3279.rsaEncryption
+
+
+# Imports from RFC 4055
+
+id_mgf1 = rfc4055.id_mgf1
+
+id_RSAES_OAEP = rfc4055.id_RSAES_OAEP
+
+id_RSASSA_PSS = rfc4055.id_RSASSA_PSS
+
+
+# Imports from RFC 5480
+
+ECParameters = rfc5480.ECParameters
+
+id_ecDH = rfc5480.id_ecDH
+
+id_ecMQV = rfc5480.id_ecMQV
+
+
+# RSA
+
+class RSAKeySize(univ.Integer):
+ # suggested values are 1024, 2048, 3072, 4096, 7680, 8192, and 15360;
+ # however, the integer value is not limited to these suggestions
+ pass
+
+
+class RSAKeyCapabilities(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('minKeySize', RSAKeySize()),
+ namedtype.OptionalNamedType('maxKeySize', RSAKeySize())
+ )
+
+
+class RsaSsa_Pss_sig_caps(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', AlgorithmIdentifier()),
+ namedtype.OptionalNamedType('maskAlg', AlgorithmIdentifier()),
+ namedtype.DefaultedNamedType('trailerField', univ.Integer().subtype(value=1))
+ )
+
+
+# Diffie-Hellman and DSA
+
+class DSAKeySize(univ.Integer):
+ subtypeSpec = constraint.SingleValueConstraint(1024, 2048, 3072, 7680, 15360)
+
+
+class DSAKeyCapabilities(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keySizes', univ.Sequence(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('minKeySize',
+ DSAKeySize()),
+ namedtype.OptionalNamedType('maxKeySize',
+ DSAKeySize()),
+ namedtype.OptionalNamedType('maxSizeP',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('maxSizeQ',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.OptionalNamedType('maxSizeG',
+ univ.Integer().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 3)))
+ )).subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('keyParams',
+ Dss_Parms().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatConstructed, 1)))
+ )
+
+
+# Elliptic Curve
+
+class EC_SMimeCaps(univ.SequenceOf):
+ componentType = ECParameters()
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Update the SMIMECapabilities Attribute Map in rfc5751.py
+#
+# The map can either include an entry for scap-sa-rsaSSA-PSS or
+# scap-pk-rsaSSA-PSS, but not both. One is associated with the
+# public key and the other is associated with the signature
+# algorithm; however, they use the same OID. If you need the
+# other one in your application, copy the map into a local dict,
+# adjust as needed, and pass the local dict to the decoder with
+# openTypes=your_local_map.
+
+_smimeCapabilityMapUpdate = {
+ rsaEncryption: RSAKeyCapabilities(),
+ id_RSASSA_PSS: RSAKeyCapabilities(),
+ # id_RSASSA_PSS: RsaSsa_Pss_sig_caps(),
+ id_RSAES_OAEP: RSAKeyCapabilities(),
+ id_dsa: DSAKeyCapabilities(),
+ dhpublicnumber: DSAKeyCapabilities(),
+ id_ecPublicKey: EC_SMimeCaps(),
+ id_ecDH: EC_SMimeCaps(),
+ id_ecMQV: EC_SMimeCaps(),
+ id_mgf1: AlgorithmIdentifier(),
+}
+
+rfc5751.smimeCapabilityMap.update(_smimeCapabilityMapUpdate)
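
Following the comment above, a sketch of the local-map override for an application that wants the signature-algorithm reading of id-RSASSA-PSS (variable names are illustrative):

    from pyasn1_modules import rfc5751, rfc6664

    # Shadow the shared map so id-RSASSA-PSS decodes as a
    # signature-algorithm capability rather than a public-key one.
    local_map = dict(rfc5751.smimeCapabilityMap)
    local_map[rfc6664.id_RSASSA_PSS] = rfc6664.RsaSsa_Pss_sig_caps()

    # A decode call would then pass the override, e.g.:
    # der_decode(substrate, asn1Spec=rfc5751.SMIMECapabilities(),
    #            openTypes=local_map, decodeOpenTypes=True)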
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6955.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6955.py
new file mode 100644
index 0000000000..09f2d6562e
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6955.py
@@ -0,0 +1,108 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Diffie-Hellman Proof-of-Possession Algorithms
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6955.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3279
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+# Imports from RFC 5652
+
+MessageDigest = rfc5652.MessageDigest
+
+IssuerAndSerialNumber = rfc5652.IssuerAndSerialNumber
+
+
+# Imports from RFC 5280
+
+id_pkix = rfc5280.id_pkix
+
+
+# Imports from RFC 3279
+
+Dss_Sig_Value = rfc3279.Dss_Sig_Value
+
+DomainParameters = rfc3279.DomainParameters
+
+
+# Static DH Proof-of-Possession
+
+class DhSigStatic(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('issuerAndSerial', IssuerAndSerialNumber()),
+ namedtype.NamedType('hashValue', MessageDigest())
+ )
+
+
+# Object Identifiers
+
+id_dh_sig_hmac_sha1 = id_pkix + (6, 3, )
+
+id_dhPop_static_sha1_hmac_sha1 = univ.ObjectIdentifier(id_dh_sig_hmac_sha1)
+
+
+id_alg_dh_pop = id_pkix + (6, 4, )
+
+id_alg_dhPop_sha1 = univ.ObjectIdentifier(id_alg_dh_pop)
+
+id_alg_dhPop_sha224 = id_pkix + (6, 5, )
+
+id_alg_dhPop_sha256 = id_pkix + (6, 6, )
+
+id_alg_dhPop_sha384 = id_pkix + (6, 7, )
+
+id_alg_dhPop_sha512 = id_pkix + (6, 8, )
+
+
+id_alg_dhPop_static_sha224_hmac_sha224 = id_pkix + (6, 15, )
+
+id_alg_dhPop_static_sha256_hmac_sha256 = id_pkix + (6, 16, )
+
+id_alg_dhPop_static_sha384_hmac_sha384 = id_pkix + (6, 17, )
+
+id_alg_dhPop_static_sha512_hmac_sha512 = id_pkix + (6, 18, )
+
+
+id_alg_ecdhPop_static_sha224_hmac_sha224 = id_pkix + (6, 25, )
+
+id_alg_ecdhPop_static_sha256_hmac_sha256 = id_pkix + (6, 26, )
+
+id_alg_ecdhPop_static_sha384_hmac_sha384 = id_pkix + (6, 27, )
+
+id_alg_ecdhPop_static_sha512_hmac_sha512 = id_pkix + (6, 28, )
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_alg_dh_pop: DomainParameters(),
+ id_alg_dhPop_sha224: DomainParameters(),
+ id_alg_dhPop_sha256: DomainParameters(),
+ id_alg_dhPop_sha384: DomainParameters(),
+ id_alg_dhPop_sha512: DomainParameters(),
+ id_dh_sig_hmac_sha1: univ.Null(""),
+ id_alg_dhPop_static_sha224_hmac_sha224: univ.Null(""),
+ id_alg_dhPop_static_sha256_hmac_sha256: univ.Null(""),
+ id_alg_dhPop_static_sha384_hmac_sha384: univ.Null(""),
+ id_alg_dhPop_static_sha512_hmac_sha512: univ.Null(""),
+ id_alg_ecdhPop_static_sha224_hmac_sha224: univ.Null(""),
+ id_alg_ecdhPop_static_sha256_hmac_sha256: univ.Null(""),
+ id_alg_ecdhPop_static_sha384_hmac_sha384: univ.Null(""),
+ id_alg_ecdhPop_static_sha512_hmac_sha512: univ.Null(""),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
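
Note that the *_sha1 names above are aliases rather than new arcs, which this short sketch makes explicit:

    from pyasn1_modules import rfc6955

    assert rfc6955.id_dhPop_static_sha1_hmac_sha1 == rfc6955.id_dh_sig_hmac_sha1
    assert rfc6955.id_alg_dhPop_sha1 == rfc6955.id_alg_dh_pop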
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc6960.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6960.py
new file mode 100644
index 0000000000..e5f1305649
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc6960.py
@@ -0,0 +1,223 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Online Certificate Status Protocol (OCSP)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc6960.txt
+#
+
+from pyasn1.type import univ, char, namedtype, namedval, tag, constraint, useful
+
+from pyasn1_modules import rfc2560
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Imports from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+AuthorityInfoAccessSyntax = rfc5280.AuthorityInfoAccessSyntax
+Certificate = rfc5280.Certificate
+CertificateSerialNumber = rfc5280.CertificateSerialNumber
+CRLReason = rfc5280.CRLReason
+Extensions = rfc5280.Extensions
+GeneralName = rfc5280.GeneralName
+Name = rfc5280.Name
+
+id_kp = rfc5280.id_kp
+
+id_ad_ocsp = rfc5280.id_ad_ocsp
+
+
+# Imports from the original OCSP module in RFC 2560
+
+AcceptableResponses = rfc2560.AcceptableResponses
+ArchiveCutoff = rfc2560.ArchiveCutoff
+CertStatus = rfc2560.CertStatus
+KeyHash = rfc2560.KeyHash
+OCSPResponse = rfc2560.OCSPResponse
+OCSPResponseStatus = rfc2560.OCSPResponseStatus
+ResponseBytes = rfc2560.ResponseBytes
+RevokedInfo = rfc2560.RevokedInfo
+UnknownInfo = rfc2560.UnknownInfo
+Version = rfc2560.Version
+
+id_kp_OCSPSigning = rfc2560.id_kp_OCSPSigning
+
+id_pkix_ocsp = rfc2560.id_pkix_ocsp
+id_pkix_ocsp_archive_cutoff = rfc2560.id_pkix_ocsp_archive_cutoff
+id_pkix_ocsp_basic = rfc2560.id_pkix_ocsp_basic
+id_pkix_ocsp_crl = rfc2560.id_pkix_ocsp_crl
+id_pkix_ocsp_nocheck = rfc2560.id_pkix_ocsp_nocheck
+id_pkix_ocsp_nonce = rfc2560.id_pkix_ocsp_nonce
+id_pkix_ocsp_response = rfc2560.id_pkix_ocsp_response
+id_pkix_ocsp_service_locator = rfc2560.id_pkix_ocsp_service_locator
+
+
+# Additional object identifiers
+
+id_pkix_ocsp_pref_sig_algs = id_pkix_ocsp + (8, )
+id_pkix_ocsp_extended_revoke = id_pkix_ocsp + (9, )
+
+
+# Updated structures (mostly to improve openTypes support)
+
+class CertID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('issuerNameHash', univ.OctetString()),
+ namedtype.NamedType('issuerKeyHash', univ.OctetString()),
+ namedtype.NamedType('serialNumber', CertificateSerialNumber())
+ )
+
+
+class SingleResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certID', CertID()),
+ namedtype.NamedType('certStatus', CertStatus()),
+ namedtype.NamedType('thisUpdate', useful.GeneralizedTime()),
+ namedtype.OptionalNamedType('nextUpdate', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('singleExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class ResponderID(univ.Choice):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('byName', Name().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('byKey', KeyHash().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class ResponseData(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('responderID', ResponderID()),
+ namedtype.NamedType('producedAt', useful.GeneralizedTime()),
+ namedtype.NamedType('responses', univ.SequenceOf(
+ componentType=SingleResponse())),
+ namedtype.OptionalNamedType('responseExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+ )
+
+
+class BasicOCSPResponse(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsResponseData', ResponseData()),
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(
+ componentType=Certificate()).subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Request(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('reqCert', CertID()),
+ namedtype.OptionalNamedType('singleRequestExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class Signature(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
+ namedtype.NamedType('signature', univ.BitString()),
+ namedtype.OptionalNamedType('certs', univ.SequenceOf(
+ componentType=Certificate()).subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+class TBSRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', Version('v1').subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('requestorName', GeneralName().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('requestList', univ.SequenceOf(
+ componentType=Request())),
+ namedtype.OptionalNamedType('requestExtensions', Extensions().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class OCSPRequest(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tbsRequest', TBSRequest()),
+ namedtype.OptionalNamedType('optionalSignature', Signature().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)))
+ )
+
+
+# Previously omitted structure
+
+class ServiceLocator(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('issuer', Name()),
+ namedtype.NamedType('locator', AuthorityInfoAccessSyntax())
+ )
+
+
+# Additional structures
+
+class CrlID(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('crlUrl', char.IA5String().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('crlNum', univ.Integer().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('crlTime', useful.GeneralizedTime().subtype(
+ explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+ )
+
+
+class PreferredSignatureAlgorithm(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sigIdentifier', AlgorithmIdentifier()),
+ namedtype.OptionalNamedType('certIdentifier', AlgorithmIdentifier())
+ )
+
+
+class PreferredSignatureAlgorithms(univ.SequenceOf):
+ componentType = PreferredSignatureAlgorithm()
+
+
+
+# Response Type OID to Response Map
+
+ocspResponseMap = {
+ id_pkix_ocsp_basic: BasicOCSPResponse(),
+}
+
+
+# Map of Extension OIDs to Extensions added to the ones
+# that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ # Certificate Extension
+ id_pkix_ocsp_nocheck: univ.Null(""),
+ # OCSP Request Extensions
+ id_pkix_ocsp_nonce: univ.OctetString(),
+ id_pkix_ocsp_response: AcceptableResponses(),
+ id_pkix_ocsp_service_locator: ServiceLocator(),
+ id_pkix_ocsp_pref_sig_algs: PreferredSignatureAlgorithms(),
+ # OCSP Response Extensions
+ id_pkix_ocsp_crl: CrlID(),
+ id_pkix_ocsp_archive_cutoff: ArchiveCutoff(),
+ id_pkix_ocsp_extended_revoke: univ.Null(""),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
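
Assembling an unsigned, single-certificate OCSPRequest from these types; the 20-byte digests and the serial number are placeholders, not real hash values:

    from pyasn1.codec.der.encoder import encode as der_encode
    from pyasn1.type import univ
    from pyasn1_modules import rfc6960

    cert_id = rfc6960.CertID()
    cert_id['hashAlgorithm']['algorithm'] = univ.ObjectIdentifier('1.3.14.3.2.26')  # sha1
    cert_id['issuerNameHash'] = b'\x00' * 20
    cert_id['issuerKeyHash'] = b'\x11' * 20
    cert_id['serialNumber'] = 0x2A

    request = rfc6960.Request()
    request['reqCert'] = cert_id

    ocsp_request = rfc6960.OCSPRequest()
    ocsp_request['tbsRequest']['requestList'].append(request)
    der = der_encode(ocsp_request)   # body of a POST to an OCSP responder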
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7030.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7030.py
new file mode 100644
index 0000000000..84b6dc5f9a
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7030.py
@@ -0,0 +1,66 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Enrollment over Secure Transport (EST)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7030.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+# Imports from RFC 5652
+
+Attribute = rfc5652.Attribute
+
+
+# Asymmetric Decrypt Key Identifier Attribute
+
+id_aa_asymmDecryptKeyID = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.54')
+
+class AsymmetricDecryptKeyIdentifier(univ.OctetString):
+ pass
+
+
+aa_asymmDecryptKeyID = Attribute()
+aa_asymmDecryptKeyID['attrType'] = id_aa_asymmDecryptKeyID
+aa_asymmDecryptKeyID['attrValues'][0] = AsymmetricDecryptKeyIdentifier()
+
+
+# CSR Attributes
+
+class AttrOrOID(univ.Choice):
+ pass
+
+AttrOrOID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('attribute', Attribute())
+)
+
+
+class CsrAttrs(univ.SequenceOf):
+ pass
+
+CsrAttrs.componentType = AttrOrOID()
+CsrAttrs.subtypeSpec = constraint.ValueSizeConstraint(0, MAX)
+
+
+# Update CMS Attribute Map
+
+_cmsAttributesMapUpdate = {
+ id_aa_asymmDecryptKeyID: AsymmetricDecryptKeyIdentifier(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
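
A client-side sketch (illustrative only, not part of the patch), assuming an EST server's /csrattrs response body is base64 of the DER-encoded CsrAttrs defined above:

    import base64

    from pyasn1.codec.der.decoder import decode
    from pyasn1_modules import rfc7030

    def iter_csr_attrs(b64_payload):
        csr_attrs, rest = decode(base64.b64decode(b64_payload),
                                 asn1Spec=rfc7030.CsrAttrs())
        assert not rest
        for attr_or_oid in csr_attrs:
            # each element is a CHOICE: a bare OID or a full Attribute
            yield attr_or_oid.getName(), attr_or_oid.getComponent()
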
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7191.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7191.py
new file mode 100644
index 0000000000..7c2be11562
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7191.py
@@ -0,0 +1,261 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley without assistance from the asn1ate tool.
+# Modified by Russ Housley to add support for opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# CMS Key Package Receipt and Error Content Types
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7191.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+DistinguishedName = rfc5280.DistinguishedName
+
+
+# SingleAttribute is the same as Attribute in RFC 5652, except that the
+# attrValues SET must have one and only one member
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+AttributeValues.sizeSpec = univ.Set.sizeSpec + constraint.ValueSizeConstraint(1, 1)
+
+
+class SingleAttribute(univ.Sequence):
+ pass
+
+SingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', univ.ObjectIdentifier()),
+ namedtype.NamedType('attrValues', AttributeValues(),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# SIR Entity Name
+
+class SIREntityNameType(univ.ObjectIdentifier):
+ pass
+
+
+class SIREntityNameValue(univ.Any):
+ pass
+
+
+class SIREntityName(univ.Sequence):
+ pass
+
+SIREntityName.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sirenType', SIREntityNameType()),
+ namedtype.NamedType('sirenValue', univ.OctetString())
+ # CONTAINING the DER-encoded SIREntityNameValue
+)
+
+
+class SIREntityNames(univ.SequenceOf):
+ pass
+
+SIREntityNames.componentType = SIREntityName()
+SIREntityNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+id_dn = univ.ObjectIdentifier('2.16.840.1.101.2.1.16.0')
+
+
+class siren_dn(SIREntityName):
+ def __init__(self):
+ SIREntityName.__init__(self)
+ self['sirenType'] = id_dn
+
+
+# Key Package Error CMS Content Type
+
+class EnumeratedErrorCode(univ.Enumerated):
+ pass
+
+# Error codes with values <= 33 are aligned with RFC 5934
+EnumeratedErrorCode.namedValues = namedval.NamedValues(
+ ('decodeFailure', 1),
+ ('badContentInfo', 2),
+ ('badSignedData', 3),
+ ('badEncapContent', 4),
+ ('badCertificate', 5),
+ ('badSignerInfo', 6),
+ ('badSignedAttrs', 7),
+ ('badUnsignedAttrs', 8),
+ ('missingContent', 9),
+ ('noTrustAnchor', 10),
+ ('notAuthorized', 11),
+ ('badDigestAlgorithm', 12),
+ ('badSignatureAlgorithm', 13),
+ ('unsupportedKeySize', 14),
+ ('unsupportedParameters', 15),
+ ('signatureFailure', 16),
+ ('insufficientMemory', 17),
+ ('incorrectTarget', 23),
+ ('missingSignature', 29),
+ ('resourcesBusy', 30),
+ ('versionNumberMismatch', 31),
+ ('revokedCertificate', 33),
+ ('ambiguousDecrypt', 60),
+ ('noDecryptKey', 61),
+ ('badEncryptedData', 62),
+ ('badEnvelopedData', 63),
+ ('badAuthenticatedData', 64),
+ ('badAuthEnvelopedData', 65),
+ ('badKeyAgreeRecipientInfo', 66),
+ ('badKEKRecipientInfo', 67),
+ ('badEncryptContent', 68),
+ ('badEncryptAlgorithm', 69),
+ ('missingCiphertext', 70),
+ ('decryptFailure', 71),
+ ('badMACAlgorithm', 72),
+ ('badAuthAttrs', 73),
+ ('badUnauthAttrs', 74),
+ ('invalidMAC', 75),
+ ('mismatchedDigestAlg', 76),
+ ('missingCertificate', 77),
+ ('tooManySigners', 78),
+ ('missingSignedAttributes', 79),
+ ('derEncodingNotUsed', 80),
+ ('missingContentHints', 81),
+ ('invalidAttributeLocation', 82),
+ ('badMessageDigest', 83),
+ ('badKeyPackage', 84),
+ ('badAttributes', 85),
+ ('attributeComparisonFailure', 86),
+ ('unsupportedSymmetricKeyPackage', 87),
+ ('unsupportedAsymmetricKeyPackage', 88),
+ ('constraintViolation', 89),
+ ('ambiguousDefaultValue', 90),
+ ('noMatchingRecipientInfo', 91),
+ ('unsupportedKeyWrapAlgorithm', 92),
+ ('badKeyTransRecipientInfo', 93),
+ ('other', 127)
+)
+
+
+class ErrorCodeChoice(univ.Choice):
+ pass
+
+ErrorCodeChoice.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('enum', EnumeratedErrorCode()),
+ namedtype.NamedType('oid', univ.ObjectIdentifier())
+)
+
+
+class KeyPkgID(univ.OctetString):
+ pass
+
+
+class KeyPkgIdentifier(univ.Choice):
+ pass
+
+KeyPkgIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkgID', KeyPkgID()),
+ namedtype.NamedType('attribute', SingleAttribute())
+)
+
+
+class KeyPkgVersion(univ.Integer):
+ pass
+
+
+KeyPkgVersion.namedValues = namedval.NamedValues(
+ ('v1', 1),
+ ('v2', 2)
+)
+
+KeyPkgVersion.subtypeSpec = constraint.ValueRangeConstraint(1, 65535)
+
+
+id_ct_KP_keyPackageError = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.6')
+
+class KeyPackageError(univ.Sequence):
+ pass
+
+KeyPackageError.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v2')),
+ namedtype.OptionalNamedType('errorOf', KeyPkgIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
+ namedtype.NamedType('errorBy', SIREntityName()),
+ namedtype.NamedType('errorCode', ErrorCodeChoice())
+)
+
+
+# Key Package Receipt CMS Content Type
+
+id_ct_KP_keyPackageReceipt = univ.ObjectIdentifier('2.16.840.1.101.2.1.2.78.3')
+
+class KeyPackageReceipt(univ.Sequence):
+ pass
+
+KeyPackageReceipt.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('version', KeyPkgVersion().subtype(value='v2')),
+ namedtype.NamedType('receiptOf', KeyPkgIdentifier()),
+ namedtype.NamedType('receivedBy', SIREntityName())
+)
+
+
+# Key Package Receipt Request Attribute
+
+class KeyPkgReceiptReq(univ.Sequence):
+ pass
+
+KeyPkgReceiptReq.componentType = namedtype.NamedTypes(
+ namedtype.DefaultedNamedType('encryptReceipt', univ.Boolean().subtype(value=0)),
+ namedtype.OptionalNamedType('receiptsFrom', SIREntityNames().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('receiptsTo', SIREntityNames())
+)
+
+
+id_aa_KP_keyPkgIdAndReceiptReq = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.65')
+
+class KeyPkgIdentifierAndReceiptReq(univ.Sequence):
+ pass
+
+KeyPkgIdentifierAndReceiptReq.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('pkgID', KeyPkgID()),
+ namedtype.OptionalNamedType('receiptReq', KeyPkgReceiptReq())
+)
+
+
+# Map of Attribute Type OIDs to Attributes that are added to
+# the ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
+
+# Map of CMS Content Type OIDs to CMS Content Types that are added to
+# the ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_KP_keyPackageError: KeyPackageError(),
+ id_ct_KP_keyPackageReceipt: KeyPackageReceipt(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
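
A minimal sketch (illustrative only, not part of the patch) of building a KeyPackageReceipt; the package ID and SIR entity value below are placeholders:

    from pyasn1.codec.der.encoder import encode
    from pyasn1_modules import rfc7191

    receipt = rfc7191.KeyPackageReceipt()
    receipt['version'] = 'v2'
    receipt['receiptOf']['pkgID'] = rfc7191.KeyPkgID(b'\x00\x01')
    receipt['receivedBy']['sirenType'] = rfc7191.id_dn
    # sirenValue carries the DER of a DistinguishedName; empty SEQUENCE here
    receipt['receivedBy']['sirenValue'] = b'\x30\x00'
    der = encode(receipt)
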
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7229.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7229.py
new file mode 100644
index 0000000000..e9bce2d5b6
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7229.py
@@ -0,0 +1,29 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Object Identifiers for Test Certificate Policies
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7229.txt
+#
+
+from pyasn1.type import univ
+
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_TEST = id_pkix + (13, )
+
+id_TEST_certPolicyOne = id_TEST + (1, )
+id_TEST_certPolicyTwo = id_TEST + (2, )
+id_TEST_certPolicyThree = id_TEST + (3, )
+id_TEST_certPolicyFour = id_TEST + (4, )
+id_TEST_certPolicyFive = id_TEST + (5, )
+id_TEST_certPolicySix = id_TEST + (6, )
+id_TEST_certPolicySeven = id_TEST + (7, )
+id_TEST_certPolicyEight = id_TEST + (8, )
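
For reference (illustrative only, not part of the patch), the tuple-addition idiom above resolves to dotted OIDs like these:

    from pyasn1_modules import rfc7229

    assert str(rfc7229.id_TEST) == '1.3.6.1.5.5.7.13'
    assert str(rfc7229.id_TEST_certPolicyOne) == '1.3.6.1.5.5.7.13.1'
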
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7292.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7292.py
new file mode 100644
index 0000000000..1c9f319a5d
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7292.py
@@ -0,0 +1,357 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #12: Personal Information Exchange Syntax v1.1
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7292.txt
+# https://www.rfc-editor.org/errata_search.php?rfc=7292
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import opentype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2315
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5958
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Initialize the maps used in PKCS#12
+
+pkcs12BagTypeMap = { }
+
+pkcs12CertBagMap = { }
+
+pkcs12CRLBagMap = { }
+
+pkcs12SecretBagMap = { }
+
+
+# Imports from RFC 2315, RFC 5652, and RFC 5958
+
+DigestInfo = rfc2315.DigestInfo
+
+
+ContentInfo = rfc5652.ContentInfo
+
+PKCS12Attribute = rfc5652.Attribute
+
+
+EncryptedPrivateKeyInfo = rfc5958.EncryptedPrivateKeyInfo
+
+PrivateKeyInfo = rfc5958.PrivateKeyInfo
+
+
+# CMSSingleAttribute is the same as Attribute in RFC 5652 except the attrValues
+# SET must have one and only one member
+
+class AttributeType(univ.ObjectIdentifier):
+ pass
+
+
+class AttributeValue(univ.Any):
+ pass
+
+
+class AttributeValues(univ.SetOf):
+ pass
+
+AttributeValues.componentType = AttributeValue()
+
+
+class CMSSingleAttribute(univ.Sequence):
+ pass
+
+CMSSingleAttribute.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('attrType', AttributeType()),
+ namedtype.NamedType('attrValues',
+ AttributeValues().subtype(sizeSpec=constraint.ValueSizeConstraint(1, 1)),
+ openType=opentype.OpenType('attrType', rfc5652.cmsAttributesMap)
+ )
+)
+
+
+# Object identifier arcs
+
+rsadsi = _OID(1, 2, 840, 113549)
+
+pkcs = _OID(rsadsi, 1)
+
+pkcs_9 = _OID(pkcs, 9)
+
+certTypes = _OID(pkcs_9, 22)
+
+crlTypes = _OID(pkcs_9, 23)
+
+pkcs_12 = _OID(pkcs, 12)
+
+
+# PBE Algorithm Identifiers and Parameters Structure
+
+pkcs_12PbeIds = _OID(pkcs_12, 1)
+
+pbeWithSHAAnd128BitRC4 = _OID(pkcs_12PbeIds, 1)
+
+pbeWithSHAAnd40BitRC4 = _OID(pkcs_12PbeIds, 2)
+
+pbeWithSHAAnd3_KeyTripleDES_CBC = _OID(pkcs_12PbeIds, 3)
+
+pbeWithSHAAnd2_KeyTripleDES_CBC = _OID(pkcs_12PbeIds, 4)
+
+pbeWithSHAAnd128BitRC2_CBC = _OID(pkcs_12PbeIds, 5)
+
+pbeWithSHAAnd40BitRC2_CBC = _OID(pkcs_12PbeIds, 6)
+
+
+class Pkcs_12PbeParams(univ.Sequence):
+ pass
+
+Pkcs_12PbeParams.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString()),
+ namedtype.NamedType('iterations', univ.Integer())
+)
+
+
+# Bag types
+
+bagtypes = _OID(pkcs_12, 10, 1)
+
+class BAG_TYPE(univ.Sequence):
+ pass
+
+BAG_TYPE.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('id', univ.ObjectIdentifier()),
+ namedtype.NamedType('unnamed1', univ.Any(),
+ openType=opentype.OpenType('attrType', pkcs12BagTypeMap)
+ )
+)
+
+
+id_keyBag = _OID(bagtypes, 1)
+
+class KeyBag(PrivateKeyInfo):
+ pass
+
+
+id_pkcs8ShroudedKeyBag = _OID(bagtypes, 2)
+
+class PKCS8ShroudedKeyBag(EncryptedPrivateKeyInfo):
+ pass
+
+
+id_certBag = _OID(bagtypes, 3)
+
+class CertBag(univ.Sequence):
+ pass
+
+CertBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('certId', univ.ObjectIdentifier()),
+ namedtype.NamedType('certValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('certId', pkcs12CertBagMap)
+ )
+)
+
+
+x509Certificate = CertBag()
+x509Certificate['certId'] = _OID(certTypes, 1)
+x509Certificate['certValue'] = univ.OctetString()
+# DER-encoded X.509 certificate stored in OCTET STRING
+
+
+sdsiCertificate = CertBag()
+sdsiCertificate['certId'] = _OID(certTypes, 2)
+sdsiCertificate['certValue'] = char.IA5String()
+# Base64-encoded SDSI certificate stored in IA5String
+
+
+id_CRLBag = _OID(bagtypes, 4)
+
+class CRLBag(univ.Sequence):
+ pass
+
+CRLBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('crlId', univ.ObjectIdentifier()),
+ namedtype.NamedType('crlValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('crlId', pkcs12CRLBagMap)
+ )
+)
+
+
+x509CRL = CRLBag()
+x509CRL['crlId'] = _OID(crlTypes, 1)
+x509CRL['crlValue'] = univ.OctetString()
+# DER-encoded X.509 CRL stored in OCTET STRING
+
+
+id_secretBag = _OID(bagtypes, 5)
+
+class SecretBag(univ.Sequence):
+ pass
+
+SecretBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('secretTypeId', univ.ObjectIdentifier()),
+ namedtype.NamedType('secretValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('secretTypeId', pkcs12SecretBagMap)
+ )
+)
+
+
+id_safeContentsBag = _OID(bagtypes, 6)
+
+class SafeBag(univ.Sequence):
+ pass
+
+SafeBag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bagId', univ.ObjectIdentifier()),
+ namedtype.NamedType('bagValue',
+ univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0)),
+ openType=opentype.OpenType('bagId', pkcs12BagTypeMap)
+ ),
+ namedtype.OptionalNamedType('bagAttributes',
+ univ.SetOf(componentType=PKCS12Attribute())
+ )
+)
+
+
+class SafeContents(univ.SequenceOf):
+ pass
+
+SafeContents.componentType = SafeBag()
+
+
+# The PFX PDU
+
+class AuthenticatedSafe(univ.SequenceOf):
+ pass
+
+AuthenticatedSafe.componentType = ContentInfo()
+# Data if unencrypted
+# EncryptedData if password-encrypted
+# EnvelopedData if public key-encrypted
+
+
+class MacData(univ.Sequence):
+ pass
+
+MacData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('mac', DigestInfo()),
+ namedtype.NamedType('macSalt', univ.OctetString()),
+ namedtype.DefaultedNamedType('iterations', univ.Integer().subtype(value=1))
+ # Note: The default is for historical reasons and its use is deprecated
+)
+
+
+class PFX(univ.Sequence):
+ pass
+
+PFX.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ univ.Integer(namedValues=namedval.NamedValues(('v3', 3)))
+ ),
+ namedtype.NamedType('authSafe', ContentInfo()),
+ namedtype.OptionalNamedType('macData', MacData())
+)
+
+
+# Local key identifier (also defined as certificateAttribute in rfc2985.py)
+
+pkcs_9_at_localKeyId = _OID(pkcs_9, 21)
+
+localKeyId = CMSSingleAttribute()
+localKeyId['attrType'] = pkcs_9_at_localKeyId
+localKeyId['attrValues'][0] = univ.OctetString()
+
+
+# Friendly name (also defined as certificateAttribute in rfc2985.py)
+
+pkcs_9_ub_pkcs9String = univ.Integer(255)
+
+pkcs_9_ub_friendlyName = univ.Integer(pkcs_9_ub_pkcs9String)
+
+pkcs_9_at_friendlyName = _OID(pkcs_9, 20)
+
+class FriendlyName(char.BMPString):
+ pass
+
+FriendlyName.subtypeSpec = constraint.ValueSizeConstraint(1, pkcs_9_ub_friendlyName)
+
+
+friendlyName = CMSSingleAttribute()
+friendlyName['attrType'] = pkcs_9_at_friendlyName
+friendlyName['attrValues'][0] = FriendlyName()
+
+
+# Update the PKCS#12 maps
+
+_pkcs12BagTypeMap = {
+ id_keyBag: KeyBag(),
+ id_pkcs8ShroudedKeyBag: PKCS8ShroudedKeyBag(),
+ id_certBag: CertBag(),
+ id_CRLBag: CRLBag(),
+ id_secretBag: SecretBag(),
+ id_safeContentsBag: SafeBag(),
+}
+
+pkcs12BagTypeMap.update(_pkcs12BagTypeMap)
+
+
+_pkcs12CertBagMap = {
+ _OID(certTypes, 1): univ.OctetString(),
+ _OID(certTypes, 2): char.IA5String(),
+}
+
+pkcs12CertBagMap.update(_pkcs12CertBagMap)
+
+
+_pkcs12CRLBagMap = {
+ _OID(crlTypes, 1): univ.OctetString(),
+}
+
+pkcs12CRLBagMap.update(_pkcs12CRLBagMap)
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ pbeWithSHAAnd128BitRC4: Pkcs_12PbeParams(),
+ pbeWithSHAAnd40BitRC4: Pkcs_12PbeParams(),
+ pbeWithSHAAnd3_KeyTripleDES_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd2_KeyTripleDES_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd128BitRC2_CBC: Pkcs_12PbeParams(),
+ pbeWithSHAAnd40BitRC2_CBC: Pkcs_12PbeParams(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
+
+
+# Update the CMS Attribute map
+
+_cmsAttributesMapUpdate = {
+ pkcs_9_at_friendlyName: FriendlyName(),
+ pkcs_9_at_localKeyId: univ.OctetString(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
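
A rough sketch (illustrative only, not part of the patch) that peeks at the outer layers of a .p12/.pfx file with the PFX type above; it does not verify the MAC or decrypt anything:

    from pyasn1.codec.der.decoder import decode
    from pyasn1_modules import rfc7292

    def outline_pfx(der_bytes):
        pfx, rest = decode(der_bytes, asn1Spec=rfc7292.PFX())
        assert not rest
        print('version:', pfx['version'])  # always v3 (3)
        print('authSafe content type:', pfx['authSafe']['contentType'])
        if pfx['macData'].isValue:  # macData is OPTIONAL
            print('MAC iterations:', pfx['macData']['iterations'])
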
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7296.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7296.py
new file mode 100644
index 0000000000..95a191a14d
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7296.py
@@ -0,0 +1,32 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# IKEv2 Certificate Bundle
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7296.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class CertificateOrCRL(univ.Choice):
+ pass
+
+CertificateOrCRL.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('cert', rfc5280.Certificate().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('crl', rfc5280.CertificateList().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class CertificateBundle(univ.SequenceOf):
+ pass
+
+CertificateBundle.componentType = CertificateOrCRL()
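
A sketch (illustrative only, not part of the patch): split a decoded certificate bundle into certificates and CRLs via the CHOICE selector:

    from pyasn1.codec.der.decoder import decode
    from pyasn1_modules import rfc7296

    def split_bundle(der_bytes):
        bundle, rest = decode(der_bytes, asn1Spec=rfc7296.CertificateBundle())
        assert not rest
        certs = [x['cert'] for x in bundle if x.getName() == 'cert']
        crls = [x['crl'] for x in bundle if x.getName() == 'crl']
        return certs, crls
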
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7508.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7508.py
new file mode 100644
index 0000000000..66460240f1
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7508.py
@@ -0,0 +1,90 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Securing Header Fields with S/MIME
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7508.txt
+# https://www.rfc-editor.org/errata/eid5875
+#
+
+import string
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+MAX = float('inf')
+
+
+class Algorithm(univ.Enumerated):
+ namedValues = namedval.NamedValues(
+ ('canonAlgorithmSimple', 0),
+ ('canonAlgorithmRelaxed', 1)
+ )
+
+
+class HeaderFieldStatus(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('duplicated', 0),
+ ('deleted', 1),
+ ('modified', 2)
+ )
+
+
+class HeaderFieldName(char.VisibleString):
+ subtypeSpec = (
+ constraint.PermittedAlphabetConstraint(*string.printable) -
+ constraint.PermittedAlphabetConstraint(':')
+ )
+
+
+class HeaderFieldValue(char.UTF8String):
+ pass
+
+
+class HeaderField(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('field-Name', HeaderFieldName()),
+ namedtype.NamedType('field-Value', HeaderFieldValue()),
+ namedtype.DefaultedNamedType('field-Status',
+ HeaderFieldStatus().subtype(value='duplicated'))
+ )
+
+
+class HeaderFields(univ.SequenceOf):
+ componentType = HeaderField()
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class SecureHeaderFields(univ.Set):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('canonAlgorithm', Algorithm()),
+ namedtype.NamedType('secHeaderFields', HeaderFields())
+ )
+
+
+id_aa = univ.ObjectIdentifier((1, 2, 840, 113549, 1, 9, 16, 2, ))
+
+id_aa_secureHeaderFieldsIdentifier = id_aa + (55, )
+
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_secureHeaderFieldsIdentifier: SecureHeaderFields(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
+
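
A hedged sketch (illustrative only, not part of the patch) of wrapping SecureHeaderFields in a CMS attribute; the header name and value are examples:

    from pyasn1.codec.der.encoder import encode
    from pyasn1_modules import rfc5652, rfc7508

    field = rfc7508.HeaderField()
    field['field-Name'] = 'From'
    field['field-Value'] = 'alice@example.com'

    fields = rfc7508.HeaderFields()
    fields.append(field)

    shf = rfc7508.SecureHeaderFields()
    shf['canonAlgorithm'] = 'canonAlgorithmSimple'
    shf['secHeaderFields'] = fields

    attr = rfc5652.Attribute()
    attr['attrType'] = rfc7508.id_aa_secureHeaderFieldsIdentifier
    attr['attrValues'][0] = encode(shf)  # attrValues holds DER (ANY)
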
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7585.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7585.py
new file mode 100644
index 0000000000..b3fd4a5bac
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7585.py
@@ -0,0 +1,50 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Network Access Identifier (NAI) Realm Name for Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7585.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# NAI Realm Name for Certificates
+
+id_pkix = univ.ObjectIdentifier('1.3.6.1.5.5.7')
+
+id_on = id_pkix + (8, )
+
+id_on_naiRealm = id_on + (8, )
+
+
+ub_naiRealm_length = univ.Integer(255)
+
+
+class NAIRealm(char.UTF8String):
+ subtypeSpec = constraint.ValueSizeConstraint(1, ub_naiRealm_length)
+
+
+naiRealm = rfc5280.AnotherName()
+naiRealm['type-id'] = id_on_naiRealm
+naiRealm['value'] = NAIRealm()
+
+
+# Map of Other Name OIDs to Other Names added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_naiRealm: NAIRealm(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
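
A sketch (illustrative only, not part of the patch): build the otherName form of a GeneralName carrying an NAI realm; AnotherName.value is an open type, so the realm is DER-encoded first:

    from pyasn1.codec.der.encoder import encode
    from pyasn1_modules import rfc5280, rfc7585

    other_name = rfc5280.AnotherName()
    other_name['type-id'] = rfc7585.id_on_naiRealm
    other_name['value'] = encode(rfc7585.NAIRealm('example.com'))
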
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7633.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7633.py
new file mode 100644
index 0000000000..f518440ff4
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7633.py
@@ -0,0 +1,38 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Transport Layer Security (TLS) Feature Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7633.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# TLS Features Extension
+
+id_pe = univ.ObjectIdentifier('1.3.6.1.5.5.7.1')
+
+id_pe_tlsfeature = id_pe + (24, )
+
+
+class Features(univ.SequenceOf):
+ componentType = univ.Integer()
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_tlsfeature: Features(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
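
A sketch (illustrative only, not part of the patch): a TLS feature extension requesting status_request (TLS extension number 5, i.e. OCSP must-staple):

    from pyasn1.codec.der.encoder import encode
    from pyasn1.type import univ
    from pyasn1_modules import rfc5280, rfc7633

    features = rfc7633.Features()
    features.append(univ.Integer(5))  # status_request

    extn = rfc5280.Extension()
    extn['extnID'] = rfc7633.id_pe_tlsfeature
    extn['critical'] = False
    extn['extnValue'] = encode(features)
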
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7773.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7773.py
new file mode 100644
index 0000000000..0fee2aa346
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7773.py
@@ -0,0 +1,52 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Authentication Context Certificate Extension
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7773.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Authentication Context Extension
+
+e_legnamnden = univ.ObjectIdentifier('1.2.752.201')
+
+id_eleg_ce = e_legnamnden + (5, )
+
+id_ce_authContext = id_eleg_ce + (1, )
+
+
+class AuthenticationContext(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('contextType', char.UTF8String()),
+ namedtype.OptionalNamedType('contextInfo', char.UTF8String())
+ )
+
+class AuthenticationContexts(univ.SequenceOf):
+ componentType = AuthenticationContext()
+ subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_ce_authContext: AuthenticationContexts(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
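
A sketch (illustrative only, not part of the patch): list the authentication contexts from an already-unwrapped extnValue:

    from pyasn1.codec.der.decoder import decode
    from pyasn1_modules import rfc7773

    def auth_contexts(extn_value_octets):
        ctxs, rest = decode(extn_value_octets,
                            asn1Spec=rfc7773.AuthenticationContexts())
        assert not rest
        return [(str(ctx['contextType']),
                 str(ctx['contextInfo']) if ctx['contextInfo'].isValue else None)
                for ctx in ctxs]
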
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7894-1.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7894-1.py
new file mode 100644
index 0000000000..92638d1bc0
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7894-1.py
@@ -0,0 +1,92 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Alternative Challenge Password Attributes for EST
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7894.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc7191
+
+
+# SingleAttribute is the same as Attribute in RFC 5652, except that the
+# attrValues SET must have one and only one member
+
+Attribute = rfc7191.SingleAttribute
+
+
+# DirectoryString is the same as RFC 5280, except the length is limited to 255
+
+class DirectoryString(univ.Choice):
+ pass
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
+)
+
+
+# OTP Challenge Attribute
+
+id_aa_otpChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.56')
+
+ub_aa_otpChallenge = univ.Integer(255)
+
+otpChallenge = rfc5652.Attribute()
+otpChallenge['attrType'] = id_aa_otpChallenge
+otpChallenge['attrValues'][0] = DirectoryString()
+
+
+# Revocation Challenge Attribute
+
+id_aa_revocationChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.57')
+
+ub_aa_revocationChallenge = univ.Integer(255)
+
+revocationChallenge = rfc5652.Attribute()
+revocationChallenge['attrType'] = id_aa_revocationChallenge
+revocationChallenge['attrValues'][0] = DirectoryString()
+
+
+# EST Identity Linking Attribute
+
+id_aa_estIdentityLinking = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.58')
+
+ub_aa_est_identity_linking = univ.Integer(255)
+
+estIdentityLinking = rfc5652.Attribute()
+estIdentityLinking['attrType'] = id_aa_estIdentityLinking
+estIdentityLinking['attrValues'][0] = DirectoryString()
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc6402.py
+
+_cmcControlAttributesMapUpdate = {
+ id_aa_otpChallenge: DirectoryString(),
+ id_aa_revocationChallenge: DirectoryString(),
+ id_aa_estIdentityLinking: DirectoryString(),
+}
+
+rfc6402.cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate)
\ No newline at end of file
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7894.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7894.py
new file mode 100644
index 0000000000..41936433d1
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7894.py
@@ -0,0 +1,92 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Alternative Challenge Password Attributes for EST
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7894.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6402
+from pyasn1_modules import rfc7191
+
+
+# SingleAttribute is the same as Attribute in RFC 5652, except that the
+# attrValues SET must have one and only one member
+
+Attribute = rfc7191.SingleAttribute
+
+
+# DirectoryString is the same as RFC 5280, except the length is limited to 255
+
+class DirectoryString(univ.Choice):
+ pass
+
+DirectoryString.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('teletexString', char.TeletexString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('printableString', char.PrintableString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('universalString', char.UniversalString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('utf8String', char.UTF8String().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255))),
+ namedtype.NamedType('bmpString', char.BMPString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(1, 255)))
+)
+
+
+# OTP Challenge Attribute
+
+id_aa_otpChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.56')
+
+ub_aa_otpChallenge = univ.Integer(255)
+
+otpChallenge = Attribute()
+otpChallenge['attrType'] = id_aa_otpChallenge
+otpChallenge['attrValues'][0] = DirectoryString()
+
+
+# Revocation Challenge Attribute
+
+id_aa_revocationChallenge = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.57')
+
+ub_aa_revocationChallenge = univ.Integer(255)
+
+revocationChallenge = Attribute()
+revocationChallenge['attrType'] = id_aa_revocationChallenge
+revocationChallenge['attrValues'][0] = DirectoryString()
+
+
+# EST Identity Linking Attribute
+
+id_aa_estIdentityLinking = univ.ObjectIdentifier('1.2.840.113549.1.9.16.2.58')
+
+ub_aa_est_identity_linking = univ.Integer(255)
+
+estIdentityLinking = Attribute()
+estIdentityLinking['attrType'] = id_aa_estIdentityLinking
+estIdentityLinking['attrValues'][0] = DirectoryString()
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc6402.py
+
+_cmcControlAttributesMapUpdate = {
+ id_aa_otpChallenge: DirectoryString(),
+ id_aa_revocationChallenge: DirectoryString(),
+ id_aa_estIdentityLinking: DirectoryString(),
+}
+
+rfc6402.cmcControlAttributesMap.update(_cmcControlAttributesMapUpdate)
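
A sketch (illustrative only, not part of the patch): an OTP challenge as a one-member SET per the SingleAttribute profile above; the OTP value is a placeholder:

    from pyasn1.codec.der.encoder import encode
    from pyasn1_modules import rfc7894

    challenge = rfc7894.DirectoryString()
    challenge['utf8String'] = '123456'  # placeholder OTP

    attr = rfc7894.Attribute()
    attr['attrType'] = rfc7894.id_aa_otpChallenge
    attr['attrValues'][0] = encode(challenge)
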
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7906.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7906.py
new file mode 100644
index 0000000000..fa5f6b0733
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7906.py
@@ -0,0 +1,736 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# NSA's CMS Key Management Attributes
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7906.txt
+# https://www.rfc-editor.org/errata/eid5850
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2634
+from pyasn1_modules import rfc4108
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+from pyasn1_modules import rfc6010
+from pyasn1_modules import rfc6019
+from pyasn1_modules import rfc7191
+
+MAX = float('inf')
+
+
+# Imports From RFC 2634
+
+id_aa_contentHint = rfc2634.id_aa_contentHint
+
+ContentHints = rfc2634.ContentHints
+
+id_aa_securityLabel = rfc2634.id_aa_securityLabel
+
+SecurityPolicyIdentifier = rfc2634.SecurityPolicyIdentifier
+
+SecurityClassification = rfc2634.SecurityClassification
+
+ESSPrivacyMark = rfc2634.ESSPrivacyMark
+
+SecurityCategories = rfc2634.SecurityCategories
+
+ESSSecurityLabel = rfc2634.ESSSecurityLabel
+
+
+# Imports From RFC 4108
+
+id_aa_communityIdentifiers = rfc4108.id_aa_communityIdentifiers
+
+CommunityIdentifier = rfc4108.CommunityIdentifier
+
+CommunityIdentifiers = rfc4108.CommunityIdentifiers
+
+
+# Imports From RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+Name = rfc5280.Name
+
+Certificate = rfc5280.Certificate
+
+GeneralNames = rfc5280.GeneralNames
+
+GeneralName = rfc5280.GeneralName
+
+
+SubjectInfoAccessSyntax = rfc5280.SubjectInfoAccessSyntax
+
+id_pkix = rfc5280.id_pkix
+
+id_pe = rfc5280.id_pe
+
+id_pe_subjectInfoAccess = rfc5280.id_pe_subjectInfoAccess
+
+
+# Imports From RFC 6010
+
+CMSContentConstraints = rfc6010.CMSContentConstraints
+
+
+# Imports From RFC 6019
+
+BinaryTime = rfc6019.BinaryTime
+
+id_aa_binarySigningTime = rfc6019.id_aa_binarySigningTime
+
+BinarySigningTime = rfc6019.BinarySigningTime
+
+
+# Imports From RFC 5652
+
+Attribute = rfc5652.Attribute
+
+CertificateSet = rfc5652.CertificateSet
+
+CertificateChoices = rfc5652.CertificateChoices
+
+id_contentType = rfc5652.id_contentType
+
+ContentType = rfc5652.ContentType
+
+id_messageDigest = rfc5652.id_messageDigest
+
+MessageDigest = rfc5652.MessageDigest
+
+
+# Imports From RFC 7191
+
+SIREntityName = rfc7191.SIREntityName
+
+id_aa_KP_keyPkgIdAndReceiptReq = rfc7191.id_aa_KP_keyPkgIdAndReceiptReq
+
+KeyPkgIdentifierAndReceiptReq = rfc7191.KeyPkgIdentifierAndReceiptReq
+
+
+# Key Province Attribute
+
+id_aa_KP_keyProvinceV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.71')
+
+
+class KeyProvinceV2(univ.ObjectIdentifier):
+ pass
+
+
+aa_keyProvince_v2 = Attribute()
+aa_keyProvince_v2['attrType'] = id_aa_KP_keyProvinceV2
+aa_keyProvince_v2['attrValues'][0] = KeyProvinceV2()
+
+
+# Manifest Attribute
+
+id_aa_KP_manifest = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.72')
+
+
+class ShortTitle(char.PrintableString):
+ pass
+
+
+class Manifest(univ.SequenceOf):
+ pass
+
+Manifest.componentType = ShortTitle()
+Manifest.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_manifest = Attribute()
+aa_manifest['attrType'] = id_aa_KP_manifest
+aa_manifest['attrValues'][0] = Manifest()
+
+
+# Key Algorithm Attribute
+
+id_kma_keyAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.1')
+
+
+class KeyAlgorithm(univ.Sequence):
+ pass
+
+KeyAlgorithm.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyAlg', univ.ObjectIdentifier()),
+ namedtype.OptionalNamedType('checkWordAlg', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.OptionalNamedType('crcAlg', univ.ObjectIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2)))
+)
+
+
+aa_keyAlgorithm = Attribute()
+aa_keyAlgorithm['attrType'] = id_kma_keyAlgorithm
+aa_keyAlgorithm['attrValues'][0] = KeyAlgorithm()
+
+
+# User Certificate Attribute
+
+id_at_userCertificate = univ.ObjectIdentifier('2.5.4.36')
+
+
+aa_userCertificate = Attribute()
+aa_userCertificate['attrType'] = id_at_userCertificate
+aa_userCertificate['attrValues'][0] = Certificate()
+
+
+# Key Package Receivers Attribute
+
+id_kma_keyPkgReceiversV2 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.16')
+
+
+class KeyPkgReceiver(univ.Choice):
+ pass
+
+KeyPkgReceiver.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('sirEntity', SIREntityName().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('community', CommunityIdentifier().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class KeyPkgReceiversV2(univ.SequenceOf):
+ pass
+
+KeyPkgReceiversV2.componentType = KeyPkgReceiver()
+KeyPkgReceiversV2.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_keyPackageReceivers_v2 = Attribute()
+aa_keyPackageReceivers_v2['attrType'] = id_kma_keyPkgReceiversV2
+aa_keyPackageReceivers_v2['attrValues'][0] = KeyPkgReceiversV2()
+
+
+# TSEC Nomenclature Attribute
+
+id_kma_TSECNomenclature = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.3')
+
+
+class CharEdition(char.PrintableString):
+ pass
+
+
+class CharEditionRange(univ.Sequence):
+ pass
+
+CharEditionRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstCharEdition', CharEdition()),
+ namedtype.NamedType('lastCharEdition', CharEdition())
+)
+
+
+class NumEdition(univ.Integer):
+ pass
+
+NumEdition.subtypeSpec = constraint.ValueRangeConstraint(0, 308915776)
+
+
+class NumEditionRange(univ.Sequence):
+ pass
+
+NumEditionRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstNumEdition', NumEdition()),
+ namedtype.NamedType('lastNumEdition', NumEdition())
+)
+
+
+class EditionID(univ.Choice):
+ pass
+
+EditionID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('char', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('charEdition', CharEdition().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('charEditionRange', CharEditionRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 2)))
+ ))
+ ),
+ namedtype.NamedType('num', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('numEdition', NumEdition().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3))),
+ namedtype.NamedType('numEditionRange', NumEditionRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 4)))
+ ))
+ )
+)
+
+
+class Register(univ.Integer):
+ pass
+
+Register.subtypeSpec = constraint.ValueRangeConstraint(0, 2147483647)
+
+
+class RegisterRange(univ.Sequence):
+ pass
+
+RegisterRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstRegister', Register()),
+ namedtype.NamedType('lastRegister', Register())
+)
+
+
+class RegisterID(univ.Choice):
+ pass
+
+RegisterID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('register', Register().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 5))),
+ namedtype.NamedType('registerRange', RegisterRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 6)))
+)
+
+
+class SegmentNumber(univ.Integer):
+ pass
+
+SegmentNumber.subtypeSpec = constraint.ValueRangeConstraint(1, 127)
+
+
+class SegmentRange(univ.Sequence):
+ pass
+
+SegmentRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('firstSegment', SegmentNumber()),
+ namedtype.NamedType('lastSegment', SegmentNumber())
+)
+
+
+class SegmentID(univ.Choice):
+ pass
+
+SegmentID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('segmentNumber', SegmentNumber().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 7))),
+ namedtype.NamedType('segmentRange', SegmentRange().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 8)))
+)
+
+
+class TSECNomenclature(univ.Sequence):
+ pass
+
+TSECNomenclature.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('shortTitle', ShortTitle()),
+ namedtype.OptionalNamedType('editionID', EditionID()),
+ namedtype.OptionalNamedType('registerID', RegisterID()),
+ namedtype.OptionalNamedType('segmentID', SegmentID())
+)
+
+
+aa_tsecNomenclature = Attribute()
+aa_tsecNomenclature['attrType'] = id_kma_TSECNomenclature
+aa_tsecNomenclature['attrValues'][0] = TSECNomenclature()
+
+
+# Key Purpose Attribute
+
+id_kma_keyPurpose = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.13')
+
+
+class KeyPurpose(univ.Enumerated):
+ pass
+
+KeyPurpose.namedValues = namedval.NamedValues(
+ ('n-a', 0),
+ ('a', 65),
+ ('b', 66),
+ ('l', 76),
+ ('m', 77),
+ ('r', 82),
+ ('s', 83),
+ ('t', 84),
+ ('v', 86),
+ ('x', 88),
+ ('z', 90)
+)
+
+
+aa_keyPurpose = Attribute()
+aa_keyPurpose['attrType'] = id_kma_keyPurpose
+aa_keyPurpose['attrValues'][0] = KeyPurpose()
+
+
+# Key Use Attribute
+
+id_kma_keyUse = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.14')
+
+
+class KeyUse(univ.Enumerated):
+ pass
+
+KeyUse.namedValues = namedval.NamedValues(
+ ('n-a', 0),
+ ('ffk', 1),
+ ('kek', 2),
+ ('kpk', 3),
+ ('msk', 4),
+ ('qkek', 5),
+ ('tek', 6),
+ ('tsk', 7),
+ ('trkek', 8),
+ ('nfk', 9),
+ ('effk', 10),
+ ('ebfk', 11),
+ ('aek', 12),
+ ('wod', 13),
+ ('kesk', 246),
+ ('eik', 247),
+ ('ask', 248),
+ ('kmk', 249),
+ ('rsk', 250),
+ ('csk', 251),
+ ('sak', 252),
+ ('rgk', 253),
+ ('cek', 254),
+ ('exk', 255)
+)
+
+
+aa_keyUse = Attribute()
+aa_keyUse['attrType'] = id_kma_keyUse
+aa_keyUse['attrValues'][0] = KeyUse()
+
+
+# Transport Key Attribute
+
+id_kma_transportKey = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.15')
+
+
+class TransOp(univ.Enumerated):
+ pass
+
+TransOp.namedValues = namedval.NamedValues(
+ ('transport', 1),
+ ('operational', 2)
+)
+
+
+aa_transportKey = Attribute()
+aa_transportKey['attrType'] = id_kma_transportKey
+aa_transportKey['attrValues'][0] = TransOp()
+
+
+# Key Distribution Period Attribute
+
+id_kma_keyDistPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.5')
+
+
+class KeyDistPeriod(univ.Sequence):
+ pass
+
+KeyDistPeriod.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('doNotDistBefore', BinaryTime().subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('doNotDistAfter', BinaryTime())
+)
+
+
+aa_keyDistributionPeriod = Attribute()
+aa_keyDistributionPeriod['attrType'] = id_kma_keyDistPeriod
+aa_keyDistributionPeriod['attrValues'][0] = KeyDistPeriod()
+
+
+# Key Validity Period Attribute
+
+id_kma_keyValidityPeriod = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.6')
+
+
+class KeyValidityPeriod(univ.Sequence):
+ pass
+
+KeyValidityPeriod.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('doNotUseBefore', BinaryTime()),
+ namedtype.OptionalNamedType('doNotUseAfter', BinaryTime())
+)
+
+
+aa_keyValidityPeriod = Attribute()
+aa_keyValidityPeriod['attrType'] = id_kma_keyValidityPeriod
+aa_keyValidityPeriod['attrValues'][0] = KeyValidityPeriod()
+
+
+# Key Duration Attribute
+
+id_kma_keyDuration = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.7')
+
+
+ub_KeyDuration_months = univ.Integer(72)
+
+ub_KeyDuration_hours = univ.Integer(96)
+
+ub_KeyDuration_days = univ.Integer(732)
+
+ub_KeyDuration_weeks = univ.Integer(104)
+
+ub_KeyDuration_years = univ.Integer(100)
+
+
+class KeyDuration(univ.Choice):
+ pass
+
+KeyDuration.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hours', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_hours)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('days', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_days))),
+ namedtype.NamedType('weeks', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_weeks)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 1))),
+ namedtype.NamedType('months', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_months)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 2))),
+ namedtype.NamedType('years', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, ub_KeyDuration_years)).subtype(
+ implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatSimple, 3)))
+)
+
+
+aa_keyDurationPeriod = Attribute()
+aa_keyDurationPeriod['attrType'] = id_kma_keyDuration
+aa_keyDurationPeriod['attrValues'][0] = KeyDuration()
+
+
+# Classification Attribute
+
+id_aa_KP_classification = univ.ObjectIdentifier(id_aa_securityLabel)
+
+
+id_enumeratedPermissiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.1')
+
+id_enumeratedRestrictiveAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.4')
+
+id_informativeAttributes = univ.ObjectIdentifier('2.16.840.1.101.2.1.8.3.3')
+
+
+class SecurityAttribute(univ.Integer):
+ pass
+
+SecurityAttribute.subtypeSpec = constraint.ValueRangeConstraint(0, MAX)
+
+
+class EnumeratedTag(univ.Sequence):
+ pass
+
+EnumeratedTag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tagName', univ.ObjectIdentifier()),
+ namedtype.NamedType('attributeList', univ.SetOf(componentType=SecurityAttribute()))
+)
+
+
+class FreeFormField(univ.Choice):
+ pass
+
+FreeFormField.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('bitSetAttributes', univ.BitString()), # Not permitted in RFC 7906
+ namedtype.NamedType('securityAttributes', univ.SetOf(componentType=SecurityAttribute()))
+)
+
+
+class InformativeTag(univ.Sequence):
+ pass
+
+InformativeTag.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('tagName', univ.ObjectIdentifier()),
+ namedtype.NamedType('attributes', FreeFormField())
+)
+
+
+class Classification(ESSSecurityLabel):
+ pass
+
+
+aa_classification = Attribute()
+aa_classification['attrType'] = id_aa_KP_classification
+aa_classification['attrValues'][0] = Classification()
+
+
+# Split Identifier Attribute
+
+id_kma_splitID = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.11')
+
+
+class SplitID(univ.Sequence):
+ pass
+
+SplitID.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('half', univ.Enumerated(
+ namedValues=namedval.NamedValues(('a', 0), ('b', 1)))),
+ namedtype.OptionalNamedType('combineAlg', AlgorithmIdentifier())
+)
+
+
+aa_splitIdentifier = Attribute()
+aa_splitIdentifier['attrType'] = id_kma_splitID
+aa_splitIdentifier['attrValues'][0] = SplitID()
+
+
+# Key Package Type Attribute
+
+id_kma_keyPkgType = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.12')
+
+
+class KeyPkgType(univ.ObjectIdentifier):
+ pass
+
+
+aa_keyPackageType = Attribute()
+aa_keyPackageType['attrType'] = id_kma_keyPkgType
+aa_keyPackageType['attrValues'][0] = KeyPkgType()
+
+
+# Signature Usage Attribute
+
+id_kma_sigUsageV3 = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.22')
+
+
+class SignatureUsage(CMSContentConstraints):
+ pass
+
+
+aa_signatureUsage_v3 = Attribute()
+aa_signatureUsage_v3['attrType'] = id_kma_sigUsageV3
+aa_signatureUsage_v3['attrValues'][0] = SignatureUsage()
+
+
+# Other Certificate Format Attribute
+
+id_kma_otherCertFormats = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.19')
+
+
+aa_otherCertificateFormats = Attribute()
+aa_otherCertificateFormats['attrType'] = id_kma_otherCertFormats
+aa_otherCertificateFormats['attrValues'][0] = CertificateChoices()
+
+
+# PKI Path Attribute
+
+id_at_pkiPath = univ.ObjectIdentifier('2.5.4.70')
+
+
+class PkiPath(univ.SequenceOf):
+ pass
+
+PkiPath.componentType = Certificate()
+PkiPath.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+aa_pkiPath = Attribute()
+aa_pkiPath['attrType'] = id_at_pkiPath
+aa_pkiPath['attrValues'][0] = PkiPath()
+
+
+# Useful Certificates Attribute
+
+id_kma_usefulCerts = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.20')
+
+
+aa_usefulCertificates = Attribute()
+aa_usefulCertificates['attrType'] = id_kma_usefulCerts
+aa_usefulCertificates['attrValues'][0] = CertificateSet()
+
+
+# Key Wrap Attribute
+
+id_kma_keyWrapAlgorithm = univ.ObjectIdentifier('2.16.840.1.101.2.1.13.21')
+
+
+aa_keyWrapAlgorithm = Attribute()
+aa_keyWrapAlgorithm['attrType'] = id_kma_keyWrapAlgorithm
+aa_keyWrapAlgorithm['attrValues'][0] = AlgorithmIdentifier()
+
+
+# Content Decryption Key Identifier Attribute
+
+id_aa_KP_contentDecryptKeyID = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.66')
+
+
+class ContentDecryptKeyID(univ.OctetString):
+ pass
+
+
+aa_contentDecryptKeyIdentifier = Attribute()
+aa_contentDecryptKeyIdentifier['attrType'] = id_aa_KP_contentDecryptKeyID
+aa_contentDecryptKeyIdentifier['attrValues'][0] = ContentDecryptKeyID()
+
+
+# Certificate Pointers Attribute
+
+aa_certificatePointers = Attribute()
+aa_certificatePointers['attrType'] = id_pe_subjectInfoAccess
+aa_certificatePointers['attrValues'][0] = SubjectInfoAccessSyntax()
+
+
+# CRL Pointers Attribute
+
+id_aa_KP_crlPointers = univ.ObjectIdentifier('2.16.840.1.101.2.1.5.70')
+
+
+aa_cRLDistributionPoints = Attribute()
+aa_cRLDistributionPoints['attrType'] = id_aa_KP_crlPointers
+aa_cRLDistributionPoints['attrValues'][0] = GeneralNames()
+
+
+# Extended Error Codes
+
+id_errorCodes = univ.ObjectIdentifier('2.16.840.1.101.2.1.22')
+
+id_missingKeyType = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.1')
+
+id_privacyMarkTooLong = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.2')
+
+id_unrecognizedSecurityPolicy = univ.ObjectIdentifier('2.16.840.1.101.2.1.22.3')
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_aa_contentHint: ContentHints(),
+ id_aa_communityIdentifiers: CommunityIdentifiers(),
+ id_aa_binarySigningTime: BinarySigningTime(),
+ id_contentType: ContentType(),
+ id_messageDigest: MessageDigest(),
+ id_aa_KP_keyPkgIdAndReceiptReq: KeyPkgIdentifierAndReceiptReq(),
+ id_aa_KP_keyProvinceV2: KeyProvinceV2(),
+ id_aa_KP_manifest: Manifest(),
+ id_kma_keyAlgorithm: KeyAlgorithm(),
+ id_at_userCertificate: Certificate(),
+ id_kma_keyPkgReceiversV2: KeyPkgReceiversV2(),
+ id_kma_TSECNomenclature: TSECNomenclature(),
+ id_kma_keyPurpose: KeyPurpose(),
+ id_kma_keyUse: KeyUse(),
+ id_kma_transportKey: TransOp(),
+ id_kma_keyDistPeriod: KeyDistPeriod(),
+ id_kma_keyValidityPeriod: KeyValidityPeriod(),
+ id_kma_keyDuration: KeyDuration(),
+ id_aa_KP_classification: Classification(),
+ id_kma_splitID: SplitID(),
+ id_kma_keyPkgType: KeyPkgType(),
+ id_kma_sigUsageV3: SignatureUsage(),
+ id_kma_otherCertFormats: CertificateChoices(),
+ id_at_pkiPath: PkiPath(),
+ id_kma_usefulCerts: CertificateSet(),
+ id_kma_keyWrapAlgorithm: AlgorithmIdentifier(),
+ id_aa_KP_contentDecryptKeyID: ContentDecryptKeyID(),
+ id_pe_subjectInfoAccess: SubjectInfoAccessSyntax(),
+ id_aa_KP_crlPointers: GeneralNames(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
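
A sketch (illustrative only, not part of the patch): a key validity period attribute; BinaryTime (RFC 6019) is seconds since the UNIX epoch, so plain integers suffice:

    import time

    from pyasn1.codec.der.encoder import encode
    from pyasn1_modules import rfc7906

    period = rfc7906.KeyValidityPeriod()
    period['doNotUseBefore'] = int(time.time())
    period['doNotUseAfter'] = int(time.time()) + 30 * 86400

    attr = rfc7906.Attribute()
    attr['attrType'] = rfc7906.id_kma_keyValidityPeriod
    attr['attrValues'][0] = encode(period)
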
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc7914.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7914.py
new file mode 100644
index 0000000000..99e9551567
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc7914.py
@@ -0,0 +1,49 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# The scrypt Password-Based Key Derivation Function
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc7914.txt
+# https://www.rfc-editor.org/errata/eid5871
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+id_scrypt = univ.ObjectIdentifier('1.3.6.1.4.1.11591.4.11')
+
+
+class Scrypt_params(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt',
+ univ.OctetString()),
+ namedtype.NamedType('costParameter',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.NamedType('blockSize',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.NamedType('parallelizationParameter',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('keyLength',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(1, MAX)))
+ )
+
+
+# Update the Algorithm Identifier map in rfc5280.py
+
+_algorithmIdentifierMapUpdate = {
+ id_scrypt: Scrypt_params(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
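
A sketch (illustrative only, not part of the patch): an AlgorithmIdentifier for scrypt using the RFC 7914 test-vector parameters N=16384, r=8, p=1; the salt is a placeholder:

    from pyasn1.codec.der.encoder import encode
    from pyasn1_modules import rfc5280, rfc7914

    params = rfc7914.Scrypt_params()
    params['salt'] = b'\x00' * 16  # placeholder salt
    params['costParameter'] = 16384
    params['blockSize'] = 8
    params['parallelizationParameter'] = 1

    alg_id = rfc5280.AlgorithmIdentifier()
    alg_id['algorithm'] = rfc7914.id_scrypt
    alg_id['parameters'] = encode(params)
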
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8017.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8017.py
new file mode 100644
index 0000000000..fefed1dcd6
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8017.py
@@ -0,0 +1,153 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #1: RSA Cryptography Specifications Version 2.2
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8017.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc2437
+from pyasn1_modules import rfc3447
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# Import Algorithm Identifier from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+class DigestAlgorithm(AlgorithmIdentifier):
+ pass
+
+class HashAlgorithm(AlgorithmIdentifier):
+ pass
+
+class MaskGenAlgorithm(AlgorithmIdentifier):
+ pass
+
+class PSourceAlgorithm(AlgorithmIdentifier):
+ pass
+
+
+# Object identifiers from NIST SHA2
+
+hashAlgs = univ.ObjectIdentifier('2.16.840.1.101.3.4.2')
+id_sha256 = rfc4055.id_sha256
+id_sha384 = rfc4055.id_sha384
+id_sha512 = rfc4055.id_sha512
+id_sha224 = rfc4055.id_sha224
+id_sha512_224 = hashAlgs + (5, )
+id_sha512_256 = hashAlgs + (6, )
+
+
+# Basic object identifiers
+
+pkcs_1 = univ.ObjectIdentifier('1.2.840.113549.1.1')
+rsaEncryption = rfc2437.rsaEncryption
+id_RSAES_OAEP = rfc2437.id_RSAES_OAEP
+id_pSpecified = rfc2437.id_pSpecified
+id_RSASSA_PSS = rfc4055.id_RSASSA_PSS
+md2WithRSAEncryption = rfc2437.md2WithRSAEncryption
+md5WithRSAEncryption = rfc2437.md5WithRSAEncryption
+sha1WithRSAEncryption = rfc2437.sha1WithRSAEncryption
+sha224WithRSAEncryption = rfc4055.sha224WithRSAEncryption
+sha256WithRSAEncryption = rfc4055.sha256WithRSAEncryption
+sha384WithRSAEncryption = rfc4055.sha384WithRSAEncryption
+sha512WithRSAEncryption = rfc4055.sha512WithRSAEncryption
+sha512_224WithRSAEncryption = pkcs_1 + (15, )
+sha512_256WithRSAEncryption = pkcs_1 + (16, )
+id_sha1 = rfc2437.id_sha1
+id_md2 = univ.ObjectIdentifier('1.2.840.113549.2.2')
+id_md5 = univ.ObjectIdentifier('1.2.840.113549.2.5')
+id_mgf1 = rfc2437.id_mgf1
+
+
+# Default parameter values
+
+sha1 = rfc4055.sha1Identifier
+SHA1Parameters = univ.Null("")
+
+mgf1SHA1 = rfc4055.mgf1SHA1Identifier
+
+class EncodingParameters(univ.OctetString):
+ subtypeSpec = constraint.ValueSizeConstraint(0, MAX)
+
+pSpecifiedEmpty = rfc4055.pSpecifiedEmptyIdentifier
+
+emptyString = EncodingParameters(value='')
+
+
+# Main structures
+
+class Version(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('two-prime', 0),
+ ('multi', 1)
+ )
+
+class TrailerField(univ.Integer):
+ namedValues = namedval.NamedValues(
+ ('trailerFieldBC', 1)
+ )
+
+RSAPublicKey = rfc2437.RSAPublicKey
+
+OtherPrimeInfo = rfc3447.OtherPrimeInfo
+OtherPrimeInfos = rfc3447.OtherPrimeInfos
+RSAPrivateKey = rfc3447.RSAPrivateKey
+
+RSAES_OAEP_params = rfc4055.RSAES_OAEP_params
+rSAES_OAEP_Default_Identifier = rfc4055.rSAES_OAEP_Default_Identifier
+
+RSASSA_PSS_params = rfc4055.RSASSA_PSS_params
+rSASSA_PSS_Default_Identifier = rfc4055.rSASSA_PSS_Default_Identifier
+
+
+# Syntax for the EMSA-PKCS1-v1_5 hash identifier
+
+class DigestInfo(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('digestAlgorithm', DigestAlgorithm()),
+ namedtype.NamedType('digest', univ.OctetString())
+ )
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ id_sha1: univ.Null(),
+ id_sha224: univ.Null(),
+ id_sha256: univ.Null(),
+ id_sha384: univ.Null(),
+ id_sha512: univ.Null(),
+ id_sha512_224: univ.Null(),
+ id_sha512_256: univ.Null(),
+ id_mgf1: AlgorithmIdentifier(),
+ id_pSpecified: univ.OctetString(),
+ id_RSAES_OAEP: RSAES_OAEP_params(),
+ id_RSASSA_PSS: RSASSA_PSS_params(),
+ md2WithRSAEncryption: univ.Null(),
+ md5WithRSAEncryption: univ.Null(),
+ sha1WithRSAEncryption: univ.Null(),
+ sha224WithRSAEncryption: univ.Null(),
+ sha256WithRSAEncryption: univ.Null(),
+ sha384WithRSAEncryption: univ.Null(),
+ sha512WithRSAEncryption: univ.Null(),
+ sha512_224WithRSAEncryption: univ.Null(),
+ sha512_256WithRSAEncryption: univ.Null(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
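
A minimal round-trip sketch for the DigestInfo structure defined above (a usage illustration only, assuming pyasn1 and pyasn1-modules are installed; the hashed message is made up):

    import hashlib

    from pyasn1.codec.der import decoder, encoder
    from pyasn1.type import univ
    from pyasn1_modules import rfc8017

    # Build the EMSA-PKCS1-v1_5 DigestInfo for a SHA-256 digest.
    digest_info = rfc8017.DigestInfo()
    digest_info['digestAlgorithm']['algorithm'] = rfc8017.id_sha256
    digest_info['digestAlgorithm']['parameters'] = univ.Null('')
    digest_info['digest'] = univ.OctetString(hashlib.sha256(b'message').digest())

    # DER-encode, then decode back and check the algorithm OID survived.
    substrate = encoder.encode(digest_info)
    decoded, rest = decoder.decode(substrate, asn1Spec=rfc8017.DigestInfo())
    assert not rest
    assert decoded['digestAlgorithm']['algorithm'] == rfc8017.id_sha256
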
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8018.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8018.py
new file mode 100644
index 0000000000..7a44eea8d2
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8018.py
@@ -0,0 +1,260 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# PKCS #5: Password-Based Cryptography Specification, Version 2.1
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8018.txt
+#
+
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+# Import from RFC 3565
+
+AES_IV = rfc3565.AES_IV
+
+
+# Import from RFC 5280
+
+AlgorithmIdentifier = rfc5280.AlgorithmIdentifier
+
+
+# Basic object identifiers
+
+nistAlgorithms = _OID(2, 16, 840, 1, 101, 3, 4)
+
+aes = _OID(nistAlgorithms, 1)
+
+oiw = _OID(1, 3, 14)
+
+rsadsi = _OID(1, 2, 840, 113549)
+
+pkcs = _OID(rsadsi, 1)
+
+digestAlgorithm = _OID(rsadsi, 2)
+
+encryptionAlgorithm = _OID(rsadsi, 3)
+
+pkcs_5 = _OID(pkcs, 5)
+
+
+
+# HMAC object identifiers
+
+id_hmacWithSHA1 = _OID(digestAlgorithm, 7)
+
+id_hmacWithSHA224 = _OID(digestAlgorithm, 8)
+
+id_hmacWithSHA256 = _OID(digestAlgorithm, 9)
+
+id_hmacWithSHA384 = _OID(digestAlgorithm, 10)
+
+id_hmacWithSHA512 = _OID(digestAlgorithm, 11)
+
+id_hmacWithSHA512_224 = _OID(digestAlgorithm, 12)
+
+id_hmacWithSHA512_256 = _OID(digestAlgorithm, 13)
+
+
+# PBES1 object identifiers
+
+pbeWithMD2AndDES_CBC = _OID(pkcs_5, 1)
+
+pbeWithMD2AndRC2_CBC = _OID(pkcs_5, 4)
+
+pbeWithMD5AndDES_CBC = _OID(pkcs_5, 3)
+
+pbeWithMD5AndRC2_CBC = _OID(pkcs_5, 6)
+
+pbeWithSHA1AndDES_CBC = _OID(pkcs_5, 10)
+
+pbeWithSHA1AndRC2_CBC = _OID(pkcs_5, 11)
+
+
+# Supporting techniques object identifiers
+
+desCBC = _OID(oiw, 3, 2, 7)
+
+des_EDE3_CBC = _OID(encryptionAlgorithm, 7)
+
+rc2CBC = _OID(encryptionAlgorithm, 2)
+
+rc5_CBC_PAD = _OID(encryptionAlgorithm, 9)
+
+aes128_CBC_PAD = _OID(aes, 2)
+
+aes192_CBC_PAD = _OID(aes, 22)
+
+aes256_CBC_PAD = _OID(aes, 42)
+
+
+# PBES1
+
+class PBEParameter(univ.Sequence):
+ pass
+
+PBEParameter.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8))),
+ namedtype.NamedType('iterationCount', univ.Integer())
+)
+
+
+# PBES2
+
+id_PBES2 = _OID(pkcs_5, 13)
+
+
+class PBES2_params(univ.Sequence):
+ pass
+
+PBES2_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunc', AlgorithmIdentifier()),
+ namedtype.NamedType('encryptionScheme', AlgorithmIdentifier())
+)
+
+
+# PBMAC1
+
+id_PBMAC1 = _OID(pkcs_5, 14)
+
+
+class PBMAC1_params(univ.Sequence):
+ pass
+
+PBMAC1_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('keyDerivationFunc', AlgorithmIdentifier()),
+ namedtype.NamedType('messageAuthScheme', AlgorithmIdentifier())
+)
+
+
+# PBKDF2
+
+id_PBKDF2 = _OID(pkcs_5, 12)
+
+
+algid_hmacWithSHA1 = AlgorithmIdentifier()
+algid_hmacWithSHA1['algorithm'] = id_hmacWithSHA1
+algid_hmacWithSHA1['parameters'] = univ.Null("")
+
+
+class PBKDF2_params(univ.Sequence):
+ pass
+
+PBKDF2_params.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('salt', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('specified', univ.OctetString()),
+ namedtype.NamedType('otherSource', AlgorithmIdentifier())
+ ))),
+ namedtype.NamedType('iterationCount', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.OptionalNamedType('keyLength', univ.Integer().subtype(
+ subtypeSpec=constraint.ValueRangeConstraint(1, MAX))),
+ namedtype.DefaultedNamedType('prf', algid_hmacWithSHA1)
+)
+
+
+# RC2 CBC algorithm parameter
+
+class RC2_CBC_Parameter(univ.Sequence):
+ pass
+
+RC2_CBC_Parameter.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('rc2ParameterVersion', univ.Integer()),
+ namedtype.NamedType('iv', univ.OctetString().subtype(
+ subtypeSpec=constraint.ValueSizeConstraint(8, 8)))
+)
+
+
+# RC5 CBC algorithm parameter
+
+class RC5_CBC_Parameters(univ.Sequence):
+ pass
+
+RC5_CBC_Parameters.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version',
+ univ.Integer(namedValues=namedval.NamedValues(('v1_0', 16))).subtype(
+ subtypeSpec=constraint.SingleValueConstraint(16))),
+ namedtype.NamedType('rounds',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(8, 127))),
+ namedtype.NamedType('blockSizeInBits',
+ univ.Integer().subtype(subtypeSpec=constraint.SingleValueConstraint(64, 128))),
+ namedtype.OptionalNamedType('iv', univ.OctetString())
+)
+
+
+# Initialization Vector for AES: OCTET STRING (SIZE(16))
+
+class AES_IV(univ.OctetString):
+ pass
+
+AES_IV.subtypeSpec = constraint.ValueSizeConstraint(16, 16)
+
+
+# Initialization Vector for DES: OCTET STRING (SIZE(8))
+
+class DES_IV(univ.OctetString):
+ pass
+
+DES_IV.subtypeSpec = constraint.ValueSizeConstraint(8, 8)
+
+
+# Update the Algorithm Identifier map
+
+_algorithmIdentifierMapUpdate = {
+ # PBKDF2-PRFs
+ id_hmacWithSHA1: univ.Null(),
+ id_hmacWithSHA224: univ.Null(),
+ id_hmacWithSHA256: univ.Null(),
+ id_hmacWithSHA384: univ.Null(),
+ id_hmacWithSHA512: univ.Null(),
+ id_hmacWithSHA512_224: univ.Null(),
+ id_hmacWithSHA512_256: univ.Null(),
+ # PBES1Algorithms
+ pbeWithMD2AndDES_CBC: PBEParameter(),
+ pbeWithMD2AndRC2_CBC: PBEParameter(),
+ pbeWithMD5AndDES_CBC: PBEParameter(),
+ pbeWithMD5AndRC2_CBC: PBEParameter(),
+ pbeWithSHA1AndDES_CBC: PBEParameter(),
+ pbeWithSHA1AndRC2_CBC: PBEParameter(),
+ # PBES2Algorithms
+ id_PBES2: PBES2_params(),
+ # PBES2-KDFs
+ id_PBKDF2: PBKDF2_params(),
+ # PBMAC1Algorithms
+ id_PBMAC1: PBMAC1_params(),
+ # SupportingAlgorithms
+ desCBC: DES_IV(),
+ des_EDE3_CBC: DES_IV(),
+ rc2CBC: RC2_CBC_Parameter(),
+ rc5_CBC_PAD: RC5_CBC_Parameters(),
+ aes128_CBC_PAD: AES_IV(),
+ aes192_CBC_PAD: AES_IV(),
+ aes256_CBC_PAD: AES_IV(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
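
A short sketch of populating and encoding the PBKDF2-params defined above (salt and counts are illustrative; pyasn1 assumed installed):

    import os

    from pyasn1.codec.der import decoder, encoder
    from pyasn1_modules import rfc8018

    # 'prf' is a DEFAULT component (hmacWithSHA1), so DER omits it when unset.
    params = rfc8018.PBKDF2_params()
    params['salt']['specified'] = os.urandom(8)
    params['iterationCount'] = 120000
    params['keyLength'] = 32

    substrate = encoder.encode(params)
    decoded, rest = decoder.decode(substrate, asn1Spec=rfc8018.PBKDF2_params())
    assert int(decoded['iterationCount']) == 120000
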
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8103.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8103.py
new file mode 100644
index 0000000000..6429e8635f
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8103.py
@@ -0,0 +1,36 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool.
+# Auto-generated by asn1ate v.0.6.0 from rfc8103.asn.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# ChaCha20Poly1305 algorithm for use with the Authenticated-Enveloped-Data
+# protecting content type for the Cryptographic Message Syntax (CMS)
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8103.txt
+
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class AEADChaCha20Poly1305Nonce(univ.OctetString):
+ pass
+
+
+AEADChaCha20Poly1305Nonce.subtypeSpec = constraint.ValueSizeConstraint(12, 12)
+
+id_alg_AEADChaCha20Poly1305 = _OID(1, 2, 840, 113549, 1, 9, 16, 3, 18)
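
A small sketch showing the nonce type's SIZE(12) constraint in action (values illustrative):

    import os

    from pyasn1.codec.der import encoder
    from pyasn1.error import PyAsn1Error
    from pyasn1_modules import rfc8103

    # A 12-octet value satisfies the constraint and encodes cleanly.
    nonce = rfc8103.AEADChaCha20Poly1305Nonce(os.urandom(12))
    substrate = encoder.encode(nonce)

    # Any other length is rejected when the value object is created.
    try:
        rfc8103.AEADChaCha20Poly1305Nonce(os.urandom(11))
    except PyAsn1Error:
        pass
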
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8209.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8209.py
new file mode 100644
index 0000000000..7d70f51b0c
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8209.py
@@ -0,0 +1,20 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# BGPsec Router PKI Profile
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8209.txt
+#
+
+from pyasn1.type import univ
+
+
+id_kp = univ.ObjectIdentifier('1.3.6.1.5.5.7.3')
+
+id_kp_bgpsec_router = id_kp + (30, )
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8226.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8226.py
new file mode 100644
index 0000000000..e7fe9460e9
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8226.py
@@ -0,0 +1,149 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from the asn1ate tool, with manual
+# changes to implement appropriate constraints and added comments.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# JWT Claim Constraints and TN Authorization List for certificate extensions.
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8226.txt (with errata corrected)
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import namedtype
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+def _OID(*components):
+ output = []
+ for x in tuple(components):
+ if isinstance(x, univ.ObjectIdentifier):
+ output.extend(list(x))
+ else:
+ output.append(int(x))
+
+ return univ.ObjectIdentifier(output)
+
+
+class JWTClaimName(char.IA5String):
+ pass
+
+
+class JWTClaimNames(univ.SequenceOf):
+ pass
+
+JWTClaimNames.componentType = JWTClaimName()
+JWTClaimNames.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class JWTClaimPermittedValues(univ.Sequence):
+ pass
+
+JWTClaimPermittedValues.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('claim', JWTClaimName()),
+ namedtype.NamedType('permitted', univ.SequenceOf(
+ componentType=char.UTF8String()).subtype(
+ sizeSpec=constraint.ValueSizeConstraint(1, MAX)))
+)
+
+
+class JWTClaimPermittedValuesList(univ.SequenceOf):
+ pass
+
+JWTClaimPermittedValuesList.componentType = JWTClaimPermittedValues()
+JWTClaimPermittedValuesList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+class JWTClaimConstraints(univ.Sequence):
+ pass
+
+JWTClaimConstraints.componentType = namedtype.NamedTypes(
+ namedtype.OptionalNamedType('mustInclude',
+ JWTClaimNames().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.OptionalNamedType('permittedValues',
+ JWTClaimPermittedValuesList().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 1)))
+)
+
+JWTClaimConstraints.subtypeSpec = constraint.ConstraintsUnion(
+ constraint.WithComponentsConstraint(
+ ('mustInclude', constraint.ComponentPresentConstraint())),
+ constraint.WithComponentsConstraint(
+ ('permittedValues', constraint.ComponentPresentConstraint()))
+)
+
+
+id_pe_JWTClaimConstraints = _OID(1, 3, 6, 1, 5, 5, 7, 1, 27)
+
+
+class ServiceProviderCode(char.IA5String):
+ pass
+
+
+class TelephoneNumber(char.IA5String):
+ pass
+
+TelephoneNumber.subtypeSpec = constraint.ConstraintsIntersection(
+ constraint.ValueSizeConstraint(1, 15),
+ constraint.PermittedAlphabetConstraint(
+ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '#', '*')
+)
+
+
+class TelephoneNumberRange(univ.Sequence):
+ pass
+
+TelephoneNumberRange.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('start', TelephoneNumber()),
+ namedtype.NamedType('count',
+ univ.Integer().subtype(subtypeSpec=constraint.ValueRangeConstraint(2, MAX)))
+)
+
+
+class TNEntry(univ.Choice):
+ pass
+
+TNEntry.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('spc',
+ ServiceProviderCode().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 0))),
+ namedtype.NamedType('range',
+ TelephoneNumberRange().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatConstructed, 1))),
+ namedtype.NamedType('one',
+ TelephoneNumber().subtype(explicitTag=tag.Tag(tag.tagClassContext,
+ tag.tagFormatSimple, 2)))
+)
+
+
+class TNAuthorizationList(univ.SequenceOf):
+ pass
+
+TNAuthorizationList.componentType = TNEntry()
+TNAuthorizationList.sizeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+id_pe_TNAuthList = _OID(1, 3, 6, 1, 5, 5, 7, 1, 26)
+
+
+id_ad_stirTNList = _OID(1, 3, 6, 1, 5, 5, 7, 48, 14)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_TNAuthList: TNAuthorizationList(),
+ id_pe_JWTClaimConstraints: JWTClaimConstraints(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
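
A usage sketch for the TN Authorization List types above (the telephone number is illustrative):

    from pyasn1.codec.der import decoder, encoder
    from pyasn1_modules import rfc8226

    # A one-entry list authorizing a single telephone number.
    entry = rfc8226.TNEntry()
    entry['one'] = '15715551234'

    tn_list = rfc8226.TNAuthorizationList()
    tn_list.append(entry)

    substrate = encoder.encode(tn_list)
    decoded, rest = decoder.decode(substrate, asn1Spec=rfc8226.TNAuthorizationList())
    assert str(decoded[0]['one']) == '15715551234'
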
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8358.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8358.py
new file mode 100644
index 0000000000..647a366622
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8358.py
@@ -0,0 +1,50 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Digital Signatures on Internet-Draft Documents
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8358.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+id_ct = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1')
+
+id_ct_asciiTextWithCRLF = id_ct + (27, )
+
+id_ct_epub = id_ct + (39, )
+
+id_ct_htmlWithCRLF = id_ct + (38, )
+
+id_ct_pdf = id_ct + (29, )
+
+id_ct_postscript = id_ct + (30, )
+
+id_ct_utf8TextWithCRLF = id_ct + (37, )
+
+id_ct_xml = id_ct + (28, )
+
+
+# Map of Content Type OIDs to Content Types is added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_asciiTextWithCRLF: univ.OctetString(),
+ id_ct_epub: univ.OctetString(),
+ id_ct_htmlWithCRLF: univ.OctetString(),
+ id_ct_pdf: univ.OctetString(),
+ id_ct_postscript: univ.OctetString(),
+ id_ct_utf8TextWithCRLF: univ.OctetString(),
+ id_ct_xml: univ.OctetString(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8360.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8360.py
new file mode 100644
index 0000000000..ca180c18d8
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8360.py
@@ -0,0 +1,44 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Resource Public Key Infrastructure (RPKI) Validation Reconsidered
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8360.txt
+# https://www.rfc-editor.org/errata/eid5870
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc3779
+from pyasn1_modules import rfc5280
+
+
+# IP Address Delegation Extension V2
+
+id_pe_ipAddrBlocks_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.28')
+
+IPAddrBlocks = rfc3779.IPAddrBlocks
+
+
+# Autonomous System Identifier Delegation Extension V2
+
+id_pe_autonomousSysIds_v2 = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.29')
+
+ASIdentifiers = rfc3779.ASIdentifiers
+
+
+# Map of Certificate Extension OIDs to Extensions is added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_ipAddrBlocks_v2: IPAddrBlocks(),
+ id_pe_autonomousSysIds_v2: ASIdentifiers(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8398.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8398.py
new file mode 100644
index 0000000000..151b632107
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8398.py
@@ -0,0 +1,52 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with some assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Internationalized Email Addresses in X.509 Certificates
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8398.txt
+# https://www.rfc-editor.org/errata/eid5418
+#
+
+from pyasn1.type import char
+from pyasn1.type import constraint
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+MAX = float('inf')
+
+
+# SmtpUTF8Mailbox contains Mailbox as specified in Section 3.3 of RFC 6531
+
+id_pkix = rfc5280.id_pkix
+
+id_on = id_pkix + (8, )
+
+id_on_SmtpUTF8Mailbox = id_on + (9, )
+
+
+class SmtpUTF8Mailbox(char.UTF8String):
+ pass
+
+SmtpUTF8Mailbox.subtypeSpec = constraint.ValueSizeConstraint(1, MAX)
+
+
+on_SmtpUTF8Mailbox = rfc5280.AnotherName()
+on_SmtpUTF8Mailbox['type-id'] = id_on_SmtpUTF8Mailbox
+on_SmtpUTF8Mailbox['value'] = SmtpUTF8Mailbox()
+
+
+# Map of Other Name OIDs to Other Name is added to the
+# ones that are in rfc5280.py
+
+_anotherNameMapUpdate = {
+ id_on_SmtpUTF8Mailbox: SmtpUTF8Mailbox(),
+}
+
+rfc5280.anotherNameMap.update(_anotherNameMapUpdate)
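
A sketch of wrapping a mailbox in an otherName using the types above (address illustrative):

    from pyasn1.codec.der import decoder, encoder
    from pyasn1_modules import rfc5280, rfc8398

    mailbox = rfc8398.SmtpUTF8Mailbox('jane@example.com')

    # The Any-typed 'value' component carries the DER encoding of the mailbox.
    another_name = rfc5280.AnotherName()
    another_name['type-id'] = rfc8398.id_on_SmtpUTF8Mailbox
    another_name['value'] = encoder.encode(mailbox)

    substrate = encoder.encode(another_name)
    decoded, rest = decoder.decode(substrate, asn1Spec=rfc5280.AnotherName())
    assert decoded['type-id'] == rfc8398.id_on_SmtpUTF8Mailbox
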
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8410.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8410.py
new file mode 100644
index 0000000000..98bc97bb14
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8410.py
@@ -0,0 +1,43 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for Ed25519, Ed448, X25519, and X448
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8410.txt
+
+from pyasn1.type import univ
+from pyasn1_modules import rfc3565
+from pyasn1_modules import rfc4055
+from pyasn1_modules import rfc5280
+
+
+class SignatureAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class CurvePrivateKey(univ.OctetString):
+ pass
+
+
+id_X25519 = univ.ObjectIdentifier('1.3.101.110')
+
+id_X448 = univ.ObjectIdentifier('1.3.101.111')
+
+id_Ed25519 = univ.ObjectIdentifier('1.3.101.112')
+
+id_Ed448 = univ.ObjectIdentifier('1.3.101.113')
+
+id_sha512 = rfc4055.id_sha512
+
+id_aes128_wrap = rfc3565.id_aes128_wrap
+
+id_aes256_wrap = rfc3565.id_aes256_wrap
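
A brief sketch using the identifiers above (the key bytes are illustrative, not a real key):

    import os

    from pyasn1.codec.der import encoder
    from pyasn1_modules import rfc8410

    # RFC 8410 requires the parameters field to be absent for these
    # algorithms, so only the OID is set.
    alg_id = rfc8410.SignatureAlgorithmIdentifier()
    alg_id['algorithm'] = rfc8410.id_Ed25519
    substrate = encoder.encode(alg_id)

    # The raw private-key octets travel as a CurvePrivateKey inside the
    # PKCS#8 privateKey OCTET STRING.
    curve_key = rfc8410.CurvePrivateKey(os.urandom(32))
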
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8418.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8418.py
new file mode 100644
index 0000000000..6e76487c88
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8418.py
@@ -0,0 +1,36 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Elliptic Curve Diffie-Hellman (ECDH) Key Agreement Algorithm
+# with X25519 and X448
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8418.txt
+
+from pyasn1.type import univ
+from pyasn1_modules import rfc5280
+
+
+class KeyEncryptionAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+class KeyWrapAlgorithmIdentifier(rfc5280.AlgorithmIdentifier):
+ pass
+
+
+dhSinglePass_stdDH_sha256kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.1')
+
+dhSinglePass_stdDH_sha384kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.2')
+
+dhSinglePass_stdDH_sha512kdf_scheme = univ.ObjectIdentifier('1.3.133.16.840.63.0.11.3')
+
+dhSinglePass_stdDH_hkdf_sha256_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.19')
+
+dhSinglePass_stdDH_hkdf_sha384_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.20')
+
+dhSinglePass_stdDH_hkdf_sha512_scheme = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.21')
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8419.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8419.py
new file mode 100644
index 0000000000..f10994be28
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8419.py
@@ -0,0 +1,68 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Edwards-Curve Digital Signature Algorithm (EdDSA) Signatures in the CMS
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8419.txt
+# https://www.rfc-editor.org/errata/eid5869
+
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+class ShakeOutputLen(univ.Integer):
+ pass
+
+
+id_Ed25519 = univ.ObjectIdentifier('1.3.101.112')
+
+sigAlg_Ed25519 = rfc5280.AlgorithmIdentifier()
+sigAlg_Ed25519['algorithm'] = id_Ed25519
+# sigAlg_Ed25519['parameters'] is absent
+
+
+id_Ed448 = univ.ObjectIdentifier('1.3.101.113')
+
+sigAlg_Ed448 = rfc5280.AlgorithmIdentifier()
+sigAlg_Ed448['algorithm'] = id_Ed448
+# sigAlg_Ed448['parameters'] is absent
+
+
+hashAlgs = univ.ObjectIdentifier('2.16.840.1.101.3.4.2')
+
+id_sha512 = hashAlgs + (3, )
+
+hashAlg_SHA_512 = rfc5280.AlgorithmIdentifier()
+hashAlg_SHA_512['algorithm'] = id_sha512
+# hashAlg_SHA_512['parameters'] is absent
+
+
+id_shake256 = hashAlgs + (12, )
+
+hashAlg_SHAKE256 = rfc5280.AlgorithmIdentifier()
+hashAlg_SHAKE256['algorithm'] = id_shake256
+# hashAlg_SHAKE256['parameters'] is absent
+
+
+id_shake256_len = hashAlgs + (18, )
+
+hashAlg_SHAKE256_LEN = rfc5280.AlgorithmIdentifier()
+hashAlg_SHAKE256_LEN['algorithm'] = id_shake256_len
+hashAlg_SHAKE256_LEN['parameters'] = ShakeOutputLen()
+
+
+# Map of Algorithm Identifier OIDs to Parameters added to the
+# ones in rfc5280.py. Do not add OIDs with absent parameters.
+
+_algorithmIdentifierMapUpdate = {
+ id_shake256_len: ShakeOutputLen(),
+}
+
+rfc5280.algorithmIdentifierMap.update(_algorithmIdentifierMapUpdate)
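
A sketch of the one identifier above that carries parameters, id-shake256-len (RFC 8419 uses a 512-bit SHAKE256 output with Ed448):

    from pyasn1.codec.der import decoder, encoder
    from pyasn1_modules import rfc5280, rfc8419

    alg_id = rfc5280.AlgorithmIdentifier()
    alg_id['algorithm'] = rfc8419.id_shake256_len
    alg_id['parameters'] = rfc8419.ShakeOutputLen(512)

    substrate = encoder.encode(alg_id)
    decoded, rest = decoder.decode(substrate, asn1Spec=rfc5280.AlgorithmIdentifier())
    assert decoded['algorithm'] == rfc8419.id_shake256_len
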
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8479.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8479.py
new file mode 100644
index 0000000000..57f78b62f2
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8479.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Storing Validation Parameters in PKCS#8
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8479.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5652
+
+
+id_attr_validation_parameters = univ.ObjectIdentifier('1.3.6.1.4.1.2312.18.8.1')
+
+
+class ValidationParams(univ.Sequence):
+ pass
+
+ValidationParams.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', univ.ObjectIdentifier()),
+ namedtype.NamedType('seed', univ.OctetString())
+)
+
+
+at_validation_parameters = rfc5652.Attribute()
+at_validation_parameters['attrType'] = id_attr_validation_parameters
+at_validation_parameters['attrValues'][0] = ValidationParams()
+
+
+# Map of Attribute Type OIDs to Attributes added to the
+# ones that are in rfc5652.py
+
+_cmsAttributesMapUpdate = {
+ id_attr_validation_parameters: ValidationParams(),
+}
+
+rfc5652.cmsAttributesMap.update(_cmsAttributesMapUpdate)
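
A sketch of building the validation-parameters attribute value defined above (the seed bytes are illustrative):

    import os

    from pyasn1.codec.der import encoder
    from pyasn1.type import univ
    from pyasn1_modules import rfc8479

    params = rfc8479.ValidationParams()
    params['hashAlg'] = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1')  # id-sha256
    params['seed'] = os.urandom(32)

    substrate = encoder.encode(params)
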
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8494.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8494.py
new file mode 100644
index 0000000000..fe349e14ca
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8494.py
@@ -0,0 +1,80 @@
+# This file is being contributed to pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Multicast Email (MULE) over Allied Communications Publication 142
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8494.txt
+
+from pyasn1.type import namedtype
+from pyasn1.type import namedval
+from pyasn1.type import tag
+from pyasn1.type import univ
+
+
+id_mmhs_CDT = univ.ObjectIdentifier('1.3.26.0.4406.0.4.2')
+
+
+class AlgorithmID_ShortForm(univ.Integer):
+ pass
+
+AlgorithmID_ShortForm.namedValues = namedval.NamedValues(
+ ('zlibCompress', 0)
+)
+
+
+class ContentType_ShortForm(univ.Integer):
+ pass
+
+ContentType_ShortForm.namedValues = namedval.NamedValues(
+ ('unidentified', 0),
+ ('external', 1),
+ ('p1', 2),
+ ('p3', 3),
+ ('p7', 4),
+ ('mule', 25)
+)
+
+
+class CompressedContentInfo(univ.Sequence):
+ pass
+
+CompressedContentInfo.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('unnamed', univ.Choice(componentType=namedtype.NamedTypes(
+ namedtype.NamedType('contentType-ShortForm',
+ ContentType_ShortForm().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('contentType-OID',
+ univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+ ))),
+ namedtype.NamedType('compressedContent',
+ univ.OctetString().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0)))
+)
+
+
+class CompressionAlgorithmIdentifier(univ.Choice):
+ pass
+
+CompressionAlgorithmIdentifier.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('algorithmID-ShortForm',
+ AlgorithmID_ShortForm().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 0))),
+ namedtype.NamedType('algorithmID-OID',
+ univ.ObjectIdentifier().subtype(explicitTag=tag.Tag(
+ tag.tagClassContext, tag.tagFormatSimple, 1)))
+)
+
+
+class CompressedData(univ.Sequence):
+ pass
+
+CompressedData.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('compressionAlgorithm', CompressionAlgorithmIdentifier()),
+ namedtype.NamedType('compressedContentInfo', CompressedContentInfo())
+)
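
A round-trip sketch for CompressedData using the short-form identifiers above (payload illustrative; zlib is from the standard library):

    import zlib

    from pyasn1.codec.der import decoder, encoder
    from pyasn1_modules import rfc8494

    payload = b'MULE message body'

    compressed = rfc8494.CompressedData()
    compressed['compressionAlgorithm']['algorithmID-ShortForm'] = 0   # zlibCompress
    compressed['compressedContentInfo']['unnamed']['contentType-ShortForm'] = 25  # mule
    compressed['compressedContentInfo']['compressedContent'] = zlib.compress(payload)

    substrate = encoder.encode(compressed)
    decoded, rest = decoder.decode(substrate, asn1Spec=rfc8494.CompressedData())
    inner = decoded['compressedContentInfo']['compressedContent']
    assert zlib.decompress(bytes(inner)) == payload
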
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8520.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8520.py
new file mode 100644
index 0000000000..b9eb6e9377
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8520.py
@@ -0,0 +1,63 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+# Modified by Russ Housley to add maps for use with opentypes.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Extensions for MUD URL and MUD Signer;
+# Object Identifier for CMS Content Type for a MUD file
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8520.txt
+#
+
+from pyasn1.type import char
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+from pyasn1_modules import rfc5652
+
+
+# X.509 Extension for MUD URL
+
+id_pe_mud_url = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.25')
+
+class MUDURLSyntax(char.IA5String):
+ pass
+
+
+# X.509 Extension for MUD Signer
+
+id_pe_mudsigner = univ.ObjectIdentifier('1.3.6.1.5.5.7.1.30')
+
+class MUDsignerSyntax(rfc5280.Name):
+ pass
+
+
+# Object Identifier for CMS Content Type for a MUD file
+
+id_ct_mudtype = univ.ObjectIdentifier('1.2.840.113549.1.9.16.1.41')
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_pe_mud_url: MUDURLSyntax(),
+ id_pe_mudsigner: MUDsignerSyntax(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
+
+
+# Map of Content Type OIDs to Content Types added to the
+# ones that are in rfc5652.py
+
+_cmsContentTypesMapUpdate = {
+ id_ct_mudtype: univ.OctetString(),
+}
+
+rfc5652.cmsContentTypesMap.update(_cmsContentTypesMapUpdate)
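
A sketch of carrying a MUD URL in a certificate extension using the types above (the URL is illustrative):

    from pyasn1.codec.der import decoder, encoder
    from pyasn1_modules import rfc5280, rfc8520

    mud_url = rfc8520.MUDURLSyntax('https://example.com/lightbulb2000')

    # extnValue is an OCTET STRING holding the DER encoding of the URL.
    extension = rfc5280.Extension()
    extension['extnID'] = rfc8520.id_pe_mud_url
    extension['extnValue'] = encoder.encode(mud_url)

    substrate = encoder.encode(extension)
    decoded, rest = decoder.decode(substrate, asn1Spec=rfc5280.Extension())
    inner, _ = decoder.decode(bytes(decoded['extnValue']),
                              asn1Spec=rfc8520.MUDURLSyntax())
    assert str(inner) == 'https://example.com/lightbulb2000'
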
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8619.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8619.py
new file mode 100644
index 0000000000..0aaa811bad
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8619.py
@@ -0,0 +1,45 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# Algorithm Identifiers for HKDF
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8619.txt
+#
+
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+# Object Identifiers
+
+id_alg_hkdf_with_sha256 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.28')
+
+
+id_alg_hkdf_with_sha384 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.29')
+
+
+id_alg_hkdf_with_sha512 = univ.ObjectIdentifier('1.2.840.113549.1.9.16.3.30')
+
+
+# Key Derivation Algorithm Identifiers
+
+kda_hkdf_with_sha256 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha256['algorithm'] = id_alg_hkdf_with_sha256
+# kda_hkdf_with_sha256['parameters'] is absent
+
+
+kda_hkdf_with_sha384 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha384['algorithm'] = id_alg_hkdf_with_sha384
+# kda_hkdf_with_sha384['parameters'] is absent
+
+
+kda_hkdf_with_sha512 = rfc5280.AlgorithmIdentifier()
+kda_hkdf_with_sha512['algorithm'] = id_alg_hkdf_with_sha512
+# kda_hkdf_with_sha512['parameters'] is absent
diff --git a/third_party/python/pyasn1_modules/pyasn1_modules/rfc8649.py b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8649.py
new file mode 100644
index 0000000000..c405f050e8
--- /dev/null
+++ b/third_party/python/pyasn1_modules/pyasn1_modules/rfc8649.py
@@ -0,0 +1,40 @@
+#
+# This file is part of pyasn1-modules software.
+#
+# Created by Russ Housley with assistance from asn1ate v.0.6.0.
+#
+# Copyright (c) 2019, Vigil Security, LLC
+# License: http://snmplabs.com/pyasn1/license.html
+#
+# X.509 Certificate Extension for Hash Of Root Key
+#
+# ASN.1 source from:
+# https://www.rfc-editor.org/rfc/rfc8649.txt
+#
+
+from pyasn1.type import namedtype
+from pyasn1.type import univ
+
+from pyasn1_modules import rfc5280
+
+
+id_ce_hashOfRootKey = univ.ObjectIdentifier('1.3.6.1.4.1.51483.2.1')
+
+
+class HashedRootKey(univ.Sequence):
+ pass
+
+HashedRootKey.componentType = namedtype.NamedTypes(
+ namedtype.NamedType('hashAlg', rfc5280.AlgorithmIdentifier()),
+ namedtype.NamedType('hashValue', univ.OctetString())
+)
+
+
+# Map of Certificate Extension OIDs to Extensions added to the
+# ones that are in rfc5280.py
+
+_certificateExtensionsMapUpdate = {
+ id_ce_hashOfRootKey: HashedRootKey(),
+}
+
+rfc5280.certificateExtensionsMap.update(_certificateExtensionsMapUpdate)
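
A sketch of the HashedRootKey extension value above (the hashed input is a stand-in for the real DER-encoded root SubjectPublicKeyInfo):

    import hashlib

    from pyasn1.codec.der import encoder
    from pyasn1.type import univ
    from pyasn1_modules import rfc8649

    root_spki_der = b'stand-in for the DER SubjectPublicKeyInfo'

    hashed = rfc8649.HashedRootKey()
    hashed['hashAlg']['algorithm'] = univ.ObjectIdentifier('2.16.840.1.101.3.4.2.1')  # id-sha256
    hashed['hashValue'] = hashlib.sha256(root_spki_der).digest()

    substrate = encoder.encode(hashed)
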
diff --git a/third_party/python/pylru/LICENSE.txt b/third_party/python/pylru/LICENSE.txt
new file mode 100644
index 0000000000..d159169d10
--- /dev/null
+++ b/third_party/python/pylru/LICENSE.txt
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/third_party/python/pylru/PKG-INFO b/third_party/python/pylru/PKG-INFO
new file mode 100644
index 0000000000..d0e146d616
--- /dev/null
+++ b/third_party/python/pylru/PKG-INFO
@@ -0,0 +1,263 @@
+Metadata-Version: 1.1
+Name: pylru
+Version: 1.0.9
+Summary: A least recently used (LRU) cache implementation
+Home-page: https://github.com/jlhutch/pylru
+Author: Jay Hutchinson
+Author-email: jlhutch+pylru@gmail.com
+License: UNKNOWN
+Description:
+
+ PyLRU
+ =====
+
+ A least recently used (LRU) cache for Python.
+
+ Introduction
+ ============
+
+ Pylru implements a true LRU cache along with several support classes. The cache is efficient and written in pure Python. It works with Python 2.6+ including the 3.x series. Basic operations (lookup, insert, delete) all run in a constant amount of time. Pylru provides a cache class with a simple dict interface. It also provides classes to wrap any object that has a dict interface with a cache. Both write-through and write-back semantics are supported. Pylru also provides classes to wrap functions in a similar way, including a function decorator.
+
+    You can install pylru or you can just copy the source file pylru.py and use it directly in your own project. The rest of this file explains what the pylru module provides and how to use it. If you want to know more, examine pylru.py. The code is straightforward and well commented.
+
+ Usage
+ =====
+
+ lrucache
+ --------
+
+ An lrucache object has a dictionary like interface and can be used in the same way::
+
+ import pylru
+
+ size = 100 # Size of the cache. The maximum number of key/value
+ # pairs you want the cache to hold.
+
+ cache = pylru.lrucache(size)
+ # Create a cache object.
+
+ value = cache[key] # Lookup a value given its key.
+ cache[key] = value # Insert a key/value pair.
+ del cache[key] # Delete a value given its key.
+ #
+ # These three operations affect the order of the cache.
+ # Lookup and insert both move the key/value to the most
+ # recently used position. Delete (obviously) removes a
+ # key/value from whatever position it was in.
+
+ key in cache # Test for membership. Does not affect the cache order.
+
+ value = cache.peek(key)
+ # Lookup a value given its key. Does not affect the
+ # cache order.
+
+ cache.keys() # Return an iterator over the keys in the cache
+ cache.values() # Return an iterator over the values in the cache
+ cache.items() # Return an iterator over the (key, value) pairs in the
+ # cache.
+ #
+ # These calls have no effect on the cache order.
+ # lrucache is scan resistant when these calls are used.
+ # The iterators iterate over their respective elements
+ # in the order of most recently used to least recently
+ # used.
+ #
+ # WARNING - While these iterators do not affect the
+ # cache order the lookup, insert, and delete operations
+ # do. The result of changing the cache's order
+ # during iteration is undefined. If you really need to
+ # do something of the sort use list(cache.keys()), then
+ # loop over the list elements.
+
+ for key in cache: # Caches support __iter__ so you can use them directly
+ pass # in a for loop to loop over the keys just like
+ # cache.keys()
+
+ cache.size() # Returns the size of the cache
+ cache.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ x = len(cache) # Returns the number of items stored in the cache.
+ # x will be less than or equal to cache.size()
+
+ cache.clear() # Remove all items from the cache.
+
+
+ Lrucache takes an optional callback function as a second argument. Since the cache has a fixed size, some operations (such as an insertion) may cause the least recently used key/value pair to be ejected. If the optional callback function is given it will be called when this occurs. For example::
+
+ import pylru
+
+ def callback(key, value):
+ print (key, value) # A dumb callback that just prints the key/value
+
+ size = 100
+ cache = pylru.lrucache(size, callback)
+
+ # Use the cache... When it gets full some pairs may be ejected due to
+ # the fixed cache size. But, not before the callback is called to let you
+ # know.
+
+ WriteThroughCacheManager
+ ------------------------
+
+ Often a cache is used to speed up access to some other high latency object. For example, imagine you have a backend storage object that reads/writes from/to a remote server. Let us call this object *store*. If store has a dictionary interface a cache manager class can be used to compose the store object and an lrucache. The manager object exposes a dictionary interface. The programmer can then interact with the manager object as if it were the store. The manager object takes care of communicating with the store and caching key/value pairs in the lrucache object.
+
+    Two different semantics are supported, write-through (WriteThroughCacheManager class) and write-back (WriteBackCacheManager class). With write-through, lookups from the store are cached for future lookups. Insertions and deletions are updated in the cache and written through to the store immediately. Write-back works the same way, but insertions are updated only in the cache. These "dirty" key/value pairs will only be written back to the underlying store when they are ejected from the cache or when a sync is performed. The WriteBackCacheManager class is discussed more below.
+
+ The WriteThroughCacheManager class takes as arguments the store object you want to compose and the cache size. It then creates an LRU cache and automatically manages it::
+
+ import pylru
+
+ size = 100
+ cached = pylru.WriteThroughCacheManager(store, size)
+ # Or
+ cached = pylru.lruwrap(store, size)
+ # This is a factory function that does the same thing.
+
+ # Now the object *cached* can be used just like store, except caching is
+ # automatically handled.
+
+ value = cached[key] # Lookup a value given its key.
+ cached[key] = value # Insert a key/value pair.
+ del cached[key] # Delete a value given its key.
+
+    key in cached        # Test for membership. Does not affect the cache order.
+
+ cached.keys() # Returns store.keys()
+ cached.values() # Returns store.values()
+ cached.items() # Returns store.items()
+ #
+ # These calls have no effect on the cache order.
+ # The iterators iterate over their respective elements
+ # in the order dictated by store.
+
+    for key in cached:   # Same as store.keys()
+        pass
+
+ cached.size() # Returns the size of the cache
+ cached.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ x = len(cached) # Returns the number of items stored in the store.
+
+ cached.clear() # Remove all items from the store and cache.
+
+
+ WriteBackCacheManager
+ ---------------------
+
+ Similar to the WriteThroughCacheManager class except write-back semantics are used to manage the cache. The programmer is responsible for one more thing as well. They MUST call sync() when they are finished. This ensures that the last of the "dirty" entries in the cache are written back. This is not too bad as WriteBackCacheManager objects can be used in with statements. More about that below::
+
+
+ import pylru
+
+ size = 100
+ cached = pylru.WriteBackCacheManager(store, size)
+ # Or
+ cached = pylru.lruwrap(store, size, True)
+ # This is a factory function that does the same thing.
+
+ value = cached[key] # Lookup a value given its key.
+ cached[key] = value # Insert a key/value pair.
+ del cached[key] # Delete a value given its key.
+
+            key in cached       # Test for membership. Does not affect the cache order.
+
+
+ cached.keys() # Return an iterator over the keys in the cache/store
+ cached.values() # Return an iterator over the values in the cache/store
+ cached.items() # Return an iterator over the (key, value) pairs in the
+ # cache/store.
+ #
+ # The iterators iterate over a consistent view of the
+ # respective elements. That is, except for the order,
+ # the elements are the same as those returned if you
+ # first called sync() then called
+ # store.keys()[ or values() or items()]
+ #
+ # These calls have no effect on the cache order.
+ # The iterators iterate over their respective elements
+ # in arbitrary order.
+ #
+                                # WARNING - While these iterators do not affect the
+                                # cache order, the lookup, insert, and delete
+                                # operations do. The result of changing the cache's
+                                # order during iteration is undefined. If you really
+                                # need to do something of the sort use
+                                # list(cached.keys()), then loop over the list
+                                # elements.
+
+ for key in cached: # Same as cached.keys()
+
+ cached.size() # Returns the size of the cache
+ cached.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ cached.clear() # Remove all items from the store and cache.
+
+            cached.sync()       # Make the store and cache consistent. Write all
+                                # cached changes to the store that have not been
+                                # written back yet.
+
+ cached.flush() # Calls sync() then clears the cache.
+
+
+ To help the programmer ensure that the final sync() is called, WriteBackCacheManager objects can be used in a with statement::
+
+            with pylru.WriteBackCacheManager(store, size) as cached:
+                cached[key] = value
+                # Use cached just like you would store. sync() is called
+                # automatically for you when leaving the with statement block.
+
+
+ FunctionCacheManager
+ ---------------------
+
+        FunctionCacheManager allows you to compose a function with an lrucache. The resulting object can be called just like the original function, but the results are cached to speed up future calls. The function's arguments must be hashable::
+
+ import pylru
+
+ def square(x):
+ return x * x
+
+ size = 100
+ cached = pylru.FunctionCacheManager(square, size)
+
+ y = cached(7)
+
+ # The results of cached are the same as square, but automatically cached
+ # to speed up future calls.
+
+ cached.size() # Returns the size of the cache
+ cached.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ cached.clear() # Remove all items from the cache.
+
+
+
+ lrudecorator
+ ------------
+
+ PyLRU also provides a function decorator. This is basically the same functionality as FunctionCacheManager, but in the form of a decorator::
+
+ from pylru import lrudecorator
+
+ @lrudecorator(100)
+ def square(x):
+ return x * x
+
+ # The results of the square function are cached to speed up future calls.
+
+ square.size() # Returns the size of the cache
+ square.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ square.clear() # Remove all items from the cache.
+
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License (GPL)
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
diff --git a/third_party/python/pylru/README.txt b/third_party/python/pylru/README.txt
new file mode 100644
index 0000000000..f37c9ccdca
--- /dev/null
+++ b/third_party/python/pylru/README.txt
@@ -0,0 +1,245 @@
+
+
+PyLRU
+=====
+
+A least recently used (LRU) cache for Python.
+
+Introduction
+============
+
+Pylru implements a true LRU cache along with several support classes. The cache is efficient and written in pure Python. It works with Python 2.6+, including the 3.x series. Basic operations (lookup, insert, delete) all run in a constant amount of time. Pylru provides a cache class with a simple dict interface. It also provides classes to wrap any object that has a dict interface with a cache. Both write-through and write-back semantics are supported. Pylru also provides classes to wrap functions in a similar way, including a function decorator.
+
+You can install pylru, or you can just copy the source file pylru.py and use it directly in your own project. The rest of this file explains what the pylru module provides and how to use it. If you want to know more, examine pylru.py. The code is straightforward and well commented.
+
+Usage
+=====
+
+lrucache
+--------
+
+An lrucache object has a dictionary-like interface and can be used in the same way::
+
+ import pylru
+
+ size = 100 # Size of the cache. The maximum number of key/value
+ # pairs you want the cache to hold.
+
+ cache = pylru.lrucache(size)
+ # Create a cache object.
+
+ value = cache[key] # Lookup a value given its key.
+ cache[key] = value # Insert a key/value pair.
+ del cache[key] # Delete a value given its key.
+ #
+ # These three operations affect the order of the cache.
+ # Lookup and insert both move the key/value to the most
+ # recently used position. Delete (obviously) removes a
+ # key/value from whatever position it was in.
+
+ key in cache # Test for membership. Does not affect the cache order.
+
+ value = cache.peek(key)
+ # Lookup a value given its key. Does not affect the
+ # cache order.
+
+ cache.keys() # Return an iterator over the keys in the cache
+ cache.values() # Return an iterator over the values in the cache
+ cache.items() # Return an iterator over the (key, value) pairs in the
+ # cache.
+ #
+ # These calls have no effect on the cache order.
+                      # lrucache is scan-resistant when these calls are used.
+ # The iterators iterate over their respective elements
+ # in the order of most recently used to least recently
+ # used.
+ #
+ # WARNING - While these iterators do not affect the
+ # cache order the lookup, insert, and delete operations
+ # do. The result of changing the cache's order
+ # during iteration is undefined. If you really need to
+ # do something of the sort use list(cache.keys()), then
+ # loop over the list elements.
+
+ for key in cache: # Caches support __iter__ so you can use them directly
+ pass # in a for loop to loop over the keys just like
+ # cache.keys()
+
+ cache.size() # Returns the size of the cache
+ cache.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ x = len(cache) # Returns the number of items stored in the cache.
+ # x will be less than or equal to cache.size()
+
+ cache.clear() # Remove all items from the cache.
+
+
+Lrucache takes an optional callback function as a second argument. Since the cache has a fixed size, some operations (such as an insertion) may cause the least recently used key/value pair to be ejected. If the optional callback function is given, it will be called when this occurs. For example::
+
+ import pylru
+
+ def callback(key, value):
+ print (key, value) # A dumb callback that just prints the key/value
+
+ size = 100
+ cache = pylru.lrucache(size, callback)
+
+ # Use the cache... When it gets full some pairs may be ejected due to
+ # the fixed cache size. But, not before the callback is called to let you
+ # know.
+
+WriteThroughCacheManager
+------------------------
+
+Often a cache is used to speed up access to some other high-latency object. For example, imagine you have a backend storage object that reads/writes from/to a remote server. Let us call this object *store*. If store has a dictionary interface, a cache manager class can be used to compose the store object and an lrucache. The manager object exposes a dictionary interface. The programmer can then interact with the manager object as if it were the store. The manager object takes care of communicating with the store and caching key/value pairs in the lrucache object.
+
+Two different semantics are supported: write-through (the WriteThroughCacheManager class) and write-back (the WriteBackCacheManager class). With write-through, lookups from the store are cached for future lookups. Insertions and deletions are updated in the cache and written through to the store immediately. Write-back works the same way, but insertions are updated only in the cache. These "dirty" key/value pairs are only written to the underlying store when they are ejected from the cache or when a sync is performed. The WriteBackCacheManager class is discussed more below.
+
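+To make the difference concrete, here is a minimal sketch of the two
+semantics (assuming *store* is a plain dict standing in for a slower
+backend)::
+
+    import pylru
+
+    store = {}
+    wt = pylru.WriteThroughCacheManager(store, 100)
+    wt['a'] = 1             # Written through to the store immediately.
+    assert 'a' in store
+
+    store2 = {}
+    wb = pylru.WriteBackCacheManager(store2, 100)
+    wb['a'] = 1             # Cached only; the entry is now "dirty".
+    assert 'a' not in store2
+    wb.sync()               # Dirty entries are written back to the store.
+    assert store2['a'] == 1
+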
+The WriteThroughCacheManager class takes as arguments the store object you want to compose and the cache size. It then creates an LRU cache and automatically manages it::
+
+ import pylru
+
+ size = 100
+ cached = pylru.WriteThroughCacheManager(store, size)
+ # Or
+ cached = pylru.lruwrap(store, size)
+ # This is a factory function that does the same thing.
+
+ # Now the object *cached* can be used just like store, except caching is
+ # automatically handled.
+
+ value = cached[key] # Lookup a value given its key.
+ cached[key] = value # Insert a key/value pair.
+ del cached[key] # Delete a value given its key.
+
+    key in cached       # Test for membership. Does not affect the cache order.
+
+ cached.keys() # Returns store.keys()
+ cached.values() # Returns store.values()
+ cached.items() # Returns store.items()
+ #
+ # These calls have no effect on the cache order.
+ # The iterators iterate over their respective elements
+ # in the order dictated by store.
+
+ for key in cached: # Same as store.keys()
+
+ cached.size() # Returns the size of the cache
+ cached.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ x = len(cached) # Returns the number of items stored in the store.
+
+ cached.clear() # Remove all items from the store and cache.
+
+
+WriteBackCacheManager
+---------------------
+
+Similar to the WriteThroughCacheManager class, except write-back semantics are used to manage the cache. The programmer is responsible for one additional thing: they MUST call sync() when they are finished. This ensures that the last of the "dirty" entries in the cache are written back. This is not too onerous, as WriteBackCacheManager objects can be used in with statements. More about that below::
+
+
+ import pylru
+
+ size = 100
+ cached = pylru.WriteBackCacheManager(store, size)
+ # Or
+ cached = pylru.lruwrap(store, size, True)
+ # This is a factory function that does the same thing.
+
+ value = cached[key] # Lookup a value given its key.
+ cached[key] = value # Insert a key/value pair.
+ del cached[key] # Delete a value given its key.
+
+    key in cached       # Test for membership. Does not affect the cache order.
+
+
+ cached.keys() # Return an iterator over the keys in the cache/store
+ cached.values() # Return an iterator over the values in the cache/store
+ cached.items() # Return an iterator over the (key, value) pairs in the
+ # cache/store.
+ #
+ # The iterators iterate over a consistent view of the
+ # respective elements. That is, except for the order,
+ # the elements are the same as those returned if you
+ # first called sync() then called
+ # store.keys()[ or values() or items()]
+ #
+ # These calls have no effect on the cache order.
+ # The iterators iterate over their respective elements
+ # in arbitrary order.
+ #
+                      # WARNING - While these iterators do not affect the
+                      # cache order, the lookup, insert, and delete
+                      # operations do. The result of changing the cache's
+                      # order during iteration is undefined. If you really
+                      # need to do something of the sort use
+                      # list(cached.keys()), then loop over the list
+                      # elements.
+
+ for key in cached: # Same as cached.keys()
+
+ cached.size() # Returns the size of the cache
+ cached.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ cached.clear() # Remove all items from the store and cache.
+
+    cached.sync()       # Make the store and cache consistent. Write all
+                        # cached changes to the store that have not been
+                        # written back yet.
+
+ cached.flush() # Calls sync() then clears the cache.
+
+
+To help the programmer ensure that the final sync() is called, WriteBackCacheManager objects can be used in a with statement::
+
+    with pylru.WriteBackCacheManager(store, size) as cached:
+        cached[key] = value
+        # Use cached just like you would store. sync() is called
+        # automatically for you when leaving the with statement block.
+
+
+FunctionCacheManager
+---------------------
+
+FunctionCacheManager allows you to compose a function with an lrucache. The resulting object can be called just like the original function, but the results are cached to speed up future calls. The function's arguments must be hashable::
+
+ import pylru
+
+ def square(x):
+ return x * x
+
+ size = 100
+ cached = pylru.FunctionCacheManager(square, size)
+
+ y = cached(7)
+
+ # The results of cached are the same as square, but automatically cached
+ # to speed up future calls.
+
+ cached.size() # Returns the size of the cache
+ cached.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ cached.clear() # Remove all items from the cache.
+
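+Keyword arguments are also supported: the cache key is built from the
+positional arguments plus the keyword arguments in sorted order, so, for
+example, ``cached(7)`` and ``cached(x=7)`` occupy separate cache entries.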
+
+
+lrudecorator
+------------
+
+PyLRU also provides a function decorator. This is basically the same functionality as FunctionCacheManager, but in the form of a decorator::
+
+ from pylru import lrudecorator
+
+ @lrudecorator(100)
+ def square(x):
+ return x * x
+
+ # The results of the square function are cached to speed up future calls.
+
+ square.size() # Returns the size of the cache
+ square.size(x) # Changes the size of the cache. x MUST be greater than
+ # zero. Returns the new size x.
+
+ square.clear() # Remove all items from the cache.
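+
+The decorated function also exposes the underlying cache object as its
+``cache`` attribute, so, for example, ``len(square.cache)`` reports how
+many results are currently cached.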
diff --git a/third_party/python/pylru/pylru.py b/third_party/python/pylru/pylru.py
new file mode 100644
index 0000000000..e69cadb76c
--- /dev/null
+++ b/third_party/python/pylru/pylru.py
@@ -0,0 +1,556 @@
+
+# Cache implementation with a Least Recently Used (LRU) replacement policy and
+# a basic dictionary interface.
+
+# Copyright (C) 2006, 2009, 2010, 2011 Jay Hutchinson
+
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc., 51
+# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+
+# The cache is implemented using a combination of a python dictionary (hash
+# table) and a circular doubly linked list. Items in the cache are stored in
+# nodes. These nodes make up the linked list. The list is used to efficiently
+# maintain the order that the items have been used in. The front or head of
+# the list contains the most recently used item, the tail of the list
+# contains the least recently used item. When an item is used it can easily
+# (in a constant amount of time) be moved to the front of the list, thus
+# updating its position in the ordering. These nodes are also placed in the
+# hash table under their associated key. The hash table allows efficient
+# lookup of values by key.
+
+# Class for the node objects.
+class _dlnode(object):
+ def __init__(self):
+ self.empty = True
+
+
+class lrucache(object):
+
+ def __init__(self, size, callback=None):
+
+ self.callback = callback
+
+ # Create an empty hash table.
+ self.table = {}
+
+ # Initialize the doubly linked list with one empty node. This is an
+ # invariant. The cache size must always be greater than zero. Each
+ # node has a 'prev' and 'next' variable to hold the node that comes
+ # before it and after it respectively. Initially the two variables
+ # each point to the head node itself, creating a circular doubly
+ # linked list of size one. Then the size() method is used to adjust
+ # the list to the desired size.
+
+ self.head = _dlnode()
+ self.head.next = self.head
+ self.head.prev = self.head
+
+ self.listSize = 1
+
+ # Adjust the size
+ self.size(size)
+
+
+ def __len__(self):
+ return len(self.table)
+
+ def clear(self):
+ for node in self.dli():
+ node.empty = True
+ node.key = None
+ node.value = None
+
+ self.table.clear()
+
+
+ def __contains__(self, key):
+ return key in self.table
+
+ # Looks up a value in the cache without affecting cache order.
+ def peek(self, key):
+ # Look up the node
+ node = self.table[key]
+ return node.value
+
+
+ def __getitem__(self, key):
+ # Look up the node
+ node = self.table[key]
+
+        # Update the list ordering. Move this node so that it directly
+        # precedes the head node. Then set the 'head' variable to it. This
+        # makes it the new head of the list.
+ self.mtf(node)
+ self.head = node
+
+ # Return the value.
+ return node.value
+
+ def get(self, key, default=None):
+ """Get an item - return default (None) if not present"""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __setitem__(self, key, value):
+ # First, see if any value is stored under 'key' in the cache already.
+ # If so we are going to replace that value with the new one.
+ if key in self.table:
+
+ # Lookup the node
+ node = self.table[key]
+
+ # Replace the value.
+ node.value = value
+
+ # Update the list ordering.
+ self.mtf(node)
+ self.head = node
+
+ return
+
+ # Ok, no value is currently stored under 'key' in the cache. We need
+ # to choose a node to place the new item in. There are two cases. If
+ # the cache is full some item will have to be pushed out of the
+ # cache. We want to choose the node with the least recently used
+ # item. This is the node at the tail of the list. If the cache is not
+ # full we want to choose a node that is empty. Because of the way the
+ # list is managed, the empty nodes are always together at the tail
+        # end of the list. Thus, in either case, by choosing the node at the
+ # tail of the list our conditions are satisfied.
+
+        # Since the list is circular, the tail node directly precedes the
+ # 'head' node.
+ node = self.head.prev
+
+ # If the node already contains something we need to remove the old
+ # key from the dictionary.
+ if not node.empty:
+ if self.callback is not None:
+ self.callback(node.key, node.value)
+ del self.table[node.key]
+
+ # Place the new key and value in the node
+ node.empty = False
+ node.key = key
+ node.value = value
+
+ # Add the node to the dictionary under the new key.
+ self.table[key] = node
+
+ # We need to move the node to the head of the list. The node is the
+        # tail node, so it directly precedes the head node due to the list
+ # being circular. Therefore, the ordering is already correct, we just
+ # need to adjust the 'head' variable.
+ self.head = node
+
+
+ def __delitem__(self, key):
+
+ # Lookup the node, then remove it from the hash table.
+ node = self.table[key]
+ del self.table[key]
+
+ node.empty = True
+
+ # Not strictly necessary.
+ node.key = None
+ node.value = None
+
+ # Because this node is now empty we want to reuse it before any
+ # non-empty node. To do that we want to move it to the tail of the
+        # list. We move it so that it directly precedes the 'head' node. This
+ # makes it the tail node. The 'head' is then adjusted. This
+ # adjustment ensures correctness even for the case where the 'node'
+ # is the 'head' node.
+ self.mtf(node)
+ self.head = node.next
+
+ def __iter__(self):
+
+ # Return an iterator that returns the keys in the cache in order from
+ # the most recently to least recently used. Does not modify the cache
+ # order.
+ for node in self.dli():
+ yield node.key
+
+ def items(self):
+
+ # Return an iterator that returns the (key, value) pairs in the cache
+ # in order from the most recently to least recently used. Does not
+ # modify the cache order.
+ for node in self.dli():
+ yield (node.key, node.value)
+
+ def keys(self):
+
+ # Return an iterator that returns the keys in the cache in order from
+ # the most recently to least recently used. Does not modify the cache
+ # order.
+ for node in self.dli():
+ yield node.key
+
+ def values(self):
+
+ # Return an iterator that returns the values in the cache in order
+ # from the most recently to least recently used. Does not modify the
+ # cache order.
+ for node in self.dli():
+ yield node.value
+
+ def size(self, size=None):
+
+ if size is not None:
+ assert size > 0
+ if size > self.listSize:
+ self.addTailNode(size - self.listSize)
+ elif size < self.listSize:
+ self.removeTailNode(self.listSize - size)
+
+ return self.listSize
+
+ # Increases the size of the cache by inserting n empty nodes at the tail
+ # of the list.
+ def addTailNode(self, n):
+ for i in range(n):
+ node = _dlnode()
+ node.next = self.head
+ node.prev = self.head.prev
+
+ self.head.prev.next = node
+ self.head.prev = node
+
+ self.listSize += n
+
+ # Decreases the size of the list by removing n nodes from the tail of the
+ # list.
+ def removeTailNode(self, n):
+ assert self.listSize > n
+ for i in range(n):
+ node = self.head.prev
+ if not node.empty:
+ if self.callback is not None:
+ self.callback(node.key, node.value)
+ del self.table[node.key]
+
+ # Splice the tail node out of the list
+ self.head.prev = node.prev
+ node.prev.next = self.head
+
+ # The next four lines are not strictly necessary.
+ node.prev = None
+ node.next = None
+
+ node.key = None
+ node.value = None
+
+ self.listSize -= n
+
+
+ # This method adjusts the ordering of the doubly linked list so that
+ # 'node' directly precedes the 'head' node. Because of the order of
+ # operations, if 'node' already directly precedes the 'head' node or if
+ # 'node' is the 'head' node the order of the list will be unchanged.
+ def mtf(self, node):
+ node.prev.next = node.next
+ node.next.prev = node.prev
+
+ node.prev = self.head.prev
+ node.next = self.head.prev.next
+
+ node.next.prev = node
+ node.prev.next = node
+
+ # This method returns an iterator that iterates over the non-empty nodes
+ # in the doubly linked list in order from the most recently to the least
+ # recently used.
+ def dli(self):
+ node = self.head
+ for i in range(len(self.table)):
+ yield node
+ node = node.next
+
+
+
+
+class WriteThroughCacheManager(object):
+ def __init__(self, store, size):
+ self.store = store
+ self.cache = lrucache(size)
+
+ def __len__(self):
+ return len(self.store)
+
+ # Returns/sets the size of the managed cache.
+ def size(self, size=None):
+ return self.cache.size(size)
+
+ def clear(self):
+ self.cache.clear()
+ self.store.clear()
+
+ def __contains__(self, key):
+ # Check the cache first. If it is there we can return quickly.
+ if key in self.cache:
+ return True
+
+ # Not in the cache. Might be in the underlying store.
+ if key in self.store:
+ return True
+
+ return False
+
+ def __getitem__(self, key):
+ # First we try the cache. If successful we just return the value. If
+ # not we catch KeyError and ignore it since that just means the key
+ # was not in the cache.
+ try:
+ return self.cache[key]
+ except KeyError:
+ pass
+
+ # It wasn't in the cache. Look it up in the store, add the entry to
+ # the cache, and return the value.
+ value = self.store[key]
+ self.cache[key] = value
+ return value
+
+ def get(self, key, default=None):
+ """Get an item - return default (None) if not present"""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __setitem__(self, key, value):
+ # Add the key/value pair to the cache and store.
+ self.cache[key] = value
+ self.store[key] = value
+
+ def __delitem__(self, key):
+        # Write-through behavior: cache and store should be consistent.
+        # Delete it from the store.
+ del self.store[key]
+ try:
+ # Ok, delete from the store was successful. It might also be in
+ # the cache, try and delete it. If not we catch the KeyError and
+ # ignore it.
+ del self.cache[key]
+ except KeyError:
+ pass
+
+ def __iter__(self):
+ return self.keys()
+
+ def keys(self):
+ return self.store.keys()
+
+ def values(self):
+ return self.store.values()
+
+ def items(self):
+ return self.store.items()
+
+
+
+class WriteBackCacheManager(object):
+ def __init__(self, store, size):
+ self.store = store
+
+ # Create a set to hold the dirty keys.
+ self.dirty = set()
+
+ # Define a callback function to be called by the cache when a
+ # key/value pair is about to be ejected. This callback will check to
+ # see if the key is in the dirty set. If so, then it will update the
+ # store object and remove the key from the dirty set.
+ def callback(key, value):
+ if key in self.dirty:
+ self.store[key] = value
+ self.dirty.remove(key)
+
+ # Create a cache and give it the callback function.
+ self.cache = lrucache(size, callback)
+
+ # Returns/sets the size of the managed cache.
+ def size(self, size=None):
+ return self.cache.size(size)
+
+ def clear(self):
+ self.cache.clear()
+ self.dirty.clear()
+ self.store.clear()
+
+ def __contains__(self, key):
+ # Check the cache first, since if it is there we can return quickly.
+ if key in self.cache:
+ return True
+
+ # Not in the cache. Might be in the underlying store.
+ if key in self.store:
+ return True
+
+ return False
+
+ def __getitem__(self, key):
+ # First we try the cache. If successful we just return the value. If
+ # not we catch KeyError and ignore it since that just means the key
+ # was not in the cache.
+ try:
+ return self.cache[key]
+ except KeyError:
+ pass
+
+ # It wasn't in the cache. Look it up in the store, add the entry to
+ # the cache, and return the value.
+ value = self.store[key]
+ self.cache[key] = value
+ return value
+
+ def get(self, key, default=None):
+ """Get an item - return default (None) if not present"""
+ try:
+ return self[key]
+ except KeyError:
+ return default
+
+ def __setitem__(self, key, value):
+ # Add the key/value pair to the cache.
+ self.cache[key] = value
+ self.dirty.add(key)
+
+ def __delitem__(self, key):
+
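+        # The key may be in the cache, the store, or both. Try to delete it
+        # from each in turn; a dirty key may not have reached the store yet,
+        # so a miss in either place alone is not an error.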
+ found = False
+ try:
+ del self.cache[key]
+ found = True
+ self.dirty.remove(key)
+ except KeyError:
+ pass
+
+ try:
+ del self.store[key]
+ found = True
+ except KeyError:
+ pass
+
+ if not found: # If not found in cache or store, raise error.
+ raise KeyError
+
+
+ def __iter__(self):
+ return self.keys()
+
+ def keys(self):
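+        # Yield each key exactly once: first the store's keys, skipping any
+        # that are dirty (which would otherwise be yielded twice), then the
+        # dirty keys, which may exist only in the cache so far.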
+ for key in self.store.keys():
+ if key not in self.dirty:
+ yield key
+
+ for key in self.dirty:
+ yield key
+
+
+ def values(self):
+ for key, value in self.items():
+ yield value
+
+
+ def items(self):
+ for key, value in self.store.items():
+ if key not in self.dirty:
+ yield (key, value)
+
+ for key in self.dirty:
+ value = self.cache.peek(key)
+ yield (key, value)
+
+
+
+ def sync(self):
+ # For each dirty key, peek at its value in the cache and update the
+ # store. Doesn't change the cache's order.
+ for key in self.dirty:
+ self.store[key] = self.cache.peek(key)
+ # There are no dirty keys now.
+ self.dirty.clear()
+
+ def flush(self):
+ self.sync()
+ self.cache.clear()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.sync()
+ return False
+
+
+class FunctionCacheManager(object):
+ def __init__(self, func, size):
+ self.func = func
+ self.cache = lrucache(size)
+
+ def size(self, size=None):
+ return self.cache.size(size)
+
+ def clear(self):
+ self.cache.clear()
+
+ def __call__(self, *args, **kwargs):
+ kwtuple = tuple((key, kwargs[key]) for key in sorted(kwargs.keys()))
+ key = (args, kwtuple)
+ try:
+ return self.cache[key]
+ except KeyError:
+ pass
+
+ value = self.func(*args, **kwargs)
+ self.cache[key] = value
+ return value
+
+
+def lruwrap(store, size, writeback=False):
+ if writeback:
+ return WriteBackCacheManager(store, size)
+ else:
+ return WriteThroughCacheManager(store, size)
+
+import functools
+
+class lrudecorator(object):
+ def __init__(self, size):
+ self.cache = lrucache(size)
+
+ def __call__(self, func):
+ def wrapper(*args, **kwargs):
+ kwtuple = tuple((key, kwargs[key]) for key in sorted(kwargs.keys()))
+ key = (args, kwtuple)
+ try:
+ return self.cache[key]
+ except KeyError:
+ pass
+
+ value = func(*args, **kwargs)
+ self.cache[key] = value
+ return value
+
+ wrapper.cache = self.cache
+ wrapper.size = self.cache.size
+ wrapper.clear = self.cache.clear
+ return functools.update_wrapper(wrapper, func)
diff --git a/third_party/python/pylru/setup.py b/third_party/python/pylru/setup.py
new file mode 100644
index 0000000000..66d441ca94
--- /dev/null
+++ b/third_party/python/pylru/setup.py
@@ -0,0 +1,23 @@
+from distutils.core import setup
+
+setup(
+ name = "pylru",
+ version = "1.0.9",
+ py_modules=['pylru'],
+ description = "A least recently used (LRU) cache implementation",
+ author = "Jay Hutchinson",
+ author_email = "jlhutch+pylru@gmail.com",
+ url = "https://github.com/jlhutch/pylru",
+ classifiers = [
+ "Programming Language :: Python :: 2.6",
+ "Programming Language :: Python :: 2.7",
+ "Programming Language :: Python :: 3",
+ "Development Status :: 5 - Production/Stable",
+ "Intended Audience :: Developers",
+ "License :: OSI Approved :: GNU General Public License (GPL)",
+ "Operating System :: OS Independent",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ ],
+ long_description=open('README.txt').read())
+
+
diff --git a/third_party/python/pylru/test.py b/third_party/python/pylru/test.py
new file mode 100644
index 0000000000..7a4842fb52
--- /dev/null
+++ b/third_party/python/pylru/test.py
@@ -0,0 +1,238 @@
+
+from pylru import *
+import random
+
+# This tests PyLRU by fuzzing it with random operations, then checking the
+# results against another, simpler, LRU cache implementation.
+
+class simplelrucache:
+
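+    # A deliberately simple reference implementation used as a test oracle:
+    # a list of [key, value] pairs ordered from least recently used (front)
+    # to most recently used (back). Every operation is O(n), which is fine
+    # for testing.
+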
+ def __init__(self, size):
+
+ # Initialize the cache as empty.
+ self.cache = []
+ self.size = size
+
+ def __contains__(self, key):
+
+ for x in self.cache:
+ if x[0] == key:
+ return True
+
+ return False
+
+
+ def __getitem__(self, key):
+
+ for i in range(len(self.cache)):
+ x = self.cache[i]
+ if x[0] == key:
+ del self.cache[i]
+ self.cache.append(x)
+ return x[1]
+
+ raise KeyError
+
+
+ def __setitem__(self, key, value):
+
+ for i in range(len(self.cache)):
+ x = self.cache[i]
+ if x[0] == key:
+ x[1] = value
+ del self.cache[i]
+ self.cache.append(x)
+ return
+
+ if len(self.cache) == self.size:
+ self.cache = self.cache[1:]
+
+ self.cache.append([key, value])
+
+
+ def __delitem__(self, key):
+
+ for i in range(len(self.cache)):
+ if self.cache[i][0] == key:
+ del self.cache[i]
+ return
+
+ raise KeyError
+
+    def resize(self, x):
+ assert x > 0
+ self.size = x
+ if x < len(self.cache):
+ del self.cache[:len(self.cache) - x]
+
+
+def test(a, b, c, d, verify):
+
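+    # a and b are the two cache implementations being compared; c and d are
+    # passed straight through to verify() after every operation. The three
+    # phases below fuzz insertions, lookups, and deletions with random keys.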
+ for i in range(1000):
+ x = random.randint(0, 512)
+ y = random.randint(0, 512)
+
+ a[x] = y
+ b[x] = y
+ verify(c, d)
+
+ for i in range(1000):
+ x = random.randint(0, 512)
+ if x in a:
+ assert x in b
+ z = a[x]
+ z += b[x]
+ else:
+ assert x not in b
+ verify(c, d)
+
+ for i in range(256):
+ x = random.randint(0, 512)
+ if x in a:
+ assert x in b
+ del a[x]
+ del b[x]
+ else:
+ assert x not in b
+ verify(c, d)
+
+
+def testcache():
+ def verify(a, b):
+ q = []
+ z = a.head
+ for j in range(len(a.table)):
+ q.append([z.key, z.value])
+ z = z.next
+
+ assert q == b.cache[::-1]
+
+ q2 = []
+ for x, y in q:
+ q2.append((x, y))
+
+ assert list(a.items()) == q2
+ assert list(zip(a.keys(), a.values())) == q2
+ assert list(a.keys()) == list(a)
+
+
+ a = lrucache(128)
+ b = simplelrucache(128)
+ verify(a, b)
+ test(a, b, a, b, verify)
+
+ a.size(71)
+ b.resize(71)
+ verify(a, b)
+ test(a, b, a, b, verify)
+
+ a.size(341)
+ b.resize(341)
+ verify(a, b)
+ test(a, b, a, b, verify)
+
+ a.size(127)
+ b.resize(127)
+ verify(a, b)
+ test(a, b, a, b, verify)
+
+
+def wraptest():
+
+ def verify(p, x):
+ assert p == x.store
+ for key, value in x.cache.items():
+ assert x.store[key] == value
+
+ tmp = list(x.items())
+ tmp.sort()
+
+ tmp2 = list(p.items())
+ tmp2.sort()
+
+ assert tmp == tmp2
+
+ p = dict()
+ q = dict()
+ x = lruwrap(q, 128)
+
+ test(p, x, p, x, verify)
+
+
+
+def wraptest2():
+
+ def verify(p, x):
+ for key, value in x.store.items():
+ if key not in x.dirty:
+ assert p[key] == value
+
+ for key in x.dirty:
+ assert x.cache.peek(key) == p[key]
+
+ for key, value in x.cache.items():
+ if key not in x.dirty:
+ assert x.store[key] == p[key] == value
+
+ tmp = list(x.items())
+ tmp.sort()
+
+ tmp2 = list(p.items())
+ tmp2.sort()
+
+ assert tmp == tmp2
+
+ p = dict()
+ q = dict()
+ x = lruwrap(q, 128, True)
+
+ test(p, x, p, x, verify)
+
+ x.sync()
+ assert p == q
+
+def wraptest3():
+
+ def verify(p, x):
+ for key, value in x.store.items():
+ if key not in x.dirty:
+ assert p[key] == value
+
+ for key in x.dirty:
+ assert x.cache.peek(key) == p[key]
+
+ for key, value in x.cache.items():
+ if key not in x.dirty:
+ assert x.store[key] == p[key] == value
+
+ p = dict()
+ q = dict()
+ with lruwrap(q, 128, True) as x:
+ test(p, x, p, x, verify)
+
+ assert p == q
+
+
+@lrudecorator(100)
+def square(x):
+ return x*x
+
+def testDecorator():
+ for i in range(1000):
+ x = random.randint(0, 200)
+ assert square(x) == x*x
+
+
+if __name__ == '__main__':
+
+ random.seed()
+
+
+ for i in range(20):
+ testcache()
+ wraptest()
+ wraptest2()
+ wraptest3()
+ testDecorator()
+
+
diff --git a/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/LICENSE b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/LICENSE
new file mode 100644
index 0000000000..1bf98523e3
--- /dev/null
+++ b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/LICENSE
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/METADATA b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/METADATA
new file mode 100644
index 0000000000..2206ad94ed
--- /dev/null
+++ b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/METADATA
@@ -0,0 +1,104 @@
+Metadata-Version: 2.1
+Name: pyparsing
+Version: 2.4.7
+Summary: Python parsing module
+Home-page: https://github.com/pyparsing/pyparsing/
+Author: Paul McGuire
+Author-email: ptmcg@users.sourceforge.net
+License: MIT License
+Download-URL: https://pypi.org/project/pyparsing/
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
+
+PyParsing -- A Python Parsing Module
+====================================
+
+|Build Status|
+
+Introduction
+============
+
+The pyparsing module is an alternative approach to creating and
+executing simple grammars, vs. the traditional lex/yacc approach, or the
+use of regular expressions. The pyparsing module provides a library of
+classes that client code uses to construct the grammar directly in
+Python code.
+
+*[Since first writing this description of pyparsing in late 2003, this
+technique for developing parsers has become more widespread, under the
+name Parsing Expression Grammars - PEGs. See more information on PEGs at*
+https://en.wikipedia.org/wiki/Parsing_expression_grammar *.]*
+
+Here is a program to parse ``"Hello, World!"`` (or any greeting of the form
+``"salutation, addressee!"``):
+
+.. code:: python
+
+ from pyparsing import Word, alphas
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+ hello = "Hello, World!"
+ print(hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of '+', '|' and '^' operator
+definitions.
+
+The parsed results returned from ``parseString()`` can be accessed as a
+nested list, a dictionary, or an object with named attributes.
+
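+For example, a short sketch using hypothetical results names ``year``,
+``month``, and ``day``:
+
+.. code:: python
+
+    from pyparsing import Word, nums
+    integer = Word(nums)
+    date_str = integer("year") + "/" + integer("month") + "/" + integer("day")
+    result = date_str.parseString("1999/12/31")
+    print(list(result))      # ['1999', '/', '12', '/', '31']
+    print(result["month"])   # 12
+    print(result.day)        # 31
+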
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+- extra or missing whitespace (the above program will also handle ``"Hello,World!"``, ``"Hello , World !"``, etc.)
+- quoted strings
+- embedded comments
+
+The examples directory includes a simple SQL parser, simple CORBA IDL
+parser, a config file parser, a chemical formula parser, and a
+four-function algebraic notation parser, among many others.
+
+Documentation
+=============
+
+There are many examples in the online docstrings of the classes
+and methods in pyparsing. You can find them compiled into online docs
+at https://pyparsing-docs.readthedocs.io/en/latest/. Additional
+documentation resources and project info are listed in the online
+GitHub wiki, at https://github.com/pyparsing/pyparsing/wiki. An
+entire directory of examples is at
+https://github.com/pyparsing/pyparsing/tree/master/examples.
+
+License
+=======
+
+MIT License. See header of pyparsing.py
+
+History
+=======
+
+See CHANGES file.
+
+.. |Build Status| image:: https://travis-ci.org/pyparsing/pyparsing.svg?branch=master
+ :target: https://travis-ci.org/pyparsing/pyparsing
+
+
diff --git a/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/RECORD b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/RECORD
new file mode 100644
index 0000000000..39a2bc5937
--- /dev/null
+++ b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/RECORD
@@ -0,0 +1,6 @@
+pyparsing.py,sha256=oxX_ZOz8t-eros-UWY7nJgcdUgD-rQ53Ck0qp7_v3Ig,273365
+pyparsing-2.4.7.dist-info/LICENSE,sha256=ENUSChaAWAT_2otojCIL-06POXQbVzIGBNRVowngGXI,1023
+pyparsing-2.4.7.dist-info/METADATA,sha256=Ry40soZZiZrAkSMQT_KU1_1REe6FKa5UWzbT6YA8Mxs,3636
+pyparsing-2.4.7.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+pyparsing-2.4.7.dist-info/top_level.txt,sha256=eUOjGzJVhlQ3WS2rFAy2mN3LX_7FKTM5GSJ04jfnLmU,10
+pyparsing-2.4.7.dist-info/RECORD,,
diff --git a/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/WHEEL b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/WHEEL
new file mode 100644
index 0000000000..ef99c6cf32
--- /dev/null
+++ b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/top_level.txt b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/top_level.txt
new file mode 100644
index 0000000000..210dfec50b
--- /dev/null
+++ b/third_party/python/pyparsing/pyparsing-2.4.7.dist-info/top_level.txt
@@ -0,0 +1 @@
+pyparsing
diff --git a/third_party/python/pyparsing/pyparsing.py b/third_party/python/pyparsing/pyparsing.py
new file mode 100644
index 0000000000..581d5bbb8a
--- /dev/null
+++ b/third_party/python/pyparsing/pyparsing.py
@@ -0,0 +1,7107 @@
+# -*- coding: utf-8 -*-
+# module pyparsing.py
+#
+# Copyright (c) 2003-2019 Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and
+executing simple grammars, vs. the traditional lex/yacc approach, or the
+use of regular expressions. With pyparsing, you don't need to learn
+a new syntax for defining grammars or matching expressions - the parsing
+module provides a library of classes that you use to construct the
+grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+``"<salutation>, <addressee>!"``), built up using :class:`Word`,
+:class:`Literal`, and :class:`And` elements
+(the :class:`'+'<ParserElement.__add__>` operators create :class:`And` expressions,
+and the strings are auto-converted to :class:`Literal` expressions)::
+
+ from pyparsing import Word, alphas
+
+ # define grammar of a greeting
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of '+', '|' and '^' operators.
+
+The :class:`ParseResults` object returned from
+:class:`ParserElement.parseString` can be
+accessed as a nested list, a dictionary, or an object with named
+attributes.
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+ - extra or missing whitespace (the above program will also handle
+ "Hello,World!", "Hello , World !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes :class:`ParserElement` and :class:`ParseResults` to
+see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+
+ - construct literal match expressions from :class:`Literal` and
+ :class:`CaselessLiteral` classes
+ - construct character word-group expressions using the :class:`Word`
+ class
+ - see how to create repetitive expressions using :class:`ZeroOrMore`
+ and :class:`OneOrMore` classes
+ - use :class:`'+'<And>`, :class:`'|'<MatchFirst>`, :class:`'^'<Or>`,
+ and :class:`'&'<Each>` operators to combine simple expressions into
+ more complex ones
+ - associate names with your parsed results using
+ :class:`ParserElement.setResultsName`
+ - access the parsed data, which is returned as a :class:`ParseResults`
+ object
+ - find some helpful expression short-cuts like :class:`delimitedList`
+ and :class:`oneOf`
+ - find more useful common expressions in the :class:`pyparsing_common`
+ namespace class
+"""
+
+__version__ = "2.4.7"
+__versionTime__ = "30 Mar 2020 00:43 UTC"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+from operator import itemgetter
+import itertools
+from functools import wraps
+from contextlib import contextmanager
+
+try:
+ # Python 3
+ from itertools import filterfalse
+except ImportError:
+ from itertools import ifilterfalse as filterfalse
+
+try:
+ from _thread import RLock
+except ImportError:
+ from threading import RLock
+
+try:
+ # Python 3
+ from collections.abc import Iterable
+ from collections.abc import MutableMapping, Mapping
+except ImportError:
+ # Python 2.7
+ from collections import Iterable
+ from collections import MutableMapping, Mapping
+
+try:
+ from collections import OrderedDict as _OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict as _OrderedDict
+ except ImportError:
+ _OrderedDict = None
+
+try:
+ from types import SimpleNamespace
+except ImportError:
+ class SimpleNamespace: pass
+
+# version compatibility configuration
+__compat__ = SimpleNamespace()
+__compat__.__doc__ = """
+ A cross-version compatibility configuration for pyparsing features that will be
+ released in a future version. By setting values in this configuration to True,
+ those features can be enabled in prior versions for compatibility development
+ and testing.
+
+ - collect_all_And_tokens - flag to enable fix for Issue #63 that fixes erroneous grouping
+ of results names when an And expression is nested within an Or or MatchFirst; set to
+ True to enable bugfix released in pyparsing 2.3.0, or False to preserve
+ pre-2.3.0 handling of named results
+"""
+__compat__.collect_all_And_tokens = True
+
+__diag__ = SimpleNamespace()
+__diag__.__doc__ = """
+Diagnostic configuration (all default to False)
+ - warn_multiple_tokens_in_named_alternation - flag to enable warnings when a results
+ name is defined on a MatchFirst or Or expression with one or more And subexpressions
+ (only warns if __compat__.collect_all_And_tokens is False)
+ - warn_ungrouped_named_tokens_in_collection - flag to enable warnings when a results
+ name is defined on a containing expression with ungrouped subexpressions that also
+ have results names
+  - warn_name_set_on_empty_Forward - flag to enable warnings when a Forward is defined
+ with a results name, but has no contents defined
+  - warn_on_multiple_string_args_to_oneof - flag to enable warnings when oneOf is
+ incorrectly called with multiple str arguments
+ - enable_debug_on_named_expressions - flag to auto-enable debug on all subsequent
+ calls to ParserElement.setName()
+"""
+__diag__.warn_multiple_tokens_in_named_alternation = False
+__diag__.warn_ungrouped_named_tokens_in_collection = False
+__diag__.warn_name_set_on_empty_Forward = False
+__diag__.warn_on_multiple_string_args_to_oneof = False
+__diag__.enable_debug_on_named_expressions = False
+__diag__._all_names = [nm for nm in vars(__diag__) if nm.startswith("enable_") or nm.startswith("warn_")]
+
+def _enable_all_warnings():
+ __diag__.warn_multiple_tokens_in_named_alternation = True
+ __diag__.warn_ungrouped_named_tokens_in_collection = True
+ __diag__.warn_name_set_on_empty_Forward = True
+ __diag__.warn_on_multiple_string_args_to_oneof = True
+__diag__.enable_all_warnings = _enable_all_warnings
+
+
+__all__ = ['__version__', '__versionTime__', '__author__', '__compat__', '__diag__',
+ 'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+ 'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+ 'PrecededBy', 'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+ 'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+ 'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+ 'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
+ 'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', 'Char',
+ 'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+ 'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+ 'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+ 'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+ 'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+ 'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+ 'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
+ 'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+ 'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+ 'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation', 'locatedExpr', 'withClass',
+ 'CloseMatch', 'tokenMap', 'pyparsing_common', 'pyparsing_unicode', 'unicode_set',
+ 'conditionAsParseAction', 're',
+ ]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+ _MAX_INT = sys.maxsize
+ basestring = str
+ unichr = chr
+ unicode = str
+ _ustr = str
+
+ # build list of single arg builtins, that can be used as parse actions
+ singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+ _MAX_INT = sys.maxint
+ range = xrange
+
+ def _ustr(obj):
+ """Drop-in replacement for str(obj) that tries to be Unicode
+ friendly. It first tries str(obj). If that fails with
+ a UnicodeEncodeError, then it tries unicode(obj). It then
+ < returns the unicode object | encodes it with the default
+ encoding | ... >.
+ """
+ if isinstance(obj, unicode):
+ return obj
+
+ try:
+ # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+ # it won't break any existing code.
+ return str(obj)
+
+ except UnicodeEncodeError:
+ # Else encode it
+ ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+ xmlcharref = Regex(r'&#\d+;')
+ xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+ return xmlcharref.transformString(ret)
+
+ # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+ singleArgBuiltins = []
+ import __builtin__
+
+ for fname in "sum len sorted reversed list tuple set any all min max".split():
+ try:
+ singleArgBuiltins.append(getattr(__builtin__, fname))
+ except AttributeError:
+ continue
+
+_generatorType = type((y for y in range(1)))
+
+def _xml_escape(data):
+ """Escape &, <, >, ", ', etc. in a string of data."""
+
+ # ampersand must be replaced first
+ from_symbols = '&><"\''
+ to_symbols = ('&' + s + ';' for s in "amp gt lt quot apos".split())
+ for from_, to_ in zip(from_symbols, to_symbols):
+ data = data.replace(from_, to_)
+ return data
+
+alphas = string.ascii_uppercase + string.ascii_lowercase
+nums = "0123456789"
+hexnums = nums + "ABCDEFabcdef"
+alphanums = alphas + nums
+_bslash = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+
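+# Helper that wraps a user-supplied predicate so it can be used as a parse
+# action: the wrapped function is called with the usual (s, loc, toks)
+# arguments, and a ParseException (or ParseFatalException when fatal=True)
+# is raised with the given message whenever the predicate returns a falsy
+# value.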
+def conditionAsParseAction(fn, message=None, fatal=False):
+ msg = message if message is not None else "failed user-defined condition"
+ exc_type = ParseFatalException if fatal else ParseException
+ fn = _trim_arity(fn)
+
+ @wraps(fn)
+ def pa(s, l, t):
+ if not bool(fn(s, l, t)):
+ raise exc_type(s, l, msg)
+
+ return pa
+
+class ParseBaseException(Exception):
+ """base exception class for all parsing runtime exceptions"""
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__(self, pstr, loc=0, msg=None, elem=None):
+ self.loc = loc
+ if msg is None:
+ self.msg = pstr
+ self.pstr = ""
+ else:
+ self.msg = msg
+ self.pstr = pstr
+ self.parserElement = elem
+ self.args = (pstr, loc, msg)
+
+ @classmethod
+ def _from_exception(cls, pe):
+ """
+ internal factory method to simplify creating one type of ParseException
+ from another - avoids having __init__ signature conflicts among subclasses
+ """
+ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+ def __getattr__(self, aname):
+ """supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+ """
+ if aname == "lineno":
+ return lineno(self.loc, self.pstr)
+ elif aname in ("col", "column"):
+ return col(self.loc, self.pstr)
+ elif aname == "line":
+ return line(self.loc, self.pstr)
+ else:
+ raise AttributeError(aname)
+
+ def __str__(self):
+ if self.pstr:
+ if self.loc >= len(self.pstr):
+ foundstr = ', found end of text'
+ else:
+ foundstr = (', found %r' % self.pstr[self.loc:self.loc + 1]).replace(r'\\', '\\')
+ else:
+ foundstr = ''
+ return ("%s%s (at char %d), (line:%d, col:%d)" %
+ (self.msg, foundstr, self.loc, self.lineno, self.column))
+ def __repr__(self):
+ return _ustr(self)
+ def markInputline(self, markerString=">!<"):
+ """Extracts the exception line from the input string, and marks
+ the location of the exception with a special symbol.
+ """
+ line_str = self.line
+ line_column = self.column - 1
+ if markerString:
+ line_str = "".join((line_str[:line_column],
+ markerString, line_str[line_column:]))
+ return line_str.strip()
+ def __dir__(self):
+ return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+ """
+ Exception thrown when parse expressions don't match class;
+ supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+
+ Example::
+
+ try:
+ Word(nums).setName("integer").parseString("ABC")
+ except ParseException as pe:
+ print(pe)
+ print("column: {}".format(pe.col))
+
+ prints::
+
+ Expected integer (at char 0), (line:1, col:1)
+ column: 1
+
+ """
+
+ @staticmethod
+ def explain(exc, depth=16):
+ """
+ Method to take an exception and translate the Python internal traceback into a list
+ of the pyparsing expressions that caused the exception to be raised.
+
+ Parameters:
+
+ - exc - exception raised during parsing (need not be a ParseException, in support
+ of Python exceptions that might be raised in a parse action)
+ - depth (default=16) - number of levels back in the stack trace to list expression
+ and function names; if None, the full stack trace names will be listed; if 0, only
+ the failing input line, marker, and exception string will be shown
+
+ Returns a multi-line string listing the ParserElements and/or function names in the
+ exception's stack trace.
+
+ Note: the diagnostic output will include string representations of the expressions
+ that failed to parse. These representations will be more helpful if you use `setName` to
+ give identifiable names to your expressions. Otherwise they will use the default string
+ forms, which may be cryptic to read.
+
+ explain() is only supported under Python 3.
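+
+        Example::
+
+            try:
+                Word(nums).parseString("ABC")
+            except ParseException as pe:
+                print(ParseException.explain(pe))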
+ """
+ import inspect
+
+ if depth is None:
+ depth = sys.getrecursionlimit()
+ ret = []
+ if isinstance(exc, ParseBaseException):
+ ret.append(exc.line)
+ ret.append(' ' * (exc.col - 1) + '^')
+ ret.append("{0}: {1}".format(type(exc).__name__, exc))
+
+ if depth > 0:
+ callers = inspect.getinnerframes(exc.__traceback__, context=depth)
+ seen = set()
+ for i, ff in enumerate(callers[-depth:]):
+ frm = ff[0]
+
+ f_self = frm.f_locals.get('self', None)
+ if isinstance(f_self, ParserElement):
+ if frm.f_code.co_name not in ('parseImpl', '_parseNoCache'):
+ continue
+ if f_self in seen:
+ continue
+ seen.add(f_self)
+
+ self_type = type(f_self)
+ ret.append("{0}.{1} - {2}".format(self_type.__module__,
+ self_type.__name__,
+ f_self))
+ elif f_self is not None:
+ self_type = type(f_self)
+ ret.append("{0}.{1}".format(self_type.__module__,
+ self_type.__name__))
+ else:
+ code = frm.f_code
+ if code.co_name in ('wrapper', '<module>'):
+ continue
+
+ ret.append("{0}".format(code.co_name))
+
+ depth -= 1
+ if not depth:
+ break
+
+ return '\n'.join(ret)
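+
+ # Illustrative sketch of explain() (Python 3 only); the expressions below
+ # are examples, not definitions from this module:
+ # try:
+ #     (Word(nums).setName("integer") + Word(alphas)).parseString("123 456")
+ # except ParseException as exc:
+ #     print(ParseException.explain(exc, depth=4))
+ # # prints the failing input line, a '^' marker at the error column, and
+ # # the ParserElement expressions active when the parse failed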
+
+
+class ParseFatalException(ParseBaseException):
+ """user-throwable exception thrown when inconsistent parse content
+ is found; stops all parsing immediately"""
+ pass
+
+class ParseSyntaxException(ParseFatalException):
+ """just like :class:`ParseFatalException`, but thrown internally
+ when an :class:`ErrorStop<And._ErrorStop>` ('-' operator) indicates
+ that parsing is to stop immediately because an unbacktrackable
+ syntax error has been found.
+ """
+ pass
+
+#~ class ReparseException(ParseBaseException):
+ #~ """Experimental class - parse actions can raise this exception to cause
+ #~ pyparsing to reparse the input string:
+ #~ - with a modified input string, and/or
+ #~ - with a modified start location
+ #~ Set the values of the ReparseException in the constructor, and raise the
+ #~ exception in a parse action to cause pyparsing to use the new string/location.
+ #~ Setting the values as None causes no change to be made.
+ #~ """
+ #~ def __init__(self, newstring, restartLoc):
+ #~ self.newParseText = newstring
+ #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+ """exception thrown by :class:`ParserElement.validate` if the
+ grammar could be improperly recursive
+ """
+ def __init__(self, parseElementList):
+ self.parseElementTrace = parseElementList
+
+ def __str__(self):
+ return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+ def __init__(self, p1, p2):
+ self.tup = (p1, p2)
+ def __getitem__(self, i):
+ return self.tup[i]
+ def __repr__(self):
+ return repr(self.tup[0])
+ def setOffset(self, i):
+ self.tup = (self.tup[0], i)
+
+class ParseResults(object):
+ """Structured parse results, to provide multiple means of access to
+ the parsed data:
+
+ - as a list (``len(results)``)
+ - by list index (``results[0], results[1]``, etc.)
+ - by attribute (``results.<resultsName>`` - see :class:`ParserElement.setResultsName`)
+
+ Example::
+
+ integer = Word(nums)
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+ # equivalent form:
+ # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ # parseString returns a ParseResults object
+ result = date_str.parseString("1999/12/31")
+
+ def test(s, fn=repr):
+ print("%s -> %s" % (s, fn(eval(s))))
+ test("list(result)")
+ test("result[0]")
+ test("result['month']")
+ test("result.day")
+ test("'month' in result")
+ test("'minutes' in result")
+ test("result.dump()", str)
+
+ prints::
+
+ list(result) -> ['1999', '/', '12', '/', '31']
+ result[0] -> '1999'
+ result['month'] -> '12'
+ result.day -> '31'
+ 'month' in result -> True
+ 'minutes' in result -> False
+ result.dump() -> ['1999', '/', '12', '/', '31']
+ - day: 31
+ - month: 12
+ - year: 1999
+ """
+ def __new__(cls, toklist=None, name=None, asList=True, modal=True):
+ if isinstance(toklist, cls):
+ return toklist
+ retobj = object.__new__(cls)
+ retobj.__doinit = True
+ return retobj
+
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__(self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance):
+ if self.__doinit:
+ self.__doinit = False
+ self.__name = None
+ self.__parent = None
+ self.__accumNames = {}
+ self.__asList = asList
+ self.__modal = modal
+ if toklist is None:
+ toklist = []
+ if isinstance(toklist, list):
+ self.__toklist = toklist[:]
+ elif isinstance(toklist, _generatorType):
+ self.__toklist = list(toklist)
+ else:
+ self.__toklist = [toklist]
+ self.__tokdict = dict()
+
+ if name is not None and name:
+ if not modal:
+ self.__accumNames[name] = 0
+ if isinstance(name, int):
+ name = _ustr(name) # will always return a str, but use _ustr for consistency
+ self.__name = name
+ if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None, '', [])):
+ if isinstance(toklist, basestring):
+ toklist = [toklist]
+ if asList:
+ if isinstance(toklist, ParseResults):
+ self[name] = _ParseResultsWithOffset(ParseResults(toklist.__toklist), 0)
+ else:
+ self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
+ self[name].__name = name
+ else:
+ try:
+ self[name] = toklist[0]
+ except (KeyError, TypeError, IndexError):
+ self[name] = toklist
+
+ def __getitem__(self, i):
+ if isinstance(i, (int, slice)):
+ return self.__toklist[i]
+ else:
+ if i not in self.__accumNames:
+ return self.__tokdict[i][-1][0]
+ else:
+ return ParseResults([v[0] for v in self.__tokdict[i]])
+
+ def __setitem__(self, k, v, isinstance=isinstance):
+ if isinstance(v, _ParseResultsWithOffset):
+ self.__tokdict[k] = self.__tokdict.get(k, list()) + [v]
+ sub = v[0]
+ elif isinstance(k, (int, slice)):
+ self.__toklist[k] = v
+ sub = v
+ else:
+ self.__tokdict[k] = self.__tokdict.get(k, list()) + [_ParseResultsWithOffset(v, 0)]
+ sub = v
+ if isinstance(sub, ParseResults):
+ sub.__parent = wkref(self)
+
+ def __delitem__(self, i):
+ if isinstance(i, (int, slice)):
+ mylen = len(self.__toklist)
+ del self.__toklist[i]
+
+ # convert int to slice
+ if isinstance(i, int):
+ if i < 0:
+ i += mylen
+ i = slice(i, i + 1)
+ # get removed indices
+ removed = list(range(*i.indices(mylen)))
+ removed.reverse()
+ # fixup indices in token dictionary
+ for name, occurrences in self.__tokdict.items():
+ for j in removed:
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+ else:
+ del self.__tokdict[i]
+
+ def __contains__(self, k):
+ return k in self.__tokdict
+
+ def __len__(self):
+ return len(self.__toklist)
+
+ def __bool__(self):
+ return (not not self.__toklist)
+ __nonzero__ = __bool__
+
+ def __iter__(self):
+ return iter(self.__toklist)
+
+ def __reversed__(self):
+ return iter(self.__toklist[::-1])
+
+ def _iterkeys(self):
+ if hasattr(self.__tokdict, "iterkeys"):
+ return self.__tokdict.iterkeys()
+ else:
+ return iter(self.__tokdict)
+
+ def _itervalues(self):
+ return (self[k] for k in self._iterkeys())
+
+ def _iteritems(self):
+ return ((k, self[k]) for k in self._iterkeys())
+
+ if PY_3:
+ keys = _iterkeys
+ """Returns an iterator of all named result keys."""
+
+ values = _itervalues
+ """Returns an iterator of all named result values."""
+
+ items = _iteritems
+ """Returns an iterator of all named result key-value tuples."""
+
+ else:
+ iterkeys = _iterkeys
+ """Returns an iterator of all named result keys (Python 2.x only)."""
+
+ itervalues = _itervalues
+ """Returns an iterator of all named result values (Python 2.x only)."""
+
+ iteritems = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+ def keys(self):
+ """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iterkeys())
+
+ def values(self):
+ """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.itervalues())
+
+ def items(self):
+ """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iteritems())
+
+ def haskeys(self):
+ """Since keys() returns an iterator, this method is helpful in bypassing
+ code that looks for the existence of any defined results names."""
+ return bool(self.__tokdict)
+
+ def pop(self, *args, **kwargs):
+ """
+ Removes and returns item at specified index (default= ``last``).
+ Supports both ``list`` and ``dict`` semantics for ``pop()``. If
+ passed no argument or an integer argument, it will use ``list``
+ semantics and pop tokens from the list of parsed tokens. If passed
+ a non-integer argument (most likely a string), it will use ``dict``
+ semantics and pop the corresponding value from any defined results
+ names. A second default return value argument is supported, just as in
+ ``dict.pop()``.
+
+ Example::
+
+ def remove_first(tokens):
+ tokens.pop(0)
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+ print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+ label = Word(alphas)
+ patt = label("LABEL") + OneOrMore(Word(nums))
+ print(patt.parseString("AAB 123 321").dump())
+
+ # Use pop() in a parse action to remove named result (note that corresponding value is not
+ # removed from list form of results)
+ def remove_LABEL(tokens):
+ tokens.pop("LABEL")
+ return tokens
+ patt.addParseAction(remove_LABEL)
+ print(patt.parseString("AAB 123 321").dump())
+
+ prints::
+
+ ['AAB', '123', '321']
+ - LABEL: AAB
+
+ ['AAB', '123', '321']
+ """
+ if not args:
+ args = [-1]
+ for k, v in kwargs.items():
+ if k == 'default':
+ args = (args[0], v)
+ else:
+ raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+ if (isinstance(args[0], int)
+ or len(args) == 1
+ or args[0] in self):
+ index = args[0]
+ ret = self[index]
+ del self[index]
+ return ret
+ else:
+ defaultvalue = args[1]
+ return defaultvalue
+
+ def get(self, key, defaultValue=None):
+ """
+ Returns named result matching the given key, or if there is no
+ such name, then returns the given ``defaultValue`` or ``None`` if no
+ ``defaultValue`` is specified.
+
+ Similar to ``dict.get()``.
+
+ Example::
+
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString("1999/12/31")
+ print(result.get("year")) # -> '1999'
+ print(result.get("hour", "not specified")) # -> 'not specified'
+ print(result.get("hour")) # -> None
+ """
+ if key in self:
+ return self[key]
+ else:
+ return defaultValue
+
+ def insert(self, index, insStr):
+ """
+ Inserts new element at location index in the list of parsed tokens.
+
+ Similar to ``list.insert()``.
+
+ Example::
+
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to insert the parse location in the front of the parsed results
+ def insert_locn(locn, tokens):
+ tokens.insert(0, locn)
+ print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+ """
+ self.__toklist.insert(index, insStr)
+ # fixup indices in token dictionary
+ for name, occurrences in self.__tokdict.items():
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+ def append(self, item):
+ """
+ Add single element to end of ParseResults list of elements.
+
+ Example::
+
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to compute the sum of the parsed integers, and add it to the end
+ def append_sum(tokens):
+ tokens.append(sum(map(int, tokens)))
+ print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+ """
+ self.__toklist.append(item)
+
+ def extend(self, itemseq):
+ """
+ Add sequence of elements to end of ParseResults list of elements.
+
+ Example::
+
+ patt = OneOrMore(Word(alphas))
+
+ # use a parse action to append the reverse of the matched strings, to make a palindrome
+ def make_palindrome(tokens):
+ tokens.extend(reversed([t[::-1] for t in tokens]))
+ return ''.join(tokens)
+ print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+ """
+ if isinstance(itemseq, ParseResults):
+ self.__iadd__(itemseq)
+ else:
+ self.__toklist.extend(itemseq)
+
+ def clear(self):
+ """
+ Clear all elements and results names.
+ """
+ del self.__toklist[:]
+ self.__tokdict.clear()
+
+ def __getattr__(self, name):
+ try:
+ return self[name]
+ except KeyError:
+ return ""
+
+ def __add__(self, other):
+ ret = self.copy()
+ ret += other
+ return ret
+
+ def __iadd__(self, other):
+ if other.__tokdict:
+ offset = len(self.__toklist)
+ addoffset = lambda a: offset if a < 0 else a + offset
+ otheritems = other.__tokdict.items()
+ otherdictitems = [(k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
+ for k, vlist in otheritems for v in vlist]
+ for k, v in otherdictitems:
+ self[k] = v
+ if isinstance(v[0], ParseResults):
+ v[0].__parent = wkref(self)
+
+ self.__toklist += other.__toklist
+ self.__accumNames.update(other.__accumNames)
+ return self
+
+ def __radd__(self, other):
+ if isinstance(other, int) and other == 0:
+ # useful for merging many ParseResults using sum() builtin
+ return self.copy()
+ else:
+ # this may raise a TypeError - so be it
+ return other + self
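+
+ # Sketch: __radd__'s special case for 0 lets sum() merge results, e.g.:
+ # pieces = [Word(nums).parseString(s) for s in ("1", "2", "3")]
+ # print(sum(pieces))    # -> ['1', '2', '3']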
+
+ def __repr__(self):
+ return "(%s, %s)" % (repr(self.__toklist), repr(self.__tokdict))
+
+ def __str__(self):
+ return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+ def _asStringList(self, sep=''):
+ out = []
+ for item in self.__toklist:
+ if out and sep:
+ out.append(sep)
+ if isinstance(item, ParseResults):
+ out += item._asStringList()
+ else:
+ out.append(_ustr(item))
+ return out
+
+ def asList(self):
+ """
+ Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+ Example::
+
+ patt = OneOrMore(Word(alphas))
+ result = patt.parseString("sldkj lsdkj sldkj")
+ # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+ print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+
+ # Use asList() to create an actual list
+ result_list = result.asList()
+ print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+ """
+ return [res.asList() if isinstance(res, ParseResults) else res for res in self.__toklist]
+
+ def asDict(self):
+ """
+ Returns the named parse results as a nested dictionary.
+
+ Example::
+
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+
+ result_dict = result.asDict()
+ print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+ # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+ import json
+ print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+ print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+ """
+ if PY_3:
+ item_fn = self.items
+ else:
+ item_fn = self.iteritems
+
+ def toItem(obj):
+ if isinstance(obj, ParseResults):
+ if obj.haskeys():
+ return obj.asDict()
+ else:
+ return [toItem(v) for v in obj]
+ else:
+ return obj
+
+ return dict((k, toItem(v)) for k, v in item_fn())
+
+ def copy(self):
+ """
+ Returns a new copy of a :class:`ParseResults` object.
+ """
+ ret = ParseResults(self.__toklist)
+ ret.__tokdict = dict(self.__tokdict.items())
+ ret.__parent = self.__parent
+ ret.__accumNames.update(self.__accumNames)
+ ret.__name = self.__name
+ return ret
+
+ def asXML(self, doctag=None, namedItemsOnly=False, indent="", formatted=True):
+ """
+ (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+ """
+ nl = "\n"
+ out = []
+ namedItems = dict((v[1], k) for (k, vlist) in self.__tokdict.items()
+ for v in vlist)
+ nextLevelIndent = indent + " "
+
+ # collapse out indents if formatting is not desired
+ if not formatted:
+ indent = ""
+ nextLevelIndent = ""
+ nl = ""
+
+ selfTag = None
+ if doctag is not None:
+ selfTag = doctag
+ else:
+ if self.__name:
+ selfTag = self.__name
+
+ if not selfTag:
+ if namedItemsOnly:
+ return ""
+ else:
+ selfTag = "ITEM"
+
+ out += [nl, indent, "<", selfTag, ">"]
+
+ for i, res in enumerate(self.__toklist):
+ if isinstance(res, ParseResults):
+ if i in namedItems:
+ out += [res.asXML(namedItems[i],
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ out += [res.asXML(None,
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ # individual token, see if there is a name for it
+ resTag = None
+ if i in namedItems:
+ resTag = namedItems[i]
+ if not resTag:
+ if namedItemsOnly:
+ continue
+ else:
+ resTag = "ITEM"
+ xmlBodyText = _xml_escape(_ustr(res))
+ out += [nl, nextLevelIndent, "<", resTag, ">",
+ xmlBodyText,
+ "</", resTag, ">"]
+
+ out += [nl, indent, "</", selfTag, ">"]
+ return "".join(out)
+
+ def __lookup(self, sub):
+ for k, vlist in self.__tokdict.items():
+ for v, loc in vlist:
+ if sub is v:
+ return k
+ return None
+
+ def getName(self):
+ r"""
+ Returns the results name for this token expression. Useful when several
+ different expressions might match at a particular location.
+
+ Example::
+
+ integer = Word(nums)
+ ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+ house_number_expr = Suppress('#') + Word(nums, alphanums)
+ user_data = (Group(house_number_expr)("house_number")
+ | Group(ssn_expr)("ssn")
+ | Group(integer)("age"))
+ user_info = OneOrMore(user_data)
+
+ result = user_info.parseString("22 111-22-3333 #221B")
+ for item in result:
+ print(item.getName(), ':', item[0])
+
+ prints::
+
+ age : 22
+ ssn : 111-22-3333
+ house_number : 221B
+ """
+ if self.__name:
+ return self.__name
+ elif self.__parent:
+ par = self.__parent()
+ if par:
+ return par.__lookup(self)
+ else:
+ return None
+ elif (len(self) == 1
+ and len(self.__tokdict) == 1
+ and next(iter(self.__tokdict.values()))[0][1] in (0, -1)):
+ return next(iter(self.__tokdict.keys()))
+ else:
+ return None
+
+ def dump(self, indent='', full=True, include_list=True, _depth=0):
+ """
+ Diagnostic method for listing out the contents of
+ a :class:`ParseResults`. Accepts an optional ``indent`` argument so
+ that this string can be embedded in a nested display of other data.
+
+ Example::
+
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(result.dump())
+
+ prints::
+
+ ['12', '/', '31', '/', '1999']
+ - day: 1999
+ - month: 31
+ - year: 12
+ """
+ out = []
+ NL = '\n'
+ if include_list:
+ out.append(indent + _ustr(self.asList()))
+ else:
+ out.append('')
+
+ if full:
+ if self.haskeys():
+ items = sorted((str(k), v) for k, v in self.items())
+ for k, v in items:
+ if out:
+ out.append(NL)
+ out.append("%s%s- %s: " % (indent, (' ' * _depth), k))
+ if isinstance(v, ParseResults):
+ if v:
+ out.append(v.dump(indent=indent, full=full, include_list=include_list, _depth=_depth + 1))
+ else:
+ out.append(_ustr(v))
+ else:
+ out.append(repr(v))
+ elif any(isinstance(vv, ParseResults) for vv in self):
+ v = self
+ for i, vv in enumerate(v):
+ if isinstance(vv, ParseResults):
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,
+ (' ' * (_depth)),
+ i,
+ indent,
+ (' ' * (_depth + 1)),
+ vv.dump(indent=indent,
+ full=full,
+ include_list=include_list,
+ _depth=_depth + 1)))
+ else:
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,
+ (' ' * (_depth)),
+ i,
+ indent,
+ (' ' * (_depth + 1)),
+ _ustr(vv)))
+
+ return "".join(out)
+
+ def pprint(self, *args, **kwargs):
+ """
+ Pretty-printer for parsed results as a list, using the
+ `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
+ Accepts additional positional or keyword args as defined for
+ `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .
+
+ Example::
+
+ ident = Word(alphas, alphanums)
+ num = Word(nums)
+ func = Forward()
+ term = ident | num | Group('(' + func + ')')
+ func <<= ident + Group(Optional(delimitedList(term)))
+ result = func.parseString("fna a,b,(fnb c,d,200),100")
+ result.pprint(width=40)
+
+ prints::
+
+ ['fna',
+ ['a',
+ 'b',
+ ['(', 'fnb', ['c', 'd', '200'], ')'],
+ '100']]
+ """
+ pprint.pprint(self.asList(), *args, **kwargs)
+
+ # add support for pickle protocol
+ def __getstate__(self):
+ return (self.__toklist,
+ (self.__tokdict.copy(),
+ self.__parent is not None and self.__parent() or None,
+ self.__accumNames,
+ self.__name))
+
+ def __setstate__(self, state):
+ self.__toklist = state[0]
+ self.__tokdict, par, inAccumNames, self.__name = state[1]
+ self.__accumNames = {}
+ self.__accumNames.update(inAccumNames)
+ if par is not None:
+ self.__parent = wkref(par)
+ else:
+ self.__parent = None
+
+ def __getnewargs__(self):
+ return self.__toklist, self.__name, self.__asList, self.__modal
+
+ def __dir__(self):
+ return dir(type(self)) + list(self.keys())
+
+ @classmethod
+ def from_dict(cls, other, name=None):
+ """
+ Helper classmethod to construct a ParseResults from a dict, preserving the
+ name-value relations as results names. If an optional 'name' argument is
+ given, a nested ParseResults will be returned.
+ """
+ def is_iterable(obj):
+ try:
+ iter(obj)
+ except Exception:
+ return False
+ else:
+ if PY_3:
+ return not isinstance(obj, (str, bytes))
+ else:
+ return not isinstance(obj, basestring)
+
+ ret = cls([])
+ for k, v in other.items():
+ if isinstance(v, Mapping):
+ ret += cls.from_dict(v, name=k)
+ else:
+ ret += cls([v], name=k, asList=is_iterable(v))
+ if name is not None:
+ ret = cls([ret], name=name)
+ return ret
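+
+ # Minimal sketch of from_dict() (the keys below are illustrative):
+ # user = ParseResults.from_dict({"name": "Bob", "address": {"city": "Springfield"}})
+ # print(user.name)            # -> 'Bob'
+ # print(user.address.city)    # -> 'Springfield'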
+
+MutableMapping.register(ParseResults)
+
+def col(loc, strg):
+ """Returns current column within a string, counting newlines as line separators.
+ The first column is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See
+ :class:`ParserElement.parseString` for more
+ information on parsing strings containing ``<TAB>`` s, and suggested
+ methods to maintain a consistent view of the parsed string, the parse
+ location, and line and column positions within the parsed string.
+ """
+ s = strg
+ return 1 if 0 < loc < len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc, strg):
+ """Returns current line number within a string, counting newlines as line separators.
+ The first line is number 1.
+
+ Note - the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See :class:`ParserElement.parseString`
+ for more information on parsing strings containing ``<TAB>`` s, and
+ suggested methods to maintain a consistent view of the parsed string, the
+ parse location, and line and column positions within the parsed string.
+ """
+ return strg.count("\n", 0, loc) + 1
+
+def line(loc, strg):
+ """Returns the line of text containing loc within a string, counting newlines as line separators.
+ """
+ lastCR = strg.rfind("\n", 0, loc)
+ nextCR = strg.find("\n", loc)
+ if nextCR >= 0:
+ return strg[lastCR + 1:nextCR]
+ else:
+ return strg[lastCR + 1:]
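+
+ # Quick sketch of these location helpers on s = "abc\ndef" (loc 5 is 'e'):
+ # lineno(5, s)    # -> 2
+ # col(5, s)       # -> 2
+ # line(5, s)      # -> 'def'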
+
+def _defaultStartDebugAction(instring, loc, expr):
+ print(("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % (lineno(loc, instring), col(loc, instring))))
+
+def _defaultSuccessDebugAction(instring, startloc, endloc, expr, toks):
+ print("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction(instring, loc, expr, exc):
+ print("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+ """'Do-nothing' debug action, to suppress debugging output during parsing."""
+ pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+ #~ if func in singleArgBuiltins:
+ #~ return lambda s,l,t: func(t)
+ #~ limit = 0
+ #~ foundArity = False
+ #~ def wrapper(*args):
+ #~ nonlocal limit,foundArity
+ #~ while 1:
+ #~ try:
+ #~ ret = func(*args[limit:])
+ #~ foundArity = True
+ #~ return ret
+ #~ except TypeError:
+ #~ if limit == maxargs or foundArity:
+ #~ raise
+ #~ limit += 1
+ #~ continue
+ #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+# decorator to trim function calls to match the arity of the target
+def _trim_arity(func, maxargs=2):
+ if func in singleArgBuiltins:
+ return lambda s, l, t: func(t)
+ limit = [0]
+ foundArity = [False]
+
+ # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+ if system_version[:2] >= (3, 5):
+ def extract_stack(limit=0):
+ # special handling for Python 3.5.0 - extra deep call stack by 1
+ offset = -3 if system_version == (3, 5, 0) else -2
+ frame_summary = traceback.extract_stack(limit=-offset + limit - 1)[offset]
+ return [frame_summary[:2]]
+ def extract_tb(tb, limit=0):
+ frames = traceback.extract_tb(tb, limit=limit)
+ frame_summary = frames[-1]
+ return [frame_summary[:2]]
+ else:
+ extract_stack = traceback.extract_stack
+ extract_tb = traceback.extract_tb
+
+ # synthesize what would be returned by traceback.extract_stack at the call to
+ # user's parse action 'func', so that we don't incur call penalty at parse time
+
+ LINE_DIFF = 6
+ # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
+ # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+ this_line = extract_stack(limit=2)[-1]
+ pa_call_line_synth = (this_line[0], this_line[1] + LINE_DIFF)
+
+ def wrapper(*args):
+ while 1:
+ try:
+ ret = func(*args[limit[0]:])
+ foundArity[0] = True
+ return ret
+ except TypeError:
+ # re-raise TypeErrors if they did not come from our arity testing
+ if foundArity[0]:
+ raise
+ else:
+ try:
+ tb = sys.exc_info()[-1]
+ if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+ raise
+ finally:
+ try:
+ del tb
+ except NameError:
+ pass
+
+ if limit[0] <= maxargs:
+ limit[0] += 1
+ continue
+ raise
+
+ # copy func name to wrapper for sensible debug output
+ func_name = "<parse action>"
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ wrapper.__name__ = func_name
+
+ return wrapper
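+
+ # Sketch: _trim_arity is what lets parse actions use any supported signature;
+ # each of these (illustrative) actions is wrapped to be callable as fn(s, loc, toks):
+ # integer.setParseAction(lambda toks: int(toks[0]))        # fn(toks)
+ # integer.setParseAction(lambda loc, toks: toks)           # fn(loc, toks)
+ # integer.setParseAction(lambda s, loc, toks: toks)        # fn(s, loc, toks)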
+
+
+class ParserElement(object):
+ """Abstract base level parser element class."""
+ DEFAULT_WHITE_CHARS = " \n\t\r"
+ verbose_stacktrace = False
+
+ @staticmethod
+ def setDefaultWhitespaceChars(chars):
+ r"""
+ Overrides the default whitespace chars
+
+ Example::
+
+ # default whitespace chars are space, <TAB> and newline
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
+
+ # change to just treat newline as significant
+ ParserElement.setDefaultWhitespaceChars(" \t")
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
+ """
+ ParserElement.DEFAULT_WHITE_CHARS = chars
+
+ @staticmethod
+ def inlineLiteralsUsing(cls):
+ """
+ Set class to be used for inclusion of string literals into a parser.
+
+ Example::
+
+ # default literal class used is Literal
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+
+ # change to Suppress
+ ParserElement.inlineLiteralsUsing(Suppress)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
+ """
+ ParserElement._literalStringClass = cls
+
+ @classmethod
+ def _trim_traceback(cls, tb):
+ while tb.tb_next:
+ tb = tb.tb_next
+ return tb
+
+ def __init__(self, savelist=False):
+ self.parseAction = list()
+ self.failAction = None
+ # ~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
+ self.strRepr = None
+ self.resultsName = None
+ self.saveAsList = savelist
+ self.skipWhitespace = True
+ self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS)
+ self.copyDefaultWhiteChars = True
+ self.mayReturnEmpty = False # used when checking for left-recursion
+ self.keepTabs = False
+ self.ignoreExprs = list()
+ self.debug = False
+ self.streamlined = False
+ self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+ self.errmsg = ""
+ self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+ self.debugActions = (None, None, None) # custom debug actions
+ self.re = None
+ self.callPreparse = True # used to avoid redundant calls to preParse
+ self.callDuringTry = False
+
+ def copy(self):
+ """
+ Make a copy of this :class:`ParserElement`. Useful for defining
+ different parse actions for the same parsing pattern, using copies of
+ the original parse element.
+
+ Example::
+
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ integerK = integer.copy().addParseAction(lambda toks: toks[0] * 1024) + Suppress("K")
+ integerM = integer.copy().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
+
+ print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+
+ prints::
+
+ [5120, 100, 655360, 268435456]
+
+ Equivalent form of ``expr.copy()`` is just ``expr()``::
+
+ integerM = integer().addParseAction(lambda toks: toks[0] * 1024 * 1024) + Suppress("M")
+ """
+ cpy = copy.copy(self)
+ cpy.parseAction = self.parseAction[:]
+ cpy.ignoreExprs = self.ignoreExprs[:]
+ if self.copyDefaultWhiteChars:
+ cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ return cpy
+
+ def setName(self, name):
+ """
+ Define name for this expression, makes debugging and exception messages clearer.
+
+ Example::
+
+ Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+ Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
+ """
+ self.name = name
+ self.errmsg = "Expected " + self.name
+ if __diag__.enable_debug_on_named_expressions:
+ self.setDebug()
+ return self
+
+ def setResultsName(self, name, listAllMatches=False):
+ """
+ Define name for referencing matching tokens as a nested attribute
+ of the returned parse results.
+ NOTE: this returns a *copy* of the original :class:`ParserElement` object;
+ this is so that the client can define a basic element, such as an
+ integer, and reference it in multiple places with different names.
+
+ You can also set results names using the abbreviated syntax,
+ ``expr("name")`` in place of ``expr.setResultsName("name")``
+ - see :class:`__call__`.
+
+ Example::
+
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+
+ # equivalent form:
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+ """
+ return self._setResultsName(name, listAllMatches)
+
+ def _setResultsName(self, name, listAllMatches=False):
+ newself = self.copy()
+ if name.endswith("*"):
+ name = name[:-1]
+ listAllMatches = True
+ newself.resultsName = name
+ newself.modalResults = not listAllMatches
+ return newself
+
+ def setBreak(self, breakFlag=True):
+ """Method to invoke the Python pdb debugger when this element is
+ about to be parsed. Set ``breakFlag`` to True to enable, False to
+ disable.
+ """
+ if breakFlag:
+ _parseMethod = self._parse
+ def breaker(instring, loc, doActions=True, callPreParse=True):
+ import pdb
+ # this call to pdb.set_trace() is intentional, not a checkin error
+ pdb.set_trace()
+ return _parseMethod(instring, loc, doActions, callPreParse)
+ breaker._originalParseMethod = _parseMethod
+ self._parse = breaker
+ else:
+ if hasattr(self._parse, "_originalParseMethod"):
+ self._parse = self._parse._originalParseMethod
+ return self
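+
+ # Sketch: break into pdb just before 'integer' is matched (debugging aid):
+ # integer = Word(nums).setBreak()
+ # (integer + Word(alphas)).parseString("123 abc")    # pdb prompt opens here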
+
+ def setParseAction(self, *fns, **kwargs):
+ """
+ Define one or more actions to perform when successfully matching parse element definition.
+ Parse action fn is a callable with 0-3 arguments, called as ``fn(s, loc, toks)`` ,
+ ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where:
+
+ - s = the original string being parsed (see note below)
+ - loc = the location of the matching substring
+ - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object
+
+ If the functions in fns modify the tokens, they can return them as the return
+ value from fn, and the modified list of tokens will replace the original.
+ Otherwise, fn does not need to return any value.
+
+ If None is passed as the parse action, all previously added parse actions for this
+ expression are cleared.
+
+ Optional keyword arguments:
+ - callDuringTry = (default= ``False``) indicate if parse action should be run during lookaheads and alternate testing
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See :class:`parseString` for more
+ information on parsing strings containing ``<TAB>`` s, and suggested
+ methods to maintain a consistent view of the parsed string, the parse
+ location, and line and column positions within the parsed string.
+
+ Example::
+
+ integer = Word(nums)
+ date_str = integer + '/' + integer + '/' + integer
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+ # use parse action to convert to ints at parse time
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ date_str = integer + '/' + integer + '/' + integer
+
+ # note that integer fields are now ints, not strings
+ date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
+ """
+ if list(fns) == [None,]:
+ self.parseAction = []
+ else:
+ if not all(callable(fn) for fn in fns):
+ raise TypeError("parse actions must be callable")
+ self.parseAction = list(map(_trim_arity, list(fns)))
+ self.callDuringTry = kwargs.get("callDuringTry", False)
+ return self
+
+ def addParseAction(self, *fns, **kwargs):
+ """
+ Add one or more parse actions to expression's list of parse actions. See :class:`setParseAction`.
+
+ See examples in :class:`copy`.
+ """
+ self.parseAction += list(map(_trim_arity, list(fns)))
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def addCondition(self, *fns, **kwargs):
+ """Add a boolean predicate function to expression's list of parse actions. See
+ :class:`setParseAction` for function call signatures. Unlike ``setParseAction``,
+ functions passed to ``addCondition`` need to return boolean success/fail of the condition.
+
+ Optional keyword arguments:
+ - message = define a custom message to be used in the raised exception
+ - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+
+ Example::
+
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ year_int = integer.copy()
+ year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+ date_str = year_int + '/' + integer + '/' + integer
+
+ result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+ """
+ for fn in fns:
+ self.parseAction.append(conditionAsParseAction(fn, message=kwargs.get('message'),
+ fatal=kwargs.get('fatal', False)))
+
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def setFailAction(self, fn):
+ """Define action to perform if parsing fails at this expression.
+ Fail action fn is a callable function that takes the arguments
+ ``fn(s, loc, expr, err)`` where:
+ - s = string being parsed
+ - loc = location where expression match was attempted and failed
+ - expr = the parse expression that failed
+ - err = the exception thrown
+ The function returns no value. It may throw :class:`ParseFatalException`
+ if it is desired to stop parsing immediately."""
+ self.failAction = fn
+ return self
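+
+ # Illustrative fail action reporting where a match was attempted:
+ # def report_failure(s, loc, expr, err):
+ #     print("failed to match %s at loc %d" % (expr, loc))
+ # integer = Word(nums).setFailAction(report_failure)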
+
+ def _skipIgnorables(self, instring, loc):
+ exprsFound = True
+ while exprsFound:
+ exprsFound = False
+ for e in self.ignoreExprs:
+ try:
+ while 1:
+ loc, dummy = e._parse(instring, loc)
+ exprsFound = True
+ except ParseException:
+ pass
+ return loc
+
+ def preParse(self, instring, loc):
+ if self.ignoreExprs:
+ loc = self._skipIgnorables(instring, loc)
+
+ if self.skipWhitespace:
+ wt = self.whiteChars
+ instrlen = len(instring)
+ while loc < instrlen and instring[loc] in wt:
+ loc += 1
+
+ return loc
+
+ def parseImpl(self, instring, loc, doActions=True):
+ return loc, []
+
+ def postParse(self, instring, loc, tokenlist):
+ return tokenlist
+
+ # ~ @profile
+ def _parseNoCache(self, instring, loc, doActions=True, callPreParse=True):
+ TRY, MATCH, FAIL = 0, 1, 2
+ debugging = (self.debug) # and doActions)
+
+ if debugging or self.failAction:
+ # ~ print ("Match", self, "at loc", loc, "(%d, %d)" % (lineno(loc, instring), col(loc, instring)))
+ if self.debugActions[TRY]:
+ self.debugActions[TRY](instring, loc, self)
+ try:
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse(instring, loc)
+ else:
+ preloc = loc
+ tokensStart = preloc
+ if self.mayIndexError or preloc >= len(instring):
+ try:
+ loc, tokens = self.parseImpl(instring, preloc, doActions)
+ except IndexError:
+ raise ParseException(instring, len(instring), self.errmsg, self)
+ else:
+ loc, tokens = self.parseImpl(instring, preloc, doActions)
+ except Exception as err:
+ # ~ print ("Exception raised:", err)
+ if self.debugActions[FAIL]:
+ self.debugActions[FAIL](instring, tokensStart, self, err)
+ if self.failAction:
+ self.failAction(instring, tokensStart, self, err)
+ raise
+ else:
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse(instring, loc)
+ else:
+ preloc = loc
+ tokensStart = preloc
+ if self.mayIndexError or preloc >= len(instring):
+ try:
+ loc, tokens = self.parseImpl(instring, preloc, doActions)
+ except IndexError:
+ raise ParseException(instring, len(instring), self.errmsg, self)
+ else:
+ loc, tokens = self.parseImpl(instring, preloc, doActions)
+
+ tokens = self.postParse(instring, loc, tokens)
+
+ retTokens = ParseResults(tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults)
+ if self.parseAction and (doActions or self.callDuringTry):
+ if debugging:
+ try:
+ for fn in self.parseAction:
+ try:
+ tokens = fn(instring, tokensStart, retTokens)
+ except IndexError as parse_action_exc:
+ exc = ParseException("exception raised in parse action")
+ exc.__cause__ = parse_action_exc
+ raise exc
+
+ if tokens is not None and tokens is not retTokens:
+ retTokens = ParseResults(tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
+ modal=self.modalResults)
+ except Exception as err:
+ # ~ print "Exception raised in user parse action:", err
+ if self.debugActions[FAIL]:
+ self.debugActions[FAIL](instring, tokensStart, self, err)
+ raise
+ else:
+ for fn in self.parseAction:
+ try:
+ tokens = fn(instring, tokensStart, retTokens)
+ except IndexError as parse_action_exc:
+ exc = ParseException("exception raised in parse action")
+ exc.__cause__ = parse_action_exc
+ raise exc
+
+ if tokens is not None and tokens is not retTokens:
+ retTokens = ParseResults(tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens, (ParseResults, list)),
+ modal=self.modalResults)
+ if debugging:
+ # ~ print ("Matched", self, "->", retTokens.asList())
+ if self.debugActions[MATCH]:
+ self.debugActions[MATCH](instring, tokensStart, loc, self, retTokens)
+
+ return loc, retTokens
+
+ def tryParse(self, instring, loc):
+ try:
+ return self._parse(instring, loc, doActions=False)[0]
+ except ParseFatalException:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ def canParseNext(self, instring, loc):
+ try:
+ self.tryParse(instring, loc)
+ except (ParseException, IndexError):
+ return False
+ else:
+ return True
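+
+ # Sketch: canParseNext() as a non-raising lookahead test:
+ # Word(nums).canParseNext("12 abc", 0)    # -> True
+ # Word(nums).canParseNext("12 abc", 3)    # -> False ('abc' is not numeric)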
+
+ class _UnboundedCache(object):
+ def __init__(self):
+ cache = {}
+ self.not_in_cache = not_in_cache = object()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ if _OrderedDict is not None:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = _OrderedDict()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(cache) > size:
+ try:
+ cache.popitem(False)
+ except KeyError:
+ pass
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ else:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = {}
+ key_fifo = collections.deque([], size)
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(key_fifo) > size:
+ cache.pop(key_fifo.popleft(), None)
+ key_fifo.append(key)
+
+ def clear(self):
+ cache.clear()
+ key_fifo.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ # argument cache for optimizing repeated calls when backtracking through recursive expressions
+ packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+ packrat_cache_lock = RLock()
+ packrat_cache_stats = [0, 0]
+
+ # this method gets repeatedly called during backtracking with the same arguments -
+ # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+ def _parseCache(self, instring, loc, doActions=True, callPreParse=True):
+ HIT, MISS = 0, 1
+ lookup = (self, instring, loc, callPreParse, doActions)
+ with ParserElement.packrat_cache_lock:
+ cache = ParserElement.packrat_cache
+ value = cache.get(lookup)
+ if value is cache.not_in_cache:
+ ParserElement.packrat_cache_stats[MISS] += 1
+ try:
+ value = self._parseNoCache(instring, loc, doActions, callPreParse)
+ except ParseBaseException as pe:
+ # cache a copy of the exception, without the traceback
+ cache.set(lookup, pe.__class__(*pe.args))
+ raise
+ else:
+ cache.set(lookup, (value[0], value[1].copy()))
+ return value
+ else:
+ ParserElement.packrat_cache_stats[HIT] += 1
+ if isinstance(value, Exception):
+ raise value
+ return value[0], value[1].copy()
+
+ _parse = _parseNoCache
+
+ @staticmethod
+ def resetCache():
+ ParserElement.packrat_cache.clear()
+ ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+ _packratEnabled = False
+ @staticmethod
+ def enablePackrat(cache_size_limit=128):
+ """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+ Repeated parse attempts at the same string location (which happens
+ often in many complex grammars) can immediately return a cached value,
+ instead of re-executing parsing/validating code. Both valid results
+ and parsing exceptions are memoized.
+
+ Parameters:
+
+ - cache_size_limit - (default= ``128``) - if an integer value is provided
+ will limit the size of the packrat cache; if None is passed, then
+ the cache size will be unbounded; if 0 is passed, the cache will
+ be effectively disabled.
+
+ This speedup may break existing programs that use parse actions that
+ have side-effects. For this reason, packrat parsing is disabled when
+ you first import pyparsing. To activate the packrat feature, your
+ program must call the class method :class:`ParserElement.enablePackrat`.
+ For best results, call ``enablePackrat()`` immediately after
+ importing pyparsing.
+
+ Example::
+
+ import pyparsing
+ pyparsing.ParserElement.enablePackrat()
+ """
+ if not ParserElement._packratEnabled:
+ ParserElement._packratEnabled = True
+ if cache_size_limit is None:
+ ParserElement.packrat_cache = ParserElement._UnboundedCache()
+ else:
+ ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+ ParserElement._parse = ParserElement._parseCache
+
+ def parseString(self, instring, parseAll=False):
+ """
+ Execute the parse expression with the given string.
+ This is the main interface to the client code, once the complete
+ expression has been built.
+
+ Returns the parsed data as a :class:`ParseResults` object, which may be
+ accessed as a list, or as a dict or object with attributes if the given parser
+ includes results names.
+
+ If you want the grammar to require that the entire input string be
+ successfully parsed, then set ``parseAll`` to True (equivalent to ending
+ the grammar with ``StringEnd()``).
+
+ Note: ``parseString`` implicitly calls ``expandtabs()`` on the input string,
+ in order to report proper column numbers in parse actions.
+ If the input string contains tabs and
+ the grammar uses parse actions that use the ``loc`` argument to index into the
+ string being parsed, you can ensure you have a consistent view of the input
+ string by:
+
+ - calling ``parseWithTabs`` on your grammar before calling ``parseString``
+ (see :class:`parseWithTabs`)
+ - define your parse action using the full ``(s, loc, toks)`` signature, and
+ reference the input string using the parse action's ``s`` argument
+ - explicitly expand the tabs in your input string before calling
+ ``parseString``
+
+ Example::
+
+ Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
+ Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
+ """
+ ParserElement.resetCache()
+ if not self.streamlined:
+ self.streamline()
+ # ~ self.saveAsList = True
+ for e in self.ignoreExprs:
+ e.streamline()
+ if not self.keepTabs:
+ instring = instring.expandtabs()
+ try:
+ loc, tokens = self._parse(instring, 0)
+ if parseAll:
+ loc = self.preParse(instring, loc)
+ se = Empty() + StringEnd()
+ se._parse(instring, loc)
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clearing out pyparsing internal stack trace
+ if getattr(exc, '__traceback__', None) is not None:
+ exc.__traceback__ = self._trim_traceback(exc.__traceback__)
+ raise exc
+ else:
+ return tokens
+
+ def scanString(self, instring, maxMatches=_MAX_INT, overlap=False):
+ """
+ Scan the input string for expression matches. Each match will return the
+ matching tokens, start location, and end location. May be called with optional
+ ``maxMatches`` argument, to clip scanning after 'n' matches are found. If
+ ``overlap`` is specified, then overlapping matches will be reported.
+
+ Note that the start and end locations are reported relative to the string
+ being parsed. See :class:`parseString` for more information on parsing
+ strings with embedded tabs.
+
+ Example::
+
+ source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+ print(source)
+ for tokens, start, end in Word(alphas).scanString(source):
+ print(' '*start + '^'*(end-start))
+ print(' '*start + tokens[0])
+
+ prints::
+
+ sldjf123lsdjjkf345sldkjf879lkjsfd987
+ ^^^^^
+ sldjf
+ ^^^^^^^
+ lsdjjkf
+ ^^^^^^
+ sldkjf
+ ^^^^^^
+ lkjsfd
+ """
+ if not self.streamlined:
+ self.streamline()
+ for e in self.ignoreExprs:
+ e.streamline()
+
+ if not self.keepTabs:
+ instring = _ustr(instring).expandtabs()
+ instrlen = len(instring)
+ loc = 0
+ preparseFn = self.preParse
+ parseFn = self._parse
+ ParserElement.resetCache()
+ matches = 0
+ try:
+ while loc <= instrlen and matches < maxMatches:
+ try:
+ preloc = preparseFn(instring, loc)
+ nextLoc, tokens = parseFn(instring, preloc, callPreParse=False)
+ except ParseException:
+ loc = preloc + 1
+ else:
+ if nextLoc > loc:
+ matches += 1
+ yield tokens, preloc, nextLoc
+ if overlap:
+ nextloc = preparseFn(instring, loc)
+ if nextloc > loc:
+ loc = nextLoc
+ else:
+ loc += 1
+ else:
+ loc = nextLoc
+ else:
+ loc = preloc + 1
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clearing out pyparsing internal stack trace
+ if getattr(exc, '__traceback__', None) is not None:
+ exc.__traceback__ = self._trim_traceback(exc.__traceback__)
+ raise exc
+
+ def transformString(self, instring):
+ """
+ Extension to :class:`scanString`, to modify matching text with modified tokens that may
+ be returned from a parse action. To use ``transformString``, define a grammar and
+ attach a parse action to it that modifies the returned token list.
+ Invoking ``transformString()`` on a target string will then scan for matches,
+ and replace the matched text patterns according to the logic in the parse
+ action. ``transformString()`` returns the resulting transformed string.
+
+ Example::
+
+ wd = Word(alphas)
+ wd.setParseAction(lambda toks: toks[0].title())
+
+ print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+
+ prints::
+
+ Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+ """
+ out = []
+ lastE = 0
+ # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+ # keep string locs straight between transformString and scanString
+ self.keepTabs = True
+ try:
+ for t, s, e in self.scanString(instring):
+ out.append(instring[lastE:s])
+ if t:
+ if isinstance(t, ParseResults):
+ out += t.asList()
+ elif isinstance(t, list):
+ out += t
+ else:
+ out.append(t)
+ lastE = e
+ out.append(instring[lastE:])
+ out = [o for o in out if o]
+ return "".join(map(_ustr, _flatten(out)))
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clearing out pyparsing internal stack trace
+ if getattr(exc, '__traceback__', None) is not None:
+ exc.__traceback__ = self._trim_traceback(exc.__traceback__)
+ raise exc
+
+ def searchString(self, instring, maxMatches=_MAX_INT):
+ """
+ Another extension to :class:`scanString`, simplifying the access to the tokens found
+ to match the given parse expression. May be called with optional
+ ``maxMatches`` argument, to clip searching after 'n' matches are found.
+
+ Example::
+
+ # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+ cap_word = Word(alphas.upper(), alphas.lower())
+
+ print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+ # the sum() builtin can be used to merge results into a single ParseResults object
+ print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+
+ prints::
+
+ [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+ ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+ """
+ try:
+ return ParseResults([t for t, s, e in self.scanString(instring, maxMatches)])
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clearing out pyparsing internal stack trace
+ if getattr(exc, '__traceback__', None) is not None:
+ exc.__traceback__ = self._trim_traceback(exc.__traceback__)
+ raise exc
+
+ def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+ """
+ Generator method to split a string using the given expression as a separator.
+ May be called with optional ``maxsplit`` argument, to limit the number of splits;
+ and the optional ``includeSeparators`` argument (default= ``False``), indicating
+ whether the separating matched text should be included in the split results.
+
+ Example::
+
+ punc = oneOf(list(".,;:/-!?"))
+ print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+
+ prints::
+
+ ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+ """
+ splits = 0
+ last = 0
+ for t, s, e in self.scanString(instring, maxMatches=maxsplit):
+ yield instring[last:s]
+ if includeSeparators:
+ yield t[0]
+ last = e
+ yield instring[last:]
+
+ def __add__(self, other):
+ """
+ Implementation of + operator - returns :class:`And`. Adding strings to a ParserElement
+ converts them to :class:`Literal`s by default.
+
+ Example::
+
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+
+ prints::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+ ``...`` may be used as a parse expression as a short form of :class:`SkipTo`.
+
+ Literal('start') + ... + Literal('end')
+
+ is equivalent to:
+
+ Literal('start') + SkipTo('end')("_skipped*") + Literal('end')
+
+ Note that the skipped text is returned with '_skipped' as a results name,
+ and to support having multiple skips in the same parser, the value returned is
+ a list of all skipped text.
+ """
+ if other is Ellipsis:
+ return _PendingSkip(self)
+
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return And([self, other])
+
+ def __radd__(self, other):
+ """
+ Implementation of + operator when left operand is not a :class:`ParserElement`
+ """
+ if other is Ellipsis:
+ return SkipTo(self)("_skipped*") + self
+
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other + self
+
+ def __sub__(self, other):
+ """
+ Implementation of - operator, returns :class:`And` with error stop
+ """
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return self + And._ErrorStop() + other
+
+ def __rsub__(self, other):
+ """
+ Implementation of - operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other - self
+
+ def __mul__(self, other):
+ """
+ Implementation of * operator, allows use of ``expr * 3`` in place of
+ ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer
+ tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples
+ may also include ``None`` as in:
+ - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent
+ to ``expr*n + ZeroOrMore(expr)``
+ (read as "at least n instances of ``expr``")
+ - ``expr*(None, n)`` is equivalent to ``expr*(0, n)``
+ (read as "0 to n instances of ``expr``")
+ - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)``
+ - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)``
+
+ Note that ``expr*(None, n)`` does not raise an exception if
+ more than n exprs exist in the input stream; that is,
+ ``expr*(None, n)`` does not enforce a maximum number of expr
+ occurrences. If this behavior is desired, then write
+ ``expr*(None, n) + ~expr``
+ """
+ if other is Ellipsis:
+ other = (0, None)
+ elif isinstance(other, tuple) and other[:1] == (Ellipsis,):
+ other = ((0, ) + other[1:] + (None,))[:2]
+
+ if isinstance(other, int):
+ minElements, optElements = other, 0
+ elif isinstance(other, tuple):
+ other = tuple(o if o is not Ellipsis else None for o in other)
+ other = (other + (None, None))[:2]
+ if other[0] is None:
+ other = (0, other[1])
+ if isinstance(other[0], int) and other[1] is None:
+ if other[0] == 0:
+ return ZeroOrMore(self)
+ if other[0] == 1:
+ return OneOrMore(self)
+ else:
+ return self * other[0] + ZeroOrMore(self)
+ elif isinstance(other[0], int) and isinstance(other[1], int):
+ minElements, optElements = other
+ optElements -= minElements
+ else:
+ raise TypeError("cannot multiply 'ParserElement' and ('%s', '%s') objects", type(other[0]), type(other[1]))
+ else:
+ raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
+
+ if minElements < 0:
+ raise ValueError("cannot multiply ParserElement by negative value")
+ if optElements < 0:
+ raise ValueError("second tuple value must be greater or equal to first tuple value")
+ if minElements == optElements == 0:
+ raise ValueError("cannot multiply ParserElement by 0 or (0, 0)")
+
+ if optElements:
+ def makeOptionalList(n):
+ if n > 1:
+ return Optional(self + makeOptionalList(n - 1))
+ else:
+ return Optional(self)
+ if minElements:
+ if minElements == 1:
+ ret = self + makeOptionalList(optElements)
+ else:
+ ret = And([self] * minElements) + makeOptionalList(optElements)
+ else:
+ ret = makeOptionalList(optElements)
+ else:
+ if minElements == 1:
+ ret = self
+ else:
+ ret = And([self] * minElements)
+ return ret
+
+ def __rmul__(self, other):
+ return self.__mul__(other)
+
+ def __or__(self, other):
+ """
+ Implementation of | operator - returns :class:`MatchFirst`
+ """
+ if other is Ellipsis:
+ return _PendingSkip(self, must_skip=True)
+
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return MatchFirst([self, other])
+
+ def __ror__(self, other):
+ """
+ Implementation of | operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other | self
+
+ def __xor__(self, other):
+ """
+ Implementation of ^ operator - returns :class:`Or`
+ """
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Or([self, other])
+
+ def __rxor__(self, other):
+ """
+ Implementation of ^ operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other ^ self
+
+ def __and__(self, other):
+ """
+ Implementation of & operator - returns :class:`Each`
+ """
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Each([self, other])
+
+ def __rand__(self, other):
+ """
+ Implementation of & operator when left operand is not a :class:`ParserElement`
+ """
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ if not isinstance(other, ParserElement):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other & self
+
+ def __invert__(self):
+ """
+ Implementation of ~ operator - returns :class:`NotAny`
+ """
+ return NotAny(self)
+
+ def __iter__(self):
+ # must implement __iter__ to override legacy use of sequential access to __getitem__ to
+ # iterate over a sequence
+ raise TypeError('%r object is not iterable' % self.__class__.__name__)
+
+ def __getitem__(self, key):
+ """
+ use ``[]`` indexing notation as a short form for expression repetition:
+ - ``expr[n]`` is equivalent to ``expr*n``
+ - ``expr[m, n]`` is equivalent to ``expr*(m, n)``
+ - ``expr[n, ...]`` or ``expr[n,]`` is equivalent
+ to ``expr*n + ZeroOrMore(expr)``
+ (read as "at least n instances of ``expr``")
+ - ``expr[..., n]`` is equivalent to ``expr*(0, n)``
+ (read as "0 to n instances of ``expr``")
+ - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)``
+ - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)``
+ ``None`` may be used in place of ``...``.
+
+        Note that ``expr[..., n]`` and ``expr[m, n]`` do not raise an exception
+ if more than ``n`` ``expr``s exist in the input stream. If this behavior is
+ desired, then write ``expr[..., n] + ~expr``.
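+
+        Example (an illustrative sketch)::
+
+            digits = Word(nums)
+            digits[2].parseString("12 34")       # -> ['12', '34']
+            digits[1, ...].parseString("1 2 3")  # -> ['1', '2', '3']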
+ """
+
+ # convert single arg keys to tuples
+ try:
+ if isinstance(key, str):
+ key = (key,)
+ iter(key)
+ except TypeError:
+ key = (key, key)
+
+ if len(key) > 2:
+ warnings.warn("only 1 or 2 index arguments supported ({0}{1})".format(key[:5],
+ '... [{0}]'.format(len(key))
+ if len(key) > 5 else ''))
+
+ # clip to 2 elements
+ ret = self * tuple(key[:2])
+ return ret
+
+ def __call__(self, name=None):
+ """
+ Shortcut for :class:`setResultsName`, with ``listAllMatches=False``.
+
+ If ``name`` is given with a trailing ``'*'`` character, then ``listAllMatches`` will be
+ passed as ``True``.
+
+        If ``name`` is omitted, same as calling :class:`copy`.
+
+ Example::
+
+ # these are equivalent
+ userdata = Word(alphas).setResultsName("name") + Word(nums + "-").setResultsName("socsecno")
+ userdata = Word(alphas)("name") + Word(nums + "-")("socsecno")
+ """
+ if name is not None:
+ return self._setResultsName(name)
+ else:
+ return self.copy()
+
+ def suppress(self):
+ """
+ Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from
+ cluttering up returned output.
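+
+        Example (illustrative only)::
+
+            wd = Word(alphas)
+            (wd + ',' + wd).parseString("Hello, World")            # -> ['Hello', ',', 'World']
+            (wd + Suppress(',') + wd).parseString("Hello, World")  # -> ['Hello', 'World']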
+ """
+ return Suppress(self)
+
+ def leaveWhitespace(self):
+ """
+ Disables the skipping of whitespace before matching the characters in the
+ :class:`ParserElement`'s defined pattern. This is normally only used internally by
+ the pyparsing module, but may be needed in some whitespace-sensitive grammars.
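+
+        Example (a sketch of one whitespace-sensitive use)::
+
+            # require the word to follow the colon with no intervening space
+            expr = Literal("name:") + Word(alphas).leaveWhitespace()
+            expr.parseString("name:Bob")    # -> ['name:', 'Bob']
+            # "name: Bob" would raise a ParseException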
+ """
+ self.skipWhitespace = False
+ return self
+
+ def setWhitespaceChars(self, chars):
+ """
+ Overrides the default whitespace chars
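+
+        Example (a brief sketch; newlines become significant once excluded)::
+
+            # skip only spaces and tabs between tokens, never newlines
+            word = Word(alphas).setWhitespaceChars(" \t")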
+ """
+ self.skipWhitespace = True
+ self.whiteChars = chars
+ self.copyDefaultWhiteChars = False
+ return self
+
+ def parseWithTabs(self):
+ """
+ Overrides default behavior to expand ``<TAB>``s to spaces before parsing the input string.
+ Must be called before ``parseString`` when the input grammar contains elements that
+ match ``<TAB>`` characters.
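+
+        Example (an illustrative sketch)::
+
+            expr = Literal("name:") + White("\t") + Word(alphas)
+            # without parseWithTabs(), the tab would be expanded to spaces and not match
+            expr.parseWithTabs().parseString("name:\tBob")  # -> ['name:', '\t', 'Bob']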
+ """
+ self.keepTabs = True
+ return self
+
+ def ignore(self, other):
+ """
+        Define an expression to be ignored (e.g., comments) while doing pattern
+ matching; may be called repeatedly, to define multiple comment or other
+ ignorable patterns.
+
+ Example::
+
+ patt = OneOrMore(Word(alphas))
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+
+ patt.ignore(cStyleComment)
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+ """
+ if isinstance(other, basestring):
+ other = Suppress(other)
+
+ if isinstance(other, Suppress):
+ if other not in self.ignoreExprs:
+ self.ignoreExprs.append(other)
+ else:
+ self.ignoreExprs.append(Suppress(other.copy()))
+ return self
+
+ def setDebugActions(self, startAction, successAction, exceptionAction):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ """
+ self.debugActions = (startAction or _defaultStartDebugAction,
+ successAction or _defaultSuccessDebugAction,
+ exceptionAction or _defaultExceptionDebugAction)
+ self.debug = True
+ return self
+
+ def setDebug(self, flag=True):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ Set ``flag`` to True to enable, False to disable.
+
+ Example::
+
+ wd = Word(alphas).setName("alphaword")
+ integer = Word(nums).setName("numword")
+ term = wd | integer
+
+ # turn on debugging for wd
+ wd.setDebug()
+
+ OneOrMore(term).parseString("abc 123 xyz 890")
+
+ prints::
+
+ Match alphaword at loc 0(1,1)
+ Matched alphaword -> ['abc']
+ Match alphaword at loc 3(1,4)
+ Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+ Match alphaword at loc 7(1,8)
+ Matched alphaword -> ['xyz']
+ Match alphaword at loc 11(1,12)
+ Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+ Match alphaword at loc 15(1,16)
+ Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+ The output shown is that produced by the default debug actions - custom debug actions can be
+ specified using :class:`setDebugActions`. Prior to attempting
+ to match the ``wd`` expression, the debugging message ``"Match <exprname> at loc <n>(<line>,<col>)"``
+ is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"``
+ message is shown. Also note the use of :class:`setName` to assign a human-readable name to the expression,
+ which makes debugging and exception messages easier to understand - for instance, the default
+ name created for the :class:`Word` expression without calling ``setName`` is ``"W:(ABCD...)"``.
+ """
+ if flag:
+ self.setDebugActions(_defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction)
+ else:
+ self.debug = False
+ return self
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return _ustr(self)
+
+ def streamline(self):
+ self.streamlined = True
+ self.strRepr = None
+ return self
+
+ def checkRecursion(self, parseElementList):
+ pass
+
+ def validate(self, validateTrace=None):
+ """
+ Check defined expressions for valid structure, check for infinite recursive definitions.
+ """
+ self.checkRecursion([])
+
+ def parseFile(self, file_or_filename, parseAll=False):
+ """
+ Execute the parse expression on the given file or filename.
+ If a filename is specified (instead of a file object),
+ the entire file is opened, read, and closed before parsing.
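+
+        Example (a sketch; ``config.txt`` is a hypothetical file name)::
+
+            ini_entry = Word(alphas) + '=' + Word(alphanums)
+            result = ini_entry.parseFile("config.txt")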
+ """
+ try:
+ file_contents = file_or_filename.read()
+ except AttributeError:
+ with open(file_or_filename, "r") as f:
+ file_contents = f.read()
+ try:
+ return self.parseString(file_contents, parseAll)
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clearing out pyparsing internal stack trace
+ if getattr(exc, '__traceback__', None) is not None:
+ exc.__traceback__ = self._trim_traceback(exc.__traceback__)
+ raise exc
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ elif isinstance(other, basestring):
+ return self.matches(other)
+ elif isinstance(other, ParserElement):
+ return vars(self) == vars(other)
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __hash__(self):
+ return id(self)
+
+ def __req__(self, other):
+ return self == other
+
+ def __rne__(self, other):
+ return not (self == other)
+
+ def matches(self, testString, parseAll=True):
+ """
+ Method for quick testing of a parser against a test string. Good for simple
+        inline microtests of sub-expressions while building up a larger parser.
+
+ Parameters:
+         - testString - string to test against this expression for a match
+ - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
+
+ Example::
+
+ expr = Word(nums)
+ assert expr.matches("100")
+ """
+ try:
+ self.parseString(_ustr(testString), parseAll=parseAll)
+ return True
+ except ParseBaseException:
+ return False
+
+ def runTests(self, tests, parseAll=True, comment='#',
+ fullDump=True, printResults=True, failureTests=False, postParse=None,
+ file=None):
+ """
+ Execute the parse expression on a series of test strings, showing each
+ test, the parsed results or where the parse failed. Quick and easy way to
+ run a parse expression against a list of sample strings.
+
+ Parameters:
+ - tests - a list of separate test strings, or a multiline string of test strings
+ - parseAll - (default= ``True``) - flag to pass to :class:`parseString` when running tests
+ - comment - (default= ``'#'``) - expression for indicating embedded comments in the test
+ string; pass None to disable comment filtering
+ - fullDump - (default= ``True``) - dump results as list followed by results names in nested outline;
+ if False, only dump nested list
+ - printResults - (default= ``True``) prints test output to stdout
+ - failureTests - (default= ``False``) indicates if these tests are expected to fail parsing
+ - postParse - (default= ``None``) optional callback for successful parse results; called as
+ `fn(test_string, parse_results)` and returns a string to be added to the test output
+ - file - (default=``None``) optional file-like object to which test output will be written;
+ if None, will default to ``sys.stdout``
+
+ Returns: a (success, results) tuple, where success indicates that all tests succeeded
+ (or failed if ``failureTests`` is True), and the results contain a list of lines of each
+ test's output
+
+ Example::
+
+ number_expr = pyparsing_common.number.copy()
+
+ result = number_expr.runTests('''
+ # unsigned integer
+ 100
+ # negative integer
+ -100
+ # float with scientific notation
+ 6.02e23
+ # integer with scientific notation
+ 1e-12
+ ''')
+ print("Success" if result[0] else "Failed!")
+
+ result = number_expr.runTests('''
+ # stray character
+ 100Z
+ # missing leading digit before '.'
+ -.100
+ # too many '.'
+ 3.14.159
+ ''', failureTests=True)
+ print("Success" if result[0] else "Failed!")
+
+ prints::
+
+ # unsigned integer
+ 100
+ [100]
+
+ # negative integer
+ -100
+ [-100]
+
+ # float with scientific notation
+ 6.02e23
+ [6.02e+23]
+
+ # integer with scientific notation
+ 1e-12
+ [1e-12]
+
+ Success
+
+ # stray character
+ 100Z
+ ^
+ FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+ # missing leading digit before '.'
+ -.100
+ ^
+ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+ # too many '.'
+ 3.14.159
+ ^
+ FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+ Success
+
+ Each test string must be on a single line. If you want to test a string that spans multiple
+ lines, create a test like this::
+
+ expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
+
+ (Note that this is a raw string literal, you must include the leading 'r'.)
+ """
+ if isinstance(tests, basestring):
+ tests = list(map(str.strip, tests.rstrip().splitlines()))
+ if isinstance(comment, basestring):
+ comment = Literal(comment)
+ if file is None:
+ file = sys.stdout
+ print_ = file.write
+
+ allResults = []
+ comments = []
+ success = True
+ NL = Literal(r'\n').addParseAction(replaceWith('\n')).ignore(quotedString)
+ BOM = u'\ufeff'
+ for t in tests:
+            if (comment is not None and comment.matches(t, False)) or (comments and not t):
+ comments.append(t)
+ continue
+ if not t:
+ continue
+ out = ['\n' + '\n'.join(comments) if comments else '', t]
+ comments = []
+ try:
+ # convert newline marks to actual newlines, and strip leading BOM if present
+ t = NL.transformString(t.lstrip(BOM))
+ result = self.parseString(t, parseAll=parseAll)
+ except ParseBaseException as pe:
+ fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+ if '\n' in t:
+ out.append(line(pe.loc, t))
+ out.append(' ' * (col(pe.loc, t) - 1) + '^' + fatal)
+ else:
+ out.append(' ' * pe.loc + '^' + fatal)
+ out.append("FAIL: " + str(pe))
+ success = success and failureTests
+ result = pe
+ except Exception as exc:
+ out.append("FAIL-EXCEPTION: " + str(exc))
+ success = success and failureTests
+ result = exc
+ else:
+ success = success and not failureTests
+ if postParse is not None:
+ try:
+ pp_value = postParse(t, result)
+ if pp_value is not None:
+ if isinstance(pp_value, ParseResults):
+ out.append(pp_value.dump())
+ else:
+ out.append(str(pp_value))
+ else:
+ out.append(result.dump())
+ except Exception as e:
+ out.append(result.dump(full=fullDump))
+ out.append("{0} failed: {1}: {2}".format(postParse.__name__, type(e).__name__, e))
+ else:
+ out.append(result.dump(full=fullDump))
+
+ if printResults:
+ if fullDump:
+ out.append('')
+ print_('\n'.join(out))
+
+ allResults.append((t, result))
+
+ return success, allResults
+
+
+class _PendingSkip(ParserElement):
+    # internal placeholder class to hold a place where '...' is added to a parser element;
+    # once another ParserElement is added, this placeholder will be replaced with a SkipTo
+ def __init__(self, expr, must_skip=False):
+ super(_PendingSkip, self).__init__()
+ self.strRepr = str(expr + Empty()).replace('Empty', '...')
+ self.name = self.strRepr
+ self.anchor = expr
+ self.must_skip = must_skip
+
+ def __add__(self, other):
+ skipper = SkipTo(other).setName("...")("_skipped*")
+ if self.must_skip:
+ def must_skip(t):
+ if not t._skipped or t._skipped.asList() == ['']:
+ del t[0]
+ t.pop("_skipped", None)
+ def show_skip(t):
+ if t._skipped.asList()[-1:] == ['']:
+ skipped = t.pop('_skipped')
+ t['_skipped'] = 'missing <' + repr(self.anchor) + '>'
+ return (self.anchor + skipper().addParseAction(must_skip)
+ | skipper().addParseAction(show_skip)) + other
+
+ return self.anchor + skipper + other
+
+ def __repr__(self):
+ return self.strRepr
+
+ def parseImpl(self, *args):
+ raise Exception("use of `...` expression without following SkipTo target expression")
+
+
+class Token(ParserElement):
+ """Abstract :class:`ParserElement` subclass, for defining atomic
+ matching patterns.
+ """
+ def __init__(self):
+ super(Token, self).__init__(savelist=False)
+
+
+class Empty(Token):
+ """An empty token, will always match.
+ """
+ def __init__(self):
+ super(Empty, self).__init__()
+ self.name = "Empty"
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+
+class NoMatch(Token):
+ """A token that will never match.
+ """
+ def __init__(self):
+ super(NoMatch, self).__init__()
+ self.name = "NoMatch"
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.errmsg = "Unmatchable token"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+ """Token to exactly match a specified string.
+
+ Example::
+
+ Literal('blah').parseString('blah') # -> ['blah']
+ Literal('blah').parseString('blahfooblah') # -> ['blah']
+ Literal('blah').parseString('bla') # -> Exception: Expected "blah"
+
+ For case-insensitive matching, use :class:`CaselessLiteral`.
+
+ For keyword matching (force word break before and after the matched string),
+ use :class:`Keyword` or :class:`CaselessKeyword`.
+ """
+ def __init__(self, matchString):
+ super(Literal, self).__init__()
+ self.match = matchString
+ self.matchLen = len(matchString)
+ try:
+ self.firstMatchChar = matchString[0]
+ except IndexError:
+ warnings.warn("null string passed to Literal; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+ self.__class__ = Empty
+ self.name = '"%s"' % _ustr(self.match)
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+
+ # Performance tuning: modify __class__ to select
+ # a parseImpl optimized for single-character check
+ if self.matchLen == 1 and type(self) is Literal:
+ self.__class__ = _SingleCharLiteral
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] == self.firstMatchChar and instring.startswith(self.match, loc):
+ return loc + self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class _SingleCharLiteral(Literal):
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] == self.firstMatchChar:
+ return loc + 1, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+ """Token to exactly match a specified string as a keyword, that is,
+ it must be immediately followed by a non-keyword character. Compare
+ with :class:`Literal`:
+
+ - ``Literal("if")`` will match the leading ``'if'`` in
+ ``'ifAndOnlyIf'``.
+ - ``Keyword("if")`` will not; it will only match the leading
+ ``'if'`` in ``'if x=1'``, or ``'if(y==2)'``
+
+ Accepts two optional constructor arguments in addition to the
+ keyword string:
+
+ - ``identChars`` is a string of characters that would be valid
+ identifier characters, defaulting to all alphanumerics + "_" and
+ "$"
+ - ``caseless`` allows case-insensitive matching, default is ``False``.
+
+ Example::
+
+ Keyword("start").parseString("start") # -> ['start']
+ Keyword("start").parseString("starting") # -> Exception
+
+ For case-insensitive matching, use :class:`CaselessKeyword`.
+ """
+ DEFAULT_KEYWORD_CHARS = alphanums + "_$"
+
+ def __init__(self, matchString, identChars=None, caseless=False):
+ super(Keyword, self).__init__()
+ if identChars is None:
+ identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ self.match = matchString
+ self.matchLen = len(matchString)
+ try:
+ self.firstMatchChar = matchString[0]
+ except IndexError:
+ warnings.warn("null string passed to Keyword; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+ self.name = '"%s"' % self.match
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+ self.caseless = caseless
+ if caseless:
+ self.caselessmatch = matchString.upper()
+ identChars = identChars.upper()
+ self.identChars = set(identChars)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if self.caseless:
+ if ((instring[loc:loc + self.matchLen].upper() == self.caselessmatch)
+ and (loc >= len(instring) - self.matchLen
+ or instring[loc + self.matchLen].upper() not in self.identChars)
+ and (loc == 0
+ or instring[loc - 1].upper() not in self.identChars)):
+ return loc + self.matchLen, self.match
+
+ else:
+ if instring[loc] == self.firstMatchChar:
+ if ((self.matchLen == 1 or instring.startswith(self.match, loc))
+ and (loc >= len(instring) - self.matchLen
+ or instring[loc + self.matchLen] not in self.identChars)
+ and (loc == 0 or instring[loc - 1] not in self.identChars)):
+ return loc + self.matchLen, self.match
+
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ def copy(self):
+ c = super(Keyword, self).copy()
+ c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ return c
+
+ @staticmethod
+ def setDefaultKeywordChars(chars):
+ """Overrides the default Keyword chars
+ """
+ Keyword.DEFAULT_KEYWORD_CHARS = chars
+
+class CaselessLiteral(Literal):
+ """Token to match a specified string, ignoring case of letters.
+ Note: the matched results will always be in the case of the given
+ match string, NOT the case of the input text.
+
+ Example::
+
+ OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+
+ (Contrast with example for :class:`CaselessKeyword`.)
+ """
+ def __init__(self, matchString):
+ super(CaselessLiteral, self).__init__(matchString.upper())
+ # Preserve the defining literal.
+ self.returnString = matchString
+ self.name = "'%s'" % self.returnString
+ self.errmsg = "Expected " + self.name
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc:loc + self.matchLen].upper() == self.match:
+ return loc + self.matchLen, self.returnString
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+ """
+ Caseless version of :class:`Keyword`.
+
+ Example::
+
+ OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+
+ (Contrast with example for :class:`CaselessLiteral`.)
+ """
+ def __init__(self, matchString, identChars=None):
+ super(CaselessKeyword, self).__init__(matchString, identChars, caseless=True)
+
+class CloseMatch(Token):
+ """A variation on :class:`Literal` which matches "close" matches,
+ that is, strings with at most 'n' mismatching characters.
+ :class:`CloseMatch` takes parameters:
+
+ - ``match_string`` - string to be matched
+ - ``maxMismatches`` - (``default=1``) maximum number of
+ mismatches allowed to count as a match
+
+ The results from a successful parse will contain the matched text
+ from the input string and the following named results:
+
+ - ``mismatches`` - a list of the positions within the
+ match_string where mismatches were found
+ - ``original`` - the original match_string used to compare
+ against the input string
+
+ If ``mismatches`` is an empty list, then the match was an exact
+ match.
+
+ Example::
+
+ patt = CloseMatch("ATCATCGAATGGA")
+ patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+ patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+ # exact match
+ patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+ # close match allowing up to 2 mismatches
+ patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+ patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+ """
+ def __init__(self, match_string, maxMismatches=1):
+ super(CloseMatch, self).__init__()
+ self.name = match_string
+ self.match_string = match_string
+ self.maxMismatches = maxMismatches
+ self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+ self.mayIndexError = False
+ self.mayReturnEmpty = False
+
+ def parseImpl(self, instring, loc, doActions=True):
+ start = loc
+ instrlen = len(instring)
+ maxloc = start + len(self.match_string)
+
+ if maxloc <= instrlen:
+ match_string = self.match_string
+ match_stringloc = 0
+ mismatches = []
+ maxMismatches = self.maxMismatches
+
+ for match_stringloc, s_m in enumerate(zip(instring[loc:maxloc], match_string)):
+ src, mat = s_m
+ if src != mat:
+ mismatches.append(match_stringloc)
+ if len(mismatches) > maxMismatches:
+ break
+ else:
+ loc = match_stringloc + 1
+ results = ParseResults([instring[start:loc]])
+ results['original'] = match_string
+ results['mismatches'] = mismatches
+ return loc, results
+
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+ """Token for matching words composed of allowed character sets.
+    Defined with a string containing all allowed initial characters, an
+ optional string containing allowed body characters (if omitted,
+ defaults to the initial character set), and an optional minimum,
+ maximum, and/or exact length. The default value for ``min`` is
+ 1 (a minimum value < 1 is not valid); the default values for
+ ``max`` and ``exact`` are 0, meaning no maximum or exact
+ length restriction. An optional ``excludeChars`` parameter can
+ list characters that might be found in the input ``bodyChars``
+ string; useful to define a word of all printables except for one or
+ two characters, for instance.
+
+ :class:`srange` is useful for defining custom character set strings
+ for defining ``Word`` expressions, using range notation from
+ regular expression character sets.
+
+ A common mistake is to use :class:`Word` to match a specific literal
+ string, as in ``Word("Address")``. Remember that :class:`Word`
+ uses the string argument to define *sets* of matchable characters.
+ This expression would match "Add", "AAA", "dAred", or any other word
+ made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
+ exact literal string, use :class:`Literal` or :class:`Keyword`.
+
+ pyparsing includes helper strings for building Words:
+
+ - :class:`alphas`
+ - :class:`nums`
+ - :class:`alphanums`
+ - :class:`hexnums`
+      - :class:`alphas8bit` (alphabetic characters in the Latin-1 range 128-255
+        - accented, tilded, umlauted, etc.)
+      - :class:`punc8bit` (non-alphabetic characters in the Latin-1 range
+        128-255 - currency, symbols, superscripts, diacriticals, etc.)
+ - :class:`printables` (any non-whitespace character)
+
+ Example::
+
+ # a word composed of digits
+ integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+
+ # a word with a leading capital, and zero or more lowercase
+ capital_word = Word(alphas.upper(), alphas.lower())
+
+ # hostnames are alphanumeric, with leading alpha, and '-'
+ hostname = Word(alphas, alphanums + '-')
+
+ # roman numeral (not a strict parser, accepts invalid mix of characters)
+ roman = Word("IVXLCDM")
+
+ # any string of non-whitespace characters, except for ','
+ csv_value = Word(printables, excludeChars=",")
+ """
+ def __init__(self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None):
+ super(Word, self).__init__()
+ if excludeChars:
+ excludeChars = set(excludeChars)
+ initChars = ''.join(c for c in initChars if c not in excludeChars)
+ if bodyChars:
+ bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+ self.initCharsOrig = initChars
+ self.initChars = set(initChars)
+ if bodyChars:
+ self.bodyCharsOrig = bodyChars
+ self.bodyChars = set(bodyChars)
+ else:
+ self.bodyCharsOrig = initChars
+ self.bodyChars = set(initChars)
+
+ self.maxSpecified = max > 0
+
+ if min < 1:
+ raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.asKeyword = asKeyword
+
+ if ' ' not in self.initCharsOrig + self.bodyCharsOrig and (min == 1 and max == 0 and exact == 0):
+ if self.bodyCharsOrig == self.initCharsOrig:
+ self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+ elif len(self.initCharsOrig) == 1:
+ self.reString = "%s[%s]*" % (re.escape(self.initCharsOrig),
+ _escapeRegexRangeChars(self.bodyCharsOrig),)
+ else:
+ self.reString = "[%s][%s]*" % (_escapeRegexRangeChars(self.initCharsOrig),
+ _escapeRegexRangeChars(self.bodyCharsOrig),)
+ if self.asKeyword:
+ self.reString = r"\b" + self.reString + r"\b"
+
+ try:
+ self.re = re.compile(self.reString)
+ except Exception:
+ self.re = None
+ else:
+ self.re_match = self.re.match
+ self.__class__ = _WordRegex
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] not in self.initChars:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ instrlen = len(instring)
+ bodychars = self.bodyChars
+ maxloc = start + self.maxLen
+ maxloc = min(maxloc, instrlen)
+ while loc < maxloc and instring[loc] in bodychars:
+ loc += 1
+
+ throwException = False
+ if loc - start < self.minLen:
+ throwException = True
+ elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+ throwException = True
+ elif self.asKeyword:
+ if (start > 0 and instring[start - 1] in bodychars
+ or loc < instrlen and instring[loc] in bodychars):
+ throwException = True
+
+ if throwException:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+ def __str__(self):
+ try:
+ return super(Word, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+
+ def charsAsStr(s):
+ if len(s) > 4:
+ return s[:4] + "..."
+ else:
+ return s
+
+ if self.initCharsOrig != self.bodyCharsOrig:
+ self.strRepr = "W:(%s, %s)" % (charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig))
+ else:
+ self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+ return self.strRepr
+
+class _WordRegex(Word):
+ def parseImpl(self, instring, loc, doActions=True):
+ result = self.re_match(instring, loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ return loc, result.group()
+
+
+class Char(_WordRegex):
+ """A short-cut class for defining ``Word(characters, exact=1)``,
+    used when you want to match any single character from a string of
+    characters.
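+
+    Example (a minimal sketch)::
+
+        vowel = Char("aeiou")
+        vowel.parseString("orange")  # -> ['o']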
+ """
+ def __init__(self, charset, asKeyword=False, excludeChars=None):
+ super(Char, self).__init__(charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars)
+ self.reString = "[%s]" % _escapeRegexRangeChars(''.join(self.initChars))
+ if asKeyword:
+ self.reString = r"\b%s\b" % self.reString
+ self.re = re.compile(self.reString)
+ self.re_match = self.re.match
+
+
+class Regex(Token):
+ r"""Token for matching strings that match a given regular
+ expression. Defined with string specifying the regular expression in
+ a form recognized by the stdlib Python `re module <https://docs.python.org/3/library/re.html>`_.
+ If the given regex contains named groups (defined using ``(?P<name>...)``),
+ these will be preserved as named parse results.
+
+    If instead of the Python stdlib re module you wish to use a different RE module
+    (such as the `regex` module), you can do so by building your Regex object
+    with a compiled RE that was compiled using `regex`:
+
+ Example::
+
+ realnum = Regex(r"[+-]?\d+\.\d*")
+ date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+ # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+ roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+
+ # use regex module instead of stdlib re module to construct a Regex using
+ # a compiled regular expression
+ import regex
+ parser = pp.Regex(regex.compile(r'[0-9]'))
+
+ """
+ def __init__(self, pattern, flags=0, asGroupList=False, asMatch=False):
+ """The parameters ``pattern`` and ``flags`` are passed
+ to the ``re.compile()`` function as-is. See the Python
+        `re module <https://docs.python.org/3/library/re.html>`_ for an
+ explanation of the acceptable patterns and flags.
+ """
+ super(Regex, self).__init__()
+
+ if isinstance(pattern, basestring):
+ if not pattern:
+ warnings.warn("null string passed to Regex; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+
+ self.pattern = pattern
+ self.flags = flags
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ except sre_constants.error:
+ warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+ SyntaxWarning, stacklevel=2)
+ raise
+
+ elif hasattr(pattern, 'pattern') and hasattr(pattern, 'match'):
+ self.re = pattern
+ self.pattern = self.reString = pattern.pattern
+ self.flags = flags
+
+ else:
+ raise TypeError("Regex may only be constructed with a string or a compiled RE object")
+
+ self.re_match = self.re.match
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = self.re_match("") is not None
+ self.asGroupList = asGroupList
+ self.asMatch = asMatch
+ if self.asGroupList:
+ self.parseImpl = self.parseImplAsGroupList
+ if self.asMatch:
+ self.parseImpl = self.parseImplAsMatch
+
+ def parseImpl(self, instring, loc, doActions=True):
+ result = self.re_match(instring, loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = ParseResults(result.group())
+ d = result.groupdict()
+ if d:
+ for k, v in d.items():
+ ret[k] = v
+ return loc, ret
+
+ def parseImplAsGroupList(self, instring, loc, doActions=True):
+ result = self.re_match(instring, loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result.groups()
+ return loc, ret
+
+ def parseImplAsMatch(self, instring, loc, doActions=True):
+ result = self.re_match(instring, loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result
+ return loc, ret
+
+ def __str__(self):
+ try:
+ return super(Regex, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+ return self.strRepr
+
+ def sub(self, repl):
+ r"""
+ Return Regex with an attached parse action to transform the parsed
+ result as if called using `re.sub(expr, repl, string) <https://docs.python.org/3/library/re.html#re.sub>`_.
+
+ Example::
+
+ make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2</\1>")
+ print(make_html.transformString("h1:main title:"))
+ # prints "<h1>main title</h1>"
+ """
+ if self.asGroupList:
+ warnings.warn("cannot use sub() with Regex(asGroupList=True)",
+ SyntaxWarning, stacklevel=2)
+ raise SyntaxError()
+
+ if self.asMatch and callable(repl):
+ warnings.warn("cannot use sub() with a callable with Regex(asMatch=True)",
+ SyntaxWarning, stacklevel=2)
+ raise SyntaxError()
+
+ if self.asMatch:
+ def pa(tokens):
+ return tokens[0].expand(repl)
+ else:
+ def pa(tokens):
+ return self.re.sub(repl, tokens[0])
+ return self.addParseAction(pa)
+
+class QuotedString(Token):
+ r"""
+ Token for matching strings that are delimited by quoting characters.
+
+ Defined with the following parameters:
+
+ - quoteChar - string of one or more characters defining the
+ quote delimiting string
+ - escChar - character to escape quotes, typically backslash
+ (default= ``None``)
+ - escQuote - special quote sequence to escape an embedded quote
+ string (such as SQL's ``""`` to escape an embedded ``"``)
+ (default= ``None``)
+ - multiline - boolean indicating whether quotes can span
+ multiple lines (default= ``False``)
+ - unquoteResults - boolean indicating whether the matched text
+ should be unquoted (default= ``True``)
+ - endQuoteChar - string of one or more characters defining the
+ end of the quote delimited string (default= ``None`` => same as
+ quoteChar)
+ - convertWhitespaceEscapes - convert escaped whitespace
+ (``'\t'``, ``'\n'``, etc.) to actual whitespace
+ (default= ``True``)
+
+ Example::
+
+ qs = QuotedString('"')
+ print(qs.searchString('lsjdf "This is the quote" sldjf'))
+ complex_qs = QuotedString('{{', endQuoteChar='}}')
+ print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+ sql_qs = QuotedString('"', escQuote='""')
+ print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+
+ prints::
+
+ [['This is the quote']]
+ [['This is the "quote"']]
+ [['This is the quote with "embedded" quotes']]
+ """
+ def __init__(self, quoteChar, escChar=None, escQuote=None, multiline=False,
+ unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+ super(QuotedString, self).__init__()
+
+        # remove white space from quote chars - won't work anyway
+ quoteChar = quoteChar.strip()
+ if not quoteChar:
+ warnings.warn("quoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
+ raise SyntaxError()
+
+ if endQuoteChar is None:
+ endQuoteChar = quoteChar
+ else:
+ endQuoteChar = endQuoteChar.strip()
+ if not endQuoteChar:
+ warnings.warn("endQuoteChar cannot be the empty string", SyntaxWarning, stacklevel=2)
+ raise SyntaxError()
+
+ self.quoteChar = quoteChar
+ self.quoteCharLen = len(quoteChar)
+ self.firstQuoteChar = quoteChar[0]
+ self.endQuoteChar = endQuoteChar
+ self.endQuoteCharLen = len(endQuoteChar)
+ self.escChar = escChar
+ self.escQuote = escQuote
+ self.unquoteResults = unquoteResults
+ self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+ if multiline:
+ self.flags = re.MULTILINE | re.DOTALL
+ self.pattern = r'%s(?:[^%s%s]' % (re.escape(self.quoteChar),
+ _escapeRegexRangeChars(self.endQuoteChar[0]),
+ (escChar is not None and _escapeRegexRangeChars(escChar) or ''))
+ else:
+ self.flags = 0
+ self.pattern = r'%s(?:[^%s\n\r%s]' % (re.escape(self.quoteChar),
+ _escapeRegexRangeChars(self.endQuoteChar[0]),
+ (escChar is not None and _escapeRegexRangeChars(escChar) or ''))
+ if len(self.endQuoteChar) > 1:
+ self.pattern += (
+ '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+ _escapeRegexRangeChars(self.endQuoteChar[i]))
+ for i in range(len(self.endQuoteChar) - 1, 0, -1)) + ')')
+
+ if escQuote:
+ self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+ if escChar:
+ self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+ self.escCharReplacePattern = re.escape(self.escChar) + "(.)"
+ self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ self.re_match = self.re.match
+ except sre_constants.error:
+ warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+ SyntaxWarning, stacklevel=2)
+ raise
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = True
+
+ def parseImpl(self, instring, loc, doActions=True):
+ result = instring[loc] == self.firstQuoteChar and self.re_match(instring, loc) or None
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result.group()
+
+ if self.unquoteResults:
+
+ # strip off quotes
+ ret = ret[self.quoteCharLen: -self.endQuoteCharLen]
+
+ if isinstance(ret, basestring):
+ # replace escaped whitespace
+ if '\\' in ret and self.convertWhitespaceEscapes:
+ ws_map = {
+ r'\t': '\t',
+ r'\n': '\n',
+ r'\f': '\f',
+ r'\r': '\r',
+ }
+ for wslit, wschar in ws_map.items():
+ ret = ret.replace(wslit, wschar)
+
+ # replace escaped characters
+ if self.escChar:
+ ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+ # replace escaped quotes
+ if self.escQuote:
+ ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+ return loc, ret
+
+ def __str__(self):
+ try:
+ return super(QuotedString, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+ return self.strRepr
+
+
+class CharsNotIn(Token):
+ """Token for matching words composed of characters *not* in a given
+ set (will include whitespace in matched characters if not listed in
+    the provided exclusion set - see example). Defined with a string
+ containing all disallowed characters, and an optional minimum,
+ maximum, and/or exact length. The default value for ``min`` is
+ 1 (a minimum value < 1 is not valid); the default values for
+ ``max`` and ``exact`` are 0, meaning no maximum or exact
+ length restriction.
+
+ Example::
+
+ # define a comma-separated-value as anything that is not a ','
+ csv_value = CharsNotIn(',')
+ print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+
+ prints::
+
+ ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+ """
+ def __init__(self, notChars, min=1, max=0, exact=0):
+ super(CharsNotIn, self).__init__()
+ self.skipWhitespace = False
+ self.notChars = notChars
+
+ if min < 1:
+ raise ValueError("cannot specify a minimum length < 1; use "
+ "Optional(CharsNotIn()) if zero-length char group is permitted")
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = (self.minLen == 0)
+ self.mayIndexError = False
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] in self.notChars:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ notchars = self.notChars
+ maxlen = min(start + self.maxLen, len(instring))
+ while loc < maxlen and instring[loc] not in notchars:
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+ def __str__(self):
+ try:
+ return super(CharsNotIn, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ if len(self.notChars) > 4:
+ self.strRepr = "!W:(%s...)" % self.notChars[:4]
+ else:
+ self.strRepr = "!W:(%s)" % self.notChars
+
+ return self.strRepr
+
+class White(Token):
+ """Special matching class for matching whitespace. Normally,
+ whitespace is ignored by pyparsing grammars. This class is included
+ when some whitespace structures are significant. Define with
+ a string containing the whitespace characters to be matched; default
+ is ``" \\t\\r\\n"``. Also takes optional ``min``,
+ ``max``, and ``exact`` arguments, as defined for the
+ :class:`Word` class.
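+
+    Example (an illustrative sketch; see also :class:`parseWithTabs`)::
+
+        row = Word(alphas) + White("\t").suppress() + Word(nums)
+        row.parseWithTabs().parseString("age\t42")  # -> ['age', '42']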
+ """
+ whiteStrs = {
+ ' ' : '<SP>',
+ '\t': '<TAB>',
+ '\n': '<LF>',
+ '\r': '<CR>',
+ '\f': '<FF>',
+ u'\u00A0': '<NBSP>',
+ u'\u1680': '<OGHAM_SPACE_MARK>',
+ u'\u180E': '<MONGOLIAN_VOWEL_SEPARATOR>',
+ u'\u2000': '<EN_QUAD>',
+ u'\u2001': '<EM_QUAD>',
+ u'\u2002': '<EN_SPACE>',
+ u'\u2003': '<EM_SPACE>',
+ u'\u2004': '<THREE-PER-EM_SPACE>',
+ u'\u2005': '<FOUR-PER-EM_SPACE>',
+ u'\u2006': '<SIX-PER-EM_SPACE>',
+ u'\u2007': '<FIGURE_SPACE>',
+ u'\u2008': '<PUNCTUATION_SPACE>',
+ u'\u2009': '<THIN_SPACE>',
+ u'\u200A': '<HAIR_SPACE>',
+ u'\u200B': '<ZERO_WIDTH_SPACE>',
+ u'\u202F': '<NNBSP>',
+ u'\u205F': '<MMSP>',
+ u'\u3000': '<IDEOGRAPHIC_SPACE>',
+ }
+ def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+ super(White, self).__init__()
+ self.matchWhite = ws
+ self.setWhitespaceChars("".join(c for c in self.whiteChars if c not in self.matchWhite))
+ # ~ self.leaveWhitespace()
+ self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+ self.mayReturnEmpty = True
+ self.errmsg = "Expected " + self.name
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if instring[loc] not in self.matchWhite:
+ raise ParseException(instring, loc, self.errmsg, self)
+ start = loc
+ loc += 1
+ maxloc = start + self.maxLen
+ maxloc = min(maxloc, len(instring))
+ while loc < maxloc and instring[loc] in self.matchWhite:
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+
+class _PositionToken(Token):
+ def __init__(self):
+ super(_PositionToken, self).__init__()
+ self.name = self.__class__.__name__
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+ """Token to advance to a specific column of input text; useful for
+ tabular report scraping.
+ """
+ def __init__(self, colno):
+ super(GoToColumn, self).__init__()
+ self.col = colno
+
+ def preParse(self, instring, loc):
+ if col(loc, instring) != self.col:
+ instrlen = len(instring)
+ if self.ignoreExprs:
+ loc = self._skipIgnorables(instring, loc)
+ while loc < instrlen and instring[loc].isspace() and col(loc, instring) != self.col:
+ loc += 1
+ return loc
+
+ def parseImpl(self, instring, loc, doActions=True):
+ thiscol = col(loc, instring)
+ if thiscol > self.col:
+ raise ParseException(instring, loc, "Text not in expected column", self)
+ newloc = loc + self.col - thiscol
+ ret = instring[loc: newloc]
+ return newloc, ret
+
+
+class LineStart(_PositionToken):
+ r"""Matches if current position is at the beginning of a line within
+ the parse string
+
+ Example::
+
+ test = '''\
+ AAA this line
+ AAA and this line
+ AAA but not this one
+ B AAA and definitely not this one
+ '''
+
+ for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+ print(t)
+
+ prints::
+
+ ['AAA', ' this line']
+ ['AAA', ' and this line']
+
+ """
+ def __init__(self):
+ super(LineStart, self).__init__()
+ self.errmsg = "Expected start of line"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if col(loc, instring) == 1:
+ return loc, []
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+ """Matches if current position is at the end of a line within the
+ parse string
+ """
+ def __init__(self):
+ super(LineEnd, self).__init__()
+ self.setWhitespaceChars(ParserElement.DEFAULT_WHITE_CHARS.replace("\n", ""))
+ self.errmsg = "Expected end of line"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc < len(instring):
+ if instring[loc] == "\n":
+ return loc + 1, "\n"
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc + 1, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class StringStart(_PositionToken):
+ """Matches if current position is at the beginning of the parse
+ string
+ """
+ def __init__(self):
+ super(StringStart, self).__init__()
+ self.errmsg = "Expected start of text"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc != 0:
+            # see if entire string up to here is just whitespace and ignorables
+ if loc != self.preParse(instring, 0):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+class StringEnd(_PositionToken):
+ """Matches if current position is at the end of the parse string
+ """
+ def __init__(self):
+ super(StringEnd, self).__init__()
+ self.errmsg = "Expected end of text"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc < len(instring):
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc + 1, []
+ elif loc > len(instring):
+ return loc, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class WordStart(_PositionToken):
+ """Matches if the current position is at the beginning of a Word,
+ and is not preceded by any character in a given set of
+ ``wordChars`` (default= ``printables``). To emulate the
+ ``\b`` behavior of regular expressions, use
+ ``WordStart(alphanums)``. ``WordStart`` will also match at
+ the beginning of the string being parsed, or at the beginning of
+ a line.
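+
+    Example (a sketch; match 'cat' only at the start of a word)::
+
+        expr = WordStart(alphanums) + Literal("cat")
+        # expr.searchString("tom cat concatenate") finds only the standalone 'cat'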
+ """
+ def __init__(self, wordChars=printables):
+ super(WordStart, self).__init__()
+ self.wordChars = set(wordChars)
+ self.errmsg = "Not at the start of a word"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if loc != 0:
+ if (instring[loc - 1] in self.wordChars
+ or instring[loc] not in self.wordChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+class WordEnd(_PositionToken):
+ """Matches if the current position is at the end of a Word, and is
+ not followed by any character in a given set of ``wordChars``
+ (default= ``printables``). To emulate the ``\b`` behavior of
+ regular expressions, use ``WordEnd(alphanums)``. ``WordEnd``
+ will also match at the end of the string being parsed, or at the end
+ of a line.
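+
+    Example (a sketch; match 'cat' only at the end of a word)::
+
+        expr = Literal("cat") + WordEnd(alphanums)
+        # expr.searchString("tomcat catalog") finds 'cat' at the end of 'tomcat' only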
+ """
+ def __init__(self, wordChars=printables):
+ super(WordEnd, self).__init__()
+ self.wordChars = set(wordChars)
+ self.skipWhitespace = False
+ self.errmsg = "Not at the end of a word"
+
+ def parseImpl(self, instring, loc, doActions=True):
+ instrlen = len(instring)
+ if instrlen > 0 and loc < instrlen:
+ if (instring[loc] in self.wordChars or
+ instring[loc - 1] not in self.wordChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+
+class ParseExpression(ParserElement):
+ """Abstract subclass of ParserElement, for combining and
+ post-processing parsed tokens.
+ """
+ def __init__(self, exprs, savelist=False):
+ super(ParseExpression, self).__init__(savelist)
+ if isinstance(exprs, _generatorType):
+ exprs = list(exprs)
+
+ if isinstance(exprs, basestring):
+ self.exprs = [self._literalStringClass(exprs)]
+ elif isinstance(exprs, ParserElement):
+ self.exprs = [exprs]
+ elif isinstance(exprs, Iterable):
+ exprs = list(exprs)
+ # if sequence of strings provided, wrap with Literal
+ if any(isinstance(expr, basestring) for expr in exprs):
+ exprs = (self._literalStringClass(e) if isinstance(e, basestring) else e for e in exprs)
+ self.exprs = list(exprs)
+ else:
+ try:
+ self.exprs = list(exprs)
+ except TypeError:
+ self.exprs = [exprs]
+ self.callPreparse = False
+
+ def append(self, other):
+ self.exprs.append(other)
+ self.strRepr = None
+ return self
+
+ def leaveWhitespace(self):
+ """Extends ``leaveWhitespace`` defined in base class, and also invokes ``leaveWhitespace`` on
+ all contained expressions."""
+ self.skipWhitespace = False
+ self.exprs = [e.copy() for e in self.exprs]
+ for e in self.exprs:
+ e.leaveWhitespace()
+ return self
+
+ def ignore(self, other):
+ if isinstance(other, Suppress):
+ if other not in self.ignoreExprs:
+ super(ParseExpression, self).ignore(other)
+ for e in self.exprs:
+ e.ignore(self.ignoreExprs[-1])
+ else:
+ super(ParseExpression, self).ignore(other)
+ for e in self.exprs:
+ e.ignore(self.ignoreExprs[-1])
+ return self
+
+ def __str__(self):
+ try:
+ return super(ParseExpression, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.exprs))
+ return self.strRepr
+
+ def streamline(self):
+ super(ParseExpression, self).streamline()
+
+ for e in self.exprs:
+ e.streamline()
+
+ # collapse nested And's of the form And(And(And(a, b), c), d) to And(a, b, c, d)
+ # but only if there are no parse actions or resultsNames on the nested And's
+ # (likewise for Or's and MatchFirst's)
+ if len(self.exprs) == 2:
+ other = self.exprs[0]
+ if (isinstance(other, self.__class__)
+ and not other.parseAction
+ and other.resultsName is None
+ and not other.debug):
+ self.exprs = other.exprs[:] + [self.exprs[1]]
+ self.strRepr = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ other = self.exprs[-1]
+ if (isinstance(other, self.__class__)
+ and not other.parseAction
+ and other.resultsName is None
+ and not other.debug):
+ self.exprs = self.exprs[:-1] + other.exprs[:]
+ self.strRepr = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ self.errmsg = "Expected " + _ustr(self)
+
+ return self
+
+ def validate(self, validateTrace=None):
+ tmp = (validateTrace if validateTrace is not None else [])[:] + [self]
+ for e in self.exprs:
+ e.validate(tmp)
+ self.checkRecursion([])
+
+ def copy(self):
+ ret = super(ParseExpression, self).copy()
+ ret.exprs = [e.copy() for e in self.exprs]
+ return ret
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if __diag__.warn_ungrouped_named_tokens_in_collection:
+ for e in self.exprs:
+ if isinstance(e, ParserElement) and e.resultsName:
+ warnings.warn("{0}: setting results name {1!r} on {2} expression "
+ "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",
+ name,
+ type(self).__name__,
+ e.resultsName),
+ stacklevel=3)
+
+ return super(ParseExpression, self)._setResultsName(name, listAllMatches)
+
+
+class And(ParseExpression):
+ """
+ Requires all given :class:`ParseExpression` s to be found in the given order.
+ Expressions may be separated by whitespace.
+ May be constructed using the ``'+'`` operator.
+ May also be constructed using the ``'-'`` operator, which will
+ suppress backtracking.
+
+ Example::
+
+ integer = Word(nums)
+ name_expr = OneOrMore(Word(alphas))
+
+ expr = And([integer("id"), name_expr("name"), integer("age")])
+ # more easily written as:
+ expr = integer("id") + name_expr("name") + integer("age")
+ """
+
+ class _ErrorStop(Empty):
+ def __init__(self, *args, **kwargs):
+ super(And._ErrorStop, self).__init__(*args, **kwargs)
+ self.name = '-'
+ self.leaveWhitespace()
+
+ def __init__(self, exprs, savelist=True):
+ exprs = list(exprs)
+ if exprs and Ellipsis in exprs:
+ tmp = []
+ for i, expr in enumerate(exprs):
+ if expr is Ellipsis:
+ if i < len(exprs) - 1:
+ skipto_arg = (Empty() + exprs[i + 1]).exprs[-1]
+ tmp.append(SkipTo(skipto_arg)("_skipped*"))
+ else:
+ raise Exception("cannot construct And with sequence ending in ...")
+ else:
+ tmp.append(expr)
+ exprs[:] = tmp
+ super(And, self).__init__(exprs, savelist)
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ self.setWhitespaceChars(self.exprs[0].whiteChars)
+ self.skipWhitespace = self.exprs[0].skipWhitespace
+ self.callPreparse = True
+
+ def streamline(self):
+ # collapse any _PendingSkip's
+ if self.exprs:
+ if any(isinstance(e, ParseExpression) and e.exprs and isinstance(e.exprs[-1], _PendingSkip)
+ for e in self.exprs[:-1]):
+ for i, e in enumerate(self.exprs[:-1]):
+ if e is None:
+ continue
+ if (isinstance(e, ParseExpression)
+ and e.exprs and isinstance(e.exprs[-1], _PendingSkip)):
+ e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1]
+ self.exprs[i + 1] = None
+ self.exprs = [e for e in self.exprs if e is not None]
+
+ super(And, self).streamline()
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ # pass False as last arg to _parse for first element, since we already
+ # pre-parsed the string as part of our And pre-parsing
+ loc, resultlist = self.exprs[0]._parse(instring, loc, doActions, callPreParse=False)
+ errorStop = False
+ for e in self.exprs[1:]:
+ if isinstance(e, And._ErrorStop):
+ errorStop = True
+ continue
+ if errorStop:
+ try:
+ loc, exprtokens = e._parse(instring, loc, doActions)
+ except ParseSyntaxException:
+ raise
+ except ParseBaseException as pe:
+ pe.__traceback__ = None
+ raise ParseSyntaxException._from_exception(pe)
+ except IndexError:
+ raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
+ else:
+ loc, exprtokens = e._parse(instring, loc, doActions)
+ if exprtokens or exprtokens.haskeys():
+ resultlist += exprtokens
+ return loc, resultlist
+
+ def __iadd__(self, other):
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ return self.append(other) # And([self, other])
+
+ def checkRecursion(self, parseElementList):
+ subRecCheckList = parseElementList[:] + [self]
+ for e in self.exprs:
+ e.checkRecursion(subRecCheckList)
+ if not e.mayReturnEmpty:
+ break
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+
+class Or(ParseExpression):
+ """Requires that at least one :class:`ParseExpression` is found. If
+ two expressions match, the expression that matches the longest
+ string will be used. May be constructed using the ``'^'``
+ operator.
+
+ Example::
+
+ # construct Or using '^' operator
+
+ number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+ print(number.searchString("123 3.1416 789"))
+
+ prints::
+
+ [['123'], ['3.1416'], ['789']]
+ """
+ def __init__(self, exprs, savelist=False):
+ super(Or, self).__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def streamline(self):
+ super(Or, self).streamline()
+ if __compat__.collect_all_And_tokens:
+ self.saveAsList = any(e.saveAsList for e in self.exprs)
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ maxExcLoc = -1
+ maxException = None
+ matches = []
+ for e in self.exprs:
+ try:
+ loc2 = e.tryParse(instring, loc)
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(instring, len(instring), e.errmsg, self)
+ maxExcLoc = len(instring)
+ else:
+ # save match among all matches, to retry longest to shortest
+ matches.append((loc2, e))
+
+ if matches:
+ # re-evaluate all matches in descending order of length of match, in case attached actions
+ # might change whether or how much they match of the input.
+ matches.sort(key=itemgetter(0), reverse=True)
+
+ if not doActions:
+ # no further conditions or parse actions to change the selection of
+ # alternative, so the first match will be the best match
+ best_expr = matches[0][1]
+ return best_expr._parse(instring, loc, doActions)
+
+ longest = -1, None
+ for loc1, expr1 in matches:
+ if loc1 <= longest[0]:
+ # already have a longer match than this one will deliver, we are done
+ return longest
+
+ try:
+ loc2, toks = expr1._parse(instring, loc, doActions)
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ else:
+ if loc2 >= loc1:
+ return loc2, toks
+ # didn't match as much as before
+ elif loc2 > longest[0]:
+ longest = loc2, toks
+
+ if longest != (-1, None):
+ return longest
+
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+ def __ixor__(self, other):
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ return self.append(other) # Or([self, other])
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion(self, parseElementList):
+ subRecCheckList = parseElementList[:] + [self]
+ for e in self.exprs:
+ e.checkRecursion(subRecCheckList)
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if (not __compat__.collect_all_And_tokens
+ and __diag__.warn_multiple_tokens_in_named_alternation):
+ if any(isinstance(e, And) for e in self.exprs):
+ warnings.warn("{0}: setting results name {1!r} on {2} expression "
+ "may only return a single token for an And alternative, "
+ "in future will return the full list of tokens".format(
+ "warn_multiple_tokens_in_named_alternation", name, type(self).__name__),
+ stacklevel=3)
+
+ return super(Or, self)._setResultsName(name, listAllMatches)
+
+
+class MatchFirst(ParseExpression):
+ """Requires that at least one :class:`ParseExpression` is found. If
+ two expressions match, the first one listed is the one that will
+ match. May be constructed using the ``'|'`` operator.
+
+ Example::
+
+ # construct MatchFirst using '|' operator
+
+ # watch the order of expressions to match
+ number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+ print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+ # put more selective expression first
+ number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+ print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
+ """
+ def __init__(self, exprs, savelist=False):
+ super(MatchFirst, self).__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def streamline(self):
+ super(MatchFirst, self).streamline()
+ if __compat__.collect_all_And_tokens:
+ self.saveAsList = any(e.saveAsList for e in self.exprs)
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ maxExcLoc = -1
+ maxException = None
+ for e in self.exprs:
+ try:
+ ret = e._parse(instring, loc, doActions)
+ return ret
+ except ParseException as err:
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(instring, len(instring), e.errmsg, self)
+ maxExcLoc = len(instring)
+
+ # only got here if no expression matched, raise exception for match that made it the furthest
+ else:
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+ def __ior__(self, other):
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ return self.append(other) # MatchFirst([self, other])
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion(self, parseElementList):
+ subRecCheckList = parseElementList[:] + [self]
+ for e in self.exprs:
+ e.checkRecursion(subRecCheckList)
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if (not __compat__.collect_all_And_tokens
+ and __diag__.warn_multiple_tokens_in_named_alternation):
+ if any(isinstance(e, And) for e in self.exprs):
+ warnings.warn("{0}: setting results name {1!r} on {2} expression "
+ "may only return a single token for an And alternative, "
+ "in future will return the full list of tokens".format(
+ "warn_multiple_tokens_in_named_alternation", name, type(self).__name__),
+ stacklevel=3)
+
+ return super(MatchFirst, self)._setResultsName(name, listAllMatches)
+
+
+class Each(ParseExpression):
+ """Requires all given :class:`ParseExpression` s to be found, but in
+ any order. Expressions may be separated by whitespace.
+
+ May be constructed using the ``'&'`` operator.
+
+ Example::
+
+ color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+ shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+ integer = Word(nums)
+ shape_attr = "shape:" + shape_type("shape")
+ posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+ color_attr = "color:" + color("color")
+ size_attr = "size:" + integer("size")
+
+ # use Each (using operator '&') to accept attributes in any order
+ # (shape and posn are required, color and size are optional)
+ shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+ shape_spec.runTests('''
+ shape: SQUARE color: BLACK posn: 100, 120
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ color:GREEN size:20 shape:TRIANGLE posn:20,40
+ '''
+ )
+
+ prints::
+
+ shape: SQUARE color: BLACK posn: 100, 120
+ ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+ - color: BLACK
+ - posn: ['100', ',', '120']
+ - x: 100
+ - y: 120
+ - shape: SQUARE
+
+
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+ - color: BLUE
+ - posn: ['50', ',', '80']
+ - x: 50
+ - y: 80
+ - shape: CIRCLE
+ - size: 50
+
+
+ color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+ ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+ - color: GREEN
+ - posn: ['20', ',', '40']
+ - x: 20
+ - y: 40
+ - shape: TRIANGLE
+ - size: 20
+ """
+ def __init__(self, exprs, savelist=True):
+ super(Each, self).__init__(exprs, savelist)
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ self.skipWhitespace = True
+ self.initExprGroups = True
+ self.saveAsList = True
+
+ def streamline(self):
+ super(Each, self).streamline()
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if self.initExprGroups:
+ self.opt1map = dict((id(e.expr), e) for e in self.exprs if isinstance(e, Optional))
+ opt1 = [e.expr for e in self.exprs if isinstance(e, Optional)]
+ opt2 = [e for e in self.exprs if e.mayReturnEmpty and not isinstance(e, (Optional, Regex))]
+ self.optionals = opt1 + opt2
+ self.multioptionals = [e.expr for e in self.exprs if isinstance(e, ZeroOrMore)]
+ self.multirequired = [e.expr for e in self.exprs if isinstance(e, OneOrMore)]
+ self.required = [e for e in self.exprs if not isinstance(e, (Optional, ZeroOrMore, OneOrMore))]
+ self.required += self.multirequired
+ self.initExprGroups = False
+ tmpLoc = loc
+ tmpReqd = self.required[:]
+ tmpOpt = self.optionals[:]
+ matchOrder = []
+
+ keepMatching = True
+ while keepMatching:
+ tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+ failed = []
+ for e in tmpExprs:
+ try:
+ tmpLoc = e.tryParse(instring, tmpLoc)
+ except ParseException:
+ failed.append(e)
+ else:
+ matchOrder.append(self.opt1map.get(id(e), e))
+ if e in tmpReqd:
+ tmpReqd.remove(e)
+ elif e in tmpOpt:
+ tmpOpt.remove(e)
+ if len(failed) == len(tmpExprs):
+ keepMatching = False
+
+ if tmpReqd:
+ missing = ", ".join(_ustr(e) for e in tmpReqd)
+ raise ParseException(instring, loc, "Missing one or more required elements (%s)" % missing)
+
+ # add any unmatched Optionals, in case they have default values defined
+ matchOrder += [e for e in self.exprs if isinstance(e, Optional) and e.expr in tmpOpt]
+
+ resultlist = []
+ for e in matchOrder:
+ loc, results = e._parse(instring, loc, doActions)
+ resultlist.append(results)
+
+ finalResults = sum(resultlist, ParseResults([]))
+ return loc, finalResults
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion(self, parseElementList):
+ subRecCheckList = parseElementList[:] + [self]
+ for e in self.exprs:
+ e.checkRecursion(subRecCheckList)
+
+
+class ParseElementEnhance(ParserElement):
+ """Abstract subclass of :class:`ParserElement`, for combining and
+ post-processing parsed tokens.
+ """
+ def __init__(self, expr, savelist=False):
+ super(ParseElementEnhance, self).__init__(savelist)
+ if isinstance(expr, basestring):
+ if issubclass(self._literalStringClass, Token):
+ expr = self._literalStringClass(expr)
+ else:
+ expr = self._literalStringClass(Literal(expr))
+ self.expr = expr
+ self.strRepr = None
+ if expr is not None:
+ self.mayIndexError = expr.mayIndexError
+ self.mayReturnEmpty = expr.mayReturnEmpty
+ self.setWhitespaceChars(expr.whiteChars)
+ self.skipWhitespace = expr.skipWhitespace
+ self.saveAsList = expr.saveAsList
+ self.callPreparse = expr.callPreparse
+ self.ignoreExprs.extend(expr.ignoreExprs)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if self.expr is not None:
+ return self.expr._parse(instring, loc, doActions, callPreParse=False)
+ else:
+ raise ParseException("", loc, self.errmsg, self)
+
+ def leaveWhitespace(self):
+ self.skipWhitespace = False
+ if self.expr is not None:
+ # copy the contained expression before modifying it in place
+ self.expr = self.expr.copy()
+ self.expr.leaveWhitespace()
+ return self
+
+ def ignore(self, other):
+ if isinstance(other, Suppress):
+ if other not in self.ignoreExprs:
+ super(ParseElementEnhance, self).ignore(other)
+ if self.expr is not None:
+ self.expr.ignore(self.ignoreExprs[-1])
+ else:
+ super(ParseElementEnhance, self).ignore(other)
+ if self.expr is not None:
+ self.expr.ignore(self.ignoreExprs[-1])
+ return self
+
+ def streamline(self):
+ super(ParseElementEnhance, self).streamline()
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def checkRecursion(self, parseElementList):
+ if self in parseElementList:
+ raise RecursiveGrammarException(parseElementList + [self])
+ subRecCheckList = parseElementList[:] + [self]
+ if self.expr is not None:
+ self.expr.checkRecursion(subRecCheckList)
+
+ def validate(self, validateTrace=None):
+ if validateTrace is None:
+ validateTrace = []
+ tmp = validateTrace[:] + [self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self.checkRecursion([])
+
+ def __str__(self):
+ try:
+ return super(ParseElementEnhance, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None and self.expr is not None:
+ self.strRepr = "%s:(%s)" % (self.__class__.__name__, _ustr(self.expr))
+ return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+ """Lookahead matching of the given parse expression.
+ ``FollowedBy`` does *not* advance the parsing position within
+ the input string, it only verifies that the specified parse
+ expression matches at the current position. ``FollowedBy``
+ always returns a null token list. If any results names are defined
+ in the lookahead expression, those *will* be returned for access by
+ name.
+
+ Example::
+
+ # use FollowedBy to match a label only if it is followed by a ':'
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+ OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+
+ prints::
+
+ [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+ """
+ def __init__(self, expr):
+ super(FollowedBy, self).__init__(expr)
+ self.mayReturnEmpty = True
+
+ def parseImpl(self, instring, loc, doActions=True):
+ # by using self.expr._parse and deleting the contents of the returned ParseResults list
+ # we keep any named results that were defined in the FollowedBy expression
+ _, ret = self.expr._parse(instring, loc, doActions=doActions)
+ del ret[:]
+
+ return loc, ret
+
+
+class PrecededBy(ParseElementEnhance):
+ """Lookbehind matching of the given parse expression.
+ ``PrecededBy`` does not advance the parsing position within the
+ input string, it only verifies that the specified parse expression
+ matches prior to the current position. ``PrecededBy`` always
+ returns a null token list, but if a results name is defined on the
+ given expression, it is returned.
+
+ Parameters:
+
+ - expr - expression that must match prior to the current parse
+ location
+ - retreat - (default= ``None``) - (int) maximum number of characters
+ to look back from the current parse location
+
+ If the lookbehind expression is a string, Literal, Keyword, or
+ a Word or CharsNotIn with a specified exact or maximum length, then
+ the retreat parameter is not required. Otherwise, retreat must be
+ specified to give a maximum number of characters to look back from
+ the current parse position for a lookbehind match.
+
+ Example::
+
+ # VB-style variable names with type prefixes
+ int_var = PrecededBy("#") + pyparsing_common.identifier
+ str_var = PrecededBy("$") + pyparsing_common.identifier
+
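+ # a minimal parsing sketch, for illustration (assumes the
+ # module-level ``nums``): match a number only when it
+ # directly follows a '$'
+ price = PrecededBy('$') + Word(nums)
+ print(price.searchString("$100 and 200")) # -> [['100']]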
+ """
+ def __init__(self, expr, retreat=None):
+ super(PrecededBy, self).__init__(expr)
+ self.expr = self.expr().leaveWhitespace()
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.exact = False
+ if isinstance(expr, str):
+ retreat = len(expr)
+ self.exact = True
+ elif isinstance(expr, (Literal, Keyword)):
+ retreat = expr.matchLen
+ self.exact = True
+ elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT:
+ retreat = expr.maxLen
+ self.exact = True
+ elif isinstance(expr, _PositionToken):
+ retreat = 0
+ self.exact = True
+ self.retreat = retreat
+ self.errmsg = "not preceded by " + str(expr)
+ self.skipWhitespace = False
+ self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None)))
+
+ def parseImpl(self, instring, loc=0, doActions=True):
+ if self.exact:
+ if loc < self.retreat:
+ raise ParseException(instring, loc, self.errmsg)
+ start = loc - self.retreat
+ _, ret = self.expr._parse(instring, start)
+ else:
+ # retreat specified a maximum lookbehind window, iterate
+ test_expr = self.expr + StringEnd()
+ instring_slice = instring[max(0, loc - self.retreat):loc]
+ last_expr = ParseException(instring, loc, self.errmsg)
+ for offset in range(1, min(loc, self.retreat + 1)+1):
+ try:
+ # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:]))
+ _, ret = test_expr._parse(instring_slice, len(instring_slice) - offset)
+ except ParseBaseException as pbe:
+ last_expr = pbe
+ else:
+ break
+ else:
+ raise last_expr
+ return loc, ret
+
+
+class NotAny(ParseElementEnhance):
+ """Lookahead to disallow matching with the given parse expression.
+ ``NotAny`` does *not* advance the parsing position within the
+ input string, it only verifies that the specified parse expression
+ does *not* match at the current position. Also, ``NotAny`` does
+ *not* skip over leading whitespace. ``NotAny`` always returns
+ a null token list. May be constructed using the '~' operator.
+
+ Example::
+
+ AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
+
+ # take care not to mistake keywords for identifiers
+ ident = ~(AND | OR | NOT) + Word(alphas)
+ boolean_term = Optional(NOT) + ident
+
+ # very crude boolean expression - to support parenthesis groups and
+ # operation hierarchy, use infixNotation
+ boolean_expr = boolean_term + ZeroOrMore((AND | OR) + boolean_term)
+
+ # integers that are followed by "." are actually floats
+ integer = Word(nums) + ~Char(".")
+ """
+ def __init__(self, expr):
+ super(NotAny, self).__init__(expr)
+ # ~ self.leaveWhitespace()
+ self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+ self.mayReturnEmpty = True
+ self.errmsg = "Found unwanted token, " + _ustr(self.expr)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ if self.expr.canParseNext(instring, loc):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+ return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+ def __init__(self, expr, stopOn=None):
+ super(_MultipleMatch, self).__init__(expr)
+ self.saveAsList = True
+ ender = stopOn
+ if isinstance(ender, basestring):
+ ender = self._literalStringClass(ender)
+ self.stopOn(ender)
+
+ def stopOn(self, ender):
+ if isinstance(ender, basestring):
+ ender = self._literalStringClass(ender)
+ self.not_ender = ~ender if ender is not None else None
+ return self
+
+ def parseImpl(self, instring, loc, doActions=True):
+ self_expr_parse = self.expr._parse
+ self_skip_ignorables = self._skipIgnorables
+ check_ender = self.not_ender is not None
+ if check_ender:
+ try_not_ender = self.not_ender.tryParse
+
+ # must be at least one (but first see if we are the stopOn sentinel;
+ # if so, fail)
+ if check_ender:
+ try_not_ender(instring, loc)
+ loc, tokens = self_expr_parse(instring, loc, doActions, callPreParse=False)
+ try:
+ hasIgnoreExprs = (not not self.ignoreExprs)
+ while 1:
+ if check_ender:
+ try_not_ender(instring, loc)
+ if hasIgnoreExprs:
+ preloc = self_skip_ignorables(instring, loc)
+ else:
+ preloc = loc
+ loc, tmptokens = self_expr_parse(instring, preloc, doActions)
+ if tmptokens or tmptokens.haskeys():
+ tokens += tmptokens
+ except (ParseException, IndexError):
+ pass
+
+ return loc, tokens
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if __diag__.warn_ungrouped_named_tokens_in_collection:
+ for e in [self.expr] + getattr(self.expr, 'exprs', []):
+ if isinstance(e, ParserElement) and e.resultsName:
+ warnings.warn("{0}: setting results name {1!r} on {2} expression "
+ "collides with {3!r} on contained expression".format("warn_ungrouped_named_tokens_in_collection",
+ name,
+ type(self).__name__,
+ e.resultsName),
+ stacklevel=3)
+
+ return super(_MultipleMatch, self)._setResultsName(name, listAllMatches)
+
+
+class OneOrMore(_MultipleMatch):
+ """Repetition of one or more of the given expression.
+
+ Parameters:
+ - expr - expression that must match one or more times
+ - stopOn - (default= ``None``) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example::
+
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+ text = "shape: SQUARE posn: upper left color: BLACK"
+ OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+ # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+ OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+
+ # could also be written as
+ (attr_expr * (1,)).parseString(text).pprint()
+ """
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+ return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+ """Optional repetition of zero or more of the given expression.
+
+ Parameters:
+ - expr - expression that must match zero or more times
+ - stopOn - (default= ``None``) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example: similar to :class:`OneOrMore`
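+
+ For illustration, a minimal sketch::
+
+ name = Word(alphas) + ZeroOrMore(Word(nums))
+ print(name.parseString("abc 1 2 3")) # -> ['abc', '1', '2', '3']
+ print(name.parseString("abc")) # -> ['abc']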
+ """
+ def __init__(self, expr, stopOn=None):
+ super(ZeroOrMore, self).__init__(expr, stopOn=stopOn)
+ self.mayReturnEmpty = True
+
+ def parseImpl(self, instring, loc, doActions=True):
+ try:
+ return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+ except (ParseException, IndexError):
+ return loc, []
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+ return self.strRepr
+
+
+class _NullToken(object):
+ def __bool__(self):
+ return False
+ __nonzero__ = __bool__
+ def __str__(self):
+ return ""
+
+class Optional(ParseElementEnhance):
+ """Optional matching of the given expression.
+
+ Parameters:
+ - expr - expression that may match zero or one time
+ - default (optional) - value to be returned if the optional expression is not found.
+
+ Example::
+
+ # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+ zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+ zip.runTests('''
+ # traditional ZIP code
+ 12345
+
+ # ZIP+4 form
+ 12101-0001
+
+ # invalid ZIP
+ 98765-
+ ''')
+
+ prints::
+
+ # traditional ZIP code
+ 12345
+ ['12345']
+
+ # ZIP+4 form
+ 12101-0001
+ ['12101-0001']
+
+ # invalid ZIP
+ 98765-
+ ^
+ FAIL: Expected end of text (at char 5), (line:1, col:6)
+ """
+ __optionalNotMatched = _NullToken()
+
+ def __init__(self, expr, default=__optionalNotMatched):
+ super(Optional, self).__init__(expr, savelist=False)
+ self.saveAsList = self.expr.saveAsList
+ self.defaultValue = default
+ self.mayReturnEmpty = True
+
+ def parseImpl(self, instring, loc, doActions=True):
+ try:
+ loc, tokens = self.expr._parse(instring, loc, doActions, callPreParse=False)
+ except (ParseException, IndexError):
+ if self.defaultValue is not self.__optionalNotMatched:
+ if self.expr.resultsName:
+ tokens = ParseResults([self.defaultValue])
+ tokens[self.expr.resultsName] = self.defaultValue
+ else:
+ tokens = [self.defaultValue]
+ else:
+ tokens = []
+ return loc, tokens
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "[" + _ustr(self.expr) + "]"
+
+ return self.strRepr
+
+class SkipTo(ParseElementEnhance):
+ """Token for skipping over all undefined text until the matched
+ expression is found.
+
+ Parameters:
+ - expr - target expression marking the end of the data to be skipped
+ - include - (default= ``False``) if True, the target expression is also parsed
+ (the skipped text and target expression are returned as a 2-element list).
+ - ignore - (default= ``None``) used to define grammars (typically quoted strings and
+ comments) that might contain false matches to the target expression
+ - failOn - (default= ``None``) define expressions that are not allowed to be
+ included in the skipped text; if found before the target expression,
+ the SkipTo is not a match
+
+ Example::
+
+ report = '''
+ Outstanding Issues Report - 1 Jan 2000
+
+ # | Severity | Description | Days Open
+ -----+----------+-------------------------------------------+-----------
+ 101 | Critical | Intermittent system crash | 6
+ 94 | Cosmetic | Spelling error on Login ('log|n') | 14
+ 79 | Minor | System slow when running too many reports | 47
+ '''
+ integer = Word(nums)
+ SEP = Suppress('|')
+ # use SkipTo to simply match everything up until the next SEP
+ # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+ # - parse action will call token.strip() for each matched token, i.e., the description body
+ string_data = SkipTo(SEP, ignore=quotedString)
+ string_data.setParseAction(tokenMap(str.strip))
+ ticket_expr = (integer("issue_num") + SEP
+ + string_data("sev") + SEP
+ + string_data("desc") + SEP
+ + integer("days_open"))
+
+ for tkt in ticket_expr.searchString(report):
+ print(tkt.dump())
+
+ prints::
+
+ ['101', 'Critical', 'Intermittent system crash', '6']
+ - days_open: 6
+ - desc: Intermittent system crash
+ - issue_num: 101
+ - sev: Critical
+ ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+ - days_open: 14
+ - desc: Spelling error on Login ('log|n')
+ - issue_num: 94
+ - sev: Cosmetic
+ ['79', 'Minor', 'System slow when running too many reports', '47']
+ - days_open: 47
+ - desc: System slow when running too many reports
+ - issue_num: 79
+ - sev: Minor
+ """
+ def __init__(self, other, include=False, ignore=None, failOn=None):
+ super(SkipTo, self).__init__(other)
+ self.ignoreExpr = ignore
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.includeMatch = include
+ self.saveAsList = False
+ if isinstance(failOn, basestring):
+ self.failOn = self._literalStringClass(failOn)
+ else:
+ self.failOn = failOn
+ self.errmsg = "No match found for " + _ustr(self.expr)
+
+ def parseImpl(self, instring, loc, doActions=True):
+ startloc = loc
+ instrlen = len(instring)
+ expr = self.expr
+ expr_parse = self.expr._parse
+ self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+ self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+
+ tmploc = loc
+ while tmploc <= instrlen:
+ if self_failOn_canParseNext is not None:
+ # break if failOn expression matches
+ if self_failOn_canParseNext(instring, tmploc):
+ break
+
+ if self_ignoreExpr_tryParse is not None:
+ # advance past ignore expressions
+ while 1:
+ try:
+ tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+ except ParseBaseException:
+ break
+
+ try:
+ expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+ except (ParseException, IndexError):
+ # no match, advance loc in string
+ tmploc += 1
+ else:
+ # matched skipto expr, done
+ break
+
+ else:
+ # ran off the end of the input string without matching skipto expr, fail
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ # build up return values
+ loc = tmploc
+ skiptext = instring[startloc:loc]
+ skipresult = ParseResults(skiptext)
+
+ if self.includeMatch:
+ loc, mat = expr_parse(instring, loc, doActions, callPreParse=False)
+ skipresult += mat
+
+ return loc, skipresult
+
+class Forward(ParseElementEnhance):
+ """Forward declaration of an expression to be defined later -
+ used for recursive grammars, such as algebraic infix notation.
+ When the expression is known, it is assigned to the ``Forward``
+ variable using the '<<' operator.
+
+ Note: take care when assigning to ``Forward`` not to overlook
+ precedence of operators.
+
+ Specifically, '|' has a lower precedence than '<<', so that::
+
+ fwdExpr << a | b | c
+
+ will actually be evaluated as::
+
+ (fwdExpr << a) | b | c
+
+ thereby leaving b and c out as parseable alternatives. It is recommended that you
+ explicitly group the values inserted into the ``Forward``::
+
+ fwdExpr << (a | b | c)
+
+ Converting to use the '<<=' operator instead will avoid this problem.
+
+ See :class:`ParseResults.pprint` for an example of a recursive
+ parser created using ``Forward``.
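+
+ For illustration, a minimal recursive sketch::
+
+ nested = Forward()
+ nested <<= Literal('(') + Optional(nested) + Literal(')')
+ print(nested.parseString("(())")) # -> ['(', '(', ')', ')']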
+ """
+ def __init__(self, other=None):
+ super(Forward, self).__init__(other, savelist=False)
+
+ def __lshift__(self, other):
+ if isinstance(other, basestring):
+ other = self._literalStringClass(other)
+ self.expr = other
+ self.strRepr = None
+ self.mayIndexError = self.expr.mayIndexError
+ self.mayReturnEmpty = self.expr.mayReturnEmpty
+ self.setWhitespaceChars(self.expr.whiteChars)
+ self.skipWhitespace = self.expr.skipWhitespace
+ self.saveAsList = self.expr.saveAsList
+ self.ignoreExprs.extend(self.expr.ignoreExprs)
+ return self
+
+ def __ilshift__(self, other):
+ return self << other
+
+ def leaveWhitespace(self):
+ self.skipWhitespace = False
+ return self
+
+ def streamline(self):
+ if not self.streamlined:
+ self.streamlined = True
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def validate(self, validateTrace=None):
+ if validateTrace is None:
+ validateTrace = []
+
+ if self not in validateTrace:
+ tmp = validateTrace[:] + [self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self.checkRecursion([])
+
+ def __str__(self):
+ if hasattr(self, "name"):
+ return self.name
+ if self.strRepr is not None:
+ return self.strRepr
+
+ # Avoid infinite recursion by setting a temporary strRepr
+ self.strRepr = ": ..."
+
+ # Use the string representation of the main expression.
+ retString = '...'
+ try:
+ if self.expr is not None:
+ retString = _ustr(self.expr)[:1000]
+ else:
+ retString = "None"
+ finally:
+ self.strRepr = self.__class__.__name__ + ": " + retString
+ return self.strRepr
+
+ def copy(self):
+ if self.expr is not None:
+ return super(Forward, self).copy()
+ else:
+ ret = Forward()
+ ret <<= self
+ return ret
+
+ def _setResultsName(self, name, listAllMatches=False):
+ if __diag__.warn_name_set_on_empty_Forward:
+ if self.expr is None:
+ warnings.warn("{0}: setting results name {0!r} on {1} expression "
+ "that has no contained expression".format("warn_name_set_on_empty_Forward",
+ name,
+ type(self).__name__),
+ stacklevel=3)
+
+ return super(Forward, self)._setResultsName(name, listAllMatches)
+
+class TokenConverter(ParseElementEnhance):
+ """
+ Abstract subclass of :class:`ParseElementEnhance`, for converting parsed results.
+ """
+ def __init__(self, expr, savelist=False):
+ super(TokenConverter, self).__init__(expr) # , savelist)
+ self.saveAsList = False
+
+class Combine(TokenConverter):
+ """Converter to concatenate all matching tokens to a single string.
+ By default, the matching patterns must also be contiguous in the
+ input string; this can be disabled by specifying
+ ``'adjacent=False'`` in the constructor.
+
+ Example::
+
+ real = Word(nums) + '.' + Word(nums)
+ print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+ # will also erroneously match the following
+ print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+ real = Combine(Word(nums) + '.' + Word(nums))
+ print(real.parseString('3.1416')) # -> ['3.1416']
+ # no match when there are internal spaces
+ print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+ """
+ def __init__(self, expr, joinString="", adjacent=True):
+ super(Combine, self).__init__(expr)
+ # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+ if adjacent:
+ self.leaveWhitespace()
+ self.adjacent = adjacent
+ self.skipWhitespace = True
+ self.joinString = joinString
+ self.callPreparse = True
+
+ def ignore(self, other):
+ if self.adjacent:
+ ParserElement.ignore(self, other)
+ else:
+ super(Combine, self).ignore(other)
+ return self
+
+ def postParse(self, instring, loc, tokenlist):
+ retToks = tokenlist.copy()
+ del retToks[:]
+ retToks += ParseResults(["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults)
+
+ if self.resultsName and retToks.haskeys():
+ return [retToks]
+ else:
+ return retToks
+
+class Group(TokenConverter):
+ """Converter to return the matched tokens as a list - useful for
+ returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions.
+
+ Example::
+
+ ident = Word(alphas)
+ num = Word(nums)
+ term = ident | num
+ func = ident + Optional(delimitedList(term))
+ print(func.parseString("fn a, b, 100")) # -> ['fn', 'a', 'b', '100']
+
+ func = ident + Group(Optional(delimitedList(term)))
+ print(func.parseString("fn a, b, 100")) # -> ['fn', ['a', 'b', '100']]
+ """
+ def __init__(self, expr):
+ super(Group, self).__init__(expr)
+ self.saveAsList = True
+
+ def postParse(self, instring, loc, tokenlist):
+ return [tokenlist]
+
+class Dict(TokenConverter):
+ """Converter to return a repetitive expression as a list, but also
+ as a dictionary. Each element can also be referenced using the first
+ token in the expression as its key. Useful for tabular report
+ scraping when the first column can be used as an item key.
+
+ Example::
+
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+ # print attributes as plain groups
+ print(OneOrMore(attr_expr).parseString(text).dump())
+
+ # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+ result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+ print(result.dump())
+
+ # access named fields as dict entries, or output as dict
+ print(result['shape'])
+ print(result.asDict())
+
+ prints::
+
+ ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: light blue
+ - posn: upper left
+ - shape: SQUARE
+ - texture: burlap
+ SQUARE
+ {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+
+ See more examples at :class:`ParseResults` of accessing fields by results name.
+ """
+ def __init__(self, expr):
+ super(Dict, self).__init__(expr)
+ self.saveAsList = True
+
+ def postParse(self, instring, loc, tokenlist):
+ for i, tok in enumerate(tokenlist):
+ if len(tok) == 0:
+ continue
+ ikey = tok[0]
+ if isinstance(ikey, int):
+ ikey = _ustr(tok[0]).strip()
+ if len(tok) == 1:
+ tokenlist[ikey] = _ParseResultsWithOffset("", i)
+ elif len(tok) == 2 and not isinstance(tok[1], ParseResults):
+ tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i)
+ else:
+ dictvalue = tok.copy() # ParseResults(i)
+ del dictvalue[0]
+ if len(dictvalue) != 1 or (isinstance(dictvalue, ParseResults) and dictvalue.haskeys()):
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i)
+ else:
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i)
+
+ if self.resultsName:
+ return [tokenlist]
+ else:
+ return tokenlist
+
+
+class Suppress(TokenConverter):
+ """Converter for ignoring the results of a parsed expression.
+
+ Example::
+
+ source = "a, b, c,d"
+ wd = Word(alphas)
+ wd_list1 = wd + ZeroOrMore(',' + wd)
+ print(wd_list1.parseString(source))
+
+ # often, delimiters that are useful during parsing are just in the
+ # way afterward - use Suppress to keep them out of the parsed output
+ wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+ print(wd_list2.parseString(source))
+
+ prints::
+
+ ['a', ',', 'b', ',', 'c', ',', 'd']
+ ['a', 'b', 'c', 'd']
+
+ (See also :class:`delimitedList`.)
+ """
+ def postParse(self, instring, loc, tokenlist):
+ return []
+
+ def suppress(self):
+ return self
+
+
+class OnlyOnce(object):
+ """Wrapper for parse actions, to ensure they are only called once.
+ """
+ def __init__(self, methodCall):
+ self.callable = _trim_arity(methodCall)
+ self.called = False
+ def __call__(self, s, l, t):
+ if not self.called:
+ results = self.callable(s, l, t)
+ self.called = True
+ return results
+ raise ParseException(s, l, "")
+ def reset(self):
+ self.called = False
+
+def traceParseAction(f):
+ """Decorator for debugging parse actions.
+
+ When the parse action is called, this decorator will print
+ ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
+ When the parse action completes, the decorator will print
+ ``"<<"`` followed by the returned value, or any exception that the parse action raised.
+
+ Example::
+
+ wd = Word(alphas)
+
+ @traceParseAction
+ def remove_duplicate_chars(tokens):
+ return ''.join(sorted(set(''.join(tokens))))
+
+ wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+ print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+
+ prints::
+
+ >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+ <<leaving remove_duplicate_chars (ret: 'dfjkls')
+ ['dfjkls']
+ """
+ f = _trim_arity(f)
+ def z(*paArgs):
+ thisFunc = f.__name__
+ s, l, t = paArgs[-3:]
+ if len(paArgs) > 3:
+ thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+ sys.stderr.write(">>entering %s(line: '%s', %d, %r)\n" % (thisFunc, line(l, s), l, t))
+ try:
+ ret = f(*paArgs)
+ except Exception as exc:
+ sys.stderr.write("<<leaving %s (exception: %s)\n" % (thisFunc, exc))
+ raise
+ sys.stderr.write("<<leaving %s (ret: %r)\n" % (thisFunc, ret))
+ return ret
+ try:
+ z.__name__ = f.__name__
+ except AttributeError:
+ pass
+ return z
+
+#
+# global helpers
+#
+def delimitedList(expr, delim=",", combine=False):
+ """Helper to define a delimited list of expressions - the delimiter
+ defaults to ','. By default, the list elements and delimiters can
+ have intervening whitespace and comments, but this can be
+ overridden by passing ``combine=True`` in the constructor. If
+ ``combine`` is set to ``True``, the matching tokens are
+ returned as a single token string, with the delimiters included;
+ otherwise, the matching tokens are returned as a list of tokens,
+ with the delimiters suppressed.
+
+ Example::
+
+ delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+ delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+ """
+ dlName = _ustr(expr) + " [" + _ustr(delim) + " " + _ustr(expr) + "]..."
+ if combine:
+ return Combine(expr + ZeroOrMore(delim + expr)).setName(dlName)
+ else:
+ return (expr + ZeroOrMore(Suppress(delim) + expr)).setName(dlName)
+
+def countedArray(expr, intExpr=None):
+ """Helper to define a counted list of expressions.
+
+ This helper defines a pattern of the form::
+
+ integer expr expr expr...
+
+ where the leading integer tells how many expr expressions follow.
+ The matched tokens are returned as a list of expr tokens - the
+ leading count token is suppressed.
+
+ If ``intExpr`` is specified, it should be a pyparsing expression
+ that produces an integer value.
+
+ Example::
+
+ countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
+
+ # in this parser, the leading integer value is given in binary,
+ # '10' indicating that 2 values are in the array
+ binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+ countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
+ """
+ arrayExpr = Forward()
+ def countFieldParseAction(s, l, t):
+ n = t[0]
+ arrayExpr << (n and Group(And([expr] * n)) or Group(empty))
+ return []
+ if intExpr is None:
+ intExpr = Word(nums).setParseAction(lambda t: int(t[0]))
+ else:
+ intExpr = intExpr.copy()
+ intExpr.setName("arrayLen")
+ intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+ return (intExpr + arrayExpr).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+ ret = []
+ for i in L:
+ if isinstance(i, list):
+ ret.extend(_flatten(i))
+ else:
+ ret.append(i)
+ return ret
+
+def matchPreviousLiteral(expr):
+ """Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks for
+ a 'repeat' of a previous expression. For example::
+
+ first = Word(nums)
+ second = matchPreviousLiteral(first)
+ matchExpr = first + ":" + second
+
+ will match ``"1:1"``, but not ``"1:2"``. Because this
+ matches a previous literal, it will also match the leading
+ ``"1:1"`` in ``"1:10"``. If this is not desired, use
+ :class:`matchPreviousExpr`. Do *not* use with packrat parsing
+ enabled.
+ """
+ rep = Forward()
+ def copyTokenToRepeater(s, l, t):
+ if t:
+ if len(t) == 1:
+ rep << t[0]
+ else:
+ # flatten t tokens
+ tflat = _flatten(t.asList())
+ rep << And(Literal(tt) for tt in tflat)
+ else:
+ rep << Empty()
+ expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+ rep.setName('(prev) ' + _ustr(expr))
+ return rep
+
+def matchPreviousExpr(expr):
+ """Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks for
+ a 'repeat' of a previous expression. For example::
+
+ first = Word(nums)
+ second = matchPreviousExpr(first)
+ matchExpr = first + ":" + second
+
+ will match ``"1:1"``, but not ``"1:2"``. Because this
+ matches by expressions, it will *not* match the leading ``"1:1"``
+ in ``"1:10"``; the expressions are evaluated first, and then
+ compared, so ``"1"`` is compared with ``"10"``. Do *not* use
+ with packrat parsing enabled.
+ """
+ rep = Forward()
+ e2 = expr.copy()
+ rep <<= e2
+ def copyTokenToRepeater(s, l, t):
+ matchTokens = _flatten(t.asList())
+ def mustMatchTheseTokens(s, l, t):
+ theseTokens = _flatten(t.asList())
+ if theseTokens != matchTokens:
+ raise ParseException('', 0, '')
+ rep.setParseAction(mustMatchTheseTokens, callDuringTry=True)
+ expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+ rep.setName('(prev) ' + _ustr(expr))
+ return rep
+
+def _escapeRegexRangeChars(s):
+ # ~ escape these chars: ^-[]
+ for c in r"\^-[]":
+ s = s.replace(c, _bslash + c)
+ s = s.replace("\n", r"\n")
+ s = s.replace("\t", r"\t")
+ return _ustr(s)
+
+def oneOf(strs, caseless=False, useRegex=True, asKeyword=False):
+ """Helper to quickly define a set of alternative Literals, and makes
+ sure to do longest-first testing when there is a conflict,
+ regardless of the input order, but returns
+ a :class:`MatchFirst` for best performance.
+
+ Parameters:
+
+ - strs - a string of space-delimited literals, or a collection of
+ string literals
+ - caseless - (default= ``False``) - treat all literals as
+ caseless
+ - useRegex - (default= ``True``) - as an optimization, will
+ generate a Regex object; otherwise, will generate
+ a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if
+ creating a :class:`Regex` raises an exception)
+ - asKeyword - (default=``False``) - enforce Keyword-style matching on the
+ generated expressions
+
+ Example::
+
+ comp_oper = oneOf("< = > <= >= !=")
+ var = Word(alphas)
+ number = Word(nums)
+ term = var | number
+ comparison_expr = term + comp_oper + term
+ print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
+
+ prints::
+
+ [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+ """
+ if isinstance(caseless, basestring):
+ warnings.warn("More than one string argument passed to oneOf, pass "
+ "choices as a list or space-delimited string", stacklevel=2)
+
+ if caseless:
+ isequal = (lambda a, b: a.upper() == b.upper())
+ masks = (lambda a, b: b.upper().startswith(a.upper()))
+ parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral
+ else:
+ isequal = (lambda a, b: a == b)
+ masks = (lambda a, b: b.startswith(a))
+ parseElementClass = Keyword if asKeyword else Literal
+
+ symbols = []
+ if isinstance(strs, basestring):
+ symbols = strs.split()
+ elif isinstance(strs, Iterable):
+ symbols = list(strs)
+ else:
+ warnings.warn("Invalid argument to oneOf, expected string or iterable",
+ SyntaxWarning, stacklevel=2)
+ if not symbols:
+ return NoMatch()
+
+ if not asKeyword:
+ # if not producing keywords, need to reorder to take care to avoid masking
+ # longer choices with shorter ones
+ i = 0
+ while i < len(symbols) - 1:
+ cur = symbols[i]
+ for j, other in enumerate(symbols[i + 1:]):
+ if isequal(other, cur):
+ del symbols[i + j + 1]
+ break
+ elif masks(cur, other):
+ del symbols[i + j + 1]
+ symbols.insert(i, other)
+ break
+ else:
+ i += 1
+
+ if not (caseless or asKeyword) and useRegex:
+ # ~ print (strs, "->", "|".join([_escapeRegexChars(sym) for sym in symbols]))
+ try:
+ if len(symbols) == len("".join(symbols)):
+ return Regex("[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols)).setName(' | '.join(symbols))
+ else:
+ return Regex("|".join(re.escape(sym) for sym in symbols)).setName(' | '.join(symbols))
+ except Exception:
+ warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+ SyntaxWarning, stacklevel=2)
+
+ # last resort, just use MatchFirst
+ return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf(key, value):
+ """Helper to easily and clearly define a dictionary by specifying
+ the respective patterns for the key and value. Takes care of
+ defining the :class:`Dict`, :class:`ZeroOrMore`, and
+ :class:`Group` tokens in the proper order. The key pattern
+ can include delimiting markers or punctuation, as long as they are
+ suppressed, thereby leaving the significant key text. The value
+ pattern can include named results, so that the :class:`Dict` results
+ can include named token fields.
+
+ Example::
+
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+ print(OneOrMore(attr_expr).parseString(text).dump())
+
+ attr_label = label
+ attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+ # similar to Dict, but simpler call format
+ result = dictOf(attr_label, attr_value).parseString(text)
+ print(result.dump())
+ print(result['shape'])
+ print(result.shape) # object attribute access works too
+ print(result.asDict())
+
+ prints::
+
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: light blue
+ - posn: upper left
+ - shape: SQUARE
+ - texture: burlap
+ SQUARE
+ SQUARE
+ {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+ """
+ return Dict(OneOrMore(Group(key + value)))
+
+def originalTextFor(expr, asString=True):
+ """Helper to return the original, untokenized text for a given
+ expression. Useful to restore the parsed fields of an HTML start
+ tag into the raw tag text itself, or to revert separate tokens with
+ intervening whitespace back to the original matching input text. By
+ default, returns a string containing the original parsed text.
+
+ If the optional ``asString`` argument is passed as
+ ``False``, then the return value is
+ a :class:`ParseResults` containing any results names that
+ were originally matched, and a single token containing the original
+ matched text from the input string. So if the expression passed to
+ :class:`originalTextFor` contains expressions with defined
+ results names, you must set ``asString`` to ``False`` if you
+ want to preserve those results name values.
+
+ Example::
+
+ src = "this is test <b> bold <i>text</i> </b> normal text "
+ for tag in ("b", "i"):
+ opener, closer = makeHTMLTags(tag)
+ patt = originalTextFor(opener + SkipTo(closer) + closer)
+ print(patt.searchString(src)[0])
+
+ prints::
+
+ ['<b> bold <i>text</i> </b>']
+ ['<i>text</i>']
+ """
+ locMarker = Empty().setParseAction(lambda s, loc, t: loc)
+ endlocMarker = locMarker.copy()
+ endlocMarker.callPreparse = False
+ matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+ if asString:
+ extractText = lambda s, l, t: s[t._original_start: t._original_end]
+ else:
+ def extractText(s, l, t):
+ t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+ matchExpr.setParseAction(extractText)
+ matchExpr.ignoreExprs = expr.ignoreExprs
+ return matchExpr
+
+def ungroup(expr):
+ """Helper to undo pyparsing's default grouping of And expressions,
+ even if all but one are non-empty.
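+
+ For illustration, a minimal sketch::
+
+ grouped = Group(Word(alphas) + Word(nums))
+ print(grouped.parseString("ab 12")) # -> [['ab', '12']]
+ print(ungroup(grouped).parseString("ab 12")) # -> ['ab', '12']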
+ """
+ return TokenConverter(expr).addParseAction(lambda t: t[0])
+
+def locatedExpr(expr):
+ """Helper to decorate a returned token with its starting and ending
+ locations in the input string.
+
+ This helper adds the following results names:
+
+ - locn_start = location where matched expression begins
+ - locn_end = location where matched expression ends
+ - value = the actual parsed results
+
+ Be careful if the input text contains ``<TAB>`` characters; you
+ may want to call :class:`ParserElement.parseWithTabs`
+
+ Example::
+
+ wd = Word(alphas)
+ for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+ print(match)
+
+ prints::
+
+ [[0, 'ljsdf', 5]]
+ [[8, 'lksdjjf', 15]]
+ [[18, 'lkkjj', 23]]
+ """
+ locator = Empty().setParseAction(lambda s, l, t: l)
+ return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty = Empty().setName("empty")
+lineStart = LineStart().setName("lineStart")
+lineEnd = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).setParseAction(lambda s, l, t: t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s, l, t: unichr(int(t[0].lstrip(r'\0x'), 16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s, l, t: unichr(int(t[0][1:], 8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group(OneOrMore(_charRange | _singleChar)).setResultsName("body") + "]"
+
+def srange(s):
+ r"""Helper to easily define string ranges for use in Word
+ construction. Borrows syntax from regexp '[]' string range
+ definitions::
+
+ srange("[0-9]") -> "0123456789"
+ srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
+ srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+
+ The input string must be enclosed in []'s, and the returned string
+ is the expanded character set joined into a single string. The
+ values enclosed in the []'s may be:
+
+ - a single character
+ - an escaped character with a leading backslash (such as ``\-``
+ or ``\]``)
+ - an escaped hex character with a leading ``'\x'``
+ (``\x21``, which is a ``'!'`` character) (``\0x##``
+ is also supported for backwards compatibility)
+ - an escaped octal character with a leading ``'\0'``
+ (``\041``, which is a ``'!'`` character)
+ - a range of any of the above, separated by a dash (``'a-z'``,
+ etc.)
+ - any combination of the above (``'aeiouy'``,
+ ``'a-zA-Z0-9_$'``, etc.)
+ """
+ _expanded = lambda p: p if not isinstance(p, ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]), ord(p[1]) + 1))
+ try:
+ return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+ except Exception:
+ return ""
+
+def matchOnlyAtCol(n):
+ """Helper method for defining parse actions that require matching at
+ a specific column in the input text.
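+
+ For illustration, a minimal sketch (column numbers are 1-based)::
+
+ # accept a number only if it begins in column 5 of its line
+ num_at_5 = Word(nums).setParseAction(matchOnlyAtCol(5))
+ print(num_at_5.searchString("1 2 3")) # -> [['3']]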
+ """
+ def verifyCol(strg, locn, toks):
+ if col(locn, strg) != n:
+ raise ParseException(strg, locn, "matched token not at column %d" % n)
+ return verifyCol
+
+def replaceWith(replStr):
+ """Helper method for common parse actions that simply return
+ a literal value. Especially useful when used with
+ :class:`transformString<ParserElement.transformString>` ().
+
+ Example::
+
+ num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+ term = na | num
+
+ OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+ """
+ return lambda s, l, t: [replStr]
+
+def removeQuotes(s, l, t):
+ """Helper parse action for removing quotation marks from parsed
+ quoted strings.
+
+ Example::
+
+ # by default, quotation marks are included in parsed results
+ quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+ # use removeQuotes to strip quotation marks from parsed results
+ quotedString.setParseAction(removeQuotes)
+ quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+ """
+ return t[0][1:-1]
+
+def tokenMap(func, *args):
+ """Helper to define a parse action by mapping a function to all
+ elements of a ParseResults list. If any additional args are passed,
+ they are forwarded to the given function as additional arguments
+ after the token, as in
+ ``hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))``,
+ which will convert the parsed data to an integer using base 16.
+
+ Example (compare the last example to the one in :class:`ParserElement.transformString`)::
+
+ hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+ hex_ints.runTests('''
+ 00 11 22 aa FF 0a 0d 1a
+ ''')
+
+ upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+ OneOrMore(upperword).runTests('''
+ my kingdom for a horse
+ ''')
+
+ wd = Word(alphas).setParseAction(tokenMap(str.title))
+ OneOrMore(wd).setParseAction(' '.join).runTests('''
+ now is the winter of our discontent made glorious summer by this sun of york
+ ''')
+
+ prints::
+
+ 00 11 22 aa FF 0a 0d 1a
+ [0, 17, 34, 170, 255, 10, 13, 26]
+
+ my kingdom for a horse
+ ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+ now is the winter of our discontent made glorious summer by this sun of york
+ ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+ """
+ def pa(s, l, t):
+ return [func(tokn, *args) for tokn in t]
+
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ pa.__name__ = func_name
+
+ return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case.
+Deprecated in favor of :class:`pyparsing_common.upcaseTokens`"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case.
+Deprecated in favor of :class:`pyparsing_common.downcaseTokens`"""
+
+def _makeTags(tagStr, xml,
+ suppress_LT=Suppress("<"),
+ suppress_GT=Suppress(">")):
+ """Internal helper to construct opening and closing tag expressions, given a tag name"""
+ if isinstance(tagStr, basestring):
+ resname = tagStr
+ tagStr = Keyword(tagStr, caseless=not xml)
+ else:
+ resname = tagStr.name
+
+ tagAttrName = Word(alphas, alphanums + "_-:")
+ if xml:
+ tagAttrValue = dblQuotedString.copy().setParseAction(removeQuotes)
+ openTag = (suppress_LT
+ + tagStr("tag")
+ + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
+ + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
+ + suppress_GT)
+ else:
+ tagAttrValue = quotedString.copy().setParseAction(removeQuotes) | Word(printables, excludeChars=">")
+ openTag = (suppress_LT
+ + tagStr("tag")
+ + Dict(ZeroOrMore(Group(tagAttrName.setParseAction(downcaseTokens)
+ + Optional(Suppress("=") + tagAttrValue))))
+ + Optional("/", default=[False])("empty").setParseAction(lambda s, l, t: t[0] == '/')
+ + suppress_GT)
+ closeTag = Combine(_L("</") + tagStr + ">", adjacent=False)
+
+ openTag.setName("<%s>" % resname)
+ # add start<tagname> results name in parse action now that ungrouped names are not reported at two levels
+ openTag.addParseAction(lambda t: t.__setitem__("start" + "".join(resname.replace(":", " ").title().split()), t.copy()))
+ closeTag = closeTag("end" + "".join(resname.replace(":", " ").title().split())).setName("</%s>" % resname)
+ openTag.tag = resname
+ closeTag.tag = resname
+ openTag.tag_body = SkipTo(closeTag())
+ return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+ """Helper to construct opening and closing tag expressions for HTML,
+ given a tag name. Matches tags in either upper or lower case,
+ attributes with namespaces and with quoted or unquoted values.
+
+ Example::
+
+ text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+ # makeHTMLTags returns pyparsing expressions for the opening and
+ # closing tags as a 2-tuple
+ a, a_end = makeHTMLTags("A")
+ link_expr = a + SkipTo(a_end)("link_text") + a_end
+
+ for link in link_expr.searchString(text):
+ # attributes in the <A> tag (like "href" shown here) are
+ # also accessible as named results
+ print(link.link_text, '->', link.href)
+
+ prints::
+
+ pyparsing -> https://github.com/pyparsing/pyparsing/wiki
+ """
+ return _makeTags(tagStr, False)
+
+def makeXMLTags(tagStr):
+ """Helper to construct opening and closing tag expressions for XML,
+ given a tag name. Matches tags only in the given upper/lower case.
+
+ Example: similar to :class:`makeHTMLTags`
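+
+    A minimal sketch (the tag name and sample markup are illustrative)::
+
+        body, body_end = makeXMLTags("body")
+        # unlike makeHTMLTags, matching here is case-sensitive
+        body_text = body + SkipTo(body_end)("content") + body_end
+        print(body_text.parseString('<body>Some text</body>').content)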
+ """
+ return _makeTags(tagStr, True)
+
+def withAttribute(*args, **attrDict):
+ """Helper to create a validating parse action to be used with start
+ tags created with :class:`makeXMLTags` or
+ :class:`makeHTMLTags`. Use ``withAttribute`` to qualify
+ a starting tag with a required attribute value, to avoid false
+ matches on common tags such as ``<TD>`` or ``<DIV>``.
+
+ Call ``withAttribute`` with a series of attribute names and
+    values. Specify the list of filter attribute names and values as:
+
+    - keyword arguments, as in ``(align="right")``,
+    - an explicit dict with the ``**`` operator, when an attribute
+      name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``, or
+    - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
+
+    For attribute names with a namespace prefix, use the dict or
+    tuple-list form. Attribute names are matched case-insensitively.
+
+ If just testing for ``class`` (with or without a namespace), use
+ :class:`withClass`.
+
+ To verify that the attribute exists, but without specifying a value,
+ pass ``withAttribute.ANY_VALUE`` as the value.
+
+ Example::
+
+ html = '''
+ <div>
+ Some text
+ <div type="grid">1 4 0 1 0</div>
+ <div type="graph">1,3 2,3 1,1</div>
+ <div>this has no type</div>
+ </div>
+
+ '''
+ div,div_end = makeHTMLTags("div")
+
+ # only match div tag having a type attribute with value "grid"
+ div_grid = div().setParseAction(withAttribute(type="grid"))
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.searchString(html):
+ print(grid_header.body)
+
+ # construct a match with any div tag having a type attribute, regardless of the value
+ div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.searchString(html):
+ print(div_header.body)
+
+ prints::
+
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ if args:
+ attrs = args[:]
+ else:
+ attrs = attrDict.items()
+ attrs = [(k, v) for k, v in attrs]
+ def pa(s, l, tokens):
+ for attrName, attrValue in attrs:
+ if attrName not in tokens:
+ raise ParseException(s, l, "no matching attribute " + attrName)
+ if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+ raise ParseException(s, l, "attribute '%s' has value '%s', must be '%s'" %
+ (attrName, tokens[attrName], attrValue))
+ return pa
+withAttribute.ANY_VALUE = object()
+
+def withClass(classname, namespace=''):
+ """Simplified version of :class:`withAttribute` when
+ matching on a div class - made difficult because ``class`` is
+ a reserved word in Python.
+
+ Example::
+
+ html = '''
+ <div>
+ Some text
+ <div class="grid">1 4 0 1 0</div>
+ <div class="graph">1,3 2,3 1,1</div>
+ <div>this &lt;div&gt; has no class</div>
+ </div>
+
+ '''
+ div,div_end = makeHTMLTags("div")
+ div_grid = div().setParseAction(withClass("grid"))
+
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.searchString(html):
+ print(grid_header.body)
+
+ div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.searchString(html):
+ print(div_header.body)
+
+ prints::
+
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ classattr = "%s:class" % namespace if namespace else "class"
+ return withAttribute(**{classattr: classname})
+
+opAssoc = SimpleNamespace()
+opAssoc.LEFT = object()
+opAssoc.RIGHT = object()
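+# opAssoc.LEFT / opAssoc.RIGHT are the associativity constants expected in
+# the opList tuples passed to infixNotation() below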
+
+def infixNotation(baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')')):
+ """Helper method for constructing grammars of expressions made up of
+ operators working in a precedence hierarchy. Operators may be unary
+ or binary, left- or right-associative. Parse actions can also be
+ attached to operator expressions. The generated parser will also
+ recognize the use of parentheses to override operator precedences
+ (see example below).
+
+ Note: if you define a deep operator list, you may see performance
+ issues when using infixNotation. See
+ :class:`ParserElement.enablePackrat` for a mechanism to potentially
+ improve your parser performance.
+
+ Parameters:
+    - baseExpr - expression representing the most basic operand to be
+      used in the nested expression hierarchy
+ - opList - list of tuples, one for each operator precedence level
+ in the expression grammar; each tuple is of the form ``(opExpr,
+ numTerms, rightLeftAssoc, parseAction)``, where:
+
+ - opExpr is the pyparsing expression for the operator; may also
+ be a string, which will be converted to a Literal; if numTerms
+ is 3, opExpr is a tuple of two expressions, for the two
+ operators separating the 3 terms
+ - numTerms is the number of terms for this operator (must be 1,
+ 2, or 3)
+    - rightLeftAssoc indicates whether the operator is right- or
+      left-associative, using the pyparsing-defined constants
+      ``opAssoc.RIGHT`` and ``opAssoc.LEFT``.
+ - parseAction is the parse action to be associated with
+ expressions matching this operator expression (the parse action
+ tuple member may be omitted); if the parse action is passed
+ a tuple or list of functions, this is equivalent to calling
+ ``setParseAction(*fn)``
+ (:class:`ParserElement.setParseAction`)
+ - lpar - expression for matching left-parentheses
+ (default= ``Suppress('(')``)
+ - rpar - expression for matching right-parentheses
+ (default= ``Suppress(')')``)
+
+ Example::
+
+ # simple example of four-function arithmetic with ints and
+ # variable names
+ integer = pyparsing_common.signed_integer
+ varname = pyparsing_common.identifier
+
+ arith_expr = infixNotation(integer | varname,
+ [
+ ('-', 1, opAssoc.RIGHT),
+ (oneOf('* /'), 2, opAssoc.LEFT),
+ (oneOf('+ -'), 2, opAssoc.LEFT),
+ ])
+
+ arith_expr.runTests('''
+ 5+3*6
+ (5+3)*6
+ -2--11
+ ''', fullDump=False)
+
+ prints::
+
+ 5+3*6
+ [[5, '+', [3, '*', 6]]]
+
+ (5+3)*6
+ [[[5, '+', 3], '*', 6]]
+
+ -2--11
+ [[['-', 2], '-', ['-', 11]]]
+ """
+ # captive version of FollowedBy that does not do parse actions or capture results names
+ class _FB(FollowedBy):
+ def parseImpl(self, instring, loc, doActions=True):
+ self.expr.tryParse(instring, loc)
+ return loc, []
+
+ ret = Forward()
+ lastExpr = baseExpr | (lpar + ret + rpar)
+ for i, operDef in enumerate(opList):
+ opExpr, arity, rightLeftAssoc, pa = (operDef + (None, ))[:4]
+ termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
+ if arity == 3:
+ if opExpr is None or len(opExpr) != 2:
+ raise ValueError(
+ "if numterms=3, opExpr must be a tuple or list of two expressions")
+ opExpr1, opExpr2 = opExpr
+ thisExpr = Forward().setName(termName)
+ if rightLeftAssoc == opAssoc.LEFT:
+ if arity == 1:
+ matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + OneOrMore(opExpr))
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(lastExpr + OneOrMore(opExpr + lastExpr))
+ else:
+ matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr + OneOrMore(lastExpr))
+ elif arity == 3:
+ matchExpr = (_FB(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr)
+ + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)))
+ else:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+ elif rightLeftAssoc == opAssoc.RIGHT:
+ if arity == 1:
+ # try to avoid LR with this extra test
+ if not isinstance(opExpr, Optional):
+ opExpr = Optional(opExpr)
+ matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(lastExpr + OneOrMore(opExpr + thisExpr))
+ else:
+ matchExpr = _FB(lastExpr + thisExpr) + Group(lastExpr + OneOrMore(thisExpr))
+ elif arity == 3:
+ matchExpr = (_FB(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
+ + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr))
+ else:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+ else:
+ raise ValueError("operator must indicate right or left associativity")
+ if pa:
+ if isinstance(pa, (tuple, list)):
+ matchExpr.setParseAction(*pa)
+ else:
+ matchExpr.setParseAction(pa)
+ thisExpr <<= (matchExpr.setName(termName) | lastExpr)
+ lastExpr = thisExpr
+ ret <<= lastExpr
+ return ret
+
+operatorPrecedence = infixNotation
+"""(Deprecated) Former name of :class:`infixNotation`, will be
+dropped in a future release."""
+
+dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"').setName("string enclosed in double quotes")
+sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("string enclosed in single quotes")
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"'
+ | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'").setName("quotedString using single or double quotes")
+unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
+
+def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
+ """Helper method for defining nested lists enclosed in opening and
+ closing delimiters ("(" and ")" are the default).
+
+ Parameters:
+ - opener - opening character for a nested list
+ (default= ``"("``); can also be a pyparsing expression
+ - closer - closing character for a nested list
+ (default= ``")"``); can also be a pyparsing expression
+ - content - expression for items within the nested lists
+ (default= ``None``)
+ - ignoreExpr - expression for ignoring opening and closing
+ delimiters (default= :class:`quotedString`)
+
+ If an expression is not provided for the content argument, the
+ nested expression will capture all whitespace-delimited content
+ between delimiters as a list of separate values.
+
+ Use the ``ignoreExpr`` argument to define expressions that may
+ contain opening or closing characters that should not be treated as
+ opening or closing characters for nesting, such as quotedString or
+ a comment expression. Specify multiple expressions using an
+ :class:`Or` or :class:`MatchFirst`. The default is
+ :class:`quotedString`, but if no expressions are to be ignored, then
+ pass ``None`` for this argument.
+
+ Example::
+
+ data_type = oneOf("void int short long char float double")
+ decl_data_type = Combine(data_type + Optional(Word('*')))
+ ident = Word(alphas+'_', alphanums+'_')
+ number = pyparsing_common.number
+ arg = Group(decl_data_type + ident)
+ LPAR, RPAR = map(Suppress, "()")
+
+ code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
+
+ c_function = (decl_data_type("type")
+ + ident("name")
+ + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ + code_body("body"))
+ c_function.ignore(cStyleComment)
+
+ source_code = '''
+ int is_odd(int x) {
+ return (x%2);
+ }
+
+ int dec_to_hex(char hchar) {
+ if (hchar >= '0' && hchar <= '9') {
+ return (ord(hchar)-ord('0'));
+ } else {
+ return (10+ord(hchar)-ord('A'));
+ }
+ }
+ '''
+ for func in c_function.searchString(source_code):
+ print("%(name)s (%(type)s) args: %(args)s" % func)
+
+
+ prints::
+
+ is_odd (int) args: [['int', 'x']]
+ dec_to_hex (int) args: [['char', 'hchar']]
+ """
+ if opener == closer:
+ raise ValueError("opening and closing strings cannot be the same")
+ if content is None:
+ if isinstance(opener, basestring) and isinstance(closer, basestring):
+ if len(opener) == 1 and len(closer) == 1:
+ if ignoreExpr is not None:
+ content = (Combine(OneOrMore(~ignoreExpr
+ + CharsNotIn(opener
+ + closer
+ + ParserElement.DEFAULT_WHITE_CHARS, exact=1)
+ )
+ ).setParseAction(lambda t: t[0].strip()))
+ else:
+ content = (empty.copy() + CharsNotIn(opener
+ + closer
+ + ParserElement.DEFAULT_WHITE_CHARS
+ ).setParseAction(lambda t: t[0].strip()))
+ else:
+ if ignoreExpr is not None:
+ content = (Combine(OneOrMore(~ignoreExpr
+ + ~Literal(opener)
+ + ~Literal(closer)
+ + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1))
+ ).setParseAction(lambda t: t[0].strip()))
+ else:
+ content = (Combine(OneOrMore(~Literal(opener)
+ + ~Literal(closer)
+ + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1))
+ ).setParseAction(lambda t: t[0].strip()))
+ else:
+ raise ValueError("opening and closing arguments must be strings if no content expression is given")
+ ret = Forward()
+ if ignoreExpr is not None:
+ ret <<= Group(Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer))
+ else:
+ ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
+ ret.setName('nested %s%s expression' % (opener, closer))
+ return ret
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+ """Helper method for defining space-delimited indentation blocks,
+ such as those used to define block statements in Python source code.
+
+ Parameters:
+
+ - blockStatementExpr - expression defining syntax of statement that
+ is repeated within the indented block
+ - indentStack - list created by caller to manage indentation stack
+ (multiple statementWithIndentedBlock expressions within a single
+ grammar should share a common indentStack)
+    - indent - boolean indicating whether the block must be indented beyond
+      the current level; set to False for a block of left-most
+      statements (default= ``True``)
+
+ A valid block must contain at least one ``blockStatement``.
+
+ Example::
+
+ data = '''
+ def A(z):
+ A1
+ B = 100
+ G = A2
+ A2
+ A3
+ B
+ def BB(a,b,c):
+ BB1
+ def BBA():
+ bba1
+ bba2
+ bba3
+ C
+ D
+ def spam(x,y):
+ def eggs(z):
+ pass
+ '''
+
+
+ indentStack = [1]
+ stmt = Forward()
+
+ identifier = Word(alphas, alphanums)
+ funcDecl = ("def" + identifier + Group("(" + Optional(delimitedList(identifier)) + ")") + ":")
+ func_body = indentedBlock(stmt, indentStack)
+ funcDef = Group(funcDecl + func_body)
+
+ rvalue = Forward()
+ funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
+ rvalue << (funcCall | identifier | Word(nums))
+ assignment = Group(identifier + "=" + rvalue)
+ stmt << (funcDef | assignment | identifier)
+
+ module_body = OneOrMore(stmt)
+
+ parseTree = module_body.parseString(data)
+ parseTree.pprint()
+
+ prints::
+
+ [['def',
+ 'A',
+ ['(', 'z', ')'],
+ ':',
+ [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+ 'B',
+ ['def',
+ 'BB',
+ ['(', 'a', 'b', 'c', ')'],
+ ':',
+ [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+ 'C',
+ 'D',
+ ['def',
+ 'spam',
+ ['(', 'x', 'y', ')'],
+ ':',
+ [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
+ """
+ backup_stack = indentStack[:]
+
+ def reset_stack():
+ indentStack[:] = backup_stack
+
+ def checkPeerIndent(s, l, t):
+ if l >= len(s): return
+ curCol = col(l, s)
+ if curCol != indentStack[-1]:
+ if curCol > indentStack[-1]:
+ raise ParseException(s, l, "illegal nesting")
+ raise ParseException(s, l, "not a peer entry")
+
+ def checkSubIndent(s, l, t):
+ curCol = col(l, s)
+ if curCol > indentStack[-1]:
+ indentStack.append(curCol)
+ else:
+ raise ParseException(s, l, "not a subentry")
+
+ def checkUnindent(s, l, t):
+ if l >= len(s): return
+ curCol = col(l, s)
+ if not(indentStack and curCol in indentStack):
+ raise ParseException(s, l, "not an unindent")
+ if curCol < indentStack[-1]:
+ indentStack.pop()
+
+ NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress(), stopOn=StringEnd())
+ INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
+ PEER = Empty().setParseAction(checkPeerIndent).setName('')
+ UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
+ if indent:
+ smExpr = Group(Optional(NL)
+ + INDENT
+ + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())
+ + UNDENT)
+ else:
+ smExpr = Group(Optional(NL)
+ + OneOrMore(PEER + Group(blockStatementExpr) + Optional(NL), stopOn=StringEnd())
+ + UNDENT)
+ smExpr.setFailAction(lambda a, b, c, d: reset_stack())
+ blockStatementExpr.ignore(_bslash + LineEnd())
+ return smExpr.setName('indented block')
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+anyOpenTag, anyCloseTag = makeHTMLTags(Word(alphas, alphanums + "_:").setName('any tag'))
+_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(), '><& "\''))
+commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
+def replaceHTMLEntity(t):
+ """Helper parser action to replace common HTML entities with their special characters"""
+ return _htmlEntityMap.get(t.entity)
+
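+# A hedged usage sketch for commonHTMLEntity with replaceHTMLEntity (the
+# sample string is illustrative):
+#
+#   text = "Black &amp; white, x &lt; y"
+#   print(commonHTMLEntity.setParseAction(replaceHTMLEntity).transformString(text))
+#   # -> 'Black & white, x < y'
+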
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
+"Comment of the form ``/* ... */``"
+
+htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
+"Comment of the form ``<!-- ... -->``"
+
+restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
+dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
+"Comment of the form ``// ... (to end of line)``"
+
+cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment")
+"Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`"
+
+javaStyleComment = cppStyleComment
+"Same as :class:`cppStyleComment`"
+
+pythonStyleComment = Regex(r"#.*").setName("Python style comment")
+"Comment of the form ``# ... (to end of line)``"
+
+_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',')
+ + Optional(Word(" \t")
+ + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem")
+commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList")
+"""(Deprecated) Predefined expression of 1 or more printable words or
+quoted strings, separated by commas.
+
+This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
+"""
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+ """Here are some common low-level expressions that may be useful in
+ jump-starting parser development:
+
+ - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
+ :class:`scientific notation<sci_real>`)
+ - common :class:`programming identifiers<identifier>`
+ - network addresses (:class:`MAC<mac_address>`,
+ :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
+ - ISO8601 :class:`dates<iso8601_date>` and
+ :class:`datetime<iso8601_datetime>`
+ - :class:`UUID<uuid>`
+ - :class:`comma-separated list<comma_separated_list>`
+
+ Parse actions:
+
+ - :class:`convertToInteger`
+ - :class:`convertToFloat`
+ - :class:`convertToDate`
+ - :class:`convertToDatetime`
+ - :class:`stripHTMLTags`
+ - :class:`upcaseTokens`
+ - :class:`downcaseTokens`
+
+ Example::
+
+ pyparsing_common.number.runTests('''
+ # any int or real number, returned as the appropriate type
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.fnumber.runTests('''
+ # any int or real number, returned as float
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.hex_integer.runTests('''
+ # hex numbers
+ 100
+ FF
+ ''')
+
+ pyparsing_common.fraction.runTests('''
+ # fractions
+ 1/2
+ -3/4
+ ''')
+
+ pyparsing_common.mixed_integer.runTests('''
+ # mixed fractions
+ 1
+ 1/2
+ -3/4
+ 1-3/4
+ ''')
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests('''
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ ''')
+
+ prints::
+
+ # any int or real number, returned as the appropriate type
+ 100
+ [100]
+
+ -100
+ [-100]
+
+ +100
+ [100]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # any int or real number, returned as float
+ 100
+ [100.0]
+
+ -100
+ [-100.0]
+
+ +100
+ [100.0]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # hex numbers
+ 100
+ [256]
+
+ FF
+ [255]
+
+ # fractions
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ # mixed fractions
+ 1
+ [1]
+
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ 1-3/4
+ [1.75]
+
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ [UUID('12345678-1234-5678-1234-567812345678')]
+ """
+
+ convertToInteger = tokenMap(int)
+ """
+ Parse action for converting parsed integers to Python int
+ """
+
+ convertToFloat = tokenMap(float)
+ """
+ Parse action for converting parsed numbers to Python float
+ """
+
+ integer = Word(nums).setName("integer").setParseAction(convertToInteger)
+ """expression that parses an unsigned integer, returns an int"""
+
+ hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int, 16))
+ """expression that parses a hexadecimal integer, returns an int"""
+
+ signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
+ """expression that parses an integer with optional leading sign, returns an int"""
+
+ fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
+ """fractional expression of an integer divided by an integer, returns a float"""
+ fraction.addParseAction(lambda t: t[0]/t[-1])
+
+ mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
+ """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+ mixed_integer.addParseAction(sum)
+
+ real = Regex(r'[+-]?(?:\d+\.\d*|\.\d+)').setName("real number").setParseAction(convertToFloat)
+ """expression that parses a floating point number and returns a float"""
+
+ sci_real = Regex(r'[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
+ """expression that parses a floating point number with optional
+ scientific notation and returns a float"""
+
+ # streamlining this expression makes the docs nicer-looking
+ number = (sci_real | real | signed_integer).streamline()
+ """any numeric expression, returns the corresponding Python type"""
+
+ fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
+ """any int or real number, returned as float"""
+
+ identifier = Word(alphas + '_', alphanums + '_').setName("identifier")
+ """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+ ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
+ "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
+
+ _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
+ _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part) * 7).setName("full IPv6 address")
+ _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6))
+ + "::"
+ + Optional(_ipv6_part + (':' + _ipv6_part) * (0, 6))
+ ).setName("short IPv6 address")
+ _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
+ _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
+ ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
+ "IPv6 address (long, short, or mixed form)"
+
+ mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
+ "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+ @staticmethod
+ def convertToDate(fmt="%Y-%m-%d"):
+ """
+ Helper to create a parse action for converting parsed date string to Python datetime.date
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
+
+ Example::
+
+ date_expr = pyparsing_common.iso8601_date.copy()
+ date_expr.setParseAction(pyparsing_common.convertToDate())
+ print(date_expr.parseString("1999-12-31"))
+
+ prints::
+
+ [datetime.date(1999, 12, 31)]
+ """
+ def cvt_fn(s, l, t):
+ try:
+ return datetime.strptime(t[0], fmt).date()
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+ return cvt_fn
+
+ @staticmethod
+ def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
+ """Helper to create a parse action for converting parsed
+ datetime string to Python datetime.datetime
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
+
+ Example::
+
+ dt_expr = pyparsing_common.iso8601_datetime.copy()
+ dt_expr.setParseAction(pyparsing_common.convertToDatetime())
+ print(dt_expr.parseString("1999-12-31T23:59:59.999"))
+
+ prints::
+
+ [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+ """
+ def cvt_fn(s, l, t):
+ try:
+ return datetime.strptime(t[0], fmt)
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+ return cvt_fn
+
+ iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
+ "ISO8601 date (``yyyy-mm-dd``)"
+
+ iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
+ "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
+
+ uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
+ "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
+
+ _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
+ @staticmethod
+ def stripHTMLTags(s, l, tokens):
+ """Parse action to remove HTML tags from web page HTML source
+
+ Example::
+
+ # strip HTML links from normal text
+ text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+ td, td_end = makeHTMLTags("TD")
+ table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
+ print(table_text.parseString(text).body)
+
+        prints::
+
+ More info at the pyparsing wiki page
+ """
+ return pyparsing_common._html_stripper.transformString(tokens[0])
+
+ _commasepitem = Combine(OneOrMore(~Literal(",")
+ + ~LineEnd()
+ + Word(printables, excludeChars=',')
+ + Optional(White(" \t")))).streamline().setName("commaItem")
+ comma_separated_list = delimitedList(Optional(quotedString.copy()
+ | _commasepitem, default='')
+ ).setName("comma separated list")
+ """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+ upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
+ """Parse action to convert tokens to upper case."""
+
+ downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
+ """Parse action to convert tokens to lower case."""
+
+
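+# Lazily-evaluated, per-class cached property: the wrapped function runs at
+# most once per class (on first attribute access) and its result is memoized
+# in that class's own _intern dict, so each subclass keeps a separate cache.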
+class _lazyclassproperty(object):
+ def __init__(self, fn):
+ self.fn = fn
+ self.__doc__ = fn.__doc__
+ self.__name__ = fn.__name__
+
+ def __get__(self, obj, cls):
+ if cls is None:
+ cls = type(obj)
+ if not hasattr(cls, '_intern') or any(cls._intern is getattr(superclass, '_intern', [])
+ for superclass in cls.__mro__[1:]):
+ cls._intern = {}
+ attrname = self.fn.__name__
+ if attrname not in cls._intern:
+ cls._intern[attrname] = self.fn(cls)
+ return cls._intern[attrname]
+
+
+class unicode_set(object):
+ """
+ A set of Unicode characters, for language-specific strings for
+ ``alphas``, ``nums``, ``alphanums``, and ``printables``.
+ A unicode_set is defined by a list of ranges in the Unicode character
+ set, in a class attribute ``_ranges``, such as::
+
+ _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
+
+ A unicode set can also be defined using multiple inheritance of other unicode sets::
+
+ class CJK(Chinese, Japanese, Korean):
+ pass
+ """
+ _ranges = []
+
+ @classmethod
+ def _get_chars_for_ranges(cls):
+ ret = []
+ for cc in cls.__mro__:
+ if cc is unicode_set:
+ break
+ for rr in cc._ranges:
+ ret.extend(range(rr[0], rr[-1] + 1))
+ return [unichr(c) for c in sorted(set(ret))]
+
+ @_lazyclassproperty
+ def printables(cls):
+ "all non-whitespace characters in this range"
+ return u''.join(filterfalse(unicode.isspace, cls._get_chars_for_ranges()))
+
+ @_lazyclassproperty
+ def alphas(cls):
+ "all alphabetic characters in this range"
+ return u''.join(filter(unicode.isalpha, cls._get_chars_for_ranges()))
+
+ @_lazyclassproperty
+ def nums(cls):
+ "all numeric digit characters in this range"
+ return u''.join(filter(unicode.isdigit, cls._get_chars_for_ranges()))
+
+ @_lazyclassproperty
+ def alphanums(cls):
+ "all alphanumeric characters in this range"
+ return cls.alphas + cls.nums
+
+
+class pyparsing_unicode(unicode_set):
+ """
+ A namespace class for defining common language unicode_sets.
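+
+    A minimal usage sketch (the sample text is illustrative)::
+
+        greek_word = Word(pyparsing_unicode.Greek.alphas)
+        print(greek_word.searchString(u"Μῆνιν ἄειδε θεά"))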
+ """
+ _ranges = [(32, sys.maxunicode)]
+
+ class Latin1(unicode_set):
+ "Unicode set for Latin-1 Unicode Character Range"
+ _ranges = [(0x0020, 0x007e), (0x00a0, 0x00ff),]
+
+ class LatinA(unicode_set):
+ "Unicode set for Latin-A Unicode Character Range"
+ _ranges = [(0x0100, 0x017f),]
+
+ class LatinB(unicode_set):
+ "Unicode set for Latin-B Unicode Character Range"
+ _ranges = [(0x0180, 0x024f),]
+
+ class Greek(unicode_set):
+ "Unicode set for Greek Unicode Character Ranges"
+ _ranges = [
+ (0x0370, 0x03ff), (0x1f00, 0x1f15), (0x1f18, 0x1f1d), (0x1f20, 0x1f45), (0x1f48, 0x1f4d),
+ (0x1f50, 0x1f57), (0x1f59,), (0x1f5b,), (0x1f5d,), (0x1f5f, 0x1f7d), (0x1f80, 0x1fb4), (0x1fb6, 0x1fc4),
+ (0x1fc6, 0x1fd3), (0x1fd6, 0x1fdb), (0x1fdd, 0x1fef), (0x1ff2, 0x1ff4), (0x1ff6, 0x1ffe),
+ ]
+
+ class Cyrillic(unicode_set):
+ "Unicode set for Cyrillic Unicode Character Range"
+ _ranges = [(0x0400, 0x04ff)]
+
+ class Chinese(unicode_set):
+ "Unicode set for Chinese Unicode Character Range"
+ _ranges = [(0x4e00, 0x9fff), (0x3000, 0x303f),]
+
+ class Japanese(unicode_set):
+ "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"
+ _ranges = []
+
+ class Kanji(unicode_set):
+ "Unicode set for Kanji Unicode Character Range"
+ _ranges = [(0x4E00, 0x9Fbf), (0x3000, 0x303f),]
+
+ class Hiragana(unicode_set):
+ "Unicode set for Hiragana Unicode Character Range"
+ _ranges = [(0x3040, 0x309f),]
+
+ class Katakana(unicode_set):
+ "Unicode set for Katakana Unicode Character Range"
+ _ranges = [(0x30a0, 0x30ff),]
+
+ class Korean(unicode_set):
+ "Unicode set for Korean Unicode Character Range"
+ _ranges = [(0xac00, 0xd7af), (0x1100, 0x11ff), (0x3130, 0x318f), (0xa960, 0xa97f), (0xd7b0, 0xd7ff), (0x3000, 0x303f),]
+
+ class CJK(Chinese, Japanese, Korean):
+ "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"
+ pass
+
+ class Thai(unicode_set):
+ "Unicode set for Thai Unicode Character Range"
+ _ranges = [(0x0e01, 0x0e3a), (0x0e3f, 0x0e5b),]
+
+ class Arabic(unicode_set):
+ "Unicode set for Arabic Unicode Character Range"
+ _ranges = [(0x0600, 0x061b), (0x061e, 0x06ff), (0x0700, 0x077f),]
+
+ class Hebrew(unicode_set):
+ "Unicode set for Hebrew Unicode Character Range"
+ _ranges = [(0x0590, 0x05ff),]
+
+ class Devanagari(unicode_set):
+ "Unicode set for Devanagari Unicode Character Range"
+ _ranges = [(0x0900, 0x097f), (0xa8e0, 0xa8ff)]
+
+pyparsing_unicode.Japanese._ranges = (pyparsing_unicode.Japanese.Kanji._ranges
+ + pyparsing_unicode.Japanese.Hiragana._ranges
+ + pyparsing_unicode.Japanese.Katakana._ranges)
+
+# define ranges in language character sets
+if PY_3:
+ setattr(pyparsing_unicode, u"العربية", pyparsing_unicode.Arabic)
+ setattr(pyparsing_unicode, u"中文", pyparsing_unicode.Chinese)
+ setattr(pyparsing_unicode, u"кириллица", pyparsing_unicode.Cyrillic)
+ setattr(pyparsing_unicode, u"Ελληνικά", pyparsing_unicode.Greek)
+ setattr(pyparsing_unicode, u"עִברִית", pyparsing_unicode.Hebrew)
+ setattr(pyparsing_unicode, u"日本語", pyparsing_unicode.Japanese)
+ setattr(pyparsing_unicode.Japanese, u"漢字", pyparsing_unicode.Japanese.Kanji)
+ setattr(pyparsing_unicode.Japanese, u"カタカナ", pyparsing_unicode.Japanese.Katakana)
+ setattr(pyparsing_unicode.Japanese, u"ひらがな", pyparsing_unicode.Japanese.Hiragana)
+ setattr(pyparsing_unicode, u"한국어", pyparsing_unicode.Korean)
+ setattr(pyparsing_unicode, u"ไทย", pyparsing_unicode.Thai)
+ setattr(pyparsing_unicode, u"देवनागरी", pyparsing_unicode.Devanagari)
+
+
+class pyparsing_test:
+ """
+ namespace class for classes useful in writing unit tests
+ """
+
+ class reset_pyparsing_context:
+ """
+ Context manager to be used when writing unit tests that modify pyparsing config values:
+ - packrat parsing
+        - default whitespace characters
+ - default keyword characters
+ - literal string auto-conversion class
+ - __diag__ settings
+
+ Example:
+ with reset_pyparsing_context():
+ # test that literals used to construct a grammar are automatically suppressed
+ ParserElement.inlineLiteralsUsing(Suppress)
+
+ term = Word(alphas) | Word(nums)
+ group = Group('(' + term[...] + ')')
+
+ # assert that the '()' characters are not included in the parsed tokens
+                self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
+
+ # after exiting context manager, literals are converted to Literal expressions again
+ """
+
+ def __init__(self):
+ self._save_context = {}
+
+ def save(self):
+ self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
+ self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
+ self._save_context[
+ "literal_string_class"
+ ] = ParserElement._literalStringClass
+ self._save_context["packrat_enabled"] = ParserElement._packratEnabled
+ self._save_context["packrat_parse"] = ParserElement._parse
+ self._save_context["__diag__"] = {
+ name: getattr(__diag__, name) for name in __diag__._all_names
+ }
+ self._save_context["__compat__"] = {
+ "collect_all_And_tokens": __compat__.collect_all_And_tokens
+ }
+ return self
+
+ def restore(self):
+ # reset pyparsing global state
+ if (
+ ParserElement.DEFAULT_WHITE_CHARS
+ != self._save_context["default_whitespace"]
+ ):
+ ParserElement.setDefaultWhitespaceChars(
+ self._save_context["default_whitespace"]
+ )
+ Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
+ ParserElement.inlineLiteralsUsing(
+ self._save_context["literal_string_class"]
+ )
+ for name, value in self._save_context["__diag__"].items():
+ setattr(__diag__, name, value)
+ ParserElement._packratEnabled = self._save_context["packrat_enabled"]
+ ParserElement._parse = self._save_context["packrat_parse"]
+ __compat__.collect_all_And_tokens = self._save_context["__compat__"]
+
+ def __enter__(self):
+ return self.save()
+
+ def __exit__(self, *args):
+ return self.restore()
+
+ class TestParseResultsAsserts:
+ """
+ A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
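+
+        A minimal sketch (the test class and input are illustrative; assumes
+        a unittest.TestCase subclass)::
+
+            class TestIntegerList(pyparsing_test.TestParseResultsAsserts, unittest.TestCase):
+                def test_integer_list(self):
+                    self.assertParseAndCheckList(
+                        OneOrMore(Word(nums)), "1 22 333", ["1", "22", "333"])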
+ """
+ def assertParseResultsEquals(
+ self, result, expected_list=None, expected_dict=None, msg=None
+ ):
+ """
+ Unit test assertion to compare a ParseResults object with an optional expected_list,
+ and compare any defined results names with an optional expected_dict.
+ """
+ if expected_list is not None:
+ self.assertEqual(expected_list, result.asList(), msg=msg)
+ if expected_dict is not None:
+ self.assertEqual(expected_dict, result.asDict(), msg=msg)
+
+ def assertParseAndCheckList(
+ self, expr, test_string, expected_list, msg=None, verbose=True
+ ):
+ """
+ Convenience wrapper assert to test a parser element and input string, and assert that
+ the resulting ParseResults.asList() is equal to the expected_list.
+ """
+ result = expr.parseString(test_string, parseAll=True)
+ if verbose:
+ print(result.dump())
+ self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
+
+ def assertParseAndCheckDict(
+ self, expr, test_string, expected_dict, msg=None, verbose=True
+ ):
+ """
+ Convenience wrapper assert to test a parser element and input string, and assert that
+ the resulting ParseResults.asDict() is equal to the expected_dict.
+ """
+ result = expr.parseString(test_string, parseAll=True)
+ if verbose:
+ print(result.dump())
+ self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
+
+ def assertRunTestResults(
+ self, run_tests_report, expected_parse_results=None, msg=None
+ ):
+ """
+ Unit test assertion to evaluate output of ParserElement.runTests(). If a list of
+ list-dict tuples is given as the expected_parse_results argument, then these are zipped
+ with the report tuples returned by runTests and evaluated using assertParseResultsEquals.
+ Finally, asserts that the overall runTests() success value is True.
+
+ :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
+ :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
+ """
+ run_test_success, run_test_results = run_tests_report
+
+ if expected_parse_results is not None:
+ merged = [
+ (rpt[0], rpt[1], expected)
+ for rpt, expected in zip(run_test_results, expected_parse_results)
+ ]
+ for test_string, result, expected in merged:
+ # expected should be a tuple containing a list and/or a dict or an exception,
+ # and optional failure message string
+ # an empty tuple will skip any result validation
+ fail_msg = next(
+ (exp for exp in expected if isinstance(exp, str)), None
+ )
+ expected_exception = next(
+ (
+ exp
+ for exp in expected
+ if isinstance(exp, type) and issubclass(exp, Exception)
+ ),
+ None,
+ )
+ if expected_exception is not None:
+ with self.assertRaises(
+ expected_exception=expected_exception, msg=fail_msg or msg
+ ):
+ if isinstance(result, Exception):
+ raise result
+ else:
+ expected_list = next(
+ (exp for exp in expected if isinstance(exp, list)), None
+ )
+ expected_dict = next(
+ (exp for exp in expected if isinstance(exp, dict)), None
+ )
+ if (expected_list, expected_dict) != (None, None):
+ self.assertParseResultsEquals(
+ result,
+ expected_list=expected_list,
+ expected_dict=expected_dict,
+ msg=fail_msg or msg,
+ )
+ else:
+ # warning here maybe?
+ print("no validation for {!r}".format(test_string))
+
+ # do this last, in case some specific test results can be reported instead
+ self.assertTrue(
+ run_test_success, msg=msg if msg is not None else "failed runTests"
+ )
+
+ @contextmanager
+ def assertRaisesParseException(self, exc_type=ParseException, msg=None):
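+            """
+            Context manager asserting that the enclosed block raises a
+            ParseException (or the given exc_type); a minimal sketch (the
+            expression and input are illustrative)::
+
+                with self.assertRaisesParseException():
+                    Word(nums).parseString("abc")
+            """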
+ with self.assertRaises(exc_type, msg=msg):
+ yield
+
+
+if __name__ == "__main__":
+
+ selectToken = CaselessLiteral("select")
+ fromToken = CaselessLiteral("from")
+
+ ident = Word(alphas, alphanums + "_$")
+
+ columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+ columnNameList = Group(delimitedList(columnName)).setName("columns")
+ columnSpec = ('*' | columnNameList)
+
+ tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+ tableNameList = Group(delimitedList(tableName)).setName("tables")
+
+ simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
+
+ # demo runTests method, including embedded comments in test string
+ simpleSQL.runTests("""
+ # '*' as column list and dotted table name
+ select * from SYS.XYZZY
+
+ # caseless match on "SELECT", and casts back to "select"
+ SELECT * from XYZZY, ABC
+
+ # list of column names, and mixed case SELECT keyword
+ Select AA,BB,CC from Sys.dual
+
+ # multiple tables
+ Select A, B, C from Sys.dual, Table2
+
+ # invalid SELECT keyword - should fail
+ Xelect A, B, C from Sys.dual
+
+ # incomplete command - should fail
+ Select
+
+ # invalid column name - should fail
+ Select ^^^ frox Sys.dual
+
+ """)
+
+ pyparsing_common.number.runTests("""
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ """)
+
+ # any int or real number, returned as float
+ pyparsing_common.fnumber.runTests("""
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ """)
+
+ pyparsing_common.hex_integer.runTests("""
+ 100
+ FF
+ """)
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests("""
+ 12345678-1234-5678-1234-567812345678
+ """)
diff --git a/third_party/python/pyrsistent/CHANGES.txt b/third_party/python/pyrsistent/CHANGES.txt
new file mode 100644
index 0000000000..603b3f2048
--- /dev/null
+++ b/third_party/python/pyrsistent/CHANGES.txt
@@ -0,0 +1,333 @@
+Revision history
+----------------
+0.16.0, 2020-03-24
+ * No major updates but Python 2 support no longer guaranteed.
+ * Fix #192, 'ignore_extra' for 'pvector_field'. Thanks @ss18 for this!
+ * Fix #191, include LICENCE in distribution. Thanks @johnthagen for this!
+ * Fix #190, minor MyPy errors. Thanks @Qhesz for this!
+
+0.15.7, 2020-01-07
+ * NOTE! This is the last version of Pyrsistent that officially supports Python 2.X!
+ * Fix #186, type errors with more recent versions of MyPy. Thanks @qhesz for this!
+ * Build and test on ARM during CI. Thanks @ossdev07 for this!
+ * Set absolute imports for python2 compatibility. Thanks @michalvi for this!
+
+0.15.6, 2019-11-23
+ * Fix #182 moduleinit name clash.
+
+0.15.5, 2019-10-27
+ * Fix #179 Fixed 'ignore_extra' factory parameter for pvector. Thanks @ss18 for this!
+
+0.15.4, 2019-07-27
+ * Fix #174, fix a GC traversal bug in pvector evolver C extension. Thanks @till-varoquaux for finding and fixing this!
+ * Fix #175, pytest 5 compatibility, this is a quick fix, some more work is needed to get coverage working etc.
+
+0.15.3, 2019-07-07
+ * Fix #172, catch all exceptions during extension build to reduce chance of corner cases that prevents installation.
+ * Fix #171, in PVector equality comparison don't assume that the other object has a length; check before calling len.
+ * Fix #168, write warning about failing build of C extension directly to stderr so that pip does not silence it.
+ * Fix #155, update PMapEvolver type stub to better reflect implementation.
+
+0.15.2, 2019-05-12
+ * Fix #166, Propagate 'ignore_extra' param in hierarchy. Thanks @ss18 for this!
+ * Fix #167, thaw typing. Thanks @nattofriends for this!
+ * Fix #154, not possible to insert empty pmap as leaf node with transform.
+
+0.15.1, 2019-04-26
+ * Fix #163, installation broken on Python 2 because of the fix for #161, thanks @vphilippon for this! Sorry for the
+ inconvenience.
+
+0.15.0, 2019-04-25
+ * Python 3.4 is no longer officially supported since it is EOL since 2019-03-18.
+ * Fix #157, major improvements to type hints. Thanks @je-l for working on this and @nattofriend for reviewing the PR!
+ * Fix #161, installation fails on some Windows platforms because fallback to Python pvector does not work.
+ Thanks @MaxTaggart for fixing and verifying this!
+
+0.14.11, 2019-02-21
+ * Fix #152 Don't use __builtin_popcount, this hopefully fixes #147 Error in pvectorc.cp37-win_amd64.pyd file, as well.
+ Thanks @benrg for this!
+ * Fix #151 Fix compatibility for hypothesis 4. Thanks @felixonmars for this!
+
+0.14.10, 2019-02-09
+ * Fix #148, only require pytest-runner if running tests. Thanks @ccorbacho for this!
+
+0.14.9, 2019-01-06
+ * Fix #144, Compile pvectormodule.c on windows. Thanks @ganwell for this!
+
+0.14.8, 2018-12-19
+ * Fix #142, Improve type stubs. Thanks @arxanas for this!
+
+0.14.7, 2018-11-20
+ * Fix #102, add PEP 561 type annotation stubs for most pyrsistent types. Thanks @nattofriends for this!
+
+0.14.6, 2018-11-17
+ * Fix #135, Type classes for Python 3 type annotations of pyrsistent types. Thanks @nattofriends for this!
+ * Fix #128, Allow PClass and PRecord to ignore input parameters to constructor that are not part of the spec
+ instead of blowing up with a type error. Thanks @agberk for this!
+
+0.14.5, 2018-10-14
+ * Fix #137, deprecation warnings in Python 3.7. Thanks @thombashi for this!
+ * Fix #129, building via setuptools and setup.py. Thanks @galuszkak for this!
+
+0.14.4, 2018-07-08
+ * Fix #133, minor Python 3.7 compatibility issue. Pyrsistent is now officially Python 3.7 compliant!
+
+v0.14.3, 2018-06-11
+ * Fix #123 regression where type names break sequence fields. Thanks @doozr for this!
+ * Fix #124 using the class name to make AttributeError on __getattr__ more informative for PRecords.
+ Thanks @neilvyas for this!
+ * Fix #125 how fields handle type arguments. Thanks @neilvyas for this!
+
+v0.14.2, 2017-12-06
+ * Fix #121, regression in PClass.set() introduced in 0.14.1.
+
+v0.14.1, 2017-11-27
+ * Equality check performance improvements for pvectors and pmaps. Thanks @dtomas for this!
+ * Avoid calling factories multiple times for fields that do not change, see PR #120 for
+   details. Thanks @teepark for this!
+
+v0.14.0, 2017-10-08
+ * Fix #117, pmap now accepts iterators as input to constructor. Thanks @Julian for this!
+ * Drop support for Python 2.6. Nothing has been done in this release that will explicitly
+ break pyrsistent for 2.6 but it will not be considered moving forward. Dropping 2.6
+ support is the reason for stepping the second decimal instead of the third.
+
+v0.13.0, 2017-09-01
+ * Fix #113, Skip field factories when loading pickled objects. There is a
+   minor backwards incompatibility in the behaviour because of this. Thanks
+   @teepark for fixing this!
+ * Fix #116, negative indexing for pdeques. Thanks @Julian for this!
+
+v0.12.3, 2017-06-04
+ * Fix #83, make it possible to use Python 3 enums as field type without having to wrap it in
+ a list or tuple. Thanks @douglas-treadwell for this!
+
+v0.12.2, 2017-05-30
+ * Fix #108, now possible to use the values in predicates to transform. Thanks @exarkus for this!
+ * Fix #107, support multiple level of __invariant__ inheritance. Thanks @exarkus for this!
+
+v0.12.1, 2017-02-26
+ * Fix #97, initialize CheckedPVector from iterator.
+ * Fix #97, cache hash value on PMap. Thanks @sarum90 for this!
+
+v0.12.0, 2017-01-06
+ * Fix #87, add function get_in() for access to elements in deeply nested structures.
+ * Fix #91, add method update() to pset and pbag.
+ * Fix #92, incorrect discard of elements in transform on pvector
+ * This is a release candidate for 1.0 as I now consider pyrsistent fairly stable.
+
+v0.11.13, 2016-04-03
+ * Fix #84, pvector segfault in CPython 3 when repr of contained object raises Exception.
+ * Update README to cover for issue described in #83.
+
+v0.11.12, 2016-02-06
+ * Minor modifications of tests to allow testing as requested in #79 and #80.
+ * Also run CI tests under python 3.5
+
+v0.11.11, 2016-01-31
+ * #78, include tests in pypi dist.
+
+v0.11.10, 2015-12-27, NOTE! This release contains a backwards incompatible change
+ despite only stepping the patch version number. See below.
+ * Implement #74, attribute access on PClass evolver
+ * Implement #75, lazily evaluated invariant messages by providing a
+ callable with no arguments.
+ * Initial values on fields can now be evaluated on object creation
+ by providing a callable with no arguments.
+
+ NOTE! If you previously had callables as initial values this change means that those
+ will be called upon object creation which may not be what you want. As
+ a temporary workaround a callable returning a callable can be used. This
+ feature and the concept of initial values will likely change slightly in the future.
+   See #77 and #76 for more information.
+
+v0.11.9, 2015-11-01
+ * Added PVector.remove(), thanks @radix for initiating this!
+
+v0.11.8, 2015-10-18
+ * Fix #66, UnicodeDecodeError when doing pip install in environments with ascii encoding as default.
+ Thanks @foolswood!
+ * Implement support for multiple types in pmap_field(), pvector_field() and pset_field(). Thanks @itamarst!
+
+v0.11.7, 2015-10-03
+ * Fix #52, occasional SEGFAULTs due to misplaced call to PyObject_GC_Track. Thanks @jkbjh for this!
+ * Fix #42, complete support for delete. Now also on the C-implementation of the PVectorEvolver.
+ Thanks @itamarst for contributing a whole bunch of Hypothesis test cases covering the evolver operations!
+
+v0.11.6, 2015-09-30
+ * Add +, -, & and | operations to PBag. Thanks @Futrell for this!
+
+v0.11.5, 2015-09-29
+ * Fix bug introduced in 0.11.4 that prevented multi level inheritance from PClass.
+ * Make PClassMeta public for friendlier subclassing
+
+v0.11.4, 2015-09-28
+ * Fix #59, make it possible to create weakrefs to all collection types.
+ Thanks @itamarst for reporting it.
+ * Fix #58, add __str__ to InvariantException. Thanks @tomprince for reporting it.
+
+v0.11.3, 2015-09-15
+ * Fix #57, support pickling of PClasses and PRecords using pmap_field, pvector_field, and pset_field.
+ Thanks @radix for reporting this and submitting a fix for it!
+
+v0.11.2, 2015-09-09
+ * Fix bug causing potential element loss when reallocating PMap. Thanks to @jml for finding
+ this and submitting a PR with a fix!
+ * Removed python 3.2 test build from Travis. There is nothing breaking 3.2 compatibility in this
+ release but there will be no effort moving forward to keep the 3.2 compatibility.
+
+v0.11.1, 2015-08-24
+ * Fix #51, PClass.set() broken when used with string+value argument.
+ * #50, make it possible to specify more than one assertion in an invariant
+ * #48, make it possible to make recursive type references by using a string
+ as type specification.
+
+v0.11.0, 2015-07-11
+ * #42, delete() function added to PVector to allow deletion of elements by index
+ and range. Will perform a full copy of the vector, no structural sharing.
+ Thanks @radix for helping out with this one!
+ * Fix #39, explicitly disallow ordering for PMap and PBag, Python 3 style
+ * Fix #37, PMap.values()/keys()/items() now returns PVectors instead of lists
+
+v0.10.3, 2015-06-13
+ * Fix #40, make it possible to disable the C extension by setting the
+ PYRSISTENT_NO_C_EXTENSION environment variable.
+
+v0.10.2, 2015-06-07
+ * Fix #38, construction from serialized object for pvector/pset/pmap fields.
+
+v0.10.1, 2015-04-27
+ * Fix broken README.rst
+
+v0.10.0, 2015-04-27
+ * New type PClass, a persistent version of a Python object. Related to issues #30 and #32.
+ Thanks @exarkun and @radix for input on this one!
+ * Rename PRecordTypeError -> PTypeError, it is now also raised by PClass
+ * New convenience functions, pvector_field, pmap_field and pset_field to create PRecord/PClass
+ fields for checked collections. Issues #26 and #36. Thanks to @itamarst for this!
+ * Removed deprecated function set_in() on PMap and PVector.
+ * Removed deprecated factory function pclass.
+ * Major internal restructuring breaking pyrsistent.py into multiple files. This should
+ not affect those only using the public interface but if you experience problems please
+ let me know.
+
+v0.9.4, 2015-04-20
+ * Fix #34, PVector now compares against built in list type
+
+v0.9.3, 2015-04-06
+ * Rename pclass back to immutable and deprecate the usage of the pclass function. PClass will be used by
+ a new, different type in upcoming releases.
+ * Documentation strings for the exceptions introduced in 0.9.2.
+
+v0.9.2, 2015-04-03
+ * More informative type errors from checked types, issue #30
+ * Support multiple optional types, issue #28
+
+v0.9.1, 2015-02-25
+ * Multi level serialization for checked types
+
+v0.9.0, 2015-02-25, Lots of new stuff in this release!
+ * Checked types, checked versions of PVector, PMap, PSet that support type and invariant specification.
+ Currently lacking proper documentation but I'm working on it.
+ * set_in() on PVector and PMap are now deprecated and will be removed in the next release.
+   Use transform() instead. set_in() has been updated to use transform() for this release;
+   this means that some corner error cases behave slightly differently than before.
+ * Refactoring of the PVector to unify the type. Should not have any user impact as long as
+ only the public interface of pyrsistent has been used. PVector is now an abstract base class
+ with which the different implementations are registered.
+ * Evolvers have been updated to return themselves for evolving operations to allow function chaining.
+ * Richer exception messages for KeyErrors and IndexErrors specifying the key/index that caused the failure.
+ Thanks @radix for this.
+ * Missing attribute on PMaps when accessing with dot-notation now raises an AttributeError instead of a
+ KeyError. Issue #21.
+ * New function decorator @mutant that freezes all input arguments to a function and the return value.
+ * Add __version__ to pyrsistent.py. Issue #23.
+ * Fix pickling for pset. Issue #24.
+
+v0.8.0, 2015-01-21
+ * New type PRecord. Subtype of PMap that allows explicit, declarative field specification. Thanks @boxed
+ for inspiration!
+ * Efficient transformations of arbitrary complexity on PMap and PVector. Thanks @boxed for inspiration!
+ * Breaking change to the evolver interface. What used to be .pvector(), .pmap() and .pset()
+ on the different evolvers has now been unified so that all evolvers have one method .persistent()
+ to produce the persistent counterpart. Sorry for any inconvenience.
+ * Removed the tests directory from the package.
+ * PMap and PSet now contains a copy-function to closer mimic the interface of the dict and set. These
+ functions will simply return a reference to self.
+ * Removed deprecated alias 'immutable' from pclass.
+
+v0.7.1, 2015-01-17
+ * Fixes #14 where a file executed (unexpectedly) during installation was not python 3 compatible.
+
+v0.7.0, 2015-01-04, No 1.0, instead a bunch of new stuff and one API breaking change to PMap.remove().
+ * Evolvers for pvector, pmap and pset to allow simple and efficient updates of multiple elements
+ in the collection. See the documentation for a closer description.
+ * New method mset on pvector to update multiple values in one operation
+ * Remove deprecated methods merge and merge_with on PMap
+ * Change behavior of PMap.remove, it will now raise a KeyError if the element is not present.
+ New method PMap.discard will instead return the original pmap if the element is not present.
+ This aligns the PMap with how things are done in the PSet and is closer to the behavior of the
+ built in counterparts.
+
+v0.6.3, 2014-11-27
+ * Python 2.6 support, thanks @wrmsr!
+ * PMap.merge/merge_with renamed to update/update_with. merge/merge_with remains but will be
+ removed for 1.0.
+ * This is a release candidate for 1.0! Please be aware that PMap.merge/merge_with and immutable()
+ will be removed for 1.0.
+
+v0.6.2, 2014-11-03
+ * Fix typo causing the pure python vector to be used even if the C implementation was
+ available. Thanks @zerc for finding it!
+
+v0.6.1, 2014-10-31
+ * Renamed 'immutable' to 'pclass' for consistency but left immutable for compatibility.
+
+v0.6.0, 2014-10-25
+ * New data structure, persistent linked list
+ * New data structure, persistent double ended queue
+
+v0.5.0, 2014-09-24
+ * New data structure, persistent bag / multiset
+ * New functions freeze and thaw to recursively convert between python
+ built in data types and corresponding pyrsistent data types.
+ * All data structures can now be pickled
+ * New function merge_in on persistent map which allows a user
+ supplied function to implement the merge strategy.
+
+v0.4.0, 2014-09-20
+ * Full Python 3 support.
+ * Immutable object implemented.
+ * Bug fixes in PVector.__repr__() and PMap.__hash__() and index check of PVector.
+ * Repr changed to be fully cut and paste compatible
+ * Changed assoc() -> set(), assoc_in() -> set_in(), massoc() -> mset().
+ Sorry for the API breaking change but I think those names are more pythonic.
+ * Improved documentation.
+
+v0.3.1, 2014-06-29
+ * assoc() on PSet renamed back to add()
+
+v0.3.0, 2014-06-28
+ * Full Sequence protocol support for PVector
+ * Full Mapping protocol support for PMap
+ * Full Set protocol support for PSet
+ * assoc_in() support for both PMap and PVector
+ * merge() support for PMap
+ * Performance improvements to the PVector C extension speed up allocation
+
+v0.2.1, 2014-06-21
+ * Supply the tests with the distribution
+
+v0.2.0, 2014-06-21
+ * New C extension with an optimized version of the persistent vector
+ * Updated API slightly
+
+v0.1.0, 2013-11-10
+ * Initial release.
+
+
+TODO (in no particular order)
+-----------------------------
+- Versioned data structure where the different versions can be accessed by index?
+- Ordered sets and maps
+- A good performance measurement suite
diff --git a/third_party/python/pyrsistent/LICENCE.mit b/third_party/python/pyrsistent/LICENCE.mit
new file mode 100644
index 0000000000..6609e4c05a
--- /dev/null
+++ b/third_party/python/pyrsistent/LICENCE.mit
@@ -0,0 +1,22 @@
+Copyright (c) 2019 Tobias Gustafsson
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without
+restriction, including without limitation the rights to use,
+copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
+HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/third_party/python/pyrsistent/MANIFEST.in b/third_party/python/pyrsistent/MANIFEST.in
new file mode 100644
index 0000000000..155c6351bb
--- /dev/null
+++ b/third_party/python/pyrsistent/MANIFEST.in
@@ -0,0 +1,5 @@
+include *.rst
+include tests/*_test.py
+include tests/memory_profiling.py
+include CHANGES.txt
+include LICENCE.mit \ No newline at end of file
diff --git a/third_party/python/pyrsistent/PKG-INFO b/third_party/python/pyrsistent/PKG-INFO
new file mode 100644
index 0000000000..1d1c159034
--- /dev/null
+++ b/third_party/python/pyrsistent/PKG-INFO
@@ -0,0 +1,742 @@
+Metadata-Version: 1.1
+Name: pyrsistent
+Version: 0.16.0
+Summary: Persistent/Functional/Immutable data structures
+Home-page: http://github.com/tobgu/pyrsistent/
+Author: Tobias Gustafsson
+Author-email: tobias.l.gustafsson@gmail.com
+License: MIT
+Description: Pyrsistent
+ ==========
+ .. image:: https://travis-ci.org/tobgu/pyrsistent.png?branch=master
+ :target: https://travis-ci.org/tobgu/pyrsistent
+
+ .. image:: https://badge.fury.io/py/pyrsistent.svg
+ :target: https://badge.fury.io/py/pyrsistent
+
+ .. image:: https://coveralls.io/repos/tobgu/pyrsistent/badge.svg?branch=master&service=github
+ :target: https://coveralls.io/github/tobgu/pyrsistent?branch=master
+
+
+ .. _Pyrthon: https://www.github.com/tobgu/pyrthon/
+
+        Pyrsistent is a library of persistent collections (referred to by some as functional
+        data structures). Persistent in the sense that they are immutable.
+
+ All methods on a data structure that would normally mutate it instead return a new copy of the structure containing the
+ requested updates. The original structure is left untouched.
+
+        This simplifies reasoning about what a program does, since no hidden side effects can ever take
+        place in these data structures. You can rest assured that the object you hold a reference to will
+        remain the same throughout its lifetime and need not worry that somewhere five stack levels below
+        you, in the darkest corner of your application, someone has decided to remove the element that you
+        expected to be there.
+
+        Pyrsistent is influenced by persistent data structures such as those found in the standard library
+        of Clojure. The data structures are designed to share common elements through path copying. It aims
+        to take these concepts and make them as pythonic as possible so that they can be easily integrated
+        into any Python program without hassle.
+
+        If you want to go all in on persistent data structures and use literal syntax to define them in
+        your code rather than function calls, check out Pyrthon_.
+
+ Examples
+ --------
+ .. _Sequence: collections_
+ .. _Hashable: collections_
+ .. _Mapping: collections_
+ .. _Mappings: collections_
+ .. _Set: collections_
+ .. _collections: https://docs.python.org/3/library/collections.abc.html
+ .. _documentation: http://pyrsistent.readthedocs.org/
+
+ The collection types and key features currently implemented are:
+
+        * PVector_, similar to a Python list
+        * PMap_, similar to dict
+        * PSet_, similar to set
+        * PRecord_, a PMap on steroids with fixed fields, optional type and invariant checking and much more
+        * PClass_, a Python class with fixed fields, optional type and invariant checking and much more
+        * `Checked collections`_, PVector, PMap and PSet with optional type and invariant checks and more
+ * PBag, similar to collections.Counter
+ * PList, a classic singly linked list
+ * PDeque, similar to collections.deque
+ * Immutable object type (immutable) built on the named tuple
+        * freeze_ and thaw_ functions to convert between Python's standard collections and pyrsistent collections.
+ * Flexible transformations_ of arbitrarily complex structures built from PMaps and PVectors.
+
+ Below are examples of common usage patterns for some of the structures and features. More information and
+ full documentation for all data structures is available in the documentation_.
+
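+        PBag, PList and PDeque have no dedicated sections below, so here is a minimal sketch of
+        their basic use:
+
+        .. code:: python
+
+            >>> from pyrsistent import pbag, plist, pdeque
+
+            # PBag keeps a count per element, similar to collections.Counter
+            >>> pbag([1, 1, 2]).count(1)
+            2
+
+            # PList is a classic singly linked list; cons prepends an element
+            >>> lst = plist([1, 2, 3])
+            >>> lst.cons(0)
+            plist([0, 1, 2, 3])
+            >>> lst.first
+            1
+
+            # PDeque supports efficient operations on both ends
+            >>> pdeque([1, 2, 3]).appendleft(0)
+            pdeque([0, 1, 2, 3])
+            >>> pdeque([1, 2, 3]).popleft()
+            pdeque([2, 3])
+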
+ .. _PVector:
+
+ PVector
+ ~~~~~~~
+        With full support for the Sequence_ protocol, PVector is meant as a drop-in replacement for the
+        built-in list from a reader's point of view. Write operations of course differ, since no in-place
+        mutation is done, but the naming should be in line with the corresponding operations on the built-in list.
+
+        Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
+
+        Appends are amortized O(1). Random access and insert are log32(n) where n is the size of the vector.
+
+ .. code:: python
+
+ >>> from pyrsistent import v, pvector
+
+ # No mutation of vectors once created, instead they
+ # are "evolved" leaving the original untouched
+ >>> v1 = v(1, 2, 3)
+ >>> v2 = v1.append(4)
+ >>> v3 = v2.set(1, 5)
+ >>> v1
+ pvector([1, 2, 3])
+ >>> v2
+ pvector([1, 2, 3, 4])
+ >>> v3
+ pvector([1, 5, 3, 4])
+
+ # Random access and slicing
+ >>> v3[1]
+ 5
+ >>> v3[1:3]
+ pvector([5, 3])
+
+ # Iteration
+ >>> list(x + 1 for x in v3)
+ [2, 6, 4, 5]
+ >>> pvector(2 * x for x in range(3))
+ pvector([0, 2, 4])
+
+ .. _PMap:
+
+ PMap
+ ~~~~
+        With full support for the Mapping_ protocol, PMap is meant as a drop-in replacement for the built-in
+        dict from a reader's point of view. Support for the Hashable_ protocol also means that it can be
+        used as a key in other Mappings_.
+
+        Random access and insert are log32(n) where n is the size of the map.
+
+ .. code:: python
+
+ >>> from pyrsistent import m, pmap, v
+
+ # No mutation of maps once created, instead they are
+ # "evolved" leaving the original untouched
+ >>> m1 = m(a=1, b=2)
+ >>> m2 = m1.set('c', 3)
+ >>> m3 = m2.set('a', 5)
+ >>> m1
+ pmap({'a': 1, 'b': 2})
+ >>> m2
+ pmap({'a': 1, 'c': 3, 'b': 2})
+ >>> m3
+ pmap({'a': 5, 'c': 3, 'b': 2})
+ >>> m3['a']
+ 5
+
+ # Evolution of nested persistent structures
+ >>> m4 = m(a=5, b=6, c=v(1, 2))
+ >>> m4.transform(('c', 1), 17)
+ pmap({'a': 5, 'c': pvector([1, 17]), 'b': 6})
+ >>> m5 = m(a=1, b=2)
+
+ # Evolve by merging with other mappings
+ >>> m5.update(m(a=2, c=3), {'a': 17, 'd': 35})
+ pmap({'a': 17, 'c': 3, 'b': 2, 'd': 35})
+ >>> pmap({'x': 1, 'y': 2}) + pmap({'y': 3, 'z': 4})
+ pmap({'y': 3, 'x': 1, 'z': 4})
+
+ # Dict-like methods to convert to list and iterate
+ >>> m3.items()
+ pvector([('a', 5), ('c', 3), ('b', 2)])
+ >>> list(m3)
+ ['a', 'c', 'b']
+
+ .. _PSet:
+
+ PSet
+ ~~~~
+        With full support for the Set_ protocol, PSet is meant as a drop-in replacement for the built-in
+        set from a reader's point of view. Support for the Hashable_ protocol also means that it can be
+        used as a key in Mappings_.
+
+        Random access and insert are log32(n) where n is the size of the set.
+
+ .. code:: python
+
+ >>> from pyrsistent import s
+
+ # No mutation of sets once created, you know the story...
+ >>> s1 = s(1, 2, 3, 2)
+ >>> s2 = s1.add(4)
+ >>> s3 = s1.remove(1)
+ >>> s1
+ pset([1, 2, 3])
+ >>> s2
+ pset([1, 2, 3, 4])
+ >>> s3
+ pset([2, 3])
+
+ # Full support for set operations
+ >>> s1 | s(3, 4, 5)
+ pset([1, 2, 3, 4, 5])
+ >>> s1 & s(3, 4, 5)
+ pset([3])
+ >>> s1 < s2
+ True
+ >>> s1 < s(3, 4, 5)
+ False
+
+ .. _PRecord:
+
+ PRecord
+ ~~~~~~~
+        A PRecord is a PMap with a fixed set of specified fields. Records are declared as Python classes
+        inheriting from PRecord. Because it is a PMap, it has full support for all Mapping methods such as
+        iteration and element access using subscript notation.
+
+ .. code:: python
+
+ >>> from pyrsistent import PRecord, field
+ >>> class ARecord(PRecord):
+ ... x = field()
+ ...
+ >>> r = ARecord(x=3)
+ >>> r
+ ARecord(x=3)
+ >>> r.x
+ 3
+ >>> r.set(x=2)
+ ARecord(x=2)
+ >>> r.set(y=2)
+ Traceback (most recent call last):
+ AttributeError: 'y' is not among the specified fields for ARecord
+
+ Type information
+ ****************
+ It is possible to add type information to the record to enforce type checks. Multiple allowed types can be specified
+ by providing an iterable of types.
+
+ .. code:: python
+
+ >>> class BRecord(PRecord):
+ ... x = field(type=int)
+ ... y = field(type=(int, type(None)))
+ ...
+ >>> BRecord(x=3, y=None)
+ BRecord(y=None, x=3)
+ >>> BRecord(x=3.0)
+ Traceback (most recent call last):
+ PTypeError: Invalid type for field BRecord.x, was float
+
+
+        Custom types (classes) that are iterable should be wrapped in a tuple to prevent their
+        members from being added to the set of valid types. Enums in particular, though, are now
+        supported without wrapping; see #83 for more information.
+
+ Mandatory fields
+ ****************
+        Fields are not mandatory by default but can be specified as such. If mandatory fields are missing,
+        an *InvariantException* that contains information about the missing fields will be thrown.
+
+ .. code:: python
+
+ >>> from pyrsistent import InvariantException
+ >>> class CRecord(PRecord):
+ ... x = field(mandatory=True)
+ ...
+ >>> r = CRecord(x=3)
+ >>> try:
+ ... r.discard('x')
+ ... except InvariantException as e:
+ ... print(e.missing_fields)
+ ...
+ ('CRecord.x',)
+
+ Invariants
+ **********
+        It is possible to add invariants that must hold when evolving the record. Invariants can be
+        specified on both the field and the record level. If invariants fail, an *InvariantException* that
+        contains information about the failing invariants will be thrown. An invariant function should
+        return a tuple consisting of a boolean that tells whether the invariant holds and an object
+        describing the invariant. This object can later be used to identify which invariant failed.
+
+        The global invariant function is only executed if all field invariants hold.
+
+        Global invariants are inherited by subclasses.
+
+ .. code:: python
+
+ >>> class RestrictedVector(PRecord):
+ ... __invariant__ = lambda r: (r.y >= r.x, 'x larger than y')
+ ... x = field(invariant=lambda x: (x > 0, 'x negative'))
+ ... y = field(invariant=lambda y: (y > 0, 'y negative'))
+ ...
+ >>> r = RestrictedVector(y=3, x=2)
+ >>> try:
+ ... r.set(x=-1, y=-2)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ ('y negative', 'x negative')
+ >>> try:
+ ... r.set(x=2, y=1)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ ('x larger than y',)
+
+        Invariants may also contain multiple assertions. For those cases the invariant function should
+        return a tuple of invariant tuples as described above. This structure is reflected in the
+        invariant_errors attribute of the exception, which will contain tuples with data from all failed
+        invariants. E.g.:
+
+ .. code:: python
+
+ >>> class EvenX(PRecord):
+ ... x = field(invariant=lambda x: ((x > 0, 'x negative'), (x % 2 == 0, 'x odd')))
+ ...
+ >>> try:
+ ... EvenX(x=-1)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ (('x negative', 'x odd'),)
+
+
+ Factories
+ *********
+        It's possible to specify factory functions for fields. The factory function receives whatever is
+        supplied as the field value, and the value returned by the factory is assigned to the field, given
+        that any type and invariant checks hold.
+        PRecords have a default factory specified as a static function on the class, create(). It takes
+        a *Mapping* as argument and returns an instance of the specific record.
+        If a record has fields of type PRecord, the create() method of that record will
+        be called to create the "sub record" if no factory has explicitly been specified to override
+        this behaviour.
+
+ .. code:: python
+
+ >>> class DRecord(PRecord):
+ ... x = field(factory=int)
+ ...
+ >>> class ERecord(PRecord):
+ ... d = field(type=DRecord)
+ ...
+ >>> ERecord.create({'d': {'x': '1'}})
+ ERecord(d=DRecord(x=1))
+
+ Collection fields
+ *****************
+ It is also possible to have fields with ``pyrsistent`` collections.
+
+ .. code:: python
+
+ >>> from pyrsistent import pset_field, pmap_field, pvector_field
+ >>> class MultiRecord(PRecord):
+ ... set_of_ints = pset_field(int)
+ ... map_int_to_str = pmap_field(int, str)
+ ... vector_of_strs = pvector_field(str)
+ ...
+
+ Serialization
+ *************
+ PRecords support serialization back to dicts. Default serialization will take keys and values
+ "as is" and output them into a dict. It is possible to specify custom serialization functions
+ to take care of fields that require special treatment.
+
+ .. code:: python
+
+ >>> from datetime import date
+ >>> class Person(PRecord):
+ ... name = field(type=unicode)
+ ... birth_date = field(type=date,
+ ... serializer=lambda format, d: d.strftime(format['date']))
+ ...
+ >>> john = Person(name=u'John', birth_date=date(1985, 10, 21))
+ >>> john.serialize({'date': '%Y-%m-%d'})
+ {'birth_date': '1985-10-21', 'name': u'John'}
+
+
+ .. _instar: https://github.com/boxed/instar/
+
+ .. _PClass:
+
+ PClass
+ ~~~~~~
+        A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python
+        classes inheriting from PClass. A PClass is defined the same way a PRecord is and behaves like one
+        in all aspects except that it is not a PMap, and hence not a collection, but rather a plain Python object.
+
+ .. code:: python
+
+ >>> from pyrsistent import PClass, field
+ >>> class AClass(PClass):
+ ... x = field()
+ ...
+ >>> a = AClass(x=3)
+ >>> a
+ AClass(x=3)
+ >>> a.x
+ 3
+
+
+ Checked collections
+ ~~~~~~~~~~~~~~~~~~~
+ Checked collections currently come in three flavors: CheckedPVector, CheckedPMap and CheckedPSet.
+
+ .. code:: python
+
+ >>> from pyrsistent import CheckedPVector, CheckedPMap, CheckedPSet, thaw
+ >>> class Positives(CheckedPSet):
+ ... __type__ = (long, int)
+ ... __invariant__ = lambda n: (n >= 0, 'Negative')
+ ...
+ >>> class Lottery(PRecord):
+ ... name = field(type=str)
+ ... numbers = field(type=Positives, invariant=lambda p: (len(p) > 0, 'No numbers'))
+ ...
+ >>> class Lotteries(CheckedPVector):
+ ... __type__ = Lottery
+ ...
+ >>> class LotteriesByDate(CheckedPMap):
+ ... __key_type__ = date
+ ... __value_type__ = Lotteries
+ ...
+ >>> lotteries = LotteriesByDate.create({date(2015, 2, 15): [{'name': 'SuperLotto', 'numbers': {1, 2, 3}},
+ ... {'name': 'MegaLotto', 'numbers': {4, 5, 6}}],
+ ... date(2015, 2, 16): [{'name': 'SuperLotto', 'numbers': {3, 2, 1}},
+ ... {'name': 'MegaLotto', 'numbers': {6, 5, 4}}]})
+ >>> lotteries
+ LotteriesByDate({datetime.date(2015, 2, 15): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')]), datetime.date(2015, 2, 16): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])})
+
+ # The checked versions support all operations that the corresponding
+ # unchecked types do
+ >>> lottery_0215 = lotteries[date(2015, 2, 15)]
+ >>> lottery_0215.transform([0, 'name'], 'SuperDuperLotto')
+ Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperDuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])
+
+            # But it also asserts that types and invariants hold
+ >>> lottery_0215.transform([0, 'name'], 999)
+ Traceback (most recent call last):
+ PTypeError: Invalid type for field Lottery.name, was int
+
+ >>> lottery_0215.transform([0, 'numbers'], set())
+ Traceback (most recent call last):
+ InvariantException: Field invariant failed
+
+ # They can be converted back to python built ins with either thaw()
+ # or serialize() (which provides possibilities to customize serialization)
+ >>> thaw(lottery_0215)
+ [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
+ >>> lottery_0215.serialize()
+ [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
+
+ .. _transformations:
+
+ Transformations
+ ~~~~~~~~~~~~~~~
+        Transformations are inspired by the cool library instar_ for Clojure. They let you evolve PMaps and
+        PVectors with arbitrarily deep/complex nesting using a simple and flexible matching syntax.
+
+        The first argument to a transformation is the path that points out the value to transform. The
+        second is the transformation to perform. If the transformation is callable it will be applied
+        to the value(s) matching the path. The path may also contain callables. In that case they are
+        treated as matchers; if a matcher returns True for a specific key, that key is considered for transformation.
+
+ .. code:: python
+
+ # Basic examples
+ >>> from pyrsistent import inc, freeze, thaw, rex, ny, discard
+ >>> v1 = freeze([1, 2, 3, 4, 5])
+ >>> v1.transform([2], inc)
+ pvector([1, 2, 4, 4, 5])
+ >>> v1.transform([lambda ix: 0 < ix < 4], 8)
+ pvector([1, 8, 8, 8, 5])
+ >>> v1.transform([lambda ix, v: ix == 0 or v == 5], 0)
+ pvector([0, 2, 3, 4, 0])
+
+ # The (a)ny matcher can be used to match anything
+ >>> v1.transform([ny], 8)
+ pvector([8, 8, 8, 8, 8])
+
+ # Regular expressions can be used for matching
+ >>> scores = freeze({'John': 12, 'Joseph': 34, 'Sara': 23})
+ >>> scores.transform([rex('^Jo')], 0)
+ pmap({'Joseph': 0, 'Sara': 23, 'John': 0})
+
+ # Transformations can be done on arbitrarily deep structures
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ # When nothing has been transformed the original data structure is kept
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+
+ # There is a special transformation that can be used to discard elements. Also
+ # multiple transformations can be applied in one call
+ >>> thaw(news_paper.transform(['weather'], discard, ['articles', ny, 'content'], discard))
+ {'articles': [{'author': 'Sara'}, {'author': 'Steve'}]}
+
+ Evolvers
+ ~~~~~~~~
+        PVector, PMap and PSet all support a concept dubbed *evolvers*. An evolver acts like a mutable
+        view of the underlying persistent data structure with "transaction like" semantics. No updates of
+        the original data structure are ever performed; it remains fully immutable.
+
+        The evolvers have a very limited API by design, to discourage excessive and inappropriate usage
+        that would take us down the mutable road. In principle only basic mutation and element access
+        functions are supported. Check out the documentation_ of each data structure for specific examples.
+
+ Examples of when you may want to use an evolver instead of working directly with the data structure include:
+
+        * Multiple updates are done to the same data structure and the intermediate results are of no
+          interest. In this case an evolver may be more efficient and easier to work with.
+ * You need to pass a vector into a legacy function or a function that you have no control
+ over which performs in place mutations. In this case pass an evolver instance
+ instead and then create a new pvector from the evolver once the function returns.
+
+ .. code:: python
+
+ >>> from pyrsistent import v
+
+ # In place mutation as when working with the built in counterpart
+ >>> v1 = v(1, 2, 3)
+ >>> e = v1.evolver()
+ >>> e[1] = 22
+ >>> e = e.append(4)
+ >>> e = e.extend([5, 6])
+ >>> e[5] += 1
+ >>> len(e)
+ 6
+
+ # The evolver is considered *dirty* when it contains changes compared to the underlying vector
+ >>> e.is_dirty()
+ True
+
+ # But the underlying pvector still remains untouched
+ >>> v1
+ pvector([1, 2, 3])
+
+ # Once satisfied with the updates you can produce a new pvector containing the updates.
+ # The new pvector will share data with the original pvector in the same way that would have
+ # been done if only using operations on the pvector.
+ >>> v2 = e.persistent()
+ >>> v2
+ pvector([1, 22, 3, 4, 5, 7])
+
+ # The evolver is now no longer considered *dirty* as it contains no differences compared to the
+ # pvector just produced.
+ >>> e.is_dirty()
+ False
+
+ # You may continue to work with the same evolver without affecting the content of v2
+ >>> e[0] = 11
+
+ # Or create a new evolver from v2. The two evolvers can be updated independently but will both
+ # share data with v2 where possible.
+ >>> e2 = v2.evolver()
+ >>> e2[0] = 1111
+ >>> e.persistent()
+ pvector([11, 22, 3, 4, 5, 7])
+ >>> e2.persistent()
+ pvector([1111, 22, 3, 4, 5, 7])
+
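+        As a minimal sketch of the second use case listed above, assume a hypothetical helper
+        ``legacy_fill()`` that mutates its argument in place:
+
+        .. code:: python
+
+            >>> from pyrsistent import v
+
+            >>> def legacy_fill(seq):
+            ...     # Hypothetical legacy code that assigns in place
+            ...     for i in range(len(seq)):
+            ...         seq[i] = 0
+            ...
+            >>> v4 = v(1, 2, 3)
+            >>> ev = v4.evolver()
+            >>> legacy_fill(ev)
+            >>> ev.persistent()
+            pvector([0, 0, 0])
+            >>> v4
+            pvector([1, 2, 3])
+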
+ .. _freeze:
+ .. _thaw:
+
+ freeze and thaw
+ ~~~~~~~~~~~~~~~
+ These functions are great when your cozy immutable world has to interact with the evil mutable world outside.
+
+ .. code:: python
+
+ >>> from pyrsistent import freeze, thaw, v, m
+ >>> freeze([1, {'a': 3}])
+ pvector([1, pmap({'a': 3})])
+ >>> thaw(v(1, m(a=3)))
+ [1, {'a': 3}]
+
+ Compatibility
+ -------------
+
+ Pyrsistent is developed and tested on Python 2.7, 3.5, 3.6, 3.7 and PyPy (Python 2 and 3 compatible). It will most
+ likely work on all other versions >= 3.4 but no guarantees are given. :)
+
+ Compatibility issues
+ ~~~~~~~~~~~~~~~~~~~~
+
+ .. _27: https://github.com/tobgu/pyrsistent/issues/27
+
+        There is currently one known compatibility issue when comparing built-in sets and frozensets to
+        PSets, as discussed in 27_. It affects Python 2 versions < 2.7.8 and Python 3 versions < 3.4.0 and
+        is due to a bug described in http://bugs.python.org/issue8743.
+
+        Comparisons will fail or be incorrect when using the set/frozenset as the left-hand side of the
+        comparison. As a workaround you need to either upgrade Python to a more recent version, avoid
+        comparing sets/frozensets with PSets, or always make sure to convert both sides of the comparison
+        to the same type before performing the comparison, as sketched below.
+
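+        A minimal sketch of the workaround, converting both sides of the comparison to the
+        same type first:
+
+        .. code:: python
+
+            >>> from pyrsistent import pset
+            >>> ps = pset([1, 2, 3])
+            >>> fs = frozenset([1, 2, 3])
+
+            # On affected versions, fs == ps may misbehave with the built-in type on
+            # the left-hand side; comparing values of the same type is always safe.
+            >>> set(ps) == fs
+            True
+            >>> ps == pset(fs)
+            True
+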
+ Performance
+ -----------
+
+        Pyrsistent is developed with performance in mind. Still, while some operations are nearly on par
+        with their built-in, mutable counterparts in terms of speed, other operations are slower. Where
+        attempts at optimization have been made, speed has generally been valued over space.
+
+        Pyrsistent comes with two API-compatible flavors of PVector (on which PMap and PSet are based): one
+        pure Python implementation and one implemented as a C extension. The latter is generally 2-20 times
+        faster than the former. The C extension will be used automatically when possible.
+
+        The pure Python implementation is fully PyPy compatible. Running it under PyPy speeds operations up
+        considerably if the structures are used heavily (once JITed); in some cases the performance is
+        almost on par with the built-in counterparts.
+
+ Type hints
+ ----------
+
+ PEP 561 style type hints for use with mypy and various editors are available for most types and functions in pyrsistent.
+
+ Type classes for annotating your own code with pyrsistent types are also available under pyrsistent.typing.
+
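+        A minimal sketch of annotating code with the generic aliases from pyrsistent.typing
+        (the helper index_by_length below is purely illustrative):
+
+        .. code:: python
+
+            from pyrsistent import pmap, pvector
+            from pyrsistent.typing import PMap, PVector
+
+            # Group words by length; the annotations use pyrsistent.typing,
+            # the runtime values use the ordinary constructors.
+            def index_by_length(words: PVector[str]) -> PMap[int, PVector[str]]:
+                result: PMap[int, PVector[str]] = pmap()
+                for word in words:
+                    bucket = result.get(len(word), pvector())
+                    result = result.set(len(word), bucket.append(word))
+                return result
+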
+ Installation
+ ------------
+
+ pip install pyrsistent
+
+ Documentation
+ -------------
+
+ Available at http://pyrsistent.readthedocs.org/
+
+ Brief presentation available at http://slides.com/tobiasgustafsson/immutability-and-python/
+
+ Contributors
+ ------------
+
+ Tobias Gustafsson https://github.com/tobgu
+
+ Christopher Armstrong https://github.com/radix
+
+ Anders Hovmöller https://github.com/boxed
+
+ Itamar Turner-Trauring https://github.com/itamarst
+
+ Jonathan Lange https://github.com/jml
+
+ Richard Futrell https://github.com/Futrell
+
+ Jakob Hollenstein https://github.com/jkbjh
+
+ David Honour https://github.com/foolswood
+
+ David R. MacIver https://github.com/DRMacIver
+
+ Marcus Ewert https://github.com/sarum90
+
+ Jean-Paul Calderone https://github.com/exarkun
+
+ Douglas Treadwell https://github.com/douglas-treadwell
+
+ Travis Parker https://github.com/teepark
+
+ Julian Berman https://github.com/Julian
+
+ Dennis Tomas https://github.com/dtomas
+
+ Neil Vyas https://github.com/neilvyas
+
+ doozr https://github.com/doozr
+
+ Kamil Galuszka https://github.com/galuszkak
+
+ Tsuyoshi Hombashi https://github.com/thombashi
+
+ nattofriends https://github.com/nattofriends
+
+ agberk https://github.com/agberk
+
+ Waleed Khan https://github.com/arxanas
+
+ Jean-Louis Fuchs https://github.com/ganwell
+
+ Carlos Corbacho https://github.com/ccorbacho
+
+ Felix Yan https://github.com/felixonmars
+
+ benrg https://github.com/benrg
+
+ Jere Lahelma https://github.com/je-l
+
+ Max Taggart https://github.com/MaxTaggart
+
+ Vincent Philippon https://github.com/vphilippon
+
+ Semen Zhydenko https://github.com/ss18
+
+ Till Varoquaux https://github.com/till-varoquaux
+
+ Michal Kowalik https://github.com/michalvi
+
+ ossdev07 https://github.com/ossdev07
+
+ Kerry Olesen https://github.com/qhesz
+
+ johnthagen https://github.com/johnthagen
+
+ Contributing
+ ------------
+
+ Want to contribute? That's great! If you experience problems please log them on GitHub. If you want to contribute code,
+ please fork the repository and submit a pull request.
+
+ Run tests
+ ~~~~~~~~~
+ .. _tox: https://tox.readthedocs.io/en/latest/
+
+ Tests can be executed using tox_.
+
+ Install tox: ``pip install tox``
+
+ Run test for Python 2.7: ``tox -epy27``
+
+ Release
+ ~~~~~~~
+ * Update CHANGES.txt
+ * Update README with any new contributors and potential info needed.
+ * Update _pyrsistent_version.py
+ * python setup.py sdist upload
+ * Commit and tag with new version: git add -u . && git commit -m 'Prepare version vX.Y.Z' && git tag -a vX.Y.Z -m 'vX.Y.Z'
+ * Push commit and tags: git push && git push --tags
+
+ Project status
+ --------------
+ Pyrsistent can be considered stable and mature (who knows, there may even be a 1.0 some day :-)). The project is
+ maintained, bugs fixed, PRs reviewed and merged and new releases made. I currently do not have time for development
+ of new features or functionality which I don't have use for myself. I'm more than happy to take PRs for new
+ functionality though!
+
+        There are a bunch of issues marked with ``enhancement`` and ``help wanted`` that contain requests
+        for new functionality that would be nice to include. The level of difficulty and extent of the
+        issues vary; please reach out to me if you're interested in working on any of them.
+
+ If you feel that you have a grand master plan for where you would like Pyrsistent to go and have the time to put into
+ it please don't hesitate to discuss this with me and submit PRs for it. If all goes well I'd be more than happy to add
+ additional maintainers to the project!
+
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: PyPy
diff --git a/third_party/python/pyrsistent/README b/third_party/python/pyrsistent/README
new file mode 100644
index 0000000000..a4c24e49bd
--- /dev/null
+++ b/third_party/python/pyrsistent/README
@@ -0,0 +1,725 @@
+Pyrsistent
+==========
+.. image:: https://travis-ci.org/tobgu/pyrsistent.png?branch=master
+ :target: https://travis-ci.org/tobgu/pyrsistent
+
+.. image:: https://badge.fury.io/py/pyrsistent.svg
+ :target: https://badge.fury.io/py/pyrsistent
+
+.. image:: https://coveralls.io/repos/tobgu/pyrsistent/badge.svg?branch=master&service=github
+ :target: https://coveralls.io/github/tobgu/pyrsistent?branch=master
+
+
+.. _Pyrthon: https://www.github.com/tobgu/pyrthon/
+
+Pyrsistent is a library of persistent collections (referred to by some as functional data structures).
+Persistent in the sense that they are immutable.
+
+All methods on a data structure that would normally mutate it instead return a new copy of the structure containing the
+requested updates. The original structure is left untouched.
+
+This simplifies reasoning about what a program does, since no hidden side effects can ever take place in
+these data structures. You can rest assured that the object you hold a reference to will remain the same
+throughout its lifetime and need not worry that somewhere five stack levels below you, in the darkest
+corner of your application, someone has decided to remove the element that you expected to be there.
+
+Pyrsistent is influenced by persistent data structures such as those found in the standard library of
+Clojure. The data structures are designed to share common elements through path copying. It aims to take
+these concepts and make them as pythonic as possible so that they can be easily integrated into any
+Python program without hassle.
+
+If you want to go all in on persistent data structures and use literal syntax to define them in your code
+rather than function calls, check out Pyrthon_.
+
+Examples
+--------
+.. _Sequence: collections_
+.. _Hashable: collections_
+.. _Mapping: collections_
+.. _Mappings: collections_
+.. _Set: collections_
+.. _collections: https://docs.python.org/3/library/collections.abc.html
+.. _documentation: http://pyrsistent.readthedocs.org/
+
+The collection types and key features currently implemented are:
+
+* PVector_, similar to a Python list
+* PMap_, similar to dict
+* PSet_, similar to set
+* PRecord_, a PMap on steroids with fixed fields, optional type and invariant checking and much more
+* PClass_, a Python class with fixed fields, optional type and invariant checking and much more
+* `Checked collections`_, PVector, PMap and PSet with optional type and invariant checks and more
+* PBag, similar to collections.Counter
+* PList, a classic singly linked list
+* PDeque, similar to collections.deque
+* Immutable object type (immutable) built on the named tuple
+* freeze_ and thaw_ functions to convert between Python's standard collections and pyrsistent collections.
+* Flexible transformations_ of arbitrarily complex structures built from PMaps and PVectors.
+
+Below are examples of common usage patterns for some of the structures and features. More information and
+full documentation for all data structures is available in the documentation_.
+
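+PBag, PList and PDeque have no dedicated sections below, so here is a minimal sketch of their basic use:
+
+.. code:: python
+
+    >>> from pyrsistent import pbag, plist, pdeque
+
+    # PBag keeps a count per element, similar to collections.Counter
+    >>> pbag([1, 1, 2]).count(1)
+    2
+
+    # PList is a classic singly linked list; cons prepends an element
+    >>> lst = plist([1, 2, 3])
+    >>> lst.cons(0)
+    plist([0, 1, 2, 3])
+    >>> lst.first
+    1
+
+    # PDeque supports efficient operations on both ends
+    >>> pdeque([1, 2, 3]).appendleft(0)
+    pdeque([0, 1, 2, 3])
+    >>> pdeque([1, 2, 3]).popleft()
+    pdeque([2, 3])
+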
+.. _PVector:
+
+PVector
+~~~~~~~
+With full support for the Sequence_ protocol, PVector is meant as a drop-in replacement for the built-in
+list from a reader's point of view. Write operations of course differ, since no in-place mutation is done,
+but the naming should be in line with the corresponding operations on the built-in list.
+
+Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
+
+Appends are amortized O(1). Random access and insert are log32(n) where n is the size of the vector.
+
+.. code:: python
+
+ >>> from pyrsistent import v, pvector
+
+ # No mutation of vectors once created, instead they
+ # are "evolved" leaving the original untouched
+ >>> v1 = v(1, 2, 3)
+ >>> v2 = v1.append(4)
+ >>> v3 = v2.set(1, 5)
+ >>> v1
+ pvector([1, 2, 3])
+ >>> v2
+ pvector([1, 2, 3, 4])
+ >>> v3
+ pvector([1, 5, 3, 4])
+
+ # Random access and slicing
+ >>> v3[1]
+ 5
+ >>> v3[1:3]
+ pvector([5, 3])
+
+ # Iteration
+ >>> list(x + 1 for x in v3)
+ [2, 6, 4, 5]
+ >>> pvector(2 * x for x in range(3))
+ pvector([0, 2, 4])
+
+.. _PMap:
+
+PMap
+~~~~
+With full support for the Mapping_ protocol, PMap is meant as a drop-in replacement for the built-in dict
+from a reader's point of view. Support for the Hashable_ protocol also means that it can be used as a key
+in other Mappings_.
+
+Random access and insert are log32(n) where n is the size of the map.
+
+.. code:: python
+
+ >>> from pyrsistent import m, pmap, v
+
+ # No mutation of maps once created, instead they are
+ # "evolved" leaving the original untouched
+ >>> m1 = m(a=1, b=2)
+ >>> m2 = m1.set('c', 3)
+ >>> m3 = m2.set('a', 5)
+ >>> m1
+ pmap({'a': 1, 'b': 2})
+ >>> m2
+ pmap({'a': 1, 'c': 3, 'b': 2})
+ >>> m3
+ pmap({'a': 5, 'c': 3, 'b': 2})
+ >>> m3['a']
+ 5
+
+ # Evolution of nested persistent structures
+ >>> m4 = m(a=5, b=6, c=v(1, 2))
+ >>> m4.transform(('c', 1), 17)
+ pmap({'a': 5, 'c': pvector([1, 17]), 'b': 6})
+ >>> m5 = m(a=1, b=2)
+
+ # Evolve by merging with other mappings
+ >>> m5.update(m(a=2, c=3), {'a': 17, 'd': 35})
+ pmap({'a': 17, 'c': 3, 'b': 2, 'd': 35})
+ >>> pmap({'x': 1, 'y': 2}) + pmap({'y': 3, 'z': 4})
+ pmap({'y': 3, 'x': 1, 'z': 4})
+
+ # Dict-like methods to convert to list and iterate
+ >>> m3.items()
+ pvector([('a', 5), ('c', 3), ('b', 2)])
+ >>> list(m3)
+ ['a', 'c', 'b']
+
+.. _PSet:
+
+PSet
+~~~~
+With full support for the Set_ protocol, PSet is meant as a drop-in replacement for the built-in set from
+a reader's point of view. Support for the Hashable_ protocol also means that it can be used as a key in
+Mappings_.
+
+Random access and insert are log32(n) where n is the size of the set.
+
+.. code:: python
+
+ >>> from pyrsistent import s
+
+ # No mutation of sets once created, you know the story...
+ >>> s1 = s(1, 2, 3, 2)
+ >>> s2 = s1.add(4)
+ >>> s3 = s1.remove(1)
+ >>> s1
+ pset([1, 2, 3])
+ >>> s2
+ pset([1, 2, 3, 4])
+ >>> s3
+ pset([2, 3])
+
+ # Full support for set operations
+ >>> s1 | s(3, 4, 5)
+ pset([1, 2, 3, 4, 5])
+ >>> s1 & s(3, 4, 5)
+ pset([3])
+ >>> s1 < s2
+ True
+ >>> s1 < s(3, 4, 5)
+ False
+
+.. _PRecord:
+
+PRecord
+~~~~~~~
+A PRecord is a PMap with a fixed set of specified fields. Records are declared as Python classes
+inheriting from PRecord. Because it is a PMap, it has full support for all Mapping methods such as
+iteration and element access using subscript notation.
+
+.. code:: python
+
+ >>> from pyrsistent import PRecord, field
+ >>> class ARecord(PRecord):
+ ... x = field()
+ ...
+ >>> r = ARecord(x=3)
+ >>> r
+ ARecord(x=3)
+ >>> r.x
+ 3
+ >>> r.set(x=2)
+ ARecord(x=2)
+ >>> r.set(y=2)
+ Traceback (most recent call last):
+ AttributeError: 'y' is not among the specified fields for ARecord
+
+Type information
+****************
+It is possible to add type information to the record to enforce type checks. Multiple allowed types can be specified
+by providing an iterable of types.
+
+.. code:: python
+
+ >>> class BRecord(PRecord):
+ ... x = field(type=int)
+ ... y = field(type=(int, type(None)))
+ ...
+ >>> BRecord(x=3, y=None)
+ BRecord(y=None, x=3)
+ >>> BRecord(x=3.0)
+ Traceback (most recent call last):
+ PTypeError: Invalid type for field BRecord.x, was float
+
+
+Custom types (classes) that are iterable should be wrapped in a tuple to prevent their
+members from being added to the set of valid types. Enums in particular, though, are now
+supported without wrapping; see #83 for more information.
+
+Mandatory fields
+****************
+Fields are not mandatory by default but can be specified as such. If mandatory fields are missing, an
+*InvariantException* that contains information about the missing fields will be thrown.
+
+.. code:: python
+
+ >>> from pyrsistent import InvariantException
+ >>> class CRecord(PRecord):
+ ... x = field(mandatory=True)
+ ...
+ >>> r = CRecord(x=3)
+ >>> try:
+ ... r.discard('x')
+ ... except InvariantException as e:
+ ... print(e.missing_fields)
+ ...
+ ('CRecord.x',)
+
+Invariants
+**********
+It is possible to add invariants that must hold when evolving the record. Invariants can be
+specified on both the field and the record level. If invariants fail, an *InvariantException* that
+contains information about the failing invariants will be thrown. An invariant function should
+return a tuple consisting of a boolean that tells whether the invariant holds and an object
+describing the invariant. This object can later be used to identify which invariant failed.
+
+The global invariant function is only executed if all field invariants hold.
+
+Global invariants are inherited by subclasses.
+
+.. code:: python
+
+ >>> class RestrictedVector(PRecord):
+ ... __invariant__ = lambda r: (r.y >= r.x, 'x larger than y')
+ ... x = field(invariant=lambda x: (x > 0, 'x negative'))
+ ... y = field(invariant=lambda y: (y > 0, 'y negative'))
+ ...
+ >>> r = RestrictedVector(y=3, x=2)
+ >>> try:
+ ... r.set(x=-1, y=-2)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ ('y negative', 'x negative')
+ >>> try:
+ ... r.set(x=2, y=1)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ ('x larger than y',)
+
+Invariants may also contain multiple assertions. For those cases the invariant function should
+return a tuple of invariant tuples as described above. This structure is reflected in the
+invariant_errors attribute of the exception, which will contain tuples with data from all failed
+invariants. E.g.:
+
+.. code:: python
+
+ >>> class EvenX(PRecord):
+ ... x = field(invariant=lambda x: ((x > 0, 'x negative'), (x % 2 == 0, 'x odd')))
+ ...
+ >>> try:
+ ... EvenX(x=-1)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ (('x negative', 'x odd'),)
+
+
+Factories
+*********
+It's possible to specify factory functions for fields. The factory function receives whatever is
+supplied as the field value, and the value returned by the factory is assigned to the field, given
+that any type and invariant checks hold.
+PRecords have a default factory specified as a static function on the class, create(). It takes
+a *Mapping* as argument and returns an instance of the specific record.
+If a record has fields of type PRecord, the create() method of that record will
+be called to create the "sub record" if no factory has explicitly been specified to override
+this behaviour.
+
+.. code:: python
+
+ >>> class DRecord(PRecord):
+ ... x = field(factory=int)
+ ...
+ >>> class ERecord(PRecord):
+ ... d = field(type=DRecord)
+ ...
+ >>> ERecord.create({'d': {'x': '1'}})
+ ERecord(d=DRecord(x=1))
+
+Collection fields
+*****************
+It is also possible to have fields with ``pyrsistent`` collections.
+
+.. code:: python
+
+ >>> from pyrsistent import pset_field, pmap_field, pvector_field
+ >>> class MultiRecord(PRecord):
+ ... set_of_ints = pset_field(int)
+ ... map_int_to_str = pmap_field(int, str)
+ ... vector_of_strs = pvector_field(str)
+ ...
+
+Serialization
+*************
+PRecords support serialization back to dicts. Default serialization will take keys and values
+"as is" and output them into a dict. It is possible to specify custom serialization functions
+to take care of fields that require special treatment.
+
+.. code:: python
+
+ >>> from datetime import date
+ >>> class Person(PRecord):
+ ... name = field(type=unicode)
+ ... birth_date = field(type=date,
+ ... serializer=lambda format, d: d.strftime(format['date']))
+ ...
+ >>> john = Person(name=u'John', birth_date=date(1985, 10, 21))
+ >>> john.serialize({'date': '%Y-%m-%d'})
+ {'birth_date': '1985-10-21', 'name': u'John'}
+
+
+.. _instar: https://github.com/boxed/instar/
+
+.. _PClass:
+
+PClass
+~~~~~~
+A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python classes
+inheriting from PClass. A PClass is defined the same way a PRecord is and behaves like one in all aspects
+except that it is not a PMap, and hence not a collection, but rather a plain Python object.
+
+.. code:: python
+
+ >>> from pyrsistent import PClass, field
+ >>> class AClass(PClass):
+ ... x = field()
+ ...
+ >>> a = AClass(x=3)
+ >>> a
+ AClass(x=3)
+ >>> a.x
+ 3
+
+
+Checked collections
+~~~~~~~~~~~~~~~~~~~
+Checked collections currently come in three flavors: CheckedPVector, CheckedPMap and CheckedPSet.
+
+.. code:: python
+
+ >>> from pyrsistent import CheckedPVector, CheckedPMap, CheckedPSet, thaw
+ >>> class Positives(CheckedPSet):
+ ... __type__ = (long, int)
+ ... __invariant__ = lambda n: (n >= 0, 'Negative')
+ ...
+ >>> class Lottery(PRecord):
+ ... name = field(type=str)
+ ... numbers = field(type=Positives, invariant=lambda p: (len(p) > 0, 'No numbers'))
+ ...
+ >>> class Lotteries(CheckedPVector):
+ ... __type__ = Lottery
+ ...
+ >>> class LotteriesByDate(CheckedPMap):
+ ... __key_type__ = date
+ ... __value_type__ = Lotteries
+ ...
+ >>> lotteries = LotteriesByDate.create({date(2015, 2, 15): [{'name': 'SuperLotto', 'numbers': {1, 2, 3}},
+ ... {'name': 'MegaLotto', 'numbers': {4, 5, 6}}],
+ ... date(2015, 2, 16): [{'name': 'SuperLotto', 'numbers': {3, 2, 1}},
+ ... {'name': 'MegaLotto', 'numbers': {6, 5, 4}}]})
+ >>> lotteries
+ LotteriesByDate({datetime.date(2015, 2, 15): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')]), datetime.date(2015, 2, 16): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])})
+
+ # The checked versions support all operations that the corresponding
+ # unchecked types do
+ >>> lottery_0215 = lotteries[date(2015, 2, 15)]
+ >>> lottery_0215.transform([0, 'name'], 'SuperDuperLotto')
+ Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperDuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])
+
+    # But it also asserts that types and invariants hold
+ >>> lottery_0215.transform([0, 'name'], 999)
+ Traceback (most recent call last):
+ PTypeError: Invalid type for field Lottery.name, was int
+
+ >>> lottery_0215.transform([0, 'numbers'], set())
+ Traceback (most recent call last):
+ InvariantException: Field invariant failed
+
+ # They can be converted back to python built ins with either thaw()
+ # or serialize() (which provides possibilities to customize serialization)
+ >>> thaw(lottery_0215)
+ [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
+ >>> lottery_0215.serialize()
+ [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
+
+.. _transformations:
+
+Transformations
+~~~~~~~~~~~~~~~
+Transformations are inspired by the cool library instar_ for Clojure. They let you evolve PMaps and
+PVectors with arbitrarily deep/complex nesting using a simple and flexible matching syntax.
+
+The first argument to a transformation is the path that points out the value to transform. The
+second is the transformation to perform. If the transformation is callable it will be applied
+to the value(s) matching the path. The path may also contain callables. In that case they are
+treated as matchers; if a matcher returns True for a specific key, that key is considered for transformation.
+
+.. code:: python
+
+ # Basic examples
+ >>> from pyrsistent import inc, freeze, thaw, rex, ny, discard
+ >>> v1 = freeze([1, 2, 3, 4, 5])
+ >>> v1.transform([2], inc)
+ pvector([1, 2, 4, 4, 5])
+ >>> v1.transform([lambda ix: 0 < ix < 4], 8)
+ pvector([1, 8, 8, 8, 5])
+ >>> v1.transform([lambda ix, v: ix == 0 or v == 5], 0)
+ pvector([0, 2, 3, 4, 0])
+
+ # The (a)ny matcher can be used to match anything
+ >>> v1.transform([ny], 8)
+ pvector([8, 8, 8, 8, 8])
+
+ # Regular expressions can be used for matching
+ >>> scores = freeze({'John': 12, 'Joseph': 34, 'Sara': 23})
+ >>> scores.transform([rex('^Jo')], 0)
+ pmap({'Joseph': 0, 'Sara': 23, 'John': 0})
+
+ # Transformations can be done on arbitrarily deep structures
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ # When nothing has been transformed the original data structure is kept
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+
+ # There is a special transformation that can be used to discard elements. Also
+ # multiple transformations can be applied in one call
+ >>> thaw(news_paper.transform(['weather'], discard, ['articles', ny, 'content'], discard))
+ {'articles': [{'author': 'Sara'}, {'author': 'Steve'}]}
+
+Evolvers
+~~~~~~~~
+PVector, PMap and PSet all support a concept dubbed *evolvers*. An evolver acts like a mutable
+view of the underlying persistent data structure with "transaction like" semantics. No updates of the
+original data structure are ever performed; it remains fully immutable.
+
+The evolvers have a very limited API by design, to discourage excessive and inappropriate usage that
+would take us down the mutable road. In principle only basic mutation and element access functions are
+supported. Check out the documentation_ of each data structure for specific examples.
+
+Examples of when you may want to use an evolver instead of working directly with the data structure include:
+
+* Multiple updates are done to the same data structure and the intermediate results are of no
+  interest. In this case an evolver may be more efficient and easier to work with.
+* You need to pass a vector into a legacy function or a function that you have no control
+ over which performs in place mutations. In this case pass an evolver instance
+ instead and then create a new pvector from the evolver once the function returns.
+
+.. code:: python
+
+ >>> from pyrsistent import v
+
+ # In place mutation as when working with the built in counterpart
+ >>> v1 = v(1, 2, 3)
+ >>> e = v1.evolver()
+ >>> e[1] = 22
+ >>> e = e.append(4)
+ >>> e = e.extend([5, 6])
+ >>> e[5] += 1
+ >>> len(e)
+ 6
+
+ # The evolver is considered *dirty* when it contains changes compared to the underlying vector
+ >>> e.is_dirty()
+ True
+
+ # But the underlying pvector still remains untouched
+ >>> v1
+ pvector([1, 2, 3])
+
+ # Once satisfied with the updates you can produce a new pvector containing the updates.
+ # The new pvector will share data with the original pvector in the same way that would have
+ # been done if only using operations on the pvector.
+ >>> v2 = e.persistent()
+ >>> v2
+ pvector([1, 22, 3, 4, 5, 7])
+
+ # The evolver is now no longer considered *dirty* as it contains no differences compared to the
+ # pvector just produced.
+ >>> e.is_dirty()
+ False
+
+ # You may continue to work with the same evolver without affecting the content of v2
+ >>> e[0] = 11
+
+ # Or create a new evolver from v2. The two evolvers can be updated independently but will both
+ # share data with v2 where possible.
+ >>> e2 = v2.evolver()
+ >>> e2[0] = 1111
+ >>> e.persistent()
+ pvector([11, 22, 3, 4, 5, 7])
+ >>> e2.persistent()
+ pvector([1111, 22, 3, 4, 5, 7])
+
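+As a minimal sketch of the second use case listed above, assume a hypothetical helper
+``legacy_fill()`` that mutates its argument in place:
+
+.. code:: python
+
+    >>> from pyrsistent import v
+
+    >>> def legacy_fill(seq):
+    ...     # Hypothetical legacy code that assigns in place
+    ...     for i in range(len(seq)):
+    ...         seq[i] = 0
+    ...
+    >>> v4 = v(1, 2, 3)
+    >>> ev = v4.evolver()
+    >>> legacy_fill(ev)
+    >>> ev.persistent()
+    pvector([0, 0, 0])
+    >>> v4
+    pvector([1, 2, 3])
+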
+.. _freeze:
+.. _thaw:
+
+freeze and thaw
+~~~~~~~~~~~~~~~
+These functions are great when your cozy immutable world has to interact with the evil mutable world outside.
+
+.. code:: python
+
+ >>> from pyrsistent import freeze, thaw, v, m
+ >>> freeze([1, {'a': 3}])
+ pvector([1, pmap({'a': 3})])
+ >>> thaw(v(1, m(a=3)))
+ [1, {'a': 3}]
+
+Compatibility
+-------------
+
+Pyrsistent is developed and tested on Python 2.7, 3.5, 3.6, 3.7 and PyPy (Python 2 and 3 compatible). It will most
+likely work on all other versions >= 3.4 but no guarantees are given. :)
+
+Compatibility issues
+~~~~~~~~~~~~~~~~~~~~
+
+.. _27: https://github.com/tobgu/pyrsistent/issues/27
+
+There is currently one known compatibility issue when comparing built-in sets and frozensets to PSets,
+as discussed in 27_. It affects Python 2 versions < 2.7.8 and Python 3 versions < 3.4.0 and is due to a
+bug described in http://bugs.python.org/issue8743.
+
+Comparisons will fail or be incorrect when using the set/frozenset as the left-hand side of the
+comparison. As a workaround you need to either upgrade Python to a more recent version, avoid comparing
+sets/frozensets with PSets, or always make sure to convert both sides of the comparison to the same type
+before performing the comparison, as sketched below.
+
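+A minimal sketch of the workaround, converting both sides of the comparison to the same type first:
+
+.. code:: python
+
+    >>> from pyrsistent import pset
+    >>> ps = pset([1, 2, 3])
+    >>> fs = frozenset([1, 2, 3])
+
+    # On affected versions, fs == ps may misbehave with the built-in type on
+    # the left-hand side; comparing values of the same type is always safe.
+    >>> set(ps) == fs
+    True
+    >>> ps == pset(fs)
+    True
+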
+Performance
+-----------
+
+Pyrsistent is developed with performance in mind. Still, while some operations are nearly on par with
+their built-in, mutable counterparts in terms of speed, other operations are slower. Where attempts at
+optimization have been made, speed has generally been valued over space.
+
+Pyrsistent comes with two API-compatible flavors of PVector (on which PMap and PSet are based): one pure
+Python implementation and one implemented as a C extension. The latter is generally 2-20 times faster
+than the former. The C extension will be used automatically when possible.
+
+The pure Python implementation is fully PyPy compatible. Running it under PyPy speeds operations up
+considerably if the structures are used heavily (once JITed); in some cases the performance is almost on
+par with the built-in counterparts.
+
+Type hints
+----------
+
+PEP 561 style type hints for use with mypy and various editors are available for most types and functions in pyrsistent.
+
+Type classes for annotating your own code with pyrsistent types are also available under pyrsistent.typing.
+
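+A minimal sketch of annotating code with the generic aliases from pyrsistent.typing
+(the helper index_by_length below is purely illustrative):
+
+.. code:: python
+
+    from pyrsistent import pmap, pvector
+    from pyrsistent.typing import PMap, PVector
+
+    # Group words by length; the annotations use pyrsistent.typing,
+    # the runtime values use the ordinary constructors.
+    def index_by_length(words: PVector[str]) -> PMap[int, PVector[str]]:
+        result: PMap[int, PVector[str]] = pmap()
+        for word in words:
+            bucket = result.get(len(word), pvector())
+            result = result.set(len(word), bucket.append(word))
+        return result
+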
+Installation
+------------
+
+pip install pyrsistent
+
+Documentation
+-------------
+
+Available at http://pyrsistent.readthedocs.org/
+
+Brief presentation available at http://slides.com/tobiasgustafsson/immutability-and-python/
+
+Contributors
+------------
+
+Tobias Gustafsson https://github.com/tobgu
+
+Christopher Armstrong https://github.com/radix
+
+Anders Hovmöller https://github.com/boxed
+
+Itamar Turner-Trauring https://github.com/itamarst
+
+Jonathan Lange https://github.com/jml
+
+Richard Futrell https://github.com/Futrell
+
+Jakob Hollenstein https://github.com/jkbjh
+
+David Honour https://github.com/foolswood
+
+David R. MacIver https://github.com/DRMacIver
+
+Marcus Ewert https://github.com/sarum90
+
+Jean-Paul Calderone https://github.com/exarkun
+
+Douglas Treadwell https://github.com/douglas-treadwell
+
+Travis Parker https://github.com/teepark
+
+Julian Berman https://github.com/Julian
+
+Dennis Tomas https://github.com/dtomas
+
+Neil Vyas https://github.com/neilvyas
+
+doozr https://github.com/doozr
+
+Kamil Galuszka https://github.com/galuszkak
+
+Tsuyoshi Hombashi https://github.com/thombashi
+
+nattofriends https://github.com/nattofriends
+
+agberk https://github.com/agberk
+
+Waleed Khan https://github.com/arxanas
+
+Jean-Louis Fuchs https://github.com/ganwell
+
+Carlos Corbacho https://github.com/ccorbacho
+
+Felix Yan https://github.com/felixonmars
+
+benrg https://github.com/benrg
+
+Jere Lahelma https://github.com/je-l
+
+Max Taggart https://github.com/MaxTaggart
+
+Vincent Philippon https://github.com/vphilippon
+
+Semen Zhydenko https://github.com/ss18
+
+Till Varoquaux https://github.com/till-varoquaux
+
+Michal Kowalik https://github.com/michalvi
+
+ossdev07 https://github.com/ossdev07
+
+Kerry Olesen https://github.com/qhesz
+
+johnthagen https://github.com/johnthagen
+
+Contributing
+------------
+
+Want to contribute? That's great! If you experience problems please log them on GitHub. If you want to contribute code,
+please fork the repository and submit a pull request.
+
+Run tests
+~~~~~~~~~
+.. _tox: https://tox.readthedocs.io/en/latest/
+
+Tests can be executed using tox_.
+
+Install tox: ``pip install tox``
+
+Run test for Python 2.7: ``tox -epy27``
+
+Release
+~~~~~~~
+* Update CHANGES.txt
+* Update README with any new contributors and potential info needed.
+* Update _pyrsistent_version.py
+* python setup.py sdist upload
+* Commit and tag with new version: git add -u . && git commit -m 'Prepare version vX.Y.Z' && git tag -a vX.Y.Z -m 'vX.Y.Z'
+* Push commit and tags: git push && git push --tags
+
+Project status
+--------------
+Pyrsistent can be considered stable and mature (who knows, there may even be a 1.0 some day :-)). The project is
+maintained, bugs fixed, PRs reviewed and merged and new releases made. I currently do not have time for development
+of new features or functionality which I don't have use for myself. I'm more than happy to take PRs for new
+functionality though!
+
+There are a bunch of issues marked with ``enhancement`` and ``help wanted`` that contain requests for new
+functionality that would be nice to include. The level of difficulty and extent of the issues vary; please
+reach out to me if you're interested in working on any of them.
+
+If you feel that you have a grand master plan for where you would like Pyrsistent to go and have the time to put into
+it please don't hesitate to discuss this with me and submit PRs for it. If all goes well I'd be more than happy to add
+additional maintainers to the project!
diff --git a/third_party/python/pyrsistent/README.rst b/third_party/python/pyrsistent/README.rst
new file mode 100644
index 0000000000..a4c24e49bd
--- /dev/null
+++ b/third_party/python/pyrsistent/README.rst
@@ -0,0 +1,725 @@
+Pyrsistent
+==========
+.. image:: https://travis-ci.org/tobgu/pyrsistent.png?branch=master
+ :target: https://travis-ci.org/tobgu/pyrsistent
+
+.. image:: https://badge.fury.io/py/pyrsistent.svg
+ :target: https://badge.fury.io/py/pyrsistent
+
+.. image:: https://coveralls.io/repos/tobgu/pyrsistent/badge.svg?branch=master&service=github
+ :target: https://coveralls.io/github/tobgu/pyrsistent?branch=master
+
+
+.. _Pyrthon: https://www.github.com/tobgu/pyrthon/
+
+Pyrsistent is a library of persistent collections (referred to by some as functional data structures).
+Persistent in the sense that they are immutable.
+
+All methods on a data structure that would normally mutate it instead return a new copy of the structure containing the
+requested updates. The original structure is left untouched.
+
+This simplifies reasoning about what a program does, since no hidden side effects can ever take place in
+these data structures. You can rest assured that the object you hold a reference to will remain the same
+throughout its lifetime and need not worry that somewhere five stack levels below you, in the darkest
+corner of your application, someone has decided to remove the element that you expected to be there.
+
+Pyrsistent is influenced by persistent data structures such as those found in the standard library of
+Clojure. The data structures are designed to share common elements through path copying. It aims to take
+these concepts and make them as pythonic as possible so that they can be easily integrated into any
+Python program without hassle.
+
+If you want to go all in on persistent data structures and use literal syntax to define them in your code
+rather than function calls, check out Pyrthon_.
+
+Examples
+--------
+.. _Sequence: collections_
+.. _Hashable: collections_
+.. _Mapping: collections_
+.. _Mappings: collections_
+.. _Set: collections_
+.. _collections: https://docs.python.org/3/library/collections.abc.html
+.. _documentation: http://pyrsistent.readthedocs.org/
+
+The collection types and key features currently implemented are:
+
+* PVector_, similar to a Python list
+* PMap_, similar to dict
+* PSet_, similar to set
+* PRecord_, a PMap on steroids with fixed fields, optional type and invariant checking and much more
+* PClass_, a Python class with fixed fields, optional type and invariant checking and much more
+* `Checked collections`_, PVector, PMap and PSet with optional type and invariant checks and more
+* PBag, similar to collections.Counter
+* PList, a classic singly linked list
+* PDeque, similar to collections.deque
+* Immutable object type (immutable) built on the named tuple
+* freeze_ and thaw_ functions to convert between Python's standard collections and pyrsistent collections.
+* Flexible transformations_ of arbitrarily complex structures built from PMaps and PVectors.
+
+Below are examples of common usage patterns for some of the structures and features. More information and
+full documentation for all data structures is available in the documentation_.
+
+.. _PVector:
+
+PVector
+~~~~~~~
+With full support for the Sequence_ protocol PVector is meant as a drop-in replacement for the built-in list from a reader's
+point of view. Write operations of course differ since no in-place mutation is done, but naming should be in line
+with corresponding operations on the built-in list.
+
+Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
+
+Appends are amortized O(1). Random access and insert are log32(n) where n is the size of the vector.
+
+.. code:: python
+
+ >>> from pyrsistent import v, pvector
+
+ # No mutation of vectors once created, instead they
+ # are "evolved" leaving the original untouched
+ >>> v1 = v(1, 2, 3)
+ >>> v2 = v1.append(4)
+ >>> v3 = v2.set(1, 5)
+ >>> v1
+ pvector([1, 2, 3])
+ >>> v2
+ pvector([1, 2, 3, 4])
+ >>> v3
+ pvector([1, 5, 3, 4])
+
+ # Random access and slicing
+ >>> v3[1]
+ 5
+ >>> v3[1:3]
+ pvector([5, 3])
+
+ # Iteration
+ >>> list(x + 1 for x in v3)
+ [2, 6, 4, 5]
+ >>> pvector(2 * x for x in range(3))
+ pvector([0, 2, 4])
+
+.. _PMap:
+
+PMap
+~~~~
+With full support for the Mapping_ protocol PMap is meant as a drop-in replacement for the built-in dict from a reader's point
+of view. Support for the Hashable_ protocol also means that it can be used as a key in other Mappings_.
+
+Random access and insert are log32(n) where n is the size of the map.
+
+.. code:: python
+
+ >>> from pyrsistent import m, pmap, v
+
+ # No mutation of maps once created, instead they are
+ # "evolved" leaving the original untouched
+ >>> m1 = m(a=1, b=2)
+ >>> m2 = m1.set('c', 3)
+ >>> m3 = m2.set('a', 5)
+ >>> m1
+ pmap({'a': 1, 'b': 2})
+ >>> m2
+ pmap({'a': 1, 'c': 3, 'b': 2})
+ >>> m3
+ pmap({'a': 5, 'c': 3, 'b': 2})
+ >>> m3['a']
+ 5
+
+ # Evolution of nested persistent structures
+ >>> m4 = m(a=5, b=6, c=v(1, 2))
+ >>> m4.transform(('c', 1), 17)
+ pmap({'a': 5, 'c': pvector([1, 17]), 'b': 6})
+ >>> m5 = m(a=1, b=2)
+
+ # Evolve by merging with other mappings
+ >>> m5.update(m(a=2, c=3), {'a': 17, 'd': 35})
+ pmap({'a': 17, 'c': 3, 'b': 2, 'd': 35})
+ >>> pmap({'x': 1, 'y': 2}) + pmap({'y': 3, 'z': 4})
+ pmap({'y': 3, 'x': 1, 'z': 4})
+
+ # Dict-like methods to convert to list and iterate
+ >>> m3.items()
+ pvector([('a', 5), ('c', 3), ('b', 2)])
+ >>> list(m3)
+ ['a', 'c', 'b']
+
+.. _PSet:
+
+PSet
+~~~~
+With full support for the Set_ protocol PSet is meant as a drop-in replacement for the built-in set from a reader's point
+of view. Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
+
+Random access and insert are log32(n) where n is the size of the set.
+
+.. code:: python
+
+ >>> from pyrsistent import s
+
+ # No mutation of sets once created, you know the story...
+ >>> s1 = s(1, 2, 3, 2)
+ >>> s2 = s1.add(4)
+ >>> s3 = s1.remove(1)
+ >>> s1
+ pset([1, 2, 3])
+ >>> s2
+ pset([1, 2, 3, 4])
+ >>> s3
+ pset([2, 3])
+
+ # Full support for set operations
+ >>> s1 | s(3, 4, 5)
+ pset([1, 2, 3, 4, 5])
+ >>> s1 & s(3, 4, 5)
+ pset([3])
+ >>> s1 < s2
+ True
+ >>> s1 < s(3, 4, 5)
+ False
+
+.. _PRecord:
+
+PRecord
+~~~~~~~
+A PRecord is a PMap with a fixed set of specified fields. Records are declared as Python classes inheriting
+from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
+access using subscript notation.
+
+.. code:: python
+
+ >>> from pyrsistent import PRecord, field
+ >>> class ARecord(PRecord):
+ ... x = field()
+ ...
+ >>> r = ARecord(x=3)
+ >>> r
+ ARecord(x=3)
+ >>> r.x
+ 3
+ >>> r.set(x=2)
+ ARecord(x=2)
+ >>> r.set(y=2)
+ Traceback (most recent call last):
+ AttributeError: 'y' is not among the specified fields for ARecord
+
+Type information
+****************
+It is possible to add type information to the record to enforce type checks. Multiple allowed types can be specified
+by providing an iterable of types.
+
+.. code:: python
+
+ >>> class BRecord(PRecord):
+ ... x = field(type=int)
+ ... y = field(type=(int, type(None)))
+ ...
+ >>> BRecord(x=3, y=None)
+ BRecord(y=None, x=3)
+ >>> BRecord(x=3.0)
+ Traceback (most recent call last):
+ PTypeError: Invalid type for field BRecord.x, was float
+
+
+Custom types (classes) that are iterable should be wrapped in a tuple to prevent their
+members from being added to the set of valid types. Enums in particular are now
+supported without wrapping, though; see #83 for more information.
+
+Mandatory fields
+****************
+Fields are not mandatory by default but can be specified as such. If mandatory fields are missing, an
+*InvariantException* will be thrown which contains information about the missing fields.
+
+.. code:: python
+
+ >>> from pyrsistent import InvariantException
+ >>> class CRecord(PRecord):
+ ... x = field(mandatory=True)
+ ...
+ >>> r = CRecord(x=3)
+ >>> try:
+ ... r.discard('x')
+ ... except InvariantException as e:
+ ... print(e.missing_fields)
+ ...
+ ('CRecord.x',)
+
+Invariants
+**********
+It is possible to add invariants that must hold when evolving the record. Invariants can be
+specified on both field and record level. If invariants fail, an *InvariantException* will be
+thrown which contains information about the failing invariants. An invariant function should
+return a tuple consisting of a boolean that tells whether the invariant holds or not and an object
+describing the invariant. This object can later be used to identify which invariant failed.
+
+The global invariant function is only executed if all field invariants hold.
+
+Global invariants are inherited by subclasses.
+
+.. code:: python
+
+ >>> class RestrictedVector(PRecord):
+ ... __invariant__ = lambda r: (r.y >= r.x, 'x larger than y')
+ ... x = field(invariant=lambda x: (x > 0, 'x negative'))
+ ... y = field(invariant=lambda y: (y > 0, 'y negative'))
+ ...
+ >>> r = RestrictedVector(y=3, x=2)
+ >>> try:
+ ... r.set(x=-1, y=-2)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ ('y negative', 'x negative')
+ >>> try:
+ ... r.set(x=2, y=1)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ ('x larger than y',)
+
+Invariants may also contain multiple assertions. For those cases the invariant function should
+return a tuple of invariant tuples as described above. This structure is reflected in the
+invariant_errors attribute of the exception, which will contain tuples with data from all failed
+invariants. E.g.:
+
+.. code:: python
+
+ >>> class EvenX(PRecord):
+ ... x = field(invariant=lambda x: ((x > 0, 'x negative'), (x % 2 == 0, 'x odd')))
+ ...
+ >>> try:
+ ... EvenX(x=-1)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ (('x negative', 'x odd'),)
+
+
+Factories
+*********
+It's possible to specify factory functions for fields. The factory function receives whatever
+is supplied as the field value, and the value actually returned by the factory is assigned to the field,
+given that any type and invariant checks hold.
+PRecords have a default factory specified as a static function on the class, create(). It takes
+a *Mapping* as argument and returns an instance of the specific record.
+If a record has fields of type PRecord the create() method of that record will
+be called to create the "sub record" if no factory has explicitly been specified to override
+this behaviour.
+
+.. code:: python
+
+ >>> class DRecord(PRecord):
+ ... x = field(factory=int)
+ ...
+ >>> class ERecord(PRecord):
+ ... d = field(type=DRecord)
+ ...
+ >>> ERecord.create({'d': {'x': '1'}})
+ ERecord(d=DRecord(x=1))
+
+Collection fields
+*****************
+It is also possible to have fields with ``pyrsistent`` collections.
+
+.. code:: python
+
+ >>> from pyrsistent import pset_field, pmap_field, pvector_field
+ >>> class MultiRecord(PRecord):
+ ... set_of_ints = pset_field(int)
+ ... map_int_to_str = pmap_field(int, str)
+ ... vector_of_strs = pvector_field(str)
+ ...
+
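+A minimal usage sketch (hypothetical field values; this assumes, in line with the
+checked collections described above, that supplied values are coerced into the
+corresponding checked collection types on construction):
+
+.. code:: python
+
+ >>> r = MultiRecord(set_of_ints={1, 2}, map_int_to_str={1: 'one'})
+ >>> 1 in r.set_of_ints
+ True
+ >>> r.map_int_to_str[1]
+ 'one'
+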
+Serialization
+*************
+PRecords support serialization back to dicts. Default serialization will take keys and values
+"as is" and output them into a dict. It is possible to specify custom serialization functions
+to take care of fields that require special treatment.
+
+.. code:: python
+
+ >>> from datetime import date
+ >>> class Person(PRecord):
+ ... name = field(type=unicode)
+ ... birth_date = field(type=date,
+ ... serializer=lambda format, d: d.strftime(format['date']))
+ ...
+ >>> john = Person(name=u'John', birth_date=date(1985, 10, 21))
+ >>> john.serialize({'date': '%Y-%m-%d'})
+ {'birth_date': '1985-10-21', 'name': u'John'}
+
+
+.. _instar: https://github.com/boxed/instar/
+
+.. _PClass:
+
+PClass
+~~~~~~
+A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python classes inheriting
+from PClass. It is defined the same way that a PRecord is and behaves like a PRecord in all aspects except that it
+is not a PMap and hence not a collection but rather a plain Python object.
+
+.. code:: python
+
+ >>> from pyrsistent import PClass, field
+ >>> class AClass(PClass):
+ ... x = field()
+ ...
+ >>> a = AClass(x=3)
+ >>> a
+ AClass(x=3)
+ >>> a.x
+ 3
+
+
+Checked collections
+~~~~~~~~~~~~~~~~~~~
+Checked collections currently come in three flavors: CheckedPVector, CheckedPMap and CheckedPSet.
+
+.. code:: python
+
+ >>> from pyrsistent import CheckedPVector, CheckedPMap, CheckedPSet, thaw
+ >>> class Positives(CheckedPSet):
+ ... __type__ = (long, int)
+ ... __invariant__ = lambda n: (n >= 0, 'Negative')
+ ...
+ >>> class Lottery(PRecord):
+ ... name = field(type=str)
+ ... numbers = field(type=Positives, invariant=lambda p: (len(p) > 0, 'No numbers'))
+ ...
+ >>> class Lotteries(CheckedPVector):
+ ... __type__ = Lottery
+ ...
+ >>> class LotteriesByDate(CheckedPMap):
+ ... __key_type__ = date
+ ... __value_type__ = Lotteries
+ ...
+ >>> lotteries = LotteriesByDate.create({date(2015, 2, 15): [{'name': 'SuperLotto', 'numbers': {1, 2, 3}},
+ ... {'name': 'MegaLotto', 'numbers': {4, 5, 6}}],
+ ... date(2015, 2, 16): [{'name': 'SuperLotto', 'numbers': {3, 2, 1}},
+ ... {'name': 'MegaLotto', 'numbers': {6, 5, 4}}]})
+ >>> lotteries
+ LotteriesByDate({datetime.date(2015, 2, 15): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')]), datetime.date(2015, 2, 16): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])})
+
+ # The checked versions support all operations that the corresponding
+ # unchecked types do
+ >>> lottery_0215 = lotteries[date(2015, 2, 15)]
+ >>> lottery_0215.transform([0, 'name'], 'SuperDuperLotto')
+ Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperDuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])
+
+ # But also makes asserts that types and invariants hold
+ >>> lottery_0215.transform([0, 'name'], 999)
+ Traceback (most recent call last):
+ PTypeError: Invalid type for field Lottery.name, was int
+
+ >>> lottery_0215.transform([0, 'numbers'], set())
+ Traceback (most recent call last):
+ InvariantException: Field invariant failed
+
+ # They can be converted back to python built ins with either thaw()
+ # or serialize() (which provides possibilities to customize serialization)
+ >>> thaw(lottery_0215)
+ [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
+ >>> lottery_0215.serialize()
+ [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
+
+.. _transformations:
+
+Transformations
+~~~~~~~~~~~~~~~
+Transformations are inspired by the cool library instar_ for Clojure. They let you evolve PMaps and PVectors
+with arbitrarily deep/complex nesting using a simple and flexible matching syntax.
+
+The first argument to a transformation is the path that points out the value to transform. The
+second is the transformation to perform. If the transformation is callable it will be applied
+to the value(s) matching the path. The path may also contain callables. In that case they are
+treated as matchers. If a matcher returns True for a specific key, that key is considered for transformation.
+
+.. code:: python
+
+ # Basic examples
+ >>> from pyrsistent import inc, freeze, thaw, rex, ny, discard
+ >>> v1 = freeze([1, 2, 3, 4, 5])
+ >>> v1.transform([2], inc)
+ pvector([1, 2, 4, 4, 5])
+ >>> v1.transform([lambda ix: 0 < ix < 4], 8)
+ pvector([1, 8, 8, 8, 5])
+ >>> v1.transform([lambda ix, v: ix == 0 or v == 5], 0)
+ pvector([0, 2, 3, 4, 0])
+
+ # The (a)ny matcher can be used to match anything
+ >>> v1.transform([ny], 8)
+ pvector([8, 8, 8, 8, 8])
+
+ # Regular expressions can be used for matching
+ >>> scores = freeze({'John': 12, 'Joseph': 34, 'Sara': 23})
+ >>> scores.transform([rex('^Jo')], 0)
+ pmap({'Joseph': 0, 'Sara': 23, 'John': 0})
+
+ # Transformations can be done on arbitrarily deep structures
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ # When nothing has been transformed the original data structure is kept
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+
+ # There is a special transformation that can be used to discard elements. Also
+ # multiple transformations can be applied in one call
+ >>> thaw(news_paper.transform(['weather'], discard, ['articles', ny, 'content'], discard))
+ {'articles': [{'author': 'Sara'}, {'author': 'Steve'}]}
+
+Evolvers
+~~~~~~~~
+PVector, PMap and PSet all have support for a concept dubbed *evolvers*. An evolver acts like a mutable
+view of the underlying persistent data structure with "transaction like" semantics. No updates of the original
+data structure are ever performed; it remains fully immutable.
+
+The evolvers have a very limited API by design to discourage excessive, and inappropriate, usage as that would
+take us down the mutable road. In principle only basic mutation and element access functions are supported.
+Check out the documentation_ of each data structure for specific examples.
+
+Examples of when you may want to use an evolver instead of working directly with the data structure include:
+
+* Multiple updates are done to the same data structure and the intermediate results are of no
+  interest. In this case using an evolver may be more efficient and easier to work with.
+* You need to pass a vector into a legacy function, or a function that you have no control
+  over, which performs in-place mutations. In this case pass an evolver instance
+  instead and then create a new pvector from the evolver once the function returns.
+
+.. code:: python
+
+ >>> from pyrsistent import v
+
+ # In place mutation as when working with the built in counterpart
+ >>> v1 = v(1, 2, 3)
+ >>> e = v1.evolver()
+ >>> e[1] = 22
+ >>> e = e.append(4)
+ >>> e = e.extend([5, 6])
+ >>> e[5] += 1
+ >>> len(e)
+ 6
+
+ # The evolver is considered *dirty* when it contains changes compared to the underlying vector
+ >>> e.is_dirty()
+ True
+
+ # But the underlying pvector still remains untouched
+ >>> v1
+ pvector([1, 2, 3])
+
+ # Once satisfied with the updates you can produce a new pvector containing the updates.
+ # The new pvector will share data with the original pvector in the same way that would have
+ # been done if only using operations on the pvector.
+ >>> v2 = e.persistent()
+ >>> v2
+ pvector([1, 22, 3, 4, 5, 7])
+
+ # The evolver is now no longer considered *dirty* as it contains no differences compared to the
+ # pvector just produced.
+ >>> e.is_dirty()
+ False
+
+ # You may continue to work with the same evolver without affecting the content of v2
+ >>> e[0] = 11
+
+ # Or create a new evolver from v2. The two evolvers can be updated independently but will both
+ # share data with v2 where possible.
+ >>> e2 = v2.evolver()
+ >>> e2[0] = 1111
+ >>> e.persistent()
+ pvector([11, 22, 3, 4, 5, 7])
+ >>> e2.persistent()
+ pvector([1111, 22, 3, 4, 5, 7])
+
+.. _freeze:
+.. _thaw:
+
+freeze and thaw
+~~~~~~~~~~~~~~~
+These functions are great when your cozy immutable world has to interact with the evil mutable world outside.
+
+.. code:: python
+
+ >>> from pyrsistent import freeze, thaw, v, m
+ >>> freeze([1, {'a': 3}])
+ pvector([1, pmap({'a': 3})])
+ >>> thaw(v(1, m(a=3)))
+ [1, {'a': 3}]
+
+Compatibility
+-------------
+
+Pyrsistent is developed and tested on Python 2.7, 3.5, 3.6, 3.7 and PyPy (Python 2 and 3 compatible). It will most
+likely work on all other versions >= 3.4 but no guarantees are given. :)
+
+Compatibility issues
+~~~~~~~~~~~~~~~~~~~~
+
+.. _27: https://github.com/tobgu/pyrsistent/issues/27
+
+There is currently one known compatibility issue when comparing built-in sets and frozensets to PSets as discussed in 27_.
+It affects Python 2 versions < 2.7.8 and Python 3 versions < 3.4.0 and is due to a bug described in
+http://bugs.python.org/issue8743.
+
+Comparisons will fail or be incorrect when using the set/frozenset as the left hand side of the comparison. As a workaround
+you need to either upgrade Python to a more recent version, avoid comparing sets/frozensets with PSets, or always make
+sure to convert both sides of the comparison to the same type before performing the comparison.
+
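+As a sketch of the last workaround (assuming an affected interpreter version),
+converting the PSet to a built-in set before comparing sidesteps the issue:
+
+.. code:: python
+
+ >>> from pyrsistent import s
+ >>> frozenset([1, 2]) == set(s(1, 2))  # compare like with like
+ True
+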
+Performance
+-----------
+
+Pyrsistent is developed with performance in mind. Still, while some operations are nearly on par with their built-in,
+mutable, counterparts in terms of speed, other operations are slower. In the cases where attempts at
+optimization have been made, speed has generally been valued over space.
+
+Pyrsistent comes with two API compatible flavors of PVector (on which PMap and PSet are based): one pure Python
+implementation and one implemented as a C extension. The latter is generally 2 - 20 times faster than the former.
+The C extension will be used automatically when possible.
+
+The pure Python implementation is fully PyPy compatible. Running it under PyPy speeds operations up considerably if
+the structures are used heavily (once JITed); for some cases the performance is almost on par with the built-in counterparts.
+
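+If you want to check which flavor is actually in use, one informal way (based on
+the module names used in this source tree) is to inspect the module of the vector
+type; the C extension type lives in ``pvectorc`` while the pure Python fallback
+lives inside the ``pyrsistent`` package:
+
+.. code:: python
+
+ >>> from pyrsistent import pvector
+ >>> print(type(pvector()).__module__)  # 'pvectorc' when the C extension is active
+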
+Type hints
+----------
+
+PEP 561 style type hints for use with mypy and various editors are available for most types and functions in pyrsistent.
+
+Type classes for annotating your own code with pyrsistent types are also available under pyrsistent.typing.
+
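+A small sketch of what such annotations can look like (a hypothetical function
+using the ``PVector`` alias from pyrsistent.typing mentioned above):
+
+.. code:: python
+
+ from pyrsistent import pvector
+ from pyrsistent.typing import PVector
+
+ def reversed_copy(xs: PVector[int]) -> PVector[int]:
+     return pvector(reversed(xs))
+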
+Installation
+------------
+
+``pip install pyrsistent``
+
+Documentation
+-------------
+
+Available at http://pyrsistent.readthedocs.org/
+
+Brief presentation available at http://slides.com/tobiasgustafsson/immutability-and-python/
+
+Contributors
+------------
+
+Tobias Gustafsson https://github.com/tobgu
+
+Christopher Armstrong https://github.com/radix
+
+Anders Hovmöller https://github.com/boxed
+
+Itamar Turner-Trauring https://github.com/itamarst
+
+Jonathan Lange https://github.com/jml
+
+Richard Futrell https://github.com/Futrell
+
+Jakob Hollenstein https://github.com/jkbjh
+
+David Honour https://github.com/foolswood
+
+David R. MacIver https://github.com/DRMacIver
+
+Marcus Ewert https://github.com/sarum90
+
+Jean-Paul Calderone https://github.com/exarkun
+
+Douglas Treadwell https://github.com/douglas-treadwell
+
+Travis Parker https://github.com/teepark
+
+Julian Berman https://github.com/Julian
+
+Dennis Tomas https://github.com/dtomas
+
+Neil Vyas https://github.com/neilvyas
+
+doozr https://github.com/doozr
+
+Kamil Galuszka https://github.com/galuszkak
+
+Tsuyoshi Hombashi https://github.com/thombashi
+
+nattofriends https://github.com/nattofriends
+
+agberk https://github.com/agberk
+
+Waleed Khan https://github.com/arxanas
+
+Jean-Louis Fuchs https://github.com/ganwell
+
+Carlos Corbacho https://github.com/ccorbacho
+
+Felix Yan https://github.com/felixonmars
+
+benrg https://github.com/benrg
+
+Jere Lahelma https://github.com/je-l
+
+Max Taggart https://github.com/MaxTaggart
+
+Vincent Philippon https://github.com/vphilippon
+
+Semen Zhydenko https://github.com/ss18
+
+Till Varoquaux https://github.com/till-varoquaux
+
+Michal Kowalik https://github.com/michalvi
+
+ossdev07 https://github.com/ossdev07
+
+Kerry Olesen https://github.com/qhesz
+
+johnthagen https://github.com/johnthagen
+
+Contributing
+------------
+
+Want to contribute? That's great! If you experience problems please log them on GitHub. If you want to contribute code,
+please fork the repository and submit a pull request.
+
+Run tests
+~~~~~~~~~
+.. _tox: https://tox.readthedocs.io/en/latest/
+
+Tests can be executed using tox_.
+
+Install tox: ``pip install tox``
+
+Run test for Python 2.7: ``tox -epy27``
+
+Release
+~~~~~~~
+* Update CHANGES.txt
+* Update README with any new contributors and any other info needed.
+* Update _pyrsistent_version.py
+* ``python setup.py sdist upload``
+* Commit and tag with new version: ``git add -u . && git commit -m 'Prepare version vX.Y.Z' && git tag -a vX.Y.Z -m 'vX.Y.Z'``
+* Push commit and tags: ``git push && git push --tags``
+
+Project status
+--------------
+Pyrsistent can be considered stable and mature (who knows, there may even be a 1.0 some day :-)). The project is
+maintained: bugs are fixed, PRs are reviewed and merged, and new releases are made. I currently do not have time for development
+of new features or functionality that I don't have use for myself. I'm more than happy to take PRs for new
+functionality though!
+
+There are a bunch of issues marked with ``enhancement`` and ``help wanted`` that contain requests for new functionality
+that would be nice to include. The level of difficulty and extent of the issues vary; please reach out to me if you're
+interested in working on any of them.
+
+If you feel that you have a grand master plan for where you would like Pyrsistent to go and have the time to put into
+it, please don't hesitate to discuss this with me and submit PRs for it. If all goes well I'd be more than happy to add
+additional maintainers to the project!
diff --git a/third_party/python/pyrsistent/_pyrsistent_version.py b/third_party/python/pyrsistent/_pyrsistent_version.py
new file mode 100644
index 0000000000..8911e95ca7
--- /dev/null
+++ b/third_party/python/pyrsistent/_pyrsistent_version.py
@@ -0,0 +1 @@
+__version__ = '0.16.0'
diff --git a/third_party/python/pyrsistent/pvectorcmodule.c b/third_party/python/pyrsistent/pvectorcmodule.c
new file mode 100644
index 0000000000..11a5bd6411
--- /dev/null
+++ b/third_party/python/pyrsistent/pvectorcmodule.c
@@ -0,0 +1,1642 @@
+#include <Python.h>
+#include <structmember.h>
+
+/*
+Persistent/Immutable/Functional vector and helper types.
+
+Please note that they are anything but immutable at this level since
+there is a whole lot of reference counting going on. That's the way
+CPython works, though, and the GIL makes them appear immutable.
+
+To the programmer using them from Python they appear immutable and,
+at the very least, behave immutably.
+
+Naming conventions
+------------------
+initpyrsistentc - This is the method that initializes the whole module
+pyrsistent_* - Methods part of the interface
+<typename>_* - Instance methods of types. For example PVector_append(...)
+
+All other methods are camel cased without prefix. All methods are static; none should
+need to be exposed outside of this module.
+*/
+
+#define SHIFT 5
+#define BRANCH_FACTOR (1 << SHIFT)
+#define BIT_MASK (BRANCH_FACTOR - 1)
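+
+/*
+ Conceptually the vector is a 32-way trie: an index is consumed SHIFT bits at a
+ time when walking from the root towards a leaf, so for a vector with
+ shift == 10, element i is reached via
+ root->items[(i >> 10) & BIT_MASK]->items[(i >> 5) & BIT_MASK]->items[i & BIT_MASK]
+ (see nodeFor() below). Up to the last BRANCH_FACTOR elements are kept in a
+ separate tail node (see TAIL_OFF()).
+*/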
+
+static PyTypeObject PVectorType;
+static PyTypeObject PVectorEvolverType;
+
+typedef struct {
+ void *items[BRANCH_FACTOR];
+ unsigned int refCount;
+} VNode;
+
+#define NODE_CACHE_MAX_SIZE 1024
+
+typedef struct {
+ unsigned int size;
+ VNode* nodes[NODE_CACHE_MAX_SIZE];
+} vNodeCache;
+
+static vNodeCache nodeCache;
+
+typedef struct {
+ PyObject_HEAD
+ unsigned int count; // Perhaps ditch this one in favor of ob_size/Py_SIZE()
+ unsigned int shift;
+ VNode *root;
+ VNode *tail;
+ PyObject *in_weakreflist; /* List of weak references */
+} PVector;
+
+typedef struct {
+ PyObject_HEAD
+ PVector* originalVector;
+ PVector* newVector;
+ PyObject* appendList;
+} PVectorEvolver;
+
+
+static PVector* EMPTY_VECTOR = NULL;
+static PyObject* transform_fn = NULL;
+
+static PyObject* transform(PVector* self, PyObject* args) {
+ if(transform_fn == NULL) {
+    // Lazily look up transform to avoid circular import problems
+ transform_fn = PyObject_GetAttrString(PyImport_ImportModule("pyrsistent._transformations"), "transform");
+ }
+
+ return PyObject_CallFunctionObjArgs(transform_fn, self, args, NULL);
+}
+
+
+// No access to internal members
+static PyMemberDef PVector_members[] = {
+ {NULL} /* Sentinel */
+};
+
+#define debug(...)
+// #define debug printf
+
+#define NODE_REF_COUNT(n) ((n)->refCount)
+#define SET_NODE_REF_COUNT(n, c) (NODE_REF_COUNT(n) = (c))
+#define INC_NODE_REF_COUNT(n) (NODE_REF_COUNT(n)++)
+#define DEC_NODE_REF_COUNT(n) (NODE_REF_COUNT(n)--)
+
+static VNode* allocNode(void) {
+ if(nodeCache.size > 0) {
+ nodeCache.size--;
+ return nodeCache.nodes[nodeCache.size];
+ }
+
+ return PyMem_Malloc(sizeof(VNode));
+}
+
+static void freeNode(VNode *node) {
+ if(nodeCache.size < NODE_CACHE_MAX_SIZE) {
+ nodeCache.nodes[nodeCache.size] = node;
+ nodeCache.size++;
+ } else {
+ PyMem_Free(node);
+ }
+}
+
+static VNode* newNode(void) {
+ VNode* result = allocNode();
+ memset(result, 0x0, sizeof(VNode));
+ SET_NODE_REF_COUNT(result, 1);
+ debug("newNode() %p\n", result);
+ return result;
+}
+
+static VNode* copyNode(VNode* source) {
+  /* NB: Only to be used for internal nodes, e.g. nodes that do not
+     hold direct references to Python objects but only to other nodes. */
+ int i;
+ VNode* result = allocNode();
+ debug("copyNode() %p\n", result);
+ memcpy(result->items, source->items, sizeof(source->items));
+
+ for(i = 0; i < BRANCH_FACTOR; i++) {
+ // TODO-OPT: Any need to go on when the first NULL has been found?
+ if(result->items[i] != NULL) {
+ INC_NODE_REF_COUNT((VNode*)result->items[i]);
+ }
+ }
+
+ SET_NODE_REF_COUNT(result, 1);
+ return result;
+}
+
+static PVector* emptyNewPvec(void);
+static PVector* copyPVector(PVector *original);
+static void extendWithItem(PVector *newVec, PyObject *item);
+
+static PyObject *PVectorEvolver_persistent(PVectorEvolver *);
+static int PVectorEvolver_set_item(PVectorEvolver *, PyObject*, PyObject*);
+
+static Py_ssize_t PVector_len(PVector *self) {
+ return self->count;
+}
+
+/* Convenience macros */
+#define ROOT_NODE_FULL(vec) ((vec->count >> SHIFT) > (1 << vec->shift))
+#define TAIL_OFF(vec) ((vec->count < BRANCH_FACTOR) ? 0 : (((vec->count - 1) >> SHIFT) << SHIFT))
+#define TAIL_SIZE(vec) (vec->count - TAIL_OFF(vec))
+#define PVector_CheckExact(op) (Py_TYPE(op) == &PVectorType)
+
+static VNode* nodeFor(PVector *self, int i){
+ int level;
+ if((i >= 0) && (i < self->count)) {
+ if(i >= TAIL_OFF(self)) {
+ return self->tail;
+ }
+
+ VNode* node = self->root;
+ for(level = self->shift; level > 0; level -= SHIFT) {
+ node = (VNode*) node->items[(i >> level) & BIT_MASK];
+ }
+
+ return node;
+ }
+
+ PyErr_Format(PyExc_IndexError, "Index out of range: %i", i);
+ return NULL;
+}
+
+static PyObject* _get_item(PVector *self, Py_ssize_t pos) {
+ VNode* node = nodeFor((PVector*)self, pos);
+ PyObject *result = NULL;
+ if(node != NULL) {
+ result = node->items[pos & BIT_MASK];
+ }
+ return result;
+}
+
+/*
+ Returns a new reference as specified by the PySequence_GetItem function.
+*/
+static PyObject* PVector_get_item(PVector *self, Py_ssize_t pos) {
+ if (pos < 0) {
+ pos += self->count;
+ }
+
+ PyObject* obj = _get_item(self, pos);
+ Py_XINCREF(obj);
+ return obj;
+}
+
+static void releaseNode(int level, VNode *node) {
+ if(node == NULL) {
+ return;
+ }
+
+ debug("releaseNode(): node=%p, level=%i, refCount=%i\n", node, level, NODE_REF_COUNT(node));
+
+ int i;
+
+ DEC_NODE_REF_COUNT(node);
+ debug("Refcount when trying to release: %u\n", NODE_REF_COUNT(node));
+ if(NODE_REF_COUNT(node) == 0) {
+ if(level > 0) {
+ for(i = 0; i < BRANCH_FACTOR; i++) {
+ if(node->items[i] != NULL) {
+ releaseNode(level - SHIFT, node->items[i]);
+ }
+ }
+ freeNode(node);
+ } else {
+ for(i = 0; i < BRANCH_FACTOR; i++) {
+ Py_XDECREF(node->items[i]);
+ }
+ freeNode(node);
+ }
+ }
+
+ debug("releaseNode(): Done! node=%p!\n", node);
+}
+
+/*
+  Gives back (releases) all references to PyObjects that have been stolen. Also
+  decrements the internal reference counts used for shared memory structures and
+  deallocates those if needed.
+*/
+static void PVector_dealloc(PVector *self) {
+ debug("Dealloc(): self=%p, self->count=%u, tail->refCount=%u, root->refCount=%u, self->shift=%u, self->tail=%p, self->root=%p\n",
+ self, self->count, NODE_REF_COUNT(self->tail), NODE_REF_COUNT(self->root), self->shift, self->tail, self->root);
+
+ if (self->in_weakreflist != NULL) {
+ PyObject_ClearWeakRefs((PyObject *) self);
+ }
+
+ PyObject_GC_UnTrack((PyObject*)self);
+ Py_TRASHCAN_SAFE_BEGIN(self);
+
+ releaseNode(0, self->tail);
+ releaseNode(self->shift, self->root);
+
+ PyObject_GC_Del(self);
+ Py_TRASHCAN_SAFE_END(self);
+}
+
+static PyObject *PVector_toList(PVector *self) {
+ Py_ssize_t i;
+ PyObject *list = PyList_New(self->count);
+ for (i = 0; i < self->count; ++i) {
+ PyObject *o = _get_item(self, i);
+ Py_INCREF(o);
+ PyList_SET_ITEM(list, i, o);
+ }
+
+ return list;
+}
+
+
+static PyObject *PVector_repr(PVector *self) {
+ // Reuse the list repr code, a bit less efficient but saves some code
+ PyObject *list = PVector_toList(self);
+ PyObject *list_repr = PyObject_Repr(list);
+ Py_DECREF(list);
+
+ if(list_repr == NULL) {
+ // Exception raised during call to repr
+ return NULL;
+ }
+
+  // Repr for list is implemented differently in Python 2 and 3. This needs
+  // to be handled or a core dump will occur.
+#if PY_MAJOR_VERSION >= 3
+ PyObject *s = PyUnicode_FromFormat("%s%U%s", "pvector(", list_repr, ")");
+ Py_DECREF(list_repr);
+#else
+ PyObject *s = PyString_FromString("pvector(");
+ PyString_ConcatAndDel(&s, list_repr);
+ PyString_ConcatAndDel(&s, PyString_FromString(")"));
+#endif
+
+ return s;
+}
+
+
+static long PVector_hash(PVector *self) {
+ // Follows the pattern of the tuple hash
+ long x, y;
+ Py_ssize_t i;
+ long mult = 1000003L;
+ x = 0x456789L;
+ for(i=0; i<self->count; i++) {
+ y = PyObject_Hash(_get_item(self, i));
+ if (y == -1) {
+ return -1;
+ }
+ x = (x ^ y) * mult;
+ mult += (long)(82520L + i + i);
+ }
+
+ x += 97531L;
+ if(x == -1) {
+ x = -2;
+ }
+
+ return x;
+}
+
+static PyObject* compareSizes(long vlen, long wlen, int op) {
+ int cmp;
+ PyObject *res;
+ switch (op) {
+ case Py_LT: cmp = vlen < wlen; break;
+ case Py_LE: cmp = vlen <= wlen; break;
+ case Py_EQ: cmp = vlen == wlen; break;
+ case Py_NE: cmp = vlen != wlen; break;
+ case Py_GT: cmp = vlen > wlen; break;
+ case Py_GE: cmp = vlen >= wlen; break;
+ default: return NULL; /* cannot happen */
+ }
+
+ if (cmp) {
+ res = Py_True;
+ } else {
+ res = Py_False;
+ }
+
+ Py_INCREF(res);
+ return res;
+}
+
+static PyObject* PVector_richcompare(PyObject *v, PyObject *w, int op) {
+ // Follows the principles of the tuple comparison
+ PVector *vt, *wt;
+ Py_ssize_t i;
+ Py_ssize_t vlen, wlen;
+ PyObject *list;
+ PyObject *result;
+
+ if(!PVector_CheckExact(v) || !PVector_CheckExact(w)) {
+ if(PVector_CheckExact(v)) {
+ list = PVector_toList((PVector*)v);
+ result = PyObject_RichCompare(list , w, op);
+ Py_DECREF(list);
+ return result;
+ }
+
+ if(PVector_CheckExact(w)) {
+ list = PVector_toList((PVector*)w);
+ result = PyObject_RichCompare(v, list, op);
+ Py_DECREF(list);
+ return result;
+ }
+
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+
+ if((op == Py_EQ) && (v == w)) {
+ Py_INCREF(Py_True);
+ return Py_True;
+ }
+
+ vt = (PVector *)v;
+ wt = (PVector *)w;
+
+ vlen = vt->count;
+ wlen = wt->count;
+
+ if (vlen != wlen) {
+ if (op == Py_EQ) {
+ Py_INCREF(Py_False);
+ return Py_False;
+ } else if (op == Py_NE) {
+ Py_INCREF(Py_True);
+ return Py_True;
+ }
+ }
+
+ /* Search for the first index where items are different. */
+ PyObject *left = NULL;
+ PyObject *right = NULL;
+ for (i = 0; i < vlen && i < wlen; i++) {
+ left = _get_item(vt, i);
+ right = _get_item(wt, i);
+ int k = PyObject_RichCompareBool(left, right, Py_EQ);
+ if (k < 0) {
+ return NULL;
+ }
+ if (!k) {
+ break;
+ }
+ }
+
+ if (i >= vlen || i >= wlen) {
+ /* No more items to compare -- compare sizes */
+ return compareSizes(vlen, wlen, op);
+ }
+
+ /* We have an item that differs -- shortcuts for EQ/NE */
+ if (op == Py_EQ) {
+ Py_INCREF(Py_False);
+ return Py_False;
+ } else if (op == Py_NE) {
+ Py_INCREF(Py_True);
+ return Py_True;
+ } else {
+ /* Compare the final item again using the proper operator */
+ return PyObject_RichCompare(left, right, op);
+ }
+}
+
+
+static PyObject* PVector_repeat(PVector *self, Py_ssize_t n) {
+ if (n < 0) {
+ n = 0;
+ }
+
+ if ((n == 0) || (self->count == 0)) {
+ Py_INCREF(EMPTY_VECTOR);
+ return (PyObject *)EMPTY_VECTOR;
+ } else if (n == 1) {
+ Py_INCREF(self);
+ return (PyObject *)self;
+ } else if ((self->count * n)/self->count != n) {
+ return PyErr_NoMemory();
+ } else {
+ int i, j;
+ PVector *newVec = copyPVector(self);
+ for(i=0; i<(n-1); i++) {
+ for(j=0; j<self->count; j++) {
+ extendWithItem(newVec, PVector_get_item(self, j));
+ }
+ }
+ return (PyObject*)newVec;
+ }
+}
+
+static int PVector_traverse(PVector *o, visitproc visit, void *arg) {
+ // Naive traverse
+ Py_ssize_t i;
+ for (i = o->count; --i >= 0; ) {
+ Py_VISIT(_get_item(o, i));
+ }
+
+ return 0;
+}
+
+
+static PyObject* PVector_index(PVector *self, PyObject *args) {
+ // A direct rip-off of the tuple version
+ Py_ssize_t i, start=0, stop=self->count;
+ PyObject *value;
+
+ if (!PyArg_ParseTuple(args, "O|O&O&:index", &value,
+ _PyEval_SliceIndex, &start,
+ _PyEval_SliceIndex, &stop)) {
+ return NULL;
+ }
+
+ if (start < 0) {
+ start += self->count;
+ if (start < 0) {
+ start = 0;
+ }
+ }
+
+ if (stop < 0) {
+ stop += self->count;
+ if (stop < 0) {
+ stop = 0;
+ }
+ }
+
+ for (i = start; i < stop && i < self->count; i++) {
+ int cmp = PyObject_RichCompareBool(_get_item(self, i), value, Py_EQ);
+ if (cmp > 0) {
+#if PY_MAJOR_VERSION >= 3
+ return PyLong_FromSsize_t(i);
+#else
+ return PyInt_FromSsize_t(i);
+#endif
+ } else if (cmp < 0) {
+ return NULL;
+ }
+ }
+
+ PyErr_SetString(PyExc_ValueError, "PVector.index(x): x not in vector");
+ return NULL;
+}
+
+static PyObject* PVector_count(PVector *self, PyObject *value) {
+ Py_ssize_t count = 0;
+ Py_ssize_t i;
+
+ for (i = 0; i < self->count; i++) {
+ int cmp = PyObject_RichCompareBool(_get_item(self, i), value, Py_EQ);
+ if (cmp > 0) {
+ count++;
+ } else if (cmp < 0) {
+ return NULL;
+ }
+ }
+
+#if PY_MAJOR_VERSION >= 3
+ return PyLong_FromSsize_t(count);
+#else
+ return PyInt_FromSsize_t(count);
+#endif
+}
+
+static PyObject* PVector_pickle_reduce(PVector *self) {
+
+ PyObject* module = PyImport_ImportModule("pvectorc");
+ PyObject* pvector_fn = PyObject_GetAttrString(module, "pvector");
+ Py_DECREF(module);
+
+ PyObject *list = PVector_toList(self);
+ PyObject *arg_tuple = PyTuple_New(1);
+ PyTuple_SET_ITEM(arg_tuple, 0, list);
+
+ PyObject *result_tuple = PyTuple_New(2);
+ PyTuple_SET_ITEM(result_tuple, 0, pvector_fn);
+ PyTuple_SET_ITEM(result_tuple, 1, arg_tuple);
+
+ return result_tuple;
+}
+
+static PVector* rawCopyPVector(PVector* vector) {
+ PVector* newVector = PyObject_GC_New(PVector, &PVectorType);
+ newVector->count = vector->count;
+ newVector->shift = vector->shift;
+ newVector->root = vector->root;
+ newVector->tail = vector->tail;
+ newVector->in_weakreflist = NULL;
+ PyObject_GC_Track((PyObject*)newVector);
+ return newVector;
+}
+
+static void initializeEvolver(PVectorEvolver* evolver, PVector* vector, PyObject* appendList) {
+ // Need to hold a reference to the underlying vector to manage
+ // the ref counting properly.
+ evolver->originalVector = vector;
+ evolver->newVector = vector;
+
+ if(appendList == NULL) {
+ evolver->appendList = PyList_New(0);
+ } else {
+ evolver->appendList = appendList;
+ }
+}
+
+static PyObject * PVector_evolver(PVector *self) {
+ PVectorEvolver *evolver = PyObject_GC_New(PVectorEvolver, &PVectorEvolverType);
+ if (evolver == NULL) {
+ return NULL;
+ }
+ initializeEvolver(evolver, self, NULL);
+ PyObject_GC_Track(evolver);
+ Py_INCREF(self);
+ return (PyObject *)evolver;
+}
+
+
+static void copyInsert(void** dest, void** src, Py_ssize_t pos, void *obj) {
+ memcpy(dest, src, BRANCH_FACTOR * sizeof(void*));
+ dest[pos] = obj;
+}
+
+static PyObject* PVector_append(PVector *self, PyObject *obj);
+
+static PyObject* PVector_transform(PVector *self, PyObject *obj);
+
+static PyObject* PVector_set(PVector *self, PyObject *obj);
+
+static PyObject* PVector_mset(PVector *self, PyObject *args);
+
+static PyObject* PVector_subscript(PVector* self, PyObject* item);
+
+static PyObject* PVector_extend(PVector *self, PyObject *args);
+
+static PyObject* PVector_delete(PVector *self, PyObject *args);
+
+static PyObject* PVector_remove(PVector *self, PyObject *args);
+
+static PySequenceMethods PVector_sequence_methods = {
+ (lenfunc)PVector_len, /* sq_length */
+ (binaryfunc)PVector_extend, /* sq_concat */
+ (ssizeargfunc)PVector_repeat, /* sq_repeat */
+ (ssizeargfunc)PVector_get_item, /* sq_item */
+ // TODO might want to move the slice function to here
+ NULL, /* sq_slice */
+ NULL, /* sq_ass_item */
+ NULL, /* sq_ass_slice */
+ NULL, /* sq_contains */
+ NULL, /* sq_inplace_concat */
+ NULL, /* sq_inplace_repeat */
+};
+
+static PyMappingMethods PVector_mapping_methods = {
+ (lenfunc)PVector_len,
+ (binaryfunc)PVector_subscript,
+ NULL
+};
+
+
+static PyMethodDef PVector_methods[] = {
+ {"append", (PyCFunction)PVector_append, METH_O, "Appends an element"},
+ {"set", (PyCFunction)PVector_set, METH_VARARGS, "Inserts an element at the specified position"},
+ {"extend", (PyCFunction)PVector_extend, METH_O|METH_COEXIST, "Extend"},
+ {"transform", (PyCFunction)PVector_transform, METH_VARARGS, "Apply one or more transformations"},
+ {"index", (PyCFunction)PVector_index, METH_VARARGS, "Return first index of value"},
+ {"count", (PyCFunction)PVector_count, METH_O, "Return number of occurrences of value"},
+ {"__reduce__", (PyCFunction)PVector_pickle_reduce, METH_NOARGS, "Pickle support method"},
+ {"evolver", (PyCFunction)PVector_evolver, METH_NOARGS, "Return new evolver for pvector"},
+ {"mset", (PyCFunction)PVector_mset, METH_VARARGS, "Inserts multiple elements at the specified positions"},
+ {"tolist", (PyCFunction)PVector_toList, METH_NOARGS, "Convert to list"},
+ {"delete", (PyCFunction)PVector_delete, METH_VARARGS, "Delete element(s) by index"},
+ {"remove", (PyCFunction)PVector_remove, METH_VARARGS, "Remove element(s) by equality"},
+ {NULL}
+};
+
+static PyObject * PVectorIter_iter(PyObject *seq);
+
+static PyTypeObject PVectorType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pvectorc.PVector", /* tp_name */
+ sizeof(PVector), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ (destructor)PVector_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ (reprfunc)PVector_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ &PVector_sequence_methods, /* tp_as_sequence */
+ &PVector_mapping_methods, /* tp_as_mapping */
+ (hashfunc)PVector_hash, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+ "Persistent vector", /* tp_doc */
+ (traverseproc)PVector_traverse, /* tp_traverse */
+ 0, /* tp_clear */
+ PVector_richcompare, /* tp_richcompare */
+ offsetof(PVector, in_weakreflist), /* tp_weaklistoffset */
+ PVectorIter_iter, /* tp_iter */
+ 0, /* tp_iternext */
+ PVector_methods, /* tp_methods */
+ PVector_members, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+};
+
+static PyObject* pyrsistent_pvec(PyObject *self, PyObject *args) {
+ debug("pyrsistent_pvec(): %x\n", args);
+
+ PyObject *argObj = NULL; /* list of arguments */
+
+ if(!PyArg_ParseTuple(args, "|O", &argObj)) {
+ return NULL;
+ }
+
+ if(argObj == NULL) {
+ Py_INCREF(EMPTY_VECTOR);
+ return (PyObject*)EMPTY_VECTOR;
+ }
+
+ return PVector_extend(EMPTY_VECTOR, argObj);
+}
+
+static PVector* emptyNewPvec(void) {
+ PVector *pvec = PyObject_GC_New(PVector, &PVectorType);
+ debug("pymem alloc_new %x, ref cnt: %u\n", pvec, pvec->ob_refcnt);
+ pvec->count = (Py_ssize_t)0;
+ pvec->shift = SHIFT;
+ pvec->root = newNode();
+ pvec->tail = newNode();
+ pvec->in_weakreflist = NULL;
+ PyObject_GC_Track((PyObject*)pvec);
+ return pvec;
+}
+
+static void incRefs(PyObject **obj) {
+ // TODO-OPT: Would it be OK to exit on first NULL? Should not be any
+ // non NULLs beyond a NULL.
+ int i;
+ for(i = 0; i < BRANCH_FACTOR; i++) {
+ Py_XINCREF(obj[i]);
+ }
+}
+
+
+static PVector* newPvec(unsigned int count, unsigned int shift, VNode *root) {
+ // TODO-OPT: Introduce object cache
+ PVector *pvec = PyObject_GC_New(PVector, &PVectorType);
+ debug("pymem alloc_copy %x, ref cnt: %u\n", pvec, pvec->ob_refcnt);
+ pvec->count = count;
+ pvec->shift = shift;
+ pvec->root = root;
+ pvec->tail = newNode();
+ pvec->in_weakreflist = NULL;
+ PyObject_GC_Track((PyObject*)pvec);
+ return pvec;
+}
+
+static VNode* newPath(unsigned int level, VNode* node){
+ if(level == 0) {
+ INC_NODE_REF_COUNT(node);
+ return node;
+ }
+
+ VNode* result = newNode();
+ result->items[0] = newPath(level - SHIFT, node);
+ return result;
+}
+
+static VNode* pushTail(unsigned int level, unsigned int count, VNode* parent, VNode* tail) {
+ int subIndex = ((count - 1) >> level) & BIT_MASK;
+ VNode* result = copyNode(parent);
+ VNode* nodeToInsert;
+ VNode* child;
+ debug("pushTail(): count = %i, subIndex = %i\n", count, subIndex);
+
+ if(level == SHIFT) {
+ // We're at the bottom
+ INC_NODE_REF_COUNT(tail);
+ nodeToInsert = tail;
+ } else {
+ // More levels available in the tree
+ child = parent->items[subIndex];
+
+ if(child != NULL) {
+ nodeToInsert = pushTail(level - SHIFT, count, child, tail);
+
+ // Need to make an adjustment of the ref COUNT for the child node here since
+ // it was incremented in an earlier stage when the node was copied. Now the child
+ // node will be part of the path copy so the number of references to the original
+ // child will not increase at all.
+ DEC_NODE_REF_COUNT(child);
+ } else {
+ nodeToInsert = newPath(level - SHIFT, tail);
+ }
+ }
+
+ result->items[subIndex] = nodeToInsert;
+ return result;
+}
+
+static PVector* copyPVector(PVector *original) {
+ PVector *newVec = newPvec(original->count, original->shift, original->root);
+ INC_NODE_REF_COUNT(original->root);
+ memcpy(newVec->tail->items, original->tail->items, TAIL_SIZE(original) * sizeof(void*));
+ incRefs((PyObject**)newVec->tail->items);
+ return newVec;
+}
+
+/* Does not steal a reference, this must be managed outside of this function */
+static void extendWithItem(PVector *newVec, PyObject *item) {
+ unsigned int tail_size = TAIL_SIZE(newVec);
+
+ if(tail_size >= BRANCH_FACTOR) {
+ VNode* new_root;
+ if(ROOT_NODE_FULL(newVec)) {
+ new_root = newNode();
+ new_root->items[0] = newVec->root;
+ new_root->items[1] = newPath(newVec->shift, newVec->tail);
+ newVec->shift += SHIFT;
+ } else {
+ new_root = pushTail(newVec->shift, newVec->count, newVec->root, newVec->tail);
+ releaseNode(newVec->shift, newVec->root);
+ }
+
+ newVec->root = new_root;
+
+ // Need to adjust the ref count of the old tail here since no new references were
+ // actually created, we just moved the tail.
+ DEC_NODE_REF_COUNT(newVec->tail);
+ newVec->tail = newNode();
+ tail_size = 0;
+ }
+
+ newVec->tail->items[tail_size] = item;
+ newVec->count++;
+}
+
+
+#if PY_MAJOR_VERSION >= 3
+// This was changed in 3.2 but we do not claim compatibility with any older version of Python 3.
+#define SLICE_CAST
+#else
+#define SLICE_CAST (PySliceObject *)
+#endif
+
+static PyObject *PVector_subscript(PVector* self, PyObject* item) {
+ if (PyIndex_Check(item)) {
+ Py_ssize_t i = PyNumber_AsSsize_t(item, PyExc_IndexError);
+ if (i == -1 && PyErr_Occurred()) {
+ return NULL;
+ }
+
+ return PVector_get_item(self, i);
+ } else if (PySlice_Check(item)) {
+ Py_ssize_t start, stop, step, slicelength, cur, i;
+ if (PySlice_GetIndicesEx(SLICE_CAST item, self->count,
+ &start, &stop, &step, &slicelength) < 0) {
+ return NULL;
+ }
+
+ debug("start=%i, stop=%i, step=%i\n", start, stop, step);
+
+ if (slicelength <= 0) {
+ Py_INCREF(EMPTY_VECTOR);
+ return (PyObject*)EMPTY_VECTOR;
+ } else if((slicelength == self->count) && (step > 0)) {
+ Py_INCREF(self);
+ return (PyObject*)self;
+ } else {
+ PVector *newVec = copyPVector(EMPTY_VECTOR);
+ for (cur=start, i=0; i<slicelength; cur += (size_t)step, i++) {
+ extendWithItem(newVec, PVector_get_item(self, cur));
+ }
+
+ return (PyObject*)newVec;
+ }
+ } else {
+ PyErr_Format(PyExc_TypeError, "pvector indices must be integers, not %.200s", Py_TYPE(item)->tp_name);
+ return NULL;
+ }
+}
+
+/* A hack to get some of the error handling code away from the function
+ doing the actual work */
+#define HANDLE_ITERATION_ERROR() \
+ if (PyErr_Occurred()) { \
+ if (PyErr_ExceptionMatches(PyExc_StopIteration)) { \
+ PyErr_Clear(); \
+ } else { \
+ return NULL; \
+ } \
+ }
+
+
+/* Returns a new vector that is extended with the iterable b.
+ Takes a copy of the original vector and performs the extension in place on this
+ one for efficiency.
+
+   These are some optimizations that could be done to this function;
+   they are not considered important enough yet though.
+   - Use the PySequence_Fast ops if the iterable is a list or a tuple (which it
+     would probably often be)
+ - Only copy the original tail if it is not full
+ - No need to try to increment ref count in tail for the whole tail
+*/
+static PyObject* PVector_extend(PVector *self, PyObject *iterable) {
+ PyObject *it;
+ PyObject *(*iternext)(PyObject *);
+
+ it = PyObject_GetIter(iterable);
+ if (it == NULL) {
+ return NULL;
+ }
+
+ // TODO-OPT: Use special fast iterator if available
+ iternext = *Py_TYPE(it)->tp_iternext;
+ PyObject *item = iternext(it);
+ if (item == NULL) {
+ Py_DECREF(it);
+ HANDLE_ITERATION_ERROR()
+ Py_INCREF(self);
+ return (PyObject *)self;
+ } else {
+ PVector *newVec = copyPVector(self);
+ // TODO-OPT test using special case code here for extension to
+ // avoid recalculating tail length all the time.
+ while(item != NULL) {
+ extendWithItem(newVec, item);
+ item = iternext(it);
+ }
+
+ Py_DECREF(it);
+ HANDLE_ITERATION_ERROR()
+ return (PyObject*)newVec;
+ }
+}
+
+/*
+ Steals a reference to the object that is appended to the list.
+*/
+static PyObject* PVector_append(PVector *self, PyObject *obj) {
+ assert (obj != NULL);
+
+ unsigned int tail_size = TAIL_SIZE(self);
+ debug("append(): count = %u, tail_size = %u\n", self->count, tail_size);
+
+ // Does the new object fit in the tail? If so, take a copy of the tail and
+ // insert the new element in that.
+ if(tail_size < BRANCH_FACTOR) {
+ INC_NODE_REF_COUNT(self->root);
+ PVector *new_pvec = newPvec(self->count + 1, self->shift, self->root);
+ // TODO-OPT No need to copy more than the current tail length
+ // TODO-OPT No need to incRefs for all elements all the time
+ copyInsert(new_pvec->tail->items, self->tail->items, tail_size, obj);
+ incRefs((PyObject**)new_pvec->tail->items);
+ debug("append(): new_pvec=%p, new_pvec->tail=%p, new_pvec->root=%p\n",
+ new_pvec, new_pvec->tail, new_pvec->root);
+
+ return (PyObject*)new_pvec;
+ }
+
+ // Tail is full, need to push it into the tree
+ VNode* new_root;
+ unsigned int new_shift;
+ if(ROOT_NODE_FULL(self)) {
+ new_root = newNode();
+ new_root->items[0] = self->root;
+ INC_NODE_REF_COUNT(self->root);
+ new_root->items[1] = newPath(self->shift, self->tail);
+ new_shift = self->shift + SHIFT;
+ } else {
+ new_root = pushTail(self->shift, self->count, self->root, self->tail);
+ new_shift = self->shift;
+ }
+
+ PVector* pvec = newPvec(self->count + 1, new_shift, new_root);
+ pvec->tail->items[0] = obj;
+ Py_XINCREF(obj);
+ debug("append_push(): pvec=%p, pvec->tail=%p, pvec->root=%p\n", pvec, pvec->tail, pvec->root);
+ return (PyObject*)pvec;
+}
+
+static VNode* doSet(VNode* node, unsigned int level, unsigned int position, PyObject* value) {
+ debug("doSet(): level == %i\n", level);
+ if(level == 0) {
+ // TODO-OPT: Perhaps an alloc followed by a reset of reference
+ // count is enough here since we overwrite all subnodes below.
+ VNode* theNewNode = newNode();
+ copyInsert(theNewNode->items, node->items, position & BIT_MASK, value);
+ incRefs((PyObject**)theNewNode->items);
+ return theNewNode;
+ } else {
+ VNode* theNewNode = copyNode(node);
+ Py_ssize_t index = (position >> level) & BIT_MASK;
+
+ // Drop reference to this node since we're about to replace it
+ DEC_NODE_REF_COUNT((VNode*)theNewNode->items[index]);
+ theNewNode->items[index] = doSet(node->items[index], level - SHIFT, position, value);
+ return theNewNode;
+ }
+}
+
+
+static PyObject* internalSet(PVector *self, Py_ssize_t position, PyObject *argObj) {
+ if(position < 0) {
+ position += self->count;
+ }
+
+ if((0 <= position) && (position < self->count)) {
+ if(position >= TAIL_OFF(self)) {
+ // Reuse the root, replace the tail
+ INC_NODE_REF_COUNT(self->root);
+ PVector *new_pvec = newPvec(self->count, self->shift, self->root);
+ copyInsert(new_pvec->tail->items, self->tail->items, position & BIT_MASK, argObj);
+ incRefs((PyObject**)new_pvec->tail->items);
+ return (PyObject*)new_pvec;
+ } else {
+ // Keep the tail, replace the root
+ VNode *newRoot = doSet(self->root, self->shift, position, argObj);
+ PVector *new_pvec = newPvec(self->count, self->shift, newRoot);
+
+ // Free the tail and replace it with a reference to the tail of the original vector
+ freeNode(new_pvec->tail);
+ new_pvec->tail = self->tail;
+ INC_NODE_REF_COUNT(self->tail);
+ return (PyObject*)new_pvec;
+ }
+ } else if (position == self->count) {
+ // TODO Remove this case?
+ return PVector_append(self, argObj);
+ } else {
+ PyErr_Format(PyExc_IndexError, "Index out of range: %zd", position);
+ return NULL;
+ }
+}
+
+static PyObject* PVector_transform(PVector *self, PyObject *obj) {
+ return transform(self, obj);
+}
+
+/*
+ Steals a reference to the object that is inserted in the vector.
+*/
+static PyObject* PVector_set(PVector *self, PyObject *args) {
+ PyObject *argObj = NULL; /* argument to insert */
+ Py_ssize_t position;
+
+ /* The n parses for size, the O parses for a Python object */
+ if(!PyArg_ParseTuple(args, "nO", &position, &argObj)) {
+ return NULL;
+ }
+
+ return internalSet(self, position, argObj);
+}
+
+
+static PyObject* PVector_mset(PVector *self, PyObject *args) {
+ Py_ssize_t size = PyTuple_Size(args);
+ if(size % 2) {
+ PyErr_SetString(PyExc_TypeError, "mset expected an even number of arguments");
+ return NULL;
+ }
+
+ PVectorEvolver* evolver = (PVectorEvolver*)PVector_evolver(self);
+ Py_ssize_t i;
+ for(i=0; i<size; i+=2) {
+ if(PVectorEvolver_set_item(evolver, PyTuple_GetItem(args, i), PyTuple_GetItem(args, i + 1)) < 0) {
+ Py_DECREF(evolver);
+ return NULL;
+ }
+ }
+
+ PyObject* vector = PVectorEvolver_persistent(evolver);
+ Py_DECREF(evolver);
+ return vector;
+}
+
+
+static PyObject* internalDelete(PVector *self, Py_ssize_t index, PyObject *stop_obj) {
+ Py_ssize_t stop;
+ PyObject *list;
+ PyObject *result;
+
+ if (index < 0) {
+ index += self->count;
+ }
+
+ if (stop_obj != NULL) {
+ if (PyIndex_Check(stop_obj)) {
+ stop = PyNumber_AsSsize_t(stop_obj, PyExc_IndexError);
+ if (stop == -1 && PyErr_Occurred()) {
+ return NULL;
+ }
+ } else {
+ PyErr_Format(PyExc_TypeError, "Stop index must be integer, not %.200s", Py_TYPE(stop_obj)->tp_name);
+ return NULL;
+ }
+
+ if (stop < 0) {
+ stop += self->count;
+ }
+ } else {
+ if (index < 0 || index >= self->count) {
+ PyErr_SetString(PyExc_IndexError, "delete index out of range");
+ return NULL;
+ }
+
+ stop = index + 1;
+ }
+
+ list = PVector_toList(self);
+ if(PyList_SetSlice(list, index, stop, NULL) < 0) {
+ return NULL;
+ }
+
+ result = PVector_extend(EMPTY_VECTOR, list);
+ Py_DECREF(list);
+ return result;
+}
+
+static PyObject* PVector_delete(PVector *self, PyObject *args) {
+ Py_ssize_t index;
+ PyObject *stop_obj = NULL;
+
+ if(!PyArg_ParseTuple(args, "n|O:delete", &index, &stop_obj)) {
+ return NULL;
+ }
+
+ return internalDelete(self, index, stop_obj);
+}
+
+static PyObject* PVector_remove(PVector *self, PyObject *args) {
+ Py_ssize_t index;
+ PyObject* py_index = PVector_index(self, args);
+
+ if(py_index != NULL) {
+#if PY_MAJOR_VERSION >= 3
+ index = PyLong_AsSsize_t(py_index);
+#else
+ index = PyInt_AsSsize_t(py_index);
+#endif
+ Py_DECREF(py_index);
+ return internalDelete(self, index, NULL);
+ }
+
+ PyErr_SetString(PyExc_ValueError, "PVector.remove(x): x not in vector");
+ return NULL;
+}
+
+
+/*********************** PVector Iterator **************************/
+
+/*
+The Sequence class provides us with a default iterator but the runtime
+overhead of using that compared to the iterator below is huge.
+*/
+
+typedef struct {
+ PyObject_HEAD
+ Py_ssize_t it_index;
+ PVector *it_seq; /* Set to NULL when iterator is exhausted */
+} PVectorIter;
+
+static void PVectorIter_dealloc(PVectorIter *);
+static int PVectorIter_traverse(PVectorIter *, visitproc, void *);
+static PyObject *PVectorIter_next(PVectorIter *);
+
+static PyMethodDef PVectorIter_methods[] = {
+ {NULL, NULL} /* sentinel */
+};
+
+static PyTypeObject PVectorIterType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pvector_iterator", /* tp_name */
+ sizeof(PVectorIter), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ (destructor)PVectorIter_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ PyObject_GenericGetAttr, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+ 0, /* tp_doc */
+ (traverseproc)PVectorIter_traverse, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ PyObject_SelfIter, /* tp_iter */
+ (iternextfunc)PVectorIter_next, /* tp_iternext */
+ PVectorIter_methods, /* tp_methods */
+ 0, /* tp_members */
+};
+
+static PyObject *PVectorIter_iter(PyObject *seq) {
+ PVectorIter *it = PyObject_GC_New(PVectorIter, &PVectorIterType);
+ if (it == NULL) {
+ return NULL;
+ }
+
+ it->it_index = 0;
+ Py_INCREF(seq);
+ it->it_seq = (PVector *)seq;
+ PyObject_GC_Track(it);
+ return (PyObject *)it;
+}
+
+static void PVectorIter_dealloc(PVectorIter *it) {
+ PyObject_GC_UnTrack(it);
+ Py_XDECREF(it->it_seq);
+ PyObject_GC_Del(it);
+}
+
+static int PVectorIter_traverse(PVectorIter *it, visitproc visit, void *arg) {
+ Py_VISIT(it->it_seq);
+ return 0;
+}
+
+static PyObject *PVectorIter_next(PVectorIter *it) {
+ assert(it != NULL);
+ PVector *seq = it->it_seq;
+ if (seq == NULL) {
+ return NULL;
+ }
+
+ if (it->it_index < seq->count) {
+ PyObject *item = _get_item(seq, it->it_index);
+ ++it->it_index;
+ Py_INCREF(item);
+ return item;
+ }
+
+ Py_DECREF(seq);
+ it->it_seq = NULL;
+ return NULL;
+}
+
+
+/*********************** PVector Evolver **************************/
+
+/*
+Evolver to make multiple updates easier to work with and more efficient.
+*/
+
+static void PVectorEvolver_dealloc(PVectorEvolver *);
+static PyObject *PVectorEvolver_append(PVectorEvolver *, PyObject *);
+static PyObject *PVectorEvolver_extend(PVectorEvolver *, PyObject *);
+static PyObject *PVectorEvolver_set(PVectorEvolver *, PyObject *);
+static PyObject *PVectorEvolver_delete(PVectorEvolver *self, PyObject *args);
+static PyObject *PVectorEvolver_subscript(PVectorEvolver *, PyObject *);
+static PyObject *PVectorEvolver_persistent(PVectorEvolver *);
+static Py_ssize_t PVectorEvolver_len(PVectorEvolver *);
+static PyObject *PVectorEvolver_is_dirty(PVectorEvolver *);
+static int PVectorEvolver_traverse(PVectorEvolver *self, visitproc visit, void *arg);
+
+static PyMappingMethods PVectorEvolver_mapping_methods = {
+ (lenfunc)PVectorEvolver_len,
+ (binaryfunc)PVectorEvolver_subscript,
+ (objobjargproc)PVectorEvolver_set_item,
+};
+
+
+static PyMethodDef PVectorEvolver_methods[] = {
+ {"append", (PyCFunction)PVectorEvolver_append, METH_O, "Appends an element"},
+ {"extend", (PyCFunction)PVectorEvolver_extend, METH_O|METH_COEXIST, "Extend"},
+ {"set", (PyCFunction)PVectorEvolver_set, METH_VARARGS, "Set item"},
+ {"delete", (PyCFunction)PVectorEvolver_delete, METH_VARARGS, "Delete item"},
+ {"persistent", (PyCFunction)PVectorEvolver_persistent, METH_NOARGS, "Create PVector from evolver"},
+ {"is_dirty", (PyCFunction)PVectorEvolver_is_dirty, METH_NOARGS, "Check if evolver contains modifications"},
+ {NULL, NULL} /* sentinel */
+};
+
+static PyTypeObject PVectorEvolverType = {
+ PyVarObject_HEAD_INIT(NULL, 0)
+ "pvector_evolver", /* tp_name */
+ sizeof(PVectorEvolver), /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ (destructor)PVectorEvolver_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+ 0, /* tp_compare */
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ &PVectorEvolver_mapping_methods, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ PyObject_GenericGetAttr, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
+ 0, /* tp_doc */
+ (traverseproc)PVectorEvolver_traverse, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ PVectorEvolver_methods, /* tp_methods */
+ 0, /* tp_members */
+};
+
+
+// Indicate that a node is "dirty" (has been updated by the evolver)
+// by setting the MSB of the refCount. This will be cleared when
+// creating a pvector from the evolver (cleaning it).
+#define DIRTY_BIT 0x80000000
+#define REF_COUNT_MASK (~DIRTY_BIT)
+#define IS_DIRTY(node) ((node)->refCount & DIRTY_BIT)
+#define SET_DIRTY(node) ((node)->refCount |= DIRTY_BIT)
+#define CLEAR_DIRTY(node) ((node)->refCount &= REF_COUNT_MASK)
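+// Illustrative: a refCount of 0x80000001 denotes a dirty node whose masked ref count is 1.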
+
+
+static void cleanNodeRecursively(VNode *node, int level) {
+ debug("Cleaning recursively node=%p, level=%d\n", node, level);
+
+ int i;
+ CLEAR_DIRTY(node);
+ SET_NODE_REF_COUNT(node, 1);
+ if(level > 0) {
+ for(i = 0; i < BRANCH_FACTOR; i++) {
+ VNode *nextNode = (VNode*)node->items[i];
+ if((nextNode != NULL) && IS_DIRTY(nextNode)) {
+ cleanNodeRecursively(nextNode, level - SHIFT);
+ }
+ }
+ }
+}
+
+static void cleanVector(PVector *vector) {
+ // Cleaning the vector means that all dirty indications are cleared
+ // and that the nodes that were dirty get a ref count of 1 since
+ // they are brand new. Once cleaned the vector can be released into
+ // the wild.
+ if(IS_DIRTY(vector->tail)) {
+ cleanNodeRecursively(vector->tail, 0);
+ } else {
+ INC_NODE_REF_COUNT(vector->tail);
+ }
+
+ if(IS_DIRTY(vector->root)) {
+ cleanNodeRecursively(vector->root, vector->shift);
+ } else {
+ INC_NODE_REF_COUNT(vector->root);
+ }
+}
+
+static void PVectorEvolver_dealloc(PVectorEvolver *self) {
+ PyObject_GC_UnTrack(self);
+ Py_TRASHCAN_SAFE_BEGIN(self);
+
+ if(self->originalVector != self->newVector) {
+ cleanVector(self->newVector);
+ Py_DECREF(self->newVector);
+ }
+
+ Py_DECREF(self->originalVector);
+ Py_DECREF(self->appendList);
+
+ PyObject_GC_Del(self);
+ Py_TRASHCAN_SAFE_END(self);
+}
+
+static PyObject *PVectorEvolver_append(PVectorEvolver *self, PyObject *args) {
+ if (PyList_Append(self->appendList, args) == 0) {
+ Py_INCREF(self);
+ return (PyObject*)self;
+ }
+
+ return NULL;
+}
+
+static PyObject *PVectorEvolver_extend(PVectorEvolver *self, PyObject *args) {
+ PyObject *retVal = _PyList_Extend((PyListObject *)self->appendList, args);
+ if (retVal == NULL) {
+ return NULL;
+ }
+
+ Py_DECREF(retVal);
+ Py_INCREF(self);
+ return (PyObject*)self;
+}
+
+static PyObject *PVectorEvolver_subscript(PVectorEvolver *self, PyObject *item) {
+ if (PyIndex_Check(item)) {
+ Py_ssize_t position = PyNumber_AsSsize_t(item, PyExc_IndexError);
+ if (position == -1 && PyErr_Occurred()) {
+ return NULL;
+ }
+
+ if (position < 0) {
+ position += self->newVector->count + PyList_GET_SIZE(self->appendList);
+ }
+
+ if(0 <= position && position < self->newVector->count) {
+ PyObject *result = _get_item(self->newVector, position);
+ Py_XINCREF(result);
+ return result;
+ } else if (0 <= position && position < (self->newVector->count + PyList_GET_SIZE(self->appendList))) {
+ PyObject *result = PyList_GetItem(self->appendList, position - self->newVector->count);
+ Py_INCREF(result);
+ return result;
+ } else {
+ PyErr_SetString(PyExc_IndexError, "Index out of range");
+ }
+ } else {
+ PyErr_Format(PyExc_TypeError, "Indices must be integers, not %.200s", item->ob_type->tp_name);
+ }
+
+ return NULL;
+}
+
+static VNode* doSetWithDirty(VNode* node, unsigned int level, unsigned int position, PyObject* value) {
+ VNode* resultNode;
+ debug("doSetWithDirty(): level == %i\n", level);
+ if(level == 0) {
+ if(!IS_DIRTY(node)) {
+ resultNode = allocNode();
+ copyInsert(resultNode->items, node->items, position & BIT_MASK, value);
+ incRefs((PyObject**)resultNode->items);
+ SET_DIRTY(resultNode);
+ } else {
+ resultNode = node;
+ Py_INCREF(value);
+ Py_DECREF(resultNode->items[position & BIT_MASK]);
+ resultNode->items[position & BIT_MASK] = value;
+ }
+ } else {
+ if(!IS_DIRTY(node)) {
+ resultNode = copyNode(node);
+ SET_DIRTY(resultNode);
+ } else {
+ resultNode = node;
+ }
+
+ Py_ssize_t index = (position >> level) & BIT_MASK;
+ VNode* oldNode = (VNode*)resultNode->items[index];
+ resultNode->items[index] = doSetWithDirty(resultNode->items[index], level - SHIFT, position, value);
+
+ if(resultNode->items[index] != oldNode) {
+ // Node replaced, drop references to old node
+ DEC_NODE_REF_COUNT(oldNode);
+ }
+ }
+
+ return resultNode;
+}
+
+/*
+ Steals a reference to the object that is inserted in the vector.
+*/
+static PyObject *PVectorEvolver_set(PVectorEvolver *self, PyObject *args) {
+ PyObject *argObj = NULL; /* argument to insert */
+ PyObject *position = NULL;
+
+ /* The two O format codes parse the position and the object to insert */
+ if(!PyArg_ParseTuple(args, "OO", &position, &argObj)) {
+ return NULL;
+ }
+
+ if(PVectorEvolver_set_item(self, position, argObj) < 0) {
+ return NULL;
+ }
+
+ Py_INCREF(self);
+ return (PyObject*)self;
+}
+
+static PyObject *PVectorEvolver_delete(PVectorEvolver *self, PyObject *args) {
+ PyObject *position = NULL;
+
+ /* The O format code parses the position as a Python object */
+ if(!PyArg_ParseTuple(args, "O", &position)) {
+ return NULL;
+ }
+
+ if(PVectorEvolver_set_item(self, position, NULL) < 0) {
+ return NULL;
+ }
+
+ Py_INCREF(self);
+ return (PyObject*)self;
+}
+
+
+static int internalPVectorDelete(PVectorEvolver *self, Py_ssize_t position) {
+ // Delete element. Should be unusual. Simple but expensive operation
+ // that reuses the delete code for the vector. Realize the vector, delete on it and
+ // then reset the evolver to work on the new vector.
+ PVector *temp = (PVector*)PVectorEvolver_persistent(self);
+ PVector *temp2 = (PVector*)internalDelete(temp, position, NULL);
+ Py_DECREF(temp);
+
+ if(temp2 == NULL) {
+ return -1;
+ }
+
+ Py_DECREF(self->originalVector);
+ self->originalVector = temp2;
+ self->newVector = self->originalVector;
+ return 0;
+}
+
+static int PVectorEvolver_set_item(PVectorEvolver *self, PyObject* item, PyObject* value) {
+ if (PyIndex_Check(item)) {
+ Py_ssize_t position = PyNumber_AsSsize_t(item, PyExc_IndexError);
+ if (position == -1 && PyErr_Occurred()) {
+ return -1;
+ }
+
+ if (position < 0) {
+ position += self->newVector->count + PyList_GET_SIZE(self->appendList);
+ }
+
+ if((0 <= position) && (position < self->newVector->count)) {
+ if(self->originalVector == self->newVector) {
+ // Create new vector since we're about to modify the original
+ self->newVector = rawCopyPVector(self->originalVector);
+ }
+
+ if(value != NULL) {
+ if(position < TAIL_OFF(self->newVector)) {
+ self->newVector->root = doSetWithDirty(self->newVector->root, self->newVector->shift, position, value);
+ } else {
+ self->newVector->tail = doSetWithDirty(self->newVector->tail, 0, position, value);
+ }
+
+ return 0;
+ }
+
+ return internalPVectorDelete(self, position);
+ } else if((0 <= position) && (position < (self->newVector->count + PyList_GET_SIZE(self->appendList)))) {
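+ // The position addresses an element that is still in the append list and not yet part of the vector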
+ if (value != NULL) {
+ int result = PyList_SetItem(self->appendList, position - self->newVector->count, value);
+ if(result == 0) {
+ Py_INCREF(value);
+ }
+ return result;
+ }
+
+ return internalPVectorDelete(self, position);
+ } else if((0 <= position)
+ && (position < (self->newVector->count + PyList_GET_SIZE(self->appendList) + 1))
+ && (value != NULL)) {
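+ // Setting the slot one step past the current end acts as an append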
+ return PyList_Append(self->appendList, value);
+ } else {
+ PyErr_Format(PyExc_IndexError, "Index out of range: %zd", position);
+ }
+ } else {
+ PyErr_Format(PyExc_TypeError, "Indices must be integers, not %.200s", item->ob_type->tp_name);
+ }
+ return -1;
+}
+
+static PyObject *PVectorEvolver_persistent(PVectorEvolver *self) {
+ PVector *resultVector;
+ if(self->newVector != self->originalVector) {
+ cleanVector(self->newVector);
+ Py_DECREF(self->originalVector);
+ }
+
+ resultVector = self->newVector;
+
+ if(PyList_GET_SIZE(self->appendList)) {
+ PVector *oldVector = resultVector;
+ resultVector = (PVector*)PVector_extend(resultVector, self->appendList);
+ Py_DECREF(oldVector);
+ Py_DECREF(self->appendList);
+ self->appendList = NULL;
+ }
+
+ initializeEvolver(self, resultVector, self->appendList);
+ Py_INCREF(resultVector);
+ return (PyObject*)resultVector;
+}
+
+static Py_ssize_t PVectorEvolver_len(PVectorEvolver *self) {
+ return self->newVector->count + PyList_GET_SIZE(self->appendList);
+}
+
+static PyObject* PVectorEvolver_is_dirty(PVectorEvolver *self) {
+ if((self->newVector != self->originalVector) || (PyList_GET_SIZE(self->appendList) > 0)) {
+ Py_INCREF(Py_True);
+ return Py_True;
+ }
+
+ Py_INCREF(Py_False);
+ return Py_False;
+}
+
+static int PVectorEvolver_traverse(PVectorEvolver *self, visitproc visit, void *arg) {
+ Py_VISIT(self->newVector);
+ if (self->newVector != self->originalVector) {
+ Py_VISIT(self->originalVector);
+ }
+ Py_VISIT(self->appendList);
+ return 0;
+}
+
+static PyMethodDef PyrsistentMethods[] = {
+ {"pvector", pyrsistent_pvec, METH_VARARGS,
+ "pvector([iterable])\n"
+ "Create a new persistent vector containing the elements in iterable.\n\n"
+ ">>> v1 = pvector([1, 2, 3])\n"
+ ">>> v1\n"
+ "pvector([1, 2, 3])"},
+ {NULL, NULL, 0, NULL}
+};
+
+
+/********************* Python module initialization ************************/
+
+#if PY_MAJOR_VERSION >= 3
+ static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "pvectorc", /* m_name */
+ "Persistent vector", /* m_doc */
+ -1, /* m_size */
+ PyrsistentMethods, /* m_methods */
+ NULL, /* m_reload */
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL, /* m_free */
+ };
+#endif
+
+static PyObject* pyrsistent_pvectorc_moduleinit(void) {
+ PyObject* m;
+
+ // Only allow creation/initialization through factory method pvec
+ PVectorType.tp_init = NULL;
+ PVectorType.tp_new = NULL;
+
+ if (PyType_Ready(&PVectorType) < 0) {
+ return NULL;
+ }
+ if (PyType_Ready(&PVectorIterType) < 0) {
+ return NULL;
+ }
+ if (PyType_Ready(&PVectorEvolverType) < 0) {
+ return NULL;
+ }
+
+
+#if PY_MAJOR_VERSION >= 3
+ m = PyModule_Create(&moduledef);
+#else
+ m = Py_InitModule3("pvectorc", PyrsistentMethods, "Persistent vector");
+#endif
+
+ if (m == NULL) {
+ return NULL;
+ }
+
+ if(EMPTY_VECTOR == NULL) {
+ EMPTY_VECTOR = emptyNewPvec();
+ }
+
+ nodeCache.size = 0;
+
+ Py_INCREF(&PVectorType);
+ PyModule_AddObject(m, "PVector", (PyObject *)&PVectorType);
+
+ return m;
+}
+
+#if PY_MAJOR_VERSION >= 3
+PyMODINIT_FUNC PyInit_pvectorc(void) {
+ return pyrsistent_pvectorc_moduleinit();
+}
+#else
+PyMODINIT_FUNC initpvectorc(void) {
+ pyrsistent_pvectorc_moduleinit();
+}
+#endif
diff --git a/third_party/python/pyrsistent/pyrsistent.egg-info/PKG-INFO b/third_party/python/pyrsistent/pyrsistent.egg-info/PKG-INFO
new file mode 100644
index 0000000000..1d1c159034
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent.egg-info/PKG-INFO
@@ -0,0 +1,742 @@
+Metadata-Version: 1.1
+Name: pyrsistent
+Version: 0.16.0
+Summary: Persistent/Functional/Immutable data structures
+Home-page: http://github.com/tobgu/pyrsistent/
+Author: Tobias Gustafsson
+Author-email: tobias.l.gustafsson@gmail.com
+License: MIT
+Description: Pyrsistent
+ ==========
+ .. image:: https://travis-ci.org/tobgu/pyrsistent.png?branch=master
+ :target: https://travis-ci.org/tobgu/pyrsistent
+
+ .. image:: https://badge.fury.io/py/pyrsistent.svg
+ :target: https://badge.fury.io/py/pyrsistent
+
+ .. image:: https://coveralls.io/repos/tobgu/pyrsistent/badge.svg?branch=master&service=github
+ :target: https://coveralls.io/github/tobgu/pyrsistent?branch=master
+
+
+ .. _Pyrthon: https://www.github.com/tobgu/pyrthon/
+
+ Pyrsistent is a library of persistent collections (sometimes referred to as functional data structures), persistent in
+ the sense that they are immutable.
+
+ All methods on a data structure that would normally mutate it instead return a new copy of the structure containing the
+ requested updates. The original structure is left untouched.
+
+ This simplifies reasoning about what a program does, since no hidden side effects can ever take place in these
+ data structures. You can rest assured that the object you hold a reference to will remain the same throughout its
+ lifetime and need not worry that somewhere five stack levels below you, in the darkest corner of your application,
+ someone has decided to remove that element that you expected to be there.
+
+ Pyrsistent is influenced by persistent data structures such as those found in the standard library of Clojure. The
+ data structures are designed to share common elements through path copying.
+ It aims to take these concepts and make them as pythonic as possible so that they can be easily integrated into any
+ Python program without hassle.
+
+ If you want to go all in on persistent data structures and use literal syntax to define them in your code rather
+ than function calls check out Pyrthon_.
+
+ Examples
+ --------
+ .. _Sequence: collections_
+ .. _Hashable: collections_
+ .. _Mapping: collections_
+ .. _Mappings: collections_
+ .. _Set: collections_
+ .. _collections: https://docs.python.org/3/library/collections.abc.html
+ .. _documentation: http://pyrsistent.readthedocs.org/
+
+ The collection types and key features currently implemented are:
+
+ * PVector_, similar to a python list
+ * PMap_, similar to dict
+ * PSet_, similar to set
+ * PRecord_, a PMap on steroids with fixed fields, optional type and invariant checking and much more
+ * PClass_, a Python class with fixed fields, optional type and invariant checking and much more
+ * `Checked collections`_, PVector, PMap and PSet with optional type and invariance checks and more
+ * PBag, similar to collections.Counter
+ * PList, a classic singly linked list
+ * PDeque, similar to collections.deque
+ * Immutable object type (immutable) built on the named tuple
+ * freeze_ and thaw_ functions to convert between Python's standard collections and pyrsistent collections.
+ * Flexible transformations_ of arbitrarily complex structures built from PMaps and PVectors.
+
+ Below are examples of common usage patterns for some of the structures and features. More information and
+ full documentation for all data structures is available in the documentation_.
+
+ .. _PVector:
+
+ PVector
+ ~~~~~~~
+ With full support for the Sequence_ protocol, PVector is meant as a drop-in replacement for the built-in list from a
+ reader's point of view. Write operations of course differ, since no in-place mutation is done, but naming should be in line
+ with the corresponding operations on the built-in list.
+
+ Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
+
+ Appends are amortized O(1). Random access and insert are O(log32(n)) where n is the size of the vector.
+
+ .. code:: python
+
+ >>> from pyrsistent import v, pvector
+
+ # No mutation of vectors once created, instead they
+ # are "evolved" leaving the original untouched
+ >>> v1 = v(1, 2, 3)
+ >>> v2 = v1.append(4)
+ >>> v3 = v2.set(1, 5)
+ >>> v1
+ pvector([1, 2, 3])
+ >>> v2
+ pvector([1, 2, 3, 4])
+ >>> v3
+ pvector([1, 5, 3, 4])
+
+ # Random access and slicing
+ >>> v3[1]
+ 5
+ >>> v3[1:3]
+ pvector([5, 3])
+
+ # Iteration
+ >>> list(x + 1 for x in v3)
+ [2, 6, 4, 5]
+ >>> pvector(2 * x for x in range(3))
+ pvector([0, 2, 4])
+
+ .. _PMap:
+
+ PMap
+ ~~~~
+ With full support for the Mapping_ protocol, PMap is meant as a drop-in replacement for the built-in dict from a reader's
+ point of view. Support for the Hashable_ protocol also means that it can be used as a key in other Mappings_.
+
+ Random access and insert are O(log32(n)) where n is the size of the map.
+
+ .. code:: python
+
+ >>> from pyrsistent import m, pmap, v
+
+ # No mutation of maps once created, instead they are
+ # "evolved" leaving the original untouched
+ >>> m1 = m(a=1, b=2)
+ >>> m2 = m1.set('c', 3)
+ >>> m3 = m2.set('a', 5)
+ >>> m1
+ pmap({'a': 1, 'b': 2})
+ >>> m2
+ pmap({'a': 1, 'c': 3, 'b': 2})
+ >>> m3
+ pmap({'a': 5, 'c': 3, 'b': 2})
+ >>> m3['a']
+ 5
+
+ # Evolution of nested persistent structures
+ >>> m4 = m(a=5, b=6, c=v(1, 2))
+ >>> m4.transform(('c', 1), 17)
+ pmap({'a': 5, 'c': pvector([1, 17]), 'b': 6})
+ >>> m5 = m(a=1, b=2)
+
+ # Evolve by merging with other mappings
+ >>> m5.update(m(a=2, c=3), {'a': 17, 'd': 35})
+ pmap({'a': 17, 'c': 3, 'b': 2, 'd': 35})
+ >>> pmap({'x': 1, 'y': 2}) + pmap({'y': 3, 'z': 4})
+ pmap({'y': 3, 'x': 1, 'z': 4})
+
+ # Dict-like methods to convert to list and iterate
+ >>> m3.items()
+ pvector([('a', 5), ('c', 3), ('b', 2)])
+ >>> list(m3)
+ ['a', 'c', 'b']
+
+ .. _PSet:
+
+ PSet
+ ~~~~
+ With full support for the Set_ protocol, PSet is meant as a drop-in replacement for the built-in set from a reader's
+ point of view. Support for the Hashable_ protocol also means that it can be used as a key in Mappings_.
+
+ Random access and insert are O(log32(n)) where n is the size of the set.
+
+ .. code:: python
+
+ >>> from pyrsistent import s
+
+ # No mutation of sets once created, you know the story...
+ >>> s1 = s(1, 2, 3, 2)
+ >>> s2 = s1.add(4)
+ >>> s3 = s1.remove(1)
+ >>> s1
+ pset([1, 2, 3])
+ >>> s2
+ pset([1, 2, 3, 4])
+ >>> s3
+ pset([2, 3])
+
+ # Full support for set operations
+ >>> s1 | s(3, 4, 5)
+ pset([1, 2, 3, 4, 5])
+ >>> s1 & s(3, 4, 5)
+ pset([3])
+ >>> s1 < s2
+ True
+ >>> s1 < s(3, 4, 5)
+ False
+
+ .. _PRecord:
+
+ PRecord
+ ~~~~~~~
+ A PRecord is a PMap with a fixed set of specified fields. Records are declared as Python classes inheriting
+ from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
+ access using subscript notation.
+
+ .. code:: python
+
+ >>> from pyrsistent import PRecord, field
+ >>> class ARecord(PRecord):
+ ... x = field()
+ ...
+ >>> r = ARecord(x=3)
+ >>> r
+ ARecord(x=3)
+ >>> r.x
+ 3
+ >>> r.set(x=2)
+ ARecord(x=2)
+ >>> r.set(y=2)
+ Traceback (most recent call last):
+ AttributeError: 'y' is not among the specified fields for ARecord
+
+ Type information
+ ****************
+ It is possible to add type information to the record to enforce type checks. Multiple allowed types can be specified
+ by providing an iterable of types.
+
+ .. code:: python
+
+ >>> class BRecord(PRecord):
+ ... x = field(type=int)
+ ... y = field(type=(int, type(None)))
+ ...
+ >>> BRecord(x=3, y=None)
+ BRecord(y=None, x=3)
+ >>> BRecord(x=3.0)
+ Traceback (most recent call last):
+ PTypeError: Invalid type for field BRecord.x, was float
+
+
+ Custom types (classes) that are iterable should be wrapped in a tuple to prevent their
+ members from being added to the set of valid types. Enums in particular are now
+ supported without wrapping; see #83 for more information.
+
+ Mandatory fields
+ ****************
+ Fields are not mandatory by default but can be specified as such. If fields are missing, an
+ *InvariantException* will be thrown which contains information about the missing fields.
+
+ .. code:: python
+
+ >>> from pyrsistent import InvariantException
+ >>> class CRecord(PRecord):
+ ... x = field(mandatory=True)
+ ...
+ >>> r = CRecord(x=3)
+ >>> try:
+ ... r.discard('x')
+ ... except InvariantException as e:
+ ... print(e.missing_fields)
+ ...
+ ('CRecord.x',)
+
+ Invariants
+ **********
+ It is possible to add invariants that must hold when evolving the record. Invariants can be
+ specified on both field and record level. If invariants fail, an *InvariantException* will be
+ thrown which contains information about the failing invariants. An invariant function should
+ return a tuple consisting of a boolean that tells whether the invariant holds and an object
+ describing the invariant. This object can later be used to identify which invariant failed.
+
+ The global invariant function is only executed if all field invariants hold.
+
+ Global invariants are inherited to subclasses.
+
+ .. code:: python
+
+ >>> class RestrictedVector(PRecord):
+ ... __invariant__ = lambda r: (r.y >= r.x, 'x larger than y')
+ ... x = field(invariant=lambda x: (x > 0, 'x negative'))
+ ... y = field(invariant=lambda y: (y > 0, 'y negative'))
+ ...
+ >>> r = RestrictedVector(y=3, x=2)
+ >>> try:
+ ... r.set(x=-1, y=-2)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ ('y negative', 'x negative')
+ >>> try:
+ ... r.set(x=2, y=1)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ ('x larger than y',)
+
+ Invariants may also contain multiple assertions. For those cases the invariant function should
+ return a tuple of invariant tuples as described above. This structure is reflected in the
+ invariant_errors attribute of the exception which will contain tuples with data from all failed
+ invariants. E.g.:
+
+ .. code:: python
+
+ >>> class EvenX(PRecord):
+ ... x = field(invariant=lambda x: ((x > 0, 'x negative'), (x % 2 == 0, 'x odd')))
+ ...
+ >>> try:
+ ... EvenX(x=-1)
+ ... except InvariantException as e:
+ ... print(e.invariant_errors)
+ ...
+ (('x negative', 'x odd'),)
+
+
+ Factories
+ *********
+ It's possible to specify factory functions for fields. The factory function receives whatever
+ is supplied as the field value, and the value returned by the factory is assigned to the field,
+ provided that any type and invariant checks hold.
+ PRecords have a default factory specified as a static function on the class, create(). It takes
+ a *Mapping* as argument and returns an instance of the specific record.
+ If a record has fields of type PRecord the create() method of that record will
+ be called to create the "sub record" if no factory has explicitly been specified to override
+ this behaviour.
+
+ .. code:: python
+
+ >>> class DRecord(PRecord):
+ ... x = field(factory=int)
+ ...
+ >>> class ERecord(PRecord):
+ ... d = field(type=DRecord)
+ ...
+ >>> ERecord.create({'d': {'x': '1'}})
+ ERecord(d=DRecord(x=1))
+
+ Collection fields
+ *****************
+ It is also possible to have fields with ``pyrsistent`` collections.
+
+ .. code:: python
+
+ >>> from pyrsistent import pset_field, pmap_field, pvector_field
+ >>> class MultiRecord(PRecord):
+ ... set_of_ints = pset_field(int)
+ ... map_int_to_str = pmap_field(int, str)
+ ... vector_of_strs = pvector_field(str)
+ ...
+
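+ A short usage sketch; the collection fields are assumed to coerce plain built-in collections
+ into their checked pyrsistent counterparts:
+
+ .. code:: python
+
+ >>> rec = MultiRecord(set_of_ints={1, 2}, map_int_to_str={1: 'one'}, vector_of_strs=['a', 'b'])
+ >>> 2 in rec.set_of_ints
+ True
+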
+ Serialization
+ *************
+ PRecords support serialization back to dicts. Default serialization will take keys and values
+ "as is" and output them into a dict. It is possible to specify custom serialization functions
+ to take care of fields that require special treatment.
+
+ .. code:: python
+
+ >>> from datetime import date
+ >>> class Person(PRecord):
+ ... name = field(type=unicode)
+ ... birth_date = field(type=date,
+ ... serializer=lambda format, d: d.strftime(format['date']))
+ ...
+ >>> john = Person(name=u'John', birth_date=date(1985, 10, 21))
+ >>> john.serialize({'date': '%Y-%m-%d'})
+ {'birth_date': '1985-10-21', 'name': u'John'}
+
+
+ .. _instar: https://github.com/boxed/instar/
+
+ .. _PClass:
+
+ PClass
+ ~~~~~~
+ A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python classes inheriting
+ from PClass. A PClass is defined the same way a PRecord is and behaves like one in all aspects except that it
+ is not a PMap and hence not a collection but rather a plain Python object.
+
+ .. code:: python
+
+ >>> from pyrsistent import PClass, field
+ >>> class AClass(PClass):
+ ... x = field()
+ ...
+ >>> a = AClass(x=3)
+ >>> a
+ AClass(x=3)
+ >>> a.x
+ 3
+
+
+ Checked collections
+ ~~~~~~~~~~~~~~~~~~~
+ Checked collections currently come in three flavors: CheckedPVector, CheckedPMap and CheckedPSet.
+
+ .. code:: python
+
+ >>> from pyrsistent import CheckedPVector, CheckedPMap, CheckedPSet, thaw
+ >>> class Positives(CheckedPSet):
+ ... __type__ = (long, int)
+ ... __invariant__ = lambda n: (n >= 0, 'Negative')
+ ...
+ >>> class Lottery(PRecord):
+ ... name = field(type=str)
+ ... numbers = field(type=Positives, invariant=lambda p: (len(p) > 0, 'No numbers'))
+ ...
+ >>> class Lotteries(CheckedPVector):
+ ... __type__ = Lottery
+ ...
+ >>> class LotteriesByDate(CheckedPMap):
+ ... __key_type__ = date
+ ... __value_type__ = Lotteries
+ ...
+ >>> lotteries = LotteriesByDate.create({date(2015, 2, 15): [{'name': 'SuperLotto', 'numbers': {1, 2, 3}},
+ ... {'name': 'MegaLotto', 'numbers': {4, 5, 6}}],
+ ... date(2015, 2, 16): [{'name': 'SuperLotto', 'numbers': {3, 2, 1}},
+ ... {'name': 'MegaLotto', 'numbers': {6, 5, 4}}]})
+ >>> lotteries
+ LotteriesByDate({datetime.date(2015, 2, 15): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')]), datetime.date(2015, 2, 16): Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])})
+
+ # The checked versions support all operations that the corresponding
+ # unchecked types do
+ >>> lottery_0215 = lotteries[date(2015, 2, 15)]
+ >>> lottery_0215.transform([0, 'name'], 'SuperDuperLotto')
+ Lotteries([Lottery(numbers=Positives([1, 2, 3]), name='SuperDuperLotto'), Lottery(numbers=Positives([4, 5, 6]), name='MegaLotto')])
+
+ # But also makes asserts that types and invariants hold
+ >>> lottery_0215.transform([0, 'name'], 999)
+ Traceback (most recent call last):
+ PTypeError: Invalid type for field Lottery.name, was int
+
+ >>> lottery_0215.transform([0, 'numbers'], set())
+ Traceback (most recent call last):
+ InvariantException: Field invariant failed
+
+ # They can be converted back to python built ins with either thaw()
+ # or serialize() (which provides possibilities to customize serialization)
+ >>> thaw(lottery_0215)
+ [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
+ >>> lottery_0215.serialize()
+ [{'numbers': set([1, 2, 3]), 'name': 'SuperLotto'}, {'numbers': set([4, 5, 6]), 'name': 'MegaLotto'}]
+
+ .. _transformations:
+
+ Transformations
+ ~~~~~~~~~~~~~~~
+ Transformations are inspired by the cool library instar_ for Clojure. They let you evolve PMaps and PVectors
+ with arbitrarily deep/complex nesting using a simple and flexible matching syntax.
+
+ The first argument to a transformation is the path that points out the value to transform. The
+ second is the transformation to perform. If the transformation is callable it will be applied
+ to the value(s) matching the path. The path may also contain callables. In that case they are
+ treated as matchers. If a matcher returns True for a specific key, that key is considered for transformation.
+
+ .. code:: python
+
+ # Basic examples
+ >>> from pyrsistent import inc, freeze, thaw, rex, ny, discard
+ >>> v1 = freeze([1, 2, 3, 4, 5])
+ >>> v1.transform([2], inc)
+ pvector([1, 2, 4, 4, 5])
+ >>> v1.transform([lambda ix: 0 < ix < 4], 8)
+ pvector([1, 8, 8, 8, 5])
+ >>> v1.transform([lambda ix, v: ix == 0 or v == 5], 0)
+ pvector([0, 2, 3, 4, 0])
+
+ # The (a)ny matcher can be used to match anything
+ >>> v1.transform([ny], 8)
+ pvector([8, 8, 8, 8, 8])
+
+ # Regular expressions can be used for matching
+ >>> scores = freeze({'John': 12, 'Joseph': 34, 'Sara': 23})
+ >>> scores.transform([rex('^Jo')], 0)
+ pmap({'Joseph': 0, 'Sara': 23, 'John': 0})
+
+ # Transformations can be done on arbitrarily deep structures
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ # When nothing has been transformed the original data structure is kept
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+
+ # There is a special transformation that can be used to discard elements. Also
+ # multiple transformations can be applied in one call
+ >>> thaw(news_paper.transform(['weather'], discard, ['articles', ny, 'content'], discard))
+ {'articles': [{'author': 'Sara'}, {'author': 'Steve'}]}
+
+ Evolvers
+ ~~~~~~~~
+ PVector, PMap and PSet all have support for a concept dubbed *evolvers*. An evolver acts like a mutable
+ view of the underlying persistent data structure with "transaction like" semantics. No updates of the original
+ data structure are ever performed; it remains fully immutable.
+
+ The evolvers have a very limited API by design to discourage excessive, and inappropriate, usage as that would
+ take us down the mutable road. In principle only basic mutation and element access functions are supported.
+ Check out the documentation_ of each data structure for specific examples.
+
+ Examples of when you may want to use an evolver instead of working directly with the data structure include:
+
+ * Multiple updates are done to the same data structure and the intermediate results are of no
+ interest. In this case using an evolver may be more efficient and easier to work with.
+ * You need to pass a vector into a legacy function, or a function that you have no control
+ over, which performs in-place mutations. In this case pass an evolver instance
+ instead and then create a new pvector from the evolver once the function returns.
+
+ .. code:: python
+
+ >>> from pyrsistent import v
+
+ # In place mutation as when working with the built in counterpart
+ >>> v1 = v(1, 2, 3)
+ >>> e = v1.evolver()
+ >>> e[1] = 22
+ >>> e = e.append(4)
+ >>> e = e.extend([5, 6])
+ >>> e[5] += 1
+ >>> len(e)
+ 6
+
+ # The evolver is considered *dirty* when it contains changes compared to the underlying vector
+ >>> e.is_dirty()
+ True
+
+ # But the underlying pvector still remains untouched
+ >>> v1
+ pvector([1, 2, 3])
+
+ # Once satisfied with the updates you can produce a new pvector containing the updates.
+ # The new pvector will share data with the original pvector in the same way that would have
+ # been done if only using operations on the pvector.
+ >>> v2 = e.persistent()
+ >>> v2
+ pvector([1, 22, 3, 4, 5, 7])
+
+ # The evolver is now no longer considered *dirty* as it contains no differences compared to the
+ # pvector just produced.
+ >>> e.is_dirty()
+ False
+
+ # You may continue to work with the same evolver without affecting the content of v2
+ >>> e[0] = 11
+
+ # Or create a new evolver from v2. The two evolvers can be updated independently but will both
+ # share data with v2 where possible.
+ >>> e2 = v2.evolver()
+ >>> e2[0] = 1111
+ >>> e.persistent()
+ pvector([11, 22, 3, 4, 5, 7])
+ >>> e2.persistent()
+ pvector([1111, 22, 3, 4, 5, 7])
+
+ .. _freeze:
+ .. _thaw:
+
+ freeze and thaw
+ ~~~~~~~~~~~~~~~
+ These functions are great when your cozy immutable world has to interact with the evil mutable world outside.
+
+ .. code:: python
+
+ >>> from pyrsistent import freeze, thaw, v, m
+ >>> freeze([1, {'a': 3}])
+ pvector([1, pmap({'a': 3})])
+ >>> thaw(v(1, m(a=3)))
+ [1, {'a': 3}]
+
+ Compatibility
+ -------------
+
+ Pyrsistent is developed and tested on Python 2.7, 3.5, 3.6, 3.7 and PyPy (Python 2 and 3 compatible). It will most
+ likely work on all other versions >= 3.4 but no guarantees are given. :)
+
+ Compatibility issues
+ ~~~~~~~~~~~~~~~~~~~~
+
+ .. _27: https://github.com/tobgu/pyrsistent/issues/27
+
+ There is currently one known compatibility issue when comparing built-in sets and frozensets to PSets, as discussed in 27_.
+ It affects Python 2 versions < 2.7.8 and Python 3 versions < 3.4.0 and is due to a bug described in
+ http://bugs.python.org/issue8743.
+
+ Comparisons will fail or be incorrect when using the set/frozenset as the left-hand side of the comparison. As a workaround
+ you need to either upgrade Python to a more recent version, avoid comparing sets/frozensets with PSets, or always make
+ sure to convert both sides of the comparison to the same type before performing the comparison.
+
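+ A sketch of the last workaround, converting both sides to the same type before comparing:
+
+ .. code:: python
+
+ >>> from pyrsistent import s
+ >>> set(s(1, 2)) == set([1, 2])
+ True
+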
+ Performance
+ -----------
+
+ Pyrsistent is developed with performance in mind. Still, while some operations are nearly on par with their built-in,
+ mutable, counterparts in terms of speed, other operations are slower. Where optimizations have been made,
+ speed has generally been valued over space.
+
+ Pyrsistent comes with two API-compatible flavors of PVector (on which PMap and PSet are based): one pure Python
+ implementation and one implemented as a C extension. The latter is generally 2-20 times faster than the former.
+ The C extension will be used automatically when possible.
+
+ The pure Python implementation is fully PyPy compatible. Running it under PyPy speeds operations up considerably if
+ the structures are used heavily (if JITed); in some cases the performance is almost on par with the built-in counterparts.
+
+ Type hints
+ ----------
+
+ PEP 561 style type hints for use with mypy and various editors are available for most types and functions in pyrsistent.
+
+ Type classes for annotating your own code with pyrsistent types are also available under pyrsistent.typing.
+
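+ A minimal annotation sketch using the classes in pyrsistent.typing (the function name here is illustrative):
+
+ .. code:: python
+
+ >>> from pyrsistent import pvector
+ >>> from pyrsistent.typing import PVector
+ >>> def sorted_copy(xs: PVector[int]) -> PVector[int]:
+ ... return pvector(sorted(xs))
+ ...
+ >>> sorted_copy(pvector([3, 1, 2]))
+ pvector([1, 2, 3])
+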
+ Installation
+ ------------
+
+ pip install pyrsistent
+
+ Documentation
+ -------------
+
+ Available at http://pyrsistent.readthedocs.org/
+
+ Brief presentation available at http://slides.com/tobiasgustafsson/immutability-and-python/
+
+ Contributors
+ ------------
+
+ Tobias Gustafsson https://github.com/tobgu
+
+ Christopher Armstrong https://github.com/radix
+
+ Anders Hovmöller https://github.com/boxed
+
+ Itamar Turner-Trauring https://github.com/itamarst
+
+ Jonathan Lange https://github.com/jml
+
+ Richard Futrell https://github.com/Futrell
+
+ Jakob Hollenstein https://github.com/jkbjh
+
+ David Honour https://github.com/foolswood
+
+ David R. MacIver https://github.com/DRMacIver
+
+ Marcus Ewert https://github.com/sarum90
+
+ Jean-Paul Calderone https://github.com/exarkun
+
+ Douglas Treadwell https://github.com/douglas-treadwell
+
+ Travis Parker https://github.com/teepark
+
+ Julian Berman https://github.com/Julian
+
+ Dennis Tomas https://github.com/dtomas
+
+ Neil Vyas https://github.com/neilvyas
+
+ doozr https://github.com/doozr
+
+ Kamil Galuszka https://github.com/galuszkak
+
+ Tsuyoshi Hombashi https://github.com/thombashi
+
+ nattofriends https://github.com/nattofriends
+
+ agberk https://github.com/agberk
+
+ Waleed Khan https://github.com/arxanas
+
+ Jean-Louis Fuchs https://github.com/ganwell
+
+ Carlos Corbacho https://github.com/ccorbacho
+
+ Felix Yan https://github.com/felixonmars
+
+ benrg https://github.com/benrg
+
+ Jere Lahelma https://github.com/je-l
+
+ Max Taggart https://github.com/MaxTaggart
+
+ Vincent Philippon https://github.com/vphilippon
+
+ Semen Zhydenko https://github.com/ss18
+
+ Till Varoquaux https://github.com/till-varoquaux
+
+ Michal Kowalik https://github.com/michalvi
+
+ ossdev07 https://github.com/ossdev07
+
+ Kerry Olesen https://github.com/qhesz
+
+ johnthagen https://github.com/johnthagen
+
+ Contributing
+ ------------
+
+ Want to contribute? That's great! If you experience problems please log them on GitHub. If you want to contribute code,
+ please fork the repository and submit a pull request.
+
+ Run tests
+ ~~~~~~~~~
+ .. _tox: https://tox.readthedocs.io/en/latest/
+
+ Tests can be executed using tox_.
+
+ Install tox: ``pip install tox``
+
+ Run test for Python 2.7: ``tox -epy27``
+
+ Release
+ ~~~~~~~
+ * Update CHANGES.txt
+ * Update README with any new contributors and potential info needed.
+ * Update _pyrsistent_version.py
+ * python setup.py sdist upload
+ * Commit and tag with new version: git add -u . && git commit -m 'Prepare version vX.Y.Z' && git tag -a vX.Y.Z -m 'vX.Y.Z'
+ * Push commit and tags: git push && git push --tags
+
+ Project status
+ --------------
+ Pyrsistent can be considered stable and mature (who knows, there may even be a 1.0 some day :-)). The project is
+ maintained: bugs are fixed, PRs are reviewed and merged, and new releases are made. I currently do not have time to develop
+ new features or functionality that I don't have use for myself. I'm more than happy to take PRs for new
+ functionality though!
+
+ There are a bunch of issues marked with ``enhancement`` and ``help wanted`` that contain requests for new functionality
+ that would be nice to include. The level of difficulty and extent of the issues vary; please reach out to me if you're
+ interested in working on any of them.
+
+ If you feel that you have a grand master plan for where you would like Pyrsistent to go and have the time to put into
+ it please don't hesitate to discuss this with me and submit PRs for it. If all goes well I'd be more than happy to add
+ additional maintainers to the project!
+
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: Implementation :: PyPy
diff --git a/third_party/python/pyrsistent/pyrsistent.egg-info/SOURCES.txt b/third_party/python/pyrsistent/pyrsistent.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..9d3cc0a8d6
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent.egg-info/SOURCES.txt
@@ -0,0 +1,53 @@
+CHANGES.txt
+LICENCE.mit
+MANIFEST.in
+README
+README.rst
+_pyrsistent_version.py
+pvectorcmodule.c
+setup.cfg
+setup.py
+pyrsistent/__init__.py
+pyrsistent/__init__.pyi
+pyrsistent/_checked_types.py
+pyrsistent/_compat.py
+pyrsistent/_field_common.py
+pyrsistent/_helpers.py
+pyrsistent/_immutable.py
+pyrsistent/_pbag.py
+pyrsistent/_pclass.py
+pyrsistent/_pdeque.py
+pyrsistent/_plist.py
+pyrsistent/_pmap.py
+pyrsistent/_precord.py
+pyrsistent/_pset.py
+pyrsistent/_pvector.py
+pyrsistent/_toolz.py
+pyrsistent/_transformations.py
+pyrsistent/py.typed
+pyrsistent/typing.py
+pyrsistent/typing.pyi
+pyrsistent.egg-info/PKG-INFO
+pyrsistent.egg-info/SOURCES.txt
+pyrsistent.egg-info/dependency_links.txt
+pyrsistent.egg-info/requires.txt
+pyrsistent.egg-info/top_level.txt
+tests/bag_test.py
+tests/checked_map_test.py
+tests/checked_set_test.py
+tests/checked_vector_test.py
+tests/class_test.py
+tests/deque_test.py
+tests/field_test.py
+tests/freeze_test.py
+tests/hypothesis_vector_test.py
+tests/immutable_object_test.py
+tests/list_test.py
+tests/map_test.py
+tests/memory_profiling.py
+tests/record_test.py
+tests/regression_test.py
+tests/set_test.py
+tests/toolz_test.py
+tests/transform_test.py
+tests/vector_test.py
\ No newline at end of file
diff --git a/third_party/python/pyrsistent/pyrsistent.egg-info/dependency_links.txt b/third_party/python/pyrsistent/pyrsistent.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/pyrsistent/pyrsistent.egg-info/requires.txt b/third_party/python/pyrsistent/pyrsistent.egg-info/requires.txt
new file mode 100644
index 0000000000..ffe2fce498
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent.egg-info/requires.txt
@@ -0,0 +1 @@
+six
diff --git a/third_party/python/pyrsistent/pyrsistent.egg-info/top_level.txt b/third_party/python/pyrsistent/pyrsistent.egg-info/top_level.txt
new file mode 100644
index 0000000000..f2460728a9
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent.egg-info/top_level.txt
@@ -0,0 +1,3 @@
+_pyrsistent_version
+pvectorc
+pyrsistent
diff --git a/third_party/python/pyrsistent/pyrsistent/__init__.py b/third_party/python/pyrsistent/pyrsistent/__init__.py
new file mode 100644
index 0000000000..be299658f3
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/__init__.py
@@ -0,0 +1,47 @@
+# -*- coding: utf-8 -*-
+
+from pyrsistent._pmap import pmap, m, PMap
+
+from pyrsistent._pvector import pvector, v, PVector
+
+from pyrsistent._pset import pset, s, PSet
+
+from pyrsistent._pbag import pbag, b, PBag
+
+from pyrsistent._plist import plist, l, PList
+
+from pyrsistent._pdeque import pdeque, dq, PDeque
+
+from pyrsistent._checked_types import (
+ CheckedPMap, CheckedPVector, CheckedPSet, InvariantException, CheckedKeyTypeError,
+ CheckedValueTypeError, CheckedType, optional)
+
+from pyrsistent._field_common import (
+ field, PTypeError, pset_field, pmap_field, pvector_field)
+
+from pyrsistent._precord import PRecord
+
+from pyrsistent._pclass import PClass, PClassMeta
+
+from pyrsistent._immutable import immutable
+
+from pyrsistent._helpers import freeze, thaw, mutant
+
+from pyrsistent._transformations import inc, discard, rex, ny
+
+from pyrsistent._toolz import get_in
+
+
+__all__ = ('pmap', 'm', 'PMap',
+ 'pvector', 'v', 'PVector',
+ 'pset', 's', 'PSet',
+ 'pbag', 'b', 'PBag',
+ 'plist', 'l', 'PList',
+ 'pdeque', 'dq', 'PDeque',
+ 'CheckedPMap', 'CheckedPVector', 'CheckedPSet', 'InvariantException', 'CheckedKeyTypeError', 'CheckedValueTypeError', 'CheckedType', 'optional',
+ 'PRecord', 'field', 'pset_field', 'pmap_field', 'pvector_field',
+ 'PClass', 'PClassMeta',
+ 'immutable',
+ 'freeze', 'thaw', 'mutant',
+ 'get_in',
+ 'inc', 'discard', 'rex', 'ny')
diff --git a/third_party/python/pyrsistent/pyrsistent/__init__.pyi b/third_party/python/pyrsistent/pyrsistent/__init__.pyi
new file mode 100644
index 0000000000..5909f7991a
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/__init__.pyi
@@ -0,0 +1,213 @@
+# flake8: noqa: E704
+# from https://gist.github.com/WuTheFWasThat/091a17d4b5cab597dfd5d4c2d96faf09
+# Stubs for pyrsistent (Python 3.6)
+
+from typing import Any
+from typing import AnyStr
+from typing import Callable
+from typing import Iterable
+from typing import Iterator
+from typing import List
+from typing import Optional
+from typing import Mapping
+from typing import MutableMapping
+from typing import Sequence
+from typing import Set
+from typing import Union
+from typing import Tuple
+from typing import Type
+from typing import TypeVar
+from typing import overload
+
+# see commit 08519aa for explanation of the re-export
+from pyrsistent.typing import CheckedKeyTypeError as CheckedKeyTypeError
+from pyrsistent.typing import CheckedPMap as CheckedPMap
+from pyrsistent.typing import CheckedPSet as CheckedPSet
+from pyrsistent.typing import CheckedPVector as CheckedPVector
+from pyrsistent.typing import CheckedType as CheckedType
+from pyrsistent.typing import CheckedValueTypeError as CheckedValueTypeError
+from pyrsistent.typing import InvariantException as InvariantException
+from pyrsistent.typing import PClass as PClass
+from pyrsistent.typing import PBag as PBag
+from pyrsistent.typing import PDeque as PDeque
+from pyrsistent.typing import PList as PList
+from pyrsistent.typing import PMap as PMap
+from pyrsistent.typing import PMapEvolver as PMapEvolver
+from pyrsistent.typing import PSet as PSet
+from pyrsistent.typing import PSetEvolver as PSetEvolver
+from pyrsistent.typing import PTypeError as PTypeError
+from pyrsistent.typing import PVector as PVector
+from pyrsistent.typing import PVectorEvolver as PVectorEvolver
+
+T = TypeVar('T')
+KT = TypeVar('KT')
+VT = TypeVar('VT')
+
+def pmap(initial: Union[Mapping[KT, VT], Iterable[Tuple[KT, VT]]] = {}, pre_size: int = 0) -> PMap[KT, VT]: ...
+def m(**kwargs: VT) -> PMap[str, VT]: ...
+
+def pvector(iterable: Iterable[T] = ...) -> PVector[T]: ...
+def v(*iterable: T) -> PVector[T]: ...
+
+def pset(iterable: Iterable[T] = (), pre_size: int = 8) -> PSet[T]: ...
+def s(*iterable: T) -> PSet[T]: ...
+
+# see class_test.py for use cases
+Invariant = Tuple[bool, Optional[Union[str, Callable[[], str]]]]
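+# E.g. (True, None) when an invariant holds, or (False, 'x negative') when it fails.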
+
+@overload
+def field(
+ type: Union[Type[T], Sequence[Type[T]]] = ...,
+ invariant: Callable[[Any], Union[Invariant, Iterable[Invariant]]] = lambda _: (True, None),
+ initial: Any = object(),
+ mandatory: bool = False,
+ factory: Callable[[Any], T] = lambda x: x,
+ serializer: Callable[[Any, T], Any] = lambda _, value: value,
+) -> T: ...
+# The actual return value (_PField) is irrelevant after a PRecord has been instantiated,
+# see https://github.com/tobgu/pyrsistent/blob/master/pyrsistent/_precord.py#L10
+@overload
+def field(
+ type: Any = ...,
+ invariant: Callable[[Any], Union[Invariant, Iterable[Invariant]]] = lambda _: (True, None),
+ initial: Any = object(),
+ mandatory: bool = False,
+ factory: Callable[[Any], Any] = lambda x: x,
+ serializer: Callable[[Any, Any], Any] = lambda _, value: value,
+) -> Any: ...
+
+# Use precise types for the simplest use cases, but fall back to Any for
+# everything else. See record_test.py for the wide range of possible types for
+# item_type
+@overload
+def pset_field(
+ item_type: Type[T],
+ optional: bool = False,
+ initial: Iterable[T] = ...,
+) -> PSet[T]: ...
+@overload
+def pset_field(
+ item_type: Any,
+ optional: bool = False,
+ initial: Any = (),
+) -> PSet[Any]: ...
+
+@overload
+def pmap_field(
+ key_type: Type[KT],
+ value_type: Type[VT],
+ optional: bool = False,
+ invariant: Callable[[Any], Tuple[bool, Optional[str]]] = lambda _: (True, None),
+) -> PMap[KT, VT]: ...
+@overload
+def pmap_field(
+ key_type: Any,
+ value_type: Any,
+ optional: bool = False,
+ invariant: Callable[[Any], Tuple[bool, Optional[str]]] = lambda _: (True, None),
+) -> PMap[Any, Any]: ...
+
+@overload
+def pvector_field(
+ item_type: Type[T],
+ optional: bool = False,
+ initial: Iterable[T] = ...,
+) -> PVector[T]: ...
+@overload
+def pvector_field(
+ item_type: Any,
+ optional: bool = False,
+ initial: Any = (),
+) -> PVector[Any]: ...
+
+def pbag(elements: Iterable[T]) -> PBag[T]: ...
+def b(*elements: T) -> PBag[T]: ...
+
+def plist(iterable: Iterable[T] = (), reverse: bool = False) -> PList[T]: ...
+def l(*elements: T) -> PList[T]: ...
+
+def pdeque(iterable: Optional[Iterable[T]] = None, maxlen: Optional[int] = None) -> PDeque[T]: ...
+def dq(*iterable: T) -> PDeque[T]: ...
+
+@overload
+def optional(type: T) -> Tuple[T, Type[None]]: ...
+@overload
+def optional(*typs: Any) -> Tuple[Any, ...]: ...
+
+T_PRecord = TypeVar('T_PRecord', bound='PRecord')
+class PRecord(PMap[AnyStr, Any]):
+ _precord_fields: Mapping
+ _precord_initial_values: Mapping
+
+ def __hash__(self) -> int: ...
+ def __init__(self, **kwargs: Any) -> None: ...
+ def __iter__(self) -> Iterator[Any]: ...
+ def __len__(self) -> int: ...
+ @classmethod
+ def create(
+ cls: Type[T_PRecord],
+ kwargs: Mapping,
+ _factory_fields: Optional[Iterable] = None,
+ ignore_extra: bool = False,
+ ) -> T_PRecord: ...
+ # This is OK because T_PRecord is a concrete type
+ def discard(self: T_PRecord, key: KT) -> T_PRecord: ...
+ def remove(self: T_PRecord, key: KT) -> T_PRecord: ...
+
+ def serialize(self, format: Optional[Any] = ...) -> MutableMapping: ...
+
+ # From pyrsistent documentation:
+ # This set function differs slightly from that in the PMap
+ # class. First of all it accepts key-value pairs. Second it accepts multiple key-value
+ # pairs to perform one, atomic, update of multiple fields.
+ @overload
+ def set(self, key: KT, val: VT) -> Any: ...
+ @overload
+ def set(self, **kwargs: VT) -> Any: ...
+
+def immutable(
+ members: Union[str, Iterable[str]] = '',
+ name: str = 'Immutable',
+ verbose: bool = False,
+) -> Tuple: ... # actually a namedtuple
+
+# ignore mypy warning "Overloaded function signatures 1 and 5 overlap with
+# incompatible return types"
+@overload
+def freeze(o: Mapping[KT, VT]) -> PMap[KT, VT]: ... # type: ignore
+@overload
+def freeze(o: List[T]) -> PVector[T]: ... # type: ignore
+@overload
+def freeze(o: Tuple[T, ...]) -> Tuple[T, ...]: ...
+@overload
+def freeze(o: Set[T]) -> PSet[T]: ... # type: ignore
+@overload
+def freeze(o: T) -> T: ...
+
+
+@overload
+def thaw(o: PMap[KT, VT]) -> MutableMapping[KT, VT]: ... # type: ignore
+@overload
+def thaw(o: PVector[T]) -> List[T]: ... # type: ignore
+@overload
+def thaw(o: Tuple[T, ...]) -> Tuple[T, ...]: ...
+# collections.abc.MutableSet is kind of garbage:
+# https://stackoverflow.com/questions/24977898/why-does-collections-mutableset-not-bestow-an-update-method
+@overload
+def thaw(o: PSet[T]) -> Set[T]: ... # type: ignore
+@overload
+def thaw(o: T) -> T: ...
+
+def mutant(fn: Callable) -> Callable: ...
+
+def inc(x: int) -> int: ...
+@overload
+def discard(evolver: PMapEvolver[KT, VT], key: KT) -> None: ...
+@overload
+def discard(evolver: PVectorEvolver[T], key: int) -> None: ...
+@overload
+def discard(evolver: PSetEvolver[T], key: T) -> None: ...
+def rex(expr: str) -> Callable[[Any], bool]: ...
+def ny(_: Any) -> bool: ...
+
+def get_in(keys: Iterable, coll: Mapping, default: Optional[Any] = None, no_default: bool = False) -> Any: ...
diff --git a/third_party/python/pyrsistent/pyrsistent/_checked_types.py b/third_party/python/pyrsistent/pyrsistent/_checked_types.py
new file mode 100644
index 0000000000..293d989f13
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_checked_types.py
@@ -0,0 +1,542 @@
+from ._compat import Iterable
+import six
+
+from pyrsistent._compat import Enum, string_types
+from pyrsistent._pmap import PMap, pmap
+from pyrsistent._pset import PSet, pset
+from pyrsistent._pvector import PythonPVector, python_pvector
+
+
+class CheckedType(object):
+ """
+ Marker class to enable creation and serialization of checked object graphs.
+ """
+ __slots__ = ()
+
+ @classmethod
+ def create(cls, source_data, _factory_fields=None):
+ raise NotImplementedError()
+
+ def serialize(self, format=None):
+ raise NotImplementedError()
+
+
+def _restore_pickle(cls, data):
+ return cls.create(data, _factory_fields=set())
+
+
+class InvariantException(Exception):
+ """
+ Exception raised from a :py:class:`CheckedType` when invariant tests fail or when a mandatory
+ field is missing.
+
+ Contains two fields of interest:
+ invariant_errors, a tuple of error data for the failing invariants
+ missing_fields, a tuple of strings specifying the missing names
+ """
+
+ def __init__(self, error_codes=(), missing_fields=(), *args, **kwargs):
+ self.invariant_errors = tuple(e() if callable(e) else e for e in error_codes)
+ self.missing_fields = missing_fields
+ super(InvariantException, self).__init__(*args, **kwargs)
+
+ def __str__(self):
+ return super(InvariantException, self).__str__() + \
+ ", invariant_errors=[{invariant_errors}], missing_fields=[{missing_fields}]".format(
+ invariant_errors=', '.join(str(e) for e in self.invariant_errors),
+ missing_fields=', '.join(self.missing_fields))
+
+
+_preserved_iterable_types = (
+ Enum,
+)
+"""Some types are themselves iterable, but we want to use the type itself and
+not its members for the type specification. This defines a set of such types
+that we explicitly preserve.
+
+Note that strings are not such types because the string inputs we pass in are
+values, not types.
+"""
+
+
+def maybe_parse_user_type(t):
+ """Try to coerce a user-supplied type directive into a list of types.
+
+ This function should be used in all places where a user specifies a type,
+ for consistency.
+
+ The policy for what defines valid user input should be clear from the implementation.
+ """
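+ # Illustrative: maybe_parse_user_type(int) returns [int], while
+ # maybe_parse_user_type((int, 'mod.Cls')) flattens to the tuple (int, 'mod.Cls').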
+ is_type = isinstance(t, type)
+ is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types)
+ is_string = isinstance(t, string_types)
+ is_iterable = isinstance(t, Iterable)
+
+ if is_preserved:
+ return [t]
+ elif is_string:
+ return [t]
+ elif is_type and not is_iterable:
+ return [t]
+ elif is_iterable:
+ # Recur to validate contained types as well.
+ ts = t
+ return tuple(e for t in ts for e in maybe_parse_user_type(t))
+ else:
+ # If the format() call below raises because `t` cannot be formatted, so be it.
+ raise TypeError(
+ 'Type specifications must be types or strings. Input: {}'.format(t)
+ )
+
+
+def maybe_parse_many_user_types(ts):
+ # Just a different name to communicate that you're parsing multiple user
+ # inputs. `maybe_parse_user_type` handles the iterable case anyway.
+ return maybe_parse_user_type(ts)
+
+
+def _store_types(dct, bases, destination_name, source_name):
+ maybe_types = maybe_parse_many_user_types([
+ d[source_name]
+ for d in ([dct] + [b.__dict__ for b in bases]) if source_name in d
+ ])
+
+ dct[destination_name] = maybe_types
+
+
+def _merge_invariant_results(result):
+ verdict = True
+ data = []
+ for verd, dat in result:
+ if not verd:
+ verdict = False
+ data.append(dat)
+
+ return verdict, tuple(data)
+
+
+def wrap_invariant(invariant):
+ # Invariant functions may return the outcome of several tests
+ # In those cases the results have to be merged before being passed
+ # back to the client.
+ def f(*args, **kwargs):
+ result = invariant(*args, **kwargs)
+ if isinstance(result[0], bool):
+ return result
+
+ return _merge_invariant_results(result)
+
+ return f
+
+
+def _all_dicts(bases, seen=None):
+ """
+ Yield each class in ``bases`` and each of their base classes.
+ """
+ if seen is None:
+ seen = set()
+ for cls in bases:
+ if cls in seen:
+ continue
+ seen.add(cls)
+ yield cls.__dict__
+ for b in _all_dicts(cls.__bases__, seen):
+ yield b
+
+
+def store_invariants(dct, bases, destination_name, source_name):
+ # Invariants are inherited
+ invariants = []
+ for ns in [dct] + list(_all_dicts(bases)):
+ try:
+ invariant = ns[source_name]
+ except KeyError:
+ continue
+ invariants.append(invariant)
+
+ if not all(callable(invariant) for invariant in invariants):
+ raise TypeError('Invariants must be callable')
+ dct[destination_name] = tuple(wrap_invariant(inv) for inv in invariants)
+
+
+class _CheckedTypeMeta(type):
+ def __new__(mcs, name, bases, dct):
+ _store_types(dct, bases, '_checked_types', '__type__')
+ store_invariants(dct, bases, '_checked_invariants', '__invariant__')
+
+ def default_serializer(self, _, value):
+ if isinstance(value, CheckedType):
+ return value.serialize()
+ return value
+
+ dct.setdefault('__serializer__', default_serializer)
+
+ dct['__slots__'] = ()
+
+ return super(_CheckedTypeMeta, mcs).__new__(mcs, name, bases, dct)
+
+
+class CheckedTypeError(TypeError):
+ def __init__(self, source_class, expected_types, actual_type, actual_value, *args, **kwargs):
+ super(CheckedTypeError, self).__init__(*args, **kwargs)
+ self.source_class = source_class
+ self.expected_types = expected_types
+ self.actual_type = actual_type
+ self.actual_value = actual_value
+
+
+class CheckedKeyTypeError(CheckedTypeError):
+ """
+ Raised when trying to set a value using a key with a type that doesn't match the declared type.
+
+ Attributes:
+ source_class -- The class of the collection
+ expected_types -- Allowed types
+ actual_type -- The non matching type
+ actual_value -- Value of the variable with the non matching type
+ """
+ pass
+
+
+class CheckedValueTypeError(CheckedTypeError):
+ """
+ Raised when trying to set a value using a key with a type that doesn't match the declared type.
+
+ Attributes:
+ source_class -- The class of the collection
+ expected_types -- Allowed types
+ actual_type -- The non matching type
+ actual_value -- Value of the variable with the non matching type
+ """
+ pass
+
+
+def _get_class(type_name):
+ module_name, class_name = type_name.rsplit('.', 1)
+ module = __import__(module_name, fromlist=[class_name])
+ return getattr(module, class_name)
+
+
+def get_type(typ):
+ if isinstance(typ, type):
+ return typ
+
+ return _get_class(typ)
+
+
+def get_types(typs):
+ return [get_type(typ) for typ in typs]
+
+
+def _check_types(it, expected_types, source_class, exception_type=CheckedValueTypeError):
+ if expected_types:
+ for e in it:
+ if not any(isinstance(e, get_type(t)) for t in expected_types):
+ actual_type = type(e)
+ msg = "Type {source_class} can only be used with {expected_types}, not {actual_type}".format(
+ source_class=source_class.__name__,
+ expected_types=tuple(get_type(et).__name__ for et in expected_types),
+ actual_type=actual_type.__name__)
+ raise exception_type(source_class, expected_types, actual_type, e, msg)
+
+
+def _invariant_errors(elem, invariants):
+ return [data for valid, data in (invariant(elem) for invariant in invariants) if not valid]
+
+
+def _invariant_errors_iterable(it, invariants):
+ return sum([_invariant_errors(elem, invariants) for elem in it], [])
+
+
+def optional(*typs):
+ """ Convenience function to specify that a value may be of any of the types in type 'typs' or None """
+ return tuple(typs) + (type(None),)
+
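+# For example (illustrative): optional(int) evaluates to (int, type(None)),
+# so None becomes an accepted value:
+#
+#     optional(int) == (int, type(None))   # -> True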
+
+def _checked_type_create(cls, source_data, _factory_fields=None, ignore_extra=False):
+ if isinstance(source_data, cls):
+ return source_data
+
+    # Recursively apply create methods of checked types if the types of the supplied data
+    # do not match any of the valid types.
+ types = get_types(cls._checked_types)
+ checked_type = next((t for t in types if issubclass(t, CheckedType)), None)
+ if checked_type:
+ return cls([checked_type.create(data, ignore_extra=ignore_extra)
+ if not any(isinstance(data, t) for t in types) else data
+ for data in source_data])
+
+ return cls(source_data)
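+
+# Illustrative, using the Positives class from the CheckedPVector docstring
+# below: Positives.create([1, 2]) returns Positives([1, 2]); if one of the
+# declared types were itself a CheckedType, non-matching raw elements would
+# first be passed through that type's create().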
+
+@six.add_metaclass(_CheckedTypeMeta)
+class CheckedPVector(PythonPVector, CheckedType):
+ """
+ A CheckedPVector is a PVector which allows specifying type and invariant checks.
+
+ >>> class Positives(CheckedPVector):
+ ... __type__ = (int, float)
+ ... __invariant__ = lambda n: (n >= 0, 'Negative')
+ ...
+ >>> Positives([1, 2, 3])
+ Positives([1, 2, 3])
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, initial=()):
+ if type(initial) == PythonPVector:
+ return super(CheckedPVector, cls).__new__(cls, initial._count, initial._shift, initial._root, initial._tail)
+
+ return CheckedPVector.Evolver(cls, python_pvector()).extend(initial).persistent()
+
+ def set(self, key, value):
+ return self.evolver().set(key, value).persistent()
+
+ def append(self, val):
+ return self.evolver().append(val).persistent()
+
+ def extend(self, it):
+ return self.evolver().extend(it).persistent()
+
+ create = classmethod(_checked_type_create)
+
+ def serialize(self, format=None):
+ serializer = self.__serializer__
+ return list(serializer(format, v) for v in self)
+
+ def __reduce__(self):
+ # Pickling support
+ return _restore_pickle, (self.__class__, list(self),)
+
+ class Evolver(PythonPVector.Evolver):
+ __slots__ = ('_destination_class', '_invariant_errors')
+
+ def __init__(self, destination_class, vector):
+ super(CheckedPVector.Evolver, self).__init__(vector)
+ self._destination_class = destination_class
+ self._invariant_errors = []
+
+ def _check(self, it):
+ _check_types(it, self._destination_class._checked_types, self._destination_class)
+ error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
+ self._invariant_errors.extend(error_data)
+
+ def __setitem__(self, key, value):
+ self._check([value])
+ return super(CheckedPVector.Evolver, self).__setitem__(key, value)
+
+ def append(self, elem):
+ self._check([elem])
+ return super(CheckedPVector.Evolver, self).append(elem)
+
+ def extend(self, it):
+ it = list(it)
+ self._check(it)
+ return super(CheckedPVector.Evolver, self).extend(it)
+
+ def persistent(self):
+ if self._invariant_errors:
+ raise InvariantException(error_codes=self._invariant_errors)
+
+ result = self._orig_pvector
+ if self.is_dirty() or (self._destination_class != type(self._orig_pvector)):
+ pv = super(CheckedPVector.Evolver, self).persistent().extend(self._extra_tail)
+ result = self._destination_class(pv)
+ self._reset(result)
+
+ return result
+
+ def __repr__(self):
+ return self.__class__.__name__ + "({0})".format(self.tolist())
+
+ __str__ = __repr__
+
+ def evolver(self):
+ return CheckedPVector.Evolver(self.__class__, self)
+
+
+@six.add_metaclass(_CheckedTypeMeta)
+class CheckedPSet(PSet, CheckedType):
+ """
+ A CheckedPSet is a PSet which allows specifying type and invariant checks.
+
+ >>> class Positives(CheckedPSet):
+ ... __type__ = (int, float)
+ ... __invariant__ = lambda n: (n >= 0, 'Negative')
+ ...
+ >>> Positives([1, 2, 3])
+ Positives([1, 2, 3])
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, initial=()):
+ if type(initial) is PMap:
+ return super(CheckedPSet, cls).__new__(cls, initial)
+
+ evolver = CheckedPSet.Evolver(cls, pset())
+ for e in initial:
+ evolver.add(e)
+
+ return evolver.persistent()
+
+ def __repr__(self):
+ return self.__class__.__name__ + super(CheckedPSet, self).__repr__()[4:]
+
+ def __str__(self):
+ return self.__repr__()
+
+ def serialize(self, format=None):
+ serializer = self.__serializer__
+ return set(serializer(format, v) for v in self)
+
+ create = classmethod(_checked_type_create)
+
+ def __reduce__(self):
+ # Pickling support
+ return _restore_pickle, (self.__class__, list(self),)
+
+ def evolver(self):
+ return CheckedPSet.Evolver(self.__class__, self)
+
+ class Evolver(PSet._Evolver):
+ __slots__ = ('_destination_class', '_invariant_errors')
+
+ def __init__(self, destination_class, original_set):
+ super(CheckedPSet.Evolver, self).__init__(original_set)
+ self._destination_class = destination_class
+ self._invariant_errors = []
+
+ def _check(self, it):
+ _check_types(it, self._destination_class._checked_types, self._destination_class)
+ error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants)
+ self._invariant_errors.extend(error_data)
+
+ def add(self, element):
+ self._check([element])
+ self._pmap_evolver[element] = True
+ return self
+
+ def persistent(self):
+ if self._invariant_errors:
+ raise InvariantException(error_codes=self._invariant_errors)
+
+ if self.is_dirty() or self._destination_class != type(self._original_pset):
+ return self._destination_class(self._pmap_evolver.persistent())
+
+ return self._original_pset
+
+
+class _CheckedMapTypeMeta(type):
+ def __new__(mcs, name, bases, dct):
+ _store_types(dct, bases, '_checked_key_types', '__key_type__')
+ _store_types(dct, bases, '_checked_value_types', '__value_type__')
+ store_invariants(dct, bases, '_checked_invariants', '__invariant__')
+
+ def default_serializer(self, _, key, value):
+ sk = key
+ if isinstance(key, CheckedType):
+ sk = key.serialize()
+
+ sv = value
+ if isinstance(value, CheckedType):
+ sv = value.serialize()
+
+ return sk, sv
+
+ dct.setdefault('__serializer__', default_serializer)
+
+ dct['__slots__'] = ()
+
+ return super(_CheckedMapTypeMeta, mcs).__new__(mcs, name, bases, dct)
+
+# Marker object
+_UNDEFINED_CHECKED_PMAP_SIZE = object()
+
+
+@six.add_metaclass(_CheckedMapTypeMeta)
+class CheckedPMap(PMap, CheckedType):
+ """
+ A CheckedPMap is a PMap which allows specifying type and invariant checks.
+
+ >>> class IntToFloatMap(CheckedPMap):
+ ... __key_type__ = int
+ ... __value_type__ = float
+ ... __invariant__ = lambda k, v: (int(v) == k, 'Invalid mapping')
+ ...
+ >>> IntToFloatMap({1: 1.5, 2: 2.25})
+ IntToFloatMap({1: 1.5, 2: 2.25})
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, initial={}, size=_UNDEFINED_CHECKED_PMAP_SIZE):
+ if size is not _UNDEFINED_CHECKED_PMAP_SIZE:
+ return super(CheckedPMap, cls).__new__(cls, size, initial)
+
+ evolver = CheckedPMap.Evolver(cls, pmap())
+ for k, v in initial.items():
+ evolver.set(k, v)
+
+ return evolver.persistent()
+
+ def evolver(self):
+ return CheckedPMap.Evolver(self.__class__, self)
+
+ def __repr__(self):
+ return self.__class__.__name__ + "({0})".format(str(dict(self)))
+
+ __str__ = __repr__
+
+ def serialize(self, format=None):
+ serializer = self.__serializer__
+ return dict(serializer(format, k, v) for k, v in self.items())
+
+ @classmethod
+ def create(cls, source_data, _factory_fields=None):
+ if isinstance(source_data, cls):
+ return source_data
+
+        # Recursively apply create methods of checked types if the types of the supplied data
+        # do not match any of the valid types.
+ key_types = get_types(cls._checked_key_types)
+ checked_key_type = next((t for t in key_types if issubclass(t, CheckedType)), None)
+ value_types = get_types(cls._checked_value_types)
+ checked_value_type = next((t for t in value_types if issubclass(t, CheckedType)), None)
+
+ if checked_key_type or checked_value_type:
+ return cls(dict((checked_key_type.create(key) if checked_key_type and not any(isinstance(key, t) for t in key_types) else key,
+ checked_value_type.create(value) if checked_value_type and not any(isinstance(value, t) for t in value_types) else value)
+ for key, value in source_data.items()))
+
+ return cls(source_data)
+
+ def __reduce__(self):
+ # Pickling support
+ return _restore_pickle, (self.__class__, dict(self),)
+
+ class Evolver(PMap._Evolver):
+ __slots__ = ('_destination_class', '_invariant_errors')
+
+ def __init__(self, destination_class, original_map):
+ super(CheckedPMap.Evolver, self).__init__(original_map)
+ self._destination_class = destination_class
+ self._invariant_errors = []
+
+ def set(self, key, value):
+ _check_types([key], self._destination_class._checked_key_types, self._destination_class, CheckedKeyTypeError)
+ _check_types([value], self._destination_class._checked_value_types, self._destination_class)
+ self._invariant_errors.extend(data for valid, data in (invariant(key, value)
+ for invariant in self._destination_class._checked_invariants)
+ if not valid)
+
+ return super(CheckedPMap.Evolver, self).set(key, value)
+
+ def persistent(self):
+ if self._invariant_errors:
+ raise InvariantException(error_codes=self._invariant_errors)
+
+ if self.is_dirty() or type(self._original_pmap) != self._destination_class:
+ return self._destination_class(self._buckets_evolver.persistent(), self._size)
+
+ return self._original_pmap
diff --git a/third_party/python/pyrsistent/pyrsistent/_compat.py b/third_party/python/pyrsistent/pyrsistent/_compat.py
new file mode 100644
index 0000000000..e728586afe
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_compat.py
@@ -0,0 +1,31 @@
+from six import string_types
+
+
+# enum compat
+try:
+ from enum import Enum
+except ImportError:
+    class Enum(object): pass
+    # No objects will ever be instances of this fallback class.
+
+# collections compat
+try:
+ from collections.abc import (
+ Container,
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+ Set,
+ Sized,
+ )
+except ImportError:
+ from collections import (
+ Container,
+ Hashable,
+ Iterable,
+ Mapping,
+ Sequence,
+ Set,
+ Sized,
+ )
diff --git a/third_party/python/pyrsistent/pyrsistent/_field_common.py b/third_party/python/pyrsistent/pyrsistent/_field_common.py
new file mode 100644
index 0000000000..ca1cccd43c
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_field_common.py
@@ -0,0 +1,330 @@
+import six
+import sys
+
+from pyrsistent._checked_types import (
+ CheckedPMap,
+ CheckedPSet,
+ CheckedPVector,
+ CheckedType,
+ InvariantException,
+ _restore_pickle,
+ get_type,
+ maybe_parse_user_type,
+ maybe_parse_many_user_types,
+)
+from pyrsistent._checked_types import optional as optional_type
+from pyrsistent._checked_types import wrap_invariant
+import inspect
+
+PY2 = sys.version_info[0] < 3
+
+
+def set_fields(dct, bases, name):
+ dct[name] = dict(sum([list(b.__dict__.get(name, {}).items()) for b in bases], []))
+
+ for k, v in list(dct.items()):
+ if isinstance(v, _PField):
+ dct[name][k] = v
+ del dct[k]
+
+
+def check_global_invariants(subject, invariants):
+ error_codes = tuple(error_code for is_ok, error_code in
+ (invariant(subject) for invariant in invariants) if not is_ok)
+ if error_codes:
+ raise InvariantException(error_codes, (), 'Global invariant failed')
+
+
+def serialize(serializer, format, value):
+ if isinstance(value, CheckedType) and serializer is PFIELD_NO_SERIALIZER:
+ return value.serialize(format)
+
+ return serializer(format, value)
+
+
+def check_type(destination_cls, field, name, value):
+ if field.type and not any(isinstance(value, get_type(t)) for t in field.type):
+ actual_type = type(value)
+ message = "Invalid type for field {0}.{1}, was {2}".format(destination_cls.__name__, name, actual_type.__name__)
+ raise PTypeError(destination_cls, name, field.type, actual_type, message)
+
+
+def is_type_cls(type_cls, field_type):
+ if type(field_type) is set:
+ return True
+ types = tuple(field_type)
+ if len(types) == 0:
+ return False
+ return issubclass(get_type(types[0]), type_cls)
+
+
+def is_field_ignore_extra_complaint(type_cls, field, ignore_extra):
+    # The ignore_extra param defaults to False; as an optimization, False is not propagated.
+ if not ignore_extra:
+ return False
+
+ if not is_type_cls(type_cls, field.type):
+ return False
+
+ if PY2:
+ return 'ignore_extra' in inspect.getargspec(field.factory).args
+ else:
+ return 'ignore_extra' in inspect.signature(field.factory).parameters
+
+
+class _PField(object):
+ __slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer')
+
+ def __init__(self, type, invariant, initial, mandatory, factory, serializer):
+ self.type = type
+ self.invariant = invariant
+ self.initial = initial
+ self.mandatory = mandatory
+ self._factory = factory
+ self.serializer = serializer
+
+ @property
+ def factory(self):
+ # If no factory is specified and the type is another CheckedType use the factory method of that CheckedType
+ if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1:
+ typ = get_type(tuple(self.type)[0])
+ if issubclass(typ, CheckedType):
+ return typ.create
+
+ return self._factory
+
+PFIELD_NO_TYPE = ()
+PFIELD_NO_INVARIANT = lambda _: (True, None)
+PFIELD_NO_FACTORY = lambda x: x
+PFIELD_NO_INITIAL = object()
+PFIELD_NO_SERIALIZER = lambda _, value: value
+
+
+def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL,
+ mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER):
+ """
+ Field specification factory for :py:class:`PRecord`.
+
+ :param type: a type or iterable with types that are allowed for this field
+ :param invariant: a function specifying an invariant that must hold for the field
+ :param initial: value of field if not specified when instantiating the record
+ :param mandatory: boolean specifying if the field is mandatory or not
+ :param factory: function called when field is set.
+ :param serializer: function that returns a serialized version of the field
+ """
+
+ # NB: We have to check this predicate separately from the predicates in
+ # `maybe_parse_user_type` et al. because this one is related to supporting
+ # the argspec for `field`, while those are related to supporting the valid
+ # ways to specify types.
+
+ # Multiple types must be passed in one of the following containers. Note
+ # that a type that is a subclass of one of these containers, like a
+ # `collections.namedtuple`, will work as expected, since we check
+ # `isinstance` and not `issubclass`.
+ if isinstance(type, (list, set, tuple)):
+ types = set(maybe_parse_many_user_types(type))
+ else:
+ types = set(maybe_parse_user_type(type))
+
+ invariant_function = wrap_invariant(invariant) if invariant != PFIELD_NO_INVARIANT and callable(invariant) else invariant
+ field = _PField(type=types, invariant=invariant_function, initial=initial,
+ mandatory=mandatory, factory=factory, serializer=serializer)
+
+ _check_field_parameters(field)
+
+ return field
+
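+# A short usage sketch (the PRecord subclass is hypothetical, not defined in
+# this module):
+#
+#     class Person(PRecord):
+#         age = field(type=int, mandatory=True, initial=0,
+#                     invariant=lambda a: (a >= 0, 'age negative'))
+#
+# The invariant lambda is wrapped by wrap_invariant so multi-result
+# invariants are merged before being reported.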
+
+def _check_field_parameters(field):
+ for t in field.type:
+ if not isinstance(t, type) and not isinstance(t, six.string_types):
+ raise TypeError('Type parameter expected, not {0}'.format(type(t)))
+
+ if field.initial is not PFIELD_NO_INITIAL and \
+ not callable(field.initial) and \
+ field.type and not any(isinstance(field.initial, t) for t in field.type):
+ raise TypeError('Initial has invalid type {0}'.format(type(field.initial)))
+
+ if not callable(field.invariant):
+ raise TypeError('Invariant must be callable')
+
+ if not callable(field.factory):
+ raise TypeError('Factory must be callable')
+
+ if not callable(field.serializer):
+ raise TypeError('Serializer must be callable')
+
+
+class PTypeError(TypeError):
+ """
+ Raised when trying to assign a value with a type that doesn't match the declared type.
+
+ Attributes:
+ source_class -- The class of the record
+ field -- Field name
+ expected_types -- Types allowed for the field
+ actual_type -- The non matching type
+ """
+ def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs):
+ super(PTypeError, self).__init__(*args, **kwargs)
+ self.source_class = source_class
+ self.field = field
+ self.expected_types = expected_types
+ self.actual_type = actual_type
+
+
+SEQ_FIELD_TYPE_SUFFIXES = {
+ CheckedPVector: "PVector",
+ CheckedPSet: "PSet",
+}
+
+# Global dictionary to hold auto-generated field types: used for unpickling
+_seq_field_types = {}
+
+def _restore_seq_field_pickle(checked_class, item_type, data):
+ """Unpickling function for auto-generated PVec/PSet field types."""
+ type_ = _seq_field_types[checked_class, item_type]
+ return _restore_pickle(type_, data)
+
+def _types_to_names(types):
+ """Convert a tuple of types to a human-readable string."""
+ return "".join(get_type(typ).__name__.capitalize() for typ in types)
+
+def _make_seq_field_type(checked_class, item_type):
+ """Create a subclass of the given checked class with the given item type."""
+ type_ = _seq_field_types.get((checked_class, item_type))
+ if type_ is not None:
+ return type_
+
+ class TheType(checked_class):
+ __type__ = item_type
+
+ def __reduce__(self):
+ return (_restore_seq_field_pickle,
+ (checked_class, item_type, list(self)))
+
+ suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class]
+ TheType.__name__ = _types_to_names(TheType._checked_types) + suffix
+ _seq_field_types[checked_class, item_type] = TheType
+ return TheType
+
+def _sequence_field(checked_class, item_type, optional, initial):
+ """
+ Create checked field for either ``PSet`` or ``PVector``.
+
+ :param checked_class: ``CheckedPSet`` or ``CheckedPVector``.
+    :param item_type: The required type for the items in the sequence.
+ :param optional: If true, ``None`` can be used as a value for
+ this field.
+ :param initial: Initial value to pass to factory.
+
+ :return: A ``field`` containing a checked class.
+ """
+ TheType = _make_seq_field_type(checked_class, item_type)
+
+ if optional:
+ def factory(argument, _factory_fields=None, ignore_extra=False):
+ if argument is None:
+ return None
+ else:
+ return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra)
+ else:
+ factory = TheType.create
+
+ return field(type=optional_type(TheType) if optional else TheType,
+ factory=factory, mandatory=True,
+ initial=factory(initial))
+
+
+def pset_field(item_type, optional=False, initial=()):
+ """
+ Create checked ``PSet`` field.
+
+ :param item_type: The required type for the items in the set.
+ :param optional: If true, ``None`` can be used as a value for
+ this field.
+ :param initial: Initial value to pass to factory if no value is given
+ for the field.
+
+ :return: A ``field`` containing a ``CheckedPSet`` of the given type.
+ """
+ return _sequence_field(CheckedPSet, item_type, optional,
+ initial)
+
+
+def pvector_field(item_type, optional=False, initial=()):
+ """
+ Create checked ``PVector`` field.
+
+ :param item_type: The required type for the items in the vector.
+ :param optional: If true, ``None`` can be used as a value for
+ this field.
+ :param initial: Initial value to pass to factory if no value is given
+ for the field.
+
+ :return: A ``field`` containing a ``CheckedPVector`` of the given type.
+ """
+ return _sequence_field(CheckedPVector, item_type, optional,
+ initial)
+
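+# Illustrative use of the two factories above (Person is a hypothetical
+# PRecord subclass):
+#
+#     class Person(PRecord):
+#         nicknames = pvector_field(str)
+#         tags = pset_field(str, optional=True)
+#
+# The generated CheckedPVector/CheckedPSet subclasses are cached in
+# _seq_field_types so instances can be pickled via _restore_seq_field_pickle.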
+
+_valid = lambda item: (True, "")
+
+
+# Global dictionary to hold auto-generated field types: used for unpickling
+_pmap_field_types = {}
+
+def _restore_pmap_field_pickle(key_type, value_type, data):
+ """Unpickling function for auto-generated PMap field types."""
+ type_ = _pmap_field_types[key_type, value_type]
+ return _restore_pickle(type_, data)
+
+def _make_pmap_field_type(key_type, value_type):
+ """Create a subclass of CheckedPMap with the given key and value types."""
+ type_ = _pmap_field_types.get((key_type, value_type))
+ if type_ is not None:
+ return type_
+
+ class TheMap(CheckedPMap):
+ __key_type__ = key_type
+ __value_type__ = value_type
+
+ def __reduce__(self):
+ return (_restore_pmap_field_pickle,
+ (self.__key_type__, self.__value_type__, dict(self)))
+
+ TheMap.__name__ = "{0}To{1}PMap".format(
+ _types_to_names(TheMap._checked_key_types),
+ _types_to_names(TheMap._checked_value_types))
+ _pmap_field_types[key_type, value_type] = TheMap
+ return TheMap
+
+
+def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT):
+ """
+ Create a checked ``PMap`` field.
+
+    :param key_type: The required type for the keys of the map.
+    :param value_type: The required type for the values of the map.
+ :param optional: If true, ``None`` can be used as a value for
+ this field.
+ :param invariant: Pass-through to ``field``.
+
+ :return: A ``field`` containing a ``CheckedPMap``.
+ """
+ TheMap = _make_pmap_field_type(key_type, value_type)
+
+ if optional:
+ def factory(argument):
+ if argument is None:
+ return None
+ else:
+ return TheMap.create(argument)
+ else:
+ factory = TheMap.create
+
+ return field(mandatory=True, initial=TheMap(),
+ type=optional_type(TheMap) if optional else TheMap,
+ factory=factory, invariant=invariant)
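+
+# Illustrative use (Scores is hypothetical): pmap_field(str, int) produces a
+# field whose value type is an auto-generated "StrToIntPMap" CheckedPMap:
+#
+#     class Scores(PRecord):
+#         by_name = pmap_field(str, int)
+#
+#     Scores(by_name={'a': 1}).by_name   # -> StrToIntPMap({'a': 1})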
diff --git a/third_party/python/pyrsistent/pyrsistent/_helpers.py b/third_party/python/pyrsistent/pyrsistent/_helpers.py
new file mode 100644
index 0000000000..c9c58feac5
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_helpers.py
@@ -0,0 +1,82 @@
+from functools import wraps
+import six
+from pyrsistent._pmap import PMap, pmap
+from pyrsistent._pset import PSet, pset
+from pyrsistent._pvector import PVector, pvector
+
+
+def freeze(o):
+ """
+ Recursively convert simple Python containers into pyrsistent versions
+ of those containers.
+
+ - list is converted to pvector, recursively
+ - dict is converted to pmap, recursively on values (but not keys)
+ - set is converted to pset, but not recursively
+ - tuple is converted to tuple, recursively.
+
+    Sets and dict keys are not recursively frozen because they do not contain
+    mutable data by convention. The main exception to this rule is dict keys
+    and set elements that are instances of mutable objects hashed by identity,
+    which this function cannot convert anyway.
+
+ >>> freeze(set([1, 2]))
+ pset([1, 2])
+ >>> freeze([1, {'a': 3}])
+ pvector([1, pmap({'a': 3})])
+ >>> freeze((1, []))
+ (1, pvector([]))
+ """
+ typ = type(o)
+ if typ is dict:
+ return pmap(dict((k, freeze(v)) for k, v in six.iteritems(o)))
+ if typ is list:
+ return pvector(map(freeze, o))
+ if typ is tuple:
+ return tuple(map(freeze, o))
+ if typ is set:
+ return pset(o)
+ return o
+
+
+def thaw(o):
+ """
+ Recursively convert pyrsistent containers into simple Python containers.
+
+ - pvector is converted to list, recursively
+ - pmap is converted to dict, recursively on values (but not keys)
+ - pset is converted to set, but not recursively
+ - tuple is converted to tuple, recursively.
+
+ >>> from pyrsistent import s, m, v
+ >>> thaw(s(1, 2))
+ {1, 2}
+ >>> thaw(v(1, m(a=3)))
+ [1, {'a': 3}]
+ >>> thaw((1, v()))
+ (1, [])
+ """
+ if isinstance(o, PVector):
+ return list(map(thaw, o))
+ if isinstance(o, PMap):
+ return dict((k, thaw(v)) for k, v in o.iteritems())
+ if isinstance(o, PSet):
+ return set(o)
+ if type(o) is tuple:
+ return tuple(map(thaw, o))
+ return o
+
+
+def mutant(fn):
+ """
+ Convenience decorator to isolate mutation to within the decorated function (with respect
+ to the input arguments).
+
+ All arguments to the decorated function will be frozen so that they are guaranteed not to change.
+ The return value is also frozen.
+ """
+ @wraps(fn)
+ def inner_f(*args, **kwargs):
+ return freeze(fn(*[freeze(e) for e in args], **dict(freeze(item) for item in kwargs.items())))
+
+ return inner_f
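+
+# A small sketch of the effect (add_defaults is illustrative):
+#
+#     @mutant
+#     def add_defaults(config):
+#         # config arrives frozen as a pmap; build a mutated plain copy.
+#         return dict(config, retries=3)
+#
+#     add_defaults({'host': 'x'})   # -> pmap({'host': 'x', 'retries': 3})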
diff --git a/third_party/python/pyrsistent/pyrsistent/_immutable.py b/third_party/python/pyrsistent/pyrsistent/_immutable.py
new file mode 100644
index 0000000000..a89bd7552f
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_immutable.py
@@ -0,0 +1,105 @@
+import sys
+
+import six
+
+
+def immutable(members='', name='Immutable', verbose=False):
+ """
+ Produces a class that either can be used standalone or as a base class for persistent classes.
+
+ This is a thin wrapper around a named tuple.
+
+ Constructing a type and using it to instantiate objects:
+
+ >>> Point = immutable('x, y', name='Point')
+ >>> p = Point(1, 2)
+ >>> p2 = p.set(x=3)
+ >>> p
+ Point(x=1, y=2)
+ >>> p2
+ Point(x=3, y=2)
+
+ Inheriting from a constructed type. In this case no type name needs to be supplied:
+
+ >>> class PositivePoint(immutable('x, y')):
+ ... __slots__ = tuple()
+ ... def __new__(cls, x, y):
+ ... if x > 0 and y > 0:
+ ... return super(PositivePoint, cls).__new__(cls, x, y)
+ ... raise Exception('Coordinates must be positive!')
+ ...
+ >>> p = PositivePoint(1, 2)
+ >>> p.set(x=3)
+ PositivePoint(x=3, y=2)
+ >>> p.set(y=-3)
+ Traceback (most recent call last):
+ Exception: Coordinates must be positive!
+
+ The persistent class also supports the notion of frozen members. The value of a frozen member
+ cannot be updated. For example it could be used to implement an ID that should remain the same
+ over time. A frozen member is denoted by a trailing underscore.
+
+ >>> Point = immutable('x, y, id_', name='Point')
+ >>> p = Point(1, 2, id_=17)
+ >>> p.set(x=3)
+ Point(x=3, y=2, id_=17)
+ >>> p.set(id_=18)
+ Traceback (most recent call last):
+ AttributeError: Cannot set frozen members id_
+ """
+
+ if isinstance(members, six.string_types):
+ members = members.replace(',', ' ').split()
+
+ def frozen_member_test():
+ frozen_members = ["'%s'" % f for f in members if f.endswith('_')]
+ if frozen_members:
+ return """
+ frozen_fields = fields_to_modify & set([{frozen_members}])
+ if frozen_fields:
+ raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields))
+ """.format(frozen_members=', '.join(frozen_members))
+
+ return ''
+
+ verbose_string = ""
+ if sys.version_info < (3, 7):
+        # The verbose argument was removed from namedtuple in Python 3.7
+ verbose_string = ", verbose={verbose}".format(verbose=verbose)
+
+ quoted_members = ', '.join("'%s'" % m for m in members)
+ template = """
+class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})):
+ __slots__ = tuple()
+
+ def __repr__(self):
+ return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__)
+
+ def set(self, **kwargs):
+ if not kwargs:
+ return self
+
+ fields_to_modify = set(kwargs.keys())
+ if not fields_to_modify <= {member_set}:
+ raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set}))
+
+ {frozen_member_test}
+
+ return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self))
+""".format(quoted_members=quoted_members,
+ member_set="set([%s])" % quoted_members if quoted_members else 'set()',
+ frozen_member_test=frozen_member_test(),
+ verbose_string=verbose_string,
+ class_name=name)
+
+ if verbose:
+ print(template)
+
+ from collections import namedtuple
+ namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable')
+ try:
+ six.exec_(template, namespace)
+ except SyntaxError as e:
+        raise SyntaxError(str(e) + ':\n' + template)
+
+ return namespace[name] \ No newline at end of file
diff --git a/third_party/python/pyrsistent/pyrsistent/_pbag.py b/third_party/python/pyrsistent/pyrsistent/_pbag.py
new file mode 100644
index 0000000000..9905e9a6e3
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_pbag.py
@@ -0,0 +1,267 @@
+from ._compat import Container, Iterable, Sized, Hashable
+from functools import reduce
+from pyrsistent._pmap import pmap
+
+
+def _add_to_counters(counters, element):
+ return counters.set(element, counters.get(element, 0) + 1)
+
+
+class PBag(object):
+ """
+ A persistent bag/multiset type.
+
+ Requires elements to be hashable, and allows duplicates, but has no
+ ordering. Bags are hashable.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`b`
+ or :py:func:`pbag` to create an instance.
+
+ Some examples:
+
+ >>> s = pbag([1, 2, 3, 1])
+ >>> s2 = s.add(4)
+ >>> s3 = s2.remove(1)
+ >>> s
+ pbag([1, 1, 2, 3])
+ >>> s2
+ pbag([1, 1, 2, 3, 4])
+ >>> s3
+ pbag([1, 2, 3, 4])
+ """
+
+ __slots__ = ('_counts', '__weakref__')
+
+ def __init__(self, counts):
+ self._counts = counts
+
+ def add(self, element):
+ """
+ Add an element to the bag.
+
+ >>> s = pbag([1])
+ >>> s2 = s.add(1)
+ >>> s3 = s.add(2)
+ >>> s2
+ pbag([1, 1])
+ >>> s3
+ pbag([1, 2])
+ """
+ return PBag(_add_to_counters(self._counts, element))
+
+ def update(self, iterable):
+ """
+ Update bag with all elements in iterable.
+
+ >>> s = pbag([1])
+ >>> s.update([1, 2])
+ pbag([1, 1, 2])
+ """
+ if iterable:
+ return PBag(reduce(_add_to_counters, iterable, self._counts))
+
+ return self
+
+ def remove(self, element):
+ """
+ Remove an element from the bag.
+
+ >>> s = pbag([1, 1, 2])
+ >>> s2 = s.remove(1)
+ >>> s3 = s.remove(2)
+ >>> s2
+ pbag([1, 2])
+ >>> s3
+ pbag([1, 1])
+ """
+ if element not in self._counts:
+ raise KeyError(element)
+ elif self._counts[element] == 1:
+ newc = self._counts.remove(element)
+ else:
+ newc = self._counts.set(element, self._counts[element] - 1)
+ return PBag(newc)
+
+ def count(self, element):
+ """
+ Return the number of times an element appears.
+
+
+ >>> pbag([]).count('non-existent')
+ 0
+ >>> pbag([1, 1, 2]).count(1)
+ 2
+ """
+ return self._counts.get(element, 0)
+
+ def __len__(self):
+ """
+ Return the length including duplicates.
+
+ >>> len(pbag([1, 1, 2]))
+ 3
+ """
+ return sum(self._counts.itervalues())
+
+ def __iter__(self):
+ """
+ Return an iterator of all elements, including duplicates.
+
+ >>> list(pbag([1, 1, 2]))
+ [1, 1, 2]
+ >>> list(pbag([1, 2]))
+ [1, 2]
+ """
+ for elt, count in self._counts.iteritems():
+ for i in range(count):
+ yield elt
+
+ def __contains__(self, elt):
+ """
+ Check if an element is in the bag.
+
+ >>> 1 in pbag([1, 1, 2])
+ True
+ >>> 0 in pbag([1, 2])
+ False
+ """
+ return elt in self._counts
+
+ def __repr__(self):
+ return "pbag({0})".format(list(self))
+
+ def __eq__(self, other):
+ """
+ Check if two bags are equivalent, honoring the number of duplicates,
+ and ignoring insertion order.
+
+ >>> pbag([1, 1, 2]) == pbag([1, 2])
+ False
+ >>> pbag([2, 1, 0]) == pbag([0, 1, 2])
+ True
+ """
+ if type(other) is not PBag:
+ raise TypeError("Can only compare PBag with PBags")
+ return self._counts == other._counts
+
+ def __lt__(self, other):
+ raise TypeError('PBags are not orderable')
+
+ __le__ = __lt__
+ __gt__ = __lt__
+ __ge__ = __lt__
+
+ # Multiset-style operations similar to collections.Counter
+
+ def __add__(self, other):
+ """
+ Combine elements from two PBags.
+
+ >>> pbag([1, 2, 2]) + pbag([2, 3, 3])
+ pbag([1, 2, 2, 2, 3, 3])
+ """
+ if not isinstance(other, PBag):
+ return NotImplemented
+ result = self._counts.evolver()
+ for elem, other_count in other._counts.iteritems():
+ result[elem] = self.count(elem) + other_count
+ return PBag(result.persistent())
+
+ def __sub__(self, other):
+ """
+ Remove elements from one PBag that are present in another.
+
+ >>> pbag([1, 2, 2, 2, 3]) - pbag([2, 3, 3, 4])
+ pbag([1, 2, 2])
+ """
+ if not isinstance(other, PBag):
+ return NotImplemented
+ result = self._counts.evolver()
+ for elem, other_count in other._counts.iteritems():
+ newcount = self.count(elem) - other_count
+ if newcount > 0:
+ result[elem] = newcount
+ elif elem in self:
+ result.remove(elem)
+ return PBag(result.persistent())
+
+ def __or__(self, other):
+ """
+ Union: Keep elements that are present in either of two PBags.
+
+ >>> pbag([1, 2, 2, 2]) | pbag([2, 3, 3])
+ pbag([1, 2, 2, 2, 3, 3])
+ """
+ if not isinstance(other, PBag):
+ return NotImplemented
+ result = self._counts.evolver()
+ for elem, other_count in other._counts.iteritems():
+ count = self.count(elem)
+ newcount = max(count, other_count)
+ result[elem] = newcount
+ return PBag(result.persistent())
+
+ def __and__(self, other):
+ """
+ Intersection: Only keep elements that are present in both PBags.
+
+ >>> pbag([1, 2, 2, 2]) & pbag([2, 3, 3])
+ pbag([2])
+ """
+ if not isinstance(other, PBag):
+ return NotImplemented
+ result = pmap().evolver()
+ for elem, count in self._counts.iteritems():
+ newcount = min(count, other.count(elem))
+ if newcount > 0:
+ result[elem] = newcount
+ return PBag(result.persistent())
+
+ def __hash__(self):
+ """
+ Hash based on value of elements.
+
+ >>> m = pmap({pbag([1, 2]): "it's here!"})
+ >>> m[pbag([2, 1])]
+ "it's here!"
+ >>> pbag([1, 1, 2]) in m
+ False
+ """
+ return hash(self._counts)
+
+
+Container.register(PBag)
+Iterable.register(PBag)
+Sized.register(PBag)
+Hashable.register(PBag)
+
+
+def b(*elements):
+ """
+ Construct a persistent bag.
+
+ Takes an arbitrary number of arguments to insert into the new persistent
+ bag.
+
+ >>> b(1, 2, 3, 2)
+ pbag([1, 2, 2, 3])
+ """
+ return pbag(elements)
+
+
+def pbag(elements):
+ """
+ Convert an iterable to a persistent bag.
+
+ Takes an iterable with elements to insert.
+
+ >>> pbag([1, 2, 3, 2])
+ pbag([1, 2, 2, 3])
+ """
+ if not elements:
+ return _EMPTY_PBAG
+ return PBag(reduce(_add_to_counters, elements, pmap()))
+
+
+_EMPTY_PBAG = PBag(pmap())
+
diff --git a/third_party/python/pyrsistent/pyrsistent/_pclass.py b/third_party/python/pyrsistent/pyrsistent/_pclass.py
new file mode 100644
index 0000000000..a437f71648
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_pclass.py
@@ -0,0 +1,264 @@
+import six
+from pyrsistent._checked_types import (InvariantException, CheckedType, _restore_pickle, store_invariants)
+from pyrsistent._field_common import (
+ set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
+)
+from pyrsistent._transformations import transform
+
+
+def _is_pclass(bases):
+ return len(bases) == 1 and bases[0] == CheckedType
+
+
+class PClassMeta(type):
+ def __new__(mcs, name, bases, dct):
+ set_fields(dct, bases, name='_pclass_fields')
+ store_invariants(dct, bases, '_pclass_invariants', '__invariant__')
+ dct['__slots__'] = ('_pclass_frozen',) + tuple(key for key in dct['_pclass_fields'])
+
+        # There must only be one __weakref__ entry in the inheritance hierarchy,
+        # so let's put it on the top level class.
+ if _is_pclass(bases):
+ dct['__slots__'] += ('__weakref__',)
+
+ return super(PClassMeta, mcs).__new__(mcs, name, bases, dct)
+
+_MISSING_VALUE = object()
+
+
+def _check_and_set_attr(cls, field, name, value, result, invariant_errors):
+ check_type(cls, field, name, value)
+ is_ok, error_code = field.invariant(value)
+ if not is_ok:
+ invariant_errors.append(error_code)
+ else:
+ setattr(result, name, value)
+
+
+@six.add_metaclass(PClassMeta)
+class PClass(CheckedType):
+ """
+    A PClass is a Python class with a fixed set of specified fields. PClasses are declared as Python classes inheriting
+    from PClass. They are defined the same way as PRecords and behave like a PRecord in all aspects except that a
+    PClass is not a PMap, and hence not a collection, but rather a plain Python object.
+
+    More documentation and examples of PClass usage are available at https://github.com/tobgu/pyrsistent
+ """
+ def __new__(cls, **kwargs): # Support *args?
+ result = super(PClass, cls).__new__(cls)
+ factory_fields = kwargs.pop('_factory_fields', None)
+ ignore_extra = kwargs.pop('ignore_extra', None)
+ missing_fields = []
+ invariant_errors = []
+ for name, field in cls._pclass_fields.items():
+ if name in kwargs:
+ if factory_fields is None or name in factory_fields:
+ if is_field_ignore_extra_complaint(PClass, field, ignore_extra):
+ value = field.factory(kwargs[name], ignore_extra=ignore_extra)
+ else:
+ value = field.factory(kwargs[name])
+ else:
+ value = kwargs[name]
+ _check_and_set_attr(cls, field, name, value, result, invariant_errors)
+ del kwargs[name]
+ elif field.initial is not PFIELD_NO_INITIAL:
+ initial = field.initial() if callable(field.initial) else field.initial
+ _check_and_set_attr(
+ cls, field, name, initial, result, invariant_errors)
+ elif field.mandatory:
+ missing_fields.append('{0}.{1}'.format(cls.__name__, name))
+
+ if invariant_errors or missing_fields:
+ raise InvariantException(tuple(invariant_errors), tuple(missing_fields), 'Field invariant failed')
+
+ if kwargs:
+ raise AttributeError("'{0}' are not among the specified fields for {1}".format(
+ ', '.join(kwargs), cls.__name__))
+
+ check_global_invariants(result, cls._pclass_invariants)
+
+ result._pclass_frozen = True
+ return result
+
+ def set(self, *args, **kwargs):
+ """
+ Set a field in the instance. Returns a new instance with the updated value. The original instance remains
+        unmodified. Accepts keyword arguments, or a field name string followed by its value as positional arguments.
+
+ >>> from pyrsistent import PClass, field
+ >>> class AClass(PClass):
+ ... x = field()
+ ...
+ >>> a = AClass(x=1)
+ >>> a2 = a.set(x=2)
+ >>> a3 = a.set('x', 3)
+ >>> a
+ AClass(x=1)
+ >>> a2
+ AClass(x=2)
+ >>> a3
+ AClass(x=3)
+ """
+ if args:
+ kwargs[args[0]] = args[1]
+
+ factory_fields = set(kwargs)
+
+ for key in self._pclass_fields:
+ if key not in kwargs:
+ value = getattr(self, key, _MISSING_VALUE)
+ if value is not _MISSING_VALUE:
+ kwargs[key] = value
+
+ return self.__class__(_factory_fields=factory_fields, **kwargs)
+
+ @classmethod
+ def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
+ """
+ Factory method. Will create a new PClass of the current type and assign the values
+ specified in kwargs.
+
+ :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
+ in the set of fields on the PClass.
+ """
+ if isinstance(kwargs, cls):
+ return kwargs
+
+ if ignore_extra:
+ kwargs = {k: kwargs[k] for k in cls._pclass_fields if k in kwargs}
+
+ return cls(_factory_fields=_factory_fields, ignore_extra=ignore_extra, **kwargs)
+
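+    # Illustrative only (Point is a hypothetical PClass):
+    #
+    #     class Point(PClass):
+    #         x = field(type=int)
+    #
+    #     Point.create({'x': 1, 'y': 2}, ignore_extra=True)   # -> Point(x=1)
+    #
+    # Without ignore_extra=True the unknown key 'y' raises AttributeError.
+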
+ def serialize(self, format=None):
+ """
+ Serialize the current PClass using custom serializer functions for fields where
+ such have been supplied.
+ """
+ result = {}
+ for name in self._pclass_fields:
+ value = getattr(self, name, _MISSING_VALUE)
+ if value is not _MISSING_VALUE:
+ result[name] = serialize(self._pclass_fields[name].serializer, format, value)
+
+ return result
+
+ def transform(self, *transformations):
+ """
+        Apply transformations to the current PClass. For more details on transformations see
+ the documentation for PMap. Transformations on PClasses do not support key matching
+ since the PClass is not a collection. Apart from that the transformations available
+ for other persistent types work as expected.
+ """
+ return transform(self, transformations)
+
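+    # For illustration (Point is a hypothetical PClass): Point(x=1).transform(['x'], 2)
+    # returns Point(x=2), mirroring the PMap transform semantics minus key matching.
+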
+ def __eq__(self, other):
+ if isinstance(other, self.__class__):
+ for name in self._pclass_fields:
+ if getattr(self, name, _MISSING_VALUE) != getattr(other, name, _MISSING_VALUE):
+ return False
+
+ return True
+
+ return NotImplemented
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __hash__(self):
+ # May want to optimize this by caching the hash somehow
+ return hash(tuple((key, getattr(self, key, _MISSING_VALUE)) for key in self._pclass_fields))
+
+ def __setattr__(self, key, value):
+ if getattr(self, '_pclass_frozen', False):
+ raise AttributeError("Can't set attribute, key={0}, value={1}".format(key, value))
+
+ super(PClass, self).__setattr__(key, value)
+
+ def __delattr__(self, key):
+ raise AttributeError("Can't delete attribute, key={0}, use remove()".format(key))
+
+ def _to_dict(self):
+ result = {}
+ for key in self._pclass_fields:
+ value = getattr(self, key, _MISSING_VALUE)
+ if value is not _MISSING_VALUE:
+ result[key] = value
+
+ return result
+
+ def __repr__(self):
+ return "{0}({1})".format(self.__class__.__name__,
+ ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self._to_dict().items()))
+
+ def __reduce__(self):
+ # Pickling support
+ data = dict((key, getattr(self, key)) for key in self._pclass_fields if hasattr(self, key))
+ return _restore_pickle, (self.__class__, data,)
+
+ def evolver(self):
+ """
+ Returns an evolver for this object.
+ """
+ return _PClassEvolver(self, self._to_dict())
+
+ def remove(self, name):
+ """
+ Remove attribute given by name from the current instance. Raises AttributeError if the
+ attribute doesn't exist.
+ """
+ evolver = self.evolver()
+ del evolver[name]
+ return evolver.persistent()
+
+
+class _PClassEvolver(object):
+ __slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields')
+
+ def __init__(self, original, initial_dict):
+ self._pclass_evolver_original = original
+ self._pclass_evolver_data = initial_dict
+ self._pclass_evolver_data_is_dirty = False
+ self._factory_fields = set()
+
+ def __getitem__(self, item):
+ return self._pclass_evolver_data[item]
+
+ def set(self, key, value):
+ if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value:
+ self._pclass_evolver_data[key] = value
+ self._factory_fields.add(key)
+ self._pclass_evolver_data_is_dirty = True
+
+ return self
+
+ def __setitem__(self, key, value):
+ self.set(key, value)
+
+ def remove(self, item):
+ if item in self._pclass_evolver_data:
+ del self._pclass_evolver_data[item]
+ self._factory_fields.discard(item)
+ self._pclass_evolver_data_is_dirty = True
+ return self
+
+ raise AttributeError(item)
+
+ def __delitem__(self, item):
+ self.remove(item)
+
+ def persistent(self):
+ if self._pclass_evolver_data_is_dirty:
+ return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields,
+ **self._pclass_evolver_data)
+
+ return self._pclass_evolver_original
+
+ def __setattr__(self, key, value):
+ if key not in self.__slots__:
+ self.set(key, value)
+ else:
+ super(_PClassEvolver, self).__setattr__(key, value)
+
+ def __getattr__(self, item):
+ return self[item]
diff --git a/third_party/python/pyrsistent/pyrsistent/_pdeque.py b/third_party/python/pyrsistent/pyrsistent/_pdeque.py
new file mode 100644
index 0000000000..5147b3fa6a
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_pdeque.py
@@ -0,0 +1,376 @@
+from ._compat import Sequence, Hashable
+from itertools import islice, chain
+from numbers import Integral
+from pyrsistent._plist import plist
+
+
+class PDeque(object):
+ """
+ Persistent double ended queue (deque). Allows quick appends and pops in both ends. Implemented
+ using two persistent lists.
+
+ A maximum length can be specified to create a bounded queue.
+
+    Fully supports the Sequence and Hashable protocols including indexing and slicing, but
+    if you need fast random access go for the PVector instead.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to
+ create an instance.
+
+ Some examples:
+
+ >>> x = pdeque([1, 2, 3])
+ >>> x.left
+ 1
+ >>> x.right
+ 3
+ >>> x[0] == x.left
+ True
+ >>> x[-1] == x.right
+ True
+ >>> x.pop()
+ pdeque([1, 2])
+ >>> x.pop() == x[:-1]
+ True
+ >>> x.popleft()
+ pdeque([2, 3])
+ >>> x.append(4)
+ pdeque([1, 2, 3, 4])
+ >>> x.appendleft(4)
+ pdeque([4, 1, 2, 3])
+
+ >>> y = pdeque([1, 2, 3], maxlen=3)
+ >>> y.append(4)
+ pdeque([2, 3, 4], maxlen=3)
+ >>> y.appendleft(4)
+ pdeque([4, 1, 2], maxlen=3)
+ """
+ __slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__')
+
+ def __new__(cls, left_list, right_list, length, maxlen=None):
+ instance = super(PDeque, cls).__new__(cls)
+ instance._left_list = left_list
+ instance._right_list = right_list
+ instance._length = length
+
+ if maxlen is not None:
+ if not isinstance(maxlen, Integral):
+ raise TypeError('An integer is required as maxlen')
+
+ if maxlen < 0:
+ raise ValueError("maxlen must be non-negative")
+
+ instance._maxlen = maxlen
+ return instance
+
+ @property
+ def right(self):
+ """
+        Rightmost element in the deque.
+ """
+ return PDeque._tip_from_lists(self._right_list, self._left_list)
+
+ @property
+ def left(self):
+ """
+        Leftmost element in the deque.
+ """
+ return PDeque._tip_from_lists(self._left_list, self._right_list)
+
+ @staticmethod
+ def _tip_from_lists(primary_list, secondary_list):
+ if primary_list:
+ return primary_list.first
+
+ if secondary_list:
+ return secondary_list[-1]
+
+ raise IndexError('No elements in empty deque')
+
+ def __iter__(self):
+ return chain(self._left_list, self._right_list.reverse())
+
+ def __repr__(self):
+ return "pdeque({0}{1})".format(list(self),
+ ', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '')
+ __str__ = __repr__
+
+ @property
+ def maxlen(self):
+ """
+ Maximum length of the queue.
+ """
+ return self._maxlen
+
+ def pop(self, count=1):
+ """
+ Return new deque with rightmost element removed. Popping the empty queue
+        will return the empty queue. An optional count can be given to indicate the
+ number of elements to pop. Popping with a negative index is the same as
+ popleft. Executes in amortized O(k) where k is the number of elements to pop.
+
+ >>> pdeque([1, 2]).pop()
+ pdeque([1])
+ >>> pdeque([1, 2]).pop(2)
+ pdeque([])
+ >>> pdeque([1, 2]).pop(-1)
+ pdeque([2])
+ """
+ if count < 0:
+ return self.popleft(-count)
+
+ new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count)
+ return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
+
+ def popleft(self, count=1):
+ """
+ Return new deque with leftmost element removed. Otherwise functionally
+ equivalent to pop().
+
+ >>> pdeque([1, 2]).popleft()
+ pdeque([2])
+ """
+ if count < 0:
+ return self.pop(-count)
+
+ new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count)
+ return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen)
+
+ @staticmethod
+ def _pop_lists(primary_list, secondary_list, count):
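+        # Illustration: _pop_lists(plist([1]), plist([3, 2]), 2) exhausts the
+        # primary list first, then reverses the secondary list to continue
+        # popping, returning (plist([3]), plist()).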
+ new_primary_list = primary_list
+ new_secondary_list = secondary_list
+
+ while count > 0 and (new_primary_list or new_secondary_list):
+ count -= 1
+ if new_primary_list.rest:
+ new_primary_list = new_primary_list.rest
+ elif new_primary_list:
+ new_primary_list = new_secondary_list.reverse()
+ new_secondary_list = plist()
+ else:
+ new_primary_list = new_secondary_list.reverse().rest
+ new_secondary_list = plist()
+
+ return new_primary_list, new_secondary_list
+
+ def _is_empty(self):
+ return not self._left_list and not self._right_list
+
+ def __lt__(self, other):
+ if not isinstance(other, PDeque):
+ return NotImplemented
+
+ return tuple(self) < tuple(other)
+
+ def __eq__(self, other):
+ if not isinstance(other, PDeque):
+ return NotImplemented
+
+ if tuple(self) == tuple(other):
+ # Sanity check of the length value since it is redundant (there for performance)
+ assert len(self) == len(other)
+ return True
+
+ return False
+
+ def __hash__(self):
+ return hash(tuple(self))
+
+ def __len__(self):
+ return self._length
+
+ def append(self, elem):
+ """
+ Return new deque with elem as the rightmost element.
+
+ >>> pdeque([1, 2]).append(3)
+ pdeque([1, 2, 3])
+ """
+ new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem)
+ return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
+
+ def appendleft(self, elem):
+ """
+ Return new deque with elem as the leftmost element.
+
+ >>> pdeque([1, 2]).appendleft(3)
+ pdeque([3, 1, 2])
+ """
+ new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem)
+ return PDeque(new_left_list, new_right_list, new_length, self._maxlen)
+
+ def _append(self, primary_list, secondary_list, elem):
+ if self._maxlen is not None and self._length == self._maxlen:
+ if self._maxlen == 0:
+ return primary_list, secondary_list, 0
+ new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1)
+ return new_primary_list, new_secondary_list.cons(elem), self._length
+
+ return primary_list, secondary_list.cons(elem), self._length + 1
+
+ @staticmethod
+ def _extend_list(the_list, iterable):
+ count = 0
+ for elem in iterable:
+ the_list = the_list.cons(elem)
+ count += 1
+
+ return the_list, count
+
+ def _extend(self, primary_list, secondary_list, iterable):
+ new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable)
+ new_secondary_list = secondary_list
+ current_len = self._length + extend_count
+ if self._maxlen is not None and current_len > self._maxlen:
+ pop_len = current_len - self._maxlen
+ new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len)
+ extend_count -= pop_len
+
+ return new_primary_list, new_secondary_list, extend_count
+
+ def extend(self, iterable):
+ """
+ Return new deque with all elements of iterable appended to the right.
+
+ >>> pdeque([1, 2]).extend([3, 4])
+ pdeque([1, 2, 3, 4])
+ """
+ new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable)
+ return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
+
+ def extendleft(self, iterable):
+ """
+ Return new deque with all elements of iterable appended to the left.
+
+ NB! The elements will be inserted in reverse order compared to the order in the iterable.
+
+ >>> pdeque([1, 2]).extendleft([3, 4])
+ pdeque([4, 3, 1, 2])
+ """
+ new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable)
+ return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen)
+
+ def count(self, elem):
+ """
+ Return the number of elements equal to elem present in the queue
+
+ >>> pdeque([1, 2, 1]).count(1)
+ 2
+ """
+ return self._left_list.count(elem) + self._right_list.count(elem)
+
+ def remove(self, elem):
+ """
+ Return new deque with first element from left equal to elem removed. If no such element is found
+ a ValueError is raised.
+
+ >>> pdeque([2, 1, 2]).remove(2)
+ pdeque([1, 2])
+ """
+ try:
+ return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1)
+ except ValueError:
+ # Value not found in left list, try the right list
+ try:
+ # This is severely inefficient with a double reverse, should perhaps implement a remove_last()?
+ return PDeque(self._left_list,
+ self._right_list.reverse().remove(elem).reverse(), self._length - 1)
+ except ValueError:
+ raise ValueError('{0} not found in PDeque'.format(elem))
+
+ def reverse(self):
+ """
+ Return reversed deque.
+
+ >>> pdeque([1, 2, 3]).reverse()
+ pdeque([3, 2, 1])
+
+ Also supports the standard python reverse function.
+
+ >>> reversed(pdeque([1, 2, 3]))
+ pdeque([3, 2, 1])
+ """
+ return PDeque(self._right_list, self._left_list, self._length)
+ __reversed__ = reverse
+
+ def rotate(self, steps):
+ """
+ Return deque with elements rotated steps steps.
+
+ >>> x = pdeque([1, 2, 3])
+ >>> x.rotate(1)
+ pdeque([3, 1, 2])
+ >>> x.rotate(-2)
+ pdeque([3, 1, 2])
+ """
+ popped_deque = self.pop(steps)
+ if steps >= 0:
+ return popped_deque.extendleft(islice(self.reverse(), steps))
+
+ return popped_deque.extend(islice(self, -steps))
+
+ def __reduce__(self):
+ # Pickling support
+ return pdeque, (list(self), self._maxlen)
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ if index.step is not None and index.step != 1:
+ # Too difficult, no structural sharing possible
+ return pdeque(tuple(self)[index], maxlen=self._maxlen)
+
+ result = self
+ if index.start is not None:
+ result = result.popleft(index.start % self._length)
+ if index.stop is not None:
+ result = result.pop(self._length - (index.stop % self._length))
+
+ return result
+
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index >= 0:
+ return self.popleft(index).left
+
+ shifted = len(self) + index
+ if shifted < 0:
+ raise IndexError(
+ "pdeque index {0} out of range {1}".format(index, len(self)),
+ )
+ return self.popleft(shifted).left
+
+ index = Sequence.index
+
+Sequence.register(PDeque)
+Hashable.register(PDeque)
+
+
+def pdeque(iterable=(), maxlen=None):
+ """
+    Return deque containing the elements of iterable. If maxlen is specified then
+    len(iterable) - maxlen elements are discarded from the left if len(iterable) > maxlen.
+
+ >>> pdeque([1, 2, 3])
+ pdeque([1, 2, 3])
+ >>> pdeque([1, 2, 3, 4], maxlen=2)
+ pdeque([3, 4], maxlen=2)
+ """
+ t = tuple(iterable)
+ if maxlen is not None:
+ t = t[-maxlen:]
+ length = len(t)
+ pivot = int(length / 2)
+ left = plist(t[:pivot])
+ right = plist(t[pivot:], reverse=True)
+ return PDeque(left, right, length, maxlen)
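+
+# For example, pdeque([1, 2, 3, 4]) stores plist([1, 2]) as the left list and
+# plist([4, 3]) (the right half, reversed) as the right list, keeping both
+# ends of the deque O(1) away.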
+
+def dq(*elements):
+ """
+ Return deque containing all arguments.
+
+ >>> dq(1, 2, 3)
+ pdeque([1, 2, 3])
+ """
+ return pdeque(elements)
diff --git a/third_party/python/pyrsistent/pyrsistent/_plist.py b/third_party/python/pyrsistent/pyrsistent/_plist.py
new file mode 100644
index 0000000000..8b4267f5e3
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_plist.py
@@ -0,0 +1,313 @@
+from ._compat import Sequence, Hashable
+from numbers import Integral
+from functools import reduce
+
+
+class _PListBuilder(object):
+ """
+ Helper class to allow construction of a list without
+ having to reverse it in the end.
+ """
+ __slots__ = ('_head', '_tail')
+
+ def __init__(self):
+ self._head = _EMPTY_PLIST
+ self._tail = _EMPTY_PLIST
+
+ def _append(self, elem, constructor):
+ if not self._tail:
+ self._head = constructor(elem)
+ self._tail = self._head
+ else:
+ self._tail.rest = constructor(elem)
+ self._tail = self._tail.rest
+
+ return self._head
+
+ def append_elem(self, elem):
+ return self._append(elem, lambda e: PList(e, _EMPTY_PLIST))
+
+ def append_plist(self, pl):
+ return self._append(pl, lambda l: l)
+
+ def build(self):
+ return self._head
+
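+# Sketch of intended use (illustrative): append while traversing, build once:
+#
+#     lb = _PListBuilder()
+#     for e in (1, 2, 3):
+#         lb.append_elem(e)
+#     lb.build()   # -> plist([1, 2, 3])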
+
+class _PListBase(object):
+ __slots__ = ('__weakref__',)
+
+    # Selected implementations can be taken straight from the Sequence
+    # class; others are less suitable, especially those that work with
+    # index lookups.
+ count = Sequence.count
+ index = Sequence.index
+
+ def __reduce__(self):
+ # Pickling support
+ return plist, (list(self),)
+
+ def __len__(self):
+ """
+ Return the length of the list, computed by traversing it.
+
+ This is obviously O(n) but with the current implementation
+ where a list is also a node the overhead of storing the length
+ in every node would be quite significant.
+ """
+ return sum(1 for _ in self)
+
+ def __repr__(self):
+ return "plist({0})".format(list(self))
+ __str__ = __repr__
+
+ def cons(self, elem):
+ """
+ Return a new list with elem inserted as new head.
+
+ >>> plist([1, 2]).cons(3)
+ plist([3, 1, 2])
+ """
+ return PList(elem, self)
+
+ def mcons(self, iterable):
+ """
+        Return a new list with all elements of iterable repeatedly cons'ed to the current list.
+ NB! The elements will be inserted in the reverse order of the iterable.
+ Runs in O(len(iterable)).
+
+ >>> plist([1, 2]).mcons([3, 4])
+ plist([4, 3, 1, 2])
+ """
+ head = self
+ for elem in iterable:
+ head = head.cons(elem)
+
+ return head
+
+ def reverse(self):
+ """
+ Return a reversed version of list. Runs in O(n) where n is the length of the list.
+
+ >>> plist([1, 2, 3]).reverse()
+ plist([3, 2, 1])
+
+ Also supports the standard reversed function.
+
+ >>> reversed(plist([1, 2, 3]))
+ plist([3, 2, 1])
+ """
+ result = plist()
+ head = self
+ while head:
+ result = result.cons(head.first)
+ head = head.rest
+
+ return result
+ __reversed__ = reverse
+
+ def split(self, index):
+ """
+        Split the list at the position specified by index. Returns a tuple containing the
+ list up until index and the list after the index. Runs in O(index).
+
+ >>> plist([1, 2, 3, 4]).split(2)
+ (plist([1, 2]), plist([3, 4]))
+ """
+ lb = _PListBuilder()
+ right_list = self
+ i = 0
+ while right_list and i < index:
+ lb.append_elem(right_list.first)
+ right_list = right_list.rest
+ i += 1
+
+ if not right_list:
+ # Just a small optimization in the cases where no split occurred
+ return self, _EMPTY_PLIST
+
+ return lb.build(), right_list
+
+ def __iter__(self):
+ li = self
+ while li:
+ yield li.first
+ li = li.rest
+
+ def __lt__(self, other):
+ if not isinstance(other, _PListBase):
+ return NotImplemented
+
+ return tuple(self) < tuple(other)
+
+ def __eq__(self, other):
+ """
+ Traverses the lists, checking equality of elements.
+
+ This is an O(n) operation, but preserves the standard semantics of list equality.
+ """
+ if not isinstance(other, _PListBase):
+ return NotImplemented
+
+ self_head = self
+ other_head = other
+ while self_head and other_head:
+ if not self_head.first == other_head.first:
+ return False
+ self_head = self_head.rest
+ other_head = other_head.rest
+
+ return not self_head and not other_head
+
+ def __getitem__(self, index):
+ # Don't use this data structure if you plan to do a lot of indexing, it is
+ # very inefficient! Use a PVector instead!
+
+ if isinstance(index, slice):
+ if index.start is not None and index.stop is None and (index.step is None or index.step == 1):
+ return self._drop(index.start)
+
+ # Take the easy way out for all other slicing cases, not much structural reuse possible anyway
+ return plist(tuple(self)[index])
+
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index < 0:
+ # NB: O(n)!
+ index += len(self)
+
+ try:
+ return self._drop(index).first
+ except AttributeError:
+ raise IndexError("PList index out of range")
+
+ def _drop(self, count):
+ if count < 0:
+ raise IndexError("PList index out of range")
+
+ head = self
+ while count > 0:
+ head = head.rest
+ count -= 1
+
+ return head
+
+ def __hash__(self):
+ return hash(tuple(self))
+
+ def remove(self, elem):
+ """
+ Return a new list with the first element equal to elem removed. O(k) where k is the position
+ of the element that is removed.
+
+ Raises ValueError if no matching element is found.
+
+ >>> plist([1, 2, 1]).remove(1)
+ plist([2, 1])
+ """
+
+ builder = _PListBuilder()
+ head = self
+ while head:
+ if head.first == elem:
+ return builder.append_plist(head.rest)
+
+ builder.append_elem(head.first)
+ head = head.rest
+
+ raise ValueError('{0} not found in PList'.format(elem))
+
+
+class PList(_PListBase):
+ """
+ Classical Lisp style singly linked list. Adding elements to the head using cons is O(1).
+ Element access is O(k) where k is the position of the element in the list. Taking the
+ length of the list is O(n).
+
+ Fully supports the Sequence and Hashable protocols including indexing and slicing but
+ if you need fast random access go for the PVector instead.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to
+ create an instance.
+
+ Some examples:
+
+ >>> x = plist([1, 2])
+ >>> y = x.cons(3)
+ >>> x
+ plist([1, 2])
+ >>> y
+ plist([3, 1, 2])
+ >>> y.first
+ 3
+ >>> y.rest == x
+ True
+ >>> y[:2]
+ plist([3, 1])
+ """
+ __slots__ = ('first', 'rest')
+
+ def __new__(cls, first, rest):
+ instance = super(PList, cls).__new__(cls)
+ instance.first = first
+ instance.rest = rest
+ return instance
+
+ def __bool__(self):
+ return True
+ __nonzero__ = __bool__
+
+
+Sequence.register(PList)
+Hashable.register(PList)
+
+
+class _EmptyPList(_PListBase):
+ __slots__ = ()
+
+ def __bool__(self):
+ return False
+ __nonzero__ = __bool__
+
+ @property
+ def first(self):
+ raise AttributeError("Empty PList has no first")
+
+ @property
+ def rest(self):
+ return self
+
+
+Sequence.register(_EmptyPList)
+Hashable.register(_EmptyPList)
+
+_EMPTY_PLIST = _EmptyPList()
+
+
+def plist(iterable=(), reverse=False):
+ """
+ Creates a new persistent list containing all elements of iterable.
+ Optional parameter reverse specifies if the elements should be inserted in
+ reverse order or not.
+
+ >>> plist([1, 2, 3])
+ plist([1, 2, 3])
+ >>> plist([1, 2, 3], reverse=True)
+ plist([3, 2, 1])
+ """
+ if not reverse:
+ iterable = list(iterable)
+ iterable.reverse()
+
+ return reduce(lambda pl, elem: pl.cons(elem), iterable, _EMPTY_PLIST)
+
+
+def l(*elements):
+ """
+ Creates a new persistent list containing all arguments.
+
+ >>> l(1, 2, 3)
+ plist([1, 2, 3])
+ """
+ return plist(elements)
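
As a quick illustration of the structural sharing described in the PList docstring above, a sketch (assuming the vendored package is importable) showing that cons and split reuse the existing tail rather than copying it:

# Sketch only: cons is O(1) and shares its tail; split copies only the prefix.
from pyrsistent import plist

base = plist([2, 3])
a = base.cons(1)                 # new head node, tail shared with base
assert a.rest is base
assert list(a) == [1, 2, 3]

left, right = a.split(1)         # O(index): rebuilds the left part only
assert list(left) == [1]
assert right is base             # right half is the shared tail itself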
diff --git a/third_party/python/pyrsistent/pyrsistent/_pmap.py b/third_party/python/pyrsistent/pyrsistent/_pmap.py
new file mode 100644
index 0000000000..e8a0ec53f8
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_pmap.py
@@ -0,0 +1,460 @@
+from ._compat import Mapping, Hashable
+from itertools import chain
+import six
+from pyrsistent._pvector import pvector
+from pyrsistent._transformations import transform
+
+
+class PMap(object):
+ """
+ Persistent map/dict. Tries to follow the same naming conventions as the built in dict where feasible.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`m` or :py:func:`pmap` to
+ create an instance.
+
+ Was originally written as a very close copy of the Clojure equivalent but was later rewritten to more
+ closely resemble the python dict. This means that a sparse vector (a PVector) of buckets is used. The keys are
+ hashed and the elements inserted at position hash % len(bucket_vector). Whenever the map size exceeds 2/3 of
+ the containing vector's size the map is reallocated to a vector of double the size. This is done to avoid
+ excessive hash collisions.
+
+ This structure corresponds most closely to the built in dict type and is intended as a replacement. Where the
+ semantics are (more or less) the same, the same function names have been used, but in some cases that is not
+ possible, for example for assignment and deletion of values.
+
+ PMap implements the Mapping protocol and is Hashable. It also supports dot-notation for
+ element access.
+
+ Random access and insert is log32(n) where n is the size of the map.
+
+ The following are examples of some common operations on persistent maps
+
+ >>> m1 = m(a=1, b=3)
+ >>> m2 = m1.set('c', 3)
+ >>> m3 = m2.remove('a')
+ >>> m1
+ pmap({'b': 3, 'a': 1})
+ >>> m2
+ pmap({'c': 3, 'b': 3, 'a': 1})
+ >>> m3
+ pmap({'c': 3, 'b': 3})
+ >>> m3['c']
+ 3
+ >>> m3.c
+ 3
+ """
+ __slots__ = ('_size', '_buckets', '__weakref__', '_cached_hash')
+
+ def __new__(cls, size, buckets):
+ self = super(PMap, cls).__new__(cls)
+ self._size = size
+ self._buckets = buckets
+ return self
+
+ @staticmethod
+ def _get_bucket(buckets, key):
+ index = hash(key) % len(buckets)
+ bucket = buckets[index]
+ return index, bucket
+
+ @staticmethod
+ def _getitem(buckets, key):
+ _, bucket = PMap._get_bucket(buckets, key)
+ if bucket:
+ for k, v in bucket:
+ if k == key:
+ return v
+
+ raise KeyError(key)
+
+ def __getitem__(self, key):
+ return PMap._getitem(self._buckets, key)
+
+ @staticmethod
+ def _contains(buckets, key):
+ _, bucket = PMap._get_bucket(buckets, key)
+ if bucket:
+ for k, _ in bucket:
+ if k == key:
+ return True
+
+ return False
+
+ return False
+
+ def __contains__(self, key):
+ return self._contains(self._buckets, key)
+
+ get = Mapping.get
+
+ def __iter__(self):
+ return self.iterkeys()
+
+ def __getattr__(self, key):
+ try:
+ return self[key]
+ except KeyError:
+ raise AttributeError(
+ "{0} has no attribute '{1}'".format(type(self).__name__, key)
+ )
+
+ def iterkeys(self):
+ for k, _ in self.iteritems():
+ yield k
+
+ # These are more efficient implementations compared to the original
+ # methods that are based on the keys iterator and then calls the
+ # accessor functions to access the value for the corresponding key
+ def itervalues(self):
+ for _, v in self.iteritems():
+ yield v
+
+ def iteritems(self):
+ for bucket in self._buckets:
+ if bucket:
+ for k, v in bucket:
+ yield k, v
+
+ def values(self):
+ return pvector(self.itervalues())
+
+ def keys(self):
+ return pvector(self.iterkeys())
+
+ def items(self):
+ return pvector(self.iteritems())
+
+ def __len__(self):
+ return self._size
+
+ def __repr__(self):
+ return 'pmap({0})'.format(str(dict(self)))
+
+ def __eq__(self, other):
+ if self is other:
+ return True
+ if not isinstance(other, Mapping):
+ return NotImplemented
+ if len(self) != len(other):
+ return False
+ if isinstance(other, PMap):
+ if (hasattr(self, '_cached_hash') and hasattr(other, '_cached_hash')
+ and self._cached_hash != other._cached_hash):
+ return False
+ if self._buckets == other._buckets:
+ return True
+ return dict(self.iteritems()) == dict(other.iteritems())
+ elif isinstance(other, dict):
+ return dict(self.iteritems()) == other
+ return dict(self.iteritems()) == dict(six.iteritems(other))
+
+ __ne__ = Mapping.__ne__
+
+ def __lt__(self, other):
+ raise TypeError('PMaps are not orderable')
+
+ __le__ = __lt__
+ __gt__ = __lt__
+ __ge__ = __lt__
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __hash__(self):
+ if not hasattr(self, '_cached_hash'):
+ self._cached_hash = hash(frozenset(self.iteritems()))
+ return self._cached_hash
+
+ def set(self, key, val):
+ """
+ Return a new PMap with key and val inserted.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m2 = m1.set('a', 3)
+ >>> m3 = m1.set('c' ,4)
+ >>> m1
+ pmap({'b': 2, 'a': 1})
+ >>> m2
+ pmap({'b': 2, 'a': 3})
+ >>> m3
+ pmap({'c': 4, 'b': 2, 'a': 1})
+ """
+ return self.evolver().set(key, val).persistent()
+
+ def remove(self, key):
+ """
+ Return a new PMap without the element specified by key. Raises KeyError if the element
+ is not present.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.remove('a')
+ pmap({'b': 2})
+ """
+ return self.evolver().remove(key).persistent()
+
+ def discard(self, key):
+ """
+ Return a new PMap without the element specified by key. Returns reference to itself
+ if element is not present.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.discard('a')
+ pmap({'b': 2})
+ >>> m1 is m1.discard('c')
+ True
+ """
+ try:
+ return self.remove(key)
+ except KeyError:
+ return self
+
+ def update(self, *maps):
+ """
+ Return a new PMap with the items in Mappings inserted. If the same key is present in multiple
+ maps the rightmost (last) value is inserted.
+
+ >>> m1 = m(a=1, b=2)
+ >>> m1.update(m(a=2, c=3), {'a': 17, 'd': 35})
+ pmap({'c': 3, 'b': 2, 'a': 17, 'd': 35})
+ """
+ return self.update_with(lambda l, r: r, *maps)
+
+ def update_with(self, update_fn, *maps):
+ """
+ Return a new PMap with the items in Mappings maps inserted. If the same key is present in multiple
+ maps the values will be merged using update_fn going from left to right.
+
+ >>> from operator import add
+ >>> m1 = m(a=1, b=2)
+ >>> m1.update_with(add, m(a=2))
+ pmap({'b': 2, 'a': 3})
+
+ The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost.
+
+ >>> m1 = m(a=1)
+ >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3})
+ pmap({'a': 1})
+ """
+ evolver = self.evolver()
+ for map in maps:
+ for key, value in map.items():
+ evolver.set(key, update_fn(evolver[key], value) if key in evolver else value)
+
+ return evolver.persistent()
+
+ def __add__(self, other):
+ return self.update(other)
+
+ def __reduce__(self):
+ # Pickling support
+ return pmap, (dict(self),)
+
+ def transform(self, *transformations):
+ """
+ Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
+ consists of two parts. One match expression that specifies which elements to transform
+ and one transformation function that performs the actual transformation.
+
+ >>> from pyrsistent import freeze, ny
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ When nothing has been transformed the original data structure is kept
+
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+ """
+ return transform(self, transformations)
+
+ def copy(self):
+ return self
+
+ class _Evolver(object):
+ __slots__ = ('_buckets_evolver', '_size', '_original_pmap')
+
+ def __init__(self, original_pmap):
+ self._original_pmap = original_pmap
+ self._buckets_evolver = original_pmap._buckets.evolver()
+ self._size = original_pmap._size
+
+ def __getitem__(self, key):
+ return PMap._getitem(self._buckets_evolver, key)
+
+ def __setitem__(self, key, val):
+ self.set(key, val)
+
+ def set(self, key, val):
+ if len(self._buckets_evolver) < 0.67 * self._size:
+ self._reallocate(2 * len(self._buckets_evolver))
+
+ kv = (key, val)
+ index, bucket = PMap._get_bucket(self._buckets_evolver, key)
+ if bucket:
+ for k, v in bucket:
+ if k == key:
+ if v is not val:
+ new_bucket = [(k2, v2) if k2 != k else (k2, val) for k2, v2 in bucket]
+ self._buckets_evolver[index] = new_bucket
+
+ return self
+
+ new_bucket = [kv]
+ new_bucket.extend(bucket)
+ self._buckets_evolver[index] = new_bucket
+ self._size += 1
+ else:
+ self._buckets_evolver[index] = [kv]
+ self._size += 1
+
+ return self
+
+ def _reallocate(self, new_size):
+ new_list = new_size * [None]
+ buckets = self._buckets_evolver.persistent()
+ for k, v in chain.from_iterable(x for x in buckets if x):
+ index = hash(k) % new_size
+ if new_list[index]:
+ new_list[index].append((k, v))
+ else:
+ new_list[index] = [(k, v)]
+
+ # A reallocation should always result in a dirty buckets evolver to avoid
+ # possible loss of elements when doing the reallocation.
+ self._buckets_evolver = pvector().evolver()
+ self._buckets_evolver.extend(new_list)
+
+ def is_dirty(self):
+ return self._buckets_evolver.is_dirty()
+
+ def persistent(self):
+ if self.is_dirty():
+ self._original_pmap = PMap(self._size, self._buckets_evolver.persistent())
+
+ return self._original_pmap
+
+ def __len__(self):
+ return self._size
+
+ def __contains__(self, key):
+ return PMap._contains(self._buckets_evolver, key)
+
+ def __delitem__(self, key):
+ self.remove(key)
+
+ def remove(self, key):
+ index, bucket = PMap._get_bucket(self._buckets_evolver, key)
+
+ if bucket:
+ new_bucket = [(k, v) for (k, v) in bucket if k != key]
+ if len(bucket) > len(new_bucket):
+ self._buckets_evolver[index] = new_bucket if new_bucket else None
+ self._size -= 1
+ return self
+
+ raise KeyError('{0}'.format(key))
+
+ def evolver(self):
+ """
+ Create a new evolver for this pmap. For a discussion on evolvers in general see the
+ documentation for the pvector evolver.
+
+ Create the evolver and perform various mutating updates to it:
+
+ >>> m1 = m(a=1, b=2)
+ >>> e = m1.evolver()
+ >>> e['c'] = 3
+ >>> len(e)
+ 3
+ >>> del e['a']
+
+ The underlying pmap remains the same:
+
+ >>> m1
+ pmap({'b': 2, 'a': 1})
+
+ The changes are kept in the evolver. An updated pmap can be created using the
+ persistent() function on the evolver.
+
+ >>> m2 = e.persistent()
+ >>> m2
+ pmap({'c': 3, 'b': 2})
+
+ The new pmap will share data with the original pmap in the same way that would have
+ been done if only using operations on the pmap.
+ """
+ return self._Evolver(self)
+
+Mapping.register(PMap)
+Hashable.register(PMap)
+
+
+def _turbo_mapping(initial, pre_size):
+ if pre_size:
+ size = pre_size
+ else:
+ try:
+ size = 2 * len(initial) or 8
+ except Exception:
+ # Guess we can't figure out the length. Give up on length hinting,
+ # we can always reallocate later.
+ size = 8
+
+ buckets = size * [None]
+
+ if not isinstance(initial, Mapping):
+ # Make a dictionary of the initial data if it isn't already,
+ # that will save us some job further down since we can assume no
+ # key collisions
+ initial = dict(initial)
+
+ for k, v in six.iteritems(initial):
+ h = hash(k)
+ index = h % size
+ bucket = buckets[index]
+
+ if bucket:
+ bucket.append((k, v))
+ else:
+ buckets[index] = [(k, v)]
+
+ return PMap(len(initial), pvector().extend(buckets))
+
+
+_EMPTY_PMAP = _turbo_mapping({}, 0)
+
+
+def pmap(initial={}, pre_size=0):
+ """
+ Create a new persistent map, inserting all elements from initial into the newly created map.
+ The optional argument pre_size may be used to specify an initial size of the underlying bucket vector. This
+ may have a positive performance impact in the cases where you know beforehand that a large number of elements
+ will be inserted into the map eventually since it will reduce the number of reallocations required.
+
+ >>> pmap({'a': 13, 'b': 14})
+ pmap({'b': 14, 'a': 13})
+ """
+ if not initial:
+ return _EMPTY_PMAP
+
+ return _turbo_mapping(initial, pre_size)
+
+
+def m(**kwargs):
+ """
+ Creates a new persistent map. Inserts all key value arguments into the newly created map.
+
+ >>> m(a=13, b=14)
+ pmap({'b': 14, 'a': 13})
+ """
+ return pmap(kwargs)
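
The bucket addressing used by PMap._get_bucket above can be sketched with a plain Python list standing in for the persistent bucket vector (toy code, not the real implementation):

# Sketch only: open hashing with hash(key) % len(buckets), as in PMap.
def toy_insert(buckets, key, value):
    index = hash(key) % len(buckets)
    bucket = buckets[index]
    if bucket is None:
        buckets[index] = [(key, value)]          # first entry in this bucket
    else:
        # Replace an existing entry for key, or append a new one.
        bucket[:] = [(k, v) for k, v in bucket if k != key] + [(key, value)]

buckets = 8 * [None]
toy_insert(buckets, 'a', 1)
toy_insert(buckets, 'b', 2)
toy_insert(buckets, 'a', 3)                      # overwrites ('a', 1)
assert sum(len(b) for b in buckets if b) == 2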
diff --git a/third_party/python/pyrsistent/pyrsistent/_precord.py b/third_party/python/pyrsistent/pyrsistent/_precord.py
new file mode 100644
index 0000000000..ec8d32c3da
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_precord.py
@@ -0,0 +1,169 @@
+import six
+from pyrsistent._checked_types import CheckedType, _restore_pickle, InvariantException, store_invariants
+from pyrsistent._field_common import (
+ set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants
+)
+from pyrsistent._pmap import PMap, pmap
+
+
+class _PRecordMeta(type):
+ def __new__(mcs, name, bases, dct):
+ set_fields(dct, bases, name='_precord_fields')
+ store_invariants(dct, bases, '_precord_invariants', '__invariant__')
+
+ dct['_precord_mandatory_fields'] = \
+ set(name for name, field in dct['_precord_fields'].items() if field.mandatory)
+
+ dct['_precord_initial_values'] = \
+ dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL)
+
+
+ dct['__slots__'] = ()
+
+ return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct)
+
+
+@six.add_metaclass(_PRecordMeta)
+class PRecord(PMap, CheckedType):
+ """
+ A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting
+ from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element
+ access using subscript notation.
+
+ More documentation and examples of PRecord usage are available at https://github.com/tobgu/pyrsistent
+ """
+ def __new__(cls, **kwargs):
+ # Hack total! If these two special attributes exist that means we can create
+ # ourselves. Otherwise we need to go through the Evolver to create the structures
+ # for us.
+ if '_precord_size' in kwargs and '_precord_buckets' in kwargs:
+ return super(PRecord, cls).__new__(cls, kwargs['_precord_size'], kwargs['_precord_buckets'])
+
+ factory_fields = kwargs.pop('_factory_fields', None)
+ ignore_extra = kwargs.pop('_ignore_extra', False)
+
+ initial_values = kwargs
+ if cls._precord_initial_values:
+ initial_values = dict((k, v() if callable(v) else v)
+ for k, v in cls._precord_initial_values.items())
+ initial_values.update(kwargs)
+
+ e = _PRecordEvolver(cls, pmap(), _factory_fields=factory_fields, _ignore_extra=ignore_extra)
+ for k, v in initial_values.items():
+ e[k] = v
+
+ return e.persistent()
+
+ def set(self, *args, **kwargs):
+ """
+ Set a field in the record. This set function differs slightly from that in the PMap
+ class. First of all it accepts key-value pairs. Second it accepts multiple key-value
+ pairs to perform one atomic update of multiple fields.
+ """
+
+ # The PRecord set() can accept kwargs since all fields that have been declared are
+ # valid python identifiers. Also allow multiple fields to be set in one operation.
+ if args:
+ return super(PRecord, self).set(args[0], args[1])
+
+ return self.update(kwargs)
+
+ def evolver(self):
+ """
+ Returns an evolver of this object.
+ """
+ return _PRecordEvolver(self.__class__, self)
+
+ def __repr__(self):
+ return "{0}({1})".format(self.__class__.__name__,
+ ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self.items()))
+
+ @classmethod
+ def create(cls, kwargs, _factory_fields=None, ignore_extra=False):
+ """
+ Factory method. Will create a new PRecord of the current type and assign the values
+ specified in kwargs.
+
+ :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not
+ in the set of fields on the PRecord.
+ """
+ if isinstance(kwargs, cls):
+ return kwargs
+
+ if ignore_extra:
+ kwargs = {k: kwargs[k] for k in cls._precord_fields if k in kwargs}
+
+ return cls(_factory_fields=_factory_fields, _ignore_extra=ignore_extra, **kwargs)
+
+ def __reduce__(self):
+ # Pickling support
+ return _restore_pickle, (self.__class__, dict(self),)
+
+ def serialize(self, format=None):
+ """
+ Serialize the current PRecord using custom serializer functions for fields where
+ such have been supplied.
+ """
+ return dict((k, serialize(self._precord_fields[k].serializer, format, v)) for k, v in self.items())
+
+
+class _PRecordEvolver(PMap._Evolver):
+ __slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields', '_factory_fields', '_ignore_extra')
+
+ def __init__(self, cls, original_pmap, _factory_fields=None, _ignore_extra=False):
+ super(_PRecordEvolver, self).__init__(original_pmap)
+ self._destination_cls = cls
+ self._invariant_error_codes = []
+ self._missing_fields = []
+ self._factory_fields = _factory_fields
+ self._ignore_extra = _ignore_extra
+
+ def __setitem__(self, key, original_value):
+ self.set(key, original_value)
+
+ def set(self, key, original_value):
+ field = self._destination_cls._precord_fields.get(key)
+ if field:
+ if self._factory_fields is None or field in self._factory_fields:
+ try:
+ if is_field_ignore_extra_complaint(PRecord, field, self._ignore_extra):
+ value = field.factory(original_value, ignore_extra=self._ignore_extra)
+ else:
+ value = field.factory(original_value)
+ except InvariantException as e:
+ self._invariant_error_codes += e.invariant_errors
+ self._missing_fields += e.missing_fields
+ return self
+ else:
+ value = original_value
+
+ check_type(self._destination_cls, field, key, value)
+
+ is_ok, error_code = field.invariant(value)
+ if not is_ok:
+ self._invariant_error_codes.append(error_code)
+
+ return super(_PRecordEvolver, self).set(key, value)
+ else:
+ raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__))
+
+ def persistent(self):
+ cls = self._destination_cls
+ is_dirty = self.is_dirty()
+ pm = super(_PRecordEvolver, self).persistent()
+ if is_dirty or not isinstance(pm, cls):
+ result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size)
+ else:
+ result = pm
+
+ if cls._precord_mandatory_fields:
+ self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f
+ in (cls._precord_mandatory_fields - set(result.keys())))
+
+ if self._invariant_error_codes or self._missing_fields:
+ raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields),
+ 'Field invariant failed')
+
+ check_global_invariants(result, cls._precord_invariants)
+
+ return result
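
To see the field machinery above in action, a small sketch using the public API (field is exported by the pyrsistent package; the Point class is hypothetical):

# Sketch only: type-checked fields, initial values and atomic multi-field set.
from pyrsistent import PRecord, field

class Point(PRecord):
    x = field(type=int, mandatory=True)
    y = field(type=int, initial=0)

p = Point(x=1)                   # y falls back to its initial value
assert (p.x, p.y) == (1, 0)

p2 = p.set(x=2, y=3)             # one atomic update of both fields
assert (p2.x, p2.y) == (2, 3)
assert (p.x, p.y) == (1, 0)      # original record unchanged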
diff --git a/third_party/python/pyrsistent/pyrsistent/_pset.py b/third_party/python/pyrsistent/pyrsistent/_pset.py
new file mode 100644
index 0000000000..a972ec533b
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_pset.py
@@ -0,0 +1,229 @@
+from ._compat import Set, Hashable
+import sys
+from pyrsistent._pmap import pmap
+
+PY2 = sys.version_info[0] < 3
+
+
+class PSet(object):
+ """
+ Persistent set implementation. Built on top of the persistent map. The set supports all operations
+ in the Set protocol and is Hashable.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`s` or :py:func:`pset`
+ to create an instance.
+
+ Random access and insert is log32(n) where n is the size of the set.
+
+ Some examples:
+
+ >>> s = pset([1, 2, 3, 1])
+ >>> s2 = s.add(4)
+ >>> s3 = s2.remove(2)
+ >>> s
+ pset([1, 2, 3])
+ >>> s2
+ pset([1, 2, 3, 4])
+ >>> s3
+ pset([1, 3, 4])
+ """
+ __slots__ = ('_map', '__weakref__')
+
+ def __new__(cls, m):
+ self = super(PSet, cls).__new__(cls)
+ self._map = m
+ return self
+
+ def __contains__(self, element):
+ return element in self._map
+
+ def __iter__(self):
+ return iter(self._map)
+
+ def __len__(self):
+ return len(self._map)
+
+ def __repr__(self):
+ if PY2 or not self:
+ return 'p' + str(set(self))
+
+ return 'pset([{0}])'.format(str(set(self))[1:-1])
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __hash__(self):
+ return hash(self._map)
+
+ def __reduce__(self):
+ # Pickling support
+ return pset, (list(self),)
+
+ @classmethod
+ def _from_iterable(cls, it, pre_size=8):
+ return PSet(pmap(dict((k, True) for k in it), pre_size=pre_size))
+
+ def add(self, element):
+ """
+ Return a new PSet with element added
+
+ >>> s1 = s(1, 2)
+ >>> s1.add(3)
+ pset([1, 2, 3])
+ """
+ return self.evolver().add(element).persistent()
+
+ def update(self, iterable):
+ """
+ Return a new PSet with elements in iterable added
+
+ >>> s1 = s(1, 2)
+ >>> s1.update([3, 4, 4])
+ pset([1, 2, 3, 4])
+ """
+ e = self.evolver()
+ for element in iterable:
+ e.add(element)
+
+ return e.persistent()
+
+ def remove(self, element):
+ """
+ Return a new PSet with element removed. Raises KeyError if element is not present.
+
+ >>> s1 = s(1, 2)
+ >>> s1.remove(2)
+ pset([1])
+ """
+ if element in self._map:
+ return self.evolver().remove(element).persistent()
+
+ raise KeyError("Element '%s' not present in PSet" % element)
+
+ def discard(self, element):
+ """
+ Return a new PSet with element removed. Returns itself if element is not present.
+ """
+ if element in self._map:
+ return self.evolver().remove(element).persistent()
+
+ return self
+
+ class _Evolver(object):
+ __slots__ = ('_original_pset', '_pmap_evolver')
+
+ def __init__(self, original_pset):
+ self._original_pset = original_pset
+ self._pmap_evolver = original_pset._map.evolver()
+
+ def add(self, element):
+ self._pmap_evolver[element] = True
+ return self
+
+ def remove(self, element):
+ del self._pmap_evolver[element]
+ return self
+
+ def is_dirty(self):
+ return self._pmap_evolver.is_dirty()
+
+ def persistent(self):
+ if not self.is_dirty():
+ return self._original_pset
+
+ return PSet(self._pmap_evolver.persistent())
+
+ def __len__(self):
+ return len(self._pmap_evolver)
+
+ def copy(self):
+ return self
+
+ def evolver(self):
+ """
+ Create a new evolver for this pset. For a discussion on evolvers in general see the
+ documentation for the pvector evolver.
+
+ Create the evolver and perform various mutating updates to it:
+
+ >>> s1 = s(1, 2, 3)
+ >>> e = s1.evolver()
+ >>> _ = e.add(4)
+ >>> len(e)
+ 4
+ >>> _ = e.remove(1)
+
+ The underlying pset remains the same:
+
+ >>> s1
+ pset([1, 2, 3])
+
+ The changes are kept in the evolver. An updated pset can be created using the
+ persistent() function on the evolver.
+
+ >>> s2 = e.persistent()
+ >>> s2
+ pset([2, 3, 4])
+
+ The new pset will share data with the original pset in the same way that would have
+ been done if only using operations on the pset.
+ """
+ return PSet._Evolver(self)
+
+ # All the operations and comparisons you would expect on a set.
+ #
+ # This is not very beautiful. If we avoid inheriting from PSet we can use the
+ # __slots__ concept (which requires a new style class) and hopefully save some memory.
+ __le__ = Set.__le__
+ __lt__ = Set.__lt__
+ __gt__ = Set.__gt__
+ __ge__ = Set.__ge__
+ __eq__ = Set.__eq__
+ __ne__ = Set.__ne__
+
+ __and__ = Set.__and__
+ __or__ = Set.__or__
+ __sub__ = Set.__sub__
+ __xor__ = Set.__xor__
+
+ issubset = __le__
+ issuperset = __ge__
+ union = __or__
+ intersection = __and__
+ difference = __sub__
+ symmetric_difference = __xor__
+
+ isdisjoint = Set.isdisjoint
+
+Set.register(PSet)
+Hashable.register(PSet)
+
+_EMPTY_PSET = PSet(pmap())
+
+
+def pset(iterable=(), pre_size=8):
+ """
+ Creates a persistent set from iterable. Optionally takes a sizing parameter equivalent to that
+ used for :py:func:`pmap`.
+
+ >>> s1 = pset([1, 2, 3, 2])
+ >>> s1
+ pset([1, 2, 3])
+ """
+ if not iterable:
+ return _EMPTY_PSET
+
+ return PSet._from_iterable(iterable, pre_size=pre_size)
+
+
+def s(*elements):
+ """
+ Create a persistent set.
+
+ Takes an arbitrary number of arguments to insert into the new set.
+
+ >>> s1 = s(1, 2, 3, 2)
+ >>> s1
+ pset([1, 2, 3])
+ """
+ return pset(elements)
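
Since PSet is a thin wrapper that maps every element to True (see _from_iterable above), the usual set algebra falls out of the Set protocol mixins for free. A short sketch, assuming the vendored package is importable:

# Sketch only: persistent set operations via the Set protocol mixins.
from pyrsistent import pset, s

s1 = s(1, 2, 3)
s2 = s1.add(4)                   # new set; s1 is unchanged
assert s1 == pset([1, 2, 3])
assert s2 - s1 == pset([4])
assert s1.issubset(s2)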
diff --git a/third_party/python/pyrsistent/pyrsistent/_pvector.py b/third_party/python/pyrsistent/pyrsistent/_pvector.py
new file mode 100644
index 0000000000..82232782b7
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_pvector.py
@@ -0,0 +1,713 @@
+from abc import abstractmethod, ABCMeta
+from ._compat import Sequence, Hashable
+from numbers import Integral
+import operator
+import six
+from pyrsistent._transformations import transform
+
+
+def _bitcount(val):
+ return bin(val).count("1")
+
+BRANCH_FACTOR = 32
+BIT_MASK = BRANCH_FACTOR - 1
+SHIFT = _bitcount(BIT_MASK)
+
+
+def compare_pvector(v, other, operator):
+ return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other)
+
+
+def _index_or_slice(index, stop):
+ if stop is None:
+ return index
+
+ return slice(index, stop)
+
+
+class PythonPVector(object):
+ """
+ Support structure for PVector that implements structural sharing for vectors using a trie.
+ """
+ __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__')
+
+ def __new__(cls, count, shift, root, tail):
+ self = super(PythonPVector, cls).__new__(cls)
+ self._count = count
+ self._shift = shift
+ self._root = root
+ self._tail = tail
+
+ # Derived attribute stored for performance
+ self._tail_offset = self._count - len(self._tail)
+ return self
+
+ def __len__(self):
+ return self._count
+
+ def __getitem__(self, index):
+ if isinstance(index, slice):
+ # There are more conditions than the ones below where it would be OK to
+ # return ourselves, implement those...
+ if index.start is None and index.stop is None and index.step is None:
+ return self
+
+ # This is a bit nasty realizing the whole structure as a list before
+ # slicing it but it is the fastest way I've found to date, and it's easy :-)
+ return _EMPTY_PVECTOR.extend(self.tolist()[index])
+
+ if index < 0:
+ index += self._count
+
+ return PythonPVector._node_for(self, index)[index & BIT_MASK]
+
+ def __add__(self, other):
+ return self.extend(other)
+
+ def __repr__(self):
+ return 'pvector({0})'.format(str(self.tolist()))
+
+ def __str__(self):
+ return self.__repr__()
+
+ def __iter__(self):
+ # This is kind of lazy and will produce some memory overhead but it is the fastest method
+ # by far of those tried since it uses the speed of the built in python list directly.
+ return iter(self.tolist())
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def __eq__(self, other):
+ return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq)
+
+ def __gt__(self, other):
+ return compare_pvector(self, other, operator.gt)
+
+ def __lt__(self, other):
+ return compare_pvector(self, other, operator.lt)
+
+ def __ge__(self, other):
+ return compare_pvector(self, other, operator.ge)
+
+ def __le__(self, other):
+ return compare_pvector(self, other, operator.le)
+
+ def __mul__(self, times):
+ if times <= 0 or self is _EMPTY_PVECTOR:
+ return _EMPTY_PVECTOR
+
+ if times == 1:
+ return self
+
+ return _EMPTY_PVECTOR.extend(times * self.tolist())
+
+ __rmul__ = __mul__
+
+ def _fill_list(self, node, shift, the_list):
+ if shift:
+ shift -= SHIFT
+ for n in node:
+ self._fill_list(n, shift, the_list)
+ else:
+ the_list.extend(node)
+
+ def tolist(self):
+ """
+ The fastest way to convert the vector into a python list.
+ """
+ the_list = []
+ self._fill_list(self._root, self._shift, the_list)
+ the_list.extend(self._tail)
+ return the_list
+
+ def _totuple(self):
+ """
+ Returns the content as a python tuple.
+ """
+ return tuple(self.tolist())
+
+ def __hash__(self):
+ # Taking the easy way out again...
+ return hash(self._totuple())
+
+ def transform(self, *transformations):
+ return transform(self, transformations)
+
+ def __reduce__(self):
+ # Pickling support
+ return pvector, (self.tolist(),)
+
+ def mset(self, *args):
+ if len(args) % 2:
+ raise TypeError("mset expected an even number of arguments")
+
+ evolver = self.evolver()
+ for i in range(0, len(args), 2):
+ evolver[args[i]] = args[i+1]
+
+ return evolver.persistent()
+
+ class Evolver(object):
+ __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes',
+ '_extra_tail', '_cached_leafs', '_orig_pvector')
+
+ def __init__(self, v):
+ self._reset(v)
+
+ def __getitem__(self, index):
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index < 0:
+ index += self._count + len(self._extra_tail)
+
+ if self._count <= index < self._count + len(self._extra_tail):
+ return self._extra_tail[index - self._count]
+
+ return PythonPVector._node_for(self, index)[index & BIT_MASK]
+
+ def _reset(self, v):
+ self._count = v._count
+ self._shift = v._shift
+ self._root = v._root
+ self._tail = v._tail
+ self._tail_offset = v._tail_offset
+ self._dirty_nodes = {}
+ self._cached_leafs = {}
+ self._extra_tail = []
+ self._orig_pvector = v
+
+ def append(self, element):
+ self._extra_tail.append(element)
+ return self
+
+ def extend(self, iterable):
+ self._extra_tail.extend(iterable)
+ return self
+
+ def set(self, index, val):
+ self[index] = val
+ return self
+
+ def __setitem__(self, index, val):
+ if not isinstance(index, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__)
+
+ if index < 0:
+ index += self._count + len(self._extra_tail)
+
+ if 0 <= index < self._count:
+ node = self._cached_leafs.get(index >> SHIFT)
+ if node:
+ node[index & BIT_MASK] = val
+ elif index >= self._tail_offset:
+ if id(self._tail) not in self._dirty_nodes:
+ self._tail = list(self._tail)
+ self._dirty_nodes[id(self._tail)] = True
+ self._cached_leafs[index >> SHIFT] = self._tail
+ self._tail[index & BIT_MASK] = val
+ else:
+ self._root = self._do_set(self._shift, self._root, index, val)
+ elif self._count <= index < self._count + len(self._extra_tail):
+ self._extra_tail[index - self._count] = val
+ elif index == self._count + len(self._extra_tail):
+ self._extra_tail.append(val)
+ else:
+ raise IndexError("Index out of range: %s" % (index,))
+
+ def _do_set(self, level, node, i, val):
+ if id(node) in self._dirty_nodes:
+ ret = node
+ else:
+ ret = list(node)
+ self._dirty_nodes[id(ret)] = True
+
+ if level == 0:
+ ret[i & BIT_MASK] = val
+ self._cached_leafs[i >> SHIFT] = ret
+ else:
+ sub_index = (i >> level) & BIT_MASK # >>>
+ ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
+
+ return ret
+
+ def delete(self, index):
+ del self[index]
+ return self
+
+ def __delitem__(self, key):
+ if self._orig_pvector:
+ # All structural sharing bets are off, base evolver on _extra_tail only
+ l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist()
+ l.extend(self._extra_tail)
+ self._reset(_EMPTY_PVECTOR)
+ self._extra_tail = l
+
+ del self._extra_tail[key]
+
+ def persistent(self):
+ result = self._orig_pvector
+ if self.is_dirty():
+ result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail)
+ self._reset(result)
+
+ return result
+
+ def __len__(self):
+ return self._count + len(self._extra_tail)
+
+ def is_dirty(self):
+ return bool(self._dirty_nodes or self._extra_tail)
+
+ def evolver(self):
+ return PythonPVector.Evolver(self)
+
+ def set(self, i, val):
+ # This method could be implemented by a call to mset() but doing so would cause
+ # a ~5 X performance penalty on PyPy (considered the primary platform for this implementation
+ # of PVector) so we're keeping this implementation for now.
+
+ if not isinstance(i, Integral):
+ raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__)
+
+ if i < 0:
+ i += self._count
+
+ if 0 <= i < self._count:
+ if i >= self._tail_offset:
+ new_tail = list(self._tail)
+ new_tail[i & BIT_MASK] = val
+ return PythonPVector(self._count, self._shift, self._root, new_tail)
+
+ return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail)
+
+ if i == self._count:
+ return self.append(val)
+
+ raise IndexError("Index out of range: %s" % (i,))
+
+ def _do_set(self, level, node, i, val):
+ ret = list(node)
+ if level == 0:
+ ret[i & BIT_MASK] = val
+ else:
+ sub_index = (i >> level) & BIT_MASK # >>>
+ ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val)
+
+ return ret
+
+ @staticmethod
+ def _node_for(pvector_like, i):
+ if 0 <= i < pvector_like._count:
+ if i >= pvector_like._tail_offset:
+ return pvector_like._tail
+
+ node = pvector_like._root
+ for level in range(pvector_like._shift, 0, -SHIFT):
+ node = node[(i >> level) & BIT_MASK] # >>>
+
+ return node
+
+ raise IndexError("Index out of range: %s" % (i,))
+
+ def _create_new_root(self):
+ new_shift = self._shift
+
+ # Overflow root?
+ if (self._count >> SHIFT) > (1 << self._shift): # >>>
+ new_root = [self._root, self._new_path(self._shift, self._tail)]
+ new_shift += SHIFT
+ else:
+ new_root = self._push_tail(self._shift, self._root, self._tail)
+
+ return new_root, new_shift
+
+ def append(self, val):
+ if len(self._tail) < BRANCH_FACTOR:
+ new_tail = list(self._tail)
+ new_tail.append(val)
+ return PythonPVector(self._count + 1, self._shift, self._root, new_tail)
+
+ # Full tail, push into tree
+ new_root, new_shift = self._create_new_root()
+ return PythonPVector(self._count + 1, new_shift, new_root, [val])
+
+ def _new_path(self, level, node):
+ if level == 0:
+ return node
+
+ return [self._new_path(level - SHIFT, node)]
+
+ def _mutating_insert_tail(self):
+ self._root, self._shift = self._create_new_root()
+ self._tail = []
+
+ def _mutating_fill_tail(self, offset, sequence):
+ max_delta_len = BRANCH_FACTOR - len(self._tail)
+ delta = sequence[offset:offset + max_delta_len]
+ self._tail.extend(delta)
+ delta_len = len(delta)
+ self._count += delta_len
+ return offset + delta_len
+
+ def _mutating_extend(self, sequence):
+ offset = 0
+ sequence_len = len(sequence)
+ while offset < sequence_len:
+ offset = self._mutating_fill_tail(offset, sequence)
+ if len(self._tail) == BRANCH_FACTOR:
+ self._mutating_insert_tail()
+
+ self._tail_offset = self._count - len(self._tail)
+
+ def extend(self, obj):
+ # Mutates the new vector directly for efficiency but that's only an
+ # implementation detail, once it is returned it should be considered immutable
+ l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj)
+ if l:
+ new_vector = self.append(l[0])
+ new_vector._mutating_extend(l[1:])
+ return new_vector
+
+ return self
+
+ def _push_tail(self, level, parent, tail_node):
+ """
+ if parent is leaf, insert node,
+ else does it map to an existing child? ->
+ node_to_insert = push node one more level
+ else alloc new path
+
+ return node_to_insert placed in copy of parent
+ """
+ ret = list(parent)
+
+ if level == SHIFT:
+ ret.append(tail_node)
+ return ret
+
+ sub_index = ((self._count - 1) >> level) & BIT_MASK # >>>
+ if len(parent) > sub_index:
+ ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node)
+ return ret
+
+ ret.append(self._new_path(level - SHIFT, tail_node))
+ return ret
+
+ def index(self, value, *args, **kwargs):
+ return self.tolist().index(value, *args, **kwargs)
+
+ def count(self, value):
+ return self.tolist().count(value)
+
+ def delete(self, index, stop=None):
+ l = self.tolist()
+ del l[_index_or_slice(index, stop)]
+ return _EMPTY_PVECTOR.extend(l)
+
+ def remove(self, value):
+ l = self.tolist()
+ l.remove(value)
+ return _EMPTY_PVECTOR.extend(l)
+
+@six.add_metaclass(ABCMeta)
+class PVector(object):
+ """
+ Persistent vector implementation. Meant as a replacement for the cases where you would normally
+ use a Python list.
+
+ Do not instantiate directly, instead use the factory functions :py:func:`v` and :py:func:`pvector` to
+ create an instance.
+
+ Heavily influenced by the persistent vector available in Clojure. Initially this was more or
+ less just a port of the Java code for the Clojure vector. It has since been modified and to
+ some extent optimized for usage in Python.
+
+ The vector is organized as a trie, any mutating method will return a new vector that contains the changes. No
+ updates are done to the original vector. Structural sharing between vectors is applied where possible to save
+ space and to avoid making complete copies.
+
+ This structure corresponds most closely to the built in list type and is intended as a replacement. Where the
+ semantics are (more or less) the same, the same function names have been used, but in some cases that is not
+ possible, for example for assignments.
+
+ The PVector implements the Sequence protocol and is Hashable.
+
+ Inserts are amortized O(1). Random access is log32(n) where n is the size of the vector.
+
+ The following are examples of some common operations on persistent vectors:
+
+ >>> p = v(1, 2, 3)
+ >>> p2 = p.append(4)
+ >>> p3 = p2.extend([5, 6, 7])
+ >>> p
+ pvector([1, 2, 3])
+ >>> p2
+ pvector([1, 2, 3, 4])
+ >>> p3
+ pvector([1, 2, 3, 4, 5, 6, 7])
+ >>> p3[5]
+ 6
+ >>> p.set(1, 99)
+ pvector([1, 99, 3])
+ >>>
+ """
+
+ @abstractmethod
+ def __len__(self):
+ """
+ >>> len(v(1, 2, 3))
+ 3
+ """
+
+ @abstractmethod
+ def __getitem__(self, index):
+ """
+ Get value at index. Full slicing support.
+
+ >>> v1 = v(5, 6, 7, 8)
+ >>> v1[2]
+ 7
+ >>> v1[1:3]
+ pvector([6, 7])
+ """
+
+ @abstractmethod
+ def __add__(self, other):
+ """
+ >>> v1 = v(1, 2)
+ >>> v2 = v(3, 4)
+ >>> v1 + v2
+ pvector([1, 2, 3, 4])
+ """
+
+ @abstractmethod
+ def __mul__(self, times):
+ """
+ >>> v1 = v(1, 2)
+ >>> 3 * v1
+ pvector([1, 2, 1, 2, 1, 2])
+ """
+
+ @abstractmethod
+ def __hash__(self):
+ """
+ >>> v1 = v(1, 2, 3)
+ >>> v2 = v(1, 2, 3)
+ >>> hash(v1) == hash(v2)
+ True
+ """
+
+ @abstractmethod
+ def evolver(self):
+ """
+ Create a new evolver for this pvector. The evolver acts as a mutable view of the vector
+ with "transaction like" semantics. No part of the underlying vector i updated, it is still
+ fully immutable. Furthermore multiple evolvers created from the same pvector do not
+ interfere with each other.
+
+ You may want to use an evolver instead of working directly with the pvector in the
+ following cases:
+
+ * Multiple updates are done to the same vector and the intermediate results are of no
+ interest. In this case using an evolver may be more efficient and easier to work with.
+ * You need to pass a vector into a legacy function or a function that you have no control
+ over which performs in place mutations of lists. In this case pass an evolver instance
+ instead and then create a new pvector from the evolver once the function returns.
+
+ The following example illustrates a typical workflow when working with evolvers. It also
+ displays most of the API (which is kept small by design, you should not be tempted to
+ use evolvers in excess ;-)).
+
+ Create the evolver and perform various mutating updates to it:
+
+ >>> v1 = v(1, 2, 3, 4, 5)
+ >>> e = v1.evolver()
+ >>> e[1] = 22
+ >>> _ = e.append(6)
+ >>> _ = e.extend([7, 8, 9])
+ >>> e[8] += 1
+ >>> len(e)
+ 9
+
+ The underlying pvector remains the same:
+
+ >>> v1
+ pvector([1, 2, 3, 4, 5])
+
+ The changes are kept in the evolver. An updated pvector can be created using the
+ persistent() function on the evolver.
+
+ >>> v2 = e.persistent()
+ >>> v2
+ pvector([1, 22, 3, 4, 5, 6, 7, 8, 10])
+
+ The new pvector will share data with the original pvector in the same way that would have
+ been done if only using operations on the pvector.
+ """
+
+ @abstractmethod
+ def mset(self, *args):
+ """
+ Return a new vector with elements in specified positions replaced by values (multi set).
+
+ Elements on even positions in the argument list are interpreted as indexes while
+ elements on odd positions are considered values.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.mset(0, 11, 2, 33)
+ pvector([11, 2, 33])
+ """
+
+ @abstractmethod
+ def set(self, i, val):
+ """
+ Return a new vector with element at position i replaced with val. The original vector remains unchanged.
+
+ Setting a value one step beyond the end of the vector is equal to appending. Setting beyond that will
+ result in an IndexError.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.set(1, 4)
+ pvector([1, 4, 3])
+ >>> v1.set(3, 4)
+ pvector([1, 2, 3, 4])
+ >>> v1.set(-1, 4)
+ pvector([1, 2, 4])
+ """
+
+ @abstractmethod
+ def append(self, val):
+ """
+ Return a new vector with val appended.
+
+ >>> v1 = v(1, 2)
+ >>> v1.append(3)
+ pvector([1, 2, 3])
+ """
+
+ @abstractmethod
+ def extend(self, obj):
+ """
+ Return a new vector with all values in obj appended to it. Obj may be another
+ PVector or any other Iterable.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1.extend([4, 5])
+ pvector([1, 2, 3, 4, 5])
+ """
+
+ @abstractmethod
+ def index(self, value, *args, **kwargs):
+ """
+ Return first index of value. Additional indexes may be supplied to limit the search to a
+ sub range of the vector.
+
+ >>> v1 = v(1, 2, 3, 4, 3)
+ >>> v1.index(3)
+ 2
+ >>> v1.index(3, 3, 5)
+ 4
+ """
+
+ @abstractmethod
+ def count(self, value):
+ """
+ Return the number of times that value appears in the vector.
+
+ >>> v1 = v(1, 4, 3, 4)
+ >>> v1.count(4)
+ 2
+ """
+
+ @abstractmethod
+ def transform(self, *transformations):
+ """
+ Transform arbitrarily complex combinations of PVectors and PMaps. A transformation
+ consists of two parts. One match expression that specifies which elements to transform
+ and one transformation function that performs the actual transformation.
+
+ >>> from pyrsistent import freeze, ny
+ >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'},
+ ... {'author': 'Steve', 'content': 'A slightly longer article'}],
+ ... 'weather': {'temperature': '11C', 'wind': '5m/s'}})
+ >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c)
+ >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c)
+ >>> very_short_news.articles[0].content
+ 'A short article'
+ >>> very_short_news.articles[1].content
+ 'A slightly long...'
+
+ When nothing has been transformed the original data structure is kept
+
+ >>> short_news is news_paper
+ True
+ >>> very_short_news is news_paper
+ False
+ >>> very_short_news.articles[0] is news_paper.articles[0]
+ True
+ """
+
+ @abstractmethod
+ def delete(self, index, stop=None):
+ """
+ Delete a portion of the vector by index or range.
+
+ >>> v1 = v(1, 2, 3, 4, 5)
+ >>> v1.delete(1)
+ pvector([1, 3, 4, 5])
+ >>> v1.delete(1, 3)
+ pvector([1, 4, 5])
+ """
+
+ @abstractmethod
+ def remove(self, value):
+ """
+ Remove the first occurrence of a value from the vector.
+
+ >>> v1 = v(1, 2, 3, 2, 1)
+ >>> v2 = v1.remove(1)
+ >>> v2
+ pvector([2, 3, 2, 1])
+ >>> v2.remove(1)
+ pvector([2, 3, 2])
+ """
+
+
+_EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], [])
+PVector.register(PythonPVector)
+Sequence.register(PVector)
+Hashable.register(PVector)
+
+def python_pvector(iterable=()):
+ """
+ Create a new persistent vector containing the elements in iterable.
+
+ >>> v1 = pvector([1, 2, 3])
+ >>> v1
+ pvector([1, 2, 3])
+ """
+ return _EMPTY_PVECTOR.extend(iterable)
+
+try:
+ # Use the C extension as underlying trie implementation if it is available
+ import os
+ if os.environ.get('PYRSISTENT_NO_C_EXTENSION'):
+ pvector = python_pvector
+ else:
+ from pvectorc import pvector
+ PVector.register(type(pvector()))
+except ImportError:
+ pvector = python_pvector
+
+
+def v(*elements):
+ """
+ Create a new persistent vector containing all parameters to this function.
+
+ >>> v1 = v(1, 2, 3)
+ >>> v1
+ pvector([1, 2, 3])
+ """
+ return pvector(elements)
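
The trie walk in PythonPVector._node_for above amounts to slicing the index into 5-bit digits, one per level. A standalone sketch of that decomposition (constants mirror the module above):

# Sketch only: how an index maps to a path of child slots in the trie.
BRANCH_FACTOR = 32
BIT_MASK = BRANCH_FACTOR - 1     # 0b11111
SHIFT = 5                        # bits consumed per trie level

def trie_path(index, shift):
    """Yield the child slot chosen at each level, root first."""
    for level in range(shift, 0, -SHIFT):
        yield (index >> level) & BIT_MASK
    yield index & BIT_MASK       # position within the leaf node

# With shift == 10 (two internal levels), index 1234 walks slots 1, 6, 18.
assert list(trie_path(1234, 10)) == [1, 6, 18]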
diff --git a/third_party/python/pyrsistent/pyrsistent/_toolz.py b/third_party/python/pyrsistent/pyrsistent/_toolz.py
new file mode 100644
index 0000000000..6643ee860d
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_toolz.py
@@ -0,0 +1,83 @@
+"""
+Functionality copied from the toolz package to avoid having
+to add toolz as a dependency.
+
+See https://github.com/pytoolz/toolz/.
+
+ toolz is released under the BSD licence. Below is the licence text
+from toolz as it appeared when copying the code.
+
+--------------------------------------------------------------
+
+Copyright (c) 2013 Matthew Rocklin
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ a. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ b. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ c. Neither the name of toolz nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGE.
+"""
+import operator
+from six.moves import reduce
+
+
+def get_in(keys, coll, default=None, no_default=False):
+ """
+ NB: This is a straight copy of the get_in implementation found in
+ the toolz library (https://github.com/pytoolz/toolz/). It works
+ with persistent data structures as well as the corresponding
+ datastructures from the stdlib.
+
+ Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys.
+
+ If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless
+ ``no_default`` is specified, then it raises KeyError or IndexError.
+
+ ``get_in`` is a generalization of ``operator.getitem`` for nested data
+ structures such as dictionaries and lists.
+ >>> from pyrsistent import freeze
+ >>> transaction = freeze({'name': 'Alice',
+ ... 'purchase': {'items': ['Apple', 'Orange'],
+ ... 'costs': [0.50, 1.25]},
+ ... 'credit card': '5555-1234-1234-1234'})
+ >>> get_in(['purchase', 'items', 0], transaction)
+ 'Apple'
+ >>> get_in(['name'], transaction)
+ 'Alice'
+ >>> get_in(['purchase', 'total'], transaction)
+ >>> get_in(['purchase', 'items', 'apple'], transaction)
+ >>> get_in(['purchase', 'items', 10], transaction)
+ >>> get_in(['purchase', 'total'], transaction, 0)
+ 0
+ >>> get_in(['y'], {}, no_default=True)
+ Traceback (most recent call last):
+ ...
+ KeyError: 'y'
+ """
+ try:
+ return reduce(operator.getitem, keys, coll)
+ except (KeyError, IndexError, TypeError):
+ if no_default:
+ raise
+ return default
\ No newline at end of file
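
As the implementation shows, the happy path of get_in is literally a reduce over operator.getitem; a sketch of the equivalence:

# Sketch only: get_in without default handling is a getitem fold.
import operator
from functools import reduce

data = {'a': {'b': [10, 20]}}
assert reduce(operator.getitem, ['a', 'b', 1], data) == 20
assert data['a']['b'][1] == 20   # same lookup, spelled out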
diff --git a/third_party/python/pyrsistent/pyrsistent/_transformations.py b/third_party/python/pyrsistent/pyrsistent/_transformations.py
new file mode 100644
index 0000000000..612098969b
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/_transformations.py
@@ -0,0 +1,143 @@
+import re
+import six
+try:
+ from inspect import Parameter, signature
+except ImportError:
+ signature = None
+ try:
+ from inspect import getfullargspec as getargspec
+ except ImportError:
+ from inspect import getargspec
+
+
+_EMPTY_SENTINEL = object()
+
+
+def inc(x):
+ """ Add one to the current value """
+ return x + 1
+
+
+def dec(x):
+ """ Subtract one from the current value """
+ return x - 1
+
+
+def discard(evolver, key):
+ """ Discard the element and returns a structure without the discarded elements """
+ try:
+ del evolver[key]
+ except KeyError:
+ pass
+
+
+# Matchers
+def rex(expr):
+ """ Regular expression matcher to use together with transform functions """
+ r = re.compile(expr)
+ return lambda key: isinstance(key, six.string_types) and r.match(key)
+
+
+def ny(_):
+ """ Matcher that matches any value """
+ return True
+
+
+# Support functions
+def _chunks(l, n):
+ for i in range(0, len(l), n):
+ yield l[i:i + n]
+
+
+def transform(structure, transformations):
+ r = structure
+ for path, command in _chunks(transformations, 2):
+ r = _do_to_path(r, path, command)
+ return r
+
+
+def _do_to_path(structure, path, command):
+ if not path:
+ return command(structure) if callable(command) else command
+
+ kvs = _get_keys_and_values(structure, path[0])
+ return _update_structure(structure, kvs, path[1:], command)
+
+
+def _items(structure):
+ try:
+ return structure.items()
+ except AttributeError:
+ # Support wider range of structures by adding a transform_items() or similar?
+ return list(enumerate(structure))
+
+
+def _get(structure, key, default):
+ try:
+ if hasattr(structure, '__getitem__'):
+ return structure[key]
+
+ return getattr(structure, key)
+
+ except (IndexError, KeyError):
+ return default
+
+
+def _get_keys_and_values(structure, key_spec):
+ if callable(key_spec):
+ # Support predicates as callable objects in the path
+ arity = _get_arity(key_spec)
+ if arity == 1:
+ # Unary predicates are called with the "key" of the path
+ # - eg a key in a mapping, an index in a sequence.
+ return [(k, v) for k, v in _items(structure) if key_spec(k)]
+ elif arity == 2:
+ # Binary predicates are called with the key and the corresponding
+ # value.
+ return [(k, v) for k, v in _items(structure) if key_spec(k, v)]
+ else:
+ # Other arities are an error.
+ raise ValueError(
+ "callable in transform path must take 1 or 2 arguments"
+ )
+
+ # Non-callables are used as-is as a key.
+ return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))]
+
+
+if signature is None:
+ def _get_arity(f):
+ argspec = getargspec(f)
+ return len(argspec.args) - len(argspec.defaults or ())
+else:
+ def _get_arity(f):
+ return sum(
+ 1
+ for p
+ in signature(f).parameters.values()
+ if p.default is Parameter.empty
+ and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD)
+ )
+
+
+def _update_structure(structure, kvs, path, command):
+ from pyrsistent._pmap import pmap
+ e = structure.evolver()
+ if not path and command is discard:
+ # Do this in reverse to avoid index problems with vectors. See #92.
+ for k, v in reversed(kvs):
+ discard(e, k)
+ else:
+ for k, v in kvs:
+ is_empty = False
+ if v is _EMPTY_SENTINEL:
+ # Allow expansion of structure but make sure to cover the case
+ # when an empty pmap is added as leaf node. See #154.
+ is_empty = True
+ v = pmap()
+
+ result = _do_to_path(v, path, command)
+ if result is not v or is_empty:
+ e[k] = result
+
+ return e.persistent()
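
Putting the path machinery above together, a sketch of a transform call through the public API (freeze and ny are exported by the package):

# Sketch only: a path of [key, matcher] selecting every index of a vector.
from pyrsistent import freeze, ny

data = freeze({'scores': [1, 2, 3], 'name': 'x'})
doubled = data.transform(['scores', ny], lambda v: 2 * v)
assert list(doubled['scores']) == [2, 4, 6]
assert data['scores'][0] == 1    # original structure untouched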
diff --git a/third_party/python/pyrsistent/pyrsistent/py.typed b/third_party/python/pyrsistent/pyrsistent/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/py.typed
diff --git a/third_party/python/pyrsistent/pyrsistent/typing.py b/third_party/python/pyrsistent/pyrsistent/typing.py
new file mode 100644
index 0000000000..6a86c831ba
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/typing.py
@@ -0,0 +1,80 @@
+"""Helpers for use with type annotation.
+
+Use the empty classes in this module when annotating the types of Pyrsistent
+objects, instead of using the actual collection class.
+
+For example,
+
+ from pyrsistent import pvector
+ from pyrsistent.typing import PVector
+
+ myvector: PVector[str] = pvector(['a', 'b', 'c'])
+
+"""
+from __future__ import absolute_import
+
+try:
+ from typing import Container
+ from typing import Hashable
+ from typing import Generic
+ from typing import Iterable
+ from typing import Mapping
+ from typing import Sequence
+ from typing import Sized
+ from typing import TypeVar
+
+ __all__ = [
+ 'CheckedPMap',
+ 'CheckedPSet',
+ 'CheckedPVector',
+ 'PBag',
+ 'PDeque',
+ 'PList',
+ 'PMap',
+ 'PSet',
+ 'PVector',
+ ]
+
+ T = TypeVar('T')
+ KT = TypeVar('KT')
+ VT = TypeVar('VT')
+
+ class CheckedPMap(Mapping[KT, VT], Hashable):
+ pass
+
+ # PSet.add and PSet.discard have different type signatures than that of Set.
+ class CheckedPSet(Generic[T], Hashable):
+ pass
+
+ class CheckedPVector(Sequence[T], Hashable):
+ pass
+
+ class PBag(Container[T], Iterable[T], Sized, Hashable):
+ pass
+
+ class PDeque(Sequence[T], Hashable):
+ pass
+
+ class PList(Sequence[T], Hashable):
+ pass
+
+ class PMap(Mapping[KT, VT], Hashable):
+ pass
+
+    # PSet.add and PSet.discard have different type signatures than those of Set.
+ class PSet(Generic[T], Hashable):
+ pass
+
+ class PVector(Sequence[T], Hashable):
+ pass
+
+ class PVectorEvolver(Generic[T]):
+ pass
+
+ class PMapEvolver(Generic[KT, VT]):
+ pass
+
+ class PSetEvolver(Generic[T]):
+ pass
+except ImportError:
+ pass
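The module docstring above shows the intended pattern: construct values with
the real pyrsistent factories, annotate with the empty classes from
pyrsistent.typing. A slightly fuller sketch (not part of the vendored
sources; assumes pyrsistent is importable):

    from pyrsistent import pmap, pvector
    from pyrsistent.typing import PMap, PVector

    counts: PMap[str, int] = pmap({'a': 1})
    names: PVector[str] = pvector(['x', 'y'])

    # The runtime objects are ordinary pyrsistent collections; only the
    # annotations refer to the helper classes, so type checkers can see
    # the element types without importing the implementation classes.
    counts = counts.set('b', 2)
    names = names.append('z')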
diff --git a/third_party/python/pyrsistent/pyrsistent/typing.pyi b/third_party/python/pyrsistent/pyrsistent/typing.pyi
new file mode 100644
index 0000000000..0221c48cc9
--- /dev/null
+++ b/third_party/python/pyrsistent/pyrsistent/typing.pyi
@@ -0,0 +1,292 @@
+# flake8: noqa: E704
+# from https://gist.github.com/WuTheFWasThat/091a17d4b5cab597dfd5d4c2d96faf09
+# Stubs for pyrsistent (Python 3.6)
+#
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import Generic
+from typing import Hashable
+from typing import Iterator
+from typing import Iterable
+from typing import List
+from typing import Mapping
+from typing import Optional
+from typing import Sequence
+from typing import AbstractSet
+from typing import Sized
+from typing import Set
+from typing import Tuple
+from typing import TypeVar
+from typing import Type
+from typing import Union
+from typing import overload
+
+T = TypeVar('T')
+KT = TypeVar('KT')
+VT = TypeVar('VT')
+
+
+class PMap(Mapping[KT, VT], Hashable):
+ def __add__(self, other: PMap[KT, VT]) -> PMap[KT, VT]: ...
+ def __getitem__(self, key: KT) -> VT: ...
+ def __getattr__(self, key: str) -> VT: ...
+ def __hash__(self) -> int: ...
+ def __iter__(self) -> Iterator[KT]: ...
+ def __len__(self) -> int: ...
+ def copy(self) -> PMap[KT, VT]: ...
+ def discard(self, key: KT) -> PMap[KT, VT]: ...
+ def evolver(self) -> PMapEvolver[KT, VT]: ...
+ def iteritems(self) -> Iterable[Tuple[KT, VT]]: ...
+ def iterkeys(self) -> Iterable[KT]: ...
+ def itervalues(self) -> Iterable[VT]: ...
+ def remove(self, key: KT) -> PMap[KT, VT]: ...
+ def set(self, key: KT, val: VT) -> PMap[KT, VT]: ...
+ def transform(self, *transformations: Any) -> PMap[KT, VT]: ...
+ def update(self, *args: Mapping): ...
+ def update_with(self, update_fn: Callable[[VT, VT], VT], *args: Mapping) -> Any: ...
+
+
+class PMapEvolver(Generic[KT, VT]):
+ def __delitem__(self, key: KT) -> None: ...
+ def __getitem__(self, key: KT) -> VT: ...
+ def __len__(self) -> int: ...
+ def __setitem__(self, key: KT, val: VT) -> None: ...
+ def is_dirty(self) -> bool: ...
+ def persistent(self) -> PMap[KT, VT]: ...
+ def remove(self, key: KT) -> PMapEvolver[KT, VT]: ...
+ def set(self, key: KT, val: VT) -> PMapEvolver[KT, VT]: ...
+
+
+class PVector(Sequence[T], Hashable):
+ def __add__(self, other: PVector[T]) -> PVector[T]: ...
+ @overload
+ def __getitem__(self, index: int) -> T: ...
+ @overload
+ def __getitem__(self, index: slice) -> PVector[T]: ...
+ def __hash__(self) -> int: ...
+ def __len__(self) -> int: ...
+ def __mul__(self, other: PVector[T]) -> PVector[T]: ...
+ def append(self, val: T) -> PVector[T]: ...
+ def delete(self, index: int, stop: Optional[int]) -> PVector[T]: ...
+ def evolver(self) -> PVectorEvolver[T]: ...
+ def extend(self, obj: Iterable[T]) -> PVector[T]: ...
+ def tolist(self) -> List[T]: ...
+ def mset(self, *args: Iterable[Union[T, int]]) -> PVector[T]: ...
+ def remove(self, value: T) -> PVector[T]: ...
+ # Not compatible with MutableSequence
+ def set(self, i: int, val: T) -> PVector[T]: ...
+ def transform(self, *transformations: Any) -> PVector[T]: ...
+
+
+class PVectorEvolver(Sequence[T], Sized):
+ def __delitem__(self, i: Union[int, slice]) -> None: ...
+ @overload
+ def __getitem__(self, index: int) -> T: ...
+ # Not actually supported
+ @overload
+ def __getitem__(self, index: slice) -> PVectorEvolver[T]: ...
+ def __len__(self) -> int: ...
+ def __setitem__(self, index: int, val: T) -> None: ...
+ def append(self, val: T) -> PVectorEvolver[T]: ...
+ def delete(self, value: T) -> PVectorEvolver[T]: ...
+ def extend(self, obj: Iterable[T]) -> PVectorEvolver[T]: ...
+ def is_dirty(self) -> bool: ...
+ def persistent(self) -> PVector[T]: ...
+ def set(self, i: int, val: T) -> PVectorEvolver[T]: ...
+
+
+class PSet(AbstractSet[T], Hashable):
+ def __contains__(self, element: object) -> bool: ...
+ def __hash__(self) -> int: ...
+ def __iter__(self) -> Iterator[T]: ...
+ def __len__(self) -> int: ...
+ def add(self, element: T) -> PSet[T]: ...
+ def copy(self) -> PSet[T]: ...
+ def difference(self, iterable: Iterable) -> PSet[T]: ...
+ def discard(self, element: T) -> PSet[T]: ...
+ def evolver(self) -> PSetEvolver[T]: ...
+ def intersection(self, iterable: Iterable) -> PSet[T]: ...
+ def issubset(self, iterable: Iterable) -> bool: ...
+ def issuperset(self, iterable: Iterable) -> bool: ...
+ def remove(self, element: T) -> PSet[T]: ...
+ def symmetric_difference(self, iterable: Iterable[T]) -> PSet[T]: ...
+ def union(self, iterable: Iterable[T]) -> PSet[T]: ...
+ def update(self, iterable: Iterable[T]) -> PSet[T]: ...
+
+
+class PSetEvolver(Generic[T], Sized):
+ def __len__(self) -> int: ...
+ def add(self, element: T) -> PSetEvolver[T]: ...
+ def is_dirty(self) -> bool: ...
+ def persistent(self) -> PSet[T]: ...
+ def remove(self, element: T) -> PSetEvolver[T]: ...
+
+
+class PBag(Generic[T], Sized, Hashable):
+ def __add__(self, other: PBag[T]) -> PBag[T]: ...
+ def __and__(self, other: PBag[T]) -> PBag[T]: ...
+ def __contains__(self, elem: object) -> bool: ...
+ def __hash__(self) -> int: ...
+ def __iter__(self) -> Iterator[T]: ...
+ def __len__(self) -> int: ...
+ def __or__(self, other: PBag[T]) -> PBag[T]: ...
+ def __sub__(self, other: PBag[T]) -> PBag[T]: ...
+ def add(self, elem: T) -> PBag[T]: ...
+ def count(self, elem: T) -> int: ...
+ def remove(self, elem: T) -> PBag[T]: ...
+ def update(self, iterable: Iterable[T]) -> PBag[T]: ...
+
+
+class PDeque(Sequence[T], Hashable):
+ @overload
+ def __getitem__(self, index: int) -> T: ...
+ @overload
+ def __getitem__(self, index: slice) -> PDeque[T]: ...
+ def __hash__(self) -> int: ...
+ def __len__(self) -> int: ...
+ def __lt__(self, other: PDeque[T]) -> bool: ...
+ def append(self, elem: T) -> PDeque[T]: ...
+ def appendleft(self, elem: T) -> PDeque[T]: ...
+ def extend(self, iterable: Iterable[T]) -> PDeque[T]: ...
+ def extendleft(self, iterable: Iterable[T]) -> PDeque[T]: ...
+ @property
+ def left(self) -> T: ...
+ # The real return type is Integral according to what pyrsistent
+ # checks at runtime but mypy doesn't deal in numeric.*:
+ # https://github.com/python/mypy/issues/2636
+ @property
+ def maxlen(self) -> int: ...
+ def pop(self, count: int = 1) -> PDeque[T]: ...
+ def popleft(self, count: int = 1) -> PDeque[T]: ...
+ def remove(self, elem: T) -> PDeque[T]: ...
+ def reverse(self) -> PDeque[T]: ...
+ @property
+ def right(self) -> T: ...
+ def rotate(self, steps: int) -> PDeque[T]: ...
+
+
+class PList(Sequence[T], Hashable):
+ @overload
+ def __getitem__(self, index: int) -> T: ...
+ @overload
+ def __getitem__(self, index: slice) -> PList[T]: ...
+ def __hash__(self) -> int: ...
+ def __len__(self) -> int: ...
+ def __lt__(self, other: PList[T]) -> bool: ...
+ def __gt__(self, other: PList[T]) -> bool: ...
+ def cons(self, elem: T) -> PList[T]: ...
+ @property
+ def first(self) -> T: ...
+ def mcons(self, iterable: Iterable[T]) -> PList[T]: ...
+ def remove(self, elem: T) -> PList[T]: ...
+ @property
+ def rest(self) -> PList[T]: ...
+ def reverse(self) -> PList[T]: ...
+ def split(self, index: int) -> Tuple[PList[T], PList[T]]: ...
+
+T_PClass = TypeVar('T_PClass', bound='PClass')
+
+class PClass(Hashable):
+ def __new__(cls, **kwargs: Any): ...
+ def set(self: T_PClass, *args: Any, **kwargs: Any) -> T_PClass: ...
+ @classmethod
+ def create(
+ cls: Type[T_PClass],
+ kwargs: Any,
+ _factory_fields: Optional[Any] = ...,
+ ignore_extra: bool = ...,
+ ) -> T_PClass: ...
+ def serialize(self, format: Optional[Any] = ...): ...
+ def transform(self, *transformations: Any): ...
+ def __eq__(self, other: object): ...
+ def __ne__(self, other: object): ...
+ def __hash__(self): ...
+ def __reduce__(self): ...
+ def evolver(self) -> PClassEvolver: ...
+ def remove(self: T_PClass, name: Any) -> T_PClass: ...
+
+class PClassEvolver:
+ def __init__(self, original: Any, initial_dict: Any) -> None: ...
+ def __getitem__(self, item: Any): ...
+ def set(self, key: Any, value: Any): ...
+ def __setitem__(self, key: Any, value: Any) -> None: ...
+ def remove(self, item: Any): ...
+ def __delitem__(self, item: Any) -> None: ...
+ def persistent(self) -> PClass: ...
+ def __getattr__(self, item: Any): ...
+
+
+
+class CheckedPMap(PMap[KT, VT]):
+ __key_type__: Type[KT]
+ __value_type__: Type[VT]
+ def __new__(cls, source: Mapping[KT, VT] = ..., size: int = ...) -> CheckedPMap: ...
+ @classmethod
+ def create(cls, source_data: Mapping[KT, VT], _factory_fields: Any = ...) -> CheckedPMap[KT, VT]: ...
+ def serialize(self, format: Optional[Any] = ...) -> Dict[KT, VT]: ...
+
+
+class CheckedPVector(PVector[T]):
+ __type__: Type[T]
+    def __new__(cls, initial: Iterable[T] = ...) -> CheckedPVector: ...
+ @classmethod
+ def create(cls, source_data: Iterable[T], _factory_fields: Any = ...) -> CheckedPVector[T]: ...
+ def serialize(self, format: Optional[Any] = ...) -> List[T]: ...
+
+
+class CheckedPSet(PSet[T]):
+ __type__: Type[T]
+ def __new__(cls, initial: Iterable[T] = ...) -> CheckedPSet: ...
+ @classmethod
+ def create(cls, source_data: Iterable[T], _factory_fields: Any = ...) -> CheckedPSet[T]: ...
+ def serialize(self, format: Optional[Any] = ...) -> Set[T]: ...
+
+
+class InvariantException(Exception):
+ invariant_errors: Tuple[Any, ...] = ... # possibly nested tuple
+ missing_fields: Tuple[str, ...] = ...
+ def __init__(
+ self,
+ error_codes: Any = ...,
+ missing_fields: Any = ...,
+ *args: Any,
+ **kwargs: Any
+ ) -> None: ...
+
+
+class CheckedTypeError(TypeError):
+ source_class: Type[Any]
+ expected_types: Tuple[Any, ...]
+ actual_type: Type[Any]
+ actual_value: Any
+ def __init__(
+ self,
+ source_class: Any,
+ expected_types: Any,
+ actual_type: Any,
+ actual_value: Any,
+ *args: Any,
+ **kwargs: Any
+ ) -> None: ...
+
+
+class CheckedKeyTypeError(CheckedTypeError): ...
+class CheckedValueTypeError(CheckedTypeError): ...
+class CheckedType: ...
+
+
+class PTypeError(TypeError):
+ source_class: Type[Any] = ...
+ field: str = ...
+ expected_types: Tuple[Any, ...] = ...
+ actual_type: Type[Any] = ...
+ def __init__(
+ self,
+ source_class: Any,
+ field: Any,
+ expected_types: Any,
+ actual_type: Any,
+ *args: Any,
+ **kwargs: Any
+ ) -> None: ...
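The evolver stubs above describe pyrsistent's batch-update API: an evolver is
a mutable view over a persistent structure that is frozen back with
persistent(). A short sketch of the calls the stubs promise (not part of the
stub file; assumes pyrsistent is importable):

    from pyrsistent import pvector

    v = pvector([1, 2, 3])
    e = v.evolver()        # PVectorEvolver[int] per the stub above
    e[1] = 20              # __setitem__(index, val) -> None
    e.append(4)            # append(val) -> PVectorEvolver[int]
    assert e.is_dirty()    # uncommitted changes exist
    v2 = e.persistent()    # freeze back into a PVector[int]
    assert v == pvector([1, 2, 3])        # original untouched
    assert v2 == pvector([1, 20, 3, 4])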
diff --git a/third_party/python/pyrsistent/setup.cfg b/third_party/python/pyrsistent/setup.cfg
new file mode 100644
index 0000000000..e4eba0b6c3
--- /dev/null
+++ b/third_party/python/pyrsistent/setup.cfg
@@ -0,0 +1,7 @@
+[aliases]
+test = pytest
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/third_party/python/pyrsistent/setup.py b/third_party/python/pyrsistent/setup.py
new file mode 100644
index 0000000000..931800ff15
--- /dev/null
+++ b/third_party/python/pyrsistent/setup.py
@@ -0,0 +1,81 @@
+import os
+from setuptools import setup, Extension
+import sys
+import platform
+import warnings
+import codecs
+from distutils.command.build_ext import build_ext
+from distutils.errors import CCompilerError
+from distutils.errors import DistutilsPlatformError, DistutilsExecError
+from _pyrsistent_version import __version__
+
+readme_path = os.path.join(os.path.dirname(__file__), 'README.rst')
+with codecs.open(readme_path, encoding='utf8') as f:
+ readme = f.read()
+
+extensions = []
+if platform.python_implementation() == 'CPython':
+ extensions = [Extension('pvectorc', sources=['pvectorcmodule.c'])]
+
+needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
+pytest_runner = ['pytest-runner'] if needs_pytest else []
+
+
+class custom_build_ext(build_ext):
+ """Allow C extension building to fail."""
+
+ warning_message = """
+********************************************************************************
+WARNING: Could not build the %s.
+ Pyrsistent will still work but performance may be degraded.
+ %s
+********************************************************************************
+"""
+
+ def run(self):
+ try:
+ build_ext.run(self)
+ except Exception:
+ e = sys.exc_info()[1]
+ sys.stderr.write('%s\n' % str(e))
+ sys.stderr.write(self.warning_message % ("extension modules", "There was an issue with your platform configuration - see above."))
+
+ def build_extension(self, ext):
+ name = ext.name
+ try:
+ build_ext.build_extension(self, ext)
+ except Exception:
+ e = sys.exc_info()[1]
+ sys.stderr.write('%s\n' % str(e))
+ sys.stderr.write(self.warning_message % ("%s extension module" % name, "The output above this warning shows how the compilation failed."))
+
+setup(
+ name='pyrsistent',
+ version=__version__,
+ description='Persistent/Functional/Immutable data structures',
+ long_description=readme,
+ author='Tobias Gustafsson',
+ author_email='tobias.l.gustafsson@gmail.com',
+ url='http://github.com/tobgu/pyrsistent/',
+ license='MIT',
+ license_files=['LICENCE.mit'],
+ py_modules=['_pyrsistent_version'],
+ classifiers=[
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: MIT License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
+ 'Programming Language :: Python :: 3.7',
+ 'Programming Language :: Python :: Implementation :: PyPy',
+ ],
+ test_suite='tests',
+ tests_require=['pytest<5', 'hypothesis<5'],
+ scripts=[],
+ setup_requires=pytest_runner,
+ ext_modules=extensions,
+ cmdclass={'build_ext': custom_build_ext},
+ install_requires=['six'],
+ packages=['pyrsistent'],
+ package_data={'pyrsistent': ['py.typed', '__init__.pyi', 'typing.pyi']},
+)
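The custom_build_ext hook above is the usual "optional C accelerator"
packaging pattern: try to compile pvectorc, and fall back to the pure-Python
implementation with a warning instead of failing the install. Distilled to
its core (a sketch independent of pyrsistent's setup.py; the module and
source names are placeholders):

    from setuptools import setup, Extension
    from setuptools.command.build_ext import build_ext

    class optional_build_ext(build_ext):
        """Build C extensions, but never fail the whole install."""
        def build_extension(self, ext):
            try:
                super().build_extension(ext)
            except Exception as exc:  # missing compiler, headers, ...
                print('skipping optional extension %s: %s' % (ext.name, exc))

    setup(
        name='example',
        ext_modules=[Extension('fastmod', sources=['fastmod.c'])],
        cmdclass={'build_ext': optional_build_ext},
    )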
diff --git a/third_party/python/python-hglib/LICENSE b/third_party/python/python-hglib/LICENSE
new file mode 100644
index 0000000000..25d01ceb87
--- /dev/null
+++ b/third_party/python/python-hglib/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2011 Matt Mackall and other contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/third_party/python/python-hglib/Makefile b/third_party/python/python-hglib/Makefile
new file mode 100644
index 0000000000..ad26093755
--- /dev/null
+++ b/third_party/python/python-hglib/Makefile
@@ -0,0 +1,17 @@
+PYTHON=python
+help:
+ @echo 'Commonly used make targets:'
+ @echo ' tests - run all tests in the automatic test suite'
+
+all: help
+
+.PHONY: tests
+
+MANIFEST.in:
+ hg manifest | sed -e 's/^/include /' > MANIFEST.in
+
+dist: MANIFEST.in
+ TAR_OPTIONS="--owner=root --group=root --mode=u+w,go-w,a+rX-s" $(PYTHON) setup.py -q sdist
+
+tests:
+ $(PYTHON) test.py --with-doctest
diff --git a/third_party/python/python-hglib/PKG-INFO b/third_party/python/python-hglib/PKG-INFO
new file mode 100644
index 0000000000..e32cabb04d
--- /dev/null
+++ b/third_party/python/python-hglib/PKG-INFO
@@ -0,0 +1,26 @@
+Metadata-Version: 1.1
+Name: python-hglib
+Version: 2.4
+Summary: Mercurial Python library
+Home-page: http://selenic.com/repo/python-hglib
+Author: Idan Kamara
+Author-email: idankk86@gmail.com
+License: MIT
+Description: python-hglib
+ ============
+
+ python-hglib is a library with a fast, convenient interface to Mercurial.
+ It uses Mercurial's command server for communication with hg.
+
+ Installation is standard:
+
+ $ python setup.py install
+
+Platform: UNKNOWN
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
diff --git a/third_party/python/python-hglib/README b/third_party/python/python-hglib/README
new file mode 100644
index 0000000000..01c45e7a3b
--- /dev/null
+++ b/third_party/python/python-hglib/README
@@ -0,0 +1,9 @@
+python-hglib
+============
+
+python-hglib is a library with a fast, convenient interface to Mercurial.
+It uses Mercurial's command server for communication with hg.
+
+Installation is standard:
+
+ $ python setup.py install
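The interface the README promises is driven through a client object; a
minimal sketch (not part of the vendored sources; assumes hg is on PATH and
the current directory is a Mercurial repository):

    import hglib

    client = hglib.open('.')       # start a command server for the repo
    try:
        tip = client.tip()         # revision tuple: rev, node, tags, ...
        print(tip.rev, tip.branch)
        for rev in client.log(limit=3):
            print(rev.node[:12], rev.desc)
    finally:
        client.close()             # shut the command server down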
diff --git a/third_party/python/python-hglib/examples/stats.py b/third_party/python/python-hglib/examples/stats.py
new file mode 100644
index 0000000000..f54a59236e
--- /dev/null
+++ b/third_party/python/python-hglib/examples/stats.py
@@ -0,0 +1,35 @@
+# stats - get stats on the given repo
+
+import sys
+import hglib
+
+# figure out what repo path to use
+repo = '.'
+if len(sys.argv) > 1:
+ repo = sys.argv[1]
+
+# connect to hg
+client = hglib.open(repo)
+
+# gather some stats
+revs = int(client.tip().rev)
+files = len(list(client.manifest()))
+heads = len(client.heads())
+branches = len(client.branches())
+tags = len(client.tags()) - 1 # don't count tip
+
+authors = {}
+for e in client.log():
+ authors[e.author] = True
+
+merges = 0
+for e in client.log(onlymerges=True):
+ merges += 1
+
+print("%d revisions" % revs)
+print("%d merges" % merges)
+print("%d files" % files)
+print("%d heads" % heads)
+print("%d branches" % branches)
+print("%d tags" % tags)
+print("%d authors" % len(authors))
diff --git a/third_party/python/python-hglib/hglib/__init__.py b/third_party/python/python-hglib/hglib/__init__.py
new file mode 100644
index 0000000000..a522d33382
--- /dev/null
+++ b/third_party/python/python-hglib/hglib/__init__.py
@@ -0,0 +1,40 @@
+import subprocess
+from hglib import client, util, error
+
+HGPATH = 'hg'
+
+def open(path=None, encoding=None, configs=None):
+    '''Start a command server for the given path (or for a repository
+    found in the cwd). HGENCODING is set to the given encoding. configs
+    is a list of 'section.name=value' strings, as would be passed to
+    hg --config.
+    '''
+ return client.hgclient(path, encoding, configs)
+
+def init(dest=None, ssh=None, remotecmd=None, insecure=False,
+ encoding=None, configs=None):
+ args = util.cmdbuilder('init', dest, e=ssh, remotecmd=remotecmd,
+ insecure=insecure)
+
+ args.insert(0, HGPATH)
+ proc = util.popen(args)
+ out, err = proc.communicate()
+ if proc.returncode:
+ raise error.CommandError(args, proc.returncode, out, err)
+
+ return client.hgclient(dest, encoding, configs, connect=False)
+
+def clone(source=None, dest=None, noupdate=False, updaterev=None, rev=None,
+ branch=None, pull=False, uncompressed=False, ssh=None, remotecmd=None,
+ insecure=False, encoding=None, configs=None):
+ args = util.cmdbuilder('clone', source, dest, noupdate=noupdate,
+ updaterev=updaterev, rev=rev, branch=branch,
+ pull=pull, uncompressed=uncompressed,
+ e=ssh, remotecmd=remotecmd, insecure=insecure)
+
+ args.insert(0, HGPATH)
+ proc = util.popen(args)
+ out, err = proc.communicate()
+ if proc.returncode:
+ raise error.CommandError(args, proc.returncode, out, err)
+
+ return client.hgclient(dest, encoding, configs, connect=False)
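Both init() and clone() above shell out to hg directly and raise
error.CommandError on a non-zero exit, and they hand back an unconnected
client (connect=False), so the command server is only started when you first
use it. A short sketch (the path is a placeholder; not part of the vendored
sources):

    import hglib
    from hglib import error

    try:
        client = hglib.init('/tmp/newrepo')  # returns unconnected client
    except error.CommandError as e:
        print('hg init failed:', e.err)      # stderr captured from hg
    else:
        client.open()                        # start the command server
        print(client.branch())               # b'default' on a fresh repo
        client.close()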
diff --git a/third_party/python/python-hglib/hglib/client.py b/third_party/python/python-hglib/hglib/client.py
new file mode 100644
index 0000000000..4eababdf40
--- /dev/null
+++ b/third_party/python/python-hglib/hglib/client.py
@@ -0,0 +1,1717 @@
+import struct, re, datetime
+import hglib
+from hglib import error, util, templates, merge, context
+
+from hglib.util import b, cmdbuilder, BytesIO, strtobytes
+
+class revision(tuple):
+ def __new__(cls, rev, node, tags, branch, author, desc, date):
+ return tuple.__new__(cls, (rev, node, tags, branch, author, desc, date))
+
+ @property
+ def rev(self):
+ return self[0]
+
+ @property
+ def node(self):
+ return self[1]
+
+ @property
+ def tags(self):
+ return self[2]
+
+ @property
+ def branch(self):
+ return self[3]
+
+ @property
+ def author(self):
+ return self[4]
+
+ @property
+ def desc(self):
+ return self[5]
+
+ @property
+ def date(self):
+ return self[6]
+
+class hgclient(object):
+ inputfmt = '>I'
+ outputfmt = '>cI'
+ outputfmtsize = struct.calcsize(outputfmt)
+ retfmt = '>i'
+
+ def __init__(self, path, encoding, configs, connect=True):
+ self._args = [hglib.HGPATH, 'serve', '--cmdserver', 'pipe',
+ '--config', 'ui.interactive=True']
+ if path:
+ self._args += ['-R', path]
+ if configs:
+ for config in configs:
+ self._args += ['--config', config]
+ self._env = {'HGPLAIN': '1'}
+ if encoding:
+ self._env['HGENCODING'] = encoding
+
+ self.server = None
+ self._version = None
+ # include the hidden changesets if True
+ self.hidden = None
+
+ self._cbout = None
+ self._cberr = None
+ self._cbprompt = None
+
+ if connect:
+ self.open()
+
+ self._protocoltracefn = None
+
+ def setcbout(self, cbout):
+ """
+ cbout is a function that will be called with the stdout data of
+        the command as it runs. Call with None to stop getting callbacks.
+ """
+ self._cbout = cbout
+
+ def setcberr(self, cberr):
+ """
+ cberr is a function that will be called with the stderr data of
+        the command as it runs. Call with None to stop getting callbacks.
+ """
+ self._cberr = cberr
+
+ def setcbprompt(self, cbprompt):
+ """
+        cbprompt is used to reply to prompts by the server.
+ It receives the max number of bytes to return and the
+ contents of stdout received so far.
+
+ Call with None to stop getting call backs.
+
+ cbprompt is never called from merge() or import_()
+ which already handle the prompt.
+ """
+ self._cbprompt = cbprompt
+
+ def setprotocoltrace(self, tracefn=None):
+ """
+        If tracefn is None, no trace calls will be made.
+        Otherwise tracefn is called as tracefn(direction, channel, data):
+        direction is 'r' for reads from the server and 'w' for writes to
+        the server; channel is always None when direction is 'w' and the
+        channel identifier when direction is 'r'.
+ """
+ self._protocoltracefn = tracefn
+
+ def __enter__(self):
+ if self.server is None:
+ self.open()
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+
+ def _readhello(self):
+ """ read the hello message the server sends when started """
+ ch, msg = self._readchannel()
+ assert ch == b('o')
+
+ msg = msg.split(b('\n'))
+
+ self.capabilities = msg[0][len(b('capabilities: ')):]
+ if not self.capabilities:
+ raise error.ResponseError(
+ "bad hello message: expected 'capabilities: '"
+ ", got %r" % msg[0])
+
+ self.capabilities = set(self.capabilities.split())
+
+ # at the very least the server should be able to run commands
+ assert b('runcommand') in self.capabilities
+
+ self._encoding = msg[1][len(b('encoding: ')):]
+ if not self._encoding:
+ raise error.ResponseError("bad hello message: expected 'encoding: '"
+ ", got %r" % msg[1])
+
+ def _readchannel(self):
+ data = self.server.stdout.read(hgclient.outputfmtsize)
+ if not data:
+ raise error.ServerError()
+ channel, length = struct.unpack(hgclient.outputfmt, data)
+ if channel in b('IL'):
+ return channel, length
+ else:
+ return channel, self.server.stdout.read(length)
+
+ @staticmethod
+ def _parserevs(splitted):
+        '''splitted is a flat list of fields according to our rev style,
+        where every seven fields (rev, node, tags, branch, author, desc,
+        date) compose one revision.
+        '''
+ revs = []
+ for rev in util.grouper(7, splitted):
+ # truncate the timezone and convert to a local datetime
+ posixtime = float(rev[6].split(b('.'), 1)[0])
+ dt = datetime.datetime.fromtimestamp(posixtime)
+ revs.append(revision(rev[0], rev[1], rev[2], rev[3],
+ rev[4], rev[5], dt))
+ return revs
+
+ def runcommand(self, args, inchannels, outchannels):
+ def writeblock(data):
+ if self._protocoltracefn is not None:
+ self._protocoltracefn('w', None, data)
+ self.server.stdin.write(struct.pack(self.inputfmt, len(data)))
+ self.server.stdin.write(data)
+ self.server.stdin.flush()
+
+ if not self.server:
+ raise ValueError("server not connected")
+
+ self.server.stdin.write(b('runcommand\n'))
+ writeblock(b('\0').join(args))
+
+ while True:
+ channel, data = self._readchannel()
+ if self._protocoltracefn is not None:
+ self._protocoltracefn('r', channel, data)
+
+ # input channels
+ if channel in inchannels:
+ writeblock(inchannels[channel](data))
+ # output channels
+ elif channel in outchannels:
+ outchannels[channel](data)
+ # result channel, command finished
+ elif channel == b('r'):
+ return struct.unpack(hgclient.retfmt, data)[0]
+ # a channel that we don't know and can't ignore
+ elif channel.isupper():
+ raise error.ResponseError(
+ "unexpected data on required channel '%s'" % channel)
+ # optional channel
+ else:
+ pass
+
+ def rawcommand(self, args, eh=None, prompt=None, input=None):
+ """
+ args is the cmdline (usually built using util.cmdbuilder)
+
+ eh is an error handler that is passed the return code, stdout and stderr
+ If no eh is given, we raise a CommandError if ret != 0
+
+ prompt is used to reply to prompts by the server
+ It receives the max number of bytes to return and the contents of stdout
+ received so far
+
+ input is used to reply to bulk data requests by the server
+ It receives the max number of bytes to return
+ """
+ out, err = BytesIO(), BytesIO()
+ outchannels = {}
+ if self._cbout is None:
+ outchannels[b('o')] = out.write
+ else:
+ def out_handler(data):
+ out.write(data)
+ self._cbout(data)
+ outchannels[b('o')] = out_handler
+ if self._cberr is None:
+ outchannels[b('e')] = err.write
+ else:
+ def err_handler(data):
+ err.write(data)
+ self._cberr(data)
+ outchannels[b('e')] = err_handler
+
+ inchannels = {}
+ if prompt is None:
+ prompt = self._cbprompt
+ if prompt is not None:
+ def func(size):
+ reply = prompt(size, out.getvalue())
+ return reply
+ inchannels[b('L')] = func
+ if input is not None:
+ inchannels[b('I')] = input
+
+ ret = self.runcommand(args, inchannels, outchannels)
+ out, err = out.getvalue(), err.getvalue()
+
+ if ret:
+ if eh is None:
+ raise error.CommandError(args, ret, out, err)
+ else:
+ return eh(ret, out, err)
+ return out
+
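+    # Illustrative comment (not upstream code): prompt and input feed the
+    # 'L' and 'I' input channels handled in runcommand() above, while eh
+    # intercepts a non-zero value from the result channel. For example,
+    # answering every server prompt with "y":
+    #
+    #   out = self.rawcommand([b('resolve'), b('--all')],
+    #                         prompt=lambda size, data: b('y\n'))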
+ def open(self):
+ if self.server is not None:
+ raise ValueError('server already open')
+
+ self.server = util.popen(self._args, self._env)
+ try:
+ self._readhello()
+ except error.ServerError:
+ ret, serr = self._close()
+ raise error.ServerError('server exited with status %d: %s'
+ % (ret, serr.strip()))
+ return self
+
+ def close(self):
+ """Closes the command server instance and waits for it to exit,
+ returns the exit code.
+
+ Attempting to call any function afterwards that needs to
+ communicate with the server will raise a ValueError.
+ """
+ return self._close()[0]
+
+ def _close(self):
+ _sout, serr = self.server.communicate()
+ ret = self.server.returncode
+ self.server = None
+ return ret, serr
+
+ def add(self, files=[], dryrun=False, subrepos=False, include=None,
+ exclude=None):
+ """
+ Add the specified files on the next commit.
+ If no files are given, add all files to the repository.
+
+        dryrun - do not perform actions
+ subrepos - recurse into subrepositories
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ Return whether all given files were added.
+ """
+ if not isinstance(files, list):
+ files = [files]
+
+ args = cmdbuilder(b('add'), n=dryrun, S=subrepos, I=include, X=exclude,
+ *files)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def addremove(self, files=[], similarity=None, dryrun=False, include=None,
+ exclude=None):
+ """Add all new files and remove all missing files from the repository.
+
+ New files are ignored if they match any of the patterns in
+ ".hgignore". As with add, these changes take effect at the
+ next commit.
+
+ similarity - used to detect renamed files. With a parameter
+ greater than 0, this compares every removed file with every
+ added file and records those similar enough as renames. This
+ option takes a percentage between 0 (disabled) and 100 (files
+ must be identical) as its parameter. Detecting renamed files
+ this way can be expensive. After using this option, "hg status
+ -C" can be used to check which files were identified as moved
+ or renamed.
+
+        dryrun - do not perform actions
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ Return True if all files are successfully added.
+
+ """
+ if not isinstance(files, list):
+ files = [files]
+
+ args = cmdbuilder(b('addremove'), s=similarity, n=dryrun, I=include,
+ X=exclude, *files)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def annotate(self, files, rev=None, nofollow=False, text=False, user=False,
+ file=False, date=False, number=False, changeset=False,
+ line=False, verbose=False, include=None, exclude=None):
+ """
+ Show changeset information by line for each file in files.
+
+ rev - annotate the specified revision
+ nofollow - don't follow copies and renames
+ text - treat all files as text
+ user - list the author (long with -v)
+ file - list the filename
+ date - list the date
+ number - list the revision number (default)
+ changeset - list the changeset
+ line - show line number at the first appearance
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ Yields a (info, contents) tuple for each line in a file. Info is a space
+ separated string according to the given options.
+ """
+ if not isinstance(files, list):
+ files = [files]
+
+ args = cmdbuilder(b('annotate'), r=rev, no_follow=nofollow, a=text,
+ u=user, f=file, d=date, n=number, c=changeset,
+ l=line, v=verbose, I=include, X=exclude,
+ hidden=self.hidden, *files)
+
+ out = self.rawcommand(args)
+
+ for line in out.splitlines():
+ yield tuple(line.split(b(': '), 1))
+
+ def archive(self, dest, rev=None, nodecode=False, prefix=None, type=None,
+ subrepos=False, include=None, exclude=None):
+ """Create an unversioned archive of a repository revision.
+
+ The exact name of the destination archive or directory is given using a
+ format string; see export for details.
+
+ Each member added to an archive file has a directory prefix
+ prepended. Use prefix to specify a format string for the
+ prefix. The default is the basename of the archive, with
+ suffixes removed.
+
+ dest - destination path
+ rev - revision to distribute. The revision used is the parent of the
+ working directory if one isn't given.
+
+ nodecode - do not pass files through decoders
+ prefix - directory prefix for files in archive
+ type - type of distribution to create. The archive type is automatically
+ detected based on file extension if one isn't given.
+
+ Valid types are:
+
+ "files" a directory full of files (default)
+ "tar" tar archive, uncompressed
+ "tbz2" tar archive, compressed using bzip2
+ "tgz" tar archive, compressed using gzip
+ "uzip" zip archive, uncompressed
+ "zip" zip archive, compressed using deflate
+
+ subrepos - recurse into subrepositories
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ """
+ args = cmdbuilder(b('archive'), dest, r=rev,
+ no_decode=nodecode, p=prefix,
+ t=type, S=subrepos, I=include, X=exclude,
+ hidden=self.hidden)
+
+ self.rawcommand(args)
+
+ def backout(self, rev, merge=False, parent=None, tool=None, message=None,
+ logfile=None, date=None, user=None):
+ """Prepare a new changeset with the effect of rev undone in the current
+ working directory.
+
+ If rev is the parent of the working directory, then this new
+ changeset is committed automatically. Otherwise, hg needs to
+ merge the changes and the merged result is left uncommitted.
+
+ rev - revision to backout
+ merge - merge with old dirstate parent after backout
+ parent - parent to choose when backing out merge
+ tool - specify merge tool
+ message - use text as commit message
+ logfile - read commit message from file
+ date - record the specified date as commit date
+ user - record the specified user as committer
+
+ """
+ if message and logfile:
+ raise ValueError("cannot specify both a message and a logfile")
+
+ args = cmdbuilder(b('backout'), r=rev, merge=merge, parent=parent,
+ t=tool, m=message, l=logfile, d=date, u=user,
+ hidden=self.hidden)
+
+ self.rawcommand(args)
+
+ def bookmark(self, name, rev=None, force=False, delete=False,
+ inactive=False, rename=None):
+ """
+ Set a bookmark on the working directory's parent revision or rev,
+ with the given name.
+
+ name - bookmark name
+ rev - revision to bookmark
+ force - bookmark even if another bookmark with the same name exists
+ delete - delete the given bookmark
+ inactive - do not mark the new bookmark active
+ rename - rename the bookmark given by rename to name
+ """
+ args = cmdbuilder(b('bookmark'), name, r=rev, f=force, d=delete,
+ i=inactive, m=rename)
+
+ self.rawcommand(args)
+
+ def bookmarks(self):
+ """
+ Return the bookmarks as a list of (name, rev, node) and the index of the
+ current one.
+
+ If there isn't a current one, -1 is returned as the index.
+ """
+ args = cmdbuilder(b('bookmarks'), hidden=self.hidden)
+ out = self.rawcommand(args)
+
+ bms = []
+ current = -1
+ if out.rstrip() != b('no bookmarks set'):
+ for line in out.splitlines():
+ iscurrent, line = line[0:3], line[3:]
+ if b('*') in iscurrent:
+ current = len(bms)
+ name, line = line.split(b(' '), 1)
+ rev, node = line.split(b(':'))
+ bms.append((name, int(rev), node))
+ return bms, current
+
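+    # Illustrative comment (not upstream code): consuming the pair
+    # returned by bookmarks() above.
+    #
+    #   bms, current = client.bookmarks()
+    #   for i, (name, rev, node) in enumerate(bms):
+    #       marker = '*' if i == current else ' '
+    #       print(marker, name, rev)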
+ def branch(self, name=None, clean=False, force=False):
+ """When name isn't given, return the current branch name. Otherwise
+ set the working directory branch name (the branch will not
+ exist in the repository until the next commit). Standard
+ practice recommends that primary development take place on the
+ 'default' branch.
+
+ When clean is True, reset and return the working directory
+ branch to that of the parent of the working directory,
+ negating a previous branch change.
+
+ name - new branch name
+ clean - reset branch name to parent branch name
+ force - set branch name even if it shadows an existing branch
+
+ """
+ if name and clean:
+ raise ValueError('cannot use both name and clean')
+
+ args = cmdbuilder(b('branch'), name, f=force, C=clean)
+ out = self.rawcommand(args).rstrip()
+
+ if name:
+ return name
+ elif not clean:
+ return out
+ else:
+ # len('reset working directory to branch ') == 34
+ return out[34:]
+
+ def branches(self, active=False, closed=False):
+ """
+ Returns the repository's named branches as a list of (name, rev, node).
+
+ active - show only branches that have unmerged heads
+ closed - show normal and closed branches
+ """
+ args = cmdbuilder(b('branches'), a=active, c=closed, hidden=self.hidden)
+ out = self.rawcommand(args)
+
+ branches = []
+ for line in out.rstrip().splitlines():
+ namerev, node = line.rsplit(b(':'), 1)
+ name, rev = namerev.rsplit(b(' '), 1)
+ name = name.rstrip()
+ node = node.split()[0] # get rid of ' (inactive)'
+ branches.append((name, int(rev), node))
+ return branches
+
+ def bundle(self, file, destrepo=None, rev=[], branch=[], base=[], all=False,
+ force=False, type=None, ssh=None, remotecmd=None,
+ insecure=False):
+ """Generate a compressed changegroup file collecting changesets not
+ known to be in another repository.
+
+ If destrepo isn't given, then hg assumes the destination will have all
+ the nodes you specify with base. To create a bundle containing all
+ changesets, use all (or set base to 'null').
+
+ file - destination file name
+ destrepo - repository to look for changes
+ rev - a changeset intended to be added to the destination
+ branch - a specific branch you would like to bundle
+ base - a base changeset assumed to be available at the destination
+ all - bundle all changesets in the repository
+ type - bundle compression type to use, available compression
+ methods are: none, bzip2, and gzip (default: bzip2)
+
+ force - run even when the destrepo is unrelated
+ ssh - specify ssh command to use
+ remotecmd - specify hg command to run on the remote side
+ insecure - do not verify server certificate (ignoring
+ web.cacerts config)
+
+ Return True if a bundle was created, False if no changes were found.
+
+ """
+ args = cmdbuilder(b('bundle'), file, destrepo, f=force, r=rev, b=branch,
+ base=base, a=all, t=type, e=ssh, remotecmd=remotecmd,
+ insecure=insecure, hidden=self.hidden)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def cat(self, files, rev=None, output=None):
+ """Return a string containing the specified files as they were at the
+ given revision. If no revision is given, the parent of the working
+ directory is used, or tip if no revision is checked out.
+
+ If output is given, writes the contents to the specified file.
+ The name of the file is given using a format string. The
+ formatting rules are the same as for the export command, with
+ the following additions:
+
+ "%s" basename of file being printed
+ "%d" dirname of file being printed, or '.' if in repository root
+ "%p" root-relative path name of file being printed
+
+ """
+ args = cmdbuilder(b('cat'), r=rev, o=output, hidden=self.hidden, *files)
+ out = self.rawcommand(args)
+
+ if not output:
+ return out
+
+ def clone(self, source=b('.'), dest=None, branch=None, updaterev=None,
+ revrange=None):
+ """
+ Create a copy of an existing repository specified by source in a new
+ directory dest.
+
+ If dest isn't specified, it defaults to the basename of source.
+
+ branch - clone only the specified branch
+ updaterev - revision, tag or branch to check out
+ revrange - include the specified changeset
+ """
+ args = cmdbuilder(b('clone'), source, dest, b=branch,
+ u=updaterev, r=revrange)
+ self.rawcommand(args)
+
+ def init(self, dest, ssh=None, remotecmd=None, insecure=False):
+ args = util.cmdbuilder('init', dest, e=ssh, remotecmd=remotecmd,
+ insecure=insecure)
+ self.rawcommand(args)
+
+ def commit(self, message=None, logfile=None, addremove=False,
+ closebranch=False, date=None, user=None, include=None,
+ exclude=None, amend=False):
+ """
+ Commit changes reported by status into the repository.
+
+ message - the commit message
+ logfile - read commit message from file
+ addremove - mark new/missing files as added/removed before committing
+ closebranch - mark a branch as closed, hiding it from the branch list
+ date - record the specified date as commit date
+ user - record the specified user as committer
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+ amend - amend the parent of the working dir
+ """
+ if amend and message is None and logfile is None:
+ # retrieve current commit message
+ message = self.log(b('.'))[0][5]
+ if message is None and logfile is None and not amend:
+ raise ValueError("must provide at least a message or a logfile")
+ elif message and logfile:
+ raise ValueError("cannot specify both a message and a logfile")
+
+ # --debug will print the committed cset
+ args = cmdbuilder(b('commit'), debug=True, m=message, A=addremove,
+ close_branch=closebranch, d=date, u=user, l=logfile,
+ I=include, X=exclude, amend=amend)
+ out = self.rawcommand(args)
+ m = re.search(b(r'^committed changeset (\d+):([0-9a-f]+)'), out,
+ re.MULTILINE)
+ if not m:
+ raise ValueError('revision and node not found in hg output: %r'
+ % out)
+ rev, node = m.groups()
+ return int(rev), node
+
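+    # Illustrative comment (not upstream code): the (rev, node) pair
+    # returned by commit() above feeds directly into other commands.
+    #
+    #   rev, node = client.commit(b('fix typo'), addremove=True)
+    #   print(client.log(revrange=node)[0].desc)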
+ def config(self, names=[], untrusted=False, showsource=False):
+ """Return a list of (section, key, value) config settings from all
+ hgrc files
+
+ When showsource is specified, return (source, section, key, value) where
+ source is of the form filename:[line]
+
+ """
+ def splitline(s):
+ k, value = s.rstrip().split(b('='), 1)
+ section, key = k.split(b('.'), 1)
+ return section, key, value
+
+ if not isinstance(names, list):
+ names = [names]
+
+ args = cmdbuilder(b('showconfig'), u=untrusted, debug=showsource,
+ *names)
+ out = self.rawcommand(args)
+
+ conf = []
+ if showsource:
+ out = util.skiplines(out, b('read config from: '))
+ for line in out.splitlines():
+ m = re.match(b(r"(.+?:(?:\d+:)?) (.*)"), line)
+ t = splitline(m.group(2))
+ conf.append((m.group(1)[:-1], t[0], t[1], t[2]))
+ else:
+ for line in out.splitlines():
+ conf.append(splitline(line))
+
+ return conf
+
+ @property
+ def encoding(self):
+ """
+ Return the server's encoding (as reported in the hello message).
+ """
+ if not b('getencoding') in self.capabilities:
+            raise error.CapabilityError('getencoding')
+
+ if not self._encoding:
+ self.server.stdin.write(b('getencoding\n'))
+            # the reply arrives on the result channel ('r')
+            channel, data = self._readchannel()
+            self._encoding = data
+
+ return self._encoding
+
+ def copy(self, source, dest, after=False, force=False, dryrun=False,
+ include=None, exclude=None):
+ """Mark dest as having copies of source files. If dest is a
+ directory, copies are put in that directory. If dest is a
+ file, then source must be a string.
+
+ Returns True on success, False if errors are encountered.
+
+ source - a file or a list of files
+ dest - a destination file or directory
+ after - record a copy that has already occurred
+ force - forcibly copy over an existing managed file
+ dryrun - do not perform actions, just print output
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ """
+ if not isinstance(source, list):
+ source = [source]
+
+ source.append(dest)
+ args = cmdbuilder(b('copy'), A=after, f=force, n=dryrun,
+ I=include, X=exclude, *source)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def diff(self, files=[], revs=[], change=None, text=False,
+ git=False, nodates=False, showfunction=False,
+ reverse=False, ignoreallspace=False,
+ ignorespacechange=False, ignoreblanklines=False,
+ unified=None, stat=False, subrepos=False, include=None,
+ exclude=None):
+ """
+ Return differences between revisions for the specified files.
+
+ revs - a revision or a list of two revisions to diff
+ change - change made by revision
+ text - treat all files as text
+ git - use git extended diff format
+ nodates - omit dates from diff headers
+ showfunction - show which function each change is in
+ reverse - produce a diff that undoes the changes
+ ignoreallspace - ignore white space when comparing lines
+ ignorespacechange - ignore changes in the amount of white space
+ ignoreblanklines - ignore changes whose lines are all blank
+ unified - number of lines of context to show
+ stat - output diffstat-style summary of changes
+ subrepos - recurse into subrepositories
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+ """
+ if change and revs:
+ raise ValueError('cannot specify both change and rev')
+
+ args = cmdbuilder(b('diff'), r=list(map(strtobytes, revs)), c=change,
+ a=text, g=git, nodates=nodates,
+ p=showfunction, reverse=reverse,
+ w=ignoreallspace, b=ignorespacechange,
+ B=ignoreblanklines, U=unified, stat=stat,
+ S=subrepos, I=include, X=exclude, hidden=self.hidden,
+ *files)
+
+ return self.rawcommand(args)
+
+ def export(self, revs, output=None, switchparent=False,
+ text=False, git=False, nodates=False):
+ """Return the header and diffs for one or more changesets. When
+ output is given, dumps to file. The name of the file is given
+ using a format string. The formatting rules are as follows:
+
+ "%%" literal "%" character
+ "%H" changeset hash (40 hexadecimal digits)
+ "%N" number of patches being generated
+ "%R" changeset revision number
+ "%b" basename of the exporting repository
+ "%h" short-form changeset hash (12 hexadecimal digits)
+ "%n" zero-padded sequence number, starting at 1
+ "%r" zero-padded changeset revision number
+
+ output - print output to file with formatted name
+ switchparent - diff against the second parent
+        revs - a revision or list of revisions to export
+ text - treat all files as text
+ git - use git extended diff format
+ nodates - omit dates from diff headers
+
+ """
+ if not isinstance(revs, list):
+ revs = [revs]
+ args = cmdbuilder(b('export'), o=output, switch_parent=switchparent,
+ a=text, g=git, nodates=nodates, hidden=self.hidden,
+ *revs)
+
+ out = self.rawcommand(args)
+
+ if output is None:
+ return out
+
+ def forget(self, files, include=None, exclude=None):
+ """Mark the specified files so they will no longer be tracked after
+ the next commit.
+
+ This only removes files from the current branch, not from the entire
+ project history, and it does not delete them from the working directory.
+
+ Returns True on success.
+
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ """
+ if not isinstance(files, list):
+ files = [files]
+
+ args = cmdbuilder(b('forget'), I=include, X=exclude, *files)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def grep(self, pattern, files=[], all=False, text=False, follow=False,
+ ignorecase=False, fileswithmatches=False, line=False, user=False,
+ date=False, include=None, exclude=None):
+ """Search for a pattern in specified files and revisions.
+
+ This behaves differently than Unix grep. It only accepts Python/Perl
+ regexps. It searches repository history, not the working directory.
+ It always prints the revision number in which a match appears.
+
+ Yields (filename, revision, [line, [match status, [user,
+ [date, [match]]]]]) per match depending on the given options.
+
+ all - print all revisions that match
+ text - treat all files as text
+ follow - follow changeset history, or file history across
+ copies and renames
+ ignorecase - ignore case when matching
+ fileswithmatches - return only filenames and revisions that match
+ line - return line numbers in the result tuple
+ user - return the author in the result tuple
+ date - return the date in the result tuple
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ """
+ if not isinstance(files, list):
+ files = [files]
+
+ args = cmdbuilder(b('grep'), all=all, a=text, f=follow, i=ignorecase,
+ l=fileswithmatches, n=line, u=user, d=date,
+ I=include, X=exclude, hidden=self.hidden,
+ *[pattern] + files)
+ args.append(b('-0'))
+
+ def eh(ret, out, err):
+ if ret != 1:
+ raise error.CommandError(args, ret, out, err)
+ return b('')
+
+ out = self.rawcommand(args, eh=eh).split(b('\0'))
+
+ fieldcount = 3
+ if user:
+ fieldcount += 1
+ if date:
+ fieldcount += 1
+ if line:
+ fieldcount += 1
+ if all:
+ fieldcount += 1
+ if fileswithmatches:
+ fieldcount -= 1
+
+ return util.grouper(fieldcount, out)
+
+ def heads(self, rev=[], startrev=[], topological=False, closed=False):
+ """Return a list of current repository heads or branch heads.
+
+ rev - return only branch heads on the branches associated with
+ the specified changesets.
+
+ startrev - return only heads which are descendants of the given revs.
+ topological - named branch mechanics will be ignored and only changesets
+ without children will be shown.
+
+ closed - normal and closed branch heads.
+
+ """
+ if not isinstance(rev, list):
+ rev = [rev]
+
+ args = cmdbuilder(b('heads'), r=startrev, t=topological, c=closed,
+ template=templates.changeset, hidden=self.hidden,
+ *rev)
+
+ def eh(ret, out, err):
+ if ret != 1:
+ raise error.CommandError(args, ret, out, err)
+ return b('')
+
+ out = self.rawcommand(args, eh=eh).split(b('\0'))[:-1]
+ return self._parserevs(out)
+
+ def identify(self, rev=None, source=None, num=False, id=False, branch=False,
+ tags=False, bookmarks=False):
+ """Return a summary string identifying the repository state at rev
+ using one or two parent hash identifiers, followed by a "+" if
+ the working directory has uncommitted changes, the branch name
+ (if not default), a list of tags, and a list of bookmarks.
+
+ When rev is not given, return a summary string of the current
+ state of the repository.
+
+ Specifying source as a repository root or Mercurial bundle will cause
+ lookup to operate on that repository/bundle.
+
+ num - show local revision number
+ id - show global revision id
+ branch - show branch
+ tags - show tags
+ bookmarks - show bookmarks
+
+ """
+ args = cmdbuilder(b('identify'), source, r=rev, n=num, i=id,
+ b=branch, t=tags, B=bookmarks,
+ hidden=self.hidden)
+
+ return self.rawcommand(args)
+
+ def import_(self, patches, strip=None, force=False, nocommit=False,
+ bypass=False, exact=False, importbranch=False, message=None,
+ date=None, user=None, similarity=None):
+ """Import the specified patches which can be a list of file names or a
+ file-like object and commit them individually (unless nocommit is
+ specified).
+
+ strip - directory strip option for patch. This has the same
+ meaning as the corresponding patch option (default: 1)
+
+ force - skip check for outstanding uncommitted changes
+ nocommit - don't commit, just update the working directory
+ bypass - apply patch without touching the working directory
+ exact - apply patch to the nodes from which it was generated
+ importbranch - use any branch information in patch (implied by exact)
+ message - the commit message
+ date - record the specified date as commit date
+ user - record the specified user as committer
+ similarity - guess renamed files by similarity (0<=s<=100)
+
+ """
+ if hasattr(patches, 'read') and hasattr(patches, 'readline'):
+ patch = patches
+
+ def readline(size, output):
+ return patch.readline(size)
+
+ stdin = True
+ patches = ()
+ prompt = readline
+ input = patch.read
+ else:
+ stdin = False
+ prompt = None
+ input = None
+
+ args = cmdbuilder(b('import'), strip=strip, force=force,
+ no_commit=nocommit, bypass=bypass, exact=exact,
+ import_branch=importbranch, message=message,
+ date=date, user=user, similarity=similarity, _=stdin,
+ *patches)
+
+ self.rawcommand(args, prompt=prompt, input=input)
+
+ def incoming(self, revrange=None, path=None, force=False, newest=False,
+ bundle=None, bookmarks=False, branch=None, limit=None,
+ nomerges=False, subrepos=False):
+ """Return new changesets found in the specified path or the default pull
+ location.
+
+ When bookmarks=True, return a list of (name, node) of incoming
+ bookmarks.
+
+ revrange - a remote changeset or list of changesets intended to be added
+ force - run even if remote repository is unrelated
+ newest - show newest record first
+ bundle - avoid downloading the changesets twice and store the
+ bundles into the specified file.
+
+ bookmarks - compare bookmarks (this changes the return value)
+ branch - a specific branch you would like to pull
+ limit - limit number of changes returned
+ nomerges - do not show merges
+ subrepos - recurse into subrepositories
+
+ """
+ args = cmdbuilder(b('incoming'), path,
+ template=templates.changeset, r=revrange,
+ f=force, n=newest, bundle=bundle,
+ B=bookmarks, b=branch, l=limit, M=nomerges,
+ S=subrepos)
+
+ def eh(ret, out, err):
+ if ret != 1:
+ raise error.CommandError(args, ret, out, err)
+
+ out = self.rawcommand(args, eh=eh)
+ if not out:
+ return []
+
+ out = util.eatlines(out, 2)
+ if bookmarks:
+ bms = []
+ for line in out.splitlines():
+ bms.append(tuple(line.split()))
+ return bms
+ else:
+ out = out.split(b('\0'))[:-1]
+ return self._parserevs(out)
+
+ def log(self, revrange=None, files=[], follow=False,
+ followfirst=False, date=None, copies=False, keyword=None,
+ removed=False, onlymerges=False, user=None, branch=None,
+ prune=None, hidden=None, limit=None, nomerges=False,
+ include=None, exclude=None):
+ """Return the revision history of the specified files or the entire
+ project.
+
+ File history is shown without following rename or copy history of files.
+ Use follow with a filename to follow history across renames and copies.
+ follow without a filename will only show ancestors or descendants of the
+ starting revision. followfirst only follows the first parent of merge
+ revisions.
+
+ If revrange isn't specified, the default is "tip:0" unless
+ follow is set, in which case the working directory parent is
+ used as the starting revision.
+
+ The returned changeset is a named tuple with the following
+ string fields:
+
+ - rev
+ - node
+ - tags (space delimited)
+ - branch
+ - author
+ - desc
+
+ follow - follow changeset history, or file history across
+ copies and renames
+ followfirst - only follow the first parent of merge changesets
+ date - show revisions matching date spec
+ copies - show copied files
+ keyword - do case-insensitive search for a given text
+ removed - include revisions where files were removed
+ onlymerges - show only merges
+ user - revisions committed by user
+ branch - show changesets within the given named branch
+ prune - do not display revision or any of its ancestors
+ hidden - show hidden changesets
+ limit - limit number of changes displayed
+ nomerges - do not show merges
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ """
+ if hidden is None:
+ hidden = self.hidden
+ args = cmdbuilder(b('log'), template=templates.changeset,
+ r=revrange, f=follow, follow_first=followfirst,
+ d=date, C=copies, k=keyword, removed=removed,
+ m=onlymerges, u=user, b=branch, P=prune,
+ l=limit, M=nomerges, I=include, X=exclude,
+ hidden=hidden, *files)
+
+ out = self.rawcommand(args)
+ out = out.split(b('\0'))[:-1]
+
+ return self._parserevs(out)
+
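+    # Illustrative comment (not upstream code): each entry returned by
+    # log() above is the revision named tuple defined at the top of this
+    # module.
+    #
+    #   for rev in client.log(files=[b('setup.py')], limit=5):
+    #       print(rev.rev, rev.node[:12], rev.author, rev.desc)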
+ def manifest(self, rev=None, all=False):
+ """Yields (nodeid, permission, executable, symlink, file path) tuples
+ for version controlled files for the given revision. If no
+ revision is given, the first parent of the working directory
+ is used, or the null revision if no revision is checked out.
+
+ When all is True, all files from all revisions are yielded
+ (just the name). This includes deleted and renamed files.
+
+ """
+ args = cmdbuilder(b('manifest'), r=rev, all=all, debug=True,
+ hidden=self.hidden)
+
+ out = self.rawcommand(args)
+
+ if all:
+ for line in out.splitlines():
+ yield line
+ else:
+ for line in out.splitlines():
+ node = line[0:40]
+ perm = line[41:44]
+ symlink = line[45:46] == b('@')
+ executable = line[45:46] == b('*')
+ yield node, perm, executable, symlink, line[47:]
+
+ def merge(self, rev=None, force=False, tool=None, cb=merge.handlers.abort):
+ """Merge working directory with rev. If no revision is specified, the
+ working directory's parent is a head revision, and the current
+ branch contains exactly one other head, the other head is
+ merged with by default.
+
+ The current working directory is updated with all changes made in the
+ requested revision since the last common predecessor revision.
+
+ Files that changed between either parent are marked as changed for the
+ next commit and a commit must be performed before any further updates to
+ the repository are allowed. The next commit will have two parents.
+
+ force - force a merge with outstanding changes
+ tool - can be used to specify the merge tool used for file merges. It
+ overrides the HGMERGE environment variable and your configuration files.
+
+ cb - controls the behaviour when Mercurial prompts what to do
+ with regard to a specific file, e.g. when one parent modified
+ a file and the other removed it. It can be one of
+ merge.handlers, or a function that gets a single argument
+ which are the contents of stdout. It should return one of the
+ expected choices (a single character).
+
+ """
+ # we can't really use --preview since merge doesn't support --template
+ args = cmdbuilder(b('merge'), r=rev, f=force, t=tool)
+
+ prompt = None
+ if cb is merge.handlers.abort:
+ prompt = cb
+ elif cb is merge.handlers.noninteractive:
+ args.append(b('-y'))
+ else:
+ prompt = lambda size, output: cb(output) + b('\n')
+
+ self.rawcommand(args, prompt=prompt)
+
+ def move(self, source, dest, after=False, force=False, dryrun=False,
+ include=None, exclude=None):
+ """Mark dest as copies of source; mark source for deletion. If dest
+        is a directory, copies are put in that directory. If dest is a
+        file, then source must be a single file path (not a list).
+
+ Returns True on success, False if errors are encountered.
+
+ source - a file or a list of files
+ dest - a destination file or directory
+ after - record a rename that has already occurred
+ force - forcibly copy over an existing managed file
+ dryrun - do not perform actions, just print output
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ """
+ if not isinstance(source, list):
+ source = [source]
+
+ source.append(dest)
+ args = cmdbuilder(b('move'), A=after, f=force, n=dryrun,
+ I=include, X=exclude, *source)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def outgoing(self, revrange=None, path=None, force=False, newest=False,
+ bookmarks=False, branch=None, limit=None, nomerges=False,
+ subrepos=False):
+ """Return changesets not found in the specified path or the default push
+ location.
+
+ When bookmarks=True, return a list of (name, node) of
+ bookmarks that will be pushed.
+
+        revrange - a (list of) changeset intended to be included in
+        the destination
+        force - run even when the destination is unrelated
+        newest - show newest record first
+        branch - a specific branch you would like to push
+        limit - limit number of changes displayed
+        nomerges - do not show merges
+        subrepos - recurse into subrepositories
+
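+        Example (an illustrative sketch, assuming a configured default
+        push location):
+
+            for rev in client.outgoing(limit=10):
+                print(rev.node)
+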
+ """
+ args = cmdbuilder(b('outgoing'),
+ path,
+ template=templates.changeset, r=revrange,
+ f=force, n=newest, B=bookmarks,
+                          b=branch, l=limit, M=nomerges, S=subrepos)
+
+ def eh(ret, out, err):
+ if ret != 1:
+ raise error.CommandError(args, ret, out, err)
+
+ out = self.rawcommand(args, eh=eh)
+ if not out:
+ return []
+
+ out = util.eatlines(out, 2)
+ if bookmarks:
+ bms = []
+ for line in out.splitlines():
+ bms.append(tuple(line.split()))
+ return bms
+ else:
+ out = out.split(b('\0'))[:-1]
+ return self._parserevs(out)
+
+ def parents(self, rev=None, file=None):
+ """Return the working directory's parent revisions. If rev is given,
+        the parents of that revision will be returned. If file is given,
+ the revision in which the file was last changed (before the
+ working directory revision or the revision specified by rev)
+ is returned.
+
+ """
+ args = cmdbuilder(b('parents'), file, template=templates.changeset,
+ r=rev, hidden=self.hidden)
+
+ out = self.rawcommand(args)
+ if not out:
+ return
+
+ out = out.split(b('\0'))[:-1]
+
+ return self._parserevs(out)
+
+ def paths(self, name=None):
+ """
+ Return the definition of given symbolic path name. If no name is given,
+ return a dictionary of pathname : url of all available names.
+
+ Path names are defined in the [paths] section of your configuration file
+ and in "/etc/mercurial/hgrc". If run inside a repository, ".hg/hgrc" is
+ used, too.
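+
+        Example (an illustrative sketch; the URL shown is hypothetical):
+
+            urls = client.paths()   # e.g. {b'default': b'ssh://host/repo'}
+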
+ """
+ if not name:
+ out = self.rawcommand([b('paths')])
+ if not out:
+ return {}
+
+ return dict([s.split(b(' = '))
+ for s in out.rstrip().split(b('\n'))])
+ else:
+ args = cmdbuilder(b('paths'), name)
+ out = self.rawcommand(args)
+ return out.rstrip()
+
+ def pull(self, source=None, rev=None, update=False, force=False,
+ bookmark=None, branch=None, ssh=None, remotecmd=None,
+ insecure=False, tool=None):
+ """Pull changes from a remote repository.
+
+ This finds all changes from the repository specified by source
+ and adds them to this repository. If source is omitted, the
+ 'default' path will be used. By default, this does not update
+ the copy of the project in the working directory.
+
+ Returns True on success, False if update was given and there were
+ unresolved files.
+
+ update - update to new branch head if changesets were pulled
+ force - run even when remote repository is unrelated
+ rev - a (list of) remote changeset intended to be added
+ bookmark - (list of) bookmark to pull
+ branch - a (list of) specific branch you would like to pull
+ ssh - specify ssh command to use
+ remotecmd - specify hg command to run on the remote side
+ insecure - do not verify server certificate (ignoring
+ web.cacerts config)
+ tool - specify merge tool for rebase
+
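+        Example (an illustrative sketch):
+
+            if not client.pull(update=True):
+                print('update left unresolved files behind')
+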
+ """
+ args = cmdbuilder(b('pull'), source, r=rev, u=update, f=force,
+ B=bookmark, b=branch, e=ssh,
+ remotecmd=remotecmd, insecure=insecure,
+ t=tool)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def push(self, dest=None, rev=None, force=False, bookmark=None, branch=None,
+ newbranch=False, ssh=None, remotecmd=None, insecure=False):
+ """Push changesets from this repository to the specified destination.
+
+ This operation is symmetrical to pull: it is identical to a pull in the
+ destination repository from the current one.
+
+ Returns True if push was successful, False if nothing to push.
+
+ rev - the (list of) specified revision and all its ancestors
+ will be pushed to the remote repository.
+
+ force - override the default behavior and push all changesets on all
+ branches.
+
+ bookmark - (list of) bookmark to push
+ branch - a (list of) specific branch you would like to push
+ newbranch - allows push to create a new named branch that is
+ not present at the destination. This allows you to only create
+ a new branch without forcing other changes.
+
+ ssh - specify ssh command to use
+ remotecmd - specify hg command to run on the remote side
+ insecure - do not verify server certificate (ignoring
+ web.cacerts config)
+
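+        Example (an illustrative sketch):
+
+            if not client.push(newbranch=True):
+                print('nothing to push')
+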
+ """
+ args = cmdbuilder(b('push'), dest, r=rev, f=force, B=bookmark, b=branch,
+ new_branch=newbranch, e=ssh, remotecmd=remotecmd,
+ insecure=insecure)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def remove(self, files, after=False, force=False, include=None,
+ exclude=None):
+ """Schedule the indicated files for removal from the repository. This
+ only removes files from the current branch, not from the
+ entire project history.
+
+ Returns True on success, False if any warnings encountered.
+
+ after - used to remove only files that have already been deleted
+ force - remove (and delete) file even if added or modified
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+
+ """
+ if not isinstance(files, list):
+ files = [files]
+
+ args = cmdbuilder(b('remove'), A=after, f=force, I=include, X=exclude,
+ *files)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def resolve(self, file=[], all=False, listfiles=False, mark=False,
+ unmark=False, tool=None, include=None, exclude=None):
+ """
+ Redo merges or set/view the merge status of given files.
+
+        Raises CommandError if any files fail a resolve attempt.
+
+ When listfiles is True, returns a list of (code, file path) of resolved
+ and unresolved files. Code will be 'R' or 'U' accordingly.
+
+ all - select all unresolved files
+ mark - mark files as resolved
+ unmark - mark files as unresolved
+ tool - specify merge tool
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+ """
+ if not isinstance(file, list):
+ file = [file]
+
+ args = cmdbuilder(b('resolve'), a=all, l=listfiles, m=mark, u=unmark,
+ t=tool, I=include, X=exclude, *file)
+
+ out = self.rawcommand(args)
+
+ if listfiles:
+ l = []
+ for line in out.splitlines():
+ l.append(tuple(line.split(b(' '), 1)))
+ return l
+
+ def revert(self, files, rev=None, all=False, date=None, nobackup=False,
+ dryrun=False, include=None, exclude=None):
+ """With no revision specified, revert the specified files or
+ directories to the contents they had in the parent of the
+ working directory. This restores the contents of files to an
+ unmodified state and unschedules adds, removes, copies, and
+ renames. If the working directory has two parents, you must
+ explicitly specify a revision.
+
+ Specifying rev or date will revert the given files or
+ directories to their states as of a specific revision. Because
+ revert does not change the working directory parents, this
+ will cause these files to appear modified. This can be helpful
+ to "back out" some or all of an earlier change.
+
+ Modified files are saved with a .orig suffix before reverting.
+ To disable these backups, use nobackup.
+
+ Returns True on success.
+
+ all - revert all changes when no arguments given
+ date - tipmost revision matching date
+ rev - revert to the specified revision
+ nobackup - do not save backup copies of files
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
+ dryrun - do not perform actions, just print output
+
+ """
+ if not isinstance(files, list):
+ files = [files]
+
+ args = cmdbuilder(b('revert'), r=rev, a=all, d=date,
+ no_backup=nobackup, n=dryrun, I=include, X=exclude,
+ hidden=self.hidden, *files)
+
+ eh = util.reterrorhandler(args)
+ self.rawcommand(args, eh=eh)
+
+ return bool(eh)
+
+ def root(self):
+ """
+ Return the root directory of the current repository.
+ """
+ return self.rawcommand([b('root')]).rstrip()
+
+ def status(self, rev=None, change=None, all=False, modified=False,
+ added=False, removed=False, deleted=False, clean=False,
+ unknown=False, ignored=False, copies=False,
+ subrepos=False, include=None, exclude=None):
+ """
+ Return status of files in the repository as a list of (code, file path)
+ where code can be:
+
+ M = modified
+ A = added
+ R = removed
+ C = clean
+ ! = missing (deleted by non-hg command, but still tracked)
+ ? = untracked
+ I = ignored
+        ' ' = origin of the previous file listed as A (added)
+
+ rev - show difference from (list of) revision
+ change - list the changed files of a revision
+ all - show status of all files
+ modified - show only modified files
+ added - show only added files
+ removed - show only removed files
+ deleted - show only deleted (but tracked) files
+ clean - show only files without changes
+ unknown - show only unknown (not tracked) files
+ ignored - show only ignored files
+ copies - show source of copied files
+ subrepos - recurse into subrepositories
+ include - include names matching the given patterns
+ exclude - exclude names matching the given patterns
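+
+        Example (an illustrative sketch; the path shown is hypothetical):
+
+            for code, path in client.status(modified=True, added=True):
+                print(code, path)   # e.g. b'M' b'hglib/client.py'
+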
+ """
+ if rev and change:
+ raise ValueError('cannot specify both rev and change')
+
+ args = cmdbuilder(b('status'), rev=rev, change=change, A=all,
+ m=modified, a=added, r=removed, d=deleted, c=clean,
+ u=unknown, i=ignored, C=copies, S=subrepos, I=include,
+ X=exclude, hidden=self.hidden)
+
+ args.append(b('-0'))
+
+ out = self.rawcommand(args)
+ l = []
+
+ for entry in out.split(b('\0')):
+ if entry:
+ if entry[0:1] == b(' '):
+ l.append((b(' '), entry[2:]))
+ else:
+ l.append(tuple(entry.split(b(' '), 1)))
+
+ return l
+
+ def tag(self, names, rev=None, message=None, force=False, local=False,
+ remove=False, date=None, user=None):
+ """Add one or more tags specified by names for the current or given
+ revision.
+
+ Changing an existing tag is normally disallowed; use force to override.
+
+ Tag commits are usually made at the head of a branch. If the
+ parent of the working directory is not a branch head, a
+ CommandError will be raised. force can be specified to force
+ the tag commit to be based on a non-head changeset.
+
+ local - make the tag local
+ rev - revision to tag
+ remove - remove a tag
+ message - set commit message
+ date - record the specified date as commit date
+ user - record the specified user as committer
+
+ """
+ if not isinstance(names, list):
+ names = [names]
+
+ args = cmdbuilder(b('tag'), r=rev, m=message, f=force, l=local,
+ remove=remove, d=date, u=user, hidden=self.hidden,
+ *names)
+
+ self.rawcommand(args)
+
+ def tags(self):
+ """
+ Return a list of repository tags as: (name, rev, node, islocal)
+ """
+ args = cmdbuilder(b('tags'), v=True)
+
+ out = self.rawcommand(args)
+
+ t = []
+ for line in out.splitlines():
+ taglocal = line.endswith(b(' local'))
+ if taglocal:
+ line = line[:-6]
+ name, rev = line.rsplit(b(' '), 1)
+ rev, node = rev.split(b(':'))
+ t.append((name.rstrip(), int(rev), node, taglocal))
+ return t
+
+ def phase(self, revs=(), secret=False, draft=False, public=False,
+ force=False):
+ '''Set or show the current phase name.
+
+ revs - target revision(s)
+ public - set changeset phase to public
+ draft - set changeset phase to draft
+ secret - set changeset phase to secret
+ force - allow to move boundary backward
+
+ output format: [(id, phase) ...] for each changeset
+
+ The arguments match the mercurial API.
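+
+        Example (an illustrative sketch):
+
+            client.phase(b('.'), draft=True, force=True)  # demote to draft
+            print(client.phase(b('.')))                   # e.g. [(3, b'draft')]
+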
+ '''
+ if not isinstance(revs, (list, tuple)):
+ revs = [revs]
+ args = util.cmdbuilder(b('phase'), secret=secret, draft=draft,
+ public=public, force=force,
+ hidden=self.hidden, *revs)
+ out = self.rawcommand(args)
+ if draft or public or secret:
+ return
+ else:
+            output = [i.split(b(': ')) for i in out.strip().split(b('\n'))]
+ return [(int(num), phase) for (num, phase) in output]
+
+ def summary(self, remote=False):
+ """
+ Return a dictionary with a brief summary of the working directory state,
+ including parents, branch, commit status, and available updates.
+
+ 'parent' : a list of (rev, node, tags, message)
+ 'branch' : the current branch
+ 'commit' : True if the working directory is clean, False otherwise
+ 'update' : number of available updates,
+ ['remote' : (in, in bookmarks, out, out bookmarks),]
+ ['mq': (applied, unapplied) mq patches,]
+
+        Unparsed entries will be of the form key : value.
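+
+        Example (an illustrative sketch):
+
+            s = client.summary()
+            if not s[b('commit')]:
+                print('working directory has uncommitted changes')
+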
+ """
+ args = cmdbuilder(b('summary'), remote=remote, hidden=self.hidden)
+
+ out = self.rawcommand(args).splitlines()
+
+ d = {}
+ while out:
+ line = out.pop(0)
+ name, value = line.split(b(': '), 1)
+
+ if name == b('parent'):
+ parent, tags = value.split(b(' '), 1)
+ rev, node = parent.split(b(':'))
+
+ if tags:
+ tags = tags.replace(b(' (empty repository)'), b(''))
+ else:
+ tags = None
+
+ value = d.get(name, [])
+
+ if rev == b('-1'):
+ value.append((int(rev), node, tags, None))
+ else:
+ message = out.pop(0)[1:]
+ value.append((int(rev), node, tags, message))
+ elif name == b('branch'):
+ pass
+ elif name == b('commit'):
+ value = value == b('(clean)')
+ elif name == b('update'):
+ if value == b('(current)'):
+ value = 0
+ else:
+ value = int(value.split(b(' '), 1)[0])
+ elif remote and name == b('remote'):
+ if value == b('(synced)'):
+ value = 0, 0, 0, 0
+ else:
+ inc = incb = out_ = outb = 0
+
+ for v in value.split(b(', ')):
+ count, v = v.split(b(' '), 1)
+ if v == b('outgoing'):
+ out_ = int(count)
+ elif v.endswith(b('incoming')):
+ inc = int(count)
+ elif v == b('incoming bookmarks'):
+ incb = int(count)
+ elif v == b('outgoing bookmarks'):
+ outb = int(count)
+
+ value = inc, incb, out_, outb
+ elif name == b('mq'):
+ applied = unapplied = 0
+ for v in value.split(b(', ')):
+ count, v = v.split(b(' '), 1)
+ if v == b('applied'):
+ applied = int(count)
+ elif v == b('unapplied'):
+ unapplied = int(count)
+ value = applied, unapplied
+
+ d[name] = value
+
+ return d
+
+ def tip(self):
+ """
+ Return the tip revision (usually just called the tip) which is the
+ changeset most recently added to the repository (and therefore the most
+ recently changed head).
+ """
+ args = cmdbuilder(b('tip'), template=templates.changeset,
+ hidden=self.hidden)
+ out = self.rawcommand(args)
+ out = out.split(b('\0'))
+
+ return self._parserevs(out)[0]
+
+ def update(self, rev=None, clean=False, check=False, date=None):
+ """
+ Update the repository's working directory to changeset specified by rev.
+ If rev isn't specified, update to the tip of the current named branch.
+
+ Return the number of files (updated, merged, removed, unresolved)
+
+ clean - discard uncommitted changes (no backup)
+ check - update across branches if no uncommitted changes
+ date - tipmost revision matching date
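+
+        Example (an illustrative sketch; b('default') is a hypothetical
+        branch name):
+
+            updated, merged, removed, unresolved = client.update(b('default'))
+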
+ """
+ if clean and check:
+ raise ValueError('clean and check cannot both be True')
+
+ args = cmdbuilder(b('update'), r=rev, C=clean, c=check, d=date,
+ hidden=self.hidden)
+
+ def eh(ret, out, err):
+ if ret == 1:
+ return out
+
+ raise error.CommandError(args, ret, out, err)
+
+ out = self.rawcommand(args, eh=eh)
+
+ m = re.search(b(r'^(\d+).+, (\d+).+, (\d+).+, (\d+)'), out,
+ re.MULTILINE)
+ return tuple(map(int, list(m.groups())))
+
+ @property
+ def version(self):
+ """Return hg version that runs the command server as a 4 fielded
+ tuple: major, minor, micro and local build info. e.g. (1, 9,
+ 1, '+4-3095db9f5c2c')
+ """
+ if self._version is None:
+ v = self.rawcommand(cmdbuilder(b('version'), q=True))
+ v = list(re.match(b(r'.*?(\d+)\.(\d+)\.?(\d+)?(\+[0-9a-f-]+)?'),
+ v).groups())
+
+ for i in range(3):
+ try:
+ v[i] = int(v[i])
+ except TypeError:
+ v[i] = 0
+
+ self._version = tuple(v)
+
+ return self._version
+
+ def __getitem__(self, changeid):
+ try:
+ return context.changectx(self, changeid)
+ except ValueError as e:
+ raise KeyError(*e.args)
+
+ def __contains__(self, changeid):
+ """
+ check if changeid, which can be either a local revision number or a
+ changeset id, matches a changeset in the client.
+ """
+ try:
+ context.changectx(self, changeid)
+ return True
+ except ValueError:
+ return False
diff --git a/third_party/python/python-hglib/hglib/context.py b/third_party/python/python-hglib/hglib/context.py
new file mode 100644
index 0000000000..3ba9abb890
--- /dev/null
+++ b/third_party/python/python-hglib/hglib/context.py
@@ -0,0 +1,238 @@
+import hglib.client # Circular dependency.
+from hglib import util, templates
+from hglib.error import CommandError
+from hglib.util import b, strtobytes, integertypes
+
+_nullcset = [b('-1'), b('0000000000000000000000000000000000000000'), b(''),
+ b(''), b(''), b(''), b('')]
+
+class changectx(object):
+ """A changecontext object makes access to data related to a particular
+ changeset convenient."""
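+
+    # Example (an illustrative sketch, assuming an open hglib client):
+    #
+    #     ctx = client[b('tip')]    # via hgclient.__getitem__
+    #     ctx.branch(), ctx.author(), ctx.files()
+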
+ def __init__(self, repo, changeid=b('')):
+ """changeid is a revision number, node, or tag"""
+ if changeid == b(''):
+ changeid = b('.')
+ self._repo = repo
+ if isinstance(changeid, hglib.client.revision):
+ cset = changeid
+ elif changeid == -1:
+ cset = _nullcset
+ else:
+ if isinstance(changeid, integertypes):
+ changeid = b('rev(') + strtobytes(changeid) + b(')')
+
+ notfound = False
+ try:
+ cset = self._repo.log(changeid)
+ # hg bbf4f3dfd700 gave a null result for tip+1
+ if (cset and cset[0][1] == _nullcset[1]
+ and cset[0][0] != _nullcset[0]):
+ notfound = True
+ except CommandError:
+ notfound = True
+
+ if notfound or not len(cset):
+ raise ValueError('changeid %r not found in repo' % changeid)
+ if len(cset) > 1:
+ raise ValueError('changeid must yield a single changeset')
+ cset = cset[0]
+
+ self._rev, self._node, self._tags = cset[:3]
+ self._branch, self._author, self._description, self._date = cset[3:]
+
+ self._rev = int(self._rev)
+
+ self._tags = self._tags.split()
+ try:
+ self._tags.remove(b('tip'))
+ except ValueError:
+ pass
+
+ self._ignored = None
+ self._clean = None
+
+ def __str__(self):
+ return self._node[:12].decode('latin-1')
+
+ def __int__(self):
+ return self._rev
+
+ def __repr__(self):
+ return "<changectx %s>" % str(self)
+
+ def __hash__(self):
+ try:
+ return hash(self._rev)
+ except AttributeError:
+ return id(self)
+
+ def __eq__(self, other):
+ try:
+ return self._rev == other._rev
+ except AttributeError:
+ return False
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __nonzero__(self):
+ return self._rev != -1
+
+ def __bool__(self):
+ return self.__nonzero__()
+
+ def __contains__(self, key):
+ return key in self._manifest
+
+ def __iter__(self):
+ for f in sorted(self._manifest):
+ yield f
+
+ @util.propertycache
+ def _status(self):
+ return self._parsestatus(self._repo.status(change=strtobytes(self)))[:4]
+
+ def _parsestatus(self, stat):
+ d = dict((c, [])
+ for c in (b('M'), b('A'), b('R'), b('!'), b('?'), b('I'),
+ b('C'), b(' ')))
+ for k, path in stat:
+ d[k].append(path)
+ return (d[b('M')], d[b('A')], d[b('R')], d[b('!')], d[b('?')],
+ d[b('I')], d[b('C')])
+
+ def status(self, ignored=False, clean=False):
+ """Explicit status query
+ Unless this method is used to query the working copy status, the
+ _status property will implicitly read the status using its default
+ arguments."""
+ stat = self._parsestatus(self._repo.status(change=strtobytes(self),
+ ignored=ignored,
+ clean=clean))
+ self._unknown = self._ignored = self._clean = None
+ if ignored:
+ self._ignored = stat[5]
+ if clean:
+ self._clean = stat[6]
+ self._status = stat[:4]
+ return stat
+
+ def rev(self):
+ return self._rev
+
+ def node(self):
+ return self._node
+
+ def tags(self):
+ return self._tags
+
+ def branch(self):
+ return self._branch
+
+ def author(self):
+ return self._author
+
+ def user(self):
+ return self._author
+
+ def date(self):
+ return self._date
+
+ def description(self):
+ return self._description
+
+ def files(self):
+ return sorted(self._status[0] + self._status[1] + self._status[2])
+
+ def modified(self):
+ return self._status[0]
+
+ def added(self):
+ return self._status[1]
+
+ def removed(self):
+ return self._status[2]
+
+ def ignored(self):
+ if self._ignored is None:
+ self.status(ignored=True)
+ return self._ignored
+
+ def clean(self):
+ if self._clean is None:
+ self.status(clean=True)
+ return self._clean
+
+ @util.propertycache
+ def _manifest(self):
+ d = {}
+ for node, p, e, s, path in self._repo.manifest(rev=strtobytes(self)):
+ d[path] = node
+ return d
+
+ def manifest(self):
+ return self._manifest
+
+    def hex(self):
+        # self._node is already the hex-encoded node string (hg log emits
+        # hex node ids), so return it directly; calling the hex() builtin
+        # on bytes would raise a TypeError.
+        return self._node
+
+ @util.propertycache
+ def _parents(self):
+ """return contexts for each parent changeset"""
+ par = self._repo.parents(rev=strtobytes(self))
+ if not par:
+ return [changectx(self._repo, -1)]
+ return [changectx(self._repo, int(cset.rev)) for cset in par]
+
+ def parents(self):
+ return self._parents
+
+ def p1(self):
+ return self._parents[0]
+
+ def p2(self):
+ if len(self._parents) == 2:
+ return self._parents[1]
+ return changectx(self._repo, -1)
+
+ @util.propertycache
+ def _bookmarks(self):
+ books = [bm for bm in self._repo.bookmarks()[0] if bm[1] == self._rev]
+
+ bms = []
+ for name, r, n in books:
+ bms.append(name)
+ return bms
+
+ def bookmarks(self):
+ return self._bookmarks
+
+ def hidden(self):
+ """return True if the changeset is hidden, else False"""
+ return bool(self._repo.log(revrange=self._node + b(' and hidden()'),
+ hidden=True))
+
+ def phase(self):
+ """return the phase of the changeset (public, draft or secret)"""
+ return self._repo.phase(strtobytes(self._rev))[0][1]
+
+ def children(self):
+ """return contexts for each child changeset"""
+ for c in self._repo.log(b('children(') + self._node + b(')')):
+ yield changectx(self._repo, c)
+
+ def ancestors(self):
+ for a in self._repo.log(b('ancestors(') + self._node + b(')')):
+ yield changectx(self._repo, a)
+
+ def descendants(self):
+ for d in self._repo.log(b('descendants(') + self._node + b(')')):
+ yield changectx(self._repo, d)
+
+ def ancestor(self, c2):
+ """
+ return the ancestor context of self and c2
+ """
+        return changectx(self._repo,
+                         b('ancestor(') + self._node + b(', ')
+                         + c2._node + b(')'))
diff --git a/third_party/python/python-hglib/hglib/error.py b/third_party/python/python-hglib/hglib/error.py
new file mode 100644
index 0000000000..e0652dc74d
--- /dev/null
+++ b/third_party/python/python-hglib/hglib/error.py
@@ -0,0 +1,18 @@
+class CommandError(Exception):
+ def __init__(self, args, ret, out, err):
+ self.args = args
+ self.ret = ret
+ self.out = out
+ self.err = err
+
+ def __str__(self):
+ return str((self.ret, self.out.rstrip(), self.err.rstrip()))
+
+class ServerError(Exception):
+ pass
+
+class ResponseError(ServerError, ValueError):
+ pass
+
+class CapabilityError(ServerError):
+ pass
diff --git a/third_party/python/python-hglib/hglib/merge.py b/third_party/python/python-hglib/hglib/merge.py
new file mode 100644
index 0000000000..88bc99d993
--- /dev/null
+++ b/third_party/python/python-hglib/hglib/merge.py
@@ -0,0 +1,21 @@
+from hglib.util import b
+
+class handlers(object):
+ """
+ These can be used as the cb argument to hgclient.merge() to control the
+ behaviour when Mercurial prompts what to do with regard to a specific file,
+ e.g. when one parent modified a file and the other removed it.
+ """
+
+ @staticmethod
+ def abort(size, output):
+ """
+ Abort the merge if a prompt appears.
+ """
+ return b('')
+
+ """
+ This corresponds to Mercurial's -y/--noninteractive global option, which
+ picks the first choice on all prompts.
+ """
+ noninteractive = 'yes'
diff --git a/third_party/python/python-hglib/hglib/templates.py b/third_party/python/python-hglib/hglib/templates.py
new file mode 100644
index 0000000000..f91ee466a7
--- /dev/null
+++ b/third_party/python/python-hglib/hglib/templates.py
@@ -0,0 +1,4 @@
+from hglib.util import b
+
+changeset = b('{rev}\\0{node}\\0{tags}\\0{branch}\\0{author}'
+ '\\0{desc}\\0{date}\\0')
diff --git a/third_party/python/python-hglib/hglib/util.py b/third_party/python/python-hglib/hglib/util.py
new file mode 100644
index 0000000000..b4bfe731f9
--- /dev/null
+++ b/third_party/python/python-hglib/hglib/util.py
@@ -0,0 +1,217 @@
+import os, subprocess, sys
+from hglib import error
+try:
+ from io import BytesIO
+except ImportError:
+ from cStringIO import StringIO as BytesIO
+
+if sys.version_info[0] > 2:
+ izip = zip
+ integertypes = (int,)
+
+ def b(s):
+ """Encode the string as bytes."""
+ return s.encode('latin-1')
+else:
+ from itertools import izip
+ integertypes = (long, int)
+ bytes = str # Defined in Python 2.6/2.7, but to the same value.
+
+ def b(s):
+ """Encode the string as bytes."""
+ return s
+
+def strtobytes(s):
+ """Return the bytes of the string representation of an object."""
+ return str(s).encode('latin-1')
+
+def grouper(n, iterable):
+ ''' list(grouper(2, range(4))) -> [(0, 1), (2, 3)] '''
+ args = [iter(iterable)] * n
+ return izip(*args)
+
+def eatlines(s, n):
+ """
+ >>> eatlines(b("1\\n2"), 1) == b('2')
+ True
+ >>> eatlines(b("1\\n2"), 2) == b('')
+ True
+ >>> eatlines(b("1\\n2"), 3) == b('')
+ True
+ >>> eatlines(b("1\\n2\\n3"), 1) == b('2\\n3')
+ True
+ """
+ cs = BytesIO(s)
+
+ for line in cs:
+ n -= 1
+ if n == 0:
+ return cs.read()
+ return b('')
+
+def skiplines(s, prefix):
+ """
+ Skip lines starting with prefix in s
+
+ >>> skiplines(b('a\\nb\\na\\n'), b('a')) == b('b\\na\\n')
+ True
+ >>> skiplines(b('a\\na\\n'), b('a')) == b('')
+ True
+ >>> skiplines(b(''), b('a')) == b('')
+ True
+ >>> skiplines(b('a\\nb'), b('b')) == b('a\\nb')
+ True
+ """
+ cs = BytesIO(s)
+
+ for line in cs:
+ if not line.startswith(prefix):
+ return line + cs.read()
+
+ return b('')
+
+def _cmdval(val):
+ if isinstance(val, bytes):
+ return val
+ else:
+ return strtobytes(val)
+
+def cmdbuilder(name, *args, **kwargs):
+ """
+ A helper for building the command arguments
+
+ args are the positional arguments
+
+ kwargs are the options
+ keys that are single lettered are prepended with '-', others with '--',
+ underscores are replaced with dashes
+
+ keys with False boolean values are ignored, lists add the key multiple times
+
+ None arguments are skipped
+
+ >>> cmdbuilder(b('cmd'), a=True, b=False, c=None) == [b('cmd'), b('-a')]
+ True
+ >>> cmdbuilder(b('cmd'), long=True) == [b('cmd'), b('--long')]
+ True
+ >>> cmdbuilder(b('cmd'), str=b('s')) == [b('cmd'), b('--str'), b('s')]
+ True
+ >>> cmdbuilder(b('cmd'), d_ash=True) == [b('cmd'), b('--d-ash')]
+ True
+ >>> cmdbuilder(b('cmd'), _=True) == [b('cmd'), b('-')]
+ True
+ >>> expect = [b('cmd'), b('--list'), b('1'), b('--list'), b('2')]
+ >>> cmdbuilder(b('cmd'), list=[1, 2]) == expect
+ True
+ >>> cmdbuilder(b('cmd'), None) == [b('cmd')]
+ True
+ """
+ cmd = [name]
+ for arg, val in kwargs.items():
+ if val is None:
+ continue
+
+ arg = arg.encode('latin-1').replace(b('_'), b('-'))
+ if arg != b('-'):
+ if len(arg) == 1:
+ arg = b('-') + arg
+ else:
+ arg = b('--') + arg
+ if isinstance(val, bool):
+ if val:
+ cmd.append(arg)
+ elif isinstance(val, list):
+ for v in val:
+ cmd.append(arg)
+ cmd.append(_cmdval(v))
+ else:
+ cmd.append(arg)
+ cmd.append(_cmdval(val))
+
+ for a in args:
+ if a is not None:
+ cmd.append(a)
+
+ return cmd
+
+class reterrorhandler(object):
+ """This class is meant to be used with rawcommand() error handler
+ argument. It remembers the return value the command returned if
+ it's one of allowed values, which is only 1 if none are given.
+ Otherwise it raises a CommandError.
+
+ >>> e = reterrorhandler('')
+ >>> bool(e)
+ True
+ >>> e(1, 'a', '')
+ 'a'
+ >>> bool(e)
+ False
+
+ """
+ def __init__(self, args, allowed=None):
+ self.args = args
+ self.ret = 0
+ if allowed is None:
+ self.allowed = [1]
+ else:
+ self.allowed = allowed
+
+ def __call__(self, ret, out, err):
+ self.ret = ret
+ if ret not in self.allowed:
+ raise error.CommandError(self.args, ret, out, err)
+ return out
+
+ def __nonzero__(self):
+ """ Returns True if the return code was 0, False otherwise """
+ return self.ret == 0
+
+ def __bool__(self):
+ return self.__nonzero__()
+
+class propertycache(object):
+ """
+ Decorator that remembers the return value of a function call.
+
+ >>> execcount = 0
+ >>> class obj(object):
+ ... def func(self):
+ ... global execcount
+ ... execcount += 1
+ ... return []
+ ... func = propertycache(func)
+ >>> o = obj()
+ >>> o.func
+ []
+ >>> execcount
+ 1
+ >>> o.func
+ []
+ >>> execcount
+ 1
+ """
+ def __init__(self, func):
+ self.func = func
+ self.name = func.__name__
+ def __get__(self, obj, type=None):
+ result = self.func(obj)
+ setattr(obj, self.name, result)
+ return result
+
+close_fds = os.name == 'posix'
+
+startupinfo = None
+if os.name == 'nt':
+ startupinfo = subprocess.STARTUPINFO()
+ startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
+
+def popen(args, env=None):
+ environ = None
+ if env:
+ environ = dict(os.environ)
+ environ.update(env)
+
+ return subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE, close_fds=close_fds,
+ startupinfo=startupinfo, env=environ)
diff --git a/third_party/python/python-hglib/setup.py b/third_party/python/python-hglib/setup.py
new file mode 100644
index 0000000000..f565ae7f98
--- /dev/null
+++ b/third_party/python/python-hglib/setup.py
@@ -0,0 +1,54 @@
+import os, time
+from distutils.core import setup
+
+# query Mercurial for version number, or pull from PKG-INFO
+version = 'unknown'
+if os.path.isdir('.hg'):
+ cmd = "hg id -i -t"
+ l = os.popen(cmd).read().split()
+ while len(l) > 1 and l[-1][0].isalpha(): # remove non-numbered tags
+ l.pop()
+ if len(l) > 1: # tag found
+ version = l[-1]
+ if l[0].endswith('+'): # propagate the dirty status to the tag
+ version += '+'
+ elif len(l) == 1: # no tag found
+ cmd = 'hg parents --template "{latesttag}+{latesttagdistance}-"'
+ version = os.popen(cmd).read() + l[0]
+ if version.endswith('+'):
+ version += time.strftime('%Y%m%d')
+elif os.path.exists('.hg_archival.txt'):
+ kw = dict([[t.strip() for t in l.split(':', 1)]
+ for l in open('.hg_archival.txt')])
+ if 'tag' in kw:
+ version = kw['tag']
+ elif 'latesttag' in kw:
+ version = '%(latesttag)s+%(latesttagdistance)s-%(node).12s' % kw
+ else:
+ version = kw.get('node', '')[:12]
+elif os.path.exists('PKG-INFO'):
+ kw = dict([[t.strip() for t in l.split(':', 1)]
+ for l in open('PKG-INFO') if ':' in l])
+ version = kw.get('Version', version)
+
+setup(
+ name='python-hglib',
+ version=version,
+ author='Idan Kamara',
+ author_email='idankk86@gmail.com',
+ url='http://selenic.com/repo/python-hglib',
+ description='Mercurial Python library',
+ long_description=open(os.path.join(os.path.dirname(__file__),
+ 'README')).read(),
+ classifiers=[
+ 'Programming Language :: Python',
+ 'Programming Language :: Python :: 2.4',
+ 'Programming Language :: Python :: 2.5',
+ 'Programming Language :: Python :: 2.6',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Programming Language :: Python :: 3.4',
+ ],
+ license='MIT',
+ packages=['hglib'])
diff --git a/third_party/python/python-hglib/test.py b/third_party/python/python-hglib/test.py
new file mode 100644
index 0000000000..e0b4021f45
--- /dev/null
+++ b/third_party/python/python-hglib/test.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+import nose
+from tests import with_hg
+
+if __name__ == '__main__':
+ nose.main(addplugins=[with_hg.WithHgPlugin()])
diff --git a/third_party/python/redo/redo-2.0.3.dist-info/AUTHORS b/third_party/python/redo/redo-2.0.3.dist-info/AUTHORS
new file mode 100644
index 0000000000..b2e24333c0
--- /dev/null
+++ b/third_party/python/redo/redo-2.0.3.dist-info/AUTHORS
@@ -0,0 +1,7 @@
+Rail Aliiev (https://github.com/rail)
+Chris AtLee (https://github.com/catlee)
+Ben Hearsum (https://github.com/bhearsum)
+John Hopkins (https://github.com/jhopkinsmoz)
+Justin Wood (https://github.com/callek)
+Terry Chia (https://github.com/Ayrx)
+Mr. Deathless (https://github.com/mrdeathless)
diff --git a/third_party/python/redo/redo-2.0.3.dist-info/METADATA b/third_party/python/redo/redo-2.0.3.dist-info/METADATA
new file mode 100644
index 0000000000..e43a46e13a
--- /dev/null
+++ b/third_party/python/redo/redo-2.0.3.dist-info/METADATA
@@ -0,0 +1,13 @@
+Metadata-Version: 2.1
+Name: redo
+Version: 2.0.3
+Summary: Utilities to retry Python callables.
+Home-page: https://github.com/bhearsum/redo
+Author: Ben Hearsum
+Author-email: ben@hearsum.ca
+License: UNKNOWN
+Platform: UNKNOWN
+
+UNKNOWN
+
+
diff --git a/third_party/python/redo/redo-2.0.3.dist-info/RECORD b/third_party/python/redo/redo-2.0.3.dist-info/RECORD
new file mode 100644
index 0000000000..d8f82bf7da
--- /dev/null
+++ b/third_party/python/redo/redo-2.0.3.dist-info/RECORD
@@ -0,0 +1,8 @@
+redo/__init__.py,sha256=6VZUeFfbFkBJ_lxY_cJWk0S8mgSkrSRIwVniVm_sKsw,8518
+redo/cmd.py,sha256=F1axa3CVChlIvrSnq4xZZIyZ4M4wnnZjpv8wy46ugS4,2085
+redo-2.0.3.dist-info/AUTHORS,sha256=uIuTIaIlfQwklq75eg8VTjdnzENPlN_WKxa1UxQWTtQ,290
+redo-2.0.3.dist-info/METADATA,sha256=0DOrbjh62qccs3wFTgTxP9kQ1S4cphhUnupdfv0_6ms,233
+redo-2.0.3.dist-info/WHEEL,sha256=_wJFdOYk7i3xxT8ElOkUJvOdOvfNGbR9g-bf6UQT6sU,110
+redo-2.0.3.dist-info/entry_points.txt,sha256=ftcg9P_jTwZ9bYYDKB-s-5eIY6mHGiRHvd_HAGc7UPc,41
+redo-2.0.3.dist-info/top_level.txt,sha256=o1qXhN94bANfd7pz4zervaf8ytHEG_UVfhFZKMmmdvo,5
+redo-2.0.3.dist-info/RECORD,,
diff --git a/third_party/python/redo/redo-2.0.3.dist-info/WHEEL b/third_party/python/redo/redo-2.0.3.dist-info/WHEEL
new file mode 100644
index 0000000000..c4bde30377
--- /dev/null
+++ b/third_party/python/redo/redo-2.0.3.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.32.3)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/redo/redo-2.0.3.dist-info/entry_points.txt b/third_party/python/redo/redo-2.0.3.dist-info/entry_points.txt
new file mode 100644
index 0000000000..44eccdcfca
--- /dev/null
+++ b/third_party/python/redo/redo-2.0.3.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+retry = redo.cmd:main
+
diff --git a/third_party/python/redo/redo-2.0.3.dist-info/top_level.txt b/third_party/python/redo/redo-2.0.3.dist-info/top_level.txt
new file mode 100644
index 0000000000..f49789cbab
--- /dev/null
+++ b/third_party/python/redo/redo-2.0.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+redo
diff --git a/third_party/python/redo/redo/__init__.py b/third_party/python/redo/redo/__init__.py
new file mode 100644
index 0000000000..9814805990
--- /dev/null
+++ b/third_party/python/redo/redo/__init__.py
@@ -0,0 +1,265 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+
+import time
+from functools import wraps
+from contextlib import contextmanager
+import logging
+import random
+
+log = logging.getLogger(__name__)
+
+
+def retrier(attempts=5, sleeptime=10, max_sleeptime=300, sleepscale=1.5, jitter=1):
+ """
+    A generator function that sleeps between retries and handles exponential
+    backoff and jitter. The action you are retrying is meant to run after
+    retrier yields.
+
+ At each iteration, we sleep for sleeptime + random.uniform(-jitter, jitter).
+ Afterwards sleeptime is multiplied by sleepscale for the next iteration.
+
+ Args:
+ attempts (int): maximum number of times to try; defaults to 5
+ sleeptime (float): how many seconds to sleep between tries; defaults to
+ 10 seconds
+ max_sleeptime (float): the longest we'll sleep, in seconds; defaults to
+ 300s (five minutes)
+ sleepscale (float): how much to multiply the sleep time by each
+ iteration; defaults to 1.5
+ jitter (float): random jitter to introduce to sleep time each iteration.
+ the amount is chosen at random between [-jitter, +jitter]
+ defaults to 1
+
+ Yields:
+        the current sleep time in seconds, a maximum of `attempts` times
+
+ Example:
+ >>> n = 0
+ >>> for _ in retrier(sleeptime=0, jitter=0):
+ ... if n == 3:
+ ... # We did the thing!
+ ... break
+ ... n += 1
+ >>> n
+ 3
+
+ >>> n = 0
+ >>> for _ in retrier(sleeptime=0, jitter=0):
+ ... if n == 6:
+ ... # We did the thing!
+ ... break
+ ... n += 1
+ ... else:
+ ... print("max tries hit")
+ max tries hit
+ """
+ jitter = jitter or 0 # py35 barfs on the next line if jitter is None
+ if jitter > sleeptime:
+ # To prevent negative sleep times
+ raise Exception(
+ "jitter ({}) must be less than sleep time ({})".format(jitter, sleeptime)
+ )
+
+ sleeptime_real = sleeptime
+ for _ in range(attempts):
+ log.debug("attempt %i/%i", _ + 1, attempts)
+
+ yield sleeptime_real
+
+ if jitter:
+ sleeptime_real = sleeptime + random.uniform(-jitter, jitter)
+ # our jitter should scale along with the sleeptime
+ jitter = jitter * sleepscale
+ else:
+ sleeptime_real = sleeptime
+
+ sleeptime *= sleepscale
+
+ if sleeptime_real > max_sleeptime:
+ sleeptime_real = max_sleeptime
+
+ # Don't need to sleep the last time
+ if _ < attempts - 1:
+ log.debug(
+ "sleeping for %.2fs (attempt %i/%i)", sleeptime_real, _ + 1, attempts
+ )
+ time.sleep(sleeptime_real)
+
+
+def retry(
+ action,
+ attempts=5,
+ sleeptime=60,
+ max_sleeptime=5 * 60,
+ sleepscale=1.5,
+ jitter=1,
+ retry_exceptions=(Exception,),
+ cleanup=None,
+ args=(),
+ kwargs={},
+ log_args=True,
+):
+ """
+ Calls an action function until it succeeds, or we give up.
+
+ Args:
+ action (callable): the function to retry
+ attempts (int): maximum number of times to try; defaults to 5
+ sleeptime (float): how many seconds to sleep between tries; defaults to
+ 60s (one minute)
+ max_sleeptime (float): the longest we'll sleep, in seconds; defaults to
+ 300s (five minutes)
+ sleepscale (float): how much to multiply the sleep time by each
+ iteration; defaults to 1.5
+ jitter (float): random jitter to introduce to sleep time each iteration.
+ the amount is chosen at random between [-jitter, +jitter]
+ defaults to 1
+ retry_exceptions (tuple): tuple of exceptions to be caught. If other
+ exceptions are raised by action(), then these
+ are immediately re-raised to the caller.
+ cleanup (callable): optional; called if one of `retry_exceptions` is
+ caught. No arguments are passed to the cleanup
+ function; if your cleanup requires arguments,
+ consider using functools.partial or a lambda
+ function.
+ args (tuple): positional arguments to call `action` with
+ kwargs (dict): keyword arguments to call `action` with
+ log_args (bool): whether or not to include args and kwargs in log
+ messages. Defaults to True.
+
+ Returns:
+ Whatever action(*args, **kwargs) returns
+
+ Raises:
+ Whatever action(*args, **kwargs) raises. `retry_exceptions` are caught
+ up until the last attempt, in which case they are re-raised.
+
+ Example:
+ >>> count = 0
+ >>> def foo():
+ ... global count
+ ... count += 1
+ ... print(count)
+ ... if count < 3:
+ ... raise ValueError("count is too small!")
+ ... return "success!"
+ >>> retry(foo, sleeptime=0, jitter=0)
+ 1
+ 2
+ 3
+ 'success!'
+ """
+ assert callable(action)
+ assert not cleanup or callable(cleanup)
+
+ action_name = getattr(action, "__name__", action)
+ if log_args and (args or kwargs):
+ log_attempt_args = (
+ "retry: calling %s with args: %s," " kwargs: %s, attempt #%d",
+ action_name,
+ args,
+ kwargs,
+ )
+ else:
+ log_attempt_args = ("retry: calling %s, attempt #%d", action_name)
+
+ if max_sleeptime < sleeptime:
+ log.debug("max_sleeptime %d less than sleeptime %d", max_sleeptime, sleeptime)
+
+ n = 1
+ for _ in retrier(
+ attempts=attempts,
+ sleeptime=sleeptime,
+ max_sleeptime=max_sleeptime,
+ sleepscale=sleepscale,
+ jitter=jitter,
+ ):
+ try:
+ logfn = log.info if n != 1 else log.debug
+ logfn_args = log_attempt_args + (n,)
+ logfn(*logfn_args)
+ return action(*args, **kwargs)
+ except retry_exceptions:
+ log.debug("retry: Caught exception: ", exc_info=True)
+ if cleanup:
+ cleanup()
+ if n == attempts:
+ log.info("retry: Giving up on %s", action_name)
+ raise
+ continue
+ finally:
+ n += 1
+
+
+def retriable(*retry_args, **retry_kwargs):
+ """
+ A decorator factory for retry(). Wrap your function in @retriable(...) to
+ give it retry powers!
+
+ Arguments:
+ Same as for `retry`, with the exception of `action`, `args`, and `kwargs`,
+ which are left to the normal function definition.
+
+ Returns:
+ A function decorator
+
+ Example:
+ >>> count = 0
+ >>> @retriable(sleeptime=0, jitter=0)
+ ... def foo():
+ ... global count
+ ... count += 1
+ ... print(count)
+ ... if count < 3:
+ ... raise ValueError("count too small")
+ ... return "success!"
+ >>> foo()
+ 1
+ 2
+ 3
+ 'success!'
+ """
+
+ def _retriable_factory(func):
+ @wraps(func)
+ def _retriable_wrapper(*args, **kwargs):
+ return retry(func, args=args, kwargs=kwargs, *retry_args, **retry_kwargs)
+
+ return _retriable_wrapper
+
+ return _retriable_factory
+
+
+@contextmanager
+def retrying(func, *retry_args, **retry_kwargs):
+ """
+ A context manager for wrapping functions with retry functionality.
+
+ Arguments:
+ func (callable): the function to wrap
+ other arguments as per `retry`
+
+ Returns:
+ A context manager that returns retriable(func) on __enter__
+
+ Example:
+ >>> count = 0
+ >>> def foo():
+ ... global count
+ ... count += 1
+ ... print(count)
+ ... if count < 3:
+ ... raise ValueError("count too small")
+ ... return "success!"
+ >>> with retrying(foo, sleeptime=0, jitter=0) as f:
+ ... f()
+ 1
+ 2
+ 3
+ 'success!'
+ """
+ yield retriable(*retry_args, **retry_kwargs)(func)
diff --git a/third_party/python/redo/redo/cmd.py b/third_party/python/redo/redo/cmd.py
new file mode 100644
index 0000000000..aeb65dbb3e
--- /dev/null
+++ b/third_party/python/redo/redo/cmd.py
@@ -0,0 +1,70 @@
+# ***** BEGIN LICENSE BLOCK *****
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+# ***** END LICENSE BLOCK *****
+import logging
+from subprocess import check_call, CalledProcessError
+import sys
+
+from redo import retrying
+
+log = logging.getLogger(__name__)
+
+
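+# Example invocation (an illustrative sketch; `retry` is the console script
+# declared in entry_points.txt):
+#
+#     retry -a 3 -s 5 wget http://example.com/file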
+def main(argv):
+ from argparse import ArgumentParser, REMAINDER
+
+ parser = ArgumentParser()
+ parser.add_argument(
+ "-a", "--attempts", type=int, default=5, help="How many times to retry."
+ )
+ parser.add_argument(
+ "-s",
+ "--sleeptime",
+ type=int,
+ default=60,
+ help="How long to sleep between attempts. Sleeptime doubles after each attempt.",
+ )
+ parser.add_argument(
+ "-m",
+ "--max-sleeptime",
+ type=int,
+ default=5 * 60,
+ help="Maximum length of time to sleep between attempts (limits backoff length).",
+ )
+ parser.add_argument("-v", "--verbose", action="store_true", default=False)
+ parser.add_argument(
+ "cmd", nargs=REMAINDER, help="Command to run. Eg: wget http://blah"
+ )
+
+ args = parser.parse_args(argv[1:])
+
+ if args.verbose:
+ logging.basicConfig(level=logging.INFO)
+ logging.getLogger("retry").setLevel(logging.INFO)
+ else:
+ logging.basicConfig(level=logging.ERROR)
+ logging.getLogger("retry").setLevel(logging.ERROR)
+
+ try:
+ with retrying(
+ check_call,
+ attempts=args.attempts,
+ sleeptime=args.sleeptime,
+ max_sleeptime=args.max_sleeptime,
+ retry_exceptions=(CalledProcessError,),
+ ) as r_check_call:
+ r_check_call(args.cmd)
+ except KeyboardInterrupt:
+ sys.exit(-1)
+ except Exception as e:
+ log.error(
+ "Unable to run command after %d attempts" % args.attempts, exc_info=True
+ )
+ rc = getattr(e, "returncode", -2)
+ sys.exit(rc)
+
+
+if __name__ == "__main__":
+ main(sys.argv)
diff --git a/third_party/python/requests/requests-2.25.1.dist-info/LICENSE b/third_party/python/requests/requests-2.25.1.dist-info/LICENSE
new file mode 100644
index 0000000000..67db858821
--- /dev/null
+++ b/third_party/python/requests/requests-2.25.1.dist-info/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/third_party/python/requests/requests-2.25.1.dist-info/METADATA b/third_party/python/requests/requests-2.25.1.dist-info/METADATA
new file mode 100644
index 0000000000..6aaa2dda90
--- /dev/null
+++ b/third_party/python/requests/requests-2.25.1.dist-info/METADATA
@@ -0,0 +1,103 @@
+Metadata-Version: 2.1
+Name: requests
+Version: 2.25.1
+Summary: Python HTTP for Humans.
+Home-page: https://requests.readthedocs.io
+Author: Kenneth Reitz
+Author-email: me@kennethreitz.org
+License: Apache 2.0
+Project-URL: Documentation, https://requests.readthedocs.io
+Project-URL: Source, https://github.com/psf/requests
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+Description-Content-Type: text/markdown
+Requires-Dist: chardet (<5,>=3.0.2)
+Requires-Dist: idna (<3,>=2.5)
+Requires-Dist: urllib3 (<1.27,>=1.21.1)
+Requires-Dist: certifi (>=2017.4.17)
+Provides-Extra: security
+Requires-Dist: pyOpenSSL (>=0.14) ; extra == 'security'
+Requires-Dist: cryptography (>=1.3.4) ; extra == 'security'
+Provides-Extra: socks
+Requires-Dist: PySocks (!=1.5.7,>=1.5.6) ; extra == 'socks'
+Requires-Dist: win-inet-pton ; (sys_platform == "win32" and python_version == "2.7") and extra == 'socks'
+
+# Requests
+
+**Requests** is a simple, yet elegant HTTP library.
+
+```python
+>>> import requests
+>>> r = requests.get('https://api.github.com/user', auth=('user', 'pass'))
+>>> r.status_code
+200
+>>> r.headers['content-type']
+'application/json; charset=utf8'
+>>> r.encoding
+'utf-8'
+>>> r.text
+'{"type":"User"...'
+>>> r.json()
+{'disk_usage': 368627, 'private_gists': 484, ...}
+```
+
+Requests allows you to send HTTP/1.1 requests extremely easily. There’s no need to manually add query strings to your URLs, or to form-encode your `PUT` & `POST` data — but nowadays, just use the `json` parameter!
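+
+A minimal sketch of both conveniences (httpbin.org used as a stand-in endpoint):
+
+```python
+>>> import requests
+>>> r = requests.get('https://httpbin.org/get', params={'q': 'requests'})
+>>> r.url
+'https://httpbin.org/get?q=requests'
+>>> requests.post('https://httpbin.org/post', json={'key': 'value'})
+<Response [200]>
+```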
+
+Requests is one of the most downloaded Python packages today, pulling in around `14M downloads / week` — according to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `500,000+` repositories. You may certainly put your trust in this code.
+
+[![Downloads](https://pepy.tech/badge/requests/month)](https://pepy.tech/project/requests/month)
+[![Supported Versions](https://img.shields.io/pypi/pyversions/requests.svg)](https://pypi.org/project/requests)
+[![Contributors](https://img.shields.io/github/contributors/psf/requests.svg)](https://github.com/psf/requests/graphs/contributors)
+
+## Installing Requests and Supported Versions
+
+Requests is available on PyPI:
+
+```console
+$ python -m pip install requests
+```
+
+Requests officially supports Python 2.7 & 3.5+.
+
+## Supported Features & Best–Practices
+
+Requests is ready for the demands of building robust and reliable HTTP–speaking applications today. A short usage sketch follows this feature list.
+
+- Keep-Alive & Connection Pooling
+- International Domains and URLs
+- Sessions with Cookie Persistence
+- Browser-style TLS/SSL Verification
+- Basic & Digest Authentication
+- Familiar `dict`–like Cookies
+- Automatic Content Decompression and Decoding
+- Multi-part File Uploads
+- SOCKS Proxy Support
+- Connection Timeouts
+- Streaming Downloads
+- Automatic honoring of `.netrc`
+- Chunked HTTP Requests
+
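+A sketch touching a few of these features (Session reuse, cookie persistence,
+and a per-request timeout; httpbin.org as a stand-in endpoint):
+
+```python
+>>> import requests
+>>> s = requests.Session()                      # keep-alive & connection pooling
+>>> s.get('https://httpbin.org/cookies/set/k/v', timeout=5)
+<Response [200]>
+>>> s.cookies['k']                              # cookies persist on the session
+'v'
+```
+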
+## API Reference and User Guide available on [Read the Docs](https://requests.readthedocs.io)
+
+[![Read the Docs](https://raw.githubusercontent.com/psf/requests/master/ext/ss.png)](https://requests.readthedocs.io)
+
+---
+
+[![Kenneth Reitz](https://raw.githubusercontent.com/psf/requests/master/ext/kr.png)](https://kennethreitz.org) [![Python Software Foundation](https://raw.githubusercontent.com/psf/requests/master/ext/psf.png)](https://www.python.org/psf)
+
+
diff --git a/third_party/python/requests/requests-2.25.1.dist-info/RECORD b/third_party/python/requests/requests-2.25.1.dist-info/RECORD
new file mode 100644
index 0000000000..8b38386890
--- /dev/null
+++ b/third_party/python/requests/requests-2.25.1.dist-info/RECORD
@@ -0,0 +1,23 @@
+requests/__init__.py,sha256=rsmg7xmbbCE_zmDcG6EDk_pyvdEfadztdBaWIkInlH8,4141
+requests/__version__.py,sha256=k4J8c1yFRFzwGWwlN7miaDOclFtbcIs1GlnmT17YbXQ,441
+requests/_internal_utils.py,sha256=Zx3PnEUccyfsB-ie11nZVAW8qClJy0gx1qNME7rgT18,1096
+requests/adapters.py,sha256=WelSM1BCQXdbjEuDsBxqKDADeY8BHmxlrwbNnLN2rr4,21344
+requests/api.py,sha256=PlHM-HT3PQ5lyufoeGmV-nJxRi7UnUyGVh7OV7B9XV4,6496
+requests/auth.py,sha256=OMoJIVKyRLy9THr91y8rxysZuclwPB-K1Xg1zBomUhQ,10207
+requests/certs.py,sha256=dOB5rV2DZ13dEhq9BUa_4hd5kAqg59e_zUZB00faYz8,453
+requests/compat.py,sha256=iBRvu-X540CH4PJsuxr0vcGTnl_TZhq_75SwmeckQ7w,1782
+requests/cookies.py,sha256=Y-bKX6TvW3FnYlE6Au0SXtVVWcaNdFvuAwQxw-G0iTI,18430
+requests/exceptions.py,sha256=xXoj1rdhnxTS_DYphKZ9OvFZJQZ333A64REc9ZDZIgU,3161
+requests/help.py,sha256=lLcBtKAar8T6T78e9Tc4Zfd_EEJFhntxgib1JHNctEI,3515
+requests/hooks.py,sha256=QReGyy0bRcr5rkwCuObNakbYsc7EkiKeBwG4qHekr2Q,757
+requests/models.py,sha256=Uhb4Ra_ubNGBf-6ktHShgO5mUSCGZKa5D_wLGVCMtYk,34308
+requests/packages.py,sha256=Q2rF0L5mc3wQAvc6q_lAVtPTDOaOeFgD-7kWSQLkjEQ,542
+requests/sessions.py,sha256=BsnR-zYILgoFzJ6yq4T8ht_i0PwwPGVAxWxWaV5dcHg,30137
+requests/status_codes.py,sha256=gT79Pbs_cQjBgp-fvrUgg1dn2DQO32bDj4TInjnMPSc,4188
+requests/structures.py,sha256=msAtr9mq1JxHd-JRyiILfdFlpbJwvvFuP3rfUQT_QxE,3005
+requests/utils.py,sha256=_K9AgkN6efPe-a-zgZurXzds5PBC0CzDkyjAE2oCQFQ,30529
+requests-2.25.1.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
+requests-2.25.1.dist-info/METADATA,sha256=RuNh38uN0IMsRT3OwaTNB_WyGx6RMwwQoMwujXfkUVM,4168
+requests-2.25.1.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
+requests-2.25.1.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9
+requests-2.25.1.dist-info/RECORD,,
diff --git a/third_party/python/requests/requests-2.25.1.dist-info/WHEEL b/third_party/python/requests/requests-2.25.1.dist-info/WHEEL
new file mode 100644
index 0000000000..01b8fc7d4a
--- /dev/null
+++ b/third_party/python/requests/requests-2.25.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/requests/requests-2.25.1.dist-info/top_level.txt b/third_party/python/requests/requests-2.25.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..f2293605cf
--- /dev/null
+++ b/third_party/python/requests/requests-2.25.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+requests
diff --git a/third_party/python/requests/requests/__init__.py b/third_party/python/requests/requests/__init__.py
new file mode 100644
index 0000000000..f8f94295f9
--- /dev/null
+++ b/third_party/python/requests/requests/__init__.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+
+# __
+# /__) _ _ _ _ _/ _
+# / ( (- (/ (/ (- _) / _)
+# /
+
+"""
+Requests HTTP Library
+~~~~~~~~~~~~~~~~~~~~~
+
+Requests is an HTTP library, written in Python, for human beings.
+Basic GET usage:
+
+ >>> import requests
+ >>> r = requests.get('https://www.python.org')
+ >>> r.status_code
+ 200
+ >>> b'Python is a programming language' in r.content
+ True
+
+... or POST:
+
+ >>> payload = dict(key1='value1', key2='value2')
+ >>> r = requests.post('https://httpbin.org/post', data=payload)
+ >>> print(r.text)
+ {
+ ...
+ "form": {
+ "key1": "value1",
+ "key2": "value2"
+ },
+ ...
+ }
+
+The other HTTP methods are supported - see `requests.api`. Full documentation
+is at <https://requests.readthedocs.io>.
+
+:copyright: (c) 2017 by Kenneth Reitz.
+:license: Apache 2.0, see LICENSE for more details.
+"""
+
+import urllib3
+import chardet
+import warnings
+from .exceptions import RequestsDependencyWarning
+
+
+def check_compatibility(urllib3_version, chardet_version):
+ urllib3_version = urllib3_version.split('.')
+ assert urllib3_version != ['dev'] # Verify urllib3 isn't installed from git.
+
+    # Sometimes, urllib3 only reports a two-part version, e.g. 1.26.
+ if len(urllib3_version) == 2:
+ urllib3_version.append('0')
+
+ # Check urllib3 for compatibility.
+ major, minor, patch = urllib3_version # noqa: F811
+ major, minor, patch = int(major), int(minor), int(patch)
+ # urllib3 >= 1.21.1, <= 1.26
+ assert major == 1
+ assert minor >= 21
+ assert minor <= 26
+
+ # Check chardet for compatibility.
+ major, minor, patch = chardet_version.split('.')[:3]
+ major, minor, patch = int(major), int(minor), int(patch)
+ # chardet >= 3.0.2, < 5.0.0
+ assert (3, 0, 2) <= (major, minor, patch) < (5, 0, 0)
+
+
+def _check_cryptography(cryptography_version):
+ # cryptography < 1.3.4
+ try:
+ cryptography_version = list(map(int, cryptography_version.split('.')))
+ except ValueError:
+ return
+
+ if cryptography_version < [1, 3, 4]:
+ warning = 'Old version of cryptography ({}) may cause slowdown.'.format(cryptography_version)
+ warnings.warn(warning, RequestsDependencyWarning)
+
+# Check imported dependencies for compatibility.
+try:
+ check_compatibility(urllib3.__version__, chardet.__version__)
+except (AssertionError, ValueError):
+ warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported "
+ "version!".format(urllib3.__version__, chardet.__version__),
+ RequestsDependencyWarning)
+
+# Attempt to enable urllib3's fallback for SNI support
+# if the standard library doesn't support SNI or the
+# 'ssl' library isn't available.
+try:
+ try:
+ import ssl
+ except ImportError:
+ ssl = None
+
+ if not getattr(ssl, "HAS_SNI", False):
+ from urllib3.contrib import pyopenssl
+ pyopenssl.inject_into_urllib3()
+
+ # Check cryptography version
+ from cryptography import __version__ as cryptography_version
+ _check_cryptography(cryptography_version)
+except ImportError:
+ pass
+
+# urllib3's DependencyWarnings should be silenced.
+from urllib3.exceptions import DependencyWarning
+warnings.simplefilter('ignore', DependencyWarning)
+
+from .__version__ import __title__, __description__, __url__, __version__
+from .__version__ import __build__, __author__, __author_email__, __license__
+from .__version__ import __copyright__, __cake__
+
+from . import utils
+from . import packages
+from .models import Request, Response, PreparedRequest
+from .api import request, get, head, post, patch, put, delete, options
+from .sessions import session, Session
+from .status_codes import codes
+from .exceptions import (
+ RequestException, Timeout, URLRequired,
+ TooManyRedirects, HTTPError, ConnectionError,
+ FileModeWarning, ConnectTimeout, ReadTimeout
+)
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+from logging import NullHandler
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+# FileModeWarnings use the 'default' filter action (warn once per location).
+warnings.simplefilter('default', FileModeWarning, append=True)
diff --git a/third_party/python/requests/requests/__version__.py b/third_party/python/requests/requests/__version__.py
new file mode 100644
index 0000000000..1267488d28
--- /dev/null
+++ b/third_party/python/requests/requests/__version__.py
@@ -0,0 +1,14 @@
+# .-. .-. .-. . . .-. .-. .-. .-.
+# |( |- |.| | | |- `-. | `-.
+# ' ' `-' `-`.`-' `-' `-' ' `-'
+
+__title__ = 'requests'
+__description__ = 'Python HTTP for Humans.'
+__url__ = 'https://requests.readthedocs.io'
+__version__ = '2.25.1'
+__build__ = 0x022501
+__author__ = 'Kenneth Reitz'
+__author_email__ = 'me@kennethreitz.org'
+__license__ = 'Apache 2.0'
+__copyright__ = 'Copyright 2020 Kenneth Reitz'
+__cake__ = u'\u2728 \U0001f370 \u2728'
diff --git a/third_party/python/requests/requests/_internal_utils.py b/third_party/python/requests/requests/_internal_utils.py
new file mode 100644
index 0000000000..759d9a56ba
--- /dev/null
+++ b/third_party/python/requests/requests/_internal_utils.py
@@ -0,0 +1,42 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests._internal_utils
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Provides utility functions that are consumed internally by Requests
+and that depend on very few external helpers (such as compat).
+
+from .compat import is_py2, builtin_str, str
+
+
+def to_native_string(string, encoding='ascii'):
+ """Given a string object, regardless of type, returns a representation of
+ that string in the native string type, encoding and decoding where
+ necessary. This assumes ASCII unless told otherwise.
+ """
+ if isinstance(string, builtin_str):
+ out = string
+ else:
+ if is_py2:
+ out = string.encode(encoding)
+ else:
+ out = string.decode(encoding)
+
+ return out
+
+
+def unicode_is_ascii(u_string):
+ """Determine if unicode string only contains ASCII characters.
+
+ :param str u_string: unicode string to check. Must be unicode
+ and not Python 2 `str`.
+ :rtype: bool
+ """
+ assert isinstance(u_string, str)
+ try:
+ u_string.encode('ascii')
+ return True
+ except UnicodeEncodeError:
+ return False
diff --git a/third_party/python/requests/requests/adapters.py b/third_party/python/requests/requests/adapters.py
new file mode 100644
index 0000000000..fa4d9b3cc9
--- /dev/null
+++ b/third_party/python/requests/requests/adapters.py
@@ -0,0 +1,533 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.adapters
+~~~~~~~~~~~~~~~~~
+
+This module contains the transport adapters that Requests uses to define
+and maintain connections.
+"""
+
+import os.path
+import socket
+
+from urllib3.poolmanager import PoolManager, proxy_from_url
+from urllib3.response import HTTPResponse
+from urllib3.util import parse_url
+from urllib3.util import Timeout as TimeoutSauce
+from urllib3.util.retry import Retry
+from urllib3.exceptions import ClosedPoolError
+from urllib3.exceptions import ConnectTimeoutError
+from urllib3.exceptions import HTTPError as _HTTPError
+from urllib3.exceptions import MaxRetryError
+from urllib3.exceptions import NewConnectionError
+from urllib3.exceptions import ProxyError as _ProxyError
+from urllib3.exceptions import ProtocolError
+from urllib3.exceptions import ReadTimeoutError
+from urllib3.exceptions import SSLError as _SSLError
+from urllib3.exceptions import ResponseError
+from urllib3.exceptions import LocationValueError
+
+from .models import Response
+from .compat import urlparse, basestring
+from .utils import (DEFAULT_CA_BUNDLE_PATH, extract_zipped_paths,
+ get_encoding_from_headers, prepend_scheme_if_needed,
+ get_auth_from_url, urldefragauth, select_proxy)
+from .structures import CaseInsensitiveDict
+from .cookies import extract_cookies_to_jar
+from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
+ ProxyError, RetryError, InvalidSchema, InvalidProxyURL,
+ InvalidURL)
+from .auth import _basic_auth_str
+
+try:
+ from urllib3.contrib.socks import SOCKSProxyManager
+except ImportError:
+ def SOCKSProxyManager(*args, **kwargs):
+ raise InvalidSchema("Missing dependencies for SOCKS support.")
+
+DEFAULT_POOLBLOCK = False
+DEFAULT_POOLSIZE = 10
+DEFAULT_RETRIES = 0
+DEFAULT_POOL_TIMEOUT = None
+
+
+class BaseAdapter(object):
+ """The Base Transport Adapter"""
+
+ def __init__(self):
+ super(BaseAdapter, self).__init__()
+
+ def send(self, request, stream=False, timeout=None, verify=True,
+ cert=None, proxies=None):
+ """Sends PreparedRequest object. Returns Response object.
+
+ :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+ :param stream: (optional) Whether to stream the request content.
+ :param timeout: (optional) How long to wait for the server to send
+ data before giving up, as a float, or a :ref:`(connect timeout,
+ read timeout) <timeouts>` tuple.
+ :type timeout: float or tuple
+ :param verify: (optional) Either a boolean, in which case it controls whether we verify
+ the server's TLS certificate, or a string, in which case it must be a path
+ to a CA bundle to use
+ :param cert: (optional) Any user-provided SSL certificate to be trusted.
+ :param proxies: (optional) The proxies dictionary to apply to the request.
+ """
+ raise NotImplementedError
+
+ def close(self):
+ """Cleans up adapter specific items."""
+ raise NotImplementedError
+
+
+class HTTPAdapter(BaseAdapter):
+ """The built-in HTTP Adapter for urllib3.
+
+ Provides a general-case interface for Requests sessions to contact HTTP and
+ HTTPS urls by implementing the Transport Adapter interface. This class will
+ usually be created by the :class:`Session <Session>` class under the
+ covers.
+
+ :param pool_connections: The number of urllib3 connection pools to cache.
+ :param pool_maxsize: The maximum number of connections to save in the pool.
+ :param max_retries: The maximum number of retries each connection
+ should attempt. Note, this applies only to failed DNS lookups, socket
+ connections and connection timeouts, never to requests where data has
+ made it to the server. By default, Requests does not retry failed
+ connections. If you need granular control over the conditions under
+ which we retry a request, import urllib3's ``Retry`` class and pass
+ that instead.
+ :param pool_block: Whether the connection pool should block for connections.
+
+ Usage::
+
+ >>> import requests
+ >>> s = requests.Session()
+ >>> a = requests.adapters.HTTPAdapter(max_retries=3)
+ >>> s.mount('http://', a)
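+
+    A sketch of finer-grained retry control via urllib3's ``Retry``
+    (parameter names are urllib3's, not Requests')::
+
+      >>> from urllib3.util.retry import Retry
+      >>> retries = Retry(total=3, backoff_factor=0.5,
+      ...                 status_forcelist=[500, 502, 503, 504])
+      >>> s.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))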
+ """
+ __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
+ '_pool_block']
+
+ def __init__(self, pool_connections=DEFAULT_POOLSIZE,
+ pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
+ pool_block=DEFAULT_POOLBLOCK):
+ if max_retries == DEFAULT_RETRIES:
+ self.max_retries = Retry(0, read=False)
+ else:
+ self.max_retries = Retry.from_int(max_retries)
+ self.config = {}
+ self.proxy_manager = {}
+
+ super(HTTPAdapter, self).__init__()
+
+ self._pool_connections = pool_connections
+ self._pool_maxsize = pool_maxsize
+ self._pool_block = pool_block
+
+ self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
+
+ def __getstate__(self):
+ return {attr: getattr(self, attr, None) for attr in self.__attrs__}
+
+ def __setstate__(self, state):
+ # Can't handle by adding 'proxy_manager' to self.__attrs__ because
+ # self.poolmanager uses a lambda function, which isn't pickleable.
+ self.proxy_manager = {}
+ self.config = {}
+
+ for attr, value in state.items():
+ setattr(self, attr, value)
+
+ self.init_poolmanager(self._pool_connections, self._pool_maxsize,
+ block=self._pool_block)
+
+ def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
+ """Initializes a urllib3 PoolManager.
+
+ This method should not be called from user code, and is only
+ exposed for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param connections: The number of urllib3 connection pools to cache.
+ :param maxsize: The maximum number of connections to save in the pool.
+ :param block: Block when no free connections are available.
+ :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
+ """
+ # save these values for pickling
+ self._pool_connections = connections
+ self._pool_maxsize = maxsize
+ self._pool_block = block
+
+ self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
+ block=block, strict=True, **pool_kwargs)
+
+ def proxy_manager_for(self, proxy, **proxy_kwargs):
+ """Return urllib3 ProxyManager for the given proxy.
+
+ This method should not be called from user code, and is only
+ exposed for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param proxy: The proxy to return a urllib3 ProxyManager for.
+ :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
+ :returns: ProxyManager
+ :rtype: urllib3.ProxyManager
+ """
+ if proxy in self.proxy_manager:
+ manager = self.proxy_manager[proxy]
+ elif proxy.lower().startswith('socks'):
+ username, password = get_auth_from_url(proxy)
+ manager = self.proxy_manager[proxy] = SOCKSProxyManager(
+ proxy,
+ username=username,
+ password=password,
+ num_pools=self._pool_connections,
+ maxsize=self._pool_maxsize,
+ block=self._pool_block,
+ **proxy_kwargs
+ )
+ else:
+ proxy_headers = self.proxy_headers(proxy)
+ manager = self.proxy_manager[proxy] = proxy_from_url(
+ proxy,
+ proxy_headers=proxy_headers,
+ num_pools=self._pool_connections,
+ maxsize=self._pool_maxsize,
+ block=self._pool_block,
+ **proxy_kwargs)
+
+ return manager
+
+ def cert_verify(self, conn, url, verify, cert):
+ """Verify a SSL certificate. This method should not be called from user
+ code, and is only exposed for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param conn: The urllib3 connection object associated with the cert.
+ :param url: The requested URL.
+ :param verify: Either a boolean, in which case it controls whether we verify
+ the server's TLS certificate, or a string, in which case it must be a path
+ to a CA bundle to use
+ :param cert: The SSL certificate to verify.
+ """
+ if url.lower().startswith('https') and verify:
+
+ cert_loc = None
+
+ # Allow self-specified cert location.
+ if verify is not True:
+ cert_loc = verify
+
+ if not cert_loc:
+ cert_loc = extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
+
+ if not cert_loc or not os.path.exists(cert_loc):
+ raise IOError("Could not find a suitable TLS CA certificate bundle, "
+ "invalid path: {}".format(cert_loc))
+
+ conn.cert_reqs = 'CERT_REQUIRED'
+
+ if not os.path.isdir(cert_loc):
+ conn.ca_certs = cert_loc
+ else:
+ conn.ca_cert_dir = cert_loc
+ else:
+ conn.cert_reqs = 'CERT_NONE'
+ conn.ca_certs = None
+ conn.ca_cert_dir = None
+
+ if cert:
+ if not isinstance(cert, basestring):
+ conn.cert_file = cert[0]
+ conn.key_file = cert[1]
+ else:
+ conn.cert_file = cert
+ conn.key_file = None
+ if conn.cert_file and not os.path.exists(conn.cert_file):
+ raise IOError("Could not find the TLS certificate file, "
+ "invalid path: {}".format(conn.cert_file))
+ if conn.key_file and not os.path.exists(conn.key_file):
+ raise IOError("Could not find the TLS key file, "
+ "invalid path: {}".format(conn.key_file))
+
+ def build_response(self, req, resp):
+ """Builds a :class:`Response <requests.Response>` object from a urllib3
+ response. This should not be called from user code, and is only exposed
+ for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
+
+ :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
+ :param resp: The urllib3 response object.
+ :rtype: requests.Response
+ """
+ response = Response()
+
+ # Fallback to None if there's no status_code, for whatever reason.
+ response.status_code = getattr(resp, 'status', None)
+
+ # Make headers case-insensitive.
+ response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
+
+ # Set encoding.
+ response.encoding = get_encoding_from_headers(response.headers)
+ response.raw = resp
+ response.reason = response.raw.reason
+
+ if isinstance(req.url, bytes):
+ response.url = req.url.decode('utf-8')
+ else:
+ response.url = req.url
+
+ # Add new cookies from the server.
+ extract_cookies_to_jar(response.cookies, req, resp)
+
+ # Give the Response some context.
+ response.request = req
+ response.connection = self
+
+ return response
+
+ def get_connection(self, url, proxies=None):
+ """Returns a urllib3 connection for the given URL. This should not be
+ called from user code, and is only exposed for use when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param url: The URL to connect to.
+ :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
+ :rtype: urllib3.ConnectionPool
+ """
+ proxy = select_proxy(url, proxies)
+
+ if proxy:
+ proxy = prepend_scheme_if_needed(proxy, 'http')
+ proxy_url = parse_url(proxy)
+ if not proxy_url.host:
+ raise InvalidProxyURL("Please check proxy URL. It is malformed"
+ " and could be missing the host.")
+ proxy_manager = self.proxy_manager_for(proxy)
+ conn = proxy_manager.connection_from_url(url)
+ else:
+ # Only scheme should be lower case
+ parsed = urlparse(url)
+ url = parsed.geturl()
+ conn = self.poolmanager.connection_from_url(url)
+
+ return conn
+
+ def close(self):
+ """Disposes of any internal state.
+
+ Currently, this closes the PoolManager and any active ProxyManager,
+ which closes any pooled connections.
+ """
+ self.poolmanager.clear()
+ for proxy in self.proxy_manager.values():
+ proxy.clear()
+
+ def request_url(self, request, proxies):
+ """Obtain the url to use when making the final request.
+
+        If the message is being sent through an HTTP proxy, the full URL has to
+ be used. Otherwise, we should only use the path portion of the URL.
+
+ This should not be called from user code, and is only exposed for use
+ when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+ :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
+ :rtype: str
+ """
+ proxy = select_proxy(request.url, proxies)
+ scheme = urlparse(request.url).scheme
+
+ is_proxied_http_request = (proxy and scheme != 'https')
+ using_socks_proxy = False
+ if proxy:
+ proxy_scheme = urlparse(proxy).scheme.lower()
+ using_socks_proxy = proxy_scheme.startswith('socks')
+
+ url = request.path_url
+ if is_proxied_http_request and not using_socks_proxy:
+ url = urldefragauth(request.url)
+
+ return url
+
+ def add_headers(self, request, **kwargs):
+ """Add any headers needed by the connection. As of v2.0 this does
+ nothing by default, but is left for overriding by users that subclass
+ the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ This should not be called from user code, and is only exposed for use
+ when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
+ :param kwargs: The keyword arguments from the call to send().
+ """
+ pass
+
+ def proxy_headers(self, proxy):
+ """Returns a dictionary of the headers to add to any request sent
+ through a proxy. This works with urllib3 magic to ensure that they are
+ correctly sent to the proxy, rather than in a tunnelled request if
+ CONNECT is being used.
+
+ This should not be called from user code, and is only exposed for use
+ when subclassing the
+ :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
+
+ :param proxy: The url of the proxy being used for this request.
+ :rtype: dict
+ """
+ headers = {}
+ username, password = get_auth_from_url(proxy)
+
+ if username:
+ headers['Proxy-Authorization'] = _basic_auth_str(username,
+ password)
+
+ return headers
+
+ def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
+ """Sends PreparedRequest object. Returns Response object.
+
+ :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
+ :param stream: (optional) Whether to stream the request content.
+ :param timeout: (optional) How long to wait for the server to send
+ data before giving up, as a float, or a :ref:`(connect timeout,
+ read timeout) <timeouts>` tuple.
+ :type timeout: float or tuple or urllib3 Timeout object
+ :param verify: (optional) Either a boolean, in which case it controls whether
+ we verify the server's TLS certificate, or a string, in which case it
+ must be a path to a CA bundle to use
+ :param cert: (optional) Any user-provided SSL certificate to be trusted.
+ :param proxies: (optional) The proxies dictionary to apply to the request.
+ :rtype: requests.Response
+ """
+
+ try:
+ conn = self.get_connection(request.url, proxies)
+ except LocationValueError as e:
+ raise InvalidURL(e, request=request)
+
+ self.cert_verify(conn, request.url, verify, cert)
+ url = self.request_url(request, proxies)
+ self.add_headers(request, stream=stream, timeout=timeout, verify=verify, cert=cert, proxies=proxies)
+
+ chunked = not (request.body is None or 'Content-Length' in request.headers)
+
+ if isinstance(timeout, tuple):
+ try:
+ connect, read = timeout
+ timeout = TimeoutSauce(connect=connect, read=read)
+ except ValueError as e:
+ # this may raise a string formatting error.
+ err = ("Invalid timeout {}. Pass a (connect, read) "
+ "timeout tuple, or a single float to set "
+ "both timeouts to the same value".format(timeout))
+ raise ValueError(err)
+ elif isinstance(timeout, TimeoutSauce):
+ pass
+ else:
+ timeout = TimeoutSauce(connect=timeout, read=timeout)
+
+ try:
+ if not chunked:
+ resp = conn.urlopen(
+ method=request.method,
+ url=url,
+ body=request.body,
+ headers=request.headers,
+ redirect=False,
+ assert_same_host=False,
+ preload_content=False,
+ decode_content=False,
+ retries=self.max_retries,
+ timeout=timeout
+ )
+
+            else:
+                # Send the request with chunked transfer encoding.
+                if hasattr(conn, 'proxy_pool'):
+ conn = conn.proxy_pool
+
+ low_conn = conn._get_conn(timeout=DEFAULT_POOL_TIMEOUT)
+
+ try:
+ low_conn.putrequest(request.method,
+ url,
+ skip_accept_encoding=True)
+
+ for header, value in request.headers.items():
+ low_conn.putheader(header, value)
+
+ low_conn.endheaders()
+
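+                    # Write the body as chunked transfer encoding: hex chunk
+                    # size, CRLF, chunk data, CRLF, then a zero-length
+                    # terminating chunk.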
+ for i in request.body:
+ low_conn.send(hex(len(i))[2:].encode('utf-8'))
+ low_conn.send(b'\r\n')
+ low_conn.send(i)
+ low_conn.send(b'\r\n')
+ low_conn.send(b'0\r\n\r\n')
+
+ # Receive the response from the server
+ try:
+ # For Python 2.7, use buffering of HTTP responses
+ r = low_conn.getresponse(buffering=True)
+ except TypeError:
+ # For compatibility with Python 3.3+
+ r = low_conn.getresponse()
+
+ resp = HTTPResponse.from_httplib(
+ r,
+ pool=conn,
+ connection=low_conn,
+ preload_content=False,
+ decode_content=False
+ )
+ except:
+ # If we hit any problems here, clean up the connection.
+ # Then, reraise so that we can handle the actual exception.
+ low_conn.close()
+ raise
+
+ except (ProtocolError, socket.error) as err:
+ raise ConnectionError(err, request=request)
+
+ except MaxRetryError as e:
+ if isinstance(e.reason, ConnectTimeoutError):
+ # TODO: Remove this in 3.0.0: see #2811
+ if not isinstance(e.reason, NewConnectionError):
+ raise ConnectTimeout(e, request=request)
+
+ if isinstance(e.reason, ResponseError):
+ raise RetryError(e, request=request)
+
+ if isinstance(e.reason, _ProxyError):
+ raise ProxyError(e, request=request)
+
+ if isinstance(e.reason, _SSLError):
+ # This branch is for urllib3 v1.22 and later.
+ raise SSLError(e, request=request)
+
+ raise ConnectionError(e, request=request)
+
+ except ClosedPoolError as e:
+ raise ConnectionError(e, request=request)
+
+ except _ProxyError as e:
+ raise ProxyError(e)
+
+ except (_SSLError, _HTTPError) as e:
+ if isinstance(e, _SSLError):
+ # This branch is for urllib3 versions earlier than v1.22
+ raise SSLError(e, request=request)
+ elif isinstance(e, ReadTimeoutError):
+ raise ReadTimeout(e, request=request)
+ else:
+ raise
+
+ return self.build_response(request, resp)
diff --git a/third_party/python/requests/requests/api.py b/third_party/python/requests/requests/api.py
new file mode 100644
index 0000000000..e978e20311
--- /dev/null
+++ b/third_party/python/requests/requests/api.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.api
+~~~~~~~~~~~~
+
+This module implements the Requests API.
+
+:copyright: (c) 2012 by Kenneth Reitz.
+:license: Apache2, see LICENSE for more details.
+"""
+
+from . import sessions
+
+
+def request(method, url, **kwargs):
+ """Constructs and sends a :class:`Request <Request>`.
+
+ :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
+ :param url: URL for the new :class:`Request` object.
+ :param params: (optional) Dictionary, list of tuples or bytes to send
+ in the query string for the :class:`Request`.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
+ :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
+ :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
+ :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
+ ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
+ or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content-type'`` is a string
+ defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
+ to add for the file.
+ :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
+ :param timeout: (optional) How many seconds to wait for the server to send data
+ before giving up, as a float, or a :ref:`(connect timeout, read
+ timeout) <timeouts>` tuple.
+ :type timeout: float or tuple
+ :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
+ :type allow_redirects: bool
+ :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
+ :param verify: (optional) Either a boolean, in which case it controls whether we verify
+ the server's TLS certificate, or a string, in which case it must be a path
+ to a CA bundle to use. Defaults to ``True``.
+ :param stream: (optional) if ``False``, the response content will be immediately downloaded.
+ :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+
+ Usage::
+
+ >>> import requests
+ >>> req = requests.request('GET', 'https://httpbin.org/get')
+ >>> req
+ <Response [200]>
+ """
+
+ # By using the 'with' statement we are sure the session is closed, thus we
+ # avoid leaving sockets open which can trigger a ResourceWarning in some
+ # cases, and look like a memory leak in others.
+ with sessions.Session() as session:
+ return session.request(method=method, url=url, **kwargs)
+
+
+def get(url, params=None, **kwargs):
+ r"""Sends a GET request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param params: (optional) Dictionary, list of tuples or bytes to send
+ in the query string for the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault('allow_redirects', True)
+ return request('get', url, params=params, **kwargs)
+
+
+def options(url, **kwargs):
+ r"""Sends an OPTIONS request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault('allow_redirects', True)
+ return request('options', url, **kwargs)
+
+
+def head(url, **kwargs):
+ r"""Sends a HEAD request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes. If
+ `allow_redirects` is not provided, it will be set to `False` (as
+ opposed to the default :meth:`request` behavior).
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault('allow_redirects', False)
+ return request('head', url, **kwargs)
+
+
+def post(url, data=None, json=None, **kwargs):
+ r"""Sends a POST request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) json data to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
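+
+    Usage sketch (httpbin.org as a stand-in endpoint)::
+
+        >>> import requests
+        >>> requests.post('https://httpbin.org/post', json={'key': 'value'})
+        <Response [200]>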
+ """
+
+ return request('post', url, data=data, json=json, **kwargs)
+
+
+def put(url, data=None, **kwargs):
+ r"""Sends a PUT request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) json data to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request('put', url, data=data, **kwargs)
+
+
+def patch(url, data=None, **kwargs):
+ r"""Sends a PATCH request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) json data to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request('patch', url, data=data, **kwargs)
+
+
+def delete(url, **kwargs):
+ r"""Sends a DELETE request.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :return: :class:`Response <Response>` object
+ :rtype: requests.Response
+ """
+
+ return request('delete', url, **kwargs)
diff --git a/third_party/python/requests/requests/auth.py b/third_party/python/requests/requests/auth.py
new file mode 100644
index 0000000000..eeface39ae
--- /dev/null
+++ b/third_party/python/requests/requests/auth.py
@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.auth
+~~~~~~~~~~~~~
+
+This module contains the authentication handlers for Requests.
+"""
+
+import os
+import re
+import time
+import hashlib
+import threading
+import warnings
+
+from base64 import b64encode
+
+from .compat import urlparse, str, basestring
+from .cookies import extract_cookies_to_jar
+from ._internal_utils import to_native_string
+from .utils import parse_dict_header
+
+CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded'
+CONTENT_TYPE_MULTI_PART = 'multipart/form-data'
+
+
+def _basic_auth_str(username, password):
+ """Returns a Basic Auth string."""
+
+ # "I want us to put a big-ol' comment on top of it that
+ # says that this behaviour is dumb but we need to preserve
+ # it because people are relying on it."
+ # - Lukasa
+ #
+ # These are here solely to maintain backwards compatibility
+ # for things like ints. This will be removed in 3.0.0.
+ if not isinstance(username, basestring):
+ warnings.warn(
+ "Non-string usernames will no longer be supported in Requests "
+ "3.0.0. Please convert the object you've passed in ({!r}) to "
+ "a string or bytes object in the near future to avoid "
+ "problems.".format(username),
+ category=DeprecationWarning,
+ )
+ username = str(username)
+
+ if not isinstance(password, basestring):
+ warnings.warn(
+ "Non-string passwords will no longer be supported in Requests "
+ "3.0.0. Please convert the object you've passed in ({!r}) to "
+ "a string or bytes object in the near future to avoid "
+ "problems.".format(type(password)),
+ category=DeprecationWarning,
+ )
+ password = str(password)
+ # -- End Removal --
+
+ if isinstance(username, str):
+ username = username.encode('latin1')
+
+ if isinstance(password, str):
+ password = password.encode('latin1')
+
+ authstr = 'Basic ' + to_native_string(
+ b64encode(b':'.join((username, password))).strip()
+ )
+
+ return authstr
+
+
+class AuthBase(object):
+ """Base class that all auth implementations derive from"""
+
+ def __call__(self, r):
+ raise NotImplementedError('Auth hooks must be callable.')
+
+
+class HTTPBasicAuth(AuthBase):
+ """Attaches HTTP Basic Authentication to the given Request object."""
+
+ def __init__(self, username, password):
+ self.username = username
+ self.password = password
+
+ def __eq__(self, other):
+ return all([
+ self.username == getattr(other, 'username', None),
+ self.password == getattr(other, 'password', None)
+ ])
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __call__(self, r):
+ r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
+ return r
+
+
+class HTTPProxyAuth(HTTPBasicAuth):
+ """Attaches HTTP Proxy Authentication to a given Request object."""
+
+ def __call__(self, r):
+ r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password)
+ return r
+
+
+class HTTPDigestAuth(AuthBase):
+ """Attaches HTTP Digest Authentication to the given Request object."""
+
+ def __init__(self, username, password):
+ self.username = username
+ self.password = password
+ # Keep state in per-thread local storage
+ self._thread_local = threading.local()
+
+ def init_per_thread_state(self):
+ # Ensure state is initialized just once per-thread
+ if not hasattr(self._thread_local, 'init'):
+ self._thread_local.init = True
+ self._thread_local.last_nonce = ''
+ self._thread_local.nonce_count = 0
+ self._thread_local.chal = {}
+ self._thread_local.pos = None
+ self._thread_local.num_401_calls = None
+
+ def build_digest_header(self, method, url):
+ """
+ :rtype: str
+ """
+
+ realm = self._thread_local.chal['realm']
+ nonce = self._thread_local.chal['nonce']
+ qop = self._thread_local.chal.get('qop')
+ algorithm = self._thread_local.chal.get('algorithm')
+ opaque = self._thread_local.chal.get('opaque')
+ hash_utf8 = None
+
+ if algorithm is None:
+ _algorithm = 'MD5'
+ else:
+ _algorithm = algorithm.upper()
+ # lambdas assume digest modules are imported at the top level
+ if _algorithm == 'MD5' or _algorithm == 'MD5-SESS':
+ def md5_utf8(x):
+ if isinstance(x, str):
+ x = x.encode('utf-8')
+ return hashlib.md5(x).hexdigest()
+ hash_utf8 = md5_utf8
+ elif _algorithm == 'SHA':
+ def sha_utf8(x):
+ if isinstance(x, str):
+ x = x.encode('utf-8')
+ return hashlib.sha1(x).hexdigest()
+ hash_utf8 = sha_utf8
+ elif _algorithm == 'SHA-256':
+ def sha256_utf8(x):
+ if isinstance(x, str):
+ x = x.encode('utf-8')
+ return hashlib.sha256(x).hexdigest()
+ hash_utf8 = sha256_utf8
+ elif _algorithm == 'SHA-512':
+ def sha512_utf8(x):
+ if isinstance(x, str):
+ x = x.encode('utf-8')
+ return hashlib.sha512(x).hexdigest()
+ hash_utf8 = sha512_utf8
+
+ KD = lambda s, d: hash_utf8("%s:%s" % (s, d))
+
+ if hash_utf8 is None:
+ return None
+
+ # XXX not implemented yet
+ entdig = None
+ p_parsed = urlparse(url)
+    #: path is the request-uri defined in RFC 2616, which should not be empty
+ path = p_parsed.path or "/"
+ if p_parsed.query:
+ path += '?' + p_parsed.query
+
+ A1 = '%s:%s:%s' % (self.username, realm, self.password)
+ A2 = '%s:%s' % (method, path)
+
+ HA1 = hash_utf8(A1)
+ HA2 = hash_utf8(A2)
+
+ if nonce == self._thread_local.last_nonce:
+ self._thread_local.nonce_count += 1
+ else:
+ self._thread_local.nonce_count = 1
+ ncvalue = '%08x' % self._thread_local.nonce_count
+ s = str(self._thread_local.nonce_count).encode('utf-8')
+ s += nonce.encode('utf-8')
+ s += time.ctime().encode('utf-8')
+ s += os.urandom(8)
+
+ cnonce = (hashlib.sha1(s).hexdigest()[:16])
+ if _algorithm == 'MD5-SESS':
+ HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce))
+
+ if not qop:
+ respdig = KD(HA1, "%s:%s" % (nonce, HA2))
+ elif qop == 'auth' or 'auth' in qop.split(','):
+ noncebit = "%s:%s:%s:%s:%s" % (
+ nonce, ncvalue, cnonce, 'auth', HA2
+ )
+ respdig = KD(HA1, noncebit)
+ else:
+ # XXX handle auth-int.
+ return None
+
+ self._thread_local.last_nonce = nonce
+
+ # XXX should the partial digests be encoded too?
+ base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
+ 'response="%s"' % (self.username, realm, nonce, path, respdig)
+ if opaque:
+ base += ', opaque="%s"' % opaque
+ if algorithm:
+ base += ', algorithm="%s"' % algorithm
+ if entdig:
+ base += ', digest="%s"' % entdig
+ if qop:
+ base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce)
+
+ return 'Digest %s' % (base)
+
+ def handle_redirect(self, r, **kwargs):
+ """Reset num_401_calls counter on redirects."""
+ if r.is_redirect:
+ self._thread_local.num_401_calls = 1
+
+ def handle_401(self, r, **kwargs):
+ """
+ Takes the given response and tries digest-auth, if needed.
+
+ :rtype: requests.Response
+ """
+
+ # If response is not 4xx, do not auth
+ # See https://github.com/psf/requests/issues/3772
+ if not 400 <= r.status_code < 500:
+ self._thread_local.num_401_calls = 1
+ return r
+
+ if self._thread_local.pos is not None:
+ # Rewind the file position indicator of the body to where
+ # it was to resend the request.
+ r.request.body.seek(self._thread_local.pos)
+ s_auth = r.headers.get('www-authenticate', '')
+
+ if 'digest' in s_auth.lower() and self._thread_local.num_401_calls < 2:
+
+ self._thread_local.num_401_calls += 1
+ pat = re.compile(r'digest ', flags=re.IGNORECASE)
+ self._thread_local.chal = parse_dict_header(pat.sub('', s_auth, count=1))
+
+ # Consume content and release the original connection
+ # to allow our new request to reuse the same one.
+ r.content
+ r.close()
+ prep = r.request.copy()
+ extract_cookies_to_jar(prep._cookies, r.request, r.raw)
+ prep.prepare_cookies(prep._cookies)
+
+ prep.headers['Authorization'] = self.build_digest_header(
+ prep.method, prep.url)
+ _r = r.connection.send(prep, **kwargs)
+ _r.history.append(r)
+ _r.request = prep
+
+ return _r
+
+ self._thread_local.num_401_calls = 1
+ return r
+
+ def __call__(self, r):
+ # Initialize per-thread state, if needed
+ self.init_per_thread_state()
+ # If we have a saved nonce, skip the 401
+ if self._thread_local.last_nonce:
+ r.headers['Authorization'] = self.build_digest_header(r.method, r.url)
+ try:
+ self._thread_local.pos = r.body.tell()
+ except AttributeError:
+ # In the case of HTTPDigestAuth being reused and the body of
+ # the previous request was a file-like object, pos has the
+ # file position of the previous body. Ensure it's set to
+ # None.
+ self._thread_local.pos = None
+ r.register_hook('response', self.handle_401)
+ r.register_hook('response', self.handle_redirect)
+ self._thread_local.num_401_calls = 1
+
+ return r
+
+ def __eq__(self, other):
+ return all([
+ self.username == getattr(other, 'username', None),
+ self.password == getattr(other, 'password', None)
+ ])
+
+ def __ne__(self, other):
+ return not self == other
diff --git a/third_party/python/requests/requests/certs.py b/third_party/python/requests/requests/certs.py
new file mode 100644
index 0000000000..d1a378d787
--- /dev/null
+++ b/third_party/python/requests/requests/certs.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+"""
+requests.certs
+~~~~~~~~~~~~~~
+
+This module returns the preferred default CA certificate bundle. There is
+only one — the one from the certifi package.
+
+If you are packaging Requests, e.g., for a Linux distribution or a managed
+environment, you can change the definition of where() to return a separately
+packaged CA bundle.
+"""
+from certifi import where
+
+if __name__ == '__main__':
+ print(where())
diff --git a/third_party/python/requests/requests/compat.py b/third_party/python/requests/requests/compat.py
new file mode 100644
index 0000000000..5de0769f50
--- /dev/null
+++ b/third_party/python/requests/requests/compat.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.compat
+~~~~~~~~~~~~~~~
+
+This module handles import compatibility issues between Python 2 and
+Python 3.
+"""
+
+import chardet
+
+import sys
+
+# -------
+# Pythons
+# -------
+
+# Syntax sugar.
+_ver = sys.version_info
+
+#: Python 2.x?
+is_py2 = (_ver[0] == 2)
+
+#: Python 3.x?
+is_py3 = (_ver[0] == 3)
+
+try:
+ import simplejson as json
+except ImportError:
+ import json
+
+# ---------
+# Specifics
+# ---------
+
+if is_py2:
+ from urllib import (
+ quote, unquote, quote_plus, unquote_plus, urlencode, getproxies,
+ proxy_bypass, proxy_bypass_environment, getproxies_environment)
+ from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
+ from urllib2 import parse_http_list
+ import cookielib
+ from Cookie import Morsel
+ from StringIO import StringIO
+ # Keep OrderedDict for backwards compatibility.
+ from collections import Callable, Mapping, MutableMapping, OrderedDict
+
+
+ builtin_str = str
+ bytes = str
+ str = unicode
+ basestring = basestring
+ numeric_types = (int, long, float)
+ integer_types = (int, long)
+
+elif is_py3:
+ from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
+ from urllib.request import parse_http_list, getproxies, proxy_bypass, proxy_bypass_environment, getproxies_environment
+ from http import cookiejar as cookielib
+ from http.cookies import Morsel
+ from io import StringIO
+ # Keep OrderedDict for backwards compatibility.
+ from collections import OrderedDict
+ from collections.abc import Callable, Mapping, MutableMapping
+
+ builtin_str = str
+ str = str
+ bytes = bytes
+ basestring = (str, bytes)
+ numeric_types = (int, float)
+ integer_types = (int,)
diff --git a/third_party/python/requests/requests/cookies.py b/third_party/python/requests/requests/cookies.py
new file mode 100644
index 0000000000..56fccd9c25
--- /dev/null
+++ b/third_party/python/requests/requests/cookies.py
@@ -0,0 +1,549 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.cookies
+~~~~~~~~~~~~~~~~
+
+Compatibility code to be able to use `cookielib.CookieJar` with requests.
+
+requests.utils imports from here, so be careful with imports.
+"""
+
+import copy
+import time
+import calendar
+
+from ._internal_utils import to_native_string
+from .compat import cookielib, urlparse, urlunparse, Morsel, MutableMapping
+
+try:
+ import threading
+except ImportError:
+ import dummy_threading as threading
+
+
+class MockRequest(object):
+ """Wraps a `requests.Request` to mimic a `urllib2.Request`.
+
+ The code in `cookielib.CookieJar` expects this interface in order to correctly
+ manage cookie policies, i.e., determine whether a cookie can be set, given the
+ domains of the request and the cookie.
+
+ The original request object is read-only. The client is responsible for collecting
+ the new headers via `get_new_headers()` and interpreting them appropriately. You
+ probably want `get_cookie_header`, defined below.
+ """
+
+ def __init__(self, request):
+ self._r = request
+ self._new_headers = {}
+ self.type = urlparse(self._r.url).scheme
+
+ def get_type(self):
+ return self.type
+
+ def get_host(self):
+ return urlparse(self._r.url).netloc
+
+ def get_origin_req_host(self):
+ return self.get_host()
+
+ def get_full_url(self):
+ # Only return the response's URL if the user hadn't set the Host
+ # header
+ if not self._r.headers.get('Host'):
+ return self._r.url
+ # If they did set it, retrieve it and reconstruct the expected domain
+ host = to_native_string(self._r.headers['Host'], encoding='utf-8')
+ parsed = urlparse(self._r.url)
+ # Reconstruct the URL as we expect it
+ return urlunparse([
+ parsed.scheme, host, parsed.path, parsed.params, parsed.query,
+ parsed.fragment
+ ])
+
+ def is_unverifiable(self):
+ return True
+
+ def has_header(self, name):
+ return name in self._r.headers or name in self._new_headers
+
+ def get_header(self, name, default=None):
+ return self._r.headers.get(name, self._new_headers.get(name, default))
+
+ def add_header(self, key, val):
+ """cookielib has no legitimate use for this method; add it back if you find one."""
+ raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
+
+ def add_unredirected_header(self, name, value):
+ self._new_headers[name] = value
+
+ def get_new_headers(self):
+ return self._new_headers
+
+ @property
+ def unverifiable(self):
+ return self.is_unverifiable()
+
+ @property
+ def origin_req_host(self):
+ return self.get_origin_req_host()
+
+ @property
+ def host(self):
+ return self.get_host()
+
+
+class MockResponse(object):
+ """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
+
+ ...what? Basically, expose the parsed HTTP headers from the server response
+ the way `cookielib` expects to see them.
+ """
+
+ def __init__(self, headers):
+ """Make a MockResponse for `cookielib` to read.
+
+ :param headers: a httplib.HTTPMessage or analogous carrying the headers
+ """
+ self._headers = headers
+
+ def info(self):
+ return self._headers
+
+ def getheaders(self, name):
+        return self._headers.getheaders(name)
+
+
+def extract_cookies_to_jar(jar, request, response):
+ """Extract the cookies from the response into a CookieJar.
+
+ :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
+ :param request: our own requests.Request object
+ :param response: urllib3.HTTPResponse object
+ """
+ if not (hasattr(response, '_original_response') and
+ response._original_response):
+ return
+    # The _original_response field is the wrapped httplib.HTTPResponse object.
+ req = MockRequest(request)
+ # pull out the HTTPMessage with the headers and put it in the mock:
+ res = MockResponse(response._original_response.msg)
+ jar.extract_cookies(res, req)
+
+
+def get_cookie_header(jar, request):
+ """
+ Produce an appropriate Cookie header string to be sent with `request`, or None.
+
+ :rtype: str
+ """
+ r = MockRequest(request)
+ jar.add_cookie_header(r)
+ return r.get_new_headers().get('Cookie')
+
+
+def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
+ """Unsets a cookie by name, by default over all domains and paths.
+
+ Wraps CookieJar.clear(), is O(n).
+ """
+ clearables = []
+ for cookie in cookiejar:
+ if cookie.name != name:
+ continue
+ if domain is not None and domain != cookie.domain:
+ continue
+ if path is not None and path != cookie.path:
+ continue
+ clearables.append((cookie.domain, cookie.path, cookie.name))
+
+ for domain, path, name in clearables:
+ cookiejar.clear(domain, path, name)
+
+
+class CookieConflictError(RuntimeError):
+ """There are two cookies that meet the criteria specified in the cookie jar.
+ Use .get and .set and include domain and path args in order to be more specific.
+ """
+
+
+class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
+ """Compatibility class; is a cookielib.CookieJar, but exposes a dict
+ interface.
+
+ This is the CookieJar we create by default for requests and sessions that
+ don't specify one, since some clients may expect response.cookies and
+ session.cookies to support dict operations.
+
+ Requests does not use the dict interface internally; it's just for
+ compatibility with external client code. All requests code should work
+ out of the box with externally provided instances of ``CookieJar``, e.g.
+ ``LWPCookieJar`` and ``FileCookieJar``.
+
+ Unlike a regular CookieJar, this class is pickleable.
+
+ .. warning:: dictionary operations that are normally O(1) may be O(n).
+ """
+
+ def get(self, name, default=None, domain=None, path=None):
+ """Dict-like get() that also supports optional domain and path args in
+ order to resolve naming collisions from using one cookie jar over
+ multiple domains.
+
+ .. warning:: operation is O(n), not O(1).
+ """
+ try:
+ return self._find_no_duplicates(name, domain, path)
+ except KeyError:
+ return default
+
+ def set(self, name, value, **kwargs):
+ """Dict-like set() that also supports optional domain and path args in
+ order to resolve naming collisions from using one cookie jar over
+ multiple domains.
+ """
+ # support client code that unsets cookies by assignment of a None value:
+ if value is None:
+ remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
+ return
+
+ if isinstance(value, Morsel):
+ c = morsel_to_cookie(value)
+ else:
+ c = create_cookie(name, value, **kwargs)
+ self.set_cookie(c)
+ return c
+
+ def iterkeys(self):
+ """Dict-like iterkeys() that returns an iterator of names of cookies
+ from the jar.
+
+ .. seealso:: itervalues() and iteritems().
+ """
+ for cookie in iter(self):
+ yield cookie.name
+
+ def keys(self):
+ """Dict-like keys() that returns a list of names of cookies from the
+ jar.
+
+ .. seealso:: values() and items().
+ """
+ return list(self.iterkeys())
+
+ def itervalues(self):
+ """Dict-like itervalues() that returns an iterator of values of cookies
+ from the jar.
+
+ .. seealso:: iterkeys() and iteritems().
+ """
+ for cookie in iter(self):
+ yield cookie.value
+
+ def values(self):
+ """Dict-like values() that returns a list of values of cookies from the
+ jar.
+
+ .. seealso:: keys() and items().
+ """
+ return list(self.itervalues())
+
+ def iteritems(self):
+ """Dict-like iteritems() that returns an iterator of name-value tuples
+ from the jar.
+
+ .. seealso:: iterkeys() and itervalues().
+ """
+ for cookie in iter(self):
+ yield cookie.name, cookie.value
+
+ def items(self):
+ """Dict-like items() that returns a list of name-value tuples from the
+ jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
+ vanilla python dict of key value pairs.
+
+ .. seealso:: keys() and values().
+ """
+ return list(self.iteritems())
+
+ def list_domains(self):
+ """Utility method to list all the domains in the jar."""
+ domains = []
+ for cookie in iter(self):
+ if cookie.domain not in domains:
+ domains.append(cookie.domain)
+ return domains
+
+ def list_paths(self):
+ """Utility method to list all the paths in the jar."""
+ paths = []
+ for cookie in iter(self):
+ if cookie.path not in paths:
+ paths.append(cookie.path)
+ return paths
+
+ def multiple_domains(self):
+ """Returns True if there are multiple domains in the jar.
+ Returns False otherwise.
+
+ :rtype: bool
+ """
+ domains = []
+ for cookie in iter(self):
+ if cookie.domain is not None and cookie.domain in domains:
+ return True
+ domains.append(cookie.domain)
+ return False # there is only one domain in jar
+
+ def get_dict(self, domain=None, path=None):
+ """Takes as an argument an optional domain and path and returns a plain
+ old Python dict of name-value pairs of cookies that meet the
+ requirements.
+
+ :rtype: dict
+ """
+ dictionary = {}
+ for cookie in iter(self):
+ if (
+ (domain is None or cookie.domain == domain) and
+ (path is None or cookie.path == path)
+ ):
+ dictionary[cookie.name] = cookie.value
+ return dictionary
+
+ def __contains__(self, name):
+ try:
+ return super(RequestsCookieJar, self).__contains__(name)
+ except CookieConflictError:
+ return True
+
+ def __getitem__(self, name):
+ """Dict-like __getitem__() for compatibility with client code. Throws
+ exception if there are more than one cookie with name. In that case,
+ use the more explicit get() method instead.
+
+ .. warning:: operation is O(n), not O(1).
+ """
+ return self._find_no_duplicates(name)
+
+ def __setitem__(self, name, value):
+ """Dict-like __setitem__ for compatibility with client code. Throws
+ exception if there is already a cookie of that name in the jar. In that
+ case, use the more explicit set() method instead.
+ """
+ self.set(name, value)
+
+ def __delitem__(self, name):
+ """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
+ ``remove_cookie_by_name()``.
+ """
+ remove_cookie_by_name(self, name)
+
+ def set_cookie(self, cookie, *args, **kwargs):
+ # If the value arrives wrapped in double quotes, drop any escaped (\")
+ # inner quotes before storing the cookie.
+ if (hasattr(cookie.value, 'startswith') and
+ cookie.value.startswith('"') and
+ cookie.value.endswith('"')):
+ cookie.value = cookie.value.replace('\\"', '')
+ return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
+
+ def update(self, other):
+ """Updates this jar with cookies from another CookieJar or dict-like"""
+ if isinstance(other, cookielib.CookieJar):
+ for cookie in other:
+ self.set_cookie(copy.copy(cookie))
+ else:
+ super(RequestsCookieJar, self).update(other)
+
+ def _find(self, name, domain=None, path=None):
+ """Requests uses this method internally to get cookie values.
+
+ If there are conflicting cookies, _find arbitrarily chooses one.
+ See _find_no_duplicates if you want an exception thrown if there are
+ conflicting cookies.
+
+ :param name: a string containing name of cookie
+ :param domain: (optional) string containing domain of cookie
+ :param path: (optional) string containing path of cookie
+ :return: cookie.value
+ """
+ for cookie in iter(self):
+ if cookie.name == name:
+ if domain is None or cookie.domain == domain:
+ if path is None or cookie.path == path:
+ return cookie.value
+
+ raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
+
+ def _find_no_duplicates(self, name, domain=None, path=None):
+ """Both ``__get_item__`` and ``get`` call this function: it's never
+ used elsewhere in Requests.
+
+ :param name: a string containing name of cookie
+ :param domain: (optional) string containing domain of cookie
+ :param path: (optional) string containing path of cookie
+ :raises KeyError: if cookie is not found
+ :raises CookieConflictError: if there are multiple cookies
+ that match name and optionally domain and path
+ :return: cookie.value
+ """
+ to_return = None
+ for cookie in iter(self):
+ if cookie.name == name:
+ if domain is None or cookie.domain == domain:
+ if path is None or cookie.path == path:
+ # Multiple cookies meet the passed-in criteria.
+ if to_return is not None:
+ raise CookieConflictError('There are multiple cookies with name %r' % (name,))
+ # We will eventually return this, as long as no conflicting cookie turns up.
+ to_return = cookie.value
+
+ if to_return is not None:  # an empty string is still a valid cookie value
+ return to_return
+ raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
+
+ def __getstate__(self):
+ """Unlike a normal CookieJar, this class is pickleable."""
+ state = self.__dict__.copy()
+ # remove the unpickleable RLock object
+ state.pop('_cookies_lock')
+ return state
+
+ def __setstate__(self, state):
+ """Unlike a normal CookieJar, this class is pickleable."""
+ self.__dict__.update(state)
+ if '_cookies_lock' not in self.__dict__:
+ self._cookies_lock = threading.RLock()
+
+ def copy(self):
+ """Return a copy of this RequestsCookieJar."""
+ new_cj = RequestsCookieJar()
+ new_cj.set_policy(self.get_policy())
+ new_cj.update(self)
+ return new_cj
+
+ def get_policy(self):
+ """Return the CookiePolicy instance used."""
+ return self._policy
+
+
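+# A minimal usage sketch (editorial illustration, not part of the vendored
+# source; the domain and path are hypothetical): the jar behaves like a dict
+# for the common case, while the domain/path-aware set()/get() helpers above
+# disambiguate colliding names.
+#
+#     >>> jar = RequestsCookieJar()
+#     >>> jar['token'] = 'abc'
+#     >>> c = jar.set('token', 'xyz', domain='example.com', path='/app')
+#     >>> jar.get('token', domain='example.com', path='/app')
+#     'xyz'
+#     >>> jar.get_dict(domain='example.com')
+#     {'token': 'xyz'}
+
+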
+def _copy_cookie_jar(jar):
+ if jar is None:
+ return None
+
+ if hasattr(jar, 'copy'):
+ # We're dealing with an instance of RequestsCookieJar
+ return jar.copy()
+ # We're dealing with a generic CookieJar instance
+ new_jar = copy.copy(jar)
+ new_jar.clear()
+ for cookie in jar:
+ new_jar.set_cookie(copy.copy(cookie))
+ return new_jar
+
+
+def create_cookie(name, value, **kwargs):
+ """Make a cookie from underspecified parameters.
+
+ By default, the pair of `name` and `value` will be set for the domain ''
+ and sent on every request (this is sometimes called a "supercookie").
+ """
+ result = {
+ 'version': 0,
+ 'name': name,
+ 'value': value,
+ 'port': None,
+ 'domain': '',
+ 'path': '/',
+ 'secure': False,
+ 'expires': None,
+ 'discard': True,
+ 'comment': None,
+ 'comment_url': None,
+ 'rest': {'HttpOnly': None},
+ 'rfc2109': False,
+ }
+
+ badargs = set(kwargs) - set(result)
+ if badargs:
+ err = 'create_cookie() got unexpected keyword arguments: %s'
+ raise TypeError(err % list(badargs))
+
+ result.update(kwargs)
+ result['port_specified'] = bool(result['port'])
+ result['domain_specified'] = bool(result['domain'])
+ result['domain_initial_dot'] = result['domain'].startswith('.')
+ result['path_specified'] = bool(result['path'])
+
+ return cookielib.Cookie(**result)
+
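+# Editorial sketch (not part of the vendored source; example.com is
+# illustrative): unspecified attributes fall back to the defaults above, and
+# unknown keyword arguments raise TypeError.
+#
+#     >>> c = create_cookie('session', 'abc123', domain='example.com')
+#     >>> c.domain, c.path, c.secure
+#     ('example.com', '/', False)
+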
+
+def morsel_to_cookie(morsel):
+ """Convert a Morsel object into a Cookie containing the one k/v pair."""
+
+ expires = None
+ if morsel['max-age']:
+ try:
+ expires = int(time.time() + int(morsel['max-age']))
+ except ValueError:
+ raise TypeError('max-age: %s must be integer' % morsel['max-age'])
+ elif morsel['expires']:
+ time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
+ expires = calendar.timegm(
+ time.strptime(morsel['expires'], time_template)
+ )
+ return create_cookie(
+ comment=morsel['comment'],
+ comment_url=bool(morsel['comment']),
+ discard=False,
+ domain=morsel['domain'],
+ expires=expires,
+ name=morsel.key,
+ path=morsel['path'],
+ port=None,
+ rest={'HttpOnly': morsel['httponly']},
+ rfc2109=False,
+ secure=bool(morsel['secure']),
+ value=morsel.value,
+ version=morsel['version'] or 0,
+ )
+
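+# Editorial sketch (not part of the vendored source): converting a stdlib
+# Morsel preserves its key/value pair.
+#
+#     >>> m = Morsel()  # Morsel is imported above from .compat
+#     >>> m.set('token', 'abc', 'abc')
+#     >>> c = morsel_to_cookie(m)
+#     >>> (c.name, c.value)
+#     ('token', 'abc')
+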
+
+def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
+ """Returns a CookieJar from a key/value dictionary.
+
+ :param cookie_dict: Dict of key/values to insert into CookieJar.
+ :param cookiejar: (optional) A cookiejar to add the cookies to.
+ :param overwrite: (optional) If False, will not replace cookies
+ already in the jar with new ones.
+ :rtype: CookieJar
+ """
+ if cookiejar is None:
+ cookiejar = RequestsCookieJar()
+
+ if cookie_dict is not None:
+ names_from_jar = [cookie.name for cookie in cookiejar]
+ for name in cookie_dict:
+ if overwrite or (name not in names_from_jar):
+ cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
+
+ return cookiejar
+
+
+def merge_cookies(cookiejar, cookies):
+ """Add cookies to cookiejar and returns a merged CookieJar.
+
+ :param cookiejar: CookieJar object to add the cookies to.
+ :param cookies: Dictionary or CookieJar object to be added.
+ :rtype: CookieJar
+ """
+ if not isinstance(cookiejar, cookielib.CookieJar):
+ raise ValueError('You can only merge into CookieJar')
+
+ if isinstance(cookies, dict):
+ cookiejar = cookiejar_from_dict(
+ cookies, cookiejar=cookiejar, overwrite=False)
+ elif isinstance(cookies, cookielib.CookieJar):
+ try:
+ cookiejar.update(cookies)
+ except AttributeError:
+ for cookie_in_jar in cookies:
+ cookiejar.set_cookie(cookie_in_jar)
+
+ return cookiejar
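+
+
+# Editorial sketch (not part of the vendored source): the two helpers compose,
+# and merge_cookies() adds entries without clobbering what is already in the
+# jar (overwrite=False above).
+#
+#     >>> jar = cookiejar_from_dict({'a': '1'})
+#     >>> jar = merge_cookies(jar, {'a': 'ignored', 'b': '2'})
+#     >>> sorted(jar.get_dict().items())
+#     [('a', '1'), ('b', '2')]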
diff --git a/third_party/python/requests/requests/exceptions.py b/third_party/python/requests/requests/exceptions.py
new file mode 100644
index 0000000000..0e9c820c83
--- /dev/null
+++ b/third_party/python/requests/requests/exceptions.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.exceptions
+~~~~~~~~~~~~~~~~~~~
+
+This module contains the set of Requests' exceptions.
+"""
+from urllib3.exceptions import HTTPError as BaseHTTPError
+
+
+class RequestException(IOError):
+ """There was an ambiguous exception that occurred while handling your
+ request.
+ """
+
+ def __init__(self, *args, **kwargs):
+ """Initialize RequestException with `request` and `response` objects."""
+ response = kwargs.pop('response', None)
+ self.response = response
+ self.request = kwargs.pop('request', None)
+ if (response is not None and not self.request and
+ hasattr(response, 'request')):
+ self.request = self.response.request
+ super(RequestException, self).__init__(*args, **kwargs)
+
+
+class HTTPError(RequestException):
+ """An HTTP error occurred."""
+
+
+class ConnectionError(RequestException):
+ """A Connection error occurred."""
+
+
+class ProxyError(ConnectionError):
+ """A proxy error occurred."""
+
+
+class SSLError(ConnectionError):
+ """An SSL error occurred."""
+
+
+class Timeout(RequestException):
+ """The request timed out.
+
+ Catching this error will catch both
+ :exc:`~requests.exceptions.ConnectTimeout` and
+ :exc:`~requests.exceptions.ReadTimeout` errors.
+ """
+
+
+class ConnectTimeout(ConnectionError, Timeout):
+ """The request timed out while trying to connect to the remote server.
+
+ Requests that produced this error are safe to retry.
+ """
+
+
+class ReadTimeout(Timeout):
+ """The server did not send any data in the allotted amount of time."""
+
+
+class URLRequired(RequestException):
+ """A valid URL is required to make a request."""
+
+
+class TooManyRedirects(RequestException):
+ """Too many redirects."""
+
+
+class MissingSchema(RequestException, ValueError):
+ """The URL schema (e.g. http or https) is missing."""
+
+
+class InvalidSchema(RequestException, ValueError):
+ """See defaults.py for valid schemas."""
+
+
+class InvalidURL(RequestException, ValueError):
+ """The URL provided was somehow invalid."""
+
+
+class InvalidHeader(RequestException, ValueError):
+ """The header value provided was somehow invalid."""
+
+
+class InvalidProxyURL(InvalidURL):
+ """The proxy URL provided is invalid."""
+
+
+class ChunkedEncodingError(RequestException):
+ """The server declared chunked encoding but sent an invalid chunk."""
+
+
+class ContentDecodingError(RequestException, BaseHTTPError):
+ """Failed to decode response content."""
+
+
+class StreamConsumedError(RequestException, TypeError):
+ """The content for this response was already consumed."""
+
+
+class RetryError(RequestException):
+ """Custom retries logic failed"""
+
+
+class UnrewindableBodyError(RequestException):
+ """Requests encountered an error when trying to rewind a body."""
+
+# Warnings
+
+
+class RequestsWarning(Warning):
+ """Base warning for Requests."""
+
+
+class FileModeWarning(RequestsWarning, DeprecationWarning):
+ """A file was opened in text mode, but Requests determined its binary length."""
+
+
+class RequestsDependencyWarning(RequestsWarning):
+ """An imported dependency doesn't match the expected version range."""
diff --git a/third_party/python/requests/requests/help.py b/third_party/python/requests/requests/help.py
new file mode 100644
index 0000000000..e53d35ef6d
--- /dev/null
+++ b/third_party/python/requests/requests/help.py
@@ -0,0 +1,119 @@
+"""Module containing bug report helper(s)."""
+from __future__ import print_function
+
+import json
+import platform
+import sys
+import ssl
+
+import idna
+import urllib3
+import chardet
+
+from . import __version__ as requests_version
+
+try:
+ from urllib3.contrib import pyopenssl
+except ImportError:
+ pyopenssl = None
+ OpenSSL = None
+ cryptography = None
+else:
+ import OpenSSL
+ import cryptography
+
+
+def _implementation():
+ """Return a dict with the Python implementation and version.
+
+ Provide both the name and the version of the Python implementation
+ currently running. For example, on CPython 2.7.5 it will return
+ {'name': 'CPython', 'version': '2.7.5'}.
+
+ This function works best on CPython and PyPy: in particular, it probably
+ doesn't work for Jython or IronPython. Future investigation should be done
+ to work out the correct shape of the code for those platforms.
+ """
+ implementation = platform.python_implementation()
+
+ if implementation == 'CPython':
+ implementation_version = platform.python_version()
+ elif implementation == 'PyPy':
+ implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
+ sys.pypy_version_info.minor,
+ sys.pypy_version_info.micro)
+ if sys.pypy_version_info.releaselevel != 'final':
+ implementation_version = ''.join([
+ implementation_version, sys.pypy_version_info.releaselevel
+ ])
+ elif implementation == 'Jython':
+ implementation_version = platform.python_version() # Complete Guess
+ elif implementation == 'IronPython':
+ implementation_version = platform.python_version() # Complete Guess
+ else:
+ implementation_version = 'Unknown'
+
+ return {'name': implementation, 'version': implementation_version}
+
+
+def info():
+ """Generate information for a bug report."""
+ try:
+ platform_info = {
+ 'system': platform.system(),
+ 'release': platform.release(),
+ }
+ except IOError:
+ platform_info = {
+ 'system': 'Unknown',
+ 'release': 'Unknown',
+ }
+
+ implementation_info = _implementation()
+ urllib3_info = {'version': urllib3.__version__}
+ chardet_info = {'version': chardet.__version__}
+
+ pyopenssl_info = {
+ 'version': None,
+ 'openssl_version': '',
+ }
+ if OpenSSL:
+ pyopenssl_info = {
+ 'version': OpenSSL.__version__,
+ 'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
+ }
+ cryptography_info = {
+ 'version': getattr(cryptography, '__version__', ''),
+ }
+ idna_info = {
+ 'version': getattr(idna, '__version__', ''),
+ }
+
+ system_ssl = ssl.OPENSSL_VERSION_NUMBER
+ system_ssl_info = {
+ 'version': '%x' % system_ssl if system_ssl is not None else ''
+ }
+
+ return {
+ 'platform': platform_info,
+ 'implementation': implementation_info,
+ 'system_ssl': system_ssl_info,
+ 'using_pyopenssl': pyopenssl is not None,
+ 'pyOpenSSL': pyopenssl_info,
+ 'urllib3': urllib3_info,
+ 'chardet': chardet_info,
+ 'cryptography': cryptography_info,
+ 'idna': idna_info,
+ 'requests': {
+ 'version': requests_version,
+ },
+ }
+
+
+def main():
+ """Pretty-print the bug information as JSON."""
+ print(json.dumps(info(), sort_keys=True, indent=2))
+
+
+if __name__ == '__main__':
+ main()
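+
+
+# Editorial note (not part of the vendored source): because of the
+# ``__main__`` guard above, the report can be produced directly with
+# ``python -m requests.help``; programmatically, info() returns a plain dict.
+#
+#     >>> from requests.help import info
+#     >>> sorted(info())[:3]
+#     ['chardet', 'cryptography', 'idna']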
diff --git a/third_party/python/requests/requests/hooks.py b/third_party/python/requests/requests/hooks.py
new file mode 100644
index 0000000000..7a51f212c8
--- /dev/null
+++ b/third_party/python/requests/requests/hooks.py
@@ -0,0 +1,34 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.hooks
+~~~~~~~~~~~~~~
+
+This module provides the capabilities for the Requests hooks system.
+
+Available hooks:
+
+``response``:
+ The response generated from a Request.
+"""
+HOOKS = ['response']
+
+
+def default_hooks():
+ return {event: [] for event in HOOKS}
+
+# TODO: response is the only one
+
+
+def dispatch_hook(key, hooks, hook_data, **kwargs):
+ """Dispatches a hook dictionary on a given piece of data."""
+ hooks = hooks or {}
+ hooks = hooks.get(key)
+ if hooks:
+ if hasattr(hooks, '__call__'):
+ hooks = [hooks]
+ for hook in hooks:
+ _hook_data = hook(hook_data, **kwargs)
+ if _hook_data is not None:
+ hook_data = _hook_data
+ return hook_data
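+
+
+# Editorial sketch (not part of the vendored source; the URL is illustrative):
+# a response hook receives the Response (plus any extra keyword arguments
+# passed to dispatch_hook) and may return a replacement value for it.
+#
+#     >>> import requests
+#     >>> def log_url(resp, **kwargs):
+#     ...     print(resp.url)
+#     >>> r = requests.get('https://httpbin.org/get', hooks={'response': log_url})
+#     https://httpbin.org/get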
diff --git a/third_party/python/requests/requests/models.py b/third_party/python/requests/requests/models.py
new file mode 100644
index 0000000000..ec2edc20b5
--- /dev/null
+++ b/third_party/python/requests/requests/models.py
@@ -0,0 +1,956 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.models
+~~~~~~~~~~~~~~~
+
+This module contains the primary objects that power Requests.
+"""
+
+import datetime
+import sys
+
+# Import encoding now, to avoid implicit import later.
+# Implicit import within threads may cause LookupError when standard library is in a ZIP,
+# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
+import encodings.idna
+
+from urllib3.fields import RequestField
+from urllib3.filepost import encode_multipart_formdata
+from urllib3.util import parse_url
+from urllib3.exceptions import (
+ DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
+
+from io import UnsupportedOperation
+from .hooks import default_hooks
+from .structures import CaseInsensitiveDict
+
+from .auth import HTTPBasicAuth
+from .cookies import cookiejar_from_dict, get_cookie_header, _copy_cookie_jar
+from .exceptions import (
+ HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
+ ContentDecodingError, ConnectionError, StreamConsumedError)
+from ._internal_utils import to_native_string, unicode_is_ascii
+from .utils import (
+ guess_filename, get_auth_from_url, requote_uri,
+ stream_decode_response_unicode, to_key_val_list, parse_header_links,
+ iter_slices, guess_json_utf, super_len, check_header_validity)
+from .compat import (
+ Callable, Mapping,
+ cookielib, urlunparse, urlsplit, urlencode, str, bytes,
+ is_py2, chardet, builtin_str, basestring)
+from .compat import json as complexjson
+from .status_codes import codes
+
+#: The set of HTTP status codes that indicate an automatically
+#: processable redirect.
+REDIRECT_STATI = (
+ codes.moved, # 301
+ codes.found, # 302
+ codes.other, # 303
+ codes.temporary_redirect, # 307
+ codes.permanent_redirect, # 308
+)
+
+DEFAULT_REDIRECT_LIMIT = 30
+CONTENT_CHUNK_SIZE = 10 * 1024
+ITER_CHUNK_SIZE = 512
+
+
+class RequestEncodingMixin(object):
+ @property
+ def path_url(self):
+ """Build the path URL to use."""
+
+ url = []
+
+ p = urlsplit(self.url)
+
+ path = p.path
+ if not path:
+ path = '/'
+
+ url.append(path)
+
+ query = p.query
+ if query:
+ url.append('?')
+ url.append(query)
+
+ return ''.join(url)
+
+ @staticmethod
+ def _encode_params(data):
+ """Encode parameters in a piece of data.
+
+ Will successfully encode parameters when passed as a dict or a list of
+ 2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
+ if parameters are supplied as a dict.
+ """
+
+ if isinstance(data, (str, bytes)):
+ return data
+ elif hasattr(data, 'read'):
+ return data
+ elif hasattr(data, '__iter__'):
+ result = []
+ for k, vs in to_key_val_list(data):
+ if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
+ vs = [vs]
+ for v in vs:
+ if v is not None:
+ result.append(
+ (k.encode('utf-8') if isinstance(k, str) else k,
+ v.encode('utf-8') if isinstance(v, str) else v))
+ return urlencode(result, doseq=True)
+ else:
+ return data
+
+ @staticmethod
+ def _encode_files(files, data):
+ """Build the body for a multipart/form-data request.
+
+ Will successfully encode files when passed as a dict or a list of
+ tuples. Order is retained if data is a list of tuples but arbitrary
+ if parameters are supplied as a dict.
+ The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, content_type)
+ or 4-tuples (filename, fileobj, content_type, custom_headers).
+ """
+ if not files:
+ raise ValueError("Files must be provided.")
+ elif isinstance(data, basestring):
+ raise ValueError("Data must not be a string.")
+
+ new_fields = []
+ fields = to_key_val_list(data or {})
+ files = to_key_val_list(files or {})
+
+ for field, val in fields:
+ if isinstance(val, basestring) or not hasattr(val, '__iter__'):
+ val = [val]
+ for v in val:
+ if v is not None:
+ # Don't call str() on bytestrings: in Py3 it all goes wrong.
+ if not isinstance(v, bytes):
+ v = str(v)
+
+ new_fields.append(
+ (field.decode('utf-8') if isinstance(field, bytes) else field,
+ v.encode('utf-8') if isinstance(v, str) else v))
+
+ for (k, v) in files:
+ # support for explicit filename
+ ft = None
+ fh = None
+ if isinstance(v, (tuple, list)):
+ if len(v) == 2:
+ fn, fp = v
+ elif len(v) == 3:
+ fn, fp, ft = v
+ else:
+ fn, fp, ft, fh = v
+ else:
+ fn = guess_filename(v) or k
+ fp = v
+
+ if isinstance(fp, (str, bytes, bytearray)):
+ fdata = fp
+ elif hasattr(fp, 'read'):
+ fdata = fp.read()
+ elif fp is None:
+ continue
+ else:
+ fdata = fp
+
+ rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
+ rf.make_multipart(content_type=ft)
+ new_fields.append(rf)
+
+ body, content_type = encode_multipart_formdata(new_fields)
+
+ return body, content_type
+
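+# Editorial sketch (not part of the vendored source; the field name, filename
+# and URL are illustrative): the ``files`` mapping accepts bare file objects
+# or (filename, fileobj[, content_type[, custom_headers]]) tuples.
+#
+#     >>> files = {'report': ('report.csv', open('report.csv', 'rb'), 'text/csv')}
+#     >>> r = requests.post('https://httpbin.org/post', files=files)
+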
+
+class RequestHooksMixin(object):
+ def register_hook(self, event, hook):
+ """Properly register a hook."""
+
+ if event not in self.hooks:
+ raise ValueError('Unsupported event specified, with event name "%s"' % (event))
+
+ if isinstance(hook, Callable):
+ self.hooks[event].append(hook)
+ elif hasattr(hook, '__iter__'):
+ self.hooks[event].extend(h for h in hook if isinstance(h, Callable))
+
+ def deregister_hook(self, event, hook):
+ """Deregister a previously registered hook.
+ Returns True if the hook existed, False if not.
+ """
+
+ try:
+ self.hooks[event].remove(hook)
+ return True
+ except ValueError:
+ return False
+
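+# Editorial sketch (not part of the vendored source; the URL is illustrative):
+# hooks can be attached at construction time or registered afterwards, and
+# deregister_hook() reports whether the hook was present.
+#
+#     >>> def note(resp, **kwargs):
+#     ...     print(resp.url)
+#     >>> req = Request('GET', 'https://httpbin.org/get')
+#     >>> req.register_hook('response', note)
+#     >>> req.deregister_hook('response', note)
+#     True
+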
+
+class Request(RequestHooksMixin):
+ """A user-created :class:`Request <Request>` object.
+
+ Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
+
+ :param method: HTTP method to use.
+ :param url: URL to send.
+ :param headers: dictionary of headers to send.
+ :param files: dictionary of {filename: fileobject} files to multipart upload.
+ :param data: the body to attach to the request. If a dictionary or
+ list of tuples ``[(key, value)]`` is provided, form-encoding will
+ take place.
+ :param json: json for the body to attach to the request (if files or data is not specified).
+ :param params: URL parameters to append to the URL. If a dictionary or
+ list of tuples ``[(key, value)]`` is provided, form-encoding will
+ take place.
+ :param auth: Auth handler or (user, pass) tuple.
+ :param cookies: dictionary or CookieJar of cookies to attach to this request.
+ :param hooks: dictionary of callback hooks, for internal usage.
+
+ Usage::
+
+ >>> import requests
+ >>> req = requests.Request('GET', 'https://httpbin.org/get')
+ >>> req.prepare()
+ <PreparedRequest [GET]>
+ """
+
+ def __init__(self,
+ method=None, url=None, headers=None, files=None, data=None,
+ params=None, auth=None, cookies=None, hooks=None, json=None):
+
+ # Default empty dicts for dict params.
+ data = [] if data is None else data
+ files = [] if files is None else files
+ headers = {} if headers is None else headers
+ params = {} if params is None else params
+ hooks = {} if hooks is None else hooks
+
+ self.hooks = default_hooks()
+ for (k, v) in list(hooks.items()):
+ self.register_hook(event=k, hook=v)
+
+ self.method = method
+ self.url = url
+ self.headers = headers
+ self.files = files
+ self.data = data
+ self.json = json
+ self.params = params
+ self.auth = auth
+ self.cookies = cookies
+
+ def __repr__(self):
+ return '<Request [%s]>' % (self.method)
+
+ def prepare(self):
+ """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
+ p = PreparedRequest()
+ p.prepare(
+ method=self.method,
+ url=self.url,
+ headers=self.headers,
+ files=self.files,
+ data=self.data,
+ json=self.json,
+ params=self.params,
+ auth=self.auth,
+ cookies=self.cookies,
+ hooks=self.hooks,
+ )
+ return p
+
+
+class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
+ """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
+ containing the exact bytes that will be sent to the server.
+
+ Instances are generated from a :class:`Request <Request>` object, and
+ should not be instantiated manually; doing so may produce undesirable
+ effects.
+
+ Usage::
+
+ >>> import requests
+ >>> req = requests.Request('GET', 'https://httpbin.org/get')
+ >>> r = req.prepare()
+ >>> r
+ <PreparedRequest [GET]>
+
+ >>> s = requests.Session()
+ >>> s.send(r)
+ <Response [200]>
+ """
+
+ def __init__(self):
+ #: HTTP verb to send to the server.
+ self.method = None
+ #: HTTP URL to send the request to.
+ self.url = None
+ #: dictionary of HTTP headers.
+ self.headers = None
+ # The `CookieJar` used to create the Cookie header will be stored here
+ # after prepare_cookies is called
+ self._cookies = None
+ #: request body to send to the server.
+ self.body = None
+ #: dictionary of callback hooks, for internal usage.
+ self.hooks = default_hooks()
+ #: integer denoting starting position of a readable file-like body.
+ self._body_position = None
+
+ def prepare(self,
+ method=None, url=None, headers=None, files=None, data=None,
+ params=None, auth=None, cookies=None, hooks=None, json=None):
+ """Prepares the entire request with the given parameters."""
+
+ self.prepare_method(method)
+ self.prepare_url(url, params)
+ self.prepare_headers(headers)
+ self.prepare_cookies(cookies)
+ self.prepare_body(data, files, json)
+ self.prepare_auth(auth, url)
+
+ # Note that prepare_auth must be last to enable authentication schemes
+ # such as OAuth to work on a fully prepared request.
+
+ # This MUST go after prepare_auth. Authenticators could add a hook
+ self.prepare_hooks(hooks)
+
+ def __repr__(self):
+ return '<PreparedRequest [%s]>' % (self.method)
+
+ def copy(self):
+ p = PreparedRequest()
+ p.method = self.method
+ p.url = self.url
+ p.headers = self.headers.copy() if self.headers is not None else None
+ p._cookies = _copy_cookie_jar(self._cookies)
+ p.body = self.body
+ p.hooks = self.hooks
+ p._body_position = self._body_position
+ return p
+
+ def prepare_method(self, method):
+ """Prepares the given HTTP method."""
+ self.method = method
+ if self.method is not None:
+ self.method = to_native_string(self.method.upper())
+
+ @staticmethod
+ def _get_idna_encoded_host(host):
+ import idna
+
+ try:
+ host = idna.encode(host, uts46=True).decode('utf-8')
+ except idna.IDNAError:
+ raise UnicodeError
+ return host
+
+ def prepare_url(self, url, params):
+ """Prepares the given HTTP URL."""
+ #: Accept objects that have string representations.
+ #: We're unable to blindly call unicode/str functions
+ #: as this will include the bytestring indicator (b'')
+ #: on python 3.x.
+ #: https://github.com/psf/requests/pull/2238
+ if isinstance(url, bytes):
+ url = url.decode('utf8')
+ else:
+ url = unicode(url) if is_py2 else str(url)
+
+ # Remove leading whitespaces from url
+ url = url.lstrip()
+
+ # Don't do any URL preparation for non-HTTP schemes like `mailto`,
+ # `data` etc to work around exceptions from `url_parse`, which
+ # handles RFC 3986 only.
+ if ':' in url and not url.lower().startswith('http'):
+ self.url = url
+ return
+
+ # Support for unicode domain names and paths.
+ try:
+ scheme, auth, host, port, path, query, fragment = parse_url(url)
+ except LocationParseError as e:
+ raise InvalidURL(*e.args)
+
+ if not scheme:
+ error = ("Invalid URL {0!r}: No schema supplied. Perhaps you meant http://{0}?")
+ error = error.format(to_native_string(url, 'utf8'))
+
+ raise MissingSchema(error)
+
+ if not host:
+ raise InvalidURL("Invalid URL %r: No host supplied" % url)
+
+ # In general, we want to try IDNA encoding the hostname if the string contains
+ # non-ASCII characters. This allows users to automatically get the correct IDNA
+ # behaviour. For strings containing only ASCII characters, we need to also verify
+ # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
+ if not unicode_is_ascii(host):
+ try:
+ host = self._get_idna_encoded_host(host)
+ except UnicodeError:
+ raise InvalidURL('URL has an invalid label.')
+ elif host.startswith(u'*'):
+ raise InvalidURL('URL has an invalid label.')
+
+ # Carefully reconstruct the network location
+ netloc = auth or ''
+ if netloc:
+ netloc += '@'
+ netloc += host
+ if port:
+ netloc += ':' + str(port)
+
+ # Bare domains aren't valid URLs.
+ if not path:
+ path = '/'
+
+ if is_py2:
+ if isinstance(scheme, str):
+ scheme = scheme.encode('utf-8')
+ if isinstance(netloc, str):
+ netloc = netloc.encode('utf-8')
+ if isinstance(path, str):
+ path = path.encode('utf-8')
+ if isinstance(query, str):
+ query = query.encode('utf-8')
+ if isinstance(fragment, str):
+ fragment = fragment.encode('utf-8')
+
+ if isinstance(params, (str, bytes)):
+ params = to_native_string(params)
+
+ enc_params = self._encode_params(params)
+ if enc_params:
+ if query:
+ query = '%s&%s' % (query, enc_params)
+ else:
+ query = enc_params
+
+ url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
+ self.url = url
+
+ def prepare_headers(self, headers):
+ """Prepares the given HTTP headers."""
+
+ self.headers = CaseInsensitiveDict()
+ if headers:
+ for header in headers.items():
+ # Raise exception on invalid header value.
+ check_header_validity(header)
+ name, value = header
+ self.headers[to_native_string(name)] = value
+
+ def prepare_body(self, data, files, json=None):
+ """Prepares the given HTTP body data."""
+
+ # Check if file, fo, generator, iterator.
+ # If not, run through normal process.
+
+ # Nottin' on you.
+ body = None
+ content_type = None
+
+ if not data and json is not None:
+ # urllib3 requires a bytes-like body. Python 2's json.dumps
+ # provides this natively, but Python 3 gives a Unicode string.
+ content_type = 'application/json'
+ body = complexjson.dumps(json)
+ if not isinstance(body, bytes):
+ body = body.encode('utf-8')
+
+ is_stream = all([
+ hasattr(data, '__iter__'),
+ not isinstance(data, (basestring, list, tuple, Mapping))
+ ])
+
+ if is_stream:
+ try:
+ length = super_len(data)
+ except (TypeError, AttributeError, UnsupportedOperation):
+ length = None
+
+ body = data
+
+ if getattr(body, 'tell', None) is not None:
+ # Record the current file position before reading.
+ # This will allow us to rewind a file in the event
+ # of a redirect.
+ try:
+ self._body_position = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body
+ self._body_position = object()
+
+ if files:
+ raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
+
+ if length:
+ self.headers['Content-Length'] = builtin_str(length)
+ else:
+ self.headers['Transfer-Encoding'] = 'chunked'
+ else:
+ # Multi-part file uploads.
+ if files:
+ (body, content_type) = self._encode_files(files, data)
+ else:
+ if data:
+ body = self._encode_params(data)
+ if isinstance(data, basestring) or hasattr(data, 'read'):
+ content_type = None
+ else:
+ content_type = 'application/x-www-form-urlencoded'
+
+ self.prepare_content_length(body)
+
+ # Add content-type if it wasn't explicitly provided.
+ if content_type and ('content-type' not in self.headers):
+ self.headers['Content-Type'] = content_type
+
+ self.body = body
+
+ def prepare_content_length(self, body):
+ """Prepare Content-Length header based on request method and body"""
+ if body is not None:
+ length = super_len(body)
+ if length:
+ # If length exists, set it. Otherwise, we fall back
+ # to Transfer-Encoding: chunked.
+ self.headers['Content-Length'] = builtin_str(length)
+ elif self.method not in ('GET', 'HEAD') and self.headers.get('Content-Length') is None:
+ # Set Content-Length to 0 for methods that can have a body
+ # but don't provide one. (i.e. not GET or HEAD)
+ self.headers['Content-Length'] = '0'
+
+ def prepare_auth(self, auth, url=''):
+ """Prepares the given HTTP auth data."""
+
+ # If no Auth is explicitly provided, extract it from the URL first.
+ if auth is None:
+ url_auth = get_auth_from_url(self.url)
+ auth = url_auth if any(url_auth) else None
+
+ if auth:
+ if isinstance(auth, tuple) and len(auth) == 2:
+ # special-case basic HTTP auth
+ auth = HTTPBasicAuth(*auth)
+
+ # Allow auth to make its changes.
+ r = auth(self)
+
+ # Update self to reflect the auth changes.
+ self.__dict__.update(r.__dict__)
+
+ # Recompute Content-Length
+ self.prepare_content_length(self.body)
+
+ def prepare_cookies(self, cookies):
+ """Prepares the given HTTP cookie data.
+
+ This function eventually generates a ``Cookie`` header from the
+ given cookies using cookielib. Due to cookielib's design, the header
+ will not be regenerated if it already exists, meaning this function
+ can only be called once for the life of the
+ :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
+ to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
+ header is removed beforehand.
+ """
+ if isinstance(cookies, cookielib.CookieJar):
+ self._cookies = cookies
+ else:
+ self._cookies = cookiejar_from_dict(cookies)
+
+ cookie_header = get_cookie_header(self._cookies, self)
+ if cookie_header is not None:
+ self.headers['Cookie'] = cookie_header
+
+ def prepare_hooks(self, hooks):
+ """Prepares the given hooks."""
+ # hooks can be passed as None to the prepare method and to this
+ # method. To prevent iterating over None, simply use an empty list
+ # if hooks is False-y
+ hooks = hooks or []
+ for event in hooks:
+ self.register_hook(event, hooks[event])
+
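+# Editorial sketch (not part of the vendored source; the URL is illustrative):
+# a generator body takes the streaming branch of prepare_body() above, so no
+# Content-Length can be computed and chunked transfer encoding is declared.
+#
+#     >>> import requests
+#     >>> def gen():
+#     ...     yield b'part1'
+#     ...     yield b'part2'
+#     >>> p = requests.Request('POST', 'https://httpbin.org/post', data=gen()).prepare()
+#     >>> p.headers['Transfer-Encoding']
+#     'chunked'
+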
+
+class Response(object):
+ """The :class:`Response <Response>` object, which contains a
+ server's response to an HTTP request.
+ """
+
+ __attrs__ = [
+ '_content', 'status_code', 'headers', 'url', 'history',
+ 'encoding', 'reason', 'cookies', 'elapsed', 'request'
+ ]
+
+ def __init__(self):
+ self._content = False
+ self._content_consumed = False
+ self._next = None
+
+ #: Integer Code of responded HTTP Status, e.g. 404 or 200.
+ self.status_code = None
+
+ #: Case-insensitive Dictionary of Response Headers.
+ #: For example, ``headers['content-encoding']`` will return the
+ #: value of a ``'Content-Encoding'`` response header.
+ self.headers = CaseInsensitiveDict()
+
+ #: File-like object representation of response (for advanced usage).
+ #: Use of ``raw`` requires that ``stream=True`` be set on the request.
+ #: This requirement does not apply for use internally to Requests.
+ self.raw = None
+
+ #: Final URL location of Response.
+ self.url = None
+
+ #: Encoding to decode with when accessing r.text.
+ self.encoding = None
+
+ #: A list of :class:`Response <Response>` objects from
+ #: the history of the Request. Any redirect responses will end
+ #: up here. The list is sorted from the oldest to the most recent request.
+ self.history = []
+
+ #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
+ self.reason = None
+
+ #: A CookieJar of Cookies the server sent back.
+ self.cookies = cookiejar_from_dict({})
+
+ #: The amount of time elapsed between sending the request
+ #: and the arrival of the response (as a timedelta).
+ #: This property specifically measures the time taken between sending
+ #: the first byte of the request and finishing parsing the headers. It
+ #: is therefore unaffected by consuming the response content or the
+ #: value of the ``stream`` keyword argument.
+ self.elapsed = datetime.timedelta(0)
+
+ #: The :class:`PreparedRequest <PreparedRequest>` object to which this
+ #: is a response.
+ self.request = None
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+ def __getstate__(self):
+ # Consume everything; accessing the content attribute makes
+ # sure the content has been fully read.
+ if not self._content_consumed:
+ self.content
+
+ return {attr: getattr(self, attr, None) for attr in self.__attrs__}
+
+ def __setstate__(self, state):
+ for name, value in state.items():
+ setattr(self, name, value)
+
+ # pickled objects do not have .raw
+ setattr(self, '_content_consumed', True)
+ setattr(self, 'raw', None)
+
+ def __repr__(self):
+ return '<Response [%s]>' % (self.status_code)
+
+ def __bool__(self):
+ """Returns True if :attr:`status_code` is less than 400.
+
+ This attribute checks if the status code of the response is between
+ 400 and 600 to see if there was a client error or a server error. If
+ the status code is between 200 and 400, this will return True. This
+ is **not** a check to see if the response code is ``200 OK``.
+ """
+ return self.ok
+
+ def __nonzero__(self):
+ """Returns True if :attr:`status_code` is less than 400.
+
+ This attribute checks if the status code of the response is between
+ 400 and 600 to see if there was a client error or a server error. If
+ the status code is between 200 and 400, this will return True. This
+ is **not** a check to see if the response code is ``200 OK``.
+ """
+ return self.ok
+
+ def __iter__(self):
+ """Allows you to use a response as an iterator."""
+ return self.iter_content(128)
+
+ @property
+ def ok(self):
+ """Returns True if :attr:`status_code` is less than 400, False if not.
+
+ This attribute checks if the status code of the response is between
+ 400 and 600 to see if there was a client error or a server error. If
+ the status code is between 200 and 400, this will return True. This
+ is **not** a check to see if the response code is ``200 OK``.
+ """
+ try:
+ self.raise_for_status()
+ except HTTPError:
+ return False
+ return True
+
+ @property
+ def is_redirect(self):
+ """True if this Response is a well-formed HTTP redirect that could have
+ been processed automatically (by :meth:`Session.resolve_redirects`).
+ """
+ return ('location' in self.headers and self.status_code in REDIRECT_STATI)
+
+ @property
+ def is_permanent_redirect(self):
+ """True if this Response one of the permanent versions of redirect."""
+ return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
+
+ @property
+ def next(self):
+ """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
+ return self._next
+
+ @property
+ def apparent_encoding(self):
+ """The apparent encoding, provided by the chardet library."""
+ return chardet.detect(self.content)['encoding']
+
+ def iter_content(self, chunk_size=1, decode_unicode=False):
+ """Iterates over the response data. When stream=True is set on the
+ request, this avoids reading the content at once into memory for
+ large responses. The chunk size is the number of bytes it should
+ read into memory. This is not necessarily the length of each item
+ returned, as decoding can take place.
+
+ chunk_size must be of type int or None. A value of None will
+ function differently depending on the value of `stream`.
+ stream=True will read data as it arrives, in whatever size the
+ chunks are received. If stream=False, data is returned as
+ a single chunk.
+
+ If decode_unicode is True, content will be decoded using the best
+ available encoding based on the response.
+ """
+
+ def generate():
+ # Special case for urllib3.
+ if hasattr(self.raw, 'stream'):
+ try:
+ for chunk in self.raw.stream(chunk_size, decode_content=True):
+ yield chunk
+ except ProtocolError as e:
+ raise ChunkedEncodingError(e)
+ except DecodeError as e:
+ raise ContentDecodingError(e)
+ except ReadTimeoutError as e:
+ raise ConnectionError(e)
+ else:
+ # Standard file-like object.
+ while True:
+ chunk = self.raw.read(chunk_size)
+ if not chunk:
+ break
+ yield chunk
+
+ self._content_consumed = True
+
+ if self._content_consumed and isinstance(self._content, bool):
+ raise StreamConsumedError()
+ elif chunk_size is not None and not isinstance(chunk_size, int):
+ raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
+ # simulate reading small chunks of the content
+ reused_chunks = iter_slices(self._content, chunk_size)
+
+ stream_chunks = generate()
+
+ chunks = reused_chunks if self._content_consumed else stream_chunks
+
+ if decode_unicode:
+ chunks = stream_decode_response_unicode(chunks, self)
+
+ return chunks
+
+ def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None):
+ """Iterates over the response data, one line at a time. When
+ stream=True is set on the request, this avoids reading the
+ content at once into memory for large responses.
+
+ .. note:: This method is not reentrant safe.
+ """
+
+ pending = None
+
+ for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
+
+ if pending is not None:
+ chunk = pending + chunk
+
+ if delimiter:
+ lines = chunk.split(delimiter)
+ else:
+ lines = chunk.splitlines()
+
+ if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
+ pending = lines.pop()
+ else:
+ pending = None
+
+ for line in lines:
+ yield line
+
+ if pending is not None:
+ yield pending
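+
+ # Editorial sketch (not part of the vendored source; the URL is
+ # illustrative): stream a response and consume it line by line, skipping
+ # keep-alive blank lines.
+ #
+ #     >>> r = requests.get('https://httpbin.org/stream/3', stream=True)
+ #     >>> for line in r.iter_lines():
+ #     ...     if line:
+ #     ...         print(line)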
+
+ @property
+ def content(self):
+ """Content of the response, in bytes."""
+
+ if self._content is False:
+ # Read the contents.
+ if self._content_consumed:
+ raise RuntimeError(
+ 'The content for this response was already consumed')
+
+ if self.status_code == 0 or self.raw is None:
+ self._content = None
+ else:
+ self._content = b''.join(self.iter_content(CONTENT_CHUNK_SIZE)) or b''
+
+ self._content_consumed = True
+ # don't need to release the connection; that's been handled by urllib3
+ # since we exhausted the data.
+ return self._content
+
+ @property
+ def text(self):
+ """Content of the response, in unicode.
+
+ If Response.encoding is None, encoding will be guessed using
+ ``chardet``.
+
+ The encoding of the response content is determined based solely on HTTP
+ headers, following RFC 2616 to the letter. If you can take advantage of
+ non-HTTP knowledge to make a better guess at the encoding, you should
+ set ``r.encoding`` appropriately before accessing this property.
+ """
+
+ # Try charset from content-type
+ content = None
+ encoding = self.encoding
+
+ if not self.content:
+ return str('')
+
+ # Fallback to auto-detected encoding.
+ if self.encoding is None:
+ encoding = self.apparent_encoding
+
+ # Decode unicode from given encoding.
+ try:
+ content = str(self.content, encoding, errors='replace')
+ except (LookupError, TypeError):
+ # A LookupError is raised if the encoding was not found which could
+ # indicate a misspelling or similar mistake.
+ #
+ # A TypeError can be raised if encoding is None
+ #
+ # So we try blindly encoding.
+ content = str(self.content, errors='replace')
+
+ return content
+
+ def json(self, **kwargs):
+ r"""Returns the json-encoded content of a response, if any.
+
+ :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
+ :raises ValueError: If the response body does not contain valid json.
+ """
+
+ if not self.encoding and self.content and len(self.content) > 3:
+ # No encoding set. JSON RFC 4627 section 3 states we should expect
+ # UTF-8, -16 or -32. Detect which one to use; If the detection or
+ # decoding fails, fall back to `self.text` (using chardet to make
+ # a best guess).
+ encoding = guess_json_utf(self.content)
+ if encoding is not None:
+ try:
+ return complexjson.loads(
+ self.content.decode(encoding), **kwargs
+ )
+ except UnicodeDecodeError:
+ # Wrong UTF codec detected; usually because it's not UTF-8
+ # but some other 8-bit codec. This is an RFC violation,
+ # and the server didn't bother to tell us what codec *was*
+ # used.
+ pass
+ return complexjson.loads(self.text, **kwargs)
+
+ @property
+ def links(self):
+ """Returns the parsed header links of the response, if any."""
+
+ header = self.headers.get('link')
+
+ # l = MultiDict()
+ l = {}
+
+ if header:
+ links = parse_header_links(header)
+
+ for link in links:
+ key = link.get('rel') or link.get('url')
+ l[key] = link
+
+ return l
+
+ def raise_for_status(self):
+ """Raises :class:`HTTPError`, if one occurred."""
+
+ http_error_msg = ''
+ if isinstance(self.reason, bytes):
+ # We attempt to decode utf-8 first because some servers
+ # choose to localize their reason strings. If the string
+ # isn't utf-8, we fall back to iso-8859-1 for all other
+ # encodings. (See PR #3538)
+ try:
+ reason = self.reason.decode('utf-8')
+ except UnicodeDecodeError:
+ reason = self.reason.decode('iso-8859-1')
+ else:
+ reason = self.reason
+
+ if 400 <= self.status_code < 500:
+ http_error_msg = u'%s Client Error: %s for url: %s' % (self.status_code, reason, self.url)
+
+ elif 500 <= self.status_code < 600:
+ http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)
+
+ if http_error_msg:
+ raise HTTPError(http_error_msg, response=self)
+
+ def close(self):
+ """Releases the connection back to the pool. Once this method has been
+ called the underlying ``raw`` object must not be accessed again.
+
+ *Note: Should not normally need to be called explicitly.*
+ """
+ if not self._content_consumed:
+ self.raw.close()
+
+ release_conn = getattr(self.raw, 'release_conn', None)
+ if release_conn is not None:
+ release_conn()
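+
+
+# Editorial sketch (not part of the vendored source; the URL is illustrative):
+# raise_for_status() turns 4xx/5xx responses into HTTPError, and the context-
+# manager support above releases the connection when the block exits.
+#
+#     >>> import requests
+#     >>> with requests.get('https://httpbin.org/status/404') as r:
+#     ...     r.raise_for_status()
+#     Traceback (most recent call last):
+#       ...
+#     requests.exceptions.HTTPError: 404 Client Error: ...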
diff --git a/third_party/python/requests/requests/packages.py b/third_party/python/requests/requests/packages.py
new file mode 100644
index 0000000000..7232fe0ff7
--- /dev/null
+++ b/third_party/python/requests/requests/packages.py
@@ -0,0 +1,14 @@
+import sys
+
+# This code exists for backwards compatibility reasons.
+# I don't like it either. Just look the other way. :)
+
+for package in ('urllib3', 'idna', 'chardet'):
+ locals()[package] = __import__(package)
+ # This traversal is apparently necessary such that the identities are
+ # preserved (requests.packages.urllib3.* is urllib3.*)
+ for mod in list(sys.modules):
+ if mod == package or mod.startswith(package + '.'):
+ sys.modules['requests.packages.' + mod] = sys.modules[mod]
+
+# Kinda cool, though, right?
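+
+# Editorial sketch (not part of the vendored source): after the aliasing above,
+# the legacy import path and the real package are the same module objects.
+#
+#     >>> import urllib3
+#     >>> import requests.packages
+#     >>> requests.packages.urllib3 is urllib3
+#     True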
diff --git a/third_party/python/requests/requests/sessions.py b/third_party/python/requests/requests/sessions.py
new file mode 100644
index 0000000000..45ab8a5d3f
--- /dev/null
+++ b/third_party/python/requests/requests/sessions.py
@@ -0,0 +1,781 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.sessions
+~~~~~~~~~~~~~~~~~
+
+This module provides a Session object to manage and persist settings across
+requests (cookies, auth, proxies).
+"""
+import os
+import sys
+import time
+from datetime import timedelta
+from collections import OrderedDict
+
+from .auth import _basic_auth_str
+from .compat import cookielib, is_py3, urljoin, urlparse, Mapping
+from .cookies import (
+ cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
+from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
+from .hooks import default_hooks, dispatch_hook
+from ._internal_utils import to_native_string
+from .utils import to_key_val_list, default_headers, DEFAULT_PORTS
+from .exceptions import (
+ TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
+
+from .structures import CaseInsensitiveDict
+from .adapters import HTTPAdapter
+
+from .utils import (
+ requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
+ get_auth_from_url, rewind_body
+)
+
+from .status_codes import codes
+
+# formerly defined here, re-exposed here for backward compatibility
+from .models import REDIRECT_STATI
+
+# Preferred clock, based on which one is more accurate on a given system.
+if sys.platform == 'win32':
+ try: # Python 3.4+
+ preferred_clock = time.perf_counter
+ except AttributeError: # Earlier than Python 3.
+ preferred_clock = time.clock
+else:
+ preferred_clock = time.time
+
+
+def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
+ """Determines appropriate setting for a given request, taking into account
+ the explicit setting on that request, and the setting in the session. If a
+ setting is a dictionary, they will be merged together using `dict_class`
+ """
+
+ if session_setting is None:
+ return request_setting
+
+ if request_setting is None:
+ return session_setting
+
+ # Bypass if not a dictionary (e.g. verify)
+ if not (
+ isinstance(session_setting, Mapping) and
+ isinstance(request_setting, Mapping)
+ ):
+ return request_setting
+
+ merged_setting = dict_class(to_key_val_list(session_setting))
+ merged_setting.update(to_key_val_list(request_setting))
+
+ # Remove keys that are set to None. Extract keys first to avoid altering
+ # the dictionary during iteration.
+ none_keys = [k for (k, v) in merged_setting.items() if v is None]
+ for key in none_keys:
+ del merged_setting[key]
+
+ return merged_setting
+
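+# Editorial sketch (not part of the vendored source): session-level settings
+# provide defaults, request-level settings win, and a request value of None
+# removes the key entirely.
+#
+#     >>> dict(merge_setting({'b': None, 'c': 3}, {'a': 1, 'b': 2}))
+#     {'a': 1, 'c': 3}
+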
+
+def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
+ """Properly merges both requests and session hooks.
+
+ This is necessary because when request_hooks == {'response': []}, the
+ merge breaks Session hooks entirely.
+ """
+ if session_hooks is None or session_hooks.get('response') == []:
+ return request_hooks
+
+ if request_hooks is None or request_hooks.get('response') == []:
+ return session_hooks
+
+ return merge_setting(request_hooks, session_hooks, dict_class)
+
+
+class SessionRedirectMixin(object):
+
+ def get_redirect_target(self, resp):
+ """Receives a Response. Returns a redirect URI or ``None``"""
+ # Due to the nature of how requests processes redirects this method will
+ # be called at least once upon the original response and at least twice
+ # on each subsequent redirect response (if any).
+ # If a custom mixin is used to handle this logic, it may be advantageous
+ # to cache the redirect location onto the response object as a private
+ # attribute.
+ if resp.is_redirect:
+ location = resp.headers['location']
+ # Currently the underlying http module on py3 decodes headers
+ # in latin1, but empirical evidence suggests that latin1 is very
+ # rarely used with non-ASCII characters in HTTP headers; a UTF-8
+ # encoded header is far more likely, which leads to incorrect
+ # handling of UTF-8 encoded Location headers. To solve this, we
+ # re-encode the location in latin1.
+ if is_py3:
+ location = location.encode('latin1')
+ return to_native_string(location, 'utf8')
+ return None
+
+ def should_strip_auth(self, old_url, new_url):
+ """Decide whether Authorization header should be removed when redirecting"""
+ old_parsed = urlparse(old_url)
+ new_parsed = urlparse(new_url)
+ if old_parsed.hostname != new_parsed.hostname:
+ return True
+ # Special case: allow http -> https redirect when using the standard
+ # ports. This isn't specified by RFC 7235, but is kept to avoid
+ # breaking backwards compatibility with older versions of requests
+ # that allowed any redirects on the same host.
+ if (old_parsed.scheme == 'http' and old_parsed.port in (80, None)
+ and new_parsed.scheme == 'https' and new_parsed.port in (443, None)):
+ return False
+
+ # Handle default port usage corresponding to scheme.
+ changed_port = old_parsed.port != new_parsed.port
+ changed_scheme = old_parsed.scheme != new_parsed.scheme
+ default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
+ if (not changed_scheme and old_parsed.port in default_port
+ and new_parsed.port in default_port):
+ return False
+
+ # Standard case: root URI must match
+ return changed_port or changed_scheme
+
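+ # Editorial sketch (not part of the vendored source; hostnames are
+ # illustrative): credentials survive a same-host http -> https upgrade on
+ # default ports, but are stripped when the host changes.
+ #
+ #     >>> s = Session()
+ #     >>> s.should_strip_auth('http://example.com/1', 'https://example.com/2')
+ #     False
+ #     >>> s.should_strip_auth('http://example.com/1', 'http://other.example/2')
+ #     True
+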
+ def resolve_redirects(self, resp, req, stream=False, timeout=None,
+ verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
+ """Receives a Response. Returns a generator of Responses or Requests."""
+
+ hist = [] # keep track of history
+
+ url = self.get_redirect_target(resp)
+ previous_fragment = urlparse(req.url).fragment
+ while url:
+ prepared_request = req.copy()
+
+ # Update history and keep track of redirects.
+ # resp.history must ignore the original request in this loop
+ hist.append(resp)
+ resp.history = hist[1:]
+
+ try:
+ resp.content # Consume socket so it can be released
+ except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
+ resp.raw.read(decode_content=False)
+
+ if len(resp.history) >= self.max_redirects:
+ raise TooManyRedirects('Exceeded {} redirects.'.format(self.max_redirects), response=resp)
+
+ # Release the connection back into the pool.
+ resp.close()
+
+ # Handle redirection without scheme (see: RFC 1808 Section 4)
+ if url.startswith('//'):
+ parsed_rurl = urlparse(resp.url)
+ url = ':'.join([to_native_string(parsed_rurl.scheme), url])
+
+ # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
+ parsed = urlparse(url)
+ if parsed.fragment == '' and previous_fragment:
+ parsed = parsed._replace(fragment=previous_fragment)
+ elif parsed.fragment:
+ previous_fragment = parsed.fragment
+ url = parsed.geturl()
+
+ # Facilitate relative 'location' headers, as allowed by RFC 7231.
+ # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
+ # In compliance with RFC 3986, we percent-encode the URL.
+ if not parsed.netloc:
+ url = urljoin(resp.url, requote_uri(url))
+ else:
+ url = requote_uri(url)
+
+ prepared_request.url = to_native_string(url)
+
+ self.rebuild_method(prepared_request, resp)
+
+ # https://github.com/psf/requests/issues/1084
+ if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
+ # https://github.com/psf/requests/issues/3490
+ purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
+ for header in purged_headers:
+ prepared_request.headers.pop(header, None)
+ prepared_request.body = None
+
+ headers = prepared_request.headers
+ headers.pop('Cookie', None)
+
+ # Extract any cookies sent on the response to the cookiejar
+ # in the new request. Because we've mutated our copied prepared
+ # request, use the old one that we haven't yet touched.
+ extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
+ merge_cookies(prepared_request._cookies, self.cookies)
+ prepared_request.prepare_cookies(prepared_request._cookies)
+
+ # Rebuild auth and proxy information.
+ proxies = self.rebuild_proxies(prepared_request, proxies)
+ self.rebuild_auth(prepared_request, resp)
+
+ # A failed tell() sets `_body_position` to `object()`. This non-None
+ # value ensures `rewindable` will be True, allowing us to raise an
+ # UnrewindableBodyError, instead of hanging the connection.
+ rewindable = (
+ prepared_request._body_position is not None and
+ ('Content-Length' in headers or 'Transfer-Encoding' in headers)
+ )
+
+ # Attempt to rewind consumed file-like object.
+ if rewindable:
+ rewind_body(prepared_request)
+
+ # Override the original request.
+ req = prepared_request
+
+ if yield_requests:
+ yield req
+ else:
+
+ resp = self.send(
+ req,
+ stream=stream,
+ timeout=timeout,
+ verify=verify,
+ cert=cert,
+ proxies=proxies,
+ allow_redirects=False,
+ **adapter_kwargs
+ )
+
+ extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
+
+ # extract redirect url, if any, for the next loop
+ url = self.get_redirect_target(resp)
+ yield resp
+
+ def rebuild_auth(self, prepared_request, response):
+ """When being redirected we may want to strip authentication from the
+ request to avoid leaking credentials. This method intelligently removes
+ and reapplies authentication where possible to avoid credential loss.
+ """
+ headers = prepared_request.headers
+ url = prepared_request.url
+
+ if 'Authorization' in headers and self.should_strip_auth(response.request.url, url):
+ # If we get redirected to a new host, we should strip out any
+ # authentication headers.
+ del headers['Authorization']
+
+ # .netrc might have more auth for us on our new host.
+ new_auth = get_netrc_auth(url) if self.trust_env else None
+ if new_auth is not None:
+ prepared_request.prepare_auth(new_auth)
+
+ def rebuild_proxies(self, prepared_request, proxies):
+ """This method re-evaluates the proxy configuration by considering the
+ environment variables. If we are redirected to a URL covered by
+ NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
+ proxy keys for this URL (in case they were stripped by a previous
+ redirect).
+
+ This method also replaces the Proxy-Authorization header where
+ necessary.
+
+ :rtype: dict
+ """
+ proxies = proxies if proxies is not None else {}
+ headers = prepared_request.headers
+ url = prepared_request.url
+ scheme = urlparse(url).scheme
+ new_proxies = proxies.copy()
+ no_proxy = proxies.get('no_proxy')
+
+ bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
+ if self.trust_env and not bypass_proxy:
+ environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
+
+ proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
+
+ if proxy:
+ new_proxies.setdefault(scheme, proxy)
+
+ if 'Proxy-Authorization' in headers:
+ del headers['Proxy-Authorization']
+
+ try:
+ username, password = get_auth_from_url(new_proxies[scheme])
+ except KeyError:
+ username, password = None, None
+
+ if username and password:
+ headers['Proxy-Authorization'] = _basic_auth_str(username, password)
+
+ return new_proxies
+
+ def rebuild_method(self, prepared_request, response):
+ """When being redirected we may want to change the method of the request
+ based on certain specs or browser behavior.
+ """
+ method = prepared_request.method
+
+ # https://tools.ietf.org/html/rfc7231#section-6.4.4
+ if response.status_code == codes.see_other and method != 'HEAD':
+ method = 'GET'
+
+ # Do what the browsers do, despite standards...
+ # First, turn 302s into GETs.
+ if response.status_code == codes.found and method != 'HEAD':
+ method = 'GET'
+
+ # Second, if a POST is responded to with a 301, turn it into a GET.
+ # This bizarre behaviour is explained in Issue 1704.
+ if response.status_code == codes.moved and method == 'POST':
+ method = 'GET'
+
+ prepared_request.method = method
+
+
+class Session(SessionRedirectMixin):
+ """A Requests session.
+
+ Provides cookie persistence, connection-pooling, and configuration.
+
+ Basic Usage::
+
+ >>> import requests
+ >>> s = requests.Session()
+ >>> s.get('https://httpbin.org/get')
+ <Response [200]>
+
+ Or as a context manager::
+
+ >>> with requests.Session() as s:
+ ... s.get('https://httpbin.org/get')
+ <Response [200]>
+ """
+
+ __attrs__ = [
+ 'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
+ 'cert', 'adapters', 'stream', 'trust_env',
+ 'max_redirects',
+ ]
+
+ def __init__(self):
+
+ #: A case-insensitive dictionary of headers to be sent on each
+ #: :class:`Request <Request>` sent from this
+ #: :class:`Session <Session>`.
+ self.headers = default_headers()
+
+ #: Default Authentication tuple or object to attach to
+ #: :class:`Request <Request>`.
+ self.auth = None
+
+ #: Dictionary mapping protocol or protocol and host to the URL of the proxy
+ #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
+ #: be used on each :class:`Request <Request>`.
+ self.proxies = {}
+
+ #: Event-handling hooks.
+ self.hooks = default_hooks()
+
+ #: Dictionary of querystring data to attach to each
+ #: :class:`Request <Request>`. The dictionary values may be lists for
+ #: representing multivalued query parameters.
+ self.params = {}
+
+ #: Stream response content default.
+ self.stream = False
+
+ #: SSL Verification default.
+ #: Defaults to `True`, requiring requests to verify the TLS certificate at the
+ #: remote end.
+ #: If verify is set to `False`, requests will accept any TLS certificate
+ #: presented by the server, and will ignore hostname mismatches and/or
+ #: expired certificates, which will make your application vulnerable to
+ #: man-in-the-middle (MitM) attacks.
+ #: Only set this to `False` for testing.
+ self.verify = True
+
+ #: SSL client certificate default. If a string, path to an SSL client
+ #: cert file (.pem). If a tuple, a ('cert', 'key') pair.
+ self.cert = None
+
+ #: Maximum number of redirects allowed. If the request exceeds this
+ #: limit, a :class:`TooManyRedirects` exception is raised.
+ #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
+ #: 30.
+ self.max_redirects = DEFAULT_REDIRECT_LIMIT
+
+ #: Trust environment settings for proxy configuration, default
+ #: authentication and similar.
+ self.trust_env = True
+
+ #: A CookieJar containing all currently outstanding cookies set on this
+ #: session. By default it is a
+ #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
+ #: may be any other ``cookielib.CookieJar`` compatible object.
+ self.cookies = cookiejar_from_dict({})
+
+ # Default connection adapters.
+ self.adapters = OrderedDict()
+ self.mount('https://', HTTPAdapter())
+ self.mount('http://', HTTPAdapter())
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ self.close()
+
+ def prepare_request(self, request):
+ """Constructs a :class:`PreparedRequest <PreparedRequest>` for
+ transmission and returns it. The :class:`PreparedRequest` has settings
+ merged from the :class:`Request <Request>` instance and those of the
+ :class:`Session`.
+
+ :param request: :class:`Request` instance to prepare with this
+ session's settings.
+ :rtype: requests.PreparedRequest
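+
+ Usage (an illustrative sketch; the URL is a placeholder)::
+
+ >>> import requests
+ >>> s = requests.Session()
+ >>> req = requests.Request('GET', 'https://httpbin.org/get')
+ >>> s.prepare_request(req)
+ <PreparedRequest [GET]>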
+ """
+ cookies = request.cookies or {}
+
+ # Bootstrap CookieJar.
+ if not isinstance(cookies, cookielib.CookieJar):
+ cookies = cookiejar_from_dict(cookies)
+
+ # Merge with session cookies
+ merged_cookies = merge_cookies(
+ merge_cookies(RequestsCookieJar(), self.cookies), cookies)
+
+ # Set environment's basic authentication if not explicitly set.
+ auth = request.auth
+ if self.trust_env and not auth and not self.auth:
+ auth = get_netrc_auth(request.url)
+
+ p = PreparedRequest()
+ p.prepare(
+ method=request.method.upper(),
+ url=request.url,
+ files=request.files,
+ data=request.data,
+ json=request.json,
+ headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
+ params=merge_setting(request.params, self.params),
+ auth=merge_setting(auth, self.auth),
+ cookies=merged_cookies,
+ hooks=merge_hooks(request.hooks, self.hooks),
+ )
+ return p
+
+ def request(self, method, url,
+ params=None, data=None, headers=None, cookies=None, files=None,
+ auth=None, timeout=None, allow_redirects=True, proxies=None,
+ hooks=None, stream=None, verify=None, cert=None, json=None):
+ """Constructs a :class:`Request <Request>`, prepares it and sends it.
+ Returns :class:`Response <Response>` object.
+
+ :param method: method for the new :class:`Request` object.
+ :param url: URL for the new :class:`Request` object.
+ :param params: (optional) Dictionary or bytes to be sent in the query
+ string for the :class:`Request`.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) json to send in the body of the
+ :class:`Request`.
+ :param headers: (optional) Dictionary of HTTP Headers to send with the
+ :class:`Request`.
+ :param cookies: (optional) Dict or CookieJar object to send with the
+ :class:`Request`.
+ :param files: (optional) Dictionary of ``'filename': file-like-objects``
+ for multipart encoding upload.
+ :param auth: (optional) Auth tuple or callable to enable
+ Basic/Digest/Custom HTTP Auth.
+ :param timeout: (optional) How long to wait for the server to send
+ data before giving up, as a float, or a :ref:`(connect timeout,
+ read timeout) <timeouts>` tuple.
+ :type timeout: float or tuple
+ :param allow_redirects: (optional) Whether to follow redirects. Defaults to ``True``.
+ :type allow_redirects: bool
+ :param proxies: (optional) Dictionary mapping protocol or protocol and
+ hostname to the URL of the proxy.
+ :param stream: (optional) whether to immediately download the response
+ content. Defaults to ``False``.
+ :param verify: (optional) Either a boolean, in which case it controls whether we verify
+ the server's TLS certificate, or a string, in which case it must be a path
+ to a CA bundle to use. Defaults to ``True``. When set to
+ ``False``, requests will accept any TLS certificate presented by
+ the server, and will ignore hostname mismatches and/or expired
+ certificates, which will make your application vulnerable to
+ man-in-the-middle (MitM) attacks. Setting verify to ``False``
+ may be useful during local development or testing.
+ :param cert: (optional) if string, path to an SSL client cert file (.pem).
+ If tuple, a ('cert', 'key') pair.
+ :rtype: requests.Response
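+
+ Usage (illustrative; requires network access to the example host)::
+
+ >>> import requests
+ >>> s = requests.Session()
+ >>> s.request('GET', 'https://httpbin.org/get', params={'key': 'value'})
+ <Response [200]>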
+ """
+ # Create the Request.
+ req = Request(
+ method=method.upper(),
+ url=url,
+ headers=headers,
+ files=files,
+ data=data or {},
+ json=json,
+ params=params or {},
+ auth=auth,
+ cookies=cookies,
+ hooks=hooks,
+ )
+ prep = self.prepare_request(req)
+
+ proxies = proxies or {}
+
+ settings = self.merge_environment_settings(
+ prep.url, proxies, stream, verify, cert
+ )
+
+ # Send the request.
+ send_kwargs = {
+ 'timeout': timeout,
+ 'allow_redirects': allow_redirects,
+ }
+ send_kwargs.update(settings)
+ resp = self.send(prep, **send_kwargs)
+
+ return resp
+
+ def get(self, url, **kwargs):
+ r"""Sends a GET request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault('allow_redirects', True)
+ return self.request('GET', url, **kwargs)
+
+ def options(self, url, **kwargs):
+ r"""Sends a OPTIONS request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault('allow_redirects', True)
+ return self.request('OPTIONS', url, **kwargs)
+
+ def head(self, url, **kwargs):
+ r"""Sends a HEAD request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ kwargs.setdefault('allow_redirects', False)
+ return self.request('HEAD', url, **kwargs)
+
+ def post(self, url, data=None, json=None, **kwargs):
+ r"""Sends a POST request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param json: (optional) json to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ return self.request('POST', url, data=data, json=json, **kwargs)
+
+ def put(self, url, data=None, **kwargs):
+ r"""Sends a PUT request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ return self.request('PUT', url, data=data, **kwargs)
+
+ def patch(self, url, data=None, **kwargs):
+ r"""Sends a PATCH request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like
+ object to send in the body of the :class:`Request`.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ return self.request('PATCH', url, data=data, **kwargs)
+
+ def delete(self, url, **kwargs):
+ r"""Sends a DELETE request. Returns :class:`Response` object.
+
+ :param url: URL for the new :class:`Request` object.
+ :param \*\*kwargs: Optional arguments that ``request`` takes.
+ :rtype: requests.Response
+ """
+
+ return self.request('DELETE', url, **kwargs)
+
+ def send(self, request, **kwargs):
+ """Send a given PreparedRequest.
+
+ :rtype: requests.Response
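+
+ Usage (illustrative; requires network access to the example host)::
+
+ >>> import requests
+ >>> s = requests.Session()
+ >>> prep = s.prepare_request(requests.Request('GET', 'https://httpbin.org/get'))
+ >>> s.send(prep)
+ <Response [200]>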
+ """
+ # Set defaults that the hooks can utilize to ensure they always have
+ # the correct parameters to reproduce the previous request.
+ kwargs.setdefault('stream', self.stream)
+ kwargs.setdefault('verify', self.verify)
+ kwargs.setdefault('cert', self.cert)
+ kwargs.setdefault('proxies', self.proxies)
+
+ # It's possible that users might accidentally send a Request object.
+ # Guard against that specific failure case.
+ if isinstance(request, Request):
+ raise ValueError('You can only send PreparedRequests.')
+
+ # Set up variables needed for resolve_redirects and dispatching of hooks
+ allow_redirects = kwargs.pop('allow_redirects', True)
+ stream = kwargs.get('stream')
+ hooks = request.hooks
+
+ # Get the appropriate adapter to use
+ adapter = self.get_adapter(url=request.url)
+
+ # Start time (approximately) of the request
+ start = preferred_clock()
+
+ # Send the request
+ r = adapter.send(request, **kwargs)
+
+ # Total elapsed time of the request (approximately)
+ elapsed = preferred_clock() - start
+ r.elapsed = timedelta(seconds=elapsed)
+
+ # Response manipulation hooks
+ r = dispatch_hook('response', hooks, r, **kwargs)
+
+ # Persist cookies
+ if r.history:
+
+ # If the hooks create history then we want those cookies too
+ for resp in r.history:
+ extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
+
+ extract_cookies_to_jar(self.cookies, request, r.raw)
+
+ # Resolve redirects if allowed.
+ if allow_redirects:
+ # Redirect resolving generator.
+ gen = self.resolve_redirects(r, request, **kwargs)
+ history = [resp for resp in gen]
+ else:
+ history = []
+
+ # Shuffle things around if there's history.
+ if history:
+ # Insert the first (original) request at the start
+ history.insert(0, r)
+ # Get the last request made
+ r = history.pop()
+ r.history = history
+
+ # If redirects aren't being followed, store the next request on the
+ # response so it's available via Response.next.
+ if not allow_redirects:
+ try:
+ r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
+ except StopIteration:
+ pass
+
+ if not stream:
+ r.content
+
+ return r
+
+ def merge_environment_settings(self, url, proxies, stream, verify, cert):
+ """
+ Check the environment and merge its proxy, verify, stream, and cert
+ settings with the given keyword settings.
+
+ :rtype: dict
+ """
+ # Gather clues from the surrounding environment.
+ if self.trust_env:
+ # Set environment's proxies.
+ no_proxy = proxies.get('no_proxy') if proxies is not None else None
+ env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
+ for (k, v) in env_proxies.items():
+ proxies.setdefault(k, v)
+
+ # Look for requests environment configuration and be compatible
+ # with cURL.
+ if verify is True or verify is None:
+ verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
+ os.environ.get('CURL_CA_BUNDLE'))
+
+ # Merge all the kwargs.
+ proxies = merge_setting(proxies, self.proxies)
+ stream = merge_setting(stream, self.stream)
+ verify = merge_setting(verify, self.verify)
+ cert = merge_setting(cert, self.cert)
+
+ return {'verify': verify, 'proxies': proxies, 'stream': stream,
+ 'cert': cert}
+
+ def get_adapter(self, url):
+ """
+ Returns the appropriate connection adapter for the given URL.
+
+ :rtype: requests.adapters.BaseAdapter
+ """
+ for (prefix, adapter) in self.adapters.items():
+
+ if url.lower().startswith(prefix.lower()):
+ return adapter
+
+ # Nothing matches :-/
+ raise InvalidSchema("No connection adapters were found for {!r}".format(url))
+
+ def close(self):
+ """Closes all adapters and as such the session"""
+ for v in self.adapters.values():
+ v.close()
+
+ def mount(self, prefix, adapter):
+ """Registers a connection adapter to a prefix.
+
+ Adapters are sorted in descending order by prefix length.
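+
+ Example (illustrative; the prefix and retry count are placeholders)::
+
+ >>> import requests
+ >>> from requests.adapters import HTTPAdapter
+ >>> s = requests.Session()
+ >>> s.mount('https://internal.example/', HTTPAdapter(max_retries=3))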
+ """
+ self.adapters[prefix] = adapter
+ keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
+
+ for key in keys_to_move:
+ self.adapters[key] = self.adapters.pop(key)
+
+ def __getstate__(self):
+ state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
+ return state
+
+ def __setstate__(self, state):
+ for attr, value in state.items():
+ setattr(self, attr, value)
+
+
+def session():
+ """
+ Returns a :class:`Session` for context-management.
+
+ .. deprecated:: 1.0.0
+
+ This method has been deprecated since version 1.0.0 and is only kept for
+ backwards compatibility. New code should use :class:`~requests.sessions.Session`
+ to create a session. This may be removed at a future date.
+
+ :rtype: Session
+ """
+ return Session()
diff --git a/third_party/python/requests/requests/status_codes.py b/third_party/python/requests/requests/status_codes.py
new file mode 100644
index 0000000000..d80a7cd4dd
--- /dev/null
+++ b/third_party/python/requests/requests/status_codes.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+
+r"""
+The ``codes`` object defines a mapping from common names for HTTP statuses
+to their numerical codes, accessible either as attributes or as dictionary
+items.
+
+Example::
+
+ >>> import requests
+ >>> requests.codes['temporary_redirect']
+ 307
+ >>> requests.codes.teapot
+ 418
+ >>> requests.codes['\o/']
+ 200
+
+Some codes have multiple names, and both upper- and lower-case versions of
+the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
+``codes.okay`` all correspond to the HTTP status code 200.
+"""
+
+from .structures import LookupDict
+
+_codes = {
+
+ # Informational.
+ 100: ('continue',),
+ 101: ('switching_protocols',),
+ 102: ('processing',),
+ 103: ('checkpoint',),
+ 122: ('uri_too_long', 'request_uri_too_long'),
+ 200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
+ 201: ('created',),
+ 202: ('accepted',),
+ 203: ('non_authoritative_info', 'non_authoritative_information'),
+ 204: ('no_content',),
+ 205: ('reset_content', 'reset'),
+ 206: ('partial_content', 'partial'),
+ 207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
+ 208: ('already_reported',),
+ 226: ('im_used',),
+
+ # Redirection.
+ 300: ('multiple_choices',),
+ 301: ('moved_permanently', 'moved', '\\o-'),
+ 302: ('found',),
+ 303: ('see_other', 'other'),
+ 304: ('not_modified',),
+ 305: ('use_proxy',),
+ 306: ('switch_proxy',),
+ 307: ('temporary_redirect', 'temporary_moved', 'temporary'),
+ 308: ('permanent_redirect',
+ 'resume_incomplete', 'resume',), # These 2 to be removed in 3.0
+
+ # Client Error.
+ 400: ('bad_request', 'bad'),
+ 401: ('unauthorized',),
+ 402: ('payment_required', 'payment'),
+ 403: ('forbidden',),
+ 404: ('not_found', '-o-'),
+ 405: ('method_not_allowed', 'not_allowed'),
+ 406: ('not_acceptable',),
+ 407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
+ 408: ('request_timeout', 'timeout'),
+ 409: ('conflict',),
+ 410: ('gone',),
+ 411: ('length_required',),
+ 412: ('precondition_failed', 'precondition'),
+ 413: ('request_entity_too_large',),
+ 414: ('request_uri_too_large',),
+ 415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
+ 416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
+ 417: ('expectation_failed',),
+ 418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
+ 421: ('misdirected_request',),
+ 422: ('unprocessable_entity', 'unprocessable'),
+ 423: ('locked',),
+ 424: ('failed_dependency', 'dependency'),
+ 425: ('unordered_collection', 'unordered'),
+ 426: ('upgrade_required', 'upgrade'),
+ 428: ('precondition_required', 'precondition'),
+ 429: ('too_many_requests', 'too_many'),
+ 431: ('header_fields_too_large', 'fields_too_large'),
+ 444: ('no_response', 'none'),
+ 449: ('retry_with', 'retry'),
+ 450: ('blocked_by_windows_parental_controls', 'parental_controls'),
+ 451: ('unavailable_for_legal_reasons', 'legal_reasons'),
+ 499: ('client_closed_request',),
+
+ # Server Error.
+ 500: ('internal_server_error', 'server_error', '/o\\', '✗'),
+ 501: ('not_implemented',),
+ 502: ('bad_gateway',),
+ 503: ('service_unavailable', 'unavailable'),
+ 504: ('gateway_timeout',),
+ 505: ('http_version_not_supported', 'http_version'),
+ 506: ('variant_also_negotiates',),
+ 507: ('insufficient_storage',),
+ 509: ('bandwidth_limit_exceeded', 'bandwidth'),
+ 510: ('not_extended',),
+ 511: ('network_authentication_required', 'network_auth', 'network_authentication'),
+}
+
+codes = LookupDict(name='status_codes')
+
+def _init():
+ for code, titles in _codes.items():
+ for title in titles:
+ setattr(codes, title, code)
+ if not title.startswith(('\\', '/')):
+ setattr(codes, title.upper(), code)
+
+ def doc(code):
+ names = ', '.join('``%s``' % n for n in _codes[code])
+ return '* %d: %s' % (code, names)
+
+ global __doc__
+ __doc__ = (__doc__ + '\n' +
+ '\n'.join(doc(code) for code in sorted(_codes))
+ if __doc__ is not None else None)
+
+_init()
diff --git a/third_party/python/requests/requests/structures.py b/third_party/python/requests/requests/structures.py
new file mode 100644
index 0000000000..8ee0ba7a08
--- /dev/null
+++ b/third_party/python/requests/requests/structures.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.structures
+~~~~~~~~~~~~~~~~~~~
+
+Data structures that power Requests.
+"""
+
+from collections import OrderedDict
+
+from .compat import Mapping, MutableMapping
+
+
+class CaseInsensitiveDict(MutableMapping):
+ """A case-insensitive ``dict``-like object.
+
+ Implements all methods and operations of
+ ``MutableMapping`` as well as dict's ``copy``. Also
+ provides ``lower_items``.
+
+ All keys are expected to be strings. The structure remembers the
+ case of the last key to be set, and ``iter(instance)``,
+ ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
+ will contain case-sensitive keys. However, querying and contains
+ testing is case insensitive::
+
+ cid = CaseInsensitiveDict()
+ cid['Accept'] = 'application/json'
+ cid['aCCEPT'] == 'application/json' # True
+ list(cid) == ['Accept'] # True
+
+ For example, ``headers['content-encoding']`` will return the
+ value of a ``'Content-Encoding'`` response header, regardless
+ of how the header name was originally stored.
+
+ If the constructor, ``.update``, or equality comparison
+ operations are given keys that have equal ``.lower()``s, the
+ behavior is undefined.
+ """
+
+ def __init__(self, data=None, **kwargs):
+ self._store = OrderedDict()
+ if data is None:
+ data = {}
+ self.update(data, **kwargs)
+
+ def __setitem__(self, key, value):
+ # Use the lowercased key for lookups, but store the actual
+ # key alongside the value.
+ self._store[key.lower()] = (key, value)
+
+ def __getitem__(self, key):
+ return self._store[key.lower()][1]
+
+ def __delitem__(self, key):
+ del self._store[key.lower()]
+
+ def __iter__(self):
+ return (casedkey for casedkey, mappedvalue in self._store.values())
+
+ def __len__(self):
+ return len(self._store)
+
+ def lower_items(self):
+ """Like iteritems(), but with all lowercase keys."""
+ return (
+ (lowerkey, keyval[1])
+ for (lowerkey, keyval)
+ in self._store.items()
+ )
+
+ def __eq__(self, other):
+ if isinstance(other, Mapping):
+ other = CaseInsensitiveDict(other)
+ else:
+ return NotImplemented
+ # Compare insensitively
+ return dict(self.lower_items()) == dict(other.lower_items())
+
+ # Copy is required
+ def copy(self):
+ return CaseInsensitiveDict(self._store.values())
+
+ def __repr__(self):
+ return str(dict(self.items()))
+
+
+class LookupDict(dict):
+ """Dictionary lookup object."""
+
+ def __init__(self, name=None):
+ self.name = name
+ super(LookupDict, self).__init__()
+
+ def __repr__(self):
+ return '<lookup \'%s\'>' % (self.name)
+
+ def __getitem__(self, key):
+ # We allow fall-through here, so values default to None
+ return self.__dict__.get(key, None)
+
+ def get(self, key, default=None):
+ return self.__dict__.get(key, default)
diff --git a/third_party/python/requests/requests/utils.py b/third_party/python/requests/requests/utils.py
new file mode 100644
index 0000000000..db67938e67
--- /dev/null
+++ b/third_party/python/requests/requests/utils.py
@@ -0,0 +1,992 @@
+# -*- coding: utf-8 -*-
+
+"""
+requests.utils
+~~~~~~~~~~~~~~
+
+This module provides utility functions that are used within Requests
+that are also useful for external consumption.
+"""
+
+import codecs
+import contextlib
+import io
+import os
+import re
+import socket
+import struct
+import sys
+import tempfile
+import warnings
+import zipfile
+from collections import OrderedDict
+
+from .__version__ import __version__
+from . import certs
+# to_native_string is unused in this module; it is imported for backwards compatibility
+from ._internal_utils import to_native_string
+from .compat import parse_http_list as _parse_list_header
+from .compat import (
+ quote, urlparse, bytes, str, unquote, getproxies,
+ proxy_bypass, urlunparse, basestring, integer_types, is_py3,
+ proxy_bypass_environment, getproxies_environment, Mapping)
+from .cookies import cookiejar_from_dict
+from .structures import CaseInsensitiveDict
+from .exceptions import (
+ InvalidURL, InvalidHeader, FileModeWarning, UnrewindableBodyError)
+
+NETRC_FILES = ('.netrc', '_netrc')
+
+DEFAULT_CA_BUNDLE_PATH = certs.where()
+
+DEFAULT_PORTS = {'http': 80, 'https': 443}
+
+
+if sys.platform == 'win32':
+ # provide a proxy_bypass version on Windows without DNS lookups
+
+ def proxy_bypass_registry(host):
+ try:
+ if is_py3:
+ import winreg
+ else:
+ import _winreg as winreg
+ except ImportError:
+ return False
+
+ try:
+ internetSettings = winreg.OpenKey(winreg.HKEY_CURRENT_USER,
+ r'Software\Microsoft\Windows\CurrentVersion\Internet Settings')
+ # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
+ proxyEnable = int(winreg.QueryValueEx(internetSettings,
+ 'ProxyEnable')[0])
+ # ProxyOverride is almost always a string
+ proxyOverride = winreg.QueryValueEx(internetSettings,
+ 'ProxyOverride')[0]
+ except OSError:
+ return False
+ if not proxyEnable or not proxyOverride:
+ return False
+
+ # make a check value list from the registry entry: replace the
+ # '<local>' string by the localhost entry and the corresponding
+ # canonical entry.
+ proxyOverride = proxyOverride.split(';')
+ # now check if we match one of the registry values.
+ for test in proxyOverride:
+ if test == '<local>':
+ if '.' not in host:
+ return True
+ test = test.replace(".", r"\.") # mask dots
+ test = test.replace("*", r".*") # change glob sequence
+ test = test.replace("?", r".") # change glob char
+ if re.match(test, host, re.I):
+ return True
+ return False
+
+ def proxy_bypass(host): # noqa
+ """Return True, if the host should be bypassed.
+
+ Checks proxy settings gathered from the environment, if specified,
+ or the registry.
+ """
+ if getproxies_environment():
+ return proxy_bypass_environment(host)
+ else:
+ return proxy_bypass_registry(host)
+
+
+def dict_to_sequence(d):
+ """Returns an internal sequence dictionary update."""
+
+ if hasattr(d, 'items'):
+ d = d.items()
+
+ return d
+
+
+def super_len(o):
+ total_length = None
+ current_position = 0
+
+ if hasattr(o, '__len__'):
+ total_length = len(o)
+
+ elif hasattr(o, 'len'):
+ total_length = o.len
+
+ elif hasattr(o, 'fileno'):
+ try:
+ fileno = o.fileno()
+ except io.UnsupportedOperation:
+ pass
+ else:
+ total_length = os.fstat(fileno).st_size
+
+ # Having used fstat to determine the file length, we need to
+ # confirm that this file was opened up in binary mode.
+ if 'b' not in o.mode:
+ warnings.warn((
+ "Requests has determined the content-length for this "
+ "request using the binary size of the file: however, the "
+ "file has been opened in text mode (i.e. without the 'b' "
+ "flag in the mode). This may lead to an incorrect "
+ "content-length. In Requests 3.0, support will be removed "
+ "for files in text mode."),
+ FileModeWarning
+ )
+
+ if hasattr(o, 'tell'):
+ try:
+ current_position = o.tell()
+ except (OSError, IOError):
+ # This can happen in some weird situations, such as when the file
+ # is actually a special file descriptor like stdin. In this
+ # instance, we don't know what the length is, so set it to zero and
+ # let requests chunk it instead.
+ if total_length is not None:
+ current_position = total_length
+ else:
+ if hasattr(o, 'seek') and total_length is None:
+ # StringIO and BytesIO have seek but no usable fileno
+ try:
+ # seek to end of file
+ o.seek(0, 2)
+ total_length = o.tell()
+
+ # seek back to current position to support
+ # partially read file-like objects
+ o.seek(current_position or 0)
+ except (OSError, IOError):
+ total_length = 0
+
+ if total_length is None:
+ total_length = 0
+
+ return max(0, total_length - current_position)
+
+
+def get_netrc_auth(url, raise_errors=False):
+ """Returns the Requests tuple auth for a given url from netrc."""
+
+ netrc_file = os.environ.get('NETRC')
+ if netrc_file is not None:
+ netrc_locations = (netrc_file,)
+ else:
+ netrc_locations = ('~/{}'.format(f) for f in NETRC_FILES)
+
+ try:
+ from netrc import netrc, NetrcParseError
+
+ netrc_path = None
+
+ for f in netrc_locations:
+ try:
+ loc = os.path.expanduser(f)
+ except KeyError:
+ # os.path.expanduser can fail when $HOME is undefined and
+ # getpwuid fails. See https://bugs.python.org/issue20164 &
+ # https://github.com/psf/requests/issues/1846
+ return
+
+ if os.path.exists(loc):
+ netrc_path = loc
+ break
+
+ # Abort early if there isn't one.
+ if netrc_path is None:
+ return
+
+ ri = urlparse(url)
+
+ # Strip port numbers from netloc. This weird `if...encode` dance is
+ # used for Python 3.2, which doesn't support unicode literals.
+ splitstr = b':'
+ if isinstance(url, str):
+ splitstr = splitstr.decode('ascii')
+ host = ri.netloc.split(splitstr)[0]
+
+ try:
+ _netrc = netrc(netrc_path).authenticators(host)
+ if _netrc:
+ # Return with login / password
+ login_i = (0 if _netrc[0] else 1)
+ return (_netrc[login_i], _netrc[2])
+ except (NetrcParseError, IOError):
+ # If there was a parsing error or a permissions issue reading the file,
+ # we'll just skip netrc auth unless explicitly asked to raise errors.
+ if raise_errors:
+ raise
+
+ # App Engine hackiness.
+ except (ImportError, AttributeError):
+ pass
+
+
+def guess_filename(obj):
+ """Tries to guess the filename of the given object."""
+ name = getattr(obj, 'name', None)
+ if (name and isinstance(name, basestring) and name[0] != '<' and
+ name[-1] != '>'):
+ return os.path.basename(name)
+
+
+def extract_zipped_paths(path):
+ """Replace nonexistent paths that look like they refer to a member of a zip
+ archive with the location of an extracted copy of the target, or else
+ just return the provided path unchanged.
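+
+ Example (an illustrative sketch; the archive path is hypothetical)::
+
+ >>> extract_zipped_paths('/tmp/bundle.zip/cacert.pem')  # doctest: +SKIP
+ '/tmp/cacert.pem'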
+ """
+ if os.path.exists(path):
+ # this is already a valid path, no need to do anything further
+ return path
+
+ # find the first valid part of the provided path and treat that as a zip archive
+ # assume the rest of the path is the name of a member in the archive
+ archive, member = os.path.split(path)
+ while archive and not os.path.exists(archive):
+ archive, prefix = os.path.split(archive)
+ member = '/'.join([prefix, member])
+
+ if not zipfile.is_zipfile(archive):
+ return path
+
+ zip_file = zipfile.ZipFile(archive)
+ if member not in zip_file.namelist():
+ return path
+
+ # we have a valid zip archive and a valid member of that archive
+ tmp = tempfile.gettempdir()
+ extracted_path = os.path.join(tmp, *member.split('/'))
+ if not os.path.exists(extracted_path):
+ extracted_path = zip_file.extract(member, path=tmp)
+
+ return extracted_path
+
+
+def from_key_val_list(value):
+ """Take an object and test to see if it can be represented as a
+ dictionary. If it can be, return an OrderedDict, e.g.,
+
+ ::
+
+ >>> from_key_val_list([('key', 'val')])
+ OrderedDict([('key', 'val')])
+ >>> from_key_val_list('string')
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot encode objects that are not 2-tuples
+ >>> from_key_val_list({'key': 'val'})
+ OrderedDict([('key', 'val')])
+
+ :rtype: OrderedDict
+ """
+ if value is None:
+ return None
+
+ if isinstance(value, (str, bytes, bool, int)):
+ raise ValueError('cannot encode objects that are not 2-tuples')
+
+ return OrderedDict(value)
+
+
+def to_key_val_list(value):
+ """Take an object and test to see if it can be represented as a
+ dictionary. If it can be, return a list of tuples, e.g.,
+
+ ::
+
+ >>> to_key_val_list([('key', 'val')])
+ [('key', 'val')]
+ >>> to_key_val_list({'key': 'val'})
+ [('key', 'val')]
+ >>> to_key_val_list('string')
+ Traceback (most recent call last):
+ ...
+ ValueError: cannot encode objects that are not 2-tuples
+
+ :rtype: list
+ """
+ if value is None:
+ return None
+
+ if isinstance(value, (str, bytes, bool, int)):
+ raise ValueError('cannot encode objects that are not 2-tuples')
+
+ if isinstance(value, Mapping):
+ value = value.items()
+
+ return list(value)
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def parse_list_header(value):
+ """Parse lists as described by RFC 2068 Section 2.
+
+ In particular, parse comma-separated lists where the elements of
+ the list may include quoted-strings. A quoted-string could
+ contain a comma. A non-quoted string could have quotes in the
+ middle. Quotes are removed automatically after parsing.
+
+ It basically works like :func:`parse_set_header` just that items
+ may appear multiple times and case sensitivity is preserved.
+
+ The return value is a standard :class:`list`:
+
+ >>> parse_list_header('token, "quoted value"')
+ ['token', 'quoted value']
+
+ To create a header from the :class:`list` again, use the
+ :func:`dump_header` function.
+
+ :param value: a string with a list header.
+ :return: :class:`list`
+ :rtype: list
+ """
+ result = []
+ for item in _parse_list_header(value):
+ if item[:1] == item[-1:] == '"':
+ item = unquote_header_value(item[1:-1])
+ result.append(item)
+ return result
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def parse_dict_header(value):
+ """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
+ convert them into a python dict:
+
+ >>> d = parse_dict_header('foo="is a fish", bar="as well"')
+ >>> type(d) is dict
+ True
+ >>> sorted(d.items())
+ [('bar', 'as well'), ('foo', 'is a fish')]
+
+ If there is no value for a key it will be `None`:
+
+ >>> parse_dict_header('key_without_value')
+ {'key_without_value': None}
+
+ To create a header from the :class:`dict` again, use the
+ :func:`dump_header` function.
+
+ :param value: a string with a dict header.
+ :return: :class:`dict`
+ :rtype: dict
+ """
+ result = {}
+ for item in _parse_list_header(value):
+ if '=' not in item:
+ result[item] = None
+ continue
+ name, value = item.split('=', 1)
+ if value[:1] == value[-1:] == '"':
+ value = unquote_header_value(value[1:-1])
+ result[name] = value
+ return result
+
+
+# From mitsuhiko/werkzeug (used with permission).
+def unquote_header_value(value, is_filename=False):
+ r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
+ This does not use the real unquoting but what browsers are actually
+ using for quoting.
+
+ :param value: the header value to unquote.
+ :rtype: str
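+
+ Example (illustrative)::
+
+ >>> unquote_header_value('"attachment.txt"')
+ 'attachment.txt'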
+ """
+ if value and value[0] == value[-1] == '"':
+ # this is not the real unquoting, but fixing this so that the
+ # RFC is met will result in bugs with internet explorer and
+ # probably some other browsers as well. IE for example is
+ # uploading files with "C:\foo\bar.txt" as filename
+ value = value[1:-1]
+
+ # if this is a filename and the starting characters look like
+ # a UNC path, then just return the value without quotes. Using the
+ # replace sequence below on a UNC path has the effect of turning
+ # the leading double slash into a single slash and then
+ # _fix_ie_filename() doesn't work correctly. See #458.
+ if not is_filename or value[:2] != '\\\\':
+ return value.replace('\\\\', '\\').replace('\\"', '"')
+ return value
+
+
+def dict_from_cookiejar(cj):
+ """Returns a key/value dictionary from a CookieJar.
+
+ :param cj: CookieJar object to extract cookies from.
+ :rtype: dict
+ """
+
+ cookie_dict = {}
+
+ for cookie in cj:
+ cookie_dict[cookie.name] = cookie.value
+
+ return cookie_dict
+
+
+def add_dict_to_cookiejar(cj, cookie_dict):
+ """Returns a CookieJar from a key/value dictionary.
+
+ :param cj: CookieJar to insert cookies into.
+ :param cookie_dict: Dict of key/values to insert into CookieJar.
+ :rtype: CookieJar
+ """
+
+ return cookiejar_from_dict(cookie_dict, cj)
+
+
+def get_encodings_from_content(content):
+ """Returns encodings from given content string.
+
+ :param content: bytestring to extract encodings from.
+ """
+ warnings.warn((
+ 'In requests 3.0, get_encodings_from_content will be removed. For '
+ 'more information, please see the discussion on issue #2266. (This'
+ ' warning should only appear once.)'),
+ DeprecationWarning)
+
+ charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
+ pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
+ xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]')
+
+ return (charset_re.findall(content) +
+ pragma_re.findall(content) +
+ xml_re.findall(content))
+
+
+def _parse_content_type_header(header):
+ """Returns content type and parameters from given header
+
+ :param header: string
+ :return: tuple containing content type and dictionary of
+ parameters
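+
+ Example (illustrative)::
+
+ >>> _parse_content_type_header('text/html; charset=UTF-8')
+ ('text/html', {'charset': 'UTF-8'})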
+ """
+
+ tokens = header.split(';')
+ content_type, params = tokens[0].strip(), tokens[1:]
+ params_dict = {}
+ items_to_strip = "\"' "
+
+ for param in params:
+ param = param.strip()
+ if param:
+ key, value = param, True
+ index_of_equals = param.find("=")
+ if index_of_equals != -1:
+ key = param[:index_of_equals].strip(items_to_strip)
+ value = param[index_of_equals + 1:].strip(items_to_strip)
+ params_dict[key.lower()] = value
+ return content_type, params_dict
+
+
+def get_encoding_from_headers(headers):
+ """Returns encodings from given HTTP Header Dict.
+
+ :param headers: dictionary to extract encoding from.
+ :rtype: str
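+
+ Example (illustrative)::
+
+ >>> get_encoding_from_headers({'content-type': 'text/html; charset=utf-8'})
+ 'utf-8'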
+ """
+
+ content_type = headers.get('content-type')
+
+ if not content_type:
+ return None
+
+ content_type, params = _parse_content_type_header(content_type)
+
+ if 'charset' in params:
+ return params['charset'].strip("'\"")
+
+ if 'text' in content_type:
+ return 'ISO-8859-1'
+
+ if 'application/json' in content_type:
+ # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
+ return 'utf-8'
+
+
+def stream_decode_response_unicode(iterator, r):
+ """Stream decodes a iterator."""
+
+ if r.encoding is None:
+ for item in iterator:
+ yield item
+ return
+
+ decoder = codecs.getincrementaldecoder(r.encoding)(errors='replace')
+ for chunk in iterator:
+ rv = decoder.decode(chunk)
+ if rv:
+ yield rv
+ rv = decoder.decode(b'', final=True)
+ if rv:
+ yield rv
+
+
+def iter_slices(string, slice_length):
+ """Iterate over slices of a string."""
+ pos = 0
+ if slice_length is None or slice_length <= 0:
+ slice_length = len(string)
+ while pos < len(string):
+ yield string[pos:pos + slice_length]
+ pos += slice_length
+
+
+def get_unicode_from_response(r):
+ """Returns the requested content back in unicode.
+
+ :param r: Response object to get unicode content from.
+
+ Encodings are tried in order:
+
+ 1. the charset from the content-type header
+ 2. the same charset again, replacing undecodable characters
+
+ :rtype: str
+ """
+ warnings.warn((
+ 'In requests 3.0, get_unicode_from_response will be removed. For '
+ 'more information, please see the discussion on issue #2266. (This'
+ ' warning should only appear once.)'),
+ DeprecationWarning)
+
+ tried_encodings = []
+
+ # Try charset from content-type
+ encoding = get_encoding_from_headers(r.headers)
+
+ if encoding:
+ try:
+ return str(r.content, encoding)
+ except UnicodeError:
+ tried_encodings.append(encoding)
+
+ # Fall back:
+ try:
+ return str(r.content, encoding, errors='replace')
+ except TypeError:
+ return r.content
+
+
+# The unreserved URI characters (RFC 3986)
+UNRESERVED_SET = frozenset(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~")
+
+
+def unquote_unreserved(uri):
+ """Un-escape any percent-escape sequences in a URI that are unreserved
+ characters. This leaves all reserved, illegal and non-ASCII bytes encoded.
+
+ :rtype: str
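+
+ Example (illustrative)::
+
+ >>> unquote_unreserved('foo%2Fbar%7E')
+ 'foo%2Fbar~'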
+ """
+ parts = uri.split('%')
+ for i in range(1, len(parts)):
+ h = parts[i][0:2]
+ if len(h) == 2 and h.isalnum():
+ try:
+ c = chr(int(h, 16))
+ except ValueError:
+ raise InvalidURL("Invalid percent-escape sequence: '%s'" % h)
+
+ if c in UNRESERVED_SET:
+ parts[i] = c + parts[i][2:]
+ else:
+ parts[i] = '%' + parts[i]
+ else:
+ parts[i] = '%' + parts[i]
+ return ''.join(parts)
+
+
+def requote_uri(uri):
+ """Re-quote the given URI.
+
+ This function passes the given URI through an unquote/quote cycle to
+ ensure that it is fully and consistently quoted.
+
+ :rtype: str
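+
+ Example (illustrative; the URL is a placeholder)::
+
+ >>> requote_uri('https://example.com/a b?q=%7E')
+ 'https://example.com/a%20b?q=~'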
+ """
+ safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
+ safe_without_percent = "!#$&'()*+,/:;=?@[]~"
+ try:
+ # Unquote only the unreserved characters
+ # Then quote only illegal characters (do not quote reserved,
+ # unreserved, or '%')
+ return quote(unquote_unreserved(uri), safe=safe_with_percent)
+ except InvalidURL:
+ # We couldn't unquote the given URI, so let's try quoting it, but
+ # there may be unquoted '%'s in the URI. We need to make sure they're
+ # properly quoted so they do not cause issues elsewhere.
+ return quote(uri, safe=safe_without_percent)
+
+
+def address_in_network(ip, net):
+ """This function allows you to check if an IP belongs to a network subnet
+
+ Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
+ returns False if ip = 192.168.1.1 and net = 192.168.100.0/24
+
+ :rtype: bool
+ """
+ ipaddr = struct.unpack('=L', socket.inet_aton(ip))[0]
+ netaddr, bits = net.split('/')
+ netmask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(bits))))[0]
+ network = struct.unpack('=L', socket.inet_aton(netaddr))[0] & netmask
+ return (ipaddr & netmask) == (network & netmask)
+
+
+def dotted_netmask(mask):
+ """Converts mask from /xx format to xxx.xxx.xxx.xxx
+
+ Example: if mask is 24 function returns 255.255.255.0
+
+ :rtype: str
+ """
+ bits = 0xffffffff ^ (1 << 32 - mask) - 1
+ return socket.inet_ntoa(struct.pack('>I', bits))
+
+
+def is_ipv4_address(string_ip):
+ """
+ :rtype: bool
+ """
+ try:
+ socket.inet_aton(string_ip)
+ except socket.error:
+ return False
+ return True
+
+
+def is_valid_cidr(string_network):
+ """
+ Very simple check of the CIDR format of an entry in the no_proxy variable.
+
+ :rtype: bool
+ """
+ if string_network.count('/') == 1:
+ try:
+ mask = int(string_network.split('/')[1])
+ except ValueError:
+ return False
+
+ if mask < 1 or mask > 32:
+ return False
+
+ try:
+ socket.inet_aton(string_network.split('/')[0])
+ except socket.error:
+ return False
+ else:
+ return False
+ return True
+
+
+@contextlib.contextmanager
+def set_environ(env_name, value):
+ """Set the environment variable 'env_name' to 'value'
+
+ Save previous value, yield, and then restore the previous value stored in
+ the environment variable 'env_name'.
+
+ If 'value' is None, do nothing"""
+ value_changed = value is not None
+ if value_changed:
+ old_value = os.environ.get(env_name)
+ os.environ[env_name] = value
+ try:
+ yield
+ finally:
+ if value_changed:
+ if old_value is None:
+ del os.environ[env_name]
+ else:
+ os.environ[env_name] = old_value
+
+
+def should_bypass_proxies(url, no_proxy):
+ """
+ Returns whether we should bypass proxies or not.
+
+ :rtype: bool
+ """
+ # Prioritize lowercase environment variables over uppercase
+ # to keep a consistent behaviour with other http projects (curl, wget).
+ get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper())
+
+ # First check whether no_proxy is defined. If it is, check that the URL
+ # we're getting isn't in the no_proxy list.
+ no_proxy_arg = no_proxy
+ if no_proxy is None:
+ no_proxy = get_proxy('no_proxy')
+ parsed = urlparse(url)
+
+ if parsed.hostname is None:
+ # URLs don't always have hostnames, e.g. file:/// urls.
+ return True
+
+ if no_proxy:
+ # We need to check whether we match here. We need to see if we match
+ # the end of the hostname, both with and without the port.
+ no_proxy = (
+ host for host in no_proxy.replace(' ', '').split(',') if host
+ )
+
+ if is_ipv4_address(parsed.hostname):
+ for proxy_ip in no_proxy:
+ if is_valid_cidr(proxy_ip):
+ if address_in_network(parsed.hostname, proxy_ip):
+ return True
+ elif parsed.hostname == proxy_ip:
+ # The no_proxy entry was given in plain IP notation rather than
+ # CIDR notation, and it matches the IP of the URL.
+ return True
+ else:
+ host_with_port = parsed.hostname
+ if parsed.port:
+ host_with_port += ':{}'.format(parsed.port)
+
+ for host in no_proxy:
+ if parsed.hostname.endswith(host) or host_with_port.endswith(host):
+ # The URL does match something in no_proxy, so we don't want
+ # to apply the proxies on this URL.
+ return True
+
+ with set_environ('no_proxy', no_proxy_arg):
+ # parsed.hostname can be `None` in cases such as a file URI.
+ try:
+ bypass = proxy_bypass(parsed.hostname)
+ except (TypeError, socket.gaierror):
+ bypass = False
+
+ if bypass:
+ return True
+
+ return False
+
+
+def get_environ_proxies(url, no_proxy=None):
+ """
+ Return a dict of environment proxies.
+
+ :rtype: dict
+ """
+ if should_bypass_proxies(url, no_proxy=no_proxy):
+ return {}
+ else:
+ return getproxies()
+
+
+def select_proxy(url, proxies):
+ """Select a proxy for the url, if applicable.
+
+ :param url: The URL of the request
+ :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
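+
+ Example (illustrative; the proxy URL is a placeholder)::
+
+ >>> select_proxy('http://example.com', {'http': 'http://proxy.example:3128'})
+ 'http://proxy.example:3128'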
+ """
+ proxies = proxies or {}
+ urlparts = urlparse(url)
+ if urlparts.hostname is None:
+ return proxies.get(urlparts.scheme, proxies.get('all'))
+
+ proxy_keys = [
+ urlparts.scheme + '://' + urlparts.hostname,
+ urlparts.scheme,
+ 'all://' + urlparts.hostname,
+ 'all',
+ ]
+ proxy = None
+ for proxy_key in proxy_keys:
+ if proxy_key in proxies:
+ proxy = proxies[proxy_key]
+ break
+
+ return proxy
+
+
+def default_user_agent(name="python-requests"):
+ """
+ Return a string representing the default user agent.
+
+ :rtype: str
+ """
+ return '%s/%s' % (name, __version__)
+
+
+def default_headers():
+ """
+ :rtype: requests.structures.CaseInsensitiveDict
+ """
+ return CaseInsensitiveDict({
+ 'User-Agent': default_user_agent(),
+ 'Accept-Encoding': ', '.join(('gzip', 'deflate')),
+ 'Accept': '*/*',
+ 'Connection': 'keep-alive',
+ })
+
+
+def parse_header_links(value):
+ """Return a list of parsed link headers proxies.
+
+ i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
+
+ :rtype: list
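+
+ Example (illustrative; the link target is a placeholder)::
+
+ >>> parse_header_links('<https://example.com/page2>; rel="next"')
+ [{'url': 'https://example.com/page2', 'rel': 'next'}]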
+ """
+
+ links = []
+
+ replace_chars = ' \'"'
+
+ value = value.strip(replace_chars)
+ if not value:
+ return links
+
+ for val in re.split(', *<', value):
+ try:
+ url, params = val.split(';', 1)
+ except ValueError:
+ url, params = val, ''
+
+ link = {'url': url.strip('<> \'"')}
+
+ for param in params.split(';'):
+ try:
+ key, value = param.split('=')
+ except ValueError:
+ break
+
+ link[key.strip(replace_chars)] = value.strip(replace_chars)
+
+ links.append(link)
+
+ return links
+
+
+# Null bytes; no need to recreate these on each call to guess_json_utf
+_null = '\x00'.encode('ascii') # encoding to ASCII for Python 3
+_null2 = _null * 2
+_null3 = _null * 3
+
+
+def guess_json_utf(data):
+ """
+ :rtype: str or None
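+
+ Example (illustrative)::
+
+ >>> guess_json_utf(b'{"key": "value"}')
+ 'utf-8'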
+ """
+ # JSON always starts with two ASCII characters, so detection is as
+ # easy as counting the nulls: their location and count determine
+ # the encoding. Also detect a BOM, if present.
+ sample = data[:4]
+ if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
+ return 'utf-32' # BOM included
+ if sample[:3] == codecs.BOM_UTF8:
+ return 'utf-8-sig' # BOM included, MS style (discouraged)
+ if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
+ return 'utf-16' # BOM included
+ nullcount = sample.count(_null)
+ if nullcount == 0:
+ return 'utf-8'
+ if nullcount == 2:
+ if sample[::2] == _null2: # 1st and 3rd are null
+ return 'utf-16-be'
+ if sample[1::2] == _null2: # 2nd and 4th are null
+ return 'utf-16-le'
+ # Did not detect 2 valid UTF-16 ascii-range characters
+ if nullcount == 3:
+ if sample[:3] == _null3:
+ return 'utf-32-be'
+ if sample[1:] == _null3:
+ return 'utf-32-le'
+ # Did not detect a valid UTF-32 ascii-range character
+ return None
+
+
+def prepend_scheme_if_needed(url, new_scheme):
+ """Given a URL that may or may not have a scheme, prepend the given scheme.
+ Does not replace a present scheme with the one provided as an argument.
+
+ :rtype: str
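+
+ Example (illustrative; the host is a placeholder)::
+
+ >>> prepend_scheme_if_needed('example.com/path', 'https')
+ 'https://example.com/path'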
+ """
+ scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme)
+
+ # urlparse is a finicky beast, and sometimes decides that there isn't a
+ # netloc present. Assume that it's being over-cautious, and switch netloc
+ # and path if urlparse decided there was no netloc.
+ if not netloc:
+ netloc, path = path, netloc
+
+ return urlunparse((scheme, netloc, path, params, query, fragment))
+
+
+def get_auth_from_url(url):
+ """Given a url with authentication components, extract them into a tuple of
+ username,password.
+
+ :rtype: (str,str)
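+
+ Example (illustrative; the credentials are placeholders)::
+
+ >>> get_auth_from_url('https://user:pass@example.com/')
+ ('user', 'pass')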
+ """
+ parsed = urlparse(url)
+
+ try:
+ auth = (unquote(parsed.username), unquote(parsed.password))
+ except (AttributeError, TypeError):
+ auth = ('', '')
+
+ return auth
+
+
+# Moved outside of function to avoid recompile every call
+_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
+_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')
+
+
+def check_header_validity(header):
+ """Verifies that header value is a string which doesn't contain
+ leading whitespace or return characters. This prevents unintended
+ header injection.
+
+ :param header: tuple, in the format (name, value).
+ """
+ name, value = header
+
+ if isinstance(value, bytes):
+ pat = _CLEAN_HEADER_REGEX_BYTE
+ else:
+ pat = _CLEAN_HEADER_REGEX_STR
+ try:
+ if not pat.match(value):
+ raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
+ except TypeError:
+ raise InvalidHeader("Value for header {%s: %s} must be of type str or "
+ "bytes, not %s" % (name, value, type(value)))
+
+
+def urldefragauth(url):
+ """
+ Given a url remove the fragment and the authentication part.
+
+ :rtype: str
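+
+ Example (illustrative; the URL is a placeholder)::
+
+ >>> urldefragauth('https://user:pass@example.com/path#frag')
+ 'https://example.com/path'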
+ """
+ scheme, netloc, path, params, query, fragment = urlparse(url)
+
+ # see :func:`prepend_scheme_if_needed`
+ if not netloc:
+ netloc, path = path, netloc
+
+ netloc = netloc.rsplit('@', 1)[-1]
+
+ return urlunparse((scheme, netloc, path, params, query, ''))
+
+
+def rewind_body(prepared_request):
+ """Move file pointer back to its recorded starting position
+ so it can be read again on redirect.
+ """
+ body_seek = getattr(prepared_request.body, 'seek', None)
+ if body_seek is not None and isinstance(prepared_request._body_position, integer_types):
+ try:
+ body_seek(prepared_request._body_position)
+ except (IOError, OSError):
+ raise UnrewindableBodyError("An error occurred when rewinding request "
+ "body for redirect.")
+ else:
+ raise UnrewindableBodyError("Unable to rewind request body for redirect.")
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/AUTHORS b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/AUTHORS
new file mode 100644
index 0000000000..37da4b99b4
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/AUTHORS
@@ -0,0 +1,11 @@
+Aaron Gallagher <_@habnab.it>
+Ben Jackson <puremourning@gmail.com>
+David Preece <davep@zedkep.com>
+Esben Haabendal <esben@haabendal.dk>
+Marc Abramowitz <abramowi@adobe.com>
+Marc Abramowitz <marc@marc-abramowitz.com>
+Marc Abramowitz <msabramo@gmail.com>
+Ondřej Kobližel <koblizeko@gmail.com>
+Tomaz Solc <tomaz.solc@tablix.org>
+Will Rouesnel <w.rouesnel@gmail.com>
+William Rouesnel <William.Rouesnel@netregistry.com.au>
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/LICENSE b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/LICENSE
new file mode 100644
index 0000000000..e06d208186
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/METADATA b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/METADATA
new file mode 100644
index 0000000000..54234d6ab2
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/METADATA
@@ -0,0 +1,117 @@
+Metadata-Version: 2.1
+Name: requests-unixsocket
+Version: 0.2.0
+Summary: Use requests to talk HTTP via a UNIX domain socket
+Home-page: https://github.com/msabramo/requests-unixsocket
+Author: Marc Abramowitz
+Author-email: marc@marc-abramowitz.com
+License: Apache-2
+Platform: UNKNOWN
+Classifier: Development Status :: 3 - Alpha
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Requires-Dist: requests (>=1.1)
+Requires-Dist: urllib3 (>=1.8)
+
+requests-unixsocket
+===================
+
+.. image:: https://badge.fury.io/py/requests-unixsocket.svg
+ :target: https://badge.fury.io/py/requests-unixsocket
+ :alt: Latest Version on PyPI
+
+.. image:: https://travis-ci.org/msabramo/requests-unixsocket.svg?branch=master
+ :target: https://travis-ci.org/msabramo/requests-unixsocket
+
+Use `requests <http://docs.python-requests.org/>`_ to talk HTTP via a UNIX domain socket
+
+Usage
+-----
+
+Explicit
+++++++++
+
+You can use it by instantiating a special ``Session`` object:
+
+.. code-block:: python
+
+ import json
+
+ import requests_unixsocket
+
+ session = requests_unixsocket.Session()
+
+ r = session.get('http+unix://%2Fvar%2Frun%2Fdocker.sock/info')
+ registry_config = r.json()['RegistryConfig']
+ print(json.dumps(registry_config, indent=4))
+
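+The socket path is simply percent-encoded into the host part of the URL.
+A minimal sketch of building such a URL with the standard library (the
+Docker socket path above is just an example):
+
+.. code-block:: python
+
+    try:
+        from urllib.parse import quote  # Python 3
+    except ImportError:
+        from urllib import quote  # Python 2
+
+    import requests_unixsocket
+
+    socket_path = '/var/run/docker.sock'
+    url = 'http+unix://%s/info' % quote(socket_path, safe='')
+
+    session = requests_unixsocket.Session()
+    r = session.get(url)
+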
+
+Implicit (monkeypatching)
++++++++++++++++++++++++++
+
+Monkeypatching allows you to use the functionality in this module while making
+minimal changes to your code. In the example above we had to instantiate a
+special ``requests_unixsocket.Session`` object and call the ``get`` method on
+that object; a plain ``requests.get(url)`` call (the easiest and most common
+way to use requests) would not work. Monkeypatching makes it work.
+
+You can monkeypatch globally:
+
+.. code-block:: python
+
+    import requests
+    import requests_unixsocket
+
+ requests_unixsocket.monkeypatch()
+
+ r = requests.get('http+unix://%2Fvar%2Frun%2Fdocker.sock/info')
+ assert r.status_code == 200
+
+or you can do it temporarily using a context manager:
+
+.. code-block:: python
+
+    import requests
+    import requests_unixsocket
+
+ with requests_unixsocket.monkeypatch():
+ r = requests.get('http+unix://%2Fvar%2Frun%2Fdocker.sock/info')
+ assert r.status_code == 200
+
+
+Abstract namespace sockets
+++++++++++++++++++++++++++
+
+To connect to an `abstract namespace
+socket <https://utcc.utoronto.ca/~cks/space/blog/python/AbstractUnixSocketsAndPeercred>`_
+(Linux only), prefix the name with a NULL byte (i.e. ``\0``), e.g.:
+
+.. code-block:: python
+
+ import requests_unixsocket
+
+ session = requests_unixsocket.Session()
+ res = session.get('http+unix://\0test_socket/get')
+ print(res.text)
+
+For an example program that illustrates this, see
+``examples/abstract_namespace.py`` in the git repo. Since abstract namespace
+sockets are specific to Linux, the program will only work on Linux.
+
+
+See also
+--------
+
+- https://github.com/httpie/httpie-unixsocket - a plugin for `HTTPie <https://httpie.org/>`_ that allows you to interact with UNIX domain sockets
+
+
+
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/RECORD b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/RECORD
new file mode 100644
index 0000000000..f8fa773b83
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/RECORD
@@ -0,0 +1,11 @@
+requests_unixsocket/__init__.py,sha256=_SzyTnexKzhCfmtiIRbrDpLNpKYJuf01QyyNHw76bF0,2077
+requests_unixsocket/adapters.py,sha256=UhXkEErWss-D90cehJltXqPVxwey4RjVkEj7wq0FFgs,2727
+requests_unixsocket/testutils.py,sha256=knU4P4lvwvogbEqRAJ-X77ojHD2V5rTDcYaLat_JreQ,3093
+requests_unixsocket/tests/test_requests_unixsocket.py,sha256=FIDUuM8ZPhpDpiHxvff8fyqCdbJsCDeUa8mKrtSBcSM,5196
+requests_unixsocket-0.2.0.dist-info/AUTHORS,sha256=CAloaNwgMbpQp1CeYjcT6FeDSqSgUZMppfV8FdeFSmM,420
+requests_unixsocket-0.2.0.dist-info/LICENSE,sha256=y16Ofl9KOYjhBjwULGDcLfdWBfTEZRXnduOspt-XbhQ,11325
+requests_unixsocket-0.2.0.dist-info/METADATA,sha256=wUV1Z9UnYmcs95HR7JMQrvwgxxh4lieBuSmGufX9BU4,3545
+requests_unixsocket-0.2.0.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
+requests_unixsocket-0.2.0.dist-info/pbr.json,sha256=b-kcBU2vW_AOypwYSXDMuitwk2Wo4MC57Y9Pnx5JfZk,47
+requests_unixsocket-0.2.0.dist-info/top_level.txt,sha256=Y1EEbvkeC5k8NXwoNkaqjeDlx2oDGfUJrbEubbBbjcc,20
+requests_unixsocket-0.2.0.dist-info/RECORD,,
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/WHEEL b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/WHEEL
new file mode 100644
index 0000000000..c8240f03e8
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/pbr.json b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/pbr.json
new file mode 100644
index 0000000000..859fce145b
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/pbr.json
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "f4703e0"} \ No newline at end of file
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/top_level.txt b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..412903bd05
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket-0.2.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+requests_unixsocket
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket/__init__.py b/third_party/python/requests_unixsocket/requests_unixsocket/__init__.py
new file mode 100644
index 0000000000..0fb5e1fd7f
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket/__init__.py
@@ -0,0 +1,77 @@
+import requests
+import sys
+
+from .adapters import UnixAdapter
+
+DEFAULT_SCHEME = 'http+unix://'
+
+
+class Session(requests.Session):
+ def __init__(self, url_scheme=DEFAULT_SCHEME, *args, **kwargs):
+ super(Session, self).__init__(*args, **kwargs)
+ self.mount(url_scheme, UnixAdapter())
+
+
+class monkeypatch(object):
+ def __init__(self, url_scheme=DEFAULT_SCHEME):
+        self.session = Session(url_scheme)
+ requests = self._get_global_requests_module()
+
+ # Methods to replace
+ self.methods = ('request', 'get', 'head', 'post',
+ 'patch', 'put', 'delete', 'options')
+ # Store the original methods
+ self.orig_methods = dict(
+ (m, requests.__dict__[m]) for m in self.methods)
+ # Monkey patch
+ g = globals()
+ for m in self.methods:
+ requests.__dict__[m] = g[m]
+
+ def _get_global_requests_module(self):
+ return sys.modules['requests']
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ requests = self._get_global_requests_module()
+ for m in self.methods:
+ requests.__dict__[m] = self.orig_methods[m]
+
+
+# These are the same methods defined for the global requests object
+def request(method, url, **kwargs):
+ session = Session()
+ return session.request(method=method, url=url, **kwargs)
+
+
+def get(url, **kwargs):
+ kwargs.setdefault('allow_redirects', True)
+ return request('get', url, **kwargs)
+
+
+def head(url, **kwargs):
+ kwargs.setdefault('allow_redirects', False)
+ return request('head', url, **kwargs)
+
+
+def post(url, data=None, json=None, **kwargs):
+ return request('post', url, data=data, json=json, **kwargs)
+
+
+def patch(url, data=None, **kwargs):
+ return request('patch', url, data=data, **kwargs)
+
+
+def put(url, data=None, **kwargs):
+ return request('put', url, data=data, **kwargs)
+
+
+def delete(url, **kwargs):
+ return request('delete', url, **kwargs)
+
+
+def options(url, **kwargs):
+ kwargs.setdefault('allow_redirects', True)
+ return request('options', url, **kwargs)
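+
+
+# Usage sketch: these helpers mirror requests' top-level API, e.g.:
+#
+#   import requests_unixsocket
+#   r = requests_unixsocket.get('http+unix://%2Ftmp%2Fapp.sock/status')
+#
+# (the socket path above is hypothetical)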
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket/adapters.py b/third_party/python/requests_unixsocket/requests_unixsocket/adapters.py
new file mode 100644
index 0000000000..a2c15642b1
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket/adapters.py
@@ -0,0 +1,89 @@
+import socket
+
+from requests.adapters import HTTPAdapter
+from requests.compat import urlparse, unquote
+
+try:
+ import http.client as httplib
+except ImportError:
+ import httplib
+
+try:
+ from requests.packages import urllib3
+except ImportError:
+ import urllib3
+
+
+# The following was adapted from some code from docker-py
+# https://github.com/docker/docker-py/blob/master/docker/transport/unixconn.py
+class UnixHTTPConnection(httplib.HTTPConnection, object):
+
+ def __init__(self, unix_socket_url, timeout=60):
+ """Create an HTTP connection to a unix domain socket
+
+        :param unix_socket_url: A URL whose scheme is 'http+unix' and whose
+            netloc is a percent-encoded path to a unix domain socket, e.g.
+            'http+unix://%2Ftmp%2Fprofilesvc.sock/status/pid'
+ """
+ super(UnixHTTPConnection, self).__init__('localhost', timeout=timeout)
+ self.unix_socket_url = unix_socket_url
+ self.timeout = timeout
+ self.sock = None
+
+    def __del__(self):  # base class does not define a destructor
+ if self.sock:
+ self.sock.close()
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.settimeout(self.timeout)
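+        # The percent-encoded socket path travels in the URL's netloc;
+        # unquote it back into a filesystem path before connecting.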
+ socket_path = unquote(urlparse(self.unix_socket_url).netloc)
+ sock.connect(socket_path)
+ self.sock = sock
+
+
+class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+
+ def __init__(self, socket_path, timeout=60):
+ super(UnixHTTPConnectionPool, self).__init__(
+ 'localhost', timeout=timeout)
+ self.socket_path = socket_path
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return UnixHTTPConnection(self.socket_path, self.timeout)
+
+
+class UnixAdapter(HTTPAdapter):
+
+ def __init__(self, timeout=60, pool_connections=25):
+ super(UnixAdapter, self).__init__()
+ self.timeout = timeout
+ self.pools = urllib3._collections.RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+
+ def get_connection(self, url, proxies=None):
+ proxies = proxies or {}
+ proxy = proxies.get(urlparse(url.lower()).scheme)
+
+ if proxy:
+ raise ValueError('%s does not support specifying proxies'
+ % self.__class__.__name__)
+
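+        # Pools are keyed by the full request URL; reuse a cached pool
+        # when available, otherwise create one and cache it.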
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = UnixHTTPConnectionPool(url, self.timeout)
+ self.pools[url] = pool
+
+ return pool
+
+ def request_url(self, request, proxies):
+ return request.path_url
+
+ def close(self):
+ self.pools.clear()
diff --git a/third_party/python/requests_unixsocket/requests_unixsocket/testutils.py b/third_party/python/requests_unixsocket/requests_unixsocket/testutils.py
new file mode 100644
index 0000000000..77e572e16f
--- /dev/null
+++ b/third_party/python/requests_unixsocket/requests_unixsocket/testutils.py
@@ -0,0 +1,97 @@
+"""
+Utilities helpful for writing tests
+
+Provides a UnixSocketServerThread that creates a running server, listening on a
+newly created unix socket.
+
+Example usage:
+
+.. code-block:: python
+
+ def test_unix_domain_adapter_monkeypatch():
+ with UnixSocketServerThread() as usock_thread:
+ with requests_unixsocket.monkeypatch('http+unix://'):
+                urlencoded_usock = quote_plus(usock_thread.usock)
+ url = 'http+unix://%s/path/to/page' % urlencoded_usock
+ r = requests.get(url)
+"""
+
+import logging
+import os
+import threading
+import time
+import uuid
+import waitress
+
+
+logger = logging.getLogger(__name__)
+
+
+class KillThread(threading.Thread):
+ def __init__(self, server, *args, **kwargs):
+ super(KillThread, self).__init__(*args, **kwargs)
+ self.server = server
+
+ def run(self):
+        logger.debug('Sleeping for 1 second before stopping the server')
+        time.sleep(1)
+ self.server._map.clear()
+
+
+class WSGIApp:
+ server = None
+
+ def __call__(self, environ, start_response):
+ logger.debug('WSGIApp.__call__: Invoked for %s', environ['PATH_INFO'])
+ logger.debug('WSGIApp.__call__: environ = %r', environ)
+ status_text = '200 OK'
+ response_headers = [
+ ('X-Transport', 'unix domain socket'),
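+            # waitress exposes the unix socket path as SERVER_PORT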
+ ('X-Socket-Path', environ['SERVER_PORT']),
+ ('X-Requested-Query-String', environ['QUERY_STRING']),
+ ('X-Requested-Path', environ['PATH_INFO'])]
+ body_bytes = b'Hello world!'
+ start_response(status_text, response_headers)
+ logger.debug(
+ 'WSGIApp.__call__: Responding with '
+ 'status_text = %r; '
+ 'response_headers = %r; '
+ 'body_bytes = %r',
+ status_text, response_headers, body_bytes)
+ return [body_bytes]
+
+
+class UnixSocketServerThread(threading.Thread):
+ def __init__(self, *args, **kwargs):
+ super(UnixSocketServerThread, self).__init__(*args, **kwargs)
+ self.usock = self.get_tempfile_name()
+ self.server = None
+ self.server_ready_event = threading.Event()
+
+ def get_tempfile_name(self):
+ # I'd rather use tempfile.NamedTemporaryFile but IDNA limits
+        # the hostname to 63 characters and we'll get an "InvalidURL:
+ # URL has an invalid label" error if we exceed that.
+ args = (os.stat(__file__).st_ino, os.getpid(), uuid.uuid4().hex[-8:])
+ return '/tmp/test_requests.%s_%s_%s' % args
+
+ def run(self):
+ logger.debug('Call waitress.serve in %r ...', self)
+ wsgi_app = WSGIApp()
+ server = waitress.create_server(wsgi_app, unix_socket=self.usock)
+ wsgi_app.server = server
+ self.server = server
+ self.server_ready_event.set()
+ server.run()
+
+ def __enter__(self):
+        logger.debug('Starting %r ...', self)
+ self.start()
+ logger.debug('Started %r.', self)
+ self.server_ready_event.wait()
+ return self
+
+ def __exit__(self, *args):
+ self.server_ready_event.wait()
+ if self.server:
+ KillThread(self.server).start()
diff --git a/third_party/python/requirements.in b/third_party/python/requirements.in
new file mode 100644
index 0000000000..1268f553ff
--- /dev/null
+++ b/third_party/python/requirements.in
@@ -0,0 +1,52 @@
+# Not a direct dependency - only needed to fix a constraint problem
+MarkupSafe==2.0.1
+appdirs==1.4.4
+attrs==23.1.0
+blessed==1.19.1
+cbor2==4.0.1
+certifi==2022.12.7
+colorama==0.4.5
+compare-locales==9.0.1
+cookies==2.2.1
+cram==0.7
+distro==1.4.0
+ecdsa==0.15
+esprima==4.0.1
+fluent.migrate==0.12.0
+fluent.syntax==0.19.0
+glean_parser==7.2.1
+importlib-metadata==6.0.0
+jsmin==3.0.0
+json-e==2.7.0
+jsonschema==4.17.3
+looseversion==1.0.1
+mozilla-repo-urls==0.1.1
+mozilla-version==2.0.0
+packaging==21.3
+pathspec==0.9.0
+pip==23.0.1
+pip-tools==5.5.0
+ply==3.10
+pyasn1==0.4.8
+pyasn1-modules==0.2.8
+pylru==1.0.9
+python-hglib==2.4
+pyyaml==5.4.1
+redo==2.0.3
+requests==2.25.1
+requests-unixsocket==0.2.0
+responses==0.10.6
+rsa==3.1.4
+sentry-sdk==0.14.3
+setuptools==51.2.0
+six==1.13.0
+slugid==2.0.0
+taskcluster==44.2.2
+taskcluster-taskgraph==3.5.2
+taskcluster-urls==13.0.1
+toml==0.10.2
+tqdm==4.62.3
+urllib3==1.26
+voluptuous==0.12.1
+wheel==0.37.0
+yamllint==1.23
diff --git a/third_party/python/requirements.txt b/third_party/python/requirements.txt
new file mode 100644
index 0000000000..6460b258c0
--- /dev/null
+++ b/third_party/python/requirements.txt
@@ -0,0 +1,407 @@
+aiohttp==3.7.4.post0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:02f46fc0e3c5ac58b80d4d56eb0a7c7d97fcef69ace9326289fb9f1955e65cfe \
+ --hash=sha256:0563c1b3826945eecd62186f3f5c7d31abb7391fedc893b7e2b26303b5a9f3fe \
+ --hash=sha256:114b281e4d68302a324dd33abb04778e8557d88947875cbf4e842c2c01a030c5 \
+ --hash=sha256:14762875b22d0055f05d12abc7f7d61d5fd4fe4642ce1a249abdf8c700bf1fd8 \
+ --hash=sha256:15492a6368d985b76a2a5fdd2166cddfea5d24e69eefed4630cbaae5c81d89bd \
+ --hash=sha256:17c073de315745a1510393a96e680d20af8e67e324f70b42accbd4cb3315c9fb \
+ --hash=sha256:209b4a8ee987eccc91e2bd3ac36adee0e53a5970b8ac52c273f7f8fd4872c94c \
+ --hash=sha256:230a8f7e24298dea47659251abc0fd8b3c4e38a664c59d4b89cca7f6c09c9e87 \
+ --hash=sha256:2e19413bf84934d651344783c9f5e22dee452e251cfd220ebadbed2d9931dbf0 \
+ --hash=sha256:393f389841e8f2dfc86f774ad22f00923fdee66d238af89b70ea314c4aefd290 \
+ --hash=sha256:3cf75f7cdc2397ed4442594b935a11ed5569961333d49b7539ea741be2cc79d5 \
+ --hash=sha256:3d78619672183be860b96ed96f533046ec97ca067fd46ac1f6a09cd9b7484287 \
+ --hash=sha256:40eced07f07a9e60e825554a31f923e8d3997cfc7fb31dbc1328c70826e04cde \
+ --hash=sha256:493d3299ebe5f5a7c66b9819eacdcfbbaaf1a8e84911ddffcdc48888497afecf \
+ --hash=sha256:4b302b45040890cea949ad092479e01ba25911a15e648429c7c5aae9650c67a8 \
+ --hash=sha256:515dfef7f869a0feb2afee66b957cc7bbe9ad0cdee45aec7fdc623f4ecd4fb16 \
+ --hash=sha256:547da6cacac20666422d4882cfcd51298d45f7ccb60a04ec27424d2f36ba3eaf \
+ --hash=sha256:5df68496d19f849921f05f14f31bd6ef53ad4b00245da3195048c69934521809 \
+ --hash=sha256:64322071e046020e8797117b3658b9c2f80e3267daec409b350b6a7a05041213 \
+ --hash=sha256:7615dab56bb07bff74bc865307aeb89a8bfd9941d2ef9d817b9436da3a0ea54f \
+ --hash=sha256:79ebfc238612123a713a457d92afb4096e2148be17df6c50fb9bf7a81c2f8013 \
+ --hash=sha256:7b18b97cf8ee5452fa5f4e3af95d01d84d86d32c5e2bfa260cf041749d66360b \
+ --hash=sha256:932bb1ea39a54e9ea27fc9232163059a0b8855256f4052e776357ad9add6f1c9 \
+ --hash=sha256:a00bb73540af068ca7390e636c01cbc4f644961896fa9363154ff43fd37af2f5 \
+ --hash=sha256:a5ca29ee66f8343ed336816c553e82d6cade48a3ad702b9ffa6125d187e2dedb \
+ --hash=sha256:af9aa9ef5ba1fd5b8c948bb11f44891968ab30356d65fd0cc6707d989cd521df \
+ --hash=sha256:bb437315738aa441251214dad17428cafda9cdc9729499f1d6001748e1d432f4 \
+ --hash=sha256:bdb230b4943891321e06fc7def63c7aace16095be7d9cf3b1e01be2f10fba439 \
+ --hash=sha256:c6e9dcb4cb338d91a73f178d866d051efe7c62a7166653a91e7d9fb18274058f \
+ --hash=sha256:cffe3ab27871bc3ea47df5d8f7013945712c46a3cc5a95b6bee15887f1675c22 \
+ --hash=sha256:d012ad7911653a906425d8473a1465caa9f8dea7fcf07b6d870397b774ea7c0f \
+ --hash=sha256:d9e13b33afd39ddeb377eff2c1c4f00544e191e1d1dee5b6c51ddee8ea6f0cf5 \
+ --hash=sha256:e4b2b334e68b18ac9817d828ba44d8fcb391f6acb398bcc5062b14b2cbeac970 \
+ --hash=sha256:e54962802d4b8b18b6207d4a927032826af39395a3bd9196a5af43fc4e60b009 \
+ --hash=sha256:f705e12750171c0ab4ef2a3c76b9a4024a62c4103e3a55dd6f99265b9bc6fcfc \
+ --hash=sha256:f881853d2643a29e643609da57b96d5f9c9b93f62429dcc1cbb413c7d07f0e1a \
+ --hash=sha256:fe60131d21b31fd1a14bd43e6bb88256f69dfc3188b3a89d736d6c71ed43ec95
+ansicon==1.89.0 ; python_version >= "3.7" and python_version < "4.0" and platform_system == "Windows" \
+ --hash=sha256:e4d039def5768a47e4afec8e89e83ec3ae5a26bf00ad851f914d1240b444d2b1 \
+ --hash=sha256:f1def52d17f65c2c9682cf8370c03f541f410c1752d6a14029f97318e4b9dfec
+appdirs==1.4.4 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41 \
+ --hash=sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128
+async-timeout==3.0.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:0c3c816a028d47f659d6ff5c745cb2acf1f966da1fe5c19c77a70282b25f4c5f \
+ --hash=sha256:4291ca197d287d274d0b6cb5d6f8f8f82d434ed288f962539ff18cc9012f9ea3
+attrs==23.1.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \
+ --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015
+blessed==1.19.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:63b8554ae2e0e7f43749b6715c734cc8f3883010a809bf16790102563e6cf25b \
+ --hash=sha256:9a0d099695bf621d4680dd6c73f6ad547f6a3442fbdbe80c4b1daa1edbc492fc
+cbor2==4.0.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:b0eb916c9ea226aa81e9091607737475d5b0e5c314fe8d5a87179fba449cd190 \
+ --hash=sha256:cee0d01e520563b5a73c72eace5c428bb68aefb1b3f7aee5d692d3af6a1e5172
+certifi==2022.12.7 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \
+ --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18
+chardet==4.0.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:0d6f53a15db4120f2b08c94f11e7d93d2c911ee118b6b30a04ec3ee8310179fa \
+ --hash=sha256:f864054d66fd9118f2e67044ac8981a54775ec5b67aed0441892edb553d21da5
+click==7.1.2 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:d2b5255c7c6349bc1bd1e59e08cd12acbbd63ce649f2588755783aa94dfb6b1a \
+ --hash=sha256:dacca89f4bfadd5de3d7489b7c8a566eee0d3676333fbb50030263894c38c0dc
+colorama==0.4.5 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:854bf444933e37f5824ae7bfc1e98d5bce2ebe4160d46b5edf346a89358e99da \
+ --hash=sha256:e6c6b4334fc50988a639d9b98aa429a0b57da6e17b9a44f0451f930b6967b7a4
+compare-locales==9.0.1 ; python_version >= "3.7" and python_version < "4" \
+ --hash=sha256:2de0f1d382749fffa6a482d462daff0d70bbc99d48520a0bf8459b22dc7fe9da \
+ --hash=sha256:eda953796841cbfab508ee35f7613a38ae7fbeed48bd26bf5cda9063bd638f06
+cookies==2.2.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:15bee753002dff684987b8df8c235288eb8d45f8191ae056254812dfd42c81d3 \
+ --hash=sha256:d6b698788cae4cfa4e62ef8643a9ca332b79bd96cb314294b864ae8d7eb3ee8e
+cram==0.7 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:008e4e8b4d325cf040964b5f62460535b004a7bc816d54f8527a4d299edfe4a3 \
+ --hash=sha256:7da7445af2ce15b90aad5ec4792f857cef5786d71f14377e9eb994d8b8337f2f
+diskcache==4.1.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:69b253a6ffe95bb4bafb483b97c24fca3c2c6c47b82e92b36486969a7e80d47d \
+ --hash=sha256:bcee5a59f9c264e2809e58d01be6569a3bbb1e36a1e0fb83f7ef9b2075f95ce0
+distro==1.4.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:362dde65d846d23baee4b5c058c8586f219b5a54be1cf5fc6ff55c4578392f57 \
+ --hash=sha256:eedf82a470ebe7d010f1872c17237c79ab04097948800029994fa458e52fb4b4
+ecdsa==0.15 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:867ec9cf6df0b03addc8ef66b56359643cb5d0c1dc329df76ba7ecfe256c8061 \
+ --hash=sha256:8f12ac317f8a1318efa75757ef0a651abe12e51fc1af8838fb91079445227277
+esprima==4.0.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:08db1a876d3c2910db9cfaeb83108193af5411fc3a3a66ebefacd390d21323ee
+fluent-migrate==0.12.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:926e69e94975521a974b206e242a479310c2cbca1865ca26bf40fa3c7a357338 \
+ --hash=sha256:e3564c92d1f53700e98792f1be1ff954488d431ff9f5ec290a4ab13b5de69487
+fluent-syntax==0.19.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:920326d7f46864b9758f0044e9968e3112198bc826acee16ddd8f11d359004fd \
+ --hash=sha256:b352b3475fac6c6ed5f06527921f432aac073d764445508ee5218aeccc7cc5c4
+giturlparse==0.10.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:04ba1a3a099c3093fa8d24a422913c6a9b2c2cd22bcffc939cf72e3e98f672d7 \
+ --hash=sha256:2595ab291d30717cda8474b874c9fd509f1b9802ad7f6968c36a45e4b13eb337
+glean-parser==7.2.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:11496ac004fe421b914c7fbdc9a1d620e4821d56e1d9f65523d3858cdb907bbd \
+ --hash=sha256:651cfee34422ea1db90bbf1cb03732bd8c598773bf95daa289a62addeaf10295
+idna==2.10 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:b307872f855b18632ce0c21c5e45be78c0ea7ae4c15c828c20788b26921eb3f6 \
+ --hash=sha256:b97d804b1e9b523befed77c48dacec60e6dcb0b5391d57af6a65a312a90648c0
+importlib-metadata==6.0.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:7efb448ec9a5e313a57655d35aa54cd3e01b7e1fbcf72dce1bf06119420f5bad \
+ --hash=sha256:e354bedeb60efa6affdcc8ae121b73544a7aa74156d047311948f6d711cd378d
+importlib-resources==5.12.0 ; python_version >= "3.7" and python_version < "3.9" \
+ --hash=sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6 \
+ --hash=sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a
+jinja2==2.11.3 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419 \
+ --hash=sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6
+jinxed==1.2.0 ; python_version >= "3.7" and python_version < "4.0" and platform_system == "Windows" \
+ --hash=sha256:032acda92d5c57cd216033cbbd53de731e6ed50deb63eb4781336ca55f72cda5 \
+ --hash=sha256:cfc2b2e4e3b4326954d546ba6d6b9a7a796ddcb0aef8d03161d005177eb0d48b
+jsmin==3.0.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:88fc1bd6033a47c5911dbcada7d279c7a8b7ad0841909590f6a742c20c4d2e08
+json-e==2.7.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:d8c1ec3f5bbc7728c3a504ebe58829f283c64eca230871e4eefe974b4cdaae4a
+jsonschema==4.17.3 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d \
+ --hash=sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6
+looseversion==1.0.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:a205beabd0ffd40488edb9ccb3a39134510fc7c0c2847a25079f559e59c004ac \
+ --hash=sha256:b339dfde67680e9c5c2e96673e52bee9f94d2f0e1b8f4cbfd86d32311e86b952
+markupsafe==2.0.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:01a9b8ea66f1658938f65b93a85ebe8bc016e6769611be228d797c9d998dd298 \
+ --hash=sha256:023cb26ec21ece8dc3907c0e8320058b2e0cb3c55cf9564da612bc325bed5e64 \
+ --hash=sha256:0446679737af14f45767963a1a9ef7620189912317d095f2d9ffa183a4d25d2b \
+ --hash=sha256:04635854b943835a6ea959e948d19dcd311762c5c0c6e1f0e16ee57022669194 \
+ --hash=sha256:0717a7390a68be14b8c793ba258e075c6f4ca819f15edfc2a3a027c823718567 \
+ --hash=sha256:0955295dd5eec6cb6cc2fe1698f4c6d84af2e92de33fbcac4111913cd100a6ff \
+ --hash=sha256:0d4b31cc67ab36e3392bbf3862cfbadac3db12bdd8b02a2731f509ed5b829724 \
+ --hash=sha256:10f82115e21dc0dfec9ab5c0223652f7197feb168c940f3ef61563fc2d6beb74 \
+ --hash=sha256:168cd0a3642de83558a5153c8bd34f175a9a6e7f6dc6384b9655d2697312a646 \
+ --hash=sha256:1d609f577dc6e1aa17d746f8bd3c31aa4d258f4070d61b2aa5c4166c1539de35 \
+ --hash=sha256:1f2ade76b9903f39aa442b4aadd2177decb66525062db244b35d71d0ee8599b6 \
+ --hash=sha256:20dca64a3ef2d6e4d5d615a3fd418ad3bde77a47ec8a23d984a12b5b4c74491a \
+ --hash=sha256:2a7d351cbd8cfeb19ca00de495e224dea7e7d919659c2841bbb7f420ad03e2d6 \
+ --hash=sha256:2d7d807855b419fc2ed3e631034685db6079889a1f01d5d9dac950f764da3dad \
+ --hash=sha256:2ef54abee730b502252bcdf31b10dacb0a416229b72c18b19e24a4509f273d26 \
+ --hash=sha256:36bc903cbb393720fad60fc28c10de6acf10dc6cc883f3e24ee4012371399a38 \
+ --hash=sha256:37205cac2a79194e3750b0af2a5720d95f786a55ce7df90c3af697bfa100eaac \
+ --hash=sha256:3c112550557578c26af18a1ccc9e090bfe03832ae994343cfdacd287db6a6ae7 \
+ --hash=sha256:3dd007d54ee88b46be476e293f48c85048603f5f516008bee124ddd891398ed6 \
+ --hash=sha256:4296f2b1ce8c86a6aea78613c34bb1a672ea0e3de9c6ba08a960efe0b0a09047 \
+ --hash=sha256:47ab1e7b91c098ab893b828deafa1203de86d0bc6ab587b160f78fe6c4011f75 \
+ --hash=sha256:49e3ceeabbfb9d66c3aef5af3a60cc43b85c33df25ce03d0031a608b0a8b2e3f \
+ --hash=sha256:4dc8f9fb58f7364b63fd9f85013b780ef83c11857ae79f2feda41e270468dd9b \
+ --hash=sha256:4efca8f86c54b22348a5467704e3fec767b2db12fc39c6d963168ab1d3fc9135 \
+ --hash=sha256:53edb4da6925ad13c07b6d26c2a852bd81e364f95301c66e930ab2aef5b5ddd8 \
+ --hash=sha256:5855f8438a7d1d458206a2466bf82b0f104a3724bf96a1c781ab731e4201731a \
+ --hash=sha256:594c67807fb16238b30c44bdf74f36c02cdf22d1c8cda91ef8a0ed8dabf5620a \
+ --hash=sha256:5b6d930f030f8ed98e3e6c98ffa0652bdb82601e7a016ec2ab5d7ff23baa78d1 \
+ --hash=sha256:5bb28c636d87e840583ee3adeb78172efc47c8b26127267f54a9c0ec251d41a9 \
+ --hash=sha256:60bf42e36abfaf9aff1f50f52644b336d4f0a3fd6d8a60ca0d054ac9f713a864 \
+ --hash=sha256:611d1ad9a4288cf3e3c16014564df047fe08410e628f89805e475368bd304914 \
+ --hash=sha256:6300b8454aa6930a24b9618fbb54b5a68135092bc666f7b06901f897fa5c2fee \
+ --hash=sha256:63f3268ba69ace99cab4e3e3b5840b03340efed0948ab8f78d2fd87ee5442a4f \
+ --hash=sha256:6557b31b5e2c9ddf0de32a691f2312a32f77cd7681d8af66c2692efdbef84c18 \
+ --hash=sha256:693ce3f9e70a6cf7d2fb9e6c9d8b204b6b39897a2c4a1aa65728d5ac97dcc1d8 \
+ --hash=sha256:6a7fae0dd14cf60ad5ff42baa2e95727c3d81ded453457771d02b7d2b3f9c0c2 \
+ --hash=sha256:6c4ca60fa24e85fe25b912b01e62cb969d69a23a5d5867682dd3e80b5b02581d \
+ --hash=sha256:6fcf051089389abe060c9cd7caa212c707e58153afa2c649f00346ce6d260f1b \
+ --hash=sha256:7d91275b0245b1da4d4cfa07e0faedd5b0812efc15b702576d103293e252af1b \
+ --hash=sha256:89c687013cb1cd489a0f0ac24febe8c7a666e6e221b783e53ac50ebf68e45d86 \
+ --hash=sha256:8d206346619592c6200148b01a2142798c989edcb9c896f9ac9722a99d4e77e6 \
+ --hash=sha256:905fec760bd2fa1388bb5b489ee8ee5f7291d692638ea5f67982d968366bef9f \
+ --hash=sha256:97383d78eb34da7e1fa37dd273c20ad4320929af65d156e35a5e2d89566d9dfb \
+ --hash=sha256:984d76483eb32f1bcb536dc27e4ad56bba4baa70be32fa87152832cdd9db0833 \
+ --hash=sha256:99df47edb6bda1249d3e80fdabb1dab8c08ef3975f69aed437cb69d0a5de1e28 \
+ --hash=sha256:9f02365d4e99430a12647f09b6cc8bab61a6564363f313126f775eb4f6ef798e \
+ --hash=sha256:a30e67a65b53ea0a5e62fe23682cfe22712e01f453b95233b25502f7c61cb415 \
+ --hash=sha256:ab3ef638ace319fa26553db0624c4699e31a28bb2a835c5faca8f8acf6a5a902 \
+ --hash=sha256:aca6377c0cb8a8253e493c6b451565ac77e98c2951c45f913e0b52facdcff83f \
+ --hash=sha256:add36cb2dbb8b736611303cd3bfcee00afd96471b09cda130da3581cbdc56a6d \
+ --hash=sha256:b2f4bf27480f5e5e8ce285a8c8fd176c0b03e93dcc6646477d4630e83440c6a9 \
+ --hash=sha256:b7f2d075102dc8c794cbde1947378051c4e5180d52d276987b8d28a3bd58c17d \
+ --hash=sha256:baa1a4e8f868845af802979fcdbf0bb11f94f1cb7ced4c4b8a351bb60d108145 \
+ --hash=sha256:be98f628055368795d818ebf93da628541e10b75b41c559fdf36d104c5787066 \
+ --hash=sha256:bf5d821ffabf0ef3533c39c518f3357b171a1651c1ff6827325e4489b0e46c3c \
+ --hash=sha256:c47adbc92fc1bb2b3274c4b3a43ae0e4573d9fbff4f54cd484555edbf030baf1 \
+ --hash=sha256:cdfba22ea2f0029c9261a4bd07e830a8da012291fbe44dc794e488b6c9bb353a \
+ --hash=sha256:d6c7ebd4e944c85e2c3421e612a7057a2f48d478d79e61800d81468a8d842207 \
+ --hash=sha256:d7f9850398e85aba693bb640262d3611788b1f29a79f0c93c565694658f4071f \
+ --hash=sha256:d8446c54dc28c01e5a2dbac5a25f071f6653e6e40f3a8818e8b45d790fe6ef53 \
+ --hash=sha256:deb993cacb280823246a026e3b2d81c493c53de6acfd5e6bfe31ab3402bb37dd \
+ --hash=sha256:e0f138900af21926a02425cf736db95be9f4af72ba1bb21453432a07f6082134 \
+ --hash=sha256:e9936f0b261d4df76ad22f8fee3ae83b60d7c3e871292cd42f40b81b70afae85 \
+ --hash=sha256:f0567c4dc99f264f49fe27da5f735f414c4e7e7dd850cfd8e69f0862d7c74ea9 \
+ --hash=sha256:f5653a225f31e113b152e56f154ccbe59eeb1c7487b39b9d9f9cdb58e6c79dc5 \
+ --hash=sha256:f826e31d18b516f653fe296d967d700fddad5901ae07c622bb3705955e1faa94 \
+ --hash=sha256:f8ba0e8349a38d3001fae7eadded3f6606f0da5d748ee53cc1dab1d6527b9509 \
+ --hash=sha256:f9081981fe268bd86831e5c75f7de206ef275defcb82bc70740ae6dc507aee51 \
+ --hash=sha256:fa130dd50c57d53368c9d59395cb5526eda596d3ffe36666cd81a44d56e48872
+mohawk==0.3.4 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:b3f85ffa93a5c7d2f9cc591246ef9f8ac4a9fa716bfd5bae0377699a2d89d78c \
+ --hash=sha256:e98b331d9fa9ece7b8be26094cbe2d57613ae882133cc755167268a984bc0ab3
+mozilla-repo-urls==0.1.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:30510d3519479aa70211145d0ac9cf6e2fadcb8d30fa3b196bb957bd773502ba \
+ --hash=sha256:7364da790751db2a060eb45adbf1d7db89a145ed279ba235f3425db9dd255915
+mozilla-version==2.0.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:09697ddc5f55ad8d76521bf3e37aaec4d5bfd7fd4c9018a1cbb0e8cf6c536538 \
+ --hash=sha256:50807a1f4000a7db6bfe95b0ffb1bade429cd8e56cbab70fd3eff5dd46ebb794
+multidict==5.1.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:018132dbd8688c7a69ad89c4a3f39ea2f9f33302ebe567a879da8f4ca73f0d0a \
+ --hash=sha256:051012ccee979b2b06be928a6150d237aec75dd6bf2d1eeeb190baf2b05abc93 \
+ --hash=sha256:05c20b68e512166fddba59a918773ba002fdd77800cad9f55b59790030bab632 \
+ --hash=sha256:07b42215124aedecc6083f1ce6b7e5ec5b50047afa701f3442054373a6deb656 \
+ --hash=sha256:0e3c84e6c67eba89c2dbcee08504ba8644ab4284863452450520dad8f1e89b79 \
+ --hash=sha256:0e929169f9c090dae0646a011c8b058e5e5fb391466016b39d21745b48817fd7 \
+ --hash=sha256:1ab820665e67373de5802acae069a6a05567ae234ddb129f31d290fc3d1aa56d \
+ --hash=sha256:25b4e5f22d3a37ddf3effc0710ba692cfc792c2b9edfb9c05aefe823256e84d5 \
+ --hash=sha256:2e68965192c4ea61fff1b81c14ff712fc7dc15d2bd120602e4a3494ea6584224 \
+ --hash=sha256:2f1a132f1c88724674271d636e6b7351477c27722f2ed789f719f9e3545a3d26 \
+ --hash=sha256:37e5438e1c78931df5d3c0c78ae049092877e5e9c02dd1ff5abb9cf27a5914ea \
+ --hash=sha256:3a041b76d13706b7fff23b9fc83117c7b8fe8d5fe9e6be45eee72b9baa75f348 \
+ --hash=sha256:3a4f32116f8f72ecf2a29dabfb27b23ab7cdc0ba807e8459e59a93a9be9506f6 \
+ --hash=sha256:46c73e09ad374a6d876c599f2328161bcd95e280f84d2060cf57991dec5cfe76 \
+ --hash=sha256:46dd362c2f045095c920162e9307de5ffd0a1bfbba0a6e990b344366f55a30c1 \
+ --hash=sha256:4b186eb7d6ae7c06eb4392411189469e6a820da81447f46c0072a41c748ab73f \
+ --hash=sha256:54fd1e83a184e19c598d5e70ba508196fd0bbdd676ce159feb412a4a6664f952 \
+ --hash=sha256:585fd452dd7782130d112f7ddf3473ffdd521414674c33876187e101b588738a \
+ --hash=sha256:5cf3443199b83ed9e955f511b5b241fd3ae004e3cb81c58ec10f4fe47c7dce37 \
+ --hash=sha256:6a4d5ce640e37b0efcc8441caeea8f43a06addace2335bd11151bc02d2ee31f9 \
+ --hash=sha256:7df80d07818b385f3129180369079bd6934cf70469f99daaebfac89dca288359 \
+ --hash=sha256:806068d4f86cb06af37cd65821554f98240a19ce646d3cd24e1c33587f313eb8 \
+ --hash=sha256:830f57206cc96ed0ccf68304141fec9481a096c4d2e2831f311bde1c404401da \
+ --hash=sha256:929006d3c2d923788ba153ad0de8ed2e5ed39fdbe8e7be21e2f22ed06c6783d3 \
+ --hash=sha256:9436dc58c123f07b230383083855593550c4d301d2532045a17ccf6eca505f6d \
+ --hash=sha256:9dd6e9b1a913d096ac95d0399bd737e00f2af1e1594a787e00f7975778c8b2bf \
+ --hash=sha256:ace010325c787c378afd7f7c1ac66b26313b3344628652eacd149bdd23c68841 \
+ --hash=sha256:b47a43177a5e65b771b80db71e7be76c0ba23cc8aa73eeeb089ed5219cdbe27d \
+ --hash=sha256:b797515be8743b771aa868f83563f789bbd4b236659ba52243b735d80b29ed93 \
+ --hash=sha256:b7993704f1a4b204e71debe6095150d43b2ee6150fa4f44d6d966ec356a8d61f \
+ --hash=sha256:d5c65bdf4484872c4af3150aeebe101ba560dcfb34488d9a8ff8dbcd21079647 \
+ --hash=sha256:d81eddcb12d608cc08081fa88d046c78afb1bf8107e6feab5d43503fea74a635 \
+ --hash=sha256:dc862056f76443a0db4509116c5cd480fe1b6a2d45512a653f9a855cc0517456 \
+ --hash=sha256:ecc771ab628ea281517e24fd2c52e8f31c41e66652d07599ad8818abaad38cda \
+ --hash=sha256:f200755768dc19c6f4e2b672421e0ebb3dd54c38d5a4f262b872d8cfcc9e93b5 \
+ --hash=sha256:f21756997ad8ef815d8ef3d34edd98804ab5ea337feedcd62fb52d22bf531281 \
+ --hash=sha256:fc13a9524bc18b6fb6e0dbec3533ba0496bbed167c56d0aabefd965584557d80
+packaging==21.3 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \
+ --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522
+pathspec==0.9.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:7d15c4ddb0b5c802d161efc417ec1a2558ea2653c2e8ad9c19098201dc1c993a \
+ --hash=sha256:e564499435a2673d586f6b2130bb5b95f04a3ba06f81b8f895b651a3c76aabb1
+pip-tools==5.5.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:10841c1e56c234d610d0466447685b9ea4ee4a2c274f858c0ef3c33d9bd0d985 \
+ --hash=sha256:cb0108391366b3ef336185097b3c2c0f3fa115b15098dafbda5e78aef70ea114
+pip==23.0.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f \
+ --hash=sha256:cd015ea1bfb0fcef59d8a286c1f8bebcb983f6317719d415dc5351efb7cd7024
+pkgutil-resolve-name==1.3.10 ; python_version >= "3.7" and python_version < "3.9" \
+ --hash=sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174 \
+ --hash=sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e
+ply==3.10 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:96e94af7dd7031d8d6dd6e2a8e0de593b511c211a86e28a9c9621c275ac8bacb
+pyasn1-modules==0.2.8 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \
+ --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74
+pyasn1==0.4.8 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \
+ --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba
+pylru==1.0.9 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:71376192671f0ad1690b2a7427d39a29b1df994c8469a9b46b03ed7e28c0172c
+pyparsing==2.4.7 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:c203ec8783bf771a155b207279b9bccb8dea02d8f0c9e5f8ead507bc3246ecc1 \
+ --hash=sha256:ef9d7589ef3c200abe66653d3f1ab1033c3c419ae9b9bdb1240a85b024efc88b
+pyrsistent==0.16.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:28669905fe725965daa16184933676547c5bb40a5153055a8dee2a4bd7933ad3
+python-hglib==2.4 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:693d6ed92a6566e78802c7a03c256cda33d08c63ad3f00fcfa11379b184b9462
+pyyaml==5.4.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:08682f6b72c722394747bddaf0aa62277e02557c0fd1c42cb853016a38f8dedf \
+ --hash=sha256:0f5f5786c0e09baddcd8b4b45f20a7b5d61a7e7e99846e3c799b05c7c53fa696 \
+ --hash=sha256:129def1b7c1bf22faffd67b8f3724645203b79d8f4cc81f674654d9902cb4393 \
+ --hash=sha256:294db365efa064d00b8d1ef65d8ea2c3426ac366c0c4368d930bf1c5fb497f77 \
+ --hash=sha256:3b2b1824fe7112845700f815ff6a489360226a5609b96ec2190a45e62a9fc922 \
+ --hash=sha256:3bd0e463264cf257d1ffd2e40223b197271046d09dadf73a0fe82b9c1fc385a5 \
+ --hash=sha256:4465124ef1b18d9ace298060f4eccc64b0850899ac4ac53294547536533800c8 \
+ --hash=sha256:49d4cdd9065b9b6e206d0595fee27a96b5dd22618e7520c33204a4a3239d5b10 \
+ --hash=sha256:4e0583d24c881e14342eaf4ec5fbc97f934b999a6828693a99157fde912540cc \
+ --hash=sha256:5accb17103e43963b80e6f837831f38d314a0495500067cb25afab2e8d7a4018 \
+ --hash=sha256:607774cbba28732bfa802b54baa7484215f530991055bb562efbed5b2f20a45e \
+ --hash=sha256:6c78645d400265a062508ae399b60b8c167bf003db364ecb26dcab2bda048253 \
+ --hash=sha256:72a01f726a9c7851ca9bfad6fd09ca4e090a023c00945ea05ba1638c09dc3347 \
+ --hash=sha256:74c1485f7707cf707a7aef42ef6322b8f97921bd89be2ab6317fd782c2d53183 \
+ --hash=sha256:895f61ef02e8fed38159bb70f7e100e00f471eae2bc838cd0f4ebb21e28f8541 \
+ --hash=sha256:8c1be557ee92a20f184922c7b6424e8ab6691788e6d86137c5d93c1a6ec1b8fb \
+ --hash=sha256:bb4191dfc9306777bc594117aee052446b3fa88737cd13b7188d0e7aa8162185 \
+ --hash=sha256:bfb51918d4ff3d77c1c856a9699f8492c612cde32fd3bcd344af9be34999bfdc \
+ --hash=sha256:c20cfa2d49991c8b4147af39859b167664f2ad4561704ee74c1de03318e898db \
+ --hash=sha256:cb333c16912324fd5f769fff6bc5de372e9e7a202247b48870bc251ed40239aa \
+ --hash=sha256:d2d9808ea7b4af864f35ea216be506ecec180628aced0704e34aca0b040ffe46 \
+ --hash=sha256:d483ad4e639292c90170eb6f7783ad19490e7a8defb3e46f97dfe4bacae89122 \
+ --hash=sha256:dd5de0646207f053eb0d6c74ae45ba98c3395a571a2891858e87df7c9b9bd51b \
+ --hash=sha256:e1d4970ea66be07ae37a3c2e48b5ec63f7ba6804bdddfdbd3cfd954d25a82e63 \
+ --hash=sha256:e4fac90784481d221a8e4b1162afa7c47ed953be40d31ab4629ae917510051df \
+ --hash=sha256:fa5ae20527d8e831e8230cbffd9f8fe952815b2b7dae6ffec25318803a7528fc \
+ --hash=sha256:fd7f6999a8070df521b6384004ef42833b9bd62cfee11a09bda1079b4b704247 \
+ --hash=sha256:fdc842473cd33f45ff6bce46aea678a54e3d21f1b61a7750ce3c498eedfe25d6 \
+ --hash=sha256:fe69978f3f768926cfa37b867e3843918e012cf83f680806599ddce33c2c68b0
+redo==2.0.3 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:36784bf8ae766e14f9db0e377ccfa02835d648321d2007b6ae0bf4fd612c0f94 \
+ --hash=sha256:71161cb0e928d824092a5f16203939bbc0867ce4c4685db263cf22c3ae7634a8
+requests-unixsocket==0.2.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:014d07bfb66dc805a011a8b4b306cf4ec96d2eddb589f6b2b5765e626f0dc0cc \
+ --hash=sha256:9e5c1a20afc3cf786197ae59c79bcdb0e7565f218f27df5f891307ee8817c1ea
+requests==2.25.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:27973dd4a904a4f13b263a19c866c13b92a39ed1c964655f025f3f8d3d75b804 \
+ --hash=sha256:c210084e36a42ae6b9219e00e48287def368a26d03a048ddad7bfee44f75871e
+responses==0.10.6 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:502d9c0c8008439cfcdef7e251f507fcfdd503b56e8c0c87c3c3e3393953f790 \
+ --hash=sha256:97193c0183d63fba8cd3a041c75464e4b09ea0aff6328800d1546598567dde0b
+rsa==3.1.4 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:e2b0b05936c276b1edd2e1525553233b666df9e29b5c3ba223eed738277c82a0
+sentry-sdk==0.14.3 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:23808d571d2461a4ce3784ec12bbee5bdb8c026c143fe79d36cef8a6d653e71f \
+ --hash=sha256:bb90a4e19c7233a580715fc986cc44be2c48fc10b31e71580a2037e1c94b6950
+setuptools==51.2.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:56948bf25c682e166cf2bfe7c1ad63e5745849b50d1ae7b0f8bff5decdcf34f2 \
+ --hash=sha256:7ef59b1790b3491f8d321f531eccc11517a07a4d7637e498465cd834d80d4c2c
+six==1.13.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd \
+ --hash=sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66
+slugid==2.0.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:a950d98b72691178bdd4d6c52743c4a2aa039207cf7a97d71060a111ff9ba297 \
+ --hash=sha256:aec8b0e01c4ad32e38e12d609eab3ec912fd129aaf6b2ded0199b56a5f8fd67c
+taskcluster-taskgraph==3.5.2 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:62f1a320d6b310f65151904a9992719a9b2c4c41ef8f57be810899fd3c5d2703 \
+ --hash=sha256:6a024ba2383f56e11b764500f92837afb825612a49d24bde9791dfa7aa7ddaec
+taskcluster-urls==13.0.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:5e25e7e6818e8877178b175ff43d2e6548afad72694aa125f404a7329ece0973 \
+ --hash=sha256:b25e122ecec249c4299ac7b20b08db76e3e2025bdaeb699a9d444556de5fd367 \
+ --hash=sha256:f66dcbd6572a6216ab65949f0fa0b91f2df647918028436c384e6af5cd12ae2b
+taskcluster==44.2.2 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:0266a6a901e1a2ec838984a7f24e7adb6d58f9f2e221a7f613388f8f23f786fc \
+ --hash=sha256:846d73c597f0f47dd8525c85c8d9bc41111d5200b090690d3f16b2f57c56a2e1 \
+ --hash=sha256:c1b0e82be25b1ed17e07c90b24a382634b2bfce273fdf2682d94568abe10716c
+toml==0.10.2 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b \
+ --hash=sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f
+tqdm==4.62.3 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:8dd278a422499cd6b727e6ae4061c40b48fce8b76d1ccbf5d34fca9b7f925b0c \
+ --hash=sha256:d359de7217506c9851b7869f3708d8ee53ed70a1b8edbba4dbcb47442592920d
+typing-extensions==3.10.0.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:0ac0f89795dd19de6b97debb0c6af1c70987fd80a2d62d1958f7e56fcc31b497 \
+ --hash=sha256:50b6f157849174217d0656f99dc82fe932884fb250826c18350e159ec6cdf342 \
+ --hash=sha256:779383f6086d90c99ae41cf0ff39aac8a7937a9283ce0a414e5dd782f4c94a84
+urllib3==1.26.0 ; python_version >= "3.7" and python_version < "4" \
+ --hash=sha256:4849f132941d68144df0a3785ccc4fe423430ba5db0108d045c8cadbc90f517a \
+ --hash=sha256:bad31cb622ceee0ab46c4c884cf61957def0ff2e644de0a7a093678844c9ccac
+voluptuous==0.12.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:663572419281ddfaf4b4197fd4942d181630120fb39b333e3adad70aeb56444b \
+ --hash=sha256:8ace33fcf9e6b1f59406bfaf6b8ec7bcc44266a9f29080b4deb4fe6ff2492386
+wcwidth==0.2.5 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:beb4802a9cebb9144e99086eff703a642a13d6a0052920003a230f3294bbe784 \
+ --hash=sha256:c4d647b99872929fdb7bdcaa4fbe7f01413ed3d98077df798530e5b04f116c83
+wheel==0.37.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:21014b2bd93c6d0034b6ba5d35e4eb284340e09d63c59aef6fc14b0f346146fd \
+ --hash=sha256:e2ef7239991699e3355d54f8e968a21bb940a1dbf34a4d226741e64462516fad
+yamllint==1.23.0 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:0fa69bf8a86182b7fe14918bdd3a30354c869966bbc7cbfff176af71bda9c806 \
+ --hash=sha256:59f3ff77f44e7f46be6aecdb985830f73a1c51e290b7082a7d38c2ae1940f4a9
+yarl==1.6.3 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:00d7ad91b6583602eb9c1d085a2cf281ada267e9a197e8b7cae487dadbfa293e \
+ --hash=sha256:0355a701b3998dcd832d0dc47cc5dedf3874f966ac7f870e0f3a6788d802d434 \
+ --hash=sha256:15263c3b0b47968c1d90daa89f21fcc889bb4b1aac5555580d74565de6836366 \
+ --hash=sha256:2ce4c621d21326a4a5500c25031e102af589edb50c09b321049e388b3934eec3 \
+ --hash=sha256:31ede6e8c4329fb81c86706ba8f6bf661a924b53ba191b27aa5fcee5714d18ec \
+ --hash=sha256:324ba3d3c6fee56e2e0b0d09bf5c73824b9f08234339d2b788af65e60040c959 \
+ --hash=sha256:329412812ecfc94a57cd37c9d547579510a9e83c516bc069470db5f75684629e \
+ --hash=sha256:4736eaee5626db8d9cda9eb5282028cc834e2aeb194e0d8b50217d707e98bb5c \
+ --hash=sha256:4953fb0b4fdb7e08b2f3b3be80a00d28c5c8a2056bb066169de00e6501b986b6 \
+ --hash=sha256:4c5bcfc3ed226bf6419f7a33982fb4b8ec2e45785a0561eb99274ebbf09fdd6a \
+ --hash=sha256:547f7665ad50fa8563150ed079f8e805e63dd85def6674c97efd78eed6c224a6 \
+ --hash=sha256:5b883e458058f8d6099e4420f0cc2567989032b5f34b271c0827de9f1079a424 \
+ --hash=sha256:63f90b20ca654b3ecc7a8d62c03ffa46999595f0167d6450fa8383bab252987e \
+ --hash=sha256:68dc568889b1c13f1e4745c96b931cc94fdd0defe92a72c2b8ce01091b22e35f \
+ --hash=sha256:69ee97c71fee1f63d04c945f56d5d726483c4762845400a6795a3b75d56b6c50 \
+ --hash=sha256:6d6283d8e0631b617edf0fd726353cb76630b83a089a40933043894e7f6721e2 \
+ --hash=sha256:72a660bdd24497e3e84f5519e57a9ee9220b6f3ac4d45056961bf22838ce20cc \
+ --hash=sha256:73494d5b71099ae8cb8754f1df131c11d433b387efab7b51849e7e1e851f07a4 \
+ --hash=sha256:7356644cbed76119d0b6bd32ffba704d30d747e0c217109d7979a7bc36c4d970 \
+ --hash=sha256:8a9066529240171b68893d60dca86a763eae2139dd42f42106b03cf4b426bf10 \
+ --hash=sha256:8aa3decd5e0e852dc68335abf5478a518b41bf2ab2f330fe44916399efedfae0 \
+ --hash=sha256:97b5bdc450d63c3ba30a127d018b866ea94e65655efaf889ebeabc20f7d12406 \
+ --hash=sha256:9ede61b0854e267fd565e7527e2f2eb3ef8858b301319be0604177690e1a3896 \
+ --hash=sha256:b2e9a456c121e26d13c29251f8267541bd75e6a1ccf9e859179701c36a078643 \
+ --hash=sha256:b5dfc9a40c198334f4f3f55880ecf910adebdcb2a0b9a9c23c9345faa9185721 \
+ --hash=sha256:bafb450deef6861815ed579c7a6113a879a6ef58aed4c3a4be54400ae8871478 \
+ --hash=sha256:c49ff66d479d38ab863c50f7bb27dee97c6627c5fe60697de15529da9c3de724 \
+ --hash=sha256:ce3beb46a72d9f2190f9e1027886bfc513702d748047b548b05dab7dfb584d2e \
+ --hash=sha256:d26608cf178efb8faa5ff0f2d2e77c208f471c5a3709e577a7b3fd0445703ac8 \
+ --hash=sha256:d597767fcd2c3dc49d6eea360c458b65643d1e4dbed91361cf5e36e53c1f8c96 \
+ --hash=sha256:d5c32c82990e4ac4d8150fd7652b972216b204de4e83a122546dce571c1bdf25 \
+ --hash=sha256:d8d07d102f17b68966e2de0e07bfd6e139c7c02ef06d3a0f8d2f0f055e13bb76 \
+ --hash=sha256:e46fba844f4895b36f4c398c5af062a9808d1f26b2999c58909517384d5deda2 \
+ --hash=sha256:e6b5460dc5ad42ad2b36cca524491dfcaffbfd9c8df50508bddc354e787b8dc2 \
+ --hash=sha256:f040bcc6725c821a4c0665f3aa96a4d0805a7aaf2caf266d256b8ed71b9f041c \
+ --hash=sha256:f0b059678fd549c66b89bed03efcabb009075bd131c248ecdf087bdb6faba24a \
+ --hash=sha256:fcbb48a93e8699eae920f8d92f7160c03567b421bc17362a9ffbbd706a816f71
+zipp==3.4.1 ; python_version >= "3.7" and python_version < "4.0" \
+ --hash=sha256:3607921face881ba3e026887d8150cca609d517579abe052ac81fc5aeffdbd76 \
+ --hash=sha256:51cb66cc54621609dd593d1787f286ee42a5c0adbb4b29abea5a63edc3e03098
diff --git a/third_party/python/responses/responses-0.10.6.dist-info/LICENSE b/third_party/python/responses/responses-0.10.6.dist-info/LICENSE
new file mode 100644
index 0000000000..52b44b20a3
--- /dev/null
+++ b/third_party/python/responses/responses-0.10.6.dist-info/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright 2015 David Cramer
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/python/responses/responses-0.10.6.dist-info/METADATA b/third_party/python/responses/responses-0.10.6.dist-info/METADATA
new file mode 100644
index 0000000000..45368b35d5
--- /dev/null
+++ b/third_party/python/responses/responses-0.10.6.dist-info/METADATA
@@ -0,0 +1,454 @@
+Metadata-Version: 2.1
+Name: responses
+Version: 0.10.6
+Summary: A utility library for mocking out the `requests` Python library.
+Home-page: https://github.com/getsentry/responses
+Author: David Cramer
+License: Apache 2.0
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Software Development
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Requires-Dist: requests (>=2.0)
+Requires-Dist: six
+Requires-Dist: mock ; python_version < "3.3"
+Requires-Dist: cookies ; python_version < "3.4"
+Provides-Extra: tests
+Requires-Dist: pytest ; extra == 'tests'
+Requires-Dist: coverage (<5.0.0,>=3.7.1) ; extra == 'tests'
+Requires-Dist: pytest-cov ; extra == 'tests'
+Requires-Dist: pytest-localserver ; extra == 'tests'
+Requires-Dist: flake8 ; extra == 'tests'
+
+Responses
+=========
+
+.. image:: https://travis-ci.org/getsentry/responses.svg?branch=master
+ :target: https://travis-ci.org/getsentry/responses
+
+A utility library for mocking out the `requests` Python library.
+
+.. note::
+
+ Responses requires Python 2.7 or newer, and requests >= 2.0
+
+
+Installing
+----------
+
+``pip install responses``
+
+
+Basics
+------
+
+The core of ``responses`` comes from registering mock responses:
+
+.. code-block:: python
+
+ import responses
+ import requests
+
+ @responses.activate
+ def test_simple():
+ responses.add(responses.GET, 'http://twitter.com/api/1/foobar',
+ json={'error': 'not found'}, status=404)
+
+ resp = requests.get('http://twitter.com/api/1/foobar')
+
+ assert resp.json() == {"error": "not found"}
+
+ assert len(responses.calls) == 1
+ assert responses.calls[0].request.url == 'http://twitter.com/api/1/foobar'
+ assert responses.calls[0].response.text == '{"error": "not found"}'
+
+If you attempt to fetch a URL which doesn't match a registered response,
+``responses`` will raise a ``ConnectionError``:
+
+.. code-block:: python
+
+    import pytest
+    import responses
+ import requests
+
+ from requests.exceptions import ConnectionError
+
+ @responses.activate
+ def test_simple():
+ with pytest.raises(ConnectionError):
+ requests.get('http://twitter.com/api/1/foobar')
+
+Lastly, you can pass an ``Exception`` as the body to trigger an error on the request:
+
+.. code-block:: python
+
+    import pytest
+    import responses
+ import requests
+
+ @responses.activate
+ def test_simple():
+ responses.add(responses.GET, 'http://twitter.com/api/1/foobar',
+ body=Exception('...'))
+ with pytest.raises(Exception):
+ requests.get('http://twitter.com/api/1/foobar')
+
+
+Response Parameters
+-------------------
+
+Responses are normally registered by passing parameters to ``add``, but a
+``Response`` object can also be constructed and passed directly:
+
+.. code-block:: python
+
+ import responses
+
+ responses.add(
+ responses.Response(
+ method='GET',
+ url='http://example.com',
+ )
+ )
+
+The following attributes can be passed to a Response mock:
+
+method (``str``)
+ The HTTP method (GET, POST, etc).
+
+url (``str`` or compiled regular expression)
+ The full resource URL.
+
+match_querystring (``bool``)
+ Include the query string when matching requests.
+ Enabled by default if the response URL contains a query string,
+ disabled if it doesn't or the URL is a regular expression.
+
+body (``str`` or ``BufferedReader``)
+ The response body.
+
+json
+ A Python object representing the JSON response body. Automatically configures
+ the appropriate Content-Type.
+
+status (``int``)
+ The HTTP status code.
+
+content_type (``str``)
+ Defaults to ``text/plain``.
+
+headers (``dict``)
+ Response headers.
+
+stream (``bool``)
+ Disabled by default. Indicates the response should use the streaming API.
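+
+For example, a single mock combining several of these attributes (the URL and
+values are illustrative):
+
+.. code-block:: python
+
+    import responses
+
+    responses.add(
+        responses.Response(
+            method='GET',
+            url='http://example.com/search?foo=bar',
+            match_querystring=True,
+            json={'ok': True},
+            status=200,
+            headers={'X-Header': 'foo'},
+        )
+    )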
+
+
+Dynamic Responses
+-----------------
+
+You can use callbacks to provide dynamic responses. The callback must return
+a tuple of (``status``, ``headers``, ``body``).
+
+.. code-block:: python
+
+ import json
+
+ import responses
+ import requests
+
+ @responses.activate
+ def test_calc_api():
+
+ def request_callback(request):
+ payload = json.loads(request.body)
+ resp_body = {'value': sum(payload['numbers'])}
+ headers = {'request-id': '728d329e-0e86-11e4-a748-0c84dc037c13'}
+ return (200, headers, json.dumps(resp_body))
+
+ responses.add_callback(
+ responses.POST, 'http://calc.com/sum',
+ callback=request_callback,
+ content_type='application/json',
+ )
+
+ resp = requests.post(
+ 'http://calc.com/sum',
+ json.dumps({'numbers': [1, 2, 3]}),
+ headers={'content-type': 'application/json'},
+ )
+
+ assert resp.json() == {'value': 6}
+
+ assert len(responses.calls) == 1
+ assert responses.calls[0].request.url == 'http://calc.com/sum'
+ assert responses.calls[0].response.text == '{"value": 6}'
+ assert (
+ responses.calls[0].response.headers['request-id'] ==
+ '728d329e-0e86-11e4-a748-0c84dc037c13'
+ )
+
+You can also pass a compiled regex to `add_callback` to match multiple URLs:
+
+.. code-block:: python
+
+ import re, json
+
+ from functools import reduce
+
+ import responses
+ import requests
+
+ operators = {
+ 'sum': lambda x, y: x+y,
+ 'prod': lambda x, y: x*y,
+ 'pow': lambda x, y: x**y
+ }
+
+ @responses.activate
+ def test_regex_url():
+
+ def request_callback(request):
+ payload = json.loads(request.body)
+ operator_name = request.path_url[1:]
+
+ operator = operators[operator_name]
+
+ resp_body = {'value': reduce(operator, payload['numbers'])}
+ headers = {'request-id': '728d329e-0e86-11e4-a748-0c84dc037c13'}
+ return (200, headers, json.dumps(resp_body))
+
+ responses.add_callback(
+ responses.POST,
+ re.compile('http://calc.com/(sum|prod|pow|unsupported)'),
+ callback=request_callback,
+ content_type='application/json',
+ )
+
+ resp = requests.post(
+ 'http://calc.com/prod',
+ json.dumps({'numbers': [2, 3, 4]}),
+ headers={'content-type': 'application/json'},
+ )
+ assert resp.json() == {'value': 24}
+
+ test_regex_url()
+
+
+If you want to pass extra keyword arguments to the callback function, for example when reusing
+a callback function to give a slightly different result, you can use ``functools.partial``:
+
+.. code-block:: python
+
+ from functools import partial
+
+ ...
+
+ def request_callback(request, id=None):
+ payload = json.loads(request.body)
+ resp_body = {'value': sum(payload['numbers'])}
+ headers = {'request-id': id}
+ return (200, headers, json.dumps(resp_body))
+
+ responses.add_callback(
+ responses.POST, 'http://calc.com/sum',
+ callback=partial(request_callback, id='728d329e-0e86-11e4-a748-0c84dc037c13'),
+ content_type='application/json',
+ )
+
+
+Responses as a context manager
+------------------------------
+
+.. code-block:: python
+
+ import responses
+ import requests
+
+ def test_my_api():
+ with responses.RequestsMock() as rsps:
+ rsps.add(responses.GET, 'http://twitter.com/api/1/foobar',
+ body='{}', status=200,
+ content_type='application/json')
+ resp = requests.get('http://twitter.com/api/1/foobar')
+
+ assert resp.status_code == 200
+
+ # outside the context manager requests will hit the remote server
+ resp = requests.get('http://twitter.com/api/1/foobar')
+        assert resp.status_code == 404
+
+Responses as a pytest fixture
+-----------------------------
+
+.. code-block:: python
+
+    import pytest
+    import requests
+    import responses
+
+    @pytest.fixture
+ def mocked_responses():
+ with responses.RequestsMock() as rsps:
+ yield rsps
+
+ def test_api(mocked_responses):
+ mocked_responses.add(
+ responses.GET, 'http://twitter.com/api/1/foobar',
+ body='{}', status=200,
+ content_type='application/json')
+ resp = requests.get('http://twitter.com/api/1/foobar')
+ assert resp.status_code == 200
+
+Assertions on declared responses
+--------------------------------
+
+When used as a context manager, Responses will, by default, raise an assertion
+error if a URL was registered but never accessed. This can be disabled by
+passing ``assert_all_requests_are_fired=False``:
+
+.. code-block:: python
+
+ import responses
+ import requests
+
+ def test_my_api():
+ with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
+ rsps.add(responses.GET, 'http://twitter.com/api/1/foobar',
+ body='{}', status=200,
+ content_type='application/json')
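+
+Conversely, a response that was registered but never fired raises an
+``AssertionError`` when the context exits; a minimal sketch (the URL is
+illustrative):
+
+.. code-block:: python
+
+    import pytest
+    import responses
+
+    def test_unfired_response():
+        with pytest.raises(AssertionError):
+            with responses.RequestsMock() as rsps:
+                rsps.add(responses.GET, 'http://twitter.com/api/1/foobar')
+                # no request is made, so leaving the context raises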
+
+
+Multiple Responses
+------------------
+
+You can also add multiple responses for the same URL:
+
+.. code-block:: python
+
+ import responses
+ import requests
+
+ @responses.activate
+ def test_my_api():
+ responses.add(responses.GET, 'http://twitter.com/api/1/foobar', status=500)
+ responses.add(responses.GET, 'http://twitter.com/api/1/foobar',
+ body='{}', status=200,
+ content_type='application/json')
+
+ resp = requests.get('http://twitter.com/api/1/foobar')
+ assert resp.status_code == 500
+ resp = requests.get('http://twitter.com/api/1/foobar')
+ assert resp.status_code == 200
+
+
+Using a callback to modify the response
+---------------------------------------
+
+If you use customized processing in `requests` via subclassing/mixins, or if you
+have library tools that interact with `requests` at a low level, you may need
+to add extended processing to the mocked Response object to fully simulate the
+environment for your tests. A `response_callback` can be used; it is invoked on
+each response before it is returned to the caller. The callback accepts a
+`response` as its single argument, and is expected to return a single
+`response` object.
+
+.. code-block:: python
+
+ import responses
+ import requests
+
+ def response_callback(resp):
+ resp.callback_processed = True
+ return resp
+
+ with responses.RequestsMock(response_callback=response_callback) as m:
+ m.add(responses.GET, 'http://example.com', body=b'test')
+ resp = requests.get('http://example.com')
+ assert resp.text == "test"
+ assert hasattr(resp, 'callback_processed')
+ assert resp.callback_processed is True
+
+
+Passing thru real requests
+--------------------------
+
+In some cases you may wish to allow certain requests to pass through
+``responses`` and hit a real server. This can be done with ``add_passthru``:
+
+.. code-block:: python
+
+ import responses
+
+ @responses.activate
+ def test_my_api():
+ responses.add_passthru('https://percy.io')
+
+This will allow any request matching that prefix, and not otherwise registered
+as a mock response, to pass through using the standard behavior.
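+
+For example, combining a passthru prefix with a regular mock (the URLs are
+illustrative):
+
+.. code-block:: python
+
+    import responses
+    import requests
+
+    @responses.activate
+    def test_mixed():
+        responses.add_passthru('https://percy.io')
+        responses.add(responses.GET, 'http://twitter.com/api/1/foobar',
+                      json={'ok': True})
+
+        # the registered mock answers this request; anything under
+        # https://percy.io would hit the real server instead
+        resp = requests.get('http://twitter.com/api/1/foobar')
+        assert resp.json() == {'ok': True}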
+
+
+Viewing/Modifying registered responses
+--------------------------------------
+
+Registered responses are available as a private attribute of the ``RequestsMock``
+instance. It is sometimes useful for debugging purposes to view the stack of
+registered responses, which can be accessed via ``responses.mock._matches``.
+
+The ``replace`` function allows a previously registered response to be
+changed. The method signature is identical to ``add``. Responses are
+identified using ``method`` and ``url``. Only the first matched response is
+replaced.
+
+.. code-block:: python
+
+ import responses
+ import requests
+
+ @responses.activate
+ def test_replace():
+
+ responses.add(responses.GET, 'http://example.org', json={'data': 1})
+ responses.replace(responses.GET, 'http://example.org', json={'data': 2})
+
+ resp = requests.get('http://example.org')
+
+ assert resp.json() == {'data': 2}
+
+
+``remove`` takes ``method`` and ``url`` arguments and will remove *all*
+matching responses from the registered list.
+
+Finally, ``reset`` will remove all registered responses.
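+
+A minimal sketch of both (the URL is illustrative):
+
+.. code-block:: python
+
+    import responses
+
+    @responses.activate
+    def test_remove_and_reset():
+        responses.add(responses.GET, 'http://example.org', json={'data': 1})
+
+        # remove *all* responses registered for this method/url pair
+        responses.remove(responses.GET, 'http://example.org')
+
+        # drop anything still registered
+        responses.reset()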
+
+
+
+Contributing
+------------
+
+Responses uses several linting and autoformatting utilities, so it's important that when
+submitting patches you use the appropriate toolchain:
+
+Clone the repository:
+
+.. code-block:: shell
+
+ git clone https://github.com/getsentry/responses.git
+
+Create an environment (e.g. with ``virtualenv``):
+
+.. code-block:: shell
+
+ virtualenv .env && source .env/bin/activate
+
+Install the development requirements:
+
+.. code-block:: shell
+
+ make develop
+
+
diff --git a/third_party/python/responses/responses-0.10.6.dist-info/RECORD b/third_party/python/responses/responses-0.10.6.dist-info/RECORD
new file mode 100644
index 0000000000..ab6546d203
--- /dev/null
+++ b/third_party/python/responses/responses-0.10.6.dist-info/RECORD
@@ -0,0 +1,6 @@
+responses.py,sha256=We44OwS185MQp72HCzsUCoGnzmfOhI6AH_MQq0yLQuU,19550
+responses-0.10.6.dist-info/LICENSE,sha256=SJ7LcLREfANKEJeKSwjaAVyb2fqVyjrq8hnZgVQWpnw,10835
+responses-0.10.6.dist-info/METADATA,sha256=pW-WNNN44ZdoLe1C7UQstvt2ffO-WNFyQ_haglud19o,13088
+responses-0.10.6.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
+responses-0.10.6.dist-info/top_level.txt,sha256=aQhzfC0bq4TkAaB_Yr-7cv4u2Xnc8WiVzvh4KdZo0Qo,10
+responses-0.10.6.dist-info/RECORD,,
diff --git a/third_party/python/responses/responses-0.10.6.dist-info/WHEEL b/third_party/python/responses/responses-0.10.6.dist-info/WHEEL
new file mode 100644
index 0000000000..c8240f03e8
--- /dev/null
+++ b/third_party/python/responses/responses-0.10.6.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/responses/responses-0.10.6.dist-info/top_level.txt b/third_party/python/responses/responses-0.10.6.dist-info/top_level.txt
new file mode 100644
index 0000000000..2cb24f43db
--- /dev/null
+++ b/third_party/python/responses/responses-0.10.6.dist-info/top_level.txt
@@ -0,0 +1 @@
+responses
diff --git a/third_party/python/responses/responses.py b/third_party/python/responses/responses.py
new file mode 100644
index 0000000000..9de936805c
--- /dev/null
+++ b/third_party/python/responses/responses.py
@@ -0,0 +1,653 @@
+from __future__ import absolute_import, print_function, division, unicode_literals
+
+import _io
+import inspect
+import json as json_module
+import logging
+import re
+import six
+
+from collections import namedtuple
+from functools import update_wrapper
+from requests.adapters import HTTPAdapter
+from requests.exceptions import ConnectionError
+from requests.sessions import REDIRECT_STATI
+from requests.utils import cookiejar_from_dict
+
+try:
+ from collections.abc import Sequence, Sized
+except ImportError:
+ from collections import Sequence, Sized
+
+try:
+ from requests.packages.urllib3.response import HTTPResponse
+except ImportError:
+ from urllib3.response import HTTPResponse
+
+if six.PY2:
+ from urlparse import urlparse, parse_qsl, urlsplit, urlunsplit
+ from urllib import quote
+else:
+ from urllib.parse import urlparse, parse_qsl, urlsplit, urlunsplit, quote
+
+if six.PY2:
+ try:
+ from six import cStringIO as BufferIO
+ except ImportError:
+ from six import StringIO as BufferIO
+else:
+ from io import BytesIO as BufferIO
+
+try:
+ from unittest import mock as std_mock
+except ImportError:
+ import mock as std_mock
+
+try:
+ Pattern = re._pattern_type
+except AttributeError:
+ # Python 3.7
+ Pattern = re.Pattern
+
+UNSET = object()
+
+Call = namedtuple("Call", ["request", "response"])
+
+_real_send = HTTPAdapter.send
+
+logger = logging.getLogger("responses")
+
+
+def _is_string(s):
+ return isinstance(s, six.string_types)
+
+
+def _has_unicode(s):
+ return any(ord(char) > 128 for char in s)
+
+
+def _clean_unicode(url):
+ # Clean up domain names, which use punycode to handle unicode chars
+ urllist = list(urlsplit(url))
+ netloc = urllist[1]
+ if _has_unicode(netloc):
+ domains = netloc.split(".")
+ for i, d in enumerate(domains):
+ if _has_unicode(d):
+ d = "xn--" + d.encode("punycode").decode("ascii")
+ domains[i] = d
+ urllist[1] = ".".join(domains)
+ url = urlunsplit(urllist)
+
+ # Clean up path/query/params, which use url-encoding to handle unicode chars
+ if isinstance(url.encode("utf8"), six.string_types):
+ url = url.encode("utf8")
+ chars = list(url)
+ for i, x in enumerate(chars):
+ if ord(x) > 128:
+ chars[i] = quote(x)
+
+ return "".join(chars)
+
+
+def _is_redirect(response):
+ try:
+ # 2.0.0 <= requests <= 2.2
+ return response.is_redirect
+
+ except AttributeError:
+ # requests > 2.2
+ return (
+ # use request.sessions conditional
+ response.status_code in REDIRECT_STATI
+ and "location" in response.headers
+ )
+
+
+def _cookies_from_headers(headers):
+ try:
+ import http.cookies as cookies
+
+ resp_cookie = cookies.SimpleCookie()
+ resp_cookie.load(headers["set-cookie"])
+
+ cookies_dict = {name: v.value for name, v in resp_cookie.items()}
+ except ImportError:
+ from cookies import Cookies
+
+ resp_cookies = Cookies.from_request(headers["set-cookie"])
+ cookies_dict = {v.name: v.value for _, v in resp_cookies.items()}
+ return cookiejar_from_dict(cookies_dict)
+
+
+_wrapper_template = """\
+def wrapper%(wrapper_args)s:
+ with responses:
+ return func%(func_args)s
+"""
+
+
+def get_wrapped(func, responses):
+ if six.PY2:
+ args, a, kw, defaults = inspect.getargspec(func)
+ wrapper_args = inspect.formatargspec(args, a, kw, defaults)
+
+ # Preserve the argspec for the wrapped function so that testing
+ # tools such as pytest can continue to use their fixture injection.
+ if hasattr(func, "__self__"):
+ args = args[1:] # Omit 'self'
+ func_args = inspect.formatargspec(args, a, kw, None)
+ else:
+ signature = inspect.signature(func)
+ signature = signature.replace(return_annotation=inspect.Signature.empty)
+ # If the function is wrapped, switch to *args, **kwargs for the parameters
+ # as we can't rely on the signature to give us the arguments the function will
+ # be called with. For example unittest.mock.patch uses required args that are
+ # not actually passed to the function when invoked.
+ if hasattr(func, "__wrapped__"):
+ wrapper_params = [
+ inspect.Parameter("args", inspect.Parameter.VAR_POSITIONAL),
+ inspect.Parameter("kwargs", inspect.Parameter.VAR_KEYWORD),
+ ]
+ else:
+ wrapper_params = [
+ param.replace(annotation=inspect.Parameter.empty)
+ for param in signature.parameters.values()
+ ]
+ signature = signature.replace(parameters=wrapper_params)
+
+ wrapper_args = str(signature)
+ params_without_defaults = [
+ param.replace(
+ annotation=inspect.Parameter.empty, default=inspect.Parameter.empty
+ )
+ for param in signature.parameters.values()
+ ]
+ signature = signature.replace(parameters=params_without_defaults)
+ func_args = str(signature)
+
+ evaldict = {"func": func, "responses": responses}
+ six.exec_(
+ _wrapper_template % {"wrapper_args": wrapper_args, "func_args": func_args},
+ evaldict,
+ )
+ wrapper = evaldict["wrapper"]
+ update_wrapper(wrapper, func)
+ return wrapper
+
+
+class CallList(Sequence, Sized):
+ def __init__(self):
+ self._calls = []
+
+ def __iter__(self):
+ return iter(self._calls)
+
+ def __len__(self):
+ return len(self._calls)
+
+ def __getitem__(self, idx):
+ return self._calls[idx]
+
+ def add(self, request, response):
+ self._calls.append(Call(request, response))
+
+ def reset(self):
+ self._calls = []
+
+
+def _ensure_url_default_path(url):
+ if _is_string(url):
+ url_parts = list(urlsplit(url))
+ if url_parts[2] == "":
+ url_parts[2] = "/"
+ url = urlunsplit(url_parts)
+ return url
+
+
+def _handle_body(body):
+ if isinstance(body, six.text_type):
+ body = body.encode("utf-8")
+ if isinstance(body, _io.BufferedReader):
+ return body
+
+ return BufferIO(body)
+
+
+_unspecified = object()
+
+
+class BaseResponse(object):
+ content_type = None
+ headers = None
+
+ stream = False
+
+ def __init__(self, method, url, match_querystring=_unspecified):
+ self.method = method
+ # ensure the url has a default path set if the url is a string
+ self.url = _ensure_url_default_path(url)
+ self.match_querystring = self._should_match_querystring(match_querystring)
+ self.call_count = 0
+
+ def __eq__(self, other):
+ if not isinstance(other, BaseResponse):
+ return False
+
+ if self.method != other.method:
+ return False
+
+        # Can't simply do an equality check on the objects directly here since
+        # __eq__ isn't implemented for compiled regexes. It might seem to work
+        # because the re module caches and can return the same pattern
+        # instances, but it doesn't in all cases.
+ self_url = self.url.pattern if isinstance(self.url, Pattern) else self.url
+ other_url = other.url.pattern if isinstance(other.url, Pattern) else other.url
+
+ return self_url == other_url
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ def _url_matches_strict(self, url, other):
+ url_parsed = urlparse(url)
+ other_parsed = urlparse(other)
+
+ if url_parsed[:3] != other_parsed[:3]:
+ return False
+
+ url_qsl = sorted(parse_qsl(url_parsed.query))
+ other_qsl = sorted(parse_qsl(other_parsed.query))
+
+ if len(url_qsl) != len(other_qsl):
+ return False
+
+ for (a_k, a_v), (b_k, b_v) in zip(url_qsl, other_qsl):
+ if a_k != b_k:
+ return False
+
+ if a_v != b_v:
+ return False
+
+ return True
+
+ def _should_match_querystring(self, match_querystring_argument):
+ if match_querystring_argument is not _unspecified:
+ return match_querystring_argument
+
+ if isinstance(self.url, Pattern):
+ # the old default from <= 0.9.0
+ return False
+
+ return bool(urlparse(self.url).query)
+
+ def _url_matches(self, url, other, match_querystring=False):
+ if _is_string(url):
+ if _has_unicode(url):
+ url = _clean_unicode(url)
+ if not isinstance(other, six.text_type):
+ other = other.encode("ascii").decode("utf8")
+ if match_querystring:
+ return self._url_matches_strict(url, other)
+
+ else:
+ url_without_qs = url.split("?", 1)[0]
+ other_without_qs = other.split("?", 1)[0]
+ return url_without_qs == other_without_qs
+
+ elif isinstance(url, Pattern) and url.match(other):
+ return True
+
+ else:
+ return False
+
+ def get_headers(self):
+ headers = {}
+ if self.content_type is not None:
+ headers["Content-Type"] = self.content_type
+ if self.headers:
+ headers.update(self.headers)
+ return headers
+
+ def get_response(self, request):
+ raise NotImplementedError
+
+ def matches(self, request):
+ if request.method != self.method:
+ return False
+
+ if not self._url_matches(self.url, request.url, self.match_querystring):
+ return False
+
+ return True
+
+
+class Response(BaseResponse):
+ def __init__(
+ self,
+ method,
+ url,
+ body="",
+ json=None,
+ status=200,
+ headers=None,
+ stream=False,
+ content_type=UNSET,
+ **kwargs
+ ):
+ # if we were passed a `json` argument,
+ # override the body and content_type
+ if json is not None:
+ assert not body
+ body = json_module.dumps(json)
+ if content_type is UNSET:
+ content_type = "application/json"
+
+ if content_type is UNSET:
+ content_type = "text/plain"
+
+ # body must be bytes
+ if isinstance(body, six.text_type):
+ body = body.encode("utf-8")
+
+ self.body = body
+ self.status = status
+ self.headers = headers
+ self.stream = stream
+ self.content_type = content_type
+ super(Response, self).__init__(method, url, **kwargs)
+
+ def get_response(self, request):
+ if self.body and isinstance(self.body, Exception):
+ raise self.body
+
+ headers = self.get_headers()
+ status = self.status
+ body = _handle_body(self.body)
+
+ return HTTPResponse(
+ status=status,
+ reason=six.moves.http_client.responses.get(status),
+ body=body,
+ headers=headers,
+ preload_content=False,
+ )
+
+
+class CallbackResponse(BaseResponse):
+ def __init__(
+ self, method, url, callback, stream=False, content_type="text/plain", **kwargs
+ ):
+ self.callback = callback
+ self.stream = stream
+ self.content_type = content_type
+ super(CallbackResponse, self).__init__(method, url, **kwargs)
+
+ def get_response(self, request):
+ headers = self.get_headers()
+
+ result = self.callback(request)
+ if isinstance(result, Exception):
+ raise result
+
+ status, r_headers, body = result
+ if isinstance(body, Exception):
+ raise body
+
+ body = _handle_body(body)
+ headers.update(r_headers)
+
+ return HTTPResponse(
+ status=status,
+ reason=six.moves.http_client.responses.get(status),
+ body=body,
+ headers=headers,
+ preload_content=False,
+ )
+
+
+class RequestsMock(object):
+ DELETE = "DELETE"
+ GET = "GET"
+ HEAD = "HEAD"
+ OPTIONS = "OPTIONS"
+ PATCH = "PATCH"
+ POST = "POST"
+ PUT = "PUT"
+ response_callback = None
+
+ def __init__(
+ self,
+ assert_all_requests_are_fired=True,
+ response_callback=None,
+ passthru_prefixes=(),
+ target="requests.adapters.HTTPAdapter.send",
+ ):
+ self._calls = CallList()
+ self.reset()
+ self.assert_all_requests_are_fired = assert_all_requests_are_fired
+ self.response_callback = response_callback
+ self.passthru_prefixes = tuple(passthru_prefixes)
+ self.target = target
+
+ def reset(self):
+ self._matches = []
+ self._calls.reset()
+
+ def add(
+ self,
+ method=None, # method or ``Response``
+ url=None,
+ body="",
+ adding_headers=None,
+ *args,
+ **kwargs
+ ):
+ """
+ A basic request:
+
+ >>> responses.add(responses.GET, 'http://example.com')
+
+ You can also directly pass an object which implements the
+ ``BaseResponse`` interface:
+
+ >>> responses.add(Response(...))
+
+ A JSON payload:
+
+ >>> responses.add(
+ >>> method='GET',
+ >>> url='http://example.com',
+ >>> json={'foo': 'bar'},
+ >>> )
+
+ Custom headers:
+
+ >>> responses.add(
+ >>> method='GET',
+ >>> url='http://example.com',
+ >>> headers={'X-Header': 'foo'},
+ >>> )
+
+
+ Strict query string matching:
+
+ >>> responses.add(
+ >>> method='GET',
+ >>> url='http://example.com?foo=bar',
+ >>> match_querystring=True
+ >>> )
+ """
+ if isinstance(method, BaseResponse):
+ self._matches.append(method)
+ return
+
+ if adding_headers is not None:
+ kwargs.setdefault("headers", adding_headers)
+
+ self._matches.append(Response(method=method, url=url, body=body, **kwargs))
+
+ def add_passthru(self, prefix):
+ """
+ Register a URL prefix to passthru any non-matching mock requests to.
+
+ For example, to allow any request to 'https://example.com', but require
+ mocks for the remainder, you would add the prefix as so:
+
+ >>> responses.add_passthru('https://example.com')
+ """
+ if _has_unicode(prefix):
+ prefix = _clean_unicode(prefix)
+ self.passthru_prefixes += (prefix,)
+
+ def remove(self, method_or_response=None, url=None):
+ """
+ Removes a response previously added using ``add()``, identified
+ either by a response object inheriting ``BaseResponse`` or
+ ``method`` and ``url``. Removes all matching responses.
+
+ >>> response.add(responses.GET, 'http://example.org')
+ >>> response.remove(responses.GET, 'http://example.org')
+ """
+ if isinstance(method_or_response, BaseResponse):
+ response = method_or_response
+ else:
+ response = BaseResponse(method=method_or_response, url=url)
+
+ while response in self._matches:
+ self._matches.remove(response)
+
+ def replace(self, method_or_response=None, url=None, body="", *args, **kwargs):
+ """
+ Replaces a response previously added using ``add()``. The signature
+ is identical to ``add()``. The response is identified using ``method``
+ and ``url``, and the first matching response is replaced.
+
+ >>> responses.add(responses.GET, 'http://example.org', json={'data': 1})
+ >>> responses.replace(responses.GET, 'http://example.org', json={'data': 2})
+ """
+ if isinstance(method_or_response, BaseResponse):
+ response = method_or_response
+ else:
+ response = Response(method=method_or_response, url=url, body=body, **kwargs)
+
+ index = self._matches.index(response)
+ self._matches[index] = response
+
+ def add_callback(
+ self, method, url, callback, match_querystring=False, content_type="text/plain"
+ ):
+ # ensure the url has a default path set if the url is a string
+ # url = _ensure_url_default_path(url, match_querystring)
+
+ self._matches.append(
+ CallbackResponse(
+ url=url,
+ method=method,
+ callback=callback,
+ content_type=content_type,
+ match_querystring=match_querystring,
+ )
+ )
+
+ @property
+ def calls(self):
+ return self._calls
+
+ def __enter__(self):
+ self.start()
+ return self
+
+ def __exit__(self, type, value, traceback):
+ success = type is None
+ self.stop(allow_assert=success)
+ self.reset()
+ return success
+
+ def activate(self, func):
+ return get_wrapped(func, self)
+
+ def _find_match(self, request):
+ found = None
+ found_match = None
+ for i, match in enumerate(self._matches):
+ if match.matches(request):
+ if found is None:
+ found = i
+ found_match = match
+ else:
+ # Multiple matches found. Remove & return the first match.
+ return self._matches.pop(found)
+
+ return found_match
+
+ def _on_request(self, adapter, request, **kwargs):
+ match = self._find_match(request)
+ resp_callback = self.response_callback
+
+ if match is None:
+ if request.url.startswith(self.passthru_prefixes):
+ logger.info("request.allowed-passthru", extra={"url": request.url})
+ return _real_send(adapter, request, **kwargs)
+
+ error_msg = (
+ "Connection refused by Responses: {0} {1} doesn't "
+ "match Responses Mock".format(request.method, request.url)
+ )
+ response = ConnectionError(error_msg)
+ response.request = request
+
+ self._calls.add(request, response)
+ response = resp_callback(response) if resp_callback else response
+ raise response
+
+ try:
+ response = adapter.build_response(request, match.get_response(request))
+ except Exception as response:
+ match.call_count += 1
+ self._calls.add(request, response)
+ response = resp_callback(response) if resp_callback else response
+ raise
+
+ if not match.stream:
+ response.content # NOQA
+
+ try:
+ response.cookies = _cookies_from_headers(response.headers)
+ except (KeyError, TypeError):
+ pass
+
+ response = resp_callback(response) if resp_callback else response
+ match.call_count += 1
+ self._calls.add(request, response)
+ return response
+
+ def start(self):
+ def unbound_on_send(adapter, request, *a, **kwargs):
+ return self._on_request(adapter, request, *a, **kwargs)
+
+ self._patcher = std_mock.patch(target=self.target, new=unbound_on_send)
+ self._patcher.start()
+
+ def stop(self, allow_assert=True):
+ self._patcher.stop()
+ if not self.assert_all_requests_are_fired:
+ return
+
+ if not allow_assert:
+ return
+
+ not_called = [m for m in self._matches if m.call_count == 0]
+ if not_called:
+ raise AssertionError(
+ "Not all requests have been executed {0!r}".format(
+ [(match.method, match.url) for match in not_called]
+ )
+ )
+
+
+# expose default mock namespace
+mock = _default_mock = RequestsMock(assert_all_requests_are_fired=False)
+__all__ = ["CallbackResponse", "Response", "RequestsMock"]
+for __attr in (a for a in dir(_default_mock) if not a.startswith("_")):
+ __all__.append(__attr)
+ globals()[__attr] = getattr(_default_mock, __attr)
diff --git a/third_party/python/rsa/LICENSE b/third_party/python/rsa/LICENSE
new file mode 100644
index 0000000000..da76c9d7f8
--- /dev/null
+++ b/third_party/python/rsa/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/python/rsa/MANIFEST.in b/third_party/python/rsa/MANIFEST.in
new file mode 100644
index 0000000000..8cf0021b9a
--- /dev/null
+++ b/third_party/python/rsa/MANIFEST.in
@@ -0,0 +1,5 @@
+include README
+include LICENSE
+include *.py
+recursive-include rsa *.py
+recursive-include tests *.py
diff --git a/third_party/python/rsa/PKG-INFO b/third_party/python/rsa/PKG-INFO
new file mode 100644
index 0000000000..399ba7b3e9
--- /dev/null
+++ b/third_party/python/rsa/PKG-INFO
@@ -0,0 +1,18 @@
+Metadata-Version: 1.1
+Name: rsa
+Version: 3.1.4
+Summary: Pure-Python RSA implementation
+Home-page: http://stuvel.eu/rsa
+Author: Sybren A. Stuvel
+Author-email: sybren@stuvel.eu
+License: ASL 2
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Security :: Cryptography
diff --git a/third_party/python/rsa/README.rst b/third_party/python/rsa/README.rst
new file mode 100644
index 0000000000..9f348636d7
--- /dev/null
+++ b/third_party/python/rsa/README.rst
@@ -0,0 +1,31 @@
+Pure Python RSA implementation
+==============================
+
+`Python-RSA`_ is a pure-Python RSA implementation. It supports
+encryption and decryption, signing and verifying signatures, and key
+generation according to PKCS#1 version 1.5. It can be used as a Python
+library as well as on the commandline. The code was mostly written by
+Sybren A. Stüvel.
+
+Documentation can be found at the Python-RSA homepage:
+http://stuvel.eu/rsa
+
+Download and install using::
+
+ pip install rsa
+
+or::
+
+ easy_install rsa
+
+or download it from the `Python Package Index`_.
+
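+A minimal usage sketch, assuming the documented ``newkeys``, ``encrypt`` and
+``decrypt`` functions::
+
+    import rsa
+
+    # generate a small demo keypair; real keys should be much larger
+    (pub_key, priv_key) = rsa.newkeys(512)
+    ciphertext = rsa.encrypt(b'secret', pub_key)
+    assert rsa.decrypt(ciphertext, priv_key) == b'secret'
+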
+The source code is maintained in a `Mercurial repository`_ and is
+licensed under the `Apache License, version 2.0`_.
+
+
+.. _`Python-RSA`: http://stuvel.eu/rsa
+.. _`Mercurial repository`: https://bitbucket.org/sybren/python-rsa
+.. _`Python Package Index`: http://pypi.python.org/pypi/rsa
+.. _`Apache License, version 2.0`: http://www.apache.org/licenses/LICENSE-2.0
+
diff --git a/third_party/python/rsa/create_timing_table.py b/third_party/python/rsa/create_timing_table.py
new file mode 100755
index 0000000000..b1b2871b3d
--- /dev/null
+++ b/third_party/python/rsa/create_timing_table.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+import time
+import rsa
+
+poolsize = 8
+accurate = True
+
+def run_speed_test(bitsize):
+
+ iterations = 0
+ start = end = time.time()
+
+    # At least 10 iterations, and run for at least 2 seconds
+ while iterations < 10 or end - start < 2:
+ iterations += 1
+ rsa.newkeys(bitsize, accurate=accurate, poolsize=poolsize)
+ end = time.time()
+
+ duration = end - start
+ dur_per_call = duration / iterations
+
+ print '%5i bit: %9.3f sec. (%i iterations over %.1f seconds)' % (bitsize,
+ dur_per_call, iterations, duration)
+
+for bitsize in (128, 256, 384, 512, 1024, 2048, 3072, 4096):
+ run_speed_test(bitsize)
+
+
diff --git a/third_party/python/rsa/playstuff.py b/third_party/python/rsa/playstuff.py
new file mode 100755
index 0000000000..bfb941b88c
--- /dev/null
+++ b/third_party/python/rsa/playstuff.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+import re
+import rsa
+
+def _logon( username, password ):
+    # Retrieve the public key
+ # network stuff # req = urllib2.Request(AAA_GET_KEY, headers={'User-Agent': CLIENT_ID})
+ # network stuff # response = urllib2.urlopen(req)
+ # network stuff # html = response.read()
+ # network stuff # print response.info() # DEBUG
+ # network stuff # print html # DEBUG
+
+ # replacement for network stuff #
+ html="<x509PublicKey>30820122300d06092a864886f70d01010105000382010f003082010a0282010100dad8e3c084137bab285e869ae99a5de9752a095753680e9128adbe981e8141225704e558b8ee437836ec8c5460514efae61550bfdd883549981458bae388c9490b5ab43475068b169b32da446b0aae2dfbb3a5f425c74b284ced3f57ed33b30ec7b4b95a8216f8b063e34af2c84fef58bab381f3b79b80d06b687e0b5fc7aaeb311a88389ab7aa1422ae0b58956bb9e91c5cbf2b98422b05e1eacb82e29938566f6f05274294a8c596677c950ce97dcd003709d008f1ae6418ce5bf55ad2bf921318c6e31b324bdda4b4f12ff1fd86b5b71e647d1fc175aea137ba0ff869d5fbcf9ed0289fe7da3619c1204fc42d616462ac1b6a4e6ca2655d44bce039db519d0203010001</x509PublicKey>"
+ # end replacement for network stuff #
+
+ # This shall pick the key
+ hexstring = re.compile('<x509PublicKey[^>]*>([0-9a-fA-F]+)</x509PublicKey>')
+
+ # pick the key and convert it to der format
+ hex_pub_der = hexstring.search(html).group(1)
+ pub_der = hex_pub_der.decode('hex')
+
+ # Convert it to a public key
+ pub_key = rsa.PublicKey.load_pkcs1_openssl_der(pub_der)
+
+ # encode the password
+ enc_pass = rsa.encrypt(password, pub_key)
+
+ # and hex-encode it
+ hex_pass = enc_pass.encode('hex')
+
+# _logon('me', 'MyPass')
+
+import timeit
+timeit.timeit('_logon( "me", "MyPass" )',
+ setup='from __main__ import _logon',
+ number=1000)
+
+
diff --git a/third_party/python/rsa/rsa.egg-info/PKG-INFO b/third_party/python/rsa/rsa.egg-info/PKG-INFO
new file mode 100644
index 0000000000..399ba7b3e9
--- /dev/null
+++ b/third_party/python/rsa/rsa.egg-info/PKG-INFO
@@ -0,0 +1,18 @@
+Metadata-Version: 1.1
+Name: rsa
+Version: 3.1.4
+Summary: Pure-Python RSA implementation
+Home-page: http://stuvel.eu/rsa
+Author: Sybren A. Stuvel
+Author-email: sybren@stuvel.eu
+License: ASL 2
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Information Technology
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Security :: Cryptography
diff --git a/third_party/python/rsa/rsa.egg-info/SOURCES.txt b/third_party/python/rsa/rsa.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..58d22b9c2e
--- /dev/null
+++ b/third_party/python/rsa/rsa.egg-info/SOURCES.txt
@@ -0,0 +1,46 @@
+LICENSE
+MANIFEST.in
+README.rst
+create_timing_table.py
+playstuff.py
+run_tests.py
+setup.cfg
+setup.py
+rsa/__init__.py
+rsa/_compat.py
+rsa/_version133.py
+rsa/_version200.py
+rsa/asn1.py
+rsa/bigfile.py
+rsa/cli.py
+rsa/common.py
+rsa/core.py
+rsa/key.py
+rsa/parallel.py
+rsa/pem.py
+rsa/pkcs1.py
+rsa/prime.py
+rsa/randnum.py
+rsa/transform.py
+rsa/util.py
+rsa/varblock.py
+rsa.egg-info/PKG-INFO
+rsa.egg-info/SOURCES.txt
+rsa.egg-info/dependency_links.txt
+rsa.egg-info/entry_points.txt
+rsa.egg-info/requires.txt
+rsa.egg-info/top_level.txt
+tests/__init__.py
+tests/constants.py
+tests/py2kconstants.py
+tests/py3kconstants.py
+tests/test_bigfile.py
+tests/test_common.py
+tests/test_compat.py
+tests/test_integers.py
+tests/test_load_save_keys.py
+tests/test_pem.py
+tests/test_pkcs1.py
+tests/test_strings.py
+tests/test_transform.py
+tests/test_varblock.py \ No newline at end of file
diff --git a/third_party/python/rsa/rsa.egg-info/dependency_links.txt b/third_party/python/rsa/rsa.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/rsa/rsa.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/rsa/rsa.egg-info/entry_points.txt b/third_party/python/rsa/rsa.egg-info/entry_points.txt
new file mode 100644
index 0000000000..65d0e29759
--- /dev/null
+++ b/third_party/python/rsa/rsa.egg-info/entry_points.txt
@@ -0,0 +1,10 @@
+[console_scripts]
+pyrsa-encrypt = rsa.cli:encrypt
+pyrsa-keygen = rsa.cli:keygen
+pyrsa-priv2pub = rsa.util:private_to_public
+pyrsa-sign = rsa.cli:sign
+pyrsa-verify = rsa.cli:verify
+pyrsa-encrypt-bigfile = rsa.cli:encrypt_bigfile
+pyrsa-decrypt-bigfile = rsa.cli:decrypt_bigfile
+pyrsa-decrypt = rsa.cli:decrypt
+
diff --git a/third_party/python/rsa/rsa.egg-info/requires.txt b/third_party/python/rsa/rsa.egg-info/requires.txt
new file mode 100644
index 0000000000..ae4e6eb9b9
--- /dev/null
+++ b/third_party/python/rsa/rsa.egg-info/requires.txt
@@ -0,0 +1 @@
+pyasn1 >= 0.1.3 \ No newline at end of file
diff --git a/third_party/python/rsa/rsa.egg-info/top_level.txt b/third_party/python/rsa/rsa.egg-info/top_level.txt
new file mode 100644
index 0000000000..703f551006
--- /dev/null
+++ b/third_party/python/rsa/rsa.egg-info/top_level.txt
@@ -0,0 +1 @@
+rsa
diff --git a/third_party/python/rsa/rsa/__init__.py b/third_party/python/rsa/rsa/__init__.py
new file mode 100644
index 0000000000..2d01c12e0f
--- /dev/null
+++ b/third_party/python/rsa/rsa/__init__.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""RSA module
+
+Module for calculating large primes, and RSA encryption, decryption, signing
+and verification. Includes generating public and private keys.
+
+WARNING: this implementation does not use random padding, compression of the
+cleartext input to prevent repetitions, or other common security improvements.
+Use with care.
+
+If you want to have a more secure implementation, use the functions from the
+``rsa.pkcs1`` module.
+
+"""
+
+__author__ = "Sybren Stuvel, Barry Mead and Yesudeep Mangalapilly"
+__date__ = "2014-02-22"
+__version__ = '3.1.4'
+
+from rsa.key import newkeys, PrivateKey, PublicKey
+from rsa.pkcs1 import encrypt, decrypt, sign, verify, DecryptionError, \
+ VerificationError
+
+# Do doctest if we're run directly
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
+
+__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify", 'PublicKey',
+ 'PrivateKey', 'DecryptionError', 'VerificationError']
+
diff --git a/third_party/python/rsa/rsa/_compat.py b/third_party/python/rsa/rsa/_compat.py
new file mode 100644
index 0000000000..3c4eb81b13
--- /dev/null
+++ b/third_party/python/rsa/rsa/_compat.py
@@ -0,0 +1,160 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Python compatibility wrappers."""
+
+
+from __future__ import absolute_import
+
+import sys
+from struct import pack
+
+try:
+ MAX_INT = sys.maxsize
+except AttributeError:
+ MAX_INT = sys.maxint
+
+MAX_INT64 = (1 << 63) - 1
+MAX_INT32 = (1 << 31) - 1
+MAX_INT16 = (1 << 15) - 1
+
+# Determine the word size of the processor.
+if MAX_INT == MAX_INT64:
+ # 64-bit processor.
+ MACHINE_WORD_SIZE = 64
+elif MAX_INT == MAX_INT32:
+ # 32-bit processor.
+ MACHINE_WORD_SIZE = 32
+else:
+ # Else we just assume 64-bit processor keeping up with modern times.
+ MACHINE_WORD_SIZE = 64
+
+
+try:
+ # < Python3
+ unicode_type = unicode
+ have_python3 = False
+except NameError:
+ # Python3.
+ unicode_type = str
+ have_python3 = True
+
+# Fake byte literals.
+if str is unicode_type:
+ def byte_literal(s):
+ return s.encode('latin1')
+else:
+ def byte_literal(s):
+ return s
+
+# ``long`` is no more. Do type detection using this instead.
+try:
+ integer_types = (int, long)
+except NameError:
+ integer_types = (int,)
+
+b = byte_literal
+
+try:
+ # Python 2.6 or higher.
+ bytes_type = bytes
+except NameError:
+ # Python 2.5
+ bytes_type = str
+
+
+# To avoid calling b() multiple times in tight loops.
+ZERO_BYTE = b('\x00')
+EMPTY_BYTE = b('')
+
+
+def is_bytes(obj):
+ """
+ Determines whether the given value is a byte string.
+
+ :param obj:
+ The value to test.
+ :returns:
+ ``True`` if ``value`` is a byte string; ``False`` otherwise.
+ """
+ return isinstance(obj, bytes_type)
+
+
+def is_integer(obj):
+ """
+ Determines whether the given value is an integer.
+
+ :param obj:
+ The value to test.
+ :returns:
+ ``True`` if ``value`` is an integer; ``False`` otherwise.
+ """
+ return isinstance(obj, integer_types)
+
+
+def byte(num):
+ """
+ Converts a number between 0 and 255 (both inclusive) to a base-256 (byte)
+ representation.
+
+    Use it as a replacement for ``chr`` where you are expecting a byte
+    because this will work on all current versions of Python.
+
+ :param num:
+ An unsigned integer between 0 and 255 (both inclusive).
+ :returns:
+ A single byte.
+ """
+ return pack("B", num)
+
+
+def get_word_alignment(num, force_arch=64,
+ _machine_word_size=MACHINE_WORD_SIZE):
+ """
+ Returns alignment details for the given number based on the platform
+ Python is running on.
+
+ :param num:
+ Unsigned integral number.
+ :param force_arch:
+ If you don't want to use 64-bit unsigned chunks, set this to
+ anything other than 64. 32-bit chunks will be preferred then.
+ Default 64 will be used when on a 64-bit machine.
+ :param _machine_word_size:
+ (Internal) The machine word size used for alignment.
+ :returns:
+ 4-tuple::
+
+ (word_bits, word_bytes,
+ max_uint, packing_format_type)
+ """
+ max_uint64 = 0xffffffffffffffff
+ max_uint32 = 0xffffffff
+ max_uint16 = 0xffff
+ max_uint8 = 0xff
+
+ if force_arch == 64 and _machine_word_size >= 64 and num > max_uint32:
+ # 64-bit unsigned integer.
+ return 64, 8, max_uint64, "Q"
+ elif num > max_uint16:
+ # 32-bit unsigned integer
+ return 32, 4, max_uint32, "L"
+ elif num > max_uint8:
+ # 16-bit unsigned integer.
+ return 16, 2, max_uint16, "H"
+ else:
+ # 8-bit unsigned integer.
+ return 8, 1, max_uint8, "B"
diff --git a/third_party/python/rsa/rsa/_version133.py b/third_party/python/rsa/rsa/_version133.py
new file mode 100644
index 0000000000..230a03c84b
--- /dev/null
+++ b/third_party/python/rsa/rsa/_version133.py
@@ -0,0 +1,442 @@
+"""RSA module
+
+Module for calculating large primes, and RSA encryption, decryption,
+signing and verification. Includes generating public and private keys.
+
+WARNING: this code implements the mathematics of RSA. It is not suitable for
+real-world secure cryptography purposes. It has not been reviewed by a security
+expert. It does not include padding of data. There are many ways in which the
+output of this module, when used without any modification, can be successfully
+attacked.
+"""
+
+__author__ = "Sybren Stuvel, Marloes de Boer and Ivo Tamboer"
+__date__ = "2010-02-05"
+__version__ = '1.3.3'
+
+# NOTE: Python's modulo can return negative numbers. We compensate for
+# this behaviour using the abs() function
+
+from cPickle import dumps, loads
+import base64
+import math
+import os
+import random
+import sys
+import types
+import zlib
+
+from rsa._compat import byte
+
+# Display a warning that this insecure version is imported.
+import warnings
+warnings.warn('Insecure version of the RSA module is imported as %s, be careful'
+ % __name__)
+
+def gcd(p, q):
+ """Returns the greatest common divisor of p and q
+
+
+ >>> gcd(42, 6)
+ 6
+ """
+ if p<q: return gcd(q, p)
+ if q == 0: return p
+ return gcd(q, abs(p%q))
+
+def bytes2int(bytes):
+ """Converts a list of bytes or a string to an integer
+
+    >>> (128*256 + 64)*256 + 15
+ 8405007
+ >>> l = [128, 64, 15]
+ >>> bytes2int(l)
+ 8405007
+ """
+
+ if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
+ raise TypeError("You must pass a string or a list")
+
+ # Convert byte stream to integer
+ integer = 0
+ for byte in bytes:
+ integer *= 256
+ if type(byte) is types.StringType: byte = ord(byte)
+ integer += byte
+
+ return integer
+
+def int2bytes(number):
+ """Converts a number to a string of bytes
+
+ >>> bytes2int(int2bytes(123456789))
+ 123456789
+ """
+
+ if not (type(number) is types.LongType or type(number) is types.IntType):
+ raise TypeError("You must pass a long or an int")
+
+ string = ""
+
+ while number > 0:
+ string = "%s%s" % (byte(number & 0xFF), string)
+ number /= 256
+
+ return string
+
+def fast_exponentiation(a, p, n):
+ """Calculates r = a^p mod n
+ """
+ result = a % n
+ remainders = []
+ while p != 1:
+ remainders.append(p & 1)
+ p = p >> 1
+ while remainders:
+ rem = remainders.pop()
+ result = ((a ** rem) * result ** 2) % n
+ return result
+
+def read_random_int(nbits):
+ """Reads a random integer of approximately nbits bits rounded up
+ to whole bytes"""
+
+ nbytes = ceil(nbits/8.)
+ randomdata = os.urandom(nbytes)
+ return bytes2int(randomdata)
+
+def ceil(x):
+ """ceil(x) -> int(math.ceil(x))"""
+
+ return int(math.ceil(x))
+
+def randint(minvalue, maxvalue):
+ """Returns a random integer x with minvalue <= x <= maxvalue"""
+
+ # Safety - get a lot of random data even if the range is fairly
+ # small
+ min_nbits = 32
+
+ # The range of the random numbers we need to generate
+ range = maxvalue - minvalue
+
+ # Which is this number of bytes
+ rangebytes = ceil(math.log(range, 2) / 8.)
+
+ # Convert to bits, but make sure it's always at least min_nbits*2
+ rangebits = max(rangebytes * 8, min_nbits * 2)
+
+ # Take a random number of bits between min_nbits and rangebits
+ nbits = random.randint(min_nbits, rangebits)
+
+ return (read_random_int(nbits) % range) + minvalue
+
+def fermat_little_theorem(p):
+ """Returns 1 if p may be prime, and something else if p definitely
+ is not prime"""
+
+ a = randint(1, p-1)
+ return fast_exponentiation(a, p-1, p)
+
+def jacobi(a, b):
+ """Calculates the value of the Jacobi symbol (a/b)
+ """
+
+ if a % b == 0:
+ return 0
+ result = 1
+ while a > 1:
+ if a & 1:
+ if ((a-1)*(b-1) >> 2) & 1:
+ result = -result
+ b, a = a, b % a
+ else:
+ if ((b ** 2 - 1) >> 3) & 1:
+ result = -result
+ a = a >> 1
+ return result
+
+def jacobi_witness(x, n):
+ """Returns False if n is an Euler pseudo-prime with base x, and
+ True otherwise.
+ """
+
+ j = jacobi(x, n) % n
+ f = fast_exponentiation(x, (n-1)/2, n)
+
+ if j == f: return False
+ return True
+
+def randomized_primality_testing(n, k):
+ """Calculates whether n is composite (which is always correct) or
+ prime (which is incorrect with error probability 2**-k)
+
+    Returns False if the number is composite, and True if it's
+ probably prime.
+ """
+
+ q = 0.5 # Property of the jacobi_witness function
+
+ # t = int(math.ceil(k / math.log(1/q, 2)))
+ t = ceil(k / math.log(1/q, 2))
+ for i in range(t+1):
+ x = randint(1, n-1)
+ if jacobi_witness(x, n): return False
+
+ return True
+
+def is_prime(number):
+ """Returns True if the number is prime, and False otherwise.
+
+ >>> is_prime(42)
+ False
+ >>> is_prime(41)
+ True
+ """
+
+ """
+ if not fermat_little_theorem(number) == 1:
+ # Not prime, according to Fermat's little theorem
+ return False
+ """
+
+ if randomized_primality_testing(number, 5):
+ # Prime, according to Jacobi
+ return True
+
+ # Not prime
+ return False
+
+
+def getprime(nbits):
+ """Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
+ other words: nbits is rounded up to whole bytes.
+
+ >>> p = getprime(8)
+ >>> is_prime(p-1)
+ False
+ >>> is_prime(p)
+ True
+ >>> is_prime(p+1)
+ False
+ """
+
+ nbytes = int(math.ceil(nbits/8.))
+
+ while True:
+ integer = read_random_int(nbits)
+
+ # Make sure it's odd
+ integer |= 1
+
+ # Test for primeness
+ if is_prime(integer): break
+
+ # Retry if not prime
+
+ return integer
+
+def are_relatively_prime(a, b):
+ """Returns True if a and b are relatively prime, and False if they
+ are not.
+
+ >>> are_relatively_prime(2, 3)
+ True
+ >>> are_relatively_prime(2, 4)
+ False
+ """
+
+ d = gcd(a, b)
+ return (d == 1)
+
+def find_p_q(nbits):
+ """Returns a tuple of two different primes of nbits bits"""
+
+ p = getprime(nbits)
+ while True:
+ q = getprime(nbits)
+ if not q == p: break
+
+ return (p, q)
+
+def extended_euclid_gcd(a, b):
+ """Returns a tuple (d, i, j) such that d = gcd(a, b) = ia + jb
+ """
+
+ if b == 0:
+ return (a, 1, 0)
+
+ q = abs(a % b)
+ r = long(a / b)
+ (d, k, l) = extended_euclid_gcd(b, q)
+
+ return (d, l, k - l*r)
+
+# Main function: calculate encryption and decryption keys
+def calculate_keys(p, q, nbits):
+ """Calculates an encryption and a decryption key for p and q, and
+ returns them as a tuple (e, d)"""
+
+ n = p * q
+ phi_n = (p-1) * (q-1)
+
+ while True:
+ # Make sure e has enough bits so we ensure "wrapping" through
+ # modulo n
+ e = getprime(max(8, nbits/2))
+ if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
+
+ (d, i, j) = extended_euclid_gcd(e, phi_n)
+
+ if not d == 1:
+ raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
+
+ if not (e * i) % phi_n == 1:
+ raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
+
+ return (e, i)
+
+
+def gen_keys(nbits):
+ """Generate RSA keys of nbits bits. Returns (p, q, e, d).
+
+ Note: this can take a long time, depending on the key size.
+ """
+
+ while True:
+ (p, q) = find_p_q(nbits)
+ (e, d) = calculate_keys(p, q, nbits)
+
+ # For some reason, d is sometimes negative. We don't know how
+ # to fix it (yet), so we keep trying until everything is shiny
+ if d > 0: break
+
+ return (p, q, e, d)
+
+def gen_pubpriv_keys(nbits):
+ """Generates public and private keys, and returns them as (pub,
+ priv).
+
+ The public key consists of a dict {'e': ..., 'n': ...}. The private
+ key consists of a dict {'d': ..., 'p': ..., 'q': ...}.
+ """
+
+ (p, q, e, d) = gen_keys(nbits)
+
+ return ( {'e': e, 'n': p*q}, {'d': d, 'p': p, 'q': q} )
+
+def encrypt_int(message, ekey, n):
+ """Encrypts a message using encryption key 'ekey', working modulo
+ n"""
+
+ if type(message) is types.IntType:
+ return encrypt_int(long(message), ekey, n)
+
+ if not type(message) is types.LongType:
+ raise TypeError("You must pass a long or an int")
+
+ if message > 0 and \
+ math.floor(math.log(message, 2)) > math.floor(math.log(n, 2)):
+ raise OverflowError("The message is too long")
+
+ return fast_exponentiation(message, ekey, n)
+
+def decrypt_int(cyphertext, dkey, n):
+ """Decrypts a cypher text using the decryption key 'dkey', working
+ modulo n"""
+
+ return encrypt_int(cyphertext, dkey, n)
+
+def sign_int(message, dkey, n):
+ """Signs 'message' using key 'dkey', working modulo n"""
+
+ return decrypt_int(message, dkey, n)
+
+def verify_int(signed, ekey, n):
+ """verifies 'signed' using key 'ekey', working modulo n"""
+
+ return encrypt_int(signed, ekey, n)
+
+def picklechops(chops):
+ """Pickles and base64encodes it's argument chops"""
+
+ value = zlib.compress(dumps(chops))
+ encoded = base64.encodestring(value)
+ return encoded.strip()
+
+def unpicklechops(string):
+ """base64decodes and unpickes it's argument string into chops"""
+
+ return loads(zlib.decompress(base64.decodestring(string)))
+
+def chopstring(message, key, n, funcref):
+ """Splits 'message' into chops that are at most as long as n,
+ converts these into integers, and calls funcref(integer, key, n)
+ for each chop.
+
+ Used by 'encrypt' and 'sign'.
+ """
+
+ msglen = len(message)
+ mbits = msglen * 8
+ nbits = int(math.floor(math.log(n, 2)))
+ nbytes = nbits / 8
+ blocks = msglen / nbytes
+
+ if msglen % nbytes > 0:
+ blocks += 1
+
+ cypher = []
+
+ for bindex in range(blocks):
+ offset = bindex * nbytes
+ block = message[offset:offset+nbytes]
+ value = bytes2int(block)
+ cypher.append(funcref(value, key, n))
+
+ return picklechops(cypher)
+
+def gluechops(chops, key, n, funcref):
+ """Glues chops back together into a string. calls
+ funcref(integer, key, n) for each chop.
+
+ Used by 'decrypt' and 'verify'.
+ """
+ message = ""
+
+ chops = unpicklechops(chops)
+
+ for cpart in chops:
+ mpart = funcref(cpart, key, n)
+ message += int2bytes(mpart)
+
+ return message
+
+def encrypt(message, key):
+ """Encrypts a string 'message' with the public key 'key'"""
+
+ return chopstring(message, key['e'], key['n'], encrypt_int)
+
+def sign(message, key):
+ """Signs a string 'message' with the private key 'key'"""
+
+ return chopstring(message, key['d'], key['p']*key['q'], decrypt_int)
+
+def decrypt(cypher, key):
+ """Decrypts a cypher with the private key 'key'"""
+
+ return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
+
+def verify(cypher, key):
+ """Verifies a cypher with the public key 'key'"""
+
+ return gluechops(cypher, key['e'], key['n'], encrypt_int)
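+
+# Example round-trip with this legacy API (a sketch; keys this small are
+# insecure and shown for illustration only):
+#
+#     >>> pub, priv = gen_pubpriv_keys(64)
+#     >>> decrypt(encrypt('hello', pub), priv)
+#     'hello'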
+
+# Do doctest if we're not imported
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
+
+__all__ = ["gen_pubpriv_keys", "encrypt", "decrypt", "sign", "verify"]
+
diff --git a/third_party/python/rsa/rsa/_version200.py b/third_party/python/rsa/rsa/_version200.py
new file mode 100644
index 0000000000..f915653857
--- /dev/null
+++ b/third_party/python/rsa/rsa/_version200.py
@@ -0,0 +1,529 @@
+"""RSA module
+
+Module for calculating large primes, and RSA encryption, decryption,
+signing and verification. Includes generating public and private keys.
+
+WARNING: this implementation does not use random padding, compression of the
+cleartext input to prevent repetitions, or other common security improvements.
+Use with care.
+
+"""
+
+__author__ = "Sybren Stuvel, Marloes de Boer, Ivo Tamboer, and Barry Mead"
+__date__ = "2010-02-08"
+__version__ = '2.0'
+
+import math
+import os
+import random
+import sys
+import types
+from rsa._compat import byte
+
+# Display a warning that this insecure version is imported.
+import warnings
+warnings.warn('Insecure version of the RSA module is imported as %s' % __name__)
+
+
+def bit_size(number):
+ """Returns the number of bits required to hold a specific long number"""
+
+ # Count via the binary representation; int(math.ceil(math.log(number, 2)))
+ # miscounts exact powers of two and loses precision for large numbers.
+ return len(bin(number)) - 2 # len minus the '0b' prefix
+
+def gcd(p, q):
+ """Returns the greatest common divisor of p and q
+ >>> gcd(48, 180)
+ 12
+ """
+ # Iterative version is faster and uses much less stack space
+ while q != 0:
+ if p < q: (p,q) = (q,p)
+ (p,q) = (q, p % q)
+ return p
+
+
+def bytes2int(bytes):
+ """Converts a list of bytes or a string to an integer
+
+ >>> (((128 * 256) + 64) * 256) + 15
+ 8405007
+ >>> l = [128, 64, 15]
+ >>> bytes2int(l) #same as bytes2int('\\x80@\\x0f')
+ 8405007
+ """
+
+ if not (type(bytes) is types.ListType or type(bytes) is types.StringType):
+ raise TypeError("You must pass a string or a list")
+
+ # Convert byte stream to integer
+ integer = 0
+ for byte in bytes:
+ integer *= 256
+ if type(byte) is types.StringType: byte = ord(byte)
+ integer += byte
+
+ return integer
+
+def int2bytes(number):
+ """
+ Converts a number to a string of bytes
+ """
+
+ if not (type(number) is types.LongType or type(number) is types.IntType):
+ raise TypeError("You must pass a long or an int")
+
+ string = ""
+
+ while number > 0:
+ string = "%s%s" % (byte(number & 0xFF), string)
+ number /= 256
+
+ return string
+
+def to64(number):
+ """Converts a number in the range of 0 to 63 into base 64 digit
+ character in the range of '0'-'9', 'A'-'Z', 'a'-'z','-','_'.
+
+ >>> to64(10)
+ 'A'
+ """
+
+ if not (type(number) is types.LongType or type(number) is types.IntType):
+ raise TypeError("You must pass a long or an int")
+
+ if 0 <= number <= 9: #00-09 translates to '0' - '9'
+ return byte(number + 48)
+
+ if 10 <= number <= 35:
+ return byte(number + 55) #10-35 translates to 'A' - 'Z'
+
+ if 36 <= number <= 61:
+ return byte(number + 61) #36-61 translates to 'a' - 'z'
+
+ if number == 62: # 62 translates to '-' (minus)
+ return byte(45)
+
+ if number == 63: # 63 translates to '_' (underscore)
+ return byte(95)
+
+ raise ValueError('Invalid Base64 value: %i' % number)
+
+
+def from64(number):
+ """Converts an ordinal character value in the range of
+ 0-9,A-Z,a-z,-,_ to a number in the range of 0-63.
+
+ >>> from64(49)
+ 1
+ """
+
+ if not (type(number) is types.LongType or type(number) is types.IntType):
+ raise TypeError("You must pass a long or an int")
+
+ if 48 <= number <= 57: #ord('0') - ord('9') translates to 0-9
+ return(number - 48)
+
+ if 65 <= number <= 90: #ord('A') - ord('Z') translates to 10-35
+ return(number - 55)
+
+ if 97 <= number <= 122: #ord('a') - ord('z') translates to 36-61
+ return(number - 61)
+
+ if number == 45: #ord('-') translates to 62
+ return(62)
+
+ if number == 95: #ord('_') translates to 63
+ return(63)
+
+ raise ValueError('Invalid Base64 value: %i' % number)
+
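+# to64 and from64 are inverses: for any n in range(64),
+# from64(ord(to64(n))) == n.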
+
+def int2str64(number):
+ """Converts a number to a string of base64 encoded characters in
+ the range of '0'-'9', 'A'-'Z', 'a'-'z', '-', '_'.
+
+ >>> int2str64(123456789)
+ '7MyqL'
+ """
+
+ if not (type(number) is types.LongType or type(number) is types.IntType):
+ raise TypeError("You must pass a long or an int")
+
+ string = ""
+
+ while number > 0:
+ string = "%s%s" % (to64(number & 0x3F), string)
+ number /= 64
+
+ return string
+
+
+def str642int(string):
+ """Converts a base64 encoded string into an integer.
+ The chars of this string are in the range '0'-'9', 'A'-'Z', 'a'-'z', '-', '_'.
+
+ >>> str642int('7MyqL')
+ 123456789
+ """
+
+ if not (type(string) is types.ListType or type(string) is types.StringType):
+ raise TypeError("You must pass a string or a list")
+
+ integer = 0
+ for byte in string:
+ integer *= 64
+ if type(byte) is types.StringType: byte = ord(byte)
+ integer += from64(byte)
+
+ return integer
+
+def read_random_int(nbits):
+ """Reads a random integer of approximately nbits bits rounded up
+ to whole bytes"""
+
+ nbytes = int(math.ceil(nbits/8.))
+ randomdata = os.urandom(nbytes)
+ return bytes2int(randomdata)
+
+def randint(minvalue, maxvalue):
+ """Returns a random integer x with minvalue <= x <= maxvalue"""
+
+ # Safety - get a lot of random data even if the range is fairly
+ # small
+ min_nbits = 32
+
+ # The range of the random numbers we need to generate
+ range = (maxvalue - minvalue) + 1
+
+ # Which is this number of bytes
+ rangebytes = ((bit_size(range) + 7) / 8)
+
+ # Convert to bits, but make sure it's always at least min_nbits*2
+ rangebits = max(rangebytes * 8, min_nbits * 2)
+
+ # Take a random number of bits between min_nbits and rangebits
+ nbits = random.randint(min_nbits, rangebits)
+
+ return (read_random_int(nbits) % range) + minvalue
+
+def jacobi(a, b):
+ """Calculates the value of the Jacobi symbol (a/b)
+ where both a and b are positive integers, and b is odd
+ """
+
+ if a == 0: return 0
+ result = 1
+ while a > 1:
+ if a & 1:
+ if ((a-1)*(b-1) >> 2) & 1:
+ result = -result
+ a, b = b % a, a
+ else:
+ if (((b * b) - 1) >> 3) & 1:
+ result = -result
+ a >>= 1
+ if a == 0: return 0
+ return result
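+
+# By Euler's criterion, for an odd prime b the Jacobi symbol (a/b) is
+# congruent to a**((b-1)/2) modulo b; jacobi_witness below uses exactly this
+# identity to detect composite numbers.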
+
+def jacobi_witness(x, n):
+ """Returns False if n is an Euler pseudo-prime with base x, and
+ True otherwise.
+ """
+
+ j = jacobi(x, n) % n
+ f = pow(x, (n-1)/2, n)
+
+ if j == f: return False
+ return True
+
+def randomized_primality_testing(n, k):
+ """Calculates whether n is composite (which is always correct) or
+ prime (which is incorrect with error probability 2**-k)
+
+ Returns False if the number is composite, and True if it's
+ probably prime.
+ """
+
+ # For a composite n, at least 50% of bases x are Jacobi witnesses, so k
+ # rounds give an error probability of at most 2**-k.
+
+ for i in range(k):
+ x = randint(1, n-1)
+ if jacobi_witness(x, n): return False
+
+ return True
+
+def is_prime(number):
+ """Returns True if the number is prime, and False otherwise.
+
+ >>> is_prime(42)
+ False
+ >>> is_prime(41)
+ True
+ """
+
+ if randomized_primality_testing(number, 6):
+ # Prime, according to Jacobi
+ return True
+
+ # Not prime
+ return False
+
+
+def getprime(nbits):
+ """Returns a prime number of max. 'math.ceil(nbits/8)*8' bits. In
+ other words: nbits is rounded up to whole bytes.
+
+ >>> p = getprime(8)
+ >>> is_prime(p-1)
+ False
+ >>> is_prime(p)
+ True
+ >>> is_prime(p+1)
+ False
+ """
+
+ while True:
+ integer = read_random_int(nbits)
+
+ # Make sure it's odd
+ integer |= 1
+
+ # Test for primeness
+ if is_prime(integer): break
+
+ # Retry if not prime
+
+ return integer
+
+def are_relatively_prime(a, b):
+ """Returns True if a and b are relatively prime, and False if they
+ are not.
+
+ >>> are_relatively_prime(2, 3)
+ True
+ >>> are_relatively_prime(2, 4)
+ False
+ """
+
+ d = gcd(a, b)
+ return (d == 1)
+
+def find_p_q(nbits):
+ """Returns a tuple of two different primes of nbits bits"""
+ pbits = nbits + (nbits/16) #Make sure that p and q aren't too close
+ qbits = nbits - (nbits/16) #or the factoring programs can factor n
+ p = getprime(pbits)
+ while True:
+ q = getprime(qbits)
+ #Make sure p and q are different.
+ if not q == p: break
+ return (p, q)
+
+def extended_gcd(a, b):
+ """Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
+ """
+ # r = gcd(a,b), i = multiplicative inverse of a mod b,
+ # or j = multiplicative inverse of b mod a.
+ # Negative return values for i or j are made positive mod b or a respectively.
+ # Iterative version is faster and uses much less stack space.
+ x = 0
+ y = 1
+ lx = 1
+ ly = 0
+ oa = a #Remember original a/b to remove
+ ob = b #negative values from return results
+ while b != 0:
+ q = long(a/b)
+ (a, b) = (b, a % b)
+ (x, lx) = ((lx - (q * x)),x)
+ (y, ly) = ((ly - (q * y)),y)
+ if (lx < 0): lx += ob #If neg wrap modulo orignal b
+ if (ly < 0): ly += oa #If neg wrap modulo orignal a
+ return (a, lx, ly) #Return only positive values
+
+# Main function: calculate encryption and decryption keys
+def calculate_keys(p, q, nbits):
+ """Calculates an encryption and a decryption key for p and q, and
+ returns them as a tuple (e, d)"""
+
+ n = p * q
+ phi_n = (p-1) * (q-1)
+
+ while True:
+ # Make sure e has enough bits so we ensure "wrapping" through
+ # modulo n
+ e = max(65537,getprime(nbits/4))
+ if are_relatively_prime(e, n) and are_relatively_prime(e, phi_n): break
+
+ (d, i, j) = extended_gcd(e, phi_n)
+
+ if not d == 1:
+ raise Exception("e (%d) and phi_n (%d) are not relatively prime" % (e, phi_n))
+ if (i < 0):
+ raise Exception("New extended_gcd shouldn't return negative values")
+ if not (e * i) % phi_n == 1:
+ raise Exception("e (%d) and i (%d) are not mult. inv. modulo phi_n (%d)" % (e, i, phi_n))
+
+ return (e, i)
+
+
+def gen_keys(nbits):
+ """Generate RSA keys of nbits bits. Returns (p, q, e, d).
+
+ Note: this can take a long time, depending on the key size.
+ """
+
+ (p, q) = find_p_q(nbits)
+ (e, d) = calculate_keys(p, q, nbits)
+
+ return (p, q, e, d)
+
+def newkeys(nbits):
+ """Generates public and private keys, and returns them as (pub,
+ priv).
+
+ The public key consists of a dict {'e': ..., 'n': ...}. The private
+ key consists of a dict {'d': ..., 'p': ..., 'q': ...}.
+ """
+ nbits = max(9,nbits) # Don't let nbits go below 9 bits
+ (p, q, e, d) = gen_keys(nbits)
+
+ return ( {'e': e, 'n': p*q}, {'d': d, 'p': p, 'q': q} )
+
+def encrypt_int(message, ekey, n):
+ """Encrypts a message using encryption key 'ekey', working modulo n"""
+
+ if type(message) is types.IntType:
+ message = long(message)
+
+ if not type(message) is types.LongType:
+ raise TypeError("You must pass a long or int")
+
+ if message < 0 or message > n:
+ raise OverflowError("The message is too long")
+
+ #Note: bit exponents start at zero (bit counts start at 1); this is correct
+ safebit = bit_size(n) - 2 #compute safe bit (MSB - 1)
+ message += (1 << safebit) #add safebit to ensure folding
+
+ return pow(message, ekey, n)
+
+def decrypt_int(cyphertext, dkey, n):
+ """Decrypts a cypher text using the decryption key 'dkey', working
+ modulo n"""
+
+ message = pow(cyphertext, dkey, n)
+
+ safebit = bit_size(n) - 2 #compute safe bit (MSB - 1)
+ message -= (1 << safebit) #remove safebit before decode
+
+ return message
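+
+# encrypt_int and decrypt_int are inverses as long as the folded message
+# still fits below n: for a matching key pair (e, d) and any m >= 0 with
+# m + (1 << safebit) < n, decrypt_int(encrypt_int(m, e, n), d, n) == m.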
+
+def encode64chops(chops):
+ """base64encodes chops and combines them into a ',' delimited string"""
+
+ chips = [] #chips are character chops
+
+ for value in chops:
+ chips.append(int2str64(value))
+
+ #delimit chops with comma
+ encoded = ','.join(chips)
+
+ return encoded
+
+def decode64chops(string):
+ """base64decodes and makes a ',' delimited string into chops"""
+
+ chips = string.split(',') #split chops at commas
+
+ chops = []
+
+ for string in chips: #make char chops (chips) into chops
+ chops.append(str642int(string))
+
+ return chops
+
+def chopstring(message, key, n, funcref):
+ """Chops the 'message' into integers that fit into n,
+ leaving room for a safebit to be added to ensure that all
+ messages fold during exponentiation. The MSB of the number n
+ is not independant modulo n (setting it could cause overflow), so
+ use the next lower bit for the safebit. Therefore reserve 2-bits
+ in the number n for non-data bits. Calls specified encryption
+ function for each chop.
+
+ Used by 'encrypt' and 'sign'.
+ """
+
+ msglen = len(message)
+ mbits = msglen * 8
+ #Set aside 2-bits so setting of safebit won't overflow modulo n.
+ nbits = bit_size(n) - 2 # leave room for safebit
+ nbytes = nbits / 8
+ blocks = msglen / nbytes
+
+ if msglen % nbytes > 0:
+ blocks += 1
+
+ cypher = []
+
+ for bindex in range(blocks):
+ offset = bindex * nbytes
+ block = message[offset:offset+nbytes]
+ value = bytes2int(block)
+ cypher.append(funcref(value, key, n))
+
+ return encode64chops(cypher) #Encode encrypted ints to base64 strings
+
+def gluechops(string, key, n, funcref):
+ """Glues chops back together into a string. calls
+ funcref(integer, key, n) for each chop.
+
+ Used by 'decrypt' and 'verify'.
+ """
+ message = ""
+
+ chops = decode64chops(string) #Decode base64 strings into integer chops
+
+ for cpart in chops:
+ mpart = funcref(cpart, key, n) #Decrypt each chop
+ message += int2bytes(mpart) #Combine decrypted strings into a msg
+
+ return message
+
+def encrypt(message, key):
+ """Encrypts a string 'message' with the public key 'key'"""
+ if 'n' not in key:
+ raise Exception("You must use the public key with encrypt")
+
+ return chopstring(message, key['e'], key['n'], encrypt_int)
+
+def sign(message, key):
+ """Signs a string 'message' with the private key 'key'"""
+ if 'p' not in key:
+ raise Exception("You must use the private key with sign")
+
+ return chopstring(message, key['d'], key['p']*key['q'], encrypt_int)
+
+def decrypt(cypher, key):
+ """Decrypts a string 'cypher' with the private key 'key'"""
+ if 'p' not in key:
+ raise Exception("You must use the private key with decrypt")
+
+ return gluechops(cypher, key['d'], key['p']*key['q'], decrypt_int)
+
+def verify(cypher, key):
+ """Verifies a string 'cypher' with the public key 'key'"""
+ if 'n' not in key:
+ raise Exception("You must use the public key with verify")
+
+ return gluechops(cypher, key['e'], key['n'], decrypt_int)
+
+# Do doctest if we're not imported
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod()
+
+__all__ = ["newkeys", "encrypt", "decrypt", "sign", "verify"]
+
diff --git a/third_party/python/rsa/rsa/asn1.py b/third_party/python/rsa/rsa/asn1.py
new file mode 100644
index 0000000000..706e6cf228
--- /dev/null
+++ b/third_party/python/rsa/rsa/asn1.py
@@ -0,0 +1,35 @@
+'''ASN.1 definitions.
+
+Not all ASN.1-handling code uses these definitions, but when it does, they should be here.
+'''
+
+from pyasn1.type import univ, namedtype, tag
+
+class PubKeyHeader(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('oid', univ.ObjectIdentifier()),
+ namedtype.NamedType('parameters', univ.Null()),
+ )
+
+class OpenSSLPubKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('header', PubKeyHeader()),
+
+ # This little hack (the implicit tag) allows us to get a Bit String as Octet String
+ namedtype.NamedType('key', univ.OctetString().subtype(
+ implicitTag=tag.Tag(tagClass=0, tagFormat=0, tagId=3))),
+ )
+
+
+class AsnPubKey(univ.Sequence):
+ '''ASN.1 contents of DER encoded public key:
+
+ RSAPublicKey ::= SEQUENCE {
+ modulus INTEGER, -- n
+ publicExponent INTEGER -- e
+ }
+ '''
+
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer()),
+ )
diff --git a/third_party/python/rsa/rsa/bigfile.py b/third_party/python/rsa/rsa/bigfile.py
new file mode 100644
index 0000000000..516cf56b51
--- /dev/null
+++ b/third_party/python/rsa/rsa/bigfile.py
@@ -0,0 +1,87 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Large file support
+
+ - break a file into smaller blocks, encrypt them, and store the
+ encrypted blocks in another file.
+
+ - take such an encrypted file, decrypt its blocks, and reconstruct the
+ original file.
+
+The encrypted file format is as follows, where || denotes byte concatenation:
+
+ FILE := VERSION || BLOCK || BLOCK ...
+
+ BLOCK := LENGTH || DATA
+
+ LENGTH := varint-encoded length of the subsequent data. Varint comes from
+ Google Protobuf, and encodes an integer into a variable number of bytes.
+ Each byte uses the 7 lowest bits to encode the value. The highest bit set
+ to 1 indicates the next byte is also part of the varint. The last byte will
+ have this bit set to 0.
+
+This file format is called the VARBLOCK format, in line with the varint format
+used to denote the block sizes.
+
+'''
+
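+# Varint example: the length 300 (binary 100101100) is encoded LSB-first in
+# 7-bit groups as the two bytes 0xAC 0x02:
+#
+#   0xAC = 1_0101100 (low 7 bits, continuation bit set)
+#   0x02 = 0_0000010 (high 2 bits, continuation bit clear)
+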
+from rsa import key, common, pkcs1, varblock
+from rsa._compat import byte
+
+def encrypt_bigfile(infile, outfile, pub_key):
+ '''Encrypts a file, writing it to 'outfile' in VARBLOCK format.
+
+ :param infile: file-like object to read the cleartext from
+ :param outfile: file-like object to write the crypto in VARBLOCK format to
+ :param pub_key: :py:class:`rsa.PublicKey` to encrypt with
+
+ '''
+
+ if not isinstance(pub_key, key.PublicKey):
+ raise TypeError('Public key required, but got %r' % pub_key)
+
+ key_bytes = common.bit_size(pub_key.n) // 8
+ blocksize = key_bytes - 11 # keep space for PKCS#1 padding
+
+ # Write the version number to the VARBLOCK file
+ outfile.write(byte(varblock.VARBLOCK_VERSION))
+
+ # Encrypt and write each block
+ for block in varblock.yield_fixedblocks(infile, blocksize):
+ crypto = pkcs1.encrypt(block, pub_key)
+
+ varblock.write_varint(outfile, len(crypto))
+ outfile.write(crypto)
+
+def decrypt_bigfile(infile, outfile, priv_key):
+ '''Decrypts an encrypted VARBLOCK file, writing it to 'outfile'
+
+ :param infile: file-like object to read the crypto in VARBLOCK format from
+ :param outfile: file-like object to write the cleartext to
+ :param priv_key: :py:class:`rsa.PrivateKey` to decrypt with
+
+ '''
+
+ if not isinstance(priv_key, key.PrivateKey):
+ raise TypeError('Private key required, but got %r' % priv_key)
+
+ for block in varblock.yield_varblocks(infile):
+ cleartext = pkcs1.decrypt(block, priv_key)
+ outfile.write(cleartext)
+
+__all__ = ['encrypt_bigfile', 'decrypt_bigfile']
+
diff --git a/third_party/python/rsa/rsa/cli.py b/third_party/python/rsa/rsa/cli.py
new file mode 100644
index 0000000000..527cc4979a
--- /dev/null
+++ b/third_party/python/rsa/rsa/cli.py
@@ -0,0 +1,379 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Commandline scripts.
+
+These scripts are called by the executables defined in setup.py.
+'''
+
+from __future__ import with_statement, print_function
+
+import abc
+import sys
+from optparse import OptionParser
+
+import rsa
+import rsa.bigfile
+import rsa.pkcs1
+
+HASH_METHODS = sorted(rsa.pkcs1.HASH_METHODS.keys())
+
+def keygen():
+ '''Key generator.'''
+
+ # Parse the CLI options
+ parser = OptionParser(usage='usage: %prog [options] keysize',
+ description='Generates a new RSA keypair of "keysize" bits.')
+
+ parser.add_option('--pubout', type='string',
+ help='Output filename for the public key. The public key is '
+ 'not saved if this option is not present. You can use '
+ 'pyrsa-priv2pub to create the public key file later.')
+
+ parser.add_option('-o', '--out', type='string',
+ help='Output filename for the private key. The key is '
+ 'written to stdout if this option is not present.')
+
+ parser.add_option('--form',
+ help='key format of the private and public keys - default PEM',
+ choices=('PEM', 'DER'), default='PEM')
+
+ (cli, cli_args) = parser.parse_args(sys.argv[1:])
+
+ if len(cli_args) != 1:
+ parser.print_help()
+ raise SystemExit(1)
+
+ try:
+ keysize = int(cli_args[0])
+ except ValueError:
+ parser.print_help()
+ print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
+ raise SystemExit(1)
+
+ print('Generating %i-bit key' % keysize, file=sys.stderr)
+ (pub_key, priv_key) = rsa.newkeys(keysize)
+
+
+ # Save public key
+ if cli.pubout:
+ print('Writing public key to %s' % cli.pubout, file=sys.stderr)
+ data = pub_key.save_pkcs1(format=cli.form)
+ with open(cli.pubout, 'wb') as outfile:
+ outfile.write(data)
+
+ # Save private key
+ data = priv_key.save_pkcs1(format=cli.form)
+
+ if cli.out:
+ print('Writing private key to %s' % cli.out, file=sys.stderr)
+ with open(cli.out, 'wb') as outfile:
+ outfile.write(data)
+ else:
+ print('Writing private key to stdout', file=sys.stderr)
+ sys.stdout.write(data)
+
+
+class CryptoOperation(object):
+ '''CLI callable that operates with input, output, and a key.'''
+
+ __metaclass__ = abc.ABCMeta
+
+ keyname = 'public' # or 'private'
+ usage = 'usage: %%prog [options] %(keyname)s_key'
+ description = None
+ operation = 'decrypt'
+ operation_past = 'decrypted'
+ operation_progressive = 'decrypting'
+ input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
+ 'not specified.'
+ output_help = 'Name of the file to write the %(operation_past)s file ' \
+ 'to. Written to stdout if this option is not present.'
+ expected_cli_args = 1
+ has_output = True
+
+ key_class = rsa.PublicKey
+
+ def __init__(self):
+ self.usage = self.usage % self.__class__.__dict__
+ self.input_help = self.input_help % self.__class__.__dict__
+ self.output_help = self.output_help % self.__class__.__dict__
+
+ @abc.abstractmethod
+ def perform_operation(self, indata, key, cli_args=None):
+ '''Performs the program's operation.
+
+ Implement in a subclass.
+
+ :returns: the data to write to the output.
+ '''
+
+ def __call__(self):
+ '''Runs the program.'''
+
+ (cli, cli_args) = self.parse_cli()
+
+ key = self.read_key(cli_args[0], cli.keyform)
+
+ indata = self.read_infile(cli.input)
+
+ print(self.operation_progressive.title(), file=sys.stderr)
+ outdata = self.perform_operation(indata, key, cli_args)
+
+ if self.has_output:
+ self.write_outfile(outdata, cli.output)
+
+ def parse_cli(self):
+ '''Parse the CLI options
+
+ :returns: (cli_opts, cli_args)
+ '''
+
+ parser = OptionParser(usage=self.usage, description=self.description)
+
+ parser.add_option('-i', '--input', type='string', help=self.input_help)
+
+ if self.has_output:
+ parser.add_option('-o', '--output', type='string', help=self.output_help)
+
+ parser.add_option('--keyform',
+ help='Key format of the %s key - default PEM' % self.keyname,
+ choices=('PEM', 'DER'), default='PEM')
+
+ (cli, cli_args) = parser.parse_args(sys.argv[1:])
+
+ if len(cli_args) != self.expected_cli_args:
+ parser.print_help()
+ raise SystemExit(1)
+
+ return (cli, cli_args)
+
+ def read_key(self, filename, keyform):
+ '''Reads a public or private key.'''
+
+ print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)
+ with open(filename, 'rb') as keyfile:
+ keydata = keyfile.read()
+
+ return self.key_class.load_pkcs1(keydata, keyform)
+
+ def read_infile(self, inname):
+ '''Read the input file'''
+
+ if inname:
+ print('Reading input from %s' % inname, file=sys.stderr)
+ with open(inname, 'rb') as infile:
+ return infile.read()
+
+ print('Reading input from stdin', file=sys.stderr)
+ return sys.stdin.read()
+
+ def write_outfile(self, outdata, outname):
+ '''Write the output file'''
+
+ if outname:
+ print('Writing output to %s' % outname, file=sys.stderr)
+ with open(outname, 'wb') as outfile:
+ outfile.write(outdata)
+ else:
+ print('Writing output to stdout', file=sys.stderr)
+ sys.stdout.write(outdata)
+
+class EncryptOperation(CryptoOperation):
+ '''Encrypts a file.'''
+
+ keyname = 'public'
+ description = ('Encrypts a file. The file must be shorter than the key '
+ 'length in order to be encrypted. For larger files, use the '
+ 'pyrsa-encrypt-bigfile command.')
+ operation = 'encrypt'
+ operation_past = 'encrypted'
+ operation_progressive = 'encrypting'
+
+
+ def perform_operation(self, indata, pub_key, cli_args=None):
+ '''Encrypts files.'''
+
+ return rsa.encrypt(indata, pub_key)
+
+class DecryptOperation(CryptoOperation):
+ '''Decrypts a file.'''
+
+ keyname = 'private'
+ description = ('Decrypts a file. The original file must be shorter than '
+ 'the key length in order to have been encrypted. For larger '
+ 'files, use the pyrsa-decrypt-bigfile command.')
+ operation = 'decrypt'
+ operation_past = 'decrypted'
+ operation_progressive = 'decrypting'
+ key_class = rsa.PrivateKey
+
+ def perform_operation(self, indata, priv_key, cli_args=None):
+ '''Decrypts files.'''
+
+ return rsa.decrypt(indata, priv_key)
+
+class SignOperation(CryptoOperation):
+ '''Signs a file.'''
+
+ keyname = 'private'
+ usage = 'usage: %%prog [options] private_key hash_method'
+ description = ('Signs a file, outputs the signature. Choose the hash '
+ 'method from %s' % ', '.join(HASH_METHODS))
+ operation = 'sign'
+ operation_past = 'signature'
+ operation_progressive = 'Signing'
+ key_class = rsa.PrivateKey
+ expected_cli_args = 2
+
+ output_help = ('Name of the file to write the signature to. Written '
+ 'to stdout if this option is not present.')
+
+ def perform_operation(self, indata, priv_key, cli_args):
+ '''Signs files.'''
+
+ hash_method = cli_args[1]
+ if hash_method not in HASH_METHODS:
+ raise SystemExit('Invalid hash method, choose one of %s' %
+ ', '.join(HASH_METHODS))
+
+ return rsa.sign(indata, priv_key, hash_method)
+
+class VerifyOperation(CryptoOperation):
+ '''Verify a signature.'''
+
+ keyname = 'public'
+ usage = 'usage: %%prog [options] public_key signature_file'
+ description = ('Verifies a signature, exits with status 0 upon success, '
+ 'prints an error message and exits with status 1 upon error.')
+ operation = 'verify'
+ operation_past = 'verified'
+ operation_progressive = 'Verifying'
+ key_class = rsa.PublicKey
+ expected_cli_args = 2
+ has_output = False
+
+ def perform_operation(self, indata, pub_key, cli_args):
+ '''Verifies the signature on files.'''
+
+ signature_file = cli_args[1]
+
+ with open(signature_file, 'rb') as sigfile:
+ signature = sigfile.read()
+
+ try:
+ rsa.verify(indata, signature, pub_key)
+ except rsa.VerificationError:
+ raise SystemExit('Verification failed.')
+
+ print('Verification OK', file=sys.stderr)
+
+
+class BigfileOperation(CryptoOperation):
+ '''CryptoOperation that doesn't read the entire file into memory.'''
+
+ def __init__(self):
+ CryptoOperation.__init__(self)
+
+ self.file_objects = []
+
+ def __del__(self):
+ '''Closes any open file handles.'''
+
+ for fobj in self.file_objects:
+ fobj.close()
+
+ def __call__(self):
+ '''Runs the program.'''
+
+ (cli, cli_args) = self.parse_cli()
+
+ key = self.read_key(cli_args[0], cli.keyform)
+
+ # Get the file handles
+ infile = self.get_infile(cli.input)
+ outfile = self.get_outfile(cli.output)
+
+ # Call the operation
+ print(self.operation_progressive.title(), file=sys.stderr)
+ self.perform_operation(infile, outfile, key, cli_args)
+
+ def get_infile(self, inname):
+ '''Returns the input file object'''
+
+ if inname:
+ print('Reading input from %s' % inname, file=sys.stderr)
+ fobj = open(inname, 'rb')
+ self.file_objects.append(fobj)
+ else:
+ print('Reading input from stdin', file=sys.stderr)
+ fobj = sys.stdin
+
+ return fobj
+
+ def get_outfile(self, outname):
+ '''Returns the output file object'''
+
+ if outname:
+ print('Will write output to %s' % outname, file=sys.stderr)
+ fobj = open(outname, 'wb')
+ self.file_objects.append(fobj)
+ else:
+ print('Will write output to stdout', file=sys.stderr)
+ fobj = sys.stdout
+
+ return fobj
+
+class EncryptBigfileOperation(BigfileOperation):
+ '''Encrypts a file to VARBLOCK format.'''
+
+ keyname = 'public'
+ description = ('Encrypts a file to an encrypted VARBLOCK file. The file '
+ 'can be larger than the key length, but the output file is only '
+ 'compatible with Python-RSA.')
+ operation = 'encrypt'
+ operation_past = 'encrypted'
+ operation_progressive = 'encrypting'
+
+ def perform_operation(self, infile, outfile, pub_key, cli_args=None):
+ '''Encrypts files to VARBLOCK.'''
+
+ return rsa.bigfile.encrypt_bigfile(infile, outfile, pub_key)
+
+class DecryptBigfileOperation(BigfileOperation):
+ '''Decrypts a file in VARBLOCK format.'''
+
+ keyname = 'private'
+ description = ('Decrypts an encrypted VARBLOCK file that was encrypted '
+ 'with pyrsa-encrypt-bigfile')
+ operation = 'decrypt'
+ operation_past = 'decrypted'
+ operation_progressive = 'decrypting'
+ key_class = rsa.PrivateKey
+
+ def perform_operation(self, infile, outfile, priv_key, cli_args=None):
+ '''Decrypts a VARBLOCK file.'''
+
+ return rsa.bigfile.decrypt_bigfile(infile, outfile, priv_key)
+
+
+encrypt = EncryptOperation()
+decrypt = DecryptOperation()
+sign = SignOperation()
+verify = VerifyOperation()
+encrypt_bigfile = EncryptBigfileOperation()
+decrypt_bigfile = DecryptBigfileOperation()
+
diff --git a/third_party/python/rsa/rsa/common.py b/third_party/python/rsa/rsa/common.py
new file mode 100644
index 0000000000..39feb8c228
--- /dev/null
+++ b/third_party/python/rsa/rsa/common.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Common functionality shared by several modules.'''
+
+
+def bit_size(num):
+ '''
+ Number of bits needed to represent an integer, excluding any leading
+ 0 bits.
+
+ As per definition from http://wiki.python.org/moin/BitManipulation and
+ to match the behavior of the Python 3 API.
+
+ Usage::
+
+ >>> bit_size(1023)
+ 10
+ >>> bit_size(1024)
+ 11
+ >>> bit_size(1025)
+ 11
+
+ :param num:
+ Integer value. If num is 0, returns 0. Only the absolute value of the
+ number is considered. Therefore, negative numbers are replaced with
+ abs(num) before the bit length is determined.
+ :returns:
+ Returns the number of bits in the integer.
+ '''
+ if num == 0:
+ return 0
+ if num < 0:
+ num = -num
+
+ # Make sure this is an int and not a float.
+ num & 1
+
+ hex_num = "%x" % num
+ return ((len(hex_num) - 1) * 4) + {
+ '0':0, '1':1, '2':2, '3':2,
+ '4':3, '5':3, '6':3, '7':3,
+ '8':4, '9':4, 'a':4, 'b':4,
+ 'c':4, 'd':4, 'e':4, 'f':4,
+ }[hex_num[0]]
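+ # Example: 511 == 0x1ff -> (len('1ff') - 1) * 4 + 1 == 9 bits, where the
+ # final 1 comes from the table lookup for the leading hex digit '1'.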
+
+
+def _bit_size(number):
+ '''
+ Returns the number of bits required to hold a specific long number.
+ '''
+ if number < 0:
+ raise ValueError('Only nonnegative numbers possible: %s' % number)
+
+ if number == 0:
+ return 0
+
+ # This works, even with very large numbers. When using math.log(number, 2),
+ # you'll get rounding errors and it'll fail.
+ bits = 0
+ while number:
+ bits += 1
+ number >>= 1
+
+ return bits
+
+
+def byte_size(number):
+ '''
+ Returns the number of bytes required to hold a specific long number.
+
+ The number of bytes is rounded up.
+
+ Usage::
+
+ >>> byte_size(1 << 1023)
+ 128
+ >>> byte_size((1 << 1024) - 1)
+ 128
+ >>> byte_size(1 << 1024)
+ 129
+
+ :param number:
+ An unsigned integer
+ :returns:
+ The number of bytes required to hold a specific long number.
+ '''
+ quanta, mod = divmod(bit_size(number), 8)
+ if mod or number == 0:
+ quanta += 1
+ return quanta
+ #return int(math.ceil(bit_size(number) / 8.0))
+
+
+def extended_gcd(a, b):
+ '''Returns a tuple (r, i, j) such that r = gcd(a, b) = ia + jb
+ '''
+ # r = gcd(a,b), i = multiplicative inverse of a mod b,
+ # or j = multiplicative inverse of b mod a.
+ # Negative return values for i or j are made positive mod b or a respectively.
+ # Iterative version is faster and uses much less stack space.
+ x = 0
+ y = 1
+ lx = 1
+ ly = 0
+ oa = a #Remember original a/b to remove
+ ob = b #negative values from return results
+ while b != 0:
+ q = a // b
+ (a, b) = (b, a % b)
+ (x, lx) = ((lx - (q * x)),x)
+ (y, ly) = ((ly - (q * y)),y)
+ if (lx < 0): lx += ob #If neg wrap modulo orignal b
+ if (ly < 0): ly += oa #If neg wrap modulo orignal a
+ return (a, lx, ly) #Return only positive values
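+
+# Example: extended_gcd(120, 23) == (1, 14, 47); 14 is the multiplicative
+# inverse of 120 modulo 23, since (120 * 14) % 23 == 1.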
+
+
+def inverse(x, n):
+ '''Returns x^-1 (mod n)
+
+ >>> inverse(7, 4)
+ 3
+ >>> (inverse(143, 4) * 143) % 4
+ 1
+ '''
+
+ (divider, inv, _) = extended_gcd(x, n)
+
+ if divider != 1:
+ raise ValueError("x (%d) and n (%d) are not relatively prime" % (x, n))
+
+ return inv
+
+
+def crt(a_values, modulo_values):
+ '''Chinese Remainder Theorem.
+
+ Calculates x such that x = a[i] (mod m[i]) for each i.
+
+ :param a_values: the a-values of the above equation
+ :param modulo_values: the m-values of the above equation
+ :returns: x such that x = a[i] (mod m[i]) for each i
+
+
+ >>> crt([2, 3], [3, 5])
+ 8
+
+ >>> crt([2, 3, 2], [3, 5, 7])
+ 23
+
+ >>> crt([2, 3, 0], [7, 11, 15])
+ 135
+ '''
+
+ m = 1
+ x = 0
+
+ for modulo in modulo_values:
+ m *= modulo
+
+ for (m_i, a_i) in zip(modulo_values, a_values):
+ M_i = m // m_i
+ inv = inverse(M_i, m_i)
+
+ x = (x + a_i * M_i * inv) % m
+
+ return x
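+
+# A classic application of the CRT is speeding up RSA decryption: computing
+# the message modulo p and modulo q separately and recombining the results
+# is substantially faster than a single pow(cyphertext, d, n).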
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
+
diff --git a/third_party/python/rsa/rsa/core.py b/third_party/python/rsa/rsa/core.py
new file mode 100644
index 0000000000..90dfee8e57
--- /dev/null
+++ b/third_party/python/rsa/rsa/core.py
@@ -0,0 +1,58 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Core mathematical operations.
+
+This is the actual core RSA implementation, which is only defined
+mathematically on integers.
+'''
+
+
+from rsa._compat import is_integer
+
+def assert_int(var, name):
+
+ if is_integer(var):
+ return
+
+ raise TypeError('%s should be an integer, not %s' % (name, var.__class__))
+
+def encrypt_int(message, ekey, n):
+ '''Encrypts a message using encryption key 'ekey', working modulo n'''
+
+ assert_int(message, 'message')
+ assert_int(ekey, 'ekey')
+ assert_int(n, 'n')
+
+ if message < 0:
+ raise ValueError('Only non-negative numbers are supported')
+
+ if message > n:
+ raise OverflowError("The message %i is too long for n=%i" % (message, n))
+
+ return pow(message, ekey, n)
+
+def decrypt_int(cyphertext, dkey, n):
+ '''Decrypts a cypher text using the decryption key 'dkey', working
+ modulo n'''
+
+ assert_int(cyphertext, 'cyphertext')
+ assert_int(dkey, 'dkey')
+ assert_int(n, 'n')
+
+ message = pow(cyphertext, dkey, n)
+ return message
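+
+# encrypt_int and decrypt_int are inverses: for 0 <= message < n and a key
+# pair satisfying e * d == 1 (mod phi(n)),
+# decrypt_int(encrypt_int(message, e, n), d, n) == message.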
+
diff --git a/third_party/python/rsa/rsa/key.py b/third_party/python/rsa/rsa/key.py
new file mode 100644
index 0000000000..b6de7b3f3b
--- /dev/null
+++ b/third_party/python/rsa/rsa/key.py
@@ -0,0 +1,612 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''RSA key generation code.
+
+Create new keys with the newkeys() function. It will give you a PublicKey and a
+PrivateKey object.
+
+Loading and saving keys requires the pyasn1 module, which is imported as late
+as possible so that the rest of this module keeps working in its absence.
+
+'''
+
+import logging
+from rsa._compat import b, bytes_type
+
+import rsa.prime
+import rsa.pem
+import rsa.common
+
+log = logging.getLogger(__name__)
+
+
+
+class AbstractKey(object):
+ '''Abstract superclass for private and public keys.'''
+
+ @classmethod
+ def load_pkcs1(cls, keyfile, format='PEM'):
+ r'''Loads a key in PKCS#1 DER or PEM format.
+
+ :param keyfile: contents of a DER- or PEM-encoded file that contains
+ the key.
+ :param format: the format of the file to load; 'PEM' or 'DER'
+
+ :return: a PublicKey or PrivateKey object, depending on the class
+
+ '''
+
+ methods = {
+ 'PEM': cls._load_pkcs1_pem,
+ 'DER': cls._load_pkcs1_der,
+ }
+
+ if format not in methods:
+ formats = ', '.join(sorted(methods.keys()))
+ raise ValueError('Unsupported format: %r, try one of %s' % (format,
+ formats))
+
+ method = methods[format]
+ return method(keyfile)
+
+ def save_pkcs1(self, format='PEM'):
+ '''Saves the key in PKCS#1 DER or PEM format.
+
+ :param format: the format to save; 'PEM' or 'DER'
+ :returns: the DER- or PEM-encoded key.
+
+ '''
+
+ methods = {
+ 'PEM': self._save_pkcs1_pem,
+ 'DER': self._save_pkcs1_der,
+ }
+
+ if format not in methods:
+ formats = ', '.join(sorted(methods.keys()))
+ raise ValueError('Unsupported format: %r, try one of %s' % (format,
+ formats))
+
+ method = methods[format]
+ return method()
+
+class PublicKey(AbstractKey):
+ '''Represents a public RSA key.
+
+ This key is also known as the 'encryption key'. It contains the 'n' and 'e'
+ values.
+
+ Supports attributes as well as dictionary-like access. Attribute access
+ is faster, though.
+
+ >>> PublicKey(5, 3)
+ PublicKey(5, 3)
+
+ >>> key = PublicKey(5, 3)
+ >>> key.n
+ 5
+ >>> key['n']
+ 5
+ >>> key.e
+ 3
+ >>> key['e']
+ 3
+
+ '''
+
+ __slots__ = ('n', 'e')
+
+ def __init__(self, n, e):
+ self.n = n
+ self.e = e
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __repr__(self):
+ return 'PublicKey(%i, %i)' % (self.n, self.e)
+
+ def __eq__(self, other):
+ if other is None:
+ return False
+
+ if not isinstance(other, PublicKey):
+ return False
+
+ return self.n == other.n and self.e == other.e
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ @classmethod
+ def _load_pkcs1_der(cls, keyfile):
+ r'''Loads a key in PKCS#1 DER format.
+
+ @param keyfile: contents of a DER-encoded file that contains the public
+ key.
+ @return: a PublicKey object
+
+ First let's construct a DER encoded key:
+
+ >>> import base64
+ >>> b64der = 'MAwCBQCNGmYtAgMBAAE='
+ >>> der = base64.decodestring(b64der)
+
+ This loads the file:
+
+ >>> PublicKey._load_pkcs1_der(der)
+ PublicKey(2367317549, 65537)
+
+ '''
+
+ from pyasn1.codec.der import decoder
+ from rsa.asn1 import AsnPubKey
+
+ (priv, _) = decoder.decode(keyfile, asn1Spec=AsnPubKey())
+ return cls(n=int(priv['modulus']), e=int(priv['publicExponent']))
+
+ def _save_pkcs1_der(self):
+ '''Saves the public key in PKCS#1 DER format.
+
+ @returns: the DER-encoded public key.
+ '''
+
+ from pyasn1.codec.der import encoder
+ from rsa.asn1 import AsnPubKey
+
+ # Create the ASN object
+ asn_key = AsnPubKey()
+ asn_key.setComponentByName('modulus', self.n)
+ asn_key.setComponentByName('publicExponent', self.e)
+
+ return encoder.encode(asn_key)
+
+ @classmethod
+ def _load_pkcs1_pem(cls, keyfile):
+ '''Loads a PKCS#1 PEM-encoded public key file.
+
+ The contents of the file before the "-----BEGIN RSA PUBLIC KEY-----" and
+ after the "-----END RSA PUBLIC KEY-----" lines are ignored.
+
+ @param keyfile: contents of a PEM-encoded file that contains the public
+ key.
+ @return: a PublicKey object
+ '''
+
+ der = rsa.pem.load_pem(keyfile, 'RSA PUBLIC KEY')
+ return cls._load_pkcs1_der(der)
+
+ def _save_pkcs1_pem(self):
+ '''Saves a PKCS#1 PEM-encoded public key file.
+
+ @return: contents of a PEM-encoded file that contains the public key.
+ '''
+
+ der = self._save_pkcs1_der()
+ return rsa.pem.save_pem(der, 'RSA PUBLIC KEY')
+
+ @classmethod
+ def load_pkcs1_openssl_pem(cls, keyfile):
+ '''Loads a PKCS#1.5 PEM-encoded public key file from OpenSSL.
+
+ These files can be recognised in that they start with BEGIN PUBLIC KEY
+ rather than BEGIN RSA PUBLIC KEY.
+
+ The contents of the file before the "-----BEGIN PUBLIC KEY-----" and
+ after the "-----END PUBLIC KEY-----" lines are ignored.
+
+ @param keyfile: contents of a PEM-encoded file that contains the public
+ key, from OpenSSL.
+ @return: a PublicKey object
+ '''
+
+ der = rsa.pem.load_pem(keyfile, 'PUBLIC KEY')
+ return cls.load_pkcs1_openssl_der(der)
+
+ @classmethod
+ def load_pkcs1_openssl_der(cls, keyfile):
+ '''Loads a PKCS#1 DER-encoded public key file from OpenSSL.
+
+ @param keyfile: contents of a DER-encoded file that contains the public
+ key, from OpenSSL.
+ @return: a PublicKey object
+ '''
+
+ from rsa.asn1 import OpenSSLPubKey
+ from pyasn1.codec.der import decoder
+ from pyasn1.type import univ
+
+ (keyinfo, _) = decoder.decode(keyfile, asn1Spec=OpenSSLPubKey())
+
+ if keyinfo['header']['oid'] != univ.ObjectIdentifier('1.2.840.113549.1.1.1'):
+ raise TypeError("This is not a DER-encoded OpenSSL-compatible public key")
+
+ return cls._load_pkcs1_der(keyinfo['key'][1:])
+
+
+
+
+class PrivateKey(AbstractKey):
+ '''Represents a private RSA key.
+
+ This key is also known as the 'decryption key'. It contains the 'n', 'e',
+ 'd', 'p', 'q' and other values.
+
+ Supports attributes as well as dictionary-like access. Attribute access
+ is faster, though.
+
+ >>> PrivateKey(3247, 65537, 833, 191, 17)
+ PrivateKey(3247, 65537, 833, 191, 17)
+
+ exp1, exp2 and coef don't have to be given, they will be calculated:
+
+ >>> pk = PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+ >>> pk.exp1
+ 55063
+ >>> pk.exp2
+ 10095
+ >>> pk.coef
+ 50797
+
+ If you give exp1, exp2 or coef, they will be used as-is:
+
+ >>> pk = PrivateKey(1, 2, 3, 4, 5, 6, 7, 8)
+ >>> pk.exp1
+ 6
+ >>> pk.exp2
+ 7
+ >>> pk.coef
+ 8
+
+ '''
+
+ __slots__ = ('n', 'e', 'd', 'p', 'q', 'exp1', 'exp2', 'coef')
+
+ def __init__(self, n, e, d, p, q, exp1=None, exp2=None, coef=None):
+ self.n = n
+ self.e = e
+ self.d = d
+ self.p = p
+ self.q = q
+
+ # Calculate the other values if they aren't supplied
+ if exp1 is None:
+ self.exp1 = int(d % (p - 1))
+ else:
+ self.exp1 = exp1
+
+ if exp2 is None:
+ self.exp2 = int(d % (q - 1))
+ else:
+ self.exp2 = exp2
+
+ if coef is None:
+ self.coef = rsa.common.inverse(q, p)
+ else:
+ self.coef = coef
+
+ def __getitem__(self, key):
+ return getattr(self, key)
+
+ def __repr__(self):
+ return 'PrivateKey(%(n)i, %(e)i, %(d)i, %(p)i, %(q)i)' % self
+
+ def __eq__(self, other):
+ if other is None:
+ return False
+
+ if not isinstance(other, PrivateKey):
+ return False
+
+ return (self.n == other.n and
+ self.e == other.e and
+ self.d == other.d and
+ self.p == other.p and
+ self.q == other.q and
+ self.exp1 == other.exp1 and
+ self.exp2 == other.exp2 and
+ self.coef == other.coef)
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ @classmethod
+ def _load_pkcs1_der(cls, keyfile):
+ r'''Loads a key in PKCS#1 DER format.
+
+ @param keyfile: contents of a DER-encoded file that contains the private
+ key.
+ @return: a PrivateKey object
+
+ First let's construct a DER encoded key:
+
+ >>> import base64
+ >>> b64der = 'MC4CAQACBQDeKYlRAgMBAAECBQDHn4npAgMA/icCAwDfxwIDANcXAgInbwIDAMZt'
+ >>> der = base64.decodestring(b64der)
+
+ This loads the file:
+
+ >>> PrivateKey._load_pkcs1_der(der)
+ PrivateKey(3727264081, 65537, 3349121513, 65063, 57287)
+
+ '''
+
+ from pyasn1.codec.der import decoder
+ (priv, _) = decoder.decode(keyfile)
+
+ # ASN.1 contents of DER encoded private key:
+ #
+ # RSAPrivateKey ::= SEQUENCE {
+ # version Version,
+ # modulus INTEGER, -- n
+ # publicExponent INTEGER, -- e
+ # privateExponent INTEGER, -- d
+ # prime1 INTEGER, -- p
+ # prime2 INTEGER, -- q
+ # exponent1 INTEGER, -- d mod (p-1)
+ # exponent2 INTEGER, -- d mod (q-1)
+ # coefficient INTEGER, -- (inverse of q) mod p
+ # otherPrimeInfos OtherPrimeInfos OPTIONAL
+ # }
+
+ if priv[0] != 0:
+ raise ValueError('Unable to read this file, version %s != 0' % priv[0])
+
+ as_ints = tuple(int(x) for x in priv[1:9])
+ return cls(*as_ints)
+
+ def _save_pkcs1_der(self):
+ '''Saves the private key in PKCS#1 DER format.
+
+ @returns: the DER-encoded private key.
+ '''
+
+ from pyasn1.type import univ, namedtype
+ from pyasn1.codec.der import encoder
+
+ class AsnPrivKey(univ.Sequence):
+ componentType = namedtype.NamedTypes(
+ namedtype.NamedType('version', univ.Integer()),
+ namedtype.NamedType('modulus', univ.Integer()),
+ namedtype.NamedType('publicExponent', univ.Integer()),
+ namedtype.NamedType('privateExponent', univ.Integer()),
+ namedtype.NamedType('prime1', univ.Integer()),
+ namedtype.NamedType('prime2', univ.Integer()),
+ namedtype.NamedType('exponent1', univ.Integer()),
+ namedtype.NamedType('exponent2', univ.Integer()),
+ namedtype.NamedType('coefficient', univ.Integer()),
+ )
+
+ # Create the ASN object
+ asn_key = AsnPrivKey()
+ asn_key.setComponentByName('version', 0)
+ asn_key.setComponentByName('modulus', self.n)
+ asn_key.setComponentByName('publicExponent', self.e)
+ asn_key.setComponentByName('privateExponent', self.d)
+ asn_key.setComponentByName('prime1', self.p)
+ asn_key.setComponentByName('prime2', self.q)
+ asn_key.setComponentByName('exponent1', self.exp1)
+ asn_key.setComponentByName('exponent2', self.exp2)
+ asn_key.setComponentByName('coefficient', self.coef)
+
+ return encoder.encode(asn_key)
+
+ @classmethod
+ def _load_pkcs1_pem(cls, keyfile):
+ '''Loads a PKCS#1 PEM-encoded private key file.
+
+ The contents of the file before the "-----BEGIN RSA PRIVATE KEY-----" and
+ after the "-----END RSA PRIVATE KEY-----" lines are ignored.
+
+ @param keyfile: contents of a PEM-encoded file that contains the private
+ key.
+ @return: a PrivateKey object
+ '''
+
+ der = rsa.pem.load_pem(keyfile, b('RSA PRIVATE KEY'))
+ return cls._load_pkcs1_der(der)
+
+ def _save_pkcs1_pem(self):
+ '''Saves a PKCS#1 PEM-encoded private key file.
+
+ @return: contents of a PEM-encoded file that contains the private key.
+ '''
+
+ der = self._save_pkcs1_der()
+ return rsa.pem.save_pem(der, b('RSA PRIVATE KEY'))
+
+def find_p_q(nbits, getprime_func=rsa.prime.getprime, accurate=True):
+ '''Returns a tuple of two different primes of nbits bits each.
+
+ The resulting p * q has exactly 2 * nbits bits, and the returned p and q
+ will not be equal.
+
+ :param nbits: the number of bits in each of p and q.
+ :param getprime_func: the getprime function, defaults to
+ :py:func:`rsa.prime.getprime`.
+
+ *Introduced in Python-RSA 3.1*
+
+ :param accurate: whether to enable accurate mode or not.
+ :returns: (p, q), where p > q
+
+ >>> (p, q) = find_p_q(128)
+ >>> from rsa import common
+ >>> common.bit_size(p * q)
+ 256
+
+ When not in accurate mode, the number of bits can be slightly less
+
+ >>> (p, q) = find_p_q(128, accurate=False)
+ >>> from rsa import common
+ >>> common.bit_size(p * q) <= 256
+ True
+ >>> common.bit_size(p * q) > 240
+ True
+
+ '''
+
+ total_bits = nbits * 2
+
+ # Make sure that p and q aren't too close or the factoring programs can
+ # factor n.
+ shift = nbits // 16
+ pbits = nbits + shift
+ qbits = nbits - shift
+
+ # Choose the two initial primes
+ log.debug('find_p_q(%i): Finding p', nbits)
+ p = getprime_func(pbits)
+ log.debug('find_p_q(%i): Finding q', nbits)
+ q = getprime_func(qbits)
+
+ def is_acceptable(p, q):
+ '''Returns True iff p and q are acceptable:
+
+ - p and q differ
+ - (p * q) has the right number of bits (when accurate=True)
+ '''
+
+ if p == q:
+ return False
+
+ if not accurate:
+ return True
+
+ # Make sure we have just the right amount of bits
+ found_size = rsa.common.bit_size(p * q)
+ return total_bits == found_size
+
+ # Keep choosing other primes until they match our requirements.
+ change_p = False
+ while not is_acceptable(p, q):
+ # Change p on one iteration and q on the other
+ if change_p:
+ p = getprime_func(pbits)
+ else:
+ q = getprime_func(qbits)
+
+ change_p = not change_p
+
+ # We want p > q as described on
+ # http://www.di-mgt.com.au/rsa_alg.html#crt
+ return (max(p, q), min(p, q))
+
+def calculate_keys(p, q, nbits):
+ '''Calculates an encryption and a decryption key given p and q, and
+ returns them as a tuple (e, d)
+
+ '''
+
+ phi_n = (p - 1) * (q - 1)
+
+ # A very common choice for e is 65537
+ e = 65537
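+ # 65537 == 2**16 + 1 is prime and has only two bits set, which keeps
+ # pow(message, e, n) fast while avoiding attacks on very small
+ # exponents such as e = 3.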
+
+ try:
+ d = rsa.common.inverse(e, phi_n)
+ except ValueError:
+ raise ValueError("e (%d) and phi_n (%d) are not relatively prime" %
+ (e, phi_n))
+
+ if (e * d) % phi_n != 1:
+ raise ValueError("e (%d) and d (%d) are not mult. inv. modulo "
+ "phi_n (%d)" % (e, d, phi_n))
+
+ return (e, d)
+
+def gen_keys(nbits, getprime_func, accurate=True):
+ '''Generate RSA keys of nbits bits. Returns (p, q, e, d).
+
+ Note: this can take a long time, depending on the key size.
+
+ :param nbits: the total number of bits in ``p`` and ``q``. Both ``p`` and
+ ``q`` will use ``nbits/2`` bits.
+ :param getprime_func: either :py:func:`rsa.prime.getprime` or a function
+ with similar signature.
+ '''
+
+ (p, q) = find_p_q(nbits // 2, getprime_func, accurate)
+ (e, d) = calculate_keys(p, q, nbits // 2)
+
+ return (p, q, e, d)
+
+def newkeys(nbits, accurate=True, poolsize=1):
+ '''Generates public and private keys, and returns them as (pub, priv).
+
+ The public key is also known as the 'encryption key', and is a
+ :py:class:`rsa.PublicKey` object. The private key is also known as the
+ 'decryption key' and is a :py:class:`rsa.PrivateKey` object.
+
+ :param nbits: the number of bits required to store ``n = p*q``.
+ :param accurate: when True, ``n`` will have exactly the number of bits you
+ asked for. However, this makes key generation much slower. When False,
+ ``n`` may have slightly fewer bits.
+ :param poolsize: the number of processes to use to generate the prime
+ numbers. If set to a number > 1, a parallel algorithm will be used.
+ This requires Python 2.6 or newer.
+
+ :returns: a tuple (:py:class:`rsa.PublicKey`, :py:class:`rsa.PrivateKey`)
+
+ The ``poolsize`` parameter was added in *Python-RSA 3.1* and requires
+ Python 2.6 or newer.
+
+ '''
+
+ if nbits < 16:
+ raise ValueError('Key too small')
+
+ if poolsize < 1:
+ raise ValueError('Pool size (%i) should be >= 1' % poolsize)
+
+ # Determine which getprime function to use
+ if poolsize > 1:
+ from rsa import parallel
+ import functools
+
+ getprime_func = functools.partial(parallel.getprime, poolsize=poolsize)
+    else:
+        getprime_func = rsa.prime.getprime
+
+ # Generate the key components
+    (p, q, e, d) = gen_keys(nbits, getprime_func, accurate)
+
+ # Create the key objects
+ n = p * q
+
+ return (
+ PublicKey(n, e),
+ PrivateKey(n, e, d, p, q)
+ )
+
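+# Illustrative usage (a sketch, not an upstream doctest; assumes the
+# top-level ``rsa`` package exposes the ``encrypt``/``decrypt`` helpers, and
+# shows Python 2 output where str is bytes):
+#
+# >>> import rsa
+# >>> (pub, priv) = rsa.newkeys(256)
+# >>> crypto = rsa.encrypt('hello', pub)
+# >>> rsa.decrypt(crypto, priv)
+# 'hello'
+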
+__all__ = ['PublicKey', 'PrivateKey', 'newkeys']
+
+if __name__ == '__main__':
+    print('Running doctests 100x or until failure')
+    import doctest
+
+    try:
+        for count in range(100):
+            (failures, tests) = doctest.testmod()
+            if failures:
+                break
+
+            if count and count % 10 == 0:
+                print('%i times' % count)
+    except KeyboardInterrupt:
+        print('Aborted')
+    else:
+        print('Doctests done')
diff --git a/third_party/python/rsa/rsa/parallel.py b/third_party/python/rsa/rsa/parallel.py
new file mode 100644
index 0000000000..e5034ac707
--- /dev/null
+++ b/third_party/python/rsa/rsa/parallel.py
@@ -0,0 +1,94 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Functions for parallel computation on multiple cores.
+
+Introduced in Python-RSA 3.1.
+
+.. note::
+
+ Requires Python 2.6 or newer.
+
+'''
+
+from __future__ import print_function
+
+import multiprocessing as mp
+
+import rsa.prime
+import rsa.randnum
+
+def _find_prime(nbits, pipe):
+ while True:
+ integer = rsa.randnum.read_random_int(nbits)
+
+ # Make sure it's odd
+ integer |= 1
+
+ # Test for primeness
+ if rsa.prime.is_prime(integer):
+ pipe.send(integer)
+ return
+
+def getprime(nbits, poolsize):
+ '''Returns a prime number that can be stored in 'nbits' bits.
+
+    Works in multiple processes at the same time.
+
+ >>> p = getprime(128, 3)
+ >>> rsa.prime.is_prime(p-1)
+ False
+ >>> rsa.prime.is_prime(p)
+ True
+ >>> rsa.prime.is_prime(p+1)
+ False
+
+ >>> from rsa import common
+ >>> common.bit_size(p) == 128
+ True
+
+ '''
+
+ (pipe_recv, pipe_send) = mp.Pipe(duplex=False)
+
+ # Create processes
+ procs = [mp.Process(target=_find_prime, args=(nbits, pipe_send))
+ for _ in range(poolsize)]
+    for proc in procs:
+        proc.start()
+
+ result = pipe_recv.recv()
+
+    for proc in procs:
+        proc.terminate()
+
+ return result
+
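+# Illustrative usage (a sketch, not an upstream doctest): the poolsize
+# parameter of rsa.newkeys routes prime generation through this module.
+#
+# >>> import rsa
+# >>> (pub, priv) = rsa.newkeys(256, poolsize=2)
+# >>> rsa.prime.is_prime(priv.p)
+# True
+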
+__all__ = ['getprime']
+
+
+if __name__ == '__main__':
+    print('Running doctests 100x or until failure')
+ import doctest
+
+ for count in range(100):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count and count % 10 == 0:
+ print('%i times' % count)
+
+ print('Doctests done')
+
diff --git a/third_party/python/rsa/rsa/pem.py b/third_party/python/rsa/rsa/pem.py
new file mode 100644
index 0000000000..b1c3a0edb4
--- /dev/null
+++ b/third_party/python/rsa/rsa/pem.py
@@ -0,0 +1,120 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Functions that load and write PEM-encoded files.'''
+
+import base64
+from rsa._compat import b, is_bytes
+
+def _markers(pem_marker):
+ '''
+ Returns the start and end PEM markers
+ '''
+
+ if is_bytes(pem_marker):
+ pem_marker = pem_marker.decode('utf-8')
+
+ return (b('-----BEGIN %s-----' % pem_marker),
+ b('-----END %s-----' % pem_marker))
+
+def load_pem(contents, pem_marker):
+ '''Loads a PEM file.
+
+ @param contents: the contents of the file to interpret
+ @param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
+ when your file has '-----BEGIN RSA PRIVATE KEY-----' and
+ '-----END RSA PRIVATE KEY-----' markers.
+
+ @return the base64-decoded content between the start and end markers.
+
+ @raise ValueError: when the content is invalid, for example when the start
+ marker cannot be found.
+
+ '''
+
+ (pem_start, pem_end) = _markers(pem_marker)
+
+ pem_lines = []
+ in_pem_part = False
+
+ for line in contents.splitlines():
+ line = line.strip()
+
+ # Skip empty lines
+ if not line:
+ continue
+
+ # Handle start marker
+ if line == pem_start:
+ if in_pem_part:
+ raise ValueError('Seen start marker "%s" twice' % pem_start)
+
+ in_pem_part = True
+ continue
+
+ # Skip stuff before first marker
+ if not in_pem_part:
+ continue
+
+ # Handle end marker
+ if in_pem_part and line == pem_end:
+ in_pem_part = False
+ break
+
+ # Load fields
+ if b(':') in line:
+ continue
+
+ pem_lines.append(line)
+
+ # Do some sanity checks
+ if not pem_lines:
+ raise ValueError('No PEM start marker "%s" found' % pem_start)
+
+ if in_pem_part:
+ raise ValueError('No PEM end marker "%s" found' % pem_end)
+
+ # Base64-decode the contents
+ pem = b('').join(pem_lines)
+ return base64.decodestring(pem)
+
+
+def save_pem(contents, pem_marker):
+ '''Saves a PEM file.
+
+ @param contents: the contents to encode in PEM format
+ @param pem_marker: the marker of the PEM content, such as 'RSA PRIVATE KEY'
+ when your file has '-----BEGIN RSA PRIVATE KEY-----' and
+ '-----END RSA PRIVATE KEY-----' markers.
+
+    @return the contents encoded in PEM format, including the start and
+        end markers.
+
+ '''
+
+ (pem_start, pem_end) = _markers(pem_marker)
+
+ b64 = base64.encodestring(contents).replace(b('\n'), b(''))
+ pem_lines = [pem_start]
+
+ for block_start in range(0, len(b64), 64):
+ block = b64[block_start:block_start + 64]
+ pem_lines.append(block)
+
+ pem_lines.append(pem_end)
+ pem_lines.append(b(''))
+
+ return b('\n').join(pem_lines)
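+
+# Illustrative round trip (a sketch, not an upstream doctest; byte-string
+# output shown as under Python 2, where str is bytes):
+#
+# >>> pem = save_pem(b'hello', 'TEST')
+# >>> load_pem(pem, 'TEST')
+# 'hello'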
+
diff --git a/third_party/python/rsa/rsa/pkcs1.py b/third_party/python/rsa/rsa/pkcs1.py
new file mode 100644
index 0000000000..15e4cf639e
--- /dev/null
+++ b/third_party/python/rsa/rsa/pkcs1.py
@@ -0,0 +1,391 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Functions for PKCS#1 version 1.5 encryption and signing
+
+This module implements certain functionality from PKCS#1 version 1.5. For a
+very clear example, read http://www.di-mgt.com.au/rsa_alg.html#pkcs1schemes
+
+At least 8 bytes of random padding are used when encrypting a message. This makes
+these methods much more secure than the ones in the ``rsa`` module.
+
+WARNING: this module leaks information when decryption or verification fails.
+The exceptions that are raised contain the Python traceback information, which
+can be used to deduce where in the process the failure occurred. DO NOT PASS
+SUCH INFORMATION to your users.
+'''
+
+import hashlib
+import os
+
+from rsa._compat import b
+from rsa import common, transform, core, varblock
+
+# ASN.1 codes that describe the hash algorithm used.
+HASH_ASN1 = {
+ 'MD5': b('\x30\x20\x30\x0c\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x05\x05\x00\x04\x10'),
+ 'SHA-1': b('\x30\x21\x30\x09\x06\x05\x2b\x0e\x03\x02\x1a\x05\x00\x04\x14'),
+ 'SHA-256': b('\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20'),
+ 'SHA-384': b('\x30\x41\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02\x05\x00\x04\x30'),
+ 'SHA-512': b('\x30\x51\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x03\x05\x00\x04\x40'),
+}
+
+HASH_METHODS = {
+ 'MD5': hashlib.md5,
+ 'SHA-1': hashlib.sha1,
+ 'SHA-256': hashlib.sha256,
+ 'SHA-384': hashlib.sha384,
+ 'SHA-512': hashlib.sha512,
+}
+
+class CryptoError(Exception):
+ '''Base class for all exceptions in this module.'''
+
+class DecryptionError(CryptoError):
+ '''Raised when decryption fails.'''
+
+class VerificationError(CryptoError):
+ '''Raised when verification fails.'''
+
+def _pad_for_encryption(message, target_length):
+ r'''Pads the message for encryption, returning the padded message.
+
+ :return: 00 02 RANDOM_DATA 00 MESSAGE
+
+ >>> block = _pad_for_encryption('hello', 16)
+ >>> len(block)
+ 16
+ >>> block[0:2]
+ '\x00\x02'
+ >>> block[-6:]
+ '\x00hello'
+
+ '''
+
+ max_msglength = target_length - 11
+ msglength = len(message)
+
+ if msglength > max_msglength:
+ raise OverflowError('%i bytes needed for message, but there is only'
+ ' space for %i' % (msglength, max_msglength))
+
+ # Get random padding
+ padding = b('')
+ padding_length = target_length - msglength - 3
+
+ # We remove 0-bytes, so we'll end up with less padding than we've asked for,
+ # so keep adding data until we're at the correct length.
+ while len(padding) < padding_length:
+ needed_bytes = padding_length - len(padding)
+
+        # Always read at least 5 bytes more than we need, and trim off the
+        # rest after removing the 0-bytes. This increases the chance of
+        # getting enough bytes, especially when needed_bytes is small
+ new_padding = os.urandom(needed_bytes + 5)
+ new_padding = new_padding.replace(b('\x00'), b(''))
+ padding = padding + new_padding[:needed_bytes]
+
+ assert len(padding) == padding_length
+
+ return b('').join([b('\x00\x02'),
+ padding,
+ b('\x00'),
+ message])
+
+
+def _pad_for_signing(message, target_length):
+ r'''Pads the message for signing, returning the padded message.
+
+ The padding is always a repetition of FF bytes.
+
+ :return: 00 01 PADDING 00 MESSAGE
+
+ >>> block = _pad_for_signing('hello', 16)
+ >>> len(block)
+ 16
+ >>> block[0:2]
+ '\x00\x01'
+ >>> block[-6:]
+ '\x00hello'
+ >>> block[2:-6]
+ '\xff\xff\xff\xff\xff\xff\xff\xff'
+
+ '''
+
+ max_msglength = target_length - 11
+ msglength = len(message)
+
+ if msglength > max_msglength:
+ raise OverflowError('%i bytes needed for message, but there is only'
+ ' space for %i' % (msglength, max_msglength))
+
+ padding_length = target_length - msglength - 3
+
+ return b('').join([b('\x00\x01'),
+ padding_length * b('\xff'),
+ b('\x00'),
+ message])
+
+
+def encrypt(message, pub_key):
+ '''Encrypts the given message using PKCS#1 v1.5
+
+ :param message: the message to encrypt. Must be a byte string no longer than
+ ``k-11`` bytes, where ``k`` is the number of bytes needed to encode
+ the ``n`` component of the public key.
+ :param pub_key: the :py:class:`rsa.PublicKey` to encrypt with.
+ :raise OverflowError: when the message is too large to fit in the padded
+ block.
+
+ >>> from rsa import key, common
+ >>> (pub_key, priv_key) = key.newkeys(256)
+ >>> message = 'hello'
+ >>> crypto = encrypt(message, pub_key)
+
+ The crypto text should be just as long as the public key 'n' component:
+
+ >>> len(crypto) == common.byte_size(pub_key.n)
+ True
+
+ '''
+
+ keylength = common.byte_size(pub_key.n)
+ padded = _pad_for_encryption(message, keylength)
+
+ payload = transform.bytes2int(padded)
+ encrypted = core.encrypt_int(payload, pub_key.e, pub_key.n)
+ block = transform.int2bytes(encrypted, keylength)
+
+ return block
+
+def decrypt(crypto, priv_key):
+ r'''Decrypts the given message using PKCS#1 v1.5
+
+ The decryption is considered 'failed' when the resulting cleartext doesn't
+ start with the bytes 00 02, or when the 00 byte between the padding and
+ the message cannot be found.
+
+ :param crypto: the crypto text as returned by :py:func:`rsa.encrypt`
+ :param priv_key: the :py:class:`rsa.PrivateKey` to decrypt with.
+ :raise DecryptionError: when the decryption fails. No details are given as
+ to why the code thinks the decryption fails, as this would leak
+ information about the private key.
+
+
+ >>> import rsa
+ >>> (pub_key, priv_key) = rsa.newkeys(256)
+
+ It works with strings:
+
+ >>> crypto = encrypt('hello', pub_key)
+ >>> decrypt(crypto, priv_key)
+ 'hello'
+
+ And with binary data:
+
+ >>> crypto = encrypt('\x00\x00\x00\x00\x01', pub_key)
+ >>> decrypt(crypto, priv_key)
+ '\x00\x00\x00\x00\x01'
+
+ Altering the encrypted information will *likely* cause a
+ :py:class:`rsa.pkcs1.DecryptionError`. If you want to be *sure*, use
+ :py:func:`rsa.sign`.
+
+
+ .. warning::
+
+ Never display the stack trace of a
+ :py:class:`rsa.pkcs1.DecryptionError` exception. It shows where in the
+ code the exception occurred, and thus leaks information about the key.
+ It's only a tiny bit of information, but every bit makes cracking the
+ keys easier.
+
+ >>> crypto = encrypt('hello', pub_key)
+ >>> crypto = crypto[0:5] + 'X' + crypto[6:] # change a byte
+ >>> decrypt(crypto, priv_key)
+ Traceback (most recent call last):
+ ...
+ DecryptionError: Decryption failed
+
+ '''
+
+ blocksize = common.byte_size(priv_key.n)
+ encrypted = transform.bytes2int(crypto)
+ decrypted = core.decrypt_int(encrypted, priv_key.d, priv_key.n)
+ cleartext = transform.int2bytes(decrypted, blocksize)
+
+ # If we can't find the cleartext marker, decryption failed.
+ if cleartext[0:2] != b('\x00\x02'):
+ raise DecryptionError('Decryption failed')
+
+ # Find the 00 separator between the padding and the message
+ try:
+ sep_idx = cleartext.index(b('\x00'), 2)
+ except ValueError:
+ raise DecryptionError('Decryption failed')
+
+ return cleartext[sep_idx+1:]
+
+def sign(message, priv_key, hash):
+ '''Signs the message with the private key.
+
+ Hashes the message, then signs the hash with the given key. This is known
+ as a "detached signature", because the message itself isn't altered.
+
+ :param message: the message to sign. Can be an 8-bit string or a file-like
+ object. If ``message`` has a ``read()`` method, it is assumed to be a
+ file-like object.
+ :param priv_key: the :py:class:`rsa.PrivateKey` to sign with
+ :param hash: the hash method used on the message. Use 'MD5', 'SHA-1',
+ 'SHA-256', 'SHA-384' or 'SHA-512'.
+ :return: a message signature block.
+ :raise OverflowError: if the private key is too small to contain the
+ requested hash.
+
+ '''
+
+ # Get the ASN1 code for this hash method
+ if hash not in HASH_ASN1:
+ raise ValueError('Invalid hash method: %s' % hash)
+ asn1code = HASH_ASN1[hash]
+
+    # Calculate the hash; bind it to a new name so we don't clobber the
+    # method name passed in as ``hash``
+    message_hash = _hash(message, hash)
+
+    # Encrypt the hash with the private key
+    cleartext = asn1code + message_hash
+ keylength = common.byte_size(priv_key.n)
+ padded = _pad_for_signing(cleartext, keylength)
+
+ payload = transform.bytes2int(padded)
+ encrypted = core.encrypt_int(payload, priv_key.d, priv_key.n)
+ block = transform.int2bytes(encrypted, keylength)
+
+ return block
+
+def verify(message, signature, pub_key):
+ '''Verifies that the signature matches the message.
+
+ The hash method is detected automatically from the signature.
+
+ :param message: the signed message. Can be an 8-bit string or a file-like
+ object. If ``message`` has a ``read()`` method, it is assumed to be a
+ file-like object.
+ :param signature: the signature block, as created with :py:func:`rsa.sign`.
+ :param pub_key: the :py:class:`rsa.PublicKey` of the person signing the message.
+ :raise VerificationError: when the signature doesn't match the message.
+
+ .. warning::
+
+ Never display the stack trace of a
+ :py:class:`rsa.pkcs1.VerificationError` exception. It shows where in
+ the code the exception occurred, and thus leaks information about the
+ key. It's only a tiny bit of information, but every bit makes cracking
+ the keys easier.
+
+ '''
+
+ blocksize = common.byte_size(pub_key.n)
+ encrypted = transform.bytes2int(signature)
+ decrypted = core.decrypt_int(encrypted, pub_key.e, pub_key.n)
+ clearsig = transform.int2bytes(decrypted, blocksize)
+
+ # If we can't find the signature marker, verification failed.
+ if clearsig[0:2] != b('\x00\x01'):
+ raise VerificationError('Verification failed')
+
+ # Find the 00 separator between the padding and the payload
+ try:
+ sep_idx = clearsig.index(b('\x00'), 2)
+ except ValueError:
+ raise VerificationError('Verification failed')
+
+ # Get the hash and the hash method
+ (method_name, signature_hash) = _find_method_hash(clearsig[sep_idx+1:])
+ message_hash = _hash(message, method_name)
+
+ # Compare the real hash to the hash in the signature
+ if message_hash != signature_hash:
+ raise VerificationError('Verification failed')
+
+ return True
+
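+# Illustrative sign/verify round trip (a sketch, not an upstream doctest),
+# mirroring the doctest style used elsewhere in this module:
+#
+# >>> import rsa
+# >>> (pub, priv) = rsa.newkeys(512)
+# >>> signature = sign('hello', priv, 'SHA-256')
+# >>> verify('hello', signature, pub)
+# True
+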
+def _hash(message, method_name):
+ '''Returns the message digest.
+
+ :param message: the signed message. Can be an 8-bit string or a file-like
+ object. If ``message`` has a ``read()`` method, it is assumed to be a
+ file-like object.
+ :param method_name: the hash method, must be a key of
+ :py:const:`HASH_METHODS`.
+
+ '''
+
+ if method_name not in HASH_METHODS:
+ raise ValueError('Invalid hash method: %s' % method_name)
+
+ method = HASH_METHODS[method_name]
+ hasher = method()
+
+ if hasattr(message, 'read') and hasattr(message.read, '__call__'):
+ # read as 1K blocks
+ for block in varblock.yield_fixedblocks(message, 1024):
+ hasher.update(block)
+ else:
+ # hash the message object itself.
+ hasher.update(message)
+
+ return hasher.digest()
+
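+# Illustrative check (a sketch, not an upstream doctest): hashing a plain
+# byte string gives the same digest as calling hashlib directly.
+#
+# >>> _hash(b'hello', 'SHA-256') == hashlib.sha256(b'hello').digest()
+# True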
+
+def _find_method_hash(method_hash):
+ '''Finds the hash method and the hash itself.
+
+ :param method_hash: ASN1 code for the hash method concatenated with the
+ hash itself.
+
+ :return: tuple (method, hash) where ``method`` is the used hash method, and
+ ``hash`` is the hash itself.
+
+    :raise VerificationError: when the hash method cannot be found
+
+ '''
+
+ for (hashname, asn1code) in HASH_ASN1.items():
+ if not method_hash.startswith(asn1code):
+ continue
+
+ return (hashname, method_hash[len(asn1code):])
+
+ raise VerificationError('Verification failed')
+
+
+__all__ = ['encrypt', 'decrypt', 'sign', 'verify',
+ 'DecryptionError', 'VerificationError', 'CryptoError']
+
+if __name__ == '__main__':
+ print('Running doctests 1000x or until failure')
+ import doctest
+
+ for count in range(1000):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count and count % 100 == 0:
+ print('%i times' % count)
+
+ print('Doctests done')
diff --git a/third_party/python/rsa/rsa/prime.py b/third_party/python/rsa/rsa/prime.py
new file mode 100644
index 0000000000..7422eb1d28
--- /dev/null
+++ b/third_party/python/rsa/rsa/prime.py
@@ -0,0 +1,166 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Numerical functions related to primes.
+
+Implementation based on the book Algorithm Design by Michael T. Goodrich and
+Roberto Tamassia, 2002.
+'''
+
+__all__ = ['getprime', 'are_relatively_prime']
+
+import rsa.randnum
+
+def gcd(p, q):
+ '''Returns the greatest common divisor of p and q
+
+ >>> gcd(48, 180)
+ 12
+ '''
+
+ while q != 0:
+ if p < q: (p,q) = (q,p)
+ (p,q) = (q, p % q)
+ return p
+
+
+def jacobi(a, b):
+ '''Calculates the value of the Jacobi symbol (a/b) where both a and b are
+ positive integers, and b is odd
+
+ :returns: -1, 0 or 1
+ '''
+
+ assert a > 0
+ assert b > 0
+
+ if a == 0: return 0
+ result = 1
+ while a > 1:
+ if a & 1:
+ if ((a-1)*(b-1) >> 2) & 1:
+ result = -result
+ a, b = b % a, a
+ else:
+ if (((b * b) - 1) >> 3) & 1:
+ result = -result
+ a >>= 1
+ if a == 0: return 0
+ return result
+
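+# Illustrative worked example (a sketch, not an upstream doctest): 9907 is
+# prime and 1001 = 7 * 11 * 13 is not a quadratic residue modulo 9907, a
+# standard textbook value for the Jacobi (here Legendre) symbol:
+#
+# >>> jacobi(1001, 9907)
+# -1
+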
+def jacobi_witness(x, n):
+ '''Returns False if n is an Euler pseudo-prime with base x, and
+ True otherwise.
+ '''
+
+ j = jacobi(x, n) % n
+
+ f = pow(x, n >> 1, n)
+
+ if j == f: return False
+ return True
+
+def randomized_primality_testing(n, k):
+    '''Calculates whether n is composite (a result that is always correct)
+    or prime (a result that is wrong with probability at most 2**-k).
+
+    Returns False if the number is certainly composite, and True if it is
+    probably prime.
+    '''
+
+    # At least 50% of Jacobi witnesses can report the compositeness of a
+    # non-prime number.
+
+    # The implemented algorithm using the Jacobi witness function has error
+    # probability q <= 0.5, according to Goodrich et al.
+ #
+ # q = 0.5
+ # t = int(math.ceil(k / log(1 / q, 2)))
+ # So t = k / log(2, 2) = k / 1 = k
+ # this means we can use range(k) rather than range(t)
+
+ for _ in range(k):
+ x = rsa.randnum.randint(n-1)
+ if jacobi_witness(x, n): return False
+
+ return True
+
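+# A note on the error bound (illustrative): with the k=6 used by is_prime
+# below, a composite number is reported prime with probability at most
+# 2**-6, i.e. about 1.6%.
+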
+def is_prime(number):
+ '''Returns True if the number is prime, and False otherwise.
+
+ >>> is_prime(42)
+ False
+ >>> is_prime(41)
+ True
+ '''
+
+ return randomized_primality_testing(number, 6)
+
+def getprime(nbits):
+ '''Returns a prime number that can be stored in 'nbits' bits.
+
+ >>> p = getprime(128)
+ >>> is_prime(p-1)
+ False
+ >>> is_prime(p)
+ True
+ >>> is_prime(p+1)
+ False
+
+ >>> from rsa import common
+ >>> common.bit_size(p) == 128
+ True
+
+ '''
+
+ while True:
+ integer = rsa.randnum.read_random_int(nbits)
+
+ # Make sure it's odd
+ integer |= 1
+
+ # Test for primeness
+ if is_prime(integer):
+ return integer
+
+ # Retry if not prime
+
+
+def are_relatively_prime(a, b):
+ '''Returns True if a and b are relatively prime, and False if they
+ are not.
+
+    >>> are_relatively_prime(2, 3)
+    True
+    >>> are_relatively_prime(2, 4)
+    False
+ '''
+
+ d = gcd(a, b)
+ return (d == 1)
+
+if __name__ == '__main__':
+ print('Running doctests 1000x or until failure')
+ import doctest
+
+ for count in range(1000):
+ (failures, tests) = doctest.testmod()
+ if failures:
+ break
+
+ if count and count % 100 == 0:
+ print('%i times' % count)
+
+ print('Doctests done')
diff --git a/third_party/python/rsa/rsa/randnum.py b/third_party/python/rsa/rsa/randnum.py
new file mode 100644
index 0000000000..0e782744c0
--- /dev/null
+++ b/third_party/python/rsa/rsa/randnum.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Functions for generating random numbers.'''
+
+# Source inspired by code by Yesudeep Mangalapilly <yesudeep@gmail.com>
+
+import os
+
+from rsa import common, transform
+from rsa._compat import byte
+
+def read_random_bits(nbits):
+ '''Reads 'nbits' random bits.
+
+    If nbits isn't a whole number of bytes, an extra byte will be prepended
+    with only the lower bits set.
+ '''
+
+ nbytes, rbits = divmod(nbits, 8)
+
+ # Get the random bytes
+ randomdata = os.urandom(nbytes)
+
+ # Add the remaining random bits
+ if rbits > 0:
+ randomvalue = ord(os.urandom(1))
+ randomvalue >>= (8 - rbits)
+ randomdata = byte(randomvalue) + randomdata
+
+ return randomdata
+
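+# Illustrative behaviour (a sketch, not an upstream doctest): asking for 12
+# bits yields two bytes, and the prepended byte can only have its lower
+# 4 bits set.
+#
+# >>> data = read_random_bits(12)
+# >>> len(data)
+# 2
+# >>> ord(data[0:1]) < 16
+# True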
+
+def read_random_int(nbits):
+    '''Reads a random integer of exactly nbits bits. The most significant
+    bit is always set, so the value really occupies nbits bits.
+    '''
+
+ randomdata = read_random_bits(nbits)
+ value = transform.bytes2int(randomdata)
+
+ # Ensure that the number is large enough to just fill out the required
+ # number of bits.
+ value |= 1 << (nbits - 1)
+
+ return value
+
+def randint(maxvalue):
+ '''Returns a random integer x with 1 <= x <= maxvalue
+
+ May take a very long time in specific situations. If maxvalue needs N bits
+ to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
+ is.
+ '''
+
+ bit_size = common.bit_size(maxvalue)
+
+ tries = 0
+ while True:
+ value = read_random_int(bit_size)
+ if value <= maxvalue:
+ break
+
+ if tries and tries % 10 == 0:
+ # After a lot of tries to get the right number of bits but still
+ # smaller than maxvalue, decrease the number of bits by 1. That'll
+ # dramatically increase the chances to get a large enough number.
+ bit_size -= 1
+ tries += 1
+
+ return value
+
+
diff --git a/third_party/python/rsa/rsa/transform.py b/third_party/python/rsa/rsa/transform.py
new file mode 100644
index 0000000000..c740b2d275
--- /dev/null
+++ b/third_party/python/rsa/rsa/transform.py
@@ -0,0 +1,220 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Data transformation functions.
+
+From bytes to a number, number to bytes, etc.
+'''
+
+from __future__ import absolute_import
+
+try:
+ # We'll use psyco if available on 32-bit architectures to speed up code.
+ # Using psyco (if available) cuts down the execution time on Python 2.5
+ # at least by half.
+ import psyco
+ psyco.full()
+except ImportError:
+ pass
+
+import binascii
+from struct import pack
+from rsa import common
+from rsa._compat import is_integer, b, byte, get_word_alignment, ZERO_BYTE, EMPTY_BYTE
+
+
+def bytes2int(raw_bytes):
+ r'''Converts a list of bytes or an 8-bit string to an integer.
+
+    When using unicode strings, encode them to a byte encoding such as UTF-8 first.
+
+ >>> (((128 * 256) + 64) * 256) + 15
+ 8405007
+ >>> bytes2int('\x80@\x0f')
+ 8405007
+
+ '''
+
+ return int(binascii.hexlify(raw_bytes), 16)
+
+
+def _int2bytes(number, block_size=None):
+ r'''Converts a number to a string of bytes.
+
+ Usage::
+
+ >>> _int2bytes(123456789)
+ '\x07[\xcd\x15'
+ >>> bytes2int(_int2bytes(123456789))
+ 123456789
+
+ >>> _int2bytes(123456789, 6)
+ '\x00\x00\x07[\xcd\x15'
+ >>> bytes2int(_int2bytes(123456789, 128))
+ 123456789
+
+ >>> _int2bytes(123456789, 3)
+ Traceback (most recent call last):
+ ...
+ OverflowError: Needed 4 bytes for number, but block size is 3
+
+ @param number: the number to convert
+ @param block_size: the number of bytes to output. If the number encoded to
+ bytes is less than this, the block will be zero-padded. When not given,
+ the returned block is not padded.
+
+ @throws OverflowError when block_size is given and the number takes up more
+ bytes than fit into the block.
+ '''
+ # Type checking
+ if not is_integer(number):
+ raise TypeError("You must pass an integer for 'number', not %s" %
+ number.__class__)
+
+ if number < 0:
+ raise ValueError('Negative numbers cannot be used: %i' % number)
+
+ # Do some bounds checking
+ if number == 0:
+ needed_bytes = 1
+ raw_bytes = [ZERO_BYTE]
+ else:
+ needed_bytes = common.byte_size(number)
+ raw_bytes = []
+
+    # You cannot compare None > 0 in Python 3.x; it fails with a TypeError.
+ if block_size and block_size > 0:
+ if needed_bytes > block_size:
+ raise OverflowError('Needed %i bytes for number, but block size '
+ 'is %i' % (needed_bytes, block_size))
+
+ # Convert the number to bytes.
+ while number > 0:
+ raw_bytes.insert(0, byte(number & 0xFF))
+ number >>= 8
+
+ # Pad with zeroes to fill the block
+ if block_size and block_size > 0:
+ padding = (block_size - needed_bytes) * ZERO_BYTE
+ else:
+ padding = EMPTY_BYTE
+
+ return padding + EMPTY_BYTE.join(raw_bytes)
+
+
+def bytes_leading(raw_bytes, needle=ZERO_BYTE):
+ '''
+ Finds the number of prefixed byte occurrences in the haystack.
+
+ Useful when you want to deal with padding.
+
+ :param raw_bytes:
+ Raw bytes.
+ :param needle:
+        The byte to count. Defaults to the NUL byte (0x00).
+ :returns:
+ The number of leading needle bytes.
+ '''
+ leading = 0
+ # Indexing keeps compatibility between Python 2.x and Python 3.x
+ _byte = needle[0]
+ for x in raw_bytes:
+ if x == _byte:
+ leading += 1
+ else:
+ break
+ return leading
+
+
+def int2bytes(number, fill_size=None, chunk_size=None, overflow=False):
+ '''
+    Convert an unsigned integer to bytes (base-256 representation).
+
+ Does not preserve leading zeros if you don't specify a chunk size or
+ fill size.
+
+ .. NOTE:
+ You must not specify both fill_size and chunk_size. Only one
+ of them is allowed.
+
+ :param number:
+ Integer value
+ :param fill_size:
+ If the optional fill size is given the length of the resulting
+ byte string is expected to be the fill size and will be padded
+ with prefix zero bytes to satisfy that length.
+ :param chunk_size:
+ If optional chunk size is given and greater than zero, pad the front of
+ the byte string with binary zeros so that the length is a multiple of
+ ``chunk_size``.
+ :param overflow:
+ ``False`` (default). If this is ``True``, no ``OverflowError``
+ will be raised when the fill_size is shorter than the length
+ of the generated byte sequence. Instead the byte sequence will
+ be returned as is.
+ :returns:
+ Raw bytes (base-256 representation).
+ :raises:
+        ``OverflowError`` when fill_size is given and the number takes up
+        more bytes than fit into the block. This is raised only when the
+        ``overflow`` argument is ``False``; otherwise, the oversized byte
+        sequence is returned as is.
+ '''
+ if number < 0:
+ raise ValueError("Number must be an unsigned integer: %d" % number)
+
+ if fill_size and chunk_size:
+ raise ValueError("You can either fill or pad chunks, but not both")
+
+    # Ensure this is an integer; the bitwise AND raises a TypeError for
+    # non-integer values such as floats or strings.
+    number & 1
+
+ raw_bytes = b('')
+
+ # Pack the integer one machine word at a time into bytes.
+ num = number
+ word_bits, _, max_uint, pack_type = get_word_alignment(num)
+ pack_format = ">%s" % pack_type
+ while num > 0:
+ raw_bytes = pack(pack_format, num & max_uint) + raw_bytes
+ num >>= word_bits
+ # Obtain the index of the first non-zero byte.
+ zero_leading = bytes_leading(raw_bytes)
+ if number == 0:
+ raw_bytes = ZERO_BYTE
+ # De-padding.
+ raw_bytes = raw_bytes[zero_leading:]
+
+ length = len(raw_bytes)
+ if fill_size and fill_size > 0:
+ if not overflow and length > fill_size:
+ raise OverflowError(
+ "Need %d bytes for number, but fill size is %d" %
+ (length, fill_size)
+ )
+ raw_bytes = raw_bytes.rjust(fill_size, ZERO_BYTE)
+ elif chunk_size and chunk_size > 0:
+ remainder = length % chunk_size
+ if remainder:
+ padding_size = chunk_size - remainder
+ raw_bytes = raw_bytes.rjust(length + padding_size, ZERO_BYTE)
+ return raw_bytes
+
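+# Illustrative parity check (a sketch, not an upstream doctest): for padded
+# output, the public int2bytes agrees with the simpler _int2bytes above.
+#
+# >>> int2bytes(123456789, fill_size=6) == _int2bytes(123456789, 6)
+# True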
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
+
diff --git a/third_party/python/rsa/rsa/util.py b/third_party/python/rsa/rsa/util.py
new file mode 100644
index 0000000000..5bbb70be18
--- /dev/null
+++ b/third_party/python/rsa/rsa/util.py
@@ -0,0 +1,81 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''Utility functions.'''
+
+from __future__ import with_statement, print_function
+
+import sys
+from optparse import OptionParser
+
+import rsa.key
+
+def private_to_public():
+ '''Reads a private key and outputs the corresponding public key.'''
+
+ # Parse the CLI options
+ parser = OptionParser(usage='usage: %prog [options]',
+ description='Reads a private key and outputs the '
+ 'corresponding public key. Both private and public keys use '
+ 'the format described in PKCS#1 v1.5')
+
+ parser.add_option('-i', '--input', dest='infilename', type='string',
+ help='Input filename. Reads from stdin if not specified')
+ parser.add_option('-o', '--output', dest='outfilename', type='string',
+                      help='Output filename. Writes to stdout if not specified')
+
+ parser.add_option('--inform', dest='inform',
+ help='key format of input - default PEM',
+ choices=('PEM', 'DER'), default='PEM')
+
+ parser.add_option('--outform', dest='outform',
+ help='key format of output - default PEM',
+ choices=('PEM', 'DER'), default='PEM')
+
+ (cli, cli_args) = parser.parse_args(sys.argv)
+
+ # Read the input data
+ if cli.infilename:
+ print('Reading private key from %s in %s format' % \
+ (cli.infilename, cli.inform), file=sys.stderr)
+ with open(cli.infilename, 'rb') as infile:
+ in_data = infile.read()
+ else:
+ print('Reading private key from stdin in %s format' % cli.inform,
+ file=sys.stderr)
+ in_data = sys.stdin.read().encode('ascii')
+
+ assert type(in_data) == bytes, type(in_data)
+
+
+ # Take the public fields and create a public key
+ priv_key = rsa.key.PrivateKey.load_pkcs1(in_data, cli.inform)
+ pub_key = rsa.key.PublicKey(priv_key.n, priv_key.e)
+
+ # Save to the output file
+ out_data = pub_key.save_pkcs1(cli.outform)
+
+ if cli.outfilename:
+ print('Writing public key to %s in %s format' % \
+ (cli.outfilename, cli.outform), file=sys.stderr)
+ with open(cli.outfilename, 'wb') as outfile:
+ outfile.write(out_data)
+ else:
+ print('Writing public key to stdout in %s format' % cli.outform,
+ file=sys.stderr)
+ sys.stdout.write(out_data.decode('ascii'))
+
+
diff --git a/third_party/python/rsa/rsa/varblock.py b/third_party/python/rsa/rsa/varblock.py
new file mode 100644
index 0000000000..c7d96ae6a7
--- /dev/null
+++ b/third_party/python/rsa/rsa/varblock.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+'''VARBLOCK file support
+
+The VARBLOCK file format is as follows, where || denotes byte concatenation:
+
+ FILE := VERSION || BLOCK || BLOCK ...
+
+ BLOCK := LENGTH || DATA
+
+ LENGTH := varint-encoded length of the subsequent data. Varint comes from
+ Google Protobuf, and encodes an integer into a variable number of bytes.
+ Each byte uses the 7 lowest bits to encode the value. The highest bit set
+ to 1 indicates the next byte is also part of the varint. The last byte will
+ have this bit set to 0.
+
+This file format is called the VARBLOCK format, in line with the varint format
+used to denote the block sizes.
+
+'''
+
+from rsa._compat import byte, b
+
+
+ZERO_BYTE = b('\x00')
+VARBLOCK_VERSION = 1
+
+def read_varint(infile):
+ '''Reads a varint from the file.
+
+    If EOF is reached before any byte has been read, (0, 0) is returned. If
+    EOF occurs after at least one byte has already been read, an EOFError is
+    raised.
+
+ @param infile: the file-like object to read from. It should have a read()
+ method.
+ @returns (varint, length), the read varint and the number of read bytes.
+ '''
+
+ varint = 0
+ read_bytes = 0
+
+ while True:
+ char = infile.read(1)
+ if len(char) == 0:
+ if read_bytes == 0:
+ return (0, 0)
+ raise EOFError('EOF while reading varint, value is %i so far' %
+ varint)
+
+        # Use a distinct name so we don't shadow rsa._compat.byte
+        byte_value = ord(char)
+        varint += (byte_value & 0x7F) << (7 * read_bytes)
+
+        read_bytes += 1
+
+        if not byte_value & 0x80:
+ return (varint, read_bytes)
+
+
+def write_varint(outfile, value):
+ '''Writes a varint to a file.
+
+ @param outfile: the file-like object to write to. It should have a write()
+ method.
+ @returns the number of written bytes.
+ '''
+
+ # there is a big difference between 'write the value 0' (this case) and
+ # 'there is nothing left to write' (the false-case of the while loop)
+
+ if value == 0:
+ outfile.write(ZERO_BYTE)
+ return 1
+
+ written_bytes = 0
+ while value > 0:
+ to_write = value & 0x7f
+ value = value >> 7
+
+ if value > 0:
+ to_write |= 0x80
+
+ outfile.write(byte(to_write))
+ written_bytes += 1
+
+ return written_bytes
+
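+# Illustrative round trip (a sketch, not an upstream doctest), using
+# io.BytesIO as the file-like object; 300 encodes as the two bytes
+# 0xAC 0x02:
+#
+# >>> from io import BytesIO
+# >>> f = BytesIO()
+# >>> write_varint(f, 300)
+# 2
+# >>> _ = f.seek(0)
+# >>> read_varint(f)
+# (300, 2)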
+
+def yield_varblocks(infile):
+ '''Generator, yields each block in the input file.
+
+ @param infile: file to read, is expected to have the VARBLOCK format as
+ described in the module's docstring.
+ @yields the contents of each block.
+ '''
+
+ # Check the version number
+ first_char = infile.read(1)
+ if len(first_char) == 0:
+ raise EOFError('Unable to read VARBLOCK version number')
+
+ version = ord(first_char)
+ if version != VARBLOCK_VERSION:
+ raise ValueError('VARBLOCK version %i not supported' % version)
+
+ while True:
+ (block_size, read_bytes) = read_varint(infile)
+
+ # EOF at block boundary, that's fine.
+ if read_bytes == 0 and block_size == 0:
+ break
+
+ block = infile.read(block_size)
+
+ read_size = len(block)
+ if read_size != block_size:
+ raise EOFError('Block size is %i, but could read only %i bytes' %
+ (block_size, read_size))
+
+ yield block
+
+
+def yield_fixedblocks(infile, blocksize):
+ '''Generator, yields each block of ``blocksize`` bytes in the input file.
+
+ :param infile: file to read and separate in blocks.
+ :returns: a generator that yields the contents of each block
+ '''
+
+ while True:
+ block = infile.read(blocksize)
+
+ read_bytes = len(block)
+ if read_bytes == 0:
+ break
+
+ yield block
+
+ if read_bytes < blocksize:
+ break
+
diff --git a/third_party/python/rsa/run_tests.py b/third_party/python/rsa/run_tests.py
new file mode 100644
index 0000000000..e0f249081f
--- /dev/null
+++ b/third_party/python/rsa/run_tests.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+import unittest2 as unittest
+
+current_path = os.path.abspath(os.path.dirname(__file__))
+tests_path = os.path.join(current_path, 'tests')
+sys.path[0:0] = [
+ current_path,
+ tests_path,
+]
+
+all_tests = [f[:-3] for f in os.listdir(tests_path)
+ if f.startswith('test_') and f.endswith(".py")]
+
+def get_suite(tests):
+ tests = sorted(tests)
+ suite = unittest.TestSuite()
+ loader = unittest.TestLoader()
+ for test in tests:
+ suite.addTest(loader.loadTestsFromName(test))
+ return suite
+
+if __name__ == '__main__':
+ """
+ To run all tests:
+ $ python run_tests.py
+ To run a single test:
+ $ python run_tests.py app
+ To run a couple of tests:
+ $ python run_tests.py app config sessions
+ To run code coverage:
+ $ coverage run run_tests.py
+ $ coverage report -m
+ """
+ tests = sys.argv[1:]
+ if not tests:
+ tests = all_tests
+ tests = ['%s' % t for t in tests]
+ suite = get_suite(tests)
+ unittest.TextTestRunner(verbosity=1).run(suite)
diff --git a/third_party/python/rsa/setup.cfg b/third_party/python/rsa/setup.cfg
new file mode 100644
index 0000000000..2675c2767c
--- /dev/null
+++ b/third_party/python/rsa/setup.cfg
@@ -0,0 +1,8 @@
+[nosetests]
+verbosity = 2
+
+[egg_info]
+tag_date = 0
+tag_build =
+tag_svn_revision = 0
+
diff --git a/third_party/python/rsa/setup.py b/third_party/python/rsa/setup.py
new file mode 100755
index 0000000000..8a2df8d1f8
--- /dev/null
+++ b/third_party/python/rsa/setup.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+
+from setuptools import setup
+
+import rsa
+
+setup(name='rsa',
+ version=rsa.__version__,
+ description='Pure-Python RSA implementation',
+ author='Sybren A. Stuvel',
+ author_email='sybren@stuvel.eu',
+ maintainer='Sybren A. Stuvel',
+ maintainer_email='sybren@stuvel.eu',
+ url='http://stuvel.eu/rsa',
+ packages=['rsa'],
+ license='ASL 2',
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: Education',
+ 'Intended Audience :: Information Technology',
+ 'License :: OSI Approved :: Apache Software License',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ 'Topic :: Security :: Cryptography',
+ ],
+ install_requires=[
+ 'pyasn1 >= 0.1.3',
+ ],
+ entry_points={ 'console_scripts': [
+ 'pyrsa-priv2pub = rsa.util:private_to_public',
+ 'pyrsa-keygen = rsa.cli:keygen',
+ 'pyrsa-encrypt = rsa.cli:encrypt',
+ 'pyrsa-decrypt = rsa.cli:decrypt',
+ 'pyrsa-sign = rsa.cli:sign',
+ 'pyrsa-verify = rsa.cli:verify',
+ 'pyrsa-encrypt-bigfile = rsa.cli:encrypt_bigfile',
+ 'pyrsa-decrypt-bigfile = rsa.cli:decrypt_bigfile',
+ ]},
+
+)
diff --git a/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/LICENSE b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/LICENSE
new file mode 100644
index 0000000000..61555f192e
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) 2018 Sentry (https://sentry.io) and individual contributors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/METADATA b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/METADATA
new file mode 100644
index 0000000000..5fef4faa5b
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/METADATA
@@ -0,0 +1,60 @@
+Metadata-Version: 2.1
+Name: sentry-sdk
+Version: 0.14.3
+Summary: Python client for Sentry (https://getsentry.com)
+Home-page: https://github.com/getsentry/sentry-python
+Author: Sentry Team and Contributors
+Author-email: hello@getsentry.com
+License: BSD
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Requires-Dist: urllib3 (>=1.10.0)
+Requires-Dist: certifi
+Provides-Extra: aiohttp
+Requires-Dist: aiohttp (>=3.5) ; extra == 'aiohttp'
+Provides-Extra: beam
+Requires-Dist: beam (>=2.12) ; extra == 'beam'
+Provides-Extra: bottle
+Requires-Dist: bottle (>=0.12.13) ; extra == 'bottle'
+Provides-Extra: celery
+Requires-Dist: celery (>=3) ; extra == 'celery'
+Provides-Extra: django
+Requires-Dist: django (>=1.8) ; extra == 'django'
+Provides-Extra: falcon
+Requires-Dist: falcon (>=1.4) ; extra == 'falcon'
+Provides-Extra: flask
+Requires-Dist: flask (>=0.11) ; extra == 'flask'
+Requires-Dist: blinker (>=1.1) ; extra == 'flask'
+Provides-Extra: pyspark
+Requires-Dist: pyspark (>=2.4.4) ; extra == 'pyspark'
+Provides-Extra: rq
+Requires-Dist: rq (>=0.6) ; extra == 'rq'
+Provides-Extra: sanic
+Requires-Dist: sanic (>=0.8) ; extra == 'sanic'
+Provides-Extra: sqlalchemy
+Requires-Dist: sqlalchemy (>=1.2) ; extra == 'sqlalchemy'
+Provides-Extra: tornado
+Requires-Dist: tornado (>=5) ; extra == 'tornado'
+
+
+Sentry-Python - Sentry SDK for Python
+=====================================
+
+**Sentry-Python is an SDK for Sentry.** Check out `GitHub
+<https://github.com/getsentry/sentry-python>`_ to find out more.
+
+
diff --git a/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/RECORD b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/RECORD
new file mode 100644
index 0000000000..14cfd725cb
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/RECORD
@@ -0,0 +1,58 @@
+sentry_sdk/__init__.py,sha256=k1HZ_Malhx3a5bIh4pAl4Tvt_hAopZhyR5lcRR0enX0,591
+sentry_sdk/_compat.py,sha256=f3Tadrt6580oSkwwriJrggZR6oeNsdN2OBCOGuSasj8,2422
+sentry_sdk/_types.py,sha256=1WMOPaU3zhM-W_ejMduB3yM-qNeb4fTKdmBY3L2QhXY,1080
+sentry_sdk/api.py,sha256=f6EJa-Zdr_Oehq5JgMqVcXeY0rU0scTiMMp2c3gSJN4,5528
+sentry_sdk/client.py,sha256=mD57zGxONY_DfFFksae5dJEEMnS11UnSgcfv9pE5gC4,13157
+sentry_sdk/consts.py,sha256=WS4SVRzlEoF0H3BKhxgAEPa8kT2mFPFdr6IqDznS6cQ,3487
+sentry_sdk/debug.py,sha256=ZT-7VoCIA5TYhh7X-ZSaSzYZ4_MuoNvzi12gKgIKzm0,1132
+sentry_sdk/envelope.py,sha256=cOSNXA2r0Q2lGMY-UpSP-jBh-cvSDDlMe12vy5DUn3w,8221
+sentry_sdk/hub.py,sha256=tbbZaSexKp44Sx6cxJ7uV8YwDkm2KAObZIIgu9UPfRQ,19459
+sentry_sdk/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+sentry_sdk/scope.py,sha256=ZQshHgonA0lTgRzUsV0eSqqPdOMBD9OHEJZNs5Dsf-8,12509
+sentry_sdk/serializer.py,sha256=Nn9O5fZo7QquXae_pxoaOIbT3_iQP30GrWuUV0ykywI,10507
+sentry_sdk/sessions.py,sha256=5g-pUW3LqcEHL3gvnPk5lZvPQmP6B9Vu2KYYrxjbDls,7732
+sentry_sdk/tracing.py,sha256=55qCUgIesvTTAzl7dL9BFDDOcY25FgsUqm6bHvVMOP4,15210
+sentry_sdk/transport.py,sha256=qXPsbb4OAWLpAhSCImoILnKUulMVoHjgFWuiEJGXbbA,11002
+sentry_sdk/utils.py,sha256=TG89z9WzBqPu0039YnxgY6FcRRr8l5K0_R3X4hiInjc,22953
+sentry_sdk/worker.py,sha256=nlcd5mlawU3AOQLOYIgTtQVceX623p3YMqv2ot0czZw,4560
+sentry_sdk/integrations/__init__.py,sha256=pD6ksI4OXAH0NeXbqAc_vhvVJpyPR-ycGzqOLQSbiy4,6464
+sentry_sdk/integrations/_wsgi_common.py,sha256=J8jlafU5yhQu2xjKuK_3-Bt9HbqbHIP5ZyWebkfSJ_k,4763
+sentry_sdk/integrations/aiohttp.py,sha256=__SUTMu1k2eIT7M7lefyCXNxUMeSAoQYKTV0OtgB6YM,7076
+sentry_sdk/integrations/argv.py,sha256=X-RVfWNxuPObsOmuommDc4FcIhNSTKpGsD1cYbrqza4,945
+sentry_sdk/integrations/asgi.py,sha256=BvLrqtzyaM1UIXidSjkpZEWDGYlrplqOvXc52gX5Zhc,6821
+sentry_sdk/integrations/atexit.py,sha256=b75c2SBJPl_cS4ObiQT1SMKqeZG5Fqed61z2myHRKug,1837
+sentry_sdk/integrations/aws_lambda.py,sha256=NCop82UqSLetsvoIf0THuFPXSdWua-y4ZToG-ysqGSM,8890
+sentry_sdk/integrations/beam.py,sha256=3wSJv4SuIBsVFtoc2J1oW2JYZnq-biMXEL-7Qn85rOQ,5650
+sentry_sdk/integrations/bottle.py,sha256=KwvGziHwdxzv_FeT1UNRMAmtDEWiT_0ssT5QyzFQG2M,6188
+sentry_sdk/integrations/celery.py,sha256=86NG-gplsol8gR-8c5Cr43QT_CvLC_9sbD8efRuuHbg,8394
+sentry_sdk/integrations/dedupe.py,sha256=d3JaHlMJpeF9zqVmITSPcLPBEvr9aHRzIrlGyzqeNgs,1166
+sentry_sdk/integrations/excepthook.py,sha256=ho96OGOzBdTZDxNOV4VQXfHv_MD5hsZ_8ww-5GNrRDI,2182
+sentry_sdk/integrations/falcon.py,sha256=S15UIm84t1cHvzj_3gxEbgmkEy5sXex9p-L8Sc1UwSQ,6797
+sentry_sdk/integrations/flask.py,sha256=U5-23SYrEbiNB80TSX96lry_qzpa-Nyr675cWLSSfWQ,8168
+sentry_sdk/integrations/gnu_backtrace.py,sha256=VJU3zYY7GUybAgIOEGF_P7i4V2R_jOnlSgTCS7XNto0,2912
+sentry_sdk/integrations/logging.py,sha256=kqWZmR711fCc2k--eULHXbi0khJY9K4pcsYvSgu-Zs8,6922
+sentry_sdk/integrations/modules.py,sha256=tgl4abSudtR03NBOjXCWJ08dHY5KlxUveX3mPUNosYk,1393
+sentry_sdk/integrations/pyramid.py,sha256=4VPOY1AZAjGfrRiM2KAZHrevwC4Uy6gBQ2oH9sF5XyU,7006
+sentry_sdk/integrations/redis.py,sha256=HhXrJ8tOrsd-8pqXeYjtAepF31diRgl3bqJ4GnPiluI,1941
+sentry_sdk/integrations/rq.py,sha256=mT6iE4JcuwBL0cMeInMQMSaA0QzJ7JgN0dpDpiw4SCM,4714
+sentry_sdk/integrations/sanic.py,sha256=n_y49BpScw6XCMg1bRSJDrNnLcBIklAnFT9xibmGEQY,7646
+sentry_sdk/integrations/serverless.py,sha256=d97Z1cBXdRFdXxzYsLNtRebATfExIWZ1oxMT8_xTf4Q,1993
+sentry_sdk/integrations/sqlalchemy.py,sha256=jLVpxLSol0_4goqLVb9_z1awTpwdtmG_1IXbpqC1lkg,2525
+sentry_sdk/integrations/stdlib.py,sha256=bYoNEOP_xmKgR-n_SmBLNAHhwC41y8l96go5aQX3gss,7348
+sentry_sdk/integrations/threading.py,sha256=TN5cmoLfRIaayFFWoN9L0VdXunB23iTcUjUA6V9GSrE,2856
+sentry_sdk/integrations/tornado.py,sha256=4V32cl0cw0cpUjCyVUFKtW3kD0iN9VibW4IAHTJYhnM,6910
+sentry_sdk/integrations/trytond.py,sha256=cLpQ5CZrG1Wn5Cq3_Xosswu5Jt43KEV-ag_zrvcXwqo,1728
+sentry_sdk/integrations/wsgi.py,sha256=UPUJiEYM1eu1-zSNNZFSPLenkGYYS022lCLM36CQgCs,10232
+sentry_sdk/integrations/django/__init__.py,sha256=R0zTG2qyFyqal9Azb_DkMfcOW_gjF9PLWPz9vm0KKqM,15893
+sentry_sdk/integrations/django/asgi.py,sha256=-OPPl8WjFjXPgfdTmMuIZv1EqFSk0BRKLfkgmmPAwPA,1467
+sentry_sdk/integrations/django/middleware.py,sha256=bM-J_4ur2qilpHzAlrWtlUsakR-ZPH-wvKw931fnuX4,4419
+sentry_sdk/integrations/django/templates.py,sha256=Knq4W6NyfBbFGCLQqpB6mBCze2ZQJKmS4Up5Gvy47VU,3398
+sentry_sdk/integrations/django/transactions.py,sha256=1W-9xuryfy7ztqI_PLrSTAOWO0holUPyYuXYUh8ez2E,4094
+sentry_sdk/integrations/spark/__init__.py,sha256=oOewMErnZk2rzNvIlZO6URxQexu9bUJuSLM2m_zECy8,208
+sentry_sdk/integrations/spark/spark_driver.py,sha256=CMyEe6_Qf8E9OSz3bcCumsOgO8eJ4egKOrazOYPcvX4,8465
+sentry_sdk/integrations/spark/spark_worker.py,sha256=if_Pqkaxm-SKghaUETCLhL7Vrxk2HKG7A3mwocAHzas,3884
+sentry_sdk-0.14.3.dist-info/LICENSE,sha256=WUBNTIVOV5CX1Bv8zVAGr96dbXDmRs9VB0zb_q1ezxw,1330
+sentry_sdk-0.14.3.dist-info/METADATA,sha256=qTLzHA_baC0M0ImoObls68NW_BzxKhx63tzeKnBAW8E,2203
+sentry_sdk-0.14.3.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+sentry_sdk-0.14.3.dist-info/top_level.txt,sha256=XrQz30XE9FKXSY_yGLrd9bsv2Rk390GTDJOSujYaMxI,11
+sentry_sdk-0.14.3.dist-info/RECORD,,
diff --git a/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/WHEEL b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/WHEEL
new file mode 100644
index 0000000000..ef99c6cf32
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/top_level.txt b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/top_level.txt
new file mode 100644
index 0000000000..5051901ecb
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk-0.14.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+sentry_sdk
diff --git a/third_party/python/sentry_sdk/sentry_sdk/__init__.py b/third_party/python/sentry_sdk/sentry_sdk/__init__.py
new file mode 100644
index 0000000000..b211a6c754
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/__init__.py
@@ -0,0 +1,25 @@
+from sentry_sdk.hub import Hub, init
+from sentry_sdk.scope import Scope
+from sentry_sdk.transport import Transport, HttpTransport
+from sentry_sdk.client import Client
+
+from sentry_sdk.api import * # noqa
+from sentry_sdk.api import __all__ as api_all
+
+from sentry_sdk.consts import VERSION # noqa
+
+__all__ = api_all + [ # noqa
+ "Hub",
+ "Scope",
+ "Client",
+ "Transport",
+ "HttpTransport",
+ "init",
+ "integrations",
+]
+
+# Initialize the debug support after everything is loaded
+from sentry_sdk.debug import init_debug_support
+
+init_debug_support()
+del init_debug_support
diff --git a/third_party/python/sentry_sdk/sentry_sdk/_compat.py b/third_party/python/sentry_sdk/sentry_sdk/_compat.py
new file mode 100644
index 0000000000..4db5f44c33
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/_compat.py
@@ -0,0 +1,92 @@
+import sys
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Optional
+ from typing import Tuple
+ from typing import Any
+ from typing import Type
+
+ from typing import TypeVar
+
+ T = TypeVar("T")
+
+
+PY2 = sys.version_info[0] == 2
+
+if PY2:
+ import urlparse # noqa
+
+ text_type = unicode # noqa
+ import Queue as queue # noqa
+
+ string_types = (str, text_type)
+ number_types = (int, long, float) # noqa
+ int_types = (int, long) # noqa
+ iteritems = lambda x: x.iteritems() # noqa: B301
+
+ def implements_str(cls):
+ # type: (T) -> T
+ cls.__unicode__ = cls.__str__
+ cls.__str__ = lambda x: unicode(x).encode("utf-8") # noqa
+ return cls
+
+ exec("def reraise(tp, value, tb=None):\n raise tp, value, tb")
+
+
+else:
+ import urllib.parse as urlparse # noqa
+ import queue # noqa
+
+ text_type = str
+ string_types = (text_type,) # type: Tuple[type]
+ number_types = (int, float) # type: Tuple[type, type]
+ int_types = (int,) # noqa
+ iteritems = lambda x: x.items()
+
+ def implements_str(x):
+ # type: (T) -> T
+ return x
+
+ def reraise(tp, value, tb=None):
+ # type: (Optional[Type[BaseException]], Optional[BaseException], Optional[Any]) -> None
+ assert value is not None
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+
+
+def with_metaclass(meta, *bases):
+ # type: (Any, *Any) -> Any
+ class MetaClass(type):
+ def __new__(metacls, name, this_bases, d):
+ # type: (Any, Any, Any, Any) -> Any
+ return meta(name, bases, d)
+
+ return type.__new__(MetaClass, "temporary_class", (), {})
+
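+# Illustrative usage (a sketch, not from the upstream docs): a class created
+# through with_metaclass gets the custom metaclass on both Python 2 and 3.
+#
+# >>> class Meta(type): pass
+# >>> class Base(with_metaclass(Meta)): pass
+# >>> type(Base) is Meta
+# True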
+
+def check_thread_support():
+ # type: () -> None
+ try:
+ from uwsgi import opt # type: ignore
+ except ImportError:
+ return
+
+ # When `threads` is passed in as a uwsgi option,
+ # `enable-threads` is implied on.
+ if "threads" in opt:
+ return
+
+ if str(opt.get("enable-threads", "0")).lower() in ("false", "off", "no", "0"):
+ from warnings import warn
+
+ warn(
+ Warning(
+ "We detected the use of uwsgi with disabled threads. "
+ "This will cause issues with the transport you are "
+ "trying to use. Please enable threading for uwsgi. "
+ '(Enable the "enable-threads" flag).'
+ )
+ )
diff --git a/third_party/python/sentry_sdk/sentry_sdk/_types.py b/third_party/python/sentry_sdk/sentry_sdk/_types.py
new file mode 100644
index 0000000000..74020aea57
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/_types.py
@@ -0,0 +1,37 @@
+try:
+ from typing import TYPE_CHECKING as MYPY
+except ImportError:
+ MYPY = False
+
+
+if MYPY:
+ from types import TracebackType
+ from typing import Any
+ from typing import Callable
+ from typing import Dict
+ from typing import Optional
+ from typing import Tuple
+ from typing import Type
+ from typing_extensions import Literal
+
+ ExcInfo = Tuple[
+ Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]
+ ]
+
+ Event = Dict[str, Any]
+ Hint = Dict[str, Any]
+
+ Breadcrumb = Dict[str, Any]
+ BreadcrumbHint = Dict[str, Any]
+
+ EventProcessor = Callable[[Event, Hint], Optional[Event]]
+ ErrorProcessor = Callable[[Event, ExcInfo], Optional[Event]]
+ BreadcrumbProcessor = Callable[[Breadcrumb, BreadcrumbHint], Optional[Breadcrumb]]
+
+ # https://github.com/python/mypy/issues/5710
+ NotImplementedType = Any
+
+ EventDataCategory = Literal[
+ "default", "error", "crash", "transaction", "security", "attachment", "session"
+ ]
+ SessionStatus = Literal["ok", "exited", "crashed", "abnormal"]
diff --git a/third_party/python/sentry_sdk/sentry_sdk/api.py b/third_party/python/sentry_sdk/sentry_sdk/api.py
new file mode 100644
index 0000000000..0f1cdfc741
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/api.py
@@ -0,0 +1,256 @@
+import inspect
+from contextlib import contextmanager
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.scope import Scope
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Dict
+ from typing import Optional
+ from typing import overload
+ from typing import Callable
+ from typing import TypeVar
+ from typing import ContextManager
+
+ from sentry_sdk._types import Event, Hint, Breadcrumb, BreadcrumbHint
+ from sentry_sdk.tracing import Span
+
+ T = TypeVar("T")
+ F = TypeVar("F", bound=Callable[..., Any])
+else:
+
+ def overload(x):
+ # type: (T) -> T
+ return x
+
+
+__all__ = [
+ "capture_event",
+ "capture_message",
+ "capture_exception",
+ "add_breadcrumb",
+ "configure_scope",
+ "push_scope",
+ "flush",
+ "last_event_id",
+ "start_span",
+ "set_tag",
+ "set_context",
+ "set_extra",
+ "set_user",
+ "set_level",
+]
+
+
+def hubmethod(f):
+ # type: (F) -> F
+ f.__doc__ = "%s\n\n%s" % (
+ "Alias for :py:meth:`sentry_sdk.Hub.%s`" % f.__name__,
+ inspect.getdoc(getattr(Hub, f.__name__)),
+ )
+ return f
+
+
+def scopemethod(f):
+ # type: (F) -> F
+ f.__doc__ = "%s\n\n%s" % (
+ "Alias for :py:meth:`sentry_sdk.Scope.%s`" % f.__name__,
+ inspect.getdoc(getattr(Scope, f.__name__)),
+ )
+ return f
+
+
+@hubmethod
+def capture_event(
+ event, # type: Event
+ hint=None, # type: Optional[Hint]
+ scope=None, # type: Optional[Any]
+ **scope_args # type: Dict[str, Any]
+):
+ # type: (...) -> Optional[str]
+ hub = Hub.current
+ if hub is not None:
+ return hub.capture_event(event, hint, scope=scope, **scope_args)
+ return None
+
+
+@hubmethod
+def capture_message(
+ message, # type: str
+ level=None, # type: Optional[str]
+ scope=None, # type: Optional[Any]
+ **scope_args # type: Dict[str, Any]
+):
+ # type: (...) -> Optional[str]
+ hub = Hub.current
+ if hub is not None:
+ return hub.capture_message(message, level, scope=scope, **scope_args)
+ return None
+
+
+@hubmethod
+def capture_exception(
+ error=None, # type: Optional[BaseException]
+ scope=None, # type: Optional[Any]
+ **scope_args # type: Dict[str, Any]
+):
+ # type: (...) -> Optional[str]
+ hub = Hub.current
+ if hub is not None:
+ return hub.capture_exception(error, scope=scope, **scope_args)
+ return None
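+
+# Typical top-level usage (a sketch):
+#
+#     try:
+#         1 / 0
+#     except ZeroDivisionError:
+#         capture_exception()  # falls back to sys.exc_info() internally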
+
+
+@hubmethod
+def add_breadcrumb(
+ crumb=None, # type: Optional[Breadcrumb]
+ hint=None, # type: Optional[BreadcrumbHint]
+ **kwargs # type: Any
+):
+ # type: (...) -> None
+ hub = Hub.current
+ if hub is not None:
+ return hub.add_breadcrumb(crumb, hint, **kwargs)
+
+
+@overload # noqa
+def configure_scope():
+ # type: () -> ContextManager[Scope]
+ pass
+
+
+@overload # noqa
+def configure_scope(
+ callback, # type: Callable[[Scope], None]
+):
+ # type: (...) -> None
+ pass
+
+
+@hubmethod # noqa
+def configure_scope(
+ callback=None, # type: Optional[Callable[[Scope], None]]
+):
+ # type: (...) -> Optional[ContextManager[Scope]]
+ hub = Hub.current
+ if hub is not None:
+ return hub.configure_scope(callback)
+ elif callback is None:
+
+ @contextmanager
+ def inner():
+ yield Scope()
+
+ return inner()
+ else:
+ # returned if user provided callback
+ return None
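+
+# Both calling styles, as a sketch (the tag name is illustrative):
+#
+#     with configure_scope() as scope:  # context-manager form
+#         scope.set_tag("component", "worker")
+#
+#     configure_scope(lambda scope: scope.set_tag("component", "worker"))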
+
+
+@overload # noqa
+def push_scope():
+ # type: () -> ContextManager[Scope]
+ pass
+
+
+@overload # noqa
+def push_scope(
+ callback, # type: Callable[[Scope], None]
+):
+ # type: (...) -> None
+ pass
+
+
+@hubmethod # noqa
+def push_scope(
+ callback=None, # type: Optional[Callable[[Scope], None]]
+):
+ # type: (...) -> Optional[ContextManager[Scope]]
+ hub = Hub.current
+ if hub is not None:
+ return hub.push_scope(callback)
+ elif callback is None:
+
+ @contextmanager
+ def inner():
+ yield Scope()
+
+ return inner()
+ else:
+ # returned if user provided callback
+ return None
+
+
+@scopemethod # noqa
+def set_tag(key, value):
+ # type: (str, Any) -> None
+ hub = Hub.current
+ if hub is not None:
+ hub.scope.set_tag(key, value)
+
+
+@scopemethod # noqa
+def set_context(key, value):
+ # type: (str, Any) -> None
+ hub = Hub.current
+ if hub is not None:
+ hub.scope.set_context(key, value)
+
+
+@scopemethod # noqa
+def set_extra(key, value):
+ # type: (str, Any) -> None
+ hub = Hub.current
+ if hub is not None:
+ hub.scope.set_extra(key, value)
+
+
+@scopemethod # noqa
+def set_user(value):
+ # type: (Dict[str, Any]) -> None
+ hub = Hub.current
+ if hub is not None:
+ hub.scope.set_user(value)
+
+
+@scopemethod # noqa
+def set_level(value):
+ # type: (str) -> None
+ hub = Hub.current
+ if hub is not None:
+ hub.scope.set_level(value)
+
+
+@hubmethod
+def flush(
+ timeout=None, # type: Optional[float]
+ callback=None, # type: Optional[Callable[[int, float], None]]
+):
+ # type: (...) -> None
+ hub = Hub.current
+ if hub is not None:
+ return hub.flush(timeout=timeout, callback=callback)
+
+
+@hubmethod
+def last_event_id():
+ # type: () -> Optional[str]
+ hub = Hub.current
+ if hub is not None:
+ return hub.last_event_id()
+ return None
+
+
+@hubmethod
+def start_span(
+ span=None, # type: Optional[Span]
+ **kwargs # type: Any
+):
+ # type: (...) -> Span
+
+ # TODO: All other functions in this module check for
+ # `Hub.current is None`. That actually should never happen?
+ return Hub.current.start_span(span=span, **kwargs)
diff --git a/third_party/python/sentry_sdk/sentry_sdk/client.py b/third_party/python/sentry_sdk/sentry_sdk/client.py
new file mode 100644
index 0000000000..c0fb8422d8
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/client.py
@@ -0,0 +1,406 @@
+import os
+import uuid
+import random
+from datetime import datetime
+import socket
+
+from sentry_sdk._compat import string_types, text_type, iteritems
+from sentry_sdk.utils import (
+ handle_in_app,
+ get_type_name,
+ capture_internal_exceptions,
+ current_stacktrace,
+ disable_capture_event,
+ logger,
+)
+from sentry_sdk.serializer import serialize
+from sentry_sdk.transport import make_transport
+from sentry_sdk.consts import DEFAULT_OPTIONS, SDK_INFO, ClientConstructor
+from sentry_sdk.integrations import setup_integrations
+from sentry_sdk.utils import ContextVar
+from sentry_sdk.sessions import SessionFlusher
+from sentry_sdk.envelope import Envelope
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Callable
+ from typing import Dict
+ from typing import List
+ from typing import Optional
+
+ from sentry_sdk.scope import Scope
+ from sentry_sdk._types import Event, Hint
+ from sentry_sdk.sessions import Session
+
+
+_client_init_debug = ContextVar("client_init_debug")
+
+
+def _get_options(*args, **kwargs):
+ # type: (*Optional[str], **Any) -> Dict[str, Any]
+ if args and (isinstance(args[0], (text_type, bytes, str)) or args[0] is None):
+ dsn = args[0] # type: Optional[str]
+ args = args[1:]
+ else:
+ dsn = None
+
+ rv = dict(DEFAULT_OPTIONS)
+ options = dict(*args, **kwargs)
+ if dsn is not None and options.get("dsn") is None:
+ options["dsn"] = dsn
+
+ for key, value in iteritems(options):
+ if key not in rv:
+ raise TypeError("Unknown option %r" % (key,))
+ rv[key] = value
+
+ if rv["dsn"] is None:
+ rv["dsn"] = os.environ.get("SENTRY_DSN")
+
+ if rv["release"] is None:
+ rv["release"] = os.environ.get("SENTRY_RELEASE")
+
+ if rv["environment"] is None:
+ rv["environment"] = os.environ.get("SENTRY_ENVIRONMENT")
+
+ if rv["server_name"] is None and hasattr(socket, "gethostname"):
+ rv["server_name"] = socket.gethostname()
+
+ return rv
+
+
+class _Client(object):
+ """The client is internally responsible for capturing the events and
+ forwarding them to sentry through the configured transport. It takes
+ the client options as keyword arguments and optionally the DSN as first
+ argument.
+ """
+
+ def __init__(self, *args, **kwargs):
+ # type: (*Any, **Any) -> None
+        self.options = get_options(*args, **kwargs)  # type: Dict[str, Any]  # noqa (`get_options` is aliased at the bottom of this module)
+ self._init_impl()
+
+ def __getstate__(self):
+ # type: () -> Any
+ return {"options": self.options}
+
+ def __setstate__(self, state):
+ # type: (Any) -> None
+ self.options = state["options"]
+ self._init_impl()
+
+ def _init_impl(self):
+ # type: () -> None
+ old_debug = _client_init_debug.get(False)
+
+ def _send_sessions(sessions):
+ # type: (List[Any]) -> None
+ transport = self.transport
+ if sessions and transport:
+ envelope = Envelope()
+ for session in sessions:
+ envelope.add_session(session)
+ transport.capture_envelope(envelope)
+
+ try:
+ _client_init_debug.set(self.options["debug"])
+ self.transport = make_transport(self.options)
+ self.session_flusher = SessionFlusher(flush_func=_send_sessions)
+
+ request_bodies = ("always", "never", "small", "medium")
+ if self.options["request_bodies"] not in request_bodies:
+ raise ValueError(
+ "Invalid value for request_bodies. Must be one of {}".format(
+ request_bodies
+ )
+ )
+
+ self.integrations = setup_integrations(
+ self.options["integrations"],
+ with_defaults=self.options["default_integrations"],
+ with_auto_enabling_integrations=self.options["_experiments"].get(
+ "auto_enabling_integrations", False
+ ),
+ )
+ finally:
+ _client_init_debug.set(old_debug)
+
+ @property
+ def dsn(self):
+ # type: () -> Optional[str]
+ """Returns the configured DSN as string."""
+ return self.options["dsn"]
+
+ def _prepare_event(
+ self,
+ event, # type: Event
+ hint, # type: Optional[Hint]
+ scope, # type: Optional[Scope]
+ ):
+ # type: (...) -> Optional[Event]
+
+ if event.get("timestamp") is None:
+ event["timestamp"] = datetime.utcnow()
+
+ hint = dict(hint or ()) # type: Hint
+
+ if scope is not None:
+ event_ = scope.apply_to_event(event, hint)
+ if event_ is None:
+ return None
+ event = event_
+
+ if (
+ self.options["attach_stacktrace"]
+ and "exception" not in event
+ and "stacktrace" not in event
+ and "threads" not in event
+ ):
+ with capture_internal_exceptions():
+ event["threads"] = {
+ "values": [
+ {
+ "stacktrace": current_stacktrace(
+ self.options["with_locals"]
+ ),
+ "crashed": False,
+ "current": True,
+ }
+ ]
+ }
+
+ for key in "release", "environment", "server_name", "dist":
+ if event.get(key) is None and self.options[key] is not None:
+ event[key] = text_type(self.options[key]).strip()
+ if event.get("sdk") is None:
+ sdk_info = dict(SDK_INFO)
+ sdk_info["integrations"] = sorted(self.integrations.keys())
+ event["sdk"] = sdk_info
+
+ if event.get("platform") is None:
+ event["platform"] = "python"
+
+ event = handle_in_app(
+ event, self.options["in_app_exclude"], self.options["in_app_include"]
+ )
+
+        # Postprocess the event here so that annotated types generally
+        # do not surface in before_send
+ if event is not None:
+ event = serialize(event)
+
+ before_send = self.options["before_send"]
+ if before_send is not None:
+ new_event = None
+ with capture_internal_exceptions():
+ new_event = before_send(event, hint or {})
+ if new_event is None:
+ logger.info("before send dropped event (%s)", event)
+ event = new_event # type: ignore
+
+ return event
+
+ def _is_ignored_error(self, event, hint):
+ # type: (Event, Hint) -> bool
+ exc_info = hint.get("exc_info")
+ if exc_info is None:
+ return False
+
+ type_name = get_type_name(exc_info[0])
+ full_name = "%s.%s" % (exc_info[0].__module__, type_name)
+
+ for errcls in self.options["ignore_errors"]:
+ # String types are matched against the type name in the
+ # exception only
+ if isinstance(errcls, string_types):
+ if errcls == full_name or errcls == type_name:
+ return True
+ else:
+ if issubclass(exc_info[0], errcls):
+ return True
+
+ return False
+
+ def _should_capture(
+ self,
+ event, # type: Event
+ hint, # type: Hint
+ scope=None, # type: Optional[Scope]
+ ):
+ # type: (...) -> bool
+ if scope is not None and not scope._should_capture:
+ return False
+
+ if (
+ self.options["sample_rate"] < 1.0
+ and random.random() >= self.options["sample_rate"]
+ ):
+ return False
+
+ if self._is_ignored_error(event, hint):
+ return False
+
+ return True
+
+ def _update_session_from_event(
+ self,
+ session, # type: Session
+ event, # type: Event
+ ):
+ # type: (...) -> None
+
+ crashed = False
+ errored = False
+ user_agent = None
+
+ # Figure out if this counts as an error and if we should mark the
+ # session as crashed.
+ level = event.get("level")
+ if level == "fatal":
+ crashed = True
+ if not crashed:
+ exceptions = (event.get("exception") or {}).get("values")
+ if exceptions:
+ errored = True
+ for error in exceptions:
+ mechanism = error.get("mechanism")
+ if mechanism and mechanism.get("handled") is False:
+ crashed = True
+ break
+
+ user = event.get("user")
+
+ if session.user_agent is None:
+ headers = (event.get("request") or {}).get("headers")
+ for (k, v) in iteritems(headers or {}):
+ if k.lower() == "user-agent":
+ user_agent = v
+ break
+
+ session.update(
+ status="crashed" if crashed else None,
+ user=user,
+ user_agent=user_agent,
+ errors=session.errors + (errored or crashed),
+ )
+
+ def capture_event(
+ self,
+ event, # type: Event
+ hint=None, # type: Optional[Hint]
+ scope=None, # type: Optional[Scope]
+ ):
+ # type: (...) -> Optional[str]
+ """Captures an event.
+
+ :param event: A ready-made event that can be directly sent to Sentry.
+
+ :param hint: Contains metadata about the event that can be read from `before_send`, such as the original exception object or a HTTP request object.
+
+        :returns: An event ID. May be `None` if there is no DSN set or if the SDK decided to discard the event for other reasons. In such situations setting `debug=True` on `init()` may help.
+ """
+ if disable_capture_event.get(False):
+ return None
+
+ if self.transport is None:
+ return None
+ if hint is None:
+ hint = {}
+ event_id = event.get("event_id")
+ if event_id is None:
+ event["event_id"] = event_id = uuid.uuid4().hex
+ if not self._should_capture(event, hint, scope):
+ return None
+ event_opt = self._prepare_event(event, hint, scope)
+ if event_opt is None:
+ return None
+
+ # whenever we capture an event we also check if the session needs
+ # to be updated based on that information.
+ session = scope._session if scope else None
+ if session:
+ self._update_session_from_event(session, event)
+
+ self.transport.capture_event(event_opt)
+ return event_id
+
+ def capture_session(
+ self, session # type: Session
+ ):
+ # type: (...) -> None
+ if not session.release:
+ logger.info("Discarded session update because of missing release")
+ else:
+ self.session_flusher.add_session(session)
+
+ def close(
+ self,
+ timeout=None, # type: Optional[float]
+ callback=None, # type: Optional[Callable[[int, float], None]]
+ ):
+ # type: (...) -> None
+ """
+ Close the client and shut down the transport. Arguments have the same
+ semantics as :py:meth:`Client.flush`.
+ """
+ if self.transport is not None:
+ self.flush(timeout=timeout, callback=callback)
+ self.session_flusher.kill()
+ self.transport.kill()
+ self.transport = None
+
+ def flush(
+ self,
+ timeout=None, # type: Optional[float]
+ callback=None, # type: Optional[Callable[[int, float], None]]
+ ):
+ # type: (...) -> None
+ """
+ Wait for the current events to be sent.
+
+ :param timeout: Wait for at most `timeout` seconds. If no `timeout` is provided, the `shutdown_timeout` option value is used.
+
+ :param callback: Is invoked with the number of pending events and the configured timeout.
+ """
+ if self.transport is not None:
+ if timeout is None:
+ timeout = self.options["shutdown_timeout"]
+ self.session_flusher.flush()
+ self.transport.flush(timeout=timeout, callback=callback)
+
+ def __enter__(self):
+ # type: () -> _Client
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ # type: (Any, Any, Any) -> None
+ self.close()
+
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ # Make mypy, PyCharm and other static analyzers think `get_options` is a
+ # type to have nicer autocompletion for params.
+ #
+ # Use `ClientConstructor` to define the argument types of `init` and
+ # `Dict[str, Any]` to tell static analyzers about the return type.
+
+ class get_options(ClientConstructor, Dict[str, Any]): # noqa: N801
+ pass
+
+ class Client(ClientConstructor, _Client):
+ pass
+
+
+else:
+ # Alias `get_options` for actual usage. Go through the lambda indirection
+ # to throw PyCharm off of the weakly typed signature (it would otherwise
+ # discover both the weakly typed signature of `_init` and our faked `init`
+ # type).
+
+ get_options = (lambda: _get_options)()
+ Client = (lambda: _Client)()
diff --git a/third_party/python/sentry_sdk/sentry_sdk/consts.py b/third_party/python/sentry_sdk/sentry_sdk/consts.py
new file mode 100644
index 0000000000..2fe012e66d
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/consts.py
@@ -0,0 +1,97 @@
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Optional
+ from typing import Callable
+ from typing import Union
+ from typing import List
+ from typing import Type
+ from typing import Dict
+ from typing import Any
+ from typing import Sequence
+ from typing_extensions import TypedDict
+
+ from sentry_sdk.transport import Transport
+ from sentry_sdk.integrations import Integration
+
+ from sentry_sdk._types import Event, EventProcessor, BreadcrumbProcessor
+
+ # Experiments are feature flags to enable and disable certain unstable SDK
+ # functionality. Changing them from the defaults (`None`) in production
+ # code is highly discouraged. They are not subject to any stability
+ # guarantees such as the ones from semantic versioning.
+ Experiments = TypedDict(
+ "Experiments",
+ {
+ "max_spans": Optional[int],
+ "record_sql_params": Optional[bool],
+ "auto_enabling_integrations": Optional[bool],
+ "auto_session_tracking": Optional[bool],
+ },
+ total=False,
+ )
+
+
+# This type exists to trick mypy and PyCharm into thinking `init` and `Client`
+# take these arguments (even though they take opaque **kwargs)
+class ClientConstructor(object):
+ def __init__(
+ self,
+ dsn=None, # type: Optional[str]
+ with_locals=True, # type: bool
+ max_breadcrumbs=100, # type: int
+ release=None, # type: Optional[str]
+ environment=None, # type: Optional[str]
+ server_name=None, # type: Optional[str]
+ shutdown_timeout=2, # type: int
+ integrations=[], # type: Sequence[Integration] # noqa: B006
+ in_app_include=[], # type: List[str] # noqa: B006
+ in_app_exclude=[], # type: List[str] # noqa: B006
+ default_integrations=True, # type: bool
+ dist=None, # type: Optional[str]
+ transport=None, # type: Optional[Union[Transport, Type[Transport], Callable[[Event], None]]]
+ sample_rate=1.0, # type: float
+ send_default_pii=False, # type: bool
+ http_proxy=None, # type: Optional[str]
+ https_proxy=None, # type: Optional[str]
+ ignore_errors=[], # type: List[Union[type, str]] # noqa: B006
+ request_bodies="medium", # type: str
+ before_send=None, # type: Optional[EventProcessor]
+ before_breadcrumb=None, # type: Optional[BreadcrumbProcessor]
+ debug=False, # type: bool
+ attach_stacktrace=False, # type: bool
+ ca_certs=None, # type: Optional[str]
+ propagate_traces=True, # type: bool
+ # DO NOT ENABLE THIS RIGHT NOW UNLESS YOU WANT TO EXCEED YOUR EVENT QUOTA IMMEDIATELY
+ traces_sample_rate=0.0, # type: float
+ traceparent_v2=False, # type: bool
+ _experiments={}, # type: Experiments # noqa: B006
+ ):
+ # type: (...) -> None
+ pass
+
+
+def _get_default_options():
+ # type: () -> Dict[str, Any]
+ import inspect
+
+ if hasattr(inspect, "getfullargspec"):
+ getargspec = inspect.getfullargspec
+ else:
+ getargspec = inspect.getargspec # type: ignore
+
+ a = getargspec(ClientConstructor.__init__)
+ defaults = a.defaults or ()
+ return dict(zip(a.args[-len(defaults) :], defaults))
+
+
+DEFAULT_OPTIONS = _get_default_options()
+del _get_default_options
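+
+# As a quick reference, the computed defaults mirror the keyword
+# defaults declared on `ClientConstructor.__init__` above, e.g.
+# DEFAULT_OPTIONS["sample_rate"] == 1.0 and
+# DEFAULT_OPTIONS["request_bodies"] == "medium".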
+
+
+VERSION = "0.14.3"
+SDK_INFO = {
+ "name": "sentry.python",
+ "version": VERSION,
+ "packages": [{"name": "pypi:sentry-sdk", "version": VERSION}],
+}
diff --git a/third_party/python/sentry_sdk/sentry_sdk/debug.py b/third_party/python/sentry_sdk/sentry_sdk/debug.py
new file mode 100644
index 0000000000..fe8ae50cea
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/debug.py
@@ -0,0 +1,44 @@
+import sys
+import logging
+
+from sentry_sdk import utils
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import logger
+from sentry_sdk.client import _client_init_debug
+from logging import LogRecord
+
+
+class _HubBasedClientFilter(logging.Filter):
+ def filter(self, record):
+ # type: (LogRecord) -> bool
+ if _client_init_debug.get(False):
+ return True
+ hub = Hub.current
+ if hub is not None and hub.client is not None:
+ return hub.client.options["debug"]
+ return False
+
+
+def init_debug_support():
+ # type: () -> None
+ if not logger.handlers:
+ configure_logger()
+ configure_debug_hub()
+
+
+def configure_logger():
+ # type: () -> None
+ _handler = logging.StreamHandler(sys.stderr)
+ _handler.setFormatter(logging.Formatter(" [sentry] %(levelname)s: %(message)s"))
+ logger.addHandler(_handler)
+ logger.setLevel(logging.DEBUG)
+ logger.addFilter(_HubBasedClientFilter())
+
+
+def configure_debug_hub():
+ # type: () -> None
+ def _get_debug_hub():
+ # type: () -> Hub
+ return Hub.current
+
+ utils._get_debug_hub = _get_debug_hub
diff --git a/third_party/python/sentry_sdk/sentry_sdk/envelope.py b/third_party/python/sentry_sdk/sentry_sdk/envelope.py
new file mode 100644
index 0000000000..fd08553249
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/envelope.py
@@ -0,0 +1,293 @@
+import io
+import json
+import shutil
+import mimetypes
+
+from sentry_sdk._compat import text_type
+from sentry_sdk._types import MYPY
+from sentry_sdk.sessions import Session
+
+if MYPY:
+ from typing import Any
+ from typing import Tuple
+ from typing import Optional
+ from typing import Union
+ from typing import Dict
+ from typing import List
+ from typing import Iterator
+
+ from sentry_sdk._types import Event, EventDataCategory
+
+
+def get_event_data_category(event):
+ # type: (Event) -> EventDataCategory
+ if event.get("type") == "transaction":
+ return "transaction"
+ return "error"
+
+
+class Envelope(object):
+ def __init__(
+ self,
+ headers=None, # type: Optional[Dict[str, str]]
+ items=None, # type: Optional[List[Item]]
+ ):
+ # type: (...) -> None
+ if headers is not None:
+ headers = dict(headers)
+ self.headers = headers or {}
+ if items is None:
+ items = []
+ else:
+ items = list(items)
+ self.items = items
+
+ @property
+ def description(self):
+ # type: (...) -> str
+ return "envelope with %s items (%s)" % (
+ len(self.items),
+ ", ".join(x.data_category for x in self.items),
+ )
+
+ def add_event(
+ self, event # type: Event
+ ):
+ # type: (...) -> None
+ self.add_item(Item(payload=PayloadRef(json=event), type="event"))
+
+ def add_session(
+ self, session # type: Union[Session, Any]
+ ):
+ # type: (...) -> None
+ if isinstance(session, Session):
+ session = session.to_json()
+ self.add_item(Item(payload=PayloadRef(json=session), type="session"))
+
+ def add_item(
+ self, item # type: Item
+ ):
+ # type: (...) -> None
+ self.items.append(item)
+
+ def get_event(self):
+ # type: (...) -> Optional[Event]
+ for items in self.items:
+ event = items.get_event()
+ if event is not None:
+ return event
+ return None
+
+ def __iter__(self):
+ # type: (...) -> Iterator[Item]
+ return iter(self.items)
+
+ def serialize_into(
+ self, f # type: Any
+ ):
+ # type: (...) -> None
+ f.write(json.dumps(self.headers).encode("utf-8"))
+ f.write(b"\n")
+ for item in self.items:
+ item.serialize_into(f)
+
+ def serialize(self):
+ # type: (...) -> bytes
+ out = io.BytesIO()
+ self.serialize_into(out)
+ return out.getvalue()
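+
+    # The wire format produced above is newline-delimited (a sketch, not
+    # normative): the envelope headers as one JSON line, then for each
+    # item its headers (with a computed `length`) as a JSON line, the raw
+    # payload bytes, and a trailing newline, e.g.
+    #
+    #     {"event_id": "..."}
+    #     {"type": "event", "content_type": "application/json", "length": 2}
+    #     {}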
+
+ @classmethod
+ def deserialize_from(
+ cls, f # type: Any
+ ):
+ # type: (...) -> Envelope
+ headers = json.loads(f.readline())
+ items = []
+        while True:
+ item = Item.deserialize_from(f)
+ if item is None:
+ break
+ items.append(item)
+ return cls(headers=headers, items=items)
+
+ @classmethod
+ def deserialize(
+ cls, bytes # type: bytes
+ ):
+ # type: (...) -> Envelope
+ return cls.deserialize_from(io.BytesIO(bytes))
+
+ def __repr__(self):
+ # type: (...) -> str
+ return "<Envelope headers=%r items=%r>" % (self.headers, self.items)
+
+
+class PayloadRef(object):
+ def __init__(
+ self,
+ bytes=None, # type: Optional[bytes]
+ path=None, # type: Optional[Union[bytes, text_type]]
+ json=None, # type: Optional[Any]
+ ):
+ # type: (...) -> None
+ self.json = json
+ self.bytes = bytes
+ self.path = path
+
+ def get_bytes(self):
+ # type: (...) -> bytes
+ if self.bytes is None:
+ if self.path is not None:
+ with open(self.path, "rb") as f:
+ self.bytes = f.read()
+ elif self.json is not None:
+ self.bytes = json.dumps(self.json).encode("utf-8")
+ else:
+ self.bytes = b""
+ return self.bytes
+
+ def _prepare_serialize(self):
+ # type: (...) -> Tuple[Any, Any]
+ if self.path is not None and self.bytes is None:
+ f = open(self.path, "rb")
+ f.seek(0, 2)
+ length = f.tell()
+ f.seek(0, 0)
+
+ def writer(out):
+ # type: (Any) -> None
+ try:
+ shutil.copyfileobj(f, out)
+ finally:
+ f.close()
+
+ return length, writer
+
+ bytes = self.get_bytes()
+ return len(bytes), lambda f: f.write(bytes)
+
+ @property
+ def inferred_content_type(self):
+ # type: (...) -> str
+ if self.json is not None:
+ return "application/json"
+ elif self.path is not None:
+ path = self.path
+ if isinstance(path, bytes):
+ path = path.decode("utf-8", "replace")
+ ty = mimetypes.guess_type(path)[0]
+ if ty:
+ return ty
+ return "application/octet-stream"
+
+ def __repr__(self):
+ # type: (...) -> str
+ return "<Payload %r>" % (self.inferred_content_type,)
+
+
+class Item(object):
+ def __init__(
+ self,
+ payload, # type: Union[bytes, text_type, PayloadRef]
+ headers=None, # type: Optional[Dict[str, str]]
+ type=None, # type: Optional[str]
+ content_type=None, # type: Optional[str]
+ filename=None, # type: Optional[str]
+ ):
+        if headers is not None:
+            headers = dict(headers)
+        else:
+            headers = {}
+ self.headers = headers
+ if isinstance(payload, bytes):
+ payload = PayloadRef(bytes=payload)
+ elif isinstance(payload, text_type):
+ payload = PayloadRef(bytes=payload.encode("utf-8"))
+
+ if filename is not None:
+ headers["filename"] = filename
+ if type is not None:
+ headers["type"] = type
+ if content_type is not None:
+ headers["content_type"] = content_type
+ elif "content_type" not in headers:
+ headers["content_type"] = payload.inferred_content_type
+
+ self.payload = payload
+
+ def __repr__(self):
+ # type: (...) -> str
+ return "<Item headers=%r payload=%r data_category=%r>" % (
+ self.headers,
+ self.payload,
+ self.data_category,
+ )
+
+ @property
+ def data_category(self):
+ # type: (...) -> EventDataCategory
+ rv = "default" # type: Any
+ event = self.get_event()
+ if event is not None:
+ rv = get_event_data_category(event)
+ else:
+ ty = self.headers.get("type")
+ if ty in ("session", "attachment"):
+ rv = ty
+ return rv
+
+ def get_bytes(self):
+ # type: (...) -> bytes
+ return self.payload.get_bytes()
+
+ def get_event(self):
+ # type: (...) -> Optional[Event]
+ if self.headers.get("type") == "event" and self.payload.json is not None:
+ return self.payload.json
+ return None
+
+ def serialize_into(
+ self, f # type: Any
+ ):
+ # type: (...) -> None
+ headers = dict(self.headers)
+ length, writer = self.payload._prepare_serialize()
+ headers["length"] = length
+ f.write(json.dumps(headers).encode("utf-8"))
+ f.write(b"\n")
+ writer(f)
+ f.write(b"\n")
+
+ def serialize(self):
+ # type: (...) -> bytes
+ out = io.BytesIO()
+ self.serialize_into(out)
+ return out.getvalue()
+
+ @classmethod
+ def deserialize_from(
+ cls, f # type: Any
+ ):
+ # type: (...) -> Optional[Item]
+ line = f.readline().rstrip()
+ if not line:
+ return None
+ headers = json.loads(line)
+ length = headers["length"]
+ payload = f.read(length)
+ if headers.get("type") == "event":
+ rv = cls(headers=headers, payload=PayloadRef(json=json.loads(payload)))
+ else:
+ rv = cls(headers=headers, payload=payload)
+ f.readline()
+ return rv
+
+ @classmethod
+ def deserialize(
+ cls, bytes # type: bytes
+ ):
+ # type: (...) -> Optional[Item]
+ return cls.deserialize_from(io.BytesIO(bytes))
diff --git a/third_party/python/sentry_sdk/sentry_sdk/hub.py b/third_party/python/sentry_sdk/sentry_sdk/hub.py
new file mode 100644
index 0000000000..f0060b9d79
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/hub.py
@@ -0,0 +1,647 @@
+import copy
+import random
+import sys
+
+from datetime import datetime
+from contextlib import contextmanager
+
+from sentry_sdk._compat import with_metaclass
+from sentry_sdk.scope import Scope
+from sentry_sdk.client import Client
+from sentry_sdk.tracing import Span
+from sentry_sdk.sessions import Session
+from sentry_sdk.utils import (
+ exc_info_from_error,
+ event_from_exception,
+ logger,
+ ContextVar,
+)
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Union
+ from typing import Any
+ from typing import Optional
+ from typing import Tuple
+ from typing import Dict
+ from typing import List
+ from typing import Callable
+ from typing import Generator
+ from typing import Type
+ from typing import TypeVar
+ from typing import overload
+ from typing import ContextManager
+
+ from sentry_sdk.integrations import Integration
+ from sentry_sdk._types import (
+ Event,
+ Hint,
+ Breadcrumb,
+ BreadcrumbHint,
+ ExcInfo,
+ )
+ from sentry_sdk.consts import ClientConstructor
+
+ T = TypeVar("T")
+
+else:
+
+ def overload(x):
+ # type: (T) -> T
+ return x
+
+
+_local = ContextVar("sentry_current_hub")
+
+
+def _update_scope(base, scope_change, scope_kwargs):
+ # type: (Scope, Optional[Any], Dict[str, Any]) -> Scope
+ if scope_change and scope_kwargs:
+ raise TypeError("cannot provide scope and kwargs")
+ if scope_change is not None:
+ final_scope = copy.copy(base)
+ if callable(scope_change):
+ scope_change(final_scope)
+ else:
+ final_scope.update_from_scope(scope_change)
+ elif scope_kwargs:
+ final_scope = copy.copy(base)
+ final_scope.update_from_kwargs(scope_kwargs)
+ else:
+ final_scope = base
+ return final_scope
+
+
+def _should_send_default_pii():
+ # type: () -> bool
+ client = Hub.current.client
+ if not client:
+ return False
+ return client.options["send_default_pii"]
+
+
+class _InitGuard(object):
+ def __init__(self, client):
+ # type: (Client) -> None
+ self._client = client
+
+ def __enter__(self):
+ # type: () -> _InitGuard
+ return self
+
+ def __exit__(self, exc_type, exc_value, tb):
+ # type: (Any, Any, Any) -> None
+ c = self._client
+ if c is not None:
+ c.close()
+
+
+def _init(*args, **kwargs):
+ # type: (*Optional[str], **Any) -> ContextManager[Any]
+ """Initializes the SDK and optionally integrations.
+
+ This takes the same arguments as the client constructor.
+ """
+ client = Client(*args, **kwargs) # type: ignore
+ Hub.current.bind_client(client)
+ rv = _InitGuard(client)
+ return rv
+
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ # Make mypy, PyCharm and other static analyzers think `init` is a type to
+ # have nicer autocompletion for params.
+ #
+ # Use `ClientConstructor` to define the argument types of `init` and
+ # `ContextManager[Any]` to tell static analyzers about the return type.
+
+ class init(ClientConstructor, ContextManager[Any]): # noqa: N801
+ pass
+
+
+else:
+ # Alias `init` for actual usage. Go through the lambda indirection to throw
+ # PyCharm off of the weakly typed signature (it would otherwise discover
+ # both the weakly typed signature of `_init` and our faked `init` type).
+
+ init = (lambda: _init)()
+
+
+class HubMeta(type):
+ @property
+ def current(cls):
+ # type: () -> Hub
+ """Returns the current instance of the hub."""
+ rv = _local.get(None)
+ if rv is None:
+ rv = Hub(GLOBAL_HUB)
+ _local.set(rv)
+ return rv
+
+ @property
+ def main(cls):
+ # type: () -> Hub
+ """Returns the main instance of the hub."""
+ return GLOBAL_HUB
+
+
+class _ScopeManager(object):
+ def __init__(self, hub):
+ # type: (Hub) -> None
+ self._hub = hub
+ self._original_len = len(hub._stack)
+ self._layer = hub._stack[-1]
+
+ def __enter__(self):
+ # type: () -> Scope
+ scope = self._layer[1]
+ assert scope is not None
+ return scope
+
+ def __exit__(self, exc_type, exc_value, tb):
+ # type: (Any, Any, Any) -> None
+ current_len = len(self._hub._stack)
+ if current_len < self._original_len:
+ logger.error(
+ "Scope popped too soon. Popped %s scopes too many.",
+ self._original_len - current_len,
+ )
+ return
+ elif current_len > self._original_len:
+ logger.warning(
+ "Leaked %s scopes: %s",
+ current_len - self._original_len,
+ self._hub._stack[self._original_len :],
+ )
+
+ layer = self._hub._stack[self._original_len - 1]
+ del self._hub._stack[self._original_len - 1 :]
+
+ if layer[1] != self._layer[1]:
+ logger.error(
+ "Wrong scope found. Meant to pop %s, but popped %s.",
+ layer[1],
+ self._layer[1],
+ )
+ elif layer[0] != self._layer[0]:
+ warning = (
+ "init() called inside of pushed scope. This might be entirely "
+ "legitimate but usually occurs when initializing the SDK inside "
+ "a request handler or task/job function. Try to initialize the "
+ "SDK as early as possible instead."
+ )
+ logger.warning(warning)
+
+
+class Hub(with_metaclass(HubMeta)): # type: ignore
+ """The hub wraps the concurrency management of the SDK. Each thread has
+ its own hub but the hub might transfer with the flow of execution if
+ context vars are available.
+
+ If the hub is used with a with statement it's temporarily activated.
+ """
+
+ _stack = None # type: List[Tuple[Optional[Client], Scope]]
+
+ # Mypy doesn't pick up on the metaclass.
+
+    """The client is internally responsible for capturing events and
+    forwarding them to Sentry through the configured transport. It takes
+    the client options as keyword arguments and optionally the DSN as the
+    first argument.
+ def __init__(
+ self,
+ client_or_hub=None, # type: Optional[Union[Hub, Client]]
+ scope=None, # type: Optional[Any]
+ ):
+ # type: (...) -> None
+ if isinstance(client_or_hub, Hub):
+ hub = client_or_hub
+ client, other_scope = hub._stack[-1]
+ if scope is None:
+ scope = copy.copy(other_scope)
+ else:
+ client = client_or_hub
+ if scope is None:
+ scope = Scope()
+
+ self._stack = [(client, scope)]
+ self._last_event_id = None # type: Optional[str]
+ self._old_hubs = [] # type: List[Hub]
+
+ def __enter__(self):
+ # type: () -> Hub
+ self._old_hubs.append(Hub.current)
+ _local.set(self)
+ return self
+
+ def __exit__(
+ self,
+ exc_type, # type: Optional[type]
+ exc_value, # type: Optional[BaseException]
+ tb, # type: Optional[Any]
+ ):
+ # type: (...) -> None
+ old = self._old_hubs.pop()
+ _local.set(old)
+
+ def run(
+ self, callback # type: Callable[[], T]
+ ):
+ # type: (...) -> T
+ """Runs a callback in the context of the hub. Alternatively the
+ with statement can be used on the hub directly.
+ """
+ with self:
+ return callback()
+
+ def get_integration(
+ self, name_or_class # type: Union[str, Type[Integration]]
+ ):
+ # type: (...) -> Any
+ """Returns the integration for this hub by name or class. If there
+ is no client bound or the client does not have that integration
+ then `None` is returned.
+
+ If the return value is not `None` the hub is guaranteed to have a
+ client attached.
+ """
+ if isinstance(name_or_class, str):
+ integration_name = name_or_class
+ elif name_or_class.identifier is not None:
+ integration_name = name_or_class.identifier
+ else:
+ raise ValueError("Integration has no name")
+
+ client = self._stack[-1][0]
+ if client is not None:
+ rv = client.integrations.get(integration_name)
+ if rv is not None:
+ return rv
+
+ @property
+ def client(self):
+ # type: () -> Optional[Client]
+ """Returns the current client on the hub."""
+ return self._stack[-1][0]
+
+ @property
+ def scope(self):
+ # type: () -> Scope
+ """Returns the current scope on the hub."""
+ return self._stack[-1][1]
+
+ def last_event_id(self):
+ # type: () -> Optional[str]
+ """Returns the last event ID."""
+ return self._last_event_id
+
+ def bind_client(
+ self, new # type: Optional[Client]
+ ):
+ # type: (...) -> None
+ """Binds a new client to the hub."""
+ top = self._stack[-1]
+ self._stack[-1] = (new, top[1])
+
+ def capture_event(
+ self,
+ event, # type: Event
+ hint=None, # type: Optional[Hint]
+ scope=None, # type: Optional[Any]
+ **scope_args # type: Dict[str, Any]
+ ):
+ # type: (...) -> Optional[str]
+ """Captures an event. Alias of :py:meth:`sentry_sdk.Client.capture_event`.
+ """
+ client, top_scope = self._stack[-1]
+ scope = _update_scope(top_scope, scope, scope_args)
+ if client is not None:
+ rv = client.capture_event(event, hint, scope)
+ if rv is not None:
+ self._last_event_id = rv
+ return rv
+ return None
+
+ def capture_message(
+ self,
+ message, # type: str
+ level=None, # type: Optional[str]
+ scope=None, # type: Optional[Any]
+ **scope_args # type: Dict[str, Any]
+ ):
+ # type: (...) -> Optional[str]
+ """Captures a message. The message is just a string. If no level
+ is provided the default level is `info`.
+
+ :returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.Client.capture_event`).
+ """
+ if self.client is None:
+ return None
+ if level is None:
+ level = "info"
+ return self.capture_event(
+ {"message": message, "level": level}, scope=scope, **scope_args
+ )
+
+ def capture_exception(
+ self,
+ error=None, # type: Optional[Union[BaseException, ExcInfo]]
+ scope=None, # type: Optional[Any]
+ **scope_args # type: Dict[str, Any]
+ ):
+ # type: (...) -> Optional[str]
+ """Captures an exception.
+
+ :param error: An exception to catch. If `None`, `sys.exc_info()` will be used.
+
+ :returns: An `event_id` if the SDK decided to send the event (see :py:meth:`sentry_sdk.Client.capture_event`).
+ """
+ client = self.client
+ if client is None:
+ return None
+ if error is not None:
+ exc_info = exc_info_from_error(error)
+ else:
+ exc_info = sys.exc_info()
+
+ event, hint = event_from_exception(exc_info, client_options=client.options)
+ try:
+ return self.capture_event(event, hint=hint, scope=scope, **scope_args)
+ except Exception:
+ self._capture_internal_exception(sys.exc_info())
+
+ return None
+
+ def _capture_internal_exception(
+ self, exc_info # type: Any
+ ):
+ # type: (...) -> Any
+ """
+ Capture an exception that is likely caused by a bug in the SDK
+ itself.
+
+ These exceptions do not end up in Sentry and are just logged instead.
+ """
+ logger.error("Internal error in sentry_sdk", exc_info=exc_info)
+
+ def add_breadcrumb(
+ self,
+ crumb=None, # type: Optional[Breadcrumb]
+ hint=None, # type: Optional[BreadcrumbHint]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> None
+ """
+ Adds a breadcrumb.
+
+ :param crumb: Dictionary with the data as the sentry v7/v8 protocol expects.
+
+ :param hint: An optional value that can be used by `before_breadcrumb`
+ to customize the breadcrumbs that are emitted.
+ """
+ client, scope = self._stack[-1]
+ if client is None:
+ logger.info("Dropped breadcrumb because no client bound")
+ return
+
+ crumb = dict(crumb or ()) # type: Breadcrumb
+ crumb.update(kwargs)
+ if not crumb:
+ return
+
+ hint = dict(hint or ()) # type: Hint
+
+ if crumb.get("timestamp") is None:
+ crumb["timestamp"] = datetime.utcnow()
+ if crumb.get("type") is None:
+ crumb["type"] = "default"
+
+ if client.options["before_breadcrumb"] is not None:
+ new_crumb = client.options["before_breadcrumb"](crumb, hint)
+ else:
+ new_crumb = crumb
+
+ if new_crumb is not None:
+ scope._breadcrumbs.append(new_crumb)
+ else:
+ logger.info("before breadcrumb dropped breadcrumb (%s)", crumb)
+
+ max_breadcrumbs = client.options["max_breadcrumbs"] # type: int
+ while len(scope._breadcrumbs) > max_breadcrumbs:
+ scope._breadcrumbs.popleft()
+
+ def start_span(
+ self,
+ span=None, # type: Optional[Span]
+ **kwargs # type: Any
+ ):
+ # type: (...) -> Span
+ """
+ Create a new span whose parent span is the currently active
+ span, if any. The return value is the span object that can
+ be used as a context manager to start and stop timing.
+
+ Note that you will not see any span that is not contained
+ within a transaction. Create a transaction with
+ ``start_span(transaction="my transaction")`` if an
+ integration doesn't already do this for you.
+ """
+
+ client, scope = self._stack[-1]
+
+ kwargs.setdefault("hub", self)
+
+ if span is None:
+ span = scope.span
+ if span is not None:
+ span = span.new_span(**kwargs)
+ else:
+ span = Span(**kwargs)
+
+ if span.sampled is None and span.transaction is not None:
+ sample_rate = client and client.options["traces_sample_rate"] or 0
+ span.sampled = random.random() < sample_rate
+
+ if span.sampled:
+ max_spans = (
+ client and client.options["_experiments"].get("max_spans") or 1000
+ )
+ span.init_finished_spans(maxlen=max_spans)
+
+ return span
+
+ @overload # noqa
+ def push_scope(
+ self, callback=None # type: Optional[None]
+ ):
+ # type: (...) -> ContextManager[Scope]
+ pass
+
+ @overload # noqa
+ def push_scope(
+ self, callback # type: Callable[[Scope], None]
+ ):
+ # type: (...) -> None
+ pass
+
+ def push_scope( # noqa
+ self, callback=None # type: Optional[Callable[[Scope], None]]
+ ):
+ # type: (...) -> Optional[ContextManager[Scope]]
+ """
+ Pushes a new layer on the scope stack.
+
+ :param callback: If provided, this method pushes a scope, calls
+ `callback`, and pops the scope again.
+
+ :returns: If no `callback` is provided, a context manager that should
+ be used to pop the scope again.
+ """
+ if callback is not None:
+ with self.push_scope() as scope:
+ callback(scope)
+ return None
+
+ client, scope = self._stack[-1]
+ new_layer = (client, copy.copy(scope))
+ self._stack.append(new_layer)
+
+ return _ScopeManager(self)
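+
+    # A minimal sketch of the context-manager form (the extra key is
+    # illustrative); the pushed scope is popped when the block exits:
+    #
+    #     with Hub.current.push_scope() as scope:
+    #         scope.set_extra("job_id", 42)
+    #         Hub.current.capture_message("inside temporary scope")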
+
+ def pop_scope_unsafe(self):
+ # type: () -> Tuple[Optional[Client], Scope]
+ """
+ Pops a scope layer from the stack.
+
+ Try to use the context manager :py:meth:`push_scope` instead.
+ """
+ rv = self._stack.pop()
+ assert self._stack, "stack must have at least one layer"
+ return rv
+
+ @overload # noqa
+ def configure_scope(
+ self, callback=None # type: Optional[None]
+ ):
+ # type: (...) -> ContextManager[Scope]
+ pass
+
+ @overload # noqa
+ def configure_scope(
+ self, callback # type: Callable[[Scope], None]
+ ):
+ # type: (...) -> None
+ pass
+
+ def configure_scope( # noqa
+ self, callback=None # type: Optional[Callable[[Scope], None]]
+ ): # noqa
+ # type: (...) -> Optional[ContextManager[Scope]]
+
+ """
+ Reconfigures the scope.
+
+ :param callback: If provided, call the callback with the current scope.
+
+ :returns: If no callback is provided, returns a context manager that returns the scope.
+ """
+
+ client, scope = self._stack[-1]
+ if callback is not None:
+ if client is not None:
+ callback(scope)
+
+ return None
+
+ @contextmanager
+ def inner():
+ # type: () -> Generator[Scope, None, None]
+ if client is not None:
+ yield scope
+ else:
+ yield Scope()
+
+ return inner()
+
+ def start_session(self):
+ # type: (...) -> None
+ """Starts a new session."""
+ self.end_session()
+ client, scope = self._stack[-1]
+ scope._session = Session(
+ release=client.options["release"] if client else None,
+ environment=client.options["environment"] if client else None,
+ user=scope._user,
+ )
+
+ def end_session(self):
+ # type: (...) -> None
+ """Ends the current session if there is one."""
+ client, scope = self._stack[-1]
+ session = scope._session
+ if session is not None:
+ session.close()
+ if client is not None:
+ client.capture_session(session)
+ self._stack[-1][1]._session = None
+
+ def stop_auto_session_tracking(self):
+ # type: (...) -> None
+ """Stops automatic session tracking.
+
+        This temporarily disables session tracking for the current scope
+        when called. To resume session tracking, call
+        `resume_auto_session_tracking`.
+ """
+ self.end_session()
+ client, scope = self._stack[-1]
+ scope._force_auto_session_tracking = False
+
+ def resume_auto_session_tracking(self):
+ # type: (...) -> None
+ """Resumes automatic session tracking for the current scope if
+ disabled earlier. This requires that generally automatic session
+ tracking is enabled.
+ """
+ client, scope = self._stack[-1]
+ scope._force_auto_session_tracking = None
+
+ def flush(
+ self,
+ timeout=None, # type: Optional[float]
+ callback=None, # type: Optional[Callable[[int, float], None]]
+ ):
+ # type: (...) -> None
+ """
+ Alias for :py:meth:`sentry_sdk.Client.flush`
+ """
+ client, scope = self._stack[-1]
+ if client is not None:
+ return client.flush(timeout=timeout, callback=callback)
+
+ def iter_trace_propagation_headers(self):
+ # type: () -> Generator[Tuple[str, str], None, None]
+ # TODO: Document
+ client, scope = self._stack[-1]
+ span = scope.span
+
+ if span is None:
+ return
+
+ propagate_traces = client and client.options["propagate_traces"]
+ if not propagate_traces:
+ return
+
+ if client and client.options["traceparent_v2"]:
+ traceparent = span.to_traceparent()
+ else:
+ traceparent = span.to_legacy_traceparent()
+
+ yield "sentry-trace", traceparent
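+
+    # Intended use, as a sketch (the outgoing request object is
+    # hypothetical): attach the yielded header to outbound HTTP requests
+    # so the trace continues downstream.
+    #
+    #     for key, value in hub.iter_trace_propagation_headers():
+    #         outgoing_request.headers[key] = value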
+
+
+GLOBAL_HUB = Hub()
+_local.set(GLOBAL_HUB)
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/__init__.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/__init__.py
new file mode 100644
index 0000000000..f264bc4855
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/__init__.py
@@ -0,0 +1,183 @@
+"""This package"""
+from __future__ import absolute_import
+
+from threading import Lock
+
+from sentry_sdk._compat import iteritems
+from sentry_sdk.utils import logger
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Callable
+ from typing import Dict
+ from typing import Iterator
+ from typing import List
+ from typing import Set
+ from typing import Tuple
+ from typing import Type
+
+
+_installer_lock = Lock()
+_installed_integrations = set() # type: Set[str]
+
+
+def _generate_default_integrations_iterator(integrations, auto_enabling_integrations):
+ # type: (Tuple[str, ...], Tuple[str, ...]) -> Callable[[bool], Iterator[Type[Integration]]]
+
+ def iter_default_integrations(with_auto_enabling_integrations):
+ # type: (bool) -> Iterator[Type[Integration]]
+ """Returns an iterator of the default integration classes:
+ """
+ from importlib import import_module
+
+ if with_auto_enabling_integrations:
+ all_import_strings = integrations + auto_enabling_integrations
+ else:
+ all_import_strings = integrations
+
+ for import_string in all_import_strings:
+ try:
+ module, cls = import_string.rsplit(".", 1)
+ yield getattr(import_module(module), cls)
+ except (DidNotEnable, SyntaxError) as e:
+ logger.debug(
+ "Did not import default integration %s: %s", import_string, e
+ )
+
+ if isinstance(iter_default_integrations.__doc__, str):
+ for import_string in integrations:
+ iter_default_integrations.__doc__ += "\n- `{}`".format(import_string)
+
+ return iter_default_integrations
+
+
+_AUTO_ENABLING_INTEGRATIONS = (
+ "sentry_sdk.integrations.django.DjangoIntegration",
+ "sentry_sdk.integrations.flask.FlaskIntegration",
+ "sentry_sdk.integrations.bottle.BottleIntegration",
+ "sentry_sdk.integrations.falcon.FalconIntegration",
+ "sentry_sdk.integrations.sanic.SanicIntegration",
+ "sentry_sdk.integrations.celery.CeleryIntegration",
+ "sentry_sdk.integrations.rq.RqIntegration",
+ "sentry_sdk.integrations.aiohttp.AioHttpIntegration",
+ "sentry_sdk.integrations.tornado.TornadoIntegration",
+ "sentry_sdk.integrations.sqlalchemy.SqlalchemyIntegration",
+)
+
+
+iter_default_integrations = _generate_default_integrations_iterator(
+ integrations=(
+ # stdlib/base runtime integrations
+ "sentry_sdk.integrations.logging.LoggingIntegration",
+ "sentry_sdk.integrations.stdlib.StdlibIntegration",
+ "sentry_sdk.integrations.excepthook.ExcepthookIntegration",
+ "sentry_sdk.integrations.dedupe.DedupeIntegration",
+ "sentry_sdk.integrations.atexit.AtexitIntegration",
+ "sentry_sdk.integrations.modules.ModulesIntegration",
+ "sentry_sdk.integrations.argv.ArgvIntegration",
+ "sentry_sdk.integrations.threading.ThreadingIntegration",
+ ),
+ auto_enabling_integrations=_AUTO_ENABLING_INTEGRATIONS,
+)
+
+del _generate_default_integrations_iterator
+
+
+def setup_integrations(
+ integrations, with_defaults=True, with_auto_enabling_integrations=False
+):
+ # type: (List[Integration], bool, bool) -> Dict[str, Integration]
+ """Given a list of integration instances this installs them all. When
+ `with_defaults` is set to `True` then all default integrations are added
+ unless they were already provided before.
+ """
+ integrations = dict(
+ (integration.identifier, integration) for integration in integrations or ()
+ )
+
+ logger.debug("Setting up integrations (with default = %s)", with_defaults)
+
+ # Integrations that are not explicitly set up by the user.
+ used_as_default_integration = set()
+
+ if with_defaults:
+ for integration_cls in iter_default_integrations(
+ with_auto_enabling_integrations
+ ):
+ if integration_cls.identifier not in integrations:
+ instance = integration_cls()
+ integrations[instance.identifier] = instance
+ used_as_default_integration.add(instance.identifier)
+
+ for identifier, integration in iteritems(integrations):
+ with _installer_lock:
+ if identifier not in _installed_integrations:
+ logger.debug(
+ "Setting up previously not enabled integration %s", identifier
+ )
+ try:
+ type(integration).setup_once()
+ except NotImplementedError:
+ if getattr(integration, "install", None) is not None:
+ logger.warning(
+ "Integration %s: The install method is "
+ "deprecated. Use `setup_once`.",
+ identifier,
+ )
+ integration.install()
+ else:
+ raise
+ except DidNotEnable as e:
+ if identifier not in used_as_default_integration:
+ raise
+
+ logger.debug(
+ "Did not enable default integration %s: %s", identifier, e
+ )
+
+ _installed_integrations.add(identifier)
+
+ for identifier in integrations:
+ logger.debug("Enabling integration %s", identifier)
+
+ return integrations
+
+
+class DidNotEnable(Exception):
+ """
+ The integration could not be enabled due to a trivial user error like
+ `flask` not being installed for the `FlaskIntegration`.
+
+ This exception is silently swallowed for default integrations, but reraised
+ for explicitly enabled integrations.
+ """
+
+
+class Integration(object):
+ """Baseclass for all integrations.
+
+ To accept options for an integration, implement your own constructor that
+ saves those options on `self`.
+ """
+
+ install = None
+ """Legacy method, do not implement."""
+
+ identifier = None # type: str
+ """String unique ID of integration type"""
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ """
+ Initialize the integration.
+
+ This function is only called once, ever. Configuration is not available
+ at this point, so the only thing to do here is to hook into exception
+ handlers, and perhaps do monkeypatches.
+
+ Inside those hooks `Integration.current` can be used to access the
+ instance again.
+ """
+ raise NotImplementedError()
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/_wsgi_common.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/_wsgi_common.py
new file mode 100644
index 0000000000..f874663883
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/_wsgi_common.py
@@ -0,0 +1,180 @@
+import json
+
+from sentry_sdk.hub import Hub, _should_send_default_pii
+from sentry_sdk.utils import AnnotatedValue
+from sentry_sdk._compat import text_type, iteritems
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ import sentry_sdk
+
+ from typing import Any
+ from typing import Dict
+ from typing import Optional
+ from typing import Union
+
+
+SENSITIVE_ENV_KEYS = (
+ "REMOTE_ADDR",
+ "HTTP_X_FORWARDED_FOR",
+ "HTTP_SET_COOKIE",
+ "HTTP_COOKIE",
+ "HTTP_AUTHORIZATION",
+ "HTTP_X_FORWARDED_FOR",
+ "HTTP_X_REAL_IP",
+)
+
+SENSITIVE_HEADERS = tuple(
+ x[len("HTTP_") :] for x in SENSITIVE_ENV_KEYS if x.startswith("HTTP_")
+)
+
+
+def request_body_within_bounds(client, content_length):
+ # type: (Optional[sentry_sdk.Client], int) -> bool
+ if client is None:
+ return False
+
+ bodies = client.options["request_bodies"]
+ return not (
+ bodies == "never"
+ or (bodies == "small" and content_length > 10 ** 3)
+ or (bodies == "medium" and content_length > 10 ** 4)
+ )
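+
+# e.g. request_body_within_bounds(client, 5000) is True under the default
+# "medium" setting (10**4-byte cap) and False under "small" (10**3-byte
+# cap); "always" never caps and "never" rejects every body.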
+
+
+class RequestExtractor(object):
+ def __init__(self, request):
+ # type: (Any) -> None
+ self.request = request
+
+ def extract_into_event(self, event):
+ # type: (Dict[str, Any]) -> None
+ client = Hub.current.client
+ if client is None:
+ return
+
+ data = None # type: Optional[Union[AnnotatedValue, Dict[str, Any]]]
+
+ content_length = self.content_length()
+ request_info = event.get("request", {})
+
+ if _should_send_default_pii():
+ request_info["cookies"] = dict(self.cookies())
+
+ if not request_body_within_bounds(client, content_length):
+ data = AnnotatedValue(
+ "",
+ {"rem": [["!config", "x", 0, content_length]], "len": content_length},
+ )
+ else:
+ parsed_body = self.parsed_body()
+ if parsed_body is not None:
+ data = parsed_body
+ elif self.raw_data():
+ data = AnnotatedValue(
+ "",
+ {"rem": [["!raw", "x", 0, content_length]], "len": content_length},
+ )
+ else:
+ data = None
+
+ if data is not None:
+ request_info["data"] = data
+
+ event["request"] = request_info
+
+ def content_length(self):
+ # type: () -> int
+ try:
+ return int(self.env().get("CONTENT_LENGTH", 0))
+ except ValueError:
+ return 0
+
+ def cookies(self):
+ # type: () -> Dict[str, Any]
+ raise NotImplementedError()
+
+ def raw_data(self):
+ # type: () -> Optional[Union[str, bytes]]
+ raise NotImplementedError()
+
+ def form(self):
+ # type: () -> Optional[Dict[str, Any]]
+ raise NotImplementedError()
+
+ def parsed_body(self):
+ # type: () -> Optional[Dict[str, Any]]
+ form = self.form()
+ files = self.files()
+ if form or files:
+ data = dict(iteritems(form))
+ for k, v in iteritems(files):
+ size = self.size_of_file(v)
+ data[k] = AnnotatedValue(
+ "", {"len": size, "rem": [["!raw", "x", 0, size]]}
+ )
+
+ return data
+
+ return self.json()
+
+ def is_json(self):
+ # type: () -> bool
+ return _is_json_content_type(self.env().get("CONTENT_TYPE"))
+
+ def json(self):
+ # type: () -> Optional[Any]
+ try:
+ if not self.is_json():
+ return None
+
+ raw_data = self.raw_data()
+ if raw_data is None:
+ return None
+
+ if isinstance(raw_data, text_type):
+ return json.loads(raw_data)
+ else:
+ return json.loads(raw_data.decode("utf-8"))
+ except ValueError:
+ pass
+
+ return None
+
+ def files(self):
+ # type: () -> Optional[Dict[str, Any]]
+ raise NotImplementedError()
+
+ def size_of_file(self, file):
+ # type: (Any) -> int
+ raise NotImplementedError()
+
+ def env(self):
+ # type: () -> Dict[str, Any]
+ raise NotImplementedError()
+
+
+def _is_json_content_type(ct):
+ # type: (Optional[str]) -> bool
+ mt = (ct or "").split(";", 1)[0]
+    return (
+        mt == "application/json"
+        or (mt.startswith("application/") and mt.endswith("+json"))
+    )
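+
+# e.g. this matches "application/json", "application/vnd.api+json", and
+# "application/ld+json; charset=utf-8", but not "text/json".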
+
+
+def _filter_headers(headers):
+ # type: (Dict[str, str]) -> Dict[str, str]
+ if _should_send_default_pii():
+ return headers
+
+ return {
+ k: (
+ v
+ if k.upper().replace("-", "_") not in SENSITIVE_HEADERS
+ else AnnotatedValue("", {"rem": [["!config", "x", 0, len(v)]]})
+ )
+ for k, v in iteritems(headers)
+ }
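+
+# With send_default_pii disabled, a header dict such as
+# {"Authorization": "Bearer abc", "Accept": "text/html"} (values
+# illustrative) keeps "Accept" unchanged while the "Authorization" value
+# is replaced by an AnnotatedValue placeholder.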
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/aiohttp.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/aiohttp.py
new file mode 100644
index 0000000000..02c76df7ef
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/aiohttp.py
@@ -0,0 +1,211 @@
+import sys
+import weakref
+
+from sentry_sdk._compat import reraise
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.integrations._wsgi_common import (
+ _filter_headers,
+ request_body_within_bounds,
+)
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import (
+ capture_internal_exceptions,
+ event_from_exception,
+ transaction_from_function,
+ HAS_REAL_CONTEXTVARS,
+ AnnotatedValue,
+)
+
+try:
+ import asyncio
+
+ from aiohttp import __version__ as AIOHTTP_VERSION
+ from aiohttp.web import Application, HTTPException, UrlDispatcher
+except ImportError:
+ raise DidNotEnable("AIOHTTP not installed")
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from aiohttp.web_request import Request
+ from aiohttp.abc import AbstractMatchInfo
+ from typing import Any
+ from typing import Dict
+ from typing import Optional
+ from typing import Tuple
+ from typing import Callable
+ from typing import Union
+
+ from sentry_sdk.utils import ExcInfo
+ from sentry_sdk._types import EventProcessor
+
+
+class AioHttpIntegration(Integration):
+ identifier = "aiohttp"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+
+ try:
+ version = tuple(map(int, AIOHTTP_VERSION.split(".")))
+ except (TypeError, ValueError):
+            raise DidNotEnable("AIOHTTP version unparseable: {}".format(AIOHTTP_VERSION))
+
+ if version < (3, 4):
+ raise DidNotEnable("AIOHTTP 3.4 or newer required.")
+
+ if not HAS_REAL_CONTEXTVARS:
+ # We better have contextvars or we're going to leak state between
+ # requests.
+ raise RuntimeError(
+ "The aiohttp integration for Sentry requires Python 3.7+ "
+ " or aiocontextvars package"
+ )
+
+ ignore_logger("aiohttp.server")
+
+ old_handle = Application._handle
+
+ async def sentry_app_handle(self, request, *args, **kwargs):
+ # type: (Any, Request, *Any, **Any) -> Any
+ async def inner():
+ # type: () -> Any
+ hub = Hub.current
+ if hub.get_integration(AioHttpIntegration) is None:
+ return await old_handle(self, request, *args, **kwargs)
+
+ weak_request = weakref.ref(request)
+
+ with Hub(Hub.current) as hub:
+ with hub.configure_scope() as scope:
+ scope.clear_breadcrumbs()
+ scope.add_event_processor(_make_request_processor(weak_request))
+
+ span = Span.continue_from_headers(request.headers)
+ span.op = "http.server"
+ # If this transaction name makes it to the UI, AIOHTTP's
+ # URL resolver did not find a route or died trying.
+ span.transaction = "generic AIOHTTP request"
+
+ with hub.start_span(span):
+ try:
+                            response = await old_handle(self, request, *args, **kwargs)
+ except HTTPException as e:
+ span.set_http_status(e.status_code)
+ raise
+ except asyncio.CancelledError:
+ span.set_status("cancelled")
+ raise
+ except Exception:
+                            # This will probably map to a 500, but there seems
+                            # to be no way to tell. Do not set the span status.
+ reraise(*_capture_exception(hub))
+
+ span.set_http_status(response.status)
+ return response
+
+ # Explicitly wrap in task such that current contextvar context is
+ # copied. Just doing `return await inner()` will leak scope data
+ # between requests.
+ return await asyncio.get_event_loop().create_task(inner())
+
+ Application._handle = sentry_app_handle
+
+ old_urldispatcher_resolve = UrlDispatcher.resolve
+
+ async def sentry_urldispatcher_resolve(self, request):
+ # type: (UrlDispatcher, Request) -> AbstractMatchInfo
+ rv = await old_urldispatcher_resolve(self, request)
+
+ name = None
+
+ try:
+ name = transaction_from_function(rv.handler)
+ except Exception:
+ pass
+
+ if name is not None:
+ with Hub.current.configure_scope() as scope:
+ scope.transaction = name
+
+ return rv
+
+ UrlDispatcher.resolve = sentry_urldispatcher_resolve
+
+
+def _make_request_processor(weak_request):
+ # type: (Callable[[], Request]) -> EventProcessor
+ def aiohttp_processor(
+ event, # type: Dict[str, Any]
+ hint, # type: Dict[str, Tuple[type, BaseException, Any]]
+ ):
+ # type: (...) -> Dict[str, Any]
+ request = weak_request()
+ if request is None:
+ return event
+
+ with capture_internal_exceptions():
+ request_info = event.setdefault("request", {})
+
+ request_info["url"] = "%s://%s%s" % (
+ request.scheme,
+ request.host,
+ request.path,
+ )
+
+ request_info["query_string"] = request.query_string
+ request_info["method"] = request.method
+ request_info["env"] = {"REMOTE_ADDR": request.remote}
+
+ hub = Hub.current
+ request_info["headers"] = _filter_headers(dict(request.headers))
+
+            # Attach the raw body here if it is available and within bounds.
+ # Unfortunately there's no way to get structured data from aiohttp
+ # without awaiting on some coroutine.
+ request_info["data"] = get_aiohttp_request_data(hub, request)
+
+ return event
+
+ return aiohttp_processor
+
+
+def _capture_exception(hub):
+ # type: (Hub) -> ExcInfo
+ exc_info = sys.exc_info()
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=hub.client.options, # type: ignore
+ mechanism={"type": "aiohttp", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+ return exc_info
+
+
+BODY_NOT_READ_MESSAGE = "[Can't show request body due to implementation details.]"
+
+
+def get_aiohttp_request_data(hub, request):
+ # type: (Hub, Request) -> Union[Optional[str], AnnotatedValue]
+ bytes_body = request._read_bytes
+
+ if bytes_body is not None:
+ # we have body to show
+ if not request_body_within_bounds(hub.client, len(bytes_body)):
+
+ return AnnotatedValue(
+ "",
+ {"rem": [["!config", "x", 0, len(bytes_body)]], "len": len(bytes_body)},
+ )
+ encoding = request.charset or "utf-8"
+ return bytes_body.decode(encoding, "replace")
+
+ if request.can_read_body:
+ # body exists but we can't show it
+ return BODY_NOT_READ_MESSAGE
+
+ # request has no body
+ return None
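
For reference, a minimal sketch of how this integration is enabled in an aiohttp app; the DSN, route, and handler below are placeholders, not part of this patch:

```python
import sentry_sdk
from sentry_sdk.integrations.aiohttp import AioHttpIntegration
from aiohttp import web

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[AioHttpIntegration()],
)

async def hello(request):
    1 / 0  # captured by the patched Application._handle above
    return web.Response(text="hello")

app = web.Application()
app.add_routes([web.get("/", hello)])
web.run_app(app)
```

Once a route resolves, `sentry_urldispatcher_resolve` renames the transaction from "generic AIOHTTP request" to the handler's qualified name.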
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/argv.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/argv.py
new file mode 100644
index 0000000000..f005521d32
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/argv.py
@@ -0,0 +1,33 @@
+from __future__ import absolute_import
+
+import sys
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Optional
+
+ from sentry_sdk._types import Event, Hint
+
+
+class ArgvIntegration(Integration):
+ identifier = "argv"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ @add_global_event_processor
+ def processor(event, hint):
+ # type: (Event, Optional[Hint]) -> Optional[Event]
+ if Hub.current.get_integration(ArgvIntegration) is not None:
+ extra = event.setdefault("extra", {})
+ # If some event processor decided to set extra to e.g. an
+ # `int`, don't crash. Not here.
+ if isinstance(extra, dict):
+ extra["sys.argv"] = sys.argv
+
+ return event
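
A sketch of the effect, assuming this SDK's default setup (ArgvIntegration ships among the default integrations, so no explicit opt-in should be needed):

```python
import sys
import sentry_sdk

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

# Every event captured from here on carries extra["sys.argv"], e.g.:
sentry_sdk.capture_message("process started")
print("argv attached to the event:", sys.argv)
```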
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/asgi.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/asgi.py
new file mode 100644
index 0000000000..762634f82f
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/asgi.py
@@ -0,0 +1,194 @@
+"""
+An ASGI middleware.
+
+Based on Tom Christie's `sentry-asgi <https://github.com/encode/sentry-asgi>`_.
+"""
+
+import asyncio
+import functools
+import inspect
+import urllib
+
+from sentry_sdk._types import MYPY
+from sentry_sdk.hub import Hub, _should_send_default_pii
+from sentry_sdk.integrations._wsgi_common import _filter_headers
+from sentry_sdk.utils import ContextVar, event_from_exception, transaction_from_function
+from sentry_sdk.tracing import Span
+
+if MYPY:
+ from typing import Dict
+ from typing import Any
+ from typing import Optional
+ from typing import Callable
+
+ from sentry_sdk._types import Event, Hint
+
+
+_asgi_middleware_applied = ContextVar("sentry_asgi_middleware_applied")
+
+
+def _capture_exception(hub, exc):
+ # type: (Hub, Any) -> None
+
+ # Check client here as it might have been unset while streaming response
+ if hub.client is not None:
+ event, hint = event_from_exception(
+ exc,
+ client_options=hub.client.options,
+ mechanism={"type": "asgi", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+
+def _looks_like_asgi3(app):
+ # type: (Any) -> bool
+ """
+ Try to figure out if an application object supports ASGI3.
+
+ This is how uvicorn figures out the application version as well.
+ """
+ if inspect.isclass(app):
+ return hasattr(app, "__await__")
+ elif inspect.isfunction(app):
+ return asyncio.iscoroutinefunction(app)
+ else:
+ call = getattr(app, "__call__", None) # noqa
+ return asyncio.iscoroutinefunction(call)
+
+
+class SentryAsgiMiddleware:
+ __slots__ = ("app", "__call__")
+
+ def __init__(self, app):
+ # type: (Any) -> None
+ self.app = app
+
+ if _looks_like_asgi3(app):
+ self.__call__ = self._run_asgi3 # type: Callable[..., Any]
+ else:
+ self.__call__ = self._run_asgi2
+
+ def _run_asgi2(self, scope):
+ # type: (Any) -> Any
+ async def inner(receive, send):
+ # type: (Any, Any) -> Any
+ return await self._run_app(scope, lambda: self.app(scope)(receive, send))
+
+ return inner
+
+ async def _run_asgi3(self, scope, receive, send):
+ # type: (Any, Any, Any) -> Any
+ return await self._run_app(scope, lambda: self.app(scope, receive, send))
+
+ async def _run_app(self, scope, callback):
+ # type: (Any, Any) -> Any
+ if _asgi_middleware_applied.get(False):
+ return await callback()
+
+ _asgi_middleware_applied.set(True)
+ try:
+ hub = Hub(Hub.current)
+ with hub:
+ with hub.configure_scope() as sentry_scope:
+ sentry_scope.clear_breadcrumbs()
+ sentry_scope._name = "asgi"
+ processor = functools.partial(
+ self.event_processor, asgi_scope=scope
+ )
+ sentry_scope.add_event_processor(processor)
+
+ if scope["type"] in ("http", "websocket"):
+ span = Span.continue_from_headers(dict(scope["headers"]))
+ span.op = "{}.server".format(scope["type"])
+ else:
+ span = Span()
+ span.op = "asgi.server"
+
+ span.set_tag("asgi.type", scope["type"])
+ span.transaction = "generic ASGI request"
+
+ with hub.start_span(span) as span:
+ # XXX: Would be cool to have correct span status, but we
+ # would have to wrap send(). That is a bit hard to do with
+ # the current abstraction over ASGI 2/3.
+ try:
+ return await callback()
+ except Exception as exc:
+ _capture_exception(hub, exc)
+ raise exc from None
+ finally:
+ _asgi_middleware_applied.set(False)
+
+ def event_processor(self, event, hint, asgi_scope):
+ # type: (Event, Hint, Any) -> Optional[Event]
+ request_info = event.get("request", {})
+
+ if asgi_scope["type"] in ("http", "websocket"):
+ request_info["url"] = self.get_url(asgi_scope)
+ request_info["method"] = asgi_scope["method"]
+ request_info["headers"] = _filter_headers(self.get_headers(asgi_scope))
+ request_info["query_string"] = self.get_query(asgi_scope)
+
+ if asgi_scope.get("client") and _should_send_default_pii():
+ request_info["env"] = {"REMOTE_ADDR": asgi_scope["client"][0]}
+
+ if asgi_scope.get("endpoint"):
+            # Web frameworks like Starlette mutate the ASGI env once routing is
+ # done, which is sometime after the request has started. If we have
+ # an endpoint, overwrite our path-based transaction name.
+ event["transaction"] = self.get_transaction(asgi_scope)
+
+ event["request"] = request_info
+
+ return event
+
+ def get_url(self, scope):
+ # type: (Any) -> str
+ """
+ Extract URL from the ASGI scope, without also including the querystring.
+ """
+ scheme = scope.get("scheme", "http")
+ server = scope.get("server", None)
+ path = scope.get("root_path", "") + scope["path"]
+
+ for key, value in scope["headers"]:
+ if key == b"host":
+ host_header = value.decode("latin-1")
+ return "%s://%s%s" % (scheme, host_header, path)
+
+ if server is not None:
+ host, port = server
+ default_port = {"http": 80, "https": 443, "ws": 80, "wss": 443}[scheme]
+ if port != default_port:
+ return "%s://%s:%s%s" % (scheme, host, port, path)
+ return "%s://%s%s" % (scheme, host, path)
+ return path
+
+ def get_query(self, scope):
+ # type: (Any) -> Any
+ """
+ Extract querystring from the ASGI scope, in the format that the Sentry protocol expects.
+ """
+ return urllib.parse.unquote(scope["query_string"].decode("latin-1"))
+
+ def get_headers(self, scope):
+ # type: (Any) -> Dict[str, Any]
+ """
+ Extract headers from the ASGI scope, in the format that the Sentry protocol expects.
+ """
+ headers = {} # type: Dict[str, str]
+ for raw_key, raw_value in scope["headers"]:
+ key = raw_key.decode("latin-1")
+ value = raw_value.decode("latin-1")
+ if key in headers:
+ headers[key] = headers[key] + ", " + value
+ else:
+ headers[key] = value
+ return headers
+
+ def get_transaction(self, scope):
+ # type: (Any) -> Optional[str]
+ """
+ Return a transaction string to identify the routed endpoint.
+ """
+ return transaction_from_function(scope["endpoint"])
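
A minimal sketch of wrapping a raw ASGI3 app with the middleware above; the app body and DSN are placeholders:

```python
import sentry_sdk
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

async def app(scope, receive, send):
    # A three-argument coroutine, so _looks_like_asgi3 routes to _run_asgi3.
    await send({"type": "http.response.start", "status": 200,
                "headers": [(b"content-type", b"text/plain")]})
    await send({"type": "http.response.body", "body": b"ok"})

app = SentryAsgiMiddleware(app)
# e.g. `uvicorn module:app` would then serve the instrumented app.
```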
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/atexit.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/atexit.py
new file mode 100644
index 0000000000..18fe657bff
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/atexit.py
@@ -0,0 +1,62 @@
+from __future__ import absolute_import
+
+import os
+import sys
+import atexit
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import logger
+from sentry_sdk.integrations import Integration
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+
+ from typing import Any
+ from typing import Optional
+
+
+def default_callback(pending, timeout):
+ # type: (int, int) -> None
+ """This is the default shutdown callback that is set on the options.
+ It prints out a message to stderr that informs the user that some events
+ are still pending and the process is waiting for them to flush out.
+ """
+
+ def echo(msg):
+ # type: (str) -> None
+ sys.stderr.write(msg + "\n")
+
+ echo("Sentry is attempting to send %i pending error messages" % pending)
+ echo("Waiting up to %s seconds" % timeout)
+ echo("Press Ctrl-%s to quit" % (os.name == "nt" and "Break" or "C"))
+ sys.stderr.flush()
+
+
+class AtexitIntegration(Integration):
+ identifier = "atexit"
+
+ def __init__(self, callback=None):
+ # type: (Optional[Any]) -> None
+ if callback is None:
+ callback = default_callback
+ self.callback = callback
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ @atexit.register
+ def _shutdown():
+ # type: () -> None
+ logger.debug("atexit: got shutdown signal")
+ hub = Hub.main
+ integration = hub.get_integration(AtexitIntegration)
+ if integration is not None:
+ logger.debug("atexit: shutting down client")
+
+ # If there is a session on the hub, close it now.
+ hub.end_session()
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+ client.close(callback=integration.callback)
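
A sketch of swapping in a custom shutdown callback, matching the `(pending, timeout)` signature of `default_callback` above; the message text is a placeholder:

```python
import sentry_sdk
from sentry_sdk.integrations.atexit import AtexitIntegration

def quiet_callback(pending, timeout):
    # type: (int, int) -> None
    # One line instead of the default three-line stderr message.
    print("flushing %d pending Sentry event(s), waiting up to %ss" % (pending, timeout))

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[AtexitIntegration(callback=quiet_callback)],
)
```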
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/aws_lambda.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/aws_lambda.py
new file mode 100644
index 0000000000..3a08d998db
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/aws_lambda.py
@@ -0,0 +1,254 @@
+from datetime import datetime, timedelta
+from os import environ
+import sys
+
+from sentry_sdk.hub import Hub, _should_send_default_pii
+from sentry_sdk._compat import reraise
+from sentry_sdk.utils import (
+ AnnotatedValue,
+ capture_internal_exceptions,
+ event_from_exception,
+ logger,
+)
+from sentry_sdk.integrations import Integration
+from sentry_sdk.integrations._wsgi_common import _filter_headers
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import TypeVar
+ from typing import Callable
+ from typing import Optional
+
+ from sentry_sdk._types import EventProcessor, Event, Hint
+
+ F = TypeVar("F", bound=Callable[..., Any])
+
+
+def _wrap_handler(handler):
+ # type: (F) -> F
+ def sentry_handler(event, context, *args, **kwargs):
+ # type: (Any, Any, *Any, **Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(AwsLambdaIntegration)
+ if integration is None:
+ return handler(event, context, *args, **kwargs)
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ with hub.push_scope() as scope:
+ with capture_internal_exceptions():
+ scope.clear_breadcrumbs()
+ scope.transaction = context.function_name
+ scope.add_event_processor(_make_request_event_processor(event, context))
+
+ try:
+ return handler(event, context, *args, **kwargs)
+ except Exception:
+ exc_info = sys.exc_info()
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=client.options,
+ mechanism={"type": "aws_lambda", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+ reraise(*exc_info)
+
+ return sentry_handler # type: ignore
+
+
+def _drain_queue():
+ # type: () -> None
+ with capture_internal_exceptions():
+ hub = Hub.current
+ integration = hub.get_integration(AwsLambdaIntegration)
+ if integration is not None:
+ # Flush out the event queue before AWS kills the
+ # process.
+ hub.flush()
+
+
+class AwsLambdaIntegration(Integration):
+ identifier = "aws_lambda"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ import __main__ as lambda_bootstrap # type: ignore
+
+ pre_37 = True # Python 3.6 or 2.7
+
+ if not hasattr(lambda_bootstrap, "handle_http_request"):
+ try:
+ import bootstrap as lambda_bootstrap # type: ignore
+
+ pre_37 = False # Python 3.7
+ except ImportError:
+ pass
+
+ if not hasattr(lambda_bootstrap, "handle_event_request"):
+ logger.warning(
+ "Not running in AWS Lambda environment, "
+ "AwsLambdaIntegration disabled"
+ )
+ return
+
+ if pre_37:
+ old_handle_event_request = lambda_bootstrap.handle_event_request
+
+ def sentry_handle_event_request(request_handler, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> Any
+ request_handler = _wrap_handler(request_handler)
+ return old_handle_event_request(request_handler, *args, **kwargs)
+
+ lambda_bootstrap.handle_event_request = sentry_handle_event_request
+
+ old_handle_http_request = lambda_bootstrap.handle_http_request
+
+ def sentry_handle_http_request(request_handler, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> Any
+ request_handler = _wrap_handler(request_handler)
+ return old_handle_http_request(request_handler, *args, **kwargs)
+
+ lambda_bootstrap.handle_http_request = sentry_handle_http_request
+
+ # Patch to_json to drain the queue. This should work even when the
+ # SDK is initialized inside of the handler
+
+ old_to_json = lambda_bootstrap.to_json
+
+ def sentry_to_json(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ _drain_queue()
+ return old_to_json(*args, **kwargs)
+
+ lambda_bootstrap.to_json = sentry_to_json
+ else:
+ old_handle_event_request = lambda_bootstrap.handle_event_request
+
+ def sentry_handle_event_request( # type: ignore
+ lambda_runtime_client, request_handler, *args, **kwargs
+ ):
+ request_handler = _wrap_handler(request_handler)
+ return old_handle_event_request(
+ lambda_runtime_client, request_handler, *args, **kwargs
+ )
+
+ lambda_bootstrap.handle_event_request = sentry_handle_event_request
+
+ # Patch the runtime client to drain the queue. This should work
+ # even when the SDK is initialized inside of the handler
+
+ def _wrap_post_function(f):
+ # type: (F) -> F
+ def inner(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ _drain_queue()
+ return f(*args, **kwargs)
+
+ return inner # type: ignore
+
+ lambda_bootstrap.LambdaRuntimeClient.post_invocation_result = _wrap_post_function(
+ lambda_bootstrap.LambdaRuntimeClient.post_invocation_result
+ )
+ lambda_bootstrap.LambdaRuntimeClient.post_invocation_error = _wrap_post_function(
+ lambda_bootstrap.LambdaRuntimeClient.post_invocation_error
+ )
+
+
+def _make_request_event_processor(aws_event, aws_context):
+ # type: (Any, Any) -> EventProcessor
+ start_time = datetime.now()
+
+ def event_processor(event, hint, start_time=start_time):
+ # type: (Event, Hint, datetime) -> Optional[Event]
+ extra = event.setdefault("extra", {})
+ extra["lambda"] = {
+ "function_name": aws_context.function_name,
+ "function_version": aws_context.function_version,
+ "invoked_function_arn": aws_context.invoked_function_arn,
+ "remaining_time_in_millis": aws_context.get_remaining_time_in_millis(),
+ "aws_request_id": aws_context.aws_request_id,
+ }
+
+ extra["cloudwatch logs"] = {
+ "url": _get_cloudwatch_logs_url(aws_context, start_time),
+ "log_group": aws_context.log_group_name,
+ "log_stream": aws_context.log_stream_name,
+ }
+
+ request = event.get("request", {})
+
+ if "httpMethod" in aws_event:
+ request["method"] = aws_event["httpMethod"]
+
+ request["url"] = _get_url(aws_event, aws_context)
+
+ if "queryStringParameters" in aws_event:
+ request["query_string"] = aws_event["queryStringParameters"]
+
+ if "headers" in aws_event:
+ request["headers"] = _filter_headers(aws_event["headers"])
+
+ if aws_event.get("body", None):
+            # Unfortunately there is no way to get a structured body from the
+            # AWS event, so every body is treated as unstructured.
+ request["data"] = AnnotatedValue("", {"rem": [["!raw", "x", 0, 0]]})
+
+ if _should_send_default_pii():
+ user_info = event.setdefault("user", {})
+
+ id = aws_event.get("identity", {}).get("userArn")
+ if id is not None:
+ user_info.setdefault("id", id)
+
+ ip = aws_event.get("identity", {}).get("sourceIp")
+ if ip is not None:
+ user_info.setdefault("ip_address", ip)
+
+ event["request"] = request
+
+ return event
+
+ return event_processor
+
+
+def _get_url(event, context):
+ # type: (Any, Any) -> str
+ path = event.get("path", None)
+ headers = event.get("headers", {})
+ host = headers.get("Host", None)
+ proto = headers.get("X-Forwarded-Proto", None)
+ if proto and host and path:
+ return "{}://{}{}".format(proto, host, path)
+ return "awslambda:///{}".format(context.function_name)
+
+
+def _get_cloudwatch_logs_url(context, start_time):
+ # type: (Any, datetime) -> str
+ """
+    Generates a CloudWatch Logs console URL based on the context object.
+
+ Arguments:
+ context {Any} -- context from lambda handler
+
+ Returns:
+ str -- AWS Console URL to logs.
+ """
+ formatstring = "%Y-%m-%dT%H:%M:%S"
+
+ url = (
+ "https://console.aws.amazon.com/cloudwatch/home?region={region}"
+ "#logEventViewer:group={log_group};stream={log_stream}"
+ ";start={start_time};end={end_time}"
+ ).format(
+ region=environ.get("AWS_REGION"),
+ log_group=context.log_group_name,
+ log_stream=context.log_stream_name,
+ start_time=(start_time - timedelta(seconds=1)).strftime(formatstring),
+ end_time=(datetime.now() + timedelta(seconds=2)).strftime(formatstring),
+ )
+
+ return url
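
A sketch of a Lambda module using this integration; `init` runs at import time (i.e. during a cold start), and the patched bootstrap drains the event queue before each invocation result is posted. The DSN is a placeholder:

```python
import sentry_sdk
from sentry_sdk.integrations.aws_lambda import AwsLambdaIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[AwsLambdaIntegration()],
)

def handler(event, context):
    # An unhandled error here is captured with the function name, request id,
    # remaining time, and a CloudWatch Logs URL attached as extra data.
    raise RuntimeError("example failure")
```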
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/beam.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/beam.py
new file mode 100644
index 0000000000..7252746a7f
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/beam.py
@@ -0,0 +1,184 @@
+from __future__ import absolute_import
+
+import sys
+import types
+from functools import wraps
+
+from sentry_sdk.hub import Hub
+from sentry_sdk._compat import reraise
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+from sentry_sdk.integrations import Integration
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Iterator
+ from typing import TypeVar
+ from typing import Optional
+ from typing import Callable
+
+ from sentry_sdk.client import Client
+ from sentry_sdk._types import ExcInfo
+
+ T = TypeVar("T")
+ F = TypeVar("F", bound=Callable[..., Any])
+
+
+WRAPPED_FUNC = "_wrapped_{}_"
+INSPECT_FUNC = "_inspect_{}" # Required format per apache_beam/transforms/core.py
+USED_FUNC = "_sentry_used_"
+
+
+class BeamIntegration(Integration):
+ identifier = "beam"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ from apache_beam.transforms.core import DoFn, ParDo # type: ignore
+
+ ignore_logger("root")
+ ignore_logger("bundle_processor.create")
+
+ function_patches = ["process", "start_bundle", "finish_bundle", "setup"]
+ for func_name in function_patches:
+ setattr(
+ DoFn,
+ INSPECT_FUNC.format(func_name),
+ _wrap_inspect_call(DoFn, func_name),
+ )
+
+ old_init = ParDo.__init__
+
+ def sentry_init_pardo(self, fn, *args, **kwargs):
+ # type: (ParDo, Any, *Any, **Any) -> Any
+ # Do not monkey patch init twice
+ if not getattr(self, "_sentry_is_patched", False):
+ for func_name in function_patches:
+ if not hasattr(fn, func_name):
+ continue
+ wrapped_func = WRAPPED_FUNC.format(func_name)
+
+                    # Check whether the inspect function is set and the process
+                    # function is not, to avoid monkey patching process twice.
+                    # Also check whether the function is part of the object,
+                    # for backwards compatibility.
+ process_func = getattr(fn, func_name)
+ inspect_func = getattr(fn, INSPECT_FUNC.format(func_name))
+ if not getattr(inspect_func, USED_FUNC, False) and not getattr(
+ process_func, USED_FUNC, False
+ ):
+ setattr(fn, wrapped_func, process_func)
+ setattr(fn, func_name, _wrap_task_call(process_func))
+
+ self._sentry_is_patched = True
+ old_init(self, fn, *args, **kwargs)
+
+ ParDo.__init__ = sentry_init_pardo
+
+
+def _wrap_inspect_call(cls, func_name):
+ # type: (Any, Any) -> Any
+ from apache_beam.typehints.decorators import getfullargspec # type: ignore
+
+ if not hasattr(cls, func_name):
+ return None
+
+ def _inspect(self):
+ # type: (Any) -> Any
+ """
+        This inspect function overrides the way Beam gets the argspec.
+ """
+ wrapped_func = WRAPPED_FUNC.format(func_name)
+ if hasattr(self, wrapped_func):
+ process_func = getattr(self, wrapped_func)
+ else:
+ process_func = getattr(self, func_name)
+ setattr(self, func_name, _wrap_task_call(process_func))
+ setattr(self, wrapped_func, process_func)
+
+ # getfullargspec is deprecated in more recent beam versions and get_function_args_defaults
+ # (which uses Signatures internally) should be used instead.
+ try:
+ from apache_beam.transforms.core import get_function_args_defaults
+
+ return get_function_args_defaults(process_func)
+ except ImportError:
+ return getfullargspec(process_func)
+
+ setattr(_inspect, USED_FUNC, True)
+ return _inspect
+
+
+def _wrap_task_call(func):
+ # type: (F) -> F
+ """
+    Wrap the task call in a try/except to capture exceptions.
+    Pass the client on to raise_exception so it can be rebound.
+ """
+ client = Hub.current.client
+
+ @wraps(func)
+ def _inner(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ try:
+ gen = func(*args, **kwargs)
+ except Exception:
+ raise_exception(client)
+
+ if not isinstance(gen, types.GeneratorType):
+ return gen
+ return _wrap_generator_call(gen, client)
+
+ setattr(_inner, USED_FUNC, True)
+ return _inner # type: ignore
+
+
+def _capture_exception(exc_info, hub):
+ # type: (ExcInfo, Hub) -> None
+ """
+ Send Beam exception to Sentry.
+ """
+ integration = hub.get_integration(BeamIntegration)
+ if integration is None:
+ return
+
+ client = hub.client
+ if client is None:
+ return
+
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=client.options,
+ mechanism={"type": "beam", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+
+def raise_exception(client):
+ # type: (Optional[Client]) -> None
+ """
+ Raise an exception. If the client is not in the hub, rebind it.
+ """
+ hub = Hub.current
+ if hub.client is None:
+ hub.bind_client(client)
+ exc_info = sys.exc_info()
+ with capture_internal_exceptions():
+ _capture_exception(exc_info, hub)
+ reraise(*exc_info)
+
+
+def _wrap_generator_call(gen, client):
+ # type: (Iterator[T], Optional[Client]) -> Iterator[T]
+ """
+ Wrap the generator to handle any failures.
+ """
+ while True:
+ try:
+ yield next(gen)
+ except StopIteration:
+ break
+ except Exception:
+ raise_exception(client)
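
A sketch of enabling the integration; the patched `ParDo.__init__` above then wraps each DoFn lazily, so no pipeline changes should be needed. The DSN is a placeholder:

```python
import sentry_sdk
from sentry_sdk.integrations.beam import BeamIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[BeamIntegration()],
)
# Any exception raised inside a DoFn.process (or the generator it returns)
# is now routed through raise_exception and reported before Beam re-raises it.
```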
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/bottle.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/bottle.py
new file mode 100644
index 0000000000..80224e4dc4
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/bottle.py
@@ -0,0 +1,199 @@
+from __future__ import absolute_import
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import (
+ capture_internal_exceptions,
+ event_from_exception,
+ transaction_from_function,
+)
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.integrations._wsgi_common import RequestExtractor
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from sentry_sdk.integrations.wsgi import _ScopedResponse
+ from typing import Any
+ from typing import Dict
+ from typing import Callable
+ from typing import Optional
+ from bottle import FileUpload, FormsDict, LocalRequest # type: ignore
+
+ from sentry_sdk._types import EventProcessor
+
+try:
+ from bottle import (
+ Bottle,
+ Route,
+ request as bottle_request,
+ HTTPResponse,
+ __version__ as BOTTLE_VERSION,
+ )
+except ImportError:
+ raise DidNotEnable("Bottle not installed")
+
+
+TRANSACTION_STYLE_VALUES = ("endpoint", "url")
+
+
+class BottleIntegration(Integration):
+ identifier = "bottle"
+
+ transaction_style = None
+
+ def __init__(self, transaction_style="endpoint"):
+ # type: (str) -> None
+
+ if transaction_style not in TRANSACTION_STYLE_VALUES:
+ raise ValueError(
+ "Invalid value for transaction_style: %s (must be in %s)"
+ % (transaction_style, TRANSACTION_STYLE_VALUES)
+ )
+ self.transaction_style = transaction_style
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+
+ try:
+ version = tuple(map(int, BOTTLE_VERSION.split(".")))
+ except (TypeError, ValueError):
+ raise DidNotEnable("Unparseable Bottle version: {}".format(version))
+
+ if version < (0, 12):
+ raise DidNotEnable("Bottle 0.12 or newer required.")
+
+ # monkey patch method Bottle.__call__
+ old_app = Bottle.__call__
+
+ def sentry_patched_wsgi_app(self, environ, start_response):
+ # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+
+ hub = Hub.current
+ integration = hub.get_integration(BottleIntegration)
+ if integration is None:
+ return old_app(self, environ, start_response)
+
+ return SentryWsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))(
+ environ, start_response
+ )
+
+ Bottle.__call__ = sentry_patched_wsgi_app
+
+ # monkey patch method Bottle._handle
+ old_handle = Bottle._handle
+
+ def _patched_handle(self, environ):
+ # type: (Bottle, Dict[str, Any]) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(BottleIntegration)
+ if integration is None:
+ return old_handle(self, environ)
+
+ # create new scope
+ scope_manager = hub.push_scope()
+
+ with scope_manager:
+ app = self
+ with hub.configure_scope() as scope:
+ scope._name = "bottle"
+ scope.add_event_processor(
+ _make_request_event_processor(app, bottle_request, integration)
+ )
+ res = old_handle(self, environ)
+
+ # scope cleanup
+ return res
+
+ Bottle._handle = _patched_handle
+
+ # monkey patch method Route._make_callback
+ old_make_callback = Route._make_callback
+
+ def patched_make_callback(self, *args, **kwargs):
+ # type: (Route, *object, **object) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(BottleIntegration)
+ prepared_callback = old_make_callback(self, *args, **kwargs)
+ if integration is None:
+ return prepared_callback
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ def wrapped_callback(*args, **kwargs):
+ # type: (*object, **object) -> Any
+
+ try:
+ res = prepared_callback(*args, **kwargs)
+ except HTTPResponse:
+ raise
+ except Exception as exception:
+ event, hint = event_from_exception(
+ exception,
+ client_options=client.options,
+ mechanism={"type": "bottle", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+ raise exception
+
+ return res
+
+ return wrapped_callback
+
+ Route._make_callback = patched_make_callback
+
+
+class BottleRequestExtractor(RequestExtractor):
+ def env(self):
+ # type: () -> Dict[str, str]
+ return self.request.environ
+
+ def cookies(self):
+ # type: () -> Dict[str, str]
+ return self.request.cookies
+
+ def raw_data(self):
+ # type: () -> bytes
+ return self.request.body.read()
+
+ def form(self):
+ # type: () -> FormsDict
+ if self.is_json():
+ return None
+ return self.request.forms.decode()
+
+ def files(self):
+ # type: () -> Optional[Dict[str, str]]
+ if self.is_json():
+ return None
+
+ return self.request.files
+
+ def size_of_file(self, file):
+ # type: (FileUpload) -> int
+ return file.content_length
+
+
+def _make_request_event_processor(app, request, integration):
+ # type: (Bottle, LocalRequest, BottleIntegration) -> EventProcessor
+ def inner(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+
+ try:
+ if integration.transaction_style == "endpoint":
+ event["transaction"] = request.route.name or transaction_from_function(
+ request.route.callback
+ )
+ elif integration.transaction_style == "url":
+ event["transaction"] = request.route.rule
+ except Exception:
+ pass
+
+ with capture_internal_exceptions():
+ BottleRequestExtractor(request).extract_into_event(event)
+
+ return event
+
+ return inner
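
A minimal sketch of a Bottle app under this integration; the route, port, and DSN are placeholders:

```python
import sentry_sdk
from sentry_sdk.integrations.bottle import BottleIntegration
from bottle import Bottle, run

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[BottleIntegration(transaction_style="url")],
)

app = Bottle()

@app.route("/divide")
def divide():
    1 / 0  # captured by wrapped_callback; transaction named from the route rule

run(app, host="localhost", port=8080)
```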
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/celery.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/celery.py
new file mode 100644
index 0000000000..9b58796173
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/celery.py
@@ -0,0 +1,258 @@
+from __future__ import absolute_import
+
+import functools
+import sys
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+from sentry_sdk.tracing import Span
+from sentry_sdk._compat import reraise
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import TypeVar
+ from typing import Callable
+ from typing import Optional
+
+ from sentry_sdk._types import EventProcessor, Event, Hint, ExcInfo
+
+ F = TypeVar("F", bound=Callable[..., Any])
+
+
+try:
+ from celery import VERSION as CELERY_VERSION # type: ignore
+ from celery.exceptions import ( # type: ignore
+ SoftTimeLimitExceeded,
+ Retry,
+ Ignore,
+ Reject,
+ )
+except ImportError:
+ raise DidNotEnable("Celery not installed")
+
+
+CELERY_CONTROL_FLOW_EXCEPTIONS = (Retry, Ignore, Reject)
+
+
+class CeleryIntegration(Integration):
+ identifier = "celery"
+
+ def __init__(self, propagate_traces=True):
+ # type: (bool) -> None
+ self.propagate_traces = propagate_traces
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ if CELERY_VERSION < (3,):
+ raise DidNotEnable("Celery 3 or newer required.")
+
+ import celery.app.trace as trace # type: ignore
+
+ old_build_tracer = trace.build_tracer
+
+ def sentry_build_tracer(name, task, *args, **kwargs):
+ # type: (Any, Any, *Any, **Any) -> Any
+ if not getattr(task, "_sentry_is_patched", False):
+ # Need to patch both methods because older celery sometimes
+ # short-circuits to task.run if it thinks it's safe.
+ task.__call__ = _wrap_task_call(task, task.__call__)
+ task.run = _wrap_task_call(task, task.run)
+ task.apply_async = _wrap_apply_async(task, task.apply_async)
+
+ # `build_tracer` is apparently called for every task
+ # invocation. Can't wrap every celery task for every invocation
+ # or we will get infinitely nested wrapper functions.
+ task._sentry_is_patched = True
+
+ return _wrap_tracer(task, old_build_tracer(name, task, *args, **kwargs))
+
+ trace.build_tracer = sentry_build_tracer
+
+ _patch_worker_exit()
+
+ # This logger logs every status of every task that ran on the worker.
+ # Meaning that every task's breadcrumbs are full of stuff like "Task
+ # <foo> raised unexpected <bar>".
+ ignore_logger("celery.worker.job")
+ ignore_logger("celery.app.trace")
+
+ # This is stdout/err redirected to a logger, can't deal with this
+ # (need event_level=logging.WARN to reproduce)
+ ignore_logger("celery.redirected")
+
+
+def _wrap_apply_async(task, f):
+ # type: (Any, F) -> F
+ @functools.wraps(f)
+ def apply_async(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(CeleryIntegration)
+ if integration is not None and integration.propagate_traces:
+ headers = None
+ for key, value in hub.iter_trace_propagation_headers():
+ if headers is None:
+ headers = dict(kwargs.get("headers") or {})
+ headers[key] = value
+ if headers is not None:
+ kwargs["headers"] = headers
+
+ with hub.start_span(op="celery.submit", description=task.name):
+ return f(*args, **kwargs)
+ else:
+ return f(*args, **kwargs)
+
+ return apply_async # type: ignore
+
+
+def _wrap_tracer(task, f):
+ # type: (Any, F) -> F
+
+ # Need to wrap tracer for pushing the scope before prerun is sent, and
+ # popping it after postrun is sent.
+ #
+ # This is the reason we don't use signals for hooking in the first place.
+ # Also because in Celery 3, signal dispatch returns early if one handler
+ # crashes.
+ @functools.wraps(f)
+ def _inner(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ hub = Hub.current
+ if hub.get_integration(CeleryIntegration) is None:
+ return f(*args, **kwargs)
+
+ with hub.push_scope() as scope:
+ scope._name = "celery"
+ scope.clear_breadcrumbs()
+ scope.add_event_processor(_make_event_processor(task, *args, **kwargs))
+
+ span = Span.continue_from_headers(args[3].get("headers") or {})
+ span.op = "celery.task"
+ span.transaction = "unknown celery task"
+
+ # Could possibly use a better hook than this one
+ span.set_status("ok")
+
+ with capture_internal_exceptions():
+ # Celery task objects are not a thing to be trusted. Even
+ # something such as attribute access can fail.
+ span.transaction = task.name
+
+ with hub.start_span(span):
+ return f(*args, **kwargs)
+
+ return _inner # type: ignore
+
+
+def _wrap_task_call(task, f):
+ # type: (Any, F) -> F
+
+ # Need to wrap task call because the exception is caught before we get to
+ # see it. Also celery's reported stacktrace is untrustworthy.
+
+ # functools.wraps is important here because celery-once looks at this
+ # method's name.
+ # https://github.com/getsentry/sentry-python/issues/421
+ @functools.wraps(f)
+ def _inner(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ try:
+ return f(*args, **kwargs)
+ except Exception:
+ exc_info = sys.exc_info()
+ with capture_internal_exceptions():
+ _capture_exception(task, exc_info)
+ reraise(*exc_info)
+
+ return _inner # type: ignore
+
+
+def _make_event_processor(task, uuid, args, kwargs, request=None):
+ # type: (Any, Any, Any, Any, Optional[Any]) -> EventProcessor
+ def event_processor(event, hint):
+ # type: (Event, Hint) -> Optional[Event]
+
+ with capture_internal_exceptions():
+ tags = event.setdefault("tags", {})
+ tags["celery_task_id"] = uuid
+ extra = event.setdefault("extra", {})
+ extra["celery-job"] = {
+ "task_name": task.name,
+ "args": args,
+ "kwargs": kwargs,
+ }
+
+ if "exc_info" in hint:
+ with capture_internal_exceptions():
+ if issubclass(hint["exc_info"][0], SoftTimeLimitExceeded):
+ event["fingerprint"] = [
+ "celery",
+ "SoftTimeLimitExceeded",
+ getattr(task, "name", task),
+ ]
+
+ return event
+
+ return event_processor
+
+
+def _capture_exception(task, exc_info):
+ # type: (Any, ExcInfo) -> None
+ hub = Hub.current
+
+ if hub.get_integration(CeleryIntegration) is None:
+ return
+ if isinstance(exc_info[1], CELERY_CONTROL_FLOW_EXCEPTIONS):
+        # Control flow exceptions do not map to any span status.
+ _set_status(hub, "aborted")
+ return
+
+ _set_status(hub, "internal_error")
+
+ if hasattr(task, "throws") and isinstance(exc_info[1], task.throws):
+ return
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=client.options,
+ mechanism={"type": "celery", "handled": False},
+ )
+
+ hub.capture_event(event, hint=hint)
+
+
+def _set_status(hub, status):
+ # type: (Hub, str) -> None
+ with capture_internal_exceptions():
+ with hub.configure_scope() as scope:
+ if scope.span is not None:
+ scope.span.set_status(status)
+
+
+def _patch_worker_exit():
+ # type: () -> None
+
+ # Need to flush queue before worker shutdown because a crashing worker will
+ # call os._exit
+ from billiard.pool import Worker # type: ignore
+
+ old_workloop = Worker.workloop
+
+ def sentry_workloop(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ try:
+ return old_workloop(*args, **kwargs)
+ finally:
+ with capture_internal_exceptions():
+ hub = Hub.current
+ if hub.get_integration(CeleryIntegration) is not None:
+ hub.flush()
+
+ Worker.workloop = sentry_workloop
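
A sketch of a Celery module under this integration; note that `sentry_sdk.init` must also run in the worker process, and the broker URL and DSN below are placeholders:

```python
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from celery import Celery

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[CeleryIntegration(propagate_traces=True)],
)

app = Celery("tasks", broker="redis://localhost:6379/0")  # placeholder broker

@app.task
def divide(x, y):
    return x / y  # a ZeroDivisionError here is captured by _wrap_task_call
```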
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/dedupe.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/dedupe.py
new file mode 100644
index 0000000000..b023df2042
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/dedupe.py
@@ -0,0 +1,43 @@
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import ContextVar
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Optional
+
+ from sentry_sdk._types import Event, Hint
+
+
+class DedupeIntegration(Integration):
+ identifier = "dedupe"
+
+ def __init__(self):
+ # type: () -> None
+ self._last_seen = ContextVar("last-seen")
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ @add_global_event_processor
+ def processor(event, hint):
+ # type: (Event, Optional[Hint]) -> Optional[Event]
+ if hint is None:
+ return event
+
+ integration = Hub.current.get_integration(DedupeIntegration)
+
+ if integration is None:
+ return event
+
+ exc_info = hint.get("exc_info", None)
+ if exc_info is None:
+ return event
+
+ exc = exc_info[1]
+ if integration._last_seen.get(None) is exc:
+ return None
+ integration._last_seen.set(exc)
+ return event
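
A sketch of the deduplication behavior; DedupeIntegration ships as a default integration in this SDK, so a plain `init` should suffice:

```python
import sentry_sdk

sentry_sdk.init(dsn="https://examplePublicKey@o0.ingest.sentry.io/0")  # placeholder DSN

err = ValueError("boom")
sentry_sdk.capture_exception(err)                 # sent
sentry_sdk.capture_exception(err)                 # dropped: same instance as _last_seen
sentry_sdk.capture_exception(ValueError("boom"))  # sent: a distinct instance
```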
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/django/__init__.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/__init__.py
new file mode 100644
index 0000000000..4e62fe3b74
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/__init__.py
@@ -0,0 +1,484 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+
+import sys
+import threading
+import weakref
+
+from sentry_sdk._types import MYPY
+from sentry_sdk.hub import Hub, _should_send_default_pii
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.serializer import add_global_repr_processor
+from sentry_sdk.tracing import record_sql_queries
+from sentry_sdk.utils import (
+ HAS_REAL_CONTEXTVARS,
+ logger,
+ capture_internal_exceptions,
+ event_from_exception,
+ transaction_from_function,
+ walk_exception_chain,
+)
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.integrations._wsgi_common import RequestExtractor
+
+try:
+ from django import VERSION as DJANGO_VERSION
+ from django.core import signals
+
+ try:
+ from django.urls import resolve
+ except ImportError:
+ from django.core.urlresolvers import resolve
+except ImportError:
+ raise DidNotEnable("Django not installed")
+
+
+from sentry_sdk.integrations.django.transactions import LEGACY_RESOLVER
+from sentry_sdk.integrations.django.templates import get_template_frame_from_exception
+from sentry_sdk.integrations.django.middleware import patch_django_middlewares
+
+
+if MYPY:
+ from typing import Any
+ from typing import Callable
+ from typing import Dict
+ from typing import Optional
+ from typing import Union
+ from typing import List
+
+ from django.core.handlers.wsgi import WSGIRequest
+ from django.http.response import HttpResponse
+ from django.http.request import QueryDict
+ from django.utils.datastructures import MultiValueDict
+
+ from sentry_sdk.integrations.wsgi import _ScopedResponse
+ from sentry_sdk._types import Event, Hint, EventProcessor, NotImplementedType
+
+
+if DJANGO_VERSION < (1, 10):
+
+ def is_authenticated(request_user):
+ # type: (Any) -> bool
+ return request_user.is_authenticated()
+
+
+else:
+
+ def is_authenticated(request_user):
+ # type: (Any) -> bool
+ return request_user.is_authenticated
+
+
+TRANSACTION_STYLE_VALUES = ("function_name", "url")
+
+
+class DjangoIntegration(Integration):
+ identifier = "django"
+
+ transaction_style = None
+ middleware_spans = None
+
+ def __init__(self, transaction_style="url", middleware_spans=True):
+ # type: (str, bool) -> None
+ if transaction_style not in TRANSACTION_STYLE_VALUES:
+ raise ValueError(
+ "Invalid value for transaction_style: %s (must be in %s)"
+ % (transaction_style, TRANSACTION_STYLE_VALUES)
+ )
+ self.transaction_style = transaction_style
+ self.middleware_spans = middleware_spans
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+
+ if DJANGO_VERSION < (1, 6):
+ raise DidNotEnable("Django 1.6 or newer is required.")
+
+ install_sql_hook()
+ # Patch in our custom middleware.
+
+ # logs an error for every 500
+ ignore_logger("django.server")
+ ignore_logger("django.request")
+
+ from django.core.handlers.wsgi import WSGIHandler
+
+ old_app = WSGIHandler.__call__
+
+ def sentry_patched_wsgi_handler(self, environ, start_response):
+ # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+ if Hub.current.get_integration(DjangoIntegration) is None:
+ return old_app(self, environ, start_response)
+
+ bound_old_app = old_app.__get__(self, WSGIHandler)
+
+ return SentryWsgiMiddleware(bound_old_app)(environ, start_response)
+
+ WSGIHandler.__call__ = sentry_patched_wsgi_handler
+
+ _patch_django_asgi_handler()
+
+ # patch get_response, because at that point we have the Django request
+ # object
+ from django.core.handlers.base import BaseHandler
+
+ old_get_response = BaseHandler.get_response
+
+ def sentry_patched_get_response(self, request):
+ # type: (Any, WSGIRequest) -> Union[HttpResponse, BaseException]
+ hub = Hub.current
+ integration = hub.get_integration(DjangoIntegration)
+ if integration is not None:
+ _patch_drf()
+
+ with hub.configure_scope() as scope:
+ # Rely on WSGI middleware to start a trace
+ try:
+ if integration.transaction_style == "function_name":
+ scope.transaction = transaction_from_function(
+ resolve(request.path).func
+ )
+ elif integration.transaction_style == "url":
+ scope.transaction = LEGACY_RESOLVER.resolve(request.path)
+ except Exception:
+ pass
+
+ scope.add_event_processor(
+ _make_event_processor(weakref.ref(request), integration)
+ )
+ return old_get_response(self, request)
+
+ BaseHandler.get_response = sentry_patched_get_response
+
+ signals.got_request_exception.connect(_got_request_exception)
+
+ @add_global_event_processor
+ def process_django_templates(event, hint):
+ # type: (Event, Optional[Hint]) -> Optional[Event]
+ if hint is None:
+ return event
+
+ exc_info = hint.get("exc_info", None)
+
+ if exc_info is None:
+ return event
+
+ exception = event.get("exception", None)
+
+ if exception is None:
+ return event
+
+ values = exception.get("values", None)
+
+ if values is None:
+ return event
+
+ for exception, (_, exc_value, _) in zip(
+ reversed(values), walk_exception_chain(exc_info)
+ ):
+ frame = get_template_frame_from_exception(exc_value)
+ if frame is not None:
+ frames = exception.get("stacktrace", {}).get("frames", [])
+
+ for i in reversed(range(len(frames))):
+ f = frames[i]
+ if (
+ f.get("function") in ("parse", "render")
+ and f.get("module") == "django.template.base"
+ ):
+ i += 1
+ break
+ else:
+ i = len(frames)
+
+ frames.insert(i, frame)
+
+ return event
+
+ @add_global_repr_processor
+ def _django_queryset_repr(value, hint):
+ # type: (Any, Dict[str, Any]) -> Union[NotImplementedType, str]
+ try:
+ # Django 1.6 can fail to import `QuerySet` when Django settings
+ # have not yet been initialized.
+ #
+ # If we fail to import, return `NotImplemented`. It's at least
+ # unlikely that we have a query set in `value` when importing
+ # `QuerySet` fails.
+ from django.db.models.query import QuerySet
+ except Exception:
+ return NotImplemented
+
+ if not isinstance(value, QuerySet) or value._result_cache:
+ return NotImplemented
+
+ # Do not call Hub.get_integration here. It is intentional that
+ # running under a new hub does not suddenly start executing
+ # querysets. This might be surprising to the user but it's likely
+ # less annoying.
+
+ return u"<%s from %s at 0x%x>" % (
+ value.__class__.__name__,
+ value.__module__,
+ id(value),
+ )
+
+ _patch_channels()
+ patch_django_middlewares()
+
+
+_DRF_PATCHED = False
+_DRF_PATCH_LOCK = threading.Lock()
+
+
+def _patch_drf():
+ # type: () -> None
+ """
+ Patch Django Rest Framework for more/better request data. DRF's request
+ type is a wrapper around Django's request type. The attribute we're
+ interested in is `request.data`, which is a cached property containing a
+ parsed request body. Reading a request body from that property is more
+ reliable than reading from any of Django's own properties, as those don't
+ hold payloads in memory and therefore can only be accessed once.
+
+ We patch the Django request object to include a weak backreference to the
+ DRF request object, such that we can later use either in
+ `DjangoRequestExtractor`.
+
+ This function is not called directly on SDK setup, because importing almost
+ any part of Django Rest Framework will try to access Django settings (where
+ `sentry_sdk.init()` might be called from in the first place). Instead we
+ run this function on every request and do the patching on the first
+ request.
+ """
+
+ global _DRF_PATCHED
+
+ if _DRF_PATCHED:
+ # Double-checked locking
+ return
+
+ with _DRF_PATCH_LOCK:
+ if _DRF_PATCHED:
+ return
+
+ # We set this regardless of whether the code below succeeds or fails.
+ # There is no point in trying to patch again on the next request.
+ _DRF_PATCHED = True
+
+ with capture_internal_exceptions():
+ try:
+ from rest_framework.views import APIView # type: ignore
+ except ImportError:
+ pass
+ else:
+ old_drf_initial = APIView.initial
+
+ def sentry_patched_drf_initial(self, request, *args, **kwargs):
+ # type: (APIView, Any, *Any, **Any) -> Any
+ with capture_internal_exceptions():
+ request._request._sentry_drf_request_backref = weakref.ref(
+ request
+ )
+ return old_drf_initial(self, request, *args, **kwargs)
+
+ APIView.initial = sentry_patched_drf_initial
+
+
+def _patch_channels():
+ # type: () -> None
+ try:
+ from channels.http import AsgiHandler # type: ignore
+ except ImportError:
+ return
+
+ if not HAS_REAL_CONTEXTVARS:
+ # We better have contextvars or we're going to leak state between
+ # requests.
+ #
+ # We cannot hard-raise here because channels may not be used at all in
+ # the current process.
+ logger.warning(
+ "We detected that you are using Django channels 2.0. To get proper "
+ "instrumentation for ASGI requests, the Sentry SDK requires "
+ "Python 3.7+ or the aiocontextvars package from PyPI."
+ )
+
+ from sentry_sdk.integrations.django.asgi import patch_channels_asgi_handler_impl
+
+ patch_channels_asgi_handler_impl(AsgiHandler)
+
+
+def _patch_django_asgi_handler():
+ # type: () -> None
+ try:
+ from django.core.handlers.asgi import ASGIHandler
+ except ImportError:
+ return
+
+ if not HAS_REAL_CONTEXTVARS:
+ # We better have contextvars or we're going to leak state between
+ # requests.
+ #
+ # We cannot hard-raise here because Django may not be used at all in
+ # the current process.
+ logger.warning(
+ "We detected that you are using Django 3. To get proper "
+ "instrumentation for ASGI requests, the Sentry SDK requires "
+ "Python 3.7+ or the aiocontextvars package from PyPI."
+ )
+
+ from sentry_sdk.integrations.django.asgi import patch_django_asgi_handler_impl
+
+ patch_django_asgi_handler_impl(ASGIHandler)
+
+
+def _make_event_processor(weak_request, integration):
+ # type: (Callable[[], WSGIRequest], DjangoIntegration) -> EventProcessor
+ def event_processor(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ # if the request is gone we are fine not logging the data from
+ # it. This might happen if the processor is pushed away to
+ # another thread.
+ request = weak_request()
+ if request is None:
+ return event
+
+ try:
+ drf_request = request._sentry_drf_request_backref()
+ if drf_request is not None:
+ request = drf_request
+ except AttributeError:
+ pass
+
+ with capture_internal_exceptions():
+ DjangoRequestExtractor(request).extract_into_event(event)
+
+ if _should_send_default_pii():
+ with capture_internal_exceptions():
+ _set_user_info(request, event)
+
+ return event
+
+ return event_processor
+
+
+def _got_request_exception(request=None, **kwargs):
+ # type: (WSGIRequest, **Any) -> None
+ hub = Hub.current
+ integration = hub.get_integration(DjangoIntegration)
+ if integration is not None:
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ event, hint = event_from_exception(
+ sys.exc_info(),
+ client_options=client.options,
+ mechanism={"type": "django", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+
+class DjangoRequestExtractor(RequestExtractor):
+ def env(self):
+ # type: () -> Dict[str, str]
+ return self.request.META
+
+ def cookies(self):
+ # type: () -> Dict[str, str]
+ return self.request.COOKIES
+
+ def raw_data(self):
+ # type: () -> bytes
+ return self.request.body
+
+ def form(self):
+ # type: () -> QueryDict
+ return self.request.POST
+
+ def files(self):
+ # type: () -> MultiValueDict
+ return self.request.FILES
+
+ def size_of_file(self, file):
+ # type: (Any) -> int
+ return file.size
+
+ def parsed_body(self):
+ # type: () -> Optional[Dict[str, Any]]
+ try:
+ return self.request.data
+ except AttributeError:
+ return RequestExtractor.parsed_body(self)
+
+
+def _set_user_info(request, event):
+ # type: (WSGIRequest, Dict[str, Any]) -> None
+ user_info = event.setdefault("user", {})
+
+ user = getattr(request, "user", None)
+
+ if user is None or not is_authenticated(user):
+ return
+
+ try:
+ user_info.setdefault("id", str(user.pk))
+ except Exception:
+ pass
+
+ try:
+ user_info.setdefault("email", user.email)
+ except Exception:
+ pass
+
+ try:
+ user_info.setdefault("username", user.get_username())
+ except Exception:
+ pass
+
+
+def install_sql_hook():
+ # type: () -> None
+ """If installed this causes Django's queries to be captured."""
+ try:
+ from django.db.backends.utils import CursorWrapper
+ except ImportError:
+ from django.db.backends.util import CursorWrapper
+
+ try:
+ real_execute = CursorWrapper.execute
+ real_executemany = CursorWrapper.executemany
+ except AttributeError:
+ # This won't work on Django versions < 1.6
+ return
+
+ def execute(self, sql, params=None):
+ # type: (CursorWrapper, Any, Optional[Any]) -> Any
+ hub = Hub.current
+ if hub.get_integration(DjangoIntegration) is None:
+ return real_execute(self, sql, params)
+
+ with record_sql_queries(
+ hub, self.cursor, sql, params, paramstyle="format", executemany=False
+ ):
+ return real_execute(self, sql, params)
+
+ def executemany(self, sql, param_list):
+ # type: (CursorWrapper, Any, List[Any]) -> Any
+ hub = Hub.current
+ if hub.get_integration(DjangoIntegration) is None:
+ return real_executemany(self, sql, param_list)
+
+ with record_sql_queries(
+ hub, self.cursor, sql, param_list, paramstyle="format", executemany=True
+ ):
+ return real_executemany(self, sql, param_list)
+
+ CursorWrapper.execute = execute
+ CursorWrapper.executemany = executemany
+ ignore_logger("django.db.backends")
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/django/asgi.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/asgi.py
new file mode 100644
index 0000000000..96ae3e0809
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/asgi.py
@@ -0,0 +1,47 @@
+"""
+Instrumentation for Django 3.0
+
+Since this file contains `async def` it is conditionally imported in
+`sentry_sdk.integrations.django` (depending on the existence of
+`django.core.handlers.asgi`).
+"""
+
+from sentry_sdk import Hub
+from sentry_sdk._types import MYPY
+
+from sentry_sdk.integrations.django import DjangoIntegration
+from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
+
+if MYPY:
+ from typing import Any
+
+
+def patch_django_asgi_handler_impl(cls):
+ # type: (Any) -> None
+ old_app = cls.__call__
+
+ async def sentry_patched_asgi_handler(self, scope, receive, send):
+ # type: (Any, Any, Any, Any) -> Any
+ if Hub.current.get_integration(DjangoIntegration) is None:
+ return await old_app(self, scope, receive, send)
+
+ middleware = SentryAsgiMiddleware(old_app.__get__(self, cls))._run_asgi3
+ return await middleware(scope, receive, send)
+
+ cls.__call__ = sentry_patched_asgi_handler
+
+
+def patch_channels_asgi_handler_impl(cls):
+ # type: (Any) -> None
+ old_app = cls.__call__
+
+ async def sentry_patched_asgi_handler(self, receive, send):
+ # type: (Any, Any, Any) -> Any
+ if Hub.current.get_integration(DjangoIntegration) is None:
+ return await old_app(self, receive, send)
+
+ middleware = SentryAsgiMiddleware(lambda _scope: old_app.__get__(self, cls))
+
+ return await middleware(self.scope)(receive, send)
+
+ cls.__call__ = sentry_patched_asgi_handler
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/django/middleware.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/middleware.py
new file mode 100644
index 0000000000..edbeccb093
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/middleware.py
@@ -0,0 +1,136 @@
+"""
+Create spans from Django middleware invocations
+"""
+
+from functools import wraps
+
+from django import VERSION as DJANGO_VERSION
+
+from sentry_sdk import Hub
+from sentry_sdk.utils import (
+ ContextVar,
+ transaction_from_function,
+ capture_internal_exceptions,
+)
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Callable
+ from typing import TypeVar
+
+ F = TypeVar("F", bound=Callable[..., Any])
+
+_import_string_should_wrap_middleware = ContextVar(
+ "import_string_should_wrap_middleware"
+)
+
+if DJANGO_VERSION < (1, 7):
+ import_string_name = "import_by_path"
+else:
+ import_string_name = "import_string"
+
+
+def patch_django_middlewares():
+ # type: () -> None
+ from django.core.handlers import base
+
+ old_import_string = getattr(base, import_string_name)
+
+ def sentry_patched_import_string(dotted_path):
+ # type: (str) -> Any
+ rv = old_import_string(dotted_path)
+
+ if _import_string_should_wrap_middleware.get(None):
+ rv = _wrap_middleware(rv, dotted_path)
+
+ return rv
+
+ setattr(base, import_string_name, sentry_patched_import_string)
+
+ old_load_middleware = base.BaseHandler.load_middleware
+
+ def sentry_patched_load_middleware(self):
+ # type: (base.BaseHandler) -> Any
+ _import_string_should_wrap_middleware.set(True)
+ try:
+ return old_load_middleware(self)
+ finally:
+ _import_string_should_wrap_middleware.set(False)
+
+ base.BaseHandler.load_middleware = sentry_patched_load_middleware
+
+
+def _wrap_middleware(middleware, middleware_name):
+ # type: (Any, str) -> Any
+ from sentry_sdk.integrations.django import DjangoIntegration
+
+ def _get_wrapped_method(old_method):
+ # type: (F) -> F
+ with capture_internal_exceptions():
+
+ def sentry_wrapped_method(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(DjangoIntegration)
+ if integration is None or not integration.middleware_spans:
+ return old_method(*args, **kwargs)
+
+ function_name = transaction_from_function(old_method)
+
+ description = middleware_name
+ function_basename = getattr(old_method, "__name__", None)
+ if function_basename:
+ description = "{}.{}".format(description, function_basename)
+
+ with hub.start_span(
+ op="django.middleware", description=description
+ ) as span:
+ span.set_tag("django.function_name", function_name)
+ span.set_tag("django.middleware_name", middleware_name)
+ return old_method(*args, **kwargs)
+
+ try:
+ # fails for __call__ of function on Python 2 (see py2.7-django-1.11)
+ return wraps(old_method)(sentry_wrapped_method) # type: ignore
+ except Exception:
+ return sentry_wrapped_method # type: ignore
+
+ return old_method
+
+ class SentryWrappingMiddleware(object):
+ def __init__(self, *args, **kwargs):
+ # type: (*Any, **Any) -> None
+ self._inner = middleware(*args, **kwargs)
+ self._call_method = None
+
+ # We need correct behavior for `hasattr()`, which we can only determine
+ # when we have an instance of the middleware we're wrapping.
+ def __getattr__(self, method_name):
+ # type: (str) -> Any
+ if method_name not in (
+ "process_request",
+ "process_view",
+ "process_template_response",
+ "process_response",
+ "process_exception",
+ ):
+ raise AttributeError(method_name)
+
+ old_method = getattr(self._inner, method_name)
+ rv = _get_wrapped_method(old_method)
+ self.__dict__[method_name] = rv
+ return rv
+
+ def __call__(self, *args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ f = self._call_method
+ if f is None:
+ self._call_method = f = _get_wrapped_method(self._inner.__call__)
+ return f(*args, **kwargs)
+
+ if hasattr(middleware, "__name__"):
+ SentryWrappingMiddleware.__name__ = middleware.__name__
+
+ return SentryWrappingMiddleware
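
`patch_django_middlewares` above works by flipping a `ContextVar` flag around `load_middleware`, so the patched `import_string` only wraps classes while middleware loading is in progress. A minimal sketch of that flag pattern, using the stdlib `contextvars` in place of the SDK's own `ContextVar` shim:

    from contextvars import ContextVar

    _should_wrap = ContextVar("should_wrap")

    def import_hook(value):
        # stands in for sentry_patched_import_string above
        return ("wrapped:" + value) if _should_wrap.get(False) else value

    def load(value):
        # stands in for sentry_patched_load_middleware above
        _should_wrap.set(True)
        try:
            return import_hook(value)
        finally:
            _should_wrap.set(False)

    assert import_hook("mw.Path") == "mw.Path"    # outside load_middleware
    assert load("mw.Path") == "wrapped:mw.Path"   # inside load_middleware
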
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/django/templates.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/templates.py
new file mode 100644
index 0000000000..2285644909
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/templates.py
@@ -0,0 +1,121 @@
+from django.template import TemplateSyntaxError
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Dict
+ from typing import Optional
+ from typing import Iterator
+ from typing import Tuple
+
+try:
+ # support Django 1.9
+ from django.template.base import Origin
+except ImportError:
+ # backward compatibility
+ from django.template.loader import LoaderOrigin as Origin
+
+
+def get_template_frame_from_exception(exc_value):
+ # type: (Optional[BaseException]) -> Optional[Dict[str, Any]]
+
+ # Django 1.9 and newer attach a ``template_debug`` attribute to template exceptions.
+ if hasattr(exc_value, "template_debug"):
+ return _get_template_frame_from_debug(exc_value.template_debug) # type: ignore
+
+ # As of r16833 (Django) all exceptions may contain a
+ # ``django_template_source`` attribute (rather than the legacy
+ # ``TemplateSyntaxError.source`` check)
+ if hasattr(exc_value, "django_template_source"):
+ return _get_template_frame_from_source(
+ exc_value.django_template_source # type: ignore
+ )
+
+ if isinstance(exc_value, TemplateSyntaxError) and hasattr(exc_value, "source"):
+ source = exc_value.source
+ if isinstance(source, (tuple, list)) and isinstance(source[0], Origin):
+ return _get_template_frame_from_source(source) # type: ignore
+
+ return None
+
+
+def _get_template_frame_from_debug(debug):
+ # type: (Optional[Dict[str, Any]]) -> Optional[Dict[str, Any]]
+ if debug is None:
+ return None
+
+ lineno = debug["line"]
+ filename = debug["name"]
+ if filename is None:
+ filename = "<django template>"
+
+ pre_context = []
+ post_context = []
+ context_line = None
+
+ for i, line in debug["source_lines"]:
+ if i < lineno:
+ pre_context.append(line)
+ elif i > lineno:
+ post_context.append(line)
+ else:
+ context_line = line
+
+ return {
+ "filename": filename,
+ "lineno": lineno,
+ "pre_context": pre_context[-5:],
+ "post_context": post_context[:5],
+ "context_line": context_line,
+ "in_app": True,
+ }
+
+
+def _linebreak_iter(template_source):
+ # type: (str) -> Iterator[int]
+ yield 0
+ p = template_source.find("\n")
+ while p >= 0:
+ yield p + 1
+ p = template_source.find("\n", p + 1)
+
+
+def _get_template_frame_from_source(source):
+ # type: (Tuple[Origin, Tuple[int, int]]) -> Optional[Dict[str, Any]]
+ if not source:
+ return None
+
+ origin, (start, end) = source
+ filename = getattr(origin, "loadname", None)
+ if filename is None:
+ filename = "<django template>"
+ template_source = origin.reload()
+ lineno = None
+ upto = 0
+ pre_context = []
+ post_context = []
+ context_line = None
+
+ for num, next_offset in enumerate(_linebreak_iter(template_source)):
+ line = template_source[upto:next_offset]
+ if start >= upto and end <= next_offset:
+ lineno = num
+ context_line = line
+ elif lineno is None:
+ pre_context.append(line)
+ else:
+ post_context.append(line)
+
+ upto = next_offset
+
+ if context_line is None or lineno is None:
+ return None
+
+ return {
+ "filename": filename,
+ "lineno": lineno,
+ "pre_context": pre_context[-5:],
+ "post_context": post_context[:5],
+ "context_line": context_line,
+ }
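
`_linebreak_iter` above yields the offset at which each template line starts; `_get_template_frame_from_source` then uses consecutive offsets to slice out the line containing the failing token. A worked example of the offsets:

    src = "a\nbb\nc"
    assert list(_linebreak_iter(src)) == [0, 2, 5]
    # line 0 is src[0:2] == "a\n", line 1 is src[2:5] == "bb\n",
    # and the final line starts at offset 5
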
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/django/transactions.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/transactions.py
new file mode 100644
index 0000000000..f20866ef95
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/django/transactions.py
@@ -0,0 +1,134 @@
+"""
+Copied from raven-python. Used for
+`DjangoIntegration(transaction_from="raven_legacy")`.
+"""
+
+from __future__ import absolute_import
+
+import re
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from django.urls.resolvers import URLResolver
+ from typing import Dict
+ from typing import List
+ from typing import Optional
+ from django.urls.resolvers import URLPattern
+ from typing import Tuple
+ from typing import Union
+ from re import Pattern
+
+try:
+ from django.urls import get_resolver
+except ImportError:
+ from django.core.urlresolvers import get_resolver
+
+
+def get_regex(resolver_or_pattern):
+ # type: (Union[URLPattern, URLResolver]) -> Pattern[str]
+ """Utility method for django's deprecated resolver.regex"""
+ try:
+ regex = resolver_or_pattern.regex
+ except AttributeError:
+ regex = resolver_or_pattern.pattern.regex
+ return regex
+
+
+class RavenResolver(object):
+ _optional_group_matcher = re.compile(r"\(\?\:([^\)]+)\)")
+ _named_group_matcher = re.compile(r"\(\?P<(\w+)>[^\)]+\)")
+ _non_named_group_matcher = re.compile(r"\([^\)]+\)")
+ # [foo|bar|baz]
+ _either_option_matcher = re.compile(r"\[([^\]]+)\|([^\]]+)\]")
+ _camel_re = re.compile(r"([A-Z]+)([a-z])")
+
+ _cache = {} # type: Dict[URLPattern, str]
+
+ def _simplify(self, pattern):
+ # type: (str) -> str
+ r"""
+ Clean up urlpattern regexes into something readable by humans:
+
+ From:
+ > "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
+
+ To:
+ > "{sport_slug}/athletes/{athlete_slug}/"
+ """
+ # remove optional params
+ # TODO(dcramer): it'd be nice to change these into [%s] but it currently
+ # conflicts with the other rules because we're doing regexp matches
+ # rather than parsing tokens
+ result = self._optional_group_matcher.sub(lambda m: "%s" % m.group(1), pattern)
+
+ # handle named groups first
+ result = self._named_group_matcher.sub(lambda m: "{%s}" % m.group(1), result)
+
+ # handle non-named groups
+ result = self._non_named_group_matcher.sub("{var}", result)
+
+ # handle optional params
+ result = self._either_option_matcher.sub(lambda m: m.group(1), result)
+
+ # clean up any outstanding regex-y characters.
+ result = (
+ result.replace("^", "")
+ .replace("$", "")
+ .replace("?", "")
+ .replace("//", "/")
+ .replace("\\", "")
+ )
+
+ return result
+
+ def _resolve(self, resolver, path, parents=None):
+ # type: (URLResolver, str, Optional[List[URLResolver]]) -> Optional[str]
+
+ match = get_regex(resolver).search(path) # Django < 2.0
+
+ if not match:
+ return None
+
+ if parents is None:
+ parents = [resolver]
+ elif resolver not in parents:
+ parents = parents + [resolver]
+
+ new_path = path[match.end() :]
+ for pattern in resolver.url_patterns:
+ # this is an include()
+ if not pattern.callback:
+ match_ = self._resolve(pattern, new_path, parents)
+ if match_:
+ return match_
+ continue
+ elif not get_regex(pattern).search(new_path):
+ continue
+
+ try:
+ return self._cache[pattern]
+ except KeyError:
+ pass
+
+ prefix = "".join(self._simplify(get_regex(p).pattern) for p in parents)
+ result = prefix + self._simplify(get_regex(pattern).pattern)
+ if not result.startswith("/"):
+ result = "/" + result
+ self._cache[pattern] = result
+ return result
+
+ return None
+
+ def resolve(
+ self,
+ path, # type: str
+ urlconf=None, # type: Union[None, Tuple[URLPattern, URLPattern, URLResolver], Tuple[URLPattern]]
+ ):
+ # type: (...) -> str
+ resolver = get_resolver(urlconf)
+ match = self._resolve(resolver, path)
+ return match or path
+
+
+LEGACY_RESOLVER = RavenResolver()
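
Following the docstring of `_simplify` above, a quick check of the transformation it performs (this mirrors the From/To example in the docstring):

    resolver = RavenResolver()
    pattern = r"^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    assert resolver._simplify(pattern) == "{sport_slug}/athletes/{athlete_slug}/"
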
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/excepthook.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/excepthook.py
new file mode 100644
index 0000000000..d8aead097a
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/excepthook.py
@@ -0,0 +1,76 @@
+import sys
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+from sentry_sdk.integrations import Integration
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Callable
+ from typing import Any
+ from typing import Type
+
+ from types import TracebackType
+
+ Excepthook = Callable[
+ [Type[BaseException], BaseException, TracebackType], Any,
+ ]
+
+
+class ExcepthookIntegration(Integration):
+ identifier = "excepthook"
+
+ always_run = False
+
+ def __init__(self, always_run=False):
+ # type: (bool) -> None
+
+ if not isinstance(always_run, bool):
+ raise ValueError(
+ "Invalid value for always_run: %s (must be type boolean)"
+ % (always_run,)
+ )
+ self.always_run = always_run
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ sys.excepthook = _make_excepthook(sys.excepthook)
+
+
+def _make_excepthook(old_excepthook):
+ # type: (Excepthook) -> Excepthook
+ def sentry_sdk_excepthook(type_, value, traceback):
+ # type: (Type[BaseException], BaseException, TracebackType) -> None
+ hub = Hub.current
+ integration = hub.get_integration(ExcepthookIntegration)
+
+ if integration is not None and _should_send(integration.always_run):
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ with capture_internal_exceptions():
+ event, hint = event_from_exception(
+ (type_, value, traceback),
+ client_options=client.options,
+ mechanism={"type": "excepthook", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+ return old_excepthook(type_, value, traceback)
+
+ return sentry_sdk_excepthook
+
+
+def _should_send(always_run=False):
+ # type: (bool) -> bool
+ if always_run:
+ return True
+
+ if hasattr(sys, "ps1"):
+ # Disable the excepthook for interactive Python shells, otherwise
+ # every typo gets sent to Sentry.
+ return False
+
+ return True
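
Typical opt-in usage of the integration above: by default `_should_send` suppresses reporting inside interactive shells, and `always_run=True` disables that suppression (the DSN below is a placeholder):

    import sentry_sdk
    from sentry_sdk.integrations.excepthook import ExcepthookIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
        integrations=[ExcepthookIntegration(always_run=True)],
    )
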
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/falcon.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/falcon.py
new file mode 100644
index 0000000000..b24aac41c6
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/falcon.py
@@ -0,0 +1,209 @@
+from __future__ import absolute_import
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations._wsgi_common import RequestExtractor
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Dict
+ from typing import Optional
+
+ from sentry_sdk._types import EventProcessor
+
+try:
+ import falcon # type: ignore
+ import falcon.api_helpers # type: ignore
+
+ from falcon import __version__ as FALCON_VERSION
+except ImportError:
+ raise DidNotEnable("Falcon not installed")
+
+
+class FalconRequestExtractor(RequestExtractor):
+ def env(self):
+ # type: () -> Dict[str, Any]
+ return self.request.env
+
+ def cookies(self):
+ # type: () -> Dict[str, Any]
+ return self.request.cookies
+
+ def form(self):
+ # type: () -> None
+ return None # No such concept in Falcon
+
+ def files(self):
+ # type: () -> None
+ return None # No such concept in Falcon
+
+ def raw_data(self):
+ # type: () -> Optional[str]
+
+ # As request data can only be read once, we won't make it available
+ # to Sentry. Just send back a dummy string in case there was a
+ # content length.
+ # TODO(jmagnusson): Figure out if there's a way to support this
+ content_length = self.content_length()
+ if content_length > 0:
+ return "[REQUEST_CONTAINING_RAW_DATA]"
+ else:
+ return None
+
+ def json(self):
+ # type: () -> Optional[Dict[str, Any]]
+ try:
+ return self.request.media
+ except falcon.errors.HTTPBadRequest:
+ # NOTE(jmagnusson): We return `falcon.Request._media` here because
+ # falcon 1.4 doesn't do proper type checking in
+ # `falcon.Request.media`. This has been fixed in 2.0.
+ # Relevant code: https://github.com/falconry/falcon/blob/1.4.1/falcon/request.py#L953
+ return self.request._media
+
+
+class SentryFalconMiddleware(object):
+ """Captures exceptions in Falcon requests and send to Sentry"""
+
+ def process_request(self, req, resp, *args, **kwargs):
+ # type: (Any, Any, *Any, **Any) -> None
+ hub = Hub.current
+ integration = hub.get_integration(FalconIntegration)
+ if integration is None:
+ return
+
+ with hub.configure_scope() as scope:
+ scope._name = "falcon"
+ scope.add_event_processor(_make_request_event_processor(req, integration))
+
+
+TRANSACTION_STYLE_VALUES = ("uri_template", "path")
+
+
+class FalconIntegration(Integration):
+ identifier = "falcon"
+
+ transaction_style = None
+
+ def __init__(self, transaction_style="uri_template"):
+ # type: (str) -> None
+ if transaction_style not in TRANSACTION_STYLE_VALUES:
+ raise ValueError(
+ "Invalid value for transaction_style: %s (must be in %s)"
+ % (transaction_style, TRANSACTION_STYLE_VALUES)
+ )
+ self.transaction_style = transaction_style
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ try:
+ version = tuple(map(int, FALCON_VERSION.split(".")))
+ except (ValueError, TypeError):
+ raise DidNotEnable("Unparseable Falcon version: {}".format(FALCON_VERSION))
+
+ if version < (1, 4):
+ raise DidNotEnable("Falcon 1.4 or newer required.")
+
+ _patch_wsgi_app()
+ _patch_handle_exception()
+ _patch_prepare_middleware()
+
+
+def _patch_wsgi_app():
+ # type: () -> None
+ original_wsgi_app = falcon.API.__call__
+
+ def sentry_patched_wsgi_app(self, env, start_response):
+ # type: (falcon.API, Any, Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(FalconIntegration)
+ if integration is None:
+ return original_wsgi_app(self, env, start_response)
+
+ sentry_wrapped = SentryWsgiMiddleware(
+ lambda envi, start_resp: original_wsgi_app(self, envi, start_resp)
+ )
+
+ return sentry_wrapped(env, start_response)
+
+ falcon.API.__call__ = sentry_patched_wsgi_app
+
+
+def _patch_handle_exception():
+ # type: () -> None
+ original_handle_exception = falcon.API._handle_exception
+
+ def sentry_patched_handle_exception(self, *args):
+ # type: (falcon.API, *Any) -> Any
+ # NOTE(jmagnusson): falcon 2.0 changed falcon.API._handle_exception
+ # method signature from `(ex, req, resp, params)` to
+ # `(req, resp, ex, params)`
+ if isinstance(args[0], Exception):
+ ex = args[0]
+ else:
+ ex = args[2]
+
+ was_handled = original_handle_exception(self, *args)
+
+ hub = Hub.current
+ integration = hub.get_integration(FalconIntegration)
+
+ if integration is not None and not _is_falcon_http_error(ex):
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ event, hint = event_from_exception(
+ ex,
+ client_options=client.options,
+ mechanism={"type": "falcon", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+ return was_handled
+
+ falcon.API._handle_exception = sentry_patched_handle_exception
+
+
+def _patch_prepare_middleware():
+ # type: () -> None
+ original_prepare_middleware = falcon.api_helpers.prepare_middleware
+
+ def sentry_patched_prepare_middleware(
+ middleware=None, independent_middleware=False
+ ):
+ # type: (Any, Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(FalconIntegration)
+ if integration is not None:
+ middleware = [SentryFalconMiddleware()] + (middleware or [])
+ return original_prepare_middleware(middleware, independent_middleware)
+
+ falcon.api_helpers.prepare_middleware = sentry_patched_prepare_middleware
+
+
+def _is_falcon_http_error(ex):
+ # type: (BaseException) -> bool
+ return isinstance(ex, (falcon.HTTPError, falcon.http_status.HTTPStatus))
+
+
+def _make_request_event_processor(req, integration):
+ # type: (falcon.Request, FalconIntegration) -> EventProcessor
+
+ def inner(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ if integration.transaction_style == "uri_template":
+ event["transaction"] = req.uri_template
+ elif integration.transaction_style == "path":
+ event["transaction"] = req.path
+
+ with capture_internal_exceptions():
+ FalconRequestExtractor(req).extract_into_event(event)
+
+ return event
+
+ return inner
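
Because `setup_once` above patches `falcon.API` at the class level, enabling the integration before the app is constructed is enough. A minimal sketch (placeholder DSN):

    import falcon
    import sentry_sdk
    from sentry_sdk.integrations.falcon import FalconIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
        integrations=[FalconIntegration(transaction_style="path")],
    )

    api = falcon.API()  # __call__ is already wrapped in SentryWsgiMiddleware
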
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/flask.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/flask.py
new file mode 100644
index 0000000000..ef6ae0e4f0
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/flask.py
@@ -0,0 +1,260 @@
+from __future__ import absolute_import
+
+import weakref
+
+from sentry_sdk.hub import Hub, _should_send_default_pii
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+from sentry_sdk.integrations._wsgi_common import RequestExtractor
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from sentry_sdk.integrations.wsgi import _ScopedResponse
+ from typing import Any
+ from typing import Dict
+ from werkzeug.datastructures import ImmutableTypeConversionDict
+ from werkzeug.datastructures import ImmutableMultiDict
+ from werkzeug.datastructures import FileStorage
+ from typing import Union
+ from typing import Callable
+
+ from sentry_sdk._types import EventProcessor
+
+
+try:
+ import flask_login # type: ignore
+except ImportError:
+ flask_login = None
+
+try:
+ from flask import ( # type: ignore
+ Request,
+ Flask,
+ _request_ctx_stack,
+ _app_ctx_stack,
+ __version__ as FLASK_VERSION,
+ )
+ from flask.signals import (
+ appcontext_pushed,
+ appcontext_tearing_down,
+ got_request_exception,
+ request_started,
+ )
+except ImportError:
+ raise DidNotEnable("Flask is not installed")
+
+
+TRANSACTION_STYLE_VALUES = ("endpoint", "url")
+
+
+class FlaskIntegration(Integration):
+ identifier = "flask"
+
+ transaction_style = None
+
+ def __init__(self, transaction_style="endpoint"):
+ # type: (str) -> None
+ if transaction_style not in TRANSACTION_STYLE_VALUES:
+ raise ValueError(
+ "Invalid value for transaction_style: %s (must be in %s)"
+ % (transaction_style, TRANSACTION_STYLE_VALUES)
+ )
+ self.transaction_style = transaction_style
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ try:
+ version = tuple(map(int, FLASK_VERSION.split(".")[:3]))
+ except (ValueError, TypeError):
+ raise DidNotEnable("Unparseable Flask version: {}".format(FLASK_VERSION))
+
+ if version < (0, 11):
+ raise DidNotEnable("Flask 0.11 or newer is required.")
+
+ appcontext_pushed.connect(_push_appctx)
+ appcontext_tearing_down.connect(_pop_appctx)
+ request_started.connect(_request_started)
+ got_request_exception.connect(_capture_exception)
+
+ old_app = Flask.__call__
+
+ def sentry_patched_wsgi_app(self, environ, start_response):
+ # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+ if Hub.current.get_integration(FlaskIntegration) is None:
+ return old_app(self, environ, start_response)
+
+ return SentryWsgiMiddleware(lambda *a, **kw: old_app(self, *a, **kw))(
+ environ, start_response
+ )
+
+ Flask.__call__ = sentry_patched_wsgi_app # type: ignore
+
+
+def _push_appctx(*args, **kwargs):
+ # type: (*Flask, **Any) -> None
+ hub = Hub.current
+ if hub.get_integration(FlaskIntegration) is not None:
+ # always want to push scope regardless of whether WSGI app might already
+ # have (not the case for CLI for example)
+ scope_manager = hub.push_scope()
+ scope_manager.__enter__()
+ _app_ctx_stack.top.sentry_sdk_scope_manager = scope_manager
+ with hub.configure_scope() as scope:
+ scope._name = "flask"
+
+
+def _pop_appctx(*args, **kwargs):
+ # type: (*Flask, **Any) -> None
+ scope_manager = getattr(_app_ctx_stack.top, "sentry_sdk_scope_manager", None)
+ if scope_manager is not None:
+ scope_manager.__exit__(None, None, None)
+
+
+def _request_started(sender, **kwargs):
+ # type: (Flask, **Any) -> None
+ hub = Hub.current
+ integration = hub.get_integration(FlaskIntegration)
+ if integration is None:
+ return
+
+ app = _app_ctx_stack.top.app
+ with hub.configure_scope() as scope:
+ request = _request_ctx_stack.top.request
+
+ # Rely on WSGI middleware to start a trace
+ try:
+ if integration.transaction_style == "endpoint":
+ scope.transaction = request.url_rule.endpoint
+ elif integration.transaction_style == "url":
+ scope.transaction = request.url_rule.rule
+ except Exception:
+ pass
+
+ weak_request = weakref.ref(request)
+ evt_processor = _make_request_event_processor(
+ app, weak_request, integration # type: ignore
+ )
+ scope.add_event_processor(evt_processor)
+
+
+class FlaskRequestExtractor(RequestExtractor):
+ def env(self):
+ # type: () -> Dict[str, str]
+ return self.request.environ
+
+ def cookies(self):
+ # type: () -> ImmutableTypeConversionDict[Any, Any]
+ return self.request.cookies
+
+ def raw_data(self):
+ # type: () -> bytes
+ return self.request.get_data()
+
+ def form(self):
+ # type: () -> ImmutableMultiDict[str, Any]
+ return self.request.form
+
+ def files(self):
+ # type: () -> ImmutableMultiDict[str, Any]
+ return self.request.files
+
+ def is_json(self):
+ # type: () -> bool
+ return self.request.is_json
+
+ def json(self):
+ # type: () -> Any
+ return self.request.get_json()
+
+ def size_of_file(self, file):
+ # type: (FileStorage) -> int
+ return file.content_length
+
+
+def _make_request_event_processor(app, weak_request, integration):
+ # type: (Flask, Callable[[], Request], FlaskIntegration) -> EventProcessor
+ def inner(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ request = weak_request()
+
+ # if the request is gone we are fine not logging the data from
+ # it. This might happen if the processor is pushed away to
+ # another thread.
+ if request is None:
+ return event
+
+ with capture_internal_exceptions():
+ FlaskRequestExtractor(request).extract_into_event(event)
+
+ if _should_send_default_pii():
+ with capture_internal_exceptions():
+ _add_user_to_event(event)
+
+ return event
+
+ return inner
+
+
+def _capture_exception(sender, exception, **kwargs):
+ # type: (Flask, Union[ValueError, BaseException], **Any) -> None
+ hub = Hub.current
+ if hub.get_integration(FlaskIntegration) is None:
+ return
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ event, hint = event_from_exception(
+ exception,
+ client_options=client.options,
+ mechanism={"type": "flask", "handled": False},
+ )
+
+ hub.capture_event(event, hint=hint)
+
+
+def _add_user_to_event(event):
+ # type: (Dict[str, Any]) -> None
+ if flask_login is None:
+ return
+
+ user = flask_login.current_user
+ if user is None:
+ return
+
+ with capture_internal_exceptions():
+ # Access this object as late as possible as accessing the user
+ # is relatively costly
+
+ user_info = event.setdefault("user", {})
+
+ try:
+ user_info.setdefault("id", user.get_id())
+ # TODO: more configurable user attrs here
+ except AttributeError:
+ # might happen if:
+ # - flask_login could not be imported
+ # - flask_login is not configured
+ # - no user is logged in
+ pass
+
+ # The following attribute accesses are ineffective for the general
+ # Flask-Login case, because the User interface of Flask-Login does not
+ # care about anything but the ID. However, Flask-User (based on
+ # Flask-Login) documents a few optional extra attributes.
+ #
+ # https://github.com/lingthio/Flask-User/blob/a379fa0a281789618c484b459cb41236779b95b1/docs/source/data_models.rst#fixed-data-model-property-names
+
+ try:
+ user_info.setdefault("email", user.email)
+ except Exception:
+ pass
+
+ try:
+ user_info.setdefault("username", user.username)
+ except Exception:
+ pass
+
+ try:
+ # fall back to the email address when there is no username attribute
+ user_info.setdefault("username", user.email)
+ except Exception:
+ pass
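
As with the other framework integrations, the Flask patches above are applied globally in `setup_once`, so a plain `Flask` app picks them up. A minimal sketch (placeholder DSN):

    import sentry_sdk
    from flask import Flask
    from sentry_sdk.integrations.flask import FlaskIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
        integrations=[FlaskIntegration(transaction_style="url")],
    )

    app = Flask(__name__)  # requests now produce scopes, breadcrumbs and events
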
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/gnu_backtrace.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/gnu_backtrace.py
new file mode 100644
index 0000000000..e0ec110547
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/gnu_backtrace.py
@@ -0,0 +1,107 @@
+import re
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.utils import capture_internal_exceptions
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Dict
+
+
+MODULE_RE = r"[a-zA-Z0-9/._:\\-]+"
+TYPE_RE = r"[a-zA-Z0-9._:<>,-]+"
+HEXVAL_RE = r"[A-Fa-f0-9]+"
+
+
+FRAME_RE = r"""
+^(?P<index>\d+)\.\s
+(?P<package>{MODULE_RE})\(
+ (?P<retval>{TYPE_RE}\ )?
+ ((?P<function>{TYPE_RE})
+ (?P<args>\(.*\))?
+ )?
+ ((?P<constoffset>\ const)?\+0x(?P<offset>{HEXVAL_RE}))?
+\)\s
+\[0x(?P<retaddr>{HEXVAL_RE})\]$
+""".format(
+ MODULE_RE=MODULE_RE, HEXVAL_RE=HEXVAL_RE, TYPE_RE=TYPE_RE
+)
+
+FRAME_RE = re.compile(FRAME_RE, re.MULTILINE | re.VERBOSE)
+
+
+class GnuBacktraceIntegration(Integration):
+ identifier = "gnu_backtrace"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ @add_global_event_processor
+ def process_gnu_backtrace(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ with capture_internal_exceptions():
+ return _process_gnu_backtrace(event, hint)
+
+
+def _process_gnu_backtrace(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ if Hub.current.get_integration(GnuBacktraceIntegration) is None:
+ return event
+
+ exc_info = hint.get("exc_info", None)
+
+ if exc_info is None:
+ return event
+
+ exception = event.get("exception", None)
+
+ if exception is None:
+ return event
+
+ values = exception.get("values", None)
+
+ if values is None:
+ return event
+
+ for exception in values:
+ frames = exception.get("stacktrace", {}).get("frames", [])
+ if not frames:
+ continue
+
+ msg = exception.get("value", None)
+ if not msg:
+ continue
+
+ additional_frames = []
+ new_msg = []
+
+ for line in msg.splitlines():
+ match = FRAME_RE.match(line)
+ if match:
+ additional_frames.append(
+ (
+ int(match.group("index")),
+ {
+ "package": match.group("package") or None,
+ "function": match.group("function") or None,
+ "platform": "native",
+ },
+ )
+ )
+ else:
+ # Put unparseable lines back into the message; we have no better place for them.
+ new_msg.append(line)
+
+ if additional_frames:
+ additional_frames.sort(key=lambda x: -x[0])
+ for _, frame in additional_frames:
+ frames.append(frame)
+
+ new_msg.append("<stacktrace parsed and removed by GnuBacktraceIntegration>")
+ exception["value"] = "\n".join(new_msg)
+
+ return event
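
For reference, a line of the shape `FRAME_RE` is built to parse (the exact value is illustrative, in the style of a GNU/ClickHouse backtrace):

    line = (
        "3. /usr/bin/clickhouse-server"
        "(DB::Exception::Exception(std::string const&, int)+0x1f) [0x3a0e522]"
    )
    m = FRAME_RE.match(line)
    assert m is not None
    assert m.group("package") == "/usr/bin/clickhouse-server"
    assert m.group("function") == "DB::Exception::Exception"
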
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/logging.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/logging.py
new file mode 100644
index 0000000000..6edd785e91
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/logging.py
@@ -0,0 +1,237 @@
+from __future__ import absolute_import
+
+import logging
+import datetime
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import (
+ to_string,
+ event_from_exception,
+ current_stacktrace,
+ capture_internal_exceptions,
+)
+from sentry_sdk.integrations import Integration
+from sentry_sdk._compat import iteritems
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from logging import LogRecord
+ from typing import Any
+ from typing import Dict
+ from typing import Optional
+
+DEFAULT_LEVEL = logging.INFO
+DEFAULT_EVENT_LEVEL = logging.ERROR
+
+_IGNORED_LOGGERS = set(["sentry_sdk.errors"])
+
+
+def ignore_logger(
+ name, # type: str
+):
+ # type: (...) -> None
+ """This disables recording (both in breadcrumbs and as events) calls to
+ a logger of a specific name. Among other uses, many of our integrations
+ use this to prevent their actions being recorded as breadcrumbs. Exposed
+ to users as a way to quiet spammy loggers.
+
+ :param name: The name of the logger to ignore (same string you would pass to ``logging.getLogger``).
+ """
+ _IGNORED_LOGGERS.add(name)
+
+
+class LoggingIntegration(Integration):
+ identifier = "logging"
+
+ def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):
+ # type: (Optional[int], Optional[int]) -> None
+ self._handler = None
+ self._breadcrumb_handler = None
+
+ if level is not None:
+ self._breadcrumb_handler = BreadcrumbHandler(level=level)
+
+ if event_level is not None:
+ self._handler = EventHandler(level=event_level)
+
+ def _handle_record(self, record):
+ # type: (LogRecord) -> None
+ if self._handler is not None and record.levelno >= self._handler.level:
+ self._handler.handle(record)
+
+ if (
+ self._breadcrumb_handler is not None
+ and record.levelno >= self._breadcrumb_handler.level
+ ):
+ self._breadcrumb_handler.handle(record)
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ old_callhandlers = logging.Logger.callHandlers # type: ignore
+
+ def sentry_patched_callhandlers(self, record):
+ # type: (Any, LogRecord) -> Any
+ try:
+ return old_callhandlers(self, record)
+ finally:
+ # This check is done twice, once also here before we even get
+ # the integration. Otherwise we have a high chance of getting
+ # into a recursion error when the integration is resolved
+ # (this also is slower).
+ if record.name not in _IGNORED_LOGGERS:
+ integration = Hub.current.get_integration(LoggingIntegration)
+ if integration is not None:
+ integration._handle_record(record)
+
+ logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore
+
+
+def _can_record(record):
+ # type: (LogRecord) -> bool
+ return record.name not in _IGNORED_LOGGERS
+
+
+def _breadcrumb_from_record(record):
+ # type: (LogRecord) -> Dict[str, Any]
+ return {
+ "ty": "log",
+ "level": _logging_to_event_level(record.levelname),
+ "category": record.name,
+ "message": record.message,
+ "timestamp": datetime.datetime.utcfromtimestamp(record.created),
+ "data": _extra_from_record(record),
+ }
+
+
+def _logging_to_event_level(levelname):
+ # type: (str) -> str
+ return {"critical": "fatal"}.get(levelname.lower(), levelname.lower())
+
+
+COMMON_RECORD_ATTRS = frozenset(
+ (
+ "args",
+ "created",
+ "exc_info",
+ "exc_text",
+ "filename",
+ "funcName",
+ "levelname",
+ "levelno",
+ "linenno",
+ "lineno",
+ "message",
+ "module",
+ "msecs",
+ "msg",
+ "name",
+ "pathname",
+ "process",
+ "processName",
+ "relativeCreated",
+ "stack",
+ "tags",
+ "thread",
+ "threadName",
+ "stack_info",
+ )
+)
+
+
+def _extra_from_record(record):
+ # type: (LogRecord) -> Dict[str, None]
+ return {
+ k: v
+ for k, v in iteritems(vars(record))
+ if k not in COMMON_RECORD_ATTRS
+ and (not isinstance(k, str) or not k.startswith("_"))
+ }
+
+
+class EventHandler(logging.Handler, object):
+ """
+ A logging handler that emits Sentry events for each log record
+
+ Note that you do not have to use this class if the logging integration is enabled, which it is by default.
+ """
+
+ def emit(self, record):
+ # type: (LogRecord) -> Any
+ with capture_internal_exceptions():
+ self.format(record)
+ return self._emit(record)
+
+ def _emit(self, record):
+ # type: (LogRecord) -> None
+ if not _can_record(record):
+ return
+
+ hub = Hub.current
+ if hub.client is None:
+ return
+
+ client_options = hub.client.options
+
+ # exc_info might be None or (None, None, None)
+ if record.exc_info is not None and record.exc_info[0] is not None:
+ event, hint = event_from_exception(
+ record.exc_info,
+ client_options=client_options,
+ mechanism={"type": "logging", "handled": True},
+ )
+ elif record.exc_info and record.exc_info[0] is None:
+ event = {}
+ hint = {}
+ with capture_internal_exceptions():
+ event["threads"] = {
+ "values": [
+ {
+ "stacktrace": current_stacktrace(
+ client_options["with_locals"]
+ ),
+ "crashed": False,
+ "current": True,
+ }
+ ]
+ }
+ else:
+ event = {}
+ hint = {}
+
+ hint["log_record"] = record
+
+ event["level"] = _logging_to_event_level(record.levelname)
+ event["logger"] = record.name
+ event["logentry"] = {"message": to_string(record.msg), "params": record.args}
+ event["extra"] = _extra_from_record(record)
+
+ hub.capture_event(event, hint=hint)
+
+
+# Legacy name
+SentryHandler = EventHandler
+
+
+class BreadcrumbHandler(logging.Handler, object):
+ """
+ A logging handler that records breadcrumbs for each log record.
+
+ Note that you do not have to use this class if the logging integration is enabled, which it is by default.
+ """
+
+ def emit(self, record):
+ # type: (LogRecord) -> Any
+ with capture_internal_exceptions():
+ self.format(record)
+ return self._emit(record)
+
+ def _emit(self, record):
+ # type: (LogRecord) -> None
+ if not _can_record(record):
+ return
+
+ Hub.current.add_breadcrumb(
+ _breadcrumb_from_record(record), hint={"log_record": record}
+ )
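
The two handlers combine into the default behavior of the integration above: records at `level` and up become breadcrumbs, records at `event_level` and up become events. A typical explicit configuration (placeholder DSN):

    import logging
    import sentry_sdk
    from sentry_sdk.integrations.logging import LoggingIntegration, ignore_logger

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
        integrations=[
            LoggingIntegration(level=logging.INFO, event_level=logging.ERROR)
        ],
    )

    ignore_logger("a.spammy.logger")
    logging.getLogger("a.spammy.logger").error("not sent to Sentry")
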
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/modules.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/modules.py
new file mode 100644
index 0000000000..3d78cb89bb
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/modules.py
@@ -0,0 +1,56 @@
+from __future__ import absolute_import
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Dict
+ from typing import Tuple
+ from typing import Iterator
+
+ from sentry_sdk._types import Event
+
+
+_installed_modules = None
+
+
+def _generate_installed_modules():
+ # type: () -> Iterator[Tuple[str, str]]
+ try:
+ import pkg_resources
+ except ImportError:
+ return
+
+ for info in pkg_resources.working_set:
+ yield info.key, info.version
+
+
+def _get_installed_modules():
+ # type: () -> Dict[str, str]
+ global _installed_modules
+ if _installed_modules is None:
+ _installed_modules = dict(_generate_installed_modules())
+ return _installed_modules
+
+
+class ModulesIntegration(Integration):
+ identifier = "modules"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ @add_global_event_processor
+ def processor(event, hint):
+ # type: (Event, Any) -> Dict[str, Any]
+ if event.get("type") == "transaction":
+ return event
+
+ if Hub.current.get_integration(ModulesIntegration) is None:
+ return event
+
+ event["modules"] = _get_installed_modules()
+ return event
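
The processor above attaches the result of `_get_installed_modules` to every non-transaction event: `event["modules"]` becomes a mapping of lowercased distribution names to versions, along the lines of `{"certifi": "2019.11.28", "urllib3": "1.25.8"}` (versions illustrative).
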
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/pyramid.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/pyramid.py
new file mode 100644
index 0000000000..ee9682343a
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/pyramid.py
@@ -0,0 +1,217 @@
+from __future__ import absolute_import
+
+import os
+import sys
+import weakref
+
+from pyramid.httpexceptions import HTTPException
+from pyramid.request import Request
+
+from sentry_sdk.hub import Hub, _should_send_default_pii
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+from sentry_sdk._compat import reraise, iteritems
+
+from sentry_sdk.integrations import Integration
+from sentry_sdk.integrations._wsgi_common import RequestExtractor
+from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from pyramid.response import Response
+ from typing import Any
+ from sentry_sdk.integrations.wsgi import _ScopedResponse
+ from typing import Callable
+ from typing import Dict
+ from typing import Optional
+ from webob.cookies import RequestCookies # type: ignore
+ from webob.compat import cgi_FieldStorage # type: ignore
+
+ from sentry_sdk.utils import ExcInfo
+ from sentry_sdk._types import EventProcessor
+
+
+if getattr(Request, "authenticated_userid", None):
+
+ def authenticated_userid(request):
+ # type: (Request) -> Optional[Any]
+ return request.authenticated_userid
+
+
+else:
+ # bw-compat for pyramid < 1.5
+ from pyramid.security import authenticated_userid # type: ignore
+
+
+TRANSACTION_STYLE_VALUES = ("route_name", "route_pattern")
+
+
+class PyramidIntegration(Integration):
+ identifier = "pyramid"
+
+ transaction_style = None
+
+ def __init__(self, transaction_style="route_name"):
+ # type: (str) -> None
+ if transaction_style not in TRANSACTION_STYLE_VALUES:
+ raise ValueError(
+ "Invalid value for transaction_style: %s (must be in %s)"
+ % (transaction_style, TRANSACTION_STYLE_VALUES)
+ )
+ self.transaction_style = transaction_style
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ from pyramid.router import Router
+ from pyramid.request import Request
+
+ old_handle_request = Router.handle_request
+
+ def sentry_patched_handle_request(self, request, *args, **kwargs):
+ # type: (Any, Request, *Any, **Any) -> Response
+ hub = Hub.current
+ integration = hub.get_integration(PyramidIntegration)
+ if integration is not None:
+ with hub.configure_scope() as scope:
+ scope.add_event_processor(
+ _make_event_processor(weakref.ref(request), integration)
+ )
+
+ return old_handle_request(self, request, *args, **kwargs)
+
+ Router.handle_request = sentry_patched_handle_request
+
+ if hasattr(Request, "invoke_exception_view"):
+ old_invoke_exception_view = Request.invoke_exception_view
+
+ def sentry_patched_invoke_exception_view(self, *args, **kwargs):
+ # type: (Request, *Any, **Any) -> Any
+ rv = old_invoke_exception_view(self, *args, **kwargs)
+
+ if (
+ self.exc_info
+ and all(self.exc_info)
+ and rv.status_int == 500
+ and Hub.current.get_integration(PyramidIntegration) is not None
+ ):
+ _capture_exception(self.exc_info)
+
+ return rv
+
+ Request.invoke_exception_view = sentry_patched_invoke_exception_view
+
+ old_wsgi_call = Router.__call__
+
+ def sentry_patched_wsgi_call(self, environ, start_response):
+ # type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+ hub = Hub.current
+ integration = hub.get_integration(PyramidIntegration)
+ if integration is None:
+ return old_wsgi_call(self, environ, start_response)
+
+ def sentry_patched_inner_wsgi_call(environ, start_response):
+ # type: (Dict[str, Any], Callable[..., Any]) -> Any
+ try:
+ return old_wsgi_call(self, environ, start_response)
+ except Exception:
+ einfo = sys.exc_info()
+ _capture_exception(einfo)
+ reraise(*einfo)
+
+ return SentryWsgiMiddleware(sentry_patched_inner_wsgi_call)(
+ environ, start_response
+ )
+
+ Router.__call__ = sentry_patched_wsgi_call
+
+
+def _capture_exception(exc_info):
+ # type: (ExcInfo) -> None
+ if exc_info[0] is None or issubclass(exc_info[0], HTTPException):
+ return
+ hub = Hub.current
+ if hub.get_integration(PyramidIntegration) is None:
+ return
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=client.options,
+ mechanism={"type": "pyramid", "handled": False},
+ )
+
+ hub.capture_event(event, hint=hint)
+
+
+class PyramidRequestExtractor(RequestExtractor):
+ def url(self):
+ # type: () -> str
+ return self.request.path_url
+
+ def env(self):
+ # type: () -> Dict[str, str]
+ return self.request.environ
+
+ def cookies(self):
+ # type: () -> RequestCookies
+ return self.request.cookies
+
+ def raw_data(self):
+ # type: () -> str
+ return self.request.text
+
+ def form(self):
+ # type: () -> Dict[str, str]
+ return {
+ key: value
+ for key, value in iteritems(self.request.POST)
+ if not getattr(value, "filename", None)
+ }
+
+ def files(self):
+ # type: () -> Dict[str, cgi_FieldStorage]
+ return {
+ key: value
+ for key, value in iteritems(self.request.POST)
+ if getattr(value, "filename", None)
+ }
+
+ def size_of_file(self, postdata):
+ # type: (cgi_FieldStorage) -> int
+ file = postdata.file
+ try:
+ return os.fstat(file.fileno()).st_size
+ except Exception:
+ return 0
+
+
+def _make_event_processor(weak_request, integration):
+ # type: (Callable[[], Request], PyramidIntegration) -> EventProcessor
+ def event_processor(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ request = weak_request()
+ if request is None:
+ return event
+
+ try:
+ if integration.transaction_style == "route_name":
+ event["transaction"] = request.matched_route.name
+ elif integration.transaction_style == "route_pattern":
+ event["transaction"] = request.matched_route.pattern
+ except Exception:
+ pass
+
+ with capture_internal_exceptions():
+ PyramidRequestExtractor(request).extract_into_event(event)
+
+ if _should_send_default_pii():
+ with capture_internal_exceptions():
+ user_info = event.setdefault("user", {})
+ user_info.setdefault("id", authenticated_userid(request))
+
+ return event
+
+ return event_processor
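
`setup_once` above patches `Router` itself, so any Pyramid app created afterwards is covered. A minimal sketch (placeholder DSN):

    import sentry_sdk
    from pyramid.config import Configurator
    from sentry_sdk.integrations.pyramid import PyramidIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
        integrations=[PyramidIntegration(transaction_style="route_pattern")],
    )

    config = Configurator()
    app = config.make_wsgi_app()  # served requests go through the patches above
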
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/redis.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/redis.py
new file mode 100644
index 0000000000..510fdbb22c
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/redis.py
@@ -0,0 +1,70 @@
+from __future__ import absolute_import
+
+from sentry_sdk import Hub
+from sentry_sdk.utils import capture_internal_exceptions
+from sentry_sdk.integrations import Integration
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+
+
+class RedisIntegration(Integration):
+ identifier = "redis"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ import redis
+
+ patch_redis_client(redis.StrictRedis)
+
+ try:
+ import rb.clients # type: ignore
+ except ImportError:
+ pass
+ else:
+ patch_redis_client(rb.clients.FanoutClient)
+ patch_redis_client(rb.clients.MappingClient)
+ patch_redis_client(rb.clients.RoutingClient)
+
+
+def patch_redis_client(cls):
+ # type: (Any) -> None
+ """
+ This function can be used to instrument custom redis client classes or
+ subclasses.
+ """
+
+ old_execute_command = cls.execute_command
+
+ def sentry_patched_execute_command(self, name, *args, **kwargs):
+ # type: (Any, str, *Any, **Any) -> Any
+ hub = Hub.current
+
+ if hub.get_integration(RedisIntegration) is None:
+ return old_execute_command(self, name, *args, **kwargs)
+
+ description = name
+
+ with capture_internal_exceptions():
+ description_parts = [name]
+ for i, arg in enumerate(args):
+ if i > 10:
+ break
+
+ description_parts.append(repr(arg))
+
+ description = " ".join(description_parts)
+
+ with hub.start_span(op="redis", description=description) as span:
+ if name:
+ span.set_tag("redis.command", name)
+
+ if name and args and name.lower() in ("get", "set", "setex", "setnx"):
+ span.set_tag("redis.key", args[0])
+
+ return old_execute_command(self, name, *args, **kwargs)
+
+ cls.execute_command = sentry_patched_execute_command
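
Per the docstring of `patch_redis_client` above, the same helper can be pointed at a custom client class; a sketch with a hypothetical subclass:

    import redis
    from sentry_sdk.integrations.redis import patch_redis_client

    class TracingRedis(redis.StrictRedis):  # hypothetical subclass
        pass

    patch_redis_client(TracingRedis)
    # TracingRedis(...).execute_command("GET", "key") now records a span
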
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/rq.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/rq.py
new file mode 100644
index 0000000000..fbe8cdda3d
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/rq.py
@@ -0,0 +1,150 @@
+from __future__ import absolute_import
+
+import weakref
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.tracing import Span
+from sentry_sdk.utils import capture_internal_exceptions, event_from_exception
+
+
+try:
+ from rq.version import VERSION as RQ_VERSION
+ from rq.timeouts import JobTimeoutException
+ from rq.worker import Worker
+ from rq.queue import Queue
+except ImportError:
+ raise DidNotEnable("RQ not installed")
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Dict
+ from typing import Callable
+
+ from rq.job import Job
+
+ from sentry_sdk.utils import ExcInfo
+ from sentry_sdk._types import EventProcessor
+
+
+class RqIntegration(Integration):
+ identifier = "rq"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+
+ try:
+ version = tuple(map(int, RQ_VERSION.split(".")[:3]))
+ except (ValueError, TypeError):
+ raise DidNotEnable("Unparseable RQ version: {}".format(RQ_VERSION))
+
+ if version < (0, 6):
+ raise DidNotEnable("RQ 0.6 or newer is required.")
+
+ old_perform_job = Worker.perform_job
+
+ def sentry_patched_perform_job(self, job, *args, **kwargs):
+ # type: (Any, Job, *Queue, **Any) -> bool
+ hub = Hub.current
+ integration = hub.get_integration(RqIntegration)
+
+ if integration is None:
+ return old_perform_job(self, job, *args, **kwargs)
+
+ client = hub.client
+ assert client is not None
+
+ with hub.push_scope() as scope:
+ scope.clear_breadcrumbs()
+ scope.add_event_processor(_make_event_processor(weakref.ref(job)))
+
+ span = Span.continue_from_headers(
+ job.meta.get("_sentry_trace_headers") or {}
+ )
+ span.op = "rq.task"
+
+ with capture_internal_exceptions():
+ span.transaction = job.func_name
+
+ with hub.start_span(span):
+ rv = old_perform_job(self, job, *args, **kwargs)
+
+ if self.is_horse:
+ # We're inside of a forked process and RQ is
+ # about to call `os._exit`. Make sure that our
+ # events get sent out.
+ client.flush()
+
+ return rv
+
+ Worker.perform_job = sentry_patched_perform_job
+
+ old_handle_exception = Worker.handle_exception
+
+ def sentry_patched_handle_exception(self, job, *exc_info, **kwargs):
+ # type: (Worker, Any, *Any, **Any) -> Any
+ _capture_exception(exc_info) # type: ignore
+ return old_handle_exception(self, job, *exc_info, **kwargs)
+
+ Worker.handle_exception = sentry_patched_handle_exception
+
+ old_enqueue_job = Queue.enqueue_job
+
+ def sentry_patched_enqueue_job(self, job, **kwargs):
+ # type: (Queue, Any, **Any) -> Any
+ hub = Hub.current
+ if hub.get_integration(RqIntegration) is not None:
+ job.meta["_sentry_trace_headers"] = dict(
+ hub.iter_trace_propagation_headers()
+ )
+
+ return old_enqueue_job(self, job, **kwargs)
+
+ Queue.enqueue_job = sentry_patched_enqueue_job
+
+
+def _make_event_processor(weak_job):
+ # type: (Callable[[], Job]) -> EventProcessor
+ def event_processor(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ job = weak_job()
+ if job is not None:
+ with capture_internal_exceptions():
+ extra = event.setdefault("extra", {})
+ extra["rq-job"] = {
+ "job_id": job.id,
+ "func": job.func_name,
+ "args": job.args,
+ "kwargs": job.kwargs,
+ "description": job.description,
+ }
+
+ if "exc_info" in hint:
+ with capture_internal_exceptions():
+ if issubclass(hint["exc_info"][0], JobTimeoutException):
+ event["fingerprint"] = ["rq", "JobTimeoutException", job.func_name]
+
+ return event
+
+ return event_processor
+
+
+def _capture_exception(exc_info, **kwargs):
+ # type: (ExcInfo, **Any) -> None
+ hub = Hub.current
+ if hub.get_integration(RqIntegration) is None:
+ return
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=client.options,
+ mechanism={"type": "rq", "handled": False},
+ )
+
+ hub.capture_event(event, hint=hint)
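
The worker-side patches above pair with `sentry_patched_enqueue_job`, which stores trace headers on the job so the producer's trace continues inside the worker. Enabling it is the usual init call in both the enqueuing process and the worker (placeholder DSN):

    import sentry_sdk
    from sentry_sdk.integrations.rq import RqIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
        integrations=[RqIntegration()],
    )
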
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/sanic.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/sanic.py
new file mode 100644
index 0000000000..e8fdca422a
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/sanic.py
@@ -0,0 +1,233 @@
+import sys
+import weakref
+from inspect import isawaitable
+
+from sentry_sdk._compat import urlparse, reraise
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import (
+ capture_internal_exceptions,
+ event_from_exception,
+ HAS_REAL_CONTEXTVARS,
+)
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations._wsgi_common import RequestExtractor, _filter_headers
+from sentry_sdk.integrations.logging import ignore_logger
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Callable
+ from typing import Optional
+ from typing import Union
+ from typing import Tuple
+ from typing import Dict
+
+ from sanic.request import Request, RequestParameters
+
+ from sentry_sdk._types import Event, EventProcessor, Hint
+
+try:
+ from sanic import Sanic, __version__ as SANIC_VERSION
+ from sanic.exceptions import SanicException
+ from sanic.router import Router
+ from sanic.handlers import ErrorHandler
+except ImportError:
+ raise DidNotEnable("Sanic not installed")
+
+
+class SanicIntegration(Integration):
+ identifier = "sanic"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ try:
+ version = tuple(map(int, SANIC_VERSION.split(".")))
+ except (TypeError, ValueError):
+ raise DidNotEnable("Unparseable Sanic version: {}".format(SANIC_VERSION))
+
+ if version < (0, 8):
+ raise DidNotEnable("Sanic 0.8 or newer required.")
+
+ if not HAS_REAL_CONTEXTVARS:
+ # We better have contextvars or we're going to leak state between
+ # requests.
+ raise DidNotEnable(
+ "The sanic integration for Sentry requires Python 3.7+ "
+ " or aiocontextvars package"
+ )
+
+ if SANIC_VERSION.startswith("0.8."):
+ # Sanic 0.8 and older creates a logger named "root" and puts a
+ # stringified version of every exception in there (without exc_info),
+ # which our error deduplication can't detect.
+ #
+ # We explicitly check the version here because ignoring this logger
+ # is a very invasive step and is not necessary at all in newer
+ # versions.
+ #
+ # https://github.com/huge-success/sanic/issues/1332
+ ignore_logger("root")
+
+ old_handle_request = Sanic.handle_request
+
+ async def sentry_handle_request(self, request, *args, **kwargs):
+ # type: (Any, Request, *Any, **Any) -> Any
+ hub = Hub.current
+ if hub.get_integration(SanicIntegration) is None:
+ return old_handle_request(self, request, *args, **kwargs)
+
+ weak_request = weakref.ref(request)
+
+ with Hub(hub) as hub:
+ with hub.configure_scope() as scope:
+ scope.clear_breadcrumbs()
+ scope.add_event_processor(_make_request_processor(weak_request))
+
+ response = old_handle_request(self, request, *args, **kwargs)
+ if isawaitable(response):
+ response = await response
+
+ return response
+
+ Sanic.handle_request = sentry_handle_request
+
+ old_router_get = Router.get
+
+ def sentry_router_get(self, request):
+ # type: (Any, Request) -> Any
+ rv = old_router_get(self, request)
+ hub = Hub.current
+ if hub.get_integration(SanicIntegration) is not None:
+ with capture_internal_exceptions():
+ with hub.configure_scope() as scope:
+ scope.transaction = rv[0].__name__
+ return rv
+
+ Router.get = sentry_router_get
+
+ old_error_handler_lookup = ErrorHandler.lookup
+
+ def sentry_error_handler_lookup(self, exception):
+ # type: (Any, Exception) -> Optional[object]
+ _capture_exception(exception)
+ old_error_handler = old_error_handler_lookup(self, exception)
+
+ if old_error_handler is None:
+ return None
+
+ if Hub.current.get_integration(SanicIntegration) is None:
+ return old_error_handler
+
+ async def sentry_wrapped_error_handler(request, exception):
+ # type: (Request, Exception) -> Any
+ try:
+ response = old_error_handler(request, exception)
+ if isawaitable(response):
+ response = await response
+ return response
+ except Exception:
+ # Report errors that occur in Sanic error handler. These
+ # exceptions will not even show up in Sanic's
+ # `sanic.exceptions` logger.
+ exc_info = sys.exc_info()
+ _capture_exception(exc_info)
+ reraise(*exc_info)
+
+ return sentry_wrapped_error_handler
+
+ ErrorHandler.lookup = sentry_error_handler_lookup
+
+
+def _capture_exception(exception):
+ # type: (Union[Tuple[Optional[type], Optional[BaseException], Any], BaseException]) -> None
+ hub = Hub.current
+ integration = hub.get_integration(SanicIntegration)
+ if integration is None:
+ return
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ with capture_internal_exceptions():
+ event, hint = event_from_exception(
+ exception,
+ client_options=client.options,
+ mechanism={"type": "sanic", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+
+def _make_request_processor(weak_request):
+ # type: (Callable[[], Request]) -> EventProcessor
+ def sanic_processor(event, hint):
+ # type: (Event, Optional[Hint]) -> Optional[Event]
+
+ try:
+ if hint and issubclass(hint["exc_info"][0], SanicException):
+ return None
+ except KeyError:
+ pass
+
+ request = weak_request()
+ if request is None:
+ return event
+
+ with capture_internal_exceptions():
+ extractor = SanicRequestExtractor(request)
+ extractor.extract_into_event(event)
+
+ request_info = event["request"]
+ urlparts = urlparse.urlsplit(request.url)
+
+ request_info["url"] = "%s://%s%s" % (
+ urlparts.scheme,
+ urlparts.netloc,
+ urlparts.path,
+ )
+
+ request_info["query_string"] = urlparts.query
+ request_info["method"] = request.method
+ request_info["env"] = {"REMOTE_ADDR": request.remote_addr}
+ request_info["headers"] = _filter_headers(dict(request.headers))
+
+ return event
+
+ return sanic_processor
+
+
+class SanicRequestExtractor(RequestExtractor):
+ def content_length(self):
+ # type: () -> int
+ if self.request.body is None:
+ return 0
+ return len(self.request.body)
+
+ def cookies(self):
+ # type: () -> Dict[str, str]
+ return dict(self.request.cookies)
+
+ def raw_data(self):
+ # type: () -> bytes
+ return self.request.body
+
+ def form(self):
+ # type: () -> RequestParameters
+ return self.request.form
+
+ def is_json(self):
+ # type: () -> bool
+ raise NotImplementedError()
+
+ def json(self):
+ # type: () -> Optional[Any]
+ return self.request.json
+
+ def files(self):
+ # type: () -> RequestParameters
+ return self.request.files
+
+ def size_of_file(self, file):
+ # type: (Any) -> int
+ return len(file.body or ())
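
A minimal setup for the integration above (placeholder DSN); note the Python 3.7+/contextvars requirement enforced in `setup_once`:

    import sentry_sdk
    from sanic import Sanic
    from sentry_sdk.integrations.sanic import SanicIntegration

    sentry_sdk.init(
        dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder
        integrations=[SanicIntegration()],
    )

    app = Sanic(__name__)
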
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/serverless.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/serverless.py
new file mode 100644
index 0000000000..6dd90b43d0
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/serverless.py
@@ -0,0 +1,87 @@
+import functools
+import sys
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.utils import event_from_exception
+from sentry_sdk._compat import reraise
+
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Callable
+ from typing import TypeVar
+ from typing import Union
+ from typing import Optional
+
+ from typing import overload
+
+ F = TypeVar("F", bound=Callable[..., Any])
+
+else:
+
+ def overload(x):
+ # type: (F) -> F
+ return x
+
+
+@overload
+def serverless_function(f, flush=True):
+ # type: (F, bool) -> F
+ pass
+
+
+@overload # noqa
+def serverless_function(f=None, flush=True):
+ # type: (None, bool) -> Callable[[F], F]
+ pass
+
+
+def serverless_function(f=None, flush=True): # noqa
+ # type: (Optional[F], bool) -> Union[F, Callable[[F], F]]
+ def wrapper(f):
+ # type: (F) -> F
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ # type: (*Any, **Any) -> Any
+ with Hub(Hub.current) as hub:
+ with hub.configure_scope() as scope:
+ scope.clear_breadcrumbs()
+
+ try:
+ return f(*args, **kwargs)
+ except Exception:
+ _capture_and_reraise()
+ finally:
+ if flush:
+ _flush_client()
+
+ return inner # type: ignore
+
+ if f is None:
+ return wrapper
+ else:
+ return wrapper(f)
+
+
+def _capture_and_reraise():
+ # type: () -> None
+ exc_info = sys.exc_info()
+ hub = Hub.current
+ if hub is not None and hub.client is not None:
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=hub.client.options,
+ mechanism={"type": "serverless", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+ reraise(*exc_info)
+
+
+def _flush_client():
+ # type: () -> None
+ hub = Hub.current
+ if hub is not None:
+ hub.flush()
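
The decorator above can be used bare or with arguments. A hedged sketch (the handler signature is illustrative, not prescribed by the integration):

```python
from sentry_sdk.integrations.serverless import serverless_function

@serverless_function
def handler(event, context):  # illustrative signature
    raise ValueError("boom")  # captured with mechanism "serverless", then re-raised

@serverless_function(flush=False)  # skip flushing if latency matters more
def other_handler(event, context):
    return "ok"
```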
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/__init__.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/__init__.py
new file mode 100644
index 0000000000..10d94163c5
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/__init__.py
@@ -0,0 +1,4 @@
+from sentry_sdk.integrations.spark.spark_driver import SparkIntegration
+from sentry_sdk.integrations.spark.spark_worker import SparkWorkerIntegration
+
+__all__ = ["SparkIntegration", "SparkWorkerIntegration"]
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/spark_driver.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/spark_driver.py
new file mode 100644
index 0000000000..ea43c37821
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/spark_driver.py
@@ -0,0 +1,263 @@
+from sentry_sdk import configure_scope
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration
+from sentry_sdk.utils import capture_internal_exceptions
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Optional
+
+ from sentry_sdk._types import Event, Hint
+
+
+class SparkIntegration(Integration):
+ identifier = "spark"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ patch_spark_context_init()
+
+
+def _set_app_properties():
+ # type: () -> None
+ """
+ Set properties in driver that propagate to worker processes, allowing for workers to have access to those properties.
+ This allows worker integration to have access to app_name and application_id.
+ """
+ from pyspark import SparkContext
+
+ spark_context = SparkContext._active_spark_context
+ if spark_context:
+ spark_context.setLocalProperty("sentry_app_name", spark_context.appName)
+ spark_context.setLocalProperty(
+ "sentry_application_id", spark_context.applicationId
+ )
+
+
+def _start_sentry_listener(sc):
+ # type: (Any) -> None
+ """
+ Start java gateway server to add custom `SparkListener`
+ """
+ from pyspark.java_gateway import ensure_callback_server_started
+
+ gw = sc._gateway
+ ensure_callback_server_started(gw)
+ listener = SentryListener()
+ sc._jsc.sc().addSparkListener(listener)
+
+
+def patch_spark_context_init():
+ # type: () -> None
+ from pyspark import SparkContext
+
+ spark_context_init = SparkContext._do_init
+
+ def _sentry_patched_spark_context_init(self, *args, **kwargs):
+ # type: (SparkContext, *Any, **Any) -> Optional[Any]
+ init = spark_context_init(self, *args, **kwargs)
+
+ if Hub.current.get_integration(SparkIntegration) is None:
+ return init
+
+ _start_sentry_listener(self)
+ _set_app_properties()
+
+ with configure_scope() as scope:
+
+ @scope.add_event_processor
+ def process_event(event, hint):
+ # type: (Event, Hint) -> Optional[Event]
+ with capture_internal_exceptions():
+ if Hub.current.get_integration(SparkIntegration) is None:
+ return event
+
+ event.setdefault("user", {}).setdefault("id", self.sparkUser())
+
+ event.setdefault("tags", {}).setdefault(
+ "executor.id", self._conf.get("spark.executor.id")
+ )
+ event["tags"].setdefault(
+ "spark-submit.deployMode",
+ self._conf.get("spark.submit.deployMode"),
+ )
+ event["tags"].setdefault(
+ "driver.host", self._conf.get("spark.driver.host")
+ )
+ event["tags"].setdefault(
+ "driver.port", self._conf.get("spark.driver.port")
+ )
+ event["tags"].setdefault("spark_version", self.version)
+ event["tags"].setdefault("app_name", self.appName)
+ event["tags"].setdefault("application_id", self.applicationId)
+ event["tags"].setdefault("master", self.master)
+ event["tags"].setdefault("spark_home", self.sparkHome)
+
+ event.setdefault("extra", {}).setdefault("web_url", self.uiWebUrl)
+
+ return event
+
+ return init
+
+ SparkContext._do_init = _sentry_patched_spark_context_init
+
+
+class SparkListener(object):
+ def onApplicationEnd(self, applicationEnd): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onApplicationStart(self, applicationStart): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onBlockManagerAdded(self, blockManagerAdded): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onBlockManagerRemoved(self, blockManagerRemoved): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onBlockUpdated(self, blockUpdated): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onEnvironmentUpdate(self, environmentUpdate): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onExecutorAdded(self, executorAdded): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onExecutorBlacklisted(self, executorBlacklisted): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onExecutorBlacklistedForStage( # noqa: N802
+ self, executorBlacklistedForStage # noqa: N803
+ ):
+ # type: (Any) -> None
+ pass
+
+ def onExecutorMetricsUpdate(self, executorMetricsUpdate): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onExecutorRemoved(self, executorRemoved): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onJobEnd(self, jobEnd): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onJobStart(self, jobStart): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onNodeBlacklisted(self, nodeBlacklisted): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onNodeBlacklistedForStage(self, nodeBlacklistedForStage): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onNodeUnblacklisted(self, nodeUnblacklisted): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onOtherEvent(self, event): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onSpeculativeTaskSubmitted(self, speculativeTask): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onStageCompleted(self, stageCompleted): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onStageSubmitted(self, stageSubmitted): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onTaskEnd(self, taskEnd): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onTaskGettingResult(self, taskGettingResult): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onTaskStart(self, taskStart): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ def onUnpersistRDD(self, unpersistRDD): # noqa: N802,N803
+ # type: (Any) -> None
+ pass
+
+ class Java:
+ implements = ["org.apache.spark.scheduler.SparkListenerInterface"]
+
+
+class SentryListener(SparkListener):
+ def __init__(self):
+ # type: () -> None
+ self.hub = Hub.current
+
+ def onJobStart(self, jobStart): # noqa: N802,N803
+ # type: (Any) -> None
+ message = "Job {} Started".format(jobStart.jobId())
+ self.hub.add_breadcrumb(level="info", message=message)
+ _set_app_properties()
+
+ def onJobEnd(self, jobEnd): # noqa: N802,N803
+ # type: (Any) -> None
+ level = ""
+ message = ""
+ data = {"result": jobEnd.jobResult().toString()}
+
+ if jobEnd.jobResult().toString() == "JobSucceeded":
+ level = "info"
+ message = "Job {} Ended".format(jobEnd.jobId())
+ else:
+ level = "warning"
+ message = "Job {} Failed".format(jobEnd.jobId())
+
+ self.hub.add_breadcrumb(level=level, message=message, data=data)
+
+ def onStageSubmitted(self, stageSubmitted): # noqa: N802,N803
+ # type: (Any) -> None
+ stage_info = stageSubmitted.stageInfo()
+ message = "Stage {} Submitted".format(stage_info.stageId())
+ data = {"attemptId": stage_info.attemptId(), "name": stage_info.name()}
+ self.hub.add_breadcrumb(level="info", message=message, data=data)
+ _set_app_properties()
+
+ def onStageCompleted(self, stageCompleted): # noqa: N802,N803
+ # type: (Any) -> None
+ from py4j.protocol import Py4JJavaError # type: ignore
+
+ stage_info = stageCompleted.stageInfo()
+ message = ""
+ level = ""
+ data = {"attemptId": stage_info.attemptId(), "name": stage_info.name()}
+
+ # We have to try/except because stageInfo.failureReason() returns a Scala Option; .get() raises when the stage succeeded
+ try:
+ data["reason"] = stage_info.failureReason().get()
+ message = "Stage {} Failed".format(stage_info.stageId())
+ level = "warning"
+ except Py4JJavaError:
+ message = "Stage {} Completed".format(stage_info.stageId())
+ level = "info"
+
+ self.hub.add_breadcrumb(level=level, message=message, data=data)
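
Driver-side setup is just an `init()` call before the `SparkContext` is created; a minimal sketch with a placeholder DSN:

```python
import sentry_sdk
from sentry_sdk.integrations.spark import SparkIntegration
from pyspark import SparkContext

sentry_sdk.init(dsn="https://<key>@<host>/<project>",
                integrations=[SparkIntegration()])

# SparkContext._do_init is patched above, so constructing the context
# registers the SentryListener and tags later events with Spark metadata.
sc = SparkContext(appName="my_app")
```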
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/spark_worker.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/spark_worker.py
new file mode 100644
index 0000000000..bae4413d11
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/spark/spark_worker.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import
+
+import sys
+
+from sentry_sdk import configure_scope
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration
+from sentry_sdk.utils import (
+ capture_internal_exceptions,
+ exc_info_from_error,
+ single_exception_from_error_tuple,
+ walk_exception_chain,
+ event_hint_with_exc_info,
+)
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Optional
+
+ from sentry_sdk._types import ExcInfo, Event, Hint
+
+
+class SparkWorkerIntegration(Integration):
+ identifier = "spark_worker"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ import pyspark.daemon as original_daemon
+
+ original_daemon.worker_main = _sentry_worker_main
+
+
+def _capture_exception(exc_info, hub):
+ # type: (ExcInfo, Hub) -> None
+ client = hub.client
+
+ client_options = client.options # type: ignore
+
+ mechanism = {"type": "spark", "handled": False}
+
+ exc_info = exc_info_from_error(exc_info)
+
+ exc_type, exc_value, tb = exc_info
+ rv = []
+
+ # On an exception the worker calls sys.exit(-1), so we can ignore SystemExit and similar errors
+ for exc_type, exc_value, tb in walk_exception_chain(exc_info):
+ if exc_type not in (SystemExit, EOFError, ConnectionResetError):
+ rv.append(
+ single_exception_from_error_tuple(
+ exc_type, exc_value, tb, client_options, mechanism
+ )
+ )
+
+ if rv:
+ rv.reverse()
+ hint = event_hint_with_exc_info(exc_info)
+ event = {"level": "error", "exception": {"values": rv}}
+
+ _tag_task_context()
+
+ hub.capture_event(event, hint=hint)
+
+
+def _tag_task_context():
+ # type: () -> None
+ from pyspark.taskcontext import TaskContext
+
+ with configure_scope() as scope:
+
+ @scope.add_event_processor
+ def process_event(event, hint):
+ # type: (Event, Hint) -> Optional[Event]
+ with capture_internal_exceptions():
+ integration = Hub.current.get_integration(SparkWorkerIntegration)
+ task_context = TaskContext.get()
+
+ if integration is None or task_context is None:
+ return event
+
+ event.setdefault("tags", {}).setdefault(
+ "stageId", task_context.stageId()
+ )
+ event["tags"].setdefault("partitionId", task_context.partitionId())
+ event["tags"].setdefault("attemptNumber", task_context.attemptNumber())
+ event["tags"].setdefault("taskAttemptId", task_context.taskAttemptId())
+
+ if task_context._localProperties:
+ if "sentry_app_name" in task_context._localProperties:
+ event["tags"].setdefault(
+ "app_name", task_context._localProperties["sentry_app_name"]
+ )
+ event["tags"].setdefault(
+ "application_id",
+ task_context._localProperties["sentry_application_id"],
+ )
+
+ if "callSite.short" in task_context._localProperties:
+ event.setdefault("extra", {}).setdefault(
+ "callSite", task_context._localProperties["callSite.short"]
+ )
+
+ return event
+
+
+def _sentry_worker_main(*args, **kwargs):
+ # type: (*Optional[Any], **Optional[Any]) -> None
+ import pyspark.worker as original_worker
+
+ try:
+ original_worker.main(*args, **kwargs)
+ except SystemExit:
+ if Hub.current.get_integration(SparkWorkerIntegration) is not None:
+ hub = Hub.current
+ exc_info = sys.exc_info()
+ with capture_internal_exceptions():
+ _capture_exception(exc_info, hub)
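
Because `setup_once()` replaces `pyspark.daemon.worker_main`, the SDK has to be initialized inside each worker's Python process. How that code is injected (for example via a custom daemon module) is deployment-specific; the initialization itself is just:

```python
import sentry_sdk
from sentry_sdk.integrations.spark import SparkWorkerIntegration

# Placeholder DSN; this must run in the worker process before
# pyspark.daemon hands control to worker_main.
sentry_sdk.init(dsn="https://<key>@<host>/<project>",
                integrations=[SparkWorkerIntegration()])
```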
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/sqlalchemy.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/sqlalchemy.py
new file mode 100644
index 0000000000..f24d2f20bf
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/sqlalchemy.py
@@ -0,0 +1,86 @@
+from __future__ import absolute_import
+
+from sentry_sdk._types import MYPY
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.tracing import record_sql_queries
+
+try:
+ from sqlalchemy.engine import Engine # type: ignore
+ from sqlalchemy.event import listen # type: ignore
+ from sqlalchemy import __version__ as SQLALCHEMY_VERSION # type: ignore
+except ImportError:
+ raise DidNotEnable("SQLAlchemy not installed.")
+
+if MYPY:
+ from typing import Any
+ from typing import ContextManager
+ from typing import Optional
+
+ from sentry_sdk.tracing import Span
+
+
+class SqlalchemyIntegration(Integration):
+ identifier = "sqlalchemy"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+
+ try:
+ version = tuple(map(int, SQLALCHEMY_VERSION.split("b")[0].split(".")))
+ except (TypeError, ValueError):
+ raise DidNotEnable(
+ "Unparseable SQLAlchemy version: {}".format(SQLALCHEMY_VERSION)
+ )
+
+ if version < (1, 2):
+ raise DidNotEnable("SQLAlchemy 1.2 or newer required.")
+
+ listen(Engine, "before_cursor_execute", _before_cursor_execute)
+ listen(Engine, "after_cursor_execute", _after_cursor_execute)
+ listen(Engine, "handle_error", _handle_error)
+
+
+def _before_cursor_execute(
+ conn, cursor, statement, parameters, context, executemany, *args
+):
+ # type: (Any, Any, Any, Any, Any, bool, *Any) -> None
+ hub = Hub.current
+ if hub.get_integration(SqlalchemyIntegration) is None:
+ return
+
+ ctx_mgr = record_sql_queries(
+ hub,
+ cursor,
+ statement,
+ parameters,
+ paramstyle=context and context.dialect and context.dialect.paramstyle or None,
+ executemany=executemany,
+ )
+ conn._sentry_sql_span_manager = ctx_mgr
+
+ span = ctx_mgr.__enter__()
+
+ if span is not None:
+ conn._sentry_sql_span = span
+
+
+def _after_cursor_execute(conn, cursor, statement, *args):
+ # type: (Any, Any, Any, *Any) -> None
+ ctx_mgr = getattr(
+ conn, "_sentry_sql_span_manager", None
+ ) # type: ContextManager[Any]
+
+ if ctx_mgr is not None:
+ conn._sentry_sql_span_manager = None
+ ctx_mgr.__exit__(None, None, None)
+
+
+def _handle_error(context, *args):
+ # type: (Any, *Any) -> None
+ conn = context.connection
+ span = getattr(conn, "_sentry_sql_span", None) # type: Optional[Span]
+
+ if span is not None:
+ span.set_status("internal_error")
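
With the integration enabled, every cursor execution on any `Engine` is wrapped by the listeners registered above. A minimal sketch using an in-memory SQLite engine:

```python
import sentry_sdk
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
from sqlalchemy import create_engine, text

sentry_sdk.init(dsn="https://<key>@<host>/<project>",
                integrations=[SqlalchemyIntegration()])

engine = create_engine("sqlite://")
with engine.connect() as conn:
    conn.execute(text("SELECT 1"))  # recorded as a span via record_sql_queries
```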
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/stdlib.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/stdlib.py
new file mode 100644
index 0000000000..56cece70ac
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/stdlib.py
@@ -0,0 +1,230 @@
+import os
+import subprocess
+import sys
+import platform
+
+from sentry_sdk.hub import Hub
+from sentry_sdk.integrations import Integration
+from sentry_sdk.scope import add_global_event_processor
+from sentry_sdk.tracing import EnvironHeaders
+from sentry_sdk.utils import capture_internal_exceptions, safe_repr
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Callable
+ from typing import Dict
+ from typing import Optional
+ from typing import List
+
+ from sentry_sdk._types import Event, Hint
+
+
+try:
+ from httplib import HTTPConnection # type: ignore
+except ImportError:
+ from http.client import HTTPConnection
+
+
+_RUNTIME_CONTEXT = {
+ "name": platform.python_implementation(),
+ "version": "%s.%s.%s" % (sys.version_info[:3]),
+ "build": sys.version,
+}
+
+
+class StdlibIntegration(Integration):
+ identifier = "stdlib"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ _install_httplib()
+ _install_subprocess()
+
+ @add_global_event_processor
+ def add_python_runtime_context(event, hint):
+ # type: (Event, Hint) -> Optional[Event]
+ if Hub.current.get_integration(StdlibIntegration) is not None:
+ contexts = event.setdefault("contexts", {})
+ if isinstance(contexts, dict) and "runtime" not in contexts:
+ contexts["runtime"] = _RUNTIME_CONTEXT
+
+ return event
+
+
+def _install_httplib():
+ # type: () -> None
+ real_putrequest = HTTPConnection.putrequest
+ real_getresponse = HTTPConnection.getresponse
+
+ def putrequest(self, method, url, *args, **kwargs):
+ # type: (HTTPConnection, str, str, *Any, **Any) -> Any
+ hub = Hub.current
+ if hub.get_integration(StdlibIntegration) is None:
+ return real_putrequest(self, method, url, *args, **kwargs)
+
+ host = self.host
+ port = self.port
+ default_port = self.default_port
+
+ real_url = url
+ if not real_url.startswith(("http://", "https://")):
+ real_url = "%s://%s%s%s" % (
+ default_port == 443 and "https" or "http",
+ host,
+ port != default_port and ":%s" % port or "",
+ url,
+ )
+
+ span = hub.start_span(op="http", description="%s %s" % (method, real_url))
+
+ span.set_data("method", method)
+ span.set_data("url", real_url)
+
+ rv = real_putrequest(self, method, url, *args, **kwargs)
+
+ for key, value in hub.iter_trace_propagation_headers():
+ self.putheader(key, value)
+
+ self._sentrysdk_span = span
+
+ return rv
+
+ def getresponse(self, *args, **kwargs):
+ # type: (HTTPConnection, *Any, **Any) -> Any
+ span = getattr(self, "_sentrysdk_span", None)
+
+ if span is None:
+ return real_getresponse(self, *args, **kwargs)
+
+ rv = real_getresponse(self, *args, **kwargs)
+
+ span.set_data("status_code", rv.status)
+ span.set_http_status(int(rv.status))
+ span.set_data("reason", rv.reason)
+ span.finish()
+
+ return rv
+
+ HTTPConnection.putrequest = putrequest
+ HTTPConnection.getresponse = getresponse
+
+
+def _init_argument(args, kwargs, name, position, setdefault_callback=None):
+ # type: (List[Any], Dict[Any, Any], str, int, Optional[Callable[[Any], Any]]) -> Any
+ """
+ Given the (*args, **kwargs) of a function call, retrieve (and optionally set a
+ default for) an argument by either name or position.
+
+ This is useful for wrapping functions with complex type signatures and
+ extracting a few arguments without needing to redefine that function's
+ entire type signature.
+ """
+
+ if name in kwargs:
+ rv = kwargs[name]
+ if setdefault_callback is not None:
+ rv = setdefault_callback(rv)
+ if rv is not None:
+ kwargs[name] = rv
+ elif position < len(args):
+ rv = args[position]
+ if setdefault_callback is not None:
+ rv = setdefault_callback(rv)
+ if rv is not None:
+ args[position] = rv
+ else:
+ rv = setdefault_callback and setdefault_callback(None)
+ if rv is not None:
+ kwargs[name] = rv
+
+ return rv
+
+
+def _install_subprocess():
+ # type: () -> None
+ old_popen_init = subprocess.Popen.__init__
+
+ def sentry_patched_popen_init(self, *a, **kw):
+ # type: (subprocess.Popen[Any], *Any, **Any) -> None
+
+ hub = Hub.current
+ if hub.get_integration(StdlibIntegration) is None:
+ return old_popen_init(self, *a, **kw) # type: ignore
+
+ # Convert from tuple to list to be able to set values.
+ a = list(a)
+
+ args = _init_argument(a, kw, "args", 0) or []
+ cwd = _init_argument(a, kw, "cwd", 9)
+
+ # If args is not a list or tuple (but, e.g., some iterator instead),
+ # let's not use it at all. There are too many things that can go wrong
+ # when trying to collect an iterator into a list and setting that list
+ # into `a` again.
+ #
+ # Also invocations where `args` is not a sequence are not actually
+ # legal. They just happen to work under CPython.
+ description = None
+
+ if isinstance(args, (list, tuple)) and len(args) < 100:
+ with capture_internal_exceptions():
+ description = " ".join(map(str, args))
+
+ if description is None:
+ description = safe_repr(args)
+
+ env = None
+
+ for k, v in hub.iter_trace_propagation_headers():
+ if env is None:
+ env = _init_argument(a, kw, "env", 10, lambda x: dict(x or os.environ))
+ env["SUBPROCESS_" + k.upper().replace("-", "_")] = v
+
+ with hub.start_span(op="subprocess", description=description) as span:
+ if cwd:
+ span.set_data("subprocess.cwd", cwd)
+
+ rv = old_popen_init(self, *a, **kw) # type: ignore
+
+ span.set_tag("subprocess.pid", self.pid)
+ return rv
+
+ subprocess.Popen.__init__ = sentry_patched_popen_init # type: ignore
+
+ old_popen_wait = subprocess.Popen.wait
+
+ def sentry_patched_popen_wait(self, *a, **kw):
+ # type: (subprocess.Popen[Any], *Any, **Any) -> Any
+ hub = Hub.current
+
+ if hub.get_integration(StdlibIntegration) is None:
+ return old_popen_wait(self, *a, **kw)
+
+ with hub.start_span(op="subprocess.wait") as span:
+ span.set_tag("subprocess.pid", self.pid)
+ return old_popen_wait(self, *a, **kw)
+
+ subprocess.Popen.wait = sentry_patched_popen_wait # type: ignore
+
+ old_popen_communicate = subprocess.Popen.communicate
+
+ def sentry_patched_popen_communicate(self, *a, **kw):
+ # type: (subprocess.Popen[Any], *Any, **Any) -> Any
+ hub = Hub.current
+
+ if hub.get_integration(StdlibIntegration) is None:
+ return old_popen_communicate(self, *a, **kw)
+
+ with hub.start_span(op="subprocess.communicate") as span:
+ span.set_tag("subprocess.pid", self.pid)
+ return old_popen_communicate(self, *a, **kw)
+
+ subprocess.Popen.communicate = sentry_patched_popen_communicate # type: ignore
+
+
+def get_subprocess_traceparent_headers():
+ # type: () -> EnvironHeaders
+ return EnvironHeaders(os.environ, prefix="SUBPROCESS_")
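
The `Popen` patch above copies each trace propagation header into the child's environment under a `SUBPROCESS_` prefix; `get_subprocess_traceparent_headers()` re-exposes those entries on the other side. A hedged sketch of the child-process side, assuming `EnvironHeaders`' mapping-style lookup:

```python
from sentry_sdk.integrations.stdlib import get_subprocess_traceparent_headers

# "sentry-trace" resolves to the SUBPROCESS_SENTRY_TRACE environment
# variable written by the parent's patched Popen.__init__.
traceparent = get_subprocess_traceparent_headers().get("sentry-trace")
```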
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/threading.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/threading.py
new file mode 100644
index 0000000000..b750257e2a
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/threading.py
@@ -0,0 +1,90 @@
+from __future__ import absolute_import
+
+import sys
+from threading import Thread, current_thread
+
+from sentry_sdk import Hub
+from sentry_sdk._compat import reraise
+from sentry_sdk._types import MYPY
+from sentry_sdk.integrations import Integration
+from sentry_sdk.utils import event_from_exception, capture_internal_exceptions
+
+if MYPY:
+ from typing import Any
+ from typing import TypeVar
+ from typing import Callable
+ from typing import Optional
+
+ from sentry_sdk._types import ExcInfo
+
+ F = TypeVar("F", bound=Callable[..., Any])
+
+
+class ThreadingIntegration(Integration):
+ identifier = "threading"
+
+ def __init__(self, propagate_hub=False):
+ # type: (bool) -> None
+ self.propagate_hub = propagate_hub
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ old_start = Thread.start
+
+ def sentry_start(self, *a, **kw):
+ # type: (Thread, *Any, **Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(ThreadingIntegration)
+ if integration is not None:
+ if not integration.propagate_hub:
+ hub_ = None
+ else:
+ hub_ = Hub(hub)
+ # Patching instance methods in `start()` creates a reference cycle if
+ # done in a naive way. See
+ # https://github.com/getsentry/sentry-python/pull/434
+ #
+ # In the threading module, the current_thread API hands us the
+ # current thread instance without us keeping a reference to it,
+ # which avoids the reference cycle in a simpler way.
+ with capture_internal_exceptions():
+ new_run = _wrap_run(hub_, getattr(self.run, "__func__", self.run))
+ self.run = new_run # type: ignore
+
+ return old_start(self, *a, **kw) # type: ignore
+
+ Thread.start = sentry_start # type: ignore
+
+
+def _wrap_run(parent_hub, old_run_func):
+ # type: (Optional[Hub], F) -> F
+ def run(*a, **kw):
+ # type: (*Any, **Any) -> Any
+ hub = parent_hub or Hub.current
+ with hub:
+ try:
+ self = current_thread()
+ return old_run_func(self, *a, **kw)
+ except Exception:
+ reraise(*_capture_exception())
+
+ return run # type: ignore
+
+
+def _capture_exception():
+ # type: () -> ExcInfo
+ hub = Hub.current
+ exc_info = sys.exc_info()
+
+ if hub.get_integration(ThreadingIntegration) is not None:
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=client.options,
+ mechanism={"type": "threading", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+ return exc_info
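
A minimal sketch: with `propagate_hub=True` the spawning thread's hub (and its scope data) is reused inside the child thread; with the default `False`, the child falls back to whatever `Hub.current` resolves to there.

```python
import sentry_sdk
from sentry_sdk.integrations.threading import ThreadingIntegration
from threading import Thread

sentry_sdk.init(dsn="https://<key>@<host>/<project>",
                integrations=[ThreadingIntegration(propagate_hub=True)])

def work():
    raise RuntimeError("captured with mechanism 'threading', then re-raised")

t = Thread(target=work)
t.start()  # sentry_start wraps t.run before delegating to Thread.start
t.join()
```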
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/tornado.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/tornado.py
new file mode 100644
index 0000000000..d3ae065690
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/tornado.py
@@ -0,0 +1,203 @@
+import weakref
+from inspect import iscoroutinefunction
+
+from sentry_sdk.hub import Hub, _should_send_default_pii
+from sentry_sdk.utils import (
+ HAS_REAL_CONTEXTVARS,
+ event_from_exception,
+ capture_internal_exceptions,
+ transaction_from_function,
+)
+from sentry_sdk.integrations import Integration, DidNotEnable
+from sentry_sdk.integrations._wsgi_common import (
+ RequestExtractor,
+ _filter_headers,
+ _is_json_content_type,
+)
+from sentry_sdk.integrations.logging import ignore_logger
+from sentry_sdk._compat import iteritems
+
+try:
+ from tornado import version_info as TORNADO_VERSION # type: ignore
+ from tornado.web import RequestHandler, HTTPError
+ from tornado.gen import coroutine
+except ImportError:
+ raise DidNotEnable("Tornado not installed")
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Optional
+ from typing import Dict
+ from typing import Callable
+
+ from sentry_sdk._types import EventProcessor
+
+
+class TornadoIntegration(Integration):
+ identifier = "tornado"
+
+ @staticmethod
+ def setup_once():
+ # type: () -> None
+ if TORNADO_VERSION < (5, 0):
+ raise DidNotEnable("Tornado 5+ required")
+
+ if not HAS_REAL_CONTEXTVARS:
+ # Tornado is async. We'd better have contextvars or we're going to leak
+ # state between requests.
+ raise DidNotEnable(
+ "The tornado integration for Sentry requires Python 3.6+ or the aiocontextvars package"
+ )
+
+ ignore_logger("tornado.access")
+
+ old_execute = RequestHandler._execute # type: ignore
+
+ awaitable = iscoroutinefunction(old_execute)
+
+ if awaitable:
+ # Starting with Tornado 6, the RequestHandler._execute method is a standard
+ # Python coroutine (async/await), so our replacement must be a coroutine
+ # function too.
+ async def sentry_execute_request_handler(self, *args, **kwargs):
+ # type: (Any, *Any, **Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(TornadoIntegration)
+ if integration is None:
+ return await old_execute(self, *args, **kwargs)
+
+ weak_handler = weakref.ref(self)
+
+ with Hub(hub) as hub:
+ with hub.configure_scope() as scope:
+ scope.clear_breadcrumbs()
+ processor = _make_event_processor(weak_handler) # type: ignore
+ scope.add_event_processor(processor)
+ return await old_execute(self, *args, **kwargs)
+
+ else:
+
+ @coroutine # type: ignore
+ def sentry_execute_request_handler(self, *args, **kwargs):
+ # type: (RequestHandler, *Any, **Any) -> Any
+ hub = Hub.current
+ integration = hub.get_integration(TornadoIntegration)
+ if integration is None:
+ return old_execute(self, *args, **kwargs)
+
+ weak_handler = weakref.ref(self)
+
+ with Hub(hub) as hub:
+ with hub.configure_scope() as scope:
+ scope.clear_breadcrumbs()
+ processor = _make_event_processor(weak_handler) # type: ignore
+ scope.add_event_processor(processor)
+ result = yield from old_execute(self, *args, **kwargs)
+ return result
+
+ RequestHandler._execute = sentry_execute_request_handler # type: ignore
+
+ old_log_exception = RequestHandler.log_exception
+
+ def sentry_log_exception(self, ty, value, tb, *args, **kwargs):
+ # type: (Any, type, BaseException, Any, *Any, **Any) -> Optional[Any]
+ _capture_exception(ty, value, tb)
+ return old_log_exception(self, ty, value, tb, *args, **kwargs) # type: ignore
+
+ RequestHandler.log_exception = sentry_log_exception # type: ignore
+
+
+def _capture_exception(ty, value, tb):
+ # type: (type, BaseException, Any) -> None
+ hub = Hub.current
+ if hub.get_integration(TornadoIntegration) is None:
+ return
+ if isinstance(value, HTTPError):
+ return
+
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+
+ event, hint = event_from_exception(
+ (ty, value, tb),
+ client_options=client.options,
+ mechanism={"type": "tornado", "handled": False},
+ )
+
+ hub.capture_event(event, hint=hint)
+
+
+def _make_event_processor(weak_handler):
+ # type: (Callable[[], RequestHandler]) -> EventProcessor
+ def tornado_processor(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ handler = weak_handler()
+ if handler is None:
+ return event
+
+ request = handler.request
+
+ with capture_internal_exceptions():
+ method = getattr(handler, handler.request.method.lower())
+ event["transaction"] = transaction_from_function(method)
+
+ with capture_internal_exceptions():
+ extractor = TornadoRequestExtractor(request)
+ extractor.extract_into_event(event)
+
+ request_info = event["request"]
+
+ request_info["url"] = "%s://%s%s" % (
+ request.protocol,
+ request.host,
+ request.path,
+ )
+
+ request_info["query_string"] = request.query
+ request_info["method"] = request.method
+ request_info["env"] = {"REMOTE_ADDR": request.remote_ip}
+ request_info["headers"] = _filter_headers(dict(request.headers))
+
+ with capture_internal_exceptions():
+ if handler.current_user and _should_send_default_pii():
+ event.setdefault("user", {}).setdefault("is_authenticated", True)
+
+ return event
+
+ return tornado_processor
+
+
+class TornadoRequestExtractor(RequestExtractor):
+ def content_length(self):
+ # type: () -> int
+ if self.request.body is None:
+ return 0
+ return len(self.request.body)
+
+ def cookies(self):
+ # type: () -> Dict[str, str]
+ return {k: v.value for k, v in iteritems(self.request.cookies)}
+
+ def raw_data(self):
+ # type: () -> bytes
+ return self.request.body
+
+ def form(self):
+ # type: () -> Dict[str, Any]
+ return {
+ k: [v.decode("latin1", "replace") for v in vs]
+ for k, vs in iteritems(self.request.body_arguments)
+ }
+
+ def is_json(self):
+ # type: () -> bool
+ return _is_json_content_type(self.request.headers.get("content-type"))
+
+ def files(self):
+ # type: () -> Dict[str, Any]
+ return {k: v[0] for k, v in iteritems(self.request.files) if v}
+
+ def size_of_file(self, file):
+ # type: (Any) -> int
+ return len(file.body or ())
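
A minimal sketch, assuming a conventional Tornado application; the DSN is a placeholder:

```python
import sentry_sdk
import tornado.web
from sentry_sdk.integrations.tornado import TornadoIntegration

sentry_sdk.init(dsn="https://<key>@<host>/<project>",
                integrations=[TornadoIntegration()])

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        1 / 0  # reported through the patched log_exception above

app = tornado.web.Application([(r"/", MainHandler)])
```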
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/trytond.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/trytond.py
new file mode 100644
index 0000000000..062a756993
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/trytond.py
@@ -0,0 +1,55 @@
+import sentry_sdk.hub
+import sentry_sdk.utils
+import sentry_sdk.integrations
+import sentry_sdk.integrations.wsgi
+from sentry_sdk._types import MYPY
+
+from trytond.exceptions import TrytonException # type: ignore
+from trytond.wsgi import app # type: ignore
+
+if MYPY:
+ from typing import Any
+
+
+# TODO: trytond-worker, trytond-cron and trytond-admin integrations
+
+
+class TrytondWSGIIntegration(sentry_sdk.integrations.Integration):
+ identifier = "trytond_wsgi"
+
+ def __init__(self): # type: () -> None
+ pass
+
+ @staticmethod
+ def setup_once(): # type: () -> None
+
+ app.wsgi_app = sentry_sdk.integrations.wsgi.SentryWsgiMiddleware(app.wsgi_app)
+
+ def error_handler(e): # type: (Exception) -> None
+ hub = sentry_sdk.hub.Hub.current
+
+ if hub.get_integration(TrytondWSGIIntegration) is None:
+ return
+ elif isinstance(e, TrytonException):
+ return
+ else:
+ # If an integration is there, a client has to be there.
+ client = hub.client # type: Any
+ event, hint = sentry_sdk.utils.event_from_exception(
+ e,
+ client_options=client.options,
+ mechanism={"type": "trytond", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+ # The expected error handler signature changed when the
+ # error_handler decorator was introduced in Tryton-5.4
+ if hasattr(app, "error_handler"):
+
+ @app.error_handler
+ def _(app, request, e): # type: ignore
+ error_handler(e)
+
+ else:
+ app.error_handlers.append(error_handler)
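
Since `setup_once()` wraps `trytond.wsgi.app` itself, enabling the integration is all that is required:

```python
import sentry_sdk
from sentry_sdk.integrations.trytond import TrytondWSGIIntegration

# Placeholder DSN; TrytonException subclasses are deliberately not reported.
sentry_sdk.init(dsn="https://<key>@<host>/<project>",
                integrations=[TrytondWSGIIntegration()])
```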
diff --git a/third_party/python/sentry_sdk/sentry_sdk/integrations/wsgi.py b/third_party/python/sentry_sdk/sentry_sdk/integrations/wsgi.py
new file mode 100644
index 0000000000..22982d8bb1
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/integrations/wsgi.py
@@ -0,0 +1,309 @@
+import functools
+import sys
+
+from sentry_sdk.hub import Hub, _should_send_default_pii
+from sentry_sdk.utils import (
+ ContextVar,
+ capture_internal_exceptions,
+ event_from_exception,
+)
+from sentry_sdk._compat import PY2, reraise, iteritems
+from sentry_sdk.tracing import Span
+from sentry_sdk.sessions import auto_session_tracking
+from sentry_sdk.integrations._wsgi_common import _filter_headers
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Callable
+ from typing import Dict
+ from typing import Iterator
+ from typing import Any
+ from typing import Tuple
+ from typing import Optional
+ from typing import TypeVar
+ from typing import Protocol
+
+ from sentry_sdk.utils import ExcInfo
+ from sentry_sdk._types import EventProcessor
+
+ WsgiResponseIter = TypeVar("WsgiResponseIter")
+ WsgiResponseHeaders = TypeVar("WsgiResponseHeaders")
+ WsgiExcInfo = TypeVar("WsgiExcInfo")
+
+ class StartResponse(Protocol):
+ def __call__(self, status, response_headers, exc_info=None):
+ # type: (str, WsgiResponseHeaders, Optional[WsgiExcInfo]) -> WsgiResponseIter
+ pass
+
+
+_wsgi_middleware_applied = ContextVar("sentry_wsgi_middleware_applied")
+
+
+if PY2:
+
+ def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
+ # type: (str, str, str) -> str
+ return s.decode(charset, errors)
+
+
+else:
+
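+ # PEP 3333: on Python 3 the WSGI environ holds native strings that were
+ # decoded as latin1, so round-trip through latin1 before decoding with
+ # the desired charset.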
+ def wsgi_decoding_dance(s, charset="utf-8", errors="replace"):
+ # type: (str, str, str) -> str
+ return s.encode("latin1").decode(charset, errors)
+
+
+def get_host(environ):
+ # type: (Dict[str, str]) -> str
+ """Return the host for the given WSGI environment. Yanked from Werkzeug."""
+ if environ.get("HTTP_HOST"):
+ rv = environ["HTTP_HOST"]
+ if environ["wsgi.url_scheme"] == "http" and rv.endswith(":80"):
+ rv = rv[:-3]
+ elif environ["wsgi.url_scheme"] == "https" and rv.endswith(":443"):
+ rv = rv[:-4]
+ elif environ.get("SERVER_NAME"):
+ rv = environ["SERVER_NAME"]
+ if (environ["wsgi.url_scheme"], environ["SERVER_PORT"]) not in (
+ ("https", "443"),
+ ("http", "80"),
+ ):
+ rv += ":" + environ["SERVER_PORT"]
+ else:
+ # In spite of the WSGI spec, SERVER_NAME might not be present.
+ rv = "unknown"
+
+ return rv
+
+
+def get_request_url(environ):
+ # type: (Dict[str, str]) -> str
+ """Return the absolute URL without query string for the given WSGI
+ environment."""
+ return "%s://%s/%s" % (
+ environ.get("wsgi.url_scheme"),
+ get_host(environ),
+ wsgi_decoding_dance(environ.get("PATH_INFO") or "").lstrip("/"),
+ )
+
+
+class SentryWsgiMiddleware(object):
+ __slots__ = ("app",)
+
+ def __init__(self, app):
+ # type: (Callable[[Dict[str, str], Callable[..., Any]], Any]) -> None
+ self.app = app
+
+ def __call__(self, environ, start_response):
+ # type: (Dict[str, str], Callable[..., Any]) -> _ScopedResponse
+ if _wsgi_middleware_applied.get(False):
+ return self.app(environ, start_response)
+
+ _wsgi_middleware_applied.set(True)
+ try:
+ hub = Hub(Hub.current)
+ with auto_session_tracking(hub):
+ with hub:
+ with capture_internal_exceptions():
+ with hub.configure_scope() as scope:
+ scope.clear_breadcrumbs()
+ scope._name = "wsgi"
+ scope.add_event_processor(
+ _make_wsgi_event_processor(environ)
+ )
+
+ span = Span.continue_from_environ(environ)
+ span.op = "http.server"
+ span.transaction = "generic WSGI request"
+
+ with hub.start_span(span) as span:
+ try:
+ rv = self.app(
+ environ,
+ functools.partial(
+ _sentry_start_response, start_response, span
+ ),
+ )
+ except BaseException:
+ reraise(*_capture_exception(hub))
+ finally:
+ _wsgi_middleware_applied.set(False)
+
+ return _ScopedResponse(hub, rv)
+
+
+def _sentry_start_response(
+ old_start_response, # type: StartResponse
+ span, # type: Span
+ status, # type: str
+ response_headers, # type: WsgiResponseHeaders
+ exc_info=None, # type: Optional[WsgiExcInfo]
+):
+ # type: (...) -> WsgiResponseIter
+ with capture_internal_exceptions():
+ status_int = int(status.split(" ", 1)[0])
+ span.set_http_status(status_int)
+
+ if exc_info is None:
+ # The Django Rest Framework WSGI test client, and likely other
+ # (incorrect) implementations, cannot deal with the exc_info argument
+ # if one is present. Avoid providing a third argument if not necessary.
+ return old_start_response(status, response_headers)
+ else:
+ return old_start_response(status, response_headers, exc_info)
+
+
+def _get_environ(environ):
+ # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
+ """
+ Returns our whitelisted environment variables.
+ """
+ keys = ["SERVER_NAME", "SERVER_PORT"]
+ if _should_send_default_pii():
+ # make debugging of proxy setup easier. Proxy headers are
+ # in headers.
+ keys += ["REMOTE_ADDR"]
+
+ for key in keys:
+ if key in environ:
+ yield key, environ[key]
+
+
+# `get_headers` comes from `werkzeug.datastructures.EnvironHeaders`
+#
+# We need this function because Django does not give us a "pure" http header
+# dict. So we might as well use it for all WSGI integrations.
+def _get_headers(environ):
+ # type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
+ """
+ Returns only proper HTTP headers.
+
+ """
+ for key, value in iteritems(environ):
+ key = str(key)
+ if key.startswith("HTTP_") and key not in (
+ "HTTP_CONTENT_TYPE",
+ "HTTP_CONTENT_LENGTH",
+ ):
+ yield key[5:].replace("_", "-").title(), value
+ elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
+ yield key.replace("_", "-").title(), value
+
+
+def get_client_ip(environ):
+ # type: (Dict[str, str]) -> Optional[Any]
+ """
+ Infer the user IP address from various headers. This cannot be used in
+ security-sensitive situations since the value may be forged by a client,
+ but it's good enough for the event payload.
+ """
+ try:
+ return environ["HTTP_X_FORWARDED_FOR"].split(",")[0].strip()
+ except (KeyError, IndexError):
+ pass
+
+ try:
+ return environ["HTTP_X_REAL_IP"]
+ except KeyError:
+ pass
+
+ return environ.get("REMOTE_ADDR")
+
+
+def _capture_exception(hub):
+ # type: (Hub) -> ExcInfo
+ exc_info = sys.exc_info()
+
+ # Check client here as it might have been unset while streaming response
+ if hub.client is not None:
+ e = exc_info[1]
+
+ # SystemExit(0) is the only uncaught exception that is expected behavior
+ should_skip_capture = isinstance(e, SystemExit) and e.code in (0, None)
+ if not should_skip_capture:
+ event, hint = event_from_exception(
+ exc_info,
+ client_options=hub.client.options,
+ mechanism={"type": "wsgi", "handled": False},
+ )
+ hub.capture_event(event, hint=hint)
+
+ return exc_info
+
+
+class _ScopedResponse(object):
+ __slots__ = ("_response", "_hub")
+
+ def __init__(self, hub, response):
+ # type: (Hub, Iterator[bytes]) -> None
+ self._hub = hub
+ self._response = response
+
+ def __iter__(self):
+ # type: () -> Iterator[bytes]
+ iterator = iter(self._response)
+
+ while True:
+ with self._hub:
+ try:
+ chunk = next(iterator)
+ except StopIteration:
+ break
+ except BaseException:
+ reraise(*_capture_exception(self._hub))
+
+ yield chunk
+
+ def close(self):
+ # type: () -> None
+ with self._hub:
+ try:
+ self._response.close() # type: ignore
+ except AttributeError:
+ pass
+ except BaseException:
+ reraise(*_capture_exception(self._hub))
+
+
+def _make_wsgi_event_processor(environ):
+ # type: (Dict[str, str]) -> EventProcessor
+ # It's a bit unfortunate that we have to extract and parse the request data
+ # from the environ so eagerly, but there are a few good reasons for this.
+ #
+ # We might be in a situation where the scope/hub never gets torn down
+ # properly. In that case we will have an unnecessary strong reference to
+ # all objects in the environ (some of which may take a lot of memory) when
+ # we're really just interested in a few of them.
+ #
+ # Keeping the environment around for longer than the request lifecycle is
+ # also not necessarily something uWSGI can deal with:
+ # https://github.com/unbit/uwsgi/issues/1950
+
+ client_ip = get_client_ip(environ)
+ request_url = get_request_url(environ)
+ query_string = environ.get("QUERY_STRING")
+ method = environ.get("REQUEST_METHOD")
+ env = dict(_get_environ(environ))
+ headers = _filter_headers(dict(_get_headers(environ)))
+
+ def event_processor(event, hint):
+ # type: (Dict[str, Any], Dict[str, Any]) -> Dict[str, Any]
+ with capture_internal_exceptions():
+ # if the code below fails halfway through we at least have some data
+ request_info = event.setdefault("request", {})
+
+ if _should_send_default_pii():
+ user_info = event.setdefault("user", {})
+ if client_ip:
+ user_info.setdefault("ip_address", client_ip)
+
+ request_info["url"] = request_url
+ request_info["query_string"] = query_string
+ request_info["method"] = method
+ request_info["env"] = env
+ request_info["headers"] = headers
+
+ return event
+
+ return event_processor
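
Framework integrations normally apply this middleware for you; a hedged sketch of wrapping a bare WSGI callable directly:

```python
from sentry_sdk.integrations.wsgi import SentryWsgiMiddleware

def app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello"]

# Each request runs in its own Hub; errors raised while handling or while
# streaming the response are captured with mechanism "wsgi".
app = SentryWsgiMiddleware(app)
```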
diff --git a/third_party/python/sentry_sdk/sentry_sdk/py.typed b/third_party/python/sentry_sdk/sentry_sdk/py.typed
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/py.typed
diff --git a/third_party/python/sentry_sdk/sentry_sdk/scope.py b/third_party/python/sentry_sdk/sentry_sdk/scope.py
new file mode 100644
index 0000000000..407af3a2cb
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/scope.py
@@ -0,0 +1,408 @@
+from copy import copy
+from collections import deque
+from functools import wraps
+from itertools import chain
+
+from sentry_sdk.utils import logger, capture_internal_exceptions
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Any
+ from typing import Dict
+ from typing import Optional
+ from typing import Deque
+ from typing import List
+ from typing import Callable
+ from typing import TypeVar
+
+ from sentry_sdk._types import (
+ Breadcrumb,
+ Event,
+ EventProcessor,
+ ErrorProcessor,
+ ExcInfo,
+ Hint,
+ Type,
+ )
+
+ from sentry_sdk.tracing import Span
+ from sentry_sdk.sessions import Session
+
+ F = TypeVar("F", bound=Callable[..., Any])
+ T = TypeVar("T")
+
+
+global_event_processors = [] # type: List[EventProcessor]
+
+
+def add_global_event_processor(processor):
+ # type: (EventProcessor) -> None
+ global_event_processors.append(processor)
+
+
+def _attr_setter(fn):
+ # type: (Any) -> Any
+ return property(fset=fn, doc=fn.__doc__)
+
+
+def _disable_capture(fn):
+ # type: (F) -> F
+ @wraps(fn)
+ def wrapper(self, *args, **kwargs):
+ # type: (Any, *Dict[str, Any], **Any) -> Any
+ if not self._should_capture:
+ return
+ try:
+ self._should_capture = False
+ return fn(self, *args, **kwargs)
+ finally:
+ self._should_capture = True
+
+ return wrapper # type: ignore
+
+
+class Scope(object):
+ """The scope holds extra information that should be sent with all
+ events that belong to it.
+ """
+
+ # NOTE: Even though it should not happen, the scope needs to not crash when
+ # accessed by multiple threads. It's fine if it's full of races, but those
+ # races should never make the user application crash.
+ #
+ # The same needs to hold for any accesses of the scope the SDK makes.
+
+ __slots__ = (
+ "_level",
+ "_name",
+ "_fingerprint",
+ "_transaction",
+ "_user",
+ "_tags",
+ "_contexts",
+ "_extras",
+ "_breadcrumbs",
+ "_event_processors",
+ "_error_processors",
+ "_should_capture",
+ "_span",
+ "_session",
+ "_force_auto_session_tracking",
+ )
+
+ def __init__(self):
+ # type: () -> None
+ self._event_processors = [] # type: List[EventProcessor]
+ self._error_processors = [] # type: List[ErrorProcessor]
+
+ self._name = None # type: Optional[str]
+ self.clear()
+
+ def clear(self):
+ # type: () -> None
+ """Clears the entire scope."""
+ self._level = None # type: Optional[str]
+ self._fingerprint = None # type: Optional[List[str]]
+ self._transaction = None # type: Optional[str]
+ self._user = None # type: Optional[Dict[str, Any]]
+
+ self._tags = {} # type: Dict[str, Any]
+ self._contexts = {} # type: Dict[str, Dict[str, Any]]
+ self._extras = {} # type: Dict[str, Any]
+
+ self.clear_breadcrumbs()
+ self._should_capture = True
+
+ self._span = None # type: Optional[Span]
+ self._session = None # type: Optional[Session]
+ self._force_auto_session_tracking = None # type: Optional[bool]
+
+ @_attr_setter
+ def level(self, value):
+ # type: (Optional[str]) -> None
+ """When set this overrides the level. Deprecated in favor of set_level."""
+ self._level = value
+
+ def set_level(self, value):
+ # type: (Optional[str]) -> None
+ """Sets the level for the scope."""
+ self._level = value
+
+ @_attr_setter
+ def fingerprint(self, value):
+ # type: (Optional[List[str]]) -> None
+ """When set this overrides the default fingerprint."""
+ self._fingerprint = value
+
+ @_attr_setter
+ def transaction(self, value):
+ # type: (Optional[str]) -> None
+ """When set this forces a specific transaction name to be set."""
+ self._transaction = value
+ span = self._span
+ if span:
+ span.transaction = value
+
+ @_attr_setter
+ def user(self, value):
+ # type: (Dict[str, Any]) -> None
+ """When set a specific user is bound to the scope. Deprecated in favor of set_user."""
+ self.set_user(value)
+
+ def set_user(self, value):
+ # type: (Dict[str, Any]) -> None
+ """Sets a user for the scope."""
+ self._user = value
+ if self._session is not None:
+ self._session.update(user=value)
+
+ @property
+ def span(self):
+ # type: () -> Optional[Span]
+ """Get/set current tracing span."""
+ return self._span
+
+ @span.setter
+ def span(self, span):
+ # type: (Optional[Span]) -> None
+ self._span = span
+ if span is not None:
+ span_transaction = span.transaction
+ if span_transaction:
+ self._transaction = span_transaction
+
+ def set_tag(
+ self,
+ key, # type: str
+ value, # type: Any
+ ):
+ # type: (...) -> None
+ """Sets a tag for a key to a specific value."""
+ self._tags[key] = value
+
+ def remove_tag(
+ self, key # type: str
+ ):
+ # type: (...) -> None
+ """Removes a specific tag."""
+ self._tags.pop(key, None)
+
+ def set_context(
+ self,
+ key, # type: str
+ value, # type: Any
+ ):
+ # type: (...) -> None
+ """Binds a context at a certain key to a specific value."""
+ self._contexts[key] = value
+
+ def remove_context(
+ self, key # type: str
+ ):
+ # type: (...) -> None
+ """Removes a context."""
+ self._contexts.pop(key, None)
+
+ def set_extra(
+ self,
+ key, # type: str
+ value, # type: Any
+ ):
+ # type: (...) -> None
+ """Sets an extra key to a specific value."""
+ self._extras[key] = value
+
+ def remove_extra(
+ self, key # type: str
+ ):
+ # type: (...) -> None
+ """Removes a specific extra key."""
+ self._extras.pop(key, None)
+
+ def clear_breadcrumbs(self):
+ # type: () -> None
+ """Clears breadcrumb buffer."""
+ self._breadcrumbs = deque() # type: Deque[Breadcrumb]
+
+ def add_event_processor(
+ self, func # type: EventProcessor
+ ):
+ # type: (...) -> None
+ """Register a scope local event processor on the scope.
+
+ :param func: This function behaves like `before_send`.
+ """
+ if len(self._event_processors) > 20:
+ logger.warning(
+ "Too many event processors on scope! Clearing list to free up some memory: %r",
+ self._event_processors,
+ )
+ del self._event_processors[:]
+
+ self._event_processors.append(func)
+
+ def add_error_processor(
+ self,
+ func, # type: ErrorProcessor
+ cls=None, # type: Optional[Type[BaseException]]
+ ):
+ # type: (...) -> None
+ """Register a scope local error processor on the scope.
+
+ :param func: A callback that works similar to an event processor but is invoked with the original exception info triple as second argument.
+
+ :param cls: Optionally, only process exceptions of this type.
+ """
+ if cls is not None:
+ cls_ = cls # For mypy.
+ real_func = func
+
+ def func(event, exc_info):
+ # type: (Event, ExcInfo) -> Optional[Event]
+ try:
+ is_inst = isinstance(exc_info[1], cls_)
+ except Exception:
+ is_inst = False
+ if is_inst:
+ return real_func(event, exc_info)
+ return event
+
+ self._error_processors.append(func)
+
+ @_disable_capture
+ def apply_to_event(
+ self,
+ event, # type: Event
+ hint, # type: Hint
+ ):
+ # type: (...) -> Optional[Event]
+ """Applies the information contained on the scope to the given event."""
+
+ def _drop(event, cause, ty):
+ # type: (Dict[str, Any], Any, str) -> Optional[Any]
+ logger.info("%s (%s) dropped event (%s)", ty, cause, event)
+ return None
+
+ if self._level is not None:
+ event["level"] = self._level
+
+ if event.get("type") != "transaction":
+ event.setdefault("breadcrumbs", []).extend(self._breadcrumbs)
+
+ if event.get("user") is None and self._user is not None:
+ event["user"] = self._user
+
+ if event.get("transaction") is None and self._transaction is not None:
+ event["transaction"] = self._transaction
+
+ if event.get("fingerprint") is None and self._fingerprint is not None:
+ event["fingerprint"] = self._fingerprint
+
+ if self._extras:
+ event.setdefault("extra", {}).update(self._extras)
+
+ if self._tags:
+ event.setdefault("tags", {}).update(self._tags)
+
+ if self._contexts:
+ event.setdefault("contexts", {}).update(self._contexts)
+
+ if self._span is not None:
+ contexts = event.setdefault("contexts", {})
+ if not contexts.get("trace"):
+ contexts["trace"] = self._span.get_trace_context()
+
+ exc_info = hint.get("exc_info")
+ if exc_info is not None:
+ for error_processor in self._error_processors:
+ new_event = error_processor(event, exc_info)
+ if new_event is None:
+ return _drop(event, error_processor, "error processor")
+ event = new_event
+
+ for event_processor in chain(global_event_processors, self._event_processors):
+ new_event = event
+ with capture_internal_exceptions():
+ new_event = event_processor(event, hint)
+ if new_event is None:
+ return _drop(event, event_processor, "event processor")
+ event = new_event
+
+ return event
+
+ def update_from_scope(self, scope):
+ # type: (Scope) -> None
+ if scope._level is not None:
+ self._level = scope._level
+ if scope._fingerprint is not None:
+ self._fingerprint = scope._fingerprint
+ if scope._transaction is not None:
+ self._transaction = scope._transaction
+ if scope._user is not None:
+ self._user = scope._user
+ if scope._tags:
+ self._tags.update(scope._tags)
+ if scope._contexts:
+ self._contexts.update(scope._contexts)
+ if scope._extras:
+ self._extras.update(scope._extras)
+ if scope._breadcrumbs:
+ self._breadcrumbs.extend(scope._breadcrumbs)
+ if scope._span:
+ self._span = scope._span
+
+ def update_from_kwargs(
+ self,
+ user=None, # type: Optional[Any]
+ level=None, # type: Optional[str]
+ extras=None, # type: Optional[Dict[str, Any]]
+ contexts=None, # type: Optional[Dict[str, Any]]
+ tags=None, # type: Optional[Dict[str, str]]
+ fingerprint=None, # type: Optional[List[str]]
+ ):
+ # type: (...) -> None
+ if level is not None:
+ self._level = level
+ if user is not None:
+ self._user = user
+ if extras is not None:
+ self._extras.update(extras)
+ if contexts is not None:
+ self._contexts.update(contexts)
+ if tags is not None:
+ self._tags.update(tags)
+ if fingerprint is not None:
+ self._fingerprint = fingerprint
+
+ def __copy__(self):
+ # type: () -> Scope
+ rv = object.__new__(self.__class__) # type: Scope
+
+ rv._level = self._level
+ rv._name = self._name
+ rv._fingerprint = self._fingerprint
+ rv._transaction = self._transaction
+ rv._user = self._user
+
+ rv._tags = dict(self._tags)
+ rv._contexts = dict(self._contexts)
+ rv._extras = dict(self._extras)
+
+ rv._breadcrumbs = copy(self._breadcrumbs)
+ rv._event_processors = list(self._event_processors)
+ rv._error_processors = list(self._error_processors)
+
+ rv._should_capture = self._should_capture
+ rv._span = self._span
+ rv._session = self._session
+ rv._force_auto_session_tracking = self._force_auto_session_tracking
+
+ return rv
+
+ def __repr__(self):
+ # type: () -> str
+ return "<%s id=%s name=%s>" % (
+ self.__class__.__name__,
+ hex(id(self)),
+ self._name,
+ )
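
A short sketch of typical scope usage through the public API (`configure_scope` is re-exported at the package top level, as the Spark integration above assumes):

```python
import sentry_sdk

with sentry_sdk.configure_scope() as scope:
    scope.set_tag("component", "billing")
    scope.set_user({"id": "42"})

    @scope.add_event_processor
    def strip_extra(event, hint):
        event.pop("extra", None)  # runs for every event on this scope
        return event
```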
diff --git a/third_party/python/sentry_sdk/sentry_sdk/serializer.py b/third_party/python/sentry_sdk/sentry_sdk/serializer.py
new file mode 100644
index 0000000000..3940947553
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/serializer.py
@@ -0,0 +1,336 @@
+import sys
+
+from datetime import datetime
+
+from sentry_sdk.utils import (
+ AnnotatedValue,
+ capture_internal_exception,
+ disable_capture_event,
+ safe_repr,
+ strip_string,
+ format_timestamp,
+)
+
+from sentry_sdk._compat import text_type, PY2, string_types, number_types, iteritems
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from types import TracebackType
+
+ from typing import Any
+ from typing import Dict
+ from typing import List
+ from typing import Optional
+ from typing import Callable
+ from typing import Union
+ from typing import ContextManager
+ from typing import Type
+
+ from sentry_sdk._types import NotImplementedType, Event
+
+ ReprProcessor = Callable[[Any, Dict[str, Any]], Union[NotImplementedType, str]]
+ Segment = Union[str, int]
+
+
+if PY2:
+ # Importing ABCs from collections is deprecated, and will stop working in 3.8
+ # https://github.com/python/cpython/blob/master/Lib/collections/__init__.py#L49
+ from collections import Mapping, Sequence
+
+ serializable_str_types = string_types
+
+else:
+ # New in 3.3
+ # https://docs.python.org/3/library/collections.abc.html
+ from collections.abc import Mapping, Sequence
+
+ # Bytes are technically not strings in Python 3, but we can serialize them
+ serializable_str_types = (str, bytes)
+
+MAX_DATABAG_DEPTH = 5
+MAX_DATABAG_BREADTH = 10
+CYCLE_MARKER = u"<cyclic>"
+
+
+global_repr_processors = [] # type: List[ReprProcessor]
+
+
+def add_global_repr_processor(processor):
+ # type: (ReprProcessor) -> None
+ global_repr_processors.append(processor)
+
+
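+# Tracks object ids along the current serialization path: memoize() pushes an
+# object, __enter__ reports whether its id was already seen (a cycle), and
+# __exit__ pops it again.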
+class Memo(object):
+ __slots__ = ("_ids", "_objs")
+
+ def __init__(self):
+ # type: () -> None
+ self._ids = {} # type: Dict[int, Any]
+ self._objs = [] # type: List[Any]
+
+ def memoize(self, obj):
+ # type: (Any) -> ContextManager[bool]
+ self._objs.append(obj)
+ return self
+
+ def __enter__(self):
+ # type: () -> bool
+ obj = self._objs[-1]
+ if id(obj) in self._ids:
+ return True
+ else:
+ self._ids[id(obj)] = obj
+ return False
+
+ def __exit__(
+ self,
+ ty, # type: Optional[Type[BaseException]]
+ value, # type: Optional[BaseException]
+ tb, # type: Optional[TracebackType]
+ ):
+ # type: (...) -> None
+ self._ids.pop(id(self._objs.pop()), None)
+
+
+def serialize(event, **kwargs):
+ # type: (Event, **Any) -> Event
+ memo = Memo()
+ path = [] # type: List[Segment]
+ meta_stack = [] # type: List[Dict[str, Any]]
+
+ def _annotate(**meta):
+ # type: (**Any) -> None
+ while len(meta_stack) <= len(path):
+ try:
+ segment = path[len(meta_stack) - 1]
+ node = meta_stack[-1].setdefault(text_type(segment), {})
+ except IndexError:
+ node = {}
+
+ meta_stack.append(node)
+
+ meta_stack[-1].setdefault("", {}).update(meta)
+
+ def _should_repr_strings():
+ # type: () -> Optional[bool]
+ """
+ By default non-serializable objects are going through
+ safe_repr(). For certain places in the event (local vars) we
+ want to repr() even things that are JSON-serializable to
+ make their type more apparent. For example, it's useful to
+ see the difference between a unicode-string and a bytestring
+ when viewing a stacktrace.
+
+ For container-types we still don't do anything different.
+ Generally we just try to make the Sentry UI present exactly
+ what a pretty-printed repr would look like.
+
+ :returns: `True` if we are somewhere in frame variables, and `False` if
+ we are in a position where we will never encounter frame variables
+ when recursing (for example, we're in `event.extra`). `None` if we
+ are not (yet) in frame variables, but might encounter them when
+ recursing (e.g. we're in `event.exception`)
+ """
+ try:
+ p0 = path[0]
+ if p0 == "stacktrace" and path[1] == "frames" and path[3] == "vars":
+ return True
+
+ if (
+ p0 in ("threads", "exception")
+ and path[1] == "values"
+ and path[3] == "stacktrace"
+ and path[4] == "frames"
+ and path[6] == "vars"
+ ):
+ return True
+ except IndexError:
+ return None
+
+ return False
+
+ def _is_databag():
+ # type: () -> Optional[bool]
+ """
+ A databag is any value that we need to trim.
+
+ :returns: Works like `_should_repr_strings()`. `True` for "yes",
+ `False` for :"no", `None` for "maybe soon".
+ """
+ try:
+ rv = _should_repr_strings()
+ if rv in (True, None):
+ return rv
+
+ p0 = path[0]
+ if p0 == "request" and path[1] == "data":
+ return True
+
+ if p0 == "breadcrumbs":
+ # Indexing path[1] raises IndexError while we are still at the
+ # top-level "breadcrumbs" key itself, which yields "maybe soon".
+ path[1]
+ return True
+
+ if p0 == "extra":
+ return True
+
+ except IndexError:
+ return None
+
+ return False
+
+ def _serialize_node(
+ obj, # type: Any
+ is_databag=None, # type: Optional[bool]
+ should_repr_strings=None, # type: Optional[bool]
+ segment=None, # type: Optional[Segment]
+ remaining_breadth=None, # type: Optional[int]
+ remaining_depth=None, # type: Optional[int]
+ ):
+ # type: (...) -> Any
+ if segment is not None:
+ path.append(segment)
+
+ try:
+ with memo.memoize(obj) as result:
+ if result:
+ return CYCLE_MARKER
+
+ return _serialize_node_impl(
+ obj,
+ is_databag=is_databag,
+ should_repr_strings=should_repr_strings,
+ remaining_depth=remaining_depth,
+ remaining_breadth=remaining_breadth,
+ )
+ except BaseException:
+ capture_internal_exception(sys.exc_info())
+
+ if is_databag:
+ return u"<failed to serialize, use init(debug=True) to see error logs>"
+
+ return None
+ finally:
+ if segment is not None:
+ path.pop()
+ del meta_stack[len(path) + 1 :]
+
+ def _flatten_annotated(obj):
+ # type: (Any) -> Any
+ if isinstance(obj, AnnotatedValue):
+ _annotate(**obj.metadata)
+ obj = obj.value
+ return obj
+
+ def _serialize_node_impl(
+ obj, is_databag, should_repr_strings, remaining_depth, remaining_breadth
+ ):
+ # type: (Any, Optional[bool], Optional[bool], Optional[int], Optional[int]) -> Any
+ if should_repr_strings is None:
+ should_repr_strings = _should_repr_strings()
+
+ if is_databag is None:
+ is_databag = _is_databag()
+
+ if is_databag and remaining_depth is None:
+ remaining_depth = MAX_DATABAG_DEPTH
+ if is_databag and remaining_breadth is None:
+ remaining_breadth = MAX_DATABAG_BREADTH
+
+ obj = _flatten_annotated(obj)
+
+ if remaining_depth is not None and remaining_depth <= 0:
+ _annotate(rem=[["!limit", "x"]])
+ if is_databag:
+ return _flatten_annotated(strip_string(safe_repr(obj)))
+ return None
+
+ if is_databag and global_repr_processors:
+ hints = {"memo": memo, "remaining_depth": remaining_depth}
+ for processor in global_repr_processors:
+ result = processor(obj, hints)
+ if result is not NotImplemented:
+ return _flatten_annotated(result)
+
+ if obj is None or isinstance(obj, (bool, number_types)):
+ return obj if not should_repr_strings else safe_repr(obj)
+
+ elif isinstance(obj, datetime):
+ return (
+ text_type(format_timestamp(obj))
+ if not should_repr_strings
+ else safe_repr(obj)
+ )
+
+ elif isinstance(obj, Mapping):
+ # Create a temporary copy here to avoid calling into code that
+ # might mutate our dictionary while we're still iterating over it.
+ obj = dict(iteritems(obj))
+
+ rv_dict = {} # type: Dict[str, Any]
+ i = 0
+
+ for k, v in iteritems(obj):
+ if remaining_breadth is not None and i >= remaining_breadth:
+ _annotate(len=len(obj))
+ break
+
+ str_k = text_type(k)
+ v = _serialize_node(
+ v,
+ segment=str_k,
+ should_repr_strings=should_repr_strings,
+ is_databag=is_databag,
+ remaining_depth=remaining_depth - 1
+ if remaining_depth is not None
+ else None,
+ remaining_breadth=remaining_breadth,
+ )
+ rv_dict[str_k] = v
+ i += 1
+
+ return rv_dict
+
+ elif not isinstance(obj, serializable_str_types) and isinstance(obj, Sequence):
+ rv_list = []
+
+ for i, v in enumerate(obj):
+ if remaining_breadth is not None and i >= remaining_breadth:
+ _annotate(len=len(obj))
+ break
+
+ rv_list.append(
+ _serialize_node(
+ v,
+ segment=i,
+ should_repr_strings=should_repr_strings,
+ is_databag=is_databag,
+ remaining_depth=remaining_depth - 1
+ if remaining_depth is not None
+ else None,
+ remaining_breadth=remaining_breadth,
+ )
+ )
+
+ return rv_list
+
+ if should_repr_strings:
+ obj = safe_repr(obj)
+ else:
+ if isinstance(obj, bytes):
+ obj = obj.decode("utf-8", "replace")
+
+ if not isinstance(obj, string_types):
+ obj = safe_repr(obj)
+
+ return _flatten_annotated(strip_string(obj))
+
+ disable_capture_event.set(True)
+ try:
+ rv = _serialize_node(event, **kwargs)
+ if meta_stack and isinstance(rv, dict):
+ rv["_meta"] = meta_stack[0]
+
+ return rv
+ finally:
+ disable_capture_event.set(False)
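+
+# Usage sketch (illustrative only; assumes an event dict shaped like a
+# Sentry event):
+#
+#     event = {"extra": {"a": {"b": {"c": {"d": {"e": {"f": 1}}}}}}}
+#     rv = serialize(event)
+#     # Values nested deeper than MAX_DATABAG_DEPTH are replaced by their
+#     # repr, and trimming annotations are collected under rv["_meta"].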
diff --git a/third_party/python/sentry_sdk/sentry_sdk/sessions.py b/third_party/python/sentry_sdk/sentry_sdk/sessions.py
new file mode 100644
index 0000000000..f4f7137cc0
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/sessions.py
@@ -0,0 +1,249 @@
+import os
+import uuid
+import time
+from datetime import datetime
+from threading import Thread, Lock
+from contextlib import contextmanager
+
+from sentry_sdk._types import MYPY
+from sentry_sdk.utils import format_timestamp
+
+if MYPY:
+ import sentry_sdk
+
+ from typing import Optional
+ from typing import Union
+ from typing import Any
+ from typing import Dict
+ from typing import Generator
+
+ from sentry_sdk._types import SessionStatus
+
+
+def is_auto_session_tracking_enabled(hub=None):
+ # type: (Optional[sentry_sdk.Hub]) -> bool
+ """Utility function to find out if session tracking is enabled."""
+ if hub is None:
+ hub = sentry_sdk.Hub.current
+ should_track = hub.scope._force_auto_session_tracking
+ if should_track is None:
+ exp = hub.client.options["_experiments"] if hub.client else {}
+ should_track = exp.get("auto_session_tracking")
+ return should_track
+
+
+@contextmanager
+def auto_session_tracking(hub=None):
+ # type: (Optional[sentry_sdk.Hub]) -> Generator[None, None, None]
+ """Starts and stops a session automatically around a block."""
+ if hub is None:
+ hub = sentry_sdk.Hub.current
+ should_track = is_auto_session_tracking_enabled(hub)
+ if should_track:
+ hub.start_session()
+ try:
+ yield
+ finally:
+ if should_track:
+ hub.end_session()
+
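+# Usage sketch (illustrative only; `handle_request` is hypothetical):
+#
+#     with auto_session_tracking():
+#         handle_request()  # a session is started/ended around this block
+#                           # when the experiment flag enables tracking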
+
+def _make_uuid(
+ val, # type: Union[str, uuid.UUID]
+):
+ # type: (...) -> uuid.UUID
+ if isinstance(val, uuid.UUID):
+ return val
+ return uuid.UUID(val)
+
+
+TERMINAL_SESSION_STATES = ("exited", "abnormal", "crashed")
+
+
+class SessionFlusher(object):
+ def __init__(
+ self,
+ flush_func, # type: Any
+ flush_interval=10, # type: int
+ ):
+ # type: (...) -> None
+ self.flush_func = flush_func
+ self.flush_interval = flush_interval
+ self.pending = {} # type: Dict[str, Any]
+ self._thread = None # type: Optional[Thread]
+ self._thread_lock = Lock()
+ self._thread_for_pid = None # type: Optional[int]
+ self._running = True
+
+ def flush(self):
+ # type: (...) -> None
+ pending = self.pending
+ self.pending = {}
+ self.flush_func(list(pending.values()))
+
+ def _ensure_running(self):
+ # type: (...) -> None
+ if self._thread_for_pid == os.getpid() and self._thread is not None:
+ return None
+ with self._thread_lock:
+ if self._thread_for_pid == os.getpid() and self._thread is not None:
+ return None
+
+ def _thread():
+ # type: (...) -> None
+ while self._running:
+ time.sleep(self.flush_interval)
+ if self.pending and self._running:
+ self.flush()
+
+ thread = Thread(target=_thread)
+ thread.daemon = True
+ thread.start()
+ self._thread = thread
+ self._thread_for_pid = os.getpid()
+ return None
+
+ def add_session(
+ self, session # type: Session
+ ):
+ # type: (...) -> None
+ self.pending[session.sid.hex] = session.to_json()
+ self._ensure_running()
+
+ def kill(self):
+ # type: (...) -> None
+ self._running = False
+
+ def __del__(self):
+ # type: (...) -> None
+ self.kill()
+
+
+class Session(object):
+ def __init__(
+ self,
+ sid=None, # type: Optional[Union[str, uuid.UUID]]
+ did=None, # type: Optional[str]
+ timestamp=None, # type: Optional[datetime]
+ started=None, # type: Optional[datetime]
+ duration=None, # type: Optional[float]
+ status=None, # type: Optional[SessionStatus]
+ release=None, # type: Optional[str]
+ environment=None, # type: Optional[str]
+ user_agent=None, # type: Optional[str]
+ ip_address=None, # type: Optional[str]
+ errors=None, # type: Optional[int]
+ user=None, # type: Optional[Any]
+ ):
+ # type: (...) -> None
+ if sid is None:
+ sid = uuid.uuid4()
+ if started is None:
+ started = datetime.utcnow()
+ if status is None:
+ status = "ok"
+ self.status = status
+ self.did = None # type: Optional[str]
+ self.started = started
+ self.release = None # type: Optional[str]
+ self.environment = None # type: Optional[str]
+ self.duration = None # type: Optional[float]
+ self.user_agent = None # type: Optional[str]
+ self.ip_address = None # type: Optional[str]
+ self.errors = 0
+
+ self.update(
+ sid=sid,
+ did=did,
+ timestamp=timestamp,
+ duration=duration,
+ release=release,
+ environment=environment,
+ user_agent=user_agent,
+ ip_address=ip_address,
+ errors=errors,
+ user=user,
+ )
+
+ def update(
+ self,
+ sid=None, # type: Optional[Union[str, uuid.UUID]]
+ did=None, # type: Optional[str]
+ timestamp=None, # type: Optional[datetime]
+ duration=None, # type: Optional[float]
+ status=None, # type: Optional[SessionStatus]
+ release=None, # type: Optional[str]
+ environment=None, # type: Optional[str]
+ user_agent=None, # type: Optional[str]
+ ip_address=None, # type: Optional[str]
+ errors=None, # type: Optional[int]
+ user=None, # type: Optional[Any]
+ ):
+ # type: (...) -> None
+ # If a user is supplied we pull some data from it
+ if user:
+ if ip_address is None:
+ ip_address = user.get("ip_address")
+ if did is None:
+ did = user.get("id") or user.get("email") or user.get("username")
+
+ if sid is not None:
+ self.sid = _make_uuid(sid)
+ if did is not None:
+ self.did = str(did)
+ if timestamp is None:
+ timestamp = datetime.utcnow()
+ self.timestamp = timestamp
+ if duration is not None:
+ self.duration = duration
+ if release is not None:
+ self.release = release
+ if environment is not None:
+ self.environment = environment
+ if ip_address is not None:
+ self.ip_address = ip_address
+ if user_agent is not None:
+ self.user_agent = user_agent
+ if errors is not None:
+ self.errors = errors
+
+ if status is not None:
+ self.status = status
+
+ def close(
+ self, status=None # type: Optional[SessionStatus]
+ ):
+ # type: (...) -> Any
+ if status is None and self.status == "ok":
+ status = "exited"
+ if status is not None:
+ self.update(status=status)
+
+ def to_json(self):
+ # type: (...) -> Any
+ rv = {
+ "sid": str(self.sid),
+ "init": True,
+ "started": format_timestamp(self.started),
+ "timestamp": format_timestamp(self.timestamp),
+ "status": self.status,
+ } # type: Dict[str, Any]
+ if self.errors:
+ rv["errors"] = self.errors
+ if self.did is not None:
+ rv["did"] = self.did
+ if self.duration is not None:
+ rv["duration"] = self.duration
+
+ attrs = {}
+ if self.release is not None:
+ attrs["release"] = self.release
+ if self.environment is not None:
+ attrs["environment"] = self.environment
+ if self.ip_address is not None:
+ attrs["ip_address"] = self.ip_address
+ if self.user_agent is not None:
+ attrs["user_agent"] = self.user_agent
+ if attrs:
+ rv["attrs"] = attrs
+ return rv
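+
+# Shape of the payload produced by to_json (values illustrative):
+#
+#     s = Session(release="myapp@1.0.0", errors=1)
+#     s.close()
+#     s.to_json()
+#     # => {"sid": "...", "init": True, "started": "...", "timestamp": "...",
+#     #     "status": "exited", "errors": 1,
+#     #     "attrs": {"release": "myapp@1.0.0"}}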
diff --git a/third_party/python/sentry_sdk/sentry_sdk/tracing.py b/third_party/python/sentry_sdk/sentry_sdk/tracing.py
new file mode 100644
index 0000000000..9293365b83
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/tracing.py
@@ -0,0 +1,498 @@
+import re
+import uuid
+import contextlib
+import time
+
+from datetime import datetime, timedelta
+
+import sentry_sdk
+
+from sentry_sdk.utils import capture_internal_exceptions, logger, to_string
+from sentry_sdk._compat import PY2
+from sentry_sdk._types import MYPY
+
+if PY2:
+ from collections import Mapping
+else:
+ from collections.abc import Mapping
+
+if MYPY:
+ import typing
+
+ from typing import Generator
+ from typing import Optional
+ from typing import Any
+ from typing import Dict
+ from typing import List
+ from typing import Tuple
+
+_traceparent_header_format_re = re.compile(
+ "^[ \t]*" # whitespace
+ "([0-9a-f]{32})?" # trace_id
+ "-?([0-9a-f]{16})?" # span_id
+ "-?([01])?" # sampled
+ "[ \t]*$" # whitespace
+)
+
+
+class EnvironHeaders(Mapping): # type: ignore
+ def __init__(
+ self,
+ environ, # type: typing.Mapping[str, str]
+ prefix="HTTP_", # type: str
+ ):
+ # type: (...) -> None
+ self.environ = environ
+ self.prefix = prefix
+
+ def __getitem__(self, key):
+ # type: (str) -> Optional[Any]
+ return self.environ[self.prefix + key.replace("-", "_").upper()]
+
+ def __len__(self):
+ # type: () -> int
+ return sum(1 for _ in iter(self))
+
+ def __iter__(self):
+ # type: () -> Generator[str, None, None]
+ for k in self.environ:
+ if not isinstance(k, str):
+ continue
+
+ k = k.replace("-", "_").upper()
+ if not k.startswith(self.prefix):
+ continue
+
+ yield k[len(self.prefix) :]
+
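+# EnvironHeaders sketch: case-insensitive header lookup against a WSGI
+# environ (illustrative values):
+#
+#     environ = {"HTTP_SENTRY_TRACE": "d0c3...-a1b2...-1"}
+#     headers = EnvironHeaders(environ)
+#     headers.get("sentry-trace")  # -> "d0c3...-a1b2...-1"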
+
+class _SpanRecorder(object):
+ __slots__ = ("maxlen", "finished_spans", "open_span_count")
+
+ def __init__(self, maxlen):
+ # type: (int) -> None
+ self.maxlen = maxlen
+ self.open_span_count = 0 # type: int
+ self.finished_spans = [] # type: List[Span]
+
+ def start_span(self, span):
+ # type: (Span) -> None
+
+ # This is just a safeguard so that we don't run out of memory while
+ # recording a lot of spans. Once the limit is hit we stop recording
+ # new spans, keeping only the start of the trace tree (i.e. the first
+ # `maxlen` spans with the smallest start_timestamp).
+ self.open_span_count += 1
+ if self.open_span_count > self.maxlen:
+ span._span_recorder = None
+
+ def finish_span(self, span):
+ # type: (Span) -> None
+ self.finished_spans.append(span)
+
+
+class Span(object):
+ __slots__ = (
+ "trace_id",
+ "span_id",
+ "parent_span_id",
+ "same_process_as_parent",
+ "sampled",
+ "transaction",
+ "op",
+ "description",
+ "start_timestamp",
+ "_start_timestamp_monotonic",
+ "status",
+ "timestamp",
+ "_tags",
+ "_data",
+ "_span_recorder",
+ "hub",
+ "_context_manager_state",
+ )
+
+ def __init__(
+ self,
+ trace_id=None, # type: Optional[str]
+ span_id=None, # type: Optional[str]
+ parent_span_id=None, # type: Optional[str]
+ same_process_as_parent=True, # type: bool
+ sampled=None, # type: Optional[bool]
+ transaction=None, # type: Optional[str]
+ op=None, # type: Optional[str]
+ description=None, # type: Optional[str]
+ hub=None, # type: Optional[sentry_sdk.Hub]
+ status=None, # type: Optional[str]
+ ):
+ # type: (...) -> None
+ self.trace_id = trace_id or uuid.uuid4().hex
+ self.span_id = span_id or uuid.uuid4().hex[16:]
+ self.parent_span_id = parent_span_id
+ self.same_process_as_parent = same_process_as_parent
+ self.sampled = sampled
+ self.transaction = transaction
+ self.op = op
+ self.description = description
+ self.status = status
+ self.hub = hub
+ self._tags = {} # type: Dict[str, str]
+ self._data = {} # type: Dict[str, Any]
+ self.start_timestamp = datetime.utcnow()
+ try:
+ # TODO: For Python 3.7+, we could use a clock with ns resolution:
+ # self._start_timestamp_monotonic = time.perf_counter_ns()
+
+ # Python 3.3+
+ self._start_timestamp_monotonic = time.perf_counter()
+ except AttributeError:
+ pass
+
+ #: End timestamp of span
+ self.timestamp = None # type: Optional[datetime]
+
+ self._span_recorder = None # type: Optional[_SpanRecorder]
+
+ def init_finished_spans(self, maxlen):
+ # type: (int) -> None
+ if self._span_recorder is None:
+ self._span_recorder = _SpanRecorder(maxlen)
+ self._span_recorder.start_span(self)
+
+ def __repr__(self):
+ # type: () -> str
+ return (
+ "<%s(transaction=%r, trace_id=%r, span_id=%r, parent_span_id=%r, sampled=%r)>"
+ % (
+ self.__class__.__name__,
+ self.transaction,
+ self.trace_id,
+ self.span_id,
+ self.parent_span_id,
+ self.sampled,
+ )
+ )
+
+ def __enter__(self):
+ # type: () -> Span
+ hub = self.hub or sentry_sdk.Hub.current
+
+ _, scope = hub._stack[-1]
+ old_span = scope.span
+ scope.span = self
+ self._context_manager_state = (hub, scope, old_span)
+ return self
+
+ def __exit__(self, ty, value, tb):
+ # type: (Optional[Any], Optional[Any], Optional[Any]) -> None
+ if value is not None:
+ self.set_status("internal_error")
+
+ hub, scope, old_span = self._context_manager_state
+ del self._context_manager_state
+
+ self.finish(hub)
+ scope.span = old_span
+
+ def new_span(self, **kwargs):
+ # type: (**Any) -> Span
+ rv = type(self)(
+ trace_id=self.trace_id,
+ span_id=None,
+ parent_span_id=self.span_id,
+ sampled=self.sampled,
+ **kwargs
+ )
+
+ rv._span_recorder = self._span_recorder
+ return rv
+
+ @classmethod
+ def continue_from_environ(cls, environ):
+ # type: (typing.Mapping[str, str]) -> Span
+ return cls.continue_from_headers(EnvironHeaders(environ))
+
+ @classmethod
+ def continue_from_headers(cls, headers):
+ # type: (typing.Mapping[str, str]) -> Span
+ parent = cls.from_traceparent(headers.get("sentry-trace"))
+ if parent is None:
+ return cls()
+ parent.same_process_as_parent = False
+ return parent
+
+ def iter_headers(self):
+ # type: () -> Generator[Tuple[str, str], None, None]
+ yield "sentry-trace", self.to_traceparent()
+
+ @classmethod
+ def from_traceparent(cls, traceparent):
+ # type: (Optional[str]) -> Optional[Span]
+ if not traceparent:
+ return None
+
+ if traceparent.startswith("00-") and traceparent.endswith("-00"):
+ traceparent = traceparent[3:-3]
+
+ match = _traceparent_header_format_re.match(str(traceparent))
+ if match is None:
+ return None
+
+ trace_id, span_id, sampled_str = match.groups()
+
+ if trace_id is not None:
+ trace_id = "{:032x}".format(int(trace_id, 16))
+ if span_id is not None:
+ span_id = "{:016x}".format(int(span_id, 16))
+
+ if sampled_str:
+ sampled = sampled_str != "0" # type: Optional[bool]
+ else:
+ sampled = None
+
+ return cls(trace_id=trace_id, parent_span_id=span_id, sampled=sampled)
+
+ def to_traceparent(self):
+ # type: () -> str
+ sampled = ""
+ if self.sampled is True:
+ sampled = "1"
+ if self.sampled is False:
+ sampled = "0"
+ return "%s-%s-%s" % (self.trace_id, self.span_id, sampled)
+
+ def to_legacy_traceparent(self):
+ # type: () -> str
+ return "00-%s-%s-00" % (self.trace_id, self.span_id)
+
+ def set_tag(self, key, value):
+ # type: (str, Any) -> None
+ self._tags[key] = value
+
+ def set_data(self, key, value):
+ # type: (str, Any) -> None
+ self._data[key] = value
+
+ def set_status(self, value):
+ # type: (str) -> None
+ self.status = value
+
+ def set_http_status(self, http_status):
+ # type: (int) -> None
+ self.set_tag("http.status_code", http_status)
+
+ if http_status < 400:
+ self.set_status("ok")
+ elif 400 <= http_status < 500:
+ if http_status == 403:
+ self.set_status("permission_denied")
+ elif http_status == 404:
+ self.set_status("not_found")
+ elif http_status == 429:
+ self.set_status("resource_exhausted")
+ elif http_status == 413:
+ self.set_status("failed_precondition")
+ elif http_status == 401:
+ self.set_status("unauthenticated")
+ elif http_status == 409:
+ self.set_status("already_exists")
+ else:
+ self.set_status("invalid_argument")
+ elif 500 <= http_status < 600:
+ if http_status == 504:
+ self.set_status("deadline_exceeded")
+ elif http_status == 501:
+ self.set_status("unimplemented")
+ elif http_status == 503:
+ self.set_status("unavailable")
+ else:
+ self.set_status("internal_error")
+ else:
+ self.set_status("unknown_error")
+
+ def is_success(self):
+ # type: () -> bool
+ return self.status == "ok"
+
+ def finish(self, hub=None):
+ # type: (Optional[sentry_sdk.Hub]) -> Optional[str]
+ hub = hub or self.hub or sentry_sdk.Hub.current
+
+ if self.timestamp is not None:
+ # This transaction is already finished, so we should not flush it again.
+ return None
+
+ try:
+ duration_seconds = time.perf_counter() - self._start_timestamp_monotonic
+ self.timestamp = self.start_timestamp + timedelta(seconds=duration_seconds)
+ except AttributeError:
+ self.timestamp = datetime.utcnow()
+
+ _maybe_create_breadcrumbs_from_span(hub, self)
+
+ if self._span_recorder is None:
+ return None
+
+ self._span_recorder.finish_span(self)
+
+ if self.transaction is None:
+ # If this has no transaction set we assume there's a parent
+ # transaction for this span that would be flushed out eventually.
+ return None
+
+ client = hub.client
+
+ if client is None:
+ # We have no client and therefore nowhere to send this transaction
+ # event.
+ return None
+
+ if not self.sampled:
+ # At this point a `sampled = None` should have already been
+ # resolved to a concrete decision. If `sampled` is `None`, it's
+ # likely that somebody used `with sentry_sdk.Hub.start_span(..)` on a
+ # non-transaction span and later decided to make it a transaction.
+ if self.sampled is None:
+ logger.warning("Discarding transaction Span without sampling decision")
+
+ return None
+
+ return hub.capture_event(
+ {
+ "type": "transaction",
+ "transaction": self.transaction,
+ "contexts": {"trace": self.get_trace_context()},
+ "tags": self._tags,
+ "timestamp": self.timestamp,
+ "start_timestamp": self.start_timestamp,
+ "spans": [
+ s.to_json(client)
+ for s in self._span_recorder.finished_spans
+ if s is not self
+ ],
+ }
+ )
+
+ def to_json(self, client):
+ # type: (Optional[sentry_sdk.Client]) -> Dict[str, Any]
+ rv = {
+ "trace_id": self.trace_id,
+ "span_id": self.span_id,
+ "parent_span_id": self.parent_span_id,
+ "same_process_as_parent": self.same_process_as_parent,
+ "op": self.op,
+ "description": self.description,
+ "start_timestamp": self.start_timestamp,
+ "timestamp": self.timestamp,
+ } # type: Dict[str, Any]
+
+ transaction = self.transaction
+ if transaction:
+ rv["transaction"] = transaction
+
+ if self.status:
+ self._tags["status"] = self.status
+
+ tags = self._tags
+ if tags:
+ rv["tags"] = tags
+
+ data = self._data
+ if data:
+ rv["data"] = data
+
+ return rv
+
+ def get_trace_context(self):
+ # type: () -> Any
+ rv = {
+ "trace_id": self.trace_id,
+ "span_id": self.span_id,
+ "parent_span_id": self.parent_span_id,
+ "op": self.op,
+ "description": self.description,
+ }
+ if self.status:
+ rv["status"] = self.status
+
+ return rv
+
+
+def _format_sql(cursor, sql):
+ # type: (Any, str) -> Optional[str]
+
+ real_sql = None
+
+ # If we're using psycopg2, it could be that we're
+ # looking at a query that uses Composed objects. Use psycopg2's mogrify
+ # function to format the query. We lose per-parameter trimming but gain
+ # accuracy in formatting.
+ try:
+ if hasattr(cursor, "mogrify"):
+ real_sql = cursor.mogrify(sql)
+ if isinstance(real_sql, bytes):
+ real_sql = real_sql.decode(cursor.connection.encoding)
+ except Exception:
+ real_sql = None
+
+ return real_sql or to_string(sql)
+
+
+@contextlib.contextmanager
+def record_sql_queries(
+ hub, # type: sentry_sdk.Hub
+ cursor, # type: Any
+ query, # type: Any
+ params_list, # type: Any
+ paramstyle, # type: Optional[str]
+ executemany, # type: bool
+):
+ # type: (...) -> Generator[Span, None, None]
+
+ # TODO: Bring back capturing of params by default
+ if hub.client and hub.client.options["_experiments"].get(
+ "record_sql_params", False
+ ):
+ if not params_list or params_list == [None]:
+ params_list = None
+
+ if paramstyle == "pyformat":
+ paramstyle = "format"
+ else:
+ params_list = None
+ paramstyle = None
+
+ query = _format_sql(cursor, query)
+
+ data = {}
+ if params_list is not None:
+ data["db.params"] = params_list
+ if paramstyle is not None:
+ data["db.paramstyle"] = paramstyle
+ if executemany:
+ data["db.executemany"] = True
+
+ with capture_internal_exceptions():
+ hub.add_breadcrumb(message=query, category="query", data=data)
+
+ with hub.start_span(op="db", description=query) as span:
+ for k, v in data.items():
+ span.set_data(k, v)
+ yield span
+
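+# Sketch of how an integration might use record_sql_queries (the cursor
+# object here is hypothetical):
+#
+#     with record_sql_queries(
+#         hub, cursor, "SELECT 1", None, paramstyle=None, executemany=False
+#     ) as span:
+#         cursor.execute("SELECT 1")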
+
+def _maybe_create_breadcrumbs_from_span(hub, span):
+ # type: (sentry_sdk.Hub, Span) -> None
+ if span.op == "redis":
+ hub.add_breadcrumb(
+ message=span.description, type="redis", category="redis", data=span._tags
+ )
+ elif span.op == "http":
+ hub.add_breadcrumb(type="http", category="httplib", data=span._data)
+ elif span.op == "subprocess":
+ hub.add_breadcrumb(
+ type="subprocess",
+ category="subprocess",
+ message=span.description,
+ data=span._data,
+ )
diff --git a/third_party/python/sentry_sdk/sentry_sdk/transport.py b/third_party/python/sentry_sdk/sentry_sdk/transport.py
new file mode 100644
index 0000000000..60ab611c54
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/transport.py
@@ -0,0 +1,365 @@
+from __future__ import print_function
+
+import json
+import io
+import urllib3 # type: ignore
+import certifi
+import gzip
+
+from datetime import datetime, timedelta
+
+from sentry_sdk.utils import Dsn, logger, capture_internal_exceptions
+from sentry_sdk.worker import BackgroundWorker
+from sentry_sdk.envelope import Envelope, get_event_data_category
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from typing import Type
+ from typing import Any
+ from typing import Optional
+ from typing import Dict
+ from typing import Union
+ from typing import Callable
+ from urllib3.poolmanager import PoolManager # type: ignore
+ from urllib3.poolmanager import ProxyManager
+
+ from sentry_sdk._types import Event
+
+try:
+ from urllib.request import getproxies
+except ImportError:
+ from urllib import getproxies # type: ignore
+
+
+class Transport(object):
+ """Baseclass for all transports.
+
+ A transport is used to send an event to sentry.
+ """
+
+ parsed_dsn = None # type: Optional[Dsn]
+
+ def __init__(
+ self, options=None # type: Optional[Dict[str, Any]]
+ ):
+ # type: (...) -> None
+ self.options = options
+ if options and options["dsn"] is not None and options["dsn"]:
+ self.parsed_dsn = Dsn(options["dsn"])
+ else:
+ self.parsed_dsn = None
+
+ def capture_event(
+ self, event # type: Event
+ ):
+ # type: (...) -> None
+ """This gets invoked with the event dictionary when an event should
+ be sent to sentry.
+ """
+ raise NotImplementedError()
+
+ def capture_envelope(
+ self, envelope # type: Envelope
+ ):
+ # type: (...) -> None
+ """This gets invoked with an envelope when an event should
+ be sent to sentry. The default implementation invokes `capture_event`
+ if the envelope contains an event and ignores all other envelopes.
+ """
+ event = envelope.get_event()
+ if event is not None:
+ self.capture_event(event)
+ return None
+
+ def flush(
+ self,
+ timeout, # type: float
+ callback=None, # type: Optional[Any]
+ ):
+ # type: (...) -> None
+ """Wait `timeout` seconds for the current events to be sent out."""
+ pass
+
+ def kill(self):
+ # type: () -> None
+ """Forcefully kills the transport."""
+ pass
+
+ def __del__(self):
+ # type: () -> None
+ try:
+ self.kill()
+ except Exception:
+ pass
+
+
+class HttpTransport(Transport):
+ """The default HTTP transport."""
+
+ def __init__(
+ self, options # type: Dict[str, Any]
+ ):
+ # type: (...) -> None
+ from sentry_sdk.consts import VERSION
+
+ Transport.__init__(self, options)
+ assert self.parsed_dsn is not None
+ self._worker = BackgroundWorker()
+ self._auth = self.parsed_dsn.to_auth("sentry.python/%s" % VERSION)
+ self._disabled_until = {} # type: Dict[Any, datetime]
+ self._retry = urllib3.util.Retry()
+ self.options = options
+
+ self._pool = self._make_pool(
+ self.parsed_dsn,
+ http_proxy=options["http_proxy"],
+ https_proxy=options["https_proxy"],
+ ca_certs=options["ca_certs"],
+ )
+
+ from sentry_sdk import Hub
+
+ self.hub_cls = Hub
+
+ def _update_rate_limits(self, response):
+ # type: (urllib3.HTTPResponse) -> None
+
+ # Newer Sentry servers send more detailed rate limit insights in this
+ # header. We honor it regardless of the status code to update our
+ # internal rate limits.
+ header = response.headers.get("x-sentry-rate-limit")
+ if header:
+ for limit in header.split(","):
+ try:
+ retry_after, categories, _ = limit.strip().split(":", 2)
+ retry_after = datetime.utcnow() + timedelta(
+ seconds=int(retry_after)
+ )
+ for category in categories.split(";") or (None,):
+ self._disabled_until[category] = retry_after
+ except (LookupError, ValueError):
+ continue
+
+ # Older Sentry servers only communicate global rate limit hits via
+ # the retry-after header on 429 responses. This header can also be
+ # emitted by newer servers if a proxy in front wants to globally slow
+ # things down.
+ elif response.status == 429:
+ self._disabled_until[None] = datetime.utcnow() + timedelta(
+ seconds=self._retry.get_retry_after(response) or 60
+ )
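+
+ # Header format sketch for _update_rate_limits (illustrative; the scope
+ # field is discarded): a value like "60:error;transaction:organization"
+ # parses as retry_after=60s for the "error" and "transaction" categories.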
+
+ def _send_request(
+ self,
+ body, # type: bytes
+ headers, # type: Dict[str, str]
+ ):
+ # type: (...) -> None
+ headers.update(
+ {
+ "User-Agent": str(self._auth.client),
+ "X-Sentry-Auth": str(self._auth.to_header()),
+ }
+ )
+ response = self._pool.request(
+ "POST", str(self._auth.store_api_url), body=body, headers=headers
+ )
+
+ try:
+ self._update_rate_limits(response)
+
+ if response.status == 429:
+ # If we hit a 429, something was rate limited, but we already
+ # acted on this in `self._update_rate_limits`.
+ pass
+
+ elif response.status >= 300 or response.status < 200:
+ logger.error(
+ "Unexpected status code: %s (body: %s)",
+ response.status,
+ response.data,
+ )
+ finally:
+ response.close()
+
+ def _check_disabled(self, category):
+ # type: (str) -> bool
+ def _disabled(bucket):
+ # type: (Any) -> bool
+ ts = self._disabled_until.get(bucket)
+ return ts is not None and ts > datetime.utcnow()
+
+ return _disabled(category) or _disabled(None)
+
+ def _send_event(
+ self, event # type: Event
+ ):
+ # type: (...) -> None
+ if self._check_disabled(get_event_data_category(event)):
+ return None
+
+ body = io.BytesIO()
+ with gzip.GzipFile(fileobj=body, mode="w") as f:
+ f.write(json.dumps(event, allow_nan=False).encode("utf-8"))
+
+ assert self.parsed_dsn is not None
+ logger.debug(
+ "Sending event, type:%s level:%s event_id:%s project:%s host:%s"
+ % (
+ event.get("type") or "null",
+ event.get("level") or "null",
+ event.get("event_id") or "null",
+ self.parsed_dsn.project_id,
+ self.parsed_dsn.host,
+ )
+ )
+ self._send_request(
+ body.getvalue(),
+ headers={"Content-Type": "application/json", "Content-Encoding": "gzip"},
+ )
+ return None
+
+ def _send_envelope(
+ self, envelope # type: Envelope
+ ):
+ # type: (...) -> None
+
+ # remove all items from the envelope which are over quota
+ envelope.items[:] = [
+ x for x in envelope.items if not self._check_disabled(x.data_category)
+ ]
+ if not envelope.items:
+ return None
+
+ body = io.BytesIO()
+ with gzip.GzipFile(fileobj=body, mode="w") as f:
+ envelope.serialize_into(f)
+
+ assert self.parsed_dsn is not None
+ logger.debug(
+ "Sending envelope [%s] project:%s host:%s",
+ envelope.description,
+ self.parsed_dsn.project_id,
+ self.parsed_dsn.host,
+ )
+ self._send_request(
+ body.getvalue(),
+ headers={
+ "Content-Type": "application/x-sentry-envelope",
+ "Content-Encoding": "gzip",
+ },
+ )
+ return None
+
+ def _get_pool_options(self, ca_certs):
+ # type: (Optional[Any]) -> Dict[str, Any]
+ return {
+ "num_pools": 2,
+ "cert_reqs": "CERT_REQUIRED",
+ "ca_certs": ca_certs or certifi.where(),
+ }
+
+ def _make_pool(
+ self,
+ parsed_dsn, # type: Dsn
+ http_proxy, # type: Optional[str]
+ https_proxy, # type: Optional[str]
+ ca_certs, # type: Optional[Any]
+ ):
+ # type: (...) -> Union[PoolManager, ProxyManager]
+ proxy = None
+
+ # try HTTPS first
+ if parsed_dsn.scheme == "https" and (https_proxy != ""):
+ proxy = https_proxy or getproxies().get("https")
+
+ # maybe fallback to HTTP proxy
+ if not proxy and (http_proxy != ""):
+ proxy = http_proxy or getproxies().get("http")
+
+ opts = self._get_pool_options(ca_certs)
+
+ if proxy:
+ return urllib3.ProxyManager(proxy, **opts)
+ else:
+ return urllib3.PoolManager(**opts)
+
+ def capture_event(
+ self, event # type: Event
+ ):
+ # type: (...) -> None
+ hub = self.hub_cls.current
+
+ def send_event_wrapper():
+ # type: () -> None
+ with hub:
+ with capture_internal_exceptions():
+ self._send_event(event)
+
+ self._worker.submit(send_event_wrapper)
+
+ def capture_envelope(
+ self, envelope # type: Envelope
+ ):
+ # type: (...) -> None
+ hub = self.hub_cls.current
+
+ def send_envelope_wrapper():
+ # type: () -> None
+ with hub:
+ with capture_internal_exceptions():
+ self._send_envelope(envelope)
+
+ self._worker.submit(send_envelope_wrapper)
+
+ def flush(
+ self,
+ timeout, # type: float
+ callback=None, # type: Optional[Any]
+ ):
+ # type: (...) -> None
+ logger.debug("Flushing HTTP transport")
+ if timeout > 0:
+ self._worker.flush(timeout, callback)
+
+ def kill(self):
+ # type: () -> None
+ logger.debug("Killing HTTP transport")
+ self._worker.kill()
+
+
+class _FunctionTransport(Transport):
+ def __init__(
+ self, func # type: Callable[[Event], None]
+ ):
+ # type: (...) -> None
+ Transport.__init__(self)
+ self._func = func
+
+ def capture_event(
+ self, event # type: Event
+ ):
+ # type: (...) -> None
+ self._func(event)
+ return None
+
+
+def make_transport(options):
+ # type: (Dict[str, Any]) -> Optional[Transport]
+ ref_transport = options["transport"]
+
+ # If no transport is given, we use the http transport class
+ if ref_transport is None:
+ transport_cls = HttpTransport # type: Type[Transport]
+ elif isinstance(ref_transport, Transport):
+ return ref_transport
+ elif isinstance(ref_transport, type) and issubclass(ref_transport, Transport):
+ transport_cls = ref_transport
+ elif callable(ref_transport):
+ return _FunctionTransport(ref_transport) # type: ignore
+
+ # If a transport class is given, only instantiate it if the dsn is
+ # not empty or None.
+ if options["dsn"]:
+ return transport_cls(options)
+
+ return None
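+
+# Resolution sketch (illustrative): a callable transport option becomes a
+# _FunctionTransport regardless of the dsn, e.g.
+#
+#     make_transport({"transport": lambda event: None, "dsn": ""})
+#     # -> _FunctionTransport wrapping the callable
+#
+# whereas transport=None with a non-empty dsn builds an HttpTransport from
+# the full client options dict.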
diff --git a/third_party/python/sentry_sdk/sentry_sdk/utils.py b/third_party/python/sentry_sdk/sentry_sdk/utils.py
new file mode 100644
index 0000000000..d92309c5f7
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/utils.py
@@ -0,0 +1,831 @@
+import os
+import sys
+import linecache
+import logging
+
+from datetime import datetime
+
+import sentry_sdk
+from sentry_sdk._compat import urlparse, text_type, implements_str, PY2
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from types import FrameType
+ from types import TracebackType
+ from typing import Any
+ from typing import Callable
+ from typing import Dict
+ from typing import ContextManager
+ from typing import Iterator
+ from typing import List
+ from typing import Optional
+ from typing import Set
+ from typing import Tuple
+ from typing import Union
+ from typing import Type
+
+ from sentry_sdk._types import ExcInfo
+
+epoch = datetime(1970, 1, 1)
+
+
+# The logger is created here but initialized in the debug support module
+logger = logging.getLogger("sentry_sdk.errors")
+
+MAX_STRING_LENGTH = 512
+MAX_FORMAT_PARAM_LENGTH = 128
+
+
+def _get_debug_hub():
+ # type: () -> Optional[sentry_sdk.Hub]
+ # This function is replaced by debug.py
+ pass
+
+
+class CaptureInternalException(object):
+ __slots__ = ()
+
+ def __enter__(self):
+ # type: () -> ContextManager[Any]
+ return self
+
+ def __exit__(self, ty, value, tb):
+ # type: (Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]) -> bool
+ if ty is not None and value is not None:
+ capture_internal_exception((ty, value, tb))
+
+ return True
+
+
+_CAPTURE_INTERNAL_EXCEPTION = CaptureInternalException()
+
+
+def capture_internal_exceptions():
+ # type: () -> ContextManager[Any]
+ return _CAPTURE_INTERNAL_EXCEPTION
+
+
+def capture_internal_exception(exc_info):
+ # type: (ExcInfo) -> None
+ hub = _get_debug_hub()
+ if hub is not None:
+ hub._capture_internal_exception(exc_info)
+
+
+def to_timestamp(value):
+ # type: (datetime) -> float
+ return (value - epoch).total_seconds()
+
+
+def format_timestamp(value):
+ # type: (datetime) -> str
+ return value.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
+
+
+def event_hint_with_exc_info(exc_info=None):
+ # type: (Optional[ExcInfo]) -> Dict[str, Optional[ExcInfo]]
+ """Creates a hint with the exc info filled in."""
+ if exc_info is None:
+ exc_info = sys.exc_info()
+ else:
+ exc_info = exc_info_from_error(exc_info)
+ if exc_info[0] is None:
+ exc_info = None
+ return {"exc_info": exc_info}
+
+
+class BadDsn(ValueError):
+ """Raised on invalid DSNs."""
+
+
+@implements_str
+class Dsn(object):
+ """Represents a DSN."""
+
+ def __init__(self, value):
+ # type: (Union[Dsn, str]) -> None
+ if isinstance(value, Dsn):
+ self.__dict__ = dict(value.__dict__)
+ return
+ parts = urlparse.urlsplit(text_type(value))
+
+ if parts.scheme not in (u"http", u"https"):
+ raise BadDsn("Unsupported scheme %r" % parts.scheme)
+ self.scheme = parts.scheme
+
+ if parts.hostname is None:
+ raise BadDsn("Missing hostname")
+
+ self.host = parts.hostname
+
+ if parts.port is None:
+ self.port = self.scheme == "https" and 443 or 80
+ else:
+ self.port = parts.port
+
+ if not parts.username:
+ raise BadDsn("Missing public key")
+
+ self.public_key = parts.username
+ self.secret_key = parts.password
+
+ path = parts.path.rsplit("/", 1)
+
+ try:
+ self.project_id = text_type(int(path.pop()))
+ except (ValueError, TypeError):
+ raise BadDsn("Invalid project in DSN (%r)" % (parts.path or "")[1:])
+
+ self.path = "/".join(path) + "/"
+
+ @property
+ def netloc(self):
+ # type: () -> str
+ """The netloc part of a DSN."""
+ rv = self.host
+ if (self.scheme, self.port) not in (("http", 80), ("https", 443)):
+ rv = "%s:%s" % (rv, self.port)
+ return rv
+
+ def to_auth(self, client=None):
+ # type: (Optional[Any]) -> Auth
+ """Returns the auth info object for this dsn."""
+ return Auth(
+ scheme=self.scheme,
+ host=self.netloc,
+ path=self.path,
+ project_id=self.project_id,
+ public_key=self.public_key,
+ secret_key=self.secret_key,
+ client=client,
+ )
+
+ def __str__(self):
+ # type: () -> str
+ return "%s://%s%s@%s%s%s" % (
+ self.scheme,
+ self.public_key,
+ self.secret_key and "@" + self.secret_key or "",
+ self.netloc,
+ self.path,
+ self.project_id,
+ )
+
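+# Parsing sketch (key and project id are made up):
+#
+#     dsn = Dsn("https://publickey@o0.ingest.sentry.io/1234")
+#     (dsn.scheme, dsn.host, dsn.port, dsn.project_id)
+#     # -> ("https", "o0.ingest.sentry.io", 443, "1234")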
+
+class Auth(object):
+ """Helper object that represents the auth info."""
+
+ def __init__(
+ self,
+ scheme,
+ host,
+ project_id,
+ public_key,
+ secret_key=None,
+ version=7,
+ client=None,
+ path="/",
+ ):
+ # type: (str, str, str, str, Optional[str], int, Optional[Any], str) -> None
+ self.scheme = scheme
+ self.host = host
+ self.path = path
+ self.project_id = project_id
+ self.public_key = public_key
+ self.secret_key = secret_key
+ self.version = version
+ self.client = client
+
+ @property
+ def store_api_url(self):
+ # type: () -> str
+ """Returns the API url for storing events."""
+ return "%s://%s%sapi/%s/store/" % (
+ self.scheme,
+ self.host,
+ self.path,
+ self.project_id,
+ )
+
+ def to_header(self, timestamp=None):
+ # type: (Optional[datetime]) -> str
+ """Returns the auth header a string."""
+ rv = [("sentry_key", self.public_key), ("sentry_version", self.version)]
+ if timestamp is not None:
+ rv.append(("sentry_timestamp", str(to_timestamp(timestamp))))
+ if self.client is not None:
+ rv.append(("sentry_client", self.client))
+ if self.secret_key is not None:
+ rv.append(("sentry_secret", self.secret_key))
+ return u"Sentry " + u", ".join("%s=%s" % (key, value) for key, value in rv)
+
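+# The resulting header is a single string (values illustrative):
+#
+#     Sentry sentry_key=publickey, sentry_version=7, sentry_client=sentry.python/x.y.z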
+
+class AnnotatedValue(object):
+ __slots__ = ("value", "metadata")
+
+ def __init__(self, value, metadata):
+ # type: (Optional[Any], Dict[str, Any]) -> None
+ self.value = value
+ self.metadata = metadata
+
+
+if MYPY:
+ from typing import TypeVar
+
+ T = TypeVar("T")
+ Annotated = Union[AnnotatedValue, T]
+
+
+def get_type_name(cls):
+ # type: (Optional[type]) -> Optional[str]
+ return getattr(cls, "__qualname__", None) or getattr(cls, "__name__", None)
+
+
+def get_type_module(cls):
+ # type: (Optional[type]) -> Optional[str]
+ mod = getattr(cls, "__module__", None)
+ if mod not in (None, "builtins", "__builtins__"):
+ return mod
+ return None
+
+
+def should_hide_frame(frame):
+ # type: (FrameType) -> bool
+ try:
+ mod = frame.f_globals["__name__"]
+ if mod.startswith("sentry_sdk."):
+ return True
+ except (AttributeError, KeyError):
+ pass
+
+ for flag_name in "__traceback_hide__", "__tracebackhide__":
+ try:
+ if frame.f_locals[flag_name]:
+ return True
+ except Exception:
+ pass
+
+ return False
+
+
+def iter_stacks(tb):
+ # type: (Optional[TracebackType]) -> Iterator[TracebackType]
+ tb_ = tb # type: Optional[TracebackType]
+ while tb_ is not None:
+ if not should_hide_frame(tb_.tb_frame):
+ yield tb_
+ tb_ = tb_.tb_next
+
+
+def get_lines_from_file(
+ filename, # type: str
+ lineno, # type: int
+ loader=None, # type: Optional[Any]
+ module=None, # type: Optional[str]
+):
+ # type: (...) -> Tuple[List[Annotated[str]], Optional[Annotated[str]], List[Annotated[str]]]
+ context_lines = 5
+ source = None
+ if loader is not None and hasattr(loader, "get_source"):
+ try:
+ source_str = loader.get_source(module) # type: Optional[str]
+ except (ImportError, IOError):
+ source_str = None
+ if source_str is not None:
+ source = source_str.splitlines()
+
+ if source is None:
+ try:
+ source = linecache.getlines(filename)
+ except (OSError, IOError):
+ return [], None, []
+
+ if not source:
+ return [], None, []
+
+ lower_bound = max(0, lineno - context_lines)
+ upper_bound = min(lineno + 1 + context_lines, len(source))
+
+ try:
+ pre_context = [
+ strip_string(line.strip("\r\n")) for line in source[lower_bound:lineno]
+ ]
+ context_line = strip_string(source[lineno].strip("\r\n"))
+ post_context = [
+ strip_string(line.strip("\r\n"))
+ for line in source[(lineno + 1) : upper_bound]
+ ]
+ return pre_context, context_line, post_context
+ except IndexError:
+ # the file may have changed since it was loaded into memory
+ return [], None, []
+
+
+def get_source_context(
+ frame, # type: FrameType
+ tb_lineno, # type: int
+):
+ # type: (...) -> Tuple[List[Annotated[str]], Optional[Annotated[str]], List[Annotated[str]]]
+ try:
+ abs_path = frame.f_code.co_filename # type: Optional[str]
+ except Exception:
+ abs_path = None
+ try:
+ module = frame.f_globals["__name__"]
+ except Exception:
+ return [], None, []
+ try:
+ loader = frame.f_globals["__loader__"]
+ except Exception:
+ loader = None
+ lineno = tb_lineno - 1
+ if lineno is not None and abs_path:
+ return get_lines_from_file(abs_path, lineno, loader, module)
+ return [], None, []
+
+
+def safe_str(value):
+ # type: (Any) -> str
+ try:
+ return text_type(value)
+ except Exception:
+ return safe_repr(value)
+
+
+if PY2:
+
+ def safe_repr(value):
+ # type: (Any) -> str
+ try:
+ rv = repr(value).decode("utf-8", "replace")
+
+ # At this point `rv` contains a bunch of literal escape codes, like
+ # this (exaggerated example):
+ #
+ # u"\\x2f"
+ #
+ # But we want to show this string as:
+ #
+ # u"/"
+ try:
+ # unicode-escape does this job, but can only decode latin1. So we
+ # attempt to encode in latin1.
+ return rv.encode("latin1").decode("unicode-escape")
+ except Exception:
+ # Since usually strings aren't latin1 this can break. In those
+ # cases we just give up.
+ return rv
+ except Exception:
+ # If e.g. the call to `repr` already fails
+ return u"<broken repr>"
+
+
+else:
+
+ def safe_repr(value):
+ # type: (Any) -> str
+ try:
+ return repr(value)
+ except Exception:
+ return "<broken repr>"
+
+
+def filename_for_module(module, abs_path):
+ # type: (Optional[str], Optional[str]) -> Optional[str]
+ if not abs_path or not module:
+ return abs_path
+
+ try:
+ if abs_path.endswith(".pyc"):
+ abs_path = abs_path[:-1]
+
+ base_module = module.split(".", 1)[0]
+ if base_module == module:
+ return os.path.basename(abs_path)
+
+ base_module_path = sys.modules[base_module].__file__
+ return abs_path.split(base_module_path.rsplit(os.sep, 2)[0], 1)[-1].lstrip(
+ os.sep
+ )
+ except Exception:
+ return abs_path
+
+
+def serialize_frame(frame, tb_lineno=None, with_locals=True):
+ # type: (FrameType, Optional[int], bool) -> Dict[str, Any]
+ f_code = getattr(frame, "f_code", None)
+ if not f_code:
+ abs_path = None
+ function = None
+ else:
+ abs_path = frame.f_code.co_filename
+ function = frame.f_code.co_name
+ try:
+ module = frame.f_globals["__name__"]
+ except Exception:
+ module = None
+
+ if tb_lineno is None:
+ tb_lineno = frame.f_lineno
+
+ pre_context, context_line, post_context = get_source_context(frame, tb_lineno)
+
+ rv = {
+ "filename": filename_for_module(module, abs_path) or None,
+ "abs_path": os.path.abspath(abs_path) if abs_path else None,
+ "function": function or "<unknown>",
+ "module": module,
+ "lineno": tb_lineno,
+ "pre_context": pre_context,
+ "context_line": context_line,
+ "post_context": post_context,
+ } # type: Dict[str, Any]
+ if with_locals:
+ rv["vars"] = frame.f_locals
+
+ return rv
+
+
+def stacktrace_from_traceback(tb=None, with_locals=True):
+ # type: (Optional[TracebackType], bool) -> Dict[str, List[Dict[str, Any]]]
+ return {
+ "frames": [
+ serialize_frame(
+ tb.tb_frame, tb_lineno=tb.tb_lineno, with_locals=with_locals
+ )
+ for tb in iter_stacks(tb)
+ ]
+ }
+
+
+def current_stacktrace(with_locals=True):
+ # type: (bool) -> Any
+ __tracebackhide__ = True
+ frames = []
+
+ f = sys._getframe() # type: Optional[FrameType]
+ while f is not None:
+ if not should_hide_frame(f):
+ frames.append(serialize_frame(f, with_locals=with_locals))
+ f = f.f_back
+
+ frames.reverse()
+
+ return {"frames": frames}
+
+
+def get_errno(exc_value):
+ # type: (BaseException) -> Optional[Any]
+ return getattr(exc_value, "errno", None)
+
+
+def single_exception_from_error_tuple(
+ exc_type, # type: Optional[type]
+ exc_value, # type: Optional[BaseException]
+ tb, # type: Optional[TracebackType]
+ client_options=None, # type: Optional[Dict[str, Any]]
+ mechanism=None, # type: Optional[Dict[str, Any]]
+):
+ # type: (...) -> Dict[str, Any]
+ if exc_value is not None:
+ errno = get_errno(exc_value)
+ else:
+ errno = None
+
+ if errno is not None:
+ mechanism = mechanism or {}
+ mechanism.setdefault("meta", {}).setdefault("errno", {}).setdefault(
+ "number", errno
+ )
+
+ if client_options is None:
+ with_locals = True
+ else:
+ with_locals = client_options["with_locals"]
+
+ return {
+ "module": get_type_module(exc_type),
+ "type": get_type_name(exc_type),
+ "value": safe_str(exc_value),
+ "mechanism": mechanism,
+ "stacktrace": stacktrace_from_traceback(tb, with_locals),
+ }
+
+
+HAS_CHAINED_EXCEPTIONS = hasattr(Exception, "__suppress_context__")
+
+if HAS_CHAINED_EXCEPTIONS:
+
+ def walk_exception_chain(exc_info):
+ # type: (ExcInfo) -> Iterator[ExcInfo]
+ exc_type, exc_value, tb = exc_info
+
+ seen_exceptions = []
+ seen_exception_ids = set() # type: Set[int]
+
+ while (
+ exc_type is not None
+ and exc_value is not None
+ and id(exc_value) not in seen_exception_ids
+ ):
+ yield exc_type, exc_value, tb
+
+ # Avoid hashing random types we don't know anything
+ # about. Use the list to keep a ref so that the `id` is
+ # not used for another object.
+ seen_exceptions.append(exc_value)
+ seen_exception_ids.add(id(exc_value))
+
+ if exc_value.__suppress_context__:
+ cause = exc_value.__cause__
+ else:
+ cause = exc_value.__context__
+ if cause is None:
+ break
+ exc_type = type(cause)
+ exc_value = cause
+ tb = getattr(cause, "__traceback__", None)
+
+
+else:
+
+ def walk_exception_chain(exc_info):
+ # type: (ExcInfo) -> Iterator[ExcInfo]
+ yield exc_info
+
+
+def exceptions_from_error_tuple(
+ exc_info, # type: ExcInfo
+ client_options=None, # type: Optional[Dict[str, Any]]
+ mechanism=None, # type: Optional[Dict[str, Any]]
+):
+ # type: (...) -> List[Dict[str, Any]]
+ exc_type, exc_value, tb = exc_info
+ rv = []
+ for exc_type, exc_value, tb in walk_exception_chain(exc_info):
+ rv.append(
+ single_exception_from_error_tuple(
+ exc_type, exc_value, tb, client_options, mechanism
+ )
+ )
+
+ rv.reverse()
+
+ return rv
+
+
+def to_string(value):
+ # type: (str) -> str
+ try:
+ return text_type(value)
+ except UnicodeDecodeError:
+ return repr(value)[1:-1]
+
+
+def iter_event_stacktraces(event):
+ # type: (Dict[str, Any]) -> Iterator[Dict[str, Any]]
+ if "stacktrace" in event:
+ yield event["stacktrace"]
+ if "threads" in event:
+ for thread in event["threads"].get("values") or ():
+ if "stacktrace" in thread:
+ yield thread["stacktrace"]
+ if "exception" in event:
+ for exception in event["exception"].get("values") or ():
+ if "stacktrace" in exception:
+ yield exception["stacktrace"]
+
+
+def iter_event_frames(event):
+ # type: (Dict[str, Any]) -> Iterator[Dict[str, Any]]
+ for stacktrace in iter_event_stacktraces(event):
+ for frame in stacktrace.get("frames") or ():
+ yield frame
+
+
+def handle_in_app(event, in_app_exclude=None, in_app_include=None):
+ # type: (Dict[str, Any], Optional[List[str]], Optional[List[str]]) -> Dict[str, Any]
+ for stacktrace in iter_event_stacktraces(event):
+ handle_in_app_impl(
+ stacktrace.get("frames"),
+ in_app_exclude=in_app_exclude,
+ in_app_include=in_app_include,
+ )
+
+ return event
+
+
+def handle_in_app_impl(frames, in_app_exclude, in_app_include):
+ # type: (Any, Optional[List[str]], Optional[List[str]]) -> Optional[Any]
+ if not frames:
+ return None
+
+ any_in_app = False
+ for frame in frames:
+ in_app = frame.get("in_app")
+ if in_app is not None:
+ if in_app:
+ any_in_app = True
+ continue
+
+ module = frame.get("module")
+ if not module:
+ continue
+ elif _module_in_set(module, in_app_include):
+ frame["in_app"] = True
+ any_in_app = True
+ elif _module_in_set(module, in_app_exclude):
+ frame["in_app"] = False
+
+ if not any_in_app:
+ for frame in frames:
+ if frame.get("in_app") is None:
+ frame["in_app"] = True
+
+ return frames
+
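+# Sketch (module names are made up): frames matching in_app_include get
+# in_app=True, matches of in_app_exclude get in_app=False:
+#
+#     frames = [{"module": "myapp.views"}, {"module": "django.core"}]
+#     handle_in_app_impl(frames, ["django"], ["myapp"])
+#     # -> [{"module": "myapp.views", "in_app": True},
+#     #     {"module": "django.core", "in_app": False}]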
+
+def exc_info_from_error(error):
+ # type: (Union[BaseException, ExcInfo]) -> ExcInfo
+ if isinstance(error, tuple) and len(error) == 3:
+ exc_type, exc_value, tb = error
+ elif isinstance(error, BaseException):
+ tb = getattr(error, "__traceback__", None)
+ if tb is not None:
+ exc_type = type(error)
+ exc_value = error
+ else:
+ exc_type, exc_value, tb = sys.exc_info()
+ if exc_value is not error:
+ tb = None
+ exc_value = error
+ exc_type = type(error)
+
+ else:
+ raise ValueError("Expected Exception object to report, got %s!" % type(error))
+
+ return exc_type, exc_value, tb
+
+
+def event_from_exception(
+ exc_info, # type: Union[BaseException, ExcInfo]
+ client_options=None, # type: Optional[Dict[str, Any]]
+ mechanism=None, # type: Optional[Dict[str, Any]]
+):
+ # type: (...) -> Tuple[Dict[str, Any], Dict[str, Any]]
+ exc_info = exc_info_from_error(exc_info)
+ hint = event_hint_with_exc_info(exc_info)
+ return (
+ {
+ "level": "error",
+ "exception": {
+ "values": exceptions_from_error_tuple(
+ exc_info, client_options, mechanism
+ )
+ },
+ },
+ hint,
+ )
+
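+# Typical call site (sketch):
+#
+#     try:
+#         1 / 0
+#     except Exception:
+#         event, hint = event_from_exception(sys.exc_info())
+#         # event["exception"]["values"] holds the serialized chain,
+#         # hint["exc_info"] the original exc_info tuple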
+
+def _module_in_set(name, set):
+ # type: (str, Optional[List[str]]) -> bool
+ if not set:
+ return False
+ for item in set or ():
+ if item == name or name.startswith(item + "."):
+ return True
+ return False
+
+
+def strip_string(value, max_length=None):
+ # type: (str, Optional[int]) -> Union[AnnotatedValue, str]
+ # TODO: read max_length from config
+ if not value:
+ return value
+
+ if max_length is None:
+ # This is intentionally resolved here, rather than used as the
+ # parameter default, so that one can patch `MAX_STRING_LENGTH` and
+ # affect `strip_string`.
+ max_length = MAX_STRING_LENGTH
+
+ length = len(value)
+
+ if length > max_length:
+ return AnnotatedValue(
+ value=value[: max_length - 3] + u"...",
+ metadata={
+ "len": length,
+ "rem": [["!limit", "x", max_length - 3, max_length]],
+ },
+ )
+ return value
+
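+# Trimming sketch: with the default MAX_STRING_LENGTH of 512, an oversize
+# string comes back annotated (illustrative):
+#
+#     res = strip_string("x" * 600)
+#     isinstance(res, AnnotatedValue)  # True
+#     res.metadata["len"]              # 600
+#     len(res.value)                   # 512 (509 chars plus "...")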
+
+def _is_threading_local_monkey_patched():
+ # type: () -> bool
+ try:
+ from gevent.monkey import is_object_patched # type: ignore
+
+ if is_object_patched("threading", "local"):
+ return True
+ except ImportError:
+ pass
+
+ try:
+ from eventlet.patcher import is_monkey_patched # type: ignore
+
+ if is_monkey_patched("thread"):
+ return True
+ except ImportError:
+ pass
+
+ return False
+
+
+def _get_contextvars():
+ # type: () -> Tuple[bool, type]
+ """
+ Try to import contextvars and use it if it's deemed safe. We should not use
+ contextvars if gevent or eventlet have patched thread locals, as
+ contextvars are unaffected by that patch.
+
+ https://github.com/gevent/gevent/issues/1407
+ """
+ if not _is_threading_local_monkey_patched():
+ # aiocontextvars is a PyPI package that ensures that the contextvars
+ # backport (also a PyPI package) works with asyncio under Python 3.6
+ #
+ # Import it if available.
+ if not PY2 and sys.version_info < (3, 7):
+ try:
+ from aiocontextvars import ContextVar # noqa
+
+ return True, ContextVar
+ except ImportError:
+ pass
+
+ try:
+ from contextvars import ContextVar
+
+ return True, ContextVar
+ except ImportError:
+ pass
+
+ from threading import local
+
+ class ContextVar(object):
+ # Super-limited impl of ContextVar
+
+ def __init__(self, name):
+ # type: (str) -> None
+ self._name = name
+ self._local = local()
+
+ def get(self, default):
+ # type: (Any) -> Any
+ return getattr(self._local, "value", default)
+
+ def set(self, value):
+ # type: (Any) -> None
+ self._local.value = value
+
+ return False, ContextVar
+
+
+HAS_REAL_CONTEXTVARS, ContextVar = _get_contextvars()
+
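+# Whichever implementation was selected, usage is the same (sketch; the
+# variable name is made up):
+#
+#     request_id = ContextVar("request_id")
+#     request_id.set("abc123")
+#     request_id.get(None)  # -> "abc123"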
+
+def transaction_from_function(func):
+ # type: (Callable[..., Any]) -> Optional[str]
+ # Methods in Python 2
+ try:
+ return "%s.%s.%s" % (
+ func.im_class.__module__, # type: ignore
+ func.im_class.__name__, # type: ignore
+ func.__name__,
+ )
+ except Exception:
+ pass
+
+ func_qualname = (
+ getattr(func, "__qualname__", None) or getattr(func, "__name__", None) or None
+ ) # type: Optional[str]
+
+ if not func_qualname:
+ # No idea what it is
+ return None
+
+ # Methods in Python 3
+ # Functions
+ # Classes
+ try:
+ return "%s.%s" % (func.__module__, func_qualname)
+ except Exception:
+ pass
+
+ # Possibly a lambda
+ return func_qualname
+
+
+disable_capture_event = ContextVar("disable_capture_event")
diff --git a/third_party/python/sentry_sdk/sentry_sdk/worker.py b/third_party/python/sentry_sdk/sentry_sdk/worker.py
new file mode 100644
index 0000000000..b5f2ea8ae6
--- /dev/null
+++ b/third_party/python/sentry_sdk/sentry_sdk/worker.py
@@ -0,0 +1,142 @@
+import os
+
+from threading import Thread, Lock
+from time import sleep, time
+from sentry_sdk._compat import queue, check_thread_support
+from sentry_sdk.utils import logger
+
+from sentry_sdk._types import MYPY
+
+if MYPY:
+ from queue import Queue
+ from typing import Any
+ from typing import Optional
+ from typing import Callable
+
+
+_TERMINATOR = object()
+
+
+class BackgroundWorker(object):
+ def __init__(self):
+ # type: () -> None
+ check_thread_support()
+ self._queue = queue.Queue(30) # type: Queue[Any]
+ self._lock = Lock()
+ self._thread = None # type: Optional[Thread]
+ self._thread_for_pid = None # type: Optional[int]
+
+ @property
+ def is_alive(self):
+ # type: () -> bool
+ if self._thread_for_pid != os.getpid():
+ return False
+ if not self._thread:
+ return False
+ return self._thread.is_alive()
+
+ def _ensure_thread(self):
+ # type: () -> None
+ if not self.is_alive:
+ self.start()
+
+ def _timed_queue_join(self, timeout):
+ # type: (float) -> bool
+ deadline = time() + timeout
+ queue = self._queue
+
+ real_all_tasks_done = getattr(
+ queue, "all_tasks_done", None
+ ) # type: Optional[Any]
+ if real_all_tasks_done is not None:
+ real_all_tasks_done.acquire()
+ all_tasks_done = real_all_tasks_done # type: Optional[Any]
+ elif queue.__module__.startswith("eventlet."):
+ all_tasks_done = getattr(queue, "_cond", None)
+ else:
+ all_tasks_done = None
+
+ try:
+ while queue.unfinished_tasks:
+ delay = deadline - time()
+ if delay <= 0:
+ return False
+ if all_tasks_done is not None:
+ all_tasks_done.wait(timeout=delay)
+ else:
+ # worst case, we just poll the number of remaining tasks
+ sleep(0.1)
+
+ return True
+ finally:
+ if real_all_tasks_done is not None:
+ real_all_tasks_done.release()
+
+ def start(self):
+ # type: () -> None
+ with self._lock:
+ if not self.is_alive:
+ self._thread = Thread(
+ target=self._target, name="raven-sentry.BackgroundWorker"
+ )
+ self._thread.setDaemon(True)
+ self._thread.start()
+ self._thread_for_pid = os.getpid()
+
+ def kill(self):
+ # type: () -> None
+ """
+ Kill the worker thread. Returns immediately; this does not wait for
+ pending events to be sent, use `flush` for that.
+ """
+ logger.debug("background worker got kill request")
+ with self._lock:
+ if self._thread:
+ try:
+ self._queue.put_nowait(_TERMINATOR)
+ except queue.Full:
+ logger.debug("background worker queue full, kill failed")
+
+ self._thread = None
+ self._thread_for_pid = None
+
+ def flush(self, timeout, callback=None):
+ # type: (float, Optional[Any]) -> None
+ logger.debug("background worker got flush request")
+ with self._lock:
+ if self.is_alive and timeout > 0.0:
+ self._wait_flush(timeout, callback)
+ logger.debug("background worker flushed")
+
+ def _wait_flush(self, timeout, callback):
+ # type: (float, Optional[Any]) -> None
+ initial_timeout = min(0.1, timeout)
+ if not self._timed_queue_join(initial_timeout):
+ pending = self._queue.qsize()
+ logger.debug("%d event(s) pending on flush", pending)
+ if callback is not None:
+ callback(pending, timeout)
+ self._timed_queue_join(timeout - initial_timeout)
+
+ def submit(self, callback):
+ # type: (Callable[[], None]) -> None
+ self._ensure_thread()
+ try:
+ self._queue.put_nowait(callback)
+ except queue.Full:
+ logger.debug("background worker queue full, dropping event")
+
+ def _target(self):
+ # type: () -> None
+ while True:
+ callback = self._queue.get()
+ try:
+ if callback is _TERMINATOR:
+ break
+ try:
+ callback()
+ except Exception:
+ logger.error("Failed processing job", exc_info=True)
+ finally:
+ self._queue.task_done()
+ sleep(0)
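+
+
+# Usage sketch (illustrative only; the lambda stands in for a real
+# transport callback):
+#
+#     worker = BackgroundWorker()
+#     worker.submit(lambda: None)   # enqueue a job for the worker thread
+#     worker.flush(2.0)             # wait up to 2s for the queue to drain
+#     worker.kill()                 # then request shutdown without waiting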
diff --git a/third_party/python/setuptools/_distutils_hack/__init__.py b/third_party/python/setuptools/_distutils_hack/__init__.py
new file mode 100644
index 0000000000..c31edfed17
--- /dev/null
+++ b/third_party/python/setuptools/_distutils_hack/__init__.py
@@ -0,0 +1,123 @@
+import sys
+import os
+import re
+import importlib
+import warnings
+
+
+is_pypy = '__pypy__' in sys.builtin_module_names
+
+
+def warn_distutils_present():
+ if 'distutils' not in sys.modules:
+ return
+ if is_pypy and sys.version_info < (3, 7):
+ # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
+ # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
+ return
+ warnings.warn(
+ "Distutils was imported before Setuptools, but importing Setuptools "
+ "also replaces the `distutils` module in `sys.modules`. This may lead "
+ "to undesirable behaviors or errors. To avoid these issues, avoid "
+ "using distutils directly, ensure that setuptools is installed in the "
+ "traditional way (e.g. not an editable install), and/or make sure "
+ "that setuptools is always imported before distutils.")
+
+
+def clear_distutils():
+ if 'distutils' not in sys.modules:
+ return
+ warnings.warn("Setuptools is replacing distutils.")
+ mods = [name for name in sys.modules if re.match(r'distutils\b', name)]
+ for name in mods:
+ del sys.modules[name]
+
+
+def enabled():
+ """
+    Return True if the local (setuptools-vendored) copy of distutils is
+    selected via the SETUPTOOLS_USE_DISTUTILS environment variable.
+ """
+ which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'stdlib')
+ return which == 'local'
+
+
+def ensure_local_distutils():
+ clear_distutils()
+ distutils = importlib.import_module('setuptools._distutils')
+ distutils.__name__ = 'distutils'
+ sys.modules['distutils'] = distutils
+
+ # sanity check that submodules load as expected
+ core = importlib.import_module('distutils.core')
+ assert '_distutils' in core.__file__, core.__file__
+
+
+def do_override():
+ """
+ Ensure that the local copy of distutils is preferred over stdlib.
+
+ See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
+ for more motivation.
+ """
+ if enabled():
+ warn_distutils_present()
+ ensure_local_distutils()
+
+
+class DistutilsMetaFinder:
+ def find_spec(self, fullname, path, target=None):
+ if path is not None:
+ return
+
+ method_name = 'spec_for_{fullname}'.format(**locals())
+ method = getattr(self, method_name, lambda: None)
+ return method()
+
+ def spec_for_distutils(self):
+ import importlib.abc
+ import importlib.util
+
+ class DistutilsLoader(importlib.abc.Loader):
+
+ def create_module(self, spec):
+ return importlib.import_module('setuptools._distutils')
+
+ def exec_module(self, module):
+ pass
+
+ return importlib.util.spec_from_loader('distutils', DistutilsLoader())
+
+ def spec_for_pip(self):
+ """
+ Ensure stdlib distutils when running under pip.
+ See pypa/pip#8761 for rationale.
+ """
+ if self.pip_imported_during_build():
+ return
+ clear_distutils()
+        # Once pip is detected, stop serving the local distutils from
+        # this finder instance so the stdlib copy is used.
+        self.spec_for_distutils = lambda: None
+
+ @staticmethod
+ def pip_imported_during_build():
+ """
+ Detect if pip is being imported in a build script. Ref #2355.
+ """
+ import traceback
+ return any(
+ frame.f_globals['__file__'].endswith('setup.py')
+ for frame, line in traceback.walk_stack(None)
+ )
+
+
+DISTUTILS_FINDER = DistutilsMetaFinder()
+
+
+def add_shim():
+ sys.meta_path.insert(0, DISTUTILS_FINDER)
+
+
+def remove_shim():
+ try:
+ sys.meta_path.remove(DISTUTILS_FINDER)
+ except ValueError:
+ pass
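+
+
+# Usage sketch: the distutils-precedence.pth file shipped with setuptools
+# calls ``add_shim()`` at interpreter startup when
+# SETUPTOOLS_USE_DISTUTILS=local, after which importing distutils is
+# served by DistutilsMetaFinder:
+#
+#     add_shim()
+#     import distutils.core   # resolves to setuptools._distutils
+#     remove_shim()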
diff --git a/third_party/python/setuptools/_distutils_hack/override.py b/third_party/python/setuptools/_distutils_hack/override.py
new file mode 100644
index 0000000000..2cc433a4a5
--- /dev/null
+++ b/third_party/python/setuptools/_distutils_hack/override.py
@@ -0,0 +1 @@
+__import__('_distutils_hack').do_override()
diff --git a/third_party/python/setuptools/distutils-precedence.pth b/third_party/python/setuptools/distutils-precedence.pth
new file mode 100644
index 0000000000..6de4198fcc
--- /dev/null
+++ b/third_party/python/setuptools/distutils-precedence.pth
@@ -0,0 +1 @@
+import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'stdlib') == 'local'; enabled and __import__('_distutils_hack').add_shim();
diff --git a/third_party/python/setuptools/easy_install.py b/third_party/python/setuptools/easy_install.py
new file mode 100644
index 0000000000..d87e984034
--- /dev/null
+++ b/third_party/python/setuptools/easy_install.py
@@ -0,0 +1,5 @@
+"""Run the EasyInstall command"""
+
+if __name__ == '__main__':
+ from setuptools.command.easy_install import main
+ main()
diff --git a/third_party/python/setuptools/pkg_resources/__init__.py b/third_party/python/setuptools/pkg_resources/__init__.py
new file mode 100644
index 0000000000..99b7f68075
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/__init__.py
@@ -0,0 +1,3285 @@
+"""
+Package resource API
+--------------------
+
+A resource is a logical file contained within a package, or a logical
+subdirectory thereof. The package resource API expects resource names
+to have their path parts separated with ``/``, *not* whatever the local
+path separator is. Do not use os.path operations to manipulate resource
+names being passed into the API.
+
+The package resource API is designed to work with normal filesystem packages,
+.egg files, and unpacked .egg files. It can also work in a limited way with
+.zip files and with custom PEP 302 loaders that support the ``get_data()``
+method.
+"""
+
+import sys
+import os
+import io
+import time
+import re
+import types
+import zipfile
+import zipimport
+import warnings
+import stat
+import functools
+import pkgutil
+import operator
+import platform
+import collections
+import plistlib
+import email.parser
+import errno
+import tempfile
+import textwrap
+import itertools
+import inspect
+import ntpath
+import posixpath
+import importlib
+from pkgutil import get_importer
+
+try:
+ import _imp
+except ImportError:
+ # Python 3.2 compatibility
+ import imp as _imp
+
+try:
+ FileExistsError
+except NameError:
+ FileExistsError = OSError
+
+# capture these to bypass sandboxing
+from os import utime
+try:
+ from os import mkdir, rename, unlink
+ WRITE_SUPPORT = True
+except ImportError:
+ # no write support, probably under GAE
+ WRITE_SUPPORT = False
+
+from os import open as os_open
+from os.path import isdir, split
+
+try:
+ import importlib.machinery as importlib_machinery
+ # access attribute to force import under delayed import mechanisms.
+ importlib_machinery.__name__
+except ImportError:
+ importlib_machinery = None
+
+from pkg_resources.extern import appdirs
+from pkg_resources.extern import packaging
+__import__('pkg_resources.extern.packaging.version')
+__import__('pkg_resources.extern.packaging.specifiers')
+__import__('pkg_resources.extern.packaging.requirements')
+__import__('pkg_resources.extern.packaging.markers')
+
+if sys.version_info < (3, 5):
+ raise RuntimeError("Python 3.5 or later is required")
+
+# declare some globals that will be defined later to
+# satisfy the linters.
+require = None
+working_set = None
+add_activation_listener = None
+resources_stream = None
+cleanup_resources = None
+resource_dir = None
+resource_stream = None
+set_extraction_path = None
+resource_isdir = None
+resource_string = None
+iter_entry_points = None
+resource_listdir = None
+resource_filename = None
+resource_exists = None
+_distribution_finders = None
+_namespace_handlers = None
+_namespace_packages = None
+
+
+class PEP440Warning(RuntimeWarning):
+ """
+ Used when there is an issue with a version or specifier not complying with
+ PEP 440.
+ """
+
+
+def parse_version(v):
+ try:
+ return packaging.version.Version(v)
+ except packaging.version.InvalidVersion:
+ return packaging.version.LegacyVersion(v)
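+
+
+# For example, PEP 440 strings parse to comparable Version objects, while
+# anything unparseable falls back to LegacyVersion:
+#
+#     parse_version('1.0') < parse_version('1.0.post1')   # True
+#     parse_version('not.a.release')                      # LegacyVersion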
+
+
+_state_vars = {}
+
+
+def _declare_state(vartype, **kw):
+ globals().update(kw)
+ _state_vars.update(dict.fromkeys(kw, vartype))
+
+
+def __getstate__():
+ state = {}
+ g = globals()
+ for k, v in _state_vars.items():
+ state[k] = g['_sget_' + v](g[k])
+ return state
+
+
+def __setstate__(state):
+ g = globals()
+ for k, v in state.items():
+ g['_sset_' + _state_vars[k]](k, g[k], v)
+ return state
+
+
+def _sget_dict(val):
+ return val.copy()
+
+
+def _sset_dict(key, ob, state):
+ ob.clear()
+ ob.update(state)
+
+
+def _sget_object(val):
+ return val.__getstate__()
+
+
+def _sset_object(key, ob, state):
+ ob.__setstate__(state)
+
+
+_sget_none = _sset_none = lambda *args: None
+
+
+def get_supported_platform():
+ """Return this platform's maximum compatible version.
+
+ distutils.util.get_platform() normally reports the minimum version
+ of macOS that would be required to *use* extensions produced by
+ distutils. But what we want when checking compatibility is to know the
+ version of macOS that we are *running*. To allow usage of packages that
+ explicitly require a newer version of macOS, we must also know the
+ current version of the OS.
+
+ If this condition occurs for any other platform with a version in its
+ platform strings, this function should be extended accordingly.
+ """
+ plat = get_build_platform()
+ m = macosVersionString.match(plat)
+ if m is not None and sys.platform == "darwin":
+ try:
+ plat = 'macosx-%s-%s' % ('.'.join(_macos_vers()[:2]), m.group(3))
+ except ValueError:
+ # not macOS
+ pass
+ return plat
+
+
+__all__ = [
+ # Basic resource access and distribution/entry point discovery
+ 'require', 'run_script', 'get_provider', 'get_distribution',
+ 'load_entry_point', 'get_entry_map', 'get_entry_info',
+ 'iter_entry_points',
+ 'resource_string', 'resource_stream', 'resource_filename',
+ 'resource_listdir', 'resource_exists', 'resource_isdir',
+
+ # Environmental control
+ 'declare_namespace', 'working_set', 'add_activation_listener',
+ 'find_distributions', 'set_extraction_path', 'cleanup_resources',
+ 'get_default_cache',
+
+ # Primary implementation classes
+ 'Environment', 'WorkingSet', 'ResourceManager',
+ 'Distribution', 'Requirement', 'EntryPoint',
+
+ # Exceptions
+ 'ResolutionError', 'VersionConflict', 'DistributionNotFound',
+ 'UnknownExtra', 'ExtractionError',
+
+ # Warnings
+ 'PEP440Warning',
+
+ # Parsing functions and string utilities
+ 'parse_requirements', 'parse_version', 'safe_name', 'safe_version',
+ 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections',
+ 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker',
+
+ # filesystem utilities
+ 'ensure_directory', 'normalize_path',
+
+ # Distribution "precedence" constants
+ 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST',
+
+ # "Provider" interfaces, implementations, and registration/lookup APIs
+ 'IMetadataProvider', 'IResourceProvider', 'FileMetadata',
+ 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider',
+ 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider',
+ 'register_finder', 'register_namespace_handler', 'register_loader_type',
+ 'fixup_namespace_packages', 'get_importer',
+
+ # Warnings
+ 'PkgResourcesDeprecationWarning',
+
+ # Deprecated/backward compatibility only
+ 'run_main', 'AvailableDistributions',
+]
+
+
+class ResolutionError(Exception):
+ """Abstract base for dependency resolution errors"""
+
+ def __repr__(self):
+ return self.__class__.__name__ + repr(self.args)
+
+
+class VersionConflict(ResolutionError):
+ """
+ An already-installed version conflicts with the requested version.
+
+ Should be initialized with the installed Distribution and the requested
+ Requirement.
+ """
+
+ _template = "{self.dist} is installed but {self.req} is required"
+
+ @property
+ def dist(self):
+ return self.args[0]
+
+ @property
+ def req(self):
+ return self.args[1]
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def with_context(self, required_by):
+ """
+ If required_by is non-empty, return a version of self that is a
+ ContextualVersionConflict.
+ """
+ if not required_by:
+ return self
+ args = self.args + (required_by,)
+ return ContextualVersionConflict(*args)
+
+
+class ContextualVersionConflict(VersionConflict):
+ """
+ A VersionConflict that accepts a third parameter, the set of the
+ requirements that required the installed Distribution.
+ """
+
+ _template = VersionConflict._template + ' by {self.required_by}'
+
+ @property
+ def required_by(self):
+ return self.args[2]
+
+
+class DistributionNotFound(ResolutionError):
+ """A requested distribution was not found"""
+
+ _template = ("The '{self.req}' distribution was not found "
+ "and is required by {self.requirers_str}")
+
+ @property
+ def req(self):
+ return self.args[0]
+
+ @property
+ def requirers(self):
+ return self.args[1]
+
+ @property
+ def requirers_str(self):
+ if not self.requirers:
+ return 'the application'
+ return ', '.join(self.requirers)
+
+ def report(self):
+ return self._template.format(**locals())
+
+ def __str__(self):
+ return self.report()
+
+
+class UnknownExtra(ResolutionError):
+ """Distribution doesn't have an "extra feature" of the given name"""
+
+
+_provider_factories = {}
+
+PY_MAJOR = '{}.{}'.format(*sys.version_info)
+EGG_DIST = 3
+BINARY_DIST = 2
+SOURCE_DIST = 1
+CHECKOUT_DIST = 0
+DEVELOP_DIST = -1
+
+
+def register_loader_type(loader_type, provider_factory):
+ """Register `provider_factory` to make providers for `loader_type`
+
+ `loader_type` is the type or class of a PEP 302 ``module.__loader__``,
+ and `provider_factory` is a function that, passed a *module* object,
+ returns an ``IResourceProvider`` for that module.
+ """
+ _provider_factories[loader_type] = provider_factory
+
+
+def get_provider(moduleOrReq):
+ """Return an IResourceProvider for the named module or requirement"""
+ if isinstance(moduleOrReq, Requirement):
+ return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0]
+ try:
+ module = sys.modules[moduleOrReq]
+ except KeyError:
+ __import__(moduleOrReq)
+ module = sys.modules[moduleOrReq]
+ loader = getattr(module, '__loader__', None)
+ return _find_adapter(_provider_factories, loader)(module)
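+
+
+# Sketch (``MyLoader`` and ``MyProvider`` are hypothetical names): once a
+# loader type is registered, ``get_provider`` can resolve resources for any
+# module imported through it:
+#
+#     register_loader_type(MyLoader, MyProvider)
+#     provider = get_provider('some.module')   # an IResourceProvider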
+
+
+def _macos_vers(_cache=[]):
+ if not _cache:
+ version = platform.mac_ver()[0]
+ # fallback for MacPorts
+ if version == '':
+ plist = '/System/Library/CoreServices/SystemVersion.plist'
+ if os.path.exists(plist):
+ if hasattr(plistlib, 'readPlist'):
+ plist_content = plistlib.readPlist(plist)
+ if 'ProductVersion' in plist_content:
+ version = plist_content['ProductVersion']
+
+ _cache.append(version.split('.'))
+ return _cache[0]
+
+
+def _macos_arch(machine):
+ return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine)
+
+
+def get_build_platform():
+ """Return this platform's string for platform-specific distributions
+
+ XXX Currently this is the same as ``distutils.util.get_platform()``, but it
+ needs some hacks for Linux and macOS.
+ """
+ from sysconfig import get_platform
+
+ plat = get_platform()
+ if sys.platform == "darwin" and not plat.startswith('macosx-'):
+ try:
+ version = _macos_vers()
+ machine = os.uname()[4].replace(" ", "_")
+ return "macosx-%d.%d-%s" % (
+ int(version[0]), int(version[1]),
+ _macos_arch(machine),
+ )
+ except ValueError:
+ # if someone is running a non-Mac darwin system, this will fall
+ # through to the default implementation
+ pass
+ return plat
+
+
+macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)")
+darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)")
+# XXX backward compat
+get_platform = get_build_platform
+
+
+def compatible_platforms(provided, required):
+ """Can code for the `provided` platform run on the `required` platform?
+
+ Returns true if either platform is ``None``, or the platforms are equal.
+
+ XXX Needs compatibility checks for Linux and other unixy OSes.
+ """
+ if provided is None or required is None or provided == required:
+ # easy case
+ return True
+
+ # macOS special cases
+ reqMac = macosVersionString.match(required)
+ if reqMac:
+ provMac = macosVersionString.match(provided)
+
+ # is this a Mac package?
+ if not provMac:
+ # this is backwards compatibility for packages built before
+ # setuptools 0.6. All packages built after this point will
+ # use the new macOS designation.
+ provDarwin = darwinVersionString.match(provided)
+ if provDarwin:
+ dversion = int(provDarwin.group(1))
+ macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2))
+ if dversion == 7 and macosversion >= "10.3" or \
+ dversion == 8 and macosversion >= "10.4":
+ return True
+ # egg isn't macOS or legacy darwin
+ return False
+
+ # are they the same major version and machine type?
+ if provMac.group(1) != reqMac.group(1) or \
+ provMac.group(3) != reqMac.group(3):
+ return False
+
+ # is the required OS major update >= the provided one?
+ if int(provMac.group(2)) > int(reqMac.group(2)):
+ return False
+
+ return True
+
+ # XXX Linux and other platforms' special cases should go here
+ return False
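+
+
+# For example (illustrative platform strings):
+#
+#     compatible_platforms(None, 'linux-x86_64')                         # True
+#     compatible_platforms('macosx-10.9-x86_64', 'macosx-10.15-x86_64')  # True
+#     compatible_platforms('win32', 'linux-x86_64')                      # False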
+
+
+def run_script(dist_spec, script_name):
+ """Locate distribution `dist_spec` and run its `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ require(dist_spec)[0].run_script(script_name, ns)
+
+
+# backward compatibility
+run_main = run_script
+
+
+def get_distribution(dist):
+ """Return a current distribution object for a Requirement or string"""
+ if isinstance(dist, str):
+ dist = Requirement.parse(dist)
+ if isinstance(dist, Requirement):
+ dist = get_provider(dist)
+ if not isinstance(dist, Distribution):
+ raise TypeError("Expected string, Requirement, or Distribution", dist)
+ return dist
+
+
+def load_entry_point(dist, group, name):
+ """Return `name` entry point of `group` for `dist` or raise ImportError"""
+ return get_distribution(dist).load_entry_point(group, name)
+
+
+def get_entry_map(dist, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ return get_distribution(dist).get_entry_map(group)
+
+
+def get_entry_info(dist, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return get_distribution(dist).get_entry_info(group, name)
+
+
+class IMetadataProvider:
+ def has_metadata(name):
+ """Does the package's distribution contain the named metadata?"""
+
+ def get_metadata(name):
+ """The named metadata resource as a string"""
+
+ def get_metadata_lines(name):
+ """Yield named metadata resource as list of non-blank non-comment lines
+
+ Leading and trailing whitespace is stripped from each line, and lines
+ with ``#`` as the first non-blank character are omitted."""
+
+ def metadata_isdir(name):
+ """Is the named metadata a directory? (like ``os.path.isdir()``)"""
+
+ def metadata_listdir(name):
+ """List of metadata names in the directory (like ``os.listdir()``)"""
+
+ def run_script(script_name, namespace):
+ """Execute the named script in the supplied namespace dictionary"""
+
+
+class IResourceProvider(IMetadataProvider):
+ """An object that provides access to package resources"""
+
+ def get_resource_filename(manager, resource_name):
+ """Return a true filesystem path for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_stream(manager, resource_name):
+ """Return a readable file-like object for `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def get_resource_string(manager, resource_name):
+ """Return a string containing the contents of `resource_name`
+
+ `manager` must be an ``IResourceManager``"""
+
+ def has_resource(resource_name):
+ """Does the package contain the named resource?"""
+
+ def resource_isdir(resource_name):
+ """Is the named resource a directory? (like ``os.path.isdir()``)"""
+
+ def resource_listdir(resource_name):
+ """List of resource names in the directory (like ``os.listdir()``)"""
+
+
+class WorkingSet:
+ """A collection of active distributions on sys.path (or a similar list)"""
+
+ def __init__(self, entries=None):
+ """Create working set from list of path entries (default=sys.path)"""
+ self.entries = []
+ self.entry_keys = {}
+ self.by_key = {}
+ self.callbacks = []
+
+ if entries is None:
+ entries = sys.path
+
+ for entry in entries:
+ self.add_entry(entry)
+
+ @classmethod
+ def _build_master(cls):
+ """
+ Prepare the master working set.
+ """
+ ws = cls()
+ try:
+ from __main__ import __requires__
+ except ImportError:
+ # The main program does not list any requirements
+ return ws
+
+ # ensure the requirements are met
+ try:
+ ws.require(__requires__)
+ except VersionConflict:
+ return cls._build_from_requirements(__requires__)
+
+ return ws
+
+ @classmethod
+ def _build_from_requirements(cls, req_spec):
+ """
+ Build a working set from a requirement spec. Rewrites sys.path.
+ """
+ # try it without defaults already on sys.path
+ # by starting with an empty path
+ ws = cls([])
+ reqs = parse_requirements(req_spec)
+ dists = ws.resolve(reqs, Environment())
+ for dist in dists:
+ ws.add(dist)
+
+ # add any missing entries from sys.path
+ for entry in sys.path:
+ if entry not in ws.entries:
+ ws.add_entry(entry)
+
+ # then copy back to sys.path
+ sys.path[:] = ws.entries
+ return ws
+
+ def add_entry(self, entry):
+ """Add a path item to ``.entries``, finding any distributions on it
+
+ ``find_distributions(entry, True)`` is used to find distributions
+ corresponding to the path entry, and they are added. `entry` is
+ always appended to ``.entries``, even if it is already present.
+ (This is because ``sys.path`` can contain the same value more than
+ once, and the ``.entries`` of the ``sys.path`` WorkingSet should always
+ equal ``sys.path``.)
+ """
+ self.entry_keys.setdefault(entry, [])
+ self.entries.append(entry)
+ for dist in find_distributions(entry, True):
+ self.add(dist, entry, False)
+
+ def __contains__(self, dist):
+ """True if `dist` is the active distribution for its project"""
+ return self.by_key.get(dist.key) == dist
+
+ def find(self, req):
+ """Find a distribution matching requirement `req`
+
+ If there is an active distribution for the requested project, this
+ returns it as long as it meets the version requirement specified by
+ `req`. But, if there is an active distribution for the project and it
+ does *not* meet the `req` requirement, ``VersionConflict`` is raised.
+ If there is no active distribution for the requested project, ``None``
+ is returned.
+ """
+ dist = self.by_key.get(req.key)
+ if dist is not None and dist not in req:
+ # XXX add more info
+ raise VersionConflict(dist, req)
+ return dist
+
+ def iter_entry_points(self, group, name=None):
+ """Yield entry point objects from `group` matching `name`
+
+ If `name` is None, yields all entry points in `group` from all
+ distributions in the working set, otherwise only ones matching
+ both `group` and `name` are yielded (in distribution order).
+ """
+ return (
+ entry
+ for dist in self
+ for entry in dist.get_entry_map(group).values()
+ if name is None or name == entry.name
+ )
+
+ def run_script(self, requires, script_name):
+ """Locate distribution for `requires` and run `script_name` script"""
+ ns = sys._getframe(1).f_globals
+ name = ns['__name__']
+ ns.clear()
+ ns['__name__'] = name
+ self.require(requires)[0].run_script(script_name, ns)
+
+ def __iter__(self):
+ """Yield distributions for non-duplicate projects in the working set
+
+ The yield order is the order in which the items' path entries were
+ added to the working set.
+ """
+ seen = {}
+ for item in self.entries:
+ if item not in self.entry_keys:
+ # workaround a cache issue
+ continue
+
+ for key in self.entry_keys[item]:
+ if key not in seen:
+ seen[key] = 1
+ yield self.by_key[key]
+
+ def add(self, dist, entry=None, insert=True, replace=False):
+ """Add `dist` to working set, associated with `entry`
+
+ If `entry` is unspecified, it defaults to the ``.location`` of `dist`.
+ On exit from this routine, `entry` is added to the end of the working
+ set's ``.entries`` (if it wasn't already present).
+
+ `dist` is only added to the working set if it's for a project that
+ doesn't already have a distribution in the set, unless `replace=True`.
+ If it's added, any callbacks registered with the ``subscribe()`` method
+ will be called.
+ """
+ if insert:
+ dist.insert_on(self.entries, entry, replace=replace)
+
+ if entry is None:
+ entry = dist.location
+ keys = self.entry_keys.setdefault(entry, [])
+ keys2 = self.entry_keys.setdefault(dist.location, [])
+ if not replace and dist.key in self.by_key:
+ # ignore hidden distros
+ return
+
+ self.by_key[dist.key] = dist
+ if dist.key not in keys:
+ keys.append(dist.key)
+ if dist.key not in keys2:
+ keys2.append(dist.key)
+ self._added_new(dist)
+
+ def resolve(self, requirements, env=None, installer=None,
+ replace_conflicting=False, extras=None):
+ """List all distributions needed to (recursively) meet `requirements`
+
+ `requirements` must be a sequence of ``Requirement`` objects. `env`,
+ if supplied, should be an ``Environment`` instance. If
+ not supplied, it defaults to all distributions available within any
+ entry or distribution in the working set. `installer`, if supplied,
+ will be invoked with each requirement that cannot be met by an
+ already-installed distribution; it should return a ``Distribution`` or
+ ``None``.
+
+        Unless `replace_conflicting=True`, raises a VersionConflict exception
+        if any requirements are found on the path that have the correct name
+        but the wrong version.  Otherwise, if an `installer` is supplied it
+        will be invoked to obtain the correct version of the requirement and
+        activate it.
+
+ `extras` is a list of the extras to be used with these requirements.
+ This is important because extra requirements may look like `my_req;
+ extra = "my_extra"`, which would otherwise be interpreted as a purely
+ optional requirement. Instead, we want to be able to assert that these
+ requirements are truly required.
+ """
+
+ # set up the stack
+ requirements = list(requirements)[::-1]
+ # set of processed requirements
+ processed = {}
+ # key -> dist
+ best = {}
+ to_activate = []
+
+ req_extras = _ReqExtras()
+
+ # Mapping of requirement to set of distributions that required it;
+ # useful for reporting info about conflicts.
+ required_by = collections.defaultdict(set)
+
+ while requirements:
+ # process dependencies breadth-first
+ req = requirements.pop(0)
+ if req in processed:
+ # Ignore cyclic or redundant dependencies
+ continue
+
+ if not req_extras.markers_pass(req, extras):
+ continue
+
+ dist = best.get(req.key)
+ if dist is None:
+ # Find the best distribution and add it to the map
+ dist = self.by_key.get(req.key)
+ if dist is None or (dist not in req and replace_conflicting):
+ ws = self
+ if env is None:
+ if dist is None:
+ env = Environment(self.entries)
+ else:
+ # Use an empty environment and workingset to avoid
+ # any further conflicts with the conflicting
+ # distribution
+ env = Environment([])
+ ws = WorkingSet([])
+ dist = best[req.key] = env.best_match(
+ req, ws, installer,
+ replace_conflicting=replace_conflicting
+ )
+ if dist is None:
+ requirers = required_by.get(req, None)
+ raise DistributionNotFound(req, requirers)
+ to_activate.append(dist)
+ if dist not in req:
+ # Oops, the "best" so far conflicts with a dependency
+ dependent_req = required_by[req]
+ raise VersionConflict(dist, req).with_context(dependent_req)
+
+ # push the new requirements onto the stack
+ new_requirements = dist.requires(req.extras)[::-1]
+ requirements.extend(new_requirements)
+
+ # Register the new requirements needed by req
+ for new_requirement in new_requirements:
+ required_by[new_requirement].add(req.project_name)
+ req_extras[new_requirement] = req.extras
+
+ processed[req] = True
+
+ # return list of distros to activate
+ return to_activate
+
+ def find_plugins(
+ self, plugin_env, full_env=None, installer=None, fallback=True):
+ """Find all activatable distributions in `plugin_env`
+
+ Example usage::
+
+ distributions, errors = working_set.find_plugins(
+ Environment(plugin_dirlist)
+ )
+ # add plugins+libs to sys.path
+ map(working_set.add, distributions)
+ # display errors
+ print('Could not load', errors)
+
+ The `plugin_env` should be an ``Environment`` instance that contains
+ only distributions that are in the project's "plugin directory" or
+        directories. The `full_env`, if supplied, should be an ``Environment``
+        that contains all currently-available distributions.  If `full_env` is not
+ supplied, one is created automatically from the ``WorkingSet`` this
+ method is called on, which will typically mean that every directory on
+ ``sys.path`` will be scanned for distributions.
+
+ `installer` is a standard installer callback as used by the
+ ``resolve()`` method. The `fallback` flag indicates whether we should
+ attempt to resolve older versions of a plugin if the newest version
+ cannot be resolved.
+
+ This method returns a 2-tuple: (`distributions`, `error_info`), where
+ `distributions` is a list of the distributions found in `plugin_env`
+ that were loadable, along with any other distributions that are needed
+ to resolve their dependencies. `error_info` is a dictionary mapping
+ unloadable plugin distributions to an exception instance describing the
+ error that occurred. Usually this will be a ``DistributionNotFound`` or
+ ``VersionConflict`` instance.
+ """
+
+ plugin_projects = list(plugin_env)
+ # scan project names in alphabetic order
+ plugin_projects.sort()
+
+ error_info = {}
+ distributions = {}
+
+ if full_env is None:
+ env = Environment(self.entries)
+ env += plugin_env
+ else:
+ env = full_env + plugin_env
+
+ shadow_set = self.__class__([])
+ # put all our entries in shadow_set
+ list(map(shadow_set.add, self))
+
+ for project_name in plugin_projects:
+
+ for dist in plugin_env[project_name]:
+
+ req = [dist.as_requirement()]
+
+ try:
+ resolvees = shadow_set.resolve(req, env, installer)
+
+ except ResolutionError as v:
+ # save error info
+ error_info[dist] = v
+ if fallback:
+ # try the next older version of project
+ continue
+ else:
+ # give up on this project, keep going
+ break
+
+ else:
+ list(map(shadow_set.add, resolvees))
+ distributions.update(dict.fromkeys(resolvees))
+
+ # success, no need to try any more versions of this project
+ break
+
+ distributions = list(distributions)
+ distributions.sort()
+
+ return distributions, error_info
+
+ def require(self, *requirements):
+ """Ensure that distributions matching `requirements` are activated
+
+ `requirements` must be a string or a (possibly-nested) sequence
+ thereof, specifying the distributions and versions required. The
+ return value is a sequence of the distributions that needed to be
+ activated to fulfill the requirements; all relevant distributions are
+ included, even if they were already activated in this working set.
+ """
+ needed = self.resolve(parse_requirements(requirements))
+
+ for dist in needed:
+ self.add(dist)
+
+ return needed
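+
+    # Usage sketch (``SomeDist`` is a placeholder project name; a matching
+    # distribution must be discoverable on this working set's entries):
+    #
+    #     ws = WorkingSet()
+    #     ws.require('SomeDist>=1.0')   # activates SomeDist and its deps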
+
+ def subscribe(self, callback, existing=True):
+ """Invoke `callback` for all distributions
+
+        If `existing=True` (the default), the callback is also invoked for
+        all distributions already in the working set.
+ """
+ if callback in self.callbacks:
+ return
+ self.callbacks.append(callback)
+ if not existing:
+ return
+ for dist in self:
+ callback(dist)
+
+ def _added_new(self, dist):
+ for callback in self.callbacks:
+ callback(dist)
+
+ def __getstate__(self):
+ return (
+ self.entries[:], self.entry_keys.copy(), self.by_key.copy(),
+ self.callbacks[:]
+ )
+
+ def __setstate__(self, e_k_b_c):
+ entries, keys, by_key, callbacks = e_k_b_c
+ self.entries = entries[:]
+ self.entry_keys = keys.copy()
+ self.by_key = by_key.copy()
+ self.callbacks = callbacks[:]
+
+
+class _ReqExtras(dict):
+ """
+ Map each requirement to the extras that demanded it.
+ """
+
+ def markers_pass(self, req, extras=None):
+ """
+ Evaluate markers for req against each extra that
+ demanded it.
+
+ Return False if the req has a marker and fails
+ evaluation. Otherwise, return True.
+ """
+ extra_evals = (
+ req.marker.evaluate({'extra': extra})
+ for extra in self.get(req, ()) + (extras or (None,))
+ )
+ return not req.marker or any(extra_evals)
+
+
+class Environment:
+ """Searchable snapshot of distributions on a search path"""
+
+ def __init__(
+ self, search_path=None, platform=get_supported_platform(),
+ python=PY_MAJOR):
+ """Snapshot distributions available on a search path
+
+ Any distributions found on `search_path` are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used.
+
+ `platform` is an optional string specifying the name of the platform
+ that platform-specific distributions must be compatible with. If
+ unspecified, it defaults to the current platform. `python` is an
+ optional string naming the desired version of Python (e.g. ``'3.6'``);
+ it defaults to the current version.
+
+ You may explicitly set `platform` (and/or `python`) to ``None`` if you
+ wish to map *all* distributions, not just those compatible with the
+ running platform or Python version.
+ """
+ self._distmap = {}
+ self.platform = platform
+ self.python = python
+ self.scan(search_path)
+
+ def can_add(self, dist):
+ """Is distribution `dist` acceptable for this environment?
+
+ The distribution must match the platform and python version
+ requirements specified when this environment was created, or False
+ is returned.
+ """
+ py_compat = (
+ self.python is None
+ or dist.py_version is None
+ or dist.py_version == self.python
+ )
+ return py_compat and compatible_platforms(dist.platform, self.platform)
+
+ def remove(self, dist):
+ """Remove `dist` from the environment"""
+ self._distmap[dist.key].remove(dist)
+
+ def scan(self, search_path=None):
+ """Scan `search_path` for distributions usable in this environment
+
+ Any distributions found are added to the environment.
+ `search_path` should be a sequence of ``sys.path`` items. If not
+ supplied, ``sys.path`` is used. Only distributions conforming to
+ the platform/python version defined at initialization are added.
+ """
+ if search_path is None:
+ search_path = sys.path
+
+ for item in search_path:
+ for dist in find_distributions(item):
+ self.add(dist)
+
+ def __getitem__(self, project_name):
+ """Return a newest-to-oldest list of distributions for `project_name`
+
+ Uses case-insensitive `project_name` comparison, assuming all the
+ project's distributions use their project's name converted to all
+ lowercase as their key.
+
+ """
+ distribution_key = project_name.lower()
+ return self._distmap.get(distribution_key, [])
+
+ def add(self, dist):
+ """Add `dist` if we ``can_add()`` it and it has not already been added
+ """
+ if self.can_add(dist) and dist.has_version():
+ dists = self._distmap.setdefault(dist.key, [])
+ if dist not in dists:
+ dists.append(dist)
+ dists.sort(key=operator.attrgetter('hashcmp'), reverse=True)
+
+ def best_match(
+ self, req, working_set, installer=None, replace_conflicting=False):
+ """Find distribution best matching `req` and usable on `working_set`
+
+ This calls the ``find(req)`` method of the `working_set` to see if a
+ suitable distribution is already active. (This may raise
+ ``VersionConflict`` if an unsuitable version of the project is already
+ active in the specified `working_set`.) If a suitable distribution
+ isn't active, this method returns the newest distribution in the
+ environment that meets the ``Requirement`` in `req`. If no suitable
+ distribution is found, and `installer` is supplied, then the result of
+ calling the environment's ``obtain(req, installer)`` method will be
+ returned.
+ """
+ try:
+ dist = working_set.find(req)
+ except VersionConflict:
+ if not replace_conflicting:
+ raise
+ dist = None
+ if dist is not None:
+ return dist
+ for dist in self[req.key]:
+ if dist in req:
+ return dist
+ # try to download/install
+ return self.obtain(req, installer)
+
+ def obtain(self, requirement, installer=None):
+ """Obtain a distribution matching `requirement` (e.g. via download)
+
+ Obtain a distro that matches requirement (e.g. via download). In the
+ base ``Environment`` class, this routine just returns
+ ``installer(requirement)``, unless `installer` is None, in which case
+ None is returned instead. This method is a hook that allows subclasses
+ to attempt other ways of obtaining a distribution before falling back
+ to the `installer` argument."""
+ if installer is not None:
+ return installer(requirement)
+
+ def __iter__(self):
+ """Yield the unique project names of the available distributions"""
+ for key in self._distmap.keys():
+ if self[key]:
+ yield key
+
+ def __iadd__(self, other):
+ """In-place addition of a distribution or environment"""
+ if isinstance(other, Distribution):
+ self.add(other)
+ elif isinstance(other, Environment):
+ for project in other:
+ for dist in other[project]:
+ self.add(dist)
+ else:
+ raise TypeError("Can't add %r to environment" % (other,))
+ return self
+
+ def __add__(self, other):
+ """Add an environment or distribution to an environment"""
+ new = self.__class__([], platform=None, python=None)
+ for env in self, other:
+ new += env
+ return new
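+
+# Usage sketch (``plugins`` is a placeholder directory): snapshot the
+# distributions on a path and query them, newest first, by project name:
+#
+#     env = Environment(['plugins'])
+#     for project in env:
+#         newest = env[project][0]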
+
+
+# XXX backward compatibility
+AvailableDistributions = Environment
+
+
+class ExtractionError(RuntimeError):
+ """An error occurred extracting a resource
+
+ The following attributes are available from instances of this exception:
+
+ manager
+ The resource manager that raised this exception
+
+ cache_path
+ The base directory for resource extraction
+
+ original_error
+ The exception instance that caused extraction to fail
+ """
+
+
+class ResourceManager:
+ """Manage resource extraction and packages"""
+ extraction_path = None
+
+ def __init__(self):
+ self.cached_files = {}
+
+ def resource_exists(self, package_or_requirement, resource_name):
+ """Does the named resource exist?"""
+ return get_provider(package_or_requirement).has_resource(resource_name)
+
+ def resource_isdir(self, package_or_requirement, resource_name):
+ """Is the named resource an existing directory?"""
+ return get_provider(package_or_requirement).resource_isdir(
+ resource_name
+ )
+
+ def resource_filename(self, package_or_requirement, resource_name):
+ """Return a true filesystem path for specified resource"""
+ return get_provider(package_or_requirement).get_resource_filename(
+ self, resource_name
+ )
+
+ def resource_stream(self, package_or_requirement, resource_name):
+ """Return a readable file-like object for specified resource"""
+ return get_provider(package_or_requirement).get_resource_stream(
+ self, resource_name
+ )
+
+ def resource_string(self, package_or_requirement, resource_name):
+ """Return specified resource as a string"""
+ return get_provider(package_or_requirement).get_resource_string(
+ self, resource_name
+ )
+
+ def resource_listdir(self, package_or_requirement, resource_name):
+ """List the contents of the named resource directory"""
+ return get_provider(package_or_requirement).resource_listdir(
+ resource_name
+ )
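+
+    # Usage sketch (``mypkg`` and ``data/config.txt`` are placeholder
+    # names; pkg_resources also binds module-level helpers such as
+    # ``resource_string`` to a shared ResourceManager instance at import
+    # time):
+    #
+    #     mgr = ResourceManager()
+    #     if mgr.resource_exists('mypkg', 'data/config.txt'):
+    #         text = mgr.resource_string('mypkg', 'data/config.txt')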
+
+ def extraction_error(self):
+ """Give an error message for problems extracting file(s)"""
+
+ old_exc = sys.exc_info()[1]
+ cache_path = self.extraction_path or get_default_cache()
+
+ tmpl = textwrap.dedent("""
+ Can't extract file(s) to egg cache
+
+ The following error occurred while trying to extract file(s)
+ to the Python egg cache:
+
+ {old_exc}
+
+ The Python egg cache directory is currently set to:
+
+ {cache_path}
+
+ Perhaps your account does not have write access to this directory?
+ You can change the cache directory by setting the PYTHON_EGG_CACHE
+ environment variable to point to an accessible directory.
+ """).lstrip()
+ err = ExtractionError(tmpl.format(**locals()))
+ err.manager = self
+ err.cache_path = cache_path
+ err.original_error = old_exc
+ raise err
+
+ def get_cache_path(self, archive_name, names=()):
+ """Return absolute location in cache for `archive_name` and `names`
+
+ The parent directory of the resulting path will be created if it does
+ not already exist. `archive_name` should be the base filename of the
+ enclosing egg (which may not be the name of the enclosing zipfile!),
+ including its ".egg" extension. `names`, if provided, should be a
+ sequence of path name parts "under" the egg's extraction location.
+
+ This method should only be called by resource providers that need to
+ obtain an extraction location, and only for names they intend to
+ extract, as it tracks the generated names for possible cleanup later.
+ """
+ extract_path = self.extraction_path or get_default_cache()
+ target_path = os.path.join(extract_path, archive_name + '-tmp', *names)
+ try:
+ _bypass_ensure_directory(target_path)
+ except Exception:
+ self.extraction_error()
+
+ self._warn_unsafe_extraction_path(extract_path)
+
+ self.cached_files[target_path] = 1
+ return target_path
+
+ @staticmethod
+ def _warn_unsafe_extraction_path(path):
+ """
+ If the default extraction path is overridden and set to an insecure
+ location, such as /tmp, it opens up an opportunity for an attacker to
+ replace an extracted file with an unauthorized payload. Warn the user
+ if a known insecure location is used.
+
+ See Distribute #375 for more details.
+ """
+ if os.name == 'nt' and not path.startswith(os.environ['windir']):
+ # On Windows, permissions are generally restrictive by default
+ # and temp directories are not writable by other users, so
+ # bypass the warning.
+ return
+ mode = os.stat(path).st_mode
+ if mode & stat.S_IWOTH or mode & stat.S_IWGRP:
+ msg = (
+ "Extraction path is writable by group/others "
+ "and vulnerable to attack when "
+ "used with get_resource_filename ({path}). "
+ "Consider a more secure "
+ "location (set with .set_extraction_path or the "
+ "PYTHON_EGG_CACHE environment variable)."
+ ).format(**locals())
+ warnings.warn(msg, UserWarning)
+
+ def postprocess(self, tempname, filename):
+ """Perform any platform-specific postprocessing of `tempname`
+
+ This is where Mac header rewrites should be done; other platforms don't
+ have anything special they should do.
+
+ Resource providers should call this method ONLY after successfully
+ extracting a compressed resource. They must NOT call it on resources
+ that are already in the filesystem.
+
+ `tempname` is the current (temporary) name of the file, and `filename`
+ is the name it will be renamed to by the caller after this routine
+ returns.
+ """
+
+ if os.name == 'posix':
+ # Make the resource executable
+ mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777
+ os.chmod(tempname, mode)
+
+ def set_extraction_path(self, path):
+ """Set the base path where resources will be extracted to, if needed.
+
+ If you do not call this routine before any extractions take place, the
+ path defaults to the return value of ``get_default_cache()``. (Which
+ is based on the ``PYTHON_EGG_CACHE`` environment variable, with various
+ platform-specific fallbacks. See that routine's documentation for more
+ details.)
+
+ Resources are extracted to subdirectories of this path based upon
+ information given by the ``IResourceProvider``. You may set this to a
+ temporary directory, but then you must call ``cleanup_resources()`` to
+ delete the extracted files when done. There is no guarantee that
+ ``cleanup_resources()`` will be able to remove all extracted files.
+
+ (Note: you may not change the extraction path for a given resource
+ manager once resources have been extracted, unless you first call
+ ``cleanup_resources()``.)
+ """
+ if self.cached_files:
+ raise ValueError(
+ "Can't change extraction path, files already extracted"
+ )
+
+ self.extraction_path = path
+
+ def cleanup_resources(self, force=False):
+ """
+ Delete all extracted resource files and directories, returning a list
+ of the file and directory names that could not be successfully removed.
+ This function does not have any concurrency protection, so it should
+ generally only be called when the extraction path is a temporary
+ directory exclusive to a single process. This method is not
+ automatically called; you must call it explicitly or register it as an
+ ``atexit`` function if you wish to ensure cleanup of a temporary
+ directory used for extractions.
+ """
+        # XXX not implemented: currently a no-op that returns None instead
+        # of the documented list of un-removable names.
+
+
+def get_default_cache():
+ """
+ Return the ``PYTHON_EGG_CACHE`` environment variable
+ or a platform-relevant user cache dir for an app
+ named "Python-Eggs".
+ """
+ return (
+ os.environ.get('PYTHON_EGG_CACHE')
+ or appdirs.user_cache_dir(appname='Python-Eggs')
+ )
+
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """
+ Convert an arbitrary string to a standard version string
+ """
+ try:
+ # normalize the version
+ return str(packaging.version.Version(version))
+ except packaging.version.InvalidVersion:
+ version = version.replace(' ', '.')
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def safe_extra(extra):
+ """Convert an arbitrary string to a standard 'extra' name
+
+ Any runs of non-alphanumeric characters are replaced with a single '_',
+ and the result is always lowercased.
+ """
+ return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower()
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
+ return name.replace('-', '_')
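+
+
+# For example:
+#
+#     safe_name('foo_bar')         # 'foo-bar'
+#     safe_version('1.0 beta')     # '1.0.beta'
+#     safe_extra('Extra Feature')  # 'extra_feature'
+#     to_filename('my-dist')       # 'my_dist'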
+
+
+def invalid_marker(text):
+ """
+ Validate text as a PEP 508 environment marker; return an exception
+ if invalid or False otherwise.
+ """
+ try:
+ evaluate_marker(text)
+ except SyntaxError as e:
+ e.filename = None
+ e.lineno = None
+ return e
+ return False
+
+
+def evaluate_marker(text, extra=None):
+ """
+ Evaluate a PEP 508 environment marker.
+ Return a boolean indicating the marker result in this environment.
+ Raise SyntaxError if marker is invalid.
+
+    This implementation delegates to the ``packaging.markers`` module.
+ """
+ try:
+ marker = packaging.markers.Marker(text)
+ return marker.evaluate()
+ except packaging.markers.InvalidMarker as e:
+ raise SyntaxError(e) from e
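+
+
+# For example:
+#
+#     evaluate_marker('os_name == "posix"')   # True on POSIX systems
+#     invalid_marker('os_name == "posix"')    # False (the marker is valid)
+#     invalid_marker('not a marker')          # a SyntaxError instance (truthy)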
+
+
+class NullProvider:
+ """Try to implement resources and metadata for arbitrary PEP 302 loaders"""
+
+ egg_name = None
+ egg_info = None
+ loader = None
+
+ def __init__(self, module):
+ self.loader = getattr(module, '__loader__', None)
+ self.module_path = os.path.dirname(getattr(module, '__file__', ''))
+
+ def get_resource_filename(self, manager, resource_name):
+ return self._fn(self.module_path, resource_name)
+
+ def get_resource_stream(self, manager, resource_name):
+ return io.BytesIO(self.get_resource_string(manager, resource_name))
+
+ def get_resource_string(self, manager, resource_name):
+ return self._get(self._fn(self.module_path, resource_name))
+
+ def has_resource(self, resource_name):
+ return self._has(self._fn(self.module_path, resource_name))
+
+ def _get_metadata_path(self, name):
+ return self._fn(self.egg_info, name)
+
+ def has_metadata(self, name):
+ if not self.egg_info:
+ return self.egg_info
+
+ path = self._get_metadata_path(name)
+ return self._has(path)
+
+ def get_metadata(self, name):
+ if not self.egg_info:
+ return ""
+ path = self._get_metadata_path(name)
+ value = self._get(path)
+ try:
+ return value.decode('utf-8')
+ except UnicodeDecodeError as exc:
+ # Include the path in the error message to simplify
+ # troubleshooting, and without changing the exception type.
+ exc.reason += ' in {} file at path: {}'.format(name, path)
+ raise
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+ def resource_isdir(self, resource_name):
+ return self._isdir(self._fn(self.module_path, resource_name))
+
+ def metadata_isdir(self, name):
+ return self.egg_info and self._isdir(self._fn(self.egg_info, name))
+
+ def resource_listdir(self, resource_name):
+ return self._listdir(self._fn(self.module_path, resource_name))
+
+ def metadata_listdir(self, name):
+ if self.egg_info:
+ return self._listdir(self._fn(self.egg_info, name))
+ return []
+
+ def run_script(self, script_name, namespace):
+ script = 'scripts/' + script_name
+ if not self.has_metadata(script):
+ raise ResolutionError(
+ "Script {script!r} not found in metadata at {self.egg_info!r}"
+ .format(**locals()),
+ )
+ script_text = self.get_metadata(script).replace('\r\n', '\n')
+ script_text = script_text.replace('\r', '\n')
+ script_filename = self._fn(self.egg_info, script)
+ namespace['__file__'] = script_filename
+ if os.path.exists(script_filename):
+ with open(script_filename) as fid:
+ source = fid.read()
+ code = compile(source, script_filename, 'exec')
+ exec(code, namespace, namespace)
+ else:
+ from linecache import cache
+ cache[script_filename] = (
+ len(script_text), 0, script_text.split('\n'), script_filename
+ )
+ script_code = compile(script_text, script_filename, 'exec')
+ exec(script_code, namespace, namespace)
+
+ def _has(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _isdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _listdir(self, path):
+ raise NotImplementedError(
+ "Can't perform this operation for unregistered loader type"
+ )
+
+ def _fn(self, base, resource_name):
+ self._validate_resource_path(resource_name)
+ if resource_name:
+ return os.path.join(base, *resource_name.split('/'))
+ return base
+
+ @staticmethod
+ def _validate_resource_path(path):
+ """
+ Validate the resource paths according to the docs.
+ https://setuptools.readthedocs.io/en/latest/pkg_resources.html#basic-resource-access
+
+ >>> warned = getfixture('recwarn')
+ >>> warnings.simplefilter('always')
+ >>> vrp = NullProvider._validate_resource_path
+ >>> vrp('foo/bar.txt')
+ >>> bool(warned)
+ False
+ >>> vrp('../foo/bar.txt')
+ >>> bool(warned)
+ True
+ >>> warned.clear()
+ >>> vrp('/foo/bar.txt')
+ >>> bool(warned)
+ True
+ >>> vrp('foo/../../bar.txt')
+ >>> bool(warned)
+ True
+ >>> warned.clear()
+ >>> vrp('foo/f../bar.txt')
+ >>> bool(warned)
+ False
+
+ Windows path separators are straight-up disallowed.
+ >>> vrp(r'\\foo/bar.txt')
+ Traceback (most recent call last):
+ ...
+ ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+ >>> vrp(r'C:\\foo/bar.txt')
+ Traceback (most recent call last):
+ ...
+ ValueError: Use of .. or absolute path in a resource path \
+is not allowed.
+
+ Blank values are allowed
+
+ >>> vrp('')
+ >>> bool(warned)
+ False
+
+ Non-string values are not.
+
+ >>> vrp(None)
+ Traceback (most recent call last):
+ ...
+ AttributeError: ...
+ """
+ invalid = (
+ os.path.pardir in path.split(posixpath.sep) or
+ posixpath.isabs(path) or
+ ntpath.isabs(path)
+ )
+ if not invalid:
+ return
+
+ msg = "Use of .. or absolute path in a resource path is not allowed."
+
+ # Aggressively disallow Windows absolute paths
+ if ntpath.isabs(path) and not posixpath.isabs(path):
+ raise ValueError(msg)
+
+ # for compatibility, warn; in future
+ # raise ValueError(msg)
+ warnings.warn(
+ msg[:-1] + " and will raise exceptions in a future release.",
+ DeprecationWarning,
+ stacklevel=4,
+ )
+
+ def _get(self, path):
+ if hasattr(self.loader, 'get_data'):
+ return self.loader.get_data(path)
+ raise NotImplementedError(
+ "Can't perform this operation for loaders without 'get_data()'"
+ )
+
+
+register_loader_type(object, NullProvider)
+
+
+def _parents(path):
+ """
+ yield all parents of path including path
+ """
+ last = None
+ while path != last:
+ yield path
+ last = path
+ path, _ = os.path.split(path)
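+
+
+# For example, on a POSIX-style path (separators differ on Windows):
+#
+#     list(_parents('/a/b'))   # ['/a/b', '/a', '/']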
+
+
+class EggProvider(NullProvider):
+ """Provider based on a virtual filesystem"""
+
+ def __init__(self, module):
+ NullProvider.__init__(self, module)
+ self._setup_prefix()
+
+ def _setup_prefix(self):
+ # Assume that metadata may be nested inside a "basket"
+ # of multiple eggs and use module_path instead of .archive.
+ eggs = filter(_is_egg_path, _parents(self.module_path))
+ egg = next(eggs, None)
+ egg and self._set_egg(egg)
+
+ def _set_egg(self, path):
+ self.egg_name = os.path.basename(path)
+ self.egg_info = os.path.join(path, 'EGG-INFO')
+ self.egg_root = path
+
+
+class DefaultProvider(EggProvider):
+ """Provides access to package resources in the filesystem"""
+
+ def _has(self, path):
+ return os.path.exists(path)
+
+ def _isdir(self, path):
+ return os.path.isdir(path)
+
+ def _listdir(self, path):
+ return os.listdir(path)
+
+ def get_resource_stream(self, manager, resource_name):
+ return open(self._fn(self.module_path, resource_name), 'rb')
+
+ def _get(self, path):
+ with open(path, 'rb') as stream:
+ return stream.read()
+
+ @classmethod
+ def _register(cls):
+ loader_names = 'SourceFileLoader', 'SourcelessFileLoader',
+ for name in loader_names:
+ loader_cls = getattr(importlib_machinery, name, type(None))
+ register_loader_type(loader_cls, cls)
+
+
+DefaultProvider._register()
+
+
+class EmptyProvider(NullProvider):
+ """Provider that returns nothing for all requests"""
+
+ module_path = None
+
+ _isdir = _has = lambda self, path: False
+
+ def _get(self, path):
+ return ''
+
+ def _listdir(self, path):
+ return []
+
+ def __init__(self):
+ pass
+
+
+empty_provider = EmptyProvider()
+
+
+class ZipManifests(dict):
+ """
+ zip manifest builder
+ """
+
+ @classmethod
+ def build(cls, path):
+ """
+ Build a dictionary similar to the zipimport directory
+ caches, except instead of tuples, store ZipInfo objects.
+
+ Use a platform-specific path separator (os.sep) for the path keys
+ for compatibility with pypy on Windows.
+ """
+ with zipfile.ZipFile(path) as zfile:
+ items = (
+ (
+ name.replace('/', os.sep),
+ zfile.getinfo(name),
+ )
+ for name in zfile.namelist()
+ )
+ return dict(items)
+
+ load = build
+
+
+class MemoizedZipManifests(ZipManifests):
+ """
+ Memoized zipfile manifests.
+ """
+ manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime')
+
+ def load(self, path):
+ """
+ Load a manifest at path or return a suitable manifest already loaded.
+ """
+ path = os.path.normpath(path)
+ mtime = os.stat(path).st_mtime
+
+ if path not in self or self[path].mtime != mtime:
+ manifest = self.build(path)
+ self[path] = self.manifest_mod(manifest, mtime)
+
+ return self[path].manifest
+
+
+class ZipProvider(EggProvider):
+ """Resource support for zips and eggs"""
+
+ eagers = None
+ _zip_manifests = MemoizedZipManifests()
+
+ def __init__(self, module):
+ EggProvider.__init__(self, module)
+ self.zip_pre = self.loader.archive + os.sep
+
+ def _zipinfo_name(self, fspath):
+ # Convert a virtual filename (full path to file) into a zipfile subpath
+ # usable with the zipimport directory cache for our target archive
+ fspath = fspath.rstrip(os.sep)
+ if fspath == self.loader.archive:
+ return ''
+ if fspath.startswith(self.zip_pre):
+ return fspath[len(self.zip_pre):]
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.zip_pre)
+ )
+
+ def _parts(self, zip_path):
+ # Convert a zipfile subpath into an egg-relative path part list.
+ # pseudo-fs path
+ fspath = self.zip_pre + zip_path
+ if fspath.startswith(self.egg_root + os.sep):
+ return fspath[len(self.egg_root) + 1:].split(os.sep)
+ raise AssertionError(
+ "%s is not a subpath of %s" % (fspath, self.egg_root)
+ )
+
+ @property
+ def zipinfo(self):
+ return self._zip_manifests.load(self.loader.archive)
+
+ def get_resource_filename(self, manager, resource_name):
+ if not self.egg_name:
+ raise NotImplementedError(
+ "resource_filename() only supported for .egg, not .zip"
+ )
+ # no need to lock for extraction, since we use temp names
+ zip_path = self._resource_to_zip(resource_name)
+ eagers = self._get_eager_resources()
+ if '/'.join(self._parts(zip_path)) in eagers:
+ for name in eagers:
+ self._extract_resource(manager, self._eager_to_zip(name))
+ return self._extract_resource(manager, zip_path)
+
+ @staticmethod
+ def _get_date_and_size(zip_stat):
+ size = zip_stat.file_size
+ # ymdhms+wday, yday, dst
+ date_time = zip_stat.date_time + (0, 0, -1)
+ # 1980 offset already done
+ timestamp = time.mktime(date_time)
+ return timestamp, size
+
+ def _extract_resource(self, manager, zip_path):
+
+ if zip_path in self._index():
+ for name in self._index()[zip_path]:
+ last = self._extract_resource(
+ manager, os.path.join(zip_path, name)
+ )
+ # return the extracted directory name
+ return os.path.dirname(last)
+
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+
+ if not WRITE_SUPPORT:
+ raise IOError('"os.rename" and "os.unlink" are not supported '
+ 'on this platform')
+ try:
+
+ real_path = manager.get_cache_path(
+ self.egg_name, self._parts(zip_path)
+ )
+
+ if self._is_current(real_path, zip_path):
+ return real_path
+
+ outf, tmpnam = _mkstemp(
+ ".$extract",
+ dir=os.path.dirname(real_path),
+ )
+ os.write(outf, self.loader.get_data(zip_path))
+ os.close(outf)
+ utime(tmpnam, (timestamp, timestamp))
+ manager.postprocess(tmpnam, real_path)
+
+ try:
+ rename(tmpnam, real_path)
+
+ except os.error:
+ if os.path.isfile(real_path):
+ if self._is_current(real_path, zip_path):
+ # the file became current since it was checked above,
+ # so proceed.
+ return real_path
+                    # on Windows, delete the old file and retry the rename
+ elif os.name == 'nt':
+ unlink(real_path)
+ rename(tmpnam, real_path)
+ return real_path
+ raise
+
+ except os.error:
+ # report a user-friendly error
+ manager.extraction_error()
+
+ return real_path
+
+ def _is_current(self, file_path, zip_path):
+ """
+ Return True if the file_path is current for this zip_path
+ """
+ timestamp, size = self._get_date_and_size(self.zipinfo[zip_path])
+ if not os.path.isfile(file_path):
+ return False
+ stat = os.stat(file_path)
+ if stat.st_size != size or stat.st_mtime != timestamp:
+ return False
+ # check that the contents match
+ zip_contents = self.loader.get_data(zip_path)
+ with open(file_path, 'rb') as f:
+ file_contents = f.read()
+ return zip_contents == file_contents
+
+ def _get_eager_resources(self):
+ if self.eagers is None:
+ eagers = []
+ for name in ('native_libs.txt', 'eager_resources.txt'):
+ if self.has_metadata(name):
+ eagers.extend(self.get_metadata_lines(name))
+ self.eagers = eagers
+ return self.eagers
+
+ def _index(self):
+ try:
+ return self._dirindex
+ except AttributeError:
+ ind = {}
+ for path in self.zipinfo:
+ parts = path.split(os.sep)
+ while parts:
+ parent = os.sep.join(parts[:-1])
+ if parent in ind:
+ ind[parent].append(parts[-1])
+ break
+ else:
+ ind[parent] = [parts.pop()]
+ self._dirindex = ind
+ return ind
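+
+    # Hedged illustration: for an archive containing 'EGG-INFO/PKG-INFO'
+    # (with os.sep == '/'), the index above maps '' -> ['EGG-INFO'] and
+    # 'EGG-INFO' -> ['PKG-INFO'].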
+
+ def _has(self, fspath):
+ zip_path = self._zipinfo_name(fspath)
+ return zip_path in self.zipinfo or zip_path in self._index()
+
+ def _isdir(self, fspath):
+ return self._zipinfo_name(fspath) in self._index()
+
+ def _listdir(self, fspath):
+ return list(self._index().get(self._zipinfo_name(fspath), ()))
+
+ def _eager_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.egg_root, resource_name))
+
+ def _resource_to_zip(self, resource_name):
+ return self._zipinfo_name(self._fn(self.module_path, resource_name))
+
+
+register_loader_type(zipimport.zipimporter, ZipProvider)
+
+
+class FileMetadata(EmptyProvider):
+ """Metadata handler for standalone PKG-INFO files
+
+ Usage::
+
+ metadata = FileMetadata("/path/to/PKG-INFO")
+
+    This provider rejects all data and metadata requests except for PKG-INFO,
+    which is treated as existing; its content is the contents of the file at
+    the provided location.
+ """
+
+ def __init__(self, path):
+ self.path = path
+
+ def _get_metadata_path(self, name):
+ return self.path
+
+ def has_metadata(self, name):
+ return name == 'PKG-INFO' and os.path.isfile(self.path)
+
+ def get_metadata(self, name):
+ if name != 'PKG-INFO':
+ raise KeyError("No metadata except PKG-INFO is available")
+
+ with io.open(self.path, encoding='utf-8', errors="replace") as f:
+ metadata = f.read()
+ self._warn_on_replacement(metadata)
+ return metadata
+
+ def _warn_on_replacement(self, metadata):
+ replacement_char = '�'
+ if replacement_char in metadata:
+ tmpl = "{self.path} could not be properly decoded in UTF-8"
+ msg = tmpl.format(**locals())
+ warnings.warn(msg)
+
+ def get_metadata_lines(self, name):
+ return yield_lines(self.get_metadata(name))
+
+
+class PathMetadata(DefaultProvider):
+ """Metadata provider for egg directories
+
+ Usage::
+
+ # Development eggs:
+
+ egg_info = "/path/to/PackageName.egg-info"
+ base_dir = os.path.dirname(egg_info)
+ metadata = PathMetadata(base_dir, egg_info)
+ dist_name = os.path.splitext(os.path.basename(egg_info))[0]
+        dist = Distribution(base_dir, project_name=dist_name, metadata=metadata)
+
+ # Unpacked egg directories:
+
+ egg_path = "/path/to/PackageName-ver-pyver-etc.egg"
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO'))
+ dist = Distribution.from_filename(egg_path, metadata=metadata)
+ """
+
+ def __init__(self, path, egg_info):
+ self.module_path = path
+ self.egg_info = egg_info
+
+
+class EggMetadata(ZipProvider):
+ """Metadata provider for .egg files"""
+
+ def __init__(self, importer):
+ """Create a metadata provider from a zipimporter"""
+
+ self.zip_pre = importer.archive + os.sep
+ self.loader = importer
+ if importer.prefix:
+ self.module_path = os.path.join(importer.archive, importer.prefix)
+ else:
+ self.module_path = importer.archive
+ self._setup_prefix()
+
+
+_declare_state('dict', _distribution_finders={})
+
+
+def register_finder(importer_type, distribution_finder):
+ """Register `distribution_finder` to find distributions in sys.path items
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `distribution_finder` is a callable that, passed a path
+ item and the importer instance, yields ``Distribution`` instances found on
+ that path item. See ``pkg_resources.find_on_path`` for an example."""
+ _distribution_finders[importer_type] = distribution_finder
+
+
+def find_distributions(path_item, only=False):
+ """Yield distributions accessible via `path_item`"""
+ importer = get_importer(path_item)
+ finder = _find_adapter(_distribution_finders, importer)
+ return finder(importer, path_item, only)
+
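+# Hedged usage sketch (the path is hypothetical):
+#
+#   for dist in find_distributions('/path/to/site-packages'):
+#       print(dist.project_name, dist.version)
+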
+
+def find_eggs_in_zip(importer, path_item, only=False):
+ """
+ Find eggs in zip files; possibly multiple nested eggs.
+ """
+ if importer.archive.endswith('.whl'):
+ # wheels are not supported with this finder
+ # they don't have PKG-INFO metadata, and won't ever contain eggs
+ return
+ metadata = EggMetadata(importer)
+ if metadata.has_metadata('PKG-INFO'):
+ yield Distribution.from_filename(path_item, metadata=metadata)
+ if only:
+ # don't yield nested distros
+ return
+ for subitem in metadata.resource_listdir(''):
+ if _is_egg_path(subitem):
+ subpath = os.path.join(path_item, subitem)
+ dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath)
+ for dist in dists:
+ yield dist
+ elif subitem.lower().endswith('.dist-info'):
+ subpath = os.path.join(path_item, subitem)
+ submeta = EggMetadata(zipimport.zipimporter(subpath))
+ submeta.egg_info = subpath
+ yield Distribution.from_location(path_item, subitem, submeta)
+
+
+register_finder(zipimport.zipimporter, find_eggs_in_zip)
+
+
+def find_nothing(importer, path_item, only=False):
+ return ()
+
+
+register_finder(object, find_nothing)
+
+
+def _by_version_descending(names):
+ """
+ Given a list of filenames, return them in descending order
+ by version number.
+
+ >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg'
+ >>> _by_version_descending(names)
+ ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar']
+ >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg'
+ >>> _by_version_descending(names)
+ ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg']
+ >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg'
+ >>> _by_version_descending(names)
+ ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg']
+ """
+ def _by_version(name):
+ """
+ Parse each component of the filename
+ """
+ name, ext = os.path.splitext(name)
+ parts = itertools.chain(name.split('-'), [ext])
+ return [packaging.version.parse(part) for part in parts]
+
+ return sorted(names, key=_by_version, reverse=True)
+
+
+def find_on_path(importer, path_item, only=False):
+ """Yield distributions accessible on a sys.path directory"""
+ path_item = _normalize_cached(path_item)
+
+ if _is_unpacked_egg(path_item):
+ yield Distribution.from_filename(
+ path_item, metadata=PathMetadata(
+ path_item, os.path.join(path_item, 'EGG-INFO')
+ )
+ )
+ return
+
+ entries = (
+ os.path.join(path_item, child)
+ for child in safe_listdir(path_item)
+ )
+
+ # for performance, before sorting by version,
+ # screen entries for only those that will yield
+ # distributions
+ filtered = (
+ entry
+ for entry in entries
+ if dist_factory(path_item, entry, only)
+ )
+
+ # scan for .egg and .egg-info in directory
+ path_item_entries = _by_version_descending(filtered)
+ for entry in path_item_entries:
+ fullpath = os.path.join(path_item, entry)
+ factory = dist_factory(path_item, entry, only)
+ for dist in factory(fullpath):
+ yield dist
+
+
+def dist_factory(path_item, entry, only):
+ """Return a dist_factory for the given entry."""
+ lower = entry.lower()
+ is_egg_info = lower.endswith('.egg-info')
+ is_dist_info = (
+ lower.endswith('.dist-info') and
+ os.path.isdir(os.path.join(path_item, entry))
+ )
+ is_meta = is_egg_info or is_dist_info
+    if is_meta:
+        return distributions_from_metadata
+    if not only and _is_egg_path(entry):
+        return find_distributions
+    if not only and lower.endswith('.egg-link'):
+        return resolve_egg_link
+    return NoDists()
+
+
+class NoDists:
+ """
+ >>> bool(NoDists())
+ False
+
+ >>> list(NoDists()('anything'))
+ []
+ """
+ def __bool__(self):
+ return False
+
+ def __call__(self, fullpath):
+ return iter(())
+
+
+def safe_listdir(path):
+ """
+ Attempt to list contents of path, but suppress some exceptions.
+ """
+ try:
+ return os.listdir(path)
+ except (PermissionError, NotADirectoryError):
+ pass
+ except OSError as e:
+        # Ignore the path if it does not exist, is not a directory, or
+        # permission is denied
+ if e.errno not in (errno.ENOTDIR, errno.EACCES, errno.ENOENT):
+ raise
+ return ()
+
+
+def distributions_from_metadata(path):
+ root = os.path.dirname(path)
+ if os.path.isdir(path):
+ if len(os.listdir(path)) == 0:
+ # empty metadata dir; skip
+ return
+ metadata = PathMetadata(root, path)
+ else:
+ metadata = FileMetadata(path)
+ entry = os.path.basename(path)
+ yield Distribution.from_location(
+ root, entry, metadata, precedence=DEVELOP_DIST,
+ )
+
+
+def non_empty_lines(path):
+ """
+ Yield non-empty lines from file at path
+ """
+ with open(path) as f:
+ for line in f:
+ line = line.strip()
+ if line:
+ yield line
+
+
+def resolve_egg_link(path):
+ """
+ Given a path to an .egg-link, resolve distributions
+ present in the referenced path.
+ """
+ referenced_paths = non_empty_lines(path)
+ resolved_paths = (
+ os.path.join(os.path.dirname(path), ref)
+ for ref in referenced_paths
+ )
+ dist_groups = map(find_distributions, resolved_paths)
+ return next(dist_groups, ())
+
+
+register_finder(pkgutil.ImpImporter, find_on_path)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_finder(importlib_machinery.FileFinder, find_on_path)
+
+_declare_state('dict', _namespace_handlers={})
+_declare_state('dict', _namespace_packages={})
+
+
+def register_namespace_handler(importer_type, namespace_handler):
+ """Register `namespace_handler` to declare namespace packages
+
+ `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item
+ handler), and `namespace_handler` is a callable like this::
+
+ def namespace_handler(importer, path_entry, moduleName, module):
+ # return a path_entry to use for child packages
+
+ Namespace handlers are only called if the importer object has already
+ agreed that it can handle the relevant path item, and they should only
+ return a subpath if the module __path__ does not already contain an
+ equivalent subpath. For an example namespace handler, see
+ ``pkg_resources.file_ns_handler``.
+ """
+ _namespace_handlers[importer_type] = namespace_handler
+
+
+def _handle_ns(packageName, path_item):
+ """Ensure that named package includes a subpath of path_item (if needed)"""
+
+ importer = get_importer(path_item)
+ if importer is None:
+ return None
+
+ # use find_spec (PEP 451) and fall-back to find_module (PEP 302)
+ try:
+ loader = importer.find_spec(packageName).loader
+ except AttributeError:
+ # capture warnings due to #1111
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ loader = importer.find_module(packageName)
+
+ if loader is None:
+ return None
+ module = sys.modules.get(packageName)
+ if module is None:
+ module = sys.modules[packageName] = types.ModuleType(packageName)
+ module.__path__ = []
+ _set_parent_ns(packageName)
+ elif not hasattr(module, '__path__'):
+ raise TypeError("Not a package:", packageName)
+ handler = _find_adapter(_namespace_handlers, importer)
+ subpath = handler(importer, path_item, packageName, module)
+ if subpath is not None:
+ path = module.__path__
+ path.append(subpath)
+ importlib.import_module(packageName)
+ _rebuild_mod_path(path, packageName, module)
+ return subpath
+
+
+def _rebuild_mod_path(orig_path, package_name, module):
+ """
+ Rebuild module.__path__ ensuring that all entries are ordered
+ corresponding to their sys.path order
+ """
+ sys_path = [_normalize_cached(p) for p in sys.path]
+
+ def safe_sys_path_index(entry):
+ """
+ Workaround for #520 and #513.
+ """
+ try:
+ return sys_path.index(entry)
+ except ValueError:
+ return float('inf')
+
+ def position_in_sys_path(path):
+ """
+ Return the ordinal of the path based on its position in sys.path
+ """
+ path_parts = path.split(os.sep)
+ module_parts = package_name.count('.') + 1
+ parts = path_parts[:-module_parts]
+ return safe_sys_path_index(_normalize_cached(os.sep.join(parts)))
+
+ new_path = sorted(orig_path, key=position_in_sys_path)
+ new_path = [_normalize_cached(p) for p in new_path]
+
+ if isinstance(module.__path__, list):
+ module.__path__[:] = new_path
+ else:
+ module.__path__ = new_path
+
+
+def declare_namespace(packageName):
+ """Declare that package 'packageName' is a namespace package"""
+
+ _imp.acquire_lock()
+ try:
+ if packageName in _namespace_packages:
+ return
+
+ path = sys.path
+ parent, _, _ = packageName.rpartition('.')
+
+ if parent:
+ declare_namespace(parent)
+ if parent not in _namespace_packages:
+ __import__(parent)
+ try:
+ path = sys.modules[parent].__path__
+ except AttributeError as e:
+ raise TypeError("Not a package:", parent) from e
+
+ # Track what packages are namespaces, so when new path items are added,
+ # they can be updated
+ _namespace_packages.setdefault(parent or None, []).append(packageName)
+ _namespace_packages.setdefault(packageName, [])
+
+ for path_item in path:
+ # Ensure all the parent's path items are reflected in the child,
+ # if they apply
+ _handle_ns(packageName, path_item)
+
+ finally:
+ _imp.release_lock()
+
+
+def fixup_namespace_packages(path_item, parent=None):
+ """Ensure that previously-declared namespace packages include path_item"""
+ _imp.acquire_lock()
+ try:
+ for package in _namespace_packages.get(parent, ()):
+ subpath = _handle_ns(package, path_item)
+ if subpath:
+ fixup_namespace_packages(subpath, package)
+ finally:
+ _imp.release_lock()
+
+
+def file_ns_handler(importer, path_item, packageName, module):
+ """Compute an ns-package subpath for a filesystem or zipfile importer"""
+
+ subpath = os.path.join(path_item, packageName.split('.')[-1])
+ normalized = _normalize_cached(subpath)
+ for item in module.__path__:
+ if _normalize_cached(item) == normalized:
+ break
+ else:
+ # Only return the path if it's not already there
+ return subpath
+
+
+register_namespace_handler(pkgutil.ImpImporter, file_ns_handler)
+register_namespace_handler(zipimport.zipimporter, file_ns_handler)
+
+if hasattr(importlib_machinery, 'FileFinder'):
+ register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler)
+
+
+def null_ns_handler(importer, path_item, packageName, module):
+ return None
+
+
+register_namespace_handler(object, null_ns_handler)
+
+
+def normalize_path(filename):
+ """Normalize a file/dir name for comparison purposes"""
+ return os.path.normcase(os.path.realpath(os.path.normpath(
+ _cygwin_patch(filename))))
+
+
+def _cygwin_patch(filename): # pragma: nocover
+ """
+    Contrary to POSIX 2008, on Cygwin, getcwd(3) may contain
+    symlink components. Using os.path.abspath() works around
+    this limitation. A fix in os.getcwd() would probably be better,
+    especially on Cygwin, except that this behavior seems to be
+    by design...
+ """
+ return os.path.abspath(filename) if sys.platform == 'cygwin' else filename
+
+
+def _normalize_cached(filename, _cache={}):
+ try:
+ return _cache[filename]
+ except KeyError:
+ _cache[filename] = result = normalize_path(filename)
+ return result
+
+
+def _is_egg_path(path):
+ """
+ Determine if given path appears to be an egg.
+ """
+ return _is_zip_egg(path) or _is_unpacked_egg(path)
+
+
+def _is_zip_egg(path):
+ return (
+ path.lower().endswith('.egg') and
+ os.path.isfile(path) and
+ zipfile.is_zipfile(path)
+ )
+
+
+def _is_unpacked_egg(path):
+ """
+ Determine if given path appears to be an unpacked egg.
+ """
+ return (
+ path.lower().endswith('.egg') and
+ os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO'))
+ )
+
+
+def _set_parent_ns(packageName):
+ parts = packageName.split('.')
+ name = parts.pop()
+ if parts:
+ parent = '.'.join(parts)
+ setattr(sys.modules[parent], name, sys.modules[packageName])
+
+
+def yield_lines(strs):
+ """Yield non-empty/non-comment lines of a string or sequence"""
+ if isinstance(strs, str):
+ for s in strs.splitlines():
+ s = s.strip()
+ # skip blank lines/comments
+ if s and not s.startswith('#'):
+ yield s
+ else:
+ for ss in strs:
+ for s in yield_lines(ss):
+ yield s
+
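+# A hedged illustration: yield_lines("a\n# note\n\nb") yields 'a' then 'b';
+# nested sequences of strings are flattened recursively.
+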
+
+MODULE = re.compile(r"\w+(\.\w+)*$").match
+EGG_NAME = re.compile(
+ r"""
+ (?P<name>[^-]+) (
+ -(?P<ver>[^-]+) (
+ -py(?P<pyver>[^-]+) (
+ -(?P<plat>.+)
+ )?
+ )?
+ )?
+ """,
+ re.VERBOSE | re.IGNORECASE,
+).match
+
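+# Hedged example: EGG_NAME('FooPkg-1.2-py3.8-win32') captures name='FooPkg',
+# ver='1.2', pyver='3.8', plat='win32'; for a bare 'FooPkg' the optional
+# groups are None.
+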
+
+class EntryPoint:
+ """Object representing an advertised importable object"""
+
+ def __init__(self, name, module_name, attrs=(), extras=(), dist=None):
+ if not MODULE(module_name):
+ raise ValueError("Invalid module name", module_name)
+ self.name = name
+ self.module_name = module_name
+ self.attrs = tuple(attrs)
+ self.extras = tuple(extras)
+ self.dist = dist
+
+ def __str__(self):
+ s = "%s = %s" % (self.name, self.module_name)
+ if self.attrs:
+ s += ':' + '.'.join(self.attrs)
+ if self.extras:
+ s += ' [%s]' % ','.join(self.extras)
+ return s
+
+ def __repr__(self):
+ return "EntryPoint.parse(%r)" % str(self)
+
+ def load(self, require=True, *args, **kwargs):
+ """
+ Require packages for this EntryPoint, then resolve it.
+ """
+ if not require or args or kwargs:
+ warnings.warn(
+ "Parameters to load are deprecated. Call .resolve and "
+ ".require separately.",
+ PkgResourcesDeprecationWarning,
+ stacklevel=2,
+ )
+ if require:
+ self.require(*args, **kwargs)
+ return self.resolve()
+
+ def resolve(self):
+ """
+ Resolve the entry point from its module and attrs.
+ """
+ module = __import__(self.module_name, fromlist=['__name__'], level=0)
+ try:
+ return functools.reduce(getattr, self.attrs, module)
+ except AttributeError as exc:
+ raise ImportError(str(exc)) from exc
+
+ def require(self, env=None, installer=None):
+ if self.extras and not self.dist:
+ raise UnknownExtra("Can't require() without a distribution", self)
+
+ # Get the requirements for this entry point with all its extras and
+ # then resolve them. We have to pass `extras` along when resolving so
+ # that the working set knows what extras we want. Otherwise, for
+ # dist-info distributions, the working set will assume that the
+ # requirements for that extra are purely optional and skip over them.
+ reqs = self.dist.requires(self.extras)
+ items = working_set.resolve(reqs, env, installer, extras=self.extras)
+ list(map(working_set.add, items))
+
+ pattern = re.compile(
+ r'\s*'
+ r'(?P<name>.+?)\s*'
+ r'=\s*'
+ r'(?P<module>[\w.]+)\s*'
+ r'(:\s*(?P<attr>[\w.]+))?\s*'
+ r'(?P<extras>\[.*\])?\s*$'
+ )
+
+ @classmethod
+ def parse(cls, src, dist=None):
+ """Parse a single entry point from string `src`
+
+ Entry point syntax follows the form::
+
+ name = some.module:some.attr [extra1, extra2]
+
+ The entry name and module name are required, but the ``:attrs`` and
+        ``[extras]`` parts are optional.
+ """
+ m = cls.pattern.match(src)
+ if not m:
+ msg = "EntryPoint must be in 'name=module:attrs [extras]' format"
+ raise ValueError(msg, src)
+ res = m.groupdict()
+ extras = cls._parse_extras(res['extras'])
+ attrs = res['attr'].split('.') if res['attr'] else ()
+ return cls(res['name'], res['module'], attrs, extras, dist)
+
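+    # A hedged example of the accepted syntax (names are hypothetical):
+    # EntryPoint.parse("main = pkg.cli:app [extra]") returns an EntryPoint
+    # with name='main', module_name='pkg.cli', attrs=('app',),
+    # extras=('extra',).
+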
+ @classmethod
+ def _parse_extras(cls, extras_spec):
+ if not extras_spec:
+ return ()
+ req = Requirement.parse('x' + extras_spec)
+ if req.specs:
+ raise ValueError()
+ return req.extras
+
+ @classmethod
+ def parse_group(cls, group, lines, dist=None):
+ """Parse an entry point group"""
+ if not MODULE(group):
+ raise ValueError("Invalid group name", group)
+ this = {}
+ for line in yield_lines(lines):
+ ep = cls.parse(line, dist)
+ if ep.name in this:
+ raise ValueError("Duplicate entry point", group, ep.name)
+ this[ep.name] = ep
+ return this
+
+ @classmethod
+ def parse_map(cls, data, dist=None):
+ """Parse a map of entry point groups"""
+ if isinstance(data, dict):
+ data = data.items()
+ else:
+ data = split_sections(data)
+ maps = {}
+ for group, lines in data:
+ if group is None:
+ if not lines:
+ continue
+ raise ValueError("Entry points must be listed in groups")
+ group = group.strip()
+ if group in maps:
+ raise ValueError("Duplicate group name", group)
+ maps[group] = cls.parse_group(group, lines, dist)
+ return maps
+
+
+def _version_from_file(lines):
+ """
+ Given an iterable of lines from a Metadata file, return
+ the value of the Version field, if present, or None otherwise.
+ """
+ def is_version_line(line):
+ return line.lower().startswith('version:')
+ version_lines = filter(is_version_line, lines)
+ line = next(iter(version_lines), '')
+ _, _, value = line.partition(':')
+ return safe_version(value.strip()) or None
+
+
+class Distribution:
+ """Wrap an actual or potential sys.path entry w/metadata"""
+ PKG_INFO = 'PKG-INFO'
+
+ def __init__(
+ self, location=None, metadata=None, project_name=None,
+ version=None, py_version=PY_MAJOR, platform=None,
+ precedence=EGG_DIST):
+ self.project_name = safe_name(project_name or 'Unknown')
+ if version is not None:
+ self._version = safe_version(version)
+ self.py_version = py_version
+ self.platform = platform
+ self.location = location
+ self.precedence = precedence
+ self._provider = metadata or empty_provider
+
+ @classmethod
+ def from_location(cls, location, basename, metadata=None, **kw):
+ project_name, version, py_version, platform = [None] * 4
+ basename, ext = os.path.splitext(basename)
+ if ext.lower() in _distributionImpl:
+ cls = _distributionImpl[ext.lower()]
+
+ match = EGG_NAME(basename)
+ if match:
+ project_name, version, py_version, platform = match.group(
+ 'name', 'ver', 'pyver', 'plat'
+ )
+ return cls(
+ location, metadata, project_name=project_name, version=version,
+ py_version=py_version, platform=platform, **kw
+ )._reload_version()
+
+ def _reload_version(self):
+ return self
+
+ @property
+ def hashcmp(self):
+ return (
+ self.parsed_version,
+ self.precedence,
+ self.key,
+ self.location,
+ self.py_version or '',
+ self.platform or '',
+ )
+
+ def __hash__(self):
+ return hash(self.hashcmp)
+
+ def __lt__(self, other):
+ return self.hashcmp < other.hashcmp
+
+ def __le__(self, other):
+ return self.hashcmp <= other.hashcmp
+
+ def __gt__(self, other):
+ return self.hashcmp > other.hashcmp
+
+ def __ge__(self, other):
+ return self.hashcmp >= other.hashcmp
+
+ def __eq__(self, other):
+ if not isinstance(other, self.__class__):
+ # It's not a Distribution, so they are not equal
+ return False
+ return self.hashcmp == other.hashcmp
+
+ def __ne__(self, other):
+ return not self == other
+
+ # These properties have to be lazy so that we don't have to load any
+ # metadata until/unless it's actually needed. (i.e., some distributions
+ # may not know their name or version without loading PKG-INFO)
+
+ @property
+ def key(self):
+ try:
+ return self._key
+ except AttributeError:
+ self._key = key = self.project_name.lower()
+ return key
+
+ @property
+ def parsed_version(self):
+ if not hasattr(self, "_parsed_version"):
+ self._parsed_version = parse_version(self.version)
+
+ return self._parsed_version
+
+ def _warn_legacy_version(self):
+ LV = packaging.version.LegacyVersion
+ is_legacy = isinstance(self._parsed_version, LV)
+ if not is_legacy:
+ return
+
+        # While an empty version is technically a legacy version and
+        # not a valid PEP 440 version, it is unlikely to have been
+        # supplied deliberately; more likely it comes from setuptools
+        # attempting to parse a filename. So only warn when the
+        # version is non-empty.
+ if not self.version:
+ return
+
+ tmpl = textwrap.dedent("""
+            '{project_name} ({version})' is being parsed as a legacy,
+            non-PEP 440 version. You may find odd behavior and
+            sort order. In particular it will sort as less than 0.0.
+            It is recommended to migrate to PEP 440 compatible
+            versions.
+ """).strip().replace('\n', ' ')
+
+ warnings.warn(tmpl.format(**vars(self)), PEP440Warning)
+
+ @property
+ def version(self):
+ try:
+ return self._version
+ except AttributeError as e:
+ version = self._get_version()
+ if version is None:
+ path = self._get_metadata_path_for_display(self.PKG_INFO)
+ msg = (
+ "Missing 'Version:' header and/or {} file at path: {}"
+ ).format(self.PKG_INFO, path)
+ raise ValueError(msg, self) from e
+
+ return version
+
+ @property
+ def _dep_map(self):
+ """
+ A map of extra to its list of (direct) requirements
+ for this distribution, including the null extra.
+ """
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._filter_extras(self._build_dep_map())
+ return self.__dep_map
+
+ @staticmethod
+ def _filter_extras(dm):
+ """
+ Given a mapping of extras to dependencies, strip off
+ environment markers and filter out any dependencies
+ not matching the markers.
+ """
+ for extra in list(filter(None, dm)):
+ new_extra = extra
+ reqs = dm.pop(extra)
+ new_extra, _, marker = extra.partition(':')
+ fails_marker = marker and (
+ invalid_marker(marker)
+ or not evaluate_marker(marker)
+ )
+ if fails_marker:
+ reqs = []
+ new_extra = safe_extra(new_extra) or None
+
+ dm.setdefault(new_extra, []).extend(reqs)
+ return dm
+
+ def _build_dep_map(self):
+ dm = {}
+ for name in 'requires.txt', 'depends.txt':
+ for extra, reqs in split_sections(self._get_metadata(name)):
+ dm.setdefault(extra, []).extend(parse_requirements(reqs))
+ return dm
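+
+    # A hedged sketch of the shape built above for a requires.txt containing
+    # "foo" at top level and "pytest" under a "[tests]" section (hypothetical):
+    #   {None: [Requirement for "foo"], 'tests': [Requirement for "pytest"]}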
+
+ def requires(self, extras=()):
+ """List of Requirements needed for this distro if `extras` are used"""
+ dm = self._dep_map
+ deps = []
+ deps.extend(dm.get(None, ()))
+ for ext in extras:
+ try:
+ deps.extend(dm[safe_extra(ext)])
+ except KeyError as e:
+ raise UnknownExtra(
+ "%s has no such extra feature %r" % (self, ext)
+ ) from e
+ return deps
+
+ def _get_metadata_path_for_display(self, name):
+ """
+ Return the path to the given metadata file, if available.
+ """
+ try:
+ # We need to access _get_metadata_path() on the provider object
+ # directly rather than through this class's __getattr__()
+ # since _get_metadata_path() is marked private.
+ path = self._provider._get_metadata_path(name)
+
+ # Handle exceptions e.g. in case the distribution's metadata
+ # provider doesn't support _get_metadata_path().
+ except Exception:
+ return '[could not detect]'
+
+ return path
+
+ def _get_metadata(self, name):
+ if self.has_metadata(name):
+ for line in self.get_metadata_lines(name):
+ yield line
+
+ def _get_version(self):
+ lines = self._get_metadata(self.PKG_INFO)
+ version = _version_from_file(lines)
+
+ return version
+
+ def activate(self, path=None, replace=False):
+ """Ensure distribution is importable on `path` (default=sys.path)"""
+ if path is None:
+ path = sys.path
+ self.insert_on(path, replace=replace)
+ if path is sys.path:
+ fixup_namespace_packages(self.location)
+ for pkg in self._get_metadata('namespace_packages.txt'):
+ if pkg in sys.modules:
+ declare_namespace(pkg)
+
+ def egg_name(self):
+ """Return what this distribution's standard .egg filename should be"""
+ filename = "%s-%s-py%s" % (
+ to_filename(self.project_name), to_filename(self.version),
+ self.py_version or PY_MAJOR
+ )
+
+ if self.platform:
+ filename += '-' + self.platform
+ return filename
+
+ def __repr__(self):
+ if self.location:
+ return "%s (%s)" % (self, self.location)
+ else:
+ return str(self)
+
+ def __str__(self):
+ try:
+ version = getattr(self, 'version', None)
+ except ValueError:
+ version = None
+ version = version or "[unknown version]"
+ return "%s %s" % (self.project_name, version)
+
+ def __getattr__(self, attr):
+ """Delegate all unrecognized public attributes to .metadata provider"""
+ if attr.startswith('_'):
+ raise AttributeError(attr)
+ return getattr(self._provider, attr)
+
+ def __dir__(self):
+ return list(
+ set(super(Distribution, self).__dir__())
+ | set(
+ attr for attr in self._provider.__dir__()
+ if not attr.startswith('_')
+ )
+ )
+
+ @classmethod
+ def from_filename(cls, filename, metadata=None, **kw):
+ return cls.from_location(
+ _normalize_cached(filename), os.path.basename(filename), metadata,
+ **kw
+ )
+
+ def as_requirement(self):
+ """Return a ``Requirement`` that matches this distribution exactly"""
+ if isinstance(self.parsed_version, packaging.version.Version):
+ spec = "%s==%s" % (self.project_name, self.parsed_version)
+ else:
+ spec = "%s===%s" % (self.project_name, self.parsed_version)
+
+ return Requirement.parse(spec)
+
+ def load_entry_point(self, group, name):
+ """Return the `name` entry point of `group` or raise ImportError"""
+ ep = self.get_entry_info(group, name)
+ if ep is None:
+ raise ImportError("Entry point %r not found" % ((group, name),))
+ return ep.load()
+
+ def get_entry_map(self, group=None):
+ """Return the entry point map for `group`, or the full entry map"""
+ try:
+ ep_map = self._ep_map
+ except AttributeError:
+ ep_map = self._ep_map = EntryPoint.parse_map(
+ self._get_metadata('entry_points.txt'), self
+ )
+ if group is not None:
+ return ep_map.get(group, {})
+ return ep_map
+
+ def get_entry_info(self, group, name):
+ """Return the EntryPoint object for `group`+`name`, or ``None``"""
+ return self.get_entry_map(group).get(name)
+
+ def insert_on(self, path, loc=None, replace=False):
+ """Ensure self.location is on path
+
+ If replace=False (default):
+ - If location is already in path anywhere, do nothing.
+ - Else:
+ - If it's an egg and its parent directory is on path,
+ insert just ahead of the parent.
+ - Else: add to the end of path.
+ If replace=True:
+ - If location is already on path anywhere (not eggs)
+ or higher priority than its parent (eggs)
+ do nothing.
+ - Else:
+ - If it's an egg and its parent directory is on path,
+ insert just ahead of the parent,
+ removing any lower-priority entries.
+ - Else: add it to the front of path.
+ """
+
+ loc = loc or self.location
+ if not loc:
+ return
+
+ nloc = _normalize_cached(loc)
+ bdir = os.path.dirname(nloc)
+        npath = [(_normalize_cached(p) if p else p) for p in path]
+
+ for p, item in enumerate(npath):
+ if item == nloc:
+ if replace:
+ break
+ else:
+ # don't modify path (even removing duplicates) if
+ # found and not replace
+ return
+ elif item == bdir and self.precedence == EGG_DIST:
+ # if it's an .egg, give it precedence over its directory
+ # UNLESS it's already been added to sys.path and replace=False
+ if (not replace) and nloc in npath[p:]:
+ return
+ if path is sys.path:
+ self.check_version_conflict()
+ path.insert(p, loc)
+ npath.insert(p, nloc)
+ break
+ else:
+ if path is sys.path:
+ self.check_version_conflict()
+ if replace:
+ path.insert(0, loc)
+ else:
+ path.append(loc)
+ return
+
+ # p is the spot where we found or inserted loc; now remove duplicates
+ while True:
+ try:
+ np = npath.index(nloc, p + 1)
+ except ValueError:
+ break
+ else:
+ del npath[np], path[np]
+                # entries after np have shifted left; rescan from np
+ p = np
+
+ return
+
+ def check_version_conflict(self):
+ if self.key == 'setuptools':
+ # ignore the inevitable setuptools self-conflicts :(
+ return
+
+ nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt'))
+ loc = normalize_path(self.location)
+ for modname in self._get_metadata('top_level.txt'):
+ if (modname not in sys.modules or modname in nsp
+ or modname in _namespace_packages):
+ continue
+ if modname in ('pkg_resources', 'setuptools', 'site'):
+ continue
+ fn = getattr(sys.modules[modname], '__file__', None)
+ if fn and (normalize_path(fn).startswith(loc) or
+ fn.startswith(self.location)):
+ continue
+ issue_warning(
+ "Module %s was already imported from %s, but %s is being added"
+ " to sys.path" % (modname, fn, self.location),
+ )
+
+ def has_version(self):
+ try:
+ self.version
+ except ValueError:
+ issue_warning("Unbuilt egg for " + repr(self))
+ return False
+ return True
+
+ def clone(self, **kw):
+ """Copy this distribution, substituting in any changed keyword args"""
+ names = 'project_name version py_version platform location precedence'
+ for attr in names.split():
+ kw.setdefault(attr, getattr(self, attr, None))
+ kw.setdefault('metadata', self._provider)
+ return self.__class__(**kw)
+
+ @property
+ def extras(self):
+ return [dep for dep in self._dep_map if dep]
+
+
+class EggInfoDistribution(Distribution):
+ def _reload_version(self):
+ """
+        Packages installed by distutils (e.g. numpy or scipy) use an
+        old form of safe_version, so their version numbers can get
+        mangled when converted to filenames (e.g. 1.11.0.dev0+2329eae
+        becomes 1.11.0.dev0_2329eae). Such filenames will not be parsed
+        properly downstream by Distribution and safe_version, so take
+        the extra step of reading the version number from the metadata
+        file itself instead of the filename.
+ """
+ md_version = self._get_version()
+ if md_version:
+ self._version = md_version
+ return self
+
+
+class DistInfoDistribution(Distribution):
+ """
+ Wrap an actual or potential sys.path entry
+ w/metadata, .dist-info style.
+ """
+ PKG_INFO = 'METADATA'
+ EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])")
+
+ @property
+ def _parsed_pkg_info(self):
+ """Parse and cache metadata"""
+ try:
+ return self._pkg_info
+ except AttributeError:
+ metadata = self.get_metadata(self.PKG_INFO)
+ self._pkg_info = email.parser.Parser().parsestr(metadata)
+ return self._pkg_info
+
+ @property
+ def _dep_map(self):
+ try:
+ return self.__dep_map
+ except AttributeError:
+ self.__dep_map = self._compute_dependencies()
+ return self.__dep_map
+
+ def _compute_dependencies(self):
+ """Recompute this distribution's dependencies."""
+ dm = self.__dep_map = {None: []}
+
+ reqs = []
+ # Including any condition expressions
+ for req in self._parsed_pkg_info.get_all('Requires-Dist') or []:
+ reqs.extend(parse_requirements(req))
+
+ def reqs_for_extra(extra):
+ for req in reqs:
+ if not req.marker or req.marker.evaluate({'extra': extra}):
+ yield req
+
+ common = frozenset(reqs_for_extra(None))
+ dm[None].extend(common)
+
+ for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []:
+ s_extra = safe_extra(extra.strip())
+ dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common)
+
+ return dm
+
+
+_distributionImpl = {
+ '.egg': Distribution,
+ '.egg-info': EggInfoDistribution,
+ '.dist-info': DistInfoDistribution,
+}
+
+
+def issue_warning(*args, **kw):
+ level = 1
+ g = globals()
+ try:
+ # find the first stack frame that is *not* code in
+ # the pkg_resources module, to use for the warning
+ while sys._getframe(level).f_globals is g:
+ level += 1
+ except ValueError:
+ pass
+ warnings.warn(stacklevel=level + 1, *args, **kw)
+
+
+def parse_requirements(strs):
+ """Yield ``Requirement`` objects for each specification in `strs`
+
+ `strs` must be a string, or a (possibly-nested) iterable thereof.
+ """
+ # create a steppable iterator, so we can handle \-continuations
+ lines = iter(yield_lines(strs))
+
+ for line in lines:
+ # Drop comments -- a hash without a space may be in a URL.
+ if ' #' in line:
+ line = line[:line.find(' #')]
+ # If there is a line continuation, drop it, and append the next line.
+ if line.endswith('\\'):
+            line = line[:-1].strip()
+ try:
+ line += next(lines)
+ except StopIteration:
+ return
+ yield Requirement(line)
+
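+# Hedged usage: list(parse_requirements("foo>=1.0")) yields one Requirement
+# with key='foo' and specs=[('>=', '1.0')]; a trailing backslash joins a
+# line with the one that follows it.
+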
+
+class RequirementParseError(packaging.requirements.InvalidRequirement):
+ "Compatibility wrapper for InvalidRequirement"
+
+
+class Requirement(packaging.requirements.Requirement):
+ def __init__(self, requirement_string):
+ """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!"""
+ super(Requirement, self).__init__(requirement_string)
+ self.unsafe_name = self.name
+ project_name = safe_name(self.name)
+ self.project_name, self.key = project_name, project_name.lower()
+ self.specs = [
+ (spec.operator, spec.version) for spec in self.specifier]
+ self.extras = tuple(map(safe_extra, self.extras))
+ self.hashCmp = (
+ self.key,
+ self.url,
+ self.specifier,
+ frozenset(self.extras),
+ str(self.marker) if self.marker else None,
+ )
+ self.__hash = hash(self.hashCmp)
+
+ def __eq__(self, other):
+ return (
+ isinstance(other, Requirement) and
+ self.hashCmp == other.hashCmp
+ )
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __contains__(self, item):
+ if isinstance(item, Distribution):
+ if item.key != self.key:
+ return False
+
+ item = item.version
+
+ # Allow prereleases always in order to match the previous behavior of
+ # this method. In the future this should be smarter and follow PEP 440
+ # more accurately.
+ return self.specifier.contains(item, prereleases=True)
+
+ def __hash__(self):
+ return self.__hash
+
+ def __repr__(self):
+ return "Requirement.parse(%r)" % str(self)
+
+ @staticmethod
+ def parse(s):
+ req, = parse_requirements(s)
+ return req
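+
+    # Hedged example: Requirement.parse("foo[bar]>=1.0") has key='foo',
+    # extras=('bar',), and specs=[('>=', '1.0')].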
+
+
+def _always_object(classes):
+ """
+ Ensure object appears in the mro even
+ for old-style classes.
+ """
+ if object not in classes:
+ return classes + (object,)
+ return classes
+
+
+def _find_adapter(registry, ob):
+ """Return an adapter factory for `ob` from `registry`"""
+ types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob))))
+ for t in types:
+ if t in registry:
+ return registry[t]
+
+
+def ensure_directory(path):
+ """Ensure that the parent directory of `path` exists"""
+ dirname = os.path.dirname(path)
+ os.makedirs(dirname, exist_ok=True)
+
+
+def _bypass_ensure_directory(path):
+ """Sandbox-bypassing version of ensure_directory()"""
+ if not WRITE_SUPPORT:
+ raise IOError('"os.mkdir" not supported on this platform.')
+ dirname, filename = split(path)
+ if dirname and filename and not isdir(dirname):
+ _bypass_ensure_directory(dirname)
+ try:
+ mkdir(dirname, 0o755)
+ except FileExistsError:
+ pass
+
+
+def split_sections(s):
+ """Split a string or iterable thereof into (section, content) pairs
+
+ Each ``section`` is a stripped version of the section header ("[section]")
+ and each ``content`` is a list of stripped lines excluding blank lines and
+ comment-only lines. If there are any such lines before the first section
+ header, they're returned in a first ``section`` of ``None``.
+ """
+ section = None
+ content = []
+ for line in yield_lines(s):
+ if line.startswith("["):
+ if line.endswith("]"):
+ if section or content:
+ yield section, content
+ section = line[1:-1].strip()
+ content = []
+ else:
+ raise ValueError("Invalid section heading", line)
+ else:
+ content.append(line)
+
+ # wrap up last segment
+ yield section, content
+
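+# A hedged illustration: split_sections("a\n[sec]\nb") yields (None, ['a'])
+# and then ('sec', ['b']).
+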
+
+def _mkstemp(*args, **kw):
+ old_open = os.open
+ try:
+ # temporarily bypass sandboxing
+ os.open = os_open
+ return tempfile.mkstemp(*args, **kw)
+ finally:
+ # and then put it back
+ os.open = old_open
+
+
+# Silence the PEP440Warning by default, so that end users don't get hit by it
+# randomly just because they use pkg_resources. We want to append the rule
+# because we want earlier uses of filterwarnings to take precedence over this
+# one.
+warnings.filterwarnings("ignore", category=PEP440Warning, append=True)
+
+
+# from jaraco.functools 1.3
+def _call_aside(f, *args, **kwargs):
+ f(*args, **kwargs)
+ return f
+
+
+@_call_aside
+def _initialize(g=globals()):
+ "Set up global resource manager (deliberately not state-saved)"
+ manager = ResourceManager()
+ g['_manager'] = manager
+ g.update(
+ (name, getattr(manager, name))
+ for name in dir(manager)
+ if not name.startswith('_')
+ )
+
+
+@_call_aside
+def _initialize_master_working_set():
+ """
+ Prepare the master working set and make the ``require()``
+ API available.
+
+ This function has explicit effects on the global state
+ of pkg_resources. It is intended to be invoked once at
+ the initialization of this module.
+
+ Invocation by other packages is unsupported and done
+ at their own risk.
+ """
+ working_set = WorkingSet._build_master()
+ _declare_state('object', working_set=working_set)
+
+ require = working_set.require
+ iter_entry_points = working_set.iter_entry_points
+ add_activation_listener = working_set.subscribe
+ run_script = working_set.run_script
+ # backward compatibility
+ run_main = run_script
+ # Activate all distributions already on sys.path with replace=False and
+ # ensure that all distributions added to the working set in the future
+ # (e.g. by calling ``require()``) will get activated as well,
+ # with higher priority (replace=True).
+ tuple(
+ dist.activate(replace=False)
+ for dist in working_set
+ )
+ add_activation_listener(
+ lambda dist: dist.activate(replace=True),
+ existing=False,
+ )
+ working_set.entries = []
+    # match the order of entries on sys.path
+ list(map(working_set.add_entry, sys.path))
+ globals().update(locals())
+
+
+class PkgResourcesDeprecationWarning(Warning):
+ """
+ Base class for warning about deprecations in ``pkg_resources``
+
+ This class is not derived from ``DeprecationWarning``, and as such is
+ visible by default.
+ """
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/__init__.py b/third_party/python/setuptools/pkg_resources/_vendor/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/__init__.py
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/appdirs.py b/third_party/python/setuptools/pkg_resources/_vendor/appdirs.py
new file mode 100644
index 0000000000..ae67001af8
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/appdirs.py
@@ -0,0 +1,608 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+# Copyright (c) 2005-2010 ActiveState Software Inc.
+# Copyright (c) 2013 Eddy Petrișor
+
+"""Utilities for determining application-specific dirs.
+
+See <http://github.com/ActiveState/appdirs> for details and usage.
+"""
+# Dev Notes:
+# - MSDN on where to store app data files:
+# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
+# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
+# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
+
+__version_info__ = (1, 4, 3)
+__version__ = '.'.join(map(str, __version_info__))
+
+
+import sys
+import os
+
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+ unicode = str
+
+if sys.platform.startswith('java'):
+ import platform
+ os_name = platform.java_ver()[3][0]
+ if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
+ system = 'win32'
+ elif os_name.startswith('Mac'): # "Mac OS X", etc.
+ system = 'darwin'
+ else: # "Linux", "SunOS", "FreeBSD", etc.
+ # Setting this to "linux2" is not ideal, but only Windows or Mac
+ # are actually checked for and the rest of the module expects
+ # *sys.platform* style strings.
+ system = 'linux2'
+else:
+ system = sys.platform
+
+
+def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user data directories are:
+ Mac OS X: ~/Library/Application Support/<AppName>
+ Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
+ Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
+ Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
+ Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
+ Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
+
+ For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
+ That means, by default "~/.local/share/<AppName>".
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+        const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
+ path = os.path.normpath(_get_win_folder(const))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ elif system == 'darwin':
+ path = os.path.expanduser('~/Library/Application Support/')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
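+# Hedged example (app names hypothetical): on Linux with XDG_DATA_HOME unset,
+# user_data_dir("SuperApp", "Acme") -> "~/.local/share/SuperApp" (expanded);
+# on Windows (roaming=False) -> C:\Users\<user>\AppData\Local\Acme\SuperApp.
+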
+
+def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
+ r"""Return full path to the user-shared data dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "multipath" is an optional parameter only applicable to *nix
+ which indicates that the entire list of data dirs should be
+ returned. By default, the first item from XDG_DATA_DIRS is
+ returned, or '/usr/local/share/<AppName>',
+ if XDG_DATA_DIRS is not set
+
+ Typical site data directories are:
+ Mac OS X: /Library/Application Support/<AppName>
+ Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
+ Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+ Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
+
+ For Unix, this is using the $XDG_DATA_DIRS[0] default.
+
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ elif system == 'darwin':
+ path = os.path.expanduser('/Library/Application Support')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ # XDG default for $XDG_DATA_DIRS
+ # only first, if multipath is False
+ path = os.getenv('XDG_DATA_DIRS',
+ os.pathsep.join(['/usr/local/share', '/usr/share']))
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+ if appname:
+ if version:
+ appname = os.path.join(appname, version)
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+ if multipath:
+ path = os.pathsep.join(pathlist)
+ else:
+ path = pathlist[0]
+ return path
+
+    # reached only on win32/darwin; the *nix branch returns above
+    if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific config dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user config directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
+ That means, by default "~/.config/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
+    r"""Return full path to the user-shared config dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "multipath" is an optional parameter only applicable to *nix
+ which indicates that the entire list of config dirs should be
+ returned. By default, the first item from XDG_CONFIG_DIRS is
+ returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
+
+ Typical site config directories are:
+ Mac OS X: same as site_data_dir
+ Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
+ $XDG_CONFIG_DIRS
+ Win *: same as site_data_dir
+ Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
+
+ For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
+
+ WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
+ """
+ if system in ["win32", "darwin"]:
+ path = site_data_dir(appname, appauthor)
+ if appname and version:
+ path = os.path.join(path, version)
+ else:
+ # XDG default for $XDG_CONFIG_DIRS
+ # only first, if multipath is False
+ path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
+ pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
+ if appname:
+ if version:
+ appname = os.path.join(appname, version)
+ pathlist = [os.sep.join([x, appname]) for x in pathlist]
+
+ if multipath:
+ path = os.pathsep.join(pathlist)
+ else:
+ path = pathlist[0]
+ return path
+
+
+def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
+ r"""Return full path to the user-specific cache dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "opinion" (boolean) can be False to disable the appending of
+ "Cache" to the base app data dir for Windows. See
+ discussion below.
+
+ Typical user cache directories are:
+ Mac OS X: ~/Library/Caches/<AppName>
+ Unix: ~/.cache/<AppName> (XDG default)
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
+
+ On Windows the only suggestion in the MSDN docs is that local settings go in
+ the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
+ app data dir (the default returned by `user_data_dir` above). Apps typically
+ put cache data somewhere *under* the given dir here. Some examples:
+ ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
+ ...\Acme\SuperApp\Cache\1.0
+ OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
+ This can be disabled with the `opinion=False` option.
+ """
+ if system == "win32":
+ if appauthor is None:
+ appauthor = appname
+ path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
+ if appname:
+ if appauthor is not False:
+ path = os.path.join(path, appauthor, appname)
+ else:
+ path = os.path.join(path, appname)
+ if opinion:
+ path = os.path.join(path, "Cache")
+ elif system == 'darwin':
+ path = os.path.expanduser('~/Library/Caches')
+ if appname:
+ path = os.path.join(path, appname)
+ else:
+ path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
+ r"""Return full path to the user-specific state dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "roaming" (boolean, default False) can be set True to use the Windows
+ roaming appdata directory. That means that for users on a Windows
+ network setup for roaming profiles, this user data will be
+ sync'd on login. See
+ <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
+ for a discussion of issues.
+
+ Typical user state directories are:
+ Mac OS X: same as user_data_dir
+ Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
+ Win *: same as user_data_dir
+
+ For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
+ to extend the XDG spec and support $XDG_STATE_HOME.
+
+ That means, by default "~/.local/state/<AppName>".
+ """
+ if system in ["win32", "darwin"]:
+ path = user_data_dir(appname, appauthor, None, roaming)
+ else:
+ path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
+ if appname:
+ path = os.path.join(path, appname)
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
+ r"""Return full path to the user-specific log dir for this application.
+
+ "appname" is the name of application.
+ If None, just the system directory is returned.
+ "appauthor" (only used on Windows) is the name of the
+ appauthor or distributing body for this application. Typically
+ it is the owning company name. This falls back to appname. You may
+ pass False to disable it.
+ "version" is an optional version path element to append to the
+ path. You might want to use this if you want multiple versions
+ of your app to be able to run independently. If used, this
+ would typically be "<major>.<minor>".
+ Only applied when appname is present.
+ "opinion" (boolean) can be False to disable the appending of
+ "Logs" to the base app data dir for Windows, and "log" to the
+ base cache dir for Unix. See discussion below.
+
+ Typical user log directories are:
+ Mac OS X: ~/Library/Logs/<AppName>
+ Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
+ Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
+ Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
+
+ On Windows the only suggestion in the MSDN docs is that local settings
+ go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
+ examples of what some Windows apps use for a logs dir.)
+
+ OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
+ value for Windows and appends "log" to the user cache dir for Unix.
+ This can be disabled with the `opinion=False` option.
+ """
+ if system == "darwin":
+ path = os.path.join(
+ os.path.expanduser('~/Library/Logs'),
+ appname)
+ elif system == "win32":
+ path = user_data_dir(appname, appauthor, version)
+ version = False
+ if opinion:
+ path = os.path.join(path, "Logs")
+ else:
+ path = user_cache_dir(appname, appauthor, version)
+ version = False
+ if opinion:
+ path = os.path.join(path, "log")
+ if appname and version:
+ path = os.path.join(path, version)
+ return path
+
+
+class AppDirs(object):
+ """Convenience wrapper for getting application dirs."""
+ def __init__(self, appname=None, appauthor=None, version=None,
+ roaming=False, multipath=False):
+ self.appname = appname
+ self.appauthor = appauthor
+ self.version = version
+ self.roaming = roaming
+ self.multipath = multipath
+
+ @property
+ def user_data_dir(self):
+ return user_data_dir(self.appname, self.appauthor,
+ version=self.version, roaming=self.roaming)
+
+ @property
+ def site_data_dir(self):
+ return site_data_dir(self.appname, self.appauthor,
+ version=self.version, multipath=self.multipath)
+
+ @property
+ def user_config_dir(self):
+ return user_config_dir(self.appname, self.appauthor,
+ version=self.version, roaming=self.roaming)
+
+ @property
+ def site_config_dir(self):
+ return site_config_dir(self.appname, self.appauthor,
+ version=self.version, multipath=self.multipath)
+
+ @property
+ def user_cache_dir(self):
+ return user_cache_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
+ def user_state_dir(self):
+ return user_state_dir(self.appname, self.appauthor,
+ version=self.version)
+
+ @property
+ def user_log_dir(self):
+ return user_log_dir(self.appname, self.appauthor,
+ version=self.version)
+
+
+#---- internal support stuff
+
+def _get_win_folder_from_registry(csidl_name):
+ """This is a fallback technique at best. I'm not sure if using the
+ registry for this guarantees us the correct answer for all CSIDL_*
+ names.
+ """
+ if PY3:
+ import winreg as _winreg
+ else:
+ import _winreg
+
+ shell_folder_name = {
+ "CSIDL_APPDATA": "AppData",
+ "CSIDL_COMMON_APPDATA": "Common AppData",
+ "CSIDL_LOCAL_APPDATA": "Local AppData",
+ }[csidl_name]
+
+ key = _winreg.OpenKey(
+ _winreg.HKEY_CURRENT_USER,
+ r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
+ )
+ dir, type = _winreg.QueryValueEx(key, shell_folder_name)
+ return dir
+
+
+def _get_win_folder_with_pywin32(csidl_name):
+ from win32com.shell import shellcon, shell
+ dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
+ # Try to make this a unicode path because SHGetFolderPath does
+ # not return unicode strings when there is unicode data in the
+ # path.
+ try:
+ dir = unicode(dir)
+
+ # Downgrade to short path name if it has high-bit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in dir:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ try:
+ import win32api
+ dir = win32api.GetShortPathName(dir)
+ except ImportError:
+ pass
+ except UnicodeError:
+ pass
+ return dir
+
+
+def _get_win_folder_with_ctypes(csidl_name):
+ import ctypes
+
+ csidl_const = {
+ "CSIDL_APPDATA": 26,
+ "CSIDL_COMMON_APPDATA": 35,
+ "CSIDL_LOCAL_APPDATA": 28,
+ }[csidl_name]
+
+ buf = ctypes.create_unicode_buffer(1024)
+ ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
+
+ # Downgrade to short path name if it has high-bit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in buf:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ buf2 = ctypes.create_unicode_buffer(1024)
+ if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
+ buf = buf2
+
+ return buf.value
+
+def _get_win_folder_with_jna(csidl_name):
+ import array
+ from com.sun import jna
+ from com.sun.jna.platform import win32
+
+ buf_size = win32.WinDef.MAX_PATH * 2
+ buf = array.zeros('c', buf_size)
+ shell = win32.Shell32.INSTANCE
+ shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+ # Downgrade to short path name if it has high-bit chars. See
+ # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
+ has_high_char = False
+ for c in dir:
+ if ord(c) > 255:
+ has_high_char = True
+ break
+ if has_high_char:
+ buf = array.zeros('c', buf_size)
+ kernel = win32.Kernel32.INSTANCE
+ if kernel.GetShortPathName(dir, buf, buf_size):
+ dir = jna.Native.toString(buf.tostring()).rstrip("\0")
+
+ return dir
+
+if system == "win32":
+ try:
+ import win32com.shell
+ _get_win_folder = _get_win_folder_with_pywin32
+ except ImportError:
+ try:
+ from ctypes import windll
+ _get_win_folder = _get_win_folder_with_ctypes
+ except ImportError:
+ try:
+ import com.sun.jna
+ _get_win_folder = _get_win_folder_with_jna
+ except ImportError:
+ _get_win_folder = _get_win_folder_from_registry
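+
+# Net effect (illustrative): _get_win_folder resolves at import time to the
+# first available backend: pywin32, then ctypes, then JNA (for Jython), and
+# finally the registry-based fallback.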
+
+
+#---- self test code
+
+if __name__ == "__main__":
+ appname = "MyApp"
+ appauthor = "MyCompany"
+
+ props = ("user_data_dir",
+ "user_config_dir",
+ "user_cache_dir",
+ "user_state_dir",
+ "user_log_dir",
+ "site_data_dir",
+ "site_config_dir")
+
+ print("-- app dirs %s --" % __version__)
+
+ print("-- app dirs (with optional 'version')")
+ dirs = AppDirs(appname, appauthor, version="1.0")
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (without optional 'version')")
+ dirs = AppDirs(appname, appauthor)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (without optional 'appauthor')")
+ dirs = AppDirs(appname)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
+
+ print("\n-- app dirs (with disabled 'appauthor')")
+ dirs = AppDirs(appname, appauthor=False)
+ for prop in props:
+ print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/__about__.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/__about__.py
new file mode 100644
index 0000000000..4d998578d7
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/__about__.py
@@ -0,0 +1,27 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "20.4"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "Copyright 2014-2019 %s" % __author__
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/__init__.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/__init__.py
new file mode 100644
index 0000000000..a0cf67df52
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/__init__.py
@@ -0,0 +1,26 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+ __author__,
+ __copyright__,
+ __email__,
+ __license__,
+ __summary__,
+ __title__,
+ __uri__,
+ __version__,
+)
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/_compat.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_compat.py
new file mode 100644
index 0000000000..e54bd4ede8
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_compat.py
@@ -0,0 +1,38 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from ._typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Any, Dict, Tuple, Type
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+if PY3:
+ string_types = (str,)
+else:
+ string_types = (basestring,)
+
+
+def with_metaclass(meta, *bases):
+ # type: (Type[Any], Tuple[Type[Any], ...]) -> Any
+ """
+ Create a base class with a metaclass.
+ """
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta): # type: ignore
+ def __new__(cls, name, this_bases, d):
+ # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any
+ return meta(name, bases, d)
+
+ return type.__new__(metaclass, "temporary_class", (), {})
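+
+# Example usage (illustrative), as the vendored specifiers module does for a
+# Python 2/3 compatible abstract base class:
+#   class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): ...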
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/_structures.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_structures.py
new file mode 100644
index 0000000000..800d5c5588
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_structures.py
@@ -0,0 +1,86 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
+class InfinityType(object):
+ def __repr__(self):
+ # type: () -> str
+ return "Infinity"
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __le__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __ge__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __neg__(self):
+ # type: (object) -> NegativeInfinityType
+ return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType(object):
+ def __repr__(self):
+ # type: () -> str
+ return "-Infinity"
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __le__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __ge__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __neg__(self):
+ # type: (object) -> InfinityType
+ return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
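+
+# These singletons serve as comparison sentinels when building sort keys
+# (illustrative): NegativeInfinity orders before, and Infinity after, any
+# other value, e.g.
+#   sorted([1, Infinity, NegativeInfinity]) == [NegativeInfinity, 1, Infinity]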
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/_typing.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_typing.py
new file mode 100644
index 0000000000..77a8b9185a
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/_typing.py
@@ -0,0 +1,48 @@
+"""For neatly implementing static typing in packaging.
+
+`mypy` - the static type analysis tool we use - uses the `typing` module, which
+provides core functionality fundamental to mypy's functioning.
+
+Generally, `typing` would be imported at runtime and used in that fashion -
+it acts as a no-op at runtime and does not have any run-time overhead by
+design.
+
+As it turns out, `typing` is not vendorable - it uses separate sources for
+Python 2/Python 3. Thus, this codebase cannot expect it to be present.
+To work around this, mypy allows the typing import to be behind a False-y
+optional to prevent it from running at runtime, and type comments can be used
+to remove the need for the types to be accessible directly during runtime.
+
+This module provides the False-y guard in a nicely named fashion so that a
+curious maintainer can reach here to read this.
+
+In packaging, all static-typing related imports should be guarded as follows:
+
+ from packaging._typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from typing import ...
+
+Ref: https://github.com/python/mypy/issues/3216
+"""
+
+__all__ = ["TYPE_CHECKING", "cast"]
+
+# The TYPE_CHECKING constant defined by the typing module is False at runtime
+# but True while type checking.
+if False: # pragma: no cover
+ from typing import TYPE_CHECKING
+else:
+ TYPE_CHECKING = False
+
+# typing's cast syntax requires calling typing.cast at runtime, but we don't
+# want to import typing at runtime. Here, we inform the type checkers that
+# we're importing `typing.cast` as `cast` and re-implement typing.cast's
+# runtime behavior in a block that is ignored by type checkers.
+if TYPE_CHECKING: # pragma: no cover
+ # not executed at runtime
+ from typing import cast
+else:
+ # executed at runtime
+ def cast(type_, value): # noqa
+ return value
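+
+# Example (illustrative): at runtime this is a no-op passthrough, while type
+# checkers see typing.cast:
+#   value = cast("Optional[str]", maybe_str)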
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/markers.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/markers.py
new file mode 100644
index 0000000000..fd1559c10e
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/markers.py
@@ -0,0 +1,328 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from pkg_resources.extern.pyparsing import Literal as L # noqa
+
+from ._compat import string_types
+from ._typing import TYPE_CHECKING
+from .specifiers import Specifier, InvalidSpecifier
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ Operator = Callable[[str, str], bool]
+
+
+__all__ = [
+ "InvalidMarker",
+ "UndefinedComparison",
+ "UndefinedEnvironmentName",
+ "Marker",
+ "default_environment",
+]
+
+
+class InvalidMarker(ValueError):
+ """
+ An invalid marker was found, users should refer to PEP 508.
+ """
+
+
+class UndefinedComparison(ValueError):
+ """
+ An invalid operation was attempted on a value that doesn't support it.
+ """
+
+
+class UndefinedEnvironmentName(ValueError):
+ """
+ An attempt was made to use a name that does not exist inside of the
+ environment.
+ """
+
+
+class Node(object):
+ def __init__(self, value):
+ # type: (Any) -> None
+ self.value = value
+
+ def __str__(self):
+ # type: () -> str
+ return str(self.value)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+
+ def serialize(self):
+ # type: () -> str
+ raise NotImplementedError
+
+
+class Variable(Node):
+ def serialize(self):
+ # type: () -> str
+ return str(self)
+
+
+class Value(Node):
+ def serialize(self):
+ # type: () -> str
+ return '"{0}"'.format(self)
+
+
+class Op(Node):
+ def serialize(self):
+ # type: () -> str
+ return str(self)
+
+
+VARIABLE = (
+ L("implementation_version")
+ | L("platform_python_implementation")
+ | L("implementation_name")
+ | L("python_full_version")
+ | L("platform_release")
+ | L("platform_version")
+ | L("platform_machine")
+ | L("platform_system")
+ | L("python_version")
+ | L("sys_platform")
+ | L("os_name")
+ | L("os.name") # PEP-345
+ | L("sys.platform") # PEP-345
+ | L("platform.version") # PEP-345
+ | L("platform.machine") # PEP-345
+ | L("platform.python_implementation") # PEP-345
+ | L("python_implementation") # undocumented setuptools legacy
+ | L("extra") # PEP-508
+)
+ALIASES = {
+ "os.name": "os_name",
+ "sys.platform": "sys_platform",
+ "platform.version": "platform_version",
+ "platform.machine": "platform_machine",
+ "platform.python_implementation": "platform_python_implementation",
+ "python_implementation": "platform_python_implementation",
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+ L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
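+
+# For example (illustrative), the marker
+#   os_name == "posix" and python_version >= "3.6"
+# parses into (Variable, Op, Value) triples joined by the strings "and"/"or",
+# which _evaluate_markers below walks recursively.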
+
+
+def _coerce_parse_result(results):
+ # type: (Union[ParseResults, List[Any]]) -> List[Any]
+ if isinstance(results, ParseResults):
+ return [_coerce_parse_result(i) for i in results]
+ else:
+ return results
+
+
+def _format_marker(marker, first=True):
+ # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
+
+ assert isinstance(marker, (list, tuple, string_types))
+
+ # Sometimes we have a structure like [[...]] which is a single item list
+ # where the single item is itself its own list. In that case we want to skip
+ # the rest of this function so that we don't get extraneous () on the
+ # outside.
+ if (
+ isinstance(marker, list)
+ and len(marker) == 1
+ and isinstance(marker[0], (list, tuple))
+ ):
+ return _format_marker(marker[0])
+
+ if isinstance(marker, list):
+ inner = (_format_marker(m, first=False) for m in marker)
+ if first:
+ return " ".join(inner)
+ else:
+ return "(" + " ".join(inner) + ")"
+ elif isinstance(marker, tuple):
+ return " ".join([m.serialize() for m in marker])
+ else:
+ return marker
+
+
+_operators = {
+ "in": lambda lhs, rhs: lhs in rhs,
+ "not in": lambda lhs, rhs: lhs not in rhs,
+ "<": operator.lt,
+ "<=": operator.le,
+ "==": operator.eq,
+ "!=": operator.ne,
+ ">=": operator.ge,
+ ">": operator.gt,
+} # type: Dict[str, Operator]
+
+
+def _eval_op(lhs, op, rhs):
+ # type: (str, Op, str) -> bool
+ try:
+ spec = Specifier("".join([op.serialize(), rhs]))
+ except InvalidSpecifier:
+ pass
+ else:
+ return spec.contains(lhs)
+
+ oper = _operators.get(op.serialize()) # type: Optional[Operator]
+ if oper is None:
+ raise UndefinedComparison(
+ "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
+ )
+
+ return oper(lhs, rhs)
+
+
+class Undefined(object):
+ pass
+
+
+_undefined = Undefined()
+
+
+def _get_env(environment, name):
+ # type: (Dict[str, str], str) -> str
+ value = environment.get(name, _undefined) # type: Union[str, Undefined]
+
+ if isinstance(value, Undefined):
+ raise UndefinedEnvironmentName(
+ "{0!r} does not exist in evaluation environment.".format(name)
+ )
+
+ return value
+
+
+def _evaluate_markers(markers, environment):
+ # type: (List[Any], Dict[str, str]) -> bool
+ groups = [[]] # type: List[List[bool]]
+
+ for marker in markers:
+ assert isinstance(marker, (list, tuple, string_types))
+
+ if isinstance(marker, list):
+ groups[-1].append(_evaluate_markers(marker, environment))
+ elif isinstance(marker, tuple):
+ lhs, op, rhs = marker
+
+ if isinstance(lhs, Variable):
+ lhs_value = _get_env(environment, lhs.value)
+ rhs_value = rhs.value
+ else:
+ lhs_value = lhs.value
+ rhs_value = _get_env(environment, rhs.value)
+
+ groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+ else:
+ assert marker in ["and", "or"]
+ if marker == "or":
+ groups.append([])
+
+ return any(all(item) for item in groups)
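+
+# In effect (illustrative): "a and b or c" is grouped as [[a, b], [c]], since
+# "or" starts a new group, and the whole marker is any(all(g) for g in groups).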
+
+
+def format_full_version(info):
+ # type: (sys._version_info) -> str
+ version = "{0.major}.{0.minor}.{0.micro}".format(info)
+ kind = info.releaselevel
+ if kind != "final":
+ version += kind[0] + str(info.serial)
+ return version
+
+
+def default_environment():
+ # type: () -> Dict[str, str]
+ if hasattr(sys, "implementation"):
+ # Ignoring the `sys.implementation` reference for type checking due to
+ # mypy not liking that the attribute doesn't exist in Python 2.7 when
+ # run with the `--py27` flag.
+ iver = format_full_version(sys.implementation.version) # type: ignore
+ implementation_name = sys.implementation.name # type: ignore
+ else:
+ iver = "0"
+ implementation_name = ""
+
+ return {
+ "implementation_name": implementation_name,
+ "implementation_version": iver,
+ "os_name": os.name,
+ "platform_machine": platform.machine(),
+ "platform_release": platform.release(),
+ "platform_system": platform.system(),
+ "platform_version": platform.version(),
+ "python_full_version": platform.python_version(),
+ "platform_python_implementation": platform.python_implementation(),
+ "python_version": ".".join(platform.python_version_tuple()[:2]),
+ "sys_platform": sys.platform,
+ }
+
+
+class Marker(object):
+ def __init__(self, marker):
+ # type: (str) -> None
+ try:
+ self._markers = _coerce_parse_result(MARKER.parseString(marker))
+ except ParseException as e:
+ err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
+ marker, marker[e.loc : e.loc + 8]
+ )
+ raise InvalidMarker(err_str)
+
+ def __str__(self):
+ # type: () -> str
+ return _format_marker(self._markers)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Marker({0!r})>".format(str(self))
+
+ def evaluate(self, environment=None):
+ # type: (Optional[Dict[str, str]]) -> bool
+ """Evaluate a marker.
+
+ Return the boolean from evaluating the given marker against the
+ environment. environment is an optional argument to override all or
+ part of the determined environment.
+
+ The environment is determined from the current Python process.
+ """
+ current_environment = default_environment()
+ if environment is not None:
+ current_environment.update(environment)
+
+ return _evaluate_markers(self._markers, current_environment)
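+
+# Example (illustrative; the result depends on the running interpreter):
+#   >>> Marker('python_version >= "3.6" and os_name == "posix"').evaluate()
+#   True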
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/requirements.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/requirements.py
new file mode 100644
index 0000000000..9495a1df1e
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/requirements.py
@@ -0,0 +1,145 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from pkg_resources.extern.pyparsing import Literal as L # noqa
+from urllib import parse as urlparse
+
+from ._typing import TYPE_CHECKING
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import List
+
+
+class InvalidRequirement(ValueError):
+ """
+ An invalid requirement was found, users should refer to PEP 508.
+ """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r"[^ ]+")("url")
+URL = AT + URI
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(
+ VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+)("_raw_spec")
+_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+ lambda s, l, t: Marker(s[t._original_start : t._original_end])
+)
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+# pkg_resources.extern.pyparsing isn't thread safe during initialization, so we do it eagerly, see
+# issue #104
+REQUIREMENT.parseString("x[]")
+
+
+class Requirement(object):
+ """Parse a requirement.
+
+ Parse a given requirement string into its parts, such as name, specifier,
+ URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+ string.
+ """
+
+ # TODO: Can we test whether something is contained within a requirement?
+ # If so how do we do that? Do we need to test against the _name_ of
+ # the thing as well as the version? What about the markers?
+ # TODO: Can we normalize the name and extra name?
+
+ def __init__(self, requirement_string):
+ # type: (str) -> None
+ try:
+ req = REQUIREMENT.parseString(requirement_string)
+ except ParseException as e:
+ raise InvalidRequirement(
+ 'Parse error at "{0!r}": {1}'.format(
+ requirement_string[e.loc : e.loc + 8], e.msg
+ )
+ )
+
+ self.name = req.name
+ if req.url:
+ parsed_url = urlparse.urlparse(req.url)
+ if parsed_url.scheme == "file":
+ if urlparse.urlunparse(parsed_url) != req.url:
+ raise InvalidRequirement("Invalid URL given")
+ elif not (parsed_url.scheme and parsed_url.netloc) or (
+ not parsed_url.scheme and not parsed_url.netloc
+ ):
+ raise InvalidRequirement("Invalid URL: {0}".format(req.url))
+ self.url = req.url
+ else:
+ self.url = None
+ self.extras = set(req.extras.asList() if req.extras else [])
+ self.specifier = SpecifierSet(req.specifier)
+ self.marker = req.marker if req.marker else None
+
+ def __str__(self):
+ # type: () -> str
+ parts = [self.name] # type: List[str]
+
+ if self.extras:
+ parts.append("[{0}]".format(",".join(sorted(self.extras))))
+
+ if self.specifier:
+ parts.append(str(self.specifier))
+
+ if self.url:
+ parts.append("@ {0}".format(self.url))
+ if self.marker:
+ parts.append(" ")
+
+ if self.marker:
+ parts.append("; {0}".format(self.marker))
+
+ return "".join(parts)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Requirement({0!r})>".format(str(self))
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/specifiers.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000000..fe09bb1dbb
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/specifiers.py
@@ -0,0 +1,863 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from ._typing import TYPE_CHECKING
+from .utils import canonicalize_version
+from .version import Version, LegacyVersion, parse
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ List,
+ Dict,
+ Union,
+ Iterable,
+ Iterator,
+ Optional,
+ Callable,
+ Tuple,
+ FrozenSet,
+ )
+
+ ParsedVersion = Union[Version, LegacyVersion]
+ UnparsedVersion = Union[Version, LegacyVersion, str]
+ CallableOperator = Callable[[ParsedVersion, str], bool]
+
+
+class InvalidSpecifier(ValueError):
+ """
+ An invalid specifier was found, users should refer to PEP 440.
+ """
+
+
+class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore
+ @abc.abstractmethod
+ def __str__(self):
+ # type: () -> str
+ """
+ Returns the str representation of this Specifier like object. This
+ should be representative of the Specifier itself.
+ """
+
+ @abc.abstractmethod
+ def __hash__(self):
+ # type: () -> int
+ """
+ Returns a hash value for this Specifier like object.
+ """
+
+ @abc.abstractmethod
+ def __eq__(self, other):
+ # type: (object) -> bool
+ """
+ Returns a boolean representing whether or not the two Specifier like
+ objects are equal.
+ """
+
+ @abc.abstractmethod
+ def __ne__(self, other):
+ # type: (object) -> bool
+ """
+ Returns a boolean representing whether or not the two Specifier like
+ objects are not equal.
+ """
+
+ @abc.abstractproperty
+ def prereleases(self):
+ # type: () -> Optional[bool]
+ """
+ Returns whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ """
+ Sets whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @abc.abstractmethod
+ def contains(self, item, prereleases=None):
+ # type: (str, Optional[bool]) -> bool
+ """
+ Determines if the given item is contained within this specifier.
+ """
+
+ @abc.abstractmethod
+ def filter(self, iterable, prereleases=None):
+ # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
+ """
+ Takes an iterable of items and filters them so that only items which
+ are contained within this specifier are allowed in it.
+ """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+ _operators = {} # type: Dict[str, str]
+
+ def __init__(self, spec="", prereleases=None):
+ # type: (str, Optional[bool]) -> None
+ match = self._regex.search(spec)
+ if not match:
+ raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
+
+ self._spec = (
+ match.group("operator").strip(),
+ match.group("version").strip(),
+ ) # type: Tuple[str, str]
+
+ # Store whether or not this Specifier should accept prereleases
+ self._prereleases = prereleases
+
+ def __repr__(self):
+ # type: () -> str
+ pre = (
+ ", prereleases={0!r}".format(self.prereleases)
+ if self._prereleases is not None
+ else ""
+ )
+
+ return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)
+
+ def __str__(self):
+ # type: () -> str
+ return "{0}{1}".format(*self._spec)
+
+ @property
+ def _canonical_spec(self):
+ # type: () -> Tuple[str, Union[Version, str]]
+ return self._spec[0], canonicalize_version(self._spec[1])
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._canonical_spec)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, string_types):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._canonical_spec == other._canonical_spec
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, string_types):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._spec != other._spec
+
+ def _get_operator(self, op):
+ # type: (str) -> CallableOperator
+ operator_callable = getattr(
+ self, "_compare_{0}".format(self._operators[op])
+ ) # type: CallableOperator
+ return operator_callable
+
+ def _coerce_version(self, version):
+ # type: (UnparsedVersion) -> ParsedVersion
+ if not isinstance(version, (LegacyVersion, Version)):
+ version = parse(version)
+ return version
+
+ @property
+ def operator(self):
+ # type: () -> str
+ return self._spec[0]
+
+ @property
+ def version(self):
+ # type: () -> str
+ return self._spec[1]
+
+ @property
+ def prereleases(self):
+ # type: () -> Optional[bool]
+ return self._prereleases
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+ def __contains__(self, item):
+ # type: (str) -> bool
+ return self.contains(item)
+
+ def contains(self, item, prereleases=None):
+ # type: (UnparsedVersion, Optional[bool]) -> bool
+
+ # Determine if prereleases are to be allowed or not.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # Normalize item to a Version or LegacyVersion; this allows us to have
+ # a shortcut for ``"2.0" in Specifier(">=2")``.
+ normalized_item = self._coerce_version(item)
+
+ # Determine if we should be supporting prereleases in this specifier
+ # or not; if we do not support prereleases then we can short-circuit
+ # the logic if this version is a prerelease.
+ if normalized_item.is_prerelease and not prereleases:
+ return False
+
+ # Actually do the comparison to determine if this item is contained
+ # within this Specifier or not.
+ operator_callable = self._get_operator(self.operator) # type: CallableOperator
+ return operator_callable(normalized_item, self.version)
+
+ def filter(self, iterable, prereleases=None):
+ # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
+
+ yielded = False
+ found_prereleases = []
+
+ kw = {"prereleases": prereleases if prereleases is not None else True}
+
+ # Attempt to iterate over all the values in the iterable and if any of
+ # them match, yield them.
+ for version in iterable:
+ parsed_version = self._coerce_version(version)
+
+ if self.contains(parsed_version, **kw):
+ # If our version is a prerelease, and we were not set to allow
+ # prereleases, then we'll store it for later in case nothing
+ # else matches this specifier.
+ if parsed_version.is_prerelease and not (
+ prereleases or self.prereleases
+ ):
+ found_prereleases.append(version)
+ # Either this is not a prerelease, or we should have been
+ # accepting prereleases from the beginning.
+ else:
+ yielded = True
+ yield version
+
+ # Now that we've iterated over everything, determine if we've yielded
+ # any values, and if we have not and we have any prereleases stored up
+ # then we will go ahead and yield the prereleases.
+ if not yielded and found_prereleases:
+ for version in found_prereleases:
+ yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(==|!=|<=|>=|<|>))
+ \s*
+ (?P<version>
+ [^,;\s)]* # Since this is a "legacy" specifier, and the version
+ # string can be just about anything, we match everything
+ # except for whitespace, a semi-colon for marker support,
+ # a closing paren since versions can be enclosed in
+ # them, and a comma since it's a version separator.
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ }
+
+ def _coerce_version(self, version):
+ # type: (Union[ParsedVersion, str]) -> LegacyVersion
+ if not isinstance(version, LegacyVersion):
+ version = LegacyVersion(str(version))
+ return version
+
+ def _compare_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective == self._coerce_version(spec)
+
+ def _compare_not_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective != self._coerce_version(spec)
+
+ def _compare_less_than_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective <= self._coerce_version(spec)
+
+ def _compare_greater_than_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective >= self._coerce_version(spec)
+
+ def _compare_less_than(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective < self._coerce_version(spec)
+
+ def _compare_greater_than(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(
+ fn # type: (Callable[[Specifier, ParsedVersion, str], bool])
+):
+ # type: (...) -> Callable[[Specifier, ParsedVersion, str], bool]
+ @functools.wraps(fn)
+ def wrapped(self, prospective, spec):
+ # type: (Specifier, ParsedVersion, str) -> bool
+ if not isinstance(prospective, Version):
+ return False
+ return fn(self, prospective, spec)
+
+ return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+ (?P<version>
+ (?:
+ # The identity operators allow for an escape hatch that will
+ # do an exact string match of the version you wish to install.
+ # This will not be parsed by PEP 440 and we cannot determine
+ # any semantic meaning from it. This operator is discouraged
+ # but included entirely as an escape hatch.
+ (?<====) # Only match for the identity operator
+ \s*
+ [^\s]* # We just match everything, except for whitespace
+ # since we are only testing for strict identity.
+ )
+ |
+ (?:
+ # The (non)equality operators allow for wild card and local
+ # versions to be specified so we have to define these two
+ # operators separately to enable that.
+ (?<===|!=) # Only match for equals and not equals
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+
+ # You cannot use a wild card and a dev or local version
+ # together so group them with a | and make them optional.
+ (?:
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+ |
+ \.\* # Wild card syntax of .*
+ )?
+ )
+ |
+ (?:
+ # The compatible operator requires at least two digits in the
+ # release segment.
+ (?<=~=) # Only match for the compatible operator
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ |
+ (?:
+ # All other operators only allow a sub set of what the
+ # (non)equality operators do. Specifically they do not allow
+ # local versions to be specified nor do they allow the prefix
+ # matching wild cards.
+ (?<!==|!=|~=) # We have special cases for these
+ # operators so we want to make sure they
+ # don't match here.
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "~=": "compatible",
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ "===": "arbitrary",
+ }
+
+ @_require_version_compare
+ def _compare_compatible(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # Compatible releases have an equivalent combination of >= and ==. That
+ # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+ # implement this in terms of the other specifiers instead of
+ # implementing it ourselves. The only thing we need to do is construct
+ # the other specifiers.
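+ # For example (illustrative): "~=2.2.post3" is treated as
+ # ">=2.2.post3, ==2.*".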
+
+ # We want everything but the last item in the version, but we want to
+ # ignore post and dev releases, and we want to treat the pre-release as
+ # its own separate segment.
+ prefix = ".".join(
+ list(
+ itertools.takewhile(
+ lambda x: (not x.startswith("post") and not x.startswith("dev")),
+ _version_split(spec),
+ )
+ )[:-1]
+ )
+
+ # Add the prefix notation to the end of our string
+ prefix += ".*"
+
+ return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+ prospective, prefix
+ )
+
+ @_require_version_compare
+ def _compare_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # We need special logic to handle prefix matching
+ if spec.endswith(".*"):
+ # In the case of prefix matching we want to ignore local segment.
+ prospective = Version(prospective.public)
+ # Split the spec out by dots, and pretend that there is an implicit
+ # dot in between a release segment and a pre-release segment.
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .*
+
+ # Split the prospective version out by dots, and pretend that there
+ # is an implicit dot in between a release segment and a pre-release
+ # segment.
+ split_prospective = _version_split(str(prospective))
+
+ # Shorten the prospective version to be the same length as the spec
+ # so that we can determine if the specifier is a prefix of the
+ # prospective version or not.
+ shortened_prospective = split_prospective[: len(split_spec)]
+
+ # Pad out our two sides with zeros so that they both equal the same
+ # length.
+ padded_spec, padded_prospective = _pad_version(
+ split_spec, shortened_prospective
+ )
+
+ return padded_prospective == padded_spec
+ else:
+ # Convert our spec string into a Version
+ spec_version = Version(spec)
+
+ # If the specifier does not have a local segment, then we want to
+ # act as if the prospective version also does not have a local
+ # segment.
+ if not spec_version.local:
+ prospective = Version(prospective.public)
+
+ return prospective == spec_version
+
+ @_require_version_compare
+ def _compare_not_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+ return not self._compare_equal(prospective, spec)
+
+ @_require_version_compare
+ def _compare_less_than_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) <= Version(spec)
+
+ @_require_version_compare
+ def _compare_greater_than_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) >= Version(spec)
+
+ @_require_version_compare
+ def _compare_less_than(self, prospective, spec_str):
+ # type: (ParsedVersion, str) -> bool
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is less than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective < spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself is a
+ # pre-release version, we do not accept pre-release versions for the
+ # version mentioned in the specifier (e.g. <3.1 should not match
+ # 3.1.dev0, but should match 3.0.dev0).
+ if not spec.is_prerelease and prospective.is_prerelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that the prospective version is both
+ # less than the spec version *and* it's not a pre-release of the same
+ # version in the spec.
+ return True
+
+ @_require_version_compare
+ def _compare_greater_than(self, prospective, spec_str):
+ # type: (ParsedVersion, str) -> bool
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is greater than the spec
+ # version. If it's not we can short circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective > spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself is a
+ # post-release version, we do not accept post-release versions for the
+ # version mentioned in the specifier
+ # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+ if not spec.is_postrelease and prospective.is_postrelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # Ensure that we do not allow a local version of the version mentioned
+ # in the specifier, which is technically greater than, to match.
+ if prospective.local is not None:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that the prospective version is both
+ # greater than the spec version *and* it's not a pre-release of the
+ # same version in the spec.
+ return True
+
+ def _compare_arbitrary(self, prospective, spec):
+ # type: (Version, str) -> bool
+ return str(prospective).lower() == str(spec).lower()
+
+ @property
+ def prereleases(self):
+ # type: () -> bool
+
+ # If there is an explicit prereleases set for this, then we'll just
+ # blindly use that.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # Look at all of our specifiers and determine if they are inclusive
+ # operators, and if they are if they are including an explicit
+ # prerelease.
+ operator, version = self._spec
+ if operator in ["==", ">=", "<=", "~=", "==="]:
+ # The == specifier can include a trailing .*, if it does we
+ # want to remove before parsing.
+ if operator == "==" and version.endswith(".*"):
+ version = version[:-2]
+
+ # Parse the version, and if it is a pre-release than this
+ # specifier allows pre-releases.
+ if parse(version).is_prerelease:
+ return True
+
+ return False
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+ # type: (str) -> List[str]
+ result = [] # type: List[str]
+ for item in version.split("."):
+ match = _prefix_regex.search(item)
+ if match:
+ result.extend(match.groups())
+ else:
+ result.append(item)
+ return result
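+
+# Example (illustrative): _version_split("1.0rc1") -> ["1", "0", "rc1"], i.e.
+# a pre-release suffix becomes its own dot-separated segment.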
+
+
+def _pad_version(left, right):
+ # type: (List[str], List[str]) -> Tuple[List[str], List[str]]
+ left_split, right_split = [], []
+
+ # Get the release segment of our versions
+ left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+ right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+ # Get the rest of our versions
+ left_split.append(left[len(left_split[0]) :])
+ right_split.append(right[len(right_split[0]) :])
+
+ # Insert our padding
+ left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+ right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+ return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
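+
+# Example (illustrative):
+#   _pad_version(["1", "2"], ["1", "2", "3"])
+#   -> (["1", "2", "0"], ["1", "2", "3"])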
+
+
+class SpecifierSet(BaseSpecifier):
+ def __init__(self, specifiers="", prereleases=None):
+ # type: (str, Optional[bool]) -> None
+
+ # Split on , to break each individual specifier into its own item, and
+ # strip each item to remove leading/trailing whitespace.
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+ # Parse each individual specifier, attempting first to make it a
+ # Specifier and falling back to a LegacySpecifier.
+ parsed = set()
+ for specifier in split_specifiers:
+ try:
+ parsed.add(Specifier(specifier))
+ except InvalidSpecifier:
+ parsed.add(LegacySpecifier(specifier))
+
+ # Turn our parsed specifiers into a frozen set and save them for later.
+ self._specs = frozenset(parsed)
+
+ # Store our prereleases value so we can use it later to determine if
+ # we accept prereleases or not.
+ self._prereleases = prereleases
+
+ def __repr__(self):
+ # type: () -> str
+ pre = (
+ ", prereleases={0!r}".format(self.prereleases)
+ if self._prereleases is not None
+ else ""
+ )
+
+ return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+ def __str__(self):
+ # type: () -> str
+ return ",".join(sorted(str(s) for s in self._specs))
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._specs)
+
+ def __and__(self, other):
+ # type: (Union[SpecifierSet, str]) -> SpecifierSet
+ if isinstance(other, string_types):
+ other = SpecifierSet(other)
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ specifier = SpecifierSet()
+ specifier._specs = frozenset(self._specs | other._specs)
+
+ if self._prereleases is None and other._prereleases is not None:
+ specifier._prereleases = other._prereleases
+ elif self._prereleases is not None and other._prereleases is None:
+ specifier._prereleases = self._prereleases
+ elif self._prereleases == other._prereleases:
+ specifier._prereleases = self._prereleases
+ else:
+ raise ValueError(
+ "Cannot combine SpecifierSets with True and False prerelease "
+ "overrides."
+ )
+
+ return specifier
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, (string_types, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs == other._specs
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, (string_types, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs != other._specs
+
+ def __len__(self):
+ # type: () -> int
+ return len(self._specs)
+
+ def __iter__(self):
+ # type: () -> Iterator[FrozenSet[_IndividualSpecifier]]
+ return iter(self._specs)
+
+ @property
+ def prereleases(self):
+ # type: () -> Optional[bool]
+
+ # If we have been given an explicit prerelease modifier, then we'll
+ # pass that through here.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # If we don't have any specifiers, and we don't have a forced value,
+ # then we'll just return None since we don't know if this should have
+ # pre-releases or not.
+ if not self._specs:
+ return None
+
+ # Otherwise we'll see if any of the given specifiers accept
+ # prereleases, if any of them do we'll return True, otherwise False.
+ return any(s.prereleases for s in self._specs)
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+ def __contains__(self, item):
+ # type: (Union[ParsedVersion, str]) -> bool
+ return self.contains(item)
+
+ def contains(self, item, prereleases=None):
+ # type: (Union[ParsedVersion, str], Optional[bool]) -> bool
+
+ # Ensure that our item is a Version or LegacyVersion instance.
+ if not isinstance(item, (LegacyVersion, Version)):
+ item = parse(item)
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # We can determine if we're going to allow pre-releases by looking to
+ # see if any of the underlying items supports them. If none of them do
+ # and this item is a pre-release then we do not allow it and we can
+ # short circuit that here.
+ # Note: This means that 1.0.dev1 would not be contained in something
+ # like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0
+ if not prereleases and item.is_prerelease:
+ return False
+
+ # We simply dispatch to the underlying specs here to make sure that the
+ # given version is contained within all of them.
+ # Note: This use of all() here means that an empty set of specifiers
+ # will always return True; this is an explicit design decision.
+ return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+ def filter(
+ self,
+ iterable, # type: Iterable[Union[ParsedVersion, str]]
+ prereleases=None, # type: Optional[bool]
+ ):
+ # type: (...) -> Iterable[Union[ParsedVersion, str]]
+
+ # Determine if we're forcing a prerelease or not, if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # If we have any specifiers, then we want to wrap our iterable in the
+ # filter method for each one, this will act as a logical AND amongst
+ # each specifier.
+ if self._specs:
+ for spec in self._specs:
+ iterable = spec.filter(iterable, prereleases=bool(prereleases))
+ return iterable
+ # If we do not have any specifiers, then we need to have a rough filter
+ # which will filter out any pre-releases, unless there are no final
+ # releases, and which will filter out LegacyVersion in general.
+ else:
+ filtered = [] # type: List[Union[ParsedVersion, str]]
+ found_prereleases = [] # type: List[Union[ParsedVersion, str]]
+
+ for item in iterable:
+ # Ensure that we have some kind of Version class for this item.
+ if not isinstance(item, (LegacyVersion, Version)):
+ parsed_version = parse(item)
+ else:
+ parsed_version = item
+
+ # Filter out any item which is parsed as a LegacyVersion
+ if isinstance(parsed_version, LegacyVersion):
+ continue
+
+ # Store any item which is a pre-release for later unless we've
+ # already found a final version or we are accepting prereleases
+ if parsed_version.is_prerelease and not prereleases:
+ if not filtered:
+ found_prereleases.append(item)
+ else:
+ filtered.append(item)
+
+ # If we've found no items except for pre-releases, then we'll go
+ # ahead and use the pre-releases
+ if not filtered and found_prereleases and prereleases is None:
+ return found_prereleases
+
+ return filtered
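+
+# Example (illustrative):
+#   >>> list(SpecifierSet(">=1.0").filter(["0.9", "1.0", "1.1", "1.2a1"]))
+#   ['1.0', '1.1']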
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/tags.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/tags.py
new file mode 100644
index 0000000000..9064910b8b
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/tags.py
@@ -0,0 +1,751 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import
+
+import distutils.util
+
+try:
+ from importlib.machinery import EXTENSION_SUFFIXES
+except ImportError: # pragma: no cover
+ import imp
+
+ EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
+ del imp
+import logging
+import os
+import platform
+import re
+import struct
+import sys
+import sysconfig
+import warnings
+
+from ._typing import TYPE_CHECKING, cast
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ Dict,
+ FrozenSet,
+ IO,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ )
+
+ PythonVersion = Sequence[int]
+ MacVersion = Tuple[int, int]
+ GlibcVersion = Tuple[int, int]
+
+
+logger = logging.getLogger(__name__)
+
+INTERPRETER_SHORT_NAMES = {
+ "python": "py", # Generic.
+ "cpython": "cp",
+ "pypy": "pp",
+ "ironpython": "ip",
+ "jython": "jy",
+} # type: Dict[str, str]
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+class Tag(object):
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
+
+ __slots__ = ["_interpreter", "_abi", "_platform"]
+
+ def __init__(self, interpreter, abi, platform):
+ # type: (str, str, str) -> None
+ self._interpreter = interpreter.lower()
+ self._abi = abi.lower()
+ self._platform = platform.lower()
+
+ @property
+ def interpreter(self):
+ # type: () -> str
+ return self._interpreter
+
+ @property
+ def abi(self):
+ # type: () -> str
+ return self._abi
+
+ @property
+ def platform(self):
+ # type: () -> str
+ return self._platform
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if not isinstance(other, Tag):
+ return NotImplemented
+
+ return (
+ (self.platform == other.platform)
+ and (self.abi == other.abi)
+ and (self.interpreter == other.interpreter)
+ )
+
+ def __hash__(self):
+ # type: () -> int
+ return hash((self._interpreter, self._abi, self._platform))
+
+ def __str__(self):
+ # type: () -> str
+ return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
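+
+    # For example, Tag("CP37", "abi3", "manylinux1_x86_64") lower-cases its
+    # fields, so it compares (and hashes) equal to the tag parsed from the
+    # string "cp37-abi3-manylinux1_x86_64" and can safely be stored in sets.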
+
+
+def parse_tag(tag):
+ # type: (str) -> FrozenSet[Tag]
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
+ tags = set()
+ interpreters, abis, platforms = tag.split("-")
+ for interpreter in interpreters.split("."):
+ for abi in abis.split("."):
+ for platform_ in platforms.split("."):
+ tags.add(Tag(interpreter, abi, platform_))
+ return frozenset(tags)
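+
+# For example, the compressed tag set "cp37.cp38-abi3-manylinux1_x86_64"
+# expands to the cross product:
+#   frozenset({Tag("cp37", "abi3", "manylinux1_x86_64"),
+#              Tag("cp38", "abi3", "manylinux1_x86_64")})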
+
+
+def _warn_keyword_parameter(func_name, kwargs):
+ # type: (str, Dict[str, bool]) -> bool
+ """
+ Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
+ """
+ if not kwargs:
+ return False
+ elif len(kwargs) > 1 or "warn" not in kwargs:
+ kwargs.pop("warn", None)
+ arg = next(iter(kwargs.keys()))
+ raise TypeError(
+ "{}() got an unexpected keyword argument {!r}".format(func_name, arg)
+ )
+ return kwargs["warn"]
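+
+# For example, cpython_tags(warn=True) reaches this helper as
+# kwargs={"warn": True}, while any other keyword, e.g. cpython_tags(foo=1),
+# raises TypeError here; this emulates a Python 3 keyword-only parameter
+# under Python 2.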
+
+
+def _get_config_var(name, warn=False):
+ # type: (str, bool) -> Union[int, str, None]
+ value = sysconfig.get_config_var(name)
+ if value is None and warn:
+ logger.debug(
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+ )
+ return value
+
+
+def _normalize_string(string):
+ # type: (str) -> str
+ return string.replace(".", "_").replace("-", "_")
+
+
+def _abi3_applies(python_version):
+ # type: (PythonVersion) -> bool
+ """
+ Determine if the Python version supports abi3.
+
+ PEP 384 was first implemented in Python 3.2.
+ """
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version, warn=False):
+ # type: (PythonVersion, bool) -> List[str]
+ py_version = tuple(py_version) # To allow for version comparison.
+ abis = []
+ version = _version_nodot(py_version[:2])
+ debug = pymalloc = ucs4 = ""
+ with_debug = _get_config_var("Py_DEBUG", warn)
+ has_refcount = hasattr(sys, "gettotalrefcount")
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+ # extension modules is the best option.
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
+ debug = "d"
+ if py_version < (3, 8):
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+ if with_pymalloc or with_pymalloc is None:
+ pymalloc = "m"
+ if py_version < (3, 3):
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+ if unicode_size == 4 or (
+ unicode_size is None and sys.maxunicode == 0x10FFFF
+ ):
+ ucs4 = "u"
+ elif debug:
+ # Debug builds can also load "normal" extension modules.
+ # We can also assume no UCS-4 or pymalloc requirement.
+ abis.append("cp{version}".format(version=version))
+ abis.insert(
+ 0,
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+ ),
+ )
+ return abis
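+
+# For example, on a typical release build of CPython 3.8 this returns ["cp38"];
+# on a debug build it returns ["cp38d", "cp38"], since (per the comment above)
+# debug builds of 3.8+ can also load "normal" extension modules.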
+
+
+def cpython_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a CPython interpreter.
+
+ The tags consist of:
+ - cp<python_version>-<abi>-<platform>
+ - cp<python_version>-abi3-<platform>
+ - cp<python_version>-none-<platform>
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
+
+    If python_version only specifies a major version then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+ If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+ their normal position and not at the beginning.
+ """
+ warn = _warn_keyword_parameter("cpython_tags", kwargs)
+ if not python_version:
+ python_version = sys.version_info[:2]
+
+ interpreter = "cp{}".format(_version_nodot(python_version[:2]))
+
+ if abis is None:
+ if len(python_version) > 1:
+ abis = _cpython_abis(python_version, warn)
+ else:
+ abis = []
+ abis = list(abis)
+ # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"):
+ try:
+ abis.remove(explicit_abi)
+ except ValueError:
+ pass
+
+ platforms = list(platforms or _platform_tags())
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+ if _abi3_applies(python_version):
+ for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
+ yield tag
+ for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
+ yield tag
+
+ if _abi3_applies(python_version):
+ for minor_version in range(python_version[1] - 1, 1, -1):
+ for platform_ in platforms:
+ interpreter = "cp{version}".format(
+ version=_version_nodot((python_version[0], minor_version))
+ )
+ yield Tag(interpreter, "abi3", platform_)
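+
+# For example, cpython_tags((3, 8), abis=["cp38"], platforms=["linux_x86_64"])
+# yields, in priority order: cp38-cp38-linux_x86_64, cp38-abi3-linux_x86_64,
+# cp38-none-linux_x86_64, then cp37-abi3-linux_x86_64 down to
+# cp32-abi3-linux_x86_64.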
+
+
+def _generic_abi():
+ # type: () -> Iterator[str]
+ abi = sysconfig.get_config_var("SOABI")
+ if abi:
+ yield _normalize_string(abi)
+
+
+def generic_tags(
+ interpreter=None, # type: Optional[str]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a generic interpreter.
+
+ The tags consist of:
+ - <interpreter>-<abi>-<platform>
+
+ The "none" ABI will be added if it was not explicitly provided.
+ """
+ warn = _warn_keyword_parameter("generic_tags", kwargs)
+ if not interpreter:
+ interp_name = interpreter_name()
+ interp_version = interpreter_version(warn=warn)
+ interpreter = "".join([interp_name, interp_version])
+ if abis is None:
+ abis = _generic_abi()
+ platforms = list(platforms or _platform_tags())
+ abis = list(abis)
+ if "none" not in abis:
+ abis.append("none")
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version):
+ # type: (PythonVersion) -> Iterator[str]
+ """
+ Yields Python versions in descending order.
+
+ After the latest version, the major-only version will be yielded, and then
+ all previous versions of that major version.
+ """
+ if len(py_version) > 1:
+ yield "py{version}".format(version=_version_nodot(py_version[:2]))
+ yield "py{major}".format(major=py_version[0])
+ if len(py_version) > 1:
+ for minor in range(py_version[1] - 1, -1, -1):
+ yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
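+
+# For example, _py_interpreter_range((3, 3)) yields
+# "py33", "py3", "py32", "py31", "py30".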
+
+
+def compatible_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ interpreter=None, # type: Optional[str]
+ platforms=None, # type: Optional[Iterable[str]]
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the sequence of tags that are compatible with a specific version of Python.
+
+ The tags consist of:
+ - py*-none-<platform>
+ - <interpreter>-none-any # ... if `interpreter` is provided.
+ - py*-none-any
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+ platforms = list(platforms or _platform_tags())
+ for version in _py_interpreter_range(python_version):
+ for platform_ in platforms:
+ yield Tag(version, "none", platform_)
+ if interpreter:
+ yield Tag(interpreter, "none", "any")
+ for version in _py_interpreter_range(python_version):
+ yield Tag(version, "none", "any")
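+
+# For example, compatible_tags((3, 3), "cp33", ["plat"]) yields py33-none-plat,
+# py3-none-plat, py32-none-plat ... py30-none-plat, then cp33-none-any, then
+# py33-none-any, py3-none-any, py32-none-any ... py30-none-any.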
+
+
+def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
+ # type: (str, bool) -> str
+ if not is_32bit:
+ return arch
+
+ if arch.startswith("ppc"):
+ return "ppc"
+
+ return "i386"
+
+
+def _mac_binary_formats(version, cpu_arch):
+ # type: (MacVersion, str) -> List[str]
+ formats = [cpu_arch]
+ if cpu_arch == "x86_64":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat64", "fat32"])
+
+ elif cpu_arch == "i386":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat32", "fat"])
+
+ elif cpu_arch == "ppc64":
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4):
+ return []
+ formats.append("fat64")
+
+ elif cpu_arch == "ppc":
+ if version > (10, 6):
+ return []
+ formats.extend(["fat32", "fat"])
+
+ formats.append("universal")
+ return formats
+
+
+def mac_platforms(version=None, arch=None):
+ # type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
+ """
+ Yields the platform tags for a macOS system.
+
+ The `version` parameter is a two-item tuple specifying the macOS version to
+ generate platform tags for. The `arch` parameter is the CPU architecture to
+ generate platform tags for. Both parameters default to the appropriate value
+ for the current system.
+ """
+ version_str, _, cpu_arch = platform.mac_ver() # type: ignore
+ if version is None:
+ version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+ if arch is None:
+ arch = _mac_arch(cpu_arch)
+ for minor_version in range(version[1], -1, -1):
+ compat_version = version[0], minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
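+
+# For example, mac_platforms((10, 6), "x86_64") yields macosx_10_6_x86_64,
+# macosx_10_6_intel, macosx_10_6_fat64, macosx_10_6_fat32, macosx_10_6_universal,
+# and the same five formats for 10.5 and 10.4; nothing older is yielded because
+# x86_64 binaries require at least macOS 10.4.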
+
+
+# From PEP 513.
+def _is_manylinux_compatible(name, glibc_version):
+ # type: (str, GlibcVersion) -> bool
+ # Check for presence of _manylinux module.
+ try:
+ import _manylinux # noqa
+
+ return bool(getattr(_manylinux, name + "_compatible"))
+ except (ImportError, AttributeError):
+ # Fall through to heuristic check below.
+ pass
+
+ return _have_compatible_glibc(*glibc_version)
+
+
+def _glibc_version_string():
+ # type: () -> Optional[str]
+ # Returns glibc version string, or None if not using glibc.
+ return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _glibc_version_string_confstr():
+ # type: () -> Optional[str]
+ """
+ Primary implementation of glibc_version_string using os.confstr.
+ """
+ # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module.
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+ version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
+ "CS_GNU_LIBC_VERSION"
+ )
+ assert version_string is not None
+ _, version = version_string.split() # type: Tuple[str, str]
+ except (AssertionError, AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def _glibc_version_string_ctypes():
+ # type: () -> Optional[str]
+ """
+ Fallback implementation of glibc_version_string using ctypes.
+ """
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ #
+ # Note: typeshed is wrong here so we are ignoring this line.
+ process_namespace = ctypes.CDLL(None) # type: ignore
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str = gnu_get_libc_version() # type: str
+ # py2 / py3 compatibility:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+# Separated out from have_compatible_glibc for easier unit testing.
+def _check_glibc_version(version_str, required_major, minimum_minor):
+ # type: (str, int, int) -> bool
+ # Parse string and check against requested version.
+ #
+ # We use a regexp instead of str.split because we want to discard any
+ # random junk that might come after the minor version -- this might happen
+ # in patched/forked versions of glibc (e.g. Linaro's version of glibc
+ # uses version strings like "2.20-2014.11"). See gh-3588.
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+ if not m:
+ warnings.warn(
+ "Expected glibc version with 2 components major.minor,"
+ " got: %s" % version_str,
+ RuntimeWarning,
+ )
+ return False
+ return (
+ int(m.group("major")) == required_major
+ and int(m.group("minor")) >= minimum_minor
+ )
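+
+# For example, _check_glibc_version("2.20-2014.11", 2, 17) is True (the regex
+# keeps just "2.20", and 2.20 >= 2.17), while a string without a parseable
+# major.minor prefix warns and returns False.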
+
+
+def _have_compatible_glibc(required_major, minimum_minor):
+ # type: (int, int) -> bool
+ version_str = _glibc_version_string()
+ if version_str is None:
+ return False
+ return _check_glibc_version(version_str, required_major, minimum_minor)
+
+
+# Python does not provide platform information at sufficient granularity to
+# identify the architecture of the running executable in some cases, so we
+# determine it dynamically by reading the information from the running
+# process. This only applies on Linux, which uses the ELF format.
+class _ELFFileHeader(object):
+ # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+ class _InvalidELFFileHeader(ValueError):
+ """
+ An invalid ELF file header was found.
+ """
+
+ ELF_MAGIC_NUMBER = 0x7F454C46
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ EM_386 = 3
+ EM_S390 = 22
+ EM_ARM = 40
+ EM_X86_64 = 62
+ EF_ARM_ABIMASK = 0xFF000000
+ EF_ARM_ABI_VER5 = 0x05000000
+ EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+ def __init__(self, file):
+ # type: (IO[bytes]) -> None
+ def unpack(fmt):
+ # type: (str) -> int
+ try:
+ (result,) = struct.unpack(
+ fmt, file.read(struct.calcsize(fmt))
+ ) # type: (int, )
+ except struct.error:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ return result
+
+ self.e_ident_magic = unpack(">I")
+ if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_class = unpack("B")
+ if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_data = unpack("B")
+ if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_version = unpack("B")
+ self.e_ident_osabi = unpack("B")
+ self.e_ident_abiversion = unpack("B")
+ self.e_ident_pad = file.read(7)
+ format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+ format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+ format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+ format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+ self.e_type = unpack(format_h)
+ self.e_machine = unpack(format_h)
+ self.e_version = unpack(format_i)
+ self.e_entry = unpack(format_p)
+ self.e_phoff = unpack(format_p)
+ self.e_shoff = unpack(format_p)
+ self.e_flags = unpack(format_i)
+ self.e_ehsize = unpack(format_h)
+ self.e_phentsize = unpack(format_h)
+ self.e_phnum = unpack(format_h)
+ self.e_shentsize = unpack(format_h)
+ self.e_shnum = unpack(format_h)
+ self.e_shstrndx = unpack(format_h)
+
+
+def _get_elf_header():
+ # type: () -> Optional[_ELFFileHeader]
+ try:
+ with open(sys.executable, "rb") as f:
+ elf_header = _ELFFileHeader(f)
+ except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+ return None
+ return elf_header
+
+
+def _is_linux_armhf():
+ # type: () -> bool
+ # hard-float ABI can be detected from the ELF header of the running
+ # process
+ # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_ARM
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+ ) == elf_header.EF_ARM_ABI_VER5
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+ ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+ return result
+
+
+def _is_linux_i686():
+ # type: () -> bool
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_386
+ return result
+
+
+def _have_compatible_manylinux_abi(arch):
+ # type: (str) -> bool
+ if arch == "armv7l":
+ return _is_linux_armhf()
+ if arch == "i686":
+ return _is_linux_i686()
+ return True
+
+
+def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
+ # type: (bool) -> Iterator[str]
+ linux = _normalize_string(distutils.util.get_platform())
+ if is_32bit:
+ if linux == "linux_x86_64":
+ linux = "linux_i686"
+ elif linux == "linux_aarch64":
+ linux = "linux_armv7l"
+ manylinux_support = []
+ _, arch = linux.split("_", 1)
+ if _have_compatible_manylinux_abi(arch):
+ if arch in {"x86_64", "i686", "aarch64", "armv7l", "ppc64", "ppc64le", "s390x"}:
+ manylinux_support.append(
+ ("manylinux2014", (2, 17))
+ ) # CentOS 7 w/ glibc 2.17 (PEP 599)
+ if arch in {"x86_64", "i686"}:
+ manylinux_support.append(
+ ("manylinux2010", (2, 12))
+ ) # CentOS 6 w/ glibc 2.12 (PEP 571)
+ manylinux_support.append(
+ ("manylinux1", (2, 5))
+ ) # CentOS 5 w/ glibc 2.5 (PEP 513)
+ manylinux_support_iter = iter(manylinux_support)
+ for name, glibc_version in manylinux_support_iter:
+ if _is_manylinux_compatible(name, glibc_version):
+ yield linux.replace("linux", name)
+ break
+ # Support for a later manylinux implies support for an earlier version.
+ for name, _ in manylinux_support_iter:
+ yield linux.replace("linux", name)
+ yield linux
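+
+# For example, on an x86_64 host whose glibc reports at least 2.17 (and assuming
+# no overriding _manylinux module), this yields manylinux2014_x86_64,
+# manylinux2010_x86_64, manylinux1_x86_64, linux_x86_64: support for a later
+# manylinux tag implies support for the earlier ones.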
+
+
+def _generic_platforms():
+ # type: () -> Iterator[str]
+ yield _normalize_string(distutils.util.get_platform())
+
+
+def _platform_tags():
+ # type: () -> Iterator[str]
+ """
+ Provides the platform tags for this installation.
+ """
+ if platform.system() == "Darwin":
+ return mac_platforms()
+ elif platform.system() == "Linux":
+ return _linux_platforms()
+ else:
+ return _generic_platforms()
+
+
+def interpreter_name():
+ # type: () -> str
+ """
+ Returns the name of the running interpreter.
+ """
+ try:
+ name = sys.implementation.name # type: ignore
+ except AttributeError: # pragma: no cover
+ # Python 2.7 compatibility.
+ name = platform.python_implementation().lower()
+ return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(**kwargs):
+ # type: (bool) -> str
+ """
+ Returns the version of the running interpreter.
+ """
+ warn = _warn_keyword_parameter("interpreter_version", kwargs)
+ version = _get_config_var("py_version_nodot", warn=warn)
+ if version:
+ version = str(version)
+ else:
+ version = _version_nodot(sys.version_info[:2])
+ return version
+
+
+def _version_nodot(version):
+ # type: (PythonVersion) -> str
+ if any(v >= 10 for v in version):
+ sep = "_"
+ else:
+ sep = ""
+ return sep.join(map(str, version))
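+
+# For example, _version_nodot((3, 9)) returns "39", but _version_nodot((3, 10))
+# returns "3_10": once any component reaches 10, the dotless form would be
+# ambiguous, so an underscore separator is used instead.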
+
+
+def sys_tags(**kwargs):
+ # type: (bool) -> Iterator[Tag]
+ """
+ Returns the sequence of tag triples for the running interpreter.
+
+ The order of the sequence corresponds to priority order for the
+ interpreter, from most to least important.
+ """
+ warn = _warn_keyword_parameter("sys_tags", kwargs)
+
+ interp_name = interpreter_name()
+ if interp_name == "cp":
+ for tag in cpython_tags(warn=warn):
+ yield tag
+ else:
+ for tag in generic_tags():
+ yield tag
+
+ for tag in compatible_tags():
+ yield tag
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/utils.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/utils.py
new file mode 100644
index 0000000000..19579c1a0f
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/utils.py
@@ -0,0 +1,65 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+from ._typing import TYPE_CHECKING, cast
+from .version import InvalidVersion, Version
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import NewType, Union
+
+ NormalizedName = NewType("NormalizedName", str)
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+ # type: (str) -> NormalizedName
+ # This is taken from PEP 503.
+ value = _canonicalize_regex.sub("-", name).lower()
+ return cast("NormalizedName", value)
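+
+# For example, canonicalize_name("Twisted") returns "twisted" and
+# canonicalize_name("zope.interface") returns "zope-interface": runs of
+# "-", "_" and "." collapse to a single "-" and the result is lower-cased.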
+
+
+def canonicalize_version(_version):
+ # type: (str) -> Union[Version, str]
+ """
+ This is very similar to Version.__str__, but has one subtle difference
+ with the way it handles the release segment.
+ """
+
+ try:
+ version = Version(_version)
+ except InvalidVersion:
+ # Legacy versions cannot be normalized
+ return _version
+
+ parts = []
+
+ # Epoch
+ if version.epoch != 0:
+ parts.append("{0}!".format(version.epoch))
+
+ # Release segment
+ # NB: This strips trailing '.0's to normalize
+ parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
+
+ # Pre-release
+ if version.pre is not None:
+ parts.append("".join(str(x) for x in version.pre))
+
+ # Post-release
+ if version.post is not None:
+ parts.append(".post{0}".format(version.post))
+
+ # Development release
+ if version.dev is not None:
+ parts.append(".dev{0}".format(version.dev))
+
+ # Local version segment
+ if version.local is not None:
+ parts.append("+{0}".format(version.local))
+
+ return "".join(parts)
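+
+# For example, canonicalize_version("1.2.0") returns "1.2" (trailing ".0"
+# release components are stripped), while an un-parseable legacy string such as
+# "french toast" is returned unchanged.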
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/packaging/version.py b/third_party/python/setuptools/pkg_resources/_vendor/packaging/version.py
new file mode 100644
index 0000000000..00371e86a8
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/packaging/version.py
@@ -0,0 +1,535 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity, NegativeInfinity
+from ._typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+ from ._structures import InfinityType, NegativeInfinityType
+
+ InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+ PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+ SubLocalType = Union[InfiniteTypes, int, str]
+ LocalType = Union[
+ NegativeInfinityType,
+ Tuple[
+ Union[
+ SubLocalType,
+ Tuple[SubLocalType, str],
+ Tuple[NegativeInfinityType, SubLocalType],
+ ],
+ ...,
+ ],
+ ]
+ CmpKey = Tuple[
+ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+ ]
+ LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+ VersionComparisonMethod = Callable[
+ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+ ]
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+
+_Version = collections.namedtuple(
+ "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version):
+ # type: (str) -> Union[LegacyVersion, Version]
+ """
+ Parse the given version string and return either a :class:`Version` object
+ or a :class:`LegacyVersion` object depending on if the given version is
+ a valid PEP 440 version or a legacy version.
+ """
+ try:
+ return Version(version)
+ except InvalidVersion:
+ return LegacyVersion(version)
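+
+# For example, parse("1.0") returns Version("1.0"), while a string that is not
+# a valid PEP 440 version, e.g. parse("french toast"), falls back to
+# LegacyVersion("french toast").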
+
+
+class InvalidVersion(ValueError):
+ """
+ An invalid version was found, users should refer to PEP 440.
+ """
+
+
+class _BaseVersion(object):
+ _key = None # type: Union[CmpKey, LegacyCmpKey]
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._key)
+
+ def __lt__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s < o)
+
+ def __le__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s <= o)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return self._compare(other, lambda s, o: s == o)
+
+ def __ge__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s >= o)
+
+ def __gt__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s > o)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return self._compare(other, lambda s, o: s != o)
+
+ def _compare(self, other, method):
+ # type: (object, VersionComparisonMethod) -> Union[bool, NotImplemented]
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+ def __init__(self, version):
+ # type: (str) -> None
+ self._version = str(version)
+ self._key = _legacy_cmpkey(self._version)
+
+ def __str__(self):
+ # type: () -> str
+ return self._version
+
+ def __repr__(self):
+ # type: () -> str
+ return "<LegacyVersion({0})>".format(repr(str(self)))
+
+ @property
+ def public(self):
+ # type: () -> str
+ return self._version
+
+ @property
+ def base_version(self):
+ # type: () -> str
+ return self._version
+
+ @property
+ def epoch(self):
+ # type: () -> int
+ return -1
+
+ @property
+ def release(self):
+ # type: () -> None
+ return None
+
+ @property
+ def pre(self):
+ # type: () -> None
+ return None
+
+ @property
+ def post(self):
+ # type: () -> None
+ return None
+
+ @property
+ def dev(self):
+ # type: () -> None
+ return None
+
+ @property
+ def local(self):
+ # type: () -> None
+ return None
+
+ @property
+ def is_prerelease(self):
+ # type: () -> bool
+ return False
+
+ @property
+ def is_postrelease(self):
+ # type: () -> bool
+ return False
+
+ @property
+ def is_devrelease(self):
+ # type: () -> bool
+ return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+ "pre": "c",
+ "preview": "c",
+ "-": "final-",
+ "rc": "c",
+ "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+ # type: (str) -> Iterator[str]
+ for part in _legacy_version_component_re.split(s):
+ part = _legacy_version_replacement_map.get(part, part)
+
+ if not part or part == ".":
+ continue
+
+ if part[:1] in "0123456789":
+ # pad for numeric comparison
+ yield part.zfill(8)
+ else:
+ yield "*" + part
+
+ # ensure that alpha/beta/candidate are before final
+ yield "*final"
+
+
+def _legacy_cmpkey(version):
+ # type: (str) -> LegacyCmpKey
+
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively sort the LegacyVersion,
+    # which uses the de facto standard originally implemented by setuptools,
+    # before all PEP 440 versions.
+ epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version of setuptools, prior
+    # to its adoption of the packaging library.
+ parts = [] # type: List[str]
+ for part in _parse_version_parts(version.lower()):
+ if part.startswith("*"):
+ # remove "-" before a prerelease tag
+ if part < "*final":
+ while parts and parts[-1] == "*final-":
+ parts.pop()
+
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1] == "00000000":
+ parts.pop()
+
+ parts.append(part)
+
+ return epoch, tuple(parts)
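+
+# For example, _legacy_cmpkey("1.0") == (-1, ("00000001", "*final")): the
+# trailing zero component is dropped, and the fixed epoch of -1 sorts every
+# LegacyVersion before every PEP 440 Version.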
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+ v?
+ (?:
+ (?:(?P<epoch>[0-9]+)!)? # epoch
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ def __init__(self, version):
+ # type: (str) -> None
+
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+ post=_parse_letter_version(
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+ ),
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Version({0})>".format(repr(str(self)))
+
+ def __str__(self):
+ # type: () -> str
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append("{0}!".format(self.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ # Pre-release
+ if self.pre is not None:
+ parts.append("".join(str(x) for x in self.pre))
+
+ # Post-release
+ if self.post is not None:
+ parts.append(".post{0}".format(self.post))
+
+ # Development release
+ if self.dev is not None:
+ parts.append(".dev{0}".format(self.dev))
+
+ # Local version segment
+ if self.local is not None:
+ parts.append("+{0}".format(self.local))
+
+ return "".join(parts)
+
+ @property
+ def epoch(self):
+ # type: () -> int
+ _epoch = self._version.epoch # type: int
+ return _epoch
+
+ @property
+ def release(self):
+ # type: () -> Tuple[int, ...]
+ _release = self._version.release # type: Tuple[int, ...]
+ return _release
+
+ @property
+ def pre(self):
+ # type: () -> Optional[Tuple[str, int]]
+ _pre = self._version.pre # type: Optional[Tuple[str, int]]
+ return _pre
+
+ @property
+ def post(self):
+ # type: () -> Optional[Tuple[str, int]]
+ return self._version.post[1] if self._version.post else None
+
+ @property
+ def dev(self):
+ # type: () -> Optional[Tuple[str, int]]
+ return self._version.dev[1] if self._version.dev else None
+
+ @property
+ def local(self):
+ # type: () -> Optional[str]
+ if self._version.local:
+ return ".".join(str(x) for x in self._version.local)
+ else:
+ return None
+
+ @property
+ def public(self):
+ # type: () -> str
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self):
+ # type: () -> str
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append("{0}!".format(self.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ return "".join(parts)
+
+ @property
+ def is_prerelease(self):
+ # type: () -> bool
+ return self.dev is not None or self.pre is not None
+
+ @property
+ def is_postrelease(self):
+ # type: () -> bool
+ return self.post is not None
+
+ @property
+ def is_devrelease(self):
+ # type: () -> bool
+ return self.dev is not None
+
+ @property
+ def major(self):
+ # type: () -> int
+ return self.release[0] if len(self.release) >= 1 else 0
+
+ @property
+ def minor(self):
+ # type: () -> int
+ return self.release[1] if len(self.release) >= 2 else 0
+
+ @property
+ def micro(self):
+ # type: () -> int
+ return self.release[2] if len(self.release) >= 3 else 0
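+
+    # For example, Version("1.0.0-RC1") normalizes on parsing: str() gives
+    # "1.0.0rc1", .is_prerelease is True and .base_version is "1.0.0"; for a
+    # local version, Version("2!1.0+local.1").public drops the "+local.1" part.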
+
+
+def _parse_letter_version(
+ letter, # type: str
+ number, # type: Union[str, bytes, SupportsInt]
+):
+ # type: (...) -> Optional[Tuple[str, int]]
+
+ if letter:
+ # We consider there to be an implicit 0 in a pre-release if there is
+ # not a numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume if we are given a number, but we are not given a letter
+ # then this is using the implicit post release syntax (e.g. 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+ return None
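+
+# For example, _parse_letter_version("alpha", None) returns ("a", 0), and
+# _parse_letter_version(None, "1") returns ("post", 1), i.e. the implicit
+# post-release spelling used by versions like "1.0-1".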
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+ # type: (str) -> Optional[LocalType]
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_separators.split(local)
+ )
+ return None
+
+
+def _cmpkey(
+ epoch, # type: int
+ release, # type: Tuple[int, ...]
+ pre, # type: Optional[Tuple[str, int]]
+ post, # type: Optional[Tuple[str, int]]
+ dev, # type: Optional[Tuple[str, int]]
+ local, # type: Optional[Tuple[SubLocalType]]
+):
+ # type: (...) -> CmpKey
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then take the rest, re-reverse
+    # it back into the correct order, and use that tuple as our sorting key.
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
+ # if there is not a pre or a post segment. If we have one of those then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ _pre = NegativeInfinity # type: PrePostDevType
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ _pre = Infinity
+ else:
+ _pre = pre
+
+ # Versions without a post segment should sort before those with one.
+ if post is None:
+ _post = NegativeInfinity # type: PrePostDevType
+
+ else:
+ _post = post
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ _dev = Infinity # type: PrePostDevType
+
+ else:
+ _dev = dev
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ _local = NegativeInfinity # type: LocalType
+ else:
+ # Versions with a local segment need that segment parsed to implement
+ # the sorting rules in PEP440.
+ # - Alpha numeric segments sort before numeric segments
+ # - Alpha numeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ _local = tuple(
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+ )
+
+ return epoch, _release, _pre, _post, _dev, _local
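+
+# For example, these rules are why Version("1.0") == Version("1.0.0") (trailing
+# zeros are trimmed from the key) and why
+# Version("1.0.dev0") < Version("1.0a1") < Version("1.0") < Version("1.0.post1").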
diff --git a/third_party/python/setuptools/pkg_resources/_vendor/pyparsing.py b/third_party/python/setuptools/pkg_resources/_vendor/pyparsing.py
new file mode 100644
index 0000000000..cf75e1e5fc
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/_vendor/pyparsing.py
@@ -0,0 +1,5742 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2018 Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
+(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
+L{Literal} expressions)::
+
+ from pyparsing import Word, alphas
+
+ # define grammar of a greeting
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+ - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
+ - construct character word-group expressions using the L{Word} class
+ - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
+ - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
+ - associate names with your parsed results using L{ParserElement.setResultsName}
+ - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
+ - find more useful common expressions in the L{pyparsing_common} namespace class
+"""
+
+__version__ = "2.2.1"
+__versionTime__ = "18 Sep 2018 00:49 UTC"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+ from _thread import RLock
+except ImportError:
+ from threading import RLock
+
+try:
+ # Python 3
+ from collections.abc import Iterable
+ from collections.abc import MutableMapping
+except ImportError:
+ # Python 2.7
+ from collections import Iterable
+ from collections import MutableMapping
+
+try:
+ from collections import OrderedDict as _OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict as _OrderedDict
+ except ImportError:
+ _OrderedDict = None
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+ _MAX_INT = sys.maxsize
+ basestring = str
+ unichr = chr
+ _ustr = str
+
+ # build list of single arg builtins, that can be used as parse actions
+ singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+ _MAX_INT = sys.maxint
+ range = xrange
+
+ def _ustr(obj):
+ """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+ str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
+ then < returns the unicode object | encodes it with the default encoding | ... >.
+ """
+ if isinstance(obj,unicode):
+ return obj
+
+ try:
+ # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+ # it won't break any existing code.
+ return str(obj)
+
+ except UnicodeEncodeError:
+ # Else encode it
+ ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+ xmlcharref = Regex(r'&#\d+;')
+ xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+ return xmlcharref.transformString(ret)
+
+ # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+ singleArgBuiltins = []
+ import __builtin__
+ for fname in "sum len sorted reversed list tuple set any all min max".split():
+ try:
+ singleArgBuiltins.append(getattr(__builtin__,fname))
+ except AttributeError:
+ continue
+
+_generatorType = type((y for y in range(1)))
+
+def _xml_escape(data):
+ """Escape &, <, >, ", ', etc. in a string of data."""
+
+ # ampersand must be replaced first
+ from_symbols = '&><"\''
+ to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+ for from_,to_ in zip(from_symbols, to_symbols):
+ data = data.replace(from_, to_)
+ return data
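+
+# For example, _xml_escape('a < b & "c"') returns 'a &lt; b &amp; &quot;c&quot;'
+# (the ampersand is replaced first so the other entities are not double-escaped).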
+
+class _Constants(object):
+ pass
+
+alphas = string.ascii_uppercase + string.ascii_lowercase
+nums = "0123456789"
+hexnums = nums + "ABCDEFabcdef"
+alphanums = alphas + nums
+_bslash = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+ """base exception class for all parsing runtime exceptions"""
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__( self, pstr, loc=0, msg=None, elem=None ):
+ self.loc = loc
+ if msg is None:
+ self.msg = pstr
+ self.pstr = ""
+ else:
+ self.msg = msg
+ self.pstr = pstr
+ self.parserElement = elem
+ self.args = (pstr, loc, msg)
+
+ @classmethod
+ def _from_exception(cls, pe):
+ """
+ internal factory method to simplify creating one type of ParseException
+ from another - avoids having __init__ signature conflicts among subclasses
+ """
+ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+ def __getattr__( self, aname ):
+ """supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+ """
+ if( aname == "lineno" ):
+ return lineno( self.loc, self.pstr )
+ elif( aname in ("col", "column") ):
+ return col( self.loc, self.pstr )
+ elif( aname == "line" ):
+ return line( self.loc, self.pstr )
+ else:
+ raise AttributeError(aname)
+
+ def __str__( self ):
+ return "%s (at char %d), (line:%d, col:%d)" % \
+ ( self.msg, self.loc, self.lineno, self.column )
+ def __repr__( self ):
+ return _ustr(self)
+ def markInputline( self, markerString = ">!<" ):
+ """Extracts the exception line from the input string, and marks
+ the location of the exception with a special symbol.
+ """
+ line_str = self.line
+ line_column = self.column - 1
+ if markerString:
+ line_str = "".join((line_str[:line_column],
+ markerString, line_str[line_column:]))
+ return line_str.strip()
+ def __dir__(self):
+ return "lineno col line".split() + dir(type(self))
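+
+    # For example, if parsing fails at "abc" in the input line "123 abc",
+    # markInputline() returns "123 >!<abc", marking the failure column.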
+
+class ParseException(ParseBaseException):
+ """
+    Exception thrown when parse expressions don't match the input string;
+ supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+
+ Example::
+ try:
+ Word(nums).setName("integer").parseString("ABC")
+ except ParseException as pe:
+ print(pe)
+ print("column: {}".format(pe.col))
+
+ prints::
+ Expected integer (at char 0), (line:1, col:1)
+ column: 1
+ """
+ pass
+
+class ParseFatalException(ParseBaseException):
+ """user-throwable exception thrown when inconsistent parse content
+ is found; stops all parsing immediately"""
+ pass
+
+class ParseSyntaxException(ParseFatalException):
+ """just like L{ParseFatalException}, but thrown internally when an
+ L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
+ immediately because an unbacktrackable syntax error has been found"""
+ pass
+
+#~ class ReparseException(ParseBaseException):
+ #~ """Experimental class - parse actions can raise this exception to cause
+ #~ pyparsing to reparse the input string:
+ #~ - with a modified input string, and/or
+ #~ - with a modified start location
+ #~ Set the values of the ReparseException in the constructor, and raise the
+ #~ exception in a parse action to cause pyparsing to use the new string/location.
+ #~ Setting the values as None causes no change to be made.
+ #~ """
+ #~ def __init_( self, newstring, restartLoc ):
+ #~ self.newParseText = newstring
+ #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+ """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
+ def __init__( self, parseElementList ):
+ self.parseElementTrace = parseElementList
+
+ def __str__( self ):
+ return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+ def __init__(self,p1,p2):
+ self.tup = (p1,p2)
+ def __getitem__(self,i):
+ return self.tup[i]
+ def __repr__(self):
+ return repr(self.tup[0])
+ def setOffset(self,i):
+ self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+ """
+ Structured parse results, to provide multiple means of access to the parsed data:
+ - as a list (C{len(results)})
+ - by list index (C{results[0], results[1]}, etc.)
+ - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
+
+ Example::
+ integer = Word(nums)
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+ # equivalent form:
+ # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ # parseString returns a ParseResults object
+ result = date_str.parseString("1999/12/31")
+
+ def test(s, fn=repr):
+ print("%s -> %s" % (s, fn(eval(s))))
+ test("list(result)")
+ test("result[0]")
+ test("result['month']")
+ test("result.day")
+ test("'month' in result")
+ test("'minutes' in result")
+ test("result.dump()", str)
+ prints::
+ list(result) -> ['1999', '/', '12', '/', '31']
+ result[0] -> '1999'
+ result['month'] -> '12'
+ result.day -> '31'
+ 'month' in result -> True
+ 'minutes' in result -> False
+ result.dump() -> ['1999', '/', '12', '/', '31']
+ - day: 31
+ - month: 12
+ - year: 1999
+ """
+ def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+ if isinstance(toklist, cls):
+ return toklist
+ retobj = object.__new__(cls)
+ retobj.__doinit = True
+ return retobj
+
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+ if self.__doinit:
+ self.__doinit = False
+ self.__name = None
+ self.__parent = None
+ self.__accumNames = {}
+ self.__asList = asList
+ self.__modal = modal
+ if toklist is None:
+ toklist = []
+ if isinstance(toklist, list):
+ self.__toklist = toklist[:]
+ elif isinstance(toklist, _generatorType):
+ self.__toklist = list(toklist)
+ else:
+ self.__toklist = [toklist]
+ self.__tokdict = dict()
+
+ if name is not None and name:
+ if not modal:
+ self.__accumNames[name] = 0
+ if isinstance(name,int):
+ name = _ustr(name) # will always return a str, but use _ustr for consistency
+ self.__name = name
+ if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+ if isinstance(toklist,basestring):
+ toklist = [ toklist ]
+ if asList:
+ if isinstance(toklist,ParseResults):
+ self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+ else:
+ self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+ self[name].__name = name
+ else:
+ try:
+ self[name] = toklist[0]
+ except (KeyError,TypeError,IndexError):
+ self[name] = toklist
+
+ def __getitem__( self, i ):
+ if isinstance( i, (int,slice) ):
+ return self.__toklist[i]
+ else:
+ if i not in self.__accumNames:
+ return self.__tokdict[i][-1][0]
+ else:
+ return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+ def __setitem__( self, k, v, isinstance=isinstance ):
+ if isinstance(v,_ParseResultsWithOffset):
+ self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+ sub = v[0]
+ elif isinstance(k,(int,slice)):
+ self.__toklist[k] = v
+ sub = v
+ else:
+ self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+ sub = v
+ if isinstance(sub,ParseResults):
+ sub.__parent = wkref(self)
+
+ def __delitem__( self, i ):
+ if isinstance(i,(int,slice)):
+ mylen = len( self.__toklist )
+ del self.__toklist[i]
+
+ # convert int to slice
+ if isinstance(i, int):
+ if i < 0:
+ i += mylen
+ i = slice(i, i+1)
+ # get removed indices
+ removed = list(range(*i.indices(mylen)))
+ removed.reverse()
+ # fixup indices in token dictionary
+ for name,occurrences in self.__tokdict.items():
+ for j in removed:
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+ else:
+ del self.__tokdict[i]
+
+ def __contains__( self, k ):
+ return k in self.__tokdict
+
+ def __len__( self ): return len( self.__toklist )
+ def __bool__(self): return ( not not self.__toklist )
+ __nonzero__ = __bool__
+ def __iter__( self ): return iter( self.__toklist )
+ def __reversed__( self ): return iter( self.__toklist[::-1] )
+ def _iterkeys( self ):
+ if hasattr(self.__tokdict, "iterkeys"):
+ return self.__tokdict.iterkeys()
+ else:
+ return iter(self.__tokdict)
+
+ def _itervalues( self ):
+ return (self[k] for k in self._iterkeys())
+
+ def _iteritems( self ):
+ return ((k, self[k]) for k in self._iterkeys())
+
+ if PY_3:
+ keys = _iterkeys
+ """Returns an iterator of all named result keys (Python 3.x only)."""
+
+ values = _itervalues
+ """Returns an iterator of all named result values (Python 3.x only)."""
+
+ items = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
+
+ else:
+ iterkeys = _iterkeys
+ """Returns an iterator of all named result keys (Python 2.x only)."""
+
+ itervalues = _itervalues
+ """Returns an iterator of all named result values (Python 2.x only)."""
+
+ iteritems = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+ def keys( self ):
+ """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iterkeys())
+
+ def values( self ):
+ """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.itervalues())
+
+ def items( self ):
+ """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iteritems())
+
+ def haskeys( self ):
+ """Since keys() returns an iterator, this method is helpful in bypassing
+ code that looks for the existence of any defined results names."""
+ return bool(self.__tokdict)
+
+ def pop( self, *args, **kwargs):
+ """
+ Removes and returns item at specified index (default=C{last}).
+ Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
+ argument or an integer argument, it will use C{list} semantics
+ and pop tokens from the list of parsed tokens. If passed a
+ non-integer argument (most likely a string), it will use C{dict}
+ semantics and pop the corresponding value from any defined
+ results names. A second default return value argument is
+ supported, just as in C{dict.pop()}.
+
+ Example::
+ def remove_first(tokens):
+ tokens.pop(0)
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+ print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+ label = Word(alphas)
+ patt = label("LABEL") + OneOrMore(Word(nums))
+ print(patt.parseString("AAB 123 321").dump())
+
+ # Use pop() in a parse action to remove named result (note that corresponding value is not
+ # removed from list form of results)
+ def remove_LABEL(tokens):
+ tokens.pop("LABEL")
+ return tokens
+ patt.addParseAction(remove_LABEL)
+ print(patt.parseString("AAB 123 321").dump())
+ prints::
+ ['AAB', '123', '321']
+ - LABEL: AAB
+
+ ['AAB', '123', '321']
+ """
+ if not args:
+ args = [-1]
+ for k,v in kwargs.items():
+ if k == 'default':
+ args = (args[0], v)
+ else:
+ raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+ if (isinstance(args[0], int) or
+ len(args) == 1 or
+ args[0] in self):
+ index = args[0]
+ ret = self[index]
+ del self[index]
+ return ret
+ else:
+ defaultvalue = args[1]
+ return defaultvalue
+
+ def get(self, key, defaultValue=None):
+ """
+ Returns named result matching the given key, or if there is no
+ such name, then returns the given C{defaultValue} or C{None} if no
+ C{defaultValue} is specified.
+
+ Similar to C{dict.get()}.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString("1999/12/31")
+ print(result.get("year")) # -> '1999'
+ print(result.get("hour", "not specified")) # -> 'not specified'
+ print(result.get("hour")) # -> None
+ """
+ if key in self:
+ return self[key]
+ else:
+ return defaultValue
+
+ def insert( self, index, insStr ):
+ """
+ Inserts new element at location index in the list of parsed tokens.
+
+ Similar to C{list.insert()}.
+
+ Example::
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to insert the parse location in the front of the parsed results
+ def insert_locn(locn, tokens):
+ tokens.insert(0, locn)
+ print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+ """
+ self.__toklist.insert(index, insStr)
+ # fixup indices in token dictionary
+ for name,occurrences in self.__tokdict.items():
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+ def append( self, item ):
+ """
+ Add single element to end of ParseResults list of elements.
+
+ Example::
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to compute the sum of the parsed integers, and add it to the end
+ def append_sum(tokens):
+ tokens.append(sum(map(int, tokens)))
+ print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+ """
+ self.__toklist.append(item)
+
+ def extend( self, itemseq ):
+ """
+ Add sequence of elements to end of ParseResults list of elements.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+
+ # use a parse action to append the reverse of the matched strings, to make a palindrome
+ def make_palindrome(tokens):
+ tokens.extend(reversed([t[::-1] for t in tokens]))
+ return ''.join(tokens)
+ print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+ """
+ if isinstance(itemseq, ParseResults):
+ self += itemseq
+ else:
+ self.__toklist.extend(itemseq)
+
+ def clear( self ):
+ """
+ Clear all elements and results names.
+ """
+ del self.__toklist[:]
+ self.__tokdict.clear()
+
+ def __getattr__( self, name ):
+ try:
+ return self[name]
+ except KeyError:
+ return ""
+
+ def __add__( self, other ):
+ ret = self.copy()
+ ret += other
+ return ret
+
+ def __iadd__( self, other ):
+ if other.__tokdict:
+ offset = len(self.__toklist)
+ addoffset = lambda a: offset if a<0 else a+offset
+ otheritems = other.__tokdict.items()
+ otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+ for (k,vlist) in otheritems for v in vlist]
+ for k,v in otherdictitems:
+ self[k] = v
+ if isinstance(v[0],ParseResults):
+ v[0].__parent = wkref(self)
+
+ self.__toklist += other.__toklist
+ self.__accumNames.update( other.__accumNames )
+ return self
+
+ def __radd__(self, other):
+ if isinstance(other,int) and other == 0:
+ # useful for merging many ParseResults using sum() builtin
+ return self.copy()
+ else:
+ # this may raise a TypeError - so be it
+ return other + self
+
+ def __repr__( self ):
+ return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+ def __str__( self ):
+ return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+ def _asStringList( self, sep='' ):
+ out = []
+ for item in self.__toklist:
+ if out and sep:
+ out.append(sep)
+ if isinstance( item, ParseResults ):
+ out += item._asStringList()
+ else:
+ out.append( _ustr(item) )
+ return out
+
+ def asList( self ):
+ """
+ Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+ result = patt.parseString("sldkj lsdkj sldkj")
+ # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+ print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+
+ # Use asList() to create an actual list
+ result_list = result.asList()
+ print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+ """
+ return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+ def asDict( self ):
+ """
+ Returns the named parse results as a nested dictionary.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+
+ result_dict = result.asDict()
+ print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+ # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+ import json
+ print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+ print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+ """
+ if PY_3:
+ item_fn = self.items
+ else:
+ item_fn = self.iteritems
+
+ def toItem(obj):
+ if isinstance(obj, ParseResults):
+ if obj.haskeys():
+ return obj.asDict()
+ else:
+ return [toItem(v) for v in obj]
+ else:
+ return obj
+
+ return dict((k,toItem(v)) for k,v in item_fn())
+
+ def copy( self ):
+ """
+ Returns a new copy of a C{ParseResults} object.
+ """
+ ret = ParseResults( self.__toklist )
+ ret.__tokdict = self.__tokdict.copy()
+ ret.__parent = self.__parent
+ ret.__accumNames.update( self.__accumNames )
+ ret.__name = self.__name
+ return ret
+
+ def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+ """
+ (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+ """
+ nl = "\n"
+ out = []
+ namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+ for v in vlist)
+ nextLevelIndent = indent + " "
+
+ # collapse out indents if formatting is not desired
+ if not formatted:
+ indent = ""
+ nextLevelIndent = ""
+ nl = ""
+
+ selfTag = None
+ if doctag is not None:
+ selfTag = doctag
+ else:
+ if self.__name:
+ selfTag = self.__name
+
+ if not selfTag:
+ if namedItemsOnly:
+ return ""
+ else:
+ selfTag = "ITEM"
+
+ out += [ nl, indent, "<", selfTag, ">" ]
+
+ for i,res in enumerate(self.__toklist):
+ if isinstance(res,ParseResults):
+ if i in namedItems:
+ out += [ res.asXML(namedItems[i],
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ out += [ res.asXML(None,
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ # individual token, see if there is a name for it
+ resTag = None
+ if i in namedItems:
+ resTag = namedItems[i]
+ if not resTag:
+ if namedItemsOnly:
+ continue
+ else:
+ resTag = "ITEM"
+ xmlBodyText = _xml_escape(_ustr(res))
+ out += [ nl, nextLevelIndent, "<", resTag, ">",
+ xmlBodyText,
+ "</", resTag, ">" ]
+
+ out += [ nl, indent, "</", selfTag, ">" ]
+ return "".join(out)
+
+ def __lookup(self,sub):
+ for k,vlist in self.__tokdict.items():
+ for v,loc in vlist:
+ if sub is v:
+ return k
+ return None
+
+ def getName(self):
+ r"""
+ Returns the results name for this token expression. Useful when several
+ different expressions might match at a particular location.
+
+ Example::
+ integer = Word(nums)
+ ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+ house_number_expr = Suppress('#') + Word(nums, alphanums)
+ user_data = (Group(house_number_expr)("house_number")
+ | Group(ssn_expr)("ssn")
+ | Group(integer)("age"))
+ user_info = OneOrMore(user_data)
+
+ result = user_info.parseString("22 111-22-3333 #221B")
+ for item in result:
+ print(item.getName(), ':', item[0])
+ prints::
+ age : 22
+ ssn : 111-22-3333
+ house_number : 221B
+ """
+ if self.__name:
+ return self.__name
+ elif self.__parent:
+ par = self.__parent()
+ if par:
+ return par.__lookup(self)
+ else:
+ return None
+ elif (len(self) == 1 and
+ len(self.__tokdict) == 1 and
+ next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+ return next(iter(self.__tokdict.keys()))
+ else:
+ return None
+
+ def dump(self, indent='', depth=0, full=True):
+ """
+ Diagnostic method for listing out the contents of a C{ParseResults}.
+ Accepts an optional C{indent} argument so that this string can be embedded
+ in a nested display of other data.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(result.dump())
+ prints::
+ ['12', '/', '31', '/', '1999']
+ - day: 1999
+ - month: 31
+ - year: 12
+ """
+ out = []
+ NL = '\n'
+ out.append( indent+_ustr(self.asList()) )
+ if full:
+ if self.haskeys():
+ items = sorted((str(k), v) for k,v in self.items())
+ for k,v in items:
+ if out:
+ out.append(NL)
+ out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
+ if isinstance(v,ParseResults):
+ if v:
+ out.append( v.dump(indent,depth+1) )
+ else:
+ out.append(_ustr(v))
+ else:
+ out.append(repr(v))
+ elif any(isinstance(vv,ParseResults) for vv in self):
+ v = self
+ for i,vv in enumerate(v):
+ if isinstance(vv,ParseResults):
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
+ else:
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
+
+ return "".join(out)
+
+ def pprint(self, *args, **kwargs):
+ """
+ Pretty-printer for parsed results as a list, using the C{pprint} module.
+ Accepts additional positional or keyword args as defined for the
+ C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
+
+ Example::
+ ident = Word(alphas, alphanums)
+ num = Word(nums)
+ func = Forward()
+ term = ident | num | Group('(' + func + ')')
+ func <<= ident + Group(Optional(delimitedList(term)))
+ result = func.parseString("fna a,b,(fnb c,d,200),100")
+ result.pprint(width=40)
+ prints::
+ ['fna',
+ ['a',
+ 'b',
+ ['(', 'fnb', ['c', 'd', '200'], ')'],
+ '100']]
+ """
+ pprint.pprint(self.asList(), *args, **kwargs)
+
+ # add support for pickle protocol
+ def __getstate__(self):
+ return ( self.__toklist,
+ ( self.__tokdict.copy(),
+ self.__parent is not None and self.__parent() or None,
+ self.__accumNames,
+ self.__name ) )
+
+ def __setstate__(self,state):
+ self.__toklist = state[0]
+ (self.__tokdict,
+ par,
+ inAccumNames,
+ self.__name) = state[1]
+ self.__accumNames = {}
+ self.__accumNames.update(inAccumNames)
+ if par is not None:
+ self.__parent = wkref(par)
+ else:
+ self.__parent = None
+
+ def __getnewargs__(self):
+ return self.__toklist, self.__name, self.__asList, self.__modal
+
+ def __dir__(self):
+ return (dir(type(self)) + list(self.keys()))
+
+MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+ """Returns current column within a string, counting newlines as line separators.
+ The first column is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+ """
+ s = strg
+ return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+ """Returns current line number within a string, counting newlines as line separators.
+ The first line is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+ """
+ return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+ """Returns the line of text containing loc within a string, counting newlines as line separators.
+ """
+ lastCR = strg.rfind("\n", 0, loc)
+ nextCR = strg.find("\n", loc)
+ if nextCR >= 0:
+ return strg[lastCR+1:nextCR]
+ else:
+ return strg[lastCR+1:]
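+# Illustrative sketch of the three location helpers above:
+# data = "abc\ndef"
+# lineno(5, data) # -> 2 (loc 5 is 'e', on the second line)
+# col(5, data) # -> 2 (second column of that line)
+# line(5, data) # -> 'def' (the full line containing loc 5)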
+
+def _defaultStartDebugAction( instring, loc, expr ):
+ print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+ print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+ print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+ """'Do-nothing' debug action, to suppress debugging output during parsing."""
+ pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+ #~ if func in singleArgBuiltins:
+ #~ return lambda s,l,t: func(t)
+ #~ limit = 0
+ #~ foundArity = False
+ #~ def wrapper(*args):
+ #~ nonlocal limit,foundArity
+ #~ while 1:
+ #~ try:
+ #~ ret = func(*args[limit:])
+ #~ foundArity = True
+ #~ return ret
+ #~ except TypeError:
+ #~ if limit == maxargs or foundArity:
+ #~ raise
+ #~ limit += 1
+ #~ continue
+ #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+'decorator to trim function calls to match the arity of the target'
+def _trim_arity(func, maxargs=2):
+ if func in singleArgBuiltins:
+ return lambda s,l,t: func(t)
+ limit = [0]
+ foundArity = [False]
+
+ # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+ if system_version[:2] >= (3,5):
+ def extract_stack(limit=0):
+ # special handling for Python 3.5.0 - extra deep call stack by 1
+ offset = -3 if system_version == (3,5,0) else -2
+ frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+ return [frame_summary[:2]]
+ def extract_tb(tb, limit=0):
+ frames = traceback.extract_tb(tb, limit=limit)
+ frame_summary = frames[-1]
+ return [frame_summary[:2]]
+ else:
+ extract_stack = traceback.extract_stack
+ extract_tb = traceback.extract_tb
+
+ # synthesize what would be returned by traceback.extract_stack at the call to
+ # user's parse action 'func', so that we don't incur call penalty at parse time
+
+ LINE_DIFF = 6
+ # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
+ # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+ this_line = extract_stack(limit=2)[-1]
+ pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+ def wrapper(*args):
+ while 1:
+ try:
+ ret = func(*args[limit[0]:])
+ foundArity[0] = True
+ return ret
+ except TypeError:
+ # re-raise TypeErrors if they did not come from our arity testing
+ if foundArity[0]:
+ raise
+ else:
+ try:
+ tb = sys.exc_info()[-1]
+ if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+ raise
+ finally:
+ del tb
+
+ if limit[0] <= maxargs:
+ limit[0] += 1
+ continue
+ raise
+
+ # copy func name to wrapper for sensible debug output
+ func_name = "<parse action>"
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ wrapper.__name__ = func_name
+
+ return wrapper
+
+class ParserElement(object):
+ """Abstract base level parser element class."""
+ DEFAULT_WHITE_CHARS = " \n\t\r"
+ verbose_stacktrace = False
+
+ @staticmethod
+ def setDefaultWhitespaceChars( chars ):
+ r"""
+ Overrides the default whitespace chars
+
+ Example::
+ # default whitespace chars are space, <TAB> and newline
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
+
+ # change to just treat newline as significant
+ ParserElement.setDefaultWhitespaceChars(" \t")
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
+ """
+ ParserElement.DEFAULT_WHITE_CHARS = chars
+
+ @staticmethod
+ def inlineLiteralsUsing(cls):
+ """
+ Set class to be used for inclusion of string literals into a parser.
+
+ Example::
+ # default literal class used is Literal
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+
+ # change to Suppress
+ ParserElement.inlineLiteralsUsing(Suppress)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
+ """
+ ParserElement._literalStringClass = cls
+
+ def __init__( self, savelist=False ):
+ self.parseAction = list()
+ self.failAction = None
+ #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
+ self.strRepr = None
+ self.resultsName = None
+ self.saveAsList = savelist
+ self.skipWhitespace = True
+ self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ self.copyDefaultWhiteChars = True
+ self.mayReturnEmpty = False # used when checking for left-recursion
+ self.keepTabs = False
+ self.ignoreExprs = list()
+ self.debug = False
+ self.streamlined = False
+ self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+ self.errmsg = ""
+ self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+ self.debugActions = ( None, None, None ) #custom debug actions
+ self.re = None
+ self.callPreparse = True # used to avoid redundant calls to preParse
+ self.callDuringTry = False
+
+ def copy( self ):
+ """
+ Make a copy of this C{ParserElement}. Useful for defining different parse actions
+ for the same parsing pattern, using copies of the original parse element.
+
+ Example::
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+ integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+
+ print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+ prints::
+ [5120, 100, 655360, 268435456]
+ Equivalent form of C{expr.copy()} is just C{expr()}::
+ integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+ """
+ cpy = copy.copy( self )
+ cpy.parseAction = self.parseAction[:]
+ cpy.ignoreExprs = self.ignoreExprs[:]
+ if self.copyDefaultWhiteChars:
+ cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ return cpy
+
+ def setName( self, name ):
+ """
+ Define name for this expression, to make debugging and exception messages clearer.
+
+ Example::
+ Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+ Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
+ """
+ self.name = name
+ self.errmsg = "Expected " + self.name
+ if hasattr(self,"exception"):
+ self.exception.msg = self.errmsg
+ return self
+
+ def setResultsName( self, name, listAllMatches=False ):
+ """
+ Define name for referencing matching tokens as a nested attribute
+ of the returned parse results.
+ NOTE: this returns a *copy* of the original C{ParserElement} object;
+ this is so that the client can define a basic element, such as an
+ integer, and reference it in multiple places with different names.
+
+ You can also set results names using the abbreviated syntax,
+ C{expr("name")} in place of C{expr.setResultsName("name")} -
+ see L{I{__call__}<__call__>}.
+
+ Example::
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+
+ # equivalent form:
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+ """
+ newself = self.copy()
+ if name.endswith("*"):
+ name = name[:-1]
+ listAllMatches=True
+ newself.resultsName = name
+ newself.modalResults = not listAllMatches
+ return newself
+
+ def setBreak(self,breakFlag = True):
+ """Method to invoke the Python pdb debugger when this element is
+ about to be parsed. Set C{breakFlag} to True to enable, False to
+ disable.
+ """
+ if breakFlag:
+ _parseMethod = self._parse
+ def breaker(instring, loc, doActions=True, callPreParse=True):
+ import pdb
+ pdb.set_trace()
+ return _parseMethod( instring, loc, doActions, callPreParse )
+ breaker._originalParseMethod = _parseMethod
+ self._parse = breaker
+ else:
+ if hasattr(self._parse,"_originalParseMethod"):
+ self._parse = self._parse._originalParseMethod
+ return self
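+ # Illustrative sketch: invoke the pdb debugger just before this expression is parsed:
+ # integer = Word(nums).setBreak()
+ # (Word(alphas) + integer).parseString("abc 123") # pdb prompt opens before '123' is matched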
+
+ def setParseAction( self, *fns, **kwargs ):
+ """
+ Define one or more actions to perform when successfully matching parse element definition.
+ Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+ C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+ - s = the original string being parsed (see note below)
+ - loc = the location of the matching substring
+ - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
+ If the functions in fns modify the tokens, they can return them as the return
+ value from fn, and the modified list of tokens will replace the original.
+ Otherwise, fn does not need to return any value.
+
+ Optional keyword arguments:
+ - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{parseString}<parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer + '/' + integer + '/' + integer
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+ # use parse action to convert to ints at parse time
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ date_str = integer + '/' + integer + '/' + integer
+
+ # note that integer fields are now ints, not strings
+ date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
+ """
+ self.parseAction = list(map(_trim_arity, list(fns)))
+ self.callDuringTry = kwargs.get("callDuringTry", False)
+ return self
+
+ def addParseAction( self, *fns, **kwargs ):
+ """
+ Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
+
+ See examples in L{I{copy}<copy>}.
+ """
+ self.parseAction += list(map(_trim_arity, list(fns)))
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def addCondition(self, *fns, **kwargs):
+ """Add a boolean predicate function to expression's list of parse actions. See
+ L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
+ functions passed to C{addCondition} need to return boolean success/fail of the condition.
+
+ Optional keyword arguments:
+ - message = define a custom message to be used in the raised exception
+ - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+
+ Example::
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ year_int = integer.copy()
+ year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+ date_str = year_int + '/' + integer + '/' + integer
+
+ result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+ """
+ msg = kwargs.get("message", "failed user-defined condition")
+ exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+ for fn in fns:
+ def pa(s,l,t):
+ if not bool(_trim_arity(fn)(s,l,t)):
+ raise exc_type(s,l,msg)
+ self.parseAction.append(pa)
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def setFailAction( self, fn ):
+ """Define action to perform if parsing fails at this expression.
+ Fail action fn is a callable function that takes the arguments
+ C{fn(s,loc,expr,err)} where:
+ - s = string being parsed
+ - loc = location where expression match was attempted and failed
+ - expr = the parse expression that failed
+ - err = the exception thrown
+ The function returns no value. It may throw C{L{ParseFatalException}}
+ if it is desired to stop parsing immediately."""
+ self.failAction = fn
+ return self
+
+ def _skipIgnorables( self, instring, loc ):
+ exprsFound = True
+ while exprsFound:
+ exprsFound = False
+ for e in self.ignoreExprs:
+ try:
+ while 1:
+ loc,dummy = e._parse( instring, loc )
+ exprsFound = True
+ except ParseException:
+ pass
+ return loc
+
+ def preParse( self, instring, loc ):
+ if self.ignoreExprs:
+ loc = self._skipIgnorables( instring, loc )
+
+ if self.skipWhitespace:
+ wt = self.whiteChars
+ instrlen = len(instring)
+ while loc < instrlen and instring[loc] in wt:
+ loc += 1
+
+ return loc
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ return loc, []
+
+ def postParse( self, instring, loc, tokenlist ):
+ return tokenlist
+
+ #~ @profile
+ def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+ debugging = ( self.debug ) #and doActions )
+
+ if debugging or self.failAction:
+ #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+ if (self.debugActions[0] ):
+ self.debugActions[0]( instring, loc, self )
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse( instring, loc )
+ else:
+ preloc = loc
+ tokensStart = preloc
+ try:
+ try:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+ except IndexError:
+ raise ParseException( instring, len(instring), self.errmsg, self )
+ except ParseBaseException as err:
+ #~ print ("Exception raised:", err)
+ if self.debugActions[2]:
+ self.debugActions[2]( instring, tokensStart, self, err )
+ if self.failAction:
+ self.failAction( instring, tokensStart, self, err )
+ raise
+ else:
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse( instring, loc )
+ else:
+ preloc = loc
+ tokensStart = preloc
+ if self.mayIndexError or preloc >= len(instring):
+ try:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+ except IndexError:
+ raise ParseException( instring, len(instring), self.errmsg, self )
+ else:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+ tokens = self.postParse( instring, loc, tokens )
+
+ retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+ if self.parseAction and (doActions or self.callDuringTry):
+ if debugging:
+ try:
+ for fn in self.parseAction:
+ tokens = fn( instring, tokensStart, retTokens )
+ if tokens is not None:
+ retTokens = ParseResults( tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+ modal=self.modalResults )
+ except ParseBaseException as err:
+ #~ print "Exception raised in user parse action:", err
+ if (self.debugActions[2] ):
+ self.debugActions[2]( instring, tokensStart, self, err )
+ raise
+ else:
+ for fn in self.parseAction:
+ tokens = fn( instring, tokensStart, retTokens )
+ if tokens is not None:
+ retTokens = ParseResults( tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+ modal=self.modalResults )
+ if debugging:
+ #~ print ("Matched",self,"->",retTokens.asList())
+ if (self.debugActions[1] ):
+ self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+ return loc, retTokens
+
+ def tryParse( self, instring, loc ):
+ try:
+ return self._parse( instring, loc, doActions=False )[0]
+ except ParseFatalException:
+ raise ParseException( instring, loc, self.errmsg, self)
+
+ def canParseNext(self, instring, loc):
+ try:
+ self.tryParse(instring, loc)
+ except (ParseException, IndexError):
+ return False
+ else:
+ return True
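+ # Illustrative sketch: canParseNext() tests for a match without consuming input:
+ # expr = Word(nums)
+ # expr.canParseNext("abc123", 0) # -> False
+ # expr.canParseNext("abc123", 3) # -> True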
+
+ class _UnboundedCache(object):
+ def __init__(self):
+ cache = {}
+ self.not_in_cache = not_in_cache = object()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ if _OrderedDict is not None:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = _OrderedDict()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(cache) > size:
+ try:
+ cache.popitem(False)
+ except KeyError:
+ pass
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ else:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = {}
+ key_fifo = collections.deque([], size)
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(key_fifo) > size:
+ cache.pop(key_fifo.popleft(), None)
+ key_fifo.append(key)
+
+ def clear(self):
+ cache.clear()
+ key_fifo.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ # argument cache for optimizing repeated calls when backtracking through recursive expressions
+ packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+ packrat_cache_lock = RLock()
+ packrat_cache_stats = [0, 0]
+
+ # this method gets repeatedly called during backtracking with the same arguments -
+ # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+ def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+ HIT, MISS = 0, 1
+ lookup = (self, instring, loc, callPreParse, doActions)
+ with ParserElement.packrat_cache_lock:
+ cache = ParserElement.packrat_cache
+ value = cache.get(lookup)
+ if value is cache.not_in_cache:
+ ParserElement.packrat_cache_stats[MISS] += 1
+ try:
+ value = self._parseNoCache(instring, loc, doActions, callPreParse)
+ except ParseBaseException as pe:
+ # cache a copy of the exception, without the traceback
+ cache.set(lookup, pe.__class__(*pe.args))
+ raise
+ else:
+ cache.set(lookup, (value[0], value[1].copy()))
+ return value
+ else:
+ ParserElement.packrat_cache_stats[HIT] += 1
+ if isinstance(value, Exception):
+ raise value
+ return (value[0], value[1].copy())
+
+ _parse = _parseNoCache
+
+ @staticmethod
+ def resetCache():
+ ParserElement.packrat_cache.clear()
+ ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+ _packratEnabled = False
+ @staticmethod
+ def enablePackrat(cache_size_limit=128):
+ """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+ Repeated parse attempts at the same string location (which happens
+ often in many complex grammars) can immediately return a cached value,
+ instead of re-executing parsing/validating code. Memoizing is done for
+ both valid results and parsing exceptions.
+
+ Parameters:
+ - cache_size_limit - (default=C{128}) - if an integer value is provided
+ will limit the size of the packrat cache; if None is passed, then
+ the cache size will be unbounded; if 0 is passed, the cache will
+ be effectively disabled.
+
+ This speedup may break existing programs that use parse actions that
+ have side-effects. For this reason, packrat parsing is disabled when
+ you first import pyparsing. To activate the packrat feature, your
+ program must call the class method C{ParserElement.enablePackrat()}. If
+ your program uses C{psyco} to "compile as you go", you must call
+ C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
+ Python will crash. For best results, call C{enablePackrat()} immediately
+ after importing pyparsing.
+
+ Example::
+ import pyparsing
+ pyparsing.ParserElement.enablePackrat()
+ """
+ if not ParserElement._packratEnabled:
+ ParserElement._packratEnabled = True
+ if cache_size_limit is None:
+ ParserElement.packrat_cache = ParserElement._UnboundedCache()
+ else:
+ ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+ ParserElement._parse = ParserElement._parseCache
+
+ def parseString( self, instring, parseAll=False ):
+ """
+ Execute the parse expression with the given string.
+ This is the main interface to the client code, once the complete
+ expression has been built.
+
+ If you want the grammar to require that the entire input string be
+ successfully parsed, then set C{parseAll} to True (equivalent to ending
+ the grammar with C{L{StringEnd()}}).
+
+ Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+ in order to report proper column numbers in parse actions.
+ If the input string contains tabs and
+ the grammar uses parse actions that use the C{loc} argument to index into the
+ string being parsed, you can ensure you have a consistent view of the input
+ string by:
+ - calling C{parseWithTabs} on your grammar before calling C{parseString}
+ (see L{I{parseWithTabs}<parseWithTabs>})
+ - define your parse action using the full C{(s,loc,toks)} signature, and
+ reference the input string using the parse action's C{s} argument
+ - explicitly expand the tabs in your input string before calling
+ C{parseString}
+
+ Example::
+ Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
+ Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
+ """
+ ParserElement.resetCache()
+ if not self.streamlined:
+ self.streamline()
+ #~ self.saveAsList = True
+ for e in self.ignoreExprs:
+ e.streamline()
+ if not self.keepTabs:
+ instring = instring.expandtabs()
+ try:
+ loc, tokens = self._parse( instring, 0 )
+ if parseAll:
+ loc = self.preParse( instring, loc )
+ se = Empty() + StringEnd()
+ se._parse( instring, loc )
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+ else:
+ return tokens
+
+ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+ """
+ Scan the input string for expression matches. Each match will return the
+ matching tokens, start location, and end location. May be called with optional
+ C{maxMatches} argument, to clip scanning after 'n' matches are found. If
+ C{overlap} is specified, then overlapping matches will be reported.
+
+ Note that the start and end locations are reported relative to the string
+ being parsed. See L{I{parseString}<parseString>} for more information on parsing
+ strings with embedded tabs.
+
+ Example::
+ source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+ print(source)
+ for tokens,start,end in Word(alphas).scanString(source):
+ print(' '*start + '^'*(end-start))
+ print(' '*start + tokens[0])
+
+ prints::
+
+ sldjf123lsdjjkf345sldkjf879lkjsfd987
+ ^^^^^
+ sldjf
+ ^^^^^^^
+ lsdjjkf
+ ^^^^^^
+ sldkjf
+ ^^^^^^
+ lkjsfd
+ """
+ if not self.streamlined:
+ self.streamline()
+ for e in self.ignoreExprs:
+ e.streamline()
+
+ if not self.keepTabs:
+ instring = _ustr(instring).expandtabs()
+ instrlen = len(instring)
+ loc = 0
+ preparseFn = self.preParse
+ parseFn = self._parse
+ ParserElement.resetCache()
+ matches = 0
+ try:
+ while loc <= instrlen and matches < maxMatches:
+ try:
+ preloc = preparseFn( instring, loc )
+ nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+ except ParseException:
+ loc = preloc+1
+ else:
+ if nextLoc > loc:
+ matches += 1
+ yield tokens, preloc, nextLoc
+ if overlap:
+ nextloc = preparseFn( instring, loc )
+ if nextloc > loc:
+ loc = nextLoc
+ else:
+ loc += 1
+ else:
+ loc = nextLoc
+ else:
+ loc = preloc+1
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def transformString( self, instring ):
+ """
+ Extension to C{L{scanString}}, to modify matching text with modified tokens that may
+ be returned from a parse action. To use C{transformString}, define a grammar and
+ attach a parse action to it that modifies the returned token list.
+ Invoking C{transformString()} on a target string will then scan for matches,
+ and replace the matched text patterns according to the logic in the parse
+ action. C{transformString()} returns the resulting transformed string.
+
+ Example::
+ wd = Word(alphas)
+ wd.setParseAction(lambda toks: toks[0].title())
+
+ print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+ Prints::
+ Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+ """
+ out = []
+ lastE = 0
+ # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+ # keep string locs straight between transformString and scanString
+ self.keepTabs = True
+ try:
+ for t,s,e in self.scanString( instring ):
+ out.append( instring[lastE:s] )
+ if t:
+ if isinstance(t,ParseResults):
+ out += t.asList()
+ elif isinstance(t,list):
+ out += t
+ else:
+ out.append(t)
+ lastE = e
+ out.append(instring[lastE:])
+ out = [o for o in out if o]
+ return "".join(map(_ustr,_flatten(out)))
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def searchString( self, instring, maxMatches=_MAX_INT ):
+ """
+ Another extension to C{L{scanString}}, simplifying the access to the tokens found
+ to match the given parse expression. May be called with optional
+ C{maxMatches} argument, to clip searching after 'n' matches are found.
+
+ Example::
+ # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+ cap_word = Word(alphas.upper(), alphas.lower())
+
+ print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+ # the sum() builtin can be used to merge results into a single ParseResults object
+ print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+ prints::
+ [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+ ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+ """
+ try:
+ return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+ """
+ Generator method to split a string using the given expression as a separator.
+ May be called with optional C{maxsplit} argument, to limit the number of splits;
+ and the optional C{includeSeparators} argument (default=C{False}), indicating whether the separating
+ matching text should be included in the split results.
+
+ Example::
+ punc = oneOf(list(".,;:/-!?"))
+ print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+ prints::
+ ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+ """
+ splits = 0
+ last = 0
+ for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+ yield instring[last:s]
+ if includeSeparators:
+ yield t[0]
+ last = e
+ yield instring[last:]
+
+ def __add__(self, other ):
+ """
+ Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
+ converts them to L{Literal}s by default.
+
+ Example::
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+ Prints::
+ Hello, World! -> ['Hello', ',', 'World', '!']
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return And( [ self, other ] )
+
+ def __radd__(self, other ):
+ """
+ Implementation of + operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other + self
+
+ def __sub__(self, other):
+ """
+ Implementation of - operator, returns C{L{And}} with error stop
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return self + And._ErrorStop() + other
+
+ def __rsub__(self, other ):
+ """
+ Implementation of - operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other - self
+
+ def __mul__(self,other):
+ """
+ Implementation of * operator, allows use of C{expr * 3} in place of
+ C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
+ tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
+ may also include C{None} as in:
+ - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+ to C{expr*n + L{ZeroOrMore}(expr)}
+ (read as "at least n instances of C{expr}")
+ - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+ (read as "0 to n instances of C{expr}")
+ - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+ - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+ Note that C{expr*(None,n)} does not raise an exception if
+ more than n exprs exist in the input stream; that is,
+ C{expr*(None,n)} does not enforce a maximum number of expr
+ occurrences. If this behavior is desired, then write
+ C{expr*(None,n) + ~expr}
+ """
+ if isinstance(other,int):
+ minElements, optElements = other,0
+ elif isinstance(other,tuple):
+ other = (other + (None, None))[:2]
+ if other[0] is None:
+ other = (0, other[1])
+ if isinstance(other[0],int) and other[1] is None:
+ if other[0] == 0:
+ return ZeroOrMore(self)
+ if other[0] == 1:
+ return OneOrMore(self)
+ else:
+ return self*other[0] + ZeroOrMore(self)
+ elif isinstance(other[0],int) and isinstance(other[1],int):
+ minElements, optElements = other
+ optElements -= minElements
+ else:
+ raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
+ else:
+ raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
+
+ if minElements < 0:
+ raise ValueError("cannot multiply ParserElement by negative value")
+ if optElements < 0:
+ raise ValueError("second tuple value must be greater or equal to first tuple value")
+ if minElements == optElements == 0:
+ raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+ if (optElements):
+ def makeOptionalList(n):
+ if n>1:
+ return Optional(self + makeOptionalList(n-1))
+ else:
+ return Optional(self)
+ if minElements:
+ if minElements == 1:
+ ret = self + makeOptionalList(optElements)
+ else:
+ ret = And([self]*minElements) + makeOptionalList(optElements)
+ else:
+ ret = makeOptionalList(optElements)
+ else:
+ if minElements == 1:
+ ret = self
+ else:
+ ret = And([self]*minElements)
+ return ret
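+ # Illustrative sketch of the multiplier forms described above:
+ # Word(nums) * 3 # same as Word(nums) + Word(nums) + Word(nums)
+ # Word(nums) * (2, 4) # match 2, 3, or 4 integers
+ # Word(nums) * (1, None) # same as OneOrMore(Word(nums))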
+
+ def __rmul__(self, other):
+ return self.__mul__(other)
+
+ def __or__(self, other ):
+ """
+ Implementation of | operator - returns C{L{MatchFirst}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return MatchFirst( [ self, other ] )
+
+ def __ror__(self, other ):
+ """
+ Implementation of | operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other | self
+
+ def __xor__(self, other ):
+ """
+ Implementation of ^ operator - returns C{L{Or}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Or( [ self, other ] )
+
+ def __rxor__(self, other ):
+ """
+ Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other ^ self
+
+ def __and__(self, other ):
+ """
+ Implementation of & operator - returns C{L{Each}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Each( [ self, other ] )
+
+ def __rand__(self, other ):
+ """
+ Implementation of & operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other & self
+
+ def __invert__( self ):
+ """
+ Implementation of ~ operator - returns C{L{NotAny}}
+ """
+ return NotAny( self )
+
+ def __call__(self, name=None):
+ """
+ Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
+
+ If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+ passed as C{True}.
+
+ If C{name} is omitted, same as calling C{L{copy}}.
+
+ Example::
+ # these are equivalent
+ userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+ userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
+ """
+ if name is not None:
+ return self.setResultsName(name)
+ else:
+ return self.copy()
+
+ def suppress( self ):
+ """
+ Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+ cluttering up returned output.
+ """
+ return Suppress( self )
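+ # Illustrative sketch: suppress punctuation so it is dropped from the results:
+ # date = Word(nums) + Literal('/').suppress() + Word(nums) + Literal('/').suppress() + Word(nums)
+ # date.parseString("1999/12/31") # -> ['1999', '12', '31']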
+
+ def leaveWhitespace( self ):
+ """
+ Disables the skipping of whitespace before matching the characters in the
+ C{ParserElement}'s defined pattern. This is normally only used internally by
+ the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+ """
+ self.skipWhitespace = False
+ return self
+
+ def setWhitespaceChars( self, chars ):
+ """
+ Overrides the default whitespace chars
+ """
+ self.skipWhitespace = True
+ self.whiteChars = chars
+ self.copyDefaultWhiteChars = False
+ return self
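+ # Illustrative sketch: skip only spaces and tabs, so newlines become significant:
+ # word = Word(alphas).setWhitespaceChars(" \t")
+ # OneOrMore(word).parseString("abc def\nghi") # -> ['abc', 'def']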
+
+ def parseWithTabs( self ):
+ """
+ Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
+ Must be called before C{parseString} when the input grammar contains elements that
+ match C{<TAB>} characters.
+ """
+ self.keepTabs = True
+ return self
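+ # Illustrative sketch: keep literal tabs so they can be matched explicitly
+ # (White is defined later in this module):
+ # row = Word(alphas) + White('\t') + Word(alphas)
+ # row.parseWithTabs().parseString("abc\tdef") # -> ['abc', '\t', 'def']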
+
+ def ignore( self, other ):
+ """
+ Define expression to be ignored (e.g., comments) while doing pattern
+ matching; may be called repeatedly, to define multiple comment or other
+ ignorable patterns.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+
+ patt.ignore(cStyleComment)
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+ """
+ if isinstance(other, basestring):
+ other = Suppress(other)
+
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ self.ignoreExprs.append(other)
+ else:
+ self.ignoreExprs.append( Suppress( other.copy() ) )
+ return self
+
+ def setDebugActions( self, startAction, successAction, exceptionAction ):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ """
+ self.debugActions = (startAction or _defaultStartDebugAction,
+ successAction or _defaultSuccessDebugAction,
+ exceptionAction or _defaultExceptionDebugAction)
+ self.debug = True
+ return self
+
+ def setDebug( self, flag=True ):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ Set C{flag} to True to enable, False to disable.
+
+ Example::
+ wd = Word(alphas).setName("alphaword")
+ integer = Word(nums).setName("numword")
+ term = wd | integer
+
+ # turn on debugging for wd
+ wd.setDebug()
+
+ OneOrMore(term).parseString("abc 123 xyz 890")
+
+ prints::
+ Match alphaword at loc 0(1,1)
+ Matched alphaword -> ['abc']
+ Match alphaword at loc 3(1,4)
+ Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+ Match alphaword at loc 7(1,8)
+ Matched alphaword -> ['xyz']
+ Match alphaword at loc 11(1,12)
+ Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+ Match alphaword at loc 15(1,16)
+ Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+ The output shown is that produced by the default debug actions - custom debug actions can be
+ specified using L{setDebugActions}. Prior to attempting
+ to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
+ is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
+ message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
+ which makes debugging and exception messages easier to understand - for instance, the default
+ name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
+ """
+ if flag:
+ self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+ else:
+ self.debug = False
+ return self
+
+ def __str__( self ):
+ return self.name
+
+ def __repr__( self ):
+ return _ustr(self)
+
+ def streamline( self ):
+ self.streamlined = True
+ self.strRepr = None
+ return self
+
+ def checkRecursion( self, parseElementList ):
+ pass
+
+ def validate( self, validateTrace=[] ):
+ """
+ Check defined expressions for valid structure, check for infinite recursive definitions.
+ """
+ self.checkRecursion( [] )
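+ # Illustrative sketch: validate() raises RecursiveGrammarException for a
+ # grammar that can recurse without consuming input:
+ # expr = Forward()
+ # expr <<= expr + Word(nums) # left-recursive, makes no progress
+ # expr.validate() # -> raises RecursiveGrammarException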
+
+ def parseFile( self, file_or_filename, parseAll=False ):
+ """
+ Execute the parse expression on the given file or filename.
+ If a filename is specified (instead of a file object),
+ the entire file is opened, read, and closed before parsing.
+ """
+ try:
+ file_contents = file_or_filename.read()
+ except AttributeError:
+ with open(file_or_filename, "r") as f:
+ file_contents = f.read()
+ try:
+ return self.parseString(file_contents, parseAll)
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
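+ # Illustrative sketch (the file name here is hypothetical):
+ # grammar = OneOrMore(Word(alphas))
+ # grammar.parseFile("words.txt") # parse a file given its name
+ # grammar.parseFile(open("words.txt")) # or an already-open file object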
+
+ def __eq__(self,other):
+ if isinstance(other, ParserElement):
+ return self is other or vars(self) == vars(other)
+ elif isinstance(other, basestring):
+ return self.matches(other)
+ else:
+ return super(ParserElement,self)==other
+
+ def __ne__(self,other):
+ return not (self == other)
+
+ def __hash__(self):
+ return hash(id(self))
+
+ def __req__(self,other):
+ return self == other
+
+ def __rne__(self,other):
+ return not (self == other)
+
+ def matches(self, testString, parseAll=True):
+ """
+ Method for quick testing of a parser against a test string. Good for simple
+ inline micro-tests of sub-expressions while building up a larger parser.
+
+ Parameters:
+ - testString - to test against this expression for a match
+ - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+
+ Example::
+ expr = Word(nums)
+ assert expr.matches("100")
+ """
+ try:
+ self.parseString(_ustr(testString), parseAll=parseAll)
+ return True
+ except ParseBaseException:
+ return False
+
+ def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
+ """
+ Execute the parse expression on a series of test strings, showing each
+ test, the parsed results or where the parse failed. Quick and easy way to
+ run a parse expression against a list of sample strings.
+
+ Parameters:
+ - tests - a list of separate test strings, or a multiline string of test strings
+ - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+ - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
+ string; pass None to disable comment filtering
+ - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
+ if False, only dump nested list
+ - printResults - (default=C{True}) prints test output to stdout
+ - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
+
+ Returns: a (success, results) tuple, where success indicates that all tests succeeded
+ (or failed if C{failureTests} is True), and the results contain a list of lines of each
+ test's output
+
+ Example::
+ number_expr = pyparsing_common.number.copy()
+
+ result = number_expr.runTests('''
+ # unsigned integer
+ 100
+ # negative integer
+ -100
+ # float with scientific notation
+ 6.02e23
+ # integer with scientific notation
+ 1e-12
+ ''')
+ print("Success" if result[0] else "Failed!")
+
+ result = number_expr.runTests('''
+ # stray character
+ 100Z
+ # missing leading digit before '.'
+ -.100
+ # too many '.'
+ 3.14.159
+ ''', failureTests=True)
+ print("Success" if result[0] else "Failed!")
+ prints::
+ # unsigned integer
+ 100
+ [100]
+
+ # negative integer
+ -100
+ [-100]
+
+ # float with scientific notation
+ 6.02e23
+ [6.02e+23]
+
+ # integer with scientific notation
+ 1e-12
+ [1e-12]
+
+ Success
+
+ # stray character
+ 100Z
+ ^
+ FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+ # missing leading digit before '.'
+ -.100
+ ^
+ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+ # too many '.'
+ 3.14.159
+ ^
+ FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+ Success
+
+ Each test string must be on a single line. If you want to test a string that spans multiple
+ lines, create a test like this::
+
+ expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
+
+ (Note that this is a raw string literal, you must include the leading 'r'.)
+ """
+ if isinstance(tests, basestring):
+ tests = list(map(str.strip, tests.rstrip().splitlines()))
+ if isinstance(comment, basestring):
+ comment = Literal(comment)
+ allResults = []
+ comments = []
+ success = True
+ for t in tests:
+ if comment is not None and comment.matches(t, False) or comments and not t:
+ comments.append(t)
+ continue
+ if not t:
+ continue
+ out = ['\n'.join(comments), t]
+ comments = []
+ try:
+ t = t.replace(r'\n','\n')
+ result = self.parseString(t, parseAll=parseAll)
+ out.append(result.dump(full=fullDump))
+ success = success and not failureTests
+ except ParseBaseException as pe:
+ fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+ if '\n' in t:
+ out.append(line(pe.loc, t))
+ out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
+ else:
+ out.append(' '*pe.loc + '^' + fatal)
+ out.append("FAIL: " + str(pe))
+ success = success and failureTests
+ result = pe
+ except Exception as exc:
+ out.append("FAIL-EXCEPTION: " + str(exc))
+ success = success and failureTests
+ result = exc
+
+ if printResults:
+ if fullDump:
+ out.append('')
+ print('\n'.join(out))
+
+ allResults.append((t, result))
+
+ return success, allResults
+
+
+class Token(ParserElement):
+ """
+ Abstract C{ParserElement} subclass, for defining atomic matching patterns.
+ """
+ def __init__( self ):
+ super(Token,self).__init__( savelist=False )
+
+
+class Empty(Token):
+ """
+ An empty token, will always match.
+ """
+ def __init__( self ):
+ super(Empty,self).__init__()
+ self.name = "Empty"
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+
+class NoMatch(Token):
+ """
+ A token that will never match.
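+
+ Example (an illustrative sketch; C{NoMatch} always raises, so it can serve
+ as a placeholder alternative until a real expression is supplied)::
+ placeholder = NoMatch()
+ placeholder.parseString("anything") # -> Exception: Unmatchable token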
+ """
+ def __init__( self ):
+ super(NoMatch,self).__init__()
+ self.name = "NoMatch"
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.errmsg = "Unmatchable token"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+ """
+ Token to exactly match a specified string.
+
+ Example::
+ Literal('blah').parseString('blah') # -> ['blah']
+ Literal('blah').parseString('blahfooblah') # -> ['blah']
+ Literal('blah').parseString('bla') # -> Exception: Expected "blah"
+
+ For case-insensitive matching, use L{CaselessLiteral}.
+
+ For keyword matching (force word break before and after the matched string),
+ use L{Keyword} or L{CaselessKeyword}.
+ """
+ def __init__( self, matchString ):
+ super(Literal,self).__init__()
+ self.match = matchString
+ self.matchLen = len(matchString)
+ try:
+ self.firstMatchChar = matchString[0]
+ except IndexError:
+ warnings.warn("null string passed to Literal; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+ self.__class__ = Empty
+ self.name = '"%s"' % _ustr(self.match)
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+
+ # Performance tuning: this routine gets called a *lot*
+ # if this is a single character match string and the first character matches,
+ # short-circuit as quickly as possible, and avoid calling startswith
+ #~ @profile
+ def parseImpl( self, instring, loc, doActions=True ):
+ if (instring[loc] == self.firstMatchChar and
+ (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+ """
+ Token to exactly match a specified string as a keyword, that is, it must be
+ immediately followed by a non-keyword character. Compare with C{L{Literal}}:
+ - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
+ - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
+ Accepts two optional constructor arguments in addition to the keyword string:
+ - C{identChars} is a string of characters that would be valid identifier characters,
+ defaulting to all alphanumerics + "_" and "$"
+ - C{caseless} allows case-insensitive matching, default is C{False}.
+
+ Example::
+ Keyword("start").parseString("start") # -> ['start']
+ Keyword("start").parseString("starting") # -> Exception
+
+ For case-insensitive matching, use L{CaselessKeyword}.
+ """
+ DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+ def __init__( self, matchString, identChars=None, caseless=False ):
+ super(Keyword,self).__init__()
+ if identChars is None:
+ identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ self.match = matchString
+ self.matchLen = len(matchString)
+ try:
+ self.firstMatchChar = matchString[0]
+ except IndexError:
+ warnings.warn("null string passed to Keyword; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+ self.name = '"%s"' % self.match
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+ self.caseless = caseless
+ if caseless:
+ self.caselessmatch = matchString.upper()
+ identChars = identChars.upper()
+ self.identChars = set(identChars)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.caseless:
+ if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+ (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ else:
+ if (instring[loc] == self.firstMatchChar and
+ (self.matchLen==1 or instring.startswith(self.match,loc)) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+ (loc == 0 or instring[loc-1] not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ def copy(self):
+ c = super(Keyword,self).copy()
+ c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ return c
+
+ @staticmethod
+ def setDefaultKeywordChars( chars ):
+ """Overrides the default Keyword chars
+ """
+ Keyword.DEFAULT_KEYWORD_CHARS = chars
+
+class CaselessLiteral(Literal):
+ """
+ Token to match a specified string, ignoring case of letters.
+ Note: the matched results will always be in the case of the given
+ match string, NOT the case of the input text.
+
+ Example::
+ OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+
+ (Contrast with example for L{CaselessKeyword}.)
+ """
+ def __init__( self, matchString ):
+ super(CaselessLiteral,self).__init__( matchString.upper() )
+ # Preserve the defining literal.
+ self.returnString = matchString
+ self.name = "'%s'" % self.returnString
+ self.errmsg = "Expected " + self.name
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if instring[ loc:loc+self.matchLen ].upper() == self.match:
+ return loc+self.matchLen, self.returnString
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+ """
+ Caseless version of L{Keyword}.
+
+ Example::
+ OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+
+ (Contrast with example for L{CaselessLiteral}.)
+ """
+ def __init__( self, matchString, identChars=None ):
+ super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class CloseMatch(Token):
+ """
+ A variation on L{Literal} which matches "close" matches, that is,
+ strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
+ - C{match_string} - string to be matched
+ - C{maxMismatches} - (default=C{1}) - maximum number of mismatches allowed to count as a match
+
+ The results from a successful parse will contain the matched text from the input string and the following named results:
+ - C{mismatches} - a list of the positions within the match_string where mismatches were found
+ - C{original} - the original match_string used to compare against the input string
+
+ If C{mismatches} is an empty list, then the match was an exact match.
+
+ Example::
+ patt = CloseMatch("ATCATCGAATGGA")
+ patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+ patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+ # exact match
+ patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+ # close match allowing up to 2 mismatches
+ patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+ patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+ """
+ def __init__(self, match_string, maxMismatches=1):
+ super(CloseMatch,self).__init__()
+ self.name = match_string
+ self.match_string = match_string
+ self.maxMismatches = maxMismatches
+ self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+ self.mayIndexError = False
+ self.mayReturnEmpty = False
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ start = loc
+ instrlen = len(instring)
+ maxloc = start + len(self.match_string)
+
+ if maxloc <= instrlen:
+ match_string = self.match_string
+ match_stringloc = 0
+ mismatches = []
+ maxMismatches = self.maxMismatches
+
+ for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
+ src,mat = s_m
+ if src != mat:
+ mismatches.append(match_stringloc)
+ if len(mismatches) > maxMismatches:
+ break
+ else:
+ loc = match_stringloc + 1
+ results = ParseResults([instring[start:loc]])
+ results['original'] = self.match_string
+ results['mismatches'] = mismatches
+ return loc, results
+
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+ """
+ Token for matching words composed of allowed character sets.
+ Defined with a string containing all allowed initial characters,
+ an optional string containing allowed body characters (if omitted,
+ defaults to the initial character set), and an optional minimum,
+ maximum, and/or exact length. The default value for C{min} is 1 (a
+ minimum value < 1 is not valid); the default values for C{max} and C{exact}
+ are 0, meaning no maximum or exact length restriction. An optional
+ C{excludeChars} parameter can list characters to exclude from the initial and
+ body character sets; useful to define a word of all printables
+ except for one or two characters, for instance.
+
+ L{srange} is useful for defining custom character set strings for defining
+ C{Word} expressions, using range notation from regular expression character sets.
+
+ A common mistake is to use C{Word} to match a specific literal string, as in
+ C{Word("Address")}. Remember that C{Word} uses the string argument to define
+ I{sets} of matchable characters. This expression would match "Add", "AAA",
+ "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
+ To match an exact literal string, use L{Literal} or L{Keyword}.
+
+ pyparsing includes helper strings for building Words:
+ - L{alphas}
+ - L{nums}
+ - L{alphanums}
+ - L{hexnums}
+ - L{alphas8bit} (alphabetic characters in the Latin-1 range 128-255 - accented, tilded, umlauted, etc.)
+ - L{punc8bit} (non-alphabetic characters in the Latin-1 range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
+ - L{printables} (any non-whitespace character)
+
+ Example::
+ # a word composed of digits
+ integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+
+ # a word with a leading capital, and zero or more lowercase
+ capital_word = Word(alphas.upper(), alphas.lower())
+
+ # hostnames are alphanumeric, with leading alpha, and '-'
+ hostname = Word(alphas, alphanums+'-')
+
+ # roman numeral (not a strict parser, accepts invalid mix of characters)
+ roman = Word("IVXLCDM")
+
+ # any string of non-whitespace characters, except for ','
+ csv_value = Word(printables, excludeChars=",")
+ """
+ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+ super(Word,self).__init__()
+ if excludeChars:
+ initChars = ''.join(c for c in initChars if c not in excludeChars)
+ if bodyChars:
+ bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+ self.initCharsOrig = initChars
+ self.initChars = set(initChars)
+ if bodyChars :
+ self.bodyCharsOrig = bodyChars
+ self.bodyChars = set(bodyChars)
+ else:
+ self.bodyCharsOrig = initChars
+ self.bodyChars = set(initChars)
+
+ self.maxSpecified = max > 0
+
+ if min < 1:
+ raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.asKeyword = asKeyword
+
+ if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+ if self.bodyCharsOrig == self.initCharsOrig:
+ self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+ elif len(self.initCharsOrig) == 1:
+ self.reString = "%s[%s]*" % \
+ (re.escape(self.initCharsOrig),
+ _escapeRegexRangeChars(self.bodyCharsOrig),)
+ else:
+ self.reString = "[%s][%s]*" % \
+ (_escapeRegexRangeChars(self.initCharsOrig),
+ _escapeRegexRangeChars(self.bodyCharsOrig),)
+ if self.asKeyword:
+ self.reString = r"\b"+self.reString+r"\b"
+ try:
+ self.re = re.compile( self.reString )
+ except Exception:
+ self.re = None
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.re:
+ result = self.re.match(instring,loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ return loc, result.group()
+
+ if instring[loc] not in self.initChars:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ instrlen = len(instring)
+ bodychars = self.bodyChars
+ maxloc = start + self.maxLen
+ maxloc = min( maxloc, instrlen )
+ while loc < maxloc and instring[loc] in bodychars:
+ loc += 1
+
+ throwException = False
+ if loc - start < self.minLen:
+ throwException = True
+ if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+ throwException = True
+ if self.asKeyword:
+ if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+ throwException = True
+
+ if throwException:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+ def __str__( self ):
+ try:
+ return super(Word,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+
+ def charsAsStr(s):
+ if len(s)>4:
+ return s[:4]+"..."
+ else:
+ return s
+
+ if ( self.initCharsOrig != self.bodyCharsOrig ):
+ self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+ else:
+ self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+ return self.strRepr
+
+
+class Regex(Token):
+ r"""
+ Token for matching strings that match a given regular expression.
+ Defined with a string specifying the regular expression, in a form recognized by the built-in Python re module.
+ If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
+ named parse results.
+
+ Example::
+ realnum = Regex(r"[+-]?\d+\.\d*")
+ date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+ # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+ roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+ """
+ compiledREtype = type(re.compile("[A-Z]"))
+ def __init__( self, pattern, flags=0):
+ """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
+ super(Regex,self).__init__()
+
+ if isinstance(pattern, basestring):
+ if not pattern:
+ warnings.warn("null string passed to Regex; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+
+ self.pattern = pattern
+ self.flags = flags
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ except sre_constants.error:
+ warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+ SyntaxWarning, stacklevel=2)
+ raise
+
+ elif isinstance(pattern, Regex.compiledREtype):
+ self.re = pattern
+ self.pattern = \
+ self.reString = str(pattern)
+ self.flags = flags
+
+ else:
+ raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ result = self.re.match(instring,loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ d = result.groupdict()
+ ret = ParseResults(result.group())
+ if d:
+ for k in d:
+ ret[k] = d[k]
+ return loc,ret
+
+ def __str__( self ):
+ try:
+ return super(Regex,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+ return self.strRepr
+
+
+class QuotedString(Token):
+ r"""
+ Token for matching strings that are delimited by quoting characters.
+
+ Defined with the following parameters:
+ - quoteChar - string of one or more characters defining the quote delimiting string
+ - escChar - character to escape quotes, typically backslash (default=C{None})
+ - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+ - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+ - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+ - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+ - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+ Example::
+ qs = QuotedString('"')
+ print(qs.searchString('lsjdf "This is the quote" sldjf'))
+ complex_qs = QuotedString('{{', endQuoteChar='}}')
+ print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+ sql_qs = QuotedString('"', escQuote='""')
+ print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+ prints::
+ [['This is the quote']]
+ [['This is the "quote"']]
+ [['This is the quote with "embedded" quotes']]
+ """
+ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+ super(QuotedString,self).__init__()
+
+ # remove whitespace from quote chars - won't work anyway
+ quoteChar = quoteChar.strip()
+ if not quoteChar:
+ warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+ raise SyntaxError()
+
+ if endQuoteChar is None:
+ endQuoteChar = quoteChar
+ else:
+ endQuoteChar = endQuoteChar.strip()
+ if not endQuoteChar:
+ warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+ raise SyntaxError()
+
+ self.quoteChar = quoteChar
+ self.quoteCharLen = len(quoteChar)
+ self.firstQuoteChar = quoteChar[0]
+ self.endQuoteChar = endQuoteChar
+ self.endQuoteCharLen = len(endQuoteChar)
+ self.escChar = escChar
+ self.escQuote = escQuote
+ self.unquoteResults = unquoteResults
+ self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+ if multiline:
+ self.flags = re.MULTILINE | re.DOTALL
+ self.pattern = r'%s(?:[^%s%s]' % \
+ ( re.escape(self.quoteChar),
+ _escapeRegexRangeChars(self.endQuoteChar[0]),
+ (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+ else:
+ self.flags = 0
+ self.pattern = r'%s(?:[^%s\n\r%s]' % \
+ ( re.escape(self.quoteChar),
+ _escapeRegexRangeChars(self.endQuoteChar[0]),
+ (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+ if len(self.endQuoteChar) > 1:
+ self.pattern += (
+ '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+ _escapeRegexRangeChars(self.endQuoteChar[i]))
+ for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+ )
+ if escQuote:
+ self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+ if escChar:
+ self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+ self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+ self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ except sre_constants.error:
+ warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+ SyntaxWarning, stacklevel=2)
+ raise
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ result = self.re.match(instring, loc) if instring[loc] == self.firstQuoteChar else None
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result.group()
+
+ if self.unquoteResults:
+
+ # strip off quotes
+ ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+ if isinstance(ret,basestring):
+ # replace escaped whitespace
+ if '\\' in ret and self.convertWhitespaceEscapes:
+ ws_map = {
+ r'\t' : '\t',
+ r'\n' : '\n',
+ r'\f' : '\f',
+ r'\r' : '\r',
+ }
+ for wslit,wschar in ws_map.items():
+ ret = ret.replace(wslit, wschar)
+
+ # replace escaped characters
+ if self.escChar:
+ ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+ # replace escaped quotes
+ if self.escQuote:
+ ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+ return loc, ret
+
+ def __str__( self ):
+ try:
+ return super(QuotedString,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+ return self.strRepr
+
+
+class CharsNotIn(Token):
+ """
+ Token for matching words composed of characters I{not} in a given set (will
+ include whitespace in matched characters if not listed in the provided exclusion set - see example).
+ Defined with a string containing all disallowed characters, and an optional
+ minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
+ minimum value < 1 is not valid); the default values for C{max} and C{exact}
+ are 0, meaning no maximum or exact length restriction.
+
+ Example::
+ # define a comma-separated-value as anything that is not a ','
+ csv_value = CharsNotIn(',')
+ print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+ prints::
+ ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+ """
+ def __init__( self, notChars, min=1, max=0, exact=0 ):
+ super(CharsNotIn,self).__init__()
+ self.skipWhitespace = False
+ self.notChars = notChars
+
+ if min < 1:
+ raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = ( self.minLen == 0 )
+ self.mayIndexError = False
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if instring[loc] in self.notChars:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ notchars = self.notChars
+ maxlen = min( start+self.maxLen, len(instring) )
+ while loc < maxlen and \
+ (instring[loc] not in notchars):
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+ def __str__( self ):
+ try:
+ return super(CharsNotIn, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ if len(self.notChars) > 4:
+ self.strRepr = "!W:(%s...)" % self.notChars[:4]
+ else:
+ self.strRepr = "!W:(%s)" % self.notChars
+
+ return self.strRepr
+
+class White(Token):
+ """
+ Special matching class for matching whitespace. Normally, whitespace is ignored
+ by pyparsing grammars. This class is included when some whitespace structures
+ are significant. Define with a string containing the whitespace characters to be
+ matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
+ as defined for the C{L{Word}} class.
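+
+ Example (an illustrative sketch, treating tabs as significant field separators)::
+ tab = White("\\t").suppress()
+ row = Word(alphas) + tab + Word(nums)
+ print(row.parseString("width\\t100")) # -> ['width', '100']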
+ """
+ whiteStrs = {
+ " " : "<SPC>",
+ "\t": "<TAB>",
+ "\n": "<LF>",
+ "\r": "<CR>",
+ "\f": "<FF>",
+ }
+ def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+ super(White,self).__init__()
+ self.matchWhite = ws
+ self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
+ #~ self.leaveWhitespace()
+ self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+ self.mayReturnEmpty = True
+ self.errmsg = "Expected " + self.name
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if instring[loc] not in self.matchWhite:
+ raise ParseException(instring, loc, self.errmsg, self)
+ start = loc
+ loc += 1
+ maxloc = start + self.maxLen
+ maxloc = min( maxloc, len(instring) )
+ while loc < maxloc and instring[loc] in self.matchWhite:
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+
+class _PositionToken(Token):
+ def __init__( self ):
+ super(_PositionToken,self).__init__()
+ self.name=self.__class__.__name__
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+ """
+ Token to advance to a specific column of input text; useful for tabular report scraping.
+ """
+ def __init__( self, colno ):
+ super(GoToColumn,self).__init__()
+ self.col = colno
+
+ def preParse( self, instring, loc ):
+ if col(loc,instring) != self.col:
+ instrlen = len(instring)
+ if self.ignoreExprs:
+ loc = self._skipIgnorables( instring, loc )
+ while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+ loc += 1
+ return loc
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ thiscol = col( loc, instring )
+ if thiscol > self.col:
+ raise ParseException( instring, loc, "Text not in expected column", self )
+ newloc = loc + self.col - thiscol
+ ret = instring[ loc: newloc ]
+ return newloc, ret
+
+
+class LineStart(_PositionToken):
+ """
+ Matches if current position is at the beginning of a line within the parse string
+
+ Example::
+
+ test = '''\
+ AAA this line
+ AAA and this line
+ AAA but not this one
+ B AAA and definitely not this one
+ '''
+
+ for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+ print(t)
+
+ Prints::
+ ['AAA', ' this line']
+ ['AAA', ' and this line']
+
+ """
+ def __init__( self ):
+ super(LineStart,self).__init__()
+ self.errmsg = "Expected start of line"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if col(loc, instring) == 1:
+ return loc, []
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+ """
+ Matches if current position is at the end of a line within the parse string
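+
+ Example (an illustrative sketch, keeping line breaks significant)::
+ pair = Word(alphas) + Suppress('=') + Word(nums) + Suppress(LineEnd())
+ print(OneOrMore(pair).parseString("a=1\\nb=2")) # -> ['a', '1', 'b', '2']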
+ """
+ def __init__( self ):
+ super(LineEnd,self).__init__()
+ self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+ self.errmsg = "Expected end of line"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc<len(instring):
+ if instring[loc] == "\n":
+ return loc+1, "\n"
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc+1, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class StringStart(_PositionToken):
+ """
+ Matches if current position is at the beginning of the parse string
+ """
+ def __init__( self ):
+ super(StringStart,self).__init__()
+ self.errmsg = "Expected start of text"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc != 0:
+ # see if entire string up to here is just whitespace and ignoreables
+ if loc != self.preParse( instring, 0 ):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+class StringEnd(_PositionToken):
+ """
+ Matches if current position is at the end of the parse string
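+
+ Example (an illustrative sketch; appending C{StringEnd()} forces the whole
+ input to be consumed, much as C{parseAll=True} does)::
+ complete_number = Word(nums) + StringEnd()
+ complete_number.parseString("123") # -> ['123']
+ complete_number.parseString("123x") # -> Exception: Expected end of text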
+ """
+ def __init__( self ):
+ super(StringEnd,self).__init__()
+ self.errmsg = "Expected end of text"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc < len(instring):
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc+1, []
+ elif loc > len(instring):
+ return loc, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class WordStart(_PositionToken):
+ """
+ Matches if the current position is at the beginning of a Word, and
+ is not preceded by any character in a given set of C{wordChars}
+ (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
+ use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
+ the string being parsed, or at the beginning of a line.
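+
+ Example (an illustrative sketch of C{\\b}-style whole-word matching)::
+ # match 'cat' only as a whole word
+ whole_cat = WordStart(alphanums) + Literal("cat") + WordEnd(alphanums)
+ print(whole_cat.searchString("cat catalog concat cat")) # -> [['cat'], ['cat']]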
+ """
+ def __init__(self, wordChars = printables):
+ super(WordStart,self).__init__()
+ self.wordChars = set(wordChars)
+ self.errmsg = "Not at the start of a word"
+
+ def parseImpl(self, instring, loc, doActions=True ):
+ if loc != 0:
+ if (instring[loc-1] in self.wordChars or
+ instring[loc] not in self.wordChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+class WordEnd(_PositionToken):
+ """
+ Matches if the current position is at the end of a Word, and
+ is not followed by any character in a given set of C{wordChars}
+ (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
+ use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
+ the string being parsed, or at the end of a line.
+ """
+ def __init__(self, wordChars = printables):
+ super(WordEnd,self).__init__()
+ self.wordChars = set(wordChars)
+ self.skipWhitespace = False
+ self.errmsg = "Not at the end of a word"
+
+ def parseImpl(self, instring, loc, doActions=True ):
+ instrlen = len(instring)
+ if instrlen>0 and loc<instrlen:
+ if (instring[loc] in self.wordChars or
+ instring[loc-1] not in self.wordChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+
+class ParseExpression(ParserElement):
+ """
+ Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(ParseExpression,self).__init__(savelist)
+ if isinstance( exprs, _generatorType ):
+ exprs = list(exprs)
+
+ if isinstance( exprs, basestring ):
+ self.exprs = [ ParserElement._literalStringClass( exprs ) ]
+ elif isinstance( exprs, Iterable ):
+ exprs = list(exprs)
+ # if sequence of strings provided, wrap with Literal
+ if all(isinstance(expr, basestring) for expr in exprs):
+ exprs = map(ParserElement._literalStringClass, exprs)
+ self.exprs = list(exprs)
+ else:
+ try:
+ self.exprs = list( exprs )
+ except TypeError:
+ self.exprs = [ exprs ]
+ self.callPreparse = False
+
+ def __getitem__( self, i ):
+ return self.exprs[i]
+
+ def append( self, other ):
+ self.exprs.append( other )
+ self.strRepr = None
+ return self
+
+ def leaveWhitespace( self ):
+ """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
+ all contained expressions."""
+ self.skipWhitespace = False
+ self.exprs = [ e.copy() for e in self.exprs ]
+ for e in self.exprs:
+ e.leaveWhitespace()
+ return self
+
+ def ignore( self, other ):
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ super( ParseExpression, self).ignore( other )
+ for e in self.exprs:
+ e.ignore( self.ignoreExprs[-1] )
+ else:
+ super( ParseExpression, self).ignore( other )
+ for e in self.exprs:
+ e.ignore( self.ignoreExprs[-1] )
+ return self
+
+ def __str__( self ):
+ try:
+ return super(ParseExpression,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
+ return self.strRepr
+
+ def streamline( self ):
+ super(ParseExpression,self).streamline()
+
+ for e in self.exprs:
+ e.streamline()
+
+ # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
+ # but only if there are no parse actions or resultsNames on the nested And's
+ # (likewise for Or's and MatchFirst's)
+ if ( len(self.exprs) == 2 ):
+ other = self.exprs[0]
+ if ( isinstance( other, self.__class__ ) and
+ not(other.parseAction) and
+ other.resultsName is None and
+ not other.debug ):
+ self.exprs = other.exprs[:] + [ self.exprs[1] ]
+ self.strRepr = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ other = self.exprs[-1]
+ if ( isinstance( other, self.__class__ ) and
+ not(other.parseAction) and
+ other.resultsName is None and
+ not other.debug ):
+ self.exprs = self.exprs[:-1] + other.exprs[:]
+ self.strRepr = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ self.errmsg = "Expected " + _ustr(self)
+
+ return self
+
+ def setResultsName( self, name, listAllMatches=False ):
+ ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
+ return ret
+
+ def validate( self, validateTrace=[] ):
+ tmp = validateTrace[:]+[self]
+ for e in self.exprs:
+ e.validate(tmp)
+ self.checkRecursion( [] )
+
+ def copy(self):
+ ret = super(ParseExpression,self).copy()
+ ret.exprs = [e.copy() for e in self.exprs]
+ return ret
+
+class And(ParseExpression):
+ """
+ Requires all given C{ParseExpression}s to be found in the given order.
+ Expressions may be separated by whitespace.
+ May be constructed using the C{'+'} operator.
+ May also be constructed using the C{'-'} operator, which will suppress backtracking.
+
+ Example::
+ integer = Word(nums)
+ name_expr = OneOrMore(Word(alphas))
+
+ expr = And([integer("id"),name_expr("name"),integer("age")])
+ # more easily written as:
+ expr = integer("id") + name_expr("name") + integer("age")
+ """
+
+ class _ErrorStop(Empty):
+ def __init__(self, *args, **kwargs):
+ super(And._ErrorStop,self).__init__(*args, **kwargs)
+ self.name = '-'
+ self.leaveWhitespace()
+
+ def __init__( self, exprs, savelist = True ):
+ super(And,self).__init__(exprs, savelist)
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ self.setWhitespaceChars( self.exprs[0].whiteChars )
+ self.skipWhitespace = self.exprs[0].skipWhitespace
+ self.callPreparse = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ # pass False as last arg to _parse for first element, since we already
+ # pre-parsed the string as part of our And pre-parsing
+ loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
+ errorStop = False
+ for e in self.exprs[1:]:
+ if isinstance(e, And._ErrorStop):
+ errorStop = True
+ continue
+ if errorStop:
+ try:
+ loc, exprtokens = e._parse( instring, loc, doActions )
+ except ParseSyntaxException:
+ raise
+ except ParseBaseException as pe:
+ pe.__traceback__ = None
+ raise ParseSyntaxException._from_exception(pe)
+ except IndexError:
+ raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
+ else:
+ loc, exprtokens = e._parse( instring, loc, doActions )
+ if exprtokens or exprtokens.haskeys():
+ resultlist += exprtokens
+ return loc, resultlist
+
+ def __iadd__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #And( [ self, other ] )
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+ if not e.mayReturnEmpty:
+ break
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+
+class Or(ParseExpression):
+ """
+ Requires that at least one C{ParseExpression} is found.
+ If two expressions match, the expression that matches the longest string will be used.
+ May be constructed using the C{'^'} operator.
+
+ Example::
+ # construct Or using '^' operator
+
+ number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+ print(number.searchString("123 3.1416 789"))
+ prints::
+ [['123'], ['3.1416'], ['789']]
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(Or,self).__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ maxExcLoc = -1
+ maxException = None
+ matches = []
+ for e in self.exprs:
+ try:
+ loc2 = e.tryParse( instring, loc )
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(instring,len(instring),e.errmsg,self)
+ maxExcLoc = len(instring)
+ else:
+ # save match among all matches, to retry longest to shortest
+ matches.append((loc2, e))
+
+ if matches:
+ matches.sort(key=lambda x: -x[0])
+ for _,e in matches:
+ try:
+ return e._parse( instring, loc, doActions )
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+
+ def __ixor__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #Or( [ self, other ] )
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class MatchFirst(ParseExpression):
+ """
+ Requires that at least one C{ParseExpression} is found.
+ If two expressions match, the first one listed is the one that will match.
+ May be constructed using the C{'|'} operator.
+
+ Example::
+ # construct MatchFirst using '|' operator
+
+ # watch the order of expressions to match
+ number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+ print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+ # put more selective expression first
+ number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+ print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(MatchFirst,self).__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ maxExcLoc = -1
+ maxException = None
+ for e in self.exprs:
+ try:
+ ret = e._parse( instring, loc, doActions )
+ return ret
+ except ParseException as err:
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(instring,len(instring),e.errmsg,self)
+ maxExcLoc = len(instring)
+
+ # only got here if no expression matched, raise exception for match that made it the furthest
+ else:
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+ def __ior__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #MatchFirst( [ self, other ] )
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class Each(ParseExpression):
+ """
+ Requires all given C{ParseExpression}s to be found, but in any order.
+ Expressions may be separated by whitespace.
+ May be constructed using the C{'&'} operator.
+
+ Example::
+ color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+ shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+ integer = Word(nums)
+ shape_attr = "shape:" + shape_type("shape")
+ posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+ color_attr = "color:" + color("color")
+ size_attr = "size:" + integer("size")
+
+ # use Each (using operator '&') to accept attributes in any order
+ # (shape and posn are required, color and size are optional)
+ shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+ shape_spec.runTests('''
+ shape: SQUARE color: BLACK posn: 100, 120
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ color:GREEN size:20 shape:TRIANGLE posn:20,40
+ '''
+ )
+ prints::
+ shape: SQUARE color: BLACK posn: 100, 120
+ ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+ - color: BLACK
+ - posn: ['100', ',', '120']
+ - x: 100
+ - y: 120
+ - shape: SQUARE
+
+
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+ - color: BLUE
+ - posn: ['50', ',', '80']
+ - x: 50
+ - y: 80
+ - shape: CIRCLE
+ - size: 50
+
+
+ color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+ ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+ - color: GREEN
+ - posn: ['20', ',', '40']
+ - x: 20
+ - y: 40
+ - shape: TRIANGLE
+ - size: 20
+ """
+ def __init__( self, exprs, savelist = True ):
+ super(Each,self).__init__(exprs, savelist)
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ self.skipWhitespace = True
+ self.initExprGroups = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.initExprGroups:
+ self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
+ opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+ opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
+ self.optionals = opt1 + opt2
+ self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+ self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+ self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+ self.required += self.multirequired
+ self.initExprGroups = False
+ tmpLoc = loc
+ tmpReqd = self.required[:]
+ tmpOpt = self.optionals[:]
+ matchOrder = []
+
+ keepMatching = True
+ while keepMatching:
+ tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+ failed = []
+ for e in tmpExprs:
+ try:
+ tmpLoc = e.tryParse( instring, tmpLoc )
+ except ParseException:
+ failed.append(e)
+ else:
+ matchOrder.append(self.opt1map.get(id(e),e))
+ if e in tmpReqd:
+ tmpReqd.remove(e)
+ elif e in tmpOpt:
+ tmpOpt.remove(e)
+ if len(failed) == len(tmpExprs):
+ keepMatching = False
+
+ if tmpReqd:
+ missing = ", ".join(_ustr(e) for e in tmpReqd)
+ raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+ # add any unmatched Optionals, in case they have default values defined
+ matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+ resultlist = []
+ for e in matchOrder:
+ loc,results = e._parse(instring,loc,doActions)
+ resultlist.append(results)
+
+ finalResults = sum(resultlist, ParseResults([]))
+ return loc, finalResults
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class ParseElementEnhance(ParserElement):
+ """
+ Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
+ """
+ def __init__( self, expr, savelist=False ):
+ super(ParseElementEnhance,self).__init__(savelist)
+ if isinstance( expr, basestring ):
+ if issubclass(ParserElement._literalStringClass, Token):
+ expr = ParserElement._literalStringClass(expr)
+ else:
+ expr = ParserElement._literalStringClass(Literal(expr))
+ self.expr = expr
+ self.strRepr = None
+ if expr is not None:
+ self.mayIndexError = expr.mayIndexError
+ self.mayReturnEmpty = expr.mayReturnEmpty
+ self.setWhitespaceChars( expr.whiteChars )
+ self.skipWhitespace = expr.skipWhitespace
+ self.saveAsList = expr.saveAsList
+ self.callPreparse = expr.callPreparse
+ self.ignoreExprs.extend(expr.ignoreExprs)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.expr is not None:
+ return self.expr._parse( instring, loc, doActions, callPreParse=False )
+ else:
+ raise ParseException("",loc,self.errmsg,self)
+
+ def leaveWhitespace( self ):
+ self.skipWhitespace = False
+ self.expr = self.expr.copy()
+ if self.expr is not None:
+ self.expr.leaveWhitespace()
+ return self
+
+ def ignore( self, other ):
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ super( ParseElementEnhance, self).ignore( other )
+ if self.expr is not None:
+ self.expr.ignore( self.ignoreExprs[-1] )
+ else:
+ super( ParseElementEnhance, self).ignore( other )
+ if self.expr is not None:
+ self.expr.ignore( self.ignoreExprs[-1] )
+ return self
+
+ def streamline( self ):
+ super(ParseElementEnhance,self).streamline()
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def checkRecursion( self, parseElementList ):
+ if self in parseElementList:
+ raise RecursiveGrammarException( parseElementList+[self] )
+ subRecCheckList = parseElementList[:] + [ self ]
+ if self.expr is not None:
+ self.expr.checkRecursion( subRecCheckList )
+
+ def validate( self, validateTrace=[] ):
+ tmp = validateTrace[:]+[self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self.checkRecursion( [] )
+
+ def __str__( self ):
+ try:
+ return super(ParseElementEnhance,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None and self.expr is not None:
+ self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+ return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+ """
+ Lookahead matching of the given parse expression. C{FollowedBy}
+ does I{not} advance the parsing position within the input string, it only
+ verifies that the specified parse expression matches at the current
+ position. C{FollowedBy} always returns a null token list.
+
+ Example::
+ # use FollowedBy to match a label only if it is followed by a ':'
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+ OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+ prints::
+ [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+ """
+ def __init__( self, expr ):
+ super(FollowedBy,self).__init__(expr)
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ self.expr.tryParse( instring, loc )
+ return loc, []
+
+
+class NotAny(ParseElementEnhance):
+ """
+ Lookahead to disallow matching with the given parse expression. C{NotAny}
+ does I{not} advance the parsing position within the input string, it only
+ verifies that the specified parse expression does I{not} match at the current
+ position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
+ always returns a null token list. May be constructed using the '~' operator.
+
+ Example::
+ AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
+
+ # take care not to mistake keywords for identifiers
+ ident = ~(AND | OR | NOT) + Word(alphas)
+ boolean_term = Optional(NOT) + ident
+ """
+ def __init__( self, expr ):
+ super(NotAny,self).__init__(expr)
+ #~ self.leaveWhitespace()
+ self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+ self.mayReturnEmpty = True
+ self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.expr.canParseNext(instring, loc):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+ return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+ def __init__( self, expr, stopOn=None):
+ super(_MultipleMatch, self).__init__(expr)
+ self.saveAsList = True
+ ender = stopOn
+ if isinstance(ender, basestring):
+ ender = ParserElement._literalStringClass(ender)
+ self.not_ender = ~ender if ender is not None else None
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ self_expr_parse = self.expr._parse
+ self_skip_ignorables = self._skipIgnorables
+ check_ender = self.not_ender is not None
+ if check_ender:
+ try_not_ender = self.not_ender.tryParse
+
+ # must be at least one (but first see if we are the stopOn sentinel;
+ # if so, fail)
+ if check_ender:
+ try_not_ender(instring, loc)
+ loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+ try:
+ hasIgnoreExprs = (not not self.ignoreExprs)
+ while 1:
+ if check_ender:
+ try_not_ender(instring, loc)
+ if hasIgnoreExprs:
+ preloc = self_skip_ignorables( instring, loc )
+ else:
+ preloc = loc
+ loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+ if tmptokens or tmptokens.haskeys():
+ tokens += tmptokens
+ except (ParseException,IndexError):
+ pass
+
+ return loc, tokens
+
+class OneOrMore(_MultipleMatch):
+ """
+ Repetition of one or more of the given expression.
+
+ Parameters:
+ - expr - expression that must match one or more times
+ - stopOn - (default=C{None}) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example::
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+ text = "shape: SQUARE posn: upper left color: BLACK"
+ OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+ # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+ OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+
+ # could also be written as
+ (attr_expr * (1,)).parseString(text).pprint()
+ """
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+ return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+ """
+ Optional repetition of zero or more of the given expression.
+
+ Parameters:
+ - expr - expression that must match zero or more times
+ - stopOn - (default=C{None}) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example: similar to L{OneOrMore}
+ """
+ def __init__( self, expr, stopOn=None):
+ super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ try:
+ return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+ except (ParseException,IndexError):
+ return loc, []
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+ return self.strRepr
+
+class _NullToken(object):
+ def __bool__(self):
+ return False
+ __nonzero__ = __bool__
+ def __str__(self):
+ return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+ """
+ Optional matching of the given expression.
+
+ Parameters:
+ - expr - expression that may match zero or one time
+ - default (optional) - value to be returned if the optional expression is not found.
+
+ Example::
+ # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+ zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+ zip.runTests('''
+ # traditional ZIP code
+ 12345
+
+ # ZIP+4 form
+ 12101-0001
+
+ # invalid ZIP
+ 98765-
+ ''')
+ prints::
+ # traditional ZIP code
+ 12345
+ ['12345']
+
+ # ZIP+4 form
+ 12101-0001
+ ['12101-0001']
+
+ # invalid ZIP
+ 98765-
+ ^
+ FAIL: Expected end of text (at char 5), (line:1, col:6)
+ """
+ def __init__( self, expr, default=_optionalNotMatched ):
+ super(Optional,self).__init__( expr, savelist=False )
+ self.saveAsList = self.expr.saveAsList
+ self.defaultValue = default
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ try:
+ loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+ except (ParseException,IndexError):
+ if self.defaultValue is not _optionalNotMatched:
+ if self.expr.resultsName:
+ tokens = ParseResults([ self.defaultValue ])
+ tokens[self.expr.resultsName] = self.defaultValue
+ else:
+ tokens = [ self.defaultValue ]
+ else:
+ tokens = []
+ return loc, tokens
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "[" + _ustr(self.expr) + "]"
+
+ return self.strRepr
+
+class SkipTo(ParseElementEnhance):
+ """
+ Token for skipping over all undefined text until the matched expression is found.
+
+ Parameters:
+ - expr - target expression marking the end of the data to be skipped
+ - include - (default=C{False}) if True, the target expression is also parsed
+ (the skipped text and target expression are returned as a 2-element list).
+ - ignore - (default=C{None}) used to define grammars (typically quoted strings and
+ comments) that might contain false matches to the target expression
+ - failOn - (default=C{None}) define expressions that are not allowed to be
+ included in the skipped text; if found before the target expression is found,
+ the SkipTo is not a match
+
+ Example::
+ report = '''
+ Outstanding Issues Report - 1 Jan 2000
+
+ # | Severity | Description | Days Open
+ -----+----------+-------------------------------------------+-----------
+ 101 | Critical | Intermittent system crash | 6
+ 94 | Cosmetic | Spelling error on Login ('log|n') | 14
+ 79 | Minor | System slow when running too many reports | 47
+ '''
+ integer = Word(nums)
+ SEP = Suppress('|')
+ # use SkipTo to simply match everything up until the next SEP
+ # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+ # - parse action will call token.strip() for each matched token, i.e., the description body
+ string_data = SkipTo(SEP, ignore=quotedString)
+ string_data.setParseAction(tokenMap(str.strip))
+ ticket_expr = (integer("issue_num") + SEP
+ + string_data("sev") + SEP
+ + string_data("desc") + SEP
+ + integer("days_open"))
+
+ for tkt in ticket_expr.searchString(report):
+ print(tkt.dump())
+ prints::
+ ['101', 'Critical', 'Intermittent system crash', '6']
+ - days_open: 6
+ - desc: Intermittent system crash
+ - issue_num: 101
+ - sev: Critical
+ ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+ - days_open: 14
+ - desc: Spelling error on Login ('log|n')
+ - issue_num: 94
+ - sev: Cosmetic
+ ['79', 'Minor', 'System slow when running too many reports', '47']
+ - days_open: 47
+ - desc: System slow when running too many reports
+ - issue_num: 79
+ - sev: Minor
+ """
+ def __init__( self, other, include=False, ignore=None, failOn=None ):
+ super( SkipTo, self ).__init__( other )
+ self.ignoreExpr = ignore
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.includeMatch = include
+ self.asList = False
+ if isinstance(failOn, basestring):
+ self.failOn = ParserElement._literalStringClass(failOn)
+ else:
+ self.failOn = failOn
+ self.errmsg = "No match found for "+_ustr(self.expr)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ startloc = loc
+ instrlen = len(instring)
+ expr = self.expr
+ expr_parse = self.expr._parse
+ self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+ self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+
+ tmploc = loc
+ while tmploc <= instrlen:
+ if self_failOn_canParseNext is not None:
+ # break if failOn expression matches
+ if self_failOn_canParseNext(instring, tmploc):
+ break
+
+ if self_ignoreExpr_tryParse is not None:
+ # advance past ignore expressions
+ while 1:
+ try:
+ tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+ except ParseBaseException:
+ break
+
+ try:
+ expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+ except (ParseException, IndexError):
+ # no match, advance loc in string
+ tmploc += 1
+ else:
+ # matched skipto expr, done
+ break
+
+ else:
+ # ran off the end of the input string without matching skipto expr, fail
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ # build up return values
+ loc = tmploc
+ skiptext = instring[startloc:loc]
+ skipresult = ParseResults(skiptext)
+
+ if self.includeMatch:
+ loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+ skipresult += mat
+
+ return loc, skipresult
+
+class Forward(ParseElementEnhance):
+ """
+ Forward declaration of an expression to be defined later -
+ used for recursive grammars, such as algebraic infix notation.
+ When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+ Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+ Specifically, '|' has a lower precedence than '<<', so that::
+ fwdExpr << a | b | c
+ will actually be evaluated as::
+ (fwdExpr << a) | b | c
+ thereby leaving b and c out as parseable alternatives. It is recommended that you
+ explicitly group the values inserted into the C{Forward}::
+ fwdExpr << (a | b | c)
+ Converting to use the '<<=' operator instead will avoid this problem.
+
+ See L{ParseResults.pprint} for an example of a recursive parser created using
+ C{Forward}.
+ """
+ def __init__( self, other=None ):
+ super(Forward,self).__init__( other, savelist=False )
+
+ def __lshift__( self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass(other)
+ self.expr = other
+ self.strRepr = None
+ self.mayIndexError = self.expr.mayIndexError
+ self.mayReturnEmpty = self.expr.mayReturnEmpty
+ self.setWhitespaceChars( self.expr.whiteChars )
+ self.skipWhitespace = self.expr.skipWhitespace
+ self.saveAsList = self.expr.saveAsList
+ self.ignoreExprs.extend(self.expr.ignoreExprs)
+ return self
+
+ def __ilshift__(self, other):
+ return self << other
+
+ def leaveWhitespace( self ):
+ self.skipWhitespace = False
+ return self
+
+ def streamline( self ):
+ if not self.streamlined:
+ self.streamlined = True
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def validate( self, validateTrace=[] ):
+ if self not in validateTrace:
+ tmp = validateTrace[:]+[self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self.checkRecursion([])
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+ return self.__class__.__name__ + ": ..."
+
+    # stubbed out for now - the code below is unreachable; re-enabling it creates awful memory and perf issues
+ self._revertClass = self.__class__
+ self.__class__ = _ForwardNoRecurse
+ try:
+ if self.expr is not None:
+ retString = _ustr(self.expr)
+ else:
+ retString = "None"
+ finally:
+ self.__class__ = self._revertClass
+ return self.__class__.__name__ + ": " + retString
+
+ def copy(self):
+ if self.expr is not None:
+ return super(Forward,self).copy()
+ else:
+ ret = Forward()
+ ret <<= self
+ return ret
+
+class _ForwardNoRecurse(Forward):
+ def __str__( self ):
+ return "..."
+
+class TokenConverter(ParseElementEnhance):
+ """
+    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
+ """
+ def __init__( self, expr, savelist=False ):
+ super(TokenConverter,self).__init__( expr )#, savelist )
+ self.saveAsList = False
+
+class Combine(TokenConverter):
+ """
+ Converter to concatenate all matching tokens to a single string.
+ By default, the matching patterns must also be contiguous in the input string;
+ this can be disabled by specifying C{'adjacent=False'} in the constructor.
+
+ Example::
+ real = Word(nums) + '.' + Word(nums)
+ print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+ # will also erroneously match the following
+ print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+ real = Combine(Word(nums) + '.' + Word(nums))
+ print(real.parseString('3.1416')) # -> ['3.1416']
+ # no match when there are internal spaces
+ print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+ """
+ def __init__( self, expr, joinString="", adjacent=True ):
+ super(Combine,self).__init__( expr )
+ # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+ if adjacent:
+ self.leaveWhitespace()
+ self.adjacent = adjacent
+ self.skipWhitespace = True
+ self.joinString = joinString
+ self.callPreparse = True
+
+ def ignore( self, other ):
+ if self.adjacent:
+ ParserElement.ignore(self, other)
+ else:
+ super( Combine, self).ignore( other )
+ return self
+
+ def postParse( self, instring, loc, tokenlist ):
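+        # copy the results (preserving any results names), then replace the
+        # token list with the single joined string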
+ retToks = tokenlist.copy()
+ del retToks[:]
+ retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+ if self.resultsName and retToks.haskeys():
+ return [ retToks ]
+ else:
+ return retToks
+
+class Group(TokenConverter):
+ """
+ Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+ Example::
+ ident = Word(alphas)
+ num = Word(nums)
+ term = ident | num
+ func = ident + Optional(delimitedList(term))
+ print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
+
+ func = ident + Group(Optional(delimitedList(term)))
+ print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
+ """
+ def __init__( self, expr ):
+ super(Group,self).__init__( expr )
+ self.saveAsList = True
+
+ def postParse( self, instring, loc, tokenlist ):
+ return [ tokenlist ]
+
+class Dict(TokenConverter):
+ """
+ Converter to return a repetitive expression as a list, but also as a dictionary.
+ Each element can also be referenced using the first token in the expression as its key.
+    Useful for tabular report scraping when the first column can be used as an item key.
+
+ Example::
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+ # print attributes as plain groups
+ print(OneOrMore(attr_expr).parseString(text).dump())
+
+ # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+ result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+ print(result.dump())
+
+ # access named fields as dict entries, or output as dict
+ print(result['shape'])
+ print(result.asDict())
+ prints::
+ ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: light blue
+ - posn: upper left
+ - shape: SQUARE
+ - texture: burlap
+ SQUARE
+ {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+    See L{ParseResults} for more examples of accessing fields by results name.
+ """
+ def __init__( self, expr ):
+ super(Dict,self).__init__( expr )
+ self.saveAsList = True
+
+ def postParse( self, instring, loc, tokenlist ):
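+        # walk each matched group, using its first token as the dict key and the
+        # remainder (empty string, scalar value, or sub-list) as the keyed value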
+ for i,tok in enumerate(tokenlist):
+ if len(tok) == 0:
+ continue
+ ikey = tok[0]
+ if isinstance(ikey,int):
+ ikey = _ustr(tok[0]).strip()
+ if len(tok)==1:
+ tokenlist[ikey] = _ParseResultsWithOffset("",i)
+ elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+ tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+ else:
+ dictvalue = tok.copy() #ParseResults(i)
+ del dictvalue[0]
+ if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+ else:
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+ if self.resultsName:
+ return [ tokenlist ]
+ else:
+ return tokenlist
+
+
+class Suppress(TokenConverter):
+ """
+ Converter for ignoring the results of a parsed expression.
+
+ Example::
+ source = "a, b, c,d"
+ wd = Word(alphas)
+ wd_list1 = wd + ZeroOrMore(',' + wd)
+ print(wd_list1.parseString(source))
+
+ # often, delimiters that are useful during parsing are just in the
+ # way afterward - use Suppress to keep them out of the parsed output
+ wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+ print(wd_list2.parseString(source))
+ prints::
+ ['a', ',', 'b', ',', 'c', ',', 'd']
+ ['a', 'b', 'c', 'd']
+ (See also L{delimitedList}.)
+ """
+ def postParse( self, instring, loc, tokenlist ):
+ return []
+
+ def suppress( self ):
+ return self
+
+
+class OnlyOnce(object):
+ """
+ Wrapper for parse actions, to ensure they are only called once.
+ """
+ def __init__(self, methodCall):
+ self.callable = _trim_arity(methodCall)
+ self.called = False
+ def __call__(self,s,l,t):
+ if not self.called:
+ results = self.callable(s,l,t)
+ self.called = True
+ return results
+ raise ParseException(s,l,"")
+ def reset(self):
+ self.called = False
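+
+# Illustrative sketch (not part of the original module): wrap a parse action in
+# OnlyOnce so it fires a single time; call reset() to re-arm it between parses.
+#
+#   first_hit = OnlyOnce(lambda s,l,t: sys.stderr.write("first: %s\n" % t))
+#   wd = Word(alphas).setParseAction(first_hit)
+#   OneOrMore(wd).parseString("one two")  # the action raises on the 2nd word,
+#                                         # so only ['one'] is matched
+#   first_hit.reset()                     # re-arm for the next parse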
+
+def traceParseAction(f):
+ """
+ Decorator for debugging parse actions.
+
+    When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
+ When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
+
+ Example::
+ wd = Word(alphas)
+
+ @traceParseAction
+ def remove_duplicate_chars(tokens):
+ return ''.join(sorted(set(''.join(tokens))))
+
+ wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+ print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+ prints::
+ >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+ <<leaving remove_duplicate_chars (ret: 'dfjkls')
+ ['dfjkls']
+ """
+ f = _trim_arity(f)
+ def z(*paArgs):
+ thisFunc = f.__name__
+ s,l,t = paArgs[-3:]
+ if len(paArgs)>3:
+ thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+ sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
+ try:
+ ret = f(*paArgs)
+ except Exception as exc:
+ sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+ raise
+ sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
+ return ret
+ try:
+ z.__name__ = f.__name__
+ except AttributeError:
+ pass
+ return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+ """
+ Helper to define a delimited list of expressions - the delimiter defaults to ','.
+    By default, the list elements and delimiters can have intervening whitespace and
+ comments, but this can be overridden by passing C{combine=True} in the constructor.
+ If C{combine} is set to C{True}, the matching tokens are returned as a single token
+ string, with the delimiters included; otherwise, the matching tokens are returned
+ as a list of tokens, with the delimiters suppressed.
+
+ Example::
+ delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+ delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+ """
+ dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+ if combine:
+ return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+ else:
+ return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
+
+def countedArray( expr, intExpr=None ):
+ """
+ Helper to define a counted list of expressions.
+ This helper defines a pattern of the form::
+ integer expr expr expr...
+ where the leading integer tells how many expr expressions follow.
+    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
+
+ If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
+
+ Example::
+ countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
+
+ # in this parser, the leading integer value is given in binary,
+ # '10' indicating that 2 values are in the array
+ binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+ countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
+ """
+ arrayExpr = Forward()
+ def countFieldParseAction(s,l,t):
+ n = t[0]
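+        # define the array body on the fly: n copies of expr, or the empty
+        # expression when n == 0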
+ arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+ return []
+ if intExpr is None:
+ intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+ else:
+ intExpr = intExpr.copy()
+ intExpr.setName("arrayLen")
+ intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+ return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+ ret = []
+ for i in L:
+ if isinstance(i,list):
+ ret.extend(_flatten(i))
+ else:
+ ret.append(i)
+ return ret
+
+def matchPreviousLiteral(expr):
+ """
+ Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks
+ for a 'repeat' of a previous expression. For example::
+ first = Word(nums)
+ second = matchPreviousLiteral(first)
+ matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
+    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
+ If this is not desired, use C{matchPreviousExpr}.
+ Do I{not} use with packrat parsing enabled.
+ """
+ rep = Forward()
+ def copyTokenToRepeater(s,l,t):
+ if t:
+ if len(t) == 1:
+ rep << t[0]
+ else:
+ # flatten t tokens
+ tflat = _flatten(t.asList())
+ rep << And(Literal(tt) for tt in tflat)
+ else:
+ rep << Empty()
+ expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+ rep.setName('(prev) ' + _ustr(expr))
+ return rep
+
+def matchPreviousExpr(expr):
+ """
+ Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks
+ for a 'repeat' of a previous expression. For example::
+ first = Word(nums)
+ second = matchPreviousExpr(first)
+ matchExpr = first + ":" + second
+    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
+    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
+ the expressions are evaluated first, and then compared, so
+ C{"1"} is compared with C{"10"}.
+ Do I{not} use with packrat parsing enabled.
+ """
+ rep = Forward()
+ e2 = expr.copy()
+ rep <<= e2
+ def copyTokenToRepeater(s,l,t):
+ matchTokens = _flatten(t.asList())
+ def mustMatchTheseTokens(s,l,t):
+ theseTokens = _flatten(t.asList())
+ if theseTokens != matchTokens:
+ raise ParseException("",0,"")
+ rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+ expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+ rep.setName('(prev) ' + _ustr(expr))
+ return rep
+
+def _escapeRegexRangeChars(s):
+ #~ escape these chars: ^-]
+ for c in r"\^-]":
+ s = s.replace(c,_bslash+c)
+ s = s.replace("\n",r"\n")
+ s = s.replace("\t",r"\t")
+ return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+ """
+    Helper to quickly define a set of alternative Literals. Makes sure to do
+    longest-first testing when there is a conflict, regardless of the input order,
+    but returns a C{L{MatchFirst}} for best performance.
+
+ Parameters:
+ - strs - a string of space-delimited literals, or a collection of string literals
+ - caseless - (default=C{False}) - treat all literals as caseless
+ - useRegex - (default=C{True}) - as an optimization, will generate a Regex
+ object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
+ if creating a C{Regex} raises an exception)
+
+ Example::
+ comp_oper = oneOf("< = > <= >= !=")
+ var = Word(alphas)
+ number = Word(nums)
+ term = var | number
+ comparison_expr = term + comp_oper + term
+ print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
+ prints::
+ [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+ """
+ if caseless:
+ isequal = ( lambda a,b: a.upper() == b.upper() )
+ masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+ parseElementClass = CaselessLiteral
+ else:
+ isequal = ( lambda a,b: a == b )
+ masks = ( lambda a,b: b.startswith(a) )
+ parseElementClass = Literal
+
+ symbols = []
+ if isinstance(strs,basestring):
+ symbols = strs.split()
+ elif isinstance(strs, Iterable):
+ symbols = list(strs)
+ else:
+ warnings.warn("Invalid argument to oneOf, expected string or iterable",
+ SyntaxWarning, stacklevel=2)
+ if not symbols:
+ return NoMatch()
+
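+    # remove duplicate symbols, and move any longer symbol that starts with an
+    # earlier, shorter symbol ahead of it, so longest alternatives are tested first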
+ i = 0
+ while i < len(symbols)-1:
+ cur = symbols[i]
+ for j,other in enumerate(symbols[i+1:]):
+ if ( isequal(other, cur) ):
+ del symbols[i+j+1]
+ break
+ elif ( masks(cur, other) ):
+ del symbols[i+j+1]
+ symbols.insert(i,other)
+ cur = other
+ break
+ else:
+ i += 1
+
+ if not caseless and useRegex:
+ #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+ try:
+ if len(symbols)==len("".join(symbols)):
+ return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
+ else:
+ return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
+ except Exception:
+ warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+ SyntaxWarning, stacklevel=2)
+
+
+ # last resort, just use MatchFirst
+ return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf( key, value ):
+ """
+ Helper to easily and clearly define a dictionary by specifying the respective patterns
+ for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
+ in the proper order. The key pattern can include delimiting markers or punctuation,
+ as long as they are suppressed, thereby leaving the significant key text. The value
+ pattern can include named results, so that the C{Dict} results can include named token
+ fields.
+
+ Example::
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+ print(OneOrMore(attr_expr).parseString(text).dump())
+
+ attr_label = label
+ attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+ # similar to Dict, but simpler call format
+ result = dictOf(attr_label, attr_value).parseString(text)
+ print(result.dump())
+ print(result['shape'])
+ print(result.shape) # object attribute access works too
+ print(result.asDict())
+ prints::
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: light blue
+ - posn: upper left
+ - shape: SQUARE
+ - texture: burlap
+ SQUARE
+ SQUARE
+ {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+ """
+ return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+ """
+ Helper to return the original, untokenized text for a given expression. Useful to
+ restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+ revert separate tokens with intervening whitespace back to the original matching
+    input text. By default, returns a string containing the original parsed text.
+
+ If the optional C{asString} argument is passed as C{False}, then the return value is a
+ C{L{ParseResults}} containing any results names that were originally matched, and a
+ single token containing the original matched text from the input string. So if
+ the expression passed to C{L{originalTextFor}} contains expressions with defined
+ results names, you must set C{asString} to C{False} if you want to preserve those
+ results name values.
+
+ Example::
+ src = "this is test <b> bold <i>text</i> </b> normal text "
+ for tag in ("b","i"):
+ opener,closer = makeHTMLTags(tag)
+ patt = originalTextFor(opener + SkipTo(closer) + closer)
+ print(patt.searchString(src)[0])
+ prints::
+ ['<b> bold <i>text</i> </b>']
+ ['<i>text</i>']
+ """
+ locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+ endlocMarker = locMarker.copy()
+ endlocMarker.callPreparse = False
+ matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+ if asString:
+ extractText = lambda s,l,t: s[t._original_start:t._original_end]
+ else:
+ def extractText(s,l,t):
+ t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+ matchExpr.setParseAction(extractText)
+ matchExpr.ignoreExprs = expr.ignoreExprs
+ return matchExpr
+
+def ungroup(expr):
+ """
+ Helper to undo pyparsing's default grouping of And expressions, even
+ if all but one are non-empty.
+ """
+ return TokenConverter(expr).setParseAction(lambda t:t[0])
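+
+# Minimal illustration (an assumption, not upstream documentation):
+#   grouped = Group(Word(alphas) + Word(nums))
+#   print(ungroup(grouped).parseString("abc 123"))   # -> ['abc', '123']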
+
+def locatedExpr(expr):
+ """
+ Helper to decorate a returned token with its starting and ending locations in the input string.
+ This helper adds the following results names:
+ - locn_start = location where matched expression begins
+ - locn_end = location where matched expression ends
+ - value = the actual parsed results
+
+    Be careful if the input text contains C{<TAB>} characters; you may want to call
+    C{L{ParserElement.parseWithTabs}}.
+
+ Example::
+ wd = Word(alphas)
+ for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+ print(match)
+ prints::
+ [[0, 'ljsdf', 5]]
+ [[8, 'lksdjjf', 15]]
+ [[18, 'lkkjj', 23]]
+ """
+ locator = Empty().setParseAction(lambda s,l,t: l)
+ return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty = Empty().setName("empty")
+lineStart = LineStart().setName("lineStart")
+lineEnd = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+def srange(s):
+ r"""
+ Helper to easily define string ranges for use in Word construction. Borrows
+ syntax from regexp '[]' string range definitions::
+ srange("[0-9]") -> "0123456789"
+ srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
+ srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+ The input string must be enclosed in []'s, and the returned string is the expanded
+ character set joined into a single string.
+ The values enclosed in the []'s may be:
+ - a single character
+ - an escaped character with a leading backslash (such as C{\-} or C{\]})
+ - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
+ (C{\0x##} is also supported for backwards compatibility)
+ - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
+ - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
+ - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
+ """
+ _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
+ try:
+ return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+ except Exception:
+ return ""
+
+def matchOnlyAtCol(n):
+ """
+ Helper method for defining parse actions that require matching at a specific
+ column in the input text.
+ """
+ def verifyCol(strg,locn,toks):
+ if col(locn,strg) != n:
+ raise ParseException(strg,locn,"matched token not at column %d" % n)
+ return verifyCol
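+
+# Brief usage sketch (illustrative only): accept a word only when it begins in
+# column 1 of its line.
+#   left_edge_word = Word(alphas).setParseAction(matchOnlyAtCol(1))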
+
+def replaceWith(replStr):
+ """
+ Helper method for common parse actions that simply return a literal value. Especially
+ useful when used with C{L{transformString<ParserElement.transformString>}()}.
+
+ Example::
+ num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+ term = na | num
+
+ OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+ """
+ return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+ """
+ Helper parse action for removing quotation marks from parsed quoted strings.
+
+ Example::
+ # by default, quotation marks are included in parsed results
+ quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+ # use removeQuotes to strip quotation marks from parsed results
+ quotedString.setParseAction(removeQuotes)
+ quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+ """
+ return t[0][1:-1]
+
+def tokenMap(func, *args):
+ """
+    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
+ args are passed, they are forwarded to the given function as additional arguments after
+ the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
+ parsed data to an integer using base 16.
+
+    Example (compare the last example to the one in L{ParserElement.transformString})::
+ hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+ hex_ints.runTests('''
+ 00 11 22 aa FF 0a 0d 1a
+ ''')
+
+ upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+ OneOrMore(upperword).runTests('''
+ my kingdom for a horse
+ ''')
+
+ wd = Word(alphas).setParseAction(tokenMap(str.title))
+ OneOrMore(wd).setParseAction(' '.join).runTests('''
+ now is the winter of our discontent made glorious summer by this sun of york
+ ''')
+ prints::
+ 00 11 22 aa FF 0a 0d 1a
+ [0, 17, 34, 170, 255, 10, 13, 26]
+
+ my kingdom for a horse
+ ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+ now is the winter of our discontent made glorious summer by this sun of york
+ ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+ """
+ def pa(s,l,t):
+ return [func(tokn, *args) for tokn in t]
+
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ pa.__name__ = func_name
+
+ return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
+
+def _makeTags(tagStr, xml):
+ """Internal helper to construct opening and closing tag expressions, given a tag name"""
+ if isinstance(tagStr,basestring):
+ resname = tagStr
+ tagStr = Keyword(tagStr, caseless=not xml)
+ else:
+ resname = tagStr.name
+
+ tagAttrName = Word(alphas,alphanums+"_-:")
+ if (xml):
+ tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+ openTag = Suppress("<") + tagStr("tag") + \
+ Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
+ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+ else:
+ printablesLessRAbrack = "".join(c for c in printables if c not in ">")
+ tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
+ openTag = Suppress("<") + tagStr("tag") + \
+ Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
+ Optional( Suppress("=") + tagAttrValue ) ))) + \
+ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+ closeTag = Combine(_L("</") + tagStr + ">")
+
+ openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
+ closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
+ openTag.tag = resname
+ closeTag.tag = resname
+ return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+ """
+ Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
+ tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
+
+ Example::
+ text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+ # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
+ a,a_end = makeHTMLTags("A")
+ link_expr = a + SkipTo(a_end)("link_text") + a_end
+
+ for link in link_expr.searchString(text):
+ # attributes in the <A> tag (like "href" shown here) are also accessible as named results
+ print(link.link_text, '->', link.href)
+ prints::
+ pyparsing -> http://pyparsing.wikispaces.com
+ """
+ return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+ """
+ Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
+ tags only in the given upper/lower case.
+
+ Example: similar to L{makeHTMLTags}
+ """
+ return _makeTags( tagStr, True )
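+
+# Short sketch mirroring the makeHTMLTags example (illustrative):
+#   body,body_end = makeXMLTags("body")
+#   # matches <body>/</body> with exact case; attribute values must be double-quoted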
+
+def withAttribute(*args,**attrDict):
+ """
+ Helper to create a validating parse action to be used with start tags created
+ with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
+ with a required attribute value, to avoid false matches on common tags such as
+ C{<TD>} or C{<DIV>}.
+
+ Call C{withAttribute} with a series of attribute names and values. Specify the list
+    of filter attribute names and values as:
+ - keyword arguments, as in C{(align="right")}, or
+ - as an explicit dict with C{**} operator, when an attribute name is also a Python
+ reserved word, as in C{**{"class":"Customer", "align":"right"}}
+ - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
+ For attribute names with a namespace prefix, you must use the second form. Attribute
+ names are matched insensitive to upper/lower case.
+
+ If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
+
+ To verify that the attribute exists, but without specifying a value, pass
+ C{withAttribute.ANY_VALUE} as the value.
+
+ Example::
+ html = '''
+ <div>
+ Some text
+ <div type="grid">1 4 0 1 0</div>
+ <div type="graph">1,3 2,3 1,1</div>
+ <div>this has no type</div>
+ </div>
+
+ '''
+ div,div_end = makeHTMLTags("div")
+
+ # only match div tag having a type attribute with value "grid"
+ div_grid = div().setParseAction(withAttribute(type="grid"))
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.searchString(html):
+ print(grid_header.body)
+
+ # construct a match with any div tag having a type attribute, regardless of the value
+ div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.searchString(html):
+ print(div_header.body)
+ prints::
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ if args:
+ attrs = args[:]
+ else:
+ attrs = attrDict.items()
+ attrs = [(k,v) for k,v in attrs]
+ def pa(s,l,tokens):
+ for attrName,attrValue in attrs:
+ if attrName not in tokens:
+ raise ParseException(s,l,"no matching attribute " + attrName)
+ if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+ raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
+ (attrName, tokens[attrName], attrValue))
+ return pa
+withAttribute.ANY_VALUE = object()
+
+def withClass(classname, namespace=''):
+ """
+ Simplified version of C{L{withAttribute}} when matching on a div class - made
+ difficult because C{class} is a reserved word in Python.
+
+ Example::
+ html = '''
+ <div>
+ Some text
+ <div class="grid">1 4 0 1 0</div>
+ <div class="graph">1,3 2,3 1,1</div>
+ <div>this &lt;div&gt; has no class</div>
+ </div>
+
+ '''
+ div,div_end = makeHTMLTags("div")
+ div_grid = div().setParseAction(withClass("grid"))
+
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.searchString(html):
+ print(grid_header.body)
+
+ div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.searchString(html):
+ print(div_header.body)
+ prints::
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ classattr = "%s:class" % namespace if namespace else "class"
+ return withAttribute(**{classattr : classname})
+
+opAssoc = _Constants()
+opAssoc.LEFT = object()
+opAssoc.RIGHT = object()
+
+def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
+ """
+ Helper method for constructing grammars of expressions made up of
+ operators working in a precedence hierarchy. Operators may be unary or
+ binary, left- or right-associative. Parse actions can also be attached
+ to operator expressions. The generated parser will also recognize the use
+ of parentheses to override operator precedences (see example below).
+
+ Note: if you define a deep operator list, you may see performance issues
+ when using infixNotation. See L{ParserElement.enablePackrat} for a
+ mechanism to potentially improve your parser performance.
+
+ Parameters:
+    - baseExpr - expression representing the most basic element of the nested grammar
+ - opList - list of tuples, one for each operator precedence level in the
+ expression grammar; each tuple is of the form
+ (opExpr, numTerms, rightLeftAssoc, parseAction), where:
+ - opExpr is the pyparsing expression for the operator;
+ may also be a string, which will be converted to a Literal;
+ if numTerms is 3, opExpr is a tuple of two expressions, for the
+ two operators separating the 3 terms
+ - numTerms is the number of terms for this operator (must
+ be 1, 2, or 3)
+ - rightLeftAssoc is the indicator whether the operator is
+ right or left associative, using the pyparsing-defined
+ constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
+ - parseAction is the parse action to be associated with
+ expressions matching this operator expression (the
+ parse action tuple member may be omitted); if the parse action
+ is passed a tuple or list of functions, this is equivalent to
+ calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
+ - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
+ - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
+
+ Example::
+ # simple example of four-function arithmetic with ints and variable names
+ integer = pyparsing_common.signed_integer
+ varname = pyparsing_common.identifier
+
+ arith_expr = infixNotation(integer | varname,
+ [
+ ('-', 1, opAssoc.RIGHT),
+ (oneOf('* /'), 2, opAssoc.LEFT),
+ (oneOf('+ -'), 2, opAssoc.LEFT),
+ ])
+
+ arith_expr.runTests('''
+ 5+3*6
+ (5+3)*6
+ -2--11
+ ''', fullDump=False)
+ prints::
+ 5+3*6
+ [[5, '+', [3, '*', 6]]]
+
+ (5+3)*6
+ [[[5, '+', 3], '*', 6]]
+
+ -2--11
+ [[['-', 2], '-', ['-', 11]]]
+ """
+ ret = Forward()
+ lastExpr = baseExpr | ( lpar + ret + rpar )
+ for i,operDef in enumerate(opList):
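+        # pad the operator definition tuple so that a missing parse action defaults to None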
+ opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
+ termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
+ if arity == 3:
+ if opExpr is None or len(opExpr) != 2:
+ raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
+ opExpr1, opExpr2 = opExpr
+ thisExpr = Forward().setName(termName)
+ if rightLeftAssoc == opAssoc.LEFT:
+ if arity == 1:
+ matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
+ else:
+ matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
+ elif arity == 3:
+ matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
+ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
+ else:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+ elif rightLeftAssoc == opAssoc.RIGHT:
+ if arity == 1:
+ # try to avoid LR with this extra test
+ if not isinstance(opExpr, Optional):
+ opExpr = Optional(opExpr)
+ matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
+ else:
+ matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
+ elif arity == 3:
+ matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
+ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
+ else:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+ else:
+ raise ValueError("operator must indicate right or left associativity")
+ if pa:
+ if isinstance(pa, (tuple, list)):
+ matchExpr.setParseAction(*pa)
+ else:
+ matchExpr.setParseAction(pa)
+ thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
+ lastExpr = thisExpr
+ ret <<= lastExpr
+ return ret
+
+operatorPrecedence = infixNotation
+"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
+
+dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
+sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
+ Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
+unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
+
+def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
+ """
+ Helper method for defining nested lists enclosed in opening and closing
+ delimiters ("(" and ")" are the default).
+
+ Parameters:
+ - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
+ - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
+ - content - expression for items within the nested lists (default=C{None})
+ - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
+
+ If an expression is not provided for the content argument, the nested
+ expression will capture all whitespace-delimited content between delimiters
+ as a list of separate values.
+
+ Use the C{ignoreExpr} argument to define expressions that may contain
+ opening or closing characters that should not be treated as opening
+ or closing characters for nesting, such as quotedString or a comment
+ expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
+ The default is L{quotedString}, but if no expressions are to be ignored,
+ then pass C{None} for this argument.
+
+ Example::
+ data_type = oneOf("void int short long char float double")
+ decl_data_type = Combine(data_type + Optional(Word('*')))
+ ident = Word(alphas+'_', alphanums+'_')
+ number = pyparsing_common.number
+ arg = Group(decl_data_type + ident)
+ LPAR,RPAR = map(Suppress, "()")
+
+ code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
+
+ c_function = (decl_data_type("type")
+ + ident("name")
+ + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ + code_body("body"))
+ c_function.ignore(cStyleComment)
+
+ source_code = '''
+ int is_odd(int x) {
+ return (x%2);
+ }
+
+ int dec_to_hex(char hchar) {
+ if (hchar >= '0' && hchar <= '9') {
+ return (ord(hchar)-ord('0'));
+ } else {
+ return (10+ord(hchar)-ord('A'));
+ }
+ }
+ '''
+ for func in c_function.searchString(source_code):
+ print("%(name)s (%(type)s) args: %(args)s" % func)
+
+ prints::
+ is_odd (int) args: [['int', 'x']]
+ dec_to_hex (int) args: [['char', 'hchar']]
+ """
+ if opener == closer:
+ raise ValueError("opening and closing strings cannot be the same")
+ if content is None:
+ if isinstance(opener,basestring) and isinstance(closer,basestring):
+ if len(opener) == 1 and len(closer)==1:
+ if ignoreExpr is not None:
+ content = (Combine(OneOrMore(~ignoreExpr +
+ CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ if ignoreExpr is not None:
+ content = (Combine(OneOrMore(~ignoreExpr +
+ ~Literal(opener) + ~Literal(closer) +
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ raise ValueError("opening and closing arguments must be strings if no content expression is given")
+ ret = Forward()
+ if ignoreExpr is not None:
+ ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
+ else:
+ ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
+ ret.setName('nested %s%s expression' % (opener,closer))
+ return ret
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+ """
+ Helper method for defining space-delimited indentation blocks, such as
+ those used to define block statements in Python source code.
+
+ Parameters:
+ - blockStatementExpr - expression defining syntax of statement that
+ is repeated within the indented block
+ - indentStack - list created by caller to manage indentation stack
+ (multiple statementWithIndentedBlock expressions within a single grammar
+ should share a common indentStack)
+    - indent - boolean indicating whether block must be indented beyond
+        the current level; set to False for a block of left-most statements
+ (default=C{True})
+
+ A valid block must contain at least one C{blockStatement}.
+
+ Example::
+ data = '''
+ def A(z):
+ A1
+ B = 100
+ G = A2
+ A2
+ A3
+ B
+ def BB(a,b,c):
+ BB1
+ def BBA():
+ bba1
+ bba2
+ bba3
+ C
+ D
+ def spam(x,y):
+ def eggs(z):
+ pass
+ '''
+
+
+ indentStack = [1]
+ stmt = Forward()
+
+ identifier = Word(alphas, alphanums)
+ funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
+ func_body = indentedBlock(stmt, indentStack)
+ funcDef = Group( funcDecl + func_body )
+
+ rvalue = Forward()
+ funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
+ rvalue << (funcCall | identifier | Word(nums))
+ assignment = Group(identifier + "=" + rvalue)
+ stmt << ( funcDef | assignment | identifier )
+
+ module_body = OneOrMore(stmt)
+
+ parseTree = module_body.parseString(data)
+ parseTree.pprint()
+ prints::
+ [['def',
+ 'A',
+ ['(', 'z', ')'],
+ ':',
+ [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+ 'B',
+ ['def',
+ 'BB',
+ ['(', 'a', 'b', 'c', ')'],
+ ':',
+ [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+ 'C',
+ 'D',
+ ['def',
+ 'spam',
+ ['(', 'x', 'y', ')'],
+ ':',
+ [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
+ """
+ def checkPeerIndent(s,l,t):
+ if l >= len(s): return
+ curCol = col(l,s)
+ if curCol != indentStack[-1]:
+ if curCol > indentStack[-1]:
+ raise ParseFatalException(s,l,"illegal nesting")
+ raise ParseException(s,l,"not a peer entry")
+
+ def checkSubIndent(s,l,t):
+ curCol = col(l,s)
+ if curCol > indentStack[-1]:
+ indentStack.append( curCol )
+ else:
+ raise ParseException(s,l,"not a subentry")
+
+ def checkUnindent(s,l,t):
+ if l >= len(s): return
+ curCol = col(l,s)
+ if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
+ raise ParseException(s,l,"not an unindent")
+ indentStack.pop()
+
+ NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
+ INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
+ PEER = Empty().setParseAction(checkPeerIndent).setName('')
+ UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
+ if indent:
+ smExpr = Group( Optional(NL) +
+ #~ FollowedBy(blockStatementExpr) +
+ INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
+ else:
+ smExpr = Group( Optional(NL) +
+ (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
+ blockStatementExpr.ignore(_bslash + LineEnd())
+ return smExpr.setName('indented block')
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
+_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
+commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
+def replaceHTMLEntity(t):
+ """Helper parser action to replace common HTML entities with their special characters"""
+ return _htmlEntityMap.get(t.entity)
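+
+# Typical use (a sketch, not part of the original source): attach the action to
+# commonHTMLEntity, then transform text containing entities.
+#   print(commonHTMLEntity.setParseAction(replaceHTMLEntity)
+#         .transformString("Dot &amp; Dash"))        # -> 'Dot & Dash'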
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
+"Comment of the form C{/* ... */}"
+
+htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
+"Comment of the form C{<!-- ... -->}"
+
+restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
+dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
+"Comment of the form C{// ... (to end of line)}"
+
+cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
+"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
+
+javaStyleComment = cppStyleComment
+"Same as C{L{cppStyleComment}}"
+
+pythonStyleComment = Regex(r"#.*").setName("Python style comment")
+"Comment of the form C{# ... (to end of line)}"
+
+_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
+ Optional( Word(" \t") +
+ ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
+commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
+"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
+ This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+ """
+ Here are some common low-level expressions that may be useful in jump-starting parser development:
+ - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
+ - common L{programming identifiers<identifier>}
+ - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
+ - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
+ - L{UUID<uuid>}
+ - L{comma-separated list<comma_separated_list>}
+ Parse actions:
+ - C{L{convertToInteger}}
+ - C{L{convertToFloat}}
+ - C{L{convertToDate}}
+ - C{L{convertToDatetime}}
+ - C{L{stripHTMLTags}}
+ - C{L{upcaseTokens}}
+ - C{L{downcaseTokens}}
+
+ Example::
+ pyparsing_common.number.runTests('''
+ # any int or real number, returned as the appropriate type
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.fnumber.runTests('''
+ # any int or real number, returned as float
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.hex_integer.runTests('''
+ # hex numbers
+ 100
+ FF
+ ''')
+
+ pyparsing_common.fraction.runTests('''
+ # fractions
+ 1/2
+ -3/4
+ ''')
+
+ pyparsing_common.mixed_integer.runTests('''
+ # mixed fractions
+ 1
+ 1/2
+ -3/4
+ 1-3/4
+ ''')
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests('''
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ ''')
+ prints::
+ # any int or real number, returned as the appropriate type
+ 100
+ [100]
+
+ -100
+ [-100]
+
+ +100
+ [100]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # any int or real number, returned as float
+ 100
+ [100.0]
+
+ -100
+ [-100.0]
+
+ +100
+ [100.0]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # hex numbers
+ 100
+ [256]
+
+ FF
+ [255]
+
+ # fractions
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ # mixed fractions
+ 1
+ [1]
+
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ 1-3/4
+ [1.75]
+
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ [UUID('12345678-1234-5678-1234-567812345678')]
+ """
+
+ convertToInteger = tokenMap(int)
+ """
+ Parse action for converting parsed integers to Python int
+ """
+
+ convertToFloat = tokenMap(float)
+ """
+ Parse action for converting parsed numbers to Python float
+ """
+
+ integer = Word(nums).setName("integer").setParseAction(convertToInteger)
+ """expression that parses an unsigned integer, returns an int"""
+
+ hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
+ """expression that parses a hexadecimal integer, returns an int"""
+
+ signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
+ """expression that parses an integer with optional leading sign, returns an int"""
+
+ fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
+ """fractional expression of an integer divided by an integer, returns a float"""
+ fraction.addParseAction(lambda t: t[0]/t[-1])
+
+ mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
+ """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+ mixed_integer.addParseAction(sum)
+
+ real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
+ """expression that parses a floating point number and returns a float"""
+
+ sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
+ """expression that parses a floating point number with optional scientific notation and returns a float"""
+
+ # streamlining this expression makes the docs nicer-looking
+ number = (sci_real | real | signed_integer).streamline()
+ """any numeric expression, returns the corresponding Python type"""
+
+ fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
+ """any int or real number, returned as float"""
+
+ identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
+ """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+ ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
+ "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
+
+ _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
+ _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
+ _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
+ _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
+ _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
+ ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
+ "IPv6 address (long, short, or mixed form)"
+
+ mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
+    "MAC address xx:xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+ @staticmethod
+ def convertToDate(fmt="%Y-%m-%d"):
+ """
+ Helper to create a parse action for converting parsed date string to Python datetime.date
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
+
+ Example::
+ date_expr = pyparsing_common.iso8601_date.copy()
+ date_expr.setParseAction(pyparsing_common.convertToDate())
+ print(date_expr.parseString("1999-12-31"))
+ prints::
+ [datetime.date(1999, 12, 31)]
+ """
+ def cvt_fn(s,l,t):
+ try:
+ return datetime.strptime(t[0], fmt).date()
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+ return cvt_fn
+
+ @staticmethod
+ def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
+ """
+ Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
+
+ Example::
+ dt_expr = pyparsing_common.iso8601_datetime.copy()
+ dt_expr.setParseAction(pyparsing_common.convertToDatetime())
+ print(dt_expr.parseString("1999-12-31T23:59:59.999"))
+ prints::
+ [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+ """
+ def cvt_fn(s,l,t):
+ try:
+ return datetime.strptime(t[0], fmt)
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+ return cvt_fn
+
+ iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
+ "ISO8601 date (C{yyyy-mm-dd})"
+
+ iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
+ "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
+
+ uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
+ "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
+
+ _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
+ @staticmethod
+ def stripHTMLTags(s, l, tokens):
+ """
+ Parse action to remove HTML tags from web page HTML source
+
+ Example::
+ # strip HTML links from normal text
+ text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+ td,td_end = makeHTMLTags("TD")
+ table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
+
+ print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
+ """
+ return pyparsing_common._html_stripper.transformString(tokens[0])
+
+ _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
+ comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
+ """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+ upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
+ """Parse action to convert tokens to upper case."""
+
+ downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
+ """Parse action to convert tokens to lower case."""
+
+
+if __name__ == "__main__":
+
+ selectToken = CaselessLiteral("select")
+ fromToken = CaselessLiteral("from")
+
+ ident = Word(alphas, alphanums + "_$")
+
+ columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+ columnNameList = Group(delimitedList(columnName)).setName("columns")
+ columnSpec = ('*' | columnNameList)
+
+ tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+ tableNameList = Group(delimitedList(tableName)).setName("tables")
+
+ simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
+
+ # demo runTests method, including embedded comments in test string
+ simpleSQL.runTests("""
+ # '*' as column list and dotted table name
+ select * from SYS.XYZZY
+
+ # caseless match on "SELECT", and casts back to "select"
+ SELECT * from XYZZY, ABC
+
+ # list of column names, and mixed case SELECT keyword
+ Select AA,BB,CC from Sys.dual
+
+ # multiple tables
+ Select A, B, C from Sys.dual, Table2
+
+ # invalid SELECT keyword - should fail
+ Xelect A, B, C from Sys.dual
+
+ # incomplete command - should fail
+ Select
+
+ # invalid column name - should fail
+ Select ^^^ frox Sys.dual
+
+ """)
+
+ pyparsing_common.number.runTests("""
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ """)
+
+ # any int or real number, returned as float
+ pyparsing_common.fnumber.runTests("""
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ """)
+
+ pyparsing_common.hex_integer.runTests("""
+ 100
+ FF
+ """)
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests("""
+ 12345678-1234-5678-1234-567812345678
+ """)
diff --git a/third_party/python/setuptools/pkg_resources/extern/__init__.py b/third_party/python/setuptools/pkg_resources/extern/__init__.py
new file mode 100644
index 0000000000..4dc3beb2fa
--- /dev/null
+++ b/third_party/python/setuptools/pkg_resources/extern/__init__.py
@@ -0,0 +1,66 @@
+import sys
+
+
+class VendorImporter:
+ """
+ A PEP 302 meta path importer for finding optionally-vendored
+ or otherwise naturally-installed packages from root_name.
+ """
+
+ def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+ self.root_name = root_name
+ self.vendored_names = set(vendored_names)
+ self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+ @property
+ def search_path(self):
+ """
+ Search first the vendor package, then as a natural package.
+ """
+ yield self.vendor_pkg + '.'
+ yield ''
+
+ def find_module(self, fullname, path=None):
+ """
+ Return self when fullname starts with root_name and the
+ target module is one vendored through this importer.
+ """
+ root, base, target = fullname.partition(self.root_name + '.')
+ if root:
+ return
+ if not any(map(target.startswith, self.vendored_names)):
+ return
+ return self
+
+ def load_module(self, fullname):
+ """
+ Iterate over the search path to locate and load fullname.
+ """
+ root, base, target = fullname.partition(self.root_name + '.')
+ for prefix in self.search_path:
+ try:
+ extant = prefix + target
+ __import__(extant)
+ mod = sys.modules[extant]
+ sys.modules[fullname] = mod
+ return mod
+ except ImportError:
+ pass
+ else:
+ raise ImportError(
+ "The '{target}' package is required; "
+ "normally this is bundled with this package so if you get "
+ "this warning, consult the packager of your "
+ "distribution.".format(**locals())
+ )
+
+ def install(self):
+ """
+ Install this importer into sys.meta_path if not already present.
+ """
+ if self not in sys.meta_path:
+ sys.meta_path.append(self)
+
+
+names = 'packaging', 'pyparsing', 'appdirs'
+VendorImporter(__name__, names).install()
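A minimal sketch of what installing this importer buys (module names mirror the vendored layout; the resolved name is typical, not guaranteed):

    import pkg_resources.extern          # importing the module installs VendorImporter

    # Resolved first as pkg_resources._vendor.packaging, then as a top-level
    # 'packaging'; either way the alias lands in sys.modules under this name.
    from pkg_resources.extern import packaging
    print(packaging.__name__)            # usually 'pkg_resources._vendor.packaging'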
diff --git a/third_party/python/setuptools/setuptools-51.2.0.dist-info/LICENSE b/third_party/python/setuptools/setuptools-51.2.0.dist-info/LICENSE
new file mode 100644
index 0000000000..353924be0e
--- /dev/null
+++ b/third_party/python/setuptools/setuptools-51.2.0.dist-info/LICENSE
@@ -0,0 +1,19 @@
+Copyright Jason R. Coombs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/third_party/python/setuptools/setuptools-51.2.0.dist-info/METADATA b/third_party/python/setuptools/setuptools-51.2.0.dist-info/METADATA
new file mode 100644
index 0000000000..9c11c15ca6
--- /dev/null
+++ b/third_party/python/setuptools/setuptools-51.2.0.dist-info/METADATA
@@ -0,0 +1,110 @@
+Metadata-Version: 2.1
+Name: setuptools
+Version: 51.2.0
+Summary: Easily download, build, install, upgrade, and uninstall Python packages
+Home-page: https://github.com/pypa/setuptools
+Author: Python Packaging Authority
+Author-email: distutils-sig@python.org
+License: UNKNOWN
+Project-URL: Documentation, https://setuptools.readthedocs.io/
+Keywords: CPAN PyPI distutils eggs package management
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: Topic :: System :: Systems Administration
+Classifier: Topic :: Utilities
+Requires-Python: >=3.6
+Provides-Extra: certs
+Requires-Dist: certifi (==2016.9.26) ; extra == 'certs'
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+Requires-Dist: jaraco.packaging (>=8.2) ; extra == 'docs'
+Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
+Requires-Dist: pygments-github-lexers (==0.0.5) ; extra == 'docs'
+Provides-Extra: ssl
+Requires-Dist: wincertstore (==0.2) ; (sys_platform == "win32") and extra == 'ssl'
+Provides-Extra: testing
+Requires-Dist: pytest (!=3.7.3,>=3.5) ; extra == 'testing'
+Requires-Dist: pytest-checkdocs (>=1.2.3) ; extra == 'testing'
+Requires-Dist: pytest-flake8 ; extra == 'testing'
+Requires-Dist: pytest-cov ; extra == 'testing'
+Requires-Dist: jaraco.test (>=3.2.0) ; extra == 'testing'
+Requires-Dist: mock ; extra == 'testing'
+Requires-Dist: flake8-2020 ; extra == 'testing'
+Requires-Dist: virtualenv (>=13.0.0) ; extra == 'testing'
+Requires-Dist: pytest-virtualenv (>=1.2.7) ; extra == 'testing'
+Requires-Dist: wheel ; extra == 'testing'
+Requires-Dist: paver ; extra == 'testing'
+Requires-Dist: pip (>=19.1) ; extra == 'testing'
+Requires-Dist: jaraco.envs ; extra == 'testing'
+Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing'
+
+.. image:: https://img.shields.io/pypi/v/setuptools.svg
+ :target: `PyPI link`_
+
+.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg
+ :target: `PyPI link`_
+
+.. _PyPI link: https://pypi.org/project/setuptools
+
+.. image:: https://github.com/pypa/setuptools/workflows/Automated%20Tests/badge.svg
+ :target: https://github.com/pypa/setuptools/actions?query=workflow%3A%22Automated+Tests%22
+ :alt: Automated Tests
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+ :alt: Code style: Black
+
+.. image:: https://img.shields.io/readthedocs/setuptools/latest.svg
+ :target: https://setuptools.readthedocs.io
+
+.. image:: https://img.shields.io/codecov/c/github/pypa/setuptools/master.svg?logo=codecov&logoColor=white
+ :target: https://codecov.io/gh/pypa/setuptools
+
+.. image:: https://tidelift.com/badges/github/pypa/setuptools?style=flat
+ :target: https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=readme
+
+See the `Installation Instructions
+<https://packaging.python.org/installing/>`_ in the Python Packaging
+User's Guide for instructions on installing, upgrading, and uninstalling
+Setuptools.
+
+Questions and comments should be directed to the `distutils-sig
+mailing list <http://mail.python.org/pipermail/distutils-sig/>`_.
+Bug reports and especially tested patches may be
+submitted directly to the `bug tracker
+<https://github.com/pypa/setuptools/issues>`_.
+
+
+Code of Conduct
+===============
+
+Everyone interacting in the setuptools project's codebases, issue trackers,
+chat rooms, and mailing lists is expected to follow the
+`PSF Code of Conduct <https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md>`_.
+
+
+For Enterprise
+==============
+
+Available as part of the Tidelift Subscription.
+
+Setuptools and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
+
+`Learn more <https://tidelift.com/subscription/pkg/pypi-setuptools?utm_source=pypi-setuptools&utm_medium=referral&utm_campaign=github>`_.
+
+
+Security Contact
+================
+
+To report a security vulnerability, please use the
+`Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure.
+
+
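A small sketch (assuming Python 3.8+'s importlib.metadata, which is not part of this diff) of reading these fields at runtime once the wheel is installed:

    from importlib.metadata import metadata

    md = metadata("setuptools")
    print(md["Version"])                  # '51.2.0' for this vendored wheel
    print(md.get_all("Provides-Extra"))   # ['certs', 'docs', 'ssl', 'testing']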
diff --git a/third_party/python/setuptools/setuptools-51.2.0.dist-info/RECORD b/third_party/python/setuptools/setuptools-51.2.0.dist-info/RECORD
new file mode 100644
index 0000000000..f2107e9abf
--- /dev/null
+++ b/third_party/python/setuptools/setuptools-51.2.0.dist-info/RECORD
@@ -0,0 +1,156 @@
+distutils-precedence.pth,sha256=fqf_7z_ioRfuEsaO1lU2F_DX_S8FkCV8JcSElZo7c3M,152
+easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126
+_distutils_hack/__init__.py,sha256=wFuARcmlHtkV20HfRBlQaMPY7hQx-TEEtnBpXeysiwI,3552
+_distutils_hack/override.py,sha256=Eu_s-NF6VIZ4Cqd0tbbA5wtWky2IZPNd8et6GLt1mzo,44
+pkg_resources/__init__.py,sha256=lhZRsyFftcjl10dCV_WtPIg9PzS-z4pSckiclF-hYWA,107968
+pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+pkg_resources/_vendor/appdirs.py,sha256=MievUEuv3l_mQISH5SF0shDk_BNhHHzYiAPrT3ITN4I,24701
+pkg_resources/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
+pkg_resources/_vendor/packaging/__about__.py,sha256=PNMsaZn4UcCHyubgROH1bl6CluduPjI5kFrSp_Zgklo,736
+pkg_resources/_vendor/packaging/__init__.py,sha256=6enbp5XgRfjBjsI9-bn00HjHf5TH21PDMOKkJW8xw-w,562
+pkg_resources/_vendor/packaging/_compat.py,sha256=MXdsGpSE_W-ZrHoC87andI4LV2FAwU7HLL-eHe_CjhU,1128
+pkg_resources/_vendor/packaging/_structures.py,sha256=ozkCX8Q8f2qE1Eic3YiQ4buDVfgz2iYevY9e7R2y3iY,2022
+pkg_resources/_vendor/packaging/_typing.py,sha256=x59EhQ57TMT-kTRyLZV25HZvYGGwbucTo6iKh_O0tMw,1812
+pkg_resources/_vendor/packaging/markers.py,sha256=YSntQkMnKyw1_FG6oRNNnGxLL6bAxcGXOtuFE-YTS3k,9518
+pkg_resources/_vendor/packaging/requirements.py,sha256=R8K4H4xX_iD4LvpGw1U3ouuPbGN-wzsFgD7brhAM71Y,4929
+pkg_resources/_vendor/packaging/specifiers.py,sha256=uYp9l13F0LcknS6d4N60ytiBgFmIhKideOq9AnsxTco,31944
+pkg_resources/_vendor/packaging/tags.py,sha256=NKMS37Zo_nWrZxgsD6zbXsXgc9edn9m160cBiLmHJdE,24067
+pkg_resources/_vendor/packaging/utils.py,sha256=RShlvnjO2CtYSD8uri32frMMFMTmB-3ihsq1-ghzLEw,1811
+pkg_resources/_vendor/packaging/version.py,sha256=Cnbm-OO9D_qd8ZTFxzFcjSavexSYFZmyeaoPvMsjgPc,15470
+pkg_resources/extern/__init__.py,sha256=Gj7bMt5YI5cGt6kcg-M2GNxOL_k2x-bUlLzPaZK8uek,2094
+setuptools/__init__.py,sha256=0c232LRyOLGdL-Ywmgk8uMubx7I21w-ixJWiT0jQK-c,7681
+setuptools/_deprecation_warning.py,sha256=jU9-dtfv6cKmtQJOXN8nP1mm7gONw5kKEtiPtbwnZyI,218
+setuptools/_imp.py,sha256=Qx0LJzEBaWk_6PfICamJtfBN2rh5K9sJq1wXvtZW-mc,2388
+setuptools/archive_util.py,sha256=F1-XrQJTdXHRPRA09kxPWwm9Z2Ms1lE_IQZKG_JZ7rM,6638
+setuptools/build_meta.py,sha256=bm5RnBmygBneOldaLhcuncOiTqH5NPdBZISVeUDeYyc,10096
+setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
+setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752
+setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536
+setuptools/config.py,sha256=NUrsVW54pxYCuekfJbV7B6wyKzvzatXM5zT4LqnegWo,21485
+setuptools/dep_util.py,sha256=BDx1BkzNQntvAB4alypHbW5UVBzjqths000PrUL4Zqc,949
+setuptools/depends.py,sha256=iHfZdLdlCu2BllSF9bRg7NU0oqbPWMH8ljm4BuwQDY0,5474
+setuptools/dist.py,sha256=msWjjUNczEXVO5H_X-Xx6Y00_9VM_S0_cIBWehrkVRM,38405
+setuptools/errors.py,sha256=MVOcv381HNSajDgEUWzOQ4J6B5BHCBMSjHfaWcEwA1o,524
+setuptools/extension.py,sha256=NMM46XjNdVelWemc0x8CyVKA5Ks6Zm3xTWSA2SS6xZM,1684
+setuptools/glob.py,sha256=o75cHrOxYsvn854thSxE0x9k8JrKDuhP_rRXlVB00Q4,5084
+setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
+setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264
+setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536
+setuptools/installer.py,sha256=G6xBHtc3GYR6fXKLN6xQl4f50KIaAMyMpMsknOLHRt0,5277
+setuptools/launch.py,sha256=TyPT-Ic1T2EnYvGO26gfNRP4ysBlrhpbRjQxWsiO414,812
+setuptools/lib2to3_ex.py,sha256=YKA7CmdIJWwy0-yuZAxUgoNHbXFmT4p53iNadWdBQCk,2335
+setuptools/monkey.py,sha256=0e3HdVKXHL415O7np-AUqhEFXPPuDdJKbI47chQ_DE4,5217
+setuptools/msvc.py,sha256=PHsjWnTwaD12gDJ1nbt9WkcRvwOOfTK0N7w5fmTZTS0,51197
+setuptools/namespaces.py,sha256=PMqGVPXPYQgjUTvEg9bGccRAkIODrQ6NmsDg_fwErwI,3093
+setuptools/package_index.py,sha256=lRLjafYNmRJ3_ZPmgwdU_Evh2BOu63PJoJPZbsXPIrQ,40610
+setuptools/py34compat.py,sha256=KYOd6ybRxjBW8NJmYD8t_UyyVmysppFXqHpFLdslGXU,245
+setuptools/sandbox.py,sha256=IirxmeCHbl1CHT7pEPgQ6tTx9wU854n-d2p80Su8t5c,14151
+setuptools/script (dev).tmpl,sha256=RUzQzCQUaXtwdLtYHWYbIQmOaES5Brqq1FvUA_tu-5I,218
+setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138
+setuptools/ssl_support.py,sha256=3yuxmYJxPupL97d4RBihMo1UCMXSou1Upn4VE9jkq48,8511
+setuptools/unicode_utils.py,sha256=aOOFo4JGwAsiBttGYDsqFS7YqWQeZ2j6DWiCuctR_00,941
+setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144
+setuptools/wheel.py,sha256=0P8tSk105uF_Ub-30N2HU2X2v7MKDSdjpeQlRRW3SkI,8288
+setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714
+setuptools/_distutils/__init__.py,sha256=lpQAphR_7uhWC2fbSEps4Ja9W4YwezN_IX_LJEt3khU,250
+setuptools/_distutils/_msvccompiler.py,sha256=JQcHez50UA3BQKK9fOKANI_GzNFx3_qnZdyHyHNAghA,20813
+setuptools/_distutils/archive_util.py,sha256=qW-uiGwYexTvK5e-iSel_31Dshx-CqTanNPK6snwf98,8572
+setuptools/_distutils/bcppcompiler.py,sha256=OJDVpCUmX6H8v_7lV1zifV1fcx92Cr2dhiUh6989UJI,14894
+setuptools/_distutils/ccompiler.py,sha256=4cqQgq06NbGo0vazGMT2aPZ6K2Z-HcuRn9Pfz_bQUPw,47437
+setuptools/_distutils/cmd.py,sha256=eco6LAGUtobLuPafuhmgKgkwRRL_WY8KJ4YeDCHpcls,18079
+setuptools/_distutils/config.py,sha256=dtHgblx9JhfyrKx1-J7Jlxw_f7s8ZbPFQii2UWMTZpY,4827
+setuptools/_distutils/core.py,sha256=jbdOkpOK09xi-56vhhwvn3fYdhLb5DJO8q3K1fnQz0Q,8876
+setuptools/_distutils/cygwinccompiler.py,sha256=9U4JAusUzlAGJl0Y5nToPkQ3ldzseAtiye434mwJ0ow,16380
+setuptools/_distutils/debug.py,sha256=N6MrTAqK6l9SVk6tWweR108PM8Ol7qNlfyV-nHcLhsY,139
+setuptools/_distutils/dep_util.py,sha256=GuR9Iw_jzZRkyemJ5HX8rB_wRGxkIBcBm1qh54r7zhk,3491
+setuptools/_distutils/dir_util.py,sha256=UwhBOUTcV65GTwce4SPuTXR8Z8q3LYEcmttqcGb0bYo,7778
+setuptools/_distutils/dist.py,sha256=Biuf6ca8uiFfMScRFsYUKtb5neMPtxKxRtXn50_1f3U,50421
+setuptools/_distutils/errors.py,sha256=Yr6tKZGdzBoNi53vBtiq0UJ__X05CmxSdQJqOWaw6SY,3577
+setuptools/_distutils/extension.py,sha256=bTb3Q0CoevGKYv5dX1ls--Ln8tlB0-UEOsi9BwzlZ-s,10515
+setuptools/_distutils/fancy_getopt.py,sha256=OPxp2CxHi1Yp_d1D8JxW4Ueq9fC71tegQFaafh58GGU,17784
+setuptools/_distutils/file_util.py,sha256=0hUqfItN_x2DVihR0MHdA4KCMVCOO8VoByaFp_a6MDg,8148
+setuptools/_distutils/filelist.py,sha256=8bRxhzp2FsaoHT7TuKD4Qjcuh_B9Ow_xTt_htZJvN2Q,12832
+setuptools/_distutils/log.py,sha256=hWBmdUC2K927QcVv3REMW3HMPclxccPQngxLSuUXQl0,1969
+setuptools/_distutils/msvc9compiler.py,sha256=uv0TAfoWrxEBOQL-Z2uws5g4AXoTPahUEMuq6FLkCYY,30453
+setuptools/_distutils/msvccompiler.py,sha256=ZYsnUgIC4tZT2WkJbTkTUyVSCAc2nFM9DVKIuIfPBU0,23540
+setuptools/_distutils/py35compat.py,sha256=-sk1vBIsOgH-AobjIYbK_OEjdJF_54Ul_D1EiE9XM_c,455
+setuptools/_distutils/py38compat.py,sha256=II7ddBxOijC7uNN4z_46HYUjwYTJYMNiLJoGTormZm0,212
+setuptools/_distutils/spawn.py,sha256=XBmUqzhxXfay_JE18RkaalHf9kgi7NvXeBPW9BfTqmw,4408
+setuptools/_distutils/sysconfig.py,sha256=5z55MU7gXeceL_G9FK6ex-2OvdeIXJRZJafrtthJcfU,21349
+setuptools/_distutils/text_file.py,sha256=PsuAJeWdKJoLSV_6N6IpB5-0Pa84KzLUucJMFRazw3I,12483
+setuptools/_distutils/unixccompiler.py,sha256=E65edChYLoHY8wi4OxFu_wKt3hJe3GySF6v51G_ZzL0,14696
+setuptools/_distutils/util.py,sha256=Wlz9noChJjzem9mfgOu-KaN8soB4aNhRfe4VGltXd8w,20985
+setuptools/_distutils/version.py,sha256=8NogP6NPPQpp3EUMZcT9czEHia-ehqPo8spo_e7AgUU,12514
+setuptools/_distutils/versionpredicate.py,sha256=ZxpEA-TQv88mUWc6hetUO4qSqA2sa7ipjZ3QEK5evDk,5133
+setuptools/_distutils/command/__init__.py,sha256=2TA-rlNDlzeI-csbWHXFjGD8uOYqALMfyWOhT49nC6g,799
+setuptools/_distutils/command/bdist.py,sha256=2z4eudRl_n7m3lG9leL0IYqes4bsm8c0fxfZuiafjMg,5562
+setuptools/_distutils/command/bdist_dumb.py,sha256=BTur9jcIppyP7Piavjfsk7YjElqvxeYO2npUyPPOekc,4913
+setuptools/_distutils/command/bdist_msi.py,sha256=EVFQYN_X-ExeeP8gmdV9JcINsuUGsLJUz9afMU0Rt8c,35579
+setuptools/_distutils/command/bdist_rpm.py,sha256=gjOw22GhDSbcq0bdq25cTb-n6HWWm0bShLQad_mkJ4k,21537
+setuptools/_distutils/command/bdist_wininst.py,sha256=iGlaI-VfElHOneeczKHWnSN5a10-7IMcJaXuR1mdS3c,16030
+setuptools/_distutils/command/build.py,sha256=11NyR2UAUzalrkTZ2ph0BAHFWFC2jtSsN7gIaF-NC08,5767
+setuptools/_distutils/command/build_clib.py,sha256=bgVTHh28eLQA2Gkw68amApd_j7qQBX4MTI-zTvAK_J4,8022
+setuptools/_distutils/command/build_ext.py,sha256=Y_SYbd8SHcpgNPfv3ifVniZljYs1cLAFleBSi2_O3CY,31685
+setuptools/_distutils/command/build_py.py,sha256=S_Nlw4hZE8PnIgqX5OFMdmt-GSmOhPQQ4f2jr1uBnoU,17190
+setuptools/_distutils/command/build_scripts.py,sha256=aKycJJPx3LfZ1cvZgSJaxnD2LnvRM5WJ-8xkpdgcLsI,6232
+setuptools/_distutils/command/check.py,sha256=5qDtI75ccZg3sAItQWeaIu8y3FR314O4rr9Smz4HsEo,5637
+setuptools/_distutils/command/clean.py,sha256=2TCt47ru4hZZM0RfVfUYj5bbpicpGLP4Qhw5jBtvp9k,2776
+setuptools/_distutils/command/config.py,sha256=2aTjww3PwjMB8-ZibCe4P7B-qG1hM1gn_rJXYyxRz6c,13117
+setuptools/_distutils/command/install.py,sha256=oOM2rD7l_SglARNVDmiZn8u6DAfidXRF_yE5QS328B4,27482
+setuptools/_distutils/command/install_data.py,sha256=YhGOAwh3gJPqF7em5XA0rmpR42z1bLh80ooElzDyUvk,2822
+setuptools/_distutils/command/install_egg_info.py,sha256=0kW0liVMeadkjX0ZcRfMptKFen07Gw6gyw1VHT5KIwc,2603
+setuptools/_distutils/command/install_headers.py,sha256=XQ6idkbIDfr1ljXCOznuVUMvOFpHBn6cK0Wz9gIM2b4,1298
+setuptools/_distutils/command/install_lib.py,sha256=9AofR-MO9lAtjwwuukCptepOaJEKMZW2VHiyR5hU7HA,8397
+setuptools/_distutils/command/install_scripts.py,sha256=_CLUeQwGJRcY2kik7azPMn5IdtDCrjWdUvZ1khlG6ck,2017
+setuptools/_distutils/command/py37compat.py,sha256=qzRhhvTihqx_PZZt2ZYECxh1X3Oj255VqatzelYFAKw,671
+setuptools/_distutils/command/register.py,sha256=2jaq9968rt2puRVDBx1HbNiXv27uOk8idE_4lPf_3VM,11712
+setuptools/_distutils/command/sdist.py,sha256=qotJjAOzyhJjq2-oDImjNFrOtaSneEFDJTB-sEk1wnU,19005
+setuptools/_distutils/command/upload.py,sha256=BLO1w7eSAqsCjCLXtf_CRVSjwF1WmyOByGVGNdcQ8oY,7597
+setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+setuptools/_vendor/ordered_set.py,sha256=dbaCcs27dyN9gnMWGF5nA_BrVn6Q-NrjKYJpV9_fgBs,15130
+setuptools/_vendor/pyparsing.py,sha256=tmrp-lu-qO1i75ZzIN5A12nKRRD1Cm4Vpk-5LR9rims,232055
+setuptools/_vendor/packaging/__about__.py,sha256=PNMsaZn4UcCHyubgROH1bl6CluduPjI5kFrSp_Zgklo,736
+setuptools/_vendor/packaging/__init__.py,sha256=6enbp5XgRfjBjsI9-bn00HjHf5TH21PDMOKkJW8xw-w,562
+setuptools/_vendor/packaging/_compat.py,sha256=MXdsGpSE_W-ZrHoC87andI4LV2FAwU7HLL-eHe_CjhU,1128
+setuptools/_vendor/packaging/_structures.py,sha256=ozkCX8Q8f2qE1Eic3YiQ4buDVfgz2iYevY9e7R2y3iY,2022
+setuptools/_vendor/packaging/_typing.py,sha256=x59EhQ57TMT-kTRyLZV25HZvYGGwbucTo6iKh_O0tMw,1812
+setuptools/_vendor/packaging/markers.py,sha256=BCCxZbt8xgysH8v5pqbLkdtQnRZHIGkJQqlNBGek4nQ,9509
+setuptools/_vendor/packaging/requirements.py,sha256=VHydZdk8m3qFxReomNwKr71cmpjantEV_xOhkEyyINI,4917
+setuptools/_vendor/packaging/specifiers.py,sha256=uYp9l13F0LcknS6d4N60ytiBgFmIhKideOq9AnsxTco,31944
+setuptools/_vendor/packaging/tags.py,sha256=NKMS37Zo_nWrZxgsD6zbXsXgc9edn9m160cBiLmHJdE,24067
+setuptools/_vendor/packaging/utils.py,sha256=RShlvnjO2CtYSD8uri32frMMFMTmB-3ihsq1-ghzLEw,1811
+setuptools/_vendor/packaging/version.py,sha256=Cnbm-OO9D_qd8ZTFxzFcjSavexSYFZmyeaoPvMsjgPc,15470
+setuptools/command/__init__.py,sha256=QCAuA9whnq8Bnoc0bBaS6Lw_KAUO0DiHYZQXEMNn5hg,568
+setuptools/command/alias.py,sha256=1sLQxZcNh6dDQpDmm4G7UGGTol83nY1NTPmNBbm2siI,2381
+setuptools/command/bdist_egg.py,sha256=9qKKLR9gtYXx84xxU4DQYTtMK__mrsaxwjUAiGQ1foY,18244
+setuptools/command/bdist_rpm.py,sha256=_4Y7tVAzu1zEuDc8tpRdE_sy3zST3h3LPTtzioos5Ck,900
+setuptools/command/bdist_wininst.py,sha256=Tmqa9wW0F8i_72KHWpu9pDdnCN6Er_8uJUs2UmCAwTA,922
+setuptools/command/build_clib.py,sha256=fWHSFGkk10VCddBWCszvNhowbG9Z9CZXVjQ2uSInoOs,4415
+setuptools/command/build_ext.py,sha256=aI_qnK9m8lULZDS6XMv_p2j2pIehVbSarb4PJHDA7dw,13027
+setuptools/command/build_py.py,sha256=10DNYiaM707UGJ-eV6YNcIKRN1pbU7UwXGYUXACrXU8,9473
+setuptools/command/develop.py,sha256=B0p5dh7VrSMdEfhdUE_AJlWk2UxAesOOY14CAV5_DEA,8045
+setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960
+setuptools/command/easy_install.py,sha256=lMTMBbwsNdf7vYOJjPCZYCUBXVqOzKsd-NCE0V76dnU,86430
+setuptools/command/egg_info.py,sha256=a-fC7SCIprafrOlFEOfAf7oxKSplW92kRm1RZkEuBGo,25495
+setuptools/command/install.py,sha256=8doMxeQEDoK4Eco0mO2WlXXzzp9QnsGJQ7Z7yWkZPG8,4705
+setuptools/command/install_egg_info.py,sha256=bMgeIeRiXzQ4DAGPV1328kcjwQjHjOWU4FngAWLV78Q,2203
+setuptools/command/install_lib.py,sha256=Uz42McsyHZAjrB6cw9E7Bz0xsaTbzxnM1PI9CBhiPtE,3875
+setuptools/command/install_scripts.py,sha256=x7sdEICuyFpaf5LuWXcTp49oYt8EeNbwKkW2Pv-TVXI,2519
+setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628
+setuptools/command/py36compat.py,sha256=7yLWzQj179Enx3pJ8V1cDDCzeLMFMd9XJXlK-iZTq5Y,4946
+setuptools/command/register.py,sha256=kk3DxXCb5lXTvqnhfwx2g6q7iwbUmgTyXUCaBooBOUk,468
+setuptools/command/rotate.py,sha256=SvsQPasezIojPjvMnfkqzh8P0U0tCj0daczF8uc3NQM,2128
+setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658
+setuptools/command/sdist.py,sha256=R1bt94i_RscMePwKS1iBSvdlpFFRfh5iCPkucsxx0oI,6970
+setuptools/command/setopt.py,sha256=LicqlXockLqBOHYPNv1J032HxoBKD4HOHB11qm_t-Bs,5051
+setuptools/command/test.py,sha256=Y4jwjdX_4DCimImq6fDWoHzBniXDNJVEcD6XxVZIYS0,9469
+setuptools/command/upload.py,sha256=XT3YFVfYPAmA5qhGg0euluU98ftxRUW-PzKcODMLxUs,462
+setuptools/command/upload_docs.py,sha256=482Bd7ksBBfzdokxjNQvnR0A6dg8XOOREg9wB5BwKtU,7151
+setuptools/extern/__init__.py,sha256=59mpPNYGozvXXbAKrxldOmHLYFyBC4aaoYihYHkKnJ0,2121
+setuptools-51.2.0.dist-info/LICENSE,sha256=2z8CRrH5J48VhFuZ_sR4uLUG63ZIeZNyL4xuJUKF-vg,1050
+setuptools-51.2.0.dist-info/METADATA,sha256=CF0akXLXDT596yt3V7w4M0RYB9ICVCo6WkMF115Q2cs,4540
+setuptools-51.2.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
+setuptools-51.2.0.dist-info/dependency_links.txt,sha256=HlkCFkoK5TbZ5EMLbLKYhLcY_E31kBWD8TqW2EgmatQ,239
+setuptools-51.2.0.dist-info/entry_points.txt,sha256=ZJ1UygxmvKwag49KPsxSSUYtfwsbIhXiYHZ0szK6CCA,3143
+setuptools-51.2.0.dist-info/top_level.txt,sha256=Xc47-_bn9IXa0F2lP6dE-l-ySe5Xmv_ihIWX-aUjAgg,54
+setuptools-51.2.0.dist-info/RECORD,,
diff --git a/third_party/python/setuptools/setuptools-51.2.0.dist-info/WHEEL b/third_party/python/setuptools/setuptools-51.2.0.dist-info/WHEEL
new file mode 100644
index 0000000000..385faab052
--- /dev/null
+++ b/third_party/python/setuptools/setuptools-51.2.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/setuptools/setuptools-51.2.0.dist-info/dependency_links.txt b/third_party/python/setuptools/setuptools-51.2.0.dist-info/dependency_links.txt
new file mode 100644
index 0000000000..e87d02103e
--- /dev/null
+++ b/third_party/python/setuptools/setuptools-51.2.0.dist-info/dependency_links.txt
@@ -0,0 +1,2 @@
+https://files.pythonhosted.org/packages/source/c/certifi/certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d
+https://files.pythonhosted.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2
diff --git a/third_party/python/setuptools/setuptools-51.2.0.dist-info/entry_points.txt b/third_party/python/setuptools/setuptools-51.2.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000..64381a4de3
--- /dev/null
+++ b/third_party/python/setuptools/setuptools-51.2.0.dist-info/entry_points.txt
@@ -0,0 +1,68 @@
+[console_scripts]
+easy_install = setuptools.command.easy_install:main
+easy_install-3.9 = setuptools.command.easy_install:main
+
+[distutils.commands]
+alias = setuptools.command.alias:alias
+bdist_egg = setuptools.command.bdist_egg:bdist_egg
+bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm
+bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst
+build_clib = setuptools.command.build_clib:build_clib
+build_ext = setuptools.command.build_ext:build_ext
+build_py = setuptools.command.build_py:build_py
+develop = setuptools.command.develop:develop
+dist_info = setuptools.command.dist_info:dist_info
+easy_install = setuptools.command.easy_install:easy_install
+egg_info = setuptools.command.egg_info:egg_info
+install = setuptools.command.install:install
+install_egg_info = setuptools.command.install_egg_info:install_egg_info
+install_lib = setuptools.command.install_lib:install_lib
+install_scripts = setuptools.command.install_scripts:install_scripts
+rotate = setuptools.command.rotate:rotate
+saveopts = setuptools.command.saveopts:saveopts
+sdist = setuptools.command.sdist:sdist
+setopt = setuptools.command.setopt:setopt
+test = setuptools.command.test:test
+upload_docs = setuptools.command.upload_docs:upload_docs
+
+[distutils.setup_keywords]
+convert_2to3_doctests = setuptools.dist:assert_string_list
+dependency_links = setuptools.dist:assert_string_list
+eager_resources = setuptools.dist:assert_string_list
+entry_points = setuptools.dist:check_entry_points
+exclude_package_data = setuptools.dist:check_package_data
+extras_require = setuptools.dist:check_extras
+include_package_data = setuptools.dist:assert_bool
+install_requires = setuptools.dist:check_requirements
+namespace_packages = setuptools.dist:check_nsp
+package_data = setuptools.dist:check_package_data
+packages = setuptools.dist:check_packages
+python_requires = setuptools.dist:check_specifier
+setup_requires = setuptools.dist:check_requirements
+test_loader = setuptools.dist:check_importable
+test_runner = setuptools.dist:check_importable
+test_suite = setuptools.dist:check_test_suite
+tests_require = setuptools.dist:check_requirements
+use_2to3 = setuptools.dist:assert_bool
+use_2to3_exclude_fixers = setuptools.dist:assert_string_list
+use_2to3_fixers = setuptools.dist:assert_string_list
+zip_safe = setuptools.dist:assert_bool
+
+[egg_info.writers]
+PKG-INFO = setuptools.command.egg_info:write_pkg_info
+dependency_links.txt = setuptools.command.egg_info:overwrite_arg
+depends.txt = setuptools.command.egg_info:warn_depends_obsolete
+eager_resources.txt = setuptools.command.egg_info:overwrite_arg
+entry_points.txt = setuptools.command.egg_info:write_entries
+namespace_packages.txt = setuptools.command.egg_info:overwrite_arg
+requires.txt = setuptools.command.egg_info:write_requirements
+top_level.txt = setuptools.command.egg_info:write_toplevel_names
+
+[setuptools.finalize_distribution_options]
+2to3_doctests = setuptools.dist:Distribution._finalize_2to3_doctests
+keywords = setuptools.dist:Distribution._finalize_setup_keywords
+parent_finalize = setuptools.dist:_Distribution.finalize_options
+
+[setuptools.installation]
+eggsecutable = setuptools.command.easy_install:bootstrap
+
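These groups can be enumerated with importlib.metadata; a hedged sketch covering both the pre- and post-3.10 stdlib APIs:

    import sys
    from importlib.metadata import entry_points

    if sys.version_info >= (3, 10):
        eps = entry_points(group="distutils.commands")       # selectable API
    else:
        eps = entry_points().get("distutils.commands", [])   # dict-of-lists API
    for ep in eps:
        print(ep.name, "->", ep.value)   # e.g. sdist -> setuptools.command.sdist:sdist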
diff --git a/third_party/python/setuptools/setuptools-51.2.0.dist-info/top_level.txt b/third_party/python/setuptools/setuptools-51.2.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..0ba4b25ad5
--- /dev/null
+++ b/third_party/python/setuptools/setuptools-51.2.0.dist-info/top_level.txt
@@ -0,0 +1,4 @@
+_distutils_hack
+easy_install
+pkg_resources
+setuptools
diff --git a/third_party/python/setuptools/setuptools/__init__.py b/third_party/python/setuptools/setuptools/__init__.py
new file mode 100644
index 0000000000..4d9b835729
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/__init__.py
@@ -0,0 +1,241 @@
+"""Extensions to the 'distutils' for large or complex distributions"""
+
+from fnmatch import fnmatchcase
+import functools
+import os
+import re
+
+import _distutils_hack.override # noqa: F401
+
+import distutils.core
+from distutils.errors import DistutilsOptionError
+from distutils.util import convert_path
+
+from ._deprecation_warning import SetuptoolsDeprecationWarning
+
+import setuptools.version
+from setuptools.extension import Extension
+from setuptools.dist import Distribution
+from setuptools.depends import Require
+from . import monkey
+
+
+__all__ = [
+ 'setup', 'Distribution', 'Command', 'Extension', 'Require',
+ 'SetuptoolsDeprecationWarning',
+ 'find_packages', 'find_namespace_packages',
+]
+
+__version__ = setuptools.version.__version__
+
+bootstrap_install_from = None
+
+# If we run 2to3 on .py files, should we also convert docstrings?
+# Default: yes; assume that we can detect doctests reliably
+run_2to3_on_doctests = True
+# Standard package names for fixer packages
+lib2to3_fixer_packages = ['lib2to3.fixes']
+
+
+class PackageFinder:
+ """
+ Generate a list of all Python packages found within a directory
+ """
+
+ @classmethod
+ def find(cls, where='.', exclude=(), include=('*',)):
+ """Return a list all Python packages found within directory 'where'
+
+ 'where' is the root directory which will be searched for packages. It
+ should be supplied as a "cross-platform" (i.e. URL-style) path; it will
+ be converted to the appropriate local path syntax.
+
+ 'exclude' is a sequence of package names to exclude; '*' can be used
+ as a wildcard in the names, such that 'foo.*' will exclude all
+ subpackages of 'foo' (but not 'foo' itself).
+
+ 'include' is a sequence of package names to include. If it's
+ specified, only the named packages will be included. If it's not
+ specified, all found packages will be included. 'include' can contain
+ shell style wildcard patterns just like 'exclude'.
+ """
+
+ return list(cls._find_packages_iter(
+ convert_path(where),
+ cls._build_filter('ez_setup', '*__pycache__', *exclude),
+ cls._build_filter(*include)))
+
+ @classmethod
+ def _find_packages_iter(cls, where, exclude, include):
+ """
+ All the packages found in 'where' that pass the 'include' filter, but
+ not the 'exclude' filter.
+ """
+ for root, dirs, files in os.walk(where, followlinks=True):
+ # Copy dirs to iterate over it, then empty dirs.
+ all_dirs = dirs[:]
+ dirs[:] = []
+
+ for dir in all_dirs:
+ full_path = os.path.join(root, dir)
+ rel_path = os.path.relpath(full_path, where)
+ package = rel_path.replace(os.path.sep, '.')
+
+ # Skip directory trees that are not valid packages
+ if ('.' in dir or not cls._looks_like_package(full_path)):
+ continue
+
+ # Should this package be included?
+ if include(package) and not exclude(package):
+ yield package
+
+ # Keep searching subdirectories, as there may be more packages
+ # down there, even if the parent was excluded.
+ dirs.append(dir)
+
+ @staticmethod
+ def _looks_like_package(path):
+ """Does a directory look like a package?"""
+ return os.path.isfile(os.path.join(path, '__init__.py'))
+
+ @staticmethod
+ def _build_filter(*patterns):
+ """
+ Given a list of patterns, return a callable that will be true only if
+ the input matches at least one of the patterns.
+ """
+ return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
+
+
+class PEP420PackageFinder(PackageFinder):
+ @staticmethod
+ def _looks_like_package(path):
+ return True
+
+
+find_packages = PackageFinder.find
+find_namespace_packages = PEP420PackageFinder.find
+
+
+def _install_setup_requires(attrs):
+ # Note: do not use `setuptools.Distribution` directly, as
+ # our PEP 517 backend patches `distutils.core.Distribution`.
+ class MinimalDistribution(distutils.core.Distribution):
+ """
+ A minimal version of a distribution for supporting the
+ fetch_build_eggs interface.
+ """
+ def __init__(self, attrs):
+ _incl = 'dependency_links', 'setup_requires'
+ filtered = {
+ k: attrs[k]
+ for k in set(_incl) & set(attrs)
+ }
+ distutils.core.Distribution.__init__(self, filtered)
+
+ def finalize_options(self):
+ """
+ Disable finalize_options to avoid building the working set.
+ Ref #2158.
+ """
+
+ dist = MinimalDistribution(attrs)
+
+ # Honor setup.cfg's options.
+ dist.parse_config_files(ignore_option_errors=True)
+ if dist.setup_requires:
+ dist.fetch_build_eggs(dist.setup_requires)
+
+
+def setup(**attrs):
+ # Make sure we have any requirements needed to interpret 'attrs'.
+ _install_setup_requires(attrs)
+ return distutils.core.setup(**attrs)
+
+
+setup.__doc__ = distutils.core.setup.__doc__
+
+
+_Command = monkey.get_unpatched(distutils.core.Command)
+
+
+class Command(_Command):
+ __doc__ = _Command.__doc__
+
+ command_consumes_arguments = False
+
+ def __init__(self, dist, **kw):
+ """
+ Construct the command for dist, updating
+ vars(self) with any keyword parameters.
+ """
+ _Command.__init__(self, dist)
+ vars(self).update(kw)
+
+ def _ensure_stringlike(self, option, what, default=None):
+ val = getattr(self, option)
+ if val is None:
+ setattr(self, option, default)
+ return default
+ elif not isinstance(val, str):
+ raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
+ % (option, what, val))
+ return val
+
+ def ensure_string_list(self, option):
+ r"""Ensure that 'option' is a list of strings. If 'option' is
+ currently a string, we split it either on /,\s*/ or /\s+/, so
+ "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
+ ["foo", "bar", "baz"].
+ """
+ val = getattr(self, option)
+ if val is None:
+ return
+ elif isinstance(val, str):
+ setattr(self, option, re.split(r',\s*|\s+', val))
+ else:
+ if isinstance(val, list):
+ ok = all(isinstance(v, str) for v in val)
+ else:
+ ok = False
+ if not ok:
+ raise DistutilsOptionError(
+ "'%s' must be a list of strings (got %r)"
+ % (option, val))
+
+ def reinitialize_command(self, command, reinit_subcommands=0, **kw):
+ cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
+ vars(cmd).update(kw)
+ return cmd
+
+
+def _find_all_simple(path):
+ """
+ Find all files under 'path'
+ """
+ results = (
+ os.path.join(base, file)
+ for base, dirs, files in os.walk(path, followlinks=True)
+ for file in files
+ )
+ return filter(os.path.isfile, results)
+
+
+def findall(dir=os.curdir):
+ """
+ Find all files under 'dir' and return the list of full filenames.
+ Unless dir is '.', return full filenames with dir prepended.
+ """
+ files = _find_all_simple(dir)
+ if dir == os.curdir:
+ make_rel = functools.partial(os.path.relpath, start=dir)
+ files = map(make_rel, files)
+ return list(files)
+
+
+class sic(str):
+ """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""
+
+
+# Apply monkey patches
+monkey.patch_all()
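A minimal sketch of the PackageFinder entry points exported above; the 'src' layout and 'mypkg' names are illustrative only:

    from setuptools import find_packages, find_namespace_packages

    # Classic packages: directories containing __init__.py, minus exclusions.
    print(find_packages(where="src", exclude=["tests", "tests.*"]))

    # PEP 420 namespace packages: every directory matches, so filter explicitly.
    print(find_namespace_packages(where="src", include=["mypkg", "mypkg.*"]))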
diff --git a/third_party/python/setuptools/setuptools/_deprecation_warning.py b/third_party/python/setuptools/setuptools/_deprecation_warning.py
new file mode 100644
index 0000000000..086b64dd38
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_deprecation_warning.py
@@ -0,0 +1,7 @@
+class SetuptoolsDeprecationWarning(Warning):
+ """
+ Base class for deprecation warnings in ``setuptools``
+
+ This class is not derived from ``DeprecationWarning``, and as such is
+ visible by default.
+ """
diff --git a/third_party/python/setuptools/setuptools/_distutils/__init__.py b/third_party/python/setuptools/setuptools/_distutils/__init__.py
new file mode 100644
index 0000000000..7dac55b601
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/__init__.py
@@ -0,0 +1,15 @@
+"""distutils
+
+The main package for the Python Module Distribution Utilities. Normally
+used from a setup script as
+
+ from distutils.core import setup
+
+ setup (...)
+"""
+
+import sys
+
+__version__ = sys.version[:sys.version.index(' ')]
+
+local = True
diff --git a/third_party/python/setuptools/setuptools/_distutils/_msvccompiler.py b/third_party/python/setuptools/setuptools/_distutils/_msvccompiler.py
new file mode 100644
index 0000000000..e9af4cf52b
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/_msvccompiler.py
@@ -0,0 +1,561 @@
+"""distutils._msvccompiler
+
+Contains MSVCCompiler, an implementation of the abstract CCompiler class
+for Microsoft Visual Studio 2015.
+
+The module is compatible with VS 2015 and later. You can find legacy support
+for older versions in distutils.msvc9compiler and distutils.msvccompiler.
+"""
+
+# Written by Perry Stoll
+# hacked by Robin Becker and Thomas Heller to do a better job of
+# finding DevStudio (through the registry)
+# ported to VS 2005 and VS 2008 by Christian Heimes
+# ported to VS 2015 by Steve Dower
+
+import os
+import subprocess
+import contextlib
+import warnings
+import unittest.mock
+with contextlib.suppress(ImportError):
+ import winreg
+
+from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
+ CompileError, LibError, LinkError
+from distutils.ccompiler import CCompiler, gen_lib_options
+from distutils import log
+from distutils.util import get_platform
+
+from itertools import count
+
+def _find_vc2015():
+ try:
+ key = winreg.OpenKeyEx(
+ winreg.HKEY_LOCAL_MACHINE,
+ r"Software\Microsoft\VisualStudio\SxS\VC7",
+ access=winreg.KEY_READ | winreg.KEY_WOW64_32KEY
+ )
+ except OSError:
+ log.debug("Visual C++ is not registered")
+ return None, None
+
+ best_version = 0
+ best_dir = None
+ with key:
+ for i in count():
+ try:
+ v, vc_dir, vt = winreg.EnumValue(key, i)
+ except OSError:
+ break
+ if v and vt == winreg.REG_SZ and os.path.isdir(vc_dir):
+ try:
+ version = int(float(v))
+ except (ValueError, TypeError):
+ continue
+ if version >= 14 and version > best_version:
+ best_version, best_dir = version, vc_dir
+ return best_version, best_dir
+
+def _find_vc2017():
+ """Returns "15, path" based on the result of invoking vswhere.exe
+ If no install is found, returns "None, None"
+
+ The version is returned to avoid unnecessarily changing the function
+ result. It may be ignored when the path is not None.
+
+ If vswhere.exe is not available, by definition, VS 2017 is not
+ installed.
+ """
+ root = os.environ.get("ProgramFiles(x86)") or os.environ.get("ProgramFiles")
+ if not root:
+ return None, None
+
+ try:
+ path = subprocess.check_output([
+ os.path.join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
+ "-latest",
+ "-prerelease",
+ "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
+ "-property", "installationPath",
+ "-products", "*",
+ ], encoding="mbcs", errors="strict").strip()
+ except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
+ return None, None
+
+ path = os.path.join(path, "VC", "Auxiliary", "Build")
+ if os.path.isdir(path):
+ return 15, path
+
+ return None, None
+
+PLAT_SPEC_TO_RUNTIME = {
+ 'x86' : 'x86',
+ 'x86_amd64' : 'x64',
+ 'x86_arm' : 'arm',
+ 'x86_arm64' : 'arm64'
+}
+
+def _find_vcvarsall(plat_spec):
+ # bpo-38597: Removed vcruntime return value
+ _, best_dir = _find_vc2017()
+
+ if not best_dir:
+ best_version, best_dir = _find_vc2015()
+
+ if not best_dir:
+ log.debug("No suitable Visual C++ version found")
+ return None, None
+
+ vcvarsall = os.path.join(best_dir, "vcvarsall.bat")
+ if not os.path.isfile(vcvarsall):
+ log.debug("%s cannot be found", vcvarsall)
+ return None, None
+
+ return vcvarsall, None
+
+def _get_vc_env(plat_spec):
+ if os.getenv("DISTUTILS_USE_SDK"):
+ return {
+ key.lower(): value
+ for key, value in os.environ.items()
+ }
+
+ vcvarsall, _ = _find_vcvarsall(plat_spec)
+ if not vcvarsall:
+ raise DistutilsPlatformError("Unable to find vcvarsall.bat")
+
+ try:
+ out = subprocess.check_output(
+ 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
+ stderr=subprocess.STDOUT,
+ ).decode('utf-16le', errors='replace')
+ except subprocess.CalledProcessError as exc:
+ log.error(exc.output)
+ raise DistutilsPlatformError("Error executing {}"
+ .format(exc.cmd))
+
+ env = {
+ key.lower(): value
+ for key, _, value in
+ (line.partition('=') for line in out.splitlines())
+ if key and value
+ }
+
+ return env
+
+def _find_exe(exe, paths=None):
+ """Return path to an MSVC executable program.
+
+ Tries to find the program in several places: first, one of the
+ MSVC program search paths from the registry; next, the directories
+ in the PATH environment variable. If any of those work, return an
+ absolute path that is known to exist. If none of them work, just
+ return the original program name, 'exe'.
+ """
+ if not paths:
+ paths = os.getenv('path').split(os.pathsep)
+ for p in paths:
+ fn = os.path.join(os.path.abspath(p), exe)
+ if os.path.isfile(fn):
+ return fn
+ return exe
+
+# A map keyed by get_platform() return values to values accepted by
+# 'vcvarsall.bat'. Always cross-compile from x86 to work with the
+# lighter-weight MSVC installs that do not include native 64-bit tools.
+PLAT_TO_VCVARS = {
+ 'win32' : 'x86',
+ 'win-amd64' : 'x86_amd64',
+ 'win-arm32' : 'x86_arm',
+ 'win-arm64' : 'x86_arm64'
+}
+
+class MSVCCompiler(CCompiler) :
+ """Concrete class that implements an interface to Microsoft Visual C++,
+ as defined by the CCompiler abstract class."""
+
+ compiler_type = 'msvc'
+
+ # Just set this so CCompiler's constructor doesn't barf. We currently
+ # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+ # as it really isn't necessary for this sort of single-compiler class.
+ # Would be nice to have a consistent interface with UnixCCompiler,
+ # though, so it's worth thinking about.
+ executables = {}
+
+ # Private class data (need to distinguish C from C++ source for compiler)
+ _c_extensions = ['.c']
+ _cpp_extensions = ['.cc', '.cpp', '.cxx']
+ _rc_extensions = ['.rc']
+ _mc_extensions = ['.mc']
+
+ # Needed for the filename generation methods provided by the
+ # base class, CCompiler.
+ src_extensions = (_c_extensions + _cpp_extensions +
+ _rc_extensions + _mc_extensions)
+ res_extension = '.res'
+ obj_extension = '.obj'
+ static_lib_extension = '.lib'
+ shared_lib_extension = '.dll'
+ static_lib_format = shared_lib_format = '%s%s'
+ exe_extension = '.exe'
+
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ CCompiler.__init__ (self, verbose, dry_run, force)
+ # target platform (.plat_name is consistent with 'bdist')
+ self.plat_name = None
+ self.initialized = False
+
+ def initialize(self, plat_name=None):
+ # multi-init means we would need to check that the platform is the same each time...
+ assert not self.initialized, "don't init multiple times"
+ if plat_name is None:
+ plat_name = get_platform()
+ # sanity check for platforms to prevent obscure errors later.
+ if plat_name not in PLAT_TO_VCVARS:
+ raise DistutilsPlatformError("--plat-name must be one of {}"
+ .format(tuple(PLAT_TO_VCVARS)))
+
+ # Get the vcvarsall.bat spec for the requested platform.
+ plat_spec = PLAT_TO_VCVARS[plat_name]
+
+ vc_env = _get_vc_env(plat_spec)
+ if not vc_env:
+ raise DistutilsPlatformError("Unable to find a compatible "
+ "Visual Studio installation.")
+
+ self._paths = vc_env.get('path', '')
+ paths = self._paths.split(os.pathsep)
+ self.cc = _find_exe("cl.exe", paths)
+ self.linker = _find_exe("link.exe", paths)
+ self.lib = _find_exe("lib.exe", paths)
+ self.rc = _find_exe("rc.exe", paths) # resource compiler
+ self.mc = _find_exe("mc.exe", paths) # message compiler
+ self.mt = _find_exe("mt.exe", paths) # manifest tool
+
+ for dir in vc_env.get('include', '').split(os.pathsep):
+ if dir:
+ self.add_include_dir(dir.rstrip(os.sep))
+
+ for dir in vc_env.get('lib', '').split(os.pathsep):
+ if dir:
+ self.add_library_dir(dir.rstrip(os.sep))
+
+ self.preprocess_options = None
+ # bpo-38597: Always compile with dynamic linking
+ # Future releases of Python 3.x will include all past
+ # versions of vcruntime*.dll for compatibility.
+ self.compile_options = [
+ '/nologo', '/Ox', '/W3', '/GL', '/DNDEBUG', '/MD'
+ ]
+
+ self.compile_options_debug = [
+ '/nologo', '/Od', '/MDd', '/Zi', '/W3', '/D_DEBUG'
+ ]
+
+ ldflags = [
+ '/nologo', '/INCREMENTAL:NO', '/LTCG'
+ ]
+
+ ldflags_debug = [
+ '/nologo', '/INCREMENTAL:NO', '/LTCG', '/DEBUG:FULL'
+ ]
+
+ self.ldflags_exe = [*ldflags, '/MANIFEST:EMBED,ID=1']
+ self.ldflags_exe_debug = [*ldflags_debug, '/MANIFEST:EMBED,ID=1']
+ self.ldflags_shared = [*ldflags, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
+ self.ldflags_shared_debug = [*ldflags_debug, '/DLL', '/MANIFEST:EMBED,ID=2', '/MANIFESTUAC:NO']
+ self.ldflags_static = [*ldflags]
+ self.ldflags_static_debug = [*ldflags_debug]
+
+ self._ldflags = {
+ (CCompiler.EXECUTABLE, None): self.ldflags_exe,
+ (CCompiler.EXECUTABLE, False): self.ldflags_exe,
+ (CCompiler.EXECUTABLE, True): self.ldflags_exe_debug,
+ (CCompiler.SHARED_OBJECT, None): self.ldflags_shared,
+ (CCompiler.SHARED_OBJECT, False): self.ldflags_shared,
+ (CCompiler.SHARED_OBJECT, True): self.ldflags_shared_debug,
+ (CCompiler.SHARED_LIBRARY, None): self.ldflags_static,
+ (CCompiler.SHARED_LIBRARY, False): self.ldflags_static,
+ (CCompiler.SHARED_LIBRARY, True): self.ldflags_static_debug,
+ }
+
+ self.initialized = True
+
+ # -- Worker methods ------------------------------------------------
+
+ def object_filenames(self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ ext_map = {
+ **{ext: self.obj_extension for ext in self.src_extensions},
+ **{ext: self.res_extension for ext in self._rc_extensions + self._mc_extensions},
+ }
+
+ output_dir = output_dir or ''
+
+ def make_out_path(p):
+ base, ext = os.path.splitext(p)
+ if strip_dir:
+ base = os.path.basename(base)
+ else:
+ _, base = os.path.splitdrive(base)
+ if base.startswith((os.path.sep, os.path.altsep)):
+ base = base[1:]
+ try:
+ # XXX: This may produce absurdly long paths. We should check
+ # the length of the result and trim base until we fit within
+ # 260 characters.
+ return os.path.join(output_dir, base + ext_map[ext])
+ except LookupError:
+ # Better to raise an exception instead of silently continuing
+ # and later complain about sources and targets having
+ # different lengths
+ raise CompileError("Don't know how to compile {}".format(p))
+
+ return list(map(make_out_path, source_filenames))
+
+
+ def compile(self, sources,
+ output_dir=None, macros=None, include_dirs=None, debug=0,
+ extra_preargs=None, extra_postargs=None, depends=None):
+
+ if not self.initialized:
+ self.initialize()
+ compile_info = self._setup_compile(output_dir, macros, include_dirs,
+ sources, depends, extra_postargs)
+ macros, objects, extra_postargs, pp_opts, build = compile_info
+
+ compile_opts = extra_preargs or []
+ compile_opts.append('/c')
+ if debug:
+ compile_opts.extend(self.compile_options_debug)
+ else:
+ compile_opts.extend(self.compile_options)
+
+
+ add_cpp_opts = False
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ if debug:
+ # pass the full pathname to MSVC in debug mode;
+ # this allows the debugger to find the source file
+ # without asking the user to browse for it
+ src = os.path.abspath(src)
+
+ if ext in self._c_extensions:
+ input_opt = "/Tc" + src
+ elif ext in self._cpp_extensions:
+ input_opt = "/Tp" + src
+ add_cpp_opts = True
+ elif ext in self._rc_extensions:
+ # compile .RC to .RES file
+ input_opt = src
+ output_opt = "/fo" + obj
+ try:
+ self.spawn([self.rc] + pp_opts + [output_opt, input_opt])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ elif ext in self._mc_extensions:
+ # Compile .MC to .RC file to .RES file.
+ # * '-h dir' specifies the directory for the
+ # generated include file
+ # * '-r dir' specifies the target directory of the
+ # generated RC file and the binary message resource
+ # it includes
+ #
+ # For now (since there are no options to change this),
+ # we use the source-directory for the include file and
+ # the build directory for the RC file and message
+ # resources. This works at least for win32all.
+ h_dir = os.path.dirname(src)
+ rc_dir = os.path.dirname(obj)
+ try:
+ # first compile .MC to .RC and .H file
+ self.spawn([self.mc, '-h', h_dir, '-r', rc_dir, src])
+ base, _ = os.path.splitext(os.path.basename (src))
+ rc_file = os.path.join(rc_dir, base + '.rc')
+ # then compile .RC to .RES file
+ self.spawn([self.rc, "/fo" + obj, rc_file])
+
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ else:
+ # how to handle this file?
+ raise CompileError("Don't know how to compile {} to {}"
+ .format(src, obj))
+
+ args = [self.cc] + compile_opts + pp_opts
+ if add_cpp_opts:
+ args.append('/EHsc')
+ args.append(input_opt)
+ args.append("/Fo" + obj)
+ args.extend(extra_postargs)
+
+ try:
+ self.spawn(args)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ return objects
+
+
+ def create_static_lib(self,
+ objects,
+ output_libname,
+ output_dir=None,
+ debug=0,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ output_filename = self.library_filename(output_libname,
+ output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ lib_args = objects + ['/OUT:' + output_filename]
+ if debug:
+ pass # XXX what goes here?
+ try:
+ log.debug('Executing "%s" %s', self.lib, ' '.join(lib_args))
+ self.spawn([self.lib] + lib_args)
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ fixed_args = self._fix_lib_args(libraries, library_dirs,
+ runtime_library_dirs)
+ libraries, library_dirs, runtime_library_dirs = fixed_args
+
+ if runtime_library_dirs:
+ self.warn("I don't know what to do with 'runtime_library_dirs': "
+ + str(runtime_library_dirs))
+
+ lib_opts = gen_lib_options(self,
+ library_dirs, runtime_library_dirs,
+ libraries)
+ if output_dir is not None:
+ output_filename = os.path.join(output_dir, output_filename)
+
+ if self._need_link(objects, output_filename):
+ ldflags = self._ldflags[target_desc, debug]
+
+ export_opts = ["/EXPORT:" + sym for sym in (export_symbols or [])]
+
+ ld_args = (ldflags + lib_opts + export_opts +
+ objects + ['/OUT:' + output_filename])
+
+ # The MSVC linker generates .lib and .exp files, which cannot be
+ # suppressed by any linker switches. The .lib files may even be
+ # needed! Make sure they are generated in the temporary build
+ # directory. Since they have different names for debug and release
+ # builds, they can go into the same directory.
+ build_temp = os.path.dirname(objects[0])
+ if export_symbols is not None:
+ (dll_name, dll_ext) = os.path.splitext(
+ os.path.basename(output_filename))
+ implib_file = os.path.join(
+ build_temp,
+ self.library_filename(dll_name))
+ ld_args.append ('/IMPLIB:' + implib_file)
+
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+
+ output_dir = os.path.dirname(os.path.abspath(output_filename))
+ self.mkpath(output_dir)
+ try:
+ log.debug('Executing "%s" %s', self.linker, ' '.join(ld_args))
+ self.spawn([self.linker] + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ def spawn(self, cmd):
+ env = dict(os.environ, PATH=self._paths)
+ with self._fallback_spawn(cmd, env) as fallback:
+ return super().spawn(cmd, env=env)
+ return fallback.value
+
+ @contextlib.contextmanager
+ def _fallback_spawn(self, cmd, env):
+ """
+ Discovered in pypa/distutils#15, some tools monkeypatch the compiler,
+ so the 'env' kwarg causes a TypeError. Detect this condition and
+ restore the legacy, unsafe behavior.
+ """
+ bag = type('Bag', (), {})()
+ try:
+ yield bag
+ except TypeError as exc:
+ if "unexpected keyword argument 'env'" not in str(exc):
+ raise
+ else:
+ return
+ warnings.warn(
+ "Fallback spawn triggered. Please update distutils monkeypatch.")
+ with unittest.mock.patch('os.environ', env):
+ bag.value = super().spawn(cmd)
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function in
+ # ccompiler.py.
+
+ def library_dir_option(self, dir):
+ return "/LIBPATH:" + dir
+
+ def runtime_library_dir_option(self, dir):
+ raise DistutilsPlatformError(
+ "don't know how to set runtime library search path for MSVC")
+
+ def library_option(self, lib):
+ return self.library_filename(lib)
+
+ def find_library_file(self, dirs, lib, debug=0):
+ # Prefer a debugging library if found (and requested), but deal
+ # with it if we don't have one.
+ if debug:
+ try_names = [lib + "_d", lib]
+ else:
+ try_names = [lib]
+ for dir in dirs:
+ for name in try_names:
+ libfile = os.path.join(dir, self.library_filename(name))
+ if os.path.isfile(libfile):
+ return libfile
+ else:
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
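A hedged, Windows-only sketch of driving this compiler class directly (the build_ext command normally does this; hello.c is an assumed input file):

    from distutils._msvccompiler import MSVCCompiler

    compiler = MSVCCompiler()
    compiler.initialize(plat_name="win-amd64")   # locates and runs vcvarsall.bat
    objs = compiler.compile(["hello.c"], output_dir="build")
    compiler.link_executable(objs, "hello", output_dir="build")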
diff --git a/third_party/python/setuptools/setuptools/_distutils/archive_util.py b/third_party/python/setuptools/setuptools/_distutils/archive_util.py
new file mode 100644
index 0000000000..565a3117b4
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/archive_util.py
@@ -0,0 +1,256 @@
+"""distutils.archive_util
+
+Utility functions for creating archive files (tarballs, zip files,
+that sort of thing)."""
+
+import os
+from warnings import warn
+import sys
+
+try:
+ import zipfile
+except ImportError:
+ zipfile = None
+
+
+from distutils.errors import DistutilsExecError
+from distutils.spawn import spawn
+from distutils.dir_util import mkpath
+from distutils import log
+
+try:
+ from pwd import getpwnam
+except ImportError:
+ getpwnam = None
+
+try:
+ from grp import getgrnam
+except ImportError:
+ getgrnam = None
+
+def _get_gid(name):
+ """Returns a gid, given a group name."""
+ if getgrnam is None or name is None:
+ return None
+ try:
+ result = getgrnam(name)
+ except KeyError:
+ result = None
+ if result is not None:
+ return result[2]
+ return None
+
+def _get_uid(name):
+ """Returns an uid, given a user name."""
+ if getpwnam is None or name is None:
+ return None
+ try:
+ result = getpwnam(name)
+ except KeyError:
+ result = None
+ if result is not None:
+ return result[2]
+ return None
+
+def make_tarball(base_name, base_dir, compress="gzip", verbose=0, dry_run=0,
+ owner=None, group=None):
+ """Create a (possibly compressed) tar file from all the files under
+ 'base_dir'.
+
+ 'compress' must be "gzip" (the default), "bzip2", "xz", "compress", or
+ None. ("compress" will be deprecated in Python 3.2)
+
+ 'owner' and 'group' can be used to define an owner and a group for the
+ archive that is being built. If not provided, the current owner and group
+ will be used.
+
+ The output tar file will be named 'base_dir' + ".tar", possibly plus
+ the appropriate compression extension (".gz", ".bz2", ".xz" or ".Z").
+
+ Returns the output filename.
+ """
+ tar_compression = {'gzip': 'gz', 'bzip2': 'bz2', 'xz': 'xz', None: '',
+ 'compress': ''}
+ compress_ext = {'gzip': '.gz', 'bzip2': '.bz2', 'xz': '.xz',
+ 'compress': '.Z'}
+
+ # flags for compression program, each element of list will be an argument
+ if compress is not None and compress not in compress_ext.keys():
+ raise ValueError(
+ "bad value for 'compress': must be None, 'gzip', 'bzip2', "
+ "'xz' or 'compress'")
+
+ archive_name = base_name + '.tar'
+ if compress != 'compress':
+ archive_name += compress_ext.get(compress, '')
+
+ mkpath(os.path.dirname(archive_name), dry_run=dry_run)
+
+ # creating the tarball
+ import tarfile # late import so Python build itself doesn't break
+
+ log.info('Creating tar archive')
+
+ uid = _get_uid(owner)
+ gid = _get_gid(group)
+
+ def _set_uid_gid(tarinfo):
+ if gid is not None:
+ tarinfo.gid = gid
+ tarinfo.gname = group
+ if uid is not None:
+ tarinfo.uid = uid
+ tarinfo.uname = owner
+ return tarinfo
+
+ if not dry_run:
+ tar = tarfile.open(archive_name, 'w|%s' % tar_compression[compress])
+ try:
+ tar.add(base_dir, filter=_set_uid_gid)
+ finally:
+ tar.close()
+
+ # compression using `compress`
+ if compress == 'compress':
+ warn("'compress' will be deprecated.", PendingDeprecationWarning)
+ # the option varies depending on the platform
+ compressed_name = archive_name + compress_ext[compress]
+ if sys.platform == 'win32':
+ cmd = [compress, archive_name, compressed_name]
+ else:
+ cmd = [compress, '-f', archive_name]
+ spawn(cmd, dry_run=dry_run)
+ return compressed_name
+
+ return archive_name
+
+def make_zipfile(base_name, base_dir, verbose=0, dry_run=0):
+ """Create a zip file from all the files under 'base_dir'.
+
+ The output zip file will be named 'base_name' + ".zip". Uses either the
+ "zipfile" Python module (if available) or the InfoZIP "zip" utility
+ (if installed and found on the default search path). If neither tool is
+ available, raises DistutilsExecError. Returns the name of the output zip
+ file.
+ """
+ zip_filename = base_name + ".zip"
+ mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+
+ # If zipfile module is not available, try spawning an external
+ # 'zip' command.
+ if zipfile is None:
+ if verbose:
+ zipoptions = "-r"
+ else:
+ zipoptions = "-rq"
+
+ try:
+ spawn(["zip", zipoptions, zip_filename, base_dir],
+ dry_run=dry_run)
+ except DistutilsExecError:
+ # XXX really should distinguish between "couldn't find
+ # external 'zip' command" and "zip failed".
+ raise DistutilsExecError(("unable to create zip file '%s': "
+ "could neither import the 'zipfile' module nor "
+ "find a standalone zip utility") % zip_filename)
+
+ else:
+ log.info("creating '%s' and adding '%s' to it",
+ zip_filename, base_dir)
+
+ if not dry_run:
+ try:
+ zip = zipfile.ZipFile(zip_filename, "w",
+ compression=zipfile.ZIP_DEFLATED)
+ except RuntimeError:
+ zip = zipfile.ZipFile(zip_filename, "w",
+ compression=zipfile.ZIP_STORED)
+
+ with zip:
+ if base_dir != os.curdir:
+ path = os.path.normpath(os.path.join(base_dir, ''))
+ zip.write(path, path)
+ log.info("adding '%s'", path)
+ for dirpath, dirnames, filenames in os.walk(base_dir):
+ for name in dirnames:
+ path = os.path.normpath(os.path.join(dirpath, name, ''))
+ zip.write(path, path)
+ log.info("adding '%s'", path)
+ for name in filenames:
+ path = os.path.normpath(os.path.join(dirpath, name))
+ if os.path.isfile(path):
+ zip.write(path, path)
+ log.info("adding '%s'", path)
+
+ return zip_filename
+
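+# Usage sketch (illustrative): make_zipfile('dist/myproj', 'build') archives
+# the ./build tree via the 'zipfile' module (or an external 'zip' utility as
+# a fallback) and returns 'dist/myproj.zip'.
+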
+ARCHIVE_FORMATS = {
+ 'gztar': (make_tarball, [('compress', 'gzip')], "gzip'ed tar-file"),
+ 'bztar': (make_tarball, [('compress', 'bzip2')], "bzip2'ed tar-file"),
+ 'xztar': (make_tarball, [('compress', 'xz')], "xz'ed tar-file"),
+ 'ztar': (make_tarball, [('compress', 'compress')], "compressed tar file"),
+ 'tar': (make_tarball, [('compress', None)], "uncompressed tar file"),
+ 'zip': (make_zipfile, [], "ZIP file")
+ }
+
+def check_archive_formats(formats):
+ """Returns the first format from the 'format' list that is unknown.
+
+ If all formats are known, returns None.
+ """
+ for format in formats:
+ if format not in ARCHIVE_FORMATS:
+ return format
+ return None
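+
+# For example (illustrative): check_archive_formats(['gztar', 'zip']) returns
+# None, while check_archive_formats(['gztar', 'rar']) returns 'rar', because
+# 'rar' is not a key of ARCHIVE_FORMATS.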
+
+def make_archive(base_name, format, root_dir=None, base_dir=None, verbose=0,
+ dry_run=0, owner=None, group=None):
+ """Create an archive file (eg. zip or tar).
+
+ 'base_name' is the name of the file to create, minus any format-specific
+ extension; 'format' is the archive format: one of "zip", "tar", "gztar",
+ "bztar", "xztar", or "ztar".
+
+ 'root_dir' is a directory that will be the root directory of the
+ archive; ie. we typically chdir into 'root_dir' before creating the
+ archive. 'base_dir' is the directory where we start archiving from;
+ ie. 'base_dir' will be the common prefix of all files and
+ directories in the archive. 'root_dir' and 'base_dir' both default
+ to the current directory. Returns the name of the archive file.
+
+ 'owner' and 'group' are used when creating a tar archive. By default,
+ uses the current owner and group.
+ """
+ save_cwd = os.getcwd()
+ if root_dir is not None:
+ log.debug("changing into '%s'", root_dir)
+ base_name = os.path.abspath(base_name)
+ if not dry_run:
+ os.chdir(root_dir)
+
+ if base_dir is None:
+ base_dir = os.curdir
+
+ kwargs = {'dry_run': dry_run}
+
+ try:
+ format_info = ARCHIVE_FORMATS[format]
+ except KeyError:
+ raise ValueError("unknown archive format '%s'" % format)
+
+ func = format_info[0]
+ for arg, val in format_info[1]:
+ kwargs[arg] = val
+
+ if format != 'zip':
+ kwargs['owner'] = owner
+ kwargs['group'] = group
+
+ try:
+ filename = func(base_name, base_dir, **kwargs)
+ finally:
+ if root_dir is not None:
+ log.debug("changing back to '%s'", save_cwd)
+ os.chdir(save_cwd)
+
+ return filename
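+
+# Usage sketch (illustrative):
+#
+#   make_archive('myproj-1.0', 'gztar', root_dir='dist')
+#
+# changes into dist/, archives its contents, restores the original cwd, and
+# returns the archive path ('myproj-1.0.tar.gz', made absolute because
+# root_dir was given).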
diff --git a/third_party/python/setuptools/setuptools/_distutils/bcppcompiler.py b/third_party/python/setuptools/setuptools/_distutils/bcppcompiler.py
new file mode 100644
index 0000000000..071fea5d03
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/bcppcompiler.py
@@ -0,0 +1,393 @@
+"""distutils.bcppcompiler
+
+Contains BorlandCCompiler, an implementation of the abstract CCompiler class
+for the Borland C++ compiler.
+"""
+
+# This implementation by Lyle Johnson, based on the original msvccompiler.py
+# module and using the directions originally published by Gordon Williams.
+
+# XXX looks like there's a LOT of overlap between these two classes:
+# someone should sit down and factor out the common code as
+# WindowsCCompiler! --GPW
+
+
+import os
+from distutils.errors import \
+ DistutilsExecError, \
+ CompileError, LibError, LinkError, UnknownFileError
+from distutils.ccompiler import \
+ CCompiler, gen_preprocess_options
+from distutils.file_util import write_file
+from distutils.dep_util import newer
+from distutils import log
+
+class BCPPCompiler(CCompiler) :
+ """Concrete class that implements an interface to the Borland C/C++
+ compiler, as defined by the CCompiler abstract class.
+ """
+
+ compiler_type = 'bcpp'
+
+ # Just set this so CCompiler's constructor doesn't barf. We currently
+ # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+ # as it really isn't necessary for this sort of single-compiler class.
+ # Would be nice to have a consistent interface with UnixCCompiler,
+ # though, so it's worth thinking about.
+ executables = {}
+
+ # Private class data (need to distinguish C from C++ source for compiler)
+ _c_extensions = ['.c']
+ _cpp_extensions = ['.cc', '.cpp', '.cxx']
+
+ # Needed for the filename generation methods provided by the
+ # base class, CCompiler.
+ src_extensions = _c_extensions + _cpp_extensions
+ obj_extension = '.obj'
+ static_lib_extension = '.lib'
+ shared_lib_extension = '.dll'
+ static_lib_format = shared_lib_format = '%s%s'
+ exe_extension = '.exe'
+
+
+ def __init__ (self,
+ verbose=0,
+ dry_run=0,
+ force=0):
+
+ CCompiler.__init__ (self, verbose, dry_run, force)
+
+ # These executables are assumed to all be in the path.
+ # Borland doesn't seem to use any special registry settings to
+ # indicate their installation locations.
+
+ self.cc = "bcc32.exe"
+ self.linker = "ilink32.exe"
+ self.lib = "tlib.exe"
+
+ self.preprocess_options = None
+ self.compile_options = ['/tWM', '/O2', '/q', '/g0']
+ self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
+
+ self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
+ self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
+ self.ldflags_static = []
+ self.ldflags_exe = ['/Gn', '/q', '/x']
+ self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
+
+
+ # -- Worker methods ------------------------------------------------
+
+ def compile(self, sources,
+ output_dir=None, macros=None, include_dirs=None, debug=0,
+ extra_preargs=None, extra_postargs=None, depends=None):
+
+ macros, objects, extra_postargs, pp_opts, build = \
+ self._setup_compile(output_dir, macros, include_dirs, sources,
+ depends, extra_postargs)
+ compile_opts = extra_preargs or []
+ compile_opts.append ('-c')
+ if debug:
+ compile_opts.extend (self.compile_options_debug)
+ else:
+ compile_opts.extend (self.compile_options)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ # XXX why do the normpath here?
+ src = os.path.normpath(src)
+ obj = os.path.normpath(obj)
+ # XXX _setup_compile() did a mkpath() too but before the normpath.
+ # Is it possible to skip the normpath?
+ self.mkpath(os.path.dirname(obj))
+
+ if ext == '.res':
+ # This is already a binary file -- skip it.
+ continue # the 'for' loop
+ if ext == '.rc':
+ # This needs to be compiled to a .res file -- do it now.
+ try:
+ self.spawn (["brcc32", "-fo", obj, src])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue # the 'for' loop
+
+ # The next two are both for the real compiler.
+ if ext in self._c_extensions:
+ input_opt = ""
+ elif ext in self._cpp_extensions:
+ input_opt = "-P"
+ else:
+ # Unknown file type -- no extra options. The compiler
+ # will probably fail, but let it run just in case this is a
+ # file the compiler recognizes even if we don't.
+ input_opt = ""
+
+ output_opt = "-o" + obj
+
+ # Compiler command line syntax is: "bcc32 [options] file(s)".
+ # Note that the source file names must appear at the end of
+ # the command line.
+ try:
+ self.spawn ([self.cc] + compile_opts + pp_opts +
+ [input_opt, output_opt] +
+ extra_postargs + [src])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ return objects
+
+ # compile ()
+
+
+ def create_static_lib (self,
+ objects,
+ output_libname,
+ output_dir=None,
+ debug=0,
+ target_lang=None):
+
+ (objects, output_dir) = self._fix_object_args (objects, output_dir)
+ output_filename = \
+ self.library_filename (output_libname, output_dir=output_dir)
+
+ if self._need_link (objects, output_filename):
+ lib_args = [output_filename, '/u'] + objects
+ if debug:
+ pass # XXX what goes here?
+ try:
+ self.spawn ([self.lib] + lib_args)
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ # create_static_lib ()
+
+
+ def link (self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+
+ # XXX this ignores 'build_temp'! should follow the lead of
+ # msvccompiler.py
+
+ (objects, output_dir) = self._fix_object_args (objects, output_dir)
+ (libraries, library_dirs, runtime_library_dirs) = \
+ self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
+
+ if runtime_library_dirs:
+ log.warn("I don't know what to do with 'runtime_library_dirs': %s",
+ str(runtime_library_dirs))
+
+ if output_dir is not None:
+ output_filename = os.path.join (output_dir, output_filename)
+
+ if self._need_link (objects, output_filename):
+
+ # Figure out linker args based on type of target.
+ if target_desc == CCompiler.EXECUTABLE:
+ startup_obj = 'c0w32'
+ if debug:
+ ld_args = self.ldflags_exe_debug[:]
+ else:
+ ld_args = self.ldflags_exe[:]
+ else:
+ startup_obj = 'c0d32'
+ if debug:
+ ld_args = self.ldflags_shared_debug[:]
+ else:
+ ld_args = self.ldflags_shared[:]
+
+
+ # Create a temporary exports file for use by the linker
+ if export_symbols is None:
+ def_file = ''
+ else:
+ head, tail = os.path.split (output_filename)
+ modname, ext = os.path.splitext (tail)
+ temp_dir = os.path.dirname(objects[0]) # preserve tree structure
+ def_file = os.path.join (temp_dir, '%s.def' % modname)
+ contents = ['EXPORTS']
+ for sym in (export_symbols or []):
+ contents.append(' %s=_%s' % (sym, sym))
+ self.execute(write_file, (def_file, contents),
+ "writing %s" % def_file)
+
+ # Borland C++ has problems with '/' in paths
+ objects2 = map(os.path.normpath, objects)
+ # split objects in .obj and .res files
+ # Borland C++ needs them at different positions in the command line
+ objects = [startup_obj]
+ resources = []
+ for file in objects2:
+ (base, ext) = os.path.splitext(os.path.normcase(file))
+ if ext == '.res':
+ resources.append(file)
+ else:
+ objects.append(file)
+
+
+ for l in library_dirs:
+ ld_args.append("/L%s" % os.path.normpath(l))
+ ld_args.append("/L.") # we sometimes use relative paths
+
+ # list of object files
+ ld_args.extend(objects)
+
+ # XXX the command-line syntax for Borland C++ is a bit wonky;
+ # certain filenames are jammed together in one big string, but
+ # comma-delimited. This doesn't mesh too well with the
+ # Unix-centric attitude (with a DOS/Windows quoting hack) of
+ # 'spawn()', so constructing the argument list is a bit
+ # awkward. Note that doing the obvious thing and jamming all
+ # the filenames and commas into one argument would be wrong,
+ # because 'spawn()' would quote any filenames with spaces in
+ # them. Arghghh! Apparently it works fine as coded...
+
+ # name of dll/exe file
+ ld_args.extend([',',output_filename])
+ # no map file and start libraries
+ ld_args.append(',,')
+
+ for lib in libraries:
+ # see if we find it and if there is a bcpp specific lib
+ # (xxx_bcpp.lib)
+ libfile = self.find_library_file(library_dirs, lib, debug)
+ if libfile is None:
+ ld_args.append(lib)
+ # probably a BCPP internal library -- don't warn
+ else:
+ # full name which prefers bcpp_xxx.lib over xxx.lib
+ ld_args.append(libfile)
+
+ # some default libraries
+ ld_args.append ('import32')
+ ld_args.append ('cw32mt')
+
+ # def file for export symbols
+ ld_args.extend([',',def_file])
+ # add resource files
+ ld_args.append(',')
+ ld_args.extend(resources)
+
+
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+
+ self.mkpath (os.path.dirname (output_filename))
+ try:
+ self.spawn ([self.linker] + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ # link ()
+
+ # -- Miscellaneous methods -----------------------------------------
+
+
+ def find_library_file (self, dirs, lib, debug=0):
+ # List of effective library names to try, in order of preference:
+ # xxx_bcpp.lib is better than xxx.lib
+ # and xxx_d.lib is better than xxx.lib if debug is set
+ #
+ # The "_bcpp" suffix is to handle a Python installation for people
+ # with multiple compilers (primarily Distutils hackers, I suspect
+ # ;-). The idea is they'd have one static library for each
+ # compiler they care about, since (almost?) every Windows compiler
+ # seems to have a different format for static libraries.
+ if debug:
+ dlib = (lib + "_d")
+ try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
+ else:
+ try_names = (lib + "_bcpp", lib)
+
+ for dir in dirs:
+ for name in try_names:
+ libfile = os.path.join(dir, self.library_filename(name))
+ if os.path.exists(libfile):
+ return libfile
+ else:
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
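+
+ # Illustrative lookup order (comment only, not executed): with debug=1
+ # and lib='foo', each directory in 'dirs' is searched for, in order:
+ # foo_d_bcpp.lib, foo_bcpp.lib, foo_d.lib, foo.lib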
+
+ # overwrite the one from CCompiler to support rc and res-files
+ def object_filenames (self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ if output_dir is None: output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+ (base, ext) = os.path.splitext (os.path.normcase(src_name))
+ if ext not in (self.src_extensions + ['.rc','.res']):
+ raise UnknownFileError("unknown file type '%s' (from '%s')" % \
+ (ext, src_name))
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext == '.res':
+ # these can go unchanged
+ obj_names.append (os.path.join (output_dir, base + ext))
+ elif ext == '.rc':
+ # these need to be compiled to .res-files
+ obj_names.append (os.path.join (output_dir, base + '.res'))
+ else:
+ obj_names.append (os.path.join (output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+ # object_filenames ()
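+
+ # For example (illustrative): object_filenames(['foo.c', 'app.rc', 'x.res'])
+ # returns ['foo.obj', 'app.res', 'x.res'] -- .rc sources map to the .res
+ # files brcc32 will produce, and .res files pass through unchanged.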
+
+ def preprocess (self,
+ source,
+ output_file=None,
+ macros=None,
+ include_dirs=None,
+ extra_preargs=None,
+ extra_postargs=None):
+
+ (_, macros, include_dirs) = \
+ self._fix_compile_args(None, macros, include_dirs)
+ pp_opts = gen_preprocess_options(macros, include_dirs)
+ pp_args = ['cpp32.exe'] + pp_opts
+ if output_file is not None:
+ pp_args.append('-o' + output_file)
+ if extra_preargs:
+ pp_args[:0] = extra_preargs
+ if extra_postargs:
+ pp_args.extend(extra_postargs)
+ pp_args.append(source)
+
+ # We need to preprocess: either we're being forced to, or the
+ # source file is newer than the target (or the target doesn't
+ # exist).
+ if self.force or output_file is None or newer(source, output_file):
+ if output_file:
+ self.mkpath(os.path.dirname(output_file))
+ try:
+ self.spawn(pp_args)
+ except DistutilsExecError as msg:
+ print(msg)
+ raise CompileError(msg)
+
+ # preprocess()
diff --git a/third_party/python/setuptools/setuptools/_distutils/ccompiler.py b/third_party/python/setuptools/setuptools/_distutils/ccompiler.py
new file mode 100644
index 0000000000..57bb94e8bb
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/ccompiler.py
@@ -0,0 +1,1116 @@
+"""distutils.ccompiler
+
+Contains CCompiler, an abstract base class that defines the interface
+for the Distutils compiler abstraction model."""
+
+import sys, os, re
+from distutils.errors import *
+from distutils.spawn import spawn
+from distutils.file_util import move_file
+from distutils.dir_util import mkpath
+from distutils.dep_util import newer_group
+from distutils.util import split_quoted, execute
+from distutils import log
+
+class CCompiler:
+ """Abstract base class to define the interface that must be implemented
+ by real compiler classes. Also has some utility methods used by
+ several compiler classes.
+
+ The basic idea behind a compiler abstraction class is that each
+ instance can be used for all the compile/link steps in building a
+ single project. Thus, attributes common to all of those compile and
+ link steps -- include directories, macros to define, libraries to link
+ against, etc. -- are attributes of the compiler instance. To allow for
+ variability in how individual files are treated, most of those
+ attributes may be varied on a per-compilation or per-link basis.
+ """
+
+ # 'compiler_type' is a class attribute that identifies this class. It
+ # keeps code that wants to know what kind of compiler it's dealing with
+ # from having to import all possible compiler classes just to do an
+ # 'isinstance'. In concrete CCompiler subclasses, 'compiler_type'
+ # should really, really be one of the keys of the 'compiler_class'
+ # dictionary (see below -- used by the 'new_compiler()' factory
+ # function) -- authors of new compiler interface classes are
+ # responsible for updating 'compiler_class'!
+ compiler_type = None
+
+ # XXX things not handled by this compiler abstraction model:
+ # * client can't provide additional options for a compiler,
+ # e.g. warning, optimization, debugging flags. Perhaps this
+ # should be the domain of concrete compiler abstraction classes
+ # (UnixCCompiler, MSVCCompiler, etc.) -- or perhaps the base
+ # class should have methods for the common ones.
+ # * can't completely override the include or library search
+ # path, ie. no "cc -I -Idir1 -Idir2" or "cc -L -Ldir1 -Ldir2".
+ # I'm not sure how widely supported this is even by Unix
+ # compilers, much less on other platforms. And I'm even less
+ # sure how useful it is; maybe for cross-compiling, but
+ # support for that is a ways off. (And anyways, cross
+ # compilers probably have a dedicated binary with the
+ # right paths compiled in. I hope.)
+ # * can't do really freaky things with the library list/library
+ # dirs, e.g. "-Ldir1 -lfoo -Ldir2 -lfoo" to link against
+ # different versions of libfoo.a in different locations. I
+ # think this is useless without the ability to null out the
+ # library search path anyways.
+
+
+ # Subclasses that rely on the standard filename generation methods
+ # implemented below should override these; see the comment near
+ # those methods ('object_filenames()' et al.) for details:
+ src_extensions = None # list of strings
+ obj_extension = None # string
+ static_lib_extension = None
+ shared_lib_extension = None # string
+ static_lib_format = None # format string
+ shared_lib_format = None # prob. same as static_lib_format
+ exe_extension = None # string
+
+ # Default language settings. language_map is used to detect a source
+ # file or Extension target language, checking source filenames.
+ # language_order is used to detect the language precedence, when deciding
+ # what language to use when mixing source types. For example, if some
+ # extension has two files with ".c" extension, and one with ".cpp", it
+ # is still linked as c++.
+ language_map = {".c" : "c",
+ ".cc" : "c++",
+ ".cpp" : "c++",
+ ".cxx" : "c++",
+ ".m" : "objc",
+ }
+ language_order = ["c++", "objc", "c"]
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ self.dry_run = dry_run
+ self.force = force
+ self.verbose = verbose
+
+ # 'output_dir': a common output directory for object, library,
+ # shared object, and shared library files
+ self.output_dir = None
+
+ # 'macros': a list of macro definitions (or undefinitions). A
+ # macro definition is a 2-tuple (name, value), where the value is
+ # either a string or None (no explicit value). A macro
+ # undefinition is a 1-tuple (name,).
+ self.macros = []
+
+ # 'include_dirs': a list of directories to search for include files
+ self.include_dirs = []
+
+ # 'libraries': a list of libraries to include in any link
+ # (library names, not filenames: eg. "foo" not "libfoo.a")
+ self.libraries = []
+
+ # 'library_dirs': a list of directories to search for libraries
+ self.library_dirs = []
+
+ # 'runtime_library_dirs': a list of directories to search for
+ # shared libraries/objects at runtime
+ self.runtime_library_dirs = []
+
+ # 'objects': a list of object files (or similar, such as explicitly
+ # named library files) to include on any link
+ self.objects = []
+
+ for key in self.executables.keys():
+ self.set_executable(key, self.executables[key])
+
+ def set_executables(self, **kwargs):
+ """Define the executables (and options for them) that will be run
+ to perform the various stages of compilation. The exact set of
+ executables that may be specified here depends on the compiler
+ class (via the 'executables' class attribute), but most will have:
+ compiler the C/C++ compiler
+ linker_so linker used to create shared objects and libraries
+ linker_exe linker used to create binary executables
+ archiver static library creator
+
+ On platforms with a command-line (Unix, DOS/Windows), each of these
+ is a string that will be split into executable name and (optional)
+ list of arguments. (Splitting the string is done similarly to how
+ Unix shells operate: words are delimited by spaces, but quotes and
+ backslashes can override this. See
+ 'distutils.util.split_quoted()'.)
+ """
+
+ # Note that some CCompiler implementation classes will define class
+ # attributes 'cpp', 'cc', etc. with hard-coded executable names;
+ # this is appropriate when a compiler class is for exactly one
+ # compiler/OS combination (eg. MSVCCompiler). Other compiler
+ # classes (UnixCCompiler, in particular) are driven by information
+ # discovered at run-time, since there are many different ways to do
+ # basically the same things with Unix C compilers.
+
+ for key in kwargs:
+ if key not in self.executables:
+ raise ValueError("unknown executable '%s' for class %s" %
+ (key, self.__class__.__name__))
+ self.set_executable(key, kwargs[key])
+
+ def set_executable(self, key, value):
+ if isinstance(value, str):
+ setattr(self, key, split_quoted(value))
+ else:
+ setattr(self, key, value)
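+
+ # For example (illustrative, for a subclass such as UnixCCompiler whose
+ # 'executables' dict has a 'compiler' key): after
+ # cc.set_executables(compiler='gcc -pthread -O2')
+ # split_quoted() leaves cc.compiler == ['gcc', '-pthread', '-O2'].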
+
+ def _find_macro(self, name):
+ i = 0
+ for defn in self.macros:
+ if defn[0] == name:
+ return i
+ i += 1
+ return None
+
+ def _check_macro_definitions(self, definitions):
+ """Ensures that every element of 'definitions' is a valid macro
+ definition, ie. either a (name, value) 2-tuple or a (name,) tuple. Do
+ nothing if all definitions are OK, raise TypeError otherwise.
+ """
+ for defn in definitions:
+ if not (isinstance(defn, tuple) and
+ (len(defn) in (1, 2) and
+ (isinstance (defn[1], str) or defn[1] is None)) and
+ isinstance (defn[0], str)):
+ raise TypeError(("invalid macro definition '%s': " % defn) + \
+ "must be tuple (string,), (string, string), or " + \
+ "(string, None)")
+
+
+ # -- Bookkeeping methods -------------------------------------------
+
+ def define_macro(self, name, value=None):
+ """Define a preprocessor macro for all compilations driven by this
+ compiler object. The optional parameter 'value' should be a
+ string; if it is not supplied, then the macro will be defined
+ without an explicit value and the exact outcome depends on the
+ compiler used (XXX true? does ANSI say anything about this?)
+ """
+ # Delete from the list of macro definitions/undefinitions if
+ # already there (so that this one will take precedence).
+ i = self._find_macro (name)
+ if i is not None:
+ del self.macros[i]
+
+ self.macros.append((name, value))
+
+ def undefine_macro(self, name):
+ """Undefine a preprocessor macro for all compilations driven by
+ this compiler object. If the same macro is defined by
+ 'define_macro()' and undefined by 'undefine_macro()' the last call
+ takes precedence (including multiple redefinitions or
+ undefinitions). If the macro is redefined/undefined on a
+ per-compilation basis (ie. in the call to 'compile()'), then that
+ takes precedence.
+ """
+ # Delete from the list of macro definitions/undefinitions if
+ # already there (so that this one will take precedence).
+ i = self._find_macro (name)
+ if i is not None:
+ del self.macros[i]
+
+ undefn = (name,)
+ self.macros.append(undefn)
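+
+ # For example (illustrative): the last definition/undefinition of a macro
+ # wins, because earlier entries are deleted before appending:
+ # cc.define_macro('PYDEBUG', '1') # self.macros: [('PYDEBUG', '1')]
+ # cc.undefine_macro('PYDEBUG') # self.macros: [('PYDEBUG',)]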
+
+ def add_include_dir(self, dir):
+ """Add 'dir' to the list of directories that will be searched for
+ header files. The compiler is instructed to search directories in
+ the order in which they are supplied by successive calls to
+ 'add_include_dir()'.
+ """
+ self.include_dirs.append(dir)
+
+ def set_include_dirs(self, dirs):
+ """Set the list of directories that will be searched to 'dirs' (a
+ list of strings). Overrides any preceding calls to
+ 'add_include_dir()'; subsequent calls to 'add_include_dir()' add
+ to the list passed to 'set_include_dirs()'. This does not affect
+ any list of standard include directories that the compiler may
+ search by default.
+ """
+ self.include_dirs = dirs[:]
+
+ def add_library(self, libname):
+ """Add 'libname' to the list of libraries that will be included in
+ all links driven by this compiler object. Note that 'libname'
+ should *not* be the name of a file containing a library, but the
+ name of the library itself: the actual filename will be inferred by
+ the linker, the compiler, or the compiler class (depending on the
+ platform).
+
+ The linker will be instructed to link against libraries in the
+ order they were supplied to 'add_library()' and/or
+ 'set_libraries()'. It is perfectly valid to duplicate library
+ names; the linker will be instructed to link against libraries as
+ many times as they are mentioned.
+ """
+ self.libraries.append(libname)
+
+ def set_libraries(self, libnames):
+ """Set the list of libraries to be included in all links driven by
+ this compiler object to 'libnames' (a list of strings). This does
+ not affect any standard system libraries that the linker may
+ include by default.
+ """
+ self.libraries = libnames[:]
+
+ def add_library_dir(self, dir):
+ """Add 'dir' to the list of directories that will be searched for
+ libraries specified to 'add_library()' and 'set_libraries()'. The
+ linker will be instructed to search for libraries in the order they
+ are supplied to 'add_library_dir()' and/or 'set_library_dirs()'.
+ """
+ self.library_dirs.append(dir)
+
+ def set_library_dirs(self, dirs):
+ """Set the list of library search directories to 'dirs' (a list of
+ strings). This does not affect any standard library search path
+ that the linker may search by default.
+ """
+ self.library_dirs = dirs[:]
+
+ def add_runtime_library_dir(self, dir):
+ """Add 'dir' to the list of directories that will be searched for
+ shared libraries at runtime.
+ """
+ self.runtime_library_dirs.append(dir)
+
+ def set_runtime_library_dirs(self, dirs):
+ """Set the list of directories to search for shared libraries at
+ runtime to 'dirs' (a list of strings). This does not affect any
+ standard search path that the runtime linker may search by
+ default.
+ """
+ self.runtime_library_dirs = dirs[:]
+
+ def add_link_object(self, object):
+ """Add 'object' to the list of object files (or analogues, such as
+ explicitly named library files or the output of "resource
+ compilers") to be included in every link driven by this compiler
+ object.
+ """
+ self.objects.append(object)
+
+ def set_link_objects(self, objects):
+ """Set the list of object files (or analogues) to be included in
+ every link to 'objects'. This does not affect any standard object
+ files that the linker may include by default (such as system
+ libraries).
+ """
+ self.objects = objects[:]
+
+
+ # -- Private utility methods --------------------------------------
+ # (here for the convenience of subclasses)
+
+ # Helper method to prep compiler in subclass compile() methods
+
+ def _setup_compile(self, outdir, macros, incdirs, sources, depends,
+ extra):
+ """Process arguments and decide which source files to compile."""
+ if outdir is None:
+ outdir = self.output_dir
+ elif not isinstance(outdir, str):
+ raise TypeError("'output_dir' must be a string or None")
+
+ if macros is None:
+ macros = self.macros
+ elif isinstance(macros, list):
+ macros = macros + (self.macros or [])
+ else:
+ raise TypeError("'macros' (if supplied) must be a list of tuples")
+
+ if incdirs is None:
+ incdirs = self.include_dirs
+ elif isinstance(incdirs, (list, tuple)):
+ incdirs = list(incdirs) + (self.include_dirs or [])
+ else:
+ raise TypeError(
+ "'include_dirs' (if supplied) must be a list of strings")
+
+ if extra is None:
+ extra = []
+
+ # Get the list of expected output (object) files
+ objects = self.object_filenames(sources, strip_dir=0,
+ output_dir=outdir)
+ assert len(objects) == len(sources)
+
+ pp_opts = gen_preprocess_options(macros, incdirs)
+
+ build = {}
+ for i in range(len(sources)):
+ src = sources[i]
+ obj = objects[i]
+ ext = os.path.splitext(src)[1]
+ self.mkpath(os.path.dirname(obj))
+ build[obj] = (src, ext)
+
+ return macros, objects, extra, pp_opts, build
+
+ def _get_cc_args(self, pp_opts, debug, before):
+ # works for unixccompiler, cygwinccompiler
+ cc_args = pp_opts + ['-c']
+ if debug:
+ cc_args[:0] = ['-g']
+ if before:
+ cc_args[:0] = before
+ return cc_args
+
+ def _fix_compile_args(self, output_dir, macros, include_dirs):
+ """Typecheck and fix-up some of the arguments to the 'compile()'
+ method, and return fixed-up values. Specifically: if 'output_dir'
+ is None, replaces it with 'self.output_dir'; ensures that 'macros'
+ is a list, and augments it with 'self.macros'; ensures that
+ 'include_dirs' is a list, and augments it with 'self.include_dirs'.
+ Guarantees that the returned values are of the correct type,
+ i.e. for 'output_dir' either string or None, and for 'macros' and
+ 'include_dirs' either list or None.
+ """
+ if output_dir is None:
+ output_dir = self.output_dir
+ elif not isinstance(output_dir, str):
+ raise TypeError("'output_dir' must be a string or None")
+
+ if macros is None:
+ macros = self.macros
+ elif isinstance(macros, list):
+ macros = macros + (self.macros or [])
+ else:
+ raise TypeError("'macros' (if supplied) must be a list of tuples")
+
+ if include_dirs is None:
+ include_dirs = self.include_dirs
+ elif isinstance(include_dirs, (list, tuple)):
+ include_dirs = list(include_dirs) + (self.include_dirs or [])
+ else:
+ raise TypeError(
+ "'include_dirs' (if supplied) must be a list of strings")
+
+ return output_dir, macros, include_dirs
+
+ def _prep_compile(self, sources, output_dir, depends=None):
+ """Decide which souce files must be recompiled.
+
+ Determine the list of object files corresponding to 'sources',
+ and figure out which ones really need to be recompiled.
+ Return a list of all object files and a dictionary telling
+ which source files can be skipped.
+ """
+ # Get the list of expected output (object) files
+ objects = self.object_filenames(sources, output_dir=output_dir)
+ assert len(objects) == len(sources)
+
+ # Return an empty dict for the "which source files can be skipped"
+ # return value to preserve API compatibility.
+ return objects, {}
+
+ def _fix_object_args(self, objects, output_dir):
+ """Typecheck and fix up some arguments supplied to various methods.
+ Specifically: ensure that 'objects' is a list; if output_dir is
+ None, replace with self.output_dir. Return fixed versions of
+ 'objects' and 'output_dir'.
+ """
+ if not isinstance(objects, (list, tuple)):
+ raise TypeError("'objects' must be a list or tuple of strings")
+ objects = list(objects)
+
+ if output_dir is None:
+ output_dir = self.output_dir
+ elif not isinstance(output_dir, str):
+ raise TypeError("'output_dir' must be a string or None")
+
+ return (objects, output_dir)
+
+ def _fix_lib_args(self, libraries, library_dirs, runtime_library_dirs):
+ """Typecheck and fix up some of the arguments supplied to the
+ 'link_*' methods. Specifically: ensure that all arguments are
+ lists, and augment them with their permanent versions
+ (eg. 'self.libraries' augments 'libraries'). Return a tuple with
+ fixed versions of all arguments.
+ """
+ if libraries is None:
+ libraries = self.libraries
+ elif isinstance(libraries, (list, tuple)):
+ libraries = list (libraries) + (self.libraries or [])
+ else:
+ raise TypeError(
+ "'libraries' (if supplied) must be a list of strings")
+
+ if library_dirs is None:
+ library_dirs = self.library_dirs
+ elif isinstance(library_dirs, (list, tuple)):
+ library_dirs = list (library_dirs) + (self.library_dirs or [])
+ else:
+ raise TypeError(
+ "'library_dirs' (if supplied) must be a list of strings")
+
+ if runtime_library_dirs is None:
+ runtime_library_dirs = self.runtime_library_dirs
+ elif isinstance(runtime_library_dirs, (list, tuple)):
+ runtime_library_dirs = (list(runtime_library_dirs) +
+ (self.runtime_library_dirs or []))
+ else:
+ raise TypeError("'runtime_library_dirs' (if supplied) "
+ "must be a list of strings")
+
+ return (libraries, library_dirs, runtime_library_dirs)
+
+ def _need_link(self, objects, output_file):
+ """Return true if we need to relink the files listed in 'objects'
+ to recreate 'output_file'.
+ """
+ if self.force:
+ return True
+ else:
+ if self.dry_run:
+ newer = newer_group (objects, output_file, missing='newer')
+ else:
+ newer = newer_group (objects, output_file)
+ return newer
+
+ def detect_language(self, sources):
+ """Detect the language of a given file, or list of files. Uses
+ language_map and language_order to do the job.
+ """
+ if not isinstance(sources, list):
+ sources = [sources]
+ lang = None
+ index = len(self.language_order)
+ for source in sources:
+ base, ext = os.path.splitext(source)
+ extlang = self.language_map.get(ext)
+ try:
+ extindex = self.language_order.index(extlang)
+ if extindex < index:
+ lang = extlang
+ index = extindex
+ except ValueError:
+ pass
+ return lang
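+
+ # For example (illustrative): mixed C/C++ sources resolve to the
+ # higher-precedence entry in language_order:
+ # cc.detect_language(['main.c', 'helper.cpp']) # -> 'c++'
+ # cc.detect_language('module.m') # -> 'objc'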
+
+
+ # -- Worker methods ------------------------------------------------
+ # (must be implemented by subclasses)
+
+ def preprocess(self, source, output_file=None, macros=None,
+ include_dirs=None, extra_preargs=None, extra_postargs=None):
+ """Preprocess a single C/C++ source file, named in 'source'.
+ Output will be written to file named 'output_file', or stdout if
+ 'output_file' not supplied. 'macros' is a list of macro
+ definitions as for 'compile()', which will augment the macros set
+ with 'define_macro()' and 'undefine_macro()'. 'include_dirs' is a
+ list of directory names that will be added to the default list.
+
+ Raises PreprocessError on failure.
+ """
+ pass
+
+ def compile(self, sources, output_dir=None, macros=None,
+ include_dirs=None, debug=0, extra_preargs=None,
+ extra_postargs=None, depends=None):
+ """Compile one or more source files.
+
+ 'sources' must be a list of filenames, most likely C/C++
+ files, but in reality anything that can be handled by a
+ particular compiler and compiler class (eg. MSVCCompiler can
+ handle resource files in 'sources'). Return a list of object
+ filenames, one per source filename in 'sources'. Depending on
+ the implementation, not all source files will necessarily be
+ compiled, but all corresponding object filenames will be
+ returned.
+
+ If 'output_dir' is given, object files will be put under it, while
+ retaining their original path component. That is, "foo/bar.c"
+ normally compiles to "foo/bar.o" (for a Unix implementation); if
+ 'output_dir' is "build", then it would compile to
+ "build/foo/bar.o".
+
+ 'macros', if given, must be a list of macro definitions. A macro
+ definition is either a (name, value) 2-tuple or a (name,) 1-tuple.
+ The former defines a macro; if the value is None, the macro is
+ defined without an explicit value. The 1-tuple case undefines a
+ macro. Later definitions/redefinitions/undefinitions take
+ precedence.
+
+ 'include_dirs', if given, must be a list of strings, the
+ directories to add to the default include file search path for this
+ compilation only.
+
+ 'debug' is a boolean; if true, the compiler will be instructed to
+ output debug symbols in (or alongside) the object file(s).
+
+ 'extra_preargs' and 'extra_postargs' are implementation-dependent.
+ On platforms that have the notion of a command-line (e.g. Unix,
+ DOS/Windows), they are most likely lists of strings: extra
+ command-line arguments to prepend/append to the compiler command
+ line. On other platforms, consult the implementation class
+ documentation. In any event, they are intended as an escape hatch
+ for those occasions when the abstract compiler framework doesn't
+ cut the mustard.
+
+ 'depends', if given, is a list of filenames that all targets
+ depend on. If a source file is older than any file in
+ depends, then the source file will be recompiled. This
+ supports dependency tracking, but only at a coarse
+ granularity.
+
+ Raises CompileError on failure.
+ """
+ # A concrete compiler class can either override this method
+ # entirely or implement _compile().
+ macros, objects, extra_postargs, pp_opts, build = \
+ self._setup_compile(output_dir, macros, include_dirs, sources,
+ depends, extra_postargs)
+ cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
+
+ # Return *all* object filenames, not just the ones we just built.
+ return objects
+
+ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ """Compile 'src' to product 'obj'."""
+ # A concrete compiler class that does not override compile()
+ # should implement _compile().
+ pass
+
+ def create_static_lib(self, objects, output_libname, output_dir=None,
+ debug=0, target_lang=None):
+ """Link a bunch of stuff together to create a static library file.
+ The "bunch of stuff" consists of the list of object files supplied
+ as 'objects', the extra object files supplied to
+ 'add_link_object()' and/or 'set_link_objects()', the libraries
+ supplied to 'add_library()' and/or 'set_libraries()', and the
+ libraries supplied as 'libraries' (if any).
+
+ 'output_libname' should be a library name, not a filename; the
+ filename will be inferred from the library name. 'output_dir' is
+ the directory where the library file will be put.
+
+ 'debug' is a boolean; if true, debugging information will be
+ included in the library (note that on most platforms, it is the
+ compile step where this matters: the 'debug' flag is included here
+ just for consistency).
+
+ 'target_lang' is the target language for which the given objects
+ are being compiled. This allows specific linkage time treatment of
+ certain languages.
+
+ Raises LibError on failure.
+ """
+ pass
+
+
+ # values for target_desc parameter in link()
+ SHARED_OBJECT = "shared_object"
+ SHARED_LIBRARY = "shared_library"
+ EXECUTABLE = "executable"
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ """Link a bunch of stuff together to create an executable or
+ shared library file.
+
+ The "bunch of stuff" consists of the list of object files supplied
+ as 'objects'. 'output_filename' should be a filename. If
+ 'output_dir' is supplied, 'output_filename' is relative to it
+ (i.e. 'output_filename' can provide directory components if
+ needed).
+
+ 'libraries' is a list of libraries to link against. These are
+ library names, not filenames, since they're translated into
+ filenames in a platform-specific way (eg. "foo" becomes "libfoo.a"
+ on Unix and "foo.lib" on DOS/Windows). However, they can include a
+ directory component, which means the linker will look in that
+ specific directory rather than searching all the normal locations.
+
+ 'library_dirs', if supplied, should be a list of directories to
+ search for libraries that were specified as bare library names
+ (ie. no directory component). These are on top of the system
+ default and those supplied to 'add_library_dir()' and/or
+ 'set_library_dirs()'. 'runtime_library_dirs' is a list of
+ directories that will be embedded into the shared library and used
+ to search for other shared libraries that *it* depends on at
+ run-time. (This may only be relevant on Unix.)
+
+ 'export_symbols' is a list of symbols that the shared library will
+ export. (This appears to be relevant only on Windows.)
+
+ 'debug' is as for 'compile()' and 'create_static_lib()', with the
+ slight distinction that it actually matters on most platforms (as
+ opposed to 'create_static_lib()', which includes a 'debug' flag
+ mostly for form's sake).
+
+ 'extra_preargs' and 'extra_postargs' are as for 'compile()' (except
+ of course that they supply command-line arguments for the
+ particular linker being used).
+
+ 'target_lang' is the target language for which the given objects
+ are being compiled. This allows specific linkage time treatment of
+ certain languages.
+
+ Raises LinkError on failure.
+ """
+ raise NotImplementedError
+
+
+ # Old 'link_*()' methods, rewritten to use the new 'link()' method.
+
+ def link_shared_lib(self,
+ objects,
+ output_libname,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ self.link(CCompiler.SHARED_LIBRARY, objects,
+ self.library_filename(output_libname, lib_type='shared'),
+ output_dir,
+ libraries, library_dirs, runtime_library_dirs,
+ export_symbols, debug,
+ extra_preargs, extra_postargs, build_temp, target_lang)
+
+
+ def link_shared_object(self,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+ self.link(CCompiler.SHARED_OBJECT, objects,
+ output_filename, output_dir,
+ libraries, library_dirs, runtime_library_dirs,
+ export_symbols, debug,
+ extra_preargs, extra_postargs, build_temp, target_lang)
+
+
+ def link_executable(self,
+ objects,
+ output_progname,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ target_lang=None):
+ self.link(CCompiler.EXECUTABLE, objects,
+ self.executable_filename(output_progname), output_dir,
+ libraries, library_dirs, runtime_library_dirs, None,
+ debug, extra_preargs, extra_postargs, None, target_lang)
+
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function; there is
+ # no appropriate default implementation so subclasses should
+ # implement all of these.
+
+ def library_dir_option(self, dir):
+ """Return the compiler option to add 'dir' to the list of
+ directories searched for libraries.
+ """
+ raise NotImplementedError
+
+ def runtime_library_dir_option(self, dir):
+ """Return the compiler option to add 'dir' to the list of
+ directories searched for runtime libraries.
+ """
+ raise NotImplementedError
+
+ def library_option(self, lib):
+ """Return the compiler option to add 'lib' to the list of libraries
+ linked into the shared library or executable.
+ """
+ raise NotImplementedError
+
+ def has_function(self, funcname, includes=None, include_dirs=None,
+ libraries=None, library_dirs=None):
+ """Return a boolean indicating whether funcname is supported on
+ the current platform. The optional arguments can be used to
+ augment the compilation environment.
+ """
+ # this can't be included at module scope because it tries to
+ # import math which might not be available at that point - maybe
+ # the necessary logic should just be inlined?
+ import tempfile
+ if includes is None:
+ includes = []
+ if include_dirs is None:
+ include_dirs = []
+ if libraries is None:
+ libraries = []
+ if library_dirs is None:
+ library_dirs = []
+ fd, fname = tempfile.mkstemp(".c", funcname, text=True)
+ f = os.fdopen(fd, "w")
+ try:
+ for incl in includes:
+ f.write("""#include "%s"\n""" % incl)
+ f.write("""\
+int main (int argc, char **argv) {
+ %s();
+ return 0;
+}
+""" % funcname)
+ finally:
+ f.close()
+ try:
+ objects = self.compile([fname], include_dirs=include_dirs)
+ except CompileError:
+ return False
+
+ try:
+ self.link_executable(objects, "a.out",
+ libraries=libraries,
+ library_dirs=library_dirs)
+ except (LinkError, TypeError):
+ return False
+ return True
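+
+ # Usage sketch (illustrative; the result depends on the toolchain at hand):
+ # cc = new_compiler()
+ # cc.has_function('printf', includes=['stdio.h']) # typically True on Unix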
+
+ def find_library_file (self, dirs, lib, debug=0):
+ """Search the specified list of directories for a static or shared
+ library file 'lib' and return the full path to that file. If
+ 'debug' true, look for a debugging version (if that makes sense on
+ the current platform). Return None if 'lib' wasn't found in any of
+ the specified directories.
+ """
+ raise NotImplementedError
+
+ # -- Filename generation methods -----------------------------------
+
+ # The default implementation of the filename generating methods are
+ # prejudiced towards the Unix/DOS/Windows view of the world:
+ # * object files are named by replacing the source file extension
+ # (eg. .c/.cpp -> .o/.obj)
+ # * library files (shared or static) are named by plugging the
+ # library name and extension into a format string, eg.
+ # "lib%s.%s" % (lib_name, ".a") for Unix static libraries
+ # * executables are named by appending an extension (possibly
+ # empty) to the program name: eg. progname + ".exe" for
+ # Windows
+ #
+ # To reduce redundant code, these methods expect to find
+ # several attributes in the current object (presumably defined
+ # as class attributes):
+ # * src_extensions -
+ # list of C/C++ source file extensions, eg. ['.c', '.cpp']
+ # * obj_extension -
+ # object file extension, eg. '.o' or '.obj'
+ # * static_lib_extension -
+ # extension for static library files, eg. '.a' or '.lib'
+ # * shared_lib_extension -
+ # extension for shared library/object files, eg. '.so', '.dll'
+ # * static_lib_format -
+ # format string for generating static library filenames,
+ # eg. 'lib%s.%s' or '%s.%s'
+ # * shared_lib_format
+ # format string for generating shared library filenames
+ # (probably same as static_lib_format, since the extension
+ # is one of the intended parameters to the format string)
+ # * exe_extension -
+ # extension for executable files, eg. '' or '.exe'
+
+ def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
+ if output_dir is None:
+ output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ base, ext = os.path.splitext(src_name)
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if ext not in self.src_extensions:
+ raise UnknownFileError(
+ "unknown file type '%s' (from '%s')" % (ext, src_name))
+ if strip_dir:
+ base = os.path.basename(base)
+ obj_names.append(os.path.join(output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+ def shared_object_filename(self, basename, strip_dir=0, output_dir=''):
+ assert output_dir is not None
+ if strip_dir:
+ basename = os.path.basename(basename)
+ return os.path.join(output_dir, basename + self.shared_lib_extension)
+
+ def executable_filename(self, basename, strip_dir=0, output_dir=''):
+ assert output_dir is not None
+ if strip_dir:
+ basename = os.path.basename(basename)
+ return os.path.join(output_dir, basename + (self.exe_extension or ''))
+
+ def library_filename(self, libname, lib_type='static', # or 'shared'
+ strip_dir=0, output_dir=''):
+ assert output_dir is not None
+ if lib_type not in ("static", "shared", "dylib", "xcode_stub"):
+ raise ValueError(
+ "'lib_type' must be \"static\", \"shared\", \"dylib\", or \"xcode_stub\"")
+ fmt = getattr(self, lib_type + "_lib_format")
+ ext = getattr(self, lib_type + "_lib_extension")
+
+ dir, base = os.path.split(libname)
+ filename = fmt % (base, ext)
+ if strip_dir:
+ dir = ''
+
+ return os.path.join(output_dir, dir, filename)
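+
+ # For example (illustrative, assuming the usual Unix class attributes
+ # static_lib_format = 'lib%s%s' and static_lib_extension = '.a'):
+ # cc.library_filename('foo') # -> 'libfoo.a'
+ # cc.library_filename('sub/foo') # -> 'sub/libfoo.a'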
+
+
+ # -- Utility methods -----------------------------------------------
+
+ def announce(self, msg, level=1):
+ log.debug(msg)
+
+ def debug_print(self, msg):
+ from distutils.debug import DEBUG
+ if DEBUG:
+ print(msg)
+
+ def warn(self, msg):
+ sys.stderr.write("warning: %s\n" % msg)
+
+ def execute(self, func, args, msg=None, level=1):
+ execute(func, args, msg, self.dry_run)
+
+ def spawn(self, cmd, **kwargs):
+ spawn(cmd, dry_run=self.dry_run, **kwargs)
+
+ def move_file(self, src, dst):
+ return move_file(src, dst, dry_run=self.dry_run)
+
+ def mkpath (self, name, mode=0o777):
+ mkpath(name, mode, dry_run=self.dry_run)
+
+
+# Map a sys.platform/os.name ('posix', 'nt') to the default compiler
+# type for that platform. Keys are interpreted as re match
+# patterns. Order is important; platform mappings are preferred over
+# OS names.
+_default_compilers = (
+
+ # Platform string mappings
+
+ # on a cygwin built python we can use gcc like an ordinary UNIXish
+ # compiler
+ ('cygwin.*', 'unix'),
+
+ # OS name mappings
+ ('posix', 'unix'),
+ ('nt', 'msvc'),
+
+ )
+
+def get_default_compiler(osname=None, platform=None):
+ """Determine the default compiler to use for the given platform.
+
+ osname should be one of the standard Python OS names (i.e. the
+ ones returned by os.name) and platform the common value
+ returned by sys.platform for the platform in question.
+
+ The default values are os.name and sys.platform in case the
+ parameters are not given.
+ """
+ if osname is None:
+ osname = os.name
+ if platform is None:
+ platform = sys.platform
+ for pattern, compiler in _default_compilers:
+ if re.match(pattern, platform) is not None or \
+ re.match(pattern, osname) is not None:
+ return compiler
+ # Default to Unix compiler
+ return 'unix'
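+
+# For example (illustrative):
+#
+#   get_default_compiler('posix', 'linux')  -> 'unix'
+#   get_default_compiler('posix', 'cygwin') -> 'unix'
+#   get_default_compiler('nt', 'win32')     -> 'msvc'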
+
+# Map compiler types to (module_name, class_name) pairs -- ie. where to
+# find the code that implements an interface to this compiler. (The module
+# is assumed to be in the 'distutils' package.)
+compiler_class = { 'unix': ('unixccompiler', 'UnixCCompiler',
+ "standard UNIX-style compiler"),
+ 'msvc': ('_msvccompiler', 'MSVCCompiler',
+ "Microsoft Visual C++"),
+ 'cygwin': ('cygwinccompiler', 'CygwinCCompiler',
+ "Cygwin port of GNU C Compiler for Win32"),
+ 'mingw32': ('cygwinccompiler', 'Mingw32CCompiler',
+ "Mingw32 port of GNU C Compiler for Win32"),
+ 'bcpp': ('bcppcompiler', 'BCPPCompiler',
+ "Borland C++ Compiler"),
+ }
+
+def show_compilers():
+ """Print list of available compilers (used by the "--help-compiler"
+ options to "build", "build_ext", "build_clib").
+ """
+ # XXX this "knows" that the compiler option it's describing is
+ # "--compiler", which just happens to be the case for the three
+ # commands that use it.
+ from distutils.fancy_getopt import FancyGetopt
+ compilers = []
+ for compiler in compiler_class.keys():
+ compilers.append(("compiler="+compiler, None,
+ compiler_class[compiler][2]))
+ compilers.sort()
+ pretty_printer = FancyGetopt(compilers)
+ pretty_printer.print_help("List of available compilers:")
+
+
+def new_compiler(plat=None, compiler=None, verbose=0, dry_run=0, force=0):
+ """Generate an instance of some CCompiler subclass for the supplied
+ platform/compiler combination. 'plat' defaults to 'os.name'
+ (eg. 'posix', 'nt'), and 'compiler' defaults to the default compiler
+ for that platform. Currently only 'posix' and 'nt' are supported, and
+ the default compilers are "traditional Unix interface" (UnixCCompiler
+ class) and Visual C++ (MSVCCompiler class). Note that it's perfectly
+ possible to ask for a Unix compiler object under Windows, and a
+ Microsoft compiler object under Unix -- if you supply a value for
+ 'compiler', 'plat' is ignored.
+ """
+ if plat is None:
+ plat = os.name
+
+ try:
+ if compiler is None:
+ compiler = get_default_compiler(plat)
+
+ (module_name, class_name, long_description) = compiler_class[compiler]
+ except KeyError:
+ msg = "don't know how to compile C/C++ code on platform '%s'" % plat
+ if compiler is not None:
+ msg = msg + " with '%s' compiler" % compiler
+ raise DistutilsPlatformError(msg)
+
+ try:
+ module_name = "distutils." + module_name
+ __import__ (module_name)
+ module = sys.modules[module_name]
+ klass = vars(module)[class_name]
+ except ImportError:
+ raise DistutilsModuleError(
+ "can't compile C/C++ code: unable to load module '%s'" % \
+ module_name)
+ except KeyError:
+ raise DistutilsModuleError(
+ "can't compile C/C++ code: unable to find class '%s' "
+ "in module '%s'" % (class_name, module_name))
+
+ # XXX The None is necessary to preserve backwards compatibility
+ # with classes that expect verbose to be the first positional
+ # argument.
+ return klass(None, dry_run, force)
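+
+# Usage sketch (illustrative): the returned object drives a complete
+# compile-and-link cycle through the CCompiler interface:
+#
+#   cc = new_compiler()              # 'unix' on posix, 'msvc' on nt
+#   cc.define_macro('NDEBUG')
+#   objs = cc.compile(['foo.c'])
+#   cc.link_executable(objs, 'foo')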
+
+
+def gen_preprocess_options(macros, include_dirs):
+ """Generate C pre-processor options (-D, -U, -I) as used by at least
+ two types of compilers: the typical Unix compiler and Visual C++.
+ 'macros' is the usual thing, a list of 1- or 2-tuples, where (name,)
+ means undefine (-U) macro 'name', and (name,value) means define (-D)
+ macro 'name' to 'value'. 'include_dirs' is just a list of directory
+ names to be added to the header file search path (-I). Returns a list
+ of command-line options suitable for either Unix compilers or Visual
+ C++.
+ """
+ # XXX it would be nice (mainly aesthetic, and so we don't generate
+ # stupid-looking command lines) to go over 'macros' and eliminate
+ # redundant definitions/undefinitions (ie. ensure that only the
+ # latest mention of a particular macro winds up on the command
+ # line). I don't think it's essential, though, since most (all?)
+ # Unix C compilers only pay attention to the latest -D or -U
+ # mention of a macro on their command line. Similar situation for
+ # 'include_dirs'. I'm punting on both for now. Anyways, weeding out
+ # redundancies like this should probably be the province of
+ # CCompiler, since the data structures used are inherited from it
+ # and therefore common to all CCompiler classes.
+ pp_opts = []
+ for macro in macros:
+ if not (isinstance(macro, tuple) and 1 <= len(macro) <= 2):
+ raise TypeError(
+ "bad macro definition '%s': "
+ "each element of 'macros' list must be a 1- or 2-tuple"
+ % macro)
+
+ if len(macro) == 1: # undefine this macro
+ pp_opts.append("-U%s" % macro[0])
+ elif len(macro) == 2:
+ if macro[1] is None: # define with no explicit value
+ pp_opts.append("-D%s" % macro[0])
+ else:
+ # XXX *don't* need to be clever about quoting the
+ # macro value here, because we're going to avoid the
+ # shell at all costs when we spawn the command!
+ pp_opts.append("-D%s=%s" % macro)
+
+ for dir in include_dirs:
+ pp_opts.append("-I%s" % dir)
+ return pp_opts
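+
+# For example (illustrative):
+#
+#   gen_preprocess_options([('NDEBUG', None), ('VERSION', '2'), ('OLD',)],
+#                          ['include'])
+#   -> ['-DNDEBUG', '-DVERSION=2', '-UOLD', '-Iinclude']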
+
+
+def gen_lib_options (compiler, library_dirs, runtime_library_dirs, libraries):
+ """Generate linker options for searching library directories and
+ linking with specific libraries. 'libraries' and 'library_dirs' are,
+ respectively, lists of library names (not filenames!) and search
+ directories. Returns a list of command-line options suitable for use
+ with some compiler (depending on the two format strings passed in).
+ """
+ lib_opts = []
+
+ for dir in library_dirs:
+ lib_opts.append(compiler.library_dir_option(dir))
+
+ for dir in runtime_library_dirs:
+ opt = compiler.runtime_library_dir_option(dir)
+ if isinstance(opt, list):
+ lib_opts = lib_opts + opt
+ else:
+ lib_opts.append(opt)
+
+ # XXX it's important that we *not* remove redundant library mentions!
+ # sometimes you really do have to say "-lfoo -lbar -lfoo" in order to
+ # resolve all symbols. I just hope we never have to say "-lfoo obj.o
+ # -lbar" to get things to work -- that's certainly a possibility, but a
+ # pretty nasty way to arrange your C code.
+
+ for lib in libraries:
+ (lib_dir, lib_name) = os.path.split(lib)
+ if lib_dir:
+ lib_file = compiler.find_library_file([lib_dir], lib_name)
+ if lib_file:
+ lib_opts.append(lib_file)
+ else:
+ compiler.warn("no library file corresponding to "
+ "'%s' found (skipping)" % lib)
+ else:
+ lib_opts.append(compiler.library_option (lib))
+ return lib_opts
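+
+# For example (illustrative, for a Unix-style compiler whose
+# library_dir_option() and library_option() emit -L and -l flags):
+#
+#   gen_lib_options(cc, ['/usr/lib'], [], ['foo'])  -> ['-L/usr/lib', '-lfoo']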
diff --git a/third_party/python/setuptools/setuptools/_distutils/cmd.py b/third_party/python/setuptools/setuptools/_distutils/cmd.py
new file mode 100644
index 0000000000..dba3191e58
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/cmd.py
@@ -0,0 +1,403 @@
+"""distutils.cmd
+
+Provides the Command class, the base class for the command classes
+in the distutils.command package.
+"""
+
+import sys, os, re
+from distutils.errors import DistutilsOptionError
+from distutils import util, dir_util, file_util, archive_util, dep_util
+from distutils import log
+
+class Command:
+ """Abstract base class for defining command classes, the "worker bees"
+ of the Distutils. A useful analogy for command classes is to think of
+ them as subroutines with local variables called "options". The options
+ are "declared" in 'initialize_options()' and "defined" (given their
+ final values, aka "finalized") in 'finalize_options()', both of which
+ must be defined by every command class. The distinction between the
+ two is necessary because option values might come from the outside
+ world (command line, config file, ...), and any options dependent on
+ other options must be computed *after* these outside influences have
+ been processed -- hence 'finalize_options()'. The "body" of the
+ subroutine, where it does all its work based on the values of its
+ options, is the 'run()' method, which must also be implemented by every
+ command class.
+ """
+
+ # 'sub_commands' formalizes the notion of a "family" of commands,
+ # eg. "install" as the parent with sub-commands "install_lib",
+ # "install_headers", etc. The parent of a family of commands
+ # defines 'sub_commands' as a class attribute; it's a list of
+ # (command_name : string, predicate : unbound_method | string | None)
+ # tuples, where 'predicate' is a method of the parent command that
+ # determines whether the corresponding command is applicable in the
+ # current situation. (E.g. "install_headers" is only applicable if
+ # we have any C header files to install.) If 'predicate' is None,
+ # that command is always applicable.
+ #
+ # 'sub_commands' is usually defined at the *end* of a class, because
+ # predicates can be unbound methods, so they must already have been
+ # defined. The canonical example is the "install" command.
+ sub_commands = []
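+
+ # For example (sketch), the "install" command declares, roughly:
+ #
+ # sub_commands = [('install_lib',     has_lib),
+ #                 ('install_headers', has_headers),
+ #                 ...]
+ #
+ # where has_lib() and friends are predicate methods of install.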
+
+
+ # -- Creation/initialization methods -------------------------------
+
+ def __init__(self, dist):
+ """Create and initialize a new Command object. Most importantly,
+ invokes the 'initialize_options()' method, which is the real
+ initializer and depends on the actual command being
+ instantiated.
+ """
+ # late import because of mutual dependence between these classes
+ from distutils.dist import Distribution
+
+ if not isinstance(dist, Distribution):
+ raise TypeError("dist must be a Distribution instance")
+ if self.__class__ is Command:
+ raise RuntimeError("Command is an abstract class")
+
+ self.distribution = dist
+ self.initialize_options()
+
+ # Per-command versions of the global flags, so that the user can
+ # customize Distutils' behaviour command-by-command and let some
+ # commands fall back on the Distribution's behaviour. None means
+ # "not defined, check self.distribution's copy", while 0 or 1 mean
+ # false and true (duh). Note that this means figuring out the real
+ # value of each flag is a touch complicated -- hence "self._dry_run"
+ # will be handled by __getattr__, below.
+ # XXX This needs to be fixed.
+ self._dry_run = None
+
+ # verbose is largely ignored, but needs to be set for
+ # backwards compatibility (I think)?
+ self.verbose = dist.verbose
+
+ # Some commands define a 'self.force' option to ignore file
+ # timestamps, but methods defined *here* assume that
+ # 'self.force' exists for all commands. So define it here
+ # just to be safe.
+ self.force = None
+
+ # The 'help' flag is just used for command-line parsing, so
+ # none of that complicated bureaucracy is needed.
+ self.help = 0
+
+ # 'finalized' records whether or not 'finalize_options()' has been
+ # called. 'finalize_options()' itself should not pay attention to
+ # this flag: it is the business of 'ensure_finalized()', which
+ # always calls 'finalize_options()', to respect/update it.
+ self.finalized = 0
+
+ # XXX A more explicit way to customize dry_run would be better.
+ def __getattr__(self, attr):
+ if attr == 'dry_run':
+ myval = getattr(self, "_" + attr)
+ if myval is None:
+ return getattr(self.distribution, attr)
+ else:
+ return myval
+ else:
+ raise AttributeError(attr)
+
+ def ensure_finalized(self):
+ if not self.finalized:
+ self.finalize_options()
+ self.finalized = 1
+
+ # Subclasses must define:
+ # initialize_options()
+ # provide default values for all options; may be customized by
+ # setup script, by options from config file(s), or by command-line
+ # options
+ # finalize_options()
+ # decide on the final values for all options; this is called
+ # after all possible intervention from the outside world
+ # (command-line, option file, etc.) has been processed
+ # run()
+ # run the command: do whatever it is we're here to do,
+ # controlled by the command's various option values
+
+ def initialize_options(self):
+ """Set default values for all the options that this command
+ supports. Note that these defaults may be overridden by other
+ commands, by the setup script, by config files, or by the
+ command-line. Thus, this is not the place to code dependencies
+ between options; generally, 'initialize_options()' implementations
+ are just a bunch of "self.foo = None" assignments.
+
+ This method must be implemented by all command classes.
+ """
+ raise RuntimeError("abstract method -- subclass %s must override"
+ % self.__class__)
+
+ def finalize_options(self):
+ """Set final values for all the options that this command supports.
+ This is always called as late as possible, ie. after any option
+ assignments from the command-line or from other commands have been
+ done. Thus, this is the place to code option dependencies: if
+ 'foo' depends on 'bar', then it is safe to set 'foo' from 'bar' as
+ long as 'foo' still has the same value it was assigned in
+ 'initialize_options()'.
+
+ This method must be implemented by all command classes.
+ """
+ raise RuntimeError("abstract method -- subclass %s must override"
+ % self.__class__)
+
+
+ def dump_options(self, header=None, indent=""):
+ from distutils.fancy_getopt import longopt_xlate
+ if header is None:
+ header = "command options for '%s':" % self.get_command_name()
+ self.announce(indent + header, level=log.INFO)
+ indent = indent + " "
+ for (option, _, _) in self.user_options:
+ option = option.translate(longopt_xlate)
+ if option[-1] == "=":
+ option = option[:-1]
+ value = getattr(self, option)
+ self.announce(indent + "%s = %s" % (option, value),
+ level=log.INFO)
+
+ def run(self):
+ """A command's raison d'etre: carry out the action it exists to
+ perform, controlled by the options initialized in
+ 'initialize_options()', customized by other commands, the setup
+ script, the command-line, and config files, and finalized in
+ 'finalize_options()'. All terminal output and filesystem
+ interaction should be done by 'run()'.
+
+ This method must be implemented by all command classes.
+ """
+ raise RuntimeError("abstract method -- subclass %s must override"
+ % self.__class__)
+
+ def announce(self, msg, level=1):
+ """If the current verbosity level is of greater than or equal to
+ 'level' print 'msg' to stdout.
+ """
+ log.log(level, msg)
+
+ def debug_print(self, msg):
+ """Print 'msg' to stdout if the global DEBUG (taken from the
+ DISTUTILS_DEBUG environment variable) flag is true.
+ """
+ from distutils.debug import DEBUG
+ if DEBUG:
+ print(msg)
+ sys.stdout.flush()
+
+
+ # -- Option validation methods -------------------------------------
+ # (these are very handy in writing the 'finalize_options()' method)
+ #
+ # NB. the general philosophy here is to ensure that a particular option
+ # value meets certain type and value constraints. If not, we try to
+ # force it into conformance (eg. if we expect a list but have a string,
+ # split the string on comma and/or whitespace). If we can't force the
+ # option into conformance, raise DistutilsOptionError. Thus, command
+ # classes need do nothing more than (eg.)
+ # self.ensure_string_list('foo')
+ # and they can be guaranteed that thereafter, self.foo will be
+ # a list of strings.
+
+ def _ensure_stringlike(self, option, what, default=None):
+ val = getattr(self, option)
+ if val is None:
+ setattr(self, option, default)
+ return default
+ elif not isinstance(val, str):
+ raise DistutilsOptionError("'%s' must be a %s (got `%s`)"
+ % (option, what, val))
+ return val
+
+ def ensure_string(self, option, default=None):
+ """Ensure that 'option' is a string; if not defined, set it to
+ 'default'.
+ """
+ self._ensure_stringlike(option, "string", default)
+
+ def ensure_string_list(self, option):
+ r"""Ensure that 'option' is a list of strings. If 'option' is
+ currently a string, we split it either on /,\s*/ or /\s+/, so
+ "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
+ ["foo", "bar", "baz"].
+ """
+ val = getattr(self, option)
+ if val is None:
+ return
+ elif isinstance(val, str):
+ setattr(self, option, re.split(r',\s*|\s+', val))
+ else:
+ if isinstance(val, list):
+ ok = all(isinstance(v, str) for v in val)
+ else:
+ ok = False
+ if not ok:
+ raise DistutilsOptionError(
+ "'%s' must be a list of strings (got %r)"
+ % (option, val))
+
+ def _ensure_tested_string(self, option, tester, what, error_fmt,
+ default=None):
+ val = self._ensure_stringlike(option, what, default)
+ if val is not None and not tester(val):
+ raise DistutilsOptionError(("error in '%s' option: " + error_fmt)
+ % (option, val))
+
+ def ensure_filename(self, option):
+ """Ensure that 'option' is the name of an existing file."""
+ self._ensure_tested_string(option, os.path.isfile,
+ "filename",
+ "'%s' does not exist or is not a file")
+
+ def ensure_dirname(self, option):
+ self._ensure_tested_string(option, os.path.isdir,
+ "directory name",
+ "'%s' does not exist or is not a directory")
+
+
+ # -- Convenience methods for commands ------------------------------
+
+ def get_command_name(self):
+ if hasattr(self, 'command_name'):
+ return self.command_name
+ else:
+ return self.__class__.__name__
+
+ def set_undefined_options(self, src_cmd, *option_pairs):
+ """Set the values of any "undefined" options from corresponding
+ option values in some other command object. "Undefined" here means
+ "is None", which is the convention used to indicate that an option
+ has not been changed between 'initialize_options()' and
+ 'finalize_options()'. Usually called from 'finalize_options()' for
+ options that depend on some other command rather than another
+ option of the same command. 'src_cmd' is the other command from
+ which option values will be taken (a command object will be created
+ for it if necessary); the remaining arguments are
+ '(src_option,dst_option)' tuples which mean "take the value of
+ 'src_option' in the 'src_cmd' command object, and copy it to
+ 'dst_option' in the current command object".
+ """
+ # Option_pairs: list of (src_option, dst_option) tuples
+ src_cmd_obj = self.distribution.get_command_obj(src_cmd)
+ src_cmd_obj.ensure_finalized()
+ for (src_option, dst_option) in option_pairs:
+ if getattr(self, dst_option) is None:
+ setattr(self, dst_option, getattr(src_cmd_obj, src_option))
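+
+ # For instance (editor's sketch): a command whose 'build_base' should
+ # mirror the 'build' command's would call, from finalize_options():
+ #
+ # self.set_undefined_options('build', ('build_base', 'build_base'))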
+
+ def get_finalized_command(self, command, create=1):
+ """Wrapper around Distribution's 'get_command_obj()' method: find
+ (create if necessary and 'create' is true) the command object for
+ 'command', call its 'ensure_finalized()' method, and return the
+ finalized command object.
+ """
+ cmd_obj = self.distribution.get_command_obj(command, create)
+ cmd_obj.ensure_finalized()
+ return cmd_obj
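+
+ # E.g. (sketch): build = self.get_finalized_command('build') returns
+ # the finalized 'build' command object, whose options (build_base,
+ # plat_name, ...) can then be read -- the pattern the bdist commands
+ # in this patch use.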
+
+ # XXX rename to 'get_reinitialized_command()'? (should do the
+ # same in dist.py, if so)
+ def reinitialize_command(self, command, reinit_subcommands=0):
+ return self.distribution.reinitialize_command(command,
+ reinit_subcommands)
+
+ def run_command(self, command):
+ """Run some other command: uses the 'run_command()' method of
+ Distribution, which creates and finalizes the command object if
+ necessary and then invokes its 'run()' method.
+ """
+ self.distribution.run_command(command)
+
+ def get_sub_commands(self):
+ """Determine the sub-commands that are relevant in the current
+ distribution (i.e., that need to be run). This is based on the
+ 'sub_commands' class attribute: each tuple in that list may include
+ a method that we call to determine if the subcommand needs to be
+ run for the current distribution. Return a list of command names.
+ """
+ commands = []
+ for (cmd_name, method) in self.sub_commands:
+ if method is None or method(self):
+ commands.append(cmd_name)
+ return commands
+
+
+ # -- External world manipulation -----------------------------------
+
+ def warn(self, msg):
+ log.warn("warning: %s: %s\n", self.get_command_name(), msg)
+
+ def execute(self, func, args, msg=None, level=1):
+ util.execute(func, args, msg, dry_run=self.dry_run)
+
+ def mkpath(self, name, mode=0o777):
+ dir_util.mkpath(name, mode, dry_run=self.dry_run)
+
+ def copy_file(self, infile, outfile, preserve_mode=1, preserve_times=1,
+ link=None, level=1):
+ """Copy a file respecting verbose, dry-run and force flags. (The
+ former two default to whatever is in the Distribution object, and
+ the latter defaults to false for commands that don't define it.)"""
+ return file_util.copy_file(infile, outfile, preserve_mode,
+ preserve_times, not self.force, link,
+ dry_run=self.dry_run)
+
+ def copy_tree(self, infile, outfile, preserve_mode=1, preserve_times=1,
+ preserve_symlinks=0, level=1):
+ """Copy an entire directory tree respecting verbose, dry-run,
+ and force flags.
+ """
+ return dir_util.copy_tree(infile, outfile, preserve_mode,
+ preserve_times, preserve_symlinks,
+ not self.force, dry_run=self.dry_run)
+
+ def move_file(self, src, dst, level=1):
+ """Move a file respecting dry-run flag."""
+ return file_util.move_file(src, dst, dry_run=self.dry_run)
+
+ def spawn(self, cmd, search_path=1, level=1):
+ """Spawn an external command respecting dry-run flag."""
+ from distutils.spawn import spawn
+ spawn(cmd, search_path, dry_run=self.dry_run)
+
+ def make_archive(self, base_name, format, root_dir=None, base_dir=None,
+ owner=None, group=None):
+ return archive_util.make_archive(base_name, format, root_dir, base_dir,
+ dry_run=self.dry_run,
+ owner=owner, group=group)
+
+ def make_file(self, infiles, outfile, func, args,
+ exec_msg=None, skip_msg=None, level=1):
+ """Special case of 'execute()' for operations that process one or
+ more input files and generate one output file. Works just like
+ 'execute()', except the operation is skipped and a different
+ message printed if 'outfile' already exists and is newer than all
+ files listed in 'infiles'. If the command defines 'self.force',
+ and it is true, then the command is run unconditionally, with no
+ timestamp checks.
+ """
+ if skip_msg is None:
+ skip_msg = "skipping %s (inputs unchanged)" % outfile
+
+ # Allow 'infiles' to be a single string
+ if isinstance(infiles, str):
+ infiles = (infiles,)
+ elif not isinstance(infiles, (list, tuple)):
+ raise TypeError(
+ "'infiles' must be a string, or a list or tuple of strings")
+
+ if exec_msg is None:
+ exec_msg = "generating %s from %s" % (outfile, ', '.join(infiles))
+
+ # If 'outfile' must be regenerated (either because it doesn't
+ # exist, is out-of-date, or the 'force' flag is true) then
+ # perform the action that presumably regenerates it
+ if self.force or dep_util.newer_group(infiles, outfile):
+ self.execute(func, args, exec_msg, level)
+ # Otherwise, print the "skip" message
+ else:
+ log.debug(skip_msg)
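+
+ # Illustrative call (editor's sketch; 'run_yacc' is hypothetical):
+ # regenerate a parser only when the grammar has changed:
+ #
+ # self.make_file('parse.y', 'parse.c', run_yacc, ('parse.y', 'parse.c'),
+ #                exec_msg="generating parse.c from parse.y")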
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/__init__.py b/third_party/python/setuptools/setuptools/_distutils/command/__init__.py
new file mode 100644
index 0000000000..481eea9fd4
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/__init__.py
@@ -0,0 +1,31 @@
+"""distutils.command
+
+Package containing implementation of all the standard Distutils
+commands."""
+
+__all__ = ['build',
+ 'build_py',
+ 'build_ext',
+ 'build_clib',
+ 'build_scripts',
+ 'clean',
+ 'install',
+ 'install_lib',
+ 'install_headers',
+ 'install_scripts',
+ 'install_data',
+ 'sdist',
+ 'register',
+ 'bdist',
+ 'bdist_dumb',
+ 'bdist_rpm',
+ 'bdist_wininst',
+ 'check',
+ 'upload',
+ # These two are reserved for future use:
+ #'bdist_sdux',
+ #'bdist_pkgtool',
+ # Note:
+ # bdist_packager is not included because it only provides
+ # an abstract base class
+ ]
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/bdist.py b/third_party/python/setuptools/setuptools/_distutils/command/bdist.py
new file mode 100644
index 0000000000..014871d280
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/bdist.py
@@ -0,0 +1,143 @@
+"""distutils.command.bdist
+
+Implements the Distutils 'bdist' command (create a built [binary]
+distribution)."""
+
+import os
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import get_platform
+
+
+def show_formats():
+ """Print list of available formats (arguments to "--format" option).
+ """
+ from distutils.fancy_getopt import FancyGetopt
+ formats = []
+ for format in bdist.format_commands:
+ formats.append(("formats=" + format, None,
+ bdist.format_command[format][1]))
+ pretty_printer = FancyGetopt(formats)
+ pretty_printer.print_help("List of available distribution formats:")
+
+
+class bdist(Command):
+
+ description = "create a built (binary) distribution"
+
+ user_options = [('bdist-base=', 'b',
+ "temporary directory for creating built distributions"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform()),
+ ('formats=', None,
+ "formats for distribution (comma-separated list)"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in "
+ "[default: dist]"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file"
+ " [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file"
+ " [default: current group]"),
+ ]
+
+ boolean_options = ['skip-build']
+
+ help_options = [
+ ('help-formats', None,
+ "lists available distribution formats", show_formats),
+ ]
+
+ # The following commands do not take a format option from bdist
+ no_format_option = ('bdist_rpm',)
+
+ # This won't do in reality: will need to distinguish RPM-ish Linux,
+ # Debian-ish Linux, Solaris, FreeBSD, ..., Windows, Mac OS.
+ default_format = {'posix': 'gztar',
+ 'nt': 'zip'}
+
+ # Establish the preferred order (for the --help-formats option).
+ format_commands = ['rpm', 'gztar', 'bztar', 'xztar', 'ztar', 'tar',
+ 'wininst', 'zip', 'msi']
+
+ # And the real information.
+ format_command = {'rpm': ('bdist_rpm', "RPM distribution"),
+ 'gztar': ('bdist_dumb', "gzip'ed tar file"),
+ 'bztar': ('bdist_dumb', "bzip2'ed tar file"),
+ 'xztar': ('bdist_dumb', "xz'ed tar file"),
+ 'ztar': ('bdist_dumb', "compressed tar file"),
+ 'tar': ('bdist_dumb', "tar file"),
+ 'wininst': ('bdist_wininst',
+ "Windows executable installer"),
+ 'zip': ('bdist_dumb', "ZIP file"),
+ 'msi': ('bdist_msi', "Microsoft Installer")
+ }
+
+
+ def initialize_options(self):
+ self.bdist_base = None
+ self.plat_name = None
+ self.formats = None
+ self.dist_dir = None
+ self.skip_build = 0
+ self.group = None
+ self.owner = None
+
+ def finalize_options(self):
+ # have to finalize 'plat_name' before 'bdist_base'
+ if self.plat_name is None:
+ if self.skip_build:
+ self.plat_name = get_platform()
+ else:
+ self.plat_name = self.get_finalized_command('build').plat_name
+
+ # 'bdist_base' -- parent of per-built-distribution-format
+ # temporary directories (eg. we'll probably have
+ # "build/bdist.<plat>/dumb", "build/bdist.<plat>/rpm", etc.)
+ if self.bdist_base is None:
+ build_base = self.get_finalized_command('build').build_base
+ self.bdist_base = os.path.join(build_base,
+ 'bdist.' + self.plat_name)
+
+ self.ensure_string_list('formats')
+ if self.formats is None:
+ try:
+ self.formats = [self.default_format[os.name]]
+ except KeyError:
+ raise DistutilsPlatformError(
+ "don't know how to create built distributions "
+ "on platform %s" % os.name)
+
+ if self.dist_dir is None:
+ self.dist_dir = "dist"
+
+ def run(self):
+ # Figure out which sub-commands we need to run.
+ commands = []
+ for format in self.formats:
+ try:
+ commands.append(self.format_command[format][0])
+ except KeyError:
+ raise DistutilsOptionError("invalid format '%s'" % format)
+
+ # Reinitialize and run each command.
+ for i in range(len(self.formats)):
+ cmd_name = commands[i]
+ sub_cmd = self.reinitialize_command(cmd_name)
+ if cmd_name not in self.no_format_option:
+ sub_cmd.format = self.formats[i]
+
+ # passing the owner and group names for tar archiving
+ if cmd_name == 'bdist_dumb':
+ sub_cmd.owner = self.owner
+ sub_cmd.group = self.group
+
+ # If we're going to need to run this command again, tell it to
+ # keep its temporary files around so subsequent runs go faster.
+ if cmd_name in commands[i+1:]:
+ sub_cmd.keep_temp = 1
+ self.run_command(cmd_name)
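+
+ # Illustrative run (editor's sketch):
+ # python setup.py bdist --formats=gztar,zip
+ # maps both formats to bdist_dumb, so the first pass sets keep_temp
+ # and the second reuses the pseudo-installation tree.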
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/bdist_dumb.py b/third_party/python/setuptools/setuptools/_distutils/command/bdist_dumb.py
new file mode 100644
index 0000000000..f0d6b5b8cd
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/bdist_dumb.py
@@ -0,0 +1,123 @@
+"""distutils.command.bdist_dumb
+
+Implements the Distutils 'bdist_dumb' command (create a "dumb" built
+distribution -- i.e., just an archive to be unpacked under $prefix or
+$exec_prefix)."""
+
+import os
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import remove_tree, ensure_relative
+from distutils.errors import *
+from distutils.sysconfig import get_python_version
+from distutils import log
+
+class bdist_dumb(Command):
+
+ description = "create a \"dumb\" built distribution"
+
+ user_options = [('bdist-dir=', 'd',
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform()),
+ ('format=', 'f',
+ "archive format to create (tar, gztar, bztar, xztar, "
+ "ztar, zip)"),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('relative', None,
+ "build the archive using relative paths "
+ "(default: false)"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file"
+ " [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file"
+ " [default: current group]"),
+ ]
+
+ boolean_options = ['keep-temp', 'skip-build', 'relative']
+
+ default_format = { 'posix': 'gztar',
+ 'nt': 'zip' }
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.format = None
+ self.keep_temp = 0
+ self.dist_dir = None
+ self.skip_build = None
+ self.relative = 0
+ self.owner = None
+ self.group = None
+
+ def finalize_options(self):
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'dumb')
+
+ if self.format is None:
+ try:
+ self.format = self.default_format[os.name]
+ except KeyError:
+ raise DistutilsPlatformError(
+ "don't know how to create dumb built distributions "
+ "on platform %s" % os.name)
+
+ self.set_undefined_options('bdist',
+ ('dist_dir', 'dist_dir'),
+ ('plat_name', 'plat_name'),
+ ('skip_build', 'skip_build'))
+
+ def run(self):
+ if not self.skip_build:
+ self.run_command('build')
+
+ install = self.reinitialize_command('install', reinit_subcommands=1)
+ install.root = self.bdist_dir
+ install.skip_build = self.skip_build
+ install.warn_dir = 0
+
+ log.info("installing to %s", self.bdist_dir)
+ self.run_command('install')
+
+ # And make an archive relative to the root of the
+ # pseudo-installation tree.
+ archive_basename = "%s.%s" % (self.distribution.get_fullname(),
+ self.plat_name)
+
+ pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
+ if not self.relative:
+ archive_root = self.bdist_dir
+ else:
+ if (self.distribution.has_ext_modules() and
+ (install.install_base != install.install_platbase)):
+ raise DistutilsPlatformError(
+ "can't make a dumb built distribution where "
+ "base and platbase are different (%s, %s)"
+ % (repr(install.install_base),
+ repr(install.install_platbase)))
+ else:
+ archive_root = os.path.join(self.bdist_dir,
+ ensure_relative(install.install_base))
+
+ # Make the archive
+ filename = self.make_archive(pseudoinstall_root,
+ self.format, root_dir=archive_root,
+ owner=self.owner, group=self.group)
+ if self.distribution.has_ext_modules():
+ pyversion = get_python_version()
+ else:
+ pyversion = 'any'
+ self.distribution.dist_files.append(('bdist_dumb', pyversion,
+ filename))
+
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
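+
+ # Illustrative outcome (editor's sketch): on a POSIX system,
+ # python setup.py bdist_dumb
+ # defaults to the gztar format and leaves something like
+ # dist/<fullname>.<plat>.tar.gz, to be unpacked under the prefix.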
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/bdist_msi.py b/third_party/python/setuptools/setuptools/_distutils/command/bdist_msi.py
new file mode 100644
index 0000000000..0863a1883e
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/bdist_msi.py
@@ -0,0 +1,749 @@
+# Copyright (C) 2005, 2006 Martin von Löwis
+# Licensed to PSF under a Contributor Agreement.
+# Based on the bdist_wininst command.
+"""
+Implements the bdist_msi command.
+"""
+
+import os
+import sys
+import warnings
+from distutils.core import Command
+from distutils.dir_util import remove_tree
+from distutils.sysconfig import get_python_version
+from distutils.version import StrictVersion
+from distutils.errors import DistutilsOptionError
+from distutils.util import get_platform
+from distutils import log
+import msilib
+from msilib import schema, sequence, text
+from msilib import Directory, Feature, Dialog, add_data
+
+class PyDialog(Dialog):
+ """Dialog class with a fixed layout: controls at the top, then a ruler,
+ then a list of buttons: back, next, cancel. Optionally a bitmap at the
+ left."""
+ def __init__(self, *args, **kw):
+ """Dialog(database, name, x, y, w, h, attributes, title, first,
+ default, cancel, bitmap=true)"""
+ Dialog.__init__(self, *args)
+ ruler = self.h - 36
+ bmwidth = 152*ruler/328
+ #if kw.get("bitmap", True):
+ # self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin")
+ self.line("BottomLine", 0, ruler, self.w, 0)
+
+ def title(self, title):
+ "Set the title text of the dialog at the top."
+ # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix,
+ # text, in VerdanaBold10
+ self.text("Title", 15, 10, 320, 60, 0x30003,
+ r"{\VerdanaBold10}%s" % title)
+
+ def back(self, title, next, name = "Back", active = 1):
+ """Add a back button with a given title, the tab-next button,
+ its name in the Control table, possibly initially disabled.
+
+ Return the button, so that events can be associated"""
+ if active:
+ flags = 3 # Visible|Enabled
+ else:
+ flags = 1 # Visible
+ return self.pushbutton(name, 180, self.h-27, 56, 17, flags, title, next)
+
+ def cancel(self, title, next, name = "Cancel", active = 1):
+ """Add a cancel button with a given title, the tab-next button,
+ its name in the Control table, possibly initially disabled.
+
+ Return the button, so that events can be associated"""
+ if active:
+ flags = 3 # Visible|Enabled
+ else:
+ flags = 1 # Visible
+ return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next)
+
+ def next(self, title, next, name = "Next", active = 1):
+ """Add a Next button with a given title, the tab-next button,
+ its name in the Control table, possibly initially disabled.
+
+ Return the button, so that events can be associated"""
+ if active:
+ flags = 3 # Visible|Enabled
+ else:
+ flags = 1 # Visible
+ return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next)
+
+ def xbutton(self, name, title, next, xpos):
+ """Add a button with a given title, the tab-next button,
+ its name in the Control table, giving its x position; the
+ y-position is aligned with the other buttons.
+
+ Return the button, so that events can be associated"""
+ return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next)
+
+class bdist_msi(Command):
+
+ description = "create a Microsoft Installer (.msi) binary distribution"
+
+ user_options = [('bdist-dir=', None,
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform()),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('target-version=', None,
+ "require a specific python version" +
+ " on the target system"),
+ ('no-target-compile', 'c',
+ "do not compile .py to .pyc on the target system"),
+ ('no-target-optimize', 'o',
+ "do not compile .py to .pyo (optimized) "
+ "on the target system"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('install-script=', None,
+ "basename of installation script to be run after "
+ "installation or before deinstallation"),
+ ('pre-install-script=', None,
+ "Fully qualified filename of a script to be run before "
+ "any files are installed. This script need not be in the "
+ "distribution"),
+ ]
+
+ boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
+ 'skip-build']
+
+ all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4',
+ '2.5', '2.6', '2.7', '2.8', '2.9',
+ '3.0', '3.1', '3.2', '3.3', '3.4',
+ '3.5', '3.6', '3.7', '3.8', '3.9']
+ other_version = 'X'
+
+ def __init__(self, *args, **kw):
+ super().__init__(*args, **kw)
+ warnings.warn("bdist_msi command is deprecated since Python 3.9, "
+ "use bdist_wheel (wheel packages) instead",
+ DeprecationWarning, 2)
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.keep_temp = 0
+ self.no_target_compile = 0
+ self.no_target_optimize = 0
+ self.target_version = None
+ self.dist_dir = None
+ self.skip_build = None
+ self.install_script = None
+ self.pre_install_script = None
+ self.versions = None
+
+ def finalize_options(self):
+ self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
+
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'msi')
+
+ short_version = get_python_version()
+ if (not self.target_version) and self.distribution.has_ext_modules():
+ self.target_version = short_version
+
+ if self.target_version:
+ self.versions = [self.target_version]
+ if not self.skip_build and self.distribution.has_ext_modules()\
+ and self.target_version != short_version:
+ raise DistutilsOptionError(
+ "target version can only be %s, or the '--skip-build'"
+ " option must be specified" % (short_version,))
+ else:
+ self.versions = list(self.all_versions)
+
+ self.set_undefined_options('bdist',
+ ('dist_dir', 'dist_dir'),
+ ('plat_name', 'plat_name'),
+ )
+
+ if self.pre_install_script:
+ raise DistutilsOptionError(
+ "the pre-install-script feature is not yet implemented")
+
+ if self.install_script:
+ for script in self.distribution.scripts:
+ if self.install_script == os.path.basename(script):
+ break
+ else:
+ raise DistutilsOptionError(
+ "install_script '%s' not found in scripts"
+ % self.install_script)
+ self.install_script_key = None
+
+ def run(self):
+ if not self.skip_build:
+ self.run_command('build')
+
+ install = self.reinitialize_command('install', reinit_subcommands=1)
+ install.prefix = self.bdist_dir
+ install.skip_build = self.skip_build
+ install.warn_dir = 0
+
+ install_lib = self.reinitialize_command('install_lib')
+ # we do not want to include pyc or pyo files
+ install_lib.compile = 0
+ install_lib.optimize = 0
+
+ if self.distribution.has_ext_modules():
+ # If we are building an installer for a Python version other
+ # than the one we are currently running, then we need to ensure
+ # our build_lib reflects the other Python version rather than ours.
+ # Note that for target_version!=sys.version, we must have skipped the
+ # build step, so there is no issue with enforcing the build of this
+ # version.
+ target_version = self.target_version
+ if not target_version:
+ assert self.skip_build, "Should have already checked this"
+ target_version = '%d.%d' % sys.version_info[:2]
+ plat_specifier = ".%s-%s" % (self.plat_name, target_version)
+ build = self.get_finalized_command('build')
+ build.build_lib = os.path.join(build.build_base,
+ 'lib' + plat_specifier)
+
+ log.info("installing to %s", self.bdist_dir)
+ install.ensure_finalized()
+
+ # avoid warning of 'install_lib' about installing
+ # into a directory not in sys.path
+ sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
+
+ install.run()
+
+ del sys.path[0]
+
+ self.mkpath(self.dist_dir)
+ fullname = self.distribution.get_fullname()
+ installer_name = self.get_installer_filename(fullname)
+ installer_name = os.path.abspath(installer_name)
+ if os.path.exists(installer_name): os.unlink(installer_name)
+
+ metadata = self.distribution.metadata
+ author = metadata.author
+ if not author:
+ author = metadata.maintainer
+ if not author:
+ author = "UNKNOWN"
+ version = metadata.get_version()
+ # ProductVersion must be strictly numeric
+ # XXX need to deal with prerelease versions
+ sversion = "%d.%d.%d" % StrictVersion(version).version
+ # Prefix ProductName with Python x.y, so that
+ # it sorts together with the other Python packages
+ # in Add/Remove Programs (ARP)
+ fullname = self.distribution.get_fullname()
+ if self.target_version:
+ product_name = "Python %s %s" % (self.target_version, fullname)
+ else:
+ product_name = "Python %s" % (fullname)
+ self.db = msilib.init_database(installer_name, schema,
+ product_name, msilib.gen_uuid(),
+ sversion, author)
+ msilib.add_tables(self.db, sequence)
+ props = [('DistVersion', version)]
+ email = metadata.author_email or metadata.maintainer_email
+ if email:
+ props.append(("ARPCONTACT", email))
+ if metadata.url:
+ props.append(("ARPURLINFOABOUT", metadata.url))
+ if props:
+ add_data(self.db, 'Property', props)
+
+ self.add_find_python()
+ self.add_files()
+ self.add_scripts()
+ self.add_ui()
+ self.db.Commit()
+
+ if hasattr(self.distribution, 'dist_files'):
+ tup = 'bdist_msi', self.target_version or 'any', fullname
+ self.distribution.dist_files.append(tup)
+
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+ def add_files(self):
+ db = self.db
+ cab = msilib.CAB("distfiles")
+ rootdir = os.path.abspath(self.bdist_dir)
+
+ root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir")
+ f = Feature(db, "Python", "Python", "Everything",
+ 0, 1, directory="TARGETDIR")
+
+ items = [(f, root, '')]
+ for version in self.versions + [self.other_version]:
+ target = "TARGETDIR" + version
+ name = default = "Python" + version
+ desc = "Everything"
+ if version is self.other_version:
+ title = "Python from another location"
+ level = 2
+ else:
+ title = "Python %s from registry" % version
+ level = 1
+ f = Feature(db, name, title, desc, 1, level, directory=target)
+ dir = Directory(db, cab, root, rootdir, target, default)
+ items.append((f, dir, version))
+ db.Commit()
+
+ seen = {}
+ for feature, dir, version in items:
+ todo = [dir]
+ while todo:
+ dir = todo.pop()
+ for file in os.listdir(dir.absolute):
+ afile = os.path.join(dir.absolute, file)
+ if os.path.isdir(afile):
+ short = "%s|%s" % (dir.make_short(file), file)
+ default = file + version
+ newdir = Directory(db, cab, dir, file, default, short)
+ todo.append(newdir)
+ else:
+ if not dir.component:
+ dir.start_component(dir.logical, feature, 0)
+ if afile not in seen:
+ key = seen[afile] = dir.add_file(file)
+ if file==self.install_script:
+ if self.install_script_key:
+ raise DistutilsOptionError(
+ "Multiple files with name %s" % file)
+ self.install_script_key = '[#%s]' % key
+ else:
+ key = seen[afile]
+ add_data(self.db, "DuplicateFile",
+ [(key + version, dir.component, key, None, dir.logical)])
+ db.Commit()
+ cab.commit(db)
+
+ def add_find_python(self):
+ """Adds code to the installer to compute the location of Python.
+
+ Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the
+ registry for each version of Python.
+
+ Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined,
+ else from PYTHON.MACHINE.X.Y.
+
+ Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe"""
+
+ start = 402
+ for ver in self.versions:
+ install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver
+ machine_reg = "python.machine." + ver
+ user_reg = "python.user." + ver
+ machine_prop = "PYTHON.MACHINE." + ver
+ user_prop = "PYTHON.USER." + ver
+ machine_action = "PythonFromMachine" + ver
+ user_action = "PythonFromUser" + ver
+ exe_action = "PythonExe" + ver
+ target_dir_prop = "TARGETDIR" + ver
+ exe_prop = "PYTHON" + ver
+ if msilib.Win64:
+ # type: msidbLocatorTypeRawValue + msidbLocatorType64bit
+ Type = 2+16
+ else:
+ Type = 2
+ add_data(self.db, "RegLocator",
+ [(machine_reg, 2, install_path, None, Type),
+ (user_reg, 1, install_path, None, Type)])
+ add_data(self.db, "AppSearch",
+ [(machine_prop, machine_reg),
+ (user_prop, user_reg)])
+ add_data(self.db, "CustomAction",
+ [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"),
+ (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"),
+ (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"),
+ ])
+ add_data(self.db, "InstallExecuteSequence",
+ [(machine_action, machine_prop, start),
+ (user_action, user_prop, start + 1),
+ (exe_action, None, start + 2),
+ ])
+ add_data(self.db, "InstallUISequence",
+ [(machine_action, machine_prop, start),
+ (user_action, user_prop, start + 1),
+ (exe_action, None, start + 2),
+ ])
+ add_data(self.db, "Condition",
+ [("Python" + ver, 0, "NOT TARGETDIR" + ver)])
+ start += 4
+ assert start < 500
+
+ def add_scripts(self):
+ if self.install_script:
+ start = 6800
+ for ver in self.versions + [self.other_version]:
+ install_action = "install_script." + ver
+ exe_prop = "PYTHON" + ver
+ add_data(self.db, "CustomAction",
+ [(install_action, 50, exe_prop, self.install_script_key)])
+ add_data(self.db, "InstallExecuteSequence",
+ [(install_action, "&Python%s=3" % ver, start)])
+ start += 1
+ # XXX pre-install scripts are currently refused in finalize_options()
+ # but if this feature is completed, it will also need to add
+ # entries for each version as the above code does
+ if self.pre_install_script:
+ scriptfn = os.path.join(self.bdist_dir, "preinstall.bat")
+ with open(scriptfn, "w") as f:
+ # The batch file will be executed with [PYTHON], so that %1
+ # is the path to the Python interpreter; %0 will be the path
+ # of the batch file.
+ # rem ="""
+ # %1 %0
+ # exit
+ # """
+ # <actual script>
+ f.write('rem ="""\n%1 %0\nexit\n"""\n')
+ with open(self.pre_install_script) as fin:
+ f.write(fin.read())
+ add_data(self.db, "Binary",
+ [("PreInstall", msilib.Binary(scriptfn))
+ ])
+ add_data(self.db, "CustomAction",
+ [("PreInstall", 2, "PreInstall", None)
+ ])
+ add_data(self.db, "InstallExecuteSequence",
+ [("PreInstall", "NOT Installed", 450)])
+
+
+ def add_ui(self):
+ db = self.db
+ x = y = 50
+ w = 370
+ h = 300
+ title = "[ProductName] Setup"
+
+ # see "Dialog Style Bits"
+ modal = 3 # visible | modal
+ modeless = 1 # visible
+ track_disk_space = 32
+
+ # UI customization properties
+ add_data(db, "Property",
+ # See "DefaultUIFont Property"
+ [("DefaultUIFont", "DlgFont8"),
+ # See "ErrorDialog Style Bit"
+ ("ErrorDialog", "ErrorDlg"),
+ ("Progress1", "Install"), # modified in maintenance type dlg
+ ("Progress2", "installs"),
+ ("MaintenanceForm_Action", "Repair"),
+ # possible values: ALL, JUSTME
+ ("WhichUsers", "ALL")
+ ])
+
+ # Fonts, see "TextStyle Table"
+ add_data(db, "TextStyle",
+ [("DlgFont8", "Tahoma", 9, None, 0),
+ ("DlgFontBold8", "Tahoma", 8, None, 1), #bold
+ ("VerdanaBold10", "Verdana", 10, None, 1),
+ ("VerdanaRed9", "Verdana", 9, 255, 0),
+ ])
+
+ # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table"
+ # Numbers indicate sequence; see sequence.py for how these actions integrate
+ add_data(db, "InstallUISequence",
+ [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140),
+ ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141),
+ # In the user interface, assume all-users installation if privileged.
+ ("SelectFeaturesDlg", "Not Installed", 1230),
+ # XXX no support for resume installations yet
+ #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240),
+ ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250),
+ ("ProgressDlg", None, 1280)])
+
+ add_data(db, 'ActionText', text.ActionText)
+ add_data(db, 'UIText', text.UIText)
+ #####################################################################
+ # Standard dialogs: FatalError, UserExit, ExitDialog
+ fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title,
+ "Finish", "Finish", "Finish")
+ fatal.title("[ProductName] Installer ended prematurely")
+ fatal.back("< Back", "Finish", active = 0)
+ fatal.cancel("Cancel", "Back", active = 0)
+ fatal.text("Description1", 15, 70, 320, 80, 0x30003,
+ "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.")
+ fatal.text("Description2", 15, 155, 320, 20, 0x30003,
+ "Click the Finish button to exit the Installer.")
+ c=fatal.next("Finish", "Cancel", name="Finish")
+ c.event("EndDialog", "Exit")
+
+ user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title,
+ "Finish", "Finish", "Finish")
+ user_exit.title("[ProductName] Installer was interrupted")
+ user_exit.back("< Back", "Finish", active = 0)
+ user_exit.cancel("Cancel", "Back", active = 0)
+ user_exit.text("Description1", 15, 70, 320, 80, 0x30003,
+ "[ProductName] setup was interrupted. Your system has not been modified. "
+ "To install this program at a later time, please run the installation again.")
+ user_exit.text("Description2", 15, 155, 320, 20, 0x30003,
+ "Click the Finish button to exit the Installer.")
+ c = user_exit.next("Finish", "Cancel", name="Finish")
+ c.event("EndDialog", "Exit")
+
+ exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title,
+ "Finish", "Finish", "Finish")
+ exit_dialog.title("Completing the [ProductName] Installer")
+ exit_dialog.back("< Back", "Finish", active = 0)
+ exit_dialog.cancel("Cancel", "Back", active = 0)
+ exit_dialog.text("Description", 15, 235, 320, 20, 0x30003,
+ "Click the Finish button to exit the Installer.")
+ c = exit_dialog.next("Finish", "Cancel", name="Finish")
+ c.event("EndDialog", "Return")
+
+ #####################################################################
+ # Required dialog: FilesInUse, ErrorDlg
+ inuse = PyDialog(db, "FilesInUse",
+ x, y, w, h,
+ 19, # KeepModeless|Modal|Visible
+ title,
+ "Retry", "Retry", "Retry", bitmap=False)
+ inuse.text("Title", 15, 6, 200, 15, 0x30003,
+ r"{\DlgFontBold8}Files in Use")
+ inuse.text("Description", 20, 23, 280, 20, 0x30003,
+ "Some files that need to be updated are currently in use.")
+ inuse.text("Text", 20, 55, 330, 50, 3,
+ "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.")
+ inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess",
+ None, None, None)
+ c=inuse.back("Exit", "Ignore", name="Exit")
+ c.event("EndDialog", "Exit")
+ c=inuse.next("Ignore", "Retry", name="Ignore")
+ c.event("EndDialog", "Ignore")
+ c=inuse.cancel("Retry", "Exit", name="Retry")
+ c.event("EndDialog","Retry")
+
+ # See "Error Dialog". See "ICE20" for the required names of the controls.
+ error = Dialog(db, "ErrorDlg",
+ 50, 10, 330, 101,
+ 65543, # Error|Minimize|Modal|Visible
+ title,
+ "ErrorText", None, None)
+ error.text("ErrorText", 50,9,280,48,3, "")
+ #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None)
+ error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo")
+ error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes")
+ error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort")
+ error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel")
+ error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore")
+ error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk")
+ error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry")
+
+ #####################################################################
+ # Global "Query Cancel" dialog
+ cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title,
+ "No", "No", "No")
+ cancel.text("Text", 48, 15, 194, 30, 3,
+ "Are you sure you want to cancel [ProductName] installation?")
+ #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None,
+ # "py.ico", None, None)
+ c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No")
+ c.event("EndDialog", "Exit")
+
+ c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes")
+ c.event("EndDialog", "Return")
+
+ #####################################################################
+ # Global "Wait for costing" dialog
+ costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title,
+ "Return", "Return", "Return")
+ costing.text("Text", 48, 15, 194, 30, 3,
+ "Please wait while the installer finishes determining your disk space requirements.")
+ c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None)
+ c.event("EndDialog", "Exit")
+
+ #####################################################################
+ # Preparation dialog: no user input except cancellation
+ prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title,
+ "Cancel", "Cancel", "Cancel")
+ prep.text("Description", 15, 70, 320, 40, 0x30003,
+ "Please wait while the Installer prepares to guide you through the installation.")
+ prep.title("Welcome to the [ProductName] Installer")
+ c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...")
+ c.mapping("ActionText", "Text")
+ c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None)
+ c.mapping("ActionData", "Text")
+ prep.back("Back", None, active=0)
+ prep.next("Next", None, active=0)
+ c=prep.cancel("Cancel", None)
+ c.event("SpawnDialog", "CancelDlg")
+
+ #####################################################################
+ # Feature (Python directory) selection
+ seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title,
+ "Next", "Next", "Cancel")
+ seldlg.title("Select Python Installations")
+
+ seldlg.text("Hint", 15, 30, 300, 20, 3,
+ "Select the Python locations where %s should be installed."
+ % self.distribution.get_fullname())
+
+ seldlg.back("< Back", None, active=0)
+ c = seldlg.next("Next >", "Cancel")
+ order = 1
+ c.event("[TARGETDIR]", "[SourceDir]", ordering=order)
+ for version in self.versions + [self.other_version]:
+ order += 1
+ c.event("[TARGETDIR]", "[TARGETDIR%s]" % version,
+ "FEATURE_SELECTED AND &Python%s=3" % version,
+ ordering=order)
+ c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1)
+ c.event("EndDialog", "Return", ordering=order + 2)
+ c = seldlg.cancel("Cancel", "Features")
+ c.event("SpawnDialog", "CancelDlg")
+
+ c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3,
+ "FEATURE", None, "PathEdit", None)
+ c.event("[FEATURE_SELECTED]", "1")
+ ver = self.other_version
+ install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver
+ dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver
+
+ c = seldlg.text("Other", 15, 200, 300, 15, 3,
+ "Provide an alternate Python location")
+ c.condition("Enable", install_other_cond)
+ c.condition("Show", install_other_cond)
+ c.condition("Disable", dont_install_other_cond)
+ c.condition("Hide", dont_install_other_cond)
+
+ c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1,
+ "TARGETDIR" + ver, None, "Next", None)
+ c.condition("Enable", install_other_cond)
+ c.condition("Show", install_other_cond)
+ c.condition("Disable", dont_install_other_cond)
+ c.condition("Hide", dont_install_other_cond)
+
+ #####################################################################
+ # Disk cost
+ cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title,
+ "OK", "OK", "OK", bitmap=False)
+ cost.text("Title", 15, 6, 200, 15, 0x30003,
+ r"{\DlgFontBold8}Disk Space Requirements")
+ cost.text("Description", 20, 20, 280, 20, 0x30003,
+ "The disk space required for the installation of the selected features.")
+ cost.text("Text", 20, 53, 330, 60, 3,
+ "The highlighted volumes (if any) do not have enough disk space "
+ "available for the currently selected features. You can either "
+ "remove some files from the highlighted volumes, or choose to "
+ "install less features onto local drive(s), or select different "
+ "destination drive(s).")
+ cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223,
+ None, "{120}{70}{70}{70}{70}", None, None)
+ cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return")
+
+ #####################################################################
+ # WhichUsers Dialog. Only available on NT, and for privileged users.
+ # This must be run before FindRelatedProducts, because that will
+ # take into account whether the previous installation was per-user
+ # or per-machine. We currently don't support going back to this
+ # dialog after "Next" was selected; to support this, we would need to
+ # find how to reset the ALLUSERS property, and how to re-run
+ # FindRelatedProducts.
+ # On Windows9x, the ALLUSERS property is ignored on the command line
+ # and in the Property table, but according to the documentation the
+ # installer fails if a dialog attempts to set ALLUSERS.
+ whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title,
+ "AdminInstall", "Next", "Cancel")
+ whichusers.title("Select whether to install [ProductName] for all users of this computer.")
+ # A radio group with two options: allusers, justme
+ g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3,
+ "WhichUsers", "", "Next")
+ g.add("ALL", 0, 5, 150, 20, "Install for all users")
+ g.add("JUSTME", 0, 25, 150, 20, "Install just for me")
+
+ whichusers.back("Back", None, active=0)
+
+ c = whichusers.next("Next >", "Cancel")
+ c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1)
+ c.event("EndDialog", "Return", ordering = 2)
+
+ c = whichusers.cancel("Cancel", "AdminInstall")
+ c.event("SpawnDialog", "CancelDlg")
+
+ #####################################################################
+ # Installation Progress dialog (modeless)
+ progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title,
+ "Cancel", "Cancel", "Cancel", bitmap=False)
+ progress.text("Title", 20, 15, 200, 15, 0x30003,
+ r"{\DlgFontBold8}[Progress1] [ProductName]")
+ progress.text("Text", 35, 65, 300, 30, 3,
+ "Please wait while the Installer [Progress2] [ProductName]. "
+ "This may take several minutes.")
+ progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:")
+
+ c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...")
+ c.mapping("ActionText", "Text")
+
+ #c=progress.text("ActionData", 35, 140, 300, 20, 3, None)
+ #c.mapping("ActionData", "Text")
+
+ c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537,
+ None, "Progress done", None, None)
+ c.mapping("SetProgress", "Progress")
+
+ progress.back("< Back", "Next", active=False)
+ progress.next("Next >", "Cancel", active=False)
+ progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg")
+
+ ###################################################################
+ # Maintenance type: repair/uninstall
+ maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title,
+ "Next", "Next", "Cancel")
+ maint.title("Welcome to the [ProductName] Setup Wizard")
+ maint.text("BodyText", 15, 63, 330, 42, 3,
+ "Select whether you want to repair or remove [ProductName].")
+ g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3,
+ "MaintenanceForm_Action", "", "Next")
+ #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]")
+ g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]")
+ g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]")
+
+ maint.back("< Back", None, active=False)
+ c=maint.next("Finish", "Cancel")
+ # Change installation: Change progress dialog to "Change", then ask
+ # for feature selection
+ #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1)
+ #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2)
+
+ # Reinstall: Change progress dialog to "Repair", then invoke reinstall
+ # Also set list of reinstalled features to "ALL"
+ c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5)
+ c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6)
+ c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7)
+ c.event("Reinstall", "ALL", 'MaintenanceForm_Action="Repair"', 8)
+
+ # Uninstall: Change progress to "Remove", then invoke uninstall
+ # Also set list of removed features to "ALL"
+ c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11)
+ c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12)
+ c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13)
+ c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14)
+
+ # Close dialog when maintenance action scheduled
+ c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20)
+ #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21)
+
+ maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg")
+
+ def get_installer_filename(self, fullname):
+ # Factored out to allow overriding in subclasses
+ if self.target_version:
+ base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name,
+ self.target_version)
+ else:
+ base_name = "%s.%s.msi" % (fullname, self.plat_name)
+ installer_name = os.path.join(self.dist_dir, base_name)
+ return installer_name
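+
+ # Illustrative results (editor's sketch): fullname "foo-1.0" with
+ # plat_name "win32" and target_version "3.8" yields
+ # dist/foo-1.0.win32-py3.8.msi; without a target version,
+ # dist/foo-1.0.win32.msi.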
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/bdist_rpm.py b/third_party/python/setuptools/setuptools/_distutils/command/bdist_rpm.py
new file mode 100644
index 0000000000..550cbfa1e2
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/bdist_rpm.py
@@ -0,0 +1,579 @@
+"""distutils.command.bdist_rpm
+
+Implements the Distutils 'bdist_rpm' command (create RPM source and binary
+distributions)."""
+
+import subprocess, sys, os
+from distutils.core import Command
+from distutils.debug import DEBUG
+from distutils.file_util import write_file
+from distutils.errors import *
+from distutils.sysconfig import get_python_version
+from distutils import log
+
+class bdist_rpm(Command):
+
+ description = "create an RPM distribution"
+
+ user_options = [
+ ('bdist-base=', None,
+ "base directory for creating built distributions"),
+ ('rpm-base=', None,
+ "base directory for creating RPMs (defaults to \"rpm\" under "
+ "--bdist-base; must be specified for RPM 2)"),
+ ('dist-dir=', 'd',
+ "directory to put final RPM files in "
+ "(and .spec files if --spec-only)"),
+ ('python=', None,
+ "path to Python interpreter to hard-code in the .spec file "
+ "(default: \"python\")"),
+ ('fix-python', None,
+ "hard-code the exact path to the current Python interpreter in "
+ "the .spec file"),
+ ('spec-only', None,
+ "only regenerate spec file"),
+ ('source-only', None,
+ "only generate source RPM"),
+ ('binary-only', None,
+ "only generate binary RPM"),
+ ('use-bzip2', None,
+ "use bzip2 instead of gzip to create source distribution"),
+
+ # More metadata: too RPM-specific to put in the setup script,
+ # but needed in the .spec file -- so we make them options of
+ # "bdist_rpm". The idea is that packagers would put this
+ # info in setup.cfg, although they are of course free to
+ # supply it on the command line (see the illustrative setup.cfg
+ # sketch after this option table).
+ ('distribution-name=', None,
+ "name of the (Linux) distribution to which this "
+ "RPM applies (*not* the name of the module distribution!)"),
+ ('group=', None,
+ "package classification [default: \"Development/Libraries\"]"),
+ ('release=', None,
+ "RPM release number"),
+ ('serial=', None,
+ "RPM serial number"),
+ ('vendor=', None,
+ "RPM \"vendor\" (eg. \"Joe Blow <joe@example.com>\") "
+ "[default: maintainer or author from setup script]"),
+ ('packager=', None,
+ "RPM packager (eg. \"Jane Doe <jane@example.net>\") "
+ "[default: vendor]"),
+ ('doc-files=', None,
+ "list of documentation files (space or comma-separated)"),
+ ('changelog=', None,
+ "RPM changelog"),
+ ('icon=', None,
+ "name of icon file"),
+ ('provides=', None,
+ "capabilities provided by this package"),
+ ('requires=', None,
+ "capabilities required by this package"),
+ ('conflicts=', None,
+ "capabilities which conflict with this package"),
+ ('build-requires=', None,
+ "capabilities required to build this package"),
+ ('obsoletes=', None,
+ "capabilities made obsolete by this package"),
+ ('no-autoreq', None,
+ "do not automatically calculate dependencies"),
+
+ # Actions to take when building RPM
+ ('keep-temp', 'k',
+ "don't clean up RPM build directory"),
+ ('no-keep-temp', None,
+ "clean up RPM build directory [default]"),
+ ('use-rpm-opt-flags', None,
+ "compile with RPM_OPT_FLAGS when building from source RPM"),
+ ('no-rpm-opt-flags', None,
+ "do not pass any RPM CFLAGS to compiler"),
+ ('rpm3-mode', None,
+ "RPM 3 compatibility mode (default)"),
+ ('rpm2-mode', None,
+ "RPM 2 compatibility mode"),
+
+ # Add the hooks necessary for specifying custom scripts
+ ('prep-script=', None,
+ "Specify a script for the PREP phase of RPM building"),
+ ('build-script=', None,
+ "Specify a script for the BUILD phase of RPM building"),
+
+ ('pre-install=', None,
+ "Specify a script for the pre-INSTALL phase of RPM building"),
+ ('install-script=', None,
+ "Specify a script for the INSTALL phase of RPM building"),
+ ('post-install=', None,
+ "Specify a script for the post-INSTALL phase of RPM building"),
+
+ ('pre-uninstall=', None,
+ "Specify a script for the pre-UNINSTALL phase of RPM building"),
+ ('post-uninstall=', None,
+ "Specify a script for the post-UNINSTALL phase of RPM building"),
+
+ ('clean-script=', None,
+ "Specify a script for the CLEAN phase of RPM building"),
+
+ ('verify-script=', None,
+ "Specify a script for the VERIFY phase of the RPM build"),
+
+ # Allow a packager to explicitly force an architecture
+ ('force-arch=', None,
+ "Force an architecture onto the RPM build process"),
+
+ ('quiet', 'q',
+ "Run the INSTALL phase of RPM building in quiet mode"),
+ ]
+
+ boolean_options = ['keep-temp', 'use-rpm-opt-flags', 'rpm3-mode',
+ 'no-autoreq', 'quiet']
+
+ negative_opt = {'no-keep-temp': 'keep-temp',
+ 'no-rpm-opt-flags': 'use-rpm-opt-flags',
+ 'rpm2-mode': 'rpm3-mode'}
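+    # Illustrative note (not part of the upstream code): each entry in
+    # 'negative_opt' maps an option to the boolean option it negates, so
+    # passing "--rpm2-mode" on the command line simply clears 'rpm3_mode'
+    # instead of setting a separate attribute.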
+
+
+ def initialize_options(self):
+ self.bdist_base = None
+ self.rpm_base = None
+ self.dist_dir = None
+ self.python = None
+ self.fix_python = None
+ self.spec_only = None
+ self.binary_only = None
+ self.source_only = None
+ self.use_bzip2 = None
+
+ self.distribution_name = None
+ self.group = None
+ self.release = None
+ self.serial = None
+ self.vendor = None
+ self.packager = None
+ self.doc_files = None
+ self.changelog = None
+ self.icon = None
+
+ self.prep_script = None
+ self.build_script = None
+ self.install_script = None
+ self.clean_script = None
+ self.verify_script = None
+ self.pre_install = None
+ self.post_install = None
+ self.pre_uninstall = None
+ self.post_uninstall = None
+ self.prep = None
+ self.provides = None
+ self.requires = None
+ self.conflicts = None
+ self.build_requires = None
+ self.obsoletes = None
+
+ self.keep_temp = 0
+ self.use_rpm_opt_flags = 1
+ self.rpm3_mode = 1
+ self.no_autoreq = 0
+
+ self.force_arch = None
+ self.quiet = 0
+
+ def finalize_options(self):
+ self.set_undefined_options('bdist', ('bdist_base', 'bdist_base'))
+ if self.rpm_base is None:
+ if not self.rpm3_mode:
+ raise DistutilsOptionError(
+ "you must specify --rpm-base in RPM 2 mode")
+ self.rpm_base = os.path.join(self.bdist_base, "rpm")
+
+ if self.python is None:
+ if self.fix_python:
+ self.python = sys.executable
+ else:
+ self.python = "python3"
+ elif self.fix_python:
+ raise DistutilsOptionError(
+ "--python and --fix-python are mutually exclusive options")
+
+ if os.name != 'posix':
+ raise DistutilsPlatformError("don't know how to create RPM "
+ "distributions on platform %s" % os.name)
+ if self.binary_only and self.source_only:
+ raise DistutilsOptionError(
+ "cannot supply both '--source-only' and '--binary-only'")
+
+ # don't pass CFLAGS to pure python distributions
+ if not self.distribution.has_ext_modules():
+ self.use_rpm_opt_flags = 0
+
+ self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+ self.finalize_package_data()
+
+ def finalize_package_data(self):
+ self.ensure_string('group', "Development/Libraries")
+ self.ensure_string('vendor',
+ "%s <%s>" % (self.distribution.get_contact(),
+ self.distribution.get_contact_email()))
+ self.ensure_string('packager')
+ self.ensure_string_list('doc_files')
+ if isinstance(self.doc_files, list):
+ for readme in ('README', 'README.txt'):
+ if os.path.exists(readme) and readme not in self.doc_files:
+ self.doc_files.append(readme)
+
+ self.ensure_string('release', "1")
+ self.ensure_string('serial') # should it be an int?
+
+ self.ensure_string('distribution_name')
+
+ self.ensure_string('changelog')
+ # Format changelog correctly
+ self.changelog = self._format_changelog(self.changelog)
+
+ self.ensure_filename('icon')
+
+ self.ensure_filename('prep_script')
+ self.ensure_filename('build_script')
+ self.ensure_filename('install_script')
+ self.ensure_filename('clean_script')
+ self.ensure_filename('verify_script')
+ self.ensure_filename('pre_install')
+ self.ensure_filename('post_install')
+ self.ensure_filename('pre_uninstall')
+ self.ensure_filename('post_uninstall')
+
+ # XXX don't forget we punted on summaries and descriptions -- they
+ # should be handled here eventually!
+
+ # Now *this* is some meta-data that belongs in the setup script...
+ self.ensure_string_list('provides')
+ self.ensure_string_list('requires')
+ self.ensure_string_list('conflicts')
+ self.ensure_string_list('build_requires')
+ self.ensure_string_list('obsoletes')
+
+ self.ensure_string('force_arch')
+
+ def run(self):
+ if DEBUG:
+ print("before _get_package_data():")
+ print("vendor =", self.vendor)
+ print("packager =", self.packager)
+ print("doc_files =", self.doc_files)
+ print("changelog =", self.changelog)
+
+ # make directories
+ if self.spec_only:
+ spec_dir = self.dist_dir
+ self.mkpath(spec_dir)
+ else:
+ rpm_dir = {}
+ for d in ('SOURCES', 'SPECS', 'BUILD', 'RPMS', 'SRPMS'):
+ rpm_dir[d] = os.path.join(self.rpm_base, d)
+ self.mkpath(rpm_dir[d])
+ spec_dir = rpm_dir['SPECS']
+
+        # Spec file goes into 'dist_dir' if '--spec-only' specified,
+ # build/rpm.<plat> otherwise.
+ spec_path = os.path.join(spec_dir,
+ "%s.spec" % self.distribution.get_name())
+ self.execute(write_file,
+ (spec_path,
+ self._make_spec_file()),
+ "writing '%s'" % spec_path)
+
+ if self.spec_only: # stop if requested
+ return
+
+ # Make a source distribution and copy to SOURCES directory with
+ # optional icon.
+ saved_dist_files = self.distribution.dist_files[:]
+ sdist = self.reinitialize_command('sdist')
+ if self.use_bzip2:
+ sdist.formats = ['bztar']
+ else:
+ sdist.formats = ['gztar']
+ self.run_command('sdist')
+ self.distribution.dist_files = saved_dist_files
+
+ source = sdist.get_archive_files()[0]
+ source_dir = rpm_dir['SOURCES']
+ self.copy_file(source, source_dir)
+
+ if self.icon:
+ if os.path.exists(self.icon):
+ self.copy_file(self.icon, source_dir)
+ else:
+ raise DistutilsFileError(
+ "icon file '%s' does not exist" % self.icon)
+
+ # build package
+ log.info("building RPMs")
+ rpm_cmd = ['rpmbuild']
+
+ if self.source_only: # what kind of RPMs?
+ rpm_cmd.append('-bs')
+ elif self.binary_only:
+ rpm_cmd.append('-bb')
+ else:
+ rpm_cmd.append('-ba')
+ rpm_cmd.extend(['--define', '__python %s' % self.python])
+ if self.rpm3_mode:
+ rpm_cmd.extend(['--define',
+ '_topdir %s' % os.path.abspath(self.rpm_base)])
+ if not self.keep_temp:
+ rpm_cmd.append('--clean')
+
+ if self.quiet:
+ rpm_cmd.append('--quiet')
+
+ rpm_cmd.append(spec_path)
+ # Determine the binary rpm names that should be built out of this spec
+ # file
+ # Note that some of these may not be really built (if the file
+ # list is empty)
+ nvr_string = "%{name}-%{version}-%{release}"
+ src_rpm = nvr_string + ".src.rpm"
+ non_src_rpm = "%{arch}/" + nvr_string + ".%{arch}.rpm"
+ q_cmd = r"rpm -q --qf '%s %s\n' --specfile '%s'" % (
+ src_rpm, non_src_rpm, spec_path)
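+        # Illustrative sketch (not part of the upstream code): for a spec
+        # file whose name/version/release expand to "spam", "1.0" and "1",
+        # this query prints one line per package section, e.g.
+        #   spam-1.0-1.src.rpm noarch/spam-1.0-1.noarch.rpm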
+
+ out = os.popen(q_cmd)
+ try:
+ binary_rpms = []
+ source_rpm = None
+ while True:
+ line = out.readline()
+ if not line:
+ break
+                fields = line.strip().split()
+                assert len(fields) == 2
+                binary_rpms.append(fields[1])
+                # The source rpm is named after the first entry in the spec file
+                if source_rpm is None:
+                    source_rpm = fields[0]
+
+ status = out.close()
+ if status:
+ raise DistutilsExecError("Failed to execute: %s" % repr(q_cmd))
+
+ finally:
+ out.close()
+
+ self.spawn(rpm_cmd)
+
+ if not self.dry_run:
+ if self.distribution.has_ext_modules():
+ pyversion = get_python_version()
+ else:
+ pyversion = 'any'
+
+ if not self.binary_only:
+ srpm = os.path.join(rpm_dir['SRPMS'], source_rpm)
+ assert(os.path.exists(srpm))
+ self.move_file(srpm, self.dist_dir)
+ filename = os.path.join(self.dist_dir, source_rpm)
+ self.distribution.dist_files.append(
+ ('bdist_rpm', pyversion, filename))
+
+ if not self.source_only:
+ for rpm in binary_rpms:
+ rpm = os.path.join(rpm_dir['RPMS'], rpm)
+ if os.path.exists(rpm):
+ self.move_file(rpm, self.dist_dir)
+ filename = os.path.join(self.dist_dir,
+ os.path.basename(rpm))
+ self.distribution.dist_files.append(
+ ('bdist_rpm', pyversion, filename))
+
+ def _dist_path(self, path):
+ return os.path.join(self.dist_dir, os.path.basename(path))
+
+ def _make_spec_file(self):
+ """Generate the text of an RPM spec file and return it as a
+ list of strings (one per line).
+ """
+ # definitions and headers
+ spec_file = [
+ '%define name ' + self.distribution.get_name(),
+ '%define version ' + self.distribution.get_version().replace('-','_'),
+ '%define unmangled_version ' + self.distribution.get_version(),
+ '%define release ' + self.release.replace('-','_'),
+ '',
+ 'Summary: ' + self.distribution.get_description(),
+ ]
+
+ # Workaround for #14443 which affects some RPM based systems such as
+ # RHEL6 (and probably derivatives)
+ vendor_hook = subprocess.getoutput('rpm --eval %{__os_install_post}')
+ # Generate a potential replacement value for __os_install_post (whilst
+ # normalizing the whitespace to simplify the test for whether the
+ # invocation of brp-python-bytecompile passes in __python):
+ vendor_hook = '\n'.join([' %s \\' % line.strip()
+ for line in vendor_hook.splitlines()])
+ problem = "brp-python-bytecompile \\\n"
+ fixed = "brp-python-bytecompile %{__python} \\\n"
+ fixed_hook = vendor_hook.replace(problem, fixed)
+ if fixed_hook != vendor_hook:
+ spec_file.append('# Workaround for http://bugs.python.org/issue14443')
+ spec_file.append('%define __os_install_post ' + fixed_hook + '\n')
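+            # Illustrative sketch (not part of the upstream code): on an
+            # affected system the vendor hook may contain a line such as
+            #     /usr/lib/rpm/brp-python-bytecompile \
+            # which the replacement above turns into
+            #     /usr/lib/rpm/brp-python-bytecompile %{__python} \
+            # so byte-compilation uses the interpreter selected via --python.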
+
+ # put locale summaries into spec file
+ # XXX not supported for now (hard to put a dictionary
+ # in a config file -- arg!)
+ #for locale in self.summaries.keys():
+ # spec_file.append('Summary(%s): %s' % (locale,
+ # self.summaries[locale]))
+
+ spec_file.extend([
+ 'Name: %{name}',
+ 'Version: %{version}',
+ 'Release: %{release}',])
+
+ # XXX yuck! this filename is available from the "sdist" command,
+ # but only after it has run: and we create the spec file before
+ # running "sdist", in case of --spec-only.
+ if self.use_bzip2:
+ spec_file.append('Source0: %{name}-%{unmangled_version}.tar.bz2')
+ else:
+ spec_file.append('Source0: %{name}-%{unmangled_version}.tar.gz')
+
+ spec_file.extend([
+ 'License: ' + self.distribution.get_license(),
+ 'Group: ' + self.group,
+ 'BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot',
+ 'Prefix: %{_prefix}', ])
+
+ if not self.force_arch:
+ # noarch if no extension modules
+ if not self.distribution.has_ext_modules():
+ spec_file.append('BuildArch: noarch')
+ else:
+ spec_file.append( 'BuildArch: %s' % self.force_arch )
+
+ for field in ('Vendor',
+ 'Packager',
+ 'Provides',
+ 'Requires',
+ 'Conflicts',
+ 'Obsoletes',
+ ):
+ val = getattr(self, field.lower())
+ if isinstance(val, list):
+ spec_file.append('%s: %s' % (field, ' '.join(val)))
+ elif val is not None:
+ spec_file.append('%s: %s' % (field, val))
+
+
+ if self.distribution.get_url() != 'UNKNOWN':
+ spec_file.append('Url: ' + self.distribution.get_url())
+
+ if self.distribution_name:
+ spec_file.append('Distribution: ' + self.distribution_name)
+
+ if self.build_requires:
+ spec_file.append('BuildRequires: ' +
+ ' '.join(self.build_requires))
+
+ if self.icon:
+ spec_file.append('Icon: ' + os.path.basename(self.icon))
+
+ if self.no_autoreq:
+ spec_file.append('AutoReq: 0')
+
+ spec_file.extend([
+ '',
+ '%description',
+ self.distribution.get_long_description()
+ ])
+
+ # put locale descriptions into spec file
+ # XXX again, suppressed because config file syntax doesn't
+ # easily support this ;-(
+ #for locale in self.descriptions.keys():
+ # spec_file.extend([
+ # '',
+ # '%description -l ' + locale,
+ # self.descriptions[locale],
+ # ])
+
+ # rpm scripts
+ # figure out default build script
+ def_setup_call = "%s %s" % (self.python,os.path.basename(sys.argv[0]))
+ def_build = "%s build" % def_setup_call
+ if self.use_rpm_opt_flags:
+ def_build = 'env CFLAGS="$RPM_OPT_FLAGS" ' + def_build
+
+ # insert contents of files
+
+ # XXX this is kind of misleading: user-supplied options are files
+ # that we open and interpolate into the spec file, but the defaults
+ # are just text that we drop in as-is. Hmmm.
+
+ install_cmd = ('%s install -O1 --root=$RPM_BUILD_ROOT '
+ '--record=INSTALLED_FILES') % def_setup_call
+
+ script_options = [
+ ('prep', 'prep_script', "%setup -n %{name}-%{unmangled_version}"),
+ ('build', 'build_script', def_build),
+ ('install', 'install_script', install_cmd),
+ ('clean', 'clean_script', "rm -rf $RPM_BUILD_ROOT"),
+ ('verifyscript', 'verify_script', None),
+ ('pre', 'pre_install', None),
+ ('post', 'post_install', None),
+ ('preun', 'pre_uninstall', None),
+ ('postun', 'post_uninstall', None),
+ ]
+
+ for (rpm_opt, attr, default) in script_options:
+            # Insert the contents of the file referred to; if no file is
+            # referred to, use 'default' as the contents of the script.
+ val = getattr(self, attr)
+ if val or default:
+ spec_file.extend([
+ '',
+ '%' + rpm_opt,])
+ if val:
+ with open(val) as f:
+ spec_file.extend(f.read().split('\n'))
+ else:
+ spec_file.append(default)
+
+
+ # files section
+ spec_file.extend([
+ '',
+ '%files -f INSTALLED_FILES',
+ '%defattr(-,root,root)',
+ ])
+
+ if self.doc_files:
+ spec_file.append('%doc ' + ' '.join(self.doc_files))
+
+ if self.changelog:
+ spec_file.extend([
+ '',
+ '%changelog',])
+ spec_file.extend(self.changelog)
+
+ return spec_file
+
+ def _format_changelog(self, changelog):
+ """Format the changelog correctly and convert it to a list of strings
+ """
+ if not changelog:
+ return changelog
+ new_changelog = []
+ for line in changelog.strip().split('\n'):
+            line = line.strip()
+            # skip blank lines, which would otherwise crash on line[0]
+            if not line:
+                continue
+            if line[0] == '*':
+ new_changelog.extend(['', line])
+ elif line[0] == '-':
+ new_changelog.append(line)
+ else:
+ new_changelog.append(' ' + line)
+
+ # strip trailing newline inserted by first changelog entry
+ if not new_changelog[0]:
+ del new_changelog[0]
+
+ return new_changelog
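+
+    # Illustrative sketch (not part of the upstream code): given the
+    # hypothetical option value
+    #   "* Mon Jan 01 2001 Jane Doe <jane@example.net>\n- initial release"
+    # the method above returns
+    #   ['* Mon Jan 01 2001 Jane Doe <jane@example.net>', '- initial release']
+    # i.e. entry headers separated by blank lines (except the first) and
+    # plain text indented by two spaces.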
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/bdist_wininst.py b/third_party/python/setuptools/setuptools/_distutils/command/bdist_wininst.py
new file mode 100644
index 0000000000..0e9ddaa214
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/bdist_wininst.py
@@ -0,0 +1,377 @@
+"""distutils.command.bdist_wininst
+
+Implements the Distutils 'bdist_wininst' command: create a windows installer
+exe-program."""
+
+import os
+import sys
+import warnings
+from distutils.core import Command
+from distutils.util import get_platform
+from distutils.dir_util import remove_tree
+from distutils.errors import *
+from distutils.sysconfig import get_python_version
+from distutils import log
+
+class bdist_wininst(Command):
+
+ description = "create an executable installer for MS Windows"
+
+ user_options = [('bdist-dir=', None,
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform()),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('target-version=', None,
+ "require a specific python version" +
+ " on the target system"),
+ ('no-target-compile', 'c',
+ "do not compile .py to .pyc on the target system"),
+ ('no-target-optimize', 'o',
+ "do not compile .py to .pyo (optimized) "
+ "on the target system"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('bitmap=', 'b',
+ "bitmap to use for the installer instead of python-powered logo"),
+ ('title=', 't',
+ "title to display on the installer background instead of default"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('install-script=', None,
+ "basename of installation script to be run after "
+ "installation or before deinstallation"),
+ ('pre-install-script=', None,
+ "Fully qualified filename of a script to be run before "
+ "any files are installed. This script need not be in the "
+ "distribution"),
+ ('user-access-control=', None,
+ "specify Vista's UAC handling - 'none'/default=no "
+ "handling, 'auto'=use UAC if target Python installed for "
+ "all users, 'force'=always use UAC"),
+ ]
+
+ boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize',
+ 'skip-build']
+
+ # bpo-10945: bdist_wininst requires mbcs encoding only available on Windows
+ _unsupported = (sys.platform != "win32")
+
+ def __init__(self, *args, **kw):
+ super().__init__(*args, **kw)
+ warnings.warn("bdist_wininst command is deprecated since Python 3.8, "
+ "use bdist_wheel (wheel packages) instead",
+ DeprecationWarning, 2)
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.keep_temp = 0
+ self.no_target_compile = 0
+ self.no_target_optimize = 0
+ self.target_version = None
+ self.dist_dir = None
+ self.bitmap = None
+ self.title = None
+ self.skip_build = None
+ self.install_script = None
+ self.pre_install_script = None
+ self.user_access_control = None
+
+
+ def finalize_options(self):
+ self.set_undefined_options('bdist', ('skip_build', 'skip_build'))
+
+ if self.bdist_dir is None:
+ if self.skip_build and self.plat_name:
+ # If build is skipped and plat_name is overridden, bdist will
+ # not see the correct 'plat_name' - so set that up manually.
+ bdist = self.distribution.get_command_obj('bdist')
+ bdist.plat_name = self.plat_name
+ # next the command will be initialized using that name
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'wininst')
+
+ if not self.target_version:
+ self.target_version = ""
+
+ if not self.skip_build and self.distribution.has_ext_modules():
+ short_version = get_python_version()
+ if self.target_version and self.target_version != short_version:
+ raise DistutilsOptionError(
+ "target version can only be %s, or the '--skip-build'" \
+ " option must be specified" % (short_version,))
+ self.target_version = short_version
+
+ self.set_undefined_options('bdist',
+ ('dist_dir', 'dist_dir'),
+ ('plat_name', 'plat_name'),
+ )
+
+ if self.install_script:
+ for script in self.distribution.scripts:
+ if self.install_script == os.path.basename(script):
+ break
+ else:
+ raise DistutilsOptionError(
+ "install_script '%s' not found in scripts"
+ % self.install_script)
+
+ def run(self):
+ if (sys.platform != "win32" and
+ (self.distribution.has_ext_modules() or
+ self.distribution.has_c_libraries())):
+            raise DistutilsPlatformError(
+                "distribution contains extensions and/or C libraries; "
+                "must be compiled on a Windows 32 platform")
+
+ if not self.skip_build:
+ self.run_command('build')
+
+ install = self.reinitialize_command('install', reinit_subcommands=1)
+ install.root = self.bdist_dir
+ install.skip_build = self.skip_build
+ install.warn_dir = 0
+ install.plat_name = self.plat_name
+
+ install_lib = self.reinitialize_command('install_lib')
+ # we do not want to include pyc or pyo files
+ install_lib.compile = 0
+ install_lib.optimize = 0
+
+ if self.distribution.has_ext_modules():
+ # If we are building an installer for a Python version other
+ # than the one we are currently running, then we need to ensure
+ # our build_lib reflects the other Python version rather than ours.
+ # Note that for target_version!=sys.version, we must have skipped the
+ # build step, so there is no issue with enforcing the build of this
+ # version.
+ target_version = self.target_version
+ if not target_version:
+ assert self.skip_build, "Should have already checked this"
+ target_version = '%d.%d' % sys.version_info[:2]
+ plat_specifier = ".%s-%s" % (self.plat_name, target_version)
+ build = self.get_finalized_command('build')
+ build.build_lib = os.path.join(build.build_base,
+ 'lib' + plat_specifier)
+
+ # Use a custom scheme for the zip-file, because we have to decide
+ # at installation time which scheme to use.
+ for key in ('purelib', 'platlib', 'headers', 'scripts', 'data'):
+ value = key.upper()
+ if key == 'headers':
+ value = value + '/Include/$dist_name'
+ setattr(install,
+ 'install_' + key,
+ value)
+
+ log.info("installing to %s", self.bdist_dir)
+ install.ensure_finalized()
+
+ # avoid warning of 'install_lib' about installing
+ # into a directory not in sys.path
+ sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB'))
+
+ install.run()
+
+ del sys.path[0]
+
+ # And make an archive relative to the root of the
+ # pseudo-installation tree.
+ from tempfile import mktemp
+ archive_basename = mktemp()
+ fullname = self.distribution.get_fullname()
+ arcname = self.make_archive(archive_basename, "zip",
+ root_dir=self.bdist_dir)
+ # create an exe containing the zip-file
+ self.create_exe(arcname, fullname, self.bitmap)
+ if self.distribution.has_ext_modules():
+ pyversion = get_python_version()
+ else:
+ pyversion = 'any'
+ self.distribution.dist_files.append(('bdist_wininst', pyversion,
+ self.get_installer_filename(fullname)))
+ # remove the zip-file again
+ log.debug("removing temporary file '%s'", arcname)
+ os.remove(arcname)
+
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+ def get_inidata(self):
+ # Return data describing the installation.
+ lines = []
+ metadata = self.distribution.metadata
+
+ # Write the [metadata] section.
+ lines.append("[metadata]")
+
+ # 'info' will be displayed in the installer's dialog box,
+ # describing the items to be installed.
+ info = (metadata.long_description or '') + '\n'
+
+ # Escape newline characters
+ def escape(s):
+ return s.replace("\n", "\\n")
+
+ for name in ["author", "author_email", "description", "maintainer",
+ "maintainer_email", "name", "url", "version"]:
+ data = getattr(metadata, name, "")
+ if data:
+ info = info + ("\n %s: %s" % \
+ (name.capitalize(), escape(data)))
+ lines.append("%s=%s" % (name, escape(data)))
+
+ # The [setup] section contains entries controlling
+ # the installer runtime.
+ lines.append("\n[Setup]")
+ if self.install_script:
+ lines.append("install_script=%s" % self.install_script)
+ lines.append("info=%s" % escape(info))
+ lines.append("target_compile=%d" % (not self.no_target_compile))
+ lines.append("target_optimize=%d" % (not self.no_target_optimize))
+ if self.target_version:
+ lines.append("target_version=%s" % self.target_version)
+ if self.user_access_control:
+ lines.append("user_access_control=%s" % self.user_access_control)
+
+ title = self.title or self.distribution.get_fullname()
+ lines.append("title=%s" % escape(title))
+ import time
+ import distutils
+ build_info = "Built %s with distutils-%s" % \
+ (time.ctime(time.time()), distutils.__version__)
+ lines.append("build_info=%s" % build_info)
+ return "\n".join(lines)
+
+ def create_exe(self, arcname, fullname, bitmap=None):
+ import struct
+
+ self.mkpath(self.dist_dir)
+
+ cfgdata = self.get_inidata()
+
+ installer_name = self.get_installer_filename(fullname)
+ self.announce("creating %s" % installer_name)
+
+ if bitmap:
+ with open(bitmap, "rb") as f:
+ bitmapdata = f.read()
+ bitmaplen = len(bitmapdata)
+ else:
+ bitmaplen = 0
+
+ with open(installer_name, "wb") as file:
+ file.write(self.get_exe_bytes())
+ if bitmap:
+ file.write(bitmapdata)
+
+ # Convert cfgdata from unicode to ascii, mbcs encoded
+ if isinstance(cfgdata, str):
+ cfgdata = cfgdata.encode("mbcs")
+
+ # Append the pre-install script
+ cfgdata = cfgdata + b"\0"
+ if self.pre_install_script:
+ # We need to normalize newlines, so we open in text mode and
+ # convert back to bytes. "latin-1" simply avoids any possible
+ # failures.
+ with open(self.pre_install_script, "r",
+ encoding="latin-1") as script:
+ script_data = script.read().encode("latin-1")
+ cfgdata = cfgdata + script_data + b"\n\0"
+ else:
+ # empty pre-install script
+ cfgdata = cfgdata + b"\0"
+ file.write(cfgdata)
+
+ # The 'magic number' 0x1234567B is used to make sure that the
+ # binary layout of 'cfgdata' is what the wininst.exe binary
+ # expects. If the layout changes, increment that number, make
+ # the corresponding changes to the wininst.exe sources, and
+ # recompile them.
+ header = struct.pack("<iii",
+ 0x1234567B, # tag
+ len(cfgdata), # length
+ bitmaplen, # number of bytes in bitmap
+ )
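+            # Illustrative note (not part of the upstream code): "<iii"
+            # packs three little-endian 32-bit integers, i.e. a fixed
+            # 12-byte record holding the magic tag, the length of 'cfgdata'
+            # and the length of the optional bitmap.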
+ file.write(header)
+ with open(arcname, "rb") as f:
+ file.write(f.read())
+
+ def get_installer_filename(self, fullname):
+ # Factored out to allow overriding in subclasses
+ if self.target_version:
+ # if we create an installer for a specific python version,
+ # it's better to include this in the name
+ installer_name = os.path.join(self.dist_dir,
+ "%s.%s-py%s.exe" %
+ (fullname, self.plat_name, self.target_version))
+ else:
+ installer_name = os.path.join(self.dist_dir,
+ "%s.%s.exe" % (fullname, self.plat_name))
+ return installer_name
+
+ def get_exe_bytes(self):
+ # If a target-version other than the current version has been
+ # specified, then using the MSVC version from *this* build is no good.
+ # Without actually finding and executing the target version and parsing
+ # its sys.version, we just hard-code our knowledge of old versions.
+ # NOTE: Possible alternative is to allow "--target-version" to
+ # specify a Python executable rather than a simple version string.
+ # We can then execute this program to obtain any info we need, such
+ # as the real sys.version string for the build.
+ cur_version = get_python_version()
+
+ # If the target version is *later* than us, then we assume they
+ # use what we use
+ # string compares seem wrong, but are what sysconfig.py itself uses
+ if self.target_version and self.target_version < cur_version:
+ if self.target_version < "2.4":
+ bv = '6.0'
+ elif self.target_version == "2.4":
+ bv = '7.1'
+ elif self.target_version == "2.5":
+ bv = '8.0'
+ elif self.target_version <= "3.2":
+ bv = '9.0'
+ elif self.target_version <= "3.4":
+ bv = '10.0'
+ else:
+ bv = '14.0'
+ else:
+ # for current version - use authoritative check.
+ try:
+ from msvcrt import CRT_ASSEMBLY_VERSION
+ except ImportError:
+ # cross-building, so assume the latest version
+ bv = '14.0'
+ else:
+ # as far as we know, CRT is binary compatible based on
+ # the first field, so assume 'x.0' until proven otherwise
+ major = CRT_ASSEMBLY_VERSION.partition('.')[0]
+ bv = major + '.0'
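+                # Illustrative note (not part of the upstream code): with a
+                # hypothetical CRT_ASSEMBLY_VERSION of "14.0.24210.0",
+                # 'major' is "14" and 'bv' becomes "14.0", selecting a
+                # wininst-14.0*.exe stub below.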
+
+
+ # wininst-x.y.exe is in the same directory as this file
+ directory = os.path.dirname(__file__)
+ # we must use a wininst-x.y.exe built with the same C compiler
+ # used for python. XXX What about mingw, borland, and so on?
+
+ # if plat_name starts with "win" but is not "win32"
+ # we want to strip "win" and leave the rest (e.g. -amd64)
+ # for all other cases, we don't want any suffix
+ if self.plat_name != 'win32' and self.plat_name[:3] == 'win':
+ sfix = self.plat_name[3:]
+ else:
+ sfix = ''
+
+ filename = os.path.join(directory, "wininst-%s%s.exe" % (bv, sfix))
+        with open(filename, "rb") as f:
+            return f.read()
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/build.py b/third_party/python/setuptools/setuptools/_distutils/command/build.py
new file mode 100644
index 0000000000..a86df0bc7f
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/build.py
@@ -0,0 +1,157 @@
+"""distutils.command.build
+
+Implements the Distutils 'build' command."""
+
+import sys, os
+from distutils.core import Command
+from distutils.errors import DistutilsOptionError
+from distutils.util import get_platform
+
+
+def show_compilers():
+ from distutils.ccompiler import show_compilers
+ show_compilers()
+
+
+class build(Command):
+
+ description = "build everything needed to install"
+
+ user_options = [
+ ('build-base=', 'b',
+ "base directory for build library"),
+ ('build-purelib=', None,
+ "build directory for platform-neutral distributions"),
+ ('build-platlib=', None,
+ "build directory for platform-specific distributions"),
+ ('build-lib=', None,
+ "build directory for all distribution (defaults to either " +
+ "build-purelib or build-platlib"),
+ ('build-scripts=', None,
+ "build directory for scripts"),
+ ('build-temp=', 't',
+ "temporary build directory"),
+ ('plat-name=', 'p',
+ "platform name to build for, if supported "
+ "(default: %s)" % get_platform()),
+ ('compiler=', 'c',
+ "specify the compiler type"),
+ ('parallel=', 'j',
+ "number of parallel build jobs"),
+ ('debug', 'g',
+ "compile extensions and libraries with debugging information"),
+ ('force', 'f',
+ "forcibly build everything (ignore file timestamps)"),
+ ('executable=', 'e',
+ "specify final destination interpreter path (build.py)"),
+ ]
+
+ boolean_options = ['debug', 'force']
+
+ help_options = [
+ ('help-compiler', None,
+ "list available compilers", show_compilers),
+ ]
+
+ def initialize_options(self):
+ self.build_base = 'build'
+ # these are decided only after 'build_base' has its final value
+ # (unless overridden by the user or client)
+ self.build_purelib = None
+ self.build_platlib = None
+ self.build_lib = None
+ self.build_temp = None
+ self.build_scripts = None
+ self.compiler = None
+ self.plat_name = None
+ self.debug = None
+ self.force = 0
+ self.executable = None
+ self.parallel = None
+
+ def finalize_options(self):
+ if self.plat_name is None:
+ self.plat_name = get_platform()
+ else:
+ # plat-name only supported for windows (other platforms are
+ # supported via ./configure flags, if at all). Avoid misleading
+ # other platforms.
+ if os.name != 'nt':
+ raise DistutilsOptionError(
+ "--plat-name only supported on Windows (try "
+ "using './configure --help' on your platform)")
+
+ plat_specifier = ".%s-%d.%d" % (self.plat_name, *sys.version_info[:2])
+
+        # Make it so a regular build and a --with-pydebug build don't
+        # share the same build directories. Doing so confuses the build
+        # process for C modules.
+ if hasattr(sys, 'gettotalrefcount'):
+ plat_specifier += '-pydebug'
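+
+        # Illustrative note (not part of the upstream code): on a
+        # hypothetical Linux x86-64 Python 3.8 build, plat_specifier is
+        # ".linux-x86_64-3.8", yielding directories such as
+        # build/lib.linux-x86_64-3.8 and build/temp.linux-x86_64-3.8 below.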
+
+ # 'build_purelib' and 'build_platlib' just default to 'lib' and
+ # 'lib.<plat>' under the base build directory. We only use one of
+ # them for a given distribution, though --
+ if self.build_purelib is None:
+ self.build_purelib = os.path.join(self.build_base, 'lib')
+ if self.build_platlib is None:
+ self.build_platlib = os.path.join(self.build_base,
+ 'lib' + plat_specifier)
+
+ # 'build_lib' is the actual directory that we will use for this
+ # particular module distribution -- if user didn't supply it, pick
+ # one of 'build_purelib' or 'build_platlib'.
+ if self.build_lib is None:
+ if self.distribution.ext_modules:
+ self.build_lib = self.build_platlib
+ else:
+ self.build_lib = self.build_purelib
+
+ # 'build_temp' -- temporary directory for compiler turds,
+ # "build/temp.<plat>"
+ if self.build_temp is None:
+ self.build_temp = os.path.join(self.build_base,
+ 'temp' + plat_specifier)
+ if self.build_scripts is None:
+ self.build_scripts = os.path.join(self.build_base,
+ 'scripts-%d.%d' % sys.version_info[:2])
+
+ if self.executable is None and sys.executable:
+ self.executable = os.path.normpath(sys.executable)
+
+ if isinstance(self.parallel, str):
+ try:
+ self.parallel = int(self.parallel)
+ except ValueError:
+ raise DistutilsOptionError("parallel should be an integer")
+
+ def run(self):
+ # Run all relevant sub-commands. This will be some subset of:
+ # - build_py - pure Python modules
+ # - build_clib - standalone C libraries
+ # - build_ext - Python extensions
+ # - build_scripts - (Python) scripts
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+
+ # -- Predicates for the sub-command list ---------------------------
+
+ def has_pure_modules(self):
+ return self.distribution.has_pure_modules()
+
+ def has_c_libraries(self):
+ return self.distribution.has_c_libraries()
+
+ def has_ext_modules(self):
+ return self.distribution.has_ext_modules()
+
+ def has_scripts(self):
+ return self.distribution.has_scripts()
+
+
+ sub_commands = [('build_py', has_pure_modules),
+ ('build_clib', has_c_libraries),
+ ('build_ext', has_ext_modules),
+ ('build_scripts', has_scripts),
+ ]
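+
+    # Illustrative note (not part of the upstream code): get_sub_commands()
+    # evaluates each predicate above, so a pure-Python distribution runs
+    # only 'build_py' (plus 'build_scripts' if it has scripts), while one
+    # with ext_modules also runs 'build_ext'.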
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/build_clib.py b/third_party/python/setuptools/setuptools/_distutils/command/build_clib.py
new file mode 100644
index 0000000000..3e20ef23cd
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/build_clib.py
@@ -0,0 +1,209 @@
+"""distutils.command.build_clib
+
+Implements the Distutils 'build_clib' command, to build a C/C++ library
+that is included in the module distribution and needed by an extension
+module."""
+
+
+# XXX this module has *lots* of code ripped-off quite transparently from
+# build_ext.py -- not surprisingly really, as the work required to build
+# a static library from a collection of C source files is not really all
+# that different from what's required to build a shared object file from
+# a collection of C source files. Nevertheless, I haven't done the
+# necessary refactoring to account for the overlap in code between the
+# two modules, mainly because a number of subtle details changed in the
+# cut 'n paste. Sigh.
+
+import os
+from distutils.core import Command
+from distutils.errors import *
+from distutils.sysconfig import customize_compiler
+from distutils import log
+
+def show_compilers():
+ from distutils.ccompiler import show_compilers
+ show_compilers()
+
+
+class build_clib(Command):
+
+ description = "build C/C++ libraries used by Python extensions"
+
+ user_options = [
+ ('build-clib=', 'b',
+ "directory to build C/C++ libraries to"),
+ ('build-temp=', 't',
+ "directory to put temporary build by-products"),
+ ('debug', 'g',
+ "compile with debugging information"),
+ ('force', 'f',
+ "forcibly build everything (ignore file timestamps)"),
+ ('compiler=', 'c',
+ "specify the compiler type"),
+ ]
+
+ boolean_options = ['debug', 'force']
+
+ help_options = [
+ ('help-compiler', None,
+ "list available compilers", show_compilers),
+ ]
+
+ def initialize_options(self):
+ self.build_clib = None
+ self.build_temp = None
+
+ # List of libraries to build
+ self.libraries = None
+
+ # Compilation options for all libraries
+ self.include_dirs = None
+ self.define = None
+ self.undef = None
+ self.debug = None
+ self.force = 0
+ self.compiler = None
+
+
+ def finalize_options(self):
+ # This might be confusing: both build-clib and build-temp default
+ # to build-temp as defined by the "build" command. This is because
+ # I think that C libraries are really just temporary build
+ # by-products, at least from the point of view of building Python
+ # extensions -- but I want to keep my options open.
+ self.set_undefined_options('build',
+ ('build_temp', 'build_clib'),
+ ('build_temp', 'build_temp'),
+ ('compiler', 'compiler'),
+ ('debug', 'debug'),
+ ('force', 'force'))
+
+ self.libraries = self.distribution.libraries
+ if self.libraries:
+ self.check_library_list(self.libraries)
+
+ if self.include_dirs is None:
+ self.include_dirs = self.distribution.include_dirs or []
+ if isinstance(self.include_dirs, str):
+ self.include_dirs = self.include_dirs.split(os.pathsep)
+
+ # XXX same as for build_ext -- what about 'self.define' and
+ # 'self.undef' ?
+
+
+ def run(self):
+ if not self.libraries:
+ return
+
+ # Yech -- this is cut 'n pasted from build_ext.py!
+ from distutils.ccompiler import new_compiler
+ self.compiler = new_compiler(compiler=self.compiler,
+ dry_run=self.dry_run,
+ force=self.force)
+ customize_compiler(self.compiler)
+
+ if self.include_dirs is not None:
+ self.compiler.set_include_dirs(self.include_dirs)
+ if self.define is not None:
+ # 'define' option is a list of (name,value) tuples
+ for (name,value) in self.define:
+ self.compiler.define_macro(name, value)
+ if self.undef is not None:
+ for macro in self.undef:
+ self.compiler.undefine_macro(macro)
+
+ self.build_libraries(self.libraries)
+
+
+ def check_library_list(self, libraries):
+ """Ensure that the list of libraries is valid.
+
+        `libraries` is presumably provided as a command option 'libraries'.
+ This method checks that it is a list of 2-tuples, where the tuples
+ are (library_name, build_info_dict).
+
+ Raise DistutilsSetupError if the structure is invalid anywhere;
+ just returns otherwise.
+ """
+ if not isinstance(libraries, list):
+ raise DistutilsSetupError(
+ "'libraries' option must be a list of tuples")
+
+ for lib in libraries:
+            if not isinstance(lib, tuple) or len(lib) != 2:
+                raise DistutilsSetupError(
+                    "each element of 'libraries' must be a 2-tuple")
+
+ name, build_info = lib
+
+ if not isinstance(name, str):
+ raise DistutilsSetupError(
+ "first element of each tuple in 'libraries' "
+ "must be a string (the library name)")
+
+ if '/' in name or (os.sep != '/' and os.sep in name):
+ raise DistutilsSetupError("bad library name '%s': "
+ "may not contain directory separators" % lib[0])
+
+ if not isinstance(build_info, dict):
+ raise DistutilsSetupError(
+ "second element of each tuple in 'libraries' "
+ "must be a dictionary (build info)")
+
+
+ def get_library_names(self):
+ # Assume the library list is valid -- 'check_library_list()' is
+ # called from 'finalize_options()', so it should be!
+ if not self.libraries:
+ return None
+
+ lib_names = []
+ for (lib_name, build_info) in self.libraries:
+ lib_names.append(lib_name)
+ return lib_names
+
+
+ def get_source_files(self):
+ self.check_library_list(self.libraries)
+ filenames = []
+ for (lib_name, build_info) in self.libraries:
+ sources = build_info.get('sources')
+ if sources is None or not isinstance(sources, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'libraries' option (library '%s'), "
+ "'sources' must be present and must be "
+ "a list of source filenames" % lib_name)
+
+ filenames.extend(sources)
+ return filenames
+
+
+ def build_libraries(self, libraries):
+ for (lib_name, build_info) in libraries:
+ sources = build_info.get('sources')
+ if sources is None or not isinstance(sources, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'libraries' option (library '%s'), "
+ "'sources' must be present and must be "
+ "a list of source filenames" % lib_name)
+ sources = list(sources)
+
+ log.info("building '%s' library", lib_name)
+
+ # First, compile the source code to object files in the library
+ # directory. (This should probably change to putting object
+ # files in a temporary build directory.)
+ macros = build_info.get('macros')
+ include_dirs = build_info.get('include_dirs')
+ objects = self.compiler.compile(sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ debug=self.debug)
+
+ # Now "link" the object files together into a static library.
+ # (On Unix at least, this isn't really linking -- it just
+ # builds an archive. Whatever.)
+ self.compiler.create_static_lib(objects, lib_name,
+ output_dir=self.build_clib,
+ debug=self.debug)
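+
+    # Illustrative sketch (not part of the upstream code): a setup script
+    # feeds this command via the 'libraries' keyword, e.g. (all values
+    # hypothetical):
+    #   setup(...,
+    #         libraries=[('spam', {'sources': ['spamlib.c'],
+    #                              'macros': [('NDEBUG', '1')],
+    #                              'include_dirs': ['include']})])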
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/build_ext.py b/third_party/python/setuptools/setuptools/_distutils/command/build_ext.py
new file mode 100644
index 0000000000..bbb348331b
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/build_ext.py
@@ -0,0 +1,755 @@
+"""distutils.command.build_ext
+
+Implements the Distutils 'build_ext' command, for building extension
+modules (currently limited to C extensions, should accommodate C++
+extensions ASAP)."""
+
+import contextlib
+import os
+import re
+import sys
+from distutils.core import Command
+from distutils.errors import *
+from distutils.sysconfig import customize_compiler, get_python_version
+from distutils.sysconfig import get_config_h_filename
+from distutils.dep_util import newer_group
+from distutils.extension import Extension
+from distutils.util import get_platform
+from distutils import log
+from . import py37compat
+
+from site import USER_BASE
+
+# An extension name is just a dot-separated list of Python NAMEs (ie.
+# the same as a fully-qualified module name).
+extension_name_re = re.compile(
+    r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
+
+
+def show_compilers():
+ from distutils.ccompiler import show_compilers
+ show_compilers()
+
+
+class build_ext(Command):
+
+ description = "build C/C++ extensions (compile/link to build directory)"
+
+ # XXX thoughts on how to deal with complex command-line options like
+ # these, i.e. how to make it so fancy_getopt can suck them off the
+ # command line and make it look like setup.py defined the appropriate
+ # lists of tuples of what-have-you.
+ # - each command needs a callback to process its command-line options
+ # - Command.__init__() needs access to its share of the whole
+ # command line (must ultimately come from
+ # Distribution.parse_command_line())
+ # - it then calls the current command class' option-parsing
+ # callback to deal with weird options like -D, which have to
+ # parse the option text and churn out some custom data
+ # structure
+ # - that data structure (in this case, a list of 2-tuples)
+ # will then be present in the command object by the time
+ # we get to finalize_options() (i.e. the constructor
+ # takes care of both command-line and client options
+ # in between initialize_options() and finalize_options())
+
+ sep_by = " (separated by '%s')" % os.pathsep
+ user_options = [
+ ('build-lib=', 'b',
+ "directory for compiled extension modules"),
+ ('build-temp=', 't',
+ "directory for temporary files (build by-products)"),
+ ('plat-name=', 'p',
+ "platform name to cross-compile for, if supported "
+ "(default: %s)" % get_platform()),
+ ('inplace', 'i',
+ "ignore build-lib and put compiled extensions into the source " +
+ "directory alongside your pure Python modules"),
+ ('include-dirs=', 'I',
+ "list of directories to search for header files" + sep_by),
+ ('define=', 'D',
+ "C preprocessor macros to define"),
+ ('undef=', 'U',
+ "C preprocessor macros to undefine"),
+ ('libraries=', 'l',
+ "external C libraries to link with"),
+ ('library-dirs=', 'L',
+ "directories to search for external C libraries" + sep_by),
+ ('rpath=', 'R',
+ "directories to search for shared C libraries at runtime"),
+ ('link-objects=', 'O',
+ "extra explicit link objects to include in the link"),
+ ('debug', 'g',
+ "compile/link with debugging information"),
+ ('force', 'f',
+ "forcibly build everything (ignore file timestamps)"),
+ ('compiler=', 'c',
+ "specify the compiler type"),
+ ('parallel=', 'j',
+ "number of parallel build jobs"),
+ ('swig-cpp', None,
+ "make SWIG create C++ files (default is C)"),
+ ('swig-opts=', None,
+ "list of SWIG command line options"),
+ ('swig=', None,
+ "path to the SWIG executable"),
+ ('user', None,
+ "add user include, library and rpath")
+ ]
+
+ boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
+
+ help_options = [
+ ('help-compiler', None,
+ "list available compilers", show_compilers),
+ ]
+
+ def initialize_options(self):
+ self.extensions = None
+ self.build_lib = None
+ self.plat_name = None
+ self.build_temp = None
+ self.inplace = 0
+ self.package = None
+
+ self.include_dirs = None
+ self.define = None
+ self.undef = None
+ self.libraries = None
+ self.library_dirs = None
+ self.rpath = None
+ self.link_objects = None
+ self.debug = None
+ self.force = None
+ self.compiler = None
+ self.swig = None
+ self.swig_cpp = None
+ self.swig_opts = None
+ self.user = None
+ self.parallel = None
+
+ def finalize_options(self):
+ from distutils import sysconfig
+
+ self.set_undefined_options('build',
+ ('build_lib', 'build_lib'),
+ ('build_temp', 'build_temp'),
+ ('compiler', 'compiler'),
+ ('debug', 'debug'),
+ ('force', 'force'),
+ ('parallel', 'parallel'),
+ ('plat_name', 'plat_name'),
+ )
+
+ if self.package is None:
+ self.package = self.distribution.ext_package
+
+ self.extensions = self.distribution.ext_modules
+
+ # Make sure Python's include directories (for Python.h, pyconfig.h,
+ # etc.) are in the include search path.
+ py_include = sysconfig.get_python_inc()
+ plat_py_include = sysconfig.get_python_inc(plat_specific=1)
+ if self.include_dirs is None:
+ self.include_dirs = self.distribution.include_dirs or []
+ if isinstance(self.include_dirs, str):
+ self.include_dirs = self.include_dirs.split(os.pathsep)
+
+ # If in a virtualenv, add its include directory
+ # Issue 16116
+ if sys.exec_prefix != sys.base_exec_prefix:
+ self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))
+
+ # Put the Python "system" include dir at the end, so that
+ # any local include dirs take precedence.
+ self.include_dirs.extend(py_include.split(os.path.pathsep))
+ if plat_py_include != py_include:
+ self.include_dirs.extend(
+ plat_py_include.split(os.path.pathsep))
+
+ self.ensure_string_list('libraries')
+ self.ensure_string_list('link_objects')
+
+ # Life is easier if we're not forever checking for None, so
+ # simplify these options to empty lists if unset
+ if self.libraries is None:
+ self.libraries = []
+ if self.library_dirs is None:
+ self.library_dirs = []
+ elif isinstance(self.library_dirs, str):
+ self.library_dirs = self.library_dirs.split(os.pathsep)
+
+ if self.rpath is None:
+ self.rpath = []
+ elif isinstance(self.rpath, str):
+ self.rpath = self.rpath.split(os.pathsep)
+
+ # for extensions under windows use different directories
+ # for Release and Debug builds.
+ # also Python's library directory must be appended to library_dirs
+ if os.name == 'nt':
+ # the 'libs' directory is for binary installs - we assume that
+ # must be the *native* platform. But we don't really support
+ # cross-compiling via a binary install anyway, so we let it go.
+ self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
+ if sys.base_exec_prefix != sys.prefix: # Issue 16116
+ self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
+ if self.debug:
+ self.build_temp = os.path.join(self.build_temp, "Debug")
+ else:
+ self.build_temp = os.path.join(self.build_temp, "Release")
+
+ # Append the source distribution include and library directories,
+ # this allows distutils on windows to work in the source tree
+ self.include_dirs.append(os.path.dirname(get_config_h_filename()))
+ _sys_home = getattr(sys, '_home', None)
+ if _sys_home:
+ self.library_dirs.append(_sys_home)
+
+ # Use the .lib files for the correct architecture
+ if self.plat_name == 'win32':
+ suffix = 'win32'
+ else:
+ # win-amd64
+ suffix = self.plat_name[4:]
+ new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
+ if suffix:
+ new_lib = os.path.join(new_lib, suffix)
+ self.library_dirs.append(new_lib)
+
+ # For extensions under Cygwin, Python's library directory must be
+ # appended to library_dirs
+ if sys.platform[:6] == 'cygwin':
+ if sys.executable.startswith(os.path.join(sys.exec_prefix, "bin")):
+ # building third party extensions
+ self.library_dirs.append(os.path.join(sys.prefix, "lib",
+ "python" + get_python_version(),
+ "config"))
+ else:
+ # building python standard extensions
+ self.library_dirs.append('.')
+
+ # For building extensions with a shared Python library,
+ # Python's library directory must be appended to library_dirs
+ # See Issues: #1600860, #4366
+ if (sysconfig.get_config_var('Py_ENABLE_SHARED')):
+ if not sysconfig.python_build:
+ # building third party extensions
+ self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
+ else:
+ # building python standard extensions
+ self.library_dirs.append('.')
+
+ # The argument parsing will result in self.define being a string, but
+ # it has to be a list of 2-tuples. All the preprocessor symbols
+ # specified by the 'define' option will be set to '1'. Multiple
+ # symbols can be separated with commas.
+
+ if self.define:
+ defines = self.define.split(',')
+ self.define = [(symbol, '1') for symbol in defines]
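+            # Illustrative note (not part of the upstream code): e.g. a
+            # hypothetical "--define=FOO,BAR" arrives here as "FOO,BAR" and
+            # becomes [('FOO', '1'), ('BAR', '1')].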
+
+ # The option for macros to undefine is also a string from the
+ # option parsing, but has to be a list. Multiple symbols can also
+ # be separated with commas here.
+ if self.undef:
+ self.undef = self.undef.split(',')
+
+ if self.swig_opts is None:
+ self.swig_opts = []
+ else:
+ self.swig_opts = self.swig_opts.split(' ')
+
+ # Finally add the user include and library directories if requested
+ if self.user:
+ user_include = os.path.join(USER_BASE, "include")
+ user_lib = os.path.join(USER_BASE, "lib")
+ if os.path.isdir(user_include):
+ self.include_dirs.append(user_include)
+ if os.path.isdir(user_lib):
+ self.library_dirs.append(user_lib)
+ self.rpath.append(user_lib)
+
+ if isinstance(self.parallel, str):
+ try:
+ self.parallel = int(self.parallel)
+ except ValueError:
+ raise DistutilsOptionError("parallel should be an integer")
+
+ def run(self):
+ from distutils.ccompiler import new_compiler
+
+ # 'self.extensions', as supplied by setup.py, is a list of
+ # Extension instances. See the documentation for Extension (in
+ # distutils.extension) for details.
+ #
+ # For backwards compatibility with Distutils 0.8.2 and earlier, we
+ # also allow the 'extensions' list to be a list of tuples:
+ # (ext_name, build_info)
+ # where build_info is a dictionary containing everything that
+ # Extension instances do except the name, with a few things being
+ # differently named. We convert these 2-tuples to Extension
+ # instances as needed.
+
+ if not self.extensions:
+ return
+
+ # If we were asked to build any C/C++ libraries, make sure that the
+ # directory where we put them is in the library search path for
+ # linking extensions.
+ if self.distribution.has_c_libraries():
+ build_clib = self.get_finalized_command('build_clib')
+ self.libraries.extend(build_clib.get_library_names() or [])
+ self.library_dirs.append(build_clib.build_clib)
+
+ # Setup the CCompiler object that we'll use to do all the
+ # compiling and linking
+ self.compiler = new_compiler(compiler=self.compiler,
+ verbose=self.verbose,
+ dry_run=self.dry_run,
+ force=self.force)
+ customize_compiler(self.compiler)
+ # If we are cross-compiling, init the compiler now (if we are not
+ # cross-compiling, init would not hurt, but people may rely on
+ # late initialization of compiler even if they shouldn't...)
+ if os.name == 'nt' and self.plat_name != get_platform():
+ self.compiler.initialize(self.plat_name)
+
+ # And make sure that any compile/link-related options (which might
+ # come from the command-line or from the setup script) are set in
+ # that CCompiler object -- that way, they automatically apply to
+ # all compiling and linking done here.
+ if self.include_dirs is not None:
+ self.compiler.set_include_dirs(self.include_dirs)
+ if self.define is not None:
+ # 'define' option is a list of (name,value) tuples
+ for (name, value) in self.define:
+ self.compiler.define_macro(name, value)
+ if self.undef is not None:
+ for macro in self.undef:
+ self.compiler.undefine_macro(macro)
+ if self.libraries is not None:
+ self.compiler.set_libraries(self.libraries)
+ if self.library_dirs is not None:
+ self.compiler.set_library_dirs(self.library_dirs)
+ if self.rpath is not None:
+ self.compiler.set_runtime_library_dirs(self.rpath)
+ if self.link_objects is not None:
+ self.compiler.set_link_objects(self.link_objects)
+
+ # Now actually compile and link everything.
+ self.build_extensions()
+
+ def check_extensions_list(self, extensions):
+ """Ensure that the list of extensions (presumably provided as a
+ command option 'extensions') is valid, i.e. it is a list of
+ Extension objects. We also support the old-style list of 2-tuples,
+ where the tuples are (ext_name, build_info), which are converted to
+ Extension instances here.
+
+ Raise DistutilsSetupError if the structure is invalid anywhere;
+ just returns otherwise.
+ """
+ if not isinstance(extensions, list):
+ raise DistutilsSetupError(
+ "'ext_modules' option must be a list of Extension instances")
+
+ for i, ext in enumerate(extensions):
+ if isinstance(ext, Extension):
+ continue # OK! (assume type-checking done
+ # by Extension constructor)
+
+ if not isinstance(ext, tuple) or len(ext) != 2:
+ raise DistutilsSetupError(
+ "each element of 'ext_modules' option must be an "
+ "Extension instance or 2-tuple")
+
+ ext_name, build_info = ext
+
+ log.warn("old-style (ext_name, build_info) tuple found in "
+ "ext_modules for extension '%s' "
+ "-- please convert to Extension instance", ext_name)
+
+ if not (isinstance(ext_name, str) and
+ extension_name_re.match(ext_name)):
+ raise DistutilsSetupError(
+ "first element of each tuple in 'ext_modules' "
+ "must be the extension name (a string)")
+
+ if not isinstance(build_info, dict):
+ raise DistutilsSetupError(
+ "second element of each tuple in 'ext_modules' "
+ "must be a dictionary (build info)")
+
+            # OK, the (ext_name, build_info) tuple is type-safe: convert it
+ # to an Extension instance.
+ ext = Extension(ext_name, build_info['sources'])
+
+ # Easy stuff: one-to-one mapping from dict elements to
+ # instance attributes.
+ for key in ('include_dirs', 'library_dirs', 'libraries',
+ 'extra_objects', 'extra_compile_args',
+ 'extra_link_args'):
+ val = build_info.get(key)
+ if val is not None:
+ setattr(ext, key, val)
+
+ # Medium-easy stuff: same syntax/semantics, different names.
+ ext.runtime_library_dirs = build_info.get('rpath')
+ if 'def_file' in build_info:
+ log.warn("'def_file' element of build info dict "
+ "no longer supported")
+
+ # Non-trivial stuff: 'macros' split into 'define_macros'
+ # and 'undef_macros'.
+ macros = build_info.get('macros')
+ if macros:
+ ext.define_macros = []
+ ext.undef_macros = []
+ for macro in macros:
+ if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
+ raise DistutilsSetupError(
+ "'macros' element of build info dict "
+ "must be 1- or 2-tuple")
+ if len(macro) == 1:
+ ext.undef_macros.append(macro[0])
+ elif len(macro) == 2:
+ ext.define_macros.append(macro)
+
+ extensions[i] = ext
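+
+        # Illustrative sketch (not part of the upstream code): a legacy
+        # entry such as (all names hypothetical)
+        #   ('spam', {'sources': ['spam.c'], 'rpath': ['/opt/lib'],
+        #             'macros': [('FOO', '1'), ('BAR',)]})
+        # is replaced in place by Extension('spam', ['spam.c']) with
+        # runtime_library_dirs=['/opt/lib'], define_macros=[('FOO', '1')]
+        # and undef_macros=['BAR'].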
+
+ def get_source_files(self):
+ self.check_extensions_list(self.extensions)
+ filenames = []
+
+ # Wouldn't it be neat if we knew the names of header files too...
+ for ext in self.extensions:
+ filenames.extend(ext.sources)
+ return filenames
+
+ def get_outputs(self):
+ # Sanity check the 'extensions' list -- can't assume this is being
+ # done in the same run as a 'build_extensions()' call (in fact, we
+ # can probably assume that it *isn't*!).
+ self.check_extensions_list(self.extensions)
+
+ # And build the list of output (built) filenames. Note that this
+ # ignores the 'inplace' flag, and assumes everything goes in the
+ # "build" tree.
+ outputs = []
+ for ext in self.extensions:
+ outputs.append(self.get_ext_fullpath(ext.name))
+ return outputs
+
+ def build_extensions(self):
+ # First, sanity-check the 'extensions' list
+ self.check_extensions_list(self.extensions)
+ if self.parallel:
+ self._build_extensions_parallel()
+ else:
+ self._build_extensions_serial()
+
+ def _build_extensions_parallel(self):
+ workers = self.parallel
+ if self.parallel is True:
+ workers = os.cpu_count() # may return None
+ try:
+ from concurrent.futures import ThreadPoolExecutor
+ except ImportError:
+ workers = None
+
+ if workers is None:
+ self._build_extensions_serial()
+ return
+
+ with ThreadPoolExecutor(max_workers=workers) as executor:
+ futures = [executor.submit(self.build_extension, ext)
+ for ext in self.extensions]
+ for ext, fut in zip(self.extensions, futures):
+ with self._filter_build_errors(ext):
+ fut.result()
+
+ def _build_extensions_serial(self):
+ for ext in self.extensions:
+ with self._filter_build_errors(ext):
+ self.build_extension(ext)
+
+ @contextlib.contextmanager
+ def _filter_build_errors(self, ext):
+ try:
+ yield
+ except (CCompilerError, DistutilsError, CompileError) as e:
+ if not ext.optional:
+ raise
+ self.warn('building extension "%s" failed: %s' %
+ (ext.name, e))
+
+ def build_extension(self, ext):
+ sources = ext.sources
+ if sources is None or not isinstance(sources, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'ext_modules' option (extension '%s'), "
+ "'sources' must be present and must be "
+ "a list of source filenames" % ext.name)
+ # sort to make the resulting .so file build reproducible
+ sources = sorted(sources)
+
+ ext_path = self.get_ext_fullpath(ext.name)
+ depends = sources + ext.depends
+ if not (self.force or newer_group(depends, ext_path, 'newer')):
+ log.debug("skipping '%s' extension (up-to-date)", ext.name)
+ return
+ else:
+ log.info("building '%s' extension", ext.name)
+
+ # First, scan the sources for SWIG definition files (.i), run
+ # SWIG on 'em to create .c files, and modify the sources list
+ # accordingly.
+ sources = self.swig_sources(sources, ext)
+
+ # Next, compile the source code to object files.
+
+ # XXX not honouring 'define_macros' or 'undef_macros' -- the
+ # CCompiler API needs to change to accommodate this, and I
+ # want to do one thing at a time!
+
+ # Two possible sources for extra compiler arguments:
+ # - 'extra_compile_args' in Extension object
+ # - CFLAGS environment variable (not particularly
+ # elegant, but people seem to expect it and I
+ # guess it's useful)
+ # The environment variable should take precedence, and
+ # any sensible compiler will give precedence to later
+ # command line args. Hence we combine them in order:
+ extra_args = ext.extra_compile_args or []
+
+ macros = ext.define_macros[:]
+ for undef in ext.undef_macros:
+ macros.append((undef,))
+
+ objects = self.compiler.compile(sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=ext.include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args,
+ depends=ext.depends)
+
+        # XXX outdated variable, kept here in case third-party code
+        # needs it.
+ self._built_objects = objects[:]
+
+ # Now link the object files together into a "shared object" --
+ # of course, first we have to figure out all the other things
+ # that go into the mix.
+ if ext.extra_objects:
+ objects.extend(ext.extra_objects)
+ extra_args = ext.extra_link_args or []
+
+ # Detect target language, if not provided
+ language = ext.language or self.compiler.detect_language(sources)
+
+ self.compiler.link_shared_object(
+ objects, ext_path,
+ libraries=self.get_libraries(ext),
+ library_dirs=ext.library_dirs,
+ runtime_library_dirs=ext.runtime_library_dirs,
+ extra_postargs=extra_args,
+ export_symbols=self.get_export_symbols(ext),
+ debug=self.debug,
+ build_temp=self.build_temp,
+ target_lang=language)
+
+ def swig_sources(self, sources, extension):
+ """Walk the list of source files in 'sources', looking for SWIG
+ interface (.i) files. Run SWIG on all that are found, and
+ return a modified 'sources' list with SWIG source files replaced
+ by the generated C (or C++) files.
+ """
+ new_sources = []
+ swig_sources = []
+ swig_targets = {}
+
+ # XXX this drops generated C/C++ files into the source tree, which
+ # is fine for developers who want to distribute the generated
+ # source -- but there should be an option to put SWIG output in
+ # the temp dir.
+
+ if self.swig_cpp:
+ log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
+
+ if self.swig_cpp or ('-c++' in self.swig_opts) or \
+ ('-c++' in extension.swig_opts):
+ target_ext = '.cpp'
+ else:
+ target_ext = '.c'
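+        # e.g. "interface.i" becomes "interface_wrap.c" (or
+        # "interface_wrap.cpp" in C++ mode).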
+
+ for source in sources:
+ (base, ext) = os.path.splitext(source)
+ if ext == ".i": # SWIG interface file
+ new_sources.append(base + '_wrap' + target_ext)
+ swig_sources.append(source)
+ swig_targets[source] = new_sources[-1]
+ else:
+ new_sources.append(source)
+
+ if not swig_sources:
+ return new_sources
+
+ swig = self.swig or self.find_swig()
+ swig_cmd = [swig, "-python"]
+ swig_cmd.extend(self.swig_opts)
+ if self.swig_cpp:
+ swig_cmd.append("-c++")
+
+ # Do not override commandline arguments
+ if not self.swig_opts:
+ for o in extension.swig_opts:
+ swig_cmd.append(o)
+
+ for source in swig_sources:
+ target = swig_targets[source]
+ log.info("swigging %s to %s", source, target)
+ self.spawn(swig_cmd + ["-o", target, source])
+
+ return new_sources
+
+ def find_swig(self):
+ """Return the name of the SWIG executable. On Unix, this is
+ just "swig" -- it should be in the PATH. Tries a bit harder on
+ Windows.
+ """
+ if os.name == "posix":
+ return "swig"
+ elif os.name == "nt":
+ # Look for SWIG in its standard installation directory on
+ # Windows (or so I presume!). If we find it there, great;
+ # if not, act like Unix and assume it's in the PATH.
+ for vers in ("1.3", "1.2", "1.1"):
+ fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
+ if os.path.isfile(fn):
+ return fn
+ else:
+ return "swig.exe"
+ else:
+ raise DistutilsPlatformError(
+ "I don't know how to find (much less run) SWIG "
+ "on platform '%s'" % os.name)
+
+ # -- Name generators -----------------------------------------------
+ # (extension names, filenames, whatever)
+ def get_ext_fullpath(self, ext_name):
+ """Returns the path of the filename for a given extension.
+
+ The file is located in `build_lib` or directly in the package
+ (inplace option).
+ """
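+        # Illustrative example, assuming EXT_SUFFIX is ".so":
+        #   get_ext_fullpath("pkg.mod") -> "<build_lib>/pkg/mod.so",
+        #   or "<package_dir>/mod.so" when --inplace is used.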
+ fullname = self.get_ext_fullname(ext_name)
+ modpath = fullname.split('.')
+ filename = self.get_ext_filename(modpath[-1])
+
+ if not self.inplace:
+            # no further work needed; return
+            #   build_dir/package/path/filename
+ filename = os.path.join(*modpath[:-1]+[filename])
+ return os.path.join(self.build_lib, filename)
+
+        # the inplace option requires finding the package directory;
+        # use the 'build_py' command for that
+ package = '.'.join(modpath[0:-1])
+ build_py = self.get_finalized_command('build_py')
+ package_dir = os.path.abspath(build_py.get_package_dir(package))
+
+ # returning
+ # package_dir/filename
+ return os.path.join(package_dir, filename)
+
+ def get_ext_fullname(self, ext_name):
+ """Returns the fullname of a given extension name.
+
+ Adds the `package.` prefix"""
+ if self.package is None:
+ return ext_name
+ else:
+ return self.package + '.' + ext_name
+
+ def get_ext_filename(self, ext_name):
+ r"""Convert the name of an extension (eg. "foo.bar") into the name
+ of the file from which it will be loaded (eg. "foo/bar.so", or
+ "foo\bar.pyd").
+ """
+ from distutils.sysconfig import get_config_var
+ ext_path = ext_name.split('.')
+ ext_suffix = get_config_var('EXT_SUFFIX')
+ return os.path.join(*ext_path) + ext_suffix
+
+ def get_export_symbols(self, ext):
+ """Return the list of symbols that a shared extension has to
+ export. This either uses 'ext.export_symbols' or, if it's not
+ provided, "PyInit_" + module_name. Only relevant on Windows, where
+ the .pyd file (DLL) must export the module "PyInit_" function.
+ """
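+        # e.g. extension "pkg.spam" exports "PyInit_spam"; a non-ASCII
+        # module name gets a "PyInitU_" + punycode-encoded suffix
+        # (PEP 489).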
+ suffix = '_' + ext.name.split('.')[-1]
+ try:
+ # Unicode module name support as defined in PEP-489
+ # https://www.python.org/dev/peps/pep-0489/#export-hook-name
+ suffix.encode('ascii')
+ except UnicodeEncodeError:
+ suffix = 'U' + suffix.encode('punycode').replace(b'-', b'_').decode('ascii')
+
+ initfunc_name = "PyInit" + suffix
+ if initfunc_name not in ext.export_symbols:
+ ext.export_symbols.append(initfunc_name)
+ return ext.export_symbols
+
+ def get_libraries(self, ext):
+ """Return the list of libraries to link against when building a
+ shared extension. On most platforms, this is just 'ext.libraries';
+ on Windows, we add the Python library (eg. python20.dll).
+ """
+ # The python library is always needed on Windows. For MSVC, this
+ # is redundant, since the library is mentioned in a pragma in
+ # pyconfig.h that MSVC groks. The other Windows compilers all seem
+ # to need it mentioned explicitly, though, so that's what we do.
+ # Append '_d' to the python import library on debug builds.
+ if sys.platform == "win32":
+ from distutils._msvccompiler import MSVCCompiler
+ if not isinstance(self.compiler, MSVCCompiler):
+ template = "python%d%d"
+ if self.debug:
+ template = template + '_d'
+ pythonlib = (template %
+ (sys.hexversion >> 24, (sys.hexversion >> 16) & 0xff))
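+                # e.g. "python38" for Python 3.8, or "python38_d" for a
+                # debug build (illustrative).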
+ # don't extend ext.libraries, it may be shared with other
+ # extensions, it is a reference to the original list
+ return ext.libraries + [pythonlib]
+ else:
+ # On Android only the main executable and LD_PRELOADs are considered
+ # to be RTLD_GLOBAL, all the dependencies of the main executable
+ # remain RTLD_LOCAL and so the shared libraries must be linked with
+ # libpython when python is built with a shared python library (issue
+ # bpo-21536).
+ # On Cygwin (and if required, other POSIX-like platforms based on
+ # Windows like MinGW) it is simply necessary that all symbols in
+ # shared libraries are resolved at link time.
+ from distutils.sysconfig import get_config_var
+ link_libpython = False
+ if get_config_var('Py_ENABLE_SHARED'):
+ # A native build on an Android device or on Cygwin
+ if hasattr(sys, 'getandroidapilevel'):
+ link_libpython = True
+ elif sys.platform == 'cygwin':
+ link_libpython = True
+ elif '_PYTHON_HOST_PLATFORM' in os.environ:
+ # We are cross-compiling for one of the relevant platforms
+ if get_config_var('ANDROID_API_LEVEL') != 0:
+ link_libpython = True
+ elif get_config_var('MACHDEP') == 'cygwin':
+ link_libpython = True
+
+ if link_libpython:
+ ldversion = get_config_var('LDVERSION')
+ return ext.libraries + ['python' + ldversion]
+
+ return ext.libraries + py37compat.pythonlib()
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/build_py.py b/third_party/python/setuptools/setuptools/_distutils/command/build_py.py
new file mode 100644
index 0000000000..edc2171cd1
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/build_py.py
@@ -0,0 +1,416 @@
+"""distutils.command.build_py
+
+Implements the Distutils 'build_py' command."""
+
+import os
+import importlib.util
+import sys
+import glob
+
+from distutils.core import Command
+from distutils.errors import *
+from distutils.util import convert_path, Mixin2to3
+from distutils import log
+
+class build_py (Command):
+
+ description = "\"build\" pure Python modules (copy to build directory)"
+
+ user_options = [
+ ('build-lib=', 'd', "directory to \"build\" (copy) to"),
+ ('compile', 'c', "compile .py to .pyc"),
+ ('no-compile', None, "don't compile .py files [default]"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+ ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+ ]
+
+ boolean_options = ['compile', 'force']
+ negative_opt = {'no-compile' : 'compile'}
+
+ def initialize_options(self):
+ self.build_lib = None
+ self.py_modules = None
+ self.package = None
+ self.package_data = None
+ self.package_dir = None
+ self.compile = 0
+ self.optimize = 0
+ self.force = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build',
+ ('build_lib', 'build_lib'),
+ ('force', 'force'))
+
+ # Get the distribution options that are aliases for build_py
+ # options -- list of packages and list of modules.
+ self.packages = self.distribution.packages
+ self.py_modules = self.distribution.py_modules
+ self.package_data = self.distribution.package_data
+ self.package_dir = {}
+ if self.distribution.package_dir:
+ for name, path in self.distribution.package_dir.items():
+ self.package_dir[name] = convert_path(path)
+ self.data_files = self.get_data_files()
+
+ # Ick, copied straight from install_lib.py (fancy_getopt needs a
+ # type system! Hell, *everything* needs a type system!!!)
+ if not isinstance(self.optimize, int):
+ try:
+ self.optimize = int(self.optimize)
+ assert 0 <= self.optimize <= 2
+ except (ValueError, AssertionError):
+ raise DistutilsOptionError("optimize must be 0, 1, or 2")
+
+ def run(self):
+ # XXX copy_file by default preserves atime and mtime. IMHO this is
+ # the right thing to do, but perhaps it should be an option -- in
+ # particular, a site administrator might want installed files to
+ # reflect the time of installation rather than the last
+ # modification time before the installed release.
+
+ # XXX copy_file by default preserves mode, which appears to be the
+ # wrong thing to do: if a file is read-only in the working
+ # directory, we want it to be installed read/write so that the next
+ # installation of the same module distribution can overwrite it
+ # without problems. (This might be a Unix-specific issue.) Thus
+ # we turn off 'preserve_mode' when copying to the build directory,
+ # since the build directory is supposed to be exactly what the
+ # installation will look like (ie. we preserve mode when
+ # installing).
+
+ # Two options control which modules will be installed: 'packages'
+ # and 'py_modules'. The former lets us work with whole packages, not
+ # specifying individual modules at all; the latter is for
+ # specifying modules one-at-a-time.
+
+ if self.py_modules:
+ self.build_modules()
+ if self.packages:
+ self.build_packages()
+ self.build_package_data()
+
+ self.byte_compile(self.get_outputs(include_bytecode=0))
+
+ def get_data_files(self):
+ """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+ data = []
+ if not self.packages:
+ return data
+ for package in self.packages:
+ # Locate package source directory
+ src_dir = self.get_package_dir(package)
+
+ # Compute package build directory
+ build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+ # Length of path to strip from found files
+ plen = 0
+ if src_dir:
+ plen = len(src_dir)+1
+
+ # Strip directory from globbed filenames
+ filenames = [
+ file[plen:] for file in self.find_data_files(package, src_dir)
+ ]
+ data.append((package, src_dir, build_dir, filenames))
+ return data
+
+ def find_data_files(self, package, src_dir):
+ """Return filenames for package's data files in 'src_dir'"""
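+        # Illustrative example:
+        #   package_data={'': ['*.txt'], 'pkg': ['data/*.json']}
+        # contributes both the global and the per-package patterns here.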
+ globs = (self.package_data.get('', [])
+ + self.package_data.get(package, []))
+ files = []
+ for pattern in globs:
+ # Each pattern has to be converted to a platform-specific path
+ filelist = glob.glob(os.path.join(glob.escape(src_dir), convert_path(pattern)))
+ # Files that match more than one pattern are only added once
+ files.extend([fn for fn in filelist if fn not in files
+ and os.path.isfile(fn)])
+ return files
+
+ def build_package_data(self):
+ """Copy data files into build directory"""
+ lastdir = None
+ for package, src_dir, build_dir, filenames in self.data_files:
+ for filename in filenames:
+ target = os.path.join(build_dir, filename)
+ self.mkpath(os.path.dirname(target))
+ self.copy_file(os.path.join(src_dir, filename), target,
+ preserve_mode=False)
+
+ def get_package_dir(self, package):
+ """Return the directory, relative to the top of the source
+ distribution, where package 'package' should be found
+ (at least according to the 'package_dir' option, if any)."""
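+        # Illustrative example: with package_dir={'': 'src'}, package
+        # "pkg.sub" resolves to os.path.join('src', 'pkg', 'sub').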
+ path = package.split('.')
+
+ if not self.package_dir:
+ if path:
+ return os.path.join(*path)
+ else:
+ return ''
+ else:
+ tail = []
+ while path:
+ try:
+ pdir = self.package_dir['.'.join(path)]
+ except KeyError:
+ tail.insert(0, path[-1])
+ del path[-1]
+ else:
+ tail.insert(0, pdir)
+ return os.path.join(*tail)
+ else:
+ # Oops, got all the way through 'path' without finding a
+ # match in package_dir. If package_dir defines a directory
+ # for the root (nameless) package, then fallback on it;
+ # otherwise, we might as well have not consulted
+ # package_dir at all, as we just use the directory implied
+ # by 'tail' (which should be the same as the original value
+ # of 'path' at this point).
+ pdir = self.package_dir.get('')
+ if pdir is not None:
+ tail.insert(0, pdir)
+
+ if tail:
+ return os.path.join(*tail)
+ else:
+ return ''
+
+ def check_package(self, package, package_dir):
+ # Empty dir name means current directory, which we can probably
+ # assume exists. Also, os.path.exists and isdir don't know about
+ # my "empty string means current dir" convention, so we have to
+ # circumvent them.
+ if package_dir != "":
+ if not os.path.exists(package_dir):
+ raise DistutilsFileError(
+ "package directory '%s' does not exist" % package_dir)
+ if not os.path.isdir(package_dir):
+ raise DistutilsFileError(
+ "supposed package directory '%s' exists, "
+ "but is not a directory" % package_dir)
+
+ # Require __init__.py for all but the "root package"
+ if package:
+ init_py = os.path.join(package_dir, "__init__.py")
+ if os.path.isfile(init_py):
+ return init_py
+ else:
+ log.warn(("package init file '%s' not found " +
+ "(or not a regular file)"), init_py)
+
+ # Either not in a package at all (__init__.py not expected), or
+ # __init__.py doesn't exist -- so don't return the filename.
+ return None
+
+ def check_module(self, module, module_file):
+ if not os.path.isfile(module_file):
+ log.warn("file %s (for module %s) not found", module_file, module)
+ return False
+ else:
+ return True
+
+ def find_package_modules(self, package, package_dir):
+ self.check_package(package, package_dir)
+ module_files = glob.glob(os.path.join(glob.escape(package_dir), "*.py"))
+ modules = []
+ setup_script = os.path.abspath(self.distribution.script_name)
+
+ for f in module_files:
+ abs_f = os.path.abspath(f)
+ if abs_f != setup_script:
+ module = os.path.splitext(os.path.basename(f))[0]
+ modules.append((package, module, f))
+ else:
+ self.debug_print("excluding %s" % setup_script)
+ return modules
+
+ def find_modules(self):
+ """Finds individually-specified Python modules, ie. those listed by
+ module name in 'self.py_modules'. Returns a list of tuples (package,
+ module_base, filename): 'package' is a tuple of the path through
+ package-space to the module; 'module_base' is the bare (no
+ packages, no dots) module name, and 'filename' is the path to the
+ ".py" file (relative to the distribution root) that implements the
+ module.
+ """
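+        # e.g. py_modules=['pkg.mod'] typically yields
+        # ('pkg', 'mod', 'pkg/mod.py'), plus
+        # ('pkg', '__init__', 'pkg/__init__.py') when the init file
+        # exists (illustrative).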
+ # Map package names to tuples of useful info about the package:
+ # (package_dir, checked)
+ # package_dir - the directory where we'll find source files for
+ # this package
+ # checked - true if we have checked that the package directory
+ # is valid (exists, contains __init__.py, ... ?)
+ packages = {}
+
+ # List of (package, module, filename) tuples to return
+ modules = []
+
+ # We treat modules-in-packages almost the same as toplevel modules,
+ # just the "package" for a toplevel is empty (either an empty
+ # string or empty list, depending on context). Differences:
+ # - don't check for __init__.py in directory for empty package
+ for module in self.py_modules:
+ path = module.split('.')
+ package = '.'.join(path[0:-1])
+ module_base = path[-1]
+
+ try:
+ (package_dir, checked) = packages[package]
+ except KeyError:
+ package_dir = self.get_package_dir(package)
+ checked = 0
+
+ if not checked:
+ init_py = self.check_package(package, package_dir)
+ packages[package] = (package_dir, 1)
+ if init_py:
+ modules.append((package, "__init__", init_py))
+
+ # XXX perhaps we should also check for just .pyc files
+ # (so greedy closed-source bastards can distribute Python
+ # modules too)
+ module_file = os.path.join(package_dir, module_base + ".py")
+ if not self.check_module(module, module_file):
+ continue
+
+ modules.append((package, module_base, module_file))
+
+ return modules
+
+ def find_all_modules(self):
+ """Compute the list of all modules that will be built, whether
+ they are specified one-module-at-a-time ('self.py_modules') or
+ by whole packages ('self.packages'). Return a list of tuples
+ (package, module, module_file), just like 'find_modules()' and
+ 'find_package_modules()' do."""
+ modules = []
+ if self.py_modules:
+ modules.extend(self.find_modules())
+ if self.packages:
+ for package in self.packages:
+ package_dir = self.get_package_dir(package)
+ m = self.find_package_modules(package, package_dir)
+ modules.extend(m)
+ return modules
+
+ def get_source_files(self):
+ return [module[-1] for module in self.find_all_modules()]
+
+ def get_module_outfile(self, build_dir, package, module):
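+        # e.g. build_dir="build/lib", package=["pkg"], module="mod"
+        # -> "build/lib/pkg/mod.py" (illustrative).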
+ outfile_path = [build_dir] + list(package) + [module + ".py"]
+ return os.path.join(*outfile_path)
+
+ def get_outputs(self, include_bytecode=1):
+ modules = self.find_all_modules()
+ outputs = []
+ for (package, module, module_file) in modules:
+ package = package.split('.')
+ filename = self.get_module_outfile(self.build_lib, package, module)
+ outputs.append(filename)
+ if include_bytecode:
+ if self.compile:
+ outputs.append(importlib.util.cache_from_source(
+ filename, optimization=''))
+ if self.optimize > 0:
+ outputs.append(importlib.util.cache_from_source(
+ filename, optimization=self.optimize))
+
+ outputs += [
+ os.path.join(build_dir, filename)
+ for package, src_dir, build_dir, filenames in self.data_files
+ for filename in filenames
+ ]
+
+ return outputs
+
+ def build_module(self, module, module_file, package):
+ if isinstance(package, str):
+ package = package.split('.')
+ elif not isinstance(package, (list, tuple)):
+ raise TypeError(
+ "'package' must be a string (dot-separated), list, or tuple")
+
+ # Now put the module source file into the "build" area -- this is
+ # easy, we just copy it somewhere under self.build_lib (the build
+ # directory for Python source).
+ outfile = self.get_module_outfile(self.build_lib, package, module)
+ dir = os.path.dirname(outfile)
+ self.mkpath(dir)
+ return self.copy_file(module_file, outfile, preserve_mode=0)
+
+ def build_modules(self):
+ modules = self.find_modules()
+ for (package, module, module_file) in modules:
+ # Now "build" the module -- ie. copy the source file to
+ # self.build_lib (the build directory for Python source).
+ # (Actually, it gets copied to the directory for this package
+ # under self.build_lib.)
+ self.build_module(module, module_file, package)
+
+ def build_packages(self):
+ for package in self.packages:
+ # Get list of (package, module, module_file) tuples based on
+ # scanning the package directory. 'package' is only included
+ # in the tuple so that 'find_modules()' and
+            # 'find_package_modules()' have a consistent interface; it's
+ # ignored here (apart from a sanity check). Also, 'module' is
+ # the *unqualified* module name (ie. no dots, no package -- we
+ # already know its package!), and 'module_file' is the path to
+ # the .py file, relative to the current directory
+ # (ie. including 'package_dir').
+ package_dir = self.get_package_dir(package)
+ modules = self.find_package_modules(package, package_dir)
+
+ # Now loop over the modules we found, "building" each one (just
+ # copy it to self.build_lib).
+ for (package_, module, module_file) in modules:
+ assert package == package_
+ self.build_module(module, module_file, package)
+
+ def byte_compile(self, files):
+ if sys.dont_write_bytecode:
+ self.warn('byte-compiling is disabled, skipping.')
+ return
+
+ from distutils.util import byte_compile
+ prefix = self.build_lib
+ if prefix[-1] != os.sep:
+ prefix = prefix + os.sep
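+        # 'prefix' (the build directory) is stripped from each source
+        # path before it is recorded in the generated .pyc files, so the
+        # embedded paths omit the build tree.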
+
+ # XXX this code is essentially the same as the 'byte_compile()
+ # method of the "install_lib" command, except for the determination
+ # of the 'prefix' string. Hmmm.
+ if self.compile:
+ byte_compile(files, optimize=0,
+ force=self.force, prefix=prefix, dry_run=self.dry_run)
+ if self.optimize > 0:
+ byte_compile(files, optimize=self.optimize,
+ force=self.force, prefix=prefix, dry_run=self.dry_run)
+
+class build_py_2to3(build_py, Mixin2to3):
+ def run(self):
+ self.updated_files = []
+
+ # Base class code
+ if self.py_modules:
+ self.build_modules()
+ if self.packages:
+ self.build_packages()
+ self.build_package_data()
+
+ # 2to3
+ self.run_2to3(self.updated_files)
+
+ # Remaining base class code
+ self.byte_compile(self.get_outputs(include_bytecode=0))
+
+ def build_module(self, module, module_file, package):
+ res = build_py.build_module(self, module, module_file, package)
+ if res[1]:
+ # file was copied
+ self.updated_files.append(res[0])
+ return res
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/build_scripts.py b/third_party/python/setuptools/setuptools/_distutils/command/build_scripts.py
new file mode 100644
index 0000000000..ccc70e6465
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/build_scripts.py
@@ -0,0 +1,160 @@
+"""distutils.command.build_scripts
+
+Implements the Distutils 'build_scripts' command."""
+
+import os, re
+from stat import ST_MODE
+from distutils import sysconfig
+from distutils.core import Command
+from distutils.dep_util import newer
+from distutils.util import convert_path, Mixin2to3
+from distutils import log
+import tokenize
+
+# check if Python is called on the first line with this expression
+first_line_re = re.compile(b'^#!.*python[0-9.]*([ \t].*)?$')
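+# e.g. it matches b"#!/usr/bin/python3.8" and b"#!/usr/bin/env python";
+# group(1) captures any trailing interpreter options such as b" -u".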
+
+class build_scripts(Command):
+
+ description = "\"build\" scripts (copy and fixup #! line)"
+
+ user_options = [
+ ('build-dir=', 'd', "directory to \"build\" (copy) to"),
+        ('force', 'f', "forcibly build everything (ignore file timestamps)"),
+ ('executable=', 'e', "specify final destination interpreter path"),
+ ]
+
+ boolean_options = ['force']
+
+
+ def initialize_options(self):
+ self.build_dir = None
+ self.scripts = None
+ self.force = None
+ self.executable = None
+ self.outfiles = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build',
+ ('build_scripts', 'build_dir'),
+ ('force', 'force'),
+ ('executable', 'executable'))
+ self.scripts = self.distribution.scripts
+
+ def get_source_files(self):
+ return self.scripts
+
+ def run(self):
+ if not self.scripts:
+ return
+ self.copy_scripts()
+
+
+ def copy_scripts(self):
+ r"""Copy each script listed in 'self.scripts'; if it's marked as a
+ Python script in the Unix way (first line matches 'first_line_re',
+ ie. starts with "\#!" and contains "python"), then adjust the first
+ line to refer to the current Python interpreter as we copy.
+ """
+ self.mkpath(self.build_dir)
+ outfiles = []
+ updated_files = []
+ for script in self.scripts:
+ adjust = False
+ script = convert_path(script)
+ outfile = os.path.join(self.build_dir, os.path.basename(script))
+ outfiles.append(outfile)
+
+ if not self.force and not newer(script, outfile):
+ log.debug("not copying %s (up-to-date)", script)
+ continue
+
+ # Always open the file, but ignore failures in dry-run mode --
+ # that way, we'll get accurate feedback if we can read the
+ # script.
+ try:
+ f = open(script, "rb")
+ except OSError:
+ if not self.dry_run:
+ raise
+ f = None
+ else:
+ encoding, lines = tokenize.detect_encoding(f.readline)
+ f.seek(0)
+ first_line = f.readline()
+ if not first_line:
+ self.warn("%s is an empty file (skipping)" % script)
+ continue
+
+ match = first_line_re.match(first_line)
+ if match:
+ adjust = True
+ post_interp = match.group(1) or b''
+
+ if adjust:
+ log.info("copying and adjusting %s -> %s", script,
+ self.build_dir)
+ updated_files.append(outfile)
+ if not self.dry_run:
+ if not sysconfig.python_build:
+ executable = self.executable
+ else:
+ executable = os.path.join(
+ sysconfig.get_config_var("BINDIR"),
+ "python%s%s" % (sysconfig.get_config_var("VERSION"),
+ sysconfig.get_config_var("EXE")))
+ executable = os.fsencode(executable)
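+                    # e.g. a script starting b"#!/usr/bin/env python -u\n"
+                    # is rewritten to b"#!/usr/local/bin/python3.9 -u\n"
+                    # (illustrative paths).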
+ shebang = b"#!" + executable + post_interp + b"\n"
+                    # The Python parser reads a script as UTF-8 until it
+                    # encounters a #coding:xxx cookie. The shebang must be
+                    # the first line of the file, so the cookie cannot come
+                    # before it; hence the shebang has to be decodable from
+                    # UTF-8.
+ try:
+ shebang.decode('utf-8')
+ except UnicodeDecodeError:
+ raise ValueError(
+ "The shebang ({!r}) is not decodable "
+ "from utf-8".format(shebang))
+ # If the script is encoded to a custom encoding (use a
+ # #coding:xxx cookie), the shebang has to be decodable from
+ # the script encoding too.
+ try:
+ shebang.decode(encoding)
+ except UnicodeDecodeError:
+ raise ValueError(
+ "The shebang ({!r}) is not decodable "
+ "from the script encoding ({})"
+ .format(shebang, encoding))
+ with open(outfile, "wb") as outf:
+ outf.write(shebang)
+ outf.writelines(f.readlines())
+ if f:
+ f.close()
+ else:
+ if f:
+ f.close()
+ updated_files.append(outfile)
+ self.copy_file(script, outfile)
+
+ if os.name == 'posix':
+ for file in outfiles:
+ if self.dry_run:
+ log.info("changing mode of %s", file)
+ else:
+ oldmode = os.stat(file)[ST_MODE] & 0o7777
+ newmode = (oldmode | 0o555) & 0o7777
+ if newmode != oldmode:
+ log.info("changing mode of %s from %o to %o",
+ file, oldmode, newmode)
+ os.chmod(file, newmode)
+ # XXX should we modify self.outfiles?
+ return outfiles, updated_files
+
+class build_scripts_2to3(build_scripts, Mixin2to3):
+
+ def copy_scripts(self):
+ outfiles, updated_files = build_scripts.copy_scripts(self)
+ if not self.dry_run:
+ self.run_2to3(updated_files)
+ return outfiles, updated_files
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/check.py b/third_party/python/setuptools/setuptools/_distutils/command/check.py
new file mode 100644
index 0000000000..ada2500646
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/check.py
@@ -0,0 +1,148 @@
+"""distutils.command.check
+
+Implements the Distutils 'check' command.
+"""
+from distutils.core import Command
+from distutils.errors import DistutilsSetupError
+
+try:
+ # docutils is installed
+ from docutils.utils import Reporter
+ from docutils.parsers.rst import Parser
+ from docutils import frontend
+ from docutils import nodes
+
+ class SilentReporter(Reporter):
+
+ def __init__(self, source, report_level, halt_level, stream=None,
+ debug=0, encoding='ascii', error_handler='replace'):
+ self.messages = []
+ Reporter.__init__(self, source, report_level, halt_level, stream,
+ debug, encoding, error_handler)
+
+ def system_message(self, level, message, *children, **kwargs):
+ self.messages.append((level, message, children, kwargs))
+ return nodes.system_message(message, level=level,
+ type=self.levels[level],
+ *children, **kwargs)
+
+ HAS_DOCUTILS = True
+except Exception:
+ # Catch all exceptions because exceptions besides ImportError probably
+ # indicate that docutils is not ported to Py3k.
+ HAS_DOCUTILS = False
+
+class check(Command):
+ """This command checks the meta-data of the package.
+ """
+ description = ("perform some checks on the package")
+ user_options = [('metadata', 'm', 'Verify meta-data'),
+ ('restructuredtext', 'r',
+ ('Checks if long string meta-data syntax '
+                      'is reStructuredText-compliant')),
+ ('strict', 's',
+ 'Will exit with an error if a check fails')]
+
+ boolean_options = ['metadata', 'restructuredtext', 'strict']
+
+ def initialize_options(self):
+ """Sets default values for options."""
+ self.restructuredtext = 0
+ self.metadata = 1
+ self.strict = 0
+ self._warnings = 0
+
+ def finalize_options(self):
+ pass
+
+ def warn(self, msg):
+        """Counts the number of warnings that occur."""
+ self._warnings += 1
+ return Command.warn(self, msg)
+
+ def run(self):
+ """Runs the command."""
+ # perform the various tests
+ if self.metadata:
+ self.check_metadata()
+ if self.restructuredtext:
+ if HAS_DOCUTILS:
+ self.check_restructuredtext()
+ elif self.strict:
+ raise DistutilsSetupError('The docutils package is needed.')
+
+ # let's raise an error in strict mode, if we have at least
+ # one warning
+ if self.strict and self._warnings > 0:
+ raise DistutilsSetupError('Please correct your package.')
+
+ def check_metadata(self):
+ """Ensures that all required elements of meta-data are supplied.
+
+ Required fields:
+ name, version, URL
+
+ Recommended fields:
+            (author and author_email) or (maintainer and maintainer_email)
+
+ Warns if any are missing.
+ """
+ metadata = self.distribution.metadata
+
+ missing = []
+ for attr in ('name', 'version', 'url'):
+ if not (hasattr(metadata, attr) and getattr(metadata, attr)):
+ missing.append(attr)
+
+ if missing:
+ self.warn("missing required meta-data: %s" % ', '.join(missing))
+ if metadata.author:
+ if not metadata.author_email:
+ self.warn("missing meta-data: if 'author' supplied, " +
+ "'author_email' should be supplied too")
+ elif metadata.maintainer:
+ if not metadata.maintainer_email:
+ self.warn("missing meta-data: if 'maintainer' supplied, " +
+ "'maintainer_email' should be supplied too")
+ else:
+ self.warn("missing meta-data: either (author and author_email) " +
+ "or (maintainer and maintainer_email) " +
+ "should be supplied")
+
+ def check_restructuredtext(self):
+ """Checks if the long string fields are reST-compliant."""
+ data = self.distribution.get_long_description()
+ for warning in self._check_rst_data(data):
+ line = warning[-1].get('line')
+ if line is None:
+ warning = warning[1]
+ else:
+ warning = '%s (line %s)' % (warning[1], line)
+ self.warn(warning)
+
+ def _check_rst_data(self, data):
+ """Returns warnings when the provided data doesn't compile."""
+ # the include and csv_table directives need this to be a path
+ source_path = self.distribution.script_name or 'setup.py'
+ parser = Parser()
+ settings = frontend.OptionParser(components=(Parser,)).get_default_values()
+ settings.tab_width = 4
+ settings.pep_references = None
+ settings.rfc_references = None
+ reporter = SilentReporter(source_path,
+ settings.report_level,
+ settings.halt_level,
+ stream=settings.warning_stream,
+ debug=settings.debug,
+ encoding=settings.error_encoding,
+ error_handler=settings.error_encoding_error_handler)
+
+ document = nodes.document(settings, reporter, source=source_path)
+ document.note_source(source_path, -1)
+ try:
+ parser.parse(data, document)
+ except AttributeError as e:
+ reporter.messages.append(
+ (-1, 'Could not finish the parsing: %s.' % e, '', {}))
+
+ return reporter.messages
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/clean.py b/third_party/python/setuptools/setuptools/_distutils/command/clean.py
new file mode 100644
index 0000000000..0cb2701662
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/clean.py
@@ -0,0 +1,76 @@
+"""distutils.command.clean
+
+Implements the Distutils 'clean' command."""
+
+# contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18
+
+import os
+from distutils.core import Command
+from distutils.dir_util import remove_tree
+from distutils import log
+
+class clean(Command):
+
+ description = "clean up temporary files from 'build' command"
+ user_options = [
+ ('build-base=', 'b',
+ "base build directory (default: 'build.build-base')"),
+ ('build-lib=', None,
+ "build directory for all modules (default: 'build.build-lib')"),
+ ('build-temp=', 't',
+ "temporary build directory (default: 'build.build-temp')"),
+ ('build-scripts=', None,
+ "build directory for scripts (default: 'build.build-scripts')"),
+ ('bdist-base=', None,
+ "temporary directory for built distributions"),
+ ('all', 'a',
+ "remove all build output, not just temporary by-products")
+ ]
+
+ boolean_options = ['all']
+
+ def initialize_options(self):
+ self.build_base = None
+ self.build_lib = None
+ self.build_temp = None
+ self.build_scripts = None
+ self.bdist_base = None
+ self.all = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build',
+ ('build_base', 'build_base'),
+ ('build_lib', 'build_lib'),
+ ('build_scripts', 'build_scripts'),
+ ('build_temp', 'build_temp'))
+ self.set_undefined_options('bdist',
+ ('bdist_base', 'bdist_base'))
+
+ def run(self):
+ # remove the build/temp.<plat> directory (unless it's already
+ # gone)
+ if os.path.exists(self.build_temp):
+ remove_tree(self.build_temp, dry_run=self.dry_run)
+ else:
+ log.debug("'%s' does not exist -- can't clean it",
+ self.build_temp)
+
+ if self.all:
+ # remove build directories
+ for directory in (self.build_lib,
+ self.bdist_base,
+ self.build_scripts):
+ if os.path.exists(directory):
+ remove_tree(directory, dry_run=self.dry_run)
+ else:
+ log.warn("'%s' does not exist -- can't clean it",
+ directory)
+
+ # just for the heck of it, try to remove the base build directory:
+ # we might have emptied it right now, but if not we don't care
+ if not self.dry_run:
+ try:
+ os.rmdir(self.build_base)
+ log.info("removing '%s'", self.build_base)
+ except OSError:
+ pass
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/config.py b/third_party/python/setuptools/setuptools/_distutils/command/config.py
new file mode 100644
index 0000000000..aeda408e73
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/config.py
@@ -0,0 +1,344 @@
+"""distutils.command.config
+
+Implements the Distutils 'config' command, a (mostly) empty command class
+that exists mainly to be sub-classed by specific module distributions and
+applications. The idea is that while every "config" command is different,
+at least they're all named the same, and users always see "config" in the
+list of standard commands. Also, this is a good place to put common
+configure-like tasks: "try to compile this C code", or "figure out where
+this header file lives".
+"""
+
+import os, re
+
+from distutils.core import Command
+from distutils.errors import DistutilsExecError
+from distutils.sysconfig import customize_compiler
+from distutils import log
+
+LANG_EXT = {"c": ".c", "c++": ".cxx"}
+
+class config(Command):
+
+ description = "prepare to build"
+
+ user_options = [
+ ('compiler=', None,
+ "specify the compiler type"),
+ ('cc=', None,
+ "specify the compiler executable"),
+ ('include-dirs=', 'I',
+ "list of directories to search for header files"),
+ ('define=', 'D',
+ "C preprocessor macros to define"),
+ ('undef=', 'U',
+ "C preprocessor macros to undefine"),
+ ('libraries=', 'l',
+ "external C libraries to link with"),
+ ('library-dirs=', 'L',
+ "directories to search for external C libraries"),
+
+ ('noisy', None,
+ "show every action (compile, link, run, ...) taken"),
+ ('dump-source', None,
+ "dump generated source files before attempting to compile them"),
+ ]
+
+
+ # The three standard command methods: since the "config" command
+ # does nothing by default, these are empty.
+
+ def initialize_options(self):
+ self.compiler = None
+ self.cc = None
+ self.include_dirs = None
+ self.libraries = None
+ self.library_dirs = None
+
+ # maximal output for now
+ self.noisy = 1
+ self.dump_source = 1
+
+ # list of temporary files generated along-the-way that we have
+ # to clean at some point
+ self.temp_files = []
+
+ def finalize_options(self):
+ if self.include_dirs is None:
+ self.include_dirs = self.distribution.include_dirs or []
+ elif isinstance(self.include_dirs, str):
+ self.include_dirs = self.include_dirs.split(os.pathsep)
+
+ if self.libraries is None:
+ self.libraries = []
+ elif isinstance(self.libraries, str):
+ self.libraries = [self.libraries]
+
+ if self.library_dirs is None:
+ self.library_dirs = []
+ elif isinstance(self.library_dirs, str):
+ self.library_dirs = self.library_dirs.split(os.pathsep)
+
+ def run(self):
+ pass
+
+ # Utility methods for actual "config" commands. The interfaces are
+ # loosely based on Autoconf macros of similar names. Sub-classes
+ # may use these freely.
+
+ def _check_compiler(self):
+ """Check that 'self.compiler' really is a CCompiler object;
+ if not, make it one.
+ """
+ # We do this late, and only on-demand, because this is an expensive
+ # import.
+ from distutils.ccompiler import CCompiler, new_compiler
+ if not isinstance(self.compiler, CCompiler):
+ self.compiler = new_compiler(compiler=self.compiler,
+ dry_run=self.dry_run, force=1)
+ customize_compiler(self.compiler)
+ if self.include_dirs:
+ self.compiler.set_include_dirs(self.include_dirs)
+ if self.libraries:
+ self.compiler.set_libraries(self.libraries)
+ if self.library_dirs:
+ self.compiler.set_library_dirs(self.library_dirs)
+
+ def _gen_temp_sourcefile(self, body, headers, lang):
+ filename = "_configtest" + LANG_EXT[lang]
+ with open(filename, "w") as file:
+ if headers:
+ for header in headers:
+ file.write("#include <%s>\n" % header)
+ file.write("\n")
+ file.write(body)
+ if body[-1] != "\n":
+ file.write("\n")
+ return filename
+
+ def _preprocess(self, body, headers, include_dirs, lang):
+ src = self._gen_temp_sourcefile(body, headers, lang)
+ out = "_configtest.i"
+ self.temp_files.extend([src, out])
+ self.compiler.preprocess(src, out, include_dirs=include_dirs)
+ return (src, out)
+
+ def _compile(self, body, headers, include_dirs, lang):
+ src = self._gen_temp_sourcefile(body, headers, lang)
+ if self.dump_source:
+ dump_file(src, "compiling '%s':" % src)
+ (obj,) = self.compiler.object_filenames([src])
+ self.temp_files.extend([src, obj])
+ self.compiler.compile([src], include_dirs=include_dirs)
+ return (src, obj)
+
+ def _link(self, body, headers, include_dirs, libraries, library_dirs,
+ lang):
+ (src, obj) = self._compile(body, headers, include_dirs, lang)
+ prog = os.path.splitext(os.path.basename(src))[0]
+ self.compiler.link_executable([obj], prog,
+ libraries=libraries,
+ library_dirs=library_dirs,
+ target_lang=lang)
+
+ if self.compiler.exe_extension is not None:
+ prog = prog + self.compiler.exe_extension
+ self.temp_files.append(prog)
+
+ return (src, obj, prog)
+
+ def _clean(self, *filenames):
+ if not filenames:
+ filenames = self.temp_files
+ self.temp_files = []
+ log.info("removing: %s", ' '.join(filenames))
+ for filename in filenames:
+ try:
+ os.remove(filename)
+ except OSError:
+ pass
+
+
+ # XXX these ignore the dry-run flag: what to do, what to do? even if
+ # you want a dry-run build, you still need some sort of configuration
+ # info. My inclination is to make it up to the real config command to
+ # consult 'dry_run', and assume a default (minimal) configuration if
+ # true. The problem with trying to do it here is that you'd have to
+ # return either true or false from all the 'try' methods, neither of
+ # which is correct.
+
+ # XXX need access to the header search path and maybe default macros.
+
+ def try_cpp(self, body=None, headers=None, include_dirs=None, lang="c"):
+ """Construct a source file from 'body' (a string containing lines
+ of C/C++ code) and 'headers' (a list of header files to include)
+ and run it through the preprocessor. Return true if the
+ preprocessor succeeded, false if there were any errors.
+ ('body' probably isn't of much use, but what the heck.)
+ """
+ from distutils.ccompiler import CompileError
+ self._check_compiler()
+ ok = True
+ try:
+ self._preprocess(body, headers, include_dirs, lang)
+ except CompileError:
+ ok = False
+
+ self._clean()
+ return ok
+
+ def search_cpp(self, pattern, body=None, headers=None, include_dirs=None,
+ lang="c"):
+ """Construct a source file (just like 'try_cpp()'), run it through
+ the preprocessor, and return true if any line of the output matches
+ 'pattern'. 'pattern' should either be a compiled regex object or a
+ string containing a regex. If both 'body' and 'headers' are None,
+ preprocesses an empty file -- which can be useful to determine the
+ symbols the preprocessor and compiler set by default.
+ """
+ self._check_compiler()
+ src, out = self._preprocess(body, headers, include_dirs, lang)
+
+ if isinstance(pattern, str):
+ pattern = re.compile(pattern)
+
+ with open(out) as file:
+ match = False
+ while True:
+ line = file.readline()
+ if line == '':
+ break
+ if pattern.search(line):
+ match = True
+ break
+
+ self._clean()
+ return match
+
+ def try_compile(self, body, headers=None, include_dirs=None, lang="c"):
+ """Try to compile a source file built from 'body' and 'headers'.
+ Return true on success, false otherwise.
+ """
+ from distutils.ccompiler import CompileError
+ self._check_compiler()
+ try:
+ self._compile(body, headers, include_dirs, lang)
+ ok = True
+ except CompileError:
+ ok = False
+
+ log.info(ok and "success!" or "failure.")
+ self._clean()
+ return ok
+
+ def try_link(self, body, headers=None, include_dirs=None, libraries=None,
+ library_dirs=None, lang="c"):
+ """Try to compile and link a source file, built from 'body' and
+ 'headers', to executable form. Return true on success, false
+ otherwise.
+ """
+ from distutils.ccompiler import CompileError, LinkError
+ self._check_compiler()
+ try:
+ self._link(body, headers, include_dirs,
+ libraries, library_dirs, lang)
+ ok = True
+ except (CompileError, LinkError):
+ ok = False
+
+ log.info(ok and "success!" or "failure.")
+ self._clean()
+ return ok
+
+ def try_run(self, body, headers=None, include_dirs=None, libraries=None,
+ library_dirs=None, lang="c"):
+ """Try to compile, link to an executable, and run a program
+ built from 'body' and 'headers'. Return true on success, false
+ otherwise.
+ """
+ from distutils.ccompiler import CompileError, LinkError
+ self._check_compiler()
+ try:
+ src, obj, exe = self._link(body, headers, include_dirs,
+ libraries, library_dirs, lang)
+ self.spawn([exe])
+ ok = True
+ except (CompileError, LinkError, DistutilsExecError):
+ ok = False
+
+ log.info(ok and "success!" or "failure.")
+ self._clean()
+ return ok
+
+
+ # -- High-level methods --------------------------------------------
+ # (these are the ones that are actually likely to be useful
+ # when implementing a real-world config command!)
+
+ def check_func(self, func, headers=None, include_dirs=None,
+ libraries=None, library_dirs=None, decl=0, call=0):
+ """Determine if function 'func' is available by constructing a
+ source file that refers to 'func', and compiles and links it.
+ If everything succeeds, returns true; otherwise returns false.
+
+ The constructed source file starts out by including the header
+ files listed in 'headers'. If 'decl' is true, it then declares
+ 'func' (as "int func()"); you probably shouldn't supply 'headers'
+ and set 'decl' true in the same call, or you might get errors about
+        conflicting declarations for 'func'. Finally, the constructed
+ 'main()' function either references 'func' or (if 'call' is true)
+ calls it. 'libraries' and 'library_dirs' are used when
+ linking.
+ """
+ self._check_compiler()
+ body = []
+ if decl:
+ body.append("int %s ();" % func)
+ body.append("int main () {")
+ if call:
+ body.append(" %s();" % func)
+ else:
+ body.append(" %s;" % func)
+ body.append("}")
+ body = "\n".join(body) + "\n"
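+        # e.g. check_func('pthread_create', decl=1, call=1) tries to
+        # compile and link (illustrative):
+        #   int pthread_create ();
+        #   int main () {
+        #    pthread_create();
+        #   }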
+
+ return self.try_link(body, headers, include_dirs,
+ libraries, library_dirs)
+
+ def check_lib(self, library, library_dirs=None, headers=None,
+ include_dirs=None, other_libraries=[]):
+ """Determine if 'library' is available to be linked against,
+ without actually checking that any particular symbols are provided
+ by it. 'headers' will be used in constructing the source file to
+ be compiled, but the only effect of this is to check if all the
+ header files listed are available. Any libraries listed in
+ 'other_libraries' will be included in the link, in case 'library'
+ has symbols that depend on other libraries.
+ """
+ self._check_compiler()
+ return self.try_link("int main (void) { }", headers, include_dirs,
+ [library] + other_libraries, library_dirs)
+
+ def check_header(self, header, include_dirs=None, library_dirs=None,
+ lang="c"):
+        """Determine if the system header file named by 'header'
+ exists and can be found by the preprocessor; return true if so,
+ false otherwise.
+ """
+ return self.try_cpp(body="/* No body */", headers=[header],
+ include_dirs=include_dirs)
+
+def dump_file(filename, head=None):
+ """Dumps a file content into log.info.
+
+    If 'head' is not None, it is dumped before the file content.
+ """
+ if head is None:
+ log.info('%s', filename)
+ else:
+ log.info(head)
+    with open(filename) as file:
+        log.info(file.read())
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/install.py b/third_party/python/setuptools/setuptools/_distutils/command/install.py
new file mode 100644
index 0000000000..13feeb890f
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/install.py
@@ -0,0 +1,677 @@
+"""distutils.command.install
+
+Implements the Distutils 'install' command."""
+
+import sys
+import os
+
+from distutils import log
+from distutils.core import Command
+from distutils.debug import DEBUG
+from distutils.sysconfig import get_config_vars
+from distutils.errors import DistutilsPlatformError
+from distutils.file_util import write_file
+from distutils.util import convert_path, subst_vars, change_root
+from distutils.util import get_platform
+from distutils.errors import DistutilsOptionError
+
+from site import USER_BASE
+from site import USER_SITE
+HAS_USER_SITE = True
+
+WINDOWS_SCHEME = {
+ 'purelib': '$base/Lib/site-packages',
+ 'platlib': '$base/Lib/site-packages',
+ 'headers': '$base/Include/$dist_name',
+ 'scripts': '$base/Scripts',
+ 'data' : '$base',
+}
+
+INSTALL_SCHEMES = {
+ 'unix_prefix': {
+ 'purelib': '$base/lib/python$py_version_short/site-packages',
+ 'platlib': '$platbase/$platlibdir/python$py_version_short/site-packages',
+ 'headers': '$base/include/python$py_version_short$abiflags/$dist_name',
+ 'scripts': '$base/bin',
+ 'data' : '$base',
+ },
+ 'unix_home': {
+ 'purelib': '$base/lib/python',
+ 'platlib': '$base/$platlibdir/python',
+ 'headers': '$base/include/python/$dist_name',
+ 'scripts': '$base/bin',
+ 'data' : '$base',
+ },
+ 'nt': WINDOWS_SCHEME,
+ 'pypy': {
+ 'purelib': '$base/site-packages',
+ 'platlib': '$base/site-packages',
+ 'headers': '$base/include/$dist_name',
+ 'scripts': '$base/bin',
+ 'data' : '$base',
+ },
+ 'pypy_nt': {
+ 'purelib': '$base/site-packages',
+ 'platlib': '$base/site-packages',
+ 'headers': '$base/include/$dist_name',
+ 'scripts': '$base/Scripts',
+ 'data' : '$base',
+ },
+ }
+
+# user site schemes
+if HAS_USER_SITE:
+ INSTALL_SCHEMES['nt_user'] = {
+ 'purelib': '$usersite',
+ 'platlib': '$usersite',
+ 'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
+ 'scripts': '$userbase/Python$py_version_nodot/Scripts',
+ 'data' : '$userbase',
+ }
+
+ INSTALL_SCHEMES['unix_user'] = {
+ 'purelib': '$usersite',
+ 'platlib': '$usersite',
+ 'headers':
+ '$userbase/include/python$py_version_short$abiflags/$dist_name',
+ 'scripts': '$userbase/bin',
+ 'data' : '$userbase',
+ }
+
+# The keys to an installation scheme; if any new types of files are to be
+# installed, be sure to add an entry to every installation scheme above,
+# and to SCHEME_KEYS here.
+SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
+
+
+class install(Command):
+
+ description = "install everything from build directory"
+
+ user_options = [
+ # Select installation scheme and set base director(y|ies)
+ ('prefix=', None,
+ "installation prefix"),
+ ('exec-prefix=', None,
+ "(Unix only) prefix for platform-specific files"),
+ ('home=', None,
+ "(Unix only) home directory to install under"),
+
+ # Or, just set the base director(y|ies)
+ ('install-base=', None,
+ "base installation directory (instead of --prefix or --home)"),
+ ('install-platbase=', None,
+ "base installation directory for platform-specific files " +
+ "(instead of --exec-prefix or --home)"),
+ ('root=', None,
+ "install everything relative to this alternate root directory"),
+
+ # Or, explicitly set the installation scheme
+ ('install-purelib=', None,
+ "installation directory for pure Python module distributions"),
+ ('install-platlib=', None,
+ "installation directory for non-pure module distributions"),
+ ('install-lib=', None,
+ "installation directory for all module distributions " +
+ "(overrides --install-purelib and --install-platlib)"),
+
+ ('install-headers=', None,
+ "installation directory for C/C++ headers"),
+ ('install-scripts=', None,
+ "installation directory for Python scripts"),
+ ('install-data=', None,
+ "installation directory for data files"),
+
+ # Byte-compilation options -- see install_lib.py for details, as
+ # these are duplicated from there (but only install_lib does
+ # anything with them).
+ ('compile', 'c', "compile .py to .pyc [default]"),
+ ('no-compile', None, "don't compile .py files"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+
+ # Miscellaneous control options
+ ('force', 'f',
+ "force installation (overwrite any existing files)"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+
+ # Where to install documentation (eventually!)
+ #('doc-format=', None, "format of documentation to generate"),
+ #('install-man=', None, "directory for Unix man pages"),
+ #('install-html=', None, "directory for HTML documentation"),
+ #('install-info=', None, "directory for GNU info files"),
+
+ ('record=', None,
+ "filename in which to record list of installed files"),
+ ]
+
+ boolean_options = ['compile', 'force', 'skip-build']
+
+ if HAS_USER_SITE:
+ user_options.append(('user', None,
+ "install in user site-package '%s'" % USER_SITE))
+ boolean_options.append('user')
+
+ negative_opt = {'no-compile' : 'compile'}
+
+
+ def initialize_options(self):
+ """Initializes options."""
+ # High-level options: these select both an installation base
+ # and scheme.
+ self.prefix = None
+ self.exec_prefix = None
+ self.home = None
+ self.user = 0
+
+ # These select only the installation base; it's up to the user to
+ # specify the installation scheme (currently, that means supplying
+ # the --install-{platlib,purelib,scripts,data} options).
+ self.install_base = None
+ self.install_platbase = None
+ self.root = None
+
+ # These options are the actual installation directories; if not
+ # supplied by the user, they are filled in using the installation
+ # scheme implied by prefix/exec-prefix/home and the contents of
+ # that installation scheme.
+ self.install_purelib = None # for pure module distributions
+ self.install_platlib = None # non-pure (dists w/ extensions)
+ self.install_headers = None # for C/C++ headers
+ self.install_lib = None # set to either purelib or platlib
+ self.install_scripts = None
+ self.install_data = None
+ self.install_userbase = USER_BASE
+ self.install_usersite = USER_SITE
+
+ self.compile = None
+ self.optimize = None
+
+ # Deprecated
+ # These two are for putting non-packagized distributions into their
+ # own directory and creating a .pth file if it makes sense.
+ # 'extra_path' comes from the setup file; 'install_path_file' can
+ # be turned off if it makes no sense to install a .pth file. (But
+ # better to install it uselessly than to guess wrong and not
+ # install it when it's necessary and would be used!) Currently,
+ # 'install_path_file' is always true unless some outsider meddles
+ # with it.
+ self.extra_path = None
+ self.install_path_file = 1
+
+ # 'force' forces installation, even if target files are not
+ # out-of-date. 'skip_build' skips running the "build" command,
+ # handy if you know it's not necessary. 'warn_dir' (which is *not*
+ # a user option, it's just there so the bdist_* commands can turn
+ # it off) determines whether we warn about installing to a
+ # directory not in sys.path.
+ self.force = 0
+ self.skip_build = 0
+ self.warn_dir = 1
+
+ # These are only here as a conduit from the 'build' command to the
+ # 'install_*' commands that do the real work. ('build_base' isn't
+ # actually used anywhere, but it might be useful in future.) They
+ # are not user options, because if the user told the install
+ # command where the build directory is, that wouldn't affect the
+ # build command.
+ self.build_base = None
+ self.build_lib = None
+
+ # Not defined yet because we don't know anything about
+ # documentation yet.
+ #self.install_man = None
+ #self.install_html = None
+ #self.install_info = None
+
+ self.record = None
+
+
+ # -- Option finalizing methods -------------------------------------
+ # (This is rather more involved than for most commands,
+ # because this is where the policy for installing third-
+ # party Python modules on various platforms given a wide
+ # array of user input is decided. Yes, it's quite complex!)
+
+ def finalize_options(self):
+ """Finalizes options."""
+ # This method (and its helpers, like 'finalize_unix()',
+ # 'finalize_other()', and 'select_scheme()') is where the default
+ # installation directories for modules, extension modules, and
+ # anything else we care to install from a Python module
+        # distribution are determined. Thus, this code makes a pretty
+        # important policy statement about how third-party stuff is
+        # added to a Python installation! Note that the actual work of
+        # installation is done
+ # by the relatively simple 'install_*' commands; they just take
+ # their orders from the installation directory options determined
+ # here.
+
+ # Check for errors/inconsistencies in the options; first, stuff
+ # that's wrong on any platform.
+
+ if ((self.prefix or self.exec_prefix or self.home) and
+ (self.install_base or self.install_platbase)):
+ raise DistutilsOptionError(
+ "must supply either prefix/exec-prefix/home or " +
+ "install-base/install-platbase -- not both")
+
+ if self.home and (self.prefix or self.exec_prefix):
+ raise DistutilsOptionError(
+ "must supply either home or prefix/exec-prefix -- not both")
+
+ if self.user and (self.prefix or self.exec_prefix or self.home or
+ self.install_base or self.install_platbase):
+ raise DistutilsOptionError("can't combine user with prefix, "
+ "exec_prefix/home, or install_(plat)base")
+
+ # Next, stuff that's wrong (or dubious) only on certain platforms.
+ if os.name != "posix":
+ if self.exec_prefix:
+ self.warn("exec-prefix option ignored on this platform")
+ self.exec_prefix = None
+
+ # Now the interesting logic -- so interesting that we farm it out
+ # to other methods. The goal of these methods is to set the final
+ # values for the install_{lib,scripts,data,...} options, using as
+ # input a heady brew of prefix, exec_prefix, home, install_base,
+ # install_platbase, user-supplied versions of
+ # install_{purelib,platlib,lib,scripts,data,...}, and the
+ # INSTALL_SCHEME dictionary above. Phew!
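+        # A rough, illustrative sketch (not executed): on a POSIX CPython
+        # 3.8 run with --prefix=/opt/app, the "unix_prefix" scheme would
+        # typically expand along these lines (exact templates vary by
+        # Python version and platform):
+        #   install_base    = '/opt/app'
+        #   install_purelib = '$base/lib/python$py_version_short/site-packages'
+        #                     -> '/opt/app/lib/python3.8/site-packages'
+        #   install_scripts = '$base/bin' -> '/opt/app/bin'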
+
+ self.dump_dirs("pre-finalize_{unix,other}")
+
+ if os.name == 'posix':
+ self.finalize_unix()
+ else:
+ self.finalize_other()
+
+ self.dump_dirs("post-finalize_{unix,other}()")
+
+ # Expand configuration variables, tilde, etc. in self.install_base
+ # and self.install_platbase -- that way, we can use $base or
+ # $platbase in the other installation directories and not worry
+ # about needing recursive variable expansion (shudder).
+
+ py_version = sys.version.split()[0]
+ (prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
+ try:
+ abiflags = sys.abiflags
+ except AttributeError:
+ # sys.abiflags may not be defined on all platforms.
+ abiflags = ''
+ self.config_vars = {'dist_name': self.distribution.get_name(),
+ 'dist_version': self.distribution.get_version(),
+ 'dist_fullname': self.distribution.get_fullname(),
+ 'py_version': py_version,
+ 'py_version_short': '%d.%d' % sys.version_info[:2],
+ 'py_version_nodot': '%d%d' % sys.version_info[:2],
+ 'sys_prefix': prefix,
+ 'prefix': prefix,
+ 'sys_exec_prefix': exec_prefix,
+ 'exec_prefix': exec_prefix,
+ 'abiflags': abiflags,
+ 'platlibdir': getattr(sys, 'platlibdir', 'lib'),
+ }
+
+ if HAS_USER_SITE:
+ self.config_vars['userbase'] = self.install_userbase
+ self.config_vars['usersite'] = self.install_usersite
+
+ self.expand_basedirs()
+
+ self.dump_dirs("post-expand_basedirs()")
+
+ # Now define config vars for the base directories so we can expand
+ # everything else.
+ self.config_vars['base'] = self.install_base
+ self.config_vars['platbase'] = self.install_platbase
+
+ if DEBUG:
+ from pprint import pprint
+ print("config vars:")
+ pprint(self.config_vars)
+
+ # Expand "~" and configuration variables in the installation
+ # directories.
+ self.expand_dirs()
+
+ self.dump_dirs("post-expand_dirs()")
+
+ # Create directories in the home dir:
+ if self.user:
+ self.create_home_path()
+
+ # Pick the actual directory to install all modules to: either
+ # install_purelib or install_platlib, depending on whether this
+ # module distribution is pure or not. Of course, if the user
+ # already specified install_lib, use their selection.
+ if self.install_lib is None:
+ if self.distribution.ext_modules: # has extensions: non-pure
+ self.install_lib = self.install_platlib
+ else:
+ self.install_lib = self.install_purelib
+
+
+ # Convert directories from Unix /-separated syntax to the local
+ # convention.
+ self.convert_paths('lib', 'purelib', 'platlib',
+ 'scripts', 'data', 'headers',
+ 'userbase', 'usersite')
+
+ # Deprecated
+ # Well, we're not actually fully completely finalized yet: we still
+ # have to deal with 'extra_path', which is the hack for allowing
+ # non-packagized module distributions (hello, Numerical Python!) to
+ # get their own directories.
+ self.handle_extra_path()
+ self.install_libbase = self.install_lib # needed for .pth file
+ self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
+
+ # If a new root directory was supplied, make all the installation
+ # dirs relative to it.
+ if self.root is not None:
+ self.change_roots('libbase', 'lib', 'purelib', 'platlib',
+ 'scripts', 'data', 'headers')
+
+ self.dump_dirs("after prepending root")
+
+        # Find out the build directories, i.e. where to install from.
+ self.set_undefined_options('build',
+ ('build_base', 'build_base'),
+ ('build_lib', 'build_lib'))
+
+ # Punt on doc directories for now -- after all, we're punting on
+ # documentation completely!
+
+ def dump_dirs(self, msg):
+ """Dumps the list of user options."""
+ if not DEBUG:
+ return
+ from distutils.fancy_getopt import longopt_xlate
+ log.debug(msg + ":")
+ for opt in self.user_options:
+ opt_name = opt[0]
+ if opt_name[-1] == "=":
+ opt_name = opt_name[0:-1]
+ if opt_name in self.negative_opt:
+ opt_name = self.negative_opt[opt_name]
+ opt_name = opt_name.translate(longopt_xlate)
+ val = not getattr(self, opt_name)
+ else:
+ opt_name = opt_name.translate(longopt_xlate)
+ val = getattr(self, opt_name)
+ log.debug(" %s: %s", opt_name, val)
+
+ def finalize_unix(self):
+ """Finalizes options for posix platforms."""
+ if self.install_base is not None or self.install_platbase is not None:
+ if ((self.install_lib is None and
+ self.install_purelib is None and
+ self.install_platlib is None) or
+ self.install_headers is None or
+ self.install_scripts is None or
+ self.install_data is None):
+ raise DistutilsOptionError(
+ "install-base or install-platbase supplied, but "
+ "installation scheme is incomplete")
+ return
+
+ if self.user:
+ if self.install_userbase is None:
+ raise DistutilsPlatformError(
+ "User base directory is not specified")
+ self.install_base = self.install_platbase = self.install_userbase
+ self.select_scheme("unix_user")
+ elif self.home is not None:
+ self.install_base = self.install_platbase = self.home
+ self.select_scheme("unix_home")
+ else:
+ if self.prefix is None:
+ if self.exec_prefix is not None:
+ raise DistutilsOptionError(
+ "must not supply exec-prefix without prefix")
+
+ self.prefix = os.path.normpath(sys.prefix)
+ self.exec_prefix = os.path.normpath(sys.exec_prefix)
+
+ else:
+ if self.exec_prefix is None:
+ self.exec_prefix = self.prefix
+
+ self.install_base = self.prefix
+ self.install_platbase = self.exec_prefix
+ self.select_scheme("unix_prefix")
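+        # To summarize the three branches above (illustrative):
+        #   setup.py install --user          -> "unix_user" scheme, rooted
+        #       at install_userbase (site.USER_BASE)
+        #   setup.py install --home=~/py     -> "unix_home" scheme
+        #   setup.py install [--prefix=...]  -> "unix_prefix" scheme,
+        #       defaulting to sys.prefix/sys.exec_prefix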
+
+ def finalize_other(self):
+ """Finalizes options for non-posix platforms"""
+ if self.user:
+ if self.install_userbase is None:
+ raise DistutilsPlatformError(
+ "User base directory is not specified")
+ self.install_base = self.install_platbase = self.install_userbase
+ self.select_scheme(os.name + "_user")
+ elif self.home is not None:
+ self.install_base = self.install_platbase = self.home
+ self.select_scheme("unix_home")
+ else:
+ if self.prefix is None:
+ self.prefix = os.path.normpath(sys.prefix)
+
+ self.install_base = self.install_platbase = self.prefix
+ try:
+ self.select_scheme(os.name)
+ except KeyError:
+ raise DistutilsPlatformError(
+ "I don't know how to install stuff on '%s'" % os.name)
+
+ def select_scheme(self, name):
+ """Sets the install directories by applying the install schemes."""
+ # it's the caller's problem if they supply a bad name!
+ if (hasattr(sys, 'pypy_version_info') and
+ not name.endswith(('_user', '_home'))):
+ if os.name == 'nt':
+ name = 'pypy_nt'
+ else:
+ name = 'pypy'
+ scheme = INSTALL_SCHEMES[name]
+ for key in SCHEME_KEYS:
+ attrname = 'install_' + key
+ if getattr(self, attrname) is None:
+ setattr(self, attrname, scheme[key])
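+    # For example, select_scheme("unix_user") copies each still-unset
+    # install_<key> attribute from INSTALL_SCHEMES["unix_user"][<key>]
+    # (see the scheme tables defined earlier in this module); options the
+    # user supplied explicitly are left untouched.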
+
+ def _expand_attrs(self, attrs):
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ if os.name == 'posix' or os.name == 'nt':
+ val = os.path.expanduser(val)
+ val = subst_vars(val, self.config_vars)
+ setattr(self, attr, val)
+
+ def expand_basedirs(self):
+        """Expand '~' and configuration variables in install_base,
+        install_platbase and root."""
+ self._expand_attrs(['install_base', 'install_platbase', 'root'])
+
+ def expand_dirs(self):
+        """Expand '~' and configuration variables in the install_* dirs."""
+ self._expand_attrs(['install_purelib', 'install_platlib',
+ 'install_lib', 'install_headers',
+ 'install_scripts', 'install_data',])
+
+ def convert_paths(self, *names):
+ """Call `convert_path` over `names`."""
+ for name in names:
+ attr = "install_" + name
+ setattr(self, attr, convert_path(getattr(self, attr)))
+
+ def handle_extra_path(self):
+ """Set `path_file` and `extra_dirs` using `extra_path`."""
+ if self.extra_path is None:
+ self.extra_path = self.distribution.extra_path
+
+ if self.extra_path is not None:
+ log.warn(
+ "Distribution option extra_path is deprecated. "
+ "See issue27919 for details."
+ )
+ if isinstance(self.extra_path, str):
+ self.extra_path = self.extra_path.split(',')
+
+ if len(self.extra_path) == 1:
+ path_file = extra_dirs = self.extra_path[0]
+ elif len(self.extra_path) == 2:
+ path_file, extra_dirs = self.extra_path
+ else:
+ raise DistutilsOptionError(
+ "'extra_path' option must be a list, tuple, or "
+ "comma-separated string with 1 or 2 elements")
+
+ # convert to local form in case Unix notation used (as it
+ # should be in setup scripts)
+ extra_dirs = convert_path(extra_dirs)
+ else:
+ path_file = None
+ extra_dirs = ''
+
+ # XXX should we warn if path_file and not extra_dirs? (in which
+ # case the path file would be harmless but pointless)
+ self.path_file = path_file
+ self.extra_dirs = extra_dirs
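+        # Illustrative examples of the deprecated mechanism above:
+        #   extra_path='Numeric'        -> path_file='Numeric',
+        #                                  extra_dirs='Numeric'
+        #   extra_path='Numeric,subdir' -> path_file='Numeric',
+        #                                  extra_dirs='subdir'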
+
+ def change_roots(self, *names):
+        """Prepend 'self.root' to the install directories named in 'names'."""
+ for name in names:
+ attr = "install_" + name
+ setattr(self, attr, change_root(self.root, getattr(self, attr)))
+
+ def create_home_path(self):
+ """Create directories under ~."""
+ if not self.user:
+ return
+ home = convert_path(os.path.expanduser("~"))
+ for name, path in self.config_vars.items():
+ if path.startswith(home) and not os.path.isdir(path):
+ self.debug_print("os.makedirs('%s', 0o700)" % path)
+ os.makedirs(path, 0o700)
+
+ # -- Command execution methods -------------------------------------
+
+ def run(self):
+ """Runs the command."""
+ # Obviously have to build before we can install
+ if not self.skip_build:
+ self.run_command('build')
+ # If we built for any other platform, we can't install.
+ build_plat = self.distribution.get_command_obj('build').plat_name
+        # check warn_dir -- when the bdist_* commands turn it off, this
+        # 'install' is running internally rather than onto sys.path, so we
+        # don't check that the build platform matches the one we are
+        # running on.
+ if self.warn_dir and build_plat != get_platform():
+ raise DistutilsPlatformError("Can't install when "
+ "cross-compiling")
+
+ # Run all sub-commands (at least those that need to be run)
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ if self.path_file:
+ self.create_path_file()
+
+ # write list of installed files, if requested.
+ if self.record:
+ outputs = self.get_outputs()
+ if self.root: # strip any package prefix
+ root_len = len(self.root)
+ for counter in range(len(outputs)):
+ outputs[counter] = outputs[counter][root_len:]
+ self.execute(write_file,
+ (self.record, outputs),
+ "writing list of installed files to '%s'" %
+ self.record)
+
+ sys_path = map(os.path.normpath, sys.path)
+ sys_path = map(os.path.normcase, sys_path)
+ install_lib = os.path.normcase(os.path.normpath(self.install_lib))
+ if (self.warn_dir and
+ not (self.path_file and self.install_path_file) and
+ install_lib not in sys_path):
+ log.debug(("modules installed to '%s', which is not in "
+ "Python's module search path (sys.path) -- "
+ "you'll have to change the search path yourself"),
+ self.install_lib)
+
+ def create_path_file(self):
+ """Creates the .pth file"""
+ filename = os.path.join(self.install_libbase,
+ self.path_file + ".pth")
+ if self.install_path_file:
+ self.execute(write_file,
+ (filename, [self.extra_dirs]),
+ "creating %s" % filename)
+ else:
+ self.warn("path file '%s' not created" % filename)
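+    # The resulting .pth file is a single line naming extra_dirs; e.g. a
+    # path file "Numeric" containing "Numeric" makes site.py add
+    # <install_libbase>/Numeric to sys.path (illustrative).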
+
+
+ # -- Reporting methods ---------------------------------------------
+
+ def get_outputs(self):
+ """Assembles the outputs of all the sub-commands."""
+ outputs = []
+ for cmd_name in self.get_sub_commands():
+ cmd = self.get_finalized_command(cmd_name)
+ # Add the contents of cmd.get_outputs(), ensuring
+ # that outputs doesn't contain duplicate entries
+ for filename in cmd.get_outputs():
+ if filename not in outputs:
+ outputs.append(filename)
+
+ if self.path_file and self.install_path_file:
+ outputs.append(os.path.join(self.install_libbase,
+ self.path_file + ".pth"))
+
+ return outputs
+
+ def get_inputs(self):
+ """Returns the inputs of all the sub-commands"""
+ # XXX gee, this looks familiar ;-(
+ inputs = []
+ for cmd_name in self.get_sub_commands():
+ cmd = self.get_finalized_command(cmd_name)
+ inputs.extend(cmd.get_inputs())
+
+ return inputs
+
+ # -- Predicates for sub-command list -------------------------------
+
+ def has_lib(self):
+ """Returns true if the current distribution has any Python
+ modules to install."""
+ return (self.distribution.has_pure_modules() or
+ self.distribution.has_ext_modules())
+
+ def has_headers(self):
+ """Returns true if the current distribution has any headers to
+ install."""
+ return self.distribution.has_headers()
+
+ def has_scripts(self):
+        """Returns true if the current distribution has any scripts to
+        install."""
+ return self.distribution.has_scripts()
+
+ def has_data(self):
+        """Returns true if the current distribution has any data to
+        install."""
+ return self.distribution.has_data_files()
+
+ # 'sub_commands': a list of commands this command might have to run to
+ # get its work done. See cmd.py for more info.
+ sub_commands = [('install_lib', has_lib),
+ ('install_headers', has_headers),
+ ('install_scripts', has_scripts),
+ ('install_data', has_data),
+                    ('install_egg_info', lambda self: True),
+ ]
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/install_data.py b/third_party/python/setuptools/setuptools/_distutils/command/install_data.py
new file mode 100644
index 0000000000..947cd76a99
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/install_data.py
@@ -0,0 +1,79 @@
+"""distutils.command.install_data
+
+Implements the Distutils 'install_data' command, for installing
+platform-independent data files."""
+
+# contributed by Bastian Kleineidam
+
+import os
+from distutils.core import Command
+from distutils.util import change_root, convert_path
+
+class install_data(Command):
+
+ description = "install data files"
+
+ user_options = [
+ ('install-dir=', 'd',
+ "base directory for installing data files "
+ "(default: installation base dir)"),
+ ('root=', None,
+ "install everything relative to this alternate root directory"),
+ ('force', 'f', "force installation (overwrite existing files)"),
+ ]
+
+ boolean_options = ['force']
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.outfiles = []
+ self.root = None
+ self.force = 0
+ self.data_files = self.distribution.data_files
+ self.warn_dir = 1
+
+ def finalize_options(self):
+ self.set_undefined_options('install',
+ ('install_data', 'install_dir'),
+ ('root', 'root'),
+ ('force', 'force'),
+ )
+
+ def run(self):
+ self.mkpath(self.install_dir)
+ for f in self.data_files:
+ if isinstance(f, str):
+ # it's a simple file, so copy it
+ f = convert_path(f)
+ if self.warn_dir:
+ self.warn("setup script did not provide a directory for "
+ "'%s' -- installing right in '%s'" %
+ (f, self.install_dir))
+ (out, _) = self.copy_file(f, self.install_dir)
+ self.outfiles.append(out)
+ else:
+ # it's a tuple with path to install to and a list of files
+ dir = convert_path(f[0])
+ if not os.path.isabs(dir):
+ dir = os.path.join(self.install_dir, dir)
+ elif self.root:
+ dir = change_root(self.root, dir)
+ self.mkpath(dir)
+
+ if f[1] == []:
+ # If there are no files listed, the user must be
+ # trying to create an empty directory, so add the
+ # directory to the list of output files.
+ self.outfiles.append(dir)
+ else:
+ # Copy files, adding them to the list of output files.
+ for data in f[1]:
+ data = convert_path(data)
+ (out, _) = self.copy_file(data, dir)
+ self.outfiles.append(out)
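+    # data_files thus accepts two shapes, e.g. (illustrative):
+    #   data_files=['app.cfg',                      # bare file
+    #               ('share/doc/app', ['README'])]  # (dir, [files]) tuple
+    # Bare names are copied straight into install_dir (with the warning
+    # above); relative tuple dirs are joined onto install_dir first.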
+
+ def get_inputs(self):
+ return self.data_files or []
+
+ def get_outputs(self):
+ return self.outfiles
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/install_egg_info.py b/third_party/python/setuptools/setuptools/_distutils/command/install_egg_info.py
new file mode 100644
index 0000000000..0ddc7367cc
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/install_egg_info.py
@@ -0,0 +1,77 @@
+"""distutils.command.install_egg_info
+
+Implements the Distutils 'install_egg_info' command, for installing
+a package's PKG-INFO metadata."""
+
+
+from distutils.cmd import Command
+from distutils import log, dir_util
+import os, sys, re
+
+class install_egg_info(Command):
+ """Install an .egg-info file for the package"""
+
+ description = "Install package's PKG-INFO metadata as an .egg-info file"
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ]
+
+ def initialize_options(self):
+ self.install_dir = None
+
+ def finalize_options(self):
+        self.set_undefined_options('install_lib',
+                                   ('install_dir', 'install_dir'))
+ basename = "%s-%s-py%d.%d.egg-info" % (
+ to_filename(safe_name(self.distribution.get_name())),
+ to_filename(safe_version(self.distribution.get_version())),
+ *sys.version_info[:2]
+ )
+ self.target = os.path.join(self.install_dir, basename)
+ self.outputs = [self.target]
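+        # e.g. a distribution named "my-package" at version "1.0" on
+        # CPython 3.8 yields "my_package-1.0-py3.8.egg-info" (illustrative).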
+
+ def run(self):
+ target = self.target
+ if os.path.isdir(target) and not os.path.islink(target):
+ dir_util.remove_tree(target, dry_run=self.dry_run)
+ elif os.path.exists(target):
+            self.execute(os.unlink, (self.target,), "Removing " + target)
+        elif not os.path.isdir(self.install_dir):
+            self.execute(os.makedirs, (self.install_dir,),
+                         "Creating " + self.install_dir)
+ log.info("Writing %s", target)
+ if not self.dry_run:
+ with open(target, 'w', encoding='UTF-8') as f:
+ self.distribution.metadata.write_pkg_file(f)
+
+ def get_outputs(self):
+ return self.outputs
+
+
+# The following routines are taken from setuptools' pkg_resources module and
+# can be replaced by importing them from pkg_resources once it is included
+# in the stdlib.
+
+def safe_name(name):
+ """Convert an arbitrary string to a standard distribution name
+
+ Any runs of non-alphanumeric/. characters are replaced with a single '-'.
+ """
+ return re.sub('[^A-Za-z0-9.]+', '-', name)
+
+
+def safe_version(version):
+ """Convert an arbitrary string to a standard version string
+
+ Spaces become dots, and all other non-alphanumeric characters become
+ dashes, with runs of multiple dashes condensed to a single dash.
+ """
+ version = version.replace(' ','.')
+ return re.sub('[^A-Za-z0-9.]+', '-', version)
+
+
+def to_filename(name):
+ """Convert a project or version name to its filename-escaped form
+
+ Any '-' characters are currently replaced with '_'.
+ """
+ return name.replace('-','_')
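+# Illustrative behavior of the three helpers above (not executed here):
+#   safe_name('my package')     -> 'my-package'
+#   safe_version('1.0 beta 2')  -> '1.0.beta.2'
+#   to_filename('my-package')   -> 'my_package'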
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/install_headers.py b/third_party/python/setuptools/setuptools/_distutils/command/install_headers.py
new file mode 100644
index 0000000000..9bb0b18dc0
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/install_headers.py
@@ -0,0 +1,47 @@
+"""distutils.command.install_headers
+
+Implements the Distutils 'install_headers' command, to install C/C++ header
+files to the Python include directory."""
+
+from distutils.core import Command
+
+
+# XXX force is never used
+class install_headers(Command):
+
+ description = "install C/C++ header files"
+
+ user_options = [('install-dir=', 'd',
+ "directory to install header files to"),
+ ('force', 'f',
+ "force installation (overwrite existing files)"),
+ ]
+
+ boolean_options = ['force']
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.force = 0
+ self.outfiles = []
+
+ def finalize_options(self):
+ self.set_undefined_options('install',
+ ('install_headers', 'install_dir'),
+ ('force', 'force'))
+
+
+ def run(self):
+ headers = self.distribution.headers
+ if not headers:
+ return
+
+ self.mkpath(self.install_dir)
+ for header in headers:
+ (out, _) = self.copy_file(header, self.install_dir)
+ self.outfiles.append(out)
+
+ def get_inputs(self):
+ return self.distribution.headers or []
+
+ def get_outputs(self):
+ return self.outfiles
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/install_lib.py b/third_party/python/setuptools/setuptools/_distutils/command/install_lib.py
new file mode 100644
index 0000000000..6154cf0943
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/install_lib.py
@@ -0,0 +1,217 @@
+"""distutils.command.install_lib
+
+Implements the Distutils 'install_lib' command
+(install all Python modules)."""
+
+import os
+import importlib.util
+import sys
+
+from distutils.core import Command
+from distutils.errors import DistutilsOptionError
+
+
+# Extension for Python source files.
+PYTHON_SOURCE_EXTENSION = ".py"
+
+class install_lib(Command):
+
+ description = "install all Python modules (extensions and pure Python)"
+
+ # The byte-compilation options are a tad confusing. Here are the
+ # possible scenarios:
+ # 1) no compilation at all (--no-compile --no-optimize)
+ # 2) compile .pyc only (--compile --no-optimize; default)
+ # 3) compile .pyc and "opt-1" .pyc (--compile --optimize)
+ # 4) compile "opt-1" .pyc only (--no-compile --optimize)
+ # 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more)
+ # 6) compile "opt-2" .pyc only (--no-compile --optimize-more)
+ #
+ # The UI for this is two options, 'compile' and 'optimize'.
+ # 'compile' is strictly boolean, and only decides whether to
+ # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and
+ # decides both whether to generate .pyc files and what level of
+ # optimization to use.
+
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ('build-dir=','b', "build directory (where to install from)"),
+ ('force', 'f', "force installation (overwrite existing files)"),
+ ('compile', 'c', "compile .py to .pyc [default]"),
+ ('no-compile', None, "don't compile .py files"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+ ('skip-build', None, "skip the build steps"),
+ ]
+
+ boolean_options = ['force', 'compile', 'skip-build']
+ negative_opt = {'no-compile' : 'compile'}
+
+ def initialize_options(self):
+ # let the 'install' command dictate our installation directory
+ self.install_dir = None
+ self.build_dir = None
+ self.force = 0
+ self.compile = None
+ self.optimize = None
+ self.skip_build = None
+
+ def finalize_options(self):
+ # Get all the information we need to install pure Python modules
+ # from the umbrella 'install' command -- build (source) directory,
+ # install (target) directory, and whether to compile .py files.
+ self.set_undefined_options('install',
+ ('build_lib', 'build_dir'),
+ ('install_lib', 'install_dir'),
+ ('force', 'force'),
+ ('compile', 'compile'),
+ ('optimize', 'optimize'),
+ ('skip_build', 'skip_build'),
+ )
+
+ if self.compile is None:
+ self.compile = True
+ if self.optimize is None:
+ self.optimize = False
+
+ if not isinstance(self.optimize, int):
+ try:
+ self.optimize = int(self.optimize)
+ if self.optimize not in (0, 1, 2):
+ raise AssertionError
+ except (ValueError, AssertionError):
+ raise DistutilsOptionError("optimize must be 0, 1, or 2")
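+        # Net effect of the two options (sketch): compile=True with
+        # optimize=0 writes plain .pyc files only; optimize=1 additionally
+        # byte-compiles like "python -O" (opt-1), and optimize=2 like
+        # "python -OO" (opt-2).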
+
+ def run(self):
+ # Make sure we have built everything we need first
+ self.build()
+
+ # Install everything: simply dump the entire contents of the build
+ # directory to the installation directory (that's the beauty of
+ # having a build directory!)
+ outfiles = self.install()
+
+ # (Optionally) compile .py to .pyc
+ if outfiles is not None and self.distribution.has_pure_modules():
+ self.byte_compile(outfiles)
+
+ # -- Top-level worker functions ------------------------------------
+ # (called from 'run()')
+
+ def build(self):
+ if not self.skip_build:
+ if self.distribution.has_pure_modules():
+ self.run_command('build_py')
+ if self.distribution.has_ext_modules():
+ self.run_command('build_ext')
+
+ def install(self):
+ if os.path.isdir(self.build_dir):
+ outfiles = self.copy_tree(self.build_dir, self.install_dir)
+ else:
+ self.warn("'%s' does not exist -- no Python modules to install" %
+ self.build_dir)
+ return
+ return outfiles
+
+ def byte_compile(self, files):
+ if sys.dont_write_bytecode:
+ self.warn('byte-compiling is disabled, skipping.')
+ return
+
+ from distutils.util import byte_compile
+
+ # Get the "--root" directory supplied to the "install" command,
+ # and use it as a prefix to strip off the purported filename
+ # encoded in bytecode files. This is far from complete, but it
+ # should at least generate usable bytecode in RPM distributions.
+ install_root = self.get_finalized_command('install').root
+
+ if self.compile:
+ byte_compile(files, optimize=0,
+ force=self.force, prefix=install_root,
+ dry_run=self.dry_run)
+ if self.optimize > 0:
+ byte_compile(files, optimize=self.optimize,
+ force=self.force, prefix=install_root,
+ verbose=self.verbose, dry_run=self.dry_run)
+
+
+ # -- Utility methods -----------------------------------------------
+
+ def _mutate_outputs(self, has_any, build_cmd, cmd_option, output_dir):
+ if not has_any:
+ return []
+
+ build_cmd = self.get_finalized_command(build_cmd)
+ build_files = build_cmd.get_outputs()
+ build_dir = getattr(build_cmd, cmd_option)
+
+ prefix_len = len(build_dir) + len(os.sep)
+ outputs = []
+ for file in build_files:
+ outputs.append(os.path.join(output_dir, file[prefix_len:]))
+
+ return outputs
+
+ def _bytecode_filenames(self, py_filenames):
+ bytecode_files = []
+ for py_file in py_filenames:
+ # Since build_py handles package data installation, the
+ # list of outputs can contain more than just .py files.
+ # Make sure we only report bytecode for the .py files.
+ ext = os.path.splitext(os.path.normcase(py_file))[1]
+ if ext != PYTHON_SOURCE_EXTENSION:
+ continue
+ if self.compile:
+ bytecode_files.append(importlib.util.cache_from_source(
+ py_file, optimization=''))
+ if self.optimize > 0:
+ bytecode_files.append(importlib.util.cache_from_source(
+ py_file, optimization=self.optimize))
+
+ return bytecode_files
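+    # For instance, on CPython 3.8 cache_from_source('pkg/mod.py',
+    # optimization='') gives 'pkg/__pycache__/mod.cpython-38.pyc', while
+    # optimization=1 gives 'pkg/__pycache__/mod.cpython-38.opt-1.pyc'
+    # (illustrative; the tag depends on the interpreter).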
+
+
+ # -- External interface --------------------------------------------
+ # (called by outsiders)
+
+ def get_outputs(self):
+ """Return the list of files that would be installed if this command
+ were actually run. Not affected by the "dry-run" flag or whether
+ modules have actually been built yet.
+ """
+ pure_outputs = \
+ self._mutate_outputs(self.distribution.has_pure_modules(),
+ 'build_py', 'build_lib',
+ self.install_dir)
+ if self.compile:
+ bytecode_outputs = self._bytecode_filenames(pure_outputs)
+ else:
+ bytecode_outputs = []
+
+ ext_outputs = \
+ self._mutate_outputs(self.distribution.has_ext_modules(),
+ 'build_ext', 'build_lib',
+ self.install_dir)
+
+ return pure_outputs + bytecode_outputs + ext_outputs
+
+ def get_inputs(self):
+        """Get the list of files that are input to this command, i.e. the
+ files that get installed as they are named in the build tree.
+ The files in this list correspond one-to-one to the output
+ filenames returned by 'get_outputs()'.
+ """
+ inputs = []
+
+ if self.distribution.has_pure_modules():
+ build_py = self.get_finalized_command('build_py')
+ inputs.extend(build_py.get_outputs())
+
+ if self.distribution.has_ext_modules():
+ build_ext = self.get_finalized_command('build_ext')
+ inputs.extend(build_ext.get_outputs())
+
+ return inputs
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/install_scripts.py b/third_party/python/setuptools/setuptools/_distutils/command/install_scripts.py
new file mode 100644
index 0000000000..31a1130ee5
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/install_scripts.py
@@ -0,0 +1,60 @@
+"""distutils.command.install_scripts
+
+Implements the Distutils 'install_scripts' command, for installing
+Python scripts."""
+
+# contributed by Bastian Kleineidam
+
+import os
+from distutils.core import Command
+from distutils import log
+from stat import ST_MODE
+
+
+class install_scripts(Command):
+
+ description = "install scripts (Python or otherwise)"
+
+ user_options = [
+ ('install-dir=', 'd', "directory to install scripts to"),
+ ('build-dir=','b', "build directory (where to install from)"),
+ ('force', 'f', "force installation (overwrite existing files)"),
+ ('skip-build', None, "skip the build steps"),
+ ]
+
+ boolean_options = ['force', 'skip-build']
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.force = 0
+ self.build_dir = None
+ self.skip_build = None
+
+ def finalize_options(self):
+ self.set_undefined_options('build', ('build_scripts', 'build_dir'))
+ self.set_undefined_options('install',
+ ('install_scripts', 'install_dir'),
+ ('force', 'force'),
+ ('skip_build', 'skip_build'),
+ )
+
+ def run(self):
+ if not self.skip_build:
+ self.run_command('build_scripts')
+ self.outfiles = self.copy_tree(self.build_dir, self.install_dir)
+ if os.name == 'posix':
+ # Set the executable bits (owner, group, and world) on
+ # all the scripts we just installed.
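+            # The mode math below ORs in 0o555 and masks with 0o7777, so
+            # e.g. a file installed as 0o644 ends up 0o755 (rwxr-xr-x) --
+            # illustrative.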
+ for file in self.get_outputs():
+ if self.dry_run:
+ log.info("changing mode of %s", file)
+ else:
+ mode = ((os.stat(file)[ST_MODE]) | 0o555) & 0o7777
+ log.info("changing mode of %s to %o", file, mode)
+ os.chmod(file, mode)
+
+ def get_inputs(self):
+ return self.distribution.scripts or []
+
+ def get_outputs(self):
+ return self.outfiles or []
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/py37compat.py b/third_party/python/setuptools/setuptools/_distutils/command/py37compat.py
new file mode 100644
index 0000000000..754715a508
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/py37compat.py
@@ -0,0 +1,30 @@
+import sys
+
+
+def _pythonlib_compat():
+ """
+ On Python 3.7 and earlier, distutils would include the Python
+ library. See pypa/distutils#9.
+ """
+ from distutils import sysconfig
+ if not sysconfig.get_config_var('Py_ENABLED_SHARED'):
+ return
+
+ yield 'python{}.{}{}'.format(
+ sys.hexversion >> 24,
+ (sys.hexversion >> 16) & 0xff,
+ sysconfig.get_config_var('ABIFLAGS'),
+ )
+
+
+def compose(f1, f2):
+ return lambda *args, **kwargs: f1(f2(*args, **kwargs))
+
+
+pythonlib = (
+ compose(list, _pythonlib_compat)
+ if sys.version_info < (3, 8)
+ and sys.platform != 'darwin'
+ and sys.platform[:3] != 'aix'
+ else list
+)
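+# Illustrative: on an --enable-shared CPython 3.7 POSIX build, pythonlib()
+# might return ['python3.7m']; on 3.8+ (or on darwin/AIX) it is plain
+# list, so pythonlib() == [].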
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/register.py b/third_party/python/setuptools/setuptools/_distutils/command/register.py
new file mode 100644
index 0000000000..0fac94e9e5
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/register.py
@@ -0,0 +1,304 @@
+"""distutils.command.register
+
+Implements the Distutils 'register' command (register with the repository).
+"""
+
+# created 2002/10/21, Richard Jones
+
+import getpass
+import io
+import urllib.parse, urllib.request
+from warnings import warn
+
+from distutils.core import PyPIRCCommand
+from distutils.errors import *
+from distutils import log
+
+class register(PyPIRCCommand):
+
+ description = ("register the distribution with the Python package index")
+ user_options = PyPIRCCommand.user_options + [
+ ('list-classifiers', None,
+ 'list the valid Trove classifiers'),
+        ('strict', None,
+         'stop registration if the metadata is not fully compliant')
+ ]
+ boolean_options = PyPIRCCommand.boolean_options + [
+ 'verify', 'list-classifiers', 'strict']
+
+ sub_commands = [('check', lambda self: True)]
+
+ def initialize_options(self):
+ PyPIRCCommand.initialize_options(self)
+ self.list_classifiers = 0
+ self.strict = 0
+
+ def finalize_options(self):
+ PyPIRCCommand.finalize_options(self)
+ # setting options for the `check` subcommand
+ check_options = {'strict': ('register', self.strict),
+ 'restructuredtext': ('register', 1)}
+ self.distribution.command_options['check'] = check_options
+
+ def run(self):
+ self.finalize_options()
+ self._set_config()
+
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ if self.dry_run:
+ self.verify_metadata()
+ elif self.list_classifiers:
+ self.classifiers()
+ else:
+ self.send_metadata()
+
+ def check_metadata(self):
+ """Deprecated API."""
+ warn("distutils.command.register.check_metadata is deprecated, \
+ use the check command instead", PendingDeprecationWarning)
+ check = self.distribution.get_command_obj('check')
+ check.ensure_finalized()
+ check.strict = self.strict
+ check.restructuredtext = 1
+ check.run()
+
+ def _set_config(self):
+ ''' Reads the configuration file and set attributes.
+ '''
+ config = self._read_pypirc()
+ if config != {}:
+ self.username = config['username']
+ self.password = config['password']
+ self.repository = config['repository']
+ self.realm = config['realm']
+ self.has_config = True
+ else:
+ if self.repository not in ('pypi', self.DEFAULT_REPOSITORY):
+ raise ValueError('%s not found in .pypirc' % self.repository)
+ if self.repository == 'pypi':
+ self.repository = self.DEFAULT_REPOSITORY
+ self.has_config = False
+
+ def classifiers(self):
+ ''' Fetch the list of classifiers from the server.
+ '''
+ url = self.repository+'?:action=list_classifiers'
+ response = urllib.request.urlopen(url)
+ log.info(self._read_pypi_response(response))
+
+ def verify_metadata(self):
+ ''' Send the metadata to the package index server to be checked.
+ '''
+ # send the info to the server and report the result
+ (code, result) = self.post_to_server(self.build_post_data('verify'))
+ log.info('Server response (%s): %s', code, result)
+
+ def send_metadata(self):
+ ''' Send the metadata to the package index server.
+
+ Well, do the following:
+ 1. figure who the user is, and then
+ 2. send the data as a Basic auth'ed POST.
+
+ First we try to read the username/password from $HOME/.pypirc,
+ which is a ConfigParser-formatted file with a section
+ [distutils] containing username and password entries (both
+        in clear text).  E.g.:
+
+ [distutils]
+ index-servers =
+ pypi
+
+ [pypi]
+ username: fred
+ password: sekrit
+
+ Otherwise, to figure who the user is, we offer the user three
+ choices:
+
+ 1. use existing login,
+ 2. register as a new user, or
+ 3. set the password to a random string and email the user.
+
+ '''
+ # see if we can short-cut and get the username/password from the
+ # config
+ if self.has_config:
+ choice = '1'
+ username = self.username
+ password = self.password
+ else:
+ choice = 'x'
+ username = password = ''
+
+ # get the user's login info
+ choices = '1 2 3 4'.split()
+ while choice not in choices:
+ self.announce('''\
+We need to know who you are, so please choose either:
+ 1. use your existing login,
+ 2. register as a new user,
+ 3. have the server generate a new password for you (and email it to you), or
+ 4. quit
+Your selection [default 1]: ''', log.INFO)
+ choice = input()
+ if not choice:
+ choice = '1'
+ elif choice not in choices:
+ print('Please choose one of the four options!')
+
+ if choice == '1':
+ # get the username and password
+ while not username:
+ username = input('Username: ')
+ while not password:
+ password = getpass.getpass('Password: ')
+
+ # set up the authentication
+ auth = urllib.request.HTTPPasswordMgr()
+ host = urllib.parse.urlparse(self.repository)[1]
+ auth.add_password(self.realm, host, username, password)
+ # send the info to the server and report the result
+ code, result = self.post_to_server(self.build_post_data('submit'),
+ auth)
+ self.announce('Server response (%s): %s' % (code, result),
+ log.INFO)
+
+ # possibly save the login
+ if code == 200:
+ if self.has_config:
+ # sharing the password in the distribution instance
+ # so the upload command can reuse it
+ self.distribution.password = password
+ else:
+ self.announce(('I can store your PyPI login so future '
+ 'submissions will be faster.'), log.INFO)
+ self.announce('(the login will be stored in %s)' % \
+ self._get_rc_file(), log.INFO)
+ choice = 'X'
+ while choice.lower() not in 'yn':
+ choice = input('Save your login (y/N)?')
+ if not choice:
+ choice = 'n'
+ if choice.lower() == 'y':
+ self._store_pypirc(username, password)
+
+ elif choice == '2':
+ data = {':action': 'user'}
+ data['name'] = data['password'] = data['email'] = ''
+ data['confirm'] = None
+ while not data['name']:
+ data['name'] = input('Username: ')
+ while data['password'] != data['confirm']:
+ while not data['password']:
+ data['password'] = getpass.getpass('Password: ')
+ while not data['confirm']:
+ data['confirm'] = getpass.getpass(' Confirm: ')
+ if data['password'] != data['confirm']:
+ data['password'] = ''
+ data['confirm'] = None
+ print("Password and confirm don't match!")
+ while not data['email']:
+ data['email'] = input(' EMail: ')
+ code, result = self.post_to_server(data)
+ if code != 200:
+ log.info('Server response (%s): %s', code, result)
+ else:
+ log.info('You will receive an email shortly.')
+ log.info(('Follow the instructions in it to '
+ 'complete registration.'))
+ elif choice == '3':
+ data = {':action': 'password_reset'}
+ data['email'] = ''
+ while not data['email']:
+ data['email'] = input('Your email address: ')
+ code, result = self.post_to_server(data)
+ log.info('Server response (%s): %s', code, result)
+
+ def build_post_data(self, action):
+ # figure the data to send - the metadata plus some additional
+ # information used by the package server
+ meta = self.distribution.metadata
+ data = {
+ ':action': action,
+ 'metadata_version' : '1.0',
+ 'name': meta.get_name(),
+ 'version': meta.get_version(),
+ 'summary': meta.get_description(),
+ 'home_page': meta.get_url(),
+ 'author': meta.get_contact(),
+ 'author_email': meta.get_contact_email(),
+ 'license': meta.get_licence(),
+ 'description': meta.get_long_description(),
+ 'keywords': meta.get_keywords(),
+ 'platform': meta.get_platforms(),
+ 'classifiers': meta.get_classifiers(),
+ 'download_url': meta.get_download_url(),
+ # PEP 314
+ 'provides': meta.get_provides(),
+ 'requires': meta.get_requires(),
+ 'obsoletes': meta.get_obsoletes(),
+ }
+ if data['provides'] or data['requires'] or data['obsoletes']:
+ data['metadata_version'] = '1.1'
+ return data
+
+ def post_to_server(self, data, auth=None):
+ ''' Post a query to the server, and return a string response.
+ '''
+ if 'name' in data:
+ self.announce('Registering %s to %s' % (data['name'],
+ self.repository),
+ log.INFO)
+ # Build up the MIME payload for the urllib2 POST data
+ boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+ sep_boundary = '\n--' + boundary
+ end_boundary = sep_boundary + '--'
+ body = io.StringIO()
+ for key, value in data.items():
+ # handle multiple entries for the same name
+            if type(value) not in (type([]), type(())):
+ value = [value]
+ for value in value:
+ value = str(value)
+ body.write(sep_boundary)
+ body.write('\nContent-Disposition: form-data; name="%s"'%key)
+ body.write("\n\n")
+ body.write(value)
+ if value and value[-1] == '\r':
+ body.write('\n') # write an extra newline (lurve Macs)
+ body.write(end_boundary)
+ body.write("\n")
+ body = body.getvalue().encode("utf-8")
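+        # The hand-rolled multipart body looks roughly like (illustrative):
+        #   --<boundary>
+        #   Content-Disposition: form-data; name="name"
+        #
+        #   mypackage
+        #   --<boundary>--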
+
+ # build the Request
+ headers = {
+ 'Content-type': 'multipart/form-data; boundary=%s; charset=utf-8'%boundary,
+ 'Content-length': str(len(body))
+ }
+ req = urllib.request.Request(self.repository, body, headers)
+
+ # handle HTTP and include the Basic Auth handler
+ opener = urllib.request.build_opener(
+ urllib.request.HTTPBasicAuthHandler(password_mgr=auth)
+ )
+ data = ''
+ try:
+ result = opener.open(req)
+ except urllib.error.HTTPError as e:
+ if self.show_response:
+ data = e.fp.read()
+ result = e.code, e.msg
+ except urllib.error.URLError as e:
+ result = 500, str(e)
+ else:
+ if self.show_response:
+ data = self._read_pypi_response(result)
+ result = 200, 'OK'
+ if self.show_response:
+ msg = '\n'.join(('-' * 75, data, '-' * 75))
+ self.announce(msg, log.INFO)
+ return result
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/sdist.py b/third_party/python/setuptools/setuptools/_distutils/command/sdist.py
new file mode 100644
index 0000000000..b4996fcb1d
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/sdist.py
@@ -0,0 +1,494 @@
+"""distutils.command.sdist
+
+Implements the Distutils 'sdist' command (create a source distribution)."""
+
+import os
+import sys
+from glob import glob
+from warnings import warn
+
+from distutils.core import Command
+from distutils import dir_util
+from distutils import file_util
+from distutils import archive_util
+from distutils.text_file import TextFile
+from distutils.filelist import FileList
+from distutils import log
+from distutils.util import convert_path
+from distutils.errors import DistutilsTemplateError, DistutilsOptionError
+
+
+def show_formats():
+ """Print all possible values for the 'formats' option (used by
+ the "--help-formats" command-line option).
+ """
+ from distutils.fancy_getopt import FancyGetopt
+ from distutils.archive_util import ARCHIVE_FORMATS
+ formats = []
+ for format in ARCHIVE_FORMATS.keys():
+ formats.append(("formats=" + format, None,
+ ARCHIVE_FORMATS[format][2]))
+ formats.sort()
+ FancyGetopt(formats).print_help(
+ "List of available source distribution formats:")
+
+
+class sdist(Command):
+
+ description = "create a source distribution (tarball, zip file, etc.)"
+
+ def checking_metadata(self):
+ """Callable used for the check sub-command.
+
+ Placed here so user_options can view it"""
+ return self.metadata_check
+
+ user_options = [
+ ('template=', 't',
+ "name of manifest template file [default: MANIFEST.in]"),
+ ('manifest=', 'm',
+ "name of manifest file [default: MANIFEST]"),
+ ('use-defaults', None,
+ "include the default file set in the manifest "
+ "[default; disable with --no-defaults]"),
+ ('no-defaults', None,
+ "don't include the default file set"),
+ ('prune', None,
+ "specifically exclude files/directories that should not be "
+ "distributed (build tree, RCS/CVS dirs, etc.) "
+ "[default; disable with --no-prune]"),
+ ('no-prune', None,
+ "don't automatically exclude anything"),
+ ('manifest-only', 'o',
+ "just regenerate the manifest and then stop "
+ "(implies --force-manifest)"),
+ ('force-manifest', 'f',
+ "forcibly regenerate the manifest and carry on as usual. "
+ "Deprecated: now the manifest is always regenerated."),
+ ('formats=', None,
+ "formats for source distribution (comma-separated list)"),
+ ('keep-temp', 'k',
+ "keep the distribution tree around after creating " +
+ "archive file(s)"),
+ ('dist-dir=', 'd',
+ "directory to put the source distribution archive(s) in "
+ "[default: dist]"),
+ ('metadata-check', None,
+ "Ensure that all required elements of meta-data "
+ "are supplied. Warn if any missing. [default]"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file [default: current group]"),
+ ]
+
+ boolean_options = ['use-defaults', 'prune',
+ 'manifest-only', 'force-manifest',
+ 'keep-temp', 'metadata-check']
+
+ help_options = [
+ ('help-formats', None,
+ "list available distribution formats", show_formats),
+ ]
+
+ negative_opt = {'no-defaults': 'use-defaults',
+ 'no-prune': 'prune' }
+
+ sub_commands = [('check', checking_metadata)]
+
+ READMES = ('README', 'README.txt', 'README.rst')
+
+ def initialize_options(self):
+ # 'template' and 'manifest' are, respectively, the names of
+ # the manifest template and manifest file.
+ self.template = None
+ self.manifest = None
+
+ # 'use_defaults': if true, we will include the default file set
+ # in the manifest
+ self.use_defaults = 1
+ self.prune = 1
+
+ self.manifest_only = 0
+ self.force_manifest = 0
+
+ self.formats = ['gztar']
+ self.keep_temp = 0
+ self.dist_dir = None
+
+ self.archive_files = None
+ self.metadata_check = 1
+ self.owner = None
+ self.group = None
+
+ def finalize_options(self):
+ if self.manifest is None:
+ self.manifest = "MANIFEST"
+ if self.template is None:
+ self.template = "MANIFEST.in"
+
+ self.ensure_string_list('formats')
+
+ bad_format = archive_util.check_archive_formats(self.formats)
+ if bad_format:
+ raise DistutilsOptionError(
+ "unknown archive format '%s'" % bad_format)
+
+ if self.dist_dir is None:
+ self.dist_dir = "dist"
+
+ def run(self):
+ # 'filelist' contains the list of files that will make up the
+ # manifest
+ self.filelist = FileList()
+
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ # Do whatever it takes to get the list of files to process
+ # (process the manifest template, read an existing manifest,
+ # whatever). File list is accumulated in 'self.filelist'.
+ self.get_file_list()
+
+ # If user just wanted us to regenerate the manifest, stop now.
+ if self.manifest_only:
+ return
+
+ # Otherwise, go ahead and create the source distribution tarball,
+ # or zipfile, or whatever.
+ self.make_distribution()
+
+ def check_metadata(self):
+ """Deprecated API."""
+ warn("distutils.command.sdist.check_metadata is deprecated, \
+ use the check command instead", PendingDeprecationWarning)
+ check = self.distribution.get_command_obj('check')
+ check.ensure_finalized()
+ check.run()
+
+ def get_file_list(self):
+ """Figure out the list of files to include in the source
+ distribution, and put it in 'self.filelist'. This might involve
+ reading the manifest template (and writing the manifest), or just
+ reading the manifest, or just using the default file set -- it all
+ depends on the user's options.
+ """
+ # new behavior when using a template:
+ # the file list is recalculated every time because
+ # even if MANIFEST.in or setup.py are not changed
+ # the user might have added some files in the tree that
+ # need to be included.
+ #
+ # This makes --force the default and only behavior with templates.
+ template_exists = os.path.isfile(self.template)
+ if not template_exists and self._manifest_is_not_generated():
+ self.read_manifest()
+ self.filelist.sort()
+ self.filelist.remove_duplicates()
+ return
+
+ if not template_exists:
+ self.warn(("manifest template '%s' does not exist " +
+ "(using default file list)") %
+ self.template)
+ self.filelist.findall()
+
+ if self.use_defaults:
+ self.add_defaults()
+
+ if template_exists:
+ self.read_template()
+
+ if self.prune:
+ self.prune_file_list()
+
+ self.filelist.sort()
+ self.filelist.remove_duplicates()
+ self.write_manifest()
+
+ def add_defaults(self):
+ """Add all the default files to self.filelist:
+          - README, README.txt, or README.rst
+ - setup.py
+ - test/test*.py
+ - all pure Python modules mentioned in setup script
+ - all files pointed by package_data (build_py)
+ - all files defined in data_files.
+ - all files defined as scripts.
+ - all C sources listed as part of extensions or C libraries
+ in the setup script (doesn't catch C headers!)
+        Warns if a README (any of the above) or setup.py is missing;
+        everything else is optional.
+ """
+ self._add_defaults_standards()
+ self._add_defaults_optional()
+ self._add_defaults_python()
+ self._add_defaults_data_files()
+ self._add_defaults_ext()
+ self._add_defaults_c_libs()
+ self._add_defaults_scripts()
+
+ @staticmethod
+ def _cs_path_exists(fspath):
+ """
+ Case-sensitive path existence check
+
+ >>> sdist._cs_path_exists(__file__)
+ True
+ >>> sdist._cs_path_exists(__file__.upper())
+ False
+ """
+ if not os.path.exists(fspath):
+ return False
+ # make absolute so we always have a directory
+ abspath = os.path.abspath(fspath)
+ directory, filename = os.path.split(abspath)
+ return filename in os.listdir(directory)
+
+ def _add_defaults_standards(self):
+ standards = [self.READMES, self.distribution.script_name]
+ for fn in standards:
+ if isinstance(fn, tuple):
+ alts = fn
+ got_it = False
+ for fn in alts:
+ if self._cs_path_exists(fn):
+ got_it = True
+ self.filelist.append(fn)
+ break
+
+ if not got_it:
+ self.warn("standard file not found: should have one of " +
+ ', '.join(alts))
+ else:
+ if self._cs_path_exists(fn):
+ self.filelist.append(fn)
+ else:
+ self.warn("standard file '%s' not found" % fn)
+
+ def _add_defaults_optional(self):
+ optional = ['test/test*.py', 'setup.cfg']
+ for pattern in optional:
+ files = filter(os.path.isfile, glob(pattern))
+ self.filelist.extend(files)
+
+ def _add_defaults_python(self):
+ # build_py is used to get:
+ # - python modules
+ # - files defined in package_data
+ build_py = self.get_finalized_command('build_py')
+
+ # getting python files
+ if self.distribution.has_pure_modules():
+ self.filelist.extend(build_py.get_source_files())
+
+ # getting package_data files
+ # (computed in build_py.data_files by build_py.finalize_options)
+ for pkg, src_dir, build_dir, filenames in build_py.data_files:
+ for filename in filenames:
+ self.filelist.append(os.path.join(src_dir, filename))
+
+ def _add_defaults_data_files(self):
+ # getting distribution.data_files
+ if self.distribution.has_data_files():
+ for item in self.distribution.data_files:
+ if isinstance(item, str):
+ # plain file
+ item = convert_path(item)
+ if os.path.isfile(item):
+ self.filelist.append(item)
+ else:
+ # a (dirname, filenames) tuple
+ dirname, filenames = item
+ for f in filenames:
+ f = convert_path(f)
+ if os.path.isfile(f):
+ self.filelist.append(f)
+
+ def _add_defaults_ext(self):
+ if self.distribution.has_ext_modules():
+ build_ext = self.get_finalized_command('build_ext')
+ self.filelist.extend(build_ext.get_source_files())
+
+ def _add_defaults_c_libs(self):
+ if self.distribution.has_c_libraries():
+ build_clib = self.get_finalized_command('build_clib')
+ self.filelist.extend(build_clib.get_source_files())
+
+ def _add_defaults_scripts(self):
+ if self.distribution.has_scripts():
+ build_scripts = self.get_finalized_command('build_scripts')
+ self.filelist.extend(build_scripts.get_source_files())
+
+ def read_template(self):
+ """Read and parse manifest template file named by self.template.
+
+ (usually "MANIFEST.in") The parsing and processing is done by
+ 'self.filelist', which updates itself accordingly.
+ """
+ log.info("reading manifest template '%s'", self.template)
+ template = TextFile(self.template, strip_comments=1, skip_blanks=1,
+ join_lines=1, lstrip_ws=1, rstrip_ws=1,
+ collapse_join=1)
+
+ try:
+ while True:
+ line = template.readline()
+ if line is None: # end of file
+ break
+
+ try:
+ self.filelist.process_template_line(line)
+ # the call above can raise a DistutilsTemplateError for
+ # malformed lines, or a ValueError from the lower-level
+ # convert_path function
+ except (DistutilsTemplateError, ValueError) as msg:
+ self.warn("%s, line %d: %s" % (template.filename,
+ template.current_line,
+ msg))
+ finally:
+ template.close()
+
+ def prune_file_list(self):
+ """Prune off branches that might slip into the file list as created
+ by 'read_template()', but really don't belong there:
+ * the build tree (typically "build")
+ * the release tree itself (only an issue if we ran "sdist"
+ previously with --keep-temp, or it aborted)
+ * any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
+ """
+ build = self.get_finalized_command('build')
+ base_dir = self.distribution.get_fullname()
+
+ self.filelist.exclude_pattern(None, prefix=build.build_base)
+ self.filelist.exclude_pattern(None, prefix=base_dir)
+
+ if sys.platform == 'win32':
+ seps = r'/|\\'
+ else:
+ seps = '/'
+
+ vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
+ '_darcs']
+ vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
+ self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
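+        # On POSIX the resulting pattern looks like (illustrative):
+        #   r'(^|/)(RCS|CVS|\.svn|\.hg|\.git|\.bzr|_darcs)(/).*'
+        # so entries such as '.git/config' or 'src/.svn/entries' are
+        # pruned from the file list.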
+
+ def write_manifest(self):
+ """Write the file list in 'self.filelist' (presumably as filled in
+ by 'add_defaults()' and 'read_template()') to the manifest file
+ named by 'self.manifest'.
+ """
+ if self._manifest_is_not_generated():
+ log.info("not writing to manually maintained "
+ "manifest file '%s'" % self.manifest)
+ return
+
+ content = self.filelist.files[:]
+ content.insert(0, '# file GENERATED by distutils, do NOT edit')
+ self.execute(file_util.write_file, (self.manifest, content),
+ "writing manifest file '%s'" % self.manifest)
+
+ def _manifest_is_not_generated(self):
+ # check for special comment used in 3.1.3 and higher
+ if not os.path.isfile(self.manifest):
+ return False
+
+        with open(self.manifest) as fp:
+            first_line = fp.readline()
+ return first_line != '# file GENERATED by distutils, do NOT edit\n'
+
+ def read_manifest(self):
+ """Read the manifest file (named by 'self.manifest') and use it to
+ fill in 'self.filelist', the list of files to include in the source
+ distribution.
+ """
+ log.info("reading manifest file '%s'", self.manifest)
+ with open(self.manifest) as manifest:
+ for line in manifest:
+ # ignore comments and blank lines
+ line = line.strip()
+ if line.startswith('#') or not line:
+ continue
+ self.filelist.append(line)
+
+ def make_release_tree(self, base_dir, files):
+ """Create the directory tree that will become the source
+ distribution archive. All directories implied by the filenames in
+ 'files' are created under 'base_dir', and then we hard link or copy
+ (if hard linking is unavailable) those files into place.
+ Essentially, this duplicates the developer's source tree, but in a
+ directory named after the distribution, containing only the files
+ to be distributed.
+ """
+ # Create all the directories under 'base_dir' necessary to
+ # put 'files' there; the 'mkpath()' is just so we don't die
+ # if the manifest happens to be empty.
+ self.mkpath(base_dir)
+ dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
+
+ # And walk over the list of files, either making a hard link (if
+ # os.link exists) to each one that doesn't already exist in its
+ # corresponding location under 'base_dir', or copying each file
+ # that's out-of-date in 'base_dir'. (Usually, all files will be
+ # out-of-date, because by default we blow away 'base_dir' when
+ # we're done making the distribution archives.)
+
+ if hasattr(os, 'link'): # can make hard links on this system
+ link = 'hard'
+ msg = "making hard links in %s..." % base_dir
+ else: # nope, have to copy
+ link = None
+ msg = "copying files to %s..." % base_dir
+
+ if not files:
+ log.warn("no files to distribute -- empty manifest?")
+ else:
+ log.info(msg)
+ for file in files:
+ if not os.path.isfile(file):
+ log.warn("'%s' not a regular file -- skipping", file)
+ else:
+ dest = os.path.join(base_dir, file)
+ self.copy_file(file, dest, link=link)
+
+ self.distribution.metadata.write_pkg_info(base_dir)
+
+ def make_distribution(self):
+ """Create the source distribution(s). First, we create the release
+ tree with 'make_release_tree()'; then, we create all required
+ archive files (according to 'self.formats') from the release tree.
+ Finally, we clean up by blowing away the release tree (unless
+ 'self.keep_temp' is true). The list of archive files created is
+ stored so it can be retrieved later by 'get_archive_files()'.
+ """
+ # Don't warn about missing meta-data here -- should be (and is!)
+ # done elsewhere.
+ base_dir = self.distribution.get_fullname()
+ base_name = os.path.join(self.dist_dir, base_dir)
+
+ self.make_release_tree(base_dir, self.filelist.files)
+ archive_files = [] # remember names of files we create
+ # tar archive must be created last to avoid overwrite and remove
+ if 'tar' in self.formats:
+ self.formats.append(self.formats.pop(self.formats.index('tar')))
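+            # e.g. formats=['tar', 'gztar'] is reordered to
+            # ['gztar', 'tar'] so the plain tar is written last
+            # (illustrative).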
+
+ for fmt in self.formats:
+ file = self.make_archive(base_name, fmt, base_dir=base_dir,
+ owner=self.owner, group=self.group)
+ archive_files.append(file)
+ self.distribution.dist_files.append(('sdist', '', file))
+
+ self.archive_files = archive_files
+
+ if not self.keep_temp:
+ dir_util.remove_tree(base_dir, dry_run=self.dry_run)
+
+ def get_archive_files(self):
+ """Return the list of archive files created when the command
+ was run, or None if the command hasn't run yet.
+ """
+ return self.archive_files
diff --git a/third_party/python/setuptools/setuptools/_distutils/command/upload.py b/third_party/python/setuptools/setuptools/_distutils/command/upload.py
new file mode 100644
index 0000000000..95e9fda186
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/command/upload.py
@@ -0,0 +1,214 @@
+"""
+distutils.command.upload
+
+Implements the Distutils 'upload' subcommand (upload package to a package
+index).
+"""
+
+import os
+import io
+import hashlib
+from base64 import standard_b64encode
+from urllib.request import urlopen, Request, HTTPError
+from urllib.parse import urlparse
+from distutils.errors import DistutilsError, DistutilsOptionError
+from distutils.core import PyPIRCCommand
+from distutils.spawn import spawn
+from distutils import log
+
+
+# PyPI Warehouse supports MD5, SHA256, and Blake2 (blake2-256)
+# https://bugs.python.org/issue40698
+_FILE_CONTENT_DIGESTS = {
+ "md5_digest": getattr(hashlib, "md5", None),
+ "sha256_digest": getattr(hashlib, "sha256", None),
+ "blake2_256_digest": getattr(hashlib, "blake2b", None),
+}
+
+
+class upload(PyPIRCCommand):
+
+ description = "upload binary package to PyPI"
+
+ user_options = PyPIRCCommand.user_options + [
+ ('sign', 's',
+ 'sign files to upload using gpg'),
+ ('identity=', 'i', 'GPG identity used to sign files'),
+ ]
+
+ boolean_options = PyPIRCCommand.boolean_options + ['sign']
+
+ def initialize_options(self):
+ PyPIRCCommand.initialize_options(self)
+ self.username = ''
+ self.password = ''
+ self.show_response = 0
+ self.sign = False
+ self.identity = None
+
+ def finalize_options(self):
+ PyPIRCCommand.finalize_options(self)
+ if self.identity and not self.sign:
+ raise DistutilsOptionError(
+ "Must use --sign for --identity to have meaning"
+ )
+ config = self._read_pypirc()
+ if config != {}:
+ self.username = config['username']
+ self.password = config['password']
+ self.repository = config['repository']
+ self.realm = config['realm']
+
+ # getting the password from the distribution
+ # if previously set by the register command
+ if not self.password and self.distribution.password:
+ self.password = self.distribution.password
+
+ def run(self):
+ if not self.distribution.dist_files:
+ msg = ("Must create and upload files in one command "
+ "(e.g. setup.py sdist upload)")
+ raise DistutilsOptionError(msg)
+ for command, pyversion, filename in self.distribution.dist_files:
+ self.upload_file(command, pyversion, filename)
+
+ def upload_file(self, command, pyversion, filename):
+ # Makes sure the repository URL is compliant
+ schema, netloc, url, params, query, fragments = \
+ urlparse(self.repository)
+ if params or query or fragments:
+ raise AssertionError("Incompatible url %s" % self.repository)
+
+ if schema not in ('http', 'https'):
+ raise AssertionError("unsupported schema " + schema)
+
+ # Sign if requested
+ if self.sign:
+ gpg_args = ["gpg", "--detach-sign", "-a", filename]
+ if self.identity:
+ gpg_args[2:2] = ["--local-user", self.identity]
+ spawn(gpg_args,
+ dry_run=self.dry_run)
+
+ # Fill in the data - send all the meta-data in case we need to
+ # register a new release
+ f = open(filename,'rb')
+ try:
+ content = f.read()
+ finally:
+ f.close()
+
+ meta = self.distribution.metadata
+ data = {
+ # action
+ ':action': 'file_upload',
+ 'protocol_version': '1',
+
+ # identify release
+ 'name': meta.get_name(),
+ 'version': meta.get_version(),
+
+ # file content
+ 'content': (os.path.basename(filename),content),
+ 'filetype': command,
+ 'pyversion': pyversion,
+
+ # additional meta-data
+ 'metadata_version': '1.0',
+ 'summary': meta.get_description(),
+ 'home_page': meta.get_url(),
+ 'author': meta.get_contact(),
+ 'author_email': meta.get_contact_email(),
+ 'license': meta.get_licence(),
+ 'description': meta.get_long_description(),
+ 'keywords': meta.get_keywords(),
+ 'platform': meta.get_platforms(),
+ 'classifiers': meta.get_classifiers(),
+ 'download_url': meta.get_download_url(),
+ # PEP 314
+ 'provides': meta.get_provides(),
+ 'requires': meta.get_requires(),
+ 'obsoletes': meta.get_obsoletes(),
+ }
+
+ data['comment'] = ''
+
+ # file content digests
+ for digest_name, digest_cons in _FILE_CONTENT_DIGESTS.items():
+ if digest_cons is None:
+ continue
+ try:
+ data[digest_name] = digest_cons(content).hexdigest()
+ except ValueError:
+ # hash digest not available or blocked by security policy
+ pass
+
+ if self.sign:
+ with open(filename + ".asc", "rb") as f:
+ data['gpg_signature'] = (os.path.basename(filename) + ".asc",
+ f.read())
+
+ # set up the authentication
+ user_pass = (self.username + ":" + self.password).encode('ascii')
+        # The exact encoding of the authentication string is debated;
+        # in any case, PyPI accepts only ASCII for both username and password.
+ auth = "Basic " + standard_b64encode(user_pass).decode('ascii')
+
+ # Build up the MIME payload for the POST data
+ boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+ sep_boundary = b'\r\n--' + boundary.encode('ascii')
+ end_boundary = sep_boundary + b'--\r\n'
+ body = io.BytesIO()
+ for key, value in data.items():
+ title = '\r\nContent-Disposition: form-data; name="%s"' % key
+ # handle multiple entries for the same name
+ if not isinstance(value, list):
+ value = [value]
+ for value in value:
+ if type(value) is tuple:
+ title += '; filename="%s"' % value[0]
+ value = value[1]
+ else:
+ value = str(value).encode('utf-8')
+ body.write(sep_boundary)
+ body.write(title.encode('utf-8'))
+ body.write(b"\r\n\r\n")
+ body.write(value)
+ body.write(end_boundary)
+ body = body.getvalue()
+
+ msg = "Submitting %s to %s" % (filename, self.repository)
+ self.announce(msg, log.INFO)
+
+ # build the Request
+ headers = {
+ 'Content-type': 'multipart/form-data; boundary=%s' % boundary,
+ 'Content-length': str(len(body)),
+ 'Authorization': auth,
+ }
+
+ request = Request(self.repository, data=body,
+ headers=headers)
+ # send the data
+ try:
+ result = urlopen(request)
+ status = result.getcode()
+ reason = result.msg
+ except HTTPError as e:
+ status = e.code
+ reason = e.msg
+ except OSError as e:
+ self.announce(str(e), log.ERROR)
+ raise
+
+ if status == 200:
+ self.announce('Server response (%s): %s' % (status, reason),
+ log.INFO)
+ if self.show_response:
+ text = self._read_pypi_response(result)
+ msg = '\n'.join(('-' * 75, text, '-' * 75))
+ self.announce(msg, log.INFO)
+ else:
+ msg = 'Upload failed (%s): %s' % (status, reason)
+ self.announce(msg, log.ERROR)
+ raise DistutilsError(msg)
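
upload_file() above hand-rolls its multipart/form-data request body rather
than pulling in an HTTP library. A self-contained sketch of the same framing;
the boundary and field values here are made-up placeholders, not what
distutils actually sends:

    import io

    boundary = 'XXXXXXXXXXXXXXXXXXXX'   # must not occur in the payload
    sep_boundary = b'\r\n--' + boundary.encode('ascii')
    end_boundary = sep_boundary + b'--\r\n'

    fields = {'name': 'example-pkg', 'version': '1.0'}
    body = io.BytesIO()
    for key, value in fields.items():
        body.write(sep_boundary)
        disposition = '\r\nContent-Disposition: form-data; name="%s"' % key
        body.write(disposition.encode('utf-8'))
        body.write(b'\r\n\r\n')
        body.write(str(value).encode('utf-8'))
    body.write(end_boundary)
    payload = body.getvalue()
    # the matching request header is then:
    #   Content-type: multipart/form-data; boundary=<boundary>
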
diff --git a/third_party/python/setuptools/setuptools/_distutils/config.py b/third_party/python/setuptools/setuptools/_distutils/config.py
new file mode 100644
index 0000000000..2171abd696
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/config.py
@@ -0,0 +1,130 @@
+"""distutils.pypirc
+
+Provides the PyPIRCCommand class, the base class for the command classes
+that use .pypirc in the distutils.command package.
+"""
+import os
+from configparser import RawConfigParser
+
+from distutils.cmd import Command
+
+DEFAULT_PYPIRC = """\
+[distutils]
+index-servers =
+ pypi
+
+[pypi]
+username:%s
+password:%s
+"""
+
+class PyPIRCCommand(Command):
+ """Base command that knows how to handle the .pypirc file
+ """
+ DEFAULT_REPOSITORY = 'https://upload.pypi.org/legacy/'
+ DEFAULT_REALM = 'pypi'
+ repository = None
+ realm = None
+
+ user_options = [
+ ('repository=', 'r',
+ "url of repository [default: %s]" % \
+ DEFAULT_REPOSITORY),
+ ('show-response', None,
+ 'display full response text from server')]
+
+ boolean_options = ['show-response']
+
+ def _get_rc_file(self):
+ """Returns rc file path."""
+ return os.path.join(os.path.expanduser('~'), '.pypirc')
+
+ def _store_pypirc(self, username, password):
+ """Creates a default .pypirc file."""
+ rc = self._get_rc_file()
+ with os.fdopen(os.open(rc, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
+ f.write(DEFAULT_PYPIRC % (username, password))
+
+ def _read_pypirc(self):
+ """Reads the .pypirc file."""
+ rc = self._get_rc_file()
+ if os.path.exists(rc):
+ self.announce('Using PyPI login from %s' % rc)
+ repository = self.repository or self.DEFAULT_REPOSITORY
+
+ config = RawConfigParser()
+ config.read(rc)
+ sections = config.sections()
+ if 'distutils' in sections:
+ # let's get the list of servers
+ index_servers = config.get('distutils', 'index-servers')
+ _servers = [server.strip() for server in
+ index_servers.split('\n')
+ if server.strip() != '']
+ if _servers == []:
+ # nothing set, let's try to get the default pypi
+ if 'pypi' in sections:
+ _servers = ['pypi']
+ else:
+ # the file is not properly defined, returning
+ # an empty dict
+ return {}
+ for server in _servers:
+ current = {'server': server}
+ current['username'] = config.get(server, 'username')
+
+ # optional params
+ for key, default in (('repository',
+ self.DEFAULT_REPOSITORY),
+ ('realm', self.DEFAULT_REALM),
+ ('password', None)):
+ if config.has_option(server, key):
+ current[key] = config.get(server, key)
+ else:
+ current[key] = default
+
+ # work around people having "repository" for the "pypi"
+ # section of their config set to the HTTP (rather than
+ # HTTPS) URL
+ if (server == 'pypi' and
+ repository in (self.DEFAULT_REPOSITORY, 'pypi')):
+ current['repository'] = self.DEFAULT_REPOSITORY
+ return current
+
+ if (current['server'] == repository or
+ current['repository'] == repository):
+ return current
+ elif 'server-login' in sections:
+ # old format
+ server = 'server-login'
+ if config.has_option(server, 'repository'):
+ repository = config.get(server, 'repository')
+ else:
+ repository = self.DEFAULT_REPOSITORY
+ return {'username': config.get(server, 'username'),
+ 'password': config.get(server, 'password'),
+ 'repository': repository,
+ 'server': server,
+ 'realm': self.DEFAULT_REALM}
+
+ return {}
+
+ def _read_pypi_response(self, response):
+ """Read and decode a PyPI HTTP response."""
+ import cgi
+ content_type = response.getheader('content-type', 'text/plain')
+ encoding = cgi.parse_header(content_type)[1].get('charset', 'ascii')
+ return response.read().decode(encoding)
+
+ def initialize_options(self):
+ """Initialize options."""
+ self.repository = None
+ self.realm = None
+ self.show_response = 0
+
+ def finalize_options(self):
+ """Finalizes options."""
+ if self.repository is None:
+ self.repository = self.DEFAULT_REPOSITORY
+ if self.realm is None:
+ self.realm = self.DEFAULT_REALM
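
_read_pypirc() above resolves the [distutils] index-servers list and merges
each server section with defaults. A standalone sketch of that lookup against
an in-memory .pypirc; the credentials are placeholders:

    import textwrap
    from configparser import RawConfigParser

    sample = textwrap.dedent('''\
        [distutils]
        index-servers =
            pypi

        [pypi]
        username:alice
        password:secret
    ''')

    config = RawConfigParser()
    config.read_string(sample)
    servers = [s.strip()
               for s in config.get('distutils', 'index-servers').split('\n')
               if s.strip()]
    for server in servers:
        current = {'server': server,
                   'username': config.get(server, 'username')}
        # optional keys fall back to defaults, as in PyPIRCCommand
        for key, default in (('repository', 'https://upload.pypi.org/legacy/'),
                             ('password', None)):
            current[key] = (config.get(server, key)
                            if config.has_option(server, key) else default)
        print(current)
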
diff --git a/third_party/python/setuptools/setuptools/_distutils/core.py b/third_party/python/setuptools/setuptools/_distutils/core.py
new file mode 100644
index 0000000000..d603d4a45a
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/core.py
@@ -0,0 +1,234 @@
+"""distutils.core
+
+The only module that needs to be imported to use the Distutils; provides
+the 'setup' function (which is to be called from the setup script). Also
+indirectly provides the Distribution and Command classes, although they are
+really defined in distutils.dist and distutils.cmd.
+"""
+
+import os
+import sys
+
+from distutils.debug import DEBUG
+from distutils.errors import *
+
+# Mainly import these so setup scripts can "from distutils.core import" them.
+from distutils.dist import Distribution
+from distutils.cmd import Command
+from distutils.config import PyPIRCCommand
+from distutils.extension import Extension
+
+# This is a barebones help message displayed when the user
+# runs the setup script with no arguments at all. More useful help
+# is generated with various --help options: global help, list commands,
+# and per-command help.
+USAGE = """\
+usage: %(script)s [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...]
+ or: %(script)s --help [cmd1 cmd2 ...]
+ or: %(script)s --help-commands
+ or: %(script)s cmd --help
+"""
+
+def gen_usage (script_name):
+ script = os.path.basename(script_name)
+ return USAGE % vars()
+
+
+# Some mild magic to control the behaviour of 'setup()' from 'run_setup()'.
+_setup_stop_after = None
+_setup_distribution = None
+
+# Legal keyword arguments for the setup() function
+setup_keywords = ('distclass', 'script_name', 'script_args', 'options',
+ 'name', 'version', 'author', 'author_email',
+ 'maintainer', 'maintainer_email', 'url', 'license',
+ 'description', 'long_description', 'keywords',
+ 'platforms', 'classifiers', 'download_url',
+ 'requires', 'provides', 'obsoletes',
+ )
+
+# Legal keyword arguments for the Extension constructor
+extension_keywords = ('name', 'sources', 'include_dirs',
+ 'define_macros', 'undef_macros',
+ 'library_dirs', 'libraries', 'runtime_library_dirs',
+ 'extra_objects', 'extra_compile_args', 'extra_link_args',
+ 'swig_opts', 'export_symbols', 'depends', 'language')
+
+def setup (**attrs):
+ """The gateway to the Distutils: do everything your setup script needs
+ to do, in a highly flexible and user-driven way. Briefly: create a
+ Distribution instance; find and parse config files; parse the command
+ line; run each Distutils command found there, customized by the options
+ supplied to 'setup()' (as keyword arguments), in config files, and on
+ the command line.
+
+ The Distribution instance might be an instance of a class supplied via
+ the 'distclass' keyword argument to 'setup'; if no such class is
+ supplied, then the Distribution class (in dist.py) is instantiated.
+ All other arguments to 'setup' (except for 'cmdclass') are used to set
+ attributes of the Distribution instance.
+
+ The 'cmdclass' argument, if supplied, is a dictionary mapping command
+ names to command classes. Each command encountered on the command line
+ will be turned into a command class, which is in turn instantiated; any
+ class found in 'cmdclass' is used in place of the default, which is
+ (for command 'foo_bar') class 'foo_bar' in module
+ 'distutils.command.foo_bar'. The command class must provide a
+ 'user_options' attribute which is a list of option specifiers for
+ 'distutils.fancy_getopt'. Any command-line options between the current
+ and the next command are used to set attributes of the current command
+ object.
+
+ When the entire command-line has been successfully parsed, calls the
+ 'run()' method on each command object in turn. This method will be
+ driven entirely by the Distribution object (which each command object
+ has a reference to, thanks to its constructor), and the
+ command-specific options that became attributes of each command
+ object.
+ """
+
+ global _setup_stop_after, _setup_distribution
+
+ # Determine the distribution class -- either caller-supplied or
+ # our Distribution (see below).
+ klass = attrs.get('distclass')
+ if klass:
+ del attrs['distclass']
+ else:
+ klass = Distribution
+
+ if 'script_name' not in attrs:
+ attrs['script_name'] = os.path.basename(sys.argv[0])
+ if 'script_args' not in attrs:
+ attrs['script_args'] = sys.argv[1:]
+
+ # Create the Distribution instance, using the remaining arguments
+ # (ie. everything except distclass) to initialize it
+ try:
+ _setup_distribution = dist = klass(attrs)
+ except DistutilsSetupError as msg:
+ if 'name' not in attrs:
+ raise SystemExit("error in setup command: %s" % msg)
+ else:
+ raise SystemExit("error in %s setup command: %s" % \
+ (attrs['name'], msg))
+
+ if _setup_stop_after == "init":
+ return dist
+
+ # Find and parse the config file(s): they will override options from
+ # the setup script, but be overridden by the command line.
+ dist.parse_config_files()
+
+ if DEBUG:
+ print("options (after parsing config files):")
+ dist.dump_option_dicts()
+
+ if _setup_stop_after == "config":
+ return dist
+
+ # Parse the command line and override config files; any
+ # command-line errors are the end user's fault, so turn them into
+ # SystemExit to suppress tracebacks.
+ try:
+ ok = dist.parse_command_line()
+ except DistutilsArgError as msg:
+ raise SystemExit(gen_usage(dist.script_name) + "\nerror: %s" % msg)
+
+ if DEBUG:
+ print("options (after parsing command line):")
+ dist.dump_option_dicts()
+
+ if _setup_stop_after == "commandline":
+ return dist
+
+ # And finally, run all the commands found on the command line.
+ if ok:
+ try:
+ dist.run_commands()
+ except KeyboardInterrupt:
+ raise SystemExit("interrupted")
+ except OSError as exc:
+ if DEBUG:
+ sys.stderr.write("error: %s\n" % (exc,))
+ raise
+ else:
+ raise SystemExit("error: %s" % (exc,))
+
+ except (DistutilsError,
+ CCompilerError) as msg:
+ if DEBUG:
+ raise
+ else:
+ raise SystemExit("error: " + str(msg))
+
+ return dist
+
+# setup ()
+
+
+def run_setup (script_name, script_args=None, stop_after="run"):
+ """Run a setup script in a somewhat controlled environment, and
+ return the Distribution instance that drives things. This is useful
+ if you need to find out the distribution meta-data (passed as
+    keyword args from 'script_name' to 'setup()'), or the contents of the
+    config files or command-line.
+
+ 'script_name' is a file that will be read and run with 'exec()';
+    'sys.argv[0]' will be replaced with 'script_name' for the duration of the
+ call. 'script_args' is a list of strings; if supplied,
+ 'sys.argv[1:]' will be replaced by 'script_args' for the duration of
+ the call.
+
+ 'stop_after' tells 'setup()' when to stop processing; possible
+ values:
+ init
+ stop after the Distribution instance has been created and
+ populated with the keyword arguments to 'setup()'
+ config
+ stop after config files have been parsed (and their data
+ stored in the Distribution instance)
+ commandline
+ stop after the command-line ('sys.argv[1:]' or 'script_args')
+ have been parsed (and the data stored in the Distribution)
+ run [default]
+ stop after all commands have been run (the same as if 'setup()'
+        had been called in the usual way)
+
+ Returns the Distribution instance, which provides all information
+ used to drive the Distutils.
+ """
+ if stop_after not in ('init', 'config', 'commandline', 'run'):
+ raise ValueError("invalid value for 'stop_after': %r" % (stop_after,))
+
+ global _setup_stop_after, _setup_distribution
+ _setup_stop_after = stop_after
+
+ save_argv = sys.argv.copy()
+ g = {'__file__': script_name}
+ try:
+ try:
+ sys.argv[0] = script_name
+ if script_args is not None:
+ sys.argv[1:] = script_args
+ with open(script_name, 'rb') as f:
+ exec(f.read(), g)
+ finally:
+ sys.argv = save_argv
+ _setup_stop_after = None
+ except SystemExit:
+ # Hmm, should we do something if exiting with a non-zero code
+ # (ie. error)?
+ pass
+
+ if _setup_distribution is None:
+ raise RuntimeError(("'distutils.core.setup()' was never called -- "
+ "perhaps '%s' is not a Distutils setup script?") % \
+ script_name)
+
+ # I wonder if the setup script's namespace -- g and l -- would be of
+ # any interest to callers?
+ #print "_setup_distribution:", _setup_distribution
+ return _setup_distribution
+
+# run_setup ()
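
run_setup() is the supported way to introspect a setup script without letting
it execute any build commands. A short usage sketch, assuming a 'setup.py'
that calls setup() exists in the current directory:

    from distutils.core import run_setup

    # parse setup.py and its config files, but stop before any commands run
    dist = run_setup('setup.py', stop_after='config')
    print(dist.get_name(), dist.get_version())
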
diff --git a/third_party/python/setuptools/setuptools/_distutils/cygwinccompiler.py b/third_party/python/setuptools/setuptools/_distutils/cygwinccompiler.py
new file mode 100644
index 0000000000..66c12dd358
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/cygwinccompiler.py
@@ -0,0 +1,403 @@
+"""distutils.cygwinccompiler
+
+Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
+handles the Cygwin port of the GNU C compiler to Windows. It also contains
+the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
+cygwin in no-cygwin mode).
+"""
+
+# problems:
+#
+# * if you use a msvc compiled python version (1.5.2)
+# 1. you have to insert a __GNUC__ section in its config.h
+# 2. you have to generate an import library for its dll
+# - create a def-file for python??.dll
+# - create an import library using
+# dlltool --dllname python15.dll --def python15.def \
+# --output-lib libpython15.a
+#
+# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
+#
+# * We put export_symbols in a def-file, and don't use
+#   --export-all-symbols because it didn't work reliably in some
+#   tested configurations. And because other Windows compilers also
+#   need their symbols specified, this is no serious problem.
+#
+# tested configurations:
+#
+# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
+# (after patching python's config.h and for C++ some other include files)
+# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
+# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
+# (ld doesn't support -shared, so we use dllwrap)
+# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
+# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
+# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
+#   - using gcc -mdll instead of dllwrap doesn't work without -static,
+#     because it tries to link against dlls instead of their import
+#     libraries (if it finds the dll first).
+#     By specifying -static we force ld to link against the import
+#     libraries; this is the Windows standard, and the dlls normally
+#     do not contain the necessary symbols.
+# *** only the version of June 2000 shows these problems
+# * cygwin gcc 3.2/ld 2.13.90 works
+# (ld supports -shared)
+# * mingw gcc 3.2/ld 2.13 works
+# (ld supports -shared)
+
+import os
+import sys
+import copy
+from subprocess import Popen, PIPE, check_output
+import re
+
+from distutils.unixccompiler import UnixCCompiler
+from distutils.file_util import write_file
+from distutils.errors import (DistutilsExecError, CCompilerError,
+ CompileError, UnknownFileError)
+from distutils.version import LooseVersion
+from distutils.spawn import find_executable
+
+def get_msvcr():
+ """Include the appropriate MSVC runtime library if Python was built
+ with MSVC 7.0 or later.
+ """
+ msc_pos = sys.version.find('MSC v.')
+ if msc_pos != -1:
+ msc_ver = sys.version[msc_pos+6:msc_pos+10]
+ if msc_ver == '1300':
+ # MSVC 7.0
+ return ['msvcr70']
+ elif msc_ver == '1310':
+ # MSVC 7.1
+ return ['msvcr71']
+ elif msc_ver == '1400':
+ # VS2005 / MSVC 8.0
+ return ['msvcr80']
+ elif msc_ver == '1500':
+ # VS2008 / MSVC 9.0
+ return ['msvcr90']
+ elif msc_ver == '1600':
+ # VS2010 / MSVC 10.0
+ return ['msvcr100']
+ else:
+ raise ValueError("Unknown MS Compiler version %s " % msc_ver)
+
+
+class CygwinCCompiler(UnixCCompiler):
+ """ Handles the Cygwin port of the GNU C compiler to Windows.
+ """
+ compiler_type = 'cygwin'
+ obj_extension = ".o"
+ static_lib_extension = ".a"
+ shared_lib_extension = ".dll"
+ static_lib_format = "lib%s%s"
+ shared_lib_format = "%s%s"
+ exe_extension = ".exe"
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+
+ UnixCCompiler.__init__(self, verbose, dry_run, force)
+
+ status, details = check_config_h()
+ self.debug_print("Python's GCC status: %s (details: %s)" %
+ (status, details))
+ if status is not CONFIG_H_OK:
+ self.warn(
+ "Python's pyconfig.h doesn't seem to support your compiler. "
+ "Reason: %s. "
+ "Compiling may fail because of undefined preprocessor macros."
+ % details)
+
+ self.gcc_version, self.ld_version, self.dllwrap_version = \
+ get_versions()
+ self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
+ (self.gcc_version,
+ self.ld_version,
+ self.dllwrap_version) )
+
+ # ld_version >= "2.10.90" and < "2.13" should also be able to use
+ # gcc -mdll instead of dllwrap
+        # Older dllwraps had their own version numbers; newer ones use
+        # the same numbering as the rest of binutils (ld included)
+ # dllwrap 2.10.90 is buggy
+ if self.ld_version >= "2.10.90":
+ self.linker_dll = "gcc"
+ else:
+ self.linker_dll = "dllwrap"
+
+ # ld_version >= "2.13" support -shared so use it instead of
+ # -mdll -static
+ if self.ld_version >= "2.13":
+ shared_option = "-shared"
+ else:
+ shared_option = "-mdll -static"
+
+ # Hard-code GCC because that's what this is all about.
+ # XXX optimization, warnings etc. should be customizable.
+ self.set_executables(compiler='gcc -mcygwin -O -Wall',
+ compiler_so='gcc -mcygwin -mdll -O -Wall',
+ compiler_cxx='g++ -mcygwin -O -Wall',
+ linker_exe='gcc -mcygwin',
+ linker_so=('%s -mcygwin %s' %
+ (self.linker_dll, shared_option)))
+
+ # cygwin and mingw32 need different sets of libraries
+ if self.gcc_version == "2.91.57":
+ # cygwin shouldn't need msvcrt, but without the dlls will crash
+ # (gcc version 2.91.57) -- perhaps something about initialization
+ self.dll_libraries=["msvcrt"]
+ self.warn(
+ "Consider upgrading to a newer version of gcc")
+ else:
+ # Include the appropriate MSVC runtime library if Python was built
+ # with MSVC 7.0 or later.
+ self.dll_libraries = get_msvcr()
+
+ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ """Compiles the source by spawning GCC and windres if needed."""
+ if ext == '.rc' or ext == '.res':
+ # gcc needs '.res' and '.rc' compiled to object files !!!
+ try:
+ self.spawn(["windres", "-i", src, "-o", obj])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ else: # for other files use the C-compiler
+ try:
+ self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
+ extra_postargs)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ def link(self, target_desc, objects, output_filename, output_dir=None,
+ libraries=None, library_dirs=None, runtime_library_dirs=None,
+ export_symbols=None, debug=0, extra_preargs=None,
+ extra_postargs=None, build_temp=None, target_lang=None):
+ """Link the objects."""
+ # use separate copies, so we can modify the lists
+ extra_preargs = copy.copy(extra_preargs or [])
+ libraries = copy.copy(libraries or [])
+ objects = copy.copy(objects or [])
+
+ # Additional libraries
+ libraries.extend(self.dll_libraries)
+
+ # handle export symbols by creating a def-file
+ # with executables this only works with gcc/ld as linker
+ if ((export_symbols is not None) and
+ (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
+            # (The linker doesn't do anything if the output is up-to-date.
+            # So it would probably be better to check whether we really
+            # need this, but that would mean duplicating some unchanged
+            # parts of UnixCCompiler, and that is not what we want.)
+
+            # we want to put some files in the same directory as the
+            # object files; build_temp doesn't help much, so use the
+            # directory where the object files live
+ temp_dir = os.path.dirname(objects[0])
+            # name of the dll, so the helper files get the same base name
+ (dll_name, dll_extension) = os.path.splitext(
+ os.path.basename(output_filename))
+
+ # generate the filenames for these files
+ def_file = os.path.join(temp_dir, dll_name + ".def")
+ lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
+
+ # Generate .def file
+ contents = [
+ "LIBRARY %s" % os.path.basename(output_filename),
+ "EXPORTS"]
+ for sym in export_symbols:
+ contents.append(sym)
+ self.execute(write_file, (def_file, contents),
+ "writing %s" % def_file)
+
+            # next, add options for the def-file and for creating import libraries
+
+ # dllwrap uses different options than gcc/ld
+ if self.linker_dll == "dllwrap":
+ extra_preargs.extend(["--output-lib", lib_file])
+ # for dllwrap we have to use a special option
+ extra_preargs.extend(["--def", def_file])
+ # we use gcc/ld here and can be sure ld is >= 2.9.10
+ else:
+ # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
+ #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
+                # for gcc/ld the def-file is specified like any other object file
+ objects.append(def_file)
+
+ #end: if ((export_symbols is not None) and
+ # (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
+
+        # anyone who wants symbols and a many-times-larger output file
+        # should explicitly switch debug mode on;
+        # otherwise we let dllwrap/ld strip the output file
+ # (On my machine: 10KiB < stripped_file < ??100KiB
+ # unstripped_file = stripped_file + XXX KiB
+ # ( XXX=254 for a typical python extension))
+ if not debug:
+ extra_preargs.append("-s")
+
+ UnixCCompiler.link(self, target_desc, objects, output_filename,
+ output_dir, libraries, library_dirs,
+ runtime_library_dirs,
+ None, # export_symbols, we do this in our def-file
+ debug, extra_preargs, extra_postargs, build_temp,
+ target_lang)
+
+ # -- Miscellaneous methods -----------------------------------------
+
+ def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
+ """Adds supports for rc and res files."""
+ if output_dir is None:
+ output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ # use normcase to make sure '.rc' is really '.rc' and not '.RC'
+ base, ext = os.path.splitext(os.path.normcase(src_name))
+ if ext not in (self.src_extensions + ['.rc','.res']):
+ raise UnknownFileError("unknown file type '%s' (from '%s')" % \
+ (ext, src_name))
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext in ('.res', '.rc'):
+ # these need to be compiled to object files
+ obj_names.append (os.path.join(output_dir,
+ base + ext + self.obj_extension))
+ else:
+ obj_names.append (os.path.join(output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+# the same as cygwin plus some additional parameters
+class Mingw32CCompiler(CygwinCCompiler):
+ """ Handles the Mingw32 port of the GNU C compiler to Windows.
+ """
+ compiler_type = 'mingw32'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+
+ CygwinCCompiler.__init__ (self, verbose, dry_run, force)
+
+ # ld_version >= "2.13" support -shared so use it instead of
+ # -mdll -static
+ if self.ld_version >= "2.13":
+ shared_option = "-shared"
+ else:
+ shared_option = "-mdll -static"
+
+ # A real mingw32 doesn't need to specify a different entry point,
+ # but cygwin 2.91.57 in no-cygwin-mode needs it.
+ if self.gcc_version <= "2.91.57":
+ entry_point = '--entry _DllMain@12'
+ else:
+ entry_point = ''
+
+ if is_cygwingcc():
+ raise CCompilerError(
+ 'Cygwin gcc cannot be used with --compiler=mingw32')
+
+ self.set_executables(compiler='gcc -O -Wall',
+ compiler_so='gcc -mdll -O -Wall',
+ compiler_cxx='g++ -O -Wall',
+ linker_exe='gcc',
+ linker_so='%s %s %s'
+ % (self.linker_dll, shared_option,
+ entry_point))
+ # Maybe we should also append -mthreads, but then the finished
+ # dlls need another dll (mingwm10.dll see Mingw32 docs)
+ # (-mthreads: Support thread-safe exception handling on `Mingw32')
+
+ # no additional libraries needed
+ self.dll_libraries=[]
+
+ # Include the appropriate MSVC runtime library if Python was built
+ # with MSVC 7.0 or later.
+ self.dll_libraries = get_msvcr()
+
+# Because these compilers aren't configured in Python's pyconfig.h file by
+# default, we should at least warn the user if they are using an unmodified
+# version.
+
+CONFIG_H_OK = "ok"
+CONFIG_H_NOTOK = "not ok"
+CONFIG_H_UNCERTAIN = "uncertain"
+
+def check_config_h():
+ """Check if the current Python installation appears amenable to building
+ extensions with GCC.
+
+ Returns a tuple (status, details), where 'status' is one of the following
+ constants:
+
+ - CONFIG_H_OK: all is well, go ahead and compile
+ - CONFIG_H_NOTOK: doesn't look good
+ - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h
+
+ 'details' is a human-readable string explaining the situation.
+
+ Note there are two ways to conclude "OK": either 'sys.version' contains
+ the string "GCC" (implying that this Python was built with GCC), or the
+ installed "pyconfig.h" contains the string "__GNUC__".
+ """
+
+ # XXX since this function also checks sys.version, it's not strictly a
+ # "pyconfig.h" check -- should probably be renamed...
+
+ from distutils import sysconfig
+
+ # if sys.version contains GCC then python was compiled with GCC, and the
+ # pyconfig.h file should be OK
+ if "GCC" in sys.version:
+ return CONFIG_H_OK, "sys.version mentions 'GCC'"
+
+ # let's see if __GNUC__ is mentioned in python.h
+ fn = sysconfig.get_config_h_filename()
+ try:
+ config_h = open(fn)
+ try:
+ if "__GNUC__" in config_h.read():
+ return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
+ else:
+ return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
+ finally:
+ config_h.close()
+ except OSError as exc:
+ return (CONFIG_H_UNCERTAIN,
+ "couldn't read '%s': %s" % (fn, exc.strerror))
+
+RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
+
+def _find_exe_version(cmd):
+ """Find the version of an executable by running `cmd` in the shell.
+
+ If the command is not found, or the output does not match
+ `RE_VERSION`, returns None.
+ """
+ executable = cmd.split()[0]
+ if find_executable(executable) is None:
+ return None
+ out = Popen(cmd, shell=True, stdout=PIPE).stdout
+ try:
+ out_string = out.read()
+ finally:
+ out.close()
+ result = RE_VERSION.search(out_string)
+ if result is None:
+ return None
+ # LooseVersion works with strings
+ # so we need to decode our bytes
+ return LooseVersion(result.group(1).decode())
+
+def get_versions():
+ """ Try to find out the versions of gcc, ld and dllwrap.
+
+    Returns None for any tool whose version cannot be determined.
+ """
+ commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
+ return tuple([_find_exe_version(cmd) for cmd in commands])
+
+def is_cygwingcc():
+ '''Try to determine if the gcc that would be used is from cygwin.'''
+ out_string = check_output(['gcc', '-dumpmachine'])
+ return out_string.strip().endswith(b'cygwin')
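
Several branches above key off binutils versions compared as LooseVersion
objects (the type returned by _find_exe_version). A small sketch of that
comparison, with example version strings:

    from distutils.version import LooseVersion

    ld_version = LooseVersion('2.13.90')
    # ld >= 2.13 supports -shared; older binutils fall back to -mdll -static
    shared_option = ('-shared' if ld_version >= LooseVersion('2.13')
                     else '-mdll -static')
    print(shared_option)  # -shared
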
diff --git a/third_party/python/setuptools/setuptools/_distutils/debug.py b/third_party/python/setuptools/setuptools/_distutils/debug.py
new file mode 100644
index 0000000000..daf1660f0d
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/debug.py
@@ -0,0 +1,5 @@
+import os
+
+# If the DISTUTILS_DEBUG environment variable is set to a non-empty
+# string, we run in debug mode.
+DEBUG = os.environ.get('DISTUTILS_DEBUG')
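
Since DEBUG is just the raw environment value, any non-empty string enables
it, and the variable must be set before distutils.debug is first imported.
A tiny usage sketch:

    import os
    os.environ['DISTUTILS_DEBUG'] = '1'   # any non-empty value enables it

    from distutils.debug import DEBUG
    print(bool(DEBUG))                    # True
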
diff --git a/third_party/python/setuptools/setuptools/_distutils/dep_util.py b/third_party/python/setuptools/setuptools/_distutils/dep_util.py
new file mode 100644
index 0000000000..d74f5e4e92
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/dep_util.py
@@ -0,0 +1,92 @@
+"""distutils.dep_util
+
+Utility functions for simple, timestamp-based dependency checking of
+files and groups of files; also, functions based entirely on such
+timestamp dependency analysis."""
+
+import os
+from distutils.errors import DistutilsFileError
+
+
+def newer (source, target):
+ """Return true if 'source' exists and is more recently modified than
+ 'target', or if 'source' exists and 'target' doesn't. Return false if
+ both exist and 'target' is the same age or younger than 'source'.
+ Raise DistutilsFileError if 'source' does not exist.
+ """
+ if not os.path.exists(source):
+ raise DistutilsFileError("file '%s' does not exist" %
+ os.path.abspath(source))
+ if not os.path.exists(target):
+ return 1
+
+ from stat import ST_MTIME
+ mtime1 = os.stat(source)[ST_MTIME]
+ mtime2 = os.stat(target)[ST_MTIME]
+
+ return mtime1 > mtime2
+
+# newer ()
+
+
+def newer_pairwise (sources, targets):
+ """Walk two filename lists in parallel, testing if each source is newer
+ than its corresponding target. Return a pair of lists (sources,
+ targets) where source is newer than target, according to the semantics
+ of 'newer()'.
+ """
+ if len(sources) != len(targets):
+ raise ValueError("'sources' and 'targets' must be same length")
+
+ # build a pair of lists (sources, targets) where source is newer
+ n_sources = []
+ n_targets = []
+ for i in range(len(sources)):
+ if newer(sources[i], targets[i]):
+ n_sources.append(sources[i])
+ n_targets.append(targets[i])
+
+ return (n_sources, n_targets)
+
+# newer_pairwise ()
+
+
+def newer_group (sources, target, missing='error'):
+ """Return true if 'target' is out-of-date with respect to any file
+ listed in 'sources'. In other words, if 'target' exists and is newer
+ than every file in 'sources', return false; otherwise return true.
+ 'missing' controls what we do when a source file is missing; the
+ default ("error") is to blow up with an OSError from inside 'stat()';
+ if it is "ignore", we silently drop any missing source files; if it is
+ "newer", any missing source files make us assume that 'target' is
+ out-of-date (this is handy in "dry-run" mode: it'll make you pretend to
+ carry out commands that wouldn't work because inputs are missing, but
+ that doesn't matter because you're not actually going to run the
+ commands).
+ """
+ # If the target doesn't even exist, then it's definitely out-of-date.
+ if not os.path.exists(target):
+ return 1
+
+ # Otherwise we have to find out the hard way: if *any* source file
+ # is more recent than 'target', then 'target' is out-of-date and
+ # we can immediately return true. If we fall through to the end
+ # of the loop, then 'target' is up-to-date and we return false.
+ from stat import ST_MTIME
+ target_mtime = os.stat(target)[ST_MTIME]
+ for source in sources:
+ if not os.path.exists(source):
+ if missing == 'error': # blow up when we stat() the file
+ pass
+ elif missing == 'ignore': # missing source dropped from
+ continue # target's dependency list
+ elif missing == 'newer': # missing source means target is
+ return 1 # out-of-date
+
+ source_mtime = os.stat(source)[ST_MTIME]
+ if source_mtime > target_mtime:
+ return 1
+ else:
+ return 0
+
+# newer_group ()
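
These helpers implement make-style freshness checks on file mtimes. A
deterministic usage sketch on throwaway scratch files:

    import os
    import tempfile
    from distutils.dep_util import newer, newer_group

    with tempfile.TemporaryDirectory() as d:
        src = os.path.join(d, 'a.c')
        tgt = os.path.join(d, 'a.o')
        open(src, 'w').close()
        print(newer(src, tgt))     # true: target does not exist yet
        open(tgt, 'w').close()
        os.utime(src, (0, 0))      # force the source to look ancient
        print(newer(src, tgt))     # false: target is more recent
        print(newer_group([src], tgt, missing='newer'))  # false as well
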
diff --git a/third_party/python/setuptools/setuptools/_distutils/dir_util.py b/third_party/python/setuptools/setuptools/_distutils/dir_util.py
new file mode 100644
index 0000000000..d5cd8e3e24
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/dir_util.py
@@ -0,0 +1,210 @@
+"""distutils.dir_util
+
+Utility functions for manipulating directories and directory trees."""
+
+import os
+import errno
+from distutils.errors import DistutilsFileError, DistutilsInternalError
+from distutils import log
+
+# cache used by mkpath() -- in addition to cheapening redundant calls,
+# eliminates redundant "creating /foo/bar/baz" messages in dry-run mode
+_path_created = {}
+
+# I don't use os.makedirs because a) it's new to Python 1.5.2, and
+# b) it blows up if the directory already exists (I want to silently
+# succeed in that case).
+def mkpath(name, mode=0o777, verbose=1, dry_run=0):
+ """Create a directory and any missing ancestor directories.
+
+ If the directory already exists (or if 'name' is the empty string, which
+ means the current directory, which of course exists), then do nothing.
+ Raise DistutilsFileError if unable to create some directory along the way
+ (eg. some sub-path exists, but is a file rather than a directory).
+ If 'verbose' is true, print a one-line summary of each mkdir to stdout.
+ Return the list of directories actually created.
+ """
+
+ global _path_created
+
+ # Detect a common bug -- name is None
+ if not isinstance(name, str):
+ raise DistutilsInternalError(
+ "mkpath: 'name' must be a string (got %r)" % (name,))
+
+ # XXX what's the better way to handle verbosity? print as we create
+ # each directory in the path (the current behaviour), or only announce
+ # the creation of the whole path? (quite easy to do the latter since
+ # we're not using a recursive algorithm)
+
+ name = os.path.normpath(name)
+ created_dirs = []
+ if os.path.isdir(name) or name == '':
+ return created_dirs
+ if _path_created.get(os.path.abspath(name)):
+ return created_dirs
+
+ (head, tail) = os.path.split(name)
+ tails = [tail] # stack of lone dirs to create
+
+ while head and tail and not os.path.isdir(head):
+ (head, tail) = os.path.split(head)
+ tails.insert(0, tail) # push next higher dir onto stack
+
+ # now 'head' contains the deepest directory that already exists
+ # (that is, the child of 'head' in 'name' is the highest directory
+ # that does *not* exist)
+ for d in tails:
+ #print "head = %s, d = %s: " % (head, d),
+ head = os.path.join(head, d)
+ abs_head = os.path.abspath(head)
+
+ if _path_created.get(abs_head):
+ continue
+
+ if verbose >= 1:
+ log.info("creating %s", head)
+
+ if not dry_run:
+ try:
+ os.mkdir(head, mode)
+ except OSError as exc:
+ if not (exc.errno == errno.EEXIST and os.path.isdir(head)):
+ raise DistutilsFileError(
+ "could not create '%s': %s" % (head, exc.args[-1]))
+ created_dirs.append(head)
+
+ _path_created[abs_head] = 1
+ return created_dirs
+
+def create_tree(base_dir, files, mode=0o777, verbose=1, dry_run=0):
+ """Create all the empty directories under 'base_dir' needed to put 'files'
+ there.
+
+ 'base_dir' is just the name of a directory which doesn't necessarily
+ exist yet; 'files' is a list of filenames to be interpreted relative to
+ 'base_dir'. 'base_dir' + the directory portion of every file in 'files'
+ will be created if it doesn't already exist. 'mode', 'verbose' and
+ 'dry_run' flags are as for 'mkpath()'.
+ """
+ # First get the list of directories to create
+ need_dir = set()
+ for file in files:
+ need_dir.add(os.path.join(base_dir, os.path.dirname(file)))
+
+ # Now create them
+ for dir in sorted(need_dir):
+ mkpath(dir, mode, verbose=verbose, dry_run=dry_run)
+
+def copy_tree(src, dst, preserve_mode=1, preserve_times=1,
+ preserve_symlinks=0, update=0, verbose=1, dry_run=0):
+ """Copy an entire directory tree 'src' to a new location 'dst'.
+
+ Both 'src' and 'dst' must be directory names. If 'src' is not a
+ directory, raise DistutilsFileError. If 'dst' does not exist, it is
+ created with 'mkpath()'. The end result of the copy is that every
+ file in 'src' is copied to 'dst', and directories under 'src' are
+ recursively copied to 'dst'. Return the list of files that were
+ copied or might have been copied, using their output name. The
+ return value is unaffected by 'update' or 'dry_run': it is simply
+ the list of all files under 'src', with the names changed to be
+ under 'dst'.
+
+ 'preserve_mode' and 'preserve_times' are the same as for
+ 'copy_file'; note that they only apply to regular files, not to
+ directories. If 'preserve_symlinks' is true, symlinks will be
+ copied as symlinks (on platforms that support them!); otherwise
+ (the default), the destination of the symlink will be copied.
+ 'update' and 'verbose' are the same as for 'copy_file'.
+ """
+ from distutils.file_util import copy_file
+
+ if not dry_run and not os.path.isdir(src):
+ raise DistutilsFileError(
+ "cannot copy tree '%s': not a directory" % src)
+ try:
+ names = os.listdir(src)
+ except OSError as e:
+ if dry_run:
+ names = []
+ else:
+ raise DistutilsFileError(
+ "error listing files in '%s': %s" % (src, e.strerror))
+
+ if not dry_run:
+ mkpath(dst, verbose=verbose)
+
+ outputs = []
+
+ for n in names:
+ src_name = os.path.join(src, n)
+ dst_name = os.path.join(dst, n)
+
+ if n.startswith('.nfs'):
+ # skip NFS rename files
+ continue
+
+ if preserve_symlinks and os.path.islink(src_name):
+ link_dest = os.readlink(src_name)
+ if verbose >= 1:
+ log.info("linking %s -> %s", dst_name, link_dest)
+ if not dry_run:
+ os.symlink(link_dest, dst_name)
+ outputs.append(dst_name)
+
+ elif os.path.isdir(src_name):
+ outputs.extend(
+ copy_tree(src_name, dst_name, preserve_mode,
+ preserve_times, preserve_symlinks, update,
+ verbose=verbose, dry_run=dry_run))
+ else:
+ copy_file(src_name, dst_name, preserve_mode,
+ preserve_times, update, verbose=verbose,
+ dry_run=dry_run)
+ outputs.append(dst_name)
+
+ return outputs
+
+def _build_cmdtuple(path, cmdtuples):
+ """Helper for remove_tree()."""
+ for f in os.listdir(path):
+ real_f = os.path.join(path,f)
+ if os.path.isdir(real_f) and not os.path.islink(real_f):
+ _build_cmdtuple(real_f, cmdtuples)
+ else:
+ cmdtuples.append((os.remove, real_f))
+ cmdtuples.append((os.rmdir, path))
+
+def remove_tree(directory, verbose=1, dry_run=0):
+ """Recursively remove an entire directory tree.
+
+ Any errors are ignored (apart from being reported to stdout if 'verbose'
+ is true).
+ """
+ global _path_created
+
+ if verbose >= 1:
+ log.info("removing '%s' (and everything under it)", directory)
+ if dry_run:
+ return
+ cmdtuples = []
+ _build_cmdtuple(directory, cmdtuples)
+ for cmd in cmdtuples:
+ try:
+ cmd[0](cmd[1])
+ # remove dir from cache if it's already there
+ abspath = os.path.abspath(cmd[1])
+ if abspath in _path_created:
+ del _path_created[abspath]
+ except OSError as exc:
+ log.warn("error removing %s: %s", directory, exc)
+
+def ensure_relative(path):
+ """Take the full path 'path', and make it a relative path.
+
+ This is useful to make 'path' the second argument to os.path.join().
+ """
+ drive, path = os.path.splitdrive(path)
+ if path[0:1] == os.sep:
+ path = drive + path[1:]
+ return path
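
create_tree() above materializes only the directory portion of each listed
file; it never creates the files themselves. A quick sketch on scratch paths:

    import os
    import tempfile
    from distutils.dir_util import create_tree, remove_tree

    base = tempfile.mkdtemp()
    tree = os.path.join(base, 'pkg-1.0')
    # creates pkg-1.0/src and pkg-1.0/docs, but no files
    create_tree(tree, ['src/module.c', 'docs/index.rst'])
    print(sorted(os.listdir(tree)))   # ['docs', 'src']
    remove_tree(base)
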
diff --git a/third_party/python/setuptools/setuptools/_distutils/dist.py b/third_party/python/setuptools/setuptools/_distutils/dist.py
new file mode 100644
index 0000000000..37db4d6cd7
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/dist.py
@@ -0,0 +1,1257 @@
+"""distutils.dist
+
+Provides the Distribution class, which represents the module distribution
+being built/installed/distributed.
+"""
+
+import sys
+import os
+import re
+from email import message_from_file
+
+try:
+ import warnings
+except ImportError:
+ warnings = None
+
+from distutils.errors import *
+from distutils.fancy_getopt import FancyGetopt, translate_longopt
+from distutils.util import check_environ, strtobool, rfc822_escape
+from distutils import log
+from distutils.debug import DEBUG
+
+# Regex to define acceptable Distutils command names. This is not *quite*
+# the same as a Python NAME -- I don't allow leading underscores. The fact
+# that they're very similar is no coincidence; the default naming scheme is
+# to look for a Python module named after the command.
+command_re = re.compile(r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
+
+
+def _ensure_list(value, fieldname):
+ if isinstance(value, str):
+ # a string containing comma separated values is okay. It will
+ # be converted to a list by Distribution.finalize_options().
+ pass
+ elif not isinstance(value, list):
+ # passing a tuple or an iterator perhaps, warn and convert
+ typename = type(value).__name__
+ msg = "Warning: '{fieldname}' should be a list, got type '{typename}'"
+ msg = msg.format(**locals())
+ log.log(log.WARN, msg)
+ value = list(value)
+ return value
+
+
+class Distribution:
+ """The core of the Distutils. Most of the work hiding behind 'setup'
+ is really done within a Distribution instance, which farms the work out
+ to the Distutils commands specified on the command line.
+
+ Setup scripts will almost never instantiate Distribution directly,
+ unless the 'setup()' function is totally inadequate to their needs.
+ However, it is conceivable that a setup script might wish to subclass
+ Distribution for some specialized purpose, and then pass the subclass
+ to 'setup()' as the 'distclass' keyword argument. If so, it is
+ necessary to respect the expectations that 'setup' has of Distribution.
+ See the code for 'setup()', in core.py, for details.
+ """
+
+ # 'global_options' describes the command-line options that may be
+ # supplied to the setup script prior to any actual commands.
+ # Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
+ # these global options. This list should be kept to a bare minimum,
+ # since every global option is also valid as a command option -- and we
+ # don't want to pollute the commands with too many options that they
+ # have minimal control over.
+ # The fourth entry for verbose means that it can be repeated.
+ global_options = [
+ ('verbose', 'v', "run verbosely (default)", 1),
+ ('quiet', 'q', "run quietly (turns verbosity off)"),
+ ('dry-run', 'n', "don't actually do anything"),
+ ('help', 'h', "show detailed help message"),
+ ('no-user-cfg', None,
+ 'ignore pydistutils.cfg in your home directory'),
+ ]
+
+ # 'common_usage' is a short (2-3 line) string describing the common
+ # usage of the setup script.
+ common_usage = """\
+Common commands: (see '--help-commands' for more)
+
+ setup.py build will build the package underneath 'build/'
+ setup.py install will install the package
+"""
+
+ # options that are not propagated to the commands
+ display_options = [
+ ('help-commands', None,
+ "list all available commands"),
+ ('name', None,
+ "print package name"),
+ ('version', 'V',
+ "print package version"),
+ ('fullname', None,
+ "print <package name>-<version>"),
+ ('author', None,
+ "print the author's name"),
+ ('author-email', None,
+ "print the author's email address"),
+ ('maintainer', None,
+ "print the maintainer's name"),
+ ('maintainer-email', None,
+ "print the maintainer's email address"),
+ ('contact', None,
+ "print the maintainer's name if known, else the author's"),
+ ('contact-email', None,
+ "print the maintainer's email address if known, else the author's"),
+ ('url', None,
+ "print the URL for this package"),
+ ('license', None,
+ "print the license of the package"),
+ ('licence', None,
+ "alias for --license"),
+ ('description', None,
+ "print the package description"),
+ ('long-description', None,
+ "print the long package description"),
+ ('platforms', None,
+ "print the list of platforms"),
+ ('classifiers', None,
+ "print the list of classifiers"),
+ ('keywords', None,
+ "print the list of keywords"),
+ ('provides', None,
+ "print the list of packages/modules provided"),
+ ('requires', None,
+ "print the list of packages/modules required"),
+ ('obsoletes', None,
+ "print the list of packages/modules made obsolete")
+ ]
+ display_option_names = [translate_longopt(x[0]) for x in display_options]
+
+ # negative options are options that exclude other options
+ negative_opt = {'quiet': 'verbose'}
+
+ # -- Creation/initialization methods -------------------------------
+
+ def __init__(self, attrs=None):
+ """Construct a new Distribution instance: initialize all the
+ attributes of a Distribution, and then use 'attrs' (a dictionary
+ mapping attribute names to values) to assign some of those
+ attributes their "real" values. (Any attributes not mentioned in
+ 'attrs' will be assigned to some null value: 0, None, an empty list
+ or dictionary, etc.) Most importantly, initialize the
+ 'command_obj' attribute to the empty dictionary; this will be
+ filled in with real command objects by 'parse_command_line()'.
+ """
+
+ # Default values for our command-line options
+ self.verbose = 1
+ self.dry_run = 0
+ self.help = 0
+ for attr in self.display_option_names:
+ setattr(self, attr, 0)
+
+ # Store the distribution meta-data (name, version, author, and so
+ # forth) in a separate object -- we're getting to have enough
+ # information here (and enough command-line options) that it's
+ # worth it. Also delegate 'get_XXX()' methods to the 'metadata'
+ # object in a sneaky and underhanded (but efficient!) way.
+ self.metadata = DistributionMetadata()
+ for basename in self.metadata._METHOD_BASENAMES:
+ method_name = "get_" + basename
+ setattr(self, method_name, getattr(self.metadata, method_name))
+
+ # 'cmdclass' maps command names to class objects, so we
+ # can 1) quickly figure out which class to instantiate when
+ # we need to create a new command object, and 2) have a way
+ # for the setup script to override command classes
+ self.cmdclass = {}
+
+ # 'command_packages' is a list of packages in which commands
+ # are searched for. The factory for command 'foo' is expected
+ # to be named 'foo' in the module 'foo' in one of the packages
+ # named here. This list is searched from the left; an error
+ # is raised if no named package provides the command being
+ # searched for. (Always access using get_command_packages().)
+ self.command_packages = None
+
+ # 'script_name' and 'script_args' are usually set to sys.argv[0]
+ # and sys.argv[1:], but they can be overridden when the caller is
+ # not necessarily a setup script run from the command-line.
+ self.script_name = None
+ self.script_args = None
+
+ # 'command_options' is where we store command options between
+ # parsing them (from config files, the command-line, etc.) and when
+ # they are actually needed -- ie. when the command in question is
+ # instantiated. It is a dictionary of dictionaries of 2-tuples:
+ # command_options = { command_name : { option : (source, value) } }
+ self.command_options = {}
+
+ # 'dist_files' is the list of (command, pyversion, file) that
+ # have been created by any dist commands run so far. This is
+ # filled regardless of whether the run is dry or not. pyversion
+ # gives sysconfig.get_python_version() if the dist file is
+ # specific to a Python version, 'any' if it is good for all
+ # Python versions on the target platform, and '' for a source
+ # file. pyversion should not be used to specify minimum or
+ # maximum required Python versions; use the metainfo for that
+ # instead.
+ self.dist_files = []
+
+ # These options are really the business of various commands, rather
+ # than of the Distribution itself. We provide aliases for them in
+ # Distribution as a convenience to the developer.
+ self.packages = None
+ self.package_data = {}
+ self.package_dir = None
+ self.py_modules = None
+ self.libraries = None
+ self.headers = None
+ self.ext_modules = None
+ self.ext_package = None
+ self.include_dirs = None
+ self.extra_path = None
+ self.scripts = None
+ self.data_files = None
+ self.password = ''
+
+ # And now initialize bookkeeping stuff that can't be supplied by
+ # the caller at all. 'command_obj' maps command names to
+ # Command instances -- that's how we enforce that every command
+ # class is a singleton.
+ self.command_obj = {}
+
+ # 'have_run' maps command names to boolean values; it keeps track
+ # of whether we have actually run a particular command, to make it
+ # cheap to "run" a command whenever we think we might need to -- if
+ # it's already been done, no need for expensive filesystem
+ # operations, we just check the 'have_run' dictionary and carry on.
+ # It's only safe to query 'have_run' for a command class that has
+ # been instantiated -- a false value will be inserted when the
+ # command object is created, and replaced with a true value when
+ # the command is successfully run. Thus it's probably best to use
+ # '.get()' rather than a straight lookup.
+ self.have_run = {}
+
+ # Now we'll use the attrs dictionary (ultimately, keyword args from
+ # the setup script) to possibly override any or all of these
+ # distribution options.
+
+ if attrs:
+ # Pull out the set of command options and work on them
+ # specifically. Note that this order guarantees that aliased
+ # command options will override any supplied redundantly
+ # through the general options dictionary.
+ options = attrs.get('options')
+ if options is not None:
+ del attrs['options']
+ for (command, cmd_options) in options.items():
+ opt_dict = self.get_option_dict(command)
+ for (opt, val) in cmd_options.items():
+ opt_dict[opt] = ("setup script", val)
+
+ if 'licence' in attrs:
+ attrs['license'] = attrs['licence']
+ del attrs['licence']
+ msg = "'licence' distribution option is deprecated; use 'license'"
+ if warnings is not None:
+ warnings.warn(msg)
+ else:
+ sys.stderr.write(msg + "\n")
+
+ # Now work on the rest of the attributes. Any attribute that's
+ # not already defined is invalid!
+ for (key, val) in attrs.items():
+ if hasattr(self.metadata, "set_" + key):
+ getattr(self.metadata, "set_" + key)(val)
+ elif hasattr(self.metadata, key):
+ setattr(self.metadata, key, val)
+ elif hasattr(self, key):
+ setattr(self, key, val)
+ else:
+ msg = "Unknown distribution option: %s" % repr(key)
+                if warnings is not None:
+                    warnings.warn(msg)
+                else:
+                    sys.stderr.write(msg + "\n")
+
+ # no-user-cfg is handled before other command line args
+ # because other args override the config files, and this
+ # one is needed before we can load the config files.
+ # If attrs['script_args'] wasn't passed, assume false.
+ #
+        # This also makes sure we just look at the global options
+ self.want_user_cfg = True
+
+ if self.script_args is not None:
+ for arg in self.script_args:
+ if not arg.startswith('-'):
+ break
+ if arg == '--no-user-cfg':
+ self.want_user_cfg = False
+ break
+
+ self.finalize_options()
+
+ def get_option_dict(self, command):
+ """Get the option dictionary for a given command. If that
+ command's option dictionary hasn't been created yet, then create it
+ and return the new dictionary; otherwise, return the existing
+ option dictionary.
+ """
+ dict = self.command_options.get(command)
+ if dict is None:
+ dict = self.command_options[command] = {}
+ return dict
+
+ def dump_option_dicts(self, header=None, commands=None, indent=""):
+ from pprint import pformat
+
+ if commands is None: # dump all command option dicts
+ commands = sorted(self.command_options.keys())
+
+ if header is not None:
+ self.announce(indent + header)
+ indent = indent + " "
+
+ if not commands:
+ self.announce(indent + "no commands known yet")
+ return
+
+ for cmd_name in commands:
+ opt_dict = self.command_options.get(cmd_name)
+ if opt_dict is None:
+ self.announce(indent +
+ "no option dict for '%s' command" % cmd_name)
+ else:
+ self.announce(indent +
+ "option dict for '%s' command:" % cmd_name)
+ out = pformat(opt_dict)
+ for line in out.split('\n'):
+ self.announce(indent + " " + line)
+
+ # -- Config file finding/parsing methods ---------------------------
+
+ def find_config_files(self):
+ """Find as many configuration files as should be processed for this
+ platform, and return a list of filenames in the order in which they
+ should be parsed. The filenames returned are guaranteed to exist
+ (modulo nasty race conditions).
+
+ There are three possible config files: distutils.cfg in the
+ Distutils installation directory (ie. where the top-level
+        Distutils __init__.py file lives), a file in the user's home
+ directory named .pydistutils.cfg on Unix and pydistutils.cfg
+ on Windows/Mac; and setup.cfg in the current directory.
+
+ The file in the user's home directory can be disabled with the
+ --no-user-cfg option.
+ """
+ files = []
+ check_environ()
+
+ # Where to look for the system-wide Distutils config file
+ sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
+
+ # Look for the system config file
+ sys_file = os.path.join(sys_dir, "distutils.cfg")
+ if os.path.isfile(sys_file):
+ files.append(sys_file)
+
+ # What to call the per-user config file
+ if os.name == 'posix':
+ user_filename = ".pydistutils.cfg"
+ else:
+ user_filename = "pydistutils.cfg"
+
+ # And look for the user config file
+ if self.want_user_cfg:
+ user_file = os.path.join(os.path.expanduser('~'), user_filename)
+ if os.path.isfile(user_file):
+ files.append(user_file)
+
+ # All platforms support local setup.cfg
+ local_file = "setup.cfg"
+ if os.path.isfile(local_file):
+ files.append(local_file)
+
+ if DEBUG:
+ self.announce("using config files: %s" % ', '.join(files))
+
+ return files
+
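+    # Example (illustrative): on a Unix host with all three files present,
+    # the returned order (later files override earlier ones) might be:
+    #
+    #   ['/usr/lib/python3.x/distutils/distutils.cfg',  # hypothetical path
+    #    '/home/user/.pydistutils.cfg',
+    #    'setup.cfg']
+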
+ def parse_config_files(self, filenames=None):
+ from configparser import ConfigParser
+
+ # Ignore install directory options if we have a venv
+ if sys.prefix != sys.base_prefix:
+ ignore_options = [
+ 'install-base', 'install-platbase', 'install-lib',
+ 'install-platlib', 'install-purelib', 'install-headers',
+ 'install-scripts', 'install-data', 'prefix', 'exec-prefix',
+ 'home', 'user', 'root']
+ else:
+ ignore_options = []
+
+ ignore_options = frozenset(ignore_options)
+
+ if filenames is None:
+ filenames = self.find_config_files()
+
+ if DEBUG:
+ self.announce("Distribution.parse_config_files():")
+
+ parser = ConfigParser()
+ for filename in filenames:
+ if DEBUG:
+ self.announce(" reading %s" % filename)
+ parser.read(filename)
+ for section in parser.sections():
+ options = parser.options(section)
+ opt_dict = self.get_option_dict(section)
+
+ for opt in options:
+ if opt != '__name__' and opt not in ignore_options:
+                        val = parser.get(section, opt)
+ opt = opt.replace('-', '_')
+ opt_dict[opt] = (filename, val)
+
+ # Make the ConfigParser forget everything (so we retain
+ # the original filenames that options come from)
+ parser.__init__()
+
+ # If there was a "global" section in the config file, use it
+ # to set Distribution options.
+
+ if 'global' in self.command_options:
+ for (opt, (src, val)) in self.command_options['global'].items():
+ alias = self.negative_opt.get(opt)
+ try:
+ if alias:
+ setattr(self, alias, not strtobool(val))
+ elif opt in ('verbose', 'dry_run'): # ugh!
+ setattr(self, opt, strtobool(val))
+ else:
+ setattr(self, opt, val)
+ except ValueError as msg:
+ raise DistutilsOptionError(msg)
+
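+    # Example (illustrative): given a setup.cfg containing
+    #
+    #   [build_ext]
+    #   inplace=1
+    #
+    # parse_config_files() records the option (hyphens mapped to
+    # underscores) as:
+    #
+    #   self.command_options['build_ext'] == {'inplace': ('setup.cfg', '1')}
+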
+ # -- Command-line parsing methods ----------------------------------
+
+ def parse_command_line(self):
+ """Parse the setup script's command line, taken from the
+ 'script_args' instance attribute (which defaults to 'sys.argv[1:]'
+ -- see 'setup()' in core.py). This list is first processed for
+ "global options" -- options that set attributes of the Distribution
+ instance. Then, it is alternately scanned for Distutils commands
+ and options for that command. Each new command terminates the
+ options for the previous command. The allowed options for a
+ command are determined by the 'user_options' attribute of the
+ command class -- thus, we have to be able to load command classes
+ in order to parse the command line. Any error in that 'options'
+ attribute raises DistutilsGetoptError; any error on the
+ command-line raises DistutilsArgError. If no Distutils commands
+ were found on the command line, raises DistutilsArgError. Return
+ true if command-line was successfully parsed and we should carry
+ on with executing commands; false if no errors but we shouldn't
+ execute commands (currently, this only happens if user asks for
+ help).
+ """
+ #
+ # We now have enough information to show the Macintosh dialog
+ # that allows the user to interactively specify the "command line".
+ #
+ toplevel_options = self._get_toplevel_options()
+
+ # We have to parse the command line a bit at a time -- global
+ # options, then the first command, then its options, and so on --
+ # because each command will be handled by a different class, and
+ # the options that are valid for a particular class aren't known
+ # until we have loaded the command class, which doesn't happen
+ # until we know what the command is.
+
+ self.commands = []
+ parser = FancyGetopt(toplevel_options + self.display_options)
+ parser.set_negative_aliases(self.negative_opt)
+ parser.set_aliases({'licence': 'license'})
+ args = parser.getopt(args=self.script_args, object=self)
+ option_order = parser.get_option_order()
+ log.set_verbosity(self.verbose)
+
+ # for display options we return immediately
+ if self.handle_display_options(option_order):
+ return
+ while args:
+ args = self._parse_command_opts(parser, args)
+ if args is None: # user asked for help (and got it)
+ return
+
+ # Handle the cases of --help as a "global" option, ie.
+ # "setup.py --help" and "setup.py --help command ...". For the
+ # former, we show global options (--verbose, --dry-run, etc.)
+ # and display-only options (--name, --version, etc.); for the
+ # latter, we omit the display-only options and show help for
+ # each command listed on the command line.
+ if self.help:
+ self._show_help(parser,
+ display_options=len(self.commands) == 0,
+ commands=self.commands)
+ return
+
+ # Oops, no commands found -- an end-user error
+ if not self.commands:
+ raise DistutilsArgError("no commands supplied")
+
+ # All is well: return true
+ return True
+
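+    # Example (illustrative): for a hypothetical invocation
+    #
+    #   setup.py --dry-run build_ext --inplace install
+    #
+    # the global --dry-run is consumed first, then 'build_ext' (with its
+    # --inplace option) and 'install' are parsed in turn, leaving
+    # self.commands == ['build_ext', 'install'].
+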
+ def _get_toplevel_options(self):
+ """Return the non-display options recognized at the top level.
+
+ This includes options that are recognized *only* at the top
+ level as well as options recognized for commands.
+ """
+ return self.global_options + [
+ ("command-packages=", None,
+ "list of packages that provide distutils commands"),
+ ]
+
+ def _parse_command_opts(self, parser, args):
+ """Parse the command-line options for a single command.
+ 'parser' must be a FancyGetopt instance; 'args' must be the list
+ of arguments, starting with the current command (whose options
+ we are about to parse). Returns a new version of 'args' with
+ the next command at the front of the list; will be the empty
+ list if there are no more commands on the command line. Returns
+ None if the user asked for help on this command.
+ """
+ # late import because of mutual dependence between these modules
+ from distutils.cmd import Command
+
+ # Pull the current command from the head of the command line
+ command = args[0]
+ if not command_re.match(command):
+ raise SystemExit("invalid command name '%s'" % command)
+ self.commands.append(command)
+
+ # Dig up the command class that implements this command, so we
+ # 1) know that it's a valid command, and 2) know which options
+ # it takes.
+ try:
+ cmd_class = self.get_command_class(command)
+ except DistutilsModuleError as msg:
+ raise DistutilsArgError(msg)
+
+ # Require that the command class be derived from Command -- want
+ # to be sure that the basic "command" interface is implemented.
+ if not issubclass(cmd_class, Command):
+ raise DistutilsClassError(
+ "command class %s must subclass Command" % cmd_class)
+
+ # Also make sure that the command object provides a list of its
+ # known options.
+ if not (hasattr(cmd_class, 'user_options') and
+ isinstance(cmd_class.user_options, list)):
+ msg = ("command class %s must provide "
+ "'user_options' attribute (a list of tuples)")
+ raise DistutilsClassError(msg % cmd_class)
+
+ # If the command class has a list of negative alias options,
+ # merge it in with the global negative aliases.
+ negative_opt = self.negative_opt
+ if hasattr(cmd_class, 'negative_opt'):
+ negative_opt = negative_opt.copy()
+ negative_opt.update(cmd_class.negative_opt)
+
+ # Check for help_options in command class. They have a different
+ # format (tuple of four) so we need to preprocess them here.
+ if (hasattr(cmd_class, 'help_options') and
+ isinstance(cmd_class.help_options, list)):
+ help_options = fix_help_options(cmd_class.help_options)
+ else:
+ help_options = []
+
+ # All commands support the global options too, just by adding
+ # in 'global_options'.
+ parser.set_option_table(self.global_options +
+ cmd_class.user_options +
+ help_options)
+ parser.set_negative_aliases(negative_opt)
+ (args, opts) = parser.getopt(args[1:])
+ if hasattr(opts, 'help') and opts.help:
+ self._show_help(parser, display_options=0, commands=[cmd_class])
+ return
+
+ if (hasattr(cmd_class, 'help_options') and
+ isinstance(cmd_class.help_options, list)):
+            help_option_found = 0
+ for (help_option, short, desc, func) in cmd_class.help_options:
+ if hasattr(opts, parser.get_attr_name(help_option)):
+                    help_option_found = 1
+ if callable(func):
+ func()
+ else:
+ raise DistutilsClassError(
+ "invalid help function %r for help option '%s': "
+ "must be a callable object (function, etc.)"
+ % (func, help_option))
+
+ if help_option_found:
+ return
+
+ # Put the options from the command-line into their official
+ # holding pen, the 'command_options' dictionary.
+ opt_dict = self.get_option_dict(command)
+ for (name, value) in vars(opts).items():
+ opt_dict[name] = ("command line", value)
+
+ return args
+
+ def finalize_options(self):
+ """Set final values for all the options on the Distribution
+ instance, analogous to the .finalize_options() method of Command
+ objects.
+ """
+ for attr in ('keywords', 'platforms'):
+ value = getattr(self.metadata, attr)
+ if value is None:
+ continue
+ if isinstance(value, str):
+ value = [elm.strip() for elm in value.split(',')]
+ setattr(self.metadata, attr, value)
+
+ def _show_help(self, parser, global_options=1, display_options=1,
+ commands=[]):
+ """Show help for the setup script command-line in the form of
+ several lists of command-line options. 'parser' should be a
+ FancyGetopt instance; do not expect it to be returned in the
+ same state, as its option table will be reset to make it
+ generate the correct help text.
+
+ If 'global_options' is true, lists the global options:
+ --verbose, --dry-run, etc. If 'display_options' is true, lists
+ the "display-only" options: --name, --version, etc. Finally,
+ lists per-command help for every command name or command class
+ in 'commands'.
+ """
+ # late import because of mutual dependence between these modules
+ from distutils.core import gen_usage
+ from distutils.cmd import Command
+
+ if global_options:
+ if display_options:
+ options = self._get_toplevel_options()
+ else:
+ options = self.global_options
+ parser.set_option_table(options)
+ parser.print_help(self.common_usage + "\nGlobal options:")
+ print('')
+
+ if display_options:
+ parser.set_option_table(self.display_options)
+ parser.print_help(
+ "Information display options (just display " +
+ "information, ignore any commands)")
+ print('')
+
+ for command in self.commands:
+ if isinstance(command, type) and issubclass(command, Command):
+ klass = command
+ else:
+ klass = self.get_command_class(command)
+ if (hasattr(klass, 'help_options') and
+ isinstance(klass.help_options, list)):
+ parser.set_option_table(klass.user_options +
+ fix_help_options(klass.help_options))
+ else:
+ parser.set_option_table(klass.user_options)
+ parser.print_help("Options for '%s' command:" % klass.__name__)
+ print('')
+
+ print(gen_usage(self.script_name))
+
+ def handle_display_options(self, option_order):
+ """If there were any non-global "display-only" options
+ (--help-commands or the metadata display options) on the command
+ line, display the requested info and return true; else return
+ false.
+ """
+ from distutils.core import gen_usage
+
+ # User just wants a list of commands -- we'll print it out and stop
+ # processing now (ie. if they ran "setup --help-commands foo bar",
+ # we ignore "foo bar").
+ if self.help_commands:
+ self.print_commands()
+ print('')
+ print(gen_usage(self.script_name))
+ return 1
+
+ # If user supplied any of the "display metadata" options, then
+ # display that metadata in the order in which the user supplied the
+ # metadata options.
+ any_display_options = 0
+ is_display_option = {}
+ for option in self.display_options:
+ is_display_option[option[0]] = 1
+
+ for (opt, val) in option_order:
+ if val and is_display_option.get(opt):
+ opt = translate_longopt(opt)
+ value = getattr(self.metadata, "get_"+opt)()
+ if opt in ['keywords', 'platforms']:
+ print(','.join(value))
+ elif opt in ('classifiers', 'provides', 'requires',
+ 'obsoletes'):
+ print('\n'.join(value))
+ else:
+ print(value)
+ any_display_options = 1
+
+ return any_display_options
+
+ def print_command_list(self, commands, header, max_length):
+ """Print a subset of the list of all commands -- used by
+ 'print_commands()'.
+ """
+ print(header + ":")
+
+ for cmd in commands:
+ klass = self.cmdclass.get(cmd)
+ if not klass:
+ klass = self.get_command_class(cmd)
+ try:
+ description = klass.description
+ except AttributeError:
+ description = "(no description available)"
+
+ print(" %-*s %s" % (max_length, cmd, description))
+
+ def print_commands(self):
+ """Print out a help message listing all available commands with a
+ description of each. The list is divided into "standard commands"
+ (listed in distutils.command.__all__) and "extra commands"
+ (mentioned in self.cmdclass, but not a standard command). The
+ descriptions come from the command class attribute
+ 'description'.
+ """
+ import distutils.command
+ std_commands = distutils.command.__all__
+ is_std = {}
+ for cmd in std_commands:
+ is_std[cmd] = 1
+
+ extra_commands = []
+ for cmd in self.cmdclass.keys():
+ if not is_std.get(cmd):
+ extra_commands.append(cmd)
+
+ max_length = 0
+ for cmd in (std_commands + extra_commands):
+ if len(cmd) > max_length:
+ max_length = len(cmd)
+
+ self.print_command_list(std_commands,
+ "Standard commands",
+ max_length)
+ if extra_commands:
+ print()
+ self.print_command_list(extra_commands,
+ "Extra commands",
+ max_length)
+
+ def get_command_list(self):
+ """Get a list of (command, description) tuples.
+ The list is divided into "standard commands" (listed in
+ distutils.command.__all__) and "extra commands" (mentioned in
+ self.cmdclass, but not a standard command). The descriptions come
+ from the command class attribute 'description'.
+ """
+ # Currently this is only used on Mac OS, for the Mac-only GUI
+ # Distutils interface (by Jack Jansen)
+ import distutils.command
+ std_commands = distutils.command.__all__
+ is_std = {}
+ for cmd in std_commands:
+ is_std[cmd] = 1
+
+ extra_commands = []
+ for cmd in self.cmdclass.keys():
+ if not is_std.get(cmd):
+ extra_commands.append(cmd)
+
+ rv = []
+ for cmd in (std_commands + extra_commands):
+ klass = self.cmdclass.get(cmd)
+ if not klass:
+ klass = self.get_command_class(cmd)
+ try:
+ description = klass.description
+ except AttributeError:
+ description = "(no description available)"
+ rv.append((cmd, description))
+ return rv
+
+ # -- Command class/object methods ----------------------------------
+
+ def get_command_packages(self):
+ """Return a list of packages from which commands are loaded."""
+ pkgs = self.command_packages
+ if not isinstance(pkgs, list):
+ if pkgs is None:
+ pkgs = ''
+ pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
+ if "distutils.command" not in pkgs:
+ pkgs.insert(0, "distutils.command")
+ self.command_packages = pkgs
+ return pkgs
+
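+    # Example (illustrative): a hypothetical comma-separated setting
+    #
+    #   dist.command_packages = 'mypkg.commands, otherpkg.commands'
+    #
+    # is normalized to a list with the standard package first:
+    #
+    #   ['distutils.command', 'mypkg.commands', 'otherpkg.commands']
+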
+ def get_command_class(self, command):
+ """Return the class that implements the Distutils command named by
+ 'command'. First we check the 'cmdclass' dictionary; if the
+ command is mentioned there, we fetch the class object from the
+ dictionary and return it. Otherwise we load the command module
+ ("distutils.command." + command) and fetch the command class from
+ the module. The loaded class is also stored in 'cmdclass'
+ to speed future calls to 'get_command_class()'.
+
+ Raises DistutilsModuleError if the expected module could not be
+ found, or if that module does not define the expected class.
+ """
+ klass = self.cmdclass.get(command)
+ if klass:
+ return klass
+
+ for pkgname in self.get_command_packages():
+ module_name = "%s.%s" % (pkgname, command)
+ klass_name = command
+
+ try:
+ __import__(module_name)
+ module = sys.modules[module_name]
+ except ImportError:
+ continue
+
+ try:
+ klass = getattr(module, klass_name)
+ except AttributeError:
+ raise DistutilsModuleError(
+ "invalid command '%s' (no class '%s' in module '%s')"
+ % (command, klass_name, module_name))
+
+ self.cmdclass[command] = klass
+ return klass
+
+ raise DistutilsModuleError("invalid command '%s'" % command)
+
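+    # Example (illustrative): for 'build_py' with no cmdclass override,
+    # the lookup imports distutils.command.build_py, fetches its
+    # 'build_py' class, and caches it in self.cmdclass for later calls.
+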
+ def get_command_obj(self, command, create=1):
+ """Return the command object for 'command'. Normally this object
+ is cached on a previous call to 'get_command_obj()'; if no command
+ object for 'command' is in the cache, then we either create and
+ return it (if 'create' is true) or return None.
+ """
+ cmd_obj = self.command_obj.get(command)
+ if not cmd_obj and create:
+ if DEBUG:
+ self.announce("Distribution.get_command_obj(): "
+ "creating '%s' command object" % command)
+
+ klass = self.get_command_class(command)
+ cmd_obj = self.command_obj[command] = klass(self)
+ self.have_run[command] = 0
+
+ # Set any options that were supplied in config files
+ # or on the command line. (NB. support for error
+ # reporting is lame here: any errors aren't reported
+ # until 'finalize_options()' is called, which means
+ # we won't report the source of the error.)
+ options = self.command_options.get(command)
+ if options:
+ self._set_command_options(cmd_obj, options)
+
+ return cmd_obj
+
+ def _set_command_options(self, command_obj, option_dict=None):
+ """Set the options for 'command_obj' from 'option_dict'. Basically
+ this means copying elements of a dictionary ('option_dict') to
+ attributes of an instance ('command').
+
+ 'command_obj' must be a Command instance. If 'option_dict' is not
+ supplied, uses the standard option dictionary for this command
+ (from 'self.command_options').
+ """
+ command_name = command_obj.get_command_name()
+ if option_dict is None:
+ option_dict = self.get_option_dict(command_name)
+
+ if DEBUG:
+ self.announce(" setting options for '%s' command:" % command_name)
+ for (option, (source, value)) in option_dict.items():
+ if DEBUG:
+ self.announce(" %s = %s (from %s)" % (option, value,
+ source))
+ try:
+ bool_opts = [translate_longopt(o)
+ for o in command_obj.boolean_options]
+ except AttributeError:
+ bool_opts = []
+ try:
+ neg_opt = command_obj.negative_opt
+ except AttributeError:
+ neg_opt = {}
+
+ try:
+ is_string = isinstance(value, str)
+ if option in neg_opt and is_string:
+ setattr(command_obj, neg_opt[option], not strtobool(value))
+ elif option in bool_opts and is_string:
+ setattr(command_obj, option, strtobool(value))
+ elif hasattr(command_obj, option):
+ setattr(command_obj, option, value)
+ else:
+ raise DistutilsOptionError(
+ "error in %s: command '%s' has no such option '%s'"
+ % (source, command_name, option))
+ except ValueError as msg:
+ raise DistutilsOptionError(msg)
+
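+    # Example (illustrative): a string value from a config file is coerced
+    # for boolean options, so an option dict entry such as
+    #
+    #   {'inplace': ('setup.cfg', '1')}
+    #
+    # ends up as command_obj.inplace == 1 via strtobool('1'); non-boolean
+    # options are assigned verbatim.
+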
+ def reinitialize_command(self, command, reinit_subcommands=0):
+ """Reinitializes a command to the state it was in when first
+ returned by 'get_command_obj()': ie., initialized but not yet
+ finalized. This provides the opportunity to sneak option
+ values in programmatically, overriding or supplementing
+ user-supplied values from the config files and command line.
+ You'll have to re-finalize the command object (by calling
+ 'finalize_options()' or 'ensure_finalized()') before using it for
+ real.
+
+ 'command' should be a command name (string) or command object. If
+ 'reinit_subcommands' is true, also reinitializes the command's
+ sub-commands, as declared by the 'sub_commands' class attribute (if
+ it has one). See the "install" command for an example. Only
+ reinitializes the sub-commands that actually matter, ie. those
+ whose test predicates return true.
+
+ Returns the reinitialized command object.
+ """
+ from distutils.cmd import Command
+ if not isinstance(command, Command):
+ command_name = command
+ command = self.get_command_obj(command_name)
+ else:
+ command_name = command.get_command_name()
+
+ if not command.finalized:
+ return command
+ command.initialize_options()
+ command.finalized = 0
+ self.have_run[command_name] = 0
+ self._set_command_options(command)
+
+ if reinit_subcommands:
+ for sub in command.get_sub_commands():
+ self.reinitialize_command(sub, reinit_subcommands)
+
+ return command
+
+ # -- Methods that operate on the Distribution ----------------------
+
+ def announce(self, msg, level=log.INFO):
+ log.log(level, msg)
+
+ def run_commands(self):
+ """Run each command that was seen on the setup script command line.
+ Uses the list of commands found and cache of command objects
+ created by 'get_command_obj()'.
+ """
+ for cmd in self.commands:
+ self.run_command(cmd)
+
+ # -- Methods that operate on its Commands --------------------------
+
+ def run_command(self, command):
+ """Do whatever it takes to run a command (including nothing at all,
+ if the command has already been run). Specifically: if we have
+ already created and run the command named by 'command', return
+ silently without doing anything. If the command named by 'command'
+ doesn't even have a command object yet, create one. Then invoke
+ 'run()' on that command object (or an existing one).
+ """
+ # Already been here, done that? then return silently.
+ if self.have_run.get(command):
+ return
+
+ log.info("running %s", command)
+ cmd_obj = self.get_command_obj(command)
+ cmd_obj.ensure_finalized()
+ cmd_obj.run()
+ self.have_run[command] = 1
+
+ # -- Distribution query methods ------------------------------------
+
+ def has_pure_modules(self):
+ return len(self.packages or self.py_modules or []) > 0
+
+ def has_ext_modules(self):
+ return self.ext_modules and len(self.ext_modules) > 0
+
+ def has_c_libraries(self):
+ return self.libraries and len(self.libraries) > 0
+
+ def has_modules(self):
+ return self.has_pure_modules() or self.has_ext_modules()
+
+ def has_headers(self):
+ return self.headers and len(self.headers) > 0
+
+ def has_scripts(self):
+ return self.scripts and len(self.scripts) > 0
+
+ def has_data_files(self):
+ return self.data_files and len(self.data_files) > 0
+
+ def is_pure(self):
+ return (self.has_pure_modules() and
+ not self.has_ext_modules() and
+ not self.has_c_libraries())
+
+ # -- Metadata query methods ----------------------------------------
+
+ # If you're looking for 'get_name()', 'get_version()', and so forth,
+ # they are defined in a sneaky way: the constructor binds self.get_XXX
+ # to self.metadata.get_XXX. The actual code is in the
+ # DistributionMetadata class, below.
+
+class DistributionMetadata:
+ """Dummy class to hold the distribution meta-data: name, version,
+ author, and so forth.
+ """
+
+ _METHOD_BASENAMES = ("name", "version", "author", "author_email",
+ "maintainer", "maintainer_email", "url",
+ "license", "description", "long_description",
+ "keywords", "platforms", "fullname", "contact",
+ "contact_email", "classifiers", "download_url",
+ # PEP 314
+ "provides", "requires", "obsoletes",
+ )
+
+ def __init__(self, path=None):
+ if path is not None:
+            # use a context manager so the file handle is closed promptly
+            with open(path) as fp:
+                self.read_pkg_file(fp)
+ else:
+ self.name = None
+ self.version = None
+ self.author = None
+ self.author_email = None
+ self.maintainer = None
+ self.maintainer_email = None
+ self.url = None
+ self.license = None
+ self.description = None
+ self.long_description = None
+ self.keywords = None
+ self.platforms = None
+ self.classifiers = None
+ self.download_url = None
+ # PEP 314
+ self.provides = None
+ self.requires = None
+ self.obsoletes = None
+
+ def read_pkg_file(self, file):
+ """Reads the metadata values from a file object."""
+ msg = message_from_file(file)
+
+ def _read_field(name):
+ value = msg[name]
+ if value == 'UNKNOWN':
+ return None
+ return value
+
+ def _read_list(name):
+ values = msg.get_all(name, None)
+ if values == []:
+ return None
+ return values
+
+ metadata_version = msg['metadata-version']
+ self.name = _read_field('name')
+ self.version = _read_field('version')
+ self.description = _read_field('summary')
+ # we are filling author only.
+ self.author = _read_field('author')
+ self.maintainer = None
+ self.author_email = _read_field('author-email')
+ self.maintainer_email = None
+ self.url = _read_field('home-page')
+ self.license = _read_field('license')
+
+ if 'download-url' in msg:
+ self.download_url = _read_field('download-url')
+ else:
+ self.download_url = None
+
+        self.long_description = _read_field('description')
+
+ if 'keywords' in msg:
+ self.keywords = _read_field('keywords').split(',')
+
+ self.platforms = _read_list('platform')
+ self.classifiers = _read_list('classifier')
+
+ # PEP 314 - these fields only exist in 1.1
+ if metadata_version == '1.1':
+ self.requires = _read_list('requires')
+ self.provides = _read_list('provides')
+ self.obsoletes = _read_list('obsoletes')
+ else:
+ self.requires = None
+ self.provides = None
+ self.obsoletes = None
+
+ def write_pkg_info(self, base_dir):
+ """Write the PKG-INFO file into the release tree.
+ """
+ with open(os.path.join(base_dir, 'PKG-INFO'), 'w',
+ encoding='UTF-8') as pkg_info:
+ self.write_pkg_file(pkg_info)
+
+ def write_pkg_file(self, file):
+ """Write the PKG-INFO format data to a file object.
+ """
+ version = '1.0'
+ if (self.provides or self.requires or self.obsoletes or
+ self.classifiers or self.download_url):
+ version = '1.1'
+
+ file.write('Metadata-Version: %s\n' % version)
+ file.write('Name: %s\n' % self.get_name())
+ file.write('Version: %s\n' % self.get_version())
+ file.write('Summary: %s\n' % self.get_description())
+ file.write('Home-page: %s\n' % self.get_url())
+ file.write('Author: %s\n' % self.get_contact())
+ file.write('Author-email: %s\n' % self.get_contact_email())
+ file.write('License: %s\n' % self.get_license())
+ if self.download_url:
+ file.write('Download-URL: %s\n' % self.download_url)
+
+ long_desc = rfc822_escape(self.get_long_description())
+ file.write('Description: %s\n' % long_desc)
+
+ keywords = ','.join(self.get_keywords())
+ if keywords:
+ file.write('Keywords: %s\n' % keywords)
+
+ self._write_list(file, 'Platform', self.get_platforms())
+ self._write_list(file, 'Classifier', self.get_classifiers())
+
+ # PEP 314
+ self._write_list(file, 'Requires', self.get_requires())
+ self._write_list(file, 'Provides', self.get_provides())
+ self._write_list(file, 'Obsoletes', self.get_obsoletes())
+
+ def _write_list(self, file, name, values):
+ for value in values:
+ file.write('%s: %s\n' % (name, value))
+
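+    # Example (illustrative): for a minimal metadata set, write_pkg_file()
+    # produces a PKG-INFO body along the lines of:
+    #
+    #   Metadata-Version: 1.0
+    #   Name: foo
+    #   Version: 1.0
+    #   Summary: An example package
+    #   ...
+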
+ # -- Metadata query methods ----------------------------------------
+
+ def get_name(self):
+ return self.name or "UNKNOWN"
+
+ def get_version(self):
+ return self.version or "0.0.0"
+
+ def get_fullname(self):
+ return "%s-%s" % (self.get_name(), self.get_version())
+
+ def get_author(self):
+ return self.author or "UNKNOWN"
+
+ def get_author_email(self):
+ return self.author_email or "UNKNOWN"
+
+ def get_maintainer(self):
+ return self.maintainer or "UNKNOWN"
+
+ def get_maintainer_email(self):
+ return self.maintainer_email or "UNKNOWN"
+
+ def get_contact(self):
+ return self.maintainer or self.author or "UNKNOWN"
+
+ def get_contact_email(self):
+ return self.maintainer_email or self.author_email or "UNKNOWN"
+
+ def get_url(self):
+ return self.url or "UNKNOWN"
+
+ def get_license(self):
+ return self.license or "UNKNOWN"
+ get_licence = get_license
+
+ def get_description(self):
+ return self.description or "UNKNOWN"
+
+ def get_long_description(self):
+ return self.long_description or "UNKNOWN"
+
+ def get_keywords(self):
+ return self.keywords or []
+
+ def set_keywords(self, value):
+ self.keywords = _ensure_list(value, 'keywords')
+
+ def get_platforms(self):
+ return self.platforms or ["UNKNOWN"]
+
+ def set_platforms(self, value):
+ self.platforms = _ensure_list(value, 'platforms')
+
+ def get_classifiers(self):
+ return self.classifiers or []
+
+ def set_classifiers(self, value):
+ self.classifiers = _ensure_list(value, 'classifiers')
+
+ def get_download_url(self):
+ return self.download_url or "UNKNOWN"
+
+ # PEP 314
+ def get_requires(self):
+ return self.requires or []
+
+ def set_requires(self, value):
+ import distutils.versionpredicate
+ for v in value:
+ distutils.versionpredicate.VersionPredicate(v)
+ self.requires = list(value)
+
+ def get_provides(self):
+ return self.provides or []
+
+ def set_provides(self, value):
+        import distutils.versionpredicate
+        value = [v.strip() for v in value]
+        for v in value:
+            distutils.versionpredicate.split_provision(v)
+ self.provides = value
+
+ def get_obsoletes(self):
+ return self.obsoletes or []
+
+ def set_obsoletes(self, value):
+ import distutils.versionpredicate
+ for v in value:
+ distutils.versionpredicate.VersionPredicate(v)
+ self.obsoletes = list(value)
+
+def fix_help_options(options):
+ """Convert a 4-tuple 'help_options' list as found in various command
+ classes to the 3-tuple form required by FancyGetopt.
+ """
+ new_options = []
+ for help_tuple in options:
+ new_options.append(help_tuple[0:3])
+ return new_options
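+
+# Example (illustrative): a help_options entry such as
+#
+#   ('help-compiler', None, "list available compilers", show_compilers)
+#
+# is trimmed to the 3-tuple ('help-compiler', None,
+# "list available compilers") that FancyGetopt understands.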
diff --git a/third_party/python/setuptools/setuptools/_distutils/errors.py b/third_party/python/setuptools/setuptools/_distutils/errors.py
new file mode 100644
index 0000000000..8b93059e19
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/errors.py
@@ -0,0 +1,97 @@
+"""distutils.errors
+
+Provides exceptions used by the Distutils modules. Note that Distutils
+modules may raise standard exceptions; in particular, SystemExit is
+usually raised for errors that are obviously the end-user's fault
+(eg. bad command-line arguments).
+
+This module is safe to use in "from ... import *" mode; it only exports
+symbols whose names start with "Distutils" and end with "Error"."""
+
+class DistutilsError (Exception):
+ """The root of all Distutils evil."""
+ pass
+
+class DistutilsModuleError (DistutilsError):
+ """Unable to load an expected module, or to find an expected class
+ within some module (in particular, command modules and classes)."""
+ pass
+
+class DistutilsClassError (DistutilsError):
+ """Some command class (or possibly distribution class, if anyone
+ feels a need to subclass Distribution) is found not to be holding
+ up its end of the bargain, ie. implementing some part of the
+ "command "interface."""
+ pass
+
+class DistutilsGetoptError (DistutilsError):
+ """The option table provided to 'fancy_getopt()' is bogus."""
+ pass
+
+class DistutilsArgError (DistutilsError):
+ """Raised by fancy_getopt in response to getopt.error -- ie. an
+ error in the command line usage."""
+ pass
+
+class DistutilsFileError (DistutilsError):
+ """Any problems in the filesystem: expected file not found, etc.
+ Typically this is for problems that we detect before OSError
+ could be raised."""
+ pass
+
+class DistutilsOptionError (DistutilsError):
+ """Syntactic/semantic errors in command options, such as use of
+ mutually conflicting options, or inconsistent options,
+ badly-spelled values, etc. No distinction is made between option
+ values originating in the setup script, the command line, config
+ files, or what-have-you -- but if we *know* something originated in
+ the setup script, we'll raise DistutilsSetupError instead."""
+ pass
+
+class DistutilsSetupError (DistutilsError):
+ """For errors that can be definitely blamed on the setup script,
+ such as invalid keyword arguments to 'setup()'."""
+ pass
+
+class DistutilsPlatformError (DistutilsError):
+ """We don't know how to do something on the current platform (but
+ we do know how to do it on some platform) -- eg. trying to compile
+ C files on a platform not supported by a CCompiler subclass."""
+ pass
+
+class DistutilsExecError (DistutilsError):
+ """Any problems executing an external program (such as the C
+ compiler, when compiling C files)."""
+ pass
+
+class DistutilsInternalError (DistutilsError):
+ """Internal inconsistencies or impossibilities (obviously, this
+ should never be seen if the code is working!)."""
+ pass
+
+class DistutilsTemplateError (DistutilsError):
+ """Syntax error in a file list template."""
+
+class DistutilsByteCompileError(DistutilsError):
+ """Byte compile error."""
+
+# Exception classes used by the CCompiler implementation classes
+class CCompilerError (Exception):
+ """Some compile/link operation failed."""
+
+class PreprocessError (CCompilerError):
+ """Failure to preprocess one or more C/C++ files."""
+
+class CompileError (CCompilerError):
+ """Failure to compile one or more C/C++ source files."""
+
+class LibError (CCompilerError):
+ """Failure to create a static library from one or more C/C++ object
+ files."""
+
+class LinkError (CCompilerError):
+ """Failure to link one or more C/C++ object files into an executable
+ or shared library file."""
+
+class UnknownFileError (CCompilerError):
+ """Attempt to process an unknown file type."""
diff --git a/third_party/python/setuptools/setuptools/_distutils/extension.py b/third_party/python/setuptools/setuptools/_distutils/extension.py
new file mode 100644
index 0000000000..c507da360a
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/extension.py
@@ -0,0 +1,240 @@
+"""distutils.extension
+
+Provides the Extension class, used to describe C/C++ extension
+modules in setup scripts."""
+
+import os
+import warnings
+
+# This class is really only used by the "build_ext" command, so it might
+# make sense to put it in distutils.command.build_ext. However, that
+# module is already big enough, and I want to make this class a bit more
+# complex to simplify some common cases ("foo" module in "foo.c") and do
+# better error-checking ("foo.c" actually exists).
+#
+# Also, putting this in build_ext.py means every setup script would have to
+# import that large-ish module (indirectly, through distutils.core) in
+# order to do anything.
+
+class Extension:
+ """Just a collection of attributes that describes an extension
+ module and everything needed to build it (hopefully in a portable
+ way, but there are hooks that let you be as unportable as you need).
+
+ Instance attributes:
+ name : string
+ the full name of the extension, including any packages -- ie.
+ *not* a filename or pathname, but Python dotted name
+ sources : [string]
+ list of source filenames, relative to the distribution root
+ (where the setup script lives), in Unix form (slash-separated)
+ for portability. Source files may be C, C++, SWIG (.i),
+ platform-specific resource files, or whatever else is recognized
+ by the "build_ext" command as source for a Python extension.
+ include_dirs : [string]
+ list of directories to search for C/C++ header files (in Unix
+ form for portability)
+ define_macros : [(name : string, value : string|None)]
+ list of macros to define; each macro is defined using a 2-tuple,
+ where 'value' is either the string to define it to or None to
+ define it without a particular value (equivalent of "#define
+ FOO" in source or -DFOO on Unix C compiler command line)
+ undef_macros : [string]
+ list of macros to undefine explicitly
+ library_dirs : [string]
+ list of directories to search for C/C++ libraries at link time
+ libraries : [string]
+ list of library names (not filenames or paths) to link against
+ runtime_library_dirs : [string]
+ list of directories to search for C/C++ libraries at run time
+ (for shared extensions, this is when the extension is loaded)
+ extra_objects : [string]
+ list of extra files to link with (eg. object files not implied
+ by 'sources', static library that must be explicitly specified,
+ binary resource files, etc.)
+ extra_compile_args : [string]
+ any extra platform- and compiler-specific information to use
+ when compiling the source files in 'sources'. For platforms and
+ compilers where "command line" makes sense, this is typically a
+ list of command-line arguments, but for other platforms it could
+ be anything.
+ extra_link_args : [string]
+ any extra platform- and compiler-specific information to use
+ when linking object files together to create the extension (or
+ to create a new static Python interpreter). Similar
+ interpretation as for 'extra_compile_args'.
+ export_symbols : [string]
+ list of symbols to be exported from a shared extension. Not
+ used on all platforms, and not generally necessary for Python
+ extensions, which typically export exactly one symbol: "init" +
+ extension_name.
+ swig_opts : [string]
+ any extra options to pass to SWIG if a source file has the .i
+ extension.
+ depends : [string]
+ list of files that the extension depends on
+ language : string
+ extension language (i.e. "c", "c++", "objc"). Will be detected
+ from the source extensions if not provided.
+ optional : boolean
+ specifies that a build failure in the extension should not abort the
+ build process, but simply not install the failing extension.
+ """
+
+ # When adding arguments to this constructor, be sure to update
+ # setup_keywords in core.py.
+ def __init__(self, name, sources,
+ include_dirs=None,
+ define_macros=None,
+ undef_macros=None,
+ library_dirs=None,
+ libraries=None,
+ runtime_library_dirs=None,
+ extra_objects=None,
+ extra_compile_args=None,
+ extra_link_args=None,
+ export_symbols=None,
+ swig_opts = None,
+ depends=None,
+ language=None,
+ optional=None,
+ **kw # To catch unknown keywords
+ ):
+ if not isinstance(name, str):
+ raise AssertionError("'name' must be a string")
+ if not (isinstance(sources, list) and
+ all(isinstance(v, str) for v in sources)):
+ raise AssertionError("'sources' must be a list of strings")
+
+ self.name = name
+ self.sources = sources
+ self.include_dirs = include_dirs or []
+ self.define_macros = define_macros or []
+ self.undef_macros = undef_macros or []
+ self.library_dirs = library_dirs or []
+ self.libraries = libraries or []
+ self.runtime_library_dirs = runtime_library_dirs or []
+ self.extra_objects = extra_objects or []
+ self.extra_compile_args = extra_compile_args or []
+ self.extra_link_args = extra_link_args or []
+ self.export_symbols = export_symbols or []
+ self.swig_opts = swig_opts or []
+ self.depends = depends or []
+ self.language = language
+ self.optional = optional
+
+ # If there are unknown keyword options, warn about them
+ if len(kw) > 0:
+ options = [repr(option) for option in kw]
+ options = ', '.join(sorted(options))
+ msg = "Unknown Extension options: %s" % options
+ warnings.warn(msg)
+
+ def __repr__(self):
+ return '<%s.%s(%r) at %#x>' % (
+ self.__class__.__module__,
+ self.__class__.__qualname__,
+ self.name,
+ id(self))
+
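+# Example (illustrative): a typical Extension for one C source file with
+# a macro defined, as it might appear in a setup script:
+#
+#   Extension('foo', ['src/foo.c'],
+#             include_dirs=['include'],
+#             define_macros=[('NDEBUG', None)])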
+
+def read_setup_file(filename):
+ """Reads a Setup file and returns Extension instances."""
+ from distutils.sysconfig import (parse_makefile, expand_makefile_vars,
+ _variable_rx)
+
+ from distutils.text_file import TextFile
+ from distutils.util import split_quoted
+
+ # First pass over the file to gather "VAR = VALUE" assignments.
+ vars = parse_makefile(filename)
+
+ # Second pass to gobble up the real content: lines of the form
+ # <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
+ file = TextFile(filename,
+ strip_comments=1, skip_blanks=1, join_lines=1,
+ lstrip_ws=1, rstrip_ws=1)
+ try:
+ extensions = []
+
+ while True:
+ line = file.readline()
+ if line is None: # eof
+ break
+ if _variable_rx.match(line): # VAR=VALUE, handled in first pass
+ continue
+
+ if line[0] == line[-1] == "*":
+ file.warn("'%s' lines not handled yet" % line)
+ continue
+
+ line = expand_makefile_vars(line, vars)
+ words = split_quoted(line)
+
+ # NB. this parses a slightly different syntax than the old
+ # makesetup script: here, there must be exactly one extension per
+ # line, and it must be the first word of the line. I have no idea
+ # why the old syntax supported multiple extensions per line, as
+ # they all wind up being the same.
+
+ module = words[0]
+ ext = Extension(module, [])
+ append_next_word = None
+
+ for word in words[1:]:
+ if append_next_word is not None:
+ append_next_word.append(word)
+ append_next_word = None
+ continue
+
+ suffix = os.path.splitext(word)[1]
+                switch = word[0:2]
+                value = word[2:]
+
+ if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
+ # hmm, should we do something about C vs. C++ sources?
+ # or leave it up to the CCompiler implementation to
+ # worry about?
+ ext.sources.append(word)
+ elif switch == "-I":
+ ext.include_dirs.append(value)
+ elif switch == "-D":
+ equals = value.find("=")
+ if equals == -1: # bare "-DFOO" -- no value
+ ext.define_macros.append((value, None))
+                    else: # "-DFOO=blah"
+                        # use equals+1 so the macro's value keeps its first
+                        # character (equals+2 would silently drop it)
+                        ext.define_macros.append((value[0:equals],
+                                                  value[equals+1:]))
+ elif switch == "-U":
+ ext.undef_macros.append(value)
+ elif switch == "-C": # only here 'cause makesetup has it!
+ ext.extra_compile_args.append(word)
+ elif switch == "-l":
+ ext.libraries.append(value)
+ elif switch == "-L":
+ ext.library_dirs.append(value)
+ elif switch == "-R":
+ ext.runtime_library_dirs.append(value)
+ elif word == "-rpath":
+ append_next_word = ext.runtime_library_dirs
+ elif word == "-Xlinker":
+ append_next_word = ext.extra_link_args
+ elif word == "-Xcompiler":
+ append_next_word = ext.extra_compile_args
+ elif switch == "-u":
+ ext.extra_link_args.append(word)
+ if not value:
+ append_next_word = ext.extra_link_args
+ elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
+ # NB. a really faithful emulation of makesetup would
+ # append a .o file to extra_objects only if it
+ # had a slash in it; otherwise, it would s/.o/.c/
+ # and append it to sources. Hmmmm.
+ ext.extra_objects.append(word)
+ else:
+ file.warn("unrecognized argument '%s'" % word)
+
+ extensions.append(ext)
+ finally:
+ file.close()
+
+ return extensions
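+
+# Example (illustrative): a Setup file line such as
+#
+#   foo foomodule.c -DFOO=1 -Iinclude -lbar
+#
+# yields Extension('foo', ['foomodule.c']) with define_macros
+# [('FOO', '1')], include_dirs ['include'], and libraries ['bar'].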
diff --git a/third_party/python/setuptools/setuptools/_distutils/fancy_getopt.py b/third_party/python/setuptools/setuptools/_distutils/fancy_getopt.py
new file mode 100644
index 0000000000..7d170dd277
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/fancy_getopt.py
@@ -0,0 +1,457 @@
+"""distutils.fancy_getopt
+
+Wrapper around the standard getopt module that provides the following
+additional features:
+ * short and long options are tied together
+ * options have help strings, so fancy_getopt could potentially
+ create a complete usage summary
+ * options set attributes of a passed-in object
+"""
+
+import sys, string, re
+import getopt
+from distutils.errors import *
+
+# Much like command_re in distutils.core, this is close to but not quite
+# the same as a Python NAME -- except, in the spirit of most GNU
+# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
+# The similarities to NAME are again not a coincidence...
+longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
+longopt_re = re.compile(r'^%s$' % longopt_pat)
+
+# For recognizing "negative alias" options, eg. "quiet=!verbose"
+neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
+
+# This is used to translate long options to legitimate Python identifiers
+# (for use as attributes of some object).
+longopt_xlate = str.maketrans('-', '_')
+
+class FancyGetopt:
+ """Wrapper around the standard 'getopt()' module that provides some
+ handy extra functionality:
+ * short and long options are tied together
+ * options have help strings, and help text can be assembled
+ from them
+ * options set attributes of a passed-in object
+ * boolean options can have "negative aliases" -- eg. if
+ --quiet is the "negative alias" of --verbose, then "--quiet"
+ on the command line sets 'verbose' to false
+ """
+
+ def __init__(self, option_table=None):
+ # The option table is (currently) a list of tuples. The
+ # tuples may have 3 or four values:
+ # (long_option, short_option, help_string [, repeatable])
+ # if an option takes an argument, its long_option should have '='
+ # appended; short_option should just be a single character, no ':'
+ # in any case. If a long_option doesn't have a corresponding
+ # short_option, short_option should be None. All option tuples
+ # must have long options.
+ self.option_table = option_table
+
+ # 'option_index' maps long option names to entries in the option
+ # table (ie. those 3-tuples).
+ self.option_index = {}
+ if self.option_table:
+ self._build_index()
+
+ # 'alias' records (duh) alias options; {'foo': 'bar'} means
+ # --foo is an alias for --bar
+ self.alias = {}
+
+ # 'negative_alias' keeps track of options that are the boolean
+ # opposite of some other option
+ self.negative_alias = {}
+
+ # These keep track of the information in the option table. We
+ # don't actually populate these structures until we're ready to
+ # parse the command-line, since the 'option_table' passed in here
+ # isn't necessarily the final word.
+ self.short_opts = []
+ self.long_opts = []
+ self.short2long = {}
+ self.attr_name = {}
+ self.takes_arg = {}
+
+ # And 'option_order' is filled up in 'getopt()'; it records the
+ # original order of options (and their values) on the command-line,
+ # but expands short options, converts aliases, etc.
+ self.option_order = []
+
+ def _build_index(self):
+ self.option_index.clear()
+ for option in self.option_table:
+ self.option_index[option[0]] = option
+
+ def set_option_table(self, option_table):
+ self.option_table = option_table
+ self._build_index()
+
+ def add_option(self, long_option, short_option=None, help_string=None):
+ if long_option in self.option_index:
+ raise DistutilsGetoptError(
+ "option conflict: already an option '%s'" % long_option)
+ else:
+ option = (long_option, short_option, help_string)
+ self.option_table.append(option)
+ self.option_index[long_option] = option
+
+ def has_option(self, long_option):
+ """Return true if the option table for this parser has an
+ option with long name 'long_option'."""
+ return long_option in self.option_index
+
+ def get_attr_name(self, long_option):
+ """Translate long option name 'long_option' to the form it
+ has as an attribute of some object: ie., translate hyphens
+ to underscores."""
+ return long_option.translate(longopt_xlate)
+
+ def _check_alias_dict(self, aliases, what):
+ assert isinstance(aliases, dict)
+ for (alias, opt) in aliases.items():
+ if alias not in self.option_index:
+ raise DistutilsGetoptError(("invalid %s '%s': "
+ "option '%s' not defined") % (what, alias, alias))
+ if opt not in self.option_index:
+ raise DistutilsGetoptError(("invalid %s '%s': "
+ "aliased option '%s' not defined") % (what, alias, opt))
+
+ def set_aliases(self, alias):
+ """Set the aliases for this option parser."""
+ self._check_alias_dict(alias, "alias")
+ self.alias = alias
+
+ def set_negative_aliases(self, negative_alias):
+ """Set the negative aliases for this option parser.
+ 'negative_alias' should be a dictionary mapping option names to
+ option names, both the key and value must already be defined
+ in the option table."""
+ self._check_alias_dict(negative_alias, "negative alias")
+ self.negative_alias = negative_alias
+
+ def _grok_option_table(self):
+ """Populate the various data structures that keep tabs on the
+ option table. Called by 'getopt()' before it can do anything
+ worthwhile.
+ """
+ self.long_opts = []
+ self.short_opts = []
+ self.short2long.clear()
+ self.repeat = {}
+
+ for option in self.option_table:
+ if len(option) == 3:
+ long, short, help = option
+ repeat = 0
+ elif len(option) == 4:
+ long, short, help, repeat = option
+ else:
+ # the option table is part of the code, so simply
+ # assert that it is correct
+ raise ValueError("invalid option tuple: %r" % (option,))
+
+ # Type- and value-check the option names
+ if not isinstance(long, str) or len(long) < 2:
+ raise DistutilsGetoptError(("invalid long option '%s': "
+ "must be a string of length >= 2") % long)
+
+ if (not ((short is None) or
+ (isinstance(short, str) and len(short) == 1))):
+ raise DistutilsGetoptError("invalid short option '%s': "
+ "must a single character or None" % short)
+
+ self.repeat[long] = repeat
+ self.long_opts.append(long)
+
+ if long[-1] == '=': # option takes an argument?
+ if short: short = short + ':'
+ long = long[0:-1]
+ self.takes_arg[long] = 1
+ else:
+                # Is this option a "negative alias" for some other option (eg.
+ # "quiet" == "!verbose")?
+ alias_to = self.negative_alias.get(long)
+ if alias_to is not None:
+ if self.takes_arg[alias_to]:
+ raise DistutilsGetoptError(
+ "invalid negative alias '%s': "
+ "aliased option '%s' takes a value"
+ % (long, alias_to))
+
+ self.long_opts[-1] = long # XXX redundant?!
+ self.takes_arg[long] = 0
+
+ # If this is an alias option, make sure its "takes arg" flag is
+ # the same as the option it's aliased to.
+ alias_to = self.alias.get(long)
+ if alias_to is not None:
+ if self.takes_arg[long] != self.takes_arg[alias_to]:
+ raise DistutilsGetoptError(
+ "invalid alias '%s': inconsistent with "
+ "aliased option '%s' (one of them takes a value, "
+ "the other doesn't"
+ % (long, alias_to))
+
+ # Now enforce some bondage on the long option name, so we can
+ # later translate it to an attribute name on some object. Have
+ # to do this a bit late to make sure we've removed any trailing
+ # '='.
+ if not longopt_re.match(long):
+ raise DistutilsGetoptError(
+ "invalid long option name '%s' "
+ "(must be letters, numbers, hyphens only" % long)
+
+ self.attr_name[long] = self.get_attr_name(long)
+ if short:
+ self.short_opts.append(short)
+ self.short2long[short[0]] = long
+
+ def getopt(self, args=None, object=None):
+ """Parse command-line options in args. Store as attributes on object.
+
+ If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
+ 'object' is None or not supplied, creates a new OptionDummy
+ object, stores option values there, and returns a tuple (args,
+ object). If 'object' is supplied, it is modified in place and
+ 'getopt()' just returns 'args'; in both cases, the returned
+ 'args' is a modified copy of the passed-in 'args' list, which
+ is left untouched.
+ """
+ if args is None:
+ args = sys.argv[1:]
+ if object is None:
+ object = OptionDummy()
+ created_object = True
+ else:
+ created_object = False
+
+ self._grok_option_table()
+
+ short_opts = ' '.join(self.short_opts)
+ try:
+ opts, args = getopt.getopt(args, short_opts, self.long_opts)
+ except getopt.error as msg:
+ raise DistutilsArgError(msg)
+
+ for opt, val in opts:
+ if len(opt) == 2 and opt[0] == '-': # it's a short option
+ opt = self.short2long[opt[1]]
+ else:
+ assert len(opt) > 2 and opt[:2] == '--'
+ opt = opt[2:]
+
+ alias = self.alias.get(opt)
+ if alias:
+ opt = alias
+
+ if not self.takes_arg[opt]: # boolean option?
+ assert val == '', "boolean option can't have value"
+ alias = self.negative_alias.get(opt)
+ if alias:
+ opt = alias
+ val = 0
+ else:
+ val = 1
+
+ attr = self.attr_name[opt]
+ # The only repeating option at the moment is 'verbose'.
+ # It has a negative option -q quiet, which should set verbose = 0.
+ if val and self.repeat.get(attr) is not None:
+ val = getattr(object, attr, 0) + 1
+ setattr(object, attr, val)
+ self.option_order.append((opt, val))
+
+ # for opts
+ if created_object:
+ return args, object
+ else:
+ return args
+
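+    # Example (illustrative): with a hypothetical option table
+    #
+    #   parser = FancyGetopt([('output=', 'o', "output file"),
+    #                         ('verbose', 'v', "run verbosely")])
+    #   args, opts = parser.getopt(['-o', 'out.txt', '-v', 'input.txt'])
+    #
+    # 'opts' carries opts.output == 'out.txt' and opts.verbose == 1, and
+    # 'args' is the leftover ['input.txt'].
+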
+ def get_option_order(self):
+ """Returns the list of (option, value) tuples processed by the
+ previous run of 'getopt()'. Raises RuntimeError if
+ 'getopt()' hasn't been called yet.
+ """
+ if self.option_order is None:
+ raise RuntimeError("'getopt()' hasn't been called yet")
+ else:
+ return self.option_order
+
+ def generate_help(self, header=None):
+ """Generate help text (a list of strings, one per suggested line of
+ output) from the option table for this FancyGetopt object.
+ """
+ # Blithely assume the option table is good: probably wouldn't call
+ # 'generate_help()' unless you've already called 'getopt()'.
+
+ # First pass: determine maximum length of long option names
+ max_opt = 0
+ for option in self.option_table:
+ long = option[0]
+ short = option[1]
+ l = len(long)
+ if long[-1] == '=':
+ l = l - 1
+ if short is not None:
+ l = l + 5 # " (-x)" where short == 'x'
+ if l > max_opt:
+ max_opt = l
+
+ opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
+
+ # Typical help block looks like this:
+ # --foo controls foonabulation
+ # Help block for longest option looks like this:
+ # --flimflam set the flim-flam level
+ # and with wrapped text:
+ # --flimflam set the flim-flam level (must be between
+ # 0 and 100, except on Tuesdays)
+ # Options with short names will have the short name shown (but
+ # it doesn't contribute to max_opt):
+ # --foo (-f) controls foonabulation
+ # If adding the short option would make the left column too wide,
+ # we push the explanation off to the next line
+ # --flimflam (-l)
+ # set the flim-flam level
+ # Important parameters:
+ # - 2 spaces before option block start lines
+ # - 2 dashes for each long option name
+ # - min. 2 spaces between option and explanation (gutter)
+ # - 5 characters (incl. space) for short option name
+
+ # Now generate lines of help text. (If 80 columns were good enough
+ # for Jesus, then 78 columns are good enough for me!)
+ line_width = 78
+ text_width = line_width - opt_width
+ big_indent = ' ' * opt_width
+ if header:
+ lines = [header]
+ else:
+ lines = ['Option summary:']
+
+ for option in self.option_table:
+ long, short, help = option[:3]
+ text = wrap_text(help, text_width)
+ if long[-1] == '=':
+ long = long[0:-1]
+
+ # Case 1: no short option at all (makes life easy)
+ if short is None:
+ if text:
+ lines.append(" --%-*s %s" % (max_opt, long, text[0]))
+ else:
+ lines.append(" --%-*s " % (max_opt, long))
+
+ # Case 2: we have a short option, so we have to include it
+ # just after the long option
+ else:
+ opt_names = "%s (-%s)" % (long, short)
+ if text:
+ lines.append(" --%-*s %s" %
+ (max_opt, opt_names, text[0]))
+ else:
+ lines.append(" --%-*s" % opt_names)
+
+ for l in text[1:]:
+ lines.append(big_indent + l)
+ return lines
+
+ def print_help(self, header=None, file=None):
+ if file is None:
+ file = sys.stdout
+ for line in self.generate_help(header):
+ file.write(line + "\n")
+
+
+def fancy_getopt(options, negative_opt, object, args):
+ parser = FancyGetopt(options)
+ parser.set_negative_aliases(negative_opt)
+ return parser.getopt(args, object)
+
+
+WS_TRANS = {ord(_wschar) : ' ' for _wschar in string.whitespace}
+
+def wrap_text(text, width):
+ """wrap_text(text : string, width : int) -> [string]
+
+ Split 'text' into multiple lines of no more than 'width' characters
+ each, and return the list of strings that results.
+ """
+ if text is None:
+ return []
+ if len(text) <= width:
+ return [text]
+
+ text = text.expandtabs()
+ text = text.translate(WS_TRANS)
+ chunks = re.split(r'( +|-+)', text)
+ chunks = [ch for ch in chunks if ch] # ' - ' results in empty strings
+ lines = []
+
+ while chunks:
+ cur_line = [] # list of chunks (to-be-joined)
+ cur_len = 0 # length of current line
+
+ while chunks:
+ l = len(chunks[0])
+ if cur_len + l <= width: # can squeeze (at least) this chunk in
+ cur_line.append(chunks[0])
+ del chunks[0]
+ cur_len = cur_len + l
+ else: # this line is full
+ # drop last chunk if all space
+ if cur_line and cur_line[-1][0] == ' ':
+ del cur_line[-1]
+ break
+
+ if chunks: # any chunks left to process?
+ # if the current line is still empty, then we had a single
+            # chunk that's too big to fit on a line -- so we break
+ # down and break it up at the line width
+ if cur_len == 0:
+ cur_line.append(chunks[0][0:width])
+ chunks[0] = chunks[0][width:]
+
+ # all-whitespace chunks at the end of a line can be discarded
+ # (and we know from the re.split above that if a chunk has
+ # *any* whitespace, it is *all* whitespace)
+ if chunks[0][0] == ' ':
+ del chunks[0]
+
+ # and store this line in the list-of-all-lines -- as a single
+ # string, of course!
+ lines.append(''.join(cur_line))
+
+ return lines
+
+
+def translate_longopt(opt):
+ """Convert a long option name to a valid Python identifier by
+ changing "-" to "_".
+ """
+ return opt.translate(longopt_xlate)
+
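+# Example (illustrative): translate_longopt('dry-run') -> 'dry_run'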
+
+class OptionDummy:
+ """Dummy class just used as a place to hold command-line option
+ values as instance attributes."""
+
+ def __init__(self, options=[]):
+ """Create a new OptionDummy instance. The attributes listed in
+ 'options' will be initialized to None."""
+ for opt in options:
+ setattr(self, opt, None)
+
+
+if __name__ == "__main__":
+ text = """\
+Tra-la-la, supercalifragilisticexpialidocious.
+How *do* you spell that odd word, anyways?
+(Someone ask Mary -- she'll know [or she'll
+say, "How should I know?"].)"""
+
+ for w in (10, 20, 30, 40):
+ print("width: %d" % w)
+ print("\n".join(wrap_text(text, w)))
+ print()
diff --git a/third_party/python/setuptools/setuptools/_distutils/file_util.py b/third_party/python/setuptools/setuptools/_distutils/file_util.py
new file mode 100644
index 0000000000..b3fee35a6c
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/file_util.py
@@ -0,0 +1,238 @@
+"""distutils.file_util
+
+Utility functions for operating on single files.
+"""
+
+import os
+from distutils.errors import DistutilsFileError
+from distutils import log
+
+# for generating verbose output in 'copy_file()'
+_copy_action = { None: 'copying',
+ 'hard': 'hard linking',
+ 'sym': 'symbolically linking' }
+
+
+def _copy_file_contents(src, dst, buffer_size=16*1024):
+ """Copy the file 'src' to 'dst'; both must be filenames. Any error
+ opening either file, reading from 'src', or writing to 'dst', raises
+ DistutilsFileError. Data is read/written in chunks of 'buffer_size'
+ bytes (default 16k). No attempt is made to handle anything apart from
+ regular files.
+ """
+ # Stolen from shutil module in the standard library, but with
+ # custom error-handling added.
+ fsrc = None
+ fdst = None
+ try:
+ try:
+ fsrc = open(src, 'rb')
+ except OSError as e:
+ raise DistutilsFileError("could not open '%s': %s" % (src, e.strerror))
+
+ if os.path.exists(dst):
+ try:
+ os.unlink(dst)
+ except OSError as e:
+ raise DistutilsFileError(
+ "could not delete '%s': %s" % (dst, e.strerror))
+
+ try:
+ fdst = open(dst, 'wb')
+ except OSError as e:
+ raise DistutilsFileError(
+ "could not create '%s': %s" % (dst, e.strerror))
+
+ while True:
+ try:
+ buf = fsrc.read(buffer_size)
+ except OSError as e:
+ raise DistutilsFileError(
+ "could not read from '%s': %s" % (src, e.strerror))
+
+ if not buf:
+ break
+
+ try:
+ fdst.write(buf)
+ except OSError as e:
+ raise DistutilsFileError(
+ "could not write to '%s': %s" % (dst, e.strerror))
+ finally:
+ if fdst:
+ fdst.close()
+ if fsrc:
+ fsrc.close()
+
+def copy_file(src, dst, preserve_mode=1, preserve_times=1, update=0,
+ link=None, verbose=1, dry_run=0):
+ """Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
+ copied there with the same name; otherwise, it must be a filename. (If
+ the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
+ is true (the default), the file's mode (type and permission bits, or
+ whatever is analogous on the current platform) is copied. If
+ 'preserve_times' is true (the default), the last-modified and
+ last-access times are copied as well. If 'update' is true, 'src' will
+ only be copied if 'dst' does not exist, or if 'dst' does exist but is
+ older than 'src'.
+
+ 'link' allows you to make hard links (os.link) or symbolic links
+ (os.symlink) instead of copying: set it to "hard" or "sym"; if it is
+ None (the default), files are copied. Don't set 'link' on systems that
+ don't support it: 'copy_file()' doesn't check if hard or symbolic
+ linking is available. If hard linking fails, it falls back to
+ copying via '_copy_file_contents()'.
+
+ Under Mac OS, uses the native file copy function in macostools; on
+ other systems, uses '_copy_file_contents()' to copy file contents.
+
+ Return a tuple (dest_name, copied): 'dest_name' is the actual name of
+ the output file, and 'copied' is true if the file was copied (or would
+ have been copied, if 'dry_run' true).
+ """
+ # XXX if the destination file already exists, we clobber it if
+ # copying, but blow up if linking. Hmmm. And I don't know what
+ # macostools.copyfile() does. Should definitely be consistent, and
+ # should probably blow up if destination exists and we would be
+ # changing it (ie. it's not already a hard/soft link to src OR
+ # (not update) and (src newer than dst)).
+
+ from distutils.dep_util import newer
+ from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
+
+ if not os.path.isfile(src):
+ raise DistutilsFileError(
+ "can't copy '%s': doesn't exist or not a regular file" % src)
+
+ if os.path.isdir(dst):
+ dir = dst
+ dst = os.path.join(dst, os.path.basename(src))
+ else:
+ dir = os.path.dirname(dst)
+
+ if update and not newer(src, dst):
+ if verbose >= 1:
+ log.debug("not copying %s (output up-to-date)", src)
+ return (dst, 0)
+
+ try:
+ action = _copy_action[link]
+ except KeyError:
+ raise ValueError("invalid value '%s' for 'link' argument" % link)
+
+ if verbose >= 1:
+ if os.path.basename(dst) == os.path.basename(src):
+ log.info("%s %s -> %s", action, src, dir)
+ else:
+ log.info("%s %s -> %s", action, src, dst)
+
+ if dry_run:
+ return (dst, 1)
+
+ # If linking (hard or symbolic), use the appropriate system call
+ # (Unix only, of course, but that's the caller's responsibility)
+ elif link == 'hard':
+ if not (os.path.exists(dst) and os.path.samefile(src, dst)):
+ try:
+ os.link(src, dst)
+ return (dst, 1)
+ except OSError:
+ # If hard linking fails, fall back on copying file
+ # (some special filesystems don't support hard linking
+ # even under Unix, see issue #8876).
+ pass
+ elif link == 'sym':
+ if not (os.path.exists(dst) and os.path.samefile(src, dst)):
+ os.symlink(src, dst)
+ return (dst, 1)
+
+ # Otherwise (non-Mac, not linking), copy the file contents and
+ # (optionally) copy the times and mode.
+ _copy_file_contents(src, dst)
+ if preserve_mode or preserve_times:
+ st = os.stat(src)
+
+ # According to David Ascher <da@ski.org>, utime() should be done
+ # before chmod() (at least under NT).
+ if preserve_times:
+ os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
+ if preserve_mode:
+ os.chmod(dst, S_IMODE(st[ST_MODE]))
+
+ return (dst, 1)
+
+
+# XXX I suspect this is Unix-specific -- need porting help!
+def move_file (src, dst,
+ verbose=1,
+ dry_run=0):
+
+ """Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
+ be moved into it with the same name; otherwise, 'src' is just renamed
+ to 'dst'. Return the new full name of the file.
+
+ Handles cross-device moves on Unix using 'copy_file()'. What about
+ other systems???
+ """
+ from os.path import exists, isfile, isdir, basename, dirname
+ import errno
+
+ if verbose >= 1:
+ log.info("moving %s -> %s", src, dst)
+
+ if dry_run:
+ return dst
+
+ if not isfile(src):
+ raise DistutilsFileError("can't move '%s': not a regular file" % src)
+
+ if isdir(dst):
+ dst = os.path.join(dst, basename(src))
+ elif exists(dst):
+ raise DistutilsFileError(
+ "can't move '%s': destination '%s' already exists" %
+ (src, dst))
+
+ if not isdir(dirname(dst)):
+ raise DistutilsFileError(
+ "can't move '%s': destination '%s' not a valid path" %
+ (src, dst))
+
+ copy_it = False
+ try:
+ os.rename(src, dst)
+ except OSError as e:
+ (num, msg) = e.args
+ if num == errno.EXDEV:
+ copy_it = True
+ else:
+ raise DistutilsFileError(
+ "couldn't move '%s' to '%s': %s" % (src, dst, msg))
+
+ if copy_it:
+ copy_file(src, dst, verbose=verbose)
+ try:
+ os.unlink(src)
+ except OSError as e:
+ (num, msg) = e.args
+ try:
+ os.unlink(dst)
+ except OSError:
+ pass
+ raise DistutilsFileError(
+ "couldn't move '%s' to '%s' by copy/delete: "
+ "delete '%s' failed: %s"
+ % (src, dst, src, msg))
+ return dst
+
+
+def write_file (filename, contents):
+ """Create a file with the specified name and write 'contents' (a
+ sequence of strings without line terminators) to it.
+ """
+ f = open(filename, "w")
+ try:
+ for line in contents:
+ f.write(line + "\n")
+ finally:
+ f.close()
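
A short sketch of the three helpers above: 'copy_file()' returns a
(dest_name, copied) tuple, 'move_file()' returns the new path, and
'write_file()' writes newline-terminated lines. The file names are invented
for the example, and the target directories are assumed to already exist:

    from distutils.file_util import copy_file, move_file, write_file

    write_file('hello.txt', ['hello', 'world'])
    # dst may be a directory or a filename; 'backup/' must already exist
    dest, copied = copy_file('hello.txt', 'backup/', update=1, verbose=0)
    print(dest, copied)    # backup/hello.txt 1
    # cross-device moves fall back to copy_file() plus unlink of the source
    new_path = move_file('hello.txt', 'archive/hello.txt', verbose=0)
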
diff --git a/third_party/python/setuptools/setuptools/_distutils/filelist.py b/third_party/python/setuptools/setuptools/_distutils/filelist.py
new file mode 100644
index 0000000000..c92d5fdba3
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/filelist.py
@@ -0,0 +1,327 @@
+"""distutils.filelist
+
+Provides the FileList class, used for poking about the filesystem
+and building lists of files.
+"""
+
+import os, re
+import fnmatch
+import functools
+from distutils.util import convert_path
+from distutils.errors import DistutilsTemplateError, DistutilsInternalError
+from distutils import log
+
+class FileList:
+ """A list of files built by on exploring the filesystem and filtered by
+ applying various patterns to what we find there.
+
+ Instance attributes:
+ dir
+ directory from which files will be taken -- only used if
+ 'allfiles' not supplied to constructor
+ files
+ list of filenames currently being built/filtered/manipulated
+ allfiles
+ complete list of files under consideration (ie. without any
+ filtering applied)
+ """
+
+ def __init__(self, warn=None, debug_print=None):
+ # ignore arguments to FileList, but keep them for backwards
+ # compatibility
+ self.allfiles = None
+ self.files = []
+
+ def set_allfiles(self, allfiles):
+ self.allfiles = allfiles
+
+ def findall(self, dir=os.curdir):
+ self.allfiles = findall(dir)
+
+ def debug_print(self, msg):
+ """Print 'msg' to stdout if the global DEBUG (taken from the
+ DISTUTILS_DEBUG environment variable) flag is true.
+ """
+ from distutils.debug import DEBUG
+ if DEBUG:
+ print(msg)
+
+ # -- List-like methods ---------------------------------------------
+
+ def append(self, item):
+ self.files.append(item)
+
+ def extend(self, items):
+ self.files.extend(items)
+
+ def sort(self):
+ # Not a strict lexical sort!
+ sortable_files = sorted(map(os.path.split, self.files))
+ self.files = []
+ for sort_tuple in sortable_files:
+ self.files.append(os.path.join(*sort_tuple))
+
+
+ # -- Other miscellaneous utility methods ---------------------------
+
+ def remove_duplicates(self):
+ # Assumes list has been sorted!
+ for i in range(len(self.files) - 1, 0, -1):
+ if self.files[i] == self.files[i - 1]:
+ del self.files[i]
+
+
+ # -- "File template" methods ---------------------------------------
+
+ def _parse_template_line(self, line):
+ words = line.split()
+ action = words[0]
+
+ patterns = dir = dir_pattern = None
+
+ if action in ('include', 'exclude',
+ 'global-include', 'global-exclude'):
+ if len(words) < 2:
+ raise DistutilsTemplateError(
+ "'%s' expects <pattern1> <pattern2> ..." % action)
+ patterns = [convert_path(w) for w in words[1:]]
+ elif action in ('recursive-include', 'recursive-exclude'):
+ if len(words) < 3:
+ raise DistutilsTemplateError(
+ "'%s' expects <dir> <pattern1> <pattern2> ..." % action)
+ dir = convert_path(words[1])
+ patterns = [convert_path(w) for w in words[2:]]
+ elif action in ('graft', 'prune'):
+ if len(words) != 2:
+ raise DistutilsTemplateError(
+ "'%s' expects a single <dir_pattern>" % action)
+ dir_pattern = convert_path(words[1])
+ else:
+ raise DistutilsTemplateError("unknown action '%s'" % action)
+
+ return (action, patterns, dir, dir_pattern)
+
+ def process_template_line(self, line):
+ # Parse the line: split it up, make sure the right number of words
+ # is there, and return the relevant words. 'action' is always
+ # defined: it's the first word of the line. Which of the other
+ # three are defined depends on the action; it'll be either
+ # patterns, (dir and patterns), or (dir_pattern).
+ (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
+
+ # OK, now we know that the action is valid and we have the
+ # right number of words on the line for that action -- so we
+ # can proceed with minimal error-checking.
+ if action == 'include':
+ self.debug_print("include " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.include_pattern(pattern, anchor=1):
+ log.warn("warning: no files found matching '%s'",
+ pattern)
+
+ elif action == 'exclude':
+ self.debug_print("exclude " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.exclude_pattern(pattern, anchor=1):
+ log.warn(("warning: no previously-included files "
+ "found matching '%s'"), pattern)
+
+ elif action == 'global-include':
+ self.debug_print("global-include " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.include_pattern(pattern, anchor=0):
+ log.warn(("warning: no files found matching '%s' "
+ "anywhere in distribution"), pattern)
+
+ elif action == 'global-exclude':
+ self.debug_print("global-exclude " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.exclude_pattern(pattern, anchor=0):
+ log.warn(("warning: no previously-included files matching "
+ "'%s' found anywhere in distribution"),
+ pattern)
+
+ elif action == 'recursive-include':
+ self.debug_print("recursive-include %s %s" %
+ (dir, ' '.join(patterns)))
+ for pattern in patterns:
+ if not self.include_pattern(pattern, prefix=dir):
+ log.warn(("warning: no files found matching '%s' "
+ "under directory '%s'"),
+ pattern, dir)
+
+ elif action == 'recursive-exclude':
+ self.debug_print("recursive-exclude %s %s" %
+ (dir, ' '.join(patterns)))
+ for pattern in patterns:
+ if not self.exclude_pattern(pattern, prefix=dir):
+ log.warn(("warning: no previously-included files matching "
+ "'%s' found under directory '%s'"),
+ pattern, dir)
+
+ elif action == 'graft':
+ self.debug_print("graft " + dir_pattern)
+ if not self.include_pattern(None, prefix=dir_pattern):
+ log.warn("warning: no directories found matching '%s'",
+ dir_pattern)
+
+ elif action == 'prune':
+ self.debug_print("prune " + dir_pattern)
+ if not self.exclude_pattern(None, prefix=dir_pattern):
+ log.warn(("no previously-included directories found "
+ "matching '%s'"), dir_pattern)
+ else:
+ raise DistutilsInternalError(
+ "this cannot happen: invalid action '%s'" % action)
+
+
+ # -- Filtering/selection methods -----------------------------------
+
+ def include_pattern(self, pattern, anchor=1, prefix=None, is_regex=0):
+ """Select strings (presumably filenames) from 'self.files' that
+ match 'pattern', a Unix-style wildcard (glob) pattern. Patterns
+ are not quite the same as implemented by the 'fnmatch' module: '*'
+ and '?' match non-special characters, where "special" is platform-
+ dependent: slash on Unix; colon, slash, and backslash on
+ DOS/Windows; and colon on Mac OS.
+
+ If 'anchor' is true (the default), then the pattern match is more
+ stringent: "*.py" will match "foo.py" but not "foo/bar.py". If
+ 'anchor' is false, both of these will match.
+
+ If 'prefix' is supplied, then only filenames starting with 'prefix'
+ (itself a pattern) and ending with 'pattern', with anything in between
+ them, will match. 'anchor' is ignored in this case.
+
+ If 'is_regex' is true, 'anchor' and 'prefix' are ignored, and
+ 'pattern' is assumed to be either a string containing a regex or a
+ regex object -- no translation is done, the regex is just compiled
+ and used as-is.
+
+ Selected strings will be added to self.files.
+
+ Return True if files are found, False otherwise.
+ """
+ # XXX docstring lying about what the special chars are?
+ files_found = False
+ pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
+ self.debug_print("include_pattern: applying regex r'%s'" %
+ pattern_re.pattern)
+
+ # delayed loading of allfiles list
+ if self.allfiles is None:
+ self.findall()
+
+ for name in self.allfiles:
+ if pattern_re.search(name):
+ self.debug_print(" adding " + name)
+ self.files.append(name)
+ files_found = True
+ return files_found
+
+
+ def exclude_pattern (self, pattern,
+ anchor=1, prefix=None, is_regex=0):
+ """Remove strings (presumably filenames) from 'files' that match
+ 'pattern'. Other parameters are the same as for
+ 'include_pattern()', above.
+ The list 'self.files' is modified in place.
+ Return True if files are found, False otherwise.
+ """
+ files_found = False
+ pattern_re = translate_pattern(pattern, anchor, prefix, is_regex)
+ self.debug_print("exclude_pattern: applying regex r'%s'" %
+ pattern_re.pattern)
+ for i in range(len(self.files)-1, -1, -1):
+ if pattern_re.search(self.files[i]):
+ self.debug_print(" removing " + self.files[i])
+ del self.files[i]
+ files_found = True
+ return files_found
+
+
+# ----------------------------------------------------------------------
+# Utility functions
+
+def _find_all_simple(path):
+ """
+ Find all files under 'path'
+ """
+ results = (
+ os.path.join(base, file)
+ for base, dirs, files in os.walk(path, followlinks=True)
+ for file in files
+ )
+ return filter(os.path.isfile, results)
+
+
+def findall(dir=os.curdir):
+ """
+ Find all files under 'dir' and return the list of full filenames.
+ Unless dir is '.', return full filenames with dir prepended.
+ """
+ files = _find_all_simple(dir)
+ if dir == os.curdir:
+ make_rel = functools.partial(os.path.relpath, start=dir)
+ files = map(make_rel, files)
+ return list(files)
+
+
+def glob_to_re(pattern):
+ """Translate a shell-like glob pattern to a regular expression; return
+ a string containing the regex. Differs from 'fnmatch.translate()' in
+ that '*' does not match "special characters" (which are
+ platform-specific).
+ """
+ pattern_re = fnmatch.translate(pattern)
+
+ # '?' and '*' in the glob pattern become '.' and '.*' in the RE, which
+ # IMHO is wrong -- '?' and '*' aren't supposed to match slash in Unix,
+ # and by extension they shouldn't match such "special characters" under
+ # any OS. So change all non-escaped dots in the RE to match any
+ # character except the special characters (currently: just os.sep).
+ sep = os.sep
+ if os.sep == '\\':
+ # we're using a regex to manipulate a regex, so we need
+ # to escape the backslash twice
+ sep = r'\\\\'
+ escaped = r'\1[^%s]' % sep
+ pattern_re = re.sub(r'((?<!\\)(\\\\)*)\.', escaped, pattern_re)
+ return pattern_re
+
+
+def translate_pattern(pattern, anchor=1, prefix=None, is_regex=0):
+ """Translate a shell-like wildcard pattern to a compiled regular
+ expression. Return the compiled regex. If 'is_regex' true,
+ then 'pattern' is directly compiled to a regex (if it's a string)
+ or just returned as-is (assumes it's a regex object).
+ """
+ if is_regex:
+ if isinstance(pattern, str):
+ return re.compile(pattern)
+ else:
+ return pattern
+
+ # ditch start and end characters
+ start, _, end = glob_to_re('_').partition('_')
+
+ if pattern:
+ pattern_re = glob_to_re(pattern)
+ assert pattern_re.startswith(start) and pattern_re.endswith(end)
+ else:
+ pattern_re = ''
+
+ if prefix is not None:
+ prefix_re = glob_to_re(prefix)
+ assert prefix_re.startswith(start) and prefix_re.endswith(end)
+ prefix_re = prefix_re[len(start): len(prefix_re) - len(end)]
+ sep = os.sep
+ if os.sep == '\\':
+ sep = r'\\'
+ pattern_re = pattern_re[len(start): len(pattern_re) - len(end)]
+ pattern_re = r'%s\A%s%s.*%s%s' % (start, prefix_re, sep, pattern_re, end)
+ else: # no prefix -- respect anchor flag
+ if anchor:
+ pattern_re = r'%s\A%s' % (start, pattern_re[len(start):])
+
+ return re.compile(pattern_re)
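
The template machinery above is what the 'sdist' command uses to interpret
MANIFEST.in lines. A minimal sketch, seeding 'allfiles' by hand instead of
walking the filesystem (the file names are invented for the example):

    from distutils.filelist import FileList

    fl = FileList()
    fl.set_allfiles(['README', 'src/pkg/a.py', 'src/pkg/data.txt'])
    fl.process_template_line('include README')
    fl.process_template_line('recursive-include src *.py')
    fl.process_template_line('global-exclude *.txt')
    fl.sort()
    fl.remove_duplicates()
    print(fl.files)    # ['README', 'src/pkg/a.py']
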
diff --git a/third_party/python/setuptools/setuptools/_distutils/log.py b/third_party/python/setuptools/setuptools/_distutils/log.py
new file mode 100644
index 0000000000..8ef6b28ea2
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/log.py
@@ -0,0 +1,77 @@
+"""A simple log mechanism styled after PEP 282."""
+
+# The class here is styled after PEP 282 so that it could later be
+# replaced with a standard Python logging implementation.
+
+DEBUG = 1
+INFO = 2
+WARN = 3
+ERROR = 4
+FATAL = 5
+
+import sys
+
+class Log:
+
+ def __init__(self, threshold=WARN):
+ self.threshold = threshold
+
+ def _log(self, level, msg, args):
+ if level not in (DEBUG, INFO, WARN, ERROR, FATAL):
+ raise ValueError('%s wrong log level' % str(level))
+
+ if level >= self.threshold:
+ if args:
+ msg = msg % args
+ if level in (WARN, ERROR, FATAL):
+ stream = sys.stderr
+ else:
+ stream = sys.stdout
+ try:
+ stream.write('%s\n' % msg)
+ except UnicodeEncodeError:
+ # emulate backslashreplace error handler
+ encoding = stream.encoding
+ msg = msg.encode(encoding, "backslashreplace").decode(encoding)
+ stream.write('%s\n' % msg)
+ stream.flush()
+
+ def log(self, level, msg, *args):
+ self._log(level, msg, args)
+
+ def debug(self, msg, *args):
+ self._log(DEBUG, msg, args)
+
+ def info(self, msg, *args):
+ self._log(INFO, msg, args)
+
+ def warn(self, msg, *args):
+ self._log(WARN, msg, args)
+
+ def error(self, msg, *args):
+ self._log(ERROR, msg, args)
+
+ def fatal(self, msg, *args):
+ self._log(FATAL, msg, args)
+
+_global_log = Log()
+log = _global_log.log
+debug = _global_log.debug
+info = _global_log.info
+warn = _global_log.warn
+error = _global_log.error
+fatal = _global_log.fatal
+
+def set_threshold(level):
+ # return the old threshold for use from tests
+ old = _global_log.threshold
+ _global_log.threshold = level
+ return old
+
+def set_verbosity(v):
+ if v <= 0:
+ set_threshold(WARN)
+ elif v == 1:
+ set_threshold(INFO)
+ elif v >= 2:
+ set_threshold(DEBUG)
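
The module exposes a single global Log instance through module-level
aliases, so callers treat it like a lightweight logging module. A quick
sketch (importing via the stdlib-style 'distutils' name):

    from distutils import log

    old = log.set_threshold(log.DEBUG)        # returns the previous threshold
    log.info('compiling %s', 'foo.c')         # DEBUG/INFO go to stdout
    log.warn('no files found in %s', 'docs')  # WARN/ERROR/FATAL go to stderr
    log.set_threshold(old)                    # restore the old threshold
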
diff --git a/third_party/python/setuptools/setuptools/_distutils/msvc9compiler.py b/third_party/python/setuptools/setuptools/_distutils/msvc9compiler.py
new file mode 100644
index 0000000000..6934e964ab
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/msvc9compiler.py
@@ -0,0 +1,788 @@
+"""distutils.msvc9compiler
+
+Contains MSVCCompiler, an implementation of the abstract CCompiler class
+for Microsoft Visual Studio 2008.
+
+The module is compatible with VS 2005 and VS 2008. You can find legacy support
+for older versions of VS in distutils.msvccompiler.
+"""
+
+# Written by Perry Stoll
+# hacked by Robin Becker and Thomas Heller to do a better job of
+# finding DevStudio (through the registry)
+# ported to VS2005 and VS 2008 by Christian Heimes
+
+import os
+import subprocess
+import sys
+import re
+
+from distutils.errors import DistutilsExecError, DistutilsPlatformError, \
+ CompileError, LibError, LinkError
+from distutils.ccompiler import CCompiler, gen_lib_options
+from distutils import log
+from distutils.util import get_platform
+
+import winreg
+
+RegOpenKeyEx = winreg.OpenKeyEx
+RegEnumKey = winreg.EnumKey
+RegEnumValue = winreg.EnumValue
+RegError = winreg.error
+
+HKEYS = (winreg.HKEY_USERS,
+ winreg.HKEY_CURRENT_USER,
+ winreg.HKEY_LOCAL_MACHINE,
+ winreg.HKEY_CLASSES_ROOT)
+
+NATIVE_WIN64 = (sys.platform == 'win32' and sys.maxsize > 2**32)
+if NATIVE_WIN64:
+ # Visual C++ is a 32-bit application, so we need to look in
+ # the corresponding registry branch, if we're running a
+ # 64-bit Python on Win64
+ VS_BASE = r"Software\Wow6432Node\Microsoft\VisualStudio\%0.1f"
+ WINSDK_BASE = r"Software\Wow6432Node\Microsoft\Microsoft SDKs\Windows"
+ NET_BASE = r"Software\Wow6432Node\Microsoft\.NETFramework"
+else:
+ VS_BASE = r"Software\Microsoft\VisualStudio\%0.1f"
+ WINSDK_BASE = r"Software\Microsoft\Microsoft SDKs\Windows"
+ NET_BASE = r"Software\Microsoft\.NETFramework"
+
+# A map keyed by get_platform() return values to values accepted by
+# 'vcvarsall.bat'. Note a cross-compile may combine these (eg, 'x86_amd64' is
+# the param to cross-compile on x86 targeting amd64.)
+PLAT_TO_VCVARS = {
+ 'win32' : 'x86',
+ 'win-amd64' : 'amd64',
+}
+
+class Reg:
+ """Helper class to read values from the registry
+ """
+
+ def get_value(cls, path, key):
+ for base in HKEYS:
+ d = cls.read_values(base, path)
+ if d and key in d:
+ return d[key]
+ raise KeyError(key)
+ get_value = classmethod(get_value)
+
+ def read_keys(cls, base, key):
+ """Return list of registry keys."""
+ try:
+ handle = RegOpenKeyEx(base, key)
+ except RegError:
+ return None
+ L = []
+ i = 0
+ while True:
+ try:
+ k = RegEnumKey(handle, i)
+ except RegError:
+ break
+ L.append(k)
+ i += 1
+ return L
+ read_keys = classmethod(read_keys)
+
+ def read_values(cls, base, key):
+ """Return dict of registry keys and values.
+
+ All names are converted to lowercase.
+ """
+ try:
+ handle = RegOpenKeyEx(base, key)
+ except RegError:
+ return None
+ d = {}
+ i = 0
+ while True:
+ try:
+ name, value, type = RegEnumValue(handle, i)
+ except RegError:
+ break
+ name = name.lower()
+ d[cls.convert_mbcs(name)] = cls.convert_mbcs(value)
+ i += 1
+ return d
+ read_values = classmethod(read_values)
+
+ def convert_mbcs(s):
+ dec = getattr(s, "decode", None)
+ if dec is not None:
+ try:
+ s = dec("mbcs")
+ except UnicodeError:
+ pass
+ return s
+ convert_mbcs = staticmethod(convert_mbcs)
+
+class MacroExpander:
+
+ def __init__(self, version):
+ self.macros = {}
+ self.vsbase = VS_BASE % version
+ self.load_macros(version)
+
+ def set_macro(self, macro, path, key):
+ self.macros["$(%s)" % macro] = Reg.get_value(path, key)
+
+ def load_macros(self, version):
+ self.set_macro("VCInstallDir", self.vsbase + r"\Setup\VC", "productdir")
+ self.set_macro("VSInstallDir", self.vsbase + r"\Setup\VS", "productdir")
+ self.set_macro("FrameworkDir", NET_BASE, "installroot")
+ try:
+ if version >= 8.0:
+ self.set_macro("FrameworkSDKDir", NET_BASE,
+ "sdkinstallrootv2.0")
+ else:
+ raise KeyError("sdkinstallrootv2.0")
+ except KeyError:
+ raise DistutilsPlatformError(
+ """Python was built with Visual Studio 2008;
+extensions must be built with a compiler that can generate compatible binaries.
+Visual Studio 2008 was not found on this system. If you have Cygwin installed,
+you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
+
+ if version >= 9.0:
+ self.set_macro("FrameworkVersion", self.vsbase, "clr version")
+ self.set_macro("WindowsSdkDir", WINSDK_BASE, "currentinstallfolder")
+ else:
+ p = r"Software\Microsoft\NET Framework Setup\Product"
+ for base in HKEYS:
+ try:
+ h = RegOpenKeyEx(base, p)
+ except RegError:
+ continue
+ key = RegEnumKey(h, 0)
+ d = Reg.get_value(base, r"%s\%s" % (p, key))
+ self.macros["$(FrameworkVersion)"] = d["version"]
+
+ def sub(self, s):
+ for k, v in self.macros.items():
+ s = s.replace(k, v)
+ return s
+
+def get_build_version():
+ """Return the version of MSVC that was used to build Python.
+
+ For Python 2.3 and up, the version number is included in
+ sys.version. For earlier versions, assume the compiler is MSVC 6.
+ """
+ prefix = "MSC v."
+ i = sys.version.find(prefix)
+ if i == -1:
+ return 6
+ i = i + len(prefix)
+ s, rest = sys.version[i:].split(" ", 1)
+ majorVersion = int(s[:-2]) - 6
+ if majorVersion >= 13:
+ # v13 was skipped and should be v14
+ majorVersion += 1
+ minorVersion = int(s[2:3]) / 10.0
+ # I don't think paths are affected by minor version in version 6
+ if majorVersion == 6:
+ minorVersion = 0
+ if majorVersion >= 6:
+ return majorVersion + minorVersion
+ # else we don't know what version of the compiler this is
+ return None
+
+def normalize_and_reduce_paths(paths):
+ """Return a list of normalized paths with duplicates removed.
+
+ The current order of paths is maintained.
+ """
+ # Paths are normalized so things like: /a and /a/ aren't both preserved.
+ reduced_paths = []
+ for p in paths:
+ np = os.path.normpath(p)
+ # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
+ if np not in reduced_paths:
+ reduced_paths.append(np)
+ return reduced_paths
+
+def removeDuplicates(variable):
+ """Remove duplicate values of an environment variable.
+ """
+ oldList = variable.split(os.pathsep)
+ newList = []
+ for i in oldList:
+ if i not in newList:
+ newList.append(i)
+ newVariable = os.pathsep.join(newList)
+ return newVariable
+
+def find_vcvarsall(version):
+ """Find the vcvarsall.bat file
+
+ First, it tries to find the productdir of VS 2008 in the registry. If
+ that fails, it falls back to the VS90COMNTOOLS env var.
+ """
+ vsbase = VS_BASE % version
+ try:
+ productdir = Reg.get_value(r"%s\Setup\VC" % vsbase,
+ "productdir")
+ except KeyError:
+ log.debug("Unable to find productdir in registry")
+ productdir = None
+
+ if not productdir or not os.path.isdir(productdir):
+ toolskey = "VS%0.f0COMNTOOLS" % version
+ toolsdir = os.environ.get(toolskey, None)
+
+ if toolsdir and os.path.isdir(toolsdir):
+ productdir = os.path.join(toolsdir, os.pardir, os.pardir, "VC")
+ productdir = os.path.abspath(productdir)
+ if not os.path.isdir(productdir):
+ log.debug("%s is not a valid directory" % productdir)
+ return None
+ else:
+ log.debug("Env var %s is not set or invalid" % toolskey)
+ if not productdir:
+ log.debug("No productdir found")
+ return None
+ vcvarsall = os.path.join(productdir, "vcvarsall.bat")
+ if os.path.isfile(vcvarsall):
+ return vcvarsall
+ log.debug("Unable to find vcvarsall.bat")
+ return None
+
+def query_vcvarsall(version, arch="x86"):
+ """Launch vcvarsall.bat and read the settings from its environment
+ """
+ vcvarsall = find_vcvarsall(version)
+ interesting = {"include", "lib", "libpath", "path"}
+ result = {}
+
+ if vcvarsall is None:
+ raise DistutilsPlatformError("Unable to find vcvarsall.bat")
+ log.debug("Calling 'vcvarsall.bat %s' (version=%s)", arch, version)
+ popen = subprocess.Popen('"%s" %s & set' % (vcvarsall, arch),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ try:
+ stdout, stderr = popen.communicate()
+ if popen.wait() != 0:
+ raise DistutilsPlatformError(stderr.decode("mbcs"))
+
+ stdout = stdout.decode("mbcs")
+ for line in stdout.split("\n"):
+ line = Reg.convert_mbcs(line)
+ if '=' not in line:
+ continue
+ line = line.strip()
+ key, value = line.split('=', 1)
+ key = key.lower()
+ if key in interesting:
+ if value.endswith(os.pathsep):
+ value = value[:-1]
+ result[key] = removeDuplicates(value)
+
+ finally:
+ popen.stdout.close()
+ popen.stderr.close()
+
+ if len(result) != len(interesting):
+ raise ValueError(str(list(result.keys())))
+
+ return result
+
+# More globals
+VERSION = get_build_version()
+if VERSION < 8.0:
+ raise DistutilsPlatformError("VC %0.1f is not supported by this module" % VERSION)
+# MACROS = MacroExpander(VERSION)
+
+class MSVCCompiler(CCompiler) :
+ """Concrete class that implements an interface to Microsoft Visual C++,
+ as defined by the CCompiler abstract class."""
+
+ compiler_type = 'msvc'
+
+ # Just set this so CCompiler's constructor doesn't barf. We currently
+ # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+ # as it really isn't necessary for this sort of single-compiler class.
+ # Would be nice to have a consistent interface with UnixCCompiler,
+ # though, so it's worth thinking about.
+ executables = {}
+
+ # Private class data (need to distinguish C from C++ source for compiler)
+ _c_extensions = ['.c']
+ _cpp_extensions = ['.cc', '.cpp', '.cxx']
+ _rc_extensions = ['.rc']
+ _mc_extensions = ['.mc']
+
+ # Needed for the filename generation methods provided by the
+ # base class, CCompiler.
+ src_extensions = (_c_extensions + _cpp_extensions +
+ _rc_extensions + _mc_extensions)
+ res_extension = '.res'
+ obj_extension = '.obj'
+ static_lib_extension = '.lib'
+ shared_lib_extension = '.dll'
+ static_lib_format = shared_lib_format = '%s%s'
+ exe_extension = '.exe'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ CCompiler.__init__ (self, verbose, dry_run, force)
+ self.__version = VERSION
+ self.__root = r"Software\Microsoft\VisualStudio"
+ # self.__macros = MACROS
+ self.__paths = []
+ # target platform (.plat_name is consistent with 'bdist')
+ self.plat_name = None
+ self.__arch = None # deprecated name
+ self.initialized = False
+
+ def initialize(self, plat_name=None):
+ # multi-init means we would need to check platform same each time...
+ assert not self.initialized, "don't init multiple times"
+ if plat_name is None:
+ plat_name = get_platform()
+ # sanity check for platforms to prevent obscure errors later.
+ ok_plats = 'win32', 'win-amd64'
+ if plat_name not in ok_plats:
+ raise DistutilsPlatformError("--plat-name must be one of %s" %
+ (ok_plats,))
+
+ if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
+ # Assume that the SDK set up everything alright; don't try to be
+ # smarter
+ self.cc = "cl.exe"
+ self.linker = "link.exe"
+ self.lib = "lib.exe"
+ self.rc = "rc.exe"
+ self.mc = "mc.exe"
+ else:
+ # On x86, 'vcvars32.bat amd64' creates an env that doesn't work;
+ # to cross compile, you use 'x86_amd64'.
+ # On AMD64, 'vcvars32.bat amd64' is a native build env; to cross
+ # compile use 'x86' (ie, it runs the x86 compiler directly)
+ if plat_name == get_platform() or plat_name == 'win32':
+ # native build or cross-compile to win32
+ plat_spec = PLAT_TO_VCVARS[plat_name]
+ else:
+ # cross compile from win32 -> some 64bit
+ plat_spec = PLAT_TO_VCVARS[get_platform()] + '_' + \
+ PLAT_TO_VCVARS[plat_name]
+
+ vc_env = query_vcvarsall(VERSION, plat_spec)
+
+ self.__paths = vc_env['path'].split(os.pathsep)
+ os.environ['lib'] = vc_env['lib']
+ os.environ['include'] = vc_env['include']
+
+ if len(self.__paths) == 0:
+ raise DistutilsPlatformError("Python was built with %s, "
+ "and extensions need to be built with the same "
+ "version of the compiler, but it isn't installed."
+ % self.__product)
+
+ self.cc = self.find_exe("cl.exe")
+ self.linker = self.find_exe("link.exe")
+ self.lib = self.find_exe("lib.exe")
+ self.rc = self.find_exe("rc.exe") # resource compiler
+ self.mc = self.find_exe("mc.exe") # message compiler
+ #self.set_path_env_var('lib')
+ #self.set_path_env_var('include')
+
+ # extend the MSVC path with the current path
+ try:
+ for p in os.environ['path'].split(';'):
+ self.__paths.append(p)
+ except KeyError:
+ pass
+ self.__paths = normalize_and_reduce_paths(self.__paths)
+ os.environ['path'] = ";".join(self.__paths)
+
+ self.preprocess_options = None
+ if self.__arch == "x86":
+ self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3',
+ '/DNDEBUG']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
+ '/Z7', '/D_DEBUG']
+ else:
+ # Win64
+ self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
+ '/DNDEBUG']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
+ '/Z7', '/D_DEBUG']
+
+ self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
+ if self.__version >= 7:
+ self.ldflags_shared_debug = [
+ '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
+ ]
+ self.ldflags_static = [ '/nologo']
+
+ self.initialized = True
+
+ # -- Worker methods ------------------------------------------------
+
+ def object_filenames(self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ # Copied from ccompiler.py, extended to return .res as 'object'-file
+ # for .rc input file
+ if output_dir is None: output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ (base, ext) = os.path.splitext (src_name)
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if ext not in self.src_extensions:
+ # Better to raise an exception instead of silently continuing
+ # and later complain about sources and targets having
+ # different lengths
+ raise CompileError ("Don't know how to compile %s" % src_name)
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext in self._rc_extensions:
+ obj_names.append (os.path.join (output_dir,
+ base + self.res_extension))
+ elif ext in self._mc_extensions:
+ obj_names.append (os.path.join (output_dir,
+ base + self.res_extension))
+ else:
+ obj_names.append (os.path.join (output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+
+ def compile(self, sources,
+ output_dir=None, macros=None, include_dirs=None, debug=0,
+ extra_preargs=None, extra_postargs=None, depends=None):
+
+ if not self.initialized:
+ self.initialize()
+ compile_info = self._setup_compile(output_dir, macros, include_dirs,
+ sources, depends, extra_postargs)
+ macros, objects, extra_postargs, pp_opts, build = compile_info
+
+ compile_opts = extra_preargs or []
+ compile_opts.append ('/c')
+ if debug:
+ compile_opts.extend(self.compile_options_debug)
+ else:
+ compile_opts.extend(self.compile_options)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ if debug:
+ # pass the full pathname to MSVC in debug mode,
+ # this allows the debugger to find the source file
+ # without asking the user to browse for it
+ src = os.path.abspath(src)
+
+ if ext in self._c_extensions:
+ input_opt = "/Tc" + src
+ elif ext in self._cpp_extensions:
+ input_opt = "/Tp" + src
+ elif ext in self._rc_extensions:
+ # compile .RC to .RES file
+ input_opt = src
+ output_opt = "/fo" + obj
+ try:
+ self.spawn([self.rc] + pp_opts +
+ [output_opt] + [input_opt])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ elif ext in self._mc_extensions:
+ # Compile .MC to .RC file to .RES file.
+ # * '-h dir' specifies the directory for the
+ # generated include file
+ # * '-r dir' specifies the target directory of the
+ # generated RC file and the binary message resource
+ # it includes
+ #
+ # For now (since there are no options to change this),
+ # we use the source-directory for the include file and
+ # the build directory for the RC file and message
+ # resources. This works at least for win32all.
+ h_dir = os.path.dirname(src)
+ rc_dir = os.path.dirname(obj)
+ try:
+ # first compile .MC to .RC and .H file
+ self.spawn([self.mc] +
+ ['-h', h_dir, '-r', rc_dir] + [src])
+ base, _ = os.path.splitext (os.path.basename (src))
+ rc_file = os.path.join (rc_dir, base + '.rc')
+ # then compile .RC to .RES file
+ self.spawn([self.rc] +
+ ["/fo" + obj] + [rc_file])
+
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ else:
+ # how to handle this file?
+ raise CompileError("Don't know how to compile %s to %s"
+ % (src, obj))
+
+ output_opt = "/Fo" + obj
+ try:
+ self.spawn([self.cc] + compile_opts + pp_opts +
+ [input_opt, output_opt] +
+ extra_postargs)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ return objects
+
+
+ def create_static_lib(self,
+ objects,
+ output_libname,
+ output_dir=None,
+ debug=0,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ (objects, output_dir) = self._fix_object_args(objects, output_dir)
+ output_filename = self.library_filename(output_libname,
+ output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ lib_args = objects + ['/OUT:' + output_filename]
+ if debug:
+ pass # XXX what goes here?
+ try:
+ self.spawn([self.lib] + lib_args)
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ (objects, output_dir) = self._fix_object_args(objects, output_dir)
+ fixed_args = self._fix_lib_args(libraries, library_dirs,
+ runtime_library_dirs)
+ (libraries, library_dirs, runtime_library_dirs) = fixed_args
+
+ if runtime_library_dirs:
+ self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ + str (runtime_library_dirs))
+
+ lib_opts = gen_lib_options(self,
+ library_dirs, runtime_library_dirs,
+ libraries)
+ if output_dir is not None:
+ output_filename = os.path.join(output_dir, output_filename)
+
+ if self._need_link(objects, output_filename):
+ if target_desc == CCompiler.EXECUTABLE:
+ if debug:
+ ldflags = self.ldflags_shared_debug[1:]
+ else:
+ ldflags = self.ldflags_shared[1:]
+ else:
+ if debug:
+ ldflags = self.ldflags_shared_debug
+ else:
+ ldflags = self.ldflags_shared
+
+ export_opts = []
+ for sym in (export_symbols or []):
+ export_opts.append("/EXPORT:" + sym)
+
+ ld_args = (ldflags + lib_opts + export_opts +
+ objects + ['/OUT:' + output_filename])
+
+ # The MSVC linker generates .lib and .exp files, which cannot be
+ # suppressed by any linker switches. The .lib files may even be
+ # needed! Make sure they are generated in the temporary build
+ # directory. Since they have different names for debug and release
+ # builds, they can go into the same directory.
+ build_temp = os.path.dirname(objects[0])
+ if export_symbols is not None:
+ (dll_name, dll_ext) = os.path.splitext(
+ os.path.basename(output_filename))
+ implib_file = os.path.join(
+ build_temp,
+ self.library_filename(dll_name))
+ ld_args.append ('/IMPLIB:' + implib_file)
+
+ self.manifest_setup_ldargs(output_filename, build_temp, ld_args)
+
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+
+ self.mkpath(os.path.dirname(output_filename))
+ try:
+ self.spawn([self.linker] + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+
+ # embed the manifest
+ # XXX - this is somewhat fragile - if mt.exe fails, distutils
+ # will still consider the DLL up-to-date, but it will not have a
+ # manifest. Maybe we should link to a temp file? OTOH, that
+ # implies a build environment error that shouldn't go undetected.
+ mfinfo = self.manifest_get_embed_info(target_desc, ld_args)
+ if mfinfo is not None:
+ mffilename, mfid = mfinfo
+ out_arg = '-outputresource:%s;%s' % (output_filename, mfid)
+ try:
+ self.spawn(['mt.exe', '-nologo', '-manifest',
+ mffilename, out_arg])
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
+ # If we need a manifest at all, an embedded manifest is recommended.
+ # See MSDN article titled
+ # "How to: Embed a Manifest Inside a C/C++ Application"
+ # (currently at http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx)
+ # Ask the linker to generate the manifest in the temp dir, so
+ # we can check it, and possibly embed it, later.
+ temp_manifest = os.path.join(
+ build_temp,
+ os.path.basename(output_filename) + ".manifest")
+ ld_args.append('/MANIFESTFILE:' + temp_manifest)
+
+ def manifest_get_embed_info(self, target_desc, ld_args):
+ # If a manifest should be embedded, return a tuple of
+ # (manifest_filename, resource_id). Returns None if no manifest
+ # should be embedded. See http://bugs.python.org/issue7833 for why
+ # we want to avoid any manifest for extension modules if we can)
+ for arg in ld_args:
+ if arg.startswith("/MANIFESTFILE:"):
+ temp_manifest = arg.split(":", 1)[1]
+ break
+ else:
+ # no /MANIFESTFILE so nothing to do.
+ return None
+ if target_desc == CCompiler.EXECUTABLE:
+ # by default, executables always get the manifest with the
+ # CRT referenced.
+ mfid = 1
+ else:
+ # Extension modules try and avoid any manifest if possible.
+ mfid = 2
+ temp_manifest = self._remove_visual_c_ref(temp_manifest)
+ if temp_manifest is None:
+ return None
+ return temp_manifest, mfid
+
+ def _remove_visual_c_ref(self, manifest_file):
+ try:
+ # Remove references to the Visual C runtime, so they will
+ # fall through to the Visual C dependency of Python.exe.
+ # This way, when installed for a restricted user (e.g.
+ # runtimes are not in WinSxS folder, but in Python's own
+ # folder), the runtimes do not need to be in every folder
+ # with .pyd's.
+ # Returns either the filename of the modified manifest or
+ # None if no manifest should be embedded.
+ manifest_f = open(manifest_file)
+ try:
+ manifest_buf = manifest_f.read()
+ finally:
+ manifest_f.close()
+ pattern = re.compile(
+ r"""<assemblyIdentity.*?name=("|')Microsoft\."""\
+ r"""VC\d{2}\.CRT("|').*?(/>|</assemblyIdentity>)""",
+ re.DOTALL)
+ manifest_buf = re.sub(pattern, "", manifest_buf)
+ pattern = r"<dependentAssembly>\s*</dependentAssembly>"
+ manifest_buf = re.sub(pattern, "", manifest_buf)
+ # Now see if any other assemblies are referenced - if not, we
+ # don't want a manifest embedded.
+ pattern = re.compile(
+ r"""<assemblyIdentity.*?name=(?:"|')(.+?)(?:"|')"""
+ r""".*?(?:/>|</assemblyIdentity>)""", re.DOTALL)
+ if re.search(pattern, manifest_buf) is None:
+ return None
+
+ manifest_f = open(manifest_file, 'w')
+ try:
+ manifest_f.write(manifest_buf)
+ return manifest_file
+ finally:
+ manifest_f.close()
+ except OSError:
+ pass
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function, in
+ # ccompiler.py.
+
+ def library_dir_option(self, dir):
+ return "/LIBPATH:" + dir
+
+ def runtime_library_dir_option(self, dir):
+ raise DistutilsPlatformError(
+ "don't know how to set runtime library search path for MSVC++")
+
+ def library_option(self, lib):
+ return self.library_filename(lib)
+
+
+ def find_library_file(self, dirs, lib, debug=0):
+ # Prefer a debugging library if found (and requested), but deal
+ # with it if we don't have one.
+ if debug:
+ try_names = [lib + "_d", lib]
+ else:
+ try_names = [lib]
+ for dir in dirs:
+ for name in try_names:
+ libfile = os.path.join(dir, self.library_filename (name))
+ if os.path.exists(libfile):
+ return libfile
+ else:
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
+
+ # Helper methods for using the MSVC registry settings
+
+ def find_exe(self, exe):
+ """Return path to an MSVC executable program.
+
+ Tries to find the program in several places: first, one of the
+ MSVC program search paths from the registry; next, the directories
+ in the PATH environment variable. If any of those work, return an
+ absolute path that is known to exist. If none of them work, just
+ return the original program name, 'exe'.
+ """
+ for p in self.__paths:
+ fn = os.path.join(os.path.abspath(p), exe)
+ if os.path.isfile(fn):
+ return fn
+
+ # didn't find it; try existing path
+ for p in os.environ['Path'].split(';'):
+ fn = os.path.join(os.path.abspath(p),exe)
+ if os.path.isfile(fn):
+ return fn
+
+ return exe
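
'query_vcvarsall()' above is the heart of this module: it runs
'vcvarsall.bat <arch> & set' in a subprocess and scrapes 'include', 'lib',
'libpath' and 'path' out of the resulting environment. A Windows-only
sketch, assuming VS 2008 (or a valid VS90COMNTOOLS env var) is installed so
that 'find_vcvarsall()' succeeds:

    from distutils.msvc9compiler import query_vcvarsall

    env = query_vcvarsall(9.0, arch='x86')
    print(sorted(env))                  # ['include', 'lib', 'libpath', 'path']
    print(env['path'].split(';')[0])    # first entry on the compiler PATH
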
diff --git a/third_party/python/setuptools/setuptools/_distutils/msvccompiler.py b/third_party/python/setuptools/setuptools/_distutils/msvccompiler.py
new file mode 100644
index 0000000000..d5857cb1ff
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/msvccompiler.py
@@ -0,0 +1,643 @@
+"""distutils.msvccompiler
+
+Contains MSVCCompiler, an implementation of the abstract CCompiler class
+for Microsoft Visual Studio.
+"""
+
+# Written by Perry Stoll
+# hacked by Robin Becker and Thomas Heller to do a better job of
+# finding DevStudio (through the registry)
+
+import sys, os
+from distutils.errors import \
+ DistutilsExecError, DistutilsPlatformError, \
+ CompileError, LibError, LinkError
+from distutils.ccompiler import \
+ CCompiler, gen_lib_options
+from distutils import log
+
+_can_read_reg = False
+try:
+ import winreg
+
+ _can_read_reg = True
+ hkey_mod = winreg
+
+ RegOpenKeyEx = winreg.OpenKeyEx
+ RegEnumKey = winreg.EnumKey
+ RegEnumValue = winreg.EnumValue
+ RegError = winreg.error
+
+except ImportError:
+ try:
+ import win32api
+ import win32con
+ _can_read_reg = True
+ hkey_mod = win32con
+
+ RegOpenKeyEx = win32api.RegOpenKeyEx
+ RegEnumKey = win32api.RegEnumKey
+ RegEnumValue = win32api.RegEnumValue
+ RegError = win32api.error
+ except ImportError:
+ log.info("Warning: Can't read registry to find the "
+ "necessary compiler setting\n"
+ "Make sure that Python modules winreg, "
+ "win32api or win32con are installed.")
+ pass
+
+if _can_read_reg:
+ HKEYS = (hkey_mod.HKEY_USERS,
+ hkey_mod.HKEY_CURRENT_USER,
+ hkey_mod.HKEY_LOCAL_MACHINE,
+ hkey_mod.HKEY_CLASSES_ROOT)
+
+def read_keys(base, key):
+ """Return list of registry keys."""
+ try:
+ handle = RegOpenKeyEx(base, key)
+ except RegError:
+ return None
+ L = []
+ i = 0
+ while True:
+ try:
+ k = RegEnumKey(handle, i)
+ except RegError:
+ break
+ L.append(k)
+ i += 1
+ return L
+
+def read_values(base, key):
+ """Return dict of registry keys and values.
+
+ All names are converted to lowercase.
+ """
+ try:
+ handle = RegOpenKeyEx(base, key)
+ except RegError:
+ return None
+ d = {}
+ i = 0
+ while True:
+ try:
+ name, value, type = RegEnumValue(handle, i)
+ except RegError:
+ break
+ name = name.lower()
+ d[convert_mbcs(name)] = convert_mbcs(value)
+ i += 1
+ return d
+
+def convert_mbcs(s):
+ dec = getattr(s, "decode", None)
+ if dec is not None:
+ try:
+ s = dec("mbcs")
+ except UnicodeError:
+ pass
+ return s
+
+class MacroExpander:
+ def __init__(self, version):
+ self.macros = {}
+ self.load_macros(version)
+
+ def set_macro(self, macro, path, key):
+ for base in HKEYS:
+ d = read_values(base, path)
+ if d:
+ self.macros["$(%s)" % macro] = d[key]
+ break
+
+ def load_macros(self, version):
+ vsbase = r"Software\Microsoft\VisualStudio\%0.1f" % version
+ self.set_macro("VCInstallDir", vsbase + r"\Setup\VC", "productdir")
+ self.set_macro("VSInstallDir", vsbase + r"\Setup\VS", "productdir")
+ net = r"Software\Microsoft\.NETFramework"
+ self.set_macro("FrameworkDir", net, "installroot")
+ try:
+ if version > 7.0:
+ self.set_macro("FrameworkSDKDir", net, "sdkinstallrootv1.1")
+ else:
+ self.set_macro("FrameworkSDKDir", net, "sdkinstallroot")
+ except KeyError:
+ raise DistutilsPlatformError(
+ """Python was built with Visual Studio 2003;
+extensions must be built with a compiler that can generate compatible binaries.
+Visual Studio 2003 was not found on this system. If you have Cygwin installed,
+you can try compiling with MingW32, by passing "-c mingw32" to setup.py.""")
+
+ p = r"Software\Microsoft\NET Framework Setup\Product"
+ for base in HKEYS:
+ try:
+ h = RegOpenKeyEx(base, p)
+ except RegError:
+ continue
+ key = RegEnumKey(h, 0)
+ d = read_values(base, r"%s\%s" % (p, key))
+ self.macros["$(FrameworkVersion)"] = d["version"]
+
+ def sub(self, s):
+ for k, v in self.macros.items():
+ s = s.replace(k, v)
+ return s
+
+def get_build_version():
+ """Return the version of MSVC that was used to build Python.
+
+ For Python 2.3 and up, the version number is included in
+ sys.version. For earlier versions, assume the compiler is MSVC 6.
+ """
+ prefix = "MSC v."
+ i = sys.version.find(prefix)
+ if i == -1:
+ return 6
+ i = i + len(prefix)
+ s, rest = sys.version[i:].split(" ", 1)
+ majorVersion = int(s[:-2]) - 6
+ if majorVersion >= 13:
+ # v13 was skipped and should be v14
+ majorVersion += 1
+ minorVersion = int(s[2:3]) / 10.0
+ # I don't think paths are affected by minor version in version 6
+ if majorVersion == 6:
+ minorVersion = 0
+ if majorVersion >= 6:
+ return majorVersion + minorVersion
+ # else we don't know what version of the compiler this is
+ return None
+
+def get_build_architecture():
+ """Return the processor architecture.
+
+ Possible results are "Intel" or "AMD64".
+ """
+
+ prefix = " bit ("
+ i = sys.version.find(prefix)
+ if i == -1:
+ return "Intel"
+ j = sys.version.find(")", i)
+ return sys.version[i+len(prefix):j]
+
+def normalize_and_reduce_paths(paths):
+ """Return a list of normalized paths with duplicates removed.
+
+ The current order of paths is maintained.
+ """
+ # Paths are normalized so things like: /a and /a/ aren't both preserved.
+ reduced_paths = []
+ for p in paths:
+ np = os.path.normpath(p)
+ # XXX(nnorwitz): O(n**2), if reduced_paths gets long perhaps use a set.
+ if np not in reduced_paths:
+ reduced_paths.append(np)
+ return reduced_paths
+
+
+class MSVCCompiler(CCompiler) :
+ """Concrete class that implements an interface to Microsoft Visual C++,
+ as defined by the CCompiler abstract class."""
+
+ compiler_type = 'msvc'
+
+ # Just set this so CCompiler's constructor doesn't barf. We currently
+ # don't use the 'set_executables()' bureaucracy provided by CCompiler,
+ # as it really isn't necessary for this sort of single-compiler class.
+ # Would be nice to have a consistent interface with UnixCCompiler,
+ # though, so it's worth thinking about.
+ executables = {}
+
+ # Private class data (need to distinguish C from C++ source for compiler)
+ _c_extensions = ['.c']
+ _cpp_extensions = ['.cc', '.cpp', '.cxx']
+ _rc_extensions = ['.rc']
+ _mc_extensions = ['.mc']
+
+ # Needed for the filename generation methods provided by the
+ # base class, CCompiler.
+ src_extensions = (_c_extensions + _cpp_extensions +
+ _rc_extensions + _mc_extensions)
+ res_extension = '.res'
+ obj_extension = '.obj'
+ static_lib_extension = '.lib'
+ shared_lib_extension = '.dll'
+ static_lib_format = shared_lib_format = '%s%s'
+ exe_extension = '.exe'
+
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ CCompiler.__init__ (self, verbose, dry_run, force)
+ self.__version = get_build_version()
+ self.__arch = get_build_architecture()
+ if self.__arch == "Intel":
+ # x86
+ if self.__version >= 7:
+ self.__root = r"Software\Microsoft\VisualStudio"
+ self.__macros = MacroExpander(self.__version)
+ else:
+ self.__root = r"Software\Microsoft\Devstudio"
+ self.__product = "Visual Studio version %s" % self.__version
+ else:
+ # Win64. Assume this was built with the platform SDK
+ self.__product = "Microsoft SDK compiler %s" % (self.__version + 6)
+
+ self.initialized = False
+
+ def initialize(self):
+ self.__paths = []
+ if "DISTUTILS_USE_SDK" in os.environ and "MSSdk" in os.environ and self.find_exe("cl.exe"):
+ # Assume that the SDK set up everything alright; don't try to be
+ # smarter
+ self.cc = "cl.exe"
+ self.linker = "link.exe"
+ self.lib = "lib.exe"
+ self.rc = "rc.exe"
+ self.mc = "mc.exe"
+ else:
+ self.__paths = self.get_msvc_paths("path")
+
+ if len(self.__paths) == 0:
+ raise DistutilsPlatformError("Python was built with %s, "
+ "and extensions need to be built with the same "
+ "version of the compiler, but it isn't installed."
+ % self.__product)
+
+ self.cc = self.find_exe("cl.exe")
+ self.linker = self.find_exe("link.exe")
+ self.lib = self.find_exe("lib.exe")
+ self.rc = self.find_exe("rc.exe") # resource compiler
+ self.mc = self.find_exe("mc.exe") # message compiler
+ self.set_path_env_var('lib')
+ self.set_path_env_var('include')
+
+ # extend the MSVC path with the current path
+ try:
+ for p in os.environ['path'].split(';'):
+ self.__paths.append(p)
+ except KeyError:
+ pass
+ self.__paths = normalize_and_reduce_paths(self.__paths)
+ os.environ['path'] = ";".join(self.__paths)
+
+ self.preprocess_options = None
+ if self.__arch == "Intel":
+ self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GX' ,
+ '/DNDEBUG']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GX',
+ '/Z7', '/D_DEBUG']
+ else:
+ # Win64
+ self.compile_options = [ '/nologo', '/Ox', '/MD', '/W3', '/GS-' ,
+ '/DNDEBUG']
+ self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3', '/GS-',
+ '/Z7', '/D_DEBUG']
+
+ self.ldflags_shared = ['/DLL', '/nologo', '/INCREMENTAL:NO']
+ if self.__version >= 7:
+ self.ldflags_shared_debug = [
+ '/DLL', '/nologo', '/INCREMENTAL:no', '/DEBUG'
+ ]
+ else:
+ self.ldflags_shared_debug = [
+ '/DLL', '/nologo', '/INCREMENTAL:no', '/pdb:None', '/DEBUG'
+ ]
+ self.ldflags_static = [ '/nologo']
+
+ self.initialized = True
+
+ # -- Worker methods ------------------------------------------------
+
+ def object_filenames(self,
+ source_filenames,
+ strip_dir=0,
+ output_dir=''):
+ # Copied from ccompiler.py, extended to return .res as 'object'-file
+ # for .rc input file
+ if output_dir is None: output_dir = ''
+ obj_names = []
+ for src_name in source_filenames:
+ (base, ext) = os.path.splitext (src_name)
+ base = os.path.splitdrive(base)[1] # Chop off the drive
+ base = base[os.path.isabs(base):] # If abs, chop off leading /
+ if ext not in self.src_extensions:
+ # Better to raise an exception instead of silently continuing
+ # and later complain about sources and targets having
+ # different lengths
+ raise CompileError ("Don't know how to compile %s" % src_name)
+ if strip_dir:
+ base = os.path.basename (base)
+ if ext in self._rc_extensions:
+ obj_names.append (os.path.join (output_dir,
+ base + self.res_extension))
+ elif ext in self._mc_extensions:
+ obj_names.append (os.path.join (output_dir,
+ base + self.res_extension))
+ else:
+ obj_names.append (os.path.join (output_dir,
+ base + self.obj_extension))
+ return obj_names
+
+
+ def compile(self, sources,
+ output_dir=None, macros=None, include_dirs=None, debug=0,
+ extra_preargs=None, extra_postargs=None, depends=None):
+
+ if not self.initialized:
+ self.initialize()
+ compile_info = self._setup_compile(output_dir, macros, include_dirs,
+ sources, depends, extra_postargs)
+ macros, objects, extra_postargs, pp_opts, build = compile_info
+
+ compile_opts = extra_preargs or []
+ compile_opts.append ('/c')
+ if debug:
+ compile_opts.extend(self.compile_options_debug)
+ else:
+ compile_opts.extend(self.compile_options)
+
+ for obj in objects:
+ try:
+ src, ext = build[obj]
+ except KeyError:
+ continue
+ if debug:
+ # pass the full pathname to MSVC in debug mode,
+ # this allows the debugger to find the source file
+ # without asking the user to browse for it
+ src = os.path.abspath(src)
+
+ if ext in self._c_extensions:
+ input_opt = "/Tc" + src
+ elif ext in self._cpp_extensions:
+ input_opt = "/Tp" + src
+ elif ext in self._rc_extensions:
+ # compile .RC to .RES file
+ input_opt = src
+ output_opt = "/fo" + obj
+ try:
+ self.spawn([self.rc] + pp_opts +
+ [output_opt] + [input_opt])
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ elif ext in self._mc_extensions:
+ # Compile .MC to .RC file to .RES file.
+ # * '-h dir' specifies the directory for the
+ # generated include file
+ # * '-r dir' specifies the target directory of the
+ # generated RC file and the binary message resource
+ # it includes
+ #
+ # For now (since there are no options to change this),
+ # we use the source-directory for the include file and
+ # the build directory for the RC file and message
+ # resources. This works at least for win32all.
+ h_dir = os.path.dirname(src)
+ rc_dir = os.path.dirname(obj)
+ try:
+ # first compile .MC to .RC and .H file
+ self.spawn([self.mc] +
+ ['-h', h_dir, '-r', rc_dir] + [src])
+ base, _ = os.path.splitext (os.path.basename (src))
+ rc_file = os.path.join (rc_dir, base + '.rc')
+ # then compile .RC to .RES file
+ self.spawn([self.rc] +
+ ["/fo" + obj] + [rc_file])
+
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+ continue
+ else:
+ # how to handle this file?
+ raise CompileError("Don't know how to compile %s to %s"
+ % (src, obj))
+
+ output_opt = "/Fo" + obj
+ try:
+ self.spawn([self.cc] + compile_opts + pp_opts +
+ [input_opt, output_opt] +
+ extra_postargs)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ return objects
+
+
+ def create_static_lib(self,
+ objects,
+ output_libname,
+ output_dir=None,
+ debug=0,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ (objects, output_dir) = self._fix_object_args(objects, output_dir)
+ output_filename = self.library_filename(output_libname,
+ output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ lib_args = objects + ['/OUT:' + output_filename]
+ if debug:
+ pass # XXX what goes here?
+ try:
+ self.spawn([self.lib] + lib_args)
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+
+ def link(self,
+ target_desc,
+ objects,
+ output_filename,
+ output_dir=None,
+ libraries=None,
+ library_dirs=None,
+ runtime_library_dirs=None,
+ export_symbols=None,
+ debug=0,
+ extra_preargs=None,
+ extra_postargs=None,
+ build_temp=None,
+ target_lang=None):
+
+ if not self.initialized:
+ self.initialize()
+ (objects, output_dir) = self._fix_object_args(objects, output_dir)
+ fixed_args = self._fix_lib_args(libraries, library_dirs,
+ runtime_library_dirs)
+ (libraries, library_dirs, runtime_library_dirs) = fixed_args
+
+ if runtime_library_dirs:
+ self.warn ("I don't know what to do with 'runtime_library_dirs': "
+ + str (runtime_library_dirs))
+
+ lib_opts = gen_lib_options(self,
+ library_dirs, runtime_library_dirs,
+ libraries)
+ if output_dir is not None:
+ output_filename = os.path.join(output_dir, output_filename)
+
+ if self._need_link(objects, output_filename):
+ if target_desc == CCompiler.EXECUTABLE:
+ if debug:
+ ldflags = self.ldflags_shared_debug[1:]
+ else:
+ ldflags = self.ldflags_shared[1:]
+ else:
+ if debug:
+ ldflags = self.ldflags_shared_debug
+ else:
+ ldflags = self.ldflags_shared
+
+ export_opts = []
+ for sym in (export_symbols or []):
+ export_opts.append("/EXPORT:" + sym)
+
+ ld_args = (ldflags + lib_opts + export_opts +
+ objects + ['/OUT:' + output_filename])
+
+ # The MSVC linker generates .lib and .exp files, which cannot be
+ # suppressed by any linker switches. The .lib files may even be
+ # needed! Make sure they are generated in the temporary build
+ # directory. Since they have different names for debug and release
+ # builds, they can go into the same directory.
+ if export_symbols is not None:
+ (dll_name, dll_ext) = os.path.splitext(
+ os.path.basename(output_filename))
+ implib_file = os.path.join(
+ os.path.dirname(objects[0]),
+ self.library_filename(dll_name))
+ ld_args.append ('/IMPLIB:' + implib_file)
+
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+
+ self.mkpath(os.path.dirname(output_filename))
+ try:
+ self.spawn([self.linker] + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function, in
+ # ccompiler.py.
+
+ def library_dir_option(self, dir):
+ return "/LIBPATH:" + dir
+
+ def runtime_library_dir_option(self, dir):
+ raise DistutilsPlatformError(
+ "don't know how to set runtime library search path for MSVC++")
+
+ def library_option(self, lib):
+ return self.library_filename(lib)
+
+
+ def find_library_file(self, dirs, lib, debug=0):
+ # Prefer a debugging library if found (and requested), but deal
+ # with it if we don't have one.
+ if debug:
+ try_names = [lib + "_d", lib]
+ else:
+ try_names = [lib]
+ for dir in dirs:
+ for name in try_names:
+ libfile = os.path.join(dir, self.library_filename (name))
+ if os.path.exists(libfile):
+ return libfile
+ else:
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
+
+ # Helper methods for using the MSVC registry settings
+
+ def find_exe(self, exe):
+ """Return path to an MSVC executable program.
+
+ Tries to find the program in several places: first, one of the
+ MSVC program search paths from the registry; next, the directories
+ in the PATH environment variable. If any of those work, return an
+ absolute path that is known to exist. If none of them work, just
+ return the original program name, 'exe'.
+ """
+ for p in self.__paths:
+ fn = os.path.join(os.path.abspath(p), exe)
+ if os.path.isfile(fn):
+ return fn
+
+ # didn't find it; try existing path
+ for p in os.environ['Path'].split(';'):
+ fn = os.path.join(os.path.abspath(p),exe)
+ if os.path.isfile(fn):
+ return fn
+
+ return exe
+
+ def get_msvc_paths(self, path, platform='x86'):
+ """Get a list of devstudio directories (include, lib or path).
+
+ Return a list of strings. The list will be empty if the registry
+ cannot be read or the appropriate registry keys are not found.
+ """
+ if not _can_read_reg:
+ return []
+
+ path = path + " dirs"
+ if self.__version >= 7:
+ key = (r"%s\%0.1f\VC\VC_OBJECTS_PLATFORM_INFO\Win32\Directories"
+ % (self.__root, self.__version))
+ else:
+ key = (r"%s\6.0\Build System\Components\Platforms"
+ r"\Win32 (%s)\Directories" % (self.__root, platform))
+
+ for base in HKEYS:
+ d = read_values(base, key)
+ if d:
+ if self.__version >= 7:
+ return self.__macros.sub(d[path]).split(";")
+ else:
+ return d[path].split(";")
+ # MSVC 6 seems to create the registry entries we need only when
+ # the GUI is run.
+ if self.__version == 6:
+ for base in HKEYS:
+ if read_values(base, r"%s\6.0" % self.__root) is not None:
+ self.warn("It seems you have Visual Studio 6 installed, "
+ "but the expected registry settings are not present.\n"
+ "You must at least run the Visual Studio GUI once "
+ "so that these entries are created.")
+ break
+ return []
+
+ def set_path_env_var(self, name):
+ """Set environment variable 'name' to an MSVC path type value.
+
+ This is equivalent to a SET command prior to execution of spawned
+ commands.
+ """
+
+ if name == "lib":
+ p = self.get_msvc_paths("library")
+ else:
+ p = self.get_msvc_paths(name)
+ if p:
+ os.environ[name] = ';'.join(p)
+
+
+if get_build_version() >= 8.0:
+ log.debug("Importing new compiler from distutils.msvc9compiler")
+ OldMSVCCompiler = MSVCCompiler
+ from distutils.msvc9compiler import MSVCCompiler
+ # get_build_architecture not really relevant now that we support cross-compiling
+ from distutils.msvc9compiler import MacroExpander
diff --git a/third_party/python/setuptools/setuptools/_distutils/py35compat.py b/third_party/python/setuptools/setuptools/_distutils/py35compat.py
new file mode 100644
index 0000000000..79b2e7f38c
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/py35compat.py
@@ -0,0 +1,19 @@
+import sys
+import subprocess
+
+
+def __optim_args_from_interpreter_flags():
+ """Return a list of command-line arguments reproducing the current
+ optimization settings in sys.flags."""
+ args = []
+ value = sys.flags.optimize
+ if value > 0:
+ args.append("-" + "O" * value)
+ return args
+
+
+_optim_args_from_interpreter_flags = getattr(
+ subprocess,
+ "_optim_args_from_interpreter_flags",
+ __optim_args_from_interpreter_flags,
+)
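+
+
+# A minimal usage sketch (the "-c" payload is an arbitrary placeholder):
+# re-running the current interpreter with its own optimization level.
+def _example_reexec():
+    cmd = [sys.executable] + _optim_args_from_interpreter_flags()
+    cmd += ["-c", "import sys; print(sys.flags.optimize)"]
+    subprocess.check_call(cmd)  # prints 2 when the parent runs under -OO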
diff --git a/third_party/python/setuptools/setuptools/_distutils/py38compat.py b/third_party/python/setuptools/setuptools/_distutils/py38compat.py
new file mode 100644
index 0000000000..7dbe8cef54
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/py38compat.py
@@ -0,0 +1,7 @@
+def aix_platform(osname, version, release):
+ try:
+ import _aix_support
+ return _aix_support.aix_platform()
+ except ImportError:
+ pass
+ return "%s-%s.%s" % (osname, version, release)
diff --git a/third_party/python/setuptools/setuptools/_distutils/spawn.py b/third_party/python/setuptools/setuptools/_distutils/spawn.py
new file mode 100644
index 0000000000..fc592d4a91
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/spawn.py
@@ -0,0 +1,125 @@
+"""distutils.spawn
+
+Provides the 'spawn()' function, a front-end to various platform-
+specific functions for launching another program in a sub-process.
+Also provides the 'find_executable()' to search the path for a given
+executable name.
+"""
+
+import sys
+import os
+import subprocess
+
+from distutils.errors import DistutilsPlatformError, DistutilsExecError
+from distutils.debug import DEBUG
+from distutils import log
+
+
+if sys.platform == 'darwin':
+ _cfg_target = None
+ _cfg_target_split = None
+
+
+def spawn(cmd, search_path=1, verbose=0, dry_run=0, env=None):
+ """Run another program, specified as a command list 'cmd', in a new process.
+
+ 'cmd' is just the argument list for the new process, ie.
+ cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
+ There is no way to run a program with a name different from that of its
+ executable.
+
+ If 'search_path' is true (the default), the system's executable
+ search path will be used to find the program; otherwise, cmd[0]
+ must be the exact path to the executable. If 'dry_run' is true,
+ the command will not actually be run.
+
+ Raise DistutilsExecError if running the program fails in any way; just
+ return on success.
+ """
+ # cmd is documented as a list, but just in case some code passes a tuple
+ # in, protect our %-formatting code against horrible death
+ cmd = list(cmd)
+
+ log.info(' '.join(cmd))
+ if dry_run:
+ return
+
+ if search_path:
+ executable = find_executable(cmd[0])
+ if executable is not None:
+ cmd[0] = executable
+
+ env = env if env is not None else dict(os.environ)
+
+ if sys.platform == 'darwin':
+ global _cfg_target, _cfg_target_split
+ if _cfg_target is None:
+ from distutils import sysconfig
+ _cfg_target = sysconfig.get_config_var(
+ 'MACOSX_DEPLOYMENT_TARGET') or ''
+ if _cfg_target:
+ _cfg_target_split = [int(x) for x in _cfg_target.split('.')]
+ if _cfg_target:
+ # ensure that the deployment target of the build process is not less
+ # than that used when the interpreter was built. This ensures
+ # extension modules are built with correct compatibility values
+ cur_target = os.environ.get('MACOSX_DEPLOYMENT_TARGET', _cfg_target)
+ if _cfg_target_split > [int(x) for x in cur_target.split('.')]:
+ my_msg = ('$MACOSX_DEPLOYMENT_TARGET mismatch: '
+ 'now "%s" but "%s" during configure'
+ % (cur_target, _cfg_target))
+ raise DistutilsPlatformError(my_msg)
+ env.update(MACOSX_DEPLOYMENT_TARGET=cur_target)
+
+ try:
+ proc = subprocess.Popen(cmd, env=env)
+ proc.wait()
+ exitcode = proc.returncode
+ except OSError as exc:
+ if not DEBUG:
+ cmd = cmd[0]
+ raise DistutilsExecError(
+ "command %r failed: %s" % (cmd, exc.args[-1])) from exc
+
+ if exitcode:
+ if not DEBUG:
+ cmd = cmd[0]
+ raise DistutilsExecError(
+ "command %r failed with exit code %s" % (cmd, exitcode))
+
+
+def find_executable(executable, path=None):
+ """Tries to find 'executable' in the directories listed in 'path'.
+
+ 'path' is a string listing directories separated by 'os.pathsep'; it
+ defaults to os.environ['PATH']. Returns the complete filename or None
+ if not found.
+ """
+ _, ext = os.path.splitext(executable)
+ if (sys.platform == 'win32') and (ext != '.exe'):
+ executable = executable + '.exe'
+
+ if os.path.isfile(executable):
+ return executable
+
+ if path is None:
+ path = os.environ.get('PATH', None)
+ if path is None:
+ try:
+ path = os.confstr("CS_PATH")
+ except (AttributeError, ValueError):
+ # os.confstr() or CS_PATH is not available
+ path = os.defpath
+ # bpo-35755: Don't use os.defpath if the PATH environment variable is
+ # set to an empty string
+
+ # PATH='' doesn't match, whereas PATH=':' looks in the current directory
+ if not path:
+ return None
+
+ paths = path.split(os.pathsep)
+ for p in paths:
+ f = os.path.join(p, executable)
+ if os.path.isfile(f):
+ # the file exists, we have a shot at spawn working
+ return f
+ return None
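+
+
+# A minimal usage sketch ('cc' is a placeholder program name): on win32
+# the '.exe' suffix is appended automatically before searching.
+def _example_find():
+    compiler = find_executable('cc')
+    return compiler  # absolute path, or None if nothing on PATH matched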
diff --git a/third_party/python/setuptools/setuptools/_distutils/sysconfig.py b/third_party/python/setuptools/setuptools/_distutils/sysconfig.py
new file mode 100644
index 0000000000..879b6981ed
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/sysconfig.py
@@ -0,0 +1,573 @@
+"""Provide access to Python's configuration information. The specific
+configuration variables available depend heavily on the platform and
+configuration. The values may be retrieved using
+get_config_var(name), and the list of variables is available via
+get_config_vars().keys(). Additional convenience functions are also
+available.
+
+Written by: Fred L. Drake, Jr.
+Email: <fdrake@acm.org>
+"""
+
+import _imp
+import os
+import re
+import sys
+
+from .errors import DistutilsPlatformError
+
+IS_PYPY = '__pypy__' in sys.builtin_module_names
+
+# These are needed in a couple of spots, so just compute them once.
+PREFIX = os.path.normpath(sys.prefix)
+EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
+BASE_PREFIX = os.path.normpath(sys.base_prefix)
+BASE_EXEC_PREFIX = os.path.normpath(sys.base_exec_prefix)
+
+# Path to the base directory of the project. On Windows the binary may
+# live in project/PCbuild/win32 or project/PCbuild/amd64.
+ # It can be set explicitly for cross builds via _PYTHON_PROJECT_BASE.
+if "_PYTHON_PROJECT_BASE" in os.environ:
+ project_base = os.path.abspath(os.environ["_PYTHON_PROJECT_BASE"])
+else:
+ if sys.executable:
+ project_base = os.path.dirname(os.path.abspath(sys.executable))
+ else:
+ # sys.executable can be empty if argv[0] has been changed and Python is
+ # unable to retrieve the real program name
+ project_base = os.getcwd()
+
+
+# python_build: (Boolean) if true, we're either building Python or
+# building an extension with an un-installed Python, so we use
+# different (hard-wired) directories.
+def _is_python_source_dir(d):
+ for fn in ("Setup", "Setup.local"):
+ if os.path.isfile(os.path.join(d, "Modules", fn)):
+ return True
+ return False
+
+_sys_home = getattr(sys, '_home', None)
+
+if os.name == 'nt':
+ def _fix_pcbuild(d):
+ if d and os.path.normcase(d).startswith(
+ os.path.normcase(os.path.join(PREFIX, "PCbuild"))):
+ return PREFIX
+ return d
+ project_base = _fix_pcbuild(project_base)
+ _sys_home = _fix_pcbuild(_sys_home)
+
+def _python_build():
+ if _sys_home:
+ return _is_python_source_dir(_sys_home)
+ return _is_python_source_dir(project_base)
+
+python_build = _python_build()
+
+
+# Calculate the build qualifier flags if they are defined. Adding the flags
+# to the include and lib directories only makes sense for an installation, not
+# an in-source build.
+build_flags = ''
+try:
+ if not python_build:
+ build_flags = sys.abiflags
+except AttributeError:
+ # It's not a configure-based build, so the sys module doesn't have
+ # this attribute, which is fine.
+ pass
+
+def get_python_version():
+ """Return a string containing the major and minor Python version,
+ leaving off the patchlevel. Sample return values could be '1.5'
+ or '2.2'.
+ """
+ return '%d.%d' % sys.version_info[:2]
+
+
+def get_python_inc(plat_specific=0, prefix=None):
+ """Return the directory containing installed Python header files.
+
+ If 'plat_specific' is false (the default), this is the path to the
+ non-platform-specific header files, i.e. Python.h and so on;
+ otherwise, this is the path to platform-specific header files
+ (namely pyconfig.h).
+
+ If 'prefix' is supplied, use it instead of sys.base_prefix or
+ sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
+ """
+ if prefix is None:
+ prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
+ if IS_PYPY:
+ return os.path.join(prefix, 'include')
+ elif os.name == "posix":
+ if python_build:
+ # Assume the executable is in the build directory. The
+ # pyconfig.h file should be in the same directory. Since
+ # the build directory may not be the source directory, we
+ # must use "srcdir" from the makefile to find the "Include"
+ # directory.
+ if plat_specific:
+ return _sys_home or project_base
+ else:
+ incdir = os.path.join(get_config_var('srcdir'), 'Include')
+ return os.path.normpath(incdir)
+ python_dir = 'python' + get_python_version() + build_flags
+ return os.path.join(prefix, "include", python_dir)
+ elif os.name == "nt":
+ if python_build:
+ # Include both the include and PC dir to ensure we can find
+ # pyconfig.h
+ return (os.path.join(prefix, "include") + os.path.pathsep +
+ os.path.join(prefix, "PC"))
+ return os.path.join(prefix, "include")
+ else:
+ raise DistutilsPlatformError(
+ "I don't know where Python installs its C header files "
+ "on platform '%s'" % os.name)
+
+
+def get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
+ """Return the directory containing the Python library (standard or
+ site additions).
+
+ If 'plat_specific' is true, return the directory containing
+ platform-specific modules, i.e. any module from a non-pure-Python
+ module distribution; otherwise, return the platform-shared library
+ directory. If 'standard_lib' is true, return the directory
+ containing standard Python library modules; otherwise, return the
+ directory for site-specific modules.
+
+ If 'prefix' is supplied, use it instead of sys.base_prefix or
+ sys.base_exec_prefix -- i.e., ignore 'plat_specific'.
+ """
+ if IS_PYPY:
+ # PyPy-specific scheme
+ if prefix is None:
+ prefix = PREFIX
+ if standard_lib:
+ return os.path.join(prefix, "lib-python", sys.version[0])
+ return os.path.join(prefix, 'site-packages')
+
+ if prefix is None:
+ if standard_lib:
+ prefix = plat_specific and BASE_EXEC_PREFIX or BASE_PREFIX
+ else:
+ prefix = plat_specific and EXEC_PREFIX or PREFIX
+
+ if os.name == "posix":
+ if plat_specific or standard_lib:
+ # Platform-specific modules (any module from a non-pure-Python
+ # module distribution) or standard Python library modules.
+ libdir = getattr(sys, "platlibdir", "lib")
+ else:
+ # Pure Python
+ libdir = "lib"
+ libpython = os.path.join(prefix, libdir,
+ "python" + get_python_version())
+ if standard_lib:
+ return libpython
+ else:
+ return os.path.join(libpython, "site-packages")
+ elif os.name == "nt":
+ if standard_lib:
+ return os.path.join(prefix, "Lib")
+ else:
+ return os.path.join(prefix, "Lib", "site-packages")
+ else:
+ raise DistutilsPlatformError(
+ "I don't know where Python installs its library "
+ "on platform '%s'" % os.name)
+
+
+
+def customize_compiler(compiler):
+ """Do any platform-specific customization of a CCompiler instance.
+
+ Mainly needed on Unix, so we can plug in the information that
+ varies across Unices and is stored in Python's Makefile.
+ """
+ if compiler.compiler_type == "unix":
+ if sys.platform == "darwin":
+ # Perform first-time customization of compiler-related
+ # config vars on OS X now that we know we need a compiler.
+ # This is primarily to support Pythons from binary
+ # installers. The kind and paths to build tools on
+ # the user system may vary significantly from the system
+ # that Python itself was built on. Also the user OS
+ # version and build tools may not support the same set
+ # of CPU architectures for universal builds.
+ global _config_vars
+ # Use get_config_var() to ensure _config_vars is initialized.
+ if not get_config_var('CUSTOMIZED_OSX_COMPILER'):
+ import _osx_support
+ _osx_support.customize_compiler(_config_vars)
+ _config_vars['CUSTOMIZED_OSX_COMPILER'] = 'True'
+
+ (cc, cxx, cflags, ccshared, ldshared, shlib_suffix, ar, ar_flags) = \
+ get_config_vars('CC', 'CXX', 'CFLAGS',
+ 'CCSHARED', 'LDSHARED', 'SHLIB_SUFFIX', 'AR', 'ARFLAGS')
+
+ if 'CC' in os.environ:
+ newcc = os.environ['CC']
+ if (sys.platform == 'darwin'
+ and 'LDSHARED' not in os.environ
+ and ldshared.startswith(cc)):
+ # On OS X, if CC is overridden, use that as the default
+ # command for LDSHARED as well
+ ldshared = newcc + ldshared[len(cc):]
+ cc = newcc
+ if 'CXX' in os.environ:
+ cxx = os.environ['CXX']
+ if 'LDSHARED' in os.environ:
+ ldshared = os.environ['LDSHARED']
+ if 'CPP' in os.environ:
+ cpp = os.environ['CPP']
+ else:
+ cpp = cc + " -E" # not always
+ if 'LDFLAGS' in os.environ:
+ ldshared = ldshared + ' ' + os.environ['LDFLAGS']
+ if 'CFLAGS' in os.environ:
+ cflags = cflags + ' ' + os.environ['CFLAGS']
+ ldshared = ldshared + ' ' + os.environ['CFLAGS']
+ if 'CPPFLAGS' in os.environ:
+ cpp = cpp + ' ' + os.environ['CPPFLAGS']
+ cflags = cflags + ' ' + os.environ['CPPFLAGS']
+ ldshared = ldshared + ' ' + os.environ['CPPFLAGS']
+ if 'AR' in os.environ:
+ ar = os.environ['AR']
+ if 'ARFLAGS' in os.environ:
+ archiver = ar + ' ' + os.environ['ARFLAGS']
+ else:
+ archiver = ar + ' ' + ar_flags
+
+ cc_cmd = cc + ' ' + cflags
+ compiler.set_executables(
+ preprocessor=cpp,
+ compiler=cc_cmd,
+ compiler_so=cc_cmd + ' ' + ccshared,
+ compiler_cxx=cxx,
+ linker_so=ldshared,
+ linker_exe=cc,
+ archiver=archiver)
+
+ compiler.shared_lib_extension = shlib_suffix
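+
+
+# A minimal usage sketch: applying these settings (including any
+# CC/CFLAGS/LDSHARED overrides from os.environ) to a freshly created
+# compiler object. The import is local to avoid a circular import.
+def _example_customize():
+    from distutils.ccompiler import new_compiler
+    compiler = new_compiler()  # default compiler class for this platform
+    customize_compiler(compiler)  # no-op for non-unix compiler types
+    return compiler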
+
+
+def get_config_h_filename():
+ """Return full pathname of installed pyconfig.h file."""
+ if python_build:
+ if os.name == "nt":
+ inc_dir = os.path.join(_sys_home or project_base, "PC")
+ else:
+ inc_dir = _sys_home or project_base
+ else:
+ inc_dir = get_python_inc(plat_specific=1)
+
+ return os.path.join(inc_dir, 'pyconfig.h')
+
+
+def get_makefile_filename():
+ """Return full pathname of installed Makefile from the Python build."""
+ if python_build:
+ return os.path.join(_sys_home or project_base, "Makefile")
+ lib_dir = get_python_lib(plat_specific=0, standard_lib=1)
+ config_file = 'config-{}{}'.format(get_python_version(), build_flags)
+ if hasattr(sys.implementation, '_multiarch'):
+ config_file += '-%s' % sys.implementation._multiarch
+ return os.path.join(lib_dir, config_file, 'Makefile')
+
+
+def parse_config_h(fp, g=None):
+ """Parse a config.h-style file.
+
+ A dictionary containing name/value pairs is returned. If an
+ optional dictionary is passed in as the second argument, it is
+ used instead of a new dictionary.
+ """
+ if g is None:
+ g = {}
+ define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
+ undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
+ #
+ while True:
+ line = fp.readline()
+ if not line:
+ break
+ m = define_rx.match(line)
+ if m:
+ n, v = m.group(1, 2)
+ try: v = int(v)
+ except ValueError: pass
+ g[n] = v
+ else:
+ m = undef_rx.match(line)
+ if m:
+ g[m.group(1)] = 0
+ return g
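+
+
+# A minimal usage sketch: HAVE_UNISTD_H is just one example key; every
+# '#define'd name from pyconfig.h ends up in the returned dict.
+def _example_config_h():
+    with open(get_config_h_filename()) as fp:
+        defines = parse_config_h(fp)
+    return defines.get('HAVE_UNISTD_H')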
+
+
+# Regexes needed for parsing Makefile (and similar syntaxes,
+# like old-style Setup files).
+_variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
+_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
+_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
+
+def parse_makefile(fn, g=None):
+ """Parse a Makefile-style file.
+
+ A dictionary containing name/value pairs is returned. If an
+ optional dictionary is passed in as the second argument, it is
+ used instead of a new dictionary.
+ """
+ from distutils.text_file import TextFile
+ fp = TextFile(fn, strip_comments=1, skip_blanks=1, join_lines=1, errors="surrogateescape")
+
+ if g is None:
+ g = {}
+ done = {}
+ notdone = {}
+
+ while True:
+ line = fp.readline()
+ if line is None: # eof
+ break
+ m = _variable_rx.match(line)
+ if m:
+ n, v = m.group(1, 2)
+ v = v.strip()
+ # `$$' is a literal `$' in make
+ tmpv = v.replace('$$', '')
+
+ if "$" in tmpv:
+ notdone[n] = v
+ else:
+ try:
+ v = int(v)
+ except ValueError:
+ # insert literal `$'
+ done[n] = v.replace('$$', '$')
+ else:
+ done[n] = v
+
+ # Variables with a 'PY_' prefix in the makefile. These need to
+ # be made available without that prefix through sysconfig.
+ # Special care is needed to ensure that variable expansion works, even
+ # if the expansion uses the name without a prefix.
+ renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
+
+ # do variable interpolation here
+ while notdone:
+ for name in list(notdone):
+ value = notdone[name]
+ m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
+ if m:
+ n = m.group(1)
+ found = True
+ if n in done:
+ item = str(done[n])
+ elif n in notdone:
+ # get it on a subsequent round
+ found = False
+ elif n in os.environ:
+ # do it like make: fall back to environment
+ item = os.environ[n]
+
+ elif n in renamed_variables:
+ if name.startswith('PY_') and name[3:] in renamed_variables:
+ item = ""
+
+ elif 'PY_' + n in notdone:
+ found = False
+
+ else:
+ item = str(done['PY_' + n])
+ else:
+ done[n] = item = ""
+ if found:
+ after = value[m.end():]
+ value = value[:m.start()] + item + after
+ if "$" in after:
+ notdone[name] = value
+ else:
+ try: value = int(value)
+ except ValueError:
+ done[name] = value.strip()
+ else:
+ done[name] = value
+ del notdone[name]
+
+ if name.startswith('PY_') \
+ and name[3:] in renamed_variables:
+
+ name = name[3:]
+ if name not in done:
+ done[name] = value
+ else:
+ # bogus variable reference; just drop it since we can't deal with it
+ del notdone[name]
+
+ fp.close()
+
+ # strip spurious spaces
+ for k, v in done.items():
+ if isinstance(v, str):
+ done[k] = v.strip()
+
+ # save the results in the global dictionary
+ g.update(done)
+ return g
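+
+
+# A minimal usage sketch (POSIX only, where an installed Makefile
+# exists): values come back as strings, or ints where they parse.
+def _example_makefile_vars():
+    make_vars = parse_makefile(get_makefile_filename())
+    return make_vars.get('CC'), make_vars.get('CFLAGS')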
+
+
+def expand_makefile_vars(s, vars):
+ """Expand Makefile-style variables -- "${foo}" or "$(foo)" -- in
+ 's' according to 'vars' (a dictionary mapping variable names to
+ values). Variables not present in 'vars' are silently expanded to the
+ empty string. The variable values in 'vars' should not contain further
+ variable expansions; if 'vars' is the output of 'parse_makefile()',
+ you're fine. Returns a variable-expanded version of 's'.
+ """
+
+ # This algorithm does multiple expansion, so if vars['foo'] contains
+ # "${bar}", it will expand ${foo} to ${bar}, and then expand
+ # ${bar}... and so forth. This is fine as long as 'vars' comes from
+ # 'parse_makefile()', which takes care of such expansions eagerly,
+ # according to make's variable expansion semantics.
+
+ while True:
+ m = _findvar1_rx.search(s) or _findvar2_rx.search(s)
+ if m:
+ (beg, end) = m.span()
+ s = s[0:beg] + vars.get(m.group(1), '') + s[end:]
+ else:
+ break
+ return s
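+
+
+# A minimal usage sketch with an inline dictionary; real callers usually
+# pass the result of parse_makefile().
+def _example_expand():
+    return expand_makefile_vars('$(CC) ${CFLAGS}',
+                                {'CC': 'cc', 'CFLAGS': '-O2'})  # 'cc -O2'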
+
+
+_config_vars = None
+
+def _init_posix():
+ """Initialize the module as appropriate for POSIX systems."""
+ # _sysconfigdata is generated at build time, see the sysconfig module
+ name = os.environ.get('_PYTHON_SYSCONFIGDATA_NAME',
+ '_sysconfigdata_{abi}_{platform}_{multiarch}'.format(
+ abi=sys.abiflags,
+ platform=sys.platform,
+ multiarch=getattr(sys.implementation, '_multiarch', ''),
+ ))
+ try:
+ _temp = __import__(name, globals(), locals(), ['build_time_vars'], 0)
+ except ImportError:
+ # Python 3.5 and pypy 7.3.1
+ _temp = __import__(
+ '_sysconfigdata', globals(), locals(), ['build_time_vars'], 0)
+ build_time_vars = _temp.build_time_vars
+ global _config_vars
+ _config_vars = {}
+ _config_vars.update(build_time_vars)
+
+
+def _init_nt():
+ """Initialize the module as appropriate for NT"""
+ g = {}
+ # set basic install directories
+ g['LIBDEST'] = get_python_lib(plat_specific=0, standard_lib=1)
+ g['BINLIBDEST'] = get_python_lib(plat_specific=1, standard_lib=1)
+
+ # XXX hmmm.. a normal install puts include files here
+ g['INCLUDEPY'] = get_python_inc(plat_specific=0)
+
+ g['EXT_SUFFIX'] = _imp.extension_suffixes()[0]
+ g['EXE'] = ".exe"
+ g['VERSION'] = get_python_version().replace(".", "")
+ g['BINDIR'] = os.path.dirname(os.path.abspath(sys.executable))
+
+ global _config_vars
+ _config_vars = g
+
+
+def get_config_vars(*args):
+ """With no arguments, return a dictionary of all configuration
+ variables relevant for the current platform. Generally this includes
+ everything needed to build extensions and install both pure modules and
+ extensions. On Unix, this means every variable defined in Python's
+ installed Makefile; on Windows it's a much smaller set.
+
+ With arguments, return a list of values that result from looking up
+ each argument in the configuration variable dictionary.
+ """
+ global _config_vars
+ if _config_vars is None:
+ func = globals().get("_init_" + os.name)
+ if func:
+ func()
+ else:
+ _config_vars = {}
+
+ # Normalized versions of prefix and exec_prefix are handy to have;
+ # in fact, these are the standard versions used most places in the
+ # Distutils.
+ _config_vars['prefix'] = PREFIX
+ _config_vars['exec_prefix'] = EXEC_PREFIX
+
+ if not IS_PYPY:
+ # For backward compatibility, see issue19555
+ SO = _config_vars.get('EXT_SUFFIX')
+ if SO is not None:
+ _config_vars['SO'] = SO
+
+ # Always convert srcdir to an absolute path
+ srcdir = _config_vars.get('srcdir', project_base)
+ if os.name == 'posix':
+ if python_build:
+ # If srcdir is a relative path (typically '.' or '..')
+ # then it should be interpreted relative to the directory
+ # containing Makefile.
+ base = os.path.dirname(get_makefile_filename())
+ srcdir = os.path.join(base, srcdir)
+ else:
+ # srcdir is not meaningful since the installation is
+ # spread about the filesystem. We choose the
+ # directory containing the Makefile since we know it
+ # exists.
+ srcdir = os.path.dirname(get_makefile_filename())
+ _config_vars['srcdir'] = os.path.abspath(os.path.normpath(srcdir))
+
+ # Convert srcdir into an absolute path if it appears necessary.
+ # Normally it is relative to the build directory. However, during
+ # testing, for example, we might be running a non-installed python
+ # from a different directory.
+ if python_build and os.name == "posix":
+ base = project_base
+ if (not os.path.isabs(_config_vars['srcdir']) and
+ base != os.getcwd()):
+ # srcdir is relative and we are not in the same directory
+ # as the executable. Assume executable is in the build
+ # directory and make srcdir absolute.
+ srcdir = os.path.join(base, _config_vars['srcdir'])
+ _config_vars['srcdir'] = os.path.normpath(srcdir)
+
+ # OS X platforms require special customization to handle
+ # multi-architecture, multi-os-version installers
+ if sys.platform == 'darwin':
+ import _osx_support
+ _osx_support.customize_config_vars(_config_vars)
+
+ if args:
+ vals = []
+ for name in args:
+ vals.append(_config_vars.get(name))
+ return vals
+ else:
+ return _config_vars
+
+def get_config_var(name):
+ """Return the value of a single variable using the dictionary
+ returned by 'get_config_vars()'. Equivalent to
+ get_config_vars().get(name)
+ """
+ if name == 'SO':
+ import warnings
+ warnings.warn('SO is deprecated, use EXT_SUFFIX', DeprecationWarning, 2)
+ return get_config_vars().get(name)
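+
+
+# A minimal usage sketch: CC, CFLAGS and EXT_SUFFIX are standard config
+# keys; unknown names simply come back as None.
+def _example_lookup():
+    cc, cflags = get_config_vars('CC', 'CFLAGS')
+    ext_suffix = get_config_var('EXT_SUFFIX')
+    return cc, cflags, ext_suffix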
diff --git a/third_party/python/setuptools/setuptools/_distutils/text_file.py b/third_party/python/setuptools/setuptools/_distutils/text_file.py
new file mode 100644
index 0000000000..93abad38f4
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/text_file.py
@@ -0,0 +1,286 @@
+"""text_file
+
+provides the TextFile class, which gives an interface to text files
+that (optionally) takes care of stripping comments, ignoring blank
+lines, and joining lines with backslashes."""
+
+import sys, io
+
+
+class TextFile:
+ """Provides a file-like object that takes care of all the things you
+ commonly want to do when processing a text file that has some
+ line-by-line syntax: strip comments (as long as "#" is your
+ comment character), skip blank lines, join adjacent lines by
+ escaping the newline (ie. backslash at end of line), strip
+ leading and/or trailing whitespace. All of these are optional
+ and independently controllable.
+
+ Provides a 'warn()' method so you can generate warning messages that
+ report physical line number, even if the logical line in question
+ spans multiple physical lines. Also provides 'unreadline()' for
+ implementing line-at-a-time lookahead.
+
+ Constructor is called as:
+
+ TextFile (filename=None, file=None, **options)
+
+ It bombs (RuntimeError) if both 'filename' and 'file' are None;
+ 'filename' should be a string, and 'file' a file object (or
+ something that provides 'readline()' and 'close()' methods). It is
+ recommended that you supply at least 'filename', so that TextFile
+ can include it in warning messages. If 'file' is not supplied,
+ TextFile creates its own using 'io.open()'.
+
+ The options are all boolean, and affect the value returned by
+ 'readline()':
+ strip_comments [default: true]
+ strip from "#" to end-of-line, as well as any whitespace
+ leading up to the "#" -- unless it is escaped by a backslash
+ lstrip_ws [default: false]
+ strip leading whitespace from each line before returning it
+ rstrip_ws [default: true]
+ strip trailing whitespace (including line terminator!) from
+ each line before returning it
+ skip_blanks [default: true]
+ skip lines that are empty *after* stripping comments and
+ whitespace. (If both lstrip_ws and rstrip_ws are false,
+ then some lines may consist of solely whitespace: these will
+ *not* be skipped, even if 'skip_blanks' is true.)
+ join_lines [default: false]
+ if a backslash is the last non-newline character on a line
+ after stripping comments and whitespace, join the following line
+ to it to form one "logical line"; if N consecutive lines end
+ with a backslash, then N+1 physical lines will be joined to
+ form one logical line.
+ collapse_join [default: false]
+ strip leading whitespace from lines that are joined to their
+ predecessor; only matters if (join_lines and not lstrip_ws)
+ errors [default: 'strict']
+ error handler used to decode the file content
+
+ Note that since 'rstrip_ws' can strip the trailing newline, the
+ semantics of 'readline()' must differ from those of the builtin file
+ object's 'readline()' method! In particular, 'readline()' returns
+ None for end-of-file: an empty string might just be a blank line (or
+ an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
+ not."""
+
+ default_options = { 'strip_comments': 1,
+ 'skip_blanks': 1,
+ 'lstrip_ws': 0,
+ 'rstrip_ws': 1,
+ 'join_lines': 0,
+ 'collapse_join': 0,
+ 'errors': 'strict',
+ }
+
+ def __init__(self, filename=None, file=None, **options):
+ """Construct a new TextFile object. At least one of 'filename'
+ (a string) and 'file' (a file-like object) must be supplied.
+ The keyword argument options are described above and affect
+ the values returned by 'readline()'."""
+ if filename is None and file is None:
+ raise RuntimeError("you must supply either or both of 'filename' and 'file'")
+
+ # set values for all options -- either from client option hash
+ # or fallback to default_options
+ for opt in self.default_options.keys():
+ if opt in options:
+ setattr(self, opt, options[opt])
+ else:
+ setattr(self, opt, self.default_options[opt])
+
+ # sanity check client option hash
+ for opt in options.keys():
+ if opt not in self.default_options:
+ raise KeyError("invalid TextFile option '%s'" % opt)
+
+ if file is None:
+ self.open(filename)
+ else:
+ self.filename = filename
+ self.file = file
+ self.current_line = 0 # assuming that file is at BOF!
+
+ # 'linebuf' is a stack of lines that will be emptied before we
+ # actually read from the file; it's only populated by an
+ # 'unreadline()' operation
+ self.linebuf = []
+
+ def open(self, filename):
+ """Open a new file named 'filename'. This overrides both the
+ 'filename' and 'file' arguments to the constructor."""
+ self.filename = filename
+ self.file = io.open(self.filename, 'r', errors=self.errors)
+ self.current_line = 0
+
+ def close(self):
+ """Close the current file and forget everything we know about it
+ (filename, current line number)."""
+ file = self.file
+ self.file = None
+ self.filename = None
+ self.current_line = None
+ file.close()
+
+ def gen_error(self, msg, line=None):
+ outmsg = []
+ if line is None:
+ line = self.current_line
+ outmsg.append(self.filename + ", ")
+ if isinstance(line, (list, tuple)):
+ outmsg.append("lines %d-%d: " % tuple(line))
+ else:
+ outmsg.append("line %d: " % line)
+ outmsg.append(str(msg))
+ return "".join(outmsg)
+
+ def error(self, msg, line=None):
+ raise ValueError("error: " + self.gen_error(msg, line))
+
+ def warn(self, msg, line=None):
+ """Print (to stderr) a warning message tied to the current logical
+ line in the current file. If the current logical line in the
+ file spans multiple physical lines, the warning refers to the
+ whole range, eg. "lines 3-5". If 'line' supplied, it overrides
+ the current line number; it may be a list or tuple to indicate a
+ range of physical lines, or an integer for a single physical
+ line."""
+ sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
+
+ def readline(self):
+ """Read and return a single logical line from the current file (or
+ from an internal buffer if lines have previously been "unread"
+ with 'unreadline()'). If the 'join_lines' option is true, this
+ may involve reading multiple physical lines concatenated into a
+ single string. Updates the current line number, so calling
+ 'warn()' after 'readline()' emits a warning about the physical
+ line(s) just read. Returns None on end-of-file, since the empty
+ string can occur if 'rstrip_ws' is true but 'skip_blanks' is
+ not."""
+ # If any "unread" lines waiting in 'linebuf', return the top
+ # one. (We don't actually buffer read-ahead data -- lines only
+ # get put in 'linebuf' if the client explicitly does an
+ # 'unreadline()'.
+ if self.linebuf:
+ line = self.linebuf[-1]
+ del self.linebuf[-1]
+ return line
+
+ buildup_line = ''
+
+ while True:
+ # read the line, make it None if EOF
+ line = self.file.readline()
+ if line == '':
+ line = None
+
+ if self.strip_comments and line:
+
+ # Look for the first "#" in the line. If none, never
+ # mind. If we find one and it's the first character, or
+ # is not preceded by "\", then it starts a comment --
+ # strip the comment, strip whitespace before it, and
+ # carry on. Otherwise, it's just an escaped "#", so
+ # unescape it (and any other escaped "#"'s that might be
+ # lurking in there) and otherwise leave the line alone.
+
+ pos = line.find("#")
+ if pos == -1: # no "#" -- no comments
+ pass
+
+ # It's definitely a comment -- either "#" is the first
+ # character, or it's elsewhere and unescaped.
+ elif pos == 0 or line[pos-1] != "\\":
+ # Have to preserve the trailing newline, because it's
+ # the job of a later step (rstrip_ws) to remove it --
+ # and if rstrip_ws is false, we'd better preserve it!
+ # (NB. this means that if the final line is all comment
+ # and has no trailing newline, we will think that it's
+ # EOF; I think that's OK.)
+ eol = (line[-1] == '\n') and '\n' or ''
+ line = line[0:pos] + eol
+
+ # If all that's left is whitespace, then skip line
+ # *now*, before we try to join it to 'buildup_line' --
+ # that way constructs like
+ # hello \\
+ # # comment that should be ignored
+ # there
+ # result in "hello there".
+ if line.strip() == "":
+ continue
+ else: # it's an escaped "#"
+ line = line.replace("\\#", "#")
+
+ # did previous line end with a backslash? then accumulate
+ if self.join_lines and buildup_line:
+ # oops: end of file
+ if line is None:
+ self.warn("continuation line immediately precedes "
+ "end-of-file")
+ return buildup_line
+
+ if self.collapse_join:
+ line = line.lstrip()
+ line = buildup_line + line
+
+ # careful: pay attention to line number when incrementing it
+ if isinstance(self.current_line, list):
+ self.current_line[1] = self.current_line[1] + 1
+ else:
+ self.current_line = [self.current_line,
+ self.current_line + 1]
+ # just an ordinary line, read it as usual
+ else:
+ if line is None: # eof
+ return None
+
+ # still have to be careful about incrementing the line number!
+ if isinstance(self.current_line, list):
+ self.current_line = self.current_line[1] + 1
+ else:
+ self.current_line = self.current_line + 1
+
+ # strip whitespace however the client wants (leading and
+ # trailing, or one or the other, or neither)
+ if self.lstrip_ws and self.rstrip_ws:
+ line = line.strip()
+ elif self.lstrip_ws:
+ line = line.lstrip()
+ elif self.rstrip_ws:
+ line = line.rstrip()
+
+ # blank line (whether we rstrip'ed or not)? skip to next line
+ # if appropriate
+ if (line == '' or line == '\n') and self.skip_blanks:
+ continue
+
+ if self.join_lines:
+ if line[-1] == '\\':
+ buildup_line = line[:-1]
+ continue
+
+ if line[-2:] == '\\\n':
+ buildup_line = line[0:-2] + '\n'
+ continue
+
+ # well, I guess there's some actual content there: return it
+ return line
+
+ def readlines(self):
+ """Read and return the list of all logical lines remaining in the
+ current file."""
+ lines = []
+ while True:
+ line = self.readline()
+ if line is None:
+ return lines
+ lines.append(line)
+
+ def unreadline(self, line):
+ """Push 'line' (a string) onto an internal buffer that will be
+ checked by future 'readline()' calls. Handy for implementing
+ a parser with line-at-a-time lookahead."""
+ self.linebuf.append(line)
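+
+
+# A minimal usage sketch ('Setup.local' is a placeholder path); these
+# are the same options sysconfig's parse_makefile() uses.
+def _example_read_logical_lines(path='Setup.local'):
+    tf = TextFile(path, strip_comments=1, skip_blanks=1, join_lines=1)
+    try:
+        return tf.readlines()  # logical lines, comments already stripped
+    finally:
+        tf.close()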
diff --git a/third_party/python/setuptools/setuptools/_distutils/unixccompiler.py b/third_party/python/setuptools/setuptools/_distutils/unixccompiler.py
new file mode 100644
index 0000000000..4d7a6de740
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/unixccompiler.py
@@ -0,0 +1,328 @@
+"""distutils.unixccompiler
+
+Contains the UnixCCompiler class, a subclass of CCompiler that handles
+the "typical" Unix-style command-line C compiler:
+ * macros defined with -Dname[=value]
+ * macros undefined with -Uname
+ * include search directories specified with -Idir
+ * libraries specified with -llib
+ * library search directories specified with -Ldir
+ * compile handled by 'cc' (or similar) executable with -c option:
+ compiles .c to .o
+ * link static library handled by 'ar' command (possibly with 'ranlib')
+ * link shared library handled by 'cc -shared'
+"""
+
+import os, sys, re
+
+from distutils import sysconfig
+from distutils.dep_util import newer
+from distutils.ccompiler import \
+ CCompiler, gen_preprocess_options, gen_lib_options
+from distutils.errors import \
+ DistutilsExecError, CompileError, LibError, LinkError
+from distutils import log
+
+if sys.platform == 'darwin':
+ import _osx_support
+
+# XXX Things not currently handled:
+# * optimization/debug/warning flags; we just use whatever's in Python's
+# Makefile and live with it. Is this adequate? If not, we might
+# have to have a bunch of subclasses GNUCCompiler, SGICCompiler,
+# SunCCompiler, and I suspect down that road lies madness.
+# * even if we don't know a warning flag from an optimization flag,
+# we need some way for outsiders to feed preprocessor/compiler/linker
+# flags in to us -- eg. a sysadmin might want to mandate certain flags
+# via a site config file, or a user might want to set something for
+# compiling this module distribution only via the setup.py command
+# line, whatever. As long as these options come from something on the
+# current system, they can be as system-dependent as they like, and we
+# should just happily stuff them into the preprocessor/compiler/linker
+# options and carry on.
+
+
+class UnixCCompiler(CCompiler):
+
+ compiler_type = 'unix'
+
+ # These are used by CCompiler in two places: the constructor sets
+ # instance attributes 'preprocessor', 'compiler', etc. from them, and
+ # 'set_executable()' allows any of these to be set. The defaults here
+ # are pretty generic; they will probably have to be set by an outsider
+ # (eg. using information discovered by the sysconfig module about
+ # Python extensions).
+ executables = {'preprocessor' : None,
+ 'compiler' : ["cc"],
+ 'compiler_so' : ["cc"],
+ 'compiler_cxx' : ["cc"],
+ 'linker_so' : ["cc", "-shared"],
+ 'linker_exe' : ["cc"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : None,
+ }
+
+ if sys.platform[:6] == "darwin":
+ executables['ranlib'] = ["ranlib"]
+
+ # Needed for the filename generation methods provided by the base
+ # class, CCompiler. NB. whoever instantiates/uses a particular
+ # UnixCCompiler instance should set 'shared_lib_extension' -- we set a
+ # reasonable common default here, but it's not necessarily used on all
+ # Unices!
+
+ src_extensions = [".c",".C",".cc",".cxx",".cpp",".m"]
+ obj_extension = ".o"
+ static_lib_extension = ".a"
+ shared_lib_extension = ".so"
+ dylib_lib_extension = ".dylib"
+ xcode_stub_lib_extension = ".tbd"
+ static_lib_format = shared_lib_format = dylib_lib_format = "lib%s%s"
+ xcode_stub_lib_format = dylib_lib_format
+ if sys.platform == "cygwin":
+ exe_extension = ".exe"
+
+ def preprocess(self, source, output_file=None, macros=None,
+ include_dirs=None, extra_preargs=None, extra_postargs=None):
+ fixed_args = self._fix_compile_args(None, macros, include_dirs)
+ ignore, macros, include_dirs = fixed_args
+ pp_opts = gen_preprocess_options(macros, include_dirs)
+ pp_args = self.preprocessor + pp_opts
+ if output_file:
+ pp_args.extend(['-o', output_file])
+ if extra_preargs:
+ pp_args[:0] = extra_preargs
+ if extra_postargs:
+ pp_args.extend(extra_postargs)
+ pp_args.append(source)
+
+ # We need to preprocess: either we're being forced to, or we're
+ # generating output to stdout, or there's a target output file and
+ # the source file is newer than the target (or the target doesn't
+ # exist).
+ if self.force or output_file is None or newer(source, output_file):
+ if output_file:
+ self.mkpath(os.path.dirname(output_file))
+ try:
+ self.spawn(pp_args)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
+ compiler_so = self.compiler_so
+ if sys.platform == 'darwin':
+ compiler_so = _osx_support.compiler_fixup(compiler_so,
+ cc_args + extra_postargs)
+ try:
+ self.spawn(compiler_so + cc_args + [src, '-o', obj] +
+ extra_postargs)
+ except DistutilsExecError as msg:
+ raise CompileError(msg)
+
+ def create_static_lib(self, objects, output_libname,
+ output_dir=None, debug=0, target_lang=None):
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+
+ output_filename = \
+ self.library_filename(output_libname, output_dir=output_dir)
+
+ if self._need_link(objects, output_filename):
+ self.mkpath(os.path.dirname(output_filename))
+ self.spawn(self.archiver +
+ [output_filename] +
+ objects + self.objects)
+
+ # Not many Unices require ranlib anymore -- SunOS 4.x is, I
+ # think, the only major Unix that does. Maybe we need some
+ # platform intelligence here to skip ranlib if it's not
+ # needed -- or maybe Python's configure script took care of
+ # it for us, hence the check for leading colon.
+ if self.ranlib:
+ try:
+ self.spawn(self.ranlib + [output_filename])
+ except DistutilsExecError as msg:
+ raise LibError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ def link(self, target_desc, objects,
+ output_filename, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None,
+ export_symbols=None, debug=0, extra_preargs=None,
+ extra_postargs=None, build_temp=None, target_lang=None):
+ objects, output_dir = self._fix_object_args(objects, output_dir)
+ fixed_args = self._fix_lib_args(libraries, library_dirs,
+ runtime_library_dirs)
+ libraries, library_dirs, runtime_library_dirs = fixed_args
+
+ lib_opts = gen_lib_options(self, library_dirs, runtime_library_dirs,
+ libraries)
+ if not isinstance(output_dir, (str, type(None))):
+ raise TypeError("'output_dir' must be a string or None")
+ if output_dir is not None:
+ output_filename = os.path.join(output_dir, output_filename)
+
+ if self._need_link(objects, output_filename):
+ ld_args = (objects + self.objects +
+ lib_opts + ['-o', output_filename])
+ if debug:
+ ld_args[:0] = ['-g']
+ if extra_preargs:
+ ld_args[:0] = extra_preargs
+ if extra_postargs:
+ ld_args.extend(extra_postargs)
+ self.mkpath(os.path.dirname(output_filename))
+ try:
+ if target_desc == CCompiler.EXECUTABLE:
+ linker = self.linker_exe[:]
+ else:
+ linker = self.linker_so[:]
+ if target_lang == "c++" and self.compiler_cxx:
+ # skip over environment variable settings if /usr/bin/env
+ # is used to set up the linker's environment.
+ # This is needed on OSX. Note: this assumes that the
+ # normal and C++ compiler have the same environment
+ # settings.
+ i = 0
+ if os.path.basename(linker[0]) == "env":
+ i = 1
+ while '=' in linker[i]:
+ i += 1
+
+ if os.path.basename(linker[i]) == 'ld_so_aix':
+ # AIX platforms prefix the compiler with the ld_so_aix
+ # script, so we need to adjust our linker index
+ offset = 1
+ else:
+ offset = 0
+
+ linker[i+offset] = self.compiler_cxx[i]
+
+ if sys.platform == 'darwin':
+ linker = _osx_support.compiler_fixup(linker, ld_args)
+
+ self.spawn(linker + ld_args)
+ except DistutilsExecError as msg:
+ raise LinkError(msg)
+ else:
+ log.debug("skipping %s (up-to-date)", output_filename)
+
+ # -- Miscellaneous methods -----------------------------------------
+ # These are all used by the 'gen_lib_options()' function, in
+ # ccompiler.py.
+
+ def library_dir_option(self, dir):
+ return "-L" + dir
+
+ def _is_gcc(self, compiler_name):
+ return "gcc" in compiler_name or "g++" in compiler_name
+
+ def runtime_library_dir_option(self, dir):
+ # XXX Hackish, at the very least. See Python bug #445902:
+ # http://sourceforge.net/tracker/index.php
+ # ?func=detail&aid=445902&group_id=5470&atid=105470
+ # Linkers on different platforms need different options to
+ # specify that directories need to be added to the list of
+ # directories searched for dependencies when a dynamic library
+ # is sought. GCC on GNU systems (Linux, FreeBSD, ...) has to
+ # be told to pass the -R option through to the linker, whereas
+ # other compilers and gcc on other systems just know this.
+ # Other compilers may need something slightly different. At
+ # this time, there's no way to determine this information from
+ # the configuration data stored in the Python installation, so
+ # we use this hack.
+ compiler = os.path.basename(sysconfig.get_config_var("CC"))
+ if sys.platform[:6] == "darwin":
+ # MacOSX's linker doesn't understand the -R flag at all
+ return "-L" + dir
+ elif sys.platform[:7] == "freebsd":
+ return "-Wl,-rpath=" + dir
+ elif sys.platform[:5] == "hp-ux":
+ if self._is_gcc(compiler):
+ return ["-Wl,+s", "-L" + dir]
+ return ["+s", "-L" + dir]
+ else:
+ if self._is_gcc(compiler):
+ # gcc on non-GNU systems does not need -Wl, but can
+ # use it anyway. Since distutils has always passed in
+ # -Wl whenever gcc was used in the past it is probably
+ # safest to keep doing so.
+ if sysconfig.get_config_var("GNULD") == "yes":
+ # GNU ld needs an extra option to get a RUNPATH
+ # instead of just an RPATH.
+ return "-Wl,--enable-new-dtags,-R" + dir
+ else:
+ return "-Wl,-R" + dir
+ else:
+ # No idea how --enable-new-dtags would be passed on to
+ # ld if this system was using GNU ld. Don't know if a
+ # system like this even exists.
+ return "-R" + dir
+
+ def library_option(self, lib):
+ return "-l" + lib
+
+ def find_library_file(self, dirs, lib, debug=0):
+ shared_f = self.library_filename(lib, lib_type='shared')
+ dylib_f = self.library_filename(lib, lib_type='dylib')
+ xcode_stub_f = self.library_filename(lib, lib_type='xcode_stub')
+ static_f = self.library_filename(lib, lib_type='static')
+
+ if sys.platform == 'darwin':
+ # On OSX users can specify an alternate SDK using
+ # '-isysroot', calculate the SDK root if it is specified
+ # (and use it further on)
+ #
+ # Note that, as of Xcode 7, Apple SDKs may contain textual stub
+ # libraries with .tbd extensions rather than the normal .dylib
+ # shared libraries installed in /. The Apple compiler tool
+ # chain handles this transparently but it can cause problems
+ # for programs that are being built with an SDK and searching
+ # for specific libraries. Callers of find_library_file need to
+ # keep in mind that the base filename of the returned SDK library
+ # file might have a different extension from that of the library
+ # file installed on the running system, for example:
+ # /Applications/Xcode.app/Contents/Developer/Platforms/
+ # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/
+ # usr/lib/libedit.tbd
+ # vs
+ # /usr/lib/libedit.dylib
+ cflags = sysconfig.get_config_var('CFLAGS')
+ m = re.search(r'-isysroot\s*(\S+)', cflags)
+ if m is None:
+ sysroot = '/'
+ else:
+ sysroot = m.group(1)
+
+ for dir in dirs:
+ shared = os.path.join(dir, shared_f)
+ dylib = os.path.join(dir, dylib_f)
+ static = os.path.join(dir, static_f)
+ xcode_stub = os.path.join(dir, xcode_stub_f)
+
+ if sys.platform == 'darwin' and (
+ dir.startswith('/System/') or (
+ dir.startswith('/usr/') and not dir.startswith('/usr/local/'))):
+
+ shared = os.path.join(sysroot, dir[1:], shared_f)
+ dylib = os.path.join(sysroot, dir[1:], dylib_f)
+ static = os.path.join(sysroot, dir[1:], static_f)
+ xcode_stub = os.path.join(sysroot, dir[1:], xcode_stub_f)
+
+ # We're second-guessing the linker here, with not much hard
+ # data to go on: GCC seems to prefer the shared library, so I'm
+ # assuming that *all* Unix C compilers do. And of course I'm
+ # ignoring even GCC's "-static" option. So sue me.
+ if os.path.exists(dylib):
+ return dylib
+ elif os.path.exists(xcode_stub):
+ return xcode_stub
+ elif os.path.exists(shared):
+ return shared
+ elif os.path.exists(static):
+ return static
+
+ # Oops, didn't find it in *any* of 'dirs'
+ return None
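+
+
+# A minimal usage sketch ('hello.c' and the output names are
+# placeholders): customize_compiler() fills in CC/CFLAGS/LDSHARED from
+# Python's build configuration before the compiler is used.
+def _example_build():
+    cc = UnixCCompiler()
+    sysconfig.customize_compiler(cc)
+    objects = cc.compile(['hello.c'], output_dir='build')
+    cc.link_executable(objects, 'hello', output_dir='build')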
diff --git a/third_party/python/setuptools/setuptools/_distutils/util.py b/third_party/python/setuptools/setuptools/_distutils/util.py
new file mode 100644
index 0000000000..f5aca79421
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/util.py
@@ -0,0 +1,561 @@
+"""distutils.util
+
+Miscellaneous utility functions -- anything that doesn't fit into
+one of the other *util.py modules.
+"""
+
+import os
+import re
+import importlib.util
+import string
+import sys
+from distutils.errors import DistutilsPlatformError
+from distutils.dep_util import newer
+from distutils.spawn import spawn
+from distutils import log
+from distutils.errors import DistutilsByteCompileError
+from .py35compat import _optim_args_from_interpreter_flags
+
+
+def get_host_platform():
+ """Return a string that identifies the current platform. This is used mainly to
+ distinguish platform-specific build directories and platform-specific built
+ distributions. Typically includes the OS name and version and the
+ architecture (as supplied by 'os.uname()'), although the exact information
+ included depends on the OS; eg. on Linux, the kernel version isn't
+ particularly important.
+
+ Examples of returned values:
+ linux-i586
+ linux-alpha (?)
+ solaris-2.6-sun4u
+
+ Windows will return one of:
+ win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
+ win32 (all others - specifically, sys.platform is returned)
+
+ For other non-POSIX platforms, currently just returns 'sys.platform'.
+
+ """
+ if os.name == 'nt':
+ if 'amd64' in sys.version.lower():
+ return 'win-amd64'
+ if '(arm)' in sys.version.lower():
+ return 'win-arm32'
+ if '(arm64)' in sys.version.lower():
+ return 'win-arm64'
+ return sys.platform
+
+ # Set for cross builds explicitly
+ if "_PYTHON_HOST_PLATFORM" in os.environ:
+ return os.environ["_PYTHON_HOST_PLATFORM"]
+
+ if os.name != "posix" or not hasattr(os, 'uname'):
+ # XXX what about the architecture? NT is Intel or Alpha,
+ # Mac OS is M68k or PPC, etc.
+ return sys.platform
+
+ # Try to distinguish various flavours of Unix
+
+ (osname, host, release, version, machine) = os.uname()
+
+ # Convert the OS name to lowercase, remove '/' characters, and translate
+ # spaces (for "Power Macintosh")
+ osname = osname.lower().replace('/', '')
+ machine = machine.replace(' ', '_')
+ machine = machine.replace('/', '-')
+
+ if osname[:5] == "linux":
+ # At least on Linux/Intel, 'machine' is the processor --
+ # i386, etc.
+ # XXX what about Alpha, SPARC, etc?
+ return "%s-%s" % (osname, machine)
+ elif osname[:5] == "sunos":
+ if release[0] >= "5": # SunOS 5 == Solaris 2
+ osname = "solaris"
+ release = "%d.%s" % (int(release[0]) - 3, release[2:])
+ # We can't use "platform.architecture()[0]" because of a
+ # bootstrap problem. We use a dict to get an error
+ # if something suspicious happens.
+ bitness = {2147483647:"32bit", 9223372036854775807:"64bit"}
+ machine += ".%s" % bitness[sys.maxsize]
+ # fall through to standard osname-release-machine representation
+ elif osname[:3] == "aix":
+ from .py38compat import aix_platform
+ return aix_platform(osname, version, release)
+ elif osname[:6] == "cygwin":
+ osname = "cygwin"
+ rel_re = re.compile (r'[\d.]+', re.ASCII)
+ m = rel_re.match(release)
+ if m:
+ release = m.group()
+ elif osname[:6] == "darwin":
+ import _osx_support, distutils.sysconfig
+ osname, release, machine = _osx_support.get_platform_osx(
+ distutils.sysconfig.get_config_vars(),
+ osname, release, machine)
+
+ return "%s-%s-%s" % (osname, release, machine)
+
+def get_platform():
+ if os.name == 'nt':
+ TARGET_TO_PLAT = {
+ 'x86' : 'win32',
+ 'x64' : 'win-amd64',
+ 'arm' : 'win-arm32',
+ }
+ return TARGET_TO_PLAT.get(os.environ.get('VSCMD_ARG_TGT_ARCH')) or get_host_platform()
+ else:
+ return get_host_platform()
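+
+
+# A minimal usage sketch: build commands combine this tag with the
+# Python version to name platform-specific build directories, e.g.
+# "build/lib.linux-x86_64-3.9".
+def _example_build_dir():
+    return os.path.join('build', 'lib.%s' % get_platform())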
+
+def convert_path (pathname):
+ """Return 'pathname' as a name that will work on the native filesystem,
+ i.e. split it on '/' and put it back together again using the current
+ directory separator. Needed because filenames in the setup script are
+ always supplied in Unix style, and have to be converted to the local
+ convention before we can actually use them in the filesystem. Raises
+ ValueError on non-Unix-ish systems if 'pathname' either starts or
+ ends with a slash.
+ """
+ if os.sep == '/':
+ return pathname
+ if not pathname:
+ return pathname
+ if pathname[0] == '/':
+ raise ValueError("path '%s' cannot be absolute" % pathname)
+ if pathname[-1] == '/':
+ raise ValueError("path '%s' cannot end with '/'" % pathname)
+
+ paths = pathname.split('/')
+ while '.' in paths:
+ paths.remove('.')
+ if not paths:
+ return os.curdir
+ return os.path.join(*paths)
+
+# convert_path ()
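+
+# Illustrative sketch of convert_path() (hypothetical output, assuming a
+# Windows host where os.sep is '\\'; on POSIX the path is returned unchanged):
+#
+#     >>> convert_path('pkg/sub/module.py')
+#     'pkg\\sub\\module.py'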
+
+
+def change_root (new_root, pathname):
+ """Return 'pathname' with 'new_root' prepended. If 'pathname' is
+ relative, this is equivalent to "os.path.join(new_root,pathname)".
+ Otherwise, it requires making 'pathname' relative and then joining the
+ two, which is tricky on DOS/Windows and Mac OS.
+ """
+ if os.name == 'posix':
+ if not os.path.isabs(pathname):
+ return os.path.join(new_root, pathname)
+ else:
+ return os.path.join(new_root, pathname[1:])
+
+ elif os.name == 'nt':
+ (drive, path) = os.path.splitdrive(pathname)
+ if path[0] == '\\':
+ path = path[1:]
+ return os.path.join(new_root, path)
+
+ else:
+ raise DistutilsPlatformError("nothing known about platform '%s'" % os.name)
+
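+# Illustrative sketch of change_root(), assuming a POSIX host:
+#
+#     >>> change_root('/tmp/stage', '/usr/lib/python3/foo.py')
+#     '/tmp/stage/usr/lib/python3/foo.py'
+#     >>> change_root('/tmp/stage', 'relative/path')
+#     '/tmp/stage/relative/path'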
+
+_environ_checked = 0
+def check_environ ():
+ """Ensure that 'os.environ' has all the environment variables we
+ guarantee that users can use in config files, command-line options,
+ etc. Currently this includes:
+ HOME - user's home directory (Unix only)
+ PLAT - description of the current platform, including hardware
+ and OS (see 'get_platform()')
+ """
+ global _environ_checked
+ if _environ_checked:
+ return
+
+ if os.name == 'posix' and 'HOME' not in os.environ:
+ try:
+ import pwd
+ os.environ['HOME'] = pwd.getpwuid(os.getuid())[5]
+ except (ImportError, KeyError):
+ # bpo-10496: if the current user identifier doesn't exist in the
+ # password database, do nothing
+ pass
+
+ if 'PLAT' not in os.environ:
+ os.environ['PLAT'] = get_platform()
+
+ _environ_checked = 1
+
+
+def subst_vars (s, local_vars):
+ """Perform shell/Perl-style variable substitution on 'string'. Every
+ occurrence of '$' followed by a name is considered a variable, and
+ variable is substituted by the value found in the 'local_vars'
+ dictionary, or in 'os.environ' if it's not in 'local_vars'.
+ 'os.environ' is first checked/augmented to guarantee that it contains
+ certain values: see 'check_environ()'. Raise ValueError for any
+ variables not found in either 'local_vars' or 'os.environ'.
+ """
+ check_environ()
+ def _subst (match, local_vars=local_vars):
+ var_name = match.group(1)
+ if var_name in local_vars:
+ return str(local_vars[var_name])
+ else:
+ return os.environ[var_name]
+
+ try:
+ return re.sub(r'\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)
+ except KeyError as var:
+ raise ValueError("invalid variable '$%s'" % var)
+
+# subst_vars ()
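+
+# Illustrative sketch of subst_vars(); names missing from 'local_vars' would
+# be looked up in os.environ instead:
+#
+#     >>> subst_vars('$name-$version.tar.gz', {'name': 'demo', 'version': '1.0'})
+#     'demo-1.0.tar.gz'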
+
+
+def grok_environment_error (exc, prefix="error: "):
+ # Function kept for backward compatibility.
+ # Used to try clever things with EnvironmentErrors,
+ # but nowadays str(exception) produces good messages.
+ return prefix + str(exc)
+
+
+# Needed by 'split_quoted()'
+_wordchars_re = _squote_re = _dquote_re = None
+def _init_regex():
+ global _wordchars_re, _squote_re, _dquote_re
+ _wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
+ _squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
+ _dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
+
+def split_quoted (s):
+ """Split a string up according to Unix shell-like rules for quotes and
+ backslashes. In short: words are delimited by spaces, as long as those
+ spaces are not escaped by a backslash, or inside a quoted string.
+ Single and double quotes are equivalent, and the quote characters can
+ be backslash-escaped. The backslash is stripped from any two-character
+ escape sequence, leaving only the escaped character. The quote
+ characters are stripped from any quoted string. Returns a list of
+ words.
+ """
+
+ # This is a nice algorithm for splitting up a single string, since it
+ # doesn't require character-by-character examination. It was a little
+ # bit of a brain-bender to get it working right, though...
+ if _wordchars_re is None: _init_regex()
+
+ s = s.strip()
+ words = []
+ pos = 0
+
+ while s:
+ m = _wordchars_re.match(s, pos)
+ end = m.end()
+ if end == len(s):
+ words.append(s[:end])
+ break
+
+ if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
+ words.append(s[:end]) # we definitely have a word delimiter
+ s = s[end:].lstrip()
+ pos = 0
+
+ elif s[end] == '\\': # preserve whatever is being escaped;
+ # will become part of the current word
+ s = s[:end] + s[end+1:]
+ pos = end+1
+
+ else:
+ if s[end] == "'": # slurp singly-quoted string
+ m = _squote_re.match(s, end)
+ elif s[end] == '"': # slurp doubly-quoted string
+ m = _dquote_re.match(s, end)
+ else:
+ raise RuntimeError("this can't happen (bad char '%c')" % s[end])
+
+ if m is None:
+ raise ValueError("bad string (mismatched %s quotes?)" % s[end])
+
+ (beg, end) = m.span()
+ s = s[:beg] + s[beg+1:end-1] + s[end:]
+ pos = m.end() - 2
+
+ if pos >= len(s):
+ words.append(s)
+ break
+
+ return words
+
+# split_quoted ()
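+
+# Illustrative sketch of split_quoted(); quotes group words and are stripped
+# from the result:
+#
+#     >>> split_quoted('gcc -DFOO="a b" -I"/opt/include" file.c')
+#     ['gcc', '-DFOO=a b', '-I/opt/include', 'file.c']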
+
+
+def execute (func, args, msg=None, verbose=0, dry_run=0):
+ """Perform some action that affects the outside world (eg. by
+ writing to the filesystem). Such actions are special because they
+ are disabled by the 'dry_run' flag. This method takes care of all
+ that bureaucracy for you; all you have to do is supply the
+ function to call and an argument tuple for it (to embody the
+ "external action" being performed), and an optional message to
+ print.
+ """
+ if msg is None:
+ msg = "%s%r" % (func.__name__, args)
+ if msg[-2:] == ',)': # correct for singleton tuple
+ msg = msg[0:-2] + ')'
+
+ log.info(msg)
+ if not dry_run:
+ func(*args)
+
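+# Illustrative sketch of execute(); when dry_run is true the action is only
+# logged (the message shown is what log.info would receive):
+#
+#     >>> execute(os.remove, ('build/stamp',), dry_run=1)
+#     # logs "remove('build/stamp')" but deletes nothing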
+
+def strtobool (val):
+ """Convert a string representation of truth to true (1) or false (0).
+
+ True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+ are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+ 'val' is anything else.
+ """
+ val = val.lower()
+ if val in ('y', 'yes', 't', 'true', 'on', '1'):
+ return 1
+ elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+ return 0
+ else:
+ raise ValueError("invalid truth value %r" % (val,))
+
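+# Illustrative sketch of strtobool(); matching is case-insensitive:
+#
+#     >>> strtobool('YES'), strtobool('off')
+#     (1, 0)
+#     >>> strtobool('maybe')
+#     Traceback (most recent call last):
+#     ...
+#     ValueError: invalid truth value 'maybe'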
+
+def byte_compile (py_files,
+ optimize=0, force=0,
+ prefix=None, base_dir=None,
+ verbose=1, dry_run=0,
+ direct=None):
+ """Byte-compile a collection of Python source files to .pyc
+ files in a __pycache__ subdirectory. 'py_files' is a list
+ of files to compile; any files that don't end in ".py" are silently
+ skipped. 'optimize' must be one of the following:
+ 0 - don't optimize
+ 1 - normal optimization (like "python -O")
+ 2 - extra optimization (like "python -OO")
+ If 'force' is true, all files are recompiled regardless of
+ timestamps.
+
+ The source filename encoded in each bytecode file defaults to the
+ filenames listed in 'py_files'; you can modify these with 'prefix' and
+ 'base_dir'. 'prefix' is a string that will be stripped off of each
+ source filename, and 'base_dir' is a directory name that will be
+ prepended (after 'prefix' is stripped). You can supply either or both
+ (or neither) of 'prefix' and 'base_dir', as you wish.
+
+ If 'dry_run' is true, doesn't actually do anything that would
+ affect the filesystem.
+
+ Byte-compilation is either done directly in this interpreter process
+ with the standard py_compile module, or indirectly by writing a
+ temporary script and executing it. Normally, you should let
+ 'byte_compile()' figure out whether to use direct compilation (see
+ the source for details). The 'direct' flag is used by the script
+ generated in indirect mode; unless you know what you're doing, leave
+ it set to None.
+ """
+
+ # Late import to fix a bootstrap issue: _posixsubprocess is built by
+ # setup.py, but setup.py uses distutils.
+ import subprocess
+
+ # nothing is done if sys.dont_write_bytecode is True
+ if sys.dont_write_bytecode:
+ raise DistutilsByteCompileError('byte-compiling is disabled.')
+
+ # First, if the caller didn't force us into direct or indirect mode,
+ # figure out which mode we should be in. We take a conservative
+ # approach: choose direct mode *only* if the current interpreter is
+ # in debug mode and optimize is 0. If we're not in debug mode (-O
+ # or -OO), we don't know which level of optimization this
+ # interpreter is running with, so we can't do direct
+ # byte-compilation and be certain that it's the right thing. Thus,
+ # always compile indirectly if the current interpreter is in either
+ # optimize mode, or if either optimization level was requested by
+ # the caller.
+ if direct is None:
+ direct = (__debug__ and optimize == 0)
+
+ # "Indirect" byte-compilation: write a temporary script and then
+ # run it with the appropriate flags.
+ if not direct:
+ try:
+ from tempfile import mkstemp
+ (script_fd, script_name) = mkstemp(".py")
+ except ImportError:
+ from tempfile import mktemp
+ (script_fd, script_name) = None, mktemp(".py")
+ log.info("writing byte-compilation script '%s'", script_name)
+ if not dry_run:
+ if script_fd is not None:
+ script = os.fdopen(script_fd, "w")
+ else:
+ script = open(script_name, "w")
+
+ with script:
+ script.write("""\
+from distutils.util import byte_compile
+files = [
+""")
+
+ # XXX would be nice to write absolute filenames, just for
+ # safety's sake (script should be more robust in the face of
+ # chdir'ing before running it). But this requires abspath'ing
+ # 'prefix' as well, and that breaks the hack in build_lib's
+ # 'byte_compile()' method that carefully tacks on a trailing
+ # slash (os.sep really) to make sure the prefix here is "just
+ # right". This whole prefix business is rather delicate -- the
+ # problem is that it's really a directory, but I'm treating it
+ # as a dumb string, so trailing slashes and so forth matter.
+
+ #py_files = map(os.path.abspath, py_files)
+ #if prefix:
+ # prefix = os.path.abspath(prefix)
+
+ script.write(",\n".join(map(repr, py_files)) + "]\n")
+ script.write("""
+byte_compile(files, optimize=%r, force=%r,
+ prefix=%r, base_dir=%r,
+ verbose=%r, dry_run=0,
+ direct=1)
+""" % (optimize, force, prefix, base_dir, verbose))
+
+ cmd = [sys.executable]
+ cmd.extend(_optim_args_from_interpreter_flags())
+ cmd.append(script_name)
+ spawn(cmd, dry_run=dry_run)
+ execute(os.remove, (script_name,), "removing %s" % script_name,
+ dry_run=dry_run)
+
+ # "Direct" byte-compilation: use the py_compile module to compile
+ # right here, right now. Note that the script generated in indirect
+ # mode simply calls 'byte_compile()' in direct mode, a weird sort of
+ # cross-process recursion. Hey, it works!
+ else:
+ from py_compile import compile
+
+ for file in py_files:
+ if file[-3:] != ".py":
+ # This lets us be lazy and not filter filenames in
+ # the "install_lib" command.
+ continue
+
+ # Terminology from the py_compile module:
+ # cfile - byte-compiled file
+ # dfile - purported source filename (same as 'file' by default)
+ if optimize >= 0:
+ opt = '' if optimize == 0 else optimize
+ cfile = importlib.util.cache_from_source(
+ file, optimization=opt)
+ else:
+ cfile = importlib.util.cache_from_source(file)
+ dfile = file
+ if prefix:
+ if file[:len(prefix)] != prefix:
+ raise ValueError("invalid prefix: filename %r doesn't start with %r"
+ % (file, prefix))
+ dfile = dfile[len(prefix):]
+ if base_dir:
+ dfile = os.path.join(base_dir, dfile)
+
+ cfile_base = os.path.basename(cfile)
+ if direct:
+ if force or newer(file, cfile):
+ log.info("byte-compiling %s to %s", file, cfile_base)
+ if not dry_run:
+ compile(file, cfile, dfile)
+ else:
+ log.debug("skipping byte-compilation of %s to %s",
+ file, cfile_base)
+
+# byte_compile ()
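+
+# Illustrative sketch of byte_compile() (the paths are hypothetical); with
+# optimize=0 under a non-optimized interpreter this compiles in-process:
+#
+#     >>> byte_compile(['build/lib/pkg/mod.py'], optimize=0, force=1,
+#     ...              prefix='build/lib/', base_dir='/usr/lib/python3/')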
+
+def rfc822_escape (header):
+ """Return a version of the string escaped for inclusion in an
+ RFC-822 header, by ensuring there are 8 spaces after each newline.
+ """
+ lines = header.split('\n')
+ sep = '\n' + 8 * ' '
+ return sep.join(lines)
+
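+# Illustrative sketch of rfc822_escape(); continuation lines are indented so
+# a multi-line value stays within a single header:
+#
+#     >>> rfc822_escape('first line\nsecond line')
+#     'first line\n        second line'
+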
+# 2to3 support
+
+def run_2to3(files, fixer_names=None, options=None, explicit=None):
+ """Invoke 2to3 on a list of Python files.
+ The files should all come from the build area, as the
+ modification is done in-place. To reduce the build time,
+ only files modified since the last invocation of this
+ function should be passed in the files argument."""
+
+ if not files:
+ return
+
+ # Make this class local, to delay import of 2to3
+ from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+ class DistutilsRefactoringTool(RefactoringTool):
+ def log_error(self, msg, *args, **kw):
+ log.error(msg, *args)
+
+ def log_message(self, msg, *args):
+ log.info(msg, *args)
+
+ def log_debug(self, msg, *args):
+ log.debug(msg, *args)
+
+ if fixer_names is None:
+ fixer_names = get_fixers_from_package('lib2to3.fixes')
+ r = DistutilsRefactoringTool(fixer_names, options=options)
+ r.refactor(files, write=True)
+
+def copydir_run_2to3(src, dest, template=None, fixer_names=None,
+ options=None, explicit=None):
+ """Recursively copy a directory, only copying new and changed files,
+ running run_2to3 over all newly copied Python modules afterward.
+
+ If you give a template string, it's parsed like a MANIFEST.in.
+ """
+ from distutils.dir_util import mkpath
+ from distutils.file_util import copy_file
+ from distutils.filelist import FileList
+ filelist = FileList()
+ curdir = os.getcwd()
+ os.chdir(src)
+ try:
+ filelist.findall()
+ finally:
+ os.chdir(curdir)
+ filelist.files[:] = filelist.allfiles
+ if template:
+ for line in template.splitlines():
+ line = line.strip()
+ if not line: continue
+ filelist.process_template_line(line)
+ copied = []
+ for filename in filelist.files:
+ outname = os.path.join(dest, filename)
+ mkpath(os.path.dirname(outname))
+ res = copy_file(os.path.join(src, filename), outname, update=1)
+ if res[1]: copied.append(outname)
+ run_2to3([fn for fn in copied if fn.lower().endswith('.py')],
+ fixer_names=fixer_names, options=options, explicit=explicit)
+ return copied
+
+class Mixin2to3:
+ '''Mixin class for commands that run 2to3.
+ To configure 2to3, setup scripts may either change
+ the class variables, or inherit from individual commands
+ to override how 2to3 is invoked.'''
+
+ # provide list of fixers to run;
+ # defaults to all from lib2to3.fixers
+ fixer_names = None
+
+ # options dictionary
+ options = None
+
+ # list of fixers to invoke even though they are marked as explicit
+ explicit = None
+
+ def run_2to3(self, files):
+ return run_2to3(files, self.fixer_names, self.options, self.explicit)
diff --git a/third_party/python/setuptools/setuptools/_distutils/version.py b/third_party/python/setuptools/setuptools/_distutils/version.py
new file mode 100644
index 0000000000..c33bebaed2
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/version.py
@@ -0,0 +1,347 @@
+#
+# distutils/version.py
+#
+# Implements multiple version numbering conventions for the
+# Python Module Distribution Utilities.
+#
+# $Id$
+#
+
+"""Provides classes to represent module version numbers (one class for
+each style of version numbering). There are currently two such classes
+implemented: StrictVersion and LooseVersion.
+
+Every version number class implements the following interface:
+ * the 'parse' method takes a string and parses it to some internal
+ representation; if the string is an invalid version number,
+ 'parse' raises a ValueError exception
+ * the class constructor takes an optional string argument which,
+ if supplied, is passed to 'parse'
+ * __str__ reconstructs the string that was passed to 'parse' (or
+ an equivalent string -- ie. one that will generate an equivalent
+ version number instance)
+ * __repr__ generates Python code to recreate the version number instance
+ * _cmp compares the current instance with either another instance
+ of the same class or a string (which will be parsed to an instance
+ of the same class, thus must follow the same rules)
+"""
+
+import re
+
+class Version:
+ """Abstract base class for version numbering classes. Just provides
+ constructor (__init__) and reproducer (__repr__), because those
+ seem to be the same for all version numbering classes; and route
+ rich comparisons to _cmp.
+ """
+
+ def __init__ (self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+ def __repr__ (self):
+ return "%s ('%s')" % (self.__class__.__name__, str(self))
+
+ def __eq__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c == 0
+
+ def __lt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c < 0
+
+ def __le__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c <= 0
+
+ def __gt__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c > 0
+
+ def __ge__(self, other):
+ c = self._cmp(other)
+ if c is NotImplemented:
+ return c
+ return c >= 0
+
+
+# Interface for version-number classes -- must be implemented
+# by the following classes (the concrete ones -- Version should
+# be treated as an abstract class).
+# __init__ (string) - create and take same action as 'parse'
+# (string parameter is optional)
+# parse (string) - convert a string representation to whatever
+# internal representation is appropriate for
+# this style of version numbering
+# __str__ (self) - convert back to a string; should be very similar to
+# (if not identical to) the string supplied to parse
+# __repr__ (self) - generate Python code to recreate
+# the instance
+# _cmp (self, other) - compare two version numbers ('other' may
+# be an unparsed version string, or another
+# instance of your version class)
+
+
+class StrictVersion (Version):
+
+ """Version numbering for anal retentives and software idealists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of two or three
+ dot-separated numeric components, with an optional "pre-release" tag
+ on the end. The pre-release tag consists of the letter 'a' or 'b'
+ followed by a number. If the numeric components of two version
+ numbers are equal, then one with a pre-release tag will always
+ be deemed earlier (lesser) than one without.
+
+ The following are valid version numbers (shown in the order that
+ would be obtained by sorting according to the supplied cmp function):
+
+ 0.4 0.4.0 (these two are equivalent)
+ 0.4.1
+ 0.5a1
+ 0.5b3
+ 0.5
+ 0.9.6
+ 1.0
+ 1.0.4a3
+ 1.0.4b1
+ 1.0.4
+
+ The following are examples of invalid version numbers:
+
+ 1
+ 2.7.2.2
+ 1.3.a4
+ 1.3pl1
+ 1.3c4
+
+ The rationale for this version numbering system will be explained
+ in the distutils documentation.
+ """
+
+ version_re = re.compile(r'^(\d+) \. (\d+) (\. (\d+))? ([ab](\d+))?$',
+ re.VERBOSE | re.ASCII)
+
+
+ def parse (self, vstring):
+ match = self.version_re.match(vstring)
+ if not match:
+ raise ValueError("invalid version number '%s'" % vstring)
+
+ (major, minor, patch, prerelease, prerelease_num) = \
+ match.group(1, 2, 4, 5, 6)
+
+ if patch:
+ self.version = tuple(map(int, [major, minor, patch]))
+ else:
+ self.version = tuple(map(int, [major, minor])) + (0,)
+
+ if prerelease:
+ self.prerelease = (prerelease[0], int(prerelease_num))
+ else:
+ self.prerelease = None
+
+
+ def __str__ (self):
+
+ if self.version[2] == 0:
+ vstring = '.'.join(map(str, self.version[0:2]))
+ else:
+ vstring = '.'.join(map(str, self.version))
+
+ if self.prerelease:
+ vstring = vstring + self.prerelease[0] + str(self.prerelease[1])
+
+ return vstring
+
+
+ def _cmp (self, other):
+ if isinstance(other, str):
+ other = StrictVersion(other)
+ elif not isinstance(other, StrictVersion):
+ return NotImplemented
+
+ if self.version != other.version:
+ # numeric versions don't match
+ # prerelease stuff doesn't matter
+ if self.version < other.version:
+ return -1
+ else:
+ return 1
+
+ # have to compare prerelease
+ # case 1: neither has prerelease; they're equal
+ # case 2: self has prerelease, other doesn't; other is greater
+ # case 3: self doesn't have prerelease, other does: self is greater
+ # case 4: both have prerelease: must compare them!
+
+ if (not self.prerelease and not other.prerelease):
+ return 0
+ elif (self.prerelease and not other.prerelease):
+ return -1
+ elif (not self.prerelease and other.prerelease):
+ return 1
+ elif (self.prerelease and other.prerelease):
+ if self.prerelease == other.prerelease:
+ return 0
+ elif self.prerelease < other.prerelease:
+ return -1
+ else:
+ return 1
+ else:
+ assert False, "never get here"
+
+# end class StrictVersion
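+
+# Illustrative sketch of StrictVersion ordering; a pre-release always sorts
+# before the corresponding final release:
+#
+#     >>> StrictVersion('1.0.4a3') < StrictVersion('1.0.4')
+#     True
+#     >>> StrictVersion('0.4') == StrictVersion('0.4.0')
+#     True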
+
+
+# The rules according to Greg Stein:
+# 1) a version number has 1 or more numbers separated by a period or by
+# sequences of letters. If only periods, then these are compared
+# left-to-right to determine an ordering.
+# 2) sequences of letters are part of the tuple for comparison and are
+# compared lexicographically
+# 3) recognize that numeric components may have leading zeroes
+#
+# The LooseVersion class below implements these rules: a version number
+# string is split up into a tuple of integer and string components, and
+# comparison is a simple tuple comparison. This means that version
+# numbers behave in a predictable and obvious way, but a way that might
+# not necessarily be how people *want* version numbers to behave. There
+# wouldn't be a problem if people could stick to purely numeric version
+# numbers: just split on period and compare the numbers as tuples.
+# However, people insist on putting letters into their version numbers;
+# the most common purpose seems to be:
+# - indicating a "pre-release" version
+# ('alpha', 'beta', 'a', 'b', 'pre', 'p')
+# - indicating a post-release patch ('p', 'pl', 'patch')
+# but of course this can't cover all version number schemes, and there's
+# no way to know what a programmer means without asking him.
+#
+# The problem is what to do with letters (and other non-numeric
+# characters) in a version number. The current implementation does the
+# obvious and predictable thing: keep them as strings and compare
+# lexically within a tuple comparison. This has the desired effect if
+# an appended letter sequence implies something "post-release":
+# eg. "0.99" < "0.99pl14" < "1.0", and "5.001" < "5.001m" < "5.002".
+#
+# However, if letters in a version number imply a pre-release version,
+# the "obvious" thing isn't correct. Eg. you would expect that
+# "1.5.1" < "1.5.2a2" < "1.5.2", but under the tuple/lexical comparison
+# implemented here, this just isn't so.
+#
+# Two possible solutions come to mind. The first is to tie the
+# comparison algorithm to a particular set of semantic rules, as has
+# been done in the StrictVersion class above. This works great as long
+# as everyone can go along with bondage and discipline. Hopefully a
+# (large) subset of Python module programmers will agree that the
+# particular flavour of bondage and discipline provided by StrictVersion
+# provides enough benefit to be worth using, and will submit their
+# version numbering scheme to its domination. The free-thinking
+# anarchists in the lot will never give in, though, and something needs
+# to be done to accommodate them.
+#
+# Perhaps a "moderately strict" version class could be implemented that
+# lets almost anything slide (syntactically), and makes some heuristic
+# assumptions about non-digits in version number strings. This could
+# sink into special-case-hell, though; if I was as talented and
+# idiosyncratic as Larry Wall, I'd go ahead and implement a class that
+# somehow knows that "1.2.1" < "1.2.2a2" < "1.2.2" < "1.2.2pl3", and is
+# just as happy dealing with things like "2g6" and "1.13++". I don't
+# think I'm smart enough to do it right though.
+#
+# In any case, I've coded the test suite for this module (see
+# ../test/test_version.py) specifically to fail on things like comparing
+# "1.2a2" and "1.2". That's not because the *code* is doing anything
+# wrong, it's because the simple, obvious design doesn't match my
+# complicated, hairy expectations for real-world version numbers. It
+# would be a snap to fix the test suite to say, "Yep, LooseVersion does
+# the Right Thing" (ie. the code matches the conception). But I'd rather
+# have a conception that matches common notions about version numbers.
+
+class LooseVersion (Version):
+
+ """Version numbering for anarchists and software realists.
+ Implements the standard interface for version number classes as
+ described above. A version number consists of a series of numbers,
+ separated by either periods or strings of letters. When comparing
+ version numbers, the numeric components will be compared
+ numerically, and the alphabetic components lexically. The following
+ are all valid version numbers, in no particular order:
+
+ 1.5.1
+ 1.5.2b2
+ 161
+ 3.10a
+ 8.02
+ 3.4j
+ 1996.07.12
+ 3.2.pl0
+ 3.1.1.6
+ 2g6
+ 11g
+ 0.960923
+ 2.2beta29
+ 1.13++
+ 5.5.kw
+ 2.0b1pl0
+
+ In fact, there is no such thing as an invalid version number under
+ this scheme; the rules for comparison are simple and predictable,
+ but may not always give the results you want (for some definition
+ of "want").
+ """
+
+ component_re = re.compile(r'(\d+ | [a-z]+ | \.)', re.VERBOSE)
+
+ def __init__ (self, vstring=None):
+ if vstring:
+ self.parse(vstring)
+
+
+ def parse (self, vstring):
+ # I've given up on thinking I can reconstruct the version string
+ # from the parsed tuple -- so I just store the string here for
+ # use by __str__
+ self.vstring = vstring
+ components = [x for x in self.component_re.split(vstring)
+ if x and x != '.']
+ for i, obj in enumerate(components):
+ try:
+ components[i] = int(obj)
+ except ValueError:
+ pass
+
+ self.version = components
+
+
+ def __str__ (self):
+ return self.vstring
+
+
+ def __repr__ (self):
+ return "LooseVersion ('%s')" % str(self)
+
+
+ def _cmp (self, other):
+ if isinstance(other, str):
+ other = LooseVersion(other)
+ elif not isinstance(other, LooseVersion):
+ return NotImplemented
+
+ if self.version == other.version:
+ return 0
+ if self.version < other.version:
+ return -1
+ if self.version > other.version:
+ return 1
+
+
+# end class LooseVersion
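+
+# Illustrative sketch of LooseVersion; components are compared as a plain
+# tuple of ints and strings:
+#
+#     >>> LooseVersion('0.99pl14') < LooseVersion('1.0')
+#     True
+#     >>> LooseVersion('1.13++') < LooseVersion('2g6')
+#     True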
diff --git a/third_party/python/setuptools/setuptools/_distutils/versionpredicate.py b/third_party/python/setuptools/setuptools/_distutils/versionpredicate.py
new file mode 100644
index 0000000000..062c98f248
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_distutils/versionpredicate.py
@@ -0,0 +1,166 @@
+"""Module for parsing and testing package version predicate strings.
+"""
+import re
+import distutils.version
+import operator
+
+
+re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)",
+ re.ASCII)
+# (package) (rest)
+
+re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses
+re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$")
+# (comp) (version)
+
+
+def splitUp(pred):
+ """Parse a single version comparison.
+
+ Return (comparison string, StrictVersion)
+ """
+ res = re_splitComparison.match(pred)
+ if not res:
+ raise ValueError("bad package restriction syntax: %r" % pred)
+ comp, verStr = res.groups()
+ return (comp, distutils.version.StrictVersion(verStr))
+
+compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
+ ">": operator.gt, ">=": operator.ge, "!=": operator.ne}
+
+class VersionPredicate:
+ """Parse and test package version predicates.
+
+ >>> v = VersionPredicate('pyepat.abc (>1.0, <3333.3a1, !=1555.1b3)')
+
+ The `name` attribute provides the full dotted name that is given::
+
+ >>> v.name
+ 'pyepat.abc'
+
+ The str() of a `VersionPredicate` provides a normalized
+ human-readable version of the expression::
+
+ >>> print(v)
+ pyepat.abc (> 1.0, < 3333.3a1, != 1555.1b3)
+
+ The `satisfied_by()` method can be used to determine whether a given
+ version number is included in the set described by the version
+ restrictions::
+
+ >>> v.satisfied_by('1.1')
+ True
+ >>> v.satisfied_by('1.4')
+ True
+ >>> v.satisfied_by('1.0')
+ False
+ >>> v.satisfied_by('4444.4')
+ False
+ >>> v.satisfied_by('1555.1b3')
+ False
+
+ `VersionPredicate` is flexible in accepting extra whitespace::
+
+ >>> v = VersionPredicate(' pat( == 0.1 ) ')
+ >>> v.name
+ 'pat'
+ >>> v.satisfied_by('0.1')
+ True
+ >>> v.satisfied_by('0.2')
+ False
+
+ If any version numbers passed in do not conform to the
+ restrictions of `StrictVersion`, a `ValueError` is raised::
+
+ >>> v = VersionPredicate('p1.p2.p3.p4(>=1.0, <=1.3a1, !=1.2zb3)')
+ Traceback (most recent call last):
+ ...
+ ValueError: invalid version number '1.2zb3'
+
+ If the module or package name given does not conform to what's
+ allowed as a legal module or package name, `ValueError` is
+ raised::
+
+ >>> v = VersionPredicate('foo-bar')
+ Traceback (most recent call last):
+ ...
+ ValueError: expected parenthesized list: '-bar'
+
+ >>> v = VersionPredicate('foo bar (12.21)')
+ Traceback (most recent call last):
+ ...
+ ValueError: expected parenthesized list: 'bar (12.21)'
+
+ """
+
+ def __init__(self, versionPredicateStr):
+ """Parse a version predicate string.
+ """
+ # Fields:
+ # name: package name
+ # pred: list of (comparison string, StrictVersion)
+
+ versionPredicateStr = versionPredicateStr.strip()
+ if not versionPredicateStr:
+ raise ValueError("empty package restriction")
+ match = re_validPackage.match(versionPredicateStr)
+ if not match:
+ raise ValueError("bad package name in %r" % versionPredicateStr)
+ self.name, paren = match.groups()
+ paren = paren.strip()
+ if paren:
+ match = re_paren.match(paren)
+ if not match:
+ raise ValueError("expected parenthesized list: %r" % paren)
+ pred_str = match.groups()[0]
+ self.pred = [splitUp(aPred) for aPred in pred_str.split(",")]
+ if not self.pred:
+ raise ValueError("empty parenthesized list in %r"
+ % versionPredicateStr)
+ else:
+ self.pred = []
+
+ def __str__(self):
+ if self.pred:
+ seq = [cond + " " + str(ver) for cond, ver in self.pred]
+ return self.name + " (" + ", ".join(seq) + ")"
+ else:
+ return self.name
+
+ def satisfied_by(self, version):
+ """True if version is compatible with all the predicates in self.
+ The parameter version must be acceptable to the StrictVersion
+ constructor. It may be either a string or StrictVersion.
+ """
+ for cond, ver in self.pred:
+ if not compmap[cond](version, ver):
+ return False
+ return True
+
+
+_provision_rx = None
+
+def split_provision(value):
+ """Return the name and optional version number of a provision.
+
+ The version number, if given, will be returned as a `StrictVersion`
+ instance, otherwise it will be `None`.
+
+ >>> split_provision('mypkg')
+ ('mypkg', None)
+ >>> split_provision(' mypkg( 1.2 ) ')
+ ('mypkg', StrictVersion ('1.2'))
+ """
+ global _provision_rx
+ if _provision_rx is None:
+ _provision_rx = re.compile(
+ r"([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$",
+ re.ASCII)
+ value = value.strip()
+ m = _provision_rx.match(value)
+ if not m:
+ raise ValueError("illegal provides specification: %r" % value)
+ ver = m.group(2) or None
+ if ver:
+ ver = distutils.version.StrictVersion(ver)
+ return m.group(1), ver
diff --git a/third_party/python/setuptools/setuptools/_imp.py b/third_party/python/setuptools/setuptools/_imp.py
new file mode 100644
index 0000000000..451e45a831
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_imp.py
@@ -0,0 +1,82 @@
+"""
+Re-implementation of find_module and get_frozen_object
+from the deprecated imp module.
+"""
+
+import os
+import importlib.util
+import importlib.machinery
+
+from .py34compat import module_from_spec
+
+
+PY_SOURCE = 1
+PY_COMPILED = 2
+C_EXTENSION = 3
+C_BUILTIN = 6
+PY_FROZEN = 7
+
+
+def find_spec(module, paths):
+ finder = (
+ importlib.machinery.PathFinder().find_spec
+ if isinstance(paths, list) else
+ importlib.util.find_spec
+ )
+ return finder(module, paths)
+
+
+def find_module(module, paths=None):
+ """Just like 'imp.find_module()', but with package support"""
+ spec = find_spec(module, paths)
+ if spec is None:
+ raise ImportError("Can't find %s" % module)
+ if not spec.has_location and hasattr(spec, 'submodule_search_locations'):
+ spec = importlib.util.spec_from_loader('__init__.py', spec.loader)
+
+ kind = -1
+ file = None
+ static = isinstance(spec.loader, type)
+ if spec.origin == 'frozen' or static and issubclass(
+ spec.loader, importlib.machinery.FrozenImporter):
+ kind = PY_FROZEN
+ path = None # imp compatibility
+ suffix = mode = '' # imp compatibility
+ elif spec.origin == 'built-in' or static and issubclass(
+ spec.loader, importlib.machinery.BuiltinImporter):
+ kind = C_BUILTIN
+ path = None # imp compatibility
+ suffix = mode = '' # imp compatibility
+ elif spec.has_location:
+ path = spec.origin
+ suffix = os.path.splitext(path)[1]
+ mode = 'r' if suffix in importlib.machinery.SOURCE_SUFFIXES else 'rb'
+
+ if suffix in importlib.machinery.SOURCE_SUFFIXES:
+ kind = PY_SOURCE
+ elif suffix in importlib.machinery.BYTECODE_SUFFIXES:
+ kind = PY_COMPILED
+ elif suffix in importlib.machinery.EXTENSION_SUFFIXES:
+ kind = C_EXTENSION
+
+ if kind in {PY_SOURCE, PY_COMPILED}:
+ file = open(path, mode)
+ else:
+ path = None
+ suffix = mode = ''
+
+ return file, path, (suffix, mode, kind)
+
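+# Illustrative sketch of find_module(); a built-in module has no file to
+# open, so only the kind code C_BUILTIN (6) is meaningful:
+#
+#     >>> find_module('sys')
+#     (None, None, ('', '', 6))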
+
+def get_frozen_object(module, paths=None):
+ spec = find_spec(module, paths)
+ if not spec:
+ raise ImportError("Can't find %s" % module)
+ return spec.loader.get_code(module)
+
+
+def get_module(module, paths, info):
+ spec = find_spec(module, paths)
+ if not spec:
+ raise ImportError("Can't find %s" % module)
+ return module_from_spec(spec)
diff --git a/third_party/python/setuptools/setuptools/_vendor/__init__.py b/third_party/python/setuptools/setuptools/_vendor/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/__init__.py
diff --git a/third_party/python/setuptools/setuptools/_vendor/ordered_set.py b/third_party/python/setuptools/setuptools/_vendor/ordered_set.py
new file mode 100644
index 0000000000..14876000de
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/ordered_set.py
@@ -0,0 +1,488 @@
+"""
+An OrderedSet is a custom MutableSet that remembers its order, so that every
+entry has an index that can be looked up.
+
+Based on a recipe originally posted to ActiveState Recipes by Raymond Hettiger,
+and released under the MIT license.
+"""
+import itertools as it
+from collections import deque
+
+try:
+ # Python 3
+ from collections.abc import MutableSet, Sequence
+except ImportError:
+ # Python 2.7
+ from collections import MutableSet, Sequence
+
+SLICE_ALL = slice(None)
+__version__ = "3.1"
+
+
+def is_iterable(obj):
+ """
+ Are we being asked to look up a list of things, instead of a single thing?
+ We check for the `__iter__` attribute so that this can cover types that
+ don't have to be known by this module, such as NumPy arrays.
+
+ Strings, however, should be considered as atomic values to look up, not
+ iterables. The same goes for tuples, since they are immutable and therefore
+ valid entries.
+
+ We don't need to check for the Python 2 `unicode` type, because it doesn't
+ have an `__iter__` attribute anyway.
+ """
+ return (
+ hasattr(obj, "__iter__")
+ and not isinstance(obj, str)
+ and not isinstance(obj, tuple)
+ )
+
+
+class OrderedSet(MutableSet, Sequence):
+ """
+ An OrderedSet is a custom MutableSet that remembers its order, so that
+ every entry has an index that can be looked up.
+
+ Example:
+ >>> OrderedSet([1, 1, 2, 3, 2])
+ OrderedSet([1, 2, 3])
+ """
+
+ def __init__(self, iterable=None):
+ self.items = []
+ self.map = {}
+ if iterable is not None:
+ self |= iterable
+
+ def __len__(self):
+ """
+ Returns the number of unique elements in the ordered set
+
+ Example:
+ >>> len(OrderedSet([]))
+ 0
+ >>> len(OrderedSet([1, 2]))
+ 2
+ """
+ return len(self.items)
+
+ def __getitem__(self, index):
+ """
+ Get the item at a given index.
+
+ If `index` is a slice, you will get back that slice of items, as a
+ new OrderedSet.
+
+ If `index` is a list or a similar iterable, you'll get a list of
+ items corresponding to those indices. This is similar to NumPy's
+ "fancy indexing". The result is not an OrderedSet because you may ask
+ for duplicate indices, and the number of elements returned should be
+ the number of elements asked for.
+
+ Example:
+ >>> oset = OrderedSet([1, 2, 3])
+ >>> oset[1]
+ 2
+ """
+ if isinstance(index, slice) and index == SLICE_ALL:
+ return self.copy()
+ elif is_iterable(index):
+ return [self.items[i] for i in index]
+ elif hasattr(index, "__index__") or isinstance(index, slice):
+ result = self.items[index]
+ if isinstance(result, list):
+ return self.__class__(result)
+ else:
+ return result
+ else:
+ raise TypeError("Don't know how to index an OrderedSet by %r" % index)
+
+ def copy(self):
+ """
+ Return a shallow copy of this object.
+
+ Example:
+ >>> this = OrderedSet([1, 2, 3])
+ >>> other = this.copy()
+ >>> this == other
+ True
+ >>> this is other
+ False
+ """
+ return self.__class__(self)
+
+ def __getstate__(self):
+ if len(self) == 0:
+ # The state can't be an empty list.
+ # We need to return a truthy value, or else __setstate__ won't be run.
+ #
+ # This could have been done more gracefully by always putting the state
+ # in a tuple, but this way is backwards- and forwards- compatible with
+ # previous versions of OrderedSet.
+ return (None,)
+ else:
+ return list(self)
+
+ def __setstate__(self, state):
+ if state == (None,):
+ self.__init__([])
+ else:
+ self.__init__(state)
+
+ def __contains__(self, key):
+ """
+ Test if the item is in this ordered set
+
+ Example:
+ >>> 1 in OrderedSet([1, 3, 2])
+ True
+ >>> 5 in OrderedSet([1, 3, 2])
+ False
+ """
+ return key in self.map
+
+ def add(self, key):
+ """
+ Add `key` as an item to this OrderedSet, then return its index.
+
+ If `key` is already in the OrderedSet, return the index it already
+ had.
+
+ Example:
+ >>> oset = OrderedSet()
+ >>> oset.append(3)
+ 0
+ >>> print(oset)
+ OrderedSet([3])
+ """
+ if key not in self.map:
+ self.map[key] = len(self.items)
+ self.items.append(key)
+ return self.map[key]
+
+ append = add
+
+ def update(self, sequence):
+ """
+ Update the set with the given iterable sequence, then return the index
+ of the last element inserted.
+
+ Example:
+ >>> oset = OrderedSet([1, 2, 3])
+ >>> oset.update([3, 1, 5, 1, 4])
+ 4
+ >>> print(oset)
+ OrderedSet([1, 2, 3, 5, 4])
+ """
+ item_index = None
+ try:
+ for item in sequence:
+ item_index = self.add(item)
+ except TypeError:
+ raise ValueError(
+ "Argument needs to be an iterable, got %s" % type(sequence)
+ )
+ return item_index
+
+ def index(self, key):
+ """
+ Get the index of a given entry, raising an IndexError if it's not
+ present.
+
+ `key` can be an iterable of entries that is not a string, in which case
+ this returns a list of indices.
+
+ Example:
+ >>> oset = OrderedSet([1, 2, 3])
+ >>> oset.index(2)
+ 1
+ """
+ if is_iterable(key):
+ return [self.index(subkey) for subkey in key]
+ return self.map[key]
+
+ # Provide some compatibility with pd.Index
+ get_loc = index
+ get_indexer = index
+
+ def pop(self):
+ """
+ Remove and return the last element from the set.
+
+ Raises KeyError if the set is empty.
+
+ Example:
+ >>> oset = OrderedSet([1, 2, 3])
+ >>> oset.pop()
+ 3
+ """
+ if not self.items:
+ raise KeyError("Set is empty")
+
+ elem = self.items[-1]
+ del self.items[-1]
+ del self.map[elem]
+ return elem
+
+ def discard(self, key):
+ """
+ Remove an element. Do not raise an exception if absent.
+
+ The MutableSet mixin uses this to implement the .remove() method, which
+ *does* raise an error when asked to remove a non-existent item.
+
+ Example:
+ >>> oset = OrderedSet([1, 2, 3])
+ >>> oset.discard(2)
+ >>> print(oset)
+ OrderedSet([1, 3])
+ >>> oset.discard(2)
+ >>> print(oset)
+ OrderedSet([1, 3])
+ """
+ if key in self:
+ i = self.map[key]
+ del self.items[i]
+ del self.map[key]
+ for k, v in self.map.items():
+ if v >= i:
+ self.map[k] = v - 1
+
+ def clear(self):
+ """
+ Remove all items from this OrderedSet.
+ """
+ del self.items[:]
+ self.map.clear()
+
+ def __iter__(self):
+ """
+ Example:
+ >>> list(iter(OrderedSet([1, 2, 3])))
+ [1, 2, 3]
+ """
+ return iter(self.items)
+
+ def __reversed__(self):
+ """
+ Example:
+ >>> list(reversed(OrderedSet([1, 2, 3])))
+ [3, 2, 1]
+ """
+ return reversed(self.items)
+
+ def __repr__(self):
+ if not self:
+ return "%s()" % (self.__class__.__name__,)
+ return "%s(%r)" % (self.__class__.__name__, list(self))
+
+ def __eq__(self, other):
+ """
+ Returns true if the containers have the same items. If `other` is a
+ Sequence, then order is checked, otherwise it is ignored.
+
+ Example:
+ >>> oset = OrderedSet([1, 3, 2])
+ >>> oset == [1, 3, 2]
+ True
+ >>> oset == [1, 2, 3]
+ False
+ >>> oset == [2, 3]
+ False
+ >>> oset == OrderedSet([3, 2, 1])
+ False
+ """
+ # In Python 2 deque is not a Sequence, so treat it as one for
+ # consistent behavior with Python 3.
+ if isinstance(other, (Sequence, deque)):
+ # Check that this OrderedSet contains the same elements, in the
+ # same order, as the other object.
+ return list(self) == list(other)
+ try:
+ other_as_set = set(other)
+ except TypeError:
+ # If `other` can't be converted into a set, it's not equal.
+ return False
+ else:
+ return set(self) == other_as_set
+
+ def union(self, *sets):
+ """
+ Combines all unique items.
+ Each item's order is defined by its first appearance.
+
+ Example:
+ >>> oset = OrderedSet.union(OrderedSet([3, 1, 4, 1, 5]), [1, 3], [2, 0])
+ >>> print(oset)
+ OrderedSet([3, 1, 4, 5, 2, 0])
+ >>> oset.union([8, 9])
+ OrderedSet([3, 1, 4, 5, 2, 0, 8, 9])
+ >>> oset | {10}
+ OrderedSet([3, 1, 4, 5, 2, 0, 10])
+ """
+ cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
+ containers = map(list, it.chain([self], sets))
+ items = it.chain.from_iterable(containers)
+ return cls(items)
+
+ def __and__(self, other):
+ # the parent implementation of this is backwards
+ return self.intersection(other)
+
+ def intersection(self, *sets):
+ """
+ Returns elements in common between all sets. Order is defined only
+ by the first set.
+
+ Example:
+ >>> oset = OrderedSet.intersection(OrderedSet([0, 1, 2, 3]), [1, 2, 3])
+ >>> print(oset)
+ OrderedSet([1, 2, 3])
+ >>> oset.intersection([2, 4, 5], [1, 2, 3, 4])
+ OrderedSet([2])
+ >>> oset.intersection()
+ OrderedSet([1, 2, 3])
+ """
+ cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
+ if sets:
+ common = set.intersection(*map(set, sets))
+ items = (item for item in self if item in common)
+ else:
+ items = self
+ return cls(items)
+
+ def difference(self, *sets):
+ """
+ Returns all elements that are in this set but not the others.
+
+ Example:
+ >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]))
+ OrderedSet([1, 3])
+ >>> OrderedSet([1, 2, 3]).difference(OrderedSet([2]), OrderedSet([3]))
+ OrderedSet([1])
+ >>> OrderedSet([1, 2, 3]) - OrderedSet([2])
+ OrderedSet([1, 3])
+ >>> OrderedSet([1, 2, 3]).difference()
+ OrderedSet([1, 2, 3])
+ """
+ cls = self.__class__
+ if sets:
+ other = set.union(*map(set, sets))
+ items = (item for item in self if item not in other)
+ else:
+ items = self
+ return cls(items)
+
+ def issubset(self, other):
+ """
+ Report whether another set contains this set.
+
+ Example:
+ >>> OrderedSet([1, 2, 3]).issubset({1, 2})
+ False
+ >>> OrderedSet([1, 2, 3]).issubset({1, 2, 3, 4})
+ True
+ >>> OrderedSet([1, 2, 3]).issubset({1, 4, 3, 5})
+ False
+ """
+ if len(self) > len(other): # Fast check for obvious cases
+ return False
+ return all(item in other for item in self)
+
+ def issuperset(self, other):
+ """
+ Report whether this set contains another set.
+
+ Example:
+ >>> OrderedSet([1, 2]).issuperset([1, 2, 3])
+ False
+ >>> OrderedSet([1, 2, 3, 4]).issuperset({1, 2, 3})
+ True
+ >>> OrderedSet([1, 4, 3, 5]).issuperset({1, 2, 3})
+ False
+ """
+ if len(self) < len(other): # Fast check for obvious cases
+ return False
+ return all(item in self for item in other)
+
+ def symmetric_difference(self, other):
+ """
+ Return the symmetric difference of two OrderedSets as a new set.
+ That is, the new set will contain all elements that are in exactly
+ one of the sets.
+
+ Their order will be preserved, with elements from `self` preceding
+ elements from `other`.
+
+ Example:
+ >>> this = OrderedSet([1, 4, 3, 5, 7])
+ >>> other = OrderedSet([9, 7, 1, 3, 2])
+ >>> this.symmetric_difference(other)
+ OrderedSet([4, 5, 9, 2])
+ """
+ cls = self.__class__ if isinstance(self, OrderedSet) else OrderedSet
+ diff1 = cls(self).difference(other)
+ diff2 = cls(other).difference(self)
+ return diff1.union(diff2)
+
+ def _update_items(self, items):
+ """
+ Replace the 'items' list of this OrderedSet with a new one, updating
+ self.map accordingly.
+ """
+ self.items = items
+ self.map = {item: idx for (idx, item) in enumerate(items)}
+
+ def difference_update(self, *sets):
+ """
+ Update this OrderedSet to remove items from one or more other sets.
+
+ Example:
+ >>> this = OrderedSet([1, 2, 3])
+ >>> this.difference_update(OrderedSet([2, 4]))
+ >>> print(this)
+ OrderedSet([1, 3])
+
+ >>> this = OrderedSet([1, 2, 3, 4, 5])
+ >>> this.difference_update(OrderedSet([2, 4]), OrderedSet([1, 4, 6]))
+ >>> print(this)
+ OrderedSet([3, 5])
+ """
+ items_to_remove = set()
+ for other in sets:
+ items_to_remove |= set(other)
+ self._update_items([item for item in self.items if item not in items_to_remove])
+
+ def intersection_update(self, other):
+ """
+ Update this OrderedSet to keep only items in another set, preserving
+ their order in this set.
+
+ Example:
+ >>> this = OrderedSet([1, 4, 3, 5, 7])
+ >>> other = OrderedSet([9, 7, 1, 3, 2])
+ >>> this.intersection_update(other)
+ >>> print(this)
+ OrderedSet([1, 3, 7])
+ """
+ other = set(other)
+ self._update_items([item for item in self.items if item in other])
+
+ def symmetric_difference_update(self, other):
+ """
+ Update this OrderedSet to remove items from another set, then
+ add items from the other set that were not present in this set.
+
+ Example:
+ >>> this = OrderedSet([1, 4, 3, 5, 7])
+ >>> other = OrderedSet([9, 7, 1, 3, 2])
+ >>> this.symmetric_difference_update(other)
+ >>> print(this)
+ OrderedSet([4, 5, 9, 2])
+ """
+ items_to_add = [item for item in other if item not in self]
+ items_to_remove = set(other)
+ self._update_items(
+ [item for item in self.items if item not in items_to_remove] + items_to_add
+ )
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/__about__.py b/third_party/python/setuptools/setuptools/_vendor/packaging/__about__.py
new file mode 100644
index 0000000000..4d998578d7
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/__about__.py
@@ -0,0 +1,27 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
+
+__title__ = "packaging"
+__summary__ = "Core utilities for Python packages"
+__uri__ = "https://github.com/pypa/packaging"
+
+__version__ = "20.4"
+
+__author__ = "Donald Stufft and individual contributors"
+__email__ = "donald@stufft.io"
+
+__license__ = "BSD-2-Clause or Apache-2.0"
+__copyright__ = "Copyright 2014-2019 %s" % __author__
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/__init__.py b/third_party/python/setuptools/setuptools/_vendor/packaging/__init__.py
new file mode 100644
index 0000000000..a0cf67df52
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/__init__.py
@@ -0,0 +1,26 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+from .__about__ import (
+ __author__,
+ __copyright__,
+ __email__,
+ __license__,
+ __summary__,
+ __title__,
+ __uri__,
+ __version__,
+)
+
+__all__ = [
+ "__title__",
+ "__summary__",
+ "__uri__",
+ "__version__",
+ "__author__",
+ "__email__",
+ "__license__",
+ "__copyright__",
+]
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/_compat.py b/third_party/python/setuptools/setuptools/_vendor/packaging/_compat.py
new file mode 100644
index 0000000000..e54bd4ede8
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/_compat.py
@@ -0,0 +1,38 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import sys
+
+from ._typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Any, Dict, Tuple, Type
+
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+# flake8: noqa
+
+if PY3:
+ string_types = (str,)
+else:
+ string_types = (basestring,)
+
+
+def with_metaclass(meta, *bases):
+ # type: (Type[Any], Tuple[Type[Any], ...]) -> Any
+ """
+ Create a base class with a metaclass.
+ """
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(meta): # type: ignore
+ def __new__(cls, name, this_bases, d):
+ # type: (Type[Any], str, Tuple[Any], Dict[Any, Any]) -> Any
+ return meta(name, bases, d)
+
+ return type.__new__(metaclass, "temporary_class", (), {})
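+
+
+# Illustrative sketch of with_metaclass(); the temporary class is replaced by
+# a real instance of the metaclass when the subclass body executes:
+#
+#     >>> class Meta(type): pass
+#     >>> class Base(with_metaclass(Meta)): pass
+#     >>> type(Base) is Meta
+#     True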
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/_structures.py b/third_party/python/setuptools/setuptools/_vendor/packaging/_structures.py
new file mode 100644
index 0000000000..800d5c5588
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/_structures.py
@@ -0,0 +1,86 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+
+class InfinityType(object):
+ def __repr__(self):
+ # type: () -> str
+ return "Infinity"
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __le__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __ge__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __neg__(self):
+ # type: (object) -> NegativeInfinityType
+ return NegativeInfinity
+
+
+Infinity = InfinityType()
+
+
+class NegativeInfinityType(object):
+ def __repr__(self):
+ # type: () -> str
+ return "-Infinity"
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(repr(self))
+
+ def __lt__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __le__(self, other):
+ # type: (object) -> bool
+ return True
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return isinstance(other, self.__class__)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return not isinstance(other, self.__class__)
+
+ def __gt__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __ge__(self, other):
+ # type: (object) -> bool
+ return False
+
+ def __neg__(self):
+ # type: (object) -> InfinityType
+ return Infinity
+
+
+NegativeInfinity = NegativeInfinityType()
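+
+
+# Illustrative sketch: the two sentinels compare below/above every other
+# value, which makes them useful as padding in version sort keys:
+#
+#     >>> sorted([1, Infinity, -5, NegativeInfinity])
+#     [-Infinity, -5, 1, Infinity]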
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/_typing.py b/third_party/python/setuptools/setuptools/_vendor/packaging/_typing.py
new file mode 100644
index 0000000000..77a8b9185a
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/_typing.py
@@ -0,0 +1,48 @@
+"""For neatly implementing static typing in packaging.
+
+`mypy` - the static type analysis tool we use - uses the `typing` module, which
+provides core functionality fundamental to mypy's functioning.
+
+Generally, `typing` would be imported at runtime and used in that fashion -
+it acts as a no-op at runtime and does not have any run-time overhead by
+design.
+
+As it turns out, `typing` is not vendorable - it uses separate sources for
+Python 2/Python 3. Thus, this codebase cannot expect it to be present.
+To work around this, mypy allows the typing import to be behind a False-y
+optional to prevent it from running at runtime and type-comments can be used
+to remove the need for the types to be accessible directly during runtime.
+
+This module provides the False-y guard in a nicely named fashion so that a
+curious maintainer can reach here to read this.
+
+In packaging, all static-typing related imports should be guarded as follows:
+
+ from packaging._typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from typing import ...
+
+Ref: https://github.com/python/mypy/issues/3216
+"""
+
+__all__ = ["TYPE_CHECKING", "cast"]
+
+# The TYPE_CHECKING constant defined by the typing module is False at runtime
+# but True while type checking.
+if False: # pragma: no cover
+ from typing import TYPE_CHECKING
+else:
+ TYPE_CHECKING = False
+
+# typing's cast syntax requires calling typing.cast at runtime, but we don't
+# want to import typing at runtime. Here, we inform the type checkers that
+# we're importing `typing.cast` as `cast` and re-implement typing.cast's
+# runtime behavior in a block that is ignored by type checkers.
+if TYPE_CHECKING: # pragma: no cover
+ # not executed at runtime
+ from typing import cast
+else:
+ # executed at runtime
+ def cast(type_, value): # noqa
+ return value
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/markers.py b/third_party/python/setuptools/setuptools/_vendor/packaging/markers.py
new file mode 100644
index 0000000000..03fbdfcc94
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/markers.py
@@ -0,0 +1,328 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import operator
+import os
+import platform
+import sys
+
+from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd
+from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString
+from setuptools.extern.pyparsing import Literal as L # noqa
+
+from ._compat import string_types
+from ._typing import TYPE_CHECKING
+from .specifiers import Specifier, InvalidSpecifier
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+ Operator = Callable[[str, str], bool]
+
+
+__all__ = [
+ "InvalidMarker",
+ "UndefinedComparison",
+ "UndefinedEnvironmentName",
+ "Marker",
+ "default_environment",
+]
+
+
+class InvalidMarker(ValueError):
+ """
+ An invalid marker was found, users should refer to PEP 508.
+ """
+
+
+class UndefinedComparison(ValueError):
+ """
+ An invalid operation was attempted on a value that doesn't support it.
+ """
+
+
+class UndefinedEnvironmentName(ValueError):
+ """
+ A name was used that does not exist inside of the evaluation
+ environment.
+ """
+
+
+class Node(object):
+ def __init__(self, value):
+ # type: (Any) -> None
+ self.value = value
+
+ def __str__(self):
+ # type: () -> str
+ return str(self.value)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{0}({1!r})>".format(self.__class__.__name__, str(self))
+
+ def serialize(self):
+ # type: () -> str
+ raise NotImplementedError
+
+
+class Variable(Node):
+ def serialize(self):
+ # type: () -> str
+ return str(self)
+
+
+class Value(Node):
+ def serialize(self):
+ # type: () -> str
+ return '"{0}"'.format(self)
+
+
+class Op(Node):
+ def serialize(self):
+ # type: () -> str
+ return str(self)
+
+
+VARIABLE = (
+ L("implementation_version")
+ | L("platform_python_implementation")
+ | L("implementation_name")
+ | L("python_full_version")
+ | L("platform_release")
+ | L("platform_version")
+ | L("platform_machine")
+ | L("platform_system")
+ | L("python_version")
+ | L("sys_platform")
+ | L("os_name")
+ | L("os.name") # PEP-345
+ | L("sys.platform") # PEP-345
+ | L("platform.version") # PEP-345
+ | L("platform.machine") # PEP-345
+ | L("platform.python_implementation") # PEP-345
+ | L("python_implementation") # undocumented setuptools legacy
+ | L("extra") # PEP-508
+)
+ALIASES = {
+ "os.name": "os_name",
+ "sys.platform": "sys_platform",
+ "platform.version": "platform_version",
+ "platform.machine": "platform_machine",
+ "platform.python_implementation": "platform_python_implementation",
+ "python_implementation": "platform_python_implementation",
+}
+VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0])))
+
+VERSION_CMP = (
+ L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<")
+)
+
+MARKER_OP = VERSION_CMP | L("not in") | L("in")
+MARKER_OP.setParseAction(lambda s, l, t: Op(t[0]))
+
+MARKER_VALUE = QuotedString("'") | QuotedString('"')
+MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0]))
+
+BOOLOP = L("and") | L("or")
+
+MARKER_VAR = VARIABLE | MARKER_VALUE
+
+MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR)
+MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0]))
+
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+
+MARKER_EXPR = Forward()
+MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN)
+MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR)
+
+MARKER = stringStart + MARKER_EXPR + stringEnd
+
+
+def _coerce_parse_result(results):
+ # type: (Union[ParseResults, List[Any]]) -> List[Any]
+ if isinstance(results, ParseResults):
+ return [_coerce_parse_result(i) for i in results]
+ else:
+ return results
+
+
+def _format_marker(marker, first=True):
+ # type: (Union[List[str], Tuple[Node, ...], str], Optional[bool]) -> str
+
+ assert isinstance(marker, (list, tuple, string_types))
+
+ # Sometimes we have a structure like [[...]] which is a single-item list
+ # where the single item is itself its own list. In that case we want to
+ # skip the rest of this function so that we don't get extraneous () on
+ # the outside.
+ if (
+ isinstance(marker, list)
+ and len(marker) == 1
+ and isinstance(marker[0], (list, tuple))
+ ):
+ return _format_marker(marker[0])
+
+ if isinstance(marker, list):
+ inner = (_format_marker(m, first=False) for m in marker)
+ if first:
+ return " ".join(inner)
+ else:
+ return "(" + " ".join(inner) + ")"
+ elif isinstance(marker, tuple):
+ return " ".join([m.serialize() for m in marker])
+ else:
+ return marker
+
+
+_operators = {
+ "in": lambda lhs, rhs: lhs in rhs,
+ "not in": lambda lhs, rhs: lhs not in rhs,
+ "<": operator.lt,
+ "<=": operator.le,
+ "==": operator.eq,
+ "!=": operator.ne,
+ ">=": operator.ge,
+ ">": operator.gt,
+} # type: Dict[str, Operator]
+
+
+def _eval_op(lhs, op, rhs):
+ # type: (str, Op, str) -> bool
+ try:
+ spec = Specifier("".join([op.serialize(), rhs]))
+ except InvalidSpecifier:
+ pass
+ else:
+ return spec.contains(lhs)
+
+ oper = _operators.get(op.serialize()) # type: Optional[Operator]
+ if oper is None:
+ raise UndefinedComparison(
+ "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs)
+ )
+
+ return oper(lhs, rhs)
+
+
+class Undefined(object):
+ pass
+
+
+_undefined = Undefined()
+
+
+def _get_env(environment, name):
+ # type: (Dict[str, str], str) -> str
+ value = environment.get(name, _undefined) # type: Union[str, Undefined]
+
+ if isinstance(value, Undefined):
+ raise UndefinedEnvironmentName(
+ "{0!r} does not exist in evaluation environment.".format(name)
+ )
+
+ return value
+
+
+def _evaluate_markers(markers, environment):
+ # type: (List[Any], Dict[str, str]) -> bool
+ groups = [[]] # type: List[List[bool]]
+
+ for marker in markers:
+ assert isinstance(marker, (list, tuple, string_types))
+
+ if isinstance(marker, list):
+ groups[-1].append(_evaluate_markers(marker, environment))
+ elif isinstance(marker, tuple):
+ lhs, op, rhs = marker
+
+ if isinstance(lhs, Variable):
+ lhs_value = _get_env(environment, lhs.value)
+ rhs_value = rhs.value
+ else:
+ lhs_value = lhs.value
+ rhs_value = _get_env(environment, rhs.value)
+
+ groups[-1].append(_eval_op(lhs_value, op, rhs_value))
+ else:
+ assert marker in ["and", "or"]
+ if marker == "or":
+ groups.append([])
+
+ return any(all(item) for item in groups)
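+
+
+# Illustrative sketch of the grouping above (comment only): a parsed marker
+# list such as [A, "and", B, "or", C] is evaluated with
+# groups == [[A, B], [C]], so any(all(g) for g in groups) computes
+# (A and B) or C.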
+
+
+def format_full_version(info):
+ # type: (sys._version_info) -> str
+ version = "{0.major}.{0.minor}.{0.micro}".format(info)
+ kind = info.releaselevel
+ if kind != "final":
+ version += kind[0] + str(info.serial)
+ return version
+
+
+def default_environment():
+ # type: () -> Dict[str, str]
+ if hasattr(sys, "implementation"):
+ # Ignoring the `sys.implementation` reference for type checking due to
+ # mypy not liking that the attribute doesn't exist in Python 2.7 when
+ # run with the `--py27` flag.
+ iver = format_full_version(sys.implementation.version) # type: ignore
+ implementation_name = sys.implementation.name # type: ignore
+ else:
+ iver = "0"
+ implementation_name = ""
+
+ return {
+ "implementation_name": implementation_name,
+ "implementation_version": iver,
+ "os_name": os.name,
+ "platform_machine": platform.machine(),
+ "platform_release": platform.release(),
+ "platform_system": platform.system(),
+ "platform_version": platform.version(),
+ "python_full_version": platform.python_version(),
+ "platform_python_implementation": platform.python_implementation(),
+ "python_version": ".".join(platform.python_version_tuple()[:2]),
+ "sys_platform": sys.platform,
+ }
+
+
+class Marker(object):
+ def __init__(self, marker):
+ # type: (str) -> None
+ try:
+ self._markers = _coerce_parse_result(MARKER.parseString(marker))
+ except ParseException as e:
+ err_str = "Invalid marker: {0!r}, parse error at {1!r}".format(
+ marker, marker[e.loc : e.loc + 8]
+ )
+ raise InvalidMarker(err_str)
+
+ def __str__(self):
+ # type: () -> str
+ return _format_marker(self._markers)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Marker({0!r})>".format(str(self))
+
+ def evaluate(self, environment=None):
+ # type: (Optional[Dict[str, str]]) -> bool
+ """Evaluate a marker.
+
+ Return the boolean from evaluating the given marker against the
+ environment. environment is an optional argument to override all or
+ part of the determined environment.
+
+ The environment is determined from the current Python process.
+ """
+ current_environment = default_environment()
+ if environment is not None:
+ current_environment.update(environment)
+
+ return _evaluate_markers(self._markers, current_environment)
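+
+
+# Example usage (an illustrative, doctest-style sketch; results depend on
+# the running interpreter unless the environment is overridden):
+#
+#     >>> m = Marker('python_version >= "2.7" and os_name == "posix"')
+#     >>> str(m)
+#     'python_version >= "2.7" and os_name == "posix"'
+#     >>> m.evaluate({"python_version": "3.8", "os_name": "nt"})
+#     False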
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/requirements.py b/third_party/python/setuptools/setuptools/_vendor/packaging/requirements.py
new file mode 100644
index 0000000000..5d50c7d7e2
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/requirements.py
@@ -0,0 +1,145 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import string
+import re
+
+from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
+from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
+from setuptools.extern.pyparsing import Literal as L # noqa
+from urllib import parse as urlparse
+
+from ._typing import TYPE_CHECKING
+from .markers import MARKER_EXPR, Marker
+from .specifiers import LegacySpecifier, Specifier, SpecifierSet
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import List
+
+
+class InvalidRequirement(ValueError):
+ """
+ An invalid requirement was found; users should refer to PEP 508.
+ """
+
+
+ALPHANUM = Word(string.ascii_letters + string.digits)
+
+LBRACKET = L("[").suppress()
+RBRACKET = L("]").suppress()
+LPAREN = L("(").suppress()
+RPAREN = L(")").suppress()
+COMMA = L(",").suppress()
+SEMICOLON = L(";").suppress()
+AT = L("@").suppress()
+
+PUNCTUATION = Word("-_.")
+IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
+IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
+
+NAME = IDENTIFIER("name")
+EXTRA = IDENTIFIER
+
+URI = Regex(r"[^ ]+")("url")
+URL = AT + URI
+
+EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
+EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
+
+VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
+VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
+
+VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
+VERSION_MANY = Combine(
+ VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False
+)("_raw_spec")
+_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
+_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "")
+
+VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
+VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
+
+MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
+MARKER_EXPR.setParseAction(
+ lambda s, l, t: Marker(s[t._original_start : t._original_end])
+)
+MARKER_SEPARATOR = SEMICOLON
+MARKER = MARKER_SEPARATOR + MARKER_EXPR
+
+VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
+URL_AND_MARKER = URL + Optional(MARKER)
+
+NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
+
+REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
+# setuptools.extern.pyparsing isn't thread-safe during initialization, so we
+# trigger it eagerly here; see issue #104.
+REQUIREMENT.parseString("x[]")
+
+
+class Requirement(object):
+ """Parse a requirement.
+
+ Parse a given requirement string into its parts, such as name, specifier,
+ URL, and extras. Raises InvalidRequirement on a badly-formed requirement
+ string.
+ """
+
+ # TODO: Can we test whether something is contained within a requirement?
+ # If so how do we do that? Do we need to test against the _name_ of
+ # the thing as well as the version? What about the markers?
+ # TODO: Can we normalize the name and extra name?
+
+ def __init__(self, requirement_string):
+ # type: (str) -> None
+ try:
+ req = REQUIREMENT.parseString(requirement_string)
+ except ParseException as e:
+ raise InvalidRequirement(
+ 'Parse error at "{0!r}": {1}'.format(
+ requirement_string[e.loc : e.loc + 8], e.msg
+ )
+ )
+
+ self.name = req.name
+ if req.url:
+ parsed_url = urlparse.urlparse(req.url)
+ if parsed_url.scheme == "file":
+ if urlparse.urlunparse(parsed_url) != req.url:
+ raise InvalidRequirement("Invalid URL given")
+ elif not (parsed_url.scheme and parsed_url.netloc) or (
+ not parsed_url.scheme and not parsed_url.netloc
+ ):
+ raise InvalidRequirement("Invalid URL: {0}".format(req.url))
+ self.url = req.url
+ else:
+ self.url = None
+ self.extras = set(req.extras.asList() if req.extras else [])
+ self.specifier = SpecifierSet(req.specifier)
+ self.marker = req.marker if req.marker else None
+
+ def __str__(self):
+ # type: () -> str
+ parts = [self.name] # type: List[str]
+
+ if self.extras:
+ parts.append("[{0}]".format(",".join(sorted(self.extras))))
+
+ if self.specifier:
+ parts.append(str(self.specifier))
+
+ if self.url:
+ parts.append("@ {0}".format(self.url))
+ if self.marker:
+ parts.append(" ")
+
+ if self.marker:
+ parts.append("; {0}".format(self.marker))
+
+ return "".join(parts)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Requirement({0!r})>".format(str(self))
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/specifiers.py b/third_party/python/setuptools/setuptools/_vendor/packaging/specifiers.py
new file mode 100644
index 0000000000..fe09bb1dbb
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/specifiers.py
@@ -0,0 +1,863 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import abc
+import functools
+import itertools
+import re
+
+from ._compat import string_types, with_metaclass
+from ._typing import TYPE_CHECKING
+from .utils import canonicalize_version
+from .version import Version, LegacyVersion, parse
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ List,
+ Dict,
+ Union,
+ Iterable,
+ Iterator,
+ Optional,
+ Callable,
+ Tuple,
+ FrozenSet,
+ )
+
+ ParsedVersion = Union[Version, LegacyVersion]
+ UnparsedVersion = Union[Version, LegacyVersion, str]
+ CallableOperator = Callable[[ParsedVersion, str], bool]
+
+
+class InvalidSpecifier(ValueError):
+ """
+ An invalid specifier was found; users should refer to PEP 440.
+ """
+
+
+class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): # type: ignore
+ @abc.abstractmethod
+ def __str__(self):
+ # type: () -> str
+ """
+ Returns the str representation of this Specifier-like object. This
+ should be representative of the Specifier itself.
+ """
+
+ @abc.abstractmethod
+ def __hash__(self):
+ # type: () -> int
+ """
+ Returns a hash value for this Specifier-like object.
+ """
+
+ @abc.abstractmethod
+ def __eq__(self, other):
+ # type: (object) -> bool
+ """
+ Returns a boolean representing whether or not the two Specifier-like
+ objects are equal.
+ """
+
+ @abc.abstractmethod
+ def __ne__(self, other):
+ # type: (object) -> bool
+ """
+ Returns a boolean representing whether or not the two Specifier-like
+ objects are not equal.
+ """
+
+ @abc.abstractproperty
+ def prereleases(self):
+ # type: () -> Optional[bool]
+ """
+ Returns whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ """
+ Sets whether or not pre-releases as a whole are allowed by this
+ specifier.
+ """
+
+ @abc.abstractmethod
+ def contains(self, item, prereleases=None):
+ # type: (str, Optional[bool]) -> bool
+ """
+ Determines if the given item is contained within this specifier.
+ """
+
+ @abc.abstractmethod
+ def filter(self, iterable, prereleases=None):
+ # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
+ """
+ Takes an iterable of items and filters them so that only items which
+ are contained within this specifier are allowed in it.
+ """
+
+
+class _IndividualSpecifier(BaseSpecifier):
+
+ _operators = {} # type: Dict[str, str]
+
+ def __init__(self, spec="", prereleases=None):
+ # type: (str, Optional[bool]) -> None
+ match = self._regex.search(spec)
+ if not match:
+ raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
+
+ self._spec = (
+ match.group("operator").strip(),
+ match.group("version").strip(),
+ ) # type: Tuple[str, str]
+
+ # Store whether or not this Specifier should accept prereleases
+ self._prereleases = prereleases
+
+ def __repr__(self):
+ # type: () -> str
+ pre = (
+ ", prereleases={0!r}".format(self.prereleases)
+ if self._prereleases is not None
+ else ""
+ )
+
+ return "<{0}({1!r}{2})>".format(self.__class__.__name__, str(self), pre)
+
+ def __str__(self):
+ # type: () -> str
+ return "{0}{1}".format(*self._spec)
+
+ @property
+ def _canonical_spec(self):
+ # type: () -> Tuple[str, Union[Version, str]]
+ return self._spec[0], canonicalize_version(self._spec[1])
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._canonical_spec)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, string_types):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._canonical_spec == other._canonical_spec
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, string_types):
+ try:
+ other = self.__class__(str(other))
+ except InvalidSpecifier:
+ return NotImplemented
+ elif not isinstance(other, self.__class__):
+ return NotImplemented
+
+ return self._spec != other._spec
+
+ def _get_operator(self, op):
+ # type: (str) -> CallableOperator
+ operator_callable = getattr(
+ self, "_compare_{0}".format(self._operators[op])
+ ) # type: CallableOperator
+ return operator_callable
+
+ def _coerce_version(self, version):
+ # type: (UnparsedVersion) -> ParsedVersion
+ if not isinstance(version, (LegacyVersion, Version)):
+ version = parse(version)
+ return version
+
+ @property
+ def operator(self):
+ # type: () -> str
+ return self._spec[0]
+
+ @property
+ def version(self):
+ # type: () -> str
+ return self._spec[1]
+
+ @property
+ def prereleases(self):
+ # type: () -> Optional[bool]
+ return self._prereleases
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+ def __contains__(self, item):
+ # type: (str) -> bool
+ return self.contains(item)
+
+ def contains(self, item, prereleases=None):
+ # type: (UnparsedVersion, Optional[bool]) -> bool
+
+ # Determine if prereleases are to be allowed or not.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # Normalize item to a Version or LegacyVersion; this allows us to have
+ # a shortcut for ``"2.0" in Specifier(">=2")``.
+ normalized_item = self._coerce_version(item)
+
+ # Determine if we should be supporting prereleases in this specifier
+ # or not; if we do not support prereleases, then we can short-circuit
+ # the logic if this version is a prerelease.
+ if normalized_item.is_prerelease and not prereleases:
+ return False
+
+ # Actually do the comparison to determine if this item is contained
+ # within this Specifier or not.
+ operator_callable = self._get_operator(self.operator) # type: CallableOperator
+ return operator_callable(normalized_item, self.version)
+
+ def filter(self, iterable, prereleases=None):
+ # type: (Iterable[UnparsedVersion], Optional[bool]) -> Iterable[UnparsedVersion]
+
+ yielded = False
+ found_prereleases = []
+
+ kw = {"prereleases": prereleases if prereleases is not None else True}
+
+ # Attempt to iterate over all the values in the iterable and if any of
+ # them match, yield them.
+ for version in iterable:
+ parsed_version = self._coerce_version(version)
+
+ if self.contains(parsed_version, **kw):
+ # If our version is a prerelease, and we were not set to allow
+ # prereleases, then we'll store it for later in case nothing
+ # else matches this specifier.
+ if parsed_version.is_prerelease and not (
+ prereleases or self.prereleases
+ ):
+ found_prereleases.append(version)
+ # Either this is not a prerelease, or we should have been
+ # accepting prereleases from the beginning.
+ else:
+ yielded = True
+ yield version
+
+ # Now that we've iterated over everything, determine if we've yielded
+ # any values, and if we have not and we have any prereleases stored up
+ # then we will go ahead and yield the prereleases.
+ if not yielded and found_prereleases:
+ for version in found_prereleases:
+ yield version
+
+
+class LegacySpecifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(==|!=|<=|>=|<|>))
+ \s*
+ (?P<version>
+ [^,;\s)]* # Since this is a "legacy" specifier, and the version
+ # string can be just about anything, we match everything
+ # except for whitespace, a semi-colon for marker support,
+ # a closing paren since versions can be enclosed in
+ # them, and a comma since it's a version separator.
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ }
+
+ def _coerce_version(self, version):
+ # type: (Union[ParsedVersion, str]) -> LegacyVersion
+ if not isinstance(version, LegacyVersion):
+ version = LegacyVersion(str(version))
+ return version
+
+ def _compare_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective == self._coerce_version(spec)
+
+ def _compare_not_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective != self._coerce_version(spec)
+
+ def _compare_less_than_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective <= self._coerce_version(spec)
+
+ def _compare_greater_than_equal(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective >= self._coerce_version(spec)
+
+ def _compare_less_than(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective < self._coerce_version(spec)
+
+ def _compare_greater_than(self, prospective, spec):
+ # type: (LegacyVersion, str) -> bool
+ return prospective > self._coerce_version(spec)
+
+
+def _require_version_compare(
+ fn # type: (Callable[[Specifier, ParsedVersion, str], bool])
+):
+ # type: (...) -> Callable[[Specifier, ParsedVersion, str], bool]
+ @functools.wraps(fn)
+ def wrapped(self, prospective, spec):
+ # type: (Specifier, ParsedVersion, str) -> bool
+ if not isinstance(prospective, Version):
+ return False
+ return fn(self, prospective, spec)
+
+ return wrapped
+
+
+class Specifier(_IndividualSpecifier):
+
+ _regex_str = r"""
+ (?P<operator>(~=|==|!=|<=|>=|<|>|===))
+ (?P<version>
+ (?:
+ # The identity operators allow for an escape hatch that will
+ # do an exact string match of the version you wish to install.
+ # This will not be parsed by PEP 440 and we cannot determine
+ # any semantic meaning from it. This operator is discouraged
+ # but included entirely as an escape hatch.
+ (?<====) # Only match for the identity operator
+ \s*
+ [^\s]* # We just match everything, except for whitespace
+ # since we are only testing for strict identity.
+ )
+ |
+ (?:
+ # The (non)equality operators allow for wild card and local
+ # versions to be specified so we have to define these two
+ # operators separately to enable that.
+ (?<===|!=) # Only match for equals and not equals
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+
+ # You cannot use a wild card and a dev or local version
+ # together so group them with a | and make them optional.
+ (?:
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
+ |
+ \.\* # Wild card syntax of .*
+ )?
+ )
+ |
+ (?:
+ # The compatible operator requires at least two digits in the
+ # release segment.
+ (?<=~=) # Only match for the compatible operator
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ |
+ (?:
+ # All other operators only allow a subset of what the
+ # (non)equality operators do. Specifically they do not allow
+ # local versions to be specified nor do they allow the prefix
+ # matching wild cards.
+ (?<!==|!=|~=) # We have special cases for these
+ # operators so we want to make sure they
+ # don't match here.
+
+ \s*
+ v?
+ (?:[0-9]+!)? # epoch
+ [0-9]+(?:\.[0-9]+)* # release
+ (?: # pre release
+ [-_\.]?
+ (a|b|c|rc|alpha|beta|pre|preview)
+ [-_\.]?
+ [0-9]*
+ )?
+ (?: # post release
+ (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
+ )?
+ (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
+ )
+ )
+ """
+
+ _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ _operators = {
+ "~=": "compatible",
+ "==": "equal",
+ "!=": "not_equal",
+ "<=": "less_than_equal",
+ ">=": "greater_than_equal",
+ "<": "less_than",
+ ">": "greater_than",
+ "===": "arbitrary",
+ }
+
+ @_require_version_compare
+ def _compare_compatible(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # Compatible releases have an equivalent combination of >= and ==. That
+ # is, ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
+ # implement this in terms of the other specifiers instead of
+ # implementing it ourselves. The only thing we need to do is construct
+ # the other specifiers.
+
+ # We want everything but the last item in the version, but we want to
+ # ignore post and dev releases, and we want to treat the pre-release as
+ # its own separate segment.
+ prefix = ".".join(
+ list(
+ itertools.takewhile(
+ lambda x: (not x.startswith("post") and not x.startswith("dev")),
+ _version_split(spec),
+ )
+ )[:-1]
+ )
+
+ # Add the prefix notation to the end of our string
+ prefix += ".*"
+
+ return self._get_operator(">=")(prospective, spec) and self._get_operator("==")(
+ prospective, prefix
+ )
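+
+ # An illustrative consequence of the rule above (comment only):
+ # Specifier("~=2.2") behaves like ">=2.2,==2.*", so it contains "2.9"
+ # but neither "2.1" nor "3.0".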
+
+ @_require_version_compare
+ def _compare_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # We need special logic to handle prefix matching
+ if spec.endswith(".*"):
+ # In the case of prefix matching we want to ignore local segment.
+ prospective = Version(prospective.public)
+ # Split the spec out by dots, and pretend that there is an implicit
+ # dot in between a release segment and a pre-release segment.
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .*
+
+ # Split the prospective version out by dots, and pretend that there
+ # is an implicit dot in between a release segment and a pre-release
+ # segment.
+ split_prospective = _version_split(str(prospective))
+
+ # Shorten the prospective version to be the same length as the spec
+ # so that we can determine if the specifier is a prefix of the
+ # prospective version or not.
+ shortened_prospective = split_prospective[: len(split_spec)]
+
+ # Pad out our two sides with zeros so that they both equal the same
+ # length.
+ padded_spec, padded_prospective = _pad_version(
+ split_spec, shortened_prospective
+ )
+
+ return padded_prospective == padded_spec
+ else:
+ # Convert our spec string into a Version
+ spec_version = Version(spec)
+
+ # If the specifier does not have a local segment, then we want to
+ # act as if the prospective version also does not have a local
+ # segment.
+ if not spec_version.local:
+ prospective = Version(prospective.public)
+
+ return prospective == spec_version
+
+ @_require_version_compare
+ def _compare_not_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+ return not self._compare_equal(prospective, spec)
+
+ @_require_version_compare
+ def _compare_less_than_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) <= Version(spec)
+
+ @_require_version_compare
+ def _compare_greater_than_equal(self, prospective, spec):
+ # type: (ParsedVersion, str) -> bool
+
+ # NB: Local version identifiers are NOT permitted in the version
+ # specifier, so local version labels can be universally removed from
+ # the prospective version.
+ return Version(prospective.public) >= Version(spec)
+
+ @_require_version_compare
+ def _compare_less_than(self, prospective, spec_str):
+ # type: (ParsedVersion, str) -> bool
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is less than the spec
+ # version. If it's not, we can short-circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective < spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+ # includes a pre-release version, we do not accept pre-release
+ # versions for the version mentioned in the specifier (e.g. <3.1 should
+ # not match 3.1.dev0, but should match 3.0.dev0).
+ if not spec.is_prerelease and prospective.is_prerelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that the prospective version is both
+ # less than the spec version *and* it's not a pre-release of the same
+ # version in the spec.
+ return True
+
+ @_require_version_compare
+ def _compare_greater_than(self, prospective, spec_str):
+ # type: (ParsedVersion, str) -> bool
+
+ # Convert our spec to a Version instance, since we'll want to work with
+ # it as a version.
+ spec = Version(spec_str)
+
+ # Check to see if the prospective version is greater than the spec
+ # version. If it's not, we can short-circuit and just return False now
+ # instead of doing extra unneeded work.
+ if not prospective > spec:
+ return False
+
+ # This special case is here so that, unless the specifier itself
+ # includes a post-release version, we do not accept
+ # post-release versions for the version mentioned in the specifier
+ # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
+ if not spec.is_postrelease and prospective.is_postrelease:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # Ensure that we do not allow a local version of the version mentioned
+ # in the specifier, which is technically greater than, to match.
+ if prospective.local is not None:
+ if Version(prospective.base_version) == Version(spec.base_version):
+ return False
+
+ # If we've gotten to here, it means that the prospective version is both
+ # greater than the spec version *and* it's not a post-release or local
+ # version of the same version in the spec.
+ return True
+
+ def _compare_arbitrary(self, prospective, spec):
+ # type: (Version, str) -> bool
+ return str(prospective).lower() == str(spec).lower()
+
+ @property
+ def prereleases(self):
+ # type: () -> bool
+
+ # If there is an explicit prereleases set for this, then we'll just
+ # blindly use that.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # Look at all of our specifiers and determine if they are inclusive
+ # operators, and if they are if they are including an explicit
+ # prerelease.
+ operator, version = self._spec
+ if operator in ["==", ">=", "<=", "~=", "==="]:
+ # The == specifier can include a trailing .*; if it does, we
+ # want to remove it before parsing.
+ if operator == "==" and version.endswith(".*"):
+ version = version[:-2]
+
+ # Parse the version, and if it is a pre-release, then this
+ # specifier allows pre-releases.
+ if parse(version).is_prerelease:
+ return True
+
+ return False
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+
+_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
+
+
+def _version_split(version):
+ # type: (str) -> List[str]
+ result = [] # type: List[str]
+ for item in version.split("."):
+ match = _prefix_regex.search(item)
+ if match:
+ result.extend(match.groups())
+ else:
+ result.append(item)
+ return result
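+
+
+# Illustrative behaviour (comment only): _version_split("2.1rc1") returns
+# ["2", "1", "rc1"], splitting the pre-release marker into its own segment,
+# while _version_split("2.1.7") is simply ["2", "1", "7"].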
+
+
+def _pad_version(left, right):
+ # type: (List[str], List[str]) -> Tuple[List[str], List[str]]
+ left_split, right_split = [], []
+
+ # Get the release segment of our versions
+ left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
+ right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
+
+ # Get the rest of our versions
+ left_split.append(left[len(left_split[0]) :])
+ right_split.append(right[len(right_split[0]) :])
+
+ # Insert our padding
+ left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0])))
+ right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0])))
+
+ return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split)))
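+
+
+# Illustrative behaviour (comment only):
+# _pad_version(["1", "2"], ["1", "2", "3"]) returns
+# (["1", "2", "0"], ["1", "2", "3"]), so "1.2" and "1.2.3" can be compared
+# segment by segment.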
+
+
+class SpecifierSet(BaseSpecifier):
+ def __init__(self, specifiers="", prereleases=None):
+ # type: (str, Optional[bool]) -> None
+
+ # Split on , to break each individual specifier into its own item, and
+ # strip each item to remove leading/trailing whitespace.
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
+
+ # Parse each individual specifier, attempting first to make it a
+ # Specifier and falling back to a LegacySpecifier.
+ parsed = set()
+ for specifier in split_specifiers:
+ try:
+ parsed.add(Specifier(specifier))
+ except InvalidSpecifier:
+ parsed.add(LegacySpecifier(specifier))
+
+ # Turn our parsed specifiers into a frozen set and save them for later.
+ self._specs = frozenset(parsed)
+
+ # Store our prereleases value so we can use it later to determine if
+ # we accept prereleases or not.
+ self._prereleases = prereleases
+
+ def __repr__(self):
+ # type: () -> str
+ pre = (
+ ", prereleases={0!r}".format(self.prereleases)
+ if self._prereleases is not None
+ else ""
+ )
+
+ return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
+
+ def __str__(self):
+ # type: () -> str
+ return ",".join(sorted(str(s) for s in self._specs))
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._specs)
+
+ def __and__(self, other):
+ # type: (Union[SpecifierSet, str]) -> SpecifierSet
+ if isinstance(other, string_types):
+ other = SpecifierSet(other)
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ specifier = SpecifierSet()
+ specifier._specs = frozenset(self._specs | other._specs)
+
+ if self._prereleases is None and other._prereleases is not None:
+ specifier._prereleases = other._prereleases
+ elif self._prereleases is not None and other._prereleases is None:
+ specifier._prereleases = self._prereleases
+ elif self._prereleases == other._prereleases:
+ specifier._prereleases = self._prereleases
+ else:
+ raise ValueError(
+ "Cannot combine SpecifierSets with True and False prerelease "
+ "overrides."
+ )
+
+ return specifier
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, (string_types, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs == other._specs
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ if isinstance(other, (string_types, _IndividualSpecifier)):
+ other = SpecifierSet(str(other))
+ elif not isinstance(other, SpecifierSet):
+ return NotImplemented
+
+ return self._specs != other._specs
+
+ def __len__(self):
+ # type: () -> int
+ return len(self._specs)
+
+ def __iter__(self):
+ # type: () -> Iterator[FrozenSet[_IndividualSpecifier]]
+ return iter(self._specs)
+
+ @property
+ def prereleases(self):
+ # type: () -> Optional[bool]
+
+ # If we have been given an explicit prerelease modifier, then we'll
+ # pass that through here.
+ if self._prereleases is not None:
+ return self._prereleases
+
+ # If we don't have any specifiers, and we don't have a forced value,
+ # then we'll just return None since we don't know if this should have
+ # pre-releases or not.
+ if not self._specs:
+ return None
+
+ # Otherwise we'll see if any of the given specifiers accept
+ # prereleases, if any of them do we'll return True, otherwise False.
+ return any(s.prereleases for s in self._specs)
+
+ @prereleases.setter
+ def prereleases(self, value):
+ # type: (bool) -> None
+ self._prereleases = value
+
+ def __contains__(self, item):
+ # type: (Union[ParsedVersion, str]) -> bool
+ return self.contains(item)
+
+ def contains(self, item, prereleases=None):
+ # type: (Union[ParsedVersion, str], Optional[bool]) -> bool
+
+ # Ensure that our item is a Version or LegacyVersion instance.
+ if not isinstance(item, (LegacyVersion, Version)):
+ item = parse(item)
+
+ # Determine if we're forcing a prerelease or not; if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # We can determine if we're going to allow pre-releases by looking to
+ # see if any of the underlying items supports them. If none of them do
+ # and this item is a pre-release, then we do not allow it and we can
+ # short-circuit that here.
+ # Note: This means that 1.0.dev1 would not be contained in something
+ # like >=1.0.devabc; however, it would be in >=1.0.devabc,>0.0.dev0.
+ if not prereleases and item.is_prerelease:
+ return False
+
+ # We simply dispatch to the underlying specs here to make sure that the
+ # given version is contained within all of them.
+ # Note: This use of all() here means that an empty set of specifiers
+ # will always return True, this is an explicit design decision.
+ return all(s.contains(item, prereleases=prereleases) for s in self._specs)
+
+ def filter(
+ self,
+ iterable, # type: Iterable[Union[ParsedVersion, str]]
+ prereleases=None, # type: Optional[bool]
+ ):
+ # type: (...) -> Iterable[Union[ParsedVersion, str]]
+
+ # Determine if we're forcing a prerelease or not; if we're not forcing
+ # one for this particular filter call, then we'll use whatever the
+ # SpecifierSet thinks for whether or not we should support prereleases.
+ if prereleases is None:
+ prereleases = self.prereleases
+
+ # If we have any specifiers, then we want to wrap our iterable in the
+ # filter method for each one, this will act as a logical AND amongst
+ # each specifier.
+ if self._specs:
+ for spec in self._specs:
+ iterable = spec.filter(iterable, prereleases=bool(prereleases))
+ return iterable
+ # If we do not have any specifiers, then we need to have a rough filter
+ # which will filter out any pre-releases, unless there are no final
+ # releases, and which will filter out LegacyVersion in general.
+ else:
+ filtered = [] # type: List[Union[ParsedVersion, str]]
+ found_prereleases = [] # type: List[Union[ParsedVersion, str]]
+
+ for item in iterable:
+ # Ensure that we have some kind of Version class for this item.
+ if not isinstance(item, (LegacyVersion, Version)):
+ parsed_version = parse(item)
+ else:
+ parsed_version = item
+
+ # Filter out any item which is parsed as a LegacyVersion
+ if isinstance(parsed_version, LegacyVersion):
+ continue
+
+ # Store any item which is a pre-release for later unless we've
+ # already found a final version or we are accepting prereleases
+ if parsed_version.is_prerelease and not prereleases:
+ if not filtered:
+ found_prereleases.append(item)
+ else:
+ filtered.append(item)
+
+ # If we've found no items except for pre-releases, then we'll go
+ # ahead and use the pre-releases
+ if not filtered and found_prereleases and prereleases is None:
+ return found_prereleases
+
+ return filtered
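+
+
+# Example usage (an illustrative sketch):
+#
+#     >>> specs = SpecifierSet(">=1.0,!=1.3")
+#     >>> "1.4" in specs
+#     True
+#     >>> list(specs.filter(["1.0", "1.3", "1.4", "2.0.dev1"]))
+#     ['1.0', '1.4']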
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/tags.py b/third_party/python/setuptools/setuptools/_vendor/packaging/tags.py
new file mode 100644
index 0000000000..9064910b8b
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/tags.py
@@ -0,0 +1,751 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import
+
+import distutils.util
+
+try:
+ from importlib.machinery import EXTENSION_SUFFIXES
+except ImportError: # pragma: no cover
+ import imp
+
+ EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
+ del imp
+import logging
+import os
+import platform
+import re
+import struct
+import sys
+import sysconfig
+import warnings
+
+from ._typing import TYPE_CHECKING, cast
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ Dict,
+ FrozenSet,
+ IO,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ )
+
+ PythonVersion = Sequence[int]
+ MacVersion = Tuple[int, int]
+ GlibcVersion = Tuple[int, int]
+
+
+logger = logging.getLogger(__name__)
+
+INTERPRETER_SHORT_NAMES = {
+ "python": "py", # Generic.
+ "cpython": "cp",
+ "pypy": "pp",
+ "ironpython": "ip",
+ "jython": "jy",
+} # type: Dict[str, str]
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+class Tag(object):
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
+
+ __slots__ = ["_interpreter", "_abi", "_platform"]
+
+ def __init__(self, interpreter, abi, platform):
+ # type: (str, str, str) -> None
+ self._interpreter = interpreter.lower()
+ self._abi = abi.lower()
+ self._platform = platform.lower()
+
+ @property
+ def interpreter(self):
+ # type: () -> str
+ return self._interpreter
+
+ @property
+ def abi(self):
+ # type: () -> str
+ return self._abi
+
+ @property
+ def platform(self):
+ # type: () -> str
+ return self._platform
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if not isinstance(other, Tag):
+ return NotImplemented
+
+ return (
+ (self.platform == other.platform)
+ and (self.abi == other.abi)
+ and (self.interpreter == other.interpreter)
+ )
+
+ def __hash__(self):
+ # type: () -> int
+ return hash((self._interpreter, self._abi, self._platform))
+
+ def __str__(self):
+ # type: () -> str
+ return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
+
+
+def parse_tag(tag):
+ # type: (str) -> FrozenSet[Tag]
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
+ tags = set()
+ interpreters, abis, platforms = tag.split("-")
+ for interpreter in interpreters.split("."):
+ for abi in abis.split("."):
+ for platform_ in platforms.split("."):
+ tags.add(Tag(interpreter, abi, platform_))
+ return frozenset(tags)
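+
+
+# Illustrative behaviour (comment only): a compressed tag set expands into
+# every combination, e.g. parse_tag("cp37-cp37m.abi3-manylinux1_x86_64")
+# returns frozenset({Tag("cp37", "cp37m", "manylinux1_x86_64"),
+# Tag("cp37", "abi3", "manylinux1_x86_64")}).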
+
+
+def _warn_keyword_parameter(func_name, kwargs):
+ # type: (str, Dict[str, bool]) -> bool
+ """
+ Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
+ """
+ if not kwargs:
+ return False
+ elif len(kwargs) > 1 or "warn" not in kwargs:
+ kwargs.pop("warn", None)
+ arg = next(iter(kwargs.keys()))
+ raise TypeError(
+ "{}() got an unexpected keyword argument {!r}".format(func_name, arg)
+ )
+ return kwargs["warn"]
+
+
+def _get_config_var(name, warn=False):
+ # type: (str, bool) -> Union[int, str, None]
+ value = sysconfig.get_config_var(name)
+ if value is None and warn:
+ logger.debug(
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+ )
+ return value
+
+
+def _normalize_string(string):
+ # type: (str) -> str
+ return string.replace(".", "_").replace("-", "_")
+
+
+def _abi3_applies(python_version):
+ # type: (PythonVersion) -> bool
+ """
+ Determine if the Python version supports abi3.
+
+ PEP 384 was first implemented in Python 3.2.
+ """
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version, warn=False):
+ # type: (PythonVersion, bool) -> List[str]
+ py_version = tuple(py_version) # To allow for version comparison.
+ abis = []
+ version = _version_nodot(py_version[:2])
+ debug = pymalloc = ucs4 = ""
+ with_debug = _get_config_var("Py_DEBUG", warn)
+ has_refcount = hasattr(sys, "gettotalrefcount")
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+ # extension modules is the best option.
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
+ debug = "d"
+ if py_version < (3, 8):
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+ if with_pymalloc or with_pymalloc is None:
+ pymalloc = "m"
+ if py_version < (3, 3):
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+ if unicode_size == 4 or (
+ unicode_size is None and sys.maxunicode == 0x10FFFF
+ ):
+ ucs4 = "u"
+ elif debug:
+ # Debug builds can also load "normal" extension modules.
+ # We can also assume no UCS-4 or pymalloc requirement.
+ abis.append("cp{version}".format(version=version))
+ abis.insert(
+ 0,
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+ ),
+ )
+ return abis
+
+
+def cpython_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a CPython interpreter.
+
+ The tags consist of:
+ - cp<python_version>-<abi>-<platform>
+ - cp<python_version>-abi3-<platform>
+ - cp<python_version>-none-<platform>
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
+
+ If python_version only specifies a major version, then user-provided ABIs and
+ the 'none' ABI tag will be used.
+
+ If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+ their normal position and not at the beginning.
+ """
+ warn = _warn_keyword_parameter("cpython_tags", kwargs)
+ if not python_version:
+ python_version = sys.version_info[:2]
+
+ interpreter = "cp{}".format(_version_nodot(python_version[:2]))
+
+ if abis is None:
+ if len(python_version) > 1:
+ abis = _cpython_abis(python_version, warn)
+ else:
+ abis = []
+ abis = list(abis)
+ # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"):
+ try:
+ abis.remove(explicit_abi)
+ except ValueError:
+ pass
+
+ platforms = list(platforms or _platform_tags())
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+ if _abi3_applies(python_version):
+ for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
+ yield tag
+ for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
+ yield tag
+
+ if _abi3_applies(python_version):
+ for minor_version in range(python_version[1] - 1, 1, -1):
+ for platform_ in platforms:
+ interpreter = "cp{version}".format(
+ version=_version_nodot((python_version[0], minor_version))
+ )
+ yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi():
+ # type: () -> Iterator[str]
+ abi = sysconfig.get_config_var("SOABI")
+ if abi:
+ yield _normalize_string(abi)
+
+
+def generic_tags(
+ interpreter=None, # type: Optional[str]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a generic interpreter.
+
+ The tags consist of:
+ - <interpreter>-<abi>-<platform>
+
+ The "none" ABI will be added if it was not explicitly provided.
+ """
+ warn = _warn_keyword_parameter("generic_tags", kwargs)
+ if not interpreter:
+ interp_name = interpreter_name()
+ interp_version = interpreter_version(warn=warn)
+ interpreter = "".join([interp_name, interp_version])
+ if abis is None:
+ abis = _generic_abi()
+ platforms = list(platforms or _platform_tags())
+ abis = list(abis)
+ if "none" not in abis:
+ abis.append("none")
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version):
+ # type: (PythonVersion) -> Iterator[str]
+ """
+ Yields Python versions in descending order.
+
+ After the latest version, the major-only version will be yielded, and then
+ all previous versions of that major version.
+ """
+ if len(py_version) > 1:
+ yield "py{version}".format(version=_version_nodot(py_version[:2]))
+ yield "py{major}".format(major=py_version[0])
+ if len(py_version) > 1:
+ for minor in range(py_version[1] - 1, -1, -1):
+ yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
+
+
+def compatible_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ interpreter=None, # type: Optional[str]
+ platforms=None, # type: Optional[Iterable[str]]
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the sequence of tags that are compatible with a specific version of Python.
+
+ The tags consist of:
+ - py*-none-<platform>
+ - <interpreter>-none-any # ... if `interpreter` is provided.
+ - py*-none-any
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+ platforms = list(platforms or _platform_tags())
+ for version in _py_interpreter_range(python_version):
+ for platform_ in platforms:
+ yield Tag(version, "none", platform_)
+ if interpreter:
+ yield Tag(interpreter, "none", "any")
+ for version in _py_interpreter_range(python_version):
+ yield Tag(version, "none", "any")
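+
+
+# Illustrative sketch (comment only): with python_version (3, 8) and a
+# single hypothetical platform "manylinux1_x86_64", compatible_tags yields
+# py38-none-manylinux1_x86_64, py3-none-manylinux1_x86_64,
+# py37-none-manylinux1_x86_64, ... down to py30, followed by
+# py38-none-any, py3-none-any, py37-none-any, ... down to py30-none-any.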
+
+
+def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
+ # type: (str, bool) -> str
+ if not is_32bit:
+ return arch
+
+ if arch.startswith("ppc"):
+ return "ppc"
+
+ return "i386"
+
+
+def _mac_binary_formats(version, cpu_arch):
+ # type: (MacVersion, str) -> List[str]
+ formats = [cpu_arch]
+ if cpu_arch == "x86_64":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat64", "fat32"])
+
+ elif cpu_arch == "i386":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat32", "fat"])
+
+ elif cpu_arch == "ppc64":
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4):
+ return []
+ formats.append("fat64")
+
+ elif cpu_arch == "ppc":
+ if version > (10, 6):
+ return []
+ formats.extend(["fat32", "fat"])
+
+ formats.append("universal")
+ return formats
+
+
+def mac_platforms(version=None, arch=None):
+ # type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
+ """
+ Yields the platform tags for a macOS system.
+
+ The `version` parameter is a two-item tuple specifying the macOS version to
+ generate platform tags for. The `arch` parameter is the CPU architecture to
+ generate platform tags for. Both parameters default to the appropriate value
+ for the current system.
+ """
+ version_str, _, cpu_arch = platform.mac_ver() # type: ignore
+ if version is None:
+ version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+ else:
+ version = version
+ if arch is None:
+ arch = _mac_arch(cpu_arch)
+ else:
+ arch = arch
+ for minor_version in range(version[1], -1, -1):
+ compat_version = version[0], minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
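+
+
+# Illustrative behaviour (comment only): on a hypothetical 10.15 x86_64
+# system, mac_platforms((10, 15), "x86_64") starts with
+# macosx_10_15_x86_64, macosx_10_15_intel, macosx_10_15_fat64,
+# macosx_10_15_fat32 and macosx_10_15_universal, then repeats the same
+# formats for 10.14, 10.13, ... down to 10.0.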
+
+
+# From PEP 513.
+def _is_manylinux_compatible(name, glibc_version):
+ # type: (str, GlibcVersion) -> bool
+ # Check for presence of _manylinux module.
+ try:
+ import _manylinux # noqa
+
+ return bool(getattr(_manylinux, name + "_compatible"))
+ except (ImportError, AttributeError):
+ # Fall through to heuristic check below.
+ pass
+
+ return _have_compatible_glibc(*glibc_version)
+
+
+def _glibc_version_string():
+ # type: () -> Optional[str]
+ # Returns glibc version string, or None if not using glibc.
+ return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _glibc_version_string_confstr():
+ # type: () -> Optional[str]
+ """
+ Primary implementation of glibc_version_string using os.confstr.
+ """
+ # os.confstr is quite a bit faster than ctypes.CDLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module.
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+ version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
+ "CS_GNU_LIBC_VERSION"
+ )
+ assert version_string is not None
+ _, version = version_string.split() # type: Tuple[str, str]
+ except (AssertionError, AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def _glibc_version_string_ctypes():
+ # type: () -> Optional[str]
+ """
+ Fallback implementation of glibc_version_string using ctypes.
+ """
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ #
+ # Note: typeshed is wrong here so we are ignoring this line.
+ process_namespace = ctypes.CDLL(None) # type: ignore
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str = gnu_get_libc_version() # type: str
+ # py2 / py3 compatibility:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+# Separated out from have_compatible_glibc for easier unit testing.
+def _check_glibc_version(version_str, required_major, minimum_minor):
+ # type: (str, int, int) -> bool
+ # Parse string and check against requested version.
+ #
+ # We use a regexp instead of str.split because we want to discard any
+ # random junk that might come after the minor version -- this might happen
+ # in patched/forked versions of glibc (e.g. Linaro's version of glibc
+ # uses version strings like "2.20-2014.11"). See gh-3588.
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+ if not m:
+ warnings.warn(
+ "Expected glibc version with 2 components major.minor,"
+ " got: %s" % version_str,
+ RuntimeWarning,
+ )
+ return False
+ return (
+ int(m.group("major")) == required_major
+ and int(m.group("minor")) >= minimum_minor
+ )
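+
+
+# Illustrative behaviour (comment only):
+# _check_glibc_version("2.20-2014.11", 2, 17) is True because the regexp
+# ignores the vendor suffix, while _check_glibc_version("2.12", 2, 17)
+# is False.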
+
+
+def _have_compatible_glibc(required_major, minimum_minor):
+ # type: (int, int) -> bool
+ version_str = _glibc_version_string()
+ if version_str is None:
+ return False
+ return _check_glibc_version(version_str, required_major, minimum_minor)
+
+
+# Python does not provide platform information at sufficient granularity to
+# identify the architecture of the running executable in some cases, so we
+# determine it dynamically by reading the information from the running
+# process. This only applies on Linux, which uses the ELF format.
+class _ELFFileHeader(object):
+ # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+ class _InvalidELFFileHeader(ValueError):
+ """
+ An invalid ELF file header was found.
+ """
+
+ ELF_MAGIC_NUMBER = 0x7F454C46
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ EM_386 = 3
+ EM_S390 = 22
+ EM_ARM = 40
+ EM_X86_64 = 62
+ EF_ARM_ABIMASK = 0xFF000000
+ EF_ARM_ABI_VER5 = 0x05000000
+ EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+ def __init__(self, file):
+ # type: (IO[bytes]) -> None
+ def unpack(fmt):
+ # type: (str) -> int
+ try:
+ (result,) = struct.unpack(
+ fmt, file.read(struct.calcsize(fmt))
+ ) # type: (int, )
+ except struct.error:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ return result
+
+ self.e_ident_magic = unpack(">I")
+ if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_class = unpack("B")
+ if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_data = unpack("B")
+ if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_version = unpack("B")
+ self.e_ident_osabi = unpack("B")
+ self.e_ident_abiversion = unpack("B")
+ self.e_ident_pad = file.read(7)
+ format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+ format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+ format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+ format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+ self.e_type = unpack(format_h)
+ self.e_machine = unpack(format_h)
+ self.e_version = unpack(format_i)
+ self.e_entry = unpack(format_p)
+ self.e_phoff = unpack(format_p)
+ self.e_shoff = unpack(format_p)
+ self.e_flags = unpack(format_i)
+ self.e_ehsize = unpack(format_h)
+ self.e_phentsize = unpack(format_h)
+ self.e_phnum = unpack(format_h)
+ self.e_shentsize = unpack(format_h)
+ self.e_shnum = unpack(format_h)
+ self.e_shstrndx = unpack(format_h)
+
+
+def _get_elf_header():
+ # type: () -> Optional[_ELFFileHeader]
+ try:
+ with open(sys.executable, "rb") as f:
+ elf_header = _ELFFileHeader(f)
+ except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+ return None
+ return elf_header
+
+
+def _is_linux_armhf():
+ # type: () -> bool
+ # hard-float ABI can be detected from the ELF header of the running
+ # process
+ # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_ARM
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+ ) == elf_header.EF_ARM_ABI_VER5
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+ ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+ return result
+
+
+def _is_linux_i686():
+ # type: () -> bool
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_386
+ return result
+
+
+def _have_compatible_manylinux_abi(arch):
+ # type: (str) -> bool
+ if arch == "armv7l":
+ return _is_linux_armhf()
+ if arch == "i686":
+ return _is_linux_i686()
+ return True
+
+
+def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
+ # type: (bool) -> Iterator[str]
+ linux = _normalize_string(distutils.util.get_platform())
+ if is_32bit:
+ if linux == "linux_x86_64":
+ linux = "linux_i686"
+ elif linux == "linux_aarch64":
+ linux = "linux_armv7l"
+ manylinux_support = []
+ _, arch = linux.split("_", 1)
+ if _have_compatible_manylinux_abi(arch):
+ if arch in {"x86_64", "i686", "aarch64", "armv7l", "ppc64", "ppc64le", "s390x"}:
+ manylinux_support.append(
+ ("manylinux2014", (2, 17))
+ ) # CentOS 7 w/ glibc 2.17 (PEP 599)
+ if arch in {"x86_64", "i686"}:
+ manylinux_support.append(
+ ("manylinux2010", (2, 12))
+ ) # CentOS 6 w/ glibc 2.12 (PEP 571)
+ manylinux_support.append(
+ ("manylinux1", (2, 5))
+ ) # CentOS 5 w/ glibc 2.5 (PEP 513)
+ manylinux_support_iter = iter(manylinux_support)
+ for name, glibc_version in manylinux_support_iter:
+ if _is_manylinux_compatible(name, glibc_version):
+ yield linux.replace("linux", name)
+ break
+ # Support for a later manylinux implies support for an earlier version.
+ for name, _ in manylinux_support_iter:
+ yield linux.replace("linux", name)
+ yield linux
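+
+# Editor's illustration (not upstream code): on a 64-bit x86 system, assuming
+# _is_manylinux_compatible reports a glibc >= 2.17, the generator above yields
+# tags in priority order:
+#
+#     manylinux2014_x86_64, manylinux2010_x86_64, manylinux1_x86_64, linux_x86_64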
+
+
+def _generic_platforms():
+ # type: () -> Iterator[str]
+ yield _normalize_string(distutils.util.get_platform())
+
+
+def _platform_tags():
+ # type: () -> Iterator[str]
+ """
+ Provides the platform tags for this installation.
+ """
+ if platform.system() == "Darwin":
+ return mac_platforms()
+ elif platform.system() == "Linux":
+ return _linux_platforms()
+ else:
+ return _generic_platforms()
+
+
+def interpreter_name():
+ # type: () -> str
+ """
+ Returns the name of the running interpreter.
+ """
+ try:
+ name = sys.implementation.name # type: ignore
+ except AttributeError: # pragma: no cover
+ # Python 2.7 compatibility.
+ name = platform.python_implementation().lower()
+ return INTERPRETER_SHORT_NAMES.get(name) or name
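+
+# Editor's illustration (not upstream code): assuming INTERPRETER_SHORT_NAMES
+# maps implementation names to their tag abbreviations, this gives e.g.:
+#
+#     interpreter_name()  # -> "cp" on CPython, "pp" on PyPy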
+
+
+def interpreter_version(**kwargs):
+ # type: (bool) -> str
+ """
+ Returns the version of the running interpreter.
+ """
+ warn = _warn_keyword_parameter("interpreter_version", kwargs)
+ version = _get_config_var("py_version_nodot", warn=warn)
+ if version:
+ version = str(version)
+ else:
+ version = _version_nodot(sys.version_info[:2])
+ return version
+
+
+def _version_nodot(version):
+ # type: (PythonVersion) -> str
+ if any(v >= 10 for v in version):
+ sep = "_"
+ else:
+ sep = ""
+ return sep.join(map(str, version))
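+
+# Editor's illustration (not upstream code): the separator is only introduced
+# once a component reaches two digits, since the tag would otherwise be
+# ambiguous:
+#
+#     _version_nodot((3, 9))   # -> "39"
+#     _version_nodot((3, 10))  # -> "3_10"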
+
+
+def sys_tags(**kwargs):
+ # type: (bool) -> Iterator[Tag]
+ """
+ Returns the sequence of tag triples for the running interpreter.
+
+ The order of the sequence corresponds to priority order for the
+ interpreter, from most to least important.
+ """
+ warn = _warn_keyword_parameter("sys_tags", kwargs)
+
+ interp_name = interpreter_name()
+ if interp_name == "cp":
+ for tag in cpython_tags(warn=warn):
+ yield tag
+ else:
+ for tag in generic_tags():
+ yield tag
+
+ for tag in compatible_tags():
+ yield tag
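+
+# Editor's illustration (not upstream code): on CPython the first tag yielded
+# by sys_tags() is the most specific one, e.g. something like
+# cp38-cp38-manylinux2014_x86_64 on a 64-bit Linux CPython 3.8.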
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/utils.py b/third_party/python/setuptools/setuptools/_vendor/packaging/utils.py
new file mode 100644
index 0000000000..19579c1a0f
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/utils.py
@@ -0,0 +1,65 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import re
+
+from ._typing import TYPE_CHECKING, cast
+from .version import InvalidVersion, Version
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import NewType, Union
+
+ NormalizedName = NewType("NormalizedName", str)
+
+_canonicalize_regex = re.compile(r"[-_.]+")
+
+
+def canonicalize_name(name):
+ # type: (str) -> NormalizedName
+ # This is taken from PEP 503.
+ value = _canonicalize_regex.sub("-", name).lower()
+ return cast("NormalizedName", value)
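+
+# Editor's illustration (not upstream code): runs of "-", "_" and "." collapse
+# to a single "-" and the result is lowercased, per PEP 503:
+#
+#     canonicalize_name("Foo.Bar_-_Baz")  # -> "foo-bar-baz"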
+
+
+def canonicalize_version(_version):
+ # type: (str) -> Union[Version, str]
+ """
+ This is very similar to Version.__str__, but has one subtle difference
+ with the way it handles the release segment.
+ """
+
+ try:
+ version = Version(_version)
+ except InvalidVersion:
+ # Legacy versions cannot be normalized
+ return _version
+
+ parts = []
+
+ # Epoch
+ if version.epoch != 0:
+ parts.append("{0}!".format(version.epoch))
+
+ # Release segment
+ # NB: This strips trailing '.0's to normalize
+ parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in version.release)))
+
+ # Pre-release
+ if version.pre is not None:
+ parts.append("".join(str(x) for x in version.pre))
+
+ # Post-release
+ if version.post is not None:
+ parts.append(".post{0}".format(version.post))
+
+ # Development release
+ if version.dev is not None:
+ parts.append(".dev{0}".format(version.dev))
+
+ # Local version segment
+ if version.local is not None:
+ parts.append("+{0}".format(version.local))
+
+ return "".join(parts)
diff --git a/third_party/python/setuptools/setuptools/_vendor/packaging/version.py b/third_party/python/setuptools/setuptools/_vendor/packaging/version.py
new file mode 100644
index 0000000000..00371e86a8
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/packaging/version.py
@@ -0,0 +1,535 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+from __future__ import absolute_import, division, print_function
+
+import collections
+import itertools
+import re
+
+from ._structures import Infinity, NegativeInfinity
+from ._typing import TYPE_CHECKING
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union
+
+ from ._structures import InfinityType, NegativeInfinityType
+
+ InfiniteTypes = Union[InfinityType, NegativeInfinityType]
+ PrePostDevType = Union[InfiniteTypes, Tuple[str, int]]
+ SubLocalType = Union[InfiniteTypes, int, str]
+ LocalType = Union[
+ NegativeInfinityType,
+ Tuple[
+ Union[
+ SubLocalType,
+ Tuple[SubLocalType, str],
+ Tuple[NegativeInfinityType, SubLocalType],
+ ],
+ ...,
+ ],
+ ]
+ CmpKey = Tuple[
+ int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType
+ ]
+ LegacyCmpKey = Tuple[int, Tuple[str, ...]]
+ VersionComparisonMethod = Callable[
+ [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool
+ ]
+
+__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"]
+
+
+_Version = collections.namedtuple(
+ "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version):
+ # type: (str) -> Union[LegacyVersion, Version]
+ """
+ Parse the given version string and return either a :class:`Version` object
+ or a :class:`LegacyVersion` object depending on if the given version is
+ a valid PEP 440 version or a legacy version.
+ """
+ try:
+ return Version(version)
+ except InvalidVersion:
+ return LegacyVersion(version)
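+
+# Editor's illustration (not upstream code):
+#
+#     parse("1.0.post1")     # -> <Version('1.0.post1')>
+#     parse("french toast")  # -> <LegacyVersion('french toast')>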
+
+
+class InvalidVersion(ValueError):
+ """
+ An invalid version was found, users should refer to PEP 440.
+ """
+
+
+class _BaseVersion(object):
+ _key = None # type: Union[CmpKey, LegacyCmpKey]
+
+ def __hash__(self):
+ # type: () -> int
+ return hash(self._key)
+
+ def __lt__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s < o)
+
+ def __le__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s <= o)
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ return self._compare(other, lambda s, o: s == o)
+
+ def __ge__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s >= o)
+
+ def __gt__(self, other):
+ # type: (_BaseVersion) -> bool
+ return self._compare(other, lambda s, o: s > o)
+
+ def __ne__(self, other):
+ # type: (object) -> bool
+ return self._compare(other, lambda s, o: s != o)
+
+ def _compare(self, other, method):
+ # type: (object, VersionComparisonMethod) -> Union[bool, NotImplemented]
+ if not isinstance(other, _BaseVersion):
+ return NotImplemented
+
+ return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+ def __init__(self, version):
+ # type: (str) -> None
+ self._version = str(version)
+ self._key = _legacy_cmpkey(self._version)
+
+ def __str__(self):
+ # type: () -> str
+ return self._version
+
+ def __repr__(self):
+ # type: () -> str
+ return "<LegacyVersion({0})>".format(repr(str(self)))
+
+ @property
+ def public(self):
+ # type: () -> str
+ return self._version
+
+ @property
+ def base_version(self):
+ # type: () -> str
+ return self._version
+
+ @property
+ def epoch(self):
+ # type: () -> int
+ return -1
+
+ @property
+ def release(self):
+ # type: () -> None
+ return None
+
+ @property
+ def pre(self):
+ # type: () -> None
+ return None
+
+ @property
+ def post(self):
+ # type: () -> None
+ return None
+
+ @property
+ def dev(self):
+ # type: () -> None
+ return None
+
+ @property
+ def local(self):
+ # type: () -> None
+ return None
+
+ @property
+ def is_prerelease(self):
+ # type: () -> bool
+ return False
+
+ @property
+ def is_postrelease(self):
+ # type: () -> bool
+ return False
+
+ @property
+ def is_devrelease(self):
+ # type: () -> bool
+ return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+ "pre": "c",
+ "preview": "c",
+ "-": "final-",
+ "rc": "c",
+ "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+ # type: (str) -> Iterator[str]
+ for part in _legacy_version_component_re.split(s):
+ part = _legacy_version_replacement_map.get(part, part)
+
+ if not part or part == ".":
+ continue
+
+ if part[:1] in "0123456789":
+ # pad for numeric comparison
+ yield part.zfill(8)
+ else:
+ yield "*" + part
+
+ # ensure that alpha/beta/candidate are before final
+ yield "*final"
+
+
+def _legacy_cmpkey(version):
+ # type: (str) -> LegacyCmpKey
+
+ # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+ # greater than or equal to 0. This effectively sorts every LegacyVersion,
+ # which uses the de facto standard originally implemented by setuptools,
+ # before all PEP 440 versions.
+ epoch = -1
+
+ # This scheme is taken from pkg_resources.parse_version as it existed in
+ # setuptools prior to its adoption of the packaging library.
+ parts = [] # type: List[str]
+ for part in _parse_version_parts(version.lower()):
+ if part.startswith("*"):
+ # remove "-" before a prerelease tag
+ if part < "*final":
+ while parts and parts[-1] == "*final-":
+ parts.pop()
+
+ # remove trailing zeros from each series of numeric parts
+ while parts and parts[-1] == "00000000":
+ parts.pop()
+
+ parts.append(part)
+
+ return epoch, tuple(parts)
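+
+# Editor's illustration (not upstream code): numeric parts are zero-padded so
+# that string comparison behaves numerically, and trailing zero groups are
+# dropped when the "*final" marker is reached:
+#
+#     _legacy_cmpkey("1.0")  # -> (-1, ('00000001', '*final'))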
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+ v?
+ (?:
+ (?:(?P<epoch>[0-9]+)!)? # epoch
+ (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment
+ (?P<pre> # pre-release
+ [-_\.]?
+ (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+ [-_\.]?
+ (?P<pre_n>[0-9]+)?
+ )?
+ (?P<post> # post release
+ (?:-(?P<post_n1>[0-9]+))
+ |
+ (?:
+ [-_\.]?
+ (?P<post_l>post|rev|r)
+ [-_\.]?
+ (?P<post_n2>[0-9]+)?
+ )
+ )?
+ (?P<dev> # dev release
+ [-_\.]?
+ (?P<dev_l>dev)
+ [-_\.]?
+ (?P<dev_n>[0-9]+)?
+ )?
+ )
+ (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? # local version
+"""
+
+
+class Version(_BaseVersion):
+
+ _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+ def __init__(self, version):
+ # type: (str) -> None
+
+ # Validate the version and parse it into pieces
+ match = self._regex.search(version)
+ if not match:
+ raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+ # Store the parsed out pieces of the version
+ self._version = _Version(
+ epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+ release=tuple(int(i) for i in match.group("release").split(".")),
+ pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+ post=_parse_letter_version(
+ match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+ ),
+ dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+ local=_parse_local_version(match.group("local")),
+ )
+
+ # Generate a key which will be used for sorting
+ self._key = _cmpkey(
+ self._version.epoch,
+ self._version.release,
+ self._version.pre,
+ self._version.post,
+ self._version.dev,
+ self._version.local,
+ )
+
+ def __repr__(self):
+ # type: () -> str
+ return "<Version({0})>".format(repr(str(self)))
+
+ def __str__(self):
+ # type: () -> str
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append("{0}!".format(self.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ # Pre-release
+ if self.pre is not None:
+ parts.append("".join(str(x) for x in self.pre))
+
+ # Post-release
+ if self.post is not None:
+ parts.append(".post{0}".format(self.post))
+
+ # Development release
+ if self.dev is not None:
+ parts.append(".dev{0}".format(self.dev))
+
+ # Local version segment
+ if self.local is not None:
+ parts.append("+{0}".format(self.local))
+
+ return "".join(parts)
+
+ @property
+ def epoch(self):
+ # type: () -> int
+ _epoch = self._version.epoch # type: int
+ return _epoch
+
+ @property
+ def release(self):
+ # type: () -> Tuple[int, ...]
+ _release = self._version.release # type: Tuple[int, ...]
+ return _release
+
+ @property
+ def pre(self):
+ # type: () -> Optional[Tuple[str, int]]
+ _pre = self._version.pre # type: Optional[Tuple[str, int]]
+ return _pre
+
+ @property
+ def post(self):
+ # type: () -> Optional[int]
+ return self._version.post[1] if self._version.post else None
+
+ @property
+ def dev(self):
+ # type: () -> Optional[int]
+ return self._version.dev[1] if self._version.dev else None
+
+ @property
+ def local(self):
+ # type: () -> Optional[str]
+ if self._version.local:
+ return ".".join(str(x) for x in self._version.local)
+ else:
+ return None
+
+ @property
+ def public(self):
+ # type: () -> str
+ return str(self).split("+", 1)[0]
+
+ @property
+ def base_version(self):
+ # type: () -> str
+ parts = []
+
+ # Epoch
+ if self.epoch != 0:
+ parts.append("{0}!".format(self.epoch))
+
+ # Release segment
+ parts.append(".".join(str(x) for x in self.release))
+
+ return "".join(parts)
+
+ @property
+ def is_prerelease(self):
+ # type: () -> bool
+ return self.dev is not None or self.pre is not None
+
+ @property
+ def is_postrelease(self):
+ # type: () -> bool
+ return self.post is not None
+
+ @property
+ def is_devrelease(self):
+ # type: () -> bool
+ return self.dev is not None
+
+ @property
+ def major(self):
+ # type: () -> int
+ return self.release[0] if len(self.release) >= 1 else 0
+
+ @property
+ def minor(self):
+ # type: () -> int
+ return self.release[1] if len(self.release) >= 2 else 0
+
+ @property
+ def micro(self):
+ # type: () -> int
+ return self.release[2] if len(self.release) >= 3 else 0
+
+
+def _parse_letter_version(
+ letter, # type: str
+ number, # type: Union[str, bytes, SupportsInt]
+):
+ # type: (...) -> Optional[Tuple[str, int]]
+
+ if letter:
+ # We consider there to be an implicit 0 in a pre-release if there is
+ # not a numeral associated with it.
+ if number is None:
+ number = 0
+
+ # We normalize any letters to their lower case form
+ letter = letter.lower()
+
+ # We consider some words to be alternate spellings of other words and
+ # in those cases we want to normalize the spellings to our preferred
+ # spelling.
+ if letter == "alpha":
+ letter = "a"
+ elif letter == "beta":
+ letter = "b"
+ elif letter in ["c", "pre", "preview"]:
+ letter = "rc"
+ elif letter in ["rev", "r"]:
+ letter = "post"
+
+ return letter, int(number)
+ if not letter and number:
+ # We assume if we are given a number, but we are not given a letter
+ # then this is using the implicit post release syntax (e.g. 1.0-1)
+ letter = "post"
+
+ return letter, int(number)
+
+ return None
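+
+# Editor's illustration (not upstream code): alternate spellings are
+# normalized, and a bare number is treated as an implicit post release:
+#
+#     _parse_letter_version("alpha", None)  # -> ("a", 0)
+#     _parse_letter_version(None, "1")      # -> ("post", 1)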
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+ # type: (str) -> Optional[LocalType]
+ """
+ Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+ """
+ if local is not None:
+ return tuple(
+ part.lower() if not part.isdigit() else int(part)
+ for part in _local_version_separators.split(local)
+ )
+ return None
+
+
+def _cmpkey(
+ epoch, # type: int
+ release, # type: Tuple[int, ...]
+ pre, # type: Optional[Tuple[str, int]]
+ post, # type: Optional[Tuple[str, int]]
+ dev, # type: Optional[Tuple[str, int]]
+ local, # type: Optional[Tuple[SubLocalType]]
+):
+ # type: (...) -> CmpKey
+
+ # When we compare a release version, we want to compare it with all of the
+ # trailing zeros removed. So we'll reverse the list, drop all of the now
+ # leading zeros until we come to something non-zero, then re-reverse the
+ # rest back into the correct order, and make it a tuple to use as our
+ # sorting key.
+ _release = tuple(
+ reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+ )
+
+ # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+ # We'll do this by abusing the pre segment, but we _only_ want to do this
+ # if there is not a pre or a post segment. If we have one of those then
+ # the normal sorting rules will handle this case correctly.
+ if pre is None and post is None and dev is not None:
+ _pre = NegativeInfinity # type: PrePostDevType
+ # Versions without a pre-release (except as noted above) should sort after
+ # those with one.
+ elif pre is None:
+ _pre = Infinity
+ else:
+ _pre = pre
+
+ # Versions without a post segment should sort before those with one.
+ if post is None:
+ _post = NegativeInfinity # type: PrePostDevType
+
+ else:
+ _post = post
+
+ # Versions without a development segment should sort after those with one.
+ if dev is None:
+ _dev = Infinity # type: PrePostDevType
+
+ else:
+ _dev = dev
+
+ if local is None:
+ # Versions without a local segment should sort before those with one.
+ _local = NegativeInfinity # type: LocalType
+ else:
+ # Versions with a local segment need that segment parsed to implement
+ # the sorting rules in PEP440.
+ # - Alpha numeric segments sort before numeric segments
+ # - Alpha numeric segments sort lexicographically
+ # - Numeric segments sort numerically
+ # - Shorter versions sort before longer versions when the prefixes
+ # match exactly
+ _local = tuple(
+ (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+ )
+
+ return epoch, _release, _pre, _post, _dev, _local
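+
+# Editor's illustration (not upstream code): the Infinity/NegativeInfinity
+# sentinels give the ordering that PEP 440 requires, e.g.:
+#
+#     Version("1.0.dev0") < Version("1.0a1") < Version("1.0") < Version("1.0.post1")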
diff --git a/third_party/python/setuptools/setuptools/_vendor/pyparsing.py b/third_party/python/setuptools/setuptools/_vendor/pyparsing.py
new file mode 100644
index 0000000000..cf75e1e5fc
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/_vendor/pyparsing.py
@@ -0,0 +1,5742 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2018 Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = \
+"""
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and executing simple grammars,
+vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
+don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
+provides a library of classes that you use to construct the grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
+(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
+L{Literal} expressions)::
+
+ from pyparsing import Word, alphas
+
+ # define grammar of a greeting
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+
+The program outputs the following::
+
+ Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the self-explanatory
+class names, and the use of '+', '|' and '^' operators.
+
+The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
+object with named attributes.
+
+The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
+ - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
+ - quoted strings
+ - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes L{ParserElement} and L{ParseResults} to see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+ - construct literal match expressions from L{Literal} and L{CaselessLiteral} classes
+ - construct character word-group expressions using the L{Word} class
+ - see how to create repetitive expressions using L{ZeroOrMore} and L{OneOrMore} classes
+ - use L{'+'<And>}, L{'|'<MatchFirst>}, L{'^'<Or>}, and L{'&'<Each>} operators to combine simple expressions into more complex ones
+ - associate names with your parsed results using L{ParserElement.setResultsName}
+ - find some helpful expression short-cuts like L{delimitedList} and L{oneOf}
+ - find more useful common expressions in the L{pyparsing_common} namespace class
+"""
+
+__version__ = "2.2.1"
+__versionTime__ = "18 Sep 2018 00:49 UTC"
+__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>"
+
+import string
+from weakref import ref as wkref
+import copy
+import sys
+import warnings
+import re
+import sre_constants
+import collections
+import pprint
+import traceback
+import types
+from datetime import datetime
+
+try:
+ from _thread import RLock
+except ImportError:
+ from threading import RLock
+
+try:
+ # Python 3
+ from collections.abc import Iterable
+ from collections.abc import MutableMapping
+except ImportError:
+ # Python 2.7
+ from collections import Iterable
+ from collections import MutableMapping
+
+try:
+ from collections import OrderedDict as _OrderedDict
+except ImportError:
+ try:
+ from ordereddict import OrderedDict as _OrderedDict
+ except ImportError:
+ _OrderedDict = None
+
+#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
+
+__all__ = [
+'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
+'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
+'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
+'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
+'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
+'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
+'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
+'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
+'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
+'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
+'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
+'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
+'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
+'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
+'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
+'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
+'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
+'CloseMatch', 'tokenMap', 'pyparsing_common',
+]
+
+system_version = tuple(sys.version_info)[:3]
+PY_3 = system_version[0] == 3
+if PY_3:
+ _MAX_INT = sys.maxsize
+ basestring = str
+ unichr = chr
+ _ustr = str
+
+ # build list of single arg builtins, that can be used as parse actions
+ singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
+
+else:
+ _MAX_INT = sys.maxint
+ range = xrange
+
+ def _ustr(obj):
+ """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
+ str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
+ then < returns the unicode object | encodes it with the default encoding | ... >.
+ """
+ if isinstance(obj,unicode):
+ return obj
+
+ try:
+ # If this works, then _ustr(obj) has the same behaviour as str(obj), so
+ # it won't break any existing code.
+ return str(obj)
+
+ except UnicodeEncodeError:
+ # Else encode it
+ ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
+ xmlcharref = Regex(r'&#\d+;')
+ xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
+ return xmlcharref.transformString(ret)
+
+ # build list of single arg builtins, tolerant of Python version, that can be used as parse actions
+ singleArgBuiltins = []
+ import __builtin__
+ for fname in "sum len sorted reversed list tuple set any all min max".split():
+ try:
+ singleArgBuiltins.append(getattr(__builtin__,fname))
+ except AttributeError:
+ continue
+
+_generatorType = type((y for y in range(1)))
+
+def _xml_escape(data):
+ """Escape &, <, >, ", ', etc. in a string of data."""
+
+ # ampersand must be replaced first
+ from_symbols = '&><"\''
+ to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
+ for from_,to_ in zip(from_symbols, to_symbols):
+ data = data.replace(from_, to_)
+ return data
+
+class _Constants(object):
+ pass
+
+alphas = string.ascii_uppercase + string.ascii_lowercase
+nums = "0123456789"
+hexnums = nums + "ABCDEFabcdef"
+alphanums = alphas + nums
+_bslash = chr(92)
+printables = "".join(c for c in string.printable if c not in string.whitespace)
+
+class ParseBaseException(Exception):
+ """base exception class for all parsing runtime exceptions"""
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__( self, pstr, loc=0, msg=None, elem=None ):
+ self.loc = loc
+ if msg is None:
+ self.msg = pstr
+ self.pstr = ""
+ else:
+ self.msg = msg
+ self.pstr = pstr
+ self.parserElement = elem
+ self.args = (pstr, loc, msg)
+
+ @classmethod
+ def _from_exception(cls, pe):
+ """
+ internal factory method to simplify creating one type of ParseException
+ from another - avoids having __init__ signature conflicts among subclasses
+ """
+ return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
+
+ def __getattr__( self, aname ):
+ """supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+ """
+ if( aname == "lineno" ):
+ return lineno( self.loc, self.pstr )
+ elif( aname in ("col", "column") ):
+ return col( self.loc, self.pstr )
+ elif( aname == "line" ):
+ return line( self.loc, self.pstr )
+ else:
+ raise AttributeError(aname)
+
+ def __str__( self ):
+ return "%s (at char %d), (line:%d, col:%d)" % \
+ ( self.msg, self.loc, self.lineno, self.column )
+ def __repr__( self ):
+ return _ustr(self)
+ def markInputline( self, markerString = ">!<" ):
+ """Extracts the exception line from the input string, and marks
+ the location of the exception with a special symbol.
+ """
+ line_str = self.line
+ line_column = self.column - 1
+ if markerString:
+ line_str = "".join((line_str[:line_column],
+ markerString, line_str[line_column:]))
+ return line_str.strip()
+ def __dir__(self):
+ return "lineno col line".split() + dir(type(self))
+
+class ParseException(ParseBaseException):
+ """
+ Exception thrown when parse expressions don't match the input string;
+ supported attributes by name are:
+ - lineno - returns the line number of the exception text
+ - col - returns the column number of the exception text
+ - line - returns the line containing the exception text
+
+ Example::
+ try:
+ Word(nums).setName("integer").parseString("ABC")
+ except ParseException as pe:
+ print(pe)
+ print("column: {}".format(pe.col))
+
+ prints::
+ Expected integer (at char 0), (line:1, col:1)
+ column: 1
+ """
+ pass
+
+class ParseFatalException(ParseBaseException):
+ """user-throwable exception thrown when inconsistent parse content
+ is found; stops all parsing immediately"""
+ pass
+
+class ParseSyntaxException(ParseFatalException):
+ """just like L{ParseFatalException}, but thrown internally when an
+ L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
+ immediately because an unbacktrackable syntax error has been found"""
+ pass
+
+#~ class ReparseException(ParseBaseException):
+ #~ """Experimental class - parse actions can raise this exception to cause
+ #~ pyparsing to reparse the input string:
+ #~ - with a modified input string, and/or
+ #~ - with a modified start location
+ #~ Set the values of the ReparseException in the constructor, and raise the
+ #~ exception in a parse action to cause pyparsing to use the new string/location.
+ #~ Setting the values as None causes no change to be made.
+ #~ """
+ #~ def __init_( self, newstring, restartLoc ):
+ #~ self.newParseText = newstring
+ #~ self.reparseLoc = restartLoc
+
+class RecursiveGrammarException(Exception):
+ """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
+ def __init__( self, parseElementList ):
+ self.parseElementTrace = parseElementList
+
+ def __str__( self ):
+ return "RecursiveGrammarException: %s" % self.parseElementTrace
+
+class _ParseResultsWithOffset(object):
+ def __init__(self,p1,p2):
+ self.tup = (p1,p2)
+ def __getitem__(self,i):
+ return self.tup[i]
+ def __repr__(self):
+ return repr(self.tup[0])
+ def setOffset(self,i):
+ self.tup = (self.tup[0],i)
+
+class ParseResults(object):
+ """
+ Structured parse results, to provide multiple means of access to the parsed data:
+ - as a list (C{len(results)})
+ - by list index (C{results[0], results[1]}, etc.)
+ - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
+
+ Example::
+ integer = Word(nums)
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+ # equivalent form:
+ # date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ # parseString returns a ParseResults object
+ result = date_str.parseString("1999/12/31")
+
+ def test(s, fn=repr):
+ print("%s -> %s" % (s, fn(eval(s))))
+ test("list(result)")
+ test("result[0]")
+ test("result['month']")
+ test("result.day")
+ test("'month' in result")
+ test("'minutes' in result")
+ test("result.dump()", str)
+ prints::
+ list(result) -> ['1999', '/', '12', '/', '31']
+ result[0] -> '1999'
+ result['month'] -> '12'
+ result.day -> '31'
+ 'month' in result -> True
+ 'minutes' in result -> False
+ result.dump() -> ['1999', '/', '12', '/', '31']
+ - day: 31
+ - month: 12
+ - year: 1999
+ """
+ def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
+ if isinstance(toklist, cls):
+ return toklist
+ retobj = object.__new__(cls)
+ retobj.__doinit = True
+ return retobj
+
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
+ if self.__doinit:
+ self.__doinit = False
+ self.__name = None
+ self.__parent = None
+ self.__accumNames = {}
+ self.__asList = asList
+ self.__modal = modal
+ if toklist is None:
+ toklist = []
+ if isinstance(toklist, list):
+ self.__toklist = toklist[:]
+ elif isinstance(toklist, _generatorType):
+ self.__toklist = list(toklist)
+ else:
+ self.__toklist = [toklist]
+ self.__tokdict = dict()
+
+ if name is not None and name:
+ if not modal:
+ self.__accumNames[name] = 0
+ if isinstance(name,int):
+ name = _ustr(name) # will always return a str, but use _ustr for consistency
+ self.__name = name
+ if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
+ if isinstance(toklist,basestring):
+ toklist = [ toklist ]
+ if asList:
+ if isinstance(toklist,ParseResults):
+ self[name] = _ParseResultsWithOffset(toklist.copy(),0)
+ else:
+ self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
+ self[name].__name = name
+ else:
+ try:
+ self[name] = toklist[0]
+ except (KeyError,TypeError,IndexError):
+ self[name] = toklist
+
+ def __getitem__( self, i ):
+ if isinstance( i, (int,slice) ):
+ return self.__toklist[i]
+ else:
+ if i not in self.__accumNames:
+ return self.__tokdict[i][-1][0]
+ else:
+ return ParseResults([ v[0] for v in self.__tokdict[i] ])
+
+ def __setitem__( self, k, v, isinstance=isinstance ):
+ if isinstance(v,_ParseResultsWithOffset):
+ self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
+ sub = v[0]
+ elif isinstance(k,(int,slice)):
+ self.__toklist[k] = v
+ sub = v
+ else:
+ self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
+ sub = v
+ if isinstance(sub,ParseResults):
+ sub.__parent = wkref(self)
+
+ def __delitem__( self, i ):
+ if isinstance(i,(int,slice)):
+ mylen = len( self.__toklist )
+ del self.__toklist[i]
+
+ # convert int to slice
+ if isinstance(i, int):
+ if i < 0:
+ i += mylen
+ i = slice(i, i+1)
+ # get removed indices
+ removed = list(range(*i.indices(mylen)))
+ removed.reverse()
+ # fixup indices in token dictionary
+ for name,occurrences in self.__tokdict.items():
+ for j in removed:
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
+ else:
+ del self.__tokdict[i]
+
+ def __contains__( self, k ):
+ return k in self.__tokdict
+
+ def __len__( self ): return len( self.__toklist )
+ def __bool__(self): return ( not not self.__toklist )
+ __nonzero__ = __bool__
+ def __iter__( self ): return iter( self.__toklist )
+ def __reversed__( self ): return iter( self.__toklist[::-1] )
+ def _iterkeys( self ):
+ if hasattr(self.__tokdict, "iterkeys"):
+ return self.__tokdict.iterkeys()
+ else:
+ return iter(self.__tokdict)
+
+ def _itervalues( self ):
+ return (self[k] for k in self._iterkeys())
+
+ def _iteritems( self ):
+ return ((k, self[k]) for k in self._iterkeys())
+
+ if PY_3:
+ keys = _iterkeys
+ """Returns an iterator of all named result keys (Python 3.x only)."""
+
+ values = _itervalues
+ """Returns an iterator of all named result values (Python 3.x only)."""
+
+ items = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 3.x only)."""
+
+ else:
+ iterkeys = _iterkeys
+ """Returns an iterator of all named result keys (Python 2.x only)."""
+
+ itervalues = _itervalues
+ """Returns an iterator of all named result values (Python 2.x only)."""
+
+ iteritems = _iteritems
+ """Returns an iterator of all named result key-value tuples (Python 2.x only)."""
+
+ def keys( self ):
+ """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iterkeys())
+
+ def values( self ):
+ """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.itervalues())
+
+ def items( self ):
+ """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
+ return list(self.iteritems())
+
+ def haskeys( self ):
+ """Since keys() returns an iterator, this method is helpful in bypassing
+ code that looks for the existence of any defined results names."""
+ return bool(self.__tokdict)
+
+ def pop( self, *args, **kwargs):
+ """
+ Removes and returns item at specified index (default=C{last}).
+ Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
+ argument or an integer argument, it will use C{list} semantics
+ and pop tokens from the list of parsed tokens. If passed a
+ non-integer argument (most likely a string), it will use C{dict}
+ semantics and pop the corresponding value from any defined
+ results names. A second default return value argument is
+ supported, just as in C{dict.pop()}.
+
+ Example::
+ def remove_first(tokens):
+ tokens.pop(0)
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+ print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
+
+ label = Word(alphas)
+ patt = label("LABEL") + OneOrMore(Word(nums))
+ print(patt.parseString("AAB 123 321").dump())
+
+ # Use pop() in a parse action to remove named result (note that corresponding value is not
+ # removed from list form of results)
+ def remove_LABEL(tokens):
+ tokens.pop("LABEL")
+ return tokens
+ patt.addParseAction(remove_LABEL)
+ print(patt.parseString("AAB 123 321").dump())
+ prints::
+ ['AAB', '123', '321']
+ - LABEL: AAB
+
+ ['AAB', '123', '321']
+ """
+ if not args:
+ args = [-1]
+ for k,v in kwargs.items():
+ if k == 'default':
+ args = (args[0], v)
+ else:
+ raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
+ if (isinstance(args[0], int) or
+ len(args) == 1 or
+ args[0] in self):
+ index = args[0]
+ ret = self[index]
+ del self[index]
+ return ret
+ else:
+ defaultvalue = args[1]
+ return defaultvalue
+
+ def get(self, key, defaultValue=None):
+ """
+ Returns named result matching the given key, or if there is no
+ such name, then returns the given C{defaultValue} or C{None} if no
+ C{defaultValue} is specified.
+
+ Similar to C{dict.get()}.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString("1999/12/31")
+ print(result.get("year")) # -> '1999'
+ print(result.get("hour", "not specified")) # -> 'not specified'
+ print(result.get("hour")) # -> None
+ """
+ if key in self:
+ return self[key]
+ else:
+ return defaultValue
+
+ def insert( self, index, insStr ):
+ """
+ Inserts new element at location index in the list of parsed tokens.
+
+ Similar to C{list.insert()}.
+
+ Example::
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to insert the parse location in the front of the parsed results
+ def insert_locn(locn, tokens):
+ tokens.insert(0, locn)
+ print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
+ """
+ self.__toklist.insert(index, insStr)
+ # fixup indices in token dictionary
+ for name,occurrences in self.__tokdict.items():
+ for k, (value, position) in enumerate(occurrences):
+ occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
+
+ def append( self, item ):
+ """
+ Add single element to end of ParseResults list of elements.
+
+ Example::
+ print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
+
+ # use a parse action to compute the sum of the parsed integers, and add it to the end
+ def append_sum(tokens):
+ tokens.append(sum(map(int, tokens)))
+ print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
+ """
+ self.__toklist.append(item)
+
+ def extend( self, itemseq ):
+ """
+ Add sequence of elements to end of ParseResults list of elements.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+
+ # use a parse action to append the reverse of the matched strings, to make a palindrome
+ def make_palindrome(tokens):
+ tokens.extend(reversed([t[::-1] for t in tokens]))
+ return ''.join(tokens)
+ print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
+ """
+ if isinstance(itemseq, ParseResults):
+ self += itemseq
+ else:
+ self.__toklist.extend(itemseq)
+
+ def clear( self ):
+ """
+ Clear all elements and results names.
+ """
+ del self.__toklist[:]
+ self.__tokdict.clear()
+
+ def __getattr__( self, name ):
+ try:
+ return self[name]
+ except KeyError:
+ return ""
+
+ def __add__( self, other ):
+ ret = self.copy()
+ ret += other
+ return ret
+
+ def __iadd__( self, other ):
+ if other.__tokdict:
+ offset = len(self.__toklist)
+ addoffset = lambda a: offset if a<0 else a+offset
+ otheritems = other.__tokdict.items()
+ otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
+ for (k,vlist) in otheritems for v in vlist]
+ for k,v in otherdictitems:
+ self[k] = v
+ if isinstance(v[0],ParseResults):
+ v[0].__parent = wkref(self)
+
+ self.__toklist += other.__toklist
+ self.__accumNames.update( other.__accumNames )
+ return self
+
+ def __radd__(self, other):
+ if isinstance(other,int) and other == 0:
+ # useful for merging many ParseResults using sum() builtin
+ return self.copy()
+ else:
+ # this may raise a TypeError - so be it
+ return other + self
+
+ def __repr__( self ):
+ return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
+
+ def __str__( self ):
+ return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
+
+ def _asStringList( self, sep='' ):
+ out = []
+ for item in self.__toklist:
+ if out and sep:
+ out.append(sep)
+ if isinstance( item, ParseResults ):
+ out += item._asStringList()
+ else:
+ out.append( _ustr(item) )
+ return out
+
+ def asList( self ):
+ """
+ Returns the parse results as a nested list of matching tokens, all converted to strings.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+ result = patt.parseString("sldkj lsdkj sldkj")
+ # even though the result prints in string-like form, it is actually a pyparsing ParseResults
+ print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
+
+ # Use asList() to create an actual list
+ result_list = result.asList()
+ print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
+ """
+ return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
+
+ def asDict( self ):
+ """
+ Returns the named parse results as a nested dictionary.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
+
+ result_dict = result.asDict()
+ print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
+
+ # even though a ParseResults supports dict-like access, sometimes you just need to have a dict
+ import json
+ print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
+ print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
+ """
+ if PY_3:
+ item_fn = self.items
+ else:
+ item_fn = self.iteritems
+
+ def toItem(obj):
+ if isinstance(obj, ParseResults):
+ if obj.haskeys():
+ return obj.asDict()
+ else:
+ return [toItem(v) for v in obj]
+ else:
+ return obj
+
+ return dict((k,toItem(v)) for k,v in item_fn())
+
+ def copy( self ):
+ """
+ Returns a new copy of a C{ParseResults} object.
+ """
+ ret = ParseResults( self.__toklist )
+ ret.__tokdict = self.__tokdict.copy()
+ ret.__parent = self.__parent
+ ret.__accumNames.update( self.__accumNames )
+ ret.__name = self.__name
+ return ret
+
+ def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
+ """
+ (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
+ """
+ nl = "\n"
+ out = []
+ namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
+ for v in vlist)
+ nextLevelIndent = indent + " "
+
+ # collapse out indents if formatting is not desired
+ if not formatted:
+ indent = ""
+ nextLevelIndent = ""
+ nl = ""
+
+ selfTag = None
+ if doctag is not None:
+ selfTag = doctag
+ else:
+ if self.__name:
+ selfTag = self.__name
+
+ if not selfTag:
+ if namedItemsOnly:
+ return ""
+ else:
+ selfTag = "ITEM"
+
+ out += [ nl, indent, "<", selfTag, ">" ]
+
+ for i,res in enumerate(self.__toklist):
+ if isinstance(res,ParseResults):
+ if i in namedItems:
+ out += [ res.asXML(namedItems[i],
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ out += [ res.asXML(None,
+ namedItemsOnly and doctag is None,
+ nextLevelIndent,
+ formatted)]
+ else:
+ # individual token, see if there is a name for it
+ resTag = None
+ if i in namedItems:
+ resTag = namedItems[i]
+ if not resTag:
+ if namedItemsOnly:
+ continue
+ else:
+ resTag = "ITEM"
+ xmlBodyText = _xml_escape(_ustr(res))
+ out += [ nl, nextLevelIndent, "<", resTag, ">",
+ xmlBodyText,
+ "</", resTag, ">" ]
+
+ out += [ nl, indent, "</", selfTag, ">" ]
+ return "".join(out)
+
+ def __lookup(self,sub):
+ for k,vlist in self.__tokdict.items():
+ for v,loc in vlist:
+ if sub is v:
+ return k
+ return None
+
+ def getName(self):
+ r"""
+ Returns the results name for this token expression. Useful when several
+ different expressions might match at a particular location.
+
+ Example::
+ integer = Word(nums)
+ ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
+ house_number_expr = Suppress('#') + Word(nums, alphanums)
+ user_data = (Group(house_number_expr)("house_number")
+ | Group(ssn_expr)("ssn")
+ | Group(integer)("age"))
+ user_info = OneOrMore(user_data)
+
+ result = user_info.parseString("22 111-22-3333 #221B")
+ for item in result:
+ print(item.getName(), ':', item[0])
+ prints::
+ age : 22
+ ssn : 111-22-3333
+ house_number : 221B
+ """
+ if self.__name:
+ return self.__name
+ elif self.__parent:
+ par = self.__parent()
+ if par:
+ return par.__lookup(self)
+ else:
+ return None
+ elif (len(self) == 1 and
+ len(self.__tokdict) == 1 and
+ next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
+ return next(iter(self.__tokdict.keys()))
+ else:
+ return None
+
+ def dump(self, indent='', depth=0, full=True):
+ """
+ Diagnostic method for listing out the contents of a C{ParseResults}.
+ Accepts an optional C{indent} argument so that this string can be embedded
+ in a nested display of other data.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ result = date_str.parseString('12/31/1999')
+ print(result.dump())
+ prints::
+ ['12', '/', '31', '/', '1999']
+ - day: 1999
+ - month: 31
+ - year: 12
+ """
+ out = []
+ NL = '\n'
+ out.append( indent+_ustr(self.asList()) )
+ if full:
+ if self.haskeys():
+ items = sorted((str(k), v) for k,v in self.items())
+ for k,v in items:
+ if out:
+ out.append(NL)
+ out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
+ if isinstance(v,ParseResults):
+ if v:
+ out.append( v.dump(indent,depth+1) )
+ else:
+ out.append(_ustr(v))
+ else:
+ out.append(repr(v))
+ elif any(isinstance(vv,ParseResults) for vv in self):
+ v = self
+ for i,vv in enumerate(v):
+ if isinstance(vv,ParseResults):
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
+ else:
+ out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
+
+ return "".join(out)
+
+ def pprint(self, *args, **kwargs):
+ """
+ Pretty-printer for parsed results as a list, using the C{pprint} module.
+ Accepts additional positional or keyword args as defined for the
+ C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
+
+ Example::
+ ident = Word(alphas, alphanums)
+ num = Word(nums)
+ func = Forward()
+ term = ident | num | Group('(' + func + ')')
+ func <<= ident + Group(Optional(delimitedList(term)))
+ result = func.parseString("fna a,b,(fnb c,d,200),100")
+ result.pprint(width=40)
+ prints::
+ ['fna',
+ ['a',
+ 'b',
+ ['(', 'fnb', ['c', 'd', '200'], ')'],
+ '100']]
+ """
+ pprint.pprint(self.asList(), *args, **kwargs)
+
+ # add support for pickle protocol
+ def __getstate__(self):
+ return ( self.__toklist,
+ ( self.__tokdict.copy(),
+ self.__parent is not None and self.__parent() or None,
+ self.__accumNames,
+ self.__name ) )
+
+ def __setstate__(self,state):
+ self.__toklist = state[0]
+ (self.__tokdict,
+ par,
+ inAccumNames,
+ self.__name) = state[1]
+ self.__accumNames = {}
+ self.__accumNames.update(inAccumNames)
+ if par is not None:
+ self.__parent = wkref(par)
+ else:
+ self.__parent = None
+
+ def __getnewargs__(self):
+ return self.__toklist, self.__name, self.__asList, self.__modal
+
+ def __dir__(self):
+ return (dir(type(self)) + list(self.keys()))
+
+MutableMapping.register(ParseResults)
+
+def col (loc,strg):
+ """Returns current column within a string, counting newlines as line separators.
+ The first column is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+ """
+ s = strg
+ return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
+
+def lineno(loc,strg):
+ """Returns current line number within a string, counting newlines as line separators.
+ The first line is number 1.
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+ """
+ return strg.count("\n",0,loc) + 1
+
+def line( loc, strg ):
+ """Returns the line of text containing loc within a string, counting newlines as line separators.
+ """
+ lastCR = strg.rfind("\n", 0, loc)
+ nextCR = strg.find("\n", loc)
+ if nextCR >= 0:
+ return strg[lastCR+1:nextCR]
+ else:
+ return strg[lastCR+1:]
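+
+# Editor's illustration (not upstream code): col/lineno are 1-based and all
+# three helpers agree on the same location, e.g. for loc = 4 in "ab\ncd":
+#
+#     lineno(4, "ab\ncd")  # -> 2
+#     col(4, "ab\ncd")     # -> 2
+#     line(4, "ab\ncd")    # -> "cd"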
+
+def _defaultStartDebugAction( instring, loc, expr ):
+ print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
+
+def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
+ print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
+
+def _defaultExceptionDebugAction( instring, loc, expr, exc ):
+ print ("Exception raised:" + _ustr(exc))
+
+def nullDebugAction(*args):
+ """'Do-nothing' debug action, to suppress debugging output during parsing."""
+ pass
+
+# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
+#~ 'decorator to trim function calls to match the arity of the target'
+#~ def _trim_arity(func, maxargs=3):
+ #~ if func in singleArgBuiltins:
+ #~ return lambda s,l,t: func(t)
+ #~ limit = 0
+ #~ foundArity = False
+ #~ def wrapper(*args):
+ #~ nonlocal limit,foundArity
+ #~ while 1:
+ #~ try:
+ #~ ret = func(*args[limit:])
+ #~ foundArity = True
+ #~ return ret
+ #~ except TypeError:
+ #~ if limit == maxargs or foundArity:
+ #~ raise
+ #~ limit += 1
+ #~ continue
+ #~ return wrapper
+
+# this version is Python 2.x-3.x cross-compatible
+'decorator to trim function calls to match the arity of the target'
+def _trim_arity(func, maxargs=2):
+ if func in singleArgBuiltins:
+ return lambda s,l,t: func(t)
+ limit = [0]
+ foundArity = [False]
+
+ # traceback return data structure changed in Py3.5 - normalize back to plain tuples
+ if system_version[:2] >= (3,5):
+ def extract_stack(limit=0):
+ # special handling for Python 3.5.0 - extra deep call stack by 1
+ offset = -3 if system_version == (3,5,0) else -2
+ frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
+ return [frame_summary[:2]]
+ def extract_tb(tb, limit=0):
+ frames = traceback.extract_tb(tb, limit=limit)
+ frame_summary = frames[-1]
+ return [frame_summary[:2]]
+ else:
+ extract_stack = traceback.extract_stack
+ extract_tb = traceback.extract_tb
+
+ # synthesize what would be returned by traceback.extract_stack at the call to
+ # user's parse action 'func', so that we don't incur call penalty at parse time
+
+ LINE_DIFF = 6
+ # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
+ # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
+ this_line = extract_stack(limit=2)[-1]
+ pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
+
+ def wrapper(*args):
+ while 1:
+ try:
+ ret = func(*args[limit[0]:])
+ foundArity[0] = True
+ return ret
+ except TypeError:
+ # re-raise TypeErrors if they did not come from our arity testing
+ if foundArity[0]:
+ raise
+ else:
+ try:
+ tb = sys.exc_info()[-1]
+ if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
+ raise
+ finally:
+ del tb
+
+ if limit[0] <= maxargs:
+ limit[0] += 1
+ continue
+ raise
+
+ # copy func name to wrapper for sensible debug output
+ func_name = "<parse action>"
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ wrapper.__name__ = func_name
+
+ return wrapper
+
+class ParserElement(object):
+ """Abstract base level parser element class."""
+ DEFAULT_WHITE_CHARS = " \n\t\r"
+ verbose_stacktrace = False
+
+ @staticmethod
+ def setDefaultWhitespaceChars( chars ):
+ r"""
+ Overrides the default whitespace chars
+
+ Example::
+ # default whitespace chars are space, <TAB> and newline
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
+
+ # change to just treat newline as significant
+ ParserElement.setDefaultWhitespaceChars(" \t")
+ OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
+ """
+ ParserElement.DEFAULT_WHITE_CHARS = chars
+
+ @staticmethod
+ def inlineLiteralsUsing(cls):
+ """
+ Set class to be used for inclusion of string literals into a parser.
+
+ Example::
+ # default literal class used is Literal
+ integer = Word(nums)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+
+ # change to Suppress
+ ParserElement.inlineLiteralsUsing(Suppress)
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+
+ date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
+ """
+ ParserElement._literalStringClass = cls
+
+ def __init__( self, savelist=False ):
+ self.parseAction = list()
+ self.failAction = None
+ #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
+ self.strRepr = None
+ self.resultsName = None
+ self.saveAsList = savelist
+ self.skipWhitespace = True
+ self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ self.copyDefaultWhiteChars = True
+ self.mayReturnEmpty = False # used when checking for left-recursion
+ self.keepTabs = False
+ self.ignoreExprs = list()
+ self.debug = False
+ self.streamlined = False
+ self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
+ self.errmsg = ""
+ self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
+ self.debugActions = ( None, None, None ) #custom debug actions
+ self.re = None
+ self.callPreparse = True # used to avoid redundant calls to preParse
+ self.callDuringTry = False
+
+ def copy( self ):
+ """
+ Make a copy of this C{ParserElement}. Useful for defining different parse actions
+ for the same parsing pattern, using copies of the original parse element.
+
+ Example::
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
+ integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+
+ print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
+ prints::
+ [5120, 100, 655360, 268435456]
+ Equivalent form of C{expr.copy()} is just C{expr()}::
+ integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
+ """
+ cpy = copy.copy( self )
+ cpy.parseAction = self.parseAction[:]
+ cpy.ignoreExprs = self.ignoreExprs[:]
+ if self.copyDefaultWhiteChars:
+ cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
+ return cpy
+
+ def setName( self, name ):
+ """
+ Define name for this expression, makes debugging and exception messages clearer.
+
+ Example::
+ Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
+ Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
+ """
+ self.name = name
+ self.errmsg = "Expected " + self.name
+ if hasattr(self,"exception"):
+ self.exception.msg = self.errmsg
+ return self
+
+ def setResultsName( self, name, listAllMatches=False ):
+ """
+ Define name for referencing matching tokens as a nested attribute
+ of the returned parse results.
+ NOTE: this returns a *copy* of the original C{ParserElement} object;
+ this is so that the client can define a basic element, such as an
+ integer, and reference it in multiple places with different names.
+
+ You can also set results names using the abbreviated syntax,
+ C{expr("name")} in place of C{expr.setResultsName("name")} -
+ see L{I{__call__}<__call__>}.
+
+ Example::
+ date_str = (integer.setResultsName("year") + '/'
+ + integer.setResultsName("month") + '/'
+ + integer.setResultsName("day"))
+
+ # equivalent form:
+ date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
+ """
+ newself = self.copy()
+ if name.endswith("*"):
+ name = name[:-1]
+ listAllMatches=True
+ newself.resultsName = name
+ newself.modalResults = not listAllMatches
+ return newself
+
+ def setBreak(self,breakFlag = True):
+ """Method to invoke the Python pdb debugger when this element is
+ about to be parsed. Set C{breakFlag} to True to enable, False to
+ disable.
+ """
+ if breakFlag:
+ _parseMethod = self._parse
+ def breaker(instring, loc, doActions=True, callPreParse=True):
+ import pdb
+ pdb.set_trace()
+ return _parseMethod( instring, loc, doActions, callPreParse )
+ breaker._originalParseMethod = _parseMethod
+ self._parse = breaker
+ else:
+ if hasattr(self._parse,"_originalParseMethod"):
+ self._parse = self._parse._originalParseMethod
+ return self
+
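+ # Illustrative sketch of setBreak (assumes an environment where pdb can take
+ # over the console): drops into the debugger just before this element
+ # attempts to match.
+ #
+ #     integer = Word(nums)
+ #     integer.setBreak()             # pdb.set_trace() fires on each parse attempt
+ #     integer.parseString("123")
+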
+ def setParseAction( self, *fns, **kwargs ):
+ """
+ Define one or more actions to perform when successfully matching parse element definition.
+ Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
+ C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
+ - s = the original string being parsed (see note below)
+ - loc = the location of the matching substring
+ - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
+ If the functions in fns modify the tokens, they can return them as the return
+ value from fn, and the modified list of tokens will replace the original.
+ Otherwise, fn does not need to return any value.
+
+ Optional keyword arguments:
+ - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
+
+ Note: the default parsing behavior is to expand tabs in the input string
+ before starting the parsing process. See L{I{parseString}<parseString>} for more information
+ on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
+ consistent view of the parsed string, the parse location, and line and column
+ positions within the parsed string.
+
+ Example::
+ integer = Word(nums)
+ date_str = integer + '/' + integer + '/' + integer
+
+ date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
+
+ # use parse action to convert to ints at parse time
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ date_str = integer + '/' + integer + '/' + integer
+
+ # note that integer fields are now ints, not strings
+ date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
+ """
+ self.parseAction = list(map(_trim_arity, list(fns)))
+ self.callDuringTry = kwargs.get("callDuringTry", False)
+ return self
+
+ def addParseAction( self, *fns, **kwargs ):
+ """
+ Add one or more parse actions to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
+
+ See examples in L{I{copy}<copy>}.
+ """
+ self.parseAction += list(map(_trim_arity, list(fns)))
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def addCondition(self, *fns, **kwargs):
+ """Add a boolean predicate function to expression's list of parse actions. See
+ L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
+ functions passed to C{addCondition} need to return boolean success/fail of the condition.
+
+ Optional keyword arguments:
+ - message = define a custom message to be used in the raised exception
+ - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
+
+ Example::
+ integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ year_int = integer.copy()
+ year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
+ date_str = year_int + '/' + integer + '/' + integer
+
+ result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
+ """
+ msg = kwargs.get("message", "failed user-defined condition")
+ exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
+ for fn in fns:
+ def pa(s,l,t):
+ if not bool(_trim_arity(fn)(s,l,t)):
+ raise exc_type(s,l,msg)
+ self.parseAction.append(pa)
+ self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
+ return self
+
+ def setFailAction( self, fn ):
+ """Define action to perform if parsing fails at this expression.
+ Fail action fn is a callable function that takes the arguments
+ C{fn(s,loc,expr,err)} where:
+ - s = string being parsed
+ - loc = location where expression match was attempted and failed
+ - expr = the parse expression that failed
+ - err = the exception thrown
+ The function returns no value. It may throw C{L{ParseFatalException}}
+ if it is desired to stop parsing immediately."""
+ self.failAction = fn
+ return self
+
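+ # Illustrative sketch of a fail action (names are examples only): report
+ # where a required expression failed to match.
+ #
+ #     def report_failure(s, loc, expr, err):
+ #         print("failed to match %s at loc %d" % (expr, loc))
+ #     integer = Word(nums).setFailAction(report_failure)
+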
+ def _skipIgnorables( self, instring, loc ):
+ exprsFound = True
+ while exprsFound:
+ exprsFound = False
+ for e in self.ignoreExprs:
+ try:
+ while 1:
+ loc,dummy = e._parse( instring, loc )
+ exprsFound = True
+ except ParseException:
+ pass
+ return loc
+
+ def preParse( self, instring, loc ):
+ if self.ignoreExprs:
+ loc = self._skipIgnorables( instring, loc )
+
+ if self.skipWhitespace:
+ wt = self.whiteChars
+ instrlen = len(instring)
+ while loc < instrlen and instring[loc] in wt:
+ loc += 1
+
+ return loc
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ return loc, []
+
+ def postParse( self, instring, loc, tokenlist ):
+ return tokenlist
+
+ #~ @profile
+ def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
+ debugging = ( self.debug ) #and doActions )
+
+ if debugging or self.failAction:
+ #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
+ if (self.debugActions[0] ):
+ self.debugActions[0]( instring, loc, self )
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse( instring, loc )
+ else:
+ preloc = loc
+ tokensStart = preloc
+ try:
+ try:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+ except IndexError:
+ raise ParseException( instring, len(instring), self.errmsg, self )
+ except ParseBaseException as err:
+ #~ print ("Exception raised:", err)
+ if self.debugActions[2]:
+ self.debugActions[2]( instring, tokensStart, self, err )
+ if self.failAction:
+ self.failAction( instring, tokensStart, self, err )
+ raise
+ else:
+ if callPreParse and self.callPreparse:
+ preloc = self.preParse( instring, loc )
+ else:
+ preloc = loc
+ tokensStart = preloc
+ if self.mayIndexError or preloc >= len(instring):
+ try:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+ except IndexError:
+ raise ParseException( instring, len(instring), self.errmsg, self )
+ else:
+ loc,tokens = self.parseImpl( instring, preloc, doActions )
+
+ tokens = self.postParse( instring, loc, tokens )
+
+ retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
+ if self.parseAction and (doActions or self.callDuringTry):
+ if debugging:
+ try:
+ for fn in self.parseAction:
+ tokens = fn( instring, tokensStart, retTokens )
+ if tokens is not None:
+ retTokens = ParseResults( tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+ modal=self.modalResults )
+ except ParseBaseException as err:
+ #~ print "Exception raised in user parse action:", err
+ if (self.debugActions[2] ):
+ self.debugActions[2]( instring, tokensStart, self, err )
+ raise
+ else:
+ for fn in self.parseAction:
+ tokens = fn( instring, tokensStart, retTokens )
+ if tokens is not None:
+ retTokens = ParseResults( tokens,
+ self.resultsName,
+ asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
+ modal=self.modalResults )
+ if debugging:
+ #~ print ("Matched",self,"->",retTokens.asList())
+ if (self.debugActions[1] ):
+ self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
+
+ return loc, retTokens
+
+ def tryParse( self, instring, loc ):
+ try:
+ return self._parse( instring, loc, doActions=False )[0]
+ except ParseFatalException:
+ raise ParseException( instring, loc, self.errmsg, self)
+
+ def canParseNext(self, instring, loc):
+ try:
+ self.tryParse(instring, loc)
+ except (ParseException, IndexError):
+ return False
+ else:
+ return True
+
+ class _UnboundedCache(object):
+ def __init__(self):
+ cache = {}
+ self.not_in_cache = not_in_cache = object()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ if _OrderedDict is not None:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = _OrderedDict()
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(cache) > size:
+ try:
+ cache.popitem(False)
+ except KeyError:
+ pass
+
+ def clear(self):
+ cache.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
+ else:
+ class _FifoCache(object):
+ def __init__(self, size):
+ self.not_in_cache = not_in_cache = object()
+
+ cache = {}
+ key_fifo = collections.deque([], size)
+
+ def get(self, key):
+ return cache.get(key, not_in_cache)
+
+ def set(self, key, value):
+ cache[key] = value
+ while len(key_fifo) > size:
+ cache.pop(key_fifo.popleft(), None)
+ key_fifo.append(key)
+
+ def clear(self):
+ cache.clear()
+ key_fifo.clear()
+
+ def cache_len(self):
+ return len(cache)
+
+ self.get = types.MethodType(get, self)
+ self.set = types.MethodType(set, self)
+ self.clear = types.MethodType(clear, self)
+ self.__len__ = types.MethodType(cache_len, self)
+
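+ # Note: both cache implementations above keep their state ('cache',
+ # 'key_fifo') in closure variables and bind the accessor functions to the
+ # instance with types.MethodType, rather than using instance attributes.
+ # A minimal sketch of the same closure-bound-method pattern (illustrative
+ # class, not part of this module):
+ #
+ #     class _Counter(object):
+ #         def __init__(self):
+ #             count = [0]                        # closure state
+ #             def bump(self):
+ #                 count[0] += 1
+ #                 return count[0]
+ #             self.bump = types.MethodType(bump, self)
+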
+ # argument cache for optimizing repeated calls when backtracking through recursive expressions
+ packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
+ packrat_cache_lock = RLock()
+ packrat_cache_stats = [0, 0]
+
+ # this method gets repeatedly called during backtracking with the same arguments -
+ # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
+ def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
+ HIT, MISS = 0, 1
+ lookup = (self, instring, loc, callPreParse, doActions)
+ with ParserElement.packrat_cache_lock:
+ cache = ParserElement.packrat_cache
+ value = cache.get(lookup)
+ if value is cache.not_in_cache:
+ ParserElement.packrat_cache_stats[MISS] += 1
+ try:
+ value = self._parseNoCache(instring, loc, doActions, callPreParse)
+ except ParseBaseException as pe:
+ # cache a copy of the exception, without the traceback
+ cache.set(lookup, pe.__class__(*pe.args))
+ raise
+ else:
+ cache.set(lookup, (value[0], value[1].copy()))
+ return value
+ else:
+ ParserElement.packrat_cache_stats[HIT] += 1
+ if isinstance(value, Exception):
+ raise value
+ return (value[0], value[1].copy())
+
+ _parse = _parseNoCache
+
+ @staticmethod
+ def resetCache():
+ ParserElement.packrat_cache.clear()
+ ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
+
+ _packratEnabled = False
+ @staticmethod
+ def enablePackrat(cache_size_limit=128):
+ """Enables "packrat" parsing, which adds memoizing to the parsing logic.
+ Repeated parse attempts at the same string location (which happens
+ often in many complex grammars) can immediately return a cached value,
+ instead of re-executing parsing/validating code. Memoizing is applied to
+ both valid results and parsing exceptions.
+
+ Parameters:
+ - cache_size_limit - (default=C{128}) - if an integer value is provided
+ will limit the size of the packrat cache; if None is passed, then
+ the cache size will be unbounded; if 0 is passed, the cache will
+ be effectively disabled.
+
+ This speedup may break existing programs that use parse actions that
+ have side-effects. For this reason, packrat parsing is disabled when
+ you first import pyparsing. To activate the packrat feature, your
+ program must call the class method C{ParserElement.enablePackrat()}. If
+ your program uses C{psyco} to "compile as you go", you must call
+ C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
+ Python will crash. For best results, call C{enablePackrat()} immediately
+ after importing pyparsing.
+
+ Example::
+ import pyparsing
+ pyparsing.ParserElement.enablePackrat()
+ """
+ if not ParserElement._packratEnabled:
+ ParserElement._packratEnabled = True
+ if cache_size_limit is None:
+ ParserElement.packrat_cache = ParserElement._UnboundedCache()
+ else:
+ ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
+ ParserElement._parse = ParserElement._parseCache
+
+ def parseString( self, instring, parseAll=False ):
+ """
+ Execute the parse expression with the given string.
+ This is the main interface to the client code, once the complete
+ expression has been built.
+
+ If you want the grammar to require that the entire input string be
+ successfully parsed, then set C{parseAll} to True (equivalent to ending
+ the grammar with C{L{StringEnd()}}).
+
+ Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
+ in order to report proper column numbers in parse actions.
+ If the input string contains tabs and
+ the grammar uses parse actions that use the C{loc} argument to index into the
+ string being parsed, you can ensure you have a consistent view of the input
+ string by:
+ - calling C{parseWithTabs} on your grammar before calling C{parseString}
+ (see L{I{parseWithTabs}<parseWithTabs>})
+ - define your parse action using the full C{(s,loc,toks)} signature, and
+ reference the input string using the parse action's C{s} argument
+ - explicitly expand the tabs in your input string before calling
+ C{parseString}
+
+ Example::
+ Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
+ Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
+ """
+ ParserElement.resetCache()
+ if not self.streamlined:
+ self.streamline()
+ #~ self.saveAsList = True
+ for e in self.ignoreExprs:
+ e.streamline()
+ if not self.keepTabs:
+ instring = instring.expandtabs()
+ try:
+ loc, tokens = self._parse( instring, 0 )
+ if parseAll:
+ loc = self.preParse( instring, loc )
+ se = Empty() + StringEnd()
+ se._parse( instring, loc )
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+ else:
+ return tokens
+
+ def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
+ """
+ Scan the input string for expression matches. Each match will return the
+ matching tokens, start location, and end location. May be called with optional
+ C{maxMatches} argument, to clip scanning after 'n' matches are found. If
+ C{overlap} is specified, then overlapping matches will be reported.
+
+ Note that the start and end locations are reported relative to the string
+ being parsed. See L{I{parseString}<parseString>} for more information on parsing
+ strings with embedded tabs.
+
+ Example::
+ source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
+ print(source)
+ for tokens,start,end in Word(alphas).scanString(source):
+ print(' '*start + '^'*(end-start))
+ print(' '*start + tokens[0])
+
+ prints::
+
+ sldjf123lsdjjkf345sldkjf879lkjsfd987
+ ^^^^^
+ sldjf
+ ^^^^^^^
+ lsdjjkf
+ ^^^^^^
+ sldkjf
+ ^^^^^^
+ lkjsfd
+ """
+ if not self.streamlined:
+ self.streamline()
+ for e in self.ignoreExprs:
+ e.streamline()
+
+ if not self.keepTabs:
+ instring = _ustr(instring).expandtabs()
+ instrlen = len(instring)
+ loc = 0
+ preparseFn = self.preParse
+ parseFn = self._parse
+ ParserElement.resetCache()
+ matches = 0
+ try:
+ while loc <= instrlen and matches < maxMatches:
+ try:
+ preloc = preparseFn( instring, loc )
+ nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
+ except ParseException:
+ loc = preloc+1
+ else:
+ if nextLoc > loc:
+ matches += 1
+ yield tokens, preloc, nextLoc
+ if overlap:
+ nextloc = preparseFn( instring, loc )
+ if nextloc > loc:
+ loc = nextLoc
+ else:
+ loc += 1
+ else:
+ loc = nextLoc
+ else:
+ loc = preloc+1
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def transformString( self, instring ):
+ """
+ Extension to C{L{scanString}}, to modify matching text with modified tokens that may
+ be returned from a parse action. To use C{transformString}, define a grammar and
+ attach a parse action to it that modifies the returned token list.
+ Invoking C{transformString()} on a target string will then scan for matches,
+ and replace the matched text patterns according to the logic in the parse
+ action. C{transformString()} returns the resulting transformed string.
+
+ Example::
+ wd = Word(alphas)
+ wd.setParseAction(lambda toks: toks[0].title())
+
+ print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
+ Prints::
+ Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
+ """
+ out = []
+ lastE = 0
+ # force preservation of <TAB>s, to minimize unwanted transformation of string, and to
+ # keep string locs straight between transformString and scanString
+ self.keepTabs = True
+ try:
+ for t,s,e in self.scanString( instring ):
+ out.append( instring[lastE:s] )
+ if t:
+ if isinstance(t,ParseResults):
+ out += t.asList()
+ elif isinstance(t,list):
+ out += t
+ else:
+ out.append(t)
+ lastE = e
+ out.append(instring[lastE:])
+ out = [o for o in out if o]
+ return "".join(map(_ustr,_flatten(out)))
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def searchString( self, instring, maxMatches=_MAX_INT ):
+ """
+ Another extension to C{L{scanString}}, simplifying the access to the tokens found
+ to match the given parse expression. May be called with optional
+ C{maxMatches} argument, to clip searching after 'n' matches are found.
+
+ Example::
+ # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
+ cap_word = Word(alphas.upper(), alphas.lower())
+
+ print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
+
+ # the sum() builtin can be used to merge results into a single ParseResults object
+ print(sum(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")))
+ prints::
+ [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']]
+ ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity']
+ """
+ try:
+ return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
+ def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
+ """
+ Generator method to split a string using the given expression as a separator.
+ May be called with optional C{maxsplit} argument, to limit the number of splits;
+ and the optional C{includeSeparators} argument (default=C{False}), indicating whether the
+ separating matching text should be included in the split results.
+
+ Example::
+ punc = oneOf(list(".,;:/-!?"))
+ print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
+ prints::
+ ['This', ' this', '', ' this sentence', ' is badly punctuated', '']
+ """
+ splits = 0
+ last = 0
+ for t,s,e in self.scanString(instring, maxMatches=maxsplit):
+ yield instring[last:s]
+ if includeSeparators:
+ yield t[0]
+ last = e
+ yield instring[last:]
+
+ def __add__(self, other ):
+ """
+ Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
+ converts them to L{Literal}s by default.
+
+ Example::
+ greet = Word(alphas) + "," + Word(alphas) + "!"
+ hello = "Hello, World!"
+ print (hello, "->", greet.parseString(hello))
+ Prints::
+ Hello, World! -> ['Hello', ',', 'World', '!']
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return And( [ self, other ] )
+
+ def __radd__(self, other ):
+ """
+ Implementation of + operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other + self
+
+ def __sub__(self, other):
+ """
+ Implementation of - operator, returns C{L{And}} with error stop
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return self + And._ErrorStop() + other
+
+ def __rsub__(self, other ):
+ """
+ Implementation of - operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other - self
+
+ def __mul__(self,other):
+ """
+ Implementation of * operator, allows use of C{expr * 3} in place of
+ C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
+ tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
+ may also include C{None} as in:
+ - C{expr*(n,None)} or C{expr*(n,)} is equivalent
+ to C{expr*n + L{ZeroOrMore}(expr)}
+ (read as "at least n instances of C{expr}")
+ - C{expr*(None,n)} is equivalent to C{expr*(0,n)}
+ (read as "0 to n instances of C{expr}")
+ - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
+ - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
+
+ Note that C{expr*(None,n)} does not raise an exception if
+ more than n exprs exist in the input stream; that is,
+ C{expr*(None,n)} does not enforce a maximum number of expr
+ occurrences. If this behavior is desired, then write
+ C{expr*(None,n) + ~expr}
+ """
+ if isinstance(other,int):
+ minElements, optElements = other,0
+ elif isinstance(other,tuple):
+ other = (other + (None, None))[:2]
+ if other[0] is None:
+ other = (0, other[1])
+ if isinstance(other[0],int) and other[1] is None:
+ if other[0] == 0:
+ return ZeroOrMore(self)
+ if other[0] == 1:
+ return OneOrMore(self)
+ else:
+ return self*other[0] + ZeroOrMore(self)
+ elif isinstance(other[0],int) and isinstance(other[1],int):
+ minElements, optElements = other
+ optElements -= minElements
+ else:
+ raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
+ else:
+ raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
+
+ if minElements < 0:
+ raise ValueError("cannot multiply ParserElement by negative value")
+ if optElements < 0:
+ raise ValueError("second tuple value must be greater or equal to first tuple value")
+ if minElements == optElements == 0:
+ raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
+
+ if (optElements):
+ def makeOptionalList(n):
+ if n>1:
+ return Optional(self + makeOptionalList(n-1))
+ else:
+ return Optional(self)
+ if minElements:
+ if minElements == 1:
+ ret = self + makeOptionalList(optElements)
+ else:
+ ret = And([self]*minElements) + makeOptionalList(optElements)
+ else:
+ ret = makeOptionalList(optElements)
+ else:
+ if minElements == 1:
+ ret = self
+ else:
+ ret = And([self]*minElements)
+ return ret
+
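+ # A few worked instances of the multiplication forms described above
+ # (illustrative only):
+ #
+ #     abc = Literal("abc")
+ #     abc * 3           # same as abc + abc + abc
+ #     abc * (2, 4)      # 2 to 4 occurrences
+ #     abc * (2, None)   # 2 or more occurrences
+ #     abc * (None, 3)   # 0 to 3 occurrences (does not forbid a 4th)
+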
+ def __rmul__(self, other):
+ return self.__mul__(other)
+
+ def __or__(self, other ):
+ """
+ Implementation of | operator - returns C{L{MatchFirst}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return MatchFirst( [ self, other ] )
+
+ def __ror__(self, other ):
+ """
+ Implementation of | operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other | self
+
+ def __xor__(self, other ):
+ """
+ Implementation of ^ operator - returns C{L{Or}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Or( [ self, other ] )
+
+ def __rxor__(self, other ):
+ """
+ Implementation of ^ operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other ^ self
+
+ def __and__(self, other ):
+ """
+ Implementation of & operator - returns C{L{Each}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return Each( [ self, other ] )
+
+ def __rand__(self, other ):
+ """
+ Implementation of & operator when left operand is not a C{L{ParserElement}}
+ """
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ if not isinstance( other, ParserElement ):
+ warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
+ SyntaxWarning, stacklevel=2)
+ return None
+ return other & self
+
+ def __invert__( self ):
+ """
+ Implementation of ~ operator - returns C{L{NotAny}}
+ """
+ return NotAny( self )
+
+ def __call__(self, name=None):
+ """
+ Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
+
+ If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
+ passed as C{True}.
+
+ If C{name} is omitted, same as calling C{L{copy}}.
+
+ Example::
+ # these are equivalent
+ userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
+ userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
+ """
+ if name is not None:
+ return self.setResultsName(name)
+ else:
+ return self.copy()
+
+ def suppress( self ):
+ """
+ Suppresses the output of this C{ParserElement}; useful to keep punctuation from
+ cluttering up returned output.
+ """
+ return Suppress( self )
+
+ def leaveWhitespace( self ):
+ """
+ Disables the skipping of whitespace before matching the characters in the
+ C{ParserElement}'s defined pattern. This is normally only used internally by
+ the pyparsing module, but may be needed in some whitespace-sensitive grammars.
+ """
+ self.skipWhitespace = False
+ return self
+
+ def setWhitespaceChars( self, chars ):
+ """
+ Overrides the default whitespace chars for this expression only.
+ """
+ self.skipWhitespace = True
+ self.whiteChars = chars
+ self.copyDefaultWhiteChars = False
+ return self
+
+ def parseWithTabs( self ):
+ """
+ Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
+ Must be called before C{parseString} when the input grammar contains elements that
+ match C{<TAB>} characters.
+ """
+ self.keepTabs = True
+ return self
+
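+ # Illustrative sketch of when parseWithTabs matters (assumes tab-separated
+ # input): by default parseString expands tabs to spaces first, so a grammar
+ # that must match literal <TAB> characters needs this override.
+ #
+ #     TAB = Literal("\t").leaveWhitespace()   # keep the tab from being skipped
+ #     tsv_row = Word(alphas) + TAB + Word(alphas)
+ #     tsv_row.parseWithTabs().parseString("key\tvalue")
+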
+ def ignore( self, other ):
+ """
+ Define expression to be ignored (e.g., comments) while doing pattern
+ matching; may be called repeatedly, to define multiple comment or other
+ ignorable patterns.
+
+ Example::
+ patt = OneOrMore(Word(alphas))
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
+
+ patt.ignore(cStyleComment)
+ patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
+ """
+ if isinstance(other, basestring):
+ other = Suppress(other)
+
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ self.ignoreExprs.append(other)
+ else:
+ self.ignoreExprs.append( Suppress( other.copy() ) )
+ return self
+
+ def setDebugActions( self, startAction, successAction, exceptionAction ):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ """
+ self.debugActions = (startAction or _defaultStartDebugAction,
+ successAction or _defaultSuccessDebugAction,
+ exceptionAction or _defaultExceptionDebugAction)
+ self.debug = True
+ return self
+
+ def setDebug( self, flag=True ):
+ """
+ Enable display of debugging messages while doing pattern matching.
+ Set C{flag} to True to enable, False to disable.
+
+ Example::
+ wd = Word(alphas).setName("alphaword")
+ integer = Word(nums).setName("numword")
+ term = wd | integer
+
+ # turn on debugging for wd
+ wd.setDebug()
+
+ OneOrMore(term).parseString("abc 123 xyz 890")
+
+ prints::
+ Match alphaword at loc 0(1,1)
+ Matched alphaword -> ['abc']
+ Match alphaword at loc 3(1,4)
+ Exception raised:Expected alphaword (at char 4), (line:1, col:5)
+ Match alphaword at loc 7(1,8)
+ Matched alphaword -> ['xyz']
+ Match alphaword at loc 11(1,12)
+ Exception raised:Expected alphaword (at char 12), (line:1, col:13)
+ Match alphaword at loc 15(1,16)
+ Exception raised:Expected alphaword (at char 15), (line:1, col:16)
+
+ The output shown is that produced by the default debug actions - custom debug actions can be
+ specified using L{setDebugActions}. Prior to attempting
+ to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
+ is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
+ message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
+ which makes debugging and exception messages easier to understand - for instance, the default
+ name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
+ """
+ if flag:
+ self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
+ else:
+ self.debug = False
+ return self
+
+ def __str__( self ):
+ return self.name
+
+ def __repr__( self ):
+ return _ustr(self)
+
+ def streamline( self ):
+ self.streamlined = True
+ self.strRepr = None
+ return self
+
+ def checkRecursion( self, parseElementList ):
+ pass
+
+ def validate( self, validateTrace=[] ):
+ """
+ Check defined expressions for valid structure, check for infinite recursive definitions.
+ """
+ self.checkRecursion( [] )
+
+ def parseFile( self, file_or_filename, parseAll=False ):
+ """
+ Execute the parse expression on the given file or filename.
+ If a filename is specified (instead of a file object),
+ the entire file is opened, read, and closed before parsing.
+ """
+ try:
+ file_contents = file_or_filename.read()
+ except AttributeError:
+ with open(file_or_filename, "r") as f:
+ file_contents = f.read()
+ try:
+ return self.parseString(file_contents, parseAll)
+ except ParseBaseException as exc:
+ if ParserElement.verbose_stacktrace:
+ raise
+ else:
+ # catch and re-raise exception from here, clears out pyparsing internal stack trace
+ raise exc
+
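+ # Illustrative sketch of parseFile (hypothetical filename):
+ #
+ #     grammar = OneOrMore(Word(alphas))
+ #     results = grammar.parseFile("words.txt")   # opens, reads, closes, parses
+ #     # an already-open file object works too:
+ #     # with open("words.txt") as f:
+ #     #     results = grammar.parseFile(f)
+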
+ def __eq__(self,other):
+ if isinstance(other, ParserElement):
+ return self is other or vars(self) == vars(other)
+ elif isinstance(other, basestring):
+ return self.matches(other)
+ else:
+ return super(ParserElement,self)==other
+
+ def __ne__(self,other):
+ return not (self == other)
+
+ def __hash__(self):
+ return hash(id(self))
+
+ def __req__(self,other):
+ return self == other
+
+ def __rne__(self,other):
+ return not (self == other)
+
+ def matches(self, testString, parseAll=True):
+ """
+ Method for quick testing of a parser against a test string. Good for simple
+ inline microtests of sub-expressions while building up a larger parser.
+
+ Parameters:
+ - testString - to test against this expression for a match
+ - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+
+ Example::
+ expr = Word(nums)
+ assert expr.matches("100")
+ """
+ try:
+ self.parseString(_ustr(testString), parseAll=parseAll)
+ return True
+ except ParseBaseException:
+ return False
+
+ def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
+ """
+ Execute the parse expression on a series of test strings, showing each
+ test, the parsed results or where the parse failed. Quick and easy way to
+ run a parse expression against a list of sample strings.
+
+ Parameters:
+ - tests - a list of separate test strings, or a multiline string of test strings
+ - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
+ - comment - (default=C{'#'}) - expression for indicating embedded comments in the test
+ string; pass None to disable comment filtering
+ - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
+ if False, only dump nested list
+ - printResults - (default=C{True}) prints test output to stdout
+ - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
+
+ Returns: a (success, results) tuple, where success indicates that all tests succeeded
+ (or failed if C{failureTests} is True), and the results contain a list of lines of each
+ test's output
+
+ Example::
+ number_expr = pyparsing_common.number.copy()
+
+ result = number_expr.runTests('''
+ # unsigned integer
+ 100
+ # negative integer
+ -100
+ # float with scientific notation
+ 6.02e23
+ # integer with scientific notation
+ 1e-12
+ ''')
+ print("Success" if result[0] else "Failed!")
+
+ result = number_expr.runTests('''
+ # stray character
+ 100Z
+ # missing leading digit before '.'
+ -.100
+ # too many '.'
+ 3.14.159
+ ''', failureTests=True)
+ print("Success" if result[0] else "Failed!")
+ prints::
+ # unsigned integer
+ 100
+ [100]
+
+ # negative integer
+ -100
+ [-100]
+
+ # float with scientific notation
+ 6.02e23
+ [6.02e+23]
+
+ # integer with scientific notation
+ 1e-12
+ [1e-12]
+
+ Success
+
+ # stray character
+ 100Z
+ ^
+ FAIL: Expected end of text (at char 3), (line:1, col:4)
+
+ # missing leading digit before '.'
+ -.100
+ ^
+ FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
+
+ # too many '.'
+ 3.14.159
+ ^
+ FAIL: Expected end of text (at char 4), (line:1, col:5)
+
+ Success
+
+ Each test string must be on a single line. If you want to test a string that spans multiple
+ lines, create a test like this::
+
+ expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines")
+
+ (Note that this is a raw string literal, you must include the leading 'r'.)
+ """
+ if isinstance(tests, basestring):
+ tests = list(map(str.strip, tests.rstrip().splitlines()))
+ if isinstance(comment, basestring):
+ comment = Literal(comment)
+ allResults = []
+ comments = []
+ success = True
+ for t in tests:
+ if comment is not None and comment.matches(t, False) or comments and not t:
+ comments.append(t)
+ continue
+ if not t:
+ continue
+ out = ['\n'.join(comments), t]
+ comments = []
+ try:
+ t = t.replace(r'\n','\n')
+ result = self.parseString(t, parseAll=parseAll)
+ out.append(result.dump(full=fullDump))
+ success = success and not failureTests
+ except ParseBaseException as pe:
+ fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
+ if '\n' in t:
+ out.append(line(pe.loc, t))
+ out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
+ else:
+ out.append(' '*pe.loc + '^' + fatal)
+ out.append("FAIL: " + str(pe))
+ success = success and failureTests
+ result = pe
+ except Exception as exc:
+ out.append("FAIL-EXCEPTION: " + str(exc))
+ success = success and failureTests
+ result = exc
+
+ if printResults:
+ if fullDump:
+ out.append('')
+ print('\n'.join(out))
+
+ allResults.append((t, result))
+
+ return success, allResults
+
+
+class Token(ParserElement):
+ """
+ Abstract C{ParserElement} subclass, for defining atomic matching patterns.
+ """
+ def __init__( self ):
+ super(Token,self).__init__( savelist=False )
+
+
+class Empty(Token):
+ """
+ An empty token, will always match.
+ """
+ def __init__( self ):
+ super(Empty,self).__init__()
+ self.name = "Empty"
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+
+class NoMatch(Token):
+ """
+ A token that will never match.
+ """
+ def __init__( self ):
+ super(NoMatch,self).__init__()
+ self.name = "NoMatch"
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.errmsg = "Unmatchable token"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Literal(Token):
+ """
+ Token to exactly match a specified string.
+
+ Example::
+ Literal('blah').parseString('blah') # -> ['blah']
+ Literal('blah').parseString('blahfooblah') # -> ['blah']
+ Literal('blah').parseString('bla') # -> Exception: Expected "blah"
+
+ For case-insensitive matching, use L{CaselessLiteral}.
+
+ For keyword matching (force word break before and after the matched string),
+ use L{Keyword} or L{CaselessKeyword}.
+ """
+ def __init__( self, matchString ):
+ super(Literal,self).__init__()
+ self.match = matchString
+ self.matchLen = len(matchString)
+ try:
+ self.firstMatchChar = matchString[0]
+ except IndexError:
+ warnings.warn("null string passed to Literal; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+ self.__class__ = Empty
+ self.name = '"%s"' % _ustr(self.match)
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+
+ # Performance tuning: this routine gets called a *lot*
+ # if this is a single character match string and the first character matches,
+ # short-circuit as quickly as possible, and avoid calling startswith
+ #~ @profile
+ def parseImpl( self, instring, loc, doActions=True ):
+ if (instring[loc] == self.firstMatchChar and
+ (self.matchLen==1 or instring.startswith(self.match,loc)) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+_L = Literal
+ParserElement._literalStringClass = Literal
+
+class Keyword(Token):
+ """
+ Token to exactly match a specified string as a keyword, that is, it must be
+ immediately followed by a non-keyword character. Compare with C{L{Literal}}:
+ - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
+ - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
+ Accepts two optional constructor arguments in addition to the keyword string:
+ - C{identChars} is a string of characters that would be valid identifier characters,
+ defaulting to all alphanumerics + "_" and "$"
+ - C{caseless} allows case-insensitive matching, default is C{False}.
+
+ Example::
+ Keyword("start").parseString("start") # -> ['start']
+ Keyword("start").parseString("starting") # -> Exception
+
+ For case-insensitive matching, use L{CaselessKeyword}.
+ """
+ DEFAULT_KEYWORD_CHARS = alphanums+"_$"
+
+ def __init__( self, matchString, identChars=None, caseless=False ):
+ super(Keyword,self).__init__()
+ if identChars is None:
+ identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ self.match = matchString
+ self.matchLen = len(matchString)
+ try:
+ self.firstMatchChar = matchString[0]
+ except IndexError:
+ warnings.warn("null string passed to Keyword; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+ self.name = '"%s"' % self.match
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = False
+ self.mayIndexError = False
+ self.caseless = caseless
+ if caseless:
+ self.caselessmatch = matchString.upper()
+ identChars = identChars.upper()
+ self.identChars = set(identChars)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.caseless:
+ if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
+ (loc == 0 or instring[loc-1].upper() not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ else:
+ if (instring[loc] == self.firstMatchChar and
+ (self.matchLen==1 or instring.startswith(self.match,loc)) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
+ (loc == 0 or instring[loc-1] not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ def copy(self):
+ c = super(Keyword,self).copy()
+ c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
+ return c
+
+ @staticmethod
+ def setDefaultKeywordChars( chars ):
+ """Overrides the default Keyword chars
+ """
+ Keyword.DEFAULT_KEYWORD_CHARS = chars
+
+class CaselessLiteral(Literal):
+ """
+ Token to match a specified string, ignoring case of letters.
+ Note: the matched results will always be in the case of the given
+ match string, NOT the case of the input text.
+
+ Example::
+ OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
+
+ (Contrast with example for L{CaselessKeyword}.)
+ """
+ def __init__( self, matchString ):
+ super(CaselessLiteral,self).__init__( matchString.upper() )
+ # Preserve the defining literal.
+ self.returnString = matchString
+ self.name = "'%s'" % self.returnString
+ self.errmsg = "Expected " + self.name
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if instring[ loc:loc+self.matchLen ].upper() == self.match:
+ return loc+self.matchLen, self.returnString
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class CaselessKeyword(Keyword):
+ """
+ Caseless version of L{Keyword}.
+
+ Example::
+ OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
+
+ (Contrast with example for L{CaselessLiteral}.)
+ """
+ def __init__( self, matchString, identChars=None ):
+ super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
+ (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
+ return loc+self.matchLen, self.match
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class CloseMatch(Token):
+ """
+ A variation on L{Literal} which matches "close" matches, that is,
+ strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
+ - C{match_string} - string to be matched
+ - C{maxMismatches} - (default=C{1}) maximum number of mismatches allowed to count as a match
+
+ The results from a successful parse will contain the matched text from the input string and the following named results:
+ - C{mismatches} - a list of the positions within the match_string where mismatches were found
+ - C{original} - the original match_string used to compare against the input string
+
+ If C{mismatches} is an empty list, then the match was an exact match.
+
+ Example::
+ patt = CloseMatch("ATCATCGAATGGA")
+ patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
+ patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
+
+ # exact match
+ patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
+
+ # close match allowing up to 2 mismatches
+ patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
+ patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
+ """
+ def __init__(self, match_string, maxMismatches=1):
+ super(CloseMatch,self).__init__()
+ self.name = match_string
+ self.match_string = match_string
+ self.maxMismatches = maxMismatches
+ self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
+ self.mayIndexError = False
+ self.mayReturnEmpty = False
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ start = loc
+ instrlen = len(instring)
+ maxloc = start + len(self.match_string)
+
+ if maxloc <= instrlen:
+ match_string = self.match_string
+ match_stringloc = 0
+ mismatches = []
+ maxMismatches = self.maxMismatches
+
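+ # compare the input and match_string character by character, recording
+ # mismatch positions; the for-else 'else' branch runs only when the loop
+ # completes without break, i.e. mismatches stayed within maxMismatches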
+ for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
+ src,mat = s_m
+ if src != mat:
+ mismatches.append(match_stringloc)
+ if len(mismatches) > maxMismatches:
+ break
+ else:
+ loc = match_stringloc + 1
+ results = ParseResults([instring[start:loc]])
+ results['original'] = self.match_string
+ results['mismatches'] = mismatches
+ return loc, results
+
+ raise ParseException(instring, loc, self.errmsg, self)
+
+
+class Word(Token):
+ """
+ Token for matching words composed of allowed character sets.
+ Defined with string containing all allowed initial characters,
+ an optional string containing allowed body characters (if omitted,
+ defaults to the initial character set), and an optional minimum,
+ maximum, and/or exact length. The default value for C{min} is 1 (a
+ minimum value < 1 is not valid); the default values for C{max} and C{exact}
+ are 0, meaning no maximum or exact length restriction. An optional
+ C{excludeChars} parameter can list characters that might be found in
+ the input C{bodyChars} string; useful to define a word of all printables
+ except for one or two characters, for instance.
+
+ L{srange} is useful for defining custom character set strings for defining
+ C{Word} expressions, using range notation from regular expression character sets.
+
+ A common mistake is to use C{Word} to match a specific literal string, as in
+ C{Word("Address")}. Remember that C{Word} uses the string argument to define
+ I{sets} of matchable characters. This expression would match "Add", "AAA",
+ "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
+ To match an exact literal string, use L{Literal} or L{Keyword}.
+
+ pyparsing includes helper strings for building Words:
+ - L{alphas}
+ - L{nums}
+ - L{alphanums}
+ - L{hexnums}
+ - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
+ - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
+ - L{printables} (any non-whitespace character)
+
+ Example::
+ # a word composed of digits
+ integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
+
+ # a word with a leading capital, and zero or more lowercase
+ capital_word = Word(alphas.upper(), alphas.lower())
+
+ # hostnames are alphanumeric, with leading alpha, and '-'
+ hostname = Word(alphas, alphanums+'-')
+
+ # roman numeral (not a strict parser, accepts invalid mix of characters)
+ roman = Word("IVXLCDM")
+
+ # any string of non-whitespace characters, except for ','
+ csv_value = Word(printables, excludeChars=",")
+ """
+ def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
+ super(Word,self).__init__()
+ if excludeChars:
+ initChars = ''.join(c for c in initChars if c not in excludeChars)
+ if bodyChars:
+ bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
+ self.initCharsOrig = initChars
+ self.initChars = set(initChars)
+ if bodyChars :
+ self.bodyCharsOrig = bodyChars
+ self.bodyChars = set(bodyChars)
+ else:
+ self.bodyCharsOrig = initChars
+ self.bodyChars = set(initChars)
+
+ self.maxSpecified = max > 0
+
+ if min < 1:
+ raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.asKeyword = asKeyword
+
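+ # performance shortcut: with only the default length settings and no
+ # embedded spaces in the character sets, precompile an equivalent regular
+ # expression so parseImpl can match with re instead of scanning
+ # character by character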
+ if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
+ if self.bodyCharsOrig == self.initCharsOrig:
+ self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
+ elif len(self.initCharsOrig) == 1:
+ self.reString = "%s[%s]*" % \
+ (re.escape(self.initCharsOrig),
+ _escapeRegexRangeChars(self.bodyCharsOrig),)
+ else:
+ self.reString = "[%s][%s]*" % \
+ (_escapeRegexRangeChars(self.initCharsOrig),
+ _escapeRegexRangeChars(self.bodyCharsOrig),)
+ if self.asKeyword:
+ self.reString = r"\b"+self.reString+r"\b"
+ try:
+ self.re = re.compile( self.reString )
+ except Exception:
+ self.re = None
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.re:
+ result = self.re.match(instring,loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ return loc, result.group()
+
+ if not(instring[ loc ] in self.initChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ instrlen = len(instring)
+ bodychars = self.bodyChars
+ maxloc = start + self.maxLen
+ maxloc = min( maxloc, instrlen )
+ while loc < maxloc and instring[loc] in bodychars:
+ loc += 1
+
+ throwException = False
+ if loc - start < self.minLen:
+ throwException = True
+ if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
+ throwException = True
+ if self.asKeyword:
+ if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
+ throwException = True
+
+ if throwException:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+ def __str__( self ):
+ try:
+ return super(Word,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+
+ def charsAsStr(s):
+ if len(s)>4:
+ return s[:4]+"..."
+ else:
+ return s
+
+ if ( self.initCharsOrig != self.bodyCharsOrig ):
+ self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
+ else:
+ self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
+
+ return self.strRepr
+
+
+class Regex(Token):
+ r"""
+ Token for matching strings that match a given regular expression.
+ Defined with a string specifying the regular expression in a form recognized by the built-in Python re module.
+ If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
+ named parse results.
+
+ Example::
+ realnum = Regex(r"[+-]?\d+\.\d*")
+ date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
+ # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
+ roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
+ """
+ compiledREtype = type(re.compile("[A-Z]"))
+ def __init__( self, pattern, flags=0):
+ """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
+ super(Regex,self).__init__()
+
+ if isinstance(pattern, basestring):
+ if not pattern:
+ warnings.warn("null string passed to Regex; use Empty() instead",
+ SyntaxWarning, stacklevel=2)
+
+ self.pattern = pattern
+ self.flags = flags
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ except sre_constants.error:
+ warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
+ SyntaxWarning, stacklevel=2)
+ raise
+
+ elif isinstance(pattern, Regex.compiledREtype):
+ self.re = pattern
+ self.pattern = \
+ self.reString = str(pattern)
+ self.flags = flags
+
+ else:
+ raise ValueError("Regex may only be constructed with a string or a compiled RE object")
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ result = self.re.match(instring,loc)
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ d = result.groupdict()
+ ret = ParseResults(result.group())
+ if d:
+ for k in d:
+ ret[k] = d[k]
+ return loc,ret
+
+ def __str__( self ):
+ try:
+ return super(Regex,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "Re:(%s)" % repr(self.pattern)
+
+ return self.strRepr
+
+
+class QuotedString(Token):
+ r"""
+ Token for matching strings that are delimited by quoting characters.
+
+ Defined with the following parameters:
+ - quoteChar - string of one or more characters defining the quote delimiting string
+ - escChar - character to escape quotes, typically backslash (default=C{None})
+ - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
+ - multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
+ - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
+ - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
+ - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
+
+ Example::
+ qs = QuotedString('"')
+ print(qs.searchString('lsjdf "This is the quote" sldjf'))
+ complex_qs = QuotedString('{{', endQuoteChar='}}')
+ print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
+ sql_qs = QuotedString('"', escQuote='""')
+ print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
+ prints::
+ [['This is the quote']]
+ [['This is the "quote"']]
+ [['This is the quote with "embedded" quotes']]
+ """
+ def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
+ super(QuotedString,self).__init__()
+
+ # remove white space from quote chars - won't work anyway
+ quoteChar = quoteChar.strip()
+ if not quoteChar:
+ warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+ raise SyntaxError()
+
+ if endQuoteChar is None:
+ endQuoteChar = quoteChar
+ else:
+ endQuoteChar = endQuoteChar.strip()
+ if not endQuoteChar:
+ warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
+ raise SyntaxError()
+
+ self.quoteChar = quoteChar
+ self.quoteCharLen = len(quoteChar)
+ self.firstQuoteChar = quoteChar[0]
+ self.endQuoteChar = endQuoteChar
+ self.endQuoteCharLen = len(endQuoteChar)
+ self.escChar = escChar
+ self.escQuote = escQuote
+ self.unquoteResults = unquoteResults
+ self.convertWhitespaceEscapes = convertWhitespaceEscapes
+
+ if multiline:
+ self.flags = re.MULTILINE | re.DOTALL
+ self.pattern = r'%s(?:[^%s%s]' % \
+ ( re.escape(self.quoteChar),
+ _escapeRegexRangeChars(self.endQuoteChar[0]),
+ (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+ else:
+ self.flags = 0
+ self.pattern = r'%s(?:[^%s\n\r%s]' % \
+ ( re.escape(self.quoteChar),
+ _escapeRegexRangeChars(self.endQuoteChar[0]),
+ (escChar is not None and _escapeRegexRangeChars(escChar) or '') )
+ if len(self.endQuoteChar) > 1:
+ self.pattern += (
+ '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
+ _escapeRegexRangeChars(self.endQuoteChar[i]))
+ for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
+ )
+ if escQuote:
+ self.pattern += (r'|(?:%s)' % re.escape(escQuote))
+ if escChar:
+ self.pattern += (r'|(?:%s.)' % re.escape(escChar))
+ self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
+ self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
+
+ try:
+ self.re = re.compile(self.pattern, self.flags)
+ self.reString = self.pattern
+ except sre_constants.error:
+ warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
+ SyntaxWarning, stacklevel=2)
+ raise
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayIndexError = False
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ result = self.re.match(instring, loc) if instring[loc] == self.firstQuoteChar else None
+ if not result:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ loc = result.end()
+ ret = result.group()
+
+ if self.unquoteResults:
+
+ # strip off quotes
+ ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
+
+ if isinstance(ret,basestring):
+ # replace escaped whitespace
+ if '\\' in ret and self.convertWhitespaceEscapes:
+ ws_map = {
+ r'\t' : '\t',
+ r'\n' : '\n',
+ r'\f' : '\f',
+ r'\r' : '\r',
+ }
+ for wslit,wschar in ws_map.items():
+ ret = ret.replace(wslit, wschar)
+
+ # replace escaped characters
+ if self.escChar:
+ ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
+
+ # replace escaped quotes
+ if self.escQuote:
+ ret = ret.replace(self.escQuote, self.endQuoteChar)
+
+ return loc, ret
+
+ def __str__( self ):
+ try:
+ return super(QuotedString,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
+
+ return self.strRepr
+
+
+class CharsNotIn(Token):
+ """
+ Token for matching words composed of characters I{not} in a given set (will
+ include whitespace in matched characters if not listed in the provided exclusion set - see example).
+ Defined with a string containing all disallowed characters, and an optional
+ minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
+ minimum value < 1 is not valid); the default values for C{max} and C{exact}
+ are 0, meaning no maximum or exact length restriction.
+
+ Example::
+ # define a comma-separated-value as anything that is not a ','
+ csv_value = CharsNotIn(',')
+ print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
+ prints::
+ ['dkls', 'lsdkjf', 's12 34', '@!#', '213']
+ """
+ def __init__( self, notChars, min=1, max=0, exact=0 ):
+ super(CharsNotIn,self).__init__()
+ self.skipWhitespace = False
+ self.notChars = notChars
+
+ if min < 1:
+ raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ self.name = _ustr(self)
+ self.errmsg = "Expected " + self.name
+ self.mayReturnEmpty = ( self.minLen == 0 )
+ self.mayIndexError = False
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if instring[loc] in self.notChars:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ start = loc
+ loc += 1
+ notchars = self.notChars
+ maxlen = min( start+self.maxLen, len(instring) )
+ while loc < maxlen and \
+ (instring[loc] not in notchars):
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+ def __str__( self ):
+ try:
+ return super(CharsNotIn, self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ if len(self.notChars) > 4:
+ self.strRepr = "!W:(%s...)" % self.notChars[:4]
+ else:
+ self.strRepr = "!W:(%s)" % self.notChars
+
+ return self.strRepr
+
+class White(Token):
+ """
+ Special matching class for matching whitespace. Normally, whitespace is ignored
+ by pyparsing grammars. This class is included when some whitespace structures
+ are significant. Define with a string containing the whitespace characters to be
+ matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
+ as defined for the C{L{Word}} class.
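+
+ Example::
+ # a minimal sketch: treat tab characters as significant separators
+ row = Word(alphas) + White("\\t").suppress() + Word(alphas)
+ print(row.parseString("key\\tvalue")) # -> ['key', 'value']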
+ """
+ whiteStrs = {
+ " " : "<SPC>",
+ "\t": "<TAB>",
+ "\n": "<LF>",
+ "\r": "<CR>",
+ "\f": "<FF>",
+ }
+ def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
+ super(White,self).__init__()
+ self.matchWhite = ws
+ self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
+ #~ self.leaveWhitespace()
+ self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
+ self.mayReturnEmpty = True
+ self.errmsg = "Expected " + self.name
+
+ self.minLen = min
+
+ if max > 0:
+ self.maxLen = max
+ else:
+ self.maxLen = _MAX_INT
+
+ if exact > 0:
+ self.maxLen = exact
+ self.minLen = exact
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if instring[loc] not in self.matchWhite:
+ raise ParseException(instring, loc, self.errmsg, self)
+ start = loc
+ loc += 1
+ maxloc = start + self.maxLen
+ maxloc = min( maxloc, len(instring) )
+ while loc < maxloc and instring[loc] in self.matchWhite:
+ loc += 1
+
+ if loc - start < self.minLen:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ return loc, instring[start:loc]
+
+
+class _PositionToken(Token):
+ def __init__( self ):
+ super(_PositionToken,self).__init__()
+ self.name=self.__class__.__name__
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+
+class GoToColumn(_PositionToken):
+ """
+ Token to advance to a specific column of input text; useful for tabular report scraping.
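+
+ Example::
+ # a minimal sketch: a row number, then a field that starts in column 10
+ row = Word(nums) + GoToColumn(10).suppress() + Word(alphas)
+ print(row.parseString("1234     abcde")) # -> ['1234', 'abcde']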
+ """
+ def __init__( self, colno ):
+ super(GoToColumn,self).__init__()
+ self.col = colno
+
+ def preParse( self, instring, loc ):
+ if col(loc,instring) != self.col:
+ instrlen = len(instring)
+ if self.ignoreExprs:
+ loc = self._skipIgnorables( instring, loc )
+ while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
+ loc += 1
+ return loc
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ thiscol = col( loc, instring )
+ if thiscol > self.col:
+ raise ParseException( instring, loc, "Text not in expected column", self )
+ newloc = loc + self.col - thiscol
+ ret = instring[ loc: newloc ]
+ return newloc, ret
+
+
+class LineStart(_PositionToken):
+ """
+ Matches if current position is at the beginning of a line within the parse string
+
+ Example::
+
+ test = '''\
+ AAA this line
+ AAA and this line
+ AAA but not this one
+ B AAA and definitely not this one
+ '''
+
+ for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
+ print(t)
+
+ Prints::
+ ['AAA', ' this line']
+ ['AAA', ' and this line']
+
+ """
+ def __init__( self ):
+ super(LineStart,self).__init__()
+ self.errmsg = "Expected start of line"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if col(loc, instring) == 1:
+ return loc, []
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class LineEnd(_PositionToken):
+ """
+ Matches if current position is at the end of a line within the parse string
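+
+ Example::
+ # a minimal sketch: one integer per line
+ int_line = Word(nums) + LineEnd().suppress()
+ print(OneOrMore(int_line).parseString("12\\n34\\n")) # -> ['12', '34']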
+ """
+ def __init__( self ):
+ super(LineEnd,self).__init__()
+ self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
+ self.errmsg = "Expected end of line"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc<len(instring):
+ if instring[loc] == "\n":
+ return loc+1, "\n"
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc+1, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class StringStart(_PositionToken):
+ """
+ Matches if current position is at the beginning of the parse string
+ """
+ def __init__( self ):
+ super(StringStart,self).__init__()
+ self.errmsg = "Expected start of text"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc != 0:
+ # see if entire string up to here is just whitespace and ignoreables
+ if loc != self.preParse( instring, 0 ):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+class StringEnd(_PositionToken):
+ """
+ Matches if current position is at the end of the parse string
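+
+ Example::
+ # a minimal sketch: require the grammar to consume the entire input
+ complete = Word(alphas) + StringEnd()
+ print(complete.parseString("abc")) # -> ['abc']
+ # complete.parseString("abc 123") would raise ParseException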
+ """
+ def __init__( self ):
+ super(StringEnd,self).__init__()
+ self.errmsg = "Expected end of text"
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if loc < len(instring):
+ raise ParseException(instring, loc, self.errmsg, self)
+ elif loc == len(instring):
+ return loc+1, []
+ elif loc > len(instring):
+ return loc, []
+ else:
+ raise ParseException(instring, loc, self.errmsg, self)
+
+class WordStart(_PositionToken):
+ """
+ Matches if the current position is at the beginning of a Word, and
+ is not preceded by any character in a given set of C{wordChars}
+ (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
+ use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
+ the string being parsed, or at the beginning of a line.
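+
+ Example::
+ # a minimal sketch: match 'cat' only as a complete word
+ cat_word = WordStart(alphanums) + "cat" + WordEnd(alphanums)
+ print(cat_word.searchString("cat concat cats")) # -> [['cat']]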
+ """
+ def __init__(self, wordChars = printables):
+ super(WordStart,self).__init__()
+ self.wordChars = set(wordChars)
+ self.errmsg = "Not at the start of a word"
+
+ def parseImpl(self, instring, loc, doActions=True ):
+ if loc != 0:
+ if (instring[loc-1] in self.wordChars or
+ instring[loc] not in self.wordChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+class WordEnd(_PositionToken):
+ """
+ Matches if the current position is at the end of a Word, and
+ is not followed by any character in a given set of C{wordChars}
+ (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
+ use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
+ the string being parsed, or at the end of a line.
+ """
+ def __init__(self, wordChars = printables):
+ super(WordEnd,self).__init__()
+ self.wordChars = set(wordChars)
+ self.skipWhitespace = False
+ self.errmsg = "Not at the end of a word"
+
+ def parseImpl(self, instring, loc, doActions=True ):
+ instrlen = len(instring)
+ if instrlen>0 and loc<instrlen:
+ if (instring[loc] in self.wordChars or
+ instring[loc-1] not in self.wordChars):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+
+class ParseExpression(ParserElement):
+ """
+ Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(ParseExpression,self).__init__(savelist)
+ if isinstance( exprs, _generatorType ):
+ exprs = list(exprs)
+
+ if isinstance( exprs, basestring ):
+ self.exprs = [ ParserElement._literalStringClass( exprs ) ]
+ elif isinstance( exprs, Iterable ):
+ exprs = list(exprs)
+ # if sequence of strings provided, wrap with Literal
+ if all(isinstance(expr, basestring) for expr in exprs):
+ exprs = map(ParserElement._literalStringClass, exprs)
+ self.exprs = list(exprs)
+ else:
+ try:
+ self.exprs = list( exprs )
+ except TypeError:
+ self.exprs = [ exprs ]
+ self.callPreparse = False
+
+ def __getitem__( self, i ):
+ return self.exprs[i]
+
+ def append( self, other ):
+ self.exprs.append( other )
+ self.strRepr = None
+ return self
+
+ def leaveWhitespace( self ):
+ """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
+ all contained expressions."""
+ self.skipWhitespace = False
+ self.exprs = [ e.copy() for e in self.exprs ]
+ for e in self.exprs:
+ e.leaveWhitespace()
+ return self
+
+ def ignore( self, other ):
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ super( ParseExpression, self).ignore( other )
+ for e in self.exprs:
+ e.ignore( self.ignoreExprs[-1] )
+ else:
+ super( ParseExpression, self).ignore( other )
+ for e in self.exprs:
+ e.ignore( self.ignoreExprs[-1] )
+ return self
+
+ def __str__( self ):
+ try:
+ return super(ParseExpression,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None:
+ self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
+ return self.strRepr
+
+ def streamline( self ):
+ super(ParseExpression,self).streamline()
+
+ for e in self.exprs:
+ e.streamline()
+
+ # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
+ # but only if there are no parse actions or resultsNames on the nested And's
+ # (likewise for Or's and MatchFirst's)
+ if ( len(self.exprs) == 2 ):
+ other = self.exprs[0]
+ if ( isinstance( other, self.__class__ ) and
+ not(other.parseAction) and
+ other.resultsName is None and
+ not other.debug ):
+ self.exprs = other.exprs[:] + [ self.exprs[1] ]
+ self.strRepr = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ other = self.exprs[-1]
+ if ( isinstance( other, self.__class__ ) and
+ not(other.parseAction) and
+ other.resultsName is None and
+ not other.debug ):
+ self.exprs = self.exprs[:-1] + other.exprs[:]
+ self.strRepr = None
+ self.mayReturnEmpty |= other.mayReturnEmpty
+ self.mayIndexError |= other.mayIndexError
+
+ self.errmsg = "Expected " + _ustr(self)
+
+ return self
+
+ def setResultsName( self, name, listAllMatches=False ):
+ ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
+ return ret
+
+ def validate( self, validateTrace=[] ):
+ tmp = validateTrace[:]+[self]
+ for e in self.exprs:
+ e.validate(tmp)
+ self.checkRecursion( [] )
+
+ def copy(self):
+ ret = super(ParseExpression,self).copy()
+ ret.exprs = [e.copy() for e in self.exprs]
+ return ret
+
+class And(ParseExpression):
+ """
+ Requires all given C{ParseExpression}s to be found in the given order.
+ Expressions may be separated by whitespace.
+ May be constructed using the C{'+'} operator.
+ May also be constructed using the C{'-'} operator, which will suppress backtracking.
+
+ Example::
+ integer = Word(nums)
+ name_expr = OneOrMore(Word(alphas))
+
+ expr = And([integer("id"),name_expr("name"),integer("age")])
+ # more easily written as:
+ expr = integer("id") + name_expr("name") + integer("age")
+ """
+
+ class _ErrorStop(Empty):
+ def __init__(self, *args, **kwargs):
+ super(And._ErrorStop,self).__init__(*args, **kwargs)
+ self.name = '-'
+ self.leaveWhitespace()
+
+ def __init__( self, exprs, savelist = True ):
+ super(And,self).__init__(exprs, savelist)
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ self.setWhitespaceChars( self.exprs[0].whiteChars )
+ self.skipWhitespace = self.exprs[0].skipWhitespace
+ self.callPreparse = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ # pass False as last arg to _parse for first element, since we already
+ # pre-parsed the string as part of our And pre-parsing
+ loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
+ errorStop = False
+ for e in self.exprs[1:]:
+ if isinstance(e, And._ErrorStop):
+ errorStop = True
+ continue
+ if errorStop:
+ try:
+ loc, exprtokens = e._parse( instring, loc, doActions )
+ except ParseSyntaxException:
+ raise
+ except ParseBaseException as pe:
+ pe.__traceback__ = None
+ raise ParseSyntaxException._from_exception(pe)
+ except IndexError:
+ raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
+ else:
+ loc, exprtokens = e._parse( instring, loc, doActions )
+ if exprtokens or exprtokens.haskeys():
+ resultlist += exprtokens
+ return loc, resultlist
+
+ def __iadd__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #And( [ self, other ] )
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+ if not e.mayReturnEmpty:
+ break
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+
+class Or(ParseExpression):
+ """
+ Requires that at least one C{ParseExpression} is found.
+ If two expressions match, the expression that matches the longest string will be used.
+ May be constructed using the C{'^'} operator.
+
+ Example::
+ # construct Or using '^' operator
+
+ number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
+ print(number.searchString("123 3.1416 789"))
+ prints::
+ [['123'], ['3.1416'], ['789']]
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(Or,self).__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ maxExcLoc = -1
+ maxException = None
+ matches = []
+ for e in self.exprs:
+ try:
+ loc2 = e.tryParse( instring, loc )
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(instring,len(instring),e.errmsg,self)
+ maxExcLoc = len(instring)
+ else:
+ # save match among all matches, to retry longest to shortest
+ matches.append((loc2, e))
+
+ if matches:
+ matches.sort(key=lambda x: -x[0])
+ for _,e in matches:
+ try:
+ return e._parse( instring, loc, doActions )
+ except ParseException as err:
+ err.__traceback__ = None
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+
+ def __ixor__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #Or( [ self, other ] )
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class MatchFirst(ParseExpression):
+ """
+ Requires that at least one C{ParseExpression} is found.
+ If two expressions match, the first one listed is the one that will match.
+ May be constructed using the C{'|'} operator.
+
+ Example::
+ # construct MatchFirst using '|' operator
+
+ # watch the order of expressions to match
+ number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
+ print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
+
+ # put more selective expression first
+ number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
+ print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
+ """
+ def __init__( self, exprs, savelist = False ):
+ super(MatchFirst,self).__init__(exprs, savelist)
+ if self.exprs:
+ self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
+ else:
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ maxExcLoc = -1
+ maxException = None
+ for e in self.exprs:
+ try:
+ ret = e._parse( instring, loc, doActions )
+ return ret
+ except ParseException as err:
+ if err.loc > maxExcLoc:
+ maxException = err
+ maxExcLoc = err.loc
+ except IndexError:
+ if len(instring) > maxExcLoc:
+ maxException = ParseException(instring,len(instring),e.errmsg,self)
+ maxExcLoc = len(instring)
+
+ # only got here if no expression matched; raise the exception from the match that made it the furthest
+ if maxException is not None:
+ maxException.msg = self.errmsg
+ raise maxException
+ else:
+ raise ParseException(instring, loc, "no defined alternatives to match", self)
+
+ def __ior__(self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass( other )
+ return self.append( other ) #MatchFirst( [ self, other ] )
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class Each(ParseExpression):
+ """
+ Requires all given C{ParseExpression}s to be found, but in any order.
+ Expressions may be separated by whitespace.
+ May be constructed using the C{'&'} operator.
+
+ Example::
+ color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
+ shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
+ integer = Word(nums)
+ shape_attr = "shape:" + shape_type("shape")
+ posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
+ color_attr = "color:" + color("color")
+ size_attr = "size:" + integer("size")
+
+ # use Each (using operator '&') to accept attributes in any order
+ # (shape and posn are required, color and size are optional)
+ shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
+
+ shape_spec.runTests('''
+ shape: SQUARE color: BLACK posn: 100, 120
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ color:GREEN size:20 shape:TRIANGLE posn:20,40
+ '''
+ )
+ prints::
+ shape: SQUARE color: BLACK posn: 100, 120
+ ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
+ - color: BLACK
+ - posn: ['100', ',', '120']
+ - x: 100
+ - y: 120
+ - shape: SQUARE
+
+
+ shape: CIRCLE size: 50 color: BLUE posn: 50,80
+ ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
+ - color: BLUE
+ - posn: ['50', ',', '80']
+ - x: 50
+ - y: 80
+ - shape: CIRCLE
+ - size: 50
+
+
+ color: GREEN size: 20 shape: TRIANGLE posn: 20,40
+ ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
+ - color: GREEN
+ - posn: ['20', ',', '40']
+ - x: 20
+ - y: 40
+ - shape: TRIANGLE
+ - size: 20
+ """
+ def __init__( self, exprs, savelist = True ):
+ super(Each,self).__init__(exprs, savelist)
+ self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
+ self.skipWhitespace = True
+ self.initExprGroups = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.initExprGroups:
+ self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
+ opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
+ opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
+ self.optionals = opt1 + opt2
+ self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
+ self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
+ self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
+ self.required += self.multirequired
+ self.initExprGroups = False
+ tmpLoc = loc
+ tmpReqd = self.required[:]
+ tmpOpt = self.optionals[:]
+ matchOrder = []
+
+ keepMatching = True
+ while keepMatching:
+ tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
+ failed = []
+ for e in tmpExprs:
+ try:
+ tmpLoc = e.tryParse( instring, tmpLoc )
+ except ParseException:
+ failed.append(e)
+ else:
+ matchOrder.append(self.opt1map.get(id(e),e))
+ if e in tmpReqd:
+ tmpReqd.remove(e)
+ elif e in tmpOpt:
+ tmpOpt.remove(e)
+ if len(failed) == len(tmpExprs):
+ keepMatching = False
+
+ if tmpReqd:
+ missing = ", ".join(_ustr(e) for e in tmpReqd)
+ raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
+
+ # add any unmatched Optionals, in case they have default values defined
+ matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
+
+ resultlist = []
+ for e in matchOrder:
+ loc,results = e._parse(instring,loc,doActions)
+ resultlist.append(results)
+
+ finalResults = sum(resultlist, ParseResults([]))
+ return loc, finalResults
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
+
+ return self.strRepr
+
+ def checkRecursion( self, parseElementList ):
+ subRecCheckList = parseElementList[:] + [ self ]
+ for e in self.exprs:
+ e.checkRecursion( subRecCheckList )
+
+
+class ParseElementEnhance(ParserElement):
+ """
+ Abstract subclass of C{ParserElement}, for wrapping and post-processing a single contained expression.
+ """
+ def __init__( self, expr, savelist=False ):
+ super(ParseElementEnhance,self).__init__(savelist)
+ if isinstance( expr, basestring ):
+ if issubclass(ParserElement._literalStringClass, Token):
+ expr = ParserElement._literalStringClass(expr)
+ else:
+ expr = ParserElement._literalStringClass(Literal(expr))
+ self.expr = expr
+ self.strRepr = None
+ if expr is not None:
+ self.mayIndexError = expr.mayIndexError
+ self.mayReturnEmpty = expr.mayReturnEmpty
+ self.setWhitespaceChars( expr.whiteChars )
+ self.skipWhitespace = expr.skipWhitespace
+ self.saveAsList = expr.saveAsList
+ self.callPreparse = expr.callPreparse
+ self.ignoreExprs.extend(expr.ignoreExprs)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.expr is not None:
+ return self.expr._parse( instring, loc, doActions, callPreParse=False )
+ else:
+ raise ParseException("",loc,self.errmsg,self)
+
+ def leaveWhitespace( self ):
+ self.skipWhitespace = False
+ # check for None before copying, so an element with no contained expression doesn't blow up
+ if self.expr is not None:
+ self.expr = self.expr.copy()
+ self.expr.leaveWhitespace()
+ return self
+
+ def ignore( self, other ):
+ if isinstance( other, Suppress ):
+ if other not in self.ignoreExprs:
+ super( ParseElementEnhance, self).ignore( other )
+ if self.expr is not None:
+ self.expr.ignore( self.ignoreExprs[-1] )
+ else:
+ super( ParseElementEnhance, self).ignore( other )
+ if self.expr is not None:
+ self.expr.ignore( self.ignoreExprs[-1] )
+ return self
+
+ def streamline( self ):
+ super(ParseElementEnhance,self).streamline()
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def checkRecursion( self, parseElementList ):
+ if self in parseElementList:
+ raise RecursiveGrammarException( parseElementList+[self] )
+ subRecCheckList = parseElementList[:] + [ self ]
+ if self.expr is not None:
+ self.expr.checkRecursion( subRecCheckList )
+
+ def validate( self, validateTrace=[] ):
+ tmp = validateTrace[:]+[self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self.checkRecursion( [] )
+
+ def __str__( self ):
+ try:
+ return super(ParseElementEnhance,self).__str__()
+ except Exception:
+ pass
+
+ if self.strRepr is None and self.expr is not None:
+ self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
+ return self.strRepr
+
+
+class FollowedBy(ParseElementEnhance):
+ """
+ Lookahead matching of the given parse expression. C{FollowedBy}
+ does I{not} advance the parsing position within the input string, it only
+ verifies that the specified parse expression matches at the current
+ position. C{FollowedBy} always returns a null token list.
+
+ Example::
+ # use FollowedBy to match a label only if it is followed by a ':'
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+ OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
+ prints::
+ [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
+ """
+ def __init__( self, expr ):
+ super(FollowedBy,self).__init__(expr)
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ self.expr.tryParse( instring, loc )
+ return loc, []
+
+
+class NotAny(ParseElementEnhance):
+ """
+ Lookahead to disallow matching with the given parse expression. C{NotAny}
+ does I{not} advance the parsing position within the input string, it only
+ verifies that the specified parse expression does I{not} match at the current
+ position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
+ always returns a null token list. May be constructed using the '~' operator.
+
+ Example::
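+ # a minimal sketch: keep keywords from matching as identifiers
+ AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
+ ident = NotAny(AND | OR | NOT) + Word(alphas)
+ print(ident.parseString("shape")) # -> ['shape']
+ # ident.parseString("AND") would raise ParseException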
+
+ """
+ def __init__( self, expr ):
+ super(NotAny,self).__init__(expr)
+ #~ self.leaveWhitespace()
+ self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
+ self.mayReturnEmpty = True
+ self.errmsg = "Found unwanted token, "+_ustr(self.expr)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ if self.expr.canParseNext(instring, loc):
+ raise ParseException(instring, loc, self.errmsg, self)
+ return loc, []
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "~{" + _ustr(self.expr) + "}"
+
+ return self.strRepr
+
+class _MultipleMatch(ParseElementEnhance):
+ def __init__( self, expr, stopOn=None):
+ super(_MultipleMatch, self).__init__(expr)
+ self.saveAsList = True
+ ender = stopOn
+ if isinstance(ender, basestring):
+ ender = ParserElement._literalStringClass(ender)
+ self.not_ender = ~ender if ender is not None else None
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ self_expr_parse = self.expr._parse
+ self_skip_ignorables = self._skipIgnorables
+ check_ender = self.not_ender is not None
+ if check_ender:
+ try_not_ender = self.not_ender.tryParse
+
+ # must be at least one (but first see if we are the stopOn sentinel;
+ # if so, fail)
+ if check_ender:
+ try_not_ender(instring, loc)
+ loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
+ try:
+ hasIgnoreExprs = bool(self.ignoreExprs)
+ while 1:
+ if check_ender:
+ try_not_ender(instring, loc)
+ if hasIgnoreExprs:
+ preloc = self_skip_ignorables( instring, loc )
+ else:
+ preloc = loc
+ loc, tmptokens = self_expr_parse( instring, preloc, doActions )
+ if tmptokens or tmptokens.haskeys():
+ tokens += tmptokens
+ except (ParseException,IndexError):
+ pass
+
+ return loc, tokens
+
+class OneOrMore(_MultipleMatch):
+ """
+ Repetition of one or more of the given expression.
+
+ Parameters:
+ - expr - expression that must match one or more times
+ - stopOn - (default=C{None}) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example::
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+ text = "shape: SQUARE posn: upper left color: BLACK"
+ OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
+
+ # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+ OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
+
+ # could also be written as
+ (attr_expr * (1,)).parseString(text).pprint()
+ """
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "{" + _ustr(self.expr) + "}..."
+
+ return self.strRepr
+
+class ZeroOrMore(_MultipleMatch):
+ """
+ Optional repetition of zero or more of the given expression.
+
+ Parameters:
+ - expr - expression that may match zero or more times
+ - stopOn - (default=C{None}) - expression for a terminating sentinel
+ (only required if the sentinel would ordinarily match the repetition
+ expression)
+
+ Example: similar to L{OneOrMore}
+ """
+ def __init__( self, expr, stopOn=None):
+ super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ try:
+ return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
+ except (ParseException,IndexError):
+ return loc, []
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "[" + _ustr(self.expr) + "]..."
+
+ return self.strRepr
+
+class _NullToken(object):
+ def __bool__(self):
+ return False
+ __nonzero__ = __bool__
+ def __str__(self):
+ return ""
+
+_optionalNotMatched = _NullToken()
+class Optional(ParseElementEnhance):
+ """
+ Optional matching of the given expression.
+
+ Parameters:
+ - expr - expression to attempt to match (will match zero or one time)
+ - default (optional) - value to be returned if the optional expression is not found.
+
+ Example::
+ # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
+ zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
+ zip.runTests('''
+ # traditional ZIP code
+ 12345
+
+ # ZIP+4 form
+ 12101-0001
+
+ # invalid ZIP
+ 98765-
+ ''')
+ prints::
+ # traditional ZIP code
+ 12345
+ ['12345']
+
+ # ZIP+4 form
+ 12101-0001
+ ['12101-0001']
+
+ # invalid ZIP
+ 98765-
+ ^
+ FAIL: Expected end of text (at char 5), (line:1, col:6)
+ """
+ def __init__( self, expr, default=_optionalNotMatched ):
+ super(Optional,self).__init__( expr, savelist=False )
+ self.saveAsList = self.expr.saveAsList
+ self.defaultValue = default
+ self.mayReturnEmpty = True
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ try:
+ loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
+ except (ParseException,IndexError):
+ if self.defaultValue is not _optionalNotMatched:
+ if self.expr.resultsName:
+ tokens = ParseResults([ self.defaultValue ])
+ tokens[self.expr.resultsName] = self.defaultValue
+ else:
+ tokens = [ self.defaultValue ]
+ else:
+ tokens = []
+ return loc, tokens
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+
+ if self.strRepr is None:
+ self.strRepr = "[" + _ustr(self.expr) + "]"
+
+ return self.strRepr
+
+class SkipTo(ParseElementEnhance):
+ """
+ Token for skipping over all undefined text until the matched expression is found.
+
+ Parameters:
+ - expr - target expression marking the end of the data to be skipped
+ - include - (default=C{False}) if True, the target expression is also parsed
+ (the skipped text and target expression are returned as a 2-element list).
+ - ignore - (default=C{None}) used to define grammars (typically quoted strings and
+ comments) that might contain false matches to the target expression
+ - failOn - (default=C{None}) define expressions that are not allowed to be
+ included in the skipped text; if found before the target expression is found,
+ the SkipTo is not a match
+
+ Example::
+ report = '''
+ Outstanding Issues Report - 1 Jan 2000
+
+ # | Severity | Description | Days Open
+ -----+----------+-------------------------------------------+-----------
+ 101 | Critical | Intermittent system crash | 6
+ 94 | Cosmetic | Spelling error on Login ('log|n') | 14
+ 79 | Minor | System slow when running too many reports | 47
+ '''
+ integer = Word(nums)
+ SEP = Suppress('|')
+ # use SkipTo to simply match everything up until the next SEP
+ # - ignore quoted strings, so that a '|' character inside a quoted string does not match
+ # - parse action will call token.strip() for each matched token, i.e., the description body
+ string_data = SkipTo(SEP, ignore=quotedString)
+ string_data.setParseAction(tokenMap(str.strip))
+ ticket_expr = (integer("issue_num") + SEP
+ + string_data("sev") + SEP
+ + string_data("desc") + SEP
+ + integer("days_open"))
+
+ for tkt in ticket_expr.searchString(report):
+ print(tkt.dump())
+ prints::
+ ['101', 'Critical', 'Intermittent system crash', '6']
+ - days_open: 6
+ - desc: Intermittent system crash
+ - issue_num: 101
+ - sev: Critical
+ ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
+ - days_open: 14
+ - desc: Spelling error on Login ('log|n')
+ - issue_num: 94
+ - sev: Cosmetic
+ ['79', 'Minor', 'System slow when running too many reports', '47']
+ - days_open: 47
+ - desc: System slow when running too many reports
+ - issue_num: 79
+ - sev: Minor
+ """
+ def __init__( self, other, include=False, ignore=None, failOn=None ):
+ super( SkipTo, self ).__init__( other )
+ self.ignoreExpr = ignore
+ self.mayReturnEmpty = True
+ self.mayIndexError = False
+ self.includeMatch = include
+ self.asList = False
+ if isinstance(failOn, basestring):
+ self.failOn = ParserElement._literalStringClass(failOn)
+ else:
+ self.failOn = failOn
+ self.errmsg = "No match found for "+_ustr(self.expr)
+
+ def parseImpl( self, instring, loc, doActions=True ):
+ startloc = loc
+ instrlen = len(instring)
+ expr = self.expr
+ expr_parse = self.expr._parse
+ self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
+ self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
+
+ tmploc = loc
+ while tmploc <= instrlen:
+ if self_failOn_canParseNext is not None:
+ # break if failOn expression matches
+ if self_failOn_canParseNext(instring, tmploc):
+ break
+
+ if self_ignoreExpr_tryParse is not None:
+ # advance past ignore expressions
+ while 1:
+ try:
+ tmploc = self_ignoreExpr_tryParse(instring, tmploc)
+ except ParseBaseException:
+ break
+
+ try:
+ expr_parse(instring, tmploc, doActions=False, callPreParse=False)
+ except (ParseException, IndexError):
+ # no match, advance loc in string
+ tmploc += 1
+ else:
+ # matched skipto expr, done
+ break
+
+ else:
+ # ran off the end of the input string without matching skipto expr, fail
+ raise ParseException(instring, loc, self.errmsg, self)
+
+ # build up return values
+ loc = tmploc
+ skiptext = instring[startloc:loc]
+ skipresult = ParseResults(skiptext)
+
+ if self.includeMatch:
+ loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
+ skipresult += mat
+
+ return loc, skipresult
+
+class Forward(ParseElementEnhance):
+ """
+ Forward declaration of an expression to be defined later -
+ used for recursive grammars, such as algebraic infix notation.
+ When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
+
+ Note: take care when assigning to C{Forward} not to overlook precedence of operators.
+ Specifically, '|' has a lower precedence than '<<', so that::
+ fwdExpr << a | b | c
+ will actually be evaluated as::
+ (fwdExpr << a) | b | c
+ thereby leaving b and c out as parseable alternatives. It is recommended that you
+ explicitly group the values inserted into the C{Forward}::
+ fwdExpr << (a | b | c)
+ Converting to use the '<<=' operator instead will avoid this problem.
+
+ See L{ParseResults.pprint} for an example of a recursive parser created using
+ C{Forward}.
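+
+ Example::
+ # a minimal sketch: nested parenthesized lists of words
+ LPAR, RPAR = map(Suppress, "()")
+ item = Word(alphas)
+ nested = Forward()
+ nested <<= Group(LPAR + ZeroOrMore(item | nested) + RPAR)
+ print(nested.parseString("(a (b c) d)")) # -> [['a', ['b', 'c'], 'd']]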
+ """
+ def __init__( self, other=None ):
+ super(Forward,self).__init__( other, savelist=False )
+
+ def __lshift__( self, other ):
+ if isinstance( other, basestring ):
+ other = ParserElement._literalStringClass(other)
+ self.expr = other
+ self.strRepr = None
+ self.mayIndexError = self.expr.mayIndexError
+ self.mayReturnEmpty = self.expr.mayReturnEmpty
+ self.setWhitespaceChars( self.expr.whiteChars )
+ self.skipWhitespace = self.expr.skipWhitespace
+ self.saveAsList = self.expr.saveAsList
+ self.ignoreExprs.extend(self.expr.ignoreExprs)
+ return self
+
+ def __ilshift__(self, other):
+ return self << other
+
+ def leaveWhitespace( self ):
+ self.skipWhitespace = False
+ return self
+
+ def streamline( self ):
+ if not self.streamlined:
+ self.streamlined = True
+ if self.expr is not None:
+ self.expr.streamline()
+ return self
+
+ def validate( self, validateTrace=[] ):
+ if self not in validateTrace:
+ tmp = validateTrace[:]+[self]
+ if self.expr is not None:
+ self.expr.validate(tmp)
+ self.checkRecursion([])
+
+ def __str__( self ):
+ if hasattr(self,"name"):
+ return self.name
+ return self.__class__.__name__ + ": ..."
+
+ # stubbed out for now - creates awful memory and perf issues
+ # self._revertClass = self.__class__
+ # self.__class__ = _ForwardNoRecurse
+ # try:
+ #     if self.expr is not None:
+ #         retString = _ustr(self.expr)
+ #     else:
+ #         retString = "None"
+ # finally:
+ #     self.__class__ = self._revertClass
+ # return self.__class__.__name__ + ": " + retString
+
+ def copy(self):
+ if self.expr is not None:
+ return super(Forward,self).copy()
+ else:
+ ret = Forward()
+ ret <<= self
+ return ret
+
+class _ForwardNoRecurse(Forward):
+ def __str__( self ):
+ return "..."
+
+class TokenConverter(ParseElementEnhance):
+ """
+ Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
+ """
+ def __init__( self, expr, savelist=False ):
+ super(TokenConverter,self).__init__( expr )#, savelist )
+ self.saveAsList = False
+
+class Combine(TokenConverter):
+ """
+ Converter to concatenate all matching tokens to a single string.
+ By default, the matching patterns must also be contiguous in the input string;
+ this can be disabled by specifying C{'adjacent=False'} in the constructor.
+
+ Example::
+ real = Word(nums) + '.' + Word(nums)
+ print(real.parseString('3.1416')) # -> ['3', '.', '1416']
+ # will also erroneously match the following
+ print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
+
+ real = Combine(Word(nums) + '.' + Word(nums))
+ print(real.parseString('3.1416')) # -> ['3.1416']
+ # no match when there are internal spaces
+ print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
+ """
+ def __init__( self, expr, joinString="", adjacent=True ):
+ super(Combine,self).__init__( expr )
+ # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
+ if adjacent:
+ self.leaveWhitespace()
+ self.adjacent = adjacent
+ self.skipWhitespace = True
+ self.joinString = joinString
+ self.callPreparse = True
+
+ def ignore( self, other ):
+ if self.adjacent:
+ ParserElement.ignore(self, other)
+ else:
+ super( Combine, self).ignore( other )
+ return self
+
+ def postParse( self, instring, loc, tokenlist ):
+ retToks = tokenlist.copy()
+ del retToks[:]
+ retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
+
+ if self.resultsName and retToks.haskeys():
+ return [ retToks ]
+ else:
+ return retToks
+
+class Group(TokenConverter):
+ """
+ Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
+
+ Example::
+ ident = Word(alphas)
+ num = Word(nums)
+ term = ident | num
+ func = ident + Optional(delimitedList(term))
+ print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
+
+ func = ident + Group(Optional(delimitedList(term)))
+ print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
+ """
+ def __init__( self, expr ):
+ super(Group,self).__init__( expr )
+ self.saveAsList = True
+
+ def postParse( self, instring, loc, tokenlist ):
+ return [ tokenlist ]
+
+class Dict(TokenConverter):
+ """
+ Converter to return a repetitive expression as a list, but also as a dictionary.
+ Each element can also be referenced using the first token in the expression as its key.
+ Useful for tabular report scraping when the first column can be used as an item key.
+
+ Example::
+ data_word = Word(alphas)
+ label = data_word + FollowedBy(':')
+ attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
+
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+
+ # print attributes as plain groups
+ print(OneOrMore(attr_expr).parseString(text).dump())
+
+ # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
+ result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
+ print(result.dump())
+
+ # access named fields as dict entries, or output as dict
+ print(result['shape'])
+ print(result.asDict())
+ prints::
+ ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
+
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: light blue
+ - posn: upper left
+ - shape: SQUARE
+ - texture: burlap
+ SQUARE
+ {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
+ See more examples at L{ParseResults} of accessing fields by results name.
+ """
+ def __init__( self, expr ):
+ super(Dict,self).__init__( expr )
+ self.saveAsList = True
+
+ def postParse( self, instring, loc, tokenlist ):
+ for i,tok in enumerate(tokenlist):
+ if len(tok) == 0:
+ continue
+ ikey = tok[0]
+ if isinstance(ikey,int):
+ ikey = _ustr(tok[0]).strip()
+ if len(tok)==1:
+ tokenlist[ikey] = _ParseResultsWithOffset("",i)
+ elif len(tok)==2 and not isinstance(tok[1],ParseResults):
+ tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
+ else:
+ dictvalue = tok.copy() #ParseResults(i)
+ del dictvalue[0]
+ if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
+ else:
+ tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
+
+ if self.resultsName:
+ return [ tokenlist ]
+ else:
+ return tokenlist
+
+
+class Suppress(TokenConverter):
+ """
+ Converter for ignoring the results of a parsed expression.
+
+ Example::
+ source = "a, b, c,d"
+ wd = Word(alphas)
+ wd_list1 = wd + ZeroOrMore(',' + wd)
+ print(wd_list1.parseString(source))
+
+ # often, delimiters that are useful during parsing are just in the
+ # way afterward - use Suppress to keep them out of the parsed output
+ wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
+ print(wd_list2.parseString(source))
+ prints::
+ ['a', ',', 'b', ',', 'c', ',', 'd']
+ ['a', 'b', 'c', 'd']
+ (See also L{delimitedList}.)
+ """
+ def postParse( self, instring, loc, tokenlist ):
+ return []
+
+ def suppress( self ):
+ return self
+
+
+class OnlyOnce(object):
+ """
+ Wrapper for parse actions, to ensure they are only called once.
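+
+ Example::
+ # a minimal sketch: a parse action that is allowed to fire only once
+ def report_match(s, l, t):
+ print("matched at %d: %s" % (l, t))
+ pa = OnlyOnce(report_match)
+ wd = Word(alphas).setParseAction(pa)
+ wd.searchString("aa bb") # the action fires for 'aa' only; the second
+ # call raises ParseException, so 'bb' is not matched
+ pa.reset() # re-arm the wrapped action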
+ """
+ def __init__(self, methodCall):
+ self.callable = _trim_arity(methodCall)
+ self.called = False
+ def __call__(self,s,l,t):
+ if not self.called:
+ results = self.callable(s,l,t)
+ self.called = True
+ return results
+ raise ParseException(s,l,"")
+ def reset(self):
+ self.called = False
+
+def traceParseAction(f):
+ """
+ Decorator for debugging parse actions.
+
+ When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})"}.
+ When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
+
+ Example::
+ wd = Word(alphas)
+
+ @traceParseAction
+ def remove_duplicate_chars(tokens):
+ return ''.join(sorted(set(''.join(tokens))))
+
+ wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
+ print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
+ prints::
+ >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+ <<leaving remove_duplicate_chars (ret: 'dfjkls')
+ ['dfjkls']
+ """
+ f = _trim_arity(f)
+ def z(*paArgs):
+ thisFunc = f.__name__
+ s,l,t = paArgs[-3:]
+ if len(paArgs)>3:
+ thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
+ sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
+ try:
+ ret = f(*paArgs)
+ except Exception as exc:
+ sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
+ raise
+ sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
+ return ret
+ try:
+ z.__name__ = f.__name__
+ except AttributeError:
+ pass
+ return z
+
+#
+# global helpers
+#
+def delimitedList( expr, delim=",", combine=False ):
+ """
+ Helper to define a delimited list of expressions - the delimiter defaults to ','.
+ By default, the list elements and delimiters can have intervening whitespace and
+ comments, but this can be overridden by passing C{combine=True} in the constructor.
+ If C{combine} is set to C{True}, the matching tokens are returned as a single token
+ string, with the delimiters included; otherwise, the matching tokens are returned
+ as a list of tokens, with the delimiters suppressed.
+
+ Example::
+ delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
+ delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
+ """
+ dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
+ if combine:
+ return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
+ else:
+ return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
+
+def countedArray( expr, intExpr=None ):
+ """
+ Helper to define a counted list of expressions.
+ This helper defines a pattern of the form::
+ integer expr expr expr...
+ where the leading integer tells how many expr expressions follow.
+ The matched tokens are returned as a list of the expr tokens - the leading count token is suppressed.
+
+ If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
+
+ Example::
+ countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
+
+ # in this parser, the leading integer value is given in binary,
+ # '10' indicating that 2 values are in the array
+ binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
+ countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
+ """
+ arrayExpr = Forward()
+ def countFieldParseAction(s,l,t):
+ n = t[0]
+ arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
+ return []
+ if intExpr is None:
+ intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
+ else:
+ intExpr = intExpr.copy()
+ intExpr.setName("arrayLen")
+ intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
+ return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
+
+def _flatten(L):
+ ret = []
+ for i in L:
+ if isinstance(i,list):
+ ret.extend(_flatten(i))
+ else:
+ ret.append(i)
+ return ret
+
+def matchPreviousLiteral(expr):
+ """
+ Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks
+ for a 'repeat' of a previous expression. For example::
+ first = Word(nums)
+ second = matchPreviousLiteral(first)
+ matchExpr = first + ":" + second
+ will match C{"1:1"}, but not C{"1:2"}. Because this matches a
+ previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
+ If this is not desired, use C{matchPreviousExpr}.
+ Do I{not} use with packrat parsing enabled.
+ """
+ rep = Forward()
+ def copyTokenToRepeater(s,l,t):
+ if t:
+ if len(t) == 1:
+ rep << t[0]
+ else:
+ # flatten t tokens
+ tflat = _flatten(t.asList())
+ rep << And(Literal(tt) for tt in tflat)
+ else:
+ rep << Empty()
+ expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+ rep.setName('(prev) ' + _ustr(expr))
+ return rep
+
+def matchPreviousExpr(expr):
+ """
+ Helper to define an expression that is indirectly defined from
+ the tokens matched in a previous expression, that is, it looks
+ for a 'repeat' of a previous expression. For example::
+ first = Word(nums)
+ second = matchPreviousExpr(first)
+ matchExpr = first + ":" + second
+ will match C{"1:1"}, but not C{"1:2"}. Because this matches by
+ expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
+ the expressions are evaluated first, and then compared, so
+ C{"1"} is compared with C{"10"}.
+ Do I{not} use with packrat parsing enabled.
+ """
+ rep = Forward()
+ e2 = expr.copy()
+ rep <<= e2
+ def copyTokenToRepeater(s,l,t):
+ matchTokens = _flatten(t.asList())
+ def mustMatchTheseTokens(s,l,t):
+ theseTokens = _flatten(t.asList())
+ if theseTokens != matchTokens:
+ raise ParseException("",0,"")
+ rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
+ expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
+ rep.setName('(prev) ' + _ustr(expr))
+ return rep
+
+def _escapeRegexRangeChars(s):
+ #~ escape these chars: ^-]
+ for c in r"\^-]":
+ s = s.replace(c,_bslash+c)
+ s = s.replace("\n",r"\n")
+ s = s.replace("\t",r"\t")
+ return _ustr(s)
+
+def oneOf( strs, caseless=False, useRegex=True ):
+ """
+ Helper to quickly define a set of alternative Literals, making sure to do
+ longest-first testing when there is a conflict, regardless of the input order;
+ returns a C{L{MatchFirst}} for best performance.
+
+ Parameters:
+ - strs - a string of space-delimited literals, or a collection of string literals
+ - caseless - (default=C{False}) - treat all literals as caseless
+ - useRegex - (default=C{True}) - as an optimization, will generate a Regex
+ object; otherwise (if C{caseless=True}, or if creating a C{Regex} raises an
+ exception), will generate a C{MatchFirst} object
+
+ Example::
+ comp_oper = oneOf("< = > <= >= !=")
+ var = Word(alphas)
+ number = Word(nums)
+ term = var | number
+ comparison_expr = term + comp_oper + term
+ print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
+ prints::
+ [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
+ """
+ if caseless:
+ isequal = ( lambda a,b: a.upper() == b.upper() )
+ masks = ( lambda a,b: b.upper().startswith(a.upper()) )
+ parseElementClass = CaselessLiteral
+ else:
+ isequal = ( lambda a,b: a == b )
+ masks = ( lambda a,b: b.startswith(a) )
+ parseElementClass = Literal
+
+ symbols = []
+ if isinstance(strs,basestring):
+ symbols = strs.split()
+ elif isinstance(strs, Iterable):
+ symbols = list(strs)
+ else:
+ warnings.warn("Invalid argument to oneOf, expected string or iterable",
+ SyntaxWarning, stacklevel=2)
+ if not symbols:
+ return NoMatch()
+
+ i = 0
+ while i < len(symbols)-1:
+ cur = symbols[i]
+ for j,other in enumerate(symbols[i+1:]):
+ if ( isequal(other, cur) ):
+ del symbols[i+j+1]
+ break
+ elif ( masks(cur, other) ):
+ del symbols[i+j+1]
+ symbols.insert(i,other)
+ cur = other
+ break
+ else:
+ i += 1
+
+ if not caseless and useRegex:
+ #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
+ try:
+ if len(symbols)==len("".join(symbols)):
+ return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
+ else:
+ return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
+ except Exception:
+ warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
+ SyntaxWarning, stacklevel=2)
+
+
+ # last resort, just use MatchFirst
+ return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
+
+def dictOf( key, value ):
+ """
+ Helper to easily and clearly define a dictionary by specifying the respective patterns
+ for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
+ in the proper order. The key pattern can include delimiting markers or punctuation,
+ as long as they are suppressed, thereby leaving the significant key text. The value
+ pattern can include named results, so that the C{Dict} results can include named token
+ fields.
+
+ Example::
+ text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
+ attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
+ print(OneOrMore(attr_expr).parseString(text).dump())
+
+ attr_label = label
+ attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
+
+ # similar to Dict, but simpler call format
+ result = dictOf(attr_label, attr_value).parseString(text)
+ print(result.dump())
+ print(result['shape'])
+ print(result.shape) # object attribute access works too
+ print(result.asDict())
+ prints::
+ [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
+ - color: light blue
+ - posn: upper left
+ - shape: SQUARE
+ - texture: burlap
+ SQUARE
+ SQUARE
+ {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
+ """
+ return Dict( ZeroOrMore( Group ( key + value ) ) )
+
+def originalTextFor(expr, asString=True):
+ """
+ Helper to return the original, untokenized text for a given expression. Useful to
+ restore the parsed fields of an HTML start tag into the raw tag text itself, or to
+ revert separate tokens with intervening whitespace back to the original matching
+ input text. By default, returns a string containing the original parsed text.
+
+ If the optional C{asString} argument is passed as C{False}, then the return value is a
+ C{L{ParseResults}} containing any results names that were originally matched, and a
+ single token containing the original matched text from the input string. So if
+ the expression passed to C{L{originalTextFor}} contains expressions with defined
+ results names, you must set C{asString} to C{False} if you want to preserve those
+ results name values.
+
+ Example::
+ src = "this is test <b> bold <i>text</i> </b> normal text "
+ for tag in ("b","i"):
+ opener,closer = makeHTMLTags(tag)
+ patt = originalTextFor(opener + SkipTo(closer) + closer)
+ print(patt.searchString(src)[0])
+ prints::
+ ['<b> bold <i>text</i> </b>']
+ ['<i>text</i>']
+ """
+ locMarker = Empty().setParseAction(lambda s,loc,t: loc)
+ endlocMarker = locMarker.copy()
+ endlocMarker.callPreparse = False
+ matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
+ if asString:
+ extractText = lambda s,l,t: s[t._original_start:t._original_end]
+ else:
+ def extractText(s,l,t):
+ t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
+ matchExpr.setParseAction(extractText)
+ matchExpr.ignoreExprs = expr.ignoreExprs
+ return matchExpr
+
+def ungroup(expr):
+ """
+ Helper to undo pyparsing's default grouping of And expressions, even
+ if all but one are non-empty.
+ """
+ return TokenConverter(expr).setParseAction(lambda t:t[0])
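+
+# A minimal sketch of ungroup in action (names here are illustrative only):
+#~ inner = Group(Word(alphas) + Word(nums))
+#~ print(inner.parseString("abc 123"))          # -> [['abc', '123']]
+#~ print(ungroup(inner).parseString("abc 123")) # -> ['abc', '123']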
+
+def locatedExpr(expr):
+ """
+ Helper to decorate a returned token with its starting and ending locations in the input string.
+ This helper adds the following results names:
+ - locn_start = location where matched expression begins
+ - locn_end = location where matched expression ends
+ - value = the actual parsed results
+
+ Be careful if the input text contains C{<TAB>} characters, you may want to call
+ C{L{ParserElement.parseWithTabs}}
+
+ Example::
+ wd = Word(alphas)
+ for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
+ print(match)
+ prints::
+ [[0, 'ljsdf', 5]]
+ [[8, 'lksdjjf', 15]]
+ [[18, 'lkkjj', 23]]
+ """
+ locator = Empty().setParseAction(lambda s,l,t: l)
+ return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
+
+
+# convenience constants for positional expressions
+empty = Empty().setName("empty")
+lineStart = LineStart().setName("lineStart")
+lineEnd = LineEnd().setName("lineEnd")
+stringStart = StringStart().setName("stringStart")
+stringEnd = StringEnd().setName("stringEnd")
+
+_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
+_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
+_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r'\]', exact=1)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
+
+def srange(s):
+ r"""
+ Helper to easily define string ranges for use in Word construction. Borrows
+ syntax from regexp '[]' string range definitions::
+ srange("[0-9]") -> "0123456789"
+ srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
+ srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+ The input string must be enclosed in []'s, and the returned string is the expanded
+ character set joined into a single string.
+ The values enclosed in the []'s may be:
+ - a single character
+ - an escaped character with a leading backslash (such as C{\-} or C{\]})
+ - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
+ (C{\0x##} is also supported for backwards compatibility)
+ - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
+ - a range of any of the above, separated by a dash (C{'a-z'}, etc.)
+ - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
+ """
+ _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
+ try:
+ return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
+ except Exception:
+ return ""
+
+def matchOnlyAtCol(n):
+ """
+ Helper method for defining parse actions that require matching at a specific
+ column in the input text.
+ """
+ def verifyCol(strg,locn,toks):
+ if col(locn,strg) != n:
+ raise ParseException(strg,locn,"matched token not at column %d" % n)
+ return verifyCol
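+
+# A sketch of matchOnlyAtCol in a scanning context (illustrative data):
+#~ num_at_col1 = Word(nums).setParseAction(matchOnlyAtCol(1))
+#~ print(num_at_col1.searchString("11\n 22\n33"))  # -> [['11'], ['33']]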
+
+def replaceWith(replStr):
+ """
+ Helper method for common parse actions that simply return a literal value. Especially
+ useful when used with C{L{transformString<ParserElement.transformString>}()}.
+
+ Example::
+ num = Word(nums).setParseAction(lambda toks: int(toks[0]))
+ na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
+ term = na | num
+
+ OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
+ """
+ return lambda s,l,t: [replStr]
+
+def removeQuotes(s,l,t):
+ """
+ Helper parse action for removing quotation marks from parsed quoted strings.
+
+ Example::
+ # by default, quotation marks are included in parsed results
+ quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+ # use removeQuotes to strip quotation marks from parsed results
+ quotedString.setParseAction(removeQuotes)
+ quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+ """
+ return t[0][1:-1]
+
+def tokenMap(func, *args):
+ """
+ Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
+ args are passed, they are forwarded to the given function as additional arguments after
+ the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
+ parsed data to an integer using base 16.
+
+ Example (compare the last example to the one in L{ParserElement.transformString})::
+ hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
+ hex_ints.runTests('''
+ 00 11 22 aa FF 0a 0d 1a
+ ''')
+
+ upperword = Word(alphas).setParseAction(tokenMap(str.upper))
+ OneOrMore(upperword).runTests('''
+ my kingdom for a horse
+ ''')
+
+ wd = Word(alphas).setParseAction(tokenMap(str.title))
+ OneOrMore(wd).setParseAction(' '.join).runTests('''
+ now is the winter of our discontent made glorious summer by this sun of york
+ ''')
+ prints::
+ 00 11 22 aa FF 0a 0d 1a
+ [0, 17, 34, 170, 255, 10, 13, 26]
+
+ my kingdom for a horse
+ ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
+
+ now is the winter of our discontent made glorious summer by this sun of york
+ ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
+ """
+ def pa(s,l,t):
+ return [func(tokn, *args) for tokn in t]
+
+ try:
+ func_name = getattr(func, '__name__',
+ getattr(func, '__class__').__name__)
+ except Exception:
+ func_name = str(func)
+ pa.__name__ = func_name
+
+ return pa
+
+upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
+"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
+
+downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
+"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
+
+def _makeTags(tagStr, xml):
+ """Internal helper to construct opening and closing tag expressions, given a tag name"""
+ if isinstance(tagStr,basestring):
+ resname = tagStr
+ tagStr = Keyword(tagStr, caseless=not xml)
+ else:
+ resname = tagStr.name
+
+ tagAttrName = Word(alphas,alphanums+"_-:")
+ if (xml):
+ tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
+ openTag = Suppress("<") + tagStr("tag") + \
+ Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
+ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+ else:
+ printablesLessRAbrack = "".join(c for c in printables if c not in ">")
+ tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
+ openTag = Suppress("<") + tagStr("tag") + \
+ Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
+ Optional( Suppress("=") + tagAttrValue ) ))) + \
+ Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
+ closeTag = Combine(_L("</") + tagStr + ">")
+
+ openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
+ closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
+ openTag.tag = resname
+ closeTag.tag = resname
+ return openTag, closeTag
+
+def makeHTMLTags(tagStr):
+ """
+ Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
+ tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
+
+ Example::
+ text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+ # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
+ a,a_end = makeHTMLTags("A")
+ link_expr = a + SkipTo(a_end)("link_text") + a_end
+
+ for link in link_expr.searchString(text):
+ # attributes in the <A> tag (like "href" shown here) are also accessible as named results
+ print(link.link_text, '->', link.href)
+ prints::
+ pyparsing -> http://pyparsing.wikispaces.com
+ """
+ return _makeTags( tagStr, False )
+
+def makeXMLTags(tagStr):
+ """
+ Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
+ tags only in the given upper/lower case.
+
+ Example: similar to L{makeHTMLTags}
+ """
+ return _makeTags( tagStr, True )
+
+def withAttribute(*args,**attrDict):
+ """
+ Helper to create a validating parse action to be used with start tags created
+ with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
+ with a required attribute value, to avoid false matches on common tags such as
+ C{<TD>} or C{<DIV>}.
+
+ Call C{withAttribute} with a series of attribute names and values. Specify the list
+ of filter attribute names and values as:
+ - keyword arguments, as in C{(align="right")}, or
+ - as an explicit dict with C{**} operator, when an attribute name is also a Python
+ reserved word, as in C{**{"class":"Customer", "align":"right"}}
+ - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
+ For attribute names with a namespace prefix, you must use the second form. Attribute
+ names are matched insensitive to upper/lower case.
+
+ If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
+
+ To verify that the attribute exists, but without specifying a value, pass
+ C{withAttribute.ANY_VALUE} as the value.
+
+ Example::
+ html = '''
+ <div>
+ Some text
+ <div type="grid">1 4 0 1 0</div>
+ <div type="graph">1,3 2,3 1,1</div>
+ <div>this has no type</div>
+ </div>
+
+ '''
+ div,div_end = makeHTMLTags("div")
+
+ # only match div tag having a type attribute with value "grid"
+ div_grid = div().setParseAction(withAttribute(type="grid"))
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.searchString(html):
+ print(grid_header.body)
+
+ # construct a match with any div tag having a type attribute, regardless of the value
+ div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.searchString(html):
+ print(div_header.body)
+ prints::
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ if args:
+ attrs = args[:]
+ else:
+ attrs = attrDict.items()
+ attrs = [(k,v) for k,v in attrs]
+ def pa(s,l,tokens):
+ for attrName,attrValue in attrs:
+ if attrName not in tokens:
+ raise ParseException(s,l,"no matching attribute " + attrName)
+ if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
+ raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
+ (attrName, tokens[attrName], attrValue))
+ return pa
+withAttribute.ANY_VALUE = object()
+
+def withClass(classname, namespace=''):
+ """
+ Simplified version of C{L{withAttribute}} when matching on a div class - made
+ difficult because C{class} is a reserved word in Python.
+
+ Example::
+ html = '''
+ <div>
+ Some text
+ <div class="grid">1 4 0 1 0</div>
+ <div class="graph">1,3 2,3 1,1</div>
+ <div>this &lt;div&gt; has no class</div>
+ </div>
+
+ '''
+ div,div_end = makeHTMLTags("div")
+ div_grid = div().setParseAction(withClass("grid"))
+
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.searchString(html):
+ print(grid_header.body)
+
+ div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.searchString(html):
+ print(div_header.body)
+ prints::
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ classattr = "%s:class" % namespace if namespace else "class"
+ return withAttribute(**{classattr : classname})
+
+opAssoc = _Constants()
+opAssoc.LEFT = object()
+opAssoc.RIGHT = object()
+
+def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
+ """
+ Helper method for constructing grammars of expressions made up of
+ operators working in a precedence hierarchy. Operators may be unary or
+ binary, left- or right-associative. Parse actions can also be attached
+ to operator expressions. The generated parser will also recognize the use
+ of parentheses to override operator precedences (see example below).
+
+ Note: if you define a deep operator list, you may see performance issues
+ when using infixNotation. See L{ParserElement.enablePackrat} for a
+ mechanism to potentially improve your parser performance.
+
+ Parameters:
+ - baseExpr - expression representing the most basic element of the nested expression grammar
+ - opList - list of tuples, one for each operator precedence level in the
+ expression grammar; each tuple is of the form
+ (opExpr, numTerms, rightLeftAssoc, parseAction), where:
+ - opExpr is the pyparsing expression for the operator;
+ may also be a string, which will be converted to a Literal;
+ if numTerms is 3, opExpr is a tuple of two expressions, for the
+ two operators separating the 3 terms
+ - numTerms is the number of terms for this operator (must
+ be 1, 2, or 3)
+ - rightLeftAssoc is the indicator whether the operator is
+ right or left associative, using the pyparsing-defined
+ constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
+ - parseAction is the parse action to be associated with
+ expressions matching this operator expression (the
+ parse action tuple member may be omitted); if the parse action
+ is passed a tuple or list of functions, this is equivalent to
+ calling C{setParseAction(*fn)} (L{ParserElement.setParseAction})
+ - lpar - expression for matching left-parentheses (default=C{Suppress('(')})
+ - rpar - expression for matching right-parentheses (default=C{Suppress(')')})
+
+ Example::
+ # simple example of four-function arithmetic with ints and variable names
+ integer = pyparsing_common.signed_integer
+ varname = pyparsing_common.identifier
+
+ arith_expr = infixNotation(integer | varname,
+ [
+ ('-', 1, opAssoc.RIGHT),
+ (oneOf('* /'), 2, opAssoc.LEFT),
+ (oneOf('+ -'), 2, opAssoc.LEFT),
+ ])
+
+ arith_expr.runTests('''
+ 5+3*6
+ (5+3)*6
+ -2--11
+ ''', fullDump=False)
+ prints::
+ 5+3*6
+ [[5, '+', [3, '*', 6]]]
+
+ (5+3)*6
+ [[[5, '+', 3], '*', 6]]
+
+ -2--11
+ [[['-', 2], '-', ['-', 11]]]
+ """
+ ret = Forward()
+ lastExpr = baseExpr | ( lpar + ret + rpar )
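+ # build the precedence ladder: each operator level matches one or more
+ # operators applied to the previous (more tightly binding) level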
+ for i,operDef in enumerate(opList):
+ opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
+ termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
+ if arity == 3:
+ if opExpr is None or len(opExpr) != 2:
+ raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
+ opExpr1, opExpr2 = opExpr
+ thisExpr = Forward().setName(termName)
+ if rightLeftAssoc == opAssoc.LEFT:
+ if arity == 1:
+ matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
+ else:
+ matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
+ elif arity == 3:
+ matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
+ Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
+ else:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+ elif rightLeftAssoc == opAssoc.RIGHT:
+ if arity == 1:
+ # try to avoid LR with this extra test
+ if not isinstance(opExpr, Optional):
+ opExpr = Optional(opExpr)
+ matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
+ elif arity == 2:
+ if opExpr is not None:
+ matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
+ else:
+ matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
+ elif arity == 3:
+ matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
+ Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
+ else:
+ raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
+ else:
+ raise ValueError("operator must indicate right or left associativity")
+ if pa:
+ if isinstance(pa, (tuple, list)):
+ matchExpr.setParseAction(*pa)
+ else:
+ matchExpr.setParseAction(pa)
+ thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
+ lastExpr = thisExpr
+ ret <<= lastExpr
+ return ret
+
+operatorPrecedence = infixNotation
+"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
+
+dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
+sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
+quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
+ Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
+unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
+
+def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
+ """
+ Helper method for defining nested lists enclosed in opening and closing
+ delimiters ("(" and ")" are the default).
+
+ Parameters:
+ - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
+ - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
+ - content - expression for items within the nested lists (default=C{None})
+ - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
+
+ If an expression is not provided for the content argument, the nested
+ expression will capture all whitespace-delimited content between delimiters
+ as a list of separate values.
+
+ Use the C{ignoreExpr} argument to define expressions that may contain
+ opening or closing characters that should not be treated as opening
+ or closing characters for nesting, such as quotedString or a comment
+ expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
+ The default is L{quotedString}, but if no expressions are to be ignored,
+ then pass C{None} for this argument.
+
+ Example::
+ data_type = oneOf("void int short long char float double")
+ decl_data_type = Combine(data_type + Optional(Word('*')))
+ ident = Word(alphas+'_', alphanums+'_')
+ number = pyparsing_common.number
+ arg = Group(decl_data_type + ident)
+ LPAR,RPAR = map(Suppress, "()")
+
+ code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
+
+ c_function = (decl_data_type("type")
+ + ident("name")
+ + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ + code_body("body"))
+ c_function.ignore(cStyleComment)
+
+ source_code = '''
+ int is_odd(int x) {
+ return (x%2);
+ }
+
+ int dec_to_hex(char hchar) {
+ if (hchar >= '0' && hchar <= '9') {
+ return (ord(hchar)-ord('0'));
+ } else {
+ return (10+ord(hchar)-ord('A'));
+ }
+ }
+ '''
+ for func in c_function.searchString(source_code):
+ print("%(name)s (%(type)s) args: %(args)s" % func)
+
+ prints::
+ is_odd (int) args: [['int', 'x']]
+ dec_to_hex (int) args: [['char', 'hchar']]
+ """
+ if opener == closer:
+ raise ValueError("opening and closing strings cannot be the same")
+ if content is None:
+ if isinstance(opener,basestring) and isinstance(closer,basestring):
+ if len(opener) == 1 and len(closer)==1:
+ if ignoreExpr is not None:
+ content = (Combine(OneOrMore(~ignoreExpr +
+ CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ if ignoreExpr is not None:
+ content = (Combine(OneOrMore(~ignoreExpr +
+ ~Literal(opener) + ~Literal(closer) +
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
+ CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
+ ).setParseAction(lambda t:t[0].strip()))
+ else:
+ raise ValueError("opening and closing arguments must be strings if no content expression is given")
+ ret = Forward()
+ if ignoreExpr is not None:
+ ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
+ else:
+ ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
+ ret.setName('nested %s%s expression' % (opener,closer))
+ return ret
+
+def indentedBlock(blockStatementExpr, indentStack, indent=True):
+ """
+ Helper method for defining space-delimited indentation blocks, such as
+ those used to define block statements in Python source code.
+
+ Parameters:
+ - blockStatementExpr - expression defining syntax of statement that
+ is repeated within the indented block
+ - indentStack - list created by caller to manage indentation stack
+ (multiple statementWithIndentedBlock expressions within a single grammar
+ should share a common indentStack)
+ - indent - boolean indicating whether block must be indented beyond the
+ current level; set to False for block of left-most statements
+ (default=C{True})
+
+ A valid block must contain at least one C{blockStatement}.
+
+ Example::
+ data = '''
+ def A(z):
+ A1
+ B = 100
+ G = A2
+ A2
+ A3
+ B
+ def BB(a,b,c):
+ BB1
+ def BBA():
+ bba1
+ bba2
+ bba3
+ C
+ D
+ def spam(x,y):
+ def eggs(z):
+ pass
+ '''
+
+
+ indentStack = [1]
+ stmt = Forward()
+
+ identifier = Word(alphas, alphanums)
+ funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
+ func_body = indentedBlock(stmt, indentStack)
+ funcDef = Group( funcDecl + func_body )
+
+ rvalue = Forward()
+ funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
+ rvalue << (funcCall | identifier | Word(nums))
+ assignment = Group(identifier + "=" + rvalue)
+ stmt << ( funcDef | assignment | identifier )
+
+ module_body = OneOrMore(stmt)
+
+ parseTree = module_body.parseString(data)
+ parseTree.pprint()
+ prints::
+ [['def',
+ 'A',
+ ['(', 'z', ')'],
+ ':',
+ [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+ 'B',
+ ['def',
+ 'BB',
+ ['(', 'a', 'b', 'c', ')'],
+ ':',
+ [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+ 'C',
+ 'D',
+ ['def',
+ 'spam',
+ ['(', 'x', 'y', ')'],
+ ':',
+ [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
+ """
+ def checkPeerIndent(s,l,t):
+ if l >= len(s): return
+ curCol = col(l,s)
+ if curCol != indentStack[-1]:
+ if curCol > indentStack[-1]:
+ raise ParseFatalException(s,l,"illegal nesting")
+ raise ParseException(s,l,"not a peer entry")
+
+ def checkSubIndent(s,l,t):
+ curCol = col(l,s)
+ if curCol > indentStack[-1]:
+ indentStack.append( curCol )
+ else:
+ raise ParseException(s,l,"not a subentry")
+
+ def checkUnindent(s,l,t):
+ if l >= len(s): return
+ curCol = col(l,s)
+ if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
+ raise ParseException(s,l,"not an unindent")
+ indentStack.pop()
+
+ NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
+ INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
+ PEER = Empty().setParseAction(checkPeerIndent).setName('')
+ UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
+ if indent:
+ smExpr = Group( Optional(NL) +
+ #~ FollowedBy(blockStatementExpr) +
+ INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
+ else:
+ smExpr = Group( Optional(NL) +
+ (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
+ blockStatementExpr.ignore(_bslash + LineEnd())
+ return smExpr.setName('indented block')
+
+alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
+punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
+
+anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
+_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
+commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
+def replaceHTMLEntity(t):
+ """Helper parser action to replace common HTML entities with their special characters"""
+ return _htmlEntityMap.get(t.entity)
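+
+# Sketch: un-escaping entities with transformString (output shown as a comment):
+#~ deentity = commonHTMLEntity.copy().setParseAction(replaceHTMLEntity)
+#~ print(deentity.transformString("x &lt; y &amp;&amp; y &gt; z"))  # -> 'x < y && y > z'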
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
+"Comment of the form C{/* ... */}"
+
+htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
+"Comment of the form C{<!-- ... -->}"
+
+restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
+dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
+"Comment of the form C{// ... (to end of line)}"
+
+cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
+"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
+
+javaStyleComment = cppStyleComment
+"Same as C{L{cppStyleComment}}"
+
+pythonStyleComment = Regex(r"#.*").setName("Python style comment")
+"Comment of the form C{# ... (to end of line)}"
+
+_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
+ Optional( Word(" \t") +
+ ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
+commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
+"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
+ This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+ """
+ Here are some common low-level expressions that may be useful in jump-starting parser development:
+ - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
+ - common L{programming identifiers<identifier>}
+ - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
+ - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
+ - L{UUID<uuid>}
+ - L{comma-separated list<comma_separated_list>}
+ Parse actions:
+ - C{L{convertToInteger}}
+ - C{L{convertToFloat}}
+ - C{L{convertToDate}}
+ - C{L{convertToDatetime}}
+ - C{L{stripHTMLTags}}
+ - C{L{upcaseTokens}}
+ - C{L{downcaseTokens}}
+
+ Example::
+ pyparsing_common.number.runTests('''
+ # any int or real number, returned as the appropriate type
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.fnumber.runTests('''
+ # any int or real number, returned as float
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.hex_integer.runTests('''
+ # hex numbers
+ 100
+ FF
+ ''')
+
+ pyparsing_common.fraction.runTests('''
+ # fractions
+ 1/2
+ -3/4
+ ''')
+
+ pyparsing_common.mixed_integer.runTests('''
+ # mixed fractions
+ 1
+ 1/2
+ -3/4
+ 1-3/4
+ ''')
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests('''
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ ''')
+ prints::
+ # any int or real number, returned as the appropriate type
+ 100
+ [100]
+
+ -100
+ [-100]
+
+ +100
+ [100]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # any int or real number, returned as float
+ 100
+ [100.0]
+
+ -100
+ [-100.0]
+
+ +100
+ [100.0]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # hex numbers
+ 100
+ [256]
+
+ FF
+ [255]
+
+ # fractions
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ # mixed fractions
+ 1
+ [1]
+
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ 1-3/4
+ [1.75]
+
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ [UUID('12345678-1234-5678-1234-567812345678')]
+ """
+
+ convertToInteger = tokenMap(int)
+ """
+ Parse action for converting parsed integers to Python int
+ """
+
+ convertToFloat = tokenMap(float)
+ """
+ Parse action for converting parsed numbers to Python float
+ """
+
+ integer = Word(nums).setName("integer").setParseAction(convertToInteger)
+ """expression that parses an unsigned integer, returns an int"""
+
+ hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
+ """expression that parses a hexadecimal integer, returns an int"""
+
+ signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
+ """expression that parses an integer with optional leading sign, returns an int"""
+
+ fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
+ """fractional expression of an integer divided by an integer, returns a float"""
+ fraction.addParseAction(lambda t: t[0]/t[-1])
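+ # note: t[0]/t[-1] intentionally skips the middle '/' literal token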
+
+ mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
+ """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+ mixed_integer.addParseAction(sum)
+
+ real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
+ """expression that parses a floating point number and returns a float"""
+
+ sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
+ """expression that parses a floating point number with optional scientific notation and returns a float"""
+
+ # streamlining this expression makes the docs nicer-looking
+ number = (sci_real | real | signed_integer).streamline()
+ """any numeric expression, returns the corresponding Python type"""
+
+ fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
+ """any int or real number, returned as float"""
+
+ identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
+ """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+ ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
+ "IPv4 address (C{0.0.0.0 - 255.255.255.255})"
+
+ _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
+ _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
+ _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
+ _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
+ _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
+ ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
+ "IPv6 address (long, short, or mixed form)"
+
+ mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
+ "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+ @staticmethod
+ def convertToDate(fmt="%Y-%m-%d"):
+ """
+ Helper to create a parse action for converting parsed date string to Python datetime.date
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
+
+ Example::
+ date_expr = pyparsing_common.iso8601_date.copy()
+ date_expr.setParseAction(pyparsing_common.convertToDate())
+ print(date_expr.parseString("1999-12-31"))
+ prints::
+ [datetime.date(1999, 12, 31)]
+ """
+ def cvt_fn(s,l,t):
+ try:
+ return datetime.strptime(t[0], fmt).date()
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+ return cvt_fn
+
+ @staticmethod
+ def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
+ """
+ Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
+
+ Example::
+ dt_expr = pyparsing_common.iso8601_datetime.copy()
+ dt_expr.setParseAction(pyparsing_common.convertToDatetime())
+ print(dt_expr.parseString("1999-12-31T23:59:59.999"))
+ prints::
+ [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+ """
+ def cvt_fn(s,l,t):
+ try:
+ return datetime.strptime(t[0], fmt)
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+ return cvt_fn
+
+ iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
+ "ISO8601 date (C{yyyy-mm-dd})"
+
+ iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
+ "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
+
+ uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
+ "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
+
+ _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
+ @staticmethod
+ def stripHTMLTags(s, l, tokens):
+ """
+ Parse action to remove HTML tags from web page HTML source
+
+ Example::
+ # strip HTML links from normal text
+ text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
+ td,td_end = makeHTMLTags("TD")
+ table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
+
+ print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
+ """
+ return pyparsing_common._html_stripper.transformString(tokens[0])
+
+ _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ + Optional( White(" \t") ) ) ).streamline().setName("commaItem")
+ comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
+ """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+ upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
+ """Parse action to convert tokens to upper case."""
+
+ downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
+ """Parse action to convert tokens to lower case."""
+
+
+if __name__ == "__main__":
+
+ selectToken = CaselessLiteral("select")
+ fromToken = CaselessLiteral("from")
+
+ ident = Word(alphas, alphanums + "_$")
+
+ columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+ columnNameList = Group(delimitedList(columnName)).setName("columns")
+ columnSpec = ('*' | columnNameList)
+
+ tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
+ tableNameList = Group(delimitedList(tableName)).setName("tables")
+
+ simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
+
+ # demo runTests method, including embedded comments in test string
+ simpleSQL.runTests("""
+ # '*' as column list and dotted table name
+ select * from SYS.XYZZY
+
+ # caseless match on "SELECT", and casts back to "select"
+ SELECT * from XYZZY, ABC
+
+ # list of column names, and mixed case SELECT keyword
+ Select AA,BB,CC from Sys.dual
+
+ # multiple tables
+ Select A, B, C from Sys.dual, Table2
+
+ # invalid SELECT keyword - should fail
+ Xelect A, B, C from Sys.dual
+
+ # incomplete command - should fail
+ Select
+
+ # invalid column name - should fail
+ Select ^^^ frox Sys.dual
+
+ """)
+
+ pyparsing_common.number.runTests("""
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ """)
+
+ # any int or real number, returned as float
+ pyparsing_common.fnumber.runTests("""
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ """)
+
+ pyparsing_common.hex_integer.runTests("""
+ 100
+ FF
+ """)
+
+ import uuid
+ pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
+ pyparsing_common.uuid.runTests("""
+ 12345678-1234-5678-1234-567812345678
+ """)
diff --git a/third_party/python/setuptools/setuptools/archive_util.py b/third_party/python/setuptools/setuptools/archive_util.py
new file mode 100644
index 0000000000..0ce190b8cf
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/archive_util.py
@@ -0,0 +1,175 @@
+"""Utilities for extracting common archive formats"""
+
+import zipfile
+import tarfile
+import os
+import shutil
+import posixpath
+import contextlib
+from distutils.errors import DistutilsError
+
+from pkg_resources import ensure_directory
+
+__all__ = [
+ "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
+ "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
+]
+
+
+class UnrecognizedFormat(DistutilsError):
+ """Couldn't recognize the archive type"""
+
+
+def default_filter(src, dst):
+ """The default progress/filter callback; returns True for all files"""
+ return dst
+
+
+def unpack_archive(
+ filename, extract_dir, progress_filter=default_filter,
+ drivers=None):
+ """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
+
+ `progress_filter` is a function taking two arguments: a source path
+ internal to the archive ('/'-separated), and a filesystem path where it
+ will be extracted. The callback must return the desired extract path
+ (which may be the same as the one passed in), or else ``None`` to skip
+ that file or directory. The callback can thus be used to report on the
+ progress of the extraction, as well as to filter the items extracted or
+ alter their extraction paths.
+
+ `drivers`, if supplied, must be a non-empty sequence of functions with the
+ same signature as this function (minus the `drivers` argument), that raise
+ ``UnrecognizedFormat`` if they do not support extracting the designated
+ archive type. The `drivers` are tried in sequence until one is found that
+ does not raise an error, or until all are exhausted (in which case
+ ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
+ drivers, the module's ``extraction_drivers`` constant will be used, which
+ means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
+ order.
+ """
+ for driver in drivers or extraction_drivers:
+ try:
+ driver(filename, extract_dir, progress_filter)
+ except UnrecognizedFormat:
+ continue
+ else:
+ return
+ else:
+ raise UnrecognizedFormat(
+ "Not a recognized archive type: %s" % filename
+ )
+
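+# Example: a progress_filter that skips documentation entries during
+# extraction (a sketch; the archive and directory names are illustrative):
+#
+#   def skip_docs(src, dst):
+#       if src.startswith('docs/'):
+#           return None   # returning None skips this entry
+#       return dst        # extract everything else to the default path
+#
+#   unpack_archive('example-1.0.zip', 'build/example', progress_filter=skip_docs)
+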
+
+def unpack_directory(filename, extract_dir, progress_filter=default_filter):
+ """"Unpack" a directory, using the same interface as for archives
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a directory
+ """
+ if not os.path.isdir(filename):
+ raise UnrecognizedFormat("%s is not a directory" % filename)
+
+ paths = {
+ filename: ('', extract_dir),
+ }
+ for base, dirs, files in os.walk(filename):
+ src, dst = paths[base]
+ for d in dirs:
+ paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d)
+ for f in files:
+ target = os.path.join(dst, f)
+ target = progress_filter(src + f, target)
+ if not target:
+ # skip non-files
+ continue
+ ensure_directory(target)
+ f = os.path.join(base, f)
+ shutil.copyfile(f, target)
+ shutil.copystat(f, target)
+
+
+def unpack_zipfile(filename, extract_dir, progress_filter=default_filter):
+ """Unpack zip `filename` to `extract_dir`
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
+ by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation
+ of the `progress_filter` argument.
+ """
+
+ if not zipfile.is_zipfile(filename):
+ raise UnrecognizedFormat("%s is not a zip file" % (filename,))
+
+ with zipfile.ZipFile(filename) as z:
+ for info in z.infolist():
+ name = info.filename
+
+ # don't extract absolute paths or ones with .. in them
+ if name.startswith('/') or '..' in name.split('/'):
+ continue
+
+ target = os.path.join(extract_dir, *name.split('/'))
+ target = progress_filter(name, target)
+ if not target:
+ continue
+ if name.endswith('/'):
+ # directory
+ ensure_directory(target)
+ else:
+ # file
+ ensure_directory(target)
+ data = z.read(info.filename)
+ with open(target, 'wb') as f:
+ f.write(data)
+ unix_attributes = info.external_attr >> 16
+ if unix_attributes:
+ os.chmod(target, unix_attributes)
+
+
+def unpack_tarfile(filename, extract_dir, progress_filter=default_filter):
+ """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir`
+
+ Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined
+ by ``tarfile.open()``). See ``unpack_archive()`` for an explanation
+ of the `progress_filter` argument.
+ """
+ try:
+ tarobj = tarfile.open(filename)
+ except tarfile.TarError as e:
+ raise UnrecognizedFormat(
+ "%s is not a compressed or uncompressed tar file" % (filename,)
+ ) from e
+ with contextlib.closing(tarobj):
+ # don't do any chowning!
+ tarobj.chown = lambda *args: None
+ for member in tarobj:
+ name = member.name
+ # don't extract absolute paths or ones with .. in them
+ if not name.startswith('/') and '..' not in name.split('/'):
+ prelim_dst = os.path.join(extract_dir, *name.split('/'))
+
+ # resolve any links and extract the link targets as normal files
+ while member is not None and (
+ member.islnk() or member.issym()):
+ linkpath = member.linkname
+ if member.issym():
+ base = posixpath.dirname(member.name)
+ linkpath = posixpath.join(base, linkpath)
+ linkpath = posixpath.normpath(linkpath)
+ member = tarobj._getmember(linkpath)
+
+ if member is not None and (member.isfile() or member.isdir()):
+ final_dst = progress_filter(name, prelim_dst)
+ if final_dst:
+ if final_dst.endswith(os.sep):
+ final_dst = final_dst[:-1]
+ try:
+ # XXX Ugh
+ tarobj._extract_member(member, final_dst)
+ except tarfile.ExtractError:
+ # chown/chmod/mkfifo/mknode/makedev failed
+ pass
+ return True
+
+
+extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile
diff --git a/third_party/python/setuptools/setuptools/build_meta.py b/third_party/python/setuptools/setuptools/build_meta.py
new file mode 100644
index 0000000000..b9e8a2b3fa
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/build_meta.py
@@ -0,0 +1,276 @@
+"""A PEP 517 interface to setuptools
+
+Previously, when a user or a command line tool (let's call it a "frontend")
+needed to make a request of setuptools to take a certain action, for
+example, generating a list of installation requirements, the frontend would
+would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line.
+
+PEP 517 defines a different method of interfacing with setuptools. Rather
+than calling "setup.py" directly, the frontend should:
+
+ 1. Set the current directory to the directory with a setup.py file
+ 2. Import this module into a safe python interpreter (one in which
+ it is acceptable for setuptools to set global variables or crash hard).
+ 3. Call one of the functions defined in PEP 517.
+
+What each function does is defined in PEP 517. However, here is a "casual"
+definition of the functions (this definition should not be relied on for
+bug reports or API stability):
+
+ - `build_wheel`: build a wheel in the folder and return the basename
+ - `get_requires_for_build_wheel`: get the `setup_requires` to build
+ - `prepare_metadata_for_build_wheel`: get the `install_requires`
+ - `build_sdist`: build an sdist in the folder and return the basename
+ - `get_requires_for_build_sdist`: get the `setup_requires` to build
+
+Again, this is not a formal definition! Just a "taste" of the module.
+"""
+
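+# A minimal sketch of how a PEP 517 frontend might drive this backend
+# (the "dist" directory argument is illustrative):
+#
+#   import setuptools.build_meta as backend
+#   requires = backend.get_requires_for_build_wheel()  # ['wheel'] plus any setup_requires
+#   wheel_name = backend.build_wheel("dist")           # returns the .whl basename
+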
+import io
+import os
+import sys
+import tokenize
+import shutil
+import contextlib
+import tempfile
+
+import setuptools
+import distutils
+
+from pkg_resources import parse_requirements
+
+__all__ = ['get_requires_for_build_sdist',
+ 'get_requires_for_build_wheel',
+ 'prepare_metadata_for_build_wheel',
+ 'build_wheel',
+ 'build_sdist',
+ '__legacy__',
+ 'SetupRequirementsError']
+
+
+class SetupRequirementsError(BaseException):
+ def __init__(self, specifiers):
+ self.specifiers = specifiers
+
+
+class Distribution(setuptools.dist.Distribution):
+ def fetch_build_eggs(self, specifiers):
+ specifier_list = list(map(str, parse_requirements(specifiers)))
+
+ raise SetupRequirementsError(specifier_list)
+
+ @classmethod
+ @contextlib.contextmanager
+ def patch(cls):
+ """
+ Replace
+ distutils.dist.Distribution with this class
+ for the duration of this context.
+ """
+ orig = distutils.core.Distribution
+ distutils.core.Distribution = cls
+ try:
+ yield
+ finally:
+ distutils.core.Distribution = orig
+
+
+@contextlib.contextmanager
+def no_install_setup_requires():
+ """Temporarily disable installing setup_requires
+
+ Under PEP 517, the backend reports build dependencies to the frontend,
+ and the frontend is responsible for ensuring they're installed.
+ So setuptools (acting as a backend) should not try to install them.
+ """
+ orig = setuptools._install_setup_requires
+ setuptools._install_setup_requires = lambda attrs: None
+ try:
+ yield
+ finally:
+ setuptools._install_setup_requires = orig
+
+
+def _get_immediate_subdirectories(a_dir):
+ return [name for name in os.listdir(a_dir)
+ if os.path.isdir(os.path.join(a_dir, name))]
+
+
+def _file_with_extension(directory, extension):
+ matching = (
+ f for f in os.listdir(directory)
+ if f.endswith(extension)
+ )
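+ # tuple unpacking asserts that exactly one file with the extension exists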
+ file, = matching
+ return file
+
+
+def _open_setup_script(setup_script):
+ if not os.path.exists(setup_script):
+ # Supply a default setup.py
+ return io.StringIO(u"from setuptools import setup; setup()")
+
+ return getattr(tokenize, 'open', open)(setup_script)
+
+
+class _BuildMetaBackend(object):
+
+ def _fix_config(self, config_settings):
+ config_settings = config_settings or {}
+ config_settings.setdefault('--global-option', [])
+ return config_settings
+
+ def _get_build_requires(self, config_settings, requirements):
+ config_settings = self._fix_config(config_settings)
+
+ sys.argv = sys.argv[:1] + ['egg_info'] + \
+ config_settings["--global-option"]
+ try:
+ with Distribution.patch():
+ self.run_setup()
+ except SetupRequirementsError as e:
+ requirements += e.specifiers
+
+ return requirements
+
+ def run_setup(self, setup_script='setup.py'):
+ # Note that we can reuse our build directory between calls
+ # Correctness comes first, then optimization later
+ __file__ = setup_script
+ __name__ = '__main__'
+
+ with _open_setup_script(__file__) as f:
+ code = f.read().replace(r'\r\n', r'\n')
+
+ exec(compile(code, __file__, 'exec'), locals())
+
+ def get_requires_for_build_wheel(self, config_settings=None):
+ config_settings = self._fix_config(config_settings)
+ return self._get_build_requires(
+ config_settings, requirements=['wheel'])
+
+ def get_requires_for_build_sdist(self, config_settings=None):
+ config_settings = self._fix_config(config_settings)
+ return self._get_build_requires(config_settings, requirements=[])
+
+ def prepare_metadata_for_build_wheel(self, metadata_directory,
+ config_settings=None):
+ sys.argv = sys.argv[:1] + [
+ 'dist_info', '--egg-base', metadata_directory]
+ with no_install_setup_requires():
+ self.run_setup()
+
+ dist_info_directory = metadata_directory
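+ # dist_info may nest its output under the egg-base directory; walk down
+ # through single-subdirectory levels until a .dist-info directory is found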
+ while True:
+ dist_infos = [f for f in os.listdir(dist_info_directory)
+ if f.endswith('.dist-info')]
+
+ if (
+ len(dist_infos) == 0 and
+ len(_get_immediate_subdirectories(dist_info_directory)) == 1
+ ):
+
+ dist_info_directory = os.path.join(
+ dist_info_directory, os.listdir(dist_info_directory)[0])
+ continue
+
+ assert len(dist_infos) == 1
+ break
+
+ # PEP 517 requires that the .dist-info directory be placed in the
+ # metadata_directory. To comply, we MUST copy the directory to the root
+ if dist_info_directory != metadata_directory:
+ shutil.move(
+ os.path.join(dist_info_directory, dist_infos[0]),
+ metadata_directory)
+ shutil.rmtree(dist_info_directory, ignore_errors=True)
+
+ return dist_infos[0]
+
+ def _build_with_temp_dir(self, setup_command, result_extension,
+ result_directory, config_settings):
+ config_settings = self._fix_config(config_settings)
+ result_directory = os.path.abspath(result_directory)
+
+ # Build in a temporary directory, then copy to the target.
+ os.makedirs(result_directory, exist_ok=True)
+ with tempfile.TemporaryDirectory(dir=result_directory) as tmp_dist_dir:
+ sys.argv = (sys.argv[:1] + setup_command +
+ ['--dist-dir', tmp_dist_dir] +
+ config_settings["--global-option"])
+ with no_install_setup_requires():
+ self.run_setup()
+
+ result_basename = _file_with_extension(
+ tmp_dist_dir, result_extension)
+ result_path = os.path.join(result_directory, result_basename)
+ if os.path.exists(result_path):
+ # os.rename will fail overwriting on non-Unix.
+ os.remove(result_path)
+ os.rename(os.path.join(tmp_dist_dir, result_basename), result_path)
+
+ return result_basename
+
+ def build_wheel(self, wheel_directory, config_settings=None,
+ metadata_directory=None):
+ return self._build_with_temp_dir(['bdist_wheel'], '.whl',
+ wheel_directory, config_settings)
+
+ def build_sdist(self, sdist_directory, config_settings=None):
+ return self._build_with_temp_dir(['sdist', '--formats', 'gztar'],
+ '.tar.gz', sdist_directory,
+ config_settings)
+
+
+class _BuildMetaLegacyBackend(_BuildMetaBackend):
+ """Compatibility backend for setuptools
+
+ This is a version of setuptools.build_meta that endeavors to maintain
+ backwards compatibility with pre-PEP 517 modes of invocation. It exists
+ as a temporary bridge between the old packaging mechanism and the new
+ packaging mechanism, and will eventually be removed.
+ """
+ def run_setup(self, setup_script='setup.py'):
+ # In order to maintain compatibility with scripts assuming that
+ # the setup.py script is in a directory on the PYTHONPATH, inject
+ # '' into sys.path. (pypa/setuptools#1642)
+ sys_path = list(sys.path) # Save the original path
+
+ script_dir = os.path.dirname(os.path.abspath(setup_script))
+ if script_dir not in sys.path:
+ sys.path.insert(0, script_dir)
+
+ # Some setup.py scripts (e.g. in pygame and numpy) use sys.argv[0] to
+ # get the directory of the source code. They expect it to refer to the
+ # setup.py script.
+ sys_argv_0 = sys.argv[0]
+ sys.argv[0] = setup_script
+
+ try:
+ super(_BuildMetaLegacyBackend,
+ self).run_setup(setup_script=setup_script)
+ finally:
+ # While PEP 517 frontends should be calling each hook in a fresh
+ # subprocess according to the standard (and thus it should not be
+ # strictly necessary to restore the old sys.path), we'll restore
+ # the original path so that the path manipulation does not persist
+ # within the hook after run_setup is called.
+ sys.path[:] = sys_path
+ sys.argv[0] = sys_argv_0
+
+
+# The primary backend
+_BACKEND = _BuildMetaBackend()
+
+get_requires_for_build_wheel = _BACKEND.get_requires_for_build_wheel
+get_requires_for_build_sdist = _BACKEND.get_requires_for_build_sdist
+prepare_metadata_for_build_wheel = _BACKEND.prepare_metadata_for_build_wheel
+build_wheel = _BACKEND.build_wheel
+build_sdist = _BACKEND.build_sdist
+
+
+# The legacy backend
+__legacy__ = _BuildMetaLegacyBackend()
diff --git a/third_party/python/setuptools/setuptools/cli-32.exe b/third_party/python/setuptools/setuptools/cli-32.exe
new file mode 100644
index 0000000000..b1487b7819
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/cli-32.exe
Binary files differ
diff --git a/third_party/python/setuptools/setuptools/cli-64.exe b/third_party/python/setuptools/setuptools/cli-64.exe
new file mode 100644
index 0000000000..675e6bf374
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/cli-64.exe
Binary files differ
diff --git a/third_party/python/setuptools/setuptools/cli.exe b/third_party/python/setuptools/setuptools/cli.exe
new file mode 100644
index 0000000000..b1487b7819
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/cli.exe
Binary files differ
diff --git a/third_party/python/setuptools/setuptools/command/__init__.py b/third_party/python/setuptools/setuptools/command/__init__.py
new file mode 100644
index 0000000000..743f5588fa
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/__init__.py
@@ -0,0 +1,17 @@
+__all__ = [
+ 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop',
+ 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts',
+ 'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts',
+ 'bdist_wininst', 'upload_docs', 'build_clib', 'dist_info',
+]
+
+from distutils.command.bdist import bdist
+import sys
+
+from setuptools.command import install_scripts
+
+if 'egg' not in bdist.format_commands:
+ bdist.format_command['egg'] = ('bdist_egg', "Python .egg file")
+ bdist.format_commands.append('egg')
+
+del bdist, sys
diff --git a/third_party/python/setuptools/setuptools/command/alias.py b/third_party/python/setuptools/setuptools/command/alias.py
new file mode 100644
index 0000000000..452a9244ea
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/alias.py
@@ -0,0 +1,78 @@
+from distutils.errors import DistutilsOptionError
+
+from setuptools.command.setopt import edit_config, option_base, config_file
+
+
+def shquote(arg):
+ """Quote an argument for later parsing by shlex.split()"""
+ for c in '"', "'", "\\", "#":
+ if c in arg:
+ return repr(arg)
+ if arg.split() != [arg]:
+ return repr(arg)
+ return arg
+
+
+class alias(option_base):
+ """Define a shortcut that invokes one or more commands"""
+
+ description = "define a shortcut to invoke one or more commands"
+ command_consumes_arguments = True
+
+ user_options = [
+ ('remove', 'r', 'remove (unset) the alias'),
+ ] + option_base.user_options
+
+ boolean_options = option_base.boolean_options + ['remove']
+
+ def initialize_options(self):
+ option_base.initialize_options(self)
+ self.args = None
+ self.remove = None
+
+ def finalize_options(self):
+ option_base.finalize_options(self)
+ if self.remove and len(self.args) != 1:
+ raise DistutilsOptionError(
+ "Must specify exactly one argument (the alias name) when "
+ "using --remove"
+ )
+
+ def run(self):
+ aliases = self.distribution.get_option_dict('aliases')
+
+ if not self.args:
+ print("Command Aliases")
+ print("---------------")
+ for alias in aliases:
+ print("setup.py alias", format_alias(alias, aliases))
+ return
+
+ elif len(self.args) == 1:
+ alias, = self.args
+ if self.remove:
+ command = None
+ elif alias in aliases:
+ print("setup.py alias", format_alias(alias, aliases))
+ return
+ else:
+ print("No alias definition found for %r" % alias)
+ return
+ else:
+ alias = self.args[0]
+ command = ' '.join(map(shquote, self.args[1:]))
+
+ edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run)
+
+
+def format_alias(name, aliases):
+ source, command = aliases[name]
+ if source == config_file('global'):
+ source = '--global-config '
+ elif source == config_file('user'):
+ source = '--user-config '
+ elif source == config_file('local'):
+ source = ''
+ else:
+ source = '--filename=%r' % source
+ return source + name + ' ' + command
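A small sketch of the round-trip property shquote aims for: quoted arguments should survive shlex.split() unchanged (the sample arguments are made up):

    import shlex

    from setuptools.command.alias import shquote

    args = ['test', '--match', 'unit tests', "it's"]
    line = ' '.join(map(shquote, args))
    assert shlex.split(line) == args
    print(line)  # test --match 'unit tests' "it's"
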
diff --git a/third_party/python/setuptools/setuptools/command/bdist_egg.py b/third_party/python/setuptools/setuptools/command/bdist_egg.py
new file mode 100644
index 0000000000..a88efb45b8
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/bdist_egg.py
@@ -0,0 +1,501 @@
+"""setuptools.command.bdist_egg
+
+Build .egg distributions"""
+
+from distutils.errors import DistutilsSetupError
+from distutils.dir_util import remove_tree, mkpath
+from distutils import log
+from types import CodeType
+import sys
+import os
+import re
+import textwrap
+import marshal
+import warnings
+
+from pkg_resources import get_build_platform, Distribution, ensure_directory
+from pkg_resources import EntryPoint
+from setuptools.extension import Library
+from setuptools import Command, SetuptoolsDeprecationWarning
+
+from sysconfig import get_path, get_python_version
+
+
+def _get_purelib():
+ return get_path("purelib")
+
+
+def strip_module(filename):
+ if '.' in filename:
+ filename = os.path.splitext(filename)[0]
+ if filename.endswith('module'):
+ filename = filename[:-6]
+ return filename
+
+
+def sorted_walk(dir):
+ """Do os.walk in a reproducible way,
+ independent of the filesystem's nondeterministic readdir order
+ """
+ for base, dirs, files in os.walk(dir):
+ dirs.sort()
+ files.sort()
+ yield base, dirs, files
+
+
+def write_stub(resource, pyfile):
+ _stub_template = textwrap.dedent("""
+ def __bootstrap__():
+ global __bootstrap__, __loader__, __file__
+ import sys, pkg_resources, importlib.util
+ __file__ = pkg_resources.resource_filename(__name__, %r)
+ __loader__ = None; del __bootstrap__, __loader__
+ spec = importlib.util.spec_from_file_location(__name__,__file__)
+ mod = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(mod)
+ __bootstrap__()
+ """).lstrip()
+ with open(pyfile, 'w') as f:
+ f.write(_stub_template % resource)
+
+
+class bdist_egg(Command):
+ description = "create an \"egg\" distribution"
+
+ user_options = [
+ ('bdist-dir=', 'b',
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p', "platform name to embed in generated filenames "
+ "(default: %s)" % get_build_platform()),
+ ('exclude-source-files', None,
+ "remove all .py files from the generated egg"),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ]
+
+ boolean_options = [
+ 'keep-temp', 'skip-build', 'exclude-source-files'
+ ]
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.plat_name = None
+ self.keep_temp = 0
+ self.dist_dir = None
+ self.skip_build = 0
+ self.egg_output = None
+ self.exclude_source_files = None
+
+ def finalize_options(self):
+ ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info")
+ self.egg_info = ei_cmd.egg_info
+
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'egg')
+
+ if self.plat_name is None:
+ self.plat_name = get_build_platform()
+
+ self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+ if self.egg_output is None:
+
+ # Compute filename of the output egg
+ basename = Distribution(
+ None, None, ei_cmd.egg_name, ei_cmd.egg_version,
+ get_python_version(),
+ self.distribution.has_ext_modules() and self.plat_name
+ ).egg_name()
+
+ self.egg_output = os.path.join(self.dist_dir, basename + '.egg')
+
+ def do_install_data(self):
+ # Hack for packages that install data to install's --install-lib
+ self.get_finalized_command('install').install_lib = self.bdist_dir
+
+ site_packages = os.path.normcase(os.path.realpath(_get_purelib()))
+ old, self.distribution.data_files = self.distribution.data_files, []
+
+ for item in old:
+ if isinstance(item, tuple) and len(item) == 2:
+ if os.path.isabs(item[0]):
+ realpath = os.path.realpath(item[0])
+ normalized = os.path.normcase(realpath)
+ if normalized == site_packages or normalized.startswith(
+ site_packages + os.sep
+ ):
+ item = realpath[len(site_packages) + 1:], item[1]
+ # XXX else: raise ???
+ self.distribution.data_files.append(item)
+
+ try:
+ log.info("installing package data to %s", self.bdist_dir)
+ self.call_command('install_data', force=0, root=None)
+ finally:
+ self.distribution.data_files = old
+
+ def get_outputs(self):
+ return [self.egg_output]
+
+ def call_command(self, cmdname, **kw):
+ """Invoke reinitialized command `cmdname` with keyword args"""
+ for dirname in INSTALL_DIRECTORY_ATTRS:
+ kw.setdefault(dirname, self.bdist_dir)
+ kw.setdefault('skip_build', self.skip_build)
+ kw.setdefault('dry_run', self.dry_run)
+ cmd = self.reinitialize_command(cmdname, **kw)
+ self.run_command(cmdname)
+ return cmd
+
+ def run(self):
+ # Generate metadata first
+ self.run_command("egg_info")
+ # We run install_lib before install_data, because some data hacks
+ # pull their data path from the install_lib command.
+ log.info("installing library code to %s", self.bdist_dir)
+ instcmd = self.get_finalized_command('install')
+ old_root = instcmd.root
+ instcmd.root = None
+ if self.distribution.has_c_libraries() and not self.skip_build:
+ self.run_command('build_clib')
+ cmd = self.call_command('install_lib', warn_dir=0)
+ instcmd.root = old_root
+
+ all_outputs, ext_outputs = self.get_ext_outputs()
+ self.stubs = []
+ to_compile = []
+ for (p, ext_name) in enumerate(ext_outputs):
+ filename, ext = os.path.splitext(ext_name)
+ pyfile = os.path.join(self.bdist_dir, strip_module(filename) +
+ '.py')
+ self.stubs.append(pyfile)
+ log.info("creating stub loader for %s", ext_name)
+ if not self.dry_run:
+ write_stub(os.path.basename(ext_name), pyfile)
+ to_compile.append(pyfile)
+ ext_outputs[p] = ext_name.replace(os.sep, '/')
+
+ if to_compile:
+ cmd.byte_compile(to_compile)
+ if self.distribution.data_files:
+ self.do_install_data()
+
+ # Make the EGG-INFO directory
+ archive_root = self.bdist_dir
+ egg_info = os.path.join(archive_root, 'EGG-INFO')
+ self.mkpath(egg_info)
+ if self.distribution.scripts:
+ script_dir = os.path.join(egg_info, 'scripts')
+ log.info("installing scripts to %s", script_dir)
+ self.call_command('install_scripts', install_dir=script_dir,
+ no_ep=1)
+
+ self.copy_metadata_to(egg_info)
+ native_libs = os.path.join(egg_info, "native_libs.txt")
+ if all_outputs:
+ log.info("writing %s", native_libs)
+ if not self.dry_run:
+ ensure_directory(native_libs)
+ libs_file = open(native_libs, 'wt')
+ libs_file.write('\n'.join(all_outputs))
+ libs_file.write('\n')
+ libs_file.close()
+ elif os.path.isfile(native_libs):
+ log.info("removing %s", native_libs)
+ if not self.dry_run:
+ os.unlink(native_libs)
+
+ write_safety_flag(
+ os.path.join(archive_root, 'EGG-INFO'), self.zip_safe()
+ )
+
+ if os.path.exists(os.path.join(self.egg_info, 'depends.txt')):
+ log.warn(
+ "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n"
+ "Use the install_requires/extras_require setup() args instead."
+ )
+
+ if self.exclude_source_files:
+ self.zap_pyfiles()
+
+ # Make the archive
+ make_zipfile(self.egg_output, archive_root, verbose=self.verbose,
+ dry_run=self.dry_run, mode=self.gen_header())
+ if not self.keep_temp:
+ remove_tree(self.bdist_dir, dry_run=self.dry_run)
+
+ # Add to 'Distribution.dist_files' so that the "upload" command works
+ getattr(self.distribution, 'dist_files', []).append(
+ ('bdist_egg', get_python_version(), self.egg_output))
+
+ def zap_pyfiles(self):
+ log.info("Removing .py files from temporary directory")
+ for base, dirs, files in walk_egg(self.bdist_dir):
+ for name in files:
+ path = os.path.join(base, name)
+
+ if name.endswith('.py'):
+ log.debug("Deleting %s", path)
+ os.unlink(path)
+
+ if base.endswith('__pycache__'):
+ path_old = path
+
+ pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc'
+ m = re.match(pattern, name)
+ path_new = os.path.join(
+ base, os.pardir, m.group('name') + '.pyc')
+ log.info(
+ "Renaming file from [%s] to [%s]"
+ % (path_old, path_new))
+ try:
+ os.remove(path_new)
+ except OSError:
+ pass
+ os.rename(path_old, path_new)
+
+ def zip_safe(self):
+ safe = getattr(self.distribution, 'zip_safe', None)
+ if safe is not None:
+ return safe
+ log.warn("zip_safe flag not set; analyzing archive contents...")
+ return analyze_egg(self.bdist_dir, self.stubs)
+
+ def gen_header(self):
+ epm = EntryPoint.parse_map(self.distribution.entry_points or '')
+ ep = epm.get('setuptools.installation', {}).get('eggsecutable')
+ if ep is None:
+ return 'w' # not an eggsecutable, do it the usual way.
+
+ warnings.warn(
+ "Eggsecutables are deprecated and will be removed in a future "
+ "version.",
+ SetuptoolsDeprecationWarning
+ )
+
+ if not ep.attrs or ep.extras:
+ raise DistutilsSetupError(
+ "eggsecutable entry point (%r) cannot have 'extras' "
+ "or refer to a module" % (ep,)
+ )
+
+ pyver = '{}.{}'.format(*sys.version_info)
+ pkg = ep.module_name
+ full = '.'.join(ep.attrs)
+ base = ep.attrs[0]
+ basename = os.path.basename(self.egg_output)
+
+ header = (
+ "#!/bin/sh\n"
+ 'if [ `basename $0` = "%(basename)s" ]\n'
+ 'then exec python%(pyver)s -c "'
+ "import sys, os; sys.path.insert(0, os.path.abspath('$0')); "
+ "from %(pkg)s import %(base)s; sys.exit(%(full)s())"
+ '" "$@"\n'
+ 'else\n'
+ ' echo $0 is not the correct name for this egg file.\n'
+ ' echo Please rename it back to %(basename)s and try again.\n'
+ ' exec false\n'
+ 'fi\n'
+ ) % locals()
+
+ if not self.dry_run:
+ mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run)
+ f = open(self.egg_output, 'w')
+ f.write(header)
+ f.close()
+ return 'a'
+
+ def copy_metadata_to(self, target_dir):
+ "Copy metadata (egg info) to the target_dir"
+ # normalize the path (so that a forward-slash in egg_info will
+ # match using startswith below)
+ norm_egg_info = os.path.normpath(self.egg_info)
+ prefix = os.path.join(norm_egg_info, '')
+ for path in self.ei_cmd.filelist.files:
+ if path.startswith(prefix):
+ target = os.path.join(target_dir, path[len(prefix):])
+ ensure_directory(target)
+ self.copy_file(path, target)
+
+ def get_ext_outputs(self):
+ """Get a list of relative paths to C extensions in the output distro"""
+
+ all_outputs = []
+ ext_outputs = []
+
+ paths = {self.bdist_dir: ''}
+ for base, dirs, files in sorted_walk(self.bdist_dir):
+ for filename in files:
+ if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS:
+ all_outputs.append(paths[base] + filename)
+ for filename in dirs:
+ paths[os.path.join(base, filename)] = (paths[base] +
+ filename + '/')
+
+ if self.distribution.has_ext_modules():
+ build_cmd = self.get_finalized_command('build_ext')
+ for ext in build_cmd.extensions:
+ if isinstance(ext, Library):
+ continue
+ fullname = build_cmd.get_ext_fullname(ext.name)
+ filename = build_cmd.get_ext_filename(fullname)
+ if not os.path.basename(filename).startswith('dl-'):
+ if os.path.exists(os.path.join(self.bdist_dir, filename)):
+ ext_outputs.append(filename)
+
+ return all_outputs, ext_outputs
+
+
+NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
+
+
+def walk_egg(egg_dir):
+ """Walk an unpacked egg's contents, skipping the metadata directory"""
+ walker = sorted_walk(egg_dir)
+ base, dirs, files = next(walker)
+ if 'EGG-INFO' in dirs:
+ dirs.remove('EGG-INFO')
+ yield base, dirs, files
+ for bdf in walker:
+ yield bdf
+
+
+def analyze_egg(egg_dir, stubs):
+ # check for existing flag in EGG-INFO
+ for flag, fn in safety_flags.items():
+ if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
+ return flag
+ if not can_scan():
+ return False
+ safe = True
+ for base, dirs, files in walk_egg(egg_dir):
+ for name in files:
+ if name.endswith('.py') or name.endswith('.pyw'):
+ continue
+ elif name.endswith('.pyc') or name.endswith('.pyo'):
+ # always scan, even if we already know we're not safe
+ safe = scan_module(egg_dir, base, name, stubs) and safe
+ return safe
+
+
+def write_safety_flag(egg_dir, safe):
+ # Write or remove zip safety flag file(s)
+ for flag, fn in safety_flags.items():
+ fn = os.path.join(egg_dir, fn)
+ if os.path.exists(fn):
+ if safe is None or bool(safe) != flag:
+ os.unlink(fn)
+ elif safe is not None and bool(safe) == flag:
+ f = open(fn, 'wt')
+ f.write('\n')
+ f.close()
+
+
+safety_flags = {
+ True: 'zip-safe',
+ False: 'not-zip-safe',
+}
+
+
+def scan_module(egg_dir, base, name, stubs):
+ """Check whether module possibly uses unsafe-for-zipfile stuff"""
+
+ filename = os.path.join(base, name)
+ if filename[:-1] in stubs:
+ return True # Extension module
+ pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
+ module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
+ if sys.version_info < (3, 7):
+ skip = 12 # skip magic & date & file size
+ else:
+ skip = 16 # skip magic & reserved? & date & file size
+ f = open(filename, 'rb')
+ f.read(skip)
+ code = marshal.load(f)
+ f.close()
+ safe = True
+ symbols = dict.fromkeys(iter_symbols(code))
+ for bad in ['__file__', '__path__']:
+ if bad in symbols:
+ log.warn("%s: module references %s", module, bad)
+ safe = False
+ if 'inspect' in symbols:
+ for bad in [
+ 'getsource', 'getabsfile', 'getsourcefile', 'getfile',
+ 'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
+ 'getinnerframes', 'getouterframes', 'stack', 'trace'
+ ]:
+ if bad in symbols:
+ log.warn("%s: module MAY be using inspect.%s", module, bad)
+ safe = False
+ return safe
+
+
+def iter_symbols(code):
+ """Yield names and strings used by `code` and its nested code objects"""
+ for name in code.co_names:
+ yield name
+ for const in code.co_consts:
+ if isinstance(const, str):
+ yield const
+ elif isinstance(const, CodeType):
+ for name in iter_symbols(const):
+ yield name
+
+
+def can_scan():
+ if not sys.platform.startswith('java') and sys.platform != 'cli':
+ # CPython, PyPy, etc.
+ return True
+ log.warn("Unable to analyze compiled code on this platform.")
+ log.warn("Please ask the author to include a 'zip_safe'"
+ " setting (either True or False) in the package's setup.py")
+
+
+# Attribute names of options for commands that might need to be convinced to
+# install to the egg build directory
+
+INSTALL_DIRECTORY_ATTRS = [
+ 'install_lib', 'install_dir', 'install_data', 'install_base'
+]
+
+
+def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
+ mode='w'):
+ """Create a zip file from all the files under 'base_dir'. The output
+ zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
+ Python module (if available) or the InfoZIP "zip" utility (if installed
+ and found on the default search path). If neither tool is available,
+ raises DistutilsExecError. Returns the name of the output zip file.
+ """
+ import zipfile
+
+ mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
+ log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
+
+ def visit(z, dirname, names):
+ for name in names:
+ path = os.path.normpath(os.path.join(dirname, name))
+ if os.path.isfile(path):
+ p = path[len(base_dir) + 1:]
+ if not dry_run:
+ z.write(path, p)
+ log.debug("adding '%s'", p)
+
+ compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
+ if not dry_run:
+ z = zipfile.ZipFile(zip_filename, mode, compression=compression)
+ for dirname, dirs, files in sorted_walk(base_dir):
+ visit(z, dirname, files)
+ z.close()
+ else:
+ for dirname, dirs, files in sorted_walk(base_dir):
+ visit(None, dirname, files)
+ return zip_filename
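For illustration, iter_symbols can be exercised directly on a compiled snippet; this is the same walk over names and string constants that scan_module relies on (a sketch, CPython assumed):

    from setuptools.command.bdist_egg import iter_symbols

    code = compile("import inspect\nprint(__file__)", "<demo>", "exec")
    # '__file__' showing up here is exactly what makes scan_module
    # mark a module as not zip-safe.
    print(sorted(set(iter_symbols(code))))
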
diff --git a/third_party/python/setuptools/setuptools/command/bdist_rpm.py b/third_party/python/setuptools/setuptools/command/bdist_rpm.py
new file mode 100644
index 0000000000..0eb1b9c254
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/bdist_rpm.py
@@ -0,0 +1,31 @@
+import distutils.command.bdist_rpm as orig
+
+
+class bdist_rpm(orig.bdist_rpm):
+ """
+ Override the default bdist_rpm behavior to do the following:
+
+ 1. Run egg_info to ensure the name and version are properly calculated.
+ 2. Always run 'install' using --single-version-externally-managed to
+ disable eggs in RPM distributions.
+ """
+
+ def run(self):
+ # ensure distro name is up-to-date
+ self.run_command('egg_info')
+
+ orig.bdist_rpm.run(self)
+
+ def _make_spec_file(self):
+ spec = orig.bdist_rpm._make_spec_file(self)
+ spec = [
+ line.replace(
+ "setup.py install ",
+ "setup.py install --single-version-externally-managed "
+ ).replace(
+ "%setup",
+ "%setup -n %{name}-%{unmangled_version}"
+ )
+ for line in spec
+ ]
+ return spec
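The override amounts to two textual substitutions on each generated spec line; a toy illustration (the input line is hypothetical):

    line = "%setup"
    line = line.replace(
        "setup.py install ",
        "setup.py install --single-version-externally-managed "
    ).replace("%setup", "%setup -n %{name}-%{unmangled_version}")
    print(line)  # %setup -n %{name}-%{unmangled_version}
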
diff --git a/third_party/python/setuptools/setuptools/command/bdist_wininst.py b/third_party/python/setuptools/setuptools/command/bdist_wininst.py
new file mode 100644
index 0000000000..ff4b634592
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/bdist_wininst.py
@@ -0,0 +1,30 @@
+import distutils.command.bdist_wininst as orig
+import warnings
+
+from setuptools import SetuptoolsDeprecationWarning
+
+
+class bdist_wininst(orig.bdist_wininst):
+ def reinitialize_command(self, command, reinit_subcommands=0):
+ """
+ Supplement reinitialize_command to work around
+ http://bugs.python.org/issue20819
+ """
+ cmd = self.distribution.reinitialize_command(
+ command, reinit_subcommands)
+ if command in ('install', 'install_lib'):
+ cmd.install_lib = None
+ return cmd
+
+ def run(self):
+ warnings.warn(
+ "bdist_wininst is deprecated and will be removed in a future "
+ "version. Use bdist_wheel (wheel packages) instead.",
+ SetuptoolsDeprecationWarning
+ )
+
+ self._is_running = True
+ try:
+ orig.bdist_wininst.run(self)
+ finally:
+ self._is_running = False
diff --git a/third_party/python/setuptools/setuptools/command/build_clib.py b/third_party/python/setuptools/setuptools/command/build_clib.py
new file mode 100644
index 0000000000..67ce2444ea
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/build_clib.py
@@ -0,0 +1,101 @@
+import distutils.command.build_clib as orig
+from distutils.errors import DistutilsSetupError
+from distutils import log
+from setuptools.dep_util import newer_pairwise_group
+
+
+class build_clib(orig.build_clib):
+ """
+ Override the default build_clib behaviour to do the following:
+
+ 1. Implement a rudimentary timestamp-based dependency system
+ so 'compile()' doesn't run every time.
+ 2. Add more keys to the 'build_info' dictionary:
+ * obj_deps - specify dependencies for each object compiled.
+ this should be a dictionary mapping a key
+ with the source filename to a list of
+ dependencies. Use an empty string for global
+ dependencies.
+ * cflags - specify a list of additional flags to pass to
+ the compiler.
+ """
+
+ def build_libraries(self, libraries):
+ for (lib_name, build_info) in libraries:
+ sources = build_info.get('sources')
+ if sources is None or not isinstance(sources, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'libraries' option (library '%s'), "
+ "'sources' must be present and must be "
+ "a list of source filenames" % lib_name)
+ sources = list(sources)
+
+ log.info("building '%s' library", lib_name)
+
+ # Make sure everything is the correct type.
+ # obj_deps should be a dictionary mapping each source (key)
+ # to a list/tuple of the files that are its dependencies.
+ obj_deps = build_info.get('obj_deps', dict())
+ if not isinstance(obj_deps, dict):
+ raise DistutilsSetupError(
+ "in 'libraries' option (library '%s'), "
+ "'obj_deps' must be a dictionary of "
+ "type 'source: list'" % lib_name)
+ dependencies = []
+
+ # Get the global dependencies that are specified by the '' key.
+ # These will go into every source's dependency list.
+ global_deps = obj_deps.get('', list())
+ if not isinstance(global_deps, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'libraries' option (library '%s'), "
+ "'obj_deps' must be a dictionary of "
+ "type 'source: list'" % lib_name)
+
+ # Build the list to be used by newer_pairwise_group
+ # each source will be auto-added to its dependencies.
+ for source in sources:
+ src_deps = [source]
+ src_deps.extend(global_deps)
+ extra_deps = obj_deps.get(source, list())
+ if not isinstance(extra_deps, (list, tuple)):
+ raise DistutilsSetupError(
+ "in 'libraries' option (library '%s'), "
+ "'obj_deps' must be a dictionary of "
+ "type 'source: list'" % lib_name)
+ src_deps.extend(extra_deps)
+ dependencies.append(src_deps)
+
+ expected_objects = self.compiler.object_filenames(
+ sources,
+ output_dir=self.build_temp,
+ )
+
+ if (
+ newer_pairwise_group(dependencies, expected_objects)
+ != ([], [])
+ ):
+ # First, compile the source code to object files in the library
+ # directory. (This should probably change to putting object
+ # files in a temporary build directory.)
+ macros = build_info.get('macros')
+ include_dirs = build_info.get('include_dirs')
+ cflags = build_info.get('cflags')
+ self.compiler.compile(
+ sources,
+ output_dir=self.build_temp,
+ macros=macros,
+ include_dirs=include_dirs,
+ extra_postargs=cflags,
+ debug=self.debug
+ )
+
+ # Now "link" the object files together into a static library.
+ # (On Unix at least, this isn't really linking -- it just
+ # builds an archive. Whatever.)
+ self.compiler.create_static_lib(
+ expected_objects,
+ lib_name,
+ output_dir=self.build_clib,
+ debug=self.debug
+ )
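A sketch of the dependency groups build_libraries assembles for newer_pairwise_group, using a made-up obj_deps mapping; each source's group is the source itself, then the global '' dependencies, then its own:

    sources = ['a.c', 'b.c']
    obj_deps = {'': ['common.h'], 'a.c': ['a.h']}

    dependencies = []
    for source in sources:
        src_deps = [source]
        src_deps.extend(obj_deps.get('', []))
        src_deps.extend(obj_deps.get(source, []))
        dependencies.append(src_deps)

    print(dependencies)  # [['a.c', 'common.h', 'a.h'], ['b.c', 'common.h']]
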
diff --git a/third_party/python/setuptools/setuptools/command/build_ext.py b/third_party/python/setuptools/setuptools/command/build_ext.py
new file mode 100644
index 0000000000..03a72b4fce
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/build_ext.py
@@ -0,0 +1,322 @@
+import os
+import sys
+import itertools
+from importlib.machinery import EXTENSION_SUFFIXES
+from distutils.command.build_ext import build_ext as _du_build_ext
+from distutils.file_util import copy_file
+from distutils.ccompiler import new_compiler
+from distutils.sysconfig import customize_compiler, get_config_var
+from distutils.errors import DistutilsError
+from distutils import log
+
+from setuptools.extension import Library
+
+try:
+ # Attempt to use Cython for building extensions, if available
+ from Cython.Distutils.build_ext import build_ext as _build_ext
+ # Additionally, assert that the compiler module will load
+ # also. Ref #1229.
+ __import__('Cython.Compiler.Main')
+except ImportError:
+ _build_ext = _du_build_ext
+
+# make sure _config_vars is initialized
+get_config_var("LDSHARED")
+from distutils.sysconfig import _config_vars as _CONFIG_VARS # noqa
+
+
+def _customize_compiler_for_shlib(compiler):
+ if sys.platform == "darwin":
+ # building .dylib requires additional compiler flags on OSX; here we
+ # temporarily substitute the pyconfig.h variables so that distutils'
+ # 'customize_compiler' uses them before we build the shared libraries.
+ tmp = _CONFIG_VARS.copy()
+ try:
+ # XXX Help! I don't have any idea whether these are right...
+ _CONFIG_VARS['LDSHARED'] = (
+ "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup")
+ _CONFIG_VARS['CCSHARED'] = " -dynamiclib"
+ _CONFIG_VARS['SO'] = ".dylib"
+ customize_compiler(compiler)
+ finally:
+ _CONFIG_VARS.clear()
+ _CONFIG_VARS.update(tmp)
+ else:
+ customize_compiler(compiler)
+
+
+have_rtld = False
+use_stubs = False
+libtype = 'shared'
+
+if sys.platform == "darwin":
+ use_stubs = True
+elif os.name != 'nt':
+ try:
+ import dl
+ use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW')
+ except ImportError:
+ pass
+
+
+def if_dl(s):
+ return s if have_rtld else ''
+
+
+def get_abi3_suffix():
+ """Return the file extension for an abi3-compliant Extension()"""
+ for suffix in EXTENSION_SUFFIXES:
+ if '.abi3' in suffix: # Unix
+ return suffix
+ elif suffix == '.pyd': # Windows
+ return suffix
+
+
+class build_ext(_build_ext):
+ def run(self):
+ """Build extensions in build directory, then copy if --inplace"""
+ old_inplace, self.inplace = self.inplace, 0
+ _build_ext.run(self)
+ self.inplace = old_inplace
+ if old_inplace:
+ self.copy_extensions_to_source()
+
+ def copy_extensions_to_source(self):
+ build_py = self.get_finalized_command('build_py')
+ for ext in self.extensions:
+ fullname = self.get_ext_fullname(ext.name)
+ filename = self.get_ext_filename(fullname)
+ modpath = fullname.split('.')
+ package = '.'.join(modpath[:-1])
+ package_dir = build_py.get_package_dir(package)
+ dest_filename = os.path.join(package_dir,
+ os.path.basename(filename))
+ src_filename = os.path.join(self.build_lib, filename)
+
+ # Always copy, even if source is older than destination, to ensure
+ # that the right extensions for the current Python/platform are
+ # used.
+ copy_file(
+ src_filename, dest_filename, verbose=self.verbose,
+ dry_run=self.dry_run
+ )
+ if ext._needs_stub:
+ self.write_stub(package_dir or os.curdir, ext, True)
+
+ def get_ext_filename(self, fullname):
+ filename = _build_ext.get_ext_filename(self, fullname)
+ if fullname in self.ext_map:
+ ext = self.ext_map[fullname]
+ use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix()
+ if use_abi3:
+ so_ext = get_config_var('EXT_SUFFIX')
+ filename = filename[:-len(so_ext)]
+ filename = filename + get_abi3_suffix()
+ if isinstance(ext, Library):
+ fn, ext = os.path.splitext(filename)
+ return self.shlib_compiler.library_filename(fn, libtype)
+ elif use_stubs and ext._links_to_dynamic:
+ d, fn = os.path.split(filename)
+ return os.path.join(d, 'dl-' + fn)
+ return filename
+
+ def initialize_options(self):
+ _build_ext.initialize_options(self)
+ self.shlib_compiler = None
+ self.shlibs = []
+ self.ext_map = {}
+
+ def finalize_options(self):
+ _build_ext.finalize_options(self)
+ self.extensions = self.extensions or []
+ self.check_extensions_list(self.extensions)
+ self.shlibs = [ext for ext in self.extensions
+ if isinstance(ext, Library)]
+ if self.shlibs:
+ self.setup_shlib_compiler()
+ for ext in self.extensions:
+ ext._full_name = self.get_ext_fullname(ext.name)
+ for ext in self.extensions:
+ fullname = ext._full_name
+ self.ext_map[fullname] = ext
+
+ # distutils 3.1 will also ask for module names
+ # XXX what to do with conflicts?
+ self.ext_map[fullname.split('.')[-1]] = ext
+
+ ltd = self.shlibs and self.links_to_dynamic(ext) or False
+ ns = ltd and use_stubs and not isinstance(ext, Library)
+ ext._links_to_dynamic = ltd
+ ext._needs_stub = ns
+ filename = ext._file_name = self.get_ext_filename(fullname)
+ libdir = os.path.dirname(os.path.join(self.build_lib, filename))
+ if ltd and libdir not in ext.library_dirs:
+ ext.library_dirs.append(libdir)
+ if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
+ ext.runtime_library_dirs.append(os.curdir)
+
+ def setup_shlib_compiler(self):
+ compiler = self.shlib_compiler = new_compiler(
+ compiler=self.compiler, dry_run=self.dry_run, force=self.force
+ )
+ _customize_compiler_for_shlib(compiler)
+
+ if self.include_dirs is not None:
+ compiler.set_include_dirs(self.include_dirs)
+ if self.define is not None:
+ # 'define' option is a list of (name,value) tuples
+ for (name, value) in self.define:
+ compiler.define_macro(name, value)
+ if self.undef is not None:
+ for macro in self.undef:
+ compiler.undefine_macro(macro)
+ if self.libraries is not None:
+ compiler.set_libraries(self.libraries)
+ if self.library_dirs is not None:
+ compiler.set_library_dirs(self.library_dirs)
+ if self.rpath is not None:
+ compiler.set_runtime_library_dirs(self.rpath)
+ if self.link_objects is not None:
+ compiler.set_link_objects(self.link_objects)
+
+ # hack so distutils' build_extension() builds a library instead
+ compiler.link_shared_object = link_shared_object.__get__(compiler)
+
+ def get_export_symbols(self, ext):
+ if isinstance(ext, Library):
+ return ext.export_symbols
+ return _build_ext.get_export_symbols(self, ext)
+
+ def build_extension(self, ext):
+ ext._convert_pyx_sources_to_lang()
+ _compiler = self.compiler
+ try:
+ if isinstance(ext, Library):
+ self.compiler = self.shlib_compiler
+ _build_ext.build_extension(self, ext)
+ if ext._needs_stub:
+ cmd = self.get_finalized_command('build_py').build_lib
+ self.write_stub(cmd, ext)
+ finally:
+ self.compiler = _compiler
+
+ def links_to_dynamic(self, ext):
+ """Return true if 'ext' links to a dynamic lib in the same package"""
+ # XXX this should check to ensure the lib is actually being built
+ # XXX as dynamic, and not just using a locally-found version or a
+ # XXX static-compiled version
+ libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
+ pkg = '.'.join(ext._full_name.split('.')[:-1] + [''])
+ return any(pkg + libname in libnames for libname in ext.libraries)
+
+ def get_outputs(self):
+ return _build_ext.get_outputs(self) + self.__get_stubs_outputs()
+
+ def __get_stubs_outputs(self):
+ # assemble the base name for each extension that needs a stub
+ ns_ext_bases = (
+ os.path.join(self.build_lib, *ext._full_name.split('.'))
+ for ext in self.extensions
+ if ext._needs_stub
+ )
+ # pair each base with the extension
+ pairs = itertools.product(ns_ext_bases, self.__get_output_extensions())
+ return list(base + fnext for base, fnext in pairs)
+
+ def __get_output_extensions(self):
+ yield '.py'
+ yield '.pyc'
+ if self.get_finalized_command('build_py').optimize:
+ yield '.pyo'
+
+ def write_stub(self, output_dir, ext, compile=False):
+ log.info("writing stub loader for %s to %s", ext._full_name,
+ output_dir)
+ stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) +
+ '.py')
+ if compile and os.path.exists(stub_file):
+ raise DistutilsError(stub_file + " already exists! Please delete.")
+ if not self.dry_run:
+ f = open(stub_file, 'w')
+ f.write(
+ '\n'.join([
+ "def __bootstrap__():",
+ " global __bootstrap__, __file__, __loader__",
+ " import sys, os, pkg_resources, importlib.util" +
+ if_dl(", dl"),
+ " __file__ = pkg_resources.resource_filename"
+ "(__name__,%r)"
+ % os.path.basename(ext._file_name),
+ " del __bootstrap__",
+ " if '__loader__' in globals():",
+ " del __loader__",
+ if_dl(" old_flags = sys.getdlopenflags()"),
+ " old_dir = os.getcwd()",
+ " try:",
+ " os.chdir(os.path.dirname(__file__))",
+ if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
+ " spec = importlib.util.spec_from_file_location(",
+ " __name__, __file__)",
+ " mod = importlib.util.module_from_spec(spec)",
+ " spec.loader.exec_module(mod)",
+ " finally:",
+ if_dl(" sys.setdlopenflags(old_flags)"),
+ " os.chdir(old_dir)",
+ "__bootstrap__()",
+ "" # terminal \n
+ ])
+ )
+ f.close()
+ if compile:
+ from distutils.util import byte_compile
+
+ byte_compile([stub_file], optimize=0,
+ force=True, dry_run=self.dry_run)
+ optimize = self.get_finalized_command('install_lib').optimize
+ if optimize > 0:
+ byte_compile([stub_file], optimize=optimize,
+ force=True, dry_run=self.dry_run)
+ if os.path.exists(stub_file) and not self.dry_run:
+ os.unlink(stub_file)
+
+
+if use_stubs or os.name == 'nt':
+ # Build shared libraries
+ #
+ def link_shared_object(
+ self, objects, output_libname, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+ debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+ target_lang=None):
+ self.link(
+ self.SHARED_LIBRARY, objects, output_libname,
+ output_dir, libraries, library_dirs, runtime_library_dirs,
+ export_symbols, debug, extra_preargs, extra_postargs,
+ build_temp, target_lang
+ )
+else:
+ # Build static libraries everywhere else
+ libtype = 'static'
+
+ def link_shared_object(
+ self, objects, output_libname, output_dir=None, libraries=None,
+ library_dirs=None, runtime_library_dirs=None, export_symbols=None,
+ debug=0, extra_preargs=None, extra_postargs=None, build_temp=None,
+ target_lang=None):
+ # XXX we need to either disallow these attrs on Library instances,
+ # or warn/abort here if set, or something...
+ # libraries=None, library_dirs=None, runtime_library_dirs=None,
+ # export_symbols=None, extra_preargs=None, extra_postargs=None,
+ # build_temp=None
+
+ assert output_dir is None # distutils build_ext doesn't pass this
+ output_dir, filename = os.path.split(output_libname)
+ basename, ext = os.path.splitext(filename)
+ if self.library_filename("x").startswith('lib'):
+ # strip 'lib' prefix; this is kludgy if some platform uses
+ # a different prefix
+ basename = basename[3:]
+
+ self.create_static_lib(
+ objects, basename, output_dir, debug, target_lang
+ )
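get_abi3_suffix above just picks the first abi3-looking entry from importlib.machinery.EXTENSION_SUFFIXES; a quick probe (output is platform-dependent):

    from setuptools.command.build_ext import get_abi3_suffix

    # e.g. '.abi3.so' on Linux, '.pyd' on Windows; None when the
    # interpreter advertises no abi3-compatible suffix.
    print(get_abi3_suffix())
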
diff --git a/third_party/python/setuptools/setuptools/command/build_py.py b/third_party/python/setuptools/setuptools/command/build_py.py
new file mode 100644
index 0000000000..b30aa1290a
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/build_py.py
@@ -0,0 +1,270 @@
+from glob import glob
+from distutils.util import convert_path
+import distutils.command.build_py as orig
+import os
+import fnmatch
+import textwrap
+import io
+import distutils.errors
+import itertools
+import stat
+
+try:
+ from setuptools.lib2to3_ex import Mixin2to3
+except Exception:
+
+ class Mixin2to3:
+ def run_2to3(self, files, doctests=True):
+ "do nothing"
+
+
+def make_writable(target):
+ os.chmod(target, os.stat(target).st_mode | stat.S_IWRITE)
+
+
+class build_py(orig.build_py, Mixin2to3):
+ """Enhanced 'build_py' command that includes data files with packages
+
+ The data files are specified via a 'package_data' argument to 'setup()'.
+ See 'setuptools.dist.Distribution' for more details.
+
+ Also, this version of the 'build_py' command allows you to specify both
+ 'py_modules' and 'packages' in the same setup operation.
+ """
+
+ def finalize_options(self):
+ orig.build_py.finalize_options(self)
+ self.package_data = self.distribution.package_data
+ self.exclude_package_data = (self.distribution.exclude_package_data or
+ {})
+ if 'data_files' in self.__dict__:
+ del self.__dict__['data_files']
+ self.__updated_files = []
+ self.__doctests_2to3 = []
+
+ def run(self):
+ """Build modules, packages, and copy data files to build directory"""
+ if not self.py_modules and not self.packages:
+ return
+
+ if self.py_modules:
+ self.build_modules()
+
+ if self.packages:
+ self.build_packages()
+ self.build_package_data()
+
+ self.run_2to3(self.__updated_files, False)
+ self.run_2to3(self.__updated_files, True)
+ self.run_2to3(self.__doctests_2to3, True)
+
+ # Only compile actual .py files, using our base class' idea of what our
+ # output files are.
+ self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0))
+
+ def __getattr__(self, attr):
+ "lazily compute data files"
+ if attr == 'data_files':
+ self.data_files = self._get_data_files()
+ return self.data_files
+ return orig.build_py.__getattr__(self, attr)
+
+ def build_module(self, module, module_file, package):
+ outfile, copied = orig.build_py.build_module(self, module, module_file,
+ package)
+ if copied:
+ self.__updated_files.append(outfile)
+ return outfile, copied
+
+ def _get_data_files(self):
+ """Generate list of '(package,src_dir,build_dir,filenames)' tuples"""
+ self.analyze_manifest()
+ return list(map(self._get_pkg_data_files, self.packages or ()))
+
+ def _get_pkg_data_files(self, package):
+ # Locate package source directory
+ src_dir = self.get_package_dir(package)
+
+ # Compute package build directory
+ build_dir = os.path.join(*([self.build_lib] + package.split('.')))
+
+ # Strip directory from globbed filenames
+ filenames = [
+ os.path.relpath(file, src_dir)
+ for file in self.find_data_files(package, src_dir)
+ ]
+ return package, src_dir, build_dir, filenames
+
+ def find_data_files(self, package, src_dir):
+ """Return filenames for package's data files in 'src_dir'"""
+ patterns = self._get_platform_patterns(
+ self.package_data,
+ package,
+ src_dir,
+ )
+ globs_expanded = map(glob, patterns)
+ # flatten the expanded globs into an iterable of matches
+ globs_matches = itertools.chain.from_iterable(globs_expanded)
+ glob_files = filter(os.path.isfile, globs_matches)
+ files = itertools.chain(
+ self.manifest_files.get(package, []),
+ glob_files,
+ )
+ return self.exclude_data_files(package, src_dir, files)
+
+ def build_package_data(self):
+ """Copy data files into build directory"""
+ for package, src_dir, build_dir, filenames in self.data_files:
+ for filename in filenames:
+ target = os.path.join(build_dir, filename)
+ self.mkpath(os.path.dirname(target))
+ srcfile = os.path.join(src_dir, filename)
+ outf, copied = self.copy_file(srcfile, target)
+ make_writable(target)
+ srcfile = os.path.abspath(srcfile)
+ if (copied and
+ srcfile in self.distribution.convert_2to3_doctests):
+ self.__doctests_2to3.append(outf)
+
+ def analyze_manifest(self):
+ self.manifest_files = mf = {}
+ if not self.distribution.include_package_data:
+ return
+ src_dirs = {}
+ for package in self.packages or ():
+ # Locate package source directory
+ src_dirs[assert_relative(self.get_package_dir(package))] = package
+
+ self.run_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
+ for path in ei_cmd.filelist.files:
+ d, f = os.path.split(assert_relative(path))
+ prev = None
+ oldf = f
+ while d and d != prev and d not in src_dirs:
+ prev = d
+ d, df = os.path.split(d)
+ f = os.path.join(df, f)
+ if d in src_dirs:
+ if path.endswith('.py') and f == oldf:
+ continue # it's a module, not data
+ mf.setdefault(src_dirs[d], []).append(path)
+
+ def get_data_files(self):
+ pass # Lazily compute data files in _get_data_files() function.
+
+ def check_package(self, package, package_dir):
+ """Check namespace packages' __init__ for declare_namespace"""
+ try:
+ return self.packages_checked[package]
+ except KeyError:
+ pass
+
+ init_py = orig.build_py.check_package(self, package, package_dir)
+ self.packages_checked[package] = init_py
+
+ if not init_py or not self.distribution.namespace_packages:
+ return init_py
+
+ for pkg in self.distribution.namespace_packages:
+ if pkg == package or pkg.startswith(package + '.'):
+ break
+ else:
+ return init_py
+
+ with io.open(init_py, 'rb') as f:
+ contents = f.read()
+ if b'declare_namespace' not in contents:
+ raise distutils.errors.DistutilsError(
+ "Namespace package problem: %s is a namespace package, but "
+ "its\n__init__.py does not call declare_namespace()! Please "
+ 'fix it.\n(See the setuptools manual under '
+ '"Namespace Packages" for details.)\n"' % (package,)
+ )
+ return init_py
+
+ def initialize_options(self):
+ self.packages_checked = {}
+ orig.build_py.initialize_options(self)
+
+ def get_package_dir(self, package):
+ res = orig.build_py.get_package_dir(self, package)
+ if self.distribution.src_root is not None:
+ return os.path.join(self.distribution.src_root, res)
+ return res
+
+ def exclude_data_files(self, package, src_dir, files):
+ """Filter filenames for package's data files in 'src_dir'"""
+ files = list(files)
+ patterns = self._get_platform_patterns(
+ self.exclude_package_data,
+ package,
+ src_dir,
+ )
+ match_groups = (
+ fnmatch.filter(files, pattern)
+ for pattern in patterns
+ )
+ # flatten the groups of matches into an iterable of matches
+ matches = itertools.chain.from_iterable(match_groups)
+ bad = set(matches)
+ keepers = (
+ fn
+ for fn in files
+ if fn not in bad
+ )
+ # ditch dupes
+ return list(_unique_everseen(keepers))
+
+ @staticmethod
+ def _get_platform_patterns(spec, package, src_dir):
+ """
+ yield platform-specific path patterns (suitable for glob
+ or fn_match) from a glob-based spec (such as
+ self.package_data or self.exclude_package_data)
+ matching package in src_dir.
+ """
+ raw_patterns = itertools.chain(
+ spec.get('', []),
+ spec.get(package, []),
+ )
+ return (
+ # Each pattern has to be converted to a platform-specific path
+ os.path.join(src_dir, convert_path(pattern))
+ for pattern in raw_patterns
+ )
+
+
+# from Python docs
+def _unique_everseen(iterable, key=None):
+ "List unique elements, preserving order. Remember all elements ever seen."
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+ # unique_everseen('ABBCcAD', str.lower) --> A B C D
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in itertools.filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
+
+
+def assert_relative(path):
+ if not os.path.isabs(path):
+ return path
+ from distutils.errors import DistutilsSetupError
+
+ msg = textwrap.dedent("""
+ Error: setup script specifies an absolute path:
+
+ %s
+
+ setup() arguments must *always* be /-separated paths relative to the
+ setup.py directory, *never* absolute paths.
+ """).lstrip() % path
+ raise DistutilsSetupError(msg)
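The _unique_everseen helper behaves as its docstring advertises; a quick check of both forms:

    from setuptools.command.build_py import _unique_everseen

    print(list(_unique_everseen('AAAABBBCCDAABBB')))      # ['A', 'B', 'C', 'D']
    print(list(_unique_everseen('ABBCcAD', str.lower)))   # ['A', 'B', 'C', 'D']
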
diff --git a/third_party/python/setuptools/setuptools/command/develop.py b/third_party/python/setuptools/setuptools/command/develop.py
new file mode 100644
index 0000000000..faf8c988e2
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/develop.py
@@ -0,0 +1,216 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsError, DistutilsOptionError
+import os
+import glob
+import io
+
+import pkg_resources
+from setuptools.command.easy_install import easy_install
+from setuptools import namespaces
+import setuptools
+
+
+class develop(namespaces.DevelopInstaller, easy_install):
+ """Set up package for development"""
+
+ description = "install package in 'development mode'"
+
+ user_options = easy_install.user_options + [
+ ("uninstall", "u", "Uninstall this source package"),
+ ("egg-path=", None, "Set the path to be used in the .egg-link file"),
+ ]
+
+ boolean_options = easy_install.boolean_options + ['uninstall']
+
+ command_consumes_arguments = False # override base
+
+ def run(self):
+ if self.uninstall:
+ self.multi_version = True
+ self.uninstall_link()
+ self.uninstall_namespaces()
+ else:
+ self.install_for_development()
+ self.warn_deprecated_options()
+
+ def initialize_options(self):
+ self.uninstall = None
+ self.egg_path = None
+ easy_install.initialize_options(self)
+ self.setup_path = None
+ self.always_copy_from = '.' # always copy eggs installed in curdir
+
+ def finalize_options(self):
+ ei = self.get_finalized_command("egg_info")
+ if ei.broken_egg_info:
+ template = "Please rename %r to %r before using 'develop'"
+ args = ei.egg_info, ei.broken_egg_info
+ raise DistutilsError(template % args)
+ self.args = [ei.egg_name]
+
+ easy_install.finalize_options(self)
+ self.expand_basedirs()
+ self.expand_dirs()
+ # pick up setup-dir .egg files only: no .egg-info
+ self.package_index.scan(glob.glob('*.egg'))
+
+ egg_link_fn = ei.egg_name + '.egg-link'
+ self.egg_link = os.path.join(self.install_dir, egg_link_fn)
+ self.egg_base = ei.egg_base
+ if self.egg_path is None:
+ self.egg_path = os.path.abspath(ei.egg_base)
+
+ target = pkg_resources.normalize_path(self.egg_base)
+ egg_path = pkg_resources.normalize_path(
+ os.path.join(self.install_dir, self.egg_path))
+ if egg_path != target:
+ raise DistutilsOptionError(
+ "--egg-path must be a relative path from the install"
+ " directory to " + target
+ )
+
+ # Make a distribution for the package's source
+ self.dist = pkg_resources.Distribution(
+ target,
+ pkg_resources.PathMetadata(target, os.path.abspath(ei.egg_info)),
+ project_name=ei.egg_name
+ )
+
+ self.setup_path = self._resolve_setup_path(
+ self.egg_base,
+ self.install_dir,
+ self.egg_path,
+ )
+
+ @staticmethod
+ def _resolve_setup_path(egg_base, install_dir, egg_path):
+ """
+ Generate a path from egg_base back to '.' where the
+ setup script resides and ensure that path points to the
+ setup path from $install_dir/$egg_path.
+ """
+ path_to_setup = egg_base.replace(os.sep, '/').rstrip('/')
+ if path_to_setup != os.curdir:
+ path_to_setup = '../' * (path_to_setup.count('/') + 1)
+ resolved = pkg_resources.normalize_path(
+ os.path.join(install_dir, egg_path, path_to_setup)
+ )
+ if resolved != pkg_resources.normalize_path(os.curdir):
+ raise DistutilsOptionError(
+ "Can't get a consistent path to setup script from"
+ " installation directory", resolved,
+ pkg_resources.normalize_path(os.curdir))
+ return path_to_setup
+
+ def install_for_development(self):
+ if getattr(self.distribution, 'use_2to3', False):
+ # If we run 2to3 we cannot do this in place:
+
+ # Ensure metadata is up-to-date
+ self.reinitialize_command('build_py', inplace=0)
+ self.run_command('build_py')
+ bpy_cmd = self.get_finalized_command("build_py")
+ build_path = pkg_resources.normalize_path(bpy_cmd.build_lib)
+
+ # Build extensions
+ self.reinitialize_command('egg_info', egg_base=build_path)
+ self.run_command('egg_info')
+
+ self.reinitialize_command('build_ext', inplace=0)
+ self.run_command('build_ext')
+
+ # Fixup egg-link and easy-install.pth
+ ei_cmd = self.get_finalized_command("egg_info")
+ self.egg_path = build_path
+ self.dist.location = build_path
+ # XXX
+ self.dist._provider = pkg_resources.PathMetadata(
+ build_path, ei_cmd.egg_info)
+ else:
+ # Without 2to3 inplace works fine:
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ self.reinitialize_command('build_ext', inplace=1)
+ self.run_command('build_ext')
+
+ if setuptools.bootstrap_install_from:
+ self.easy_install(setuptools.bootstrap_install_from)
+ setuptools.bootstrap_install_from = None
+
+ self.install_namespaces()
+
+ # create an .egg-link in the installation dir, pointing to our egg
+ log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
+ if not self.dry_run:
+ with open(self.egg_link, "w") as f:
+ f.write(self.egg_path + "\n" + self.setup_path)
+ # postprocess the installed distro, fixing up .pth, installing scripts,
+ # and handling requirements
+ self.process_distribution(None, self.dist, not self.no_deps)
+
+ def uninstall_link(self):
+ if os.path.exists(self.egg_link):
+ log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
+ egg_link_file = open(self.egg_link)
+ contents = [line.rstrip() for line in egg_link_file]
+ egg_link_file.close()
+ if contents not in ([self.egg_path],
+ [self.egg_path, self.setup_path]):
+ log.warn("Link points to %s: uninstall aborted", contents)
+ return
+ if not self.dry_run:
+ os.unlink(self.egg_link)
+ if not self.dry_run:
+ self.update_pth(self.dist) # remove any .pth link to us
+ if self.distribution.scripts:
+ # XXX should also check for entry point scripts!
+ log.warn("Note: you must uninstall or replace scripts manually!")
+
+ def install_egg_scripts(self, dist):
+ if dist is not self.dist:
+ # Installing a dependency, so fall back to normal behavior
+ return easy_install.install_egg_scripts(self, dist)
+
+ # create wrapper scripts in the script dir, pointing to dist.scripts
+
+ # new-style...
+ self.install_wrapper_scripts(dist)
+
+ # ...and old-style
+ for script_name in self.distribution.scripts or []:
+ script_path = os.path.abspath(convert_path(script_name))
+ script_name = os.path.basename(script_path)
+ with io.open(script_path) as strm:
+ script_text = strm.read()
+ self.install_script(dist, script_name, script_text, script_path)
+
+ def install_wrapper_scripts(self, dist):
+ dist = VersionlessRequirement(dist)
+ return easy_install.install_wrapper_scripts(self, dist)
+
+
+class VersionlessRequirement:
+ """
+ Adapt a pkg_resources.Distribution to simply return the project
+ name as the 'requirement' so that scripts will work across
+ multiple versions.
+
+ >>> from pkg_resources import Distribution
+ >>> dist = Distribution(project_name='foo', version='1.0')
+ >>> str(dist.as_requirement())
+ 'foo==1.0'
+ >>> adapted_dist = VersionlessRequirement(dist)
+ >>> str(adapted_dist.as_requirement())
+ 'foo'
+ """
+
+ def __init__(self, dist):
+ self.__dist = dist
+
+ def __getattr__(self, name):
+ return getattr(self.__dist, name)
+
+ def as_requirement(self):
+ return self.project_name
diff --git a/third_party/python/setuptools/setuptools/command/dist_info.py b/third_party/python/setuptools/setuptools/command/dist_info.py
new file mode 100644
index 0000000000..c45258fa03
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/dist_info.py
@@ -0,0 +1,36 @@
+"""
+Create a dist_info directory
+As defined in the wheel specification
+"""
+
+import os
+
+from distutils.core import Command
+from distutils import log
+
+
+class dist_info(Command):
+
+ description = 'create a .dist-info directory'
+
+ user_options = [
+ ('egg-base=', 'e', "directory containing .egg-info directories"
+ " (default: top of the source tree)"),
+ ]
+
+ def initialize_options(self):
+ self.egg_base = None
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ egg_info = self.get_finalized_command('egg_info')
+ egg_info.egg_base = self.egg_base
+ egg_info.finalize_options()
+ egg_info.run()
+ dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info'
+ log.info("creating '{}'".format(os.path.abspath(dist_info_dir)))
+
+ bdist_wheel = self.get_finalized_command('bdist_wheel')
+ bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir)
diff --git a/third_party/python/setuptools/setuptools/command/easy_install.py b/third_party/python/setuptools/setuptools/command/easy_install.py
new file mode 100644
index 0000000000..9ec83b7d8b
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/easy_install.py
@@ -0,0 +1,2318 @@
+"""
+Easy Install
+------------
+
+A tool for doing automatic download/extract/build of distutils-based Python
+packages. For detailed documentation, see the accompanying EasyInstall.txt
+file, or visit the `EasyInstall home page`__.
+
+__ https://setuptools.readthedocs.io/en/latest/easy_install.html
+
+"""
+
+from glob import glob
+from distutils.util import get_platform
+from distutils.util import convert_path, subst_vars
+from distutils.errors import (
+ DistutilsArgError, DistutilsOptionError,
+ DistutilsError, DistutilsPlatformError,
+)
+from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
+from distutils import log, dir_util
+from distutils.command.build_scripts import first_line_re
+from distutils.spawn import find_executable
+import sys
+import os
+import zipimport
+import shutil
+import tempfile
+import zipfile
+import re
+import stat
+import random
+import textwrap
+import warnings
+import site
+import struct
+import contextlib
+import subprocess
+import shlex
+import io
+import configparser
+
+
+from sysconfig import get_config_vars, get_path
+
+from setuptools import SetuptoolsDeprecationWarning
+
+from setuptools import Command
+from setuptools.sandbox import run_setup
+from setuptools.command import setopt
+from setuptools.archive_util import unpack_archive
+from setuptools.package_index import (
+ PackageIndex, parse_requirement_arg, URL_SCHEME,
+)
+from setuptools.command import bdist_egg, egg_info
+from setuptools.wheel import Wheel
+from pkg_resources import (
+ yield_lines, normalize_path, resource_string, ensure_directory,
+ get_distribution, find_distributions, Environment, Requirement,
+ Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
+ VersionConflict, DEVELOP_DIST,
+)
+import pkg_resources
+
+# Turn on PEP440Warnings
+warnings.filterwarnings("default", category=pkg_resources.PEP440Warning)
+
+__all__ = [
+ 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
+ 'main', 'get_exe_prefixes',
+]
+
+
+def is_64bit():
+ return struct.calcsize("P") == 8
+
+
+def samefile(p1, p2):
+ """
+ Determine if two paths reference the same file.
+
+ Augments os.path.samefile to work on Windows and
+ suppresses errors if the path doesn't exist.
+ """
+ both_exist = os.path.exists(p1) and os.path.exists(p2)
+ use_samefile = hasattr(os.path, 'samefile') and both_exist
+ if use_samefile:
+ return os.path.samefile(p1, p2)
+ norm_p1 = os.path.normpath(os.path.normcase(p1))
+ norm_p2 = os.path.normpath(os.path.normcase(p2))
+ return norm_p1 == norm_p2
+
+
+def _to_bytes(s):
+ return s.encode('utf8')
+
+
+def isascii(s):
+ try:
+ s.encode('ascii')
+ return True
+ except UnicodeError:
+ return False
+
+
+def _one_liner(text):
+ return textwrap.dedent(text).strip().replace('\n', '; ')
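As a side note, _one_liner collapses a dedented block into a single ';'-joined line; a toy sketch:

    from setuptools.command.easy_install import _one_liner

    print(_one_liner("""
        import sys
        print(sys.prefix)
    """))  # -> import sys; print(sys.prefix)
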
+
+
+class easy_install(Command):
+ """Manage a download/build/install process"""
+ description = "Find/get/install Python packages"
+ command_consumes_arguments = True
+
+ user_options = [
+ ('prefix=', None, "installation prefix"),
+ ("zip-ok", "z", "install package as a zipfile"),
+ ("multi-version", "m", "make apps have to require() a version"),
+ ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
+ ("install-dir=", "d", "install package to DIR"),
+ ("script-dir=", "s", "install scripts to DIR"),
+ ("exclude-scripts", "x", "Don't install scripts"),
+ ("always-copy", "a", "Copy all needed packages to install dir"),
+ ("index-url=", "i", "base URL of Python Package Index"),
+ ("find-links=", "f", "additional URL(s) to search for packages"),
+ ("build-directory=", "b",
+ "download/extract/build in DIR; keep the results"),
+ ('optimize=', 'O',
+ "also compile with optimization: -O1 for \"python -O\", "
+ "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
+ ('record=', None,
+ "filename in which to record list of installed files"),
+ ('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
+ ('site-dirs=', 'S', "list of directories where .pth files work"),
+ ('editable', 'e', "Install specified packages in editable form"),
+ ('no-deps', 'N', "don't install dependencies"),
+ ('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
+ ('local-snapshots-ok', 'l',
+ "allow building eggs from local checkouts"),
+ ('version', None, "print version information and exit"),
+ ('no-find-links', None,
+ "Don't load find-links defined in packages being installed"),
+ ('user', None, "install in user site-package '%s'" % site.USER_SITE)
+ ]
+ boolean_options = [
+ 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
+ 'editable',
+ 'no-deps', 'local-snapshots-ok', 'version',
+ 'user'
+ ]
+
+ negative_opt = {'always-unzip': 'zip-ok'}
+ create_index = PackageIndex
+
+ def initialize_options(self):
+ # the --user option seems to be an opt-in one,
+ # so the default should be False.
+ self.user = 0
+ self.zip_ok = self.local_snapshots_ok = None
+ self.install_dir = self.script_dir = self.exclude_scripts = None
+ self.index_url = None
+ self.find_links = None
+ self.build_directory = None
+ self.args = None
+ self.optimize = self.record = None
+ self.upgrade = self.always_copy = self.multi_version = None
+ self.editable = self.no_deps = self.allow_hosts = None
+ self.root = self.prefix = self.no_report = None
+ self.version = None
+ self.install_purelib = None # for pure module distributions
+ self.install_platlib = None # non-pure (dists w/ extensions)
+ self.install_headers = None # for C/C++ headers
+ self.install_lib = None # set to either purelib or platlib
+ self.install_scripts = None
+ self.install_data = None
+ self.install_base = None
+ self.install_platbase = None
+ if site.ENABLE_USER_SITE:
+ self.install_userbase = site.USER_BASE
+ self.install_usersite = site.USER_SITE
+ else:
+ self.install_userbase = None
+ self.install_usersite = None
+ self.no_find_links = None
+
+ # Options not specifiable via command line
+ self.package_index = None
+ self.pth_file = self.always_copy_from = None
+ self.site_dirs = None
+ self.installed_projects = {}
+ # Always read easy_install options, even if we are subclassed, or have
+ # an independent instance created. This ensures that defaults will
+ # always come from the standard configuration file(s)' "easy_install"
+ # section, even if this is a "develop" or "install" command, or some
+ # other embedding.
+ self._dry_run = None
+ self.verbose = self.distribution.verbose
+ self.distribution._set_command_options(
+ self, self.distribution.get_option_dict('easy_install')
+ )
+
+ def delete_blockers(self, blockers):
+ extant_blockers = (
+ filename for filename in blockers
+ if os.path.exists(filename) or os.path.islink(filename)
+ )
+ list(map(self._delete_path, extant_blockers))
+
+ def _delete_path(self, path):
+ log.info("Deleting %s", path)
+ if self.dry_run:
+ return
+
+ is_tree = os.path.isdir(path) and not os.path.islink(path)
+ remover = rmtree if is_tree else os.unlink
+ remover(path)
+
+ @staticmethod
+ def _render_version():
+ """
+ Render the Setuptools version and installation details, then exit.
+ """
+ ver = '{}.{}'.format(*sys.version_info)
+ dist = get_distribution('setuptools')
+ tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})'
+ print(tmpl.format(**locals()))
+ raise SystemExit()
+
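+ # _render_version() output is a single line in the form below (the
+ # version and path shown are illustrative only):
+ #
+ #   setuptools 51.2.0 from /usr/lib/python3/site-packages (Python 3.9)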
+ def finalize_options(self):
+ if self.version: self._render_version()
+
+ py_version = sys.version.split()[0]
+ prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
+
+ self.config_vars = {
+ 'dist_name': self.distribution.get_name(),
+ 'dist_version': self.distribution.get_version(),
+ 'dist_fullname': self.distribution.get_fullname(),
+ 'py_version': py_version,
+ # built from sys.version_info so double-digit minor versions (3.10+)
+ # are not truncated by fixed-width string slicing
+ 'py_version_short': '{}.{}'.format(*sys.version_info),
+ 'py_version_nodot': '{}{}'.format(*sys.version_info),
+ 'sys_prefix': prefix,
+ 'prefix': prefix,
+ 'sys_exec_prefix': exec_prefix,
+ 'exec_prefix': exec_prefix,
+ # Only python 3.2+ has abiflags
+ 'abiflags': getattr(sys, 'abiflags', ''),
+ }
+
+ if site.ENABLE_USER_SITE:
+ self.config_vars['userbase'] = self.install_userbase
+ self.config_vars['usersite'] = self.install_usersite
+
+ elif self.user:
+ log.warn("WARNING: The user site-packages directory is disabled.")
+
+ self._fix_install_dir_for_user_site()
+
+ self.expand_basedirs()
+ self.expand_dirs()
+
+ self._expand(
+ 'install_dir', 'script_dir', 'build_directory',
+ 'site_dirs',
+ )
+ # If a non-default installation directory was specified, default the
+ # script directory to match it.
+ if self.script_dir is None:
+ self.script_dir = self.install_dir
+
+ if self.no_find_links is None:
+ self.no_find_links = False
+
+ # Let install_dir get set by install_lib command, which in turn
+ # gets its info from the install command, and takes into account
+ # --prefix and --home and all that other crud.
+ self.set_undefined_options(
+ 'install_lib', ('install_dir', 'install_dir')
+ )
+ # Likewise, set default script_dir from 'install_scripts.install_dir'
+ self.set_undefined_options(
+ 'install_scripts', ('install_dir', 'script_dir')
+ )
+
+ if self.user and self.install_purelib:
+ self.install_dir = self.install_purelib
+ self.script_dir = self.install_scripts
+ # default --record from the install command
+ self.set_undefined_options('install', ('record', 'record'))
+ # Only used by the --site-dirs check below; materialize it as a set so
+ # repeated membership tests work (a bare map() iterator would be
+ # exhausted after the first test).
+ normpath = set(map(normalize_path, sys.path))
+ self.all_site_dirs = get_site_dirs()
+ if self.site_dirs is not None:
+ site_dirs = [
+ os.path.expanduser(s.strip()) for s in
+ self.site_dirs.split(',')
+ ]
+ for d in site_dirs:
+ if not os.path.isdir(d):
+ log.warn("%s (in --site-dirs) does not exist", d)
+ elif normalize_path(d) not in normpath:
+ raise DistutilsOptionError(
+ d + " (in --site-dirs) is not on sys.path"
+ )
+ else:
+ self.all_site_dirs.append(normalize_path(d))
+ if not self.editable:
+ self.check_site_dir()
+ self.index_url = self.index_url or "https://pypi.org/simple/"
+ self.shadow_path = self.all_site_dirs[:]
+ for path_item in self.install_dir, normalize_path(self.script_dir):
+ if path_item not in self.shadow_path:
+ self.shadow_path.insert(0, path_item)
+
+ if self.allow_hosts is not None:
+ hosts = [s.strip() for s in self.allow_hosts.split(',')]
+ else:
+ hosts = ['*']
+ if self.package_index is None:
+ self.package_index = self.create_index(
+ self.index_url, search_path=self.shadow_path, hosts=hosts,
+ )
+ self.local_index = Environment(self.shadow_path + sys.path)
+
+ if self.find_links is not None:
+ if isinstance(self.find_links, str):
+ self.find_links = self.find_links.split()
+ else:
+ self.find_links = []
+ if self.local_snapshots_ok:
+ self.package_index.scan_egg_links(self.shadow_path + sys.path)
+ if not self.no_find_links:
+ self.package_index.add_find_links(self.find_links)
+ self.set_undefined_options('install_lib', ('optimize', 'optimize'))
+ if not isinstance(self.optimize, int):
+ try:
+ self.optimize = int(self.optimize)
+ if not (0 <= self.optimize <= 2):
+ raise ValueError
+ except ValueError as e:
+ raise DistutilsOptionError(
+ "--optimize must be 0, 1, or 2"
+ ) from e
+
+ if self.editable and not self.build_directory:
+ raise DistutilsArgError(
+ "Must specify a build directory (-b) when using --editable"
+ )
+ if not self.args:
+ raise DistutilsArgError(
+ "No urls, filenames, or requirements specified (see --help)")
+
+ self.outputs = []
+
+ def _fix_install_dir_for_user_site(self):
+ """
+ Fix the install_dir if "--user" was used.
+ """
+ if not self.user or not site.ENABLE_USER_SITE:
+ return
+
+ self.create_home_path()
+ if self.install_userbase is None:
+ msg = "User base directory is not specified"
+ raise DistutilsPlatformError(msg)
+ self.install_base = self.install_platbase = self.install_userbase
+ scheme_name = os.name.replace('posix', 'unix') + '_user'
+ self.select_scheme(scheme_name)
+
+ def _expand_attrs(self, attrs):
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ if os.name == 'posix' or os.name == 'nt':
+ val = os.path.expanduser(val)
+ val = subst_vars(val, self.config_vars)
+ setattr(self, attr, val)
+
+ def expand_basedirs(self):
+ """Calls `os.path.expanduser` on install_base, install_platbase and
+ root."""
+ self._expand_attrs(['install_base', 'install_platbase', 'root'])
+
+ def expand_dirs(self):
+ """Calls `os.path.expanduser` on install dirs."""
+ dirs = [
+ 'install_purelib',
+ 'install_platlib',
+ 'install_lib',
+ 'install_headers',
+ 'install_scripts',
+ 'install_data',
+ ]
+ self._expand_attrs(dirs)
+
+ def run(self, show_deprecation=True):
+ if show_deprecation:
+ self.announce(
+ "WARNING: The easy_install command is deprecated "
+ "and will be removed in a future version.",
+ log.WARN,
+ )
+ if self.verbose != self.distribution.verbose:
+ log.set_verbosity(self.verbose)
+ try:
+ for spec in self.args:
+ self.easy_install(spec, not self.no_deps)
+ if self.record:
+ outputs = self.outputs
+ if self.root: # strip any package prefix
+ root_len = len(self.root)
+ for counter in range(len(outputs)):
+ outputs[counter] = outputs[counter][root_len:]
+ from distutils import file_util
+
+ self.execute(
+ file_util.write_file, (self.record, outputs),
+ "writing list of installed files to '%s'" %
+ self.record
+ )
+ self.warn_deprecated_options()
+ finally:
+ log.set_verbosity(self.distribution.verbose)
+
+ def pseudo_tempname(self):
+ """Return a pseudo-tempname base in the install directory.
+ This code is intentionally naive; if a malicious party can write to
+ the target directory you're already in deep doodoo.
+ """
+ try:
+ pid = os.getpid()
+ except Exception:
+ pid = random.randint(0, sys.maxsize)
+ return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
+
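+ # Example: with install_dir '/opt/site-packages' and pid 1234 (both
+ # hypothetical), pseudo_tempname() returns
+ # '/opt/site-packages/test-easy-install-1234'. The name is never
+ # registered anywhere; uniqueness relies on the pid alone.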
+ def warn_deprecated_options(self):
+ pass
+
+ def check_site_dir(self):
+ """Verify that self.install_dir is .pth-capable dir, if needed"""
+
+ instdir = normalize_path(self.install_dir)
+ pth_file = os.path.join(instdir, 'easy-install.pth')
+
+ if not os.path.exists(instdir):
+ try:
+ os.makedirs(instdir)
+ except (OSError, IOError):
+ self.cant_write_to_target()
+
+ # Is it a configured, PYTHONPATH, implicit, or explicit site dir?
+ is_site_dir = instdir in self.all_site_dirs
+
+ if not is_site_dir and not self.multi_version:
+ # No? Then directly test whether it does .pth file processing
+ is_site_dir = self.check_pth_processing()
+ else:
+ # make sure we can write to target dir
+ testfile = self.pseudo_tempname() + '.write-test'
+ test_exists = os.path.exists(testfile)
+ try:
+ if test_exists:
+ os.unlink(testfile)
+ open(testfile, 'w').close()
+ os.unlink(testfile)
+ except (OSError, IOError):
+ self.cant_write_to_target()
+
+ if not is_site_dir and not self.multi_version:
+ # Can't install non-multi to non-site dir with easy_install
+ pythonpath = os.environ.get('PYTHONPATH', '')
+ log.warn(self.__no_default_msg, self.install_dir, pythonpath)
+
+ if is_site_dir:
+ if self.pth_file is None:
+ self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
+ else:
+ self.pth_file = None
+
+ if self.multi_version and not os.path.exists(pth_file):
+ self.pth_file = None # don't create a .pth file
+ self.install_dir = instdir
+
+ __cant_write_msg = textwrap.dedent("""
+ can't create or remove files in install directory
+
+ The following error occurred while trying to add or remove files in the
+ installation directory:
+
+ %s
+
+ The installation directory you specified (via --install-dir, --prefix, or
+ the distutils default setting) was:
+
+ %s
+ """).lstrip() # noqa
+
+ __not_exists_id = textwrap.dedent("""
+ This directory does not currently exist. Please create it and try again, or
+ choose a different installation directory (using the -d or --install-dir
+ option).
+ """).lstrip() # noqa
+
+ __access_msg = textwrap.dedent("""
+ Perhaps your account does not have write access to this directory? If the
+ installation directory is a system-owned directory, you may need to sign in
+ as the administrator or "root" account. If you do not have administrative
+ access to this machine, you may wish to choose a different installation
+ directory, preferably one that is listed in your PYTHONPATH environment
+ variable.
+
+ For information on other options, you may wish to consult the
+ documentation at:
+
+ https://setuptools.readthedocs.io/en/latest/easy_install.html
+
+ Please make the appropriate changes for your system and try again.
+ """).lstrip() # noqa
+
+ def cant_write_to_target(self):
+ msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,)
+
+ if not os.path.exists(self.install_dir):
+ msg += '\n' + self.__not_exists_id
+ else:
+ msg += '\n' + self.__access_msg
+ raise DistutilsError(msg)
+
+ def check_pth_processing(self):
+ """Empirically verify whether .pth files are supported in inst. dir"""
+ instdir = self.install_dir
+ log.info("Checking .pth file support in %s", instdir)
+ pth_file = self.pseudo_tempname() + ".pth"
+ ok_file = pth_file + '.ok'
+ ok_exists = os.path.exists(ok_file)
+ tmpl = _one_liner("""
+ import os
+ f = open({ok_file!r}, 'w')
+ f.write('OK')
+ f.close()
+ """) + '\n'
+ try:
+ if ok_exists:
+ os.unlink(ok_file)
+ dirname = os.path.dirname(ok_file)
+ os.makedirs(dirname, exist_ok=True)
+ f = open(pth_file, 'w')
+ except (OSError, IOError):
+ self.cant_write_to_target()
+ else:
+ try:
+ f.write(tmpl.format(**locals()))
+ f.close()
+ f = None
+ executable = sys.executable
+ if os.name == 'nt':
+ dirname, basename = os.path.split(executable)
+ alt = os.path.join(dirname, 'pythonw.exe')
+ use_alt = (
+ basename.lower() == 'python.exe' and
+ os.path.exists(alt)
+ )
+ if use_alt:
+ # use pythonw.exe to avoid opening a console window
+ executable = alt
+
+ from distutils.spawn import spawn
+
+ spawn([executable, '-E', '-c', 'pass'], 0)
+
+ if os.path.exists(ok_file):
+ log.info(
+ "TEST PASSED: %s appears to support .pth files",
+ instdir
+ )
+ return True
+ finally:
+ if f:
+ f.close()
+ if os.path.exists(ok_file):
+ os.unlink(ok_file)
+ if os.path.exists(pth_file):
+ os.unlink(pth_file)
+ if not self.multi_version:
+ log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
+ return False
+
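+ # The probe above works as follows ('-E' makes the child interpreter
+ # ignore PYTHONPATH, so only .pth processing can create the marker):
+ #
+ #   1. write test-easy-install-<pid>.pth whose one-liner creates
+ #      test-easy-install-<pid>.pth.ok when executed at startup;
+ #   2. spawn '<python> -E -c pass' as a fresh process;
+ #   3. if the .ok marker now exists, the directory processes .pth files.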
+ def install_egg_scripts(self, dist):
+ """Write all the scripts for `dist`, unless scripts are excluded"""
+ if not self.exclude_scripts and dist.metadata_isdir('scripts'):
+ for script_name in dist.metadata_listdir('scripts'):
+ if dist.metadata_isdir('scripts/' + script_name):
+ # The "script" is a directory, likely a Python 3
+ # __pycache__ directory, so skip it.
+ continue
+ self.install_script(
+ dist, script_name,
+ dist.get_metadata('scripts/' + script_name)
+ )
+ self.install_wrapper_scripts(dist)
+
+ def add_output(self, path):
+ if os.path.isdir(path):
+ for base, dirs, files in os.walk(path):
+ for filename in files:
+ self.outputs.append(os.path.join(base, filename))
+ else:
+ self.outputs.append(path)
+
+ def not_editable(self, spec):
+ if self.editable:
+ raise DistutilsArgError(
+ "Invalid argument %r: you can't use filenames or URLs "
+ "with --editable (except via the --find-links option)."
+ % (spec,)
+ )
+
+ def check_editable(self, spec):
+ if not self.editable:
+ return
+
+ if os.path.exists(os.path.join(self.build_directory, spec.key)):
+ raise DistutilsArgError(
+ "%r already exists in %s; can't do a checkout there" %
+ (spec.key, self.build_directory)
+ )
+
+ @contextlib.contextmanager
+ def _tmpdir(self):
+ tmpdir = tempfile.mkdtemp(prefix=u"easy_install-")
+ try:
+ # cast to str as workaround for #709 and #710 and #712
+ yield str(tmpdir)
+ finally:
+ if os.path.exists(tmpdir): rmtree(tmpdir)
+
+ def easy_install(self, spec, deps=False):
+ with self._tmpdir() as tmpdir:
+ if not isinstance(spec, Requirement):
+ if URL_SCHEME(spec):
+ # It's a url, download it to tmpdir and process
+ self.not_editable(spec)
+ dl = self.package_index.download(spec, tmpdir)
+ return self.install_item(None, dl, tmpdir, deps, True)
+
+ elif os.path.exists(spec):
+ # Existing file or directory, just process it directly
+ self.not_editable(spec)
+ return self.install_item(None, spec, tmpdir, deps, True)
+ else:
+ spec = parse_requirement_arg(spec)
+
+ self.check_editable(spec)
+ dist = self.package_index.fetch_distribution(
+ spec, tmpdir, self.upgrade, self.editable,
+ not self.always_copy, self.local_index
+ )
+ if dist is None:
+ msg = "Could not find suitable distribution for %r" % spec
+ if self.always_copy:
+ msg += " (--always-copy skips system and development eggs)"
+ raise DistutilsError(msg)
+ elif dist.precedence == DEVELOP_DIST:
+ # .egg-info dists don't need installing, just process deps
+ self.process_distribution(spec, dist, deps, "Using")
+ return dist
+ else:
+ return self.install_item(spec, dist.location, tmpdir, deps)
+
+ def install_item(self, spec, download, tmpdir, deps, install_needed=False):
+
+ # Installation is also needed if the file is in tmpdir or is not an egg
+ install_needed = install_needed or self.always_copy
+ install_needed = install_needed or os.path.dirname(download) == tmpdir
+ install_needed = install_needed or not download.endswith('.egg')
+ install_needed = install_needed or (
+ self.always_copy_from is not None and
+ os.path.dirname(normalize_path(download)) ==
+ normalize_path(self.always_copy_from)
+ )
+
+ if spec and not install_needed:
+ # at this point, we know it's a local .egg, we just don't know if
+ # it's already installed.
+ for dist in self.local_index[spec.project_name]:
+ if dist.location == download:
+ break
+ else:
+ install_needed = True # it's not in the local index
+
+ log.info("Processing %s", os.path.basename(download))
+
+ if install_needed:
+ dists = self.install_eggs(spec, download, tmpdir)
+ for dist in dists:
+ self.process_distribution(spec, dist, deps)
+ else:
+ dists = [self.egg_distribution(download)]
+ self.process_distribution(spec, dists[0], deps, "Using")
+
+ if spec is not None:
+ for dist in dists:
+ if dist in spec:
+ return dist
+
+ def select_scheme(self, name):
+ """Sets the install directories by applying the install schemes."""
+ # it's the caller's problem if they supply a bad name!
+ scheme = INSTALL_SCHEMES[name]
+ for key in SCHEME_KEYS:
+ attrname = 'install_' + key
+ if getattr(self, attrname) is None:
+ setattr(self, attrname, scheme[key])
+
+ def process_distribution(self, requirement, dist, deps=True, *info):
+ self.update_pth(dist)
+ self.package_index.add(dist)
+ if dist in self.local_index[dist.key]:
+ self.local_index.remove(dist)
+ self.local_index.add(dist)
+ self.install_egg_scripts(dist)
+ self.installed_projects[dist.key] = dist
+ log.info(self.installation_report(requirement, dist, *info))
+ if (dist.has_metadata('dependency_links.txt') and
+ not self.no_find_links):
+ self.package_index.add_find_links(
+ dist.get_metadata_lines('dependency_links.txt')
+ )
+ if not deps and not self.always_copy:
+ return
+ elif requirement is not None and dist.key != requirement.key:
+ log.warn("Skipping dependencies for %s", dist)
+ return # XXX this is not the distribution we were looking for
+ elif requirement is None or dist not in requirement:
+ # if we wound up with a different version, resolve what we've got
+ distreq = dist.as_requirement()
+ requirement = Requirement(str(distreq))
+ log.info("Processing dependencies for %s", requirement)
+ try:
+ distros = WorkingSet([]).resolve(
+ [requirement], self.local_index, self.easy_install
+ )
+ except DistributionNotFound as e:
+ raise DistutilsError(str(e)) from e
+ except VersionConflict as e:
+ raise DistutilsError(e.report()) from e
+ if self.always_copy or self.always_copy_from:
+ # Force all the relevant distros to be copied or activated
+ for dist in distros:
+ if dist.key not in self.installed_projects:
+ self.easy_install(dist.as_requirement())
+ log.info("Finished processing dependencies for %s", requirement)
+
+ def should_unzip(self, dist):
+ if self.zip_ok is not None:
+ return not self.zip_ok
+ if dist.has_metadata('not-zip-safe'):
+ return True
+ if not dist.has_metadata('zip-safe'):
+ return True
+ return False
+
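+ # should_unzip() in brief: an explicit zip_ok setting always wins (note
+ # that --always-unzip maps to zip-ok=False via negative_opt above);
+ # otherwise the egg is unzipped unless its metadata explicitly marks it
+ # 'zip-safe'.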
+ def maybe_move(self, spec, dist_filename, setup_base):
+ dst = os.path.join(self.build_directory, spec.key)
+ if os.path.exists(dst):
+ msg = (
+ "%r already exists in %s; build directory %s will not be kept"
+ )
+ log.warn(msg, spec.key, self.build_directory, setup_base)
+ return setup_base
+ if os.path.isdir(dist_filename):
+ setup_base = dist_filename
+ else:
+ if os.path.dirname(dist_filename) == setup_base:
+ os.unlink(dist_filename) # get it out of the tmp dir
+ contents = os.listdir(setup_base)
+ if len(contents) == 1:
+ dist_filename = os.path.join(setup_base, contents[0])
+ if os.path.isdir(dist_filename):
+ # if the only thing there is a directory, move it instead
+ setup_base = dist_filename
+ ensure_directory(dst)
+ shutil.move(setup_base, dst)
+ return dst
+
+ def install_wrapper_scripts(self, dist):
+ if self.exclude_scripts:
+ return
+ for args in ScriptWriter.best().get_args(dist):
+ self.write_script(*args)
+
+ def install_script(self, dist, script_name, script_text, dev_path=None):
+ """Generate a legacy script wrapper and install it"""
+ spec = str(dist.as_requirement())
+ is_script = is_python_script(script_text, script_name)
+
+ if is_script:
+ body = self._load_template(dev_path) % locals()
+ script_text = ScriptWriter.get_header(script_text) + body
+ self.write_script(script_name, _to_bytes(script_text), 'b')
+
+ @staticmethod
+ def _load_template(dev_path):
+ """
+ There are a couple of template scripts in the package. This
+ function loads one of them and prepares it for use.
+ """
+ # See https://github.com/pypa/setuptools/issues/134 for info
+ # on script file naming and downstream issues with SVR4
+ name = 'script.tmpl'
+ if dev_path:
+ name = name.replace('.tmpl', ' (dev).tmpl')
+
+ raw_bytes = resource_string('setuptools', name)
+ return raw_bytes.decode('utf-8')
+
+ def write_script(self, script_name, contents, mode="t", blockers=()):
+ """Write an executable file to the scripts directory"""
+ self.delete_blockers( # clean up old .py/.pyw w/o a script
+ [os.path.join(self.script_dir, x) for x in blockers]
+ )
+ log.info("Installing %s script to %s", script_name, self.script_dir)
+ target = os.path.join(self.script_dir, script_name)
+ self.add_output(target)
+
+ if self.dry_run:
+ return
+
+ mask = current_umask()
+ ensure_directory(target)
+ if os.path.exists(target):
+ os.unlink(target)
+ with open(target, "w" + mode) as f:
+ f.write(contents)
+ chmod(target, 0o777 - mask)
+
+ def install_eggs(self, spec, dist_filename, tmpdir):
+ # .egg dirs or files are already built, so just return them
+ if dist_filename.lower().endswith('.egg'):
+ return [self.install_egg(dist_filename, tmpdir)]
+ elif dist_filename.lower().endswith('.exe'):
+ return [self.install_exe(dist_filename, tmpdir)]
+ elif dist_filename.lower().endswith('.whl'):
+ return [self.install_wheel(dist_filename, tmpdir)]
+
+ # Anything else, try to extract and build
+ setup_base = tmpdir
+ if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
+ unpack_archive(dist_filename, tmpdir, self.unpack_progress)
+ elif os.path.isdir(dist_filename):
+ setup_base = os.path.abspath(dist_filename)
+
+ if (setup_base.startswith(tmpdir) # something we downloaded
+ and self.build_directory and spec is not None):
+ setup_base = self.maybe_move(spec, dist_filename, setup_base)
+
+ # Find the setup.py file
+ setup_script = os.path.join(setup_base, 'setup.py')
+
+ if not os.path.exists(setup_script):
+ setups = glob(os.path.join(setup_base, '*', 'setup.py'))
+ if not setups:
+ raise DistutilsError(
+ "Couldn't find a setup script in %s" %
+ os.path.abspath(dist_filename)
+ )
+ if len(setups) > 1:
+ raise DistutilsError(
+ "Multiple setup scripts in %s" %
+ os.path.abspath(dist_filename)
+ )
+ setup_script = setups[0]
+
+ # Now run it, and return the result
+ if self.editable:
+ log.info(self.report_editable(spec, setup_script))
+ return []
+ else:
+ return self.build_and_install(setup_script, setup_base)
+
+ def egg_distribution(self, egg_path):
+ if os.path.isdir(egg_path):
+ metadata = PathMetadata(egg_path, os.path.join(egg_path,
+ 'EGG-INFO'))
+ else:
+ metadata = EggMetadata(zipimport.zipimporter(egg_path))
+ return Distribution.from_filename(egg_path, metadata=metadata)
+
+ def install_egg(self, egg_path, tmpdir):
+ destination = os.path.join(
+ self.install_dir,
+ os.path.basename(egg_path),
+ )
+ destination = os.path.abspath(destination)
+ if not self.dry_run:
+ ensure_directory(destination)
+
+ dist = self.egg_distribution(egg_path)
+ if not samefile(egg_path, destination):
+ if os.path.isdir(destination) and not os.path.islink(destination):
+ dir_util.remove_tree(destination, dry_run=self.dry_run)
+ elif os.path.exists(destination):
+ self.execute(
+ os.unlink,
+ (destination,),
+ "Removing " + destination,
+ )
+ try:
+ new_dist_is_zipped = False
+ if os.path.isdir(egg_path):
+ if egg_path.startswith(tmpdir):
+ f, m = shutil.move, "Moving"
+ else:
+ f, m = shutil.copytree, "Copying"
+ elif self.should_unzip(dist):
+ self.mkpath(destination)
+ f, m = self.unpack_and_compile, "Extracting"
+ else:
+ new_dist_is_zipped = True
+ if egg_path.startswith(tmpdir):
+ f, m = shutil.move, "Moving"
+ else:
+ f, m = shutil.copy2, "Copying"
+ self.execute(
+ f,
+ (egg_path, destination),
+ (m + " %s to %s") % (
+ os.path.basename(egg_path),
+ os.path.dirname(destination)
+ ),
+ )
+ update_dist_caches(
+ destination,
+ fix_zipimporter_caches=new_dist_is_zipped,
+ )
+ except Exception:
+ update_dist_caches(destination, fix_zipimporter_caches=False)
+ raise
+
+ self.add_output(destination)
+ return self.egg_distribution(destination)
+
+ def install_exe(self, dist_filename, tmpdir):
+ # See if it's valid, get data
+ cfg = extract_wininst_cfg(dist_filename)
+ if cfg is None:
+ raise DistutilsError(
+ "%s is not a valid distutils Windows .exe" % dist_filename
+ )
+ # Create a dummy distribution object until we build the real distro
+ dist = Distribution(
+ None,
+ project_name=cfg.get('metadata', 'name'),
+ version=cfg.get('metadata', 'version'), platform=get_platform(),
+ )
+
+ # Convert the .exe to an unpacked egg
+ egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg')
+ dist.location = egg_path
+ egg_tmp = egg_path + '.tmp'
+ _egg_info = os.path.join(egg_tmp, 'EGG-INFO')
+ pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
+ ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
+ dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
+ self.exe_to_egg(dist_filename, egg_tmp)
+
+ # Write EGG-INFO/PKG-INFO
+ if not os.path.exists(pkg_inf):
+ f = open(pkg_inf, 'w')
+ f.write('Metadata-Version: 1.0\n')
+ for k, v in cfg.items('metadata'):
+ if k != 'target_version':
+ f.write('%s: %s\n' % (k.replace('_', '-').title(), v))
+ f.close()
+ script_dir = os.path.join(_egg_info, 'scripts')
+ # delete entry-point scripts to avoid duping
+ self.delete_blockers([
+ os.path.join(script_dir, args[0])
+ for args in ScriptWriter.get_args(dist)
+ ])
+ # Build .egg file from tmpdir
+ bdist_egg.make_zipfile(
+ egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run,
+ )
+ # install the .egg
+ return self.install_egg(egg_path, tmpdir)
+
+ def exe_to_egg(self, dist_filename, egg_tmp):
+ """Extract a bdist_wininst to the directories an egg would use"""
+ # Check for .pth file and set up prefix translations
+ prefixes = get_exe_prefixes(dist_filename)
+ to_compile = []
+ native_libs = []
+ top_level = {}
+
+ def process(src, dst):
+ s = src.lower()
+ for old, new in prefixes:
+ if s.startswith(old):
+ src = new + src[len(old):]
+ parts = src.split('/')
+ dst = os.path.join(egg_tmp, *parts)
+ dl = dst.lower()
+ if dl.endswith('.pyd') or dl.endswith('.dll'):
+ parts[-1] = bdist_egg.strip_module(parts[-1])
+ top_level[os.path.splitext(parts[0])[0]] = 1
+ native_libs.append(src)
+ elif dl.endswith('.py') and old != 'SCRIPTS/':
+ top_level[os.path.splitext(parts[0])[0]] = 1
+ to_compile.append(dst)
+ return dst
+ if not src.endswith('.pth'):
+ log.warn("WARNING: can't process %s", src)
+ return None
+
+ # extract, tracking .pyd/.dll->native_libs and .py -> to_compile
+ unpack_archive(dist_filename, egg_tmp, process)
+ stubs = []
+ for res in native_libs:
+ if res.lower().endswith('.pyd'): # create stubs for .pyd's
+ parts = res.split('/')
+ resource = parts[-1]
+ parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py'
+ pyfile = os.path.join(egg_tmp, *parts)
+ to_compile.append(pyfile)
+ stubs.append(pyfile)
+ bdist_egg.write_stub(resource, pyfile)
+ self.byte_compile(to_compile) # compile .py's
+ bdist_egg.write_safety_flag(
+ os.path.join(egg_tmp, 'EGG-INFO'),
+ bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
+
+ for name in 'top_level', 'native_libs':
+ if locals()[name]:
+ txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt')
+ if not os.path.exists(txt):
+ f = open(txt, 'w')
+ f.write('\n'.join(locals()[name]) + '\n')
+ f.close()
+
+ def install_wheel(self, wheel_path, tmpdir):
+ wheel = Wheel(wheel_path)
+ assert wheel.is_compatible()
+ destination = os.path.join(self.install_dir, wheel.egg_name())
+ destination = os.path.abspath(destination)
+ if not self.dry_run:
+ ensure_directory(destination)
+ if os.path.isdir(destination) and not os.path.islink(destination):
+ dir_util.remove_tree(destination, dry_run=self.dry_run)
+ elif os.path.exists(destination):
+ self.execute(
+ os.unlink,
+ (destination,),
+ "Removing " + destination,
+ )
+ try:
+ self.execute(
+ wheel.install_as_egg,
+ (destination,),
+ ("Installing %s to %s") % (
+ os.path.basename(wheel_path),
+ os.path.dirname(destination)
+ ),
+ )
+ finally:
+ update_dist_caches(destination, fix_zipimporter_caches=False)
+ self.add_output(destination)
+ return self.egg_distribution(destination)
+
+ __mv_warning = textwrap.dedent("""
+ Because this distribution was installed --multi-version, before you can
+ import modules from this package in an application, you will need to
+ 'import pkg_resources' and then use a 'require()' call similar to one of
+ these examples, in order to select the desired version:
+
+ pkg_resources.require("%(name)s") # latest installed version
+ pkg_resources.require("%(name)s==%(version)s") # this exact version
+ pkg_resources.require("%(name)s>=%(version)s") # this version or higher
+ """).lstrip() # noqa
+
+ __id_warning = textwrap.dedent("""
+ Note also that the installation directory must be on sys.path at runtime for
+ this to work. (e.g. by being the application's script directory, by being on
+ PYTHONPATH, or by being added to sys.path by your code.)
+ """) # noqa
+
+ def installation_report(self, req, dist, what="Installed"):
+ """Helpful installation message for display to package users"""
+ msg = "\n%(what)s %(eggloc)s%(extras)s"
+ if self.multi_version and not self.no_report:
+ msg += '\n' + self.__mv_warning
+ if self.install_dir not in map(normalize_path, sys.path):
+ msg += '\n' + self.__id_warning
+
+ eggloc = dist.location
+ name = dist.project_name
+ version = dist.version
+ extras = '' # TODO: self.report_extras(req, dist)
+ return msg % locals()
+
+ __editable_msg = textwrap.dedent("""
+ Extracted editable version of %(spec)s to %(dirname)s
+
+ If it uses setuptools in its setup script, you can activate it in
+ "development" mode by going to that directory and running::
+
+ %(python)s setup.py develop
+
+ See the setuptools documentation for the "develop" command for more info.
+ """).lstrip() # noqa
+
+ def report_editable(self, spec, setup_script):
+ dirname = os.path.dirname(setup_script)
+ python = sys.executable
+ return '\n' + self.__editable_msg % locals()
+
+ def run_setup(self, setup_script, setup_base, args):
+ sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
+ sys.modules.setdefault('distutils.command.egg_info', egg_info)
+
+ args = list(args)
+ if self.verbose > 2:
+ v = 'v' * (self.verbose - 1)
+ args.insert(0, '-' + v)
+ elif self.verbose < 2:
+ args.insert(0, '-q')
+ if self.dry_run:
+ args.insert(0, '-n')
+ log.info(
+ "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args)
+ )
+ try:
+ run_setup(setup_script, args)
+ except SystemExit as v:
+ raise DistutilsError(
+ "Setup script exited with %s" % (v.args[0],)
+ ) from v
+
+ def build_and_install(self, setup_script, setup_base):
+ args = ['bdist_egg', '--dist-dir']
+
+ dist_dir = tempfile.mkdtemp(
+ prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
+ )
+ try:
+ self._set_fetcher_options(os.path.dirname(setup_script))
+ args.append(dist_dir)
+
+ self.run_setup(setup_script, setup_base, args)
+ all_eggs = Environment([dist_dir])
+ eggs = []
+ for key in all_eggs:
+ for dist in all_eggs[key]:
+ eggs.append(self.install_egg(dist.location, setup_base))
+ if not eggs and not self.dry_run:
+ log.warn("No eggs found in %s (setup script problem?)",
+ dist_dir)
+ return eggs
+ finally:
+ rmtree(dist_dir)
+ log.set_verbosity(self.verbose) # restore our log verbosity
+
+ def _set_fetcher_options(self, base):
+ """
+ When easy_install is about to run bdist_egg on a source dist, that
+ source dist might have 'setup_requires' directives, requiring
+ additional fetching. Ensure the fetcher options given to easy_install
+ are available to that command as well.
+ """
+ # find the fetch options from easy_install and write them out
+ # to the setup.cfg file.
+ ei_opts = self.distribution.get_option_dict('easy_install').copy()
+ fetch_directives = (
+ 'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
+ )
+ fetch_options = {}
+ for key, val in ei_opts.items():
+ if key not in fetch_directives:
+ continue
+ fetch_options[key.replace('_', '-')] = val[1]
+ # create a settings dictionary suitable for `edit_config`
+ settings = dict(easy_install=fetch_options)
+ cfg_filename = os.path.join(base, 'setup.cfg')
+ setopt.edit_config(cfg_filename, settings)
+
+ def update_pth(self, dist):
+ if self.pth_file is None:
+ return
+
+ for d in self.pth_file[dist.key]: # drop old entries
+ if self.multi_version or d.location != dist.location:
+ log.info("Removing %s from easy-install.pth file", d)
+ self.pth_file.remove(d)
+ if d.location in self.shadow_path:
+ self.shadow_path.remove(d.location)
+
+ if not self.multi_version:
+ if dist.location in self.pth_file.paths:
+ log.info(
+ "%s is already the active version in easy-install.pth",
+ dist,
+ )
+ else:
+ log.info("Adding %s to easy-install.pth file", dist)
+ self.pth_file.add(dist) # add new entry
+ if dist.location not in self.shadow_path:
+ self.shadow_path.append(dist.location)
+
+ if not self.dry_run:
+ self.pth_file.save()
+
+ if dist.key == 'setuptools':
+ # Ensure that setuptools itself never becomes unavailable!
+ # XXX should this check for latest version?
+ filename = os.path.join(self.install_dir, 'setuptools.pth')
+ if os.path.islink(filename):
+ os.unlink(filename)
+ f = open(filename, 'wt')
+ f.write(self.pth_file.make_relative(dist.location) + '\n')
+ f.close()
+
+ def unpack_progress(self, src, dst):
+ # Progress filter for unpacking
+ log.debug("Unpacking %s to %s", src, dst)
+ return dst # only unpack-and-compile skips files for dry run
+
+ def unpack_and_compile(self, egg_path, destination):
+ to_compile = []
+ to_chmod = []
+
+ def pf(src, dst):
+ if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
+ to_compile.append(dst)
+ elif dst.endswith('.dll') or dst.endswith('.so'):
+ to_chmod.append(dst)
+ self.unpack_progress(src, dst)
+ return dst if not self.dry_run else None
+
+ unpack_archive(egg_path, destination, pf)
+ self.byte_compile(to_compile)
+ if not self.dry_run:
+ for f in to_chmod:
+ mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755
+ chmod(f, mode)
+
+ def byte_compile(self, to_compile):
+ if sys.dont_write_bytecode:
+ return
+
+ from distutils.util import byte_compile
+
+ try:
+ # try to make the byte compile messages quieter
+ log.set_verbosity(self.verbose - 1)
+
+ byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
+ if self.optimize:
+ byte_compile(
+ to_compile, optimize=self.optimize, force=1,
+ dry_run=self.dry_run,
+ )
+ finally:
+ log.set_verbosity(self.verbose) # restore original verbosity
+
+ __no_default_msg = textwrap.dedent("""
+ bad install directory or PYTHONPATH
+
+ You are attempting to install a package to a directory that is not
+ on PYTHONPATH and which Python does not read ".pth" files from. The
+ installation directory you specified (via --install-dir, --prefix, or
+ the distutils default setting) was:
+
+ %s
+
+ and your PYTHONPATH environment variable currently contains:
+
+ %r
+
+ Here are some of your options for correcting the problem:
+
+ * You can choose a different installation directory, i.e., one that is
+ on PYTHONPATH or supports .pth files
+
+ * You can add the installation directory to the PYTHONPATH environment
+ variable. (It must then also be on PYTHONPATH whenever you run
+ Python and want to use the package(s) you are installing.)
+
+ * You can set up the installation directory to support ".pth" files by
+ using one of the approaches described here:
+
+ https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations
+
+
+ Please make the appropriate changes for your system and try again.
+ """).strip()
+
+ def create_home_path(self):
+ """Create directories under ~."""
+ if not self.user:
+ return
+ home = convert_path(os.path.expanduser("~"))
+ for name, path in self.config_vars.items():
+ if path.startswith(home) and not os.path.isdir(path):
+ self.debug_print("os.makedirs('%s', 0o700)" % path)
+ os.makedirs(path, 0o700)
+
+ INSTALL_SCHEMES = dict(
+ posix=dict(
+ install_dir='$base/lib/python$py_version_short/site-packages',
+ script_dir='$base/bin',
+ ),
+ )
+
+ DEFAULT_SCHEME = dict(
+ install_dir='$base/Lib/site-packages',
+ script_dir='$base/Scripts',
+ )
+
+ def _expand(self, *attrs):
+ config_vars = self.get_finalized_command('install').config_vars
+
+ if self.prefix:
+ # Set default install_dir/scripts from --prefix
+ config_vars = config_vars.copy()
+ config_vars['base'] = self.prefix
+ scheme = self.INSTALL_SCHEMES.get(os.name, self.DEFAULT_SCHEME)
+ for attr, val in scheme.items():
+ if getattr(self, attr, None) is None:
+ setattr(self, attr, val)
+
+ from distutils.util import subst_vars
+
+ for attr in attrs:
+ val = getattr(self, attr)
+ if val is not None:
+ val = subst_vars(val, config_vars)
+ if os.name == 'posix':
+ val = os.path.expanduser(val)
+ setattr(self, attr, val)
+
+
+def _pythonpath():
+ items = os.environ.get('PYTHONPATH', '').split(os.pathsep)
+ return filter(None, items)
+
+
+def get_site_dirs():
+ """
+ Return a list of 'site' dirs
+ """
+
+ sitedirs = []
+
+ # start with PYTHONPATH
+ sitedirs.extend(_pythonpath())
+
+ prefixes = [sys.prefix]
+ if sys.exec_prefix != sys.prefix:
+ prefixes.append(sys.exec_prefix)
+ for prefix in prefixes:
+ if prefix:
+ if sys.platform in ('os2emx', 'riscos'):
+ sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
+ elif os.sep == '/':
+ sitedirs.extend([
+ os.path.join(
+ prefix,
+ "lib",
+ "python{}.{}".format(*sys.version_info),
+ "site-packages",
+ ),
+ os.path.join(prefix, "lib", "site-python"),
+ ])
+ else:
+ sitedirs.extend([
+ prefix,
+ os.path.join(prefix, "lib", "site-packages"),
+ ])
+ if sys.platform == 'darwin':
+ # for framework builds *only* we add the standard Apple
+ # locations. Currently only per-user, but /Library and
+ # /Network/Library could be added too
+ if 'Python.framework' in prefix:
+ home = os.environ.get('HOME')
+ if home:
+ home_sp = os.path.join(
+ home,
+ 'Library',
+ 'Python',
+ '{}.{}'.format(*sys.version_info),
+ 'site-packages',
+ )
+ sitedirs.append(home_sp)
+ lib_paths = get_path('purelib'), get_path('platlib')
+ for site_lib in lib_paths:
+ if site_lib not in sitedirs:
+ sitedirs.append(site_lib)
+
+ if site.ENABLE_USER_SITE:
+ sitedirs.append(site.USER_SITE)
+
+ try:
+ sitedirs.extend(site.getsitepackages())
+ except AttributeError:
+ pass
+
+ sitedirs = list(map(normalize_path, sitedirs))
+
+ return sitedirs
+
+
+def expand_paths(inputs):
+ """Yield sys.path directories that might contain "old-style" packages"""
+
+ seen = {}
+
+ for dirname in inputs:
+ dirname = normalize_path(dirname)
+ if dirname in seen:
+ continue
+
+ seen[dirname] = 1
+ if not os.path.isdir(dirname):
+ continue
+
+ files = os.listdir(dirname)
+ yield dirname, files
+
+ for name in files:
+ if not name.endswith('.pth'):
+ # We only care about the .pth files
+ continue
+ if name in ('easy-install.pth', 'setuptools.pth'):
+ # Ignore .pth files that we control
+ continue
+
+ # Read the .pth file
+ f = open(os.path.join(dirname, name))
+ lines = list(yield_lines(f))
+ f.close()
+
+ # Yield existing non-dupe, non-import directory lines from it
+ for line in lines:
+ if not line.startswith("import"):
+ line = normalize_path(line.rstrip())
+ if line not in seen:
+ seen[line] = 1
+ if not os.path.isdir(line):
+ continue
+ yield line, os.listdir(line)
+
+
+def extract_wininst_cfg(dist_filename):
+ """Extract configuration data from a bdist_wininst .exe
+
+ Returns a configparser.RawConfigParser, or None
+ """
+ f = open(dist_filename, 'rb')
+ try:
+ endrec = zipfile._EndRecData(f)
+ if endrec is None:
+ return None
+
+ prepended = (endrec[9] - endrec[5]) - endrec[6]
+ if prepended < 12: # no wininst data here
+ return None
+ f.seek(prepended - 12)
+
+ tag, cfglen, bmlen = struct.unpack("<iii", f.read(12))
+ if tag not in (0x1234567A, 0x1234567B):
+ return None # not a valid tag
+
+ f.seek(prepended - (12 + cfglen))
+ init = {'version': '', 'target_version': ''}
+ cfg = configparser.RawConfigParser(init)
+ try:
+ part = f.read(cfglen)
+ # Read up to the first null byte.
+ config = part.split(b'\0', 1)[0]
+ # Now the config is in bytes, but for RawConfigParser, it should
+ # be text, so decode it.
+ config = config.decode(sys.getfilesystemencoding())
+ cfg.read_file(io.StringIO(config))  # readfp() was removed in Python 3.12
+ except configparser.Error:
+ return None
+ if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
+ return None
+ return cfg
+
+ finally:
+ f.close()
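+ # Layout assumed by extract_wininst_cfg() for the data bdist_wininst
+ # prepends to the embedded zipfile (offsets relative to the end of that
+ # prepended block):
+ #
+ #   ... setup data ... | config (cfglen bytes) | tag, cfglen, bmlen
+ #                                                 (12-byte '<iii' footer,
+ #                                                  tag 0x1234567A or -7B)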
+
+
+def get_exe_prefixes(exe_filename):
+ """Get exe->egg path translations for a given .exe file"""
+
+ prefixes = [
+ ('PURELIB/', ''),
+ ('PLATLIB/pywin32_system32', ''),
+ ('PLATLIB/', ''),
+ ('SCRIPTS/', 'EGG-INFO/scripts/'),
+ ('DATA/lib/site-packages', ''),
+ ]
+ z = zipfile.ZipFile(exe_filename)
+ try:
+ for info in z.infolist():
+ name = info.filename
+ parts = name.split('/')
+ if len(parts) == 3 and parts[2] == 'PKG-INFO':
+ if parts[1].endswith('.egg-info'):
+ prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/'))
+ break
+ if len(parts) != 2 or not name.endswith('.pth'):
+ continue
+ if name.endswith('-nspkg.pth'):
+ continue
+ if parts[0].upper() in ('PURELIB', 'PLATLIB'):
+ contents = z.read(name).decode()
+ for pth in yield_lines(contents):
+ pth = pth.strip().replace('\\', '/')
+ if not pth.startswith('import'):
+ prefixes.append((('%s/%s/' % (parts[0], pth)), ''))
+ finally:
+ z.close()
+ prefixes = [(x.lower(), y) for x, y in prefixes]
+ prefixes.sort()
+ prefixes.reverse()
+ return prefixes
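+ # Example translations using the default prefixes (paths hypothetical):
+ #
+ #   'PLATLIB/foo/bar.py'    -> 'foo/bar.py'
+ #   'SCRIPTS/foo-script.py' -> 'EGG-INFO/scripts/foo-script.py'
+ #
+ # Matching is case-insensitive, and because the list is sorted and then
+ # reversed, longer (more specific) prefixes are tried first.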
+
+
+class PthDistributions(Environment):
+ """A .pth file with Distribution paths in it"""
+
+ dirty = False
+
+ def __init__(self, filename, sitedirs=()):
+ self.filename = filename
+ self.sitedirs = list(map(normalize_path, sitedirs))
+ self.basedir = normalize_path(os.path.dirname(self.filename))
+ self._load()
+ Environment.__init__(self, [], None, None)
+ for path in yield_lines(self.paths):
+ list(map(self.add, find_distributions(path, True)))
+
+ def _load(self):
+ self.paths = []
+ saw_import = False
+ seen = dict.fromkeys(self.sitedirs)
+ if os.path.isfile(self.filename):
+ f = open(self.filename, 'rt')
+ for line in f:
+ if line.startswith('import'):
+ saw_import = True
+ continue
+ path = line.rstrip()
+ self.paths.append(path)
+ if not path.strip() or path.strip().startswith('#'):
+ continue
+ # skip non-existent paths, in case somebody deleted a package
+ # manually, and duplicate paths as well
+ path = self.paths[-1] = normalize_path(
+ os.path.join(self.basedir, path)
+ )
+ if not os.path.exists(path) or path in seen:
+ self.paths.pop() # skip it
+ self.dirty = True # we cleaned up, so we're dirty now :)
+ continue
+ seen[path] = 1
+ f.close()
+
+ if self.paths and not saw_import:
+ self.dirty = True # ensure anything we touch has import wrappers
+ while self.paths and not self.paths[-1].strip():
+ self.paths.pop()
+
+ def save(self):
+ """Write changed .pth file back to disk"""
+ if not self.dirty:
+ return
+
+ rel_paths = list(map(self.make_relative, self.paths))
+ if rel_paths:
+ log.debug("Saving %s", self.filename)
+ lines = self._wrap_lines(rel_paths)
+ data = '\n'.join(lines) + '\n'
+
+ if os.path.islink(self.filename):
+ os.unlink(self.filename)
+ with open(self.filename, 'wt') as f:
+ f.write(data)
+
+ elif os.path.exists(self.filename):
+ log.debug("Deleting empty %s", self.filename)
+ os.unlink(self.filename)
+
+ self.dirty = False
+
+ @staticmethod
+ def _wrap_lines(lines):
+ return lines
+
+ def add(self, dist):
+ """Add `dist` to the distribution map"""
+ new_path = (
+ dist.location not in self.paths and (
+ dist.location not in self.sitedirs or
+ # account for '.' being in PYTHONPATH
+ dist.location == os.getcwd()
+ )
+ )
+ if new_path:
+ self.paths.append(dist.location)
+ self.dirty = True
+ Environment.add(self, dist)
+
+ def remove(self, dist):
+ """Remove `dist` from the distribution map"""
+ while dist.location in self.paths:
+ self.paths.remove(dist.location)
+ self.dirty = True
+ Environment.remove(self, dist)
+
+ def make_relative(self, path):
+ npath, last = os.path.split(normalize_path(path))
+ baselen = len(self.basedir)
+ parts = [last]
+ sep = '/' if os.altsep == '/' else os.sep
+ while len(npath) >= baselen:
+ if npath == self.basedir:
+ parts.append(os.curdir)
+ parts.reverse()
+ return sep.join(parts)
+ npath, last = os.path.split(npath)
+ parts.append(last)
+ else:
+ return path
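+ # Example (hypothetical paths): with basedir '/opt/site', make_relative
+ # maps '/opt/site/pkgs/foo.egg' to './pkgs/foo.egg', while a path
+ # outside basedir, such as '/elsewhere/foo.egg', is returned unchanged.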
+
+
+class RewritePthDistributions(PthDistributions):
+ @classmethod
+ def _wrap_lines(cls, lines):
+ yield cls.prelude
+ for line in lines:
+ yield line
+ yield cls.postlude
+
+ prelude = _one_liner("""
+ import sys
+ sys.__plen = len(sys.path)
+ """)
+ postlude = _one_liner("""
+ import sys
+ new = sys.path[sys.__plen:]
+ del sys.path[sys.__plen:]
+ p = getattr(sys, '__egginsert', 0)
+ sys.path[p:p] = new
+ sys.__egginsert = p + len(new)
+ """)
+
+
+if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite':
+ PthDistributions = RewritePthDistributions
+
+
+def _first_line_re():
+ """
+ Return a regular expression based on first_line_re suitable for matching
+ strings.
+ """
+ if isinstance(first_line_re.pattern, str):
+ return first_line_re
+
+ # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
+ return re.compile(first_line_re.pattern.decode())
+
+
+def auto_chmod(func, arg, exc):
+ if func in [os.unlink, os.remove] and os.name == 'nt':
+ chmod(arg, stat.S_IWRITE)
+ return func(arg)
+ # Re-raise the original exception; the previous code here attempted to
+ # raise a tuple, which is invalid in Python 3.
+ raise exc[1]
+
+
+def update_dist_caches(dist_path, fix_zipimporter_caches):
+ """
+ Fix any globally cached `dist_path` related data
+
+ `dist_path` should be a path of a newly installed egg distribution (zipped
+ or unzipped).
+
+ sys.path_importer_cache contains finder objects that have been cached when
+ importing data from the original distribution. Any such finders need to be
+ cleared since the replacement distribution might be packaged differently,
+ e.g. a zipped egg distribution might get replaced with an unzipped egg
+ folder or vice versa. Having the old finders cached may then cause Python
+ to attempt loading modules from the replacement distribution using an
+ incorrect loader.
+
+ zipimport.zipimporter objects are Python loaders charged with importing
+ data packaged inside zip archives. If stale loaders referencing the
+ original distribution are left behind, they can fail to load modules from
+ the replacement distribution. E.g. if an old zipimport.zipimporter instance
+ is used to load data from a new zipped egg archive, it may cause the
+ operation to attempt to locate the requested data in the wrong location -
+ one indicated by the original distribution's zip archive directory
+ information. Such an operation may then fail outright, e.g. report having
+ read a 'bad local file header', or even worse, it may fail silently &
+ return invalid data.
+
+ zipimport._zip_directory_cache contains cached zip archive directory
+ information for all existing zipimport.zipimporter instances and all such
+ instances connected to the same archive share the same cached directory
+ information.
+
+ If asked, and the underlying Python implementation allows it, we can fix
+ all existing zipimport.zipimporter instances instead of having to track
+ them down and remove them one by one, by updating their shared cached zip
+ archive directory information. This, of course, assumes that the
+ replacement distribution is packaged as a zipped egg.
+
+ If not asked to fix existing zipimport.zipimporter instances, we still do
+ our best to clear any remaining zipimport.zipimporter related cached data
+ that might somehow later get used when attempting to load data from the new
+ distribution and thus cause such load operations to fail. Note that when
+ tracking down such remaining stale data, we can not catch every conceivable
+ usage from here, and we clear only those that we know of and have found to
+ cause problems if left alive. Any remaining caches should be updated by
+ whomever is in charge of maintaining them, i.e. they should be ready to
+ handle us replacing their zip archives with new distributions at runtime.
+
+ """
+ # There are several other known sources of stale zipimport.zipimporter
+ # instances that we do not clear here, but might if ever given a reason to
+ # do so:
+ # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
+ # set') may contain distributions which may in turn contain their
+ # zipimport.zipimporter loaders.
+ # * Several zipimport.zipimporter loaders held by local variables further
+ # up the function call stack when running the setuptools installation.
+ # * Already loaded modules may have their __loader__ attribute set to the
+ # exact loader instance used when importing them. Python 3.4 docs state
+ # that this information is intended mostly for introspection and so is
+ # not expected to cause us problems.
+ normalized_path = normalize_path(dist_path)
+ _uncache(normalized_path, sys.path_importer_cache)
+ if fix_zipimporter_caches:
+ _replace_zip_directory_cache_data(normalized_path)
+ else:
+ # Here, even though we do not want to fix existing and now stale
+ # zipimporter cache information, we still want to remove it. Related to
+ # Python's zip archive directory information cache, we clear each of
+ # its stale entries in two phases:
+ # 1. Clear the entry so attempting to access zip archive information
+ # via any existing stale zipimport.zipimporter instances fails.
+ # 2. Remove the entry from the cache so any newly constructed
+ # zipimport.zipimporter instances do not end up using old stale
+ # zip archive directory information.
+ # This whole stale data removal step does not seem strictly necessary,
+ # but has been left in because it was done before we started replacing
+ # the zip archive directory information cache content if possible, and
+ # there are no relevant unit tests that we can depend on to tell us if
+ # this is really needed.
+ _remove_and_clear_zip_directory_cache_data(normalized_path)
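+ # Typical call site (see install_egg()/install_wheel() above):
+ #
+ #   update_dist_caches(destination,
+ #                      fix_zipimporter_caches=new_dist_is_zipped)
+ #
+ # i.e. zip directory caches are only rewritten in place when the freshly
+ # installed distribution is itself a zipped egg.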
+
+
+def _collect_zipimporter_cache_entries(normalized_path, cache):
+ """
+ Return zipimporter cache entry keys related to a given normalized path.
+
+ Alternative path spellings (e.g. those using different character case or
+ those using alternative path separators) related to the same path are
+ included. Any sub-path entries are included as well, i.e. those
+ corresponding to zip archives embedded in other zip archives.
+
+ """
+ result = []
+ prefix_len = len(normalized_path)
+ for p in cache:
+ np = normalize_path(p)
+ if (np.startswith(normalized_path) and
+ np[prefix_len:prefix_len + 1] in (os.sep, '')):
+ result.append(p)
+ return result
+
+
+def _update_zipimporter_cache(normalized_path, cache, updater=None):
+ """
+ Update zipimporter cache data for a given normalized path.
+
+ Any sub-path entries are processed as well, i.e. those corresponding to zip
+ archives embedded in other zip archives.
+
+ Given updater is a callable taking a cache entry key and the original entry
+ (after already removing the entry from the cache), and expected to update
+ the entry and possibly return a new one to be inserted in its place.
+ Returning None indicates that the entry should not be replaced with a new
+ one. If no updater is given, the cache entries are simply removed without
+ any additional processing, the same as if the updater simply returned None.
+
+ """
+ for p in _collect_zipimporter_cache_entries(normalized_path, cache):
+ # N.B. pypy's custom zipimport._zip_directory_cache implementation does
+ # not support the complete dict interface:
+ # * Does not support item assignment, thus not allowing this function
+ # to be used only for removing existing cache entries.
+ # * Does not support the dict.pop() method, forcing us to use the
+ # get/del patterns instead. For more detailed information see the
+ # following links:
+ # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420
+ # http://bit.ly/2h9itJX
+ old_entry = cache[p]
+ del cache[p]
+ new_entry = updater and updater(p, old_entry)
+ if new_entry is not None:
+ cache[p] = new_entry
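+ # A minimal updater sketch (illustrative only): log and drop each entry.
+ #
+ #   def _drop(path, old_entry):
+ #       log.debug("Dropping zipimporter cache entry %s", path)
+ #       return None  # None means "do not re-insert this entry"
+ #
+ #   _update_zipimporter_cache(some_path, cache, updater=_drop)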
+
+
+def _uncache(normalized_path, cache):
+ _update_zipimporter_cache(normalized_path, cache)
+
+
+def _remove_and_clear_zip_directory_cache_data(normalized_path):
+ def clear_and_remove_cached_zip_archive_directory_data(path, old_entry):
+ old_entry.clear()
+
+ _update_zipimporter_cache(
+ normalized_path, zipimport._zip_directory_cache,
+ updater=clear_and_remove_cached_zip_archive_directory_data)
+
+
+# PyPy Python implementation does not allow directly writing to the
+# zipimport._zip_directory_cache and so prevents us from attempting to correct
+# its content. The best we can do there is clear the problematic cache content
+# and have PyPy repopulate it as needed. The downside is that if there are any
+# stale zipimport.zipimporter instances laying around, attempting to use them
+# will fail due to not having its zip archive directory information available
+# instead of being automatically corrected to use the new correct zip archive
+# directory information.
+if '__pypy__' in sys.builtin_module_names:
+ _replace_zip_directory_cache_data = \
+ _remove_and_clear_zip_directory_cache_data
+else:
+
+ def _replace_zip_directory_cache_data(normalized_path):
+ def replace_cached_zip_archive_directory_data(path, old_entry):
+ # N.B. In theory, we could load the zip directory information just
+ # once for all updated path spellings, and then copy it locally and
+ # update its contained path strings to contain the correct
+ # spelling, but that seems like a way too invasive move (this cache
+ # structure is not officially documented anywhere and could in
+ # theory change with new Python releases) for no significant
+ # benefit.
+ old_entry.clear()
+ zipimport.zipimporter(path)
+ old_entry.update(zipimport._zip_directory_cache[path])
+ return old_entry
+
+ _update_zipimporter_cache(
+ normalized_path, zipimport._zip_directory_cache,
+ updater=replace_cached_zip_archive_directory_data)
+
+
+def is_python(text, filename='<string>'):
+ "Is this string a valid Python script?"
+ try:
+ compile(text, filename, 'exec')
+ except (SyntaxError, TypeError):
+ return False
+ else:
+ return True
+
+
+def is_sh(executable):
+ """Determine if the specified executable is a .sh (contains a #! line)"""
+ try:
+ with io.open(executable, encoding='latin-1') as fp:
+ magic = fp.read(2)
+ except (OSError, IOError):
+ return executable
+ return magic == '#!'
+
+
+def nt_quote_arg(arg):
+ """Quote a command line argument according to Windows parsing rules"""
+ return subprocess.list2cmdline([arg])
+
+
+def is_python_script(script_text, filename):
+ """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
+ """
+ if filename.endswith('.py') or filename.endswith('.pyw'):
+ return True # extension says it's Python
+ if is_python(script_text, filename):
+ return True # it's syntactically valid Python
+ if script_text.startswith('#!'):
+ # It begins with a '#!' line, so check if 'python' is in it somewhere
+ return 'python' in script_text.splitlines()[0].lower()
+
+ return False # Not any Python I can recognize
+
+
+try:
+ from os import chmod as _chmod
+except ImportError:
+ # Jython compatibility
+ def _chmod(*args):
+ pass
+
+
+def chmod(path, mode):
+ log.debug("changing mode of %s to %o", path, mode)
+ try:
+ _chmod(path, mode)
+ except os.error as e:
+ log.debug("chmod failed: %s", e)
+
+
+class CommandSpec(list):
+ """
+ A command spec for a #! header, specified as a list of arguments akin to
+ those passed to Popen.
+ """
+
+ options = []
+ split_args = dict()
+
+ @classmethod
+ def best(cls):
+ """
+ Choose the best CommandSpec class based on environmental conditions.
+ """
+ return cls
+
+ @classmethod
+ def _sys_executable(cls):
+ _default = os.path.normpath(sys.executable)
+ return os.environ.get('__PYVENV_LAUNCHER__', _default)
+
+ @classmethod
+ def from_param(cls, param):
+ """
+ Construct a CommandSpec from a parameter to build_scripts, which may
+ be None.
+ """
+ if isinstance(param, cls):
+ return param
+ if isinstance(param, list):
+ return cls(param)
+ if param is None:
+ return cls.from_environment()
+ # otherwise, assume it's a string.
+ return cls.from_string(param)
+
+ @classmethod
+ def from_environment(cls):
+ return cls([cls._sys_executable()])
+
+ @classmethod
+ def from_string(cls, string):
+ """
+ Construct a command spec from a simple string representing a command
+ line parseable by shlex.split.
+ """
+ items = shlex.split(string, **cls.split_args)
+ return cls(items)
+
+ def install_options(self, script_text):
+ self.options = shlex.split(self._extract_options(script_text))
+ cmdline = subprocess.list2cmdline(self)
+ if not isascii(cmdline):
+ self.options[:0] = ['-x']
+
+ @staticmethod
+ def _extract_options(orig_script):
+ """
+ Extract any options from the first line of the script.
+ """
+ first = (orig_script + '\n').splitlines()[0]
+ match = _first_line_re().match(first)
+ options = match.group(1) or '' if match else ''
+ return options.strip()
+
+ def as_header(self):
+ return self._render(self + list(self.options))
+
+ @staticmethod
+ def _strip_quotes(item):
+ _QUOTES = '"\''
+ for q in _QUOTES:
+ if item.startswith(q) and item.endswith(q):
+ return item[1:-1]
+ return item
+
+ @staticmethod
+ def _render(items):
+ cmdline = subprocess.list2cmdline(
+ CommandSpec._strip_quotes(item.strip()) for item in items)
+ return '#!' + cmdline + '\n'
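+ # Example (hypothetical interpreter path):
+ #
+ #   CommandSpec.from_string('/usr/bin/python -E').as_header()
+ #   -> '#!/usr/bin/python -E\n'
+ #
+ # from_param(None) falls back to sys.executable (or the
+ # __PYVENV_LAUNCHER__ override) via from_environment().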
+
+
+# For pbr compat; will be removed in a future version.
+sys_executable = CommandSpec._sys_executable()
+
+
+class WindowsCommandSpec(CommandSpec):
+ split_args = dict(posix=False)
+
+
+class ScriptWriter:
+ """
+ Encapsulates behavior around writing entry point scripts for console and
+ gui apps.
+ """
+
+ template = textwrap.dedent(r"""
+ # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
+ import re
+ import sys
+
+ # for compatibility with easy_install; see #2198
+ __requires__ = %(spec)r
+
+ try:
+ from importlib.metadata import distribution
+ except ImportError:
+ try:
+ from importlib_metadata import distribution
+ except ImportError:
+ from pkg_resources import load_entry_point
+
+
+ def importlib_load_entry_point(spec, group, name):
+ dist_name, _, _ = spec.partition('==')
+ matches = (
+ entry_point
+ for entry_point in distribution(dist_name).entry_points
+ if entry_point.group == group and entry_point.name == name
+ )
+ return next(matches).load()
+
+
+ globals().setdefault('load_entry_point', importlib_load_entry_point)
+
+
+ if __name__ == '__main__':
+ sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
+ sys.exit(load_entry_point(%(spec)r, %(group)r, %(name)r)())
+ """).lstrip()
+
+ command_spec_class = CommandSpec
+
+ @classmethod
+ def get_script_args(cls, dist, executable=None, wininst=False):
+ # for backward compatibility
+ warnings.warn("Use get_args", EasyInstallDeprecationWarning)
+ writer = (WindowsScriptWriter if wininst else ScriptWriter).best()
+ header = cls.get_script_header("", executable, wininst)
+ return writer.get_args(dist, header)
+
+ @classmethod
+ def get_script_header(cls, script_text, executable=None, wininst=False):
+ # for backward compatibility
+ warnings.warn(
+ "Use get_header", EasyInstallDeprecationWarning, stacklevel=2)
+ if wininst:
+ executable = "python.exe"
+ return cls.get_header(script_text, executable)
+
+ @classmethod
+ def get_args(cls, dist, header=None):
+ """
+ Yield write_script() argument tuples for a distribution's
+ console_scripts and gui_scripts entry points.
+ """
+ if header is None:
+ header = cls.get_header()
+ spec = str(dist.as_requirement())
+ for type_ in 'console', 'gui':
+ group = type_ + '_scripts'
+ for name, ep in dist.get_entry_map(group).items():
+ cls._ensure_safe_name(name)
+ script_text = cls.template % locals()
+ args = cls._get_script_args(type_, name, header, script_text)
+ for res in args:
+ yield res
+
+ @staticmethod
+ def _ensure_safe_name(name):
+ """
+ Prevent paths in *_scripts entry point names.
+ """
+ has_path_sep = re.search(r'[\\/]', name)
+ if has_path_sep:
+ raise ValueError("Path separators not allowed in script names")
+
+ @classmethod
+ def get_writer(cls, force_windows):
+ # for backward compatibility
+ warnings.warn("Use best", EasyInstallDeprecationWarning)
+ return WindowsScriptWriter.best() if force_windows else cls.best()
+
+ @classmethod
+ def best(cls):
+ """
+ Select the best ScriptWriter for this environment.
+ """
+ if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'):
+ return WindowsScriptWriter.best()
+ else:
+ return cls
+
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ # Simply write the stub with no extension.
+ yield (name, header + script_text)
+
+ @classmethod
+ def get_header(cls, script_text="", executable=None):
+ """Create a #! line, getting options (if any) from script_text"""
+ cmd = cls.command_spec_class.best().from_param(executable)
+ cmd.install_options(script_text)
+ return cmd.as_header()
+
+
+class WindowsScriptWriter(ScriptWriter):
+ command_spec_class = WindowsCommandSpec
+
+ @classmethod
+ def get_writer(cls):
+ # for backward compatibility
+ warnings.warn("Use best", EasyInstallDeprecationWarning)
+ return cls.best()
+
+ @classmethod
+ def best(cls):
+ """
+ Select the best ScriptWriter suitable for Windows
+ """
+ writer_lookup = dict(
+ executable=WindowsExecutableLauncherWriter,
+ natural=cls,
+ )
+ # for compatibility, use the executable launcher by default
+ launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
+ return writer_lookup[launcher]
+
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ "For Windows, add a .py extension"
+ ext = dict(console='.pya', gui='.pyw')[type_]
+ if ext not in os.environ['PATHEXT'].lower().split(';'):
+ msg = (
+ "{ext} not listed in PATHEXT; scripts will not be "
+ "recognized as executables."
+ ).format(**locals())
+ warnings.warn(msg, UserWarning)
+ old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
+ old.remove(ext)
+ header = cls._adjust_header(type_, header)
+ blockers = [name + x for x in old]
+ yield name + ext, header + script_text, 't', blockers
+
+ @classmethod
+ def _adjust_header(cls, type_, orig_header):
+ """
+        Make sure 'pythonw' is used for gui apps and 'python' is used for
+        console apps (regardless of what sys.executable is).
+ """
+ pattern = 'pythonw.exe'
+ repl = 'python.exe'
+ if type_ == 'gui':
+ pattern, repl = repl, pattern
+ pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
+ new_header = pattern_ob.sub(string=orig_header, repl=repl)
+ return new_header if cls._use_header(new_header) else orig_header
+
+ @staticmethod
+ def _use_header(new_header):
+ """
+ Should _adjust_header use the replaced header?
+
+        On non-Windows systems, always use it. On Windows systems, only
+        use the replaced header if it resolves to an executable on the
+        system.
+ """
+ clean_header = new_header[2:-1].strip('"')
+ return sys.platform != 'win32' or find_executable(clean_header)
+
+
+class WindowsExecutableLauncherWriter(WindowsScriptWriter):
+ @classmethod
+ def _get_script_args(cls, type_, name, header, script_text):
+ """
+ For Windows, add a .py extension and an .exe launcher
+ """
+ if type_ == 'gui':
+ launcher_type = 'gui'
+ ext = '-script.pyw'
+ old = ['.pyw']
+ else:
+ launcher_type = 'cli'
+ ext = '-script.py'
+ old = ['.py', '.pyc', '.pyo']
+ hdr = cls._adjust_header(type_, header)
+ blockers = [name + x for x in old]
+ yield (name + ext, hdr + script_text, 't', blockers)
+ yield (
+ name + '.exe', get_win_launcher(launcher_type),
+ 'b' # write in binary mode
+ )
+ if not is_64bit():
+ # install a manifest for the launcher to prevent Windows
+ # from detecting it as an installer (which it will for
+ # launchers like easy_install.exe). Consider only
+ # adding a manifest for launchers detected as installers.
+ # See Distribute #143 for details.
+ m_name = name + '.exe.manifest'
+ yield (m_name, load_launcher_manifest(name), 't')
+
+
+# for backward-compatibility
+get_script_args = ScriptWriter.get_script_args
+get_script_header = ScriptWriter.get_script_header
+
+
+def get_win_launcher(type):
+ """
+ Load the Windows launcher (executable) suitable for launching a script.
+
+ `type` should be either 'cli' or 'gui'
+
+ Returns the executable as a byte string.
+ """
+ launcher_fn = '%s.exe' % type
+ if is_64bit():
+ launcher_fn = launcher_fn.replace(".", "-64.")
+ else:
+ launcher_fn = launcher_fn.replace(".", "-32.")
+ return resource_string('setuptools', launcher_fn)
+
+
+def load_launcher_manifest(name):
+ manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
+ return manifest.decode('utf-8') % vars()
+
+
+def rmtree(path, ignore_errors=False, onerror=auto_chmod):
+ return shutil.rmtree(path, ignore_errors, onerror)
+
+
+def current_umask():
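+    # os.umask returns the previous mask, so set a throwaway value and
+    # restore it immediately to read the current umask without effect.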
+ tmp = os.umask(0o022)
+ os.umask(tmp)
+ return tmp
+
+
+def bootstrap():
+ # This function is called when setuptools*.egg is run using /bin/sh
+ import setuptools
+
+ argv0 = os.path.dirname(setuptools.__path__[0])
+ sys.argv[0] = argv0
+ sys.argv.append(argv0)
+ main()
+
+
+def main(argv=None, **kw):
+ from setuptools import setup
+ from setuptools.dist import Distribution
+
+ class DistributionWithoutHelpCommands(Distribution):
+ common_usage = ""
+
+ def _show_help(self, *args, **kw):
+ with _patch_usage():
+ Distribution._show_help(self, *args, **kw)
+
+ if argv is None:
+ argv = sys.argv[1:]
+
+ with _patch_usage():
+ setup(
+ script_args=['-q', 'easy_install', '-v'] + argv,
+ script_name=sys.argv[0] or 'easy_install',
+ distclass=DistributionWithoutHelpCommands,
+ **kw
+ )
+
+
+@contextlib.contextmanager
+def _patch_usage():
+ import distutils.core
+ USAGE = textwrap.dedent("""
+ usage: %(script)s [options] requirement_or_url ...
+ or: %(script)s --help
+ """).lstrip()
+
+ def gen_usage(script_name):
+ return USAGE % dict(
+ script=os.path.basename(script_name),
+ )
+
+ saved = distutils.core.gen_usage
+ distutils.core.gen_usage = gen_usage
+ try:
+ yield
+ finally:
+ distutils.core.gen_usage = saved
+
+
+class EasyInstallDeprecationWarning(SetuptoolsDeprecationWarning):
+ """
+ Warning for EasyInstall deprecations, bypassing suppression.
+ """
diff --git a/third_party/python/setuptools/setuptools/command/egg_info.py b/third_party/python/setuptools/setuptools/command/egg_info.py
new file mode 100644
index 0000000000..0b7ad677f2
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/egg_info.py
@@ -0,0 +1,722 @@
+"""setuptools.command.egg_info
+
+Create a distribution's .egg-info directory and contents"""
+
+from distutils.filelist import FileList as _FileList
+from distutils.errors import DistutilsInternalError
+from distutils.util import convert_path
+from distutils import log
+import distutils.errors
+import distutils.filelist
+import os
+import re
+import sys
+import io
+import warnings
+import time
+import collections
+
+from setuptools import Command
+from setuptools.command.sdist import sdist
+from setuptools.command.sdist import walk_revctrl
+from setuptools.command.setopt import edit_config
+from setuptools.command import bdist_egg
+from pkg_resources import (
+ parse_requirements, safe_name, parse_version,
+ safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename)
+import setuptools.unicode_utils as unicode_utils
+from setuptools.glob import glob
+
+from setuptools.extern import packaging
+from setuptools import SetuptoolsDeprecationWarning
+
+
+def translate_pattern(glob):
+ """
+    Translate a file path glob like '*.txt' into a regular expression.
+ This differs from fnmatch.translate which allows wildcards to match
+ directory separators. It also knows about '**/' which matches any number of
+ directories.
+ """
+ pat = ''
+
+ # This will split on '/' within [character classes]. This is deliberate.
+ chunks = glob.split(os.path.sep)
+
+ sep = re.escape(os.sep)
+ valid_char = '[^%s]' % (sep,)
+
+ for c, chunk in enumerate(chunks):
+ last_chunk = c == len(chunks) - 1
+
+ # Chunks that are a literal ** are globstars. They match anything.
+ if chunk == '**':
+ if last_chunk:
+ # Match anything if this is the last component
+ pat += '.*'
+ else:
+ # Match '(name/)*'
+ pat += '(?:%s+%s)*' % (valid_char, sep)
+            continue  # the whole path component has been handled
+
+ # Find any special characters in the remainder
+ i = 0
+ chunk_len = len(chunk)
+ while i < chunk_len:
+ char = chunk[i]
+ if char == '*':
+ # Match any number of name characters
+ pat += valid_char + '*'
+ elif char == '?':
+ # Match a name character
+ pat += valid_char
+ elif char == '[':
+ # Character class
+ inner_i = i + 1
+ # Skip initial !/] chars
+ if inner_i < chunk_len and chunk[inner_i] == '!':
+ inner_i = inner_i + 1
+ if inner_i < chunk_len and chunk[inner_i] == ']':
+ inner_i = inner_i + 1
+
+ # Loop till the closing ] is found
+ while inner_i < chunk_len and chunk[inner_i] != ']':
+ inner_i = inner_i + 1
+
+ if inner_i >= chunk_len:
+ # Got to the end of the string without finding a closing ]
+ # Do not treat this as a matching group, but as a literal [
+ pat += re.escape(char)
+ else:
+ # Grab the insides of the [brackets]
+ inner = chunk[i + 1:inner_i]
+ char_class = ''
+
+ # Class negation
+ if inner[0] == '!':
+ char_class = '^'
+ inner = inner[1:]
+
+ char_class += re.escape(inner)
+ pat += '[%s]' % (char_class,)
+
+ # Skip to the end ]
+ i = inner_i
+ else:
+ pat += re.escape(char)
+ i += 1
+
+ # Join each chunk with the dir separator
+ if not last_chunk:
+ pat += sep
+
+ pat += r'\Z'
+ return re.compile(pat, flags=re.MULTILINE | re.DOTALL)
+
+
+class InfoCommon:
+ tag_build = None
+ tag_date = None
+
+ @property
+ def name(self):
+ return safe_name(self.distribution.get_name())
+
+ def tagged_version(self):
+ return safe_version(self._maybe_tag(self.distribution.get_version()))
+
+ def _maybe_tag(self, version):
+ """
+ egg_info may be called more than once for a distribution,
+ in which case the version string already contains all tags.
+ """
+ return (
+ version if self.vtags and version.endswith(self.vtags)
+ else version + self.vtags
+ )
+
+ def tags(self):
+ version = ''
+ if self.tag_build:
+ version += self.tag_build
+ if self.tag_date:
+ version += time.strftime("-%Y%m%d")
+ return version
+ vtags = property(tags)
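+
+    # Illustration (assumption): with tag_build='.dev' and tag_date set,
+    # _maybe_tag('1.0') yields '1.0.dev-YYYYMMDD'.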
+
+
+class egg_info(InfoCommon, Command):
+ description = "create a distribution's .egg-info directory"
+
+ user_options = [
+ ('egg-base=', 'e', "directory containing .egg-info directories"
+ " (default: top of the source tree)"),
+ ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"),
+ ('tag-build=', 'b', "Specify explicit tag to add to version number"),
+ ('no-date', 'D', "Don't include date stamp [default]"),
+ ]
+
+ boolean_options = ['tag-date']
+ negative_opt = {
+ 'no-date': 'tag-date',
+ }
+
+ def initialize_options(self):
+ self.egg_base = None
+ self.egg_name = None
+ self.egg_info = None
+ self.egg_version = None
+ self.broken_egg_info = False
+
+ ####################################
+ # allow the 'tag_svn_revision' to be detected and
+ # set, supporting sdists built on older Setuptools.
+ @property
+ def tag_svn_revision(self):
+ pass
+
+ @tag_svn_revision.setter
+ def tag_svn_revision(self, value):
+ pass
+ ####################################
+
+ def save_version_info(self, filename):
+ """
+ Materialize the value of date into the
+ build tag. Install build keys in a deterministic order
+ to avoid arbitrary reordering on subsequent builds.
+ """
+ egg_info = collections.OrderedDict()
+ # follow the order these keys would have been added
+ # when PYTHONHASHSEED=0
+ egg_info['tag_build'] = self.tags()
+ egg_info['tag_date'] = 0
+ edit_config(filename, dict(egg_info=egg_info))
+
+ def finalize_options(self):
+ # Note: we need to capture the current value returned
+ # by `self.tagged_version()`, so we can later update
+ # `self.distribution.metadata.version` without
+ # repercussions.
+ self.egg_name = self.name
+ self.egg_version = self.tagged_version()
+ parsed_version = parse_version(self.egg_version)
+
+ try:
+ is_version = isinstance(parsed_version, packaging.version.Version)
+ spec = (
+ "%s==%s" if is_version else "%s===%s"
+ )
+ list(
+ parse_requirements(spec % (self.egg_name, self.egg_version))
+ )
+ except ValueError as e:
+ raise distutils.errors.DistutilsOptionError(
+ "Invalid distribution name or version syntax: %s-%s" %
+ (self.egg_name, self.egg_version)
+ ) from e
+
+ if self.egg_base is None:
+ dirs = self.distribution.package_dir
+ self.egg_base = (dirs or {}).get('', os.curdir)
+
+ self.ensure_dirname('egg_base')
+ self.egg_info = to_filename(self.egg_name) + '.egg-info'
+ if self.egg_base != os.curdir:
+ self.egg_info = os.path.join(self.egg_base, self.egg_info)
+ if '-' in self.egg_name:
+ self.check_broken_egg_info()
+
+ # Set package version for the benefit of dumber commands
+ # (e.g. sdist, bdist_wininst, etc.)
+ #
+ self.distribution.metadata.version = self.egg_version
+
+ # If we bootstrapped around the lack of a PKG-INFO, as might be the
+ # case in a fresh checkout, make sure that any special tags get added
+ # to the version info
+ #
+ pd = self.distribution._patched_dist
+ if pd is not None and pd.key == self.egg_name.lower():
+ pd._version = self.egg_version
+ pd._parsed_version = parse_version(self.egg_version)
+ self.distribution._patched_dist = None
+
+ def write_or_delete_file(self, what, filename, data, force=False):
+ """Write `data` to `filename` or delete if empty
+
+ If `data` is non-empty, this routine is the same as ``write_file()``.
+ If `data` is empty but not ``None``, this is the same as calling
+    ``delete_file(filename)``. If `data` is ``None``, then this is a no-op
+ unless `filename` exists, in which case a warning is issued about the
+ orphaned file (if `force` is false), or deleted (if `force` is true).
+ """
+ if data:
+ self.write_file(what, filename, data)
+ elif os.path.exists(filename):
+ if data is None and not force:
+ log.warn(
+ "%s not set in setup(), but %s exists", what, filename
+ )
+ return
+ else:
+ self.delete_file(filename)
+
+ def write_file(self, what, filename, data):
+ """Write `data` to `filename` (if not a dry run) after announcing it
+
+ `what` is used in a log message to identify what is being written
+ to the file.
+ """
+ log.info("writing %s to %s", what, filename)
+ data = data.encode("utf-8")
+ if not self.dry_run:
+ f = open(filename, 'wb')
+ f.write(data)
+ f.close()
+
+ def delete_file(self, filename):
+ """Delete `filename` (if not a dry run) after announcing it"""
+ log.info("deleting %s", filename)
+ if not self.dry_run:
+ os.unlink(filename)
+
+ def run(self):
+ self.mkpath(self.egg_info)
+ os.utime(self.egg_info, None)
+ installer = self.distribution.fetch_build_egg
+ for ep in iter_entry_points('egg_info.writers'):
+ ep.require(installer=installer)
+ writer = ep.resolve()
+ writer(self, ep.name, os.path.join(self.egg_info, ep.name))
+
+ # Get rid of native_libs.txt if it was put there by older bdist_egg
+ nl = os.path.join(self.egg_info, "native_libs.txt")
+ if os.path.exists(nl):
+ self.delete_file(nl)
+
+ self.find_sources()
+
+ def find_sources(self):
+ """Generate SOURCES.txt manifest file"""
+ manifest_filename = os.path.join(self.egg_info, "SOURCES.txt")
+ mm = manifest_maker(self.distribution)
+ mm.manifest = manifest_filename
+ mm.run()
+ self.filelist = mm.filelist
+
+ def check_broken_egg_info(self):
+ bei = self.egg_name + '.egg-info'
+ if self.egg_base != os.curdir:
+ bei = os.path.join(self.egg_base, bei)
+ if os.path.exists(bei):
+ log.warn(
+ "-" * 78 + '\n'
+ "Note: Your current .egg-info directory has a '-' in its name;"
+ '\nthis will not work correctly with "setup.py develop".\n\n'
+ 'Please rename %s to %s to correct this problem.\n' + '-' * 78,
+ bei, self.egg_info
+ )
+ self.broken_egg_info = self.egg_info
+ self.egg_info = bei # make it work for now
+
+
+class FileList(_FileList):
+ # Implementations of the various MANIFEST.in commands
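+    #
+    # A sketch of a MANIFEST.in these methods implement (assumption):
+    #
+    #   include README.rst
+    #   recursive-include src *.py
+    #   graft docs
+    #   prune build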
+
+ def process_template_line(self, line):
+ # Parse the line: split it up, make sure the right number of words
+ # is there, and return the relevant words. 'action' is always
+ # defined: it's the first word of the line. Which of the other
+ # three are defined depends on the action; it'll be either
+ # patterns, (dir and patterns), or (dir_pattern).
+ (action, patterns, dir, dir_pattern) = self._parse_template_line(line)
+
+ # OK, now we know that the action is valid and we have the
+ # right number of words on the line for that action -- so we
+ # can proceed with minimal error-checking.
+ if action == 'include':
+ self.debug_print("include " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.include(pattern):
+ log.warn("warning: no files found matching '%s'", pattern)
+
+ elif action == 'exclude':
+ self.debug_print("exclude " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.exclude(pattern):
+ log.warn(("warning: no previously-included files "
+ "found matching '%s'"), pattern)
+
+ elif action == 'global-include':
+ self.debug_print("global-include " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.global_include(pattern):
+ log.warn(("warning: no files found matching '%s' "
+ "anywhere in distribution"), pattern)
+
+ elif action == 'global-exclude':
+ self.debug_print("global-exclude " + ' '.join(patterns))
+ for pattern in patterns:
+ if not self.global_exclude(pattern):
+ log.warn(("warning: no previously-included files matching "
+ "'%s' found anywhere in distribution"),
+ pattern)
+
+ elif action == 'recursive-include':
+ self.debug_print("recursive-include %s %s" %
+ (dir, ' '.join(patterns)))
+ for pattern in patterns:
+ if not self.recursive_include(dir, pattern):
+ log.warn(("warning: no files found matching '%s' "
+ "under directory '%s'"),
+ pattern, dir)
+
+ elif action == 'recursive-exclude':
+ self.debug_print("recursive-exclude %s %s" %
+ (dir, ' '.join(patterns)))
+ for pattern in patterns:
+ if not self.recursive_exclude(dir, pattern):
+ log.warn(("warning: no previously-included files matching "
+ "'%s' found under directory '%s'"),
+ pattern, dir)
+
+ elif action == 'graft':
+ self.debug_print("graft " + dir_pattern)
+ if not self.graft(dir_pattern):
+ log.warn("warning: no directories found matching '%s'",
+ dir_pattern)
+
+ elif action == 'prune':
+ self.debug_print("prune " + dir_pattern)
+ if not self.prune(dir_pattern):
+ log.warn(("no previously-included directories found "
+ "matching '%s'"), dir_pattern)
+
+ else:
+ raise DistutilsInternalError(
+ "this cannot happen: invalid action '%s'" % action)
+
+ def _remove_files(self, predicate):
+ """
+ Remove all files from the file list that match the predicate.
+        Return True if any matching files were removed.
+        """
+ found = False
+ for i in range(len(self.files) - 1, -1, -1):
+ if predicate(self.files[i]):
+ self.debug_print(" removing " + self.files[i])
+ del self.files[i]
+ found = True
+ return found
+
+ def include(self, pattern):
+ """Include files that match 'pattern'."""
+ found = [f for f in glob(pattern) if not os.path.isdir(f)]
+ self.extend(found)
+ return bool(found)
+
+ def exclude(self, pattern):
+ """Exclude files that match 'pattern'."""
+ match = translate_pattern(pattern)
+ return self._remove_files(match.match)
+
+ def recursive_include(self, dir, pattern):
+ """
+ Include all files anywhere in 'dir/' that match the pattern.
+ """
+ full_pattern = os.path.join(dir, '**', pattern)
+ found = [f for f in glob(full_pattern, recursive=True)
+ if not os.path.isdir(f)]
+ self.extend(found)
+ return bool(found)
+
+ def recursive_exclude(self, dir, pattern):
+ """
+        Exclude any file anywhere in 'dir/' that matches the pattern.
+ """
+ match = translate_pattern(os.path.join(dir, '**', pattern))
+ return self._remove_files(match.match)
+
+ def graft(self, dir):
+ """Include all files from 'dir/'."""
+ found = [
+ item
+ for match_dir in glob(dir)
+ for item in distutils.filelist.findall(match_dir)
+ ]
+ self.extend(found)
+ return bool(found)
+
+ def prune(self, dir):
+ """Filter out files from 'dir/'."""
+ match = translate_pattern(os.path.join(dir, '**'))
+ return self._remove_files(match.match)
+
+ def global_include(self, pattern):
+ """
+ Include all files anywhere in the current directory that match the
+ pattern. This is very inefficient on large file trees.
+ """
+ if self.allfiles is None:
+ self.findall()
+ match = translate_pattern(os.path.join('**', pattern))
+ found = [f for f in self.allfiles if match.match(f)]
+ self.extend(found)
+ return bool(found)
+
+ def global_exclude(self, pattern):
+ """
+ Exclude all files anywhere that match the pattern.
+ """
+ match = translate_pattern(os.path.join('**', pattern))
+ return self._remove_files(match.match)
+
+ def append(self, item):
+ if item.endswith('\r'): # Fix older sdists built on Windows
+ item = item[:-1]
+ path = convert_path(item)
+
+ if self._safe_path(path):
+ self.files.append(path)
+
+ def extend(self, paths):
+ self.files.extend(filter(self._safe_path, paths))
+
+ def _repair(self):
+ """
+ Replace self.files with only safe paths
+
+ Because some owners of FileList manipulate the underlying
+ ``files`` attribute directly, this method must be called to
+ repair those paths.
+ """
+ self.files = list(filter(self._safe_path, self.files))
+
+ def _safe_path(self, path):
+ enc_warn = "'%s' not %s encodable -- skipping"
+
+        # To avoid accidental transcoding errors, decode to unicode first
+ u_path = unicode_utils.filesys_decode(path)
+ if u_path is None:
+ log.warn("'%s' in unexpected encoding -- skipping" % path)
+ return False
+
+ # Must ensure utf-8 encodability
+ utf8_path = unicode_utils.try_encode(u_path, "utf-8")
+ if utf8_path is None:
+ log.warn(enc_warn, path, 'utf-8')
+ return False
+
+        try:
+            # accept if either way checks out
+            if os.path.exists(u_path) or os.path.exists(utf8_path):
+                return True
+        except UnicodeEncodeError:
+            # os.path.exists may raise this while encoding u_path
+            log.warn(enc_warn, path, sys.getfilesystemencoding())
+
+
+class manifest_maker(sdist):
+ template = "MANIFEST.in"
+
+ def initialize_options(self):
+ self.use_defaults = 1
+ self.prune = 1
+ self.manifest_only = 1
+ self.force_manifest = 1
+
+ def finalize_options(self):
+ pass
+
+ def run(self):
+ self.filelist = FileList()
+ if not os.path.exists(self.manifest):
+ self.write_manifest() # it must exist so it'll get in the list
+ self.add_defaults()
+ if os.path.exists(self.template):
+ self.read_template()
+ self.prune_file_list()
+ self.filelist.sort()
+ self.filelist.remove_duplicates()
+ self.write_manifest()
+
+ def _manifest_normalize(self, path):
+ path = unicode_utils.filesys_decode(path)
+ return path.replace(os.sep, '/')
+
+ def write_manifest(self):
+ """
+ Write the file list in 'self.filelist' to the manifest file
+ named by 'self.manifest'.
+ """
+ self.filelist._repair()
+
+        # _repair ensures the paths are encodable; normalize them to
+        # unicode with '/' separators for the manifest
+ files = [self._manifest_normalize(f) for f in self.filelist.files]
+ msg = "writing manifest file '%s'" % self.manifest
+ self.execute(write_file, (self.manifest, files), msg)
+
+ def warn(self, msg):
+ if not self._should_suppress_warning(msg):
+ sdist.warn(self, msg)
+
+ @staticmethod
+ def _should_suppress_warning(msg):
+ """
+ suppress missing-file warnings from sdist
+ """
+ return re.match(r"standard file .*not found", msg)
+
+ def add_defaults(self):
+ sdist.add_defaults(self)
+ self.check_license()
+ self.filelist.append(self.template)
+ self.filelist.append(self.manifest)
+ rcfiles = list(walk_revctrl())
+ if rcfiles:
+ self.filelist.extend(rcfiles)
+ elif os.path.exists(self.manifest):
+ self.read_manifest()
+
+ if os.path.exists("setup.py"):
+ # setup.py should be included by default, even if it's not
+ # the script called to create the sdist
+ self.filelist.append("setup.py")
+
+ ei_cmd = self.get_finalized_command('egg_info')
+ self.filelist.graft(ei_cmd.egg_info)
+
+ def prune_file_list(self):
+ build = self.get_finalized_command('build')
+ base_dir = self.distribution.get_fullname()
+ self.filelist.prune(build.build_base)
+ self.filelist.prune(base_dir)
+ sep = re.escape(os.sep)
+ self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep,
+ is_regex=1)
+
+
+def write_file(filename, contents):
+ """Create a file with the specified name and write 'contents' (a
+ sequence of strings without line terminators) to it.
+ """
+ contents = "\n".join(contents)
+
+    # assuming the contents have been vetted for utf-8 encoding
+ contents = contents.encode("utf-8")
+
+ with open(filename, "wb") as f: # always write POSIX-style manifest
+ f.write(contents)
+
+
+def write_pkg_info(cmd, basename, filename):
+ log.info("writing %s", filename)
+ if not cmd.dry_run:
+ metadata = cmd.distribution.metadata
+ metadata.version, oldver = cmd.egg_version, metadata.version
+ metadata.name, oldname = cmd.egg_name, metadata.name
+
+ try:
+ # write unescaped data to PKG-INFO, so older pkg_resources
+ # can still parse it
+ metadata.write_pkg_info(cmd.egg_info)
+ finally:
+ metadata.name, metadata.version = oldname, oldver
+
+ safe = getattr(cmd.distribution, 'zip_safe', None)
+
+ bdist_egg.write_safety_flag(cmd.egg_info, safe)
+
+
+def warn_depends_obsolete(cmd, basename, filename):
+ if os.path.exists(filename):
+ log.warn(
+ "WARNING: 'depends.txt' is not used by setuptools 0.6!\n"
+ "Use the install_requires/extras_require setup() args instead."
+ )
+
+
+def _write_requirements(stream, reqs):
+ lines = yield_lines(reqs or ())
+
+ def append_cr(line):
+ return line + '\n'
+ lines = map(append_cr, lines)
+ stream.writelines(lines)
+
+
+def write_requirements(cmd, basename, filename):
+ dist = cmd.distribution
+ data = io.StringIO()
+ _write_requirements(data, dist.install_requires)
+ extras_require = dist.extras_require or {}
+ for extra in sorted(extras_require):
+ data.write('\n[{extra}]\n'.format(**vars()))
+ _write_requirements(data, extras_require[extra])
+ cmd.write_or_delete_file("requirements", filename, data.getvalue())
+
+
+def write_setup_requirements(cmd, basename, filename):
+ data = io.StringIO()
+ _write_requirements(data, cmd.distribution.setup_requires)
+ cmd.write_or_delete_file("setup-requirements", filename, data.getvalue())
+
+
+def write_toplevel_names(cmd, basename, filename):
+ pkgs = dict.fromkeys(
+ [
+ k.split('.', 1)[0]
+ for k in cmd.distribution.iter_distribution_names()
+ ]
+ )
+ cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n')
+
+
+def overwrite_arg(cmd, basename, filename):
+ write_arg(cmd, basename, filename, True)
+
+
+def write_arg(cmd, basename, filename, force=False):
+ argname = os.path.splitext(basename)[0]
+ value = getattr(cmd.distribution, argname, None)
+ if value is not None:
+ value = '\n'.join(value) + '\n'
+ cmd.write_or_delete_file(argname, filename, value, force)
+
+
+def write_entries(cmd, basename, filename):
+ ep = cmd.distribution.entry_points
+
+    if isinstance(ep, str) or ep is None:
+        data = ep
+    else:
+        data = []
+ for section, contents in sorted(ep.items()):
+ if not isinstance(contents, str):
+ contents = EntryPoint.parse_group(section, contents)
+ contents = '\n'.join(sorted(map(str, contents.values())))
+ data.append('[%s]\n%s\n\n' % (section, contents))
+ data = ''.join(data)
+
+ cmd.write_or_delete_file('entry points', filename, data, True)
+
+
+def get_pkg_info_revision():
+ """
+ Get a -r### off of PKG-INFO Version in case this is an sdist of
+ a subversion revision.
+ """
+ warnings.warn(
+ "get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
+ if os.path.exists('PKG-INFO'):
+ with io.open('PKG-INFO') as f:
+ for line in f:
+ match = re.match(r"Version:.*-r(\d+)\s*$", line)
+ if match:
+ return int(match.group(1))
+ return 0
+
+
+class EggInfoDeprecationWarning(SetuptoolsDeprecationWarning):
+ """Deprecated behavior warning for EggInfo, bypassing suppression."""
diff --git a/third_party/python/setuptools/setuptools/command/install.py b/third_party/python/setuptools/setuptools/command/install.py
new file mode 100644
index 0000000000..72b9a3e424
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/install.py
@@ -0,0 +1,125 @@
+from distutils.errors import DistutilsArgError
+import inspect
+import glob
+import warnings
+import platform
+import distutils.command.install as orig
+
+import setuptools
+
+# Prior to numpy 1.9, NumPy relied on the '_install' name, so provide it for
+# now. See https://github.com/pypa/setuptools/issues/199/
+_install = orig.install
+
+
+class install(orig.install):
+ """Use easy_install to install the package, w/dependencies"""
+
+ user_options = orig.install.user_options + [
+ ('old-and-unmanageable', None, "Try not to use this!"),
+ ('single-version-externally-managed', None,
+ "used by system package builders to create 'flat' eggs"),
+ ]
+ boolean_options = orig.install.boolean_options + [
+ 'old-and-unmanageable', 'single-version-externally-managed',
+ ]
+ new_commands = [
+ ('install_egg_info', lambda self: True),
+ ('install_scripts', lambda self: True),
+ ]
+ _nc = dict(new_commands)
+
+ def initialize_options(self):
+ orig.install.initialize_options(self)
+ self.old_and_unmanageable = None
+ self.single_version_externally_managed = None
+
+ def finalize_options(self):
+ orig.install.finalize_options(self)
+ if self.root:
+ self.single_version_externally_managed = True
+ elif self.single_version_externally_managed:
+ if not self.root and not self.record:
+ raise DistutilsArgError(
+ "You must specify --record or --root when building system"
+ " packages"
+ )
+
+ def handle_extra_path(self):
+ if self.root or self.single_version_externally_managed:
+ # explicit backward-compatibility mode, allow extra_path to work
+ return orig.install.handle_extra_path(self)
+
+        # Ignore extra_path when installing an egg (or being run by another
+        # command without --root or --single-version-externally-managed)
+ self.path_file = None
+ self.extra_dirs = ''
+
+ def run(self):
+ # Explicit request for old-style install? Just do it
+ if self.old_and_unmanageable or self.single_version_externally_managed:
+ return orig.install.run(self)
+
+ if not self._called_from_setup(inspect.currentframe()):
+ # Run in backward-compatibility mode to support bdist_* commands.
+ orig.install.run(self)
+ else:
+ self.do_egg_install()
+
+ @staticmethod
+ def _called_from_setup(run_frame):
+ """
+ Attempt to detect whether run() was called from setup() or by another
+ command. If called by setup(), the parent caller will be the
+ 'run_command' method in 'distutils.dist', and *its* caller will be
+ the 'run_commands' method. If called any other way, the
+ immediate caller *might* be 'run_command', but it won't have been
+ called by 'run_commands'. Return True in that case or if a call stack
+ is unavailable. Return False otherwise.
+ """
+ if run_frame is None:
+ msg = "Call stack not available. bdist_* commands may fail."
+ warnings.warn(msg)
+ if platform.python_implementation() == 'IronPython':
+ msg = "For best results, pass -X:Frames to enable call stack."
+ warnings.warn(msg)
+ return True
+ res = inspect.getouterframes(run_frame)[2]
+ caller, = res[:1]
+ info = inspect.getframeinfo(caller)
+ caller_module = caller.f_globals.get('__name__', '')
+ return (
+ caller_module == 'distutils.dist'
+ and info.function == 'run_commands'
+ )
+
+ def do_egg_install(self):
+
+ easy_install = self.distribution.get_command_class('easy_install')
+
+ cmd = easy_install(
+ self.distribution, args="x", root=self.root, record=self.record,
+ )
+ cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
+ cmd.always_copy_from = '.' # make sure local-dir eggs get installed
+
+ # pick up setup-dir .egg files only: no .egg-info
+ cmd.package_index.scan(glob.glob('*.egg'))
+
+ self.run_command('bdist_egg')
+ args = [self.distribution.get_command_obj('bdist_egg').egg_output]
+
+ if setuptools.bootstrap_install_from:
+ # Bootstrap self-installation of setuptools
+ args.insert(0, setuptools.bootstrap_install_from)
+
+ cmd.args = args
+ cmd.run(show_deprecation=False)
+ setuptools.bootstrap_install_from = None
+
+
+# XXX Python 3.1 doesn't see _nc if this is inside the class
+install.sub_commands = (
+ [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
+ install.new_commands
+)
diff --git a/third_party/python/setuptools/setuptools/command/install_egg_info.py b/third_party/python/setuptools/setuptools/command/install_egg_info.py
new file mode 100644
index 0000000000..edc4718b68
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/install_egg_info.py
@@ -0,0 +1,62 @@
+from distutils import log, dir_util
+import os
+
+from setuptools import Command
+from setuptools import namespaces
+from setuptools.archive_util import unpack_archive
+import pkg_resources
+
+
+class install_egg_info(namespaces.Installer, Command):
+ """Install an .egg-info directory for the package"""
+
+ description = "Install an .egg-info directory for the package"
+
+ user_options = [
+ ('install-dir=', 'd', "directory to install to"),
+ ]
+
+ def initialize_options(self):
+ self.install_dir = None
+
+ def finalize_options(self):
+ self.set_undefined_options('install_lib',
+ ('install_dir', 'install_dir'))
+ ei_cmd = self.get_finalized_command("egg_info")
+ basename = pkg_resources.Distribution(
+ None, None, ei_cmd.egg_name, ei_cmd.egg_version
+ ).egg_name() + '.egg-info'
+ self.source = ei_cmd.egg_info
+ self.target = os.path.join(self.install_dir, basename)
+ self.outputs = []
+
+ def run(self):
+ self.run_command('egg_info')
+ if os.path.isdir(self.target) and not os.path.islink(self.target):
+ dir_util.remove_tree(self.target, dry_run=self.dry_run)
+ elif os.path.exists(self.target):
+ self.execute(os.unlink, (self.target,), "Removing " + self.target)
+ if not self.dry_run:
+ pkg_resources.ensure_directory(self.target)
+ self.execute(
+ self.copytree, (), "Copying %s to %s" % (self.source, self.target)
+ )
+ self.install_namespaces()
+
+ def get_outputs(self):
+ return self.outputs
+
+ def copytree(self):
+ # Copy the .egg-info tree to site-packages
+ def skimmer(src, dst):
+ # filter out source-control directories; note that 'src' is always
+ # a '/'-separated path, regardless of platform. 'dst' is a
+ # platform-specific path.
+ for skip in '.svn/', 'CVS/':
+ if src.startswith(skip) or '/' + skip in src:
+ return None
+ self.outputs.append(dst)
+ log.debug("Copying %s to %s", src, dst)
+ return dst
+
+ unpack_archive(self.source, self.target, skimmer)
diff --git a/third_party/python/setuptools/setuptools/command/install_lib.py b/third_party/python/setuptools/setuptools/command/install_lib.py
new file mode 100644
index 0000000000..2e9d8757a5
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/install_lib.py
@@ -0,0 +1,122 @@
+import os
+import sys
+from itertools import product, starmap
+import distutils.command.install_lib as orig
+
+
+class install_lib(orig.install_lib):
+ """Don't add compiled flags to filenames of non-Python files"""
+
+ def run(self):
+ self.build()
+ outfiles = self.install()
+ if outfiles is not None:
+ # always compile, in case we have any extension stubs to deal with
+ self.byte_compile(outfiles)
+
+ def get_exclusions(self):
+ """
+ Return a collections.Sized collections.Container of paths to be
+ excluded for single_version_externally_managed installations.
+ """
+ all_packages = (
+ pkg
+ for ns_pkg in self._get_SVEM_NSPs()
+ for pkg in self._all_packages(ns_pkg)
+ )
+
+ excl_specs = product(all_packages, self._gen_exclusion_paths())
+ return set(starmap(self._exclude_pkg_path, excl_specs))
+
+ def _exclude_pkg_path(self, pkg, exclusion_path):
+ """
+ Given a package name and exclusion path within that package,
+ compute the full exclusion path.
+ """
+ parts = pkg.split('.') + [exclusion_path]
+ return os.path.join(self.install_dir, *parts)
+
+ @staticmethod
+ def _all_packages(pkg_name):
+ """
+ >>> list(install_lib._all_packages('foo.bar.baz'))
+ ['foo.bar.baz', 'foo.bar', 'foo']
+ """
+ while pkg_name:
+ yield pkg_name
+ pkg_name, sep, child = pkg_name.rpartition('.')
+
+ def _get_SVEM_NSPs(self):
+ """
+ Get namespace packages (list) but only for
+ single_version_externally_managed installations and empty otherwise.
+ """
+ # TODO: is it necessary to short-circuit here? i.e. what's the cost
+ # if get_finalized_command is called even when namespace_packages is
+ # False?
+ if not self.distribution.namespace_packages:
+ return []
+
+ install_cmd = self.get_finalized_command('install')
+ svem = install_cmd.single_version_externally_managed
+
+ return self.distribution.namespace_packages if svem else []
+
+ @staticmethod
+ def _gen_exclusion_paths():
+ """
+ Generate file paths to be excluded for namespace packages (bytecode
+ cache files).
+ """
+ # always exclude the package module itself
+ yield '__init__.py'
+
+ yield '__init__.pyc'
+ yield '__init__.pyo'
+
+ if not hasattr(sys, 'implementation'):
+ return
+
+ base = os.path.join(
+ '__pycache__', '__init__.' + sys.implementation.cache_tag)
+ yield base + '.pyc'
+ yield base + '.pyo'
+ yield base + '.opt-1.pyc'
+ yield base + '.opt-2.pyc'
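+
+        # Illustration: on CPython 3.8 the cached names above become, e.g.,
+        # '__pycache__/__init__.cpython-38.pyc' and its .pyo and
+        # .opt-1/.opt-2 variants.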
+
+ def copy_tree(
+ self, infile, outfile,
+ preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
+ ):
+ assert preserve_mode and preserve_times and not preserve_symlinks
+ exclude = self.get_exclusions()
+
+ if not exclude:
+ return orig.install_lib.copy_tree(self, infile, outfile)
+
+ # Exclude namespace package __init__.py* files from the output
+
+ from setuptools.archive_util import unpack_directory
+ from distutils import log
+
+ outfiles = []
+
+ def pf(src, dst):
+ if dst in exclude:
+ log.warn("Skipping installation of %s (namespace package)",
+ dst)
+ return False
+
+ log.info("copying %s -> %s", src, os.path.dirname(dst))
+ outfiles.append(dst)
+ return dst
+
+ unpack_directory(infile, outfile, pf)
+ return outfiles
+
+ def get_outputs(self):
+ outputs = orig.install_lib.get_outputs(self)
+ exclude = self.get_exclusions()
+ if exclude:
+ return [f for f in outputs if f not in exclude]
+ return outputs
diff --git a/third_party/python/setuptools/setuptools/command/install_scripts.py b/third_party/python/setuptools/setuptools/command/install_scripts.py
new file mode 100644
index 0000000000..8c9a15e2bb
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/install_scripts.py
@@ -0,0 +1,68 @@
+from distutils import log
+import distutils.command.install_scripts as orig
+import os
+import sys
+
+from pkg_resources import Distribution, PathMetadata, ensure_directory
+
+
+class install_scripts(orig.install_scripts):
+ """Do normal script install, plus any egg_info wrapper scripts"""
+
+ def initialize_options(self):
+ orig.install_scripts.initialize_options(self)
+ self.no_ep = False
+
+ def run(self):
+ import setuptools.command.easy_install as ei
+
+ self.run_command("egg_info")
+ if self.distribution.scripts:
+ orig.install_scripts.run(self) # run first to set up self.outfiles
+ else:
+ self.outfiles = []
+ if self.no_ep:
+ # don't install entry point scripts into .egg file!
+ return
+
+ ei_cmd = self.get_finalized_command("egg_info")
+ dist = Distribution(
+ ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
+ ei_cmd.egg_name, ei_cmd.egg_version,
+ )
+ bs_cmd = self.get_finalized_command('build_scripts')
+ exec_param = getattr(bs_cmd, 'executable', None)
+ try:
+ bw_cmd = self.get_finalized_command("bdist_wininst")
+ is_wininst = getattr(bw_cmd, '_is_running', False)
+ except ImportError:
+ is_wininst = False
+ writer = ei.ScriptWriter
+ if is_wininst:
+ exec_param = "python.exe"
+ writer = ei.WindowsScriptWriter
+ if exec_param == sys.executable:
+ # In case the path to the Python executable contains a space, wrap
+ # it so it's not split up.
+ exec_param = [exec_param]
+ # resolve the writer to the environment
+ writer = writer.best()
+ cmd = writer.command_spec_class.best().from_param(exec_param)
+ for args in writer.get_args(dist, cmd.as_header()):
+ self.write_script(*args)
+
+ def write_script(self, script_name, contents, mode="t", *ignored):
+ """Write an executable file to the scripts directory"""
+ from setuptools.command.easy_install import chmod, current_umask
+
+ log.info("Installing %s script to %s", script_name, self.install_dir)
+ target = os.path.join(self.install_dir, script_name)
+ self.outfiles.append(target)
+
+ mask = current_umask()
+ if not self.dry_run:
+ ensure_directory(target)
+ f = open(target, "w" + mode)
+ f.write(contents)
+ f.close()
+ chmod(target, 0o777 - mask)
diff --git a/third_party/python/setuptools/setuptools/command/launcher manifest.xml b/third_party/python/setuptools/setuptools/command/launcher manifest.xml
new file mode 100644
index 0000000000..5972a96d8d
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/launcher manifest.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+ <assemblyIdentity version="1.0.0.0"
+ processorArchitecture="X86"
+ name="%(name)s"
+ type="win32"/>
+ <!-- Identify the application security requirements. -->
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"/>
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+</assembly>
diff --git a/third_party/python/setuptools/setuptools/command/py36compat.py b/third_party/python/setuptools/setuptools/command/py36compat.py
new file mode 100644
index 0000000000..343547a4d3
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/py36compat.py
@@ -0,0 +1,134 @@
+import os
+from glob import glob
+from distutils.util import convert_path
+from distutils.command import sdist
+
+
+class sdist_add_defaults:
+ """
+ Mix-in providing forward-compatibility for functionality as found in
+ distutils on Python 3.7.
+
+ Do not edit the code in this class except to update functionality
+ as implemented in distutils. Instead, override in the subclass.
+ """
+
+ def add_defaults(self):
+ """Add all the default files to self.filelist:
+ - README or README.txt
+ - setup.py
+ - test/test*.py
+ - all pure Python modules mentioned in setup script
+ - all files pointed by package_data (build_py)
+ - all files defined in data_files.
+ - all files defined as scripts.
+ - all C sources listed as part of extensions or C libraries
+ in the setup script (doesn't catch C headers!)
+ Warns if (README or README.txt) or setup.py are missing; everything
+ else is optional.
+ """
+ self._add_defaults_standards()
+ self._add_defaults_optional()
+ self._add_defaults_python()
+ self._add_defaults_data_files()
+ self._add_defaults_ext()
+ self._add_defaults_c_libs()
+ self._add_defaults_scripts()
+
+ @staticmethod
+ def _cs_path_exists(fspath):
+ """
+ Case-sensitive path existence check
+
+ >>> sdist_add_defaults._cs_path_exists(__file__)
+ True
+ >>> sdist_add_defaults._cs_path_exists(__file__.upper())
+ False
+ """
+ if not os.path.exists(fspath):
+ return False
+ # make absolute so we always have a directory
+ abspath = os.path.abspath(fspath)
+ directory, filename = os.path.split(abspath)
+ return filename in os.listdir(directory)
+
+ def _add_defaults_standards(self):
+ standards = [self.READMES, self.distribution.script_name]
+ for fn in standards:
+ if isinstance(fn, tuple):
+ alts = fn
+ got_it = False
+ for fn in alts:
+ if self._cs_path_exists(fn):
+ got_it = True
+ self.filelist.append(fn)
+ break
+
+ if not got_it:
+ self.warn("standard file not found: should have one of " +
+ ', '.join(alts))
+ else:
+ if self._cs_path_exists(fn):
+ self.filelist.append(fn)
+ else:
+ self.warn("standard file '%s' not found" % fn)
+
+ def _add_defaults_optional(self):
+ optional = ['test/test*.py', 'setup.cfg']
+ for pattern in optional:
+ files = filter(os.path.isfile, glob(pattern))
+ self.filelist.extend(files)
+
+ def _add_defaults_python(self):
+ # build_py is used to get:
+ # - python modules
+ # - files defined in package_data
+ build_py = self.get_finalized_command('build_py')
+
+ # getting python files
+ if self.distribution.has_pure_modules():
+ self.filelist.extend(build_py.get_source_files())
+
+ # getting package_data files
+ # (computed in build_py.data_files by build_py.finalize_options)
+ for pkg, src_dir, build_dir, filenames in build_py.data_files:
+ for filename in filenames:
+ self.filelist.append(os.path.join(src_dir, filename))
+
+ def _add_defaults_data_files(self):
+ # getting distribution.data_files
+ if self.distribution.has_data_files():
+ for item in self.distribution.data_files:
+ if isinstance(item, str):
+ # plain file
+ item = convert_path(item)
+ if os.path.isfile(item):
+ self.filelist.append(item)
+ else:
+ # a (dirname, filenames) tuple
+ dirname, filenames = item
+ for f in filenames:
+ f = convert_path(f)
+ if os.path.isfile(f):
+ self.filelist.append(f)
+
+ def _add_defaults_ext(self):
+ if self.distribution.has_ext_modules():
+ build_ext = self.get_finalized_command('build_ext')
+ self.filelist.extend(build_ext.get_source_files())
+
+ def _add_defaults_c_libs(self):
+ if self.distribution.has_c_libraries():
+ build_clib = self.get_finalized_command('build_clib')
+ self.filelist.extend(build_clib.get_source_files())
+
+ def _add_defaults_scripts(self):
+ if self.distribution.has_scripts():
+ build_scripts = self.get_finalized_command('build_scripts')
+ self.filelist.extend(build_scripts.get_source_files())
+
+
+if hasattr(sdist.sdist, '_add_defaults_standards'):
+ # disable the functionality already available upstream
+ class sdist_add_defaults: # noqa
+ pass
diff --git a/third_party/python/setuptools/setuptools/command/register.py b/third_party/python/setuptools/setuptools/command/register.py
new file mode 100644
index 0000000000..b8266b9a60
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/register.py
@@ -0,0 +1,18 @@
+from distutils import log
+import distutils.command.register as orig
+
+from setuptools.errors import RemovedCommandError
+
+
+class register(orig.register):
+ """Formerly used to register packages on PyPI."""
+
+ def run(self):
+ msg = (
+ "The register command has been removed, use twine to upload "
+ + "instead (https://pypi.org/p/twine)"
+ )
+
+ self.announce("ERROR: " + msg, log.ERROR)
+
+ raise RemovedCommandError(msg)
diff --git a/third_party/python/setuptools/setuptools/command/rotate.py b/third_party/python/setuptools/setuptools/command/rotate.py
new file mode 100644
index 0000000000..74795ba922
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/rotate.py
@@ -0,0 +1,64 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import shutil
+
+from setuptools import Command
+
+
+class rotate(Command):
+ """Delete older distributions"""
+
+ description = "delete older distributions, keeping N newest files"
+ user_options = [
+ ('match=', 'm', "patterns to match (required)"),
+ ('dist-dir=', 'd', "directory where the distributions are"),
+ ('keep=', 'k', "number of matching distributions to keep"),
+ ]
+
+ boolean_options = []
+
+ def initialize_options(self):
+ self.match = None
+ self.dist_dir = None
+ self.keep = None
+
+ def finalize_options(self):
+ if self.match is None:
+ raise DistutilsOptionError(
+ "Must specify one or more (comma-separated) match patterns "
+ "(e.g. '.zip' or '.egg')"
+ )
+ if self.keep is None:
+ raise DistutilsOptionError("Must specify number of files to keep")
+ try:
+ self.keep = int(self.keep)
+ except ValueError as e:
+ raise DistutilsOptionError("--keep must be an integer") from e
+ if isinstance(self.match, str):
+ self.match = [
+ convert_path(p.strip()) for p in self.match.split(',')
+ ]
+ self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
+
+ def run(self):
+ self.run_command("egg_info")
+ from glob import glob
+
+ for pattern in self.match:
+ pattern = self.distribution.get_name() + '*' + pattern
+ files = glob(os.path.join(self.dist_dir, pattern))
+ files = [(os.path.getmtime(f), f) for f in files]
+ files.sort()
+ files.reverse()
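+
+            # files are now sorted newest-first; everything past the
+            # first self.keep entries is pruned below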
+
+ log.info("%d file(s) matching %s", len(files), pattern)
+ files = files[self.keep:]
+ for (t, f) in files:
+ log.info("Deleting %s", f)
+ if not self.dry_run:
+ if os.path.isdir(f):
+ shutil.rmtree(f)
+ else:
+ os.unlink(f)
diff --git a/third_party/python/setuptools/setuptools/command/saveopts.py b/third_party/python/setuptools/setuptools/command/saveopts.py
new file mode 100644
index 0000000000..611cec5528
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/saveopts.py
@@ -0,0 +1,22 @@
+from setuptools.command.setopt import edit_config, option_base
+
+
+class saveopts(option_base):
+ """Save command-line options to a file"""
+
+ description = "save supplied options to setup.cfg or other config file"
+
+ def run(self):
+ dist = self.distribution
+ settings = {}
+
+ for cmd in dist.command_options:
+
+ if cmd == 'saveopts':
+ continue # don't save our own options!
+
+ for opt, (src, val) in dist.get_option_dict(cmd).items():
+ if src == "command line":
+ settings.setdefault(cmd, {})[opt] = val
+
+ edit_config(self.filename, settings, self.dry_run)
diff --git a/third_party/python/setuptools/setuptools/command/sdist.py b/third_party/python/setuptools/setuptools/command/sdist.py
new file mode 100644
index 0000000000..887b7efa05
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/sdist.py
@@ -0,0 +1,222 @@
+from distutils import log
+import distutils.command.sdist as orig
+import os
+import sys
+import io
+import contextlib
+
+from setuptools.extern import ordered_set
+
+from .py36compat import sdist_add_defaults
+
+import pkg_resources
+
+_default_revctrl = list
+
+
+def walk_revctrl(dirname=''):
+ """Find all files under revision control"""
+ for ep in pkg_resources.iter_entry_points('setuptools.file_finders'):
+ for item in ep.load()(dirname):
+ yield item
+
+
+class sdist(sdist_add_defaults, orig.sdist):
+ """Smart sdist that finds anything supported by revision control"""
+
+ user_options = [
+ ('formats=', None,
+ "formats for source distribution (comma-separated list)"),
+ ('keep-temp', 'k',
+ "keep the distribution tree around after creating " +
+ "archive file(s)"),
+ ('dist-dir=', 'd',
+ "directory to put the source distribution archive(s) in "
+ "[default: dist]"),
+ ]
+
+ negative_opt = {}
+
+ README_EXTENSIONS = ['', '.rst', '.txt', '.md']
+ READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS)
+
+ def run(self):
+ self.run_command('egg_info')
+ ei_cmd = self.get_finalized_command('egg_info')
+ self.filelist = ei_cmd.filelist
+ self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt'))
+ self.check_readme()
+
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ self.make_distribution()
+
+ dist_files = getattr(self.distribution, 'dist_files', [])
+ for file in self.archive_files:
+ data = ('sdist', '', file)
+ if data not in dist_files:
+ dist_files.append(data)
+
+ def initialize_options(self):
+ orig.sdist.initialize_options(self)
+
+ self._default_to_gztar()
+
+ def _default_to_gztar(self):
+ # only needed on Python prior to 3.6.
+ if sys.version_info >= (3, 6, 0, 'beta', 1):
+ return
+ self.formats = ['gztar']
+
+ def make_distribution(self):
+ """
+ Workaround for #516
+ """
+ with self._remove_os_link():
+ orig.sdist.make_distribution(self)
+
+ @staticmethod
+ @contextlib.contextmanager
+ def _remove_os_link():
+ """
+ In a context, remove and restore os.link if it exists
+ """
+
+ class NoValue:
+ pass
+
+ orig_val = getattr(os, 'link', NoValue)
+ try:
+ del os.link
+ except Exception:
+ pass
+ try:
+ yield
+ finally:
+ if orig_val is not NoValue:
+ setattr(os, 'link', orig_val)
+
+ def _add_defaults_optional(self):
+ super()._add_defaults_optional()
+ if os.path.isfile('pyproject.toml'):
+ self.filelist.append('pyproject.toml')
+
+ def _add_defaults_python(self):
+ """getting python files"""
+ if self.distribution.has_pure_modules():
+ build_py = self.get_finalized_command('build_py')
+ self.filelist.extend(build_py.get_source_files())
+ self._add_data_files(self._safe_data_files(build_py))
+
+ def _safe_data_files(self, build_py):
+ """
+ Extracting data_files from build_py is known to cause
+ infinite recursion errors when `include_package_data`
+ is enabled, so suppress it in that case.
+ """
+ if self.distribution.include_package_data:
+ return ()
+ return build_py.data_files
+
+ def _add_data_files(self, data_files):
+ """
+ Add data files as found in build_py.data_files.
+ """
+ self.filelist.extend(
+ os.path.join(src_dir, name)
+ for _, src_dir, _, filenames in data_files
+ for name in filenames
+ )
+
+ def _add_defaults_data_files(self):
+ try:
+ super()._add_defaults_data_files()
+ except TypeError:
+ log.warn("data_files contains unexpected objects")
+
+ def check_readme(self):
+        for f in self.READMES:
+            if os.path.exists(f):
+                return
+
+        self.warn(
+            "standard file not found: should have one of " +
+            ', '.join(self.READMES)
+        )
+
+ def make_release_tree(self, base_dir, files):
+ orig.sdist.make_release_tree(self, base_dir, files)
+
+ # Save any egg_info command line options used to create this sdist
+ dest = os.path.join(base_dir, 'setup.cfg')
+ if hasattr(os, 'link') and os.path.exists(dest):
+ # unlink and re-copy, since it might be hard-linked, and
+ # we don't want to change the source version
+ os.unlink(dest)
+ self.copy_file('setup.cfg', dest)
+
+ self.get_finalized_command('egg_info').save_version_info(dest)
+
+ def _manifest_is_not_generated(self):
+ # check for special comment used in 2.7.1 and higher
+ if not os.path.isfile(self.manifest):
+ return False
+
+ with io.open(self.manifest, 'rb') as fp:
+ first_line = fp.readline()
+ return (first_line !=
+ '# file GENERATED by distutils, do NOT edit\n'.encode())
+
+ def read_manifest(self):
+ """Read the manifest file (named by 'self.manifest') and use it to
+ fill in 'self.filelist', the list of files to include in the source
+ distribution.
+ """
+ log.info("reading manifest file '%s'", self.manifest)
+ manifest = open(self.manifest, 'rb')
+ for line in manifest:
+ # The manifest must contain UTF-8. See #303.
+ try:
+ line = line.decode('UTF-8')
+ except UnicodeDecodeError:
+ log.warn("%r not UTF-8 decodable -- skipping" % line)
+ continue
+ # ignore comments and blank lines
+ line = line.strip()
+ if line.startswith('#') or not line:
+ continue
+ self.filelist.append(line)
+ manifest.close()
+
+ def check_license(self):
+ """Checks if license_file' or 'license_files' is configured and adds any
+ valid paths to 'self.filelist'.
+ """
+
+ files = ordered_set.OrderedSet()
+
+ opts = self.distribution.get_option_dict('metadata')
+
+ # ignore the source of the value
+ _, license_file = opts.get('license_file', (None, None))
+
+ if license_file is None:
+ log.debug("'license_file' option was not specified")
+ else:
+ files.add(license_file)
+
+ try:
+ files.update(self.distribution.metadata.license_files)
+ except TypeError:
+ log.warn("warning: 'license_files' option is malformed")
+
+ for f in files:
+ if not os.path.exists(f):
+ log.warn(
+ "warning: Failed to find the configured license file '%s'",
+ f)
+ files.remove(f)
+
+ self.filelist.extend(files)
diff --git a/third_party/python/setuptools/setuptools/command/setopt.py b/third_party/python/setuptools/setuptools/command/setopt.py
new file mode 100644
index 0000000000..e18057c81e
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/setopt.py
@@ -0,0 +1,148 @@
+from distutils.util import convert_path
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import distutils
+import os
+import configparser
+
+from setuptools import Command
+
+__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
+
+
+def config_file(kind="local"):
+ """Get the filename of the distutils, local, global, or per-user config
+
+ `kind` must be one of "local", "global", or "user"
+ """
+ if kind == 'local':
+ return 'setup.cfg'
+ if kind == 'global':
+ return os.path.join(
+ os.path.dirname(distutils.__file__), 'distutils.cfg'
+ )
+ if kind == 'user':
+ dot = os.name == 'posix' and '.' or ''
+ return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
+ raise ValueError(
+ "config_file() type must be 'local', 'global', or 'user'", kind
+ )
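+
+# Illustrative (not part of the module): on a POSIX system,
+# config_file('user') resolves to '~/.pydistutils.cfg', while
+# config_file('local') is simply 'setup.cfg' in the current directory.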
+
+
+def edit_config(filename, settings, dry_run=False):
+ """Edit a configuration file to include `settings`
+
+ `settings` is a dictionary of dictionaries or ``None`` values, keyed by
+ command/section name. A ``None`` value means to delete the entire section,
+ while a dictionary lists settings to be changed or deleted in that section.
+ A setting of ``None`` means to delete that setting.
+ """
+ log.debug("Reading configuration from %s", filename)
+ opts = configparser.RawConfigParser()
+ opts.read([filename])
+ for section, options in settings.items():
+ if options is None:
+ log.info("Deleting section [%s] from %s", section, filename)
+ opts.remove_section(section)
+ else:
+ if not opts.has_section(section):
+ log.debug("Adding new section [%s] to %s", section, filename)
+ opts.add_section(section)
+ for option, value in options.items():
+ if value is None:
+ log.debug(
+ "Deleting %s.%s from %s",
+ section, option, filename
+ )
+ opts.remove_option(section, option)
+ if not opts.options(section):
+ log.info("Deleting empty [%s] section from %s",
+ section, filename)
+ opts.remove_section(section)
+ else:
+ log.debug(
+ "Setting %s.%s to %r in %s",
+ section, option, value, filename
+ )
+ opts.set(section, option, value)
+
+ log.info("Writing %s", filename)
+ if not dry_run:
+ with open(filename, 'w') as f:
+ opts.write(f)
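+
+# Illustrative usage (not part of the module): a None section value
+# deletes the whole section, and a None option value deletes that option:
+#   edit_config('setup.cfg', {
+#       'bdist_wheel': None,               # delete the [bdist_wheel] section
+#       'metadata': {'license': 'MIT'},    # set metadata.license
+#   })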
+
+
+class option_base(Command):
+ """Abstract base class for commands that mess with config files"""
+
+ user_options = [
+ ('global-config', 'g',
+ "save options to the site-wide distutils.cfg file"),
+ ('user-config', 'u',
+ "save options to the current user's pydistutils.cfg file"),
+ ('filename=', 'f',
+ "configuration file to use (default=setup.cfg)"),
+ ]
+
+ boolean_options = [
+ 'global-config', 'user-config',
+ ]
+
+ def initialize_options(self):
+ self.global_config = None
+ self.user_config = None
+ self.filename = None
+
+ def finalize_options(self):
+ filenames = []
+ if self.global_config:
+ filenames.append(config_file('global'))
+ if self.user_config:
+ filenames.append(config_file('user'))
+ if self.filename is not None:
+ filenames.append(self.filename)
+ if not filenames:
+ filenames.append(config_file('local'))
+ if len(filenames) > 1:
+ raise DistutilsOptionError(
+ "Must specify only one configuration file option",
+ filenames
+ )
+ self.filename, = filenames
+
+
+class setopt(option_base):
+ """Save command-line options to a file"""
+
+ description = "set an option in setup.cfg or another config file"
+
+ user_options = [
+ ('command=', 'c', 'command to set an option for'),
+ ('option=', 'o', 'option to set'),
+ ('set-value=', 's', 'value of the option'),
+ ('remove', 'r', 'remove (unset) the value'),
+ ] + option_base.user_options
+
+ boolean_options = option_base.boolean_options + ['remove']
+
+ def initialize_options(self):
+ option_base.initialize_options(self)
+ self.command = None
+ self.option = None
+ self.set_value = None
+ self.remove = None
+
+ def finalize_options(self):
+ option_base.finalize_options(self)
+ if self.command is None or self.option is None:
+ raise DistutilsOptionError("Must specify --command *and* --option")
+ if self.set_value is None and not self.remove:
+ raise DistutilsOptionError("Must specify --set-value or --remove")
+
+ def run(self):
+ edit_config(
+ self.filename, {
+ self.command: {self.option.replace('-', '_'): self.set_value}
+ },
+ self.dry_run
+ )
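+
+# Illustrative command-line usage (project values hypothetical):
+#   python setup.py setopt --command metadata --option description \
+#       --set-value "My package"
+# writes 'description = My package' under [metadata] in setup.cfg.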
diff --git a/third_party/python/setuptools/setuptools/command/test.py b/third_party/python/setuptools/setuptools/command/test.py
new file mode 100644
index 0000000000..cf71ad015d
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/test.py
@@ -0,0 +1,274 @@
+import os
+import operator
+import sys
+import contextlib
+import itertools
+import unittest
+from distutils.errors import DistutilsError, DistutilsOptionError
+from distutils import log
+from unittest import TestLoader
+
+from pkg_resources import (resource_listdir, resource_exists, normalize_path,
+ working_set, _namespace_packages, evaluate_marker,
+ add_activation_listener, require, EntryPoint)
+from setuptools import Command
+from .build_py import _unique_everseen
+
+
+class ScanningLoader(TestLoader):
+
+ def __init__(self):
+ TestLoader.__init__(self)
+ self._visited = set()
+
+ def loadTestsFromModule(self, module, pattern=None):
+ """Return a suite of all tests cases contained in the given module
+
+ If the module is a package, load tests from all the modules in it.
+ If the module has an ``additional_tests`` function, call it and add
+ the return value to the tests.
+ """
+ if module in self._visited:
+ return None
+ self._visited.add(module)
+
+ tests = []
+ tests.append(TestLoader.loadTestsFromModule(self, module))
+
+ if hasattr(module, "additional_tests"):
+ tests.append(module.additional_tests())
+
+ if hasattr(module, '__path__'):
+ for file in resource_listdir(module.__name__, ''):
+ if file.endswith('.py') and file != '__init__.py':
+ submodule = module.__name__ + '.' + file[:-3]
+ else:
+ if resource_exists(module.__name__, file + '/__init__.py'):
+ submodule = module.__name__ + '.' + file
+ else:
+ continue
+ tests.append(self.loadTestsFromName(submodule))
+
+ if len(tests) != 1:
+ return self.suiteClass(tests)
+ else:
+ return tests[0] # don't create a nested suite for only one return
+
+
+# adapted from jaraco.classes.properties:NonDataProperty
+class NonDataProperty:
+ def __init__(self, fget):
+ self.fget = fget
+
+ def __get__(self, obj, objtype=None):
+ if obj is None:
+ return self
+ return self.fget(obj)
+
+
+class test(Command):
+ """Command to run unit tests after in-place build"""
+
+ description = "run unit tests after in-place build (deprecated)"
+
+ user_options = [
+ ('test-module=', 'm', "Run 'test_suite' in specified module"),
+ ('test-suite=', 's',
+ "Run single test, case or suite (e.g. 'module.test_suite')"),
+ ('test-runner=', 'r', "Test runner to use"),
+ ]
+
+ def initialize_options(self):
+ self.test_suite = None
+ self.test_module = None
+ self.test_loader = None
+ self.test_runner = None
+
+ def finalize_options(self):
+
+ if self.test_suite and self.test_module:
+ msg = "You may specify a module or a suite, but not both"
+ raise DistutilsOptionError(msg)
+
+ if self.test_suite is None:
+ if self.test_module is None:
+ self.test_suite = self.distribution.test_suite
+ else:
+ self.test_suite = self.test_module + ".test_suite"
+
+ if self.test_loader is None:
+ self.test_loader = getattr(self.distribution, 'test_loader', None)
+ if self.test_loader is None:
+ self.test_loader = "setuptools.command.test:ScanningLoader"
+ if self.test_runner is None:
+ self.test_runner = getattr(self.distribution, 'test_runner', None)
+
+ @NonDataProperty
+ def test_args(self):
+ return list(self._test_args())
+
+ def _test_args(self):
+ if not self.test_suite and sys.version_info >= (2, 7):
+ yield 'discover'
+ if self.verbose:
+ yield '--verbose'
+ if self.test_suite:
+ yield self.test_suite
+
+ def with_project_on_sys_path(self, func):
+ """
+ Backward compatibility for project_on_sys_path context.
+ """
+ with self.project_on_sys_path():
+ func()
+
+ @contextlib.contextmanager
+ def project_on_sys_path(self, include_dists=[]):
+ with_2to3 = getattr(self.distribution, 'use_2to3', False)
+
+ if with_2to3:
+ # If we run 2to3, we cannot do this in place:
+
+ # Ensure metadata is up-to-date
+ self.reinitialize_command('build_py', inplace=0)
+ self.run_command('build_py')
+ bpy_cmd = self.get_finalized_command("build_py")
+ build_path = normalize_path(bpy_cmd.build_lib)
+
+ # Build extensions
+ self.reinitialize_command('egg_info', egg_base=build_path)
+ self.run_command('egg_info')
+
+ self.reinitialize_command('build_ext', inplace=0)
+ self.run_command('build_ext')
+ else:
+ # Without 2to3, building in place works fine:
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ self.reinitialize_command('build_ext', inplace=1)
+ self.run_command('build_ext')
+
+ ei_cmd = self.get_finalized_command("egg_info")
+
+ old_path = sys.path[:]
+ old_modules = sys.modules.copy()
+
+ try:
+ project_path = normalize_path(ei_cmd.egg_base)
+ sys.path.insert(0, project_path)
+ working_set.__init__()
+ add_activation_listener(lambda dist: dist.activate())
+ require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
+ with self.paths_on_pythonpath([project_path]):
+ yield
+ finally:
+ sys.path[:] = old_path
+ sys.modules.clear()
+ sys.modules.update(old_modules)
+ working_set.__init__()
+
+ @staticmethod
+ @contextlib.contextmanager
+ def paths_on_pythonpath(paths):
+ """
+ Add the indicated paths to the head of the PYTHONPATH environment
+ variable so that subprocesses will also see the packages at
+ these paths.
+
+ Do this in a context that restores the value on exit.
+ """
+ nothing = object()
+ orig_pythonpath = os.environ.get('PYTHONPATH', nothing)
+ current_pythonpath = os.environ.get('PYTHONPATH', '')
+ try:
+ prefix = os.pathsep.join(_unique_everseen(paths))
+ to_join = filter(None, [prefix, current_pythonpath])
+ new_path = os.pathsep.join(to_join)
+ if new_path:
+ os.environ['PYTHONPATH'] = new_path
+ yield
+ finally:
+ if orig_pythonpath is nothing:
+ os.environ.pop('PYTHONPATH', None)
+ else:
+ os.environ['PYTHONPATH'] = orig_pythonpath
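+
+ # Illustrative (names hypothetical): subprocesses started inside the
+ # context see the prepended entries, e.g.
+ #   with test.paths_on_pythonpath(['/tmp/build/lib']):
+ #       subprocess.check_call([sys.executable, '-c', 'import mypkg'])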
+
+ @staticmethod
+ def install_dists(dist):
+ """
+ Install the requirements indicated by self.distribution and
+ return an iterable of the dists that were built.
+ """
+ ir_d = dist.fetch_build_eggs(dist.install_requires)
+ tr_d = dist.fetch_build_eggs(dist.tests_require or [])
+ er_d = dist.fetch_build_eggs(
+ v for k, v in dist.extras_require.items()
+ if k.startswith(':') and evaluate_marker(k[1:])
+ )
+ return itertools.chain(ir_d, tr_d, er_d)
+
+ def run(self):
+ self.announce(
+ "WARNING: Testing via this command is deprecated and will be "
+ "removed in a future version. Users looking for a generic test "
+ "entry point independent of test runner are encouraged to use "
+ "tox.",
+ log.WARN,
+ )
+
+ installed_dists = self.install_dists(self.distribution)
+
+ cmd = ' '.join(self._argv)
+ if self.dry_run:
+ self.announce('skipping "%s" (dry run)' % cmd)
+ return
+
+ self.announce('running "%s"' % cmd)
+
+ paths = map(operator.attrgetter('location'), installed_dists)
+ with self.paths_on_pythonpath(paths):
+ with self.project_on_sys_path():
+ self.run_tests()
+
+ def run_tests(self):
+ # Purge modules under test from sys.modules. The test loader will
+ # re-import them from the build location. Required when 2to3 is used
+ # with namespace packages.
+ if getattr(self.distribution, 'use_2to3', False):
+ module = self.test_suite.split('.')[0]
+ if module in _namespace_packages:
+ del_modules = []
+ if module in sys.modules:
+ del_modules.append(module)
+ module += '.'
+ for name in sys.modules:
+ if name.startswith(module):
+ del_modules.append(name)
+ list(map(sys.modules.__delitem__, del_modules))
+
+ test = unittest.main(
+ None, None, self._argv,
+ testLoader=self._resolve_as_ep(self.test_loader),
+ testRunner=self._resolve_as_ep(self.test_runner),
+ exit=False,
+ )
+ if not test.result.wasSuccessful():
+ msg = 'Test failed: %s' % test.result
+ self.announce(msg, log.ERROR)
+ raise DistutilsError(msg)
+
+ @property
+ def _argv(self):
+ return ['unittest'] + self.test_args
+
+ @staticmethod
+ def _resolve_as_ep(val):
+ """
+ Load the indicated attribute value as if it were specified as an
+ entry point, and call it.
+ """
+ if val is None:
+ return
+ parsed = EntryPoint.parse("x=" + val)
+ return parsed.resolve()()
diff --git a/third_party/python/setuptools/setuptools/command/upload.py b/third_party/python/setuptools/setuptools/command/upload.py
new file mode 100644
index 0000000000..ec7f81e227
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/upload.py
@@ -0,0 +1,17 @@
+from distutils import log
+from distutils.command import upload as orig
+
+from setuptools.errors import RemovedCommandError
+
+
+class upload(orig.upload):
+ """Formerly used to upload packages to PyPI."""
+
+ def run(self):
+ msg = (
+ "The upload command has been removed, use twine to upload "
+ + "instead (https://pypi.org/p/twine)"
+ )
+
+ self.announce("ERROR: " + msg, log.ERROR)
+ raise RemovedCommandError(msg)
diff --git a/third_party/python/setuptools/setuptools/command/upload_docs.py b/third_party/python/setuptools/setuptools/command/upload_docs.py
new file mode 100644
index 0000000000..2559458a1d
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/command/upload_docs.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+"""upload_docs
+
+Implements a Distutils 'upload_docs' subcommand (upload documentation to
+PyPI's pythonhosted.org).
+"""
+
+from base64 import standard_b64encode
+from distutils import log
+from distutils.errors import DistutilsOptionError
+import os
+import socket
+import zipfile
+import tempfile
+import shutil
+import itertools
+import functools
+import http.client
+import urllib.parse
+
+from pkg_resources import iter_entry_points
+from .upload import upload
+
+
+def _encode(s):
+ return s.encode('utf-8', 'surrogateescape')
+
+
+class upload_docs(upload):
+ # override the default repository as upload_docs isn't
+ # supported by Warehouse (and won't be).
+ DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'
+
+ description = 'Upload documentation to PyPI'
+
+ user_options = [
+ ('repository=', 'r',
+ "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
+ ('show-response', None,
+ 'display full response text from server'),
+ ('upload-dir=', None, 'directory to upload'),
+ ]
+ boolean_options = upload.boolean_options
+
+ def has_sphinx(self):
+ if self.upload_dir is None:
+ for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
+ return True
+
+ sub_commands = [('build_sphinx', has_sphinx)]
+
+ def initialize_options(self):
+ upload.initialize_options(self)
+ self.upload_dir = None
+ self.target_dir = None
+
+ def finalize_options(self):
+ upload.finalize_options(self)
+ if self.upload_dir is None:
+ if self.has_sphinx():
+ build_sphinx = self.get_finalized_command('build_sphinx')
+ self.target_dir = build_sphinx.builder_target_dir
+ else:
+ build = self.get_finalized_command('build')
+ self.target_dir = os.path.join(build.build_base, 'docs')
+ else:
+ self.ensure_dirname('upload_dir')
+ self.target_dir = self.upload_dir
+ if 'pypi.python.org' in self.repository:
+ log.warn("Upload_docs command is deprecated. Use RTD instead.")
+ self.announce('Using upload directory %s' % self.target_dir)
+
+ def create_zipfile(self, filename):
+ zip_file = zipfile.ZipFile(filename, "w")
+ try:
+ self.mkpath(self.target_dir) # just in case
+ for root, dirs, files in os.walk(self.target_dir):
+ if root == self.target_dir and not files:
+ tmpl = "no files found in upload directory '%s'"
+ raise DistutilsOptionError(tmpl % self.target_dir)
+ for name in files:
+ full = os.path.join(root, name)
+ relative = root[len(self.target_dir):].lstrip(os.path.sep)
+ dest = os.path.join(relative, name)
+ zip_file.write(full, dest)
+ finally:
+ zip_file.close()
+
+ def run(self):
+ # Run sub commands
+ for cmd_name in self.get_sub_commands():
+ self.run_command(cmd_name)
+
+ tmp_dir = tempfile.mkdtemp()
+ name = self.distribution.metadata.get_name()
+ zip_file = os.path.join(tmp_dir, "%s.zip" % name)
+ try:
+ self.create_zipfile(zip_file)
+ self.upload_file(zip_file)
+ finally:
+ shutil.rmtree(tmp_dir)
+
+ @staticmethod
+ def _build_part(item, sep_boundary):
+ key, values = item
+ title = '\nContent-Disposition: form-data; name="%s"' % key
+ # handle multiple entries for the same name
+ if not isinstance(values, list):
+ values = [values]
+ for value in values:
+ if isinstance(value, tuple):
+ title += '; filename="%s"' % value[0]
+ value = value[1]
+ else:
+ value = _encode(value)
+ yield sep_boundary
+ yield _encode(title)
+ yield b"\n\n"
+ yield value
+ if value and value[-1:] == b'\r':
+ yield b'\n' # write an extra newline (lurve Macs)
+
+ @classmethod
+ def _build_multipart(cls, data):
+ """
+ Build up the MIME payload for the POST data
+ """
+ boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
+ sep_boundary = b'\n--' + boundary.encode('ascii')
+ end_boundary = sep_boundary + b'--'
+ end_items = end_boundary, b"\n",
+ builder = functools.partial(
+ cls._build_part,
+ sep_boundary=sep_boundary,
+ )
+ part_groups = map(builder, data.items())
+ parts = itertools.chain.from_iterable(part_groups)
+ body_items = itertools.chain(parts, end_items)
+ content_type = 'multipart/form-data; boundary=%s' % boundary
+ return b''.join(body_items), content_type
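+
+ # Illustrative (not part of the class): values may be plain strings or
+ # (filename, bytes) tuples, and lists of either for repeated fields, e.g.
+ #   body, ct = upload_docs._build_multipart(
+ #       {':action': 'doc_upload', 'content': ('docs.zip', b'...')})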
+
+ def upload_file(self, filename):
+ with open(filename, 'rb') as f:
+ content = f.read()
+ meta = self.distribution.metadata
+ data = {
+ ':action': 'doc_upload',
+ 'name': meta.get_name(),
+ 'content': (os.path.basename(filename), content),
+ }
+ # set up the authentication
+ credentials = _encode(self.username + ':' + self.password)
+ credentials = standard_b64encode(credentials).decode('ascii')
+ auth = "Basic " + credentials
+
+ body, ct = self._build_multipart(data)
+
+ msg = "Submitting documentation to %s" % (self.repository)
+ self.announce(msg, log.INFO)
+
+ # build the Request
+ # We can't use urllib2 since we need to send the Basic
+ # auth right with the first request
+ schema, netloc, url, params, query, fragments = \
+ urllib.parse.urlparse(self.repository)
+ assert not params and not query and not fragments
+ if schema == 'http':
+ conn = http.client.HTTPConnection(netloc)
+ elif schema == 'https':
+ conn = http.client.HTTPSConnection(netloc)
+ else:
+ raise AssertionError("unsupported schema " + schema)
+
+ data = ''
+ try:
+ conn.connect()
+ conn.putrequest("POST", url)
+ content_type = ct
+ conn.putheader('Content-type', content_type)
+ conn.putheader('Content-length', str(len(body)))
+ conn.putheader('Authorization', auth)
+ conn.endheaders()
+ conn.send(body)
+ except socket.error as e:
+ self.announce(str(e), log.ERROR)
+ return
+
+ r = conn.getresponse()
+ if r.status == 200:
+ msg = 'Server response (%s): %s' % (r.status, r.reason)
+ self.announce(msg, log.INFO)
+ elif r.status == 301:
+ location = r.getheader('Location')
+ if location is None:
+ location = 'https://pythonhosted.org/%s/' % meta.get_name()
+ msg = 'Upload successful. Visit %s' % location
+ self.announce(msg, log.INFO)
+ else:
+ msg = 'Upload failed (%s): %s' % (r.status, r.reason)
+ self.announce(msg, log.ERROR)
+ if self.show_response:
+ print('-' * 75, r.read(), '-' * 75)
diff --git a/third_party/python/setuptools/setuptools/config.py b/third_party/python/setuptools/setuptools/config.py
new file mode 100644
index 0000000000..af3a3bcbd5
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/config.py
@@ -0,0 +1,693 @@
+import ast
+import io
+import os
+import sys
+
+import warnings
+import functools
+import importlib
+from collections import defaultdict
+from functools import partial
+from functools import wraps
+import contextlib
+
+from distutils.errors import DistutilsOptionError, DistutilsFileError
+from setuptools.extern.packaging.version import LegacyVersion, parse
+from setuptools.extern.packaging.specifiers import SpecifierSet
+
+
+class StaticModule:
+ """
+ Attempt to read a module's simple constants statically, by parsing
+ its source rather than importing it
+ """
+ def __init__(self, name):
+ spec = importlib.util.find_spec(name)
+ with open(spec.origin) as strm:
+ src = strm.read()
+ module = ast.parse(src)
+ vars(self).update(locals())
+ del self.self
+
+ def __getattr__(self, attr):
+ try:
+ return next(
+ ast.literal_eval(statement.value)
+ for statement in self.module.body
+ if isinstance(statement, ast.Assign)
+ for target in statement.targets
+ if isinstance(target, ast.Name) and target.id == attr
+ )
+ except Exception as e:
+ raise AttributeError(
+ "{self.name} has no attribute {attr}".format(**locals())
+ ) from e
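+
+# Illustrative: StaticModule('mypkg').__version__ returns the literal
+# assigned in the module's source without executing it; a non-literal
+# value raises AttributeError. ('mypkg' is hypothetical.)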
+
+
+@contextlib.contextmanager
+def patch_path(path):
+ """
+ Add path to front of sys.path for the duration of the context.
+ """
+ try:
+ sys.path.insert(0, path)
+ yield
+ finally:
+ sys.path.remove(path)
+
+
+def read_configuration(
+ filepath, find_others=False, ignore_option_errors=False):
+ """Read given configuration file and returns options from it as a dict.
+
+ :param str|unicode filepath: Path to configuration file
+ to get options from.
+
+ :param bool find_others: Whether to search for other configuration files
+ which could be in various places.
+
+ :param bool ignore_option_errors: Whether to silently ignore
+ options whose values could not be resolved (e.g. due to exceptions
+ in directives such as file:, attr:, etc.).
+ If False, exceptions are propagated as expected.
+
+ :rtype: dict
+ """
+ from setuptools.dist import Distribution, _Distribution
+
+ filepath = os.path.abspath(filepath)
+
+ if not os.path.isfile(filepath):
+ raise DistutilsFileError(
+ 'Configuration file %s does not exist.' % filepath)
+
+ current_directory = os.getcwd()
+ os.chdir(os.path.dirname(filepath))
+
+ try:
+ dist = Distribution()
+
+ filenames = dist.find_config_files() if find_others else []
+ if filepath not in filenames:
+ filenames.append(filepath)
+
+ _Distribution.parse_config_files(dist, filenames=filenames)
+
+ handlers = parse_configuration(
+ dist, dist.command_options,
+ ignore_option_errors=ignore_option_errors)
+
+ finally:
+ os.chdir(current_directory)
+
+ return configuration_to_dict(handlers)
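+
+# Illustrative usage (path hypothetical):
+#   conf = read_configuration('/path/to/setup.cfg')
+#   conf['metadata'].get('version')  # e.g. '1.0.0'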
+
+
+def _get_option(target_obj, key):
+ """
+ Given a target object and option key, get that option from
+ the target object, either through a get_{key} method or
+ from an attribute directly.
+ """
+ getter_name = 'get_{key}'.format(**locals())
+ by_attribute = functools.partial(getattr, target_obj, key)
+ getter = getattr(target_obj, getter_name, by_attribute)
+ return getter()
+
+
+def configuration_to_dict(handlers):
+ """Returns configuration data gathered by given handlers as a dict.
+
+ :param list[ConfigHandler] handlers: Handlers list,
+ usually from parse_configuration()
+
+ :rtype: dict
+ """
+ config_dict = defaultdict(dict)
+
+ for handler in handlers:
+ for option in handler.set_options:
+ value = _get_option(handler.target_obj, option)
+ config_dict[handler.section_prefix][option] = value
+
+ return config_dict
+
+
+def parse_configuration(
+ distribution, command_options, ignore_option_errors=False):
+ """Performs additional parsing of configuration options
+ for a distribution.
+
+ Returns a list of used option handlers.
+
+ :param Distribution distribution:
+ :param dict command_options:
+ :param bool ignore_option_errors: Whether to silently ignore
+ options whose values could not be resolved (e.g. due to exceptions
+ in directives such as file:, attr:, etc.).
+ If False, exceptions are propagated as expected.
+ :rtype: list
+ """
+ options = ConfigOptionsHandler(
+ distribution, command_options, ignore_option_errors)
+ options.parse()
+
+ meta = ConfigMetadataHandler(
+ distribution.metadata, command_options, ignore_option_errors,
+ distribution.package_dir)
+ meta.parse()
+
+ return meta, options
+
+
+class ConfigHandler:
+ """Handles metadata supplied in configuration files."""
+
+ section_prefix = None
+ """Prefix for config sections handled by this handler.
+ Must be provided by subclasses.
+
+ """
+
+ aliases = {}
+ """Options aliases.
+ For compatibility with various packages. E.g.: d2to1 and pbr.
+ Note: `-` in keys is replaced with `_` by config parser.
+
+ """
+
+ def __init__(self, target_obj, options, ignore_option_errors=False):
+ sections = {}
+
+ section_prefix = self.section_prefix
+ for section_name, section_options in options.items():
+ if not section_name.startswith(section_prefix):
+ continue
+
+ section_name = section_name.replace(section_prefix, '').strip('.')
+ sections[section_name] = section_options
+
+ self.ignore_option_errors = ignore_option_errors
+ self.target_obj = target_obj
+ self.sections = sections
+ self.set_options = []
+
+ @property
+ def parsers(self):
+ """Metadata item name to parser function mapping."""
+ raise NotImplementedError(
+ '%s must provide .parsers property' % self.__class__.__name__)
+
+ def __setitem__(self, option_name, value):
+ unknown = tuple()
+ target_obj = self.target_obj
+
+ # Translate alias into real name.
+ option_name = self.aliases.get(option_name, option_name)
+
+ current_value = getattr(target_obj, option_name, unknown)
+
+ if current_value is unknown:
+ raise KeyError(option_name)
+
+ if current_value:
+ # Value already set. Skipping.
+ return
+
+ skip_option = False
+ parser = self.parsers.get(option_name)
+ if parser:
+ try:
+ value = parser(value)
+
+ except Exception:
+ skip_option = True
+ if not self.ignore_option_errors:
+ raise
+
+ if skip_option:
+ return
+
+ setter = getattr(target_obj, 'set_%s' % option_name, None)
+ if setter is None:
+ setattr(target_obj, option_name, value)
+ else:
+ setter(value)
+
+ self.set_options.append(option_name)
+
+ @classmethod
+ def _parse_list(cls, value, separator=','):
+ """Represents value as a list.
+
+ Value is split either by separator (defaults to comma) or by lines.
+
+ :param value:
+ :param separator: List items separator character.
+ :rtype: list
+ """
+ if isinstance(value, list): # _get_parser_compound case
+ return value
+
+ if '\n' in value:
+ value = value.splitlines()
+ else:
+ value = value.split(separator)
+
+ return [chunk.strip() for chunk in value if chunk.strip()]
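+
+ # Illustrative: _parse_list('a, b') == ['a', 'b'], while a multi-line
+ # value like 'a\nb' is split on newlines instead of the separator.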
+
+ @classmethod
+ def _parse_dict(cls, value):
+ """Represents value as a dict.
+
+ :param value:
+ :rtype: dict
+ """
+ separator = '='
+ result = {}
+ for line in cls._parse_list(value):
+ key, sep, val = line.partition(separator)
+ if sep != separator:
+ raise DistutilsOptionError(
+ 'Unable to parse option value to dict: %s' % value)
+ result[key.strip()] = val.strip()
+
+ return result
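+
+ # Illustrative: _parse_dict('a = 1\nb = 2') == {'a': '1', 'b': '2'};
+ # a line without '=' raises DistutilsOptionError.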
+
+ @classmethod
+ def _parse_bool(cls, value):
+ """Represents value as boolean.
+
+ :param value:
+ :rtype: bool
+ """
+ value = value.lower()
+ return value in ('1', 'true', 'yes')
+
+ @classmethod
+ def _exclude_files_parser(cls, key):
+ """Returns a parser function to make sure field inputs
+ are not files.
+
+ Parses a value after getting the key so error messages are
+ more informative.
+
+ :param key:
+ :rtype: callable
+ """
+ def parser(value):
+ exclude_directive = 'file:'
+ if value.startswith(exclude_directive):
+ raise ValueError(
+ 'Only strings are accepted for the {0} field, '
+ 'files are not accepted'.format(key))
+ return value
+ return parser
+
+ @classmethod
+ def _parse_file(cls, value):
+ """Represents value as a string, allowing including text
+ from nearest files using `file:` directive.
+
+ Directive is sandboxed and won't reach anything outside
+ directory with setup.py.
+
+ Examples:
+ file: README.rst, CHANGELOG.md, src/file.txt
+
+ :param str value:
+ :rtype: str
+ """
+ include_directive = 'file:'
+
+ if not isinstance(value, str):
+ return value
+
+ if not value.startswith(include_directive):
+ return value
+
+ spec = value[len(include_directive):]
+ filepaths = (os.path.abspath(path.strip()) for path in spec.split(','))
+ return '\n'.join(
+ cls._read_file(path)
+ for path in filepaths
+ if (cls._assert_local(path) or True)
+ and os.path.isfile(path)
+ )
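+
+ # Illustrative: in setup.cfg, 'long_description = file: README.rst'
+ # substitutes the contents of README.rst, provided the file lives
+ # under the directory containing setup.py.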
+
+ @staticmethod
+ def _assert_local(filepath):
+ if not filepath.startswith(os.getcwd()):
+ raise DistutilsOptionError(
+ '`file:` directive can not access %s' % filepath)
+
+ @staticmethod
+ def _read_file(filepath):
+ with io.open(filepath, encoding='utf-8') as f:
+ return f.read()
+
+ @classmethod
+ def _parse_attr(cls, value, package_dir=None):
+ """Represents value as a module attribute.
+
+ Examples:
+ attr: package.attr
+ attr: package.module.attr
+
+ :param str value:
+ :rtype: str
+ """
+ attr_directive = 'attr:'
+ if not value.startswith(attr_directive):
+ return value
+
+ attrs_path = value.replace(attr_directive, '').strip().split('.')
+ attr_name = attrs_path.pop()
+
+ module_name = '.'.join(attrs_path)
+ module_name = module_name or '__init__'
+
+ parent_path = os.getcwd()
+ if package_dir:
+ if attrs_path[0] in package_dir:
+ # A custom path was specified for the module we want to import
+ custom_path = package_dir[attrs_path[0]]
+ parts = custom_path.rsplit('/', 1)
+ if len(parts) > 1:
+ parent_path = os.path.join(os.getcwd(), parts[0])
+ module_name = parts[1]
+ else:
+ module_name = custom_path
+ elif '' in package_dir:
+ # A custom parent directory was specified for all root modules
+ parent_path = os.path.join(os.getcwd(), package_dir[''])
+
+ with patch_path(parent_path):
+ try:
+ # attempt to load value statically
+ return getattr(StaticModule(module_name), attr_name)
+ except Exception:
+ # fallback to simple import
+ module = importlib.import_module(module_name)
+
+ return getattr(module, attr_name)
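+
+ # Illustrative: 'version = attr: mypkg.__version__' in setup.cfg resolves
+ # __version__ from mypkg, first statically via StaticModule, importing
+ # the module only as a fallback. ('mypkg' is hypothetical.)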
+
+ @classmethod
+ def _get_parser_compound(cls, *parse_methods):
+ """Returns parser function to represents value as a list.
+
+ Parses a value applying given methods one after another.
+
+ :param parse_methods:
+ :rtype: callable
+ """
+ def parse(value):
+ parsed = value
+
+ for method in parse_methods:
+ parsed = method(parsed)
+
+ return parsed
+
+ return parse
+
+ @classmethod
+ def _parse_section_to_dict(cls, section_options, values_parser=None):
+ """Parses section options into a dictionary.
+
+ Optionally applies a given parser to values.
+
+ :param dict section_options:
+ :param callable values_parser:
+ :rtype: dict
+ """
+ value = {}
+ values_parser = values_parser or (lambda val: val)
+ for key, (_, val) in section_options.items():
+ value[key] = values_parser(val)
+ return value
+
+ def parse_section(self, section_options):
+ """Parses configuration file section.
+
+ :param dict section_options:
+ """
+ for (name, (_, value)) in section_options.items():
+ try:
+ self[name] = value
+
+ except KeyError:
+ pass # Keep silent, as a new option may appear at any time.
+
+ def parse(self):
+ """Parses configuration file items from one
+ or more related sections.
+
+ """
+ for section_name, section_options in self.sections.items():
+
+ method_postfix = ''
+ if section_name: # [section.option] variant
+ method_postfix = '_%s' % section_name
+
+ section_parser_method = getattr(
+ self,
+ # Dots in section names are translated into double underscores.
+ ('parse_section%s' % method_postfix).replace('.', '__'),
+ None)
+
+ if section_parser_method is None:
+ raise DistutilsOptionError(
+ 'Unsupported distribution option section: [%s.%s]' % (
+ self.section_prefix, section_name))
+
+ section_parser_method(section_options)
+
+ def _deprecated_config_handler(self, func, msg, warning_class):
+ """ this function will wrap around parameters that are deprecated
+
+ :param msg: deprecation message
+ :param warning_class: class of warning exception to be raised
+ :param func: function to be wrapped around
+ """
+ @wraps(func)
+ def config_handler(*args, **kwargs):
+ warnings.warn(msg, warning_class)
+ return func(*args, **kwargs)
+
+ return config_handler
+
+
+class ConfigMetadataHandler(ConfigHandler):
+
+ section_prefix = 'metadata'
+
+ aliases = {
+ 'home_page': 'url',
+ 'summary': 'description',
+ 'classifier': 'classifiers',
+ 'platform': 'platforms',
+ }
+
+ strict_mode = False
+ """We need to keep it loose, to be partially compatible with
+ `pbr` and `d2to1` packages which also uses `metadata` section.
+
+ """
+
+ def __init__(self, target_obj, options, ignore_option_errors=False,
+ package_dir=None):
+ super(ConfigMetadataHandler, self).__init__(target_obj, options,
+ ignore_option_errors)
+ self.package_dir = package_dir
+
+ @property
+ def parsers(self):
+ """Metadata item name to parser function mapping."""
+ parse_list = self._parse_list
+ parse_file = self._parse_file
+ parse_dict = self._parse_dict
+ exclude_files_parser = self._exclude_files_parser
+
+ return {
+ 'platforms': parse_list,
+ 'keywords': parse_list,
+ 'provides': parse_list,
+ 'requires': self._deprecated_config_handler(
+ parse_list,
+ "The requires parameter is deprecated, please use "
+ "install_requires for runtime dependencies.",
+ DeprecationWarning),
+ 'obsoletes': parse_list,
+ 'classifiers': self._get_parser_compound(parse_file, parse_list),
+ 'license': exclude_files_parser('license'),
+ 'license_files': parse_list,
+ 'description': parse_file,
+ 'long_description': parse_file,
+ 'version': self._parse_version,
+ 'project_urls': parse_dict,
+ }
+
+ def _parse_version(self, value):
+ """Parses `version` option value.
+
+ :param value:
+ :rtype: str
+
+ """
+ version = self._parse_file(value)
+
+ if version != value:
+ version = version.strip()
+ # Be strict about versions loaded from file because it's easy to
+ # accidentally include newlines and other unintended content
+ if isinstance(parse(version), LegacyVersion):
+ tmpl = (
+ 'Version loaded from {value} does not '
+ 'comply with PEP 440: {version}'
+ )
+ raise DistutilsOptionError(tmpl.format(**locals()))
+
+ return version
+
+ version = self._parse_attr(value, self.package_dir)
+
+ if callable(version):
+ version = version()
+
+ if not isinstance(version, str):
+ if hasattr(version, '__iter__'):
+ version = '.'.join(map(str, version))
+ else:
+ version = '%s' % version
+
+ return version
+
+
+class ConfigOptionsHandler(ConfigHandler):
+
+ section_prefix = 'options'
+
+ @property
+ def parsers(self):
+ """Metadata item name to parser function mapping."""
+ parse_list = self._parse_list
+ parse_list_semicolon = partial(self._parse_list, separator=';')
+ parse_bool = self._parse_bool
+ parse_dict = self._parse_dict
+
+ return {
+ 'zip_safe': parse_bool,
+ 'use_2to3': parse_bool,
+ 'include_package_data': parse_bool,
+ 'package_dir': parse_dict,
+ 'use_2to3_fixers': parse_list,
+ 'use_2to3_exclude_fixers': parse_list,
+ 'convert_2to3_doctests': parse_list,
+ 'scripts': parse_list,
+ 'eager_resources': parse_list,
+ 'dependency_links': parse_list,
+ 'namespace_packages': parse_list,
+ 'install_requires': parse_list_semicolon,
+ 'setup_requires': parse_list_semicolon,
+ 'tests_require': parse_list_semicolon,
+ 'packages': self._parse_packages,
+ 'entry_points': self._parse_file,
+ 'py_modules': parse_list,
+ 'python_requires': SpecifierSet,
+ }
+
+ def _parse_packages(self, value):
+ """Parses `packages` option value.
+
+ :param value:
+ :rtype: list
+ """
+ find_directives = ['find:', 'find_namespace:']
+ trimmed_value = value.strip()
+
+ if trimmed_value not in find_directives:
+ return self._parse_list(value)
+
+ findns = trimmed_value == find_directives[1]
+
+ # Read function arguments from a dedicated section.
+ find_kwargs = self.parse_section_packages__find(
+ self.sections.get('packages.find', {}))
+
+ if findns:
+ from setuptools import find_namespace_packages as find_packages
+ else:
+ from setuptools import find_packages
+
+ return find_packages(**find_kwargs)
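+
+ # Illustrative setup.cfg snippet handled by this parser:
+ #   [options]
+ #   packages = find:
+ #   [options.packages.find]
+ #   where = src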
+
+ def parse_section_packages__find(self, section_options):
+ """Parses `packages.find` configuration file section.
+
+ To be used in conjunction with _parse_packages().
+
+ :param dict section_options:
+ """
+ section_data = self._parse_section_to_dict(
+ section_options, self._parse_list)
+
+ valid_keys = ['where', 'include', 'exclude']
+
+ find_kwargs = dict(
+ [(k, v) for k, v in section_data.items() if k in valid_keys and v])
+
+ where = find_kwargs.get('where')
+ if where is not None:
+ find_kwargs['where'] = where[0] # cast list to single val
+
+ return find_kwargs
+
+ def parse_section_entry_points(self, section_options):
+ """Parses `entry_points` configuration file section.
+
+ :param dict section_options:
+ """
+ parsed = self._parse_section_to_dict(section_options, self._parse_list)
+ self['entry_points'] = parsed
+
+ def _parse_package_data(self, section_options):
+ parsed = self._parse_section_to_dict(section_options, self._parse_list)
+
+ root = parsed.get('*')
+ if root:
+ parsed[''] = root
+ del parsed['*']
+
+ return parsed
+
+ def parse_section_package_data(self, section_options):
+ """Parses `package_data` configuration file section.
+
+ :param dict section_options:
+ """
+ self['package_data'] = self._parse_package_data(section_options)
+
+ def parse_section_exclude_package_data(self, section_options):
+ """Parses `exclude_package_data` configuration file section.
+
+ :param dict section_options:
+ """
+ self['exclude_package_data'] = self._parse_package_data(
+ section_options)
+
+ def parse_section_extras_require(self, section_options):
+ """Parses `extras_require` configuration file section.
+
+ :param dict section_options:
+ """
+ parse_list = partial(self._parse_list, separator=';')
+ self['extras_require'] = self._parse_section_to_dict(
+ section_options, parse_list)
+
+ def parse_section_data_files(self, section_options):
+ """Parses `data_files` configuration file section.
+
+ :param dict section_options:
+ """
+ parsed = self._parse_section_to_dict(section_options, self._parse_list)
+ self['data_files'] = [(k, v) for k, v in parsed.items()]
diff --git a/third_party/python/setuptools/setuptools/dep_util.py b/third_party/python/setuptools/setuptools/dep_util.py
new file mode 100644
index 0000000000..521eb716a5
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/dep_util.py
@@ -0,0 +1,25 @@
+from distutils.dep_util import newer_group
+
+
+# Yes, this was almost entirely copy-pasted from
+# 'newer_pairwise()'; it is just another convenience
+# function.
+def newer_pairwise_group(sources_groups, targets):
+ """Walk both arguments in parallel, testing if each source group is newer
+ than its corresponding target. Returns a pair of lists (sources_groups,
+ targets) restricted to pairs where the source group is newer than its
+ corresponding target, according to the semantics of 'newer_group()'.
+ """
+ if len(sources_groups) != len(targets):
+ raise ValueError(
+ "'sources_group' and 'targets' must be the same length")
+
+ # build a pair of lists (sources_groups, targets) where source is newer
+ n_sources = []
+ n_targets = []
+ for i in range(len(sources_groups)):
+ if newer_group(sources_groups[i], targets[i]):
+ n_sources.append(sources_groups[i])
+ n_targets.append(targets[i])
+
+ return n_sources, n_targets
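+
+# Illustrative (filenames hypothetical): with sources_groups=[['a.c'], ['b.c']]
+# and targets=['a.o', 'b.o'], only the pairs whose sources are newer than
+# their target survive, e.g. ([['b.c']], ['b.o']).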
diff --git a/third_party/python/setuptools/setuptools/depends.py b/third_party/python/setuptools/setuptools/depends.py
new file mode 100644
index 0000000000..8be6928a31
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/depends.py
@@ -0,0 +1,175 @@
+import sys
+import marshal
+import contextlib
+import dis
+from distutils.version import StrictVersion
+
+from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE
+from . import _imp
+
+
+__all__ = [
+ 'Require', 'find_module', 'get_module_constant', 'extract_constant'
+]
+
+
+class Require:
+ """A prerequisite to building or installing a distribution"""
+
+ def __init__(
+ self, name, requested_version, module, homepage='',
+ attribute=None, format=None):
+
+ if format is None and requested_version is not None:
+ format = StrictVersion
+
+ if format is not None:
+ requested_version = format(requested_version)
+ if attribute is None:
+ attribute = '__version__'
+
+ self.__dict__.update(locals())
+ del self.self
+
+ def full_name(self):
+ """Return full package/distribution name, w/version"""
+ if self.requested_version is not None:
+ return '%s-%s' % (self.name, self.requested_version)
+ return self.name
+
+ def version_ok(self, version):
+ """Is 'version' sufficiently up-to-date?"""
+ return self.attribute is None or self.format is None or \
+ str(version) != "unknown" and version >= self.requested_version
+
+ def get_version(self, paths=None, default="unknown"):
+ """Get version number of installed module, 'None', or 'default'
+
+ Search 'paths' for module. If not found, return 'None'. If found,
+ return the extracted version attribute, or 'default' if no version
+ attribute was specified, or the value cannot be determined without
+ importing the module. The version is formatted according to the
+ requirement's version format (if any), unless it is 'None' or the
+ supplied 'default'.
+ """
+
+ if self.attribute is None:
+ try:
+ f, p, i = find_module(self.module, paths)
+ if f:
+ f.close()
+ return default
+ except ImportError:
+ return None
+
+ v = get_module_constant(self.module, self.attribute, default, paths)
+
+ if v is not None and v is not default and self.format is not None:
+ return self.format(v)
+
+ return v
+
+ def is_present(self, paths=None):
+ """Return true if dependency is present on 'paths'"""
+ return self.get_version(paths) is not None
+
+ def is_current(self, paths=None):
+ """Return true if dependency is present and up-to-date on 'paths'"""
+ version = self.get_version(paths)
+ if version is None:
+ return False
+ return self.version_ok(version)
+
+
+def maybe_close(f):
+ @contextlib.contextmanager
+ def empty():
+ yield
+ return
+ if not f:
+ return empty()
+
+ return contextlib.closing(f)
+
+
+def get_module_constant(module, symbol, default=-1, paths=None):
+ """Find 'module' by searching 'paths', and extract 'symbol'
+
+ Return 'None' if 'module' does not exist on 'paths', or it does not define
+ 'symbol'. If the module defines 'symbol' as a constant, return the
+ constant. Otherwise, return 'default'."""
+
+ try:
+ f, path, (suffix, mode, kind) = info = find_module(module, paths)
+ except ImportError:
+ # Module doesn't exist
+ return None
+
+ with maybe_close(f):
+ if kind == PY_COMPILED:
+ f.read(8) # skip magic & date
+ code = marshal.load(f)
+ elif kind == PY_FROZEN:
+ code = _imp.get_frozen_object(module, paths)
+ elif kind == PY_SOURCE:
+ code = compile(f.read(), path, 'exec')
+ else:
+ # Not something we can parse; we'll have to import it. :(
+ imported = _imp.get_module(module, paths, info)
+ return getattr(imported, symbol, None)
+
+ return extract_constant(code, symbol, default)
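+
+# Illustrative ('foo' is hypothetical): get_module_constant('foo', '__version__')
+# returns the constant assigned in foo's source without importing it when the
+# source can be parsed; None if the module is missing, and the default if the
+# value is not a simple constant.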
+
+
+def extract_constant(code, symbol, default=-1):
+ """Extract the constant value of 'symbol' from 'code'
+
+ If the name 'symbol' is bound to a constant value by the Python code
+ object 'code', return that value. If 'symbol' is bound to an expression,
+ return 'default'. Otherwise, return 'None'.
+
+ Return value is based on the first assignment to 'symbol'. 'symbol' must
+ be a global, or at least a non-"fast" local in the code block. That is,
+ only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol'
+ must be present in 'code.co_names'.
+ """
+ if symbol not in code.co_names:
+ # name's not there, can't possibly be an assignment
+ return None
+
+ name_idx = list(code.co_names).index(symbol)
+
+ STORE_NAME = 90
+ STORE_GLOBAL = 97
+ LOAD_CONST = 100
+
+ const = default
+
+ for byte_code in dis.Bytecode(code):
+ op = byte_code.opcode
+ arg = byte_code.arg
+
+ if op == LOAD_CONST:
+ const = code.co_consts[arg]
+ elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL):
+ return const
+ else:
+ const = default
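+
+# Illustrative: for code compiled from "x = 5", extract_constant(code, 'x')
+# returns 5; for "x = f()", it returns the default, because the STORE is
+# not immediately preceded by a LOAD_CONST.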
+
+
+def _update_globals():
+ """
+ Patch the globals to remove the objects not available on some platforms.
+
+ XXX it'd be better to test assertions about bytecode instead.
+ """
+
+ if not sys.platform.startswith('java') and sys.platform != 'cli':
+ return
+ incompatible = 'extract_constant', 'get_module_constant'
+ for name in incompatible:
+ del globals()[name]
+ __all__.remove(name)
+
+
+_update_globals()
diff --git a/third_party/python/setuptools/setuptools/dist.py b/third_party/python/setuptools/setuptools/dist.py
new file mode 100644
index 0000000000..2c088ef8cb
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/dist.py
@@ -0,0 +1,1009 @@
+# -*- coding: utf-8 -*-
+__all__ = ['Distribution']
+
+import io
+import sys
+import re
+import os
+import warnings
+import numbers
+import distutils.log
+import distutils.core
+import distutils.cmd
+import distutils.dist
+from distutils.util import strtobool
+from distutils.debug import DEBUG
+from distutils.fancy_getopt import translate_longopt
+import itertools
+
+from collections import defaultdict
+from email import message_from_file
+
+from distutils.errors import DistutilsOptionError, DistutilsSetupError
+from distutils.util import rfc822_escape
+from distutils.version import StrictVersion
+
+from setuptools.extern import packaging
+from setuptools.extern import ordered_set
+
+from . import SetuptoolsDeprecationWarning
+
+import setuptools
+from setuptools import windows_support
+from setuptools.monkey import get_unpatched
+from setuptools.config import parse_configuration
+import pkg_resources
+
+__import__('setuptools.extern.packaging.specifiers')
+__import__('setuptools.extern.packaging.version')
+
+
+def _get_unpatched(cls):
+ warnings.warn("Do not call this function", DistDeprecationWarning)
+ return get_unpatched(cls)
+
+
+def get_metadata_version(self):
+ mv = getattr(self, 'metadata_version', None)
+
+ if mv is None:
+ if self.long_description_content_type or self.provides_extras:
+ mv = StrictVersion('2.1')
+ elif (self.maintainer is not None or
+ self.maintainer_email is not None or
+ getattr(self, 'python_requires', None) is not None or
+ self.project_urls):
+ mv = StrictVersion('1.2')
+ elif (self.provides or self.requires or self.obsoletes or
+ self.classifiers or self.download_url):
+ mv = StrictVersion('1.1')
+ else:
+ mv = StrictVersion('1.0')
+
+ self.metadata_version = mv
+
+ return mv
+
+
+def read_pkg_file(self, file):
+ """Reads the metadata values from a file object."""
+ msg = message_from_file(file)
+
+ def _read_field(name):
+ value = msg[name]
+ if value == 'UNKNOWN':
+ return None
+ return value
+
+ def _read_list(name):
+ values = msg.get_all(name, None)
+ if values == []:
+ return None
+ return values
+
+ self.metadata_version = StrictVersion(msg['metadata-version'])
+ self.name = _read_field('name')
+ self.version = _read_field('version')
+ self.description = _read_field('summary')
+ # we are filling author only.
+ self.author = _read_field('author')
+ self.maintainer = None
+ self.author_email = _read_field('author-email')
+ self.maintainer_email = None
+ self.url = _read_field('home-page')
+ self.license = _read_field('license')
+
+ if 'download-url' in msg:
+ self.download_url = _read_field('download-url')
+ else:
+ self.download_url = None
+
+ self.long_description = _read_field('description')
+ self.description = _read_field('summary')
+
+ if 'keywords' in msg:
+ self.keywords = _read_field('keywords').split(',')
+
+ self.platforms = _read_list('platform')
+ self.classifiers = _read_list('classifier')
+
+ # PEP 314 - these fields only exist in 1.1
+ if self.metadata_version == StrictVersion('1.1'):
+ self.requires = _read_list('requires')
+ self.provides = _read_list('provides')
+ self.obsoletes = _read_list('obsoletes')
+ else:
+ self.requires = None
+ self.provides = None
+ self.obsoletes = None
+
+
+# Based on Python 3.5 version
+def write_pkg_file(self, file):
+ """Write the PKG-INFO format data to a file object.
+ """
+ version = self.get_metadata_version()
+
+ def write_field(key, value):
+ file.write("%s: %s\n" % (key, value))
+
+ write_field('Metadata-Version', str(version))
+ write_field('Name', self.get_name())
+ write_field('Version', self.get_version())
+ write_field('Summary', self.get_description())
+ write_field('Home-page', self.get_url())
+
+ if version < StrictVersion('1.2'):
+ write_field('Author', self.get_contact())
+ write_field('Author-email', self.get_contact_email())
+ else:
+ optional_fields = (
+ ('Author', 'author'),
+ ('Author-email', 'author_email'),
+ ('Maintainer', 'maintainer'),
+ ('Maintainer-email', 'maintainer_email'),
+ )
+
+ for field, attr in optional_fields:
+ attr_val = getattr(self, attr)
+
+ if attr_val is not None:
+ write_field(field, attr_val)
+
+ write_field('License', self.get_license())
+ if self.download_url:
+ write_field('Download-URL', self.download_url)
+ for project_url in self.project_urls.items():
+ write_field('Project-URL', '%s, %s' % project_url)
+
+ long_desc = rfc822_escape(self.get_long_description())
+ write_field('Description', long_desc)
+
+ keywords = ','.join(self.get_keywords())
+ if keywords:
+ write_field('Keywords', keywords)
+
+ if version >= StrictVersion('1.2'):
+ for platform in self.get_platforms():
+ write_field('Platform', platform)
+ else:
+ self._write_list(file, 'Platform', self.get_platforms())
+
+ self._write_list(file, 'Classifier', self.get_classifiers())
+
+ # PEP 314
+ self._write_list(file, 'Requires', self.get_requires())
+ self._write_list(file, 'Provides', self.get_provides())
+ self._write_list(file, 'Obsoletes', self.get_obsoletes())
+
+ # Setuptools specific for PEP 345
+ if hasattr(self, 'python_requires'):
+ write_field('Requires-Python', self.python_requires)
+
+ # PEP 566
+ if self.long_description_content_type:
+ write_field(
+ 'Description-Content-Type',
+ self.long_description_content_type
+ )
+ if self.provides_extras:
+ for extra in self.provides_extras:
+ write_field('Provides-Extra', extra)
+
+
+sequence = tuple, list
+
+
+def check_importable(dist, attr, value):
+ try:
+ ep = pkg_resources.EntryPoint.parse('x=' + value)
+ assert not ep.extras
+ except (TypeError, ValueError, AttributeError, AssertionError) as e:
+ raise DistutilsSetupError(
+ "%r must be importable 'module:attrs' string (got %r)"
+ % (attr, value)
+ ) from e
+
+
+def assert_string_list(dist, attr, value):
+ """Verify that value is a string list"""
+ try:
+ # verify that value is a list or tuple to exclude unordered
+ # or single-use iterables
+ assert isinstance(value, (list, tuple))
+ # verify that elements of value are strings
+ assert ''.join(value) != value
+ except (TypeError, ValueError, AttributeError, AssertionError) as e:
+ raise DistutilsSetupError(
+ "%r must be a list of strings (got %r)" % (attr, value)
+ ) from e
+
+
+def check_nsp(dist, attr, value):
+ """Verify that namespace packages are valid"""
+ ns_packages = value
+ assert_string_list(dist, attr, ns_packages)
+ for nsp in ns_packages:
+ if not dist.has_contents_for(nsp):
+ raise DistutilsSetupError(
+ "Distribution contains no modules or packages for " +
+ "namespace package %r" % nsp
+ )
+ parent, sep, child = nsp.rpartition('.')
+ if parent and parent not in ns_packages:
+ distutils.log.warn(
+ "WARNING: %r is declared as a package namespace, but %r"
+ " is not: please correct this in setup.py", nsp, parent
+ )
+
+
+def check_extras(dist, attr, value):
+ """Verify that extras_require mapping is valid"""
+ try:
+ list(itertools.starmap(_check_extra, value.items()))
+ except (TypeError, ValueError, AttributeError) as e:
+ raise DistutilsSetupError(
+ "'extras_require' must be a dictionary whose values are "
+ "strings or lists of strings containing valid project/version "
+ "requirement specifiers."
+ ) from e
+
+
+def _check_extra(extra, reqs):
+ name, sep, marker = extra.partition(':')
+ if marker and pkg_resources.invalid_marker(marker):
+ raise DistutilsSetupError("Invalid environment marker: " + marker)
+ list(pkg_resources.parse_requirements(reqs))
+
+
+def assert_bool(dist, attr, value):
+ """Verify that value is True, False, 0, or 1"""
+ if bool(value) != value:
+ tmpl = "{attr!r} must be a boolean value (got {value!r})"
+ raise DistutilsSetupError(tmpl.format(attr=attr, value=value))
+
+
+def check_requirements(dist, attr, value):
+ """Verify that install_requires is a valid requirements list"""
+ try:
+ list(pkg_resources.parse_requirements(value))
+ if isinstance(value, (dict, set)):
+ raise TypeError("Unordered types are not allowed")
+ except (TypeError, ValueError) as error:
+ tmpl = (
+ "{attr!r} must be a string or list of strings "
+ "containing valid project/version requirement specifiers; {error}"
+ )
+ raise DistutilsSetupError(
+ tmpl.format(attr=attr, error=error)
+ ) from error
+
+
+def check_specifier(dist, attr, value):
+ """Verify that value is a valid version specifier"""
+ try:
+ packaging.specifiers.SpecifierSet(value)
+ except packaging.specifiers.InvalidSpecifier as error:
+ tmpl = (
+ "{attr!r} must be a string "
+ "containing valid version specifiers; {error}"
+ )
+ raise DistutilsSetupError(
+ tmpl.format(attr=attr, error=error)
+ ) from error
+
+
+def check_entry_points(dist, attr, value):
+ """Verify that entry_points map is parseable"""
+ try:
+ pkg_resources.EntryPoint.parse_map(value)
+ except ValueError as e:
+ raise DistutilsSetupError(e) from e
+
+
+def check_test_suite(dist, attr, value):
+ if not isinstance(value, str):
+ raise DistutilsSetupError("test_suite must be a string")
+
+
+def check_package_data(dist, attr, value):
+ """Verify that value is a dictionary of package names to glob lists"""
+ if not isinstance(value, dict):
+ raise DistutilsSetupError(
+ "{!r} must be a dictionary mapping package names to lists of "
+ "string wildcard patterns".format(attr))
+ for k, v in value.items():
+ if not isinstance(k, str):
+ raise DistutilsSetupError(
+ "keys of {!r} dict must be strings (got {!r})"
+ .format(attr, k)
+ )
+ assert_string_list(dist, 'values of {!r} dict'.format(attr), v)
+
+
+def check_packages(dist, attr, value):
+ for pkgname in value:
+ if not re.match(r'\w+(\.\w+)*', pkgname):
+ distutils.log.warn(
+ "WARNING: %r not a valid package name; please use only "
+ ".-separated package names in setup.py", pkgname
+ )
+
+
+_Distribution = get_unpatched(distutils.core.Distribution)
+
+
+class Distribution(_Distribution):
+ """Distribution with support for tests and package data
+
+ This is an enhanced version of 'distutils.dist.Distribution' that
+ effectively adds the following new optional keyword arguments to 'setup()':
+
+ 'install_requires' -- a string or sequence of strings specifying project
+ versions that the distribution requires when installed, in the format
+ used by 'pkg_resources.require()'. They will be installed
+ automatically when the package is installed. If you wish to use
+ packages that are not available in PyPI, or want to give your users an
+ alternate download location, you can add a 'find_links' option to the
+ '[easy_install]' section of your project's 'setup.cfg' file, and then
+ setuptools will scan the listed web pages for links that satisfy the
+ requirements.
+
+ 'extras_require' -- a dictionary mapping names of optional "extras" to the
+ additional requirement(s) that using those extras incurs. For example,
+ this::
+
+ extras_require = dict(reST = ["docutils>=0.3", "reSTedit"])
+
+ indicates that the distribution can optionally provide an extra
+ capability called "reST", but it can only be used if docutils and
+ reSTedit are installed. If the user installs your package using
+ EasyInstall and requests one of your extras, the corresponding
+ additional requirements will be installed if needed.
+
+ 'test_suite' -- the name of a test suite to run for the 'test' command.
+ If the user runs 'python setup.py test', the package will be installed,
+ and the named test suite will be run. The format is the same as
+ would be used on a 'unittest.py' command line. That is, it is the
+ dotted name of an object to import and call to generate a test suite.
+
+ 'package_data' -- a dictionary mapping package names to lists of filenames
+ or globs to use to find data files contained in the named packages.
+ If the dictionary has filenames or globs listed under '""' (the empty
+ string), those names will be searched for in every package, in addition
+ to any names for the specific package. Data files found using these
+ names/globs will be installed along with the package, in the same
+ location as the package. Note that globs are allowed to reference
+ the contents of non-package subdirectories, as long as you use '/' as
+ a path separator. (Globs are automatically converted to
+ platform-specific paths at runtime.)
+
+ In addition to these new keywords, this class also has several new methods
+ for manipulating the distribution's contents. For example, the 'include()'
+ and 'exclude()' methods can be thought of as in-place add and subtract
+ commands that add or remove packages, modules, extensions, and so on from
+ the distribution.
+ """
+
+ _DISTUTILS_UNSUPPORTED_METADATA = {
+ 'long_description_content_type': None,
+ 'project_urls': dict,
+ 'provides_extras': ordered_set.OrderedSet,
+ 'license_files': ordered_set.OrderedSet,
+ }
+
+ _patched_dist = None
+
+ def patch_missing_pkg_info(self, attrs):
+ # Fake up a replacement for the data that would normally come from
+ # PKG-INFO, but which might not yet be built if this is a fresh
+ # checkout.
+ #
+ if not attrs or 'name' not in attrs or 'version' not in attrs:
+ return
+ key = pkg_resources.safe_name(str(attrs['name'])).lower()
+ dist = pkg_resources.working_set.by_key.get(key)
+ if dist is not None and not dist.has_metadata('PKG-INFO'):
+ dist._version = pkg_resources.safe_version(str(attrs['version']))
+ self._patched_dist = dist
+
+ def __init__(self, attrs=None):
+ have_package_data = hasattr(self, "package_data")
+ if not have_package_data:
+ self.package_data = {}
+ attrs = attrs or {}
+ self.dist_files = []
+        # Filter out setuptools-specific options.
+ self.src_root = attrs.pop("src_root", None)
+ self.patch_missing_pkg_info(attrs)
+ self.dependency_links = attrs.pop('dependency_links', [])
+ self.setup_requires = attrs.pop('setup_requires', [])
+ for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+ vars(self).setdefault(ep.name, None)
+ _Distribution.__init__(self, {
+ k: v for k, v in attrs.items()
+ if k not in self._DISTUTILS_UNSUPPORTED_METADATA
+ })
+
+        # Fill in missing metadata fields not supported by distutils.
+        # Note some fields may have been set by other tools (e.g. pbr)
+        # above; they are taken preferentially over setup() arguments.
+ for option, default in self._DISTUTILS_UNSUPPORTED_METADATA.items():
+ for source in self.metadata.__dict__, attrs:
+ if option in source:
+ value = source[option]
+ break
+ else:
+ value = default() if default else None
+ setattr(self.metadata, option, value)
+
+ self.metadata.version = self._normalize_version(
+ self._validate_version(self.metadata.version))
+ self._finalize_requires()
+
+ @staticmethod
+ def _normalize_version(version):
+ if isinstance(version, setuptools.sic) or version is None:
+ return version
+
+ normalized = str(packaging.version.Version(version))
+ if version != normalized:
+ tmpl = "Normalizing '{version}' to '{normalized}'"
+ warnings.warn(tmpl.format(**locals()))
+ return normalized
+ return version
+
+ @staticmethod
+ def _validate_version(version):
+ if isinstance(version, numbers.Number):
+ # Some people apparently take "version number" too literally :)
+ version = str(version)
+
+ if version is not None:
+ try:
+ packaging.version.Version(version)
+ except (packaging.version.InvalidVersion, TypeError):
+ warnings.warn(
+ "The version specified (%r) is an invalid version, this "
+ "may not work as expected with newer versions of "
+ "setuptools, pip, and PyPI. Please see PEP 440 for more "
+ "details." % version
+ )
+ return setuptools.sic(version)
+ return version
+
+ def _finalize_requires(self):
+ """
+ Set `metadata.python_requires` and fix environment markers
+ in `install_requires` and `extras_require`.
+ """
+ if getattr(self, 'python_requires', None):
+ self.metadata.python_requires = self.python_requires
+
+ if getattr(self, 'extras_require', None):
+ for extra in self.extras_require.keys():
+ # Since this gets called multiple times at points where the
+ # keys have become 'converted' extras, ensure that we are only
+ # truly adding extras we haven't seen before here.
+ extra = extra.split(':')[0]
+ if extra:
+ self.metadata.provides_extras.add(extra)
+
+ self._convert_extras_requirements()
+ self._move_install_requirements_markers()
+
+ def _convert_extras_requirements(self):
+ """
+ Convert requirements in `extras_require` of the form
+ `"extra": ["barbazquux; {marker}"]` to
+ `"extra:{marker}": ["barbazquux"]`.
+ """
+ spec_ext_reqs = getattr(self, 'extras_require', None) or {}
+ self._tmp_extras_require = defaultdict(list)
+ for section, v in spec_ext_reqs.items():
+ # Do not strip empty sections.
+ self._tmp_extras_require[section]
+ for r in pkg_resources.parse_requirements(v):
+ suffix = self._suffix_for(r)
+ self._tmp_extras_require[section + suffix].append(r)
+
+ @staticmethod
+ def _suffix_for(req):
+ """
+ For a requirement, return the 'extras_require' suffix for
+ that requirement.
+ """
+ return ':' + str(req.marker) if req.marker else ''
+
+ def _move_install_requirements_markers(self):
+ """
+ Move requirements in `install_requires` that are using environment
+        markers to `extras_require`.
+ """
+
+ # divide the install_requires into two sets, simple ones still
+ # handled by install_requires and more complex ones handled
+ # by extras_require.
+
+ def is_simple_req(req):
+ return not req.marker
+
+ spec_inst_reqs = getattr(self, 'install_requires', None) or ()
+ inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs))
+ simple_reqs = filter(is_simple_req, inst_reqs)
+ complex_reqs = itertools.filterfalse(is_simple_req, inst_reqs)
+ self.install_requires = list(map(str, simple_reqs))
+
+ for r in complex_reqs:
+ self._tmp_extras_require[':' + str(r.marker)].append(r)
+ self.extras_require = dict(
+ (k, [str(r) for r in map(self._clean_req, v)])
+ for k, v in self._tmp_extras_require.items()
+ )
+
+ def _clean_req(self, req):
+ """
+ Given a Requirement, remove environment markers and return it.
+ """
+ req.marker = None
+ return req
+
+ def _parse_config_files(self, filenames=None):
+ """
+ Adapted from distutils.dist.Distribution.parse_config_files,
+ this method provides the same functionality in subtly-improved
+ ways.
+ """
+ from configparser import ConfigParser
+
+ # Ignore install directory options if we have a venv
+ if sys.prefix != sys.base_prefix:
+ ignore_options = [
+ 'install-base', 'install-platbase', 'install-lib',
+ 'install-platlib', 'install-purelib', 'install-headers',
+ 'install-scripts', 'install-data', 'prefix', 'exec-prefix',
+ 'home', 'user', 'root']
+ else:
+ ignore_options = []
+
+ ignore_options = frozenset(ignore_options)
+
+ if filenames is None:
+ filenames = self.find_config_files()
+
+ if DEBUG:
+ self.announce("Distribution.parse_config_files():")
+
+ parser = ConfigParser()
+ for filename in filenames:
+ with io.open(filename, encoding='utf-8') as reader:
+ if DEBUG:
+ self.announce(" reading {filename}".format(**locals()))
+ parser.read_file(reader)
+ for section in parser.sections():
+ options = parser.options(section)
+ opt_dict = self.get_option_dict(section)
+
+ for opt in options:
+ if opt != '__name__' and opt not in ignore_options:
+ val = parser.get(section, opt)
+ opt = opt.replace('-', '_')
+ opt_dict[opt] = (filename, val)
+
+ # Make the ConfigParser forget everything (so we retain
+ # the original filenames that options come from)
+ parser.__init__()
+
+ # If there was a "global" section in the config file, use it
+ # to set Distribution options.
+
+ if 'global' in self.command_options:
+ for (opt, (src, val)) in self.command_options['global'].items():
+ alias = self.negative_opt.get(opt)
+ try:
+ if alias:
+ setattr(self, alias, not strtobool(val))
+ elif opt in ('verbose', 'dry_run'): # ugh!
+ setattr(self, opt, strtobool(val))
+ else:
+ setattr(self, opt, val)
+ except ValueError as e:
+ raise DistutilsOptionError(e) from e
+
+ def _set_command_options(self, command_obj, option_dict=None):
+ """
+ Set the options for 'command_obj' from 'option_dict'. Basically
+ this means copying elements of a dictionary ('option_dict') to
+ attributes of an instance ('command').
+
+ 'command_obj' must be a Command instance. If 'option_dict' is not
+ supplied, uses the standard option dictionary for this command
+ (from 'self.command_options').
+
+        (Adapted from distutils.dist.Distribution._set_command_options)
+ """
+ command_name = command_obj.get_command_name()
+ if option_dict is None:
+ option_dict = self.get_option_dict(command_name)
+
+ if DEBUG:
+ self.announce(" setting options for '%s' command:" % command_name)
+ for (option, (source, value)) in option_dict.items():
+ if DEBUG:
+ self.announce(" %s = %s (from %s)" % (option, value,
+ source))
+ try:
+ bool_opts = [translate_longopt(o)
+ for o in command_obj.boolean_options]
+ except AttributeError:
+ bool_opts = []
+ try:
+ neg_opt = command_obj.negative_opt
+ except AttributeError:
+ neg_opt = {}
+
+ try:
+ is_string = isinstance(value, str)
+ if option in neg_opt and is_string:
+ setattr(command_obj, neg_opt[option], not strtobool(value))
+ elif option in bool_opts and is_string:
+ setattr(command_obj, option, strtobool(value))
+ elif hasattr(command_obj, option):
+ setattr(command_obj, option, value)
+ else:
+ raise DistutilsOptionError(
+ "error in %s: command '%s' has no such option '%s'"
+ % (source, command_name, option))
+ except ValueError as e:
+ raise DistutilsOptionError(e) from e
+
+ def parse_config_files(self, filenames=None, ignore_option_errors=False):
+ """Parses configuration files from various levels
+ and loads configuration.
+
+ """
+ self._parse_config_files(filenames=filenames)
+
+ parse_configuration(self, self.command_options,
+ ignore_option_errors=ignore_option_errors)
+ self._finalize_requires()
+
+ def fetch_build_eggs(self, requires):
+ """Resolve pre-setup requirements"""
+ resolved_dists = pkg_resources.working_set.resolve(
+ pkg_resources.parse_requirements(requires),
+ installer=self.fetch_build_egg,
+ replace_conflicting=True,
+ )
+ for dist in resolved_dists:
+ pkg_resources.working_set.add(dist, replace=True)
+ return resolved_dists
+
+ def finalize_options(self):
+ """
+ Allow plugins to apply arbitrary operations to the
+        distribution. Each hook may optionally define an 'order'
+ to influence the order of execution. Smaller numbers
+ go first and the default is 0.
+ """
+ group = 'setuptools.finalize_distribution_options'
+
+ def by_order(hook):
+ return getattr(hook, 'order', 0)
+ eps = map(lambda e: e.load(), pkg_resources.iter_entry_points(group))
+ for ep in sorted(eps, key=by_order):
+ ep(self)
+
+ def _finalize_setup_keywords(self):
+ for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'):
+ value = getattr(self, ep.name, None)
+ if value is not None:
+ ep.require(installer=self.fetch_build_egg)
+ ep.load()(self, ep.name, value)
+
+ def _finalize_2to3_doctests(self):
+ if getattr(self, 'convert_2to3_doctests', None):
+ # XXX may convert to set here when we can rely on set being builtin
+ self.convert_2to3_doctests = [
+ os.path.abspath(p)
+ for p in self.convert_2to3_doctests
+ ]
+ else:
+ self.convert_2to3_doctests = []
+
+ def get_egg_cache_dir(self):
+ egg_cache_dir = os.path.join(os.curdir, '.eggs')
+ if not os.path.exists(egg_cache_dir):
+ os.mkdir(egg_cache_dir)
+ windows_support.hide_file(egg_cache_dir)
+ readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt')
+ with open(readme_txt_filename, 'w') as f:
+ f.write('This directory contains eggs that were downloaded '
+ 'by setuptools to build, test, and run plug-ins.\n\n')
+ f.write('This directory caches those eggs to prevent '
+ 'repeated downloads.\n\n')
+ f.write('However, it is safe to delete this directory.\n\n')
+
+ return egg_cache_dir
+
+ def fetch_build_egg(self, req):
+ """Fetch an egg needed for building"""
+ from setuptools.installer import fetch_build_egg
+ return fetch_build_egg(self, req)
+
+ def get_command_class(self, command):
+ """Pluggable version of get_command_class()"""
+ if command in self.cmdclass:
+ return self.cmdclass[command]
+
+ eps = pkg_resources.iter_entry_points('distutils.commands', command)
+ for ep in eps:
+ ep.require(installer=self.fetch_build_egg)
+ self.cmdclass[command] = cmdclass = ep.load()
+ return cmdclass
+ else:
+ return _Distribution.get_command_class(self, command)
+
+ def print_commands(self):
+ for ep in pkg_resources.iter_entry_points('distutils.commands'):
+ if ep.name not in self.cmdclass:
+ # don't require extras as the commands won't be invoked
+ cmdclass = ep.resolve()
+ self.cmdclass[ep.name] = cmdclass
+ return _Distribution.print_commands(self)
+
+ def get_command_list(self):
+ for ep in pkg_resources.iter_entry_points('distutils.commands'):
+ if ep.name not in self.cmdclass:
+ # don't require extras as the commands won't be invoked
+ cmdclass = ep.resolve()
+ self.cmdclass[ep.name] = cmdclass
+ return _Distribution.get_command_list(self)
+
+ def include(self, **attrs):
+ """Add items to distribution that are named in keyword arguments
+
+ For example, 'dist.include(py_modules=["x"])' would add 'x' to
+ the distribution's 'py_modules' attribute, if it was not already
+ there.
+
+ Currently, this method only supports inclusion for attributes that are
+ lists or tuples. If you need to add support for adding to other
+ attributes in this or a subclass, you can add an '_include_X' method,
+ where 'X' is the name of the attribute. The method will be called with
+ the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})'
+ will try to call 'dist._include_foo({"bar":"baz"})', which can then
+ handle whatever special inclusion logic is needed.
+ """
+ for k, v in attrs.items():
+ include = getattr(self, '_include_' + k, None)
+ if include:
+ include(v)
+ else:
+ self._include_misc(k, v)
+
+ def exclude_package(self, package):
+ """Remove packages, modules, and extensions in named package"""
+
+ pfx = package + '.'
+ if self.packages:
+ self.packages = [
+ p for p in self.packages
+ if p != package and not p.startswith(pfx)
+ ]
+
+ if self.py_modules:
+ self.py_modules = [
+ p for p in self.py_modules
+ if p != package and not p.startswith(pfx)
+ ]
+
+ if self.ext_modules:
+ self.ext_modules = [
+ p for p in self.ext_modules
+ if p.name != package and not p.name.startswith(pfx)
+ ]
+
+ def has_contents_for(self, package):
+ """Return true if 'exclude_package(package)' would do something"""
+
+ pfx = package + '.'
+
+ for p in self.iter_distribution_names():
+ if p == package or p.startswith(pfx):
+ return True
+
+ def _exclude_misc(self, name, value):
+ """Handle 'exclude()' for list/tuple attrs without a special handler"""
+ if not isinstance(value, sequence):
+ raise DistutilsSetupError(
+ "%s: setting must be a list or tuple (%r)" % (name, value)
+ )
+ try:
+ old = getattr(self, name)
+ except AttributeError as e:
+ raise DistutilsSetupError(
+ "%s: No such distribution setting" % name
+ ) from e
+ if old is not None and not isinstance(old, sequence):
+ raise DistutilsSetupError(
+ name + ": this setting cannot be changed via include/exclude"
+ )
+ elif old:
+ setattr(self, name, [item for item in old if item not in value])
+
+ def _include_misc(self, name, value):
+ """Handle 'include()' for list/tuple attrs without a special handler"""
+
+ if not isinstance(value, sequence):
+ raise DistutilsSetupError(
+ "%s: setting must be a list (%r)" % (name, value)
+ )
+ try:
+ old = getattr(self, name)
+ except AttributeError as e:
+ raise DistutilsSetupError(
+ "%s: No such distribution setting" % name
+ ) from e
+ if old is None:
+ setattr(self, name, value)
+ elif not isinstance(old, sequence):
+ raise DistutilsSetupError(
+ name + ": this setting cannot be changed via include/exclude"
+ )
+ else:
+ new = [item for item in value if item not in old]
+ setattr(self, name, old + new)
+
+ def exclude(self, **attrs):
+ """Remove items from distribution that are named in keyword arguments
+
+ For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from
+ the distribution's 'py_modules' attribute. Excluding packages uses
+ the 'exclude_package()' method, so all of the package's contained
+ packages, modules, and extensions are also excluded.
+
+ Currently, this method only supports exclusion from attributes that are
+ lists or tuples. If you need to add support for excluding from other
+ attributes in this or a subclass, you can add an '_exclude_X' method,
+ where 'X' is the name of the attribute. The method will be called with
+ the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})'
+ will try to call 'dist._exclude_foo({"bar":"baz"})', which can then
+ handle whatever special exclusion logic is needed.
+ """
+ for k, v in attrs.items():
+ exclude = getattr(self, '_exclude_' + k, None)
+ if exclude:
+ exclude(v)
+ else:
+ self._exclude_misc(k, v)
+
+ def _exclude_packages(self, packages):
+ if not isinstance(packages, sequence):
+ raise DistutilsSetupError(
+ "packages: setting must be a list or tuple (%r)" % (packages,)
+ )
+ list(map(self.exclude_package, packages))
+
+ def _parse_command_opts(self, parser, args):
+ # Remove --with-X/--without-X options when processing command args
+ self.global_options = self.__class__.global_options
+ self.negative_opt = self.__class__.negative_opt
+
+ # First, expand any aliases
+ command = args[0]
+ aliases = self.get_option_dict('aliases')
+ while command in aliases:
+ src, alias = aliases[command]
+ del aliases[command] # ensure each alias can expand only once!
+ import shlex
+ args[:1] = shlex.split(alias, True)
+ command = args[0]
+
+ nargs = _Distribution._parse_command_opts(self, parser, args)
+
+ # Handle commands that want to consume all remaining arguments
+ cmd_class = self.get_command_class(command)
+ if getattr(cmd_class, 'command_consumes_arguments', None):
+ self.get_option_dict(command)['args'] = ("command line", nargs)
+ if nargs is not None:
+ return []
+
+ return nargs
+
+ def get_cmdline_options(self):
+ """Return a '{cmd: {opt:val}}' map of all command-line options
+
+ Option names are all long, but do not include the leading '--', and
+ contain dashes rather than underscores. If the option doesn't take
+ an argument (e.g. '--quiet'), the 'val' is 'None'.
+
+ Note that options provided by config files are intentionally excluded.
+ """
+
+ d = {}
+
+ for cmd, opts in self.command_options.items():
+
+ for opt, (src, val) in opts.items():
+
+ if src != "command line":
+ continue
+
+ opt = opt.replace('_', '-')
+
+ if val == 0:
+ cmdobj = self.get_command_obj(cmd)
+ neg_opt = self.negative_opt.copy()
+ neg_opt.update(getattr(cmdobj, 'negative_opt', {}))
+ for neg, pos in neg_opt.items():
+ if pos == opt:
+ opt = neg
+ val = None
+ break
+ else:
+ raise AssertionError("Shouldn't be able to get here")
+
+ elif val == 1:
+ val = None
+
+ d.setdefault(cmd, {})[opt] = val
+
+ return d
+
+ def iter_distribution_names(self):
+ """Yield all packages, modules, and extension names in distribution"""
+
+ for pkg in self.packages or ():
+ yield pkg
+
+ for module in self.py_modules or ():
+ yield module
+
+ for ext in self.ext_modules or ():
+ if isinstance(ext, tuple):
+ name, buildinfo = ext
+ else:
+ name = ext.name
+ if name.endswith('module'):
+ name = name[:-6]
+ yield name
+
+ def handle_display_options(self, option_order):
+ """If there were any non-global "display-only" options
+ (--help-commands or the metadata display options) on the command
+ line, display the requested info and return true; else return
+ false.
+ """
+ import sys
+
+ if self.help_commands:
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Stdout may be StringIO (e.g. in tests)
+ if not isinstance(sys.stdout, io.TextIOWrapper):
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Don't wrap stdout if utf-8 is already the encoding. Provides
+ # workaround for #334.
+ if sys.stdout.encoding.lower() in ('utf-8', 'utf8'):
+ return _Distribution.handle_display_options(self, option_order)
+
+ # Print metadata in UTF-8 no matter the platform
+ encoding = sys.stdout.encoding
+ errors = sys.stdout.errors
+ newline = sys.platform != 'win32' and '\n' or None
+ line_buffering = sys.stdout.line_buffering
+
+ sys.stdout = io.TextIOWrapper(
+ sys.stdout.detach(), 'utf-8', errors, newline, line_buffering)
+ try:
+ return _Distribution.handle_display_options(self, option_order)
+ finally:
+ sys.stdout = io.TextIOWrapper(
+ sys.stdout.detach(), encoding, errors, newline, line_buffering)
+
+
+class DistDeprecationWarning(SetuptoolsDeprecationWarning):
+ """Class for warning about deprecations in dist in
+ setuptools. Not ignored by default, unlike DeprecationWarning."""
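For context, the marker plumbing in the Distribution class above is easier to see in isolation. The following is a minimal standalone sketch (not setuptools internals, though it mirrors what `_convert_extras_requirements`, `_suffix_for`, and `_clean_req` do) of how a marker-bearing extra is rewritten into a ':marker'-suffixed section:

import pkg_resources
from collections import defaultdict

def convert(extras_require):
    """Fold environment markers into the extras_require section names."""
    result = defaultdict(list)
    for section, reqs in extras_require.items():
        result[section]  # keep empty sections, as the real code does
        for req in pkg_resources.parse_requirements(reqs):
            suffix = ':' + str(req.marker) if req.marker else ''
            req.marker = None  # mirror _clean_req()
            result[section + suffix].append(str(req))
    return dict(result)

print(convert({'ssl': ['wincertstore==0.2; sys_platform=="win32"']}))
# roughly: {'ssl': [], 'ssl:sys_platform == "win32"': ['wincertstore==0.2']}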
diff --git a/third_party/python/setuptools/setuptools/errors.py b/third_party/python/setuptools/setuptools/errors.py
new file mode 100644
index 0000000000..2701747f56
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/errors.py
@@ -0,0 +1,16 @@
+"""setuptools.errors
+
+Provides exceptions used by setuptools modules.
+"""
+
+from distutils.errors import DistutilsError
+
+
+class RemovedCommandError(DistutilsError, RuntimeError):
+ """Error used for commands that have been removed in setuptools.
+
+ Since ``setuptools`` is built on ``distutils``, simply removing a command
+ from ``setuptools`` will make the behavior fall back to ``distutils``; this
+ error is raised if a command exists in ``distutils`` but has been actively
+ removed in ``setuptools``.
+ """
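As a hedged usage sketch (the command name and message below are hypothetical, not taken from setuptools), a removed command keeps a distutils-visible command class but fails loudly instead of silently falling back:

from distutils.cmd import Command
from setuptools.errors import RemovedCommandError

class frobnicate(Command):  # hypothetical stub for a removed command
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        raise RemovedCommandError(
            "this command was removed; invoking it must not fall back "
            "to the distutils implementation")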
diff --git a/third_party/python/setuptools/setuptools/extension.py b/third_party/python/setuptools/setuptools/extension.py
new file mode 100644
index 0000000000..1820722a49
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/extension.py
@@ -0,0 +1,55 @@
+import re
+import functools
+import distutils.core
+import distutils.errors
+import distutils.extension
+
+from .monkey import get_unpatched
+
+
+def _have_cython():
+ """
+ Return True if Cython can be imported.
+ """
+ cython_impl = 'Cython.Distutils.build_ext'
+ try:
+ # from (cython_impl) import build_ext
+ __import__(cython_impl, fromlist=['build_ext']).build_ext
+ return True
+ except Exception:
+ pass
+ return False
+
+
+# for compatibility
+have_pyrex = _have_cython
+
+_Extension = get_unpatched(distutils.core.Extension)
+
+
+class Extension(_Extension):
+ """Extension that uses '.c' files in place of '.pyx' files"""
+
+ def __init__(self, name, sources, *args, **kw):
+ # The *args is needed for compatibility as calls may use positional
+ # arguments. py_limited_api may be set only via keyword.
+ self.py_limited_api = kw.pop("py_limited_api", False)
+ _Extension.__init__(self, name, sources, *args, **kw)
+
+ def _convert_pyx_sources_to_lang(self):
+ """
+ Replace sources with .pyx extensions to sources with the target
+ language extension. This mechanism allows language authors to supply
+ pre-converted sources but to prefer the .pyx sources.
+ """
+ if _have_cython():
+ # the build has Cython, so allow it to compile the .pyx files
+ return
+ lang = self.language or ''
+ target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
+ sub = functools.partial(re.sub, '.pyx$', target_ext)
+ self.sources = list(map(sub, self.sources))
+
+
+class Library(Extension):
+ """Just like a regular Extension, but built as a library instead"""
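A quick sketch of the .pyx fallback that `_convert_pyx_sources_to_lang` performs when Cython is absent (the source list and language value here are hypothetical, standing in for Extension attributes):

import re
import functools

sources = ['pkg/fast.pyx', 'pkg/helper.c']
lang = 'c++'  # would come from Extension(..., language='c++')
target_ext = '.cpp' if lang.lower() == 'c++' else '.c'
sub = functools.partial(re.sub, '.pyx$', target_ext)
print(list(map(sub, sources)))  # ['pkg/fast.cpp', 'pkg/helper.c']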
diff --git a/third_party/python/setuptools/setuptools/extern/__init__.py b/third_party/python/setuptools/setuptools/extern/__init__.py
new file mode 100644
index 0000000000..b7f30dc2e3
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/extern/__init__.py
@@ -0,0 +1,66 @@
+import sys
+
+
+class VendorImporter:
+ """
+ A PEP 302 meta path importer for finding optionally-vendored
+ or otherwise naturally-installed packages from root_name.
+ """
+
+ def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
+ self.root_name = root_name
+ self.vendored_names = set(vendored_names)
+ self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
+
+ @property
+ def search_path(self):
+ """
+        Search the vendor package first, then fall back to the natural package.
+ """
+ yield self.vendor_pkg + '.'
+ yield ''
+
+ def find_module(self, fullname, path=None):
+ """
+ Return self when fullname starts with root_name and the
+ target module is one vendored through this importer.
+ """
+ root, base, target = fullname.partition(self.root_name + '.')
+ if root:
+ return
+ if not any(map(target.startswith, self.vendored_names)):
+ return
+ return self
+
+ def load_module(self, fullname):
+ """
+ Iterate over the search path to locate and load fullname.
+ """
+ root, base, target = fullname.partition(self.root_name + '.')
+ for prefix in self.search_path:
+ try:
+ extant = prefix + target
+ __import__(extant)
+ mod = sys.modules[extant]
+ sys.modules[fullname] = mod
+ return mod
+ except ImportError:
+ pass
+ else:
+ raise ImportError(
+ "The '{target}' package is required; "
+ "normally this is bundled with this package so if you get "
+ "this warning, consult the packager of your "
+ "distribution.".format(**locals())
+ )
+
+ def install(self):
+ """
+ Install this importer into sys.meta_path if not already present.
+ """
+ if self not in sys.meta_path:
+ sys.meta_path.append(self)
+
+
+names = 'packaging', 'pyparsing', 'ordered_set',
+VendorImporter(__name__, names, 'setuptools._vendor').install()
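A small sketch of what the installed importer does at run time; the printed module name depends on whether the vendored copy is actually present, so treat the output as indicative only:

from setuptools.extern import packaging  # resolved via VendorImporter

# With the vendored copy present this prints
# 'setuptools._vendor.packaging'; otherwise the natural 'packaging'.
print(packaging.__name__)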
diff --git a/third_party/python/setuptools/setuptools/glob.py b/third_party/python/setuptools/setuptools/glob.py
new file mode 100644
index 0000000000..9d7cbc5da6
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/glob.py
@@ -0,0 +1,174 @@
+"""
+Filename globbing utility. Mostly a copy of `glob` from Python 3.5.
+
+Changes include:
+ * `yield from` and PEP3102 `*` removed.
+ * Hidden files are not ignored.
+"""
+
+import os
+import re
+import fnmatch
+
+__all__ = ["glob", "iglob", "escape"]
+
+
+def glob(pathname, recursive=False):
+ """Return a list of paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la
+    fnmatch. Unlike the standard library glob, filenames starting
+    with a dot are not special-cased here: '*' and '?' patterns do
+    match them (hidden files are not ignored; see module docstring).
+
+ If recursive is true, the pattern '**' will match any files and
+ zero or more directories and subdirectories.
+ """
+ return list(iglob(pathname, recursive=recursive))
+
+
+def iglob(pathname, recursive=False):
+ """Return an iterator which yields the paths matching a pathname pattern.
+
+    The pattern may contain simple shell-style wildcards a la
+    fnmatch. Unlike the standard library glob, filenames starting
+    with a dot are not special-cased here: '*' and '?' patterns do
+    match them (hidden files are not ignored; see module docstring).
+
+ If recursive is true, the pattern '**' will match any files and
+ zero or more directories and subdirectories.
+ """
+ it = _iglob(pathname, recursive)
+ if recursive and _isrecursive(pathname):
+ s = next(it) # skip empty string
+ assert not s
+ return it
+
+
+def _iglob(pathname, recursive):
+ dirname, basename = os.path.split(pathname)
+ if not has_magic(pathname):
+ if basename:
+ if os.path.lexists(pathname):
+ yield pathname
+ else:
+ # Patterns ending with a slash should match only directories
+ if os.path.isdir(dirname):
+ yield pathname
+ return
+ if not dirname:
+ if recursive and _isrecursive(basename):
+ for x in glob2(dirname, basename):
+ yield x
+ else:
+ for x in glob1(dirname, basename):
+ yield x
+ return
+ # `os.path.split()` returns the argument itself as a dirname if it is a
+ # drive or UNC path. Prevent an infinite recursion if a drive or UNC path
+ # contains magic characters (i.e. r'\\?\C:').
+ if dirname != pathname and has_magic(dirname):
+ dirs = _iglob(dirname, recursive)
+ else:
+ dirs = [dirname]
+ if has_magic(basename):
+ if recursive and _isrecursive(basename):
+ glob_in_dir = glob2
+ else:
+ glob_in_dir = glob1
+ else:
+ glob_in_dir = glob0
+ for dirname in dirs:
+ for name in glob_in_dir(dirname, basename):
+ yield os.path.join(dirname, name)
+
+
+# These 2 helper functions non-recursively glob inside a literal directory.
+# They return a list of basenames. `glob1` accepts a pattern while `glob0`
+# takes a literal basename (so it only has to check for its existence).
+
+
+def glob1(dirname, pattern):
+ if not dirname:
+ if isinstance(pattern, bytes):
+ dirname = os.curdir.encode('ASCII')
+ else:
+ dirname = os.curdir
+ try:
+ names = os.listdir(dirname)
+ except OSError:
+ return []
+ return fnmatch.filter(names, pattern)
+
+
+def glob0(dirname, basename):
+ if not basename:
+ # `os.path.split()` returns an empty basename for paths ending with a
+ # directory separator. 'q*x/' should match only directories.
+ if os.path.isdir(dirname):
+ return [basename]
+ else:
+ if os.path.lexists(os.path.join(dirname, basename)):
+ return [basename]
+ return []
+
+
+# This helper function recursively yields relative pathnames inside a literal
+# directory.
+
+
+def glob2(dirname, pattern):
+ assert _isrecursive(pattern)
+ yield pattern[:0]
+ for x in _rlistdir(dirname):
+ yield x
+
+
+# Recursively yields relative pathnames inside a literal directory.
+def _rlistdir(dirname):
+ if not dirname:
+ if isinstance(dirname, bytes):
+ dirname = os.curdir.encode('ASCII')
+ else:
+ dirname = os.curdir
+ try:
+ names = os.listdir(dirname)
+ except os.error:
+ return
+ for x in names:
+ yield x
+ path = os.path.join(dirname, x) if dirname else x
+ for y in _rlistdir(path):
+ yield os.path.join(x, y)
+
+
+magic_check = re.compile('([*?[])')
+magic_check_bytes = re.compile(b'([*?[])')
+
+
+def has_magic(s):
+ if isinstance(s, bytes):
+ match = magic_check_bytes.search(s)
+ else:
+ match = magic_check.search(s)
+ return match is not None
+
+
+def _isrecursive(pattern):
+ if isinstance(pattern, bytes):
+ return pattern == b'**'
+ else:
+ return pattern == '**'
+
+
+def escape(pathname):
+ """Escape all special characters.
+ """
+ # Escaping is done by wrapping any of "*?[" between square brackets.
+ # Metacharacters do not work in the drive part and shouldn't be escaped.
+ drive, pathname = os.path.splitdrive(pathname)
+ if isinstance(pathname, bytes):
+ pathname = magic_check_bytes.sub(br'[\1]', pathname)
+ else:
+ pathname = magic_check.sub(r'[\1]', pathname)
+ return drive + pathname
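Exercising this module's public helpers; glob() results depend on the files present in the working directory, so only the escape() output below is deterministic:

from setuptools.glob import glob, escape

print(escape('build[2024]/*.c'))        # build[[]2024]/[*].c
print(glob('**/*.py', recursive=True))  # every .py under the cwd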
diff --git a/third_party/python/setuptools/setuptools/gui-32.exe b/third_party/python/setuptools/setuptools/gui-32.exe
new file mode 100644
index 0000000000..f8d3509653
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/gui-32.exe
Binary files differ
diff --git a/third_party/python/setuptools/setuptools/gui-64.exe b/third_party/python/setuptools/setuptools/gui-64.exe
new file mode 100644
index 0000000000..330c51a5dd
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/gui-64.exe
Binary files differ
diff --git a/third_party/python/setuptools/setuptools/gui.exe b/third_party/python/setuptools/setuptools/gui.exe
new file mode 100644
index 0000000000..f8d3509653
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/gui.exe
Binary files differ
diff --git a/third_party/python/setuptools/setuptools/installer.py b/third_party/python/setuptools/setuptools/installer.py
new file mode 100644
index 0000000000..e630b87479
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/installer.py
@@ -0,0 +1,148 @@
+import glob
+import os
+import subprocess
+import sys
+import tempfile
+from distutils import log
+from distutils.errors import DistutilsError
+
+import pkg_resources
+from setuptools.command.easy_install import easy_install
+from setuptools.wheel import Wheel
+
+
+def _fixup_find_links(find_links):
+    """Ensure the find-links option ends up being a list of strings."""
+ if isinstance(find_links, str):
+ return find_links.split()
+ assert isinstance(find_links, (tuple, list))
+ return find_links
+
+
+def _legacy_fetch_build_egg(dist, req):
+ """Fetch an egg needed for building.
+
+ Legacy path using EasyInstall.
+ """
+ tmp_dist = dist.__class__({'script_args': ['easy_install']})
+ opts = tmp_dist.get_option_dict('easy_install')
+ opts.clear()
+ opts.update(
+ (k, v)
+ for k, v in dist.get_option_dict('easy_install').items()
+ if k in (
+ # don't use any other settings
+            'find_links', 'site_dirs', 'index_url',
+            'optimize', 'allow_hosts',
+ ))
+ if dist.dependency_links:
+ links = dist.dependency_links[:]
+ if 'find_links' in opts:
+ links = _fixup_find_links(opts['find_links'][1]) + links
+ opts['find_links'] = ('setup', links)
+ install_dir = dist.get_egg_cache_dir()
+ cmd = easy_install(
+ tmp_dist, args=["x"], install_dir=install_dir,
+ exclude_scripts=True,
+ always_copy=False, build_directory=None, editable=False,
+ upgrade=False, multi_version=True, no_report=True, user=False
+ )
+ cmd.ensure_finalized()
+ return cmd.easy_install(req)
+
+
+def fetch_build_egg(dist, req):
+ """Fetch an egg needed for building.
+
+ Use pip/wheel to fetch/build a wheel."""
+ # Check pip is available.
+ try:
+ pkg_resources.get_distribution('pip')
+ except pkg_resources.DistributionNotFound:
+ dist.announce(
+ 'WARNING: The pip package is not available, falling back '
+ 'to EasyInstall for handling setup_requires/test_requires; '
+ 'this is deprecated and will be removed in a future version.',
+ log.WARN
+ )
+ return _legacy_fetch_build_egg(dist, req)
+ # Warn if wheel is not.
+ try:
+ pkg_resources.get_distribution('wheel')
+ except pkg_resources.DistributionNotFound:
+ dist.announce('WARNING: The wheel package is not available.', log.WARN)
+    # Ignore environment markers; if the requirement was supplied,
+    # it is required regardless of the marker.
+ req = strip_marker(req)
+ # Take easy_install options into account, but do not override relevant
+ # pip environment variables (like PIP_INDEX_URL or PIP_QUIET); they'll
+ # take precedence.
+ opts = dist.get_option_dict('easy_install')
+ if 'allow_hosts' in opts:
+ raise DistutilsError('the `allow-hosts` option is not supported '
+ 'when using pip to install requirements.')
+ if 'PIP_QUIET' in os.environ or 'PIP_VERBOSE' in os.environ:
+ quiet = False
+ else:
+ quiet = True
+ if 'PIP_INDEX_URL' in os.environ:
+ index_url = None
+ elif 'index_url' in opts:
+ index_url = opts['index_url'][1]
+ else:
+ index_url = None
+ if 'find_links' in opts:
+ find_links = _fixup_find_links(opts['find_links'][1])[:]
+ else:
+ find_links = []
+ if dist.dependency_links:
+ find_links.extend(dist.dependency_links)
+ eggs_dir = os.path.realpath(dist.get_egg_cache_dir())
+ environment = pkg_resources.Environment()
+ for egg_dist in pkg_resources.find_distributions(eggs_dir):
+ if egg_dist in req and environment.can_add(egg_dist):
+ return egg_dist
+ with tempfile.TemporaryDirectory() as tmpdir:
+ cmd = [
+ sys.executable, '-m', 'pip',
+ '--disable-pip-version-check',
+ 'wheel', '--no-deps',
+ '-w', tmpdir,
+ ]
+ if quiet:
+ cmd.append('--quiet')
+ if index_url is not None:
+ cmd.extend(('--index-url', index_url))
+ if find_links is not None:
+ for link in find_links:
+ cmd.extend(('--find-links', link))
+ # If requirement is a PEP 508 direct URL, directly pass
+ # the URL to pip, as `req @ url` does not work on the
+ # command line.
+ if req.url:
+ cmd.append(req.url)
+ else:
+ cmd.append(str(req))
+ try:
+ subprocess.check_call(cmd)
+ except subprocess.CalledProcessError as e:
+ raise DistutilsError(str(e)) from e
+ wheel = Wheel(glob.glob(os.path.join(tmpdir, '*.whl'))[0])
+ dist_location = os.path.join(eggs_dir, wheel.egg_name())
+ wheel.install_as_egg(dist_location)
+ dist_metadata = pkg_resources.PathMetadata(
+ dist_location, os.path.join(dist_location, 'EGG-INFO'))
+ dist = pkg_resources.Distribution.from_filename(
+ dist_location, metadata=dist_metadata)
+ return dist
+
+
+def strip_marker(req):
+ """
+ Return a new requirement without the environment marker to avoid
+ calling pip with something like `babel; extra == "i18n"`, which
+ would always be ignored.
+ """
+ # create a copy to avoid mutating the input
+ req = pkg_resources.Requirement.parse(str(req))
+ req.marker = None
+ return req
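A short sketch of strip_marker() in isolation (the requirement string is arbitrary): the returned copy loses its marker while the input object is left untouched:

import pkg_resources
from setuptools.installer import strip_marker

req = pkg_resources.Requirement.parse('babel>=2.0; extra == "i18n"')
print(str(strip_marker(req)))  # babel>=2.0
print(str(req))                # babel>=2.0; extra == "i18n"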
diff --git a/third_party/python/setuptools/setuptools/launch.py b/third_party/python/setuptools/setuptools/launch.py
new file mode 100644
index 0000000000..0208fdf33b
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/launch.py
@@ -0,0 +1,36 @@
+"""
+Launch the Python script on the command line after
+setuptools is bootstrapped via import.
+"""
+
+# Note that setuptools gets imported implicitly by the
+# invocation of this script using python -m setuptools.launch
+
+import tokenize
+import sys
+
+
+def run():
+ """
+ Run the script in sys.argv[1] as if it had
+ been invoked naturally.
+ """
+ __builtins__
+ script_name = sys.argv[1]
+ namespace = dict(
+ __file__=script_name,
+ __name__='__main__',
+ __doc__=None,
+ )
+ sys.argv[:] = sys.argv[1:]
+
+ open_ = getattr(tokenize, 'open', open)
+ with open_(script_name) as fid:
+ script = fid.read()
+ norm_script = script.replace('\\r\\n', '\\n')
+ code = compile(norm_script, script_name, 'exec')
+ exec(code, namespace)
+
+
+if __name__ == '__main__':
+ run()
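Usage sketch (the script name is hypothetical): the module is meant to be run as an interpreter entry point, so the snippet below only sets things up and shows the equivalent invocation without actually exec'ing anything:

# Shell form:
#   python -m setuptools.launch setup.py --version
import sys
import setuptools.launch

sys.argv = ['setuptools.launch', 'setup.py', '--version']
# setuptools.launch.run() would now exec setup.py with sys.argv trimmed
# to ['setup.py', '--version'] and __name__ == '__main__'.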
diff --git a/third_party/python/setuptools/setuptools/lib2to3_ex.py b/third_party/python/setuptools/setuptools/lib2to3_ex.py
new file mode 100644
index 0000000000..c176abf633
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/lib2to3_ex.py
@@ -0,0 +1,68 @@
+"""
+Customized Mixin2to3 support:
+
+ - adds support for converting doctests
+"""
+
+import warnings
+from distutils.util import Mixin2to3 as _Mixin2to3
+from distutils import log
+from lib2to3.refactor import RefactoringTool, get_fixers_from_package
+
+import setuptools
+from ._deprecation_warning import SetuptoolsDeprecationWarning
+
+
+class DistutilsRefactoringTool(RefactoringTool):
+ def log_error(self, msg, *args, **kw):
+ log.error(msg, *args)
+
+ def log_message(self, msg, *args):
+ log.info(msg, *args)
+
+ def log_debug(self, msg, *args):
+ log.debug(msg, *args)
+
+
+class Mixin2to3(_Mixin2to3):
+ def run_2to3(self, files, doctests=False):
+        # See if the distribution option has been set; otherwise check the
+        # setuptools default.
+ if self.distribution.use_2to3 is not True:
+ return
+ if not files:
+ return
+
+ warnings.warn(
+ "2to3 support is deprecated. If the project still "
+ "requires Python 2 support, please migrate to "
+ "a single-codebase solution or employ an "
+ "independent conversion process.",
+ SetuptoolsDeprecationWarning)
+ log.info("Fixing " + " ".join(files))
+ self.__build_fixer_names()
+ self.__exclude_fixers()
+ if doctests:
+ if setuptools.run_2to3_on_doctests:
+ r = DistutilsRefactoringTool(self.fixer_names)
+ r.refactor(files, write=True, doctests_only=True)
+ else:
+ _Mixin2to3.run_2to3(self, files)
+
+ def __build_fixer_names(self):
+ if self.fixer_names:
+ return
+ self.fixer_names = []
+ for p in setuptools.lib2to3_fixer_packages:
+ self.fixer_names.extend(get_fixers_from_package(p))
+ if self.distribution.use_2to3_fixers is not None:
+ for p in self.distribution.use_2to3_fixers:
+ self.fixer_names.extend(get_fixers_from_package(p))
+
+ def __exclude_fixers(self):
+ excluded_fixers = getattr(self, 'exclude_fixers', [])
+ if self.distribution.use_2to3_exclude_fixers is not None:
+ excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers)
+ for fixer_name in excluded_fixers:
+ if fixer_name in self.fixer_names:
+ self.fixer_names.remove(fixer_name)
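For reference, the fixer-name collection above leans on lib2to3's get_fixers_from_package(), which turns a package of fix_* modules into dotted fixer names. A minimal sketch (runs only on interpreters that still ship lib2to3, i.e. before Python 3.13):

from lib2to3.refactor import get_fixers_from_package

names = get_fixers_from_package('lib2to3.fixes')
print(names[:3])  # e.g. ['lib2to3.fixes.fix_apply', ...]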
diff --git a/third_party/python/setuptools/setuptools/monkey.py b/third_party/python/setuptools/setuptools/monkey.py
new file mode 100644
index 0000000000..fb36dc1a97
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/monkey.py
@@ -0,0 +1,177 @@
+"""
+Monkey patching of distutils.
+"""
+
+import sys
+import distutils.filelist
+import platform
+import types
+import functools
+from importlib import import_module
+import inspect
+
+import setuptools
+
+__all__ = []
+"""
+Everything is private. Contact the project team
+if you think you need this functionality.
+"""
+
+
+def _get_mro(cls):
+ """
+    Returns the base classes for cls, sorted by the MRO.
+
+ Works around an issue on Jython where inspect.getmro will not return all
+ base classes if multiple classes share the same name. Instead, this
+ function will return a tuple containing the class itself, and the contents
+ of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024.
+ """
+ if platform.python_implementation() == "Jython":
+ return (cls,) + cls.__bases__
+ return inspect.getmro(cls)
+
+
+def get_unpatched(item):
+ lookup = (
+ get_unpatched_class if isinstance(item, type) else
+ get_unpatched_function if isinstance(item, types.FunctionType) else
+ lambda item: None
+ )
+ return lookup(item)
+
+
+def get_unpatched_class(cls):
+ """Protect against re-patching the distutils if reloaded
+
+ Also ensures that no other distutils extension monkeypatched the distutils
+ first.
+ """
+ external_bases = (
+ cls
+ for cls in _get_mro(cls)
+ if not cls.__module__.startswith('setuptools')
+ )
+ base = next(external_bases)
+ if not base.__module__.startswith('distutils'):
+ msg = "distutils has already been patched by %r" % cls
+ raise AssertionError(msg)
+ return base
+
+
+def patch_all():
+ # we can't patch distutils.cmd, alas
+ distutils.core.Command = setuptools.Command
+
+ has_issue_12885 = sys.version_info <= (3, 5, 3)
+
+ if has_issue_12885:
+ # fix findall bug in distutils (http://bugs.python.org/issue12885)
+ distutils.filelist.findall = setuptools.findall
+
+ needs_warehouse = (
+ sys.version_info < (2, 7, 13)
+ or
+ (3, 4) < sys.version_info < (3, 4, 6)
+ or
+ (3, 5) < sys.version_info <= (3, 5, 3)
+ )
+
+ if needs_warehouse:
+ warehouse = 'https://upload.pypi.org/legacy/'
+ distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse
+
+ _patch_distribution_metadata()
+
+ # Install Distribution throughout the distutils
+ for module in distutils.dist, distutils.core, distutils.cmd:
+ module.Distribution = setuptools.dist.Distribution
+
+ # Install the patched Extension
+ distutils.core.Extension = setuptools.extension.Extension
+ distutils.extension.Extension = setuptools.extension.Extension
+ if 'distutils.command.build_ext' in sys.modules:
+ sys.modules['distutils.command.build_ext'].Extension = (
+ setuptools.extension.Extension
+ )
+
+ patch_for_msvc_specialized_compiler()
+
+
+def _patch_distribution_metadata():
+ """Patch write_pkg_file and read_pkg_file for higher metadata standards"""
+ for attr in ('write_pkg_file', 'read_pkg_file', 'get_metadata_version'):
+ new_val = getattr(setuptools.dist, attr)
+ setattr(distutils.dist.DistributionMetadata, attr, new_val)
+
+
+def patch_func(replacement, target_mod, func_name):
+ """
+ Patch func_name in target_mod with replacement
+
+ Important - original must be resolved by name to avoid
+ patching an already patched function.
+ """
+ original = getattr(target_mod, func_name)
+
+ # set the 'unpatched' attribute on the replacement to
+ # point to the original.
+ vars(replacement).setdefault('unpatched', original)
+
+ # replace the function in the original module
+ setattr(target_mod, func_name, replacement)
+
+
+def get_unpatched_function(candidate):
+ return getattr(candidate, 'unpatched')
+
+
+def patch_for_msvc_specialized_compiler():
+ """
+ Patch functions in distutils to use standalone Microsoft Visual C++
+ compilers.
+ """
+ # import late to avoid circular imports on Python < 3.5
+ msvc = import_module('setuptools.msvc')
+
+ if platform.system() != 'Windows':
+ # Compilers only available on Microsoft Windows
+ return
+
+ def patch_params(mod_name, func_name):
+ """
+ Prepare the parameters for patch_func to patch indicated function.
+ """
+ repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
+ repl_name = repl_prefix + func_name.lstrip('_')
+ repl = getattr(msvc, repl_name)
+ mod = import_module(mod_name)
+ if not hasattr(mod, func_name):
+ raise ImportError(func_name)
+ return repl, mod, func_name
+
+ # Python 2.7 to 3.4
+ msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')
+
+ # Python 3.5+
+ msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')
+
+ try:
+ # Patch distutils.msvc9compiler
+ patch_func(*msvc9('find_vcvarsall'))
+ patch_func(*msvc9('query_vcvarsall'))
+ except ImportError:
+ pass
+
+ try:
+ # Patch distutils._msvccompiler._get_vc_env
+ patch_func(*msvc14('_get_vc_env'))
+ except ImportError:
+ pass
+
+ try:
+ # Patch distutils._msvccompiler.gen_lib_options for Numpy
+ patch_func(*msvc14('gen_lib_options'))
+ except ImportError:
+ pass
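A self-contained sketch of the patch_func()/get_unpatched_function() round trip used above, applied to a throwaway module instead of distutils:

import types

mod = types.ModuleType('demo')      # stand-in for a distutils module
mod.greet = lambda: 'original'

def replacement():
    return 'patched'

# mirror patch_func(): remember the original, then swap in the new one
vars(replacement).setdefault('unpatched', mod.greet)
mod.greet = replacement

print(mod.greet())                  # patched
print(mod.greet.unpatched())        # original, via get_unpatched_function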
diff --git a/third_party/python/setuptools/setuptools/msvc.py b/third_party/python/setuptools/setuptools/msvc.py
new file mode 100644
index 0000000000..1ead72b421
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/msvc.py
@@ -0,0 +1,1830 @@
+"""
+Improved support for Microsoft Visual C++ compilers.
+
+Known supported compilers:
+--------------------------
+Microsoft Visual C++ 9.0:
+ Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64)
+ Microsoft Windows SDK 6.1 (x86, x64, ia64)
+ Microsoft Windows SDK 7.0 (x86, x64, ia64)
+
+Microsoft Visual C++ 10.0:
+ Microsoft Windows SDK 7.1 (x86, x64, ia64)
+
+Microsoft Visual C++ 14.X:
+ Microsoft Visual C++ Build Tools 2015 (x86, x64, arm)
+ Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64)
+ Microsoft Visual Studio Build Tools 2019 (x86, x64, arm, arm64)
+
+This may also support compilers shipped with compatible Visual Studio versions.
+"""
+
+import json
+from io import open
+from os import listdir, pathsep
+from os.path import join, isfile, isdir, dirname
+import sys
+import platform
+import itertools
+import subprocess
+import distutils.errors
+from setuptools.extern.packaging.version import LegacyVersion
+
+from .monkey import get_unpatched
+
+if platform.system() == 'Windows':
+ import winreg
+ from os import environ
+else:
+ # Mock winreg and environ so the module can be imported on this platform.
+
+ class winreg:
+ HKEY_USERS = None
+ HKEY_CURRENT_USER = None
+ HKEY_LOCAL_MACHINE = None
+ HKEY_CLASSES_ROOT = None
+
+ environ = dict()
+
+_msvc9_suppress_errors = (
+ # msvc9compiler isn't available on some platforms
+ ImportError,
+
+ # msvc9compiler raises DistutilsPlatformError in some
+ # environments. See #1118.
+ distutils.errors.DistutilsPlatformError,
+)
+
+try:
+ from distutils.msvc9compiler import Reg
+except _msvc9_suppress_errors:
+ pass
+
+
+def msvc9_find_vcvarsall(version):
+ """
+ Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone
+ compiler build for Python
+ (VCForPython / Microsoft Visual C++ Compiler for Python 2.7).
+
+ Fall back to original behavior when the standalone compiler is not
+ available.
+
+ Redirect the path of "vcvarsall.bat".
+
+ Parameters
+ ----------
+ version: float
+ Required Microsoft Visual C++ version.
+
+ Return
+ ------
+ str
+ vcvarsall.bat path
+ """
+ vc_base = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f'
+ key = vc_base % ('', version)
+ try:
+ # Per-user installs register the compiler path here
+ productdir = Reg.get_value(key, "installdir")
+ except KeyError:
+ try:
+ # All-user installs on a 64-bit system register here
+ key = vc_base % ('Wow6432Node\\', version)
+ productdir = Reg.get_value(key, "installdir")
+ except KeyError:
+ productdir = None
+
+ if productdir:
+ vcvarsall = join(productdir, "vcvarsall.bat")
+ if isfile(vcvarsall):
+ return vcvarsall
+
+ return get_unpatched(msvc9_find_vcvarsall)(version)
+
+
+def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs):
+ """
+    Patched "distutils.msvc9compiler.query_vcvarsall" to support extra
+ Microsoft Visual C++ 9.0 and 10.0 compilers.
+
+ Set environment without use of "vcvarsall.bat".
+
+ Parameters
+ ----------
+ ver: float
+ Required Microsoft Visual C++ version.
+ arch: str
+ Target architecture.
+
+ Return
+ ------
+ dict
+ environment
+ """
+ # Try to get environment from vcvarsall.bat (Classical way)
+ try:
+ orig = get_unpatched(msvc9_query_vcvarsall)
+ return orig(ver, arch, *args, **kwargs)
+ except distutils.errors.DistutilsPlatformError:
+ # Pass error if Vcvarsall.bat is missing
+ pass
+ except ValueError:
+ # Pass error if environment not set after executing vcvarsall.bat
+ pass
+
+ # If error, try to set environment directly
+ try:
+ return EnvironmentInfo(arch, ver).return_env()
+ except distutils.errors.DistutilsPlatformError as exc:
+ _augment_exception(exc, ver, arch)
+ raise
+
+
+def _msvc14_find_vc2015():
+ """Python 3.8 "distutils/_msvccompiler.py" backport"""
+ try:
+ key = winreg.OpenKey(
+ winreg.HKEY_LOCAL_MACHINE,
+ r"Software\Microsoft\VisualStudio\SxS\VC7",
+ 0,
+ winreg.KEY_READ | winreg.KEY_WOW64_32KEY
+ )
+ except OSError:
+ return None, None
+
+ best_version = 0
+ best_dir = None
+ with key:
+ for i in itertools.count():
+ try:
+ v, vc_dir, vt = winreg.EnumValue(key, i)
+ except OSError:
+ break
+ if v and vt == winreg.REG_SZ and isdir(vc_dir):
+ try:
+ version = int(float(v))
+ except (ValueError, TypeError):
+ continue
+ if version >= 14 and version > best_version:
+ best_version, best_dir = version, vc_dir
+ return best_version, best_dir
+
+
+def _msvc14_find_vc2017():
+ """Python 3.8 "distutils/_msvccompiler.py" backport
+
+ Returns "15, path" based on the result of invoking vswhere.exe
+ If no install is found, returns "None, None"
+
+ The version is returned to avoid unnecessarily changing the function
+ result. It may be ignored when the path is not None.
+
+ If vswhere.exe is not available, by definition, VS 2017 is not
+ installed.
+ """
+ root = environ.get("ProgramFiles(x86)") or environ.get("ProgramFiles")
+ if not root:
+ return None, None
+
+ try:
+ path = subprocess.check_output([
+ join(root, "Microsoft Visual Studio", "Installer", "vswhere.exe"),
+ "-latest",
+ "-prerelease",
+ "-requires", "Microsoft.VisualStudio.Component.VC.Tools.x86.x64",
+ "-property", "installationPath",
+ "-products", "*",
+ ]).decode(encoding="mbcs", errors="strict").strip()
+ except (subprocess.CalledProcessError, OSError, UnicodeDecodeError):
+ return None, None
+
+ path = join(path, "VC", "Auxiliary", "Build")
+ if isdir(path):
+ return 15, path
+
+ return None, None
+
+
+PLAT_SPEC_TO_RUNTIME = {
+ 'x86': 'x86',
+ 'x86_amd64': 'x64',
+ 'x86_arm': 'arm',
+ 'x86_arm64': 'arm64'
+}
+
+
+def _msvc14_find_vcvarsall(plat_spec):
+ """Python 3.8 "distutils/_msvccompiler.py" backport"""
+ _, best_dir = _msvc14_find_vc2017()
+ vcruntime = None
+
+ if plat_spec in PLAT_SPEC_TO_RUNTIME:
+ vcruntime_plat = PLAT_SPEC_TO_RUNTIME[plat_spec]
+ else:
+ vcruntime_plat = 'x64' if 'amd64' in plat_spec else 'x86'
+
+ if best_dir:
+ vcredist = join(best_dir, "..", "..", "redist", "MSVC", "**",
+ vcruntime_plat, "Microsoft.VC14*.CRT",
+ "vcruntime140.dll")
+ try:
+ import glob
+ vcruntime = glob.glob(vcredist, recursive=True)[-1]
+ except (ImportError, OSError, LookupError):
+ vcruntime = None
+
+ if not best_dir:
+ best_version, best_dir = _msvc14_find_vc2015()
+ if best_version:
+ vcruntime = join(best_dir, 'redist', vcruntime_plat,
+ "Microsoft.VC140.CRT", "vcruntime140.dll")
+
+ if not best_dir:
+ return None, None
+
+ vcvarsall = join(best_dir, "vcvarsall.bat")
+ if not isfile(vcvarsall):
+ return None, None
+
+ if not vcruntime or not isfile(vcruntime):
+ vcruntime = None
+
+ return vcvarsall, vcruntime
+
+
+def _msvc14_get_vc_env(plat_spec):
+ """Python 3.8 "distutils/_msvccompiler.py" backport"""
+ if "DISTUTILS_USE_SDK" in environ:
+ return {
+ key.lower(): value
+ for key, value in environ.items()
+ }
+
+ vcvarsall, vcruntime = _msvc14_find_vcvarsall(plat_spec)
+ if not vcvarsall:
+ raise distutils.errors.DistutilsPlatformError(
+ "Unable to find vcvarsall.bat"
+ )
+
+ try:
+ out = subprocess.check_output(
+ 'cmd /u /c "{}" {} && set'.format(vcvarsall, plat_spec),
+ stderr=subprocess.STDOUT,
+ ).decode('utf-16le', errors='replace')
+ except subprocess.CalledProcessError as exc:
+ raise distutils.errors.DistutilsPlatformError(
+ "Error executing {}".format(exc.cmd)
+ ) from exc
+
+ env = {
+ key.lower(): value
+ for key, _, value in
+ (line.partition('=') for line in out.splitlines())
+ if key and value
+ }
+
+ if vcruntime:
+ env['py_vcruntime_redist'] = vcruntime
+ return env
+
+
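+# --- illustrative note (not part of upstream setuptools) ----------------
+# The comprehension above turns each "NAME=value" line emitted by
+# 'vcvarsall.bat ... && set' into a lowercase-keyed entry, silently
+# dropping lines without '='. Self-contained rendition:
+#
+#     out = 'PATH=C:\\VC\\bin\nINCLUDE=C:\\VC\\include\nNOISE\n'
+#     env = {key.lower(): value
+#            for key, _, value in
+#            (line.partition('=') for line in out.splitlines())
+#            if key and value}
+#     assert env == {'path': 'C:\\VC\\bin',
+#                    'include': 'C:\\VC\\include'}
+# ------------------------------------------------------------------------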
+def msvc14_get_vc_env(plat_spec):
+ """
+    Patched "distutils._msvccompiler._get_vc_env" to support extra
+ Microsoft Visual C++ 14.X compilers.
+
+ Set environment without use of "vcvarsall.bat".
+
+ Parameters
+ ----------
+ plat_spec: str
+ Target architecture.
+
+ Return
+ ------
+ dict
+ environment
+ """
+
+ # Always use backport from CPython 3.8
+ try:
+ return _msvc14_get_vc_env(plat_spec)
+ except distutils.errors.DistutilsPlatformError as exc:
+ _augment_exception(exc, 14.0)
+ raise
+
+
+def msvc14_gen_lib_options(*args, **kwargs):
+ """
+    Patched "distutils._msvccompiler.gen_lib_options" to fix
+    compatibility between "numpy.distutils" and "distutils._msvccompiler"
+    (for Numpy < 1.11.2).
+ """
+ if "numpy.distutils" in sys.modules:
+ import numpy as np
+ if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'):
+ return np.distutils.ccompiler.gen_lib_options(*args, **kwargs)
+ return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs)
+
+
+def _augment_exception(exc, version, arch=''):
+ """
+ Add details to the exception message to help guide the user
+ as to what action will resolve it.
+ """
+ # Error if MSVC++ directory not found or environment not set
+ message = exc.args[0]
+
+ if "vcvarsall" in message.lower() or "visual c" in message.lower():
+ # Special error message if MSVC++ not installed
+ tmpl = 'Microsoft Visual C++ {version:0.1f} or greater is required.'
+ message = tmpl.format(**locals())
+ msdownload = 'www.microsoft.com/download/details.aspx?id=%d'
+ if version == 9.0:
+ if arch.lower().find('ia64') > -1:
+ # For VC++ 9.0, if IA64 support is needed, redirect user
+ # to Windows SDK 7.0.
+ # Note: No download link available from Microsoft.
+ message += ' Get it with "Microsoft Windows SDK 7.0"'
+ else:
+                # For VC++ 9.0, redirect user to VC++ for Python 2.7:
+ # This redirection link is maintained by Microsoft.
+ # Contact vspython@microsoft.com if it needs updating.
+ message += ' Get it from http://aka.ms/vcpython27'
+ elif version == 10.0:
+ # For VC++ 10.0 Redirect user to Windows SDK 7.1
+ message += ' Get it with "Microsoft Windows SDK 7.1": '
+ message += msdownload % 8279
+ elif version >= 14.0:
+ # For VC++ 14.X Redirect user to latest Visual C++ Build Tools
+ message += (' Get it with "Microsoft C++ Build Tools": '
+ r'https://visualstudio.microsoft.com'
+ r'/visual-cpp-build-tools/')
+
+ exc.args = (message, )
+
+
+class PlatformInfo:
+ """
+ Current and Target Architectures information.
+
+ Parameters
+ ----------
+ arch: str
+ Target architecture.
+ """
+ current_cpu = environ.get('processor_architecture', '').lower()
+
+ def __init__(self, arch):
+ self.arch = arch.lower().replace('x64', 'amd64')
+
+ @property
+ def target_cpu(self):
+ """
+ Return Target CPU architecture.
+
+ Return
+ ------
+ str
+ Target CPU
+ """
+ return self.arch[self.arch.find('_') + 1:]
+
+ def target_is_x86(self):
+ """
+        Return True if target CPU is x86 32 bits.
+
+ Return
+ ------
+ bool
+ CPU is x86 32 bits
+ """
+ return self.target_cpu == 'x86'
+
+ def current_is_x86(self):
+ """
+        Return True if current CPU is x86 32 bits.
+
+ Return
+ ------
+ bool
+ CPU is x86 32 bits
+ """
+ return self.current_cpu == 'x86'
+
+ def current_dir(self, hidex86=False, x64=False):
+        r"""
+ Current platform specific subfolder.
+
+ Parameters
+ ----------
+ hidex86: bool
+ return '' and not '\x86' if architecture is x86.
+ x64: bool
+ return '\x64' and not '\amd64' if architecture is amd64.
+
+ Return
+ ------
+ str
+            subfolder: '\current', or '' (see hidex86 parameter)
+ """
+ return (
+ '' if (self.current_cpu == 'x86' and hidex86) else
+ r'\x64' if (self.current_cpu == 'amd64' and x64) else
+ r'\%s' % self.current_cpu
+ )
+
+ def target_dir(self, hidex86=False, x64=False):
+ r"""
+ Target platform specific subfolder.
+
+ Parameters
+ ----------
+ hidex86: bool
+ return '' and not '\x86' if architecture is x86.
+ x64: bool
+ return '\x64' and not '\amd64' if architecture is amd64.
+
+ Return
+ ------
+ str
+            subfolder: '\target', or '' (see hidex86 parameter)
+ """
+ return (
+ '' if (self.target_cpu == 'x86' and hidex86) else
+ r'\x64' if (self.target_cpu == 'amd64' and x64) else
+ r'\%s' % self.target_cpu
+ )
+
+ def cross_dir(self, forcex86=False):
+ r"""
+ Cross platform specific subfolder.
+
+ Parameters
+ ----------
+ forcex86: bool
+ Use 'x86' as current architecture even if current architecture is
+ not x86.
+
+ Return
+ ------
+ str
+ subfolder: '' if target architecture is current architecture,
+ '\current_target' if not.
+ """
+ current = 'x86' if forcex86 else self.current_cpu
+ return (
+ '' if self.target_cpu == current else
+ self.target_dir().replace('\\', '\\%s_' % current)
+ )
+
+
+class RegistryInfo:
+ """
+ Microsoft Visual Studio related registry information.
+
+ Parameters
+ ----------
+ platform_info: PlatformInfo
+ "PlatformInfo" instance.
+ """
+ HKEYS = (winreg.HKEY_USERS,
+ winreg.HKEY_CURRENT_USER,
+ winreg.HKEY_LOCAL_MACHINE,
+ winreg.HKEY_CLASSES_ROOT)
+
+ def __init__(self, platform_info):
+ self.pi = platform_info
+
+ @property
+ def visualstudio(self):
+ """
+ Microsoft Visual Studio root registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return 'VisualStudio'
+
+ @property
+ def sxs(self):
+ """
+ Microsoft Visual Studio SxS registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return join(self.visualstudio, 'SxS')
+
+ @property
+ def vc(self):
+ """
+ Microsoft Visual C++ VC7 registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return join(self.sxs, 'VC7')
+
+ @property
+ def vs(self):
+ """
+ Microsoft Visual Studio VS7 registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return join(self.sxs, 'VS7')
+
+ @property
+ def vc_for_python(self):
+ """
+ Microsoft Visual C++ for Python registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return r'DevDiv\VCForPython'
+
+ @property
+ def microsoft_sdk(self):
+ """
+ Microsoft SDK registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return 'Microsoft SDKs'
+
+ @property
+ def windows_sdk(self):
+ """
+ Microsoft Windows/Platform SDK registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return join(self.microsoft_sdk, 'Windows')
+
+ @property
+ def netfx_sdk(self):
+ """
+ Microsoft .NET Framework SDK registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return join(self.microsoft_sdk, 'NETFXSDK')
+
+ @property
+ def windows_kits_roots(self):
+ """
+ Microsoft Windows Kits Roots registry key.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ return r'Windows Kits\Installed Roots'
+
+ def microsoft(self, key, x86=False):
+ """
+ Return key in Microsoft software registry.
+
+ Parameters
+ ----------
+ key: str
+            Registry key path to look in.
+        x86: bool
+            Force x86 software registry.
+
+ Return
+ ------
+ str
+ Registry key
+ """
+ node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node'
+ return join('Software', node64, 'Microsoft', key)
+
+ def lookup(self, key, name):
+ """
+        Look up a value in the Microsoft software registry.
+
+ Parameters
+ ----------
+ key: str
+            Registry key path to look in.
+ name: str
+ Value name to find.
+
+ Return
+ ------
+ str
+ value
+ """
+ key_read = winreg.KEY_READ
+ openkey = winreg.OpenKey
+ closekey = winreg.CloseKey
+ ms = self.microsoft
+ for hkey in self.HKEYS:
+ bkey = None
+ try:
+ bkey = openkey(hkey, ms(key), 0, key_read)
+ except (OSError, IOError):
+ if not self.pi.current_is_x86():
+ try:
+ bkey = openkey(hkey, ms(key, True), 0, key_read)
+ except (OSError, IOError):
+ continue
+ else:
+ continue
+ try:
+ return winreg.QueryValueEx(bkey, name)[0]
+ except (OSError, IOError):
+ pass
+ finally:
+ if bkey:
+ closekey(bkey)
+
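+# Illustrative lookup sketch (registry contents vary by machine; lookup()
+# returns None when nothing matches):
+#
+#   ri = RegistryInfo(PlatformInfo('x86'))
+#   ri.lookup(ri.vs, '14.0')  # e.g. a Visual Studio 14.0 install dir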
+
+class SystemInfo:
+ """
+ Microsoft Windows and Visual Studio related system information.
+
+ Parameters
+ ----------
+ registry_info: RegistryInfo
+ "RegistryInfo" instance.
+ vc_ver: float
+ Required Microsoft Visual C++ version.
+ """
+
+    # Variables and properties in this class use the original CamelCase
+    # variable names from Microsoft source files for easier comparison.
+ WinDir = environ.get('WinDir', '')
+ ProgramFiles = environ.get('ProgramFiles', '')
+ ProgramFilesx86 = environ.get('ProgramFiles(x86)', ProgramFiles)
+
+ def __init__(self, registry_info, vc_ver=None):
+ self.ri = registry_info
+ self.pi = self.ri.pi
+
+ self.known_vs_paths = self.find_programdata_vs_vers()
+
+ # Except for VS15+, VC version is aligned with VS version
+ self.vs_ver = self.vc_ver = (
+ vc_ver or self._find_latest_available_vs_ver())
+
+ def _find_latest_available_vs_ver(self):
+ """
+        Find the latest VC version.
+
+ Return
+ ------
+ float
+ version
+ """
+ reg_vc_vers = self.find_reg_vs_vers()
+
+ if not (reg_vc_vers or self.known_vs_paths):
+ raise distutils.errors.DistutilsPlatformError(
+ 'No Microsoft Visual C++ version found')
+
+ vc_vers = set(reg_vc_vers)
+ vc_vers.update(self.known_vs_paths)
+ return sorted(vc_vers)[-1]
+
+ def find_reg_vs_vers(self):
+ """
+ Find Microsoft Visual Studio versions available in registry.
+
+ Return
+ ------
+ list of float
+ Versions
+ """
+ ms = self.ri.microsoft
+ vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs)
+ vs_vers = []
+ for hkey in self.ri.HKEYS:
+ for key in vckeys:
+ try:
+ bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ)
+ except (OSError, IOError):
+ continue
+ with bkey:
+ subkeys, values, _ = winreg.QueryInfoKey(bkey)
+ for i in range(values):
+ try:
+ ver = float(winreg.EnumValue(bkey, i)[0])
+ if ver not in vs_vers:
+ vs_vers.append(ver)
+ except ValueError:
+ pass
+ for i in range(subkeys):
+ try:
+ ver = float(winreg.EnumKey(bkey, i))
+ if ver not in vs_vers:
+ vs_vers.append(ver)
+ except ValueError:
+ pass
+ return sorted(vs_vers)
+
+ def find_programdata_vs_vers(self):
+ r"""
+        Find Visual Studio 2017+ versions from information in
+ "C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances".
+
+ Return
+ ------
+ dict
+ float version as key, path as value.
+ """
+ vs_versions = {}
+ instances_dir = \
+ r'C:\ProgramData\Microsoft\VisualStudio\Packages\_Instances'
+
+ try:
+ hashed_names = listdir(instances_dir)
+
+ except (OSError, IOError):
+            # Directory does not exist for all Visual Studio versions
+ return vs_versions
+
+ for name in hashed_names:
+ try:
+ # Get VS installation path from "state.json" file
+ state_path = join(instances_dir, name, 'state.json')
+ with open(state_path, 'rt', encoding='utf-8') as state_file:
+ state = json.load(state_file)
+ vs_path = state['installationPath']
+
+ # Raises OSError if this VS installation does not contain VC
+ listdir(join(vs_path, r'VC\Tools\MSVC'))
+
+ # Store version and path
+ vs_versions[self._as_float_version(
+ state['installationVersion'])] = vs_path
+
+ except (OSError, IOError, KeyError):
+                # Skip if the "state.json" file is missing or badly formatted
+ continue
+
+ return vs_versions
+
+ @staticmethod
+ def _as_float_version(version):
+ """
+ Return a string version as a simplified float version (major.minor)
+
+ Parameters
+ ----------
+ version: str
+ Version.
+
+ Return
+ ------
+ float
+ version
+ """
+ return float('.'.join(version.split('.')[:2]))
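+    # Illustrative (not a doctest): _as_float_version('15.9.28307.858')
+    # returns 15.9.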
+
+ @property
+ def VSInstallDir(self):
+ """
+ Microsoft Visual Studio directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ # Default path
+ default = join(self.ProgramFilesx86,
+ 'Microsoft Visual Studio %0.1f' % self.vs_ver)
+
+ # Try to get path from registry, if fail use default path
+ return self.ri.lookup(self.ri.vs, '%0.1f' % self.vs_ver) or default
+
+ @property
+ def VCInstallDir(self):
+ """
+ Microsoft Visual C++ directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ path = self._guess_vc() or self._guess_vc_legacy()
+
+ if not isdir(path):
+ msg = 'Microsoft Visual C++ directory not found'
+ raise distutils.errors.DistutilsPlatformError(msg)
+
+ return path
+
+ def _guess_vc(self):
+ """
+ Locate Visual C++ for VS2017+.
+
+ Return
+ ------
+ str
+ path
+ """
+ if self.vs_ver <= 14.0:
+ return ''
+
+ try:
+ # First search in known VS paths
+ vs_dir = self.known_vs_paths[self.vs_ver]
+ except KeyError:
+ # Else, search with path from registry
+ vs_dir = self.VSInstallDir
+
+ guess_vc = join(vs_dir, r'VC\Tools\MSVC')
+
+ # Subdir with VC exact version as name
+ try:
+ # Update the VC version with real one instead of VS version
+ vc_ver = listdir(guess_vc)[-1]
+ self.vc_ver = self._as_float_version(vc_ver)
+ return join(guess_vc, vc_ver)
+ except (OSError, IOError, IndexError):
+ return ''
+
+ def _guess_vc_legacy(self):
+ """
+ Locate Visual C++ for versions prior to 2017.
+
+ Return
+ ------
+ str
+ path
+ """
+ default = join(self.ProgramFilesx86,
+ r'Microsoft Visual Studio %0.1f\VC' % self.vs_ver)
+
+ # Try to get "VC++ for Python" path from registry as default path
+ reg_path = join(self.ri.vc_for_python, '%0.1f' % self.vs_ver)
+ python_vc = self.ri.lookup(reg_path, 'installdir')
+ default_vc = join(python_vc, 'VC') if python_vc else default
+
+ # Try to get path from registry, if fail use default path
+ return self.ri.lookup(self.ri.vc, '%0.1f' % self.vs_ver) or default_vc
+
+ @property
+ def WindowsSdkVersion(self):
+ """
+ Microsoft Windows SDK versions for specified MSVC++ version.
+
+ Return
+ ------
+ tuple of str
+ versions
+ """
+ if self.vs_ver <= 9.0:
+ return '7.0', '6.1', '6.0a'
+ elif self.vs_ver == 10.0:
+ return '7.1', '7.0a'
+ elif self.vs_ver == 11.0:
+ return '8.0', '8.0a'
+ elif self.vs_ver == 12.0:
+ return '8.1', '8.1a'
+ elif self.vs_ver >= 14.0:
+ return '10.0', '8.1'
+
+ @property
+ def WindowsSdkLastVersion(self):
+ """
+ Microsoft Windows SDK last version.
+
+ Return
+ ------
+ str
+ version
+ """
+ return self._use_last_dir_name(join(self.WindowsSdkDir, 'lib'))
+
+ @property
+ def WindowsSdkDir(self):
+ """
+ Microsoft Windows SDK directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ sdkdir = ''
+ for ver in self.WindowsSdkVersion:
+ # Try to get it from registry
+ loc = join(self.ri.windows_sdk, 'v%s' % ver)
+ sdkdir = self.ri.lookup(loc, 'installationfolder')
+ if sdkdir:
+ break
+ if not sdkdir or not isdir(sdkdir):
+ # Try to get "VC++ for Python" version from registry
+ path = join(self.ri.vc_for_python, '%0.1f' % self.vc_ver)
+ install_base = self.ri.lookup(path, 'installdir')
+ if install_base:
+ sdkdir = join(install_base, 'WinSDK')
+ if not sdkdir or not isdir(sdkdir):
+ # If fail, use default new path
+ for ver in self.WindowsSdkVersion:
+ intver = ver[:ver.rfind('.')]
+ path = r'Microsoft SDKs\Windows Kits\%s' % intver
+ d = join(self.ProgramFiles, path)
+ if isdir(d):
+ sdkdir = d
+ if not sdkdir or not isdir(sdkdir):
+ # If fail, use default old path
+ for ver in self.WindowsSdkVersion:
+ path = r'Microsoft SDKs\Windows\v%s' % ver
+ d = join(self.ProgramFiles, path)
+ if isdir(d):
+ sdkdir = d
+ if not sdkdir:
+ # If fail, use Platform SDK
+ sdkdir = join(self.VCInstallDir, 'PlatformSDK')
+ return sdkdir
+
+ @property
+ def WindowsSDKExecutablePath(self):
+ """
+ Microsoft Windows SDK executable directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ # Find WinSDK NetFx Tools registry dir name
+ if self.vs_ver <= 11.0:
+ netfxver = 35
+ arch = ''
+ else:
+ netfxver = 40
+            hidex86 = self.vs_ver <= 12.0
+ arch = self.pi.current_dir(x64=True, hidex86=hidex86)
+ fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-'))
+
+ # list all possibles registry paths
+ regpaths = []
+ if self.vs_ver >= 14.0:
+ for ver in self.NetFxSdkVersion:
+ regpaths += [join(self.ri.netfx_sdk, ver, fx)]
+
+ for ver in self.WindowsSdkVersion:
+ regpaths += [join(self.ri.windows_sdk, 'v%sA' % ver, fx)]
+
+ # Return installation folder from the more recent path
+ for path in regpaths:
+ execpath = self.ri.lookup(path, 'installationfolder')
+ if execpath:
+ return execpath
+
+ @property
+ def FSharpInstallDir(self):
+ """
+ Microsoft Visual F# directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ path = join(self.ri.visualstudio, r'%0.1f\Setup\F#' % self.vs_ver)
+ return self.ri.lookup(path, 'productdir') or ''
+
+ @property
+ def UniversalCRTSdkDir(self):
+ """
+ Microsoft Universal CRT SDK directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ # Set Kit Roots versions for specified MSVC++ version
+ vers = ('10', '81') if self.vs_ver >= 14.0 else ()
+
+ # Find path of the more recent Kit
+ for ver in vers:
+ sdkdir = self.ri.lookup(self.ri.windows_kits_roots,
+ 'kitsroot%s' % ver)
+ if sdkdir:
+ return sdkdir or ''
+
+ @property
+ def UniversalCRTSdkLastVersion(self):
+ """
+ Microsoft Universal C Runtime SDK last version.
+
+ Return
+ ------
+ str
+ version
+ """
+ return self._use_last_dir_name(join(self.UniversalCRTSdkDir, 'lib'))
+
+ @property
+ def NetFxSdkVersion(self):
+ """
+ Microsoft .NET Framework SDK versions.
+
+ Return
+ ------
+ tuple of str
+ versions
+ """
+ # Set FxSdk versions for specified VS version
+ return (('4.7.2', '4.7.1', '4.7',
+ '4.6.2', '4.6.1', '4.6',
+ '4.5.2', '4.5.1', '4.5')
+ if self.vs_ver >= 14.0 else ())
+
+ @property
+ def NetFxSdkDir(self):
+ """
+ Microsoft .NET Framework SDK directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ sdkdir = ''
+ for ver in self.NetFxSdkVersion:
+ loc = join(self.ri.netfx_sdk, ver)
+ sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
+ if sdkdir:
+ break
+ return sdkdir
+
+ @property
+ def FrameworkDir32(self):
+ """
+ Microsoft .NET Framework 32bit directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ # Default path
+ guess_fw = join(self.WinDir, r'Microsoft.NET\Framework')
+
+ # Try to get path from registry, if fail use default path
+ return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw
+
+ @property
+ def FrameworkDir64(self):
+ """
+ Microsoft .NET Framework 64bit directory.
+
+ Return
+ ------
+ str
+ path
+ """
+ # Default path
+ guess_fw = join(self.WinDir, r'Microsoft.NET\Framework64')
+
+ # Try to get path from registry, if fail use default path
+ return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw
+
+ @property
+ def FrameworkVersion32(self):
+ """
+ Microsoft .NET Framework 32bit versions.
+
+ Return
+ ------
+ tuple of str
+ versions
+ """
+ return self._find_dot_net_versions(32)
+
+ @property
+ def FrameworkVersion64(self):
+ """
+ Microsoft .NET Framework 64bit versions.
+
+ Return
+ ------
+ tuple of str
+ versions
+ """
+ return self._find_dot_net_versions(64)
+
+ def _find_dot_net_versions(self, bits):
+ """
+ Find Microsoft .NET Framework versions.
+
+ Parameters
+ ----------
+ bits: int
+ Platform number of bits: 32 or 64.
+
+ Return
+ ------
+ tuple of str
+ versions
+ """
+ # Find actual .NET version in registry
+ reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits)
+ dot_net_dir = getattr(self, 'FrameworkDir%d' % bits)
+ ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or ''
+
+ # Set .NET versions for specified MSVC++ version
+ if self.vs_ver >= 12.0:
+ return ver, 'v4.0'
+ elif self.vs_ver >= 10.0:
+            return ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver), 'v3.5'
+ elif self.vs_ver == 9.0:
+ return 'v3.5', 'v2.0.50727'
+ elif self.vs_ver == 8.0:
+ return 'v3.0', 'v2.0.50727'
+
+ @staticmethod
+ def _use_last_dir_name(path, prefix=''):
+ """
+ Return name of the last dir in path or '' if no dir found.
+
+ Parameters
+ ----------
+ path: str
+ Use dirs in this path
+ prefix: str
+ Use only dirs starting by this prefix
+
+ Return
+ ------
+ str
+ name
+ """
+ matching_dirs = (
+ dir_name
+ for dir_name in reversed(listdir(path))
+ if isdir(join(path, dir_name)) and
+ dir_name.startswith(prefix)
+ )
+ return next(matching_dirs, None) or ''
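+    # Illustrative: with lib subdirs '10.0.10240.0' and '10.0.17763.0',
+    # _use_last_dir_name(lib_dir) returns whichever listdir() yields last,
+    # typically the highest version (NTFS lists names in sorted order).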
+
+
+class EnvironmentInfo:
+ """
+    Return environment variables for a specified Microsoft Visual C++ version
+    and platform: Lib, Include, Path and libpath.
+
+    This class is compatible with Microsoft Visual C++ 9.0 to 14.X.
+
+    Created by analysing Microsoft environment configuration files like
+    "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ...
+
+ Parameters
+ ----------
+ arch: str
+ Target architecture.
+ vc_ver: float
+ Required Microsoft Visual C++ version. If not set, autodetect the last
+ version.
+ vc_min_ver: float
+ Minimum Microsoft Visual C++ version.
+ """
+
+    # Variables and properties in this class use the original CamelCase
+    # variable names from Microsoft source files for easier comparison.
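+    # Illustrative usage sketch (assumes an installed MSVC toolchain;
+    # values are machine-dependent):
+    #
+    #   env = EnvironmentInfo('x86_amd64', vc_min_ver=14.0).return_env()
+    #   env['include']  # pathsep-joined INCLUDE directories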
+
+ def __init__(self, arch, vc_ver=None, vc_min_ver=0):
+ self.pi = PlatformInfo(arch)
+ self.ri = RegistryInfo(self.pi)
+ self.si = SystemInfo(self.ri, vc_ver)
+
+ if self.vc_ver < vc_min_ver:
+ err = 'No suitable Microsoft Visual C++ version found'
+ raise distutils.errors.DistutilsPlatformError(err)
+
+ @property
+ def vs_ver(self):
+ """
+        Microsoft Visual Studio version.
+
+ Return
+ ------
+ float
+ version
+ """
+ return self.si.vs_ver
+
+ @property
+ def vc_ver(self):
+ """
+ Microsoft Visual C++ version.
+
+ Return
+ ------
+ float
+ version
+ """
+ return self.si.vc_ver
+
+ @property
+ def VSTools(self):
+ """
+ Microsoft Visual Studio Tools.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ paths = [r'Common7\IDE', r'Common7\Tools']
+
+ if self.vs_ver >= 14.0:
+ arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
+ paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow']
+ paths += [r'Team Tools\Performance Tools']
+ paths += [r'Team Tools\Performance Tools%s' % arch_subdir]
+
+ return [join(self.si.VSInstallDir, path) for path in paths]
+
+ @property
+ def VCIncludes(self):
+ """
+ Microsoft Visual C++ & Microsoft Foundation Class Includes.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ return [join(self.si.VCInstallDir, 'Include'),
+ join(self.si.VCInstallDir, r'ATLMFC\Include')]
+
+ @property
+ def VCLibraries(self):
+ """
+ Microsoft Visual C++ & Microsoft Foundation Class Libraries.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver >= 15.0:
+ arch_subdir = self.pi.target_dir(x64=True)
+ else:
+ arch_subdir = self.pi.target_dir(hidex86=True)
+ paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir]
+
+ if self.vs_ver >= 14.0:
+ paths += [r'Lib\store%s' % arch_subdir]
+
+ return [join(self.si.VCInstallDir, path) for path in paths]
+
+ @property
+ def VCStoreRefs(self):
+ """
+ Microsoft Visual C++ store references Libraries.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver < 14.0:
+ return []
+ return [join(self.si.VCInstallDir, r'Lib\store\references')]
+
+ @property
+ def VCTools(self):
+ """
+ Microsoft Visual C++ Tools.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ si = self.si
+ tools = [join(si.VCInstallDir, 'VCPackages')]
+
+        forcex86 = self.vs_ver <= 10.0
+ arch_subdir = self.pi.cross_dir(forcex86)
+ if arch_subdir:
+ tools += [join(si.VCInstallDir, 'Bin%s' % arch_subdir)]
+
+ if self.vs_ver == 14.0:
+ path = 'Bin%s' % self.pi.current_dir(hidex86=True)
+ tools += [join(si.VCInstallDir, path)]
+
+ elif self.vs_ver >= 15.0:
+ host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else
+ r'bin\HostX64%s')
+ tools += [join(
+ si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))]
+
+ if self.pi.current_cpu != self.pi.target_cpu:
+ tools += [join(
+ si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))]
+
+ else:
+ tools += [join(si.VCInstallDir, 'Bin')]
+
+ return tools
+
+ @property
+ def OSLibraries(self):
+ """
+ Microsoft Windows SDK Libraries.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver <= 10.0:
+ arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
+ return [join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
+
+ else:
+ arch_subdir = self.pi.target_dir(x64=True)
+ lib = join(self.si.WindowsSdkDir, 'lib')
+ libver = self._sdk_subdir
+ return [join(lib, '%sum%s' % (libver, arch_subdir))]
+
+ @property
+ def OSIncludes(self):
+ """
+ Microsoft Windows SDK Include.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ include = join(self.si.WindowsSdkDir, 'include')
+
+ if self.vs_ver <= 10.0:
+ return [include, join(include, 'gl')]
+
+ else:
+ if self.vs_ver >= 14.0:
+ sdkver = self._sdk_subdir
+ else:
+ sdkver = ''
+ return [join(include, '%sshared' % sdkver),
+ join(include, '%sum' % sdkver),
+ join(include, '%swinrt' % sdkver)]
+
+ @property
+ def OSLibpath(self):
+ """
+ Microsoft Windows SDK Libraries Paths.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ ref = join(self.si.WindowsSdkDir, 'References')
+ libpath = []
+
+ if self.vs_ver <= 9.0:
+ libpath += self.OSLibraries
+
+ if self.vs_ver >= 11.0:
+ libpath += [join(ref, r'CommonConfiguration\Neutral')]
+
+ if self.vs_ver >= 14.0:
+ libpath += [
+ ref,
+ join(self.si.WindowsSdkDir, 'UnionMetadata'),
+ join(
+ ref, 'Windows.Foundation.UniversalApiContract', '1.0.0.0'),
+ join(ref, 'Windows.Foundation.FoundationContract', '1.0.0.0'),
+ join(
+ ref, 'Windows.Networking.Connectivity.WwanContract',
+ '1.0.0.0'),
+ join(
+ self.si.WindowsSdkDir, 'ExtensionSDKs', 'Microsoft.VCLibs',
+ '%0.1f' % self.vs_ver, 'References', 'CommonConfiguration',
+ 'neutral'),
+ ]
+ return libpath
+
+ @property
+ def SdkTools(self):
+ """
+ Microsoft Windows SDK Tools.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ return list(self._sdk_tools())
+
+ def _sdk_tools(self):
+ """
+ Microsoft Windows SDK Tools paths generator.
+
+ Return
+ ------
+ generator of str
+ paths
+ """
+ if self.vs_ver < 15.0:
+ bin_dir = 'Bin' if self.vs_ver <= 11.0 else r'Bin\x86'
+ yield join(self.si.WindowsSdkDir, bin_dir)
+
+ if not self.pi.current_is_x86():
+ arch_subdir = self.pi.current_dir(x64=True)
+ path = 'Bin%s' % arch_subdir
+ yield join(self.si.WindowsSdkDir, path)
+
+ if self.vs_ver in (10.0, 11.0):
+ if self.pi.target_is_x86():
+ arch_subdir = ''
+ else:
+ arch_subdir = self.pi.current_dir(hidex86=True, x64=True)
+ path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir
+ yield join(self.si.WindowsSdkDir, path)
+
+ elif self.vs_ver >= 15.0:
+ path = join(self.si.WindowsSdkDir, 'Bin')
+ arch_subdir = self.pi.current_dir(x64=True)
+ sdkver = self.si.WindowsSdkLastVersion
+ yield join(path, '%s%s' % (sdkver, arch_subdir))
+
+ if self.si.WindowsSDKExecutablePath:
+ yield self.si.WindowsSDKExecutablePath
+
+ @property
+ def _sdk_subdir(self):
+ """
+ Microsoft Windows SDK version subdir.
+
+ Return
+ ------
+ str
+ subdir
+ """
+ ucrtver = self.si.WindowsSdkLastVersion
+ return ('%s\\' % ucrtver) if ucrtver else ''
+
+ @property
+ def SdkSetup(self):
+ """
+ Microsoft Windows SDK Setup.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver > 9.0:
+ return []
+
+ return [join(self.si.WindowsSdkDir, 'Setup')]
+
+ @property
+ def FxTools(self):
+ """
+ Microsoft .NET Framework Tools.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ pi = self.pi
+ si = self.si
+
+ if self.vs_ver <= 10.0:
+ include32 = True
+ include64 = not pi.target_is_x86() and not pi.current_is_x86()
+ else:
+ include32 = pi.target_is_x86() or pi.current_is_x86()
+ include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64'
+
+ tools = []
+ if include32:
+ tools += [join(si.FrameworkDir32, ver)
+ for ver in si.FrameworkVersion32]
+ if include64:
+ tools += [join(si.FrameworkDir64, ver)
+ for ver in si.FrameworkVersion64]
+ return tools
+
+ @property
+ def NetFxSDKLibraries(self):
+ """
+ Microsoft .Net Framework SDK Libraries.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
+ return []
+
+ arch_subdir = self.pi.target_dir(x64=True)
+ return [join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)]
+
+ @property
+ def NetFxSDKIncludes(self):
+ """
+ Microsoft .Net Framework SDK Includes.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver < 14.0 or not self.si.NetFxSdkDir:
+ return []
+
+ return [join(self.si.NetFxSdkDir, r'include\um')]
+
+ @property
+ def VsTDb(self):
+ """
+ Microsoft Visual Studio Team System Database.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ return [join(self.si.VSInstallDir, r'VSTSDB\Deploy')]
+
+ @property
+ def MSBuild(self):
+ """
+ Microsoft Build Engine.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver < 12.0:
+ return []
+ elif self.vs_ver < 15.0:
+ base_path = self.si.ProgramFilesx86
+ arch_subdir = self.pi.current_dir(hidex86=True)
+ else:
+ base_path = self.si.VSInstallDir
+ arch_subdir = ''
+
+ path = r'MSBuild\%0.1f\bin%s' % (self.vs_ver, arch_subdir)
+ build = [join(base_path, path)]
+
+ if self.vs_ver >= 15.0:
+ # Add Roslyn C# & Visual Basic Compiler
+ build += [join(base_path, path, 'Roslyn')]
+
+ return build
+
+ @property
+ def HTMLHelpWorkshop(self):
+ """
+ Microsoft HTML Help Workshop.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver < 11.0:
+ return []
+
+ return [join(self.si.ProgramFilesx86, 'HTML Help Workshop')]
+
+ @property
+ def UCRTLibraries(self):
+ """
+ Microsoft Universal C Runtime SDK Libraries.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver < 14.0:
+ return []
+
+ arch_subdir = self.pi.target_dir(x64=True)
+ lib = join(self.si.UniversalCRTSdkDir, 'lib')
+ ucrtver = self._ucrt_subdir
+ return [join(lib, '%sucrt%s' % (ucrtver, arch_subdir))]
+
+ @property
+ def UCRTIncludes(self):
+ """
+ Microsoft Universal C Runtime SDK Include.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+ if self.vs_ver < 14.0:
+ return []
+
+ include = join(self.si.UniversalCRTSdkDir, 'include')
+ return [join(include, '%sucrt' % self._ucrt_subdir)]
+
+ @property
+ def _ucrt_subdir(self):
+ """
+ Microsoft Universal C Runtime SDK version subdir.
+
+ Return
+ ------
+ str
+ subdir
+ """
+ ucrtver = self.si.UniversalCRTSdkLastVersion
+ return ('%s\\' % ucrtver) if ucrtver else ''
+
+ @property
+ def FSharp(self):
+ """
+ Microsoft Visual F#.
+
+ Return
+ ------
+ list of str
+ paths
+ """
+        # F# tools shipped only with VS 2012 (11.0) and VS 2013 (12.0)
+        if not (11.0 <= self.vs_ver <= 12.0):
+            return []
+
+ return [self.si.FSharpInstallDir]
+
+ @property
+ def VCRuntimeRedist(self):
+ """
+ Microsoft Visual C++ runtime redistributable dll.
+
+ Return
+ ------
+ str
+ path
+ """
+ vcruntime = 'vcruntime%d0.dll' % self.vc_ver
+ arch_subdir = self.pi.target_dir(x64=True).strip('\\')
+
+ # Installation prefixes candidates
+ prefixes = []
+ tools_path = self.si.VCInstallDir
+ redist_path = dirname(tools_path.replace(r'\Tools', r'\Redist'))
+ if isdir(redist_path):
+ # Redist version may not be exactly the same as tools
+ redist_path = join(redist_path, listdir(redist_path)[-1])
+ prefixes += [redist_path, join(redist_path, 'onecore')]
+
+ prefixes += [join(tools_path, 'redist')] # VS14 legacy path
+
+ # CRT directory
+ crt_dirs = ('Microsoft.VC%d.CRT' % (self.vc_ver * 10),
+                    # Sometimes stored in a VS-version directory instead of VC
+ 'Microsoft.VC%d.CRT' % (int(self.vs_ver) * 10))
+
+ # vcruntime path
+ for prefix, crt_dir in itertools.product(prefixes, crt_dirs):
+ path = join(prefix, arch_subdir, crt_dir, vcruntime)
+ if isfile(path):
+ return path
+
+ def return_env(self, exists=True):
+ """
+ Return environment dict.
+
+ Parameters
+ ----------
+ exists: bool
+            If True, only return existing paths.
+
+ Return
+ ------
+ dict
+ environment
+ """
+ env = dict(
+ include=self._build_paths('include',
+ [self.VCIncludes,
+ self.OSIncludes,
+ self.UCRTIncludes,
+ self.NetFxSDKIncludes],
+ exists),
+ lib=self._build_paths('lib',
+ [self.VCLibraries,
+ self.OSLibraries,
+ self.FxTools,
+ self.UCRTLibraries,
+ self.NetFxSDKLibraries],
+ exists),
+ libpath=self._build_paths('libpath',
+ [self.VCLibraries,
+ self.FxTools,
+ self.VCStoreRefs,
+ self.OSLibpath],
+ exists),
+ path=self._build_paths('path',
+ [self.VCTools,
+ self.VSTools,
+ self.VsTDb,
+ self.SdkTools,
+ self.SdkSetup,
+ self.FxTools,
+ self.MSBuild,
+ self.HTMLHelpWorkshop,
+ self.FSharp],
+ exists),
+ )
+        # VCRuntimeRedist may be None when no vcruntime DLL was found
+        if self.vs_ver >= 14 and isfile(self.VCRuntimeRedist or ''):
+            env['py_vcruntime_redist'] = self.VCRuntimeRedist
+ return env
+
+ def _build_paths(self, name, spec_path_lists, exists):
+ """
+ Given an environment variable name and specified paths,
+ return a pathsep-separated string of paths containing
+ unique, extant, directories from those paths and from
+ the environment variable. Raise an error if no paths
+ are resolved.
+
+ Parameters
+ ----------
+ name: str
+ Environment variable name
+ spec_path_lists: list of str
+ Paths
+ exists: bool
+            If True, only return existing paths.
+
+ Return
+ ------
+ str
+ Pathsep-separated paths
+ """
+ # flatten spec_path_lists
+ spec_paths = itertools.chain.from_iterable(spec_path_lists)
+ env_paths = environ.get(name, '').split(pathsep)
+ paths = itertools.chain(spec_paths, env_paths)
+ extant_paths = list(filter(isdir, paths)) if exists else paths
+ if not extant_paths:
+ msg = "%s environment variable is empty" % name.upper()
+ raise distutils.errors.DistutilsPlatformError(msg)
+ unique_paths = self._unique_everseen(extant_paths)
+ return pathsep.join(unique_paths)
+
+ # from Python docs
+ @staticmethod
+ def _unique_everseen(iterable, key=None):
+ """
+ List unique elements, preserving order.
+ Remember all elements ever seen.
+
+ _unique_everseen('AAAABBBCCDAABBB') --> A B C D
+
+ _unique_everseen('ABBCcAD', str.lower) --> A B C D
+ """
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in itertools.filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
diff --git a/third_party/python/setuptools/setuptools/namespaces.py b/third_party/python/setuptools/setuptools/namespaces.py
new file mode 100644
index 0000000000..44939e1c6d
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/namespaces.py
@@ -0,0 +1,107 @@
+import os
+from distutils import log
+import itertools
+
+
+flatten = itertools.chain.from_iterable
+
+
+class Installer:
+
+ nspkg_ext = '-nspkg.pth'
+
+ def install_namespaces(self):
+ nsp = self._get_all_ns_packages()
+ if not nsp:
+ return
+ filename, ext = os.path.splitext(self._get_target())
+ filename += self.nspkg_ext
+ self.outputs.append(filename)
+ log.info("Installing %s", filename)
+ lines = map(self._gen_nspkg_line, nsp)
+
+ if self.dry_run:
+ # always generate the lines, even in dry run
+ list(lines)
+ return
+
+ with open(filename, 'wt') as f:
+ f.writelines(lines)
+
+ def uninstall_namespaces(self):
+ filename, ext = os.path.splitext(self._get_target())
+ filename += self.nspkg_ext
+ if not os.path.exists(filename):
+ return
+ log.info("Removing %s", filename)
+ os.remove(filename)
+
+ def _get_target(self):
+ return self.target
+
+ _nspkg_tmpl = (
+ "import sys, types, os",
+ "has_mfs = sys.version_info > (3, 5)",
+ "p = os.path.join(%(root)s, *%(pth)r)",
+ "importlib = has_mfs and __import__('importlib.util')",
+ "has_mfs and __import__('importlib.machinery')",
+ (
+ "m = has_mfs and "
+ "sys.modules.setdefault(%(pkg)r, "
+ "importlib.util.module_from_spec("
+ "importlib.machinery.PathFinder.find_spec(%(pkg)r, "
+ "[os.path.dirname(p)])))"
+ ),
+ (
+ "m = m or "
+ "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))"
+ ),
+ "mp = (m or []) and m.__dict__.setdefault('__path__',[])",
+ "(p not in mp) and mp.append(p)",
+ )
+ "lines for the namespace installer"
+
+ _nspkg_tmpl_multi = (
+ 'm and setattr(sys.modules[%(parent)r], %(child)r, m)',
+ )
+ "additional line(s) when a parent package is indicated"
+
+ def _get_root(self):
+ return "sys._getframe(1).f_locals['sitedir']"
+
+ def _gen_nspkg_line(self, pkg):
+ pth = tuple(pkg.split('.'))
+ root = self._get_root()
+ tmpl_lines = self._nspkg_tmpl
+ parent, sep, child = pkg.rpartition('.')
+ if parent:
+ tmpl_lines += self._nspkg_tmpl_multi
+ return ';'.join(tmpl_lines) % locals() + '\n'
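+    # Illustrative: for pkg = 'a.b', _gen_nspkg_line() joins the template
+    # above with ';', fills %(root)s, %(pth)r and %(pkg)r from locals(),
+    # and appends the _nspkg_tmpl_multi setattr() tail because 'a.b' has a
+    # parent package.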
+
+ def _get_all_ns_packages(self):
+ """Return sorted list of all package namespaces"""
+ pkgs = self.distribution.namespace_packages or []
+ return sorted(flatten(map(self._pkg_names, pkgs)))
+
+ @staticmethod
+ def _pkg_names(pkg):
+ """
+ Given a namespace package, yield the components of that
+ package.
+
+ >>> names = Installer._pkg_names('a.b.c')
+ >>> set(names) == set(['a', 'a.b', 'a.b.c'])
+ True
+ """
+ parts = pkg.split('.')
+ while parts:
+ yield '.'.join(parts)
+ parts.pop()
+
+
+class DevelopInstaller(Installer):
+ def _get_root(self):
+ return repr(str(self.egg_path))
+
+ def _get_target(self):
+ return self.egg_link
diff --git a/third_party/python/setuptools/setuptools/package_index.py b/third_party/python/setuptools/setuptools/package_index.py
new file mode 100644
index 0000000000..3979b131b5
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/package_index.py
@@ -0,0 +1,1139 @@
+"""PyPI and direct package downloading"""
+import sys
+import os
+import re
+import io
+import shutil
+import socket
+import base64
+import hashlib
+import itertools
+import warnings
+import configparser
+import html
+import http.client
+import urllib.parse
+import urllib.request
+import urllib.error
+from functools import wraps
+
+import setuptools
+from pkg_resources import (
+ CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST,
+ Environment, find_distributions, safe_name, safe_version,
+ to_filename, Requirement, DEVELOP_DIST, EGG_DIST,
+)
+from setuptools import ssl_support
+from distutils import log
+from distutils.errors import DistutilsError
+from fnmatch import translate
+from setuptools.wheel import Wheel
+
+EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$')
+HREF = re.compile(r"""href\s*=\s*['"]?([^'"> ]+)""", re.I)
+PYPI_MD5 = re.compile(
+ r'<a href="([^"#]+)">([^<]+)</a>\n\s+\(<a (?:title="MD5 hash"\n\s+)'
+ r'href="[^?]+\?:action=show_md5&amp;digest=([0-9a-f]{32})">md5</a>\)'
+)
+URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match
+EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split()
+
+__all__ = [
+ 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst',
+ 'interpret_distro_name',
+]
+
+_SOCKET_TIMEOUT = 15
+
+_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}"
+user_agent = _tmpl.format(
+ py_major='{}.{}'.format(*sys.version_info), setuptools=setuptools)
+
+
+def parse_requirement_arg(spec):
+ try:
+ return Requirement.parse(spec)
+ except ValueError as e:
+ raise DistutilsError(
+ "Not a URL, existing file, or requirement spec: %r" % (spec,)
+ ) from e
+
+
+def parse_bdist_wininst(name):
+ """Return (base,pyversion) or (None,None) for possible .exe name"""
+
+ lower = name.lower()
+ base, py_ver, plat = None, None, None
+
+ if lower.endswith('.exe'):
+ if lower.endswith('.win32.exe'):
+ base = name[:-10]
+ plat = 'win32'
+ elif lower.startswith('.win32-py', -16):
+ py_ver = name[-7:-4]
+ base = name[:-16]
+ plat = 'win32'
+ elif lower.endswith('.win-amd64.exe'):
+ base = name[:-14]
+ plat = 'win-amd64'
+ elif lower.startswith('.win-amd64-py', -20):
+ py_ver = name[-7:-4]
+ base = name[:-20]
+ plat = 'win-amd64'
+ return base, py_ver, plat
+
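+# Illustrative examples (not exercised by the module itself):
+#   parse_bdist_wininst('foo-1.0.win32.exe')       -> ('foo-1.0', None, 'win32')
+#   parse_bdist_wininst('foo-1.0.win32-py2.7.exe') -> ('foo-1.0', '2.7', 'win32')
+#   parse_bdist_wininst('foo-1.0.tar.gz')          -> (None, None, None)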
+
+def egg_info_for_url(url):
+ parts = urllib.parse.urlparse(url)
+ scheme, server, path, parameters, query, fragment = parts
+ base = urllib.parse.unquote(path.split('/')[-1])
+ if server == 'sourceforge.net' and base == 'download': # XXX Yuck
+ base = urllib.parse.unquote(path.split('/')[-2])
+ if '#' in base:
+ base, fragment = base.split('#', 1)
+ return base, fragment
+
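+# Illustrative: egg_info_for_url('https://host/foo-1.0.tar.gz#egg=foo-1.0')
+# returns ('foo-1.0.tar.gz', 'egg=foo-1.0').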
+
+def distros_for_url(url, metadata=None):
+ """Yield egg or source distribution objects that might be found at a URL"""
+ base, fragment = egg_info_for_url(url)
+ for dist in distros_for_location(url, base, metadata):
+ yield dist
+ if fragment:
+ match = EGG_FRAGMENT.match(fragment)
+ if match:
+ for dist in interpret_distro_name(
+ url, match.group(1), metadata, precedence=CHECKOUT_DIST
+ ):
+ yield dist
+
+
+def distros_for_location(location, basename, metadata=None):
+ """Yield egg or source distribution objects based on basename"""
+ if basename.endswith('.egg.zip'):
+ basename = basename[:-4] # strip the .zip
+ if basename.endswith('.egg') and '-' in basename:
+ # only one, unambiguous interpretation
+ return [Distribution.from_location(location, basename, metadata)]
+ if basename.endswith('.whl') and '-' in basename:
+ wheel = Wheel(basename)
+ if not wheel.is_compatible():
+ return []
+ return [Distribution(
+ location=location,
+ project_name=wheel.project_name,
+ version=wheel.version,
+ # Increase priority over eggs.
+ precedence=EGG_DIST + 1,
+ )]
+ if basename.endswith('.exe'):
+ win_base, py_ver, platform = parse_bdist_wininst(basename)
+ if win_base is not None:
+ return interpret_distro_name(
+ location, win_base, metadata, py_ver, BINARY_DIST, platform
+ )
+ # Try source distro extensions (.zip, .tgz, etc.)
+ #
+ for ext in EXTENSIONS:
+ if basename.endswith(ext):
+ basename = basename[:-len(ext)]
+ return interpret_distro_name(location, basename, metadata)
+ return [] # no extension matched
+
+
+def distros_for_filename(filename, metadata=None):
+ """Yield possible egg or source distribution objects based on a filename"""
+ return distros_for_location(
+ normalize_path(filename), os.path.basename(filename), metadata
+ )
+
+
+def interpret_distro_name(
+ location, basename, metadata, py_version=None, precedence=SOURCE_DIST,
+ platform=None
+):
+ """Generate alternative interpretations of a source distro name
+
+ Note: if `location` is a filesystem filename, you should call
+ ``pkg_resources.normalize_path()`` on it before passing it to this
+ routine!
+ """
+ # Generate alternative interpretations of a source distro name
+    # Some packages are ambiguous as to the name/version split,
+    # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc.
+    # So, we generate each possible interpretation (e.g. "adns, python-1.1.0"
+ # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice,
+ # the spurious interpretations should be ignored, because in the event
+ # there's also an "adns" package, the spurious "python-1.1.0" version will
+ # compare lower than any numeric version number, and is therefore unlikely
+ # to match a request for it. It's still a potential problem, though, and
+ # in the long run PyPI and the distutils should go for "safe" names and
+ # versions in distribution archive names (sdist and bdist).
+
+ parts = basename.split('-')
+ if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]):
+ # it is a bdist_dumb, not an sdist -- bail out
+ return
+
+ for p in range(1, len(parts) + 1):
+ yield Distribution(
+ location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]),
+ py_version=py_version, precedence=precedence,
+ platform=platform
+ )
+
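+# Illustrative: interpret_distro_name(url, 'adns-python-1.1.0', None) yields
+# candidates ('adns', 'python-1.1.0'), ('adns-python', '1.1.0') and
+# ('adns-python-1.1.0', ''); the spurious ones lose version comparisons as
+# the comment inside the function explains.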
+
+# From Python 2.7 docs
+def unique_everseen(iterable, key=None):
+ "List unique elements, preserving order. Remember all elements ever seen."
+ # unique_everseen('AAAABBBCCDAABBB') --> A B C D
+ # unique_everseen('ABBCcAD', str.lower) --> A B C D
+ seen = set()
+ seen_add = seen.add
+ if key is None:
+ for element in itertools.filterfalse(seen.__contains__, iterable):
+ seen_add(element)
+ yield element
+ else:
+ for element in iterable:
+ k = key(element)
+ if k not in seen:
+ seen_add(k)
+ yield element
+
+
+def unique_values(func):
+ """
+ Wrap a function returning an iterable such that the resulting iterable
+ only ever yields unique items.
+ """
+
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return unique_everseen(func(*args, **kwargs))
+
+ return wrapper
+
+
+REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I)
+# this line is here to fix emacs' cruddy broken syntax highlighting
+
+
+@unique_values
+def find_external_links(url, page):
+ """Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
+
+ for match in REL.finditer(page):
+ tag, rel = match.groups()
+ rels = set(map(str.strip, rel.lower().split(',')))
+ if 'homepage' in rels or 'download' in rels:
+ for match in HREF.finditer(tag):
+ yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+ for tag in ("<th>Home Page", "<th>Download URL"):
+ pos = page.find(tag)
+ if pos != -1:
+ match = HREF.search(page, pos)
+ if match:
+ yield urllib.parse.urljoin(url, htmldecode(match.group(1)))
+
+
+class ContentChecker:
+ """
+ A null content checker that defines the interface for checking content
+ """
+
+ def feed(self, block):
+ """
+ Feed a block of data to the hash.
+ """
+ return
+
+ def is_valid(self):
+ """
+ Check the hash. Return False if validation fails.
+ """
+ return True
+
+ def report(self, reporter, template):
+ """
+ Call reporter with information about the checker (hash name)
+ substituted into the template.
+ """
+ return
+
+
+class HashChecker(ContentChecker):
+ pattern = re.compile(
+ r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)='
+ r'(?P<expected>[a-f0-9]+)'
+ )
+
+ def __init__(self, hash_name, expected):
+ self.hash_name = hash_name
+ self.hash = hashlib.new(hash_name)
+ self.expected = expected
+
+ @classmethod
+ def from_url(cls, url):
+ "Construct a (possibly null) ContentChecker from a URL"
+ fragment = urllib.parse.urlparse(url)[-1]
+ if not fragment:
+ return ContentChecker()
+ match = cls.pattern.search(fragment)
+ if not match:
+ return ContentChecker()
+ return cls(**match.groupdict())
+
+ def feed(self, block):
+ self.hash.update(block)
+
+ def is_valid(self):
+ return self.hash.hexdigest() == self.expected
+
+ def report(self, reporter, template):
+ msg = template % self.hash_name
+ return reporter(msg)
+
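+# Illustrative: HashChecker.from_url('https://host/foo.tar.gz#sha256=' +
+# '0' * 64) builds a checker whose is_valid() compares the streamed digest
+# with the fragment; URLs without a recognised hash fragment fall back to
+# the null ContentChecker above.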
+
+class PackageIndex(Environment):
+ """A distribution index that scans web pages for download URLs"""
+
+ def __init__(
+ self, index_url="https://pypi.org/simple/", hosts=('*',),
+ ca_bundle=None, verify_ssl=True, *args, **kw
+ ):
+ Environment.__init__(self, *args, **kw)
+        # Ensure the index URL has a trailing slash
+        self.index_url = index_url + "/" [:not index_url.endswith('/')]
+ self.scanned_urls = {}
+ self.fetched_urls = {}
+ self.package_pages = {}
+ self.allows = re.compile('|'.join(map(translate, hosts))).match
+ self.to_scan = []
+ use_ssl = (
+ verify_ssl
+ and ssl_support.is_available
+ and (ca_bundle or ssl_support.find_ca_bundle())
+ )
+ if use_ssl:
+ self.opener = ssl_support.opener_for(ca_bundle)
+ else:
+ self.opener = urllib.request.urlopen
+
+ def process_url(self, url, retrieve=False):
+ """Evaluate a URL as a possible download, and maybe retrieve it"""
+ if url in self.scanned_urls and not retrieve:
+ return
+ self.scanned_urls[url] = True
+ if not URL_SCHEME(url):
+ self.process_filename(url)
+ return
+ else:
+ dists = list(distros_for_url(url))
+ if dists:
+ if not self.url_ok(url):
+ return
+ self.debug("Found link: %s", url)
+
+ if dists or not retrieve or url in self.fetched_urls:
+ list(map(self.add, dists))
+ return # don't need the actual page
+
+ if not self.url_ok(url):
+ self.fetched_urls[url] = True
+ return
+
+ self.info("Reading %s", url)
+ self.fetched_urls[url] = True # prevent multiple fetch attempts
+ tmpl = "Download error on %s: %%s -- Some packages may not be found!"
+ f = self.open_url(url, tmpl % url)
+ if f is None:
+ return
+ if isinstance(f, urllib.error.HTTPError) and f.code == 401:
+ self.info("Authentication error: %s" % f.msg)
+ self.fetched_urls[f.url] = True
+ if 'html' not in f.headers.get('content-type', '').lower():
+ f.close() # not html, we can't process it
+ return
+
+ base = f.url # handle redirects
+ page = f.read()
+ if not isinstance(page, str):
+            # Got bytes but want str; decode with the response charset.
+ if isinstance(f, urllib.error.HTTPError):
+ # Errors have no charset, assume latin1:
+ charset = 'latin-1'
+ else:
+ charset = f.headers.get_param('charset') or 'latin-1'
+ page = page.decode(charset, "ignore")
+ f.close()
+ for match in HREF.finditer(page):
+ link = urllib.parse.urljoin(base, htmldecode(match.group(1)))
+ self.process_url(link)
+ if url.startswith(self.index_url) and getattr(f, 'code', None) != 404:
+ page = self.process_index(url, page)
+
+ def process_filename(self, fn, nested=False):
+ # process filenames or directories
+ if not os.path.exists(fn):
+ self.warn("Not found: %s", fn)
+ return
+
+ if os.path.isdir(fn) and not nested:
+ path = os.path.realpath(fn)
+ for item in os.listdir(path):
+ self.process_filename(os.path.join(path, item), True)
+
+ dists = distros_for_filename(fn)
+ if dists:
+ self.debug("Found: %s", fn)
+ list(map(self.add, dists))
+
+ def url_ok(self, url, fatal=False):
+ s = URL_SCHEME(url)
+ is_file = s and s.group(1).lower() == 'file'
+ if is_file or self.allows(urllib.parse.urlparse(url)[1]):
+ return True
+ msg = (
+ "\nNote: Bypassing %s (disallowed host; see "
+ "http://bit.ly/2hrImnY for details).\n")
+ if fatal:
+ raise DistutilsError(msg % url)
+ else:
+ self.warn(msg, url)
+
+ def scan_egg_links(self, search_path):
+ dirs = filter(os.path.isdir, search_path)
+ egg_links = (
+ (path, entry)
+ for path in dirs
+ for entry in os.listdir(path)
+ if entry.endswith('.egg-link')
+ )
+ list(itertools.starmap(self.scan_egg_link, egg_links))
+
+ def scan_egg_link(self, path, entry):
+ with open(os.path.join(path, entry)) as raw_lines:
+ # filter non-empty lines
+ lines = list(filter(None, map(str.strip, raw_lines)))
+
+ if len(lines) != 2:
+ # format is not recognized; punt
+ return
+
+ egg_path, setup_path = lines
+
+ for dist in find_distributions(os.path.join(path, egg_path)):
+ dist.location = os.path.join(path, *lines)
+ dist.precedence = SOURCE_DIST
+ self.add(dist)
+
+ def process_index(self, url, page):
+ """Process the contents of a PyPI page"""
+
+ def scan(link):
+ # Process a URL to see if it's for a package page
+ if link.startswith(self.index_url):
+ parts = list(map(
+ urllib.parse.unquote, link[len(self.index_url):].split('/')
+ ))
+ if len(parts) == 2 and '#' not in parts[1]:
+ # it's a package page, sanitize and index it
+ pkg = safe_name(parts[0])
+ ver = safe_version(parts[1])
+ self.package_pages.setdefault(pkg.lower(), {})[link] = True
+ return to_filename(pkg), to_filename(ver)
+ return None, None
+
+ # process an index page into the package-page index
+ for match in HREF.finditer(page):
+ try:
+ scan(urllib.parse.urljoin(url, htmldecode(match.group(1))))
+ except ValueError:
+ pass
+
+ pkg, ver = scan(url) # ensure this page is in the page index
+ if pkg:
+ # process individual package page
+ for new_url in find_external_links(url, page):
+ # Process the found URL
+ base, frag = egg_info_for_url(new_url)
+ if base.endswith('.py') and not frag:
+ if ver:
+ new_url += '#egg=%s-%s' % (pkg, ver)
+ else:
+ self.need_version_info(url)
+ self.scan_url(new_url)
+
+ return PYPI_MD5.sub(
+ lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page
+ )
+ else:
+ return "" # no sense double-scanning non-package pages
+
+ def need_version_info(self, url):
+ self.scan_all(
+ "Page at %s links to .py file(s) without version info; an index "
+ "scan is required.", url
+ )
+
+ def scan_all(self, msg=None, *args):
+ if self.index_url not in self.fetched_urls:
+ if msg:
+ self.warn(msg, *args)
+ self.info(
+ "Scanning index of all packages (this may take a while)"
+ )
+ self.scan_url(self.index_url)
+
+ def find_packages(self, requirement):
+ self.scan_url(self.index_url + requirement.unsafe_name + '/')
+
+ if not self.package_pages.get(requirement.key):
+ # Fall back to safe version of the name
+ self.scan_url(self.index_url + requirement.project_name + '/')
+
+ if not self.package_pages.get(requirement.key):
+ # We couldn't find the target package, so search the index page too
+ self.not_found_in_index(requirement)
+
+ for url in list(self.package_pages.get(requirement.key, ())):
+ # scan each page that might be related to the desired package
+ self.scan_url(url)
+
+ def obtain(self, requirement, installer=None):
+ self.prescan()
+ self.find_packages(requirement)
+ for dist in self[requirement.key]:
+ if dist in requirement:
+ return dist
+ self.debug("%s does not match %s", requirement, dist)
+ return super(PackageIndex, self).obtain(requirement, installer)
+
+ def check_hash(self, checker, filename, tfp):
+ """
+ checker is a ContentChecker
+ """
+ checker.report(
+ self.debug,
+ "Validating %%s checksum for %s" % filename)
+ if not checker.is_valid():
+ tfp.close()
+ os.unlink(filename)
+ raise DistutilsError(
+ "%s validation failed for %s; "
+ "possible download problem?"
+ % (checker.hash.name, os.path.basename(filename))
+ )
+
+ def add_find_links(self, urls):
+ """Add `urls` to the list that will be prescanned for searches"""
+ for url in urls:
+ if (
+ self.to_scan is None # if we have already "gone online"
+ or not URL_SCHEME(url) # or it's a local file/directory
+ or url.startswith('file:')
+ or list(distros_for_url(url)) # or a direct package link
+ ):
+ # then go ahead and process it now
+ self.scan_url(url)
+ else:
+ # otherwise, defer retrieval till later
+ self.to_scan.append(url)
+
+ def prescan(self):
+ """Scan urls scheduled for prescanning (e.g. --find-links)"""
+ if self.to_scan:
+ list(map(self.scan_url, self.to_scan))
+ self.to_scan = None # from now on, go ahead and process immediately
+
+ def not_found_in_index(self, requirement):
+ if self[requirement.key]: # we've seen at least one distro
+ meth, msg = self.info, "Couldn't retrieve index page for %r"
+ else: # no distros seen for this name, might be misspelled
+ meth, msg = (
+ self.warn,
+ "Couldn't find index page for %r (maybe misspelled?)")
+ meth(msg, requirement.unsafe_name)
+ self.scan_all()
+
+ def download(self, spec, tmpdir):
+ """Locate and/or download `spec` to `tmpdir`, returning a local path
+
+ `spec` may be a ``Requirement`` object, or a string containing a URL,
+ an existing local filename, or a project/version requirement spec
+ (i.e. the string form of a ``Requirement`` object). If it is the URL
+ of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one
+ that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is
+ automatically created alongside the downloaded file.
+
+ If `spec` is a ``Requirement`` object or a string containing a
+ project/version requirement spec, this method returns the location of
+ a matching distribution (possibly after downloading it to `tmpdir`).
+ If `spec` is a locally existing file or directory name, it is simply
+ returned unchanged. If `spec` is a URL, it is downloaded to a subpath
+ of `tmpdir`, and the local filename is returned. Various errors may be
+ raised if a problem occurs during downloading.
+ """
+ if not isinstance(spec, Requirement):
+ scheme = URL_SCHEME(spec)
+ if scheme:
+ # It's a url, download it to tmpdir
+ found = self._download_url(scheme.group(1), spec, tmpdir)
+ base, fragment = egg_info_for_url(spec)
+ if base.endswith('.py'):
+ found = self.gen_setup(found, fragment, tmpdir)
+ return found
+ elif os.path.exists(spec):
+ # Existing file or directory, just return it
+ return spec
+ else:
+ spec = parse_requirement_arg(spec)
+ return getattr(self.fetch_distribution(spec, tmpdir), 'location', None)
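+    # Illustrative: PackageIndex().download('foo==1.0', tmpdir) resolves the
+    # requirement and returns a local path (downloading into tmpdir if
+    # needed); a URL or an existing local path is handled as documented
+    # above.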
+
+ def fetch_distribution(
+ self, requirement, tmpdir, force_scan=False, source=False,
+ develop_ok=False, local_index=None):
+ """Obtain a distribution suitable for fulfilling `requirement`
+
+ `requirement` must be a ``pkg_resources.Requirement`` instance.
+ If necessary, or if the `force_scan` flag is set, the requirement is
+ searched for in the (online) package index as well as the locally
+ installed packages. If a distribution matching `requirement` is found,
+ the returned distribution's ``location`` is the value you would have
+ gotten from calling the ``download()`` method with the matching
+ distribution's URL or filename. If no matching distribution is found,
+ ``None`` is returned.
+
+ If the `source` flag is set, only source distributions and source
+ checkout links will be considered. Unless the `develop_ok` flag is
+ set, development and system eggs (i.e., those using the ``.egg-info``
+ format) will be ignored.
+ """
+ # process a Requirement
+ self.info("Searching for %s", requirement)
+ skipped = {}
+ dist = None
+
+ def find(req, env=None):
+ if env is None:
+ env = self
+ # Find a matching distribution; may be called more than once
+
+ for dist in env[req.key]:
+
+ if dist.precedence == DEVELOP_DIST and not develop_ok:
+ if dist not in skipped:
+ self.warn(
+ "Skipping development or system egg: %s", dist,
+ )
+ skipped[dist] = 1
+ continue
+
+ test = (
+ dist in req
+ and (dist.precedence <= SOURCE_DIST or not source)
+ )
+ if test:
+ loc = self.download(dist.location, tmpdir)
+ dist.download_location = loc
+ if os.path.exists(dist.download_location):
+ return dist
+
+ if force_scan:
+ self.prescan()
+ self.find_packages(requirement)
+ dist = find(requirement)
+
+ if not dist and local_index is not None:
+ dist = find(requirement, local_index)
+
+ if dist is None:
+ if self.to_scan is not None:
+ self.prescan()
+ dist = find(requirement)
+
+ if dist is None and not force_scan:
+ self.find_packages(requirement)
+ dist = find(requirement)
+
+ if dist is None:
+ self.warn(
+ "No local packages or working download links found for %s%s",
+ (source and "a source distribution of " or ""),
+ requirement,
+ )
+ else:
+ self.info("Best match: %s", dist)
+ return dist.clone(location=dist.download_location)
+
+ def fetch(self, requirement, tmpdir, force_scan=False, source=False):
+ """Obtain a file suitable for fulfilling `requirement`
+
+ DEPRECATED; use the ``fetch_distribution()`` method now instead. For
+ backward compatibility, this routine is identical but returns the
+ ``location`` of the downloaded distribution instead of a distribution
+ object.
+ """
+ dist = self.fetch_distribution(requirement, tmpdir, force_scan, source)
+ if dist is not None:
+ return dist.location
+ return None
+
+ def gen_setup(self, filename, fragment, tmpdir):
+ match = EGG_FRAGMENT.match(fragment)
+ dists = match and [
+ d for d in
+ interpret_distro_name(filename, match.group(1), None) if d.version
+ ] or []
+
+ if len(dists) == 1: # unambiguous ``#egg`` fragment
+ basename = os.path.basename(filename)
+
+ # Make sure the file has been downloaded to the temp dir.
+ if os.path.dirname(filename) != tmpdir:
+ dst = os.path.join(tmpdir, basename)
+ from setuptools.command.easy_install import samefile
+ if not samefile(filename, dst):
+ shutil.copy2(filename, dst)
+ filename = dst
+
+ with open(os.path.join(tmpdir, 'setup.py'), 'w') as file:
+ file.write(
+ "from setuptools import setup\n"
+ "setup(name=%r, version=%r, py_modules=[%r])\n"
+ % (
+ dists[0].project_name, dists[0].version,
+ os.path.splitext(basename)[0]
+ )
+ )
+ return filename
+
+ elif match:
+ raise DistutilsError(
+ "Can't unambiguously interpret project/version identifier %r; "
+ "any dashes in the name or version should be escaped using "
+ "underscores. %r" % (fragment, dists)
+ )
+ else:
+ raise DistutilsError(
+ "Can't process plain .py files without an '#egg=name-version'"
+ " suffix to enable automatic setup script generation."
+ )
+
+ dl_blocksize = 8192
+
+ def _download_to(self, url, filename):
+ self.info("Downloading %s", url)
+ # Download the file
+ fp = None
+ try:
+ checker = HashChecker.from_url(url)
+ fp = self.open_url(url)
+ if isinstance(fp, urllib.error.HTTPError):
+ raise DistutilsError(
+ "Can't download %s: %s %s" % (url, fp.code, fp.msg)
+ )
+ headers = fp.info()
+ blocknum = 0
+ bs = self.dl_blocksize
+ size = -1
+ if "content-length" in headers:
+ # Some servers return multiple Content-Length headers :(
+ sizes = headers.get_all('Content-Length')
+ size = max(map(int, sizes))
+ self.reporthook(url, filename, blocknum, bs, size)
+ with open(filename, 'wb') as tfp:
+ while True:
+ block = fp.read(bs)
+ if block:
+ checker.feed(block)
+ tfp.write(block)
+ blocknum += 1
+ self.reporthook(url, filename, blocknum, bs, size)
+ else:
+ break
+ self.check_hash(checker, filename, tfp)
+ return headers
+ finally:
+ if fp:
+ fp.close()
+
+ def reporthook(self, url, filename, blocknum, blksize, size):
+ pass # no-op
+
+ def open_url(self, url, warning=None):
+ if url.startswith('file:'):
+ return local_open(url)
+ try:
+ return open_with_auth(url, self.opener)
+ except (ValueError, http.client.InvalidURL) as v:
+ msg = ' '.join([str(arg) for arg in v.args])
+ if warning:
+ self.warn(warning, msg)
+ else:
+ raise DistutilsError('%s %s' % (url, msg)) from v
+ except urllib.error.HTTPError as v:
+ return v
+ except urllib.error.URLError as v:
+ if warning:
+ self.warn(warning, v.reason)
+ else:
+ raise DistutilsError("Download error for %s: %s"
+ % (url, v.reason)) from v
+ except http.client.BadStatusLine as v:
+ if warning:
+ self.warn(warning, v.line)
+ else:
+ raise DistutilsError(
+ '%s returned a bad status line. The server might be '
+ 'down, %s' %
+ (url, v.line)
+ ) from v
+ except (http.client.HTTPException, socket.error) as v:
+ if warning:
+ self.warn(warning, v)
+ else:
+ raise DistutilsError("Download error for %s: %s"
+ % (url, v)) from v
+
+ def _download_url(self, scheme, url, tmpdir):
+ # Determine download filename
+ #
+ name, fragment = egg_info_for_url(url)
+ if name:
+ while '..' in name:
+ name = name.replace('..', '.').replace('\\', '_')
+ else:
+ name = "__downloaded__" # default if URL has no path contents
+
+ if name.endswith('.egg.zip'):
+ name = name[:-4] # strip the extra .zip before download
+
+ filename = os.path.join(tmpdir, name)
+
+ # Download the file
+ #
+ if scheme == 'svn' or scheme.startswith('svn+'):
+ return self._download_svn(url, filename)
+ elif scheme == 'git' or scheme.startswith('git+'):
+ return self._download_git(url, filename)
+ elif scheme.startswith('hg+'):
+ return self._download_hg(url, filename)
+ elif scheme == 'file':
+ return urllib.request.url2pathname(urllib.parse.urlparse(url)[2])
+ else:
+ self.url_ok(url, True) # raises error if not allowed
+ return self._attempt_download(url, filename)
+
+ def scan_url(self, url):
+ self.process_url(url, True)
+
+ def _attempt_download(self, url, filename):
+ headers = self._download_to(url, filename)
+ if 'html' in headers.get('content-type', '').lower():
+ return self._download_html(url, headers, filename)
+ else:
+ return filename
+
+ def _download_html(self, url, headers, filename):
+ file = open(filename)
+ for line in file:
+ if line.strip():
+ # Check for a subversion index page
+ if re.search(r'<title>([^- ]+ - )?Revision \d+:', line):
+ # it's a subversion index page:
+ file.close()
+ os.unlink(filename)
+ return self._download_svn(url, filename)
+ break # not an index page
+ file.close()
+ os.unlink(filename)
+ raise DistutilsError("Unexpected HTML page found at " + url)
+
+ def _download_svn(self, url, filename):
+ warnings.warn("SVN download support is deprecated", UserWarning)
+ url = url.split('#', 1)[0] # remove any fragment for svn's sake
+ creds = ''
+ if url.lower().startswith('svn:') and '@' in url:
+ scheme, netloc, path, p, q, f = urllib.parse.urlparse(url)
+ if not netloc and path.startswith('//') and '/' in path[2:]:
+ netloc, path = path[2:].split('/', 1)
+ auth, host = _splituser(netloc)
+ if auth:
+ if ':' in auth:
+ user, pw = auth.split(':', 1)
+ creds = " --username=%s --password=%s" % (user, pw)
+ else:
+ creds = " --username=" + auth
+ netloc = host
+ parts = scheme, netloc, url, p, q, f
+ url = urllib.parse.urlunparse(parts)
+ self.info("Doing subversion checkout from %s to %s", url, filename)
+ os.system("svn checkout%s -q %s %s" % (creds, url, filename))
+ return filename
+
+ @staticmethod
+ def _vcs_split_rev_from_url(url, pop_prefix=False):
+ scheme, netloc, path, query, frag = urllib.parse.urlsplit(url)
+
+ scheme = scheme.split('+', 1)[-1]
+
+        # Fragment detection can fail; strip any fragment left in the path
+ path = path.split('#', 1)[0]
+
+ rev = None
+ if '@' in path:
+ path, rev = path.rsplit('@', 1)
+
+ # Also, discard fragment
+ url = urllib.parse.urlunsplit((scheme, netloc, path, query, ''))
+
+ return url, rev
+
+ def _download_git(self, url, filename):
+ filename = filename.split('#', 1)[0]
+ url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+
+ self.info("Doing git clone from %s to %s", url, filename)
+ os.system("git clone --quiet %s %s" % (url, filename))
+
+ if rev is not None:
+ self.info("Checking out %s", rev)
+ os.system("git -C %s checkout --quiet %s" % (
+ filename,
+ rev,
+ ))
+
+ return filename
+
+ def _download_hg(self, url, filename):
+ filename = filename.split('#', 1)[0]
+ url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True)
+
+ self.info("Doing hg clone from %s to %s", url, filename)
+ os.system("hg clone --quiet %s %s" % (url, filename))
+
+ if rev is not None:
+ self.info("Updating to %s", rev)
+ os.system("hg --cwd %s up -C -r %s -q" % (
+ filename,
+ rev,
+ ))
+
+ return filename
+
+ def debug(self, msg, *args):
+ log.debug(msg, *args)
+
+ def info(self, msg, *args):
+ log.info(msg, *args)
+
+ def warn(self, msg, *args):
+ log.warn(msg, *args)
+
+
+# This pattern matches a character entity reference (a decimal numeric
+# reference, a hexadecimal numeric reference, or a named reference).
+entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub
+
+
+def decode_entity(match):
+ what = match.group(0)
+ return html.unescape(what)
+
+
+def htmldecode(text):
+ """
+ Decode HTML entities in the given text.
+
+ >>> htmldecode(
+ ... 'https://../package_name-0.1.2.tar.gz'
+ ... '?tokena=A&amp;tokenb=B">package_name-0.1.2.tar.gz')
+ 'https://../package_name-0.1.2.tar.gz?tokena=A&tokenb=B">package_name-0.1.2.tar.gz'
+ """
+ return entity_sub(decode_entity, text)
+
+
+def socket_timeout(timeout=15):
+ def _socket_timeout(func):
+ def _socket_timeout(*args, **kwargs):
+ old_timeout = socket.getdefaulttimeout()
+ socket.setdefaulttimeout(timeout)
+ try:
+ return func(*args, **kwargs)
+ finally:
+ socket.setdefaulttimeout(old_timeout)
+
+ return _socket_timeout
+
+ return _socket_timeout
+
+
+def _encode_auth(auth):
+ """
+    Encode the auth portion of a URL for use in an HTTP Basic Authorization header.
+ >>> str(_encode_auth('username%3Apassword'))
+ 'dXNlcm5hbWU6cGFzc3dvcmQ='
+
+ Long auth strings should not cause a newline to be inserted.
+ >>> long_auth = 'username:' + 'password'*10
+ >>> chr(10) in str(_encode_auth(long_auth))
+ False
+ """
+ auth_s = urllib.parse.unquote(auth)
+ # convert to bytes
+ auth_bytes = auth_s.encode()
+ encoded_bytes = base64.b64encode(auth_bytes)
+ # convert back to a string
+ encoded = encoded_bytes.decode()
+    # strip any trailing newline (older base64 encoders inserted one)
+ return encoded.replace('\n', '')
+
+
+class Credential:
+ """
+ A username/password pair. Use like a namedtuple.
+ """
+
+ def __init__(self, username, password):
+ self.username = username
+ self.password = password
+
+ def __iter__(self):
+ yield self.username
+ yield self.password
+
+ def __str__(self):
+ return '%(username)s:%(password)s' % vars(self)
+
+
+class PyPIConfig(configparser.RawConfigParser):
+ def __init__(self):
+ """
+ Load from ~/.pypirc
+ """
+ defaults = dict.fromkeys(['username', 'password', 'repository'], '')
+ configparser.RawConfigParser.__init__(self, defaults)
+
+ rc = os.path.join(os.path.expanduser('~'), '.pypirc')
+ if os.path.exists(rc):
+ self.read(rc)
+
+ @property
+ def creds_by_repository(self):
+ sections_with_repositories = [
+ section for section in self.sections()
+ if self.get(section, 'repository').strip()
+ ]
+
+ return dict(map(self._get_repo_cred, sections_with_repositories))
+
+ def _get_repo_cred(self, section):
+ repo = self.get(section, 'repository').strip()
+ return repo, Credential(
+ self.get(section, 'username').strip(),
+ self.get(section, 'password').strip(),
+ )
+
+ def find_credential(self, url):
+ """
+ If the URL indicated appears to be a repository defined in this
+ config, return the credential for that repository.
+ """
+ for repository, cred in self.creds_by_repository.items():
+ if url.startswith(repository):
+ return cred
+
+
+def open_with_auth(url, opener=urllib.request.urlopen):
+ """Open a urllib2 request, handling HTTP authentication"""
+
+ parsed = urllib.parse.urlparse(url)
+ scheme, netloc, path, params, query, frag = parsed
+
+ # Double scheme does not raise on macOS as revealed by a
+ # failing test. We would expect "nonnumeric port". Refs #20.
+ if netloc.endswith(':'):
+ raise http.client.InvalidURL("nonnumeric port: ''")
+
+ if scheme in ('http', 'https'):
+ auth, address = _splituser(netloc)
+ else:
+ auth = None
+
+ if not auth:
+ cred = PyPIConfig().find_credential(url)
+ if cred:
+ auth = str(cred)
+ info = cred.username, url
+ log.info('Authenticating as %s for %s (from .pypirc)', *info)
+
+ if auth:
+ auth = "Basic " + _encode_auth(auth)
+ parts = scheme, address, path, params, query, frag
+ new_url = urllib.parse.urlunparse(parts)
+ request = urllib.request.Request(new_url)
+ request.add_header("Authorization", auth)
+ else:
+ request = urllib.request.Request(url)
+
+ request.add_header('User-Agent', user_agent)
+ fp = opener(request)
+
+ if auth:
+ # Put authentication info back into request URL if same host,
+ # so that links found on the page will work
+ s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url)
+ if s2 == scheme and h2 == address:
+ parts = s2, netloc, path2, param2, query2, frag2
+ fp.url = urllib.parse.urlunparse(parts)
+
+ return fp
+
+
+# copy of urllib.parse._splituser from Python 3.8
+def _splituser(host):
+ """splituser('user[:passwd]@host[:port]')
+ --> 'user[:passwd]', 'host[:port]'."""
+ user, delim, host = host.rpartition('@')
+ return (user if delim else None), host
+
+
+# adding a timeout to avoid freezing package_index
+open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth)
+
+
+def fix_sf_url(url):
+ return url # backward compatibility
+
+
+def local_open(url):
+ """Read a local path, with special support for directories"""
+ scheme, server, path, param, query, frag = urllib.parse.urlparse(url)
+ filename = urllib.request.url2pathname(path)
+ if os.path.isfile(filename):
+ return urllib.request.urlopen(url)
+ elif path.endswith('/') and os.path.isdir(filename):
+ files = []
+ for f in os.listdir(filename):
+ filepath = os.path.join(filename, f)
+ if f == 'index.html':
+ with open(filepath, 'r') as fp:
+ body = fp.read()
+ break
+ elif os.path.isdir(filepath):
+ f += '/'
+ files.append('<a href="{name}">{name}</a>'.format(name=f))
+ else:
+ tmpl = (
+ "<html><head><title>{url}</title>"
+ "</head><body>{files}</body></html>")
+ body = tmpl.format(url=url, files='\n'.join(files))
+ status, message = 200, "OK"
+ else:
+ status, message, body = 404, "Path not found", "Not found"
+
+ headers = {'content-type': 'text/html'}
+ body_stream = io.StringIO(body)
+ return urllib.error.HTTPError(url, status, message, headers, body_stream)
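
The auth plumbing above composes as follows; a minimal sketch, assuming this
module is importable as setuptools.package_index and using a hypothetical
netloc. _splituser() separates the credentials from the host, and
_encode_auth() base64-encodes them for a Basic Authorization header, exactly
as open_with_auth() does internally.

    from setuptools.package_index import _encode_auth, _splituser

    auth, host = _splituser('user:s3cret@pypi.example.org:443')
    print(auth, host)                     # user:s3cret pypi.example.org:443
    print('Basic ' + _encode_auth(auth))  # Basic dXNlcjpzM2NyZXQ=
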
diff --git a/third_party/python/setuptools/setuptools/py34compat.py b/third_party/python/setuptools/setuptools/py34compat.py
new file mode 100644
index 0000000000..3ad917222a
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/py34compat.py
@@ -0,0 +1,13 @@
+import importlib
+
+try:
+ import importlib.util
+except ImportError:
+ pass
+
+
+try:
+ module_from_spec = importlib.util.module_from_spec
+except AttributeError:
+ def module_from_spec(spec):
+ return spec.loader.load_module(spec.name)
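
The shim above only matters on interpreters whose importlib lacks
module_from_spec; elsewhere it resolves to the stdlib function. A sketch of
the modern call pattern it supports, using the stdlib json module purely as
an example:

    import importlib.util
    from setuptools.py34compat import module_from_spec

    spec = importlib.util.find_spec('json')  # any importable module works
    mod = module_from_spec(spec)             # create the module object
    spec.loader.exec_module(mod)             # run the module body
    print(mod.dumps({'ok': True}))           # {"ok": true}
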
diff --git a/third_party/python/setuptools/setuptools/sandbox.py b/third_party/python/setuptools/setuptools/sandbox.py
new file mode 100644
index 0000000000..91b960d899
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/sandbox.py
@@ -0,0 +1,496 @@
+import os
+import sys
+import tempfile
+import operator
+import functools
+import itertools
+import re
+import contextlib
+import pickle
+import textwrap
+import builtins
+
+import pkg_resources
+from distutils.errors import DistutilsError
+from pkg_resources import working_set
+
+if sys.platform.startswith('java'):
+ import org.python.modules.posix.PosixModule as _os
+else:
+ _os = sys.modules[os.name]
+try:
+ _file = file
+except NameError:
+ _file = None
+_open = open
+
+
+__all__ = [
+ "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
+]
+
+
+def _execfile(filename, globals, locals=None):
+ """
+ Python 3 implementation of execfile.
+ """
+ mode = 'rb'
+ with open(filename, mode) as stream:
+ script = stream.read()
+ if locals is None:
+ locals = globals
+ code = compile(script, filename, 'exec')
+ exec(code, globals, locals)
+
+
+@contextlib.contextmanager
+def save_argv(repl=None):
+ saved = sys.argv[:]
+ if repl is not None:
+ sys.argv[:] = repl
+ try:
+ yield saved
+ finally:
+ sys.argv[:] = saved
+
+
+@contextlib.contextmanager
+def save_path():
+ saved = sys.path[:]
+ try:
+ yield saved
+ finally:
+ sys.path[:] = saved
+
+
+@contextlib.contextmanager
+def override_temp(replacement):
+ """
+ Monkey-patch tempfile.tempdir with replacement, ensuring it exists
+ """
+ os.makedirs(replacement, exist_ok=True)
+
+ saved = tempfile.tempdir
+
+ tempfile.tempdir = replacement
+
+ try:
+ yield
+ finally:
+ tempfile.tempdir = saved
+
+
+@contextlib.contextmanager
+def pushd(target):
+ saved = os.getcwd()
+ os.chdir(target)
+ try:
+ yield saved
+ finally:
+ os.chdir(saved)
+
+
+class UnpickleableException(Exception):
+ """
+ An exception representing another Exception that could not be pickled.
+ """
+
+ @staticmethod
+ def dump(type, exc):
+ """
+ Always return a dumped (pickled) type and exc. If exc can't be pickled,
+ wrap it in UnpickleableException first.
+ """
+ try:
+ return pickle.dumps(type), pickle.dumps(exc)
+ except Exception:
+ # get UnpickleableException inside the sandbox
+ from setuptools.sandbox import UnpickleableException as cls
+ return cls.dump(cls, cls(repr(exc)))
+
+
+class ExceptionSaver:
+ """
+ A Context Manager that will save an exception, serialized, and restore it
+ later.
+ """
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, exc, tb):
+ if not exc:
+ return
+
+ # dump the exception
+ self._saved = UnpickleableException.dump(type, exc)
+ self._tb = tb
+
+ # suppress the exception
+ return True
+
+ def resume(self):
+ "restore and re-raise any exception"
+
+ if '_saved' not in vars(self):
+ return
+
+ type, exc = map(pickle.loads, self._saved)
+ raise exc.with_traceback(self._tb)
+
+
+@contextlib.contextmanager
+def save_modules():
+ """
+ Context in which imported modules are saved.
+
+ Translates exceptions internal to the context into the equivalent exception
+ outside the context.
+ """
+ saved = sys.modules.copy()
+ with ExceptionSaver() as saved_exc:
+ yield saved
+
+ sys.modules.update(saved)
+ # remove any modules imported since
+ del_modules = (
+ mod_name for mod_name in sys.modules
+ if mod_name not in saved
+ # exclude any encodings modules. See #285
+ and not mod_name.startswith('encodings.')
+ )
+ _clear_modules(del_modules)
+
+ saved_exc.resume()
+
+
+def _clear_modules(module_names):
+ for mod_name in list(module_names):
+ del sys.modules[mod_name]
+
+
+@contextlib.contextmanager
+def save_pkg_resources_state():
+ saved = pkg_resources.__getstate__()
+ try:
+ yield saved
+ finally:
+ pkg_resources.__setstate__(saved)
+
+
+@contextlib.contextmanager
+def setup_context(setup_dir):
+ temp_dir = os.path.join(setup_dir, 'temp')
+ with save_pkg_resources_state():
+ with save_modules():
+ with save_path():
+ hide_setuptools()
+ with save_argv():
+ with override_temp(temp_dir):
+ with pushd(setup_dir):
+ # ensure setuptools commands are available
+ __import__('setuptools')
+ yield
+
+
+_MODULES_TO_HIDE = {
+ 'setuptools',
+ 'distutils',
+ 'pkg_resources',
+ 'Cython',
+ '_distutils_hack',
+}
+
+
+def _needs_hiding(mod_name):
+ """
+ >>> _needs_hiding('setuptools')
+ True
+ >>> _needs_hiding('pkg_resources')
+ True
+ >>> _needs_hiding('setuptools_plugin')
+ False
+ >>> _needs_hiding('setuptools.__init__')
+ True
+ >>> _needs_hiding('distutils')
+ True
+ >>> _needs_hiding('os')
+ False
+ >>> _needs_hiding('Cython')
+ True
+ """
+ base_module = mod_name.split('.', 1)[0]
+ return base_module in _MODULES_TO_HIDE
+
+
+def hide_setuptools():
+ """
+ Remove references to setuptools' modules from sys.modules to allow the
+ invocation to import the most appropriate setuptools. This technique is
+ necessary to avoid issues such as #315 where setuptools upgrading itself
+ would fail to find a function declared in the metadata.
+ """
+ _distutils_hack = sys.modules.get('_distutils_hack', None)
+ if _distutils_hack is not None:
+ _distutils_hack.remove_shim()
+
+ modules = filter(_needs_hiding, sys.modules)
+ _clear_modules(modules)
+
+
+def run_setup(setup_script, args):
+ """Run a distutils setup script, sandboxed in its directory"""
+ setup_dir = os.path.abspath(os.path.dirname(setup_script))
+ with setup_context(setup_dir):
+ try:
+ sys.argv[:] = [setup_script] + list(args)
+ sys.path.insert(0, setup_dir)
+ # reset to include setup dir, w/clean callback list
+ working_set.__init__()
+ working_set.callbacks.append(lambda dist: dist.activate())
+
+ with DirectorySandbox(setup_dir):
+ ns = dict(__file__=setup_script, __name__='__main__')
+ _execfile(setup_script, ns)
+ except SystemExit as v:
+ if v.args and v.args[0]:
+ raise
+ # Normal exit, just return
+
+
+class AbstractSandbox:
+ """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
+
+ _active = False
+
+ def __init__(self):
+ self._attrs = [
+ name for name in dir(_os)
+ if not name.startswith('_') and hasattr(self, name)
+ ]
+
+ def _copy(self, source):
+ for name in self._attrs:
+ setattr(os, name, getattr(source, name))
+
+ def __enter__(self):
+ self._copy(self)
+ if _file:
+ builtins.file = self._file
+ builtins.open = self._open
+ self._active = True
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ self._active = False
+ if _file:
+ builtins.file = _file
+ builtins.open = _open
+ self._copy(_os)
+
+ def run(self, func):
+ """Run 'func' under os sandboxing"""
+ with self:
+ return func()
+
+ def _mk_dual_path_wrapper(name):
+ original = getattr(_os, name)
+
+ def wrap(self, src, dst, *args, **kw):
+ if self._active:
+ src, dst = self._remap_pair(name, src, dst, *args, **kw)
+ return original(src, dst, *args, **kw)
+
+ return wrap
+
+ for name in ["rename", "link", "symlink"]:
+ if hasattr(_os, name):
+ locals()[name] = _mk_dual_path_wrapper(name)
+
+ def _mk_single_path_wrapper(name, original=None):
+ original = original or getattr(_os, name)
+
+ def wrap(self, path, *args, **kw):
+ if self._active:
+ path = self._remap_input(name, path, *args, **kw)
+ return original(path, *args, **kw)
+
+ return wrap
+
+ if _file:
+ _file = _mk_single_path_wrapper('file', _file)
+ _open = _mk_single_path_wrapper('open', _open)
+ for name in [
+ "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
+ "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
+ "startfile", "mkfifo", "mknod", "pathconf", "access"
+ ]:
+ if hasattr(_os, name):
+ locals()[name] = _mk_single_path_wrapper(name)
+
+ def _mk_single_with_return(name):
+ original = getattr(_os, name)
+
+ def wrap(self, path, *args, **kw):
+ if self._active:
+ path = self._remap_input(name, path, *args, **kw)
+ return self._remap_output(name, original(path, *args, **kw))
+ return original(path, *args, **kw)
+
+ return wrap
+
+ for name in ['readlink', 'tempnam']:
+ if hasattr(_os, name):
+ locals()[name] = _mk_single_with_return(name)
+
+ def _mk_query(name):
+ original = getattr(_os, name)
+
+ def wrap(self, *args, **kw):
+ retval = original(*args, **kw)
+ if self._active:
+ return self._remap_output(name, retval)
+ return retval
+
+ return wrap
+
+ for name in ['getcwd', 'tmpnam']:
+ if hasattr(_os, name):
+ locals()[name] = _mk_query(name)
+
+ def _validate_path(self, path):
+ """Called to remap or validate any path, whether input or output"""
+ return path
+
+ def _remap_input(self, operation, path, *args, **kw):
+ """Called for path inputs"""
+ return self._validate_path(path)
+
+ def _remap_output(self, operation, path):
+ """Called for path outputs"""
+ return self._validate_path(path)
+
+ def _remap_pair(self, operation, src, dst, *args, **kw):
+ """Called for path pairs like rename, link, and symlink operations"""
+ return (
+ self._remap_input(operation + '-from', src, *args, **kw),
+ self._remap_input(operation + '-to', dst, *args, **kw)
+ )
+
+
+if hasattr(os, 'devnull'):
+ _EXCEPTIONS = [os.devnull]
+else:
+ _EXCEPTIONS = []
+
+
+class DirectorySandbox(AbstractSandbox):
+ """Restrict operations to a single subdirectory - pseudo-chroot"""
+
+ write_ops = dict.fromkeys([
+ "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
+ "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
+ ])
+
+ _exception_patterns = [
+ # Allow lib2to3 to attempt to save a pickled grammar object (#121)
+ r'.*lib2to3.*\.pickle$',
+ ]
+ "exempt writing to paths that match the pattern"
+
+ def __init__(self, sandbox, exceptions=_EXCEPTIONS):
+ self._sandbox = os.path.normcase(os.path.realpath(sandbox))
+ self._prefix = os.path.join(self._sandbox, '')
+ self._exceptions = [
+ os.path.normcase(os.path.realpath(path))
+ for path in exceptions
+ ]
+ AbstractSandbox.__init__(self)
+
+ def _violation(self, operation, *args, **kw):
+ from setuptools.sandbox import SandboxViolation
+ raise SandboxViolation(operation, args, kw)
+
+ if _file:
+
+ def _file(self, path, mode='r', *args, **kw):
+ if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
+ self._violation("file", path, mode, *args, **kw)
+ return _file(path, mode, *args, **kw)
+
+ def _open(self, path, mode='r', *args, **kw):
+ if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
+ self._violation("open", path, mode, *args, **kw)
+ return _open(path, mode, *args, **kw)
+
+ def tmpnam(self):
+ self._violation("tmpnam")
+
+ def _ok(self, path):
+ active = self._active
+ try:
+ self._active = False
+ realpath = os.path.normcase(os.path.realpath(path))
+ return (
+ self._exempted(realpath)
+ or realpath == self._sandbox
+ or realpath.startswith(self._prefix)
+ )
+ finally:
+ self._active = active
+
+ def _exempted(self, filepath):
+ start_matches = (
+ filepath.startswith(exception)
+ for exception in self._exceptions
+ )
+ pattern_matches = (
+ re.match(pattern, filepath)
+ for pattern in self._exception_patterns
+ )
+ candidates = itertools.chain(start_matches, pattern_matches)
+ return any(candidates)
+
+ def _remap_input(self, operation, path, *args, **kw):
+ """Called for path inputs"""
+ if operation in self.write_ops and not self._ok(path):
+ self._violation(operation, os.path.realpath(path), *args, **kw)
+ return path
+
+ def _remap_pair(self, operation, src, dst, *args, **kw):
+ """Called for path pairs like rename, link, and symlink operations"""
+ if not self._ok(src) or not self._ok(dst):
+ self._violation(operation, src, dst, *args, **kw)
+ return (src, dst)
+
+ def open(self, file, flags, mode=0o777, *args, **kw):
+ """Called for low-level os.open()"""
+ if flags & WRITE_FLAGS and not self._ok(file):
+ self._violation("os.open", file, flags, mode, *args, **kw)
+ return _os.open(file, flags, mode, *args, **kw)
+
+
+WRITE_FLAGS = functools.reduce(
+ operator.or_, [
+ getattr(_os, a, 0) for a in
+ "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
+)
+
+
+class SandboxViolation(DistutilsError):
+ """A setup script attempted to modify the filesystem outside the sandbox"""
+
+ tmpl = textwrap.dedent("""
+ SandboxViolation: {cmd}{args!r} {kwargs}
+
+ The package setup script has attempted to modify files on your system
+ that are not within the EasyInstall build area, and has been aborted.
+
+ This package cannot be safely installed by EasyInstall, and may not
+ support alternate installation locations even if you run its setup
+ script by hand. Please inform the package's author and the EasyInstall
+ maintainers to find out if a fix or workaround is available.
+ """).lstrip()
+
+ def __str__(self):
+ cmd, args, kwargs = self.args
+ return self.tmpl.format(**locals())
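
A minimal sketch of the sandbox in action, assuming setuptools.sandbox is
importable: a write inside the sandboxed directory goes through, while a
write outside it trips the patched open() and raises SandboxViolation before
any file is created.

    import os
    import tempfile
    from setuptools.sandbox import DirectorySandbox, SandboxViolation

    box = tempfile.mkdtemp()
    try:
        with DirectorySandbox(box):
            open(os.path.join(box, 'inside.txt'), 'w').close()  # allowed
            open(os.path.join(box, os.pardir, 'out.txt'), 'w')  # blocked
    except SandboxViolation as v:
        print(v)  # reports the attempted open() call and its arguments
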
diff --git a/third_party/python/setuptools/setuptools/script (dev).tmpl b/third_party/python/setuptools/setuptools/script (dev).tmpl
new file mode 100644
index 0000000000..39a24b0488
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/script (dev).tmpl
@@ -0,0 +1,6 @@
+# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r
+__requires__ = %(spec)r
+__import__('pkg_resources').require(%(spec)r)
+__file__ = %(dev_path)r
+with open(__file__) as f:
+ exec(compile(f.read(), __file__, 'exec'))
diff --git a/third_party/python/setuptools/setuptools/script.tmpl b/third_party/python/setuptools/setuptools/script.tmpl
new file mode 100644
index 0000000000..ff5efbcab3
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/script.tmpl
@@ -0,0 +1,3 @@
+# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r
+__requires__ = %(spec)r
+__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)
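
Both script templates are plain %-style format strings; easy_install fills
them in when generating wrapper scripts. A sketch with hypothetical values,
showing the substitution the dev template relies on as well:

    tmpl = (
        "# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r\n"
        "__requires__ = %(spec)r\n"
        "__import__('pkg_resources').run_script(%(spec)r, %(script_name)r)\n"
    )
    print(tmpl % {'spec': 'example-pkg==1.0', 'script_name': 'example-cli'})
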
diff --git a/third_party/python/setuptools/setuptools/ssl_support.py b/third_party/python/setuptools/setuptools/ssl_support.py
new file mode 100644
index 0000000000..eac5e65608
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/ssl_support.py
@@ -0,0 +1,266 @@
+import os
+import socket
+import atexit
+import re
+import functools
+import urllib.request
+import http.client
+
+
+from pkg_resources import ResolutionError, ExtractionError
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+__all__ = [
+ 'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
+ 'opener_for'
+]
+
+cert_paths = """
+/etc/pki/tls/certs/ca-bundle.crt
+/etc/ssl/certs/ca-certificates.crt
+/usr/share/ssl/certs/ca-bundle.crt
+/usr/local/share/certs/ca-root.crt
+/etc/ssl/cert.pem
+/System/Library/OpenSSL/certs/cert.pem
+/usr/local/share/certs/ca-root-nss.crt
+/etc/ssl/ca-bundle.pem
+""".strip().split()
+
+try:
+ HTTPSHandler = urllib.request.HTTPSHandler
+ HTTPSConnection = http.client.HTTPSConnection
+except AttributeError:
+ HTTPSHandler = HTTPSConnection = object
+
+is_available = ssl is not None and object not in (
+ HTTPSHandler, HTTPSConnection)
+
+
+try:
+ from ssl import CertificateError, match_hostname
+except ImportError:
+ try:
+ from backports.ssl_match_hostname import CertificateError
+ from backports.ssl_match_hostname import match_hostname
+ except ImportError:
+ CertificateError = None
+ match_hostname = None
+
+if not CertificateError:
+
+ class CertificateError(ValueError):
+ pass
+
+
+if not match_hostname:
+
+ def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ https://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r'.')
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count('*')
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn))
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a
+ # presented identifier in which the wildcard
+ # character comprises a label other than the
+ # left-most label.
+ if leftmost == '*':
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append('[^.]+')
+ elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
+ return pat.match(hostname)
+
+ def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError("empty or no certificate")
+ dnsnames = []
+ san = cert.get('subjectAltName', ())
+ for key, value in san:
+ if key == 'DNS':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get('subject', ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == 'commonName':
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError(
+ "hostname %r doesn't match either of %s"
+ % (hostname, ', '.join(map(repr, dnsnames))))
+ elif len(dnsnames) == 1:
+ raise CertificateError(
+ "hostname %r doesn't match %r"
+ % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError(
+ "no appropriate commonName or "
+ "subjectAltName fields were found")
+
+
+class VerifyingHTTPSHandler(HTTPSHandler):
+ """Simple verifying handler: no auth, subclasses, timeouts, etc."""
+
+ def __init__(self, ca_bundle):
+ self.ca_bundle = ca_bundle
+ HTTPSHandler.__init__(self)
+
+ def https_open(self, req):
+ return self.do_open(
+ lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw),
+ req
+ )
+
+
+class VerifyingHTTPSConn(HTTPSConnection):
+ """Simple verifying connection: no auth, subclasses, timeouts, etc."""
+
+ def __init__(self, host, ca_bundle, **kw):
+ HTTPSConnection.__init__(self, host, **kw)
+ self.ca_bundle = ca_bundle
+
+ def connect(self):
+ sock = socket.create_connection(
+ (self.host, self.port), getattr(self, 'source_address', None)
+ )
+
+ # Handle the socket if a (proxy) tunnel is present
+ if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
+ self.sock = sock
+ self._tunnel()
+ # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
+ # change self.host to mean the proxy server host when tunneling is
+ # being used. Adapt, since we are interested in the destination
+ # host for the match_hostname() comparison.
+ actual_host = self._tunnel_host
+ else:
+ actual_host = self.host
+
+ if hasattr(ssl, 'create_default_context'):
+ ctx = ssl.create_default_context(cafile=self.ca_bundle)
+ self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
+ else:
+            # Fallback for older Pythons (< 2.7.9, < 3.4) lacking ssl.create_default_context
+ self.sock = ssl.wrap_socket(
+ sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
+ )
+ try:
+ match_hostname(self.sock.getpeercert(), actual_host)
+ except CertificateError:
+ self.sock.shutdown(socket.SHUT_RDWR)
+ self.sock.close()
+ raise
+
+
+def opener_for(ca_bundle=None):
+ """Get a urlopen() replacement that uses ca_bundle for verification"""
+ return urllib.request.build_opener(
+ VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
+ ).open
+
+
+# from jaraco.functools
+def once(func):
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ if not hasattr(func, 'always_returns'):
+ func.always_returns = func(*args, **kwargs)
+ return func.always_returns
+ return wrapper
+
+
+@once
+def get_win_certfile():
+ try:
+ import wincertstore
+ except ImportError:
+ return None
+
+ class CertFile(wincertstore.CertFile):
+ def __init__(self):
+ super(CertFile, self).__init__()
+ atexit.register(self.close)
+
+ def close(self):
+ try:
+ super(CertFile, self).close()
+ except OSError:
+ pass
+
+ _wincerts = CertFile()
+ _wincerts.addstore('CA')
+ _wincerts.addstore('ROOT')
+ return _wincerts.name
+
+
+def find_ca_bundle():
+ """Return an existing CA bundle path, or None"""
+ extant_cert_paths = filter(os.path.isfile, cert_paths)
+ return (
+ get_win_certfile()
+ or next(extant_cert_paths, None)
+ or _certifi_where()
+ )
+
+
+def _certifi_where():
+ try:
+ return __import__('certifi').where()
+ except (ImportError, ResolutionError, ExtractionError):
+ pass
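
A sketch of the verification entry point, assuming a CA bundle can be found
on the host: opener_for() wires VerifyingHTTPSHandler into a urlopen()
replacement, so every https request is checked against the bundle before a
response is returned. The request itself is left commented out to avoid a
network dependency.

    from setuptools.ssl_support import find_ca_bundle, is_available, opener_for

    bundle = find_ca_bundle()
    if is_available and bundle:
        urlopen = opener_for(bundle)
        # urlopen('https://pypi.org/simple/') would now verify the server
        # certificate and hostname against the bundle.
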
diff --git a/third_party/python/setuptools/setuptools/unicode_utils.py b/third_party/python/setuptools/setuptools/unicode_utils.py
new file mode 100644
index 0000000000..e84e65e3e1
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/unicode_utils.py
@@ -0,0 +1,42 @@
+import unicodedata
+import sys
+
+
+# HFS Plus uses decomposed UTF-8
+def decompose(path):
+ if isinstance(path, str):
+ return unicodedata.normalize('NFD', path)
+ try:
+ path = path.decode('utf-8')
+ path = unicodedata.normalize('NFD', path)
+ path = path.encode('utf-8')
+ except UnicodeError:
+ pass # Not UTF-8
+ return path
+
+
+def filesys_decode(path):
+ """
+    Ensure that the given path is decoded,
+    returning None when no expected encoding works.
+ """
+
+ if isinstance(path, str):
+ return path
+
+ fs_enc = sys.getfilesystemencoding() or 'utf-8'
+ candidates = fs_enc, 'utf-8'
+
+ for enc in candidates:
+ try:
+ return path.decode(enc)
+ except UnicodeDecodeError:
+ continue
+
+
+def try_encode(string, enc):
+ "turn unicode encoding into a functional routine"
+ try:
+ return string.encode(enc)
+ except UnicodeEncodeError:
+ return None
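
A quick sketch of the three helpers above, runnable on any Python 3:

    from setuptools.unicode_utils import decompose, filesys_decode, try_encode

    print(decompose('Caf\u00e9'))          # NFD form: 'e' plus combining acute
    print(filesys_decode(b'Caf\xc3\xa9'))  # 'Café', decoded as UTF-8
    print(try_encode('Café', 'ascii'))     # None: not ASCII-encodable
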
diff --git a/third_party/python/setuptools/setuptools/version.py b/third_party/python/setuptools/setuptools/version.py
new file mode 100644
index 0000000000..95e1869658
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/version.py
@@ -0,0 +1,6 @@
+import pkg_resources
+
+try:
+ __version__ = pkg_resources.get_distribution('setuptools').version
+except Exception:
+ __version__ = 'unknown'
diff --git a/third_party/python/setuptools/setuptools/wheel.py b/third_party/python/setuptools/setuptools/wheel.py
new file mode 100644
index 0000000000..0be811af2c
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/wheel.py
@@ -0,0 +1,213 @@
+"""Wheels support."""
+
+from distutils.util import get_platform
+from distutils import log
+import email
+import itertools
+import os
+import posixpath
+import re
+import zipfile
+
+import pkg_resources
+import setuptools
+from pkg_resources import parse_version
+from setuptools.extern.packaging.tags import sys_tags
+from setuptools.extern.packaging.utils import canonicalize_name
+from setuptools.command.egg_info import write_requirements
+
+
+WHEEL_NAME = re.compile(
+ r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
+ ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
+ )\.whl$""",
+ re.VERBOSE).match
+
+NAMESPACE_PACKAGE_INIT = \
+ "__import__('pkg_resources').declare_namespace(__name__)\n"
+
+
+def unpack(src_dir, dst_dir):
+ '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
+ for dirpath, dirnames, filenames in os.walk(src_dir):
+ subdir = os.path.relpath(dirpath, src_dir)
+ for f in filenames:
+ src = os.path.join(dirpath, f)
+ dst = os.path.join(dst_dir, subdir, f)
+ os.renames(src, dst)
+ for n, d in reversed(list(enumerate(dirnames))):
+ src = os.path.join(dirpath, d)
+ dst = os.path.join(dst_dir, subdir, d)
+ if not os.path.exists(dst):
+ # Directory does not exist in destination,
+ # rename it and prune it from os.walk list.
+ os.renames(src, dst)
+ del dirnames[n]
+ # Cleanup.
+ for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
+ assert not filenames
+ os.rmdir(dirpath)
+
+
+class Wheel:
+
+ def __init__(self, filename):
+ match = WHEEL_NAME(os.path.basename(filename))
+ if match is None:
+ raise ValueError('invalid wheel name: %r' % filename)
+ self.filename = filename
+ for k, v in match.groupdict().items():
+ setattr(self, k, v)
+
+ def tags(self):
+ '''List tags (py_version, abi, platform) supported by this wheel.'''
+ return itertools.product(
+ self.py_version.split('.'),
+ self.abi.split('.'),
+ self.platform.split('.'),
+ )
+
+ def is_compatible(self):
+        '''Is the wheel compatible with the current platform?'''
+ supported_tags = set(
+ (t.interpreter, t.abi, t.platform) for t in sys_tags())
+ return next((True for t in self.tags() if t in supported_tags), False)
+
+ def egg_name(self):
+ return pkg_resources.Distribution(
+ project_name=self.project_name, version=self.version,
+ platform=(None if self.platform == 'any' else get_platform()),
+ ).egg_name() + '.egg'
+
+ def get_dist_info(self, zf):
+ # find the correct name of the .dist-info dir in the wheel file
+ for member in zf.namelist():
+ dirname = posixpath.dirname(member)
+ if (dirname.endswith('.dist-info') and
+ canonicalize_name(dirname).startswith(
+ canonicalize_name(self.project_name))):
+ return dirname
+ raise ValueError("unsupported wheel format. .dist-info not found")
+
+ def install_as_egg(self, destination_eggdir):
+ '''Install wheel as an egg directory.'''
+ with zipfile.ZipFile(self.filename) as zf:
+ self._install_as_egg(destination_eggdir, zf)
+
+ def _install_as_egg(self, destination_eggdir, zf):
+ dist_basename = '%s-%s' % (self.project_name, self.version)
+ dist_info = self.get_dist_info(zf)
+ dist_data = '%s.data' % dist_basename
+ egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
+
+ self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
+ self._move_data_entries(destination_eggdir, dist_data)
+ self._fix_namespace_packages(egg_info, destination_eggdir)
+
+ @staticmethod
+ def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
+ def get_metadata(name):
+ with zf.open(posixpath.join(dist_info, name)) as fp:
+ value = fp.read().decode('utf-8')
+ return email.parser.Parser().parsestr(value)
+
+ wheel_metadata = get_metadata('WHEEL')
+ # Check wheel format version is supported.
+ wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
+ wheel_v1 = (
+ parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
+ )
+ if not wheel_v1:
+ raise ValueError(
+ 'unsupported wheel format version: %s' % wheel_version)
+ # Extract to target directory.
+ os.mkdir(destination_eggdir)
+ zf.extractall(destination_eggdir)
+ # Convert metadata.
+ dist_info = os.path.join(destination_eggdir, dist_info)
+ dist = pkg_resources.Distribution.from_location(
+ destination_eggdir, dist_info,
+ metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
+ )
+
+ # Note: Evaluate and strip markers now,
+ # as it's difficult to convert back from the syntax:
+ # foobar; "linux" in sys_platform and extra == 'test'
+ def raw_req(req):
+ req.marker = None
+ return str(req)
+ install_requires = list(sorted(map(raw_req, dist.requires())))
+ extras_require = {
+ extra: sorted(
+ req
+ for req in map(raw_req, dist.requires((extra,)))
+ if req not in install_requires
+ )
+ for extra in dist.extras
+ }
+ os.rename(dist_info, egg_info)
+ os.rename(
+ os.path.join(egg_info, 'METADATA'),
+ os.path.join(egg_info, 'PKG-INFO'),
+ )
+ setup_dist = setuptools.Distribution(
+ attrs=dict(
+ install_requires=install_requires,
+ extras_require=extras_require,
+ ),
+ )
+ # Temporarily disable info traces.
+ log_threshold = log._global_log.threshold
+ log.set_threshold(log.WARN)
+ try:
+ write_requirements(
+ setup_dist.get_command_obj('egg_info'),
+ None,
+ os.path.join(egg_info, 'requires.txt'),
+ )
+ finally:
+ log.set_threshold(log_threshold)
+
+ @staticmethod
+ def _move_data_entries(destination_eggdir, dist_data):
+ """Move data entries to their correct location."""
+ dist_data = os.path.join(destination_eggdir, dist_data)
+ dist_data_scripts = os.path.join(dist_data, 'scripts')
+ if os.path.exists(dist_data_scripts):
+ egg_info_scripts = os.path.join(
+ destination_eggdir, 'EGG-INFO', 'scripts')
+ os.mkdir(egg_info_scripts)
+ for entry in os.listdir(dist_data_scripts):
+ # Remove bytecode, as it's not properly handled
+ # during easy_install scripts install phase.
+ if entry.endswith('.pyc'):
+ os.unlink(os.path.join(dist_data_scripts, entry))
+ else:
+ os.rename(
+ os.path.join(dist_data_scripts, entry),
+ os.path.join(egg_info_scripts, entry),
+ )
+ os.rmdir(dist_data_scripts)
+ for subdir in filter(os.path.exists, (
+ os.path.join(dist_data, d)
+ for d in ('data', 'headers', 'purelib', 'platlib')
+ )):
+ unpack(subdir, destination_eggdir)
+ if os.path.exists(dist_data):
+ os.rmdir(dist_data)
+
+ @staticmethod
+ def _fix_namespace_packages(egg_info, destination_eggdir):
+ namespace_packages = os.path.join(
+ egg_info, 'namespace_packages.txt')
+ if os.path.exists(namespace_packages):
+ with open(namespace_packages) as fp:
+ namespace_packages = fp.read().split()
+ for mod in namespace_packages:
+ mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
+ mod_init = os.path.join(mod_dir, '__init__.py')
+ if not os.path.exists(mod_dir):
+ os.mkdir(mod_dir)
+ if not os.path.exists(mod_init):
+ with open(mod_init, 'w') as fp:
+ fp.write(NAMESPACE_PACKAGE_INIT)
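
A sketch of the Wheel helper on a hypothetical filename; the parsed fields
drive the tag matching and the name of the converted egg:

    from setuptools.wheel import Wheel

    w = Wheel('example_pkg-1.2.3-py3-none-any.whl')  # hypothetical wheel
    print(w.project_name, w.version)  # example_pkg 1.2.3
    print(list(w.tags()))             # [('py3', 'none', 'any')]
    print(w.is_compatible())          # True on any current CPython 3
    print(w.egg_name())               # e.g. example_pkg-1.2.3-py3.9.egg
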
diff --git a/third_party/python/setuptools/setuptools/windows_support.py b/third_party/python/setuptools/setuptools/windows_support.py
new file mode 100644
index 0000000000..cb977cff95
--- /dev/null
+++ b/third_party/python/setuptools/setuptools/windows_support.py
@@ -0,0 +1,29 @@
+import platform
+import ctypes
+
+
+def windows_only(func):
+ if platform.system() != 'Windows':
+ return lambda *args, **kwargs: None
+ return func
+
+
+@windows_only
+def hide_file(path):
+ """
+ Set the hidden attribute on a file or directory.
+
+ From http://stackoverflow.com/questions/19622133/
+
+ `path` must be text.
+ """
+ __import__('ctypes.wintypes')
+ SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW
+ SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD
+ SetFileAttributes.restype = ctypes.wintypes.BOOL
+
+ FILE_ATTRIBUTE_HIDDEN = 0x02
+
+ ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN)
+ if not ret:
+ raise ctypes.WinError()
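
Because @windows_only swaps the function for a do-nothing lambda on other
platforms, callers may invoke it unconditionally; a sketch with a
hypothetical path:

    from setuptools.windows_support import hide_file

    # Sets the hidden attribute on Windows (raising WinError on failure);
    # silently does nothing on any other platform.
    hide_file('C:\\build\\.eggs')
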
diff --git a/third_party/python/six/six-1.13.0.dist-info/LICENSE b/third_party/python/six/six-1.13.0.dist-info/LICENSE
new file mode 100644
index 0000000000..4b05a54526
--- /dev/null
+++ b/third_party/python/six/six-1.13.0.dist-info/LICENSE
@@ -0,0 +1,18 @@
+Copyright (c) 2010-2019 Benjamin Peterson
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/python/six/six-1.13.0.dist-info/METADATA b/third_party/python/six/six-1.13.0.dist-info/METADATA
new file mode 100644
index 0000000000..b0c8f51e1f
--- /dev/null
+++ b/third_party/python/six/six-1.13.0.dist-info/METADATA
@@ -0,0 +1,52 @@
+Metadata-Version: 2.1
+Name: six
+Version: 1.13.0
+Summary: Python 2 and 3 compatibility utilities
+Home-page: https://github.com/benjaminp/six
+Author: Benjamin Peterson
+Author-email: benjamin@python.org
+License: MIT
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Requires-Python: >=2.6, !=3.0.*, !=3.1.*
+
+.. image:: https://img.shields.io/pypi/v/six.svg
+ :target: https://pypi.org/project/six/
+ :alt: six on PyPI
+
+.. image:: https://travis-ci.org/benjaminp/six.svg?branch=master
+ :target: https://travis-ci.org/benjaminp/six
+ :alt: six on TravisCI
+
+.. image:: https://readthedocs.org/projects/six/badge/?version=latest
+ :target: https://six.readthedocs.io/
+ :alt: six's documentation on Read the Docs
+
+.. image:: https://img.shields.io/badge/license-MIT-green.svg
+ :target: https://github.com/benjaminp/six/blob/master/LICENSE
+ :alt: MIT License badge
+
+Six is a Python 2 and 3 compatibility library. It provides utility functions
+for smoothing over the differences between the Python versions with the goal of
+writing Python code that is compatible with both Python versions. See the
+documentation for more information on what is provided.
+
+Six supports every Python version since 2.6. It is contained in only one Python
+file, so it can be easily copied into your project. (The copyright and license
+notice must be retained.)
+
+Online documentation is at https://six.readthedocs.io/.
+
+Bugs can be reported to https://github.com/benjaminp/six. The code can also
+be found there.
+
+For questions about six or porting in general, email the python-porting mailing
+list: https://mail.python.org/mailman/listinfo/python-porting
+
+
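
A sketch of the compatibility layer the description refers to: the same
import works on both major versions because six.moves resolves names lazily
through the meta path importer defined in six.py below.

    import six
    from six.moves import urllib

    print(six.PY3, six.text_type)              # True <class 'str'> on Python 3
    req = urllib.request.Request('https://example.org/')  # urllib2 on Python 2
    print(isinstance('x', six.string_types))   # True on both versions
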
diff --git a/third_party/python/six/six-1.13.0.dist-info/RECORD b/third_party/python/six/six-1.13.0.dist-info/RECORD
new file mode 100644
index 0000000000..a0e6c1fd4b
--- /dev/null
+++ b/third_party/python/six/six-1.13.0.dist-info/RECORD
@@ -0,0 +1,6 @@
+six.py,sha256=bsEzSFTZTx49wQttLORmSZTrpjGc8UbXt-HBa_LZX7Q,33045
+six-1.13.0.dist-info/LICENSE,sha256=t1KbjAcXGniow2wyg5BVKOSBKUXZd9El65JujMvyRbY,1066
+six-1.13.0.dist-info/METADATA,sha256=hxS4rSPRfO8ewbcLS30anoFi6LFgUQ3mk_xknZ8RV4w,1940
+six-1.13.0.dist-info/WHEEL,sha256=8zNYZbwQSXoB9IfXOjPfeNwvAsALAjffgk27FqvCWbo,110
+six-1.13.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4
+six-1.13.0.dist-info/RECORD,,
diff --git a/third_party/python/six/six-1.13.0.dist-info/WHEEL b/third_party/python/six/six-1.13.0.dist-info/WHEEL
new file mode 100644
index 0000000000..8b701e93c2
--- /dev/null
+++ b/third_party/python/six/six-1.13.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/six/six-1.13.0.dist-info/top_level.txt b/third_party/python/six/six-1.13.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..ffe2fce498
--- /dev/null
+++ b/third_party/python/six/six-1.13.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+six
diff --git a/third_party/python/six/six.py b/third_party/python/six/six.py
new file mode 100644
index 0000000000..357e624abc
--- /dev/null
+++ b/third_party/python/six/six.py
@@ -0,0 +1,963 @@
+# Copyright (c) 2010-2019 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.13.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = str,
+ integer_types = int,
+ class_types = type,
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = basestring,
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+
+ def __len__(self):
+ return 1 << 31
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+ This class implements a PEP302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+        Return true if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+        Required if is_package is implemented"""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+ get_source = get_code # same as get_code
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser",
+ "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog",
+ "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
+ "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [
+ MovedModule("winreg", "_winreg"),
+ ]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse", "moves.urllib.parse")
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error", "moves.urllib.error")
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request", "moves.urllib.request")
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response", "moves.urllib.response")
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
+
+_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser", "moves.urllib.robotparser")
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ['parse', 'error', 'request', 'response', 'robotparser']
+
+_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
+ "moves.urllib")
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
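+# Example (illustrative): callers can unregister and re-register moves, e.g.
+#
+#     remove_move("reduce")    # drop six.moves.reduce
+#     add_move(MovedAttribute("reduce", "__builtin__", "functools"))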
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+ def advance_iterator(it):
+ return it.next()
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(get_unbound_function,
+ """Get the function out of a possibly unbound function""")
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems,
+ "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(iterlists,
+ "Return an iterator over the (key, [values]) pairs of a dictionary.")
+
+
+if PY3:
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+ unichr = chr
+ import struct
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+ def b(s):
+ return s
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
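+# Example (illustrative): b() and u() give literals a fixed type on both
+# versions: b("ascii") is always binary data and u("text") is always text,
+# while int2byte(65) == b("A") and byte2int(b("A")) == 65.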
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
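+# Example (illustrative): inside a unittest.TestCase method these wrappers
+# dispatch to whichever method name the running Python provides, e.g.
+#
+#     assertCountEqual(self, [1, 2, 2], [2, 1, 2])
+#
+# calls assertItemsEqual on Python 2 and assertCountEqual on Python 3.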
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+else:
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_("""def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+""")
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_("""def raise_from(value, from_value):
+ try:
+ if from_value is None:
+ raise value
+ raise value from from_value
+ finally:
+ value = None
+""")
+elif sys.version_info[:2] > (3, 2):
+ exec_("""def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+""")
+else:
+ def raise_from(value, from_value):
+ raise value
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (isinstance(fp, file) and
+ isinstance(data, unicode) and
+ fp.encoding is not None):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+ def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+ return wrapper
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(type):
+
+ def __new__(cls, name, this_bases, d):
+ if sys.version_info[:2] >= (3, 7):
+ # This version introduced PEP 560 that requires a bit
+ # of extra care (we mimic what is done by __build_class__).
+ resolved_bases = types.resolve_bases(bases)
+ if resolved_bases is not bases:
+ d['__orig_bases__'] = bases
+ else:
+ resolved_bases = bases
+ return meta(name, resolved_bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+ return type.__new__(metaclass, 'temporary_class', (), {})
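+# Example (illustrative): with_metaclass() lets a single class statement work
+# on both versions, e.g.
+#
+#     class Meta(type):
+#         pass
+#
+#     class Base(with_metaclass(Meta, object)):
+#         pass
+#
+# after which type(Base) is Meta and Base is a subclass of object.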
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get('__slots__')
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop('__dict__', None)
+ orig_vars.pop('__weakref__', None)
+ if hasattr(cls, '__qualname__'):
+ orig_vars['__qualname__'] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+ return wrapper
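+# Example (illustrative): the decorator form rebuilds the class in place, e.g.
+#
+#     @add_metaclass(Meta)
+#     class Base(object):
+#         pass
+#
+# avoiding the __metaclass__ / metaclass= syntax that differs between
+# Python 2 and Python 3.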
+
+
+def ensure_binary(s, encoding='utf-8', errors='strict'):
+ """Coerce **s** to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif isinstance(s, binary_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
+
+
+def ensure_text(s, encoding='utf-8', errors='strict'):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
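+# Example (illustrative): the ensure_* helpers normalise mixed input; on
+# Python 3:
+#
+#     ensure_binary("café")          # -> b'caf\xc3\xa9'
+#     ensure_str(b"caf\xc3\xa9")     # -> 'café'
+#     ensure_text(b"caf\xc3\xa9")    # -> 'café'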
+
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if '__str__' not in klass.__dict__:
+ raise ValueError("@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." %
+ klass.__name__)
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
+ return klass
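+# Example (illustrative):
+#
+#     @python_2_unicode_compatible
+#     class Greeting(object):
+#         def __str__(self):
+#             return u"héllo"
+#
+# On Python 2 this yields __unicode__() == u"héllo" and a UTF-8 encoded
+# __str__(); on Python 3 the decorator is a no-op.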
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+ # inserted an importer with different class.
+ if (type(importer).__name__ == "_SixMetaPathImporter" and
+ importer.name == __name__):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/third_party/python/slugid/slugid-2.0.0.dist-info/LICENSE b/third_party/python/slugid/slugid-2.0.0.dist-info/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/slugid/slugid-2.0.0.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/slugid/slugid-2.0.0.dist-info/METADATA b/third_party/python/slugid/slugid-2.0.0.dist-info/METADATA
new file mode 100644
index 0000000000..8a0ac94a95
--- /dev/null
+++ b/third_party/python/slugid/slugid-2.0.0.dist-info/METADATA
@@ -0,0 +1,17 @@
+Metadata-Version: 2.1
+Name: slugid
+Version: 2.0.0
+Summary: Base64 encoded uuid v4 slugs
+Home-page: http://taskcluster.github.io/slugid.py
+Author: Pete Moore
+Author-email: pmoore@mozilla.com
+License: MPL 2.0
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.5
+
+UNKNOWN
+
+
diff --git a/third_party/python/slugid/slugid-2.0.0.dist-info/RECORD b/third_party/python/slugid/slugid-2.0.0.dist-info/RECORD
new file mode 100644
index 0000000000..66d8d309d1
--- /dev/null
+++ b/third_party/python/slugid/slugid-2.0.0.dist-info/RECORD
@@ -0,0 +1,7 @@
+slugid/__init__.py,sha256=rj2SDYEJRXS1IWZAt703dwsLne9DAZsbQ3brdQb9Abc,1195
+slugid/slugid.py,sha256=-dXeq5mHYqmJQnus7dkC2OhQ4P46ZWFaESRWgjGBOjc,1789
+slugid-2.0.0.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+slugid-2.0.0.dist-info/METADATA,sha256=SZjQWGL040P3sozslirzFbYz2WwsZDkuaUno1lWAkVs,420
+slugid-2.0.0.dist-info/WHEEL,sha256=aSdOKpzTGLLkKenfdFGiq92od_Dmr98YfEe8iw7iZoo,110
+slugid-2.0.0.dist-info/top_level.txt,sha256=Q10EBhKySghdJ8a8z02foSmev8ksyf_Ar1ICJ8uK5n8,7
+slugid-2.0.0.dist-info/RECORD,,
diff --git a/third_party/python/slugid/slugid-2.0.0.dist-info/WHEEL b/third_party/python/slugid/slugid-2.0.0.dist-info/WHEEL
new file mode 100644
index 0000000000..131c7a865b
--- /dev/null
+++ b/third_party/python/slugid/slugid-2.0.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/slugid/slugid-2.0.0.dist-info/top_level.txt b/third_party/python/slugid/slugid-2.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..e1d5cb9085
--- /dev/null
+++ b/third_party/python/slugid/slugid-2.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+slugid
diff --git a/third_party/python/slugid/slugid/__init__.py b/third_party/python/slugid/slugid/__init__.py
new file mode 100644
index 0000000000..7953f444aa
--- /dev/null
+++ b/third_party/python/slugid/slugid/__init__.py
@@ -0,0 +1,48 @@
+# -*- coding: utf-8 -*-
+
+# **************
+# * Slugid API *
+# **************
+#
+# @)@)
+# _|_| ( )
+# _(___,`\ _,--------------._ (( /`, ))
+# `==` `*-_,' O `~._ ( ( _/ | ) )
+# `, : o } `~._.~` * ',
+# \ - _ O - ,'
+# | ; - - " ; o /
+# | O o ,-`
+# \ _,-:""""""'`:-._ - . O /
+# `""""""~'` `._ _,-`
+# """"""
+
+"""
+SlugID: Base 64 encoded v4 UUIDs
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Usage:
+
+ >>> import slugid
+ >>> s = slugid.nice()
+ >>> s
+ eWIgwMgxSfeXQ36iPbOxiQ
+ >>> u = slugid.decode(s)
+ >>> u
+ UUID('796220c0-c831-49f7-9743-7ea23db3b189')
+ >>> slugid.encode(u)
+ eWIgwMgxSfeXQ36iPbOxiQ
+ >>> slugid.v4()
+ -9OpXaCORAaFh4sJRk7PUA
+"""
+from .slugid import decode, encode, nice, v4
+
+__title__ = 'slugid'
+__version__ = '2.0.0'
+__author__ = 'Peter Moore'
+__license__ = 'MPL 2.0'
+__all__ = [
+ 'decode',
+ 'encode',
+ 'nice',
+ 'v4',
+]
diff --git a/third_party/python/slugid/slugid/slugid.py b/third_party/python/slugid/slugid/slugid.py
new file mode 100644
index 0000000000..003ee3c2aa
--- /dev/null
+++ b/third_party/python/slugid/slugid/slugid.py
@@ -0,0 +1,55 @@
+# Licensed under the Mozilla Public License 2.0.
+# https://www.mozilla.org/en-US/MPL/2.0
+
+import sys
+import uuid
+import base64
+
+
+def encode(uuid_):
+ """
+ Returns the given uuid.UUID object as a 22-character slug. This can be a
+ regular v4 slug or a "nice" slug.
+ """
+ return _convert_bytes_to_slug(uuid_.bytes)
+
+
+def _convert_bytes_to_slug(bytes_):
+ slug = base64.urlsafe_b64encode(bytes_)[:-2] # Drop '==' padding
+ if sys.version_info.major != 2 and isinstance(slug, bytes):
+ slug = slug.decode('utf-8')
+ return slug
+
+
+def decode(slug):
+ """
+ Returns the uuid.UUID object represented by the given v4 or "nice" slug
+ """
+ if sys.version_info.major != 2 and isinstance(slug, bytes):
+ slug = slug.decode('ascii')
+ slug = slug + '==' # base64 padding
+ return uuid.UUID(bytes=base64.urlsafe_b64decode(slug))
+
+
+def v4():
+ """
+ Returns a randomly generated uuid v4 compliant slug
+ """
+ return _convert_bytes_to_slug(uuid.uuid4().bytes)
+
+
+def nice():
+ """
+ Returns a randomly generated uuid v4 compliant slug which conforms to a set
+ of "nice" properties, at the cost of some entropy. Currently this means one
+ extra fixed bit (the first bit of the uuid is set to 0) which guarantees the
+ slug will begin with [A-Za-f]. For example, such slugs don't require special
+ handling when used as command-line parameters (whereas non-nice slugs may
+ start with `-` which can confuse command line tools).
+
+ Potentially, other "nice" properties may be added in the future to further
+ restrict the range of potential uuids that may be generated.
+ """
+ rawBytes = bytearray(uuid.uuid4().bytes)
+ rawBytes[0] = rawBytes[0] & 0x7f # Ensure slug starts with [A-Za-f]
+ return _convert_bytes_to_slug(rawBytes)
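+# Example round trip (illustrative), using the slug from the package
+# docstring:
+#
+#     s = nice()          # e.g. 'eWIgwMgxSfeXQ36iPbOxiQ'
+#     u = decode(s)       # uuid.UUID instance
+#     encode(u) == s      # True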
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/LICENSE b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/METADATA b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/METADATA
new file mode 100644
index 0000000000..5cd6e69c5d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/METADATA
@@ -0,0 +1,595 @@
+Metadata-Version: 2.1
+Name: taskcluster
+Version: 44.2.2
+Summary: Python client for Taskcluster
+Home-page: https://github.com/taskcluster/taskcluster
+Author: Mozilla Taskcluster and Release Engineering
+Author-email: release+python@mozilla.com
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: requests (>=2.4.3)
+Requires-Dist: mohawk (>=0.3.4)
+Requires-Dist: slugid (>=2)
+Requires-Dist: taskcluster-urls (>=12.1.0)
+Requires-Dist: six (>=1.10.0)
+Requires-Dist: aiohttp (>=3.7.4) ; python_version >= "3.6"
+Requires-Dist: async-timeout (>=2.0.0) ; python_version >= "3.6"
+Provides-Extra: test
+Requires-Dist: pytest ; extra == 'test'
+Requires-Dist: pytest-cov ; extra == 'test'
+Requires-Dist: pytest-mock ; extra == 'test'
+Requires-Dist: httmock ; extra == 'test'
+Requires-Dist: mock ; extra == 'test'
+Requires-Dist: setuptools-lint ; extra == 'test'
+Requires-Dist: flake8 ; extra == 'test'
+Requires-Dist: psutil ; extra == 'test'
+Requires-Dist: hypothesis ; extra == 'test'
+Requires-Dist: tox ; extra == 'test'
+Requires-Dist: coverage ; extra == 'test'
+Requires-Dist: python-dateutil ; extra == 'test'
+Requires-Dist: subprocess32 ; (python_version == "2.7") and extra == 'test'
+Requires-Dist: pytest-asyncio ; (python_version >= "3.6") and extra == 'test'
+Requires-Dist: aiofiles ; (python_version >= "3.6") and extra == 'test'
+Requires-Dist: httptest ; (python_version >= "3.6") and extra == 'test'
+
+# Taskcluster Client for Python
+
+[![Download](https://img.shields.io/badge/pypi-taskcluster-brightgreen)](https://pypi.python.org/pypi/taskcluster)
+[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0)
+
+**A Taskcluster client library for Python.**
+
+This library is a complete interface to Taskcluster in Python. It provides
+both synchronous and asynchronous interfaces for all Taskcluster API methods,
+in both Python-2 and Python-3 variants.
+
+## Usage
+
+For a general guide to using Taskcluster clients, see [Calling Taskcluster APIs](https://docs.taskcluster.net/docs/manual/using/api).
+
+### Setup
+
+Before calling an API end-point, you'll need to create a client instance.
+There is a class for each service, e.g., `Queue` and `Auth`. Each takes the
+same options, described below. Note that only `rootUrl` is
+required, and it's unusual to configure any other options aside from
+`credentials`.
+
+For each service, there are sync and async variants. The classes under
+`taskcluster` (e.g., `taskcluster.Queue`) are Python-2 compatible and operate
+synchronously. The classes under `taskcluster.aio` (e.g.,
+`taskcluster.aio.Queue`) require Python >= 3.6.
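+
+For example, a minimal async sketch (the root URL is a placeholder; the async
+clients support `async with`, which closes the underlying HTTP session cleanly):
+
+```python
+import asyncio
+
+import taskcluster.aio
+
+
+async def main():
+    async with taskcluster.aio.Queue({'rootUrl': 'https://tc.example.com'}) as queue:
+        print(await queue.ping())
+
+asyncio.run(main())  # Python 3.7+
+```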
+
+#### Authentication Options
+
+Here is a simple set-up of an Index client:
+
+```python
+import taskcluster
+index = taskcluster.Index({
+ 'rootUrl': 'https://tc.example.com',
+ 'credentials': {'clientId': 'id', 'accessToken': 'accessToken'},
+})
+```
+
+The `rootUrl` option is required as it gives the Taskcluster deployment to
+which API requests should be sent. Credentials are only required if the
+request is to be authenticated -- many Taskcluster API methods do not require
+authentication.
+
+In most cases, the root URL and Taskcluster credentials should be provided in [standard environment variables](https://docs.taskcluster.net/docs/manual/design/env-vars). Use `taskcluster.optionsFromEnvironment()` to read these variables automatically:
+
+```python
+auth = taskcluster.Auth(taskcluster.optionsFromEnvironment())
+```
+
+Note that this function does not respect `TASKCLUSTER_PROXY_URL`. To use the Taskcluster Proxy from within a task:
+
+```python
+auth = taskcluster.Auth({'rootUrl': os.environ['TASKCLUSTER_PROXY_URL']})
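+
+(`os` must be imported for the snippet above.)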
+```
+
+#### Authorized Scopes
+
+If you wish to perform requests on behalf of a third party that has a smaller
+set of scopes than you do, you can specify [which scopes your request should
+be allowed to
+use](https://docs.taskcluster.net/docs/manual/design/apis/hawk/authorized-scopes)
+in the `authorizedScopes` option.
+
+```python
+opts = taskcluster.optionsFromEnvironment()
+opts['authorizedScopes'] = ['queue:create-task:highest:my-provisioner/my-worker-type']
+queue = taskcluster.Queue(opts)
+```
+
+#### Other Options
+
+The following additional options are accepted when constructing a client object:
+
+* `signedUrlExpiration` - default value for the `expiration` argument to `buildSignedUrl`
+* `maxRetries` - maximum number of times to retry a failed request
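+
+For example, a client that retries failed requests at most twice and issues
+signed URLs valid for five minutes (the root URL is a placeholder):
+
+```python
+import taskcluster
+
+queue = taskcluster.Queue({
+    'rootUrl': 'https://tc.example.com',
+    'maxRetries': 2,
+    'signedUrlExpiration': 300,  # seconds
+})
+```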
+
+### Calling API Methods
+
+API methods are available as methods on the corresponding client object. For
+sync clients, these are sync methods, and for async clients they are async
+methods; the calling convention is the same in either case.
+
+There are four calling conventions for methods:
+
+```python
+client.method(v1, v2, payload)
+client.method(payload, k1=v1, k2=v2)
+client.method(payload=payload, query=query, params={k1: v1, k2: v2})
+client.method(v1, v2, payload=payload, query=query)
+```
+
+Here, `v1` and `v2` are URL parameters (named `k1` and `k2`), `payload` is the
+request payload, and `query` is a dictionary of query arguments.
+
+For example, in order to call an API method with query-string arguments:
+
+```python
+await queue.listTaskGroup('JzTGxwxhQ76_Tt1dxkaG5g',
+ query={'continuationToken': previousResponse.get('continuationToken')})
+```
+
+
+### Generating URLs
+
+It is often necessary to generate the URL for an API method without actually calling the method.
+To do so, use `buildUrl` or, for an API method that requires authentication, `buildSignedUrl`.
+
+```python
+import taskcluster
+
+index = taskcluster.Index(taskcluster.optionsFromEnvironment())
+print(index.buildUrl('findTask', 'builds.v1.latest'))
+secrets = taskcluster.Secrets(taskcluster.optionsFromEnvironment())
+print(secrets.buildSignedUrl('get', 'my-secret'))
+```
+
+Note that signed URLs are time-limited; the expiration can be set with the `signedUrlExpiration` option to the client constructor, or with the `expiration` keyword argument to `buildSignedUrl`, both given in seconds.
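+
+For instance, continuing the example above:
+
+```python
+# valid for five minutes instead of the default
+print(secrets.buildSignedUrl('get', 'my-secret', expiration=300))
+```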
+
+### Generating Temporary Credentials
+
+If you have non-temporary Taskcluster credentials you can generate a set of
+[temporary credentials](https://docs.taskcluster.net/docs/manual/design/apis/hawk/temporary-credentials) as follows. Note that the credentials cannot last more
+than 31 days, and you can only revoke them by revoking the credentials that were
+used to issue them (this takes up to one hour).
+
+It is not the responsibility of the caller to apply any clock drift adjustment
+to the start or expiry time - this is handled by the auth service directly.
+
+```python
+import datetime
+
+import taskcluster
+
+start = datetime.datetime.now()
+expiry = start + datetime.timedelta(seconds=60)
+scopes = ['ScopeA', 'ScopeB']
+name = 'foo'
+
+credentials = taskcluster.createTemporaryCredentials(
+ # issuing clientId
+ clientId,
+ # issuing accessToken
+ accessToken,
+    # Validity of temporary credentials starts here (a datetime)
+    start,
+    # Expiration of temporary credentials (a datetime)
+ expiry,
+ # Scopes to grant the temporary credentials
+ scopes,
+ # credential name (optional)
+ name
+)
+```
+
+You cannot use temporary credentials to issue new temporary credentials. You
+must have `auth:create-client:<name>` to create a named temporary credential,
+but unnamed temporary credentials can be created regardless of your scopes.
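+
+The resulting dictionary can be passed directly as the `credentials` option of
+any client, for example (the root URL is a placeholder):
+
+```python
+queue = taskcluster.Queue({
+    'rootUrl': 'https://tc.example.com',
+    'credentials': credentials,  # from createTemporaryCredentials above
+})
+```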
+
+### Handling Timestamps
+
+Many Taskcluster APIs require ISO 8601 timestamps offset into the future as a
+way of providing expiration, deadlines, etc. These can be created with
+`datetime.datetime.isoformat()`; however, offsetting `datetime.datetime`
+objects into the future is error-prone and tedious, so this library ships two
+utility functions for the purpose.
+
+```python
+dateObject = taskcluster.fromNow("2 days 3 hours 1 minute")
+ # -> datetime.datetime(2017, 1, 21, 17, 8, 1, 607929)
+dateString = taskcluster.fromNowJSON("2 days 3 hours 1 minute")
+ # -> '2017-01-21T17:09:23.240178Z'
+```
+
+By default the datetime is offset into the future; if the offset string is
+prefixed with a minus sign (`-`), the date object is offset into the past.
+This is useful in some corner cases.
+
+```python
+dateObject = taskcluster.fromNow("- 1 year 2 months 3 weeks 5 seconds");
+ # -> datetime.datetime(2015, 10, 30, 18, 16, 50, 931161)
+```
+
+The offset string is insensitive to whitespace and case. It may also
+optionally be prefixed with a plus sign (`+`) if not prefixed with a minus;
+any `+` prefix is ignored. However, entries in the offset string must be given
+in order from high to low, e.g., `2 years 1 day`. Additionally, various
+shorthands may be employed, as illustrated below.
+
+```
+ years, year, yr, y
+ months, month, mo
+ weeks, week, w
+ days, day, d
+ hours, hour, h
+ minutes, minute, min
+ seconds, second, sec, s
+```
+
+The `fromNow` method may also be given a date to be relative to, as a second
+argument. This is useful when offsetting, for example, the task expiration
+relative to the task deadline. This argument can also be passed as the kwarg
+`dateObj`:
+
+```python
+dateObject1 = taskcluster.fromNow("2 days 3 hours");
+dateObject2 = taskcluster.fromNow("1 year", dateObject1);
+taskcluster.fromNow("1 year", dateObj=dateObject1);
+ # -> datetime.datetime(2018, 1, 21, 17, 59, 0, 328934)
+```
+
+### Generating SlugIDs
+
+To generate slugIds (Taskcluster's client-generated unique IDs), use
+`taskcluster.slugId()`, which will return a unique slugId on each call.
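+
+For example (the value shown is illustrative; each call returns a fresh ID):
+
+```python
+import taskcluster
+
+taskId = taskcluster.slugId()  # e.g. 'qzDLgd4YTBie2eKeRRPVeA'
+```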
+
+In some cases it is useful to be able to create a mapping from names to
+slugIds, with the ability to generate the same slugId multiple times.
+The `taskcluster.stableSlugId()` function returns a callable that does
+just this.
+
+```python
+gen = taskcluster.stableSlugId()
+sometask = gen('sometask')
+assert gen('sometask') == sometask # same input generates same output
+assert gen('sometask') != gen('othertask')
+
+gen2 = taskcluster.stableSlugId()
+sometask2 = gen2('sometask')
+assert sometask2 != sometask # but different slugId generators produce
+ # different output
+```
+
+### Scope Analysis
+
+The `scopeMatch(assumedScopes, requiredScopeSets)` function determines
+whether one or more of a set of required scopes are satisfied by the assumed
+scopes, taking *-expansion into account. This is useful for making local
+decisions on scope satisfaction, but note that `assumedScopes` must be the
+*expanded* scopes, as this function cannot perform expansion.
+
+It takes a list of assumed scopes and a list of required scope sets in
+disjunctive normal form, and checks whether any of the required scope sets is
+satisfied.
+
+Example:
+
+```python
+requiredScopeSets = [
+ ["scopeA", "scopeB"],
+ ["scopeC:*"]
+]
+assert scopeMatch(['scopeA', 'scopeB'], requiredScopeSets)
+assert scopeMatch(['scopeC:xyz'], requiredScopeSets)
+assert not scopeMatch(['scopeA'], requiredScopeSets)
+assert not scopeMatch(['scopeC'], requiredScopeSets)
+```
+
+### Pagination
+
+Many Taskcluster API methods are paginated. There are two ways to handle
+pagination easily with the Python client. The first is to implement pagination
+in your code:
+
+```python
+import taskcluster
+queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
+i = 0
+tasks = 0
+outcome = queue.listTaskGroup('JzTGxwxhQ76_Tt1dxkaG5g')
+while True:
+    print('Response %d gave us %d more tasks' % (i, len(outcome.get('tasks', []))))
+    tasks += len(outcome.get('tasks', []))
+    if not outcome.get('continuationToken'):
+        break
+    outcome = queue.listTaskGroup(
+        'JzTGxwxhQ76_Tt1dxkaG5g',
+        query={'continuationToken': outcome['continuationToken']})
+    i += 1
+print('Task Group %s has %d tasks' % (outcome['taskGroupId'], tasks))
+```
+
+There's also an experimental feature to support built-in automatic pagination
+in the sync client. This feature allows passing a callback as the
+`paginationHandler` keyword argument. This function will be passed the
+response body of the API method as its sole positional argument.
+
+This example of the built-in pagination shows how a list of tasks could be
+built and then counted:
+
+```python
+import taskcluster
+queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
+
+responses = []
+
+def handle_page(y):
+ print("%d tasks fetched" % len(y.get('tasks', [])))
+ responses.append(y)
+
+queue.listTaskGroup('JzTGxwxhQ76_Tt1dxkaG5g', paginationHandler=handle_page)
+
+tasks = 0
+for response in responses:
+ tasks += len(response.get('tasks', []))
+
+print("%d requests fetch %d tasks" % (len(responses), tasks))
+```
+
+### Pulse Events
+
+This library can generate exchange patterns for Pulse messages based on the
+Exchanges definitions provided by each service. This is done by instantiating a
+`<service>Events` class and calling a method with the name of the event.
+Options for the topic exchange methods can be in the form of either a single
+dictionary argument or keyword arguments. Only one form is allowed.
+
+```python
+from taskcluster import client
+qEvt = client.QueueEvents({'rootUrl': 'https://tc.example.com'})
+# The following calls are equivalent
+print(qEvt.taskCompleted({'taskId': 'atask'}))
+print(qEvt.taskCompleted(taskId='atask'))
+```
+
+Note that the client library does *not* provide support for interfacing with a Pulse server.
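+
+As a sketch of how the generated pattern might be consumed, assuming the
+third-party `pika` AMQP client and a placeholder Pulse URL (neither is part of
+this library), the returned dictionary carries the exchange name and
+routing-key pattern to bind with:
+
+```python
+import pika  # third-party AMQP client, not part of taskcluster
+
+binding = qEvt.taskCompleted(taskId='atask')
+conn = pika.BlockingConnection(
+    pika.URLParameters('amqps://user:password@pulse.example.com:5671'))
+channel = conn.channel()
+queue_name = channel.queue_declare(queue='', exclusive=True).method.queue
+# 'exchange' and 'routingKeyPattern' are the keys of the returned dict
+channel.queue_bind(exchange=binding['exchange'],
+                   queue=queue_name,
+                   routing_key=binding['routingKeyPattern'])
+```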
+
+### Logging
+
+Logging is set up in `taskcluster/__init__.py`. If the special
+`DEBUG_TASKCLUSTER_CLIENT` environment variable is set, the `__init__.py`
+module will set the `logging` module's level for its logger to `logging.DEBUG`
+and if there are no existing handlers, add a `logging.StreamHandler()`
+instance. This is meant to assist those who do not wish to bother figuring out
+how to configure the Python logging module but do want debug messages.
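+
+For example, to opt in via the environment variable, which is checked at
+import time (a minimal sketch):
+
+```python
+import os
+
+os.environ['DEBUG_TASKCLUSTER_CLIENT'] = '1'  # must be set before the import
+
+import taskcluster  # noqa: E402
+```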
+
+## Uploading and Downloading Objects
+
+The Object service provides an API for reliable uploads and downloads of large objects.
+This library provides convenience methods to implement the client portion of those APIs, providing well-tested, resilient upload and download functionality.
+These methods will negotiate the appropriate method with the object service and perform the required steps to transfer the data.
+
+All methods are available in both sync and async versions, with identical APIs except for the `async`/`await` keywords.
+These methods are not available for Python-2.7.
+
+In either case, you will need to provide a configured `Object` instance with appropriate credentials for the operation.
+
+NOTE: There is a helper function to upload `s3` artifacts, `taskcluster.helper.upload_artifact`, but it is deprecated as it only supports the `s3` artifact type.
+
+### Uploads
+
+To upload, use any of the following:
+
+* `await taskcluster.aio.upload.uploadFromBuf(projectId=.., name=.., contentType=.., contentLength=.., uploadId=.., expires=.., maxRetries=.., objectService=.., data=..)` - asynchronously upload data from a buffer full of bytes.
+* `await taskcluster.aio.upload.uploadFromFile(projectId=.., name=.., contentType=.., contentLength=.., uploadId=.., expires=.., maxRetries=.., objectService=.., file=..)` - asynchronously upload data from a standard Python file.
+ Note that this is [probably what you want](https://github.com/python/asyncio/wiki/ThirdParty#filesystem), even in an async context.
+* `await taskcluster.aio.upload(projectId=.., name=.., contentType=.., contentLength=.., expires=.., uploadId=.., maxRetries=.., objectService=.., readerFactory=..)` - asynchronously upload data from an async reader factory.
+* `taskcluster.upload.uploadFromBuf(projectId=.., name=.., contentType=.., contentLength=.., expires=.., uploadId=.., maxRetries=.., objectService=.., data=..)` - upload data from a buffer full of bytes.
+* `taskcluster.upload.uploadFromFile(projectId=.., name=.., contentType=.., contentLength=.., expires=.., uploadId=.., maxRetries=.., objectService=.., file=..)` - upload data from a standard Python file.
+* `taskcluster.upload(projectId=.., name=.., contentType=.., contentLength=.., expires=.., uploadId=.., maxRetries=.., objectService=.., readerFactory=..)` - upload data from a sync reader factory.
+
+A "reader" is an object with a `read(max_size=-1)` method which reads and returns a chunk of 1 .. `max_size` bytes, or returns an empty string at EOF, async for the async functions and sync for the remainder.
+A "reader factory" is an async callable which returns a fresh reader, ready to read the first byte of the object.
+When uploads are retried, the reader factory may be called more than once.
+
+The `uploadId` parameter may be omitted, in which case a new slugId will be generated.
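+
+For example, a minimal synchronous sketch (project and object names are
+placeholders):
+
+```python
+import taskcluster
+from taskcluster import upload
+
+objectService = taskcluster.Object(taskcluster.optionsFromEnvironment())
+data = b'hello world'
+upload.uploadFromBuf(
+    projectId='my-project',          # placeholder
+    name='my-project/greeting.txt',  # placeholder
+    contentType='text/plain',
+    contentLength=len(data),
+    expires=taskcluster.fromNow('1 hour'),
+    objectService=objectService,
+    data=data,
+)
+```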
+
+### Downloads
+
+To download, use any of the following:
+
+* `await taskcluster.aio.download.downloadToBuf(name=.., maxRetries=.., objectService=..)` - asynchronously download an object to an in-memory buffer, returning a tuple (buffer, content-type).
+ If the file is larger than available memory, this will crash.
+* `await taskcluster.aio.download.downloadToFile(name=.., maxRetries=.., objectService=.., file=..)` - asynchronously download an object to a standard Python file, returning the content type.
+* `await taskcluster.aio.download.download(name=.., maxRetries=.., objectService=.., writerFactory=..)` - asynchronously download an object to an async writer factory, returning the content type.
+* `taskcluster.download.downloadToBuf(name=.., maxRetries=.., objectService=..)` - download an object to an in-memory buffer, returning a tuple (buffer, content-type).
+ If the file is larger than available memory, this will crash.
+* `taskcluster.download.downloadToFile(name=.., maxRetries=.., objectService=.., file=..)` - download an object to a standard Python file, returning the content type.
+* `taskcluster.download.download(name=.., maxRetries=.., objectService=.., writerFactory=..)` - download an object to a sync writer factory, returning the content type.
+
+A "writer" is an object with a `write(data)` method which writes the given data, async for the async functions and sync for the remainder.
+A "writer factory" is a callable (again either async or sync) which returns a fresh writer, ready to write the first byte of the object.
+When downloads are retried, the writer factory may be called more than once.
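+
+For example, a minimal synchronous sketch mirroring the upload above (the
+object name is a placeholder):
+
+```python
+import taskcluster
+from taskcluster import download
+
+objectService = taskcluster.Object(taskcluster.optionsFromEnvironment())
+buf, contentType = download.downloadToBuf(
+    name='my-project/greeting.txt',  # placeholder
+    objectService=objectService,
+)
+print(contentType, len(buf))
+```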
+
+### Artifact Downloads
+
+Artifacts can be downloaded from the queue service with similar functions to those above.
+These functions support all of the queue's storage types, raising an error for `error` artifacts.
+In each case, if `runId` is omitted then the most recent run will be used.
+
+* `await taskcluster.aio.download.downloadArtifactToBuf(taskId=.., runId=.., name=.., maxRetries=.., queueService=..)` - asynchronously download an object to an in-memory buffer, returning a tuple (buffer, content-type).
+ If the file is larger than available memory, this will crash.
+* `await taskcluster.aio.download.downloadArtifactToFile(taskId=.., runId=.., name=.., maxRetries=.., queueService=.., file=..)` - asynchronously download an object to a standard Python file, returning the content type.
+* `await taskcluster.aio.download.downloadArtifact(taskId=.., runId=.., name=.., maxRetries=.., queueService=.., writerFactory=..)` - asynchronously download an object to an async writer factory, returning the content type.
+* `taskcluster.download.downloadArtifactToBuf(taskId=.., runId=.., name=.., maxRetries=.., queueService=..)` - download an object to an in-memory buffer, returning a tuple (buffer, content-type).
+ If the file is larger than available memory, this will crash.
+* `taskcluster.download.downloadArtifactToFile(taskId=.., runId=.., name=.., maxRetries=.., queueService=.., file=..)` - download an object to a standard Python file, returning the content type.
+* `taskcluster.download.downloadArtifact(taskId=.., runId=.., name=.., maxRetries=.., queueService=.., writerFactory=..)` - download an object to a sync writer factory, returning the content type.
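+
+For example, fetching the live log of a task's most recent run (the task ID
+and artifact name are placeholders):
+
+```python
+import taskcluster
+from taskcluster import download
+
+queueService = taskcluster.Queue(taskcluster.optionsFromEnvironment())
+with open('live.log', 'wb') as f:
+    contentType = download.downloadArtifactToFile(
+        taskId='dKK3RpN9S_y6aEg0IEoufw',  # placeholder
+        name='public/logs/live.log',      # placeholder
+        queueService=queueService,
+        file=f,
+    )
+```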
+
+## Integration Helpers
+
+The Python Taskcluster client has a module `taskcluster.helper` with utilities that let you easily share authentication options across multiple services in your project.
+
+Generally a project using this library will face different use cases and authentication options:
+
+* No authentication for a new contributor without Taskcluster access,
+* Specific client credentials through environment variables on a developer's computer,
+* Taskcluster Proxy when running inside a task.
+
+### Shared authentication
+
+The class `taskcluster.helper.TaskclusterConfig` is made to be instantiated once in your project, usually in a top level module. That singleton is then accessed by different parts of your projects, whenever a Taskcluster service is needed.
+
+Here is a sample usage:
+
+1. In `project/__init__.py`, no call to Taskcluster is made at that point:
+
+```python
+from taskcluster.helper import TaskclusterConfig
+
+tc = TaskclusterConfig('https://community-tc.services.mozilla.com')
+```
+
+2. In `project/boot.py`, we authenticate on Taskcluster with provided credentials, environment variables, or the Taskcluster proxy (in that order):
+
+```python
+from project import tc
+
+tc.auth(client_id='XXX', access_token='YYY')
+```
+
+3. At that point, you can load any service using the authenticated wrapper from anywhere in your code:
+
+```python
+from project import tc
+
+def sync_usage():
+ queue = tc.get_service('queue')
+ queue.ping()
+
+async def async_usage():
+ hooks = tc.get_service('hooks', use_async=True) # Asynchronous service class
+ await hooks.ping()
+```
+
+Supported environment variables are:
+- `TASKCLUSTER_ROOT_URL` to specify your Taskcluster instance base URL. You can either use that variable or instantiate `TaskclusterConfig` with the base URL.
+- `TASKCLUSTER_CLIENT_ID` & `TASKCLUSTER_ACCESS_TOKEN` to specify your client credentials instead of providing them to `TaskclusterConfig.auth`
+- `TASKCLUSTER_PROXY_URL` to specify the proxy address used to reach Taskcluster in a task. It defaults to `http://taskcluster` when not specified.
+
+For more details on Taskcluster environment variables, [here is the documentation](https://docs.taskcluster.net/docs/manual/design/env-vars).
+
+### Loading secrets across multiple authentications
+
+Another available utility is `taskcluster.helper.load_secrets` which allows you to retrieve a secret using an authenticated `taskcluster.Secrets` instance (using `TaskclusterConfig.get_service` or the synchronous class directly).
+
+This utility loads a secret, but allows you to:
+1. share a secret across multiple projects, by using key prefixes inside the secret,
+2. check that some required keys are present in the secret,
+3. provide some default values,
+4. provide a local secret source instead of using the Taskcluster service (useful for local development or sharing _secrets_ with contributors)
+
+Let's say you have a secret on a Taskcluster instance named `project/foo/prod-config`, which is needed by a backend and some tasks. Here is its content:
+
+```yaml
+common:
+ environment: production
+ remote_log: https://log.xx.com/payload
+
+backend:
+ bugzilla_token: XXXX
+
+task:
+ backend_url: https://backend.foo.mozilla.com
+```
+
+In your backend, you would do:
+
+```python
+from taskcluster import Secrets
+from taskcluster.helper import load_secrets
+
+prod_config = load_secrets(
+ Secrets({...}),
+ 'project/foo/prod-config',
+
+ # We only need the common & backend parts
+ prefixes=['common', 'backend'],
+
+ # We absolutely need a bugzilla token to run
+ required=['bugzilla_token'],
+
+ # Let's provide some default value for the environment
+ existing={
+ 'environment': 'dev',
+ }
+)
+ # -> prod_config == {
+ # "environment": "production"
+ # "remote_log": "https://log.xx.com/payload",
+ # "bugzilla_token": "XXXX",
+ # }
+```
+
+In your task, you could do the following using the `TaskclusterConfig` mentioned above (the class has a shortcut to use an authenticated `Secrets` service automatically):
+
+```python
+from project import tc
+
+prod_config = tc.load_secrets(
+ 'project/foo/prod-config',
+
+    # We only need the common & task parts
+    prefixes=['common', 'task'],
+
+ # Let's provide some default value for the environment and backend_url
+ existing={
+ 'environment': 'dev',
+ 'backend_url': 'http://localhost:8000',
+ }
+)
+ # -> prod_config == {
+ # "environment": "production"
+ # "remote_log": "https://log.xx.com/payload",
+ # "backend_url": "https://backend.foo.mozilla.com",
+ # }
+```
+
+To provide local secret values, first load them as a dictionary (usually by reading a local file in your format of choice: YAML, JSON, ...) and pass the dictionary to `load_secrets` via the `local_secrets` parameter:
+
+```python
+import os
+import yaml
+
+from taskcluster import Secrets
+from taskcluster.helper import load_secrets
+
+local_path = 'path/to/file.yml'
+
+prod_config = load_secrets(
+ Secrets({...}),
+ 'project/foo/prod-config',
+
+ # We support an optional local file to provide some configuration without reaching Taskcluster
+ local_secrets=yaml.safe_load(open(local_path)) if os.path.exists(local_path) else None,
+)
+```
+
+## Compatibility
+
+This library is co-versioned with Taskcluster itself.
+That is, a client with version x.y.z contains API methods corresponding to Taskcluster version x.y.z.
+Taskcluster is careful to maintain API compatibility, and guarantees it within a major version.
+That means that any client with version x.* will work against any Taskcluster services at version x.*, and is very likely to work for many other major versions of the Taskcluster services.
+Any incompatibilities are noted in the [Changelog](https://github.com/taskcluster/taskcluster/blob/main/CHANGELOG.md).
+
+
+
+
+
+
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/RECORD b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/RECORD
new file mode 100644
index 0000000000..126a485225
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/RECORD
@@ -0,0 +1,92 @@
+taskcluster/__init__.py,sha256=Y9afafyyNPMdTxXxk76Q1Yp6PwNnLKe85arUHbWyS4c,622
+taskcluster/auth.py,sha256=AME2kJmq42R7lQnxKioxrcbPHx-jM_HaNNbSHtPvfOQ,78
+taskcluster/authevents.py,sha256=cRnrErBR2FdevR4fapWFZejFKG8ZudqdxsI4fN-dcC0,84
+taskcluster/awsprovisioner.py,sha256=5-orMBEkSvOS-wjJOQEunkteyGhMiOsu1Ho-WsgSr14,88
+taskcluster/client.py,sha256=yrGFwp3hcxK2vLVifH97jD_K4ZAROdwHFN3aM3ptK7g,27026
+taskcluster/download.py,sha256=JNVLxj_MI0WjRHgz8Vx2ocEOWnszTF6vVgos-kpe7Fk,3797
+taskcluster/ec2manager.py,sha256=D7abzT8gwqNP-p3YsGLRwOf3HH2yVvwvQUIIIsS6YIc,84
+taskcluster/exceptions.py,sha256=Gf_YjoDj_fR9NUEBcfH142sAtIMBMDyoOu8Dv3a_yzU,1322
+taskcluster/github.py,sha256=77_G0H9vKy6-gqKw5vzU_Ny9l4iTQZYvPgVNUtMzc9M,80
+taskcluster/githubevents.py,sha256=fZPDLXcruH1-1V7EVAFxXNh5zabp3ifOvYtqH3Ql6Uo,86
+taskcluster/helper.py,sha256=F5rz9lbBictmZTBKW8iamf6UhFyYYgsqcZH1Ti2jj3M,6493
+taskcluster/hooks.py,sha256=EQAX26VkR-7vUE-8tWJGA20UzIRpNQFeu3B1X0IR4jw,79
+taskcluster/hooksevents.py,sha256=p4Vwj7cWE9dr7x7o2gx1vAC1p5rHyMlKi-65Yo_kgD4,85
+taskcluster/index.py,sha256=lia2B9-pISrK_r_wDAdxv9g2xdswfk7lgD3WkjefBAw,79
+taskcluster/login.py,sha256=8SxrTSYMkPTx0tMmj5ReHTe7qaHQlp-PIUm3HKUFR9o,79
+taskcluster/notify.py,sha256=Ug9IdJbfk1jDIPdN6AN2VCdEncFkNG6QuN19tJ3_k2o,80
+taskcluster/notifyevents.py,sha256=LcunL5OXFp1GrplOS4ibKXeoBPO5DUrFimUAR1TK43U,86
+taskcluster/purgecache.py,sha256=SIu9Q6Q4WtQneCv6VjWddUH8hXF8rDeWafEPuSZTXsM,84
+taskcluster/queue.py,sha256=Yq46lvAQ3jvdI_GujIvyrmzG8MiSqiEU0v4wEh0vwgI,79
+taskcluster/queueevents.py,sha256=AP1lMvDXeobiSY0zqTxAAKKWaFCiuMbxmEFM1Muw6-0,85
+taskcluster/retry.py,sha256=ppxRmAMTxyVvPD-0RQe2doih6tde4eD3FTa9mEOPRBg,1114
+taskcluster/secrets.py,sha256=k1ngR8DGCONOlAsLEz8VsnhuhdsoSHf2ycOYpSVUesE,81
+taskcluster/upload.py,sha256=S5TIV0F84b043Oic7nRTSC73a0FCb63hM4wrn7QVtww,2244
+taskcluster/utils.py,sha256=xRMFVykiWkCmzNE7V9v-cm5JMFVqSzLpaJsw37vVvME,11036
+taskcluster/workermanager.py,sha256=TKgO5XlL_awYPvK41O2cbHCE-WTApzR34uiJz8DfJlo,87
+taskcluster/workermanagerevents.py,sha256=AhgX5zop6vaczuMLh8XrF9GdIXblbnd7IMxvXJTqHRo,93
+taskcluster/aio/__init__.py,sha256=K9gSKiS7jUnTe_tO4nfFpVBah_TeecoEbZ-4L75_RVw,483
+taskcluster/aio/asyncclient.py,sha256=tAK-oiOMLaCRuTzH1C1_JIfBuOS6sGAvL7ygNWD5huM,10960
+taskcluster/aio/asyncutils.py,sha256=Ryf3MMSQzjApg6egeE6lcC6OfOgIqFZhBBwTg17xooM,5138
+taskcluster/aio/auth.py,sha256=sd5FVqwaRzJGQlAEImSsnPUZigVnHZkPWBFm1dxYLaY,83
+taskcluster/aio/authevents.py,sha256=57nValWTawxx-JgYLuxIY0kAoBZK0LgCCu2o9FfYgfs,89
+taskcluster/aio/awsprovisioner.py,sha256=gEK7O2Ptqm8PTnX2lghVdsUabH5zH8I4vAMevEpft3I,93
+taskcluster/aio/download.py,sha256=P87uBADRsmk5WD_G6Ad-GtmunAXn0djtJqJqKXbT1Zs,6966
+taskcluster/aio/ec2manager.py,sha256=k6EX4v-YtxTfKXfrW40z7Iuvnl6qdeYTkHXx5XJPG3o,89
+taskcluster/aio/github.py,sha256=iW2oYpf1AdMeWX-LP_bhLlX1swUglH_Z2V9kLz9y57M,85
+taskcluster/aio/githubevents.py,sha256=CAbBsqRAPvihfvCp2-juTQE0TybFEDtJcHYvXtsJ5mk,91
+taskcluster/aio/hooks.py,sha256=s4G5XHe_cnjqFyeSFAX6hXNPuaHISO7MIlwiKfG0kI4,84
+taskcluster/aio/hooksevents.py,sha256=4IULmioTI0cZhaTG5Pft80RJ9iv0ROdppS7XV0G5aWQ,90
+taskcluster/aio/index.py,sha256=hROSSbdy7B1_fSV2kGfz2_364xQPYLWVu81LxHz93bk,84
+taskcluster/aio/login.py,sha256=ZDaf8OT43EtHq2ub6w9oMY9bKDDZsIlBXyYbziuW8w4,84
+taskcluster/aio/notify.py,sha256=DNwTTRaIrqcYXte45QgxJJSWheHBN2pSIFIEjSpREUQ,85
+taskcluster/aio/notifyevents.py,sha256=tRQ5VfMIiUkkK0PcAHPybpXEZg_QSHypjZp7Y3ewA_I,91
+taskcluster/aio/purgecache.py,sha256=fwzKCePo1ZZ1SGYV7idms9-9tVog3mDY1Jp-WpXY46k,89
+taskcluster/aio/queue.py,sha256=sberomzhztT-2Fg--x1shyHnLjPvpDDIjpL6TlJzrJ0,84
+taskcluster/aio/queueevents.py,sha256=lv9B9fyRQaeicNCilsLKDPoLG3sTP2eeBBpLiPIwZgM,90
+taskcluster/aio/reader_writer.py,sha256=WDYwNeb-lyDDTh9Avq2pwBTX5C-zce9Yil8Pd3rEwEA,2236
+taskcluster/aio/retry.py,sha256=gln9WP1yJWzz5Scgt3FxwAH4I3ikOnRqiT9NRIKIqMI,1144
+taskcluster/aio/secrets.py,sha256=oZOlT1akPX_vsi1LmES7RHJqe_GxfmwCMgXwK8b_Kek,86
+taskcluster/aio/upload.py,sha256=ewvSnz2tzmVsiR7u0DJD-jlwVvfNNwVrd3V-unQIqvE,6006
+taskcluster/aio/workermanager.py,sha256=u9tF-rq3XT_HTT8xGQkAfjIAl8Zz3sc4PbCucJyPyy8,92
+taskcluster/aio/workermanagerevents.py,sha256=FTdOv2qGprRGgFefLFaTQJH-B0ZwRRaahOfYQZYAxq8,98
+taskcluster/generated/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskcluster/generated/_client_importer.py,sha256=PqxVoOGtpDYMgWj3I3SuwsvkmVMKMflbyN14aGay7fs,829
+taskcluster/generated/auth.py,sha256=_OSEEE-OikD-6lRszwfZwwYcdB82VMd1VimbZl2GT8w,27153
+taskcluster/generated/authevents.py,sha256=8utWiTCrQ7p-Dz0uYYXj2JXzKkOsWKQ78FQNJUyVbYE,5670
+taskcluster/generated/github.py,sha256=CWxs_8yzH7ybuIbR2r81ITyUZDgCuNHHM9Itf-aCs6E,5989
+taskcluster/generated/githubevents.py,sha256=NpiVaIlZKye5B-7VshxDcTJqdh8U4m3hqvYmheE1fDA,8162
+taskcluster/generated/hooks.py,sha256=PvsI6GBcXVV8_9OLWS576vEW_-qld52vzf_0I5RKq_4,9395
+taskcluster/generated/hooksevents.py,sha256=byRSNpQJmgkYjd8K14AgvoRei84gsYgI0S-LcgWp5y8,3933
+taskcluster/generated/index.py,sha256=Be_Fd93_-trPQpTeo05FkNGZl-CdMIODsXBs6DCHzu4,7022
+taskcluster/generated/notify.py,sha256=PQITG_sLvXmfx2NEiKUA7g7CDHGWMU7_yONj-1HzLi8,6395
+taskcluster/generated/notifyevents.py,sha256=7XZNazqU1acHhn6Krbvl1tGaS7xVDQywCdjD7s5LvVs,2201
+taskcluster/generated/object.py,sha256=k9dmS7vImQWBFs0RR1WQlaAz0-ATubvdiYcJf0EcBQo,6751
+taskcluster/generated/purgecache.py,sha256=EM4t3l6NKZrozBFQum3D4xXBAXSiN04aTy4pTAuWV5o,3761
+taskcluster/generated/queue.py,sha256=Tz-G3ZC5ONUhZ5Uin5s9FbPulhgJsF8igPVHxNYwz8A,44340
+taskcluster/generated/queueevents.py,sha256=fBXHthI0GuigfZPOI7Q3IJXIXA6vYuCA99r3YD4rH1U,27135
+taskcluster/generated/secrets.py,sha256=opcNJIDcy_as-Hzmik1YlGH7H2tj9bMKdFNESg6VIQw,4385
+taskcluster/generated/workermanager.py,sha256=QEALsnQrwljiE8_ly9LTmZ6GdmIfAmpKJkWDJibely8,14025
+taskcluster/generated/workermanagerevents.py,sha256=pqbn6QUJ-rkcb41m32myzg7lW4tlxrJ1zgmS5ZdnKwI,3396
+taskcluster/generated/aio/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskcluster/generated/aio/_client_importer.py,sha256=PqxVoOGtpDYMgWj3I3SuwsvkmVMKMflbyN14aGay7fs,829
+taskcluster/generated/aio/auth.py,sha256=E8DhNSLLXtgk3zbr-bfb7vX0r68woh9Z7BZo2zvNpxw,27573
+taskcluster/generated/aio/authevents.py,sha256=Xvhh2riCSXKUXu6lf-toLAnhDqiBwDg1PePolNvmdsw,5730
+taskcluster/generated/aio/github.py,sha256=mULWUV2qpn7177qCKqtKJSIhZ07cxb5FvkDkkEx8Uxc,6145
+taskcluster/generated/aio/githubevents.py,sha256=q0pOuUVSGQ64Rlw_FKkcwvXbsEcnH2r1AzIGFfWte6o,8222
+taskcluster/generated/aio/hooks.py,sha256=NgSC63oQ8J_krW4AFeUMcUSery2pUhmWCwLyurrC__A,9611
+taskcluster/generated/aio/hooksevents.py,sha256=i5fXbMPZDR7qXYMIkzFG0e_cGzHBs6Lpr8kAYo6Pe1g,3993
+taskcluster/generated/aio/index.py,sha256=HuYLtMag0SvYG5W1VY7ro5qDvfg1FOOP9RxalDZz3qk,7166
+taskcluster/generated/aio/notify.py,sha256=33iUtwzsXjdY3DS2-HdpHHB1Ve6rlKPgljf4c1e9iSI,6551
+taskcluster/generated/aio/notifyevents.py,sha256=XfBhEA_295uIzB1hzgscyTNw0JZURMetD80Wd6Q0l1U,2261
+taskcluster/generated/aio/object.py,sha256=u5Ws6jVulIMSk9U7cpxehNTPC7m0ilNod8NkshKDrGM,6883
+taskcluster/generated/aio/purgecache.py,sha256=3wgT_W0C_FOU5mJwZCBiXV-cAxbpxmvkeo4fIQfPyV8,3869
+taskcluster/generated/aio/queue.py,sha256=FslTOocclr5yO--Iti36Fif9elg2ac5Urb77-OoegQs,44856
+taskcluster/generated/aio/queueevents.py,sha256=2yG4WiPhkGE491iojMdyrgXVmrGkxer_92TuCYRLByw,27195
+taskcluster/generated/aio/secrets.py,sha256=T4kdIS6gPAIOieLNhSZdzc6sGs6d7VdBZy7IfMQfaQU,4505
+taskcluster/generated/aio/workermanager.py,sha256=DpSkv3jh5Bv4U8JzGnqYP4p62ZkbPxfLbAJHzVgpcts,14289
+taskcluster/generated/aio/workermanagerevents.py,sha256=xQObLJouXZF54K3aofo9I-czZ7joW-UiT9OQNZNGxes,3456
+taskcluster-44.2.2.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+taskcluster-44.2.2.dist-info/METADATA,sha256=NmCX-DTWKlMsv_FrbtVbzKEQtHHoEL3IaxEmDw8t3CI,25736
+taskcluster-44.2.2.dist-info/WHEEL,sha256=ewwEueio1C2XeHTvT17n8dZUJgOvyCWCt0WVNLClP9o,92
+taskcluster-44.2.2.dist-info/top_level.txt,sha256=Uxnnep-l0fTSnwOst3XkLMA-KHfY5ONwwtSgRmcErXU,12
+taskcluster-44.2.2.dist-info/RECORD,,
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/WHEEL b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/WHEEL
new file mode 100644
index 0000000000..5bad85fdc1
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/top_level.txt b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/top_level.txt
new file mode 100644
index 0000000000..cb1e1bb482
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster-44.2.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+taskcluster
diff --git a/third_party/python/taskcluster/taskcluster/__init__.py b/third_party/python/taskcluster/taskcluster/__init__.py
new file mode 100644
index 0000000000..55102dd5ff
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/__init__.py
@@ -0,0 +1,18 @@
+""" Python client for Taskcluster """
+from __future__ import absolute_import, division, print_function, unicode_literals
+
+import logging
+import os
+from .client import createSession # NOQA
+from .client import createTemporaryCredentials # NOQA
+from taskcluster.utils import * # NOQA
+from taskcluster.exceptions import * # NOQA
+from taskcluster.generated._client_importer import * # NOQA
+
+log = logging.getLogger(__name__)
+
+if os.environ.get("DEBUG_TASKCLUSTER_CLIENT"):
+ log.setLevel(logging.DEBUG)
+ if len(log.handlers) == 0:
+ log.addHandler(logging.StreamHandler())
+log.addHandler(logging.NullHandler())
diff --git a/third_party/python/taskcluster/taskcluster/aio/__init__.py b/third_party/python/taskcluster/taskcluster/aio/__init__.py
new file mode 100644
index 0000000000..a7d85f96c5
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/__init__.py
@@ -0,0 +1,16 @@
+""" Python client for Taskcluster """
+
+import logging
+import os
+from .asyncclient import createSession # NOQA
+from taskcluster.utils import * # NOQA
+from taskcluster.exceptions import * # NOQA
+from ..generated.aio._client_importer import * # NOQA
+
+log = logging.getLogger(__name__)
+
+if os.environ.get("DEBUG_TASKCLUSTER_CLIENT"):
+ log.setLevel(logging.DEBUG)
+ if len(log.handlers) == 0:
+ log.addHandler(logging.StreamHandler())
+log.addHandler(logging.NullHandler())
diff --git a/third_party/python/taskcluster/taskcluster/aio/asyncclient.py b/third_party/python/taskcluster/taskcluster/aio/asyncclient.py
new file mode 100644
index 0000000000..5882d81ad2
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/asyncclient.py
@@ -0,0 +1,306 @@
+"""This module is used to interact with taskcluster rest apis"""
+
+from __future__ import absolute_import, division, print_function
+
+import os
+import logging
+from six.moves import urllib
+
+import mohawk
+import mohawk.bewit
+import aiohttp
+
+from .. import exceptions
+from .. import utils
+from ..client import BaseClient, createTemporaryCredentials
+from . import asyncutils, retry
+
+log = logging.getLogger(__name__)
+
+
+# Default configuration
+_defaultConfig = config = {
+ 'credentials': {
+ 'clientId': os.environ.get('TASKCLUSTER_CLIENT_ID'),
+ 'accessToken': os.environ.get('TASKCLUSTER_ACCESS_TOKEN'),
+ 'certificate': os.environ.get('TASKCLUSTER_CERTIFICATE'),
+ },
+ 'maxRetries': 5,
+ 'signedUrlExpiration': 15 * 60,
+}
+
+
+def createSession(*args, **kwargs):
+ """ Create a new aiohttp session. This passes through all positional and
+ keyword arguments to the asyncutils.createSession() constructor.
+
+ It's preferred to do something like
+
+ async with createSession(...) as session:
+ queue = Queue(session=session)
+ await queue.ping()
+
+ or
+
+ async with createSession(...) as session:
+ async with Queue(session=session) as queue:
+ await queue.ping()
+
+ in the client code.
+ """
+ return asyncutils.createSession(*args, **kwargs)
+
+
+class AsyncBaseClient(BaseClient):
+ """ Base Class for API Client Classes. Each individual Client class
+ needs to set up its own methods for REST endpoints and Topic Exchange
+ routing key patterns. The _makeApiCall() and _topicExchange() methods
+ help with this.
+ """
+
+ def __init__(self, *args, **kwargs):
+ super(AsyncBaseClient, self).__init__(*args, **kwargs)
+ self._implicitSession = False
+ if self.session is None:
+ self._implicitSession = True
+
+ def _createSession(self):
+ """ If self.session isn't set, don't create an implicit.
+
+ To avoid `session.close()` warnings at the end of tasks, and
+ various strongly-worded aiohttp warnings about using `async with`,
+ let's set `self.session` to `None` if no session is passed in to
+ `__init__`. The `asyncutils` functions will create a new session
+ per call in that case.
+ """
+ return None
+
+ async def _makeApiCall(self, entry, *args, **kwargs):
+ """ This function is used to dispatch calls to other functions
+ for a given API Reference entry"""
+
+ x = self._processArgs(entry, *args, **kwargs)
+ routeParams, payload, query, paginationHandler, paginationLimit = x
+ route = self._subArgsInRoute(entry, routeParams)
+
+ if paginationLimit and 'limit' in entry.get('query', []):
+ query['limit'] = paginationLimit
+
+ if query:
+ _route = route + '?' + urllib.parse.urlencode(query)
+ else:
+ _route = route
+ response = await self._makeHttpRequest(entry['method'], _route, payload)
+
+ if paginationHandler:
+ paginationHandler(response)
+ while response.get('continuationToken'):
+ query['continuationToken'] = response['continuationToken']
+ _route = route + '?' + urllib.parse.urlencode(query)
+ response = await self._makeHttpRequest(entry['method'], _route, payload)
+ paginationHandler(response)
+ else:
+ return response
+
+ async def _makeHttpRequest(self, method, route, payload):
+ """ Make an HTTP Request for the API endpoint. This method wraps
+ the logic about doing failure retry and passes off the actual work
+ of doing an HTTP request to another method."""
+
+ url = self._constructUrl(route)
+ log.debug('Full URL used is: %s', url)
+
+ hawkExt = self.makeHawkExt()
+
+ # Serialize payload if given
+ if payload is not None:
+ payload = utils.dumpJson(payload)
+
+ async def tryRequest(retryFor):
+ # Construct header
+ if self._hasCredentials():
+ sender = mohawk.Sender(
+ credentials={
+ 'id': self.options['credentials']['clientId'],
+ 'key': self.options['credentials']['accessToken'],
+ 'algorithm': 'sha256',
+ },
+ ext=hawkExt if hawkExt else {},
+ url=url,
+ content=payload if payload else '',
+ content_type='application/json' if payload else '',
+ method=method,
+ )
+
+ headers = {'Authorization': sender.request_header}
+ else:
+ log.debug('Not using hawk!')
+ headers = {}
+ if payload:
+ # Set header for JSON if payload is given, note that we serialize
+ # outside this loop.
+ headers['Content-Type'] = 'application/json'
+
+ try:
+ response = await asyncutils.makeSingleHttpRequest(
+ method, url, payload, headers, session=self.session
+ )
+ except aiohttp.ClientError as rerr:
+ return retryFor(exceptions.TaskclusterConnectionError(
+ "Failed to establish connection",
+ superExc=rerr
+ ))
+
+ status = response.status
+ if status == 204:
+ return None
+
+ # Catch retryable errors and go to the beginning of the loop
+ # to do the retry
+            if 500 <= status < 600:
+ try:
+ response.raise_for_status()
+ except Exception as exc:
+ return retryFor(exc)
+
+ # Throw errors for non-retryable errors
+ if status < 200 or status >= 300:
+ # Parse messages from errors
+ data = {}
+ try:
+ data = await response.json()
+ except Exception:
+ pass # Ignore JSON errors in error messages
+ # Find error message
+ message = "Unknown Server Error"
+ if isinstance(data, dict) and 'message' in data:
+ message = data['message']
+ else:
+ if status == 401:
+ message = "Authentication Error"
+ elif status == 500:
+ message = "Internal Server Error"
+ else:
+ message = "Unknown Server Error %s\n%s" % (str(status), str(data)[:1024])
+ # Raise TaskclusterAuthFailure if this is an auth issue
+ if status == 401:
+ raise exceptions.TaskclusterAuthFailure(
+ message,
+ status_code=status,
+ body=data,
+ superExc=None
+ )
+ # Raise TaskclusterRestFailure for all other issues
+ raise exceptions.TaskclusterRestFailure(
+ message,
+ status_code=status,
+ body=data,
+ superExc=None
+ )
+
+ # Try to load JSON
+ try:
+ await response.release()
+ return await response.json()
+ except (ValueError, aiohttp.client_exceptions.ContentTypeError):
+ return {"response": response}
+
+ return await retry.retry(self.options['maxRetries'], tryRequest)
+
+ async def __aenter__(self):
+ if self._implicitSession and not self.session:
+ self.session = createSession()
+ return self
+
+ async def __aexit__(self, *args):
+ if self._implicitSession and self.session:
+ await self.session.close()
+ self.session = None
+
+
+def createApiClient(name, api):
+ api = api['reference']
+
+ attributes = dict(
+ name=name,
+ __doc__=api.get('description'),
+ classOptions={},
+ funcinfo={},
+ )
+
+ # apply a default for apiVersion; this can be removed when all services
+ # have apiVersion
+ if 'apiVersion' not in api:
+ api['apiVersion'] = 'v1'
+
+ copiedOptions = ('exchangePrefix',)
+ for opt in copiedOptions:
+ if opt in api:
+ attributes['classOptions'][opt] = api[opt]
+
+ copiedProperties = ('serviceName', 'apiVersion')
+ for opt in copiedProperties:
+ if opt in api:
+ attributes[opt] = api[opt]
+
+ for entry in api['entries']:
+ if entry['type'] == 'function':
+ def addApiCall(e):
+ async def apiCall(self, *args, **kwargs):
+ return await self._makeApiCall(e, *args, **kwargs)
+ return apiCall
+ f = addApiCall(entry)
+
+ docStr = "Call the %s api's %s method. " % (name, entry['name'])
+
+ if entry['args'] and len(entry['args']) > 0:
+ docStr += "This method takes:\n\n"
+ docStr += '\n'.join(['- ``%s``' % x for x in entry['args']])
+ docStr += '\n\n'
+ else:
+ docStr += "This method takes no arguments. "
+
+ if 'input' in entry:
+ docStr += "This method takes input ``%s``. " % entry['input']
+
+ if 'output' in entry:
+ docStr += "This method gives output ``%s``" % entry['output']
+
+ docStr += '\n\nThis method does a ``%s`` to ``%s``.' % (
+ entry['method'].upper(), entry['route'])
+
+ f.__doc__ = docStr
+ attributes['funcinfo'][entry['name']] = entry
+
+ elif entry['type'] == 'topic-exchange':
+ def addTopicExchange(e):
+ def topicExchange(self, *args, **kwargs):
+ return self._makeTopicExchange(e, *args, **kwargs)
+ return topicExchange
+
+ f = addTopicExchange(entry)
+
+ docStr = 'Generate a routing key pattern for the %s exchange. ' % entry['exchange']
+ docStr += 'This method takes a given routing key as a string or a '
+ docStr += 'dictionary. For each given dictionary key, the corresponding '
+ docStr += 'routing key token takes its value. For routing key tokens '
+ docStr += 'which are not specified by the dictionary, the * or # character '
+ docStr += 'is used depending on whether or not the key allows multiple words.\n\n'
+ docStr += 'This exchange takes the following keys:\n\n'
+ docStr += '\n'.join(['- ``%s``' % x['name'] for x in entry['routingKey']])
+
+ f.__doc__ = docStr
+
+ # Add whichever function we created
+ f.__name__ = str(entry['name'])
+ attributes[entry['name']] = f
+
+ return type(utils.toStr(name), (BaseClient,), attributes)
+
+
+__all__ = [
+ 'createTemporaryCredentials',
+ 'config',
+ 'BaseClient',
+ 'createApiClient',
+]
diff --git a/third_party/python/taskcluster/taskcluster/aio/asyncutils.py b/third_party/python/taskcluster/taskcluster/aio/asyncutils.py
new file mode 100644
index 0000000000..ce2b9f6945
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/asyncutils.py
@@ -0,0 +1,147 @@
+from __future__ import absolute_import, division, print_function
+import aiohttp
+import aiohttp.hdrs
+import asyncio
+import async_timeout
+import functools
+import logging
+import os
+import six
+
+import taskcluster.utils as utils
+import taskcluster.exceptions as exceptions
+
+log = logging.getLogger(__name__)
+
+
+def createSession(*args, **kwargs):
+ return aiohttp.ClientSession(*args, **kwargs)
+
+
+# Useful information: https://www.blog.pythonlibrary.org/2016/07/26/python-3-an-intro-to-asyncio/
+async def makeHttpRequest(method, url, payload, headers, retries=utils.MAX_RETRIES, session=None):
+ """ Make an HTTP request and retry it until success, return request """
+ retry = -1
+ response = None
+ implicit = False
+ if session is None:
+ implicit = True
+ session = aiohttp.ClientSession()
+
+ def cleanup():
+ if implicit:
+ loop = asyncio.get_event_loop()
+ loop.run_until_complete(session.close())
+
+ try:
+ while True:
+ retry += 1
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+ snooze = float(retry * retry) / 10.0
+ log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
+ await asyncio.sleep(snooze)
+
+ # Seek payload to start, if it is a file
+ if hasattr(payload, 'seek'):
+ payload.seek(0)
+
+ log.debug('Making attempt %d', retry)
+ try:
+ with async_timeout.timeout(60):
+ response = await makeSingleHttpRequest(method, url, payload, headers, session)
+ except aiohttp.ClientError as rerr:
+ if retry < retries:
+ log.warn('Retrying because of: %s' % rerr)
+ continue
+ # raise a connection exception
+ raise rerr
+ except ValueError as rerr:
+ log.warn('ValueError from aiohttp: redirect to non-http or https')
+ raise rerr
+ except RuntimeError as rerr:
+ log.warn('RuntimeError from aiohttp: session closed')
+ raise rerr
+ # Handle non 2xx status code and retry if possible
+ status = response.status
+            if 500 <= status < 600:
+                if retry < retries:
+                    log.warn('Retrying because of: %d status' % status)
+                    continue
+                else:
+                    raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
+ return response
+ finally:
+ cleanup()
+ # This code-path should be unreachable
+ assert False, "Error from last retry should have been raised!"
+
+
+async def makeSingleHttpRequest(method, url, payload, headers, session=None):
+ method = method.upper()
+ log.debug('Making a %s request to %s', method, url)
+ log.debug('HTTP Headers: %s' % str(headers))
+ log.debug('HTTP Payload: %s (limit 100 char)' % str(payload)[:100])
+ implicit = False
+ if session is None:
+ implicit = True
+ session = aiohttp.ClientSession()
+
+ skip_auto_headers = [aiohttp.hdrs.CONTENT_TYPE]
+
+ try:
+ # https://docs.aiohttp.org/en/stable/client_quickstart.html#passing-parameters-in-urls
+ # we must avoid aiohttp's helpful "requoting" functionality, as it breaks Hawk signatures
+ url = aiohttp.client.URL(url, encoded=True)
+ async with session.request(
+ method, url, data=payload, headers=headers,
+ skip_auto_headers=skip_auto_headers, compress=False
+ ) as resp:
+ response_text = await resp.text()
+ log.debug('Received HTTP Status: %s' % resp.status)
+ log.debug('Received HTTP Headers: %s' % str(resp.headers))
+ log.debug('Received HTTP Payload: %s (limit 1024 char)' %
+ six.text_type(response_text)[:1024])
+ return resp
+ finally:
+ if implicit:
+ await session.close()
+
+
+async def putFile(filename, url, contentType, session=None):
+ with open(filename, 'rb') as f:
+ contentLength = os.fstat(f.fileno()).st_size
+ return await makeHttpRequest('put', url, f, headers={
+ 'Content-Length': str(contentLength),
+ 'Content-Type': contentType,
+ }, session=session)
+
+
+def runAsync(coro):
+ """
+ Replacement of asyncio.run, as it doesn't exist in python<3.7.
+ """
+ asyncio.set_event_loop(asyncio.new_event_loop())
+ loop = asyncio.get_event_loop()
+ result = loop.run_until_complete(coro)
+ loop.close()
+ return result
+
+
+def ensureCoro(func):
+ """
+ If func is a regular function, execute in a thread and return an
+ async version of it. If func is already an async function, return
+ it without change.
+ """
+ if asyncio.iscoroutinefunction(func):
+ return func
+
+ @functools.wraps(func)
+ async def coro(*args, **kwargs):
+ loop = asyncio.get_event_loop()
+ return await loop.run_in_executor(
+ None,
+ functools.partial(func, *args, **kwargs)
+ )
+ return coro
diff --git a/third_party/python/taskcluster/taskcluster/aio/auth.py b/third_party/python/taskcluster/taskcluster/aio/auth.py
new file mode 100644
index 0000000000..0e4d7be87e
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/auth.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.auth import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/authevents.py b/third_party/python/taskcluster/taskcluster/aio/authevents.py
new file mode 100644
index 0000000000..8f04d57072
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/authevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.authevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/awsprovisioner.py b/third_party/python/taskcluster/taskcluster/aio/awsprovisioner.py
new file mode 100644
index 0000000000..5095053d51
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/awsprovisioner.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.awsprovisioner import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/download.py b/third_party/python/taskcluster/taskcluster/aio/download.py
new file mode 100644
index 0000000000..4d2aceee1a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/download.py
@@ -0,0 +1,191 @@
+"""
+Support for downloading objects from the object service, following best
+practices for that service.
+
+Downloaded data is written to a "writer" provided by a "writer factory". A
+writer has an async `write` method which writes the entire passed buffer to
+storage. A writer factory is an async callable which returns a fresh writer,
+ready to write the first byte of the object. When downloads are retried, the
+writer factory may be called more than once.
+
+Note that `aiofile.open` returns a value suitable for use as a writer, if async
+file IO is important to the application.
+
+This module provides several pre-defined writers and writer factories for
+common cases.
+"""
+import six
+
+if six.PY2:
+ raise ImportError("download is only supported in Python 3")
+
+import aiohttp
+import contextlib
+
+from .asyncutils import ensureCoro
+from .reader_writer import streamingCopy, BufferWriter, FileWriter
+from .retry import retry
+from . import Object
+from ..exceptions import TaskclusterArtifactError, TaskclusterFailure
+
+
+async def downloadToBuf(**kwargs):
+ """
+ Convenience method to download data to an in-memory buffer and return the
+ downloaded data. Arguments are the same as `download`, except that
+ `writerFactory` should not be supplied. Returns a tuple (buffer, contentType).
+ """
+ writer = None
+
+ async def writerFactory():
+ nonlocal writer
+ writer = BufferWriter()
+ return writer
+
+ contentType = await download(writerFactory=writerFactory, **kwargs)
+ return writer.getbuffer(), contentType
+
+
+async def downloadToFile(file, **kwargs):
+ """
+ Convenience method to download data to a file object. The file must be
+ writeable, in binary mode, seekable (`f.seek`), and truncatable
+ (`f.truncate`) to support retries. Arguments are the same as `download`,
+ except that `writerFactory` should not be supplied. Returns the content-type.
+ """
+ async def writerFactory():
+ file.seek(0)
+ file.truncate()
+ return FileWriter(file)
+
+ return await download(writerFactory=writerFactory, **kwargs)
+
+
+async def download(*, name, maxRetries=5, objectService, writerFactory):
+ """
+ Download the named object from the object service, using a writer returned
+ from `writerFactory` to write the data. The `maxRetries` parameter has
+ the same meaning as for service clients. The `objectService` parameter is
+ an instance of the Object class, configured with credentials for the
+ download. Returns the content-type.
+ """
+ async with aiohttp.ClientSession() as session:
+ downloadResp = await ensureCoro(objectService.startDownload)(name, {
+ "acceptDownloadMethods": {
+ "simple": True,
+ },
+ })
+
+ method = downloadResp["method"]
+
+ if method == "simple":
+ async def tryDownload(retryFor):
+ with _maybeRetryHttpRequest(retryFor):
+ writer = await writerFactory()
+ url = downloadResp['url']
+ return await _doSimpleDownload(url, writer, session)
+
+ return await retry(maxRetries, tryDownload)
+ else:
+ raise RuntimeError(f'Unknown download method {method}')
+
+
+async def downloadArtifactToBuf(**kwargs):
+ """
+ Convenience method to download an artifact to an in-memory buffer and return the
+ downloaded data. Arguments are the same as `downloadArtifact`, except that
+ `writerFactory` should not be supplied. Returns a tuple (buffer, contentType).
+ """
+ writer = None
+
+ async def writerFactory():
+ nonlocal writer
+ writer = BufferWriter()
+ return writer
+
+ contentType = await downloadArtifact(writerFactory=writerFactory, **kwargs)
+ return writer.getbuffer(), contentType
+
+
+async def downloadArtifactToFile(file, **kwargs):
+ """
+ Convenience method to download an artifact to a file object. The file must be
+ writeable, in binary mode, seekable (`f.seek`), and truncatable
+    (`f.truncate`) to support retries. Arguments are the same as `downloadArtifact`,
+ except that `writerFactory` should not be supplied. Returns the content-type.
+ """
+ async def writerFactory():
+ file.seek(0)
+ file.truncate()
+ return FileWriter(file)
+
+ return await downloadArtifact(writerFactory=writerFactory, **kwargs)
+
+
+async def downloadArtifact(*, taskId, name, runId=None, maxRetries=5, queueService, writerFactory):
+ """
+ Download the named artifact with the appropriate storageType, using a writer returned
+ from `writerFactory` to write the data. The `maxRetries` parameter has
+ the same meaning as for service clients. The `queueService` parameter is
+ an instance of the Queue class, configured with credentials for the
+ download. Returns the content-type.
+ """
+ if runId is None:
+ artifact = await ensureCoro(queueService.latestArtifact)(taskId, name)
+ else:
+ artifact = await ensureCoro(queueService.artifact)(taskId, runId, name)
+
+ if artifact["storageType"] == 's3' or artifact["storageType"] == 'reference':
+ async with aiohttp.ClientSession() as session:
+
+ async def tryDownload(retryFor):
+ with _maybeRetryHttpRequest(retryFor):
+ writer = await writerFactory()
+ return await _doSimpleDownload(artifact["url"], writer, session)
+
+ return await retry(maxRetries, tryDownload)
+
+ elif artifact["storageType"] == 'object':
+ objectService = Object({
+ "rootUrl": queueService.options["rootUrl"],
+ "maxRetries": maxRetries,
+ "credentials": artifact["credentials"],
+ })
+ return await download(
+ name=artifact["name"],
+ maxRetries=maxRetries,
+ objectService=objectService,
+ writerFactory=writerFactory)
+
+ elif artifact["storageType"] == 'error':
+ raise TaskclusterArtifactError(artifact["message"], artifact["reason"])
+
+ else:
+        raise TaskclusterFailure(f"Unknown storageType {artifact['storageType']}")
+
+
+@contextlib.contextmanager
+def _maybeRetryHttpRequest(retryFor):
+ "Catch errors from an aiohttp request and retry the retriable responses."
+ try:
+ yield
+ except aiohttp.ClientResponseError as exc:
+ # treat 4xx's as fatal, and retry others
+ if 400 <= exc.status < 500:
+ raise exc
+ return retryFor(exc)
+ except aiohttp.ClientError as exc:
+ # retry for all other aiohttp errors
+ return retryFor(exc)
+ # .. anything else is considered fatal
+
+
+async def _doSimpleDownload(url, writer, session):
+ async with session.get(url) as resp:
+ contentType = resp.content_type
+ resp.raise_for_status()
+ # note that `resp.content` is a StreamReader and satisfies the
+ # requirements of a reader in this case
+ await streamingCopy(resp.content, writer)
+
+ return contentType
diff --git a/third_party/python/taskcluster/taskcluster/aio/ec2manager.py b/third_party/python/taskcluster/taskcluster/aio/ec2manager.py
new file mode 100644
index 0000000000..8c167b2972
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/ec2manager.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.ec2manager import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/github.py b/third_party/python/taskcluster/taskcluster/aio/github.py
new file mode 100644
index 0000000000..8ef5f8015e
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/github.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.github import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/githubevents.py b/third_party/python/taskcluster/taskcluster/aio/githubevents.py
new file mode 100644
index 0000000000..34bed63dc1
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/githubevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.githubevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/hooks.py b/third_party/python/taskcluster/taskcluster/aio/hooks.py
new file mode 100644
index 0000000000..e24e4d4292
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/hooks.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.hooks import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/hooksevents.py b/third_party/python/taskcluster/taskcluster/aio/hooksevents.py
new file mode 100644
index 0000000000..7177399bc8
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/hooksevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.hooksevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/index.py b/third_party/python/taskcluster/taskcluster/aio/index.py
new file mode 100644
index 0000000000..5de09cc9bf
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/index.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.index import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/login.py b/third_party/python/taskcluster/taskcluster/aio/login.py
new file mode 100644
index 0000000000..f354e1490a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/login.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.login import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/notify.py b/third_party/python/taskcluster/taskcluster/aio/notify.py
new file mode 100644
index 0000000000..1fe99a6851
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/notify.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.notify import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/notifyevents.py b/third_party/python/taskcluster/taskcluster/aio/notifyevents.py
new file mode 100644
index 0000000000..583329d364
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/notifyevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.notifyevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/purgecache.py b/third_party/python/taskcluster/taskcluster/aio/purgecache.py
new file mode 100644
index 0000000000..42281a73bf
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/purgecache.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.purgecache import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/queue.py b/third_party/python/taskcluster/taskcluster/aio/queue.py
new file mode 100644
index 0000000000..58484ad5ad
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/queue.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.queue import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/queueevents.py b/third_party/python/taskcluster/taskcluster/aio/queueevents.py
new file mode 100644
index 0000000000..e4dec31c92
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/queueevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.queueevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/reader_writer.py b/third_party/python/taskcluster/taskcluster/aio/reader_writer.py
new file mode 100644
index 0000000000..2d9880b3a0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/reader_writer.py
@@ -0,0 +1,81 @@
+"""
+Utilities supporting the "reader" and "writer" definitions used in uploads and downloads.
+"""
+import asyncio
+import io
+
+
+class BufferWriter:
+ """A writer that writes to an in-memory buffer"""
+ def __init__(self):
+ self.buf = io.BytesIO()
+
+ async def write(self, chunk):
+ self.buf.write(chunk)
+
+ def getbuffer(self):
+ """Get the content of the in-memory buffer"""
+ return self.buf.getbuffer()
+
+
+class BufferReader:
+ """A reader that reads from an in-memory buffer"""
+ def __init__(self, data):
+ self.buf = io.BytesIO(data)
+
+ async def read(self, max_size):
+ return self.buf.read(max_size)
+
+
+class FileWriter:
+ """A writer that writes to a (sync) file. The file should be opened in binary mode
+ and empty."""
+ def __init__(self, file):
+ self.file = file
+
+ async def write(self, chunk):
+ self.file.write(chunk)
+
+
+class FileReader:
+ """A reader that reads from a (sync) file. The file should be opened in binary mode,
+ and positioned at its beginning."""
+ def __init__(self, file):
+ self.file = file
+
+ async def read(self, max_size):
+ return self.file.read(max_size)
+
+
+async def streamingCopy(reader, writer):
+ "Copy data from a reader to a writer, as those are defined in upload.py and download.py"
+ # we will read and write concurrently, but with limited buffering -- just enough
+ # that read and write operations are not forced to alternate
+ chunk_size = 64 * 1024
+ q = asyncio.Queue(maxsize=1)
+
+ async def read_loop():
+ while True:
+ chunk = await reader.read(chunk_size)
+ await q.put(chunk)
+ if not chunk:
+ break
+
+ async def write_loop():
+ while True:
+ chunk = await q.get()
+ if not chunk:
+ q.task_done()
+ break
+ await writer.write(chunk)
+ q.task_done()
+
+ read_task = asyncio.ensure_future(read_loop())
+ write_task = asyncio.ensure_future(write_loop())
+
+ try:
+ await asyncio.gather(read_task, write_task)
+ finally:
+ # cancel any task that is still running, e.g. because the other one raised
+ read_task.cancel()
+ write_task.cancel()
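+
+
+# Editor's sketch (an assumption, not part of the upstream library): copying
+# between the in-memory reader and writer defined above.
+async def _exampleStreamingCopy():
+    reader = BufferReader(b"hello, world")
+    writer = BufferWriter()
+    await streamingCopy(reader, writer)
+    assert bytes(writer.getbuffer()) == b"hello, world"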
diff --git a/third_party/python/taskcluster/taskcluster/aio/retry.py b/third_party/python/taskcluster/taskcluster/aio/retry.py
new file mode 100644
index 0000000000..d4f743f2d5
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/retry.py
@@ -0,0 +1,41 @@
+import logging
+import asyncio
+
+from .. import utils
+
+log = logging.getLogger(__name__)
+
+
+async def retry(maxRetries, tryFn):
+ """
+ Retry async `tryFn` up to `maxRetries` times. Each call to `tryFn` is passed a
+ callable which should be called with the exception object when the failure is
+ retriable. Exceptions raised from `tryFn` are treated as fatal.
+ """
+
+ retry = -1 # incremented at the top of the loop, so attempt 1 is retry 0
+ while True:
+ retry += 1
+
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+ snooze = utils.calculateSleepTime(retry)
+ log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
+ await asyncio.sleep(snooze)
+
+ retriableException = None
+
+ def retryFor(exc):
+ nonlocal retriableException
+ retriableException = exc
+
+ res = await tryFn(retryFor)
+
+ if not retriableException:
+ return res
+
+ if retry < maxRetries:
+ log.warning(f'Retrying because of: {retriableException}')
+ continue
+
+ raise retriableException
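+
+
+# Editor's sketch (an assumption, not part of the upstream library): a tryFn
+# reports retriable failures through the callable it receives; raising instead
+# makes the failure fatal immediately. `someFlakyCall` is a hypothetical
+# coroutine standing in for real work.
+async def _exampleRetry():
+    async def tryFn(retryFor):
+        try:
+            return await someFlakyCall()  # hypothetical
+        except ConnectionError as exc:
+            retryFor(exc)  # retriable: retry() sleeps and calls tryFn again
+    return await retry(5, tryFn)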
diff --git a/third_party/python/taskcluster/taskcluster/aio/secrets.py b/third_party/python/taskcluster/taskcluster/aio/secrets.py
new file mode 100644
index 0000000000..b48680a29a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/secrets.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.secrets import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/upload.py b/third_party/python/taskcluster/taskcluster/aio/upload.py
new file mode 100644
index 0000000000..f072afaec9
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/upload.py
@@ -0,0 +1,177 @@
+"""
+Support for uploading objects to the object service, following best
+practices for that service.
+
+Data for upload is read from a "reader" provided by a "reader factory". A
+reader has an async `read(max_size)` method which reads and returns a chunk of
+1 .. `max_size` bytes, or returns an empty bytes object at EOF. A reader factory is an async
+callable which returns a fresh reader, ready to read the first byte of the
+object. When uploads are retried, the reader factory may be called more than
+once.
+
+Note that `aiofile.open` returns a value suitable for use as a reader, if async
+file IO is important to the application.
+
+This module provides several pre-defined readers and reader factories for
+common cases.
+"""
+import six
+
+if six.PY2:
+ raise ImportError("upload is only supported in Python 3")
+
+import base64
+import hashlib
+
+import aiohttp
+
+import taskcluster
+from .asyncutils import ensureCoro
+from .reader_writer import streamingCopy, BufferReader, BufferWriter, FileReader
+from .retry import retry
+
+DATA_INLINE_MAX_SIZE = 8192
+
+
+async def uploadFromBuf(*, data, **kwargs):
+ """
+ Convenience method to upload data from an in-memory buffer. Arguments are the same
+ as `upload` except that `readerFactory` should not be supplied.
+ """
+ async def readerFactory():
+ return BufferReader(data)
+
+ await upload(**kwargs, readerFactory=readerFactory)
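+
+
+# Editor's sketch (an assumption, not part of the upstream library): uploading
+# a small in-memory object. The rootUrl, projectId, and object name are
+# hypothetical placeholders, and the generated Object client is assumed to be
+# importable from the sibling stub module.
+async def _exampleUploadFromBuf():
+    from .object import Object
+    objectService = Object({"rootUrl": "https://tc.example.com"})
+    data = b"hello, world"
+    await uploadFromBuf(
+        projectId="some-project", name="some/object", contentType="text/plain",
+        contentLength=len(data), expires=taskcluster.fromNow("1 hour"),
+        data=data, objectService=objectService)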
+
+
+async def uploadFromFile(*, file, **kwargs):
+ """
+ Convenience method to upload data from a file. The file should be open
+ for reading, in binary mode, and be seekable (`f.seek`). Remaining
+ arguments are the same as `upload` except that `readerFactory` should not
+ be supplied.
+ """
+ async def readerFactory():
+ file.seek(0)
+ return FileReader(file)
+
+ await upload(**kwargs, readerFactory=readerFactory)
+
+
+async def upload(*, projectId, name, contentType, contentLength, expires,
+ readerFactory, maxRetries=5, uploadId=None, objectService):
+ """
+ Upload the given data to the object service with the given metadata.
+ The `maxRetries` parameter has the same meaning as for service clients.
+ The `objectService` parameter is an instance of the Object class,
+ configured with credentials for the upload.
+ """
+ # wrap the readerFactory with one that will also hash the data
+ hashingReader = None
+
+ async def hashingReaderFactory():
+ nonlocal hashingReader
+ hashingReader = HashingReader(await readerFactory())
+ return hashingReader
+
+ async with aiohttp.ClientSession() as session:
+ if not uploadId:
+ uploadId = taskcluster.slugid.nice()
+ proposedUploadMethods = {}
+
+ if contentLength < DATA_INLINE_MAX_SIZE:
+ reader = await hashingReaderFactory()
+ writer = BufferWriter()
+ await streamingCopy(reader, writer)
+ encoded = base64.b64encode(writer.getbuffer())
+ proposedUploadMethods['dataInline'] = {
+ "contentType": contentType,
+ "objectData": encoded,
+ }
+
+ proposedUploadMethods['putUrl'] = {
+ "contentType": contentType,
+ "contentLength": contentLength,
+ }
+
+ uploadResp = await ensureCoro(objectService.createUpload)(name, {
+ "expires": expires,
+ "projectId": projectId,
+ "uploadId": uploadId,
+ "proposedUploadMethods": proposedUploadMethods,
+ })
+
+ async def tryUpload(retryFor):
+ try:
+ uploadMethod = uploadResp["uploadMethod"]
+ if 'dataInline' in uploadMethod:
+ # data is already uploaded -- nothing to do
+ pass
+ elif 'putUrl' in uploadMethod:
+ reader = await hashingReaderFactory()
+ await _putUrlUpload(uploadMethod['putUrl'], reader, session)
+ else:
+ raise RuntimeError("Could not negotiate an upload method")
+ except aiohttp.ClientResponseError as exc:
+ # treat 4xx's as fatal, and retry others
+ if 400 <= exc.status < 500:
+ raise exc
+ return retryFor(exc)
+ except aiohttp.ClientError as exc:
+ # retry for all other aiohttp errors
+ return retryFor(exc)
+ # .. anything else is considered fatal
+
+ await retry(maxRetries, tryUpload)
+
+ hashes = hashingReader.hashes(contentLength)
+
+ await ensureCoro(objectService.finishUpload)(name, {
+ "projectId": projectId,
+ "uploadId": uploadId,
+ "hashes": hashes,
+ })
+
+
+async def _putUrlUpload(method, reader, session):
+ chunk_size = 64 * 1024
+
+ async def reader_gen():
+ while True:
+ chunk = await reader.read(chunk_size)
+ if not chunk:
+ break
+ yield chunk
+
+ resp = await session.put(method['url'], headers=method['headers'], data=reader_gen())
+ resp.raise_for_status()
+
+
+class HashingReader:
+ """A Reader implementation that hashes contents as they are read."""
+
+ def __init__(self, inner):
+ self.inner = inner
+ self.sha256 = hashlib.sha256()
+ self.sha512 = hashlib.sha512()
+ self.bytes = 0
+
+ async def read(self, max_size):
+ chunk = await self.inner.read(max_size)
+ self.update(chunk)
+ return chunk
+
+ def update(self, chunk):
+ self.sha256.update(chunk)
+ self.sha512.update(chunk)
+ self.bytes += len(chunk)
+
+ def hashes(self, contentLength):
+ """Return the hashes in a format suitable for finishUpload, first checking that all the bytes
+ in the content were hashed."""
+ if contentLength != self.bytes:
+ raise RuntimeError(f"hashed {self.bytes} bytes but content length is {contentLength}")
+ return {
+ "sha256": self.sha256.hexdigest(),
+ "sha512": self.sha512.hexdigest(),
+ }
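+
+
+# Editor's sketch (an assumption, not part of the upstream library): the
+# wrapper hashes whatever passes through it, so the digests can be handed to
+# finishUpload once the content is fully read.
+async def _exampleHashingReader():
+    reader = HashingReader(BufferReader(b"abc"))
+    while await reader.read(1024):
+        pass
+    return reader.hashes(3)  # {"sha256": "...", "sha512": "..."}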
diff --git a/third_party/python/taskcluster/taskcluster/aio/workermanager.py b/third_party/python/taskcluster/taskcluster/aio/workermanager.py
new file mode 100644
index 0000000000..f7d981cf94
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/workermanager.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.workermanager import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/aio/workermanagerevents.py b/third_party/python/taskcluster/taskcluster/aio/workermanagerevents.py
new file mode 100644
index 0000000000..61f355ba6d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/aio/workermanagerevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from ..generated.aio.workermanagerevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/auth.py b/third_party/python/taskcluster/taskcluster/auth.py
new file mode 100644
index 0000000000..74cf843de9
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/auth.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.auth import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/authevents.py b/third_party/python/taskcluster/taskcluster/authevents.py
new file mode 100644
index 0000000000..1d4af5ef6d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/authevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.authevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/awsprovisioner.py b/third_party/python/taskcluster/taskcluster/awsprovisioner.py
new file mode 100644
index 0000000000..e868f87244
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/awsprovisioner.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.awsprovisioner import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/client.py b/third_party/python/taskcluster/taskcluster/client.py
new file mode 100644
index 0000000000..516f957728
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/client.py
@@ -0,0 +1,711 @@
+"""This module is used to interact with taskcluster rest apis"""
+
+from __future__ import absolute_import, division, print_function
+
+import json
+import logging
+import copy
+import hashlib
+import hmac
+import datetime
+import calendar
+import requests
+import time
+import six
+import warnings
+from six.moves import urllib
+
+import mohawk
+import mohawk.bewit
+
+import taskcluster.exceptions as exceptions
+import taskcluster.utils as utils
+import taskcluster_urls as liburls
+
+log = logging.getLogger(__name__)
+
+
+# Default configuration
+_defaultConfig = config = {
+ 'credentials': {
+ 'clientId': None,
+ 'accessToken': None,
+ 'certificate': None,
+ },
+ 'rootUrl': None,
+ 'maxRetries': 5,
+ 'signedUrlExpiration': 15 * 60,
+}
+
+
+def createSession(*args, **kwargs):
+ """ Create a new requests session. This passes through all positional and
+ keyword arguments to the requests.Session() constructor
+ """
+ return requests.Session(*args, **kwargs)
+
+
+class BaseClient(object):
+ """ Base Class for API Client Classes. Each individual Client class
+ needs to set up its own methods for REST endpoints and Topic Exchange
+ routing key patterns. The _makeApiCall() and _makeTopicExchange() methods
+ help with this.
+ """
+
+ def __init__(self, options=None, session=None):
+ if options and options.get('baseUrl'):
+ raise exceptions.TaskclusterFailure('baseUrl option is no longer allowed')
+ o = copy.deepcopy(self.classOptions)
+ o.update(_defaultConfig)
+ if options:
+ o.update(options)
+ if not o.get('rootUrl'):
+ raise exceptions.TaskclusterFailure('rootUrl option is required')
+
+ credentials = o.get('credentials')
+ if credentials:
+ for x in ('accessToken', 'clientId', 'certificate'):
+ value = credentials.get(x)
+ if value and not isinstance(value, six.binary_type):
+ try:
+ credentials[x] = credentials[x].encode('ascii')
+ except Exception:
+ s = '%s (%s) must be ascii encodable' % (x, credentials[x])
+ raise exceptions.TaskclusterAuthFailure(s)
+
+ self.options = o
+ if 'credentials' in o:
+ log.debug('credentials key scrubbed from logging output')
+ log.debug(dict((k, v) for k, v in o.items() if k != 'credentials'))
+
+ if session:
+ self.session = session
+ else:
+ self.session = self._createSession()
+
+ def _createSession(self):
+ """ Create a requests session.
+
+ Helper method which can be overridden by child classes.
+ """
+ return createSession()
+
+ def makeHawkExt(self):
+ """ Make an 'ext' for Hawk authentication """
+ o = self.options
+ c = o.get('credentials', {})
+ if c.get('clientId') and c.get('accessToken'):
+ ext = {}
+ cert = c.get('certificate')
+ if cert:
+ if six.PY3 and isinstance(cert, six.binary_type):
+ cert = cert.decode()
+ if isinstance(cert, six.string_types):
+ cert = json.loads(cert)
+ ext['certificate'] = cert
+
+ if 'authorizedScopes' in o:
+ ext['authorizedScopes'] = o['authorizedScopes']
+
+ # .encode('base64') inserts a newline, which hawk doesn't
+ # like but doesn't strip itself
+ return utils.makeB64UrlSafe(utils.encodeStringForB64Header(utils.dumpJson(ext)).strip())
+ else:
+ return {}
+
+ def _makeTopicExchange(self, entry, *args, **kwargs):
+ if len(args) == 0 and not kwargs:
+ routingKeyPattern = {}
+ elif len(args) >= 1:
+ if kwargs or len(args) != 1:
+ errStr = 'Pass either a string, single dictionary or only kwargs'
+ raise exceptions.TaskclusterTopicExchangeFailure(errStr)
+ routingKeyPattern = args[0]
+ else:
+ routingKeyPattern = kwargs
+
+ data = {
+ 'exchange': '%s/%s' % (self.options['exchangePrefix'].rstrip('/'),
+ entry['exchange'].lstrip('/'))
+ }
+
+ # If we are passed in a string, we can short-circuit this function
+ if isinstance(routingKeyPattern, six.string_types):
+ log.debug('Passing through string for topic exchange key')
+ data['routingKeyPattern'] = routingKeyPattern
+ return data
+
+ if not isinstance(routingKeyPattern, dict):
+ errStr = 'routingKeyPattern must eventually be a dict'
+ raise exceptions.TaskclusterTopicExchangeFailure(errStr)
+
+ if not routingKeyPattern:
+ routingKeyPattern = {}
+
+ # There is no canonical meaning for the maxSize and required
+ # reference entry in the JS client, so we don't try to define
+ # them here, even though they sound pretty obvious
+
+ routingKey = []
+ for key in entry['routingKey']:
+ if 'constant' in key:
+ value = key['constant']
+ elif key['name'] in routingKeyPattern:
+ log.debug('Found %s in routing key params', key['name'])
+ value = str(routingKeyPattern[key['name']])
+ if not key.get('multipleWords') and '.' in value:
+ raise exceptions.TaskclusterTopicExchangeFailure(
+ 'Cannot have periods in single word keys')
+ else:
+ value = '#' if key.get('multipleWords') else '*'
+ log.debug('Did not find %s in input params, using %s', key['name'], value)
+
+ routingKey.append(value)
+
+ data['routingKeyPattern'] = '.'.join([str(x) for x in routingKey])
+ return data
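+
+    # Editor's note (an assumption, not upstream documentation): for a
+    # generated events client, either calling form produces the same result,
+    #     queueEvents.taskCompleted(taskId='abc123')
+    #     queueEvents.taskCompleted({'taskId': 'abc123'})
+    # yielding a dict with 'exchange' and 'routingKeyPattern' keys, where
+    # unspecified tokens become '*' (single word) or '#' (multiple words).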
+
+ def buildUrl(self, methodName, *args, **kwargs):
+ entry = self.funcinfo.get(methodName)
+ if not entry:
+ raise exceptions.TaskclusterFailure(
+ 'Requested method "%s" not found in API Reference' % methodName)
+ routeParams, _, query, _, _ = self._processArgs(entry, *args, **kwargs)
+ route = self._subArgsInRoute(entry, routeParams)
+ if query:
+ route += '?' + urllib.parse.urlencode(query)
+ return liburls.api(self.options['rootUrl'], self.serviceName, self.apiVersion, route)
+
+ def buildSignedUrl(self, methodName, *args, **kwargs):
+ """ Build a signed URL. This URL contains the credentials needed to access
+ a resource."""
+
+ if 'expiration' in kwargs:
+ expiration = kwargs['expiration']
+ del kwargs['expiration']
+ else:
+ expiration = self.options['signedUrlExpiration']
+
+ expiration = int(time.time() + expiration) # Mainly so that we throw if it's not a number
+
+ requestUrl = self.buildUrl(methodName, *args, **kwargs)
+
+ if not self._hasCredentials():
+ raise exceptions.TaskclusterAuthFailure('Invalid Hawk Credentials')
+
+ clientId = utils.toStr(self.options['credentials']['clientId'])
+ accessToken = utils.toStr(self.options['credentials']['accessToken'])
+
+ def genBewit():
+ # We need to fix the output of get_bewit. It returns a url-safe base64
+ # encoded string, which contains a list of tokens separated by '\'.
+ # The first one is the clientId, the second is an int, the third is
+ # url-safe base64 encoded MAC, the fourth is the ext param.
+ # The problem is that the nested url-safe base64 encoded MAC must be
+ # base64 (i.e. not url safe) or the server will complain.
+
+ # id + '\\' + exp + '\\' + mac + '\\' + options.ext;
+ resource = mohawk.base.Resource(
+ credentials={
+ 'id': clientId,
+ 'key': accessToken,
+ 'algorithm': 'sha256',
+ },
+ method='GET',
+ ext=utils.toStr(self.makeHawkExt()),
+ url=requestUrl,
+ timestamp=expiration,
+ nonce='',
+ # content='',
+ # content_type='',
+ )
+ bewit = mohawk.bewit.get_bewit(resource)
+ return bewit.rstrip('=')
+
+ bewit = genBewit()
+
+ if not bewit:
+ raise exceptions.TaskclusterFailure('Did not receive a bewit')
+
+ u = urllib.parse.urlparse(requestUrl)
+
+ qs = u.query
+ if qs:
+ qs += '&'
+ qs += 'bewit=%s' % bewit
+
+ return urllib.parse.urlunparse((
+ u.scheme,
+ u.netloc,
+ u.path,
+ u.params,
+ qs,
+ u.fragment,
+ ))
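+
+    # Editor's note (an assumption, not upstream documentation): a typical
+    # signed-URL call, with hypothetical arguments,
+    #     url = queue.buildSignedUrl('getArtifact', taskId, runId, name,
+    #                                expiration=300)
+    # returns a URL carrying a `bewit` query parameter that grants
+    # time-limited access to the resource.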
+
+ def _constructUrl(self, route):
+ """Construct a URL for the given route on this service, based on the
+ rootUrl"""
+ return liburls.api(
+ self.options['rootUrl'],
+ self.serviceName,
+ self.apiVersion,
+ route.rstrip('/'))
+
+ def _makeApiCall(self, entry, *args, **kwargs):
+ """ This function is used to dispatch calls to other functions
+ for a given API Reference entry"""
+
+ x = self._processArgs(entry, *args, **kwargs)
+ routeParams, payload, query, paginationHandler, paginationLimit = x
+ route = self._subArgsInRoute(entry, routeParams)
+
+ if paginationLimit and 'limit' in entry.get('query', []):
+ query['limit'] = paginationLimit
+
+ if query:
+ _route = route + '?' + urllib.parse.urlencode(query)
+ else:
+ _route = route
+ response = self._makeHttpRequest(entry['method'], _route, payload)
+
+ if paginationHandler:
+ paginationHandler(response)
+ while response.get('continuationToken'):
+ query['continuationToken'] = response['continuationToken']
+ _route = route + '?' + urllib.parse.urlencode(query)
+ response = self._makeHttpRequest(entry['method'], _route, payload)
+ paginationHandler(response)
+ else:
+ return response
+
+ def _processArgs(self, entry, *_args, **_kwargs):
+ """ Given an entry, positional and keyword arguments, figure out what
+ the query-string options, payload and api arguments are.
+ """
+
+ # We need the args to be a list so we can mutate them
+ args = list(_args)
+ kwargs = copy.deepcopy(_kwargs)
+
+ reqArgs = entry['args']
+ routeParams = {}
+
+ query = {}
+ payload = None
+ kwApiArgs = {}
+
+ paginationHandler = None
+ paginationLimit = None
+
+ # There are three formats for calling methods:
+ # 1. method(v1, v2, payload)
+ # 2. method(payload, k1=v1, k2=v2)
+ # 3. method(payload=payload, query=query, params={k1: v1, k2: v2})
+ if len(kwargs) == 0:
+ if 'input' in entry and len(args) == len(reqArgs) + 1:
+ payload = args.pop()
+ if len(args) != len(reqArgs):
+ log.debug(args)
+ log.debug(reqArgs)
+ raise exceptions.TaskclusterFailure('Incorrect number of positional arguments')
+ log.debug('Using method(v1, v2, payload) calling convention')
+ else:
+ # We're considering kwargs which are the api route parameters to be
+ # called 'flat' because they're top level keys. We're special
+ # casing calls which have only api-arg kwargs and possibly a payload
+ # value and handling them directly.
+ isFlatKwargs = True
+ if len(kwargs) == len(reqArgs):
+ for arg in reqArgs:
+ if not kwargs.get(arg, False):
+ isFlatKwargs = False
+ break
+ if 'input' in entry and len(args) != 1:
+ isFlatKwargs = False
+ if 'input' not in entry and len(args) != 0:
+ isFlatKwargs = False
+ else:
+ pass # We're using payload=, query= and param=
+ else:
+ isFlatKwargs = False
+
+ # Now we're going to handle the two types of kwargs. The first is
+ # 'flat' kwargs, where the api route params are passed as top-level keys.
+ if isFlatKwargs:
+ if 'input' in entry:
+ payload = args.pop()
+ kwApiArgs = kwargs
+ log.debug('Using method(payload, k1=v1, k2=v2) calling convention')
+ warnings.warn(
+ "The method(payload, k1=v1, k2=v2) calling convention will soon be deprecated",
+ PendingDeprecationWarning
+ )
+ else:
+ kwApiArgs = kwargs.get('params', {})
+ payload = kwargs.get('payload', None)
+ query = kwargs.get('query', {})
+ paginationHandler = kwargs.get('paginationHandler', None)
+ paginationLimit = kwargs.get('paginationLimit', None)
+ log.debug('Using method(payload=payload, query=query, params={k1: v1, k2: v2}) calling convention')
+
+ if 'input' in entry and payload is None:
+ raise exceptions.TaskclusterFailure('Payload is required')
+
+ # These all need to be rendered down to a string, let's just check that
+ # they are up front and fail fast
+ for arg in args:
+ if not isinstance(arg, six.string_types) and not isinstance(arg, int):
+ raise exceptions.TaskclusterFailure(
+ 'Positional arg "%s" to %s is not a string or int' % (arg, entry['name']))
+
+ for name, arg in six.iteritems(kwApiArgs):
+ if not isinstance(arg, six.string_types) and not isinstance(arg, int):
+ raise exceptions.TaskclusterFailure(
+ 'KW arg "%s: %s" to %s is not a string or int' % (name, arg, entry['name']))
+
+ if len(args) > 0 and len(kwApiArgs) > 0:
+ raise exceptions.TaskclusterFailure('Specify either positional or keyword arguments')
+
+ # We know for sure that if we aren't given enough arguments the call
+ # should fail. We don't yet know whether to fail because of too many
+ # arguments, since positional ones might be overwritten by keyword ones
+ if len(reqArgs) > len(args) + len(kwApiArgs):
+ raise exceptions.TaskclusterFailure(
+ '%s takes %d args, only %d were given' % (
+ entry['name'], len(reqArgs), len(args) + len(kwApiArgs)))
+
+ # We also need to error out when we have more positional args than required
+ # because we'll need to go through the lists of provided and required args
+ # at the same time. Not disqualifying early means we'll get IndexErrors if
+ # there are more positional arguments than required
+ if len(args) > len(reqArgs):
+ raise exceptions.TaskclusterFailure('%s called with too many positional args'
+ % entry['name'])
+
+ i = 0
+ for arg in args:
+ log.debug('Found a positional argument: %s', arg)
+ routeParams[reqArgs[i]] = arg
+ i += 1
+
+ log.debug('After processing positional arguments, we have: %s', routeParams)
+
+ routeParams.update(kwApiArgs)
+
+ log.debug('After keyword arguments, we have: %s', routeParams)
+
+ if len(reqArgs) != len(routeParams):
+ errMsg = '%s takes args [%s], but [%s] were given' % (
+ entry['name'],
+ ', '.join(reqArgs),
+ ', '.join(sorted(routeParams.keys())))
+ log.error(errMsg)
+ raise exceptions.TaskclusterFailure(errMsg)
+
+ for reqArg in reqArgs:
+ if reqArg not in routeParams:
+ errMsg = '%s requires a "%s" argument which was not provided' % (
+ entry['name'], reqArg)
+ log.error(errMsg)
+ raise exceptions.TaskclusterFailure(errMsg)
+
+ return routeParams, payload, query, paginationHandler, paginationLimit
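+
+    # Editor's note (an assumption, not upstream documentation): the three
+    # conventions above, for a method such as Queue.createTask(taskId, payload),
+    #     queue.createTask('abc123', payload)
+    #     queue.createTask(payload, taskId='abc123')
+    #     queue.createTask(payload=payload, params={'taskId': 'abc123'})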
+
+ def _subArgsInRoute(self, entry, args):
+ """ Given a route like "/task/<taskId>/artifacts" and a mapping like
+ {"taskId": "12345"}, return a string like "/task/12345/artifacts"
+ """
+
+ route = entry['route']
+
+ for arg, val in six.iteritems(args):
+ toReplace = "<%s>" % arg
+ if toReplace not in route:
+ raise exceptions.TaskclusterFailure(
+ 'Arg %s not found in route for %s' % (arg, entry['name']))
+ val = urllib.parse.quote(str(val).encode("utf-8"), '')
+ route = route.replace("<%s>" % arg, val)
+
+ return route.lstrip('/')
+
+ def _hasCredentials(self):
+ """ Return True, if credentials is given """
+ cred = self.options.get('credentials')
+ return (
+ cred and
+ 'clientId' in cred and
+ 'accessToken' in cred and
+ cred['clientId'] and
+ cred['accessToken']
+ )
+
+ def _makeHttpRequest(self, method, route, payload):
+ """ Make an HTTP Request for the API endpoint. This method wraps
+ the logic about doing failure retry and passes off the actual work
+ of doing an HTTP request to another method."""
+
+ url = self._constructUrl(route)
+ log.debug('Full URL used is: %s', url)
+
+ hawkExt = self.makeHawkExt()
+
+ # Serialize payload if given
+ if payload is not None:
+ payload = utils.dumpJson(payload)
+
+ # Do a loop of retries
+ retry = -1 # incremented at the top of the loop, so attempt 1 is retry 0
+ retries = self.options['maxRetries']
+ while retry < retries:
+ retry += 1
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+ time.sleep(utils.calculateSleepTime(retry))
+ # Construct header
+ if self._hasCredentials():
+ sender = mohawk.Sender(
+ credentials={
+ 'id': self.options['credentials']['clientId'],
+ 'key': self.options['credentials']['accessToken'],
+ 'algorithm': 'sha256',
+ },
+ ext=hawkExt if hawkExt else {},
+ url=url,
+ content=payload if payload else '',
+ content_type='application/json' if payload else '',
+ method=method,
+ )
+
+ headers = {'Authorization': sender.request_header}
+ else:
+ log.debug('Not using hawk!')
+ headers = {}
+ if payload:
+ # Set header for JSON if payload is given, note that we serialize
+ # outside this loop.
+ headers['Content-Type'] = 'application/json'
+
+ log.debug('Making attempt %d', retry)
+ try:
+ response = utils.makeSingleHttpRequest(method, url, payload, headers)
+ except requests.exceptions.RequestException as rerr:
+ if retry < retries:
+ log.warning('Retrying because of: %s', rerr)
+ continue
+ # raise a connection exception
+ raise exceptions.TaskclusterConnectionError(
+ "Failed to establish connection",
+ superExc=rerr
+ )
+
+ # Handle non 2xx status code and retry if possible
+ status = response.status_code
+ if status == 204:
+ return None
+
+ # Catch retryable errors and go to the beginning of the loop
+ # to do the retry
+ if 500 <= status and status < 600 and retry < retries:
+ log.warn('Retrying because of a %s status code' % status)
+ continue
+
+ # Throw errors for non-retryable errors
+ if status < 200 or status >= 300:
+ data = {}
+ try:
+ data = response.json()
+ except Exception:
+ pass # Ignore JSON errors in error messages
+ # Find error message
+ message = "Unknown Server Error"
+ if isinstance(data, dict):
+ message = data.get('message')
+ else:
+ if status == 401:
+ message = "Authentication Error"
+ elif status == 500:
+ message = "Internal Server Error"
+ # Raise TaskclusterAuthFailure if this is an auth issue
+ if status == 401:
+ raise exceptions.TaskclusterAuthFailure(
+ message,
+ status_code=status,
+ body=data,
+ superExc=None
+ )
+ # Raise TaskclusterRestFailure for all other issues
+ raise exceptions.TaskclusterRestFailure(
+ message,
+ status_code=status,
+ body=data,
+ superExc=None
+ )
+
+ # Try to load JSON
+ try:
+ return response.json()
+ except ValueError:
+ return {"response": response}
+
+ # This code-path should be unreachable
+ assert False, "Error from last retry should have been raised!"
+
+
+def createApiClient(name, api):
+ api = api['reference']
+
+ attributes = dict(
+ name=name,
+ __doc__=api.get('description'),
+ classOptions={},
+ funcinfo={},
+ )
+
+ # apply a default for apiVersion; this can be removed when all services
+ # have apiVersion
+ if 'apiVersion' not in api:
+ api['apiVersion'] = 'v1'
+
+ copiedOptions = ('exchangePrefix',)
+ for opt in copiedOptions:
+ if opt in api:
+ attributes['classOptions'][opt] = api[opt]
+
+ copiedProperties = ('serviceName', 'apiVersion')
+ for opt in copiedProperties:
+ if opt in api:
+ attributes[opt] = api[opt]
+
+ for entry in api['entries']:
+ if entry['type'] == 'function':
+ def addApiCall(e):
+ def apiCall(self, *args, **kwargs):
+ return self._makeApiCall(e, *args, **kwargs)
+ return apiCall
+ f = addApiCall(entry)
+
+ docStr = "Call the %s api's %s method. " % (name, entry['name'])
+
+ if entry['args']:
+ docStr += "This method takes:\n\n"
+ docStr += '\n'.join(['- ``%s``' % x for x in entry['args']])
+ docStr += '\n\n'
+ else:
+ docStr += "This method takes no arguments. "
+
+ if 'input' in entry:
+ docStr += "This method takes input ``%s``. " % entry['input']
+
+ if 'output' in entry:
+ docStr += "This method gives output ``%s``" % entry['output']
+
+ docStr += '\n\nThis method does a ``%s`` to ``%s``.' % (
+ entry['method'].upper(), entry['route'])
+
+ f.__doc__ = docStr
+ attributes['funcinfo'][entry['name']] = entry
+
+ elif entry['type'] == 'topic-exchange':
+ def addTopicExchange(e):
+ def topicExchange(self, *args, **kwargs):
+ return self._makeTopicExchange(e, *args, **kwargs)
+ return topicExchange
+
+ f = addTopicExchange(entry)
+
+ docStr = 'Generate a routing key pattern for the %s exchange. ' % entry['exchange']
+ docStr += 'This method takes a given routing key as a string or a '
+ docStr += 'dictionary. For each given dictionary key, the corresponding '
+ docStr += 'routing key token takes its value. For routing key tokens '
+ docStr += 'which are not specified by the dictionary, the * or # character '
+ docStr += 'is used depending on whether or not the key allows multiple words.\n\n'
+ docStr += 'This exchange takes the following keys:\n\n'
+ docStr += '\n'.join(['- ``%s``' % x['name'] for x in entry['routingKey']])
+
+ f.__doc__ = docStr
+
+ # Add whichever function we created
+ f.__name__ = str(entry['name'])
+ attributes[entry['name']] = f
+
+ return type(utils.toStr(name), (BaseClient,), attributes)
+
+
+def createTemporaryCredentials(clientId, accessToken, start, expiry, scopes, name=None):
+ """ Create a set of temporary credentials
+
+ Callers should not apply any clock skew; clock drift is accounted for by
+ auth service.
+
+ clientId: the issuing clientId
+ accessToken: the issuer's accessToken
+ start: start time of credentials (datetime.datetime)
+ expiry: expiration time of credentials, (datetime.datetime)
+ scopes: list of scopes granted
+ name: credential name (optional)
+
+ Returns a dictionary in the form:
+ {'clientId': str, 'accessToken': str, 'certificate': str}
+ """
+
+ for scope in scopes:
+ if not isinstance(scope, six.string_types):
+ raise exceptions.TaskclusterFailure('Scope must be string')
+
+ # Credentials can only be valid for 31 days. I hope that
+ # this is validated on the server somehow...
+
+ if expiry - start > datetime.timedelta(days=31):
+ raise exceptions.TaskclusterFailure('Only 31 days allowed')
+
+ # We multiply times by 1000 because the auth service is JS and as a result
+ # uses milliseconds instead of seconds
+ cert = dict(
+ version=1,
+ scopes=scopes,
+ start=calendar.timegm(start.utctimetuple()) * 1000,
+ expiry=calendar.timegm(expiry.utctimetuple()) * 1000,
+ seed=utils.slugId().encode('ascii') + utils.slugId().encode('ascii'),
+ )
+
+ # if this is a named temporary credential, include the issuer in the certificate
+ if name:
+ cert['issuer'] = utils.toStr(clientId)
+
+ sig = ['version:' + utils.toStr(cert['version'])]
+ if name:
+ sig.extend([
+ 'clientId:' + utils.toStr(name),
+ 'issuer:' + utils.toStr(clientId),
+ ])
+ sig.extend([
+ 'seed:' + utils.toStr(cert['seed']),
+ 'start:' + utils.toStr(cert['start']),
+ 'expiry:' + utils.toStr(cert['expiry']),
+ 'scopes:'
+ ] + scopes)
+ sigStr = '\n'.join(sig).encode()
+
+ if isinstance(accessToken, six.text_type):
+ accessToken = accessToken.encode()
+ sig = hmac.new(accessToken, sigStr, hashlib.sha256).digest()
+
+ cert['signature'] = utils.encodeStringForB64Header(sig)
+
+ newToken = hmac.new(accessToken, cert['seed'], hashlib.sha256).digest()
+ newToken = utils.makeB64UrlSafe(utils.encodeStringForB64Header(newToken)).replace(b'=', b'')
+
+ return {
+ 'clientId': name or clientId,
+ 'accessToken': newToken,
+ 'certificate': utils.dumpJson(cert),
+ }
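+
+
+# Editor's sketch (an assumption, not part of the upstream library): issuing
+# named temporary credentials valid for one hour. The ids, token, and scope
+# are hypothetical placeholders.
+def _exampleCreateTemporaryCredentials():
+    now = datetime.datetime.utcnow()
+    return createTemporaryCredentials(
+        clientId='issuing-client', accessToken='...secret...',
+        start=now, expiry=now + datetime.timedelta(hours=1),
+        scopes=['queue:create-task:proj/*'], name='my-temp-client')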
+
+
+__all__ = [
+ 'createTemporaryCredentials',
+ 'config',
+ 'BaseClient',
+ 'createApiClient',
+]
diff --git a/third_party/python/taskcluster/taskcluster/download.py b/third_party/python/taskcluster/taskcluster/download.py
new file mode 100644
index 0000000000..5584398ea8
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/download.py
@@ -0,0 +1,94 @@
+"""
+Support for downloading objects from the object service, following best
+practices for that service.
+
+Downloaded data is written to a "writer" provided by a "writer factory". A
+writer has a `write` method which writes the entire passed buffer to storage.
+A writer factory is a callable which returns a fresh writer, ready to write the
+first byte of the object. When downloads are retried, the writer factory may
+be called more than once.
+
+This module provides several pre-defined writers and writer factories for
+common cases.
+"""
+import functools
+import six
+
+if six.PY2:
+ raise ImportError("download is only supported in Python 3")
+
+from .aio import download as aio_download
+from .aio.asyncutils import ensureCoro, runAsync
+
+
+def downloadToBuf(**kwargs):
+ """
+ Convenience method to download data to an in-memory buffer and return the
+ downloaded data. Arguments are the same as `download`, except that
+ `writerFactory` should not be supplied. Returns a tuple (buffer, contentType).
+ """
+ return runAsync(aio_download.downloadToBuf(**kwargs))
+
+
+def downloadToFile(file, **kwargs):
+ """
+ Convenience method to download data to a file object. The file must be
+ writeable, in binary mode, seekable (`f.seek`), and truncatable
+ (`f.truncate`) to support retries. Arguments are the same as `download`,
+ except that `writerFactory` should not be supplied. Returns the content-type.
+ """
+ return runAsync(aio_download.downloadToFile(file=file, **kwargs))
+
+
+def download(*, writerFactory, **kwargs):
+ """
+ Download the named object from the object service, using a writer returned
+ from `writerFactory` to write the data. The `maxRetries` parameter has
+ the same meaning as for service clients. The `objectService` parameter is
+ an instance of the Object class, configured with credentials for the
+ upload. Returns the content-type.
+ """
+ wrappedWriterFactory = _wrapSyncWriterFactory(writerFactory)
+ return runAsync(aio_download.download(writerFactory=wrappedWriterFactory, **kwargs))
+
+
+def downloadArtifactToBuf(**kwargs):
+ """
+ Convenience method to download an artifact to an in-memory buffer and return the
+ downloaded data. Arguments are the same as `downloadArtifact`, except that
+ `writerFactory` should not be supplied. Returns a tuple (buffer, contentType).
+ """
+ return runAsync(aio_download.downloadArtifactToBuf(**kwargs))
+
+
+def downloadArtifactToFile(file, **kwargs):
+ """
+ Convenience method to download an artifact to a file object. The file must be
+ writeable, in binary mode, seekable (`f.seek`), and truncatable
+ (`f.truncate`) to support retries. Arguments are the same as `downloadArtifact`,
+ except that `writerFactory` should not be supplied. Returns the content-type.
+ """
+ return runAsync(aio_download.downloadArtifactToFile(file=file, **kwargs))
+
+
+def downloadArtifact(*, writerFactory, **kwargs):
+ """
+ Download the named artifact with the appropriate storageType, using a writer returned
+ from `writerFactory` to write the data. The `maxRetries` parameter has
+ the same meaning as for service clients. The `queueService` parameter is
+ an instance of the Queue class, configured with credentials for the
+ download. Returns the content-type.
+ """
+ wrappedWriterFactory = _wrapSyncWriterFactory(writerFactory)
+ return runAsync(aio_download.downloadArtifact(writerFactory=wrappedWriterFactory, **kwargs))
+
+
+def _wrapSyncWriterFactory(writerFactory):
+ """Modify the reader returned by readerFactory to have an async read."""
+ @functools.wraps(writerFactory)
+ async def wrappedFactory():
+ writer = writerFactory()
+ writer.write = ensureCoro(writer.write)
+ return writer
+
+ return wrappedFactory
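+
+
+# Editor's sketch (an assumption, not part of the upstream library): the sync
+# wrappers mirror the aio API. The rootUrl and object name are hypothetical
+# placeholders, and the generated Object client is assumed to be importable
+# from the sibling stub module.
+def _exampleDownloadToBuf():
+    from .object import Object
+    objectService = Object({"rootUrl": "https://tc.example.com"})
+    buf, contentType = downloadToBuf(name="some/object",
+                                     objectService=objectService)
+    return bytes(buf), contentType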
diff --git a/third_party/python/taskcluster/taskcluster/ec2manager.py b/third_party/python/taskcluster/taskcluster/ec2manager.py
new file mode 100644
index 0000000000..64ebd27aa0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/ec2manager.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.ec2manager import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/exceptions.py b/third_party/python/taskcluster/taskcluster/exceptions.py
new file mode 100644
index 0000000000..bcfc9b1b64
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/exceptions.py
@@ -0,0 +1,43 @@
+""" Taskcluster client exceptions """
+
+
+class TaskclusterFailure(Exception):
+ """ Base exception for all Taskcluster client errors"""
+ pass
+
+
+class TaskclusterRestFailure(TaskclusterFailure):
+ """ Failures in the HTTP Rest API """
+ def __init__(self, msg, superExc, status_code=500, body=None):
+ TaskclusterFailure.__init__(self, msg)
+ self.superExc = superExc
+ self.status_code = status_code
+ # avoid a shared mutable default argument
+ self.body = body if body is not None else {}
+
+
+class TaskclusterConnectionError(TaskclusterFailure):
+ """ Error connecting to resource """
+ def __init__(self, msg, superExc):
+ TaskclusterFailure.__init__(self, msg, superExc)
+ self.superExc = superExc
+
+
+class TaskclusterAuthFailure(TaskclusterFailure):
+ """ Invalid Credentials """
+ def __init__(self, msg, superExc=None, status_code=500, body=None):
+ TaskclusterFailure.__init__(self, msg)
+ self.superExc = superExc
+ self.status_code = status_code
+ # avoid a shared mutable default argument
+ self.body = body if body is not None else {}
+
+
+class TaskclusterTopicExchangeFailure(TaskclusterFailure):
+ """ Error while creating a Topic Exchange routing key """
+ pass
+
+
+class TaskclusterArtifactError(TaskclusterFailure):
+ """Download of an 'error' Artifact"""
+ def __init__(self, message, reason):
+ TaskclusterFailure.__init__(self, message)
+ self.reason = reason
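+
+
+# Editor's note (an assumption, not upstream documentation): typical handling
+# of a failed API call, with a hypothetical `queue` client and taskId,
+#
+#     try:
+#         queue.status(taskId)
+#     except TaskclusterAuthFailure as e:
+#         ...  # bad credentials; e.status_code and e.body are available
+#     except TaskclusterRestFailure as e:
+#         ...  # any other HTTP-level failure from the service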
diff --git a/third_party/python/taskcluster/taskcluster/generated/__init__.py b/third_party/python/taskcluster/taskcluster/generated/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/__init__.py
diff --git a/third_party/python/taskcluster/taskcluster/generated/_client_importer.py b/third_party/python/taskcluster/taskcluster/generated/_client_importer.py
new file mode 100644
index 0000000000..fd6edec960
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/_client_importer.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+from .auth import Auth # NOQA
+from .authevents import AuthEvents # NOQA
+from .github import Github # NOQA
+from .githubevents import GithubEvents # NOQA
+from .hooks import Hooks # NOQA
+from .hooksevents import HooksEvents # NOQA
+from .index import Index # NOQA
+from .notify import Notify # NOQA
+from .notifyevents import NotifyEvents # NOQA
+from .object import Object # NOQA
+from .purgecache import PurgeCache # NOQA
+from .queue import Queue # NOQA
+from .queueevents import QueueEvents # NOQA
+from .secrets import Secrets # NOQA
+from .workermanager import WorkerManager # NOQA
+from .workermanagerevents import WorkerManagerEvents # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/__init__.py b/third_party/python/taskcluster/taskcluster/generated/aio/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/__init__.py
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/_client_importer.py b/third_party/python/taskcluster/taskcluster/generated/aio/_client_importer.py
new file mode 100644
index 0000000000..fd6edec960
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/_client_importer.py
@@ -0,0 +1,20 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+from .auth import Auth # NOQA
+from .authevents import AuthEvents # NOQA
+from .github import Github # NOQA
+from .githubevents import GithubEvents # NOQA
+from .hooks import Hooks # NOQA
+from .hooksevents import HooksEvents # NOQA
+from .index import Index # NOQA
+from .notify import Notify # NOQA
+from .notifyevents import NotifyEvents # NOQA
+from .object import Object # NOQA
+from .purgecache import PurgeCache # NOQA
+from .queue import Queue # NOQA
+from .queueevents import QueueEvents # NOQA
+from .secrets import Secrets # NOQA
+from .workermanager import WorkerManager # NOQA
+from .workermanagerevents import WorkerManagerEvents # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/auth.py b/third_party/python/taskcluster/taskcluster/generated/aio/auth.py
new file mode 100644
index 0000000000..39752f3a89
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/auth.py
@@ -0,0 +1,781 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Auth(AsyncBaseClient):
+ """
+ Authentication related API end-points for Taskcluster and related
+ services. These API end-points are of interest if you wish to:
+ * Authorize a request signed with Taskcluster credentials,
+ * Manage clients and roles,
+ * Inspect or audit clients and roles,
+ * Gain access to various services guarded by this API.
+
+ """
+
+ classOptions = {
+ }
+ serviceName = 'auth'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def listClients(self, *args, **kwargs):
+ """
+ List Clients
+
+ Get a list of all clients. With `prefix`, only clients for which
+ it is a prefix of the clientId are returned.
+
+ By default this end-point will try to return up to 1000 clients in one
+ request. But it **may return less, even none**.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listClients` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listClients"], *args, **kwargs)
+
+ async def client(self, *args, **kwargs):
+ """
+ Get Client
+
+ Get information about a single client.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["client"], *args, **kwargs)
+
+ async def createClient(self, *args, **kwargs):
+ """
+ Create Client
+
+ Create a new client and get the `accessToken` for this client.
+ You should store the `accessToken` from this API call as there is no
+ other way to retrieve it.
+
+ If you lose the `accessToken` you can call `resetAccessToken` to reset
+ it, and a new `accessToken` will be returned, but you cannot retrieve the
+ current `accessToken`.
+
+ If a client with the same `clientId` already exists this operation will
+ fail. Use `updateClient` if you wish to update an existing client.
+
+ The caller's scopes must satisfy `scopes`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createClient"], *args, **kwargs)
+
+ async def resetAccessToken(self, *args, **kwargs):
+ """
+ Reset `accessToken`
+
+ Reset a client's `accessToken`. This will revoke the existing
+ `accessToken`, generate a new `accessToken` and return it from this
+ call.
+
+ There is no way to retrieve an existing `accessToken`, so if you lose it
+ you must reset the `accessToken` to acquire it again.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["resetAccessToken"], *args, **kwargs)
+
+ async def updateClient(self, *args, **kwargs):
+ """
+ Update Client
+
+ Update an existing client. The `clientId` and `accessToken` cannot be
+ updated, but `scopes` can be modified. The caller's scopes must
+ satisfy all scopes being added to the client in the update operation.
+ If no scopes are given in the request, the client's scopes remain
+ unchanged.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateClient"], *args, **kwargs)
+
+ async def enableClient(self, *args, **kwargs):
+ """
+ Enable Client
+
+ Enable a client that was disabled with `disableClient`. If the client
+ is already enabled, this does nothing.
+
+ This is typically used by identity providers to re-enable clients that
+ had been disabled when the corresponding identity's scopes changed.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs)
+
+ async def disableClient(self, *args, **kwargs):
+ """
+ Disable Client
+
+ Disable a client. If the client is already disabled, this does nothing.
+
+ This is typically used by identity providers to disable clients when the
+ corresponding identity's scopes no longer satisfy the client's scopes.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["disableClient"], *args, **kwargs)
+
+ async def deleteClient(self, *args, **kwargs):
+ """
+ Delete Client
+
+ Delete a client, please note that any roles related to this client must
+ be deleted independently.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteClient"], *args, **kwargs)
+
+ async def listRoles(self, *args, **kwargs):
+ """
+ List Roles (no pagination)
+
+ Get a list of all roles. Each role object also includes the list of
+ scopes it expands to. This always returns all roles in a single HTTP
+ request.
+
+ To get paginated results, use `listRoles2`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs)
+
+ async def listRoles2(self, *args, **kwargs):
+ """
+ List Roles
+
+ Get a list of all roles. Each role object also includes the list of
+ scopes it expands to. This is similar to `listRoles` but differs in the
+ format of the response.
+
+ If no limit is given, all roles are returned. Since this
+ list may become long, callers can use the `limit` and `continuationToken`
+ query arguments to page through the responses.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listRoles2"], *args, **kwargs)
+
+ async def listRoleIds(self, *args, **kwargs):
+ """
+ List Role IDs
+
+ Get a list of all role IDs.
+
+ If no limit is given, the roleIds of all roles are returned. Since this
+ list may become long, callers can use the `limit` and `continuationToken`
+ query arguments to page through the responses.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs)
+
+ async def role(self, *args, **kwargs):
+ """
+ Get Role
+
+ Get information about a single role, including the set of scopes that the
+ role expands to.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["role"], *args, **kwargs)
+
+ async def createRole(self, *args, **kwargs):
+ """
+ Create Role
+
+ Create a new role.
+
+ The caller's scopes must satisfy the new role's scopes.
+
+ If there already exists a role with the same `roleId` this operation
+ will fail. Use `updateRole` to modify an existing role.
+
+ Creation of a role that will generate an infinite expansion will result
+ in an error response.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createRole"], *args, **kwargs)
+
+ async def updateRole(self, *args, **kwargs):
+ """
+ Update Role
+
+ Update an existing role.
+
+ The caller's scopes must satisfy all of the new scopes being added, but
+ need not satisfy all of the role's existing scopes.
+
+ An update of a role that will generate an infinite expansion will result
+ in an error response.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs)
+
+ async def deleteRole(self, *args, **kwargs):
+ """
+ Delete Role
+
+ Delete a role. This operation will succeed regardless of whether or not
+ the role exists.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs)
+
+ async def expandScopes(self, *args, **kwargs):
+ """
+ Expand Scopes
+
+ Return an expanded copy of the given scopeset, with scopes implied by any
+ roles included.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs)
+
+ async def currentScopes(self, *args, **kwargs):
+ """
+ Get Current Scopes
+
+ Return the expanded scopes available in the request, taking into account all sources
+ of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
+ and roles).
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs)
+
+ async def awsS3Credentials(self, *args, **kwargs):
+ """
+ Get Temporary Read/Write Credentials S3
+
+ Get temporary AWS credentials for `read-write` or `read-only` access to
+ a given `bucket` and `prefix` within that bucket.
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. Please note that the `level`
+ parameter is required in the scope guarding access. The bucket name must
+ not contain `.`, as recommended by Amazon.
+
+ This method can only allow access to a whitelisted set of buckets, as configured
+ in the Taskcluster deployment.
+
+ The credentials are set to expire after an hour, but this behavior is
+ subject to change. Hence, you should always read the `expires` property
+ from the response, if you intend to maintain active credentials in your
+ application.
+
+ Please note that your `prefix` may not start with slash `/`. Such a prefix
+ is allowed on S3, but we forbid it here to discourage bad behavior.
+
+ Also note that if your `prefix` doesn't end in a slash `/`, the STS
+ credentials may allow access to unexpected keys, as S3 does not treat
+ slashes specially. For example, a prefix of `my-folder` will allow
+ access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
+ which may not be intended.
+
+ Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
+ ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
+ will result in an access-denied error from AWS. This limitation is due to a
+ security flaw in Amazon S3 which might otherwise allow indefinite access to
+ uploaded objects.
+
+ **EC2 metadata compatibility**: if the querystring parameter
+ `?format=iam-role-compat` is given, the response will be compatible
+ with the JSON exposed by the EC2 metadata service. This aims to ease
+ compatibility for libraries and tools built to auto-refresh credentials.
+ For details on the format returned by EC2 metadata service see:
+ [EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["awsS3Credentials"], *args, **kwargs)
+
+ async def azureAccounts(self, *args, **kwargs):
+ """
+ List Accounts Managed by Auth
+
+ Retrieve a list of all Azure accounts managed by Taskcluster Auth.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
+
+ async def azureTables(self, *args, **kwargs):
+ """
+ List Tables in an Account Managed by Auth
+
+ Retrieve a list of all tables in an account.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
+
+ async def azureTableSAS(self, *args, **kwargs):
+ """
+ Get Shared-Access-Signature for Azure Table
+
+ Get a shared access signature (SAS) string for use with a specific Azure
+ Table Storage table.
+
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. If `level` is `read-write`, it will create the
+ table if it doesn't already exist.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs)
+
+ async def azureContainers(self, *args, **kwargs):
+ """
+ List containers in an Account Managed by Auth
+
+ Retrieve a list of all containers in an account.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs)
+
+ async def azureContainerSAS(self, *args, **kwargs):
+ """
+ Get Shared-Access-Signature for Azure Container
+
+ Get a shared access signature (SAS) string for use with a specific Azure
+ Blob Storage container.
+
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. If `level` is `read-write`, it will create the
+ container if it doesn't already exist.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
+
+ async def sentryDSN(self, *args, **kwargs):
+ """
+ Get DSN for Sentry Project
+
+ Get temporary DSN (access credentials) for a sentry project.
+ The credentials returned can be used with any Sentry client for up to
+ 24 hours, after which the credentials will be automatically disabled.
+
+ If the project doesn't exist, it will be created and assigned to the
+ initial team configured for this component. Contact a Sentry admin
+ to have the project transferred to a team you have access to, if needed.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs)
+
+ async def websocktunnelToken(self, *args, **kwargs):
+ """
+ Get a client token for the Websocktunnel service
+
+ Get a temporary token suitable for use connecting to a
+ [websocktunnel](https://github.com/taskcluster/taskcluster/tree/main/tools/websocktunnel) server.
+
+ The resulting token will only be accepted by servers with a matching audience
+ value. Reaching such a server is the caller's responsibility. In general,
+ a server URL or set of URLs should be provided to the caller as configuration
+ along with the audience value.
+
+ The token is valid for a limited time (on the scale of hours). Callers should
+ refresh it before expiration.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["websocktunnelToken"], *args, **kwargs)
+
+ async def gcpCredentials(self, *args, **kwargs):
+ """
+ Get Temporary GCP Credentials
+
+ Get temporary GCP credentials for the given serviceAccount in the given project.
+
+ Only preconfigured projects and serviceAccounts are allowed, as defined in the
+ deployment of the Taskcluster services.
+
+ The credentials are set to expire after an hour, but this behavior is
+ subject to change. Hence, you should always read the `expires` property
+ from the response, if you intend to maintain active credentials in your
+ application.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["gcpCredentials"], *args, **kwargs)
+
+ async def authenticateHawk(self, *args, **kwargs):
+ """
+ Authenticate Hawk Request
+
+ Validate the request signature given on input and return list of scopes
+ that the authenticating client has.
+
+ This method is used by other services that wish to rely on Taskcluster
+ credentials for authentication. This way we can use Hawk without having
+ the secret credentials leave this service.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["authenticateHawk"], *args, **kwargs)
+
+ async def testAuthenticate(self, *args, **kwargs):
+ """
+ Test Authentication
+
+ Utility method to test client implementations of Taskcluster
+ authentication.
+
+ Rather than using real credentials, this endpoint accepts requests with
+ clientId `tester` and accessToken `no-secret`. That client's scopes are
+ based on `clientScopes` in the request body.
+
+ The request is validated, with any certificate, authorizedScopes, etc.
+ applied, and the resulting scopes are checked against `requiredScopes`
+ from the request body. On success, the response contains the clientId
+ and scopes as seen by the API method.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["testAuthenticate"], *args, **kwargs)
+
+ async def testAuthenticateGet(self, *args, **kwargs):
+ """
+ Test Authentication (GET)
+
+ Utility method similar to `testAuthenticate`, but with the GET method,
+ so it can be used with signed URLs (bewits).
+
+ Rather than using real credentials, this endpoint accepts requests with
+ clientId `tester` and accessToken `no-secret`. That client's scopes are
+ `['test:*', 'auth:create-client:test:*']`. The call fails if the
+ `test:authenticate-get` scope is not available.
+
+ The request is validated, with any certificate, authorizedScopes, etc.
+ applied, and the resulting scopes are checked, just like any API call.
+ On success, the response contains the clientId and scopes as seen by
+ the API method.
+
+ This method may later be extended to allow specification of client and
+ required scopes via query arguments.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["testAuthenticateGet"], *args, **kwargs)
+
+ funcinfo = {
+ "authenticateHawk": {
+ 'args': [],
+ 'input': 'v1/authenticate-hawk-request.json#',
+ 'method': 'post',
+ 'name': 'authenticateHawk',
+ 'output': 'v1/authenticate-hawk-response.json#',
+ 'route': '/authenticate-hawk',
+ 'stability': 'stable',
+ },
+ "awsS3Credentials": {
+ 'args': ['level', 'bucket', 'prefix'],
+ 'method': 'get',
+ 'name': 'awsS3Credentials',
+ 'output': 'v1/aws-s3-credentials-response.json#',
+ 'query': ['format'],
+ 'route': '/aws/s3/<level>/<bucket>/<prefix>',
+ 'stability': 'stable',
+ },
+ "azureAccounts": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'azureAccounts',
+ 'output': 'v1/azure-account-list-response.json#',
+ 'route': '/azure/accounts',
+ 'stability': 'stable',
+ },
+ "azureContainerSAS": {
+ 'args': ['account', 'container', 'level'],
+ 'method': 'get',
+ 'name': 'azureContainerSAS',
+ 'output': 'v1/azure-container-response.json#',
+ 'route': '/azure/<account>/containers/<container>/<level>',
+ 'stability': 'stable',
+ },
+ "azureContainers": {
+ 'args': ['account'],
+ 'method': 'get',
+ 'name': 'azureContainers',
+ 'output': 'v1/azure-container-list-response.json#',
+ 'query': ['continuationToken'],
+ 'route': '/azure/<account>/containers',
+ 'stability': 'stable',
+ },
+ "azureTableSAS": {
+ 'args': ['account', 'table', 'level'],
+ 'method': 'get',
+ 'name': 'azureTableSAS',
+ 'output': 'v1/azure-table-access-response.json#',
+ 'route': '/azure/<account>/table/<table>/<level>',
+ 'stability': 'stable',
+ },
+ "azureTables": {
+ 'args': ['account'],
+ 'method': 'get',
+ 'name': 'azureTables',
+ 'output': 'v1/azure-table-list-response.json#',
+ 'query': ['continuationToken'],
+ 'route': '/azure/<account>/tables',
+ 'stability': 'stable',
+ },
+ "client": {
+ 'args': ['clientId'],
+ 'method': 'get',
+ 'name': 'client',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "createClient": {
+ 'args': ['clientId'],
+ 'input': 'v1/create-client-request.json#',
+ 'method': 'put',
+ 'name': 'createClient',
+ 'output': 'v1/create-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "createRole": {
+ 'args': ['roleId'],
+ 'input': 'v1/create-role-request.json#',
+ 'method': 'put',
+ 'name': 'createRole',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "currentScopes": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'currentScopes',
+ 'output': 'v1/scopeset.json#',
+ 'route': '/scopes/current',
+ 'stability': 'stable',
+ },
+ "deleteClient": {
+ 'args': ['clientId'],
+ 'method': 'delete',
+ 'name': 'deleteClient',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "deleteRole": {
+ 'args': ['roleId'],
+ 'method': 'delete',
+ 'name': 'deleteRole',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "disableClient": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'disableClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>/disable',
+ 'stability': 'stable',
+ },
+ "enableClient": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'enableClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>/enable',
+ 'stability': 'stable',
+ },
+ "expandScopes": {
+ 'args': [],
+ 'input': 'v1/scopeset.json#',
+ 'method': 'post',
+ 'name': 'expandScopes',
+ 'output': 'v1/scopeset.json#',
+ 'route': '/scopes/expand',
+ 'stability': 'stable',
+ },
+ "gcpCredentials": {
+ 'args': ['projectId', 'serviceAccount'],
+ 'method': 'get',
+ 'name': 'gcpCredentials',
+ 'output': 'v1/gcp-credentials-response.json#',
+ 'route': '/gcp/credentials/<projectId>/<serviceAccount>',
+ 'stability': 'stable',
+ },
+ "listClients": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listClients',
+ 'output': 'v1/list-clients-response.json#',
+ 'query': ['prefix', 'continuationToken', 'limit'],
+ 'route': '/clients/',
+ 'stability': 'stable',
+ },
+ "listRoleIds": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoleIds',
+ 'output': 'v1/list-role-ids-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/roleids/',
+ 'stability': 'stable',
+ },
+ "listRoles": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoles',
+ 'output': 'v1/list-roles-response.json#',
+ 'route': '/roles/',
+ 'stability': 'stable',
+ },
+ "listRoles2": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoles2',
+ 'output': 'v1/list-roles2-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/roles2/',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "resetAccessToken": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'resetAccessToken',
+ 'output': 'v1/create-client-response.json#',
+ 'route': '/clients/<clientId>/reset',
+ 'stability': 'stable',
+ },
+ "role": {
+ 'args': ['roleId'],
+ 'method': 'get',
+ 'name': 'role',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "sentryDSN": {
+ 'args': ['project'],
+ 'method': 'get',
+ 'name': 'sentryDSN',
+ 'output': 'v1/sentry-dsn-response.json#',
+ 'route': '/sentry/<project>/dsn',
+ 'stability': 'stable',
+ },
+ "testAuthenticate": {
+ 'args': [],
+ 'input': 'v1/test-authenticate-request.json#',
+ 'method': 'post',
+ 'name': 'testAuthenticate',
+ 'output': 'v1/test-authenticate-response.json#',
+ 'route': '/test-authenticate',
+ 'stability': 'stable',
+ },
+ "testAuthenticateGet": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'testAuthenticateGet',
+ 'output': 'v1/test-authenticate-response.json#',
+ 'route': '/test-authenticate-get/',
+ 'stability': 'stable',
+ },
+ "updateClient": {
+ 'args': ['clientId'],
+ 'input': 'v1/create-client-request.json#',
+ 'method': 'post',
+ 'name': 'updateClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "updateRole": {
+ 'args': ['roleId'],
+ 'input': 'v1/create-role-request.json#',
+ 'method': 'post',
+ 'name': 'updateRole',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "websocktunnelToken": {
+ 'args': ['wstAudience', 'wstClient'],
+ 'method': 'get',
+ 'name': 'websocktunnelToken',
+ 'output': 'v1/websocktunnel-token-response.json#',
+ 'route': '/websocktunnel/<wstAudience>/<wstClient>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Auth']
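
For orientation, a minimal usage sketch of the generated client above, assuming the package's usual entry point `taskcluster.aio.Auth`; the `rootUrl`, credentials, bucket, and prefix are placeholders. Positional arguments follow each `funcinfo` entry's `args`, and query-string options (such as `format`) are passed as keyword arguments:

    import asyncio
    import taskcluster.aio

    async def main():
        # Placeholder deployment URL and credentials (hypothetical values).
        auth = taskcluster.aio.Auth({
            'rootUrl': 'https://tc.example.com',
            'credentials': {'clientId': 'my-client', 'accessToken': '...'},
        })
        # Route params are positional, matching funcinfo['args'].
        creds = await auth.awsS3Credentials('read-only', 'some-bucket', 'some-prefix/')
        # Always honor the advertised expiry, as the docstring advises.
        print(creds['expires'])

    asyncio.run(main())
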
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/authevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/authevents.py
new file mode 100644
index 0000000000..6bd552a147
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/authevents.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class AuthEvents(AsyncBaseClient):
+ """
+ The auth service is responsible for storing credentials, managing
+ assignment of scopes, and validation of request signatures from other
+ services.
+
+ These exchanges provide notifications when credentials or roles are
+ updated. This is mostly so that multiple instances of the auth service
+ can purge their caches and synchronize state. But you are of course
+ welcome to use these for other purposes, monitoring changes for example.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-auth/v1/",
+ }
+ serviceName = 'auth'
+ apiVersion = 'v1'
+
+ def clientCreated(self, *args, **kwargs):
+ """
+ Client Created Messages
+
+ Message that a new client has been created.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-created',
+ 'name': 'clientCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def clientUpdated(self, *args, **kwargs):
+ """
+ Client Updated Messages
+
+ Message that a client has been updated.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-updated',
+ 'name': 'clientUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def clientDeleted(self, *args, **kwargs):
+ """
+ Client Deleted Messages
+
+ Message that a client has been deleted.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-deleted',
+ 'name': 'clientDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleCreated(self, *args, **kwargs):
+ """
+ Role Created Messages
+
+ Message that a new role has been created.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-created',
+ 'name': 'roleCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleUpdated(self, *args, **kwargs):
+ """
+ Role Updated Messages
+
+ Message that a role has been updated.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-updated',
+ 'name': 'roleUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleDeleted(self, *args, **kwargs):
+ """
+ Role Deleted Messages
+
+ Message that a role has been deleted.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-deleted',
+ 'name': 'roleDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AuthEvents']
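
As a sketch of how these exchange classes are consumed: the methods are synchronous and, via `_makeTopicExchange`, return a binding description (an `exchange` name plus a `routingKeyPattern`) rather than a live subscription; the `rootUrl` below is a placeholder.

    from taskcluster.aio import AuthEvents

    events = AuthEvents({'rootUrl': 'https://tc.example.com'})
    # 'reserved' is multi-word and unspecified, so it defaults to '#'
    # and the pattern matches every client-created message.
    binding = events.clientCreated()
    print(binding['exchange'])           # exchange/taskcluster-auth/v1/client-created
    print(binding['routingKeyPattern'])  # '#'
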
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/github.py b/third_party/python/taskcluster/taskcluster/generated/aio/github.py
new file mode 100644
index 0000000000..94f19770e5
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/github.py
@@ -0,0 +1,197 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Github(AsyncBaseClient):
+ """
+ The github service is responsible for creating tasks in response
+ to GitHub events, and posting results to the GitHub UI.
+
+ This document describes the API end-point for consuming GitHub
+ web hooks, as well as some useful consumer APIs.
+
+ When GitHub forbids an action, this service returns an HTTP 403
+ with code ForbiddenByGithub.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'github'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def githubWebHookConsumer(self, *args, **kwargs):
+ """
+ Consume GitHub WebHook
+
+ Capture a GitHub event and publish it via pulse, if it's a push,
+ release or pull request.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
+
+ async def builds(self, *args, **kwargs):
+ """
+ List of Builds
+
+ A paginated list of builds that have been run in
+ Taskcluster. Can be filtered on various git-specific
+ fields.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
+
+ async def badge(self, *args, **kwargs):
+ """
+ Latest Build Status Badge
+
+ Checks the status of the latest build of a given branch
+ and returns the corresponding badge SVG.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["badge"], *args, **kwargs)
+
+ async def repository(self, *args, **kwargs):
+ """
+ Get Repository Info
+
+ Returns any repository metadata that is
+ useful within Taskcluster-related services.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["repository"], *args, **kwargs)
+
+ async def latest(self, *args, **kwargs):
+ """
+ Latest Status for Branch
+
+ For a given branch of a repository, this will always point
+ to a status page for the most recent task triggered by that
+ branch.
+
+ Note: This is a redirect rather than a direct link.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["latest"], *args, **kwargs)
+
+ async def createStatus(self, *args, **kwargs):
+ """
+ Post a status against a given changeset
+
+ For a given changeset (SHA) of a repository, this will attach a "commit status"
+ on GitHub. These statuses are links displayed next to each revision.
+ The status is either OK (green check) or FAILURE (red cross),
+ made of a custom title and link.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createStatus"], *args, **kwargs)
+
+ async def createComment(self, *args, **kwargs):
+ """
+ Post a comment on a given GitHub Issue or Pull Request
+
+ For a given Issue or Pull Request of a repository, this will write a new message.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
+
+ funcinfo = {
+ "badge": {
+ 'args': ['owner', 'repo', 'branch'],
+ 'method': 'get',
+ 'name': 'badge',
+ 'route': '/repository/<owner>/<repo>/<branch>/badge.svg',
+ 'stability': 'experimental',
+ },
+ "builds": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'builds',
+ 'output': 'v1/build-list.json#',
+ 'query': ['continuationToken', 'limit', 'organization', 'repository', 'sha'],
+ 'route': '/builds',
+ 'stability': 'stable',
+ },
+ "createComment": {
+ 'args': ['owner', 'repo', 'number'],
+ 'input': 'v1/create-comment.json#',
+ 'method': 'post',
+ 'name': 'createComment',
+ 'route': '/repository/<owner>/<repo>/issues/<number>/comments',
+ 'stability': 'stable',
+ },
+ "createStatus": {
+ 'args': ['owner', 'repo', 'sha'],
+ 'input': 'v1/create-status.json#',
+ 'method': 'post',
+ 'name': 'createStatus',
+ 'route': '/repository/<owner>/<repo>/statuses/<sha>',
+ 'stability': 'experimental',
+ },
+ "githubWebHookConsumer": {
+ 'args': [],
+ 'method': 'post',
+ 'name': 'githubWebHookConsumer',
+ 'route': '/github',
+ 'stability': 'stable',
+ },
+ "latest": {
+ 'args': ['owner', 'repo', 'branch'],
+ 'method': 'get',
+ 'name': 'latest',
+ 'route': '/repository/<owner>/<repo>/<branch>/latest',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "repository": {
+ 'args': ['owner', 'repo'],
+ 'method': 'get',
+ 'name': 'repository',
+ 'output': 'v1/repository.json#',
+ 'route': '/repository/<owner>/<repo>',
+ 'stability': 'experimental',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Github']
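
A short sketch of the query-driven `builds` method above; the organization and repository values are hypothetical, and the `builds` key is assumed from the response schema named in `funcinfo`:

    import asyncio
    import taskcluster.aio

    async def main():
        github = taskcluster.aio.Github({'rootUrl': 'https://tc.example.com'})
        # Filters are query-string kwargs, per funcinfo['query'].
        result = await github.builds(organization='my-org',
                                     repository='my-repo', limit=10)
        for build in result.get('builds', []):
            print(build)

    asyncio.run(main())
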
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/githubevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/githubevents.py
new file mode 100644
index 0000000000..a70180d78f
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/githubevents.py
@@ -0,0 +1,199 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class GithubEvents(AsyncBaseClient):
+ """
+ The github service publishes a pulse
+ message for supported GitHub events, translating GitHub webhook
+ events into pulse messages.
+
+ This document describes the exchanges offered by the taskcluster
+ github service.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-github/v1/",
+ }
+ serviceName = 'github'
+ apiVersion = 'v1'
+
+ def pullRequest(self, *args, **kwargs):
+ """
+ GitHub Pull Request Event
+
+ When a GitHub pull request event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * action: The GitHub `action` which triggered an event. For possible values, see the payload's actions property. (required)
+ """
+
+ ref = {
+ 'exchange': 'pull-request',
+ 'name': 'pullRequest',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'action',
+ },
+ ],
+ 'schema': 'v1/github-pull-request-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def push(self, *args, **kwargs):
+ """
+ GitHub push Event
+
+ When a GitHub push event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'push',
+ 'name': 'push',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/github-push-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def release(self, *args, **kwargs):
+ """
+ GitHub release Event
+
+ When a GitHub release event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'release',
+ 'name': 'release',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/github-release-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskGroupCreationRequested(self, *args, **kwargs):
+ """
+ tc-gh requested the Queue service to create all the tasks in a group
+
+ Signals that the `taskCreate` API has been called for every task in the task
+ group for this particular repository and organization. This is currently used
+ for creating initial status indicators in the GitHub UI using the Statuses API.
+ This particular exchange can also be bound to RabbitMQ queues by custom routes;
+ for that, pass in the array of routes as a second argument to the publish
+ method. Currently, we do use the statuses routes to bind the handler that
+ creates the initial status.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'task-group-creation-requested',
+ 'name': 'taskGroupCreationRequested',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/task-group-creation-requested.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'GithubEvents']
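
To illustrate the routing keys documented above: routing-key fields are passed as keyword arguments, and unspecified non-constant fields are wildcarded (`*` for single-word entries, `#` for multi-word ones). The organization and repository here are hypothetical:

    from taskcluster.aio import GithubEvents

    events = GithubEvents({'rootUrl': 'https://tc.example.com'})
    # 'action' is left unspecified, so it becomes '*' in the pattern.
    binding = events.pullRequest(organization='my-org', repository='my-repo')
    print(binding['routingKeyPattern'])  # primary.my-org.my-repo.*
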
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/hooks.py b/third_party/python/taskcluster/taskcluster/generated/aio/hooks.py
new file mode 100644
index 0000000000..59abb7a938
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/hooks.py
@@ -0,0 +1,300 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Hooks(AsyncBaseClient):
+ """
+ The hooks service provides a mechanism for creating tasks in response to events.
+
+ """
+
+ classOptions = {
+ }
+ serviceName = 'hooks'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def listHookGroups(self, *args, **kwargs):
+ """
+ List hook groups
+
+ This endpoint will return a list of all hook groups with at least one hook.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listHookGroups"], *args, **kwargs)
+
+ async def listHooks(self, *args, **kwargs):
+ """
+ List hooks in a given group
+
+ This endpoint will return a list of all the hook definitions within a
+ given hook group.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
+
+ async def hook(self, *args, **kwargs):
+ """
+ Get hook definition
+
+ This endpoint will return the hook definition for the given `hookGroupId`
+ and `hookId`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["hook"], *args, **kwargs)
+
+ async def getHookStatus(self, *args, **kwargs):
+ """
+ Get hook status
+
+ This endpoint will return the current status of the hook. This represents a
+ snapshot in time and may vary from one call to the next.
+
+ This method is deprecated in favor of listLastFires.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs)
+
+ async def createHook(self, *args, **kwargs):
+ """
+ Create a hook
+
+ This endpoint will create a new hook.
+
+ The caller's credentials must include the role that will be used to
+ create the task. That role must satisfy task.scopes as well as the
+ necessary scopes to add the task to the queue.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs)
+
+ async def updateHook(self, *args, **kwargs):
+ """
+ Update a hook
+
+ This endpoint will update an existing hook. All fields except
+ `hookGroupId` and `hookId` can be modified.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateHook"], *args, **kwargs)
+
+ async def removeHook(self, *args, **kwargs):
+ """
+ Delete a hook
+
+ This endpoint will remove a hook definition.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["removeHook"], *args, **kwargs)
+
+ async def triggerHook(self, *args, **kwargs):
+ """
+ Trigger a hook
+
+ This endpoint will trigger the creation of a task from a hook definition.
+
+ The HTTP payload must match the hook's `triggerSchema`. If it does, it is
+ provided as the `payload` property of the JSON-e context used to render the
+ task template.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
+
+ async def getTriggerToken(self, *args, **kwargs):
+ """
+ Get a trigger token
+
+ Retrieve a unique secret token for triggering the specified hook. This
+ token can be deactivated with `resetTriggerToken`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getTriggerToken"], *args, **kwargs)
+
+ async def resetTriggerToken(self, *args, **kwargs):
+ """
+ Reset a trigger token
+
+ Reset the token for triggering a given hook. This invalidates any token that
+ may have been issued via `getTriggerToken`, replacing it with a new token.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["resetTriggerToken"], *args, **kwargs)
+
+ async def triggerHookWithToken(self, *args, **kwargs):
+ """
+ Trigger a hook with a token
+
+ This endpoint triggers a defined hook with a valid token.
+
+ The HTTP payload must match the hook's `triggerSchema`. If it does, it is
+ provided as the `payload` property of the JSON-e context used to render the
+ task template.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["triggerHookWithToken"], *args, **kwargs)
+
+ async def listLastFires(self, *args, **kwargs):
+ """
+ Get information about recent hook fires
+
+ This endpoint will return information about the last few times this hook has been
+ fired, including whether the hook was fired successfully or not.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listLastFires"], *args, **kwargs)
+
+ funcinfo = {
+ "createHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/create-hook-request.json#',
+ 'method': 'put',
+ 'name': 'createHook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "getHookStatus": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'getHookStatus',
+ 'output': 'v1/hook-status.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/status',
+ 'stability': 'deprecated',
+ },
+ "getTriggerToken": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'getTriggerToken',
+ 'output': 'v1/trigger-token-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/token',
+ 'stability': 'stable',
+ },
+ "hook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'hook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "listHookGroups": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listHookGroups',
+ 'output': 'v1/list-hook-groups-response.json#',
+ 'route': '/hooks',
+ 'stability': 'stable',
+ },
+ "listHooks": {
+ 'args': ['hookGroupId'],
+ 'method': 'get',
+ 'name': 'listHooks',
+ 'output': 'v1/list-hooks-response.json#',
+ 'route': '/hooks/<hookGroupId>',
+ 'stability': 'stable',
+ },
+ "listLastFires": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'listLastFires',
+ 'output': 'v1/list-lastFires-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/last-fires',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "removeHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'delete',
+ 'name': 'removeHook',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "resetTriggerToken": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'post',
+ 'name': 'resetTriggerToken',
+ 'output': 'v1/trigger-token-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/token',
+ 'stability': 'stable',
+ },
+ "triggerHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/trigger-hook.json#',
+ 'method': 'post',
+ 'name': 'triggerHook',
+ 'output': 'v1/trigger-hook-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/trigger',
+ 'stability': 'stable',
+ },
+ "triggerHookWithToken": {
+ 'args': ['hookGroupId', 'hookId', 'token'],
+ 'input': 'v1/trigger-hook.json#',
+ 'method': 'post',
+ 'name': 'triggerHookWithToken',
+ 'output': 'v1/trigger-hook-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/trigger/<token>',
+ 'stability': 'stable',
+ },
+ "updateHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/create-hook-request.json#',
+ 'method': 'post',
+ 'name': 'updateHook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Hooks']
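
A sketch of the trigger flow described above; the hook identifiers and payload are hypothetical, and the payload must validate against that hook's `triggerSchema`. The JSON body is passed as the final positional argument:

    import asyncio
    import taskcluster.aio

    async def main():
        hooks = taskcluster.aio.Hooks({
            'rootUrl': 'https://tc.example.com',
            'credentials': {'clientId': 'my-client', 'accessToken': '...'},
        })
        # Route params first (hookGroupId, hookId), then the input payload.
        result = await hooks.triggerHook('my-group', 'my-hook', {'branch': 'main'})
        print(result)

    asyncio.run(main())
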
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/hooksevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/hooksevents.py
new file mode 100644
index 0000000000..0e841a256d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/hooksevents.py
@@ -0,0 +1,101 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class HooksEvents(AsyncBaseClient):
+ """
+ The hooks service is responsible for creating tasks at specific times or in
+ response to webhooks and API calls. Using this exchange allows us to make
+ hooks which respond to particular pulse messages.
+
+ These exchanges provide notifications when a hook is created, updated or
+ deleted. This is so that the listener running in a different hooks process
+ at the other end can direct another listener specified by `hookGroupId` and
+ `hookId` to synchronize its bindings. But you are of course welcome to use
+ these for other purposes, monitoring changes for example.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-hooks/v1/",
+ }
+ serviceName = 'hooks'
+ apiVersion = 'v1'
+
+ def hookCreated(self, *args, **kwargs):
+ """
+ Hook Created Messages
+
+ Whenever the API receives a request to create a pulse-based hook, a message
+ is posted to this exchange and the receiver creates a listener with the
+ bindings, to create a task.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-created',
+ 'name': 'hookCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def hookUpdated(self, *args, **kwargs):
+ """
+ Hook Updated Messages
+
+ Whenever the API receives a request to update a pulse-based hook, a message
+ is posted to this exchange and the receiver updates the listener associated
+ with that hook.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-updated',
+ 'name': 'hookUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def hookDeleted(self, *args, **kwargs):
+ """
+ Hook Deleted Messages
+
+ Whenever the API receives a request to delete a pulse-based hook, a message
+ is posted to this exchange and the receiver deletes the listener associated
+ with that hook.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-deleted',
+ 'name': 'hookDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'HooksEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/index.py b/third_party/python/taskcluster/taskcluster/generated/aio/index.py
new file mode 100644
index 0000000000..e7aabd3c49
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/index.py
@@ -0,0 +1,204 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Index(AsyncBaseClient):
+ """
+ The index service is responsible for indexing tasks. The service ensures that
+ tasks can be located by user-defined names.
+
+ As described in the service documentation, tasks are typically indexed via Pulse
+ messages, so the most common use of API methods is to read from the index.
+
+ Slashes (`/`) aren't allowed in index paths.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'index'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def findTask(self, *args, **kwargs):
+ """
+ Find Indexed Task
+
+ Find a task by index path, returning the highest-rank task with that path. If no
+ task exists for the given path, this API end-point will respond with a 404 status.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
+
+ async def listNamespaces(self, *args, **kwargs):
+ """
+ List Namespaces
+
+ List the namespaces immediately under a given namespace.
+
+ This endpoint
+ lists up to 1000 namespaces. If more namespaces are present, a
+ `continuationToken` will be returned, which can be given in the next
+ request. For the initial request, the payload should be an empty JSON
+ object.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs)
+
+ async def listTasks(self, *args, **kwargs):
+ """
+ List Tasks
+
+ List the tasks immediately under a given namespace.
+
+ This endpoint
+ lists up to 1000 tasks. If more tasks are present, a
+ `continuationToken` will be returned, which can be given in the next
+ request. For the initial request, the payload should be an empty JSON
+ object.
+
+ **Remark**: this end-point is designed for humans browsing for tasks, not
+ for services, as that makes little sense.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listTasks"], *args, **kwargs)
+
+ async def insertTask(self, *args, **kwargs):
+ """
+ Insert Task into Index
+
+ Insert a task into the index. If the new rank is less than the existing rank
+ at the given index path, the task is not indexed but the response is still 200 OK.
+
+ Please see the introduction above for information
+ about indexing successfully completed tasks automatically using custom routes.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs)
+
+ async def deleteTask(self, *args, **kwargs):
+ """
+ Remove Task from Index
+
+ Remove a task from the index. This is intended for administrative use,
+ where an index entry is no longer appropriate. The parent namespace is
+ not automatically deleted. Index entries with lower rank that were
+ previously inserted will not re-appear, as they were never stored.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteTask"], *args, **kwargs)
+
+ async def findArtifactFromTask(self, *args, **kwargs):
+ """
+ Get Artifact From Indexed Task
+
+ Find a task by index path and redirect to the artifact on the most recent
+ run with the given `name`.
+
+ Note that multiple calls to this endpoint may return artifacts from different tasks
+ if a new task is inserted into the index between calls. Avoid using this method as
+ a stable link to multiple, connected files if the index path does not contain a
+ unique identifier. For example, the following two links may return unrelated files:
+ * https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe
+ * https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip
+
+ This problem can be remedied by including the revision in the index path or by bundling both
+ installer and debug symbols into a single artifact.
+
+ If no task exists for the given index path, this API end-point responds with 404.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs)
+
+ funcinfo = {
+ "deleteTask": {
+ 'args': ['namespace'],
+ 'method': 'delete',
+ 'name': 'deleteTask',
+ 'route': '/task/<namespace>',
+ 'stability': 'stable',
+ },
+ "findArtifactFromTask": {
+ 'args': ['indexPath', 'name'],
+ 'method': 'get',
+ 'name': 'findArtifactFromTask',
+ 'route': '/task/<indexPath>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "findTask": {
+ 'args': ['indexPath'],
+ 'method': 'get',
+ 'name': 'findTask',
+ 'output': 'v1/indexed-task-response.json#',
+ 'route': '/task/<indexPath>',
+ 'stability': 'stable',
+ },
+ "insertTask": {
+ 'args': ['namespace'],
+ 'input': 'v1/insert-task-request.json#',
+ 'method': 'put',
+ 'name': 'insertTask',
+ 'output': 'v1/indexed-task-response.json#',
+ 'route': '/task/<namespace>',
+ 'stability': 'stable',
+ },
+ "listNamespaces": {
+ 'args': ['namespace'],
+ 'method': 'get',
+ 'name': 'listNamespaces',
+ 'output': 'v1/list-namespaces-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/namespaces/<namespace>',
+ 'stability': 'stable',
+ },
+ "listTasks": {
+ 'args': ['namespace'],
+ 'method': 'get',
+ 'name': 'listTasks',
+ 'output': 'v1/list-tasks-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/tasks/<namespace>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Index']
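
The continuation-token pagination described for `listNamespaces` and `listTasks` can be driven with a loop along these lines; the namespace is hypothetical, and the `tasks`/`continuationToken` keys are assumed from the response schema:

    import asyncio
    import taskcluster.aio

    async def main():
        index = taskcluster.aio.Index({'rootUrl': 'https://tc.example.com'})
        kwargs = {'limit': 100}
        while True:
            result = await index.listTasks('project.my-app', **kwargs)
            for task in result.get('tasks', []):
                print(task.get('namespace'))
            token = result.get('continuationToken')
            if not token:
                break
            kwargs['continuationToken'] = token

    asyncio.run(main())
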
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/notify.py b/third_party/python/taskcluster/taskcluster/generated/aio/notify.py
new file mode 100644
index 0000000000..391e0516a7
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/notify.py
@@ -0,0 +1,207 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Notify(AsyncBaseClient):
+ """
+ The notification service listens for tasks with associated notifications
+ and handles requests to send emails and post pulse messages.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'notify'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def email(self, *args, **kwargs):
+ """
+ Send an Email
+
+ Send an email to `address`. The content is markdown and will be rendered
+ to HTML, but both the HTML and raw markdown text will be sent in the
+ email. If a link is included, it will be rendered to a nice button in the
+ HTML version of the email.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
+
+ async def pulse(self, *args, **kwargs):
+ """
+ Publish a Pulse Message
+
+ Publish a message on pulse with the given `routingKey`.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
+
+ async def matrix(self, *args, **kwargs):
+ """
+ Post Matrix Message
+
+ Post a message to a room in Matrix. Optionally includes formatted message.
+
+ The `roomId` in the scopes is a fully formed `roomId` with leading `!` such
+ as `!foo:bar.com`.
+
+ Note that the matrix client used by taskcluster must be invited to a room before
+ it can post there!
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["matrix"], *args, **kwargs)
+
+ async def slack(self, *args, **kwargs):
+ """
+ Post Slack Message
+
+ Post a message to a Slack channel.
+
+ The `channelId` in the scopes is a Slack channel ID, starting with a capital C.
+
+ The Slack app can post into public channels by default but will need to be added
+ to private channels before it can post messages there.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["slack"], *args, **kwargs)
+
+ async def addDenylistAddress(self, *args, **kwargs):
+ """
+ Denylist Given Address
+
+ Add the given address to the notification denylist. Addresses in the denylist will be ignored
+ by the notification service.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["addDenylistAddress"], *args, **kwargs)
+
+ async def deleteDenylistAddress(self, *args, **kwargs):
+ """
+ Delete Denylisted Address
+
+ Delete the specified address from the notification denylist.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteDenylistAddress"], *args, **kwargs)
+
+ async def listDenylist(self, *args, **kwargs):
+ """
+ List Denylisted Notifications
+
+ Lists all the denylisted addresses.
+
+ By default this end-point will try to return up to 1000 addresses in one
+ request. But it **may return fewer**, even if more addresses are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `list` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the addresses at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listDenylist"], *args, **kwargs)
+
+ funcinfo = {
+ "addDenylistAddress": {
+ 'args': [],
+ 'input': 'v1/notification-address.json#',
+ 'method': 'post',
+ 'name': 'addDenylistAddress',
+ 'route': '/denylist/add',
+ 'stability': 'experimental',
+ },
+ "deleteDenylistAddress": {
+ 'args': [],
+ 'input': 'v1/notification-address.json#',
+ 'method': 'delete',
+ 'name': 'deleteDenylistAddress',
+ 'route': '/denylist/delete',
+ 'stability': 'experimental',
+ },
+ "email": {
+ 'args': [],
+ 'input': 'v1/email-request.json#',
+ 'method': 'post',
+ 'name': 'email',
+ 'route': '/email',
+ 'stability': 'experimental',
+ },
+ "listDenylist": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listDenylist',
+ 'output': 'v1/notification-address-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/denylist/list',
+ 'stability': 'experimental',
+ },
+ "matrix": {
+ 'args': [],
+ 'input': 'v1/matrix-request.json#',
+ 'method': 'post',
+ 'name': 'matrix',
+ 'route': '/matrix',
+ 'stability': 'experimental',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "pulse": {
+ 'args': [],
+ 'input': 'v1/pulse-request.json#',
+ 'method': 'post',
+ 'name': 'pulse',
+ 'route': '/pulse',
+ 'stability': 'experimental',
+ },
+ "slack": {
+ 'args': [],
+ 'input': 'v1/slack-request.json#',
+ 'method': 'post',
+ 'name': 'slack',
+ 'route': '/slack',
+ 'stability': 'experimental',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Notify']
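
A sketch of the `email` call described above; the address, subject, and link are placeholders, and the field names are assumptions consistent with the docstring (markdown `content`, optional `link` rendered as a button):

    import asyncio
    import taskcluster.aio

    async def main():
        notify = taskcluster.aio.Notify({
            'rootUrl': 'https://tc.example.com',
            'credentials': {'clientId': 'my-client', 'accessToken': '...'},
        })
        await notify.email({
            'address': 'someone@example.com',
            'subject': 'Nightly build finished',
            'content': 'The build **succeeded**.',  # markdown, per the docstring
            'link': {'text': 'Inspect', 'href': 'https://tc.example.com/tasks'},
        })

    asyncio.run(main())
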
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/notifyevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/notifyevents.py
new file mode 100644
index 0000000000..614adc548d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/notifyevents.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class NotifyEvents(AsyncBaseClient):
+ """
+ This pretty much only contains the simple free-form
+ message that can be published from this service by a request
+ from anybody with the proper scopes.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-notify/v1/",
+ }
+ serviceName = 'notify'
+ apiVersion = 'v1'
+
+ def notify(self, *args, **kwargs):
+ """
+ Notification Messages
+
+ An arbitrary message that a taskcluster user
+ can trigger if they like.
+
+ The standard one that is published by us watching
+ for the completion of tasks is just the task status
+ data that we pull from the queue `status()` endpoint
+ when we notice a task is complete.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'notification',
+ 'name': 'notify',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/notification-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
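+
+ # Editor's sketch (not generated code): `notify(...)` does not publish a
+ # message; it only builds an exchange/routing-key-pattern pair for a pulse
+ # listener. The rootUrl and import path are assumptions.
+ #
+ # import taskcluster.aio
+ #
+ # events = taskcluster.aio.NotifyEvents({'rootUrl': 'https://tc.example.com'})
+ # binding = events.notify({'reserved': '#'})
+ # # binding['exchange'] / binding['routingKeyPattern'] feed an AMQP consumer.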
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'NotifyEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/object.py b/third_party/python/taskcluster/taskcluster/generated/aio/object.py
new file mode 100644
index 0000000000..b575e11f80
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/object.py
@@ -0,0 +1,187 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Object(AsyncBaseClient):
+ """
+ The object service provides HTTP-accessible storage for large blobs of data.
+
+ Objects can be uploaded and downloaded, with the object data flowing directly
+ from the storage "backend" to the caller, and not via this service.
+ Once uploaded, objects are immutable until their expiration time.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'object'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def createUpload(self, *args, **kwargs):
+ """
+ Begin upload of a new object
+
+ Create a new object by initiating upload of its data.
+
+ This endpoint implements negotiation of upload methods. It can be called
+ multiple times if necessary, either to propose new upload methods or to
+ renew credentials for an already-agreed upload.
+
+ The `name` parameter can contain any printable ASCII character (0x20 - 0x7e).
+ The `uploadId` must be supplied by the caller, and any attempts to upload
+ an object with the same name but a different `uploadId` will fail.
+ Thus the first call to this method establishes the `uploadId` for the
+ object, and as long as that value is kept secret, no other caller can
+ upload an object of that name, regardless of scopes. Object expiration
+ cannot be changed after the initial call, either. It is possible to call
+ this method with no proposed upload methods, which has the effect of "locking
+ in" the `expiration`, `projectId`, and `uploadId` properties and any
+ supplied hashes.
+
+ Unfinished uploads expire after 1 day.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createUpload"], *args, **kwargs)
+
+ async def finishUpload(self, *args, **kwargs):
+ """
+ Mark an upload as complete.
+
+ This endpoint marks an upload as complete. This indicates that all data has been
+ transmitted to the backend. After this call, no further calls to `uploadObject` are
+ allowed, and downloads of the object may begin. This method is idempotent, but will
+ fail if given an incorrect uploadId for an unfinished upload.
+
+ Note that, once `finishUpload` is complete, the object is considered immutable.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["finishUpload"], *args, **kwargs)
+
+ async def startDownload(self, *args, **kwargs):
+ """
+ Download object data
+
+ Start the process of downloading an object's data. Call this endpoint with a list of acceptable
+ download methods, and the server will select a method and return the corresponding payload.
+
+ Returns a 406 error if none of the given download methods are available.
+
+ See [Download Methods](https://docs.taskcluster.net/docs/reference/platform/object/download-methods) for more detail.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["startDownload"], *args, **kwargs)
+
+ async def object(self, *args, **kwargs):
+ """
+ Get an object's metadata
+
+ Get the metadata for the named object. This metadata is not sufficient to
+ get the object's content; for that use `startDownload`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["object"], *args, **kwargs)
+
+ async def download(self, *args, **kwargs):
+ """
+ Get an object's data
+
+ Get the data in an object directly. This method does not return a JSON body, but
+ redirects to a location that will serve the object content directly.
+
+ URLs for this endpoint, perhaps with attached authentication (`?bewit=..`),
+ are typically used for downloads of objects by simple HTTP clients such as
+ web browsers, curl, or wget.
+
+ This method is limited by the common capabilities of HTTP, so it may not be
+ the most efficient, resilient, or featureful way to retrieve an artifact.
+ Situations where such functionality is required should use the
+ `startDownload` API endpoint.
+
+ See [Simple Downloads](https://docs.taskcluster.net/docs/reference/platform/object/simple-downloads) for more detail.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["download"], *args, **kwargs)
+
+ funcinfo = {
+ "createUpload": {
+ 'args': ['name'],
+ 'input': 'v1/create-upload-request.json#',
+ 'method': 'put',
+ 'name': 'createUpload',
+ 'output': 'v1/create-upload-response.json#',
+ 'route': '/upload/<name>',
+ 'stability': 'stable',
+ },
+ "download": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'download',
+ 'route': '/download/<name>',
+ 'stability': 'stable',
+ },
+ "finishUpload": {
+ 'args': ['name'],
+ 'input': 'v1/finish-upload-request.json#',
+ 'method': 'post',
+ 'name': 'finishUpload',
+ 'route': '/finish-upload/<name>',
+ 'stability': 'stable',
+ },
+ "object": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'object',
+ 'output': 'v1/get-object-response.json#',
+ 'route': '/metadata/<name>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "startDownload": {
+ 'args': ['name'],
+ 'input': 'v1/download-object-request.json#',
+ 'method': 'put',
+ 'name': 'startDownload',
+ 'output': 'v1/download-object-response.json#',
+ 'route': '/start-download/<name>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Object']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/purgecache.py b/third_party/python/taskcluster/taskcluster/generated/aio/purgecache.py
new file mode 100644
index 0000000000..d7f5c3881d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/purgecache.py
@@ -0,0 +1,123 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class PurgeCache(AsyncBaseClient):
+ """
+ The purge-cache service is responsible for tracking cache-purge requests.
+
+ Users create purge requests for specific caches on specific workers, and
+ these requests are timestamped. Workers consult the service before
+ starting a new task, and purge any caches older than the timestamp.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'purge-cache'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def purgeCache(self, *args, **kwargs):
+ """
+ Purge Worker Cache
+
+ Publish a request to purge caches named `cacheName`
+ on `workerPoolId` workers.
+
+ If such a request already exists, its `before` timestamp is updated to
+ the current time.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
+
+ async def allPurgeRequests(self, *args, **kwargs):
+ """
+ All Open Purge Requests
+
+ View all active purge requests.
+
+ This is useful mostly for administrators to view
+ the set of open purge requests. It should not
+ be used by workers. They should use the purgeRequests
+ endpoint that is specific to their workerType and
+ provisionerId.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs)
+
+ async def purgeRequests(self, *args, **kwargs):
+ """
+ Open Purge Requests for a worker pool
+
+ List the caches for this `workerPoolId` that should be
+ purged if they are from before the time given in the response.
+
+ This is intended to be used by workers to determine which caches to purge.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs)
+
+ funcinfo = {
+ "allPurgeRequests": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'allPurgeRequests',
+ 'output': 'v1/all-purge-cache-request-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/purge-cache/list',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "purgeCache": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/purge-cache-request.json#',
+ 'method': 'post',
+ 'name': 'purgeCache',
+ 'route': '/purge-cache/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "purgeRequests": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'purgeRequests',
+ 'output': 'v1/purge-cache-request-list.json#',
+ 'query': ['since'],
+ 'route': '/purge-cache/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'PurgeCache']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/queue.py b/third_party/python/taskcluster/taskcluster/generated/aio/queue.py
new file mode 100644
index 0000000000..ec07ac2cf8
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/queue.py
@@ -0,0 +1,1120 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Queue(AsyncBaseClient):
+ """
+ The queue service is responsible for accepting tasks and tracking their state
+ as they are executed by workers, in order to ensure they are eventually
+ resolved.
+
+ ## Artifact Storage Types
+
+ * **Object artifacts** contain arbitrary data, stored via the object service.
+ * **Redirect artifacts**, will redirect the caller to a URL when fetched
+ with a 303 (See Other) response. Clients will not apply any kind of
+ authentication to that URL.
+ * **Link artifacts**, will be treated as if the caller requested the linked
+ artifact on the same task. Links may be chained, but cycles are forbidden.
+ The caller must have scopes for the linked artifact, or a 403 response will
+ be returned.
+ * **Error artifacts**, only consist of meta-data which the queue will
+ store for you. These artifacts are only meant to indicate that the
+ worker or the task failed to generate a specific artifact that it
+ would otherwise have uploaded. For example, docker-worker will upload an
+ error artifact if the file it was supposed to upload doesn't exist or
+ turns out to be a directory. Clients requesting an error artifact will
+ get a `424` (Failed Dependency) response. This is mainly designed to
+ ensure that dependent tasks can distinguish between artifacts that were
+ supposed to be generated and artifacts for which the name is misspelled.
+ * **S3 artifacts** are used for static files which will be
+ stored on S3. When creating an S3 artifact the queue will return a
+ pre-signed URL to which you can do a `PUT` request to upload your
+ artifact. Note that the `PUT` request **must** specify the `content-length`
+ header and **must** give the `content-type` header the same value as in
+ the request to `createArtifact`. S3 artifacts will be deprecated soon,
+ and users should prefer object artifacts instead.
+
+ ## Artifact immutability
+
+ Generally speaking you cannot overwrite an artifact once created.
+ But if you repeat the request with the same properties the request will
+ succeed as the operation is idempotent.
+ This is useful if you need to refresh a signed URL while uploading.
+ Do not abuse this to overwrite artifacts created by another entity!
+ Such as a worker-host overwriting an artifact created by worker-code.
+
+ The queue defines the following *immutability special cases*:
+
+ * A `reference` artifact can replace an existing `reference` artifact.
+ * A `link` artifact can replace an existing `reference` artifact.
+ * Any artifact's `expires` can be extended (made later, but not earlier).
+ """
+
+ classOptions = {
+ }
+ serviceName = 'queue'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def task(self, *args, **kwargs):
+ """
+ Get Task Definition
+
+ This end-point will return the task-definition. Notice that the task
+ definition may have been modified by the queue; if an optional property is
+ not specified, the queue may provide a default value.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["task"], *args, **kwargs)
+
+ async def status(self, *args, **kwargs):
+ """
+ Get task status
+
+ Get task status structure from `taskId`
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["status"], *args, **kwargs)
+
+ async def listTaskGroup(self, *args, **kwargs):
+ """
+ List Task Group
+
+ List tasks sharing the same `taskGroupId`.
+
+ As a task-group may contain an unbounded number of tasks, this end-point
+ may return a `continuationToken`. To continue listing tasks, you must call
+ `listTaskGroup` again with the `continuationToken` as the
+ query-string option `continuationToken`.
+
+ By default this end-point will try to return up to 1000 members in one
+ request. But it **may return fewer**, even if more tasks are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listTaskGroup` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the members at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listTaskGroup"], *args, **kwargs)
+
+ async def listDependentTasks(self, *args, **kwargs):
+ """
+ List Dependent Tasks
+
+ List tasks that depend on the given `taskId`.
+
+ As many tasks from different task-groups may depend on a single task,
+ this end-point may return a `continuationToken`. To continue listing
+ tasks, you must call `listDependentTasks` again with the
+ `continuationToken` as the query-string option `continuationToken`.
+
+ By default this end-point will try to return up to 1000 tasks in one
+ request. But it **may return fewer**, even if more tasks are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listDependentTasks` with the last `continuationToken` until
+ you get a result without a `continuationToken`.
+
+ If you are not interested in listing all the tasks at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listDependentTasks"], *args, **kwargs)
+
+ async def createTask(self, *args, **kwargs):
+ """
+ Create New Task
+
+ Create a new task. This is an **idempotent** operation, so repeat it if
+ you get an internal server error or the network connection is dropped.
+
+ **Task `deadline`**: the deadline property can be no more than 5 days
+ into the future. This is to limit the amount of pending tasks not being
+ taken care of. Ideally, you should use a much shorter deadline.
+
+ **Task expiration**: the `expires` property must be greater than the
+ task `deadline`. If not provided it will default to `deadline` + one
+ year. Notice that artifacts created by a task must expire before the
+ task's expiration.
+
+ **Task specific routing-keys**: using the `task.routes` property you may
+ define task specific routing-keys. If a task has a task specific
+ routing-key: `<route>`, then when the AMQP message about the task is
+ published, the message will be CC'ed with the routing-key:
+ `route.<route>`. This is useful if you want another component to listen
+ for completed tasks you have posted. The caller must have scope
+ `queue:route:<route>` for each route.
+
+ **Dependencies**: any tasks referenced in `task.dependencies` must have
+ already been created at the time of this call.
+
+ **Scopes**: Note that the scopes required to complete this API call depend
+ on the content of the `scopes`, `routes`, `schedulerId`, `priority`,
+ `provisionerId`, and `workerType` properties of the task definition.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createTask"], *args, **kwargs)
+
+ async def scheduleTask(self, *args, **kwargs):
+ """
+ Schedule Defined Task
+
+ scheduleTask will schedule a task to be executed, even if it has
+ unresolved dependencies. A task would otherwise only be scheduled if
+ its dependencies were resolved.
+
+ This is useful if you have defined a task that depends on itself or on
+ some other task that has not been resolved, but you wish the task to be
+ scheduled immediately.
+
+ This will announce the task as pending and workers will be allowed to
+ claim it and resolve the task.
+
+ **Note** this operation is **idempotent** and will not fail or complain
+ if called with a `taskId` that is already scheduled, or even resolved.
+ To reschedule a task previously resolved, use `rerunTask`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs)
+
+ async def rerunTask(self, *args, **kwargs):
+ """
+ Rerun a Resolved Task
+
+ This method _reruns_ a previously resolved task, even if it was
+ _completed_. This is useful if your task completes unsuccessfully, and
+ you just want to run it from scratch again. This will also reset the
+ number of `retries` allowed. It will schedule a task that is _unscheduled_
+ regardless of the state of its dependencies.
+
+ This method is deprecated in favour of creating a new task with the same
+ task definition (but with a new taskId).
+
+ Remember that `retries` in the task status counts the number of runs that
+ the queue has started because the worker stopped responding, for example
+ because a spot node died.
+
+ **Remark** this operation is idempotent: if it is invoked for a task that
+ is `pending` or `running`, it will just return the current task status.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs)
+
+ async def cancelTask(self, *args, **kwargs):
+ """
+ Cancel Task
+
+ This method will cancel a task that is either `unscheduled`, `pending` or
+ `running`. It will resolve the current run as `exception` with
+ `reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e.
+ it doesn't have any runs, an initial run will be added and resolved as
+ described above. Hence, after canceling a task, it cannot be scheduled
+ with `queue.scheduleTask`, but a new run can be created with
+ `queue.rerunTask`. These semantics are equivalent to calling
+ `queue.scheduleTask` immediately followed by `queue.cancelTask`.
+
+ **Remark** this operation is idempotent: if you try to cancel a task that
+ isn't `unscheduled`, `pending` or `running`, this operation will just
+ return the current task status.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs)
+
+ async def claimWork(self, *args, **kwargs):
+ """
+ Claim Work
+
+ Claim pending task(s) for the given task queue.
+
+ If any work is available (even if fewer than the requested number of
+ tasks), this will return immediately. Otherwise, it will block for tens of
+ seconds waiting for work. If no work appears, it will return an empty
+ list of tasks. Callers should sleep a short while (to avoid denial of
+ service in an error condition) and call the endpoint again. This is a
+ simple implementation of "long polling".
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["claimWork"], *args, **kwargs)
+
+ async def claimTask(self, *args, **kwargs):
+ """
+ Claim Task
+
+ claim a task - never documented
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["claimTask"], *args, **kwargs)
+
+ async def reclaimTask(self, *args, **kwargs):
+ """
+ Reclaim task
+
+ Refresh the claim for a specific `runId` for given `taskId`. This updates
+ the `takenUntil` property and returns a new set of temporary credentials
+ for performing requests on behalf of the task. These credentials should
+ be used in-place of the credentials returned by `claimWork`.
+
+ The `reclaimTask` request serves to:
+ * Postpone `takenUntil` preventing the queue from resolving
+ `claim-expired`,
+ * Refresh temporary credentials used for processing the task, and
+ * Abort execution if the task/run has been resolved.
+
+ If the `takenUntil` timestamp is exceeded the queue will resolve the run
+ as _exception_ with reason `claim-expired`, and proceed to retry the
+ task. This ensures that tasks are retried, even if workers disappear
+ without warning.
+
+ If the task is resolved, this end-point will return `409` reporting
+ `RequestConflict`. This typically happens if the task has been canceled
+ or the `task.deadline` has been exceeded. If reclaiming fails, workers
+ should abort the task and forget about the given `runId`. There is no
+ need to resolve the run or upload artifacts.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reclaimTask"], *args, **kwargs)
+
+ async def reportCompleted(self, *args, **kwargs):
+ """
+ Report Run Completed
+
+ Report a task completed, resolving the run as `completed`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reportCompleted"], *args, **kwargs)
+
+ async def reportFailed(self, *args, **kwargs):
+ """
+ Report Run Failed
+
+ Report a run failed, resolving the run as `failed`. Use this to resolve
+ a run that failed because the task specific code behaved unexpectedly.
+ For example, the task exited non-zero, or didn't produce expected output.
+
+ Do not use this if the task couldn't be run because of a malformed
+ payload, or some other unexpected condition. In these cases we have a task
+ exception, which should be reported with `reportException`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs)
+
+ async def reportException(self, *args, **kwargs):
+ """
+ Report Task Exception
+
+ Resolve a run as _exception_. Generally, you will want to report tasks as
+ failed instead of exception. You should `reportException` if,
+
+ * The `task.payload` is invalid,
+ * Non-existent resources are referenced,
+ * Declared actions cannot be executed due to unavailable resources,
+ * The worker had to shut down prematurely,
+ * The worker experienced an unknown error, or,
+ * The task explicitly requested a retry.
+
+ Do not use this to signal that some user-specified code crashed for any
+ reason specific to this code. If user-specific code hits a resource that
+ is temporarily unavailable, the worker should report the task _failed_.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs)
+
+ async def createArtifact(self, *args, **kwargs):
+ """
+ Create Artifact
+
+ This API end-point creates an artifact for a specific run of a task. This
+ should **only** be used by a worker currently operating on this task, or
+ from a process running within the task (ie. on the worker).
+
+ All artifacts must specify when they expire. The queue will
+ automatically take care of deleting artifacts past their
+ expiration point. This feature makes it feasible to upload large
+ intermediate artifacts from data processing applications, as the
+ artifacts can be set to expire a few days later.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createArtifact"], *args, **kwargs)
+
+ async def finishArtifact(self, *args, **kwargs):
+ """
+ Finish Artifact
+
+ This endpoint marks an artifact as present for the given task, and
+ should be called when the artifact data is fully uploaded.
+
+ The storage types `reference`, `link`, and `error` do not need to
+ be finished, as they are finished immediately by `createArtifact`.
+ The storage type `s3` does not support this functionality and cannot
+ be finished. In all such cases, calling this method is an input error
+ (400).
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["finishArtifact"], *args, **kwargs)
+
+ async def getArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Data from Run
+
+ Get artifact by `<name>` from a specific run.
+
+ **Artifact Access**, in order to get an artifact you need the scope
+ `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+ To allow access to fetch artifacts with a client like `curl` or a web
+ browser, without using Taskcluster credentials, include a scope in the
+ `anonymous` role. The convention is to include
+ `queue:get-artifact:public/*`.
+
+ **Response**: the HTTP response to this method is a 303 redirect to the
+ URL from which the artifact can be downloaded. The body of that response
+ contains the data described in the output schema, containing the same URL.
+ Callers are encouraged to use whichever method of gathering the URL is
+ most convenient. Standard HTTP clients will follow the redirect, while
+ API client libraries will return the JSON body.
+
+ In order to download an artifact the following must be done:
+
+ 1. Obtain the queue URL. Building a signed URL with a taskcluster client is
+ recommended
+ 1. Make a GET request which does not follow redirects
+ 1. In all cases, if specified, the
+ x-taskcluster-location-{content,transfer}-{sha256,length} values must be
+ validated to be equal to the Content-Length and Sha256 checksum of the
+ final artifact downloaded, as well as any intermediate redirects.
+ 1. If this response is a 500-series error, retry using an exponential
+ backoff. No more than 5 retries should be attempted
+ 1. If this response is a 400-series error, treat it appropriately for
+ your context. This might be an error in responding to this request or
+ an Error storage type body. This request should not be retried.
+ 1. If this response is a 200-series response, the response body is the artifact.
+ If the x-taskcluster-location-{content,transfer}-{sha256,length} and
+ x-taskcluster-location-content-encoding are specified, they should match
+ this response body
+ 1. If the response type is a 300-series redirect, the artifact will be at the
+ location specified by the `Location` header. There are multiple artifact storage
+ types which use a 300-series redirect.
+ 1. For all redirects followed, the user must verify that the content-sha256, content-length,
+ transfer-sha256, transfer-length and content-encoding match every further request. The final
+ artifact must also be validated against the values specified in the original queue response
+ 1. Caching of requests with an x-taskcluster-artifact-storage-type value of `reference`
+ must not occur
+
+ **Headers**
+ The following important headers are set on the response to this method:
+
+ * location: the url of the artifact if a redirect is to be performed
+ * x-taskcluster-artifact-storage-type: the storage type. Example: s3
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getArtifact"], *args, **kwargs)
+
+ async def getLatestArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Data from Latest Run
+
+ Get artifact by `<name>` from the last run of a task.
+
+ **Artifact Access**, in order to get an artifact you need the scope
+ `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+ To allow access to fetch artifacts with a client like `curl` or a web
+ browser, without using Taskcluster credentials, include a scope in the
+ `anonymous` role. The convention is to include
+ `queue:get-artifact:public/*`.
+
+ **API Clients**, this method will redirect you to the artifact, if it is
+ stored externally. Either way, the response may not be JSON. So API
+ client users might want to generate a signed URL for this end-point and
+ use that URL with a normal HTTP client.
+
+ **Remark**, this end-point is slightly slower than
+ `queue.getArtifact`, so consider that if you already know the `runId` of
+ the latest run. Otherwise, just use the most convenient API end-point.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getLatestArtifact"], *args, **kwargs)
+
+ async def listArtifacts(self, *args, **kwargs):
+ """
+ Get Artifacts from Run
+
+ Returns a list of artifacts and associated meta-data for a given run.
+
+ As a task may have many artifacts, paging may be necessary. If this
+ end-point returns a `continuationToken`, you should call the end-point
+ again with the `continuationToken` as the query-string option:
+ `continuationToken`.
+
+ By default this end-point will list up to 1000 artifacts in a single page;
+ you may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
+
+ async def listLatestArtifacts(self, *args, **kwargs):
+ """
+ Get Artifacts from Latest Run
+
+ Returns a list of artifacts and associated meta-data for the latest run
+ from the given task.
+
+ As a task may have many artifacts, paging may be necessary. If this
+ end-point returns a `continuationToken`, you should call the end-point
+ again with the `continuationToken` as the query-string option:
+ `continuationToken`.
+
+ By default this end-point will list up to 1000 artifacts in a single page;
+ you may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listLatestArtifacts"], *args, **kwargs)
+
+ async def artifactInfo(self, *args, **kwargs):
+ """
+ Get Artifact Information From Run
+
+ Returns associated metadata for a given artifact, in the given task run.
+ The metadata is the same as that returned from `listArtifacts`, and does
+ not grant access to the artifact data.
+
+ Note that this method does *not* automatically follow link artifacts.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["artifactInfo"], *args, **kwargs)
+
+ async def latestArtifactInfo(self, *args, **kwargs):
+ """
+ Get Artifact Information From Latest Run
+
+ Returns associated metadata for a given artifact, in the latest run of the
+ task. The metadata is the same as that returned from `listArtifacts`,
+ and does not grant access to the artifact data.
+
+ Note that this method does *not* automatically follow link artifacts.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["latestArtifactInfo"], *args, **kwargs)
+
+ async def artifact(self, *args, **kwargs):
+ """
+ Get Artifact Content From Run
+
+ Returns information about the content of the artifact, in the given task run.
+
+ Depending on the storage type, the endpoint returns the content of the artifact
+ or enough information to access that content.
+
+ This method follows link artifacts, so it will not return content
+ for a link artifact.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["artifact"], *args, **kwargs)
+
+ async def latestArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Content From Latest Run
+
+ Returns information about the content of the artifact, in the latest task run.
+
+ Depending on the storage type, the endpoint returns the content of the artifact
+ or enough information to access that content.
+
+ This method follows link artifacts, so it will not return content
+ for a link artifact.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["latestArtifact"], *args, **kwargs)
+
+ async def listProvisioners(self, *args, **kwargs):
+ """
+ Get a list of all active provisioners
+
+ Get all active provisioners.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 provisioners in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs)
+
+ async def getProvisioner(self, *args, **kwargs):
+ """
+ Get an active provisioner
+
+ Get an active provisioner.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs)
+
+ async def declareProvisioner(self, *args, **kwargs):
+ """
+ Update a provisioner
+
+ Declare a provisioner, supplying some details about it.
+
+ `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
+ possessed. For example, a request to update the `my-provisioner`
+ provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
+ `queue:declare-provisioner:my-provisioner#description`.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
+
+ async def pendingTasks(self, *args, **kwargs):
+ """
+ Get Number of Pending Tasks
+
+ Get an approximate number of pending tasks for the given `taskQueueId`.
+
+ The underlying Azure Storage Queues only promise to give us an estimate.
+ Furthermore, we cache the result in memory for 20 seconds. So consumers
+ should by no means expect this to be an accurate number.
+ It is, however, a solid estimate of the number of pending tasks.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["pendingTasks"], *args, **kwargs)
+
+ async def listWorkerTypes(self, *args, **kwargs):
+ """
+ Get a list of all active worker-types
+
+ Get all active worker-types for the given provisioner.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 worker-types in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
+
+ async def getWorkerType(self, *args, **kwargs):
+ """
+ Get a worker-type
+
+ Get a worker-type from a provisioner.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getWorkerType"], *args, **kwargs)
+
+ async def declareWorkerType(self, *args, **kwargs):
+ """
+ Update a worker-type
+
+ Declare a workerType, supplying some details about it.
+
+ `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are
+ possessed. For example, a request to update the `highmem` worker-type within the `my-provisioner`
+ provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope
+ `queue:declare-worker-type:my-provisioner/highmem#description`.
+
+ This method is ``deprecated``
+ """
+
+ return await self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
+
+ async def listTaskQueues(self, *args, **kwargs):
+ """
+ Get a list of all active task queues
+
+ Get all active task queues.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 task queues in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listTaskQueues"], *args, **kwargs)
+
+ async def getTaskQueue(self, *args, **kwargs):
+ """
+ Get a task queue
+
+ Get a task queue.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getTaskQueue"], *args, **kwargs)
+
+ async def listWorkers(self, *args, **kwargs):
+ """
+ Get a list of all active workers of a workerType
+
+ Get a list of all active workers of a workerType.
+
+ `listWorkers` allows a response to be filtered by quarantined and non-quarantined workers.
+ To filter the query, you should call the end-point with `quarantined` as a query-string option with a
+ true or false value.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 workers in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkers"], *args, **kwargs)
+
+ async def getWorker(self, *args, **kwargs):
+ """
+ Get a worker
+
+ Get a worker from a worker-type.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["getWorker"], *args, **kwargs)
+
+ async def quarantineWorker(self, *args, **kwargs):
+ """
+ Quarantine a worker
+
+ Quarantine a worker
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs)
+
+ async def declareWorker(self, *args, **kwargs):
+ """
+ Declare a worker
+
+ Declare a worker, supplying some details about it.
+
+ `declareWorker` allows updating one or more properties of a worker as long as the required scopes are
+ possessed.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["declareWorker"], *args, **kwargs)
+
+ funcinfo = {
+ "artifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'artifact',
+ 'output': 'v1/artifact-content-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifact-content/<name>',
+ 'stability': 'stable',
+ },
+ "artifactInfo": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'artifactInfo',
+ 'output': 'v1/artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifact-info/<name>',
+ 'stability': 'stable',
+ },
+ "cancelTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'cancelTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/cancel',
+ 'stability': 'stable',
+ },
+ "claimTask": {
+ 'args': ['taskId', 'runId'],
+ 'input': 'v1/task-claim-request.json#',
+ 'method': 'post',
+ 'name': 'claimTask',
+ 'output': 'v1/task-claim-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/claim',
+ 'stability': 'deprecated',
+ },
+ "claimWork": {
+ 'args': ['taskQueueId'],
+ 'input': 'v1/claim-work-request.json#',
+ 'method': 'post',
+ 'name': 'claimWork',
+ 'output': 'v1/claim-work-response.json#',
+ 'route': '/claim-work/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "createArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'input': 'v1/post-artifact-request.json#',
+ 'method': 'post',
+ 'name': 'createArtifact',
+ 'output': 'v1/post-artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "createTask": {
+ 'args': ['taskId'],
+ 'input': 'v1/create-task-request.json#',
+ 'method': 'put',
+ 'name': 'createTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>',
+ 'stability': 'stable',
+ },
+ "declareProvisioner": {
+ 'args': ['provisionerId'],
+ 'input': 'v1/update-provisioner-request.json#',
+ 'method': 'put',
+ 'name': 'declareProvisioner',
+ 'output': 'v1/provisioner-response.json#',
+ 'route': '/provisioners/<provisionerId>',
+ 'stability': 'deprecated',
+ },
+ "declareWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'input': 'v1/update-worker-request.json#',
+ 'method': 'put',
+ 'name': 'declareWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "declareWorkerType": {
+ 'args': ['provisionerId', 'workerType'],
+ 'input': 'v1/update-workertype-request.json#',
+ 'method': 'put',
+ 'name': 'declareWorkerType',
+ 'output': 'v1/workertype-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
+ 'stability': 'deprecated',
+ },
+ "finishArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'input': 'v1/finish-artifact-request.json#',
+ 'method': 'put',
+ 'name': 'finishArtifact',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'getArtifact',
+ 'output': 'v1/get-artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getLatestArtifact": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'getLatestArtifact',
+ 'output': 'v1/get-artifact-response.json#',
+ 'route': '/task/<taskId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getProvisioner": {
+ 'args': ['provisionerId'],
+ 'method': 'get',
+ 'name': 'getProvisioner',
+ 'output': 'v1/provisioner-response.json#',
+ 'route': '/provisioners/<provisionerId>',
+ 'stability': 'deprecated',
+ },
+ "getTaskQueue": {
+ 'args': ['taskQueueId'],
+ 'method': 'get',
+ 'name': 'getTaskQueue',
+ 'output': 'v1/taskqueue-response.json#',
+ 'route': '/task-queues/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "getWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'method': 'get',
+ 'name': 'getWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "getWorkerType": {
+ 'args': ['provisionerId', 'workerType'],
+ 'method': 'get',
+ 'name': 'getWorkerType',
+ 'output': 'v1/workertype-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
+ 'stability': 'deprecated',
+ },
+ "latestArtifact": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'latestArtifact',
+ 'output': 'v1/artifact-content-response.json#',
+ 'route': '/task/<taskId>/artifact-content/<name>',
+ 'stability': 'stable',
+ },
+ "latestArtifactInfo": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'latestArtifactInfo',
+ 'output': 'v1/artifact-response.json#',
+ 'route': '/task/<taskId>/artifact-info/<name>',
+ 'stability': 'stable',
+ },
+ "listArtifacts": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'get',
+ 'name': 'listArtifacts',
+ 'output': 'v1/list-artifacts-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/runs/<runId>/artifacts',
+ 'stability': 'stable',
+ },
+ "listDependentTasks": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'listDependentTasks',
+ 'output': 'v1/list-dependent-tasks-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/dependents',
+ 'stability': 'stable',
+ },
+ "listLatestArtifacts": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'listLatestArtifacts',
+ 'output': 'v1/list-artifacts-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/artifacts',
+ 'stability': 'stable',
+ },
+ "listProvisioners": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listProvisioners',
+ 'output': 'v1/list-provisioners-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/provisioners',
+ 'stability': 'deprecated',
+ },
+ "listTaskGroup": {
+ 'args': ['taskGroupId'],
+ 'method': 'get',
+ 'name': 'listTaskGroup',
+ 'output': 'v1/list-task-group-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task-group/<taskGroupId>/list',
+ 'stability': 'stable',
+ },
+ "listTaskQueues": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listTaskQueues',
+ 'output': 'v1/list-taskqueues-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task-queues',
+ 'stability': 'stable',
+ },
+ "listWorkerTypes": {
+ 'args': ['provisionerId'],
+ 'method': 'get',
+ 'name': 'listWorkerTypes',
+ 'output': 'v1/list-workertypes-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/provisioners/<provisionerId>/worker-types',
+ 'stability': 'deprecated',
+ },
+ "listWorkers": {
+ 'args': ['provisionerId', 'workerType'],
+ 'method': 'get',
+ 'name': 'listWorkers',
+ 'output': 'v1/list-workers-response.json#',
+ 'query': ['continuationToken', 'limit', 'quarantined'],
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers',
+ 'stability': 'experimental',
+ },
+ "pendingTasks": {
+ 'args': ['taskQueueId'],
+ 'method': 'get',
+ 'name': 'pendingTasks',
+ 'output': 'v1/pending-tasks-response.json#',
+ 'route': '/pending/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "quarantineWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'input': 'v1/quarantine-worker-request.json#',
+ 'method': 'put',
+ 'name': 'quarantineWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "reclaimTask": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reclaimTask',
+ 'output': 'v1/task-reclaim-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/reclaim',
+ 'stability': 'stable',
+ },
+ "reportCompleted": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reportCompleted',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/completed',
+ 'stability': 'stable',
+ },
+ "reportException": {
+ 'args': ['taskId', 'runId'],
+ 'input': 'v1/task-exception-request.json#',
+ 'method': 'post',
+ 'name': 'reportException',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/exception',
+ 'stability': 'stable',
+ },
+ "reportFailed": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reportFailed',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/failed',
+ 'stability': 'stable',
+ },
+ "rerunTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'rerunTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/rerun',
+ 'stability': 'stable',
+ },
+ "scheduleTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'scheduleTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/schedule',
+ 'stability': 'stable',
+ },
+ "status": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'status',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/status',
+ 'stability': 'stable',
+ },
+ "task": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'task',
+ 'output': 'v1/task.json#',
+ 'route': '/task/<taskId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Queue']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/queueevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/queueevents.py
new file mode 100644
index 0000000000..be916b2ca3
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/queueevents.py
@@ -0,0 +1,719 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class QueueEvents(AsyncBaseClient):
+ """
+ The queue service is responsible for accepting tasks and tracking their
+ state as they are executed by workers, in order to ensure they are
+ eventually resolved.
+
+ This document describes AMQP exchanges offered by the queue, which allow
+ third-party listeners to monitor tasks as they progress to resolution.
+ These exchanges target the following audiences:
+ * Schedulers, who take action after tasks are completed,
+ * Workers, who want to listen for new or canceled tasks (optional),
+ * Tools, that want to update their view as tasks progress.
+
+ You'll notice that all the exchanges in the document share the same
+ routing key pattern. This makes it very easy to bind to all messages
+ about a certain kind of task.
+
+ **Task specific routes**, a task can define a task specific route using
+ the `task.routes` property. See task creation documentation for details
+ on permissions required to provide task specific routes. If a task has
+ the entry `'notify.by-email'` as a task specific route defined in
+ `task.routes`, all messages about this task will be CC'ed with the
+ routing-key `'route.notify.by-email'`.
+
+ These routes will always be prefixed `route.`, so they cannot interfere
+ with the _primary_ routing key as documented here. Notice that the
+ _primary_ routing key is always prefixed `primary.`. This is ensured
+ in the routing key reference, so API clients will do this automatically.
+
+ Please note that, the way RabbitMQ works, the message will only arrive
+ in your queue once, even though you may have bound to the exchange with
+ multiple routing key patterns that match more than one of the CC'ed
+ routing keys.
+
+ **Delivery guarantees**, most operations on the queue are idempotent,
+ which means that if repeated with the same arguments then the requests
+ will ensure completion of the operation and return the same response.
+ This is useful if the server crashes or the TCP connection breaks, but
+ when re-executing an idempotent operation, the queue will also resend
+ any related AMQP messages. Hence, messages may be repeated.
+
+ This shouldn't be much of a problem, as the best you can achieve using
+ confirm messages with AMQP is at-least-once delivery semantics. Hence,
+ this only prevents you from obtaining at-most-once delivery semantics.
+
+ **Remark**, some messages generated by timeouts may be dropped if the
+ server crashes at the wrong time. Ideally, we'll address this in the
+ future. For now we suggest you ignore this corner case, and notify us
+ if this corner case is of concern to you.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-queue/v1/",
+ }
+ serviceName = 'queue'
+ apiVersion = 'v1'
+
+ def taskDefined(self, *args, **kwargs):
+ """
+ Task Defined Messages
+
+ When a task is created or just defined, a message is posted to this
+ exchange.
+
+ This message exchange is mainly useful when tasks are created with dependencies
+ on incomplete tasks, as this does not make the task
+ `pending`. Thus, no `taskPending` message is published.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-defined',
+ 'name': 'taskDefined',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-defined-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
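+
+ # Editor's sketch (not generated code): build a binding that matches all
+ # task-defined messages for one task group. The rootUrl and taskGroupId
+ # are placeholders.
+ #
+ # import taskcluster.aio
+ #
+ # events = taskcluster.aio.QueueEvents({'rootUrl': 'https://tc.example.com'})
+ # binding = events.taskDefined({'taskGroupId': 'Z-tDsP4jQ3OUTjN0Q6LNKQ'})
+ # # binding['exchange'] and binding['routingKeyPattern'] can then be used
+ # # to bind a queue with any AMQP/pulse consumer.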
+
+ def taskPending(self, *args, **kwargs):
+ """
+ Task Pending Messages
+
+ When a task becomes `pending` a message is posted to this exchange.
+
+ This is useful for workers who don't want to constantly poll the queue
+ for new tasks. The queue will also be the authority for task states and
+ claims. But using this exchange, workers should be able to distribute work
+ efficiently and they would be able to reduce their polling interval
+ significantly without affecting general responsiveness.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-pending',
+ 'name': 'taskPending',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-pending-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskRunning(self, *args, **kwargs):
+ """
+ Task Running Messages
+
+ Whenever a task is claimed by a worker, a run is started on the worker,
+ and a message is posted on this exchange.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-running',
+ 'name': 'taskRunning',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-running-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def artifactCreated(self, *args, **kwargs):
+ """
+ Artifact Creation Messages
+
+ Whenever the `createArtifact` end-point is called, the queue will create
+ a record of the artifact and post a message on this exchange. All of this
+ happens before the queue returns a signed URL for the caller to upload
+ the actual artifact with (depending on `storageType`).
+
+ This means that the actual artifact is rarely available when this message
+ is posted. But it is not unreasonable to assume that the artifact will
+ become available at some point later. Most signatures will expire in
+ 30 minutes or so, forcing the uploader to call `createArtifact` with
+ the same payload again in order to continue uploading the artifact.
+
+ However, in most cases (especially for small artifacts) it's very
+ reasonable to assume the artifact will be available within a few minutes.
+ This property means that this exchange is mostly useful for tools
+ monitoring task evaluation. One could also use it to count the number of
+ artifacts per task, or to _index_ artifacts, though in most cases it'll
+ be smarter to index artifacts after the task in question has completed
+ successfully.
+
+ *NOTE*: this message is currently only sent for reference and error
+ artifacts. This will be remedied in a future version of Taskcluster.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'artifact-created',
+ 'name': 'artifactCreated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/artifact-created-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskCompleted(self, *args, **kwargs):
+ """
+ Task Completed Messages
+
+ When a task is successfully completed by a worker, a message is posted
+ to this exchange.
+ This message is routed using the `runId`, `workerGroup` and `workerId`
+ that completed the task. But information about additional runs is also
+ available from the task status structure.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-completed',
+ 'name': 'taskCompleted',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-completed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskFailed(self, *args, **kwargs):
+ """
+ Task Failed Messages
+
+ When a task ran but failed to complete successfully, a message is posted
+ to this exchange. This means the worker ran the task-specific code, but
+ that code exited non-zero.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-failed',
+ 'name': 'taskFailed',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-failed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskException(self, *args, **kwargs):
+ """
+ Task Exception Messages
+
+ Whenever Taskcluster fails to run a task, a message is posted to this
+ exchange. This happens if the task isn't completed before its `deadline`,
+ all retries failed (i.e. workers stopped responding), the task was
+ canceled by another entity, or the task carried a malformed payload.
+
+ The specific _reason_ is evident from the task status structure; refer
+ to the `reasonResolved` property for the last run.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-exception',
+ 'name': 'taskException',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-exception-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskGroupResolved(self, *args, **kwargs):
+ """
+ Task Group Resolved Messages
+
+ A message is published on task-group-resolved whenever all submitted
+ tasks (whether scheduled or unscheduled) for a given task group have
+ been resolved, regardless of whether they resolved as successful or
+ not. A task group may be resolved multiple times, since new tasks may
+ be submitted against an already resolved task group.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskGroupId: `taskGroupId` for the task-group this message concerns (required)
+
+ * schedulerId: `schedulerId` for the task-group this message concerns (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-group-resolved',
+ 'name': 'taskGroupResolved',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-group-resolved.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'QueueEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/secrets.py b/third_party/python/taskcluster/taskcluster/generated/aio/secrets.py
new file mode 100644
index 0000000000..f8ccdc2366
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/secrets.py
@@ -0,0 +1,143 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class Secrets(AsyncBaseClient):
+ """
+ The secrets service provides a simple key/value store for small bits of secret
+ data. Access is limited by scopes, so values can be considered secret from
+ those who do not have the relevant scopes.
+
+ Secrets also have an expiration date, and once a secret has expired it can no
+ longer be read. This is useful for short-term secrets such as a temporary
+ service credential or a one-time signing key.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'secrets'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def set(self, *args, **kwargs):
+ """
+ Set Secret
+
+ Set the secret associated with some key. If the secret already exists, it is
+ updated instead.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["set"], *args, **kwargs)
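+
+ # Illustrative sketch (secret name and payload fields assumed from the
+ # v1/secret.json# schema; `rootUrl` is a placeholder):
+ #
+ #   secrets = Secrets({'rootUrl': 'https://tc.example.com'})
+ #   await secrets.set('project/example', {
+ #       'secret': {'token': 'hunter2'},
+ #       'expires': '3000-01-01T00:00:00.000Z',
+ #   })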
+
+ async def remove(self, *args, **kwargs):
+ """
+ Delete Secret
+
+ Delete the secret associated with some key. It will succeed whether or not the secret exists.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["remove"], *args, **kwargs)
+
+ async def get(self, *args, **kwargs):
+ """
+ Read Secret
+
+ Read the secret associated with some key. If the secret has recently
+ expired, the response code 410 is returned. If the caller lacks the
+ scope necessary to get the secret, the call will fail with a 403 code
+ regardless of whether the secret exists.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["get"], *args, **kwargs)
+
+ async def list(self, *args, **kwargs):
+ """
+ List Secrets
+
+ List the names of all secrets.
+
+ By default this end-point will try to return up to 1000 secret names in one
+ request. But it **may return fewer**, even if more secrets are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `list` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the secrets at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["list"], *args, **kwargs)
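+
+ # Illustrative pagination sketch, reusing the `secrets` client from the
+ # sketch above (the `secrets` response key is an assumption): keep
+ # passing the returned `continuationToken` until a response comes back
+ # without one.
+ #
+ #   names, token = [], None
+ #   while True:
+ #       query = {'continuationToken': token} if token else {}
+ #       result = await secrets.list(query=query)
+ #       names.extend(result['secrets'])
+ #       token = result.get('continuationToken')
+ #       if not token:
+ #           break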
+
+ funcinfo = {
+ "get": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'get',
+ 'output': 'v1/secret.json#',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ "list": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'list',
+ 'output': 'v1/secret-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/secrets',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "remove": {
+ 'args': ['name'],
+ 'method': 'delete',
+ 'name': 'remove',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ "set": {
+ 'args': ['name'],
+ 'input': 'v1/secret.json#',
+ 'method': 'put',
+ 'name': 'set',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Secrets']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/workermanager.py b/third_party/python/taskcluster/taskcluster/generated/aio/workermanager.py
new file mode 100644
index 0000000000..3d4e1197fd
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/workermanager.py
@@ -0,0 +1,406 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class WorkerManager(AsyncBaseClient):
+ """
+ This service manages workers, including provisioning for dynamic worker pools.
+
+ Methods interacting with a provider may return a 503 response if that provider has
+ not been able to start up, such as if the service to which it interfaces has an
+ outage. Such requests can be retried as for any other 5xx response.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'worker-manager'
+ apiVersion = 'v1'
+
+ async def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ async def listProviders(self, *args, **kwargs):
+ """
+ List Providers
+
+ Retrieve a list of providers that are available for worker pools.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listProviders"], *args, **kwargs)
+
+ async def createWorkerPool(self, *args, **kwargs):
+ """
+ Create Worker Pool
+
+ Create a new worker pool. If the worker pool already exists, this will throw an error.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createWorkerPool"], *args, **kwargs)
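+
+ # Illustrative sketch (payload fields assumed from the
+ # v1/create-worker-pool-request.json# schema; import path, pool id and
+ # `rootUrl` are placeholders):
+ #
+ #   wm = WorkerManager({'rootUrl': 'https://tc.example.com'})
+ #   await wm.createWorkerPool('proj-test/my-pool', {
+ #       'providerId': 'static',
+ #       'description': 'example pool',
+ #       'owner': 'team@example.com',
+ #       'emailOnError': False,
+ #       'config': {},
+ #   })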
+
+ async def updateWorkerPool(self, *args, **kwargs):
+ """
+ Update Worker Pool
+
+ Given an existing worker pool definition, this will modify it and return
+ the new definition.
+
+ To delete a worker pool, set its `providerId` to `"null-provider"`.
+ After any existing workers have exited, a cleanup job will remove the
+ worker pool. During that time, the worker pool can be updated again, such
+ as to set its `providerId` to a real provider.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateWorkerPool"], *args, **kwargs)
+
+ async def deleteWorkerPool(self, *args, **kwargs):
+ """
+ Delete Worker Pool
+
+ Mark a worker pool for deletion. This is the same as updating the pool to
+ set its providerId to `"null-provider"`, but does not require scope
+ `worker-manager:provider:null-provider`.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["deleteWorkerPool"], *args, **kwargs)
+
+ async def workerPool(self, *args, **kwargs):
+ """
+ Get Worker Pool
+
+ Fetch an existing worker pool definition.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["workerPool"], *args, **kwargs)
+
+ async def listWorkerPools(self, *args, **kwargs):
+ """
+ List All Worker Pools
+
+ Get the list of all the existing worker pools.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkerPools"], *args, **kwargs)
+
+ async def reportWorkerError(self, *args, **kwargs):
+ """
+ Report an error from a worker
+
+ Report an error that occurred on a worker. This error will be included
+ with the other errors in `listWorkerPoolErrors(workerPoolId)`.
+
+ Workers can use this endpoint to report startup or configuration errors
+ that might be associated with the worker pool configuration and thus of
+ interest to a worker-pool administrator.
+
+ NOTE: errors are publicly visible. Ensure that none of the content
+ contains secrets or other sensitive information.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reportWorkerError"], *args, **kwargs)
+
+ async def listWorkerPoolErrors(self, *args, **kwargs):
+ """
+ List Worker Pool Errors
+
+ Get the list of worker pool errors.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkerPoolErrors"], *args, **kwargs)
+
+ async def listWorkersForWorkerGroup(self, *args, **kwargs):
+ """
+ Workers in a specific Worker Group in a Worker Pool
+
+ Get the list of all the existing workers in a given group in a given worker pool.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkersForWorkerGroup"], *args, **kwargs)
+
+ async def worker(self, *args, **kwargs):
+ """
+ Get a Worker
+
+ Get a single worker.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["worker"], *args, **kwargs)
+
+ async def createWorker(self, *args, **kwargs):
+ """
+ Create a Worker
+
+ Create a new worker. This is only useful for worker pools where the provider
+ does not create workers automatically, such as those with a `static` provider
+ type. Providers that do not support creating workers will return a 400 error.
+ See the documentation for the individual providers, and in particular the
+ [static provider](https://docs.taskcluster.net/docs/reference/core/worker-manager/)
+ for more information.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["createWorker"], *args, **kwargs)
+
+ async def updateWorker(self, *args, **kwargs):
+ """
+ Update an existing Worker
+
+ Update an existing worker in-place. Like `createWorker`, this is only useful for
+ worker pools where the provider does not create workers automatically.
+ This method allows updating all fields in the schema unless otherwise indicated
+ in the provider documentation.
+ See the documentation for the individual providers, and in particular the
+ [static provider](https://docs.taskcluster.net/docs/reference/core/worker-manager/)
+ for more information.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["updateWorker"], *args, **kwargs)
+
+ async def removeWorker(self, *args, **kwargs):
+ """
+ Remove a Worker
+
+ Remove an existing worker. The precise behavior of this method depends
+ on the provider implementing the given worker. Some providers
+ do not support removing workers at all, and will return a 400 error.
+ Others may begin removing the worker, but it may remain available via
+ the API (perhaps even in state RUNNING) afterward.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["removeWorker"], *args, **kwargs)
+
+ async def listWorkersForWorkerPool(self, *args, **kwargs):
+ """
+ Workers in a Worker Pool
+
+ Get the list of all the existing workers in a given worker pool.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["listWorkersForWorkerPool"], *args, **kwargs)
+
+ async def registerWorker(self, *args, **kwargs):
+ """
+ Register a running worker
+
+ Register a running worker. Workers call this method on worker start-up.
+
+ This call both marks the worker as running and returns the credentials
+ the worker will require to perform its work. The worker must provide
+ some proof of its identity, and that proof varies by provider type.
+
+ This method is ``stable``
+ """
+
+ return await self._makeApiCall(self.funcinfo["registerWorker"], *args, **kwargs)
+
+ async def reregisterWorker(self, *args, **kwargs):
+ """
+ Reregister a Worker
+
+ Reregister a running worker.
+
+ This will generate and return new Taskcluster credentials for the worker
+ on that instance to use. The credentials will not live longer than the
+ `registrationTimeout` for that worker. The endpoint will update `terminateAfter`
+ for the worker so that worker-manager does not terminate the instance.
+
+ This method is ``experimental``
+ """
+
+ return await self._makeApiCall(self.funcinfo["reregisterWorker"], *args, **kwargs)
+
+ funcinfo = {
+ "createWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'input': 'v1/create-worker-request.json#',
+ 'method': 'put',
+ 'name': 'createWorker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "createWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/create-worker-pool-request.json#',
+ 'method': 'put',
+ 'name': 'createWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "deleteWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'delete',
+ 'name': 'deleteWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "listProviders": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listProviders',
+ 'output': 'v1/provider-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/providers',
+ 'stability': 'stable',
+ },
+ "listWorkerPoolErrors": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'listWorkerPoolErrors',
+ 'output': 'v1/worker-pool-error-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/worker-pool-errors/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "listWorkerPools": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listWorkerPools',
+ 'output': 'v1/worker-pool-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/worker-pools',
+ 'stability': 'stable',
+ },
+ "listWorkersForWorkerGroup": {
+ 'args': ['workerPoolId', 'workerGroup'],
+ 'method': 'get',
+ 'name': 'listWorkersForWorkerGroup',
+ 'output': 'v1/worker-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/workers/<workerPoolId>:/<workerGroup>',
+ 'stability': 'stable',
+ },
+ "listWorkersForWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'listWorkersForWorkerPool',
+ 'output': 'v1/worker-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/workers/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "registerWorker": {
+ 'args': [],
+ 'input': 'v1/register-worker-request.json#',
+ 'method': 'post',
+ 'name': 'registerWorker',
+ 'output': 'v1/register-worker-response.json#',
+ 'route': '/worker/register',
+ 'stability': 'stable',
+ },
+ "removeWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'method': 'delete',
+ 'name': 'removeWorker',
+ 'route': '/workers/<workerPoolId>/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "reportWorkerError": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/report-worker-error-request.json#',
+ 'method': 'post',
+ 'name': 'reportWorkerError',
+ 'output': 'v1/worker-pool-error.json#',
+ 'route': '/worker-pool-errors/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "reregisterWorker": {
+ 'args': [],
+ 'input': 'v1/reregister-worker-request.json#',
+ 'method': 'post',
+ 'name': 'reregisterWorker',
+ 'output': 'v1/reregister-worker-response.json#',
+ 'route': '/worker/reregister',
+ 'stability': 'experimental',
+ },
+ "updateWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'input': 'v1/create-worker-request.json#',
+ 'method': 'post',
+ 'name': 'updateWorker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "updateWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/update-worker-pool-request.json#',
+ 'method': 'post',
+ 'name': 'updateWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'experimental',
+ },
+ "worker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'method': 'get',
+ 'name': 'worker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "workerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'workerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'WorkerManager']
diff --git a/third_party/python/taskcluster/taskcluster/generated/aio/workermanagerevents.py b/third_party/python/taskcluster/taskcluster/generated/aio/workermanagerevents.py
new file mode 100644
index 0000000000..80bd60729a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/aio/workermanagerevents.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ...aio.asyncclient import AsyncBaseClient
+from ...aio.asyncclient import createApiClient
+from ...aio.asyncclient import config
+from ...aio.asyncclient import createTemporaryCredentials
+from ...aio.asyncclient import createSession
+_defaultConfig = config
+
+
+class WorkerManagerEvents(AsyncBaseClient):
+ """
+ These exchanges provide notifications when a worker pool is created or
+ updated. This is so that the provisioner running in a different process
+ at the other end can synchronize to the changes. But you are of course
+ welcome to use these for other purposes, monitoring changes for example.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-worker-manager/v1/",
+ }
+ serviceName = 'worker-manager'
+ apiVersion = 'v1'
+
+ def workerPoolCreated(self, *args, **kwargs):
+ """
+ Worker Pool Created Messages
+
+ Whenever the API receives a request to create a worker pool, a message
+ is posted to this exchange and a provider can act upon it.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'worker-pool-created',
+ 'name': 'workerPoolCreated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-worker-pool-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def workerPoolUpdated(self, *args, **kwargs):
+ """
+ Worker Pool Updated Messages
+
+ Whenever the API receives a request to update a worker pool, a message
+ is posted to this exchange and a provider can act upon it.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is automatically done by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'worker-pool-updated',
+ 'name': 'workerPoolUpdated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-worker-pool-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'WorkerManagerEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/auth.py b/third_party/python/taskcluster/taskcluster/generated/auth.py
new file mode 100644
index 0000000000..a4738ee143
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/auth.py
@@ -0,0 +1,781 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Auth(BaseClient):
+ """
+ Authentication related API end-points for Taskcluster and related
+ services. These API end-points are of interest if you wish to:
+ * Authorize a request signed with Taskcluster credentials,
+ * Manage clients and roles,
+ * Inspect or audit clients and roles,
+ * Gain access to various services guarded by this API.
+
+ """
+
+ classOptions = {
+ }
+ serviceName = 'auth'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def listClients(self, *args, **kwargs):
+ """
+ List Clients
+
+ Get a list of all clients. With `prefix`, only clients for which
+ it is a prefix of the clientId are returned.
+
+ By default this end-point will try to return up to 1000 clients in one
+ request. But it **may return fewer, even none**.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listClients` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listClients"], *args, **kwargs)
+
+ def client(self, *args, **kwargs):
+ """
+ Get Client
+
+ Get information about a single client.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["client"], *args, **kwargs)
+
+ def createClient(self, *args, **kwargs):
+ """
+ Create Client
+
+ Create a new client and get the `accessToken` for this client.
+ You should store the `accessToken` from this API call as there is no
+ other way to retrieve it.
+
+ If you lose the `accessToken` you can call `resetAccessToken` to reset
+ it, and a new `accessToken` will be returned, but you cannot retrieve the
+ current `accessToken`.
+
+ If a client with the same `clientId` already exists this operation will
+ fail. Use `updateClient` if you wish to update an existing client.
+
+ The caller's scopes must satisfy `scopes`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createClient"], *args, **kwargs)
+
+ def resetAccessToken(self, *args, **kwargs):
+ """
+ Reset `accessToken`
+
+ Reset a client's `accessToken`. This will revoke the existing
+ `accessToken`, generate a new `accessToken` and return it from this
+ call.
+
+ There is no way to retrieve an existing `accessToken`, so if you lose it
+ you must reset the accessToken to acquire it again.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["resetAccessToken"], *args, **kwargs)
+
+ def updateClient(self, *args, **kwargs):
+ """
+ Update Client
+
+ Update an existing client. The `clientId` and `accessToken` cannot be
+ updated, but `scopes` can be modified. The caller's scopes must
+ satisfy all scopes being added to the client in the update operation.
+ If no scopes are given in the request, the client's scopes remain
+ unchanged.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateClient"], *args, **kwargs)
+
+ def enableClient(self, *args, **kwargs):
+ """
+ Enable Client
+
+ Enable a client that was disabled with `disableClient`. If the client
+ is already enabled, this does nothing.
+
+ This is typically used by identity providers to re-enable clients that
+ had been disabled when the corresponding identity's scopes changed.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["enableClient"], *args, **kwargs)
+
+ def disableClient(self, *args, **kwargs):
+ """
+ Disable Client
+
+ Disable a client. If the client is already disabled, this does nothing.
+
+ This is typically used by identity providers to disable clients when the
+ corresponding identity's scopes no longer satisfy the client's scopes.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["disableClient"], *args, **kwargs)
+
+ def deleteClient(self, *args, **kwargs):
+ """
+ Delete Client
+
+ Delete a client, please note that any roles related to this client must
+ be deleted independently.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteClient"], *args, **kwargs)
+
+ def listRoles(self, *args, **kwargs):
+ """
+ List Roles (no pagination)
+
+ Get a list of all roles. Each role object also includes the list of
+ scopes it expands to. This always returns all roles in a single HTTP
+ request.
+
+ To get paginated results, use `listRoles2`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listRoles"], *args, **kwargs)
+
+ def listRoles2(self, *args, **kwargs):
+ """
+ List Roles
+
+ Get a list of all roles. Each role object also includes the list of
+ scopes it expands to. This is similar to `listRoles` but differs in the
+ format of the response.
+
+ If no limit is given, all roles are returned. Since this
+ list may become long, callers can use the `limit` and `continuationToken`
+ query arguments to page through the responses.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listRoles2"], *args, **kwargs)
+
+ def listRoleIds(self, *args, **kwargs):
+ """
+ List Role IDs
+
+ Get a list of all role IDs.
+
+ If no limit is given, the roleIds of all roles are returned. Since this
+ list may become long, callers can use the `limit` and `continuationToken`
+ query arguments to page through the responses.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs)
+
+ def role(self, *args, **kwargs):
+ """
+ Get Role
+
+ Get information about a single role, including the set of scopes that the
+ role expands to.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["role"], *args, **kwargs)
+
+ def createRole(self, *args, **kwargs):
+ """
+ Create Role
+
+ Create a new role.
+
+ The caller's scopes must satisfy the new role's scopes.
+
+ If there already exists a role with the same `roleId` this operation
+ will fail. Use `updateRole` to modify an existing role.
+
+ Creation of a role that will generate an infinite expansion will result
+ in an error response.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createRole"], *args, **kwargs)
+
+ def updateRole(self, *args, **kwargs):
+ """
+ Update Role
+
+ Update an existing role.
+
+ The caller's scopes must satisfy all of the new scopes being added, but
+ need not satisfy all of the role's existing scopes.
+
+ An update of a role that will generate an infinite expansion will result
+ in an error response.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateRole"], *args, **kwargs)
+
+ def deleteRole(self, *args, **kwargs):
+ """
+ Delete Role
+
+ Delete a role. This operation will succeed regardless of whether or not
+ the role exists.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteRole"], *args, **kwargs)
+
+ def expandScopes(self, *args, **kwargs):
+ """
+ Expand Scopes
+
+ Return an expanded copy of the given scopeset, with scopes implied by any
+ roles included.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["expandScopes"], *args, **kwargs)
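+
+ # Illustrative sketch: input and output are scopesets per
+ # v1/scopeset.json#; the role name and `rootUrl` are placeholders.
+ #
+ #   auth = Auth({'rootUrl': 'https://tc.example.com'})
+ #   expanded = auth.expandScopes({'scopes': ['assume:project:example']})
+ #   print(expanded['scopes'])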
+
+ def currentScopes(self, *args, **kwargs):
+ """
+ Get Current Scopes
+
+ Return the expanded scopes available in the request, taking into account all sources
+ of scopes and scope restrictions (temporary credentials, assumeScopes, client scopes,
+ and roles).
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["currentScopes"], *args, **kwargs)
+
+ def awsS3Credentials(self, *args, **kwargs):
+ """
+ Get Temporary Read/Write Credentials S3
+
+ Get temporary AWS credentials for `read-write` or `read-only` access to
+ a given `bucket` and `prefix` within that bucket.
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. Please note that the `level`
+ parameter is required in the scope guarding access. The bucket name must
+ not contain `.`, as recommended by Amazon.
+
+ This method can only allow access to a whitelisted set of buckets, as configured
+ in the Taskcluster deployment.
+
+ The credentials are set to expire after an hour, but this behavior is
+ subject to change. Hence, you should always read the `expires` property
+ from the response, if you intend to maintain active credentials in your
+ application.
+
+ Please note that your `prefix` may not start with slash `/`. Such a prefix
+ is allowed on S3, but we forbid it here to discourage bad behavior.
+
+ Also note that if your `prefix` doesn't end in a slash `/`, the STS
+ credentials may allow access to unexpected keys, as S3 does not treat
+ slashes specially. For example, a prefix of `my-folder` will allow
+ access to `my-folder/file.txt` as expected, but also to `my-folder.txt`,
+ which may not be intended.
+
+ Finally, note that the `PutObjectAcl` call is not allowed. Passing a canned
+ ACL other than `private` to `PutObject` is treated as a `PutObjectAcl` call, and
+ will result in an access-denied error from AWS. This limitation is due to a
+ security flaw in Amazon S3 which might otherwise allow indefinite access to
+ uploaded objects.
+
+ **EC2 metadata compatibility**, if the querystring parameter
+ `?format=iam-role-compat` is given, the response will be compatible
+ with the JSON exposed by the EC2 metadata service. This aims to ease
+ compatibility for libraries and tools built to auto-refresh credentials.
+ For details on the format returned by EC2 metadata service see:
+ [EC2 User Guide](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials).
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["awsS3Credentials"], *args, **kwargs)
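+
+ # Illustrative sketch, reusing the `auth` client from the sketch above
+ # (bucket and prefix are placeholders); note the trailing slash on the
+ # prefix, per the warning in the docstring:
+ #
+ #   creds = auth.awsS3Credentials('read-only', 'example-bucket', 'my-folder/')
+ #   # the response is assumed to include `credentials` and `expires`
+ #   print(creds['expires'])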
+
+ def azureAccounts(self, *args, **kwargs):
+ """
+ List Accounts Managed by Auth
+
+ Retrieve a list of all Azure accounts managed by Taskcluster Auth.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureAccounts"], *args, **kwargs)
+
+ def azureTables(self, *args, **kwargs):
+ """
+ List Tables in an Account Managed by Auth
+
+ Retrieve a list of all tables in an account.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureTables"], *args, **kwargs)
+
+ def azureTableSAS(self, *args, **kwargs):
+ """
+ Get Shared-Access-Signature for Azure Table
+
+ Get a shared access signature (SAS) string for use with a specific Azure
+ Table Storage table.
+
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. If level is read-write, it will create the
+ table if it doesn't already exist.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureTableSAS"], *args, **kwargs)
+
+ def azureContainers(self, *args, **kwargs):
+ """
+ List containers in an Account Managed by Auth
+
+ Retrieve a list of all containers in an account.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureContainers"], *args, **kwargs)
+
+ def azureContainerSAS(self, *args, **kwargs):
+ """
+ Get Shared-Access-Signature for Azure Container
+
+ Get a shared access signature (SAS) string for use with a specific Azure
+ Blob Storage container.
+
+ The `level` parameter can be `read-write` or `read-only` and determines
+ which type of credentials are returned. If level is read-write, it will create the
+ container if it doesn't already exist.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["azureContainerSAS"], *args, **kwargs)
+
+ def sentryDSN(self, *args, **kwargs):
+ """
+ Get DSN for Sentry Project
+
+ Get temporary DSN (access credentials) for a sentry project.
+ The credentials returned can be used with any Sentry client for up to
+ 24 hours, after which the credentials will be automatically disabled.
+
+ If the project doesn't exist it will be created, and assigned to the
+ initial team configured for this component. Contact a Sentry admin
+ to have the project transferred to a team you have access to, if needed.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["sentryDSN"], *args, **kwargs)
+
+ def websocktunnelToken(self, *args, **kwargs):
+ """
+ Get a client token for the Websocktunnel service
+
+ Get a temporary token suitable for use connecting to a
+ [websocktunnel](https://github.com/taskcluster/taskcluster/tree/main/tools/websocktunnel) server.
+
+ The resulting token will only be accepted by servers with a matching audience
+ value. Reaching such a server is the caller's responsibility. In general,
+ a server URL or set of URLs should be provided to the caller as configuration
+ along with the audience value.
+
+ The token is valid for a limited time (on the scale of hours). Callers should
+ refresh it before expiration.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["websocktunnelToken"], *args, **kwargs)
+
+ def gcpCredentials(self, *args, **kwargs):
+ """
+ Get Temporary GCP Credentials
+
+ Get temporary GCP credentials for the given serviceAccount in the given project.
+
+ Only preconfigured projects and serviceAccounts are allowed, as defined in the
+ deployment of the Taskcluster services.
+
+ The credentials are set to expire after an hour, but this behavior is
+ subject to change. Hence, you should always read the `expires` property
+ from the response, if you intend to maintain active credentials in your
+ application.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["gcpCredentials"], *args, **kwargs)
+
+ def authenticateHawk(self, *args, **kwargs):
+ """
+ Authenticate Hawk Request
+
+ Validate the request signature given on input and return list of scopes
+ that the authenticating client has.
+
+ This method is used by other services that wish to rely on Taskcluster
+ credentials for authentication. This way we can use Hawk without having
+ the secret credentials leave this service.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["authenticateHawk"], *args, **kwargs)
+
+ def testAuthenticate(self, *args, **kwargs):
+ """
+ Test Authentication
+
+ Utility method to test client implementations of Taskcluster
+ authentication.
+
+ Rather than using real credentials, this endpoint accepts requests with
+ clientId `tester` and accessToken `no-secret`. That client's scopes are
+ based on `clientScopes` in the request body.
+
+ The request is validated, with any certificate, authorizedScopes, etc.
+ applied, and the resulting scopes are checked against `requiredScopes`
+ from the request body. On success, the response contains the clientId
+ and scopes as seen by the API method.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["testAuthenticate"], *args, **kwargs)
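+
+ # Illustrative sketch (request fields assumed from
+ # v1/test-authenticate-request.json#):
+ #
+ #   result = auth.testAuthenticate({
+ #       'clientScopes': ['test:*'],
+ #       'requiredScopes': ['test:foo'],
+ #   })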
+
+ def testAuthenticateGet(self, *args, **kwargs):
+ """
+ Test Authentication (GET)
+
+ Utility method similar to `testAuthenticate`, but with the GET method,
+ so it can be used with signed URLs (bewits).
+
+ Rather than using real credentials, this endpoint accepts requests with
+ clientId `tester` and accessToken `no-secret`. That client's scopes are
+ `['test:*', 'auth:create-client:test:*']`. The call fails if the
+ `test:authenticate-get` scope is not available.
+
+ The request is validated, with any certificate, authorizedScopes, etc.
+ applied, and the resulting scopes are checked, just like any API call.
+ On success, the response contains the clientId and scopes as seen by
+ the API method.
+
+ This method may later be extended to allow specification of client and
+ required scopes via query arguments.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["testAuthenticateGet"], *args, **kwargs)
+
+ funcinfo = {
+ "authenticateHawk": {
+ 'args': [],
+ 'input': 'v1/authenticate-hawk-request.json#',
+ 'method': 'post',
+ 'name': 'authenticateHawk',
+ 'output': 'v1/authenticate-hawk-response.json#',
+ 'route': '/authenticate-hawk',
+ 'stability': 'stable',
+ },
+ "awsS3Credentials": {
+ 'args': ['level', 'bucket', 'prefix'],
+ 'method': 'get',
+ 'name': 'awsS3Credentials',
+ 'output': 'v1/aws-s3-credentials-response.json#',
+ 'query': ['format'],
+ 'route': '/aws/s3/<level>/<bucket>/<prefix>',
+ 'stability': 'stable',
+ },
+ "azureAccounts": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'azureAccounts',
+ 'output': 'v1/azure-account-list-response.json#',
+ 'route': '/azure/accounts',
+ 'stability': 'stable',
+ },
+ "azureContainerSAS": {
+ 'args': ['account', 'container', 'level'],
+ 'method': 'get',
+ 'name': 'azureContainerSAS',
+ 'output': 'v1/azure-container-response.json#',
+ 'route': '/azure/<account>/containers/<container>/<level>',
+ 'stability': 'stable',
+ },
+ "azureContainers": {
+ 'args': ['account'],
+ 'method': 'get',
+ 'name': 'azureContainers',
+ 'output': 'v1/azure-container-list-response.json#',
+ 'query': ['continuationToken'],
+ 'route': '/azure/<account>/containers',
+ 'stability': 'stable',
+ },
+ "azureTableSAS": {
+ 'args': ['account', 'table', 'level'],
+ 'method': 'get',
+ 'name': 'azureTableSAS',
+ 'output': 'v1/azure-table-access-response.json#',
+ 'route': '/azure/<account>/table/<table>/<level>',
+ 'stability': 'stable',
+ },
+ "azureTables": {
+ 'args': ['account'],
+ 'method': 'get',
+ 'name': 'azureTables',
+ 'output': 'v1/azure-table-list-response.json#',
+ 'query': ['continuationToken'],
+ 'route': '/azure/<account>/tables',
+ 'stability': 'stable',
+ },
+ "client": {
+ 'args': ['clientId'],
+ 'method': 'get',
+ 'name': 'client',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "createClient": {
+ 'args': ['clientId'],
+ 'input': 'v1/create-client-request.json#',
+ 'method': 'put',
+ 'name': 'createClient',
+ 'output': 'v1/create-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "createRole": {
+ 'args': ['roleId'],
+ 'input': 'v1/create-role-request.json#',
+ 'method': 'put',
+ 'name': 'createRole',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "currentScopes": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'currentScopes',
+ 'output': 'v1/scopeset.json#',
+ 'route': '/scopes/current',
+ 'stability': 'stable',
+ },
+ "deleteClient": {
+ 'args': ['clientId'],
+ 'method': 'delete',
+ 'name': 'deleteClient',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "deleteRole": {
+ 'args': ['roleId'],
+ 'method': 'delete',
+ 'name': 'deleteRole',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "disableClient": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'disableClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>/disable',
+ 'stability': 'stable',
+ },
+ "enableClient": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'enableClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>/enable',
+ 'stability': 'stable',
+ },
+ "expandScopes": {
+ 'args': [],
+ 'input': 'v1/scopeset.json#',
+ 'method': 'post',
+ 'name': 'expandScopes',
+ 'output': 'v1/scopeset.json#',
+ 'route': '/scopes/expand',
+ 'stability': 'stable',
+ },
+ "gcpCredentials": {
+ 'args': ['projectId', 'serviceAccount'],
+ 'method': 'get',
+ 'name': 'gcpCredentials',
+ 'output': 'v1/gcp-credentials-response.json#',
+ 'route': '/gcp/credentials/<projectId>/<serviceAccount>',
+ 'stability': 'stable',
+ },
+ "listClients": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listClients',
+ 'output': 'v1/list-clients-response.json#',
+ 'query': ['prefix', 'continuationToken', 'limit'],
+ 'route': '/clients/',
+ 'stability': 'stable',
+ },
+ "listRoleIds": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoleIds',
+ 'output': 'v1/list-role-ids-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/roleids/',
+ 'stability': 'stable',
+ },
+ "listRoles": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoles',
+ 'output': 'v1/list-roles-response.json#',
+ 'route': '/roles/',
+ 'stability': 'stable',
+ },
+ "listRoles2": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listRoles2',
+ 'output': 'v1/list-roles2-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/roles2/',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "resetAccessToken": {
+ 'args': ['clientId'],
+ 'method': 'post',
+ 'name': 'resetAccessToken',
+ 'output': 'v1/create-client-response.json#',
+ 'route': '/clients/<clientId>/reset',
+ 'stability': 'stable',
+ },
+ "role": {
+ 'args': ['roleId'],
+ 'method': 'get',
+ 'name': 'role',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "sentryDSN": {
+ 'args': ['project'],
+ 'method': 'get',
+ 'name': 'sentryDSN',
+ 'output': 'v1/sentry-dsn-response.json#',
+ 'route': '/sentry/<project>/dsn',
+ 'stability': 'stable',
+ },
+ "testAuthenticate": {
+ 'args': [],
+ 'input': 'v1/test-authenticate-request.json#',
+ 'method': 'post',
+ 'name': 'testAuthenticate',
+ 'output': 'v1/test-authenticate-response.json#',
+ 'route': '/test-authenticate',
+ 'stability': 'stable',
+ },
+ "testAuthenticateGet": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'testAuthenticateGet',
+ 'output': 'v1/test-authenticate-response.json#',
+ 'route': '/test-authenticate-get/',
+ 'stability': 'stable',
+ },
+ "updateClient": {
+ 'args': ['clientId'],
+ 'input': 'v1/create-client-request.json#',
+ 'method': 'post',
+ 'name': 'updateClient',
+ 'output': 'v1/get-client-response.json#',
+ 'route': '/clients/<clientId>',
+ 'stability': 'stable',
+ },
+ "updateRole": {
+ 'args': ['roleId'],
+ 'input': 'v1/create-role-request.json#',
+ 'method': 'post',
+ 'name': 'updateRole',
+ 'output': 'v1/get-role-response.json#',
+ 'route': '/roles/<roleId>',
+ 'stability': 'stable',
+ },
+ "websocktunnelToken": {
+ 'args': ['wstAudience', 'wstClient'],
+ 'method': 'get',
+ 'name': 'websocktunnelToken',
+ 'output': 'v1/websocktunnel-token-response.json#',
+ 'route': '/websocktunnel/<wstAudience>/<wstClient>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Auth']
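+
+# A minimal, hedged usage sketch (not part of the generated API surface).
+# The rootUrl below is a placeholder, and credentials, where required, are
+# assumed to be supplied via the options dict; expandScopes itself only
+# needs the scopes listed in its payload.
+if __name__ == "__main__":
+    auth = Auth({'rootUrl': 'https://tc.example.com'})  # hypothetical rootUrl
+    # Post a scope set and print its expansion, per the funcinfo above.
+    expanded = auth.expandScopes({'scopes': ['assume:project:example/*']})
+    print(expanded['scopes'])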
diff --git a/third_party/python/taskcluster/taskcluster/generated/authevents.py b/third_party/python/taskcluster/taskcluster/generated/authevents.py
new file mode 100644
index 0000000000..23d7b1f5c7
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/authevents.py
@@ -0,0 +1,180 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class AuthEvents(BaseClient):
+ """
+ The auth service is responsible for storing credentials, managing the
+ assignment of scopes, and validating request signatures from other
+ services.
+
+ These exchanges provide notifications when credentials or roles are
+ updated. This is mostly so that multiple instances of the auth service
+ can purge their caches and synchronize state. But you are of course
+ welcome to use these for other purposes, such as monitoring changes.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-auth/v1/",
+ }
+ serviceName = 'auth'
+ apiVersion = 'v1'
+
+ def clientCreated(self, *args, **kwargs):
+ """
+ Client Created Messages
+
+ Message that a new client has been created.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-created',
+ 'name': 'clientCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def clientUpdated(self, *args, **kwargs):
+ """
+ Client Updated Messages
+
+ Message that a client has been updated.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-updated',
+ 'name': 'clientUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def clientDeleted(self, *args, **kwargs):
+ """
+ Client Deleted Messages
+
+ Message that a client has been deleted.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'client-deleted',
+ 'name': 'clientDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/client-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleCreated(self, *args, **kwargs):
+ """
+ Role Created Messages
+
+ Message that a new role has been created.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-created',
+ 'name': 'roleCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleUpdated(self, *args, **kwargs):
+ """
+ Role Updated Messages
+
+ Message that a role has been updated.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-updated',
+ 'name': 'roleUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def roleDeleted(self, *args, **kwargs):
+ """
+ Role Deleted Messages
+
+ Message that a role has been deleted.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'role-deleted',
+ 'name': 'roleDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/role-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'AuthEvents']
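+
+# Hedged sketch: each exchange method above returns a binding descriptor
+# (an exchange name plus a routing-key pattern, field names assumed from
+# the client's topic-exchange helper) for use with a pulse listener. The
+# rootUrl is a placeholder; calling with no arguments matches the
+# `reserved` key with `#`.
+if __name__ == "__main__":
+    events = AuthEvents({'rootUrl': 'https://tc.example.com'})
+    binding = events.clientCreated()
+    print(binding['exchange'], binding['routingKeyPattern'])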
diff --git a/third_party/python/taskcluster/taskcluster/generated/github.py b/third_party/python/taskcluster/taskcluster/generated/github.py
new file mode 100644
index 0000000000..88507d34a0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/github.py
@@ -0,0 +1,197 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Github(BaseClient):
+ """
+ The github service is responsible for creating tasks in response
+ to GitHub events, and posting results to the GitHub UI.
+
+ This document describes the API end-point for consuming GitHub
+ web hooks, as well as some useful consumer APIs.
+
+ When Github forbids an action, this service returns an HTTP 403
+ with code ForbiddenByGithub.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'github'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def githubWebHookConsumer(self, *args, **kwargs):
+ """
+ Consume GitHub WebHook
+
+ Capture a GitHub event and publish it via pulse, if it's a push,
+ release or pull request.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["githubWebHookConsumer"], *args, **kwargs)
+
+ def builds(self, *args, **kwargs):
+ """
+ List of Builds
+
+ A paginated list of builds that have been run in
+ Taskcluster. Can be filtered on various git-specific
+ fields.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["builds"], *args, **kwargs)
+
+ def badge(self, *args, **kwargs):
+ """
+ Latest Build Status Badge
+
+ Checks the status of the latest build of a given branch
+ and returns the corresponding badge SVG.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["badge"], *args, **kwargs)
+
+ def repository(self, *args, **kwargs):
+ """
+ Get Repository Info
+
+ Returns any repository metadata that is
+ useful within Taskcluster-related services.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["repository"], *args, **kwargs)
+
+ def latest(self, *args, **kwargs):
+ """
+ Latest Status for Branch
+
+ For a given branch of a repository, this will always point
+ to a status page for the most recent task triggered by that
+ branch.
+
+ Note: This is a redirect rather than a direct link.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["latest"], *args, **kwargs)
+
+ def createStatus(self, *args, **kwargs):
+ """
+ Post a status against a given changeset
+
+ For a given changeset (SHA) of a repository, this will attach a "commit status"
+ on github. These statuses are links displayed next to each revision.
+ The status is either OK (green check) or FAILURE (red cross),
+ made of a custom title and link.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["createStatus"], *args, **kwargs)
+
+ def createComment(self, *args, **kwargs):
+ """
+ Post a comment on a given GitHub Issue or Pull Request
+
+ For a given Issue or Pull Request of a repository, this will write a new message.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createComment"], *args, **kwargs)
+
+ funcinfo = {
+ "badge": {
+ 'args': ['owner', 'repo', 'branch'],
+ 'method': 'get',
+ 'name': 'badge',
+ 'route': '/repository/<owner>/<repo>/<branch>/badge.svg',
+ 'stability': 'experimental',
+ },
+ "builds": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'builds',
+ 'output': 'v1/build-list.json#',
+ 'query': ['continuationToken', 'limit', 'organization', 'repository', 'sha'],
+ 'route': '/builds',
+ 'stability': 'stable',
+ },
+ "createComment": {
+ 'args': ['owner', 'repo', 'number'],
+ 'input': 'v1/create-comment.json#',
+ 'method': 'post',
+ 'name': 'createComment',
+ 'route': '/repository/<owner>/<repo>/issues/<number>/comments',
+ 'stability': 'stable',
+ },
+ "createStatus": {
+ 'args': ['owner', 'repo', 'sha'],
+ 'input': 'v1/create-status.json#',
+ 'method': 'post',
+ 'name': 'createStatus',
+ 'route': '/repository/<owner>/<repo>/statuses/<sha>',
+ 'stability': 'experimental',
+ },
+ "githubWebHookConsumer": {
+ 'args': [],
+ 'method': 'post',
+ 'name': 'githubWebHookConsumer',
+ 'route': '/github',
+ 'stability': 'stable',
+ },
+ "latest": {
+ 'args': ['owner', 'repo', 'branch'],
+ 'method': 'get',
+ 'name': 'latest',
+ 'route': '/repository/<owner>/<repo>/<branch>/latest',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "repository": {
+ 'args': ['owner', 'repo'],
+ 'method': 'get',
+ 'name': 'repository',
+ 'output': 'v1/repository.json#',
+ 'route': '/repository/<owner>/<repo>',
+ 'stability': 'experimental',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Github']
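+
+# Hedged sketch of filtered listing against `builds`: the query-string
+# options come from funcinfo['builds']['query'] and are passed via the
+# `query` keyword. The rootUrl and organization below are placeholders.
+if __name__ == "__main__":
+    github = Github({'rootUrl': 'https://tc.example.com'})
+    page = github.builds(query={'organization': 'octo-org', 'limit': 10})
+    for build in page.get('builds', []):
+        print(build.get('repository'), build.get('state'))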
diff --git a/third_party/python/taskcluster/taskcluster/generated/githubevents.py b/third_party/python/taskcluster/taskcluster/generated/githubevents.py
new file mode 100644
index 0000000000..2bdfff2314
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/githubevents.py
@@ -0,0 +1,199 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class GithubEvents(BaseClient):
+ """
+ The github service publishes a pulse
+ message for supported github events, translating Github webhook
+ events into pulse messages.
+
+ This document describes the exchanges offered by the taskcluster
+ github service.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-github/v1/",
+ }
+ serviceName = 'github'
+ apiVersion = 'v1'
+
+ def pullRequest(self, *args, **kwargs):
+ """
+ GitHub Pull Request Event
+
+ When a GitHub pull request event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * action: The GitHub `action` which triggered an event. For possible values, see the payload actions property. (required)
+ """
+
+ ref = {
+ 'exchange': 'pull-request',
+ 'name': 'pullRequest',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'action',
+ },
+ ],
+ 'schema': 'v1/github-pull-request-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def push(self, *args, **kwargs):
+ """
+ GitHub push Event
+
+ When a GitHub push event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'push',
+ 'name': 'push',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/github-push-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def release(self, *args, **kwargs):
+ """
+ GitHub release Event
+
+ When a GitHub release event is posted it will be broadcast on this
+ exchange with the designated `organization` and `repository`
+ in the routing-key along with event specific metadata in the payload.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'release',
+ 'name': 'release',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/github-release-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskGroupCreationRequested(self, *args, **kwargs):
+ """
+ tc-gh requested the Queue service to create all the tasks in a group
+
+ Signals that the taskCreate API has been called for every task in the task group
+ for this particular repo and this particular organization; currently used for
+ creating initial status indicators in the GitHub UI using the Statuses API.
+ This particular exchange can also be bound to RabbitMQ queues by custom routes; for that,
+ pass in the array of routes as a second argument to the publish method. Currently, we do
+ use the statuses routes to bind the handler that creates the initial status.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `"primary"` for the formalized routing key. (required)
+
+ * organization: The GitHub `organization` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+
+ * repository: The GitHub `repository` which had an event. All periods have been replaced by % - such that foo.bar becomes foo%bar - and all other special characters aside from - and _ have been stripped. (required)
+ """
+
+ ref = {
+ 'exchange': 'task-group-creation-requested',
+ 'name': 'taskGroupCreationRequested',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'organization',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'repository',
+ },
+ ],
+ 'schema': 'v1/task-group-creation-requested.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'GithubEvents']
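+
+# Hedged sketch: routing-key fields are passed as keyword arguments, and
+# unspecified fields default to wildcards. The names are placeholders.
+if __name__ == "__main__":
+    events = GithubEvents({'rootUrl': 'https://tc.example.com'})
+    binding = events.push(organization='octo-org', repository='octo-repo')
+    print(binding['exchange'], binding['routingKeyPattern'])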
diff --git a/third_party/python/taskcluster/taskcluster/generated/hooks.py b/third_party/python/taskcluster/taskcluster/generated/hooks.py
new file mode 100644
index 0000000000..efaaf4b831
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/hooks.py
@@ -0,0 +1,300 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Hooks(BaseClient):
+ """
+ The hooks service provides a mechanism for creating tasks in response to events.
+
+ """
+
+ classOptions = {
+ }
+ serviceName = 'hooks'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def listHookGroups(self, *args, **kwargs):
+ """
+ List hook groups
+
+ This endpoint will return a list of all hook groups with at least one hook.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listHookGroups"], *args, **kwargs)
+
+ def listHooks(self, *args, **kwargs):
+ """
+ List hooks in a given group
+
+ This endpoint will return a list of all the hook definitions within a
+ given hook group.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listHooks"], *args, **kwargs)
+
+ def hook(self, *args, **kwargs):
+ """
+ Get hook definition
+
+ This endpoint will return the hook definition for the given `hookGroupId`
+ and `hookId`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["hook"], *args, **kwargs)
+
+ def getHookStatus(self, *args, **kwargs):
+ """
+ Get hook status
+
+ This endpoint will return the current status of the hook. This represents a
+ snapshot in time and may vary from one call to the next.
+
+ This method is deprecated in favor of listLastFires.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["getHookStatus"], *args, **kwargs)
+
+ def createHook(self, *args, **kwargs):
+ """
+ Create a hook
+
+ This endpoint will create a new hook.
+
+ The caller's credentials must include the role that will be used to
+ create the task. That role must satisfy task.scopes as well as the
+ necessary scopes to add the task to the queue.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createHook"], *args, **kwargs)
+
+ def updateHook(self, *args, **kwargs):
+ """
+ Update a hook
+
+ This endpoint will update an existing hook. All fields except
+ `hookGroupId` and `hookId` can be modified.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateHook"], *args, **kwargs)
+
+ def removeHook(self, *args, **kwargs):
+ """
+ Delete a hook
+
+ This endpoint will remove a hook definition.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["removeHook"], *args, **kwargs)
+
+ def triggerHook(self, *args, **kwargs):
+ """
+ Trigger a hook
+
+ This endpoint will trigger the creation of a task from a hook definition.
+
+ The HTTP payload must match the hook's `triggerSchema`. If it does, it is
+ provided as the `payload` property of the JSON-e context used to render the
+ task template.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["triggerHook"], *args, **kwargs)
+
+ def getTriggerToken(self, *args, **kwargs):
+ """
+ Get a trigger token
+
+ Retrieve a unique secret token for triggering the specified hook. This
+ token can be deactivated with `resetTriggerToken`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["getTriggerToken"], *args, **kwargs)
+
+ def resetTriggerToken(self, *args, **kwargs):
+ """
+ Reset a trigger token
+
+ Reset the token for triggering a given hook. This invalidates any token
+ that may have been issued via `getTriggerToken`, replacing it with a new token.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["resetTriggerToken"], *args, **kwargs)
+
+ def triggerHookWithToken(self, *args, **kwargs):
+ """
+ Trigger a hook with a token
+
+ This endpoint triggers a defined hook with a valid token.
+
+ The HTTP payload must match the hook's `triggerSchema`. If it does, it is
+ provided as the `payload` property of the JSON-e context used to render the
+ task template.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["triggerHookWithToken"], *args, **kwargs)
+
+ def listLastFires(self, *args, **kwargs):
+ """
+ Get information about recent hook fires
+
+ This endpoint will return information about the last few times this hook has been
+ fired, including whether the hook was fired successfully or not.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listLastFires"], *args, **kwargs)
+
+ funcinfo = {
+ "createHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/create-hook-request.json#',
+ 'method': 'put',
+ 'name': 'createHook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "getHookStatus": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'getHookStatus',
+ 'output': 'v1/hook-status.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/status',
+ 'stability': 'deprecated',
+ },
+ "getTriggerToken": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'getTriggerToken',
+ 'output': 'v1/trigger-token-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/token',
+ 'stability': 'stable',
+ },
+ "hook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'hook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "listHookGroups": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listHookGroups',
+ 'output': 'v1/list-hook-groups-response.json#',
+ 'route': '/hooks',
+ 'stability': 'stable',
+ },
+ "listHooks": {
+ 'args': ['hookGroupId'],
+ 'method': 'get',
+ 'name': 'listHooks',
+ 'output': 'v1/list-hooks-response.json#',
+ 'route': '/hooks/<hookGroupId>',
+ 'stability': 'stable',
+ },
+ "listLastFires": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'get',
+ 'name': 'listLastFires',
+ 'output': 'v1/list-lastFires-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/last-fires',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "removeHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'delete',
+ 'name': 'removeHook',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ "resetTriggerToken": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'method': 'post',
+ 'name': 'resetTriggerToken',
+ 'output': 'v1/trigger-token-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/token',
+ 'stability': 'stable',
+ },
+ "triggerHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/trigger-hook.json#',
+ 'method': 'post',
+ 'name': 'triggerHook',
+ 'output': 'v1/trigger-hook-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/trigger',
+ 'stability': 'stable',
+ },
+ "triggerHookWithToken": {
+ 'args': ['hookGroupId', 'hookId', 'token'],
+ 'input': 'v1/trigger-hook.json#',
+ 'method': 'post',
+ 'name': 'triggerHookWithToken',
+ 'output': 'v1/trigger-hook-response.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>/trigger/<token>',
+ 'stability': 'stable',
+ },
+ "updateHook": {
+ 'args': ['hookGroupId', 'hookId'],
+ 'input': 'v1/create-hook-request.json#',
+ 'method': 'post',
+ 'name': 'updateHook',
+ 'output': 'v1/hook-definition.json#',
+ 'route': '/hooks/<hookGroupId>/<hookId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Hooks']
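+
+# Hedged sketch of triggering a hook: positional args fill the route
+# parameters and the final dict is the JSON-e `payload`, which must match
+# the hook's triggerSchema. All identifiers are placeholders, and the call
+# requires credentials with the appropriate hooks scopes.
+if __name__ == "__main__":
+    hooks = Hooks({'rootUrl': 'https://tc.example.com'})
+    fired = hooks.triggerHook('my-group', 'my-hook', {'branch': 'main'})
+    print(fired)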
diff --git a/third_party/python/taskcluster/taskcluster/generated/hooksevents.py b/third_party/python/taskcluster/taskcluster/generated/hooksevents.py
new file mode 100644
index 0000000000..73e4a08c69
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/hooksevents.py
@@ -0,0 +1,101 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class HooksEvents(BaseClient):
+ """
+ The hooks service is responsible for creating tasks at specific times or in
+ response to webhooks and API calls. Using this exchange allows us to make
+ hooks which respond to particular pulse messages.
+
+ These exchanges provide notifications when a hook is created, updated
+ or deleted. This is so that the listener running in a different hooks process
+ at the other end can direct another listener specified by `hookGroupId` and
+ `hookId` to synchronize its bindings. But you are of course welcome to use
+ these for other purposes, monitoring changes for example.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-hooks/v1/",
+ }
+ serviceName = 'hooks'
+ apiVersion = 'v1'
+
+ def hookCreated(self, *args, **kwargs):
+ """
+ Hook Created Messages
+
+ Whenever the api receives a request to create a pulse-based hook, a message
+ is posted to this exchange and the receiver creates a listener with the
+ bindings, to create a task.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-created',
+ 'name': 'hookCreated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def hookUpdated(self, *args, **kwargs):
+ """
+ Hook Updated Messages
+
+ Whenever the api receives a request to update a pulse-based hook, a message
+ is posted to this exchange and the receiver updates the listener associated
+ with that hook.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-updated',
+ 'name': 'hookUpdated',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def hookDeleted(self, *args, **kwargs):
+ """
+ Hook Deleted Messages
+
+ Whenever the api receives a request to delete a pulse-based hook, a message
+ is posted to this exchange and the receiver deletes the listener associated
+ with that hook.
+
+ This exchange takes the following keys:
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'hook-deleted',
+ 'name': 'hookDeleted',
+ 'routingKey': [
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-hook-changed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'HooksEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/index.py b/third_party/python/taskcluster/taskcluster/generated/index.py
new file mode 100644
index 0000000000..627d16a150
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/index.py
@@ -0,0 +1,204 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Index(BaseClient):
+ """
+ The index service is responsible for indexing tasks. The service ensures that
+ tasks can be located by user-defined names.
+
+ As described in the service documentation, tasks are typically indexed via Pulse
+ messages, so the most common use of API methods is to read from the index.
+
+ Slashes (`/`) aren't allowed in index paths.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'index'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def findTask(self, *args, **kwargs):
+ """
+ Find Indexed Task
+
+ Find a task by index path, returning the highest-rank task with that path. If no
+ task exists for the given path, this API end-point will respond with a 404 status.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["findTask"], *args, **kwargs)
+
+ def listNamespaces(self, *args, **kwargs):
+ """
+ List Namespaces
+
+ List the namespaces immediately under a given namespace.
+
+ This endpoint
+ lists up to 1000 namespaces. If more namespaces are present, a
+ `continuationToken` will be returned, which can be given in the next
+ request. For the initial request, the payload should be an empty JSON
+ object.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listNamespaces"], *args, **kwargs)
+
+ def listTasks(self, *args, **kwargs):
+ """
+ List Tasks
+
+ List the tasks immediately under a given namespace.
+
+ This endpoint
+ lists up to 1000 tasks. If more tasks are present, a
+ `continuationToken` will be returned, which can be given in the next
+ request. For the initial request, the payload should be an empty JSON
+ object.
+
+ **Remark**: this end-point is designed for humans browsing for tasks, not
+ for services, as that use case makes little sense.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listTasks"], *args, **kwargs)
+
+ def insertTask(self, *args, **kwargs):
+ """
+ Insert Task into Index
+
+ Insert a task into the index. If the new rank is less than the existing rank
+ at the given index path, the task is not indexed but the response is still 200 OK.
+
+ Please see the introduction above for information
+ about indexing successfully completed tasks automatically using custom routes.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["insertTask"], *args, **kwargs)
+
+ def deleteTask(self, *args, **kwargs):
+ """
+ Remove Task from Index
+
+ Remove a task from the index. This is intended for administrative use,
+ where an index entry is no longer appropriate. The parent namespace is
+ not automatically deleted. Index entries with lower rank that were
+ previously inserted will not re-appear, as they were never stored.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteTask"], *args, **kwargs)
+
+ def findArtifactFromTask(self, *args, **kwargs):
+ """
+ Get Artifact From Indexed Task
+
+ Find a task by index path and redirect to the artifact on the most recent
+ run with the given `name`.
+
+ Note that multiple calls to this endpoint may return artifacts from different tasks
+ if a new task is inserted into the index between calls. Avoid using this method as
+ a stable link to multiple, connected files if the index path does not contain a
+ unique identifier. For example, the following two links may return unrelated files:
+ * https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/installer.exe
+ * https://tc.example.com/api/index/v1/task/some-app.win64.latest.installer/artifacts/public/debug-symbols.zip
+
+ This problem can be remedied by including the revision in the index path or by bundling both
+ installer and debug symbols into a single artifact.
+
+ If no task exists for the given index path, this API end-point responds with 404.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["findArtifactFromTask"], *args, **kwargs)
+
+ funcinfo = {
+ "deleteTask": {
+ 'args': ['namespace'],
+ 'method': 'delete',
+ 'name': 'deleteTask',
+ 'route': '/task/<namespace>',
+ 'stability': 'stable',
+ },
+ "findArtifactFromTask": {
+ 'args': ['indexPath', 'name'],
+ 'method': 'get',
+ 'name': 'findArtifactFromTask',
+ 'route': '/task/<indexPath>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "findTask": {
+ 'args': ['indexPath'],
+ 'method': 'get',
+ 'name': 'findTask',
+ 'output': 'v1/indexed-task-response.json#',
+ 'route': '/task/<indexPath>',
+ 'stability': 'stable',
+ },
+ "insertTask": {
+ 'args': ['namespace'],
+ 'input': 'v1/insert-task-request.json#',
+ 'method': 'put',
+ 'name': 'insertTask',
+ 'output': 'v1/indexed-task-response.json#',
+ 'route': '/task/<namespace>',
+ 'stability': 'stable',
+ },
+ "listNamespaces": {
+ 'args': ['namespace'],
+ 'method': 'get',
+ 'name': 'listNamespaces',
+ 'output': 'v1/list-namespaces-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/namespaces/<namespace>',
+ 'stability': 'stable',
+ },
+ "listTasks": {
+ 'args': ['namespace'],
+ 'method': 'get',
+ 'name': 'listTasks',
+ 'output': 'v1/list-tasks-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/tasks/<namespace>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Index']
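+
+# Hedged sketch: look up the highest-rank task at an index path, then list
+# the namespaces beneath a prefix using the documented query options. The
+# rootUrl and index paths are placeholders.
+if __name__ == "__main__":
+    index = Index({'rootUrl': 'https://tc.example.com'})
+    task = index.findTask('project.example.latest')
+    print(task['taskId'])
+    namespaces = index.listNamespaces('project', query={'limit': 100})
+    for ns in namespaces.get('namespaces', []):
+        print(ns['namespace'])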
diff --git a/third_party/python/taskcluster/taskcluster/generated/notify.py b/third_party/python/taskcluster/taskcluster/generated/notify.py
new file mode 100644
index 0000000000..c249782d2d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/notify.py
@@ -0,0 +1,207 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Notify(BaseClient):
+ """
+ The notification service listens for tasks with associated notifications
+ and handles requests to send emails and post pulse messages.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'notify'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def email(self, *args, **kwargs):
+ """
+ Send an Email
+
+ Send an email to `address`. The content is markdown and will be rendered
+ to HTML, but both the HTML and raw markdown text will be sent in the
+ email. If a link is included, it will be rendered to a nice button in the
+ HTML version of the email.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["email"], *args, **kwargs)
+
+ def pulse(self, *args, **kwargs):
+ """
+ Publish a Pulse Message
+
+ Publish a message on pulse with the given `routingKey`.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["pulse"], *args, **kwargs)
+
+ def matrix(self, *args, **kwargs):
+ """
+ Post Matrix Message
+
+ Post a message to a room in Matrix. Optionally includes a formatted message.
+
+ The `roomId` in the scopes is a fully formed `roomId` with leading `!` such
+ as `!foo:bar.com`.
+
+ Note that the matrix client used by taskcluster must be invited to a room before
+ it can post there!
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["matrix"], *args, **kwargs)
+
+ def slack(self, *args, **kwargs):
+ """
+ Post Slack Message
+
+ Post a message to a Slack channel.
+
+ The `channelId` in the scopes is a Slack channel ID, starting with a capital C.
+
+ The Slack app can post into public channels by default but will need to be added
+ to private channels before it can post messages there.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["slack"], *args, **kwargs)
+
+ def addDenylistAddress(self, *args, **kwargs):
+ """
+ Denylist Given Address
+
+ Add the given address to the notification denylist. Addresses in the denylist will be ignored
+ by the notification service.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["addDenylistAddress"], *args, **kwargs)
+
+ def deleteDenylistAddress(self, *args, **kwargs):
+ """
+ Delete Denylisted Address
+
+ Delete the specified address from the notification denylist.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteDenylistAddress"], *args, **kwargs)
+
+ def listDenylist(self, *args, **kwargs):
+ """
+ List Denylisted Notifications
+
+ Lists all the denylisted addresses.
+
+ By default this end-point will try to return up to 1000 addresses in one
+ request. But it **may return less**, even if more addresses are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `list` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the members at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["listDenylist"], *args, **kwargs)
+
+ funcinfo = {
+ "addDenylistAddress": {
+ 'args': [],
+ 'input': 'v1/notification-address.json#',
+ 'method': 'post',
+ 'name': 'addDenylistAddress',
+ 'route': '/denylist/add',
+ 'stability': 'experimental',
+ },
+ "deleteDenylistAddress": {
+ 'args': [],
+ 'input': 'v1/notification-address.json#',
+ 'method': 'delete',
+ 'name': 'deleteDenylistAddress',
+ 'route': '/denylist/delete',
+ 'stability': 'experimental',
+ },
+ "email": {
+ 'args': [],
+ 'input': 'v1/email-request.json#',
+ 'method': 'post',
+ 'name': 'email',
+ 'route': '/email',
+ 'stability': 'experimental',
+ },
+ "listDenylist": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listDenylist',
+ 'output': 'v1/notification-address-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/denylist/list',
+ 'stability': 'experimental',
+ },
+ "matrix": {
+ 'args': [],
+ 'input': 'v1/matrix-request.json#',
+ 'method': 'post',
+ 'name': 'matrix',
+ 'route': '/matrix',
+ 'stability': 'experimental',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "pulse": {
+ 'args': [],
+ 'input': 'v1/pulse-request.json#',
+ 'method': 'post',
+ 'name': 'pulse',
+ 'route': '/pulse',
+ 'stability': 'experimental',
+ },
+ "slack": {
+ 'args': [],
+ 'input': 'v1/slack-request.json#',
+ 'method': 'post',
+ 'name': 'slack',
+ 'route': '/slack',
+ 'stability': 'experimental',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Notify']
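+
+# Hedged sketch: the email payload mirrors v1/email-request.json# with an
+# address, subject, and markdown content. The address and rootUrl are
+# placeholders; sending requires the corresponding notify scopes.
+if __name__ == "__main__":
+    notify = Notify({'rootUrl': 'https://tc.example.com'})
+    notify.email({
+        'address': 'dev@example.com',
+        'subject': 'Nightly build finished',
+        'content': 'The nightly **build** completed successfully.',
+    })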
diff --git a/third_party/python/taskcluster/taskcluster/generated/notifyevents.py b/third_party/python/taskcluster/taskcluster/generated/notifyevents.py
new file mode 100644
index 0000000000..33a54e9b70
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/notifyevents.py
@@ -0,0 +1,68 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class NotifyEvents(BaseClient):
+ """
+ This exchange pretty much only contains the simple free-form
+ message that can be published from this service in response to a request
+ by anybody with the proper scopes.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-notify/v1/",
+ }
+ serviceName = 'notify'
+ apiVersion = 'v1'
+
+ def notify(self, *args, **kwargs):
+ """
+ Notification Messages
+
+ An arbitrary message that a taskcluster user
+ can trigger if they like.
+
+ The standard message, which we publish when watching
+ for the completion of tasks, is just the task status
+ data that we pull from the queue `status()` endpoint
+ when we notice a task is complete.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as our tooling does automatically if not specified.
+ """
+
+ ref = {
+ 'exchange': 'notification',
+ 'name': 'notify',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/notification-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'NotifyEvents']
diff --git a/third_party/python/taskcluster/taskcluster/generated/object.py b/third_party/python/taskcluster/taskcluster/generated/object.py
new file mode 100644
index 0000000000..d2baaa0ff4
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/object.py
@@ -0,0 +1,187 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Object(BaseClient):
+ """
+ The object service provides HTTP-accessible storage for large blobs of data.
+
+ Objects can be uploaded and downloaded, with the object data flowing directly
+ from the storage "backend" to the caller, rather than via this service.
+ Once uploaded, objects are immutable until their expiration time.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'object'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def createUpload(self, *args, **kwargs):
+ """
+ Begin upload of a new object
+
+ Create a new object by initiating upload of its data.
+
+ This endpoint implements negotiation of upload methods. It can be called
+ multiple times if necessary, either to propose new upload methods or to
+ renew credentials for an already-agreed upload.
+
+ The `name` parameter can contain any printable ASCII character (0x20 - 0x7e).
+ The `uploadId` must be supplied by the caller, and any attempts to upload
+ an object with the same name but a different `uploadId` will fail.
+ Thus the first call to this method establishes the `uploadId` for the
+ object, and as long as that value is kept secret, no other caller can
+ upload an object of that name, regardless of scopes. Object expiration
+ cannot be changed after the initial call, either. It is possible to call
+ this method with no proposed upload methods, which has the effect of "locking
+ in" the `expiration`, `projectId`, and `uploadId` properties and any
+ supplied hashes.
+
+ Unfinished uploads expire after 1 day.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createUpload"], *args, **kwargs)
+
+ def finishUpload(self, *args, **kwargs):
+ """
+ Mark an upload as complete.
+
+ This endpoint marks an upload as complete. This indicates that all data has been
+ transmitted to the backend. After this call, no further calls to `uploadObject` are
+ allowed, and downloads of the object may begin. This method is idempotent, but will
+ fail if given an incorrect uploadId for an unfinished upload.
+
+ Note that, once `finishUpload` is complete, the object is considered immutable.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["finishUpload"], *args, **kwargs)
+
+ def startDownload(self, *args, **kwargs):
+ """
+ Download object data
+
+ Start the process of downloading an object's data. Call this endpoint with a list of acceptable
+ download methods, and the server will select a method and return the corresponding payload.
+
+ Returns a 406 error if none of the given download methods are available.
+
+ See [Download Methods](https://docs.taskcluster.net/docs/reference/platform/object/download-methods) for more detail.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["startDownload"], *args, **kwargs)
+
+ def object(self, *args, **kwargs):
+ """
+ Get an object's metadata
+
+ Get the metadata for the named object. This metadata is not sufficient to
+ get the object's content; for that use `startDownload`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["object"], *args, **kwargs)
+
+ def download(self, *args, **kwargs):
+ """
+ Get an object's data
+
+ Get the data in an object directly. This method does not return a JSON body, but
+ redirects to a location that will serve the object content directly.
+
+ URLs for this endpoint, perhaps with attached authentication (`?bewit=..`),
+ are typically used for downloads of objects by simple HTTP clients such as
+ web browsers, curl, or wget.
+
+ This method is limited by the common capabilities of HTTP, so it may not be
+ the most efficient, resilient, or featureful way to retrieve an artifact.
+ Situations where such functionality is required should use the
+ `startDownload` API endpoint.
+
+ See [Simple Downloads](https://docs.taskcluster.net/docs/reference/platform/object/simple-downloads) for more detail.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["download"], *args, **kwargs)
+
+ funcinfo = {
+ "createUpload": {
+ 'args': ['name'],
+ 'input': 'v1/create-upload-request.json#',
+ 'method': 'put',
+ 'name': 'createUpload',
+ 'output': 'v1/create-upload-response.json#',
+ 'route': '/upload/<name>',
+ 'stability': 'stable',
+ },
+ "download": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'download',
+ 'route': '/download/<name>',
+ 'stability': 'stable',
+ },
+ "finishUpload": {
+ 'args': ['name'],
+ 'input': 'v1/finish-upload-request.json#',
+ 'method': 'post',
+ 'name': 'finishUpload',
+ 'route': '/finish-upload/<name>',
+ 'stability': 'stable',
+ },
+ "object": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'object',
+ 'output': 'v1/get-object-response.json#',
+ 'route': '/metadata/<name>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "startDownload": {
+ 'args': ['name'],
+ 'input': 'v1/download-object-request.json#',
+ 'method': 'put',
+ 'name': 'startDownload',
+ 'output': 'v1/download-object-response.json#',
+ 'route': '/start-download/<name>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Object']
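+
+# Hedged sketch of the download side: fetch metadata, then negotiate a
+# download method. The object name is a placeholder, and 'simple' is one of
+# the download methods described in the linked documentation.
+if __name__ == "__main__":
+    objects = Object({'rootUrl': 'https://tc.example.com'})
+    meta = objects.object('some/object/name')
+    print(meta)
+    method = objects.startDownload('some/object/name', {
+        'acceptDownloadMethods': {'simple': True},
+    })
+    print(method)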
diff --git a/third_party/python/taskcluster/taskcluster/generated/purgecache.py b/third_party/python/taskcluster/taskcluster/generated/purgecache.py
new file mode 100644
index 0000000000..659a087c4e
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/purgecache.py
@@ -0,0 +1,123 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class PurgeCache(BaseClient):
+ """
+ The purge-cache service is responsible for tracking cache-purge requests.
+
+ Users create purge requests for specific caches on specific workers, and
+ these requests are timestamped. Workers consult the service before
+ starting a new task, and purge any caches older than the timestamp.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'purge-cache'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def purgeCache(self, *args, **kwargs):
+ """
+ Purge Worker Cache
+
+ Publish a request to purge caches named `cacheName`
+ on `workerPoolId` workers.
+
+ If such a request already exists, its `before` timestamp is updated to
+ the current time.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["purgeCache"], *args, **kwargs)
+
+ def allPurgeRequests(self, *args, **kwargs):
+ """
+ All Open Purge Requests
+
+ View all active purge requests.
+
+ This is useful mostly for administrators to view
+ the set of open purge requests. It should not
+ be used by workers; they should use the purgeRequests
+ endpoint that is specific to their workerPoolId.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["allPurgeRequests"], *args, **kwargs)
+
+ def purgeRequests(self, *args, **kwargs):
+ """
+ Open Purge Requests for a worker pool
+
+ List the caches for this `workerPoolId` that should be
+ purged if they are from before the time given in the response.
+
+ This is intended to be used by workers to determine which caches to purge.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs)
+
+ funcinfo = {
+ "allPurgeRequests": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'allPurgeRequests',
+ 'output': 'v1/all-purge-cache-request-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/purge-cache/list',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "purgeCache": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/purge-cache-request.json#',
+ 'method': 'post',
+ 'name': 'purgeCache',
+ 'route': '/purge-cache/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "purgeRequests": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'purgeRequests',
+ 'output': 'v1/purge-cache-request-list.json#',
+ 'query': ['since'],
+ 'route': '/purge-cache/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'PurgeCache']
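+
+# Hedged sketch: post a purge request for one cache on a worker pool, then
+# read back the open requests a worker in that pool would consult. The pool
+# and cache names are placeholders.
+if __name__ == "__main__":
+    purge = PurgeCache({'rootUrl': 'https://tc.example.com'})
+    purge.purgeCache('proj/pool-a', {'cacheName': 'checkouts'})
+    open_requests = purge.purgeRequests('proj/pool-a')
+    print(open_requests.get('requests', []))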
diff --git a/third_party/python/taskcluster/taskcluster/generated/queue.py b/third_party/python/taskcluster/taskcluster/generated/queue.py
new file mode 100644
index 0000000000..990d00aeec
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/queue.py
@@ -0,0 +1,1120 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Queue(BaseClient):
+ """
+ The queue service is responsible for accepting tasks and tracking their state
+ as they are executed by workers, in order to ensure they are eventually
+ resolved.
+
+ ## Artifact Storage Types
+
+ * **Object artifacts** contain arbitrary data, stored via the object service.
+ * **Redirect artifacts**, will redirect the caller to a URL when fetched,
+ with a 303 (See Other) response. Clients will not apply any kind of
+ authentication to that URL.
+ * **Link artifacts**, will be treated as if the caller requested the linked
+ artifact on the same task. Links may be chained, but cycles are forbidden.
+ The caller must have scopes for the linked artifact, or a 403 response will
+ be returned.
+ * **Error artifacts**, only consist of meta-data which the queue will
+ store for you. These artifacts are only meant to indicate that the
+ worker or the task failed to generate a specific artifact that it
+ would otherwise have uploaded. For example, docker-worker will upload an
+ error artifact if the file it was supposed to upload doesn't exist or
+ turns out to be a directory. Clients requesting an error artifact will
+ get a `424` (Failed Dependency) response. This is mainly designed to
+ ensure that dependent tasks can distinguish between artifacts that were
+ supposed to be generated and artifacts for which the name is misspelled.
+ * **S3 artifacts** are used for static files which will be
+ stored on S3. When creating an S3 artifact the queue will return a
+ pre-signed URL to which you can do a `PUT` request to upload your
+ artifact. Note that the `PUT` request **must** specify the `content-length`
+ header and **must** give the `content-type` header the same value as in
+ the request to `createArtifact`. S3 artifacts will be deprecated soon,
+ and users should prefer object artifacts instead.
+
+ ## Artifact immutability
+
+ Generally speaking, you cannot overwrite an artifact once it is created.
+ But if you repeat the request with the same properties, the request will
+ succeed, as the operation is idempotent.
+ This is useful if you need to refresh a signed URL while uploading.
+ Do not abuse this to overwrite artifacts created by another entity,
+ such as a worker-host overwriting an artifact created by worker-code.
+
+ The queue defines the following *immutability special cases*:
+
+ * A `reference` artifact can replace an existing `reference` artifact.
+ * A `link` artifact can replace an existing `reference` artifact.
+ * Any artifact's `expires` can be extended (made later, but not earlier).
+ """
+
+ classOptions = {
+ }
+ serviceName = 'queue'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def task(self, *args, **kwargs):
+ """
+ Get Task Definition
+
+ This end-point will return the task-definition. Notice that the task
+ definition may have been modified by the queue: if an optional property is
+ not specified, the queue may provide a default value.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["task"], *args, **kwargs)
+
+ def status(self, *args, **kwargs):
+ """
+ Get task status
+
+ Get the task status structure for the given `taskId`
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["status"], *args, **kwargs)
+
+ def listTaskGroup(self, *args, **kwargs):
+ """
+ List Task Group
+
+ List tasks sharing the same `taskGroupId`.
+
+ As a task-group may contain an unbounded number of tasks, this end-point
+ may return a `continuationToken`. To continue listing tasks, you must call
+ `listTaskGroup` again with the `continuationToken` as the
+ query-string option `continuationToken`.
+
+ By default this end-point will try to return up to 1000 members in one
+ request. But it **may return less**, even if more tasks are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listTaskGroup` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the members at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listTaskGroup"], *args, **kwargs)
+
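+ # A pagination sketch: the taskGroupId is a placeholder, and query-string
+ # options are passed via the `query` keyword as in this Python client.
+ # The loop follows `continuationToken` until a page arrives without one,
+ # as described above.
+ #
+ #   import taskcluster
+ #   queue = taskcluster.Queue({'rootUrl': 'https://tc.example.com'})
+ #   tasks, token = [], None
+ #   while True:
+ #       query = {'limit': 100}
+ #       if token:
+ #           query['continuationToken'] = token
+ #       page = queue.listTaskGroup('aTaskGroupIdSlug', query=query)
+ #       tasks.extend(page['tasks'])
+ #       token = page.get('continuationToken')
+ #       if not token:
+ #           break
+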
+ def listDependentTasks(self, *args, **kwargs):
+ """
+ List Dependent Tasks
+
+ List tasks that depend on the given `taskId`.
+
+ As many tasks from different task-groups may depend on a single task,
+ this end-point may return a `continuationToken`. To continue listing
+ tasks, you must call `listDependentTasks` again with the
+ `continuationToken` as the query-string option `continuationToken`.
+
+ By default this end-point will try to return up to 1000 tasks in one
+ request. But it **may return less**, even if more tasks are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `listDependentTasks` with the last `continuationToken` until
+ you get a result without a `continuationToken`.
+
+ If you are not interested in listing all the tasks at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listDependentTasks"], *args, **kwargs)
+
+ def createTask(self, *args, **kwargs):
+ """
+ Create New Task
+
+ Create a new task, this is an **idempotent** operation, so repeat it if
+ you get an internal server error or network connection is dropped.
+
+ **Task `deadline`**: the deadline property can be no more than 5 days
+ into the future. This is to limit the amount of pending tasks not being
+ taken care of. Ideally, you should use a much shorter deadline.
+
+ **Task expiration**: the `expires` property must be greater than the
+ task `deadline`. If not provided it will default to `deadline` + one
+ year. Notice that artifacts created by a task must expire before the
+ task's expiration.
+
+ **Task specific routing-keys**: using the `task.routes` property you may
+ define task specific routing-keys. If a task has a task specific
+ routing-key: `<route>`, then when the AMQP message about the task is
+ published, the message will be CC'ed with the routing-key:
+ `route.<route>`. This is useful if you want another component to listen
+ for completed tasks you have posted. The caller must have scope
+ `queue:route:<route>` for each route.
+
+ **Dependencies**: any tasks referenced in `task.dependencies` must have
+ already been created at the time of this call.
+
+ **Scopes**: Note that the scopes required to complete this API call depend
+ on the content of the `scopes`, `routes`, `schedulerId`, `priority`,
+ `provisionerId`, and `workerType` properties of the task definition.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createTask"], *args, **kwargs)
+
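+ # A hedged task-creation sketch: all values are illustrative, and the
+ # exact payload is governed by v1/create-task-request.json#. Credentials
+ # are read from the environment here.
+ #
+ #   import taskcluster
+ #   queue = taskcluster.Queue(taskcluster.optionsFromEnvironment())
+ #   taskId = taskcluster.slugId()
+ #   queue.createTask(taskId, {
+ #       'provisionerId': 'my-provisioner',
+ #       'workerType': 'my-worker-type',
+ #       'created': taskcluster.fromNowJSON('0 seconds'),
+ #       'deadline': taskcluster.fromNowJSON('2 hours'),
+ #       'payload': {},
+ #       'metadata': {'name': 'example', 'description': 'an example task',
+ #                    'owner': 'user@example.com',
+ #                    'source': 'https://example.com/source'},
+ #   })
+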
+ def scheduleTask(self, *args, **kwargs):
+ """
+ Schedule Defined Task
+
+ scheduleTask will schedule a task to be executed, even if it has
+ unresolved dependencies. A task would otherwise only be scheduled if
+ its dependencies were resolved.
+
+ This is useful if you have defined a task that depends on itself or on
+ some other task that has not been resolved, but you wish the task to be
+ scheduled immediately.
+
+ This will announce the task as pending and workers will be allowed to
+ claim it and resolve the task.
+
+ **Note** this operation is **idempotent** and will not fail or complain
+ if called with a `taskId` that is already scheduled, or even resolved.
+ To reschedule a task previously resolved, use `rerunTask`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["scheduleTask"], *args, **kwargs)
+
+ def rerunTask(self, *args, **kwargs):
+ """
+ Rerun a Resolved Task
+
+ This method _reruns_ a previously resolved task, even if it was
+ _completed_. This is useful if your task completes unsuccessfully, and
+ you just want to run it from scratch again. This will also reset the
+ number of `retries` allowed. It will schedule a task that is _unscheduled_
+ regardless of the state of its dependencies.
+
+ This method is deprecated in favour of creating a new task with the same
+ task definition (but with a new taskId).
+
+ Remember that `retries` in the task status counts the number of runs that
+ the queue has started because the worker stopped responding, for example
+ because a spot node died.
+
+ **Remark** this operation is idempotent: if it is invoked for a task that
+ is `pending` or `running`, it will just return the current task status.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["rerunTask"], *args, **kwargs)
+
+ def cancelTask(self, *args, **kwargs):
+ """
+ Cancel Task
+
+ This method will cancel a task that is either `unscheduled`, `pending` or
+ `running`. It will resolve the current run as `exception` with
+ `reasonResolved` set to `canceled`. If the task isn't scheduled yet, i.e.
+ it doesn't have any runs, an initial run will be added and resolved as
+ described above. Hence, after canceling a task, it cannot be scheduled
+ with `queue.scheduleTask`, but a new run can be created with
+ `queue.rerunTask`. These semantics are equivalent to calling
+ `queue.scheduleTask` immediately followed by `queue.cancelTask`.
+
+ **Remark**: this operation is idempotent; if you try to cancel a task that
+ isn't `unscheduled`, `pending` or `running`, this operation will just
+ return the current task status.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["cancelTask"], *args, **kwargs)
+
+ def claimWork(self, *args, **kwargs):
+ """
+ Claim Work
+
+ Claim pending task(s) for the given task queue.
+
+ If any work is available (even if fewer than the requested number of
+ tasks), this will return immediately. Otherwise, it will block for tens of
+ seconds waiting for work. If no work appears, it will return an empty
+ list of tasks. Callers should sleep a short while (to avoid denial of
+ service in an error condition) and call the endpoint again. This is a
+ simple implementation of "long polling".
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["claimWork"], *args, **kwargs)
+
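+ # A long-polling sketch (identifiers are illustrative; the request body
+ # follows v1/claim-work-request.json#, where `tasks` caps how many claims
+ # are returned per call):
+ #
+ #   import time
+ #   import taskcluster
+ #   queue = taskcluster.Queue(taskcluster.optionsFromEnvironment())
+ #   while True:
+ #       work = queue.claimWork('my-provisioner/my-worker-type', {
+ #           'workerGroup': 'my-group', 'workerId': 'worker-1', 'tasks': 4})
+ #       if not work['tasks']:
+ #           time.sleep(5)  # short sleep before polling again, as advised
+ #           continue
+ #       for claim in work['tasks']:
+ #           pass  # execute claim['task'], using claim['credentials']
+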
+ def claimTask(self, *args, **kwargs):
+ """
+ Claim Task
+
+ claim a task - never documented
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["claimTask"], *args, **kwargs)
+
+ def reclaimTask(self, *args, **kwargs):
+ """
+ Reclaim task
+
+ Refresh the claim for a specific `runId` for the given `taskId`. This updates
+ the `takenUntil` property and returns a new set of temporary credentials
+ for performing requests on behalf of the task. These credentials should
+ be used in place of the credentials returned by `claimWork`.
+
+ The `reclaimTask` request serves to:
+ * Postpone `takenUntil`, preventing the queue from resolving
+ `claim-expired`,
+ * Refresh temporary credentials used for processing the task, and
+ * Abort execution if the task/run has been resolved.
+
+ If the `takenUntil` timestamp is exceeded, the queue will resolve the run
+ as _exception_ with reason `claim-expired`, and proceed to retry the
+ task. This ensures that tasks are retried, even if workers disappear
+ without warning.
+
+ If the task is resolved, this end-point will return `409` reporting
+ `RequestConflict`. This typically happens if the task has been canceled
+ or the `task.deadline` has been exceeded. If reclaiming fails, workers
+ should abort the task and forget about the given `runId`. There is no
+ need to resolve the run or upload artifacts.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reclaimTask"], *args, **kwargs)
+
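+ # A reclaim sketch: call shortly before `takenUntil`, swap in the new
+ # credentials, and abort on a 409 (task resolved). `taskId`/`runId` come
+ # from the earlier claim; the exception handling assumes the client's
+ # TaskclusterRestFailure carries a `status_code` attribute.
+ #
+ #   import taskcluster
+ #   try:
+ #       reclaim = queue.reclaimTask(taskId, runId)
+ #       credentials = reclaim['credentials']  # use instead of claim creds
+ #   except taskcluster.exceptions.TaskclusterRestFailure as e:
+ #       if e.status_code == 409:
+ #           pass  # task resolved: abort the run, skip artifact uploads
+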
+ def reportCompleted(self, *args, **kwargs):
+ """
+ Report Run Completed
+
+ Report a task completed, resolving the run as `completed`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reportCompleted"], *args, **kwargs)
+
+ def reportFailed(self, *args, **kwargs):
+ """
+ Report Run Failed
+
+ Report a run failed, resolving the run as `failed`. Use this to resolve
+ a run that failed because the task specific code behaved unexpectedly.
+ For example, the task exited non-zero or didn't produce the expected output.
+
+ Do not use this if the task couldn't be run because of a malformed
+ payload, or some other unexpected condition. In these cases we have a task
+ exception, which should be reported with `reportException`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reportFailed"], *args, **kwargs)
+
+ def reportException(self, *args, **kwargs):
+ """
+ Report Task Exception
+
+ Resolve a run as _exception_. Generally, you will want to report tasks as
+ failed instead of exception. You should `reportException` if,
+
+ * The `task.payload` is invalid,
+ * Non-existent resources are referenced,
+ * Declared actions cannot be executed due to unavailable resources,
+ * The worker had to shutdown prematurely,
+ * The worker experienced an unknown error, or,
+ * The task explicitly requested a retry.
+
+ Do not use this to signal that some user-specified code crashed for any
+ reason specific to this code. If user-specified code hits a resource that
+ is temporarily unavailable, the worker should report the task _failed_.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reportException"], *args, **kwargs)
+
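+ # A sketch of resolving a run as exception, reusing a `queue` client as
+ # above; the `reason` values are defined by
+ # v1/task-exception-request.json# ('malformed-payload' and
+ # 'worker-shutdown' are typical examples):
+ #
+ #   queue.reportException(taskId, runId, {'reason': 'malformed-payload'})
+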
+ def createArtifact(self, *args, **kwargs):
+ """
+ Create Artifact
+
+ This API end-point creates an artifact for a specific run of a task. This
+ should **only** be used by a worker currently operating on this task, or
+ from a process running within the task (i.e. on the worker).
+
+ All artifacts must specify when they expire. The queue will
+ automatically take care of deleting artifacts past their
+ expiration point. This feature makes it feasible to upload large
+ intermediate artifacts from data processing applications, as the
+ artifacts can be set to expire a few days later.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createArtifact"], *args, **kwargs)
+
+ def finishArtifact(self, *args, **kwargs):
+ """
+ Finish Artifact
+
+ This endpoint marks an artifact as present for the given task, and
+ should be called when the artifact data is fully uploaded.
+
+ The storage types `reference`, `link`, and `error` do not need to
+ be finished, as they are finished immediately by `createArtifact`.
+ The storage type `s3` does not support this functionality and cannot
+ be finished. In all such cases, calling this method is an input error
+ (400).
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["finishArtifact"], *args, **kwargs)
+
+ def getArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Data from Run
+
+ Get artifact by `<name>` from a specific run.
+
+ **Artifact Access**, in order to get an artifact you need the scope
+ `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+ To allow access to fetch artifacts with a client like `curl` or a web
+ browser, without using Taskcluster credentials, include a scope in the
+ `anonymous` role. The convention is to include
+ `queue:get-artifact:public/*`.
+
+ **Response**: the HTTP response to this method is a 303 redirect to the
+ URL from which the artifact can be downloaded. The body of that response
+ contains the data described in the output schema, containing the same URL.
+ Callers are encouraged to use whichever method of gathering the URL is
+ most convenient. Standard HTTP clients will follow the redirect, while
+ API client libraries will return the JSON body.
+
+ In order to download an artifact the following must be done:
+
+ 1. Obtain the queue URL. Building a signed URL with a taskcluster client is
+ recommended
+ 1. Make a GET request which does not follow redirects
+ 1. In all cases, if specified, the
+ x-taskcluster-location-{content,transfer}-{sha256,length} values must be
+ validated to be equal to the Content-Length and Sha256 checksum of the
+ final artifact downloaded, as well as any intermediate redirects
+ 1. If this response is a 500-series error, retry using an exponential
+ backoff. No more than 5 retries should be attempted
+ 1. If this response is a 400-series error, treat it appropriately for
+ your context. This might be an error in responding to this request or
+ an Error storage type body. This request should not be retried.
+ 1. If this response is a 200-series response, the response body is the artifact.
+ If the x-taskcluster-location-{content,transfer}-{sha256,length} and
+ x-taskcluster-location-content-encoding are specified, they should match
+ this response body
+ 1. If the response type is a 300-series redirect, the artifact will be at the
+ location specified by the `Location` header. There are multiple artifact storage
+ types which use a 300-series redirect.
+ 1. For all redirects followed, the user must verify that the content-sha256, content-length,
+ transfer-sha256, transfer-length and content-encoding match every further request. The final
+ artifact must also be validated against the values specified in the original queue response
+ 1. Caching of requests with an x-taskcluster-artifact-storage-type value of `reference`
+ must not occur
+
+ **Headers**
+ The following important headers are set on the response to this method:
+
+ * location: the url of the artifact if a redirect is to be performed
+ * x-taskcluster-artifact-storage-type: the storage type. Example: s3
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["getArtifact"], *args, **kwargs)
+
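+ # A download sketch using a signed URL (via the base client's
+ # buildSignedUrl helper; the artifact name and use of `requests` are
+ # illustrative). Standard HTTP clients follow the 303 redirect
+ # automatically.
+ #
+ #   import requests
+ #   import taskcluster
+ #   queue = taskcluster.Queue(taskcluster.optionsFromEnvironment())
+ #   url = queue.buildSignedUrl('getArtifact', taskId, runId,
+ #                              'public/build/target.tar.gz')
+ #   r = requests.get(url, allow_redirects=True)
+ #   r.raise_for_status()  # 4xx: do not retry; 5xx: retry with backoff
+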
+ def getLatestArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Data from Latest Run
+
+ Get artifact by `<name>` from the last run of a task.
+
+ **Artifact Access**, in order to get an artifact you need the scope
+ `queue:get-artifact:<name>`, where `<name>` is the name of the artifact.
+ To allow access to fetch artifacts with a client like `curl` or a web
+ browser, without using Taskcluster credentials, include a scope in the
+ `anonymous` role. The convention is to include
+ `queue:get-artifact:public/*`.
+
+ **API Clients**, this method will redirect you to the artifact, if it is
+ stored externally. Either way, the response may not be JSON. So API
+ client users might want to generate a signed URL for this end-point and
+ use that URL with a normal HTTP client.
+
+ **Remark**, this end-point is slightly slower than
+ `queue.getArtifact`, so consider that if you already know the `runId` of
+ the latest run. Otherwise, just use the most convenient API end-point.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["getLatestArtifact"], *args, **kwargs)
+
+ def listArtifacts(self, *args, **kwargs):
+ """
+ Get Artifacts from Run
+
+ Returns a list of artifacts and associated meta-data for a given run.
+
+ As a task may have many artifacts, paging may be necessary. If this
+ end-point returns a `continuationToken`, you should call the end-point
+ again with the `continuationToken` as the query-string option
+ `continuationToken`.
+
+ By default this end-point will list up to 1000 artifacts in a single page;
+ you may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listArtifacts"], *args, **kwargs)
+
+ def listLatestArtifacts(self, *args, **kwargs):
+ """
+ Get Artifacts from Latest Run
+
+ Returns a list of artifacts and associated meta-data for the latest run
+ from the given task.
+
+ As a task may have many artifacts, paging may be necessary. If this
+ end-point returns a `continuationToken`, you should call the end-point
+ again with the `continuationToken` as the query-string option
+ `continuationToken`.
+
+ By default this end-point will list up to 1000 artifacts in a single page;
+ you may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listLatestArtifacts"], *args, **kwargs)
+
+ def artifactInfo(self, *args, **kwargs):
+ """
+ Get Artifact Information From Run
+
+ Returns associated metadata for a given artifact, in the given task run.
+ The metadata is the same as that returned from `listArtifacts`, and does
+ not grant access to the artifact data.
+
+ Note that this method does *not* automatically follow link artifacts.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["artifactInfo"], *args, **kwargs)
+
+ def latestArtifactInfo(self, *args, **kwargs):
+ """
+ Get Artifact Information From Latest Run
+
+ Returns associated metadata for a given artifact, in the latest run of the
+ task. The metadata is the same as that returned from `listArtifacts`,
+ and does not grant access to the artifact data.
+
+ Note that this method does *not* automatically follow link artifacts.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["latestArtifactInfo"], *args, **kwargs)
+
+ def artifact(self, *args, **kwargs):
+ """
+ Get Artifact Content From Run
+
+ Returns information about the content of the artifact, in the given task run.
+
+ Depending on the storage type, the endpoint returns the content of the artifact
+ or enough information to access that content.
+
+ This method follows link artifacts, so it will not return content
+ for a link artifact.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["artifact"], *args, **kwargs)
+
+ def latestArtifact(self, *args, **kwargs):
+ """
+ Get Artifact Content From Latest Run
+
+ Returns information about the content of the artifact, in the latest task run.
+
+ Depending on the storage type, the endpoint returns the content of the artifact
+ or enough information to access that content.
+
+ This method follows link artifacts, so it will not return content
+ for a link artifact.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["latestArtifact"], *args, **kwargs)
+
+ def listProvisioners(self, *args, **kwargs):
+ """
+ Get a list of all active provisioners
+
+ Get all active provisioners.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 provisioners in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["listProvisioners"], *args, **kwargs)
+
+ def getProvisioner(self, *args, **kwargs):
+ """
+ Get an active provisioner
+
+ Get an active provisioner.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["getProvisioner"], *args, **kwargs)
+
+ def declareProvisioner(self, *args, **kwargs):
+ """
+ Update a provisioner
+
+ Declare a provisioner, supplying some details about it.
+
+ `declareProvisioner` allows updating one or more properties of a provisioner as long as the required scopes are
+ possessed. For example, a request to update the `my-provisioner`
+ provisioner with a body `{description: 'This provisioner is great'}` would require you to have the scope
+ `queue:declare-provisioner:my-provisioner#description`.
+
+ The term "provisioner" is taken broadly to mean anything with a provisionerId.
+ This does not necessarily mean there is an associated service performing any
+ provisioning activity.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["declareProvisioner"], *args, **kwargs)
+
+ def pendingTasks(self, *args, **kwargs):
+ """
+ Get Number of Pending Tasks
+
+ Get an approximate number of pending tasks for the given `taskQueueId`.
+
+ The underlying Azure Storage Queues only promise to give us an estimate.
+ Furthermore, we cache the result in memory for 20 seconds. So consumers
+ should by no means expect this to be an accurate number.
+ It is, however, a solid estimate of the number of pending tasks.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["pendingTasks"], *args, **kwargs)
+
+ def listWorkerTypes(self, *args, **kwargs):
+ """
+ Get a list of all active worker-types
+
+ Get all active worker-types for the given provisioner.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 worker-types in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkerTypes"], *args, **kwargs)
+
+ def getWorkerType(self, *args, **kwargs):
+ """
+ Get a worker-type
+
+ Get a worker-type from a provisioner.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["getWorkerType"], *args, **kwargs)
+
+ def declareWorkerType(self, *args, **kwargs):
+ """
+ Update a worker-type
+
+ Declare a workerType, supplying some details about it.
+
+ `declareWorkerType` allows updating one or more properties of a worker-type as long as the required scopes are
+ possessed. For example, a request to update the `highmem` worker-type within the `my-provisioner`
+ provisioner with a body `{description: 'This worker type is great'}` would require you to have the scope
+ `queue:declare-worker-type:my-provisioner/highmem#description`.
+
+ This method is ``deprecated``
+ """
+
+ return self._makeApiCall(self.funcinfo["declareWorkerType"], *args, **kwargs)
+
+ def listTaskQueues(self, *args, **kwargs):
+ """
+ Get a list of all active task queues
+
+ Get all active task queues.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 task queues in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listTaskQueues"], *args, **kwargs)
+
+ def getTaskQueue(self, *args, **kwargs):
+ """
+ Get a task queue
+
+ Get a task queue.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["getTaskQueue"], *args, **kwargs)
+
+ def listWorkers(self, *args, **kwargs):
+ """
+ Get a list of all active workers of a workerType
+
+ Get a list of all active workers of a workerType.
+
+ `listWorkers` allows a response to be filtered by quarantined and non-quarantined workers.
+ To filter the query, you should call the end-point with `quarantined` as a query-string option with a
+ true or false value.
+
+ The response is paged. If this end-point returns a `continuationToken`, you
+ should call the end-point again with the `continuationToken` as a query-string
+ option. By default this end-point will list up to 1000 workers in a single
+ page. You may limit this with the query-string parameter `limit`.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkers"], *args, **kwargs)
+
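+ # A filtering sketch: `quarantined` is passed as a query-string option
+ # alongside the pagination options (string values assumed), reusing a
+ # `queue` client as above:
+ #
+ #   workers = queue.listWorkers('my-provisioner', 'my-worker-type',
+ #                               query={'quarantined': 'true'})
+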
+ def getWorker(self, *args, **kwargs):
+ """
+ Get a worker
+
+ Get a worker from a worker-type.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["getWorker"], *args, **kwargs)
+
+ def quarantineWorker(self, *args, **kwargs):
+ """
+ Quarantine a worker
+
+ Quarantine a worker
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs)
+
+ def declareWorker(self, *args, **kwargs):
+ """
+ Declare a worker
+
+ Declare a worker, supplying some details about it.
+
+ `declareWorker` allows updating one or more properties of a worker as long as the required scopes are
+ possessed.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["declareWorker"], *args, **kwargs)
+
+ funcinfo = {
+ "artifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'artifact',
+ 'output': 'v1/artifact-content-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifact-content/<name>',
+ 'stability': 'stable',
+ },
+ "artifactInfo": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'artifactInfo',
+ 'output': 'v1/artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifact-info/<name>',
+ 'stability': 'stable',
+ },
+ "cancelTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'cancelTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/cancel',
+ 'stability': 'stable',
+ },
+ "claimTask": {
+ 'args': ['taskId', 'runId'],
+ 'input': 'v1/task-claim-request.json#',
+ 'method': 'post',
+ 'name': 'claimTask',
+ 'output': 'v1/task-claim-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/claim',
+ 'stability': 'deprecated',
+ },
+ "claimWork": {
+ 'args': ['taskQueueId'],
+ 'input': 'v1/claim-work-request.json#',
+ 'method': 'post',
+ 'name': 'claimWork',
+ 'output': 'v1/claim-work-response.json#',
+ 'route': '/claim-work/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "createArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'input': 'v1/post-artifact-request.json#',
+ 'method': 'post',
+ 'name': 'createArtifact',
+ 'output': 'v1/post-artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "createTask": {
+ 'args': ['taskId'],
+ 'input': 'v1/create-task-request.json#',
+ 'method': 'put',
+ 'name': 'createTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>',
+ 'stability': 'stable',
+ },
+ "declareProvisioner": {
+ 'args': ['provisionerId'],
+ 'input': 'v1/update-provisioner-request.json#',
+ 'method': 'put',
+ 'name': 'declareProvisioner',
+ 'output': 'v1/provisioner-response.json#',
+ 'route': '/provisioners/<provisionerId>',
+ 'stability': 'deprecated',
+ },
+ "declareWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'input': 'v1/update-worker-request.json#',
+ 'method': 'put',
+ 'name': 'declareWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "declareWorkerType": {
+ 'args': ['provisionerId', 'workerType'],
+ 'input': 'v1/update-workertype-request.json#',
+ 'method': 'put',
+ 'name': 'declareWorkerType',
+ 'output': 'v1/workertype-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
+ 'stability': 'deprecated',
+ },
+ "finishArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'input': 'v1/finish-artifact-request.json#',
+ 'method': 'put',
+ 'name': 'finishArtifact',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getArtifact": {
+ 'args': ['taskId', 'runId', 'name'],
+ 'method': 'get',
+ 'name': 'getArtifact',
+ 'output': 'v1/get-artifact-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getLatestArtifact": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'getLatestArtifact',
+ 'output': 'v1/get-artifact-response.json#',
+ 'route': '/task/<taskId>/artifacts/<name>',
+ 'stability': 'stable',
+ },
+ "getProvisioner": {
+ 'args': ['provisionerId'],
+ 'method': 'get',
+ 'name': 'getProvisioner',
+ 'output': 'v1/provisioner-response.json#',
+ 'route': '/provisioners/<provisionerId>',
+ 'stability': 'deprecated',
+ },
+ "getTaskQueue": {
+ 'args': ['taskQueueId'],
+ 'method': 'get',
+ 'name': 'getTaskQueue',
+ 'output': 'v1/taskqueue-response.json#',
+ 'route': '/task-queues/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "getWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'method': 'get',
+ 'name': 'getWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "getWorkerType": {
+ 'args': ['provisionerId', 'workerType'],
+ 'method': 'get',
+ 'name': 'getWorkerType',
+ 'output': 'v1/workertype-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>',
+ 'stability': 'deprecated',
+ },
+ "latestArtifact": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'latestArtifact',
+ 'output': 'v1/artifact-content-response.json#',
+ 'route': '/task/<taskId>/artifact-content/<name>',
+ 'stability': 'stable',
+ },
+ "latestArtifactInfo": {
+ 'args': ['taskId', 'name'],
+ 'method': 'get',
+ 'name': 'latestArtifactInfo',
+ 'output': 'v1/artifact-response.json#',
+ 'route': '/task/<taskId>/artifact-info/<name>',
+ 'stability': 'stable',
+ },
+ "listArtifacts": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'get',
+ 'name': 'listArtifacts',
+ 'output': 'v1/list-artifacts-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/runs/<runId>/artifacts',
+ 'stability': 'stable',
+ },
+ "listDependentTasks": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'listDependentTasks',
+ 'output': 'v1/list-dependent-tasks-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/dependents',
+ 'stability': 'stable',
+ },
+ "listLatestArtifacts": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'listLatestArtifacts',
+ 'output': 'v1/list-artifacts-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task/<taskId>/artifacts',
+ 'stability': 'stable',
+ },
+ "listProvisioners": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listProvisioners',
+ 'output': 'v1/list-provisioners-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/provisioners',
+ 'stability': 'deprecated',
+ },
+ "listTaskGroup": {
+ 'args': ['taskGroupId'],
+ 'method': 'get',
+ 'name': 'listTaskGroup',
+ 'output': 'v1/list-task-group-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task-group/<taskGroupId>/list',
+ 'stability': 'stable',
+ },
+ "listTaskQueues": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listTaskQueues',
+ 'output': 'v1/list-taskqueues-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/task-queues',
+ 'stability': 'stable',
+ },
+ "listWorkerTypes": {
+ 'args': ['provisionerId'],
+ 'method': 'get',
+ 'name': 'listWorkerTypes',
+ 'output': 'v1/list-workertypes-response.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/provisioners/<provisionerId>/worker-types',
+ 'stability': 'deprecated',
+ },
+ "listWorkers": {
+ 'args': ['provisionerId', 'workerType'],
+ 'method': 'get',
+ 'name': 'listWorkers',
+ 'output': 'v1/list-workers-response.json#',
+ 'query': ['continuationToken', 'limit', 'quarantined'],
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers',
+ 'stability': 'experimental',
+ },
+ "pendingTasks": {
+ 'args': ['taskQueueId'],
+ 'method': 'get',
+ 'name': 'pendingTasks',
+ 'output': 'v1/pending-tasks-response.json#',
+ 'route': '/pending/<taskQueueId>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "quarantineWorker": {
+ 'args': ['provisionerId', 'workerType', 'workerGroup', 'workerId'],
+ 'input': 'v1/quarantine-worker-request.json#',
+ 'method': 'put',
+ 'name': 'quarantineWorker',
+ 'output': 'v1/worker-response.json#',
+ 'route': '/provisioners/<provisionerId>/worker-types/<workerType>/workers/<workerGroup>/<workerId>',
+ 'stability': 'experimental',
+ },
+ "reclaimTask": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reclaimTask',
+ 'output': 'v1/task-reclaim-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/reclaim',
+ 'stability': 'stable',
+ },
+ "reportCompleted": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reportCompleted',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/completed',
+ 'stability': 'stable',
+ },
+ "reportException": {
+ 'args': ['taskId', 'runId'],
+ 'input': 'v1/task-exception-request.json#',
+ 'method': 'post',
+ 'name': 'reportException',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/exception',
+ 'stability': 'stable',
+ },
+ "reportFailed": {
+ 'args': ['taskId', 'runId'],
+ 'method': 'post',
+ 'name': 'reportFailed',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/runs/<runId>/failed',
+ 'stability': 'stable',
+ },
+ "rerunTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'rerunTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/rerun',
+ 'stability': 'stable',
+ },
+ "scheduleTask": {
+ 'args': ['taskId'],
+ 'method': 'post',
+ 'name': 'scheduleTask',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/schedule',
+ 'stability': 'stable',
+ },
+ "status": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'status',
+ 'output': 'v1/task-status-response.json#',
+ 'route': '/task/<taskId>/status',
+ 'stability': 'stable',
+ },
+ "task": {
+ 'args': ['taskId'],
+ 'method': 'get',
+ 'name': 'task',
+ 'output': 'v1/task.json#',
+ 'route': '/task/<taskId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Queue']
diff --git a/third_party/python/taskcluster/taskcluster/generated/queueevents.py b/third_party/python/taskcluster/taskcluster/generated/queueevents.py
new file mode 100644
index 0000000000..fec36671f0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/queueevents.py
@@ -0,0 +1,719 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class QueueEvents(BaseClient):
+ """
+ The queue service is responsible for accepting tasks and tracking their state
+ as they are executed by workers, in order to ensure they are eventually
+ resolved.
+
+ This document describes AMQP exchanges offered by the queue, which allow
+ third-party listeners to monitor tasks as they progress to resolution.
+ These exchanges target the following audiences:
+ * Schedulers, who take action after tasks are completed,
+ * Workers, who want to listen for new or canceled tasks (optional),
+ * Tools that want to update their view as tasks progress.
+
+ You'll notice that all the exchanges in the document share the same
+ routing key pattern. This makes it very easy to bind to all messages
+ about a certain kind of task.
+
+ **Task specific routes**: a task can define a task-specific route using
+ the `task.routes` property. See the task creation documentation for details
+ on the permissions required to provide task-specific routes. If a task has
+ the entry `'notify.by-email'` as a task-specific route defined in
+ `task.routes`, all messages about this task will be CC'ed with the
+ routing-key `'route.notify.by-email'`.
+
+ These routes will always be prefixed `route.`, so they cannot interfere
+ with the _primary_ routing key as documented here. Notice that the
+ _primary_ routing key is always prefixed `primary.`. This is ensured
+ in the routing key reference, so API clients will do this automatically.
+
+ Please note that, the way RabbitMQ works, the message will only arrive
+ in your queue once, even though you may have bound to the exchange with
+ multiple routing key patterns that match more than one of the CC'ed
+ routing keys.
+
+ **Delivery guarantees**: most operations on the queue are idempotent,
+ which means that if repeated with the same arguments then the requests
+ will ensure completion of the operation and return the same response.
+ This is useful if the server crashes or the TCP connection breaks, but
+ when re-executing an idempotent operation, the queue will also resend
+ any related AMQP messages. Hence, messages may be repeated.
+
+ This shouldn't be much of a problem, as the best you can achieve using
+ confirm messages with AMQP is at-least-once delivery semantics. Hence,
+ this only prevents you from obtaining at-most-once delivery semantics.
+
+ **Remark**: some messages generated by timeouts may be dropped if the
+ server crashes at the wrong time. Ideally, we'll address this in the
+ future. For now we suggest you ignore this corner case, and notify us
+ if this corner case is of concern to you.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-queue/v1/",
+ }
+ serviceName = 'queue'
+ apiVersion = 'v1'
+
+ def taskDefined(self, *args, **kwargs):
+ """
+ Task Defined Messages
+
+ When a task is created or just defined, a message is posted to this
+ exchange.
+
+ This message exchange is mainly useful when tasks are created with dependencies
+ on incomplete tasks, as this does not make the task
+ `pending`. Thus, no `taskPending` message is published.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-defined',
+ 'name': 'taskDefined',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-defined-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskPending(self, *args, **kwargs):
+ """
+ Task Pending Messages
+
+ When a task becomes `pending` a message is posted to this exchange.
+
+ This is useful for workers who don't want to constantly poll the queue
+ for new tasks. The queue will also be the authority for task states and
+ claims. But using this exchange, workers should be able to distribute work
+ efficiently, and they would be able to reduce their polling interval
+ significantly without affecting general responsiveness.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-pending',
+ 'name': 'taskPending',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-pending-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
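+ # A binding sketch: exchange methods make no API call; they return the
+ # exchange name and a routing-key pattern to bind with in your own AMQP
+ # consumer. The returned field names here are assumptions, and unset
+ # routing-key entries default to wildcards.
+ #
+ #   import taskcluster
+ #   qe = taskcluster.QueueEvents({'rootUrl': 'https://tc.example.com'})
+ #   binding = qe.taskPending({'provisionerId': 'my-provisioner',
+ #                             'workerType': 'my-worker-type'})
+ #   print(binding['exchange'], binding['routingKeyPattern'])
+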
+ def taskRunning(self, *args, **kwargs):
+ """
+ Task Running Messages
+
+ Whenever a task is claimed by a worker, a run is started on the worker,
+ and a message is posted on this exchange.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-running',
+ 'name': 'taskRunning',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-running-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def artifactCreated(self, *args, **kwargs):
+ """
+ Artifact Creation Messages
+
+ Whenever the `createArtifact` end-point is called, the queue will create
+ a record of the artifact and post a message on this exchange. All of this
+ happens before the queue returns a signed URL for the caller to upload
+ the actual artifact with (pending on `storageType`).
+
+ This means that the actual artifact is rarely available when this message
+ is posted. But it is not unreasonable to assume that the artifact
+ will become available at some point later. Most signatures will expire in
+ 30 minutes or so, forcing the uploader to call `createArtifact` with
+ the same payload again in order to continue uploading the artifact.
+
+ However, in most cases (especially for small artifacts) it's very
+ reasonable to assume the artifact will be available within a few minutes.
+ This property means that this exchange is mostly useful for tools
+ monitoring task evaluation. One could also use it to count the number of
+ artifacts per task, or to _index_ artifacts, though in most cases it'll be
+ smarter to index artifacts after the task in question has completed
+ successfully.
+
+ *NOTE*: this message is currently only sent for reference and error
+ artifacts. This will be remedied in a future version of Taskcluster.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'artifact-created',
+ 'name': 'artifactCreated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/artifact-created-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskCompleted(self, *args, **kwargs):
+ """
+ Task Completed Messages
+
+ When a task is successfully completed by a worker, a message is posted
+ to this exchange.
+ This message is routed using the `runId`, `workerGroup` and `workerId`
+ that completed the task. But information about additional runs is also
+ available from the task status structure.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task. (required)
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-completed',
+ 'name': 'taskCompleted',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-completed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskFailed(self, *args, **kwargs):
+ """
+ Task Failed Messages
+
+ When a task ran but failed to complete successfully, a message is posted
+ to this exchange. This means the worker ran the task-specific code, but the
+ task-specific code exited non-zero.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-failed',
+ 'name': 'taskFailed',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-failed-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskException(self, *args, **kwargs):
+ """
+ Task Exception Messages
+
+ Whenever Taskcluster fails to run a task, a message is posted to this exchange.
+ This happens if the task isn't completed before its `deadline`,
+ all retries failed (i.e. workers stopped responding), the task was
+ canceled by another entity, or the task carried a malformed payload.
+
+ The specific _reason_ is evident from the task status structure; refer
+ to the `reasonResolved` property for the last run.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskId: `taskId` for the task this message concerns (required)
+
+ * runId: `runId` of latest run for the task, `_` if no run exists for the task.
+
+ * workerGroup: `workerGroup` of latest run for the task, `_` if no run exists for the task.
+
+ * workerId: `workerId` of latest run for the task, `_` if no run exists for the task.
+
+ * provisionerId: `provisionerId` this task is targeted at. (required)
+
+ * workerType: `workerType` this task must run on. (required)
+
+ * schedulerId: `schedulerId` this task was created by. (required)
+
+ * taskGroupId: `taskGroupId` this task was created in. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`, as is done automatically by our tooling if not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-exception',
+ 'name': 'taskException',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'runId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerGroup',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'provisionerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'workerType',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-exception-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def taskGroupResolved(self, *args, **kwargs):
+ """
+ Task Group Resolved Messages
+
+ A message is published on task-group-resolved whenever all submitted
+ tasks (whether scheduled or unscheduled) for a given task group have
+ been resolved, regardless of whether they resolved as successful or
+ not. A task group may be resolved multiple times, since new tasks may
+ be submitted against an already resolved task group.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * taskGroupId: `taskGroupId` for the task-group this message concerns (required)
+
+ * schedulerId: `schedulerId` for the task-group this message concerns (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. Our tooling does this automatically if it is not specified.
+ """
+
+ ref = {
+ 'exchange': 'task-group-resolved',
+ 'name': 'taskGroupResolved',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'taskGroupId',
+ },
+ {
+ 'multipleWords': False,
+ 'name': 'schedulerId',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/task-group-resolved.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'QueueEvents']
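A minimal usage sketch (not part of the generated file; the rootUrl and taskGroupId are placeholders): each exchange method only builds a binding reference locally, which a Pulse consumer can then use. The `exchange`/`routingKeyPattern` keys below follow the client's `_makeTopicExchange` helper.

```python
import taskcluster

# Placeholder deployment URL; building a binding makes no network calls.
queue_events = taskcluster.QueueEvents({'rootUrl': 'https://tc.example.com'})

# Match task-exception messages for one task group; routing-key fields that
# are not given are filled with wildcards, and `reserved` matches '#'.
binding = queue_events.taskException(taskGroupId='eD1zTRpiQsuyzCKSRYWM6g')

print(binding['exchange'])           # full exchange name with the v1 prefix
print(binding['routingKeyPattern'])  # pattern selecting this task group only
```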
diff --git a/third_party/python/taskcluster/taskcluster/generated/secrets.py b/third_party/python/taskcluster/taskcluster/generated/secrets.py
new file mode 100644
index 0000000000..8482a678ba
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/secrets.py
@@ -0,0 +1,143 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class Secrets(BaseClient):
+ """
+ The secrets service provides a simple key/value store for small bits of secret
+ data. Access is limited by scopes, so values can be considered secret from
+ those who do not have the relevant scopes.
+
+ Secrets also have an expiration date, and once a secret has expired it can no
+ longer be read. This is useful for short-term secrets such as a temporary
+ service credential or a one-time signing key.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'secrets'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def set(self, *args, **kwargs):
+ """
+ Set Secret
+
+ Set the secret associated with some key. If the secret already exists, it is
+ updated instead.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["set"], *args, **kwargs)
+
+ def remove(self, *args, **kwargs):
+ """
+ Delete Secret
+
+ Delete the secret associated with some key. It will succeed whether or not the secret exists.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["remove"], *args, **kwargs)
+
+ def get(self, *args, **kwargs):
+ """
+ Read Secret
+
+ Read the secret associated with some key. If the secret has recently
+ expired, the response code 410 is returned. If the caller lacks the
+ scope necessary to get the secret, the call will fail with a 403 code
+ regardless of whether the secret exists.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["get"], *args, **kwargs)
+
+ def list(self, *args, **kwargs):
+ """
+ List Secrets
+
+ List the names of all secrets.
+
+ By default this end-point will try to return up to 1000 secret names in one
+ request. But it **may return fewer**, even if more secrets are available.
+ It may also return a `continuationToken` even though there are no more
+ results. However, you can only be sure to have seen all results if you
+ keep calling `list` with the last `continuationToken` until you
+ get a result without a `continuationToken`.
+
+ If you are not interested in listing all the secrets at once, you may
+ use the query-string option `limit` to return fewer.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["list"], *args, **kwargs)
+
+ funcinfo = {
+ "get": {
+ 'args': ['name'],
+ 'method': 'get',
+ 'name': 'get',
+ 'output': 'v1/secret.json#',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ "list": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'list',
+ 'output': 'v1/secret-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/secrets',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "remove": {
+ 'args': ['name'],
+ 'method': 'delete',
+ 'name': 'remove',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ "set": {
+ 'args': ['name'],
+ 'input': 'v1/secret.json#',
+ 'method': 'put',
+ 'name': 'set',
+ 'route': '/secret/<name>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'Secrets']
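A brief usage sketch (assumptions: credentials carrying the relevant `secrets:*` scopes, and the client's `query=` keyword for query-string options; names and values are placeholders):

```python
import taskcluster

secrets = taskcluster.Secrets(taskcluster.optionsFromEnvironment())

# set() takes the payload described by v1/secret.json#: a value plus expiry.
secrets.set('project/example/token', {
    'secret': {'token': 'hunter2'},
    'expires': taskcluster.fromNowJSON('1 hour'),
})

value = secrets.get('project/example/token')['secret']

# Page through list() until no continuationToken is returned.
names, token = [], None
while True:
    query = {'continuationToken': token} if token else {}
    result = secrets.list(query=query)
    names.extend(result['secrets'])
    token = result.get('continuationToken')
    if not token:
        break
```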
diff --git a/third_party/python/taskcluster/taskcluster/generated/workermanager.py b/third_party/python/taskcluster/taskcluster/generated/workermanager.py
new file mode 100644
index 0000000000..4d8dd7a010
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/workermanager.py
@@ -0,0 +1,406 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class WorkerManager(BaseClient):
+ """
+ This service manages workers, including provisioning for dynamic worker pools.
+
+ Methods interacting with a provider may return a 503 response if that provider has
+ not been able to start up, such as if the service to which it interfaces has an
+ outage. Such requests can be retried as for any other 5xx response.
+ """
+
+ classOptions = {
+ }
+ serviceName = 'worker-manager'
+ apiVersion = 'v1'
+
+ def ping(self, *args, **kwargs):
+ """
+ Ping Server
+
+ Respond without doing anything.
+ This endpoint is used to check that the service is up.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["ping"], *args, **kwargs)
+
+ def listProviders(self, *args, **kwargs):
+ """
+ List Providers
+
+ Retrieve a list of providers that are available for worker pools.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listProviders"], *args, **kwargs)
+
+ def createWorkerPool(self, *args, **kwargs):
+ """
+ Create Worker Pool
+
+ Create a new worker pool. If the worker pool already exists, this will throw an error.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createWorkerPool"], *args, **kwargs)
+
+ def updateWorkerPool(self, *args, **kwargs):
+ """
+ Update Worker Pool
+
+ Given an existing worker pool definition, this will modify it and return
+ the new definition.
+
+ To delete a worker pool, set its `providerId` to `"null-provider"`.
+ After any existing workers have exited, a cleanup job will remove the
+ worker pool. During that time, the worker pool can be updated again, such
+ as to set its `providerId` to a real provider.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateWorkerPool"], *args, **kwargs)
+
+ def deleteWorkerPool(self, *args, **kwargs):
+ """
+ Delete Worker Pool
+
+ Mark a worker pool for deletion. This is the same as updating the pool to
+ set its providerId to `"null-provider"`, but does not require scope
+ `worker-manager:provider:null-provider`.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["deleteWorkerPool"], *args, **kwargs)
+
+ def workerPool(self, *args, **kwargs):
+ """
+ Get Worker Pool
+
+ Fetch an existing worker pool definition.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["workerPool"], *args, **kwargs)
+
+ def listWorkerPools(self, *args, **kwargs):
+ """
+ List All Worker Pools
+
+ Get the list of all the existing worker pools.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkerPools"], *args, **kwargs)
+
+ def reportWorkerError(self, *args, **kwargs):
+ """
+ Report an error from a worker
+
+ Report an error that occurred on a worker. This error will be included
+ with the other errors in `listWorkerPoolErrors(workerPoolId)`.
+
+ Workers can use this endpoint to report startup or configuration errors
+ that might be associated with the worker pool configuration and thus of
+ interest to a worker-pool administrator.
+
+ NOTE: errors are publicly visible. Ensure that none of the content
+ contains secrets or other sensitive information.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["reportWorkerError"], *args, **kwargs)
+
+ def listWorkerPoolErrors(self, *args, **kwargs):
+ """
+ List Worker Pool Errors
+
+ Get the list of worker pool errors.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkerPoolErrors"], *args, **kwargs)
+
+ def listWorkersForWorkerGroup(self, *args, **kwargs):
+ """
+ Workers in a specific Worker Group in a Worker Pool
+
+ Get the list of all the existing workers in a given group in a given worker pool.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkersForWorkerGroup"], *args, **kwargs)
+
+ def worker(self, *args, **kwargs):
+ """
+ Get a Worker
+
+ Get a single worker.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["worker"], *args, **kwargs)
+
+ def createWorker(self, *args, **kwargs):
+ """
+ Create a Worker
+
+ Create a new worker. This is only useful for worker pools where the provider
+ does not create workers automatically, such as those with a `static` provider
+ type. Providers that do not support creating workers will return a 400 error.
+ See the documentation for the individual providers, and in particular the
+ [static provider](https://docs.taskcluster.net/docs/reference/core/worker-manager/)
+ for more information.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["createWorker"], *args, **kwargs)
+
+ def updateWorker(self, *args, **kwargs):
+ """
+ Update an existing Worker
+
+ Update an existing worker in-place. Like `createWorker`, this is only useful for
+ worker pools where the provider does not create workers automatically.
+ This method allows updating all fields in the schema unless otherwise indicated
+ in the provider documentation.
+ See the documentation for the individual providers, and in particular the
+ [static provider](https://docs.taskcluster.net/docs/reference/core/worker-manager/)
+ for more information.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["updateWorker"], *args, **kwargs)
+
+ def removeWorker(self, *args, **kwargs):
+ """
+ Remove a Worker
+
+ Remove an existing worker. The precise behavior of this method depends
+ on the provider implementing the given worker. Some providers
+ do not support removing workers at all, and will return a 400 error.
+ Others may begin removing the worker, but it may remain available via
+ the API (perhaps even in state RUNNING) afterward.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["removeWorker"], *args, **kwargs)
+
+ def listWorkersForWorkerPool(self, *args, **kwargs):
+ """
+ Workers in a Worker Pool
+
+ Get the list of all the existing workers in a given worker pool.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["listWorkersForWorkerPool"], *args, **kwargs)
+
+ def registerWorker(self, *args, **kwargs):
+ """
+ Register a running worker
+
+ Register a running worker. Workers call this method on worker start-up.
+
+ This call both marks the worker as running and returns the credentials
+ the worker will require to perform its work. The worker must provide
+ some proof of its identity, and that proof varies by provider type.
+
+ This method is ``stable``
+ """
+
+ return self._makeApiCall(self.funcinfo["registerWorker"], *args, **kwargs)
+
+ def reregisterWorker(self, *args, **kwargs):
+ """
+ Reregister a Worker
+
+ Reregister a running worker.
+
+ This will generate and return new Taskcluster credentials for the worker
+ on that instance to use. The credentials will not live longer than the
+ `registrationTimeout` for that worker. The endpoint will update `terminateAfter`
+ for the worker so that worker-manager does not terminate the instance.
+
+ This method is ``experimental``
+ """
+
+ return self._makeApiCall(self.funcinfo["reregisterWorker"], *args, **kwargs)
+
+ funcinfo = {
+ "createWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'input': 'v1/create-worker-request.json#',
+ 'method': 'put',
+ 'name': 'createWorker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "createWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/create-worker-pool-request.json#',
+ 'method': 'put',
+ 'name': 'createWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "deleteWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'delete',
+ 'name': 'deleteWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "listProviders": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listProviders',
+ 'output': 'v1/provider-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/providers',
+ 'stability': 'stable',
+ },
+ "listWorkerPoolErrors": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'listWorkerPoolErrors',
+ 'output': 'v1/worker-pool-error-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/worker-pool-errors/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "listWorkerPools": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'listWorkerPools',
+ 'output': 'v1/worker-pool-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/worker-pools',
+ 'stability': 'stable',
+ },
+ "listWorkersForWorkerGroup": {
+ 'args': ['workerPoolId', 'workerGroup'],
+ 'method': 'get',
+ 'name': 'listWorkersForWorkerGroup',
+ 'output': 'v1/worker-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/workers/<workerPoolId>:/<workerGroup>',
+ 'stability': 'stable',
+ },
+ "listWorkersForWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'listWorkersForWorkerPool',
+ 'output': 'v1/worker-list.json#',
+ 'query': ['continuationToken', 'limit'],
+ 'route': '/workers/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "ping": {
+ 'args': [],
+ 'method': 'get',
+ 'name': 'ping',
+ 'route': '/ping',
+ 'stability': 'stable',
+ },
+ "registerWorker": {
+ 'args': [],
+ 'input': 'v1/register-worker-request.json#',
+ 'method': 'post',
+ 'name': 'registerWorker',
+ 'output': 'v1/register-worker-response.json#',
+ 'route': '/worker/register',
+ 'stability': 'stable',
+ },
+ "removeWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'method': 'delete',
+ 'name': 'removeWorker',
+ 'route': '/workers/<workerPoolId>/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "reportWorkerError": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/report-worker-error-request.json#',
+ 'method': 'post',
+ 'name': 'reportWorkerError',
+ 'output': 'v1/worker-pool-error.json#',
+ 'route': '/worker-pool-errors/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ "reregisterWorker": {
+ 'args': [],
+ 'input': 'v1/reregister-worker-request.json#',
+ 'method': 'post',
+ 'name': 'reregisterWorker',
+ 'output': 'v1/reregister-worker-response.json#',
+ 'route': '/worker/reregister',
+ 'stability': 'experimental',
+ },
+ "updateWorker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'input': 'v1/create-worker-request.json#',
+ 'method': 'post',
+ 'name': 'updateWorker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "updateWorkerPool": {
+ 'args': ['workerPoolId'],
+ 'input': 'v1/update-worker-pool-request.json#',
+ 'method': 'post',
+ 'name': 'updateWorkerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'experimental',
+ },
+ "worker": {
+ 'args': ['workerPoolId', 'workerGroup', 'workerId'],
+ 'method': 'get',
+ 'name': 'worker',
+ 'output': 'v1/worker-full.json#',
+ 'route': '/workers/<workerPoolId>:/<workerGroup>/<workerId>',
+ 'stability': 'stable',
+ },
+ "workerPool": {
+ 'args': ['workerPoolId'],
+ 'method': 'get',
+ 'name': 'workerPool',
+ 'output': 'v1/worker-pool-full.json#',
+ 'route': '/worker-pool/<workerPoolId>',
+ 'stability': 'stable',
+ },
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'WorkerManager']
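A minimal sketch of the pool lifecycle (the pool ID and payload fields are illustrative assumptions against `v1/create-worker-pool-request.json#`, not a schema reference):

```python
import taskcluster

wm = taskcluster.WorkerManager(taskcluster.optionsFromEnvironment())

# Assumed payload shape for a static pool; consult the JSON schema for the
# authoritative field list.
wm.createWorkerPool('proj-example/ci', {
    'providerId': 'static',
    'description': 'example pool',
    'owner': 'nobody@example.com',
    'emailOnError': False,
    'config': {},
})

# Mark the pool for deletion; equivalent to updating providerId to
# "null-provider", but without requiring that provider scope.
wm.deleteWorkerPool('proj-example/ci')
```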
diff --git a/third_party/python/taskcluster/taskcluster/generated/workermanagerevents.py b/third_party/python/taskcluster/taskcluster/generated/workermanagerevents.py
new file mode 100644
index 0000000000..b9a7ce2062
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/generated/workermanagerevents.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+#####################################################
+# THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT #
+#####################################################
+# noqa: E128,E201
+from ..client import BaseClient
+from ..client import createApiClient
+from ..client import config
+from ..client import createTemporaryCredentials
+from ..client import createSession
+_defaultConfig = config
+
+
+class WorkerManagerEvents(BaseClient):
+ """
+ These exchanges provide notifications when a worker pool is created or updated. This is so that the provisioner running in a different process at the other end can synchronize to the changes. But you are of course welcome to use these for other purposes, monitoring changes for example.
+ """
+
+ classOptions = {
+ "exchangePrefix": "exchange/taskcluster-worker-manager/v1/",
+ }
+ serviceName = 'worker-manager'
+ apiVersion = 'v1'
+
+ def workerPoolCreated(self, *args, **kwargs):
+ """
+ Worker Pool Created Messages
+
+ Whenever the api receives a request to create a worker pool, a message is posted to this exchange and a provider can act upon it.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. Our tooling does this automatically if it is not specified.
+ """
+
+ ref = {
+ 'exchange': 'worker-pool-created',
+ 'name': 'workerPoolCreated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-worker-pool-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ def workerPoolUpdated(self, *args, **kwargs):
+ """
+ Worker Pool Updated Messages
+
+ Whenever the api receives a request to update a worker pool, a message is posted to this exchange and a provider can act upon it.
+
+ This exchange takes the following keys:
+
+ * routingKeyKind: Identifier for the routing-key kind. This is always `'primary'` for the formalized routing key. (required)
+
+ * reserved: Space reserved for future routing-key entries; you should always match this entry with `#`. Our tooling does this automatically if it is not specified.
+ """
+
+ ref = {
+ 'exchange': 'worker-pool-updated',
+ 'name': 'workerPoolUpdated',
+ 'routingKey': [
+ {
+ 'constant': 'primary',
+ 'multipleWords': False,
+ 'name': 'routingKeyKind',
+ },
+ {
+ 'multipleWords': True,
+ 'name': 'reserved',
+ },
+ ],
+ 'schema': 'v1/pulse-worker-pool-message.json#',
+ }
+ return self._makeTopicExchange(ref, *args, **kwargs)
+
+ funcinfo = {
+ }
+
+
+__all__ = ['createTemporaryCredentials', 'config', '_defaultConfig', 'createApiClient', 'createSession', 'WorkerManagerEvents']
diff --git a/third_party/python/taskcluster/taskcluster/github.py b/third_party/python/taskcluster/taskcluster/github.py
new file mode 100644
index 0000000000..2d47274ee4
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/github.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.github import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/githubevents.py b/third_party/python/taskcluster/taskcluster/githubevents.py
new file mode 100644
index 0000000000..e53249bd3b
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/githubevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.githubevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/helper.py b/third_party/python/taskcluster/taskcluster/helper.py
new file mode 100644
index 0000000000..7fec5d5acc
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/helper.py
@@ -0,0 +1,185 @@
+# -*- coding: utf-8 -*-
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import os
+
+import datetime
+import logging
+import requests
+from taskcluster.generated import _client_importer
+from taskcluster.generated.aio import _client_importer as _async_client_importer
+from taskcluster.utils import stringDate
+import urllib.parse
+
+logger = logging.getLogger(__name__)
+
+
+class TaskclusterConfig(object):
+ """
+ Local configuration used to access Taskcluster services and objects
+ """
+
+ def __init__(self, url=None):
+ self.options = None
+ self.secrets = None
+ self.default_url = url if url is not None else os.environ.get("TASKCLUSTER_ROOT_URL")
+ assert self.default_url is not None, "You must specify a Taskcluster deployment url"
+
+ def auth(self, client_id=None, access_token=None, max_retries=12):
+ """
+ Build Taskcluster credentials options
+ Supports, in order of preference:
+ * directly provided credentials
+ * credentials from environment variables
+ * taskclusterProxy
+ * no authentication
+ """
+ self.options = {"maxRetries": max_retries}
+
+ if client_id is None and access_token is None:
+ # Credentials preference: Use env. variables
+ client_id = os.environ.get("TASKCLUSTER_CLIENT_ID")
+ access_token = os.environ.get("TASKCLUSTER_ACCESS_TOKEN")
+ logger.info("Using taskcluster credentials from environment")
+ else:
+ logger.info("Using taskcluster credentials from cli")
+
+ if client_id is not None and access_token is not None:
+ # Use provided credentials
+ self.options["credentials"] = {
+ "clientId": client_id,
+ "accessToken": access_token,
+ }
+ self.options["rootUrl"] = self.default_url
+
+ elif "TASK_ID" in os.environ:
+ # Use Taskcluster Proxy when running in a task
+ logger.info("Taskcluster Proxy enabled")
+ self.options["rootUrl"] = os.environ.get("TASKCLUSTER_PROXY_URL", "http://taskcluster")
+
+ else:
+ logger.info("No Taskcluster authentication.")
+ self.options["rootUrl"] = self.default_url
+
+ def get_service(self, service_name, use_async=False):
+ """
+ Build a Taskcluster service instance using current authentication
+ """
+ if self.options is None:
+ self.auth()
+
+ client_importer = _async_client_importer if use_async else _client_importer
+ service = getattr(client_importer, service_name.capitalize(), None)
+ assert service is not None, "Invalid Taskcluster service {}".format(
+ service_name
+ )
+ return service(self.options)
+
+ def load_secrets(
+ self, secret_name, prefixes=[], required=[], existing={}, local_secrets=None
+ ):
+ """Shortcut to use load_secrets helper with current authentication"""
+ self.secrets = load_secrets(
+ self.get_service('secrets'),
+ secret_name,
+ prefixes,
+ required,
+ existing,
+ local_secrets,
+ )
+ return self.secrets
+
+ def upload_artifact(self, artifact_path, content, content_type, ttl):
+ """Shortcut to use upload_artifact helper with current authentication"""
+ path = upload_artifact(
+ self.get_service('queue'),
+ artifact_path,
+ content,
+ content_type,
+ ttl,
+ )
+
+ return urllib.parse.urljoin(self.default_url, path)
+
+
+def load_secrets(
+ secrets_service, secret_name, prefixes=[], required=[], existing={}, local_secrets=None
+):
+ """
+ Fetch a specific set of secrets by name and verify that the required
+ secrets exist.
+ Also supports providing local secrets to avoid using the remote Taskcluster
+ service for local development (or contributor onboarding).
+ A user can specify prefixes to limit the part of secrets used (useful when a
+ secret is shared amongst several services).
+ """
+ secrets = {}
+ if existing:
+ secrets.update(existing)
+
+ if isinstance(local_secrets, dict):
+ # Use local secrets file to avoid using Taskcluster secrets
+ logger.info("Using provided local secrets")
+ all_secrets = local_secrets
+ else:
+ # Use Taskcluster secret service
+ assert secret_name is not None, "Missing Taskcluster secret secret_name"
+ all_secrets = secrets_service.get(secret_name).get("secret", dict())
+ logger.info("Loaded Taskcluster secret {}".format(secret_name))
+
+ if prefixes:
+ # Use secrets behind supported prefixes
+ for prefix in prefixes:
+ secrets.update(all_secrets.get(prefix, dict()))
+
+ else:
+ # Use all secrets available
+ secrets.update(all_secrets)
+
+ # Check required secrets
+ for required_secret in required:
+ if required_secret not in secrets:
+ raise Exception("Missing value {} in secrets.".format(required_secret))
+
+ return secrets
+
+
+def upload_artifact(queue_service, artifact_path, content, content_type, ttl):
+ """
+ DEPRECATED. Do not use.
+ """
+ task_id = os.environ.get("TASK_ID")
+ run_id = os.environ.get("RUN_ID")
+ proxy = os.environ.get("TASKCLUSTER_PROXY_URL")
+ assert task_id and run_id and proxy, "Can only run in Taskcluster tasks with proxy"
+ assert isinstance(content, str)
+ assert isinstance(ttl, datetime.timedelta)
+
+ # Create S3 artifact on Taskcluster
+ resp = queue_service.createArtifact(
+ task_id,
+ run_id,
+ artifact_path,
+ {
+ "storageType": "s3",
+ "expires": stringDate(datetime.datetime.utcnow() + ttl),
+ "contentType": content_type,
+ },
+ )
+ assert resp["storageType"] == "s3", "Not an s3 storage"
+ assert "putUrl" in resp, "Missing putUrl"
+ assert "contentType" in resp, "Missing contentType"
+
+ # Push the artifact on storage service
+ headers = {"Content-Type": resp["contentType"]}
+ push = requests.put(url=resp["putUrl"], headers=headers, data=content)
+ push.raise_for_status()
+
+ # Build the absolute url
+ return "/api/queue/v1/task/{task_id}/runs/{run_id}/artifacts/{path}".format(
+ task_id=task_id,
+ run_id=run_id,
+ path=artifact_path,
+ )
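A minimal sketch of how this helper is typically driven (the URL, secret name, and keys are placeholders):

```python
from taskcluster.helper import TaskclusterConfig

tc = TaskclusterConfig('https://tc.example.com')
tc.auth()  # falls back to TASKCLUSTER_* env vars or the task proxy

queue = tc.get_service('queue')  # resolves taskcluster.generated.Queue

secrets = tc.load_secrets(
    'project/example/ci',        # placeholder secret name
    prefixes=['common', 'ci'],   # only use these sub-sections of the secret
    required=['API_KEY'],        # raise if this key is missing
)
```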
diff --git a/third_party/python/taskcluster/taskcluster/hooks.py b/third_party/python/taskcluster/taskcluster/hooks.py
new file mode 100644
index 0000000000..178fae04f1
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/hooks.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.hooks import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/hooksevents.py b/third_party/python/taskcluster/taskcluster/hooksevents.py
new file mode 100644
index 0000000000..93ede272c0
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/hooksevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.hooksevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/index.py b/third_party/python/taskcluster/taskcluster/index.py
new file mode 100644
index 0000000000..21238fd6f9
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/index.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.index import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/login.py b/third_party/python/taskcluster/taskcluster/login.py
new file mode 100644
index 0000000000..4741c2397d
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/login.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.login import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/notify.py b/third_party/python/taskcluster/taskcluster/notify.py
new file mode 100644
index 0000000000..4edf44541b
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/notify.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.notify import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/notifyevents.py b/third_party/python/taskcluster/taskcluster/notifyevents.py
new file mode 100644
index 0000000000..5a329a8290
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/notifyevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.notifyevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/purgecache.py b/third_party/python/taskcluster/taskcluster/purgecache.py
new file mode 100644
index 0000000000..a4dfac897a
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/purgecache.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.purgecache import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/queue.py b/third_party/python/taskcluster/taskcluster/queue.py
new file mode 100644
index 0000000000..782195cefa
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/queue.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.queue import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/queueevents.py b/third_party/python/taskcluster/taskcluster/queueevents.py
new file mode 100644
index 0000000000..aa32aa35ca
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/queueevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.queueevents import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/retry.py b/third_party/python/taskcluster/taskcluster/retry.py
new file mode 100644
index 0000000000..59cf581e48
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/retry.py
@@ -0,0 +1,41 @@
+import logging
+import time
+
+from . import utils
+
+log = logging.getLogger(__name__)
+
+
+def retry(maxRetries, tryFn):
+ """
+ Retry `tryFn` based on `maxRetries`. Each call to `tryFn` will pass a
+ callable which should be called with the exception object when an exception
+ can be retried. Exceptions raised from `tryFn` are treated as fatal.
+ """
+
+ retry = -1  # incremented at the top of the loop, so attempt 1 is retry 0
+ while True:
+ retry += 1
+
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+ snooze = utils.calculateSleepTime(retry)
+ log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
+ time.sleep(snooze)
+
+ retriableException = None
+
+ def retryFor(exc):
+ nonlocal retriableException
+ retriableException = exc
+
+ res = tryFn(retryFor)
+
+ if not retriableException:
+ return res
+
+ if retry < maxRetries:
+ log.warning(f'Retrying because of: {retriableException}')
+ continue
+
+ raise retriableException
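A usage sketch (the URL and retry count are placeholders): transient failures are reported through the callback, while anything raised out of `tryFn` stays fatal.

```python
import requests

from taskcluster.retry import retry

def fetch(url):
    def tryFn(retryFor):
        try:
            resp = requests.get(url, timeout=30)
            resp.raise_for_status()
            return resp.text
        except requests.RequestException as exc:
            retryFor(exc)  # mark as retriable; retry() re-runs tryFn
    return retry(5, tryFn)
```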
diff --git a/third_party/python/taskcluster/taskcluster/secrets.py b/third_party/python/taskcluster/taskcluster/secrets.py
new file mode 100644
index 0000000000..3177e08e46
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/secrets.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.secrets import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/upload.py b/third_party/python/taskcluster/taskcluster/upload.py
new file mode 100644
index 0000000000..ed47ba8dcf
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/upload.py
@@ -0,0 +1,65 @@
+"""
+Support for uploading objects to the object service, following best
+practices for that service.
+
+Data for upload is read from a "reader" provided by a "reader factory". A
+reader has a `read(max_size)` method which reads and returns a chunk of 1 ..
+`max_size` bytes, or returns an empty string at EOF. A reader factory is a
+callable which returns a fresh reader, ready to read the first byte of the
+object. When uploads are retried, the reader factory may be called more than
+once.
+
+This module provides several pre-defined readers and reader factories for
+common cases.
+"""
+import functools
+import six
+
+if six.PY2:
+ raise ImportError("upload is only supported in Python 3")
+
+from .aio import upload as aio_upload
+from .aio.asyncutils import ensureCoro, runAsync
+
+
+DATA_INLINE_MAX_SIZE = 8192
+
+
+def uploadFromBuf(*, data, **kwargs):
+ """
+ Convenience method to upload data from an in-memory buffer. Arguments are the same
+ as `upload` except that `readerFactory` should not be supplied.
+ """
+ return runAsync(aio_upload.uploadFromBuf(data=data, **kwargs))
+
+
+def uploadFromFile(*, file, **kwargs):
+ """
+ Convenience method to upload data from a file. The file should be open
+ for reading, in binary mode, and be seekable (`f.seek`). Remaining
+ arguments are the same as `upload` except that `readerFactory` should not
+ be supplied.
+ """
+ return runAsync(aio_upload.uploadFromFile(file=file, **kwargs))
+
+
+def upload(*, readerFactory, **kwargs):
+ """
+ Upload the given data to the object service with the given metadata.
+ The `maxRetries` parameter has the same meaning as for service clients.
+ The `objectService` parameter is an instance of the Object class,
+ configured with credentials for the upload.
+ """
+ wrappedReaderFactory = _wrapSyncReaderFactory(readerFactory)
+ return runAsync(aio_upload.upload(readerFactory=wrappedReaderFactory, **kwargs))
+
+
+def _wrapSyncReaderFactory(readerFactory):
+ """Modify the reader returned by readerFactory to have an async read."""
+ @functools.wraps(readerFactory)
+ async def wrappedFactory():
+ reader = readerFactory()
+ reader.read = ensureCoro(reader.read)
+ return reader
+
+ return wrappedFactory
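As a sketch of the reader-factory protocol described in the module docstring (the names here are illustrative): the factory must return a fresh reader on every call, so a retried upload restarts from byte zero.

```python
import io

def bufferReaderFactory(data: bytes):
    """Illustrative reader factory over an in-memory buffer."""
    def factory():
        # BytesIO satisfies the protocol: read(max_size) returns up to
        # max_size bytes, and b'' at EOF.
        return io.BytesIO(data)
    return factory

# e.g. upload(readerFactory=bufferReaderFactory(b"hello"), ...) with the
# remaining metadata arguments the object service expects.
```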
diff --git a/third_party/python/taskcluster/taskcluster/utils.py b/third_party/python/taskcluster/taskcluster/utils.py
new file mode 100644
index 0000000000..9e005a36f3
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/utils.py
@@ -0,0 +1,354 @@
+# -*- coding: UTF-8 -*-
+from __future__ import absolute_import, division, print_function
+import re
+import json
+import datetime
+import base64
+import logging
+import os
+import requests
+import requests.exceptions
+import slugid
+import time
+import six
+import random
+
+import taskcluster_urls as liburls
+
+from . import exceptions
+
+MAX_RETRIES = 5
+
+DELAY_FACTOR = 0.1
+RANDOMIZATION_FACTOR = 0.25
+MAX_DELAY = 30
+
+
+log = logging.getLogger(__name__)
+
+# Regular expression matching offset strings such as:
+# "1 year 2 months 3 weeks 4 days 5 hours 6 minutes 7 seconds"
+r = re.compile(''.join([
+ r'^(\s*(?P<years>\d+)\s*y(ears?)?)?',
+ r'(\s*(?P<months>\d+)\s*mo(nths?)?)?',
+ r'(\s*(?P<weeks>\d+)\s*w(eeks?)?)?',
+ r'(\s*(?P<days>\d+)\s*d(ays?)?)?',
+ r'(\s*(?P<hours>\d+)\s*h(ours?)?)?',
+ r'(\s*(?P<minutes>\d+)\s*m(in(utes?)?)?)?\s*',
+ r'(\s*(?P<seconds>\d+)\s*s(ec(onds?)?)?)?\s*$',
+]))
+
+
+def calculateSleepTime(attempt):
+ """ From the go client
+ https://github.com/taskcluster/go-got/blob/031f55c/backoff.go#L24-L29
+ """
+ if attempt <= 0:
+ return 0
+
+ # We subtract one to get exponents: 1, 2, 3, 4, 5, ..
+ delay = float(2 ** (attempt - 1)) * float(DELAY_FACTOR)
+ # Apply randomization factor
+ delay = delay * (RANDOMIZATION_FACTOR * (random.random() * 2 - 1) + 1)
+ # Always limit with a maximum delay
+ return min(delay, MAX_DELAY)
+
+
+def toStr(obj, encoding='utf-8'):
+ if six.PY3 and isinstance(obj, six.binary_type):
+ obj = obj.decode(encoding)
+ else:
+ obj = str(obj)
+ return obj
+
+
+def fromNow(offset, dateObj=None):
+ """
+ Generate a `datetime.datetime` instance which is offset using a string.
+ See the README.md for a full example, but offset could be '1 day' for
+ a datetime object one day in the future
+ """
+
+ # We want to handle past dates as well as future
+ future = True
+ offset = offset.lstrip()
+ if offset.startswith('-'):
+ future = False
+ offset = offset[1:].lstrip()
+ if offset.startswith('+'):
+ offset = offset[1:].lstrip()
+
+ # Parse offset
+ m = r.match(offset)
+ if m is None:
+ raise ValueError("offset string: '%s' does not parse" % offset)
+
+ # In order to handle years and months we convert them into a number of
+ # days, since timedelta only goes as high as weeks
+ days = 0
+ hours = 0
+ minutes = 0
+ seconds = 0
+ if m.group('years'):
+ years = int(m.group('years'))
+ days += 365 * years
+ if m.group('months'):
+ months = int(m.group('months'))
+ days += 30 * months
+ days += int(m.group('days') or 0)
+ hours += int(m.group('hours') or 0)
+ minutes += int(m.group('minutes') or 0)
+ seconds += int(m.group('seconds') or 0)
+
+ # Offset datetime from utc
+ delta = datetime.timedelta(
+ weeks=int(m.group('weeks') or 0),
+ days=days,
+ hours=hours,
+ minutes=minutes,
+ seconds=seconds,
+ )
+
+ if not dateObj:
+ dateObj = datetime.datetime.utcnow()
+
+ return dateObj + delta if future else dateObj - delta
+
+
+def fromNowJSON(offset):
+ """
+ Like fromNow() but returns in a taskcluster-json compatible way
+ """
+ return stringDate(fromNow(offset))
+
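For instance (values are placeholders; the accepted grammar is defined by the regular expression above):

```python
from taskcluster.utils import fromNow, fromNowJSON

fromNow('2 days 3 hours')   # datetime roughly 2 days, 3 hours from now
fromNow('-1 hour')          # negative offsets produce past datetimes
fromNowJSON('30 minutes')   # the same, as an RFC 3339 string ending in 'Z'
```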
+
+def dumpJson(obj, **kwargs):
+ """ Match JS's JSON.stringify. When using the default seperators,
+ base64 encoding JSON results in \n sequences in the output. Hawk
+ barfs in your face if you have that in the text"""
+ def handleDateAndBinaryForJs(x):
+ if six.PY3 and isinstance(x, six.binary_type):
+ x = x.decode()
+ if isinstance(x, datetime.datetime) or isinstance(x, datetime.date):
+ return stringDate(x)
+ else:
+ return x
+ d = json.dumps(obj, separators=(',', ':'), default=handleDateAndBinaryForJs, **kwargs)
+ assert '\n' not in d
+ return d
+
+
+def stringDate(date):
+ # Convert to isoFormat
+ string = date.isoformat()
+
+ # If there is no timezone and no Z added, we'll add one at the end.
+ # This is just to be fully compliant with:
+ # https://tools.ietf.org/html/rfc3339#section-5.6
+ if string.endswith('+00:00'):
+ return string[:-6] + 'Z'
+ if date.utcoffset() is None and string[-1] != 'Z':
+ return string + 'Z'
+ return string
+
+
+def makeB64UrlSafe(b64str):
+ """ Make a base64 string URL Safe """
+ if isinstance(b64str, six.text_type):
+ b64str = b64str.encode()
+ # see RFC 4648, sec. 5
+ return b64str.replace(b'+', b'-').replace(b'/', b'_')
+
+
+def makeB64UrlUnsafe(b64str):
+ """ Make a base64 string URL Unsafe """
+ if isinstance(b64str, six.text_type):
+ b64str = b64str.encode()
+ # see RFC 4648, sec. 5
+ return b64str.replace(b'-', b'+').replace(b'_', b'/')
+
+
+def encodeStringForB64Header(s):
+ """ HTTP Headers can't have new lines in them, let's """
+ if isinstance(s, six.text_type):
+ s = s.encode()
+ if six.PY3:
+ b64str = base64.encodebytes(s)
+ else:
+ b64str = base64.encodestring(s)
+ return b64str.strip().replace(b'\n', b'')
+
+
+def slugId():
+ """ Generate a taskcluster slugid. This is a V4 UUID encoded into
+ URL-Safe Base64 (RFC 4648, sec 5) with '=' padding removed """
+ return slugid.nice()
+
+
+def stableSlugId():
+ """Returns a closure which can be used to generate stable slugIds.
+ Stable slugIds can be used in a graph to specify task IDs in multiple
+ places without regenerating them, e.g. taskId, requires, etc.
+ """
+ _cache = {}
+
+ def closure(name):
+ if name not in _cache:
+ _cache[name] = slugId()
+ return _cache[name]
+
+ return closure
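For example, the memoization means repeated lookups by the same name agree:

```python
from taskcluster.utils import stableSlugId

gen = stableSlugId()
assert gen('build') == gen('build')  # stable within this closure
assert gen('build') != gen('test')   # distinct names get distinct slugids
```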
+
+
+def scopeMatch(assumedScopes, requiredScopeSets):
+ """
+ Take a list of assumed scopes and a list of required scope sets in
+ disjunctive normal form, and check if any of the required scope sets are
+ satisfied.
+
+ Example:
+
+ requiredScopeSets = [
+ ["scopeA", "scopeB"],
+ ["scopeC"]
+ ]
+
+ In this case assumedScopes must contain either:
+ "scopeA" AND "scopeB", OR just "scopeC".
+ """
+ for scopeSet in requiredScopeSets:
+ for requiredScope in scopeSet:
+ for scope in assumedScopes:
+ if scope == requiredScope:
+ # requiredScope satisfied, no need to check more scopes
+ break
+ if scope.endswith("*") and requiredScope.startswith(scope[:-1]):
+ # requiredScope satisfied, no need to check more scopes
+ break
+ else:
+ # requiredScope not satisfied, stop checking scopeSet
+ break
+ else:
+ # scopeSet satisfied, so we're happy
+ return True
+ # none of the requiredScopeSets were satisfied
+ return False
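Concretely (a sketch using the example sets from the docstring; a trailing `*` in an assumed scope matches any required scope sharing its prefix):

```python
from taskcluster.utils import scopeMatch

sets = [["scopeA", "scopeB"], ["scopeC"]]
assert scopeMatch(["scopeA", "scopeB"], sets)   # first set satisfied
assert scopeMatch(["scopeC"], sets)             # second set satisfied
assert not scopeMatch(["scopeA"], sets)         # neither set complete
assert scopeMatch(["queue:*"], [["queue:create-task:low"]])  # star expansion
```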
+
+
+def scope_match(assumed_scopes, required_scope_sets):
+ """ This is a deprecated form of def scopeMatch(assumedScopes, requiredScopeSets).
+ That form should be used.
+ """
+ import warnings
+ warnings.warn('NOTE: scope_match is deprecated. Use scopeMatch')
+ return scopeMatch(assumed_scopes, required_scope_sets)
+
+
+def makeHttpRequest(method, url, payload, headers, retries=MAX_RETRIES, session=None):
+ """ Make an HTTP request and retry it until success, return request """
+ retry = -1
+ response = None
+ while retry < retries:
+ retry += 1
+ # if this isn't the first retry then we sleep
+ if retry > 0:
+ snooze = float(retry * retry) / 10.0
+ log.info('Sleeping %0.2f seconds for exponential backoff', snooze)
+ time.sleep(snooze)
+
+ # Seek payload to start, if it is a file
+ if hasattr(payload, 'seek'):
+ payload.seek(0)
+
+ log.debug('Making attempt %d', retry)
+ try:
+ response = makeSingleHttpRequest(method, url, payload, headers, session)
+ except requests.exceptions.RequestException as rerr:
+ if retry < retries:
+ log.warning('Retrying because of: %s' % rerr)
+ continue
+ # raise a connection exception
+ raise rerr
+ # Handle non 2xx status code and retry if possible
+ try:
+ response.raise_for_status()
+ except requests.exceptions.RequestException:
+ pass
+ status = response.status_code
+ if 500 <= status < 600:
+ if retry < retries:
+ log.warning('Retrying because of: %d status' % status)
+ continue
+ else:
+ raise exceptions.TaskclusterRestFailure("Unknown Server Error", superExc=None)
+ return response
+
+ # This code-path should be unreachable
+ assert False, "Error from last retry should have been raised!"
+
+
+def makeSingleHttpRequest(method, url, payload, headers, session=None):
+ method = method.upper()
+ log.debug('Making a %s request to %s', method, url)
+ log.debug('HTTP Headers: %s' % str(headers))
+ log.debug('HTTP Payload: %s (limit 100 char)' % str(payload)[:100])
+ obj = session if session else requests
+ response = obj.request(method.upper(), url, data=payload, headers=headers, allow_redirects=False)
+ log.debug('Received HTTP Status: %s' % response.status_code)
+ log.debug('Received HTTP Headers: %s' % str(response.headers))
+
+ return response
+
+
+def putFile(filename, url, contentType):
+ with open(filename, 'rb') as f:
+ contentLength = os.fstat(f.fileno()).st_size
+ return makeHttpRequest('put', url, f, headers={
+ 'Content-Length': str(contentLength),
+ 'Content-Type': contentType,
+ })
+
+
+def encryptEnvVar(taskId, startTime, endTime, name, value, keyFile):
+ raise Exception("Encrypted environment variables are no longer supported")
+
+
+def decryptMessage(message, privateKey):
+ raise Exception("Decryption is no longer supported")
+
+
+def isExpired(certificate):
+ """ Check if certificate is expired """
+ if isinstance(certificate, six.string_types):
+ certificate = json.loads(certificate)
+ expiry = certificate.get('expiry', 0)
+ return expiry < int(time.time() * 1000) + 20 * 60
+
+
+def optionsFromEnvironment(defaults=None):
+ """Fetch root URL and credentials from the standard TASKCLUSTER_…
+ environment variables and return them in a format suitable for passing to a
+ client constructor."""
+ options = defaults or {}
+ credentials = options.get('credentials', {})
+
+ rootUrl = os.environ.get('TASKCLUSTER_ROOT_URL')
+ if rootUrl:
+ options['rootUrl'] = liburls.normalize_root_url(rootUrl)
+
+ clientId = os.environ.get('TASKCLUSTER_CLIENT_ID')
+ if clientId:
+ credentials['clientId'] = clientId
+
+ accessToken = os.environ.get('TASKCLUSTER_ACCESS_TOKEN')
+ if accessToken:
+ credentials['accessToken'] = accessToken
+
+ certificate = os.environ.get('TASKCLUSTER_CERTIFICATE')
+ if certificate:
+ credentials['certificate'] = certificate
+
+ if credentials:
+ options['credentials'] = credentials
+
+ return options
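Typical use, assuming the standard environment variables are set:

```python
import taskcluster

# Reads TASKCLUSTER_ROOT_URL and, when present, the credential variables.
options = taskcluster.optionsFromEnvironment()
queue = taskcluster.Queue(options)
```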
diff --git a/third_party/python/taskcluster/taskcluster/workermanager.py b/third_party/python/taskcluster/taskcluster/workermanager.py
new file mode 100644
index 0000000000..57ee384b18
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/workermanager.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.workermanager import * # NOQA
diff --git a/third_party/python/taskcluster/taskcluster/workermanagerevents.py b/third_party/python/taskcluster/taskcluster/workermanagerevents.py
new file mode 100644
index 0000000000..e879f0d0a3
--- /dev/null
+++ b/third_party/python/taskcluster/taskcluster/workermanagerevents.py
@@ -0,0 +1,2 @@
+# stub to support existing import paths
+from .generated.workermanagerevents import * # NOQA
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/LICENSE b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/METADATA b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/METADATA
new file mode 100644
index 0000000000..79aded7e6c
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/METADATA
@@ -0,0 +1,33 @@
+Metadata-Version: 2.1
+Name: taskcluster-taskgraph
+Version: 3.5.2
+Summary: Build taskcluster taskgraphs
+Home-page: https://github.com/taskcluster/taskgraph
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Software Development
+License-File: LICENSE
+Requires-Dist: appdirs (>=1.4)
+Requires-Dist: attrs (>=19.1.0)
+Requires-Dist: json-e (>=2.7)
+Requires-Dist: mozilla-repo-urls
+Requires-Dist: PyYAML (>=5.4)
+Requires-Dist: redo (>=2.0)
+Requires-Dist: requests (>=2.25)
+Requires-Dist: requests-unixsocket (>=0.2)
+Requires-Dist: slugid (>=2.0)
+Requires-Dist: taskcluster-urls (>=11.0)
+Requires-Dist: voluptuous (>=0.12.1)
+Provides-Extra: load-image
+Requires-Dist: zstandard ; extra == 'load-image'
+
+UNKNOWN
+
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/RECORD b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/RECORD
new file mode 100644
index 0000000000..8af8cb00e6
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/RECORD
@@ -0,0 +1,74 @@
+taskgraph/__init__.py,sha256=jwOtU7TkmU317LP_IsgIswpj2T1OPUXXgMRv4sIU7nE,707
+taskgraph/config.py,sha256=MoFLjKPUViWYGALi_acWDVXZs7M8cy0zQpUKsJSlBMs,4411
+taskgraph/create.py,sha256=1z2AyLvHMkZfDkmPy6um86HG9xTRhE0Sphnbpd-kuEg,5190
+taskgraph/decision.py,sha256=ApfQeXumRH7uq55DLt7gjQCh_eKls6lPhnNaH2ZpR-0,12849
+taskgraph/docker.py,sha256=dB282jKjfLnHwL73YSg1Eeqj-ojHQc676vEpWt4PjVw,7835
+taskgraph/files_changed.py,sha256=W3_gEgUT-mVH9DaaU_8X6gYpftrqBU3kgveGbzPLziU,2793
+taskgraph/filter_tasks.py,sha256=R7tYXiaVPGIkQ6O1c9-QJrKZ59m9pFXCloUlPraVnZU,866
+taskgraph/generator.py,sha256=tonQ3UvaZYRdpWOtmdQ5Mr4en1FRCUJvbvlbzfChluM,15590
+taskgraph/graph.py,sha256=9tE3bSSBRHvRLgJzK4dTieGT3RrzQZdR1YbKizEhzlw,4667
+taskgraph/main.py,sha256=rb7cwghT5U97kSpIho0KzXo4HSXp2Iw_jaL2A2Qrf18,23581
+taskgraph/morph.py,sha256=8qxYdruEQkbHGqv7dh3e1OWhH9Y5i6bFUKzDMs-Ctnw,9625
+taskgraph/parameters.py,sha256=4JWaL_otzQaQjmXc7-HnjfhlHYSaltYRb_6xeUNbERY,11906
+taskgraph/target_tasks.py,sha256=41BIVwiATy8DCQujPduTtnFmgHlKOfw6RPGL4b20WO8,3324
+taskgraph/task.py,sha256=QCrOzMaTsy5QHShKUo89XgjJVMl3cSZGZJPLuHCXItE,3132
+taskgraph/taskgraph.py,sha256=tfj0ZMqjuwEQDET0W57EcP-_KBEbqkxJci9Z6DkeOEQ,2397
+taskgraph/actions/__init__.py,sha256=lVP1e0YyELg7-_42MWWDbT0cKv_p53BApVE6vWOiPww,416
+taskgraph/actions/add_new_jobs.py,sha256=mX_DFDJaQUHetjyMNi5b8zPCCeqfzDrCjDg5DxTaA-I,1831
+taskgraph/actions/cancel.py,sha256=UQSt_6y3S6PXNmUo_mNaUOuDvK2bixWjzdjTKXieEEg,1309
+taskgraph/actions/cancel_all.py,sha256=-ETWKl8BHkk5HjGZRIJpUsFOySE6co0pL0dBDupolu8,1947
+taskgraph/actions/registry.py,sha256=xmhoEGMyYj6TTRFwMowZAUp0aqvtLvdVfmRWM7Yh7xo,13122
+taskgraph/actions/retrigger.py,sha256=awSC8XRtPJxADz5tbEWTKdNEudG8SpwUOM7z2lXxH1U,9382
+taskgraph/actions/util.py,sha256=jA5xXehV8N2G542LZOEci_gMHEFN-BrIjkA55On0kc0,10673
+taskgraph/loader/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskgraph/loader/transform.py,sha256=olUBPjxk3eEIg25sduxlcyqhjoig4ts5kPlT_zs6g9g,2147
+taskgraph/optimize/__init__.py,sha256=Oqpq1RW8QzOcu7zaMlNQ3BHT9ws9e_93FWfCqzNcQps,123
+taskgraph/optimize/base.py,sha256=WvoDNewyHG46IQbG3th-aau9OxSKegsYNfvdOEmunbA,18341
+taskgraph/optimize/strategies.py,sha256=Y5fS-f_3xsQNfFjCXIwDxrwXBvyp4yZxdPVNh49c7XU,2381
+taskgraph/run-task/fetch-content,sha256=z3kx-vxaaaAmfqW-JW7dPKIFpjnxdZiXMdpPj1jAG8M,29915
+taskgraph/run-task/hgrc,sha256=BybWLDR89bWi3pE5T05UqmDHs02CbLypE-omLZWU6Uk,896
+taskgraph/run-task/robustcheckout.py,sha256=tZi_FRGFhX27fspaUj2RGsMCmkwn8IfpRiSsPOrGfXQ,29802
+taskgraph/run-task/run-task,sha256=zT83gWFaB0qBWdxCLxOVHiMdq1bmSmi90FjXjcegfpk,43584
+taskgraph/transforms/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskgraph/transforms/base.py,sha256=N9ec4kw65V_J2KY4C4QRPlbIREbRDYwTlhClstYmOBU,5285
+taskgraph/transforms/cached_tasks.py,sha256=Z10VD1kEBVXJvj8qSsNTq2mYpklh0V1EN8OT6QK3v_E,2607
+taskgraph/transforms/code_review.py,sha256=eE2xrDtdD_n3HT3caQ2HGAkPm6Uutdm4hDCpCoFjEps,707
+taskgraph/transforms/docker_image.py,sha256=ADiOUB-Ngm9Y6uwzGDpQsDJ_-4w6-ZYwLCxQ-0b16E0,7567
+taskgraph/transforms/fetch.py,sha256=Q7Co4wdBKL6Tr3Uc-eitJ3NGgGUYmRXNLuC5m-59-M8,10443
+taskgraph/transforms/release_notifications.py,sha256=jrb9CCT-z_etDf690T-AeCvdzIoVWBAeM_FGoW7FIzA,3305
+taskgraph/transforms/task.py,sha256=fBiSCyC0Lzd2GDSZ_QwhQ1RRebXLmkw4ZCPte9fwEL8,48212
+taskgraph/transforms/job/__init__.py,sha256=ayAytoDmlmNvJNArJc-_nBz1Xuc191rZdbobUgp9hQA,17192
+taskgraph/transforms/job/common.py,sha256=XtKSxUCwRYqpPgRTyLD_8JGRuJs2JYuR0RXpTarPdTE,6826
+taskgraph/transforms/job/index_search.py,sha256=Ngh9FFu1bx2kHVTChW2vcrbnb3SzMneRHopXk18RfB4,1220
+taskgraph/transforms/job/run_task.py,sha256=z5DqgHmmHYEbKtnpMQqcMY6ksgCnnoB7CugH3Z41Gag,8610
+taskgraph/transforms/job/toolchain.py,sha256=WWsj6L_db9rJxzo26TdEf_0jcrK4MCoHHJDzFBkSFpI,5978
+taskgraph/util/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+taskgraph/util/archive.py,sha256=nzYn8cQ3NfLAeV-2SuTNoeQ6hg8m40f6FQcSTyVIKwQ,2855
+taskgraph/util/attributes.py,sha256=zSaRws02rdF1TgvOoFzVNLg2XmwbtVVCTsp4M_qm3RI,2617
+taskgraph/util/cached_tasks.py,sha256=o-yJ91wlWbzoDB2GvKPpGcDE27_IEMgczp_figEBjV8,3406
+taskgraph/util/decision.py,sha256=uTC143FpTKQkGff5jIz3voWRYXBCHgx-XAm7FMW53hE,2433
+taskgraph/util/docker.py,sha256=vdTruZT2Z_GVcyAYilaHt8VaRj4b-dtBKVWlq_GwYvE,11699
+taskgraph/util/hash.py,sha256=71R979-mlDnwTXC5GXrOWTS5VpW4DFWWK9S8Urm_Uic,1560
+taskgraph/util/keyed_by.py,sha256=cgBH4tG8eH5UUrm5q4ODG7A4fzkGAOI7feVoZy3V8Ho,3419
+taskgraph/util/memoize.py,sha256=XDlwc-56gzoY8QTwOoiCOYL-igX7JoMcY-9Ih80Euc8,1331
+taskgraph/util/parameterization.py,sha256=dzxh8Bc8MBKoDMwj2V2AQab9UrC-JcM3tg0hDVTWpjc,3184
+taskgraph/util/path.py,sha256=GOWPdvC144PVy8rsLda8SPenofwSnBaD0L5aJdDNtao,4688
+taskgraph/util/python_path.py,sha256=ed4F5z2mId56LauVczgxm_LGxgQi8XlxlYDgXOPZyII,1576
+taskgraph/util/readonlydict.py,sha256=XzTG-gqGqWVlSkDxSyOL6Ur7Z0ONhIJ9DVLWV3q4q1w,787
+taskgraph/util/schema.py,sha256=JGd0Imjfv6JKCY_tjJtOYwI6uwKUaNgzAcvcZj5WE6A,8323
+taskgraph/util/shell.py,sha256=MB9zHVSvxgOuszgmKr2rWUDahANZkbHHNkjjagZG_3I,1317
+taskgraph/util/taskcluster.py,sha256=cGUGvkrefRHngjyZm_iQRYKRlGi4jMIr7ky0fi_YBrg,12445
+taskgraph/util/taskgraph.py,sha256=ecKEvTfmLVvEKLPO_0g34CqVvc0iCzuNMh3064BZNrE,1969
+taskgraph/util/templates.py,sha256=Dqxfl244u-PX7dnsk3_vYyzDwpDgJtANK6NmZwN3Qow,1417
+taskgraph/util/time.py,sha256=pNFcTH-iYRfm2-okm1lMATc4B5wO-_FXbOFXEtXD27g,3390
+taskgraph/util/treeherder.py,sha256=XrdE-Je0ZvXe6_8f0DvvqNbrHherUk-hUuxirImPEIo,2138
+taskgraph/util/vcs.py,sha256=i13idS8y9ooR216mnd1gksdjSgHBNlAZEdq7Xr-ROwE,18536
+taskgraph/util/verify.py,sha256=YETuZVkwnfYe57GRPx2x_vedstgqdGiH46HLWAdcks8,8827
+taskgraph/util/workertypes.py,sha256=5g2mgIbEKMzDpZNnmPMoMNyy7Wahi-jmWcV1amDAcPo,2341
+taskgraph/util/yaml.py,sha256=hfKI_D8Q7dimq4_VvO3WEh8CJsTrsIMwN6set7HIQbY,990
+taskcluster_taskgraph-3.5.2.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+taskcluster_taskgraph-3.5.2.dist-info/METADATA,sha256=8vZXhtvvL0WcQK5Sp9vslS9bdJHFN0LWZG0YzEUZips,1126
+taskcluster_taskgraph-3.5.2.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92
+taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt,sha256=VoXNtZpN4LvyXYB1wq47AU9CO-DMYMJ0VktKxjugzbY,51
+taskcluster_taskgraph-3.5.2.dist-info/top_level.txt,sha256=3JNeYn_hNiNXC7DrdH_vcv-WYSE7QdgGjdvUYvSjVp0,10
+taskcluster_taskgraph-3.5.2.dist-info/RECORD,,
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/WHEEL b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/WHEEL
new file mode 100644
index 0000000000..becc9a66ea
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt
new file mode 100644
index 0000000000..086555b5cc
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+taskgraph = taskgraph.main:main
+
diff --git a/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/top_level.txt b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/top_level.txt
new file mode 100644
index 0000000000..f3840b68ef
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskcluster_taskgraph-3.5.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+taskgraph
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py
new file mode 100644
index 0000000000..9aef5a8b7e
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/__init__.py
@@ -0,0 +1,15 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+# Maximum number of dependencies a single task can have
+# https://docs.taskcluster.net/reference/platform/taskcluster-queue/references/api#createTask
+# specifies 100, but we also optionally add the decision task id as a dep in
+# taskgraph.create, so let's set this to 99.
+MAX_DEPENDENCIES = 99
+
+# Enable fast task generation for local debugging
+# This is normally switched on via the --fast/-F flag to `mach taskgraph`
+# Currently this skips toolchain task optimizations and schema validation
+fast = False
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/__init__.py
new file mode 100644
index 0000000000..590a957282
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/__init__.py
@@ -0,0 +1,16 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from .registry import (
+ register_callback_action,
+ render_actions_json,
+ trigger_action_callback,
+)
+
+__all__ = [
+ "register_callback_action",
+ "render_actions_json",
+ "trigger_action_callback",
+]
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py
new file mode 100644
index 0000000000..fc10668566
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/add_new_jobs.py
@@ -0,0 +1,64 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from taskgraph.actions.registry import register_callback_action
+from taskgraph.actions.util import (
+ combine_task_graph_files,
+ create_tasks,
+ fetch_graph_and_labels,
+)
+
+
+@register_callback_action(
+ name="add-new-jobs",
+ title="Add new jobs",
+ generic=True,
+ symbol="add-new",
+ description="Add new jobs using task labels.",
+ order=100,
+ context=[],
+ schema={
+ "type": "object",
+ "properties": {
+ "tasks": {
+ "type": "array",
+ "description": "An array of task labels",
+ "items": {"type": "string"},
+ },
+ "times": {
+ "type": "integer",
+ "default": 1,
+ "minimum": 1,
+ "maximum": 100,
+ "title": "Times",
+ "description": "How many times to run each task.",
+ },
+ },
+ },
+)
+def add_new_jobs_action(parameters, graph_config, input, task_group_id, task_id):
+ decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+ parameters, graph_config
+ )
+
+ to_run = []
+ for elem in input["tasks"]:
+ if elem in full_task_graph.tasks:
+ to_run.append(elem)
+ else:
+ raise Exception(f"{elem} was not found in the task-graph")
+
+ times = input.get("times", 1)
+ for i in range(times):
+ create_tasks(
+ graph_config,
+ to_run,
+ full_task_graph,
+ label_to_taskid,
+ parameters,
+ decision_task_id,
+ i,
+ )
+ combine_task_graph_files(list(range(times)))
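+
+
+# The action's input payload must satisfy the schema above; an illustrative
+# (hypothetical) example:
+#
+#   {"tasks": ["build-linux64/opt", "test-linux64/opt-mochitest-1"], "times": 2}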
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel.py
new file mode 100644
index 0000000000..03788c6538
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel.py
@@ -0,0 +1,42 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+
+import requests
+
+from taskgraph.util.taskcluster import cancel_task
+
+from .registry import register_callback_action
+
+logger = logging.getLogger(__name__)
+
+
+@register_callback_action(
+ title="Cancel Task",
+ name="cancel",
+ symbol="cx",
+ generic=True,
+ description=("Cancel the given task"),
+ order=350,
+ context=[{}],
+)
+def cancel_action(parameters, graph_config, input, task_group_id, task_id):
+ # Note that this is limited by the scopes afforded to generic actions to
+ # only cancel tasks with the level-specific schedulerId.
+ try:
+ cancel_task(task_id, use_proxy=True)
+ except requests.HTTPError as e:
+ if e.response.status_code == 409:
+ # A 409 response indicates that this task is past its deadline. It
+ # cannot be cancelled at this time, but it's also not running
+ # anymore, so we can ignore this error.
+ logger.info(
+ 'Task "{}" is past its deadline and cannot be cancelled.'.format(
+ task_id
+ )
+ )
+ return
+ raise
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py
new file mode 100644
index 0000000000..b2636f46a3
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/cancel_all.py
@@ -0,0 +1,61 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import concurrent.futures as futures
+import logging
+import os
+
+import requests
+
+from taskgraph.util.taskcluster import (
+ CONCURRENCY,
+ cancel_task,
+ list_task_group_incomplete_tasks,
+)
+
+from .registry import register_callback_action
+
+logger = logging.getLogger(__name__)
+
+
+@register_callback_action(
+ title="Cancel All",
+ name="cancel-all",
+ generic=True,
+ symbol="cAll",
+ description=(
+ "Cancel all running and pending tasks created by the decision task "
+ "this action task is associated with."
+ ),
+ order=400,
+ context=[],
+)
+def cancel_all_action(parameters, graph_config, input, task_group_id, task_id):
+ def do_cancel_task(task_id):
+ logger.info(f"Cancelling task {task_id}")
+ try:
+ cancel_task(task_id, use_proxy=True)
+ except requests.HTTPError as e:
+ if e.response.status_code == 409:
+ # A 409 response indicates that this task is past its deadline. It
+ # cannot be cancelled at this time, but it's also not running
+ # anymore, so we can ignore this error.
+ logger.info(
+ "Task {} is past its deadline and cannot be cancelled.".format(
+ task_id
+ )
+ )
+ return
+ raise
+
+ own_task_id = os.environ.get("TASK_ID", "")
+ to_cancel = [
+ t for t in list_task_group_incomplete_tasks(task_group_id) if t != own_task_id
+ ]
+ logger.info(f"Cancelling {len(to_cancel)} tasks")
+ with futures.ThreadPoolExecutor(CONCURRENCY) as e:
+ cancel_futs = [e.submit(do_cancel_task, t) for t in to_cancel]
+ for f in futures.as_completed(cancel_futs):
+ f.result()
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/registry.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/registry.py
new file mode 100644
index 0000000000..1e909d30c7
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/registry.py
@@ -0,0 +1,352 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+from collections import namedtuple
+from types import FunctionType
+
+from mozilla_repo_urls import parse
+
+from taskgraph import create
+from taskgraph.config import load_graph_config
+from taskgraph.parameters import Parameters
+from taskgraph.util import hash, taskcluster, yaml
+from taskgraph.util.memoize import memoize
+from taskgraph.util.python_path import import_sibling_modules
+
+actions = []
+callbacks = {}
+
+Action = namedtuple("Action", ["order", "cb_name", "generic", "action_builder"])
+
+
+def is_json(data):
+ """Return ``True``, if ``data`` is a JSON serializable data structure."""
+ try:
+ json.dumps(data)
+    # json.dumps raises TypeError for unserializable objects and ValueError
+    # for e.g. circular references; both mean "not JSON".
+    except (TypeError, ValueError):
+ return False
+ return True
+
+
+@memoize
+def read_taskcluster_yml(filename):
+ """Load and parse .taskcluster.yml, memoized to save some time"""
+ return yaml.load_yaml(filename)
+
+
+@memoize
+def hash_taskcluster_yml(filename):
+ """
+    Generate a hash of the given .taskcluster.yml. This is the first 10 hex
+    digits of the sha256 of the file's content, and is used by administrative
+    scripts to create a hook based on this content.
+ """
+ return hash.hash_path(filename)[:10]
+
+
+def register_callback_action(
+ name,
+ title,
+ symbol,
+ description,
+ order=10000,
+ context=[],
+ available=lambda parameters: True,
+ schema=None,
+ generic=True,
+ cb_name=None,
+):
+ """
+ Register an action callback that can be triggered from supporting
+ user interfaces, such as Treeherder.
+
+ This function is to be used as a decorator for a callback that takes
+ parameters as follows:
+
+ ``parameters``:
+ Decision task :class:`parameters <taskgraph.parameters.Parameters>`.
+ ``input``:
+ Input matching specified JSON schema, ``None`` if no ``schema``
+ parameter is given to ``register_callback_action``.
+ ``task_group_id``:
+ The id of the task-group this was triggered for.
+    ``task_id`` and ``task``:
+        task identifier and task definition for the task the action was
+        triggered for; ``None`` if no ``context`` parameter was given to
+        ``register_callback_action``.
+
+ Args:
+ name (str):
+ An identifier for this action, used by UIs to find the action.
+ title (str):
+ A human readable title for the action to be used as label on a button
+ or text on a link for triggering the action.
+ symbol (str):
+ Treeherder symbol for the action callback, this is the symbol that the
+ task calling your callback will be displayed as. This is usually 1-3
+ letters abbreviating the action title.
+ description (str):
+ A human readable description of the action in **markdown**.
+            This will be displayed as a tooltip and in the dialog window when the
+            action is triggered. This is a good place to describe how to use the action.
+ order (int):
+ Order of the action in menus, this is relative to the ``order`` of
+ other actions declared.
+ context (list of dict):
+            List of tag-sets specifying which tasks the action can take as input.
+            If no tag-sets are specified, the action applies to the
+            entire task-group and won't be triggered with a given task.
+
+            Otherwise, given ``context = [{'k': 'b', 'p': 'l'}, {'k': 't'}]``, the
+            action will only be displayed in the context menu for tasks that have
+            ``task.tags.k == 'b' && task.tags.p == 'l'`` or ``task.tags.k == 't'``.
+ Essentially, this allows filtering on ``task.tags``.
+
+ If this is a function, it is given the decision parameters and must return
+ a value of the form described above.
+ available (function):
+ An optional function that given decision parameters decides if the
+ action is available. Defaults to a function that always returns ``True``.
+ schema (dict):
+ JSON schema specifying input accepted by the action.
+ This is optional and can be left ``null`` if no input is taken.
+        generic (bool):
+ Whether this is a generic action or has its own permissions.
+ cb_name (str):
+ The name under which this function should be registered, defaulting to
+            `name`. This is used to generate the actionPerm for non-generic hook
+ actions, and thus appears in ci-configuration and various role and hook
+ names. Unlike `name`, which can appear multiple times, cb_name must be
+ unique among all registered callbacks.
+
+ Returns:
+ function: Decorator to be used for the callback function.
+ """
+ mem = {"registered": False} # workaround nonlocal missing in 2.x
+
+ assert isinstance(title, str), "title must be a string"
+ assert isinstance(description, str), "description must be a string"
+ title = title.strip()
+ description = description.strip()
+
+ # ensure that context is callable
+ if not callable(context):
+ context_value = context
+ context = lambda params: context_value # noqa
+
+ def register_callback(cb, cb_name=cb_name):
+ assert isinstance(name, str), "name must be a string"
+ assert isinstance(order, int), "order must be an integer"
+ assert callable(schema) or is_json(
+ schema
+ ), "schema must be a JSON compatible object"
+ assert isinstance(cb, FunctionType), "callback must be a function"
+ # Allow for json-e > 25 chars in the symbol.
+ if "$" not in symbol:
+ assert 1 <= len(symbol) <= 25, "symbol must be between 1 and 25 characters"
+ assert isinstance(symbol, str), "symbol must be a string"
+
+ assert not mem[
+ "registered"
+ ], "register_callback_action must be used as decorator"
+ if not cb_name:
+ cb_name = name
+ assert cb_name not in callbacks, "callback name {} is not unique".format(
+ cb_name
+ )
+
+ def action_builder(parameters, graph_config, decision_task_id):
+ if not available(parameters):
+ return None
+
+ actionPerm = "generic" if generic else cb_name
+
+ # gather up the common decision-task-supplied data for this action
+ repo_param = "head_repository"
+ repository = {
+ "url": parameters[repo_param],
+ "project": parameters["project"],
+ "level": parameters["level"],
+ }
+
+ revision = parameters["head_rev"]
+ push = {
+ "owner": "mozilla-taskcluster-maintenance@mozilla.com",
+ "pushlog_id": parameters["pushlog_id"],
+ "revision": revision,
+ }
+ branch = parameters.get("head_ref")
+ if branch:
+ push["branch"] = branch
+
+ action = {
+ "name": name,
+ "title": title,
+ "description": description,
+ # target taskGroupId (the task group this decision task is creating)
+ "taskGroupId": decision_task_id,
+ "cb_name": cb_name,
+ "symbol": symbol,
+ }
+
+ rv = {
+ "name": name,
+ "title": title,
+ "description": description,
+ "context": context(parameters),
+ }
+ if schema:
+ rv["schema"] = (
+ schema(graph_config=graph_config) if callable(schema) else schema
+ )
+
+ trustDomain = graph_config["trust-domain"]
+ level = parameters["level"]
+ tcyml_hash = hash_taskcluster_yml(graph_config.taskcluster_yml)
+
+ # the tcyml_hash is prefixed with `/` in the hookId, so users will be granted
+ # hooks:trigger-hook:project-gecko/in-tree-action-3-myaction/*; if another
+ # action was named `myaction/release`, then the `*` in the scope would also
+ # match that action. To prevent such an accident, we prohibit `/` in hook
+ # names.
+ if "/" in actionPerm:
+ raise Exception("`/` is not allowed in action names; use `-`")
+
+ rv.update(
+ {
+ "kind": "hook",
+ "hookGroupId": f"project-{trustDomain}",
+ "hookId": "in-tree-action-{}-{}/{}".format(
+ level, actionPerm, tcyml_hash
+ ),
+ "hookPayload": {
+ # provide the decision-task parameters as context for triggerHook
+ "decision": {
+ "action": action,
+ "repository": repository,
+ "push": push,
+ },
+ # and pass everything else through from our own context
+ "user": {
+ "input": {"$eval": "input"},
+ "taskId": {"$eval": "taskId"}, # target taskId (or null)
+ "taskGroupId": {
+ "$eval": "taskGroupId"
+ }, # target task group
+ },
+ },
+ "extra": {
+ "actionPerm": actionPerm,
+ },
+ }
+ )
+
+ return rv
+
+ actions.append(Action(order, cb_name, generic, action_builder))
+
+ mem["registered"] = True
+ callbacks[cb_name] = cb
+ return cb
+
+ return register_callback
+
+
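+# A minimal usage sketch for the decorator above. The action name and callback
+# body are hypothetical, for illustration only:
+#
+#   @register_callback_action(
+#       name="hello-world",
+#       title="Hello World",
+#       symbol="hw",
+#       description="Log a greeting from an action task.",
+#       context=[],  # empty list: applies to the whole task-group
+#   )
+#   def hello_world_action(parameters, graph_config, input, task_group_id, task_id):
+#       print(f"hello from task group {task_group_id}")
+
+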
+def render_actions_json(parameters, graph_config, decision_task_id):
+ """
+ Render JSON object for the ``public/actions.json`` artifact.
+
+    Args:
+        parameters (:class:`~taskgraph.parameters.Parameters`):
+            Decision task parameters.
+        graph_config (:class:`~taskgraph.config.GraphConfig`):
+            Graph configuration object.
+        decision_task_id (str):
+            The id of the decision task the actions are rendered for.
+
+ Returns:
+ dict:
+ JSON object representation of the ``public/actions.json``
+ artifact.
+ """
+ assert isinstance(parameters, Parameters), "requires instance of Parameters"
+ actions = []
+ for action in sorted(_get_actions(graph_config), key=lambda action: action.order):
+ action = action.action_builder(parameters, graph_config, decision_task_id)
+ if action:
+ assert is_json(action), "action must be a JSON compatible object"
+ actions.append(action)
+ return {
+ "version": 1,
+ "variables": {},
+ "actions": actions,
+ }
+
+
+def sanity_check_task_scope(callback, parameters, graph_config):
+ """
+ If this action is not generic, then verify that this task has the necessary
+ scope to run the action. This serves as a backstop preventing abuse by
+ running non-generic actions using generic hooks. While scopes should
+ prevent serious damage from such abuse, it's never a valid thing to do.
+ """
+ for action in _get_actions(graph_config):
+ if action.cb_name == callback:
+ break
+ else:
+ raise ValueError(f"No action with cb_name {callback}")
+
+ actionPerm = "generic" if action.generic else action.cb_name
+
+ repo_param = "head_repository"
+ raw_url = parameters[repo_param]
+ parsed_url = parse(raw_url)
+ expected_scope = f"assume:{parsed_url.taskcluster_role_prefix}:action:{actionPerm}"
+
+ # the scope should appear literally; no need for a satisfaction check. The use of
+ # get_current_scopes here calls the auth service through the Taskcluster Proxy, giving
+ # the precise scopes available to this task.
+ if expected_scope not in taskcluster.get_current_scopes():
+ raise ValueError(f"Expected task scope {expected_scope} for this action")
+
+
+def trigger_action_callback(
+ task_group_id, task_id, input, callback, parameters, root, test=False
+):
+ """
+ Trigger action callback with the given inputs. If `test` is true, then run
+ the action callback in testing mode, without actually creating tasks.
+ """
+ graph_config = load_graph_config(root)
+ graph_config.register()
+ callbacks = _get_callbacks(graph_config)
+ cb = callbacks.get(callback, None)
+ if not cb:
+ raise Exception(
+ "Unknown callback: {}. Known callbacks: {}".format(
+ callback, ", ".join(callbacks)
+ )
+ )
+
+ if test:
+ create.testing = True
+ taskcluster.testing = True
+
+ if not test:
+ sanity_check_task_scope(callback, parameters, graph_config)
+
+ cb(Parameters(**parameters), graph_config, input, task_group_id, task_id)
+
+
+def _load(graph_config):
+ # Load all modules from this folder, relying on the side-effects of register_
+ # functions to populate the action registry.
+ import_sibling_modules(exceptions=("util.py",))
+ return callbacks, actions
+
+
+def _get_callbacks(graph_config):
+ return _load(graph_config)[0]
+
+
+def _get_actions(graph_config):
+ return _load(graph_config)[1]
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py
new file mode 100644
index 0000000000..4758beb625
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/retrigger.py
@@ -0,0 +1,301 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+import sys
+import textwrap
+
+from slugid import nice as slugid
+
+from taskgraph.util import taskcluster
+
+from .registry import register_callback_action
+from .util import (
+ combine_task_graph_files,
+ create_task_from_def,
+ create_tasks,
+ fetch_graph_and_labels,
+ relativize_datestamps,
+)
+
+logger = logging.getLogger(__name__)
+
+RERUN_STATES = ("exception", "failed")
+
+
+def _should_retrigger(task_graph, label):
+ """
+ Return whether a given task in the taskgraph should be retriggered.
+
+    This handles the case where the task isn't in the graph by assuming it
+    should not be retriggered.
+ """
+ if label not in task_graph:
+ logger.info(
+ "Task {} not in full taskgraph, assuming task should not be retriggered.".format(
+ label
+ )
+ )
+ return False
+ return task_graph[label].attributes.get("retrigger", False)
+
+
+@register_callback_action(
+ title="Retrigger",
+ name="retrigger",
+ symbol="rt",
+ cb_name="retrigger-decision",
+ description=textwrap.dedent(
+ """\
+ Create a clone of the task (retriggering decision, action, and cron tasks requires
+ special scopes)."""
+ ),
+ order=11,
+ context=[
+ {"kind": "decision-task"},
+ {"kind": "action-callback"},
+ {"kind": "cron-task"},
+ ],
+)
+def retrigger_decision_action(parameters, graph_config, input, task_group_id, task_id):
+ """For a single task, we try to just run exactly the same task once more.
+ It's quite possible that we don't have the scopes to do so (especially for
+ an action), but this is best-effort."""
+
+ # make all of the timestamps relative; they will then be turned back into
+ # absolute timestamps relative to the current time.
+ task = taskcluster.get_task_definition(task_id)
+ task = relativize_datestamps(task)
+ create_task_from_def(slugid(), task, parameters["level"])
+
+
+@register_callback_action(
+ title="Retrigger",
+ name="retrigger",
+ symbol="rt",
+ generic=True,
+ description=("Create a clone of the task."),
+    order=19,  # must be greater than the decision/cron retrigger action above
+ context=[{"retrigger": "true"}],
+ schema={
+ "type": "object",
+ "properties": {
+ "downstream": {
+ "type": "boolean",
+ "description": (
+ "If true, downstream tasks from this one will be cloned as well. "
+ "The dependencies will be updated to work with the new task at the root."
+ ),
+ "default": False,
+ },
+ "times": {
+ "type": "integer",
+ "default": 1,
+ "minimum": 1,
+ "maximum": 100,
+ "title": "Times",
+ "description": "How many times to run each task.",
+ },
+ },
+ },
+)
+@register_callback_action(
+ title="Retrigger (disabled)",
+ name="retrigger",
+ cb_name="retrigger-disabled",
+ symbol="rt",
+ generic=True,
+ description=(
+ "Create a clone of the task.\n\n"
+ "This type of task should typically be re-run instead of re-triggered."
+ ),
+ order=20, # must be greater than other orders in this file, as this is the fallback version
+ context=[{}],
+ schema={
+ "type": "object",
+ "properties": {
+ "downstream": {
+ "type": "boolean",
+ "description": (
+ "If true, downstream tasks from this one will be cloned as well. "
+ "The dependencies will be updated to work with the new task at the root."
+ ),
+ "default": False,
+ },
+ "times": {
+ "type": "integer",
+ "default": 1,
+ "minimum": 1,
+ "maximum": 100,
+ "title": "Times",
+ "description": "How many times to run each task.",
+ },
+ "force": {
+ "type": "boolean",
+ "default": False,
+ "description": (
+ "This task should not be re-triggered. "
+ "This can be overridden by passing `true` here."
+ ),
+ },
+ },
+ },
+)
+def retrigger_action(parameters, graph_config, input, task_group_id, task_id):
+ decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+ parameters, graph_config
+ )
+
+ task = taskcluster.get_task_definition(task_id)
+ label = task["metadata"]["name"]
+
+ with_downstream = " "
+ to_run = [label]
+
+ if not input.get("force", None) and not _should_retrigger(full_task_graph, label):
+ logger.info(
+ "Not retriggering task {}, task should not be retrigged "
+ "and force not specified.".format(label)
+ )
+ sys.exit(1)
+
+ if input.get("downstream"):
+ to_run = full_task_graph.graph.transitive_closure(
+ set(to_run), reverse=True
+ ).nodes
+ to_run = to_run & set(label_to_taskid.keys())
+ with_downstream = " (with downstream) "
+
+ times = input.get("times", 1)
+ for i in range(times):
+ create_tasks(
+ graph_config,
+ to_run,
+ full_task_graph,
+ label_to_taskid,
+ parameters,
+ decision_task_id,
+ i,
+ )
+
+ logger.info(f"Scheduled {label}{with_downstream}(time {i + 1}/{times})")
+ combine_task_graph_files(list(range(times)))
+
+
+@register_callback_action(
+ title="Rerun",
+ name="rerun",
+ generic=True,
+ symbol="rr",
+ description=(
+ "Rerun a task.\n\n"
+ "This only works on failed or exception tasks in the original taskgraph,"
+ " and is CoT friendly."
+ ),
+ order=300,
+ context=[{}],
+ schema={"type": "object", "properties": {}},
+)
+def rerun_action(parameters, graph_config, input, task_group_id, task_id):
+ task = taskcluster.get_task_definition(task_id)
+ parameters = dict(parameters)
+ decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+ parameters, graph_config
+ )
+ label = task["metadata"]["name"]
+ if task_id not in label_to_taskid.values():
+ logger.error(
+ "Refusing to rerun {}: taskId {} not in decision task {} label_to_taskid!".format(
+ label, task_id, decision_task_id
+ )
+        )
+        return
+
+ _rerun_task(task_id, label)
+
+
+def _rerun_task(task_id, label):
+ state = taskcluster.state_task(task_id)
+ if state not in RERUN_STATES:
+ logger.warning(
+ "No need to rerun {}: state '{}' not in {}!".format(
+ label, state, RERUN_STATES
+ )
+ )
+ return
+ taskcluster.rerun_task(task_id)
+ logger.info(f"Reran {label}")
+
+
+@register_callback_action(
+ title="Retrigger",
+ name="retrigger-multiple",
+ symbol="rt",
+ generic=True,
+ description=("Create a clone of the task."),
+ context=[],
+ schema={
+ "type": "object",
+ "properties": {
+ "requests": {
+ "type": "array",
+ "items": {
+ "tasks": {
+ "type": "array",
+ "description": "An array of task labels",
+ "items": {"type": "string"},
+ },
+ "times": {
+ "type": "integer",
+ "minimum": 1,
+ "maximum": 100,
+ "title": "Times",
+ "description": "How many times to run each task.",
+ },
+ "additionalProperties": False,
+ },
+ },
+ "additionalProperties": False,
+ },
+ },
+)
+def retrigger_multiple(parameters, graph_config, input, task_group_id, task_id):
+ decision_task_id, full_task_graph, label_to_taskid = fetch_graph_and_labels(
+ parameters, graph_config
+ )
+
+ suffixes = []
+ for i, request in enumerate(input.get("requests", [])):
+ times = request.get("times", 1)
+ rerun_tasks = [
+ label
+ for label in request.get("tasks")
+ if not _should_retrigger(full_task_graph, label)
+ ]
+ retrigger_tasks = [
+ label
+ for label in request.get("tasks")
+ if _should_retrigger(full_task_graph, label)
+ ]
+
+ for label in rerun_tasks:
+ # XXX we should not re-run tasks pulled in from other pushes
+ # In practice, this shouldn't matter, as only completed tasks
+ # are pulled in from other pushes and treeherder won't pass
+ # those labels.
+ _rerun_task(label_to_taskid[label], label)
+
+ for j in range(times):
+ suffix = f"{i}-{j}"
+ suffixes.append(suffix)
+ create_tasks(
+ graph_config,
+ retrigger_tasks,
+ full_task_graph,
+ label_to_taskid,
+ parameters,
+ decision_task_id,
+ suffix,
+ )
+
+ combine_task_graph_files(suffixes)
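+
+
+# Illustrative input for retrigger-multiple (the task labels are
+# hypothetical):
+#
+#   {"requests": [{"tasks": ["build-linux64/opt"], "times": 3},
+#                 {"tasks": ["test-linux64/opt-xpcshell"], "times": 1}]}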
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py b/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py
new file mode 100644
index 0000000000..dd3248d209
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/actions/util.py
@@ -0,0 +1,282 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import concurrent.futures as futures
+import copy
+import logging
+import os
+import re
+from functools import reduce
+
+from requests.exceptions import HTTPError
+
+from taskgraph import create
+from taskgraph.decision import read_artifact, rename_artifact, write_artifact
+from taskgraph.optimize.base import optimize_task_graph
+from taskgraph.taskgraph import TaskGraph
+from taskgraph.util.taskcluster import (
+ CONCURRENCY,
+ get_artifact,
+ get_session,
+ list_tasks,
+ parse_time,
+)
+from taskgraph.util.taskgraph import find_decision_task
+
+logger = logging.getLogger(__name__)
+
+
+def get_parameters(decision_task_id):
+ return get_artifact(decision_task_id, "public/parameters.yml")
+
+
+def fetch_graph_and_labels(parameters, graph_config):
+ decision_task_id = find_decision_task(parameters, graph_config)
+
+ # First grab the graph and labels generated during the initial decision task
+ full_task_graph = get_artifact(decision_task_id, "public/full-task-graph.json")
+ _, full_task_graph = TaskGraph.from_json(full_task_graph)
+ label_to_taskid = get_artifact(decision_task_id, "public/label-to-taskid.json")
+
+ # fetch everything in parallel; this avoids serializing any delay in downloading
+ # each artifact (such as waiting for the artifact to be mirrored locally)
+ with futures.ThreadPoolExecutor(CONCURRENCY) as e:
+ fetches = []
+
+ # fetch any modifications made by action tasks and swap out new tasks
+ # for old ones
+ def fetch_action(task_id):
+ logger.info(f"fetching label-to-taskid.json for action task {task_id}")
+ try:
+ run_label_to_id = get_artifact(task_id, "public/label-to-taskid.json")
+ label_to_taskid.update(run_label_to_id)
+ except HTTPError as e:
+ if e.response.status_code != 404:
+ raise
+ logger.debug(f"No label-to-taskid.json found for {task_id}: {e}")
+
+ namespace = "{}.v2.{}.pushlog-id.{}.actions".format(
+ graph_config["trust-domain"],
+ parameters["project"],
+ parameters["pushlog_id"],
+ )
+ for task_id in list_tasks(namespace):
+ fetches.append(e.submit(fetch_action, task_id))
+
+ # Similarly for cron tasks..
+ def fetch_cron(task_id):
+ logger.info(f"fetching label-to-taskid.json for cron task {task_id}")
+ try:
+ run_label_to_id = get_artifact(task_id, "public/label-to-taskid.json")
+ label_to_taskid.update(run_label_to_id)
+ except HTTPError as e:
+ if e.response.status_code != 404:
+ raise
+ logger.debug(f"No label-to-taskid.json found for {task_id}: {e}")
+
+ namespace = "{}.v2.{}.revision.{}.cron".format(
+ graph_config["trust-domain"], parameters["project"], parameters["head_rev"]
+ )
+ for task_id in list_tasks(namespace):
+ fetches.append(e.submit(fetch_cron, task_id))
+
+ # now wait for each fetch to complete, raising an exception if there
+ # were any issues
+ for f in futures.as_completed(fetches):
+ f.result()
+
+ return (decision_task_id, full_task_graph, label_to_taskid)
+
+
+def create_task_from_def(task_id, task_def, level):
+ """Create a new task from a definition rather than from a label
+ that is already in the full-task-graph. The task definition will
+    have {'relative-datestamp': '..'} rendered just like in a decision task.
+ Use this for entirely new tasks or ones that change internals of the task.
+ It is useful if you want to "edit" the full_task_graph and then hand
+ it to this function. No dependencies will be scheduled. You must handle
+ this yourself. Seeing how create_tasks handles it might prove helpful."""
+ task_def["schedulerId"] = f"gecko-level-{level}"
+ label = task_def["metadata"]["name"]
+ session = get_session()
+ create.create_task(session, task_id, label, task_def)
+
+
+def update_parent(task, graph):
+ task.task.setdefault("extra", {})["parent"] = os.environ.get("TASK_ID", "")
+ return task
+
+
+def update_dependencies(task, graph):
+ if os.environ.get("TASK_ID"):
+ task.task.setdefault("dependencies", []).append(os.environ["TASK_ID"])
+ return task
+
+
+def create_tasks(
+ graph_config,
+ to_run,
+ full_task_graph,
+ label_to_taskid,
+ params,
+ decision_task_id=None,
+ suffix="",
+ modifier=lambda t: t,
+):
+ """Create new tasks. The task definition will have {relative-datestamp':
+ '..'} rendered just like in a decision task. Action callbacks should use
+ this function to create new tasks,
+ allowing easy debugging with `mach taskgraph action-callback --test`.
+ This builds up all required tasks to run in order to run the tasks requested.
+
+ Optionally this function takes a `modifier` function that is passed in each
+ task before it is put into a new graph. It should return a valid task. Note
+ that this is passed _all_ tasks in the graph, not just the set in to_run. You
+ may want to skip modifying tasks not in your to_run list.
+
+ If `suffix` is given, then it is used to give unique names to the resulting
+ artifacts. If you call this function multiple times in the same action,
+ pass a different suffix each time to avoid overwriting artifacts.
+
+ If you wish to create the tasks in a new group, leave out decision_task_id.
+
+ Returns an updated label_to_taskid containing the new tasks"""
+ if suffix != "":
+ suffix = f"-{suffix}"
+ to_run = set(to_run)
+
+ # Copy to avoid side-effects later
+ full_task_graph = copy.deepcopy(full_task_graph)
+ label_to_taskid = label_to_taskid.copy()
+
+ target_graph = full_task_graph.graph.transitive_closure(to_run)
+ target_task_graph = TaskGraph(
+ {l: modifier(full_task_graph[l]) for l in target_graph.nodes}, target_graph
+ )
+ target_task_graph.for_each_task(update_parent)
+ if decision_task_id and decision_task_id != os.environ.get("TASK_ID"):
+ target_task_graph.for_each_task(update_dependencies)
+ optimized_task_graph, label_to_taskid = optimize_task_graph(
+ target_task_graph,
+ to_run,
+ params,
+ to_run,
+ decision_task_id,
+ existing_tasks=label_to_taskid,
+ )
+ write_artifact(f"task-graph{suffix}.json", optimized_task_graph.to_json())
+ write_artifact(f"label-to-taskid{suffix}.json", label_to_taskid)
+ write_artifact(f"to-run{suffix}.json", list(to_run))
+ create.create_tasks(
+ graph_config,
+ optimized_task_graph,
+ label_to_taskid,
+ params,
+ decision_task_id,
+ )
+ return label_to_taskid
+
+
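+# A sketch of how an action callback typically drives create_tasks(), e.g.
+# running one label twice with distinct artifact suffixes (the label and the
+# surrounding variables are illustrative):
+#
+#   for i in range(2):
+#       label_to_taskid = create_tasks(
+#           graph_config,
+#           {"build-linux64/opt"},
+#           full_task_graph,
+#           label_to_taskid,
+#           parameters,
+#           decision_task_id,
+#           suffix=i,
+#       )
+#   combine_task_graph_files([0, 1])
+
+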
+def _update_reducer(accumulator, new_value):
+ "similar to set or dict `update` method, but returning the modified object"
+ accumulator.update(new_value)
+ return accumulator
+
+
+def combine_task_graph_files(suffixes):
+ """Combine task-graph-{suffix}.json files into a single task-graph.json file.
+
+ Since Chain of Trust verification requires a task-graph.json file that
+ contains all children tasks, we can combine the various task-graph-0.json
+ type files into a master task-graph.json file at the end.
+
+ Actions also look for various artifacts, so we combine those in a similar
+ fashion.
+
+ In the case where there is only one suffix, we simply rename it to avoid the
+ additional cost of uploading two copies of the same data.
+ """
+
+ if len(suffixes) == 1:
+ for filename in ["task-graph", "label-to-taskid", "to-run"]:
+ rename_artifact(f"{filename}-{suffixes[0]}.json", f"{filename}.json")
+ return
+
+ def combine(file_contents, base):
+ return reduce(_update_reducer, file_contents, base)
+
+ files = [read_artifact(f"task-graph-{suffix}.json") for suffix in suffixes]
+ write_artifact("task-graph.json", combine(files, dict()))
+
+ files = [read_artifact(f"label-to-taskid-{suffix}.json") for suffix in suffixes]
+ write_artifact("label-to-taskid.json", combine(files, dict()))
+
+ files = [read_artifact(f"to-run-{suffix}.json") for suffix in suffixes]
+ write_artifact("to-run.json", list(combine(files, set())))
+
+
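+# For example, combine_task_graph_files([0, 1]) merges task-graph-0.json and
+# task-graph-1.json into task-graph.json (and likewise for the label-to-taskid
+# and to-run artifacts), while combine_task_graph_files([0]) just renames the
+# -0 files.
+
+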
+def relativize_datestamps(task_def):
+ """
+ Given a task definition as received from the queue, convert all datestamps
+ to {relative_datestamp: ..} format, with the task creation time as "now".
+ The result is useful for handing to ``create_task``.
+ """
+ base = parse_time(task_def["created"])
+ # borrowed from https://github.com/epoberezkin/ajv/blob/master/lib/compile/formats.js
+ ts_pattern = re.compile(
+ r"^\d\d\d\d-[0-1]\d-[0-3]\d[t\s]"
+ r"(?:[0-2]\d:[0-5]\d:[0-5]\d|23:59:60)(?:\.\d+)?"
+ r"(?:z|[+-]\d\d:\d\d)$",
+ re.I,
+ )
+
+ def recurse(value):
+ if isinstance(value, str):
+ if ts_pattern.match(value):
+ value = parse_time(value)
+ diff = value - base
+ return {"relative-datestamp": f"{int(diff.total_seconds())} seconds"}
+ if isinstance(value, list):
+ return [recurse(e) for e in value]
+ if isinstance(value, dict):
+ return {k: recurse(v) for k, v in value.items()}
+ return value
+
+ return recurse(task_def)
+
+
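+# A worked example (timestamps illustrative), taking task_def["created"] as
+# the base:
+#
+#   {"created": "2022-01-01T00:00:00Z", "deadline": "2022-01-01T01:00:00Z"}
+#
+# becomes
+#
+#   {"created": {"relative-datestamp": "0 seconds"},
+#    "deadline": {"relative-datestamp": "3600 seconds"}}
+
+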
+def add_args_to_command(cmd_parts, extra_args=[]):
+ """
+ Add custom command line args to a given command.
+
+ Args:
+ cmd_parts: the raw command as seen by taskcluster
+ extra_args: array of args we want to add
+ """
+ cmd_type = "default"
+ if len(cmd_parts) == 1 and isinstance(cmd_parts[0], dict):
+ # windows has single cmd part as dict: 'task-reference', with long string
+ cmd_parts = cmd_parts[0]["task-reference"].split(" ")
+ cmd_type = "dict"
+    elif len(cmd_parts) == 1 and isinstance(cmd_parts[0], str):
+ # windows has single cmd part as a long string
+ cmd_parts = cmd_parts[0].split(" ")
+ cmd_type = "unicode"
+ elif len(cmd_parts) == 1 and isinstance(cmd_parts[0], list):
+        # osx has a single-value array with an array inside
+ cmd_parts = cmd_parts[0]
+ cmd_type = "subarray"
+
+ cmd_parts.extend(extra_args)
+
+ if cmd_type == "dict":
+ cmd_parts = [{"task-reference": " ".join(cmd_parts)}]
+ elif cmd_type == "unicode":
+ cmd_parts = [" ".join(cmd_parts)]
+ elif cmd_type == "subarray":
+ cmd_parts = [cmd_parts]
+ return cmd_parts
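+
+
+# Shape-wise, the cases handled above (an illustrative sketch):
+#
+#   add_args_to_command(["run", "--verbose"], ["--fast"])
+#       -> ["run", "--verbose", "--fast"]
+#   add_args_to_command([{"task-reference": "run --verbose"}], ["--fast"])
+#       -> [{"task-reference": "run --verbose --fast"}]
+#   add_args_to_command([["run", "--verbose"]], ["--fast"])
+#       -> [["run", "--verbose", "--fast"]]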
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/config.py b/third_party/python/taskcluster_taskgraph/taskgraph/config.py
new file mode 100644
index 0000000000..9517a4316c
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/config.py
@@ -0,0 +1,136 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+import os
+import sys
+
+import attr
+from voluptuous import All, Any, Extra, Length, Optional, Required
+
+from .util import path
+from .util.python_path import find_object
+from .util.schema import Schema, optionally_keyed_by, validate_schema
+from .util.yaml import load_yaml
+
+logger = logging.getLogger(__name__)
+
+graph_config_schema = Schema(
+ {
+ # The trust-domain for this graph.
+ # (See https://firefox-source-docs.mozilla.org/taskcluster/taskcluster/taskgraph.html#taskgraph-trust-domain) # noqa
+ Required("trust-domain"): str,
+ Required("task-priority"): optionally_keyed_by(
+ "project",
+ Any(
+ "highest",
+ "very-high",
+ "high",
+ "medium",
+ "low",
+ "very-low",
+ "lowest",
+ ),
+ ),
+ Required("workers"): {
+ Required("aliases"): {
+ str: {
+ Required("provisioner"): optionally_keyed_by("level", str),
+ Required("implementation"): str,
+ Required("os"): str,
+ Required("worker-type"): optionally_keyed_by("level", str),
+ }
+ },
+ },
+ Required("taskgraph"): {
+ Optional(
+ "register",
+ description="Python function to call to register extensions.",
+ ): str,
+ Optional("decision-parameters"): str,
+ Optional(
+ "cached-task-prefix",
+ description="The taskcluster index prefix to use for caching tasks. "
+ "Defaults to `trust-domain`.",
+ ): str,
+ Required("repositories"): All(
+ {
+ str: {
+ Required("name"): str,
+ Optional("project-regex"): str,
+ Optional("ssh-secret-name"): str,
+ # FIXME
+ Extra: str,
+ }
+ },
+ Length(min=1),
+ ),
+ },
+ Extra: object,
+ }
+)
+"""Schema for GraphConfig"""
+
+
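+# A minimal config.yml satisfying the schema above might look like this
+# (all values are illustrative):
+#
+#   trust-domain: myproject
+#   task-priority: low
+#   workers:
+#       aliases:
+#           b-linux:
+#               provisioner: myproject-provisioner
+#               implementation: docker-worker
+#               os: linux
+#               worker-type: b-linux
+#   taskgraph:
+#       repositories:
+#           myproject:
+#               name: myproject
+
+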
+@attr.s(frozen=True, cmp=False)
+class GraphConfig:
+ _config = attr.ib()
+ root_dir = attr.ib()
+
+ _PATH_MODIFIED = False
+
+ def __getitem__(self, name):
+ return self._config[name]
+
+ def __contains__(self, name):
+ return name in self._config
+
+ def register(self):
+ """
+ Add the project's taskgraph directory to the python path, and register
+ any extensions present.
+ """
+ modify_path = os.path.dirname(self.root_dir)
+ if GraphConfig._PATH_MODIFIED:
+ if GraphConfig._PATH_MODIFIED == modify_path:
+ # Already modified path with the same root_dir.
+ # We currently need to do this to enable actions to call
+ # taskgraph_decision, e.g. relpro.
+ return
+ raise Exception("Can't register multiple directories on python path.")
+ GraphConfig._PATH_MODIFIED = modify_path
+ sys.path.insert(0, modify_path)
+ register_path = self["taskgraph"].get("register")
+ if register_path:
+ find_object(register_path)(self)
+
+ @property
+ def vcs_root(self):
+ if path.split(self.root_dir)[-2:] != ["taskcluster", "ci"]:
+ raise Exception(
+ "Not guessing path to vcs root. "
+ "Graph config in non-standard location."
+ )
+ return os.path.dirname(os.path.dirname(self.root_dir))
+
+ @property
+ def taskcluster_yml(self):
+ return os.path.join(self.vcs_root, ".taskcluster.yml")
+
+
+def validate_graph_config(config):
+ validate_schema(graph_config_schema, config, "Invalid graph configuration:")
+
+
+def load_graph_config(root_dir):
+ config_yml = os.path.join(root_dir, "config.yml")
+ if not os.path.exists(config_yml):
+ raise Exception(f"Couldn't find taskgraph configuration: {config_yml}")
+
+ logger.debug(f"loading config from `{config_yml}`")
+ config = load_yaml(config_yml)
+
+ validate_graph_config(config)
+ return GraphConfig(config=config, root_dir=root_dir)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/create.py b/third_party/python/taskcluster_taskgraph/taskgraph/create.py
new file mode 100644
index 0000000000..3661ac8271
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/create.py
@@ -0,0 +1,132 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import concurrent.futures as futures
+import json
+import logging
+import sys
+
+from slugid import nice as slugid
+
+from taskgraph.util.parameterization import resolve_timestamps
+from taskgraph.util.taskcluster import CONCURRENCY, get_session
+from taskgraph.util.time import current_json_time
+
+logger = logging.getLogger(__name__)
+
+# this is set to true for `mach taskgraph action-callback --test`
+testing = False
+
+
+def create_tasks(graph_config, taskgraph, label_to_taskid, params, decision_task_id):
+ taskid_to_label = {t: l for l, t in label_to_taskid.items()}
+
+ # when running as an actual decision task, we use the decision task's
+ # taskId as the taskGroupId. The process that created the decision task
+ # helpfully placed it in this same taskGroup. If there is no $TASK_ID,
+ # fall back to a slugid
+ scheduler_id = "{}-level-{}".format(graph_config["trust-domain"], params["level"])
+
+ # Add the taskGroupId, schedulerId and optionally the decision task
+ # dependency
+ for task_id in taskgraph.graph.nodes:
+ task_def = taskgraph.tasks[task_id].task
+
+ # if this task has no dependencies *within* this taskgraph, make it
+ # depend on this decision task. If it has another dependency within
+ # the taskgraph, then it already implicitly depends on the decision
+ # task. The result is that tasks do not start immediately. if this
+ # loop fails halfway through, none of the already-created tasks run.
+ if not any(t in taskgraph.tasks for t in task_def.get("dependencies", [])):
+ task_def.setdefault("dependencies", []).append(decision_task_id)
+
+ task_def["taskGroupId"] = decision_task_id
+ task_def["schedulerId"] = scheduler_id
+
+ # If `testing` is True, then run without parallelization
+ concurrency = CONCURRENCY if not testing else 1
+ session = get_session()
+ with futures.ThreadPoolExecutor(concurrency) as e:
+ fs = {}
+
+        # We can't submit a task until its dependencies have been submitted,
+        # so our strategy is to walk the graph and submit each task once all
+        # of its dependencies have been submitted.
+ tasklist = set(taskgraph.graph.visit_postorder())
+ alltasks = tasklist.copy()
+
+ def schedule_tasks():
+ # bail out early if any futures have failed
+ if any(f.done() and f.exception() for f in fs.values()):
+ return
+
+ to_remove = set()
+ new = set()
+
+ def submit(task_id, label, task_def):
+ fut = e.submit(create_task, session, task_id, label, task_def)
+ new.add(fut)
+ fs[task_id] = fut
+
+ for task_id in tasklist:
+ task_def = taskgraph.tasks[task_id].task
+ # If we haven't finished submitting all our dependencies yet,
+ # come back to this later.
+ # Some dependencies aren't in our graph, so make sure to filter
+ # those out
+ deps = set(task_def.get("dependencies", [])) & alltasks
+ if any((d not in fs or not fs[d].done()) for d in deps):
+ continue
+
+ submit(task_id, taskid_to_label[task_id], task_def)
+ to_remove.add(task_id)
+
+ # Schedule tasks as many times as task_duplicates indicates
+ attributes = taskgraph.tasks[task_id].attributes
+ for i in range(1, attributes.get("task_duplicates", 1)):
+ # We use slugid() since we want a distinct task id
+ submit(slugid(), taskid_to_label[task_id], task_def)
+ tasklist.difference_update(to_remove)
+
+ # as each of those futures complete, try to schedule more tasks
+ for f in futures.as_completed(new):
+ schedule_tasks()
+
+ # start scheduling tasks and run until everything is scheduled
+ schedule_tasks()
+
+ # check the result of each future, raising an exception if it failed
+ for f in futures.as_completed(fs.values()):
+ f.result()
+
+
+def create_task(session, task_id, label, task_def):
+ # create the task using 'http://taskcluster/queue', which is proxied to the queue service
+ # with credentials appropriate to this job.
+
+ # Resolve timestamps
+ now = current_json_time(datetime_format=True)
+ task_def = resolve_timestamps(now, task_def)
+
+ if testing:
+ json.dump(
+ [task_id, task_def],
+ sys.stdout,
+ sort_keys=True,
+ indent=4,
+ separators=(",", ": "),
+ )
+ # add a newline
+ print("")
+ return
+
+ logger.info(f"Creating task with taskId {task_id} for {label}")
+ res = session.put(f"http://taskcluster/queue/v1/task/{task_id}", json=task_def)
+ if res.status_code != 200:
+ try:
+ logger.error(res.json()["message"])
+ except Exception:
+ logger.error(res.text)
+ res.raise_for_status()
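+
+
+# Illustrative sketch, not part of upstream: submitting a single task outside
+# of `create_tasks`, assuming the taskcluster proxy is reachable. `task_def`
+# must be a complete task definition; the label is hypothetical.
+def _example_create_one_task(task_def):
+    session = get_session()
+    # slugid() yields a fresh, distinct taskId, as create_tasks does for
+    # task duplicates.
+    create_task(session, slugid(), "example-label", task_def)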
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/decision.py b/third_party/python/taskcluster_taskgraph/taskgraph/decision.py
new file mode 100644
index 0000000000..6c5da8c65d
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/decision.py
@@ -0,0 +1,377 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import logging
+import os
+import pathlib
+import shutil
+import time
+from pathlib import Path
+
+import yaml
+from voluptuous import Optional
+
+from taskgraph.actions import render_actions_json
+from taskgraph.create import create_tasks
+from taskgraph.generator import TaskGraphGenerator
+from taskgraph.parameters import Parameters, get_version
+from taskgraph.taskgraph import TaskGraph
+from taskgraph.util.python_path import find_object
+from taskgraph.util.schema import Schema, validate_schema
+from taskgraph.util.vcs import Repository, get_repository
+from taskgraph.util.yaml import load_yaml
+
+logger = logging.getLogger(__name__)
+
+ARTIFACTS_DIR = Path("artifacts")
+
+
+# For each project, this gives a set of parameters specific to the project.
+# See `taskcluster/docs/parameters.rst` for information on parameters.
+PER_PROJECT_PARAMETERS = {
+    # the default parameters are used for projects that have no specific entry.
+ "default": {
+ "target_tasks_method": "default",
+ }
+}
+
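+# Illustrative sketch, not part of upstream: projects can be customized by
+# adding entries alongside "default". Both the project name and the target
+# tasks method below are hypothetical:
+#
+#     PER_PROJECT_PARAMETERS["release"] = {
+#         "target_tasks_method": "release_tasks",
+#     }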
+
+try_task_config_schema_v2 = Schema(
+ {
+ Optional("parameters"): {str: object},
+ }
+)
+
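+# Illustrative sketch, not part of upstream: the shape of a v2
+# `try_task_config.json` accepted by the schema above. `set_try_config`
+# pops `version` before validating and merges `parameters` into the
+# decision parameters; the parameter shown is just an example.
+_EXAMPLE_TRY_TASK_CONFIG_V2 = {
+    "version": 2,
+    "parameters": {"optimize_target_tasks": False},
+}
+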
+
+def full_task_graph_to_runnable_jobs(full_task_json):
+ runnable_jobs = {}
+ for label, node in full_task_json.items():
+ if not ("extra" in node["task"] and "treeherder" in node["task"]["extra"]):
+ continue
+
+ th = node["task"]["extra"]["treeherder"]
+ runnable_jobs[label] = {"symbol": th["symbol"]}
+
+ for i in ("groupName", "groupSymbol", "collection"):
+ if i in th:
+ runnable_jobs[label][i] = th[i]
+ if th.get("machine", {}).get("platform"):
+ runnable_jobs[label]["platform"] = th["machine"]["platform"]
+ return runnable_jobs
+
+
+def taskgraph_decision(options, parameters=None):
+ """
+ Run the decision task. This function implements `mach taskgraph decision`,
+ and is responsible for
+
+ * processing decision task command-line options into parameters
+ * running task-graph generation exactly the same way the other `mach
+ taskgraph` commands do
+ * generating a set of artifacts to memorialize the graph
+ * calling TaskCluster APIs to create the graph
+ """
+
+ parameters = parameters or (
+ lambda graph_config: get_decision_parameters(graph_config, options)
+ )
+
+ decision_task_id = os.environ["TASK_ID"]
+
+ # create a TaskGraphGenerator instance
+ tgg = TaskGraphGenerator(
+ root_dir=options.get("root"),
+ parameters=parameters,
+ decision_task_id=decision_task_id,
+ write_artifacts=True,
+ )
+
+ # write out the parameters used to generate this graph
+ write_artifact("parameters.yml", dict(**tgg.parameters))
+
+ # write out the public/actions.json file
+ write_artifact(
+ "actions.json",
+ render_actions_json(tgg.parameters, tgg.graph_config, decision_task_id),
+ )
+
+ # write out the full graph for reference
+ full_task_json = tgg.full_task_graph.to_json()
+ write_artifact("full-task-graph.json", full_task_json)
+
+ # write out the public/runnable-jobs.json file
+ write_artifact(
+ "runnable-jobs.json", full_task_graph_to_runnable_jobs(full_task_json)
+ )
+
+ # this is just a test to check whether the from_json() function is working
+ _, _ = TaskGraph.from_json(full_task_json)
+
+ # write out the target task set to allow reproducing this as input
+ write_artifact("target-tasks.json", list(tgg.target_task_set.tasks.keys()))
+
+ # write out the optimized task graph to describe what will actually happen,
+ # and the map of labels to taskids
+ write_artifact("task-graph.json", tgg.morphed_task_graph.to_json())
+ write_artifact("label-to-taskid.json", tgg.label_to_taskid)
+
+ # write out current run-task and fetch-content scripts
+ RUN_TASK_DIR = pathlib.Path(__file__).parent / "run-task"
+ shutil.copy2(RUN_TASK_DIR / "run-task", ARTIFACTS_DIR)
+ shutil.copy2(RUN_TASK_DIR / "fetch-content", ARTIFACTS_DIR)
+
+ # actually create the graph
+ create_tasks(
+ tgg.graph_config,
+ tgg.morphed_task_graph,
+ tgg.label_to_taskid,
+ tgg.parameters,
+ decision_task_id=decision_task_id,
+ )
+
+
+def get_decision_parameters(graph_config, options):
+ """
+ Load parameters from the command-line options for 'taskgraph decision'.
+ This also applies per-project parameters, based on the given project.
+
+ """
+ parameters = {
+ n: options[n]
+ for n in [
+ "base_repository",
+ "base_ref",
+ "base_rev",
+ "head_repository",
+ "head_rev",
+ "head_ref",
+ "head_tag",
+ "project",
+ "pushlog_id",
+ "pushdate",
+ "repository_type",
+ "owner",
+ "level",
+ "target_tasks_method",
+ "tasks_for",
+ ]
+ if n in options
+ }
+
+ repo_path = os.getcwd()
+ repo = get_repository(repo_path)
+ try:
+ commit_message = repo.get_commit_message()
+ except UnicodeDecodeError:
+ commit_message = ""
+
+ parameters["base_ref"] = _determine_more_accurate_base_ref(
+ repo,
+ candidate_base_ref=options.get("base_ref"),
+ head_ref=options.get("head_ref"),
+ base_rev=options.get("base_rev"),
+ )
+
+ parameters["base_rev"] = _determine_more_accurate_base_rev(
+ repo,
+ base_ref=parameters["base_ref"],
+ candidate_base_rev=options.get("base_rev"),
+ head_rev=options.get("head_rev"),
+ env_prefix=_get_env_prefix(graph_config),
+ )
+
+ # Define default filter list, as most configurations shouldn't need
+ # custom filters.
+ parameters["filters"] = [
+ "target_tasks_method",
+ ]
+ parameters["optimize_strategies"] = None
+ parameters["optimize_target_tasks"] = True
+ parameters["existing_tasks"] = {}
+ parameters["do_not_optimize"] = []
+ parameters["enable_always_target"] = True
+ parameters["build_number"] = 1
+ parameters["version"] = get_version(repo_path)
+ parameters["next_version"] = None
+
+ # owner must be an email, but sometimes (e.g., for ffxbld) it is not, in which
+ # case, fake it
+ if "@" not in parameters["owner"]:
+ parameters["owner"] += "@noreply.mozilla.org"
+
+ # use the pushdate as build_date if given, else use current time
+ parameters["build_date"] = parameters["pushdate"] or int(time.time())
+ # moz_build_date is the build identifier based on build_date
+ parameters["moz_build_date"] = time.strftime(
+ "%Y%m%d%H%M%S", time.gmtime(parameters["build_date"])
+ )
+
+ project = parameters["project"]
+ try:
+ parameters.update(PER_PROJECT_PARAMETERS[project])
+ except KeyError:
+ logger.warning(
+ "using default project parameters; add {} to "
+ "PER_PROJECT_PARAMETERS in {} to customize behavior "
+ "for this project".format(project, __file__)
+ )
+ parameters.update(PER_PROJECT_PARAMETERS["default"])
+
+ # `target_tasks_method` has higher precedence than `project` parameters
+ if options.get("target_tasks_method"):
+ parameters["target_tasks_method"] = options["target_tasks_method"]
+
+    # ...but can be overridden by the commit message: if it contains the special
+ # string "DONTBUILD" and this is an on-push decision task, then use the
+ # special 'nothing' target task method.
+ if "DONTBUILD" in commit_message and options["tasks_for"] == "hg-push":
+ parameters["target_tasks_method"] = "nothing"
+
+ if options.get("optimize_target_tasks") is not None:
+ parameters["optimize_target_tasks"] = options["optimize_target_tasks"]
+
+ if "decision-parameters" in graph_config["taskgraph"]:
+ find_object(graph_config["taskgraph"]["decision-parameters"])(
+ graph_config, parameters
+ )
+
+ if options.get("try_task_config_file"):
+ task_config_file = os.path.abspath(options.get("try_task_config_file"))
+ else:
+ # if try_task_config.json is present, load it
+ task_config_file = os.path.join(os.getcwd(), "try_task_config.json")
+
+ # load try settings
+ if ("try" in project and options["tasks_for"] == "hg-push") or options[
+ "tasks_for"
+ ] == "github-pull-request":
+ set_try_config(parameters, task_config_file)
+
+ result = Parameters(**parameters)
+ result.check()
+ return result
+
+
+def _determine_more_accurate_base_ref(repo, candidate_base_ref, head_ref, base_rev):
+ base_ref = candidate_base_ref
+
+ if not candidate_base_ref:
+ base_ref = repo.default_branch
+ elif candidate_base_ref == head_ref and base_rev == Repository.NULL_REVISION:
+ logger.info(
+ "base_ref and head_ref are identical but base_rev equals the null revision. "
+ "This is a new branch but Github didn't identify its actual base."
+ )
+ base_ref = repo.default_branch
+
+ if base_ref != candidate_base_ref:
+ logger.info(
+ f'base_ref has been reset from "{candidate_base_ref}" to "{base_ref}".'
+ )
+
+ return base_ref
+
+
+def _determine_more_accurate_base_rev(
+ repo, base_ref, candidate_base_rev, head_rev, env_prefix
+):
+ if not candidate_base_rev:
+ logger.info("base_rev is not set.")
+ base_ref_or_rev = base_ref
+ elif candidate_base_rev == Repository.NULL_REVISION:
+ logger.info("base_rev equals the null revision. This branch is a new one.")
+ base_ref_or_rev = base_ref
+ elif not repo.does_revision_exist_locally(candidate_base_rev):
+ logger.warning(
+ "base_rev does not exist locally. It is likely because the branch was force-pushed. "
+ "taskgraph is not able to assess how many commits were changed and assumes it is only "
+ f"the last one. Please set the {env_prefix.upper()}_BASE_REV environment variable "
+ "in the decision task and provide `--base-rev` to taskgraph."
+ )
+ base_ref_or_rev = base_ref
+ else:
+ base_ref_or_rev = candidate_base_rev
+
+ if base_ref_or_rev == base_ref:
+ logger.info(
+ f'Using base_ref "{base_ref}" to determine latest common revision...'
+ )
+
+ base_rev = repo.find_latest_common_revision(base_ref_or_rev, head_rev)
+ if base_rev != candidate_base_rev:
+ if base_ref_or_rev == candidate_base_rev:
+ logger.info("base_rev is not an ancestor of head_rev.")
+
+ logger.info(
+ f'base_rev has been reset from "{candidate_base_rev}" to "{base_rev}".'
+ )
+
+ return base_rev
+
+
+def _get_env_prefix(graph_config):
+ repo_keys = list(graph_config["taskgraph"].get("repositories", {}).keys())
+ return repo_keys[0] if repo_keys else ""
+
+
+def set_try_config(parameters, task_config_file):
+ if os.path.isfile(task_config_file):
+ logger.info(f"using try tasks from {task_config_file}")
+ with open(task_config_file) as fh:
+ task_config = json.load(fh)
+ task_config_version = task_config.pop("version")
+ if task_config_version == 2:
+ validate_schema(
+ try_task_config_schema_v2,
+ task_config,
+ "Invalid v2 `try_task_config.json`.",
+ )
+ parameters.update(task_config["parameters"])
+ return
+ else:
+ raise Exception(
+ f"Unknown `try_task_config.json` version: {task_config_version}"
+ )
+
+
+def write_artifact(filename, data):
+ logger.info(f"writing artifact file `{filename}`")
+ if not os.path.isdir(ARTIFACTS_DIR):
+ os.mkdir(ARTIFACTS_DIR)
+ path = ARTIFACTS_DIR / filename
+ if filename.endswith(".yml"):
+ with open(path, "w") as f:
+ yaml.safe_dump(data, f, allow_unicode=True, default_flow_style=False)
+ elif filename.endswith(".json"):
+ with open(path, "w") as f:
+ json.dump(data, f, sort_keys=True, indent=2, separators=(",", ": "))
+ elif filename.endswith(".gz"):
+ import gzip
+
+ with gzip.open(path, "wb") as f:
+            # gzip opened in binary mode requires bytes, not str
+            f.write(json.dumps(data).encode("utf-8"))
+ else:
+ raise TypeError(f"Don't know how to write to {filename}")
+
+
+def read_artifact(filename):
+ path = ARTIFACTS_DIR / filename
+ if filename.endswith(".yml"):
+        # `path` already includes the filename; passing both would join it twice
+        return load_yaml(path)
+ elif filename.endswith(".json"):
+ with open(path) as f:
+ return json.load(f)
+ elif filename.endswith(".gz"):
+ import gzip
+
+ with gzip.open(path, "rb") as f:
+ return json.load(f)
+ else:
+ raise TypeError(f"Don't know how to read {filename}")
+
+
+def rename_artifact(src, dest):
+ os.rename(ARTIFACTS_DIR / src, ARTIFACTS_DIR / dest)
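+
+
+# Illustrative sketch, not part of upstream: round-tripping a decision
+# artifact through the helpers above. This writes artifacts/example.json
+# relative to the current working directory.
+def _example_artifact_roundtrip():
+    write_artifact("example.json", {"hello": "world"})
+    return read_artifact("example.json")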
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/docker.py b/third_party/python/taskcluster_taskgraph/taskgraph/docker.py
new file mode 100644
index 0000000000..c142f36391
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/docker.py
@@ -0,0 +1,215 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import os
+import tarfile
+from io import BytesIO
+from textwrap import dedent
+
+try:
+ import zstandard as zstd
+except ImportError as e:
+ zstd = e
+
+from taskgraph.util import docker
+from taskgraph.util.taskcluster import get_artifact_url, get_session
+
+
+def get_image_digest(image_name):
+ from taskgraph.generator import load_tasks_for_kind
+ from taskgraph.parameters import Parameters
+
+ params = Parameters(
+ level=os.environ.get("MOZ_SCM_LEVEL", "3"),
+ strict=False,
+ )
+ tasks = load_tasks_for_kind(params, "docker-image")
+ task = tasks[f"build-docker-image-{image_name}"]
+ return task.attributes["cached_task"]["digest"]
+
+
+def load_image_by_name(image_name, tag=None):
+ from taskgraph.generator import load_tasks_for_kind
+ from taskgraph.optimize import IndexSearch
+ from taskgraph.parameters import Parameters
+
+ params = Parameters(
+ level=os.environ.get("MOZ_SCM_LEVEL", "3"),
+ strict=False,
+ )
+ tasks = load_tasks_for_kind(params, "docker-image")
+ task = tasks[f"build-docker-image-{image_name}"]
+ task_id = IndexSearch().should_replace_task(
+ task, {}, task.optimization.get("index-search", [])
+ )
+
+ if task_id in (True, False):
+ print(
+ "Could not find artifacts for a docker image "
+ "named `{image_name}`. Local commits and other changes "
+ "in your checkout may cause this error. Try "
+ "updating to a fresh checkout of mozilla-central "
+ "to download image.".format(image_name=image_name)
+ )
+ return False
+
+ return load_image_by_task_id(task_id, tag)
+
+
+def load_image_by_task_id(task_id, tag=None):
+ artifact_url = get_artifact_url(task_id, "public/image.tar.zst")
+ result = load_image(artifact_url, tag)
+ print("Found docker image: {}:{}".format(result["image"], result["tag"]))
+ if tag:
+ print(f"Re-tagged as: {tag}")
+ else:
+ tag = "{}:{}".format(result["image"], result["tag"])
+ print(f"Try: docker run -ti --rm {tag} bash")
+ return True
+
+
+def build_context(name, outputFile, args=None):
+ """Build a context.tar for image with specified name."""
+ if not name:
+ raise ValueError("must provide a Docker image name")
+ if not outputFile:
+ raise ValueError("must provide a outputFile")
+
+ image_dir = docker.image_path(name)
+ if not os.path.isdir(image_dir):
+ raise Exception("image directory does not exist: %s" % image_dir)
+
+ docker.create_context_tar(".", image_dir, outputFile, args)
+
+
+def build_image(name, tag, args=None):
+ """Build a Docker image of specified name.
+
+ Output from image building process will be printed to stdout.
+ """
+ if not name:
+ raise ValueError("must provide a Docker image name")
+
+ image_dir = docker.image_path(name)
+ if not os.path.isdir(image_dir):
+ raise Exception("image directory does not exist: %s" % image_dir)
+
+ tag = tag or docker.docker_image(name, by_tag=True)
+
+ buf = BytesIO()
+ docker.stream_context_tar(".", image_dir, buf, "", args)
+ docker.post_to_docker(buf.getvalue(), "/build", nocache=1, t=tag)
+
+ print(f"Successfully built {name} and tagged with {tag}")
+
+ if tag.endswith(":latest"):
+ print("*" * 50)
+ print("WARNING: no VERSION file found in image directory.")
+ print("Image is not suitable for deploying/pushing.")
+ print("Create an image suitable for deploying/pushing by creating")
+ print("a VERSION file in the image directory.")
+ print("*" * 50)
+
+
+def load_image(url, imageName=None, imageTag=None):
+ """
+    Load a docker image from a URL as imageName:imageTag. If no imageName or
+    imageTag is given, use whatever is inside the zstd-compressed tarball.
+
+ Returns an object with properties 'image', 'tag' and 'layer'.
+ """
+ if isinstance(zstd, ImportError):
+ raise ImportError(
+ dedent(
+ """
+ zstandard is not installed! Use `pip install taskcluster-taskgraph[load-image]`
+ to use this feature.
+ """
+ )
+ ) from zstd
+
+ # If imageName is given and we don't have an imageTag
+ # we parse out the imageTag from imageName, or default it to 'latest'
+ # if no imageName and no imageTag is given, 'repositories' won't be rewritten
+ if imageName and not imageTag:
+ if ":" in imageName:
+ imageName, imageTag = imageName.split(":", 1)
+ else:
+ imageTag = "latest"
+
+ info = {}
+
+ def download_and_modify_image():
+ # This function downloads and edits the downloaded tar file on the fly.
+ # It emits chunked buffers of the edited tar file, as a generator.
+ print(f"Downloading from {url}")
+ # get_session() gets us a requests.Session set to retry several times.
+ req = get_session().get(url, stream=True)
+ req.raise_for_status()
+
+ with zstd.ZstdDecompressor().stream_reader(req.raw) as ifh:
+
+ tarin = tarfile.open(
+ mode="r|",
+ fileobj=ifh,
+ bufsize=zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE,
+ )
+
+ # Stream through each member of the downloaded tar file individually.
+ for member in tarin:
+ # Non-file members only need a tar header. Emit one.
+ if not member.isfile():
+ yield member.tobuf(tarfile.GNU_FORMAT)
+ continue
+
+ # Open stream reader for the member
+ reader = tarin.extractfile(member)
+
+ # If member is `repositories`, we parse and possibly rewrite the
+ # image tags.
+ if member.name == "repositories":
+ # Read and parse repositories
+ repos = json.loads(reader.read())
+ reader.close()
+
+ # If there is more than one image or tag, we can't handle it
+ # here.
+ if len(repos.keys()) > 1:
+ raise Exception("file contains more than one image")
+ info["image"] = image = list(repos.keys())[0]
+ if len(repos[image].keys()) > 1:
+ raise Exception("file contains more than one tag")
+ info["tag"] = tag = list(repos[image].keys())[0]
+ info["layer"] = layer = repos[image][tag]
+
+ # Rewrite the repositories file
+ data = json.dumps({imageName or image: {imageTag or tag: layer}})
+ reader = BytesIO(data.encode("utf-8"))
+ member.size = len(data)
+
+ # Emit the tar header for this member.
+ yield member.tobuf(tarfile.GNU_FORMAT)
+ # Then emit its content.
+ remaining = member.size
+ while remaining:
+ length = min(remaining, zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE)
+ buf = reader.read(length)
+ remaining -= len(buf)
+ yield buf
+ # Pad to fill a 512 bytes block, per tar format.
+ remainder = member.size % 512
+ if remainder:
+ yield ("\0" * (512 - remainder)).encode("utf-8")
+
+ reader.close()
+
+ docker.post_to_docker(download_and_modify_image(), "/images/load", quiet=0)
+
+ # Check that we found a repositories file
+ if not info.get("image") or not info.get("tag") or not info.get("layer"):
+ raise Exception("No repositories file found!")
+
+ return info
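+
+
+# Illustrative sketch, not part of upstream: loading the image produced by a
+# hypothetical `build-docker-image-linux` task and re-tagging it locally.
+def _example_load_linux_image():
+    return load_image_by_name("linux", tag="linux:latest")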
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/files_changed.py b/third_party/python/taskcluster_taskgraph/taskgraph/files_changed.py
new file mode 100644
index 0000000000..6be6e5eeee
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/files_changed.py
@@ -0,0 +1,91 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Support for optimizing tasks based on the set of files that have changed.
+"""
+
+
+import logging
+import os
+
+import requests
+from redo import retry
+
+from .util.memoize import memoize
+from .util.path import match as match_path
+from .util.vcs import get_repository
+
+logger = logging.getLogger(__name__)
+
+
+@memoize
+def get_changed_files(head_repository_url, head_rev, base_rev=None):
+ """
+ Get the set of files changed between revisions.
+ Responses are cached, so multiple calls with the same arguments are OK.
+ """
+ repo_path = os.getcwd()
+ repository = get_repository(repo_path)
+
+ if repository.tool == "hg":
+ # TODO Use VCS version once tested enough
+ return _get_changed_files_json_automationrelevance(
+ head_repository_url, head_rev
+ )
+
+ return repository.get_changed_files(rev=head_rev, base_rev=base_rev)
+
+
+def _get_changed_files_json_automationrelevance(head_repository_url, head_rev):
+ """
+ Get the set of files changed in the push headed by the given revision.
+ """
+ url = "{}/json-automationrelevance/{}".format(
+ head_repository_url.rstrip("/"), head_rev
+ )
+ logger.debug("Querying version control for metadata: %s", url)
+
+ def get_automationrelevance():
+ response = requests.get(url, timeout=30)
+ return response.json()
+
+ contents = retry(get_automationrelevance, attempts=10, sleeptime=10)
+
+ logger.debug(
+ "{} commits influencing task scheduling:".format(len(contents["changesets"]))
+ )
+ changed_files = set()
+ for c in contents["changesets"]:
+ desc = "" # Support empty desc
+ if c["desc"]:
+ desc = c["desc"].splitlines()[0].encode("ascii", "ignore")
+ logger.debug(" {cset} {desc}".format(cset=c["node"][0:12], desc=desc))
+ changed_files |= set(c["files"])
+
+ return changed_files
+
+
+def check(params, file_patterns):
+ """Determine whether any of the files changed between 2 revisions
+ match any of the given file patterns."""
+
+ head_repository_url = params.get("head_repository")
+ head_rev = params.get("head_rev")
+ if not head_repository_url or not head_rev:
+ logger.warning(
+ "Missing `head_repository` or `head_rev` parameters; "
+ "assuming all files have changed"
+ )
+ return True
+
+ base_rev = params.get("base_rev")
+ changed_files = get_changed_files(head_repository_url, head_rev, base_rev)
+
+ for pattern in file_patterns:
+ for path in changed_files:
+ if match_path(path, pattern):
+ return True
+
+ return False
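+
+
+# Illustrative sketch, not part of upstream: deciding whether a push touched
+# anything under a hypothetical `taskcluster/` directory. Pattern syntax is
+# whatever `taskgraph.util.path.match` supports.
+def _example_taskcluster_changed(params):
+    return check(params, ["taskcluster/**"])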
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/filter_tasks.py b/third_party/python/taskcluster_taskgraph/taskgraph/filter_tasks.py
new file mode 100644
index 0000000000..63bd2874d6
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/filter_tasks.py
@@ -0,0 +1,34 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+
+from . import target_tasks
+
+logger = logging.getLogger(__name__)
+
+filter_task_functions = {}
+
+
+def filter_task(name):
+ """Generator to declare a task filter function."""
+
+ def wrap(func):
+ filter_task_functions[name] = func
+ return func
+
+ return wrap
+
+
+@filter_task("target_tasks_method")
+def filter_target_tasks(graph, parameters, graph_config):
+ """Proxy filter to use legacy target tasks code.
+
+ This should go away once target_tasks are converted to filters.
+ """
+
+ attr = parameters.get("target_tasks_method", "all_tasks")
+ fn = target_tasks.get_method(attr)
+ return fn(graph, parameters, graph_config)
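+
+
+# Illustrative sketch, not part of upstream: registering a custom filter.
+# Filters receive the current task graph, the parameters, and the graph
+# config, and return the labels to keep; the "docs" kind is hypothetical.
+@filter_task("example_docs_only")
+def _example_docs_only(graph, parameters, graph_config):
+    return [label for label, task in graph.tasks.items() if task.kind == "docs"]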
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/generator.py b/third_party/python/taskcluster_taskgraph/taskgraph/generator.py
new file mode 100644
index 0000000000..e1b900cf65
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/generator.py
@@ -0,0 +1,449 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import copy
+import logging
+import os
+from typing import AnyStr
+
+import attr
+
+from . import filter_tasks
+from .config import GraphConfig, load_graph_config
+from .graph import Graph
+from .morph import morph
+from .optimize.base import optimize_task_graph
+from .parameters import parameters_loader
+from .task import Task
+from .taskgraph import TaskGraph
+from .transforms.base import TransformConfig, TransformSequence
+from .util.python_path import find_object
+from .util.verify import verifications
+from .util.yaml import load_yaml
+
+logger = logging.getLogger(__name__)
+
+
+class KindNotFound(Exception):
+ """
+ Raised when trying to load kind from a directory without a kind.yml.
+ """
+
+
+@attr.s(frozen=True)
+class Kind:
+
+ name = attr.ib(type=AnyStr)
+ path = attr.ib(type=AnyStr)
+ config = attr.ib(type=dict)
+ graph_config = attr.ib(type=GraphConfig)
+
+ def _get_loader(self):
+ try:
+ loader = self.config["loader"]
+ except KeyError:
+ raise KeyError(f"{self.path!r} does not define `loader`")
+ return find_object(loader)
+
+ def load_tasks(self, parameters, loaded_tasks, write_artifacts):
+ loader = self._get_loader()
+ config = copy.deepcopy(self.config)
+
+ kind_dependencies = config.get("kind-dependencies", [])
+ kind_dependencies_tasks = {
+ task.label: task for task in loaded_tasks if task.kind in kind_dependencies
+ }
+
+ inputs = loader(self.name, self.path, config, parameters, loaded_tasks)
+
+ transforms = TransformSequence()
+ for xform_path in config["transforms"]:
+ transform = find_object(xform_path)
+ transforms.add(transform)
+
+ # perform the transformations on the loaded inputs
+ trans_config = TransformConfig(
+ self.name,
+ self.path,
+ config,
+ parameters,
+ kind_dependencies_tasks,
+ self.graph_config,
+ write_artifacts=write_artifacts,
+ )
+ tasks = [
+ Task(
+ self.name,
+ label=task_dict["label"],
+ description=task_dict["description"],
+ attributes=task_dict["attributes"],
+ task=task_dict["task"],
+ optimization=task_dict.get("optimization"),
+ dependencies=task_dict.get("dependencies"),
+ soft_dependencies=task_dict.get("soft-dependencies"),
+ if_dependencies=task_dict.get("if-dependencies"),
+ )
+ for task_dict in transforms(trans_config, inputs)
+ ]
+ return tasks
+
+ @classmethod
+ def load(cls, root_dir, graph_config, kind_name):
+ path = os.path.join(root_dir, kind_name)
+ kind_yml = os.path.join(path, "kind.yml")
+ if not os.path.exists(kind_yml):
+ raise KindNotFound(kind_yml)
+
+ logger.debug(f"loading kind `{kind_name}` from `{path}`")
+ config = load_yaml(kind_yml)
+
+ return cls(kind_name, path, config, graph_config)
+
+
+class TaskGraphGenerator:
+ """
+ The central controller for taskgraph. This handles all phases of graph
+    generation. The task graph is generated from all of the kinds defined in
+ subdirectories of the generator's root directory.
+
+ Access to the results of this generation, as well as intermediate values at
+ various phases of generation, is available via properties. This encourages
+ the provision of all generation inputs at instance construction time.
+ """
+
+ # Task-graph generation is implemented as a Python generator that yields
+ # each "phase" of generation. This allows some mach subcommands to short-
+ # circuit generation of the entire graph by never completing the generator.
+
+ def __init__(
+ self,
+ root_dir,
+ parameters,
+ decision_task_id="DECISION-TASK",
+ write_artifacts=False,
+ ):
+ """
+ @param root_dir: root directory, with subdirectories for each kind
+ @param parameters: parameters for this task-graph generation, or callable
+ taking a `GraphConfig` and returning parameters
+ @type parameters: Union[Parameters, Callable[[GraphConfig], Parameters]]
+ """
+ if root_dir is None:
+ root_dir = "taskcluster/ci"
+ self.root_dir = root_dir
+ self._parameters = parameters
+ self._decision_task_id = decision_task_id
+ self._write_artifacts = write_artifacts
+
+ # start the generator
+ self._run = self._run()
+ self._run_results = {}
+
+ @property
+ def parameters(self):
+ """
+        The parameters used for this graph.
+
+        @type: Parameters
+ """
+ return self._run_until("parameters")
+
+ @property
+ def full_task_set(self):
+ """
+ The full task set: all tasks defined by any kind (a graph without edges)
+
+ @type: TaskGraph
+ """
+ return self._run_until("full_task_set")
+
+ @property
+ def full_task_graph(self):
+ """
+ The full task graph: the full task set, with edges representing
+ dependencies.
+
+ @type: TaskGraph
+ """
+ return self._run_until("full_task_graph")
+
+ @property
+ def target_task_set(self):
+ """
+ The set of targeted tasks (a graph without edges)
+
+ @type: TaskGraph
+ """
+ return self._run_until("target_task_set")
+
+ @property
+ def target_task_graph(self):
+ """
+ The set of targeted tasks and all of their dependencies
+
+ @type: TaskGraph
+ """
+ return self._run_until("target_task_graph")
+
+ @property
+ def optimized_task_graph(self):
+ """
+ The set of targeted tasks and all of their dependencies; tasks that
+ have been optimized out are either omitted or replaced with a Task
+ instance containing only a task_id.
+
+ @type: TaskGraph
+ """
+ return self._run_until("optimized_task_graph")
+
+ @property
+ def label_to_taskid(self):
+ """
+ A dictionary mapping task label to assigned taskId. This property helps
+ in interpreting `optimized_task_graph`.
+
+ @type: dictionary
+ """
+ return self._run_until("label_to_taskid")
+
+ @property
+ def morphed_task_graph(self):
+ """
+ The optimized task graph, with any subsequent morphs applied. This graph
+ will have the same meaning as the optimized task graph, but be in a form
+ more palatable to TaskCluster.
+
+ @type: TaskGraph
+ """
+ return self._run_until("morphed_task_graph")
+
+ @property
+ def graph_config(self):
+ """
+ The configuration for this graph.
+
+        @type: GraphConfig
+ """
+ return self._run_until("graph_config")
+
+ def _load_kinds(self, graph_config, target_kind=None):
+ if target_kind:
+ # docker-image is an implicit dependency that never appears in
+ # kind-dependencies.
+ queue = [target_kind, "docker-image"]
+ seen_kinds = set()
+ while queue:
+ kind_name = queue.pop()
+ if kind_name in seen_kinds:
+ continue
+ seen_kinds.add(kind_name)
+ kind = Kind.load(self.root_dir, graph_config, kind_name)
+ yield kind
+ queue.extend(kind.config.get("kind-dependencies", []))
+ else:
+ for kind_name in os.listdir(self.root_dir):
+ try:
+ yield Kind.load(self.root_dir, graph_config, kind_name)
+ except KindNotFound:
+ continue
+
+ def _run(self):
+ logger.info("Loading graph configuration.")
+ graph_config = load_graph_config(self.root_dir)
+
+ yield ("graph_config", graph_config)
+
+ graph_config.register()
+
+ # Initial verifications that don't depend on any generation state.
+ verifications("initial")
+
+ if callable(self._parameters):
+ parameters = self._parameters(graph_config)
+ else:
+ parameters = self._parameters
+
+ logger.info(f"Using {parameters}")
+ logger.debug(f"Dumping parameters:\n{repr(parameters)}")
+
+ filters = parameters.get("filters", [])
+ # Always add legacy target tasks method until we deprecate that API.
+ if "target_tasks_method" not in filters:
+ filters.insert(0, "target_tasks_method")
+ filters = [filter_tasks.filter_task_functions[f] for f in filters]
+
+ yield self.verify("parameters", parameters)
+
+ logger.info("Loading kinds")
+ # put the kinds into a graph and sort topologically so that kinds are loaded
+ # in post-order
+ if parameters.get("target-kind"):
+ target_kind = parameters["target-kind"]
+ logger.info(
+ "Limiting kinds to {target_kind} and dependencies".format(
+ target_kind=target_kind
+ )
+ )
+ kinds = {
+ kind.name: kind
+ for kind in self._load_kinds(graph_config, parameters.get("target-kind"))
+ }
+ verifications("kinds", kinds)
+
+ edges = set()
+ for kind in kinds.values():
+ for dep in kind.config.get("kind-dependencies", []):
+ edges.add((kind.name, dep, "kind-dependency"))
+ kind_graph = Graph(set(kinds), edges)
+
+ if parameters.get("target-kind"):
+ kind_graph = kind_graph.transitive_closure({target_kind, "docker-image"})
+
+ logger.info("Generating full task set")
+ all_tasks = {}
+ for kind_name in kind_graph.visit_postorder():
+ logger.debug(f"Loading tasks for kind {kind_name}")
+ kind = kinds[kind_name]
+ try:
+ new_tasks = kind.load_tasks(
+ parameters,
+ list(all_tasks.values()),
+ self._write_artifacts,
+ )
+ except Exception:
+ logger.exception(f"Error loading tasks for kind {kind_name}:")
+ raise
+ for task in new_tasks:
+ if task.label in all_tasks:
+ raise Exception("duplicate tasks with label " + task.label)
+ all_tasks[task.label] = task
+ logger.info(f"Generated {len(new_tasks)} tasks for kind {kind_name}")
+ full_task_set = TaskGraph(all_tasks, Graph(set(all_tasks), set()))
+ yield self.verify("full_task_set", full_task_set, graph_config, parameters)
+
+ logger.info("Generating full task graph")
+ edges = set()
+ for t in full_task_set:
+ for depname, dep in t.dependencies.items():
+ edges.add((t.label, dep, depname))
+
+ full_task_graph = TaskGraph(all_tasks, Graph(full_task_set.graph.nodes, edges))
+ logger.info(
+ "Full task graph contains %d tasks and %d dependencies"
+ % (len(full_task_set.graph.nodes), len(edges))
+ )
+ yield self.verify("full_task_graph", full_task_graph, graph_config, parameters)
+
+ logger.info("Generating target task set")
+ target_task_set = TaskGraph(
+ dict(all_tasks), Graph(set(all_tasks.keys()), set())
+ )
+ for fltr in filters:
+ old_len = len(target_task_set.graph.nodes)
+ target_tasks = set(fltr(target_task_set, parameters, graph_config))
+ target_task_set = TaskGraph(
+ {l: all_tasks[l] for l in target_tasks}, Graph(target_tasks, set())
+ )
+ logger.info(
+ "Filter %s pruned %d tasks (%d remain)"
+ % (fltr.__name__, old_len - len(target_tasks), len(target_tasks))
+ )
+
+ yield self.verify("target_task_set", target_task_set, graph_config, parameters)
+
+ logger.info("Generating target task graph")
+ # include all docker-image build tasks here, in case they are needed for a graph morph
+ docker_image_tasks = {
+ t.label
+ for t in full_task_graph.tasks.values()
+ if t.attributes["kind"] == "docker-image"
+ }
+ # include all tasks with `always_target` set
+ if parameters["enable_always_target"]:
+ always_target_tasks = {
+ t.label
+ for t in full_task_graph.tasks.values()
+ if t.attributes.get("always_target")
+ }
+ else:
+ always_target_tasks = set()
+ logger.info(
+ "Adding %d tasks with `always_target` attribute"
+ % (len(always_target_tasks) - len(always_target_tasks & target_tasks))
+ )
+ requested_tasks = target_tasks | docker_image_tasks | always_target_tasks
+ target_graph = full_task_graph.graph.transitive_closure(requested_tasks)
+ target_task_graph = TaskGraph(
+ {l: all_tasks[l] for l in target_graph.nodes}, target_graph
+ )
+ yield self.verify(
+ "target_task_graph", target_task_graph, graph_config, parameters
+ )
+
+ logger.info("Generating optimized task graph")
+ existing_tasks = parameters.get("existing_tasks")
+ do_not_optimize = set(parameters.get("do_not_optimize", []))
+ if not parameters.get("optimize_target_tasks", True):
+ do_not_optimize = set(target_task_set.graph.nodes).union(do_not_optimize)
+
+ # this is used for testing experimental optimization strategies
+ strategies = os.environ.get(
+ "TASKGRAPH_OPTIMIZE_STRATEGIES", parameters.get("optimize_strategies")
+ )
+ if strategies:
+ strategies = find_object(strategies)
+
+ optimized_task_graph, label_to_taskid = optimize_task_graph(
+ target_task_graph,
+ requested_tasks,
+ parameters,
+ do_not_optimize,
+ self._decision_task_id,
+ existing_tasks=existing_tasks,
+ strategy_override=strategies,
+ )
+
+ yield self.verify(
+ "optimized_task_graph", optimized_task_graph, graph_config, parameters
+ )
+
+ morphed_task_graph, label_to_taskid = morph(
+ optimized_task_graph, label_to_taskid, parameters, graph_config
+ )
+
+ yield "label_to_taskid", label_to_taskid
+ yield self.verify(
+ "morphed_task_graph", morphed_task_graph, graph_config, parameters
+ )
+
+ def _run_until(self, name):
+ while name not in self._run_results:
+ try:
+ k, v = next(self._run)
+ except StopIteration:
+ raise AttributeError(f"No such run result {name}")
+ self._run_results[k] = v
+ return self._run_results[name]
+
+ def verify(self, name, obj, *args, **kwargs):
+ verifications(name, obj, *args, **kwargs)
+ return name, obj
+
+
+def load_tasks_for_kind(parameters, kind, root_dir=None):
+ """
+ Get all the tasks of a given kind.
+
+ This function is designed to be called from outside of taskgraph.
+ """
+ # make parameters read-write
+ parameters = dict(parameters)
+ parameters["target-kind"] = kind
+ parameters = parameters_loader(spec=None, strict=False, overrides=parameters)
+ tgg = TaskGraphGenerator(root_dir=root_dir, parameters=parameters)
+ return {
+ task.task["metadata"]["name"]: task
+ for task in tgg.full_task_set
+ if task.kind == kind
+ }
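+
+
+# Illustrative sketch, not part of upstream: fetching all tasks of one kind
+# from outside taskgraph, much as `taskgraph.docker` does for docker images.
+def _example_docker_image_tasks():
+    from taskgraph.parameters import Parameters
+
+    params = Parameters(strict=False)
+    return load_tasks_for_kind(params, "docker-image")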
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/graph.py b/third_party/python/taskcluster_taskgraph/taskgraph/graph.py
new file mode 100644
index 0000000000..cdd280e2b1
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/graph.py
@@ -0,0 +1,134 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import collections
+
+import attr
+
+
+@attr.s(frozen=True)
+class Graph:
+ """Generic representation of a directed acyclic graph with labeled edges
+ connecting the nodes. Graph operations are implemented in a functional
+ manner, so the data structure is immutable.
+
+    It permits at most one edge of a given name between any pair of nodes. The
+ graph is not checked for cycles, and methods may hang or otherwise fail if
+ given a cyclic graph.
+
+ The `nodes` and `edges` attributes may be accessed in a read-only fashion.
+ The `nodes` attribute is a set of node names, while `edges` is a set of
+ `(left, right, name)` tuples representing an edge named `name` going from
+    node `left` to node `right`.
+ """
+
+ nodes = attr.ib(converter=frozenset)
+ edges = attr.ib(converter=frozenset)
+
+ def transitive_closure(self, nodes, reverse=False):
+ """Return the transitive closure of <nodes>: the graph containing all
+ specified nodes as well as any nodes reachable from them, and any
+ intervening edges.
+
+ If `reverse` is true, the "reachability" will be reversed and this
+ will return the set of nodes that can reach the specified nodes.
+
+ Example:
+
+ .. code-block::
+
+ a ------> b ------> c
+ |
+ `-------> d
+
+        transitive_closure([b]).nodes == set([b, c, d])
+        transitive_closure([c]).nodes == set([c])
+        transitive_closure([c], reverse=True).nodes == set([a, b, c])
+        transitive_closure([b], reverse=True).nodes == set([a, b])
+ """
+ assert isinstance(nodes, set)
+ if not (nodes <= self.nodes):
+ raise Exception(
+ f"Unknown nodes in transitive closure: {nodes - self.nodes}"
+ )
+
+ # generate a new graph by expanding along edges until reaching a fixed
+ # point
+ new_nodes, new_edges = nodes, set()
+ nodes, edges = set(), set()
+ while (new_nodes, new_edges) != (nodes, edges):
+ nodes, edges = new_nodes, new_edges
+ add_edges = {
+ (left, right, name)
+ for (left, right, name) in self.edges
+ if (right if reverse else left) in nodes
+ }
+ add_nodes = {(left if reverse else right) for (left, right, _) in add_edges}
+ new_nodes = nodes | add_nodes
+ new_edges = edges | add_edges
+ return Graph(new_nodes, new_edges)
+
+ def _visit(self, reverse):
+ queue = collections.deque(sorted(self.nodes))
+ links_by_node = self.reverse_links_dict() if reverse else self.links_dict()
+ seen = set()
+ while queue:
+ node = queue.popleft()
+ if node in seen:
+ continue
+ links = links_by_node[node]
+ if all((n in seen) for n in links):
+ seen.add(node)
+ yield node
+ else:
+ queue.extend(n for n in links if n not in seen)
+ queue.append(node)
+
+ def visit_postorder(self):
+ """
+ Generate a sequence of nodes in postorder, such that every node is
+ visited *after* any nodes it links to.
+
+ Behavior is undefined (read: it will hang) if the graph contains a
+ cycle.
+ """
+ return self._visit(False)
+
+ def visit_preorder(self):
+ """
+        Like visit_postorder, but in reverse: every node is visited *before*
+ any nodes it links to.
+ """
+ return self._visit(True)
+
+ def links_dict(self):
+ """
+ Return a dictionary mapping each node to a set of the nodes it links to
+ (omitting edge names)
+ """
+ links = collections.defaultdict(set)
+ for left, right, _ in self.edges:
+ links[left].add(right)
+ return links
+
+ def named_links_dict(self):
+ """
+ Return a two-level dictionary mapping each node to a dictionary mapping
+        edge names to the nodes those edges point to.
+ """
+ links = collections.defaultdict(dict)
+ for left, right, name in self.edges:
+ links[left][name] = right
+ return links
+
+ def reverse_links_dict(self):
+ """
+ Return a dictionary mapping each node to a set of the nodes linking to
+ it (omitting edge names)
+ """
+ links = collections.defaultdict(set)
+ for left, right, _ in self.edges:
+ links[right].add(left)
+ return links
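+
+
+# Illustrative sketch, not part of upstream: the graph from the
+# `transitive_closure` docstring, walked in postorder (dependencies first).
+def _example_graph_walk():
+    g = Graph(
+        nodes={"a", "b", "c", "d"},
+        edges={("a", "b", "e1"), ("b", "c", "e2"), ("b", "d", "e3")},
+    )
+    assert g.transitive_closure({"b"}).nodes == {"b", "c", "d"}
+    # Yields c and d before b, and b before a.
+    return list(g.visit_postorder())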
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/loader/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/loader/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/loader/__init__.py
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/loader/transform.py b/third_party/python/taskcluster_taskgraph/taskgraph/loader/transform.py
new file mode 100644
index 0000000000..a134ffd127
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/loader/transform.py
@@ -0,0 +1,58 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+
+from taskgraph.util.templates import merge
+from taskgraph.util.yaml import load_yaml
+
+logger = logging.getLogger(__name__)
+
+
+def loader(kind, path, config, params, loaded_tasks):
+ """
+ Get the input elements that will be transformed into tasks in a generic
+ way. The elements themselves are free-form, and become the input to the
+ first transform.
+
+ By default, this reads tasks from the `tasks` key, or from yaml files
+ named by `tasks-from`. The entities are read from mappings, and the
+ keys to those mappings are added in the `name` key of each entity.
+
+ If there is a `task-defaults` config, then every task is merged with it.
+ This provides a simple way to set default values for all tasks of a kind.
+ The `task-defaults` key can also be specified in a yaml file pointed to by
+ `tasks-from`. In this case it will only apply to tasks defined in the same
+ file.
+
+ Other kind implementations can use a different loader function to
+ produce inputs and hand them to `transform_inputs`.
+ """
+
+ def generate_tasks():
+ defaults = config.get("task-defaults")
+ for name, task in config.get("tasks", {}).items():
+ if defaults:
+ task = merge(defaults, task)
+ task["task-from"] = "kind.yml"
+ yield name, task
+
+ for filename in config.get("tasks-from", []):
+ tasks = load_yaml(path, filename)
+
+ file_defaults = tasks.pop("task-defaults", None)
+ if defaults:
+ file_defaults = merge(defaults, file_defaults or {})
+
+ for name, task in tasks.items():
+ if file_defaults:
+ task = merge(file_defaults, task)
+ task["task-from"] = filename
+ yield name, task
+
+ for name, task in generate_tasks():
+ task["name"] = name
+ logger.debug(f"Generating tasks for {kind} {name}")
+ yield task
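+
+
+# Illustrative sketch, not part of upstream: a hypothetical kind.yml consumed
+# by this loader. Every task is merged with `task-defaults` and given `name`
+# and `task-from` keys before being handed to the transforms:
+#
+#     loader: taskgraph.loader.transform:loader
+#     transforms:
+#         - taskgraph.transforms.task:transforms
+#     task-defaults:
+#         worker-type: t-linux
+#     tasks:
+#         hello:
+#             description: An example task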
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/main.py b/third_party/python/taskcluster_taskgraph/taskgraph/main.py
new file mode 100644
index 0000000000..88f2f6d37d
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/main.py
@@ -0,0 +1,756 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import atexit
+import json
+import logging
+import os
+import re
+import shutil
+import subprocess
+import sys
+import tempfile
+import traceback
+from collections import namedtuple
+from concurrent.futures import ProcessPoolExecutor, as_completed
+from pathlib import Path
+from typing import Any, List
+
+import appdirs
+import yaml
+
+Command = namedtuple("Command", ["func", "args", "kwargs", "defaults"])
+commands = {}
+
+
+def command(*args, **kwargs):
+ defaults = kwargs.pop("defaults", {})
+
+ def decorator(func):
+ commands[args[0]] = Command(func, args, kwargs, defaults)
+ return func
+
+ return decorator
+
+
+def argument(*args, **kwargs):
+ def decorator(func):
+ if not hasattr(func, "args"):
+ func.args = []
+ func.args.append((args, kwargs))
+ return func
+
+ return decorator
+
+
+def format_taskgraph_labels(taskgraph):
+ return "\n".join(
+ sorted(
+ taskgraph.tasks[index].label for index in taskgraph.graph.visit_postorder()
+ )
+ )
+
+
+def format_taskgraph_json(taskgraph):
+ return json.dumps(
+ taskgraph.to_json(), sort_keys=True, indent=2, separators=(",", ": ")
+ )
+
+
+def format_taskgraph_yaml(taskgraph):
+ return yaml.safe_dump(taskgraph.to_json(), default_flow_style=False)
+
+
+def get_filtered_taskgraph(taskgraph, tasksregex, exclude_keys):
+ """
+    Filter all the tasks on the basis of a regular expression
+    and return a new TaskGraph object.
+ """
+ from taskgraph.graph import Graph
+ from taskgraph.task import Task
+ from taskgraph.taskgraph import TaskGraph
+
+ if tasksregex:
+ named_links_dict = taskgraph.graph.named_links_dict()
+ filteredtasks = {}
+ filterededges = set()
+ regexprogram = re.compile(tasksregex)
+
+ for key in taskgraph.graph.visit_postorder():
+ task = taskgraph.tasks[key]
+ if regexprogram.match(task.label):
+ filteredtasks[key] = task
+ for depname, dep in named_links_dict[key].items():
+ if regexprogram.match(dep):
+ filterededges.add((key, dep, depname))
+
+ taskgraph = TaskGraph(filteredtasks, Graph(set(filteredtasks), filterededges))
+
+ if exclude_keys:
+ for label, task in taskgraph.tasks.items():
+ task = task.to_json()
+ for key in exclude_keys:
+ obj = task
+ attrs = key.split(".")
+ while attrs[0] in obj:
+ if len(attrs) == 1:
+ del obj[attrs[0]]
+ break
+ obj = obj[attrs[0]]
+ attrs = attrs[1:]
+ taskgraph.tasks[label] = Task.from_json(task)
+
+ return taskgraph
+
+
+FORMAT_METHODS = {
+ "labels": format_taskgraph_labels,
+ "json": format_taskgraph_json,
+ "yaml": format_taskgraph_yaml,
+}
+
+
+def get_taskgraph_generator(root, parameters):
+ """Helper function to make testing a little easier."""
+ from taskgraph.generator import TaskGraphGenerator
+
+ return TaskGraphGenerator(root_dir=root, parameters=parameters)
+
+
+def format_taskgraph(options, parameters, logfile=None):
+ import taskgraph
+ from taskgraph.parameters import parameters_loader
+
+ if logfile:
+ handler = logging.FileHandler(logfile, mode="w")
+ if logging.root.handlers:
+ oldhandler = logging.root.handlers[-1]
+ logging.root.removeHandler(oldhandler)
+ handler.setFormatter(oldhandler.formatter)
+ logging.root.addHandler(handler)
+
+ if options["fast"]:
+ taskgraph.fast = True
+
+ if isinstance(parameters, str):
+ parameters = parameters_loader(
+ parameters,
+ overrides={"target-kind": options.get("target_kind")},
+ strict=False,
+ )
+
+ tgg = get_taskgraph_generator(options.get("root"), parameters)
+
+ tg = getattr(tgg, options["graph_attr"])
+ tg = get_filtered_taskgraph(tg, options["tasks_regex"], options["exclude_keys"])
+ format_method = FORMAT_METHODS[options["format"] or "labels"]
+ return format_method(tg)
+
+
+def dump_output(out, path=None, params_spec=None):
+ from taskgraph.parameters import Parameters
+
+ params_name = Parameters.format_spec(params_spec)
+ fh = None
+ if path:
+ # Substitute params name into file path if necessary
+ if params_spec and "{params}" not in path:
+ name, ext = os.path.splitext(path)
+ name += "_{params}"
+ path = name + ext
+
+ path = path.format(params=params_name)
+ fh = open(path, "w")
+ else:
+ print(
+ f"Dumping result with parameters from {params_name}:",
+ file=sys.stderr,
+ )
+ print(out + "\n", file=fh)
+
+
+def generate_taskgraph(options, parameters, logdir):
+ from taskgraph.parameters import Parameters
+
+ def logfile(spec):
+ """Determine logfile given a parameters specification."""
+ if logdir is None:
+ return None
+ return os.path.join(
+ logdir,
+ "{}_{}.log".format(options["graph_attr"], Parameters.format_spec(spec)),
+ )
+
+ # Don't bother using futures if there's only one parameter. This can make
+ # tracebacks a little more readable and avoids additional process overhead.
+ if len(parameters) == 1:
+ spec = parameters[0]
+ out = format_taskgraph(options, spec, logfile(spec))
+ dump_output(out, options["output_file"])
+ return
+
+ futures = {}
+ with ProcessPoolExecutor() as executor:
+ for spec in parameters:
+ f = executor.submit(format_taskgraph, options, spec, logfile(spec))
+ futures[f] = spec
+
+ for future in as_completed(futures):
+ output_file = options["output_file"]
+ spec = futures[future]
+ e = future.exception()
+ if e:
+ out = "".join(traceback.format_exception(type(e), e, e.__traceback__))
+ if options["diff"]:
+ # Dump to console so we don't accidentally diff the tracebacks.
+ output_file = None
+ else:
+ out = future.result()
+
+ dump_output(
+ out,
+ path=output_file,
+ params_spec=spec if len(parameters) > 1 else None,
+ )
+
+
+@command(
+ "tasks",
+ help="Show all tasks in the taskgraph.",
+ defaults={"graph_attr": "full_task_set"},
+)
+@command(
+ "full", help="Show the full taskgraph.", defaults={"graph_attr": "full_task_graph"}
+)
+@command(
+ "target",
+ help="Show the set of target tasks.",
+ defaults={"graph_attr": "target_task_set"},
+)
+@command(
+ "target-graph",
+ help="Show the target graph.",
+ defaults={"graph_attr": "target_task_graph"},
+)
+@command(
+ "optimized",
+ help="Show the optimized graph.",
+ defaults={"graph_attr": "optimized_task_graph"},
+)
+@command(
+ "morphed",
+ help="Show the morphed graph.",
+ defaults={"graph_attr": "morphed_task_graph"},
+)
+@argument("--root", "-r", help="root of the taskgraph definition relative to topsrcdir")
+@argument("--quiet", "-q", action="store_true", help="suppress all logging output")
+@argument(
+ "--verbose", "-v", action="store_true", help="include debug-level logging output"
+)
+@argument(
+ "--json",
+ "-J",
+ action="store_const",
+ dest="format",
+ const="json",
+ help="Output task graph as a JSON object",
+)
+@argument(
+ "--yaml",
+ "-Y",
+ action="store_const",
+ dest="format",
+ const="yaml",
+ help="Output task graph as a YAML object",
+)
+@argument(
+ "--labels",
+ "-L",
+ action="store_const",
+ dest="format",
+ const="labels",
+ help="Output the label for each task in the task graph (default)",
+)
+@argument(
+ "--parameters",
+ "-p",
+ default=None,
+ action="append",
+ help="Parameters to use for the generation. Can be a path to file (.yml or "
+ ".json; see `taskcluster/docs/parameters.rst`), a directory (containing "
+ "parameters files), a url, of the form `project=mozilla-central` to download "
+ "latest parameters file for the specified project from CI, or of the form "
+ "`task-id=<decision task id>` to download parameters from the specified "
+ "decision task. Can be specified multiple times, in which case multiple "
+ "generations will happen from the same invocation (one per parameters "
+ "specified).",
+)
+@argument(
+ "--no-optimize",
+ dest="optimize",
+ action="store_false",
+ default="true",
+ help="do not remove tasks from the graph that are found in the "
+ "index (a.k.a. optimize the graph)",
+)
+@argument(
+ "-o",
+ "--output-file",
+ default=None,
+ help="file path to store generated output.",
+)
+@argument(
+ "--tasks-regex",
+ "--tasks",
+ default=None,
+ help="only return tasks with labels matching this regular " "expression.",
+)
+@argument(
+ "--exclude-key",
+ default=None,
+ dest="exclude_keys",
+ action="append",
+ help="Exclude the specified key (using dot notation) from the final result. "
+ "This is mainly useful with '--diff' to filter out expected differences. Can be "
+ "used multiple times.",
+)
+@argument(
+ "--target-kind",
+ default=None,
+ help="only return tasks that are of the given kind, or their dependencies.",
+)
+@argument(
+ "-F",
+ "--fast",
+ default=False,
+ action="store_true",
+ help="enable fast task generation for local debugging.",
+)
+@argument(
+ "--diff",
+ const="default",
+ nargs="?",
+ default=None,
+ help="Generate and diff the current taskgraph against another revision. "
+ "Without args the base revision will be used. A revision specifier such as "
+ "the hash or `.~1` (hg) or `HEAD~1` (git) can be used as well.",
+)
+def show_taskgraph(options):
+ from taskgraph.parameters import Parameters, parameters_loader
+ from taskgraph.util.vcs import get_repository
+
+ if options.pop("verbose", False):
+ logging.root.setLevel(logging.DEBUG)
+
+ repo = None
+ cur_rev = None
+ diffdir = None
+ output_file = options["output_file"]
+
+ if options["diff"]:
+ repo = get_repository(os.getcwd())
+
+ if not repo.working_directory_clean():
+ print(
+ "abort: can't diff taskgraph with dirty working directory",
+ file=sys.stderr,
+ )
+ return 1
+
+        # We want to return the working directory to the current state
+        # as best we can after we're done. In all known cases, the branch
+        # or bookmark (both exposed as `repo.branch` on the VCS object)
+        # is preferable to a specific revision.
+ cur_rev = repo.branch or repo.head_rev[:12]
+
+ diffdir = tempfile.mkdtemp()
+ atexit.register(
+ shutil.rmtree, diffdir
+ ) # make sure the directory gets cleaned up
+ options["output_file"] = os.path.join(
+ diffdir, f"{options['graph_attr']}_{cur_rev}"
+ )
+ print(f"Generating {options['graph_attr']} @ {cur_rev}", file=sys.stderr)
+
+ parameters: List[Any[str, Parameters]] = options.pop("parameters")
+ if not parameters:
+ overrides = {
+ "target-kind": options.get("target_kind"),
+ }
+ parameters = [
+ parameters_loader(None, strict=False, overrides=overrides)
+ ] # will use default values
+
+ for param in parameters[:]:
+ if isinstance(param, str) and os.path.isdir(param):
+ parameters.remove(param)
+ parameters.extend(
+ [
+ p.as_posix()
+ for p in Path(param).iterdir()
+ if p.suffix in (".yml", ".json")
+ ]
+ )
+
+ logdir = None
+ if len(parameters) > 1:
+ # Log to separate files for each process instead of stderr to
+ # avoid interleaving.
+ basename = os.path.basename(os.getcwd())
+ logdir = os.path.join(appdirs.user_log_dir("taskgraph"), basename)
+ if not os.path.isdir(logdir):
+ os.makedirs(logdir)
+ else:
+ # Only set up logging if we have a single parameter spec. Otherwise
+ # logging will go to files. This is also used as a hook for Gecko
+ # to set up its `mach`-based logging.
+ setup_logging()
+
+ generate_taskgraph(options, parameters, logdir)
+
+ if options["diff"]:
+ assert diffdir is not None
+ assert repo is not None
+
+ # Reload taskgraph modules to pick up changes and clear global state.
+ for mod in sys.modules.copy():
+ if mod != __name__ and mod.split(".", 1)[0].endswith("taskgraph"):
+ del sys.modules[mod]
+
+ if options["diff"] == "default":
+ base_rev = repo.base_rev
+ else:
+ base_rev = options["diff"]
+
+ try:
+ repo.update(base_rev)
+ base_rev = repo.head_rev[:12]
+ options["output_file"] = os.path.join(
+ diffdir, f"{options['graph_attr']}_{base_rev}"
+ )
+ print(f"Generating {options['graph_attr']} @ {base_rev}", file=sys.stderr)
+ generate_taskgraph(options, parameters, logdir)
+ finally:
+ repo.update(cur_rev)
+
+ # Generate diff(s)
+ diffcmd = [
+ "diff",
+ "-U20",
+ "--report-identical-files",
+ f"--label={options['graph_attr']}@{base_rev}",
+ f"--label={options['graph_attr']}@{cur_rev}",
+ ]
+
+ for spec in parameters:
+ base_path = os.path.join(diffdir, f"{options['graph_attr']}_{base_rev}")
+ cur_path = os.path.join(diffdir, f"{options['graph_attr']}_{cur_rev}")
+
+ params_name = None
+ if len(parameters) > 1:
+ params_name = Parameters.format_spec(spec)
+ base_path += f"_{params_name}"
+ cur_path += f"_{params_name}"
+
+ try:
+ proc = subprocess.run(
+ diffcmd + [base_path, cur_path],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ universal_newlines=True,
+ check=True,
+ )
+ diff_output = proc.stdout
+ returncode = 0
+ except subprocess.CalledProcessError as e:
+ # returncode 1 simply means diffs were found
+ if e.returncode != 1:
+ print(e.stderr, file=sys.stderr)
+ raise
+ diff_output = e.output
+ returncode = e.returncode
+
+ dump_output(
+ diff_output,
+ # Don't bother saving file if no diffs were found. Log to
+ # console in this case instead.
+ path=None if returncode == 0 else output_file,
+ params_spec=spec if len(parameters) > 1 else None,
+ )
+
+ if options["format"] != "json":
+ print(
+ "If you were expecting differences in task bodies "
+ 'you should pass "-J"\n',
+ file=sys.stderr,
+ )
+
+ if len(parameters) > 1:
+ print(f"See '{logdir}' for logs", file=sys.stderr)
+
+
+@command("build-image", help="Build a Docker image")
+@argument("image_name", help="Name of the image to build")
+@argument(
+ "-t", "--tag", help="tag that the image should be built as.", metavar="name:tag"
+)
+@argument(
+ "--context-only",
+ help="File name the context tarball should be written to."
+ "with this option it will only build the context.tar.",
+ metavar="context.tar",
+)
+def build_image(args):
+ from taskgraph.docker import build_context, build_image
+
+ if args["context_only"] is None:
+ build_image(args["image_name"], args["tag"], os.environ)
+ else:
+ build_context(args["image_name"], args["context_only"], os.environ)
+
+
+@command(
+ "load-image",
+ help="Load a pre-built Docker image. Note that you need to "
+ "have docker installed and running for this to work.",
+)
+@argument(
+ "--task-id",
+ help="Load the image at public/image.tar.zst in this task, "
+ "rather than searching the index",
+)
+@argument(
+ "-t",
+ "--tag",
+ help="tag that the image should be loaded as. If not "
+ "image will be loaded with tag from the tarball",
+ metavar="name:tag",
+)
+@argument(
+ "image_name",
+ nargs="?",
+ help="Load the image of this name based on the current "
+ "contents of the tree (as built for mozilla-central "
+ "or mozilla-inbound)",
+)
+def load_image(args):
+ from taskgraph.docker import load_image_by_name, load_image_by_task_id
+
+ if not args.get("image_name") and not args.get("task_id"):
+ print("Specify either IMAGE-NAME or TASK-ID")
+ sys.exit(1)
+ try:
+ if args["task_id"]:
+ ok = load_image_by_task_id(args["task_id"], args.get("tag"))
+ else:
+ ok = load_image_by_name(args["image_name"], args.get("tag"))
+ if not ok:
+ sys.exit(1)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
+
+
+@command("image-digest", help="Print the digest of a docker image.")
+@argument(
+ "image_name",
+ help="Print the digest of the image of this name based on the current "
+ "contents of the tree.",
+)
+def image_digest(args):
+ from taskgraph.docker import get_image_digest
+
+ try:
+ digest = get_image_digest(args["image_name"])
+ print(digest)
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
+
+
+@command("decision", help="Run the decision task")
+@argument("--root", "-r", help="root of the taskgraph definition relative to topsrcdir")
+@argument(
+ "--message",
+ required=False,
+ help=argparse.SUPPRESS,
+)
+@argument(
+ "--project",
+ required=True,
+ help="Project to use for creating task graph. Example: --project=try",
+)
+@argument("--pushlog-id", dest="pushlog_id", required=True, default="0")
+@argument("--pushdate", dest="pushdate", required=True, type=int, default=0)
+@argument("--owner", required=True, help="email address of who owns this graph")
+@argument("--level", required=True, help="SCM level of this repository")
+@argument(
+ "--target-tasks-method", help="method for selecting the target tasks to generate"
+)
+@argument(
+ "--repository-type",
+ required=True,
+ help='Type of repository, either "hg" or "git"',
+)
+@argument("--base-repository", required=True, help='URL for "base" repository to clone')
+@argument(
+ "--base-ref", default="", help='Reference of the revision in the "base" repository'
+)
+@argument(
+ "--base-rev",
+ default="",
+ help="Taskgraph decides what to do based on the revision range between "
+ "`--base-rev` and `--head-rev`. Value is determined automatically if not provided",
+)
+@argument(
+ "--head-repository",
+ required=True,
+ help='URL for "head" repository to fetch revision from',
+)
+@argument(
+ "--head-ref", required=True, help="Reference (this is same as rev usually for hg)"
+)
+@argument(
+ "--head-rev", required=True, help="Commit revision to use from head repository"
+)
+@argument("--head-tag", help="Tag attached to the revision", default="")
+@argument(
+ "--tasks-for", required=True, help="the tasks_for value used to generate this task"
+)
+@argument("--try-task-config-file", help="path to try task configuration file")
+def decision(options):
+ from taskgraph.decision import taskgraph_decision
+
+ taskgraph_decision(options)
+
+
+@command("action-callback", description="Run action callback used by action tasks")
+@argument(
+ "--root",
+ "-r",
+ default="taskcluster/ci",
+ help="root of the taskgraph definition relative to topsrcdir",
+)
+def action_callback(options):
+ from taskgraph.actions import trigger_action_callback
+ from taskgraph.actions.util import get_parameters
+
+ try:
+ # the target task for this action (or null if it's a group action)
+ task_id = json.loads(os.environ.get("ACTION_TASK_ID", "null"))
+ # the target task group for this action
+ task_group_id = os.environ.get("ACTION_TASK_GROUP_ID", None)
+ input = json.loads(os.environ.get("ACTION_INPUT", "null"))
+ callback = os.environ.get("ACTION_CALLBACK", None)
+ root = options["root"]
+
+ parameters = get_parameters(task_group_id)
+
+ return trigger_action_callback(
+ task_group_id=task_group_id,
+ task_id=task_id,
+ input=input,
+ callback=callback,
+ parameters=parameters,
+ root=root,
+ test=False,
+ )
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
+
+
+@command("test-action-callback", description="Run an action callback in a testing mode")
+@argument(
+ "--root",
+ "-r",
+ default="taskcluster/ci",
+ help="root of the taskgraph definition relative to topsrcdir",
+)
+@argument(
+ "--parameters",
+ "-p",
+ default="",
+ help="parameters file (.yml or .json; see " "`taskcluster/docs/parameters.rst`)`",
+)
+@argument("--task-id", default=None, help="TaskId to which the action applies")
+@argument(
+ "--task-group-id", default=None, help="TaskGroupId to which the action applies"
+)
+@argument("--input", default=None, help="Action input (.yml or .json)")
+@argument("callback", default=None, help="Action callback name (Python function name)")
+def test_action_callback(options):
+ import taskgraph.actions
+ import taskgraph.parameters
+ from taskgraph.config import load_graph_config
+ from taskgraph.util import yaml
+
+ def load_data(filename):
+ with open(filename) as f:
+ if filename.endswith(".yml"):
+ return yaml.load_stream(f)
+ elif filename.endswith(".json"):
+ return json.load(f)
+ else:
+ raise Exception(f"unknown filename {filename}")
+
+ try:
+ task_id = options["task_id"]
+
+ if options["input"]:
+ input = load_data(options["input"])
+ else:
+ input = None
+
+ root = options["root"]
+ graph_config = load_graph_config(root)
+ trust_domain = graph_config["trust-domain"]
+ graph_config.register()
+
+ parameters = taskgraph.parameters.load_parameters_file(
+ options["parameters"], strict=False, trust_domain=trust_domain
+ )
+ parameters.check()
+
+ return taskgraph.actions.trigger_action_callback(
+ task_group_id=options["task_group_id"],
+ task_id=task_id,
+ input=input,
+ callback=options["callback"],
+ parameters=parameters,
+ root=root,
+ test=True,
+ )
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
+
+
+def create_parser():
+ parser = argparse.ArgumentParser(description="Interact with taskgraph")
+ subparsers = parser.add_subparsers()
+ for _, (func, args, kwargs, defaults) in commands.items():
+ subparser = subparsers.add_parser(*args, **kwargs)
+ for arg in func.args:
+ subparser.add_argument(*arg[0], **arg[1])
+ subparser.set_defaults(command=func, **defaults)
+ return parser
+
+
+def setup_logging():
+ logging.basicConfig(
+ format="%(asctime)s - %(levelname)s - %(message)s", level=logging.INFO
+ )
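+
+
+# For illustration (a sketch; this assumes the package's `taskgraph` console
+# entry point dispatches to main() below). Typical invocations of the
+# subcommands defined above look like:
+#
+#   taskgraph build-image my-image -t my-image:latest
+#   taskgraph load-image --task-id <task id>
+#   taskgraph image-digest my-image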
+
+
+def main(args=sys.argv[1:]):
+ setup_logging()
+ parser = create_parser()
+ args = parser.parse_args(args)
+ try:
+ args.command(vars(args))
+ except Exception:
+ traceback.print_exc()
+ sys.exit(1)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/morph.py b/third_party/python/taskcluster_taskgraph/taskgraph/morph.py
new file mode 100644
index 0000000000..c488317782
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/morph.py
@@ -0,0 +1,271 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Graph morphs are modifications to task-graphs that take place *after* the
+optimization phase.
+
+These graph morphs are largely invisible to developers running `./mach`
+locally, so they should be limited to changes that do not modify the meaning of
+the graph.
+"""
+
+# Note that the translation of `{'task-reference': '..'}` and
+# `artifact-reference` are handled in the optimization phase (since
+# optimization involves dealing with taskIds directly). Similarly,
+# `{'relative-datestamp': '..'}` is handled at the last possible moment during
+# task creation.
+
+
+import logging
+import os
+import re
+
+from slugid import nice as slugid
+
+from .graph import Graph
+from .task import Task
+from .taskgraph import TaskGraph
+from .util.workertypes import get_worker_type
+
+here = os.path.abspath(os.path.dirname(__file__))
+logger = logging.getLogger(__name__)
+MAX_ROUTES = 10
+
+registered_morphs = []
+
+
+def register_morph(func):
+ registered_morphs.append(func)
+
+
+def amend_taskgraph(taskgraph, label_to_taskid, to_add):
+ """Add the given tasks to the taskgraph, returning a new taskgraph"""
+ new_tasks = taskgraph.tasks.copy()
+ new_edges = set(taskgraph.graph.edges)
+ for task in to_add:
+ new_tasks[task.task_id] = task
+ assert task.label not in label_to_taskid
+ label_to_taskid[task.label] = task.task_id
+ for depname, dep in task.dependencies.items():
+ new_edges.add((task.task_id, dep, depname))
+
+ taskgraph = TaskGraph(new_tasks, Graph(set(new_tasks), new_edges))
+ return taskgraph, label_to_taskid
+
+
+def derive_index_task(task, taskgraph, label_to_taskid, parameters, graph_config):
+ """Create the shell of a task that depends on `task` and on the given docker
+ image."""
+ purpose = "index-task"
+ label = f"{purpose}-{task.label}"
+ provisioner_id, worker_type = get_worker_type(
+ graph_config, "misc", parameters["level"]
+ )
+
+ task_def = {
+ "provisionerId": provisioner_id,
+ "workerType": worker_type,
+ "dependencies": [task.task_id],
+ "created": {"relative-datestamp": "0 seconds"},
+ "deadline": task.task["deadline"],
+ # no point existing past the parent task's deadline
+ "expires": task.task["deadline"],
+ "metadata": {
+ "name": label,
+ "description": "{} for {}".format(
+ purpose, task.task["metadata"]["description"]
+ ),
+ "owner": task.task["metadata"]["owner"],
+ "source": task.task["metadata"]["source"],
+ },
+ "scopes": [],
+ "payload": {
+ "image": {
+ "path": "public/image.tar.zst",
+ "namespace": "taskgraph.cache.level-3.docker-images.v2.index-task.latest",
+ "type": "indexed-image",
+ },
+ "features": {
+ "taskclusterProxy": True,
+ },
+ "maxRunTime": 600,
+ },
+ }
+
+ # only include the docker-image dependency here if it is actually in the
+ # taskgraph (has not been optimized). It is included in
+ # task_def['dependencies'] unconditionally.
+ dependencies = {"parent": task.task_id}
+
+ task = Task(
+ kind="misc",
+ label=label,
+ attributes={},
+ task=task_def,
+ dependencies=dependencies,
+ )
+ task.task_id = slugid()
+ return task, taskgraph, label_to_taskid
+
+
+# these regular expressions capture route prefixes for which we have a star
+# scope, allowing them to be summarized. Each should correspond to a star scope
+# in each Gecko `assume:repo:hg.mozilla.org/...` role.
+_SCOPE_SUMMARY_REGEXPS = [
+ # TODO Bug 1631839 - Remove these scopes once the migration is done
+ re.compile(r"(index:insert-task:project\.mobile\.fenix\.v2\.[^.]*\.).*"),
+ re.compile(
+ r"(index:insert-task:project\.mobile\.reference-browser\.v3\.[^.]*\.).*"
+ ),
+]
+
+
+def make_index_task(parent_task, taskgraph, label_to_taskid, parameters, graph_config):
+ index_paths = [
+ r.split(".", 1)[1] for r in parent_task.task["routes"] if r.startswith("index.")
+ ]
+ parent_task.task["routes"] = [
+ r for r in parent_task.task["routes"] if not r.startswith("index.")
+ ]
+
+ task, taskgraph, label_to_taskid = derive_index_task(
+ parent_task, taskgraph, label_to_taskid, parameters, graph_config
+ )
+
+ # we need to "summarize" the scopes, otherwise a particularly
+ # namespace-heavy index task might have more scopes than can fit in a
+ # temporary credential.
+ scopes = set()
+ domain_scope_regex = re.compile(
+ r"(index:insert-task:{trust_domain}\.v2\.[^.]*\.).*".format(
+ trust_domain=re.escape(graph_config["trust-domain"])
+ )
+ )
+ all_scopes_summary_regexps = _SCOPE_SUMMARY_REGEXPS + [domain_scope_regex]
+ for path in index_paths:
+ scope = f"index:insert-task:{path}"
+ for summ_re in all_scopes_summary_regexps:
+ match = summ_re.match(scope)
+ if match:
+ scope = match.group(1) + "*"
+ break
+ scopes.add(scope)
+ task.task["scopes"] = sorted(scopes)
+
+ task.task["payload"]["command"] = ["insert-indexes.js"] + index_paths
+ task.task["payload"]["env"] = {
+ "TARGET_TASKID": parent_task.task_id,
+ "INDEX_RANK": parent_task.task.get("extra", {}).get("index", {}).get("rank", 0),
+ }
+ return task, taskgraph, label_to_taskid
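+
+# For illustration (hypothetical values, assuming a trust-domain of "gecko"):
+# a parent route "index.gecko.v2.try.latest.foo" yields the index path
+# "gecko.v2.try.latest.foo" and hence the scope
+# "index:insert-task:gecko.v2.try.latest.foo", which the summarization above
+# collapses to "index:insert-task:gecko.v2.try.*".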
+
+
+@register_morph
+def add_index_tasks(taskgraph, label_to_taskid, parameters, graph_config):
+ """
+ The TaskCluster queue only allows 10 routes on a task, but we have tasks
+ with many more routes, for purposes of indexing. This graph morph adds
+ "index tasks" that depend on such tasks and do the index insertions
+ directly, avoiding the limits on task.routes.
+ """
+ logger.debug("Morphing: adding index tasks")
+
+ added = []
+ for label, task in taskgraph.tasks.items():
+ if len(task.task.get("routes", [])) <= MAX_ROUTES:
+ continue
+ task, taskgraph, label_to_taskid = make_index_task(
+ task, taskgraph, label_to_taskid, parameters, graph_config
+ )
+ added.append(task)
+
+ if added:
+ taskgraph, label_to_taskid = amend_taskgraph(taskgraph, label_to_taskid, added)
+ logger.info(f"Added {len(added)} index tasks")
+
+ return taskgraph, label_to_taskid
+
+
+def _get_morph_url():
+ """
+ Guess a URL for the current file, for source metadata for created tasks.
+
+ If we checked out the taskgraph code with run-task in the decision task,
+ we can use TASKGRAPH_* to find the right version, which covers the
+ existing use case.
+ """
+ taskgraph_repo = os.environ.get(
+ "TASKGRAPH_HEAD_REPOSITORY", "https://github.com/taskcluster/taskgraph"
+ )
+ taskgraph_rev = os.environ.get("TASKGRAPH_HEAD_REV", "default")
+ return f"{taskgraph_repo}/raw-file/{taskgraph_rev}/src/taskgraph/morph.py"
+
+
+@register_morph
+def add_code_review_task(taskgraph, label_to_taskid, parameters, graph_config):
+ logger.debug("Morphing: adding code review task")
+
+ review_config = parameters.get("code-review")
+ if not review_config:
+ return taskgraph, label_to_taskid
+
+ code_review_tasks = {}
+ for label, task in taskgraph.tasks.items():
+ if task.attributes.get("code-review"):
+ code_review_tasks[task.label] = task.task_id
+
+ if code_review_tasks:
+ code_review_task_def = {
+ "provisionerId": "built-in",
+ "workerType": "succeed",
+ "dependencies": sorted(code_review_tasks.values()),
+ # This option permits running the task regardless of the
+ # exit status of its dependency tasks, as we are interested
+ # in the task failures
+ "requires": "all-resolved",
+ "created": {"relative-datestamp": "0 seconds"},
+ "deadline": {"relative-datestamp": "1 day"},
+ # no point existing past the task's deadline
+ "expires": {"relative-datestamp": "1 day"},
+ "metadata": {
+ "name": "code-review",
+ "description": "List all issues found in static analysis and linting tasks",
+ "owner": parameters["owner"],
+ "source": _get_morph_url(),
+ },
+ "scopes": [],
+ "payload": {},
+ "routes": ["project.relman.codereview.v1.try_ending"],
+ "extra": {
+ "code-review": {
+ "phabricator-build-target": review_config[
+ "phabricator-build-target"
+ ],
+ "repository": parameters["head_repository"],
+ "revision": parameters["head_rev"],
+ }
+ },
+ }
+ task = Task(
+ kind="misc",
+ label="code-review",
+ attributes={},
+ task=code_review_task_def,
+ dependencies=code_review_tasks,
+ )
+ task.task_id = slugid()
+ taskgraph, label_to_taskid = amend_taskgraph(taskgraph, label_to_taskid, [task])
+ logger.info("Added code review task.")
+
+ return taskgraph, label_to_taskid
+
+
+def morph(taskgraph, label_to_taskid, parameters, graph_config):
+ """Apply all morphs"""
+ for m in registered_morphs:
+ taskgraph, label_to_taskid = m(
+ taskgraph, label_to_taskid, parameters, graph_config
+ )
+ return taskgraph, label_to_taskid
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/__init__.py
new file mode 100644
index 0000000000..06287d877d
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/__init__.py
@@ -0,0 +1,8 @@
+from .base import ( # noqa: F401
+ Alias,
+ All,
+ Any,
+ Not,
+ OptimizationStrategy,
+ register_strategy,
+)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/base.py b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/base.py
new file mode 100644
index 0000000000..367b94e1de
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/base.py
@@ -0,0 +1,551 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+The objective of optimization is to remove as many tasks from the graph as
+possible, as efficiently as possible, thereby delivering useful results as
+quickly as possible. For example, ideally if only a test script is modified in
+a push, then the resulting graph contains only the corresponding test suite
+task.
+
+See ``taskcluster/docs/optimization.rst`` for more information.
+"""
+
+import datetime
+import logging
+from abc import ABCMeta, abstractmethod, abstractproperty
+from collections import defaultdict
+
+from slugid import nice as slugid
+
+from taskgraph.graph import Graph
+from taskgraph.taskgraph import TaskGraph
+from taskgraph.util.parameterization import resolve_task_references, resolve_timestamps
+from taskgraph.util.python_path import import_sibling_modules
+
+logger = logging.getLogger(__name__)
+registry = {}
+
+
+def register_strategy(name, args=()):
+ def wrap(cls):
+ if name not in registry:
+ registry[name] = cls(*args)
+ if not hasattr(registry[name], "description"):
+ registry[name].description = name
+ return cls
+
+ return wrap
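+
+
+# A minimal sketch of registering a strategy (hypothetical name; real
+# strategies are defined below and in sibling modules):
+#
+#   @register_strategy("remove-always")
+#   class RemoveAlways(OptimizationStrategy):
+#       def should_remove_task(self, task, params, arg):
+#           return True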
+
+
+def optimize_task_graph(
+ target_task_graph,
+ requested_tasks,
+ params,
+ do_not_optimize,
+ decision_task_id,
+ existing_tasks=None,
+ strategy_override=None,
+):
+ """
+ Perform task optimization, returning a taskgraph and a map from label to
+ assigned taskId, including replacement tasks.
+ """
+ label_to_taskid = {}
+ if not existing_tasks:
+ existing_tasks = {}
+
+ # instantiate the strategies for this optimization process
+ strategies = registry.copy()
+ if strategy_override:
+ strategies.update(strategy_override)
+
+ optimizations = _get_optimizations(target_task_graph, strategies)
+
+ removed_tasks = remove_tasks(
+ target_task_graph=target_task_graph,
+ requested_tasks=requested_tasks,
+ optimizations=optimizations,
+ params=params,
+ do_not_optimize=do_not_optimize,
+ )
+
+ replaced_tasks = replace_tasks(
+ target_task_graph=target_task_graph,
+ optimizations=optimizations,
+ params=params,
+ do_not_optimize=do_not_optimize,
+ label_to_taskid=label_to_taskid,
+ existing_tasks=existing_tasks,
+ removed_tasks=removed_tasks,
+ )
+
+ return (
+ get_subgraph(
+ target_task_graph,
+ removed_tasks,
+ replaced_tasks,
+ label_to_taskid,
+ decision_task_id,
+ ),
+ label_to_taskid,
+ )
+
+
+def _get_optimizations(target_task_graph, strategies):
+ def optimizations(label):
+ task = target_task_graph.tasks[label]
+ if task.optimization:
+ opt_by, arg = list(task.optimization.items())[0]
+ strategy = strategies[opt_by]
+ if hasattr(strategy, "description"):
+ opt_by += f" ({strategy.description})"
+ return (opt_by, strategy, arg)
+ else:
+ return ("never", strategies["never"], None)
+
+ return optimizations
+
+
+def _log_optimization(verb, opt_counts, opt_reasons=None):
+ if opt_reasons:
+ message = "optimize: {label} {action} because of {reason}"
+ for label, (action, reason) in opt_reasons.items():
+ logger.debug(message.format(label=label, action=action, reason=reason))
+
+ if opt_counts:
+ logger.info(
+ f"{verb.title()} "
+ + ", ".join(f"{c} tasks by {b}" for b, c in sorted(opt_counts.items()))
+ + " during optimization."
+ )
+ else:
+ logger.info(f"No tasks {verb} during optimization")
+
+
+def remove_tasks(
+ target_task_graph, requested_tasks, params, optimizations, do_not_optimize
+):
+ """
+ Implement the "Removing Tasks" phase, returning a set of task labels of all removed tasks.
+ """
+ opt_counts = defaultdict(int)
+ opt_reasons = {}
+ removed = set()
+ dependents_of = target_task_graph.graph.reverse_links_dict()
+ tasks = target_task_graph.tasks
+ prune_candidates = set()
+
+ # Traverse graph so dependents (child nodes) are guaranteed to be processed
+ # first.
+ for label in target_task_graph.graph.visit_preorder():
+ # Dependents that can be pruned away (shouldn't cause this task to run).
+ # Only dependents that either:
+ # A) Explicitly reference this task in their 'if_dependencies' list, or
+ # B) Don't have an 'if_dependencies' attribute (i.e. are in 'prune_candidates'
+ # because they should be removed but have prune_deps themselves)
+ # should be considered.
+ prune_deps = {
+ l
+ for l in dependents_of[label]
+ if l in prune_candidates
+ if not tasks[l].if_dependencies or label in tasks[l].if_dependencies
+ }
+
+ def _keep(reason):
+ """Mark a task as being kept in the graph. Also recursively removes
+ any dependents from `prune_candidates`, assuming they should be
+ kept because of this task.
+ """
+ opt_reasons[label] = ("kept", reason)
+
+ # Removes dependents that were in 'prune_candidates' from a task
+ # that ended up being kept (and therefore the dependents should
+ # also be kept).
+ queue = list(prune_deps)
+ while queue:
+ l = queue.pop()
+
+ # If l is a prune_dep of multiple tasks it could be queued up
+ # multiple times. Guard against it being already removed.
+ if l not in prune_candidates:
+ continue
+
+ # If a task doesn't set 'if_dependencies' itself (rather it was
+ # added to 'prune_candidates' due to one of its dependents),
+ # then we shouldn't remove it.
+ if not tasks[l].if_dependencies:
+ continue
+
+ prune_candidates.remove(l)
+ queue.extend([r for r in dependents_of[l] if r in prune_candidates])
+
+ def _remove(reason):
+ """Potentially mark a task as being removed from the graph. If the
+ task has dependents that can be pruned, add this task to
+ `prune_candidates` rather than removing it.
+ """
+ if prune_deps:
+ # If there are prune_deps, we don't yet know whether this task can be removed.
+ prune_candidates.add(label)
+ else:
+ opt_reasons[label] = ("removed", reason)
+ opt_counts[reason] += 1
+ removed.add(label)
+
+ # if we're not allowed to optimize, that's easy..
+ if label in do_not_optimize:
+ _keep("do not optimize")
+ continue
+
+ # If there are remaining tasks depending on this one, do not remove.
+ if any(
+ l for l in dependents_of[label] if l not in removed and l not in prune_deps
+ ):
+ _keep("dependent tasks")
+ continue
+
+ # Some tasks in the task graph only exist because they were required
+ # by a task that has just been optimized away. They can now be removed.
+ if label not in requested_tasks:
+ _remove("dependents optimized")
+ continue
+
+ # Call the optimization strategy.
+ task = tasks[label]
+ opt_by, opt, arg = optimizations(label)
+ if opt.should_remove_task(task, params, arg):
+ _remove(opt_by)
+ continue
+
+ # Some tasks should only run if their dependency was also run. Since we
+ # haven't processed dependencies yet, we add them to a list of
+ # candidate tasks for pruning.
+ if task.if_dependencies:
+ opt_reasons[label] = ("kept", opt_by)
+ prune_candidates.add(label)
+ else:
+ _keep(opt_by)
+
+ if prune_candidates:
+ reason = "if-dependencies pruning"
+ for label in prune_candidates:
+ # There's an edge case where a triangle graph can cause a
+ # dependency to stay in 'prune_candidates' when the dependent
+ # remains. Do a final check to ensure we don't create any bad
+ # edges.
+ dependents = any(
+ d
+ for d in dependents_of[label]
+ if d not in prune_candidates
+ if d not in removed
+ )
+ if dependents:
+ opt_reasons[label] = ("kept", "dependent tasks")
+ continue
+ removed.add(label)
+ opt_counts[reason] += 1
+ opt_reasons[label] = ("removed", reason)
+
+ _log_optimization("removed", opt_counts, opt_reasons)
+ return removed
+
+
+def replace_tasks(
+ target_task_graph,
+ params,
+ optimizations,
+ do_not_optimize,
+ label_to_taskid,
+ removed_tasks,
+ existing_tasks,
+):
+ """
+ Implement the "Replacing Tasks" phase, returning a set of task labels of
+ all replaced tasks. The replacement taskIds are added to label_to_taskid as
+ a side-effect.
+ """
+ opt_counts = defaultdict(int)
+ replaced = set()
+ dependents_of = target_task_graph.graph.reverse_links_dict()
+ dependencies_of = target_task_graph.graph.links_dict()
+
+ for label in target_task_graph.graph.visit_postorder():
+ # if we're not allowed to optimize, that's easy..
+ if label in do_not_optimize:
+ continue
+
+ # if this task depends on un-replaced, un-removed tasks, do not replace
+ if any(
+ l not in replaced and l not in removed_tasks for l in dependencies_of[label]
+ ):
+ continue
+
+ # if the task already exists, that's an easy replacement
+ repl = existing_tasks.get(label)
+ if repl:
+ label_to_taskid[label] = repl
+ replaced.add(label)
+ opt_counts["existing_tasks"] += 1
+ continue
+
+ # call the optimization strategy
+ task = target_task_graph.tasks[label]
+ opt_by, opt, arg = optimizations(label)
+
+ # compute latest deadline of dependents (if any)
+ dependents = [target_task_graph.tasks[l] for l in dependents_of[label]]
+ deadline = None
+ if dependents:
+ now = datetime.datetime.utcnow()
+ deadline = max(
+ resolve_timestamps(now, task.task["deadline"]) for task in dependents
+ )
+ repl = opt.should_replace_task(task, params, deadline, arg)
+ if repl:
+ if repl is True:
+ # True means remove this task; get_subgraph will catch any
+ # problems with removed tasks being depended on
+ removed_tasks.add(label)
+ else:
+ label_to_taskid[label] = repl
+ replaced.add(label)
+ opt_counts[opt_by] += 1
+ continue
+
+ _log_optimization("replaced", opt_counts)
+ return replaced
+
+
+def get_subgraph(
+ target_task_graph,
+ removed_tasks,
+ replaced_tasks,
+ label_to_taskid,
+ decision_task_id,
+):
+ """
+ Return the subgraph of target_task_graph consisting only of
+ non-optimized tasks and edges between them.
+
+ To avoid losing track of taskIds for tasks optimized away, this method
+ simultaneously substitutes real taskIds for task labels in the graph, and
+ populates each task definition's `dependencies` key with the appropriate
+ taskIds. Task references are resolved in the process.
+ """
+
+ # check for any dependency edges from included to removed tasks
+ bad_edges = [
+ (l, r, n)
+ for l, r, n in target_task_graph.graph.edges
+ if l not in removed_tasks and r in removed_tasks
+ ]
+ if bad_edges:
+ probs = ", ".join(
+ f"{l} depends on {r} as {n} but it has been removed"
+ for l, r, n in bad_edges
+ )
+ raise Exception("Optimization error: " + probs)
+
+ # fill in label_to_taskid for anything not removed or replaced
+ assert replaced_tasks <= set(label_to_taskid)
+ for label in sorted(
+ target_task_graph.graph.nodes - removed_tasks - set(label_to_taskid)
+ ):
+ label_to_taskid[label] = slugid()
+
+ # resolve labels to taskIds and populate task['dependencies']
+ tasks_by_taskid = {}
+ named_links_dict = target_task_graph.graph.named_links_dict()
+ omit = removed_tasks | replaced_tasks
+ for label, task in target_task_graph.tasks.items():
+ if label in omit:
+ continue
+ task.task_id = label_to_taskid[label]
+ named_task_dependencies = {
+ name: label_to_taskid[label]
+ for name, label in named_links_dict.get(label, {}).items()
+ }
+
+ # Add remaining soft dependencies
+ if task.soft_dependencies:
+ named_task_dependencies.update(
+ {
+ label: label_to_taskid[label]
+ for label in task.soft_dependencies
+ if label in label_to_taskid and label not in omit
+ }
+ )
+
+ task.task = resolve_task_references(
+ task.label,
+ task.task,
+ task_id=task.task_id,
+ decision_task_id=decision_task_id,
+ dependencies=named_task_dependencies,
+ )
+ deps = task.task.setdefault("dependencies", [])
+ deps.extend(sorted(named_task_dependencies.values()))
+ tasks_by_taskid[task.task_id] = task
+
+ # resolve edges to taskIds
+ edges_by_taskid = (
+ (label_to_taskid.get(left), label_to_taskid.get(right), name)
+ for (left, right, name) in target_task_graph.graph.edges
+ )
+ # ..and drop edges that are no longer entirely in the task graph
+ # (note that this omits edges to replaced tasks, but they are still in task.dependencies)
+ edges_by_taskid = {
+ (left, right, name)
+ for (left, right, name) in edges_by_taskid
+ if left in tasks_by_taskid and right in tasks_by_taskid
+ }
+
+ return TaskGraph(tasks_by_taskid, Graph(set(tasks_by_taskid), edges_by_taskid))
+
+
+@register_strategy("never")
+class OptimizationStrategy:
+ def should_remove_task(self, task, params, arg):
+ """Determine whether to optimize this task by removing it. Returns
+ True to remove."""
+ return False
+
+ def should_replace_task(self, task, params, deadline, arg):
+ """Determine whether to optimize this task by replacing it. Returns a
+ taskId to replace this task, True to replace with nothing, or False to
+ keep the task."""
+ return False
+
+
+@register_strategy("always")
+class Always(OptimizationStrategy):
+ def should_remove_task(self, task, params, arg):
+ return True
+
+
+class CompositeStrategy(OptimizationStrategy, metaclass=ABCMeta):
+ def __init__(self, *substrategies, **kwargs):
+ self.substrategies = []
+ missing = set()
+ for sub in substrategies:
+ if isinstance(sub, str):
+ if sub not in registry.keys():
+ missing.add(sub)
+ continue
+ sub = registry[sub]
+
+ self.substrategies.append(sub)
+
+ if missing:
+ raise TypeError(
+ "substrategies aren't registered: {}".format(
+ ", ".join(sorted(missing))
+ )
+ )
+
+ self.split_args = kwargs.pop("split_args", None)
+ if not self.split_args:
+ self.split_args = lambda arg, substrategies: [arg] * len(substrategies)
+ if kwargs:
+ raise TypeError("unexpected keyword args")
+
+ @abstractproperty
+ def description(self):
+ """A textual description of the combined substrategies."""
+
+ @abstractmethod
+ def reduce(self, results):
+ """Given all substrategy results as a generator, return the overall
+ result."""
+
+ def _generate_results(self, fname, *args):
+ *passthru, arg = args
+ for sub, arg in zip(
+ self.substrategies, self.split_args(arg, self.substrategies)
+ ):
+ yield getattr(sub, fname)(*passthru, arg)
+
+ def should_remove_task(self, *args):
+ results = self._generate_results("should_remove_task", *args)
+ return self.reduce(results)
+
+ def should_replace_task(self, *args):
+ results = self._generate_results("should_replace_task", *args)
+ return self.reduce(results)
+
+
+class Any(CompositeStrategy):
+ """Given one or more optimization strategies, remove or replace a task if any of them
+ says to.
+
+ Replacement will use the value returned by the first strategy that says to replace.
+ """
+
+ @property
+ def description(self):
+ return "-or-".join([s.description for s in self.substrategies])
+
+ @classmethod
+ def reduce(cls, results):
+ for rv in results:
+ if rv:
+ return rv
+ return False
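+
+# For illustration (a sketch; both names are registered in this package):
+# Any("skip-unless-changed", "index-search") removes or replaces a task as
+# soon as either substrategy says to.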
+
+
+class All(CompositeStrategy):
+ """Given one or more optimization strategies, remove or replace a task if all of them
+ say to.
+
+ Replacement will use the value returned by the first strategy passed in.
+ Note the values used for replacement need not be the same, as long as they
+ all say to replace.
+ """
+
+ @property
+ def description(self):
+ return "-and-".join([s.description for s in self.substrategies])
+
+ @classmethod
+ def reduce(cls, results):
+ for rv in results:
+ if not rv:
+ return rv
+ return True
+
+
+class Alias(CompositeStrategy):
+ """Provides an alias to an existing strategy.
+
+ This can be useful to swap strategies in and out without needing to modify
+ the task transforms.
+ """
+
+ def __init__(self, strategy):
+ super().__init__(strategy)
+
+ @property
+ def description(self):
+ return self.substrategies[0].description
+
+ def reduce(self, results):
+ return next(results)
+
+
+class Not(CompositeStrategy):
+ """Given a strategy, returns the opposite."""
+
+ def __init__(self, strategy):
+ super().__init__(strategy)
+
+ @property
+ def description(self):
+ return "not-" + self.substrategies[0].description
+
+ def reduce(self, results):
+ return not next(results)
+
+
+# Trigger registration in sibling modules.
+import_sibling_modules()
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py
new file mode 100644
index 0000000000..c6846e60c5
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/optimize/strategies.py
@@ -0,0 +1,65 @@
+import logging
+from datetime import datetime
+
+from taskgraph import files_changed
+from taskgraph.optimize.base import OptimizationStrategy, register_strategy
+from taskgraph.util.taskcluster import find_task_id, status_task
+
+logger = logging.getLogger(__name__)
+
+
+@register_strategy("index-search")
+class IndexSearch(OptimizationStrategy):
+
+ # A task with no dependencies remaining after optimization will be replaced
+ # if artifacts exist for the corresponding index_paths.
+ # Otherwise, we're in one of the following cases:
+ # - the task has un-optimized dependencies
+ # - the artifacts have expired
+ # - some changes altered the index_paths and new artifacts need to be
+ # created.
+ # In any of those cases, we need to run the task to create or refresh
+ # artifacts.
+
+ fmt = "%Y-%m-%dT%H:%M:%S.%fZ"
+
+ def should_replace_task(self, task, params, deadline, index_paths):
+ "Look for a task with one of the given index paths"
+ for index_path in index_paths:
+ try:
+ task_id = find_task_id(index_path)
+ status = status_task(task_id)
+ # status can be `None` if we're in `testing` mode
+ # (e.g. test-action-callback)
+ if not status or status.get("state") in ("exception", "failed"):
+ continue
+
+ if deadline and datetime.strptime(
+ status["expires"], self.fmt
+ ) < datetime.strptime(deadline, self.fmt):
+ continue
+
+ return task_id
+ except KeyError:
+ # 404 will end up here and go on to the next index path
+ pass
+
+ return False
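+
+ # For illustration (hypothetical index path): given
+ # index_paths = ["gecko.cache.level-3.toolchains.v3.linux64-clang.latest"],
+ # should_replace_task() returns that indexed taskId when the task's state
+ # is neither "failed" nor "exception" and it does not expire before
+ # `deadline`; otherwise it returns False.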
+
+
+@register_strategy("skip-unless-changed")
+class SkipUnlessChanged(OptimizationStrategy):
+ def should_remove_task(self, task, params, file_patterns):
+ # pushlog_id is -1 when run from a cron.yml job or on a git repository
+ if params.get("repository_type") == "hg" and params.get("pushlog_id") == -1:
+ return False
+
+ changed = files_changed.check(params, file_patterns)
+ if not changed:
+ logger.debug(
+ 'no files found matching a pattern in `skip-unless-changed` for "{}"'.format(
+ task.label
+ )
+ )
+ return True
+ return False
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/parameters.py b/third_party/python/taskcluster_taskgraph/taskgraph/parameters.py
new file mode 100644
index 0000000000..ed662c704e
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/parameters.py
@@ -0,0 +1,369 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import gzip
+import hashlib
+import json
+import os
+import time
+from datetime import datetime
+from io import BytesIO
+from pprint import pformat
+from subprocess import CalledProcessError
+from urllib.parse import urlparse
+from urllib.request import urlopen
+
+import mozilla_repo_urls
+from voluptuous import ALLOW_EXTRA, Any, Optional, Required, Schema
+
+from taskgraph.util import yaml
+from taskgraph.util.readonlydict import ReadOnlyDict
+from taskgraph.util.schema import validate_schema
+from taskgraph.util.taskcluster import find_task_id, get_artifact_url
+from taskgraph.util.vcs import get_repository
+
+
+class ParameterMismatch(Exception):
+ """Raised when a parameters.yml has extra or missing parameters."""
+
+
+# Please keep this list sorted and in sync with docs/reference/parameters.rst
+base_schema = Schema(
+ {
+ Required("base_repository"): str,
+ Required("base_ref"): str,
+ Required("base_rev"): str,
+ Required("build_date"): int,
+ Required("build_number"): int,
+ Required("do_not_optimize"): [str],
+ Required("enable_always_target"): bool,
+ Required("existing_tasks"): {str: str},
+ Required("filters"): [str],
+ Required("head_ref"): str,
+ Required("head_repository"): str,
+ Required("head_rev"): str,
+ Required("head_tag"): str,
+ Required("level"): str,
+ Required("moz_build_date"): str,
+ Required("next_version"): Any(str, None),
+ Required("optimize_strategies"): Any(str, None),
+ Required("optimize_target_tasks"): bool,
+ Required("owner"): str,
+ Required("project"): str,
+ Required("pushdate"): int,
+ Required("pushlog_id"): str,
+ Required("repository_type"): str,
+ # target-kind is not included, since it should never be
+ # used at run-time
+ Required("target_tasks_method"): str,
+ Required("tasks_for"): str,
+ Required("version"): Any(str, None),
+ Optional("code-review"): {
+ Required("phabricator-build-target"): str,
+ },
+ }
+)
+
+
+def get_contents(path):
+ with open(path) as fh:
+ contents = fh.readline().rstrip()
+ return contents
+
+
+def get_version(repo_path):
+ version_path = os.path.join(repo_path, "version.txt")
+ return get_contents(version_path) if os.path.isfile(version_path) else None
+
+
+def _get_defaults(repo_root=None):
+ repo_path = repo_root or os.getcwd()
+ repo = get_repository(repo_path)
+ try:
+ repo_url = repo.get_url()
+ parsed_url = mozilla_repo_urls.parse(repo_url)
+ project = parsed_url.repo_name
+ except (
+ CalledProcessError,
+ mozilla_repo_urls.errors.InvalidRepoUrlError,
+ mozilla_repo_urls.errors.UnsupportedPlatformError,
+ ):
+ repo_url = ""
+ project = ""
+
+ return {
+ "base_repository": repo_url,
+ "base_ref": "",
+ "base_rev": "",
+ "build_date": int(time.time()),
+ "build_number": 1,
+ "do_not_optimize": [],
+ "enable_always_target": True,
+ "existing_tasks": {},
+ "filters": ["target_tasks_method"],
+ "head_ref": repo.branch or repo.head_rev,
+ "head_repository": repo_url,
+ "head_rev": repo.head_rev,
+ "head_tag": "",
+ "level": "3",
+ "moz_build_date": datetime.now().strftime("%Y%m%d%H%M%S"),
+ "next_version": None,
+ "optimize_strategies": None,
+ "optimize_target_tasks": True,
+ "owner": "nobody@mozilla.com",
+ "project": project,
+ "pushdate": int(time.time()),
+ "pushlog_id": "0",
+ "repository_type": repo.tool,
+ "target_tasks_method": "default",
+ "tasks_for": "",
+ "version": get_version(repo_path),
+ }
+
+
+defaults_functions = [_get_defaults]
+
+
+def extend_parameters_schema(schema, defaults_fn=None):
+ """
+ Extend the schema for parameters to include per-project configuration.
+
+ This should be called by the `taskgraph.register` function in the
+ graph-configuration.
+
+ Args:
+ schema (Schema): The voluptuous.Schema object used to describe extended
+ parameters.
+ defaults_fn (function): A function which takes the repository root and
+ returns a dict mapping parameter name to default value in
+ the event strict=False (optional).
+ """
+ global base_schema
+ global defaults_functions
+ base_schema = base_schema.extend(schema)
+ if defaults_fn:
+ defaults_functions.append(defaults_fn)
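+
+
+# A minimal sketch of extending the schema (hypothetical parameter name):
+#
+#   extend_parameters_schema(
+#       {Required("my_parameter"): str},
+#       defaults_fn=lambda repo_root=None: {"my_parameter": "default"},
+#   )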
+
+
+class Parameters(ReadOnlyDict):
+ """An immutable dictionary with nicer KeyError messages on failure"""
+
+ def __init__(self, strict=True, repo_root=None, **kwargs):
+ self.strict = strict
+ self.spec = kwargs.pop("spec", None)
+ self._id = None
+
+ if not self.strict:
+ # apply defaults to missing parameters
+ kwargs = Parameters._fill_defaults(repo_root=repo_root, **kwargs)
+
+ ReadOnlyDict.__init__(self, **kwargs)
+
+ @property
+ def id(self):
+ if not self._id:
+ self._id = hashlib.sha256(
+ json.dumps(self, sort_keys=True).encode("utf-8")
+ ).hexdigest()[:12]
+
+ return self._id
+
+ @staticmethod
+ def format_spec(spec):
+ """
+ Get a friendly identifier from a parameters specifier.
+
+ Args:
+ spec (str): Parameters specifier.
+
+ Returns:
+ str: Name to identify parameters by.
+ """
+ if spec is None:
+ return "defaults"
+
+ if any(spec.startswith(s) for s in ("task-id=", "project=")):
+ return spec
+
+ result = urlparse(spec)
+ if result.scheme in ("http", "https"):
+ spec = result.path
+
+ return os.path.splitext(os.path.basename(spec))[0]
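+
+ # For illustration (hypothetical values):
+ # format_spec(None) -> "defaults"
+ # format_spec("task-id=abc123") -> "task-id=abc123"
+ # format_spec("/path/to/parameters.yml") -> "parameters"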
+
+ @staticmethod
+ def _fill_defaults(repo_root=None, **kwargs):
+ defaults = {}
+ for fn in defaults_functions:
+ defaults.update(fn(repo_root))
+
+ for name, default in defaults.items():
+ if name not in kwargs:
+ kwargs[name] = default
+ return kwargs
+
+ def check(self):
+ schema = (
+ base_schema if self.strict else base_schema.extend({}, extra=ALLOW_EXTRA)
+ )
+ try:
+ validate_schema(schema, self.copy(), "Invalid parameters:")
+ except Exception as e:
+ raise ParameterMismatch(str(e))
+
+ def __getitem__(self, k):
+ try:
+ return super().__getitem__(k)
+ except KeyError:
+ raise KeyError(f"taskgraph parameter {k!r} not found")
+
+ def is_try(self):
+ """
+ Determine whether this graph is being built on a try project or for
+ `mach try fuzzy`.
+ """
+ return "try" in self["project"] or self["tasks_for"] == "github-pull-request"
+
+ @property
+ def moz_build_date(self):
+ # XXX self["moz_build_date"] is left as a string because:
+ # * of backward compatibility
+ # * parameters are output in a YAML file
+ return datetime.strptime(self["moz_build_date"], "%Y%m%d%H%M%S")
+
+ def file_url(self, path, pretty=False):
+ """
+ Determine the VCS URL for viewing a file in the tree, suitable for
+ viewing by a human.
+
+ :param str path: The path, relative to the root of the repository.
+ :param bool pretty: Whether to return a link to a formatted version of the
+ file, or the raw file version.
+
+ :return str: The URL displaying the given path.
+ """
+ if self["repository_type"] == "hg":
+ if path.startswith("comm/"):
+ path = path[len("comm/") :]
+ repo = self["comm_head_repository"]
+ rev = self["comm_head_rev"]
+ else:
+ repo = self["head_repository"]
+ rev = self["head_rev"]
+ endpoint = "file" if pretty else "raw-file"
+ return f"{repo}/{endpoint}/{rev}/{path}"
+ elif self["repository_type"] == "git":
+ # For git repositories, we only support GitHub remotes (HTTPS or SSH)
+ repo = self["head_repository"]
+ if repo.startswith("https://github.com/"):
+ if repo.endswith("/"):
+ repo = repo[:-1]
+
+ rev = self["head_rev"]
+ endpoint = "blob" if pretty else "raw"
+ return f"{repo}/{endpoint}/{rev}/{path}"
+ elif repo.startswith("git@github.com:"):
+ if repo.endswith(".git"):
+ repo = repo[:-4]
+ rev = self["head_rev"]
+ endpoint = "blob" if pretty else "raw"
+ return "{}/{}/{}/{}".format(
+ repo.replace("git@github.com:", "https://github.com/"),
+ endpoint,
+ rev,
+ path,
+ )
+ else:
+ raise ParameterMismatch(
+ "Don't know how to determine file URL for non-github"
+ "repo: {}".format(repo)
+ )
+ else:
+ raise RuntimeError(
+ 'Only the "git" and "hg" repository types are supported for using file_url()'
+ )
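+
+ # For illustration (hypothetical values): with repository_type "git",
+ # head_repository "https://github.com/taskcluster/taskgraph" and head_rev
+ # "abc123", file_url("README.md", pretty=True) returns
+ # "https://github.com/taskcluster/taskgraph/blob/abc123/README.md".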
+
+ def __str__(self):
+ return f"Parameters(id={self.id}) (from {self.format_spec(self.spec)})"
+
+ def __repr__(self):
+ return pformat(dict(self), indent=2)
+
+
+def load_parameters_file(
+ spec, strict=True, overrides=None, trust_domain=None, repo_root=None
+):
+ """
+ Load parameters from a path, url, decision task-id or project.
+
+ Examples:
+ task-id=fdtgsD5DQUmAQZEaGMvQ4Q
+ project=mozilla-central
+ """
+
+ if overrides is None:
+ overrides = {}
+ overrides["spec"] = spec
+
+ if not spec:
+ return Parameters(strict=strict, repo_root=repo_root, **overrides)
+
+ try:
+ # reading parameters from a local parameters.yml file
+ f = open(spec)
+ except OSError:
+ # fetching parameters.yml using task-id, project or supplied url
+ task_id = None
+ if spec.startswith("task-id="):
+ task_id = spec.split("=")[1]
+ elif spec.startswith("project="):
+ if trust_domain is None:
+ raise ValueError(
+ "Can't specify parameters by project "
+ "if trust domain isn't supplied.",
+ )
+ index = "{trust_domain}.v2.{project}.latest.taskgraph.decision".format(
+ trust_domain=trust_domain,
+ project=spec.split("=")[1],
+ )
+ task_id = find_task_id(index)
+
+ if task_id:
+ spec = get_artifact_url(task_id, "public/parameters.yml")
+ f = urlopen(spec)
+
+ # Decompress gzipped parameters.
+ if f.info().get("Content-Encoding") == "gzip":
+ buf = BytesIO(f.read())
+ f = gzip.GzipFile(fileobj=buf)
+
+ if spec.endswith(".yml"):
+ kwargs = yaml.load_stream(f)
+ elif spec.endswith(".json"):
+ kwargs = json.load(f)
+ else:
+ raise TypeError(f"Parameters file `{spec}` is not JSON or YAML")
+
+ kwargs.update(overrides)
+ return Parameters(strict=strict, repo_root=repo_root, **kwargs)
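+
+# For illustration (hypothetical spec values):
+#
+#   load_parameters_file("task-id=fdtgsD5DQUmAQZEaGMvQ4Q", strict=False)
+#   load_parameters_file("project=mozilla-central", strict=False,
+#                        trust_domain="gecko")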
+
+
+def parameters_loader(spec, strict=True, overrides=None):
+ def get_parameters(graph_config):
+ try:
+ repo_root = graph_config.vcs_root
+ except Exception:
+ repo_root = None
+
+ parameters = load_parameters_file(
+ spec,
+ strict=strict,
+ overrides=overrides,
+ repo_root=repo_root,
+ trust_domain=graph_config["trust-domain"],
+ )
+ parameters.check()
+ return parameters
+
+ return get_parameters
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/fetch-content b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/fetch-content
new file mode 100755
index 0000000000..42dc5e2b28
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/fetch-content
@@ -0,0 +1,899 @@
+#!/usr/bin/python3 -u
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import argparse
+import bz2
+import concurrent.futures
+import contextlib
+import datetime
+import gzip
+import hashlib
+import json
+import lzma
+import multiprocessing
+import os
+import pathlib
+import random
+import re
+import stat
+import subprocess
+import sys
+import tarfile
+import tempfile
+import time
+import urllib.parse
+import urllib.request
+import zipfile
+
+try:
+ import zstandard
+except ImportError:
+ zstandard = None
+
+try:
+ import certifi
+except ImportError:
+ certifi = None
+
+
+CONCURRENCY = multiprocessing.cpu_count()
+
+
+def log(msg):
+ print(msg, file=sys.stderr)
+ sys.stderr.flush()
+
+
+class IntegrityError(Exception):
+ """Represents an integrity error when downloading a URL."""
+
+
+def ZstdCompressor(*args, **kwargs):
+ if not zstandard:
+ raise ValueError("zstandard Python package not available")
+ return zstandard.ZstdCompressor(*args, **kwargs)
+
+
+def ZstdDecompressor(*args, **kwargs):
+ if not zstandard:
+ raise ValueError("zstandard Python package not available")
+ return zstandard.ZstdDecompressor(*args, **kwargs)
+
+
+@contextlib.contextmanager
+def rename_after_close(fname, *args, **kwargs):
+ """
+ Context manager that opens a temporary file to use as a writer,
+ and closes the file on context exit, renaming it to the expected
+ file name in case of success, or removing it in case of failure.
+
+ Takes the same options as open(), but must be used as a context
+ manager.
+ """
+ path = pathlib.Path(fname)
+ tmp = path.with_name("%s.tmp" % path.name)
+ try:
+ with tmp.open(*args, **kwargs) as fh:
+ yield fh
+ except Exception:
+ tmp.unlink()
+ raise
+ else:
+ tmp.rename(fname)
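+
+# For illustration:
+#
+#   with rename_after_close("out.bin", "wb") as fh:
+#       fh.write(b"data")
+#   # "out.bin" only appears if the block exits without raising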
+
+
+# The following is copied from
+# https://github.com/mozilla-releng/redo/blob/6d07678a014e0c525e54a860381a165d34db10ff/redo/__init__.py#L15-L85
+def retrier(attempts=5, sleeptime=10, max_sleeptime=300, sleepscale=1.5, jitter=1):
+ """
+ A generator function that sleeps between retries, handles exponential
+ backoff and jitter. The action you are retrying is meant to run after
+ retrier yields.
+
+ At each iteration, we sleep for sleeptime + random.randint(-jitter, jitter).
+ Afterwards sleeptime is multiplied by sleepscale for the next iteration.
+
+ Args:
+ attempts (int): maximum number of times to try; defaults to 5
+ sleeptime (float): how many seconds to sleep between tries; defaults to
+ 10s
+ max_sleeptime (float): the longest we'll sleep, in seconds; defaults to
+ 300s (five minutes)
+ sleepscale (float): how much to multiply the sleep time by each
+ iteration; defaults to 1.5
+ jitter (int): random jitter to introduce to sleep time each iteration.
+ the amount is chosen at random between [-jitter, +jitter]
+ defaults to 1
+
+ Yields:
+ None, a maximum of `attempts` number of times
+
+ Example:
+ >>> n = 0
+ >>> for _ in retrier(sleeptime=0, jitter=0):
+ ... if n == 3:
+ ... # We did the thing!
+ ... break
+ ... n += 1
+ >>> n
+ 3
+
+ >>> n = 0
+ >>> for _ in retrier(sleeptime=0, jitter=0):
+ ... if n == 6:
+ ... # We did the thing!
+ ... break
+ ... n += 1
+ ... else:
+ ... print("max tries hit")
+ max tries hit
+ """
+ jitter = jitter or 0 # py35 barfs on the next line if jitter is None
+ if jitter > sleeptime:
+ # To prevent negative sleep times
+ raise Exception(
+ "jitter ({}) must be less than sleep time ({})".format(jitter, sleeptime)
+ )
+
+ sleeptime_real = sleeptime
+ for _ in range(attempts):
+ log("attempt %i/%i" % (_ + 1, attempts))
+
+ yield sleeptime_real
+
+ if jitter:
+ sleeptime_real = sleeptime + random.randint(-jitter, jitter)
+ # our jitter should scale along with the sleeptime
+ jitter = int(jitter * sleepscale)
+ else:
+ sleeptime_real = sleeptime
+
+ sleeptime *= sleepscale
+
+ if sleeptime_real > max_sleeptime:
+ sleeptime_real = max_sleeptime
+
+ # Don't need to sleep the last time
+ if _ < attempts - 1:
+ log(
+ "sleeping for %.2fs (attempt %i/%i)" % (sleeptime_real, _ + 1, attempts)
+ )
+ time.sleep(sleeptime_real)
+
+
+def stream_download(url, sha256=None, size=None, headers=None):
+ """Download a URL to a generator, optionally with content verification.
+
+ If ``sha256`` or ``size`` are defined, the downloaded URL will be
+ validated against those requirements and ``IntegrityError`` will be
+ raised if expectations do not match.
+
+ Because verification cannot occur until the file is completely downloaded
+ it is recommended for consumers to not do anything meaningful with the
+ data if content verification is being used. To securely handle retrieved
+ content, it should be streamed to a file or memory and only operated
+ on after the generator is exhausted without raising.
+ """
+ log("Downloading %s" % url)
+ headers = headers or []
+
+ h = hashlib.sha256()
+ length = 0
+
+ t0 = time.time()
+ req_headers = {}
+ for header in headers:
+ key, val = header.split(":")
+ req_headers[key.strip()] = val.strip()
+
+ req = urllib.request.Request(url, None, req_headers)
+ with urllib.request.urlopen(
+ req, cafile=certifi.where()
+ ) if certifi else urllib.request.urlopen(req) as fh:
+ if not url.endswith(".gz") and fh.info().get("Content-Encoding") == "gzip":
+ fh = gzip.GzipFile(fileobj=fh)
+
+ while True:
+ chunk = fh.read(65536)
+ if not chunk:
+ break
+
+ h.update(chunk)
+ length += len(chunk)
+
+ yield chunk
+
+ duration = time.time() - t0
+ digest = h.hexdigest()
+
+ log(
+ "%s resolved to %d bytes with sha256 %s in %.3fs"
+ % (url, length, digest, duration)
+ )
+
+ if size:
+ if size == length:
+ log("Verified size of %s" % url)
+ else:
+ raise IntegrityError(
+ "size mismatch on %s: wanted %d; got %d" % (url, size, length)
+ )
+
+ if sha256:
+ if digest == sha256:
+ log("Verified sha256 integrity of %s" % url)
+ else:
+ raise IntegrityError(
+ "sha256 mismatch on %s: wanted %s; got %s" % (url, sha256, digest)
+ )
+
+
+def download_to_path(url, path, sha256=None, size=None, headers=None):
+ """Download a URL to a filesystem path, possibly with verification."""
+
+ # We download to a temporary file and rename at the end so there's
+ # no chance of the final file being partially written or containing
+ # bad data.
+ try:
+ path.unlink()
+ except FileNotFoundError:
+ pass
+
+ for _ in retrier(attempts=5, sleeptime=60):
+ try:
+ log("Downloading %s to %s" % (url, path))
+
+ with rename_after_close(path, "wb") as fh:
+ for chunk in stream_download(
+ url, sha256=sha256, size=size, headers=headers
+ ):
+ fh.write(chunk)
+
+ return
+ except IntegrityError:
+ raise
+ except Exception as e:
+ log("Download failed: {}".format(e))
+ continue
+
+ raise Exception("Download failed, no more retries!")
+
+
+def download_to_memory(url, sha256=None, size=None):
+ """Download a URL to memory, possibly with verification."""
+
+ data = b""
+ for _ in retrier(attempts=5, sleeptime=60):
+ try:
+ log("Downloading %s" % (url))
+
+ for chunk in stream_download(url, sha256=sha256, size=size):
+ data += chunk
+
+ return data
+ except IntegrityError:
+ raise
+ except Exception as e:
+ log("Download failed: {}".format(e))
+ continue
+
+ raise Exception("Download failed, no more retries!")
+
+
+def gpg_verify_path(path: pathlib.Path, public_key_data: bytes, signature_data: bytes):
+ """Verify that a filesystem path verifies using GPG.
+
+ Takes a Path defining a file to verify. ``public_key_data`` contains
+ bytes with GPG public key data. ``signature_data`` contains a signed
+ GPG document to use with ``gpg --verify``.
+ """
+ log("Validating GPG signature of %s" % path)
+ log("GPG key data:\n%s" % public_key_data.decode("ascii"))
+
+ with tempfile.TemporaryDirectory() as td:
+ try:
+ # --batch since we're running unattended.
+ gpg_args = ["gpg", "--homedir", td, "--batch"]
+
+ log("Importing GPG key...")
+ subprocess.run(gpg_args + ["--import"], input=public_key_data, check=True)
+
+ log("Verifying GPG signature...")
+ subprocess.run(
+ gpg_args + ["--verify", "-", "%s" % path],
+ input=signature_data,
+ check=True,
+ )
+
+ log("GPG signature verified!")
+ finally:
+ # There is a race between the agent self-terminating and
+ # shutil.rmtree() from the temporary directory cleanup that can
+ # lead to exceptions. Kill the agent before cleanup to prevent this.
+ env = dict(os.environ)
+ env["GNUPGHOME"] = td
+ subprocess.run(["gpgconf", "--kill", "gpg-agent"], env=env)
+
+
+def open_tar_stream(path: pathlib.Path):
+ """"""
+ if path.suffix == ".bz2":
+ return bz2.open(str(path), "rb")
+ elif path.suffix == ".gz":
+ return gzip.open(str(path), "rb")
+ elif path.suffix == ".xz":
+ return lzma.open(str(path), "rb")
+ elif path.suffix == ".zst":
+ dctx = ZstdDecompressor()
+ return dctx.stream_reader(path.open("rb"))
+ elif path.suffix == ".tar":
+ return path.open("rb")
+ else:
+ raise ValueError("unknown archive format for tar file: %s" % path)
+
+
+def archive_type(path: pathlib.Path):
+ """Attempt to identify a path as an extractable archive."""
+ if path.suffixes[-2:-1] == [".tar"]:
+ return "tar"
+ elif path.suffix == ".zip":
+ return "zip"
+ else:
+ return None
+
+
+def extract_archive(path, dest_dir, typ):
+ """Extract an archive to a destination directory."""
+
+ # Resolve paths to absolute variants.
+ path = path.resolve()
+ dest_dir = dest_dir.resolve()
+
+ log("Extracting %s to %s" % (path, dest_dir))
+ t0 = time.time()
+
+ # We pipe input to the decompressor program so that we can apply
+ # custom decompressors that the program may not know about.
+ if typ == "tar":
+ ifh = open_tar_stream(path)
+        # On Windows, the tar program doesn't support things like symbolic
+        # links, while Windows itself actually supports them. The tarfile
+        # module in Python does too, so use that. But since tarfile is
+        # significantly slower than the tar program on Linux, only use it on
+        # Windows (where tarfile isn't much slower than tar, presumably
+        # because of Windows' notoriously slow I/O).
+ if sys.platform == "win32":
+ tar = tarfile.open(fileobj=ifh, mode="r|")
+ tar.extractall(str(dest_dir))
+ args = []
+ else:
+ args = ["tar", "xf", "-"]
+ pipe_stdin = True
+ elif typ == "zip":
+ # unzip from stdin has wonky behavior. We don't use a pipe for it.
+ ifh = open(os.devnull, "rb")
+ args = ["unzip", "-o", str(path)]
+ pipe_stdin = False
+ else:
+ raise ValueError("unknown archive format: %s" % path)
+
+ if args:
+ with ifh, subprocess.Popen(
+ args, cwd=str(dest_dir), bufsize=0, stdin=subprocess.PIPE
+ ) as p:
+ while True:
+ if not pipe_stdin:
+ break
+
+ chunk = ifh.read(131072)
+ if not chunk:
+ break
+
+ p.stdin.write(chunk)
+
+ if p.returncode:
+ raise Exception("%r exited %d" % (args, p.returncode))
+
+ log("%s extracted in %.3fs" % (path, time.time() - t0))
+
+
+def repack_archive(
+ orig: pathlib.Path, dest: pathlib.Path, strip_components=0, prefix=""
+):
+ assert orig != dest
+ log("Repacking as %s" % dest)
+ orig_typ = archive_type(orig)
+ typ = archive_type(dest)
+ if not orig_typ:
+ raise Exception("Archive type not supported for %s" % orig.name)
+ if not typ:
+ raise Exception("Archive type not supported for %s" % dest.name)
+
+ if dest.suffixes[-2:] != [".tar", ".zst"]:
+ raise Exception("Only producing .tar.zst archives is supported.")
+
+ if strip_components or prefix:
+
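+        # Worked example (illustrative): with strip_components=1 and
+        # prefix="out/", the member "a/b/c.txt" becomes "out/b/c.txt".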
+ def filter(name):
+ if strip_components:
+ stripped = "/".join(name.split("/")[strip_components:])
+ if not stripped:
+ raise Exception(
+ "Stripping %d components would remove files" % strip_components
+ )
+ name = stripped
+ return prefix + name
+
+ else:
+ filter = None
+
+ with rename_after_close(dest, "wb") as fh:
+ ctx = ZstdCompressor()
+ if orig_typ == "zip":
+ assert typ == "tar"
+ zip = zipfile.ZipFile(orig)
+ # Convert the zip stream to a tar on the fly.
+ with ctx.stream_writer(fh) as compressor, tarfile.open(
+ fileobj=compressor, mode="w:"
+ ) as tar:
+ for zipinfo in zip.infolist():
+ if zipinfo.is_dir():
+ continue
+ tarinfo = tarfile.TarInfo()
+ filename = zipinfo.filename
+ tarinfo.name = filter(filename) if filter else filename
+ tarinfo.size = zipinfo.file_size
+                    # Zip files don't record the timezone they were created
+                    # in, which makes reliably converting them to a timestamp
+                    # awkward. We care about reproducibility rather than
+                    # accuracy, so we pick UTC.
+                    dt = datetime.datetime(
+                        *zipinfo.date_time, tzinfo=datetime.timezone.utc
+                    )
+                    tarinfo.mtime = dt.timestamp()
+ # 0 is MS-DOS, 3 is UNIX. Only in the latter case do we
+ # get anything useful for the tar file mode.
+ if zipinfo.create_system == 3:
+ mode = zipinfo.external_attr >> 16
+ else:
+ mode = 0o0644
+ tarinfo.mode = stat.S_IMODE(mode)
+ if stat.S_ISLNK(mode):
+ tarinfo.type = tarfile.SYMTYPE
+ tarinfo.linkname = zip.read(filename).decode()
+ tar.addfile(tarinfo, zip.open(filename))
+ elif stat.S_ISREG(mode) or stat.S_IFMT(mode) == 0:
+ tar.addfile(tarinfo, zip.open(filename))
+ else:
+ raise Exception("Unsupported file mode %o" % stat.S_IFMT(mode))
+
+ elif orig_typ == "tar":
+ if typ == "zip":
+ raise Exception("Repacking a tar to zip is not supported")
+ assert typ == "tar"
+
+ ifh = open_tar_stream(orig)
+ if filter:
+ # To apply the filter, we need to open the tar stream and
+ # tweak it.
+ origtar = tarfile.open(fileobj=ifh, mode="r|")
+ with ctx.stream_writer(fh) as compressor, tarfile.open(
+ fileobj=compressor,
+ mode="w:",
+ format=origtar.format,
+ ) as tar:
+ for tarinfo in origtar:
+ if tarinfo.isdir():
+ continue
+ tarinfo.name = filter(tarinfo.name)
+ if "path" in tarinfo.pax_headers:
+ tarinfo.pax_headers["path"] = filter(
+ tarinfo.pax_headers["path"]
+ )
+ if tarinfo.isfile():
+ tar.addfile(tarinfo, origtar.extractfile(tarinfo))
+ else:
+ tar.addfile(tarinfo)
+ else:
+ # We only change compression here. The tar stream is unchanged.
+ ctx.copy_stream(ifh, fh)
+
+
+def fetch_and_extract(url, dest_dir, extract=True, sha256=None, size=None):
+ """Fetch a URL and extract it to a destination path.
+
+ If the downloaded URL is an archive, it is extracted automatically
+ and the archive is deleted. Otherwise the file remains in place in
+ the destination directory.
+ """
+
+ basename = urllib.parse.urlparse(url).path.split("/")[-1]
+ dest_path = dest_dir / basename
+
+ download_to_path(url, dest_path, sha256=sha256, size=size)
+
+ if not extract:
+ return
+
+ typ = archive_type(dest_path)
+ if typ:
+ extract_archive(dest_path, dest_dir, typ)
+ log("Removing %s" % dest_path)
+ dest_path.unlink()
+
+
+def fetch_urls(downloads):
+ """Fetch URLs pairs to a pathlib.Path."""
+ with concurrent.futures.ThreadPoolExecutor(CONCURRENCY) as e:
+ fs = []
+
+ for download in downloads:
+ fs.append(e.submit(fetch_and_extract, *download))
+
+ for f in fs:
+ f.result()
+
+
+def _git_checkout_github_archive(
+ dest_path: pathlib.Path, repo: str, commit: str, prefix: str
+):
+ "Use github archive generator to speed up github git repo cloning"
+ repo = repo.rstrip("/")
+ github_url = "{repo}/archive/{commit}.tar.gz".format(**locals())
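+    # e.g. (hypothetical) repo https://github.com/org/project and commit
+    # abc123 yield https://github.com/org/project/archive/abc123.tar.gz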
+
+ with tempfile.TemporaryDirectory() as td:
+ temp_dir = pathlib.Path(td)
+ dl_dest = temp_dir / "archive.tar.gz"
+ download_to_path(github_url, dl_dest)
+ repack_archive(dl_dest, dest_path, strip_components=1, prefix=prefix + "/")
+
+
+def _github_submodule_required(repo: str, commit: str):
+ "Use github API to check if submodules are used"
+ url = "{repo}/blob/{commit}/.gitmodules".format(**locals())
+ try:
+ status_code = urllib.request.urlopen(url).getcode()
+ return status_code == 200
+    except Exception:
+ return False
+
+
+def git_checkout_archive(
+ dest_path: pathlib.Path,
+ repo: str,
+ commit: str,
+ prefix=None,
+ ssh_key=None,
+ include_dot_git=False,
+):
+ """Produce an archive of the files comprising a Git checkout."""
+ dest_path.parent.mkdir(parents=True, exist_ok=True)
+
+ if not prefix:
+ prefix = repo.rstrip("/").rsplit("/", 1)[-1]
+
+ if dest_path.suffixes[-2:] != [".tar", ".zst"]:
+ raise Exception("Only producing .tar.zst archives is supported.")
+
+ if repo.startswith("https://github.com/"):
+ if not include_dot_git and not _github_submodule_required(repo, commit):
+ log("Using github archive service to speedup archive creation")
+ # Always log sha1 info, either from commit or resolved from repo.
+ if re.match(r"^[a-fA-F0-9]{40}$", commit):
+ revision = commit
+ else:
+            ref_output = subprocess.check_output(
+                ["git", "ls-remote", repo, "refs/heads/" + commit]
+            )
+ revision, _ = ref_output.decode().split(maxsplit=1)
+ log("Fetching revision {}".format(revision))
+ return _git_checkout_github_archive(dest_path, repo, commit, prefix)
+
+ with tempfile.TemporaryDirectory() as td:
+ temp_dir = pathlib.Path(td)
+
+ git_dir = temp_dir / prefix
+
+ # This could be faster with a shallow clone. However, Git requires a ref
+ # to initiate a clone. Since the commit-ish may not refer to a ref, we
+ # simply perform a full clone followed by a checkout.
+ print("cloning %s to %s" % (repo, git_dir))
+
+ env = os.environ.copy()
+ keypath = ""
+ if ssh_key:
+ taskcluster_secret_url = api(
+ os.environ.get("TASKCLUSTER_PROXY_URL"),
+ "secrets",
+ "v1",
+ "secret/{keypath}".format(keypath=ssh_key),
+ )
+ taskcluster_secret = b"".join(stream_download(taskcluster_secret_url))
+ taskcluster_secret = json.loads(taskcluster_secret)
+ sshkey = taskcluster_secret["secret"]["ssh_privkey"]
+
+ keypath = temp_dir.joinpath("ssh-key")
+ keypath.write_text(sshkey)
+ keypath.chmod(0o600)
+
+            # Extend the inherited environment rather than replacing it
+            # wholesale, so git still sees PATH, HOME, etc.
+            env["GIT_SSH_COMMAND"] = "ssh -o 'StrictHostKeyChecking no' -i {keypath}".format(
+                keypath=keypath
+            )
+
+ subprocess.run(["git", "clone", "-n", repo, str(git_dir)], check=True, env=env)
+
+ # Always use a detached head so that git prints out what it checked out.
+ subprocess.run(
+ ["git", "checkout", "--detach", commit], cwd=str(git_dir), check=True
+ )
+
+ # When including the .git, we want --depth 1, but a direct clone would not
+ # necessarily be able to give us the right commit.
+ if include_dot_git:
+ initial_clone = git_dir.with_name(git_dir.name + ".orig")
+ git_dir.rename(initial_clone)
+ subprocess.run(
+ [
+ "git",
+ "clone",
+ "file://" + str(initial_clone),
+ str(git_dir),
+ "--depth",
+ "1",
+ ],
+ check=True,
+ )
+ subprocess.run(
+ ["git", "remote", "set-url", "origin", repo],
+ cwd=str(git_dir),
+ check=True,
+ )
+
+ # --depth 1 can induce more work on the server side, so only use it for
+ # submodule initialization when we want to keep the .git directory.
+ depth = ["--depth", "1"] if include_dot_git else []
+ subprocess.run(
+ ["git", "submodule", "update", "--init"] + depth,
+ cwd=str(git_dir),
+ check=True,
+ )
+
+ if keypath:
+ os.remove(keypath)
+
+ print("creating archive %s of commit %s" % (dest_path, commit))
+ exclude_dot_git = [] if include_dot_git else ["--exclude=.git"]
+ proc = subprocess.Popen(
+ [
+ "tar",
+ "cf",
+ "-",
+ ]
+ + exclude_dot_git
+ + [
+ "-C",
+ str(temp_dir),
+ prefix,
+ ],
+ stdout=subprocess.PIPE,
+ )
+
+ with rename_after_close(dest_path, "wb") as out:
+ ctx = ZstdCompressor()
+ ctx.copy_stream(proc.stdout, out)
+
+ proc.wait()
+
+
+def command_git_checkout_archive(args):
+ dest = pathlib.Path(args.dest)
+
+ try:
+ git_checkout_archive(
+ dest,
+ args.repo,
+ args.commit,
+ prefix=args.path_prefix,
+ ssh_key=args.ssh_key_secret,
+ include_dot_git=args.include_dot_git,
+ )
+ except Exception:
+ try:
+ dest.unlink()
+ except FileNotFoundError:
+ pass
+
+ raise
+
+
+def command_static_url(args):
+ gpg_sig_url = args.gpg_sig_url
+ gpg_env_key = args.gpg_key_env
+
+ if bool(gpg_sig_url) != bool(gpg_env_key):
+ print("--gpg-sig-url and --gpg-key-env must both be defined")
+ return 1
+
+ if gpg_sig_url:
+ gpg_signature = b"".join(stream_download(gpg_sig_url))
+ gpg_key = os.environb[gpg_env_key.encode("ascii")]
+
+ dest = pathlib.Path(args.dest)
+ dest.parent.mkdir(parents=True, exist_ok=True)
+
+ basename = urllib.parse.urlparse(args.url).path.split("/")[-1]
+ if basename.endswith("".join(dest.suffixes)):
+ dl_dest = dest
+ else:
+ dl_dest = dest.parent / basename
+
+ try:
+ download_to_path(
+ args.url, dl_dest, sha256=args.sha256, size=args.size, headers=args.headers
+ )
+
+ if gpg_sig_url:
+ gpg_verify_path(dl_dest, gpg_key, gpg_signature)
+
+ if dl_dest != dest or args.strip_components or args.add_prefix:
+ repack_archive(dl_dest, dest, args.strip_components, args.add_prefix)
+ except Exception:
+ try:
+ dl_dest.unlink()
+ except FileNotFoundError:
+ pass
+
+ raise
+
+ if dl_dest != dest:
+ log("Removing %s" % dl_dest)
+ dl_dest.unlink()
+
+
+def api(root_url, service, version, path):
+ # taskcluster-lib-urls is not available when this script runs, so
+ # simulate its behavior:
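+    # e.g. (illustrative) api("https://tc.example.com", "queue", "v1",
+    # "task/abc") -> "https://tc.example.com/api/queue/v1/task/abc"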
+ return "{root_url}/api/{service}/{version}/{path}".format(
+ root_url=root_url, service=service, version=version, path=path
+ )
+
+
+def get_hash(fetch, root_url):
+ path = "task/{task}/artifacts/{artifact}".format(
+ task=fetch["task"], artifact="public/chain-of-trust.json"
+ )
+ url = api(root_url, "queue", "v1", path)
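+    # The chain-of-trust document maps artifact names to hash metadata, e.g.
+    # (illustrative): {"artifacts": {"public/build/app.tar.zst": {"sha256": "..."}}}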
+ cot = json.loads(download_to_memory(url))
+ return cot["artifacts"][fetch["artifact"]]["sha256"]
+
+
+def command_task_artifacts(args):
+ start = time.monotonic()
+ fetches = json.loads(os.environ["MOZ_FETCHES"])
+ downloads = []
+ for fetch in fetches:
+ extdir = pathlib.Path(args.dest)
+ if "dest" in fetch:
+ # Note: normpath doesn't like pathlib.Path in python 3.5
+ extdir = pathlib.Path(os.path.normpath(str(extdir.joinpath(fetch["dest"]))))
+ extdir.mkdir(parents=True, exist_ok=True)
+ root_url = os.environ["TASKCLUSTER_ROOT_URL"]
+ sha256 = None
+ if fetch.get("verify-hash"):
+ sha256 = get_hash(fetch, root_url)
+ if fetch["artifact"].startswith("public/"):
+ path = "task/{task}/artifacts/{artifact}".format(
+ task=fetch["task"], artifact=fetch["artifact"]
+ )
+ url = api(root_url, "queue", "v1", path)
+ else:
+ url = ("{proxy_url}/api/queue/v1/task/{task}/artifacts/{artifact}").format(
+ proxy_url=os.environ["TASKCLUSTER_PROXY_URL"],
+ task=fetch["task"],
+ artifact=fetch["artifact"],
+ )
+ downloads.append((url, extdir, fetch["extract"], sha256))
+
+ fetch_urls(downloads)
+ end = time.monotonic()
+
+ perfherder_data = {
+ "framework": {"name": "build_metrics"},
+ "suites": [
+ {
+ "name": "fetch_content",
+ "value": end - start,
+ "lowerIsBetter": True,
+ "shouldAlert": False,
+ "subtests": [],
+ }
+ ],
+ }
+ print("PERFHERDER_DATA: {}".format(json.dumps(perfherder_data)), file=sys.stderr)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ subparsers = parser.add_subparsers(title="sub commands")
+
+ git_checkout = subparsers.add_parser(
+ "git-checkout-archive",
+ help="Obtain an archive of files from a Git repository checkout",
+ )
+ git_checkout.set_defaults(func=command_git_checkout_archive)
+ git_checkout.add_argument(
+ "--path-prefix", help="Prefix for paths in produced archive"
+ )
+ git_checkout.add_argument("repo", help="URL to Git repository to be cloned")
+ git_checkout.add_argument("commit", help="Git commit to check out")
+ git_checkout.add_argument("dest", help="Destination path of archive")
+ git_checkout.add_argument(
+ "--ssh-key-secret", help="The scope path of the ssh key to used for checkout"
+ )
+ git_checkout.add_argument(
+ "--include-dot-git", action="store_true", help="Include the .git directory"
+ )
+
+ url = subparsers.add_parser("static-url", help="Download a static URL")
+ url.set_defaults(func=command_static_url)
+ url.add_argument("--sha256", required=True, help="SHA-256 of downloaded content")
+ url.add_argument(
+ "--size", required=True, type=int, help="Size of downloaded content, in bytes"
+ )
+ url.add_argument(
+ "--gpg-sig-url",
+ help="URL containing signed GPG document validating " "URL to fetch",
+ )
+ url.add_argument(
+ "--gpg-key-env", help="Environment variable containing GPG key to validate"
+ )
+ url.add_argument(
+ "--strip-components",
+ type=int,
+ default=0,
+ help="Number of leading components to strip from file "
+ "names in the downloaded archive",
+ )
+ url.add_argument(
+ "--add-prefix",
+ default="",
+ help="Prefix to add to file names in the downloaded " "archive",
+ )
+ url.add_argument(
+ "-H",
+ "--header",
+ default=[],
+ action="append",
+ dest="headers",
+ help="Header to send as part of the request, can be passed " "multiple times",
+ )
+ url.add_argument("url", help="URL to fetch")
+ url.add_argument("dest", help="Destination path")
+
+ artifacts = subparsers.add_parser("task-artifacts", help="Fetch task artifacts")
+ artifacts.set_defaults(func=command_task_artifacts)
+ artifacts.add_argument(
+ "-d",
+ "--dest",
+ default=os.environ.get("MOZ_FETCHES_DIR"),
+ help="Destination directory which will contain all "
+ "artifacts (defaults to $MOZ_FETCHES_DIR)",
+ )
+
+ args = parser.parse_args()
+
+ if not args.dest:
+ parser.error(
+ "no destination directory specified, either pass in --dest "
+ "or set $MOZ_FETCHES_DIR"
+ )
+
+ return args.func(args)
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/hgrc b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/hgrc
new file mode 100755
index 0000000000..f6a2f6643c
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/hgrc
@@ -0,0 +1,33 @@
+# By default the progress bar starts after 3s and updates every 0.1s. We
+# change this so it shows and updates every 1.0s.
+# We also tell progress to assume a TTY is present so updates are printed
+# even if there is no known TTY.
+[progress]
+delay = 1.0
+refresh = 1.0
+assume-tty = true
+
+[extensions]
+share =
+sparse =
+robustcheckout = /usr/local/mercurial/robustcheckout.py
+
+[hostsecurity]
+# When running a modern Python, Mercurial will default to TLS 1.1+.
+# When running on a legacy Python, Mercurial will default to TLS 1.0+.
+# There is no good reason we shouldn't be running a modern Python
+# capable of speaking TLS 1.2. And the only Mercurial servers we care
+# about should be running TLS 1.2. So make TLS 1.2 the minimum.
+minimumprotocol = tls1.2
+
+# Settings to make 1-click loaners more useful.
+[extensions]
+histedit =
+rebase =
+
+[diff]
+git = 1
+showfunc = 1
+
+[pager]
+pager = LESS=FRSXQ less
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/robustcheckout.py b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/robustcheckout.py
new file mode 100644
index 0000000000..7e12d07d50
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/robustcheckout.py
@@ -0,0 +1,826 @@
+# This software may be used and distributed according to the terms of the
+# GNU General Public License version 2 or any later version.
+
+"""Robustly perform a checkout.
+
+This extension provides the ``hg robustcheckout`` command for
+ensuring a working directory is updated to the specified revision
+from a source repo using best practices to ensure optimal clone
+times and storage efficiency.
+"""
+
+from __future__ import absolute_import
+
+import contextlib
+import json
+import os
+import random
+import re
+import socket
+import ssl
+import time
+
+from mercurial.i18n import _
+from mercurial.node import hex, nullid
+from mercurial import (
+ commands,
+ configitems,
+ error,
+ exchange,
+ extensions,
+ hg,
+ match as matchmod,
+ pycompat,
+ registrar,
+ scmutil,
+ urllibcompat,
+ util,
+ vfs,
+)
+
+# Causes worker to purge caches on process exit and for task to retry.
+EXIT_PURGE_CACHE = 72
+
+testedwith = b"4.5 4.6 4.7 4.8 4.9 5.0 5.1 5.2 5.3 5.4 5.5 5.6 5.7 5.8 5.9"
+minimumhgversion = b"4.5"
+
+cmdtable = {}
+command = registrar.command(cmdtable)
+
+configtable = {}
+configitem = registrar.configitem(configtable)
+
+configitem(b"robustcheckout", b"retryjittermin", default=configitems.dynamicdefault)
+configitem(b"robustcheckout", b"retryjittermax", default=configitems.dynamicdefault)
+
+
+def getsparse():
+ from mercurial import sparse
+
+ return sparse
+
+
+def peerlookup(remote, v):
+ with remote.commandexecutor() as e:
+ return e.callcommand(b"lookup", {b"key": v}).result()
+
+
+@command(
+ b"robustcheckout",
+ [
+ (b"", b"upstream", b"", b"URL of upstream repo to clone from"),
+ (b"r", b"revision", b"", b"Revision to check out"),
+ (b"b", b"branch", b"", b"Branch to check out"),
+ (b"", b"purge", False, b"Whether to purge the working directory"),
+ (b"", b"sharebase", b"", b"Directory where shared repos should be placed"),
+ (
+ b"",
+ b"networkattempts",
+ 3,
+ b"Maximum number of attempts for network " b"operations",
+ ),
+ (b"", b"sparseprofile", b"", b"Sparse checkout profile to use (path in repo)"),
+ (
+ b"U",
+ b"noupdate",
+ False,
+ b"the clone will include an empty working directory\n"
+ b"(only a repository)",
+ ),
+ ],
+ b"[OPTION]... URL DEST",
+ norepo=True,
+)
+def robustcheckout(
+ ui,
+ url,
+ dest,
+ upstream=None,
+ revision=None,
+ branch=None,
+ purge=False,
+ sharebase=None,
+ networkattempts=None,
+ sparseprofile=None,
+ noupdate=False,
+):
+ """Ensure a working copy has the specified revision checked out.
+
+ Repository data is automatically pooled into the common directory
+ specified by ``--sharebase``, which is a required argument. It is required
+ because pooling storage prevents excessive cloning, which makes operations
+ complete faster.
+
+ One of ``--revision`` or ``--branch`` must be specified. ``--revision``
+ is preferred, as it is deterministic and there is no ambiguity as to which
+ revision will actually be checked out.
+
+ If ``--upstream`` is used, the repo at that URL is used to perform the
+ initial clone instead of cloning from the repo where the desired revision
+ is located.
+
+    ``--purge`` controls whether to remove untracked and ignored files from
+ the working directory. If used, the end state of the working directory
+ should only contain files explicitly under version control for the requested
+ revision.
+
+ ``--sparseprofile`` can be used to specify a sparse checkout profile to use.
+ The sparse checkout profile corresponds to a file in the revision to be
+ checked out. If a previous sparse profile or config is present, it will be
+ replaced by this sparse profile. We choose not to "widen" the sparse config
+ so operations are as deterministic as possible. If an existing checkout
+ is present and it isn't using a sparse checkout, we error. This is to
+ prevent accidentally enabling sparse on a repository that may have
+ clients that aren't sparse aware. Sparse checkout support requires Mercurial
+ 4.3 or newer and the ``sparse`` extension must be enabled.
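+
+    Example invocation (illustrative; paths and hashes are hypothetical)::
+
+      $ hg robustcheckout --sharebase /builds/hg-shared --revision abcdef123456 https://hg.example.com/repo /builds/checkout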
+ """
+ if not revision and not branch:
+ raise error.Abort(b"must specify one of --revision or --branch")
+
+ if revision and branch:
+ raise error.Abort(b"cannot specify both --revision and --branch")
+
+ # Require revision to look like a SHA-1.
+ if revision:
+ if (
+ len(revision) < 12
+ or len(revision) > 40
+ or not re.match(b"^[a-f0-9]+$", revision)
+ ):
+ raise error.Abort(
+ b"--revision must be a SHA-1 fragment 12-40 " b"characters long"
+ )
+
+ sharebase = sharebase or ui.config(b"share", b"pool")
+ if not sharebase:
+ raise error.Abort(
+ b"share base directory not defined; refusing to operate",
+ hint=b"define share.pool config option or pass --sharebase",
+ )
+
+ # Sparse profile support was added in Mercurial 4.3, where it was highly
+ # experimental. Because of the fragility of it, we only support sparse
+ # profiles on 4.3. When 4.4 is released, we'll need to opt in to sparse
+ # support. We /could/ silently fall back to non-sparse when not supported.
+ # However, given that sparse has performance implications, we want to fail
+ # fast if we can't satisfy the desired checkout request.
+ if sparseprofile:
+ try:
+ extensions.find(b"sparse")
+ except KeyError:
+ raise error.Abort(
+ b"sparse extension must be enabled to use " b"--sparseprofile"
+ )
+
+ ui.warn(b"(using Mercurial %s)\n" % util.version())
+
+ # worker.backgroundclose only makes things faster if running anti-virus,
+ # which our automation doesn't. Disable it.
+ ui.setconfig(b"worker", b"backgroundclose", False)
+
+ # By default the progress bar starts after 3s and updates every 0.1s. We
+ # change this so it shows and updates every 1.0s.
+ # We also tell progress to assume a TTY is present so updates are printed
+ # even if there is no known TTY.
+ # We make the config change here instead of in a config file because
+ # otherwise we're at the whim of whatever configs are used in automation.
+ ui.setconfig(b"progress", b"delay", 1.0)
+ ui.setconfig(b"progress", b"refresh", 1.0)
+ ui.setconfig(b"progress", b"assume-tty", True)
+
+ sharebase = os.path.realpath(sharebase)
+
+ optimes = []
+ behaviors = set()
+ start = time.time()
+
+ try:
+ return _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattempts,
+ sparse_profile=sparseprofile,
+ noupdate=noupdate,
+ )
+ finally:
+ overall = time.time() - start
+
+ # We store the overall time multiple ways in order to help differentiate
+ # the various "flavors" of operations.
+
+ # ``overall`` is always the total operation time.
+ optimes.append(("overall", overall))
+
+ def record_op(name):
+ # If special behaviors due to "corrupt" storage occur, we vary the
+ # name to convey that.
+ if "remove-store" in behaviors:
+ name += "_rmstore"
+ if "remove-wdir" in behaviors:
+ name += "_rmwdir"
+
+ optimes.append((name, overall))
+
+        # We break out overall operations primarily by their network interaction.
+ # We have variants within for working directory operations.
+ if "clone" in behaviors and "create-store" in behaviors:
+ record_op("overall_clone")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_clone_sparsecheckout")
+ else:
+ record_op("overall_clone_fullcheckout")
+
+ elif "pull" in behaviors or "clone" in behaviors:
+ record_op("overall_pull")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_pull_sparsecheckout")
+ else:
+ record_op("overall_pull_fullcheckout")
+
+ if "empty-wdir" in behaviors:
+ record_op("overall_pull_emptywdir")
+ else:
+ record_op("overall_pull_populatedwdir")
+
+ else:
+ record_op("overall_nopull")
+
+ if "sparse-update" in behaviors:
+ record_op("overall_nopull_sparsecheckout")
+ else:
+ record_op("overall_nopull_fullcheckout")
+
+ if "empty-wdir" in behaviors:
+ record_op("overall_nopull_emptywdir")
+ else:
+ record_op("overall_nopull_populatedwdir")
+
+ server_url = urllibcompat.urlreq.urlparse(url).netloc
+
+ if "TASKCLUSTER_INSTANCE_TYPE" in os.environ:
+ perfherder = {
+ "framework": {
+ "name": "vcs",
+ },
+ "suites": [],
+ }
+ for op, duration in optimes:
+ perfherder["suites"].append(
+ {
+ "name": op,
+ "value": duration,
+ "lowerIsBetter": True,
+ "shouldAlert": False,
+ "serverUrl": server_url.decode("utf-8"),
+ "hgVersion": util.version().decode("utf-8"),
+ "extraOptions": [os.environ["TASKCLUSTER_INSTANCE_TYPE"]],
+ "subtests": [],
+ }
+ )
+ ui.write(
+ b"PERFHERDER_DATA: %s\n"
+ % pycompat.bytestr(json.dumps(perfherder, sort_keys=True))
+ )
+
+
+def _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattemptlimit,
+ networkattempts=None,
+ sparse_profile=None,
+ noupdate=False,
+):
+ if not networkattempts:
+ networkattempts = [1]
+
+ def callself():
+ return _docheckout(
+ ui,
+ url,
+ dest,
+ upstream,
+ revision,
+ branch,
+ purge,
+ sharebase,
+ optimes,
+ behaviors,
+ networkattemptlimit,
+ networkattempts=networkattempts,
+ sparse_profile=sparse_profile,
+ noupdate=noupdate,
+ )
+
+ @contextlib.contextmanager
+ def timeit(op, behavior):
+ behaviors.add(behavior)
+ errored = False
+ try:
+ start = time.time()
+ yield
+ except Exception:
+ errored = True
+ raise
+ finally:
+ elapsed = time.time() - start
+
+ if errored:
+ op += "_errored"
+
+ optimes.append((op, elapsed))
+
+ ui.write(b"ensuring %s@%s is available at %s\n" % (url, revision or branch, dest))
+
+ # We assume that we're the only process on the machine touching the
+ # repository paths that we were told to use. This means our recovery
+ # scenario when things aren't "right" is to just nuke things and start
+ # from scratch. This is easier to implement than verifying the state
+ # of the data and attempting recovery. And in some scenarios (such as
+ # potential repo corruption), it is probably faster, since verifying
+ # repos can take a while.
+
+ destvfs = vfs.vfs(dest, audit=False, realpath=True)
+
+ def deletesharedstore(path=None):
+ storepath = path or destvfs.read(b".hg/sharedpath").strip()
+ if storepath.endswith(b".hg"):
+ storepath = os.path.dirname(storepath)
+
+ storevfs = vfs.vfs(storepath, audit=False)
+ storevfs.rmtree(forcibly=True)
+
+ if destvfs.exists() and not destvfs.exists(b".hg"):
+ raise error.Abort(b"destination exists but no .hg directory")
+
+ # Refuse to enable sparse checkouts on existing checkouts. The reasoning
+ # here is that another consumer of this repo may not be sparse aware. If we
+ # enabled sparse, we would lock them out.
+ if destvfs.exists() and sparse_profile and not destvfs.exists(b".hg/sparse"):
+ raise error.Abort(
+ b"cannot enable sparse profile on existing " b"non-sparse checkout",
+ hint=b"use a separate working directory to use sparse",
+ )
+
+ # And the other direction for symmetry.
+ if not sparse_profile and destvfs.exists(b".hg/sparse"):
+ raise error.Abort(
+ b"cannot use non-sparse checkout on existing sparse " b"checkout",
+ hint=b"use a separate working directory to use sparse",
+ )
+
+    # Require checkouts to be tied to shared storage, for efficiency.
+ if destvfs.exists(b".hg") and not destvfs.exists(b".hg/sharedpath"):
+ ui.warn(b"(destination is not shared; deleting)\n")
+ with timeit("remove_unshared_dest", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ # Verify the shared path exists and is using modern pooled storage.
+ if destvfs.exists(b".hg/sharedpath"):
+ storepath = destvfs.read(b".hg/sharedpath").strip()
+
+ ui.write(b"(existing repository shared store: %s)\n" % storepath)
+
+ if not os.path.exists(storepath):
+ ui.warn(b"(shared store does not exist; deleting destination)\n")
+ with timeit("removed_missing_shared_store", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+        elif not re.search(rb"[a-f0-9]{40}/\.hg$", storepath.replace(b"\\", b"/")):
+ ui.warn(
+ b"(shared store does not belong to pooled storage; "
+ b"deleting destination to improve efficiency)\n"
+ )
+ with timeit("remove_unpooled_store", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ if destvfs.isfileorlink(b".hg/wlock"):
+ ui.warn(
+ b"(dest has an active working directory lock; assuming it is "
+ b"left over from a previous process and that the destination "
+ b"is corrupt; deleting it just to be sure)\n"
+ )
+ with timeit("remove_locked_wdir", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ def handlerepoerror(e):
+ if pycompat.bytestr(e) == _(b"abandoned transaction found"):
+ ui.warn(b"(abandoned transaction found; trying to recover)\n")
+ repo = hg.repository(ui, dest)
+ if not repo.recover():
+ ui.warn(b"(could not recover repo state; " b"deleting shared store)\n")
+ with timeit("remove_unrecovered_shared_store", "remove-store"):
+ deletesharedstore()
+
+ ui.warn(b"(attempting checkout from beginning)\n")
+ return callself()
+
+ raise
+
+ # At this point we either have an existing working directory using
+ # shared, pooled storage or we have nothing.
+
+ def handlenetworkfailure():
+ if networkattempts[0] >= networkattemptlimit:
+ raise error.Abort(
+ b"reached maximum number of network attempts; " b"giving up\n"
+ )
+
+ ui.warn(
+ b"(retrying after network failure on attempt %d of %d)\n"
+ % (networkattempts[0], networkattemptlimit)
+ )
+
+ # Do a backoff on retries to mitigate the thundering herd
+    # problem. This is an exponential backoff with a multiplier
+ # plus random jitter thrown in for good measure.
+ # With the default settings, backoffs will be:
+ # 1) 2.5 - 6.5
+ # 2) 5.5 - 9.5
+ # 3) 11.5 - 15.5
+ backoff = (2 ** networkattempts[0] - 1) * 1.5
+ jittermin = ui.configint(b"robustcheckout", b"retryjittermin", 1000)
+ jittermax = ui.configint(b"robustcheckout", b"retryjittermax", 5000)
+ backoff += float(random.randint(jittermin, jittermax)) / 1000.0
+ ui.warn(b"(waiting %.2fs before retry)\n" % backoff)
+ time.sleep(backoff)
+
+ networkattempts[0] += 1
+
+ def handlepullerror(e):
+ """Handle an exception raised during a pull.
+
+ Returns True if caller should call ``callself()`` to retry.
+ """
+ if isinstance(e, error.Abort):
+ if e.args[0] == _(b"repository is unrelated"):
+ ui.warn(b"(repository is unrelated; deleting)\n")
+ destvfs.rmtree(forcibly=True)
+ return True
+ elif e.args[0].startswith(_(b"stream ended unexpectedly")):
+ ui.warn(b"%s\n" % e.args[0])
+ # Will raise if failure limit reached.
+ handlenetworkfailure()
+ return True
+ # TODO test this branch
+ elif isinstance(e, error.ResponseError):
+ if e.args[0].startswith(_(b"unexpected response from remote server:")):
+ ui.warn(b"(unexpected response from remote server; retrying)\n")
+ destvfs.rmtree(forcibly=True)
+ # Will raise if failure limit reached.
+ handlenetworkfailure()
+ return True
+ elif isinstance(e, ssl.SSLError):
+ # Assume all SSL errors are due to the network, as Mercurial
+ # should convert non-transport errors like cert validation failures
+ # to error.Abort.
+ ui.warn(b"ssl error: %s\n" % pycompat.bytestr(str(e)))
+ handlenetworkfailure()
+ return True
+ elif isinstance(e, urllibcompat.urlerr.urlerror):
+ if isinstance(e.reason, socket.error):
+ ui.warn(b"socket error: %s\n" % pycompat.bytestr(str(e.reason)))
+ handlenetworkfailure()
+ return True
+ else:
+ ui.warn(
+ b"unhandled URLError; reason type: %s; value: %s\n"
+ % (
+ pycompat.bytestr(e.reason.__class__.__name__),
+ pycompat.bytestr(str(e.reason)),
+ )
+ )
+ else:
+ ui.warn(
+ b"unhandled exception during network operation; type: %s; "
+ b"value: %s\n"
+ % (pycompat.bytestr(e.__class__.__name__), pycompat.bytestr(str(e)))
+ )
+
+ return False
+
+ # Perform sanity checking of store. We may or may not know the path to the
+    # local store. It depends on whether we have an existing destvfs pointing
+    # to a share. To ensure we always find a local store, perform the same logic
+ # that Mercurial's pooled storage does to resolve the local store path.
+ cloneurl = upstream or url
+
+ try:
+ clonepeer = hg.peer(ui, {}, cloneurl)
+ rootnode = peerlookup(clonepeer, b"0")
+ except error.RepoLookupError:
+ raise error.Abort(b"unable to resolve root revision from clone " b"source")
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+
+ if rootnode == nullid:
+ raise error.Abort(b"source repo appears to be empty")
+
+ storepath = os.path.join(sharebase, hex(rootnode))
+ storevfs = vfs.vfs(storepath, audit=False)
+
+ if storevfs.isfileorlink(b".hg/store/lock"):
+ ui.warn(
+ b"(shared store has an active lock; assuming it is left "
+ b"over from a previous process and that the store is "
+ b"corrupt; deleting store and destination just to be "
+ b"sure)\n"
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_active_lock", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_active_lock", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ if storevfs.exists() and not storevfs.exists(b".hg/requires"):
+ ui.warn(
+ b"(shared store missing requires file; this is a really "
+ b"odd failure; deleting store and destination)\n"
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_no_requires", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_no_requires", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ if storevfs.exists(b".hg/requires"):
+ requires = set(storevfs.read(b".hg/requires").splitlines())
+ # "share-safe" (enabled by default as of hg 6.1) moved most
+ # requirements to a new file, so we need to look there as well to avoid
+ # deleting and re-cloning each time
+ if b"share-safe" in requires:
+ requires |= set(storevfs.read(b".hg/store/requires").splitlines())
+ # FUTURE when we require generaldelta, this is where we can check
+ # for that.
+ required = {b"dotencode", b"fncache"}
+
+ missing = required - requires
+ if missing:
+ ui.warn(
+ b"(shared store missing requirements: %s; deleting "
+ b"store and destination to ensure optimal behavior)\n"
+ % b", ".join(sorted(missing))
+ )
+ if destvfs.exists():
+ with timeit("remove_dest_missing_requires", "remove-wdir"):
+ destvfs.rmtree(forcibly=True)
+
+ with timeit("remove_shared_store_missing_requires", "remove-store"):
+ storevfs.rmtree(forcibly=True)
+
+ created = False
+
+ if not destvfs.exists():
+ # Ensure parent directories of destination exist.
+ # Mercurial 3.8 removed ensuredirs and made makedirs race safe.
+ if util.safehasattr(util, "ensuredirs"):
+ makedirs = util.ensuredirs
+ else:
+ makedirs = util.makedirs
+
+ makedirs(os.path.dirname(destvfs.base), notindexed=True)
+ makedirs(sharebase, notindexed=True)
+
+ if upstream:
+ ui.write(b"(cloning from upstream repo %s)\n" % upstream)
+
+ if not storevfs.exists():
+ behaviors.add(b"create-store")
+
+ try:
+ with timeit("clone", "clone"):
+ shareopts = {b"pool": sharebase, b"mode": b"identity"}
+ res = hg.clone(
+ ui,
+ {},
+ clonepeer,
+ dest=dest,
+ update=False,
+ shareopts=shareopts,
+ stream=True,
+ )
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+ except error.RepoError as e:
+ return handlerepoerror(e)
+ except error.RevlogError as e:
+ ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
+ with timeit("remove_shared_store_revlogerror", "remote-store"):
+ deletesharedstore()
+ return callself()
+
+ # TODO retry here.
+ if res is None:
+ raise error.Abort(b"clone failed")
+
+ # Verify it is using shared pool storage.
+ if not destvfs.exists(b".hg/sharedpath"):
+ raise error.Abort(b"clone did not create a shared repo")
+
+ created = True
+
+ # The destination .hg directory should exist. Now make sure we have the
+ # wanted revision.
+
+ repo = hg.repository(ui, dest)
+
+ # We only pull if we are using symbolic names or the requested revision
+ # doesn't exist.
+ havewantedrev = False
+
+ if revision:
+ try:
+ ctx = scmutil.revsingle(repo, revision)
+ except error.RepoLookupError:
+ ctx = None
+
+ if ctx:
+ if not ctx.hex().startswith(revision):
+ raise error.Abort(
+ b"--revision argument is ambiguous",
+ hint=b"must be the first 12+ characters of a " b"SHA-1 fragment",
+ )
+
+ checkoutrevision = ctx.hex()
+ havewantedrev = True
+
+ if not havewantedrev:
+ ui.write(b"(pulling to obtain %s)\n" % (revision or branch,))
+
+ remote = None
+ try:
+ remote = hg.peer(repo, {}, url)
+ pullrevs = [peerlookup(remote, revision or branch)]
+ checkoutrevision = hex(pullrevs[0])
+ if branch:
+ ui.warn(
+ b"(remote resolved %s to %s; "
+ b"result is not deterministic)\n" % (branch, checkoutrevision)
+ )
+
+ if checkoutrevision in repo:
+ ui.warn(b"(revision already present locally; not pulling)\n")
+ else:
+ with timeit("pull", "pull"):
+ pullop = exchange.pull(repo, remote, heads=pullrevs)
+ if not pullop.rheads:
+ raise error.Abort(b"unable to pull requested revision")
+ except (error.Abort, ssl.SSLError, urllibcompat.urlerr.urlerror) as e:
+ if handlepullerror(e):
+ return callself()
+ raise
+ except error.RepoError as e:
+ return handlerepoerror(e)
+ except error.RevlogError as e:
+ ui.warn(b"(repo corruption: %s; deleting shared store)\n" % e)
+ deletesharedstore()
+ return callself()
+ finally:
+ if remote:
+ remote.close()
+
+ # Now we should have the wanted revision in the store. Perform
+ # working directory manipulation.
+
+ # Avoid any working directory manipulations if `-U`/`--noupdate` was passed
+ if noupdate:
+ ui.write(b"(skipping update since `-U` was passed)\n")
+ return None
+
+ # Purge if requested. We purge before update because this way we're
+ # guaranteed to not have conflicts on `hg update`.
+ if purge and not created:
+ ui.write(b"(purging working directory)\n")
+ purge = getattr(commands, "purge", None)
+ if not purge:
+ purge = extensions.find(b"purge").purge
+
+ # Mercurial 4.3 doesn't purge files outside the sparse checkout.
+ # See https://bz.mercurial-scm.org/show_bug.cgi?id=5626. Force
+ # purging by monkeypatching the sparse matcher.
+ try:
+ old_sparse_fn = getattr(repo.dirstate, "_sparsematchfn", None)
+ if old_sparse_fn is not None:
+ repo.dirstate._sparsematchfn = lambda: matchmod.always()
+
+ with timeit("purge", "purge"):
+ if purge(
+ ui,
+ repo,
+ all=True,
+ abort_on_err=True,
+ # The function expects all arguments to be
+ # defined.
+ **{"print": None, "print0": None, "dirs": None, "files": None}
+ ):
+ raise error.Abort(b"error purging")
+ finally:
+ if old_sparse_fn is not None:
+ repo.dirstate._sparsematchfn = old_sparse_fn
+
+ # Update the working directory.
+
+ if repo[b"."].node() == nullid:
+ behaviors.add("empty-wdir")
+ else:
+ behaviors.add("populated-wdir")
+
+ if sparse_profile:
+ sparsemod = getsparse()
+
+ # By default, Mercurial will ignore unknown sparse profiles. This could
+ # lead to a full checkout. Be more strict.
+ try:
+ repo.filectx(sparse_profile, changeid=checkoutrevision).data()
+ except error.ManifestLookupError:
+ raise error.Abort(
+ b"sparse profile %s does not exist at revision "
+ b"%s" % (sparse_profile, checkoutrevision)
+ )
+
+ old_config = sparsemod.parseconfig(
+ repo.ui, repo.vfs.tryread(b"sparse"), b"sparse"
+ )
+
+ old_includes, old_excludes, old_profiles = old_config
+
+ if old_profiles == {sparse_profile} and not old_includes and not old_excludes:
+ ui.write(
+ b"(sparse profile %s already set; no need to update "
+ b"sparse config)\n" % sparse_profile
+ )
+ else:
+ if old_includes or old_excludes or old_profiles:
+ ui.write(
+ b"(replacing existing sparse config with profile "
+ b"%s)\n" % sparse_profile
+ )
+ else:
+ ui.write(b"(setting sparse config to profile %s)\n" % sparse_profile)
+
+ # If doing an incremental update, this will perform two updates:
+ # one to change the sparse profile and another to update to the new
+ # revision. This is not desired. But there's not a good API in
+ # Mercurial to do this as one operation.
+ with repo.wlock(), repo.dirstate.parentchange(), timeit(
+ "sparse_update_config", "sparse-update-config"
+ ):
+ # pylint --py3k: W1636
+ fcounts = list(
+ map(
+ len,
+ sparsemod._updateconfigandrefreshwdir(
+ repo, [], [], [sparse_profile], force=True
+ ),
+ )
+ )
+
+ repo.ui.status(
+ b"%d files added, %d files dropped, "
+ b"%d files conflicting\n" % tuple(fcounts)
+ )
+
+ ui.write(b"(sparse refresh complete)\n")
+
+ op = "update_sparse" if sparse_profile else "update"
+ behavior = "update-sparse" if sparse_profile else "update"
+
+ with timeit(op, behavior):
+ if commands.update(ui, repo, rev=checkoutrevision, clean=True):
+ raise error.Abort(b"error updating")
+
+ ui.write(b"updated to %s\n" % checkoutrevision)
+
+ return None
+
+
+def extsetup(ui):
+ # Ensure required extensions are loaded.
+ for ext in (b"purge", b"share"):
+ try:
+ extensions.find(ext)
+ except KeyError:
+ extensions.load(ui, ext, None)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task
new file mode 100755
index 0000000000..f1e281f5cd
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/run-task/run-task
@@ -0,0 +1,1307 @@
+#!/usr/bin/python3 -u
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""Run a task after performing common actions.
+
+This script is meant to be the "driver" for TaskCluster based tasks.
+It receives some common arguments to control the run-time environment.
+
+It performs actions as requested from the arguments. Then it executes
+the requested process and prints its output, prefixing it with the
+current time to improve log usefulness.
+"""
+
+import sys
+from typing import Optional
+
+if sys.version_info[0:2] < (3, 5):
+ print("run-task requires Python 3.5+")
+ sys.exit(1)
+
+
+import argparse
+import datetime
+import errno
+import io
+import json
+import os
+from pathlib import Path
+import re
+import shutil
+import signal
+import socket
+import stat
+import subprocess
+import time
+
+import urllib.error
+import urllib.request
+
+from threading import Thread
+
+SECRET_BASEURL_TPL = "http://taskcluster/secrets/v1/secret/{}"
+
+GITHUB_SSH_FINGERPRINT = (
+ b"github.com ssh-rsa "
+ b"AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkcc"
+ b"Krpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFz"
+ b"LQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaS"
+ b"jB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3sku"
+ b"a2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==\n"
+)
+
+
+CACHE_UID_GID_MISMATCH = """
+There is a UID/GID mismatch on the cache. This likely means:
+
+a) different tasks are running as a different user/group
+b) different Docker images have different UID/GID for the same user/group
+
+Our cache policy is that the UID/GID for ALL tasks must be consistent
+for the lifetime of the cache. This eliminates permissions problems due
+to file/directory user/group ownership.
+
+To make this error go away, ensure that all Docker images use
+a consistent UID/GID and that all tasks using this cache are running as
+the same user/group.
+"""
+
+
+NON_EMPTY_VOLUME = """
+error: volume %s is not empty
+
+Our Docker image policy requires volumes to be empty.
+
+The volume was likely populated as part of building the Docker image.
+Change the Dockerfile and anything run from it to not create files in
+any VOLUME.
+
+A lesser possibility is that you stumbled upon a TaskCluster platform bug
+where it fails to use new volumes for tasks.
+"""
+
+
+FETCH_CONTENT_NOT_FOUND = """
+error: fetch-content script not found
+
+The script at `taskcluster/scripts/misc/fetch-content` could not be
+detected in the current environment.
+"""
+
+# The exit code to use when caches should be purged and the task retried.
+# This is EX_OSFILE (from sysexits.h):
+# Some system file does not exist, cannot be opened, or has some
+# sort of error (e.g., syntax error).
+EXIT_PURGE_CACHE = 72
+
+
+IS_MACOSX = sys.platform == "darwin"
+IS_POSIX = os.name == "posix"
+IS_WINDOWS = os.name == "nt"
+
+# Both Mercurial and Git use SHA-1 as revision identifiers. Luckily, both define
+# the same value as the null revision.
+#
+# https://github.com/git/git/blob/dc04167d378fb29d30e1647ff6ff51dd182bc9a3/t/oid-info/hash-info#L7
+# https://www.mercurial-scm.org/repo/hg-stable/file/82efc31bd152/mercurial/node.py#l30
+NULL_REVISION = "0000000000000000000000000000000000000000"
+
+
+def print_line(prefix, m):
+ now = datetime.datetime.utcnow().isoformat().encode("utf-8")
+ # slice microseconds to 3 decimals.
+ now = now[:-3] if now[-7:-6] == b"." else now
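+    # Produces lines like (illustrative): [vcs 2024-01-01T12:00:00.123Z] msg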
+ sys.stdout.buffer.write(b"[%s %sZ] %s" % (prefix, now, m))
+ sys.stdout.buffer.flush()
+
+
+def _call_windows_retry(func, args=(), retry_max=5, retry_delay=0.5):
+ """
+ It's possible to see spurious errors on Windows due to various things
+ keeping a handle to the directory open (explorer, virus scanners, etc)
+ So we try a few times if it fails with a known error.
+ retry_delay is multiplied by the number of failed attempts to increase
+ the likelihood of success in subsequent attempts.
+ """
+ retry_count = 0
+ while True:
+ try:
+ func(*args)
+ except OSError as e:
+ # Error codes are defined in:
+ # https://docs.python.org/3/library/errno.html#module-errno
+ if e.errno not in (errno.EACCES, errno.ENOTEMPTY, errno.ENOENT):
+ raise
+
+ if retry_count == retry_max:
+ raise
+
+ retry_count += 1
+
+ print(
+ '%s() failed for "%s". Reason: %s (%s). Retrying...'
+ % (func.__name__, args, e.strerror, e.errno)
+ )
+ time.sleep(retry_count * retry_delay)
+ else:
+ # If no exception has been thrown it should be done
+ break
+
+
+def remove(path):
+ """Removes the specified file, link, or directory tree.
+
+ This is a replacement for shutil.rmtree that works better under
+ windows. It does the following things:
+
+ - check path access for the current user before trying to remove
+ - retry operations on some known errors due to various things keeping
+ a handle on file paths - like explorer, virus scanners, etc. The
+      known errors are errno.EACCES, errno.ENOTEMPTY, and errno.ENOENT, and
+      it will retry up to five times with a delay of (failed_attempts * 0.5)
+      seconds between each attempt.
+
+    Note that no error will be raised if the given path does not exist.
+
+ :param path: path to be removed
+ """
+
+ def _update_permissions(path):
+ """Sets specified pemissions depending on filetype"""
+ if os.path.islink(path):
+ # Path is a symlink which we don't have to modify
+ # because it should already have all the needed permissions
+ return
+
+ stats = os.stat(path)
+
+ if os.path.isfile(path):
+ mode = stats.st_mode | stat.S_IWUSR
+ elif os.path.isdir(path):
+ mode = stats.st_mode | stat.S_IWUSR | stat.S_IXUSR
+ else:
+ # Not supported type
+ return
+
+ _call_windows_retry(os.chmod, (path, mode))
+
+ if not os.path.lexists(path):
+ print_line(b"remove", b"WARNING: %s does not exists!\n" % path.encode("utf-8"))
+ return
+
+ """
+ On Windows, adds '\\\\?\\' to paths which match ^[A-Za-z]:\\.* to access
+ files or directories that exceed MAX_PATH(260) limitation or that ends
+ with a period.
+ """
+ if (
+ sys.platform in ("win32", "cygwin")
+ and len(path) >= 3
+ and path[1] == ":"
+ and path[2] == "\\"
+ ):
+ path = "\\\\?\\%s" % path
+
+ if os.path.isfile(path) or os.path.islink(path):
+ # Verify the file or link is read/write for the current user
+ _update_permissions(path)
+ _call_windows_retry(os.remove, (path,))
+
+ elif os.path.isdir(path):
+ # Verify the directory is read/write/execute for the current user
+ _update_permissions(path)
+
+ # We're ensuring that every nested item has writable permission.
+ for root, dirs, files in os.walk(path):
+ for entry in dirs + files:
+ _update_permissions(os.path.join(root, entry))
+ _call_windows_retry(shutil.rmtree, (path,))
+
+
+def run_required_command(prefix, args, *, extra_env=None, cwd=None):
+ res = run_command(prefix, args, extra_env=extra_env, cwd=cwd)
+ if res:
+ sys.exit(res)
+
+
+def retry_required_command(prefix, args, *, extra_env=None, cwd=None, retries=2):
+ backoff = 1
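+    # Doubles before each sleep, so waits are 2s, 4s, 8s, ... between retries.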
+ while True:
+ res = run_command(prefix, args, extra_env=extra_env, cwd=cwd)
+ if not res:
+ return
+ if not retries:
+ sys.exit(res)
+ retries -= 1
+ backoff *= 2
+ time.sleep(backoff)
+
+
+def run_command(prefix, args, *, extra_env=None, cwd=None):
+ """Runs a process and prefixes its output with the time.
+
+ Returns the process exit code.
+ """
+ print_line(prefix, b"executing %r\n" % args)
+
+ env = dict(os.environ)
+ env.update(extra_env or {})
+
+ # Note: TaskCluster's stdin is a TTY. This attribute is lost
+ # when we pass sys.stdin to the invoked process. If we cared
+ # to preserve stdin as a TTY, we could make this work. But until
+ # someone needs it, don't bother.
+
+ # We want stdout to be bytes on Python 3. That means we can't use
+ # universal_newlines=True (because it implies text mode). But
+    # p.stdout.readline() doesn't work well on raw byte streams. So, on Python 3,
+ # we manually install a latin1 stream wrapper. This allows us to readline()
+ # and preserves bytes, without losing any data.
+
+ p = subprocess.Popen(
+ args,
+ # Disable buffering because we want to receive output
+ # as it is generated so timestamps in logs are
+ # accurate.
+ bufsize=0,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ stdin=sys.stdin.fileno(),
+ cwd=cwd,
+ env=env,
+ )
+
+ stdout = io.TextIOWrapper(p.stdout, encoding="latin1")
+
+ while True:
+ data = stdout.readline().encode("latin1")
+
+ if data == b"":
+ break
+
+ print_line(prefix, data)
+
+ return p.wait()
+
+
+def get_posix_user_group(user, group):
+ import grp
+ import pwd
+
+ try:
+ user_record = pwd.getpwnam(user)
+ except KeyError:
+ print("could not find user %s; specify a valid user with --user" % user)
+ sys.exit(1)
+
+ try:
+ group_record = grp.getgrnam(group)
+ except KeyError:
+ print("could not find group %s; specify a valid group with --group" % group)
+ sys.exit(1)
+
+ # Most tasks use worker:worker. We require they have a specific numeric ID
+ # because otherwise it is too easy for files written to caches to have
+ # mismatched numeric IDs, which results in permissions errors.
+ if user_record.pw_name == "worker" and user_record.pw_uid != 1000:
+ print("user `worker` must have uid=1000; got %d" % user_record.pw_uid)
+ sys.exit(1)
+
+ if group_record.gr_name == "worker" and group_record.gr_gid != 1000:
+ print("group `worker` must have gid=1000; got %d" % group_record.gr_gid)
+ sys.exit(1)
+
+ # Find all groups to which this user is a member.
+    gids = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
+
+ return user_record, group_record, gids
+
+
+def write_audit_entry(path, msg):
+ now = datetime.datetime.utcnow().isoformat().encode("utf-8")
+ with open(path, "ab") as fh:
+ fh.write(b"[%sZ %s] %s\n" % (now, os.environb.get(b"TASK_ID", b"UNKNOWN"), msg))
+
+
+WANTED_DIR_MODE = stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR
+
+
+def set_dir_permissions(path, uid, gid):
+ st = os.lstat(path)
+
+ if st.st_uid != uid or st.st_gid != gid:
+ os.chown(path, uid, gid)
+
+ # Also make sure dirs are writable in case we need to delete
+ # them.
+ if st.st_mode & WANTED_DIR_MODE != WANTED_DIR_MODE:
+ os.chmod(path, st.st_mode | WANTED_DIR_MODE)
+
+
+def chown_recursive(path, user, group, uid, gid):
+ print_line(
+ b"chown",
+ b"recursively changing ownership of %s to %s:%s\n"
+ % (path.encode("utf-8"), user.encode("utf-8"), group.encode("utf-8")),
+ )
+
+ set_dir_permissions(path, uid, gid)
+
+ for root, dirs, files in os.walk(path):
+ for d in dirs:
+ set_dir_permissions(os.path.join(root, d), uid, gid)
+
+ for f in files:
+ # File may be a symlink that points to nowhere. In which case
+ # os.chown() would fail because it attempts to follow the
+ # symlink. We only care about directory entries, not what
+ # they point to. So setting the owner of the symlink should
+ # be sufficient.
+ os.lchown(os.path.join(root, f), uid, gid)
+
+
+def configure_cache_posix(cache, user, group, untrusted_caches, running_as_root):
+ """Configure a cache path on POSIX platforms.
+
+ For each cache, we write out a special file denoting attributes and
+ capabilities of run-task and the task being executed. These attributes
+ are used by subsequent run-task invocations to validate that use of
+ the cache is acceptable.
+
+ We /could/ blow away the cache data on requirements mismatch.
+ While this would be convenient, this could result in "competing" tasks
+ effectively undoing the other's work. This would slow down task
+ execution in aggregate. Without monitoring for this, people may not notice
+ the problem and tasks would be slower than they could be. We follow the
+ principle of "fail fast" to ensure optimal task execution.
+
+ We also write an audit log of who used the caches. This log is printed
+ during failures to help aid debugging.
+ """
+
+ our_requirements = {
+ # Include a version string that we can bump whenever to trigger
+ # fresh caches. The actual value is not relevant and doesn't need
+ # to follow any explicit order. Since taskgraph bakes this file's
+ # hash into cache names, any change to this file/version is sufficient
+ # to force the use of a new cache.
+ b"version=1",
+ # Include the UID and GID the task will run as to ensure that tasks
+ # with different UID and GID don't share the same cache.
+ b"uid=%d" % user.pw_uid,
+ b"gid=%d" % group.gr_gid,
+ }
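+
+    # The .cacherequires file is newline-delimited and sorted, e.g.
+    # (illustrative, for uid/gid 1000):
+    #
+    #   gid=1000
+    #   uid=1000
+    #   version=1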
+
+ requires_path = os.path.join(cache, ".cacherequires")
+ audit_path = os.path.join(cache, ".cachelog")
+
+ # The cache is empty. Configure it.
+ if not os.listdir(cache):
+ print_line(
+ b"cache",
+ b"cache %s is empty; writing requirements: "
+ b"%s\n" % (cache.encode("utf-8"), b" ".join(sorted(our_requirements))),
+ )
+
+ # We write a requirements file so future invocations know what the
+ # requirements are.
+ with open(requires_path, "wb") as fh:
+ fh.write(b"\n".join(sorted(our_requirements)))
+
+ # And make it read-only as a precaution against deletion.
+ os.chmod(requires_path, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
+
+ write_audit_entry(
+ audit_path,
+ b"created; requirements: %s" % b", ".join(sorted(our_requirements)),
+ )
+
+ set_dir_permissions(cache, user.pw_uid, group.gr_gid)
+ return
+
+ # The cache has content and we have a requirements file. Validate
+ # requirements alignment.
+ if os.path.exists(requires_path):
+ with open(requires_path, "rb") as fh:
+ wanted_requirements = set(fh.read().splitlines())
+
+ print_line(
+ b"cache",
+ b"cache %s exists; requirements: %s\n"
+ % (cache.encode("utf-8"), b" ".join(sorted(wanted_requirements))),
+ )
+
+ missing = wanted_requirements - our_requirements
+
+        # Allow a uid/gid requirements mismatch if and only if caches
+        # are untrusted. Otherwise, random tasks could "poison" cache
+        # usability by introducing uid/gid mismatches. For untrusted
+        # environments like Try, allowing the mismatch keeps cache
+        # behavior reasonable.
+ if (
+ missing
+ and untrusted_caches
+ and running_as_root
+ and all(s.startswith((b"uid=", b"gid=")) for s in missing)
+ ):
+ print_line(
+ b"cache",
+ b"cache %s uid/gid mismatch; this is acceptable "
+ b"because caches for this task are untrusted; "
+ b"changing ownership to facilitate cache use\n" % cache.encode("utf-8"),
+ )
+ chown_recursive(
+ cache, user.pw_name, group.gr_name, user.pw_uid, group.gr_gid
+ )
+
+ # And write out the updated reality.
+ with open(requires_path, "wb") as fh:
+ fh.write(b"\n".join(sorted(our_requirements)))
+
+ write_audit_entry(
+ audit_path,
+ b"chown; requirements: %s" % b", ".join(sorted(our_requirements)),
+ )
+
+ elif missing:
+ print(
+ "error: requirements for populated cache %s differ from "
+ "this task" % cache
+ )
+ print(
+ "cache requirements: %s"
+ % " ".join(sorted(s.decode("utf-8") for s in wanted_requirements))
+ )
+ print(
+ "our requirements: %s"
+ % " ".join(sorted(s.decode("utf-8") for s in our_requirements))
+ )
+ if any(s.startswith((b"uid=", b"gid=")) for s in missing):
+ print(CACHE_UID_GID_MISMATCH)
+
+ write_audit_entry(
+ audit_path,
+ b"requirements mismatch; wanted: %s"
+ % b", ".join(sorted(our_requirements)),
+ )
+
+ print("")
+ print("audit log:")
+ with open(audit_path, "r") as fh:
+ print(fh.read())
+
+ return True
+ else:
+ write_audit_entry(audit_path, b"used")
+
+ # We don't need to adjust permissions here because the cache is
+ # associated with a uid/gid and the first task should have set
+ # a proper owner/group.
+
+ return
+
+ # The cache has content and no requirements file. This shouldn't
+ # happen because run-task should be the first thing that touches a
+ # cache.
+ print(
+ "error: cache %s is not empty and is missing a "
+ ".cacherequires file; the cache names for this task are "
+ "likely mis-configured or TASKCLUSTER_CACHES is not set "
+ "properly" % cache
+ )
+
+ write_audit_entry(audit_path, b"missing .cacherequires")
+ return True
+
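+
+# A minimal sketch (not used by run-task itself) of the ".cacherequires"
+# handshake performed above; the cache path and uid/gid values here are
+# hypothetical, and only the set comparison mirrors configure_cache_posix().
+def _cacherequires_sketch(cache="/builds/worker/cache", uid=1000, gid=1000):
+    ours = {b"version=1", b"uid=%d" % uid, b"gid=%d" % gid}
+    with open(os.path.join(cache, ".cacherequires"), "rb") as fh:
+        wanted = set(fh.read().splitlines())
+    # An empty difference means the populated cache matches this task's
+    # requirements and may be reused as-is.
+    return wanted - ours
+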
+
+def configure_volume_posix(volume, user, group, running_as_root):
+ # The only time we should see files in the volume is if the Docker
+ # image build put files there.
+ #
+ # For the sake of simplicity, our policy is that volumes should be
+ # empty. This also has the advantage that an empty volume looks
+ # a lot like an empty cache. Tasks can rely on caches being
+ # swapped in and out on any volume without any noticeable change
+ # of behavior.
+ volume_files = os.listdir(volume)
+ if volume_files:
+ print(NON_EMPTY_VOLUME % volume)
+ print("entries in root directory: %s" % " ".join(sorted(volume_files)))
+ sys.exit(1)
+
+ # The volume is almost certainly owned by root:root. Chown it so it
+ # is writable.
+
+ if running_as_root:
+ print_line(
+ b"volume",
+ b"changing ownership of volume %s "
+ b"to %d:%d\n" % (volume.encode("utf-8"), user.pw_uid, group.gr_gid),
+ )
+ set_dir_permissions(volume, user.pw_uid, group.gr_gid)
+
+
+def _clean_git_checkout(destination_path):
+ # Delete untracked files (i.e. build products)
+ print_line(b"vcs", b"cleaning git checkout...\n")
+ args = [
+ "git",
+ "clean",
+        # -n makes this a dry run: matching paths are printed, and the
+        # removal is done by run-task below. Two `-f`s cause subdirectories
+        # with `.git` directories to be cleaned as well.
+        "-nxdff",
+ ]
+ print_line(b"vcs", b"executing %r\n" % args)
+ p = subprocess.Popen(
+ args,
+ # Disable buffering because we want to receive output
+ # as it is generated so timestamps in logs are
+ # accurate.
+ bufsize=0,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ stdin=sys.stdin.fileno(),
+ cwd=destination_path,
+ env=os.environ,
+ )
+ stdout = io.TextIOWrapper(p.stdout, encoding="latin1")
+ ret = p.wait()
+ if ret:
+ sys.exit(ret)
+ data = stdout.read()
+ prefix = "Would remove "
+ filenames = [
+ os.path.join(destination_path, line[len(prefix) :])
+ for line in data.splitlines()
+ ]
+ print_line(b"vcs", b"removing %r\n" % filenames)
+ for filename in filenames:
+ remove(filename)
+ print_line(b"vcs", b"successfully cleaned git checkout!\n")
+
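+
+# A short sketch of the dry-run parsing used above: `git clean -n` prints one
+# "Would remove <path>" line per candidate, and _clean_git_checkout turns
+# those lines back into absolute paths before deleting them itself. This
+# helper is illustrative only.
+def _parse_git_clean_dry_run(destination_path, data):
+    prefix = "Would remove "
+    return [
+        os.path.join(destination_path, line[len(prefix) :])
+        for line in data.splitlines()
+        if line.startswith(prefix)
+    ]
+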
+
+def git_checkout(
+ destination_path: str,
+ head_repo: str,
+ base_repo: Optional[str],
+ base_ref: Optional[str],
+ base_rev: Optional[str],
+ ref: Optional[str],
+ commit: Optional[str],
+ ssh_key_file: Optional[Path],
+ ssh_known_hosts_file: Optional[Path],
+):
+ env = {"PYTHONUNBUFFERED": "1"}
+
+ if ssh_key_file and ssh_known_hosts_file:
+ if not ssh_key_file.exists():
+ raise RuntimeError("Can't find specified ssh_key file.")
+ if not ssh_known_hosts_file.exists():
+ raise RuntimeError("Can't find specified known_hosts file.")
+ env["GIT_SSH_COMMAND"] = " ".join(
+ [
+ "ssh",
+ "-oIdentityFile={}".format(ssh_key_file.as_posix()),
+ "-oStrictHostKeyChecking=yes",
+ "-oUserKnownHostsFile={}".format(ssh_known_hosts_file.as_posix()),
+ ]
+ )
+ elif ssh_key_file or ssh_known_hosts_file:
+ raise RuntimeError(
+ "Must specify both ssh_key_file and ssh_known_hosts_file, if either are specified",
+ )
+
+ if not os.path.exists(destination_path):
+ # Repository doesn't already exist, needs to be cloned
+ args = [
+ "git",
+ "clone",
+ base_repo if base_repo else head_repo,
+ destination_path,
+ ]
+
+ retry_required_command(b"vcs", args, extra_env=env)
+
+ if base_ref:
+ args = ["git", "fetch", "origin", base_ref]
+
+ retry_required_command(b"vcs", args, cwd=destination_path, extra_env=env)
+
+        # Create a local branch so that taskgraph is able to compute
+        # differences between the head branch and the base one, if needed.
+ args = ["git", "checkout", base_ref]
+
+ retry_required_command(b"vcs", args, cwd=destination_path, extra_env=env)
+
+ # When commits are force-pushed (like on a testing branch), base_rev doesn't
+ # exist on base_ref. Fetching it allows taskgraph to compute differences
+ # between the previous state before the force-push and the current state.
+ #
+        # Unlike base_ref just above, there is no need to check out the
+        # revision: it's immediately available after the fetch.
+ if base_rev and base_rev != NULL_REVISION:
+ args = ["git", "fetch", "origin", base_rev]
+
+ retry_required_command(b"vcs", args, cwd=destination_path, extra_env=env)
+
+ # If a ref isn't provided, we fetch all refs from head_repo, which may be slow
+ args = [
+ "git",
+ "fetch",
+ "--no-tags",
+ head_repo,
+ ref if ref else "+refs/heads/*:refs/remotes/work/*",
+ ]
+
+ retry_required_command(b"vcs", args, cwd=destination_path, extra_env=env)
+
+ args = [
+ "git",
+ "checkout",
+ "-f",
+ ]
+
+ if ref:
+ args.extend(["-B", ref])
+ args.append(commit if commit else ref)
+
+ run_required_command(b"vcs", args, cwd=destination_path)
+
+ if os.path.exists(os.path.join(destination_path, ".gitmodules")):
+ args = [
+ "git",
+ "submodule",
+ "init",
+ ]
+
+ run_required_command(b"vcs", args, cwd=destination_path)
+
+ args = [
+ "git",
+ "submodule",
+ "update",
+ ]
+
+ run_required_command(b"vcs", args, cwd=destination_path)
+
+ _clean_git_checkout(destination_path)
+
+ args = ["git", "rev-parse", "--verify", "HEAD"]
+
+ commit_hash = subprocess.check_output(
+ args, cwd=destination_path, universal_newlines=True
+ ).strip()
+ assert re.match("^[a-f0-9]{40}$", commit_hash)
+
+ if head_repo.startswith("https://github.com"):
+ if head_repo.endswith("/"):
+ head_repo = head_repo[:-1]
+
+ tinderbox_link = "{}/commit/{}".format(head_repo, commit_hash)
+ repo_name = head_repo.split("/")[-1]
+ else:
+ tinderbox_link = head_repo
+ repo_name = head_repo
+
+ msg = (
+ "TinderboxPrint:<a href='{link}' "
+ "title='Built from {name} commit {commit_hash}'>"
+ "{commit_hash}</a>\n".format(
+ commit_hash=commit_hash, link=tinderbox_link, name=repo_name
+ )
+ )
+
+ print_line(b"vcs", msg.encode("utf-8"))
+
+ return commit_hash
+
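+
+# A minimal sketch of the GIT_SSH_COMMAND wiring above, with hypothetical key
+# and known_hosts paths; git runs this command in place of plain `ssh` for
+# every network operation, pinning both the identity and the accepted host
+# keys.
+def _git_ssh_command_sketch():
+    key = Path("/builds/worker/.ssh-run-task/private_ssh_key")
+    hosts = Path("/builds/worker/.ssh-run-task/known_hosts")
+    return " ".join(
+        [
+            "ssh",
+            "-oIdentityFile={}".format(key.as_posix()),
+            "-oStrictHostKeyChecking=yes",
+            "-oUserKnownHostsFile={}".format(hosts.as_posix()),
+        ]
+    )
+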
+
+def fetch_ssh_secret(secret_name):
+ """Retrieves the private ssh key, and returns it as a StringIO object"""
+ secret_url = SECRET_BASEURL_TPL.format(secret_name)
+ try:
+ print_line(
+ b"vcs",
+ b"fetching secret %s from %s\n"
+ % (secret_name.encode("utf-8"), secret_url.encode("utf-8")),
+ )
+ res = urllib.request.urlopen(secret_url, timeout=10)
+ secret = res.read()
+ try:
+ secret = json.loads(secret.decode("utf-8"))
+ except ValueError:
+ print_line(b"vcs", b"invalid JSON in secret")
+ sys.exit(1)
+ except (urllib.error.URLError, socket.timeout):
+ print_line(b"vcs", b"Unable to retrieve ssh secret. aborting...")
+ sys.exit(1)
+
+ return secret["secret"]["ssh_privkey"]
+
+
+def hg_checkout(
+ destination_path: str,
+ head_repo: str,
+ base_repo: Optional[str],
+ store_path: str,
+ sparse_profile: Optional[str],
+ branch: Optional[str],
+ revision: Optional[str],
+):
+ if IS_MACOSX:
+ hg_bin = "/tools/python27-mercurial/bin/hg"
+ elif IS_POSIX:
+ hg_bin = "hg"
+ elif IS_WINDOWS:
+ # This is where OCC installs it in the AMIs.
+ hg_bin = r"C:\Program Files\Mercurial\hg.exe"
+ if not os.path.exists(hg_bin):
+ print("could not find Mercurial executable: %s" % hg_bin)
+ sys.exit(1)
+ else:
+ raise RuntimeError("Must be running on mac, posix or windows")
+
+ args = [
+ hg_bin,
+ "robustcheckout",
+ "--sharebase",
+ store_path,
+ "--purge",
+ ]
+
+ if base_repo:
+ args.extend(["--upstream", base_repo])
+ if sparse_profile:
+ args.extend(["--sparseprofile", sparse_profile])
+
+ # Specify method to checkout a revision. This defaults to revisions as
+ # SHA-1 strings, but also supports symbolic revisions like `tip` via the
+ # branch flag.
+ args.extend(
+ [
+ "--branch" if branch else "--revision",
+ branch or revision,
+ head_repo,
+ destination_path,
+ ]
+ )
+
+ run_required_command(b"vcs", args, extra_env={"PYTHONUNBUFFERED": "1"})
+
+ # Update the current revision hash and ensure that it is well formed.
+ revision = subprocess.check_output(
+ [hg_bin, "log", "--rev", ".", "--template", "{node}"],
+ cwd=destination_path,
+ # Triggers text mode on Python 3.
+ universal_newlines=True,
+ )
+
+ assert re.match("^[a-f0-9]{40}$", revision)
+
+ msg = (
+ "TinderboxPrint:<a href={head_repo}/rev/{revision} "
+ "title='Built from {repo_name} revision {revision}'>"
+ "{revision}</a>\n".format(
+ revision=revision, head_repo=head_repo, repo_name=head_repo.split("/")[-1]
+ )
+ )
+
+ print_line(b"vcs", msg.encode("utf-8"))
+
+ return revision
+
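+
+# The robustcheckout invocation above reduces to an argument list like the
+# following sketch; the store path, destination, and revision shown here are
+# hypothetical.
+def _robustcheckout_args_sketch():
+    return [
+        "hg",
+        "robustcheckout",
+        "--sharebase",
+        "/builds/worker/checkouts/hg-store",
+        "--purge",
+        "--revision",
+        "0" * 40,
+        "https://hg.mozilla.org/mozilla-unified",
+        "/builds/worker/checkouts/src",
+    ]
+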
+
+def fetch_artifacts():
+ print_line(b"fetches", b"fetching artifacts\n")
+
+ fetch_content = shutil.which("fetch-content")
+
+ if not fetch_content or not os.path.isfile(fetch_content):
+ fetch_content = os.path.join(os.path.dirname(__file__), "fetch-content")
+
+ if not os.path.isfile(fetch_content):
+ print(FETCH_CONTENT_NOT_FOUND)
+ sys.exit(1)
+
+ cmd = [sys.executable, "-u", fetch_content, "task-artifacts"]
+ print_line(b"fetches", b"executing %r\n" % cmd)
+ subprocess.run(cmd, check=True, env=os.environ)
+ print_line(b"fetches", b"finished fetching artifacts\n")
+
+
+def add_vcs_arguments(parser, project, name):
+ """Adds arguments to ArgumentParser to control VCS options for a project."""
+
+ parser.add_argument(
+ "--%s-checkout" % project,
+ help="Directory where %s checkout should be created" % name,
+ )
+ parser.add_argument(
+ "--%s-sparse-profile" % project,
+ help="Path to sparse profile for %s checkout" % name,
+ )
+
+
+def collect_vcs_options(args, project, name):
+ checkout = getattr(args, "%s_checkout" % project)
+ sparse_profile = getattr(args, "%s_sparse_profile" % project)
+
+ env_prefix = project.upper()
+
+ repo_type = os.environ.get("%s_REPOSITORY_TYPE" % env_prefix)
+ base_repo = os.environ.get("%s_BASE_REPOSITORY" % env_prefix)
+ base_ref = os.environ.get("%s_BASE_REF" % env_prefix)
+ base_rev = os.environ.get("%s_BASE_REV" % env_prefix)
+ head_repo = os.environ.get("%s_HEAD_REPOSITORY" % env_prefix)
+ revision = os.environ.get("%s_HEAD_REV" % env_prefix)
+ ref = os.environ.get("%s_HEAD_REF" % env_prefix)
+ pip_requirements = os.environ.get("%s_PIP_REQUIREMENTS" % env_prefix)
+ private_key_secret = os.environ.get("%s_SSH_SECRET_NAME" % env_prefix)
+
+ store_path = os.environ.get("HG_STORE_PATH")
+
+ # Expand ~ in some paths.
+ if checkout:
+ checkout = os.path.abspath(os.path.expanduser(checkout))
+ if store_path:
+ store_path = os.path.abspath(os.path.expanduser(store_path))
+
+ if pip_requirements:
+ pip_requirements = os.path.join(checkout, pip_requirements)
+
+ # Some callers set the base repository to mozilla-central for historical
+ # reasons. Switch to mozilla-unified because robustcheckout works best
+ # with it.
+ if base_repo == "https://hg.mozilla.org/mozilla-central":
+ base_repo = "https://hg.mozilla.org/mozilla-unified"
+
+ return {
+ "store-path": store_path,
+ "project": project,
+ "name": name,
+ "env-prefix": env_prefix,
+ "checkout": checkout,
+ "sparse-profile": sparse_profile,
+ "base-repo": base_repo,
+ "base-ref": base_ref,
+ "base-rev": base_rev,
+ "head-repo": head_repo,
+ "revision": revision,
+ "ref": ref,
+ "repo-type": repo_type,
+ "ssh-secret-name": private_key_secret,
+ "pip-requirements": pip_requirements,
+ }
+
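+
+# A sketch of the environment contract consumed above for a repository whose
+# project name is "gecko"; the values are hypothetical.
+_EXAMPLE_GECKO_ENV = {
+    "GECKO_REPOSITORY_TYPE": "hg",
+    "GECKO_BASE_REPOSITORY": "https://hg.mozilla.org/mozilla-unified",
+    "GECKO_HEAD_REPOSITORY": "https://hg.mozilla.org/mozilla-unified",
+    "GECKO_HEAD_REV": "0" * 40,
+}
+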
+
+def vcs_checkout_from_args(options):
+
+ if not options["checkout"]:
+ if options["ref"] and not options["revision"]:
+ print("task should be defined in terms of non-symbolic revision")
+ sys.exit(1)
+ return
+
+ revision = options["revision"]
+ ref = options["ref"]
+ ssh_key_file = None
+ ssh_known_hosts_file = None
+ ssh_dir = None
+
+ try:
+ if options.get("ssh-secret-name"):
+ ssh_dir = Path("~/.ssh-run-task").expanduser()
+ os.makedirs(ssh_dir, 0o700)
+ ssh_key_file = ssh_dir.joinpath("private_ssh_key")
+ ssh_key = fetch_ssh_secret(options["ssh-secret-name"])
+ # We don't use write_text here, to avoid \n -> \r\n on windows
+ ssh_key_file.write_bytes(ssh_key.encode("ascii"))
+ ssh_key_file.chmod(0o600)
+ # TODO: We should pull this from a secret, so it can be updated on old trees
+ ssh_known_hosts_file = ssh_dir.joinpath("known_hosts")
+ ssh_known_hosts_file.write_bytes(GITHUB_SSH_FINGERPRINT)
+
+ if options["repo-type"] == "git":
+ if not revision and not ref:
+ raise RuntimeError(
+ "Git requires that either a ref, a revision, or both are provided"
+ )
+
+ if not ref:
+ print("Providing a ref will improve the performance of this checkout")
+
+ revision = git_checkout(
+ options["checkout"],
+ options["head-repo"],
+ options["base-repo"],
+ options["base-ref"],
+ options["base-rev"],
+ ref,
+ revision,
+ ssh_key_file,
+ ssh_known_hosts_file,
+ )
+ elif options["repo-type"] == "hg":
+ if not revision and not ref:
+ raise RuntimeError(
+ "Hg requires that at least one of a ref or revision " "is provided"
+ )
+
+ revision = hg_checkout(
+ options["checkout"],
+ options["head-repo"],
+ options["base-repo"],
+ options["store-path"],
+ options["sparse-profile"],
+ ref,
+ revision,
+ )
+ else:
+ raise RuntimeError('Type of VCS must be either "git" or "hg"')
+ finally:
+ if ssh_dir:
+ shutil.rmtree(ssh_dir, ignore_errors=True)
+
+ os.environ["%s_HEAD_REV" % options["env-prefix"]] = revision
+
+
+def install_pip_requirements(repositories):
+ """Install pip requirements files from specified repositories, if necessary."""
+ requirements = [
+ r["pip-requirements"] for r in repositories if r["pip-requirements"]
+ ]
+ if not requirements:
+ return
+
+ cmd = [sys.executable, "-mpip", "install"]
+ if os.environ.get("PIP_DISABLE_REQUIRE_HASHES") != "1":
+ cmd.append("--require-hashes")
+
+ for path in requirements:
+ cmd.extend(["-r", path])
+
+ run_required_command(b"pip-install", cmd)
+
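+
+# For two hypothetical requirements files, the loop above assembles:
+#
+#   [sys.executable, "-mpip", "install", "--require-hashes",
+#    "-r", "a.txt", "-r", "b.txt"]
+#
+# with --require-hashes omitted when PIP_DISABLE_REQUIRE_HASHES=1.
+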
+
+def maybe_run_resource_monitoring():
+ """Run the resource monitor if available.
+
+ Discussion in https://github.com/taskcluster/taskcluster-rfcs/pull/160
+ and https://bugzil.la/1648051
+
+ """
+ if "MOZ_FETCHES" not in os.environ:
+ return
+ if "RESOURCE_MONITOR_OUTPUT" not in os.environ:
+ return
+
+ prefix = b"resource_monitor"
+
+ executable = "{}/resource-monitor/resource-monitor{}".format(
+ os.environ.get("MOZ_FETCHES_DIR"), ".exe" if IS_WINDOWS else ""
+ )
+
+ if not os.path.exists(executable) or not os.access(executable, os.X_OK):
+ print_line(prefix, b"%s not executable\n" % executable.encode("utf-8"))
+ return
+ args = [
+ executable,
+ "-process",
+ str(os.getpid()),
+ "-output",
+ os.environ["RESOURCE_MONITOR_OUTPUT"],
+ ]
+ print_line(prefix, b"Resource monitor starting: %s\n" % str(args).encode("utf-8"))
+ # Avoid environment variables the payload doesn't need.
+ del os.environ["RESOURCE_MONITOR_OUTPUT"]
+
+ # Without CREATE_NEW_PROCESS_GROUP Windows signals will attempt to kill run-task, too.
+ process = subprocess.Popen(
+ args,
+ # Disable buffering because we want to receive output
+ # as it is generated so timestamps in logs are
+ # accurate.
+ bufsize=0,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ creationflags=subprocess.CREATE_NEW_PROCESS_GROUP if IS_WINDOWS else 0,
+ cwd=os.getcwd(),
+ )
+
+ def capture_output():
+ fh = io.TextIOWrapper(process.stdout, encoding="latin1")
+ while True:
+ data = fh.readline().encode("latin1")
+ if data == b"":
+ break
+ print_line(prefix, data)
+
+ monitor_process = Thread(target=capture_output)
+ monitor_process.start()
+ return process
+
+
+def main(args):
+ os.environ["TASK_WORKDIR"] = os.getcwd()
+ print_line(
+ b"setup",
+ b"run-task started in %s\n" % os.environ["TASK_WORKDIR"].encode("utf-8"),
+ )
+ running_as_root = IS_POSIX and os.getuid() == 0
+
+    # Arguments up to '--' are ours; arguments after it are for the main
+    # task to be executed.
+ try:
+ i = args.index("--")
+ our_args = args[0:i]
+ task_args = args[i + 1 :]
+ except ValueError:
+ our_args = args
+ task_args = []
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--user", default="worker", help="user to run as")
+ parser.add_argument("--group", default="worker", help="group to run as")
+ parser.add_argument("--task-cwd", help="directory to run the provided command in")
+
+ repositories = os.environ.get("REPOSITORIES")
+ if repositories:
+ repositories = json.loads(repositories)
+ else:
+ repositories = {"vcs": "repository"}
+
+ for repository, name in repositories.items():
+ add_vcs_arguments(parser, repository, name)
+
+ parser.add_argument(
+ "--fetch-hgfingerprint", action="store_true", help=argparse.SUPPRESS
+ )
+
+ args = parser.parse_args(our_args)
+
+ repositories = [
+ collect_vcs_options(args, repository, name)
+ for (repository, name) in repositories.items()
+ ]
+ # Sort repositories so that parent checkout paths come before children
+ repositories.sort(key=lambda repo: Path(repo["checkout"] or "/").parts)
+
+ uid = gid = gids = None
+ if IS_POSIX and running_as_root:
+ user, group, gids = get_posix_user_group(args.user, args.group)
+ uid = user.pw_uid
+ gid = group.gr_gid
+
+ if running_as_root and os.path.exists("/dev/kvm"):
+ # Ensure kvm permissions for worker, required for Android x86
+ st = os.stat("/dev/kvm")
+ os.chmod("/dev/kvm", st.st_mode | 0o666)
+
+ # Validate caches.
+ #
+ # Taskgraph should pass in a list of paths that are caches via an
+ # environment variable (which we don't want to pass down to child
+ # processes).
+
+ if "TASKCLUSTER_CACHES" in os.environ:
+ caches = os.environ["TASKCLUSTER_CACHES"].split(";")
+ del os.environ["TASKCLUSTER_CACHES"]
+ else:
+ caches = []
+
+ if "TASKCLUSTER_UNTRUSTED_CACHES" in os.environ:
+ untrusted_caches = True
+ del os.environ["TASKCLUSTER_UNTRUSTED_CACHES"]
+ else:
+ untrusted_caches = False
+
+ for cache in caches:
+ if not os.path.isdir(cache):
+ print(
+ "error: cache %s is not a directory; this should never "
+ "happen" % cache
+ )
+ return 1
+
+ purge = configure_cache_posix(
+ cache, user, group, untrusted_caches, running_as_root
+ )
+
+ if purge:
+ return EXIT_PURGE_CACHE
+
+ if "TASKCLUSTER_VOLUMES" in os.environ:
+ volumes = os.environ["TASKCLUSTER_VOLUMES"].split(";")
+ del os.environ["TASKCLUSTER_VOLUMES"]
+ else:
+ volumes = []
+
+ if volumes and not IS_POSIX:
+ print("assertion failed: volumes not expected on Windows")
+ return 1
+
+ # Sanitize volumes.
+ for volume in volumes:
+ # If a volume is a cache, it was dealt with above.
+ if volume in caches:
+ print_line(b"volume", b"volume %s is a cache\n" % volume.encode("utf-8"))
+ continue
+
+ configure_volume_posix(volume, user, group, running_as_root)
+
+ all_caches_and_volumes = set(map(os.path.normpath, caches))
+ all_caches_and_volumes |= set(map(os.path.normpath, volumes))
+
+ def path_in_cache_or_volume(path):
+ path = os.path.normpath(path)
+
+ while path:
+ if path in all_caches_and_volumes:
+ return True
+
+ path, child = os.path.split(path)
+ if not child:
+ break
+
+ return False
+
+ def prepare_checkout_dir(checkout):
+ if not checkout:
+ return
+
+        # The checkout path becomes the working directory. Since there are
+        # special cache files in the cache's root directory, and working-
+        # directory purging could blow them away, disallow this scenario.
+ if os.path.exists(os.path.join(checkout, ".cacherequires")):
+ print("error: cannot perform vcs checkout into cache root: %s" % checkout)
+ sys.exit(1)
+
+ # TODO given the performance implications, consider making this a fatal
+ # error.
+ if not path_in_cache_or_volume(checkout):
+ print_line(
+ b"vcs",
+ b"WARNING: vcs checkout path (%s) not in cache "
+ b"or volume; performance will likely suffer\n"
+ % checkout.encode("utf-8"),
+ )
+
+ # Ensure the directory for the source checkout exists.
+ try:
+ os.makedirs(os.path.dirname(checkout))
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ # And that it is owned by the appropriate user/group.
+ if running_as_root:
+ os.chown(os.path.dirname(checkout), uid, gid)
+
+ def prepare_hg_store_path():
+ # And ensure the shared store path exists and has proper permissions.
+ if "HG_STORE_PATH" not in os.environ:
+ print("error: HG_STORE_PATH environment variable not set")
+ sys.exit(1)
+
+ store_path = os.environ["HG_STORE_PATH"]
+
+ if not path_in_cache_or_volume(store_path):
+ print_line(
+ b"vcs",
+ b"WARNING: HG_STORE_PATH (%s) not in cache or "
+ b"volume; performance will likely suffer\n"
+ % store_path.encode("utf-8"),
+ )
+
+ try:
+ os.makedirs(store_path)
+ except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
+ if running_as_root:
+ os.chown(store_path, uid, gid)
+
+ repository_paths = [
+ Path(repo["checkout"]) for repo in repositories if repo["checkout"]
+ ]
+ for repo in repositories:
+ if not repo["checkout"]:
+ continue
+ parents = Path(repo["checkout"]).parents
+ if any((path in repository_paths) for path in parents):
+            # Skip creating any checkouts that are inside other checkouts.
+ continue
+ prepare_checkout_dir(repo["checkout"])
+
+ if any(repo["checkout"] and repo["repo-type"] == "hg" for repo in repositories):
+ prepare_hg_store_path()
+
+ if IS_POSIX and running_as_root:
+ # Drop permissions to requested user.
+ # This code is modeled after what `sudo` was observed to do in a Docker
+ # container. We do not bother calling setrlimit() because containers have
+ # their own limits.
+ print_line(
+ b"setup",
+ b"running as %s:%s\n"
+ % (args.user.encode("utf-8"), args.group.encode("utf-8")),
+ )
+
+ os.setgroups(gids)
+ os.umask(0o22)
+ os.setresgid(gid, gid, gid)
+ os.setresuid(uid, uid, uid)
+
+ for repo in repositories:
+ vcs_checkout_from_args(repo)
+
+ resource_process = None
+
+ try:
+ for k in ["MOZ_FETCHES_DIR", "UPLOAD_DIR"] + [
+ "{}_PATH".format(repository["project"].upper())
+ for repository in repositories
+ ]:
+ if k in os.environ:
+ os.environ[k] = os.path.abspath(os.environ[k])
+ print_line(
+ b"setup",
+ b"%s is %s\n" % (k.encode("utf-8"), os.environ[k].encode("utf-8")),
+ )
+
+ if "MOZ_FETCHES" in os.environ:
+ fetch_artifacts()
+
+ # Install Python requirements after fetches in case tasks want to use
+ # fetches to grab dependencies.
+ install_pip_requirements(repositories)
+
+ resource_process = maybe_run_resource_monitoring()
+
+ return run_command(b"task", task_args, cwd=args.task_cwd)
+ finally:
+ if resource_process:
+ print_line(b"resource_monitor", b"terminating\n")
+ if IS_WINDOWS:
+ # .terminate() on Windows is not a graceful shutdown, due to
+ # differences in signals. CTRL_BREAK_EVENT will work provided
+ # the subprocess is in a different process group, so this script
+ # isn't also killed.
+ os.kill(resource_process.pid, signal.CTRL_BREAK_EVENT)
+ else:
+ resource_process.terminate()
+ resource_process.wait()
+ fetches_dir = os.environ.get("MOZ_FETCHES_DIR")
+ if fetches_dir and os.path.isdir(fetches_dir):
+ print_line(b"fetches", b"removing %s\n" % fetches_dir.encode("utf-8"))
+ remove(fetches_dir)
+ print_line(b"fetches", b"finished\n")
+
+
+if __name__ == "__main__":
+ sys.exit(main(sys.argv[1:]))
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/target_tasks.py b/third_party/python/taskcluster_taskgraph/taskgraph/target_tasks.py
new file mode 100644
index 0000000000..1119a1c960
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/target_tasks.py
@@ -0,0 +1,107 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from taskgraph.util.attributes import (
+ match_run_on_git_branches,
+ match_run_on_projects,
+ match_run_on_tasks_for,
+)
+
+_target_task_methods = {}
+
+_GIT_REFS_HEADS_PREFIX = "refs/heads/"
+
+
+def _target_task(name):
+ def wrap(func):
+ _target_task_methods[name] = func
+ return func
+
+ return wrap
+
+
+def get_method(method):
+ """Get a target_task_method to pass to a TaskGraphGenerator."""
+ return _target_task_methods[method]
+
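+
+# A sketch of how the registry above is consumed; the "default" method is
+# registered further down in this module, and the three arguments are
+# hypothetical placeholders supplied by the TaskGraphGenerator.
+def _registry_sketch(full_task_graph, parameters, graph_config):
+    method = get_method("default")
+    return method(full_task_graph, parameters, graph_config)
+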
+
+def filter_out_cron(task, parameters):
+ """
+ Filter out tasks that run via cron.
+ """
+ return not task.attributes.get("cron")
+
+
+def filter_for_project(task, parameters):
+ """Filter tasks by project. Optionally enable nightlies."""
+ run_on_projects = set(task.attributes.get("run_on_projects", []))
+ return match_run_on_projects(parameters["project"], run_on_projects)
+
+
+def filter_for_tasks_for(task, parameters):
+ run_on_tasks_for = set(task.attributes.get("run_on_tasks_for", ["all"]))
+ return match_run_on_tasks_for(parameters["tasks_for"], run_on_tasks_for)
+
+
+def filter_for_git_branch(task, parameters):
+ """Filter tasks by git branch.
+ If `run_on_git_branch` is not defined, then task runs on all branches"""
+ # We cannot filter out on git branches if we not on a git repository
+ if parameters.get("repository_type") != "git":
+ return True
+
+    # Pull requests usually have arbitrary names, so let's not filter git branches on them.
+ if parameters["tasks_for"] == "github-pull-request":
+ return True
+
+ run_on_git_branches = set(task.attributes.get("run_on_git_branches", ["all"]))
+ git_branch = parameters["head_ref"]
+ if git_branch.startswith(_GIT_REFS_HEADS_PREFIX):
+ git_branch = git_branch[len(_GIT_REFS_HEADS_PREFIX) :]
+
+ return match_run_on_git_branches(git_branch, run_on_git_branches)
+
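+
+# For example, a hypothetical head_ref of "refs/heads/release" is matched
+# against run_on_git_branches as plain "release".
+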
+
+def filter_out_shipping_phase(task, parameters):
+ return task.attributes.get("shipping_phase") in (None, "build")
+
+
+def standard_filter(task, parameters):
+ return all(
+ filter_func(task, parameters)
+ for filter_func in (
+ filter_out_cron,
+ filter_out_shipping_phase,
+ filter_for_project,
+ filter_for_tasks_for,
+ filter_for_git_branch,
+ )
+ )
+
+
+@_target_task("default")
+def target_tasks_default(full_task_graph, parameters, graph_config):
+ """Target the tasks which have indicated they should be run on this project
+ via the `run_on_projects` attributes."""
+ return [
+ l for l, t in full_task_graph.tasks.items() if standard_filter(t, parameters)
+ ]
+
+
+@_target_task("codereview")
+def target_tasks_codereview(full_task_graph, parameters, graph_config):
+ """Target the tasks which have indicated they should be run on this project
+ via the `run_on_projects` attributes."""
+ return [
+ l
+ for l, t in full_task_graph.tasks.items()
+ if standard_filter(t, parameters) and t.attributes.get("code-review")
+ ]
+
+
+@_target_task("nothing")
+def target_tasks_nothing(full_task_graph, parameters, graph_config):
+ """Select nothing, for DONTBUILD pushes"""
+ return []
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/task.py b/third_party/python/taskcluster_taskgraph/taskgraph/task.py
new file mode 100644
index 0000000000..a38a52b38e
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/task.py
@@ -0,0 +1,84 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import attr
+
+
+@attr.s
+class Task:
+ """
+ Representation of a task in a TaskGraph. Each Task has, at creation:
+
+ - kind: the name of the task kind
+    - label: the label for this task
+ - attributes: a dictionary of attributes for this task (used for filtering)
+ - task: the task definition (JSON-able dictionary)
+ - optimization: optimization to apply to the task (see taskgraph.optimize)
+ - dependencies: tasks this one depends on, in the form {name: label}, for example
+ {'build': 'build-linux64/opt', 'docker-image': 'build-docker-image-desktop-test'}
+    - soft_dependencies: tasks this one may depend on if they are available
+      post-optimization. They are specified as a list of task labels.
+    - if_dependencies: only run this task if at least one of these dependencies
+      is present.
+
+ And later, as the task-graph processing proceeds:
+
+    - task_id: the TaskCluster taskId under which this task will be created
+
+ This class is just a convenience wrapper for the data type and managing
+ display, comparison, serialization, etc. It has no functionality of its own.
+ """
+
+ kind = attr.ib()
+ label = attr.ib()
+ attributes = attr.ib()
+ task = attr.ib()
+ description = attr.ib(default="")
+ task_id = attr.ib(default=None, init=False)
+ optimization = attr.ib(default=None)
+ dependencies = attr.ib(factory=dict)
+ soft_dependencies = attr.ib(factory=list)
+ if_dependencies = attr.ib(factory=list)
+
+ def __attrs_post_init__(self):
+ self.attributes["kind"] = self.kind
+
+ def to_json(self):
+ rv = {
+ "kind": self.kind,
+ "label": self.label,
+ "description": self.description,
+ "attributes": self.attributes,
+ "dependencies": self.dependencies,
+ "soft_dependencies": self.soft_dependencies,
+ "if_dependencies": self.if_dependencies,
+ "optimization": self.optimization,
+ "task": self.task,
+ }
+ if self.task_id:
+ rv["task_id"] = self.task_id
+ return rv
+
+ @classmethod
+ def from_json(cls, task_dict):
+ """
+ Given a data structure as produced by taskgraph.to_json, re-construct
+ the original Task object. This is used to "resume" the task-graph
+ generation process, for example in Action tasks.
+ """
+ rv = cls(
+ kind=task_dict["kind"],
+ label=task_dict["label"],
+ description=task_dict.get("description", ""),
+ attributes=task_dict["attributes"],
+ task=task_dict["task"],
+ optimization=task_dict["optimization"],
+ dependencies=task_dict.get("dependencies"),
+ soft_dependencies=task_dict.get("soft_dependencies"),
+ if_dependencies=task_dict.get("if_dependencies"),
+ )
+ if "task_id" in task_dict:
+ rv.task_id = task_dict["task_id"]
+ return rv
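+
+
+# A minimal to_json()/from_json() roundtrip sketch with hypothetical field
+# values; this is the same path Action tasks use to resume graph generation.
+def _task_roundtrip_sketch():
+    task = Task(kind="build", label="build-linux64/opt", attributes={}, task={})
+    return Task.from_json(task.to_json()).label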
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/taskgraph.py b/third_party/python/taskcluster_taskgraph/taskgraph/taskgraph.py
new file mode 100644
index 0000000000..158cfb861c
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/taskgraph.py
@@ -0,0 +1,72 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import attr
+
+from .graph import Graph
+from .task import Task
+
+
+@attr.s(frozen=True)
+class TaskGraph:
+ """
+ Representation of a task graph.
+
+ A task graph is a combination of a Graph and a dictionary of tasks indexed
+ by label. TaskGraph instances should be treated as immutable.
+
+    In the graph, tasks are said to "link to" their dependencies, whereas
+    they are "linked from" their dependents.
+ """
+
+ tasks = attr.ib()
+ graph = attr.ib()
+
+ def __attrs_post_init__(self):
+ assert set(self.tasks) == self.graph.nodes
+
+ def for_each_task(self, f, *args, **kwargs):
+ for task_label in self.graph.visit_postorder():
+ task = self.tasks[task_label]
+ f(task, self, *args, **kwargs)
+
+ def __getitem__(self, label):
+ "Get a task by label"
+ return self.tasks[label]
+
+ def __contains__(self, label):
+ return label in self.tasks
+
+ def __iter__(self):
+ "Iterate over tasks in undefined order"
+ return iter(self.tasks.values())
+
+ def to_json(self):
+ "Return a JSON-able object representing the task graph, as documented"
+ named_links_dict = self.graph.named_links_dict()
+ # this dictionary may be keyed by label or by taskid, so let's just call it 'key'
+ tasks = {}
+ for key in self.graph.visit_postorder():
+ tasks[key] = self.tasks[key].to_json()
+ # overwrite dependencies with the information in the taskgraph's edges.
+ tasks[key]["dependencies"] = named_links_dict.get(key, {})
+ return tasks
+
+ @classmethod
+ def from_json(cls, tasks_dict):
+ """
+        This code is used to generate a TaskGraph from a dictionary
+        representation of that TaskGraph, as produced by to_json().
+ """
+ tasks = {}
+ edges = set()
+ for key, value in tasks_dict.items():
+ tasks[key] = Task.from_json(value)
+ if "task_id" in value:
+ tasks[key].task_id = value["task_id"]
+ for depname, dep in value["dependencies"].items():
+ edges.add((key, dep, depname))
+ task_graph = cls(tasks, Graph(set(tasks), edges))
+ return tasks, task_graph
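+
+
+# A minimal sketch of rebuilding a graph from its JSON form; the single task
+# payload below is hypothetical but structurally complete. Note that
+# from_json() returns both the task dictionary and the TaskGraph.
+def _taskgraph_from_json_sketch():
+    serialized = {
+        "build-a": {
+            "kind": "build",
+            "label": "build-a",
+            "attributes": {},
+            "task": {},
+            "optimization": None,
+            "dependencies": {},
+        }
+    }
+    tasks, task_graph = TaskGraph.from_json(serialized)
+    return task_graph["build-a"].kind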
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/__init__.py
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py
new file mode 100644
index 0000000000..383e6a4798
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/base.py
@@ -0,0 +1,157 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import re
+from typing import AnyStr
+
+import attr
+
+from ..config import GraphConfig
+from ..parameters import Parameters
+from ..util.memoize import memoize
+from ..util.schema import Schema, validate_schema
+
+
+@attr.s(frozen=True)
+class RepoConfig:
+ prefix = attr.ib(type=str)
+ name = attr.ib(type=str)
+ base_repository = attr.ib(type=str)
+ head_repository = attr.ib(type=str)
+ head_ref = attr.ib(type=str)
+ type = attr.ib(type=str)
+ path = attr.ib(type=str, default="")
+ head_rev = attr.ib(type=str, default=None)
+ ssh_secret_name = attr.ib(type=str, default=None)
+
+
+@attr.s(frozen=True, cmp=False)
+class TransformConfig:
+ """
+ A container for configuration affecting transforms. The `config` argument
+ to transforms is an instance of this class.
+ """
+
+ # the name of the current kind
+ kind = attr.ib()
+
+ # the path to the kind configuration directory
+ path = attr.ib(type=AnyStr)
+
+ # the parsed contents of kind.yml
+ config = attr.ib(type=dict)
+
+ # the parameters for this task-graph generation run
+ params = attr.ib(type=Parameters)
+
+ # a dict of all the tasks associated with the kind dependencies of the
+ # current kind
+ kind_dependencies_tasks = attr.ib(type=dict)
+
+ # Global configuration of the taskgraph
+ graph_config = attr.ib(type=GraphConfig)
+
+ # whether to write out artifacts for the decision task
+ write_artifacts = attr.ib(type=bool)
+
+ @property
+ @memoize
+ def repo_configs(self):
+ repositories = self.graph_config["taskgraph"]["repositories"]
+ if len(repositories) == 1:
+ current_prefix = list(repositories.keys())[0]
+ else:
+ project = self.params["project"]
+ matching_repos = {
+ repo_prefix: repo
+ for (repo_prefix, repo) in repositories.items()
+ if re.match(repo["project-regex"], project)
+ }
+ if len(matching_repos) != 1:
+ raise Exception(
+ f"Couldn't find repository matching project `{project}`"
+ )
+ current_prefix = list(matching_repos.keys())[0]
+
+ repo_configs = {
+ current_prefix: RepoConfig(
+ prefix=current_prefix,
+ name=repositories[current_prefix]["name"],
+ base_repository=self.params["base_repository"],
+ head_repository=self.params["head_repository"],
+ head_ref=self.params["head_ref"],
+ head_rev=self.params["head_rev"],
+ type=self.params["repository_type"],
+ ssh_secret_name=repositories[current_prefix].get("ssh-secret-name"),
+ ),
+ }
+ if len(repositories) != 1:
+ repo_configs.update(
+ {
+ repo_prefix: RepoConfig(
+ prefix=repo_prefix,
+ name=repo["name"],
+ base_repository=repo["default-repository"],
+ head_repository=repo["default-repository"],
+ head_ref=repo["default-ref"],
+ type=repo["type"],
+ ssh_secret_name=repo.get("ssh-secret-name"),
+ )
+ for (repo_prefix, repo) in repositories.items()
+ if repo_prefix != current_prefix
+ }
+ )
+ return repo_configs
+
+
+@attr.s()
+class TransformSequence:
+ """
+ Container for a sequence of transforms. Each transform is represented as a
+ callable taking (config, items) and returning a generator which will yield
+ transformed items. The resulting sequence has the same interface.
+
+ This is convenient to use in a file full of transforms, as it provides a
+ decorator, @transforms.add, that will add the decorated function to the
+ sequence.
+ """
+
+ _transforms = attr.ib(factory=list)
+
+ def __call__(self, config, items):
+ for xform in self._transforms:
+ items = xform(config, items)
+ if items is None:
+ raise Exception(f"Transform {xform} is not a generator")
+ return items
+
+ def add(self, func):
+ self._transforms.append(func)
+ return func
+
+ def add_validate(self, schema):
+ self.add(ValidateSchema(schema))
+
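+
+# A sketch of the decorator pattern described in the docstring above; the
+# sequence and transform below are hypothetical, and the transform simply
+# passes tasks through unchanged.
+_example_transforms = TransformSequence()
+
+
+@_example_transforms.add
+def _echo_tasks(config, tasks):
+    for task in tasks:
+        yield task
+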
+
+@attr.s
+class ValidateSchema:
+ schema = attr.ib(type=Schema)
+
+ def __call__(self, config, tasks):
+ for task in tasks:
+ if "name" in task:
+ error = "In {kind} kind task {name!r}:".format(
+ kind=config.kind, name=task["name"]
+ )
+ elif "label" in task:
+ error = "In job {label!r}:".format(label=task["label"])
+ elif "primary-dependency" in task:
+ error = "In {kind} kind task for {dependency!r}:".format(
+ kind=config.kind, dependency=task["primary-dependency"].label
+ )
+ else:
+ error = "In unknown task:"
+ validate_schema(self.schema, task, error)
+ yield task
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/cached_tasks.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/cached_tasks.py
new file mode 100644
index 0000000000..57a55dffb3
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/cached_tasks.py
@@ -0,0 +1,90 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from collections import deque
+
+import taskgraph
+from taskgraph.transforms.base import TransformSequence
+from taskgraph.util.cached_tasks import add_optimization
+
+transforms = TransformSequence()
+
+
+def order_tasks(config, tasks):
+ """Iterate image tasks in an order where parent tasks come first."""
+ if config.kind == "docker-image":
+ kind_prefix = "build-docker-image-"
+ else:
+ kind_prefix = config.kind + "-"
+
+ pending = deque(tasks)
+ task_labels = {task["label"] for task in pending}
+ emitted = set()
+ while True:
+ try:
+ task = pending.popleft()
+ except IndexError:
+ break
+        # Labels of this task's in-kind dependencies (its parent images).
+        parents = {
+            dep
+            for dep in task.get("dependencies", {}).values()
+            if dep.startswith(kind_prefix)
+        }
+ if parents and not emitted.issuperset(parents & task_labels):
+ pending.append(task)
+ continue
+ emitted.add(task["label"])
+ yield task
+
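+
+# A sketch of the re-queueing behavior above with two hypothetical image
+# definitions; the parent is emitted first even though it is listed second:
+#
+#   list(order_tasks(config, [
+#       {"label": "build-docker-image-child",
+#        "dependencies": {"parent": "build-docker-image-base"}},
+#       {"label": "build-docker-image-base", "dependencies": {}},
+#   ]))
+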
+
+def format_task_digest(cached_task):
+ return "/".join(
+ [
+ cached_task["type"],
+ cached_task["name"],
+ cached_task["digest"],
+ ]
+ )
+
+
+@transforms.add
+def cache_task(config, tasks):
+ if taskgraph.fast:
+ for task in tasks:
+ yield task
+ return
+
+ digests = {}
+ for task in config.kind_dependencies_tasks.values():
+ if "cached_task" in task.attributes:
+ digests[task.label] = format_task_digest(task.attributes["cached_task"])
+
+ for task in order_tasks(config, tasks):
+ cache = task.pop("cache", None)
+ if cache is None:
+ yield task
+ continue
+
+ dependency_digests = []
+ for p in task.get("dependencies", {}).values():
+ if p in digests:
+ dependency_digests.append(digests[p])
+ else:
+ raise Exception(
+ "Cached task {} has uncached parent task: {}".format(
+ task["label"], p
+ )
+ )
+ digest_data = cache["digest-data"] + sorted(dependency_digests)
+ add_optimization(
+ config,
+ task,
+ cache_type=cache["type"],
+ cache_name=cache["name"],
+ digest_data=digest_data,
+ )
+ digests[task["label"]] = format_task_digest(task["attributes"]["cached_task"])
+
+ yield task
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/code_review.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/code_review.py
new file mode 100644
index 0000000000..bdb655b97d
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/code_review.py
@@ -0,0 +1,23 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Add soft dependencies and configuration to code-review tasks.
+"""
+
+
+from taskgraph.transforms.base import TransformSequence
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def add_dependencies(config, jobs):
+ for job in jobs:
+ job.setdefault("soft-dependencies", [])
+ job["soft-dependencies"] += [
+ dep_task.label
+ for dep_task in config.kind_dependencies_tasks.values()
+ if dep_task.attributes.get("code-review") is True
+ ]
+ yield job
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py
new file mode 100644
index 0000000000..dd7c01e5a9
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/docker_image.py
@@ -0,0 +1,213 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import json
+import logging
+import os
+import re
+
+from voluptuous import Optional, Required
+
+import taskgraph
+from taskgraph.transforms.base import TransformSequence
+from taskgraph.util.docker import create_context_tar, generate_context_hash
+from taskgraph.util.schema import Schema
+
+from .task import task_description_schema
+
+logger = logging.getLogger(__name__)
+
+CONTEXTS_DIR = "docker-contexts"
+
+DIGEST_RE = re.compile("^[0-9a-f]{64}$")
+
+IMAGE_BUILDER_IMAGE = (
+ "taskcluster/image_builder:4.0.0"
+ "@sha256:"
+ "866c304445334703b68653e1390816012c9e6bdabfbd1906842b5b229e8ed044"
+)
+
+transforms = TransformSequence()
+
+docker_image_schema = Schema(
+ {
+ # Name of the docker image.
+ Required("name"): str,
+ # Name of the parent docker image.
+ Optional("parent"): str,
+ # Treeherder symbol.
+ Optional("symbol"): str,
+ # relative path (from config.path) to the file the docker image was defined
+ # in.
+ Optional("task-from"): str,
+ # Arguments to use for the Dockerfile.
+ Optional("args"): {str: str},
+ # Name of the docker image definition under taskcluster/docker, when
+ # different from the docker image name.
+ Optional("definition"): str,
+ # List of package tasks this docker image depends on.
+ Optional("packages"): [str],
+ Optional(
+ "index",
+ description="information for indexing this build so its artifacts can be discovered",
+ ): task_description_schema["index"],
+ Optional(
+ "cache",
+ description="Whether this image should be cached based on inputs.",
+ ): bool,
+ }
+)
+
+
+transforms.add_validate(docker_image_schema)
+
+
+@transforms.add
+def fill_template(config, tasks):
+ available_packages = set()
+ for task in config.kind_dependencies_tasks.values():
+ if task.kind != "packages":
+ continue
+ name = task.label.replace("packages-", "")
+ available_packages.add(name)
+
+ context_hashes = {}
+
+ tasks = list(tasks)
+
+ if not taskgraph.fast and config.write_artifacts:
+ if not os.path.isdir(CONTEXTS_DIR):
+ os.makedirs(CONTEXTS_DIR)
+
+ for task in tasks:
+ image_name = task.pop("name")
+ job_symbol = task.pop("symbol", None)
+ args = task.pop("args", {})
+ definition = task.pop("definition", image_name)
+ packages = task.pop("packages", [])
+ parent = task.pop("parent", None)
+
+ for p in packages:
+ if p not in available_packages:
+ raise Exception(
+ "Missing package job for {}-{}: {}".format(
+ config.kind, image_name, p
+ )
+ )
+
+ if not taskgraph.fast:
+ context_path = os.path.join("taskcluster", "docker", definition)
+ topsrcdir = os.path.dirname(config.graph_config.taskcluster_yml)
+ if config.write_artifacts:
+ context_file = os.path.join(CONTEXTS_DIR, f"{image_name}.tar.gz")
+ logger.info(f"Writing {context_file} for docker image {image_name}")
+ context_hash = create_context_tar(
+ topsrcdir,
+ context_path,
+ context_file,
+ args,
+ )
+ else:
+ context_hash = generate_context_hash(topsrcdir, context_path, args)
+ else:
+ if config.write_artifacts:
+ raise Exception("Can't write artifacts if `taskgraph.fast` is set.")
+ context_hash = "0" * 40
+ digest_data = [context_hash]
+ digest_data += [json.dumps(args, sort_keys=True)]
+ context_hashes[image_name] = context_hash
+
+ description = "Build the docker image {} for use by dependent tasks".format(
+ image_name
+ )
+
+ args["DOCKER_IMAGE_PACKAGES"] = " ".join(f"<{p}>" for p in packages)
+
+ # Adjust the zstandard compression level based on the execution level.
+ # We use faster compression for level 1 because we care more about
+ # end-to-end times. We use slower/better compression for other levels
+ # because images are read more often and it is worth the trade-off to
+ # burn more CPU once to reduce image size.
+ zstd_level = "3" if int(config.params["level"]) == 1 else "10"
+
+ # include some information that is useful in reconstructing this task
+ # from JSON
+ taskdesc = {
+ "label": "build-docker-image-" + image_name,
+ "description": description,
+ "attributes": {
+ "image_name": image_name,
+ "artifact_prefix": "public",
+ },
+ "expires-after": "28 days" if config.params.is_try() else "1 year",
+ "scopes": [],
+ "run-on-projects": [],
+ "worker-type": "images",
+ "worker": {
+ "implementation": "docker-worker",
+ "os": "linux",
+ "artifacts": [
+ {
+ "type": "file",
+ "path": "/workspace/image.tar.zst",
+ "name": "public/image.tar.zst",
+ }
+ ],
+ "env": {
+ "CONTEXT_TASK_ID": {"task-reference": "<decision>"},
+ "CONTEXT_PATH": "public/docker-contexts/{}.tar.gz".format(
+ image_name
+ ),
+ "HASH": context_hash,
+ "PROJECT": config.params["project"],
+ "IMAGE_NAME": image_name,
+ "DOCKER_IMAGE_ZSTD_LEVEL": zstd_level,
+ "DOCKER_BUILD_ARGS": {
+ "task-reference": json.dumps(args),
+ },
+ "VCS_BASE_REPOSITORY": config.params["base_repository"],
+ "VCS_HEAD_REPOSITORY": config.params["head_repository"],
+ "VCS_HEAD_REV": config.params["head_rev"],
+ "VCS_REPOSITORY_TYPE": config.params["repository_type"],
+ },
+ "chain-of-trust": True,
+ "max-run-time": 7200,
+ },
+ }
+ if "index" in task:
+ taskdesc["index"] = task["index"]
+ if job_symbol:
+ taskdesc["treeherder"] = {
+ "symbol": job_symbol,
+ "platform": "taskcluster-images/opt",
+ "kind": "other",
+ "tier": 1,
+ }
+
+ worker = taskdesc["worker"]
+
+ worker["docker-image"] = IMAGE_BUILDER_IMAGE
+ digest_data.append(f"image-builder-image:{IMAGE_BUILDER_IMAGE}")
+
+ if packages:
+ deps = taskdesc.setdefault("dependencies", {})
+ for p in sorted(packages):
+ deps[p] = f"packages-{p}"
+
+ if parent:
+ deps = taskdesc.setdefault("dependencies", {})
+ deps["parent"] = f"build-docker-image-{parent}"
+ worker["env"]["PARENT_TASK_ID"] = {
+ "task-reference": "<parent>",
+ }
+
+ if task.get("cache", True) and not taskgraph.fast:
+ taskdesc["cache"] = {
+ "type": "docker-images.v2",
+ "name": image_name,
+ "digest-data": digest_data,
+ }
+
+ yield taskdesc
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py
new file mode 100644
index 0000000000..65d4b62482
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/fetch.py
@@ -0,0 +1,335 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Support for running tasks that download remote content and re-export
+# it as task artifacts.
+
+
+import os
+import re
+
+import attr
+from voluptuous import Extra, Optional, Required
+
+import taskgraph
+
+from ..util import path
+from ..util.cached_tasks import add_optimization
+from ..util.schema import Schema, validate_schema
+from ..util.treeherder import join_symbol
+from .base import TransformSequence
+
+CACHE_TYPE = "content.v1"
+
+FETCH_SCHEMA = Schema(
+ {
+ # Name of the task.
+ Required("name"): str,
+ # Relative path (from config.path) to the file the task was defined
+ # in.
+ Optional("task-from"): str,
+ # Description of the task.
+ Required("description"): str,
+ Optional("docker-image"): object,
+ Optional(
+ "fetch-alias",
+ description="An alias that can be used instead of the real fetch job name in "
+ "fetch stanzas for jobs.",
+ ): str,
+ Optional(
+ "artifact-prefix",
+ description="The prefix of the taskcluster artifact being uploaded. "
+ "Defaults to `public/`; if it starts with something other than "
+ "`public/` the artifact will require scopes to access.",
+ ): str,
+ Optional("attributes"): {str: object},
+ Required("fetch"): {
+ Required("type"): str,
+ Extra: object,
+ },
+ }
+)
+
+
+# Define a collection of fetch builders, keyed by the fetch "type" they handle.
+fetch_builders = {}
+
+
+@attr.s(frozen=True)
+class FetchBuilder:
+ schema = attr.ib(type=Schema)
+ builder = attr.ib()
+
+
+def fetch_builder(name, schema):
+ schema = Schema({Required("type"): name}).extend(schema)
+
+ def wrap(func):
+ fetch_builders[name] = FetchBuilder(schema, func)
+ return func
+
+ return wrap
+
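+
+# Concrete registrations of this decorator appear at the bottom of this
+# module: the "static-url" and "git" fetch builders.
+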
+
+transforms = TransformSequence()
+transforms.add_validate(FETCH_SCHEMA)
+
+
+@transforms.add
+def process_fetch_job(config, jobs):
+ # Converts fetch-url entries to the job schema.
+ for job in jobs:
+ typ = job["fetch"]["type"]
+ name = job["name"]
+ fetch = job.pop("fetch")
+
+ if typ not in fetch_builders:
+ raise Exception(f"Unknown fetch type {typ} in fetch {name}")
+ validate_schema(fetch_builders[typ].schema, fetch, f"In task.fetch {name!r}:")
+
+ job.update(configure_fetch(config, typ, name, fetch))
+
+ yield job
+
+
+def configure_fetch(config, typ, name, fetch):
+ if typ not in fetch_builders:
+ raise Exception(f"No fetch type {typ} in fetch {name}")
+ validate_schema(fetch_builders[typ].schema, fetch, f"In task.fetch {name!r}:")
+
+ return fetch_builders[typ].builder(config, name, fetch)
+
+
+@transforms.add
+def make_task(config, jobs):
+ # Fetch tasks are idempotent and immutable. Have them live for
+ # essentially forever.
+ if config.params["level"] == "3":
+ expires = "1000 years"
+ else:
+ expires = "28 days"
+
+ for job in jobs:
+ name = job["name"]
+ artifact_prefix = job.get("artifact-prefix", "public")
+ env = job.get("env", {})
+ env.update({"UPLOAD_DIR": "/builds/worker/artifacts"})
+ attributes = job.get("attributes", {})
+ attributes["fetch-artifact"] = path.join(artifact_prefix, job["artifact_name"])
+ alias = job.get("fetch-alias")
+ if alias:
+ attributes["fetch-alias"] = alias
+
+ task = {
+ "attributes": attributes,
+ "name": name,
+ "description": job["description"],
+ "expires-after": expires,
+ "label": "fetch-%s" % name,
+ "run-on-projects": [],
+ "run": {
+ "using": "run-task",
+ "checkout": False,
+ "command": job["command"],
+ },
+ "worker-type": "images",
+ "worker": {
+ "chain-of-trust": True,
+ "docker-image": job.get("docker-image", {"in-tree": "fetch"}),
+ "env": env,
+ "max-run-time": 900,
+ "artifacts": [
+ {
+ "type": "directory",
+ "name": artifact_prefix,
+ "path": "/builds/worker/artifacts",
+ }
+ ],
+ },
+ }
+
+ if "treeherder" in config.graph_config:
+ task["treeherder"] = {
+ "symbol": join_symbol("Fetch", name),
+ "kind": "build",
+ "platform": "fetch/opt",
+ "tier": 1,
+ }
+
+ if job.get("secret", None):
+ task["scopes"] = ["secrets:get:" + job.get("secret")]
+ task["worker"]["taskcluster-proxy"] = True
+
+ if not taskgraph.fast:
+ cache_name = task["label"].replace(f"{config.kind}-", "", 1)
+
+ # This adds the level to the index path automatically.
+ add_optimization(
+ config,
+ task,
+ cache_type=CACHE_TYPE,
+ cache_name=cache_name,
+ digest_data=job["digest_data"],
+ )
+ yield task
+
+
+@fetch_builder(
+ "static-url",
+ schema={
+ # The URL to download.
+ Required("url"): str,
+ # The SHA-256 of the downloaded content.
+ Required("sha256"): str,
+ # Size of the downloaded entity, in bytes.
+ Required("size"): int,
+ # GPG signature verification.
+ Optional("gpg-signature"): {
+ # URL where GPG signature document can be obtained. Can contain the
+ # value ``{url}``, which will be substituted with the value from
+ # ``url``.
+ Required("sig-url"): str,
+ # Path to file containing GPG public key(s) used to validate
+ # download.
+ Required("key-path"): str,
+ },
+ # The name to give to the generated artifact. Defaults to the file
+ # portion of the URL. Using a different extension converts the
+ # archive to the given type. Only conversion to .tar.zst is
+ # supported.
+ Optional("artifact-name"): str,
+ # Strip the given number of path components at the beginning of
+ # each file entry in the archive.
+ # Requires an artifact-name ending with .tar.zst.
+ Optional("strip-components"): int,
+ # Add the given prefix to each file entry in the archive.
+ # Requires an artifact-name ending with .tar.zst.
+ Optional("add-prefix"): str,
+ # Headers to pass alongside the request.
+ Optional("headers"): {
+ str: str,
+ },
+        # IMPORTANT: when adding anything that changes the behavior of the
+        # task, be sure to update the digest data used to compute cache hits.
+ },
+)
+def create_fetch_url_task(config, name, fetch):
+ artifact_name = fetch.get("artifact-name")
+ if not artifact_name:
+ artifact_name = fetch["url"].split("/")[-1]
+
+ command = [
+ "fetch-content",
+ "static-url",
+ ]
+
+ # Arguments that matter to the cache digest
+ args = [
+ "--sha256",
+ fetch["sha256"],
+ "--size",
+ "%d" % fetch["size"],
+ ]
+
+ if fetch.get("strip-components"):
+ args.extend(["--strip-components", "%d" % fetch["strip-components"]])
+
+ if fetch.get("add-prefix"):
+ args.extend(["--add-prefix", fetch["add-prefix"]])
+
+ command.extend(args)
+
+ env = {}
+
+ if "gpg-signature" in fetch:
+ sig_url = fetch["gpg-signature"]["sig-url"].format(url=fetch["url"])
+ key_path = os.path.join(taskgraph.GECKO, fetch["gpg-signature"]["key-path"])
+
+ with open(key_path) as fh:
+ gpg_key = fh.read()
+
+ env["FETCH_GPG_KEY"] = gpg_key
+ command.extend(
+ [
+ "--gpg-sig-url",
+ sig_url,
+ "--gpg-key-env",
+ "FETCH_GPG_KEY",
+ ]
+ )
+
+ if "headers" in fetch:
+ for k, v in fetch["headers"].items():
+ command.extend(["-H", f"{k}:{v}"])
+
+ command.extend(
+ [
+ fetch["url"],
+ "/builds/worker/artifacts/%s" % artifact_name,
+ ]
+ )
+
+ return {
+ "command": command,
+ "artifact_name": artifact_name,
+ "env": env,
+ # We don't include the GPG signature in the digest because it isn't
+ # materially important for caching: GPG signatures are supplemental
+ # trust checking beyond what the shasum already provides.
+ "digest_data": args + [artifact_name],
+ }
+
+
+@fetch_builder(
+ "git",
+ schema={
+ Required("repo"): str,
+ Required("revision"): str,
+ Optional("include-dot-git"): bool,
+ Optional("artifact-name"): str,
+ Optional("path-prefix"): str,
+ # ssh-key is a taskcluster secret path (e.g. project/civet/github-deploy-key)
+ # In the secret dictionary, the key should be specified as
+ # "ssh_privkey": "-----BEGIN OPENSSH PRIVATE KEY-----\nkfksnb3jc..."
+ # n.b. The OpenSSH private key file format requires a newline at the end of the file.
+ Optional("ssh-key"): str,
+ },
+)
+def create_git_fetch_task(config, name, fetch):
+ path_prefix = fetch.get("path-prefix")
+ if not path_prefix:
+ path_prefix = fetch["repo"].rstrip("/").rsplit("/", 1)[-1]
+ artifact_name = fetch.get("artifact-name")
+ if not artifact_name:
+ artifact_name = f"{path_prefix}.tar.zst"
+
+ if not re.match(r"[0-9a-fA-F]{40}", fetch["revision"]):
+ raise Exception(f'Revision is not a sha1 in fetch task "{name}"')
+
+ args = [
+ "fetch-content",
+ "git-checkout-archive",
+ "--path-prefix",
+ path_prefix,
+ fetch["repo"],
+ fetch["revision"],
+ "/builds/worker/artifacts/%s" % artifact_name,
+ ]
+
+ ssh_key = fetch.get("ssh-key")
+ if ssh_key:
+ args.append("--ssh-key-secret")
+ args.append(ssh_key)
+
+ digest_data = [fetch["revision"], path_prefix, artifact_name]
+ if fetch.get("include-dot-git", False):
+ args.append("--include-dot-git")
+ digest_data.append(".git")
+
+ return {
+ "command": args,
+ "artifact_name": artifact_name,
+ "digest_data": digest_data,
+ "secret": ssh_key,
+ }
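A similar illustrative sketch (not from the patch) for the git builder, with a hypothetical repository and an all-hex placeholder revision:

    fetch = {
        "repo": "https://github.com/example/project",  # hypothetical
        "revision": "0123456789abcdef0123456789abcdef01234567",
        "include-dot-git": True,
    }
    # create_git_fetch_task(config, "project", fetch) would return:
    #   command: ["fetch-content", "git-checkout-archive",
    #             "--path-prefix", "project",
    #             "https://github.com/example/project",
    #             "0123456789abcdef0123456789abcdef01234567",
    #             "/builds/worker/artifacts/project.tar.zst",
    #             "--include-dot-git"]
    #   digest_data: [fetch["revision"], "project", "project.tar.zst", ".git"]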
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py
new file mode 100644
index 0000000000..cc2615b702
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/__init__.py
@@ -0,0 +1,438 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Convert a job description into a task description.
+
+Job descriptions are similar to task descriptions, but they specify how to run
+the job at a higher level, using a "run" field that can be interpreted by
+run-using handlers in `taskcluster/taskgraph/transforms/job`.
+"""
+
+
+import copy
+import json
+import logging
+
+from voluptuous import Any, Exclusive, Extra, Optional, Required
+
+from taskgraph.transforms.base import TransformSequence
+from taskgraph.transforms.cached_tasks import order_tasks
+from taskgraph.transforms.task import task_description_schema
+from taskgraph.util import path as mozpath
+from taskgraph.util.python_path import import_sibling_modules
+from taskgraph.util.schema import Schema, validate_schema
+from taskgraph.util.taskcluster import get_artifact_prefix
+from taskgraph.util.workertypes import worker_type_implementation
+
+logger = logging.getLogger(__name__)
+
+# Schema for a build description
+job_description_schema = Schema(
+ {
+ # The name of the job and the job's label. At least one must be specified,
+ # and the label will be generated from the name if necessary, by prepending
+ # the kind.
+ Optional("name"): str,
+ Optional("label"): str,
+ # the following fields are passed directly through to the task description,
+ # possibly modified by the run implementation. See
+ # taskcluster/taskgraph/transforms/task.py for the schema details.
+ Required("description"): task_description_schema["description"],
+ Optional("attributes"): task_description_schema["attributes"],
+ Optional("task-from"): task_description_schema["task-from"],
+ Optional("dependencies"): task_description_schema["dependencies"],
+ Optional("soft-dependencies"): task_description_schema["soft-dependencies"],
+ Optional("if-dependencies"): task_description_schema["if-dependencies"],
+ Optional("requires"): task_description_schema["requires"],
+ Optional("expires-after"): task_description_schema["expires-after"],
+ Optional("routes"): task_description_schema["routes"],
+ Optional("scopes"): task_description_schema["scopes"],
+ Optional("tags"): task_description_schema["tags"],
+ Optional("extra"): task_description_schema["extra"],
+ Optional("treeherder"): task_description_schema["treeherder"],
+ Optional("index"): task_description_schema["index"],
+ Optional("run-on-projects"): task_description_schema["run-on-projects"],
+ Optional("run-on-tasks-for"): task_description_schema["run-on-tasks-for"],
+ Optional("run-on-git-branches"): task_description_schema["run-on-git-branches"],
+ Optional("always-target"): task_description_schema["always-target"],
+ Exclusive("optimization", "optimization"): task_description_schema[
+ "optimization"
+ ],
+ Optional("needs-sccache"): task_description_schema["needs-sccache"],
+ # The "when" section contains descriptions of the circumstances under which
+ # this task should be included in the task graph. This will be converted
+ # into an optimization, so it cannot be specified in a job description that
+ # also gives 'optimization'.
+ Exclusive("when", "optimization"): {
+ # This task only needs to be run if a file matching one of the given
+ # patterns has changed in the push. The patterns use the mozpack
+ # match function (python/mozbuild/mozpack/path.py).
+ Optional("files-changed"): [str],
+ },
+ # A list of artifacts to install from 'fetch' tasks.
+ Optional("fetches"): {
+ Any("toolchain", "fetch"): [str],
+ str: [
+ str,
+ {
+ Required("artifact"): str,
+ Optional("dest"): str,
+ Optional("extract"): bool,
+ Optional("verify-hash"): bool,
+ },
+ ],
+ },
+ # A description of how to run this job.
+ "run": {
+ # The key to a job implementation in a peer module to this one
+ "using": str,
+ # Base work directory used to set up the task.
+ Optional("workdir"): str,
+ # Any remaining content is verified against that job implementation's
+ # own schema.
+ Extra: object,
+ },
+ Required("worker-type"): task_description_schema["worker-type"],
+ # This object will be passed through to the task description, with additions
+ # provided by the job's run-using function
+ Optional("worker"): dict,
+ }
+)
+
+transforms = TransformSequence()
+transforms.add_validate(job_description_schema)
+
+
+@transforms.add
+def rewrite_when_to_optimization(config, jobs):
+ for job in jobs:
+ when = job.pop("when", {})
+ if not when:
+ yield job
+ continue
+
+ files_changed = when.get("files-changed")
+
+ # implicitly add task config directory.
+ files_changed.append(f"{config.path}/**")
+
+ # "only when files changed" implies "skip if files have not changed"
+ job["optimization"] = {"skip-unless-changed": files_changed}
+
+ assert "when" not in job
+ yield job
+
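A sketch of the rewrite above, under an assumed config.path value:

    # Input:  {"when": {"files-changed": ["src/**"]}, ...}
    # With config.path == "taskcluster/ci/build", the transform yields:
    #   {"optimization": {"skip-unless-changed":
    #        ["src/**", "taskcluster/ci/build/**"]}, ...}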
+
+@transforms.add
+def set_implementation(config, jobs):
+ for job in jobs:
+ impl, os = worker_type_implementation(config.graph_config, job["worker-type"])
+ if os:
+ job.setdefault("tags", {})["os"] = os
+ if impl:
+ job.setdefault("tags", {})["worker-implementation"] = impl
+ worker = job.setdefault("worker", {})
+ assert "implementation" not in worker
+ worker["implementation"] = impl
+ if os:
+ worker["os"] = os
+ yield job
+
+
+@transforms.add
+def set_label(config, jobs):
+ for job in jobs:
+ if "label" not in job:
+ if "name" not in job:
+ raise Exception("job has neither a name nor a label")
+ job["label"] = "{}-{}".format(config.kind, job["name"])
+ if job.get("name"):
+ del job["name"]
+ yield job
+
+
+@transforms.add
+def add_resource_monitor(config, jobs):
+ for job in jobs:
+ if job.get("attributes", {}).get("resource-monitor"):
+ worker_implementation, worker_os = worker_type_implementation(
+ config.graph_config, job["worker-type"]
+ )
+ # Normalise worker os so that linux-bitbar and similar use linux tools.
+ worker_os = worker_os.split("-")[0]
+ if "win7" in job["worker-type"]:
+ arch = "32"
+ else:
+ arch = "64"
+ job.setdefault("fetches", {})
+ job["fetches"].setdefault("toolchain", [])
+ job["fetches"]["toolchain"].append(f"{worker_os}{arch}-resource-monitor")
+
+ if worker_implementation == "docker-worker":
+ artifact_source = "/builds/worker/monitoring/resource-monitor.json"
+ else:
+ artifact_source = "monitoring/resource-monitor.json"
+ job["worker"].setdefault("artifacts", [])
+ job["worker"]["artifacts"].append(
+ {
+ "name": "public/monitoring/resource-monitor.json",
+ "type": "file",
+ "path": artifact_source,
+ }
+ )
+ # Set env for output file
+ job["worker"].setdefault("env", {})
+ job["worker"]["env"]["RESOURCE_MONITOR_OUTPUT"] = artifact_source
+
+ yield job
+
+
+def get_attribute(dict, key, attributes, attribute_name):
+ """Get `attribute_name` from the given `attributes` dict, and if there
+ is a corresponding value, set `key` in `dict` to that value."""
+ value = attributes.get(attribute_name)
+ if value:
+ dict[key] = value
+
+
+@transforms.add
+def use_fetches(config, jobs):
+ artifact_names = {}
+ aliases = {}
+ extra_env = {}
+
+ if config.kind in ("toolchain", "fetch"):
+ jobs = list(jobs)
+ for job in jobs:
+ run = job.get("run", {})
+ label = job["label"]
+ get_attribute(artifact_names, label, run, "toolchain-artifact")
+ value = run.get(f"{config.kind}-alias")
+ if value:
+ aliases[f"{config.kind}-{value}"] = label
+
+ for task in config.kind_dependencies_tasks.values():
+ if task.kind in ("fetch", "toolchain"):
+ get_attribute(
+ artifact_names,
+ task.label,
+ task.attributes,
+ f"{task.kind}-artifact",
+ )
+ get_attribute(extra_env, task.label, task.attributes, f"{task.kind}-env")
+ value = task.attributes.get(f"{task.kind}-alias")
+ if value:
+ aliases[f"{task.kind}-{value}"] = task.label
+
+ artifact_prefixes = {}
+ for job in order_tasks(config, jobs):
+ artifact_prefixes[job["label"]] = get_artifact_prefix(job)
+
+ fetches = job.pop("fetches", None)
+ if not fetches:
+ yield job
+ continue
+
+ job_fetches = []
+ name = job.get("name", job.get("label"))
+ dependencies = job.setdefault("dependencies", {})
+ worker = job.setdefault("worker", {})
+ env = worker.setdefault("env", {})
+ prefix = get_artifact_prefix(job)
+ for kind, artifacts in fetches.items():
+ if kind in ("fetch", "toolchain"):
+ for fetch_name in artifacts:
+ label = f"{kind}-{fetch_name}"
+ label = aliases.get(label, label)
+ if label not in artifact_names:
+ raise Exception(
+ "Missing fetch job for {kind}-{name}: {fetch}".format(
+ kind=config.kind, name=name, fetch=fetch_name
+ )
+ )
+ if label in extra_env:
+ env.update(extra_env[label])
+
+ path = artifact_names[label]
+
+ dependencies[label] = label
+ job_fetches.append(
+ {
+ "artifact": path,
+ "task": f"<{label}>",
+ "extract": True,
+ }
+ )
+ else:
+ if kind not in dependencies:
+ raise Exception(
+ "{name} can't fetch {kind} artifacts because "
+ "it has no {kind} dependencies!".format(name=name, kind=kind)
+ )
+ dep_label = dependencies[kind]
+ if dep_label in artifact_prefixes:
+ prefix = artifact_prefixes[dep_label]
+ else:
+ dep_tasks = [
+ task
+ for label, task in config.kind_dependencies_tasks.items()
+ if label == dep_label
+ ]
+ if len(dep_tasks) != 1:
+ raise Exception(
+ "{name} can't fetch {kind} artifacts because "
+ "there are {tasks} with label {label} in kind dependencies!".format(
+ name=name,
+ kind=kind,
+ label=dependencies[kind],
+ tasks="no tasks"
+ if len(dep_tasks) == 0
+ else "multiple tasks",
+ )
+ )
+
+ prefix = get_artifact_prefix(dep_tasks[0])
+
+ for artifact in artifacts:
+ if isinstance(artifact, str):
+ path = artifact
+ dest = None
+ extract = True
+ verify_hash = False
+ else:
+ path = artifact["artifact"]
+ dest = artifact.get("dest")
+ extract = artifact.get("extract", True)
+ verify_hash = artifact.get("verify-hash", False)
+
+ fetch = {
+ "artifact": f"{prefix}/{path}",
+ "task": f"<{kind}>",
+ "extract": extract,
+ }
+ if dest is not None:
+ fetch["dest"] = dest
+ if verify_hash:
+ fetch["verify-hash"] = verify_hash
+ job_fetches.append(fetch)
+
+ job_artifact_prefixes = {
+ mozpath.dirname(fetch["artifact"])
+ for fetch in job_fetches
+ if not fetch["artifact"].startswith("public/")
+ }
+ if job_artifact_prefixes:
+ # Use taskcluster-proxy and request appropriate scope. For example, add
+ # 'scopes: [queue:get-artifact:path/to/*]' for 'path/to/artifact.tar.xz'.
+ worker["taskcluster-proxy"] = True
+ for prefix in sorted(job_artifact_prefixes):
+ scope = f"queue:get-artifact:{prefix}/*"
+ if scope not in job.setdefault("scopes", []):
+ job["scopes"].append(scope)
+
+ env["MOZ_FETCHES"] = {"task-reference": json.dumps(job_fetches, sort_keys=True)}
+
+ env.setdefault("MOZ_FETCHES_DIR", "fetches")
+
+ yield job
+
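A sketch of the environment use_fetches produces for a hypothetical toolchain fetch; the label and artifact path below are invented:

    # fetches: {"toolchain": ["linux64-clang"]} adds the dependency
    # {"toolchain-linux64-clang": "toolchain-linux64-clang"} and sets:
    #   env["MOZ_FETCHES"] = {"task-reference": json.dumps([
    #       {"artifact": "public/build/clang.tar.zst",  # its toolchain-artifact
    #        "extract": True,
    #        "task": "<toolchain-linux64-clang>"}], sort_keys=True)}
    #   env["MOZ_FETCHES_DIR"] defaults to "fetches"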
+
+@transforms.add
+def make_task_description(config, jobs):
+ """Given a build description, create a task description"""
+ # import plugin modules first, before iterating over jobs
+ import_sibling_modules(exceptions=("common.py",))
+
+ for job in jobs:
+ # always-optimized tasks never execute, so have no workdir
+ if job["worker"]["implementation"] in ("docker-worker", "generic-worker"):
+ job["run"].setdefault("workdir", "/builds/worker")
+
+ taskdesc = copy.deepcopy(job)
+
+ # fill in some empty defaults to make run implementations easier
+ taskdesc.setdefault("attributes", {})
+ taskdesc.setdefault("dependencies", {})
+ taskdesc.setdefault("soft-dependencies", [])
+ taskdesc.setdefault("routes", [])
+ taskdesc.setdefault("scopes", [])
+ taskdesc.setdefault("extra", {})
+
+ # give the function for job.run.using on this worker implementation a
+ # chance to set up the task description.
+ configure_taskdesc_for_run(
+ config, job, taskdesc, job["worker"]["implementation"]
+ )
+ del taskdesc["run"]
+
+ # yield only the task description, discarding the job description
+ yield taskdesc
+
+
+# A registry of all functions decorated with run_job_using
+registry = {}
+
+
+def run_job_using(worker_implementation, run_using, schema=None, defaults={}):
+ """Register the decorated function as able to set up a task description for
+ jobs with the given worker implementation and `run.using` property. If
+ `schema` is given, the job's run field will be verified to match it.
+
+ The decorated function should have the signature `using_foo(config, job, taskdesc)`
+ and should modify the task description in-place. The skeleton of
+ the task description is already set up, but without a payload."""
+
+ def wrap(func):
+ for_run_using = registry.setdefault(run_using, {})
+ if worker_implementation in for_run_using:
+ raise Exception(
+ "run_job_using({!r}, {!r}) already exists: {!r}".format(
+ run_using, worker_implementation, for_run_using[run_using]
+ )
+ )
+ for_run_using[worker_implementation] = (func, schema, defaults)
+ return func
+
+ return wrap
+
+
+@run_job_using(
+ "always-optimized", "always-optimized", Schema({"using": "always-optimized"})
+)
+def always_optimized(config, job, taskdesc):
+ pass
+
+
+def configure_taskdesc_for_run(config, job, taskdesc, worker_implementation):
+ """
+ Run the appropriate function for this job against the given task
+ description.
+
+ This will raise an appropriate error if no function exists, or if the job's
+ run is not valid according to the schema.
+ """
+ run_using = job["run"]["using"]
+ if run_using not in registry:
+ raise Exception(f"no functions for run.using {run_using!r}")
+
+ if worker_implementation not in registry[run_using]:
+ raise Exception(
+ "no functions for run.using {!r} on {!r}".format(
+ run_using, worker_implementation
+ )
+ )
+
+ func, schema, defaults = registry[run_using][worker_implementation]
+ for k, v in defaults.items():
+ job["run"].setdefault(k, v)
+
+ if schema:
+ validate_schema(
+ schema,
+ job["run"],
+ "In job.run using {!r}/{!r} for job {!r}:".format(
+ job["run"]["using"], worker_implementation, job["label"]
+ ),
+ )
+ func(config, job, taskdesc)
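A sketch of registering a run-using handler with the decorator above; the "shell" handler name and its schema are hypothetical:

    from taskgraph.transforms.job import run_job_using
    from taskgraph.util.schema import Schema

    shell_schema = Schema({"using": "shell", "command": str, "workdir": str})

    @run_job_using("docker-worker", "shell", schema=shell_schema)
    def docker_shell(config, job, taskdesc):
        # Only the payload needs filling in; the surrounding task
        # description was already prepared by make_task_description.
        taskdesc["worker"]["command"] = ["bash", "-cx", job["run"]["command"]]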
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py
new file mode 100644
index 0000000000..1660d0856a
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/common.py
@@ -0,0 +1,196 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Common support for various job types. These functions are all named after the
+worker implementation they operate on, and take the same three parameters, for
+consistency.
+"""
+
+
+import hashlib
+import json
+
+from taskgraph.util.taskcluster import get_artifact_prefix
+
+
+def get_vcsdir_name(os):
+ if os == "windows":
+ return "src"
+ else:
+ return "vcs"
+
+
+def add_cache(job, taskdesc, name, mount_point, skip_untrusted=False):
+ """Adds a cache based on the worker's implementation.
+
+ Args:
+ job (dict): Task's job description.
+ taskdesc (dict): Target task description to modify.
+ name (str): Name of the cache.
+ mount_point (path): Path on the host to mount the cache.
+ skip_untrusted (bool): Whether cache is used in untrusted environments
+ (default: False). Only applies to docker-worker.
+ """
+ if not job["run"].get("use-caches", True):
+ return
+
+ worker = job["worker"]
+
+ if worker["implementation"] == "docker-worker":
+ taskdesc["worker"].setdefault("caches", []).append(
+ {
+ "type": "persistent",
+ "name": name,
+ "mount-point": mount_point,
+ "skip-untrusted": skip_untrusted,
+ }
+ )
+
+ elif worker["implementation"] == "generic-worker":
+ taskdesc["worker"].setdefault("mounts", []).append(
+ {
+ "cache-name": name,
+ "directory": mount_point,
+ }
+ )
+
+ else:
+ # Caches not implemented
+ pass
+
+
+def docker_worker_add_workspace_cache(config, job, taskdesc, extra=None):
+ """Add the workspace cache.
+
+ Args:
+ config (TransformConfig): Transform configuration object.
+ job (dict): Task's job description.
+ taskdesc (dict): Target task description to modify.
+ extra (str): Optional context passed in that supports extending the cache
+ key name to avoid undesired conflicts with other caches.
+ """
+ cache_name = "{}-build-{}-{}-workspace".format(
+ config.params["project"],
+ taskdesc["attributes"]["build_platform"],
+ taskdesc["attributes"]["build_type"],
+ )
+ if extra:
+ cache_name = f"{cache_name}-{extra}"
+
+ mount_point = "{workdir}/workspace".format(**job["run"])
+
+ # Don't enable the workspace cache when we can't guarantee its
+ # behavior, like on Try.
+ add_cache(job, taskdesc, cache_name, mount_point, skip_untrusted=True)
+
+
+def add_artifacts(config, job, taskdesc, path):
+ taskdesc["worker"].setdefault("artifacts", []).append(
+ {
+ "name": get_artifact_prefix(taskdesc),
+ "path": path,
+ "type": "directory",
+ }
+ )
+
+
+def docker_worker_add_artifacts(config, job, taskdesc):
+ """Adds an artifact directory to the task"""
+ path = "{workdir}/artifacts/".format(**job["run"])
+ taskdesc["worker"]["env"]["UPLOAD_DIR"] = path
+ add_artifacts(config, job, taskdesc, path)
+
+
+def generic_worker_add_artifacts(config, job, taskdesc):
+ """Adds an artifact directory to the task"""
+ # The path is the location on disk; it doesn't determine whether the
+ # artifacts will be public or private; that is set via the name
+ # attribute in add_artifacts.
+ add_artifacts(config, job, taskdesc, path=get_artifact_prefix(taskdesc))
+
+
+def support_vcs_checkout(config, job, taskdesc, repo_configs, sparse=False):
+ """Update a job/task with parameters to enable a VCS checkout.
+
+ This can only be used with ``run-task`` tasks, as the cache name is
+ reserved for ``run-task`` tasks.
+ """
+ worker = job["worker"]
+ is_mac = worker["os"] == "macosx"
+ is_win = worker["os"] == "windows"
+ is_linux = worker["os"] == "linux"
+ is_docker = worker["implementation"] == "docker-worker"
+ assert is_mac or is_win or is_linux
+
+ if is_win:
+ checkoutdir = "./build"
+ hgstore = "y:/hg-shared"
+ elif is_docker:
+ checkoutdir = "{workdir}/checkouts".format(**job["run"])
+ hgstore = f"{checkoutdir}/hg-store"
+ else:
+ checkoutdir = "./checkouts"
+ hgstore = f"{checkoutdir}/hg-shared"
+
+ vcsdir = checkoutdir + "/" + get_vcsdir_name(worker["os"])
+ cache_name = "checkouts"
+
+ # Robust checkout does not clean up subrepositories, so ensure that tasks
+ # that checkout different sets of paths have separate caches.
+ # See https://bugzilla.mozilla.org/show_bug.cgi?id=1631610
+ if len(repo_configs) > 1:
+ checkout_paths = {
+ "\t".join([repo_config.path, repo_config.prefix])
+ for repo_config in sorted(
+ repo_configs.values(), key=lambda repo_config: repo_config.path
+ )
+ }
+ checkout_paths_str = "\n".join(checkout_paths).encode("utf-8")
+ digest = hashlib.sha256(checkout_paths_str).hexdigest()
+ cache_name += f"-repos-{digest}"
+
+ # Sparse checkouts need their own cache because they can interfere
+ # with clients that aren't sparse aware.
+ if sparse:
+ cache_name += "-sparse"
+
+ # Workers using Mercurial >= 5.8 will enable revlog-compression-zstd, which
+ # workers using older versions can't understand, so they can't share cache.
+ # At the moment, only docker workers use the newer version.
+ if is_docker:
+ cache_name += "-hg58"
+
+ add_cache(job, taskdesc, cache_name, checkoutdir)
+
+ env = taskdesc["worker"].setdefault("env", {})
+ env.update(
+ {
+ "HG_STORE_PATH": hgstore,
+ "REPOSITORIES": json.dumps(
+ {repo.prefix: repo.name for repo in repo_configs.values()}
+ ),
+ "VCS_PATH": vcsdir,
+ }
+ )
+ for repo_config in repo_configs.values():
+ env.update(
+ {
+ f"{repo_config.prefix.upper()}_{key}": value
+ for key, value in {
+ "BASE_REPOSITORY": repo_config.base_repository,
+ "HEAD_REPOSITORY": repo_config.head_repository,
+ "HEAD_REV": repo_config.head_rev,
+ "HEAD_REF": repo_config.head_ref,
+ "REPOSITORY_TYPE": repo_config.type,
+ "SSH_SECRET_NAME": repo_config.ssh_secret_name,
+ }.items()
+ if value is not None
+ }
+ )
+ if repo_config.ssh_secret_name:
+ taskdesc["scopes"].append(f"secrets:get:{repo_config.ssh_secret_name}")
+
+ # only some worker platforms have taskcluster-proxy enabled
+ if job["worker"]["implementation"] in ("docker-worker",):
+ taskdesc["worker"]["taskcluster-proxy"] = True
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/index_search.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/index_search.py
new file mode 100644
index 0000000000..09b48fe594
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/index_search.py
@@ -0,0 +1,37 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+This transform allows including indexed tasks from other projects in the
+current taskgraph. The transform takes a list of indexes, and the optimization
+phase will replace the task with the task from the other graph.
+"""
+
+
+from voluptuous import Required
+
+from taskgraph.transforms.base import TransformSequence
+from taskgraph.transforms.job import run_job_using
+from taskgraph.util.schema import Schema
+
+transforms = TransformSequence()
+
+run_task_schema = Schema(
+ {
+ Required("using"): "index-search",
+ Required(
+ "index-search",
+ "A list of indexes in decreasing order of priority at which to lookup for this "
+ "task. This is interpolated with the graph parameters.",
+ ): [str],
+ }
+)
+
+
+@run_job_using("always-optimized", "index-search", schema=run_task_schema)
+def fill_template(config, job, taskdesc):
+ run = job["run"]
+ taskdesc["optimization"] = {
+ "index-search": [index.format(**config.params) for index in run["index-search"]]
+ }
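A sketch of a job using this handler; the index route is hypothetical, and only `project` and `head_rev` are assumed to be real graph parameters:

    job = {
        "run": {
            "using": "index-search",
            "index-search": [
                "gecko.v2.{project}.revision.{head_rev}.taskgraph.decision",
            ],
        },
    }
    # fill_template interpolates config.params, producing e.g.:
    #   taskdesc["optimization"] == {"index-search":
    #       ["gecko.v2.try.revision.abc123.taskgraph.decision"]}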
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py
new file mode 100644
index 0000000000..a44f30d5bd
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/run_task.py
@@ -0,0 +1,240 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Support for running jobs that are invoked via the `run-task` script.
+"""
+
+
+import os
+
+import attr
+from voluptuous import Any, Optional, Required
+
+from taskgraph.transforms.job import run_job_using
+from taskgraph.transforms.job.common import support_vcs_checkout
+from taskgraph.transforms.task import taskref_or_string
+from taskgraph.util import path, taskcluster
+from taskgraph.util.schema import Schema
+
+EXEC_COMMANDS = {
+ "bash": ["bash", "-cx"],
+ "powershell": ["powershell.exe", "-ExecutionPolicy", "Bypass"],
+}
+
+run_task_schema = Schema(
+ {
+ Required("using"): "run-task",
+ # if true, add a cache at ~worker/.cache, which is where things like pip
+ # tend to hide their caches. This cache is never added for level-1 jobs.
+ # TODO Once bug 1526028 is fixed, this and 'use-caches' should be merged.
+ Required("cache-dotcache"): bool,
+ # Whether or not to use caches.
+ Optional("use-caches"): bool,
+ # if true (the default), perform a checkout on the worker
+ Required("checkout"): Any(bool, {str: dict}),
+ Optional(
+ "cwd",
+ description="Path to run command in. If a checkout is present, the path "
+ "to the checkout will be interpolated with the key `checkout`",
+ ): str,
+ # The sparse checkout profile to use. Value is the filename relative to the
+ # directory where sparse profiles are defined (build/sparse-profiles/).
+ Required("sparse-profile"): Any(str, None),
+ # The command arguments to pass to the `run-task` script, after the
+ # checkout arguments. If a list, it will be passed directly; otherwise
+ # it will be included in a single argument to the command specified by
+ # `exec-with`.
+ Required("command"): Any([taskref_or_string], taskref_or_string),
+ # Context to substitute into the command using format string
+ # substitution (e.g {value}). This is useful if certain aspects of the
+ # command need to be generated in transforms.
+ Optional("command-context"): dict,
+ # What to execute the command with in the event command is a string.
+ Optional("exec-with"): Any(*list(EXEC_COMMANDS)),
+ # Base work directory used to set up the task.
+ Required("workdir"): str,
+ # Whether to run as root. (defaults to False)
+ Optional("run-as-root"): bool,
+ }
+)
+
+
+def common_setup(config, job, taskdesc, command):
+ run = job["run"]
+ if run["checkout"]:
+ repo_configs = config.repo_configs
+ if len(repo_configs) > 1 and run["checkout"] is True:
+ raise Exception("Must explicitly specify checkouts with multiple repos.")
+ elif run["checkout"] is not True:
+ repo_configs = {
+ repo: attr.evolve(repo_configs[repo], **config)
+ for (repo, config) in run["checkout"].items()
+ }
+
+ support_vcs_checkout(
+ config,
+ job,
+ taskdesc,
+ repo_configs=repo_configs,
+ sparse=bool(run["sparse-profile"]),
+ )
+
+ vcs_path = taskdesc["worker"]["env"]["VCS_PATH"]
+ for repo_config in repo_configs.values():
+ checkout_path = path.join(vcs_path, repo_config.path)
+ command.append(f"--{repo_config.prefix}-checkout={checkout_path}")
+
+ if run["sparse-profile"]:
+ command.append(
+ "--{}-sparse-profile=build/sparse-profiles/{}".format(
+ repo_config.prefix,
+ run["sparse-profile"],
+ )
+ )
+
+ if "cwd" in run:
+ run["cwd"] = path.normpath(run["cwd"].format(checkout=vcs_path))
+ elif "cwd" in run and "{checkout}" in run["cwd"]:
+ raise Exception(
+ "Found `{{checkout}}` interpolation in `cwd` for task {name} "
+ "but the task doesn't have a checkout: {cwd}".format(
+ cwd=run["cwd"], name=job.get("name", job.get("label"))
+ )
+ )
+
+ if "cwd" in run:
+ command.extend(("--task-cwd", run["cwd"]))
+
+ taskdesc["worker"].setdefault("env", {})["MOZ_SCM_LEVEL"] = config.params["level"]
+
+
+worker_defaults = {
+ "cache-dotcache": False,
+ "checkout": True,
+ "sparse-profile": None,
+ "run-as-root": False,
+}
+
+
+def script_url(config, script):
+ if "MOZ_AUTOMATION" in os.environ and "TASK_ID" not in os.environ:
+ raise Exception("TASK_ID must be defined to use run-task on generic-worker")
+ task_id = os.environ.get("TASK_ID", "<TASK_ID>")
+ # use_proxy = False to avoid having all generic-workers turn on proxy
+ # Assumes the cluster allows anonymous downloads of public artifacts
+ tc_url = taskcluster.get_root_url(False)
+ # TODO: Use util/taskcluster.py:get_artifact_url once hack for Bug 1405889 is removed
+ return f"{tc_url}/api/queue/v1/task/{task_id}/artifacts/public/{script}"
+
+
+@run_job_using(
+ "docker-worker", "run-task", schema=run_task_schema, defaults=worker_defaults
+)
+def docker_worker_run_task(config, job, taskdesc):
+ run = job["run"]
+ worker = taskdesc["worker"] = job["worker"]
+ command = ["/usr/local/bin/run-task"]
+ common_setup(config, job, taskdesc, command)
+
+ if run.get("cache-dotcache"):
+ worker["caches"].append(
+ {
+ "type": "persistent",
+ "name": "{project}-dotcache".format(**config.params),
+ "mount-point": "{workdir}/.cache".format(**run),
+ "skip-untrusted": True,
+ }
+ )
+
+ run_command = run["command"]
+
+ command_context = run.get("command-context")
+ if command_context:
+ run_command = run_command.format(**command_context)
+
+ # dict is for the case of `{'task-reference': str}`.
+ if isinstance(run_command, (str, dict)):
+ exec_cmd = EXEC_COMMANDS[run.pop("exec-with", "bash")]
+ run_command = exec_cmd + [run_command]
+ if run["run-as-root"]:
+ command.extend(("--user", "root", "--group", "root"))
+ command.append("--")
+ command.extend(run_command)
+ worker["command"] = command
+
+
+@run_job_using(
+ "generic-worker", "run-task", schema=run_task_schema, defaults=worker_defaults
+)
+def generic_worker_run_task(config, job, taskdesc):
+ run = job["run"]
+ worker = taskdesc["worker"] = job["worker"]
+ is_win = worker["os"] == "windows"
+ is_mac = worker["os"] == "macosx"
+ is_bitbar = worker["os"] == "linux-bitbar"
+
+ if is_win:
+ command = ["C:/mozilla-build/python3/python3.exe", "run-task"]
+ elif is_mac:
+ command = ["/tools/python36/bin/python3", "run-task"]
+ else:
+ command = ["./run-task"]
+
+ common_setup(config, job, taskdesc, command)
+
+ worker.setdefault("mounts", [])
+ if run.get("cache-dotcache"):
+ worker["mounts"].append(
+ {
+ "cache-name": "{project}-dotcache".format(**config.params),
+ "directory": "{workdir}/.cache".format(**run),
+ }
+ )
+ worker["mounts"].append(
+ {
+ "content": {
+ "url": script_url(config, "run-task"),
+ },
+ "file": "./run-task",
+ }
+ )
+ if worker.get("env", {}).get("MOZ_FETCHES"):
+ worker["mounts"].append(
+ {
+ "content": {
+ "url": script_url(config, "fetch-content"),
+ },
+ "file": "./fetch-content",
+ }
+ )
+
+ run_command = run["command"]
+
+ if isinstance(run_command, str):
+ if is_win:
+ run_command = f'"{run_command}"'
+ exec_cmd = EXEC_COMMANDS[run.pop("exec-with", "bash")]
+ run_command = exec_cmd + [run_command]
+
+ command_context = run.get("command-context")
+ if command_context:
+ for i in range(len(run_command)):
+ run_command[i] = run_command[i].format(**command_context)
+
+ if run["run-as-root"]:
+ command.extend(("--user", "root", "--group", "root"))
+ command.append("--")
+ if is_bitbar:
+ # Use the bitbar wrapper script which sets up the device and adb
+ # environment variables
+ command.append("/builds/taskcluster/script.py")
+ command.extend(run_command)
+
+ if is_win:
+ worker["command"] = [" ".join(command)]
+ else:
+ worker["command"] = [
+ ["chmod", "+x", "run-task"],
+ command,
+ ]
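A sketch of a run-task job and the rough docker-worker command it becomes; the paths and mach invocation are hypothetical:

    run = {
        "using": "run-task",
        "cwd": "{checkout}",
        "command": "./mach build",
        "workdir": "/builds/worker",
        # cache-dotcache / checkout / sparse-profile / run-as-root are
        # filled in from worker_defaults
    }
    # docker_worker_run_task produces, roughly:
    #   ["/usr/local/bin/run-task",
    #    "--<prefix>-checkout=/builds/worker/checkouts/vcs/<repo-path>",
    #    "--task-cwd", "/builds/worker/checkouts/vcs",
    #    "--", "bash", "-cx", "./mach build"]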
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py
new file mode 100644
index 0000000000..5d4ee02f4a
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/job/toolchain.py
@@ -0,0 +1,174 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Support for running toolchain-building jobs via dedicated scripts
+"""
+
+from voluptuous import Any, Optional, Required
+
+import taskgraph
+from taskgraph.transforms.job import configure_taskdesc_for_run, run_job_using
+from taskgraph.transforms.job.common import (
+ docker_worker_add_artifacts,
+ generic_worker_add_artifacts,
+ get_vcsdir_name,
+)
+from taskgraph.util.hash import hash_paths
+from taskgraph.util.schema import Schema
+from taskgraph.util.shell import quote as shell_quote
+
+CACHE_TYPE = "toolchains.v3"
+
+toolchain_run_schema = Schema(
+ {
+ Required("using"): "toolchain-script",
+ # The script (in taskcluster/scripts/misc) to run.
+ Required("script"): str,
+ # Arguments to pass to the script.
+ Optional("arguments"): [str],
+ # Sparse profile to give to checkout using `run-task`. If given,
+ # a filename in `build/sparse-profiles`. Defaults to
+ # "toolchain-build", i.e., to
+ # `build/sparse-profiles/toolchain-build`. If `None`, instructs
+ # `run-task` to not use a sparse profile at all.
+ Required("sparse-profile"): Any(str, None),
+ # Paths/patterns pointing to files that influence the outcome of a
+ # toolchain build.
+ Optional("resources"): [str],
+ # Path to the artifact produced by the toolchain job
+ Required("toolchain-artifact"): str,
+ Optional(
+ "toolchain-alias",
+ description="An alias that can be used instead of the real toolchain job name in "
+ "fetch stanzas for jobs.",
+ ): Any(str, [str]),
+ Optional(
+ "toolchain-env",
+ description="Additional env variables to add to the worker when using this toolchain",
+ ): {str: object},
+ # Base work directory used to set up the task.
+ Required("workdir"): str,
+ }
+)
+
+
+def get_digest_data(config, run, taskdesc):
+ files = list(run.pop("resources", []))
+ # The script
+ files.append("taskcluster/scripts/toolchain/{}".format(run["script"]))
+
+ # Accumulate dependency hashes for index generation.
+ data = [hash_paths(config.graph_config.vcs_root, files)]
+
+ data.append(taskdesc["attributes"]["toolchain-artifact"])
+
+ # If the task uses an in-tree docker image, we want it to influence
+ # the index path as well. Ideally, the content of the docker image itself
+ # should have an influence, but at the moment, we can't get that
+ # information here. So use the docker image name as a proxy. Not a lot of
+ # changes to docker images actually have an impact on the resulting
+ # toolchain artifact, so we'll just rely on such important changes being
+ # accompanied by a docker image name change.
+ image = taskdesc["worker"].get("docker-image", {}).get("in-tree")
+ if image:
+ data.append(image)
+
+ # Likewise script arguments should influence the index.
+ args = run.get("arguments")
+ if args:
+ data.extend(args)
+ return data
+
+
+def common_toolchain(config, job, taskdesc, is_docker):
+ run = job["run"]
+
+ worker = taskdesc["worker"] = job["worker"]
+ worker["chain-of-trust"] = True
+
+ srcdir = get_vcsdir_name(worker["os"])
+
+ if is_docker:
+ # If the task doesn't have a docker-image, set a default
+ worker.setdefault("docker-image", {"in-tree": "toolchain-build"})
+
+ # Allow the job to specify where artifacts come from, but add
+ # public/build if it's not there already.
+ artifacts = worker.setdefault("artifacts", [])
+ if not any(artifact.get("name") == "public/build" for artifact in artifacts):
+ if is_docker:
+ docker_worker_add_artifacts(config, job, taskdesc)
+ else:
+ generic_worker_add_artifacts(config, job, taskdesc)
+
+ env = worker["env"]
+ env.update(
+ {
+ "MOZ_BUILD_DATE": config.params["moz_build_date"],
+ "MOZ_SCM_LEVEL": config.params["level"],
+ }
+ )
+
+ attributes = taskdesc.setdefault("attributes", {})
+ attributes["toolchain-artifact"] = run.pop("toolchain-artifact")
+ if "toolchain-alias" in run:
+ attributes["toolchain-alias"] = run.pop("toolchain-alias")
+ if "toolchain-env" in run:
+ attributes["toolchain-env"] = run.pop("toolchain-env")
+
+ if not taskgraph.fast:
+ name = taskdesc["label"].replace(f"{config.kind}-", "", 1)
+ taskdesc["cache"] = {
+ "type": CACHE_TYPE,
+ "name": name,
+ "digest-data": get_digest_data(config, run, taskdesc),
+ }
+
+ script = run.pop("script")
+ run["using"] = "run-task"
+ run["cwd"] = "{checkout}/.."
+
+ if script.endswith(".ps1"):
+ run["exec-with"] = "powershell"
+
+ command = [f"{srcdir}/taskcluster/scripts/toolchain/{script}"] + run.pop(
+ "arguments", []
+ )
+
+ if not is_docker:
+ # Don't quote the first item in the command because it purposely contains
+ # an environment variable that is not meant to be quoted.
+ if len(command) > 1:
+ command = command[0] + " " + shell_quote(*command[1:])
+ else:
+ command = command[0]
+
+ run["command"] = command
+
+ configure_taskdesc_for_run(config, job, taskdesc, worker["implementation"])
+
+
+toolchain_defaults = {
+ "sparse-profile": "toolchain-build",
+}
+
+
+@run_job_using(
+ "docker-worker",
+ "toolchain-script",
+ schema=toolchain_run_schema,
+ defaults=toolchain_defaults,
+)
+def docker_worker_toolchain(config, job, taskdesc):
+ common_toolchain(config, job, taskdesc, is_docker=True)
+
+
+@run_job_using(
+ "generic-worker",
+ "toolchain-script",
+ schema=toolchain_run_schema,
+ defaults=toolchain_defaults,
+)
+def generic_worker_toolchain(config, job, taskdesc):
+ common_toolchain(config, job, taskdesc, is_docker=False)
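A sketch of a toolchain-script job consumed by the handlers above; the script name and artifact path are hypothetical:

    run = {
        "using": "toolchain-script",
        "script": "build-clang.sh",  # taskcluster/scripts/toolchain/build-clang.sh
        "toolchain-artifact": "public/build/clang.tar.zst",
        "resources": ["build/build-clang/**"],
        "workdir": "/builds/worker",
    }
    # common_toolchain rewrites this into a run-task job whose command runs
    # {srcdir}/taskcluster/scripts/toolchain/build-clang.sh, records
    # attributes["toolchain-artifact"], and (unless taskgraph.fast) attaches
    # a cache entry keyed on get_digest_data().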
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/release_notifications.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/release_notifications.py
new file mode 100644
index 0000000000..0796b028e8
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/release_notifications.py
@@ -0,0 +1,100 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+Add notifications via taskcluster-notify for release tasks
+"""
+from string import Formatter
+
+from voluptuous import ALLOW_EXTRA, Any, Optional, Required
+
+from taskgraph.transforms.base import TransformSequence
+from taskgraph.util.schema import Schema, optionally_keyed_by, resolve_keyed_by
+
+RELEASE_NOTIFICATIONS_SCHEMA = Schema(
+ {
+ Optional("notifications"): {
+ Required("emails"): optionally_keyed_by("project", "level", [str]),
+ Required("subject"): str,
+ Optional("message"): str,
+ Optional("status-types"): [
+ Any(
+ "on-completed",
+ "on-defined",
+ "on-exception",
+ "on-failed",
+ "on-pending",
+ "on-resolved",
+ "on-running",
+ )
+ ],
+ },
+ },
+ extra=ALLOW_EXTRA,
+)
+
+
+transforms = TransformSequence()
+transforms.add_validate(RELEASE_NOTIFICATIONS_SCHEMA)
+
+
+class TitleCaseFormatter(Formatter):
+ """Support title formatter for strings"""
+
+ def convert_field(self, value, conversion):
+ if conversion == "t":
+ return str(value).title()
+ return super().convert_field(value, conversion)
+
+
+titleformatter = TitleCaseFormatter()
+
+
+@transforms.add
+def add_notifications(config, jobs):
+ for job in jobs:
+ label = "{}-{}".format(config.kind, job["name"])
+
+ notifications = job.pop("notifications", None)
+ if notifications:
+ resolve_keyed_by(
+ notifications,
+ "emails",
+ label,
+ **{
+ "level": config.params["level"],
+ "project": config.params["project"],
+ },
+ )
+ emails = notifications["emails"]
+ format_kwargs = dict(
+ task=job,
+ config=config.__dict__,
+ )
+ subject = titleformatter.format(notifications["subject"], **format_kwargs)
+ message = notifications.get("message", notifications["subject"])
+ message = titleformatter.format(message, **format_kwargs)
+ emails = [email.format(**format_kwargs) for email in emails]
+
+ # By default, we only send mail on success to avoid messages like 'blah is in the
+ # candidates dir' when cancelling graphs, dummy job failure, etc
+ status_types = notifications.get("status-types", ["on-completed"])
+ for s in status_types:
+ job.setdefault("routes", []).extend(
+ [f"notify.email.{email}.{s}" for email in emails]
+ )
+
+ # Customize the email subject to include release name and build number
+ job.setdefault("extra", {}).update(
+ {
+ "notify": {
+ "email": {
+ "subject": subject,
+ "content": message,
+ }
+ }
+ }
+ )
+
+ yield job
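A sketch of a notifications stanza and its effect; the address and subject are hypothetical:

    job = {
        "name": "push",
        "notifications": {
            "emails": ["release@example.com"],
            "subject": "{task[name]!t} task completed",
            "status-types": ["on-completed", "on-failed"],
        },
    }
    # add_notifications appends the routes
    #   notify.email.release@example.com.on-completed
    #   notify.email.release@example.com.on-failed
    # and sets extra["notify"]["email"]["subject"] to "Push task completed"
    # (the custom "t" conversion title-cases the field).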
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py
new file mode 100644
index 0000000000..8ab3762b8c
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/transforms/task.py
@@ -0,0 +1,1288 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+"""
+These transformations take a task description and turn it into a TaskCluster
+task definition (along with attributes, label, etc.). The input to these
+transformations is generic to any kind of task, but abstracts away some of the
+complexities of worker implementations, scopes, and treeherder annotations.
+"""
+
+
+import hashlib
+import os
+import re
+import time
+from copy import deepcopy
+
+import attr
+from voluptuous import All, Any, Extra, NotIn, Optional, Required
+
+from taskgraph import MAX_DEPENDENCIES
+from taskgraph.transforms.base import TransformSequence
+from taskgraph.util.hash import hash_path
+from taskgraph.util.keyed_by import evaluate_keyed_by
+from taskgraph.util.memoize import memoize
+from taskgraph.util.schema import (
+ OptimizationSchema,
+ Schema,
+ optionally_keyed_by,
+ resolve_keyed_by,
+ taskref_or_string,
+ validate_schema,
+)
+from taskgraph.util.treeherder import split_symbol
+from taskgraph.util.workertypes import worker_type_implementation
+
+from ..util import docker as dockerutil
+from ..util.workertypes import get_worker_type
+
+RUN_TASK = os.path.join(
+ os.path.dirname(os.path.dirname(__file__)), "run-task", "run-task"
+)
+
+
+@memoize
+def _run_task_suffix():
+ """String to append to cache names under control of run-task."""
+ return hash_path(RUN_TASK)[0:20]
+
+
+# A task description is a general description of a TaskCluster task
+task_description_schema = Schema(
+ {
+ # the label for this task
+ Required("label"): str,
+ # description of the task (for metadata)
+ Required("description"): str,
+ # attributes for this task
+ Optional("attributes"): {str: object},
+ # relative path (from config.path) to the file task was defined in
+ Optional("task-from"): str,
+ # dependencies of this task, keyed by name; these are passed through
+ # verbatim and subject to the interpretation of the Task's get_dependencies
+ # method.
+ Optional("dependencies"): {
+ All(
+ str,
+ NotIn(
+ ["self", "decision"],
+ "Can't use 'self` or 'decision' as dependency names.",
+ ),
+ ): object,
+ },
+ # Soft dependencies of this task, as a list of tasks labels
+ Optional("soft-dependencies"): [str],
+ # Dependencies that must be scheduled in order for this task to run.
+ Optional("if-dependencies"): [str],
+ Optional("requires"): Any("all-completed", "all-resolved"),
+ # expiration and deadline times, relative to task creation, with units
+ # (e.g., "14 days"). Defaults are set based on the project.
+ Optional("expires-after"): str,
+ Optional("deadline-after"): str,
+ # custom routes for this task; the default treeherder routes will be added
+ # automatically
+ Optional("routes"): [str],
+ # custom scopes for this task; any scopes required for the worker will be
+ # added automatically. The following parameters will be substituted in each
+ # scope:
+ # {level} -- the scm level of this push
+ # {project} -- the project of this push
+ Optional("scopes"): [str],
+ # Tags
+ Optional("tags"): {str: str},
+ # custom "task.extra" content
+ Optional("extra"): {str: object},
+ # treeherder-related information; see
+ # https://schemas.taskcluster.net/taskcluster-treeherder/v1/task-treeherder-config.json
+ # If not specified, no treeherder extra information or routes will be
+ # added to the task
+ Optional("treeherder"): {
+ # either a bare symbol, or "grp(sym)".
+ "symbol": str,
+ # the job kind
+ "kind": Any("build", "test", "other"),
+ # tier for this task
+ "tier": int,
+ # task platform, in the form platform/collection, used to set
+ # treeherder.machine.platform and treeherder.collection or
+ # treeherder.labels
+ "platform": str,
+ },
+ # information for indexing this build so its artifacts can be discovered;
+ # if omitted, the build will not be indexed.
+ Optional("index"): {
+ # the name of the product this build produces
+ "product": str,
+ # the names to use for this job in the TaskCluster index
+ "job-name": str,
+ # Type of gecko v2 index to use
+ "type": str,
+ # The rank that the task will receive in the TaskCluster
+ # index. A newly completed task supersedes the currently
+ # indexed task iff it has a higher rank. If unspecified,
+ # 'by-tier' behavior will be used.
+ "rank": Any(
+ # Rank is equal to the timestamp of the build_date for tier-1
+ # tasks, and zero for non-tier-1. This sorts tier-{2,3}
+ # builds below tier-1 in the index.
+ "by-tier",
+ # Rank is given as an integer constant (e.g. zero to make
+ # sure a task is last in the index).
+ int,
+ # Rank is equal to the timestamp of the build_date. This
+ # option can be used to override the 'by-tier' behavior
+ # for non-tier-1 tasks.
+ "build_date",
+ ),
+ },
+ # The `run_on_projects` attribute, defaulting to "all". This dictates the
+ # projects on which this task should be included in the target task set.
+ # See the attributes documentation for details.
+ Optional("run-on-projects"): optionally_keyed_by("build-platform", [str]),
+ Optional("run-on-tasks-for"): [str],
+ Optional("run-on-git-branches"): [str],
+ # The `always-target` attribute will cause the task to be included in the
+ # target_task_graph regardless of filtering. Tasks included in this manner
+ # will be candidates for optimization even when `optimize_target_tasks` is
+ # False, unless the task was also explicitly chosen by the target_tasks
+ # method.
+ Required("always-target"): bool,
+ # Optimization to perform on this task during the optimization phase.
+ # Optimizations are defined in taskcluster/taskgraph/optimize.py.
+ Required("optimization"): OptimizationSchema,
+ # the provisioner-id/worker-type for the task. The following parameters will
+ # be substituted in this string:
+ # {level} -- the scm level of this push
+ "worker-type": str,
+ # Whether the job should use sccache compiler caching.
+ Required("needs-sccache"): bool,
+ # information specific to the worker implementation that will run this task
+ Optional("worker"): {
+ Required("implementation"): str,
+ Extra: object,
+ },
+ }
+)
+
+TC_TREEHERDER_SCHEMA_URL = (
+ "https://github.com/taskcluster/taskcluster-treeherder/"
+ "blob/master/schemas/task-treeherder-config.yml"
+)
+
+
+UNKNOWN_GROUP_NAME = (
+ "Treeherder group {} (from {}) has no name; " "add it to taskcluster/ci/config.yml"
+)
+
+V2_ROUTE_TEMPLATES = [
+ "index.{trust-domain}.v2.{project}.latest.{product}.{job-name}",
+ "index.{trust-domain}.v2.{project}.pushdate.{build_date_long}.{product}.{job-name}",
+ "index.{trust-domain}.v2.{project}.pushlog-id.{pushlog_id}.{product}.{job-name}",
+ "index.{trust-domain}.v2.{project}.revision.{branch_rev}.{product}.{job-name}",
+]
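With hypothetical substitutions, the first template above expands as:

    # V2_ROUTE_TEMPLATES[0].format(**{"trust-domain": "gecko",
    #     "project": "try", "product": "firefox", "job-name": "linux64-opt",
    # })
    # == "index.gecko.v2.try.latest.firefox.linux64-opt"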
+
+# the roots of the treeherder routes
+TREEHERDER_ROUTE_ROOT = "tc-treeherder"
+
+
+def get_branch_rev(config):
+ return config.params["head_rev"]
+
+
+@memoize
+def get_default_priority(graph_config, project):
+ return evaluate_keyed_by(
+ graph_config["task-priority"], "Graph Config", {"project": project}
+ )
+
+
+# define a collection of payload builders, depending on the worker implementation
+payload_builders = {}
+
+
+@attr.s(frozen=True)
+class PayloadBuilder:
+ schema = attr.ib(type=Schema)
+ builder = attr.ib()
+
+
+def payload_builder(name, schema):
+ schema = Schema({Required("implementation"): name, Optional("os"): str}).extend(
+ schema
+ )
+
+ def wrap(func):
+ payload_builders[name] = PayloadBuilder(schema, func)
+ return func
+
+ return wrap
+
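A sketch of registering a payload builder with the decorator above; the "null-worker" name is hypothetical (Required is the voluptuous marker already imported in this module):

    @payload_builder("null-worker", schema={Required("env"): {str: str}})
    def build_null_payload(config, task, task_def):
        # A payload builder only fills in task_def["payload"] (plus any
        # scopes or features) from the validated task["worker"] section.
        task_def["payload"] = {"env": task["worker"]["env"]}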
+
+# define a collection of index builders, depending on the type implementation
+index_builders = {}
+
+
+def index_builder(name):
+ def wrap(func):
+ index_builders[name] = func
+ return func
+
+ return wrap
+
+
+UNSUPPORTED_INDEX_PRODUCT_ERROR = """\
+The index product {product} is not in the list of configured products in
+`taskcluster/ci/config.yml'.
+"""
+
+
+def verify_index(config, index):
+ product = index["product"]
+ if product not in config.graph_config["index"]["products"]:
+ raise Exception(UNSUPPORTED_INDEX_PRODUCT_ERROR.format(product=product))
+
+
+@payload_builder(
+ "docker-worker",
+ schema={
+ Required("os"): "linux",
+ # For tasks that will run in docker-worker, this is the name of the docker
+ # image or in-tree docker image to run the task in. If in-tree, then a
+ # dependency will be created automatically. This is generally
+ # `desktop-test`, or an image that acts an awful lot like it.
+ Required("docker-image"): Any(
+ # a raw Docker image path (repo/image:tag)
+ str,
+ # an in-tree generated docker image (from `taskcluster/docker/<name>`)
+ {"in-tree": str},
+ # an indexed docker image
+ {"indexed": str},
+ ),
+ # worker features that should be enabled
+ Required("relengapi-proxy"): bool,
+ Required("chain-of-trust"): bool,
+ Required("taskcluster-proxy"): bool,
+ Required("allow-ptrace"): bool,
+ Required("loopback-video"): bool,
+ Required("loopback-audio"): bool,
+ Required("docker-in-docker"): bool, # (aka 'dind')
+ Required("privileged"): bool,
+ Required("disable-seccomp"): bool,
+ # Paths to Docker volumes.
+ #
+ # For in-tree Docker images, volumes can be parsed from Dockerfile.
+ # This only works for the Dockerfile itself: if a volume is defined in
+ # a base image, it will need to be declared here. Out-of-tree Docker
+ # images will also require explicit volume annotation.
+ #
+ # Caches are often mounted to the same path as Docker volumes. In this
+ # case, they take precedence over a Docker volume. But a volume still
+ # needs to be declared for the path.
+ Optional("volumes"): [str],
+ # caches to set up for the task
+ Optional("caches"): [
+ {
+ # only one type is supported by any of the workers right now
+ "type": "persistent",
+ # name of the cache, allowing re-use by subsequent tasks naming the
+ # same cache
+ "name": str,
+ # location in the task image where the cache will be mounted
+ "mount-point": str,
+ # Whether the cache is not used in untrusted environments
+ # (like the Try repo).
+ Optional("skip-untrusted"): bool,
+ }
+ ],
+ # artifacts to extract from the task image after completion
+ Optional("artifacts"): [
+ {
+ # type of artifact -- simple file, or recursive directory
+ "type": Any("file", "directory"),
+ # task image path from which to read artifact
+ "path": str,
+ # name of the produced artifact (root of the names for
+ # type=directory)
+ "name": str,
+ }
+ ],
+ # environment variables
+ Required("env"): {str: taskref_or_string},
+ # the command to run; if not given, docker-worker will default to the
+ # command in the docker image
+ Optional("command"): [taskref_or_string],
+ # the maximum time to run, in seconds
+ Required("max-run-time"): int,
+ # the exit status code(s) that indicates the task should be retried
+ Optional("retry-exit-status"): [int],
+ # the exit status code(s) that indicates the caches used by the task
+ # should be purged
+ Optional("purge-caches-exit-status"): [int],
+ # Whether any artifacts are assigned to this worker
+ Optional("skip-artifacts"): bool,
+ },
+)
+def build_docker_worker_payload(config, task, task_def):
+ worker = task["worker"]
+ level = int(config.params["level"])
+
+ image = worker["docker-image"]
+ if isinstance(image, dict):
+ if "in-tree" in image:
+ name = image["in-tree"]
+ docker_image_task = "build-docker-image-" + image["in-tree"]
+ task.setdefault("dependencies", {})["docker-image"] = docker_image_task
+
+ image = {
+ "path": "public/image.tar.zst",
+ "taskId": {"task-reference": "<docker-image>"},
+ "type": "task-image",
+ }
+
+ # Find VOLUME in Dockerfile.
+ volumes = dockerutil.parse_volumes(name)
+ for v in sorted(volumes):
+ if v in worker["volumes"]:
+ raise Exception(
+ "volume %s already defined; "
+ "if it is defined in a Dockerfile, "
+ "it does not need to be specified in the "
+ "worker definition" % v
+ )
+
+ worker["volumes"].append(v)
+
+ elif "indexed" in image:
+ image = {
+ "path": "public/image.tar.zst",
+ "namespace": image["indexed"],
+ "type": "indexed-image",
+ }
+ else:
+ raise Exception("unknown docker image type")
+
+ features = {}
+
+ if worker.get("relengapi-proxy"):
+ features["relengAPIProxy"] = True
+
+ if worker.get("taskcluster-proxy"):
+ features["taskclusterProxy"] = True
+
+ if worker.get("allow-ptrace"):
+ features["allowPtrace"] = True
+ task_def["scopes"].append("docker-worker:feature:allowPtrace")
+
+ if worker.get("chain-of-trust"):
+ features["chainOfTrust"] = True
+
+ if worker.get("docker-in-docker"):
+ features["dind"] = True
+
+ if task.get("needs-sccache"):
+ features["taskclusterProxy"] = True
+ task_def["scopes"].append(
+ "assume:project:taskcluster:{trust_domain}:level-{level}-sccache-buckets".format(
+ trust_domain=config.graph_config["trust-domain"],
+ level=config.params["level"],
+ )
+ )
+ worker["env"]["USE_SCCACHE"] = "1"
+ # Disable sccache idle shutdown.
+ worker["env"]["SCCACHE_IDLE_TIMEOUT"] = "0"
+ else:
+ worker["env"]["SCCACHE_DISABLE"] = "1"
+
+ capabilities = {}
+
+ for lo in "audio", "video":
+ if worker.get("loopback-" + lo):
+ capitalized = "loopback" + lo.capitalize()
+ devices = capabilities.setdefault("devices", {})
+ devices[capitalized] = True
+ task_def["scopes"].append("docker-worker:capability:device:" + capitalized)
+
+ if worker.get("privileged"):
+ capabilities["privileged"] = True
+ task_def["scopes"].append("docker-worker:capability:privileged")
+
+ if worker.get("disable-seccomp"):
+ capabilities["disableSeccomp"] = True
+ task_def["scopes"].append("docker-worker:capability:disableSeccomp")
+
+ task_def["payload"] = payload = {
+ "image": image,
+ "env": worker["env"],
+ }
+ if "command" in worker:
+ payload["command"] = worker["command"]
+
+ if "max-run-time" in worker:
+ payload["maxRunTime"] = worker["max-run-time"]
+
+ run_task = payload.get("command", [""])[0].endswith("run-task")
+
+ # run-task exits EXIT_PURGE_CACHES if there is a problem with caches.
+ # Automatically retry the tasks and purge caches if we see this exit
+ # code.
+ # TODO move this closer to code adding run-task once bug 1469697 is
+ # addressed.
+ if run_task:
+ worker.setdefault("retry-exit-status", []).append(72)
+ worker.setdefault("purge-caches-exit-status", []).append(72)
+
+ payload["onExitStatus"] = {}
+ if "retry-exit-status" in worker:
+ payload["onExitStatus"]["retry"] = worker["retry-exit-status"]
+ if "purge-caches-exit-status" in worker:
+ payload["onExitStatus"]["purgeCaches"] = worker["purge-caches-exit-status"]
+
+ if "artifacts" in worker:
+ artifacts = {}
+ for artifact in worker["artifacts"]:
+ artifacts[artifact["name"]] = {
+ "path": artifact["path"],
+ "type": artifact["type"],
+ "expires": task_def["expires"], # always expire with the task
+ }
+ payload["artifacts"] = artifacts
+
+ if isinstance(worker.get("docker-image"), str):
+ out_of_tree_image = worker["docker-image"]
+ else:
+ out_of_tree_image = None
+ image = worker.get("docker-image", {}).get("in-tree")
+
+ if "caches" in worker:
+ caches = {}
+
+ # run-task knows how to validate caches.
+ #
+ # To help ensure new run-task features and bug fixes don't interfere
+ # with existing caches, we seed the hash of run-task into cache names.
+ # So, any time run-task changes, we should get a fresh set of caches.
+ # This means run-task can make changes to cache interaction at any time
+ # without regards for backwards or future compatibility.
+ #
+ # But this mechanism only works for in-tree Docker images that are built
+ # with the current run-task! For out-of-tree Docker images, we have no
+ # way of knowing their content of run-task. So, in addition to varying
+ # cache names by the contents of run-task, we also take the Docker image
+ # name into consideration. This means that different Docker images will
+ # never share the same cache. This is a bit unfortunate. But it is the
+ # safest thing to do. Fortunately, most images are defined in-tree.
+ #
+ # For out-of-tree Docker images, we don't strictly need to incorporate
+ # the run-task content into the cache name. However, doing so preserves
+ # the mechanism whereby changing run-task results in new caches
+ # everywhere.
+
+ # As an additional mechanism to force the use of different caches, the
+ # string literal in the variable below can be changed. This is
+ # preferred to changing run-task because it doesn't require images
+ # to be rebuilt.
+ cache_version = "v3"
+
+ if run_task:
+ suffix = f"{cache_version}-{_run_task_suffix()}"
+
+ if out_of_tree_image:
+ name_hash = hashlib.sha256(
+ out_of_tree_image.encode("utf-8")
+ ).hexdigest()
+ suffix += name_hash[0:12]
+
+ else:
+ suffix = cache_version
+
+ skip_untrusted = config.params.is_try() or level == 1
+
+ for cache in worker["caches"]:
+ # Some caches aren't enabled in environments where we can't
+ # guarantee certain behavior. Filter those out.
+ if cache.get("skip-untrusted") and skip_untrusted:
+ continue
+
+ name = "{trust_domain}-level-{level}-{name}-{suffix}".format(
+ trust_domain=config.graph_config["trust-domain"],
+ level=config.params["level"],
+ name=cache["name"],
+ suffix=suffix,
+ )
+ caches[name] = cache["mount-point"]
+ task_def["scopes"].append("docker-worker:cache:%s" % name)
+
+ # Assertion: only run-task is interested in this.
+ if run_task:
+ payload["env"]["TASKCLUSTER_CACHES"] = ";".join(sorted(caches.values()))
+
+ payload["cache"] = caches
+
+ # And send down volumes information to run-task as well.
+ if run_task and worker.get("volumes"):
+ payload["env"]["TASKCLUSTER_VOLUMES"] = ";".join(sorted(worker["volumes"]))
+
+ if payload.get("cache") and skip_untrusted:
+ payload["env"]["TASKCLUSTER_UNTRUSTED_CACHES"] = "1"
+
+ if features:
+ payload["features"] = features
+ if capabilities:
+ payload["capabilities"] = capabilities
+
+ check_caches_are_volumes(task)
+
+
+@payload_builder(
+ "generic-worker",
+ schema={
+ Required("os"): Any("windows", "macosx", "linux", "linux-bitbar"),
+ # see http://schemas.taskcluster.net/generic-worker/v1/payload.json
+ # and https://docs.taskcluster.net/reference/workers/generic-worker/payload
+ # command is a list of commands to run, sequentially
+ # on Windows, each command is a string, on OS X and Linux, each command is
+ # a string array
+ Required("command"): Any(
+ [taskref_or_string], [[taskref_or_string]] # Windows # Linux / OS X
+ ),
+        # artifacts to extract from the task image after completion; note that
+        # artifact names are optional for the generic worker (the path is used
+        # as the name when none is given)
+ Optional("artifacts"): [
+ {
+ # type of artifact -- simple file, or recursive directory
+ "type": Any("file", "directory"),
+ # filesystem path from which to read artifact
+ "path": str,
+ # if not specified, path is used for artifact name
+ Optional("name"): str,
+ }
+ ],
+ # Directories and/or files to be mounted.
+ # The actual allowed combinations are stricter than the model below,
+ # but this provides a simple starting point.
+ # See https://docs.taskcluster.net/reference/workers/generic-worker/payload
+ Optional("mounts"): [
+ {
+ # A unique name for the cache volume, implies writable cache directory
+ # (otherwise mount is a read-only file or directory).
+ Optional("cache-name"): str,
+ # Optional content for pre-loading cache, or mandatory content for
+ # read-only file or directory. Pre-loaded content can come from either
+ # a task artifact or from a URL.
+ Optional("content"): {
+ # *** Either (artifact and task-id) or url must be specified. ***
+ # Artifact name that contains the content.
+ Optional("artifact"): str,
+ # Task ID that has the artifact that contains the content.
+ Optional("task-id"): taskref_or_string,
+ # URL that supplies the content in response to an unauthenticated
+ # GET request.
+ Optional("url"): str,
+ },
+ # *** Either file or directory must be specified. ***
+ # If mounting a cache or read-only directory, the filesystem location of
+ # the directory should be specified as a relative path to the task
+ # directory here.
+ Optional("directory"): str,
+ # If mounting a file, specify the relative path within the task
+ # directory to mount the file (the file will be read only).
+ Optional("file"): str,
+ # Required if and only if `content` is specified and mounting a
+ # directory (not a file). This should be the archive format of the
+ # content (either pre-loaded cache or read-only directory).
+ Optional("format"): Any("rar", "tar.bz2", "tar.gz", "zip"),
+ }
+ ],
+ # environment variables
+ Required("env"): {str: taskref_or_string},
+ # the maximum time to run, in seconds
+ Required("max-run-time"): int,
+ # os user groups for test task workers
+ Optional("os-groups"): [str],
+        # feature for test tasks to run as administrator
+ Optional("run-as-administrator"): bool,
+ # optional features
+ Required("chain-of-trust"): bool,
+ Optional("taskcluster-proxy"): bool,
+ # Whether any artifacts are assigned to this worker
+ Optional("skip-artifacts"): bool,
+ },
+)
+def build_generic_worker_payload(config, task, task_def):
+ worker = task["worker"]
+
+ task_def["payload"] = {
+ "command": worker["command"],
+ "maxRunTime": worker["max-run-time"],
+ }
+
+ on_exit_status = {}
+ if "retry-exit-status" in worker:
+ on_exit_status["retry"] = worker["retry-exit-status"]
+ if worker["os"] == "windows":
+ on_exit_status.setdefault("retry", []).extend(
+ [
+ # These codes (on windows) indicate a process interruption,
+ # rather than a task run failure. See bug 1544403.
+ 1073807364, # process force-killed due to system shutdown
+ 3221225786, # sigint (any interrupt)
+ ]
+ )
+ if on_exit_status:
+ task_def["payload"]["onExitStatus"] = on_exit_status
+
+ env = worker.get("env", {})
+
+ if task.get("needs-sccache"):
+ env["USE_SCCACHE"] = "1"
+ # Disable sccache idle shutdown.
+ env["SCCACHE_IDLE_TIMEOUT"] = "0"
+ else:
+ env["SCCACHE_DISABLE"] = "1"
+
+ if env:
+ task_def["payload"]["env"] = env
+
+ artifacts = []
+
+ for artifact in worker.get("artifacts", []):
+ a = {
+ "path": artifact["path"],
+ "type": artifact["type"],
+ }
+ if "name" in artifact:
+ a["name"] = artifact["name"]
+ artifacts.append(a)
+
+ if artifacts:
+ task_def["payload"]["artifacts"] = artifacts
+
+ # Need to copy over mounts, but rename keys to respect naming convention
+ # * 'cache-name' -> 'cacheName'
+ # * 'task-id' -> 'taskId'
+ # All other key names are already suitable, and don't need renaming.
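+    # For illustration (editor's note): a mount {"cache-name": "checkouts"}
+    # becomes {"cacheName": "<trust-domain>-level-<level>-checkouts"}, and a
+    # matching generic-worker:cache:* scope is added.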
+ mounts = deepcopy(worker.get("mounts", []))
+ for mount in mounts:
+ if "cache-name" in mount:
+ mount["cacheName"] = "{trust_domain}-level-{level}-{name}".format(
+ trust_domain=config.graph_config["trust-domain"],
+ level=config.params["level"],
+ name=mount.pop("cache-name"),
+ )
+ task_def["scopes"].append(
+ "generic-worker:cache:{}".format(mount["cacheName"])
+ )
+ if "content" in mount:
+ if "task-id" in mount["content"]:
+ mount["content"]["taskId"] = mount["content"].pop("task-id")
+ if "artifact" in mount["content"]:
+ if not mount["content"]["artifact"].startswith("public/"):
+ task_def["scopes"].append(
+ "queue:get-artifact:{}".format(mount["content"]["artifact"])
+ )
+
+ if mounts:
+ task_def["payload"]["mounts"] = mounts
+
+ if worker.get("os-groups"):
+ task_def["payload"]["osGroups"] = worker["os-groups"]
+ task_def["scopes"].extend(
+ [
+ "generic-worker:os-group:{}/{}".format(task["worker-type"], group)
+ for group in worker["os-groups"]
+ ]
+ )
+
+ features = {}
+
+ if worker.get("chain-of-trust"):
+ features["chainOfTrust"] = True
+
+ if worker.get("taskcluster-proxy"):
+ features["taskclusterProxy"] = True
+
+ if worker.get("run-as-administrator", False):
+ features["runAsAdministrator"] = True
+ task_def["scopes"].append(
+ "generic-worker:run-as-administrator:{}".format(task["worker-type"]),
+ )
+
+ if features:
+ task_def["payload"]["features"] = features
+
+
+@payload_builder(
+ "beetmover",
+ schema={
+ # the maximum time to run, in seconds
+ Required("max-run-time"): int,
+ # locale key, if this is a locale beetmover job
+ Optional("locale"): str,
+ Optional("partner-public"): bool,
+ Required("release-properties"): {
+ "app-name": str,
+ "app-version": str,
+ "branch": str,
+ "build-id": str,
+ "hash-type": str,
+ "platform": str,
+ },
+ # list of artifact URLs for the artifacts that should be beetmoved
+ Required("upstream-artifacts"): [
+ {
+ # taskId of the task with the artifact
+ Required("taskId"): taskref_or_string,
+ # type of signing task (for CoT)
+ Required("taskType"): str,
+ # Paths to the artifacts to sign
+ Required("paths"): [str],
+ # locale is used to map upload path and allow for duplicate simple names
+ Required("locale"): str,
+ }
+ ],
+ Optional("artifact-map"): object,
+ },
+)
+def build_beetmover_payload(config, task, task_def):
+ worker = task["worker"]
+ release_properties = worker["release-properties"]
+
+ task_def["payload"] = {
+ "maxRunTime": worker["max-run-time"],
+ "releaseProperties": {
+ "appName": release_properties["app-name"],
+ "appVersion": release_properties["app-version"],
+ "branch": release_properties["branch"],
+ "buildid": release_properties["build-id"],
+ "hashType": release_properties["hash-type"],
+ "platform": release_properties["platform"],
+ },
+ "upload_date": config.params["build_date"],
+ "upstreamArtifacts": worker["upstream-artifacts"],
+ }
+ if worker.get("locale"):
+ task_def["payload"]["locale"] = worker["locale"]
+ if worker.get("artifact-map"):
+ task_def["payload"]["artifactMap"] = worker["artifact-map"]
+ if worker.get("partner-public"):
+ task_def["payload"]["is_partner_repack_public"] = worker["partner-public"]
+
+
+@payload_builder(
+ "invalid",
+ schema={
+ # an invalid task is one which should never actually be created; this is used in
+ # release automation on branches where the task just doesn't make sense
+ Extra: object,
+ },
+)
+def build_invalid_payload(config, task, task_def):
+ task_def["payload"] = "invalid task - should never be created"
+
+
+@payload_builder(
+ "always-optimized",
+ schema={
+ Extra: object,
+ },
+)
+@payload_builder("succeed", schema={})
+def build_dummy_payload(config, task, task_def):
+ task_def["payload"] = {}
+
+
+transforms = TransformSequence()
+
+
+@transforms.add
+def set_implementation(config, tasks):
+ """
+ Set the worker implementation based on the worker-type alias.
+ """
+ for task in tasks:
+ worker = task.setdefault("worker", {})
+ if "implementation" in task["worker"]:
+ yield task
+ continue
+
+ impl, os = worker_type_implementation(config.graph_config, task["worker-type"])
+
+ tags = task.setdefault("tags", {})
+ tags["worker-implementation"] = impl
+ if os:
+ task["tags"]["os"] = os
+ worker["implementation"] = impl
+ if os:
+ worker["os"] = os
+
+ yield task
+
+
+@transforms.add
+def set_defaults(config, tasks):
+ for task in tasks:
+ task.setdefault("always-target", False)
+ task.setdefault("optimization", None)
+ task.setdefault("needs-sccache", False)
+
+ worker = task["worker"]
+ if worker["implementation"] in ("docker-worker",):
+ worker.setdefault("relengapi-proxy", False)
+ worker.setdefault("chain-of-trust", False)
+ worker.setdefault("taskcluster-proxy", False)
+ worker.setdefault("allow-ptrace", False)
+ worker.setdefault("loopback-video", False)
+ worker.setdefault("loopback-audio", False)
+ worker.setdefault("docker-in-docker", False)
+ worker.setdefault("privileged", False)
+ worker.setdefault("disable-seccomp", False)
+ worker.setdefault("volumes", [])
+ worker.setdefault("env", {})
+ if "caches" in worker:
+ for c in worker["caches"]:
+ c.setdefault("skip-untrusted", False)
+ elif worker["implementation"] == "generic-worker":
+ worker.setdefault("env", {})
+ worker.setdefault("os-groups", [])
+ if worker["os-groups"] and worker["os"] != "windows":
+ raise Exception(
+ "os-groups feature of generic-worker is only supported on "
+ "Windows, not on {}".format(worker["os"])
+ )
+ worker.setdefault("chain-of-trust", False)
+ elif worker["implementation"] in (
+ "scriptworker-signing",
+ "beetmover",
+ "beetmover-push-to-release",
+ "beetmover-maven",
+ ):
+ worker.setdefault("max-run-time", 600)
+ elif worker["implementation"] == "push-apk":
+ worker.setdefault("commit", False)
+
+ yield task
+
+
+@transforms.add
+def task_name_from_label(config, tasks):
+ for task in tasks:
+ if "label" not in task:
+ if "name" not in task:
+ raise Exception("task has neither a name nor a label")
+ task["label"] = "{}-{}".format(config.kind, task["name"])
+ if task.get("name"):
+ del task["name"]
+ yield task
+
+
+@transforms.add
+def validate(config, tasks):
+ for task in tasks:
+ validate_schema(
+ task_description_schema,
+ task,
+ "In task {!r}:".format(task.get("label", "?no-label?")),
+ )
+ validate_schema(
+ payload_builders[task["worker"]["implementation"]].schema,
+ task["worker"],
+ "In task.run {!r}:".format(task.get("label", "?no-label?")),
+ )
+ yield task
+
+
+@index_builder("generic")
+def add_generic_index_routes(config, task):
+ index = task.get("index")
+ routes = task.setdefault("routes", [])
+
+ verify_index(config, index)
+
+ subs = config.params.copy()
+ subs["job-name"] = index["job-name"]
+ subs["build_date_long"] = time.strftime(
+ "%Y.%m.%d.%Y%m%d%H%M%S", time.gmtime(config.params["build_date"])
+ )
+ subs["product"] = index["product"]
+ subs["trust-domain"] = config.graph_config["trust-domain"]
+ subs["branch_rev"] = get_branch_rev(config)
+
+ for tpl in V2_ROUTE_TEMPLATES:
+ routes.append(tpl.format(**subs))
+
+ return task
+
+
+@transforms.add
+def add_index_routes(config, tasks):
+ for task in tasks:
+ index = task.get("index", {})
+
+ # The default behavior is to rank tasks according to their tier
+ extra_index = task.setdefault("extra", {}).setdefault("index", {})
+ rank = index.get("rank", "by-tier")
+
+ if rank == "by-tier":
+ # rank is zero for non-tier-1 tasks and based on pushid for others;
+ # this sorts tier-{2,3} builds below tier-1 in the index
+ tier = task.get("treeherder", {}).get("tier", 3)
+ extra_index["rank"] = 0 if tier > 1 else int(config.params["build_date"])
+ elif rank == "build_date":
+ extra_index["rank"] = int(config.params["build_date"])
+ else:
+ extra_index["rank"] = rank
+
+ if not index:
+ yield task
+ continue
+
+ index_type = index.get("type", "generic")
+ if index_type not in index_builders:
+ raise ValueError(f"Unknown index-type {index_type}")
+ task = index_builders[index_type](config, task)
+
+ del task["index"]
+ yield task
+
+
+@transforms.add
+def build_task(config, tasks):
+ for task in tasks:
+ level = str(config.params["level"])
+
+ provisioner_id, worker_type = get_worker_type(
+ config.graph_config,
+ task["worker-type"],
+ level,
+ )
+ task["worker-type"] = "/".join([provisioner_id, worker_type])
+ project = config.params["project"]
+
+ routes = task.get("routes", [])
+ scopes = [
+ s.format(level=level, project=project) for s in task.get("scopes", [])
+ ]
+
+ # set up extra
+ extra = task.get("extra", {})
+ extra["parent"] = os.environ.get("TASK_ID", "")
+ task_th = task.get("treeherder")
+ if task_th:
+ extra.setdefault("treeherder-platform", task_th["platform"])
+ treeherder = extra.setdefault("treeherder", {})
+
+ machine_platform, collection = task_th["platform"].split("/", 1)
+ treeherder["machine"] = {"platform": machine_platform}
+ treeherder["collection"] = {collection: True}
+
+ group_names = config.graph_config["treeherder"]["group-names"]
+ groupSymbol, symbol = split_symbol(task_th["symbol"])
+ if groupSymbol != "?":
+ treeherder["groupSymbol"] = groupSymbol
+ if groupSymbol not in group_names:
+ path = os.path.join(config.path, task.get("task-from", ""))
+ raise Exception(UNKNOWN_GROUP_NAME.format(groupSymbol, path))
+ treeherder["groupName"] = group_names[groupSymbol]
+ treeherder["symbol"] = symbol
+ if len(symbol) > 25 or len(groupSymbol) > 25:
+ raise RuntimeError(
+ "Treeherder group and symbol names must not be longer than "
+ "25 characters: {} (see {})".format(
+ task_th["symbol"],
+ TC_TREEHERDER_SCHEMA_URL,
+ )
+ )
+ treeherder["jobKind"] = task_th["kind"]
+ treeherder["tier"] = task_th["tier"]
+
+ branch_rev = get_branch_rev(config)
+
+ if config.params["tasks_for"].startswith("github-pull-request"):
+ # In the past we used `project` for this, but that ends up being
+ # set to the repository name of the _head_ repo, which is not correct
+ # (and causes scope issues) if it doesn't match the name of the
+ # base repo
+ base_project = config.params["base_repository"].split("/")[-1]
+ if base_project.endswith(".git"):
+ base_project = base_project[:-4]
+ th_project_suffix = "-pr"
+ else:
+ base_project = config.params["project"]
+ th_project_suffix = ""
+
+ routes.append(
+ "{}.v2.{}.{}.{}".format(
+ TREEHERDER_ROUTE_ROOT,
+ base_project + th_project_suffix,
+ branch_rev,
+ config.params["pushlog_id"],
+ )
+ )
+
+ if "expires-after" not in task:
+ task["expires-after"] = "28 days" if config.params.is_try() else "1 year"
+
+ if "deadline-after" not in task:
+ task["deadline-after"] = "1 day"
+
+ if "priority" not in task:
+ task["priority"] = get_default_priority(
+ config.graph_config, config.params["project"]
+ )
+
+ tags = task.get("tags", {})
+ tags.update(
+ {
+ "createdForUser": config.params["owner"],
+ "kind": config.kind,
+ "label": task["label"],
+ }
+ )
+
+ task_def = {
+ "provisionerId": provisioner_id,
+ "workerType": worker_type,
+ "routes": routes,
+ "created": {"relative-datestamp": "0 seconds"},
+ "deadline": {"relative-datestamp": task["deadline-after"]},
+ "expires": {"relative-datestamp": task["expires-after"]},
+ "scopes": scopes,
+ "metadata": {
+ "description": task["description"],
+ "name": task["label"],
+ "owner": config.params["owner"],
+ "source": config.params.file_url(config.path, pretty=True),
+ },
+ "extra": extra,
+ "tags": tags,
+ "priority": task["priority"],
+ }
+
+ if task.get("requires", None):
+ task_def["requires"] = task["requires"]
+
+ if task_th:
+ # link back to treeherder in description
+ th_push_link = (
+ "https://treeherder.mozilla.org/#/jobs?repo={}&revision={}".format(
+ config.params["project"] + th_project_suffix, branch_rev
+ )
+ )
+ task_def["metadata"]["description"] += " ([Treeherder push]({}))".format(
+ th_push_link
+ )
+
+ # add the payload and adjust anything else as required (e.g., scopes)
+ payload_builders[task["worker"]["implementation"]].builder(
+ config, task, task_def
+ )
+
+ attributes = task.get("attributes", {})
+ # Resolve run-on-projects
+ build_platform = attributes.get("build_platform")
+ resolve_keyed_by(
+ task,
+ "run-on-projects",
+ item_name=task["label"],
+ **{"build-platform": build_platform},
+ )
+ attributes["run_on_projects"] = task.get("run-on-projects", ["all"])
+ attributes["run_on_tasks_for"] = task.get("run-on-tasks-for", ["all"])
+        # We don't want to pollute non-git repos with this attribute. Moreover,
+        # target_tasks already assumes the default value is ['all'].
+ if task.get("run-on-git-branches"):
+ attributes["run_on_git_branches"] = task["run-on-git-branches"]
+
+ attributes["always_target"] = task["always-target"]
+
+ # Set MOZ_AUTOMATION on all jobs.
+ if task["worker"]["implementation"] in (
+ "generic-worker",
+ "docker-worker",
+ ):
+ payload = task_def.get("payload")
+ if payload:
+ env = payload.setdefault("env", {})
+ env["MOZ_AUTOMATION"] = "1"
+
+ dependencies = task.get("dependencies", {})
+ if_dependencies = task.get("if-dependencies", [])
+ if if_dependencies:
+ for i, dep in enumerate(if_dependencies):
+ if dep in dependencies:
+ if_dependencies[i] = dependencies[dep]
+ continue
+
+ raise Exception(
+ "{label} specifies '{dep}' in if-dependencies, "
+ "but {dep} is not a dependency!".format(
+ label=task["label"], dep=dep
+ )
+ )
+
+ yield {
+ "label": task["label"],
+ "description": task["description"],
+ "task": task_def,
+ "dependencies": dependencies,
+ "if-dependencies": if_dependencies,
+ "soft-dependencies": task.get("soft-dependencies", []),
+ "attributes": attributes,
+ "optimization": task.get("optimization", None),
+ }
+
+
+@transforms.add
+def add_github_checks(config, tasks):
+ """
+ For git repositories, add checks route to all tasks.
+
+ This will be replaced by a configurable option in the future.
+ """
+    if config.params["repository_type"] != "git":
+        for task in tasks:
+            yield task
+        return
+
+ for task in tasks:
+ task["task"]["routes"].append("checks")
+ yield task
+
+
+@transforms.add
+def chain_of_trust(config, tasks):
+ for task in tasks:
+ if task["task"].get("payload", {}).get("features", {}).get("chainOfTrust"):
+ image = task.get("dependencies", {}).get("docker-image")
+ if image:
+ cot = (
+ task["task"].setdefault("extra", {}).setdefault("chainOfTrust", {})
+ )
+ cot.setdefault("inputs", {})["docker-image"] = {
+ "task-reference": "<docker-image>"
+ }
+ yield task
+
+
+@transforms.add
+def check_task_identifiers(config, tasks):
+ """Ensures that all tasks have well defined identifiers:
+ ``^[a-zA-Z0-9_-]{1,38}$``
+ """
+ e = re.compile("^[a-zA-Z0-9_-]{1,38}$")
+ for task in tasks:
+ for attrib in ("workerType", "provisionerId"):
+ if not e.match(task["task"][attrib]):
+ raise Exception(
+ "task {}.{} is not a valid identifier: {}".format(
+ task["label"], attrib, task["task"][attrib]
+ )
+ )
+ yield task
+
+
+@transforms.add
+def check_task_dependencies(config, tasks):
+ """Ensures that tasks don't have more than 100 dependencies."""
+ for task in tasks:
+ if len(task["dependencies"]) > MAX_DEPENDENCIES:
+ raise Exception(
+ "task {}/{} has too many dependencies ({} > {})".format(
+ config.kind,
+ task["label"],
+ len(task["dependencies"]),
+ MAX_DEPENDENCIES,
+ )
+ )
+ yield task
+
+
+def check_caches_are_volumes(task):
+ """Ensures that all cache paths are defined as volumes.
+
+ Caches and volumes are the only filesystem locations whose content
+ isn't defined by the Docker image itself. Some caches are optional
+    depending on the job environment. We want paths that are potentially
+    caches to behave as similarly as possible regardless of whether a cache
+    is used. To help enforce this, we require that all paths used as caches
+    be declared as Docker volumes. This check won't catch all offenders,
+    but it is better than nothing.
+ """
+ volumes = set(task["worker"]["volumes"])
+ paths = {c["mount-point"] for c in task["worker"].get("caches", [])}
+ missing = paths - volumes
+
+ if not missing:
+ return
+
+ raise Exception(
+ "task %s (image %s) has caches that are not declared as "
+ "Docker volumes: %s "
+ "(have you added them as VOLUMEs in the Dockerfile?)"
+ % (task["label"], task["worker"]["docker-image"], ", ".join(sorted(missing)))
+ )
+
+
+@transforms.add
+def check_run_task_caches(config, tasks):
+ """Audit for caches requiring run-task.
+
+ run-task manages caches in certain ways. If a cache managed by run-task
+ is used by a non run-task task, it could cause problems. So we audit for
+ that and make sure certain cache names are exclusive to run-task.
+
+ IF YOU ARE TEMPTED TO MAKE EXCLUSIONS TO THIS POLICY, YOU ARE LIKELY
+ CONTRIBUTING TECHNICAL DEBT AND WILL HAVE TO SOLVE MANY OF THE PROBLEMS
+ THAT RUN-TASK ALREADY SOLVES. THINK LONG AND HARD BEFORE DOING THAT.
+ """
+ re_reserved_caches = re.compile(
+ """^
+ (checkouts|tooltool-cache)
+ """,
+ re.VERBOSE,
+ )
+
+ cache_prefix = "{trust_domain}-level-{level}-".format(
+ trust_domain=config.graph_config["trust-domain"],
+ level=config.params["level"],
+ )
+
+ suffix = _run_task_suffix()
+
+ for task in tasks:
+ payload = task["task"].get("payload", {})
+ command = payload.get("command") or [""]
+
+ main_command = command[0] if isinstance(command[0], str) else ""
+ run_task = main_command.endswith("run-task")
+
+ for cache in payload.get("cache", {}):
+ if not cache.startswith(cache_prefix):
+ raise Exception(
+ "{} is using a cache ({}) which is not appropriate "
+ "for its trust-domain and level. It should start with {}.".format(
+ task["label"], cache, cache_prefix
+ )
+ )
+
+ cache = cache[len(cache_prefix) :]
+
+ if not re_reserved_caches.match(cache):
+ continue
+
+ if not run_task:
+ raise Exception(
+ "%s is using a cache (%s) reserved for run-task "
+ "change the task to use run-task or use a different "
+ "cache name" % (task["label"], cache)
+ )
+
+ if not cache.endswith(suffix):
+ raise Exception(
+ "%s is using a cache (%s) reserved for run-task "
+ "but the cache name is not dependent on the contents "
+ "of run-task; change the cache name to conform to the "
+ "naming requirements" % (task["label"], cache)
+ )
+
+ yield task
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/__init__.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/__init__.py
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/archive.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/archive.py
new file mode 100644
index 0000000000..ee59ba4548
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/archive.py
@@ -0,0 +1,86 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import gzip
+import os
+import stat
+import tarfile
+
+# 2016-01-01T00:00:00+0000
+DEFAULT_MTIME = 1451606400
+
+
+def create_tar_from_files(fp, files):
+ """Create a tar file deterministically.
+
+ Receives a dict mapping names of files in the archive to local filesystem
+ paths or ``mozpack.files.BaseFile`` instances.
+
+ The files will be archived and written to the passed file handle opened
+ for writing.
+
+ Only regular files can be written.
+
+ FUTURE accept a filename argument (or create APIs to write files)
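+
+    Example (editor's sketch; the file path is hypothetical)::
+
+        import io
+
+        buf = io.BytesIO()
+        create_tar_from_files(buf, {"docs/README": "/tmp/README"})
+        # buf now holds a deterministic tar: fixed mtime, root ownership.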
+ """
+ with tarfile.open(name="", mode="w", fileobj=fp, dereference=True) as tf:
+ for archive_path, f in sorted(files.items()):
+ if isinstance(f, str):
+ mode = os.stat(f).st_mode
+ f = open(f, "rb")
+ else:
+ mode = 0o0644
+
+ ti = tarfile.TarInfo(archive_path)
+ ti.mode = mode
+ ti.type = tarfile.REGTYPE
+
+ if not ti.isreg():
+ raise ValueError("not a regular file: %s" % f)
+
+ # Disallow setuid and setgid bits. This is an arbitrary restriction.
+ # However, since we set uid/gid to root:root, setuid and setgid
+ # would be a glaring security hole if the archive were
+ # uncompressed as root.
+ if ti.mode & (stat.S_ISUID | stat.S_ISGID):
+                raise ValueError("cannot add file with setuid or setgid set: %s" % f)
+
+ # Set uid, gid, username, and group as deterministic values.
+ ti.uid = 0
+ ti.gid = 0
+ ti.uname = ""
+ ti.gname = ""
+
+ # Set mtime to a constant value.
+ ti.mtime = DEFAULT_MTIME
+
+ f.seek(0, 2)
+ ti.size = f.tell()
+ f.seek(0, 0)
+ # tarfile wants to pass a size argument to read(). So just
+ # wrap/buffer in a proper file object interface.
+ tf.addfile(ti, f)
+
+
+def create_tar_gz_from_files(fp, files, filename=None, compresslevel=9):
+ """Create a tar.gz file deterministically from files.
+
+ This is a glorified wrapper around ``create_tar_from_files`` that
+ adds gzip compression.
+
+ The passed file handle should be opened for writing in binary mode.
+ When the function returns, all data has been written to the handle.
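+
+    Example (editor's sketch; paths are hypothetical)::
+
+        with open("out.tar.gz", "wb") as fp:
+            create_tar_gz_from_files(fp, {"a.txt": "/tmp/a.txt"}, "out.tar.gz")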
+ """
+    # Offset 4-7 in the gzip header contains an mtime. Pin it to a known
+ # value so output is deterministic.
+ gf = gzip.GzipFile(
+ filename=filename or "",
+ mode="wb",
+ fileobj=fp,
+ compresslevel=compresslevel,
+ mtime=DEFAULT_MTIME,
+ )
+ with gf:
+ create_tar_from_files(gf, files)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/attributes.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/attributes.py
new file mode 100644
index 0000000000..cf6f11c573
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/attributes.py
@@ -0,0 +1,84 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import re
+
+
+def attrmatch(attributes, **kwargs):
+ """Determine whether the given set of task attributes matches. The
+ conditions are given as keyword arguments, where each keyword names an
+ attribute. The keyword value can be a literal, a set, or a callable. A
+ literal must match the attribute exactly. Given a set, the attribute value
+ must be in the set. A callable is called with the attribute value. If an
+ attribute is specified as a keyword argument but not present in the
+ attributes, the result is False."""
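+
+    Example (editor's sketch)::
+
+        attrmatch({"kind": "build", "level": "3"}, kind={"build", "test"})
+        # -> True: "build" is in the allowed set
+        attrmatch({"kind": "build"}, level="3")
+        # -> False: the "level" attribute is missing
+    """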
+ for kwkey, kwval in kwargs.items():
+ if kwkey not in attributes:
+ return False
+ attval = attributes[kwkey]
+ if isinstance(kwval, set):
+ if attval not in kwval:
+ return False
+ elif callable(kwval):
+ if not kwval(attval):
+ return False
+ elif kwval != attributes[kwkey]:
+ return False
+ return True
+
+
+def keymatch(attributes, target):
+ """Determine if any keys in attributes are a match to target, then return
+ a list of matching values. First exact matches will be checked. Failing
+ that, regex matches and finally a default key.
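+
+    Example (editor's sketch)::
+
+        keymatch({"win.*": 6, "default": 12}, "win64")    # -> [6]
+        keymatch({"win.*": 6, "default": 12}, "linux64")  # -> [12]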
+ """
+ # exact match
+ if target in attributes:
+ return [attributes[target]]
+
+ # regular expression match
+ matches = [v for k, v in attributes.items() if re.match(k + "$", target)]
+ if matches:
+ return matches
+
+ # default
+ if "default" in attributes:
+ return [attributes["default"]]
+
+ return []
+
+
+def _match_run_on(key, run_on):
+ """
+ Determine whether the given parameter is included in the corresponding `run-on-attribute`.
+ """
+ if "all" in run_on:
+ return True
+ return key in run_on
+
+
+match_run_on_projects = _match_run_on
+match_run_on_tasks_for = _match_run_on
+
+
+def match_run_on_git_branches(git_branch, run_on_git_branches):
+ """
+    Determine whether the given git branch is included in the
+    `run-on-git-branches` parameter. Allows 'all'.
+ """
+ if "all" in run_on_git_branches:
+ return True
+
+ for expected_git_branch_pattern in run_on_git_branches:
+ if re.match(expected_git_branch_pattern, git_branch):
+ return True
+
+ return False
+
+
+def sorted_unique_list(*args):
+ """Join one or more lists, and return a sorted list of unique members"""
+ combined = set().union(*args)
+ return sorted(combined)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/cached_tasks.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/cached_tasks.py
new file mode 100644
index 0000000000..974b114902
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/cached_tasks.py
@@ -0,0 +1,86 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import hashlib
+import time
+
+TARGET_CACHE_INDEX = "{cache_prefix}.cache.level-{level}.{type}.{name}.hash.{digest}"
+EXTRA_CACHE_INDEXES = [
+ "{cache_prefix}.cache.level-{level}.{type}.{name}.latest",
+ "{cache_prefix}.cache.level-{level}.{type}.{name}.pushdate.{build_date_long}",
+]
+
+
+def add_optimization(
+ config, taskdesc, cache_type, cache_name, digest=None, digest_data=None
+):
+ """
+ Allow the results of this task to be cached. This adds index routes to the
+ task so it can be looked up for future runs, and optimization hints so that
+ cached artifacts can be found. Exactly one of `digest` and `digest_data`
+ must be passed.
+
+ :param TransformConfig config: The configuration for the kind being transformed.
+ :param dict taskdesc: The description of the current task.
+ :param str cache_type: The type of task result being cached.
+ :param str cache_name: The name of the object being cached.
+ :param digest: A unique string identifying this version of the artifacts
+ being generated. Typically this will be the hash of inputs to the task.
+ :type digest: bytes or None
+ :param digest_data: A list of bytes representing the inputs of this task.
+ They will be concatenated and hashed to create the digest for this
+ task.
+ :type digest_data: list of bytes or None
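+
+    Example (editor's sketch; the cache type and name are hypothetical)::
+
+        add_optimization(
+            config,
+            taskdesc,
+            cache_type="content.v1",
+            cache_name="my-artifact",
+            digest_data=["input-hash-1", "input-hash-2"],
+        )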
+ """
+ if (digest is None) == (digest_data is None):
+ raise Exception("Must pass exactly one of `digest` and `digest_data`.")
+ if digest is None:
+ digest = hashlib.sha256("\n".join(digest_data).encode("utf-8")).hexdigest()
+
+ if "cached-task-prefix" in config.graph_config["taskgraph"]:
+ cache_prefix = config.graph_config["taskgraph"]["cached-task-prefix"]
+ else:
+ cache_prefix = config.graph_config["trust-domain"]
+
+ subs = {
+ "cache_prefix": cache_prefix,
+ "type": cache_type,
+ "name": cache_name,
+ "digest": digest,
+ }
+
+    # We'll try to find a cached version of the task at levels above and
+ # including the current level, starting at the highest level.
+ # Chain-of-trust doesn't handle tasks not built on the tip of a
+ # pull-request, so don't look for level-1 tasks if building a pull-request.
+ index_routes = []
+ min_level = int(config.params["level"])
+ if config.params["tasks_for"] == "github-pull-request":
+ min_level = max(min_level, 3)
+ for level in reversed(range(min_level, 4)):
+ subs["level"] = level
+ index_routes.append(TARGET_CACHE_INDEX.format(**subs))
+
+ taskdesc["optimization"] = {"index-search": index_routes}
+
+ # ... and cache at the lowest level.
+ subs["level"] = config.params["level"]
+ taskdesc.setdefault("routes", []).append(
+ f"index.{TARGET_CACHE_INDEX.format(**subs)}"
+ )
+
+ # ... and add some extra routes for humans
+ subs["build_date_long"] = time.strftime(
+ "%Y.%m.%d.%Y%m%d%H%M%S", time.gmtime(config.params["build_date"])
+ )
+ taskdesc["routes"].extend(
+ [f"index.{route.format(**subs)}" for route in EXTRA_CACHE_INDEXES]
+ )
+
+ taskdesc["attributes"]["cached_task"] = {
+ "type": cache_type,
+ "name": cache_name,
+ "digest": digest,
+ }
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/decision.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/decision.py
new file mode 100644
index 0000000000..d0e1e1079f
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/decision.py
@@ -0,0 +1,79 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Utilities for generating a decision task from :file:`.taskcluster.yml`.
+"""
+
+
+import os
+
+import jsone
+import slugid
+import yaml
+
+from .templates import merge
+from .time import current_json_time
+from .vcs import find_hg_revision_push_info
+
+
+def make_decision_task(params, root, context, head_rev=None):
+ """Generate a basic decision task, based on the root .taskcluster.yml"""
+ with open(os.path.join(root, ".taskcluster.yml"), "rb") as f:
+ taskcluster_yml = yaml.safe_load(f)
+
+ if not head_rev:
+ head_rev = params["head_rev"]
+
+ if params["repository_type"] == "hg":
+ pushlog = find_hg_revision_push_info(params["repository_url"], head_rev)
+
+ hg_push_context = {
+ "pushlog_id": pushlog["pushid"],
+ "pushdate": pushlog["pushdate"],
+ "owner": pushlog["user"],
+ }
+ else:
+ hg_push_context = {}
+
+ slugids = {}
+
+ def as_slugid(name):
+ # https://github.com/taskcluster/json-e/issues/164
+ name = name[0]
+ if name not in slugids:
+ slugids[name] = slugid.nice()
+ return slugids[name]
+
+ # provide a similar JSON-e context to what mozilla-taskcluster provides:
+ # https://docs.taskcluster.net/reference/integrations/mozilla-taskcluster/docs/taskcluster-yml
+ # but with a different tasks_for and an extra `cron` section
+ context = merge(
+ {
+ "repository": {
+ "url": params["repository_url"],
+ "project": params["project"],
+ "level": params["level"],
+ },
+ "push": merge(
+ {
+ "revision": params["head_rev"],
+ # remainder are fake values, but the decision task expects them anyway
+ "comment": " ",
+ },
+ hg_push_context,
+ ),
+ "now": current_json_time(),
+ "as_slugid": as_slugid,
+ },
+ context,
+ )
+
+ rendered = jsone.render(taskcluster_yml, context)
+ if len(rendered["tasks"]) != 1:
+ raise Exception("Expected .taskcluster.yml to only produce one cron task")
+ task = rendered["tasks"][0]
+
+ task_id = task.pop("taskId")
+ return (task_id, task)
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/docker.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/docker.py
new file mode 100644
index 0000000000..4b211cc4b3
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/docker.py
@@ -0,0 +1,342 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import hashlib
+import io
+import json
+import os
+import re
+import sys
+import urllib.parse
+
+import requests_unixsocket
+
+from taskgraph.util.archive import create_tar_gz_from_files
+from taskgraph.util.memoize import memoize
+
+from .yaml import load_yaml
+
+IMAGE_DIR = os.path.join(".", "taskcluster", "docker")
+
+
+def docker_url(path, **kwargs):
+ docker_socket = os.environ.get("DOCKER_SOCKET", "/var/run/docker.sock")
+ return urllib.parse.urlunparse(
+ (
+ "http+unix",
+ urllib.parse.quote(docker_socket, safe=""),
+ path,
+ "",
+ urllib.parse.urlencode(kwargs),
+ "",
+ )
+ )
+
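+# Example (editor's note): with the default socket, docker_url("/images/json",
+# all=1) yields "http+unix://%2Fvar%2Frun%2Fdocker.sock/images/json?all=1".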
+
+def post_to_docker(tar, api_path, **kwargs):
+ """POSTs a tar file to a given docker API path.
+
+ The tar argument can be anything that can be passed to requests.post()
+ as data (e.g. iterator or file object).
+ The extra keyword arguments are passed as arguments to the docker API.
+ """
+ req = requests_unixsocket.Session().post(
+ docker_url(api_path, **kwargs),
+ data=tar,
+ stream=True,
+ headers={"Content-Type": "application/x-tar"},
+ )
+ if req.status_code != 200:
+ message = req.json().get("message")
+ if not message:
+ message = f"docker API returned HTTP code {req.status_code}"
+ raise Exception(message)
+ status_line = {}
+
+ buf = b""
+ for content in req.iter_content(chunk_size=None):
+ if not content:
+ continue
+        # Sometimes a chunk of content is not complete JSON, so we accumulate
+        # it with leftovers from previous iterations.
+ buf += content
+ try:
+ data = json.loads(buf)
+ except Exception:
+ continue
+ buf = b""
+ # data is sometimes an empty dict.
+ if not data:
+ continue
+ # Mimic how docker itself presents the output. This code was tested
+ # with API version 1.18 and 1.26.
+ if "status" in data:
+ if "id" in data:
+ if sys.stderr.isatty():
+ total_lines = len(status_line)
+ line = status_line.setdefault(data["id"], total_lines)
+ n = total_lines - line
+ if n > 0:
+ # Move the cursor up n lines.
+ sys.stderr.write(f"\033[{n}A")
+ # Clear line and move the cursor to the beginning of it.
+ sys.stderr.write("\033[2K\r")
+ sys.stderr.write(
+ "{}: {} {}\n".format(
+ data["id"], data["status"], data.get("progress", "")
+ )
+ )
+ if n > 1:
+ # Move the cursor down n - 1 lines, which, considering
+ # the carriage return on the last write, gets us back
+ # where we started.
+ sys.stderr.write(f"\033[{n - 1}B")
+ else:
+ status = status_line.get(data["id"])
+ # Only print status changes.
+ if status != data["status"]:
+ sys.stderr.write("{}: {}\n".format(data["id"], data["status"]))
+ status_line[data["id"]] = data["status"]
+ else:
+ status_line = {}
+ sys.stderr.write("{}\n".format(data["status"]))
+ elif "stream" in data:
+ sys.stderr.write(data["stream"])
+ elif "aux" in data:
+ sys.stderr.write(repr(data["aux"]))
+ elif "error" in data:
+ sys.stderr.write("{}\n".format(data["error"]))
+ # Sadly, docker doesn't give more than a plain string for errors,
+ # so the best we can do to propagate the error code from the command
+ # that failed is to parse the error message...
+ errcode = 1
+ m = re.search(r"returned a non-zero code: (\d+)", data["error"])
+ if m:
+ errcode = int(m.group(1))
+ sys.exit(errcode)
+ else:
+ raise NotImplementedError(repr(data))
+ sys.stderr.flush()
+
+
+def docker_image(name, by_tag=False):
+ """
+ Resolve in-tree prebuilt docker image to ``<registry>/<repository>@sha256:<digest>``,
+ or ``<registry>/<repository>:<tag>`` if `by_tag` is `True`.
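+
+    Example (editor's sketch; the image name is hypothetical)::
+
+        docker_image("debian")               # <registry>/debian@sha256:<digest>
+        docker_image("debian", by_tag=True)  # <registry>/debian:<tag>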
+ """
+ try:
+ with open(os.path.join(IMAGE_DIR, name, "REGISTRY")) as f:
+ registry = f.read().strip()
+ except OSError:
+ with open(os.path.join(IMAGE_DIR, "REGISTRY")) as f:
+ registry = f.read().strip()
+
+ if not by_tag:
+ hashfile = os.path.join(IMAGE_DIR, name, "HASH")
+ try:
+ with open(hashfile) as f:
+ return f"{registry}/{name}@{f.read().strip()}"
+ except OSError:
+ raise Exception(f"Failed to read HASH file {hashfile}")
+
+ try:
+ with open(os.path.join(IMAGE_DIR, name, "VERSION")) as f:
+ tag = f.read().strip()
+ except OSError:
+ tag = "latest"
+ return f"{registry}/{name}:{tag}"
+
+
+class VoidWriter:
+ """A file object with write capabilities that does nothing with the written
+ data."""
+
+ def write(self, buf):
+ pass
+
+
+def generate_context_hash(topsrcdir, image_path, args=None):
+ """Generates a sha256 hash for context directory used to build an image."""
+
+ return stream_context_tar(topsrcdir, image_path, VoidWriter(), args=args)
+
+
+class HashingWriter:
+ """A file object with write capabilities that hashes the written data at
+ the same time it passes down to a real file object."""
+
+ def __init__(self, writer):
+ self._hash = hashlib.sha256()
+ self._writer = writer
+
+ def write(self, buf):
+ self._hash.update(buf)
+ self._writer.write(buf)
+
+ def hexdigest(self):
+ return self._hash.hexdigest()
+
+
+def create_context_tar(topsrcdir, context_dir, out_path, args=None):
+ """Create a context tarball.
+
+ A directory ``context_dir`` containing a Dockerfile will be assembled into
+ a gzipped tar file at ``out_path``.
+
+ We also scan the source Dockerfile for special syntax that influences
+ context generation.
+
+ If a line in the Dockerfile has the form ``# %include <path>``,
+ the relative path specified on that line will be matched against
+ files in the source repository and added to the context under the
+ path ``topsrcdir/``. If an entry is a directory, we add all files
+ under that directory.
+
+ If a line in the Dockerfile has the form ``# %ARG <name>``, occurrences of
+ the string ``$<name>`` in subsequent lines are replaced with the value
+ found in the ``args`` argument. Exception: this doesn't apply to VOLUME
+ definitions.
+
+ Returns the SHA-256 hex digest of the created archive.
+ """
+ with open(out_path, "wb") as fh:
+ return stream_context_tar(
+ topsrcdir,
+ context_dir,
+ fh,
+ image_name=os.path.basename(out_path),
+ args=args,
+ )
+
+
+RUN_TASK_ROOT = os.path.join(os.path.dirname(os.path.dirname(__file__)), "run-task")
+RUN_TASK_FILES = {
+ f"run-task/{path}": os.path.join(RUN_TASK_ROOT, path)
+ for path in [
+ "run-task",
+ "fetch-content",
+ "hgrc",
+ "robustcheckout.py",
+ ]
+}
+RUN_TASK_SNIPPET = [
+ "COPY run-task/run-task /usr/local/bin/run-task\n",
+ "COPY run-task/fetch-content /usr/local/bin/fetch-content\n",
+ "COPY run-task/robustcheckout.py /usr/local/mercurial/robustcheckout.py\n"
+ "COPY run-task/hgrc /etc/mercurial/hgrc.d/mozilla.rc\n",
+]
+
+
+def stream_context_tar(topsrcdir, context_dir, out_file, image_name=None, args=None):
+ """Like create_context_tar, but streams the tar file to the `out_file` file
+ object."""
+ archive_files = {}
+ replace = []
+ content = []
+
+ topsrcdir = os.path.abspath(topsrcdir)
+ context_dir = os.path.join(topsrcdir, context_dir)
+
+ for root, dirs, files in os.walk(context_dir):
+ for f in files:
+ source_path = os.path.join(root, f)
+ archive_path = source_path[len(context_dir) + 1 :]
+ archive_files[archive_path] = open(source_path, "rb")
+
+ # Parse Dockerfile for special syntax of extra files to include.
+ content = []
+ with open(os.path.join(context_dir, "Dockerfile")) as fh:
+ for line in fh:
+ if line.startswith("# %ARG"):
+ p = line[len("# %ARG ") :].strip()
+ if not args or p not in args:
+ raise Exception(f"missing argument: {p}")
+ replace.append((re.compile(rf"\${p}\b"), args[p]))
+ continue
+
+ for regexp, s in replace:
+ line = re.sub(regexp, s, line)
+
+ content.append(line)
+
+ if not line.startswith("# %include"):
+ continue
+
+ if line.strip() == "# %include-run-task":
+ content.extend(RUN_TASK_SNIPPET)
+ archive_files.update(RUN_TASK_FILES)
+ continue
+
+ p = line[len("# %include ") :].strip()
+ if os.path.isabs(p):
+ raise Exception("extra include path cannot be absolute: %s" % p)
+
+ fs_path = os.path.normpath(os.path.join(topsrcdir, p))
+ # Check for filesystem traversal exploits.
+ if not fs_path.startswith(topsrcdir):
+ raise Exception("extra include path outside topsrcdir: %s" % p)
+
+ if not os.path.exists(fs_path):
+ raise Exception("extra include path does not exist: %s" % p)
+
+ if os.path.isdir(fs_path):
+ for root, dirs, files in os.walk(fs_path):
+ for f in files:
+ source_path = os.path.join(root, f)
+ rel = source_path[len(fs_path) + 1 :]
+ archive_path = os.path.join("topsrcdir", p, rel)
+ archive_files[archive_path] = source_path
+ else:
+ archive_path = os.path.join("topsrcdir", p)
+ archive_files[archive_path] = fs_path
+
+ archive_files["Dockerfile"] = io.BytesIO("".join(content).encode("utf-8"))
+
+ writer = HashingWriter(out_file)
+ create_tar_gz_from_files(writer, archive_files, image_name)
+ return writer.hexdigest()
+
+
+@memoize
+def image_paths():
+ """Return a map of image name to paths containing their Dockerfile."""
+ config = load_yaml("taskcluster", "ci", "docker-image", "kind.yml")
+ return {
+ k: os.path.join(IMAGE_DIR, v.get("definition", k))
+ for k, v in config["tasks"].items()
+ }
+
+
+def image_path(name):
+ paths = image_paths()
+ if name in paths:
+ return paths[name]
+ return os.path.join(IMAGE_DIR, name)
+
+
+@memoize
+def parse_volumes(image):
+ """Parse VOLUME entries from a Dockerfile for an image."""
+ volumes = set()
+
+ path = image_path(image)
+
+ with open(os.path.join(path, "Dockerfile"), "rb") as fh:
+ for line in fh:
+ line = line.strip()
+ # We assume VOLUME definitions don't use %ARGS.
+ if not line.startswith(b"VOLUME "):
+ continue
+
+ v = line.split(None, 1)[1]
+ if v.startswith(b"["):
+ raise ValueError(
+ "cannot parse array syntax for VOLUME; "
+ "convert to multiple entries"
+ )
+
+ volumes |= {volume.decode("utf-8") for volume in v.split()}
+
+ return volumes
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py
new file mode 100644
index 0000000000..bf786e92e4
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/hash.py
@@ -0,0 +1,54 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import hashlib
+from pathlib import Path
+
+from taskgraph.util import path as mozpath
+from taskgraph.util.memoize import memoize
+
+
+@memoize
+def hash_path(path):
+ """Hash a single file.
+
+ Returns the SHA-256 hash in hex form.
+ """
+ with open(path, "rb") as fh:
+ return hashlib.sha256(fh.read()).hexdigest()
+
+
+def _find_files(base_path):
+ for path in Path(base_path).rglob("*"):
+ if path.is_file():
+ yield str(path)
+
+
+def hash_paths(base_path, patterns):
+ """
+    Given a list of path patterns, return a digest of the contents of all
+ the corresponding files, similarly to git tree objects or mercurial
+ manifests.
+
+ Each file is hashed. The list of all hashes and file paths is then
+ itself hashed to produce the result.
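+
+    Example (editor's sketch; the base path is hypothetical)::
+
+        hash_paths("taskcluster/docker/debian", ["**"])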
+ """
+ h = hashlib.sha256()
+
+ found = set()
+ for pattern in patterns:
+ files = _find_files(base_path)
+ matches = [path for path in files if mozpath.match(path, pattern)]
+ if matches:
+ found.update(matches)
+ else:
+ raise Exception("%s did not match anything" % pattern)
+ for path in sorted(found):
+ h.update(
+ "{} {}\n".format(
+ hash_path(mozpath.abspath(mozpath.join(base_path, path))),
+ mozpath.normsep(path),
+ ).encode("utf-8")
+ )
+ return h.hexdigest()
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/keyed_by.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/keyed_by.py
new file mode 100644
index 0000000000..9b0c5a44fb
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/keyed_by.py
@@ -0,0 +1,97 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+from .attributes import keymatch
+
+
+def evaluate_keyed_by(
+ value, item_name, attributes, defer=None, enforce_single_match=True
+):
+ """
+ For values which can either accept a literal value, or be keyed by some
+ attributes, perform that lookup and return the result.
+
+ For example, given item::
+
+ by-test-platform:
+ macosx-10.11/debug: 13
+ win.*: 6
+ default: 12
+
+    a call to `evaluate_keyed_by(item, 'thing-name', {'test-platform': 'linux96'})`
+ would return `12`.
+
+ Items can be nested as deeply as desired::
+
+ by-test-platform:
+ win.*:
+ by-project:
+ ash: ..
+ cedar: ..
+ linux: 13
+ default: 12
+
+ Args:
+        value: The value to perform the evaluation on; either a literal or
+            a nested 'by-<key>' dictionary, as illustrated above.
+        item_name (str): Used to generate useful error messages.
+        attributes (dict): Dictionary of attributes used to look up the
+            'by-<key>' alternatives.
+ defer (list):
+ Allows evaluating a by-* entry at a later time. In the example
+ above it's possible that the project attribute hasn't been set yet,
+ in which case we'd want to stop before resolving that subkey and
+ then call this function again later. This can be accomplished by
+ setting `defer=["project"]` in this example.
+ enforce_single_match (bool):
+ If True (default), each task may only match a single arm of the
+ evaluation.
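+
+    Example (editor's sketch)::
+
+        evaluate_keyed_by(
+            {"by-project": {"ash": 8, "default": 4}},
+            "max-run-time",
+            {"project": "ash"},
+        )  # -> 8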
+ """
+ while True:
+ if not isinstance(value, dict) or len(value) != 1:
+ return value
+ value_key = next(iter(value))
+ if not value_key.startswith("by-"):
+ return value
+
+ keyed_by = value_key[3:] # strip off 'by-' prefix
+
+ if defer and keyed_by in defer:
+ return value
+
+ key = attributes.get(keyed_by)
+ alternatives = next(iter(value.values()))
+
+ if len(alternatives) == 1 and "default" in alternatives:
+            # Error out when 'default' is the only alternative specified,
+            # because the by-{keyed_by} indirection is unnecessary there.
+ raise Exception(
+ "Keyed-by '{}' unnecessary with only value 'default' "
+ "found, when determining item {}".format(keyed_by, item_name)
+ )
+
+ if key is None:
+ if "default" in alternatives:
+ value = alternatives["default"]
+ continue
+ else:
+ raise Exception(
+ "No attribute {} and no value for 'default' found "
+ "while determining item {}".format(keyed_by, item_name)
+ )
+
+ matches = keymatch(alternatives, key)
+ if enforce_single_match and len(matches) > 1:
+ raise Exception(
+ "Multiple matching values for {} {!r} found while "
+ "determining item {}".format(keyed_by, key, item_name)
+ )
+ elif matches:
+ value = matches[0]
+ continue
+
+ raise Exception(
+ "No {} matching {!r} nor 'default' found while determining item {}".format(
+ keyed_by, key, item_name
+ )
+ )
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/memoize.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/memoize.py
new file mode 100644
index 0000000000..56b513e74c
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/memoize.py
@@ -0,0 +1,40 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Imported from
+# https://searchfox.org/mozilla-central/rev/c3ebaf6de2d481c262c04bb9657eaf76bf47e2ac/python/mozbuild/mozbuild/util.py#923-949
+
+
+import functools
+
+
+class memoize(dict):
+ """A decorator to memoize the results of function calls depending
+ on its arguments.
+ Both functions and instance methods are handled, although in the
+ instance method case, the results are cache in the instance itself.
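+
+    Example (editor's sketch)::
+
+        @memoize
+        def expensive(n):
+            return n * n
+
+        expensive(4)  # computed
+        expensive(4)  # returned from the cache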
+ """
+
+ def __init__(self, func):
+ self.func = func
+ functools.update_wrapper(self, func)
+
+ def __call__(self, *args):
+ if args not in self:
+ self[args] = self.func(*args)
+ return self[args]
+
+ def method_call(self, instance, *args):
+ name = "_%s" % self.func.__name__
+ if not hasattr(instance, name):
+ setattr(instance, name, {})
+ cache = getattr(instance, name)
+ if args not in cache:
+ cache[args] = self.func(instance, *args)
+ return cache[args]
+
+ def __get__(self, instance, cls):
+ return functools.update_wrapper(
+ functools.partial(self.method_call, instance), self.func
+ )
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/parameterization.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/parameterization.py
new file mode 100644
index 0000000000..6233a98a40
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/parameterization.py
@@ -0,0 +1,97 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import re
+
+from taskgraph.util.taskcluster import get_artifact_url
+from taskgraph.util.time import json_time_from_now
+
+TASK_REFERENCE_PATTERN = re.compile("<([^>]+)>")
+ARTIFACT_REFERENCE_PATTERN = re.compile("<([^/]+)/([^>]+)>")
+
+
+def _recurse(val, param_fns):
+ def recurse(val):
+ if isinstance(val, list):
+ return [recurse(v) for v in val]
+ elif isinstance(val, dict):
+ if len(val) == 1:
+ for param_key, param_fn in param_fns.items():
+ if set(val.keys()) == {param_key}:
+ return param_fn(val[param_key])
+ return {k: recurse(v) for k, v in val.items()}
+ else:
+ return val
+
+ return recurse(val)
+
+
+def resolve_timestamps(now, task_def):
+ """Resolve all instances of `{'relative-datestamp': '..'}` in the given task definition"""
+ return _recurse(
+ task_def,
+ {
+ "relative-datestamp": lambda v: json_time_from_now(v, now),
+ },
+ )
+
+
+def resolve_task_references(label, task_def, task_id, decision_task_id, dependencies):
+ """Resolve all instances of ``{'task-reference': '..<..>..'} ``
+ and ``{'artifact-reference`: '..<dependency/artifact/path>..'}``
+ in the given task definition, using the given dependencies.
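+
+    Example (editor's sketch)::
+
+        resolve_task_references(
+            "my-task",
+            {"env": {"PARENT": {"task-reference": "<decision>"}}},
+            task_id="abc123",
+            decision_task_id="def456",
+            dependencies={},
+        )
+        # -> {"env": {"PARENT": "def456"}}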
+ """
+
+ def task_reference(val):
+ def repl(match):
+ key = match.group(1)
+ if key == "self":
+ return task_id
+ elif key == "decision":
+ return decision_task_id
+ try:
+ return dependencies[key]
+ except KeyError:
+ # handle escaping '<'
+ if key == "<":
+ return key
+ raise KeyError(f"task '{label}' has no dependency named '{key}'")
+
+ return TASK_REFERENCE_PATTERN.sub(repl, val)
+
+ def artifact_reference(val):
+ def repl(match):
+ dependency, artifact_name = match.group(1, 2)
+
+ if dependency == "self":
+ raise KeyError(f"task '{label}' can't reference artifacts of self")
+ elif dependency == "decision":
+ task_id = decision_task_id
+ else:
+ try:
+ task_id = dependencies[dependency]
+ except KeyError:
+ raise KeyError(
+ "task '{}' has no dependency named '{}'".format(
+ label, dependency
+ )
+ )
+
+ assert artifact_name.startswith(
+ "public/"
+ ), "artifact-reference only supports public artifacts, not `{}`".format(
+ artifact_name
+ )
+ return get_artifact_url(task_id, artifact_name)
+
+ return ARTIFACT_REFERENCE_PATTERN.sub(repl, val)
+
+ return _recurse(
+ task_def,
+ {
+ "task-reference": task_reference,
+ "artifact-reference": artifact_reference,
+ },
+ )
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/path.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/path.py
new file mode 100644
index 0000000000..728b648ac1
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/path.py
@@ -0,0 +1,172 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Like :py:mod:`os.path`, with a reduced set of functions, and with normalized path
+separators (always use forward slashes).
+Also contains a few additional utilities not found in :py:mod:`os.path`.
+"""
+
+# Imported from
+# https://searchfox.org/mozilla-central/rev/c3ebaf6de2d481c262c04bb9657eaf76bf47e2ac/python/mozbuild/mozpack/path.py
+
+
+import os
+import posixpath
+import re
+
+
+def normsep(path):
+ """
+ Normalize path separators, by using forward slashes instead of whatever
+ :py:const:`os.sep` is.
+ """
+ if os.sep != "/":
+ path = path.replace(os.sep, "/")
+ if os.altsep and os.altsep != "/":
+ path = path.replace(os.altsep, "/")
+ return path
+
+
+def relpath(path, start):
+ rel = normsep(os.path.relpath(path, start))
+ return "" if rel == "." else rel
+
+
+def realpath(path):
+ return normsep(os.path.realpath(path))
+
+
+def abspath(path):
+ return normsep(os.path.abspath(path))
+
+
+def join(*paths):
+ return normsep(os.path.join(*paths))
+
+
+def normpath(path):
+ return posixpath.normpath(normsep(path))
+
+
+def dirname(path):
+ return posixpath.dirname(normsep(path))
+
+
+def commonprefix(paths):
+ return posixpath.commonprefix([normsep(path) for path in paths])
+
+
+def basename(path):
+ return os.path.basename(path)
+
+
+def splitext(path):
+ return posixpath.splitext(normsep(path))
+
+
+def split(path):
+ """
+ Return the normalized path as a list of its components.
+
+ ``split('foo/bar/baz')`` returns ``['foo', 'bar', 'baz']``
+ """
+ return normsep(path).split("/")
+
+
+def basedir(path, bases):
+ """
+ Given a list of directories (`bases`), return which one contains the given
+ path. If several matches are found, the deepest base directory is returned.
+
+ ``basedir('foo/bar/baz', ['foo', 'baz', 'foo/bar'])`` returns ``'foo/bar'``
+ (`'foo'` and `'foo/bar'` both match, but `'foo/bar'` is the deepest match)
+ """
+ path = normsep(path)
+ bases = [normsep(b) for b in bases]
+ if path in bases:
+ return path
+ for b in sorted(bases, reverse=True):
+ if b == "" or path.startswith(b + "/"):
+ return b
+
+
+re_cache = {}
+# Python versions < 3.7 return r'\/' for re.escape('/').
+if re.escape("/") == "/":
+ MATCH_STAR_STAR_RE = re.compile(r"(^|/)\\\*\\\*/")
+ MATCH_STAR_STAR_END_RE = re.compile(r"(^|/)\\\*\\\*$")
+else:
+ MATCH_STAR_STAR_RE = re.compile(r"(^|\\\/)\\\*\\\*\\\/")
+ MATCH_STAR_STAR_END_RE = re.compile(r"(^|\\\/)\\\*\\\*$")
+
+
+def match(path, pattern):
+ """
+ Return whether the given path matches the given pattern.
+ An asterisk can be used to match any string, including the null string, in
+ one part of the path:
+
+ ``foo`` matches ``*``, ``f*`` or ``fo*o``
+
+ However, an asterisk matching a subdirectory may not match the null string:
+
+ ``foo/bar`` does *not* match ``foo/*/bar``
+
+ If the pattern matches one of the ancestor directories of the path, the
+    path is considered matching:
+
+ ``foo/bar`` matches ``foo``
+
+ Two adjacent asterisks can be used to match files and zero or more
+ directories and subdirectories.
+
+ ``foo/bar`` matches ``foo/**/bar``, or ``**/bar``
+ """
+ if not pattern:
+ return True
+ if pattern not in re_cache:
+ p = re.escape(pattern)
+ p = MATCH_STAR_STAR_RE.sub(r"\1(?:.+/)?", p)
+ p = MATCH_STAR_STAR_END_RE.sub(r"(?:\1.+)?", p)
+ p = p.replace(r"\*", "[^/]*") + "(?:/.*)?$"
+ re_cache[pattern] = re.compile(p)
+ return re_cache[pattern].match(path) is not None
+
+
+def rebase(oldbase, base, relativepath):
+ """
+ Return `relativepath` relative to `base` instead of `oldbase`.
+ """
+ if base == oldbase:
+ return relativepath
+ if len(base) < len(oldbase):
+ assert basedir(oldbase, [base]) == base
+ relbase = relpath(oldbase, base)
+ result = join(relbase, relativepath)
+ else:
+ assert basedir(base, [oldbase]) == oldbase
+ relbase = relpath(base, oldbase)
+ result = relpath(relativepath, relbase)
+ result = normpath(result)
+ if relativepath.endswith("/") and not result.endswith("/"):
+ result += "/"
+ return result
+
+
+def ancestors(path):
+ """Emit the parent directories of a path.
+
+ Args:
+ path (str): Path to emit parents of.
+
+ Yields:
+ str: Path of parent directory.
+ """
+ while path:
+ yield path
+ newpath = os.path.dirname(path)
+ if newpath == path:
+ break
+ path = newpath
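A few calls illustrating the matching and traversal semantics documented above:

    from taskgraph.util.path import ancestors, basedir, match

    match("foo/bar/baz", "foo/**/baz")          # True
    match("foo/bar", "foo")                     # True (ancestor directory matches)
    basedir("foo/bar/baz", ["foo", "foo/bar"])  # "foo/bar" (deepest match wins)
    list(ancestors("a/b/c"))                    # ["a/b/c", "a/b", "a"]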
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/python_path.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/python_path.py
new file mode 100644
index 0000000000..3eb61dfbf3
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/python_path.py
@@ -0,0 +1,52 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import inspect
+import os
+
+
+def find_object(path):
+ """
+ Find a Python object given a path of the form <modulepath>:<objectpath>.
+ Conceptually equivalent to
+
+ def find_object(modulepath, objectpath):
+ import <modulepath> as mod
+ return mod.<objectpath>
+ """
+ if path.count(":") != 1:
+ raise ValueError(f'python path {path!r} does not have the form "module:object"')
+
+ modulepath, objectpath = path.split(":")
+ obj = __import__(modulepath)
+ for a in modulepath.split(".")[1:]:
+ obj = getattr(obj, a)
+ for a in objectpath.split("."):
+ obj = getattr(obj, a)
+ return obj
+
+
+def import_sibling_modules(exceptions=None):
+ """
+ Import all Python modules that are siblings of the calling module.
+
+ Args:
+ exceptions (list): A list of file names to exclude (caller and
+ __init__.py are implicitly excluded).
+ """
+ frame = inspect.stack()[1]
+ mod = inspect.getmodule(frame[0])
+
+ name = os.path.basename(mod.__file__)
+ excs = {"__init__.py", name}
+ if exceptions:
+ excs.update(exceptions)
+
+ modpath = mod.__name__
+ if not name.startswith("__init__.py"):
+ modpath = modpath.rsplit(".", 1)[0]
+
+ for f in os.listdir(os.path.dirname(mod.__file__)):
+ if f.endswith(".py") and f not in excs:
+ __import__(modpath + "." + f[:-3])
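For example, find_object splits on the colon, imports the module, and walks the attribute chain:

    from taskgraph.util.python_path import find_object

    join = find_object("os.path:join")  # the same object as os.path.join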
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/readonlydict.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/readonlydict.py
new file mode 100644
index 0000000000..55d74f479a
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/readonlydict.py
@@ -0,0 +1,22 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Imported from
+# https://searchfox.org/mozilla-central/rev/c3ebaf6de2d481c262c04bb9657eaf76bf47e2ac/python/mozbuild/mozbuild/util.py#115-127
+
+
+class ReadOnlyDict(dict):
+ """A read-only dictionary."""
+
+ def __init__(self, *args, **kwargs):
+ dict.__init__(self, *args, **kwargs)
+
+ def __delitem__(self, key):
+ raise Exception("Object does not support deletion.")
+
+ def __setitem__(self, key, value):
+ raise Exception("Object does not support assignment.")
+
+ def update(self, *args, **kwargs):
+ raise Exception("Object does not support update.")
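A minimal usage sketch:

    from taskgraph.util.readonlydict import ReadOnlyDict

    config = ReadOnlyDict({"level": 3})
    config["level"]      # 3; reads behave like a normal dict
    config["level"] = 4  # raises Exception("Object does not support assignment.")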
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/schema.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/schema.py
new file mode 100644
index 0000000000..3989f71182
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/schema.py
@@ -0,0 +1,260 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import collections
+import pprint
+import re
+
+import voluptuous
+
+import taskgraph
+
+from .keyed_by import evaluate_keyed_by
+
+
+def validate_schema(schema, obj, msg_prefix):
+ """
+ Validate that object satisfies schema. If not, generate a useful exception
+ beginning with msg_prefix.
+ """
+ if taskgraph.fast:
+ return
+ try:
+ schema(obj)
+ except voluptuous.MultipleInvalid as exc:
+ msg = [msg_prefix]
+ for error in exc.errors:
+ msg.append(str(error))
+ raise Exception("\n".join(msg) + "\n" + pprint.pformat(obj))
+
+
+def optionally_keyed_by(*arguments):
+ """
+ Mark a schema value as optionally keyed by any of a number of fields. The
+ schema is the last argument, and the remaining fields are taken to be the
+ field names. For example:
+
+ 'some-value': optionally_keyed_by(
+ 'test-platform', 'build-platform',
+ Any('a', 'b', 'c'))
+
+ The resulting schema will allow nesting of `by-test-platform` and
+ `by-build-platform` in either order.
+ """
+ schema = arguments[-1]
+ fields = arguments[:-1]
+
+ def validator(obj):
+ if isinstance(obj, dict) and len(obj) == 1:
+ k, v = list(obj.items())[0]
+ if k.startswith("by-") and k[len("by-") :] in fields:
+ res = {}
+ for kk, vv in v.items():
+ try:
+ res[kk] = validator(vv)
+ except voluptuous.Invalid as e:
+ e.prepend([k, kk])
+ raise
+ return res
+ return Schema(schema)(obj)
+
+ return validator
+
+
+def resolve_keyed_by(
+ item, field, item_name, defer=None, enforce_single_match=True, **extra_values
+):
+ """
+ For values which can either accept a literal value, or be keyed by some
+ other attribute of the item, perform that lookup and replacement in-place
+ (modifying `item` directly). The field is specified using dotted notation
+ to traverse dictionaries.
+
+ For example, given item::
+
+ job:
+ test-platform: linux128
+ chunks:
+ by-test-platform:
+ macosx-10.11/debug: 13
+ win.*: 6
+ default: 12
+
+ a call to `resolve_keyed_by(item, 'job.chunks', item['thing-name'])`
+ would mutate item in-place to::
+
+ job:
+ test-platform: linux128
+ chunks: 12
+
+ The `item_name` parameter is used to generate useful error messages.
+
+ If extra_values are supplied, they represent additional values available
+ for reference from by-<field>.
+
+ Items can be nested as deeply as the schema will allow::
+
+ chunks:
+ by-test-platform:
+ win.*:
+ by-project:
+ ash: ..
+ cedar: ..
+ linux: 13
+ default: 12
+
+ Args:
+ item (dict): Object being evaluated.
+ field (str): Name of the key to perform evaluation on.
+ item_name (str): Used to generate useful error messages.
+ defer (list):
+ Allows evaluating a by-* entry at a later time. In the example
+ above it's possible that the project attribute hasn't been set yet,
+ in which case we'd want to stop before resolving that subkey and
+ then call this function again later. This can be accomplished by
+ setting `defer=["project"]` in this example.
+ enforce_single_match (bool):
+ If True (default), each task may only match a single arm of the
+ evaluation.
+ extra_values (kwargs):
+ If supplied, represent additional values available
+ for reference from by-<field>.
+
+ Returns:
+ dict: item which has also been modified in-place.
+ """
+ # find the field, returning the item unchanged if anything goes wrong
+ container, subfield = item, field
+ while "." in subfield:
+ f, subfield = subfield.split(".", 1)
+ if f not in container:
+ return item
+ container = container[f]
+ if not isinstance(container, dict):
+ return item
+
+ if subfield not in container:
+ return item
+
+ container[subfield] = evaluate_keyed_by(
+ value=container[subfield],
+ item_name=f"`{field}` in `{item_name}`",
+ defer=defer,
+ enforce_single_match=enforce_single_match,
+ attributes=dict(item, **extra_values),
+ )
+
+ return item
+
+
+# Schemas for YAML files should use dashed identifiers by default. If there are
+# components of the schema for which there is a good reason to use another format,
+# they can be excepted here.
+EXCEPTED_SCHEMA_IDENTIFIERS = [
+ # upstream-artifacts and artifact-map are handed directly to scriptWorker,
+ # which expects interCaps
+ "upstream-artifacts",
+ "artifact-map",
+]
+
+
+def check_schema(schema):
+ identifier_re = re.compile(r"^\$?[a-z][a-z0-9-]*$")
+
+ def excepted(item):
+ for esi in EXCEPTED_SCHEMA_IDENTIFIERS:
+ if isinstance(esi, str):
+ if f"[{esi!r}]" in item:
+ return True
+ elif esi(item):
+ return True
+ return False
+
+ def iter(path, sch):
+ def check_identifier(path, k):
+ if k in (str,) or k in (str, voluptuous.Extra):
+ pass
+ elif isinstance(k, voluptuous.NotIn):
+ pass
+ elif isinstance(k, str):
+ if not identifier_re.match(k) and not excepted(path):
+ raise RuntimeError(
+ "YAML schemas should use dashed lower-case identifiers, "
+ "not {!r} @ {}".format(k, path)
+ )
+ elif isinstance(k, (voluptuous.Optional, voluptuous.Required)):
+ check_identifier(path, k.schema)
+ elif isinstance(k, (voluptuous.Any, voluptuous.All)):
+ for v in k.validators:
+ check_identifier(path, v)
+ elif not excepted(path):
+ raise RuntimeError(
+ "Unexpected type in YAML schema: {} @ {}".format(
+ type(k).__name__, path
+ )
+ )
+
+ if isinstance(sch, collections.abc.Mapping):
+ for k, v in sch.items():
+ child = f"{path}[{k!r}]"
+ check_identifier(child, k)
+ iter(child, v)
+ elif isinstance(sch, (list, tuple)):
+ for i, v in enumerate(sch):
+ iter(f"{path}[{i}]", v)
+ elif isinstance(sch, voluptuous.Any):
+ for v in sch.validators:
+ iter(path, v)
+
+ iter("schema", schema.schema)
+
+
+class Schema(voluptuous.Schema):
+ """
+ Operates identically to voluptuous.Schema, but applying some taskgraph-specific checks
+ in the process.
+ """
+
+ def __init__(self, *args, check=True, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ self.check = check
+ if not taskgraph.fast and self.check:
+ check_schema(self)
+
+ def extend(self, *args, **kwargs):
+ schema = super().extend(*args, **kwargs)
+
+ if self.check:
+ check_schema(schema)
+        # We want a schema that has been extended twice to be checked too.
+ schema.__class__ = Schema
+ return schema
+
+ def _compile(self, schema):
+ if taskgraph.fast:
+ return
+ return super()._compile(schema)
+
+ def __getitem__(self, item):
+ return self.schema[item]
+
+
+OptimizationSchema = voluptuous.Any(
+ # always run this task (default)
+ None,
+ # search the index for the given index namespaces, and replace this task if found
+ # the search occurs in order, with the first match winning
+ {"index-search": [str]},
+ # skip this task if none of the given file patterns match
+ {"skip-unless-changed": [str]},
+)
+
+# shortcut for a string where task references are allowed
+taskref_or_string = voluptuous.Any(
+ str,
+ {voluptuous.Required("task-reference"): str},
+ {voluptuous.Required("artifact-reference"): str},
+)
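A sketch of a schema built with these helpers; the field names here are hypothetical:

    from voluptuous import Required

    from taskgraph.util.schema import Schema, optionally_keyed_by, taskref_or_string

    example_schema = Schema(
        {
            Required("task-name"): str,
            # either a literal int, or keyed by test-platform
            "chunks": optionally_keyed_by("test-platform", int),
            # either a literal string, or a task/artifact reference
            "extra-config": taskref_or_string,
        }
    )
    example_schema(
        {
            "task-name": "build",
            "chunks": {"by-test-platform": {"win.*": 6, "default": 12}},
            "extra-config": {"task-reference": "<build>"},
        }
    )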
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/shell.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/shell.py
new file mode 100644
index 0000000000..d695767f05
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/shell.py
@@ -0,0 +1,40 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this file,
+# You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+SHELL_QUOTE_RE = re.compile(r"[\\\t\r\n \'\"#<>&|`(){}$;\*\?]")
+
+
+def _quote(s):
+ """Given a string, returns a version that can be used literally on a shell
+ command line, enclosing it with single quotes if necessary.
+
+ As a special case, if given an int, returns a string containing the int,
+ not enclosed in quotes.
+ """
+ if type(s) == int:
+ return "%d" % s
+
+ # Empty strings need to be quoted to have any significance
+ if s and not SHELL_QUOTE_RE.search(s) and not s.startswith("~"):
+ return s
+
+ # Single quoted strings can contain any characters unescaped except the
+ # single quote itself, which can't even be escaped, so the string needs to
+ # be closed, an escaped single quote added, and reopened.
+ t = type(s)
+ return t("'%s'") % s.replace(t("'"), t("'\\''"))
+
+
+def quote(*strings):
+ """Given one or more strings, returns a quoted string that can be used
+ literally on a shell command line.
+
+ >>> quote('a', 'b')
+    'a b'
+ >>> quote('a b', 'c')
+ "'a b' c"
+ """
+ return " ".join(_quote(s) for s in strings)
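Beyond the doctest above, quoting kicks in for any shell metacharacter:

    from taskgraph.util.shell import quote

    quote("echo", "hello world", "$HOME")
    # "echo 'hello world' '$HOME'"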
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/taskcluster.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/taskcluster.py
new file mode 100644
index 0000000000..a830a473b3
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/taskcluster.py
@@ -0,0 +1,373 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import datetime
+import functools
+import logging
+import os
+
+import requests
+import taskcluster_urls as liburls
+from requests.packages.urllib3.util.retry import Retry
+
+from taskgraph.task import Task
+from taskgraph.util import yaml
+from taskgraph.util.memoize import memoize
+
+logger = logging.getLogger(__name__)
+
+# this is set to true for `mach taskgraph action-callback --test`
+testing = False
+
+# Default rootUrl to use if none is given in the environment; this should point
+# to the production Taskcluster deployment used for CI.
+PRODUCTION_TASKCLUSTER_ROOT_URL = None
+
+# the maximum number of parallel Taskcluster API calls to make
+CONCURRENCY = 50
+
+
+@memoize
+def get_root_url(use_proxy):
+ """Get the current TASKCLUSTER_ROOT_URL.
+
+ When running in a task, this must come from $TASKCLUSTER_ROOT_URL; when run
+ on the command line, a default may be provided that points to the
+ production deployment of Taskcluster. If use_proxy is set, this attempts to
+ get TASKCLUSTER_PROXY_URL instead, failing if it is not set.
+ """
+ if use_proxy:
+ try:
+ return liburls.normalize_root_url(os.environ["TASKCLUSTER_PROXY_URL"])
+ except KeyError:
+ if "TASK_ID" not in os.environ:
+ raise RuntimeError(
+ "taskcluster-proxy is not available when not executing in a task"
+ )
+ else:
+ raise RuntimeError("taskcluster-proxy is not enabled for this task")
+
+ if "TASKCLUSTER_ROOT_URL" in os.environ:
+ logger.debug(
+ "Running in Taskcluster instance {}{}".format(
+ os.environ["TASKCLUSTER_ROOT_URL"],
+ " with taskcluster-proxy"
+ if "TASKCLUSTER_PROXY_URL" in os.environ
+ else "",
+ )
+ )
+ return liburls.normalize_root_url(os.environ["TASKCLUSTER_ROOT_URL"])
+
+ if "TASK_ID" in os.environ:
+ raise RuntimeError("$TASKCLUSTER_ROOT_URL must be set when running in a task")
+
+ if PRODUCTION_TASKCLUSTER_ROOT_URL is None:
+ raise RuntimeError(
+ "Could not detect Taskcluster instance, set $TASKCLUSTER_ROOT_URL"
+ )
+
+ logger.debug("Using default TASKCLUSTER_ROOT_URL")
+ return liburls.normalize_root_url(PRODUCTION_TASKCLUSTER_ROOT_URL)
+
+
+def requests_retry_session(
+ retries,
+ backoff_factor=0.1,
+ status_forcelist=(500, 502, 503, 504),
+ concurrency=CONCURRENCY,
+ session=None,
+):
+ session = session or requests.Session()
+ retry = Retry(
+ total=retries,
+ read=retries,
+ connect=retries,
+ backoff_factor=backoff_factor,
+ status_forcelist=status_forcelist,
+ )
+
+ # Default HTTPAdapter uses 10 connections. Mount custom adapter to increase
+ # that limit. Connections are established as needed, so using a large value
+ # should not negatively impact performance.
+ http_adapter = requests.adapters.HTTPAdapter(
+ pool_connections=concurrency,
+ pool_maxsize=concurrency,
+ max_retries=retry,
+ )
+ session.mount("http://", http_adapter)
+ session.mount("https://", http_adapter)
+
+ return session
+
+
+@memoize
+def get_session():
+ return requests_retry_session(retries=5)
+
+
+def _do_request(url, method=None, **kwargs):
+ if method is None:
+ method = "post" if kwargs else "get"
+
+ session = get_session()
+ if method == "get":
+ kwargs["stream"] = True
+
+ response = getattr(session, method)(url, **kwargs)
+
+ if response.status_code >= 400:
+ # Consume content before raise_for_status, so that the connection can be
+ # reused.
+ response.content
+ response.raise_for_status()
+ return response
+
+
+def _handle_artifact(path, response):
+ if path.endswith(".json"):
+ return response.json()
+ if path.endswith(".yml"):
+ return yaml.load_stream(response.text)
+ response.raw.read = functools.partial(response.raw.read, decode_content=True)
+ return response.raw
+
+
+def get_artifact_url(task_id, path, use_proxy=False):
+ artifact_tmpl = liburls.api(
+ get_root_url(False), "queue", "v1", "task/{}/artifacts/{}"
+ )
+ data = artifact_tmpl.format(task_id, path)
+ if use_proxy:
+ # Until Bug 1405889 is deployed, we can't download directly
+ # from the taskcluster-proxy. Work around by using the /bewit
+ # endpoint instead.
+ # The bewit URL is the body of a 303 redirect, which we don't
+ # want to follow (which fetches a potentially large resource).
+ response = _do_request(
+ os.environ["TASKCLUSTER_PROXY_URL"] + "/bewit",
+ data=data,
+ allow_redirects=False,
+ )
+ return response.text
+ return data
+
+
+def get_artifact(task_id, path, use_proxy=False):
+ """
+ Returns the artifact with the given path for the given task id.
+
+ If the path ends with ".json" or ".yml", the content is deserialized as,
+ respectively, json or yaml, and the corresponding python data (usually
+ dict) is returned.
+ For other types of content, a file-like object is returned.
+ """
+ response = _do_request(get_artifact_url(task_id, path, use_proxy))
+ return _handle_artifact(path, response)
+
+
+def list_artifacts(task_id, use_proxy=False):
+ response = _do_request(get_artifact_url(task_id, "", use_proxy).rstrip("/"))
+ return response.json()["artifacts"]
+
+
+def get_artifact_prefix(task):
+ prefix = None
+ if isinstance(task, dict):
+ prefix = task.get("attributes", {}).get("artifact_prefix")
+ elif isinstance(task, Task):
+ prefix = task.attributes.get("artifact_prefix")
+ else:
+ raise Exception(f"Can't find artifact-prefix of non-task: {task}")
+ return prefix or "public/build"
+
+
+def get_artifact_path(task, path):
+ return f"{get_artifact_prefix(task)}/{path}"
+
+
+def get_index_url(index_path, use_proxy=False, multiple=False):
+ index_tmpl = liburls.api(get_root_url(use_proxy), "index", "v1", "task{}/{}")
+ return index_tmpl.format("s" if multiple else "", index_path)
+
+
+def find_task_id(index_path, use_proxy=False):
+ try:
+ response = _do_request(get_index_url(index_path, use_proxy))
+ except requests.exceptions.HTTPError as e:
+ if e.response.status_code == 404:
+ raise KeyError(f"index path {index_path} not found")
+ raise
+ return response.json()["taskId"]
+
+
+def get_artifact_from_index(index_path, artifact_path, use_proxy=False):
+ full_path = index_path + "/artifacts/" + artifact_path
+ response = _do_request(get_index_url(full_path, use_proxy))
+ return _handle_artifact(full_path, response)
+
+
+def list_tasks(index_path, use_proxy=False):
+ """
+ Returns a list of task_ids where each task_id is indexed under a path
+ in the index. Results are sorted by expiration date from oldest to newest.
+ """
+ results = []
+ data = {}
+ while True:
+ response = _do_request(
+ get_index_url(index_path, use_proxy, multiple=True), json=data
+ )
+ response = response.json()
+ results += response["tasks"]
+ if response.get("continuationToken"):
+ data = {"continuationToken": response.get("continuationToken")}
+ else:
+ break
+
+    # We can sort on expires because, in the general case, all of these tasks
+    # are created with the same expiry offset, so expiration order matches
+    # creation order (earliest to latest action). If more correctness is
+    # needed, consider fetching each task and sorting on the created date.
+ results.sort(key=lambda t: parse_time(t["expires"]))
+ return [t["taskId"] for t in results]
+
+
+def parse_time(timestamp):
+ """Turn a "JSON timestamp" as used in TC APIs into a datetime"""
+ return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ")
+
+
+def get_task_url(task_id, use_proxy=False):
+ task_tmpl = liburls.api(get_root_url(use_proxy), "queue", "v1", "task/{}")
+ return task_tmpl.format(task_id)
+
+
+def get_task_definition(task_id, use_proxy=False):
+ response = _do_request(get_task_url(task_id, use_proxy))
+ return response.json()
+
+
+def cancel_task(task_id, use_proxy=False):
+ """Cancels a task given a task_id. In testing mode, just logs that it would
+ have cancelled."""
+ if testing:
+ logger.info(f"Would have cancelled {task_id}.")
+ else:
+ _do_request(get_task_url(task_id, use_proxy) + "/cancel", json={})
+
+
+def status_task(task_id, use_proxy=False):
+ """Gets the status of a task given a task_id.
+
+ In testing mode, just logs that it would have retrieved status.
+
+ Args:
+ task_id (str): A task id.
+ use_proxy (bool): Whether to use taskcluster-proxy (default: False)
+
+ Returns:
+ dict: A dictionary object as defined here:
+ https://docs.taskcluster.net/docs/reference/platform/queue/api#status
+ """
+ if testing:
+ logger.info(f"Would have gotten status for {task_id}.")
+ else:
+ resp = _do_request(get_task_url(task_id, use_proxy) + "/status")
+ status = resp.json().get("status", {})
+ return status
+
+
+def state_task(task_id, use_proxy=False):
+ """Gets the state of a task given a task_id.
+
+ In testing mode, just logs that it would have retrieved state. This is a subset of the
+ data returned by :func:`status_task`.
+
+ Args:
+ task_id (str): A task id.
+ use_proxy (bool): Whether to use taskcluster-proxy (default: False)
+
+ Returns:
+ str: The state of the task, one of
+ ``pending, running, completed, failed, exception, unknown``.
+ """
+ if testing:
+ logger.info(f"Would have gotten state for {task_id}.")
+ else:
+ status = status_task(task_id, use_proxy=use_proxy).get("state") or "unknown"
+ return status
+
+
+def rerun_task(task_id):
+ """Reruns a task given a task_id. In testing mode, just logs that it would
+ have reran."""
+ if testing:
+ logger.info(f"Would have rerun {task_id}.")
+ else:
+ _do_request(get_task_url(task_id, use_proxy=True) + "/rerun", json={})
+
+
+def get_current_scopes():
+ """Get the current scopes. This only makes sense in a task with the Taskcluster
+ proxy enabled, where it returns the actual scopes accorded to the task."""
+ auth_url = liburls.api(get_root_url(True), "auth", "v1", "scopes/current")
+ resp = _do_request(auth_url)
+ return resp.json().get("scopes", [])
+
+
+def get_purge_cache_url(provisioner_id, worker_type, use_proxy=False):
+ url_tmpl = liburls.api(
+ get_root_url(use_proxy), "purge-cache", "v1", "purge-cache/{}/{}"
+ )
+ return url_tmpl.format(provisioner_id, worker_type)
+
+
+def purge_cache(provisioner_id, worker_type, cache_name, use_proxy=False):
+ """Requests a cache purge from the purge-caches service."""
+ if testing:
+ logger.info(
+ "Would have purged {}/{}/{}.".format(
+ provisioner_id, worker_type, cache_name
+ )
+ )
+ else:
+ logger.info(f"Purging {provisioner_id}/{worker_type}/{cache_name}.")
+ purge_cache_url = get_purge_cache_url(provisioner_id, worker_type, use_proxy)
+ _do_request(purge_cache_url, json={"cacheName": cache_name})
+
+
+def send_email(address, subject, content, link, use_proxy=False):
+ """Sends an email using the notify service"""
+ logger.info(f"Sending email to {address}.")
+ url = liburls.api(get_root_url(use_proxy), "notify", "v1", "email")
+ _do_request(
+ url,
+ json={
+ "address": address,
+ "subject": subject,
+ "content": content,
+ "link": link,
+ },
+ )
+
+
+def list_task_group_incomplete_tasks(task_group_id):
+ """Generate the incomplete tasks in a task group"""
+ params = {}
+ while True:
+ url = liburls.api(
+ get_root_url(False),
+ "queue",
+ "v1",
+ f"task-group/{task_group_id}/list",
+ )
+ resp = _do_request(url, method="get", params=params).json()
+ for task in [t["status"] for t in resp["tasks"]]:
+ if task["state"] in ["running", "pending", "unscheduled"]:
+ yield task["taskId"]
+ if resp.get("continuationToken"):
+ params = {"continuationToken": resp.get("continuationToken")}
+ else:
+ break
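A minimal sketch of querying the index and fetching an artifact; the index path is hypothetical and TASKCLUSTER_ROOT_URL is assumed to be set in the environment:

    from taskgraph.util.taskcluster import find_task_id, get_artifact

    task_id = find_task_id("my-project.v2.main.latest.taskgraph.decision")
    label_to_taskid = get_artifact(task_id, "public/label-to-taskid.json")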
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/taskgraph.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/taskgraph.py
new file mode 100644
index 0000000000..7b545595ef
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/taskgraph.py
@@ -0,0 +1,54 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+"""
+Tools for interacting with existing taskgraphs.
+"""
+
+
+from taskgraph.util.taskcluster import find_task_id, get_artifact
+
+
+def find_decision_task(parameters, graph_config):
+ """Given the parameters for this action, find the taskId of the decision
+ task"""
+ if parameters.get("repository_type", "hg") == "hg":
+ return find_task_id(
+ "{}.v2.{}.pushlog-id.{}.decision".format(
+ graph_config["trust-domain"],
+ parameters["project"],
+ parameters["pushlog_id"],
+ )
+ )
+ elif parameters["repository_type"] == "git":
+ return find_task_id(
+ "{}.v2.{}.revision.{}.taskgraph.decision".format(
+ graph_config["trust-domain"],
+ parameters["project"],
+ parameters["head_rev"],
+ )
+ )
+ else:
+ raise Exception(
+ "Unknown repository_type {}!".format(parameters["repository_type"])
+ )
+
+
+def find_existing_tasks_from_previous_kinds(
+ full_task_graph, previous_graph_ids, rebuild_kinds
+):
+ """Given a list of previous decision/action taskIds and kinds to ignore
+ from the previous graphs, return a dictionary of labels-to-taskids to use
+ as ``existing_tasks`` in the optimization step."""
+ existing_tasks = {}
+ for previous_graph_id in previous_graph_ids:
+ label_to_taskid = get_artifact(previous_graph_id, "public/label-to-taskid.json")
+ kind_labels = {
+ t.label
+ for t in full_task_graph.tasks.values()
+ if t.attributes["kind"] not in rebuild_kinds
+ }
+ for label in set(label_to_taskid.keys()).intersection(kind_labels):
+ existing_tasks[label] = label_to_taskid[label]
+ return existing_tasks
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/templates.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/templates.py
new file mode 100644
index 0000000000..465e4a43de
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/templates.py
@@ -0,0 +1,50 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import copy
+
+
+def merge_to(source, dest):
+ """
+ Merge dict and arrays (override scalar values)
+
+ Keys from source override keys from dest, and elements from lists in source
+ are appended to lists in dest.
+
+ :param dict source: to copy from
+ :param dict dest: to copy to (modified in place)
+ """
+
+ for key, value in source.items():
+ # Override mismatching or empty types
+ if type(value) != type(dest.get(key)): # noqa
+ dest[key] = source[key]
+ continue
+
+ # Merge dict
+ if isinstance(value, dict):
+ merge_to(value, dest[key])
+ continue
+
+ if isinstance(value, list):
+ dest[key] = dest[key] + source[key]
+ continue
+
+ dest[key] = source[key]
+
+ return dest
+
+
+def merge(*objects):
+ """
+ Merge the given objects, using the semantics described for merge_to, with
+ objects later in the list taking precedence. From an inheritance
+ perspective, "parents" should be listed before "children".
+
+ Returns the result without modifying any arguments.
+ """
+ if len(objects) == 1:
+ return copy.deepcopy(objects[0])
+ return merge_to(objects[-1], merge(*objects[:-1]))
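For example, scalars are overridden while lists are concatenated, with later objects taking precedence:

    from taskgraph.util.templates import merge

    merge({"a": 1, "xs": [1]}, {"a": 2, "xs": [2]})
    # {"a": 2, "xs": [1, 2]}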
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/time.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/time.py
new file mode 100644
index 0000000000..e511978b5f
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/time.py
@@ -0,0 +1,115 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Python port of the ms.js node module. This is not a direct port: some things
+# are more complicated or less precise, and we lean on timedelta here.
+
+
+import datetime
+import re
+
+PATTERN = re.compile(r"((?:\d+)?\.?\d+) *([a-z]+)")
+
+
+def seconds(value):
+ return datetime.timedelta(seconds=int(value))
+
+
+def minutes(value):
+ return datetime.timedelta(minutes=int(value))
+
+
+def hours(value):
+ return datetime.timedelta(hours=int(value))
+
+
+def days(value):
+ return datetime.timedelta(days=int(value))
+
+
+def months(value):
+ # See warning in years(), below
+ return datetime.timedelta(days=int(value) * 30)
+
+
+def years(value):
+    # Warning: "years" here is vague; don't use this for sensitive date
+    # computation. The idea is to give you an absolute amount of time in the
+    # future, which is not the same thing as "precisely on this date next year".
+ return datetime.timedelta(days=int(value) * 365)
+
+
+ALIASES = {}
+ALIASES["seconds"] = ALIASES["second"] = ALIASES["s"] = seconds
+ALIASES["minutes"] = ALIASES["minute"] = ALIASES["min"] = minutes
+ALIASES["hours"] = ALIASES["hour"] = ALIASES["h"] = hours
+ALIASES["days"] = ALIASES["day"] = ALIASES["d"] = days
+ALIASES["months"] = ALIASES["month"] = ALIASES["mo"] = months
+ALIASES["years"] = ALIASES["year"] = ALIASES["y"] = years
+
+
+class InvalidString(Exception):
+ pass
+
+
+class UnknownTimeMeasurement(Exception):
+ pass
+
+
+def value_of(input_str):
+ """
+    Convert a duration string into a ``datetime.timedelta``.
+    :param str input_str: a duration string (ex: 1d, 2d, 6years, 2 seconds)
+    :returns: the corresponding ``datetime.timedelta``
+ """
+
+ matches = PATTERN.search(input_str)
+
+ if matches is None or len(matches.groups()) < 2:
+        raise InvalidString(f"'{input_str}' is an invalid duration string")
+
+ value, unit = matches.groups()
+
+ if unit not in ALIASES:
+ raise UnknownTimeMeasurement(
+            "{} is not a valid time measure; use one of {}".format(
+ unit, sorted(ALIASES.keys())
+ )
+ )
+
+ return ALIASES[unit](value)
+
+
+def json_time_from_now(input_str, now=None, datetime_format=False):
+ """
+ :param str input_str: Input string (see value of)
+ :param datetime now: Optionally set the definition of `now`
+ :param boolean datetime_format: Set `True` to get a `datetime` output
+ :returns: JSON string representation of time in future.
+ """
+
+ if now is None:
+ now = datetime.datetime.utcnow()
+
+ time = now + value_of(input_str)
+
+ if datetime_format is True:
+ return time
+ else:
+        # Somewhat of a hack, but the JSON schema validator for dates does not
+        # like ISO dates until 'Z' (for timezone) is added.
+ # Microseconds are excluded (see bug 1381801)
+ return time.isoformat(timespec="milliseconds") + "Z"
+
+
+def current_json_time(datetime_format=False):
+ """
+ :param boolean datetime_format: Set `True` to get a `datetime` output
+ :returns: JSON string representation of the current time.
+ """
+ if datetime_format is True:
+ return datetime.datetime.utcnow()
+ else:
+ # Microseconds are excluded (see bug 1381801)
+ return datetime.datetime.utcnow().isoformat(timespec="milliseconds") + "Z"
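A couple of calls showing the expected shapes (the fixed `now` makes the output deterministic):

    import datetime

    from taskgraph.util.time import json_time_from_now, value_of

    value_of("2 hours")  # datetime.timedelta(seconds=7200)
    json_time_from_now("1d", now=datetime.datetime(2024, 1, 1))
    # "2024-01-02T00:00:00.000Z"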
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py
new file mode 100644
index 0000000000..9d0c032a1b
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/treeherder.py
@@ -0,0 +1,64 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import re
+
+_JOINED_SYMBOL_RE = re.compile(r"([^(]*)\(([^)]*)\)$")
+
+
+def split_symbol(treeherder_symbol):
+ """Split a symbol expressed as grp(sym) into its two parts. If no group is
+ given, the returned group is '?'"""
+ groupSymbol = "?"
+ symbol = treeherder_symbol
+ if "(" in symbol:
+ match = _JOINED_SYMBOL_RE.match(symbol)
+ if match:
+ groupSymbol, symbol = match.groups()
+ else:
+ raise Exception(f"`{symbol}` is not a valid treeherder symbol.")
+ return groupSymbol, symbol
+
+
+def join_symbol(group, symbol):
+ """Perform the reverse of split_symbol, combining the given group and
+ symbol. If the group is '?', then it is omitted."""
+ if group == "?":
+ return symbol
+ return f"{group}({symbol})"
+
+
+def add_suffix(treeherder_symbol, suffix):
+ """Add a suffix to a treeherder symbol that may contain a group."""
+ group, symbol = split_symbol(treeherder_symbol)
+ symbol += str(suffix)
+ return join_symbol(group, symbol)
+
+
+def replace_group(treeherder_symbol, new_group):
+    """Replace the group of a treeherder symbol, keeping the symbol part."""
+ _, symbol = split_symbol(treeherder_symbol)
+ return join_symbol(new_group, symbol)
+
+
+def inherit_treeherder_from_dep(job, dep_job):
+ """Inherit treeherder defaults from dep_job"""
+ treeherder = job.get("treeherder", {})
+
+ dep_th_platform = (
+ dep_job.task.get("extra", {})
+ .get("treeherder", {})
+ .get("machine", {})
+ .get("platform", "")
+ )
+ dep_th_collection = list(
+ dep_job.task.get("extra", {}).get("treeherder", {}).get("collection", {}).keys()
+ )[0]
+ treeherder.setdefault("platform", f"{dep_th_platform}/{dep_th_collection}")
+ treeherder.setdefault(
+ "tier", dep_job.task.get("extra", {}).get("treeherder", {}).get("tier", 1)
+ )
+ # Does not set symbol
+ treeherder.setdefault("kind", "build")
+ return treeherder
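A few calls illustrating the symbol helpers:

    from taskgraph.util.treeherder import add_suffix, replace_group, split_symbol

    split_symbol("M(c1)")        # ("M", "c1")
    split_symbol("B")            # ("?", "B")
    add_suffix("M(c1)", "-sw")   # "M(c1-sw)"
    replace_group("M(c1)", "X")  # "X(c1)"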
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py
new file mode 100644
index 0000000000..ba1d909019
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/vcs.py
@@ -0,0 +1,539 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+import os
+import re
+import subprocess
+from abc import ABC, abstractmethod, abstractproperty
+from shutil import which
+
+import requests
+from redo import retry
+
+from taskgraph.util.path import ancestors
+
+PUSHLOG_TMPL = "{}/json-pushes?version=2&changeset={}&tipsonly=1&full=1"
+
+logger = logging.getLogger(__name__)
+
+
+class Repository(ABC):
+    # Both Mercurial and Git use sha1 as revision identifiers. Luckily, both define
+ # the same value as the null revision.
+ #
+ # https://github.com/git/git/blob/dc04167d378fb29d30e1647ff6ff51dd182bc9a3/t/oid-info/hash-info#L7
+ # https://www.mercurial-scm.org/repo/hg-stable/file/82efc31bd152/mercurial/node.py#l30
+ NULL_REVISION = "0000000000000000000000000000000000000000"
+
+ def __init__(self, path):
+ self.path = path
+ self.binary = which(self.tool)
+ if self.binary is None:
+ raise OSError(f"{self.tool} not found!")
+ self._valid_diff_filter = ("m", "a", "d")
+
+ self._env = os.environ.copy()
+
+ def run(self, *args: str, **kwargs):
+ return_codes = kwargs.pop("return_codes", [])
+ cmd = (self.binary,) + args
+
+ try:
+ return subprocess.check_output(
+ cmd, cwd=self.path, env=self._env, encoding="utf-8", **kwargs
+ )
+ except subprocess.CalledProcessError as e:
+ if e.returncode in return_codes:
+ return ""
+ raise
+
+ @abstractproperty
+ def tool(self) -> str:
+ """Version control system being used, either 'hg' or 'git'."""
+
+ @abstractproperty
+ def head_rev(self) -> str:
+ """Hash of HEAD revision."""
+
+ @abstractproperty
+ def base_rev(self):
+ """Hash of revision the current topic branch is based on."""
+
+ @abstractproperty
+ def branch(self):
+ """Current branch or bookmark the checkout has active."""
+
+ @abstractproperty
+ def all_remote_names(self):
+        """Names of all configured remote repositories."""
+
+ @abstractproperty
+ def default_remote_name(self):
+ """Name the VCS defines for the remote repository when cloning
+ it for the first time. This name may not exist anymore if users
+ changed the default configuration, for instance."""
+
+ @abstractproperty
+ def remote_name(self):
+ """Name of the remote repository."""
+
+ def _get_most_suitable_remote(self, remote_instructions):
+ remotes = self.all_remote_names
+ if len(remotes) == 1:
+ return remotes[0]
+
+ if self.default_remote_name in remotes:
+ return self.default_remote_name
+
+ first_remote = remotes[0]
+ logger.warning(
+ f"Unable to determine which remote repository to use between: {remotes}. "
+ f'Arbitrarily using the first one "{first_remote}". Please set an '
+ f"`{self.default_remote_name}` remote if the arbitrarily selected one "
+ f"is not right. To do so: {remote_instructions}"
+ )
+
+ return first_remote
+
+ @abstractproperty
+ def default_branch(self):
+ """Name of the default branch."""
+
+ @abstractmethod
+ def get_url(self, remote=None):
+ """Get URL of the upstream repository."""
+
+ @abstractmethod
+ def get_commit_message(self, revision=None):
+ """Commit message of specified revision or current commit."""
+
+ @abstractmethod
+ def get_changed_files(self, diff_filter, mode="unstaged", rev=None, base_rev=None):
+ """Return a list of files that are changed in:
+ * either this repository's working copy,
+ * or at a given revision (``rev``)
+ * or between 2 revisions (``base_rev`` and ``rev``)
+
+ ``diff_filter`` controls which kinds of modifications are returned.
+ It is a string which may only contain the following characters:
+
+ A - Include files that were added
+ D - Include files that were deleted
+ M - Include files that were modified
+
+ By default, all three will be included.
+
+ ``mode`` can be one of 'unstaged', 'staged' or 'all'. Only has an
+ effect on git. Defaults to 'unstaged'.
+
+ ``rev`` is a specifier for which changesets to consider for
+ changes. The exact meaning depends on the vcs system being used.
+
+ ``base_rev`` specifies the range of changesets. This parameter cannot
+ be used without ``rev``. The range includes ``rev`` but excludes
+ ``base_rev``.
+ """
+
+ @abstractmethod
+ def get_outgoing_files(self, diff_filter, upstream):
+ """Return a list of changed files compared to upstream.
+
+ ``diff_filter`` works the same as `get_changed_files`.
+ ``upstream`` is a remote ref to compare against. If unspecified,
+ this will be determined automatically. If there is no remote ref,
+ a MissingUpstreamRepo exception will be raised.
+ """
+
+ @abstractmethod
+ def working_directory_clean(self, untracked=False, ignored=False):
+ """Determine if the working directory is free of modifications.
+
+ Returns True if the working directory does not have any file
+ modifications. False otherwise.
+
+ By default, untracked and ignored files are not considered. If
+ ``untracked`` or ``ignored`` are set, they influence the clean check
+ to factor these file classes into consideration.
+ """
+
+ @abstractmethod
+ def update(self, ref):
+ """Update the working directory to the specified reference."""
+
+ @abstractmethod
+ def find_latest_common_revision(self, base_ref_or_rev, head_rev):
+ """Find the latest revision that is common to both the given
+ ``head_rev`` and ``base_ref_or_rev``"""
+
+ @abstractmethod
+ def does_revision_exist_locally(self, revision):
+ """Check whether this revision exists in the local repository.
+
+ If this function returns an unexpected value, then make sure
+ the revision was fetched from the remote repository."""
+
+
+class HgRepository(Repository):
+ tool = "hg"
+ default_remote_name = "default"
+
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self._env["HGPLAIN"] = "1"
+
+ @property
+ def head_rev(self):
+ return self.run("log", "-r", ".", "-T", "{node}").strip()
+
+ @property
+ def base_rev(self):
+ return self.run("log", "-r", "last(ancestors(.) and public())", "-T", "{node}")
+
+ @property
+ def branch(self):
+ bookmarks_fn = os.path.join(self.path, ".hg", "bookmarks.current")
+ if os.path.exists(bookmarks_fn):
+ with open(bookmarks_fn) as f:
+ bookmark = f.read()
+ return bookmark or None
+
+ return None
+
+ @property
+ def all_remote_names(self):
+ remotes = self.run("paths", "--quiet").splitlines()
+ if not remotes:
+ raise RuntimeError("No remotes defined")
+ return remotes
+
+ @property
+ def remote_name(self):
+ return self._get_most_suitable_remote(
+ "Edit .hg/hgrc and add:\n\n[paths]\ndefault = $URL",
+ )
+
+ @property
+ def default_branch(self):
+ # Mercurial recommends keeping "default"
+ # https://www.mercurial-scm.org/wiki/StandardBranching#Don.27t_use_a_name_other_than_default_for_your_main_development_branch
+ return "default"
+
+ def get_url(self, remote="default"):
+ return self.run("path", "-T", "{url}", remote).strip()
+
+ def get_commit_message(self, revision=None):
+ revision = revision or self.head_rev
+        return self.run("log", "-r", revision, "-T", "{desc}")
+
+ def _format_diff_filter(self, diff_filter, for_status=False):
+ df = diff_filter.lower()
+ assert all(f in self._valid_diff_filter for f in df)
+
+ # When looking at the changes in the working directory, the hg status
+ # command uses 'd' for files that have been deleted with a non-hg
+ # command, and 'r' for files that have been `hg rm`ed. Use both.
+ return df.replace("d", "dr") if for_status else df
+
+ def _files_template(self, diff_filter):
+ template = ""
+ df = self._format_diff_filter(diff_filter)
+ if "a" in df:
+ template += "{file_adds % '{file}\\n'}"
+ if "d" in df:
+ template += "{file_dels % '{file}\\n'}"
+ if "m" in df:
+ template += "{file_mods % '{file}\\n'}"
+ return template
+
+ def get_changed_files(
+ self, diff_filter="ADM", mode="unstaged", rev=None, base_rev=None
+ ):
+ if rev is None:
+ if base_rev is not None:
+ raise ValueError("Cannot specify `base_rev` without `rev`")
+ # Use --no-status to print just the filename.
+ df = self._format_diff_filter(diff_filter, for_status=True)
+ return self.run("status", "--no-status", f"-{df}").splitlines()
+ else:
+ template = self._files_template(diff_filter)
+ revision_argument = rev if base_rev is None else f"{base_rev}~-1::{rev}"
+ return self.run("log", "-r", revision_argument, "-T", template).splitlines()
+
+ def get_outgoing_files(self, diff_filter="ADM", upstream=None):
+ template = self._files_template(diff_filter)
+
+ if not upstream:
+ return self.run(
+ "log", "-r", "draft() and ancestors(.)", "--template", template
+ ).split()
+
+ return self.run(
+ "outgoing",
+ "-r",
+ ".",
+ "--quiet",
+ "--template",
+ template,
+ upstream,
+ return_codes=(1,),
+ ).split()
+
+ def working_directory_clean(self, untracked=False, ignored=False):
+ args = ["status", "--modified", "--added", "--removed", "--deleted"]
+ if untracked:
+ args.append("--unknown")
+ if ignored:
+ args.append("--ignored")
+
+ # If output is empty, there are no entries of requested status, which
+ # means we are clean.
+ return not len(self.run(*args).strip())
+
+ def update(self, ref):
+ return self.run("update", "--check", ref)
+
+ def find_latest_common_revision(self, base_ref_or_rev, head_rev):
+ return self.run(
+ "log",
+ "-r",
+ f"last(ancestors('{base_ref_or_rev}') and ancestors('{head_rev}'))",
+ "--template",
+ "{node}",
+ ).strip()
+
+ def does_revision_exist_locally(self, revision):
+ try:
+ return self.run("log", "-r", revision).strip() != ""
+ except subprocess.CalledProcessError as e:
+ # Error code 255 comes with the message:
+ # "abort: unknown revision $REVISION"
+ if e.returncode == 255:
+ return False
+ raise
+
+
+class GitRepository(Repository):
+ tool = "git"
+ default_remote_name = "origin"
+
+ _LS_REMOTE_PATTERN = re.compile(r"ref:\s+refs/heads/(?P<branch_name>\S+)\s+HEAD")
+
+ @property
+ def head_rev(self):
+ return self.run("rev-parse", "--verify", "HEAD").strip()
+
+ @property
+ def base_rev(self):
+ refs = self.run(
+ "rev-list", "HEAD", "--topo-order", "--boundary", "--not", "--remotes"
+ ).splitlines()
+ if refs:
+ return refs[-1][1:] # boundary starts with a prefix `-`
+ return self.head_rev
+
+ @property
+ def branch(self):
+ return self.run("branch", "--show-current").strip() or None
+
+ @property
+ def all_remote_names(self):
+ remotes = self.run("remote").splitlines()
+ if not remotes:
+ raise RuntimeError("No remotes defined")
+ return remotes
+
+ @property
+ def remote_name(self):
+ try:
+ remote_branch_name = self.run(
+ "rev-parse", "--verify", "--abbrev-ref", "--symbolic-full-name", "@{u}"
+ ).strip()
+ return remote_branch_name.split("/")[0]
+ except subprocess.CalledProcessError as e:
+ # Error code 128 comes with the message:
+ # "fatal: no upstream configured for branch $BRANCH"
+ if e.returncode != 128:
+ raise
+
+ return self._get_most_suitable_remote("`git remote add origin $URL`")
+
+ @property
+ def default_branch(self):
+ try:
+ # this one works if the current repo was cloned from an existing
+ # repo elsewhere
+ return self._get_default_branch_from_cloned_metadata()
+ except (subprocess.CalledProcessError, RuntimeError):
+ pass
+
+ try:
+ # This call works if you have (network) access to the repo
+ return self._get_default_branch_from_remote_query()
+ except (subprocess.CalledProcessError, RuntimeError):
+ pass
+
+ # this one is the last resort in case the remote is not accessible and
+ # the local repo is where `git init` was made
+ return self._guess_default_branch()
+
+ def _get_default_branch_from_remote_query(self):
+ # This function requires network access to the repo
+ remote_name = self.remote_name
+ output = self.run("ls-remote", "--symref", remote_name, "HEAD")
+ matches = self._LS_REMOTE_PATTERN.search(output)
+ if not matches:
+ raise RuntimeError(
+ f'Could not find the default branch of remote repository "{remote_name}". '
+                f"Got: {output}"
+ )
+
+ branch_name = matches.group("branch_name")
+ return f"{remote_name}/{branch_name}"
+
+ def _get_default_branch_from_cloned_metadata(self):
+ return self.run("rev-parse", "--abbrev-ref", f"{self.remote_name}/HEAD").strip()
+
+ def _guess_default_branch(self):
+ branches = [
+ line.strip()
+ for line in self.run(
+ "branch", "--all", "--no-color", "--format=%(refname)"
+ ).splitlines()
+ for candidate_branch in ("main", "master", "branches/default/tip")
+ if line.strip().endswith(candidate_branch)
+ ]
+
+ if len(branches) == 1:
+ return branches[0]
+
+ raise RuntimeError(f"Unable to find default branch. Got: {branches}")
+
+ def get_url(self, remote="origin"):
+ return self.run("remote", "get-url", remote).strip()
+
+ def get_commit_message(self, revision=None):
+ revision = revision or self.head_rev
+        return self.run("log", "-n1", "--format=%B", revision)
+
+ def get_changed_files(
+ self, diff_filter="ADM", mode="unstaged", rev=None, base_rev=None
+ ):
+ assert all(f.lower() in self._valid_diff_filter for f in diff_filter)
+
+ if rev is None:
+ if base_rev is not None:
+ raise ValueError("Cannot specify `base_rev` without `rev`")
+ cmd = ["diff"]
+ if mode == "staged":
+ cmd.append("--cached")
+ elif mode == "all":
+ cmd.append("HEAD")
+ else:
+ revision_argument = (
+ f"{rev}~1..{rev}" if base_rev is None else f"{base_rev}..{rev}"
+ )
+ cmd = ["log", "--format=format:", revision_argument]
+
+ cmd.append("--name-only")
+ cmd.append("--diff-filter=" + diff_filter.upper())
+
+ files = self.run(*cmd).splitlines()
+ return [f for f in files if f]
+
+ def get_outgoing_files(self, diff_filter="ADM", upstream=None):
+ assert all(f.lower() in self._valid_diff_filter for f in diff_filter)
+
+ not_condition = upstream if upstream else "--remotes"
+
+ files = self.run(
+ "log",
+ "--name-only",
+ f"--diff-filter={diff_filter.upper()}",
+ "--oneline",
+ "--pretty=format:",
+ "HEAD",
+ "--not",
+ not_condition,
+ ).splitlines()
+ return [f for f in files if f]
+
+ def working_directory_clean(self, untracked=False, ignored=False):
+ args = ["status", "--porcelain"]
+
+ # Even in --porcelain mode, behavior is affected by the
+ # ``status.showUntrackedFiles`` option, which means we need to be
+ # explicit about how to treat untracked files.
+ if untracked:
+ args.append("--untracked-files=all")
+ else:
+ args.append("--untracked-files=no")
+
+ if ignored:
+ args.append("--ignored")
+
+ # If output is empty, there are no entries of requested status, which
+ # means we are clean.
+ return not len(self.run(*args).strip())
+
+ def update(self, ref):
+ self.run("checkout", ref)
+
+ def find_latest_common_revision(self, base_ref_or_rev, head_rev):
+ return self.run("merge-base", base_ref_or_rev, head_rev).strip()
+
+ def does_revision_exist_locally(self, revision):
+ try:
+ return self.run("cat-file", "-t", revision).strip() == "commit"
+ except subprocess.CalledProcessError as e:
+ # Error code 128 comes with the message:
+ # "git cat-file: could not get object info"
+ if e.returncode == 128:
+ return False
+ raise
+
+
+def get_repository(path):
+ """Get a repository object for the repository at `path`.
+ If `path` is not a known VCS repository, raise an exception.
+ """
+ for path in ancestors(path):
+ if os.path.isdir(os.path.join(path, ".hg")):
+ return HgRepository(path)
+ elif os.path.exists(os.path.join(path, ".git")):
+ return GitRepository(path)
+
+    raise RuntimeError("Current directory is neither a git nor an hg repository")
+
+
+def find_hg_revision_push_info(repository, revision):
+    """Given a repository URL and a revision, find the pushlog_id of the
+    revision."""
+ pushlog_url = PUSHLOG_TMPL.format(repository, revision)
+
+ def query_pushlog(url):
+        r = requests.get(url, timeout=60)
+ r.raise_for_status()
+ return r
+
+ r = retry(
+ query_pushlog,
+ args=(pushlog_url,),
+ attempts=5,
+ sleeptime=10,
+ )
+ pushes = r.json()["pushes"]
+ if len(pushes) != 1:
+ raise RuntimeError(
+ "Unable to find a single pushlog_id for {} revision {}: {}".format(
+ repository, revision, pushes
+ )
+ )
+ pushid = list(pushes.keys())[0]
+ return {
+ "pushdate": pushes[pushid]["date"],
+ "pushid": pushid,
+ "user": pushes[pushid]["user"],
+ }
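A usage sketch, assuming the current working directory is inside a git or hg checkout:

    import os

    from taskgraph.util.vcs import get_repository

    repo = get_repository(os.getcwd())
    print(repo.tool, repo.head_rev)
    modified = repo.get_changed_files(diff_filter="M")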
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py
new file mode 100644
index 0000000000..5911914f13
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/verify.py
@@ -0,0 +1,283 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import logging
+import sys
+from abc import ABC, abstractmethod
+
+import attr
+
+from taskgraph.config import GraphConfig
+from taskgraph.parameters import Parameters
+from taskgraph.taskgraph import TaskGraph
+from taskgraph.util.attributes import match_run_on_projects
+from taskgraph.util.treeherder import join_symbol
+
+logger = logging.getLogger(__name__)
+
+
+@attr.s(frozen=True)
+class Verification(ABC):
+ func = attr.ib()
+
+ @abstractmethod
+ def verify(self, **kwargs) -> None:
+ pass
+
+
+@attr.s(frozen=True)
+class InitialVerification(Verification):
+ """Verification that doesn't depend on any generation state."""
+
+ def verify(self):
+ self.func()
+
+
+@attr.s(frozen=True)
+class GraphVerification(Verification):
+ """Verification for a TaskGraph object."""
+
+ run_on_projects = attr.ib(default=None)
+
+ def verify(
+ self, graph: TaskGraph, graph_config: GraphConfig, parameters: Parameters
+ ):
+ if self.run_on_projects and not match_run_on_projects(
+ parameters["project"], self.run_on_projects
+ ):
+ return
+
+ scratch_pad = {}
+ graph.for_each_task(
+ self.func,
+ scratch_pad=scratch_pad,
+ graph_config=graph_config,
+ parameters=parameters,
+ )
+ self.func(
+ None,
+ graph,
+ scratch_pad=scratch_pad,
+ graph_config=graph_config,
+ parameters=parameters,
+ )
+
+
+@attr.s(frozen=True)
+class ParametersVerification(Verification):
+ """Verification for a set of parameters."""
+
+ def verify(self, parameters: Parameters):
+ self.func(parameters)
+
+
+@attr.s(frozen=True)
+class KindsVerification(Verification):
+ """Verification for kinds."""
+
+ def verify(self, kinds: dict):
+ self.func(kinds)
+
+
+@attr.s(frozen=True)
+class VerificationSequence:
+ """
+ Container for a sequence of verifications over a TaskGraph. Each
+ verification is represented as a callable taking (task, taskgraph,
+ scratch_pad), called for each task in the taskgraph, and one more
+ time with no task but with the taskgraph and the same scratch_pad
+ that was passed for each task.
+ """
+
+ _verifications = attr.ib(factory=dict)
+ _verification_types = {
+ "graph": GraphVerification,
+ "initial": InitialVerification,
+ "kinds": KindsVerification,
+ "parameters": ParametersVerification,
+ }
+
+ def __call__(self, name, *args, **kwargs):
+ for verification in self._verifications.get(name, []):
+ verification.verify(*args, **kwargs)
+
+ def add(self, name, **kwargs):
+ cls = self._verification_types.get(name, GraphVerification)
+
+ def wrap(func):
+ self._verifications.setdefault(name, []).append(cls(func, **kwargs))
+ return func
+
+ return wrap
+
+
+verifications = VerificationSequence()
+
+
+@verifications.add("full_task_graph")
+def verify_task_graph_symbol(task, taskgraph, scratch_pad, graph_config, parameters):
+ """
+ This function verifies that tuple
+ (collection.keys(), machine.platform, groupSymbol, symbol) is unique
+ for a target task graph.
+ """
+ if task is None:
+ return
+ task_dict = task.task
+ if "extra" in task_dict:
+ extra = task_dict["extra"]
+ if "treeherder" in extra:
+ treeherder = extra["treeherder"]
+
+ collection_keys = tuple(sorted(treeherder.get("collection", {}).keys()))
+ if len(collection_keys) != 1:
+ raise Exception(
+ "Task {} can't be in multiple treeherder collections "
+ "(the part of the platform after `/`): {}".format(
+ task.label, collection_keys
+ )
+ )
+ platform = treeherder.get("machine", {}).get("platform")
+ group_symbol = treeherder.get("groupSymbol")
+ symbol = treeherder.get("symbol")
+
+ key = (platform, collection_keys[0], group_symbol, symbol)
+ if key in scratch_pad:
+ raise Exception(
+ "Duplicate treeherder platform and symbol in tasks "
+                    "`{}` and `{}`: {} {}".format(
+ task.label,
+ scratch_pad[key],
+ f"{platform}/{collection_keys[0]}",
+ join_symbol(group_symbol, symbol),
+ )
+ )
+ else:
+ scratch_pad[key] = task.label
+
+
+@verifications.add("full_task_graph")
+def verify_trust_domain_v2_routes(
+ task, taskgraph, scratch_pad, graph_config, parameters
+):
+ """
+ This function ensures that any two tasks have distinct ``index.{trust-domain}.v2`` routes.
+ """
+ if task is None:
+ return
+ route_prefix = "index.{}.v2".format(graph_config["trust-domain"])
+ task_dict = task.task
+ routes = task_dict.get("routes", [])
+
+ for route in routes:
+ if route.startswith(route_prefix):
+ if route in scratch_pad:
+ raise Exception(
+ "conflict between {}:{} for route: {}".format(
+ task.label, scratch_pad[route], route
+ )
+ )
+ else:
+ scratch_pad[route] = task.label
+
+
+@verifications.add("full_task_graph")
+def verify_routes_notification_filters(
+ task, taskgraph, scratch_pad, graph_config, parameters
+):
+ """
+ This function ensures that only understood filters for notifications are
+ specified.
+
+ See: https://docs.taskcluster.net/reference/core/taskcluster-notify/docs/usage
+ """
+ if task is None:
+ return
+ route_prefix = "notify."
+ valid_filters = ("on-any", "on-completed", "on-failed", "on-exception")
+ task_dict = task.task
+ routes = task_dict.get("routes", [])
+
+ for route in routes:
+ if route.startswith(route_prefix):
+ # Get the filter of the route
+ route_filter = route.split(".")[-1]
+ if route_filter not in valid_filters:
+ raise Exception(
+ "{} has invalid notification filter ({})".format(
+ task.label, route_filter
+ )
+ )
+
+
+@verifications.add("full_task_graph")
+def verify_dependency_tiers(task, taskgraph, scratch_pad, graph_config, parameters):
+ tiers = scratch_pad
+ if task is not None:
+ tiers[task.label] = (
+ task.task.get("extra", {}).get("treeherder", {}).get("tier", sys.maxsize)
+ )
+ else:
+
+ def printable_tier(tier):
+ if tier == sys.maxsize:
+ return "unknown"
+ return tier
+
+ for task in taskgraph.tasks.values():
+ tier = tiers[task.label]
+ for d in task.dependencies.values():
+ if taskgraph[d].task.get("workerType") == "always-optimized":
+ continue
+ if "dummy" in taskgraph[d].kind:
+ continue
+ if tier < tiers[d]:
+ raise Exception(
+ "{} (tier {}) cannot depend on {} (tier {})".format(
+ task.label,
+ printable_tier(tier),
+ d,
+ printable_tier(tiers[d]),
+ )
+ )
+
+
+@verifications.add("full_task_graph")
+def verify_toolchain_alias(task, taskgraph, scratch_pad, graph_config, parameters):
+ """
+ This function verifies that toolchain aliases are not reused.
+ """
+ if task is None:
+ return
+ attributes = task.attributes
+ if "toolchain-alias" in attributes:
+ keys = attributes["toolchain-alias"]
+ if not keys:
+ keys = []
+ elif isinstance(keys, str):
+ keys = [keys]
+ for key in keys:
+ if key in scratch_pad:
+ raise Exception(
+ "Duplicate toolchain-alias in tasks "
+                    "`{}` and `{}`: {}".format(
+ task.label,
+ scratch_pad[key],
+ key,
+ )
+ )
+ else:
+ scratch_pad[key] = task.label
+
+
+@verifications.add("optimized_task_graph")
+def verify_always_optimized(task, taskgraph, scratch_pad, graph_config, parameters):
+ """
+ This function ensures that always-optimized tasks have been optimized.
+ """
+ if task is None:
+ return
+ if task.task.get("workerType") == "always-optimized":
+ raise Exception(f"Could not optimize the task {task.label!r}")
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/workertypes.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/workertypes.py
new file mode 100644
index 0000000000..d71f7e06a3
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/workertypes.py
@@ -0,0 +1,75 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import attr
+
+from .keyed_by import evaluate_keyed_by
+from .memoize import memoize
+
+
+@attr.s
+class _BuiltinWorkerType:
+ provisioner = attr.ib(str)
+ worker_type = attr.ib(str)
+
+ @property
+ def implementation(self):
+ """
+ Since the list of built-in worker-types is small and fixed, we can get
+ away with punning the implementation name (in
+ `taskgraph.transforms.task`) and the worker_type.
+ """
+ return self.worker_type
+
+
+_BUILTIN_TYPES = {
+ "always-optimized": _BuiltinWorkerType("invalid", "always-optimized"),
+ "succeed": _BuiltinWorkerType("built-in", "succeed"),
+}
+
+
+@memoize
+def worker_type_implementation(graph_config, worker_type):
+ """Get the worker implementation and OS for the given workerType, where the
+ OS represents the host system, not the target OS, in the case of
+ cross-compiles."""
+ if worker_type in _BUILTIN_TYPES:
+        # For the built-in worker-types, we use an `implementation` that
+        # matches the worker-type.
+ return _BUILTIN_TYPES[worker_type].implementation, None
+ worker_config = evaluate_keyed_by(
+ {"by-worker-type": graph_config["workers"]["aliases"]},
+ "worker-types.yml",
+ {"worker-type": worker_type},
+ )
+ return worker_config["implementation"], worker_config.get("os")
+
+
+@memoize
+def get_worker_type(graph_config, alias, level):
+ """
+    Get the provisioner and worker type for the given alias and level,
+    evaluating aliases from the graph config.
+ """
+ if alias in _BUILTIN_TYPES:
+ builtin_type = _BUILTIN_TYPES[alias]
+ return builtin_type.provisioner, builtin_type.worker_type
+
+ level = str(level)
+ worker_config = evaluate_keyed_by(
+ {"by-alias": graph_config["workers"]["aliases"]},
+ "graph_config.workers.aliases",
+ {"alias": alias},
+ )
+ provisioner = evaluate_keyed_by(
+ worker_config["provisioner"],
+ alias,
+ {"level": level},
+ ).format(level=level)
+ worker_type = evaluate_keyed_by(
+ worker_config["worker-type"],
+ alias,
+ {"level": level},
+ ).format(level=level, alias=alias)
+ return provisioner, worker_type
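+# Illustrative sketch (hypothetical graph config): given an alias definition
+# such as
+#
+#     workers:
+#         aliases:
+#             b-linux:
+#                 provisioner: 'prov-l{level}'
+#                 worker-type: 'wt-{alias}'
+#
+# get_worker_type(graph_config, "b-linux", 3) evaluates any `by-level` keying
+# in the provisioner/worker-type values, then formats the `{level}` and
+# `{alias}` placeholders, returning ("prov-l3", "wt-b-linux").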
diff --git a/third_party/python/taskcluster_taskgraph/taskgraph/util/yaml.py b/third_party/python/taskcluster_taskgraph/taskgraph/util/yaml.py
new file mode 100644
index 0000000000..141c7a16d3
--- /dev/null
+++ b/third_party/python/taskcluster_taskgraph/taskgraph/util/yaml.py
@@ -0,0 +1,36 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+import os
+
+from yaml.loader import SafeLoader
+
+
+class UnicodeLoader(SafeLoader):
+ def construct_yaml_str(self, node):
+ return self.construct_scalar(node)
+
+
+UnicodeLoader.add_constructor("tag:yaml.org,2002:str", UnicodeLoader.construct_yaml_str)
+
+
+def load_stream(stream):
+ """
+ Parse the first YAML document in a stream
+ and produce the corresponding Python object.
+ """
+ loader = UnicodeLoader(stream)
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
+
+
+def load_yaml(*parts):
+ """Convenience function to load a YAML file in the given path. This is
+ useful for loading kind configuration files from the kind path."""
+ filename = os.path.join(*parts)
+ with open(filename, "rb") as f:
+ return load_stream(f)
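+# Illustrative usage (hypothetical path): load_yaml("taskcluster", "ci",
+# "config.yml") joins the parts with os.path.join and parses the file via
+# load_stream, so only plain YAML types are ever constructed from the input.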
diff --git a/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/LICENSE b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/LICENSE
new file mode 100644
index 0000000000..a612ad9813
--- /dev/null
+++ b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/LICENSE
@@ -0,0 +1,373 @@
+Mozilla Public License Version 2.0
+==================================
+
+1. Definitions
+--------------
+
+1.1. "Contributor"
+ means each individual or legal entity that creates, contributes to
+ the creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+ means the combination of the Contributions of others (if any) used
+ by a Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+ means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+ means Source Code Form to which the initial Contributor has attached
+ the notice in Exhibit A, the Executable Form of such Source Code
+ Form, and Modifications of such Source Code Form, in each case
+ including portions thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+ means
+
+ (a) that the initial Contributor has attached the notice described
+ in Exhibit B to the Covered Software; or
+
+ (b) that the Covered Software was made available under the terms of
+ version 1.1 or earlier of the License, but not also under the
+ terms of a Secondary License.
+
+1.6. "Executable Form"
+ means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+ means a work that combines Covered Software with other material, in
+ a separate file or files, that is not Covered Software.
+
+1.8. "License"
+ means this document.
+
+1.9. "Licensable"
+ means having the right to grant, to the maximum extent possible,
+ whether at the time of the initial grant or subsequently, any and
+ all of the rights conveyed by this License.
+
+1.10. "Modifications"
+ means any of the following:
+
+ (a) any file in Source Code Form that results from an addition to,
+ deletion from, or modification of the contents of Covered
+ Software; or
+
+ (b) any new file in Source Code Form that contains any Covered
+ Software.
+
+1.11. "Patent Claims" of a Contributor
+ means any patent claim(s), including without limitation, method,
+ process, and apparatus claims, in any patent Licensable by such
+ Contributor that would be infringed, but for the grant of the
+ License, by the making, using, selling, offering for sale, having
+ made, import, or transfer of either its Contributions or its
+ Contributor Version.
+
+1.12. "Secondary License"
+ means either the GNU General Public License, Version 2.0, the GNU
+ Lesser General Public License, Version 2.1, the GNU Affero General
+ Public License, Version 3.0, or any later versions of those
+ licenses.
+
+1.13. "Source Code Form"
+ means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+ means an individual or a legal entity exercising rights under this
+ License. For legal entities, "You" includes any entity that
+ controls, is controlled by, or is under common control with You. For
+ purposes of this definition, "control" means (a) the power, direct
+ or indirect, to cause the direction or management of such entity,
+ whether by contract or otherwise, or (b) ownership of more than
+ fifty percent (50%) of the outstanding shares or beneficial
+ ownership of such entity.
+
+2. License Grants and Conditions
+--------------------------------
+
+2.1. Grants
+
+Each Contributor hereby grants You a world-wide, royalty-free,
+non-exclusive license:
+
+(a) under intellectual property rights (other than patent or trademark)
+ Licensable by such Contributor to use, reproduce, make available,
+ modify, display, perform, distribute, and otherwise exploit its
+ Contributions, either on an unmodified basis, with Modifications, or
+ as part of a Larger Work; and
+
+(b) under Patent Claims of such Contributor to make, use, sell, offer
+ for sale, have made, import, and otherwise transfer either its
+ Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+The licenses granted in Section 2.1 with respect to any Contribution
+become effective for each Contribution on the date the Contributor first
+distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+The licenses granted in this Section 2 are the only rights granted under
+this License. No additional rights or licenses will be implied from the
+distribution or licensing of Covered Software under this License.
+Notwithstanding Section 2.1(b) above, no patent license is granted by a
+Contributor:
+
+(a) for any code that a Contributor has removed from Covered Software;
+ or
+
+(b) for infringements caused by: (i) Your and any other third party's
+ modifications of Covered Software, or (ii) the combination of its
+ Contributions with other software (except as part of its Contributor
+ Version); or
+
+(c) under Patent Claims infringed by Covered Software in the absence of
+ its Contributions.
+
+This License does not grant any rights in the trademarks, service marks,
+or logos of any Contributor (except as may be necessary to comply with
+the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+No Contributor makes additional grants as a result of Your choice to
+distribute the Covered Software under a subsequent version of this
+License (see Section 10.2) or under the terms of a Secondary License (if
+permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+Each Contributor represents that the Contributor believes its
+Contributions are its original creation(s) or it has sufficient rights
+to grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+This License is not intended to limit any rights You have under
+applicable copyright doctrines of fair use, fair dealing, or other
+equivalents.
+
+2.7. Conditions
+
+Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
+in Section 2.1.
+
+3. Responsibilities
+-------------------
+
+3.1. Distribution of Source Form
+
+All distribution of Covered Software in Source Code Form, including any
+Modifications that You create or to which You contribute, must be under
+the terms of this License. You must inform recipients that the Source
+Code Form of the Covered Software is governed by the terms of this
+License, and how they can obtain a copy of this License. You may not
+attempt to alter or restrict the recipients' rights in the Source Code
+Form.
+
+3.2. Distribution of Executable Form
+
+If You distribute Covered Software in Executable Form then:
+
+(a) such Covered Software must also be made available in Source Code
+ Form, as described in Section 3.1, and You must inform recipients of
+ the Executable Form how they can obtain a copy of such Source Code
+ Form by reasonable means in a timely manner, at a charge no more
+ than the cost of distribution to the recipient; and
+
+(b) You may distribute such Executable Form under the terms of this
+ License, or sublicense it under different terms, provided that the
+ license for the Executable Form does not attempt to limit or alter
+ the recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+You may create and distribute a Larger Work under terms of Your choice,
+provided that You also comply with the requirements of this License for
+the Covered Software. If the Larger Work is a combination of Covered
+Software with a work governed by one or more Secondary Licenses, and the
+Covered Software is not Incompatible With Secondary Licenses, this
+License permits You to additionally distribute such Covered Software
+under the terms of such Secondary License(s), so that the recipient of
+the Larger Work may, at their option, further distribute the Covered
+Software under the terms of either this License or such Secondary
+License(s).
+
+3.4. Notices
+
+You may not remove or alter the substance of any license notices
+(including copyright notices, patent notices, disclaimers of warranty,
+or limitations of liability) contained within the Source Code Form of
+the Covered Software, except that You may alter any license notices to
+the extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+You may choose to offer, and to charge a fee for, warranty, support,
+indemnity or liability obligations to one or more recipients of Covered
+Software. However, You may do so only on Your own behalf, and not on
+behalf of any Contributor. You must make it absolutely clear that any
+such warranty, support, indemnity, or liability obligation is offered by
+You alone, and You hereby agree to indemnify every Contributor for any
+liability incurred by such Contributor as a result of warranty, support,
+indemnity or liability terms You offer. You may include additional
+disclaimers of warranty and limitations of liability specific to any
+jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+---------------------------------------------------
+
+If it is impossible for You to comply with any of the terms of this
+License with respect to some or all of the Covered Software due to
+statute, judicial order, or regulation then You must: (a) comply with
+the terms of this License to the maximum extent possible; and (b)
+describe the limitations and the code they affect. Such description must
+be placed in a text file included with all distributions of the Covered
+Software under this License. Except to the extent prohibited by statute
+or regulation, such description must be sufficiently detailed for a
+recipient of ordinary skill to be able to understand it.
+
+5. Termination
+--------------
+
+5.1. The rights granted under this License will terminate automatically
+if You fail to comply with any of its terms. However, if You become
+compliant, then the rights granted under this License from a particular
+Contributor are reinstated (a) provisionally, unless and until such
+Contributor explicitly and finally terminates Your grants, and (b) on an
+ongoing basis, if such Contributor fails to notify You of the
+non-compliance by some reasonable means prior to 60 days after You have
+come back into compliance. Moreover, Your grants from a particular
+Contributor are reinstated on an ongoing basis if such Contributor
+notifies You of the non-compliance by some reasonable means, this is the
+first time You have received notice of non-compliance with this License
+from such Contributor, and You become compliant prior to 30 days after
+Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+infringement claim (excluding declaratory judgment actions,
+counter-claims, and cross-claims) alleging that a Contributor Version
+directly or indirectly infringes any patent, then the rights granted to
+You by any and all Contributors for the Covered Software under Section
+2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all
+end user license agreements (excluding distributors and resellers) which
+have been validly granted by You or Your distributors under this License
+prior to termination shall survive termination.
+
+************************************************************************
+* *
+* 6. Disclaimer of Warranty *
+* ------------------------- *
+* *
+* Covered Software is provided under this License on an "as is" *
+* basis, without warranty of any kind, either expressed, implied, or *
+* statutory, including, without limitation, warranties that the *
+* Covered Software is free of defects, merchantable, fit for a *
+* particular purpose or non-infringing. The entire risk as to the *
+* quality and performance of the Covered Software is with You. *
+* Should any Covered Software prove defective in any respect, You *
+* (not any Contributor) assume the cost of any necessary servicing, *
+* repair, or correction. This disclaimer of warranty constitutes an *
+* essential part of this License. No use of any Covered Software is *
+* authorized under this License except under this disclaimer. *
+* *
+************************************************************************
+
+************************************************************************
+* *
+* 7. Limitation of Liability *
+* -------------------------- *
+* *
+* Under no circumstances and under no legal theory, whether tort *
+* (including negligence), contract, or otherwise, shall any *
+* Contributor, or anyone who distributes Covered Software as *
+* permitted above, be liable to You for any direct, indirect, *
+* special, incidental, or consequential damages of any character *
+* including, without limitation, damages for lost profits, loss of *
+* goodwill, work stoppage, computer failure or malfunction, or any *
+* and all other commercial damages or losses, even if such party *
+* shall have been informed of the possibility of such damages. This *
+* limitation of liability shall not apply to liability for death or *
+* personal injury resulting from such party's negligence to the *
+* extent applicable law prohibits such limitation. Some *
+* jurisdictions do not allow the exclusion or limitation of *
+* incidental or consequential damages, so this exclusion and *
+* limitation may not apply to You. *
+* *
+************************************************************************
+
+8. Litigation
+-------------
+
+Any litigation relating to this License may be brought only in the
+courts of a jurisdiction where the defendant maintains its principal
+place of business and such litigation shall be governed by laws of that
+jurisdiction, without reference to its conflict-of-law provisions.
+Nothing in this Section shall prevent a party's ability to bring
+cross-claims or counter-claims.
+
+9. Miscellaneous
+----------------
+
+This License represents the complete agreement concerning the subject
+matter hereof. If any provision of this License is held to be
+unenforceable, such provision shall be reformed only to the extent
+necessary to make it enforceable. Any law or regulation which provides
+that the language of a contract shall be construed against the drafter
+shall not be used to construe this License against a Contributor.
+
+10. Versions of the License
+---------------------------
+
+10.1. New Versions
+
+Mozilla Foundation is the license steward. Except as provided in Section
+10.3, no one other than the license steward has the right to modify or
+publish new versions of this License. Each version will be given a
+distinguishing version number.
+
+10.2. Effect of New Versions
+
+You may distribute the Covered Software under the terms of the version
+of the License under which You originally received the Covered Software,
+or under the terms of any subsequent version published by the license
+steward.
+
+10.3. Modified Versions
+
+If you create software not governed by this License, and you want to
+create a new license for such software, you may create and use a
+modified version of this License if you rename the license and remove
+any references to the name of the license steward (except to note that
+such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+Licenses
+
+If You choose to distribute Source Code Form that is Incompatible With
+Secondary Licenses under the terms of this version of the License, the
+notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+-------------------------------------------
+
+ This Source Code Form is subject to the terms of the Mozilla Public
+ License, v. 2.0. If a copy of the MPL was not distributed with this
+ file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular
+file, then You may include the notice in a location (such as a LICENSE
+file in a relevant directory) where a recipient would be likely to look
+for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+---------------------------------------------------------
+
+ This Source Code Form is "Incompatible With Secondary Licenses", as
+ defined by the Mozilla Public License, v. 2.0.
diff --git a/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/METADATA b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/METADATA
new file mode 100644
index 0000000000..08f8e28788
--- /dev/null
+++ b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/METADATA
@@ -0,0 +1,291 @@
+Metadata-Version: 2.1
+Name: taskcluster-urls
+Version: 13.0.1
+Summary: Standardized url generator for taskcluster resources.
+Home-page: https://github.com/taskcluster/taskcluster-lib-urls
+Author: Brian Stack
+Author-email: bstack@mozilla.com
+License: MPL2
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Description-Content-Type: text/markdown
+
+# Taskcluster URL Building Library
+
+[![License](https://img.shields.io/badge/license-MPL%202.0-orange.svg)](http://mozilla.org/MPL/2.0)
+
+A simple library to generate URLs for various Taskcluster resources across our deployment methods.
+
+This serves both as a simple shim for projects that use JavaScript and as the reference implementation for
+how we define these paths.
+
+URLs are defined in the [Taskcluster URL Format](https://docs.taskcluster.net/docs/reference/url-structure).
+
+Changelog
+---------
+View the changelog on the [releases page](https://github.com/taskcluster/taskcluster-lib-urls/releases).
+
+Requirements
+------------
+
+This is tested on, and should run on, any of Node.js `{8, 10}`.
+
+General Usage
+-------------
+
+While the capitalization and punctuation of the function names vary depending on the language, each language provides the following methods:
+
+| method | result |
+| --- | --- |
+| api(rootUrl, service, version, path) -> | `<rootUrl>/api/<service>/<version>/<path>` |
+| apiReference(rootUrl, service, version) -> | `<rootUrl>/references/<service>/<version>/api.json` |
+| docs(rootUrl, path) -> | `<rootUrl>/docs/<path>` |
+| exchangeReference(rootUrl, service, version) -> | `<rootUrl>/references/<service>/<version>/exchanges.json` |
+| schema(rootUrl, service, schema) -> | `<rootUrl>/schemas/<service>/<schema>` |
+| apiSchema(rootUrl, version) -> | `<rootUrl>/schemas/common/api-reference-<version>.json` |
+| exchangesSchema(rootUrl, version) -> | `<rootUrl>/schemas/common/exchanges-reference-<version>.json` |
+| apiManifestSchema(rootUrl, version) -> | `<rootUrl>/schemas/common/manifest-<version>.json` |
+| metadataMetaschema(rootUrl) -> | `<rootUrl>/schemas/common/metadata-metaschema.json` |
+| ui(rootUrl, path) -> | `<rootUrl>/<path>` |
+| apiManifest(rootUrl) -> | `<rootUrl>/references/manifest.json` |
+| normalizeRootUrl(rootUrl) -> | the normal form of the given rootUrl |
+| testRootUrl() -> | `https://tc-tests.example.com` |
+
+`testRootUrl()` is used to share a common fake `rootUrl` between various Taskcluster mocks in testing.
+The URL does not resolve.
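+
+For example (hypothetical root URL and path), `api('https://tc.example.com', 'queue', 'v1', 'task/abc123')`
+expands to `https://tc.example.com/api/queue/v1/task/abc123`, following the pattern in the table above.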
+
+JS Usage
+--------
+[![Node.js Build Status](https://travis-ci.org/taskcluster/taskcluster-lib-urls.svg?branch=master)](https://travis-ci.org/taskcluster/taskcluster-lib-urls)
+[![npm](https://img.shields.io/npm/v/taskcluster-lib-urls.svg?maxAge=2592000)](https://www.npmjs.com/package/taskcluster-lib-urls)
+
+This package exports several methods for generating URLs conditionally based on
+a root URL, as well as a few helper classes for generating URLs for a pre-determined
+root URL:
+
+* `api(rootUrl, service, version, path)` -> `String`
+* `apiReference(rootUrl, service, version)` -> `String`
+* `docs(rootUrl, path)` -> `String`
+* `exchangeReference(rootUrl, service, version)` -> `String`
+* `schema(rootUrl, service, schema)` -> `String`
+* `apiManifestSchema(rootUrl, version)` -> `String`
+* `apiReferenceSchema(rootUrl, version)` -> `String`
+* `exchangesReferenceSchema(rootUrl, version)` -> `String`
+* `metadataMetaschema(rootUrl)` -> `String`
+* `ui(rootUrl, path)` -> `String`
+* `apiManifest(rootUrl)` -> `String`
+* `testRootUrl()` -> `String`
+* `withRootUrl(rootUrl)` -> `Class` instance for above methods
+* `normalizeRootUrl(rootUrl)` -> `String` (the "normalized" form of the given rootUrl)
+
+```js
+// Specifying root URL every time:
+const libUrls = require('taskcluster-lib-urls');
+
+libUrls.api(rootUrl, 'auth', 'v1', 'foo/bar');
+libUrls.schema(rootUrl, 'auth', 'v1/foo.yml'); // Note that schema names have versions in them
+libUrls.apiReference(rootUrl, 'auth', 'v1');
+libUrls.exchangeReference(rootUrl, 'auth', 'v1');
+libUrls.ui(rootUrl, 'foo/bar');
+libUrls.apiManifest(rootUrl);
+libUrls.docs(rootUrl, 'foo/bar');
+```
+
+```js
+// Specifying root URL in advance:
+const libUrls = require('taskcluster-lib-urls');
+
+const urls = libUrls.withRootUrl(rootUrl);
+
+urls.api('auth', 'v1', 'foo/bar');
+urls.schema('auth', 'v1/foo.yml');
+urls.apiReference('auth', 'v1');
+urls.exchangeReference('auth', 'v1');
+urls.ui('foo/bar');
+urls.apiManifest();
+urls.docs('foo/bar');
+```
+
+If you would like, you can set this up via [taskcluster-lib-loader](https://github.com/taskcluster/taskcluster-lib-loader) as follows:
+
+```js
+{
+  libUrls: {
+    require: ['cfg'],
+    setup: ({cfg}) => withRootUrl(cfg.rootUrl),
+ },
+}
+```
+
+Test with:
+
+```
+yarn install
+yarn test
+```
+
+
+Go Usage
+--------
+
+[![GoDoc](https://godoc.org/github.com/taskcluster/taskcluster-lib-urls?status.svg)](https://godoc.org/github.com/taskcluster/taskcluster-lib-urls)
+
+The go package exports the following functions:
+
+```go
+func API(rootURL string, service string, version string, path string) string
+func APIReference(rootURL string, service string, version string) string
+func Docs(rootURL string, path string) string
+func ExchangeReference(rootURL string, service string, version string) string
+func Schema(rootURL string, service string, name string) string
+func APIManifestSchema(rootURL string, version string) string
+func APIReferenceSchema(rootURL string, version string) string
+func ExchangesReferenceSchema(rootURL string, version string) string
+func MetadataMetaschema(rootURL string) string
+func UI(rootURL string, path string) string
+func APIManifest(rootURL string) string
+func NormalizeRootURL(rootURL string) string
+```
+
+Install with:
+
+```
+go install ./...
+```
+
+Test with:
+
+```
+go test -v ./...
+```
+
+Python Usage
+------------
+
+You can install the python client with `pip install taskcluster-urls`:
+
+```python
+import taskcluster_urls
+
+taskcluster_urls.api(root_url, 'auth', 'v1', 'foo/bar')
+taskcluster_urls.schema(root_url, 'auth', 'v1/foo.yml') # Note that schema names have versions in them
+taskcluster_urls.api_manifest_schema(root_url, 'v1')
+taskcluster_urls.api_reference_schema(root_url, 'v1')
+taskcluster_urls.exchanges_reference_schema(root_url, 'v1')
+taskcluster_urls.metadata_metaschema(root_url, 'v1')
+taskcluster_urls.api_reference(root_url, 'auth', 'v1')
+taskcluster_urls.exchange_reference(root_url, 'auth', 'v1')
+taskcluster_urls.ui(root_url, 'foo/bar')
+taskcluster_urls.api_manifest(root_url)
+taskcluster_urls.docs(root_url, 'foo/bar')
+taskcluster_urls.normalize_root_url(root_url)
+taskcluster_urls.test_root_url()
+```
+
+Test with:
+
+```
+tox
+```
+
+Java Usage
+----------
+
+[![JavaDoc](https://img.shields.io/badge/javadoc-reference-blue.svg)](http://taskcluster.github.io/taskcluster-lib-urls/apidocs)
+
+In order to use this library from your maven project, simply include it as a project dependency:
+
+```
+<project>
+ ...
+ <dependencies>
+ ...
+ <dependency>
+ <groupId>org.mozilla.taskcluster</groupId>
+ <artifactId>taskcluster-lib-urls</artifactId>
+ <version>1.0.0</version>
+ </dependency>
+ </dependencies>
+</project>
+```
+
+The taskcluster-lib-urls artifacts are now available from the [maven central repository](http://central.sonatype.org/):
+
+* [Search Results](http://search.maven.org/#search|gav|1|g%3A%22org.mozilla.taskcluster%22%20AND%20a%3A%22taskcluster-lib-urls%22)
+* [Directory Listing](https://repo1.maven.org/maven2/org/mozilla/taskcluster/taskcluster-lib-urls/)
+
+To use the library, do as follows:
+
+```java
+import org.mozilla.taskcluster.urls.*;
+
+...
+
+ URLProvider urlProvider = URLs.provider("https://mytaskcluster.acme.org");
+
+ String fooBarAPI = urlProvider.api("auth", "v1", "foo/bar");
+ String fooSchema = urlProvider.schema("auth", "v1/foo.yml"); // Note that schema names have versions in them
+ String apiSchema = urlProvider.apiReferenceSchema("v1");
+ String exchangesSchema = urlProvider.exchangesReferenceSchema("v1");
+ String manifestSchema = urlProvider.apiManifestSchema("v1");
+ String metaschema = urlProvider.metadataMetaschema();
+ String authAPIRef = urlProvider.apiReference("auth", "v1");
+ String authExchangesRef = urlProvider.exchangeReference("auth", "v1");
+ String uiFooBar = urlProvider.ui("foo/bar");
+ String apiManifest = urlProvider.apiManifest();
+ String docsFooBar = urlProvider.docs("foo/bar");
+
+...
+```
+
+Install with:
+
+```
+mvn install
+```
+
+Test with:
+
+```
+mvn test
+```
+
+
+Releasing
+---------
+
+New releases should be tested on Travis and Taskcluster so that all supported versions of the various languages are exercised. Once satisfied that it works, create new versions with
+`npm version` rather than by manually editing `package.json`, and push the tags to GitHub.
+
+Make the Node release first, as Python's version depends on its `package.json`. This follows the typical tag-and-push-to-publish approach:
+
+```sh
+$ npm version minor # or patch, or major
+$ git push upstream
+```
+
+Once that's done, build and upload the Python packages (something only the [maintainers on pypi](https://pypi.org/project/taskcluster-urls/#files) can do):
+
+```sh
+rm -rf dist/*
+pip install -U wheel
+python setup.py sdist bdist_wheel
+pip3 install -U wheel
+python3 setup.py bdist_wheel
+pip install twine
+twine upload dist/*
+```
+
+Make sure to update [the changelog](https://github.com/taskcluster/taskcluster-lib-urls/releases)!
+
+License
+-------
+
+[Mozilla Public License Version 2.0](https://github.com/taskcluster/taskcluster-lib-urls/blob/master/LICENSE)
+
+
diff --git a/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/RECORD b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/RECORD
new file mode 100644
index 0000000000..c2e18761d5
--- /dev/null
+++ b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/RECORD
@@ -0,0 +1,6 @@
+taskcluster_urls/__init__.py,sha256=Xh97T_KROF0Dvx8yfipL8ake9Itgxpd0bwVgITZAWnQ,2560
+taskcluster_urls-13.0.1.dist-info/LICENSE,sha256=HyVuytGSiAUQ6ErWBHTqt1iSGHhLmlC8fO7jTCuR8dU,16725
+taskcluster_urls-13.0.1.dist-info/METADATA,sha256=hL5iDBAflh_1mcUp6tspB2dqxP1R_lbzTvBDjf1dags,9809
+taskcluster_urls-13.0.1.dist-info/WHEEL,sha256=g4nMs7d-Xl9-xC9XovUrsDHGXt-FT0E17Yqo92DEfvY,92
+taskcluster_urls-13.0.1.dist-info/top_level.txt,sha256=ZOahZE9aH516RGht4_177HGJ9cJg6JgsD9PVUdwnATo,17
+taskcluster_urls-13.0.1.dist-info/RECORD,,
diff --git a/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/WHEEL b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/WHEEL
new file mode 100644
index 0000000000..b552003ff9
--- /dev/null
+++ b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/top_level.txt b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..99a7791a99
--- /dev/null
+++ b/third_party/python/taskcluster_urls/taskcluster_urls-13.0.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+taskcluster_urls
diff --git a/third_party/python/taskcluster_urls/taskcluster_urls/__init__.py b/third_party/python/taskcluster_urls/taskcluster_urls/__init__.py
new file mode 100644
index 0000000000..1a8f85f11e
--- /dev/null
+++ b/third_party/python/taskcluster_urls/taskcluster_urls/__init__.py
@@ -0,0 +1,63 @@
+def api(root_url, service, version, path):
+ """Generate URL for path in a Taskcluster service."""
+ root_url = normalize_root_url(root_url)
+ path = path.lstrip('/')
+ return '{}/api/{}/{}/{}'.format(root_url, service, version, path)
+
+def api_reference(root_url, service, version):
+ """Generate URL for a Taskcluster api reference."""
+ root_url = normalize_root_url(root_url)
+ return '{}/references/{}/{}/api.json'.format(root_url, service, version)
+
+def docs(root_url, path):
+ """Generate URL for path in the Taskcluster docs."""
+ root_url = normalize_root_url(root_url)
+ path = path.lstrip('/')
+ return '{}/docs/{}'.format(root_url, path)
+
+def exchange_reference(root_url, service, version):
+ """Generate URL for a Taskcluster exchange reference."""
+ root_url = normalize_root_url(root_url)
+ return '{}/references/{}/{}/exchanges.json'.format(root_url, service, version)
+
+def schema(root_url, service, name):
+ """Generate URL for a schema in a Taskcluster service."""
+ root_url = normalize_root_url(root_url)
+ name = name.lstrip('/')
+ return '{}/schemas/{}/{}'.format(root_url, service, name)
+
+def api_reference_schema(root_url, version):
+ """Generate URL for the api reference schema."""
+ return schema(root_url, 'common', 'api-reference-{}.json'.format(version))
+
+def exchanges_reference_schema(root_url, version):
+ """Generate URL for the exchanges reference schema."""
+ return schema(root_url, 'common', 'exchanges-reference-{}.json'.format(version))
+
+def api_manifest_schema(root_url, version):
+    """Generate URL for the api manifest schema."""
+    return schema(root_url, 'common', 'manifest-{}.json'.format(version))
+
+def metadata_metaschema(root_url, version):
+    """Generate URL for the metadata metaschema.
+
+    Note that the metaschema is unversioned; ``version`` is accepted for
+    symmetry with the other helpers but is unused."""
+    return schema(root_url, 'common', 'metadata-metaschema.json')
+
+def ui(root_url, path):
+ """Generate URL for a path in the Taskcluster ui."""
+ root_url = normalize_root_url(root_url)
+ path = path.lstrip('/')
+ return '{}/{}'.format(root_url, path)
+
+def api_manifest(root_url):
+ """Returns a URL for the API manifest of a taskcluster deployment."""
+ root_url = normalize_root_url(root_url)
+ return '{}/references/manifest.json'.format(root_url)
+
+def test_root_url():
+ """Returns a standardized "testing" rootUrl that does not resolve but
+ is easily recognizable in test failures."""
+ return 'https://tc-tests.example.com'
+
+def normalize_root_url(root_url):
+ """Return the normal form of the given rootUrl"""
+ return root_url.rstrip('/')
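+# Illustrative examples, derived from the helpers above:
+#
+#     >>> api('https://tc.example.com/', 'auth', 'v1', '/foo/bar')
+#     'https://tc.example.com/api/auth/v1/foo/bar'
+#     >>> normalize_root_url('https://tc.example.com///')
+#     'https://tc.example.com'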
diff --git a/third_party/python/toml/toml-0.10.2.dist-info/LICENSE b/third_party/python/toml/toml-0.10.2.dist-info/LICENSE
new file mode 100644
index 0000000000..5010e3075e
--- /dev/null
+++ b/third_party/python/toml/toml-0.10.2.dist-info/LICENSE
@@ -0,0 +1,27 @@
+The MIT License
+
+Copyright 2013-2019 William Pearson
+Copyright 2015-2016 Julien Enselme
+Copyright 2016 Google Inc.
+Copyright 2017 Samuel Vasko
+Copyright 2017 Nate Prewitt
+Copyright 2017 Jack Evans
+Copyright 2019 Filippo Broggini
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE. \ No newline at end of file
diff --git a/third_party/python/toml/toml-0.10.2.dist-info/METADATA b/third_party/python/toml/toml-0.10.2.dist-info/METADATA
new file mode 100644
index 0000000000..6f2635ce4d
--- /dev/null
+++ b/third_party/python/toml/toml-0.10.2.dist-info/METADATA
@@ -0,0 +1,255 @@
+Metadata-Version: 2.1
+Name: toml
+Version: 0.10.2
+Summary: Python Library for Tom's Obvious, Minimal Language
+Home-page: https://github.com/uiri/toml
+Author: William Pearson
+Author-email: uiri@xqz.ca
+License: MIT
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
+
+****
+TOML
+****
+
+.. image:: https://img.shields.io/pypi/v/toml
+ :target: https://pypi.org/project/toml/
+
+.. image:: https://travis-ci.org/uiri/toml.svg?branch=master
+ :target: https://travis-ci.org/uiri/toml
+
+.. image:: https://img.shields.io/pypi/pyversions/toml.svg
+ :target: https://pypi.org/project/toml/
+
+
+A Python library for parsing and creating `TOML <https://en.wikipedia.org/wiki/TOML>`_.
+
+The module passes `the TOML test suite <https://github.com/BurntSushi/toml-test>`_.
+
+See also:
+
+* `The TOML Standard <https://github.com/toml-lang/toml>`_
+* `The currently supported TOML specification <https://github.com/toml-lang/toml/blob/v0.5.0/README.md>`_
+
+Installation
+============
+
+To install the latest release on `PyPI <https://pypi.org/project/toml/>`_,
+simply run:
+
+::
+
+ pip install toml
+
+Or to install the latest development version, run:
+
+::
+
+ git clone https://github.com/uiri/toml.git
+ cd toml
+ python setup.py install
+
+Quick Tutorial
+==============
+
+*toml.loads* takes in a string containing standard TOML-formatted data and
+returns a dictionary containing the parsed data.
+
+.. code:: pycon
+
+ >>> import toml
+ >>> toml_string = """
+ ... # This is a TOML document.
+ ...
+ ... title = "TOML Example"
+ ...
+ ... [owner]
+ ... name = "Tom Preston-Werner"
+ ... dob = 1979-05-27T07:32:00-08:00 # First class dates
+ ...
+ ... [database]
+ ... server = "192.168.1.1"
+ ... ports = [ 8001, 8001, 8002 ]
+ ... connection_max = 5000
+ ... enabled = true
+ ...
+ ... [servers]
+ ...
+ ... # Indentation (tabs and/or spaces) is allowed but not required
+ ... [servers.alpha]
+ ... ip = "10.0.0.1"
+ ... dc = "eqdc10"
+ ...
+ ... [servers.beta]
+ ... ip = "10.0.0.2"
+ ... dc = "eqdc10"
+ ...
+ ... [clients]
+ ... data = [ ["gamma", "delta"], [1, 2] ]
+ ...
+ ... # Line breaks are OK when inside arrays
+ ... hosts = [
+ ... "alpha",
+ ... "omega"
+ ... ]
+ ... """
+ >>> parsed_toml = toml.loads(toml_string)
+
+
+*toml.dumps* takes a dictionary and returns a string containing the
+corresponding TOML-formatted data.
+
+.. code:: pycon
+
+ >>> new_toml_string = toml.dumps(parsed_toml)
+ >>> print(new_toml_string)
+ title = "TOML Example"
+ [owner]
+ name = "Tom Preston-Werner"
+ dob = 1979-05-27T07:32:00Z
+ [database]
+ server = "192.168.1.1"
+ ports = [ 8001, 8001, 8002,]
+ connection_max = 5000
+ enabled = true
+ [clients]
+ data = [ [ "gamma", "delta",], [ 1, 2,],]
+ hosts = [ "alpha", "omega",]
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+*toml.dump* takes a dictionary and a file descriptor and returns a string containing the
+corresponding TOML-formatted data.
+
+.. code:: pycon
+
+ >>> with open('new_toml_file.toml', 'w') as f:
+ ... new_toml_string = toml.dump(parsed_toml, f)
+ >>> print(new_toml_string)
+ title = "TOML Example"
+ [owner]
+ name = "Tom Preston-Werner"
+ dob = 1979-05-27T07:32:00Z
+ [database]
+ server = "192.168.1.1"
+ ports = [ 8001, 8001, 8002,]
+ connection_max = 5000
+ enabled = true
+ [clients]
+ data = [ [ "gamma", "delta",], [ 1, 2,],]
+ hosts = [ "alpha", "omega",]
+ [servers.alpha]
+ ip = "10.0.0.1"
+ dc = "eqdc10"
+ [servers.beta]
+ ip = "10.0.0.2"
+ dc = "eqdc10"
+
+For more functions, view the API Reference below.
+
+Note
+----
+
+For Numpy users, by default the data types ``np.floatX`` will not be translated to floats by toml, but will instead be encoded as strings. To get around this, specify the ``TomlNumpyEncoder`` when saving your data.
+
+.. code:: pycon
+
+ >>> import toml
+ >>> import numpy as np
+ >>> a = np.arange(0, 10, dtype=np.double)
+ >>> output = {'a': a}
+ >>> toml.dumps(output)
+ 'a = [ "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "6.0", "7.0", "8.0", "9.0",]\n'
+ >>> toml.dumps(output, encoder=toml.TomlNumpyEncoder())
+ 'a = [ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0,]\n'
+
+API Reference
+=============
+
+``toml.load(f, _dict=dict)``
+ Parse a file or a list of files as TOML and return a dictionary.
+
+ :Args:
+    * ``f``: A path to a file, a list of filepaths (to be read into a single
+      object) or a file descriptor
+ * ``_dict``: The class of the dictionary object to be returned
+
+ :Returns:
+ A dictionary (or object ``_dict``) containing parsed TOML data
+
+ :Raises:
+ * ``TypeError``: When ``f`` is an invalid type or is a list containing
+ invalid types
+ * ``TomlDecodeError``: When an error occurs while decoding the file(s)
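+
+  For example (hypothetical filenames), a list of paths is read into a single
+  dictionary, with later files updating keys set by earlier ones:
+
+  .. code:: pycon
+
+      >>> merged = toml.load(['base.toml', 'local-override.toml'])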
+
+``toml.loads(s, _dict=dict)``
+ Parse a TOML-formatted string to a dictionary.
+
+ :Args:
+ * ``s``: The TOML-formatted string to be parsed
+ * ``_dict``: Specifies the class of the returned toml dictionary
+
+ :Returns:
+ A dictionary (or object ``_dict``) containing parsed TOML data
+
+ :Raises:
+ * ``TypeError``: When a non-string object is passed
+ * ``TomlDecodeError``: When an error occurs while decoding the
+ TOML-formatted string
+
+``toml.dump(o, f, encoder=None)``
+ Write a dictionary to a file containing TOML-formatted data
+
+ :Args:
+ * ``o``: An object to be converted into TOML
+ * ``f``: A File descriptor where the TOML-formatted output should be stored
+ * ``encoder``: An instance of ``TomlEncoder`` (or subclass) for encoding the object. If ``None``, will default to ``TomlEncoder``
+
+ :Returns:
+ A string containing the TOML-formatted data corresponding to object ``o``
+
+ :Raises:
+    * ``TypeError``: When anything other than a file descriptor is passed
+
+``toml.dumps(o, encoder=None)``
+ Create a TOML-formatted string from an input object
+
+ :Args:
+ * ``o``: An object to be converted into TOML
+ * ``encoder``: An instance of ``TomlEncoder`` (or subclass) for encoding the object. If ``None``, will default to ``TomlEncoder``
+
+ :Returns:
+ A string containing the TOML-formatted data corresponding to object ``o``
+
+
+
+Licensing
+=========
+
+This project is released under the terms of the MIT Open Source License. View
+*LICENSE.txt* for more information.
+
+
diff --git a/third_party/python/toml/toml-0.10.2.dist-info/RECORD b/third_party/python/toml/toml-0.10.2.dist-info/RECORD
new file mode 100644
index 0000000000..6b3a3a604d
--- /dev/null
+++ b/third_party/python/toml/toml-0.10.2.dist-info/RECORD
@@ -0,0 +1,10 @@
+toml/__init__.py,sha256=Au3kqCwKD0cjbf4yJGOpUFwpsY0WHsC1ZRGvWgIKmpc,723
+toml/decoder.py,sha256=hSGTLf-2WBDZ_ddoCHWFy6N647XyMSh1o3rN2o4dEFg,38942
+toml/encoder.py,sha256=XjBc8ayvvlsLyd_qDA4tMWDNmMFRS4DpwtuDSWBq7zo,9940
+toml/ordered.py,sha256=mz03lZmV0bmc9lsYRIUOuj7Dsu5Ptwq-UtGVq5FdVZ4,354
+toml/tz.py,sha256=-5vg8wkg_atnVi2TnEveexIVE7T_FxBVr_-2WVfO1oA,701
+toml-0.10.2.dist-info/LICENSE,sha256=LZKUgj32yJNXyL5JJ_znk2HWVh5e51MtWSbmOTmqpTY,1252
+toml-0.10.2.dist-info/METADATA,sha256=n_YkspvEihd_QXLIZZ50WVSFz3rZ_k7jQP-OU1WUpWY,7142
+toml-0.10.2.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110
+toml-0.10.2.dist-info/top_level.txt,sha256=2BO8ZRNnvJWgXyiQv66LBb_v87qBzcoUtEBefA75Ouk,5
+toml-0.10.2.dist-info/RECORD,,
diff --git a/third_party/python/toml/toml-0.10.2.dist-info/WHEEL b/third_party/python/toml/toml-0.10.2.dist-info/WHEEL
new file mode 100644
index 0000000000..6d38aa0601
--- /dev/null
+++ b/third_party/python/toml/toml-0.10.2.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.35.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/toml/toml-0.10.2.dist-info/top_level.txt b/third_party/python/toml/toml-0.10.2.dist-info/top_level.txt
new file mode 100644
index 0000000000..bd79a658fe
--- /dev/null
+++ b/third_party/python/toml/toml-0.10.2.dist-info/top_level.txt
@@ -0,0 +1 @@
+toml
diff --git a/third_party/python/toml/toml/__init__.py b/third_party/python/toml/toml/__init__.py
new file mode 100644
index 0000000000..7719ac23a7
--- /dev/null
+++ b/third_party/python/toml/toml/__init__.py
@@ -0,0 +1,25 @@
+"""Python module which parses and emits TOML.
+
+Released under the MIT license.
+"""
+
+from toml import encoder
+from toml import decoder
+
+__version__ = "0.10.2"
+_spec_ = "0.5.0"
+
+load = decoder.load
+loads = decoder.loads
+TomlDecoder = decoder.TomlDecoder
+TomlDecodeError = decoder.TomlDecodeError
+TomlPreserveCommentDecoder = decoder.TomlPreserveCommentDecoder
+
+dump = encoder.dump
+dumps = encoder.dumps
+TomlEncoder = encoder.TomlEncoder
+TomlArraySeparatorEncoder = encoder.TomlArraySeparatorEncoder
+TomlPreserveInlineDictEncoder = encoder.TomlPreserveInlineDictEncoder
+TomlNumpyEncoder = encoder.TomlNumpyEncoder
+TomlPreserveCommentEncoder = encoder.TomlPreserveCommentEncoder
+TomlPathlibEncoder = encoder.TomlPathlibEncoder
diff --git a/third_party/python/toml/toml/decoder.py b/third_party/python/toml/toml/decoder.py
new file mode 100644
index 0000000000..bf400e9761
--- /dev/null
+++ b/third_party/python/toml/toml/decoder.py
@@ -0,0 +1,1057 @@
+import datetime
+import io
+from os import linesep
+import re
+import sys
+
+from toml.tz import TomlTz
+
+if sys.version_info < (3,):
+ _range = xrange # noqa: F821
+else:
+ unicode = str
+ _range = range
+ basestring = str
+ unichr = chr
+
+
+def _detect_pathlib_path(p):
+ if (3, 4) <= sys.version_info:
+ import pathlib
+ if isinstance(p, pathlib.PurePath):
+ return True
+ return False
+
+
+def _ispath(p):
+ if isinstance(p, (bytes, basestring)):
+ return True
+ return _detect_pathlib_path(p)
+
+
+def _getpath(p):
+ if (3, 6) <= sys.version_info:
+ import os
+ return os.fspath(p)
+ if _detect_pathlib_path(p):
+ return str(p)
+ return p
+
+
+try:
+ FNFError = FileNotFoundError
+except NameError:
+ FNFError = IOError
+
+
+TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
+
+
+class TomlDecodeError(ValueError):
+ """Base toml Exception / Error."""
+
+ def __init__(self, msg, doc, pos):
+ lineno = doc.count('\n', 0, pos) + 1
+ colno = pos - doc.rfind('\n', 0, pos)
+ emsg = '{} (line {} column {} char {})'.format(msg, lineno, colno, pos)
+ ValueError.__init__(self, emsg)
+ self.msg = msg
+ self.doc = doc
+ self.pos = pos
+ self.lineno = lineno
+ self.colno = colno
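+# For example, a parse error at character 42 of a document whose only earlier
+# newline is at index 10 is reported as "(line 2 column 32 char 42)".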
+
+
+# Matches a TOML number, which allows underscores for readability
+_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
+
+
+class CommentValue(object):
+ def __init__(self, val, comment, beginline, _dict):
+ self.val = val
+ separator = "\n" if beginline else " "
+ self.comment = separator + comment
+ self._dict = _dict
+
+ def __getitem__(self, key):
+ return self.val[key]
+
+ def __setitem__(self, key, value):
+ self.val[key] = value
+
+ def dump(self, dump_value_func):
+ retstr = dump_value_func(self.val)
+ if isinstance(self.val, self._dict):
+ return self.comment + "\n" + unicode(retstr)
+ else:
+ return unicode(retstr) + self.comment
+
+
+def _strictly_valid_num(n):
+ n = n.strip()
+ if not n:
+ return False
+ if n[0] == '_':
+ return False
+ if n[-1] == '_':
+ return False
+ if "_." in n or "._" in n:
+ return False
+ if len(n) == 1:
+ return True
+ if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']:
+ return False
+ if n[0] == '+' or n[0] == '-':
+ n = n[1:]
+ if len(n) > 1 and n[0] == '0' and n[1] != '.':
+ return False
+ if '__' in n:
+ return False
+ return True
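+# _strictly_valid_num examples: "1_000" is accepted, while "_1", "1_",
+# "1__000" and "01" are all rejected (leading/trailing/doubled underscores
+# and redundant leading zeros are not strictly valid TOML numbers).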
+
+
+def load(f, _dict=dict, decoder=None):
+ """Parses named file or files as toml and returns a dictionary
+
+ Args:
+ f: Path to the file to open, array of files to read into single dict
+ or a file descriptor
+ _dict: (optional) Specifies the class of the returned toml dictionary
+ decoder: The decoder to use
+
+ Returns:
+ Parsed toml file represented as a dictionary
+
+ Raises:
+ TypeError -- When f is invalid type
+ TomlDecodeError: Error while decoding toml
+ IOError / FileNotFoundError -- When an array with no valid (existing)
+ (Python 2 / Python 3) file paths is passed
+ """
+
+ if _ispath(f):
+ with io.open(_getpath(f), encoding='utf-8') as ffile:
+ return loads(ffile.read(), _dict, decoder)
+ elif isinstance(f, list):
+ from os import path as op
+ from warnings import warn
+ if not [path for path in f if op.exists(path)]:
+ error_msg = "Load expects a list to contain filenames only."
+ error_msg += linesep
+ error_msg += ("The list needs to contain the path of at least one "
+ "existing file.")
+ raise FNFError(error_msg)
+ if decoder is None:
+ decoder = TomlDecoder(_dict)
+ d = decoder.get_empty_table()
+ for l in f: # noqa: E741
+ if op.exists(l):
+ d.update(load(l, _dict, decoder))
+ else:
+ warn("Non-existent filename in list with at least one valid "
+ "filename")
+ return d
+ else:
+ try:
+ return loads(f.read(), _dict, decoder)
+ except AttributeError:
+ raise TypeError("You can only load a file descriptor, filename or "
+ "list")
+
+
+_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
+
+
+def loads(s, _dict=dict, decoder=None):
+ """Parses string as toml
+
+ Args:
+ s: String to be parsed
+ _dict: (optional) Specifies the class of the returned toml dictionary
+
+ Returns:
+ Parsed toml file represented as a dictionary
+
+ Raises:
+ TypeError: When a non-string is passed
+ TomlDecodeError: Error while decoding toml
+ """
+
+ implicitgroups = []
+ if decoder is None:
+ decoder = TomlDecoder(_dict)
+ retval = decoder.get_empty_table()
+ currentlevel = retval
+ if not isinstance(s, basestring):
+ raise TypeError("Expecting something like a string")
+
+ if not isinstance(s, unicode):
+ s = s.decode('utf8')
+
+ original = s
+ sl = list(s)
+ openarr = 0
+ openstring = False
+ openstrchar = ""
+ multilinestr = False
+ arrayoftables = False
+ beginline = True
+ keygroup = False
+ dottedkey = False
+ keyname = 0
+ key = ''
+ prev_key = ''
+ line_no = 1
+
+ for i, item in enumerate(sl):
+ if item == '\r' and sl[i + 1] == '\n':
+ sl[i] = ' '
+ continue
+ if keyname:
+ key += item
+ if item == '\n':
+ raise TomlDecodeError("Key name found without value."
+ " Reached end of line.", original, i)
+ if openstring:
+ if item == openstrchar:
+ oddbackslash = False
+ k = 1
+ while i >= k and sl[i - k] == '\\':
+ oddbackslash = not oddbackslash
+ k += 1
+ if not oddbackslash:
+ keyname = 2
+ openstring = False
+ openstrchar = ""
+ continue
+ elif keyname == 1:
+ if item.isspace():
+ keyname = 2
+ continue
+ elif item == '.':
+ dottedkey = True
+ continue
+ elif item.isalnum() or item == '_' or item == '-':
+ continue
+ elif (dottedkey and sl[i - 1] == '.' and
+ (item == '"' or item == "'")):
+ openstring = True
+ openstrchar = item
+ continue
+ elif keyname == 2:
+ if item.isspace():
+ if dottedkey:
+ nextitem = sl[i + 1]
+ if not nextitem.isspace() and nextitem != '.':
+ keyname = 1
+ continue
+ if item == '.':
+ dottedkey = True
+ nextitem = sl[i + 1]
+ if not nextitem.isspace() and nextitem != '.':
+ keyname = 1
+ continue
+ if item == '=':
+ keyname = 0
+ prev_key = key[:-1].rstrip()
+ key = ''
+ dottedkey = False
+ else:
+ raise TomlDecodeError("Found invalid character in key name: '" +
+ item + "'. Try quoting the key name.",
+ original, i)
+ if item == "'" and openstrchar != '"':
+ k = 1
+ try:
+ while sl[i - k] == "'":
+ k += 1
+ if k == 3:
+ break
+ except IndexError:
+ pass
+ if k == 3:
+ multilinestr = not multilinestr
+ openstring = multilinestr
+ else:
+ openstring = not openstring
+ if openstring:
+ openstrchar = "'"
+ else:
+ openstrchar = ""
+ if item == '"' and openstrchar != "'":
+ oddbackslash = False
+ k = 1
+ tripquote = False
+ try:
+ while sl[i - k] == '"':
+ k += 1
+ if k == 3:
+ tripquote = True
+ break
+ if k == 1 or (k == 3 and tripquote):
+ while sl[i - k] == '\\':
+ oddbackslash = not oddbackslash
+ k += 1
+ except IndexError:
+ pass
+ if not oddbackslash:
+ if tripquote:
+ multilinestr = not multilinestr
+ openstring = multilinestr
+ else:
+ openstring = not openstring
+ if openstring:
+ openstrchar = '"'
+ else:
+ openstrchar = ""
+ if item == '#' and (not openstring and not keygroup and
+ not arrayoftables):
+ j = i
+ comment = ""
+ try:
+ while sl[j] != '\n':
+ comment += s[j]
+ sl[j] = ' '
+ j += 1
+ except IndexError:
+ break
+ if not openarr:
+ decoder.preserve_comment(line_no, prev_key, comment, beginline)
+ if item == '[' and (not openstring and not keygroup and
+ not arrayoftables):
+ if beginline:
+ if len(sl) > i + 1 and sl[i + 1] == '[':
+ arrayoftables = True
+ else:
+ keygroup = True
+ else:
+ openarr += 1
+ if item == ']' and not openstring:
+ if keygroup:
+ keygroup = False
+ elif arrayoftables:
+ if sl[i - 1] == ']':
+ arrayoftables = False
+ else:
+ openarr -= 1
+ if item == '\n':
+ if openstring or multilinestr:
+ if not multilinestr:
+ raise TomlDecodeError("Unbalanced quotes", original, i)
+ if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
+ sl[i - 2] == sl[i - 1])):
+ sl[i] = sl[i - 1]
+ if sl[i - 3] == sl[i - 1]:
+ sl[i - 3] = ' '
+ elif openarr:
+ sl[i] = ' '
+ else:
+ beginline = True
+ line_no += 1
+ elif beginline and sl[i] != ' ' and sl[i] != '\t':
+ beginline = False
+ if not keygroup and not arrayoftables:
+ if sl[i] == '=':
+ raise TomlDecodeError("Found empty keyname. ", original, i)
+ keyname = 1
+ key += item
+ if keyname:
+ raise TomlDecodeError("Key name found without value."
+ " Reached end of file.", original, len(s))
+ if openstring: # reached EOF and have an unterminated string
+ raise TomlDecodeError("Unterminated string found."
+ " Reached end of file.", original, len(s))
+ s = ''.join(sl)
+ s = s.split('\n')
+ multikey = None
+ multilinestr = ""
+ multibackslash = False
+ pos = 0
+ for idx, line in enumerate(s):
+ if idx > 0:
+ pos += len(s[idx - 1]) + 1
+
+ decoder.embed_comments(idx, currentlevel)
+
+ if not multilinestr or multibackslash or '\n' not in multilinestr:
+ line = line.strip()
+ if line == "" and (not multikey or multibackslash):
+ continue
+ if multikey:
+ if multibackslash:
+ multilinestr += line
+ else:
+ multilinestr += line
+ multibackslash = False
+ closed = False
+ if multilinestr[0] == '[':
+ closed = line[-1] == ']'
+ elif len(line) > 2:
+ closed = (line[-1] == multilinestr[0] and
+ line[-2] == multilinestr[0] and
+ line[-3] == multilinestr[0])
+ if closed:
+ try:
+ value, vtype = decoder.load_value(multilinestr)
+ except ValueError as err:
+ raise TomlDecodeError(str(err), original, pos)
+ currentlevel[multikey] = value
+ multikey = None
+ multilinestr = ""
+ else:
+ k = len(multilinestr) - 1
+ while k > -1 and multilinestr[k] == '\\':
+ multibackslash = not multibackslash
+ k -= 1
+ if multibackslash:
+ multilinestr = multilinestr[:-1]
+ else:
+ multilinestr += "\n"
+ continue
+ if line[0] == '[':
+ arrayoftables = False
+ if len(line) == 1:
+ raise TomlDecodeError("Opening key group bracket on line by "
+ "itself.", original, pos)
+ if line[1] == '[':
+ arrayoftables = True
+ line = line[2:]
+ splitstr = ']]'
+ else:
+ line = line[1:]
+ splitstr = ']'
+ i = 1
+ quotesplits = decoder._get_split_on_quotes(line)
+ quoted = False
+ for quotesplit in quotesplits:
+ if not quoted and splitstr in quotesplit:
+ break
+ i += quotesplit.count(splitstr)
+ quoted = not quoted
+ line = line.split(splitstr, i)
+ if len(line) < i + 1 or line[-1].strip() != "":
+ raise TomlDecodeError("Key group not on a line by itself.",
+ original, pos)
+ groups = splitstr.join(line[:-1]).split('.')
+ i = 0
+ while i < len(groups):
+ groups[i] = groups[i].strip()
+ if len(groups[i]) > 0 and (groups[i][0] == '"' or
+ groups[i][0] == "'"):
+ groupstr = groups[i]
+ j = i + 1
+ while ((not groupstr[0] == groupstr[-1]) or
+ len(groupstr) == 1):
+ j += 1
+ if j > len(groups) + 2:
+ raise TomlDecodeError("Invalid group name '" +
+ groupstr + "' Something " +
+ "went wrong.", original, pos)
+ groupstr = '.'.join(groups[i:j]).strip()
+ groups[i] = groupstr[1:-1]
+ groups[i + 1:j] = []
+ else:
+ if not _groupname_re.match(groups[i]):
+ raise TomlDecodeError("Invalid group name '" +
+ groups[i] + "'. Try quoting it.",
+ original, pos)
+ i += 1
+ currentlevel = retval
+ for i in _range(len(groups)):
+ group = groups[i]
+ if group == "":
+ raise TomlDecodeError("Can't have a keygroup with an empty "
+ "name", original, pos)
+ try:
+ currentlevel[group]
+ if i == len(groups) - 1:
+ if group in implicitgroups:
+ implicitgroups.remove(group)
+ if arrayoftables:
+ raise TomlDecodeError("An implicitly defined "
+ "table can't be an array",
+ original, pos)
+ elif arrayoftables:
+ currentlevel[group].append(decoder.get_empty_table()
+ )
+ else:
+ raise TomlDecodeError("What? " + group +
+ " already exists?" +
+ str(currentlevel),
+ original, pos)
+ except TypeError:
+ currentlevel = currentlevel[-1]
+ if group not in currentlevel:
+ currentlevel[group] = decoder.get_empty_table()
+ if i == len(groups) - 1 and arrayoftables:
+ currentlevel[group] = [decoder.get_empty_table()]
+ except KeyError:
+ if i != len(groups) - 1:
+ implicitgroups.append(group)
+ currentlevel[group] = decoder.get_empty_table()
+ if i == len(groups) - 1 and arrayoftables:
+ currentlevel[group] = [decoder.get_empty_table()]
+ currentlevel = currentlevel[group]
+ if arrayoftables:
+ try:
+ currentlevel = currentlevel[-1]
+ except KeyError:
+ pass
+ elif line[0] == "{":
+ if line[-1] != "}":
+ raise TomlDecodeError("Line breaks are not allowed in inline"
+ "objects", original, pos)
+ try:
+ decoder.load_inline_object(line, currentlevel, multikey,
+ multibackslash)
+ except ValueError as err:
+ raise TomlDecodeError(str(err), original, pos)
+ elif "=" in line:
+ try:
+ ret = decoder.load_line(line, currentlevel, multikey,
+ multibackslash)
+ except ValueError as err:
+ raise TomlDecodeError(str(err), original, pos)
+ if ret is not None:
+ multikey, multilinestr, multibackslash = ret
+ return retval
+
+
+def _load_date(val):
+ microsecond = 0
+ tz = None
+ try:
+ if len(val) > 19:
+ if val[19] == '.':
+ if val[-1].upper() == 'Z':
+ subsecondval = val[20:-1]
+ tzval = "Z"
+ else:
+ subsecondvalandtz = val[20:]
+ if '+' in subsecondvalandtz:
+ splitpoint = subsecondvalandtz.index('+')
+ subsecondval = subsecondvalandtz[:splitpoint]
+ tzval = subsecondvalandtz[splitpoint:]
+ elif '-' in subsecondvalandtz:
+ splitpoint = subsecondvalandtz.index('-')
+ subsecondval = subsecondvalandtz[:splitpoint]
+ tzval = subsecondvalandtz[splitpoint:]
+ else:
+ tzval = None
+ subsecondval = subsecondvalandtz
+ if tzval is not None:
+ tz = TomlTz(tzval)
+ microsecond = int(int(subsecondval) *
+ (10 ** (6 - len(subsecondval))))
+ else:
+ tz = TomlTz(val[19:])
+ except ValueError:
+ tz = None
+ if "-" not in val[1:]:
+ return None
+ try:
+ if len(val) == 10:
+ d = datetime.date(
+ int(val[:4]), int(val[5:7]),
+ int(val[8:10]))
+ else:
+ d = datetime.datetime(
+ int(val[:4]), int(val[5:7]),
+ int(val[8:10]), int(val[11:13]),
+ int(val[14:16]), int(val[17:19]), microsecond, tz)
+ except ValueError:
+ return None
+ return d
+
+
+def _load_unicode_escapes(v, hexbytes, prefix):
+ skip = False
+ i = len(v) - 1
+ while i > -1 and v[i] == '\\':
+ skip = not skip
+ i -= 1
+ for hx in hexbytes:
+ if skip:
+ skip = False
+ i = len(hx) - 1
+ while i > -1 and hx[i] == '\\':
+ skip = not skip
+ i -= 1
+ v += prefix
+ v += hx
+ continue
+ hxb = ""
+ i = 0
+ hxblen = 4
+ if prefix == "\\U":
+ hxblen = 8
+ hxb = ''.join(hx[i:i + hxblen]).lower()
+ if hxb.strip('0123456789abcdef'):
+ raise ValueError("Invalid escape sequence: " + hxb)
+ if hxb[0] == "d" and hxb[1].strip('01234567'):
+ raise ValueError("Invalid escape sequence: " + hxb +
+ ". Only scalar unicode points are allowed.")
+ v += unichr(int(hxb, 16))
+ v += unicode(hx[len(hxb):])
+ return v
+
+
+# Unescape TOML string values.
+
+# content after the \
+_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
+# What it should be replaced by
+_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
+# Used for substitution
+_escape_to_escapedchars = dict(zip(_escapes, _escapedchars))
+
+
+def _unescape(v):
+ """Unescape characters in a TOML string."""
+ i = 0
+ backslash = False
+ while i < len(v):
+ if backslash:
+ backslash = False
+ if v[i] in _escapes:
+ v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
+ elif v[i] == '\\':
+ v = v[:i - 1] + v[i:]
+ elif v[i] == 'u' or v[i] == 'U':
+ i += 1
+ else:
+ raise ValueError("Reserved escape sequence used")
+ continue
+ elif v[i] == '\\':
+ backslash = True
+ i += 1
+ return v
+
+
+class InlineTableDict(object):
+ """Sentinel subclass of dict for inline tables."""
+
+
+class TomlDecoder(object):
+
+ def __init__(self, _dict=dict):
+ self._dict = _dict
+
+ def get_empty_table(self):
+ return self._dict()
+
+ def get_empty_inline_table(self):
+ class DynamicInlineTableDict(self._dict, InlineTableDict):
+ """Concrete sentinel subclass for inline tables.
+ It is a subclass of _dict which is passed in dynamically at load
+ time
+
+ It is also a subclass of InlineTableDict
+ """
+
+ return DynamicInlineTableDict()
+
+ def load_inline_object(self, line, currentlevel, multikey=False,
+ multibackslash=False):
+ candidate_groups = line[1:-1].split(",")
+ groups = []
+ if len(candidate_groups) == 1 and not candidate_groups[0].strip():
+ candidate_groups.pop()
+ while len(candidate_groups) > 0:
+ candidate_group = candidate_groups.pop(0)
+ try:
+ _, value = candidate_group.split('=', 1)
+ except ValueError:
+ raise ValueError("Invalid inline table encountered")
+ value = value.strip()
+ if ((value[0] == value[-1] and value[0] in ('"', "'")) or (
+ value[0] in '-0123456789' or
+ value in ('true', 'false') or
+ (value[0] == "[" and value[-1] == "]") or
+ (value[0] == '{' and value[-1] == '}'))):
+ groups.append(candidate_group)
+ elif len(candidate_groups) > 0:
+ candidate_groups[0] = (candidate_group + "," +
+ candidate_groups[0])
+ else:
+ raise ValueError("Invalid inline table value encountered")
+ for group in groups:
+ status = self.load_line(group, currentlevel, multikey,
+ multibackslash)
+ if status is not None:
+ break
+
+ def _get_split_on_quotes(self, line):
+ doublequotesplits = line.split('"')
+ quoted = False
+ quotesplits = []
+ if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]:
+ singlequotesplits = doublequotesplits[0].split("'")
+ doublequotesplits = doublequotesplits[1:]
+ while len(singlequotesplits) % 2 == 0 and len(doublequotesplits):
+ singlequotesplits[-1] += '"' + doublequotesplits[0]
+ doublequotesplits = doublequotesplits[1:]
+ if "'" in singlequotesplits[-1]:
+ singlequotesplits = (singlequotesplits[:-1] +
+ singlequotesplits[-1].split("'"))
+ quotesplits += singlequotesplits
+ for doublequotesplit in doublequotesplits:
+ if quoted:
+ quotesplits.append(doublequotesplit)
+ else:
+ quotesplits += doublequotesplit.split("'")
+ quoted = not quoted
+ return quotesplits
+
+ def load_line(self, line, currentlevel, multikey, multibackslash):
+ i = 1
+ quotesplits = self._get_split_on_quotes(line)
+ quoted = False
+ for quotesplit in quotesplits:
+ if not quoted and '=' in quotesplit:
+ break
+ i += quotesplit.count('=')
+ quoted = not quoted
+ pair = line.split('=', i)
+ strictly_valid = _strictly_valid_num(pair[-1])
+ if _number_with_underscores.match(pair[-1]):
+ pair[-1] = pair[-1].replace('_', '')
+ while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
+ pair[-1][0] != "'" and pair[-1][0] != '"' and
+ pair[-1][0] != '[' and pair[-1][0] != '{' and
+ pair[-1].strip() != 'true' and
+ pair[-1].strip() != 'false'):
+ try:
+ float(pair[-1])
+ break
+ except ValueError:
+ pass
+ if _load_date(pair[-1]) is not None:
+ break
+ if TIME_RE.match(pair[-1]):
+ break
+ i += 1
+ prev_val = pair[-1]
+ pair = line.split('=', i)
+ if prev_val == pair[-1]:
+ raise ValueError("Invalid date or number")
+ if strictly_valid:
+ strictly_valid = _strictly_valid_num(pair[-1])
+ pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()]
+ if '.' in pair[0]:
+ if '"' in pair[0] or "'" in pair[0]:
+ quotesplits = self._get_split_on_quotes(pair[0])
+ quoted = False
+ levels = []
+ for quotesplit in quotesplits:
+ if quoted:
+ levels.append(quotesplit)
+ else:
+ levels += [level.strip() for level in
+ quotesplit.split('.')]
+ quoted = not quoted
+ else:
+ levels = pair[0].split('.')
+ while levels[-1] == "":
+ levels = levels[:-1]
+ for level in levels[:-1]:
+ if level == "":
+ continue
+ if level not in currentlevel:
+ currentlevel[level] = self.get_empty_table()
+ currentlevel = currentlevel[level]
+ pair[0] = levels[-1].strip()
+ elif (pair[0][0] == '"' or pair[0][0] == "'") and \
+ (pair[0][-1] == pair[0][0]):
+ pair[0] = _unescape(pair[0][1:-1])
+ k, koffset = self._load_line_multiline_str(pair[1])
+ if k > -1:
+ while k > -1 and pair[1][k + koffset] == '\\':
+ multibackslash = not multibackslash
+ k -= 1
+ if multibackslash:
+ multilinestr = pair[1][:-1]
+ else:
+ multilinestr = pair[1] + "\n"
+ multikey = pair[0]
+ else:
+ value, vtype = self.load_value(pair[1], strictly_valid)
+ try:
+ currentlevel[pair[0]]
+ raise ValueError("Duplicate keys!")
+ except TypeError:
+ raise ValueError("Duplicate keys!")
+ except KeyError:
+ if multikey:
+ return multikey, multilinestr, multibackslash
+ else:
+ currentlevel[pair[0]] = value
+
+ def _load_line_multiline_str(self, p):
+ poffset = 0
+ if len(p) < 3:
+ return -1, poffset
+ if p[0] == '[' and (p.strip()[-1] != ']' and
+ self._load_array_isstrarray(p)):
+ newp = p[1:].strip().split(',')
+ while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'":
+ newp = newp[:-2] + [newp[-2] + ',' + newp[-1]]
+ newp = newp[-1]
+ poffset = len(p) - len(newp)
+ p = newp
+ if p[0] != '"' and p[0] != "'":
+ return -1, poffset
+ if p[1] != p[0] or p[2] != p[0]:
+ return -1, poffset
+ if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]:
+ return -1, poffset
+ return len(p) - 1, poffset
+
+ def load_value(self, v, strictly_valid=True):
+ if not v:
+ raise ValueError("Empty value is invalid")
+ if v == 'true':
+ return (True, "bool")
+ elif v.lower() == 'true':
+ raise ValueError("Only all lowercase booleans allowed")
+ elif v == 'false':
+ return (False, "bool")
+ elif v.lower() == 'false':
+ raise ValueError("Only all lowercase booleans allowed")
+ elif v[0] == '"' or v[0] == "'":
+ quotechar = v[0]
+ testv = v[1:].split(quotechar)
+ triplequote = False
+ triplequotecount = 0
+ if len(testv) > 1 and testv[0] == '' and testv[1] == '':
+ testv = testv[2:]
+ triplequote = True
+ closed = False
+ for tv in testv:
+ if tv == '':
+ if triplequote:
+ triplequotecount += 1
+ else:
+ closed = True
+ else:
+ oddbackslash = False
+ try:
+ i = -1
+ j = tv[i]
+ while j == '\\':
+ oddbackslash = not oddbackslash
+ i -= 1
+ j = tv[i]
+ except IndexError:
+ pass
+ if not oddbackslash:
+ if closed:
+ raise ValueError("Found tokens after a closed " +
+ "string. Invalid TOML.")
+ else:
+ if not triplequote or triplequotecount > 1:
+ closed = True
+ else:
+ triplequotecount = 0
+ if quotechar == '"':
+ escapeseqs = v.split('\\')[1:]
+ backslash = False
+ for i in escapeseqs:
+ if i == '':
+ backslash = not backslash
+ else:
+ if i[0] not in _escapes and (i[0] != 'u' and
+ i[0] != 'U' and
+ not backslash):
+ raise ValueError("Reserved escape sequence used")
+ if backslash:
+ backslash = False
+ for prefix in ["\\u", "\\U"]:
+ if prefix in v:
+ hexbytes = v.split(prefix)
+ v = _load_unicode_escapes(hexbytes[0], hexbytes[1:],
+ prefix)
+ v = _unescape(v)
+ if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or
+ v[1] == v[2]):
+ v = v[2:-2]
+ return (v[1:-1], "str")
+ elif v[0] == '[':
+ return (self.load_array(v), "array")
+ elif v[0] == '{':
+ inline_object = self.get_empty_inline_table()
+ self.load_inline_object(v, inline_object)
+ return (inline_object, "inline_object")
+ elif TIME_RE.match(v):
+ h, m, s, _, ms = TIME_RE.match(v).groups()
+ time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0)
+ return (time, "time")
+ else:
+ parsed_date = _load_date(v)
+ if parsed_date is not None:
+ return (parsed_date, "date")
+ if not strictly_valid:
+ raise ValueError("Weirdness with leading zeroes or "
+ "underscores in your number.")
+ itype = "int"
+ neg = False
+ if v[0] == '-':
+ neg = True
+ v = v[1:]
+ elif v[0] == '+':
+ v = v[1:]
+ v = v.replace('_', '')
+ lowerv = v.lower()
+ if '.' in v or ('x' not in v and ('e' in v or 'E' in v)):
+ if '.' in v and v.split('.', 1)[1] == '':
+ raise ValueError("This float is missing digits after "
+ "the point")
+ if v[0] not in '0123456789':
+ raise ValueError("This float doesn't have a leading "
+ "digit")
+ v = float(v)
+ itype = "float"
+ elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'):
+ v = float(v)
+ itype = "float"
+ if itype == "int":
+ v = int(v, 0)
+ if neg:
+ return (0 - v, itype)
+ return (v, itype)
+
+ def bounded_string(self, s):
+ if len(s) == 0:
+ return True
+ if s[-1] != s[0]:
+ return False
+ i = -2
+ backslash = False
+ while len(s) + i > 0:
+ if s[i] == "\\":
+ backslash = not backslash
+ i -= 1
+ else:
+ break
+ return not backslash
+
+ def _load_array_isstrarray(self, a):
+ a = a[1:-1].strip()
+ if a != '' and (a[0] == '"' or a[0] == "'"):
+ return True
+ return False
+
+ def load_array(self, a):
+ atype = None
+ retval = []
+ a = a.strip()
+ if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
+ strarray = self._load_array_isstrarray(a)
+ if not a[1:-1].strip().startswith('{'):
+ a = a[1:-1].split(',')
+ else:
+            # a is an array of inline tables; find each table's matching
+            # closing brace to split the groups
+ new_a = []
+ start_group_index = 1
+ end_group_index = 2
+ open_bracket_count = 1 if a[start_group_index] == '{' else 0
+ in_str = False
+ while end_group_index < len(a[1:]):
+ if a[end_group_index] == '"' or a[end_group_index] == "'":
+ if in_str:
+ backslash_index = end_group_index - 1
+ while (backslash_index > -1 and
+ a[backslash_index] == '\\'):
+ in_str = not in_str
+ backslash_index -= 1
+ in_str = not in_str
+ if not in_str and a[end_group_index] == '{':
+ open_bracket_count += 1
+ if in_str or a[end_group_index] != '}':
+ end_group_index += 1
+ continue
+ elif a[end_group_index] == '}' and open_bracket_count > 1:
+ open_bracket_count -= 1
+ end_group_index += 1
+ continue
+
+                # Increase end_group_index by 1 to include the closing brace
+ end_group_index += 1
+
+ new_a.append(a[start_group_index:end_group_index])
+
+                # The next start index is at least after the closing
+                # brace; a closing brace can be followed by a comma
+                # since we are in an array.
+ start_group_index = end_group_index + 1
+ while (start_group_index < len(a[1:]) and
+ a[start_group_index] != '{'):
+ start_group_index += 1
+ end_group_index = start_group_index + 1
+ a = new_a
+ b = 0
+ if strarray:
+ while b < len(a) - 1:
+ ab = a[b].strip()
+ while (not self.bounded_string(ab) or
+ (len(ab) > 2 and
+ ab[0] == ab[1] == ab[2] and
+ ab[-2] != ab[0] and
+ ab[-3] != ab[0])):
+ a[b] = a[b] + ',' + a[b + 1]
+ ab = a[b].strip()
+ if b < len(a) - 2:
+ a = a[:b + 1] + a[b + 2:]
+ else:
+ a = a[:b + 1]
+ b += 1
+ else:
+ al = list(a[1:-1])
+ a = []
+ openarr = 0
+ j = 0
+ for i in _range(len(al)):
+ if al[i] == '[':
+ openarr += 1
+ elif al[i] == ']':
+ openarr -= 1
+ elif al[i] == ',' and not openarr:
+ a.append(''.join(al[j:i]))
+ j = i + 1
+ a.append(''.join(al[j:]))
+ for i in _range(len(a)):
+ a[i] = a[i].strip()
+ if a[i] != '':
+ nval, ntype = self.load_value(a[i])
+ if atype:
+ if ntype != atype:
+ raise ValueError("Not a homogeneous array")
+ else:
+ atype = ntype
+ retval.append(nval)
+ return retval
+
+ def preserve_comment(self, line_no, key, comment, beginline):
+ pass
+
+ def embed_comments(self, idx, currentlevel):
+ pass
+
+
+class TomlPreserveCommentDecoder(TomlDecoder):
+
+ def __init__(self, _dict=dict):
+ self.saved_comments = {}
+ super(TomlPreserveCommentDecoder, self).__init__(_dict)
+
+ def preserve_comment(self, line_no, key, comment, beginline):
+ self.saved_comments[line_no] = (key, comment, beginline)
+
+ def embed_comments(self, idx, currentlevel):
+ if idx not in self.saved_comments:
+ return
+
+ key, comment, beginline = self.saved_comments[idx]
+ currentlevel[key] = CommentValue(currentlevel[key], comment, beginline,
+ self._dict)
diff --git a/third_party/python/toml/toml/encoder.py b/third_party/python/toml/toml/encoder.py
new file mode 100644
index 0000000000..bf17a72b62
--- /dev/null
+++ b/third_party/python/toml/toml/encoder.py
@@ -0,0 +1,304 @@
+import datetime
+import re
+import sys
+from decimal import Decimal
+
+from toml.decoder import InlineTableDict
+
+if sys.version_info >= (3,):
+ unicode = str
+
+
+def dump(o, f, encoder=None):
+ """Writes out dict as toml to a file
+
+ Args:
+ o: Object to dump into toml
+ f: File descriptor where the toml should be stored
+ encoder: The ``TomlEncoder`` to use for constructing the output string
+
+    Returns:
+        String containing the toml corresponding to the dictionary
+
+    Raises:
+        TypeError: When anything other than a file descriptor is passed
+ """
+
+ if not f.write:
+ raise TypeError("You can only dump an object to a file descriptor")
+ d = dumps(o, encoder=encoder)
+ f.write(d)
+ return d
+
+
+def dumps(o, encoder=None):
+ """Stringifies input dict as toml
+
+ Args:
+ o: Object to dump into toml
+ encoder: The ``TomlEncoder`` to use for constructing the output string
+
+ Returns:
+ String containing the toml corresponding to dict
+
+ Examples:
+ ```python
+ >>> import toml
+ >>> output = {
+ ... 'a': "I'm a string",
+ ... 'b': ["I'm", "a", "list"],
+ ... 'c': 2400
+ ... }
+ >>> toml.dumps(output)
+ 'a = "I\'m a string"\nb = [ "I\'m", "a", "list",]\nc = 2400\n'
+ ```
+ """
+
+ retval = ""
+ if encoder is None:
+ encoder = TomlEncoder(o.__class__)
+ addtoretval, sections = encoder.dump_sections(o, "")
+ retval += addtoretval
+ outer_objs = [id(o)]
+ while sections:
+ section_ids = [id(section) for section in sections.values()]
+ for outer_obj in outer_objs:
+ if outer_obj in section_ids:
+ raise ValueError("Circular reference detected")
+ outer_objs += section_ids
+ newsections = encoder.get_empty_table()
+ for section in sections:
+ addtoretval, addtosections = encoder.dump_sections(
+ sections[section], section)
+
+        if addtoretval or not addtosections:
+ if retval and retval[-2:] != "\n\n":
+ retval += "\n"
+ retval += "[" + section + "]\n"
+ if addtoretval:
+ retval += addtoretval
+ for s in addtosections:
+ newsections[section + "." + s] = addtosections[s]
+ sections = newsections
+ return retval
+
+
+def _dump_str(v):
+ if sys.version_info < (3,) and hasattr(v, 'decode') and isinstance(v, str):
+ v = v.decode('utf-8')
+ v = "%r" % v
+ if v[0] == 'u':
+ v = v[1:]
+ singlequote = v.startswith("'")
+ if singlequote or v.startswith('"'):
+ v = v[1:-1]
+ if singlequote:
+ v = v.replace("\\'", "'")
+ v = v.replace('"', '\\"')
+ v = v.split("\\x")
+ while len(v) > 1:
+ i = -1
+ if not v[0]:
+ v = v[1:]
+ v[0] = v[0].replace("\\\\", "\\")
+        # Decide, from the run of backslashes before the split point, whether
+        # to re-join with 'x' or to widen the escape with 'u00' (\xNN -> \u00NN)
+ joinx = v[0][i] != "\\"
+ while v[0][:i] and v[0][i] == "\\":
+ joinx = not joinx
+ i -= 1
+ if joinx:
+ joiner = "x"
+ else:
+ joiner = "u00"
+ v = [v[0] + joiner + v[1]] + v[2:]
+ return unicode('"' + v[0] + '"')
+
+
+def _dump_float(v):
+ return "{}".format(v).replace("e+0", "e+").replace("e-0", "e-")
+
+
+def _dump_time(v):
+ utcoffset = v.utcoffset()
+ if utcoffset is None:
+ return v.isoformat()
+    # The TOML spec treats a bare time as local time, so we drop the offset
+ return v.isoformat()[:-6]
+
+
+class TomlEncoder(object):
+
+ def __init__(self, _dict=dict, preserve=False):
+ self._dict = _dict
+ self.preserve = preserve
+ self.dump_funcs = {
+ str: _dump_str,
+ unicode: _dump_str,
+ list: self.dump_list,
+ bool: lambda v: unicode(v).lower(),
+ int: lambda v: v,
+ float: _dump_float,
+ Decimal: _dump_float,
+ datetime.datetime: lambda v: v.isoformat().replace('+00:00', 'Z'),
+ datetime.time: _dump_time,
+ datetime.date: lambda v: v.isoformat()
+ }
+
+ def get_empty_table(self):
+ return self._dict()
+
+ def dump_list(self, v):
+ retval = "["
+ for u in v:
+ retval += " " + unicode(self.dump_value(u)) + ","
+ retval += "]"
+ return retval
+
+ def dump_inline_table(self, section):
+ """Preserve inline table in its compact syntax instead of expanding
+ into subsection.
+
+ https://github.com/toml-lang/toml#user-content-inline-table
+ """
+ retval = ""
+ if isinstance(section, dict):
+ val_list = []
+ for k, v in section.items():
+ val = self.dump_inline_table(v)
+ val_list.append(k + " = " + val)
+ retval += "{ " + ", ".join(val_list) + " }\n"
+ return retval
+ else:
+ return unicode(self.dump_value(section))
+
+ def dump_value(self, v):
+ # Lookup function corresponding to v's type
+ dump_fn = self.dump_funcs.get(type(v))
+ if dump_fn is None and hasattr(v, '__iter__'):
+ dump_fn = self.dump_funcs[list]
+ # Evaluate function (if it exists) else return v
+ return dump_fn(v) if dump_fn is not None else self.dump_funcs[str](v)
+
+ def dump_sections(self, o, sup):
+ retstr = ""
+ if sup != "" and sup[-1] != ".":
+ sup += '.'
+ retdict = self._dict()
+ arraystr = ""
+ for section in o:
+ section = unicode(section)
+ qsection = section
+ if not re.match(r'^[A-Za-z0-9_-]+$', section):
+ qsection = _dump_str(section)
+ if not isinstance(o[section], dict):
+ arrayoftables = False
+ if isinstance(o[section], list):
+ for a in o[section]:
+ if isinstance(a, dict):
+ arrayoftables = True
+ if arrayoftables:
+ for a in o[section]:
+ arraytabstr = "\n"
+ arraystr += "[[" + sup + qsection + "]]\n"
+ s, d = self.dump_sections(a, sup + qsection)
+ if s:
+ if s[0] == "[":
+ arraytabstr += s
+ else:
+ arraystr += s
+ while d:
+ newd = self._dict()
+ for dsec in d:
+ s1, d1 = self.dump_sections(d[dsec], sup +
+ qsection + "." +
+ dsec)
+ if s1:
+ arraytabstr += ("[" + sup + qsection +
+ "." + dsec + "]\n")
+ arraytabstr += s1
+ for s1 in d1:
+ newd[dsec + "." + s1] = d1[s1]
+ d = newd
+ arraystr += arraytabstr
+ else:
+ if o[section] is not None:
+ retstr += (qsection + " = " +
+ unicode(self.dump_value(o[section])) + '\n')
+ elif self.preserve and isinstance(o[section], InlineTableDict):
+ retstr += (qsection + " = " +
+ self.dump_inline_table(o[section]))
+ else:
+ retdict[qsection] = o[section]
+ retstr += arraystr
+ return (retstr, retdict)
+
+
+class TomlPreserveInlineDictEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict):
+ super(TomlPreserveInlineDictEncoder, self).__init__(_dict, True)
+
+
+class TomlArraySeparatorEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict, preserve=False, separator=","):
+ super(TomlArraySeparatorEncoder, self).__init__(_dict, preserve)
+ if separator.strip() == "":
+ separator = "," + separator
+ elif separator.strip(' \t\n\r,'):
+ raise ValueError("Invalid separator for arrays")
+ self.separator = separator
+
+ def dump_list(self, v):
+ t = []
+ retval = "["
+ for u in v:
+ t.append(self.dump_value(u))
+ while t != []:
+ s = []
+ for u in t:
+ if isinstance(u, list):
+ for r in u:
+ s.append(r)
+ else:
+ retval += " " + unicode(u) + self.separator
+ t = s
+ retval += "]"
+ return retval
+
+
+class TomlNumpyEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict, preserve=False):
+ import numpy as np
+ super(TomlNumpyEncoder, self).__init__(_dict, preserve)
+ self.dump_funcs[np.float16] = _dump_float
+ self.dump_funcs[np.float32] = _dump_float
+ self.dump_funcs[np.float64] = _dump_float
+ self.dump_funcs[np.int16] = self._dump_int
+ self.dump_funcs[np.int32] = self._dump_int
+ self.dump_funcs[np.int64] = self._dump_int
+
+ def _dump_int(self, v):
+ return "{}".format(int(v))
+
+
+class TomlPreserveCommentEncoder(TomlEncoder):
+
+ def __init__(self, _dict=dict, preserve=False):
+ from toml.decoder import CommentValue
+ super(TomlPreserveCommentEncoder, self).__init__(_dict, preserve)
+ self.dump_funcs[CommentValue] = lambda v: v.dump(self.dump_value)
+
+
+class TomlPathlibEncoder(TomlEncoder):
+
+ def _dump_pathlib_path(self, v):
+ return _dump_str(str(v))
+
+ def dump_value(self, v):
+ if (3, 4) <= sys.version_info:
+ import pathlib
+ if isinstance(v, pathlib.PurePath):
+ v = str(v)
+ return super(TomlPathlibEncoder, self).dump_value(v)
diff --git a/third_party/python/toml/toml/ordered.py b/third_party/python/toml/toml/ordered.py
new file mode 100644
index 0000000000..9c20c41a1b
--- /dev/null
+++ b/third_party/python/toml/toml/ordered.py
@@ -0,0 +1,15 @@
+from collections import OrderedDict
+from toml import TomlEncoder
+from toml import TomlDecoder
+
+
+class TomlOrderedDecoder(TomlDecoder):
+
+ def __init__(self):
+ super(self.__class__, self).__init__(_dict=OrderedDict)
+
+
+class TomlOrderedEncoder(TomlEncoder):
+
+ def __init__(self):
+ super(self.__class__, self).__init__(_dict=OrderedDict)
diff --git a/third_party/python/toml/toml/tz.py b/third_party/python/toml/toml/tz.py
new file mode 100644
index 0000000000..bf20593a26
--- /dev/null
+++ b/third_party/python/toml/toml/tz.py
@@ -0,0 +1,24 @@
+from datetime import tzinfo, timedelta
+
+
+class TomlTz(tzinfo):
+ def __init__(self, toml_offset):
+ if toml_offset == "Z":
+ self._raw_offset = "+00:00"
+ else:
+ self._raw_offset = toml_offset
+ self._sign = -1 if self._raw_offset[0] == '-' else 1
+ self._hours = int(self._raw_offset[1:3])
+ self._minutes = int(self._raw_offset[4:6])
+
+ def __deepcopy__(self, memo):
+ return self.__class__(self._raw_offset)
+
+ def tzname(self, dt):
+ return "UTC" + self._raw_offset
+
+ def utcoffset(self, dt):
+ return self._sign * timedelta(hours=self._hours, minutes=self._minutes)
+
+ def dst(self, dt):
+ return timedelta(0)
diff --git a/third_party/python/tqdm/tqdm-4.62.3.dist-info/LICENCE b/third_party/python/tqdm/tqdm-4.62.3.dist-info/LICENCE
new file mode 100644
index 0000000000..5b3cab7fb5
--- /dev/null
+++ b/third_party/python/tqdm/tqdm-4.62.3.dist-info/LICENCE
@@ -0,0 +1,49 @@
+`tqdm` is a product of collaborative work.
+Unless otherwise stated, all authors (see commit logs) retain copyright
+for their respective work, and release the work under the MIT licence
+(text below).
+
+Exceptions or notable authors are listed below
+in reverse chronological order:
+
+* files: *
+ MPLv2.0 2015-2021 (c) Casper da Costa-Luis
+ [casperdcl](https://github.com/casperdcl).
+* files: tqdm/_tqdm.py
+ MIT 2016 (c) [PR #96] on behalf of Google Inc.
+* files: tqdm/_tqdm.py setup.py README.rst MANIFEST.in .gitignore
+ MIT 2013 (c) Noam Yorav-Raphael, original author.
+
+[PR #96]: https://github.com/tqdm/tqdm/pull/96
+
+
+Mozilla Public Licence (MPL) v. 2.0 - Exhibit A
+-----------------------------------------------
+
+This Source Code Form is subject to the terms of the
+Mozilla Public License, v. 2.0.
+If a copy of the MPL was not distributed with this project,
+You can obtain one at https://mozilla.org/MPL/2.0/.
+
+
+MIT License (MIT)
+-----------------
+
+Copyright (c) 2013 noamraph
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/python/tqdm/tqdm-4.62.3.dist-info/METADATA b/third_party/python/tqdm/tqdm-4.62.3.dist-info/METADATA
new file mode 100644
index 0000000000..48461ec16e
--- /dev/null
+++ b/third_party/python/tqdm/tqdm-4.62.3.dist-info/METADATA
@@ -0,0 +1,1585 @@
+Metadata-Version: 2.1
+Name: tqdm
+Version: 4.62.3
+Summary: Fast, Extensible Progress Meter
+Home-page: https://tqdm.github.io
+Maintainer: tqdm developers
+Maintainer-email: python.tqdm@gmail.com
+License: MPLv2.0, MIT Licences
+Project-URL: Changelog, https://tqdm.github.io/releases
+Project-URL: Source, https://github.com/tqdm/tqdm
+Project-URL: Wiki, https://github.com/tqdm/tqdm/wiki
+Keywords: progressbar,progressmeter,progress,bar,meter,rate,eta,console,terminal,time
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Environment :: MacOS X
+Classifier: Environment :: Other Environment
+Classifier: Environment :: Win32 (MS Windows)
+Classifier: Environment :: X11 Applications
+Classifier: Framework :: IPython
+Classifier: Framework :: Jupyter
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: End Users/Desktop
+Classifier: Intended Audience :: Other Audience
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: MIT License
+Classifier: License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)
+Classifier: Operating System :: MacOS
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: Microsoft
+Classifier: Operating System :: Microsoft :: MS-DOS
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: POSIX :: BSD
+Classifier: Operating System :: POSIX :: BSD :: FreeBSD
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: POSIX :: SunOS/Solaris
+Classifier: Operating System :: Unix
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation
+Classifier: Programming Language :: Python :: Implementation :: IronPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Programming Language :: Unix Shell
+Classifier: Topic :: Desktop Environment
+Classifier: Topic :: Education :: Computer Aided Instruction (CAI)
+Classifier: Topic :: Education :: Testing
+Classifier: Topic :: Office/Business
+Classifier: Topic :: Other/Nonlisted Topic
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Software Development :: Pre-processors
+Classifier: Topic :: Software Development :: User Interfaces
+Classifier: Topic :: System :: Installation/Setup
+Classifier: Topic :: System :: Logging
+Classifier: Topic :: System :: Monitoring
+Classifier: Topic :: System :: Shells
+Classifier: Topic :: Terminals
+Classifier: Topic :: Utilities
+Provides: tqdm
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7
+Description-Content-Type: text/x-rst
+License-File: LICENCE
+Requires-Dist: colorama ; platform_system == "Windows"
+Provides-Extra: dev
+Requires-Dist: py-make (>=0.1.0) ; extra == 'dev'
+Requires-Dist: twine ; extra == 'dev'
+Requires-Dist: wheel ; extra == 'dev'
+Provides-Extra: notebook
+Requires-Dist: ipywidgets (>=6) ; extra == 'notebook'
+Provides-Extra: telegram
+Requires-Dist: requests ; extra == 'telegram'
+
+|Logo|
+
+tqdm
+====
+
+|Py-Versions| |Versions| |Conda-Forge-Status| |Docker| |Snapcraft|
+
+|Build-Status| |Coverage-Status| |Branch-Coverage-Status| |Codacy-Grade| |Libraries-Rank| |PyPI-Downloads|
+
+|LICENCE| |OpenHub-Status| |binder-demo| |awesome-python|
+
+``tqdm`` derives from the Arabic word *taqaddum* (تقدّم) which can mean "progress,"
+and is an abbreviation for "I love you so much" in Spanish (*te quiero demasiado*).
+
+Instantly make your loops show a smart progress meter - just wrap any
+iterable with ``tqdm(iterable)``, and you're done!
+
+.. code:: python
+
+ from tqdm import tqdm
+ for i in tqdm(range(10000)):
+ ...
+
+``76%|████████████████████████        | 7568/10000 [00:33<00:10, 229.00it/s]``
+
+``trange(N)`` can be also used as a convenient shortcut for
+``tqdm(range(N))``.
+
+|Screenshot|
+ |Video| |Slides| |Merch|
+
+It can also be executed as a module with pipes:
+
+.. code:: sh
+
+ $ seq 9999999 | tqdm --bytes | wc -l
+ 75.2MB [00:00, 217MB/s]
+ 9999999
+
+ $ tar -zcf - docs/ | tqdm --bytes --total `du -sb docs/ | cut -f1` \
+ > backup.tgz
+ 32%|██████████▍ | 8.89G/27.9G [00:42<01:31, 223MB/s]
+
+Overhead is low -- about 60ns per iteration (80ns with ``tqdm.gui``), and is
+unit tested against performance regression.
+By comparison, the well-established
+`ProgressBar <https://github.com/niltonvolpato/python-progressbar>`__ has
+an 800ns/iter overhead.
+
+In addition to its low overhead, ``tqdm`` uses smart algorithms to predict
+the remaining time and to skip unnecessary iteration displays, which allows
+for a negligible overhead in most cases.
+
+``tqdm`` works on any platform
+(Linux, Windows, Mac, FreeBSD, NetBSD, Solaris/SunOS),
+in any console or in a GUI, and is also friendly with IPython/Jupyter notebooks.
+
+``tqdm`` does not require any dependencies (not even ``curses``!), just
+Python and an environment supporting ``carriage return \r`` and
+``line feed \n`` control characters.
+
+------------------------------------------
+
+.. contents:: Table of contents
+ :backlinks: top
+ :local:
+
+
+Installation
+------------
+
+Latest PyPI stable release
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+|Versions| |PyPI-Downloads| |Libraries-Dependents|
+
+.. code:: sh
+
+ pip install tqdm
+
+Latest development release on GitHub
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+|GitHub-Status| |GitHub-Stars| |GitHub-Commits| |GitHub-Forks| |GitHub-Updated|
+
+Pull and install pre-release ``devel`` branch:
+
+.. code:: sh
+
+ pip install "git+https://github.com/tqdm/tqdm.git@devel#egg=tqdm"
+
+Latest Conda release
+~~~~~~~~~~~~~~~~~~~~
+
+|Conda-Forge-Status|
+
+.. code:: sh
+
+ conda install -c conda-forge tqdm
+
+Latest Snapcraft release
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+|Snapcraft|
+
+There are 3 channels to choose from:
+
+.. code:: sh
+
+ snap install tqdm # implies --stable, i.e. latest tagged release
+ snap install tqdm --candidate # master branch
+ snap install tqdm --edge # devel branch
+
+Note that ``snap`` binaries are purely for CLI use (not ``import``-able), and
+automatically set up ``bash`` tab-completion.
+
+Latest Docker release
+~~~~~~~~~~~~~~~~~~~~~
+
+|Docker|
+
+.. code:: sh
+
+ docker pull tqdm/tqdm
+ docker run -i --rm tqdm/tqdm --help
+
+Other
+~~~~~
+
+There are other (unofficial) places where ``tqdm`` may be downloaded, particularly for CLI use:
+
+|Repology|
+
+.. |Repology| image:: https://repology.org/badge/tiny-repos/python:tqdm.svg
+ :target: https://repology.org/project/python:tqdm/versions
+
+Changelog
+---------
+
+The list of all changes is available either on GitHub's Releases:
+|GitHub-Status|, on the
+`wiki <https://github.com/tqdm/tqdm/wiki/Releases>`__, or on the
+`website <https://tqdm.github.io/releases>`__.
+
+
+Usage
+-----
+
+``tqdm`` is very versatile and can be used in a number of ways.
+The three main ones are given below.
+
+Iterable-based
+~~~~~~~~~~~~~~
+
+Wrap ``tqdm()`` around any iterable:
+
+.. code:: python
+
+ from tqdm import tqdm
+ from time import sleep
+
+ text = ""
+ for char in tqdm(["a", "b", "c", "d"]):
+ sleep(0.25)
+ text = text + char
+
+``trange(i)`` is a special optimised instance of ``tqdm(range(i))``:
+
+.. code:: python
+
+ from tqdm import trange
+
+ for i in trange(100):
+ sleep(0.01)
+
+Instantiation outside of the loop allows for manual control over ``tqdm()``:
+
+.. code:: python
+
+ pbar = tqdm(["a", "b", "c", "d"])
+ for char in pbar:
+ sleep(0.25)
+ pbar.set_description("Processing %s" % char)
+
+Manual
+~~~~~~
+
+Manual control of ``tqdm()`` updates using a ``with`` statement:
+
+.. code:: python
+
+ with tqdm(total=100) as pbar:
+ for i in range(10):
+ sleep(0.1)
+ pbar.update(10)
+
+If the optional variable ``total`` (or an iterable with ``len()``) is
+provided, predictive stats are displayed.
+
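+If the iterable has no ``len()`` (e.g. a generator), ``total`` supplies the
+count; a minimal sketch (the generator here is illustrative):
+
+.. code:: python
+
+    from time import sleep
+    from tqdm import tqdm
+
+    def produce(n):  # a plain generator: len() is unavailable
+        for i in range(n):
+            yield i
+
+    # pass total explicitly so predictive stats can be shown
+    for _ in tqdm(produce(100), total=100):
+        sleep(0.01)
+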
+``with`` is also optional (you can just assign ``tqdm()`` to a variable,
+but in this case don't forget to ``del`` or ``close()`` at the end):
+
+.. code:: python
+
+ pbar = tqdm(total=100)
+ for i in range(10):
+ sleep(0.1)
+ pbar.update(10)
+ pbar.close()
+
+Module
+~~~~~~
+
+Perhaps the most wonderful use of ``tqdm`` is in a script or on the command
+line. Simply inserting ``tqdm`` (or ``python -m tqdm``) between pipes will pass
+through all ``stdin`` to ``stdout`` while printing progress to ``stderr``.
+
+The example below demonstrates counting the number of lines in all Python files
+in the current directory, with timing information included.
+
+.. code:: sh
+
+ $ time find . -name '*.py' -type f -exec cat \{} \; | wc -l
+ 857365
+
+ real 0m3.458s
+ user 0m0.274s
+ sys 0m3.325s
+
+ $ time find . -name '*.py' -type f -exec cat \{} \; | tqdm | wc -l
+ 857366it [00:03, 246471.31it/s]
+ 857365
+
+ real 0m3.585s
+ user 0m0.862s
+ sys 0m3.358s
+
+Note that the usual arguments for ``tqdm`` can also be specified.
+
+.. code:: sh
+
+ $ find . -name '*.py' -type f -exec cat \{} \; |
+ tqdm --unit loc --unit_scale --total 857366 >> /dev/null
+ 100%|█████████████████████████████████| 857K/857K [00:04<00:00, 246Kloc/s]
+
+Backing up a large directory?
+
+.. code:: sh
+
+ $ tar -zcf - docs/ | tqdm --bytes --total `du -sb docs/ | cut -f1` \
+ > backup.tgz
+ 44%|██████████████▊ | 153M/352M [00:14<00:18, 11.0MB/s]
+
+This can be beautified further:
+
+.. code:: sh
+
+ $ BYTES="$(du -sb docs/ | cut -f1)"
+ $ tar -cf - docs/ \
+ | tqdm --bytes --total "$BYTES" --desc Processing | gzip \
+ | tqdm --bytes --total "$BYTES" --desc Compressed --position 1 \
+ > ~/backup.tgz
+ Processing: 100%|██████████████████████| 352M/352M [00:14<00:00, 30.2MB/s]
+ Compressed: 42%|█████████▎ | 148M/352M [00:14<00:19, 10.9MB/s]
+
+Or done on a file level using 7-zip:
+
+.. code:: sh
+
+ $ 7z a -bd -r backup.7z docs/ | grep Compressing \
+ | tqdm --total $(find docs/ -type f | wc -l) --unit files \
+ | grep -v Compressing
+ 100%|██████████████████████████▉| 15327/15327 [01:00<00:00, 712.96files/s]
+
+Pre-existing CLI programs already outputting basic progress information will
+benefit from ``tqdm``'s ``--update`` and ``--update_to`` flags:
+
+.. code:: sh
+
+ $ seq 3 0.1 5 | tqdm --total 5 --update_to --null
+ 100%|████████████████████████████████████| 5.0/5 [00:00<00:00, 9673.21it/s]
+ $ seq 10 | tqdm --update --null # 1 + 2 + ... + 10 = 55 iterations
+ 55it [00:00, 90006.52it/s]
+
+FAQ and Known Issues
+--------------------
+
+|GitHub-Issues|
+
+The most common issues relate to excessive output on multiple lines, instead
+of a neat one-line progress bar.
+
+- Consoles in general: require support for carriage return (``CR``, ``\r``).
+- Nested progress bars:
+
+ * Consoles in general: require support for moving cursors up to the
+ previous line. For example,
+ `IDLE <https://github.com/tqdm/tqdm/issues/191#issuecomment-230168030>`__,
+ `ConEmu <https://github.com/tqdm/tqdm/issues/254>`__ and
+ `PyCharm <https://github.com/tqdm/tqdm/issues/203>`__ (also
+ `here <https://github.com/tqdm/tqdm/issues/208>`__,
+ `here <https://github.com/tqdm/tqdm/issues/307>`__, and
+ `here <https://github.com/tqdm/tqdm/issues/454#issuecomment-335416815>`__)
+ lack full support.
+ * Windows: additionally may require the Python module ``colorama``
+ to ensure nested bars stay within their respective lines.
+
+- Unicode:
+
+ * Environments which report that they support unicode will have solid smooth
+ progressbars. The fallback is an ``ascii``-only bar.
+ * Windows consoles often only partially support unicode and thus
+ `often require explicit ascii=True <https://github.com/tqdm/tqdm/issues/454#issuecomment-335416815>`__
+ (also `here <https://github.com/tqdm/tqdm/issues/499>`__). This is due to
+ either normal-width unicode characters being incorrectly displayed as
+ "wide", or some unicode characters not rendering.
+
+- Wrapping generators:
+
+ * Generator wrapper functions tend to hide the length of iterables.
+ ``tqdm`` does not.
+  * Replace ``tqdm(enumerate(...))`` with ``enumerate(tqdm(...))`` or
+    ``tqdm(enumerate(x), total=len(x), ...)``.
+    The same applies to ``numpy.ndenumerate`` (see the example after this list).
+ * Replace ``tqdm(zip(a, b))`` with ``zip(tqdm(a), b)`` or even
+ ``zip(tqdm(a), tqdm(b))``.
+ * The same applies to ``itertools``.
+ * Some useful convenience functions can be found under ``tqdm.contrib``.
+
+- `Hanging pipes in python2 <https://github.com/tqdm/tqdm/issues/359>`__:
+ when using ``tqdm`` on the CLI, you may need to use Python 3.5+ for correct
+ buffering.
+- `No intermediate output in docker-compose <https://github.com/tqdm/tqdm/issues/771>`__:
+ use ``docker-compose run`` instead of ``docker-compose up`` and ``tty: true``.
+
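+A minimal sketch of the wrapping advice above (``x`` is illustrative; any
+sized iterable works):
+
+.. code:: python
+
+    from tqdm import tqdm
+
+    x = list(range(1000))
+
+    # wrap the sized iterable itself, then enumerate/zip the wrapper
+    for i, item in enumerate(tqdm(x)):
+        pass
+
+    for a, b in zip(tqdm(x), x):
+        pass
+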
+If you come across any other difficulties, browse and file |GitHub-Issues|.
+
+Documentation
+-------------
+
+|Py-Versions| |README-Hits| (Since 19 May 2016)
+
+.. code:: python
+
+ class tqdm():
+ """
+ Decorate an iterable object, returning an iterator which acts exactly
+ like the original iterable, but prints a dynamically updating
+ progressbar every time a value is requested.
+ """
+
+ def __init__(self, iterable=None, desc=None, total=None, leave=True,
+ file=None, ncols=None, mininterval=0.1,
+ maxinterval=10.0, miniters=None, ascii=None, disable=False,
+ unit='it', unit_scale=False, dynamic_ncols=False,
+ smoothing=0.3, bar_format=None, initial=0, position=None,
+ postfix=None, unit_divisor=1000):
+
+Parameters
+~~~~~~~~~~
+
+* iterable : iterable, optional
+ Iterable to decorate with a progressbar.
+ Leave blank to manually manage the updates.
+* desc : str, optional
+ Prefix for the progressbar.
+* total : int or float, optional
+ The number of expected iterations. If unspecified,
+ len(iterable) is used if possible. If float("inf") or as a last
+ resort, only basic progress statistics are displayed
+ (no ETA, no progressbar).
+  If ``gui`` is True and this parameter needs subsequent updating,
+  specify an arbitrarily large initial positive number,
+  e.g. 9e9.
+* leave : bool, optional
+ If [default: True], keeps all traces of the progressbar
+ upon termination of iteration.
+ If ``None``, will leave only if ``position`` is ``0``.
+* file : ``io.TextIOWrapper`` or ``io.StringIO``, optional
+ Specifies where to output the progress messages
+ (default: sys.stderr). Uses ``file.write(str)`` and ``file.flush()``
+ methods. For encoding, see ``write_bytes``.
+* ncols : int, optional
+ The width of the entire output message. If specified,
+ dynamically resizes the progressbar to stay within this bound.
+ If unspecified, attempts to use environment width. The
+ fallback is a meter width of 10 and no limit for the counter and
+ statistics. If 0, will not print any meter (only stats).
+* mininterval : float, optional
+ Minimum progress display update interval [default: 0.1] seconds.
+* maxinterval : float, optional
+ Maximum progress display update interval [default: 10] seconds.
+ Automatically adjusts ``miniters`` to correspond to ``mininterval``
+ after long display update lag. Only works if ``dynamic_miniters``
+ or monitor thread is enabled.
+* miniters : int or float, optional
+ Minimum progress display update interval, in iterations.
+ If 0 and ``dynamic_miniters``, will automatically adjust to equal
+ ``mininterval`` (more CPU efficient, good for tight loops).
+ If > 0, will skip display of specified number of iterations.
+ Tweak this and ``mininterval`` to get very efficient loops.
+ If your progress is erratic with both fast and slow iterations
+ (network, skipping items, etc) you should set miniters=1.
+* ascii : bool or str, optional
+ If unspecified or False, use unicode (smooth blocks) to fill
+ the meter. The fallback is to use ASCII characters " 123456789#".
+* disable : bool, optional
+ Whether to disable the entire progressbar wrapper
+ [default: False]. If set to None, disable on non-TTY.
+* unit : str, optional
+ String that will be used to define the unit of each iteration
+ [default: it].
+* unit_scale : bool or int or float, optional
+ If 1 or True, the number of iterations will be reduced/scaled
+ automatically and a metric prefix following the
+ International System of Units standard will be added
+ (kilo, mega, etc.) [default: False]. If any other non-zero
+ number, will scale ``total`` and ``n``.
+* dynamic_ncols : bool, optional
+  If set, constantly alters ``ncols`` and ``nrows`` to match the
+  environment (allowing for window resizes) [default: False].
+* smoothing : float, optional
+ Exponential moving average smoothing factor for speed estimates
+ (ignored in GUI mode). Ranges from 0 (average speed) to 1
+ (current/instantaneous speed) [default: 0.3].
+* bar_format : str, optional
+ Specify a custom bar string formatting. May impact performance.
+ [default: '{l_bar}{bar}{r_bar}'], where
+ l_bar='{desc}: {percentage:3.0f}%|' and
+ r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
+ '{rate_fmt}{postfix}]'
+ Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
+ percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,
+ rate, rate_fmt, rate_noinv, rate_noinv_fmt,
+ rate_inv, rate_inv_fmt, postfix, unit_divisor,
+ remaining, remaining_s, eta.
+ Note that a trailing ": " is automatically removed after {desc}
+ if the latter is empty.
+* initial : int or float, optional
+ The initial counter value. Useful when restarting a progress
+ bar [default: 0]. If using float, consider specifying ``{n:.3f}``
+ or similar in ``bar_format``, or specifying ``unit_scale``.
+* position : int, optional
+ Specify the line offset to print this bar (starting from 0)
+ Automatic if unspecified.
+ Useful to manage multiple bars at once (eg, from threads).
+* postfix : dict or ``*``, optional
+ Specify additional stats to display at the end of the bar.
+ Calls ``set_postfix(**postfix)`` if possible (dict).
+* unit_divisor : float, optional
+ [default: 1000], ignored unless ``unit_scale`` is True.
+* write_bytes : bool, optional
+ If (default: None) and ``file`` is unspecified,
+ bytes will be written in Python 2. If ``True`` will also write
+ bytes. In all other cases will default to unicode.
+* lock_args : tuple, optional
+ Passed to ``refresh`` for intermediate output
+ (initialisation, iterating, and updating).
+* nrows : int, optional
+ The screen height. If specified, hides nested bars outside this
+ bound. If unspecified, attempts to use environment height.
+ The fallback is 20.
+* colour : str, optional
+ Bar colour (e.g. 'green', '#00ff00').
+* delay : float, optional
+ Don't display until [default: 0] seconds have elapsed.
+
+Extra CLI Options
+~~~~~~~~~~~~~~~~~
+
+* delim : chr, optional
+ Delimiting character [default: '\n']. Use '\0' for null.
+ N.B.: on Windows systems, Python converts '\n' to '\r\n'.
+* buf_size : int, optional
+ String buffer size in bytes [default: 256]
+ used when ``delim`` is specified.
+* bytes : bool, optional
+ If true, will count bytes, ignore ``delim``, and default
+ ``unit_scale`` to True, ``unit_divisor`` to 1024, and ``unit`` to 'B'.
+* tee : bool, optional
+ If true, passes ``stdin`` to both ``stderr`` and ``stdout``.
+* update : bool, optional
+ If true, will treat input as newly elapsed iterations,
+ i.e. numbers to pass to ``update()``. Note that this is slow
+ (~2e5 it/s) since every input must be decoded as a number.
+* update_to : bool, optional
+ If true, will treat input as total elapsed iterations,
+ i.e. numbers to assign to ``self.n``. Note that this is slow
+ (~2e5 it/s) since every input must be decoded as a number.
+* null : bool, optional
+ If true, will discard input (no stdout).
+* manpath : str, optional
+ Directory in which to install tqdm man pages.
+* comppath : str, optional
+ Directory in which to place tqdm completion.
+* log : str, optional
+ CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET.
+
+Returns
+~~~~~~~
+
+* out : decorated iterator.
+
+.. code:: python
+
+ class tqdm():
+ def update(self, n=1):
+ """
+ Manually update the progress bar, useful for streams
+ such as reading files.
+ E.g.:
+ >>> t = tqdm(total=filesize) # Initialise
+ >>> for current_buffer in stream:
+ ... ...
+ ... t.update(len(current_buffer))
+ >>> t.close()
+ The last line is highly recommended, but possibly not necessary if
+ ``t.update()`` will be called in such a way that ``filesize`` will be
+ exactly reached and printed.
+
+ Parameters
+ ----------
+ n : int or float, optional
+ Increment to add to the internal counter of iterations
+ [default: 1]. If using float, consider specifying ``{n:.3f}``
+ or similar in ``bar_format``, or specifying ``unit_scale``.
+
+ Returns
+ -------
+ out : bool or None
+ True if a ``display()`` was triggered.
+ """
+
+ def close(self):
+ """Cleanup and (if leave=False) close the progressbar."""
+
+ def clear(self, nomove=False):
+ """Clear current bar display."""
+
+ def refresh(self):
+ """
+ Force refresh the display of this bar.
+
+ Parameters
+ ----------
+ nolock : bool, optional
+ If ``True``, does not lock.
+ If [default: ``False``]: calls ``acquire()`` on internal lock.
+ lock_args : tuple, optional
+ Passed to internal lock's ``acquire()``.
+ If specified, will only ``display()`` if ``acquire()`` returns ``True``.
+ """
+
+ def unpause(self):
+ """Restart tqdm timer from last print time."""
+
+ def reset(self, total=None):
+ """
+ Resets to 0 iterations for repeated use.
+
+ Consider combining with ``leave=True``.
+
+ Parameters
+ ----------
+ total : int or float, optional. Total to use for the new bar.
+ """
+
+ def set_description(self, desc=None, refresh=True):
+ """
+ Set/modify description of the progress bar.
+
+ Parameters
+ ----------
+ desc : str, optional
+ refresh : bool, optional
+ Forces refresh [default: True].
+ """
+
+ def set_postfix(self, ordered_dict=None, refresh=True, **tqdm_kwargs):
+ """
+ Set/modify postfix (additional stats)
+ with automatic formatting based on datatype.
+
+ Parameters
+ ----------
+ ordered_dict : dict or OrderedDict, optional
+ refresh : bool, optional
+ Forces refresh [default: True].
+ kwargs : dict, optional
+ """
+
+ @classmethod
+ def write(cls, s, file=sys.stdout, end="\n"):
+ """Print a message via tqdm (without overlap with bars)."""
+
+ @property
+ def format_dict(self):
+ """Public API for read-only member access."""
+
+ def display(self, msg=None, pos=None):
+ """
+ Use ``self.sp`` to display ``msg`` in the specified ``pos``.
+
+ Consider overloading this function when inheriting to use e.g.:
+ ``self.some_frontend(**self.format_dict)`` instead of ``self.sp``.
+
+ Parameters
+ ----------
+ msg : str, optional. What to display (default: ``repr(self)``).
+ pos : int, optional. Position to ``moveto``
+ (default: ``abs(self.pos)``).
+ """
+
+ @classmethod
+ @contextmanager
+ def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):
+ """
+ stream : file-like object.
+ method : str, "read" or "write". The result of ``read()`` and
+ the first argument of ``write()`` should have a ``len()``.
+
+ >>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
+ ... while True:
+ ... chunk = fobj.read(chunk_size)
+ ... if not chunk:
+ ... break
+ """
+
+ @classmethod
+ def pandas(cls, *targs, **tqdm_kwargs):
+ """Registers the current `tqdm` class with `pandas`."""
+
+ def trange(*args, **tqdm_kwargs):
+ """
+ A shortcut for `tqdm(xrange(*args), **tqdm_kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+
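+The ``pandas`` registration above enables ``progress_apply`` on DataFrames
+and Series; a minimal sketch (the frame and ``desc`` are illustrative):
+
+.. code:: python
+
+    import pandas as pd
+    from tqdm import tqdm
+
+    tqdm.pandas(desc="rows")  # registers progress_apply alongside apply
+
+    df = pd.DataFrame({"x": range(1000)})
+    df["y"] = df["x"].progress_apply(lambda v: v * 2)
+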
+Convenience Functions
+~~~~~~~~~~~~~~~~~~~~~
+
+.. code:: python
+
+ def tqdm.contrib.tenumerate(iterable, start=0, total=None,
+ tqdm_class=tqdm.auto.tqdm, **tqdm_kwargs):
+ """Equivalent of `numpy.ndenumerate` or builtin `enumerate`."""
+
+ def tqdm.contrib.tzip(iter1, *iter2plus, **tqdm_kwargs):
+ """Equivalent of builtin `zip`."""
+
+ def tqdm.contrib.tmap(function, *sequences, **tqdm_kwargs):
+ """Equivalent of builtin `map`."""
+
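+A short usage sketch of these wrappers (arguments as in the signatures
+above):
+
+.. code:: python
+
+    from tqdm.contrib import tenumerate, tzip, tmap
+
+    for i, ch in tenumerate(["a", "b", "c"]):
+        pass
+
+    for a, b in tzip([1, 2, 3], [4, 5, 6]):
+        pass
+
+    squares = list(tmap(lambda n: n * n, range(100)))
+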
+Submodules
+~~~~~~~~~~
+
+.. code:: python
+
+ class tqdm.notebook.tqdm(tqdm.tqdm):
+ """IPython/Jupyter Notebook widget."""
+
+ class tqdm.auto.tqdm(tqdm.tqdm):
+ """Automatically chooses beween `tqdm.notebook` and `tqdm.tqdm`."""
+
+ class tqdm.asyncio.tqdm(tqdm.tqdm):
+ """Asynchronous version."""
+ @classmethod
+ def as_completed(cls, fs, *, loop=None, timeout=None, total=None,
+ **tqdm_kwargs):
+ """Wrapper for `asyncio.as_completed`."""
+
+ class tqdm.gui.tqdm(tqdm.tqdm):
+ """Matplotlib GUI version."""
+
+ class tqdm.tk.tqdm(tqdm.tqdm):
+ """Tkinter GUI version."""
+
+ class tqdm.rich.tqdm(tqdm.tqdm):
+ """`rich.progress` version."""
+
+ class tqdm.keras.TqdmCallback(keras.callbacks.Callback):
+ """Keras callback for epoch and batch progress."""
+
+ class tqdm.dask.TqdmCallback(dask.callbacks.Callback):
+ """Dask callback for task progress."""
+
+
+``contrib``
++++++++++++
+
+The ``tqdm.contrib`` package also contains experimental modules:
+
+- ``tqdm.contrib.itertools``: Thin wrappers around ``itertools``
+- ``tqdm.contrib.concurrent``: Thin wrappers around ``concurrent.futures``
+- ``tqdm.contrib.discord``: Posts to `Discord <https://discord.com>`__ bots
+- ``tqdm.contrib.telegram``: Posts to `Telegram <https://telegram.org>`__ bots
+- ``tqdm.contrib.bells``: Automagically enables all optional features
+
+ * ``auto``, ``pandas``, ``discord``, ``telegram``
+
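+For instance, ``tqdm.contrib.concurrent`` provides ``thread_map``, a
+progress-reporting wrapper around ``concurrent.futures`` (a minimal sketch;
+the mapped function is illustrative):
+
+.. code:: python
+
+    from tqdm.contrib.concurrent import thread_map
+
+    # map a function over an iterable using worker threads, with a bar
+    results = thread_map(lambda n: n * 2, range(1000), max_workers=4)
+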
+Examples and Advanced Usage
+---------------------------
+
+- See the `examples <https://github.com/tqdm/tqdm/tree/master/examples>`__
+ folder;
+- import the module and run ``help()``;
+- consult the `wiki <https://github.com/tqdm/tqdm/wiki>`__;
+
+ * this has an
+ `excellent article <https://github.com/tqdm/tqdm/wiki/How-to-make-a-great-Progress-Bar>`__
+ on how to make a **great** progressbar;
+
+- check out the `slides from PyData London <https://tqdm.github.io/PyData2019/slides.html>`__, or
+- run the |binder-demo|.
+
+Description and additional stats
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Custom information can be displayed and updated dynamically on ``tqdm`` bars
+with the ``desc`` and ``postfix`` arguments:
+
+.. code:: python
+
+ from tqdm import tqdm, trange
+ from random import random, randint
+ from time import sleep
+
+ with trange(10) as t:
+ for i in t:
+ # Description will be displayed on the left
+ t.set_description('GEN %i' % i)
+ # Postfix will be displayed on the right,
+ # formatted automatically based on argument's datatype
+ t.set_postfix(loss=random(), gen=randint(1,999), str='h',
+ lst=[1, 2])
+ sleep(0.1)
+
+ with tqdm(total=10, bar_format="{postfix[0]} {postfix[1][value]:>8.2g}",
+ postfix=["Batch", dict(value=0)]) as t:
+ for i in range(10):
+ sleep(0.1)
+ t.postfix[1]["value"] = i / 2
+ t.update()
+
+Points to remember when using ``{postfix[...]}`` in the ``bar_format`` string:
+
+- ``postfix`` also needs to be passed as an initial argument in a compatible
+ format, and
+- ``postfix`` will be auto-converted to a string if it is a ``dict``-like
+  object. To prevent this behaviour, insert an extra item into the dictionary
+  where the key is not a string (see the example below).
+
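+For example, a minimal sketch keeping a ``dict`` postfix indexable (the
+non-string key ``1`` applies the trick from the second point above):
+
+.. code:: python
+
+    from tqdm import tqdm
+
+    with tqdm(total=10, bar_format="stage: {postfix[stage]}",
+              postfix={"stage": "warmup", 1: None}) as t:
+        for _ in range(10):
+            t.update()
+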
+Additional ``bar_format`` parameters may also be defined by overriding
+``format_dict``, and the bar itself may be modified using ``ascii``:
+
+.. code:: python
+
+ from tqdm import tqdm
+ class TqdmExtraFormat(tqdm):
+ """Provides a `total_time` format parameter"""
+ @property
+ def format_dict(self):
+ d = super(TqdmExtraFormat, self).format_dict
+ total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1)
+ d.update(total_time=self.format_interval(total_time) + " in total")
+ return d
+
+ for i in TqdmExtraFormat(
+ range(9), ascii=" .oO0",
+ bar_format="{total_time}: {percentage:.0f}%|{bar}{r_bar}"):
+ if i == 4:
+ break
+
+.. code::
+
+ 00:00 in total: 44%|0000. | 4/9 [00:00<00:00, 962.93it/s]
+
+Note that ``{bar}`` also supports a format specifier ``[width][type]``.
+
+- ``width``
+
+ * unspecified (default): automatic to fill ``ncols``
+ * ``int >= 0``: fixed width overriding ``ncols`` logic
+ * ``int < 0``: subtract from the automatic default
+
+- ``type``
+
+ * ``a``: ascii (``ascii=True`` override)
+ * ``u``: unicode (``ascii=False`` override)
+ * ``b``: blank (``ascii=" "`` override)
+
+This means a fixed bar with right-justified text may be created by using:
+``bar_format="{l_bar}{bar:10}|{bar:-10b}right-justified"``
+
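+A quick sketch of these specifiers in action (the sleep is arbitrary):
+
+.. code:: python
+
+    from time import sleep
+    from tqdm import trange
+
+    # 10-char bar, padded by a 10-char blank bar, then fixed text
+    for _ in trange(100,
+                    bar_format="{l_bar}{bar:10}|{bar:-10b}right-justified"):
+        sleep(0.01)
+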
+Nested progress bars
+~~~~~~~~~~~~~~~~~~~~
+
+``tqdm`` supports nested progress bars. Here's an example:
+
+.. code:: python
+
+ from tqdm.auto import trange
+ from time import sleep
+
+ for i in trange(4, desc='1st loop'):
+ for j in trange(5, desc='2nd loop'):
+ for k in trange(50, desc='3rd loop', leave=False):
+ sleep(0.01)
+
+For manual control over positioning (e.g. for multi-processing use),
+you may specify ``position=n`` where ``n=0`` for the outermost bar,
+``n=1`` for the next, and so on.
+However, it's best to check if ``tqdm`` can work without manual ``position``
+first.
+
+.. code:: python
+
+ from time import sleep
+ from tqdm import trange, tqdm
+ from multiprocessing import Pool, RLock, freeze_support
+
+ L = list(range(9))
+
+ def progresser(n):
+ interval = 0.001 / (n + 2)
+ total = 5000
+ text = "#{}, est. {:<04.2}s".format(n, interval * total)
+ for _ in trange(total, desc=text, position=n):
+ sleep(interval)
+
+ if __name__ == '__main__':
+ freeze_support() # for Windows support
+ tqdm.set_lock(RLock()) # for managing output contention
+ p = Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))
+ p.map(progresser, L)
+
+Note that in Python 3, ``tqdm.write`` is thread-safe:
+
+.. code:: python
+
+ from time import sleep
+ from tqdm import tqdm, trange
+ from concurrent.futures import ThreadPoolExecutor
+
+ L = list(range(9))
+
+ def progresser(n):
+ interval = 0.001 / (n + 2)
+ total = 5000
+ text = "#{}, est. {:<04.2}s".format(n, interval * total)
+ for _ in trange(total, desc=text):
+ sleep(interval)
+ if n == 6:
+ tqdm.write("n == 6 completed.")
+ tqdm.write("`tqdm.write()` is thread-safe in py3!")
+
+ if __name__ == '__main__':
+ with ThreadPoolExecutor() as p:
+ p.map(progresser, L)
+
+Hooks and callbacks
+~~~~~~~~~~~~~~~~~~~
+
+``tqdm`` can easily support callbacks/hooks and manual updates.
+Here's an example with ``urllib``:
+
+**urllib.urlretrieve documentation**
+
+ | [...]
+ | If present, the hook function will be called once
+ | on establishment of the network connection and once after each block read
+ | thereafter. The hook will be passed three arguments; a count of blocks
+ | transferred so far, a block size in bytes, and the total size of the file.
+ | [...]
+
+.. code:: python
+
+ import urllib, os
+ from tqdm import tqdm
+ urllib = getattr(urllib, 'request', urllib)
+
+ class TqdmUpTo(tqdm):
+ """Provides `update_to(n)` which uses `tqdm.update(delta_n)`."""
+ def update_to(self, b=1, bsize=1, tsize=None):
+ """
+ b : int, optional
+ Number of blocks transferred so far [default: 1].
+ bsize : int, optional
+ Size of each block (in tqdm units) [default: 1].
+ tsize : int, optional
+ Total size (in tqdm units). If [default: None] remains unchanged.
+ """
+ if tsize is not None:
+ self.total = tsize
+ return self.update(b * bsize - self.n) # also sets self.n = b * bsize
+
+ eg_link = "https://caspersci.uk.to/matryoshka.zip"
+ with TqdmUpTo(unit='B', unit_scale=True, unit_divisor=1024, miniters=1,
+ desc=eg_link.split('/')[-1]) as t: # all optional kwargs
+ urllib.urlretrieve(eg_link, filename=os.devnull,
+ reporthook=t.update_to, data=None)
+ t.total = t.n
+
+Inspired by `twine#242 <https://github.com/pypa/twine/pull/242>`__.
+Functional alternative in
+`examples/tqdm_wget.py <https://github.com/tqdm/tqdm/blob/master/examples/tqdm_wget.py>`__.
+
+It is recommended to use ``miniters=1`` whenever there are potentially
+large differences in iteration speed (e.g. downloading a file over
+a patchy connection).
+
+**Wrapping read/write methods**
+
+To measure throughput through a file-like object's ``read`` or ``write``
+methods, use ``CallbackIOWrapper``:
+
+.. code:: python
+
+ from tqdm.auto import tqdm
+ from tqdm.utils import CallbackIOWrapper
+
+ with tqdm(total=file_obj.size,
+ unit='B', unit_scale=True, unit_divisor=1024) as t:
+ fobj = CallbackIOWrapper(t.update, file_obj, "read")
+ while True:
+ chunk = fobj.read(chunk_size)
+ if not chunk:
+ break
+ t.reset()
+ # ... continue to use `t` for something else
+
+Alternatively, use the even simpler ``wrapattr`` convenience function,
+which would condense both the ``urllib`` and ``CallbackIOWrapper`` examples
+down to:
+
+.. code:: python
+
+ import urllib, os
+ from tqdm import tqdm
+
+ eg_link = "https://caspersci.uk.to/matryoshka.zip"
+ response = getattr(urllib, 'request', urllib).urlopen(eg_link)
+ with tqdm.wrapattr(open(os.devnull, "wb"), "write",
+ miniters=1, desc=eg_link.split('/')[-1],
+ total=getattr(response, 'length', None)) as fout:
+ for chunk in response:
+ fout.write(chunk)
+
+The ``requests`` equivalent is nearly identical:
+
+.. code:: python
+
+ import requests, os
+ from tqdm import tqdm
+
+ eg_link = "https://caspersci.uk.to/matryoshka.zip"
+ response = requests.get(eg_link, stream=True)
+ with tqdm.wrapattr(open(os.devnull, "wb"), "write",
+ miniters=1, desc=eg_link.split('/')[-1],
+ total=int(response.headers.get('content-length', 0))) as fout:
+ for chunk in response.iter_content(chunk_size=4096):
+ fout.write(chunk)
+
+**Custom callback**
+
+``tqdm`` is known for intelligently skipping unnecessary displays. To make a
+custom callback take advantage of this, simply use the return value of
+``update()``. This is set to ``True`` if a ``display()`` was triggered.
+
+.. code:: python
+
+ from tqdm.auto import tqdm as std_tqdm
+
+ def external_callback(*args, **kwargs):
+ ...
+
+ class TqdmExt(std_tqdm):
+ def update(self, n=1):
+            displayed = super(TqdmExt, self).update(n)
+ if displayed:
+ external_callback(**self.format_dict)
+ return displayed
+
+``asyncio``
+~~~~~~~~~~~
+
+Note that ``break`` isn't currently caught by asynchronous iterators.
+This means that ``tqdm`` cannot clean up after itself in this case:
+
+.. code:: python
+
+ from tqdm.asyncio import tqdm
+
+ async for i in tqdm(range(9)):
+ if i == 2:
+ break
+
+Instead, either call ``pbar.close()`` manually or use the context manager syntax:
+
+.. code:: python
+
+ from tqdm.asyncio import tqdm
+
+ with tqdm(range(9)) as pbar:
+ async for i in pbar:
+ if i == 2:
+ break
+
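+The submodule also wraps ``asyncio.gather`` (alongside the ``as_completed``
+wrapper listed above); a minimal sketch:
+
+.. code:: python
+
+    import asyncio
+    from tqdm.asyncio import tqdm
+
+    async def work(i):
+        await asyncio.sleep(0.1)
+        return i
+
+    async def main():
+        # the bar advances as each awaitable completes
+        return await tqdm.gather(*[work(i) for i in range(9)])
+
+    asyncio.run(main())
+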
+Pandas Integration
+~~~~~~~~~~~~~~~~~~
+
+Due to popular demand we've added support for ``pandas`` -- here's an example
+for ``DataFrame.progress_apply`` and ``DataFrameGroupBy.progress_apply``:
+
+.. code:: python
+
+ import pandas as pd
+ import numpy as np
+ from tqdm import tqdm
+
+ df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
+
+ # Register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
+ # (can use `tqdm.gui.tqdm`, `tqdm.notebook.tqdm`, optional kwargs, etc.)
+ tqdm.pandas(desc="my bar!")
+
+ # Now you can use `progress_apply` instead of `apply`
+ # and `progress_map` instead of `map`
+ df.progress_apply(lambda x: x**2)
+ # can also groupby:
+ # df.groupby(0).progress_apply(lambda x: x**2)
+
+In case you're interested in how this works (and how to modify it for your
+own callbacks), see the
+`examples <https://github.com/tqdm/tqdm/tree/master/examples>`__
+folder or import the module and run ``help()``.
+
+Keras Integration
+~~~~~~~~~~~~~~~~~
+
+A ``keras`` callback is also available:
+
+.. code:: python
+
+ from tqdm.keras import TqdmCallback
+
+ ...
+
+ model.fit(..., verbose=0, callbacks=[TqdmCallback()])
+
+Dask Integration
+~~~~~~~~~~~~~~~~
+
+A ``dask`` callback is also available:
+
+.. code:: python
+
+ from tqdm.dask import TqdmCallback
+
+ with TqdmCallback(desc="compute"):
+ ...
+ arr.compute()
+
+ # or use callback globally
+ cb = TqdmCallback(desc="global")
+ cb.register()
+ arr.compute()
+
+IPython/Jupyter Integration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+IPython/Jupyter is supported via the ``tqdm.notebook`` submodule:
+
+.. code:: python
+
+ from tqdm.notebook import trange, tqdm
+ from time import sleep
+
+ for i in trange(3, desc='1st loop'):
+ for j in tqdm(range(100), desc='2nd loop'):
+ sleep(0.01)
+
+In addition to ``tqdm`` features, the submodule provides a native Jupyter
+widget (compatible with IPython v1-v4 and Jupyter), fully working nested bars
+and colour hints (blue: normal, green: completed, red: error/interrupt,
+light blue: no ETA), as demonstrated below.
+
+|Screenshot-Jupyter1|
+|Screenshot-Jupyter2|
+|Screenshot-Jupyter3|
+
+The ``notebook`` version supports percentage or pixels for overall width
+(e.g.: ``ncols='100%'`` or ``ncols='480px'``).
+
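+For example, to stretch the bar across the whole output cell:
+
+.. code:: python
+
+    from tqdm.notebook import tqdm
+
+    for _ in tqdm(range(100), ncols='100%'):
+        pass
+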
+It is also possible to let ``tqdm`` automatically choose between
+console or notebook versions by using the ``autonotebook`` submodule:
+
+.. code:: python
+
+ from tqdm.autonotebook import tqdm
+ tqdm.pandas()
+
+Note that this will issue a ``TqdmExperimentalWarning`` if run in a notebook,
+since it is not possible to reliably distinguish between ``jupyter notebook``
+and ``jupyter console``. Use ``auto`` instead of ``autonotebook`` to suppress
+this warning.
+
+Note that notebooks will display the bar in the cell where it was created.
+This may be a different cell from the one where it is used.
+If this is not desired, either
+
+- delay the creation of the bar to the cell where it must be displayed, or
+- create the bar with ``display=False``, and in a later cell call
+ ``display(bar.container)``:
+
+.. code:: python
+
+ from tqdm.notebook import tqdm
+ pbar = tqdm(..., display=False)
+
+.. code:: python
+
+ # different cell
+ display(pbar.container)
+
+The ``keras`` callback has a ``display()`` method which can be used likewise:
+
+.. code:: python
+
+ from tqdm.keras import TqdmCallback
+ cbk = TqdmCallback(display=False)
+
+.. code:: python
+
+ # different cell
+ cbk.display()
+ model.fit(..., verbose=0, callbacks=[cbk])
+
+Another possibility is to have a single bar (near the top of the notebook)
+which is constantly re-used (using ``reset()`` rather than ``close()``).
+For this reason, the notebook version (unlike the CLI version) does not
+automatically call ``close()`` upon ``Exception``.
+
+.. code:: python
+
+ from tqdm.notebook import tqdm
+ pbar = tqdm()
+
+.. code:: python
+
+ # different cell
+ iterable = range(100)
+ pbar.reset(total=len(iterable)) # initialise with new `total`
+ for i in iterable:
+ pbar.update()
+ pbar.refresh() # force print final status but don't `close()`
+
+Custom Integration
+~~~~~~~~~~~~~~~~~~
+
+To change the default arguments (such as making ``dynamic_ncols=True``),
+simply use built-in Python magic:
+
+.. code:: python
+
+ from functools import partial
+ from tqdm import tqdm as std_tqdm
+ tqdm = partial(std_tqdm, dynamic_ncols=True)
+
+For further customisation,
+``tqdm`` may be inherited from to create custom callbacks (as with the
+``TqdmUpTo`` example `above <#hooks-and-callbacks>`__) or for custom frontends
+(e.g. GUIs such as notebook or plotting packages). In the latter case:
+
+1. ``def __init__()`` to call ``super().__init__(..., gui=True)`` to disable
+ terminal ``status_printer`` creation.
+2. Redefine: ``close()``, ``clear()``, ``display()``.
+
+Consider overloading ``display()`` to use e.g.
+``self.frontend(**self.format_dict)`` instead of ``self.sp(repr(self))``.
+
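+Putting these steps together, a minimal sketch (``render`` is a hypothetical
+stand-in for a real frontend, and ``close()`` is deliberately simplified):
+
+.. code:: python
+
+    from tqdm import tqdm as std_tqdm
+
+    def render(**fmt):  # hypothetical frontend
+        print(fmt["n"], "/", fmt["total"])
+
+    class FrontendTqdm(std_tqdm):
+        def __init__(self, *args, **kwargs):
+            kwargs["gui"] = True  # (1) skip terminal `status_printer`
+            super(FrontendTqdm, self).__init__(*args, **kwargs)
+
+        def display(self, msg=None, pos=None):  # (2)
+            render(**self.format_dict)
+
+        def clear(self, *args, **kwargs):  # (2) nothing printed, nothing to clear
+            pass
+
+        def close(self):  # (2) simplified teardown
+            if not self.disable:
+                self.disable = True
+                with self.get_lock():
+                    self._instances.remove(self)
+
+    for _ in FrontendTqdm(range(5)):
+        pass
+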
+Some submodule examples of inheritance:
+
+- `tqdm/notebook.py <https://github.com/tqdm/tqdm/blob/master/tqdm/notebook.py>`__
+- `tqdm/gui.py <https://github.com/tqdm/tqdm/blob/master/tqdm/gui.py>`__
+- `tqdm/tk.py <https://github.com/tqdm/tqdm/blob/master/tqdm/tk.py>`__
+- `tqdm/contrib/telegram.py <https://github.com/tqdm/tqdm/blob/master/tqdm/contrib/telegram.py>`__
+- `tqdm/contrib/discord.py <https://github.com/tqdm/tqdm/blob/master/tqdm/contrib/discord.py>`__
+
+Dynamic Monitor/Meter
+~~~~~~~~~~~~~~~~~~~~~
+
+You can use a ``tqdm`` as a meter which is not monotonically increasing.
+This could be because ``n`` decreases (e.g. a CPU usage monitor) or ``total``
+changes.
+
+One example would be recursively searching for files. The ``total`` is the
+number of objects found so far, while ``n`` is the number of those objects which
+are files (rather than folders):
+
+.. code:: python
+
+ from tqdm import tqdm
+ import os.path
+
+ def find_files_recursively(path, show_progress=True):
+ files = []
+ # total=1 assumes `path` is a file
+ t = tqdm(total=1, unit="file", disable=not show_progress)
+ if not os.path.exists(path):
+            raise IOError("Cannot find: " + path)
+
+ def append_found_file(f):
+ files.append(f)
+ t.update()
+
+ def list_found_dir(path):
+ """returns os.listdir(path) assuming os.path.isdir(path)"""
+ listing = os.listdir(path)
+ # subtract 1 since a "file" we found was actually this directory
+ t.total += len(listing) - 1
+ # fancy way to give info without forcing a refresh
+ t.set_postfix(dir=path[-10:], refresh=False)
+ t.update(0) # may trigger a refresh
+ return listing
+
+ def recursively_search(path):
+ if os.path.isdir(path):
+ for f in list_found_dir(path):
+ recursively_search(os.path.join(path, f))
+ else:
+ append_found_file(path)
+
+ recursively_search(path)
+ t.set_postfix(dir=path)
+ t.close()
+ return files
+
+Using ``update(0)`` is a handy way to let ``tqdm`` decide when to trigger a
+display refresh to avoid console spamming.
+
+Writing messages
+~~~~~~~~~~~~~~~~
+
+This is a work in progress (see
+`#737 <https://github.com/tqdm/tqdm/issues/737>`__).
+
+Since ``tqdm`` uses a simple printing mechanism to display progress bars,
+you should not write any message in the terminal using ``print()`` while
+a progressbar is open.
+
+To write messages in the terminal without any collision with ``tqdm`` bar
+display, a ``.write()`` method is provided:
+
+.. code:: python
+
+ from tqdm.auto import tqdm, trange
+ from time import sleep
+
+ bar = trange(10)
+ for i in bar:
+ # Print using tqdm class method .write()
+ sleep(0.1)
+ if not (i % 3):
+ tqdm.write("Done task %i" % i)
+ # Can also use bar.write()
+
+By default, this will print to standard output ``sys.stdout``, but you can
+specify any file-like object using the ``file`` argument. For example, this
+can be used to redirect messages to a log file or custom class.
+
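+For example (matching the ``write()`` signature listed above):
+
+.. code:: python
+
+    import sys
+    from tqdm import tqdm
+
+    tqdm.write("routed to stderr instead", file=sys.stderr)
+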
+Redirecting writing
+~~~~~~~~~~~~~~~~~~~
+
+If using a library that can print messages to the console, editing the library
+by replacing ``print()`` with ``tqdm.write()`` may not be desirable.
+In that case, redirecting ``sys.stdout`` to ``tqdm.write()`` is an option.
+
+To redirect ``sys.stdout``, create a file-like class that will write
+any input string to ``tqdm.write()``, and supply the arguments
+``file=sys.stdout, dynamic_ncols=True``.
+
+A reusable canonical example is given below:
+
+.. code:: python
+
+ from time import sleep
+ import contextlib
+ import sys
+ from tqdm import tqdm
+ from tqdm.contrib import DummyTqdmFile
+
+
+ @contextlib.contextmanager
+ def std_out_err_redirect_tqdm():
+ orig_out_err = sys.stdout, sys.stderr
+ try:
+ sys.stdout, sys.stderr = map(DummyTqdmFile, orig_out_err)
+ yield orig_out_err[0]
+ # Relay exceptions
+ except Exception as exc:
+ raise exc
+ # Always restore sys.stdout/err if necessary
+ finally:
+ sys.stdout, sys.stderr = orig_out_err
+
+ def some_fun(i):
+ print("Fee, fi, fo,".split()[i])
+
+ # Redirect stdout to tqdm.write() (don't forget the `as save_stdout`)
+ with std_out_err_redirect_tqdm() as orig_stdout:
+ # tqdm needs the original stdout
+ # and dynamic_ncols=True to autodetect console width
+ for i in tqdm(range(3), file=orig_stdout, dynamic_ncols=True):
+ sleep(.5)
+ some_fun(i)
+
+ # After the `with`, printing is restored
+ print("Done!")
+
+Redirecting ``logging``
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Similar to ``sys.stdout``/``sys.stderr`` as detailed above, console ``logging``
+may also be redirected to ``tqdm.write()``.
+
+Warning: if also redirecting ``sys.stdout``/``sys.stderr``, make sure to
+redirect ``logging`` first if needed.
+
+Helper methods are available in ``tqdm.contrib.logging``. For example:
+
+.. code:: python
+
+ import logging
+ from tqdm import trange
+ from tqdm.contrib.logging import logging_redirect_tqdm
+
+ LOG = logging.getLogger(__name__)
+
+ if __name__ == '__main__':
+ logging.basicConfig(level=logging.INFO)
+ with logging_redirect_tqdm():
+ for i in trange(9):
+ if i == 4:
+ LOG.info("console logging redirected to `tqdm.write()`")
+ # logging restored
+
+Monitoring thread, intervals and miniters
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``tqdm`` implements a few tricks to increase efficiency and reduce overhead.
+
+- Avoid unnecessary frequent bar refreshing: ``mininterval`` defines how long
+ to wait between each refresh. ``tqdm`` always gets updated in the background,
+ but it will display only every ``mininterval``.
+- Reduce number of calls to check system clock/time.
+- ``mininterval`` is more intuitive to configure than ``miniters``.
+  A clever adjustment system ``dynamic_miniters`` will automatically adjust
+  ``miniters`` to the number of iterations that fit into time ``mininterval``.
+  Essentially, ``tqdm`` will check if it's time to print without actually
+  checking time. This behaviour can still be bypassed by manually setting
+  ``miniters``.
+
+However, consider a case with a combination of fast and slow iterations.
+After a few fast iterations, ``dynamic_miniters`` will set ``miniters`` to a
+large number. When iteration rate subsequently slows, ``miniters`` will
+remain large and thus reduce display update frequency. To address this:
+
+- ``maxinterval`` defines the maximum time between display refreshes.
+ A concurrent monitoring thread checks for overdue updates and forces one
+ where necessary.
+
+The monitoring thread should not have a noticeable overhead, and guarantees
+updates at least every 10 seconds by default.
+This value can be directly changed by setting the ``monitor_interval`` of
+any ``tqdm`` instance (i.e. ``t = tqdm.tqdm(...); t.monitor_interval = 2``).
+The monitor thread may be disabled application-wide by setting
+``tqdm.tqdm.monitor_interval = 0`` before instantiation of any ``tqdm`` bar.
+
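+A minimal sketch of these knobs (the values are arbitrary):
+
+.. code:: python
+
+    from tqdm import tqdm
+
+    # tqdm.monitor_interval = 0  # uncomment to disable the monitor thread
+    # (must run before any bar is instantiated)
+
+    # refresh at most every 0.5s; overdue updates are forced after 4s
+    for _ in tqdm(range(10 ** 7), mininterval=0.5, maxinterval=4):
+        pass
+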
+
+Merch
+-----
+
+You can buy `tqdm branded merch <https://tqdm.github.io/merch>`__ now!
+
+Contributions
+-------------
+
+|GitHub-Commits| |GitHub-Issues| |GitHub-PRs| |OpenHub-Status| |GitHub-Contributions| |CII Best Practices|
+
+All source code is hosted on `GitHub <https://github.com/tqdm/tqdm>`__.
+Contributions are welcome.
+
+See the
+`CONTRIBUTING <https://github.com/tqdm/tqdm/blob/master/CONTRIBUTING.md>`__
+file for more information.
+
+Developers who have made significant contributions, ranked by *SLoC*
+(surviving lines of code,
+`git fame <https://github.com/casperdcl/git-fame>`__ ``-wMC --excl '\.(png|gif|jpg)$'``),
+are:
+
+==================== ======================================================== ==== ================================
+Name ID SLoC Notes
+==================== ======================================================== ==== ================================
+Casper da Costa-Luis `casperdcl <https://github.com/casperdcl>`__ ~78% primary maintainer |Gift-Casper|
+Stephen Larroque `lrq3000 <https://github.com/lrq3000>`__ ~10% team member
+Martin Zugnoni `martinzugnoni <https://github.com/martinzugnoni>`__ ~4%
+Daniel Ecer `de-code <https://github.com/de-code>`__ ~2%
+Richard Sheridan `richardsheridan <https://github.com/richardsheridan>`__ ~1%
+Guangshuo Chen `chengs <https://github.com/chengs>`__ ~1%
+Kyle Altendorf `altendky <https://github.com/altendky>`__ <1%
+Matthew Stevens `mjstevens777 <https://github.com/mjstevens777>`__ <1%
+Hadrien Mary `hadim <https://github.com/hadim>`__ <1% team member
+Noam Yorav-Raphael `noamraph <https://github.com/noamraph>`__ <1% original author
+Mikhail Korobov `kmike <https://github.com/kmike>`__ <1% team member
+==================== ======================================================== ==== ================================
+
+Ports to Other Languages
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+A list is available on
+`this wiki page <https://github.com/tqdm/tqdm/wiki/tqdm-ports>`__.
+
+
+LICENCE
+-------
+
+Open Source (OSI approved): |LICENCE|
+
+Citation information: |DOI|
+
+|README-Hits| (Since 19 May 2016)
+
+.. |Logo| image:: https://img.tqdm.ml/logo.gif
+.. |Screenshot| image:: https://img.tqdm.ml/tqdm.gif
+.. |Video| image:: https://img.tqdm.ml/video.jpg
+ :target: https://tqdm.github.io/video
+.. |Slides| image:: https://img.tqdm.ml/slides.jpg
+ :target: https://tqdm.github.io/PyData2019/slides.html
+.. |Merch| image:: https://img.tqdm.ml/merch.jpg
+ :target: https://tqdm.github.io/merch
+.. |Build-Status| image:: https://img.shields.io/github/workflow/status/tqdm/tqdm/Test/master?logo=GitHub
+ :target: https://github.com/tqdm/tqdm/actions?query=workflow%3ATest
+.. |Coverage-Status| image:: https://img.shields.io/coveralls/github/tqdm/tqdm/master?logo=coveralls
+ :target: https://coveralls.io/github/tqdm/tqdm
+.. |Branch-Coverage-Status| image:: https://codecov.io/gh/tqdm/tqdm/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/tqdm/tqdm
+.. |Codacy-Grade| image:: https://app.codacy.com/project/badge/Grade/3f965571598f44549c7818f29cdcf177
+ :target: https://www.codacy.com/gh/tqdm/tqdm/dashboard
+.. |CII Best Practices| image:: https://bestpractices.coreinfrastructure.org/projects/3264/badge
+ :target: https://bestpractices.coreinfrastructure.org/projects/3264
+.. |GitHub-Status| image:: https://img.shields.io/github/tag/tqdm/tqdm.svg?maxAge=86400&logo=github&logoColor=white
+ :target: https://github.com/tqdm/tqdm/releases
+.. |GitHub-Forks| image:: https://img.shields.io/github/forks/tqdm/tqdm.svg?logo=github&logoColor=white
+ :target: https://github.com/tqdm/tqdm/network
+.. |GitHub-Stars| image:: https://img.shields.io/github/stars/tqdm/tqdm.svg?logo=github&logoColor=white
+ :target: https://github.com/tqdm/tqdm/stargazers
+.. |GitHub-Commits| image:: https://img.shields.io/github/commit-activity/y/tqdm/tqdm.svg?logo=git&logoColor=white
+ :target: https://github.com/tqdm/tqdm/graphs/commit-activity
+.. |GitHub-Issues| image:: https://img.shields.io/github/issues-closed/tqdm/tqdm.svg?logo=github&logoColor=white
+ :target: https://github.com/tqdm/tqdm/issues?q=
+.. |GitHub-PRs| image:: https://img.shields.io/github/issues-pr-closed/tqdm/tqdm.svg?logo=github&logoColor=white
+ :target: https://github.com/tqdm/tqdm/pulls
+.. |GitHub-Contributions| image:: https://img.shields.io/github/contributors/tqdm/tqdm.svg?logo=github&logoColor=white
+ :target: https://github.com/tqdm/tqdm/graphs/contributors
+.. |GitHub-Updated| image:: https://img.shields.io/github/last-commit/tqdm/tqdm/master.svg?logo=github&logoColor=white&label=pushed
+ :target: https://github.com/tqdm/tqdm/pulse
+.. |Gift-Casper| image:: https://img.shields.io/badge/dynamic/json.svg?color=ff69b4&label=gifts%20received&prefix=%C2%A3&query=%24..sum&url=https%3A%2F%2Fcaspersci.uk.to%2Fgifts.json
+ :target: https://cdcl.ml/sponsor
+.. |Versions| image:: https://img.shields.io/pypi/v/tqdm.svg
+ :target: https://tqdm.github.io/releases
+.. |PyPI-Downloads| image:: https://img.shields.io/pypi/dm/tqdm.svg?label=pypi%20downloads&logo=PyPI&logoColor=white
+ :target: https://pepy.tech/project/tqdm
+.. |Py-Versions| image:: https://img.shields.io/pypi/pyversions/tqdm.svg?logo=python&logoColor=white
+ :target: https://pypi.org/project/tqdm
+.. |Conda-Forge-Status| image:: https://img.shields.io/conda/v/conda-forge/tqdm.svg?label=conda-forge&logo=conda-forge
+ :target: https://anaconda.org/conda-forge/tqdm
+.. |Snapcraft| image:: https://img.shields.io/badge/snap-install-82BEA0.svg?logo=snapcraft
+ :target: https://snapcraft.io/tqdm
+.. |Docker| image:: https://img.shields.io/badge/docker-pull-blue.svg?logo=docker&logoColor=white
+ :target: https://hub.docker.com/r/tqdm/tqdm
+.. |Libraries-Rank| image:: https://img.shields.io/librariesio/sourcerank/pypi/tqdm.svg?logo=koding&logoColor=white
+ :target: https://libraries.io/pypi/tqdm
+.. |Libraries-Dependents| image:: https://img.shields.io/librariesio/dependent-repos/pypi/tqdm.svg?logo=koding&logoColor=white
+ :target: https://github.com/tqdm/tqdm/network/dependents
+.. |OpenHub-Status| image:: https://www.openhub.net/p/tqdm/widgets/project_thin_badge?format=gif
+ :target: https://www.openhub.net/p/tqdm?ref=Thin+badge
+.. |awesome-python| image:: https://awesome.re/mentioned-badge.svg
+ :target: https://github.com/vinta/awesome-python
+.. |LICENCE| image:: https://img.shields.io/pypi/l/tqdm.svg
+ :target: https://raw.githubusercontent.com/tqdm/tqdm/master/LICENCE
+.. |DOI| image:: https://img.shields.io/badge/DOI-10.5281/zenodo.595120-blue.svg
+ :target: https://doi.org/10.5281/zenodo.595120
+.. |binder-demo| image:: https://mybinder.org/badge_logo.svg
+ :target: https://mybinder.org/v2/gh/tqdm/tqdm/master?filepath=DEMO.ipynb
+.. |Screenshot-Jupyter1| image:: https://img.tqdm.ml/jupyter-1.gif
+.. |Screenshot-Jupyter2| image:: https://img.tqdm.ml/jupyter-2.gif
+.. |Screenshot-Jupyter3| image:: https://img.tqdm.ml/jupyter-3.gif
+.. |README-Hits| image:: https://caspersci.uk.to/cgi-bin/hits.cgi?q=tqdm&style=social&r=https://github.com/tqdm/tqdm&l=https://img.tqdm.ml/favicon.png&f=https://img.tqdm.ml/logo.gif
+ :target: https://caspersci.uk.to/cgi-bin/hits.cgi?q=tqdm&a=plot&r=https://github.com/tqdm/tqdm&l=https://img.tqdm.ml/favicon.png&f=https://img.tqdm.ml/logo.gif&style=social
+
+
diff --git a/third_party/python/tqdm/tqdm-4.62.3.dist-info/RECORD b/third_party/python/tqdm/tqdm-4.62.3.dist-info/RECORD
new file mode 100644
index 0000000000..8b98eca2b6
--- /dev/null
+++ b/third_party/python/tqdm/tqdm-4.62.3.dist-info/RECORD
@@ -0,0 +1,39 @@
+tqdm/__init__.py,sha256=LiezHIqATK3FOij_365eMcu11UPmijmDFKogl72fHp4,1639
+tqdm/__main__.py,sha256=bYt9eEaoRQWdejEHFD8REx9jxVEdZptECFsV7F49Ink,30
+tqdm/_dist_ver.py,sha256=mXM_6fFcFakqg8WfBGHFnOAJldFvVy1ECtxe-NkcRxA,23
+tqdm/_main.py,sha256=9ySvgmi_2Sw4CAo5UDW0Q2dxfTryboEWGHohfCJz0sA,283
+tqdm/_monitor.py,sha256=Uku-DPWgzJ7dO5CK08xKJK-E_F6qQ-JB3ksuXczSYR0,3699
+tqdm/_tqdm.py,sha256=LfLCuJ6bpsVo9xilmtBXyEm1vGnUCFrliW85j3J-nD4,283
+tqdm/_tqdm_gui.py,sha256=03Hc8KayxJveieI5-0-2NGiDpLvw9jZekofJUV7CCwk,287
+tqdm/_tqdm_notebook.py,sha256=BuHiLuxu6uEfZFaPJW3RPpPaxaVctEQA3kdSJSDL1hw,307
+tqdm/_tqdm_pandas.py,sha256=c9jptUgigN6axRDhRd4Rif98Tmxeopc1nFNFhIpbFUE,888
+tqdm/_utils.py,sha256=YIwj0ZJQonXgYa2HaA3U_paP4xOXJqj0ZWMPeZSf6Pw,596
+tqdm/asyncio.py,sha256=7CWT2150uMvyXSMDkl9PvG9G_HrfOVY32rWbeP2bw1Y,2789
+tqdm/auto.py,sha256=P__dIfklVGqcRdzV4q68SOBVhLHe9QWnrCk3IJIA-fM,1106
+tqdm/autonotebook.py,sha256=5LdOJz8_HnA55hJRUdq_69Zv1qjKI4AlJEC7RiQmzoQ,857
+tqdm/cli.py,sha256=x_c8nmc4Huc-lKEsAXj78ZiyqSJ9hJ71j7vltY67icw,10509
+tqdm/completion.sh,sha256=j79KbSmpIj_E11jfTfBXrGnUTzKXVpQ1vGVQvsyDRl4,946
+tqdm/dask.py,sha256=BqPQ2O_Bd59hnXlC7B5rS7y9C2wI4cPkIHDdeCWGtzc,1377
+tqdm/gui.py,sha256=kQP-ezwAUSvJ44f50Up2fEG4Hq-p4snrEyKwSNwcgkI,5943
+tqdm/keras.py,sha256=auQQJvAZMHgr0Y3kY6pKQVD-6EAJMPg91ZOJ2WSTN-A,4409
+tqdm/notebook.py,sha256=Y6d5z8p2T3hV3w_O-iWpbNdRPik1jYPI2KD_HY0V8Es,11231
+tqdm/rich.py,sha256=Hs8iu1pXZHUGj2DJVMiEUO897zmiodf3KwZQlO0Z_G4,4964
+tqdm/std.py,sha256=yvqDOoKVOkX47d8Wpk26Xv502ytw2EtNC_xCmWNA1ZM,57760
+tqdm/tk.py,sha256=a3lbj1GsP7jyDpQQgm5ohsFm9Y9-adeklYIhPH69P88,6948
+tqdm/tqdm.1,sha256=1YMLZFiY0wGAUYgjmrfr9vQTlyMql6LT31oUWvOyQdU,7997
+tqdm/utils.py,sha256=KvE0DM28X__NHYKgGl5jUrk6CM5BV60G4Nf55ITPeJI,9803
+tqdm/version.py,sha256=-1yWjfu3P0eghVsysHH07fbzdiADNRdzRtYPqOaqR2A,333
+tqdm/contrib/__init__.py,sha256=gpiBeuWB1OaaoGFwiS-G_Nodv8fLPZ_xVxbENL0EYL4,2604
+tqdm/contrib/bells.py,sha256=_CURLQOkZHjn429e6HGB5U3ESTPBjxr15flG1346eF8,735
+tqdm/contrib/concurrent.py,sha256=YmHJG_jUYUsg2NR1eAhsrl6X-_BfzlANtW32IhNmRTA,4644
+tqdm/contrib/discord.py,sha256=H5Wq6xbvH0H8c_53jpSWZtyElK_RprVSPqkXZjDRVW0,3989
+tqdm/contrib/itertools.py,sha256=BLsGWBjSjI5grz3UBBf-qcmlmJqN29AGMNNH-Bjk8HA,798
+tqdm/contrib/logging.py,sha256=F4pEE2mRecNKoZNm7jIWr2nMeelvogvZ8aopo_irK44,3844
+tqdm/contrib/telegram.py,sha256=5S6IIZMjDg7rcdxJaGGcvTRC37DnHf0r36ahje_JyyQ,5228
+tqdm/contrib/utils_worker.py,sha256=3Mj9TvDa3qRGoZvrmU5cQTnmQLPd8oP7AURuJjVVFXo,1247
+tqdm-4.62.3.dist-info/LICENCE,sha256=oPwXhajyogCjEk1wPUlVBgG3dBzP_IYXE8LdqgelN90,2006
+tqdm-4.62.3.dist-info/METADATA,sha256=j5GU21D1jHKgPWY0OOH9Fmcvx0ADzIJ_c_MXzn5hGnE,56972
+tqdm-4.62.3.dist-info/WHEEL,sha256=WzZ8cwjh8l0jtULNjYq1Hpr-WCqCRgPr--TX4P5I1Wo,110
+tqdm-4.62.3.dist-info/entry_points.txt,sha256=wDAae6wt2WNgXb_Zk5TNu9YmqoafUG2pV7K1CXf3k_U,40
+tqdm-4.62.3.dist-info/top_level.txt,sha256=NLiUJNfmc9At15s7JURiwvqMEjUi9G5PMGRrmMYzNSM,5
+tqdm-4.62.3.dist-info/RECORD,,
diff --git a/third_party/python/tqdm/tqdm-4.62.3.dist-info/WHEEL b/third_party/python/tqdm/tqdm-4.62.3.dist-info/WHEEL
new file mode 100644
index 0000000000..b733a60d37
--- /dev/null
+++ b/third_party/python/tqdm/tqdm-4.62.3.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/tqdm/tqdm-4.62.3.dist-info/entry_points.txt b/third_party/python/tqdm/tqdm-4.62.3.dist-info/entry_points.txt
new file mode 100644
index 0000000000..9b3087d455
--- /dev/null
+++ b/third_party/python/tqdm/tqdm-4.62.3.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+tqdm = tqdm.cli:main
+
diff --git a/third_party/python/tqdm/tqdm-4.62.3.dist-info/top_level.txt b/third_party/python/tqdm/tqdm-4.62.3.dist-info/top_level.txt
new file mode 100644
index 0000000000..78620c472c
--- /dev/null
+++ b/third_party/python/tqdm/tqdm-4.62.3.dist-info/top_level.txt
@@ -0,0 +1 @@
+tqdm
diff --git a/third_party/python/tqdm/tqdm/__init__.py b/third_party/python/tqdm/tqdm/__init__.py
new file mode 100644
index 0000000000..a021d16e9a
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/__init__.py
@@ -0,0 +1,41 @@
+from ._monitor import TMonitor, TqdmSynchronisationWarning
+from ._tqdm_pandas import tqdm_pandas
+from .cli import main # TODO: remove in v5.0.0
+from .gui import tqdm as tqdm_gui # TODO: remove in v5.0.0
+from .gui import trange as tgrange # TODO: remove in v5.0.0
+from .std import (
+ TqdmDeprecationWarning, TqdmExperimentalWarning, TqdmKeyError, TqdmMonitorWarning,
+ TqdmTypeError, TqdmWarning, tqdm, trange)
+from .version import __version__
+
+__all__ = ['tqdm', 'tqdm_gui', 'trange', 'tgrange', 'tqdm_pandas',
+ 'tqdm_notebook', 'tnrange', 'main', 'TMonitor',
+ 'TqdmTypeError', 'TqdmKeyError',
+ 'TqdmWarning', 'TqdmDeprecationWarning',
+ 'TqdmExperimentalWarning',
+ 'TqdmMonitorWarning', 'TqdmSynchronisationWarning',
+ '__version__']
+
+
+def tqdm_notebook(*args, **kwargs): # pragma: no cover
+ """See tqdm.notebook.tqdm for full documentation"""
+ from warnings import warn
+
+ from .notebook import tqdm as _tqdm_notebook
+ warn("This function will be removed in tqdm==5.0.0\n"
+ "Please use `tqdm.notebook.tqdm` instead of `tqdm.tqdm_notebook`",
+ TqdmDeprecationWarning, stacklevel=2)
+ return _tqdm_notebook(*args, **kwargs)
+
+
+def tnrange(*args, **kwargs): # pragma: no cover
+ """
+ A shortcut for `tqdm.notebook.tqdm(xrange(*args), **kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+ from warnings import warn
+
+ from .notebook import trange as _tnrange
+ warn("Please use `tqdm.notebook.trange` instead of `tqdm.tnrange`",
+ TqdmDeprecationWarning, stacklevel=2)
+ return _tnrange(*args, **kwargs)
diff --git a/third_party/python/tqdm/tqdm/__main__.py b/third_party/python/tqdm/tqdm/__main__.py
new file mode 100644
index 0000000000..4e28416e10
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/__main__.py
@@ -0,0 +1,3 @@
+from .cli import main
+
+main()
diff --git a/third_party/python/tqdm/tqdm/_dist_ver.py b/third_party/python/tqdm/tqdm/_dist_ver.py
new file mode 100644
index 0000000000..0e32760553
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/_dist_ver.py
@@ -0,0 +1 @@
+__version__ = '4.62.3'
diff --git a/third_party/python/tqdm/tqdm/_main.py b/third_party/python/tqdm/tqdm/_main.py
new file mode 100644
index 0000000000..04fdeeff17
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/_main.py
@@ -0,0 +1,9 @@
+from warnings import warn
+
+from .cli import * # NOQA
+from .cli import __all__ # NOQA
+from .std import TqdmDeprecationWarning
+
+warn("This function will be removed in tqdm==5.0.0\n"
+ "Please use `tqdm.cli.*` instead of `tqdm._main.*`",
+ TqdmDeprecationWarning, stacklevel=2)
diff --git a/third_party/python/tqdm/tqdm/_monitor.py b/third_party/python/tqdm/tqdm/_monitor.py
new file mode 100644
index 0000000000..f71aa56817
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/_monitor.py
@@ -0,0 +1,95 @@
+import atexit
+from threading import Event, Thread, current_thread
+from time import time
+from warnings import warn
+
+__all__ = ["TMonitor", "TqdmSynchronisationWarning"]
+
+
+class TqdmSynchronisationWarning(RuntimeWarning):
+ """tqdm multi-thread/-process errors which may cause incorrect nesting
+ but otherwise no adverse effects"""
+ pass
+
+
+class TMonitor(Thread):
+ """
+ Monitoring thread for tqdm bars.
+ Monitors if tqdm bars are taking too much time to display
+ and readjusts miniters automatically if necessary.
+
+ Parameters
+ ----------
+ tqdm_cls : class
+ tqdm class to use (can be core tqdm or a submodule).
+ sleep_interval : float
+ Time to sleep between monitoring checks.
+ """
+ _test = {} # internal vars for unit testing
+
+ def __init__(self, tqdm_cls, sleep_interval):
+ Thread.__init__(self)
+ self.daemon = True # kill thread when main killed (KeyboardInterrupt)
+ self.woken = 0 # last time woken up, to sync with monitor
+ self.tqdm_cls = tqdm_cls
+ self.sleep_interval = sleep_interval
+ self._time = self._test.get("time", time)
+ self.was_killed = self._test.get("Event", Event)()
+ atexit.register(self.exit)
+ self.start()
+
+ def exit(self):
+ self.was_killed.set()
+ if self is not current_thread():
+ self.join()
+ return self.report()
+
+ def get_instances(self):
+ # returns a copy of started `tqdm_cls` instances
+ return [i for i in self.tqdm_cls._instances.copy()
+ # Avoid race by checking that the instance started
+ if hasattr(i, 'start_t')]
+
+ def run(self):
+ cur_t = self._time()
+ while True:
+ # After processing and before sleeping, notify that we woke
+ # Need to be done just before sleeping
+ self.woken = cur_t
+ # Sleep some time...
+ self.was_killed.wait(self.sleep_interval)
+ # Quit if killed
+ if self.was_killed.is_set():
+ return
+ # Then monitor!
+ # Acquire lock (to access _instances)
+ with self.tqdm_cls.get_lock():
+ cur_t = self._time()
+ # Check tqdm instances are waiting too long to print
+ instances = self.get_instances()
+ for instance in instances:
+ # Check event in loop to reduce blocking time on exit
+ if self.was_killed.is_set():
+ return
+ # Only if mininterval > 1 (else iterations are just slow)
+ # and last refresh exceeded maxinterval
+ if (
+ instance.miniters > 1
+ and (cur_t - instance.last_print_t) >= instance.maxinterval
+ ):
+ # force bypassing miniters on next iteration
+ # (dynamic_miniters adjusts mininterval automatically)
+ instance.miniters = 1
+ # Refresh now! (works only for manual tqdm)
+ instance.refresh(nolock=True)
+ # Remove accidental long-lived strong reference
+ del instance
+ if instances != self.get_instances(): # pragma: nocover
+ warn("Set changed size during iteration" +
+ " (see https://github.com/tqdm/tqdm/issues/481)",
+ TqdmSynchronisationWarning, stacklevel=2)
+ # Remove accidental long-lived strong references
+ del instances
+
+ def report(self):
+ return not self.was_killed.is_set()
diff --git a/third_party/python/tqdm/tqdm/_tqdm.py b/third_party/python/tqdm/tqdm/_tqdm.py
new file mode 100644
index 0000000000..7fc4962774
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/_tqdm.py
@@ -0,0 +1,9 @@
+from warnings import warn
+
+from .std import * # NOQA
+from .std import __all__ # NOQA
+from .std import TqdmDeprecationWarning
+
+warn("This function will be removed in tqdm==5.0.0\n"
+ "Please use `tqdm.std.*` instead of `tqdm._tqdm.*`",
+ TqdmDeprecationWarning, stacklevel=2)
diff --git a/third_party/python/tqdm/tqdm/_tqdm_gui.py b/third_party/python/tqdm/tqdm/_tqdm_gui.py
new file mode 100644
index 0000000000..f32aa894f5
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/_tqdm_gui.py
@@ -0,0 +1,9 @@
+from warnings import warn
+
+from .gui import * # NOQA
+from .gui import __all__ # NOQA
+from .std import TqdmDeprecationWarning
+
+warn("This function will be removed in tqdm==5.0.0\n"
+ "Please use `tqdm.gui.*` instead of `tqdm._tqdm_gui.*`",
+ TqdmDeprecationWarning, stacklevel=2)
diff --git a/third_party/python/tqdm/tqdm/_tqdm_notebook.py b/third_party/python/tqdm/tqdm/_tqdm_notebook.py
new file mode 100644
index 0000000000..f225fbf5b5
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/_tqdm_notebook.py
@@ -0,0 +1,9 @@
+from warnings import warn
+
+from .notebook import * # NOQA
+from .notebook import __all__ # NOQA
+from .std import TqdmDeprecationWarning
+
+warn("This function will be removed in tqdm==5.0.0\n"
+ "Please use `tqdm.notebook.*` instead of `tqdm._tqdm_notebook.*`",
+ TqdmDeprecationWarning, stacklevel=2)
diff --git a/third_party/python/tqdm/tqdm/_tqdm_pandas.py b/third_party/python/tqdm/tqdm/_tqdm_pandas.py
new file mode 100644
index 0000000000..c4fe6efdc6
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/_tqdm_pandas.py
@@ -0,0 +1,24 @@
+import sys
+
+__author__ = "github.com/casperdcl"
+__all__ = ['tqdm_pandas']
+
+
+def tqdm_pandas(tclass, **tqdm_kwargs):
+ """
+ Registers the given `tqdm` instance with
+ `pandas.core.groupby.DataFrameGroupBy.progress_apply`.
+ """
+ from tqdm import TqdmDeprecationWarning
+
+ if isinstance(tclass, type) or (getattr(tclass, '__name__', '').startswith(
+ 'tqdm_')): # delayed adapter case
+ TqdmDeprecationWarning(
+ "Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm, ...)`.",
+ fp_write=getattr(tqdm_kwargs.get('file', None), 'write', sys.stderr.write))
+ tclass.pandas(**tqdm_kwargs)
+ else:
+ TqdmDeprecationWarning(
+ "Please use `tqdm.pandas(...)` instead of `tqdm_pandas(tqdm(...))`.",
+ fp_write=getattr(tclass.fp, 'write', sys.stderr.write))
+ type(tclass).pandas(deprecated_t=tclass)
diff --git a/third_party/python/tqdm/tqdm/_utils.py b/third_party/python/tqdm/tqdm/_utils.py
new file mode 100644
index 0000000000..2cf10909f5
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/_utils.py
@@ -0,0 +1,12 @@
+from warnings import warn
+
+from .std import TqdmDeprecationWarning
+from .utils import ( # NOQA, pylint: disable=unused-import
+ CUR_OS, IS_NIX, IS_WIN, RE_ANSI, Comparable, FormatReplace, SimpleTextIOWrapper, _basestring,
+ _environ_cols_wrapper, _is_ascii, _is_utf, _range, _screen_shape_linux, _screen_shape_tput,
+ _screen_shape_windows, _screen_shape_wrapper, _supports_unicode, _term_move_up, _unich,
+ _unicode, colorama)
+
+warn("This function will be removed in tqdm==5.0.0\n"
+ "Please use `tqdm.utils.*` instead of `tqdm._utils.*`",
+ TqdmDeprecationWarning, stacklevel=2)
diff --git a/third_party/python/tqdm/tqdm/asyncio.py b/third_party/python/tqdm/tqdm/asyncio.py
new file mode 100644
index 0000000000..97c5f88fc2
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/asyncio.py
@@ -0,0 +1,93 @@
+"""
+Asynchronous progressbar decorator for iterators.
+Includes a default `range` iterator printing to `stderr`.
+
+Usage:
+>>> from tqdm.asyncio import trange, tqdm
+>>> async for i in trange(10):
+... ...
+"""
+import asyncio
+from sys import version_info
+
+from .std import tqdm as std_tqdm
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['tqdm_asyncio', 'tarange', 'tqdm', 'trange']
+
+
+class tqdm_asyncio(std_tqdm):
+ """
+ Asynchronous-friendly version of tqdm (Python 3.6+).
+ """
+ def __init__(self, iterable=None, *args, **kwargs):
+ super(tqdm_asyncio, self).__init__(iterable, *args, **kwargs)
+ self.iterable_awaitable = False
+ if iterable is not None:
+ if hasattr(iterable, "__anext__"):
+ self.iterable_next = iterable.__anext__
+ self.iterable_awaitable = True
+ elif hasattr(iterable, "__next__"):
+ self.iterable_next = iterable.__next__
+ else:
+ self.iterable_iterator = iter(iterable)
+ self.iterable_next = self.iterable_iterator.__next__
+
+ def __aiter__(self):
+ return self
+
+ async def __anext__(self):
+ try:
+ if self.iterable_awaitable:
+ res = await self.iterable_next()
+ else:
+ res = self.iterable_next()
+ self.update()
+ return res
+ except StopIteration:
+ self.close()
+ raise StopAsyncIteration
+ except BaseException:
+ self.close()
+ raise
+
+ def send(self, *args, **kwargs):
+ return self.iterable.send(*args, **kwargs)
+
+ @classmethod
+ def as_completed(cls, fs, *, loop=None, timeout=None, total=None, **tqdm_kwargs):
+ """
+ Wrapper for `asyncio.as_completed`.
+ """
+ if total is None:
+ total = len(fs)
+ kwargs = {}
+ if version_info[:2] < (3, 10):
+ kwargs['loop'] = loop
+ yield from cls(asyncio.as_completed(fs, timeout=timeout, **kwargs),
+ total=total, **tqdm_kwargs)
+
+ @classmethod
+ async def gather(cls, *fs, loop=None, timeout=None, total=None, **tqdm_kwargs):
+ """
+ Wrapper for `asyncio.gather`.
+ """
+ async def wrap_awaitable(i, f):
+ return i, await f
+
+ ifs = [wrap_awaitable(i, f) for i, f in enumerate(fs)]
+ res = [await f for f in cls.as_completed(ifs, loop=loop, timeout=timeout,
+ total=total, **tqdm_kwargs)]
+ return [i for _, i in sorted(res)]
+
+
+def tarange(*args, **kwargs):
+ """
+ A shortcut for `tqdm.asyncio.tqdm(range(*args), **kwargs)`.
+ """
+ return tqdm_asyncio(range(*args), **kwargs)
+
+
+# Aliases
+tqdm = tqdm_asyncio
+trange = tarange
diff --git a/third_party/python/tqdm/tqdm/auto.py b/third_party/python/tqdm/tqdm/auto.py
new file mode 100644
index 0000000000..cffca206ff
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/auto.py
@@ -0,0 +1,44 @@
+"""
+Enables multiple commonly used features.
+
+Method resolution order:
+
+- `tqdm.autonotebook` without import warnings
+- `tqdm.asyncio` on Python3.6+
+- `tqdm.std` base class
+
+Usage:
+>>> from tqdm.auto import trange, tqdm
+>>> for i in trange(10):
+... ...
+"""
+import sys
+import warnings
+
+from .std import TqdmExperimentalWarning
+
+with warnings.catch_warnings():
+ warnings.simplefilter("ignore", category=TqdmExperimentalWarning)
+ from .autonotebook import tqdm as notebook_tqdm
+ from .autonotebook import trange as notebook_trange
+
+if sys.version_info[:2] < (3, 6):
+ tqdm = notebook_tqdm
+ trange = notebook_trange
+else: # Python3.6+
+ from .asyncio import tqdm as asyncio_tqdm
+ from .std import tqdm as std_tqdm
+
+ if notebook_tqdm != std_tqdm:
+ class tqdm(notebook_tqdm, asyncio_tqdm): # pylint: disable=inconsistent-mro
+ pass
+ else:
+ tqdm = asyncio_tqdm
+
+ def trange(*args, **kwargs):
+ """
+ A shortcut for `tqdm.auto.tqdm(range(*args), **kwargs)`.
+ """
+ return tqdm(range(*args), **kwargs)
+
+__all__ = ["tqdm", "trange"]
diff --git a/third_party/python/tqdm/tqdm/autonotebook.py b/third_party/python/tqdm/tqdm/autonotebook.py
new file mode 100644
index 0000000000..b032061bfb
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/autonotebook.py
@@ -0,0 +1,28 @@
+"""
+Automatically choose between `tqdm.notebook` and `tqdm.std`.
+
+Usage:
+>>> from tqdm.autonotebook import trange, tqdm
+>>> for i in trange(10):
+... ...
+"""
+import os
+import sys
+
+try:
+ get_ipython = sys.modules['IPython'].get_ipython
+ if 'IPKernelApp' not in get_ipython().config: # pragma: no cover
+ raise ImportError("console")
+ if 'VSCODE_PID' in os.environ: # pragma: no cover
+ raise ImportError("vscode")
+except Exception:
+ from .std import tqdm, trange
+else: # pragma: no cover
+ from warnings import warn
+
+ from .notebook import tqdm, trange
+ from .std import TqdmExperimentalWarning
+ warn("Using `tqdm.autonotebook.tqdm` in notebook mode."
+ " Use `tqdm.tqdm` instead to force console mode"
+ " (e.g. in jupyter console)", TqdmExperimentalWarning, stacklevel=2)
+__all__ = ["tqdm", "trange"]
diff --git a/third_party/python/tqdm/tqdm/cli.py b/third_party/python/tqdm/tqdm/cli.py
new file mode 100644
index 0000000000..b5a16142b9
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/cli.py
@@ -0,0 +1,308 @@
+"""
+Module version for monitoring CLI pipes (`... | python -m tqdm | ...`).
+"""
+import logging
+import re
+import sys
+from ast import literal_eval as numeric
+
+from .std import TqdmKeyError, TqdmTypeError, tqdm
+from .version import __version__
+
+__all__ = ["main"]
+log = logging.getLogger(__name__)
+
+
+def cast(val, typ):
+ log.debug((val, typ))
+ if " or " in typ:
+ for t in typ.split(" or "):
+ try:
+ return cast(val, t)
+ except TqdmTypeError:
+ pass
+ raise TqdmTypeError(val + ' : ' + typ)
+
+ # sys.stderr.write('\ndebug | `val:type`: `' + val + ':' + typ + '`.\n')
+ if typ == 'bool':
+ if (val == 'True') or (val == ''):
+ return True
+ elif val == 'False':
+ return False
+ else:
+ raise TqdmTypeError(val + ' : ' + typ)
+ try:
+ return eval(typ + '("' + val + '")')
+ except Exception:
+ if typ == 'chr':
+ return chr(ord(eval('"' + val + '"'))).encode()
+ else:
+ raise TqdmTypeError(val + ' : ' + typ)
+
+
+def posix_pipe(fin, fout, delim=b'\\n', buf_size=256,
+ callback=lambda float: None, callback_len=True):
+ """
+ Params
+ ------
+ fin : binary file with `read(buf_size : int)` method
+ fout : binary file with `write` (and optionally `flush`) methods.
+ callback : function(float), e.g.: `tqdm.update`
+ callback_len : If (default: True) do `callback(len(buffer))`.
+ Otherwise, do `callback(data) for data in buffer.split(delim)`.
+ """
+ fp_write = fout.write
+
+ if not delim:
+ while True:
+ tmp = fin.read(buf_size)
+
+ # flush at EOF
+ if not tmp:
+ getattr(fout, 'flush', lambda: None)()
+ return
+
+ fp_write(tmp)
+ callback(len(tmp))
+ # return
+
+ buf = b''
+ # n = 0
+ while True:
+ tmp = fin.read(buf_size)
+
+ # flush at EOF
+ if not tmp:
+ if buf:
+ fp_write(buf)
+ if callback_len:
+ # n += 1 + buf.count(delim)
+ callback(1 + buf.count(delim))
+ else:
+ for i in buf.split(delim):
+ callback(i)
+ getattr(fout, 'flush', lambda: None)()
+ return # n
+
+ while True:
+ try:
+ i = tmp.index(delim)
+ except ValueError:
+ buf += tmp
+ break
+ else:
+ fp_write(buf + tmp[:i + len(delim)])
+ # n += 1
+ callback(1 if callback_len else (buf + tmp[:i]))
+ buf = b''
+ tmp = tmp[i + len(delim):]
+
+
+# ((opt, type), ... )
+RE_OPTS = re.compile(r'\n {8}(\S+)\s{2,}:\s*([^,]+)')
+# better split method assuming no positional args
+RE_SHLEX = re.compile(r'\s*(?<!\S)--?([^\s=]+)(\s+|=|$)')
+
+# TODO: add custom support for some of the following?
+UNSUPPORTED_OPTS = ('iterable', 'gui', 'out', 'file')
+
+# The 8 leading spaces are required for consistency
+CLI_EXTRA_DOC = r"""
+ Extra CLI Options
+ -----------------
+ name : type, optional
+ TODO: find out why this is needed.
+ delim : chr, optional
+ Delimiting character [default: '\n']. Use '\0' for null.
+ N.B.: on Windows systems, Python converts '\n' to '\r\n'.
+ buf_size : int, optional
+ String buffer size in bytes [default: 256]
+ used when `delim` is specified.
+ bytes : bool, optional
+ If true, will count bytes, ignore `delim`, and default
+ `unit_scale` to True, `unit_divisor` to 1024, and `unit` to 'B'.
+ tee : bool, optional
+ If true, passes `stdin` to both `stderr` and `stdout`.
+ update : bool, optional
+ If true, will treat input as newly elapsed iterations,
+ i.e. numbers to pass to `update()`. Note that this is slow
+ (~2e5 it/s) since every input must be decoded as a number.
+ update_to : bool, optional
+ If true, will treat input as total elapsed iterations,
+ i.e. numbers to assign to `self.n`. Note that this is slow
+ (~2e5 it/s) since every input must be decoded as a number.
+ null : bool, optional
+ If true, will discard input (no stdout).
+ manpath : str, optional
+ Directory in which to install tqdm man pages.
+ comppath : str, optional
+ Directory in which to place tqdm completion.
+ log : str, optional
+ CRITICAL|FATAL|ERROR|WARN(ING)|[default: 'INFO']|DEBUG|NOTSET.
+"""
+
+
+def main(fp=sys.stderr, argv=None):
+ """
+ Parameters (internal use only)
+ ---------
+ fp : file-like object for tqdm
+ argv : list (default: sys.argv[1:])
+ """
+ if argv is None:
+ argv = sys.argv[1:]
+ try:
+ log_idx = argv.index('--log')
+ except ValueError:
+ for i in argv:
+ if i.startswith('--log='):
+ logLevel = i[len('--log='):]
+ break
+ else:
+ logLevel = 'INFO'
+ else:
+ # argv.pop(log_idx)
+ # logLevel = argv.pop(log_idx)
+ logLevel = argv[log_idx + 1]
+ logging.basicConfig(level=getattr(logging, logLevel),
+ format="%(levelname)s:%(module)s:%(lineno)d:%(message)s")
+
+ d = tqdm.__init__.__doc__ + CLI_EXTRA_DOC
+
+ opt_types = dict(RE_OPTS.findall(d))
+ # opt_types['delim'] = 'chr'
+
+ for o in UNSUPPORTED_OPTS:
+ opt_types.pop(o)
+
+ log.debug(sorted(opt_types.items()))
+
+ # d = RE_OPTS.sub(r' --\1=<\1> : \2', d)
+ split = RE_OPTS.split(d)
+ opt_types_desc = zip(split[1::3], split[2::3], split[3::3])
+ d = ''.join(('\n --{0} : {2}{3}' if otd[1] == 'bool' else
+ '\n --{0}=<{1}> : {2}{3}').format(
+ otd[0].replace('_', '-'), otd[0], *otd[1:])
+ for otd in opt_types_desc if otd[0] not in UNSUPPORTED_OPTS)
+
+ d = """Usage:
+ tqdm [--help | options]
+
+Options:
+ -h, --help Print this help and exit.
+ -v, --version Print version and exit.
+""" + d.strip('\n') + '\n'
+
+ # opts = docopt(d, version=__version__)
+ if any(v in argv for v in ('-v', '--version')):
+ sys.stdout.write(__version__ + '\n')
+ sys.exit(0)
+ elif any(v in argv for v in ('-h', '--help')):
+ sys.stdout.write(d + '\n')
+ sys.exit(0)
+
+ argv = RE_SHLEX.split(' '.join(["tqdm"] + argv))
+ opts = dict(zip(argv[1::3], argv[3::3]))
+
+ log.debug(opts)
+ opts.pop('log', True)
+
+ tqdm_args = {'file': fp}
+ try:
+ for (o, v) in opts.items():
+ o = o.replace('-', '_')
+ try:
+ tqdm_args[o] = cast(v, opt_types[o])
+ except KeyError as e:
+ raise TqdmKeyError(str(e))
+ log.debug('args:' + str(tqdm_args))
+
+ delim_per_char = tqdm_args.pop('bytes', False)
+ update = tqdm_args.pop('update', False)
+ update_to = tqdm_args.pop('update_to', False)
+ if sum((delim_per_char, update, update_to)) > 1:
+ raise TqdmKeyError("Can only have one of --bytes --update --update_to")
+ except Exception:
+ fp.write('\nError:\nUsage:\n tqdm [--help | options]\n')
+ for i in sys.stdin:
+ sys.stdout.write(i)
+ raise
+ else:
+ buf_size = tqdm_args.pop('buf_size', 256)
+ delim = tqdm_args.pop('delim', b'\\n')
+ tee = tqdm_args.pop('tee', False)
+ manpath = tqdm_args.pop('manpath', None)
+ comppath = tqdm_args.pop('comppath', None)
+ if tqdm_args.pop('null', False):
+ class stdout(object):
+ @staticmethod
+ def write(_):
+ pass
+ else:
+ stdout = sys.stdout
+ stdout = getattr(stdout, 'buffer', stdout)
+ stdin = getattr(sys.stdin, 'buffer', sys.stdin)
+ if manpath or comppath:
+ from os import path
+ from shutil import copyfile
+
+ from pkg_resources import Requirement, resource_filename
+
+ def cp(src, dst):
+ """copies from src path to dst"""
+ copyfile(src, dst)
+ log.info("written:" + dst)
+ if manpath is not None:
+ cp(resource_filename(Requirement.parse('tqdm'), 'tqdm/tqdm.1'),
+ path.join(manpath, 'tqdm.1'))
+ if comppath is not None:
+ cp(resource_filename(Requirement.parse('tqdm'), 'tqdm/completion.sh'),
+ path.join(comppath, 'tqdm_completion.sh'))
+ sys.exit(0)
+ if tee:
+ stdout_write = stdout.write
+ fp_write = getattr(fp, 'buffer', fp).write
+
+ class stdout(object): # pylint: disable=function-redefined
+ @staticmethod
+ def write(x):
+ with tqdm.external_write_mode(file=fp):
+ fp_write(x)
+ stdout_write(x)
+ if delim_per_char:
+ tqdm_args.setdefault('unit', 'B')
+ tqdm_args.setdefault('unit_scale', True)
+ tqdm_args.setdefault('unit_divisor', 1024)
+ log.debug(tqdm_args)
+ with tqdm(**tqdm_args) as t:
+ posix_pipe(stdin, stdout, '', buf_size, t.update)
+ elif delim == b'\\n':
+ log.debug(tqdm_args)
+ if update or update_to:
+ with tqdm(**tqdm_args) as t:
+ if update:
+ def callback(i):
+ t.update(numeric(i.decode()))
+ else: # update_to
+ def callback(i):
+ t.update(numeric(i.decode()) - t.n)
+ for i in stdin:
+ stdout.write(i)
+ callback(i)
+ else:
+ for i in tqdm(stdin, **tqdm_args):
+ stdout.write(i)
+ else:
+ log.debug(tqdm_args)
+ with tqdm(**tqdm_args) as t:
+ callback_len = False
+ if update:
+ def callback(i):
+ t.update(numeric(i.decode()))
+ elif update_to:
+ def callback(i):
+ t.update(numeric(i.decode()) - t.n)
+ else:
+ callback = t.update
+ callback_len = True
+ posix_pipe(stdin, stdout, delim, buf_size, callback, callback_len)
diff --git a/third_party/python/tqdm/tqdm/completion.sh b/third_party/python/tqdm/tqdm/completion.sh
new file mode 100755
index 0000000000..9f61c7f14b
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/completion.sh
@@ -0,0 +1,19 @@
+#!/usr/bin/env bash
+_tqdm(){
+ local cur prv
+ cur="${COMP_WORDS[COMP_CWORD]}"
+ prv="${COMP_WORDS[COMP_CWORD - 1]}"
+
+ case ${prv} in
+ --bar_format|--buf_size|--colour|--comppath|--delay|--delim|--desc|--initial|--lock_args|--manpath|--maxinterval|--mininterval|--miniters|--ncols|--nrows|--position|--postfix|--smoothing|--total|--unit|--unit_divisor)
+ # await user input
+ ;;
+ "--log")
+ COMPREPLY=($(compgen -W 'CRITICAL FATAL ERROR WARN WARNING INFO DEBUG NOTSET' -- ${cur}))
+ ;;
+ *)
+ COMPREPLY=($(compgen -W '--ascii --bar_format --buf_size --bytes --colour --comppath --delay --delim --desc --disable --dynamic_ncols --help --initial --leave --lock_args --log --manpath --maxinterval --mininterval --miniters --ncols --nrows --null --position --postfix --smoothing --tee --total --unit --unit_divisor --unit_scale --update --update_to --version --write_bytes -h -v' -- ${cur}))
+ ;;
+ esac
+}
+complete -F _tqdm tqdm
diff --git a/third_party/python/tqdm/tqdm/contrib/__init__.py b/third_party/python/tqdm/tqdm/contrib/__init__.py
new file mode 100644
index 0000000000..0b52177073
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/contrib/__init__.py
@@ -0,0 +1,98 @@
+"""
+Thin wrappers around common functions.
+
+Subpackages contain potentially unstable extensions.
+"""
+import sys
+from functools import wraps
+
+from ..auto import tqdm as tqdm_auto
+from ..std import tqdm
+from ..utils import ObjectWrapper
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['tenumerate', 'tzip', 'tmap']
+
+
+class DummyTqdmFile(ObjectWrapper):
+ """Dummy file-like that will write to tqdm"""
+
+ def __init__(self, wrapped):
+ super(DummyTqdmFile, self).__init__(wrapped)
+ self._buf = []
+
+ def write(self, x, nolock=False):
+ nl = b"\n" if isinstance(x, bytes) else "\n"
+ pre, sep, post = x.rpartition(nl)
+ if sep:
+ blank = type(nl)()
+ tqdm.write(blank.join(self._buf + [pre, sep]),
+ end=blank, file=self._wrapped, nolock=nolock)
+ self._buf = [post]
+ else:
+ self._buf.append(x)
+
+ def __del__(self):
+ if self._buf:
+ blank = type(self._buf[0])()
+ try:
+ tqdm.write(blank.join(self._buf), end=blank, file=self._wrapped)
+ except (OSError, ValueError):
+ pass
+
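+# Illustrative use of `DummyTqdmFile`: route `print()` through `tqdm.write()`
+# so plain output does not corrupt an active progress bar.
+#   >>> import sys
+#   >>> orig_stdout = sys.stdout
+#   >>> sys.stdout = DummyTqdmFile(orig_stdout)
+#   >>> for _ in tqdm(range(3), file=orig_stdout):
+#   ...     print("interleaved")  # buffered, then emitted via `tqdm.write()`
+#   >>> sys.stdout = orig_stdout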
+
+def builtin_iterable(func):
+ """Wraps `func()` output in a `list()` in py2"""
+ if sys.version_info[:1] < (3,):
+ @wraps(func)
+ def inner(*args, **kwargs):
+ return list(func(*args, **kwargs))
+ return inner
+ return func
+
+
+def tenumerate(iterable, start=0, total=None, tqdm_class=tqdm_auto, **tqdm_kwargs):
+ """
+ Equivalent of `numpy.ndenumerate` or builtin `enumerate`.
+
+ Parameters
+ ----------
+ tqdm_class : [default: tqdm.auto.tqdm].
+ """
+ try:
+ import numpy as np
+ except ImportError:
+ pass
+ else:
+ if isinstance(iterable, np.ndarray):
+ return tqdm_class(np.ndenumerate(iterable), total=total or iterable.size,
+ **tqdm_kwargs)
+ return enumerate(tqdm_class(iterable, total=total, **tqdm_kwargs), start)
+
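+# Illustrative example: like `enumerate`, with a progress bar; numpy arrays
+# are dispatched to `np.ndenumerate` (yielding index tuples).
+#   >>> for i, x in tenumerate(["a", "b", "c"]):
+#   ...     pass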
+
+@builtin_iterable
+def tzip(iter1, *iter2plus, **tqdm_kwargs):
+ """
+ Equivalent of builtin `zip`.
+
+ Parameters
+ ----------
+ tqdm_class : [default: tqdm.auto.tqdm].
+ """
+ kwargs = tqdm_kwargs.copy()
+ tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
+ for i in zip(tqdm_class(iter1, **kwargs), *iter2plus):
+ yield i
+
+
+@builtin_iterable
+def tmap(function, *sequences, **tqdm_kwargs):
+ """
+ Equivalent of builtin `map`.
+
+ Parameters
+ ----------
+ tqdm_class : [default: tqdm.auto.tqdm].
+ """
+ for i in tzip(*sequences, **tqdm_kwargs):
+ yield function(*i)
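+
+
+# Illustrative examples (the progress bar is written to stderr):
+#   >>> list(tzip([1, 2, 3], "abc"))
+#   [(1, 'a'), (2, 'b'), (3, 'c')]
+#   >>> list(tmap(lambda a, b: (b, a), [1, 2], "xy"))
+#   [('x', 1), ('y', 2)]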
diff --git a/third_party/python/tqdm/tqdm/contrib/bells.py b/third_party/python/tqdm/tqdm/contrib/bells.py
new file mode 100644
index 0000000000..be22768842
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/contrib/bells.py
@@ -0,0 +1,24 @@
+"""
+Even more features than `tqdm.auto` (all the bells & whistles):
+
+- `tqdm.auto`
+- `tqdm.tqdm.pandas`
+- `tqdm.contrib.telegram`
+ + uses `${TQDM_TELEGRAM_TOKEN}` and `${TQDM_TELEGRAM_CHAT_ID}`
+- `tqdm.contrib.discord`
+ + uses `${TQDM_DISCORD_TOKEN}` and `${TQDM_DISCORD_CHANNEL_ID}`
+"""
+__all__ = ['tqdm', 'trange']
+import warnings
+from os import getenv
+
+if getenv("TQDM_TELEGRAM_TOKEN") and getenv("TQDM_TELEGRAM_CHAT_ID"):
+ from .telegram import tqdm, trange
+elif getenv("TQDM_DISCORD_TOKEN") and getenv("TQDM_DISCORD_CHANNEL_ID"):
+ from .discord import tqdm, trange
+else:
+ from ..auto import tqdm, trange
+
+with warnings.catch_warnings():
+ warnings.simplefilter("ignore", category=FutureWarning)
+ tqdm.pandas()
diff --git a/third_party/python/tqdm/tqdm/contrib/concurrent.py b/third_party/python/tqdm/tqdm/contrib/concurrent.py
new file mode 100644
index 0000000000..ccb5e12529
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/contrib/concurrent.py
@@ -0,0 +1,130 @@
+"""
+Thin wrappers around `concurrent.futures`.
+"""
+from __future__ import absolute_import
+
+from contextlib import contextmanager
+
+from ..auto import tqdm as tqdm_auto
+from ..std import TqdmWarning
+
+try:
+ from operator import length_hint
+except ImportError:
+ def length_hint(it, default=0):
+ """Returns `len(it)`, falling back to `default`"""
+ try:
+ return len(it)
+ except TypeError:
+ return default
+try:
+ from os import cpu_count
+except ImportError:
+ try:
+ from multiprocessing import cpu_count
+ except ImportError:
+ def cpu_count():
+ return 4
+import sys
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['thread_map', 'process_map']
+
+
+@contextmanager
+def ensure_lock(tqdm_class, lock_name=""):
+ """get (create if necessary) and then restore `tqdm_class`'s lock"""
+ old_lock = getattr(tqdm_class, '_lock', None) # don't create a new lock
+ lock = old_lock or tqdm_class.get_lock() # maybe create a new lock
+ lock = getattr(lock, lock_name, lock) # maybe subtype
+ tqdm_class.set_lock(lock)
+ yield lock
+ if old_lock is None:
+ del tqdm_class._lock
+ else:
+ tqdm_class.set_lock(old_lock)
+
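+# Illustrative example: temporarily install a shared lock on a tqdm class;
+# the yielded lock may be passed to worker initialisers.
+#   >>> with ensure_lock(tqdm_auto) as lk:
+#   ...     pass  # `tqdm_auto._lock` is restored/removed on exit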
+
+def _executor_map(PoolExecutor, fn, *iterables, **tqdm_kwargs):
+ """
+ Implementation of `thread_map` and `process_map`.
+
+ Parameters
+ ----------
+ tqdm_class : [default: tqdm.auto.tqdm].
+ max_workers : [default: min(32, cpu_count() + 4)].
+ chunksize : [default: 1].
+ lock_name : [default: "":str].
+ """
+ kwargs = tqdm_kwargs.copy()
+ if "total" not in kwargs:
+ kwargs["total"] = length_hint(iterables[0])
+ tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
+ max_workers = kwargs.pop("max_workers", min(32, cpu_count() + 4))
+ chunksize = kwargs.pop("chunksize", 1)
+ lock_name = kwargs.pop("lock_name", "")
+ with ensure_lock(tqdm_class, lock_name=lock_name) as lk:
+ pool_kwargs = {'max_workers': max_workers}
+ sys_version = sys.version_info[:2]
+ if sys_version >= (3, 7):
+ # share lock in case workers are already using `tqdm`
+ pool_kwargs.update(initializer=tqdm_class.set_lock, initargs=(lk,))
+ map_args = {}
+ if not (3, 0) < sys_version < (3, 5):
+ map_args.update(chunksize=chunksize)
+ with PoolExecutor(**pool_kwargs) as ex:
+ return list(tqdm_class(ex.map(fn, *iterables, **map_args), **kwargs))
+
+
+def thread_map(fn, *iterables, **tqdm_kwargs):
+ """
+ Equivalent of `list(map(fn, *iterables))`
+ driven by `concurrent.futures.ThreadPoolExecutor`.
+
+ Parameters
+ ----------
+ tqdm_class : optional
+ `tqdm` class to use for bars [default: tqdm.auto.tqdm].
+ max_workers : int, optional
+ Maximum number of workers to spawn; passed to
+ `concurrent.futures.ThreadPoolExecutor.__init__`.
+        [default: min(32, cpu_count() + 4)].
+ """
+ from concurrent.futures import ThreadPoolExecutor
+ return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs)
+
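+# Illustrative example:
+#   >>> thread_map(lambda x: x * 2, range(4), max_workers=2)
+#   [0, 2, 4, 6]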
+
+def process_map(fn, *iterables, **tqdm_kwargs):
+ """
+ Equivalent of `list(map(fn, *iterables))`
+ driven by `concurrent.futures.ProcessPoolExecutor`.
+
+ Parameters
+ ----------
+ tqdm_class : optional
+ `tqdm` class to use for bars [default: tqdm.auto.tqdm].
+ max_workers : int, optional
+ Maximum number of workers to spawn; passed to
+ `concurrent.futures.ProcessPoolExecutor.__init__`.
+ [default: min(32, cpu_count() + 4)].
+ chunksize : int, optional
+ Size of chunks sent to worker processes; passed to
+ `concurrent.futures.ProcessPoolExecutor.map`. [default: 1].
+ lock_name : str, optional
+        Member of `tqdm_class.get_lock()` to use [default: "mp_lock"].
+ """
+ from concurrent.futures import ProcessPoolExecutor
+ if iterables and "chunksize" not in tqdm_kwargs:
+ # default `chunksize=1` has poor performance for large iterables
+ # (most time spent dispatching items to workers).
+ longest_iterable_len = max(map(length_hint, iterables))
+ if longest_iterable_len > 1000:
+ from warnings import warn
+ warn("Iterable length %d > 1000 but `chunksize` is not set."
+ " This may seriously degrade multiprocess performance."
+ " Set `chunksize=1` or more." % longest_iterable_len,
+ TqdmWarning, stacklevel=2)
+ if "lock_name" not in tqdm_kwargs:
+ tqdm_kwargs = tqdm_kwargs.copy()
+ tqdm_kwargs["lock_name"] = "mp_lock"
+ return _executor_map(ProcessPoolExecutor, fn, *iterables, **tqdm_kwargs)
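+
+
+# Illustrative example; `fn` must be picklable (e.g. defined at module level):
+#   >>> def square(x):
+#   ...     return x * x
+#   >>> process_map(square, range(10000), chunksize=100)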
diff --git a/third_party/python/tqdm/tqdm/contrib/discord.py b/third_party/python/tqdm/tqdm/contrib/discord.py
new file mode 100644
index 0000000000..713a2f8220
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/contrib/discord.py
@@ -0,0 +1,121 @@
+"""
+Sends updates to a Discord bot.
+
+Usage:
+>>> from tqdm.contrib.discord import tqdm, trange
+>>> for i in tqdm(iterable, token='{token}', channel_id='{channel_id}'):
+... ...
+
+![screenshot](https://img.tqdm.ml/screenshot-discord.png)
+"""
+from __future__ import absolute_import
+
+import logging
+from os import getenv
+
+try:
+ from disco.client import Client, ClientConfig
+except ImportError:
+ raise ImportError("Please `pip install disco-py`")
+
+from ..auto import tqdm as tqdm_auto
+from ..utils import _range
+from .utils_worker import MonoWorker
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['DiscordIO', 'tqdm_discord', 'tdrange', 'tqdm', 'trange']
+
+
+class DiscordIO(MonoWorker):
+ """Non-blocking file-like IO using a Discord Bot."""
+ def __init__(self, token, channel_id):
+ """Creates a new message in the given `channel_id`."""
+ super(DiscordIO, self).__init__()
+ config = ClientConfig()
+ config.token = token
+ client = Client(config)
+ self.text = self.__class__.__name__
+ try:
+ self.message = client.api.channels_messages_create(channel_id, self.text)
+ except Exception as e:
+ tqdm_auto.write(str(e))
+
+ def write(self, s):
+ """Replaces internal `message`'s text with `s`."""
+ if not s:
+ s = "..."
+ s = s.replace('\r', '').strip()
+ if s == self.text:
+ return # skip duplicate message
+ self.text = s
+ try:
+ future = self.submit(self.message.edit, '`' + s + '`')
+ except Exception as e:
+ tqdm_auto.write(str(e))
+ else:
+ return future
+
+
+class tqdm_discord(tqdm_auto):
+ """
+ Standard `tqdm.auto.tqdm` but also sends updates to a Discord Bot.
+ May take a few seconds to create (`__init__`).
+
+ - create a discord bot (not public, no requirement of OAuth2 code
+ grant, only send message permissions) & invite it to a channel:
+ <https://discordpy.readthedocs.io/en/latest/discord.html>
+ - copy the bot `{token}` & `{channel_id}` and paste below
+
+ >>> from tqdm.contrib.discord import tqdm, trange
+ >>> for i in tqdm(iterable, token='{token}', channel_id='{channel_id}'):
+ ... ...
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ Parameters
+ ----------
+ token : str, required. Discord token
+ [default: ${TQDM_DISCORD_TOKEN}].
+ channel_id : int, required. Discord channel ID
+ [default: ${TQDM_DISCORD_CHANNEL_ID}].
+ mininterval : float, optional.
+          Minimum update interval in seconds [default: 1.5] to avoid rate limits.
+
+ See `tqdm.auto.tqdm.__init__` for other parameters.
+ """
+ if not kwargs.get('disable'):
+ kwargs = kwargs.copy()
+ logging.getLogger("HTTPClient").setLevel(logging.WARNING)
+ self.dio = DiscordIO(
+ kwargs.pop('token', getenv("TQDM_DISCORD_TOKEN")),
+ kwargs.pop('channel_id', getenv("TQDM_DISCORD_CHANNEL_ID")))
+ kwargs['mininterval'] = max(1.5, kwargs.get('mininterval', 1.5))
+ super(tqdm_discord, self).__init__(*args, **kwargs)
+
+ def display(self, **kwargs):
+ super(tqdm_discord, self).display(**kwargs)
+ fmt = self.format_dict
+ if fmt.get('bar_format', None):
+ fmt['bar_format'] = fmt['bar_format'].replace(
+ '<bar/>', '{bar:10u}').replace('{bar}', '{bar:10u}')
+ else:
+ fmt['bar_format'] = '{l_bar}{bar:10u}{r_bar}'
+ self.dio.write(self.format_meter(**fmt))
+
+ def clear(self, *args, **kwargs):
+ super(tqdm_discord, self).clear(*args, **kwargs)
+ if not self.disable:
+ self.dio.write("")
+
+
+def tdrange(*args, **kwargs):
+ """
+ A shortcut for `tqdm.contrib.discord.tqdm(xrange(*args), **kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+ return tqdm_discord(_range(*args), **kwargs)
+
+
+# Aliases
+tqdm = tqdm_discord
+trange = tdrange
diff --git a/third_party/python/tqdm/tqdm/contrib/itertools.py b/third_party/python/tqdm/tqdm/contrib/itertools.py
new file mode 100644
index 0000000000..9cce75e8f8
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/contrib/itertools.py
@@ -0,0 +1,36 @@
+"""
+Thin wrappers around `itertools`.
+"""
+from __future__ import absolute_import
+
+import itertools
+
+from ..auto import tqdm as tqdm_auto
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['product']
+
+
+def product(*iterables, **tqdm_kwargs):
+ """
+ Equivalent of `itertools.product`.
+
+ Parameters
+ ----------
+ tqdm_class : [default: tqdm.auto.tqdm].
+ """
+ kwargs = tqdm_kwargs.copy()
+ tqdm_class = kwargs.pop("tqdm_class", tqdm_auto)
+ try:
+ lens = list(map(len, iterables))
+ except TypeError:
+ total = None
+ else:
+ total = 1
+ for i in lens:
+ total *= i
+ kwargs.setdefault("total", total)
+ with tqdm_class(**kwargs) as t:
+ for i in itertools.product(*iterables):
+ yield i
+ t.update()
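+
+
+# Illustrative example (`total` is inferred as len(range(2)) * len("ab") = 4):
+#   >>> for i, j in product(range(2), "ab"):
+#   ...     pass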
diff --git a/third_party/python/tqdm/tqdm/contrib/logging.py b/third_party/python/tqdm/tqdm/contrib/logging.py
new file mode 100644
index 0000000000..cd9925ec13
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/contrib/logging.py
@@ -0,0 +1,128 @@
+"""
+Helper functionality for interoperability with stdlib `logging`.
+"""
+from __future__ import absolute_import
+
+import logging
+import sys
+from contextlib import contextmanager
+
+try:
+ from typing import Iterator, List, Optional, Type # pylint: disable=unused-import
+except ImportError:
+ pass
+
+from ..std import tqdm as std_tqdm
+
+
+class _TqdmLoggingHandler(logging.StreamHandler):
+ def __init__(
+ self,
+ tqdm_class=std_tqdm # type: Type[std_tqdm]
+ ):
+ super(_TqdmLoggingHandler, self).__init__()
+ self.tqdm_class = tqdm_class
+
+ def emit(self, record):
+ try:
+ msg = self.format(record)
+ self.tqdm_class.write(msg, file=self.stream)
+ self.flush()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except: # noqa pylint: disable=bare-except
+ self.handleError(record)
+
+
+def _is_console_logging_handler(handler):
+ return (isinstance(handler, logging.StreamHandler)
+ and handler.stream in {sys.stdout, sys.stderr})
+
+
+def _get_first_found_console_logging_handler(handlers):
+ for handler in handlers:
+ if _is_console_logging_handler(handler):
+ return handler
+
+
+@contextmanager
+def logging_redirect_tqdm(
+    loggers=None, # type: Optional[List[logging.Logger]]
+ tqdm_class=std_tqdm # type: Type[std_tqdm]
+):
+ # type: (...) -> Iterator[None]
+ """
+ Context manager redirecting console logging to `tqdm.write()`, leaving
+ other logging handlers (e.g. log files) unaffected.
+
+ Parameters
+ ----------
+    loggers : list, optional
+      Which loggers to redirect (default: [logging.root]).
+    tqdm_class : optional
+      `tqdm` class whose `write()` is used [default: tqdm.std.tqdm].
+
+ Example
+ -------
+ ```python
+ import logging
+ from tqdm import trange
+ from tqdm.contrib.logging import logging_redirect_tqdm
+
+ LOG = logging.getLogger(__name__)
+
+ if __name__ == '__main__':
+ logging.basicConfig(level=logging.INFO)
+ with logging_redirect_tqdm():
+ for i in trange(9):
+ if i == 4:
+ LOG.info("console logging redirected to `tqdm.write()`")
+ # logging restored
+ ```
+ """
+ if loggers is None:
+ loggers = [logging.root]
+ original_handlers_list = [logger.handlers for logger in loggers]
+ try:
+ for logger in loggers:
+ tqdm_handler = _TqdmLoggingHandler(tqdm_class)
+ orig_handler = _get_first_found_console_logging_handler(logger.handlers)
+ if orig_handler is not None:
+ tqdm_handler.setFormatter(orig_handler.formatter)
+ tqdm_handler.stream = orig_handler.stream
+ logger.handlers = [
+ handler for handler in logger.handlers
+ if not _is_console_logging_handler(handler)] + [tqdm_handler]
+ yield
+ finally:
+ for logger, original_handlers in zip(loggers, original_handlers_list):
+ logger.handlers = original_handlers
+
+
+@contextmanager
+def tqdm_logging_redirect(
+ *args,
+ # loggers=None, # type: Optional[List[logging.Logger]]
+ # tqdm=None, # type: Optional[Type[tqdm.tqdm]]
+ **kwargs
+):
+ # type: (...) -> Iterator[None]
+ """
+ Convenience shortcut for:
+ ```python
+ with tqdm_class(*args, **tqdm_kwargs) as pbar:
+ with logging_redirect_tqdm(loggers=loggers, tqdm_class=tqdm_class):
+ yield pbar
+ ```
+
+ Parameters
+ ----------
+    tqdm_class : optional [default: tqdm.std.tqdm].
+    loggers : list, optional.
+ **tqdm_kwargs : passed to `tqdm_class`.
+ """
+ tqdm_kwargs = kwargs.copy()
+ loggers = tqdm_kwargs.pop('loggers', None)
+ tqdm_class = tqdm_kwargs.pop('tqdm_class', std_tqdm)
+ with tqdm_class(*args, **tqdm_kwargs) as pbar:
+ with logging_redirect_tqdm(loggers=loggers, tqdm_class=tqdm_class):
+ yield pbar
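+
+
+# Illustrative example:
+#   >>> import logging
+#   >>> logging.basicConfig(level=logging.INFO)
+#   >>> with tqdm_logging_redirect(total=9) as pbar:
+#   ...     logging.info("console logging redirected to `tqdm.write()`")
+#   ...     pbar.update()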
diff --git a/third_party/python/tqdm/tqdm/contrib/telegram.py b/third_party/python/tqdm/tqdm/contrib/telegram.py
new file mode 100644
index 0000000000..99cbe8c888
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/contrib/telegram.py
@@ -0,0 +1,159 @@
+"""
+Sends updates to a Telegram bot.
+
+Usage:
+>>> from tqdm.contrib.telegram import tqdm, trange
+>>> for i in trange(10, token='{token}', chat_id='{chat_id}'):
+... ...
+
+![screenshot](https://img.tqdm.ml/screenshot-telegram.gif)
+"""
+from __future__ import absolute_import
+
+from os import getenv
+from warnings import warn
+
+from requests import Session
+
+from ..auto import tqdm as tqdm_auto
+from ..std import TqdmWarning
+from ..utils import _range
+from .utils_worker import MonoWorker
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['TelegramIO', 'tqdm_telegram', 'ttgrange', 'tqdm', 'trange']
+
+
+class TelegramIO(MonoWorker):
+ """Non-blocking file-like IO using a Telegram Bot."""
+ API = 'https://api.telegram.org/bot'
+
+ def __init__(self, token, chat_id):
+ """Creates a new message in the given `chat_id`."""
+ super(TelegramIO, self).__init__()
+ self.token = token
+ self.chat_id = chat_id
+ self.session = Session()
+ self.text = self.__class__.__name__
+ self.message_id
+
+ @property
+ def message_id(self):
+ if hasattr(self, '_message_id'):
+ return self._message_id
+ try:
+ res = self.session.post(
+ self.API + '%s/sendMessage' % self.token,
+ data={'text': '`' + self.text + '`', 'chat_id': self.chat_id,
+ 'parse_mode': 'MarkdownV2'}).json()
+ except Exception as e:
+ tqdm_auto.write(str(e))
+ else:
+ if res.get('error_code') == 429:
+ warn("Creation rate limit: try increasing `mininterval`.",
+ TqdmWarning, stacklevel=2)
+ else:
+ self._message_id = res['result']['message_id']
+ return self._message_id
+
+ def write(self, s):
+ """Replaces internal `message_id`'s text with `s`."""
+ if not s:
+ s = "..."
+ s = s.replace('\r', '').strip()
+ if s == self.text:
+ return # avoid duplicate message Bot error
+ message_id = self.message_id
+ if message_id is None:
+ return
+ self.text = s
+ try:
+ future = self.submit(
+ self.session.post, self.API + '%s/editMessageText' % self.token,
+ data={'text': '`' + s + '`', 'chat_id': self.chat_id,
+ 'message_id': message_id, 'parse_mode': 'MarkdownV2'})
+ except Exception as e:
+ tqdm_auto.write(str(e))
+ else:
+ return future
+
+ def delete(self):
+ """Deletes internal `message_id`."""
+ try:
+ future = self.submit(
+ self.session.post, self.API + '%s/deleteMessage' % self.token,
+ data={'chat_id': self.chat_id, 'message_id': self.message_id})
+ except Exception as e:
+ tqdm_auto.write(str(e))
+ else:
+ return future
+
+
+class tqdm_telegram(tqdm_auto):
+ """
+ Standard `tqdm.auto.tqdm` but also sends updates to a Telegram Bot.
+ May take a few seconds to create (`__init__`).
+
+ - create a bot <https://core.telegram.org/bots#6-botfather>
+ - copy its `{token}`
+ - add the bot to a chat and send it a message such as `/start`
+ - go to <https://api.telegram.org/bot`{token}`/getUpdates> to find out
+ the `{chat_id}`
+ - paste the `{token}` & `{chat_id}` below
+
+ >>> from tqdm.contrib.telegram import tqdm, trange
+ >>> for i in tqdm(iterable, token='{token}', chat_id='{chat_id}'):
+ ... ...
+ """
+ def __init__(self, *args, **kwargs):
+ """
+ Parameters
+ ----------
+ token : str, required. Telegram token
+ [default: ${TQDM_TELEGRAM_TOKEN}].
+ chat_id : str, required. Telegram chat ID
+ [default: ${TQDM_TELEGRAM_CHAT_ID}].
+
+ See `tqdm.auto.tqdm.__init__` for other parameters.
+ """
+ if not kwargs.get('disable'):
+ kwargs = kwargs.copy()
+ self.tgio = TelegramIO(
+ kwargs.pop('token', getenv('TQDM_TELEGRAM_TOKEN')),
+ kwargs.pop('chat_id', getenv('TQDM_TELEGRAM_CHAT_ID')))
+ super(tqdm_telegram, self).__init__(*args, **kwargs)
+
+ def display(self, **kwargs):
+ super(tqdm_telegram, self).display(**kwargs)
+ fmt = self.format_dict
+ if fmt.get('bar_format', None):
+ fmt['bar_format'] = fmt['bar_format'].replace(
+ '<bar/>', '{bar:10u}').replace('{bar}', '{bar:10u}')
+ else:
+ fmt['bar_format'] = '{l_bar}{bar:10u}{r_bar}'
+ self.tgio.write(self.format_meter(**fmt))
+
+ def clear(self, *args, **kwargs):
+ super(tqdm_telegram, self).clear(*args, **kwargs)
+ if not self.disable:
+ self.tgio.write("")
+
+ def close(self):
+ if self.disable:
+ return
+ super(tqdm_telegram, self).close()
+ if not (self.leave or (self.leave is None and self.pos == 0)):
+ self.tgio.delete()
+
+
+def ttgrange(*args, **kwargs):
+ """
+ A shortcut for `tqdm.contrib.telegram.tqdm(xrange(*args), **kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+ return tqdm_telegram(_range(*args), **kwargs)
+
+
+# Aliases
+tqdm = tqdm_telegram
+trange = ttgrange
diff --git a/third_party/python/tqdm/tqdm/contrib/utils_worker.py b/third_party/python/tqdm/tqdm/contrib/utils_worker.py
new file mode 100644
index 0000000000..17adda6678
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/contrib/utils_worker.py
@@ -0,0 +1,40 @@
+"""
+IO/concurrency helpers for `tqdm.contrib`.
+"""
+from __future__ import absolute_import
+
+from collections import deque
+from concurrent.futures import ThreadPoolExecutor
+
+from ..auto import tqdm as tqdm_auto
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['MonoWorker']
+
+
+class MonoWorker(object):
+ """
+ Supports one running task and one waiting task.
+ The waiting task is the most recent submitted (others are discarded).
+ """
+ def __init__(self):
+ self.pool = ThreadPoolExecutor(max_workers=1)
+ self.futures = deque([], 2)
+
+ def submit(self, func, *args, **kwargs):
+ """`func(*args, **kwargs)` may replace currently waiting task."""
+ futures = self.futures
+ if len(futures) == futures.maxlen:
+ running = futures.popleft()
+ if not running.done():
+ if len(futures): # clear waiting
+ waiting = futures.pop()
+ waiting.cancel()
+ futures.appendleft(running) # re-insert running
+ try:
+ waiting = self.pool.submit(func, *args, **kwargs)
+ except Exception as e:
+ tqdm_auto.write(str(e))
+ else:
+ futures.append(waiting)
+ return waiting
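+
+
+# Illustrative example: of rapid submissions, only the running task and the
+# most recent waiting task survive; the rest are cancelled.
+#   >>> worker = MonoWorker()
+#   >>> futures = [worker.submit(print, i) for i in range(9)]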
diff --git a/third_party/python/tqdm/tqdm/dask.py b/third_party/python/tqdm/tqdm/dask.py
new file mode 100644
index 0000000000..6fc7504c79
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/dask.py
@@ -0,0 +1,46 @@
+from __future__ import absolute_import
+
+from functools import partial
+
+from dask.callbacks import Callback
+
+from .auto import tqdm as tqdm_auto
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['TqdmCallback']
+
+
+class TqdmCallback(Callback):
+ """Dask callback for task progress."""
+ def __init__(self, start=None, pretask=None, tqdm_class=tqdm_auto,
+ **tqdm_kwargs):
+ """
+ Parameters
+ ----------
+ tqdm_class : optional
+ `tqdm` class to use for bars [default: `tqdm.auto.tqdm`].
+ tqdm_kwargs : optional
+ Any other arguments used for all bars.
+ """
+ super(TqdmCallback, self).__init__(start=start, pretask=pretask)
+ if tqdm_kwargs:
+ tqdm_class = partial(tqdm_class, **tqdm_kwargs)
+ self.tqdm_class = tqdm_class
+
+ def _start_state(self, _, state):
+ self.pbar = self.tqdm_class(total=sum(
+ len(state[k]) for k in ['ready', 'waiting', 'running', 'finished']))
+
+ def _posttask(self, *_, **__):
+ self.pbar.update()
+
+ def _finish(self, *_, **__):
+ self.pbar.close()
+
+ def display(self):
+ """Displays in the current cell in Notebooks."""
+        pbar = getattr(self, 'pbar', None)  # set in `_start_state`
+        container = getattr(pbar, 'container', None)
+ if container is None:
+ return
+ from .notebook import display
+ display(container)
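+
+
+# Illustrative example (assumes `dask` is installed):
+#   >>> import dask.array as da
+#   >>> with TqdmCallback(desc="compute"):
+#   ...     da.ones(9).sum().compute()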
diff --git a/third_party/python/tqdm/tqdm/gui.py b/third_party/python/tqdm/tqdm/gui.py
new file mode 100644
index 0000000000..4612701d2a
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/gui.py
@@ -0,0 +1,191 @@
+"""
+Matplotlib GUI progressbar decorator for iterators.
+
+Usage:
+>>> from tqdm.gui import trange, tqdm
+>>> for i in trange(10):
+... ...
+"""
+# future division is important: dividing integers yields precise
+# floating-point numbers (instead of truncated ints)
+from __future__ import absolute_import, division
+
+import re
+from warnings import warn
+
+# to inherit from the tqdm class
+from .std import TqdmExperimentalWarning
+from .std import tqdm as std_tqdm
+# import compatibility functions and utilities
+from .utils import _range
+
+__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
+__all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange']
+
+
+class tqdm_gui(std_tqdm): # pragma: no cover
+ """Experimental Matplotlib GUI version of tqdm!"""
+ # TODO: @classmethod: write() on GUI?
+ def __init__(self, *args, **kwargs):
+ from collections import deque
+
+ import matplotlib as mpl
+ import matplotlib.pyplot as plt
+ kwargs = kwargs.copy()
+ kwargs['gui'] = True
+ colour = kwargs.pop('colour', 'g')
+ super(tqdm_gui, self).__init__(*args, **kwargs)
+
+ if self.disable:
+ return
+
+ warn("GUI is experimental/alpha", TqdmExperimentalWarning, stacklevel=2)
+ self.mpl = mpl
+ self.plt = plt
+
+ # Remember if external environment uses toolbars
+ self.toolbar = self.mpl.rcParams['toolbar']
+ self.mpl.rcParams['toolbar'] = 'None'
+
+ self.mininterval = max(self.mininterval, 0.5)
+ self.fig, ax = plt.subplots(figsize=(9, 2.2))
+ # self.fig.subplots_adjust(bottom=0.2)
+ total = self.__len__() # avoids TypeError on None #971
+ if total is not None:
+ self.xdata = []
+ self.ydata = []
+ self.zdata = []
+ else:
+ self.xdata = deque([])
+ self.ydata = deque([])
+ self.zdata = deque([])
+ self.line1, = ax.plot(self.xdata, self.ydata, color='b')
+ self.line2, = ax.plot(self.xdata, self.zdata, color='k')
+ ax.set_ylim(0, 0.001)
+ if total is not None:
+ ax.set_xlim(0, 100)
+ ax.set_xlabel("percent")
+ self.fig.legend((self.line1, self.line2), ("cur", "est"),
+ loc='center right')
+ # progressbar
+ self.hspan = plt.axhspan(0, 0.001, xmin=0, xmax=0, color=colour)
+ else:
+ # ax.set_xlim(-60, 0)
+ ax.set_xlim(0, 60)
+ ax.invert_xaxis()
+ ax.set_xlabel("seconds")
+ ax.legend(("cur", "est"), loc='lower left')
+ ax.grid()
+ # ax.set_xlabel('seconds')
+ ax.set_ylabel((self.unit if self.unit else "it") + "/s")
+ if self.unit_scale:
+ plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
+ ax.yaxis.get_offset_text().set_x(-0.15)
+
+ # Remember if external environment is interactive
+ self.wasion = plt.isinteractive()
+ plt.ion()
+ self.ax = ax
+
+ def close(self):
+ if self.disable:
+ return
+
+ self.disable = True
+
+ with self.get_lock():
+ self._instances.remove(self)
+
+ # Restore toolbars
+ self.mpl.rcParams['toolbar'] = self.toolbar
+ # Return to non-interactive mode
+ if not self.wasion:
+ self.plt.ioff()
+ if self.leave:
+ self.display()
+ else:
+ self.plt.close(self.fig)
+
+ def clear(self, *_, **__):
+ pass
+
+ def display(self, *_, **__):
+ n = self.n
+ cur_t = self._time()
+ elapsed = cur_t - self.start_t
+ delta_it = n - self.last_print_n
+ delta_t = cur_t - self.last_print_t
+
+ # Inline due to multiple calls
+ total = self.total
+ xdata = self.xdata
+ ydata = self.ydata
+ zdata = self.zdata
+ ax = self.ax
+ line1 = self.line1
+ line2 = self.line2
+ # instantaneous rate
+ y = delta_it / delta_t
+ # overall rate
+ z = n / elapsed
+ # update line data
+ xdata.append(n * 100.0 / total if total else cur_t)
+ ydata.append(y)
+ zdata.append(z)
+
+ # Discard old values
+ # xmin, xmax = ax.get_xlim()
+ # if (not total) and elapsed > xmin * 1.1:
+ if (not total) and elapsed > 66:
+ xdata.popleft()
+ ydata.popleft()
+ zdata.popleft()
+
+ ymin, ymax = ax.get_ylim()
+ if y > ymax or z > ymax:
+ ymax = 1.1 * y
+ ax.set_ylim(ymin, ymax)
+ ax.figure.canvas.draw()
+
+ if total:
+ line1.set_data(xdata, ydata)
+ line2.set_data(xdata, zdata)
+ try:
+ poly_lims = self.hspan.get_xy()
+ except AttributeError:
+ self.hspan = self.plt.axhspan(0, 0.001, xmin=0, xmax=0, color='g')
+ poly_lims = self.hspan.get_xy()
+ poly_lims[0, 1] = ymin
+ poly_lims[1, 1] = ymax
+ poly_lims[2] = [n / total, ymax]
+ poly_lims[3] = [poly_lims[2, 0], ymin]
+ if len(poly_lims) > 4:
+ poly_lims[4, 1] = ymin
+ self.hspan.set_xy(poly_lims)
+ else:
+ t_ago = [cur_t - i for i in xdata]
+ line1.set_data(t_ago, ydata)
+ line2.set_data(t_ago, zdata)
+
+ d = self.format_dict
+ # remove {bar}
+ d['bar_format'] = (d['bar_format'] or "{l_bar}<bar/>{r_bar}").replace(
+ "{bar}", "<bar/>")
+ msg = self.format_meter(**d)
+ if '<bar/>' in msg:
+ msg = "".join(re.split(r'\|?<bar/>\|?', msg, 1))
+ ax.set_title(msg, fontname="DejaVu Sans Mono", fontsize=11)
+ self.plt.pause(1e-9)
+
+
+def tgrange(*args, **kwargs):
+ """
+ A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+ return tqdm_gui(_range(*args), **kwargs)
+
+
+# Aliases
+tqdm = tqdm_gui
+trange = tgrange
diff --git a/third_party/python/tqdm/tqdm/keras.py b/third_party/python/tqdm/tqdm/keras.py
new file mode 100644
index 0000000000..523e62e947
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/keras.py
@@ -0,0 +1,124 @@
+from __future__ import absolute_import, division
+
+from copy import copy
+from functools import partial
+
+from .auto import tqdm as tqdm_auto
+
+try:
+ import keras
+except (ImportError, AttributeError) as e:
+ try:
+ from tensorflow import keras
+ except ImportError:
+ raise e
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['TqdmCallback']
+
+
+class TqdmCallback(keras.callbacks.Callback):
+ """Keras callback for epoch and batch progress."""
+ @staticmethod
+ def bar2callback(bar, pop=None, delta=(lambda logs: 1)):
+ def callback(_, logs=None):
+ n = delta(logs)
+ if logs:
+ if pop:
+ logs = copy(logs)
+ [logs.pop(i, 0) for i in pop]
+ bar.set_postfix(logs, refresh=False)
+ bar.update(n)
+
+ return callback
+
+ def __init__(self, epochs=None, data_size=None, batch_size=None, verbose=1,
+ tqdm_class=tqdm_auto, **tqdm_kwargs):
+ """
+ Parameters
+ ----------
+ epochs : int, optional
+ data_size : int, optional
+ Number of training pairs.
+ batch_size : int, optional
+ Number of training pairs per batch.
+ verbose : int
+ 0: epoch, 1: batch (transient), 2: batch. [default: 1].
+ Will be set to `0` unless both `data_size` and `batch_size`
+ are given.
+ tqdm_class : optional
+ `tqdm` class to use for bars [default: `tqdm.auto.tqdm`].
+ tqdm_kwargs : optional
+ Any other arguments used for all bars.
+ """
+ if tqdm_kwargs:
+ tqdm_class = partial(tqdm_class, **tqdm_kwargs)
+ self.tqdm_class = tqdm_class
+ self.epoch_bar = tqdm_class(total=epochs, unit='epoch')
+ self.on_epoch_end = self.bar2callback(self.epoch_bar)
+ if data_size and batch_size:
+ self.batches = batches = (data_size + batch_size - 1) // batch_size
+ else:
+ self.batches = batches = None
+ self.verbose = verbose
+ if verbose == 1:
+ self.batch_bar = tqdm_class(total=batches, unit='batch', leave=False)
+ self.on_batch_end = self.bar2callback(
+ self.batch_bar, pop=['batch', 'size'],
+ delta=lambda logs: logs.get('size', 1))
+
+ def on_train_begin(self, *_, **__):
+ params = self.params.get
+ auto_total = params('epochs', params('nb_epoch', None))
+ if auto_total is not None and auto_total != self.epoch_bar.total:
+ self.epoch_bar.reset(total=auto_total)
+
+ def on_epoch_begin(self, epoch, *_, **__):
+ if self.epoch_bar.n < epoch:
+ ebar = self.epoch_bar
+ ebar.n = ebar.last_print_n = ebar.initial = epoch
+ if self.verbose:
+ params = self.params.get
+ total = params('samples', params(
+ 'nb_sample', params('steps', None))) or self.batches
+ if self.verbose == 2:
+ if hasattr(self, 'batch_bar'):
+ self.batch_bar.close()
+ self.batch_bar = self.tqdm_class(
+ total=total, unit='batch', leave=True,
+ unit_scale=1 / (params('batch_size', 1) or 1))
+ self.on_batch_end = self.bar2callback(
+ self.batch_bar, pop=['batch', 'size'],
+ delta=lambda logs: logs.get('size', 1))
+ elif self.verbose == 1:
+ self.batch_bar.unit_scale = 1 / (params('batch_size', 1) or 1)
+ self.batch_bar.reset(total=total)
+ else:
+ raise KeyError('Unknown verbosity')
+
+ def on_train_end(self, *_, **__):
+ if self.verbose:
+ self.batch_bar.close()
+ self.epoch_bar.close()
+
+ def display(self):
+ """Displays in the current cell in Notebooks."""
+ container = getattr(self.epoch_bar, 'container', None)
+ if container is None:
+ return
+ from .notebook import display
+ display(container)
+ batch_bar = getattr(self, 'batch_bar', None)
+ if batch_bar is not None:
+ display(batch_bar.container)
+
+ @staticmethod
+ def _implements_train_batch_hooks():
+ return True
+
+ @staticmethod
+ def _implements_test_batch_hooks():
+ return True
+
+ @staticmethod
+ def _implements_predict_batch_hooks():
+ return True
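+
+
+# Illustrative example (`model`, `X` and `y` are assumed to be a compiled
+# Keras model and its training data):
+#   >>> model.fit(X, y, verbose=0, callbacks=[TqdmCallback(verbose=1)])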
diff --git a/third_party/python/tqdm/tqdm/notebook.py b/third_party/python/tqdm/tqdm/notebook.py
new file mode 100644
index 0000000000..1f488d25f3
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/notebook.py
@@ -0,0 +1,327 @@
+"""
+IPython/Jupyter Notebook progressbar decorator for iterators.
+Includes a default `range` iterator printing to `stderr`.
+
+Usage:
+>>> from tqdm.notebook import trange, tqdm
+>>> for i in trange(10):
+... ...
+"""
+# future division is important: dividing integers yields precise
+# floating-point numbers (instead of truncated ints)
+from __future__ import absolute_import, division
+
+# import compatibility functions and utilities
+import re
+import sys
+from weakref import proxy
+
+# to inherit from the tqdm class
+from .std import tqdm as std_tqdm
+from .utils import _range
+
+if True: # pragma: no cover
+ # import IPython/Jupyter base widget and display utilities
+ IPY = 0
+ try: # IPython 4.x
+ import ipywidgets
+ IPY = 4
+ except ImportError: # IPython 3.x / 2.x
+ IPY = 32
+ import warnings
+ with warnings.catch_warnings():
+ warnings.filterwarnings(
+ 'ignore', message=".*The `IPython.html` package has been deprecated.*")
+ try:
+ import IPython.html.widgets as ipywidgets # NOQA: F401
+ except ImportError:
+ pass
+
+ try: # IPython 4.x / 3.x
+ if IPY == 32:
+ from IPython.html.widgets import HTML
+ from IPython.html.widgets import FloatProgress as IProgress
+ from IPython.html.widgets import HBox
+ IPY = 3
+ else:
+ from ipywidgets import HTML
+ from ipywidgets import FloatProgress as IProgress
+ from ipywidgets import HBox
+ except ImportError:
+ try: # IPython 2.x
+ from IPython.html.widgets import HTML
+ from IPython.html.widgets import ContainerWidget as HBox
+ from IPython.html.widgets import FloatProgressWidget as IProgress
+ IPY = 2
+ except ImportError:
+ IPY = 0
+ IProgress = None
+ HBox = object
+
+ try:
+ from IPython.display import display # , clear_output
+ except ImportError:
+ pass
+
+ # HTML encoding
+ try: # Py3
+ from html import escape
+ except ImportError: # Py2
+ from cgi import escape
+
+__author__ = {"github.com/": ["lrq3000", "casperdcl", "alexanderkuk"]}
+__all__ = ['tqdm_notebook', 'tnrange', 'tqdm', 'trange']
+
+
+class TqdmHBox(HBox):
+ """`ipywidgets.HBox` with a pretty representation"""
+ def _repr_json_(self, pretty=None):
+ pbar = getattr(self, 'pbar', None)
+ if pbar is None:
+ return {}
+ d = pbar.format_dict
+ if pretty is not None:
+ d["ascii"] = not pretty
+ return d
+
+ def __repr__(self, pretty=False):
+ pbar = getattr(self, 'pbar', None)
+ if pbar is None:
+ return super(TqdmHBox, self).__repr__()
+ return pbar.format_meter(**self._repr_json_(pretty))
+
+ def _repr_pretty_(self, pp, *_, **__):
+ pp.text(self.__repr__(True))
+
+
+class tqdm_notebook(std_tqdm):
+ """
+ Experimental IPython/Jupyter Notebook widget using tqdm!
+ """
+ @staticmethod
+ def status_printer(_, total=None, desc=None, ncols=None):
+ """
+ Manage the printing of an IPython/Jupyter Notebook progress bar widget.
+ """
+ # Fallback to text bar if there's no total
+ # DEPRECATED: replaced with an 'info' style bar
+ # if not total:
+ # return super(tqdm_notebook, tqdm_notebook).status_printer(file)
+
+ # fp = file
+
+ # Prepare IPython progress bar
+ if IProgress is None: # #187 #451 #558 #872
+ raise ImportError(
+ "IProgress not found. Please update jupyter and ipywidgets."
+ " See https://ipywidgets.readthedocs.io/en/stable"
+ "/user_install.html")
+ if total:
+ pbar = IProgress(min=0, max=total)
+ else: # No total? Show info style bar with no progress tqdm status
+ pbar = IProgress(min=0, max=1)
+ pbar.value = 1
+ pbar.bar_style = 'info'
+ if ncols is None:
+ pbar.layout.width = "20px"
+
+ ltext = HTML()
+ rtext = HTML()
+ if desc:
+ ltext.value = desc
+ container = TqdmHBox(children=[ltext, pbar, rtext])
+ # Prepare layout
+ if ncols is not None: # use default style of ipywidgets
+ # ncols could be 100, "100px", "100%"
+ ncols = str(ncols) # ipywidgets only accepts string
+ try:
+ if int(ncols) > 0: # isnumeric and positive
+ ncols += 'px'
+ except ValueError:
+ pass
+ pbar.layout.flex = '2'
+ container.layout.width = ncols
+ container.layout.display = 'inline-flex'
+ container.layout.flex_flow = 'row wrap'
+
+ return container
+
+ def display(self, msg=None, pos=None,
+ # additional signals
+ close=False, bar_style=None, check_delay=True):
+        # Note: contrary to native tqdm, msg='' does NOT clear the bar;
+        # the goal is to keep all info if an error happens, so the user knows
+        # at which iteration the loop failed.
+
+ # Clear previous output (really necessary?)
+ # clear_output(wait=1)
+
+ if not msg and not close:
+ d = self.format_dict
+ # remove {bar}
+ d['bar_format'] = (d['bar_format'] or "{l_bar}<bar/>{r_bar}").replace(
+ "{bar}", "<bar/>")
+ msg = self.format_meter(**d)
+
+ ltext, pbar, rtext = self.container.children
+ pbar.value = self.n
+
+ if msg:
+ # html escape special characters (like '&')
+ if '<bar/>' in msg:
+ left, right = map(escape, re.split(r'\|?<bar/>\|?', msg, 1))
+ else:
+ left, right = '', escape(msg)
+
+ # Update description
+ ltext.value = left
+ # never clear the bar (signal: msg='')
+ if right:
+ rtext.value = right
+
+ # Change bar style
+ if bar_style:
+ # Hack-ish way to avoid the danger bar_style being overridden by
+ # success because the bar gets closed after the error...
+ if pbar.bar_style != 'danger' or bar_style != 'success':
+ pbar.bar_style = bar_style
+
+ # Special signal to close the bar
+ if close and pbar.bar_style != 'danger': # hide only if no error
+ try:
+ self.container.close()
+ except AttributeError:
+ self.container.visible = False
+
+ if check_delay and self.delay > 0 and not self.displayed:
+ display(self.container)
+ self.displayed = True
+
+ @property
+ def colour(self):
+ if hasattr(self, 'container'):
+ return self.container.children[-2].style.bar_color
+
+ @colour.setter
+ def colour(self, bar_color):
+ if hasattr(self, 'container'):
+ self.container.children[-2].style.bar_color = bar_color
+
+ def __init__(self, *args, **kwargs):
+ """
+ Supports the usual `tqdm.tqdm` parameters as well as those listed below.
+
+ Parameters
+ ----------
+ display : Whether to call `display(self.container)` immediately
+ [default: True].
+ """
+ kwargs = kwargs.copy()
+ # Setup default output
+ file_kwarg = kwargs.get('file', sys.stderr)
+ if file_kwarg is sys.stderr or file_kwarg is None:
+ kwargs['file'] = sys.stdout # avoid the red block in IPython
+
+ # Initialize parent class + avoid printing by using gui=True
+ kwargs['gui'] = True
+ # convert disable = None to False
+ kwargs['disable'] = bool(kwargs.get('disable', False))
+ colour = kwargs.pop('colour', None)
+ display_here = kwargs.pop('display', True)
+ super(tqdm_notebook, self).__init__(*args, **kwargs)
+ if self.disable or not kwargs['gui']:
+ self.disp = lambda *_, **__: None
+ return
+
+ # Get bar width
+ self.ncols = '100%' if self.dynamic_ncols else kwargs.get("ncols", None)
+
+ # Replace with IPython progress bar display (with correct total)
+ unit_scale = 1 if self.unit_scale is True else self.unit_scale or 1
+ total = self.total * unit_scale if self.total else self.total
+ self.container = self.status_printer(self.fp, total, self.desc, self.ncols)
+ self.container.pbar = proxy(self)
+ self.displayed = False
+ if display_here and self.delay <= 0:
+ display(self.container)
+ self.displayed = True
+ self.disp = self.display
+ self.colour = colour
+
+ # Print initial bar state
+ if not self.disable:
+ self.display(check_delay=False)
+
+ def __iter__(self):
+ try:
+ for obj in super(tqdm_notebook, self).__iter__():
+ # return super(tqdm...) will not catch exception
+ yield obj
+ # NB: except ... [ as ...] breaks IPython async KeyboardInterrupt
+ except: # NOQA
+ self.disp(bar_style='danger')
+ raise
+ # NB: don't `finally: close()`
+ # since this could be a shared bar which the user will `reset()`
+
+ def update(self, n=1):
+ try:
+ return super(tqdm_notebook, self).update(n=n)
+ # NB: except ... [ as ...] breaks IPython async KeyboardInterrupt
+ except: # NOQA
+ # cannot catch KeyboardInterrupt when using manual tqdm
+ # as the interrupt will most likely happen on another statement
+ self.disp(bar_style='danger')
+ raise
+ # NB: don't `finally: close()`
+ # since this could be a shared bar which the user will `reset()`
+
+ def close(self):
+ if self.disable:
+ return
+ super(tqdm_notebook, self).close()
+ # Try to detect if there was an error or KeyboardInterrupt
+ # in manual mode: if n < total, things probably got wrong
+ if self.total and self.n < self.total:
+ self.disp(bar_style='danger', check_delay=False)
+ else:
+ if self.leave:
+ self.disp(bar_style='success', check_delay=False)
+ else:
+ self.disp(close=True, check_delay=False)
+
+ def clear(self, *_, **__):
+ pass
+
+ def reset(self, total=None):
+ """
+ Resets to 0 iterations for repeated use.
+
+ Consider combining with `leave=True`.
+
+ Parameters
+ ----------
+ total : int or float, optional. Total to use for the new bar.
+ """
+ if self.disable:
+ return super(tqdm_notebook, self).reset(total=total)
+ _, pbar, _ = self.container.children
+ pbar.bar_style = ''
+ if total is not None:
+ pbar.max = total
+ if not self.total and self.ncols is None: # no longer unknown total
+ pbar.layout.width = None # reset width
+ return super(tqdm_notebook, self).reset(total=total)
+
+
+def tnrange(*args, **kwargs):
+ """
+ A shortcut for `tqdm.notebook.tqdm(xrange(*args), **kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+ return tqdm_notebook(_range(*args), **kwargs)
+
+
+# Aliases
+tqdm = tqdm_notebook
+trange = tnrange
diff --git a/third_party/python/tqdm/tqdm/rich.py b/third_party/python/tqdm/tqdm/rich.py
new file mode 100644
index 0000000000..cf8e714326
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/rich.py
@@ -0,0 +1,152 @@
+"""
+`rich.progress` decorator for iterators.
+
+Usage:
+>>> from tqdm.rich import trange, tqdm
+>>> for i in trange(10):
+... ...
+"""
+from __future__ import absolute_import
+
+from warnings import warn
+
+from rich.progress import (
+ BarColumn, Progress, ProgressColumn, Text, TimeElapsedColumn, TimeRemainingColumn, filesize)
+
+from .std import TqdmExperimentalWarning
+from .std import tqdm as std_tqdm
+from .utils import _range
+
+__author__ = {"github.com/": ["casperdcl"]}
+__all__ = ['tqdm_rich', 'trrange', 'tqdm', 'trange']
+
+
+class FractionColumn(ProgressColumn):
+ """Renders completed/total, e.g. '0.5/2.3 G'."""
+ def __init__(self, unit_scale=False, unit_divisor=1000):
+ self.unit_scale = unit_scale
+ self.unit_divisor = unit_divisor
+ super().__init__()
+
+ def render(self, task):
+ """Calculate common unit for completed and total."""
+ completed = int(task.completed)
+ total = int(task.total)
+ if self.unit_scale:
+ unit, suffix = filesize.pick_unit_and_suffix(
+ total,
+ ["", "K", "M", "G", "T", "P", "E", "Z", "Y"],
+ self.unit_divisor,
+ )
+ else:
+ unit, suffix = filesize.pick_unit_and_suffix(total, [""], 1)
+ precision = 0 if unit == 1 else 1
+ return Text(
+ f"{completed/unit:,.{precision}f}/{total/unit:,.{precision}f} {suffix}",
+ style="progress.download")
+
+
+class RateColumn(ProgressColumn):
+ """Renders human readable transfer speed."""
+ def __init__(self, unit="", unit_scale=False, unit_divisor=1000):
+ self.unit = unit
+ self.unit_scale = unit_scale
+ self.unit_divisor = unit_divisor
+ super().__init__()
+
+ def render(self, task):
+ """Show data transfer speed."""
+ speed = task.speed
+ if speed is None:
+ return Text(f"? {self.unit}/s", style="progress.data.speed")
+ if self.unit_scale:
+ unit, suffix = filesize.pick_unit_and_suffix(
+ speed,
+ ["", "K", "M", "G", "T", "P", "E", "Z", "Y"],
+ self.unit_divisor,
+ )
+ else:
+ unit, suffix = filesize.pick_unit_and_suffix(speed, [""], 1)
+ precision = 0 if unit == 1 else 1
+ return Text(f"{speed/unit:,.{precision}f} {suffix}{self.unit}/s",
+ style="progress.data.speed")
+
+
+class tqdm_rich(std_tqdm): # pragma: no cover
+ """Experimental rich.progress GUI version of tqdm!"""
+ # TODO: @classmethod: write()?
+ def __init__(self, *args, **kwargs):
+ """
+ This class accepts the following parameters *in addition* to
+ the parameters accepted by `tqdm`.
+
+ Parameters
+ ----------
+ progress : tuple, optional
+ arguments for `rich.progress.Progress()`.
+ """
+ kwargs = kwargs.copy()
+ kwargs['gui'] = True
+ # convert disable = None to False
+ kwargs['disable'] = bool(kwargs.get('disable', False))
+ progress = kwargs.pop('progress', None)
+ super(tqdm_rich, self).__init__(*args, **kwargs)
+
+ if self.disable:
+ return
+
+ warn("rich is experimental/alpha", TqdmExperimentalWarning, stacklevel=2)
+ d = self.format_dict
+ if progress is None:
+ progress = (
+ "[progress.description]{task.description}"
+ "[progress.percentage]{task.percentage:>4.0f}%",
+ BarColumn(bar_width=None),
+ FractionColumn(
+ unit_scale=d['unit_scale'], unit_divisor=d['unit_divisor']),
+ "[", TimeElapsedColumn(), "<", TimeRemainingColumn(),
+ ",", RateColumn(unit=d['unit'], unit_scale=d['unit_scale'],
+ unit_divisor=d['unit_divisor']), "]"
+ )
+ self._prog = Progress(*progress, transient=not self.leave)
+ self._prog.__enter__()
+ self._task_id = self._prog.add_task(self.desc or "", **d)
+
+ def close(self):
+ if self.disable:
+ return
+ super(tqdm_rich, self).close()
+ self._prog.__exit__(None, None, None)
+
+ def clear(self, *_, **__):
+ pass
+
+ def display(self, *_, **__):
+ if not hasattr(self, '_prog'):
+ return
+ self._prog.update(self._task_id, completed=self.n, description=self.desc)
+
+ def reset(self, total=None):
+ """
+ Resets to 0 iterations for repeated use.
+
+ Parameters
+ ----------
+ total : int or float, optional. Total to use for the new bar.
+ """
+ if hasattr(self, '_prog'):
+ self._prog.reset(total=total)
+ super(tqdm_rich, self).reset(total=total)
+
+
+def trrange(*args, **kwargs):
+ """
+ A shortcut for `tqdm.rich.tqdm(xrange(*args), **kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+ return tqdm_rich(_range(*args), **kwargs)
+
+
+# Aliases
+tqdm = tqdm_rich
+trange = trrange
diff --git a/third_party/python/tqdm/tqdm/std.py b/third_party/python/tqdm/tqdm/std.py
new file mode 100644
index 0000000000..e81c836808
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/std.py
@@ -0,0 +1,1526 @@
+"""
+Customisable progressbar decorator for iterators.
+Includes a default `range` iterator printing to `stderr`.
+
+Usage:
+>>> from tqdm import trange, tqdm
+>>> for i in trange(10):
+... ...
+"""
+from __future__ import absolute_import, division
+
+import sys
+from collections import OrderedDict, defaultdict
+from contextlib import contextmanager
+from datetime import datetime, timedelta
+from numbers import Number
+from time import time
+from warnings import warn
+from weakref import WeakSet
+
+from ._monitor import TMonitor
+from .utils import (
+ CallbackIOWrapper, Comparable, DisableOnWriteError, FormatReplace, SimpleTextIOWrapper,
+ _basestring, _is_ascii, _range, _screen_shape_wrapper, _supports_unicode, _term_move_up,
+ _unich, _unicode, disp_len, disp_trim)
+
+__author__ = "https://github.com/tqdm/tqdm#contributions"
+__all__ = ['tqdm', 'trange',
+ 'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
+ 'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
+ 'TqdmMonitorWarning']
+
+
+class TqdmTypeError(TypeError):
+ pass
+
+
+class TqdmKeyError(KeyError):
+ pass
+
+
+class TqdmWarning(Warning):
+ """base class for all tqdm warnings.
+
+ Used for non-external-code-breaking errors, such as garbled printing.
+ """
+ def __init__(self, msg, fp_write=None, *a, **k):
+ if fp_write is not None:
+ fp_write("\n" + self.__class__.__name__ + ": " + str(msg).rstrip() + '\n')
+ else:
+ super(TqdmWarning, self).__init__(msg, *a, **k)
+
+
+class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
+ """beta feature, unstable API and behaviour"""
+ pass
+
+
+class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
+ # not suppressed if raised
+ pass
+
+
+class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
+ """tqdm monitor errors which do not affect external functionality"""
+ pass
+
+
+def TRLock(*args, **kwargs):
+ """threading RLock"""
+ try:
+ from threading import RLock
+ return RLock(*args, **kwargs)
+ except (ImportError, OSError): # pragma: no cover
+ pass
+
+
+class TqdmDefaultWriteLock(object):
+ """
+ Provide a default write lock for thread and multiprocessing safety.
+ Works only on platforms supporting `fork` (so Windows is excluded).
+ You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
+ before forking in order for the write lock to work.
+ On Windows, you need to supply the lock from the parent to the children as
+ an argument to joblib or the parallelism lib you use.
+ """
+ # global thread lock so no setup required for multithreading.
+ # NB: Do not create multiprocessing lock as it sets the multiprocessing
+ # context, disallowing `spawn()`/`forkserver()`
+ th_lock = TRLock()
+
+ def __init__(self):
+        # Create global parallelism locks to avoid racing issues with parallel
+        # bars (works only if `fork` is available: Linux/MacOSX, but not Windows)
+ cls = type(self)
+ root_lock = cls.th_lock
+ if root_lock is not None:
+ root_lock.acquire()
+ cls.create_mp_lock()
+ self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
+ if root_lock is not None:
+ root_lock.release()
+
+ def acquire(self, *a, **k):
+ for lock in self.locks:
+ lock.acquire(*a, **k)
+
+ def release(self):
+ for lock in self.locks[::-1]: # Release in inverse order of acquisition
+ lock.release()
+
+ def __enter__(self):
+ self.acquire()
+
+ def __exit__(self, *exc):
+ self.release()
+
+ @classmethod
+ def create_mp_lock(cls):
+ if not hasattr(cls, 'mp_lock'):
+ try:
+ from multiprocessing import RLock
+ cls.mp_lock = RLock()
+ except (ImportError, OSError): # pragma: no cover
+ cls.mp_lock = None
+
+ @classmethod
+ def create_th_lock(cls):
+ assert hasattr(cls, 'th_lock')
+ warn("create_th_lock not needed anymore", TqdmDeprecationWarning, stacklevel=2)
+
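+# Illustrative example: explicitly share the write lock with child processes
+# (needed where `fork` is unavailable, e.g. Windows):
+#   >>> from multiprocessing import Pool
+#   >>> Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(),))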
+
+class Bar(object):
+ """
+ `str.format`-able bar with format specifiers: `[width][type]`
+
+ - `width`
+ + unspecified (default): use `self.default_len`
+ + `int >= 0`: overrides `self.default_len`
+ + `int < 0`: subtract from `self.default_len`
+ - `type`
+ + `a`: ascii (`charset=self.ASCII` override)
+ + `u`: unicode (`charset=self.UTF` override)
+ + `b`: blank (`charset=" "` override)
+ """
+ ASCII = " 123456789#"
+ UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
+ BLANK = " "
+ COLOUR_RESET = '\x1b[0m'
+ COLOUR_RGB = '\x1b[38;2;%d;%d;%dm'
+ COLOURS = {'BLACK': '\x1b[30m', 'RED': '\x1b[31m', 'GREEN': '\x1b[32m',
+ 'YELLOW': '\x1b[33m', 'BLUE': '\x1b[34m', 'MAGENTA': '\x1b[35m',
+ 'CYAN': '\x1b[36m', 'WHITE': '\x1b[37m'}
+
+ def __init__(self, frac, default_len=10, charset=UTF, colour=None):
+ if not 0 <= frac <= 1:
+ warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
+ frac = max(0, min(1, frac))
+ assert default_len > 0
+ self.frac = frac
+ self.default_len = default_len
+ self.charset = charset
+ self.colour = colour
+
+ @property
+ def colour(self):
+ return self._colour
+
+ @colour.setter
+ def colour(self, value):
+ if not value:
+ self._colour = None
+ return
+ try:
+ if value.upper() in self.COLOURS:
+ self._colour = self.COLOURS[value.upper()]
+ elif value[0] == '#' and len(value) == 7:
+ self._colour = self.COLOUR_RGB % tuple(
+ int(i, 16) for i in (value[1:3], value[3:5], value[5:7]))
+ else:
+ raise KeyError
+ except (KeyError, AttributeError):
+ warn("Unknown colour (%s); valid choices: [hex (#00ff00), %s]" % (
+ value, ", ".join(self.COLOURS)),
+ TqdmWarning, stacklevel=2)
+ self._colour = None
+
+ def __format__(self, format_spec):
+ if format_spec:
+ _type = format_spec[-1].lower()
+ try:
+ charset = {'a': self.ASCII, 'u': self.UTF, 'b': self.BLANK}[_type]
+ except KeyError:
+ charset = self.charset
+ else:
+ format_spec = format_spec[:-1]
+ if format_spec:
+ N_BARS = int(format_spec)
+ if N_BARS < 0:
+ N_BARS += self.default_len
+ else:
+ N_BARS = self.default_len
+ else:
+ charset = self.charset
+ N_BARS = self.default_len
+
+ nsyms = len(charset) - 1
+ bar_length, frac_bar_length = divmod(int(self.frac * N_BARS * nsyms), nsyms)
+
+ res = charset[-1] * bar_length
+ if bar_length < N_BARS: # whitespace padding
+ res = res + charset[frac_bar_length] + charset[0] * (N_BARS - bar_length - 1)
+ return self.colour + res + self.COLOUR_RESET if self.colour else res
+
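+# Illustrative example: a 10-character ASCII bar at 50%:
+#   >>> "{0:10a}".format(Bar(0.5))
+#   '#####     '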
+
+class EMA(object):
+ """
+ Exponential moving average: smoothing to give progressively lower
+ weights to older values.
+
+ Parameters
+ ----------
+ smoothing : float, optional
+ Smoothing factor in range [0, 1], [default: 0.3].
+ Increase to give more weight to recent values.
+ Ranges from 0 (yields old value) to 1 (yields new value).
+ """
+ def __init__(self, smoothing=0.3):
+ self.alpha = smoothing
+ self.last = 0
+ self.calls = 0
+
+ def __call__(self, x=None):
+ """
+ Parameters
+ ----------
+ x : float
+ New value to include in EMA.
+ """
+ beta = 1 - self.alpha
+ if x is not None:
+ self.last = self.alpha * x + beta * self.last
+ self.calls += 1
+ return self.last / (1 - beta ** self.calls) if self.calls else self.last
+
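+# Illustrative example (bias-corrected, so the first call returns `x` itself):
+#   >>> ema = EMA(smoothing=0.5)
+#   >>> ema(1), ema(3)  # -> (1.0, 2.333...)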
+
+class tqdm(Comparable):
+ """
+ Decorate an iterable object, returning an iterator which acts exactly
+ like the original iterable, but prints a dynamically updating
+ progressbar every time a value is requested.
+ """
+
+ monitor_interval = 10 # set to 0 to disable the thread
+ monitor = None
+ _instances = WeakSet()
+
+ @staticmethod
+ def format_sizeof(num, suffix='', divisor=1000):
+ """
+ Formats a number (greater than unity) with SI Order of Magnitude
+ prefixes.
+
+ Parameters
+ ----------
+ num : float
+ Number ( >= 1) to format.
+ suffix : str, optional
+ Post-postfix [default: ''].
+ divisor : float, optional
+ Divisor between prefixes [default: 1000].
+
+ Returns
+ -------
+ out : str
+ Number with Order of Magnitude SI unit postfix.
+ """
+ for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
+ if abs(num) < 999.5:
+ if abs(num) < 99.95:
+ if abs(num) < 9.995:
+ return '{0:1.2f}'.format(num) + unit + suffix
+ return '{0:2.1f}'.format(num) + unit + suffix
+ return '{0:3.0f}'.format(num) + unit + suffix
+ num /= divisor
+ return '{0:3.1f}Y'.format(num) + suffix
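+    # Illustrative examples:
+    #   >>> tqdm.format_sizeof(123456)
+    #   '123k'
+    #   >>> tqdm.format_sizeof(100000, suffix='B', divisor=1024)
+    #   '97.7kB'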
+
+ @staticmethod
+ def format_interval(t):
+ """
+ Formats a number of seconds as a clock time, [H:]MM:SS
+
+ Parameters
+ ----------
+ t : int
+ Number of seconds.
+
+ Returns
+ -------
+ out : str
+ [H:]MM:SS
+ """
+ mins, s = divmod(int(t), 60)
+ h, m = divmod(mins, 60)
+ if h:
+ return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
+ else:
+ return '{0:02d}:{1:02d}'.format(m, s)
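+    # Illustrative examples:
+    #   >>> tqdm.format_interval(3661)
+    #   '1:01:01'
+    #   >>> tqdm.format_interval(59)
+    #   '00:59'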
+
+ @staticmethod
+ def format_num(n):
+ """
+ Intelligent scientific notation (.3g).
+
+ Parameters
+ ----------
+ n : int or float or Numeric
+ A Number.
+
+ Returns
+ -------
+ out : str
+ Formatted number.
+ """
+ f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
+ n = str(n)
+ return f if len(f) < len(n) else n
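+    # Illustrative example: scientific notation only when it is shorter:
+    #   >>> tqdm.format_num(1234.5678)  # -> '1.23e+3'
+    #   >>> tqdm.format_num(42)         # -> '42'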
+
+ @staticmethod
+ def status_printer(file):
+ """
+ Manage the printing and in-place updating of a line of characters.
+ Note that if the string is longer than a line, then in-place
+ updating may not work (it will print a new line at each refresh).
+ """
+ fp = file
+ fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
+ if fp in (sys.stderr, sys.stdout):
+ sys.stderr.flush()
+ sys.stdout.flush()
+
+ def fp_write(s):
+ fp.write(_unicode(s))
+ fp_flush()
+
+ last_len = [0]
+
+ def print_status(s):
+ len_s = disp_len(s)
+ fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
+ last_len[0] = len_s
+
+ return print_status
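+    # Illustrative example: in-place updates via a leading '\r':
+    #   >>> print_status = tqdm.status_printer(sys.stderr)
+    #   >>> print_status("processing:  50%")
+    #   >>> print_status("processing: 100%")  # overwrites the previous line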
+
+ @staticmethod
+ def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False, unit='it',
+ unit_scale=False, rate=None, bar_format=None, postfix=None,
+ unit_divisor=1000, initial=0, colour=None, **extra_kwargs):
+ """
+ Return a string-based progress bar given some parameters
+
+ Parameters
+ ----------
+ n : int or float
+ Number of finished iterations.
+ total : int or float
+ The expected total number of iterations. If meaningless (None),
+ only basic progress statistics are displayed (no ETA).
+ elapsed : float
+ Number of seconds passed since start.
+ ncols : int, optional
+ The width of the entire output message. If specified,
+ dynamically resizes `{bar}` to stay within this bound
+ [default: None]. If `0`, will not print any bar (only stats).
+ The fallback is `{bar:10}`.
+ prefix : str, optional
+ Prefix message (included in total width) [default: ''].
+ Use as {desc} in bar_format string.
+ ascii : bool, optional or str, optional
+ If not set, use unicode (smooth blocks) to fill the meter
+ [default: False]. The fallback is to use ASCII characters
+ " 123456789#".
+ unit : str, optional
+ The iteration unit [default: 'it'].
+ unit_scale : bool or int or float, optional
+ If 1 or True, the number of iterations will be printed with an
+ appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
+ [default: False]. If any other non-zero number, will scale
+ `total` and `n`.
+ rate : float, optional
+ Manual override for iteration rate.
+ If [default: None], uses n/elapsed.
+ bar_format : str, optional
+ Specify a custom bar string format. May impact performance.
+ [default: '{l_bar}{bar}{r_bar}'], where
+ l_bar='{desc}: {percentage:3.0f}%|' and
+ r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
+ '{rate_fmt}{postfix}]'
+ Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
+ percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,
+ rate, rate_fmt, rate_noinv, rate_noinv_fmt,
+ rate_inv, rate_inv_fmt, postfix, unit_divisor,
+ remaining, remaining_s, eta.
+ Note that a trailing ": " is automatically removed after {desc}
+ if the latter is empty.
+ postfix : *, optional
+ Similar to `prefix`, but placed at the end
+ (e.g. for additional stats).
+ Note: postfix is usually a string (not a dict) for this method,
+ and will, if possible, be set to postfix = ', ' + postfix.
+ However, other types are supported (#382).
+ unit_divisor : float, optional
+ [default: 1000], ignored unless `unit_scale` is True.
+ initial : int or float, optional
+ The initial counter value [default: 0].
+ colour : str, optional
+ Bar colour (e.g. 'green', '#00ff00').
+
+ Returns
+ -------
+ out : Formatted meter and stats, ready to display.
+ """
+
+ # sanity check: total
+ if total and n >= (total + 0.5): # allow float imprecision (#849)
+ total = None
+
+ # apply custom scale if necessary
+ if unit_scale and unit_scale not in (True, 1):
+ if total:
+ total *= unit_scale
+ n *= unit_scale
+ if rate:
+ rate *= unit_scale # by default rate = self.avg_dn / self.avg_dt
+ unit_scale = False
+
+ elapsed_str = tqdm.format_interval(elapsed)
+
+ # if unspecified, attempt to use rate = average speed
+ # (we allow manual override since predicting time is an arcane art)
+ if rate is None and elapsed:
+ rate = (n - initial) / elapsed
+ inv_rate = 1 / rate if rate else None
+ format_sizeof = tqdm.format_sizeof
+ rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
+ '{0:5.2f}'.format(rate)) if rate else '?') + unit + '/s'
+ rate_inv_fmt = (
+ (format_sizeof(inv_rate) if unit_scale else '{0:5.2f}'.format(inv_rate))
+ if inv_rate else '?') + 's/' + unit
+ rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
+
+ if unit_scale:
+ n_fmt = format_sizeof(n, divisor=unit_divisor)
+ total_fmt = format_sizeof(total, divisor=unit_divisor) if total is not None else '?'
+ else:
+ n_fmt = str(n)
+ total_fmt = str(total) if total is not None else '?'
+
+ try:
+ postfix = ', ' + postfix if postfix else ''
+ except TypeError:
+ pass
+
+ remaining = (total - n) / rate if rate and total else 0
+ remaining_str = tqdm.format_interval(remaining) if rate else '?'
+ try:
+ eta_dt = (datetime.now() + timedelta(seconds=remaining)
+ if rate and total else datetime.utcfromtimestamp(0))
+ except OverflowError:
+ eta_dt = datetime.max
+
+ # format the stats displayed to the left and right sides of the bar
+ if prefix:
+ # old prefix setup workaround
+ bool_prefix_colon_already = (prefix[-2:] == ": ")
+ l_bar = prefix if bool_prefix_colon_already else prefix + ": "
+ else:
+ l_bar = ''
+
+ r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
+ n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
+
+ # Custom bar formatting
+ # Populate a dict with all available progress indicators
+ format_dict = dict(
+ # slight extension of self.format_dict
+ n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
+ elapsed=elapsed_str, elapsed_s=elapsed,
+ ncols=ncols, desc=prefix or '', unit=unit,
+ rate=inv_rate if inv_rate and inv_rate > 1 else rate,
+ rate_fmt=rate_fmt, rate_noinv=rate,
+ rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
+ rate_inv_fmt=rate_inv_fmt,
+ postfix=postfix, unit_divisor=unit_divisor,
+ colour=colour,
+ # plus more useful definitions
+ remaining=remaining_str, remaining_s=remaining,
+ l_bar=l_bar, r_bar=r_bar, eta=eta_dt,
+ **extra_kwargs)
+
+ # total is known: we can predict some stats
+ if total:
+ # fractional and percentage progress
+ frac = n / total
+ percentage = frac * 100
+
+ l_bar += '{0:3.0f}%|'.format(percentage)
+
+ if ncols == 0:
+ return l_bar[:-1] + r_bar[1:]
+
+ format_dict.update(l_bar=l_bar)
+ if bar_format:
+ format_dict.update(percentage=percentage)
+
+ # auto-remove colon for empty `desc`
+ if not prefix:
+ bar_format = bar_format.replace("{desc}: ", '')
+ else:
+ bar_format = "{l_bar}{bar}{r_bar}"
+
+ full_bar = FormatReplace()
+ try:
+ nobar = bar_format.format(bar=full_bar, **format_dict)
+ except UnicodeEncodeError:
+ bar_format = _unicode(bar_format)
+ nobar = bar_format.format(bar=full_bar, **format_dict)
+ if not full_bar.format_called:
+ # no {bar}, we can just format and return
+ return nobar
+
+ # Formatting progress bar space available for bar's display
+ full_bar = Bar(frac,
+ max(1, ncols - disp_len(nobar)) if ncols else 10,
+ charset=Bar.ASCII if ascii is True else ascii or Bar.UTF,
+ colour=colour)
+ if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
+ bar_format = _unicode(bar_format)
+ res = bar_format.format(bar=full_bar, **format_dict)
+ return disp_trim(res, ncols) if ncols else res
+
+ elif bar_format:
+ # user-specified bar_format but no total
+ l_bar += '|'
+ format_dict.update(l_bar=l_bar, percentage=0)
+ full_bar = FormatReplace()
+ nobar = bar_format.format(bar=full_bar, **format_dict)
+ if not full_bar.format_called:
+ return nobar
+ full_bar = Bar(0,
+ max(1, ncols - disp_len(nobar)) if ncols else 10,
+ charset=Bar.BLANK, colour=colour)
+ res = bar_format.format(bar=full_bar, **format_dict)
+ return disp_trim(res, ncols) if ncols else res
+ else:
+ # no total: no progressbar, ETA, just progress stats
+ return '{0}{1}{2} [{3}, {4}{5}]'.format(
+ (prefix + ": ") if prefix else '', n_fmt, unit, elapsed_str, rate_fmt, postfix)
+
+ def __new__(cls, *_, **__):
+ instance = object.__new__(cls)
+ with cls.get_lock(): # also constructs lock if non-existent
+ cls._instances.add(instance)
+ # create monitoring thread
+ if cls.monitor_interval and (cls.monitor is None
+ or not cls.monitor.report()):
+ try:
+ cls.monitor = TMonitor(cls, cls.monitor_interval)
+ except Exception as e: # pragma: no cover
+ warn("tqdm:disabling monitor support"
+ " (monitor_interval = 0) due to:\n" + str(e),
+ TqdmMonitorWarning, stacklevel=2)
+ cls.monitor_interval = 0
+ return instance
+
+ @classmethod
+ def _get_free_pos(cls, instance=None):
+ """Skips specified instance."""
+ positions = {abs(inst.pos) for inst in cls._instances
+ if inst is not instance and hasattr(inst, "pos")}
+ return min(set(range(len(positions) + 1)).difference(positions))
+
+ @classmethod
+ def _decr_instances(cls, instance):
+ """
+ Remove from list and reposition another unfixed bar
+ to fill the new gap.
+
+ This means that by default (where all nested bars are unfixed),
+ order is not maintained but screen flicker/blank space is minimised.
+ (tqdm<=4.44.1 moved ALL subsequent unfixed bars up.)
+ """
+ with cls._lock:
+ try:
+ cls._instances.remove(instance)
+ except KeyError:
+ # if not instance.gui: # pragma: no cover
+ # raise
+ pass # py2: maybe magically removed already
+ # else:
+ if not instance.gui:
+ last = (instance.nrows or 20) - 1
+ # find unfixed (`pos >= 0`) overflow (`pos >= nrows - 1`)
+ instances = list(filter(
+ lambda i: hasattr(i, "pos") and last <= i.pos,
+ cls._instances))
+ # set first found to current `pos`
+ if instances:
+ inst = min(instances, key=lambda i: i.pos)
+ inst.clear(nolock=True)
+ inst.pos = abs(instance.pos)
+
+ @classmethod
+ def write(cls, s, file=None, end="\n", nolock=False):
+ """Print a message via tqdm (without overlap with bars)."""
+ fp = file if file is not None else sys.stdout
+ with cls.external_write_mode(file=file, nolock=nolock):
+ # Write the message
+ fp.write(s)
+ fp.write(end)
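+
+ # Usage sketch (illustrative, not part of the upstream source):
+ # printing messages without clobbering any live bars:
+ #   for i in tqdm(range(10)):
+ #       tqdm.write("processed item {0}".format(i))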
+
+ @classmethod
+ @contextmanager
+ def external_write_mode(cls, file=None, nolock=False):
+ """
+ Disable tqdm within the context and refresh tqdm on exit.
+ Useful when writing to the standard output stream.
+ """
+ fp = file if file is not None else sys.stdout
+
+ try:
+ if not nolock:
+ cls.get_lock().acquire()
+ # Clear all bars
+ inst_cleared = []
+ for inst in getattr(cls, '_instances', []):
+ # Clear instance if in the target output file
+ # or if write output + tqdm output are both either
+ # sys.stdout or sys.stderr (because both are mixed in terminal)
+ if hasattr(inst, "start_t") and (inst.fp == fp or all(
+ f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
+ inst.clear(nolock=True)
+ inst_cleared.append(inst)
+ yield
+ # Force refresh display of bars we cleared
+ for inst in inst_cleared:
+ inst.refresh(nolock=True)
+ finally:
+ if not nolock:
+ cls._lock.release()
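+
+ # Usage sketch (illustrative, not part of the upstream source): wrap
+ # code that writes directly to stdout/stderr while bars are active:
+ #   with tqdm.external_write_mode():
+ #       print("this line will not collide with any active bar")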
+
+ @classmethod
+ def set_lock(cls, lock):
+ """Set the global lock."""
+ cls._lock = lock
+
+ @classmethod
+ def get_lock(cls):
+ """Get the global lock. Construct it if it does not exist."""
+ if not hasattr(cls, '_lock'):
+ cls._lock = TqdmDefaultWriteLock()
+ return cls._lock
+
+ @classmethod
+ def pandas(cls, **tqdm_kwargs):
+ """
+ Registers the current `tqdm` class with
+ pandas.core.
+ ( frame.DataFrame
+ | series.Series
+ | groupby.(generic.)DataFrameGroupBy
+ | groupby.(generic.)SeriesGroupBy
+ ).progress_apply
+
+ A new instance will be created every time `progress_apply` is called,
+ and each instance will automatically `close()` upon completion.
+
+ Parameters
+ ----------
+ tqdm_kwargs : arguments for the tqdm instance
+
+ Examples
+ --------
+ >>> import pandas as pd
+ >>> import numpy as np
+ >>> from tqdm import tqdm
+ >>> from tqdm.gui import tqdm as tqdm_gui
+ >>>
+ >>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
+ >>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
+ >>> # Now you can use `progress_apply` instead of `apply`
+ >>> df.groupby(0).progress_apply(lambda x: x**2)
+
+ References
+ ----------
+ <https://stackoverflow.com/questions/18603270/\
+ progress-indicator-during-pandas-operations-python>
+ """
+ from warnings import catch_warnings, simplefilter
+
+ from pandas.core.frame import DataFrame
+ from pandas.core.series import Series
+ try:
+ with catch_warnings():
+ simplefilter("ignore", category=FutureWarning)
+ from pandas import Panel
+ except ImportError: # pandas>=1.2.0
+ Panel = None
+ Rolling, Expanding = None, None
+ try: # pandas>=1.0.0
+ from pandas.core.window.rolling import _Rolling_and_Expanding
+ except ImportError:
+ try: # pandas>=0.18.0
+ from pandas.core.window import _Rolling_and_Expanding
+ except ImportError: # pandas>=1.2.0
+ try: # pandas>=1.2.0
+ from pandas.core.window.expanding import Expanding
+ from pandas.core.window.rolling import Rolling
+ _Rolling_and_Expanding = Rolling, Expanding
+ except ImportError: # pragma: no cover
+ _Rolling_and_Expanding = None
+ try: # pandas>=0.25.0
+ from pandas.core.groupby.generic import SeriesGroupBy # , NDFrameGroupBy
+ from pandas.core.groupby.generic import DataFrameGroupBy
+ except ImportError: # pragma: no cover
+ try: # pandas>=0.23.0
+ from pandas.core.groupby.groupby import DataFrameGroupBy, SeriesGroupBy
+ except ImportError:
+ from pandas.core.groupby import DataFrameGroupBy, SeriesGroupBy
+ try: # pandas>=0.23.0
+ from pandas.core.groupby.groupby import GroupBy
+ except ImportError: # pragma: no cover
+ from pandas.core.groupby import GroupBy
+
+ try: # pandas>=0.23.0
+ from pandas.core.groupby.groupby import PanelGroupBy
+ except ImportError:
+ try:
+ from pandas.core.groupby import PanelGroupBy
+ except ImportError: # pandas>=0.25.0
+ PanelGroupBy = None
+
+ tqdm_kwargs = tqdm_kwargs.copy()
+ deprecated_t = [tqdm_kwargs.pop('deprecated_t', None)]
+
+ def inner_generator(df_function='apply'):
+ def inner(df, func, *args, **kwargs):
+ """
+ Parameters
+ ----------
+ df : (DataFrame|Series)[GroupBy]
+ Data (may be grouped).
+ func : function
+ To be applied on the (grouped) data.
+ **kwargs : optional
+ Transmitted to `df.apply()`.
+ """
+
+ # Precompute total iterations
+ total = tqdm_kwargs.pop("total", getattr(df, 'ngroups', None))
+ if total is None: # not grouped
+ if df_function == 'applymap':
+ total = df.size
+ elif isinstance(df, Series):
+ total = len(df)
+ elif (_Rolling_and_Expanding is None or
+ not isinstance(df, _Rolling_and_Expanding)):
+ # DataFrame or Panel
+ axis = kwargs.get('axis', 0)
+ if axis == 'index':
+ axis = 0
+ elif axis == 'columns':
+ axis = 1
+ # when axis=0, total is shape[axis1]
+ total = df.size // df.shape[axis]
+
+ # Init bar
+ if deprecated_t[0] is not None:
+ t = deprecated_t[0]
+ deprecated_t[0] = None
+ else:
+ t = cls(total=total, **tqdm_kwargs)
+
+ if len(args) > 0:
+ # *args intentionally not supported (see #244, #299)
+ TqdmDeprecationWarning(
+ "Except func, normal arguments are intentionally" +
+ " not supported by" +
+ " `(DataFrame|Series|GroupBy).progress_apply`." +
+ " Use keyword arguments instead.",
+ fp_write=getattr(t.fp, 'write', sys.stderr.write))
+
+ try: # pandas>=1.3.0
+ from pandas.core.common import is_builtin_func
+ except ImportError:
+ is_builtin_func = df._is_builtin_func
+ try:
+ func = is_builtin_func(func)
+ except TypeError:
+ pass
+
+ # Define bar updating wrapper
+ def wrapper(*args, **kwargs):
+ # update tbar correctly
+ # it seems `pandas apply` calls `func` twice
+ # on the first column/row to decide whether it can
+ # take a fast or slow code path; so stop when t.total==t.n
+ t.update(n=1 if not t.total or t.n < t.total else 0)
+ return func(*args, **kwargs)
+
+ # Apply the provided function (in **kwargs)
+ # on the df using our wrapper (which provides bar updating)
+ try:
+ return getattr(df, df_function)(wrapper, **kwargs)
+ finally:
+ t.close()
+
+ return inner
+
+ # Monkeypatch pandas to provide easy methods
+ # Enable custom tqdm progress in pandas!
+ Series.progress_apply = inner_generator()
+ SeriesGroupBy.progress_apply = inner_generator()
+ Series.progress_map = inner_generator('map')
+ SeriesGroupBy.progress_map = inner_generator('map')
+
+ DataFrame.progress_apply = inner_generator()
+ DataFrameGroupBy.progress_apply = inner_generator()
+ DataFrame.progress_applymap = inner_generator('applymap')
+
+ if Panel is not None:
+ Panel.progress_apply = inner_generator()
+ if PanelGroupBy is not None:
+ PanelGroupBy.progress_apply = inner_generator()
+
+ GroupBy.progress_apply = inner_generator()
+ GroupBy.progress_aggregate = inner_generator('aggregate')
+ GroupBy.progress_transform = inner_generator('transform')
+
+ if Rolling is not None and Expanding is not None:
+ Rolling.progress_apply = inner_generator()
+ Expanding.progress_apply = inner_generator()
+ elif _Rolling_and_Expanding is not None:
+ _Rolling_and_Expanding.progress_apply = inner_generator()
+
+ def __init__(self, iterable=None, desc=None, total=None, leave=True, file=None,
+ ncols=None, mininterval=0.1, maxinterval=10.0, miniters=None,
+ ascii=None, disable=False, unit='it', unit_scale=False,
+ dynamic_ncols=False, smoothing=0.3, bar_format=None, initial=0,
+ position=None, postfix=None, unit_divisor=1000, write_bytes=None,
+ lock_args=None, nrows=None, colour=None, delay=0, gui=False,
+ **kwargs):
+ """
+ Parameters
+ ----------
+ iterable : iterable, optional
+ Iterable to decorate with a progressbar.
+ Leave blank to manually manage the updates.
+ desc : str, optional
+ Prefix for the progressbar.
+ total : int or float, optional
+ The number of expected iterations. If unspecified,
+ len(iterable) is used if possible. If float("inf") or as a last
+ resort, only basic progress statistics are displayed
+ (no ETA, no progressbar).
+ If `gui` is True and this parameter needs subsequent updating,
+ specify an initial arbitrarily large positive number,
+ e.g. 9e9.
+ leave : bool, optional
+ If [default: True], keeps all traces of the progressbar
+ upon termination of iteration.
+ If `None`, will leave only if `position` is `0`.
+ file : `io.TextIOWrapper` or `io.StringIO`, optional
+ Specifies where to output the progress messages
+ (default: sys.stderr). Uses `file.write(str)` and `file.flush()`
+ methods. For encoding, see `write_bytes`.
+ ncols : int, optional
+ The width of the entire output message. If specified,
+ dynamically resizes the progressbar to stay within this bound.
+ If unspecified, attempts to use environment width. The
+ fallback is a meter width of 10 and no limit for the counter and
+ statistics. If 0, will not print any meter (only stats).
+ mininterval : float, optional
+ Minimum progress display update interval [default: 0.1] seconds.
+ maxinterval : float, optional
+ Maximum progress display update interval [default: 10] seconds.
+ Automatically adjusts `miniters` to correspond to `mininterval`
+ after long display update lag. Only works if `dynamic_miniters`
+ or monitor thread is enabled.
+ miniters : int or float, optional
+ Minimum progress display update interval, in iterations.
+ If 0 and `dynamic_miniters`, will automatically adjust to equal
+ `mininterval` (more CPU efficient, good for tight loops).
+ If > 0, will skip display of specified number of iterations.
+ Tweak this and `mininterval` to get very efficient loops.
+ If your progress is erratic with both fast and slow iterations
+ (network, skipping items, etc) you should set miniters=1.
+ ascii : bool or str, optional
+ If unspecified or False, use unicode (smooth blocks) to fill
+ the meter. The fallback is to use ASCII characters " 123456789#".
+ disable : bool, optional
+ Whether to disable the entire progressbar wrapper
+ [default: False]. If set to None, disable on non-TTY.
+ unit : str, optional
+ String that will be used to define the unit of each iteration
+ [default: it].
+ unit_scale : bool or int or float, optional
+ If 1 or True, the number of iterations will be reduced/scaled
+ automatically and a metric prefix following the
+ International System of Units standard will be added
+ (kilo, mega, etc.) [default: False]. If any other non-zero
+ number, will scale `total` and `n`.
+ dynamic_ncols : bool, optional
+ If set, constantly alters `ncols` and `nrows` to the
+ environment (allowing for window resizes) [default: False].
+ smoothing : float, optional
+ Exponential moving average smoothing factor for speed estimates
+ (ignored in GUI mode). Ranges from 0 (average speed) to 1
+ (current/instantaneous speed) [default: 0.3].
+ bar_format : str, optional
+ Specify a custom bar string format. May impact performance.
+ [default: '{l_bar}{bar}{r_bar}'], where
+ l_bar='{desc}: {percentage:3.0f}%|' and
+ r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
+ '{rate_fmt}{postfix}]'
+ Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
+ percentage, elapsed, elapsed_s, ncols, nrows, desc, unit,
+ rate, rate_fmt, rate_noinv, rate_noinv_fmt,
+ rate_inv, rate_inv_fmt, postfix, unit_divisor,
+ remaining, remaining_s, eta.
+ Note that a trailing ": " is automatically removed after {desc}
+ if the latter is empty.
+ initial : int or float, optional
+ The initial counter value. Useful when restarting a progress
+ bar [default: 0]. If using float, consider specifying `{n:.3f}`
+ or similar in `bar_format`, or specifying `unit_scale`.
+ position : int, optional
+ Specify the line offset to print this bar (starting from 0).
+ Automatic if unspecified.
+ Useful to manage multiple bars at once (eg, from threads).
+ postfix : dict or *, optional
+ Specify additional stats to display at the end of the bar.
+ Calls `set_postfix(**postfix)` if possible (dict).
+ unit_divisor : float, optional
+ [default: 1000], ignored unless `unit_scale` is True.
+ write_bytes : bool, optional
+ If None (the default) and `file` is unspecified,
+ bytes will be written in Python 2. If `True`, will also write
+ bytes. In all other cases, will default to unicode.
+ lock_args : tuple, optional
+ Passed to `refresh` for intermediate output
+ (initialisation, iterating, and updating).
+ nrows : int, optional
+ The screen height. If specified, hides nested bars outside this
+ bound. If unspecified, attempts to use environment height.
+ The fallback is 20.
+ colour : str, optional
+ Bar colour (e.g. 'green', '#00ff00').
+ delay : float, optional
+ Don't display until [default: 0] seconds have elapsed.
+ gui : bool, optional
+ WARNING: internal parameter - do not use.
+ Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
+ matplotlib animations for a graphical output [default: False].
+
+ Returns
+ -------
+ out : decorated iterator.
+ """
+ if write_bytes is None:
+ write_bytes = file is None and sys.version_info < (3,)
+
+ if file is None:
+ file = sys.stderr
+
+ if write_bytes:
+ # Despite coercing unicode into bytes, py2 sys.std* streams
+ # should have bytes written to them.
+ file = SimpleTextIOWrapper(
+ file, encoding=getattr(file, 'encoding', None) or 'utf-8')
+
+ file = DisableOnWriteError(file, tqdm_instance=self)
+
+ if disable is None and hasattr(file, "isatty") and not file.isatty():
+ disable = True
+
+ if total is None and iterable is not None:
+ try:
+ total = len(iterable)
+ except (TypeError, AttributeError):
+ total = None
+ if total == float("inf"):
+ # Infinite iterations, behave same as unknown
+ total = None
+
+ if disable:
+ self.iterable = iterable
+ self.disable = disable
+ with self._lock:
+ self.pos = self._get_free_pos(self)
+ self._instances.remove(self)
+ self.n = initial
+ self.total = total
+ self.leave = leave
+ return
+
+ if kwargs:
+ self.disable = True
+ with self._lock:
+ self.pos = self._get_free_pos(self)
+ self._instances.remove(self)
+ raise (
+ TqdmDeprecationWarning(
+ "`nested` is deprecated and automated.\n"
+ "Use `position` instead for manual control.\n",
+ fp_write=getattr(file, 'write', sys.stderr.write))
+ if "nested" in kwargs else
+ TqdmKeyError("Unknown argument(s): " + str(kwargs)))
+
+ # Preprocess the arguments
+ if (
+ (ncols is None or nrows is None) and (file in (sys.stderr, sys.stdout))
+ ) or dynamic_ncols: # pragma: no cover
+ if dynamic_ncols:
+ dynamic_ncols = _screen_shape_wrapper()
+ if dynamic_ncols:
+ ncols, nrows = dynamic_ncols(file)
+ else:
+ _dynamic_ncols = _screen_shape_wrapper()
+ if _dynamic_ncols:
+ _ncols, _nrows = _dynamic_ncols(file)
+ if ncols is None:
+ ncols = _ncols
+ if nrows is None:
+ nrows = _nrows
+
+ if miniters is None:
+ miniters = 0
+ dynamic_miniters = True
+ else:
+ dynamic_miniters = False
+
+ if mininterval is None:
+ mininterval = 0
+
+ if maxinterval is None:
+ maxinterval = 0
+
+ if ascii is None:
+ ascii = not _supports_unicode(file)
+
+ if bar_format and ascii is not True and not _is_ascii(ascii):
+ # Convert bar format into unicode since terminal uses unicode
+ bar_format = _unicode(bar_format)
+
+ if smoothing is None:
+ smoothing = 0
+
+ # Store the arguments
+ self.iterable = iterable
+ self.desc = desc or ''
+ self.total = total
+ self.leave = leave
+ self.fp = file
+ self.ncols = ncols
+ self.nrows = nrows
+ self.mininterval = mininterval
+ self.maxinterval = maxinterval
+ self.miniters = miniters
+ self.dynamic_miniters = dynamic_miniters
+ self.ascii = ascii
+ self.disable = disable
+ self.unit = unit
+ self.unit_scale = unit_scale
+ self.unit_divisor = unit_divisor
+ self.initial = initial
+ self.lock_args = lock_args
+ self.delay = delay
+ self.gui = gui
+ self.dynamic_ncols = dynamic_ncols
+ self.smoothing = smoothing
+ self._ema_dn = EMA(smoothing)
+ self._ema_dt = EMA(smoothing)
+ self._ema_miniters = EMA(smoothing)
+ self.bar_format = bar_format
+ self.postfix = None
+ self.colour = colour
+ self._time = time
+ if postfix:
+ try:
+ self.set_postfix(refresh=False, **postfix)
+ except TypeError:
+ self.postfix = postfix
+
+ # Init the iterations counters
+ self.last_print_n = initial
+ self.n = initial
+
+ # if nested, at initial sp() call we replace '\r' by '\n' to
+ # not overwrite the outer progress bar
+ with self._lock:
+ # mark fixed positions as negative
+ self.pos = self._get_free_pos(self) if position is None else -position
+
+ if not gui:
+ # Initialize the screen printer
+ self.sp = self.status_printer(self.fp)
+ if delay <= 0:
+ self.refresh(lock_args=self.lock_args)
+
+ # Init the time counter
+ self.last_print_t = self._time()
+ # NB: Avoid race conditions by setting start_t at the very end of init
+ self.start_t = self.last_print_t
+
+ def __bool__(self):
+ if self.total is not None:
+ return self.total > 0
+ if self.iterable is None:
+ raise TypeError('bool() undefined when iterable == total == None')
+ return bool(self.iterable)
+
+ def __nonzero__(self):
+ return self.__bool__()
+
+ def __len__(self):
+ return (
+ self.total if self.iterable is None
+ else self.iterable.shape[0] if hasattr(self.iterable, "shape")
+ else len(self.iterable) if hasattr(self.iterable, "__len__")
+ else self.iterable.__length_hint__() if hasattr(self.iterable, "__length_hint__")
+ else getattr(self, "total", None))
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ try:
+ self.close()
+ except AttributeError:
+ # maybe eager thread cleanup upon external error
+ if (exc_type, exc_value, traceback) == (None, None, None):
+ raise
+ warn("AttributeError ignored", TqdmWarning, stacklevel=2)
+
+ def __del__(self):
+ self.close()
+
+ def __str__(self):
+ return self.format_meter(**self.format_dict)
+
+ @property
+ def _comparable(self):
+ return abs(getattr(self, "pos", 1 << 31))
+
+ def __hash__(self):
+ return id(self)
+
+ def __iter__(self):
+ """Backward-compatibility to use: for x in tqdm(iterable)"""
+
+ # Inlining instance variables as locals (speed optimisation)
+ iterable = self.iterable
+
+ # If the bar is disabled, then just walk the iterable
+ # (note: keep this check outside the loop for performance)
+ if self.disable:
+ for obj in iterable:
+ yield obj
+ return
+
+ mininterval = self.mininterval
+ last_print_t = self.last_print_t
+ last_print_n = self.last_print_n
+ min_start_t = self.start_t + self.delay
+ n = self.n
+ time = self._time
+
+ try:
+ for obj in iterable:
+ yield obj
+ # Update and possibly print the progressbar.
+ # Note: does not call self.update(1) for speed optimisation.
+ n += 1
+
+ if n - last_print_n >= self.miniters:
+ cur_t = time()
+ dt = cur_t - last_print_t
+ if dt >= mininterval and cur_t >= min_start_t:
+ self.update(n - last_print_n)
+ last_print_n = self.last_print_n
+ last_print_t = self.last_print_t
+ finally:
+ self.n = n
+ self.close()
+
+ def update(self, n=1):
+ """
+ Manually update the progress bar, useful for streams
+ such as reading files.
+ E.g.:
+ >>> t = tqdm(total=filesize) # Initialise
+ >>> for current_buffer in stream:
+ ... ...
+ ... t.update(len(current_buffer))
+ >>> t.close()
+ The last line is highly recommended, though it may be unnecessary if
+ `t.update()` is called in such a way that `filesize` is exactly
+ reached and printed.
+
+ Parameters
+ ----------
+ n : int or float, optional
+ Increment to add to the internal counter of iterations
+ [default: 1]. If using float, consider specifying `{n:.3f}`
+ or similar in `bar_format`, or specifying `unit_scale`.
+
+ Returns
+ -------
+ out : bool or None
+ True if a `display()` was triggered.
+ """
+ if self.disable:
+ return
+
+ if n < 0:
+ self.last_print_n += n # for auto-refresh logic to work
+ self.n += n
+
+ # check counter first to reduce calls to time()
+ if self.n - self.last_print_n >= self.miniters:
+ cur_t = self._time()
+ dt = cur_t - self.last_print_t
+ if dt >= self.mininterval and cur_t >= self.start_t + self.delay:
+ cur_t = self._time()
+ dn = self.n - self.last_print_n # >= n
+ if self.smoothing and dt and dn:
+ # EMA (not just overall average)
+ self._ema_dn(dn)
+ self._ema_dt(dt)
+ self.refresh(lock_args=self.lock_args)
+ if self.dynamic_miniters:
+ # If no `miniters` was specified, adjust automatically to the
+ # maximum iteration rate seen so far between two prints.
+ # e.g.: After running `tqdm.update(5)`, subsequent
+ # calls to `tqdm.update()` will only cause an update after
+ # at least 5 more iterations.
+ if self.maxinterval and dt >= self.maxinterval:
+ self.miniters = dn * (self.mininterval or self.maxinterval) / dt
+ elif self.smoothing:
+ # EMA miniters update
+ self.miniters = self._ema_miniters(
+ dn * (self.mininterval / dt if self.mininterval and dt
+ else 1))
+ else:
+ # max iters between two prints
+ self.miniters = max(self.miniters, dn)
+
+ # Store old values for next call
+ self.last_print_n = self.n
+ self.last_print_t = cur_t
+ return True
+
+ def close(self):
+ """Cleanup and (if leave=False) close the progressbar."""
+ if self.disable:
+ return
+
+ # Prevent multiple closures
+ self.disable = True
+
+ # decrement instance pos and remove from internal set
+ pos = abs(self.pos)
+ self._decr_instances(self)
+
+ if self.last_print_t < self.start_t + self.delay:
+ # haven't ever displayed; nothing to clear
+ return
+
+ # GUI mode
+ if getattr(self, 'sp', None) is None:
+ return
+
+ # annoyingly, _supports_unicode isn't good enough
+ def fp_write(s):
+ self.fp.write(_unicode(s))
+
+ try:
+ fp_write('')
+ except ValueError as e:
+ if 'closed' in str(e):
+ return
+ raise # pragma: no cover
+
+ leave = pos == 0 if self.leave is None else self.leave
+
+ with self._lock:
+ if leave:
+ # stats for overall rate (no weighted average)
+ self._ema_dt = lambda: None
+ self.display(pos=0)
+ fp_write('\n')
+ else:
+ # clear previous display
+ if self.display(msg='', pos=pos) and not pos:
+ fp_write('\r')
+
+ def clear(self, nolock=False):
+ """Clear current bar display."""
+ if self.disable:
+ return
+
+ if not nolock:
+ self._lock.acquire()
+ pos = abs(self.pos)
+ if pos < (self.nrows or 20):
+ self.moveto(pos)
+ self.sp('')
+ self.fp.write('\r') # place cursor back at the beginning of line
+ self.moveto(-pos)
+ if not nolock:
+ self._lock.release()
+
+ def refresh(self, nolock=False, lock_args=None):
+ """
+ Force refresh the display of this bar.
+
+ Parameters
+ ----------
+ nolock : bool, optional
+ If `True`, does not lock.
+ If [default: `False`]: calls `acquire()` on internal lock.
+ lock_args : tuple, optional
+ Passed to internal lock's `acquire()`.
+ If specified, will only `display()` if `acquire()` returns `True`.
+ """
+ if self.disable:
+ return
+
+ if not nolock:
+ if lock_args:
+ if not self._lock.acquire(*lock_args):
+ return False
+ else:
+ self._lock.acquire()
+ self.display()
+ if not nolock:
+ self._lock.release()
+ return True
+
+ def unpause(self):
+ """Restart tqdm timer from last print time."""
+ if self.disable:
+ return
+ cur_t = self._time()
+ self.start_t += cur_t - self.last_print_t
+ self.last_print_t = cur_t
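+
+ # Usage sketch (illustrative, not part of the upstream source):
+ # exclude a blocking pause from the rate/ETA estimates:
+ #   t = tqdm(total=100)
+ #   # ... some iterations ...
+ #   input("press enter to continue")  # wall time spent waiting here...
+ #   t.unpause()                       # ...is not counted in the rate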
+
+ def reset(self, total=None):
+ """
+ Resets to 0 iterations for repeated use.
+
+ Consider combining with `leave=True`.
+
+ Parameters
+ ----------
+ total : int or float, optional. Total to use for the new bar.
+ """
+ self.n = 0
+ if total is not None:
+ self.total = total
+ if self.disable:
+ return
+ self.last_print_n = 0
+ self.last_print_t = self.start_t = self._time()
+ self._ema_dn = EMA(self.smoothing)
+ self._ema_dt = EMA(self.smoothing)
+ self._ema_miniters = EMA(self.smoothing)
+ self.refresh()
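+
+ # Usage sketch (illustrative, not part of the upstream source):
+ # reuse a single bar across epochs instead of creating new ones:
+ #   pbar = tqdm(total=100, leave=True)
+ #   for epoch in range(3):
+ #       for _ in range(100):
+ #           pbar.update()
+ #       pbar.reset()
+ #   pbar.close()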
+
+ def set_description(self, desc=None, refresh=True):
+ """
+ Set/modify description of the progress bar.
+
+ Parameters
+ ----------
+ desc : str, optional
+ refresh : bool, optional
+ Forces refresh [default: True].
+ """
+ self.desc = desc + ': ' if desc else ''
+ if refresh:
+ self.refresh()
+
+ def set_description_str(self, desc=None, refresh=True):
+ """Set/modify description without ': ' appended."""
+ self.desc = desc or ''
+ if refresh:
+ self.refresh()
+
+ def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
+ """
+ Set/modify postfix (additional stats)
+ with automatic formatting based on datatype.
+
+ Parameters
+ ----------
+ ordered_dict : dict or OrderedDict, optional
+ refresh : bool, optional
+ Forces refresh [default: True].
+ kwargs : dict, optional
+ """
+ # Sort in alphabetical order to be more deterministic
+ postfix = OrderedDict([] if ordered_dict is None else ordered_dict)
+ for key in sorted(kwargs.keys()):
+ postfix[key] = kwargs[key]
+ # Preprocess stats according to datatype
+ for key in postfix.keys():
+ # Number: limit the length of the string
+ if isinstance(postfix[key], Number):
+ postfix[key] = self.format_num(postfix[key])
+ # Else for any other type, try to get the string conversion
+ elif not isinstance(postfix[key], _basestring):
+ postfix[key] = str(postfix[key])
+ # Else if it's a string, don't need to preprocess anything
+ # Stitch together to get the final postfix
+ self.postfix = ', '.join(key + '=' + postfix[key].strip()
+ for key in postfix.keys())
+ if refresh:
+ self.refresh()
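+
+ # Usage sketch (illustrative, not part of the upstream source):
+ # numbers are compacted via `format_num`, other values are str()'d,
+ # and keyword keys are sorted alphabetically:
+ #   t.set_postfix(loss=0.123456789, stage="train")
+ #   # -> postfix rendered as: loss=0.123, stage=train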
+
+ def set_postfix_str(self, s='', refresh=True):
+ """
+ Postfix without dictionary expansion, similar to prefix handling.
+ """
+ self.postfix = str(s)
+ if refresh:
+ self.refresh()
+
+ def moveto(self, n):
+ # TODO: private method
+ self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
+ self.fp.flush()
+
+ @property
+ def format_dict(self):
+ """Public API for read-only member access."""
+ if self.disable and not hasattr(self, 'unit'):
+ return defaultdict(lambda: None, {
+ 'n': self.n, 'total': self.total, 'elapsed': 0, 'unit': 'it'})
+ if self.dynamic_ncols:
+ self.ncols, self.nrows = self.dynamic_ncols(self.fp)
+ return {
+ 'n': self.n, 'total': self.total,
+ 'elapsed': self._time() - self.start_t if hasattr(self, 'start_t') else 0,
+ 'ncols': self.ncols, 'nrows': self.nrows, 'prefix': self.desc,
+ 'ascii': self.ascii, 'unit': self.unit, 'unit_scale': self.unit_scale,
+ 'rate': self._ema_dn() / self._ema_dt() if self._ema_dt() else None,
+ 'bar_format': self.bar_format, 'postfix': self.postfix,
+ 'unit_divisor': self.unit_divisor, 'initial': self.initial,
+ 'colour': self.colour}
+
+ def display(self, msg=None, pos=None):
+ """
+ Use `self.sp` to display `msg` in the specified `pos`.
+
+ Consider overloading this function when inheriting to use e.g.:
+ `self.some_frontend(**self.format_dict)` instead of `self.sp`.
+
+ Parameters
+ ----------
+ msg : str, optional. What to display (default: `repr(self)`).
+ pos : int, optional. Position to `moveto`
+ (default: `abs(self.pos)`).
+ """
+ if pos is None:
+ pos = abs(self.pos)
+
+ nrows = self.nrows or 20
+ if pos >= nrows - 1:
+ if pos >= nrows:
+ return False
+ if msg or msg is None: # override at `nrows - 1`
+ msg = " ... (more hidden) ..."
+
+ if not hasattr(self, "sp"):
+ raise TqdmDeprecationWarning(
+ "Please use `tqdm.gui.tqdm(...)`"
+ " instead of `tqdm(..., gui=True)`\n",
+ fp_write=getattr(self.fp, 'write', sys.stderr.write))
+
+ if pos:
+ self.moveto(pos)
+ self.sp(self.__str__() if msg is None else msg)
+ if pos:
+ self.moveto(-pos)
+ return True
+
+ @classmethod
+ @contextmanager
+ def wrapattr(cls, stream, method, total=None, bytes=True, **tqdm_kwargs):
+ """
+ stream : file-like object.
+ method : str, "read" or "write". The result of `read()` and
+ the first argument of `write()` should have a `len()`.
+
+ >>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
+ ... while True:
+ ... chunk = fobj.read(chunk_size)
+ ... if not chunk:
+ ... break
+ """
+ with cls(total=total, **tqdm_kwargs) as t:
+ if bytes:
+ t.unit = "B"
+ t.unit_scale = True
+ t.unit_divisor = 1024
+ yield CallbackIOWrapper(t.update, stream, method)
+
+
+def trange(*args, **kwargs):
+ """
+ A shortcut for `tqdm(xrange(*args), **kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+ return tqdm(_range(*args), **kwargs)
diff --git a/third_party/python/tqdm/tqdm/tk.py b/third_party/python/tqdm/tqdm/tk.py
new file mode 100644
index 0000000000..92adb51db3
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/tk.py
@@ -0,0 +1,207 @@
+"""
+Tkinter GUI progressbar decorator for iterators.
+
+Usage:
+>>> from tqdm.tk import trange, tqdm
+>>> for i in trange(10):
+... ...
+"""
+from __future__ import absolute_import, division
+
+import re
+import sys
+from warnings import warn
+
+try:
+ import tkinter
+ import tkinter.ttk as ttk
+except ImportError:
+ import Tkinter as tkinter
+ import ttk as ttk
+
+from .std import TqdmExperimentalWarning, TqdmWarning
+from .std import tqdm as std_tqdm
+from .utils import _range
+
+__author__ = {"github.com/": ["richardsheridan", "casperdcl"]}
+__all__ = ['tqdm_tk', 'ttkrange', 'tqdm', 'trange']
+
+
+class tqdm_tk(std_tqdm): # pragma: no cover
+ """
+ Experimental Tkinter GUI version of tqdm!
+
+ Note: Window interactivity suffers if `tqdm_tk` is not running within
+ a Tkinter mainloop and values are generated infrequently. In this case,
+ consider calling `tqdm_tk.refresh()` frequently in the Tk thread.
+ """
+
+ # TODO: @classmethod: write()?
+
+ def __init__(self, *args, **kwargs):
+ """
+ This class accepts the following parameters *in addition* to
+ the parameters accepted by `tqdm`.
+
+ Parameters
+ ----------
+ grab : bool, optional
+ Grab the input across all windows of the process.
+ tk_parent : `tkinter.Wm`, optional
+ Parent Tk window.
+ cancel_callback : Callable, optional
+ Create a cancel button and set `cancel_callback` to be called
+ when the cancel or window close button is clicked.
+ """
+ kwargs = kwargs.copy()
+ kwargs['gui'] = True
+ # convert disable = None to False
+ kwargs['disable'] = bool(kwargs.get('disable', False))
+ self._warn_leave = 'leave' in kwargs
+ grab = kwargs.pop('grab', False)
+ tk_parent = kwargs.pop('tk_parent', None)
+ self._cancel_callback = kwargs.pop('cancel_callback', None)
+ super(tqdm_tk, self).__init__(*args, **kwargs)
+
+ if self.disable:
+ return
+
+ if tk_parent is None: # Discover parent widget
+ try:
+ tk_parent = tkinter._default_root
+ except AttributeError:
+ raise AttributeError(
+ "`tk_parent` required when using `tkinter.NoDefaultRoot()`")
+ if tk_parent is None: # use new default root window as display
+ self._tk_window = tkinter.Tk()
+ else: # some other windows already exist
+ self._tk_window = tkinter.Toplevel()
+ else:
+ self._tk_window = tkinter.Toplevel(tk_parent)
+
+ warn("GUI is experimental/alpha", TqdmExperimentalWarning, stacklevel=2)
+ self._tk_dispatching = self._tk_dispatching_helper()
+
+ self._tk_window.protocol("WM_DELETE_WINDOW", self.cancel)
+ self._tk_window.wm_title(self.desc)
+ self._tk_window.wm_attributes("-topmost", 1)
+ self._tk_window.after(0, lambda: self._tk_window.wm_attributes("-topmost", 0))
+ self._tk_n_var = tkinter.DoubleVar(self._tk_window, value=0)
+ self._tk_text_var = tkinter.StringVar(self._tk_window)
+ pbar_frame = ttk.Frame(self._tk_window, padding=5)
+ pbar_frame.pack()
+ _tk_label = ttk.Label(pbar_frame, textvariable=self._tk_text_var,
+ wraplength=600, anchor="center", justify="center")
+ _tk_label.pack()
+ self._tk_pbar = ttk.Progressbar(
+ pbar_frame, variable=self._tk_n_var, length=450)
+ if self.total is not None:
+ self._tk_pbar.configure(maximum=self.total)
+ else:
+ self._tk_pbar.configure(mode="indeterminate")
+ self._tk_pbar.pack()
+ if self._cancel_callback is not None:
+ _tk_button = ttk.Button(pbar_frame, text="Cancel", command=self.cancel)
+ _tk_button.pack()
+ if grab:
+ self._tk_window.grab_set()
+
+ def close(self):
+ if self.disable:
+ return
+
+ self.disable = True
+
+ with self.get_lock():
+ self._instances.remove(self)
+
+ def _close():
+ self._tk_window.after('idle', self._tk_window.destroy)
+ if not self._tk_dispatching:
+ self._tk_window.update()
+
+ self._tk_window.protocol("WM_DELETE_WINDOW", _close)
+
+ # if leave is set but we are self-dispatching, the left window is
+ # totally unresponsive unless the user manually dispatches
+ if not self.leave:
+ _close()
+ elif not self._tk_dispatching:
+ if self._warn_leave:
+ warn("leave flag ignored if not in tkinter mainloop",
+ TqdmWarning, stacklevel=2)
+ _close()
+
+ def clear(self, *_, **__):
+ pass
+
+ def display(self, *_, **__):
+ self._tk_n_var.set(self.n)
+ d = self.format_dict
+ # remove {bar}
+ d['bar_format'] = (d['bar_format'] or "{l_bar}<bar/>{r_bar}").replace(
+ "{bar}", "<bar/>")
+ msg = self.format_meter(**d)
+ if '<bar/>' in msg:
+ msg = "".join(re.split(r'\|?<bar/>\|?', msg, 1))
+ self._tk_text_var.set(msg)
+ if not self._tk_dispatching:
+ self._tk_window.update()
+
+ def set_description(self, desc=None, refresh=True):
+ self.set_description_str(desc, refresh)
+
+ def set_description_str(self, desc=None, refresh=True):
+ self.desc = desc
+ if not self.disable:
+ self._tk_window.wm_title(desc)
+ if refresh and not self._tk_dispatching:
+ self._tk_window.update()
+
+ def cancel(self):
+ """
+ Calls `cancel_callback()` followed by `close()`
+ when the close/cancel buttons are clicked.
+ """
+ if self._cancel_callback is not None:
+ self._cancel_callback()
+ self.close()
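+
+ # Usage sketch (illustrative, not part of the upstream source): a
+ # cancellable task driven without an explicit Tk mainloop:
+ #   cancelled = []
+ #   bar = tqdm_tk(total=100,
+ #                 cancel_callback=lambda: cancelled.append(True))
+ #   for _ in range(100):
+ #       if cancelled:
+ #           break
+ #       bar.update()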
+
+ def reset(self, total=None):
+ """
+ Resets to 0 iterations for repeated use.
+
+ Parameters
+ ----------
+ total : int or float, optional. Total to use for the new bar.
+ """
+ if hasattr(self, '_tk_pbar'):
+ if total is None:
+ self._tk_pbar.configure(maximum=100, mode="indeterminate")
+ else:
+ self._tk_pbar.configure(maximum=total, mode="determinate")
+ super(tqdm_tk, self).reset(total=total)
+
+ @staticmethod
+ def _tk_dispatching_helper():
+ """determine if Tkinter mainloop is dispatching events"""
+ codes = {tkinter.mainloop.__code__, tkinter.Misc.mainloop.__code__}
+ for frame in sys._current_frames().values():
+ while frame:
+ if frame.f_code in codes:
+ return True
+ frame = frame.f_back
+ return False
+
+
+def ttkrange(*args, **kwargs):
+ """
+ A shortcut for `tqdm.tk.tqdm(xrange(*args), **kwargs)`.
+ On Python3+, `range` is used instead of `xrange`.
+ """
+ return tqdm_tk(_range(*args), **kwargs)
+
+
+# Aliases
+tqdm = tqdm_tk
+trange = ttkrange
diff --git a/third_party/python/tqdm/tqdm/tqdm.1 b/third_party/python/tqdm/tqdm/tqdm.1
new file mode 100644
index 0000000000..0533198ca5
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/tqdm.1
@@ -0,0 +1,316 @@
+.\" Automatically generated by Pandoc 1.19.2
+.\"
+.TH "TQDM" "1" "2015\-2021" "tqdm User Manuals" ""
+.hy
+.SH NAME
+.PP
+tqdm \- fast, extensible progress bar for Python and CLI
+.SH SYNOPSIS
+.PP
+tqdm [\f[I]options\f[]]
+.SH DESCRIPTION
+.PP
+See <https://github.com/tqdm/tqdm>.
+Can be used as a pipe:
+.IP
+.nf
+\f[C]
+$\ #\ count\ lines\ of\ code
+$\ cat\ *.py\ |\ tqdm\ |\ wc\ \-l
+327it\ [00:00,\ 981773.38it/s]
+327
+
+$\ #\ find\ all\ files
+$\ find\ .\ \-name\ "*.py"\ |\ tqdm\ |\ wc\ \-l
+432it\ [00:00,\ 833842.30it/s]
+432
+
+#\ ...\ and\ more\ info
+$\ find\ .\ \-name\ \[aq]*.py\[aq]\ \-exec\ wc\ \-l\ \\{}\ \\;\ \\
+\ \ |\ tqdm\ \-\-total\ 432\ \-\-unit\ files\ \-\-desc\ counting\ \\
+\ \ |\ awk\ \[aq]{\ sum\ +=\ $1\ };\ END\ {\ print\ sum\ }\[aq]
+counting:\ 100%|█████████|\ 432/432\ [00:00<00:00,\ 794361.83files/s]
+131998
+\f[]
+.fi
+.SH OPTIONS
+.TP
+.B \-h, \-\-help
+Print this help and exit.
+.RS
+.RE
+.TP
+.B \-v, \-\-version
+Print version and exit.
+.RS
+.RE
+.TP
+.B \-\-desc=\f[I]desc\f[]
+str, optional.
+Prefix for the progressbar.
+.RS
+.RE
+.TP
+.B \-\-total=\f[I]total\f[]
+int or float, optional.
+The number of expected iterations.
+If unspecified, len(iterable) is used if possible.
+If float("inf") or as a last resort, only basic progress statistics are
+displayed (no ETA, no progressbar).
+If \f[C]gui\f[] is True and this parameter needs subsequent updating,
+specify an initial arbitrarily large positive number, e.g.
+9e9.
+.RS
+.RE
+.TP
+.B \-\-leave
+bool, optional.
+If [default: True], keeps all traces of the progressbar upon termination
+of iteration.
+If \f[C]None\f[], will leave only if \f[C]position\f[] is \f[C]0\f[].
+.RS
+.RE
+.TP
+.B \-\-ncols=\f[I]ncols\f[]
+int, optional.
+The width of the entire output message.
+If specified, dynamically resizes the progressbar to stay within this
+bound.
+If unspecified, attempts to use environment width.
+The fallback is a meter width of 10 and no limit for the counter and
+statistics.
+If 0, will not print any meter (only stats).
+.RS
+.RE
+.TP
+.B \-\-mininterval=\f[I]mininterval\f[]
+float, optional.
+Minimum progress display update interval [default: 0.1] seconds.
+.RS
+.RE
+.TP
+.B \-\-maxinterval=\f[I]maxinterval\f[]
+float, optional.
+Maximum progress display update interval [default: 10] seconds.
+Automatically adjusts \f[C]miniters\f[] to correspond to
+\f[C]mininterval\f[] after long display update lag.
+Only works if \f[C]dynamic_miniters\f[] or monitor thread is enabled.
+.RS
+.RE
+.TP
+.B \-\-miniters=\f[I]miniters\f[]
+int or float, optional.
+Minimum progress display update interval, in iterations.
+If 0 and \f[C]dynamic_miniters\f[], will automatically adjust to equal
+\f[C]mininterval\f[] (more CPU efficient, good for tight loops).
+If > 0, will skip display of specified number of iterations.
+Tweak this and \f[C]mininterval\f[] to get very efficient loops.
+If your progress is erratic with both fast and slow iterations (network,
+skipping items, etc) you should set miniters=1.
+.RS
+.RE
+.TP
+.B \-\-ascii=\f[I]ascii\f[]
+bool or str, optional.
+If unspecified or False, use unicode (smooth blocks) to fill the meter.
+The fallback is to use ASCII characters " 123456789#".
+.RS
+.RE
+.TP
+.B \-\-disable
+bool, optional.
+Whether to disable the entire progressbar wrapper [default: False].
+If set to None, disable on non\-TTY.
+.RS
+.RE
+.TP
+.B \-\-unit=\f[I]unit\f[]
+str, optional.
+String that will be used to define the unit of each iteration [default:
+it].
+.RS
+.RE
+.TP
+.B \-\-unit\-scale=\f[I]unit_scale\f[]
+bool or int or float, optional.
+If 1 or True, the number of iterations will be reduced/scaled
+automatically and a metric prefix following the International System of
+Units standard will be added (kilo, mega, etc.) [default: False].
+If any other non\-zero number, will scale \f[C]total\f[] and \f[C]n\f[].
+.RS
+.RE
+.TP
+.B \-\-dynamic\-ncols
+bool, optional.
+If set, constantly alters \f[C]ncols\f[] and \f[C]nrows\f[] to the
+environment (allowing for window resizes) [default: False].
+.RS
+.RE
+.TP
+.B \-\-smoothing=\f[I]smoothing\f[]
+float, optional.
+Exponential moving average smoothing factor for speed estimates (ignored
+in GUI mode).
+Ranges from 0 (average speed) to 1 (current/instantaneous speed)
+[default: 0.3].
+.RS
+.RE
+.TP
+.B \-\-bar\-format=\f[I]bar_format\f[]
+str, optional.
+Specify a custom bar string format.
+May impact performance.
+[default: \[aq]{l_bar}{bar}{r_bar}\[aq]], where l_bar=\[aq]{desc}:
+{percentage:3.0f}%|\[aq] and r_bar=\[aq]| {n_fmt}/{total_fmt}
+[{elapsed}<{remaining}, \[aq] \[aq]{rate_fmt}{postfix}]\[aq] Possible
+vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt, percentage,
+elapsed, elapsed_s, ncols, nrows, desc, unit, rate, rate_fmt,
+rate_noinv, rate_noinv_fmt, rate_inv, rate_inv_fmt, postfix,
+unit_divisor, remaining, remaining_s, eta.
+Note that a trailing ": " is automatically removed after {desc} if the
+latter is empty.
+.RS
+.RE
+.TP
+.B \-\-initial=\f[I]initial\f[]
+int or float, optional.
+The initial counter value.
+Useful when restarting a progress bar [default: 0].
+If using float, consider specifying \f[C]{n:.3f}\f[] or similar in
+\f[C]bar_format\f[], or specifying \f[C]unit_scale\f[].
+.RS
+.RE
+.TP
+.B \-\-position=\f[I]position\f[]
+int, optional.
+Specify the line offset to print this bar (starting from 0). Automatic if
+unspecified.
+Useful to manage multiple bars at once (eg, from threads).
+.RS
+.RE
+.TP
+.B \-\-postfix=\f[I]postfix\f[]
+dict or *, optional.
+Specify additional stats to display at the end of the bar.
+Calls \f[C]set_postfix(**postfix)\f[] if possible (dict).
+.RS
+.RE
+.TP
+.B \-\-unit\-divisor=\f[I]unit_divisor\f[]
+float, optional.
+[default: 1000], ignored unless \f[C]unit_scale\f[] is True.
+.RS
+.RE
+.TP
+.B \-\-write\-bytes
+bool, optional.
+If None (the default) and \f[C]file\f[] is unspecified, bytes will be
+written in Python 2.
+If \f[C]True\f[], will also write bytes.
+In all other cases, will default to unicode.
+.RS
+.RE
+.TP
+.B \-\-lock\-args=\f[I]lock_args\f[]
+tuple, optional.
+Passed to \f[C]refresh\f[] for intermediate output (initialisation,
+iterating, and updating).
+.RS
+.RE
+.TP
+.B \-\-nrows=\f[I]nrows\f[]
+int, optional.
+The screen height.
+If specified, hides nested bars outside this bound.
+If unspecified, attempts to use environment height.
+The fallback is 20.
+.RS
+.RE
+.TP
+.B \-\-colour=\f[I]colour\f[]
+str, optional.
+Bar colour (e.g.
+\[aq]green\[aq], \[aq]#00ff00\[aq]).
+.RS
+.RE
+.TP
+.B \-\-delay=\f[I]delay\f[]
+float, optional.
+Don\[aq]t display until [default: 0] seconds have elapsed.
+.RS
+.RE
+.TP
+.B \-\-delim=\f[I]delim\f[]
+chr, optional.
+Delimiting character [default: \[aq]\\n\[aq]].
+Use \[aq]\\0\[aq] for null.
+N.B.: on Windows systems, Python converts \[aq]\\n\[aq] to
+\[aq]\\r\\n\[aq].
+.RS
+.RE
+.TP
+.B \-\-buf\-size=\f[I]buf_size\f[]
+int, optional.
+String buffer size in bytes [default: 256] used when \f[C]delim\f[] is
+specified.
+.RS
+.RE
+.TP
+.B \-\-bytes
+bool, optional.
+If true, will count bytes, ignore \f[C]delim\f[], and default
+\f[C]unit_scale\f[] to True, \f[C]unit_divisor\f[] to 1024, and
+\f[C]unit\f[] to \[aq]B\[aq].
+.RS
+.RE
+.TP
+.B \-\-tee
+bool, optional.
+If true, passes \f[C]stdin\f[] to both \f[C]stderr\f[] and
+\f[C]stdout\f[].
+.RS
+.RE
+.TP
+.B \-\-update
+bool, optional.
+If true, will treat input as newly elapsed iterations, i.e.
+numbers to pass to \f[C]update()\f[].
+Note that this is slow (~2e5 it/s) since every input must be decoded as
+a number.
+.RS
+.RE
+.TP
+.B \-\-update\-to
+bool, optional.
+If true, will treat input as total elapsed iterations, i.e.
+numbers to assign to \f[C]self.n\f[].
+Note that this is slow (~2e5 it/s) since every input must be decoded as
+a number.
+.RS
+.RE
+.TP
+.B \-\-null
+bool, optional.
+If true, will discard input (no stdout).
+.RS
+.RE
+.TP
+.B \-\-manpath=\f[I]manpath\f[]
+str, optional.
+Directory in which to install tqdm man pages.
+.RS
+.RE
+.TP
+.B \-\-comppath=\f[I]comppath\f[]
+str, optional.
+Directory in which to place tqdm completion.
+.RS
+.RE
+.TP
+.B \-\-log=\f[I]log\f[]
+str, optional.
+CRITICAL|FATAL|ERROR|WARN(ING)|[default: \[aq]INFO\[aq]]|DEBUG|NOTSET.
+.RS
+.RE
+.SH AUTHORS
+tqdm developers <https://github.com/tqdm>.
diff --git a/third_party/python/tqdm/tqdm/utils.py b/third_party/python/tqdm/tqdm/utils.py
new file mode 100644
index 0000000000..0632b8dd05
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/utils.py
@@ -0,0 +1,354 @@
+"""
+General helpers required for `tqdm.std`.
+"""
+import os
+import re
+import sys
+from functools import wraps
+from warnings import warn
+from weakref import proxy
+
+# py2/3 compat
+try:
+ _range = xrange
+except NameError:
+ _range = range
+
+try:
+ _unich = unichr
+except NameError:
+ _unich = chr
+
+try:
+ _unicode = unicode
+except NameError:
+ _unicode = str
+
+try:
+ _basestring = basestring
+except NameError:
+ _basestring = str
+
+CUR_OS = sys.platform
+IS_WIN = any(CUR_OS.startswith(i) for i in ['win32', 'cygwin'])
+IS_NIX = any(CUR_OS.startswith(i) for i in ['aix', 'linux', 'darwin'])
+RE_ANSI = re.compile(r"\x1b\[[;\d]*[A-Za-z]")
+
+try:
+ if IS_WIN:
+ import colorama
+ else:
+ raise ImportError
+except ImportError:
+ colorama = None
+else:
+ try:
+ colorama.init(strip=False)
+ except TypeError:
+ colorama.init()
+
+
+class FormatReplace(object):
+ """
+ >>> a = FormatReplace('something')
+ >>> "{:5d}".format(a)
+ 'something'
+ """ # NOQA: P102
+ def __init__(self, replace=''):
+ self.replace = replace
+ self.format_called = 0
+
+ def __format__(self, _):
+ self.format_called += 1
+ return self.replace
+
+
+class Comparable(object):
+ """Assumes child has self._comparable attr/@property"""
+ def __lt__(self, other):
+ return self._comparable < other._comparable
+
+ def __le__(self, other):
+ return (self < other) or (self == other)
+
+ def __eq__(self, other):
+ return self._comparable == other._comparable
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __gt__(self, other):
+ return not self <= other
+
+ def __ge__(self, other):
+ return not self < other
+
+
+class ObjectWrapper(object):
+ def __getattr__(self, name):
+ return getattr(self._wrapped, name)
+
+ def __setattr__(self, name, value):
+ return setattr(self._wrapped, name, value)
+
+ def wrapper_getattr(self, name):
+ """Actual `self.getattr` rather than self._wrapped.getattr"""
+ try:
+ return object.__getattr__(self, name)
+ except AttributeError: # py2
+ return getattr(self, name)
+
+ def wrapper_setattr(self, name, value):
+ """Actual `self.setattr` rather than self._wrapped.setattr"""
+ return object.__setattr__(self, name, value)
+
+ def __init__(self, wrapped):
+ """
+ Thin wrapper around a given object
+ """
+ self.wrapper_setattr('_wrapped', wrapped)
+
+
+class SimpleTextIOWrapper(ObjectWrapper):
+ """
+ Change only `.write()` of the wrapped object by encoding the passed
+ value and passing the result to the wrapped object's `.write()` method.
+ """
+ # pylint: disable=too-few-public-methods
+ def __init__(self, wrapped, encoding):
+ super(SimpleTextIOWrapper, self).__init__(wrapped)
+ self.wrapper_setattr('encoding', encoding)
+
+ def write(self, s):
+ """
+ Encode `s` and pass to the wrapped object's `.write()` method.
+ """
+ return self._wrapped.write(s.encode(self.wrapper_getattr('encoding')))
+
+ def __eq__(self, other):
+ return self._wrapped == getattr(other, '_wrapped', other)
+
+
+class DisableOnWriteError(ObjectWrapper):
+ """
+ Disable the given `tqdm_instance` upon `write()` or `flush()` errors.
+ """
+ @staticmethod
+ def disable_on_exception(tqdm_instance, func):
+ """
+ Quietly set `tqdm_instance.miniters=inf` if `func` raises `errno=5`.
+ """
+ tqdm_instance = proxy(tqdm_instance)
+
+ def inner(*args, **kwargs):
+ try:
+ return func(*args, **kwargs)
+ except OSError as e:
+ if e.errno != 5:
+ raise
+ try:
+ tqdm_instance.miniters = float('inf')
+ except ReferenceError:
+ pass
+ except ValueError as e:
+ if 'closed' not in str(e):
+ raise
+ try:
+ tqdm_instance.miniters = float('inf')
+ except ReferenceError:
+ pass
+ return inner
+
+ def __init__(self, wrapped, tqdm_instance):
+ super(DisableOnWriteError, self).__init__(wrapped)
+ if hasattr(wrapped, 'write'):
+ self.wrapper_setattr(
+ 'write', self.disable_on_exception(tqdm_instance, wrapped.write))
+ if hasattr(wrapped, 'flush'):
+ self.wrapper_setattr(
+ 'flush', self.disable_on_exception(tqdm_instance, wrapped.flush))
+
+ def __eq__(self, other):
+ return self._wrapped == getattr(other, '_wrapped', other)
+
+
+class CallbackIOWrapper(ObjectWrapper):
+ def __init__(self, callback, stream, method="read"):
+ """
+ Wrap a given `file`-like object's `read()` or `write()` to report
+ lengths to the given `callback`
+ """
+ super(CallbackIOWrapper, self).__init__(stream)
+ func = getattr(stream, method)
+ if method == "write":
+ @wraps(func)
+ def write(data, *args, **kwargs):
+ res = func(data, *args, **kwargs)
+ callback(len(data))
+ return res
+ self.wrapper_setattr('write', write)
+ elif method == "read":
+ @wraps(func)
+ def read(*args, **kwargs):
+ data = func(*args, **kwargs)
+ callback(len(data))
+ return data
+ self.wrapper_setattr('read', read)
+ else:
+ raise KeyError("Can only wrap read/write methods")
+
+
+def _is_utf(encoding):
+ try:
+ u'\u2588\u2589'.encode(encoding)
+ except UnicodeEncodeError:
+ return False
+ except Exception:
+ try:
+ return encoding.lower().startswith('utf-') or ('U8' == encoding)
+ except Exception:
+ return False
+ else:
+ return True
+
+
+def _supports_unicode(fp):
+ try:
+ return _is_utf(fp.encoding)
+ except AttributeError:
+ return False
+
+
+def _is_ascii(s):
+ if isinstance(s, str):
+ for c in s:
+ if ord(c) > 255:
+ return False
+ return True
+ return _supports_unicode(s)
+
+
+def _screen_shape_wrapper(): # pragma: no cover
+ """
+ Return a function which returns console dimensions (width, height).
+ Supported: linux, osx, windows, cygwin.
+ """
+ _screen_shape = None
+ if IS_WIN:
+ _screen_shape = _screen_shape_windows
+ if _screen_shape is None:
+ _screen_shape = _screen_shape_tput
+ if IS_NIX:
+ _screen_shape = _screen_shape_linux
+ return _screen_shape
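+
+# Usage sketch (illustrative, not part of the upstream source): the wrapper
+# returns a platform-specific function (or None on unsupported platforms);
+# call it with a stream to obtain (columns, rows):
+#   shape = _screen_shape_wrapper()
+#   if shape:
+#       ncols, nrows = shape(sys.stderr)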
+
+
+def _screen_shape_windows(fp): # pragma: no cover
+ try:
+ import struct
+ from ctypes import create_string_buffer, windll
+ from sys import stdin, stdout
+
+ io_handle = -12 # assume stderr
+ if fp == stdin:
+ io_handle = -10
+ elif fp == stdout:
+ io_handle = -11
+
+ h = windll.kernel32.GetStdHandle(io_handle)
+ csbi = create_string_buffer(22)
+ res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)
+ if res:
+ (_bufx, _bufy, _curx, _cury, _wattr, left, top, right, bottom,
+ _maxx, _maxy) = struct.unpack("hhhhHhhhhhh", csbi.raw)
+ return right - left, bottom - top # +1
+ except Exception: # nosec
+ pass
+ return None, None
+
+
+def _screen_shape_tput(*_): # pragma: no cover
+ """cygwin xterm (windows)"""
+ try:
+ import shlex
+ from subprocess import check_call # nosec
+ return [int(check_call(shlex.split('tput ' + i))) - 1
+ for i in ('cols', 'lines')]
+ except Exception: # nosec
+ pass
+ return None, None
+
+
+def _screen_shape_linux(fp): # pragma: no cover
+
+ try:
+ from array import array
+ from fcntl import ioctl
+ from termios import TIOCGWINSZ
+ except ImportError:
+ return None, None
+ else:
+ try:
+ rows, cols = array('h', ioctl(fp, TIOCGWINSZ, '\0' * 8))[:2]
+ return cols, rows
+ except Exception:
+ try:
+ return [int(os.environ[i]) - 1 for i in ("COLUMNS", "LINES")]
+ except (KeyError, ValueError):
+ return None, None
+
+
+def _environ_cols_wrapper(): # pragma: no cover
+ """
+ Return a function which returns console width.
+ Supported: linux, osx, windows, cygwin.
+ """
+ warn("Use `_screen_shape_wrapper()(file)[0]` instead of"
+ " `_environ_cols_wrapper()(file)`", DeprecationWarning, stacklevel=2)
+ shape = _screen_shape_wrapper()
+ if not shape:
+ return None
+
+ @wraps(shape)
+ def inner(fp):
+ return shape(fp)[0]
+
+ return inner
+
+
+def _term_move_up(): # pragma: no cover
+ return '' if (os.name == 'nt') and (colorama is None) else '\x1b[A'
+
+
+try:
+ # TODO consider using wcswidth third-party package for 0-width characters
+ from unicodedata import east_asian_width
+except ImportError:
+ _text_width = len
+else:
+ def _text_width(s):
+ return sum(2 if east_asian_width(ch) in 'FW' else 1 for ch in _unicode(s))
+
+
+def disp_len(data):
+ """
+ Returns the real on-screen length of a string which may contain
+ ANSI control codes and wide chars.
+ """
+ return _text_width(RE_ANSI.sub('', data))
+
+
+def disp_trim(data, length):
+ """
+ Trim a string which may contain ANSI control characters.
+ """
+ if len(data) == disp_len(data):
+ return data[:length]
+
+ ansi_present = bool(RE_ANSI.search(data))
+ while disp_len(data) > length: # carefully delete one char at a time
+ data = data[:-1]
+ if ansi_present and bool(RE_ANSI.search(data)):
+ # assume ANSI reset is required
+ return data if data.endswith("\033[0m") else data + "\033[0m"
+ return data
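+
+# A small worked example: ANSI escapes contribute zero display width, so
+# disp_len('\x1b[91merror\x1b[0m') == 5, while disp_trim('\x1b[91merror\x1b[0m', 3)
+# returns '\x1b[91merr\x1b[0m', re-appending the reset code so the colour
+# does not leak past the trimmed text.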
diff --git a/third_party/python/tqdm/tqdm/version.py b/third_party/python/tqdm/tqdm/version.py
new file mode 100644
index 0000000000..11cbaea79d
--- /dev/null
+++ b/third_party/python/tqdm/tqdm/version.py
@@ -0,0 +1,9 @@
+"""`tqdm` version detector. Precedence: installed dist, git, 'UNKNOWN'."""
+try:
+ from ._dist_ver import __version__
+except ImportError:
+ try:
+ from setuptools_scm import get_version
+ __version__ = get_version(root='..', relative_to=__file__)
+ except (ImportError, LookupError):
+ __version__ = "UNKNOWN"
diff --git a/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/LICENSE b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/LICENSE
new file mode 100644
index 0000000000..583f9f6e61
--- /dev/null
+++ b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/LICENSE
@@ -0,0 +1,254 @@
+A. HISTORY OF THE SOFTWARE
+==========================
+
+Python was created in the early 1990s by Guido van Rossum at Stichting
+Mathematisch Centrum (CWI, see http://www.cwi.nl) in the Netherlands
+as a successor of a language called ABC. Guido remains Python's
+principal author, although it includes many contributions from others.
+
+In 1995, Guido continued his work on Python at the Corporation for
+National Research Initiatives (CNRI, see http://www.cnri.reston.va.us)
+in Reston, Virginia where he released several versions of the
+software.
+
+In May 2000, Guido and the Python core development team moved to
+BeOpen.com to form the BeOpen PythonLabs team. In October of the same
+year, the PythonLabs team moved to Digital Creations (now Zope
+Corporation, see http://www.zope.com). In 2001, the Python Software
+Foundation (PSF, see http://www.python.org/psf/) was formed, a
+non-profit organization created specifically to own Python-related
+Intellectual Property. Zope Corporation is a sponsoring member of
+the PSF.
+
+All Python releases are Open Source (see http://www.opensource.org for
+the Open Source Definition). Historically, most, but not all, Python
+releases have also been GPL-compatible; the table below summarizes
+the various releases.
+
+ Release Derived Year Owner GPL-
+ from compatible? (1)
+
+ 0.9.0 thru 1.2 1991-1995 CWI yes
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
+ 1.6 1.5.2 2000 CNRI no
+ 2.0 1.6 2000 BeOpen.com no
+ 1.6.1 1.6 2001 CNRI yes (2)
+ 2.1 2.0+1.6.1 2001 PSF no
+ 2.0.1 2.0+1.6.1 2001 PSF yes
+ 2.1.1 2.1+2.0.1 2001 PSF yes
+ 2.1.2 2.1.1 2002 PSF yes
+ 2.1.3 2.1.2 2002 PSF yes
+ 2.2 and above 2.1.1 2001-now PSF yes
+
+Footnotes:
+
+(1) GPL-compatible doesn't mean that we're distributing Python under
+ the GPL. All Python licenses, unlike the GPL, let you distribute
+ a modified version without making your changes open source. The
+ GPL-compatible licenses make it possible to combine Python with
+ other software that is released under the GPL; the others don't.
+
+(2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
+ because its license has a choice of law clause. According to
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
+ is "not incompatible" with the GPL.
+
+Thanks to the many outside volunteers who have worked under Guido's
+direction to make these releases possible.
+
+
+B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
+===============================================================
+
+PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
+--------------------------------------------
+
+1. This LICENSE AGREEMENT is between the Python Software Foundation
+("PSF"), and the Individual or Organization ("Licensee") accessing and
+otherwise using this software ("Python") in source or binary form and
+its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, PSF hereby
+grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
+analyze, test, perform and/or display publicly, prepare derivative works,
+distribute, and otherwise use Python alone or in any derivative version,
+provided, however, that PSF's License Agreement and PSF's notice of copyright,
+i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+2011, 2012, 2013, 2014 Python Software Foundation; All Rights Reserved" are
+retained in Python alone or in any derivative version prepared by Licensee.
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python.
+
+4. PSF is making Python available to Licensee on an "AS IS"
+basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. Nothing in this License Agreement shall be deemed to create any
+relationship of agency, partnership, or joint venture between PSF and
+Licensee. This License Agreement does not grant permission to use PSF
+trademarks or trade name in a trademark sense to endorse or promote
+products or services of Licensee, or any third party.
+
+8. By copying, installing or otherwise using Python, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
+-------------------------------------------
+
+BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
+
+1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
+office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
+Individual or Organization ("Licensee") accessing and otherwise using
+this software in source or binary form and its associated
+documentation ("the Software").
+
+2. Subject to the terms and conditions of this BeOpen Python License
+Agreement, BeOpen hereby grants Licensee a non-exclusive,
+royalty-free, world-wide license to reproduce, analyze, test, perform
+and/or display publicly, prepare derivative works, distribute, and
+otherwise use the Software alone or in any derivative version,
+provided, however, that the BeOpen Python License is retained in the
+Software, alone or in any derivative version prepared by Licensee.
+
+3. BeOpen is making the Software available to Licensee on an "AS IS"
+basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
+SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
+AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
+DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+5. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+6. This License Agreement shall be governed by and interpreted in all
+respects by the law of the State of California, excluding conflict of
+law provisions. Nothing in this License Agreement shall be deemed to
+create any relationship of agency, partnership, or joint venture
+between BeOpen and Licensee. This License Agreement does not grant
+permission to use BeOpen trademarks or trade names in a trademark
+sense to endorse or promote products or services of Licensee, or any
+third party. As an exception, the "BeOpen Python" logos available at
+http://www.pythonlabs.com/logos.html may be used according to the
+permissions granted on that web page.
+
+7. By copying, installing or otherwise using the software, Licensee
+agrees to be bound by the terms and conditions of this License
+Agreement.
+
+
+CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
+---------------------------------------
+
+1. This LICENSE AGREEMENT is between the Corporation for National
+Research Initiatives, having an office at 1895 Preston White Drive,
+Reston, VA 20191 ("CNRI"), and the Individual or Organization
+("Licensee") accessing and otherwise using Python 1.6.1 software in
+source or binary form and its associated documentation.
+
+2. Subject to the terms and conditions of this License Agreement, CNRI
+hereby grants Licensee a nonexclusive, royalty-free, world-wide
+license to reproduce, analyze, test, perform and/or display publicly,
+prepare derivative works, distribute, and otherwise use Python 1.6.1
+alone or in any derivative version, provided, however, that CNRI's
+License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
+1995-2001 Corporation for National Research Initiatives; All Rights
+Reserved" are retained in Python 1.6.1 alone or in any derivative
+version prepared by Licensee. Alternately, in lieu of CNRI's License
+Agreement, Licensee may substitute the following text (omitting the
+quotes): "Python 1.6.1 is made available subject to the terms and
+conditions in CNRI's License Agreement. This Agreement together with
+Python 1.6.1 may be located on the Internet using the following
+unique, persistent identifier (known as a handle): 1895.22/1013. This
+Agreement may also be obtained from a proxy server on the Internet
+using the following URL: http://hdl.handle.net/1895.22/1013".
+
+3. In the event Licensee prepares a derivative work that is based on
+or incorporates Python 1.6.1 or any part thereof, and wants to make
+the derivative work available to others as provided herein, then
+Licensee hereby agrees to include in any such work a brief summary of
+the changes made to Python 1.6.1.
+
+4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
+basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
+IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
+DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
+FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
+INFRINGE ANY THIRD PARTY RIGHTS.
+
+5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
+1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
+A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
+OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
+
+6. This License Agreement will automatically terminate upon a material
+breach of its terms and conditions.
+
+7. This License Agreement shall be governed by the federal
+intellectual property law of the United States, including without
+limitation the federal copyright law, and, to the extent such
+U.S. federal law does not apply, by the law of the Commonwealth of
+Virginia, excluding Virginia's conflict of law provisions.
+Notwithstanding the foregoing, with regard to derivative works based
+on Python 1.6.1 that incorporate non-separable material that was
+previously distributed under the GNU General Public License (GPL), the
+law of the Commonwealth of Virginia shall govern this License
+Agreement only as to issues arising under or with respect to
+Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
+License Agreement shall be deemed to create any relationship of
+agency, partnership, or joint venture between CNRI and Licensee. This
+License Agreement does not grant permission to use CNRI trademarks or
+trade name in a trademark sense to endorse or promote products or
+services of Licensee, or any third party.
+
+8. By clicking on the "ACCEPT" button where indicated, or by copying,
+installing or otherwise using Python 1.6.1, Licensee agrees to be
+bound by the terms and conditions of this License Agreement.
+
+ ACCEPT
+
+
+CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
+--------------------------------------------------
+
+Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
+The Netherlands. All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and that
+both that copyright notice and this permission notice appear in
+supporting documentation, and that the name of Stichting Mathematisch
+Centrum or CWI not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
+THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
+FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
+FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/METADATA b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/METADATA
new file mode 100644
index 0000000000..fa314015ef
--- /dev/null
+++ b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/METADATA
@@ -0,0 +1,45 @@
+Metadata-Version: 2.1
+Name: typing-extensions
+Version: 3.10.0.0
+Summary: Backported and Experimental Type Hints for Python 3.5+
+Home-page: https://github.com/python/typing/blob/master/typing_extensions/README.rst
+Author: Guido van Rossum, Jukka Lehtosalo, Łukasz Langa, Michael Lee
+Author-email: levkivskyi@gmail.com
+License: PSF
+Keywords: typing function annotations type hints hinting checking checker typehints typehinting typechecking backport
+Platform: UNKNOWN
+Classifier: Development Status :: 3 - Alpha
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: Python Software Foundation License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Topic :: Software Development
+Requires-Dist: typing (>=3.7.4) ; python_version < "3.5"
+
+Typing Extensions -- Backported and Experimental Type Hints for Python
+
+The ``typing`` module was added to the standard library in Python 3.5, but
+many new features have been added to the module since then.
+This means users of Python 3.5 - 3.6 who are unable to upgrade will not be
+able to take advantage of new types added to the ``typing`` module, such as
+``typing.Protocol`` or ``typing.TypedDict``.
+
+The ``typing_extensions`` module contains backports of these changes.
+Experimental types that will eventually be added to the ``typing``
+module are also included in ``typing_extensions``, such as
+``typing.ParamSpec`` and ``typing.TypeGuard``.
+
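+For example (a minimal sketch; the class syntax requires Python 3.6+)::
+
+    from typing_extensions import TypedDict
+
+    class Point(TypedDict):
+        x: int
+        y: int
+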
+Users of Python versions before 3.5 should install and use
+the ``typing`` module from PyPI instead of using this one, unless specifically
+writing code that must be compatible with multiple Python versions or that
+requires experimental types.
+
+
diff --git a/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/RECORD b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/RECORD
new file mode 100644
index 0000000000..217df4acdd
--- /dev/null
+++ b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/RECORD
@@ -0,0 +1,6 @@
+typing_extensions.py,sha256=upcRc-ygmoZSgbJ4WZa34ZE_PVJsYrOlGM7WWbBrJuo,108429
+typing_extensions-3.10.0.0.dist-info/LICENSE,sha256=_xfOlOECAk3raHc-scx0ynbaTmWPNzUx8Kwi1oprsa0,12755
+typing_extensions-3.10.0.0.dist-info/METADATA,sha256=zjlcNCeUQUETPe37jftee4IwkGKxm8YPKQxFFOMgyqQ,2099
+typing_extensions-3.10.0.0.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
+typing_extensions-3.10.0.0.dist-info/top_level.txt,sha256=hkDmk3VmrfXPOD--jS4aKTCu6kFZo-kVT1cIFfq1eU8,18
+typing_extensions-3.10.0.0.dist-info/RECORD,,
diff --git a/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/WHEEL b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/WHEEL
new file mode 100644
index 0000000000..385faab052
--- /dev/null
+++ b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/top_level.txt b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..5fd4f05f34
--- /dev/null
+++ b/third_party/python/typing_extensions/typing_extensions-3.10.0.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+typing_extensions
diff --git a/third_party/python/typing_extensions/typing_extensions.py b/third_party/python/typing_extensions/typing_extensions.py
new file mode 100644
index 0000000000..82d1c2dc2c
--- /dev/null
+++ b/third_party/python/typing_extensions/typing_extensions.py
@@ -0,0 +1,2805 @@
+import abc
+import collections
+import contextlib
+import sys
+import typing
+import collections.abc as collections_abc
+import operator
+
+# These are used by Protocol implementation
+# We use internal typing helpers here, but this significantly reduces
+# code duplication. (Also this is only until Protocol is in typing.)
+from typing import Generic, Callable, TypeVar, Tuple
+
+# After PEP 560, the internal typing API was substantially reworked.
+# This is especially important for the Protocol class, which uses internal
+# APIs quite extensively.
+PEP_560 = sys.version_info[:3] >= (3, 7, 0)
+
+if PEP_560:
+ GenericMeta = TypingMeta = type
+else:
+ from typing import GenericMeta, TypingMeta
+OLD_GENERICS = False
+try:
+ from typing import _type_vars, _next_in_mro, _type_check
+except ImportError:
+ OLD_GENERICS = True
+try:
+ from typing import _subs_tree # noqa
+ SUBS_TREE = True
+except ImportError:
+ SUBS_TREE = False
+try:
+ from typing import _tp_cache
+except ImportError:
+ def _tp_cache(x):
+ return x
+try:
+ from typing import _TypingEllipsis, _TypingEmpty
+except ImportError:
+ class _TypingEllipsis:
+ pass
+
+ class _TypingEmpty:
+ pass
+
+
+# The two functions below are copies of typing internal helpers.
+# They are needed by _ProtocolMeta
+
+
+def _no_slots_copy(dct):
+ dict_copy = dict(dct)
+ if '__slots__' in dict_copy:
+ for slot in dict_copy['__slots__']:
+ dict_copy.pop(slot, None)
+ return dict_copy
+
+
+def _check_generic(cls, parameters):
+ if not cls.__parameters__:
+ raise TypeError("%s is not a generic class" % repr(cls))
+ alen = len(parameters)
+ elen = len(cls.__parameters__)
+ if alen != elen:
+ raise TypeError("Too %s parameters for %s; actual %s, expected %s" %
+ ("many" if alen > elen else "few", repr(cls), alen, elen))
+
+
+if hasattr(typing, '_generic_new'):
+ _generic_new = typing._generic_new
+else:
+ # Note: The '_generic_new(...)' function is used as a part of the
+ # process of creating a generic type and was added to the typing module
+ # as of Python 3.5.3.
+ #
+ # We've defined '_generic_new(...)' below to exactly match the behavior
+ # implemented in older versions of 'typing' bundled with Python 3.5.0 to
+ # 3.5.2. This helps eliminate redundancy when defining collection types
+ # like 'Deque' later.
+ #
+ # See https://github.com/python/typing/pull/308 for more details -- in
+ # particular, compare and contrast the definition of types like
+ # 'typing.List' before and after the merge.
+
+ def _generic_new(base_cls, cls, *args, **kwargs):
+ return base_cls.__new__(cls, *args, **kwargs)
+
+# See https://github.com/python/typing/pull/439
+if hasattr(typing, '_geqv'):
+ from typing import _geqv
+ _geqv_defined = True
+else:
+ _geqv = None
+ _geqv_defined = False
+
+if sys.version_info[:2] >= (3, 6):
+ import _collections_abc
+ _check_methods_in_mro = _collections_abc._check_methods
+else:
+ def _check_methods_in_mro(C, *methods):
+ mro = C.__mro__
+ for method in methods:
+ for B in mro:
+ if method in B.__dict__:
+ if B.__dict__[method] is None:
+ return NotImplemented
+ break
+ else:
+ return NotImplemented
+ return True
+
+
+# Please keep __all__ alphabetized within each category.
+__all__ = [
+ # Super-special typing primitives.
+ 'ClassVar',
+ 'Concatenate',
+ 'Final',
+ 'ParamSpec',
+ 'Type',
+
+ # ABCs (from collections.abc).
+ # The following are added depending on presence
+ # of their non-generic counterparts in stdlib:
+ # 'Awaitable',
+ # 'AsyncIterator',
+ # 'AsyncIterable',
+ # 'Coroutine',
+ # 'AsyncGenerator',
+ # 'AsyncContextManager',
+ # 'ChainMap',
+
+ # Concrete collection types.
+ 'ContextManager',
+ 'Counter',
+ 'Deque',
+ 'DefaultDict',
+    'OrderedDict',
+ 'TypedDict',
+
+ # Structural checks, a.k.a. protocols.
+ 'SupportsIndex',
+
+ # One-off things.
+ 'final',
+ 'IntVar',
+ 'Literal',
+ 'NewType',
+ 'overload',
+ 'Text',
+ 'TypeAlias',
+ 'TypeGuard',
+ 'TYPE_CHECKING',
+]
+
+# Annotated relies on the substitution trees of PEP 560. It will not work
+# for versions of typing older than 3.5.3.
+HAVE_ANNOTATED = PEP_560 or SUBS_TREE
+
+if PEP_560:
+ __all__.extend(["get_args", "get_origin", "get_type_hints"])
+
+if HAVE_ANNOTATED:
+ __all__.append("Annotated")
+
+# Protocols are hard to backport to the original version of typing 3.5.0
+HAVE_PROTOCOLS = sys.version_info[:3] != (3, 5, 0)
+
+if HAVE_PROTOCOLS:
+ __all__.extend(['Protocol', 'runtime', 'runtime_checkable'])
+
+
+# TODO
+if hasattr(typing, 'NoReturn'):
+ NoReturn = typing.NoReturn
+elif hasattr(typing, '_FinalTypingBase'):
+ class _NoReturn(typing._FinalTypingBase, _root=True):
+ """Special type indicating functions that never return.
+ Example::
+
+ from typing import NoReturn
+
+ def stop() -> NoReturn:
+ raise Exception('no way')
+
+ This type is invalid in other positions, e.g., ``List[NoReturn]``
+ will fail in static type checkers.
+ """
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError("NoReturn cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("NoReturn cannot be used with issubclass().")
+
+ NoReturn = _NoReturn(_root=True)
+else:
+ class _NoReturnMeta(typing.TypingMeta):
+ """Metaclass for NoReturn"""
+ def __new__(cls, name, bases, namespace, _root=False):
+ return super().__new__(cls, name, bases, namespace, _root=_root)
+
+ def __instancecheck__(self, obj):
+ raise TypeError("NoReturn cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("NoReturn cannot be used with issubclass().")
+
+ class NoReturn(typing.Final, metaclass=_NoReturnMeta, _root=True):
+ """Special type indicating functions that never return.
+ Example::
+
+ from typing import NoReturn
+
+ def stop() -> NoReturn:
+ raise Exception('no way')
+
+ This type is invalid in other positions, e.g., ``List[NoReturn]``
+ will fail in static type checkers.
+ """
+ __slots__ = ()
+
+
+# Some unconstrained type variables. These are used by the container types.
+# (These are not for export.)
+T = typing.TypeVar('T') # Any type.
+KT = typing.TypeVar('KT') # Key type.
+VT = typing.TypeVar('VT') # Value type.
+T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers.
+V_co = typing.TypeVar('V_co', covariant=True) # Any type covariant containers.
+VT_co = typing.TypeVar('VT_co', covariant=True) # Value type covariant containers.
+T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant.
+
+
+if hasattr(typing, 'ClassVar'):
+ ClassVar = typing.ClassVar
+elif hasattr(typing, '_FinalTypingBase'):
+ class _ClassVar(typing._FinalTypingBase, _root=True):
+ """Special type construct to mark class variables.
+
+ An annotation wrapped in ClassVar indicates that a given
+ attribute is intended to be used as a class variable and
+ should not be set on instances of that class. Usage::
+
+ class Starship:
+ stats: ClassVar[Dict[str, int]] = {} # class variable
+ damage: int = 10 # instance variable
+
+        ClassVar accepts only types and cannot be further subscripted.
+
+ Note that ClassVar is not a class itself, and should not
+ be used with isinstance() or issubclass().
+ """
+
+ __slots__ = ('__type__',)
+
+ def __init__(self, tp=None, **kwds):
+ self.__type__ = tp
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is None:
+ return cls(typing._type_check(item,
+ '{} accepts only single type.'.format(cls.__name__[1:])),
+ _root=True)
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(new_tp, _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += '[{}]'.format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _ClassVar):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ ClassVar = _ClassVar(_root=True)
+else:
+ class _ClassVarMeta(typing.TypingMeta):
+ """Metaclass for ClassVar"""
+
+ def __new__(cls, name, bases, namespace, tp=None, _root=False):
+ self = super().__new__(cls, name, bases, namespace, _root=_root)
+ if tp is not None:
+ self.__type__ = tp
+ return self
+
+ def __instancecheck__(self, obj):
+ raise TypeError("ClassVar cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("ClassVar cannot be used with issubclass().")
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is not None:
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ param = typing._type_check(
+ item,
+ '{} accepts only single type.'.format(cls.__name__[1:]))
+ return cls(self.__name__, self.__bases__,
+ dict(self.__dict__), tp=param, _root=True)
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(self.__name__, self.__bases__,
+ dict(self.__dict__), tp=self.__type__,
+ _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += '[{}]'.format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, ClassVar):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ class ClassVar(typing.Final, metaclass=_ClassVarMeta, _root=True):
+ """Special type construct to mark class variables.
+
+ An annotation wrapped in ClassVar indicates that a given
+ attribute is intended to be used as a class variable and
+ should not be set on instances of that class. Usage::
+
+ class Starship:
+ stats: ClassVar[Dict[str, int]] = {} # class variable
+ damage: int = 10 # instance variable
+
+        ClassVar accepts only types and cannot be further subscripted.
+
+ Note that ClassVar is not a class itself, and should not
+ be used with isinstance() or issubclass().
+ """
+
+ __type__ = None
+
+# On older versions of typing there is an internal class named "Final".
+if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7):
+ Final = typing.Final
+elif sys.version_info[:2] >= (3, 7):
+ class _FinalForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ '{} accepts only single type'.format(self._name))
+ return _GenericAlias(self, (item,))
+
+ Final = _FinalForm('Final',
+ doc="""A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.""")
+elif hasattr(typing, '_FinalTypingBase'):
+ class _Final(typing._FinalTypingBase, _root=True):
+ """A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.
+ """
+
+ __slots__ = ('__type__',)
+
+ def __init__(self, tp=None, **kwds):
+ self.__type__ = tp
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is None:
+ return cls(typing._type_check(item,
+ '{} accepts only single type.'.format(cls.__name__[1:])),
+ _root=True)
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(new_tp, _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += '[{}]'.format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _Final):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ Final = _Final(_root=True)
+else:
+ class _FinalMeta(typing.TypingMeta):
+ """Metaclass for Final"""
+
+ def __new__(cls, name, bases, namespace, tp=None, _root=False):
+ self = super().__new__(cls, name, bases, namespace, _root=_root)
+ if tp is not None:
+ self.__type__ = tp
+ return self
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Final cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Final cannot be used with issubclass().")
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is not None:
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ param = typing._type_check(
+ item,
+ '{} accepts only single type.'.format(cls.__name__[1:]))
+ return cls(self.__name__, self.__bases__,
+ dict(self.__dict__), tp=param, _root=True)
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(self.__name__, self.__bases__,
+ dict(self.__dict__), tp=self.__type__,
+ _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += '[{}]'.format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, Final):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ class Final(typing.Final, metaclass=_FinalMeta, _root=True):
+ """A special typing construct to indicate that a name
+ cannot be re-assigned or overridden in a subclass.
+ For example:
+
+ MAX_SIZE: Final = 9000
+ MAX_SIZE += 1 # Error reported by type checker
+
+ class Connection:
+ TIMEOUT: Final[int] = 10
+ class FastConnector(Connection):
+ TIMEOUT = 1 # Error reported by type checker
+
+ There is no runtime checking of these properties.
+ """
+
+ __type__ = None
+
+
+if hasattr(typing, 'final'):
+ final = typing.final
+else:
+ def final(f):
+ """This decorator can be used to indicate to type checkers that
+ the decorated method cannot be overridden, and decorated class
+ cannot be subclassed. For example:
+
+ class Base:
+ @final
+ def done(self) -> None:
+ ...
+ class Sub(Base):
+ def done(self) -> None: # Error reported by type checker
+ ...
+ @final
+ class Leaf:
+ ...
+ class Other(Leaf): # Error reported by type checker
+ ...
+
+ There is no runtime checking of these properties.
+ """
+ return f
+
+
+def IntVar(name):
+ return TypeVar(name)
+
+
+if hasattr(typing, 'Literal'):
+ Literal = typing.Literal
+elif sys.version_info[:2] >= (3, 7):
+ class _LiteralForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ return _GenericAlias(self, parameters)
+
+ Literal = _LiteralForm('Literal',
+ doc="""A type that can be used to indicate to type checkers
+ that the corresponding value has a value literally equivalent
+ to the provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to
+ the value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime
+ checking verifying that the parameter is actually a value
+ instead of a type.""")
+elif hasattr(typing, '_FinalTypingBase'):
+ class _Literal(typing._FinalTypingBase, _root=True):
+ """A type that can be used to indicate to type checkers that the
+ corresponding value has a value literally equivalent to the
+ provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to the
+ value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime checking
+ verifying that the parameter is actually a value instead of a type.
+ """
+
+ __slots__ = ('__values__',)
+
+ def __init__(self, values=None, **kwds):
+ self.__values__ = values
+
+ def __getitem__(self, values):
+ cls = type(self)
+ if self.__values__ is None:
+ if not isinstance(values, tuple):
+ values = (values,)
+ return cls(values, _root=True)
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ def _eval_type(self, globalns, localns):
+ return self
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__values__ is not None:
+ r += '[{}]'.format(', '.join(map(typing._type_repr, self.__values__)))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__values__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _Literal):
+ return NotImplemented
+ if self.__values__ is not None:
+ return self.__values__ == other.__values__
+ return self is other
+
+ Literal = _Literal(_root=True)
+else:
+ class _LiteralMeta(typing.TypingMeta):
+ """Metaclass for Literal"""
+
+ def __new__(cls, name, bases, namespace, values=None, _root=False):
+ self = super().__new__(cls, name, bases, namespace, _root=_root)
+ if values is not None:
+ self.__values__ = values
+ return self
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Literal cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Literal cannot be used with issubclass().")
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__values__ is not None:
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ if not isinstance(item, tuple):
+ item = (item,)
+ return cls(self.__name__, self.__bases__,
+ dict(self.__dict__), values=item, _root=True)
+
+ def _eval_type(self, globalns, localns):
+ return self
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__values__ is not None:
+ r += '[{}]'.format(', '.join(map(typing._type_repr, self.__values__)))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__values__))
+
+ def __eq__(self, other):
+ if not isinstance(other, Literal):
+ return NotImplemented
+ if self.__values__ is not None:
+ return self.__values__ == other.__values__
+ return self is other
+
+ class Literal(typing.Final, metaclass=_LiteralMeta, _root=True):
+ """A type that can be used to indicate to type checkers that the
+ corresponding value has a value literally equivalent to the
+ provided parameter. For example:
+
+ var: Literal[4] = 4
+
+ The type checker understands that 'var' is literally equal to the
+ value 4 and no other value.
+
+ Literal[...] cannot be subclassed. There is no runtime checking
+ verifying that the parameter is actually a value instead of a type.
+ """
+
+ __values__ = None
+
+
+def _overload_dummy(*args, **kwds):
+ """Helper for @overload to raise when called."""
+ raise NotImplementedError(
+ "You should not call an overloaded function. "
+ "A series of @overload-decorated functions "
+ "outside a stub module should always be followed "
+ "by an implementation that is not @overload-ed.")
+
+
+def overload(func):
+ """Decorator for overloaded functions/methods.
+
+ In a stub file, place two or more stub definitions for the same
+ function in a row, each decorated with @overload. For example:
+
+ @overload
+ def utf8(value: None) -> None: ...
+ @overload
+ def utf8(value: bytes) -> bytes: ...
+ @overload
+ def utf8(value: str) -> bytes: ...
+
+ In a non-stub file (i.e. a regular .py file), do the same but
+ follow it with an implementation. The implementation should *not*
+ be decorated with @overload. For example:
+
+ @overload
+ def utf8(value: None) -> None: ...
+ @overload
+ def utf8(value: bytes) -> bytes: ...
+ @overload
+ def utf8(value: str) -> bytes: ...
+ def utf8(value):
+ # implementation goes here
+ """
+ return _overload_dummy
+
+
+# This is not a real generic class. Don't use outside annotations.
+if hasattr(typing, 'Type'):
+ Type = typing.Type
+else:
+ # Internal type variable used for Type[].
+ CT_co = typing.TypeVar('CT_co', covariant=True, bound=type)
+
+ class Type(typing.Generic[CT_co], extra=type):
+ """A special construct usable to annotate class objects.
+
+ For example, suppose we have the following classes::
+
+ class User: ... # Abstract base for User classes
+ class BasicUser(User): ...
+ class ProUser(User): ...
+ class TeamUser(User): ...
+
+ And a function that takes a class argument that's a subclass of
+ User and returns an instance of the corresponding class::
+
+ U = TypeVar('U', bound=User)
+ def new_user(user_class: Type[U]) -> U:
+ user = user_class()
+ # (Here we could write the user object to a database)
+ return user
+ joe = new_user(BasicUser)
+
+ At this point the type checker knows that joe has type BasicUser.
+ """
+
+ __slots__ = ()
+
+
+# Various ABCs mimicking those in collections.abc.
+# A few are simply re-exported for completeness.
+
+def _define_guard(type_name):
+ """
+ Returns True if the given type isn't defined in typing but
+ is defined in collections_abc.
+
+ Adds the type to __all__ if the collection is found in either
+    typing or collections_abc.
+ """
+ if hasattr(typing, type_name):
+ __all__.append(type_name)
+ globals()[type_name] = getattr(typing, type_name)
+ return False
+ elif hasattr(collections_abc, type_name):
+ __all__.append(type_name)
+ return True
+ else:
+ return False
+
+
+class _ExtensionsGenericMeta(GenericMeta):
+ def __subclasscheck__(self, subclass):
+ """This mimics a more modern GenericMeta.__subclasscheck__() logic
+ (that does not have problems with recursion) to work around interactions
+ between collections, typing, and typing_extensions on older
+ versions of Python, see https://github.com/python/typing/issues/501.
+ """
+ if sys.version_info[:3] >= (3, 5, 3) or sys.version_info[:3] < (3, 5, 0):
+ if self.__origin__ is not None:
+ if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
+ raise TypeError("Parameterized generics cannot be used with class "
+ "or instance checks")
+ return False
+ if not self.__extra__:
+ return super().__subclasscheck__(subclass)
+ res = self.__extra__.__subclasshook__(subclass)
+ if res is not NotImplemented:
+ return res
+ if self.__extra__ in subclass.__mro__:
+ return True
+ for scls in self.__extra__.__subclasses__():
+ if isinstance(scls, GenericMeta):
+ continue
+ if issubclass(subclass, scls):
+ return True
+ return False
+
+
+if _define_guard('Awaitable'):
+ class Awaitable(typing.Generic[T_co], metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.Awaitable):
+ __slots__ = ()
+
+
+if _define_guard('Coroutine'):
+ class Coroutine(Awaitable[V_co], typing.Generic[T_co, T_contra, V_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.Coroutine):
+ __slots__ = ()
+
+
+if _define_guard('AsyncIterable'):
+ class AsyncIterable(typing.Generic[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.AsyncIterable):
+ __slots__ = ()
+
+
+if _define_guard('AsyncIterator'):
+ class AsyncIterator(AsyncIterable[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.AsyncIterator):
+ __slots__ = ()
+
+
+if hasattr(typing, 'Deque'):
+ Deque = typing.Deque
+elif _geqv_defined:
+ class Deque(collections.deque, typing.MutableSequence[T],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.deque):
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, Deque):
+ return collections.deque(*args, **kwds)
+ return _generic_new(collections.deque, cls, *args, **kwds)
+else:
+ class Deque(collections.deque, typing.MutableSequence[T],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.deque):
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is Deque:
+ return collections.deque(*args, **kwds)
+ return _generic_new(collections.deque, cls, *args, **kwds)
+
+
+if hasattr(typing, 'ContextManager'):
+ ContextManager = typing.ContextManager
+elif hasattr(contextlib, 'AbstractContextManager'):
+ class ContextManager(typing.Generic[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=contextlib.AbstractContextManager):
+ __slots__ = ()
+else:
+ class ContextManager(typing.Generic[T_co]):
+ __slots__ = ()
+
+ def __enter__(self):
+ return self
+
+ @abc.abstractmethod
+ def __exit__(self, exc_type, exc_value, traceback):
+ return None
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is ContextManager:
+ # In Python 3.6+, it is possible to set a method to None to
+ # explicitly indicate that the class does not implement an ABC
+ # (https://bugs.python.org/issue25958), but we do not support
+ # that pattern here because this fallback class is only used
+ # in Python 3.5 and earlier.
+ if (any("__enter__" in B.__dict__ for B in C.__mro__) and
+ any("__exit__" in B.__dict__ for B in C.__mro__)):
+ return True
+ return NotImplemented
+
+
+if hasattr(typing, 'AsyncContextManager'):
+ AsyncContextManager = typing.AsyncContextManager
+ __all__.append('AsyncContextManager')
+elif hasattr(contextlib, 'AbstractAsyncContextManager'):
+ class AsyncContextManager(typing.Generic[T_co],
+ metaclass=_ExtensionsGenericMeta,
+ extra=contextlib.AbstractAsyncContextManager):
+ __slots__ = ()
+
+ __all__.append('AsyncContextManager')
+elif sys.version_info[:2] >= (3, 5):
+ exec("""
+class AsyncContextManager(typing.Generic[T_co]):
+ __slots__ = ()
+
+ async def __aenter__(self):
+ return self
+
+ @abc.abstractmethod
+ async def __aexit__(self, exc_type, exc_value, traceback):
+ return None
+
+ @classmethod
+ def __subclasshook__(cls, C):
+ if cls is AsyncContextManager:
+ return _check_methods_in_mro(C, "__aenter__", "__aexit__")
+ return NotImplemented
+
+__all__.append('AsyncContextManager')
+""")
+
+
+if hasattr(typing, 'DefaultDict'):
+ DefaultDict = typing.DefaultDict
+elif _geqv_defined:
+ class DefaultDict(collections.defaultdict, typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.defaultdict):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, DefaultDict):
+ return collections.defaultdict(*args, **kwds)
+ return _generic_new(collections.defaultdict, cls, *args, **kwds)
+else:
+ class DefaultDict(collections.defaultdict, typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.defaultdict):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is DefaultDict:
+ return collections.defaultdict(*args, **kwds)
+ return _generic_new(collections.defaultdict, cls, *args, **kwds)
+
+
+if hasattr(typing, 'OrderedDict'):
+ OrderedDict = typing.OrderedDict
+elif (3, 7, 0) <= sys.version_info[:3] < (3, 7, 2):
+ OrderedDict = typing._alias(collections.OrderedDict, (KT, VT))
+elif _geqv_defined:
+ class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.OrderedDict):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, OrderedDict):
+ return collections.OrderedDict(*args, **kwds)
+ return _generic_new(collections.OrderedDict, cls, *args, **kwds)
+else:
+ class OrderedDict(collections.OrderedDict, typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.OrderedDict):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is OrderedDict:
+ return collections.OrderedDict(*args, **kwds)
+ return _generic_new(collections.OrderedDict, cls, *args, **kwds)
+
+
+if hasattr(typing, 'Counter'):
+ Counter = typing.Counter
+elif (3, 5, 0) <= sys.version_info[:3] <= (3, 5, 1):
+ assert _geqv_defined
+ _TInt = typing.TypeVar('_TInt')
+
+ class _CounterMeta(typing.GenericMeta):
+ """Metaclass for Counter"""
+ def __getitem__(self, item):
+ return super().__getitem__((item, int))
+
+ class Counter(collections.Counter,
+ typing.Dict[T, int],
+ metaclass=_CounterMeta,
+ extra=collections.Counter):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, Counter):
+ return collections.Counter(*args, **kwds)
+ return _generic_new(collections.Counter, cls, *args, **kwds)
+
+elif _geqv_defined:
+ class Counter(collections.Counter,
+ typing.Dict[T, int],
+ metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, Counter):
+ return collections.Counter(*args, **kwds)
+ return _generic_new(collections.Counter, cls, *args, **kwds)
+
+else:
+ class Counter(collections.Counter,
+ typing.Dict[T, int],
+ metaclass=_ExtensionsGenericMeta, extra=collections.Counter):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is Counter:
+ return collections.Counter(*args, **kwds)
+ return _generic_new(collections.Counter, cls, *args, **kwds)
+
+
+if hasattr(typing, 'ChainMap'):
+ ChainMap = typing.ChainMap
+ __all__.append('ChainMap')
+elif hasattr(collections, 'ChainMap'):
+ # ChainMap only exists in 3.3+
+ if _geqv_defined:
+ class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.ChainMap):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if _geqv(cls, ChainMap):
+ return collections.ChainMap(*args, **kwds)
+ return _generic_new(collections.ChainMap, cls, *args, **kwds)
+ else:
+ class ChainMap(collections.ChainMap, typing.MutableMapping[KT, VT],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections.ChainMap):
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwds):
+ if cls._gorg is ChainMap:
+ return collections.ChainMap(*args, **kwds)
+ return _generic_new(collections.ChainMap, cls, *args, **kwds)
+
+ __all__.append('ChainMap')
+
+
+if _define_guard('AsyncGenerator'):
+ class AsyncGenerator(AsyncIterator[T_co], typing.Generic[T_co, T_contra],
+ metaclass=_ExtensionsGenericMeta,
+ extra=collections_abc.AsyncGenerator):
+ __slots__ = ()
+
+
+if hasattr(typing, 'NewType'):
+ NewType = typing.NewType
+else:
+ def NewType(name, tp):
+ """NewType creates simple unique types with almost zero
+ runtime overhead. NewType(name, tp) is considered a subtype of tp
+ by static type checkers. At runtime, NewType(name, tp) returns
+ a dummy function that simply returns its argument. Usage::
+
+ UserId = NewType('UserId', int)
+
+ def name_by_id(user_id: UserId) -> str:
+ ...
+
+ UserId('user') # Fails type check
+
+ name_by_id(42) # Fails type check
+ name_by_id(UserId(42)) # OK
+
+ num = UserId(5) + 1 # type: int
+ """
+
+ def new_type(x):
+ return x
+
+ new_type.__name__ = name
+ new_type.__supertype__ = tp
+ return new_type
+
+
+if hasattr(typing, 'Text'):
+ Text = typing.Text
+else:
+ Text = str
+
+
+if hasattr(typing, 'TYPE_CHECKING'):
+ TYPE_CHECKING = typing.TYPE_CHECKING
+else:
+ # Constant that's True when type checking, but False here.
+ TYPE_CHECKING = False
+
+
+def _gorg(cls):
+ """This function exists for compatibility with old typing versions."""
+ assert isinstance(cls, GenericMeta)
+ if hasattr(cls, '_gorg'):
+ return cls._gorg
+ while cls.__origin__ is not None:
+ cls = cls.__origin__
+ return cls
+
+
+if OLD_GENERICS:
+ def _next_in_mro(cls): # noqa
+ """This function exists for compatibility with old typing versions."""
+ next_in_mro = object
+ for i, c in enumerate(cls.__mro__[:-1]):
+ if isinstance(c, GenericMeta) and _gorg(c) is Generic:
+ next_in_mro = cls.__mro__[i + 1]
+ return next_in_mro
+
+
+_PROTO_WHITELIST = ['Callable', 'Awaitable',
+ 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator',
+ 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible',
+ 'ContextManager', 'AsyncContextManager']
+
+
+def _get_protocol_attrs(cls):
+ attrs = set()
+ for base in cls.__mro__[:-1]: # without object
+ if base.__name__ in ('Protocol', 'Generic'):
+ continue
+ annotations = getattr(base, '__annotations__', {})
+ for attr in list(base.__dict__.keys()) + list(annotations.keys()):
+ if (not attr.startswith('_abc_') and attr not in (
+ '__abstractmethods__', '__annotations__', '__weakref__',
+ '_is_protocol', '_is_runtime_protocol', '__dict__',
+ '__args__', '__slots__',
+ '__next_in_mro__', '__parameters__', '__origin__',
+ '__orig_bases__', '__extra__', '__tree_hash__',
+ '__doc__', '__subclasshook__', '__init__', '__new__',
+ '__module__', '_MutableMapping__marker', '_gorg')):
+ attrs.add(attr)
+ return attrs
+
+
+def _is_callable_members_only(cls):
+ return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls))
+
+
+if hasattr(typing, 'Protocol'):
+ Protocol = typing.Protocol
+elif HAVE_PROTOCOLS and not PEP_560:
+
+ def _no_init(self, *args, **kwargs):
+ if type(self)._is_protocol:
+ raise TypeError('Protocols cannot be instantiated')
+
+ class _ProtocolMeta(GenericMeta):
+ """Internal metaclass for Protocol.
+
+ This exists so Protocol classes can be generic without deriving
+ from Generic.
+ """
+ if not OLD_GENERICS:
+ def __new__(cls, name, bases, namespace,
+ tvars=None, args=None, origin=None, extra=None, orig_bases=None):
+ # This is just a version copied from GenericMeta.__new__ that
+ # includes "Protocol" special treatment. (Comments removed for brevity.)
+ assert extra is None # Protocols should not have extra
+ if tvars is not None:
+ assert origin is not None
+ assert all(isinstance(t, TypeVar) for t in tvars), tvars
+ else:
+ tvars = _type_vars(bases)
+ gvars = None
+ for base in bases:
+ if base is Generic:
+ raise TypeError("Cannot inherit from plain Generic")
+ if (isinstance(base, GenericMeta) and
+ base.__origin__ in (Generic, Protocol)):
+ if gvars is not None:
+ raise TypeError(
+ "Cannot inherit from Generic[...] or"
+ " Protocol[...] multiple times.")
+ gvars = base.__parameters__
+ if gvars is None:
+ gvars = tvars
+ else:
+ tvarset = set(tvars)
+ gvarset = set(gvars)
+ if not tvarset <= gvarset:
+ raise TypeError(
+ "Some type variables (%s) "
+ "are not listed in %s[%s]" %
+ (", ".join(str(t) for t in tvars if t not in gvarset),
+ "Generic" if any(b.__origin__ is Generic
+ for b in bases) else "Protocol",
+ ", ".join(str(g) for g in gvars)))
+ tvars = gvars
+
+ initial_bases = bases
+ if (extra is not None and type(extra) is abc.ABCMeta and
+ extra not in bases):
+ bases = (extra,) + bases
+ bases = tuple(_gorg(b) if isinstance(b, GenericMeta) else b
+ for b in bases)
+ if any(isinstance(b, GenericMeta) and b is not Generic for b in bases):
+ bases = tuple(b for b in bases if b is not Generic)
+ namespace.update({'__origin__': origin, '__extra__': extra})
+ self = super(GenericMeta, cls).__new__(cls, name, bases, namespace,
+ _root=True)
+ super(GenericMeta, self).__setattr__('_gorg',
+ self if not origin else
+ _gorg(origin))
+ self.__parameters__ = tvars
+ self.__args__ = tuple(... if a is _TypingEllipsis else
+ () if a is _TypingEmpty else
+ a for a in args) if args else None
+ self.__next_in_mro__ = _next_in_mro(self)
+ if orig_bases is None:
+ self.__orig_bases__ = initial_bases
+ elif origin is not None:
+ self._abc_registry = origin._abc_registry
+ self._abc_cache = origin._abc_cache
+ if hasattr(self, '_subs_tree'):
+ self.__tree_hash__ = (hash(self._subs_tree()) if origin else
+ super(GenericMeta, self).__hash__())
+ return self
+
+ def __init__(cls, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ if not cls.__dict__.get('_is_protocol', None):
+ cls._is_protocol = any(b is Protocol or
+ isinstance(b, _ProtocolMeta) and
+ b.__origin__ is Protocol
+ for b in cls.__bases__)
+ if cls._is_protocol:
+ for base in cls.__mro__[1:]:
+ if not (base in (object, Generic) or
+ base.__module__ == 'collections.abc' and
+ base.__name__ in _PROTO_WHITELIST or
+ isinstance(base, TypingMeta) and base._is_protocol or
+ isinstance(base, GenericMeta) and
+ base.__origin__ is Generic):
+ raise TypeError('Protocols can only inherit from other'
+ ' protocols, got %r' % base)
+
+ cls.__init__ = _no_init
+
+ def _proto_hook(other):
+ if not cls.__dict__.get('_is_protocol', None):
+ return NotImplemented
+ if not isinstance(other, type):
+ # Same error as for issubclass(1, int)
+ raise TypeError('issubclass() arg 1 must be a class')
+ for attr in _get_protocol_attrs(cls):
+ for base in other.__mro__:
+ if attr in base.__dict__:
+ if base.__dict__[attr] is None:
+ return NotImplemented
+ break
+ annotations = getattr(base, '__annotations__', {})
+ if (isinstance(annotations, typing.Mapping) and
+ attr in annotations and
+ isinstance(other, _ProtocolMeta) and
+ other._is_protocol):
+ break
+ else:
+ return NotImplemented
+ return True
+ if '__subclasshook__' not in cls.__dict__:
+ cls.__subclasshook__ = _proto_hook
+
+ def __instancecheck__(self, instance):
+ # We need this method for situations where attributes are
+ # assigned in __init__.
+ if ((not getattr(self, '_is_protocol', False) or
+ _is_callable_members_only(self)) and
+ issubclass(instance.__class__, self)):
+ return True
+ if self._is_protocol:
+ if all(hasattr(instance, attr) and
+ (not callable(getattr(self, attr, None)) or
+ getattr(instance, attr) is not None)
+ for attr in _get_protocol_attrs(self)):
+ return True
+ return super(GenericMeta, self).__instancecheck__(instance)
+
+ def __subclasscheck__(self, cls):
+ if self.__origin__ is not None:
+ if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools']:
+ raise TypeError("Parameterized generics cannot be used with class "
+ "or instance checks")
+ return False
+ if (self.__dict__.get('_is_protocol', None) and
+ not self.__dict__.get('_is_runtime_protocol', None)):
+ if sys._getframe(1).f_globals['__name__'] in ['abc',
+ 'functools',
+ 'typing']:
+ return False
+ raise TypeError("Instance and class checks can only be used with"
+ " @runtime protocols")
+ if (self.__dict__.get('_is_runtime_protocol', None) and
+ not _is_callable_members_only(self)):
+ if sys._getframe(1).f_globals['__name__'] in ['abc',
+ 'functools',
+ 'typing']:
+ return super(GenericMeta, self).__subclasscheck__(cls)
+ raise TypeError("Protocols with non-method members"
+ " don't support issubclass()")
+ return super(GenericMeta, self).__subclasscheck__(cls)
+
+ if not OLD_GENERICS:
+ @_tp_cache
+ def __getitem__(self, params):
+ # We also need to copy this from GenericMeta.__getitem__ to get
+ # special treatment of "Protocol". (Comments removed for brevity.)
+ if not isinstance(params, tuple):
+ params = (params,)
+ if not params and _gorg(self) is not Tuple:
+ raise TypeError(
+ "Parameter list to %s[...] cannot be empty" % self.__qualname__)
+ msg = "Parameters to generic types must be types."
+ params = tuple(_type_check(p, msg) for p in params)
+ if self in (Generic, Protocol):
+ if not all(isinstance(p, TypeVar) for p in params):
+ raise TypeError(
+ "Parameters to %r[...] must all be type variables" % self)
+ if len(set(params)) != len(params):
+ raise TypeError(
+ "Parameters to %r[...] must all be unique" % self)
+ tvars = params
+ args = params
+ elif self in (Tuple, Callable):
+ tvars = _type_vars(params)
+ args = params
+ elif self.__origin__ in (Generic, Protocol):
+ raise TypeError("Cannot subscript already-subscripted %s" %
+ repr(self))
+ else:
+ _check_generic(self, params)
+ tvars = _type_vars(params)
+ args = params
+
+ prepend = (self,) if self.__origin__ is None else ()
+ return self.__class__(self.__name__,
+ prepend + self.__bases__,
+ _no_slots_copy(self.__dict__),
+ tvars=tvars,
+ args=args,
+ origin=self,
+ extra=self.__extra__,
+ orig_bases=self.__orig_bases__)
+
+ class Protocol(metaclass=_ProtocolMeta):
+ """Base class for protocol classes. Protocol classes are defined as::
+
+ class Proto(Protocol):
+ def meth(self) -> int:
+ ...
+
+ Such classes are primarily used with static type checkers that recognize
+ structural subtyping (static duck-typing), for example::
+
+ class C:
+ def meth(self) -> int:
+ return 0
+
+ def func(x: Proto) -> int:
+ return x.meth()
+
+ func(C()) # Passes static type check
+
+ See PEP 544 for details. Protocol classes decorated with
+    @typing_extensions.runtime act as simple-minded runtime protocols that check
+ only the presence of given attributes, ignoring their type signatures.
+
+    Protocol classes can be generic; they are defined as::
+
+ class GenProto({bases}):
+ def meth(self) -> T:
+ ...
+ """
+ __slots__ = ()
+ _is_protocol = True
+
+ def __new__(cls, *args, **kwds):
+ if _gorg(cls) is Protocol:
+ raise TypeError("Type Protocol cannot be instantiated; "
+ "it can be used only as a base class")
+ if OLD_GENERICS:
+ return _generic_new(_next_in_mro(cls), cls, *args, **kwds)
+ return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
+ if Protocol.__doc__ is not None:
+ Protocol.__doc__ = Protocol.__doc__.format(bases="Protocol, Generic[T]" if
+ OLD_GENERICS else "Protocol[T]")
+
+
+elif PEP_560:
+ from typing import _type_check, _GenericAlias, _collect_type_vars # noqa
+
+ def _no_init(self, *args, **kwargs):
+ if type(self)._is_protocol:
+ raise TypeError('Protocols cannot be instantiated')
+
+ class _ProtocolMeta(abc.ABCMeta):
+ # This metaclass is a bit unfortunate and exists only because of the lack
+ # of __instancehook__.
+ def __instancecheck__(cls, instance):
+ # We need this method for situations where attributes are
+ # assigned in __init__.
+ if ((not getattr(cls, '_is_protocol', False) or
+ _is_callable_members_only(cls)) and
+ issubclass(instance.__class__, cls)):
+ return True
+ if cls._is_protocol:
+ if all(hasattr(instance, attr) and
+ (not callable(getattr(cls, attr, None)) or
+ getattr(instance, attr) is not None)
+ for attr in _get_protocol_attrs(cls)):
+ return True
+ return super().__instancecheck__(instance)
+
+ class Protocol(metaclass=_ProtocolMeta):
+ # There is quite a lot of overlapping code with typing.Generic.
+ # Unfortunately it is hard to avoid this while these live in two different
+ # modules. The duplicated code will be removed when Protocol is moved to typing.
+ """Base class for protocol classes. Protocol classes are defined as::
+
+ class Proto(Protocol):
+ def meth(self) -> int:
+ ...
+
+ Such classes are primarily used with static type checkers that recognize
+ structural subtyping (static duck-typing), for example::
+
+ class C:
+ def meth(self) -> int:
+ return 0
+
+ def func(x: Proto) -> int:
+ return x.meth()
+
+ func(C()) # Passes static type check
+
+ See PEP 544 for details. Protocol classes decorated with
+    @typing_extensions.runtime act as simple-minded runtime protocols that check
+ only the presence of given attributes, ignoring their type signatures.
+
+    Protocol classes can be generic; they are defined as::
+
+ class GenProto(Protocol[T]):
+ def meth(self) -> T:
+ ...
+ """
+ __slots__ = ()
+ _is_protocol = True
+
+ def __new__(cls, *args, **kwds):
+ if cls is Protocol:
+ raise TypeError("Type Protocol cannot be instantiated; "
+ "it can only be used as a base class")
+ return super().__new__(cls)
+
+ @_tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple):
+ params = (params,)
+ if not params and cls is not Tuple:
+ raise TypeError(
+ "Parameter list to {}[...] cannot be empty".format(cls.__qualname__))
+ msg = "Parameters to generic types must be types."
+ params = tuple(_type_check(p, msg) for p in params)
+ if cls is Protocol:
+ # Generic can only be subscripted with unique type variables.
+ if not all(isinstance(p, TypeVar) for p in params):
+ i = 0
+ while isinstance(params[i], TypeVar):
+ i += 1
+ raise TypeError(
+ "Parameters to Protocol[...] must all be type variables."
+ " Parameter {} is {}".format(i + 1, params[i]))
+ if len(set(params)) != len(params):
+ raise TypeError(
+ "Parameters to Protocol[...] must all be unique")
+ else:
+ # Subscripting a regular Generic subclass.
+ _check_generic(cls, params)
+ return _GenericAlias(cls, params)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ tvars = []
+ if '__orig_bases__' in cls.__dict__:
+ error = Generic in cls.__orig_bases__
+ else:
+ error = Generic in cls.__bases__
+ if error:
+ raise TypeError("Cannot inherit from plain Generic")
+ if '__orig_bases__' in cls.__dict__:
+ tvars = _collect_type_vars(cls.__orig_bases__)
+ # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn].
+ # If found, tvars must be a subset of it.
+ # If not found, tvars is it.
+ # Also check for and reject plain Generic,
+ # and reject multiple Generic[...] and/or Protocol[...].
+ gvars = None
+ for base in cls.__orig_bases__:
+ if (isinstance(base, _GenericAlias) and
+ base.__origin__ in (Generic, Protocol)):
+ # for error messages
+ the_base = 'Generic' if base.__origin__ is Generic else 'Protocol'
+ if gvars is not None:
+ raise TypeError(
+ "Cannot inherit from Generic[...]"
+ " and/or Protocol[...] multiple types.")
+ gvars = base.__parameters__
+ if gvars is None:
+ gvars = tvars
+ else:
+ tvarset = set(tvars)
+ gvarset = set(gvars)
+ if not tvarset <= gvarset:
+ s_vars = ', '.join(str(t) for t in tvars if t not in gvarset)
+ s_args = ', '.join(str(g) for g in gvars)
+ raise TypeError("Some type variables ({}) are"
+ " not listed in {}[{}]".format(s_vars,
+ the_base, s_args))
+ tvars = gvars
+ cls.__parameters__ = tuple(tvars)
+
+ # Determine if this is a protocol or a concrete subclass.
+ if not cls.__dict__.get('_is_protocol', None):
+ cls._is_protocol = any(b is Protocol for b in cls.__bases__)
+
+ # Set (or override) the protocol subclass hook.
+ def _proto_hook(other):
+ if not cls.__dict__.get('_is_protocol', None):
+ return NotImplemented
+ if not getattr(cls, '_is_runtime_protocol', False):
+ if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
+ return NotImplemented
+ raise TypeError("Instance and class checks can only be used with"
+ " @runtime protocols")
+ if not _is_callable_members_only(cls):
+ if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']:
+ return NotImplemented
+ raise TypeError("Protocols with non-method members"
+ " don't support issubclass()")
+ if not isinstance(other, type):
+ # Same error as for issubclass(1, int)
+ raise TypeError('issubclass() arg 1 must be a class')
+ for attr in _get_protocol_attrs(cls):
+ for base in other.__mro__:
+ if attr in base.__dict__:
+ if base.__dict__[attr] is None:
+ return NotImplemented
+ break
+ annotations = getattr(base, '__annotations__', {})
+ if (isinstance(annotations, typing.Mapping) and
+ attr in annotations and
+ isinstance(other, _ProtocolMeta) and
+ other._is_protocol):
+ break
+ else:
+ return NotImplemented
+ return True
+ if '__subclasshook__' not in cls.__dict__:
+ cls.__subclasshook__ = _proto_hook
+
+ # We have nothing more to do for non-protocols.
+ if not cls._is_protocol:
+ return
+
+ # Check consistency of bases.
+ for base in cls.__bases__:
+ if not (base in (object, Generic) or
+ base.__module__ == 'collections.abc' and
+ base.__name__ in _PROTO_WHITELIST or
+ isinstance(base, _ProtocolMeta) and base._is_protocol):
+ raise TypeError('Protocols can only inherit from other'
+ ' protocols, got %r' % base)
+ cls.__init__ = _no_init
+
+
+if hasattr(typing, 'runtime_checkable'):
+ runtime_checkable = typing.runtime_checkable
+elif HAVE_PROTOCOLS:
+ def runtime_checkable(cls):
+ """Mark a protocol class as a runtime protocol, so that it
+ can be used with isinstance() and issubclass(). Raise TypeError
+ if applied to a non-protocol class.
+
+ This allows a simple-minded structural check very similar to the
+ one-offs in collections.abc such as Hashable.
+ """
+ if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol:
+ raise TypeError('@runtime_checkable can be only applied to protocol classes,'
+ ' got %r' % cls)
+ cls._is_runtime_protocol = True
+ return cls
+
+
+if HAVE_PROTOCOLS:
+ # Exists for backwards compatibility.
+ runtime = runtime_checkable
+
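+# Illustrative sketch (not part of the upstream module): with the backport
+# above, a @runtime_checkable protocol passes isinstance() on attribute
+# presence alone.  ``Closable`` and ``Resource`` are hypothetical names.
+#
+#     @runtime_checkable
+#     class Closable(Protocol):
+#         def close(self) -> None: ...
+#
+#     class Resource:
+#         def close(self) -> None:
+#             pass
+#
+#     isinstance(Resource(), Closable)  # True: close() is present and callable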
+
+if hasattr(typing, 'SupportsIndex'):
+ SupportsIndex = typing.SupportsIndex
+elif HAVE_PROTOCOLS:
+ @runtime_checkable
+ class SupportsIndex(Protocol):
+ __slots__ = ()
+
+ @abc.abstractmethod
+ def __index__(self) -> int:
+ pass
+
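+    # Illustrative sketch (not part of the upstream module): any object that
+    # defines __index__ satisfies SupportsIndex structurally.
+    #
+    #     isinstance(3, SupportsIndex)    # True -- int defines __index__
+    #     isinstance(3.5, SupportsIndex)  # False -- float does not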
+
+if sys.version_info >= (3, 9, 2):
+ # The standard library TypedDict in Python 3.8 does not store runtime information
+ # about which (if any) keys are optional. See https://bugs.python.org/issue38834
+ # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
+ # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
+ TypedDict = typing.TypedDict
+else:
+ def _check_fails(cls, other):
+ try:
+ if sys._getframe(1).f_globals['__name__'] not in ['abc',
+ 'functools',
+ 'typing']:
+ # Typed dicts are only for static structural subtyping.
+ raise TypeError('TypedDict does not support instance and class checks')
+ except (AttributeError, ValueError):
+ pass
+ return False
+
+ def _dict_new(*args, **kwargs):
+ if not args:
+ raise TypeError('TypedDict.__new__(): not enough arguments')
+        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
+ return dict(*args, **kwargs)
+
+ _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)'
+
+ def _typeddict_new(*args, total=True, **kwargs):
+ if not args:
+ raise TypeError('TypedDict.__new__(): not enough arguments')
+        _, args = args[0], args[1:]  # allow the "cls" keyword to be passed
+ if args:
+            typename, args = args[0], args[1:]  # allow the "_typename" keyword to be passed
+ elif '_typename' in kwargs:
+ typename = kwargs.pop('_typename')
+ import warnings
+ warnings.warn("Passing '_typename' as keyword argument is deprecated",
+ DeprecationWarning, stacklevel=2)
+ else:
+ raise TypeError("TypedDict.__new__() missing 1 required positional "
+ "argument: '_typename'")
+ if args:
+ try:
+                fields, = args  # allow the "_fields" keyword to be passed
+ except ValueError:
+ raise TypeError('TypedDict.__new__() takes from 2 to 3 '
+ 'positional arguments but {} '
+ 'were given'.format(len(args) + 2))
+ elif '_fields' in kwargs and len(kwargs) == 1:
+ fields = kwargs.pop('_fields')
+ import warnings
+ warnings.warn("Passing '_fields' as keyword argument is deprecated",
+ DeprecationWarning, stacklevel=2)
+ else:
+ fields = None
+
+ if fields is None:
+ fields = kwargs
+ elif kwargs:
+ raise TypeError("TypedDict takes either a dict or keyword arguments,"
+ " but not both")
+
+ ns = {'__annotations__': dict(fields)}
+ try:
+ # Setting correct module is necessary to make typed dict classes pickleable.
+ ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ pass
+
+ return _TypedDictMeta(typename, (), ns, total=total)
+
+ _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,'
+ ' /, *, total=True, **kwargs)')
+
+ class _TypedDictMeta(type):
+ def __init__(cls, name, bases, ns, total=True):
+ # In Python 3.4 and 3.5 the __init__ method also needs to support the keyword arguments.
+ # See https://www.python.org/dev/peps/pep-0487/#implementation-details
+ super(_TypedDictMeta, cls).__init__(name, bases, ns)
+
+ def __new__(cls, name, bases, ns, total=True):
+ # Create new typed dict class object.
+ # This method is called directly when TypedDict is subclassed,
+ # or via _typeddict_new when TypedDict is instantiated. This way
+ # TypedDict supports all three syntaxes described in its docstring.
+ # Subclasses and instances of TypedDict return actual dictionaries
+ # via _dict_new.
+ ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new
+ tp_dict = super(_TypedDictMeta, cls).__new__(cls, name, (dict,), ns)
+
+ annotations = {}
+ own_annotations = ns.get('__annotations__', {})
+ own_annotation_keys = set(own_annotations.keys())
+ msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
+ own_annotations = {
+ n: typing._type_check(tp, msg) for n, tp in own_annotations.items()
+ }
+ required_keys = set()
+ optional_keys = set()
+
+ for base in bases:
+ annotations.update(base.__dict__.get('__annotations__', {}))
+ required_keys.update(base.__dict__.get('__required_keys__', ()))
+ optional_keys.update(base.__dict__.get('__optional_keys__', ()))
+
+ annotations.update(own_annotations)
+ if total:
+ required_keys.update(own_annotation_keys)
+ else:
+ optional_keys.update(own_annotation_keys)
+
+ tp_dict.__annotations__ = annotations
+ tp_dict.__required_keys__ = frozenset(required_keys)
+ tp_dict.__optional_keys__ = frozenset(optional_keys)
+ if not hasattr(tp_dict, '__total__'):
+ tp_dict.__total__ = total
+ return tp_dict
+
+ __instancecheck__ = __subclasscheck__ = _check_fails
+
+ TypedDict = _TypedDictMeta('TypedDict', (dict,), {})
+ TypedDict.__module__ = __name__
+ TypedDict.__doc__ = \
+ """A simple typed name space. At runtime it is equivalent to a plain dict.
+
+ TypedDict creates a dictionary type that expects all of its
+ instances to have a certain set of keys, with each key
+ associated with a value of a consistent type. This expectation
+ is not checked at runtime but is only enforced by type checkers.
+ Usage::
+
+ class Point2D(TypedDict):
+ x: int
+ y: int
+ label: str
+
+ a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK
+ b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check
+
+ assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
+
+ The type info can be accessed via the Point2D.__annotations__ dict, and
+ the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
+ TypedDict supports two additional equivalent forms::
+
+ Point2D = TypedDict('Point2D', x=int, y=int, label=str)
+ Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
+
+    The class syntax is only supported in Python 3.6+, while the two other
+    syntax forms work for Python 2.7 and 3.2+.
+ """
+
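+    # Illustrative sketch (not part of the upstream module): unlike the 3.8
+    # stdlib TypedDict, this backport records which keys are optional.
+    # ``Point`` is a hypothetical example name.
+    #
+    #     class Point(TypedDict, total=False):
+    #         x: int
+    #         y: int
+    #
+    #     Point.__required_keys__  # frozenset()
+    #     Point.__optional_keys__  # frozenset({'x', 'y'})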
+
+# Python 3.9+ has PEP 593 (Annotated and modified get_type_hints)
+if hasattr(typing, 'Annotated'):
+ Annotated = typing.Annotated
+ get_type_hints = typing.get_type_hints
+ # Not exported and not a public API, but needed for get_origin() and get_args()
+ # to work.
+ _AnnotatedAlias = typing._AnnotatedAlias
+elif PEP_560:
+ class _AnnotatedAlias(typing._GenericAlias, _root=True):
+ """Runtime representation of an annotated type.
+
+ At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
+ with extra annotations. The alias behaves like a normal typing alias,
+ instantiating is the same as instantiating the underlying type, binding
+ it to types is also the same.
+ """
+ def __init__(self, origin, metadata):
+ if isinstance(origin, _AnnotatedAlias):
+ metadata = origin.__metadata__ + metadata
+ origin = origin.__origin__
+ super().__init__(origin, origin)
+ self.__metadata__ = metadata
+
+ def copy_with(self, params):
+ assert len(params) == 1
+ new_type = params[0]
+ return _AnnotatedAlias(new_type, self.__metadata__)
+
+ def __repr__(self):
+ return "typing_extensions.Annotated[{}, {}]".format(
+ typing._type_repr(self.__origin__),
+ ", ".join(repr(a) for a in self.__metadata__)
+ )
+
+ def __reduce__(self):
+ return operator.getitem, (
+ Annotated, (self.__origin__,) + self.__metadata__
+ )
+
+ def __eq__(self, other):
+ if not isinstance(other, _AnnotatedAlias):
+ return NotImplemented
+ if self.__origin__ != other.__origin__:
+ return False
+ return self.__metadata__ == other.__metadata__
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__metadata__))
+
+ class Annotated:
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type (and will be in
+ the __origin__ field), the remaining arguments are kept as a tuple in
+        the __metadata__ field.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+ __slots__ = ()
+
+ def __new__(cls, *args, **kwargs):
+ raise TypeError("Type Annotated cannot be instantiated.")
+
+ @_tp_cache
+ def __class_getitem__(cls, params):
+ if not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError("Annotated[...] should be used "
+ "with at least two arguments (a type and an "
+ "annotation).")
+ msg = "Annotated[t, ...]: t must be a type."
+ origin = typing._type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return _AnnotatedAlias(origin, metadata)
+
+ def __init_subclass__(cls, *args, **kwargs):
+ raise TypeError(
+ "Cannot subclass {}.Annotated".format(cls.__module__)
+ )
+
+ def _strip_annotations(t):
+ """Strips the annotations from a given type.
+ """
+ if isinstance(t, _AnnotatedAlias):
+ return _strip_annotations(t.__origin__)
+ if isinstance(t, typing._GenericAlias):
+ stripped_args = tuple(_strip_annotations(a) for a in t.__args__)
+ if stripped_args == t.__args__:
+ return t
+ res = t.copy_with(stripped_args)
+ res._special = t._special
+ return res
+ return t
+
+ def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
+ """Return type hints for an object.
+
+ This is often the same as obj.__annotations__, but it handles
+ forward references encoded as string literals, adds Optional[t] if a
+ default value equal to None is set and recursively replaces all
+ 'Annotated[T, ...]' with 'T' (unless 'include_extras=True').
+
+ The argument may be a module, class, method, or function. The annotations
+ are returned as a dictionary. For classes, annotations include also
+ inherited members.
+
+ TypeError is raised if the argument is not of a type that can contain
+ annotations, and an empty dictionary is returned if no annotations are
+ present.
+
+ BEWARE -- the behavior of globalns and localns is counterintuitive
+ (unless you are familiar with how eval() and exec() work). The
+ search order is locals first, then globals.
+
+ - If no dict arguments are passed, an attempt is made to use the
+ globals from obj (or the respective module's globals for classes),
+ and these are also used as the locals. If the object does not appear
+ to have globals, an empty dictionary is used.
+
+ - If one dict argument is passed, it is used for both globals and
+ locals.
+
+ - If two dict arguments are passed, they specify globals and
+ locals, respectively.
+ """
+ hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
+ if include_extras:
+ return hint
+ return {k: _strip_annotations(t) for k, t in hint.items()}
+
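+    # Illustrative sketch (not part of the upstream module): include_extras
+    # controls whether Annotated wrappers survive.  ``f`` is a hypothetical
+    # example function.
+    #
+    #     def f(x: Annotated[int, 'units=px']) -> None: ...
+    #
+    #     get_type_hints(f)                       # {'x': int, 'return': NoneType}
+    #     get_type_hints(f, include_extras=True)  # {'x': Annotated[int, 'units=px'], ...}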
+elif HAVE_ANNOTATED:
+
+ def _is_dunder(name):
+ """Returns True if name is a __dunder_variable_name__."""
+ return len(name) > 4 and name.startswith('__') and name.endswith('__')
+
+ # Prior to Python 3.7 types did not have `copy_with`. A lot of the equality
+    # checks, argument expansion etc. are done on the _subs_tree(). As a result we
+ # can't provide a get_type_hints function that strips out annotations.
+
+ class AnnotatedMeta(typing.GenericMeta):
+ """Metaclass for Annotated"""
+
+ def __new__(cls, name, bases, namespace, **kwargs):
+ if any(b is not object for b in bases):
+ raise TypeError("Cannot subclass " + str(Annotated))
+ return super().__new__(cls, name, bases, namespace, **kwargs)
+
+ @property
+ def __metadata__(self):
+ return self._subs_tree()[2]
+
+ def _tree_repr(self, tree):
+ cls, origin, metadata = tree
+ if not isinstance(origin, tuple):
+ tp_repr = typing._type_repr(origin)
+ else:
+ tp_repr = origin[0]._tree_repr(origin)
+ metadata_reprs = ", ".join(repr(arg) for arg in metadata)
+ return '%s[%s, %s]' % (cls, tp_repr, metadata_reprs)
+
+ def _subs_tree(self, tvars=None, args=None): # noqa
+ if self is Annotated:
+ return Annotated
+ res = super()._subs_tree(tvars=tvars, args=args)
+ # Flatten nested Annotated
+ if isinstance(res[1], tuple) and res[1][0] is Annotated:
+ sub_tp = res[1][1]
+ sub_annot = res[1][2]
+ return (Annotated, sub_tp, sub_annot + res[2])
+ return res
+
+ def _get_cons(self):
+ """Return the class used to create instance of this type."""
+ if self.__origin__ is None:
+ raise TypeError("Cannot get the underlying type of a "
+ "non-specialized Annotated type.")
+ tree = self._subs_tree()
+ while isinstance(tree, tuple) and tree[0] is Annotated:
+ tree = tree[1]
+ if isinstance(tree, tuple):
+ return tree[0]
+ else:
+ return tree
+
+ @_tp_cache
+ def __getitem__(self, params):
+ if not isinstance(params, tuple):
+ params = (params,)
+ if self.__origin__ is not None: # specializing an instantiated type
+ return super().__getitem__(params)
+ elif not isinstance(params, tuple) or len(params) < 2:
+ raise TypeError("Annotated[...] should be instantiated "
+ "with at least two arguments (a type and an "
+ "annotation).")
+ else:
+ msg = "Annotated[t, ...]: t must be a type."
+ tp = typing._type_check(params[0], msg)
+ metadata = tuple(params[1:])
+ return self.__class__(
+ self.__name__,
+ self.__bases__,
+ _no_slots_copy(self.__dict__),
+ tvars=_type_vars((tp,)),
+ # Metadata is a tuple so it won't be touched by _replace_args et al.
+ args=(tp, metadata),
+ origin=self,
+ )
+
+ def __call__(self, *args, **kwargs):
+ cons = self._get_cons()
+ result = cons(*args, **kwargs)
+ try:
+ result.__orig_class__ = self
+ except AttributeError:
+ pass
+ return result
+
+ def __getattr__(self, attr):
+ # For simplicity we just don't relay all dunder names
+ if self.__origin__ is not None and not _is_dunder(attr):
+ return getattr(self._get_cons(), attr)
+ raise AttributeError(attr)
+
+ def __setattr__(self, attr, value):
+ if _is_dunder(attr) or attr.startswith('_abc_'):
+ super().__setattr__(attr, value)
+ elif self.__origin__ is None:
+ raise AttributeError(attr)
+ else:
+ setattr(self._get_cons(), attr, value)
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Annotated cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Annotated cannot be used with issubclass().")
+
+ class Annotated(metaclass=AnnotatedMeta):
+ """Add context specific metadata to a type.
+
+ Example: Annotated[int, runtime_check.Unsigned] indicates to the
+ hypothetical runtime_check module that this type is an unsigned int.
+ Every other consumer of this type can ignore this metadata and treat
+ this type as int.
+
+ The first argument to Annotated must be a valid type, the remaining
+ arguments are kept as a tuple in the __metadata__ field.
+
+ Details:
+
+ - It's an error to call `Annotated` with less than two arguments.
+ - Nested Annotated are flattened::
+
+ Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
+
+ - Instantiating an annotated type is equivalent to instantiating the
+ underlying type::
+
+ Annotated[C, Ann1](5) == C(5)
+
+ - Annotated can be used as a generic type alias::
+
+ Optimized = Annotated[T, runtime.Optimize()]
+ Optimized[int] == Annotated[int, runtime.Optimize()]
+
+ OptimizedList = Annotated[List[T], runtime.Optimize()]
+ OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
+ """
+
+# Python 3.8 has get_origin() and get_args() but those implementations aren't
+# Annotated-aware, so we can't use those; only the Python 3.9 versions will do.
+# Similarly, Python 3.9's implementation doesn't support ParamSpecArgs and
+# ParamSpecKwargs.
+if sys.version_info[:2] >= (3, 10):
+ get_origin = typing.get_origin
+ get_args = typing.get_args
+elif PEP_560:
+ from typing import _GenericAlias
+ try:
+ # 3.9+
+ from typing import _BaseGenericAlias
+ except ImportError:
+ _BaseGenericAlias = _GenericAlias
+ try:
+ # 3.9+
+ from typing import GenericAlias
+ except ImportError:
+ GenericAlias = _GenericAlias
+
+ def get_origin(tp):
+ """Get the unsubscripted version of a type.
+
+ This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
+ and Annotated. Return None for unsupported types. Examples::
+
+ get_origin(Literal[42]) is Literal
+ get_origin(int) is None
+ get_origin(ClassVar[int]) is ClassVar
+ get_origin(Generic) is Generic
+ get_origin(Generic[T]) is Generic
+ get_origin(Union[T, int]) is Union
+ get_origin(List[Tuple[T, T]][int]) == list
+ get_origin(P.args) is P
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return Annotated
+ if isinstance(tp, (_GenericAlias, GenericAlias, _BaseGenericAlias,
+ ParamSpecArgs, ParamSpecKwargs)):
+ return tp.__origin__
+ if tp is Generic:
+ return Generic
+ return None
+
+ def get_args(tp):
+ """Get type arguments with all substitutions performed.
+
+ For unions, basic simplifications used by Union constructor are performed.
+ Examples::
+ get_args(Dict[str, int]) == (str, int)
+ get_args(int) == ()
+ get_args(Union[int, Union[T, int], str][int]) == (int, str)
+ get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
+ get_args(Callable[[], T][int]) == ([], int)
+ """
+ if isinstance(tp, _AnnotatedAlias):
+ return (tp.__origin__,) + tp.__metadata__
+ if isinstance(tp, (_GenericAlias, GenericAlias)):
+ if getattr(tp, "_special", False):
+ return ()
+ res = tp.__args__
+ if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
+ res = (list(res[:-1]), res[-1])
+ return res
+ return ()
+
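+    # Illustrative sketch (not part of the upstream module): the helpers above
+    # are Annotated-aware, unlike the Python 3.8 stdlib versions.
+    #
+    #     get_origin(Annotated[int, 'meta'])  # Annotated
+    #     get_args(Annotated[int, 'meta'])    # (int, 'meta')
+    #     get_origin(List[int])               # list
+    #     get_args(Callable[[int], str])      # ([int], str)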
+
+if hasattr(typing, 'TypeAlias'):
+ TypeAlias = typing.TypeAlias
+elif sys.version_info[:2] >= (3, 9):
+ class _TypeAliasForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ @_TypeAliasForm
+ def TypeAlias(self, parameters):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+ raise TypeError("{} is not subscriptable".format(self))
+
+elif sys.version_info[:2] >= (3, 7):
+ class _TypeAliasForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ TypeAlias = _TypeAliasForm('TypeAlias',
+ doc="""Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example
+ above.""")
+
+elif hasattr(typing, '_FinalTypingBase'):
+ class _TypeAliasMeta(typing.TypingMeta):
+ """Metaclass for TypeAlias"""
+
+ def __repr__(self):
+ return 'typing_extensions.TypeAlias'
+
+ class _TypeAliasBase(typing._FinalTypingBase, metaclass=_TypeAliasMeta, _root=True):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError("TypeAlias cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("TypeAlias cannot be used with issubclass().")
+
+ def __repr__(self):
+ return 'typing_extensions.TypeAlias'
+
+ TypeAlias = _TypeAliasBase(_root=True)
+else:
+ class _TypeAliasMeta(typing.TypingMeta):
+ """Metaclass for TypeAlias"""
+
+ def __instancecheck__(self, obj):
+ raise TypeError("TypeAlias cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("TypeAlias cannot be used with issubclass().")
+
+ def __call__(self, *args, **kwargs):
+ raise TypeError("Cannot instantiate TypeAlias")
+
+ class TypeAlias(metaclass=_TypeAliasMeta, _root=True):
+ """Special marker indicating that an assignment should
+ be recognized as a proper type alias definition by type
+ checkers.
+
+ For example::
+
+ Predicate: TypeAlias = Callable[..., bool]
+
+ It's invalid when used anywhere except as in the example above.
+ """
+ __slots__ = ()
+
+
+# Python 3.10+ has PEP 612
+if hasattr(typing, 'ParamSpecArgs'):
+ ParamSpecArgs = typing.ParamSpecArgs
+ ParamSpecKwargs = typing.ParamSpecKwargs
+else:
+ class _Immutable:
+ """Mixin to indicate that object should not be copied."""
+ __slots__ = ()
+
+ def __copy__(self):
+ return self
+
+ def __deepcopy__(self, memo):
+ return self
+
+ class ParamSpecArgs(_Immutable):
+ """The args for a ParamSpec object.
+
+ Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
+
+ ParamSpecArgs objects have a reference back to their ParamSpec:
+
+ P.args.__origin__ is P
+
+ This type is meant for runtime introspection and has no special meaning to
+ static type checkers.
+ """
+ def __init__(self, origin):
+ self.__origin__ = origin
+
+ def __repr__(self):
+ return "{}.args".format(self.__origin__.__name__)
+
+ class ParamSpecKwargs(_Immutable):
+ """The kwargs for a ParamSpec object.
+
+ Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
+
+ ParamSpecKwargs objects have a reference back to their ParamSpec:
+
+ P.kwargs.__origin__ is P
+
+ This type is meant for runtime introspection and has no special meaning to
+ static type checkers.
+ """
+ def __init__(self, origin):
+ self.__origin__ = origin
+
+ def __repr__(self):
+ return "{}.kwargs".format(self.__origin__.__name__)
+
+if hasattr(typing, 'ParamSpec'):
+ ParamSpec = typing.ParamSpec
+else:
+
+ # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+ class ParamSpec(list):
+ """Parameter specification variable.
+
+ Usage::
+
+ P = ParamSpec('P')
+
+ Parameter specification variables exist primarily for the benefit of static
+ type checkers. They are used to forward the parameter types of one
+ callable to another callable, a pattern commonly found in higher order
+ functions and decorators. They are only valid when used in ``Concatenate``,
+    or as the first argument to ``Callable``. In Python 3.10 and higher,
+ they are also supported in user-defined Generics at runtime.
+ See class Generic for more information on generic types. An
+ example for annotating a decorator::
+
+ T = TypeVar('T')
+ P = ParamSpec('P')
+
+ def add_logging(f: Callable[P, T]) -> Callable[P, T]:
+ '''A type-safe decorator to add logging to a function.'''
+ def inner(*args: P.args, **kwargs: P.kwargs) -> T:
+ logging.info(f'{f.__name__} was called')
+ return f(*args, **kwargs)
+ return inner
+
+ @add_logging
+ def add_two(x: float, y: float) -> float:
+ '''Add two numbers together.'''
+ return x + y
+
+ Parameter specification variables defined with covariant=True or
+ contravariant=True can be used to declare covariant or contravariant
+ generic types. These keyword arguments are valid, but their actual semantics
+ are yet to be decided. See PEP 612 for details.
+
+        Parameter specification variables can be introspected, e.g.::
+
+           P.__name__ == 'P'
+ P.__bound__ == None
+ P.__covariant__ == False
+ P.__contravariant__ == False
+
+ Note that only parameter specification variables defined in global scope can
+ be pickled.
+ """
+
+ @property
+ def args(self):
+ return ParamSpecArgs(self)
+
+ @property
+ def kwargs(self):
+ return ParamSpecKwargs(self)
+
+ def __init__(self, name, *, bound=None, covariant=False, contravariant=False):
+ super().__init__([self])
+ self.__name__ = name
+ self.__covariant__ = bool(covariant)
+ self.__contravariant__ = bool(contravariant)
+ if bound:
+ self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
+ else:
+ self.__bound__ = None
+
+ # for pickling:
+ try:
+ def_mod = sys._getframe(1).f_globals.get('__name__', '__main__')
+ except (AttributeError, ValueError):
+ def_mod = None
+ if def_mod != 'typing_extensions':
+ self.__module__ = def_mod
+
+ def __repr__(self):
+ if self.__covariant__:
+ prefix = '+'
+ elif self.__contravariant__:
+ prefix = '-'
+ else:
+ prefix = '~'
+ return prefix + self.__name__
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ def __eq__(self, other):
+ return self is other
+
+ def __reduce__(self):
+ return self.__name__
+
+ # Hack to get typing._type_check to pass.
+ def __call__(self, *args, **kwargs):
+ pass
+
+ # Note: Can't fake ParamSpec as a TypeVar to get it to work
+ # with Generics. ParamSpec isn't an instance of TypeVar in 3.10.
+ # So encouraging code like isinstance(ParamSpec('P'), TypeVar))
+ # will lead to breakage in 3.10.
+ # This also means no accurate __parameters__ for GenericAliases.
+
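+    # Illustrative sketch (not part of the upstream module): runtime
+    # introspection of the backported ParamSpec.
+    #
+    #     P = ParamSpec('P')
+    #     P.args                    # repr "P.args", a ParamSpecArgs instance
+    #     P.args.__origin__ is P    # True
+    #     P.kwargs.__origin__ is P  # True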
+# Inherits from list as a workaround for Callable checks in Python < 3.9.2.
+class _ConcatenateGenericAlias(list):
+ def __init__(self, origin, args):
+ super().__init__(args)
+ self.__origin__ = origin
+ self.__args__ = args
+
+ def __repr__(self):
+ _type_repr = typing._type_repr
+ return '{origin}[{args}]' \
+ .format(origin=_type_repr(self.__origin__),
+ args=', '.join(_type_repr(arg) for arg in self.__args__))
+
+ def __hash__(self):
+ return hash((self.__origin__, self.__args__))
+
+@_tp_cache
+def _concatenate_getitem(self, parameters):
+ if parameters == ():
+ raise TypeError("Cannot take a Concatenate of no types.")
+ if not isinstance(parameters, tuple):
+ parameters = (parameters,)
+ if not isinstance(parameters[-1], ParamSpec):
+ raise TypeError("The last parameter to Concatenate should be a "
+ "ParamSpec variable.")
+ msg = "Concatenate[arg, ...]: each arg must be a type."
+ parameters = tuple(typing._type_check(p, msg) for p in parameters)
+ return _ConcatenateGenericAlias(self, parameters)
+
+
+if hasattr(typing, 'Concatenate'):
+ Concatenate = typing.Concatenate
+ _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa
+elif sys.version_info[:2] >= (3, 9):
+ @_TypeAliasForm
+ def Concatenate(self, parameters):
+ """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """
+ return _concatenate_getitem(self, parameters)
+
+elif sys.version_info[:2] >= (3, 7):
+ class _ConcatenateForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ return _concatenate_getitem(self, parameters)
+
+ Concatenate = _ConcatenateForm('Concatenate',
+ doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """)
+
+elif hasattr(typing, '_FinalTypingBase'):
+ class _ConcatenateAliasMeta(typing.TypingMeta):
+ """Metaclass for Concatenate."""
+
+ def __repr__(self):
+ return 'typing_extensions.Concatenate'
+
+ class _ConcatenateAliasBase(typing._FinalTypingBase,
+ metaclass=_ConcatenateAliasMeta,
+ _root=True):
+ """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """
+ __slots__ = ()
+
+ def __instancecheck__(self, obj):
+ raise TypeError("Concatenate cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("Concatenate cannot be used with issubclass().")
+
+ def __repr__(self):
+ return 'typing_extensions.Concatenate'
+
+ def __getitem__(self, parameters):
+ return _concatenate_getitem(self, parameters)
+
+ Concatenate = _ConcatenateAliasBase(_root=True)
+# For 3.5.0 - 3.5.2
+else:
+ class _ConcatenateAliasMeta(typing.TypingMeta):
+ """Metaclass for Concatenate."""
+
+        def __instancecheck__(self, obj):
+            raise TypeError("Concatenate cannot be used with isinstance().")
+
+        def __subclasscheck__(self, cls):
+            raise TypeError("Concatenate cannot be used with issubclass().")
+
+        def __call__(self, *args, **kwargs):
+            raise TypeError("Cannot instantiate Concatenate")
+
+ def __getitem__(self, parameters):
+ return _concatenate_getitem(self, parameters)
+
+ class Concatenate(metaclass=_ConcatenateAliasMeta, _root=True):
+ """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
+ higher order function which adds, removes or transforms parameters of a
+ callable.
+
+ For example::
+
+ Callable[Concatenate[int, P], int]
+
+ See PEP 612 for detailed information.
+ """
+ __slots__ = ()
+
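+# Illustrative sketch (not part of the upstream module): Concatenate describes
+# a decorator that injects a leading argument.  ``with_lock`` is hypothetical;
+# ``Lock`` stands in for e.g. threading.Lock.
+#
+#     P = ParamSpec('P')
+#     R = TypeVar('R')
+#
+#     def with_lock(f: Callable[Concatenate[Lock, P], R]) -> Callable[P, R]:
+#         def inner(*args: P.args, **kwargs: P.kwargs) -> R:
+#             return f(Lock(), *args, **kwargs)
+#         return inner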
+if hasattr(typing, 'TypeGuard'):
+ TypeGuard = typing.TypeGuard
+elif sys.version_info[:2] >= (3, 9):
+ class _TypeGuardForm(typing._SpecialForm, _root=True):
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ @_TypeGuardForm
+ def TypeGuard(self, parameters):
+ """Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """
+        item = typing._type_check(parameters, '{} accepts only a single type.'.format(self))
+ return _GenericAlias(self, (item,))
+
+elif sys.version_info[:2] >= (3, 7):
+ class _TypeGuardForm(typing._SpecialForm, _root=True):
+
+ def __repr__(self):
+ return 'typing_extensions.' + self._name
+
+ def __getitem__(self, parameters):
+ item = typing._type_check(parameters,
+ '{} accepts only a single type'.format(self._name))
+ return _GenericAlias(self, (item,))
+
+ TypeGuard = _TypeGuardForm(
+ 'TypeGuard',
+ doc="""Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """)
+elif hasattr(typing, '_FinalTypingBase'):
+ class _TypeGuard(typing._FinalTypingBase, _root=True):
+ """Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """
+
+ __slots__ = ('__type__',)
+
+ def __init__(self, tp=None, **kwds):
+ self.__type__ = tp
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is None:
+ return cls(typing._type_check(item,
+ '{} accepts only a single type.'.format(cls.__name__[1:])),
+ _root=True)
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(new_tp, _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += '[{}]'.format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not isinstance(other, _TypeGuard):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ TypeGuard = _TypeGuard(_root=True)
+else:
+ class _TypeGuardMeta(typing.TypingMeta):
+ """Metaclass for TypeGuard"""
+
+ def __new__(cls, name, bases, namespace, tp=None, _root=False):
+ self = super().__new__(cls, name, bases, namespace, _root=_root)
+ if tp is not None:
+ self.__type__ = tp
+ return self
+
+ def __instancecheck__(self, obj):
+ raise TypeError("TypeGuard cannot be used with isinstance().")
+
+ def __subclasscheck__(self, cls):
+ raise TypeError("TypeGuard cannot be used with issubclass().")
+
+ def __getitem__(self, item):
+ cls = type(self)
+ if self.__type__ is not None:
+ raise TypeError('{} cannot be further subscripted'
+ .format(cls.__name__[1:]))
+
+ param = typing._type_check(
+ item,
+                '{} accepts only a single type.'.format(cls.__name__[1:]))
+ return cls(self.__name__, self.__bases__,
+ dict(self.__dict__), tp=param, _root=True)
+
+ def _eval_type(self, globalns, localns):
+ new_tp = typing._eval_type(self.__type__, globalns, localns)
+ if new_tp == self.__type__:
+ return self
+ return type(self)(self.__name__, self.__bases__,
+ dict(self.__dict__), tp=self.__type__,
+ _root=True)
+
+ def __repr__(self):
+ r = super().__repr__()
+ if self.__type__ is not None:
+ r += '[{}]'.format(typing._type_repr(self.__type__))
+ return r
+
+ def __hash__(self):
+ return hash((type(self).__name__, self.__type__))
+
+ def __eq__(self, other):
+ if not hasattr(other, "__type__"):
+ return NotImplemented
+ if self.__type__ is not None:
+ return self.__type__ == other.__type__
+ return self is other
+
+ class TypeGuard(typing.Final, metaclass=_TypeGuardMeta, _root=True):
+ """Special typing form used to annotate the return type of a user-defined
+ type guard function. ``TypeGuard`` only accepts a single type argument.
+ At runtime, functions marked this way should return a boolean.
+
+ ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
+ type checkers to determine a more precise type of an expression within a
+ program's code flow. Usually type narrowing is done by analyzing
+ conditional code flow and applying the narrowing to a block of code. The
+ conditional expression here is sometimes referred to as a "type guard".
+
+ Sometimes it would be convenient to use a user-defined boolean function
+ as a type guard. Such a function should use ``TypeGuard[...]`` as its
+ return type to alert static type checkers to this intention.
+
+ Using ``-> TypeGuard`` tells the static type checker that for a given
+ function:
+
+ 1. The return value is a boolean.
+ 2. If the return value is ``True``, the type of its argument
+ is the type inside ``TypeGuard``.
+
+ For example::
+
+ def is_str(val: Union[str, float]):
+ # "isinstance" type guard
+ if isinstance(val, str):
+ # Type of ``val`` is narrowed to ``str``
+ ...
+ else:
+ # Else, type of ``val`` is narrowed to ``float``.
+ ...
+
+ Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
+ form of ``TypeA`` (it can even be a wider form) and this may lead to
+ type-unsafe results. The main reason is to allow for things like
+ narrowing ``List[object]`` to ``List[str]`` even though the latter is not
+ a subtype of the former, since ``List`` is invariant. The responsibility of
+ writing type-safe type guards is left to the user.
+
+ ``TypeGuard`` also works with type variables. For more information, see
+ PEP 647 (User-Defined Type Guards).
+ """
+ __type__ = None
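+
+# Illustrative sketch (not part of the upstream module): a user-defined type
+# guard.  ``is_str_list`` is a hypothetical example name.
+#
+#     def is_str_list(val: List[object]) -> TypeGuard[List[str]]:
+#         return all(isinstance(x, str) for x in val)
+#
+#     # After ``if is_str_list(xs): ...`` a static checker narrows ``xs``
+#     # to List[str] inside the guarded block.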
diff --git a/third_party/python/urllib3/urllib3-1.26.0.dist-info/LICENSE.txt b/third_party/python/urllib3/urllib3-1.26.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000..429a1767e4
--- /dev/null
+++ b/third_party/python/urllib3/urllib3-1.26.0.dist-info/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2008-2020 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/third_party/python/urllib3/urllib3-1.26.0.dist-info/METADATA b/third_party/python/urllib3/urllib3-1.26.0.dist-info/METADATA
new file mode 100644
index 0000000000..39869aafad
--- /dev/null
+++ b/third_party/python/urllib3/urllib3-1.26.0.dist-info/METADATA
@@ -0,0 +1,1335 @@
+Metadata-Version: 2.1
+Name: urllib3
+Version: 1.26.0
+Summary: HTTP library with thread-safe connection pooling, file post, and more.
+Home-page: https://urllib3.readthedocs.io/
+Author: Andrey Petrov
+Author-email: andrey.petrov@shazow.net
+License: MIT
+Project-URL: Documentation, https://urllib3.readthedocs.io/
+Project-URL: Code, https://github.com/urllib3/urllib3
+Project-URL: Issue tracker, https://github.com/urllib3/urllib3/issues
+Keywords: urllib httplib threadsafe filepost http https ssl pooling
+Platform: UNKNOWN
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Internet :: WWW/HTTP
+Classifier: Topic :: Software Development :: Libraries
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4
+Description-Content-Type: text/x-rst
+Provides-Extra: brotli
+Requires-Dist: brotlipy (>=0.6.0) ; extra == 'brotli'
+Provides-Extra: secure
+Requires-Dist: pyOpenSSL (>=0.14) ; extra == 'secure'
+Requires-Dist: cryptography (>=1.3.4) ; extra == 'secure'
+Requires-Dist: idna (>=2.0.0) ; extra == 'secure'
+Requires-Dist: certifi ; extra == 'secure'
+Requires-Dist: ipaddress ; (python_version == "2.7") and extra == 'secure'
+Provides-Extra: socks
+Requires-Dist: PySocks (!=1.5.7,<2.0,>=1.5.6) ; extra == 'socks'
+
+
+urllib3 is a powerful, *user-friendly* HTTP client for Python. Much of the
+Python ecosystem already uses urllib3 and you should too.
+urllib3 brings many critical features that are missing from the Python
+standard libraries:
+
+- Thread safety.
+- Connection pooling.
+- Client-side SSL/TLS verification.
+- File uploads with multipart encoding.
+- Helpers for retrying requests and dealing with HTTP redirects.
+- Support for gzip, deflate, and brotli encoding.
+- Proxy support for HTTP and SOCKS.
+- 100% test coverage.
+
+urllib3 is powerful and easy to use:
+
+.. code-block:: python
+
+ >>> import urllib3
+ >>> http = urllib3.PoolManager()
+ >>> r = http.request('GET', 'http://httpbin.org/robots.txt')
+ >>> r.status
+ 200
+ >>> r.data
+   b'User-agent: *\nDisallow: /deny\n'
+
+
+Installing
+----------
+
+urllib3 can be installed with `pip <https://pip.pypa.io>`_::
+
+ $ python -m pip install urllib3
+
+Alternatively, you can grab the latest source code from `GitHub <https://github.com/urllib3/urllib3>`_::
+
+ $ git clone git://github.com/urllib3/urllib3.git
+ $ python setup.py install
+
+
+Documentation
+-------------
+
+urllib3 has usage and reference documentation at `urllib3.readthedocs.io <https://urllib3.readthedocs.io>`_.
+
+
+Contributing
+------------
+
+urllib3 happily accepts contributions. Please see our
+`contributing documentation <https://urllib3.readthedocs.io/en/latest/contributing.html>`_
+for some tips on getting started.
+
+
+Security Disclosures
+--------------------
+
+To report a security vulnerability, please use the
+`Tidelift security contact <https://tidelift.com/security>`_.
+Tidelift will coordinate the fix and disclosure with maintainers.
+
+
+Maintainers
+-----------
+
+- `@sethmlarson <https://github.com/sethmlarson>`__ (Seth M. Larson)
+- `@pquentin <https://github.com/pquentin>`__ (Quentin Pradet)
+- `@theacodes <https://github.com/theacodes>`__ (Thea Flowers)
+- `@haikuginger <https://github.com/haikuginger>`__ (Jess Shapiro)
+- `@lukasa <https://github.com/lukasa>`__ (Cory Benfield)
+- `@sigmavirus24 <https://github.com/sigmavirus24>`__ (Ian Stapleton Cordasco)
+- `@shazow <https://github.com/shazow>`__ (Andrey Petrov)
+
+👋
+
+
+Sponsorship
+-----------
+
+If your company benefits from this library, please consider `sponsoring its
+development <https://urllib3.readthedocs.io/en/latest/sponsors.html>`_.
+
+
+For Enterprise
+--------------
+
+.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logos_RGB_Tidelift_Shorthand_On-White_small.png
+ :width: 75
+ :alt: Tidelift
+
+.. list-table::
+ :widths: 10 100
+
+ * - |tideliftlogo|
+ - Professional support for urllib3 is available as part of the `Tidelift
+ Subscription`_. Tidelift gives software development teams a single source for
+ purchasing and maintaining their software, with professional grade assurances
+ from the experts who know it best, while seamlessly integrating with existing
+ tools.
+
+.. _Tidelift Subscription: https://tidelift.com/subscription/pkg/pypi-urllib3?utm_source=pypi-urllib3&utm_medium=referral&utm_campaign=readme
+
+
+Changes
+=======
+
+1.26.0 (2020-11-10)
+-------------------
+
+* **NOTE: urllib3 v2.0 will drop support for Python 2**.
+ `Read more in the v2.0 Roadmap <https://urllib3.readthedocs.io/en/latest/v2-roadmap.html>`_.
+
+* Added support for HTTPS proxies contacting HTTPS servers (Pull #1923, Pull #1806)
+
+* Deprecated negotiating TLSv1 and TLSv1.1 by default. Users who
+  still wish to use TLS earlier than 1.2 without a deprecation warning
+  should opt in explicitly by setting ``ssl_version=ssl.PROTOCOL_TLSv1_1`` (Pull #2002)
+  **Starting in urllib3 v2.0: Connections that receive a ``DeprecationWarning`` will fail**
+
+* Deprecated ``Retry`` options ``Retry.DEFAULT_METHOD_WHITELIST``, ``Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST``
+  and ``Retry(method_whitelist=...)`` in favor of ``Retry.DEFAULT_ALLOWED_METHODS``,
+  ``Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT``, and ``Retry(allowed_methods=...)``
+  (Pull #2000; see the sketch after this list) **Starting in urllib3 v2.0: Deprecated options will be removed**
+
+* Added default ``User-Agent`` header to every request (Pull #1750)
+
+* Added ``urllib3.util.SKIP_HEADER`` for preventing the ``User-Agent``, ``Accept-Encoding``,
+  and ``Host`` headers from being automatically emitted with requests (Pull #2018;
+  see the sketch after this list)
+
+* Collapse ``transfer-encoding: chunked`` request data and framing into
+ the same ``socket.send()`` call (Pull #1906)
+
+* Send ``http/1.1`` ALPN identifier with every TLS handshake by default (Pull #1894)
+
+* Properly terminate SecureTransport connections when CA verification fails (Pull #1977)
+
+* Don't emit an ``SNIMissingWarning`` when passing ``server_hostname=None``
+ to SecureTransport (Pull #1903)
+
+* Disabled requesting TLSv1.2 session tickets as they weren't being used by urllib3 (Pull #1970)
+
+* Suppress ``BrokenPipeError`` when writing request body after the server
+ has closed the socket (Pull #1524)
+
+* Wrap ``ssl.SSLError`` that can be raised from reading a socket (e.g. "bad MAC")
+ into an ``urllib3.exceptions.SSLError`` (Pull #1939)
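+
+A minimal sketch (not part of the upstream changelog) of the new ``Retry``
+options and ``SKIP_HEADER`` described above; the URL and method list are
+illustrative placeholders:
+
+.. code-block:: python
+
+    import urllib3
+    from urllib3.util import SKIP_HEADER
+    from urllib3.util.retry import Retry
+
+    http = urllib3.PoolManager()
+
+    # New-style Retry configuration, replacing the deprecated
+    # method_whitelist keyword.
+    retries = Retry(total=3, allowed_methods=["GET", "PUT"])
+
+    # Suppress the automatic User-Agent header for this request only.
+    r = http.request(
+        "GET",
+        "https://example.com",  # placeholder URL
+        retries=retries,
+        headers={"User-Agent": SKIP_HEADER},
+    )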
+
+
+1.25.11 (2020-10-19)
+--------------------
+
+* Fix retry backoff time parsed from ``Retry-After`` header when given
+ in the HTTP date format. The HTTP date was parsed as the local timezone
+ rather than accounting for the timezone in the HTTP date (typically
+ UTC) (Pull #1932, Pull #1935, Pull #1938, Pull #1949)
+
+* Fix issue where an error would be raised when the ``SSLKEYLOGFILE``
+ environment variable was set to the empty string. Now ``SSLContext.keylog_file``
+ is not set in this situation (Pull #2016)
+
+
+1.25.10 (2020-07-22)
+--------------------
+
+* Added support for the ``SSLKEYLOGFILE`` environment variable for
+  logging TLS session keys, for use with programs like
+  Wireshark for decrypting captured web traffic (Pull #1867; see the sketch after this list)
+
+* Fixed loading of SecureTransport libraries on macOS Big Sur
+ due to the new dynamic linker cache (Pull #1905)
+
+* Collapse chunked request body data and framing into one
+  call to ``send()`` to reduce the number of TCP packets by 2-4x (Pull #1906)
+
+* Don't insert ``None`` into ``ConnectionPool`` if the pool
+ was empty when requesting a connection (Pull #1866)
+
+* Avoid ``hasattr`` call in ``BrotliDecoder.decompress()`` (Pull #1858)
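+
+A minimal sketch (not part of the upstream changelog) of the
+``SSLKEYLOGFILE`` feature above; the log path and URL are placeholders, and
+``SSLContext.keylog_file`` requires Python 3.8+ built against OpenSSL 1.1.1+:
+
+.. code-block:: python
+
+    import os
+
+    # Must be set before urllib3 creates its SSLContext objects.
+    os.environ["SSLKEYLOGFILE"] = "/tmp/tls-keys.log"  # placeholder path
+
+    import urllib3
+
+    http = urllib3.PoolManager()
+    http.request("GET", "https://example.com")  # placeholder URL
+    # Session keys are appended to the log file, which Wireshark can use
+    # to decrypt the captured TLS traffic.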
+
+
+1.25.9 (2020-04-16)
+-------------------
+
+* Added ``InvalidProxyConfigurationWarning``, which is raised when an
+  HTTPS proxy URL is erroneously specified. urllib3 doesn't currently
+  support connecting to HTTPS proxies, but will soon be able to, and we
+  would like users to migrate properly without much breakage.
+
+ See `this GitHub issue <https://github.com/urllib3/urllib3/issues/1850>`_
+ for more information on how to fix your proxy config. (Pull #1851)
+
+* Drain connection after ``PoolManager`` redirect (Pull #1817)
+
+* Ensure ``load_verify_locations`` raises ``SSLError`` for all backends (Pull #1812)
+
+* Rename ``VerifiedHTTPSConnection`` to ``HTTPSConnection`` (Pull #1805)
+
+* Allow the CA certificate data to be passed as a string (Pull #1804)
+
+* Raise ``ValueError`` if method contains control characters (Pull #1800)
+
+* Add ``__repr__`` to ``Timeout`` (Pull #1795)
+
+
+1.25.8 (2020-01-20)
+-------------------
+
+* Drop support for EOL Python 3.4 (Pull #1774)
+
+* Optimize _encode_invalid_chars (Pull #1787)
+
+
+1.25.7 (2019-11-11)
+-------------------
+
+* Preserve ``chunked`` parameter on retries (Pull #1715, Pull #1734)
+
+* Allow unset ``SERVER_SOFTWARE`` in App Engine (Pull #1704, Issue #1470)
+
+* Fix issue where URL fragment was sent within the request target. (Pull #1732)
+
+* Fix issue where an empty query section in a URL would fail to parse. (Pull #1732)
+
+* Remove TLS 1.3 support in SecureTransport due to Apple removing support (Pull #1703)
+
+
+1.25.6 (2019-09-24)
+-------------------
+
+* Fix issue where tilde (``~``) characters were incorrectly
+ percent-encoded in the path. (Pull #1692)
+
+
+1.25.5 (2019-09-19)
+-------------------
+
+* Add mitigation for BPO-37428 affecting Python <3.7.4 and OpenSSL 1.1.1+ which
+ caused certificate verification to be enabled when using ``cert_reqs=CERT_NONE``.
+ (Issue #1682)
+
+
+1.25.4 (2019-09-19)
+-------------------
+
+* Propagate Retry-After header settings to subsequent retries. (Pull #1607)
+
+* Fix edge case where Retry-After header was still respected even when
+ explicitly opted out of. (Pull #1607)
+
+* Remove dependency on ``rfc3986`` for URL parsing.
+
+* Fix issue where URLs containing invalid characters within ``Url.auth`` would
+ raise an exception instead of percent-encoding those characters.
+
+* Add support for ``HTTPResponse.auto_close = False`` which makes HTTP responses
+  work well with ``io.BufferedReader`` and other ``io`` module features. (Pull #1652;
+  see the sketch after this list)
+
+* Percent-encode invalid characters in URL for ``HTTPConnectionPool.request()`` (Pull #1673)
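+
+A minimal sketch (not part of the upstream changelog) of
+``HTTPResponse.auto_close = False`` above, shown for Python 3; the URL is a
+placeholder:
+
+.. code-block:: python
+
+    import io
+
+    import urllib3
+
+    http = urllib3.PoolManager()
+    resp = http.request("GET", "https://example.com", preload_content=False)
+
+    # Keep the raw response open so the io wrapper controls its lifetime.
+    resp.auto_close = False
+    for line in io.TextIOWrapper(resp):
+        print(line, end="")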
+
+
+1.25.3 (2019-05-23)
+-------------------
+
+* Change ``HTTPSConnection`` to load system CA certificates
+ when ``ca_certs``, ``ca_cert_dir``, and ``ssl_context`` are
+ unspecified. (Pull #1608, Issue #1603)
+
+* Upgrade bundled rfc3986 to v1.3.2. (Pull #1609, Issue #1605)
+
+
+1.25.2 (2019-04-28)
+-------------------
+
+* Change ``is_ipaddress`` to not detect IPvFuture addresses. (Pull #1583)
+
+* Change ``parse_url`` to percent-encode invalid characters within the
+ path, query, and target components. (Pull #1586)
+
+
+1.25.1 (2019-04-24)
+-------------------
+
+* Add support for Google's ``Brotli`` package. (Pull #1572, Pull #1579)
+
+* Upgrade bundled rfc3986 to v1.3.1 (Pull #1578)
+
+
+1.25 (2019-04-22)
+-----------------
+
+* Require and validate certificates by default when using HTTPS (Pull #1507)
+
+* Upgraded ``urllib3.util.parse_url()`` to be RFC 3986 compliant. (Pull #1487)
+
+* Added support for ``key_password`` for ``HTTPSConnectionPool`` to use
+  an encrypted ``key_file`` without creating your own ``SSLContext`` object. (Pull #1489;
+  see the sketch after this list)
+
+* Add TLSv1.3 support to CPython, pyOpenSSL, and SecureTransport ``SSLContext``
+ implementations. (Pull #1496)
+
+* Switched the default multipart header encoder from RFC 2231 to HTML 5 working draft. (Issue #303, Pull #1492)
+
+* Fixed issue where OpenSSL would block if an encrypted client private key was
+ given and no password was given. Instead an ``SSLError`` is raised. (Pull #1489)
+
+* Added support for Brotli content encoding. It is enabled automatically if
+  the ``brotlipy`` package is installed, which can be requested with the
+  ``urllib3[brotli]`` extra. (Pull #1532)
+
+* Drop ciphers using DSS key exchange from default TLS cipher suites.
+ Improve default ciphers when using SecureTransport. (Pull #1496)
+
+* Implemented a more efficient ``HTTPResponse.__iter__()`` method. (Issue #1483)
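+
+A minimal sketch (not part of the upstream changelog) of ``key_password``
+above; the host, file paths, and password are placeholders:
+
+.. code-block:: python
+
+    import urllib3
+
+    pool = urllib3.HTTPSConnectionPool(
+        "example.com",           # placeholder host
+        cert_file="client.pem",  # placeholder client certificate
+        key_file="client.key",   # placeholder encrypted private key
+        key_password="s3cr3t",   # decrypts key_file; no SSLContext needed
+    )
+    resp = pool.request("GET", "/")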
+
+1.24.3 (2019-05-01)
+-------------------
+
+* Apply fix for CVE-2019-9740. (Pull #1591)
+
+1.24.2 (2019-04-17)
+-------------------
+
+* Don't load system certificates by default when any other ``ca_certs``, ``ca_certs_dir`` or
+ ``ssl_context`` parameters are specified.
+
+* Remove Authorization header regardless of case when redirecting to cross-site. (Issue #1510)
+
+* Add support for IPv6 addresses in subjectAltName section of certificates. (Issue #1269)
+
+
+1.24.1 (2018-11-02)
+-------------------
+
+* Remove quadratic behavior within ``GzipDecoder.decompress()`` (Issue #1467)
+
+* Restored functionality of ``ciphers`` parameter for ``create_urllib3_context()``. (Issue #1462)
+
+
+1.24 (2018-10-16)
+-----------------
+
+* Allow ``key_server_hostname`` to be specified when initializing a ``PoolManager``
+  so that a custom SNI hostname can be sent. (Pull #1449)
+
+* Test against Python 3.7 on AppVeyor. (Pull #1453)
+
+* Early-out ipv6 checks when running on App Engine. (Pull #1450)
+
+* Change ambiguous description of backoff_factor (Pull #1436)
+
+* Add ability to handle multiple Content-Encodings (Issue #1441 and Pull #1442)
+
+* Skip DNS names that can't be idna-decoded when using pyOpenSSL (Issue #1405).
+
+* Add a server_hostname parameter to HTTPSConnection which allows for
+ overriding the SNI hostname sent in the handshake. (Pull #1397)
+
+* Drop support for EOL Python 2.6 (Pull #1429 and Pull #1430)
+
+* Fixed bug where responses with header Content-Type: message/* erroneously
+ raised HeaderParsingError, resulting in a warning being logged. (Pull #1439)
+
+* Move urllib3 to src/urllib3 (Pull #1409)
+
+
+1.23 (2018-06-04)
+-----------------
+
+* Allow providing a list of headers to strip from requests when redirecting
+  to a different host. Defaults to the ``Authorization`` header. Different
+  headers can be set via ``Retry.remove_headers_on_redirect``. (Issue #1316;
+  see the sketch after this list)
+
+* Fix ``util.selectors._fileobj_to_fd`` to accept ``long`` (Issue #1247).
+
+* Dropped Python 3.3 support. (Pull #1242)
+
+* Put the connection back in the pool when calling stream() or read_chunked() on
+ a chunked HEAD response. (Issue #1234)
+
+* Fixed pyOpenSSL-specific ssl client authentication issue when clients
+ attempted to auth via certificate + chain (Issue #1060)
+
+* Add the port to the connectionpool connect print (Pull #1251)
+
+* Don't use the ``uuid`` module to create multipart data boundaries. (Pull #1380)
+
+* ``read_chunked()`` on a closed response returns no chunks. (Issue #1088)
+
+* Add Python 2.6 support to ``contrib.securetransport`` (Pull #1359)
+
+* Added support for auth info in url for SOCKS proxy (Pull #1363)
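+
+A minimal sketch (not part of the upstream changelog) of
+``Retry.remove_headers_on_redirect`` above; the URL is a placeholder:
+
+.. code-block:: python
+
+    import urllib3
+    from urllib3.util.retry import Retry
+
+    # Strip Cookie as well as the default Authorization header when a
+    # redirect sends the request to a different host.
+    retries = Retry(remove_headers_on_redirect=["Cookie", "Authorization"])
+
+    http = urllib3.PoolManager()
+    r = http.request("GET", "https://example.com", retries=retries)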
+
+
+1.22 (2017-07-20)
+-----------------
+
+* Fixed missing brackets in ``HTTP CONNECT`` when connecting to IPv6 address via
+ IPv6 proxy. (Issue #1222)
+
+* Made the connection pool retry on ``SSLError``. The original ``SSLError``
+ is available on ``MaxRetryError.reason``. (Issue #1112)
+
+* Drain and release connection before recursing on retry/redirect. Fixes
+ deadlocks with a blocking connectionpool. (Issue #1167)
+
+* Fixed compatibility for cookiejar. (Issue #1229)
+
+* pyopenssl: Use vendored version of ``six``. (Issue #1231)
+
+
+1.21.1 (2017-05-02)
+-------------------
+
+* Fixed SecureTransport issue that would cause long delays in response body
+ delivery. (Pull #1154)
+
+* Fixed regression in 1.21 that threw exceptions when users passed the
+ ``socket_options`` flag to the ``PoolManager``. (Issue #1165)
+
+* Fixed regression in 1.21 that threw exceptions when users passed the
+ ``assert_hostname`` or ``assert_fingerprint`` flag to the ``PoolManager``.
+ (Pull #1157)
+
+
+1.21 (2017-04-25)
+-----------------
+
+* Improved performance of certain selector system calls on Python 3.5 and
+ later. (Pull #1095)
+
+* Resolved issue where the PyOpenSSL backend would not wrap SysCallError
+ exceptions appropriately when sending data. (Pull #1125)
+
+* The selectors backend now detects a monkey-patched ``select`` module after
+  import, for libraries like eventlet and greenlet that patch it. (Pull #1128)
+
+* Reduced memory consumption when streaming zlib-compressed responses
+ (as opposed to raw deflate streams). (Pull #1129)
+
+* Connection pools now use the entire request context when constructing the
+ pool key. (Pull #1016)
+
+* ``PoolManager.connection_from_*`` methods now accept a new keyword argument,
+ ``pool_kwargs``, which are merged with the existing ``connection_pool_kw``.
+ (Pull #1016)
+
+* Add retry counter for ``status_forcelist``. (Issue #1147)
+
+* Added ``contrib`` module for using SecureTransport on macOS:
+ ``urllib3.contrib.securetransport``. (Pull #1122)
+
+* urllib3 now only normalizes the case of ``http://`` and ``https://`` schemes:
+ for schemes it does not recognise, it assumes they are case-sensitive and
+ leaves them unchanged.
+ (Issue #1080)
+
+
+1.20 (2017-01-19)
+-----------------
+
+* Added support for waiting for I/O using selectors other than select,
+ improving urllib3's behaviour with large numbers of concurrent connections.
+ (Pull #1001)
+
+* Updated the date for the system clock check. (Issue #1005)
+
+* ConnectionPools now correctly consider hostnames to be case-insensitive.
+ (Issue #1032)
+
+* Outdated versions of PyOpenSSL now cause the PyOpenSSL contrib module
+ to fail when it is injected, rather than at first use. (Pull #1063)
+
+* Outdated versions of cryptography now cause the PyOpenSSL contrib module
+ to fail when it is injected, rather than at first use. (Issue #1044)
+
+* Automatically attempt to rewind a file-like body object when a request is
+ retried or redirected. (Pull #1039)
+
+* Fix some bugs that occur when modules incautiously patch the queue module.
+ (Pull #1061)
+
+* Prevent retries from occurring on read timeouts for which the request method
+ was not in the method whitelist. (Issue #1059)
+
+* Changed the PyOpenSSL contrib module to lazily load idna to avoid
+ unnecessarily bloating the memory of programs that don't need it. (Pull
+ #1076)
+
+* Add support for IPv6 literals with zone identifiers. (Pull #1013)
+
+* Added support for socks5h:// and socks4a:// schemes when working with SOCKS
+ proxies, and controlled remote DNS appropriately. (Issue #1035)
+
+
+1.19.1 (2016-11-16)
+-------------------
+
+* Fixed AppEngine import that didn't function on Python 3.5. (Pull #1025)
+
+
+1.19 (2016-11-03)
+-----------------
+
+* urllib3 now respects Retry-After headers on 413, 429, and 503 responses when
+ using the default retry logic. (Pull #955)
+
+* Remove markers from setup.py to assist ancient setuptools versions. (Issue
+ #986)
+
+* Disallow superscripts and other integerish things in URL ports. (Issue #989)
+
+* Allow urllib3's HTTPResponse.stream() method to continue to work with
+ non-httplib underlying FPs. (Pull #990)
+
+* Empty filenames in multipart headers are now emitted as such, rather than
+ being suppressed. (Issue #1015)
+
+* Prefer user-supplied Host headers on chunked uploads. (Issue #1009)
+
+
+1.18.1 (2016-10-27)
+-------------------
+
+* CVE-2016-9015. Users who are using urllib3 version 1.17 or 1.18 along with
+ PyOpenSSL injection and OpenSSL 1.1.0 *must* upgrade to this version. This
+ release fixes a vulnerability whereby urllib3 in the above configuration
+ would silently fail to validate TLS certificates due to erroneously setting
+ invalid flags in OpenSSL's ``SSL_CTX_set_verify`` function. These erroneous
+ flags do not cause a problem in OpenSSL versions before 1.1.0, which
+  interpret the presence of any flag as requesting certificate validation.
+
+ There is no PR for this patch, as it was prepared for simultaneous disclosure
+ and release. The master branch received the same fix in Pull #1010.
+
+
+1.18 (2016-09-26)
+-----------------
+
+* Fixed incorrect message for IncompleteRead exception. (Pull #973)
+
+* Accept ``iPAddress`` subject alternative name fields in TLS certificates.
+ (Issue #258)
+
+* Fixed consistency of ``HTTPResponse.closed`` between Python 2 and 3.
+ (Issue #977)
+
+* Fixed handling of wildcard certificates when using PyOpenSSL. (Issue #979)
+
+
+1.17 (2016-09-06)
+-----------------
+
+* Accept ``SSLContext`` objects for use in SSL/TLS negotiation. (Issue #835;
+  see the sketch after this list)
+
+* ConnectionPool debug log now includes scheme, host, and port. (Issue #897)
+
+* Substantially refactored documentation. (Issue #887)
+
+* Used URLFetch default timeout on AppEngine, rather than hardcoding our own.
+ (Issue #858)
+
+* Normalize the scheme and host in the URL parser (Issue #833)
+
+* ``HTTPResponse`` contains the last ``Retry`` object, which now also
+ contains retries history. (Issue #848)
+
+* Timeout can no longer be set as boolean, and must be greater than zero.
+ (Pull #924)
+
+* Removed pyasn1 and ndg-httpsclient from dependencies used for PyOpenSSL. We
+ now use cryptography and idna, both of which are already dependencies of
+ PyOpenSSL. (Pull #930)
+
+* Fixed infinite loop in ``stream`` when amt=None. (Issue #928)
+
+* Try to use the operating system's certificates when we are using an
+ ``SSLContext``. (Pull #941)
+
+* Updated cipher suite list to allow ChaCha20+Poly1305. AES-GCM is preferred to
+ ChaCha20, but ChaCha20 is then preferred to everything else. (Pull #947)
+
+* Updated cipher suite list to remove 3DES-based cipher suites. (Pull #958)
+
+* Removed the cipher suite fallback to allow HIGH ciphers. (Pull #958)
+
+* Implemented ``length_remaining`` to determine remaining content
+ to be read. (Pull #949)
+
+* Implemented ``enforce_content_length`` to enable exceptions when
+ incomplete data chunks are received. (Pull #949)
+
+* Dropped connection start, dropped connection reset, redirect, forced retry,
+ and new HTTPS connection log levels to DEBUG, from INFO. (Pull #967)
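+
+A minimal sketch (not part of the upstream changelog) of passing an
+``SSLContext`` above; the CA bundle path and URL are placeholders:
+
+.. code-block:: python
+
+    import ssl
+
+    import urllib3
+
+    # urllib3 uses the supplied context as-is for TLS negotiation.
+    ctx = ssl.create_default_context()
+    ctx.load_verify_locations(cafile="ca-bundle.pem")  # placeholder bundle
+
+    http = urllib3.PoolManager(ssl_context=ctx)
+    r = http.request("GET", "https://example.com")  # placeholder URL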
+
+
+1.16 (2016-06-11)
+-----------------
+
+* Disable IPv6 DNS when IPv6 connections are not possible. (Issue #840)
+
+* Provide ``key_fn_by_scheme`` pool keying mechanism that can be
+ overridden. (Issue #830)
+
+* Normalize scheme and host to lowercase for pool keys, and include
+ ``source_address``. (Issue #830)
+
+* Cleaner exception chain in Python 3 for ``_make_request``.
+ (Issue #861)
+
+* Fixed installing ``urllib3[socks]`` extra. (Issue #864)
+
+* Fixed signature of ``ConnectionPool.close`` so it can actually safely be
+ called by subclasses. (Issue #873)
+
+* Retain ``release_conn`` state across retries. (Issues #651, #866)
+
+* Add customizable ``HTTPConnectionPool.ResponseCls``, which defaults to
+ ``HTTPResponse`` but can be replaced with a subclass. (Issue #879)
+
+
+1.15.1 (2016-04-11)
+-------------------
+
+* Fix packaging to include backports module. (Issue #841)
+
+
+1.15 (2016-04-06)
+-----------------
+
+* Added Retry(raise_on_status=False). (Issue #720)
+
+* Always use setuptools, no more distutils fallback. (Issue #785)
+
+* Dropped support for Python 3.2. (Issue #786)
+
+* Use chunked transfer encoding when requesting with ``chunked=True``.
+  (Issue #790; see the sketch after this list)
+
+* Fixed regression with IPv6 port parsing. (Issue #801)
+
+* Append SNIMissingWarning messages to allow users to specify it in
+ the PYTHONWARNINGS environment variable. (Issue #816)
+
+* Handle unicode headers in Py2. (Issue #818)
+
+* Log certificate when there is a hostname mismatch. (Issue #820)
+
+* Preserve order of request/response headers. (Issue #821)
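+
+A minimal sketch (not part of the upstream changelog) of ``chunked=True``
+above; the URL is a placeholder:
+
+.. code-block:: python
+
+    import urllib3
+
+    def body():
+        # Any iterable of bytes is framed as Transfer-Encoding: chunked.
+        yield b"hello, "
+        yield b"world"
+
+    http = urllib3.PoolManager()
+    r = http.request("POST", "https://example.com/upload",  # placeholder URL
+                     body=body(), chunked=True)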
+
+
+1.14 (2015-12-29)
+-----------------
+
+* contrib: SOCKS proxy support! (Issue #762)
+
+* Fixed AppEngine handling of transfer-encoding header and bug
+ in Timeout defaults checking. (Issue #763)
+
+
+1.13.1 (2015-12-18)
+-------------------
+
+* Fixed regression in IPv6 + SSL for match_hostname. (Issue #761)
+
+
+1.13 (2015-12-14)
+-----------------
+
+* Fixed ``pip install urllib3[secure]`` on modern pip. (Issue #706)
+
+* pyopenssl: Fixed SSL3_WRITE_PENDING error. (Issue #717)
+
+* pyopenssl: Support for TLSv1.1 and TLSv1.2. (Issue #696)
+
+* Close connections more defensively on exception. (Issue #734)
+
+* Adjusted ``read_chunked`` to handle gzipped, chunk-encoded bodies without
+ repeatedly flushing the decoder, to function better on Jython. (Issue #743)
+
+* Accept ``ca_cert_dir`` for SSL-related PoolManager configuration. (Issue #758)
+
+
+1.12 (2015-09-03)
+-----------------
+
+* Rely on ``six`` for importing ``httplib`` to work around
+ conflicts with other Python 3 shims. (Issue #688)
+
+* Add support for directories of certificate authorities, as supported by
+ OpenSSL. (Issue #701)
+
+* New exception: ``NewConnectionError``, raised when we fail to establish
+ a new connection, usually ``ECONNREFUSED`` socket error.
+
+
+1.11 (2015-07-21)
+-----------------
+
+* When ``ca_certs`` is given, ``cert_reqs`` defaults to
+ ``'CERT_REQUIRED'``. (Issue #650)
+
+* ``pip install urllib3[secure]`` will install Certifi and
+ PyOpenSSL as dependencies. (Issue #678)
+
+* Made ``HTTPHeaderDict`` usable as a ``headers`` input value
+ (Issues #632, #679)
+
+* Added `urllib3.contrib.appengine <https://urllib3.readthedocs.io/en/latest/contrib.html#google-app-engine>`_
+ which has an ``AppEngineManager`` for using ``URLFetch`` in a
+ Google AppEngine environment. (Issue #664)
+
+* Dev: Added test suite for AppEngine. (Issue #631)
+
+* Fix performance regression when using PyOpenSSL. (Issue #626)
+
+* Passing incorrect scheme (e.g. ``foo://``) will raise
+ ``ValueError`` instead of ``AssertionError`` (backwards
+ compatible for now, but please migrate). (Issue #640)
+
+* Fix pools not getting replenished when an error occurs during a
+ request using ``release_conn=False``. (Issue #644)
+
+* Fix pool-default headers not applying for url-encoded requests
+ like GET. (Issue #657)
+
+* log.warning in Python 3 when headers are skipped due to parsing
+ errors. (Issue #642)
+
+* Close and discard connections if an error occurs during read.
+ (Issue #660)
+
+* Fix host parsing for IPv6 proxies. (Issue #668)
+
+* Separate warning type SubjectAltNameWarning, now issued once
+ per host. (Issue #671)
+
+* Fix ``httplib.IncompleteRead`` not getting converted to
+ ``ProtocolError`` when using ``HTTPResponse.stream()``
+ (Issue #674)
+
+1.10.4 (2015-05-03)
+-------------------
+
+* Migrate tests to Tornado 4. (Issue #594)
+
+* Append default warning configuration rather than overwrite.
+ (Issue #603)
+
+* Fix streaming decoding regression. (Issue #595)
+
+* Fix chunked requests losing state across keep-alive connections.
+ (Issue #599)
+
+* Fix hanging when chunked HEAD response has no body. (Issue #605)
+
+
+1.10.3 (2015-04-21)
+-------------------
+
+* Emit ``InsecurePlatformWarning`` when SSLContext object is missing.
+ (Issue #558)
+
+* Fix regression of duplicate header keys being discarded.
+ (Issue #563)
+
+* ``Response.stream()`` returns a generator for chunked responses.
+ (Issue #560)
+
+* Set upper-bound timeout when waiting for a socket in PyOpenSSL.
+ (Issue #585)
+
+* Work on platforms without `ssl` module for plain HTTP requests.
+ (Issue #587)
+
+* Stop relying on the stdlib's default cipher list. (Issue #588)
+
+
+1.10.2 (2015-02-25)
+-------------------
+
+* Fix file descriptor leakage on retries. (Issue #548)
+
+* Removed RC4 from default cipher list. (Issue #551)
+
+* Header performance improvements. (Issue #544)
+
+* Fix PoolManager not obeying redirect retry settings. (Issue #553)
+
+
+1.10.1 (2015-02-10)
+-------------------
+
+* Pools can be used as context managers. (Issue #545; see the sketch after
+  this list)
+
+* Don't re-use connections which experienced an SSLError. (Issue #529)
+
+* Don't fail when gzip decoding an empty stream. (Issue #535)
+
+* Add sha256 support for fingerprint verification. (Issue #540)
+
+* Fixed handling of header values containing commas. (Issue #533)
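+
+A minimal sketch (not part of the upstream changelog) of a pool used as a
+context manager; the host is a placeholder:
+
+.. code-block:: python
+
+    import urllib3
+
+    # Idle connections are closed when the block exits.
+    with urllib3.HTTPConnectionPool("example.com", maxsize=2) as pool:
+        r = pool.request("GET", "/")
+        print(r.status)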
+
+
+1.10 (2014-12-14)
+-----------------
+
+* Disabled SSLv3. (Issue #473)
+
+* Add ``Url.url`` property to return the composed url string. (Issue #394)
+
+* Fixed PyOpenSSL + gevent ``WantWriteError``. (Issue #412)
+
+* ``MaxRetryError.reason`` will always be an exception, not string.
+ (Issue #481)
+
+* Fixed SSL-related timeouts not being detected as timeouts. (Issue #492)
+
+* Py3: Use ``ssl.create_default_context()`` when available. (Issue #473)
+
+* Emit ``InsecureRequestWarning`` for *every* insecure HTTPS request.
+ (Issue #496)
+
+* Emit ``SecurityWarning`` when certificate has no ``subjectAltName``.
+ (Issue #499)
+
+* Close and discard sockets which experienced SSL-related errors.
+ (Issue #501)
+
+* Handle ``body`` param in ``.request(...)``. (Issue #513)
+
+* Respect timeout with HTTPS proxy. (Issue #505)
+
+* PyOpenSSL: Handle ZeroReturnError exception. (Issue #520)
+
+
+1.9.1 (2014-09-13)
+------------------
+
+* Apply socket arguments before binding. (Issue #427)
+
+* More careful checks if fp-like object is closed. (Issue #435)
+
+* Fixed packaging issues of some development-related files not
+ getting included. (Issue #440)
+
+* Allow performing *only* fingerprint verification. (Issue #444)
+
+* Emit ``SecurityWarning`` if system clock is waaay off. (Issue #445)
+
+* Fixed PyOpenSSL compatibility with PyPy. (Issue #450)
+
+* Fixed ``BrokenPipeError`` and ``ConnectionError`` handling in Py3.
+ (Issue #443)
+
+
+
+1.9 (2014-07-04)
+----------------
+
+* Shuffled around development-related files. If you're maintaining a distro
+ package of urllib3, you may need to tweak things. (Issue #415)
+
+* Unverified HTTPS requests will trigger a warning on the first request. See
+ our new `security documentation
+ <https://urllib3.readthedocs.io/en/latest/security.html>`_ for details.
+ (Issue #426)
+
+* New retry logic and ``urllib3.util.retry.Retry`` configuration object.
+  (Issue #326; see the sketch after this list)
+
+* All raised exceptions should now be wrapped in a
+  ``urllib3.exceptions.HTTPException``-extending exception. (Issue #326)
+
+* All errors during a retry-enabled request should be wrapped in
+ ``urllib3.exceptions.MaxRetryError``, including timeout-related exceptions
+  which were previously exempt. The underlying error is accessible from the
+  ``.reason`` property. (Issue #326)
+
+* ``urllib3.exceptions.ConnectionError`` renamed to
+ ``urllib3.exceptions.ProtocolError``. (Issue #326)
+
+* Errors during response read (such as IncompleteRead) are now wrapped in
+ ``urllib3.exceptions.ProtocolError``. (Issue #418)
+
+* Requesting an empty host will raise ``urllib3.exceptions.LocationValueError``.
+ (Issue #417)
+
+* Catch read timeouts over SSL connections as
+ ``urllib3.exceptions.ReadTimeoutError``. (Issue #419)
+
+* Apply socket arguments before connecting. (Issue #427)
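+
+A minimal sketch (not part of the upstream changelog) of the ``Retry``
+object and ``MaxRetryError.reason`` above; the URL is a placeholder:
+
+.. code-block:: python
+
+    import urllib3
+    from urllib3.exceptions import MaxRetryError
+    from urllib3.util.retry import Retry
+
+    http = urllib3.PoolManager(retries=Retry(total=3, backoff_factor=0.5))
+
+    try:
+        http.request("GET", "https://example.invalid/")  # placeholder URL
+    except MaxRetryError as e:
+        # The underlying error (timeout, connection error, ...) is on .reason.
+        print("request failed:", e.reason)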
+
+
+1.8.3 (2014-06-23)
+------------------
+
+* Fix TLS verification when using a proxy in Python 3.4.1. (Issue #385)
+
+* Add ``disable_cache`` option to ``urllib3.util.make_headers``. (Issue #393)
+
+* Wrap ``socket.timeout`` exception with
+ ``urllib3.exceptions.ReadTimeoutError``. (Issue #399)
+
+* Fixed proxy-related bug where connections were being reused incorrectly.
+ (Issues #366, #369)
+
+* Added ``socket_options`` keyword parameter which allows defining
+  ``setsockopt`` configuration of new sockets. (Issue #397)
+
+* Removed ``HTTPConnection.tcp_nodelay`` in favor of
+ ``HTTPConnection.default_socket_options``. (Issue #397)
+
+* Fixed ``TypeError`` bug in Python 2.6.4. (Issue #411)
+
+
+1.8.2 (2014-04-17)
+------------------
+
+* Fix ``urllib3.util`` not being included in the package.
+
+
+1.8.1 (2014-04-17)
+------------------
+
+* Fix AppEngine bug of HTTPS requests going out as HTTP. (Issue #356)
+
+* Don't install ``dummyserver`` into ``site-packages`` as it's only needed
+ for the test suite. (Issue #362)
+
+* Added support for specifying ``source_address``. (Issue #352)
+
+
+1.8 (2014-03-04)
+----------------
+
+* Improved url parsing in ``urllib3.util.parse_url`` (properly parse '@' in
+ username, and blank ports like 'hostname:').
+
+* New ``urllib3.connection`` module which contains all the HTTPConnection
+ objects.
+
+* Several ``urllib3.util.Timeout``-related fixes. Also changed constructor
+ signature to a more sensible order. [Backwards incompatible]
+ (Issues #252, #262, #263)
+
+* Use ``backports.ssl_match_hostname`` if it's installed. (Issue #274)
+
+* Added ``.tell()`` method to ``urllib3.response.HTTPResponse`` which
+ returns the number of bytes read so far. (Issue #277)
+
+* Support for platforms without threading. (Issue #289)
+
+* Expand default-port comparison in ``HTTPConnectionPool.is_same_host``
+  to allow a pool with no specified port to be considered equal to an
+ HTTP/HTTPS url with port 80/443 explicitly provided. (Issue #305)
+
+* Improved default SSL/TLS settings to avoid vulnerabilities.
+ (Issue #309)
+
+* Fixed ``urllib3.poolmanager.ProxyManager`` not retrying on connect errors.
+ (Issue #310)
+
+* Disable Nagle's Algorithm on the socket for non-proxies. A subset of requests
+ will send the entire HTTP request ~200 milliseconds faster; however, some of
+ the resulting TCP packets will be smaller. (Issue #254)
+
+* Increased maximum number of SubjectAltNames in ``urllib3.contrib.pyopenssl``
+ from the default 64 to 1024 in a single certificate. (Issue #318)
+
+* Headers are now passed and stored as a custom
+ ``urllib3.collections_.HTTPHeaderDict`` object rather than a plain ``dict``.
+ (Issue #329, #333)
+
+* Headers no longer lose their case on Python 3. (Issue #236)
+
+* ``urllib3.contrib.pyopenssl`` now uses the operating system's default CA
+ certificates on inject. (Issue #332)
+
+* Requests with ``retries=False`` will immediately raise any exceptions without
+ wrapping them in ``MaxRetryError``. (Issue #348)
+
+* Fixed open socket leak with SSL-related failures. (Issue #344, #348)
+
+
+1.7.1 (2013-09-25)
+------------------
+
+* Added granular timeout support with new ``urllib3.util.Timeout`` class.
+ (Issue #231)
+
+* Fixed Python 3.4 support. (Issue #238)
+
+
+1.7 (2013-08-14)
+----------------
+
+* More exceptions are now pickle-able, with tests. (Issue #174)
+
+* Fixed redirecting with relative URLs in Location header. (Issue #178)
+
+* Support for relative urls in ``Location: ...`` header. (Issue #179)
+
+* ``urllib3.response.HTTPResponse`` now inherits from ``io.IOBase`` for bonus
+ file-like functionality. (Issue #187)
+
+* Passing ``assert_hostname=False`` when creating an ``HTTPSConnectionPool`` will
+ skip hostname verification for SSL connections. (Issue #194)
+
+* New method ``urllib3.response.HTTPResponse.stream(...)`` which acts as a
+  generator wrapped around ``.read(...)``. (Issue #198; see the sketch after this list)
+
+* IPv6 url parsing enforces brackets around the hostname. (Issue #199)
+
+* Fixed thread race condition in
+ ``urllib3.poolmanager.PoolManager.connection_from_host(...)`` (Issue #204)
+
+* ``ProxyManager`` requests now include non-default port in ``Host: ...``
+ header. (Issue #217)
+
+* Added HTTPS proxy support in ``ProxyManager``. (Issues #170, #139)
+
+* New ``RequestField`` object can be passed to the ``fields=...`` param which
+ can specify headers. (Issue #220)
+
+* Raise ``urllib3.exceptions.ProxyError`` when connecting to proxy fails.
+ (Issue #221)
+
+* Use international headers when posting file names. (Issue #119)
+
+* Improved IPv6 support. (Issue #203)
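+
+A minimal sketch (not part of the upstream changelog) of
+``HTTPResponse.stream(...)`` above; the URL is a placeholder:
+
+.. code-block:: python
+
+    import urllib3
+
+    http = urllib3.PoolManager()
+    r = http.request("GET", "https://example.com/big-file",  # placeholder
+                     preload_content=False)
+
+    # stream() is a generator wrapped around read(); it yields the body in
+    # chunks instead of buffering everything in memory.
+    chunks = []
+    for chunk in r.stream(1024):
+        chunks.append(chunk)
+
+    r.release_conn()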
+
+
+1.6 (2013-04-25)
+----------------
+
+* Contrib: Optional SNI support for Py2 using PyOpenSSL. (Issue #156)
+
+* ``ProxyManager`` automatically adds ``Host: ...`` header if not given.
+
+* Improved SSL-related code. ``cert_req`` now optionally takes a string like
+  "REQUIRED" or "NONE", and ``ssl_version`` likewise takes strings like "SSLv23".
+  The string values reflect the suffix of the respective constant variable.
+ (Issue #130)
+
+* Vendored ``socksipy`` now based on Anorov's fork which handles unexpectedly
+ closed proxy connections and larger read buffers. (Issue #135)
+
+* Ensure the connection is closed if no data is received, fixes connection leak
+ on some platforms. (Issue #133)
+
+* Added SNI support for SSL/TLS connections on Py32+. (Issue #89)
+
+* Tests fixed to be compatible with Py26 again. (Issue #125)
+
+* Added ability to choose SSL version by passing an ``ssl.PROTOCOL_*`` constant
+ to the ``ssl_version`` parameter of ``HTTPSConnectionPool``. (Issue #109)
+
+* Allow an explicit content type to be specified when encoding file fields.
+ (Issue #126)
+
+* Exceptions are now pickleable, with tests. (Issue #101)
+
+* Fixed default headers not getting passed in some cases. (Issue #99)
+
+* Treat "content-encoding" header value as case-insensitive, per RFC 2616
+ Section 3.5. (Issue #110)
+
+* "Connection Refused" SocketErrors will get retried rather than raised.
+ (Issue #92)
+
+* Updated vendored ``six``, no longer overrides the global ``six`` module
+ namespace. (Issue #113)
+
+* ``urllib3.exceptions.MaxRetryError`` contains a ``reason`` property holding
+ the exception that prompted the final retry. If ``reason is None`` then it
+ was due to a redirect. (Issue #92, #114)
+
+* Fixed ``PoolManager.urlopen()`` from not redirecting more than once.
+ (Issue #149)
+
+* Don't assume ``Content-Type: text/plain`` for multi-part encoding parameters
+ that are not files. (Issue #111)
+
+* Pass ``strict`` param down to ``httplib.HTTPConnection``. (Issue #122)
+
+* Added mechanism to verify SSL certificates by fingerprint (md5, sha1) or
+ against an arbitrary hostname (when connecting by IP or for misconfigured
+ servers). (Issue #140)
+
+* Streaming decompression support. (Issue #159)
+
+
+1.5 (2012-08-02)
+----------------
+
+* Added ``urllib3.add_stderr_logger()`` for quickly enabling STDERR debug
+ logging in urllib3.
+
+* Native full URL parsing (including auth, path, query, fragment) available in
+ ``urllib3.util.parse_url(url)``.
+
+* Built-in redirect will switch method to 'GET' if status code is 303.
+ (Issue #11)
+
+* ``urllib3.PoolManager`` strips the scheme and host before sending the request
+ uri. (Issue #8)
+
+* New ``urllib3.exceptions.DecodeError`` exception for when automatic decoding,
+ based on the Content-Type header, fails.
+
+* Fixed bug with pool depletion and leaking connections (Issue #76). Added
+ explicit connection closing on pool eviction. Added
+ ``urllib3.PoolManager.clear()``.
+
+* 99% -> 100% unit test coverage.
+
+
+1.4 (2012-06-16)
+----------------
+
+* Minor AppEngine-related fixes.
+
+* Switched from ``mimetools.choose_boundary`` to ``uuid.uuid4()``.
+
+* Improved url parsing. (Issue #73)
+
+* IPv6 url support. (Issue #72)
+
+
+1.3 (2012-03-25)
+----------------
+
+* Removed pre-1.0 deprecated API.
+
+* Refactored helpers into a ``urllib3.util`` submodule.
+
+* Fixed multipart encoding to support list-of-tuples for keys with multiple
+ values. (Issue #48)
+
+* Fixed multiple Set-Cookie headers in response not getting merged properly in
+ Python 3. (Issue #53)
+
+* AppEngine support with Py27. (Issue #61)
+
+* Minor ``encode_multipart_formdata`` fixes related to Python 3 strings vs
+ bytes.
+
+
+1.2.2 (2012-02-06)
+------------------
+
+* Fixed packaging bug of not shipping ``test-requirements.txt``. (Issue #47)
+
+
+1.2.1 (2012-02-05)
+------------------
+
+* Fixed another bug related to when ``ssl`` module is not available. (Issue #41)
+
+* Location parsing errors now raise ``urllib3.exceptions.LocationParseError``
+ which inherits from ``ValueError``.
+
+
+1.2 (2012-01-29)
+----------------
+
+* Added Python 3 support (tested on 3.2.2)
+
+* Dropped Python 2.5 support (tested on 2.6.7, 2.7.2)
+
+* Use ``select.poll`` instead of ``select.select`` for platforms that support
+ it.
+
+* Use ``Queue.LifoQueue`` instead of ``Queue.Queue`` for more aggressive
+ connection reusing. Configurable by overriding ``ConnectionPool.QueueCls``.
+
+* Fixed ``ImportError`` during install when ``ssl`` module is not available.
+ (Issue #41)
+
+* Fixed ``PoolManager`` redirects between schemes (such as HTTP -> HTTPS) not
+ completing properly. (Issue #28, uncovered by Issue #10 in v1.1)
+
+* Ported ``dummyserver`` to use ``tornado`` instead of ``webob`` +
+ ``eventlet``. Removed extraneous unsupported dummyserver testing backends.
+ Added socket-level tests.
+
+* More tests. Achievement Unlocked: 99% Coverage.
+
+
+1.1 (2012-01-07)
+----------------
+
+* Refactored ``dummyserver`` to its own root namespace module (used for
+ testing).
+
+* Added hostname verification for ``VerifiedHTTPSConnection`` by vendoring in
+ Py32's ``ssl_match_hostname``. (Issue #25)
+
+* Fixed cross-host HTTP redirects when using ``PoolManager``. (Issue #10)
+
+* Fixed ``decode_content`` being ignored when set through ``urlopen``. (Issue
+ #27)
+
+* Fixed timeout-related bugs. (Issues #17, #23)
+
+
+1.0.2 (2011-11-04)
+------------------
+
+* Fixed typo in ``VerifiedHTTPSConnection`` which would only present as a bug if
+ you're using the object manually. (Thanks pyos)
+
+* Made RecentlyUsedContainer (and consequently PoolManager) more thread-safe by
+ wrapping the access log in a mutex. (Thanks @christer)
+
+* Made RecentlyUsedContainer more dict-like (corrected ``__delitem__`` and
+ ``__getitem__`` behaviour), with tests. Shouldn't affect core urllib3 code.
+
+
+1.0.1 (2011-10-10)
+------------------
+
+* Fixed a bug where the same connection would get returned into the pool twice,
+ causing extraneous "HttpConnectionPool is full" log warnings.
+
+
+1.0 (2011-10-08)
+----------------
+
+* Added ``PoolManager`` with LRU expiration of connections (tested and
+ documented).
+* Added ``ProxyManager`` (needs tests, docs, and confirmation that it works
+ with HTTPS proxies).
+* Added optional partial-read support for responses when
+ ``preload_content=False``. You can now make requests and just read the headers
+ without loading the content.
+* Made response decoding optional (default on, same as before).
+* Added optional explicit boundary string for ``encode_multipart_formdata``.
+* Convenience request methods are now inherited from ``RequestMethods``. Old
+ helpers like ``get_url`` and ``post_url`` should be abandoned in favour of
+ the new ``request(method, url, ...)``.
+* Refactored code to be even more decoupled, reusable, and extendable.
+* License header added to ``.py`` files.
+* Embiggened the documentation: Lots of Sphinx-friendly docstrings in the code
+ and docs in ``docs/`` and on https://urllib3.readthedocs.io/.
+* Embettered all the things!
+* Started writing this file.
+
+
+0.4.1 (2011-07-17)
+------------------
+
+* Minor bug fixes, code cleanup.
+
+
+0.4 (2011-03-01)
+----------------
+
+* Better unicode support.
+* Added ``VerifiedHTTPSConnection``.
+* Added ``NTLMConnectionPool`` in contrib.
+* Minor improvements.
+
+
+0.3.1 (2010-07-13)
+------------------
+
+* Added ``assert_host_name`` optional parameter. Now compatible with proxies.
+
+
+0.3 (2009-12-10)
+----------------
+
+* Added HTTPS support.
+* Minor bug fixes.
+* Refactored, broken backwards compatibility with 0.2.
+* API to be treated as stable from this version forward.
+
+
+0.2 (2008-11-17)
+----------------
+
+* Added unit tests.
+* Bug fixes.
+
+
+0.1 (2008-11-16)
+----------------
+
+* First release.
+
+
diff --git a/third_party/python/urllib3/urllib3-1.26.0.dist-info/RECORD b/third_party/python/urllib3/urllib3-1.26.0.dist-info/RECORD
new file mode 100644
index 0000000000..ec9088a111
--- /dev/null
+++ b/third_party/python/urllib3/urllib3-1.26.0.dist-info/RECORD
@@ -0,0 +1,44 @@
+urllib3/__init__.py,sha256=j3yzHIbmW7CS-IKQJ9-PPQf_YKO8EOAey_rMW0UR7us,2763
+urllib3/_collections.py,sha256=Rp1mVyBgc_UlAcp6M3at1skJBXR5J43NawRTvW2g_XY,10811
+urllib3/_version.py,sha256=H0vLQ8PY350EPZlZQa8ri0tEjVS-xhGdQOHcU360-0A,63
+urllib3/connection.py,sha256=BdaUSNpGzO0zq28i9MhOXb6QZspeVdVrYtjnkk2Eqg4,18396
+urllib3/connectionpool.py,sha256=IKoeuJZY9YAYm0GK4q-MXAhyXW0M_FnvabYaNsDIR-E,37133
+urllib3/exceptions.py,sha256=lNrKC5J8zeBXIu9SSKSNb7cLi8iXl9ARu9DHD2SflZM,7810
+urllib3/fields.py,sha256=kvLDCg_JmH1lLjUUEY_FLS8UhY7hBvDPuVETbY8mdrM,8579
+urllib3/filepost.py,sha256=5b_qqgRHVlL7uLtdAYBzBh-GHmU5AfJVt_2N0XS3PeY,2440
+urllib3/poolmanager.py,sha256=whzlX6UTEgODMOCy0ZDMUONRBCz5wyIM8Z9opXAY-Lk,19763
+urllib3/request.py,sha256=ZFSIqX0C6WizixecChZ3_okyu7BEv0lZu1VT0s6h4SM,5985
+urllib3/response.py,sha256=hGhGBh7TkEkh_IQg5C1W_xuPNrgIKv5BUXPyE-q0LuE,28203
+urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+urllib3/contrib/_appengine_environ.py,sha256=bDbyOEhW2CKLJcQqAKAyrEHN-aklsyHFKq6vF8ZFsmk,957
+urllib3/contrib/appengine.py,sha256=7Pxb0tKfDB_LTGPERiswH0qomhDoUUOo5kwybAKLQyE,11010
+urllib3/contrib/ntlmpool.py,sha256=6I95h1_71fzxmoMSNtY0gB8lnyCoVtP_DpqFGj14fdU,4160
+urllib3/contrib/pyopenssl.py,sha256=vgh6j52w9xgwq-3R2kfB5M2JblQATJfKAK3lIAc1kSg,16778
+urllib3/contrib/securetransport.py,sha256=KxGPZk8d4YepWm7Rc-SBt1XrzIfnLKc8JkUVV75XzgE,34286
+urllib3/contrib/socks.py,sha256=DcRjM2l0rQMIyhYrN6r-tnVkY6ZTDxHJlM8_usAkGCA,7097
+urllib3/contrib/_securetransport/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+urllib3/contrib/_securetransport/bindings.py,sha256=E1_7ScsgOchfxneozbAueK7ziCwF35fna4DuDCYJ9_o,17637
+urllib3/contrib/_securetransport/low_level.py,sha256=lgIdsSycqfB0Xm5BiJzXGeIKT7ybCQMFPJAgkcwPa1s,13908
+urllib3/packages/__init__.py,sha256=h4BLhD4tLaBx1adaDtKXfupsgqY0wWLXb_f1_yVlV6A,108
+urllib3/packages/six.py,sha256=adx4z-eM_D0Vvu0IIqVzFACQ_ux9l64y7DkSEfbxCDs,32536
+urllib3/packages/backports/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+urllib3/packages/backports/makefile.py,sha256=nbzt3i0agPVP07jqqgjhaYjMmuAi_W5E0EywZivVO8E,1417
+urllib3/packages/ssl_match_hostname/__init__.py,sha256=zppezdEQdpGsYerI6mV6MfUYy495JV4mcOWC_GgbljU,757
+urllib3/packages/ssl_match_hostname/_implementation.py,sha256=6dZ-q074g7XhsJ27MFCgkct8iVNZB3sMZvKhf-KUVy0,5679
+urllib3/util/__init__.py,sha256=JEmSmmqqLyaw8P51gUImZh8Gwg9i1zSe-DoqAitn2nc,1155
+urllib3/util/connection.py,sha256=21B-LX0c8fkxPDssyHCaK0pCnmrKmhltg5EoouHiAPU,4910
+urllib3/util/proxy.py,sha256=FGipAEnvZteyldXNjce4DEB7YzwU-a5lep8y5S0qHQg,1604
+urllib3/util/queue.py,sha256=nRgX8_eX-_VkvxoX096QWoz8Ps0QHUAExILCY_7PncM,498
+urllib3/util/request.py,sha256=NnzaEKQ1Pauw5MFMV6HmgEMHITf0Aua9fQuzi2uZzGc,4123
+urllib3/util/response.py,sha256=GJpg3Egi9qaJXRwBh5wv-MNuRWan5BIu40oReoxWP28,3510
+urllib3/util/retry.py,sha256=tn168HDMUynFmXRP-uVaLRUOlbTEJikoB1RuZdwfCes,21366
+urllib3/util/ssl_.py,sha256=cUsmU604z2zAOZcaXDpINXOokQ1RtlJMe96TBDkaJp0,16199
+urllib3/util/ssltransport.py,sha256=IvGQvs9YWkf4jzfqVjTu_UWjwAUgPn5ActajW8VLz6A,6908
+urllib3/util/timeout.py,sha256=QSbBUNOB9yh6AnDn61SrLQ0hg5oz0I9-uXEG91AJuIg,10003
+urllib3/util/url.py,sha256=LWfLSlI4l2FmUMKfCkElCaW10-0N-sJDT9bxaDZJkjs,13964
+urllib3/util/wait.py,sha256=3MUKRSAUJDB2tgco7qRUskW0zXGAWYvRRE4Q1_6xlLs,5404
+urllib3-1.26.0.dist-info/LICENSE.txt,sha256=w3vxhuJ8-dvpYZ5V7f486nswCRzrPaY8fay-Dm13kHs,1115
+urllib3-1.26.0.dist-info/METADATA,sha256=Wghdt6nLf9HfZHhWj8Dpgz4n9vGRqXYhdIwJRPgki6M,42629
+urllib3-1.26.0.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110
+urllib3-1.26.0.dist-info/top_level.txt,sha256=EMiXL2sKrTcmrMxIHTqdc3ET54pQI2Y072LexFEemvo,8
+urllib3-1.26.0.dist-info/RECORD,,
diff --git a/third_party/python/urllib3/urllib3-1.26.0.dist-info/WHEEL b/third_party/python/urllib3/urllib3-1.26.0.dist-info/WHEEL
new file mode 100644
index 0000000000..6d38aa0601
--- /dev/null
+++ b/third_party/python/urllib3/urllib3-1.26.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.35.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/urllib3/urllib3-1.26.0.dist-info/top_level.txt b/third_party/python/urllib3/urllib3-1.26.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..a42590bebe
--- /dev/null
+++ b/third_party/python/urllib3/urllib3-1.26.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+urllib3
diff --git a/third_party/python/urllib3/urllib3/__init__.py b/third_party/python/urllib3/urllib3/__init__.py
new file mode 100644
index 0000000000..fe86b59d78
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/__init__.py
@@ -0,0 +1,85 @@
+"""
+Python HTTP library with thread-safe connection pooling, file post support, user-friendly features, and more
+"""
+from __future__ import absolute_import
+
+# Set default logging handler to avoid "No handler found" warnings.
+import logging
+import warnings
+from logging import NullHandler
+
+from . import exceptions
+from ._version import __version__
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, connection_from_url
+from .filepost import encode_multipart_formdata
+from .poolmanager import PoolManager, ProxyManager, proxy_from_url
+from .response import HTTPResponse
+from .util.request import make_headers
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import get_host
+
+__author__ = "Andrey Petrov (andrey.petrov@shazow.net)"
+__license__ = "MIT"
+__version__ = __version__
+
+__all__ = (
+ "HTTPConnectionPool",
+ "HTTPSConnectionPool",
+ "PoolManager",
+ "ProxyManager",
+ "HTTPResponse",
+ "Retry",
+ "Timeout",
+ "add_stderr_logger",
+ "connection_from_url",
+ "disable_warnings",
+ "encode_multipart_formdata",
+ "get_host",
+ "make_headers",
+ "proxy_from_url",
+)
+
+logging.getLogger(__name__).addHandler(NullHandler())
+
+
+def add_stderr_logger(level=logging.DEBUG):
+ """
+ Helper for quickly adding a StreamHandler to the logger. Useful for
+ debugging.
+
+ Returns the handler after adding it.
+ """
+ # This method needs to be in this __init__.py to get the __name__ correct
+ # even if urllib3 is vendored within another package.
+ logger = logging.getLogger(__name__)
+ handler = logging.StreamHandler()
+ handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
+ logger.addHandler(handler)
+ logger.setLevel(level)
+ logger.debug("Added a stderr logging handler to logger: %s", __name__)
+ return handler
+
+
+# ... Clean up.
+del NullHandler
+
+
+# All warning filters *must* be appended unless you're really certain that they
+# shouldn't be: otherwise, it's very hard for users to use most Python
+# mechanisms to silence them.
+# SecurityWarnings always go off by default.
+warnings.simplefilter("always", exceptions.SecurityWarning, append=True)
+# SubjectAltNameWarnings should go off once per host.
+warnings.simplefilter("default", exceptions.SubjectAltNameWarning, append=True)
+# InsecurePlatformWarnings don't vary between requests, so we keep the filter at "default".
+warnings.simplefilter("default", exceptions.InsecurePlatformWarning, append=True)
+# SNIMissingWarnings should go off only once.
+warnings.simplefilter("default", exceptions.SNIMissingWarning, append=True)
+
+
+def disable_warnings(category=exceptions.HTTPWarning):
+ """
+ Helper for quickly disabling all urllib3 warnings.
+ """
+ warnings.simplefilter("ignore", category)
diff --git a/third_party/python/urllib3/urllib3/_collections.py b/third_party/python/urllib3/urllib3/_collections.py
new file mode 100644
index 0000000000..da9857e986
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/_collections.py
@@ -0,0 +1,337 @@
+from __future__ import absolute_import
+
+try:
+ from collections.abc import Mapping, MutableMapping
+except ImportError:
+ from collections import Mapping, MutableMapping
+try:
+ from threading import RLock
+except ImportError: # Platform-specific: No threads available
+
+ class RLock:
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_value, traceback):
+ pass
+
+
+from collections import OrderedDict
+
+from .exceptions import InvalidHeader
+from .packages import six
+from .packages.six import iterkeys, itervalues
+
+__all__ = ["RecentlyUsedContainer", "HTTPHeaderDict"]
+
+
+_Null = object()
+
+
+class RecentlyUsedContainer(MutableMapping):
+ """
+ Provides a thread-safe dict-like container which maintains up to
+ ``maxsize`` keys while throwing away the least-recently-used keys beyond
+ ``maxsize``.
+
+ :param maxsize:
+ Maximum number of recent elements to retain.
+
+ :param dispose_func:
+        Callback invoked as ``dispose_func(value)`` every time an item is
+        evicted from the container.
+ """
+
+ ContainerCls = OrderedDict
+
+ def __init__(self, maxsize=10, dispose_func=None):
+ self._maxsize = maxsize
+ self.dispose_func = dispose_func
+
+ self._container = self.ContainerCls()
+ self.lock = RLock()
+
+ def __getitem__(self, key):
+ # Re-insert the item, moving it to the end of the eviction line.
+ with self.lock:
+ item = self._container.pop(key)
+ self._container[key] = item
+ return item
+
+ def __setitem__(self, key, value):
+ evicted_value = _Null
+ with self.lock:
+ # Possibly evict the existing value of 'key'
+ evicted_value = self._container.get(key, _Null)
+ self._container[key] = value
+
+ # If we didn't evict an existing value, we might have to evict the
+ # least recently used item from the beginning of the container.
+ if len(self._container) > self._maxsize:
+ _key, evicted_value = self._container.popitem(last=False)
+
+ if self.dispose_func and evicted_value is not _Null:
+ self.dispose_func(evicted_value)
+
+ def __delitem__(self, key):
+ with self.lock:
+ value = self._container.pop(key)
+
+ if self.dispose_func:
+ self.dispose_func(value)
+
+ def __len__(self):
+ with self.lock:
+ return len(self._container)
+
+ def __iter__(self):
+ raise NotImplementedError(
+ "Iteration over this class is unlikely to be threadsafe."
+ )
+
+ def clear(self):
+ with self.lock:
+ # Copy pointers to all values, then wipe the mapping
+ values = list(itervalues(self._container))
+ self._container.clear()
+
+ if self.dispose_func:
+ for value in values:
+ self.dispose_func(value)
+
+ def keys(self):
+ with self.lock:
+ return list(iterkeys(self._container))
+
+
+class HTTPHeaderDict(MutableMapping):
+ """
+ :param headers:
+ An iterable of field-value pairs. Must not contain multiple field names
+ when compared case-insensitively.
+
+ :param kwargs:
+ Additional field-value pairs to pass in to ``dict.update``.
+
+ A ``dict`` like container for storing HTTP Headers.
+
+ Field names are stored and compared case-insensitively in compliance with
+ RFC 7230. Iteration provides the first case-sensitive key seen for each
+ case-insensitive pair.
+
+ Using ``__setitem__`` syntax overwrites fields that compare equal
+    case-insensitively in order to maintain ``dict``'s API. For fields that
+ compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``
+ in a loop.
+
+ If multiple fields that are equal case-insensitively are passed to the
+ constructor or ``.update``, the behavior is undefined and some will be
+ lost.
+
+ >>> headers = HTTPHeaderDict()
+ >>> headers.add('Set-Cookie', 'foo=bar')
+ >>> headers.add('set-cookie', 'baz=quxx')
+ >>> headers['content-length'] = '7'
+ >>> headers['SET-cookie']
+ 'foo=bar, baz=quxx'
+ >>> headers['Content-Length']
+ '7'
+ """
+
+ def __init__(self, headers=None, **kwargs):
+ super(HTTPHeaderDict, self).__init__()
+ self._container = OrderedDict()
+ if headers is not None:
+ if isinstance(headers, HTTPHeaderDict):
+ self._copy_from(headers)
+ else:
+ self.extend(headers)
+ if kwargs:
+ self.extend(kwargs)
+
+ def __setitem__(self, key, val):
+ self._container[key.lower()] = [key, val]
+ return self._container[key.lower()]
+
+ def __getitem__(self, key):
+ val = self._container[key.lower()]
+ return ", ".join(val[1:])
+
+ def __delitem__(self, key):
+ del self._container[key.lower()]
+
+ def __contains__(self, key):
+ return key.lower() in self._container
+
+ def __eq__(self, other):
+ if not isinstance(other, Mapping) and not hasattr(other, "keys"):
+ return False
+ if not isinstance(other, type(self)):
+ other = type(self)(other)
+ return dict((k.lower(), v) for k, v in self.itermerged()) == dict(
+ (k.lower(), v) for k, v in other.itermerged()
+ )
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+ if six.PY2: # Python 2
+ iterkeys = MutableMapping.iterkeys
+ itervalues = MutableMapping.itervalues
+
+ __marker = object()
+
+ def __len__(self):
+ return len(self._container)
+
+ def __iter__(self):
+ # Only provide the originally cased names
+ for vals in self._container.values():
+ yield vals[0]
+
+ def pop(self, key, default=__marker):
+ """D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
+ If key is not found, d is returned if given, otherwise KeyError is raised.
+ """
+ # Using the MutableMapping function directly fails due to the private marker.
+ # Using ordinary dict.pop would expose the internal structures.
+ # So let's reinvent the wheel.
+ try:
+ value = self[key]
+ except KeyError:
+ if default is self.__marker:
+ raise
+ return default
+ else:
+ del self[key]
+ return value
+
+ def discard(self, key):
+ try:
+ del self[key]
+ except KeyError:
+ pass
+
+ def add(self, key, val):
+ """Adds a (name, value) pair, doesn't overwrite the value if it already
+ exists.
+
+ >>> headers = HTTPHeaderDict(foo='bar')
+ >>> headers.add('Foo', 'baz')
+ >>> headers['foo']
+ 'bar, baz'
+ """
+ key_lower = key.lower()
+ new_vals = [key, val]
+ # Keep the common case aka no item present as fast as possible
+ vals = self._container.setdefault(key_lower, new_vals)
+ if new_vals is not vals:
+ vals.append(val)
+
+ def extend(self, *args, **kwargs):
+ """Generic import function for any type of header-like object.
+ Adapted version of MutableMapping.update in order to insert items
+ with self.add instead of self.__setitem__
+ """
+ if len(args) > 1:
+ raise TypeError(
+ "extend() takes at most 1 positional "
+ "arguments ({0} given)".format(len(args))
+ )
+ other = args[0] if len(args) >= 1 else ()
+
+ if isinstance(other, HTTPHeaderDict):
+ for key, val in other.iteritems():
+ self.add(key, val)
+ elif isinstance(other, Mapping):
+ for key in other:
+ self.add(key, other[key])
+ elif hasattr(other, "keys"):
+ for key in other.keys():
+ self.add(key, other[key])
+ else:
+ for key, value in other:
+ self.add(key, value)
+
+ for key, value in kwargs.items():
+ self.add(key, value)
+
+ def getlist(self, key, default=__marker):
+ """Returns a list of all the values for the named field. Returns an
+ empty list if the key doesn't exist."""
+ try:
+ vals = self._container[key.lower()]
+ except KeyError:
+ if default is self.__marker:
+ return []
+ return default
+ else:
+ return vals[1:]
+
+ # Backwards compatibility for httplib
+ getheaders = getlist
+ getallmatchingheaders = getlist
+ iget = getlist
+
+ # Backwards compatibility for http.cookiejar
+ get_all = getlist
+
+ def __repr__(self):
+ return "%s(%s)" % (type(self).__name__, dict(self.itermerged()))
+
+ def _copy_from(self, other):
+ for key in other:
+ val = other.getlist(key)
+ if isinstance(val, list):
+ # Don't need to convert tuples
+ val = list(val)
+ self._container[key.lower()] = [key] + val
+
+ def copy(self):
+ clone = type(self)()
+ clone._copy_from(self)
+ return clone
+
+ def iteritems(self):
+ """Iterate over all header lines, including duplicate ones."""
+ for key in self:
+ vals = self._container[key.lower()]
+ for val in vals[1:]:
+ yield vals[0], val
+
+ def itermerged(self):
+ """Iterate over all headers, merging duplicate ones together."""
+ for key in self:
+ val = self._container[key.lower()]
+ yield val[0], ", ".join(val[1:])
+
+ def items(self):
+ return list(self.iteritems())
+
+ @classmethod
+ def from_httplib(cls, message): # Python 2
+ """Read headers from a Python 2 httplib message object."""
+ # python2.7 does not expose a proper API for exporting multiheaders
+ # efficiently. This function re-reads raw lines from the message
+ # object and extracts the multiheaders properly.
+ obs_fold_continued_leaders = (" ", "\t")
+ headers = []
+
+ for line in message.headers:
+ if line.startswith(obs_fold_continued_leaders):
+ if not headers:
+ # We received a header line that starts with OWS as described
+ # in RFC-7230 S3.2.4. This indicates a multiline header, but
+ # there exists no previous header to which we can attach it.
+ raise InvalidHeader(
+ "Header continuation with no previous header: %s" % line
+ )
+ else:
+ key, value = headers[-1]
+ headers[-1] = (key, value + " " + line.strip())
+ continue
+
+ key, value = line.split(":", 1)
+ headers.append((key, value.strip()))
+
+ return cls(headers)
diff --git a/third_party/python/urllib3/urllib3/_version.py b/third_party/python/urllib3/urllib3/_version.py
new file mode 100644
index 0000000000..cee465f88a
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/_version.py
@@ -0,0 +1,2 @@
+# This file is protected via CODEOWNERS
+__version__ = "1.26.0"
diff --git a/third_party/python/urllib3/urllib3/connection.py b/third_party/python/urllib3/urllib3/connection.py
new file mode 100644
index 0000000000..52487417c9
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/connection.py
@@ -0,0 +1,534 @@
+from __future__ import absolute_import
+
+import datetime
+import logging
+import os
+import re
+import socket
+import warnings
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from .packages import six
+from .packages.six.moves.http_client import HTTPConnection as _HTTPConnection
+from .packages.six.moves.http_client import HTTPException # noqa: F401
+from .util.proxy import create_proxy_ssl_context
+
+try: # Compiled with SSL?
+ import ssl
+
+ BaseSSLError = ssl.SSLError
+except (ImportError, AttributeError): # Platform-specific: No SSL.
+ ssl = None
+
+ class BaseSSLError(BaseException):
+ pass
+
+
+try:
+ # Python 3: not a no-op, we're adding this to the namespace so it can be imported.
+ ConnectionError = ConnectionError
+except NameError:
+ # Python 2
+ class ConnectionError(Exception):
+ pass
+
+
+try: # Python 3:
+ # Not a no-op, we're adding this to the namespace so it can be imported.
+ BrokenPipeError = BrokenPipeError
+except NameError: # Python 2:
+
+ class BrokenPipeError(Exception):
+ pass
+
+
+from ._version import __version__
+from .exceptions import (
+ ConnectTimeoutError,
+ NewConnectionError,
+ SubjectAltNameWarning,
+ SystemTimeWarning,
+)
+from .packages.ssl_match_hostname import CertificateError, match_hostname
+from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection
+from .util.ssl_ import (
+ assert_fingerprint,
+ create_urllib3_context,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+
+log = logging.getLogger(__name__)
+
+port_by_scheme = {"http": 80, "https": 443}
+
+ # When it comes time to update this value as part of regular maintenance
+ # (i.e. test_recent_date is failing), update it to ~6 months before the current date.
+RECENT_DATE = datetime.date(2019, 1, 1)
+
+_CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]")
+
+
+class HTTPConnection(_HTTPConnection, object):
+ """
+ Based on :class:`http.client.HTTPConnection` but provides an extra constructor
+ backwards-compatibility layer between older and newer Pythons.
+
+ Additional keyword parameters are used to configure attributes of the connection.
+ Accepted parameters include:
+
+ - ``strict``: See the documentation on :class:`urllib3.connectionpool.HTTPConnectionPool`
+ - ``source_address``: Set the source address for the current connection.
+ - ``socket_options``: Set specific options on the underlying socket. If not specified, then
+ defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling
+ Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy.
+
+ For example, if you wish to enable TCP Keep Alive in addition to the defaults,
+ you might pass:
+
+ .. code-block:: python
+
+ HTTPConnection.default_socket_options + [
+ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ ]
+
+ Or you may want to disable the defaults by passing an empty list (e.g., ``[]``).
+ """
+
+ default_port = port_by_scheme["http"]
+
+ #: Disable Nagle's algorithm by default.
+ #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]``
+ default_socket_options = [(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]
+
+ #: Whether this connection verifies the host's certificate.
+ is_verified = False
+
+ def __init__(self, *args, **kw):
+ if not six.PY2:
+ kw.pop("strict", None)
+
+ # Pre-set source_address.
+ self.source_address = kw.get("source_address")
+
+ #: The socket options provided by the user. If no options are
+ #: provided, we use the default options.
+ self.socket_options = kw.pop("socket_options", self.default_socket_options)
+
+ # Proxy options provided by the user.
+ self.proxy = kw.pop("proxy", None)
+ self.proxy_config = kw.pop("proxy_config", None)
+
+ _HTTPConnection.__init__(self, *args, **kw)
+
+ @property
+ def host(self):
+ """
+ Getter method to remove any trailing dots that indicate the hostname is an FQDN.
+
+ In general, SSL certificates don't include the trailing dot indicating a
+ fully-qualified domain name, and thus, they don't validate properly when
+ checked against a domain name that includes the dot. In addition, some
+ servers may not expect to receive the trailing dot when provided.
+
+ However, the hostname with the trailing dot is critical to DNS resolution; a
+ lookup with the trailing dot resolves only the appropriate FQDN, whereas a
+ lookup without it also searches the system's search domain list. Thus, it's
+ important to keep the original host around for use only in those cases where
+ it's appropriate (i.e., when doing the DNS lookup to establish the actual TCP
+ connection across which we're going to send HTTP requests).
+ """
+ return self._dns_host.rstrip(".")
+
+ @host.setter
+ def host(self, value):
+ """
+ Setter for the `host` property.
+
+ We assume that only urllib3 uses the _dns_host attribute; httplib itself
+ only uses `host`, and it seems reasonable that other libraries follow suit.
+ """
+ self._dns_host = value
+
+ def _new_conn(self):
+ """Establish a socket connection and set nodelay settings on it.
+
+ :return: New socket connection.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw["source_address"] = self.source_address
+
+ if self.socket_options:
+ extra_kw["socket_options"] = self.socket_options
+
+ try:
+ conn = connection.create_connection(
+ (self._dns_host, self.port), self.timeout, **extra_kw
+ )
+
+ except SocketTimeout:
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+
+ except SocketError as e:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ return conn
+
+ def _is_using_tunnel(self):
+ # Google App Engine's httplib does not define _tunnel_host
+ return getattr(self, "_tunnel_host", None)
+
+ def _prepare_conn(self, conn):
+ self.sock = conn
+ if self._is_using_tunnel():
+ # TODO: Fix tunnel so it doesn't depend on self.sock state.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ def connect(self):
+ conn = self._new_conn()
+ self._prepare_conn(conn)
+
+ def putrequest(self, method, url, *args, **kwargs):
+ """"""
+ # Empty docstring because the indentation of CPython's implementation
+ # is broken but we don't want this method in our documentation.
+ match = _CONTAINS_CONTROL_CHAR_RE.search(method)
+ if match:
+ raise ValueError(
+ "Method cannot contain non-token characters %r (found at least %r)"
+ % (method, match.group())
+ )
+
+ return _HTTPConnection.putrequest(self, method, url, *args, **kwargs)
+
+ def putheader(self, header, *values):
+ """"""
+ if SKIP_HEADER not in values:
+ _HTTPConnection.putheader(self, header, *values)
+ elif six.ensure_str(header.lower()) not in SKIPPABLE_HEADERS:
+ raise ValueError(
+ "urllib3.util.SKIP_HEADER only supports '%s'"
+ % ("', '".join(map(str.title, sorted(SKIPPABLE_HEADERS))),)
+ )
+
+ def request(self, method, url, body=None, headers=None):
+ if headers is None:
+ headers = {}
+ else:
+ # Avoid modifying the headers passed into .request()
+ headers = headers.copy()
+ if "user-agent" not in (k.lower() for k in headers):
+ headers["User-Agent"] = _get_default_user_agent()
+ super(HTTPConnection, self).request(method, url, body=body, headers=headers)
+
+ def request_chunked(self, method, url, body=None, headers=None):
+ """
+ Alternative to the common request() method: sends the body with
+ chunked transfer encoding rather than as a single block.
+ """
+ headers = headers or {}
+ header_keys = set([six.ensure_str(k.lower()) for k in headers])
+ skip_accept_encoding = "accept-encoding" in header_keys
+ skip_host = "host" in header_keys
+ self.putrequest(
+ method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host
+ )
+ if "user-agent" not in header_keys:
+ self.putheader("User-Agent", _get_default_user_agent())
+ for header, value in headers.items():
+ self.putheader(header, value)
+ if "transfer-encoding" not in headers:
+ self.putheader("Transfer-Encoding", "chunked")
+ self.endheaders()
+
+ if body is not None:
+ stringish_types = six.string_types + (bytes,)
+ if isinstance(body, stringish_types):
+ body = (body,)
+ for chunk in body:
+ if not chunk:
+ continue
+ if not isinstance(chunk, bytes):
+ chunk = chunk.encode("utf8")
+ len_str = hex(len(chunk))[2:]
+ to_send = bytearray(len_str.encode())
+ to_send += b"\r\n"
+ to_send += chunk
+ to_send += b"\r\n"
+ self.send(to_send)
+
+ # Send the terminating chunk outside the if clause so the body is always closed.
+ self.send(b"0\r\n\r\n")
+
+
+class HTTPSConnection(HTTPConnection):
+ """
+ Many of the parameters to this constructor are passed to the underlying SSL
+ socket by means of :py:func:`urllib3.util.ssl_wrap_socket`.
+ """
+
+ default_port = port_by_scheme["https"]
+
+ cert_reqs = None
+ ca_certs = None
+ ca_cert_dir = None
+ ca_cert_data = None
+ ssl_version = None
+ assert_fingerprint = None
+ tls_in_tls_required = False
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ key_file=None,
+ cert_file=None,
+ key_password=None,
+ strict=None,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ ssl_context=None,
+ server_hostname=None,
+ **kw
+ ):
+
+ HTTPConnection.__init__(self, host, port, strict=strict, timeout=timeout, **kw)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.key_password = key_password
+ self.ssl_context = ssl_context
+ self.server_hostname = server_hostname
+
+ # Required property for Google AppEngine 1.9.0 which otherwise causes
+ # HTTPS requests to go out as HTTP. (See Issue #356)
+ self._protocol = "https"
+
+ def set_cert(
+ self,
+ key_file=None,
+ cert_file=None,
+ cert_reqs=None,
+ key_password=None,
+ ca_certs=None,
+ assert_hostname=None,
+ assert_fingerprint=None,
+ ca_cert_dir=None,
+ ca_cert_data=None,
+ ):
+ """
+ This method should only be called once, before the connection is used.
+ """
+ # If cert_reqs is not provided we'll assume CERT_REQUIRED unless we also
+ # have an SSLContext object in which case we'll use its verify_mode.
+ if cert_reqs is None:
+ if self.ssl_context is not None:
+ cert_reqs = self.ssl_context.verify_mode
+ else:
+ cert_reqs = resolve_cert_reqs(None)
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.key_password = key_password
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ self.ca_certs = ca_certs and os.path.expanduser(ca_certs)
+ self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir)
+ self.ca_cert_data = ca_cert_data
+
+ def connect(self):
+ # Add certificate verification
+ conn = self._new_conn()
+ hostname = self.host
+ tls_in_tls = False
+
+ if self._is_using_tunnel():
+ if self.tls_in_tls_required:
+ conn = self._connect_tls_proxy(hostname, conn)
+ tls_in_tls = True
+
+ self.sock = conn
+
+ # Calls self._set_hostport(), so self.host is
+ # self._tunnel_host below.
+ self._tunnel()
+ # Mark this connection as not reusable
+ self.auto_open = 0
+
+ # Override the host with the one we're requesting data from.
+ hostname = self._tunnel_host
+
+ server_hostname = hostname
+ if self.server_hostname is not None:
+ server_hostname = self.server_hostname
+
+ is_time_off = datetime.date.today() < RECENT_DATE
+ if is_time_off:
+ warnings.warn(
+ (
+ "System time is way off (before {0}). This will probably "
+ "lead to SSL verification errors"
+ ).format(RECENT_DATE),
+ SystemTimeWarning,
+ )
+
+ # Wrap socket using verification with the root certs in
+ # trusted_root_certs
+ default_ssl_context = False
+ if self.ssl_context is None:
+ default_ssl_context = True
+ self.ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(self.ssl_version),
+ cert_reqs=resolve_cert_reqs(self.cert_reqs),
+ )
+
+ context = self.ssl_context
+ context.verify_mode = resolve_cert_reqs(self.cert_reqs)
+
+ # Try to load OS default certs if none are given.
+ # Works well on Windows (requires Python3.4+)
+ if (
+ not self.ca_certs
+ and not self.ca_cert_dir
+ and not self.ca_cert_data
+ and default_ssl_context
+ and hasattr(context, "load_default_certs")
+ ):
+ context.load_default_certs()
+
+ self.sock = ssl_wrap_socket(
+ sock=conn,
+ keyfile=self.key_file,
+ certfile=self.cert_file,
+ key_password=self.key_password,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ ca_cert_data=self.ca_cert_data,
+ server_hostname=server_hostname,
+ ssl_context=context,
+ tls_in_tls=tls_in_tls,
+ )
+
+ # If we're using all defaults and the connection
+ # is TLSv1 or TLSv1.1 we throw a DeprecationWarning
+ # for the host.
+ if (
+ default_ssl_context
+ and self.ssl_version is None
+ and hasattr(self.sock, "version")
+ and self.sock.version() in {"TLSv1", "TLSv1.1"}
+ ):
+ warnings.warn(
+ "Negotiating TLSv1/TLSv1.1 by default is deprecated "
+ "and will be disabled in urllib3 v2.0.0. Connecting to "
+ "'%s' with '%s' can be enabled by explicitly opting-in "
+ "with 'ssl_version'" % (self.host, self.sock.version()),
+ DeprecationWarning,
+ )
+
+ if self.assert_fingerprint:
+ assert_fingerprint(
+ self.sock.getpeercert(binary_form=True), self.assert_fingerprint
+ )
+ elif (
+ context.verify_mode != ssl.CERT_NONE
+ and not getattr(context, "check_hostname", False)
+ and self.assert_hostname is not False
+ ):
+ # While urllib3 attempts to always turn off hostname matching from
+ # the TLS library, this cannot always be done. So we check whether
+ # the TLS library still thinks it's matching hostnames.
+ cert = self.sock.getpeercert()
+ if not cert.get("subjectAltName", ()):
+ warnings.warn(
+ (
+ "Certificate for {0} has no `subjectAltName`, falling back to check for a "
+ "`commonName` for now. This feature is being removed by major browsers and "
+ "deprecated by RFC 2818. (See https://github.com/urllib3/urllib3/issues/497 "
+ "for details.)".format(hostname)
+ ),
+ SubjectAltNameWarning,
+ )
+ _match_hostname(cert, self.assert_hostname or server_hostname)
+
+ self.is_verified = (
+ context.verify_mode == ssl.CERT_REQUIRED
+ or self.assert_fingerprint is not None
+ )
+
+ def _connect_tls_proxy(self, hostname, conn):
+ """
+ Establish a TLS connection to the proxy using the provided SSL context.
+ """
+ proxy_config = self.proxy_config
+ ssl_context = proxy_config.ssl_context
+ if ssl_context:
+ # If the user provided a proxy context, we assume CA and client
+ # certificates have already been set
+ return ssl_wrap_socket(
+ sock=conn,
+ server_hostname=hostname,
+ ssl_context=ssl_context,
+ )
+
+ ssl_context = create_proxy_ssl_context(
+ self.ssl_version,
+ self.cert_reqs,
+ self.ca_certs,
+ self.ca_cert_dir,
+ self.ca_cert_data,
+ )
+
+ # If no cert was provided, use only the default options for server
+ # certificate validation
+ return ssl_wrap_socket(
+ sock=conn,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ ca_cert_data=self.ca_cert_data,
+ server_hostname=hostname,
+ ssl_context=ssl_context,
+ )
+
+
+def _match_hostname(cert, asserted_hostname):
+ try:
+ match_hostname(cert, asserted_hostname)
+ except CertificateError as e:
+ log.warning(
+ "Certificate did not match expected hostname: %s. Certificate: %s",
+ asserted_hostname,
+ cert,
+ )
+ # Add cert to exception and reraise so client code can inspect
+ # the cert when catching the exception, if they want to
+ e._peer_cert = cert
+ raise
+
+
+def _get_default_user_agent():
+ return "python-urllib3/%s" % __version__
+
+
+class DummyConnection(object):
+ """Used to detect a failed ConnectionCls import."""
+
+ pass
+
+
+if not ssl:
+ HTTPSConnection = DummyConnection # noqa: F811
+
+
+VerifiedHTTPSConnection = HTTPSConnection
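These connection classes are normally created through a pool, but they can be exercised directly. A short sketch combining the ``socket_options`` hook and ``set_cert()`` documented above (the host and CA bundle path are illustrative):

    import socket
    from urllib3.connection import HTTPSConnection

    # Keep the defaults (Nagle disabled) and additionally enable TCP keep-alive.
    options = HTTPSConnection.default_socket_options + [
        (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
    ]

    conn = HTTPSConnection("example.org", 443, socket_options=options)
    conn.set_cert(ca_certs="/etc/ssl/certs/ca-certificates.crt")
    conn.request("GET", "/")      # connects lazily, then verifies the cert
    response = conn.getresponse()
    print(response.status)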
diff --git a/third_party/python/urllib3/urllib3/connectionpool.py b/third_party/python/urllib3/urllib3/connectionpool.py
new file mode 100644
index 0000000000..4708c5bfc7
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/connectionpool.py
@@ -0,0 +1,1067 @@
+from __future__ import absolute_import
+
+import errno
+import logging
+import socket
+import sys
+import warnings
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from .connection import (
+ BaseSSLError,
+ BrokenPipeError,
+ DummyConnection,
+ HTTPConnection,
+ HTTPException,
+ HTTPSConnection,
+ VerifiedHTTPSConnection,
+ port_by_scheme,
+)
+from .exceptions import (
+ ClosedPoolError,
+ EmptyPoolError,
+ HeaderParsingError,
+ HostChangedError,
+ InsecureRequestWarning,
+ LocationValueError,
+ MaxRetryError,
+ NewConnectionError,
+ ProtocolError,
+ ProxyError,
+ ReadTimeoutError,
+ SSLError,
+ TimeoutError,
+)
+from .packages import six
+from .packages.six.moves import queue
+from .packages.ssl_match_hostname import CertificateError
+from .request import RequestMethods
+from .response import HTTPResponse
+from .util.connection import is_connection_dropped
+from .util.proxy import connection_requires_http_tunnel
+from .util.queue import LifoQueue
+from .util.request import set_file_position
+from .util.response import assert_header_parsing
+from .util.retry import Retry
+from .util.timeout import Timeout
+from .util.url import Url, _encode_target
+from .util.url import _normalize_host as normalize_host
+from .util.url import get_host, parse_url
+
+xrange = six.moves.xrange
+
+log = logging.getLogger(__name__)
+
+_Default = object()
+
+
+# Pool objects
+class ConnectionPool(object):
+ """
+ Base class for all connection pools, such as
+ :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
+
+ .. note::
+ ConnectionPool.urlopen() does not normalize or percent-encode target URIs,
+ which is useful if your target server doesn't support percent-encoded
+ target URIs.
+ """
+
+ scheme = None
+ QueueCls = LifoQueue
+
+ def __init__(self, host, port=None):
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ self.host = _normalize_host(host, scheme=self.scheme)
+ self._proxy_host = host.lower()
+ self.port = port
+
+ def __str__(self):
+ return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.close()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ pass
+
+
+# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
+_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
+
+
+class HTTPConnectionPool(ConnectionPool, RequestMethods):
+ """
+ Thread-safe connection pool for one host.
+
+ :param host:
+ Host used for this HTTP Connection (e.g. "localhost"), passed into
+ :class:`http.client.HTTPConnection`.
+
+ :param port:
+ Port used for this HTTP Connection (None is equivalent to 80), passed
+ into :class:`http.client.HTTPConnection`.
+
+ :param strict:
+ Causes BadStatusLine to be raised if the status line can't be parsed
+ as a valid HTTP/1.0 or 1.1 status line, passed into
+ :class:`http.client.HTTPConnection`.
+
+ .. note::
+ Only works in Python 2. This parameter is ignored in Python 3.
+
+ :param timeout:
+ Socket timeout in seconds for each individual connection. This can
+ be a float or integer, which sets the timeout for the HTTP request,
+ or an instance of :class:`urllib3.util.Timeout` which gives you more
+ fine-grained control over request timeouts. After the constructor has
+ run, this is always a `urllib3.util.Timeout` object.
+
+ :param maxsize:
+ Number of connections to save that can be reused. More than 1 is useful
+ in multithreaded situations. If ``block`` is set to False, more
+ connections will be created but they will not be saved once they've
+ been used.
+
+ :param block:
+ If set to True, no more than ``maxsize`` connections will be used at
+ a time. When no free connections are available, the call will block
+ until a connection has been released. This is a useful side effect for
+ particular multithreaded situations where one does not want to use more
+ than maxsize connections per host to prevent flooding.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param retries:
+ Retry configuration to use by default with requests in this pool.
+
+ :param _proxy:
+ Parsed proxy URL, should not be used directly, instead, see
+ :class:`urllib3.ProxyManager`
+
+ :param _proxy_headers:
+ A dictionary with proxy headers, should not be used directly,
+ instead, see :class:`urllib3.ProxyManager`
+
+ :param \\**conn_kw:
+ Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
+ :class:`urllib3.connection.HTTPSConnection` instances.
+ """
+
+ scheme = "http"
+ ConnectionCls = HTTPConnection
+ ResponseCls = HTTPResponse
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ maxsize=1,
+ block=False,
+ headers=None,
+ retries=None,
+ _proxy=None,
+ _proxy_headers=None,
+ _proxy_config=None,
+ **conn_kw
+ ):
+ ConnectionPool.__init__(self, host, port)
+ RequestMethods.__init__(self, headers)
+
+ self.strict = strict
+
+ if not isinstance(timeout, Timeout):
+ timeout = Timeout.from_float(timeout)
+
+ if retries is None:
+ retries = Retry.DEFAULT
+
+ self.timeout = timeout
+ self.retries = retries
+
+ self.pool = self.QueueCls(maxsize)
+ self.block = block
+
+ self.proxy = _proxy
+ self.proxy_headers = _proxy_headers or {}
+ self.proxy_config = _proxy_config
+
+ # Fill the queue up so that doing get() on it will block properly
+ for _ in xrange(maxsize):
+ self.pool.put(None)
+
+ # These are mostly for testing and debugging purposes.
+ self.num_connections = 0
+ self.num_requests = 0
+ self.conn_kw = conn_kw
+
+ if self.proxy:
+ # Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
+ # We cannot know if the user has added default socket options, so we cannot replace the
+ # list.
+ self.conn_kw.setdefault("socket_options", [])
+
+ self.conn_kw["proxy"] = self.proxy
+ self.conn_kw["proxy_config"] = self.proxy_config
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`HTTPConnection`.
+ """
+ self.num_connections += 1
+ log.debug(
+ "Starting new HTTP connection (%d): %s:%s",
+ self.num_connections,
+ self.host,
+ self.port or "80",
+ )
+
+ conn = self.ConnectionCls(
+ host=self.host,
+ port=self.port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict,
+ **self.conn_kw
+ )
+ return conn
+
+ def _get_conn(self, timeout=None):
+ """
+ Get a connection. Will return a pooled connection if one is available.
+
+ If no connections are available and :prop:`.block` is ``False``, then a
+ fresh connection is returned.
+
+ :param timeout:
+ Seconds to wait before giving up and raising
+ :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
+ :prop:`.block` is ``True``.
+ """
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise ClosedPoolError(self, "Pool is closed.")
+
+ except queue.Empty:
+ if self.block:
+ raise EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more connections are allowed.",
+ )
+ pass # Oh well, we'll create a new connection then
+
+ # If this is a persistent connection, check if it got disconnected
+ if conn and is_connection_dropped(conn):
+ log.debug("Resetting dropped connection: %s", self.host)
+ conn.close()
+ if getattr(conn, "auto_open", 1) == 0:
+ # This is a proxied connection that has been mutated by
+ # http.client._tunnel() and cannot be reused (since it would
+ # attempt to bypass the proxy)
+ conn = None
+
+ return conn or self._new_conn()
+
+ def _put_conn(self, conn):
+ """
+ Put a connection back into the pool.
+
+ :param conn:
+ Connection object for the current host and port as returned by
+ :meth:`._new_conn` or :meth:`._get_conn`.
+
+ If the pool is already full, the connection is closed and discarded
+ because we exceeded maxsize. If connections are discarded frequently,
+ then maxsize should be increased.
+
+ If the pool is closed, then the connection will be closed and discarded.
+ """
+ try:
+ self.pool.put(conn, block=False)
+ return # Everything is dandy, done.
+ except AttributeError:
+ # self.pool is None.
+ pass
+ except queue.Full:
+ # This should never happen if self.block == True
+ log.warning("Connection pool is full, discarding connection: %s", self.host)
+
+ # Connection never got put back into the pool, close it.
+ if conn:
+ conn.close()
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ pass
+
+ def _prepare_proxy(self, conn):
+ # Nothing to do for HTTP connections.
+ pass
+
+ def _get_timeout(self, timeout):
+ """ Helper that always returns a :class:`urllib3.util.Timeout` """
+ if timeout is _Default:
+ return self.timeout.clone()
+
+ if isinstance(timeout, Timeout):
+ return timeout.clone()
+ else:
+ # User passed us an int/float. This is for backwards compatibility,
+ # can be removed later
+ return Timeout.from_float(timeout)
+
+ def _raise_timeout(self, err, url, timeout_value):
+ """Is the error actually a timeout? Will raise a ReadTimeout or pass"""
+
+ if isinstance(err, SocketTimeout):
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ # See the above comment about EAGAIN in Python 3. In Python 2 we have
+ # to specifically catch it and throw the timeout error
+ if hasattr(err, "errno") and err.errno in _blocking_errnos:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ # Catch possible read timeouts thrown as SSL errors. If not the
+ # case, rethrow the original. We need to do this because of:
+ # http://bugs.python.org/issue10272
+ if "timed out" in str(err) or "did not complete (read)" in str(
+ err
+ ): # Python < 2.7.4
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % timeout_value
+ )
+
+ def _make_request(
+ self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
+ ):
+ """
+ Perform a request on a given urllib connection object taken from our
+ pool.
+
+ :param conn:
+ a connection from one of our connection pools
+
+ :param timeout:
+ Socket timeout in seconds for the request. This can be a
+ float or integer, which will set the same timeout value for
+ the socket connect and the socket read, or an instance of
+ :class:`urllib3.util.Timeout`, which gives you more fine-grained
+ control over your timeouts.
+ """
+ self.num_requests += 1
+
+ timeout_obj = self._get_timeout(timeout)
+ timeout_obj.start_connect()
+ conn.timeout = timeout_obj.connect_timeout
+
+ # Trigger any extra validation we need to do.
+ try:
+ self._validate_conn(conn)
+ except (SocketTimeout, BaseSSLError) as e:
+ # Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
+ self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
+ raise
+
+ # conn.request() calls http.client.*.request, not the method in
+ # urllib3.request. It also calls makefile (recv) on the socket.
+ try:
+ if chunked:
+ conn.request_chunked(method, url, **httplib_request_kw)
+ else:
+ conn.request(method, url, **httplib_request_kw)
+
+ # We are swallowing BrokenPipeError (errno.EPIPE) since the server is
+ # legitimately able to close the connection after sending a valid response.
+ # With this behaviour, the received response is still readable.
+ except BrokenPipeError:
+ # Python 3
+ pass
+ except IOError as e:
+ # Python 2 and macOS/Linux
+ # EPIPE and ESHUTDOWN are BrokenPipeError on Python 2, and EPROTOTYPE is needed on macOS
+ # https://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
+ if e.errno not in {
+ errno.EPIPE,
+ errno.ESHUTDOWN,
+ errno.EPROTOTYPE,
+ }:
+ raise
+
+ # Reset the timeout for the recv() on the socket
+ read_timeout = timeout_obj.read_timeout
+
+ # App Engine doesn't have a sock attr
+ if getattr(conn, "sock", None):
+ # In Python 3 socket.py will catch EAGAIN and return None when you
+ # try and read into the file pointer created by http.client, which
+ # instead raises a BadStatusLine exception. Instead of catching
+ # the exception and assuming all BadStatusLine exceptions are read
+ # timeouts, check for a zero timeout before making the request.
+ if read_timeout == 0:
+ raise ReadTimeoutError(
+ self, url, "Read timed out. (read timeout=%s)" % read_timeout
+ )
+ if read_timeout is Timeout.DEFAULT_TIMEOUT:
+ conn.sock.settimeout(socket.getdefaulttimeout())
+ else: # None or a value
+ conn.sock.settimeout(read_timeout)
+
+ # Receive the response from the server
+ try:
+ try:
+ # Python 2.7, use buffering of HTTP responses
+ httplib_response = conn.getresponse(buffering=True)
+ except TypeError:
+ # Python 3
+ try:
+ httplib_response = conn.getresponse()
+ except BaseException as e:
+ # Remove the TypeError from the exception chain in
+ # Python 3 (including for exceptions like SystemExit).
+ # Otherwise it looks like a bug in the code.
+ six.raise_from(e, None)
+ except (SocketTimeout, BaseSSLError, SocketError) as e:
+ self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
+ raise
+
+ # AppEngine doesn't have a version attr.
+ http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
+ log.debug(
+ '%s://%s:%s "%s %s %s" %s %s',
+ self.scheme,
+ self.host,
+ self.port,
+ method,
+ url,
+ http_version,
+ httplib_response.status,
+ httplib_response.length,
+ )
+
+ try:
+ assert_header_parsing(httplib_response.msg)
+ except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
+ log.warning(
+ "Failed to parse headers (url=%s): %s",
+ self._absolute_url(url),
+ hpe,
+ exc_info=True,
+ )
+
+ return httplib_response
+
+ def _absolute_url(self, path):
+ return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
+
+ def close(self):
+ """
+ Close all pooled connections and disable the pool.
+ """
+ if self.pool is None:
+ return
+ # Disable access to the pool
+ old_pool, self.pool = self.pool, None
+
+ try:
+ while True:
+ conn = old_pool.get(block=False)
+ if conn:
+ conn.close()
+
+ except queue.Empty:
+ pass # Done.
+
+ def is_same_host(self, url):
+ """
+ Check if the given ``url`` is a member of the same host as this
+ connection pool.
+ """
+ if url.startswith("/"):
+ return True
+
+ # TODO: Add optional support for socket.gethostbyname checking.
+ scheme, host, port = get_host(url)
+ if host is not None:
+ host = _normalize_host(host, scheme=scheme)
+
+ # Use explicit default port for comparison when none is given
+ if self.port and not port:
+ port = port_by_scheme.get(scheme)
+ elif not self.port and port == port_by_scheme.get(scheme):
+ port = None
+
+ return (scheme, host, port) == (self.scheme, self.host, self.port)
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=None,
+ redirect=True,
+ assert_same_host=True,
+ timeout=_Default,
+ pool_timeout=None,
+ release_conn=None,
+ chunked=False,
+ body_pos=None,
+ **response_kw
+ ):
+ """
+ Get a connection from the pool and perform an HTTP request. This is the
+ lowest level call for making a request, so you'll need to specify all
+ the raw details.
+
+ .. note::
+
+ More commonly, it's appropriate to use a convenience method provided
+ by :class:`.RequestMethods`, such as :meth:`request`.
+
+ .. note::
+
+ `release_conn` will only behave as expected if
+ `preload_content=False` because we want to make
+ `preload_content=False` the default behaviour someday soon without
+ breaking backwards compatibility.
+
+ :param method:
+ HTTP request method (such as GET, POST, PUT, etc.)
+
+ :param url:
+ The URL to perform the request on.
+
+ :param body:
+ Data to send in the request body, either :class:`str`, :class:`bytes`,
+ an iterable of :class:`str`/:class:`bytes`, or a file-like object.
+
+ :param headers:
+ Dictionary of custom headers to send, such as User-Agent,
+ If-None-Match, etc. If None, pool headers are used. If provided,
+ these headers completely replace any pool-specific headers.
+
+ :param retries:
+ Configure the number of retries to allow before raising a
+ :class:`~urllib3.exceptions.MaxRetryError` exception.
+
+ Pass ``None`` to retry until you receive a response. Pass a
+ :class:`~urllib3.util.retry.Retry` object for fine-grained control
+ over different types of retries.
+ Pass an integer number to retry connection errors that many times,
+ but no other types of errors. Pass zero to never retry.
+
+ If ``False``, then retries are disabled and any exception is raised
+ immediately. Also, instead of raising a MaxRetryError on redirects,
+ the redirect response will be returned.
+
+ :type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
+
+ :param redirect:
+ If True, automatically handle redirects (status codes 301, 302,
+ 303, 307, 308). Each redirect counts as a retry. Disabling retries
+ will disable redirect, too.
+
+ :param assert_same_host:
+ If ``True``, will make sure that the host of the pool requests is
+ consistent, and will raise HostChangedError otherwise. When ``False``,
+ you can use the pool on an HTTP proxy and request foreign hosts.
+
+ :param timeout:
+ If specified, overrides the default timeout for this one
+ request. It may be a float (in seconds) or an instance of
+ :class:`urllib3.util.Timeout`.
+
+ :param pool_timeout:
+ If set and the pool is set to block=True, then this method will
+ block for ``pool_timeout`` seconds and raise EmptyPoolError if no
+ connection is available within the time period.
+
+ :param release_conn:
+ If False, then the urlopen call will not release the connection
+ back into the pool once a response is received (but will release if
+ you read the entire contents of the response such as when
+ `preload_content=True`). This is useful if you're not preloading
+ the response's content immediately. You will need to call
+ ``r.release_conn()`` on the response ``r`` to return the connection
+ back into the pool. If None, it takes the value of
+ ``response_kw.get('preload_content', True)``.
+
+ :param chunked:
+ If True, urllib3 will send the body using chunked transfer
+ encoding. Otherwise, urllib3 will send the body using the standard
+ content-length form. Defaults to False.
+
+ :param int body_pos:
+ Position to seek to in file-like body in the event of a retry or
+ redirect. Typically this won't need to be set because urllib3 will
+ auto-populate the value when needed.
+
+ :param \\**response_kw:
+ Additional parameters are passed to
+ :meth:`urllib3.response.HTTPResponse.from_httplib`
+ """
+
+ parsed_url = parse_url(url)
+ destination_scheme = parsed_url.scheme
+
+ if headers is None:
+ headers = self.headers
+
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if release_conn is None:
+ release_conn = response_kw.get("preload_content", True)
+
+ # Check host
+ if assert_same_host and not self.is_same_host(url):
+ raise HostChangedError(self, url, retries)
+
+ # Ensure that the URL we're connecting to is properly encoded
+ if url.startswith("/"):
+ url = six.ensure_str(_encode_target(url))
+ else:
+ url = six.ensure_str(parsed_url.url)
+
+ conn = None
+
+ # Track whether `conn` needs to be released before
+ # returning/raising/recursing. Update this variable if necessary, and
+ # leave `release_conn` constant throughout the function. That way, if
+ # the function recurses, the original value of `release_conn` will be
+ # passed down into the recursive call, and its value will be respected.
+ #
+ # See issue #651 [1] for details.
+ #
+ # [1] <https://github.com/urllib3/urllib3/issues/651>
+ release_this_conn = release_conn
+
+ http_tunnel_required = connection_requires_http_tunnel(
+ self.proxy, self.proxy_config, destination_scheme
+ )
+
+ # Merge the proxy headers. Only done when not using HTTP CONNECT. We
+ # have to copy the headers dict so we can safely change it without those
+ # changes being reflected in anyone else's copy.
+ if not http_tunnel_required:
+ headers = headers.copy()
+ headers.update(self.proxy_headers)
+
+ # Must keep the exception bound to a separate variable or else Python 3
+ # complains about UnboundLocalError.
+ err = None
+
+ # Keep track of whether we cleanly exited the except block. This
+ # ensures we do proper cleanup in finally.
+ clean_exit = False
+
+ # Rewind body position, if needed. Record current position
+ # for future rewinds in the event of a redirect/retry.
+ body_pos = set_file_position(body, body_pos)
+
+ try:
+ # Request a connection from the queue.
+ timeout_obj = self._get_timeout(timeout)
+ conn = self._get_conn(timeout=pool_timeout)
+
+ conn.timeout = timeout_obj.connect_timeout
+
+ is_new_proxy_conn = self.proxy is not None and not getattr(
+ conn, "sock", None
+ )
+ if is_new_proxy_conn and http_tunnel_required:
+ self._prepare_proxy(conn)
+
+ # Make the request on the httplib connection object.
+ httplib_response = self._make_request(
+ conn,
+ method,
+ url,
+ timeout=timeout_obj,
+ body=body,
+ headers=headers,
+ chunked=chunked,
+ )
+
+ # If we're going to release the connection in ``finally:``, then
+ # the response doesn't need to know about the connection. Otherwise
+ # it will also try to release it and we'll have a double-release
+ # mess.
+ response_conn = conn if not release_conn else None
+
+ # Pass method to Response for length checking
+ response_kw["request_method"] = method
+
+ # Import httplib's response into our own wrapper object
+ response = self.ResponseCls.from_httplib(
+ httplib_response,
+ pool=self,
+ connection=response_conn,
+ retries=retries,
+ **response_kw
+ )
+
+ # Everything went great!
+ clean_exit = True
+
+ except EmptyPoolError:
+ # Didn't get a connection from the pool, no need to clean up
+ clean_exit = True
+ release_this_conn = False
+ raise
+
+ except (
+ TimeoutError,
+ HTTPException,
+ SocketError,
+ ProtocolError,
+ BaseSSLError,
+ SSLError,
+ CertificateError,
+ ) as e:
+ # Discard the connection for these exceptions. It will be
+ # replaced during the next _get_conn() call.
+ clean_exit = False
+ if isinstance(e, (BaseSSLError, CertificateError)):
+ e = SSLError(e)
+ elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
+ e = ProxyError("Cannot connect to proxy.", e)
+ elif isinstance(e, (SocketError, HTTPException)):
+ e = ProtocolError("Connection aborted.", e)
+
+ retries = retries.increment(
+ method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
+ )
+ retries.sleep()
+
+ # Keep track of the error for the retry warning.
+ err = e
+
+ finally:
+ if not clean_exit:
+ # We hit some kind of exception, handled or otherwise. We need
+ # to throw the connection away unless explicitly told not to.
+ # Close the connection, set the variable to None, and make sure
+ # we put the None back in the pool to avoid leaking it.
+ conn = conn and conn.close()
+ release_this_conn = True
+
+ if release_this_conn:
+ # Put the connection back to be reused. If the connection is
+ # expired then it will be None, which will get replaced with a
+ # fresh connection during _get_conn.
+ self._put_conn(conn)
+
+ if not conn:
+ # Try again
+ log.warning(
+ "Retrying (%r) after connection broken by '%r': %s", retries, err, url
+ )
+ return self.urlopen(
+ method,
+ url,
+ body,
+ headers,
+ retries,
+ redirect,
+ assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ # Handle redirect?
+ redirect_location = redirect and response.get_redirect_location()
+ if redirect_location:
+ if response.status == 303:
+ method = "GET"
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ response.drain_conn()
+ raise
+ return response
+
+ response.drain_conn()
+ retries.sleep_for_retry(response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ return self.urlopen(
+ method,
+ redirect_location,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(response.getheader("Retry-After"))
+ if retries.is_retry(method, response.status, has_retry_after):
+ try:
+ retries = retries.increment(method, url, response=response, _pool=self)
+ except MaxRetryError:
+ if retries.raise_on_status:
+ response.drain_conn()
+ raise
+ return response
+
+ response.drain_conn()
+ retries.sleep(response)
+ log.debug("Retry: %s", url)
+ return self.urlopen(
+ method,
+ url,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ assert_same_host=assert_same_host,
+ timeout=timeout,
+ pool_timeout=pool_timeout,
+ release_conn=release_conn,
+ chunked=chunked,
+ body_pos=body_pos,
+ **response_kw
+ )
+
+ return response
+
+
+class HTTPSConnectionPool(HTTPConnectionPool):
+ """
+ Same as :class:`.HTTPConnectionPool`, but HTTPS.
+
+ :class:`.HTTPSConnection` uses one of ``assert_fingerprint``,
+ ``assert_hostname`` and ``host`` in this order to verify connections.
+ If ``assert_hostname`` is False, no verification is done.
+
+ The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
+ ``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
+ is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
+ the connection socket into an SSL socket.
+ """
+
+ scheme = "https"
+ ConnectionCls = HTTPSConnection
+
+ def __init__(
+ self,
+ host,
+ port=None,
+ strict=False,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ maxsize=1,
+ block=False,
+ headers=None,
+ retries=None,
+ _proxy=None,
+ _proxy_headers=None,
+ key_file=None,
+ cert_file=None,
+ cert_reqs=None,
+ key_password=None,
+ ca_certs=None,
+ ssl_version=None,
+ assert_hostname=None,
+ assert_fingerprint=None,
+ ca_cert_dir=None,
+ **conn_kw
+ ):
+
+ HTTPConnectionPool.__init__(
+ self,
+ host,
+ port,
+ strict,
+ timeout,
+ maxsize,
+ block,
+ headers,
+ retries,
+ _proxy,
+ _proxy_headers,
+ **conn_kw
+ )
+
+ self.key_file = key_file
+ self.cert_file = cert_file
+ self.cert_reqs = cert_reqs
+ self.key_password = key_password
+ self.ca_certs = ca_certs
+ self.ca_cert_dir = ca_cert_dir
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ def _prepare_conn(self, conn):
+ """
+ Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
+ and establish the tunnel if proxy is used.
+ """
+
+ if isinstance(conn, VerifiedHTTPSConnection):
+ conn.set_cert(
+ key_file=self.key_file,
+ key_password=self.key_password,
+ cert_file=self.cert_file,
+ cert_reqs=self.cert_reqs,
+ ca_certs=self.ca_certs,
+ ca_cert_dir=self.ca_cert_dir,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint,
+ )
+ conn.ssl_version = self.ssl_version
+ return conn
+
+ def _prepare_proxy(self, conn):
+ """
+ Establishes a tunnel connection through HTTP CONNECT.
+
+ The tunnel is established early because otherwise httplib would
+ improperly set the Host: header to the proxy's IP:port.
+ """
+
+ conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
+
+ if self.proxy.scheme == "https":
+ conn.tls_in_tls_required = True
+
+ conn.connect()
+
+ def _new_conn(self):
+ """
+ Return a fresh :class:`http.client.HTTPSConnection`.
+ """
+ self.num_connections += 1
+ log.debug(
+ "Starting new HTTPS connection (%d): %s:%s",
+ self.num_connections,
+ self.host,
+ self.port or "443",
+ )
+
+ if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
+ raise SSLError(
+ "Can't connect to HTTPS URL because the SSL module is not available."
+ )
+
+ actual_host = self.host
+ actual_port = self.port
+ if self.proxy is not None:
+ actual_host = self.proxy.host
+ actual_port = self.proxy.port
+
+ conn = self.ConnectionCls(
+ host=actual_host,
+ port=actual_port,
+ timeout=self.timeout.connect_timeout,
+ strict=self.strict,
+ cert_file=self.cert_file,
+ key_file=self.key_file,
+ key_password=self.key_password,
+ **self.conn_kw
+ )
+
+ return self._prepare_conn(conn)
+
+ def _validate_conn(self, conn):
+ """
+ Called right before a request is made, after the socket is created.
+ """
+ super(HTTPSConnectionPool, self)._validate_conn(conn)
+
+ # Force connect early to allow us to validate the connection.
+ if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
+ conn.connect()
+
+ if not conn.is_verified:
+ warnings.warn(
+ (
+ "Unverified HTTPS request is being made to host '%s'. "
+ "Adding certificate verification is strongly advised. See: "
+ "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
+ "#ssl-warnings" % conn.host
+ ),
+ InsecureRequestWarning,
+ )
+
+
+def connection_from_url(url, **kw):
+ """
+ Given a URL, return a :class:`.ConnectionPool` instance for its host.
+
+ This is a shortcut for not having to parse out the scheme, host, and port
+ of the URL before creating a :class:`.ConnectionPool` instance.
+
+ :param url:
+ Absolute URL string that must include the scheme. Port is optional.
+
+ :param \\**kw:
+ Passes additional parameters to the constructor of the appropriate
+ :class:`.ConnectionPool`. Useful for specifying things like
+ timeout, maxsize, headers, etc.
+
+ Example::
+
+ >>> conn = connection_from_url('http://google.com/')
+ >>> r = conn.request('GET', '/')
+ """
+ scheme, host, port = get_host(url)
+ port = port or port_by_scheme.get(scheme, 80)
+ if scheme == "https":
+ return HTTPSConnectionPool(host, port=port, **kw)
+ else:
+ return HTTPConnectionPool(host, port=port, **kw)
+
+
+def _normalize_host(host, scheme):
+ """
+ Normalize hosts for comparisons and use with sockets.
+ """
+
+ host = normalize_host(host, scheme)
+
+ # httplib doesn't like it when we include brackets in IPv6 addresses
+ # Specifically, if we include brackets but also pass the port then
+ # httplib crazily doubles up the square brackets on the Host header.
+ # Instead, we need to make sure we never pass ``None`` as the port.
+ # However, for backward compatibility reasons we can't actually
+ # *assert* that. See http://bugs.python.org/issue28539
+ if host.startswith("[") and host.endswith("]"):
+ host = host[1:-1]
+ return host
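Putting the pool machinery above together: a brief usage sketch with explicit Timeout and Retry configuration (the host is illustrative):

    from urllib3 import HTTPSConnectionPool
    from urllib3.util.retry import Retry
    from urllib3.util.timeout import Timeout

    pool = HTTPSConnectionPool(
        "example.org",
        maxsize=4,      # keep up to four reusable connections
        block=True,     # never open more than maxsize connections
        timeout=Timeout(connect=2.0, read=5.0),
        retries=Retry(total=3, redirect=2),
    )
    response = pool.urlopen("GET", "/")
    print(response.status, response.getheader("Content-Type"))

Because ``block=True``, a fifth concurrent request waits for a free connection instead of opening a new one, as described in the ``block`` parameter documentation above.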
diff --git a/third_party/python/urllib3/urllib3/contrib/__init__.py b/third_party/python/urllib3/urllib3/contrib/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/__init__.py
diff --git a/third_party/python/urllib3/urllib3/contrib/_appengine_environ.py b/third_party/python/urllib3/urllib3/contrib/_appengine_environ.py
new file mode 100644
index 0000000000..8765b907d7
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/_appengine_environ.py
@@ -0,0 +1,36 @@
+"""
+This module provides means to detect the App Engine environment.
+"""
+
+import os
+
+
+def is_appengine():
+ return is_local_appengine() or is_prod_appengine()
+
+
+def is_appengine_sandbox():
+ """Reports if the app is running in the first generation sandbox.
+
+ The second generation runtimes are technically still in a sandbox, but it
+ is much less restrictive, so generally you shouldn't need to check for it.
+ see https://cloud.google.com/appengine/docs/standard/runtimes
+ """
+ return is_appengine() and os.environ["APPENGINE_RUNTIME"] == "python27"
+
+
+def is_local_appengine():
+ return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
+ "SERVER_SOFTWARE", ""
+ ).startswith("Development/")
+
+
+def is_prod_appengine():
+ return "APPENGINE_RUNTIME" in os.environ and os.environ.get(
+ "SERVER_SOFTWARE", ""
+ ).startswith("Google App Engine/")
+
+
+def is_prod_appengine_mvms():
+ """Deprecated."""
+ return False
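The helpers above key entirely off two environment variables that App Engine sets. A sketch that fakes them to show what each predicate detects (the values are illustrative):

    import os
    from urllib3.contrib import _appengine_environ

    os.environ["APPENGINE_RUNTIME"] = "python27"
    os.environ["SERVER_SOFTWARE"] = "Development/2.0"

    assert _appengine_environ.is_local_appengine()
    assert _appengine_environ.is_appengine_sandbox()   # python27 runtime
    assert not _appengine_environ.is_prod_appengine()  # not 'Google App Engine/'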
diff --git a/third_party/python/urllib3/urllib3/contrib/_securetransport/__init__.py b/third_party/python/urllib3/urllib3/contrib/_securetransport/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/_securetransport/__init__.py
diff --git a/third_party/python/urllib3/urllib3/contrib/_securetransport/bindings.py b/third_party/python/urllib3/urllib3/contrib/_securetransport/bindings.py
new file mode 100644
index 0000000000..11524d400b
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/_securetransport/bindings.py
@@ -0,0 +1,519 @@
+"""
+This module uses ctypes to bind a whole bunch of functions and constants from
+SecureTransport. The goal here is to provide the low-level API to
+SecureTransport. These are essentially the C-level functions and constants, and
+they're pretty gross to work with.
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import platform
+from ctypes import (
+ CDLL,
+ CFUNCTYPE,
+ POINTER,
+ c_bool,
+ c_byte,
+ c_char_p,
+ c_int32,
+ c_long,
+ c_size_t,
+ c_uint32,
+ c_ulong,
+ c_void_p,
+)
+from ctypes.util import find_library
+
+from urllib3.packages.six import raise_from
+
+if platform.system() != "Darwin":
+ raise ImportError("Only macOS is supported")
+
+version = platform.mac_ver()[0]
+version_info = tuple(map(int, version.split(".")))
+if version_info < (10, 8):
+ raise OSError(
+ "Only OS X 10.8 and newer are supported, not %s.%s"
+ % (version_info[0], version_info[1])
+ )
+
+
+def load_cdll(name, macos10_16_path):
+ """Loads a CDLL by name, falling back to known path on 10.16+"""
+ try:
+ # Big Sur is technically 11 but we use 10.16 due to the Big Sur
+ # beta being labeled as 10.16.
+ if version_info >= (10, 16):
+ path = macos10_16_path
+ else:
+ path = find_library(name)
+ if not path:
+ raise OSError # Caught and reraised as 'ImportError'
+ return CDLL(path, use_errno=True)
+ except OSError:
+ raise_from(ImportError("The library %s failed to load" % name), None)
+
+
+Security = load_cdll(
+ "Security", "/System/Library/Frameworks/Security.framework/Security"
+)
+CoreFoundation = load_cdll(
+ "CoreFoundation",
+ "/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation",
+)
+
+
+Boolean = c_bool
+CFIndex = c_long
+CFStringEncoding = c_uint32
+CFData = c_void_p
+CFString = c_void_p
+CFArray = c_void_p
+CFMutableArray = c_void_p
+CFDictionary = c_void_p
+CFError = c_void_p
+CFType = c_void_p
+CFTypeID = c_ulong
+
+CFTypeRef = POINTER(CFType)
+CFAllocatorRef = c_void_p
+
+OSStatus = c_int32
+
+CFDataRef = POINTER(CFData)
+CFStringRef = POINTER(CFString)
+CFArrayRef = POINTER(CFArray)
+CFMutableArrayRef = POINTER(CFMutableArray)
+CFDictionaryRef = POINTER(CFDictionary)
+CFArrayCallBacks = c_void_p
+CFDictionaryKeyCallBacks = c_void_p
+CFDictionaryValueCallBacks = c_void_p
+
+SecCertificateRef = POINTER(c_void_p)
+SecExternalFormat = c_uint32
+SecExternalItemType = c_uint32
+SecIdentityRef = POINTER(c_void_p)
+SecItemImportExportFlags = c_uint32
+SecItemImportExportKeyParameters = c_void_p
+SecKeychainRef = POINTER(c_void_p)
+SSLProtocol = c_uint32
+SSLCipherSuite = c_uint32
+SSLContextRef = POINTER(c_void_p)
+SecTrustRef = POINTER(c_void_p)
+SSLConnectionRef = c_uint32
+SecTrustResultType = c_uint32
+SecTrustOptionFlags = c_uint32
+SSLProtocolSide = c_uint32
+SSLConnectionType = c_uint32
+SSLSessionOption = c_uint32
+
+
+try:
+ Security.SecItemImport.argtypes = [
+ CFDataRef,
+ CFStringRef,
+ POINTER(SecExternalFormat),
+ POINTER(SecExternalItemType),
+ SecItemImportExportFlags,
+ POINTER(SecItemImportExportKeyParameters),
+ SecKeychainRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecItemImport.restype = OSStatus
+
+ Security.SecCertificateGetTypeID.argtypes = []
+ Security.SecCertificateGetTypeID.restype = CFTypeID
+
+ Security.SecIdentityGetTypeID.argtypes = []
+ Security.SecIdentityGetTypeID.restype = CFTypeID
+
+ Security.SecKeyGetTypeID.argtypes = []
+ Security.SecKeyGetTypeID.restype = CFTypeID
+
+ Security.SecCertificateCreateWithData.argtypes = [CFAllocatorRef, CFDataRef]
+ Security.SecCertificateCreateWithData.restype = SecCertificateRef
+
+ Security.SecCertificateCopyData.argtypes = [SecCertificateRef]
+ Security.SecCertificateCopyData.restype = CFDataRef
+
+ Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SecIdentityCreateWithCertificate.argtypes = [
+ CFTypeRef,
+ SecCertificateRef,
+ POINTER(SecIdentityRef),
+ ]
+ Security.SecIdentityCreateWithCertificate.restype = OSStatus
+
+ Security.SecKeychainCreate.argtypes = [
+ c_char_p,
+ c_uint32,
+ c_void_p,
+ Boolean,
+ c_void_p,
+ POINTER(SecKeychainRef),
+ ]
+ Security.SecKeychainCreate.restype = OSStatus
+
+ Security.SecKeychainDelete.argtypes = [SecKeychainRef]
+ Security.SecKeychainDelete.restype = OSStatus
+
+ Security.SecPKCS12Import.argtypes = [
+ CFDataRef,
+ CFDictionaryRef,
+ POINTER(CFArrayRef),
+ ]
+ Security.SecPKCS12Import.restype = OSStatus
+
+ SSLReadFunc = CFUNCTYPE(OSStatus, SSLConnectionRef, c_void_p, POINTER(c_size_t))
+ SSLWriteFunc = CFUNCTYPE(
+ OSStatus, SSLConnectionRef, POINTER(c_byte), POINTER(c_size_t)
+ )
+
+ Security.SSLSetIOFuncs.argtypes = [SSLContextRef, SSLReadFunc, SSLWriteFunc]
+ Security.SSLSetIOFuncs.restype = OSStatus
+
+ Security.SSLSetPeerID.argtypes = [SSLContextRef, c_char_p, c_size_t]
+ Security.SSLSetPeerID.restype = OSStatus
+
+ Security.SSLSetCertificate.argtypes = [SSLContextRef, CFArrayRef]
+ Security.SSLSetCertificate.restype = OSStatus
+
+ Security.SSLSetCertificateAuthorities.argtypes = [SSLContextRef, CFTypeRef, Boolean]
+ Security.SSLSetCertificateAuthorities.restype = OSStatus
+
+ Security.SSLSetConnection.argtypes = [SSLContextRef, SSLConnectionRef]
+ Security.SSLSetConnection.restype = OSStatus
+
+ Security.SSLSetPeerDomainName.argtypes = [SSLContextRef, c_char_p, c_size_t]
+ Security.SSLSetPeerDomainName.restype = OSStatus
+
+ Security.SSLHandshake.argtypes = [SSLContextRef]
+ Security.SSLHandshake.restype = OSStatus
+
+ Security.SSLRead.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
+ Security.SSLRead.restype = OSStatus
+
+ Security.SSLWrite.argtypes = [SSLContextRef, c_char_p, c_size_t, POINTER(c_size_t)]
+ Security.SSLWrite.restype = OSStatus
+
+ Security.SSLClose.argtypes = [SSLContextRef]
+ Security.SSLClose.restype = OSStatus
+
+ Security.SSLGetNumberSupportedCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
+ Security.SSLGetNumberSupportedCiphers.restype = OSStatus
+
+ Security.SSLGetSupportedCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t),
+ ]
+ Security.SSLGetSupportedCiphers.restype = OSStatus
+
+ Security.SSLSetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ c_size_t,
+ ]
+ Security.SSLSetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNumberEnabledCiphers.argtypes = [SSLContextRef, POINTER(c_size_t)]
+ Security.SSLGetNumberEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetEnabledCiphers.argtypes = [
+ SSLContextRef,
+ POINTER(SSLCipherSuite),
+ POINTER(c_size_t),
+ ]
+ Security.SSLGetEnabledCiphers.restype = OSStatus
+
+ Security.SSLGetNegotiatedCipher.argtypes = [SSLContextRef, POINTER(SSLCipherSuite)]
+ Security.SSLGetNegotiatedCipher.restype = OSStatus
+
+ Security.SSLGetNegotiatedProtocolVersion.argtypes = [
+ SSLContextRef,
+ POINTER(SSLProtocol),
+ ]
+ Security.SSLGetNegotiatedProtocolVersion.restype = OSStatus
+
+ Security.SSLCopyPeerTrust.argtypes = [SSLContextRef, POINTER(SecTrustRef)]
+ Security.SSLCopyPeerTrust.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificates.argtypes = [SecTrustRef, CFArrayRef]
+ Security.SecTrustSetAnchorCertificates.restype = OSStatus
+
+ Security.SecTrustSetAnchorCertificatesOnly.argtypes = [SecTrustRef, Boolean]
+ Security.SecTrustSetAnchorCertificatesOnly.restype = OSStatus
+
+ Security.SecTrustEvaluate.argtypes = [SecTrustRef, POINTER(SecTrustResultType)]
+ Security.SecTrustEvaluate.restype = OSStatus
+
+ Security.SecTrustGetCertificateCount.argtypes = [SecTrustRef]
+ Security.SecTrustGetCertificateCount.restype = CFIndex
+
+ Security.SecTrustGetCertificateAtIndex.argtypes = [SecTrustRef, CFIndex]
+ Security.SecTrustGetCertificateAtIndex.restype = SecCertificateRef
+
+ Security.SSLCreateContext.argtypes = [
+ CFAllocatorRef,
+ SSLProtocolSide,
+ SSLConnectionType,
+ ]
+ Security.SSLCreateContext.restype = SSLContextRef
+
+ Security.SSLSetSessionOption.argtypes = [SSLContextRef, SSLSessionOption, Boolean]
+ Security.SSLSetSessionOption.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMin.argtypes = [SSLContextRef, SSLProtocol]
+ Security.SSLSetProtocolVersionMin.restype = OSStatus
+
+ Security.SSLSetProtocolVersionMax.argtypes = [SSLContextRef, SSLProtocol]
+ Security.SSLSetProtocolVersionMax.restype = OSStatus
+
+ try:
+ Security.SSLSetALPNProtocols.argtypes = [SSLContextRef, CFArrayRef]
+ Security.SSLSetALPNProtocols.restype = OSStatus
+ except AttributeError:
+ # Supported only in macOS 10.12+
+ pass
+
+ Security.SecCopyErrorMessageString.argtypes = [OSStatus, c_void_p]
+ Security.SecCopyErrorMessageString.restype = CFStringRef
+
+ Security.SSLReadFunc = SSLReadFunc
+ Security.SSLWriteFunc = SSLWriteFunc
+ Security.SSLContextRef = SSLContextRef
+ Security.SSLProtocol = SSLProtocol
+ Security.SSLCipherSuite = SSLCipherSuite
+ Security.SecIdentityRef = SecIdentityRef
+ Security.SecKeychainRef = SecKeychainRef
+ Security.SecTrustRef = SecTrustRef
+ Security.SecTrustResultType = SecTrustResultType
+ Security.SecExternalFormat = SecExternalFormat
+ Security.OSStatus = OSStatus
+
+ Security.kSecImportExportPassphrase = CFStringRef.in_dll(
+ Security, "kSecImportExportPassphrase"
+ )
+ Security.kSecImportItemIdentity = CFStringRef.in_dll(
+ Security, "kSecImportItemIdentity"
+ )
+
+ # CoreFoundation time!
+ CoreFoundation.CFRetain.argtypes = [CFTypeRef]
+ CoreFoundation.CFRetain.restype = CFTypeRef
+
+ CoreFoundation.CFRelease.argtypes = [CFTypeRef]
+ CoreFoundation.CFRelease.restype = None
+
+ CoreFoundation.CFGetTypeID.argtypes = [CFTypeRef]
+ CoreFoundation.CFGetTypeID.restype = CFTypeID
+
+ CoreFoundation.CFStringCreateWithCString.argtypes = [
+ CFAllocatorRef,
+ c_char_p,
+ CFStringEncoding,
+ ]
+ CoreFoundation.CFStringCreateWithCString.restype = CFStringRef
+
+ CoreFoundation.CFStringGetCStringPtr.argtypes = [CFStringRef, CFStringEncoding]
+ CoreFoundation.CFStringGetCStringPtr.restype = c_char_p
+
+ CoreFoundation.CFStringGetCString.argtypes = [
+ CFStringRef,
+ c_char_p,
+ CFIndex,
+ CFStringEncoding,
+ ]
+ CoreFoundation.CFStringGetCString.restype = c_bool
+
+ CoreFoundation.CFDataCreate.argtypes = [CFAllocatorRef, c_char_p, CFIndex]
+ CoreFoundation.CFDataCreate.restype = CFDataRef
+
+ CoreFoundation.CFDataGetLength.argtypes = [CFDataRef]
+ CoreFoundation.CFDataGetLength.restype = CFIndex
+
+ CoreFoundation.CFDataGetBytePtr.argtypes = [CFDataRef]
+ CoreFoundation.CFDataGetBytePtr.restype = c_void_p
+
+ CoreFoundation.CFDictionaryCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFDictionaryKeyCallBacks,
+ CFDictionaryValueCallBacks,
+ ]
+ CoreFoundation.CFDictionaryCreate.restype = CFDictionaryRef
+
+ CoreFoundation.CFDictionaryGetValue.argtypes = [CFDictionaryRef, CFTypeRef]
+ CoreFoundation.CFDictionaryGetValue.restype = CFTypeRef
+
+ CoreFoundation.CFArrayCreate.argtypes = [
+ CFAllocatorRef,
+ POINTER(CFTypeRef),
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreate.restype = CFArrayRef
+
+ CoreFoundation.CFArrayCreateMutable.argtypes = [
+ CFAllocatorRef,
+ CFIndex,
+ CFArrayCallBacks,
+ ]
+ CoreFoundation.CFArrayCreateMutable.restype = CFMutableArrayRef
+
+ CoreFoundation.CFArrayAppendValue.argtypes = [CFMutableArrayRef, c_void_p]
+ CoreFoundation.CFArrayAppendValue.restype = None
+
+ CoreFoundation.CFArrayGetCount.argtypes = [CFArrayRef]
+ CoreFoundation.CFArrayGetCount.restype = CFIndex
+
+ CoreFoundation.CFArrayGetValueAtIndex.argtypes = [CFArrayRef, CFIndex]
+ CoreFoundation.CFArrayGetValueAtIndex.restype = c_void_p
+
+ CoreFoundation.kCFAllocatorDefault = CFAllocatorRef.in_dll(
+ CoreFoundation, "kCFAllocatorDefault"
+ )
+ CoreFoundation.kCFTypeArrayCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeArrayCallBacks"
+ )
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeDictionaryKeyCallBacks"
+ )
+ CoreFoundation.kCFTypeDictionaryValueCallBacks = c_void_p.in_dll(
+ CoreFoundation, "kCFTypeDictionaryValueCallBacks"
+ )
+
+ CoreFoundation.CFTypeRef = CFTypeRef
+ CoreFoundation.CFArrayRef = CFArrayRef
+ CoreFoundation.CFStringRef = CFStringRef
+ CoreFoundation.CFDictionaryRef = CFDictionaryRef
+
+except AttributeError:
+ raise ImportError("Error initializing ctypes")
+
+
+class CFConst(object):
+ """
+ A class object that acts as essentially a namespace for CoreFoundation
+ constants.
+ """
+
+ kCFStringEncodingUTF8 = CFStringEncoding(0x08000100)
+
+
+class SecurityConst(object):
+ """
+ A class object that acts as essentially a namespace for Security constants.
+ """
+
+ kSSLSessionOptionBreakOnServerAuth = 0
+
+ kSSLProtocol2 = 1
+ kSSLProtocol3 = 2
+ kTLSProtocol1 = 4
+ kTLSProtocol11 = 7
+ kTLSProtocol12 = 8
+ # SecureTransport does not support TLS 1.3 even though there's a constant for it
+ kTLSProtocol13 = 10
+ kTLSProtocolMaxSupported = 999
+
+ kSSLClientSide = 1
+ kSSLStreamType = 0
+
+ kSecFormatPEMSequence = 10
+
+ kSecTrustResultInvalid = 0
+ kSecTrustResultProceed = 1
+ # This gap is present on purpose: this was kSecTrustResultConfirm, which
+ # is deprecated.
+ kSecTrustResultDeny = 3
+ kSecTrustResultUnspecified = 4
+ kSecTrustResultRecoverableTrustFailure = 5
+ kSecTrustResultFatalTrustFailure = 6
+ kSecTrustResultOtherError = 7
+
+ errSSLProtocol = -9800
+ errSSLWouldBlock = -9803
+ errSSLClosedGraceful = -9805
+ errSSLClosedNoNotify = -9816
+ errSSLClosedAbort = -9806
+
+ errSSLXCertChainInvalid = -9807
+ errSSLCrypto = -9809
+ errSSLInternal = -9810
+ errSSLCertExpired = -9814
+ errSSLCertNotYetValid = -9815
+ errSSLUnknownRootCert = -9812
+ errSSLNoRootCert = -9813
+ errSSLHostNameMismatch = -9843
+ errSSLPeerHandshakeFail = -9824
+ errSSLPeerUserCancelled = -9839
+ errSSLWeakPeerEphemeralDHKey = -9850
+ errSSLServerAuthCompleted = -9841
+ errSSLRecordOverflow = -9847
+
+ errSecVerifyFailed = -67808
+ errSecNoTrustSettings = -25263
+ errSecItemNotFound = -25300
+ errSecInvalidTrustSettings = -25262
+
+ # Cipher suites. We only pick the ones our default cipher string allows.
+ # Source: https://developer.apple.com/documentation/security/1550981-ssl_cipher_suite_values
+ TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = 0xC02C
+ TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = 0xC030
+ TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = 0xC02B
+ TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = 0xC02F
+ TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA9
+ TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = 0xCCA8
+ TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = 0x009F
+ TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = 0x009E
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = 0xC024
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = 0xC028
+ TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = 0xC00A
+ TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = 0xC014
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = 0x006B
+ TLS_DHE_RSA_WITH_AES_256_CBC_SHA = 0x0039
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = 0xC023
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = 0xC027
+ TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = 0xC009
+ TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = 0xC013
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = 0x0067
+ TLS_DHE_RSA_WITH_AES_128_CBC_SHA = 0x0033
+ TLS_RSA_WITH_AES_256_GCM_SHA384 = 0x009D
+ TLS_RSA_WITH_AES_128_GCM_SHA256 = 0x009C
+ TLS_RSA_WITH_AES_256_CBC_SHA256 = 0x003D
+ TLS_RSA_WITH_AES_128_CBC_SHA256 = 0x003C
+ TLS_RSA_WITH_AES_256_CBC_SHA = 0x0035
+ TLS_RSA_WITH_AES_128_CBC_SHA = 0x002F
+ TLS_AES_128_GCM_SHA256 = 0x1301
+ TLS_AES_256_GCM_SHA384 = 0x1302
+ TLS_AES_128_CCM_8_SHA256 = 0x1305
+ TLS_AES_128_CCM_SHA256 = 0x1304
diff --git a/third_party/python/urllib3/urllib3/contrib/_securetransport/low_level.py b/third_party/python/urllib3/urllib3/contrib/_securetransport/low_level.py
new file mode 100644
index 0000000000..ed8120190c
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/_securetransport/low_level.py
@@ -0,0 +1,396 @@
+"""
+Low-level helpers for the SecureTransport bindings.
+
+These are Python functions that are not directly related to the high-level APIs
+but are necessary to get them to work. They include a whole bunch of low-level
+CoreFoundation messing about and memory management. The concerns in this module
+are almost entirely about trying to avoid memory leaks and providing
+appropriate and useful assistance to the higher-level code.
+"""
+import base64
+import ctypes
+import itertools
+import os
+import re
+import ssl
+import struct
+import tempfile
+
+from .bindings import CFConst, CoreFoundation, Security
+
+# This regular expression is used to grab PEM data out of a PEM bundle.
+_PEM_CERTS_RE = re.compile(
+ b"-----BEGIN CERTIFICATE-----\n(.*?)\n-----END CERTIFICATE-----", re.DOTALL
+)
+
+
+def _cf_data_from_bytes(bytestring):
+ """
+ Given a bytestring, create a CFData object from it. This CFData object must
+ be CFReleased by the caller.
+ """
+ return CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, bytestring, len(bytestring)
+ )
+
+
+def _cf_dictionary_from_tuples(tuples):
+ """
+ Given a list of Python tuples, create an associated CFDictionary.
+ """
+ dictionary_size = len(tuples)
+
+ # We need to get the dictionary keys and values out in the same order.
+ keys = (t[0] for t in tuples)
+ values = (t[1] for t in tuples)
+ cf_keys = (CoreFoundation.CFTypeRef * dictionary_size)(*keys)
+ cf_values = (CoreFoundation.CFTypeRef * dictionary_size)(*values)
+
+ return CoreFoundation.CFDictionaryCreate(
+ CoreFoundation.kCFAllocatorDefault,
+ cf_keys,
+ cf_values,
+ dictionary_size,
+ CoreFoundation.kCFTypeDictionaryKeyCallBacks,
+ CoreFoundation.kCFTypeDictionaryValueCallBacks,
+ )
+
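+# As a rough illustration (hypothetical values, not part of this module): the
+# options dictionary taken by Security.SecPKCS12Import, declared in
+# bindings.py, could be assembled like so:
+#
+#     cf_password = _cfstr(b"hunter2")
+#     options = _cf_dictionary_from_tuples(
+#         [(Security.kSecImportExportPassphrase, cf_password)]
+#     )
+#     # ... pass options to Security.SecPKCS12Import, then CFRelease both.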
+
+def _cfstr(py_bstr):
+ """
+ Given Python binary data, create a CFString.
+ The string must be CFReleased by the caller.
+ """
+ c_str = ctypes.c_char_p(py_bstr)
+ cf_str = CoreFoundation.CFStringCreateWithCString(
+ CoreFoundation.kCFAllocatorDefault,
+ c_str,
+ CFConst.kCFStringEncodingUTF8,
+ )
+ return cf_str
+
+
+def _create_cfstring_array(lst):
+ """
+ Given a list of Python binary data, create an associated CFMutableArray.
+ The array must be CFReleased by the caller.
+
+ Raises an ssl.SSLError on failure.
+ """
+ cf_arr = None
+ try:
+ cf_arr = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ if not cf_arr:
+ raise MemoryError("Unable to allocate memory!")
+ for item in lst:
+ cf_str = _cfstr(item)
+ if not cf_str:
+ raise MemoryError("Unable to allocate memory!")
+ try:
+ CoreFoundation.CFArrayAppendValue(cf_arr, cf_str)
+ finally:
+ CoreFoundation.CFRelease(cf_str)
+ except BaseException as e:
+ if cf_arr:
+ CoreFoundation.CFRelease(cf_arr)
+ raise ssl.SSLError("Unable to allocate array: %s" % (e,))
+ return cf_arr
+
+
+def _cf_string_to_unicode(value):
+ """
+ Creates a Unicode string from a CFString object. Used entirely for error
+ reporting.
+
+ Yes, it annoys me quite a lot that this function is this complex.
+ """
+ value_as_void_p = ctypes.cast(value, ctypes.POINTER(ctypes.c_void_p))
+
+ string = CoreFoundation.CFStringGetCStringPtr(
+ value_as_void_p, CFConst.kCFStringEncodingUTF8
+ )
+ if string is None:
+ buffer = ctypes.create_string_buffer(1024)
+ result = CoreFoundation.CFStringGetCString(
+ value_as_void_p, buffer, 1024, CFConst.kCFStringEncodingUTF8
+ )
+ if not result:
+ raise OSError("Error copying C string from CFStringRef")
+ string = buffer.value
+ if string is not None:
+ string = string.decode("utf-8")
+ return string
+
+
+def _assert_no_error(error, exception_class=None):
+ """
+ Checks the return code and throws an exception if there is an error to
+ report.
+ """
+ if error == 0:
+ return
+
+ cf_error_string = Security.SecCopyErrorMessageString(error, None)
+ output = _cf_string_to_unicode(cf_error_string)
+ CoreFoundation.CFRelease(cf_error_string)
+
+ if output is None or output == u"":
+ output = u"OSStatus %s" % error
+
+ if exception_class is None:
+ exception_class = ssl.SSLError
+
+ raise exception_class(output)
+
+
+def _cert_array_from_pem(pem_bundle):
+ """
+ Given a bundle of certs in PEM format, turns them into a CFArray of certs
+ that can be used to validate a cert chain.
+ """
+ # Normalize the PEM bundle's line endings.
+ pem_bundle = pem_bundle.replace(b"\r\n", b"\n")
+
+ der_certs = [
+ base64.b64decode(match.group(1)) for match in _PEM_CERTS_RE.finditer(pem_bundle)
+ ]
+ if not der_certs:
+ raise ssl.SSLError("No root certificates specified")
+
+ cert_array = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ if not cert_array:
+ raise ssl.SSLError("Unable to allocate memory!")
+
+ try:
+ for der_bytes in der_certs:
+ certdata = _cf_data_from_bytes(der_bytes)
+ if not certdata:
+ raise ssl.SSLError("Unable to allocate memory!")
+ cert = Security.SecCertificateCreateWithData(
+ CoreFoundation.kCFAllocatorDefault, certdata
+ )
+ CoreFoundation.CFRelease(certdata)
+ if not cert:
+ raise ssl.SSLError("Unable to build cert object!")
+
+ CoreFoundation.CFArrayAppendValue(cert_array, cert)
+ CoreFoundation.CFRelease(cert)
+ except Exception:
+ # We need to free the array before the exception bubbles further.
+ # We only want to do that if an error occurs: otherwise, the caller
+ # should free.
+ CoreFoundation.CFRelease(cert_array)
+ raise
+
+ return cert_array
+
+
+def _is_cert(item):
+ """
+ Returns True if a given CFTypeRef is a certificate.
+ """
+ expected = Security.SecCertificateGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _is_identity(item):
+ """
+ Returns True if a given CFTypeRef is an identity.
+ """
+ expected = Security.SecIdentityGetTypeID()
+ return CoreFoundation.CFGetTypeID(item) == expected
+
+
+def _temporary_keychain():
+ """
+ This function creates a temporary Mac keychain that we can use to work with
+ credentials. This keychain uses a one-time password and a temporary file to
+ store the data. We expect to have one keychain per socket. The returned
+ SecKeychainRef must be freed by the caller, including calling
+ SecKeychainDelete.
+
+ Returns a tuple of the SecKeychainRef and the path to the temporary
+ directory that contains it.
+ """
+ # Unfortunately, SecKeychainCreate requires a path to a keychain. This
+ # means we cannot use mkstemp to use a generic temporary file. Instead,
+ # we're going to create a temporary directory and a filename to use there.
+ # This filename will be 8 random bytes expanded into hex (base16). We
+ # also need some random bytes to password-protect the keychain we're
+ # creating, so we ask for 40 random bytes.
+ random_bytes = os.urandom(40)
+ filename = base64.b16encode(random_bytes[:8]).decode("utf-8")
+ password = base64.b16encode(random_bytes[8:]) # Must be valid UTF-8
+ tempdirectory = tempfile.mkdtemp()
+
+ keychain_path = os.path.join(tempdirectory, filename).encode("utf-8")
+
+ # We now want to create the keychain itself.
+ keychain = Security.SecKeychainRef()
+ status = Security.SecKeychainCreate(
+ keychain_path, len(password), password, False, None, ctypes.byref(keychain)
+ )
+ _assert_no_error(status)
+
+ # Having created the keychain, we want to pass it off to the caller.
+ return keychain, tempdirectory
+
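+# A minimal cleanup sketch (illustrative only; assumes the caller has shutil
+# available): every keychain returned by _temporary_keychain() should
+# eventually be disposed of along these lines, per the docstring above:
+#
+#     keychain, keychain_dir = _temporary_keychain()
+#     try:
+#         ...  # import credentials and run the connection
+#     finally:
+#         Security.SecKeychainDelete(keychain)
+#         CoreFoundation.CFRelease(keychain)
+#         shutil.rmtree(keychain_dir, ignore_errors=True)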
+
+def _load_items_from_file(keychain, path):
+ """
+ Given a single file, loads all the trust objects from it into arrays and
+ the keychain.
+ Returns a tuple of lists: the first list is a list of identities, the
+ second a list of certs.
+ """
+ certificates = []
+ identities = []
+ result_array = None
+
+ with open(path, "rb") as f:
+ raw_filedata = f.read()
+
+ try:
+ filedata = CoreFoundation.CFDataCreate(
+ CoreFoundation.kCFAllocatorDefault, raw_filedata, len(raw_filedata)
+ )
+ result_array = CoreFoundation.CFArrayRef()
+ result = Security.SecItemImport(
+ filedata, # cert data
+ None, # Filename, leaving it out for now
+ None, # What the type of the file is, we don't care
+ None, # what's in the file, we don't care
+ 0, # import flags
+ None, # key params, can include passphrase in the future
+ keychain, # The keychain to insert into
+ ctypes.byref(result_array), # Results
+ )
+ _assert_no_error(result)
+
+ # A CFArray is not very useful to us as an intermediary
+ # representation, so we are going to extract the objects we want
+ # and then free the array. We don't need to keep hold of keys: the
+ # keychain already has them!
+ result_count = CoreFoundation.CFArrayGetCount(result_array)
+ for index in range(result_count):
+ item = CoreFoundation.CFArrayGetValueAtIndex(result_array, index)
+ item = ctypes.cast(item, CoreFoundation.CFTypeRef)
+
+ if _is_cert(item):
+ CoreFoundation.CFRetain(item)
+ certificates.append(item)
+ elif _is_identity(item):
+ CoreFoundation.CFRetain(item)
+ identities.append(item)
+ finally:
+ if result_array:
+ CoreFoundation.CFRelease(result_array)
+
+ CoreFoundation.CFRelease(filedata)
+
+ return (identities, certificates)
+
+
+def _load_client_cert_chain(keychain, *paths):
+ """
+ Load certificates and maybe keys from a number of files. Has the end goal
+ of returning a CFArray containing one SecIdentityRef, and then zero or more
+ SecCertificateRef objects, suitable for use as a client certificate trust
+ chain.
+ """
+ # Ok, the strategy.
+ #
+ # This relies on knowing that macOS will not give you a SecIdentityRef
+ # unless you have imported a key into a keychain. This is a somewhat
+ # artificial limitation of macOS (for example, it doesn't necessarily
+ # affect iOS), but there is nothing inside Security.framework that lets you
+ # get a SecIdentityRef without having a key in a keychain.
+ #
+ # So the policy here is we take all the files and iterate them in order.
+ # Each one will use SecItemImport to have one or more objects loaded from
+ # it. We will also point at a keychain that macOS can use to work with the
+ # private key.
+ #
+ # Once we have all the objects, we'll check what we actually have. If we
+ # already have a SecIdentityRef in hand, fab: we'll use that. Otherwise,
+ # we'll take the first certificate (which we assume to be our leaf) and
+ # ask the keychain to give us a SecIdentityRef with that cert's associated
+ # key.
+ #
+ # We'll then return a CFArray containing the trust chain: one
+ # SecIdentityRef and then zero-or-more SecCertificateRef objects. The
+ # responsibility for freeing this CFArray will be with the caller. This
+ # CFArray must remain alive for the entire connection, so in practice it
+ # will be stored with a single SSLSocket, along with the reference to the
+ # keychain.
+ certificates = []
+ identities = []
+
+ # Filter out bad paths.
+ paths = (path for path in paths if path)
+
+ try:
+ for file_path in paths:
+ new_identities, new_certs = _load_items_from_file(keychain, file_path)
+ identities.extend(new_identities)
+ certificates.extend(new_certs)
+
+ # Ok, we have everything. The question is: do we have an identity? If
+ # not, we want to grab one from the first cert we have.
+ if not identities:
+ new_identity = Security.SecIdentityRef()
+ status = Security.SecIdentityCreateWithCertificate(
+ keychain, certificates[0], ctypes.byref(new_identity)
+ )
+ _assert_no_error(status)
+ identities.append(new_identity)
+
+ # We now want to release the original certificate, as we no longer
+ # need it.
+ CoreFoundation.CFRelease(certificates.pop(0))
+
+ # We now need to build a new CFArray that holds the trust chain.
+ trust_chain = CoreFoundation.CFArrayCreateMutable(
+ CoreFoundation.kCFAllocatorDefault,
+ 0,
+ ctypes.byref(CoreFoundation.kCFTypeArrayCallBacks),
+ )
+ for item in itertools.chain(identities, certificates):
+ # ArrayAppendValue does a CFRetain on the item. That's fine,
+ # because the finally block will release our other refs to them.
+ CoreFoundation.CFArrayAppendValue(trust_chain, item)
+
+ return trust_chain
+ finally:
+ for obj in itertools.chain(identities, certificates):
+ CoreFoundation.CFRelease(obj)
+
+
+TLS_PROTOCOL_VERSIONS = {
+ "SSLv2": (0, 2),
+ "SSLv3": (3, 0),
+ "TLSv1": (3, 1),
+ "TLSv1.1": (3, 2),
+ "TLSv1.2": (3, 3),
+}
+
+
+def _build_tls_unknown_ca_alert(version):
+ """
+ Builds a TLS alert record for an unknown CA.
+ """
+ ver_maj, ver_min = TLS_PROTOCOL_VERSIONS[version]
+ severity_fatal = 0x02
+ description_unknown_ca = 0x30
+ msg = struct.pack(">BB", severity_fatal, description_unknown_ca)
+ msg_len = len(msg)
+ record_type_alert = 0x15
+ record = struct.pack(">BBBH", record_type_alert, ver_maj, ver_min, msg_len) + msg
+ return record
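+
+
+# Worked example: _build_tls_unknown_ca_alert("TLSv1.2") packs the two-byte
+# alert message (fatal = 0x02, unknown CA = 0x30) behind the five-byte record
+# header (type 0x15, version bytes 3 and 3, payload length 2), producing
+# b"\x15\x03\x03\x00\x02\x02\x30".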
diff --git a/third_party/python/urllib3/urllib3/contrib/appengine.py b/third_party/python/urllib3/urllib3/contrib/appengine.py
new file mode 100644
index 0000000000..aa64a0914c
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/appengine.py
@@ -0,0 +1,314 @@
+"""
+This module provides a pool manager that uses Google App Engine's
+`URLFetch Service <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+Example usage::
+
+ from urllib3 import PoolManager
+ from urllib3.contrib.appengine import AppEngineManager, is_appengine_sandbox
+
+ if is_appengine_sandbox():
+ # AppEngineManager uses AppEngine's URLFetch API behind the scenes
+ http = AppEngineManager()
+ else:
+ # PoolManager uses a socket-level API behind the scenes
+ http = PoolManager()
+
+ r = http.request('GET', 'https://google.com/')
+
+There are `limitations <https://cloud.google.com/appengine/docs/python/\
+urlfetch/#Python_Quotas_and_limits>`_ to the URLFetch service and it may not be
+the best choice for your application. There are three options for using
+urllib3 on Google App Engine:
+
+1. You can use :class:`AppEngineManager` with URLFetch. URLFetch is
+ cost-effective in many circumstances as long as your usage is within the
+ limitations.
+2. You can use a normal :class:`~urllib3.PoolManager` by enabling sockets.
+ Sockets also have `limitations and restrictions
+ <https://cloud.google.com/appengine/docs/python/sockets/\
+ #limitations-and-restrictions>`_ and have a lower free quota than URLFetch.
+ To use sockets, be sure to specify the following in your ``app.yaml``::
+
+ env_variables:
+ GAE_USE_SOCKETS_HTTPLIB : 'true'
+
+3. If you are using `App Engine Flexible
+<https://cloud.google.com/appengine/docs/flexible/>`_, you can use the standard
+:class:`PoolManager` without any configuration or special environment variables.
+"""
+
+from __future__ import absolute_import
+
+import io
+import logging
+import warnings
+
+from ..exceptions import (
+ HTTPError,
+ HTTPWarning,
+ MaxRetryError,
+ ProtocolError,
+ SSLError,
+ TimeoutError,
+)
+from ..packages.six.moves.urllib.parse import urljoin
+from ..request import RequestMethods
+from ..response import HTTPResponse
+from ..util.retry import Retry
+from ..util.timeout import Timeout
+from . import _appengine_environ
+
+try:
+ from google.appengine.api import urlfetch
+except ImportError:
+ urlfetch = None
+
+
+log = logging.getLogger(__name__)
+
+
+class AppEnginePlatformWarning(HTTPWarning):
+ pass
+
+
+class AppEnginePlatformError(HTTPError):
+ pass
+
+
+class AppEngineManager(RequestMethods):
+ """
+ Connection manager for Google App Engine sandbox applications.
+
+ This manager uses the URLFetch service directly instead of using the
+ emulated httplib, and is subject to URLFetch limitations as described in
+ the App Engine documentation `here
+ <https://cloud.google.com/appengine/docs/python/urlfetch>`_.
+
+ Notably it will raise an :class:`AppEnginePlatformError` if:
+ * URLFetch is not available.
+ * You attempt to use this on App Engine Flexible, as full socket
+ support is available there.
+ * A request size is more than 10 megabytes.
+ * A response size is more than 32 megabytes.
+ * You use an unsupported request method such as OPTIONS.
+
+ Beyond those cases, it will raise normal urllib3 errors.
+ """
+
+ def __init__(
+ self,
+ headers=None,
+ retries=None,
+ validate_certificate=True,
+ urlfetch_retries=True,
+ ):
+ if not urlfetch:
+ raise AppEnginePlatformError(
+ "URLFetch is not available in this environment."
+ )
+
+ warnings.warn(
+ "urllib3 is using URLFetch on Google App Engine sandbox instead "
+ "of sockets. To use sockets directly instead of URLFetch see "
+ "https://urllib3.readthedocs.io/en/latest/reference/urllib3.contrib.html.",
+ AppEnginePlatformWarning,
+ )
+
+ RequestMethods.__init__(self, headers)
+ self.validate_certificate = validate_certificate
+ self.urlfetch_retries = urlfetch_retries
+
+ self.retries = retries or Retry.DEFAULT
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=None,
+ redirect=True,
+ timeout=Timeout.DEFAULT_TIMEOUT,
+ **response_kw
+ ):
+
+ retries = self._get_retries(retries, redirect)
+
+ try:
+ follow_redirects = redirect and retries.redirect != 0 and retries.total
+ response = urlfetch.fetch(
+ url,
+ payload=body,
+ method=method,
+ headers=headers or {},
+ allow_truncated=False,
+ follow_redirects=self.urlfetch_retries and follow_redirects,
+ deadline=self._get_absolute_timeout(timeout),
+ validate_certificate=self.validate_certificate,
+ )
+ except urlfetch.DeadlineExceededError as e:
+ raise TimeoutError(self, e)
+
+ except urlfetch.InvalidURLError as e:
+ if "too large" in str(e):
+ raise AppEnginePlatformError(
+ "URLFetch request too large, URLFetch only "
+ "supports requests up to 10mb in size.",
+ e,
+ )
+ raise ProtocolError(e)
+
+ except urlfetch.DownloadError as e:
+ if "Too many redirects" in str(e):
+ raise MaxRetryError(self, url, reason=e)
+ raise ProtocolError(e)
+
+ except urlfetch.ResponseTooLargeError as e:
+ raise AppEnginePlatformError(
+ "URLFetch response too large, URLFetch only supports"
+ "responses up to 32mb in size.",
+ e,
+ )
+
+ except urlfetch.SSLCertificateError as e:
+ raise SSLError(e)
+
+ except urlfetch.InvalidMethodError as e:
+ raise AppEnginePlatformError(
+ "URLFetch does not support method: %s" % method, e
+ )
+
+ http_response = self._urlfetch_response_to_http_response(
+ response, retries=retries, **response_kw
+ )
+
+ # Handle redirect?
+ redirect_location = redirect and http_response.get_redirect_location()
+ if redirect_location:
+ # Check for redirect response
+ if self.urlfetch_retries and retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ else:
+ if http_response.status == 303:
+ method = "GET"
+
+ try:
+ retries = retries.increment(
+ method, url, response=http_response, _pool=self
+ )
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ raise MaxRetryError(self, url, "too many redirects")
+ return http_response
+
+ retries.sleep_for_retry(http_response)
+ log.debug("Redirecting %s -> %s", url, redirect_location)
+ redirect_url = urljoin(url, redirect_location)
+ return self.urlopen(
+ method,
+ redirect_url,
+ body,
+ headers,
+ retries=retries,
+ redirect=redirect,
+ timeout=timeout,
+ **response_kw
+ )
+
+ # Check if we should retry the HTTP response.
+ has_retry_after = bool(http_response.getheader("Retry-After"))
+ if retries.is_retry(method, http_response.status, has_retry_after):
+ retries = retries.increment(method, url, response=http_response, _pool=self)
+ log.debug("Retry: %s", url)
+ retries.sleep(http_response)
+ return self.urlopen(
+ method,
+ url,
+ body=body,
+ headers=headers,
+ retries=retries,
+ redirect=redirect,
+ timeout=timeout,
+ **response_kw
+ )
+
+ return http_response
+
+ def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
+
+ if is_prod_appengine():
+ # Production GAE handles deflate encoding automatically, but does
+ # not remove the encoding header.
+ content_encoding = urlfetch_resp.headers.get("content-encoding")
+
+ if content_encoding == "deflate":
+ del urlfetch_resp.headers["content-encoding"]
+
+ transfer_encoding = urlfetch_resp.headers.get("transfer-encoding")
+ # We have a full response's content,
+ # so let's make sure we don't report ourselves as chunked data.
+ if transfer_encoding == "chunked":
+ encodings = transfer_encoding.split(",")
+ encodings.remove("chunked")
+ urlfetch_resp.headers["transfer-encoding"] = ",".join(encodings)
+
+ original_response = HTTPResponse(
+ # In order for decoding to work, we must present the content as
+ # a file-like object.
+ body=io.BytesIO(urlfetch_resp.content),
+ msg=urlfetch_resp.header_msg,
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ **response_kw
+ )
+
+ return HTTPResponse(
+ body=io.BytesIO(urlfetch_resp.content),
+ headers=urlfetch_resp.headers,
+ status=urlfetch_resp.status_code,
+ original_response=original_response,
+ **response_kw
+ )
+
+ def _get_absolute_timeout(self, timeout):
+ if timeout is Timeout.DEFAULT_TIMEOUT:
+ return None # Defer to URLFetch's default.
+ if isinstance(timeout, Timeout):
+ if timeout._read is not None or timeout._connect is not None:
+ warnings.warn(
+ "URLFetch does not support granular timeout settings, "
+ "reverting to total or default URLFetch timeout.",
+ AppEnginePlatformWarning,
+ )
+ return timeout.total
+ return timeout
+
+ def _get_retries(self, retries, redirect):
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
+
+ if retries.connect or retries.read or retries.redirect:
+ warnings.warn(
+ "URLFetch only supports total retries and does not "
+ "recognize connect, read, or redirect retry parameters.",
+ AppEnginePlatformWarning,
+ )
+
+ return retries
+
+
+# Alias methods from _appengine_environ to maintain public API interface.
+
+is_appengine = _appengine_environ.is_appengine
+is_appengine_sandbox = _appengine_environ.is_appengine_sandbox
+is_local_appengine = _appengine_environ.is_local_appengine
+is_prod_appengine = _appengine_environ.is_prod_appengine
+is_prod_appengine_mvms = _appengine_environ.is_prod_appengine_mvms
diff --git a/third_party/python/urllib3/urllib3/contrib/ntlmpool.py b/third_party/python/urllib3/urllib3/contrib/ntlmpool.py
new file mode 100644
index 0000000000..b2df45dcf6
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/ntlmpool.py
@@ -0,0 +1,121 @@
+"""
+NTLM authenticating pool, contributed by erikcederstran
+
+Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
+"""
+from __future__ import absolute_import
+
+from logging import getLogger
+
+from ntlm import ntlm
+
+from .. import HTTPSConnectionPool
+from ..packages.six.moves.http_client import HTTPSConnection
+
+log = getLogger(__name__)
+
+
+class NTLMConnectionPool(HTTPSConnectionPool):
+ """
+ Implements an NTLM authentication version of an urllib3 connection pool
+ """
+
+ scheme = "https"
+
+ def __init__(self, user, pw, authurl, *args, **kwargs):
+ """
+ authurl is a random URL on the server that is protected by NTLM.
+ user is the Windows user, probably in the DOMAIN\\username format.
+ pw is the password for the user.
+ """
+ super(NTLMConnectionPool, self).__init__(*args, **kwargs)
+ self.authurl = authurl
+ self.rawuser = user
+ user_parts = user.split("\\", 1)
+ self.domain = user_parts[0].upper()
+ self.user = user_parts[1]
+ self.pw = pw
+
+ def _new_conn(self):
+ # Performs the NTLM handshake that secures the connection. The socket
+ # must be kept open while requests are performed.
+ self.num_connections += 1
+ log.debug(
+ "Starting NTLM HTTPS connection no. %d: https://%s%s",
+ self.num_connections,
+ self.host,
+ self.authurl,
+ )
+
+ headers = {"Connection": "Keep-Alive"}
+ req_header = "Authorization"
+ resp_header = "www-authenticate"
+
+ conn = HTTPSConnection(host=self.host, port=self.port)
+
+ # Send negotiation message
+ headers[req_header] = "NTLM %s" % ntlm.create_NTLM_NEGOTIATE_MESSAGE(
+ self.rawuser
+ )
+ log.debug("Request headers: %s", headers)
+ conn.request("GET", self.authurl, None, headers)
+ res = conn.getresponse()
+ reshdr = dict(res.getheaders())
+ log.debug("Response status: %s %s", res.status, res.reason)
+ log.debug("Response headers: %s", reshdr)
+ log.debug("Response data: %s [...]", res.read(100))
+
+ # Remove the reference to the socket, so that it can not be closed by
+ # the response object (we want to keep the socket open)
+ res.fp = None
+
+ # Server should respond with a challenge message
+ auth_header_values = reshdr[resp_header].split(", ")
+ auth_header_value = None
+ for s in auth_header_values:
+ if s[:5] == "NTLM ":
+ auth_header_value = s[5:]
+ if auth_header_value is None:
+ raise Exception(
+ "Unexpected %s response header: %s" % (resp_header, reshdr[resp_header])
+ )
+
+ # Send authentication message
+ ServerChallenge, NegotiateFlags = ntlm.parse_NTLM_CHALLENGE_MESSAGE(
+ auth_header_value
+ )
+ auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(
+ ServerChallenge, self.user, self.domain, self.pw, NegotiateFlags
+ )
+ headers[req_header] = "NTLM %s" % auth_msg
+ log.debug("Request headers: %s", headers)
+ conn.request("GET", self.authurl, None, headers)
+ res = conn.getresponse()
+ log.debug("Response status: %s %s", res.status, res.reason)
+ log.debug("Response headers: %s", dict(res.getheaders()))
+ log.debug("Response data: %s [...]", res.read()[:100])
+ if res.status != 200:
+ if res.status == 401:
+ raise Exception("Server rejected request: wrong username or password")
+ raise Exception("Wrong server response: %s %s" % (res.status, res.reason))
+
+ res.fp = None
+ log.debug("Connection established")
+ return conn
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ retries=3,
+ redirect=True,
+ assert_same_host=True,
+ ):
+ if headers is None:
+ headers = {}
+ headers["Connection"] = "Keep-Alive"
+ return super(NTLMConnectionPool, self).urlopen(
+ method, url, body, headers, retries, redirect, assert_same_host
+ )
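+
+
+# A hypothetical usage sketch (host, credentials, and path are illustrative):
+#
+#     pool = NTLMConnectionPool(
+#         "DOMAIN\\user", "secret", "/protected", host="example.com", port=443
+#     )
+#     response = pool.urlopen("GET", "/protected")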
diff --git a/third_party/python/urllib3/urllib3/contrib/pyopenssl.py b/third_party/python/urllib3/urllib3/contrib/pyopenssl.py
new file mode 100644
index 0000000000..0cabab1aed
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/pyopenssl.py
@@ -0,0 +1,509 @@
+"""
+TLS with SNI_-support for Python 2. Follow these instructions if you would
+like to verify TLS certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.
+
+This needs the following packages installed:
+
+* `pyOpenSSL`_ (tested with 16.0.0)
+* `cryptography`_ (minimum 1.3.4, from pyopenssl)
+* `idna`_ (minimum 2.0, from cryptography)
+
+However, pyopenssl depends on cryptography, which depends on idna, so while we
+use all three directly here, we end up with relatively few required packages.
+
+You can install them with the following command:
+
+.. code-block:: bash
+
+ $ python -m pip install pyopenssl cryptography idna
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this:
+
+.. code-block:: python
+
+ try:
+ import urllib3.contrib.pyopenssl
+ urllib3.contrib.pyopenssl.inject_into_urllib3()
+ except ImportError:
+ pass
+
+Now you can use :mod:`urllib3` as you normally would, and it will support SNI
+when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+.. _pyopenssl: https://www.pyopenssl.org
+.. _cryptography: https://cryptography.io
+.. _idna: https://github.com/kjd/idna
+"""
+from __future__ import absolute_import
+
+import OpenSSL.SSL
+from cryptography import x509
+from cryptography.hazmat.backends.openssl import backend as openssl_backend
+from cryptography.hazmat.backends.openssl.x509 import _Certificate
+
+try:
+ from cryptography.x509 import UnsupportedExtension
+except ImportError:
+ # UnsupportedExtension is gone in cryptography >= 2.1.0
+ class UnsupportedExtension(Exception):
+ pass
+
+
+from io import BytesIO
+from socket import error as SocketError
+from socket import timeout
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+import logging
+import ssl
+import sys
+
+from .. import util
+from ..packages import six
+
+__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
+
+# SNI always works.
+HAS_SNI = True
+
+# Map from urllib3 to PyOpenSSL compatible parameter-values.
+_openssl_versions = {
+ util.PROTOCOL_TLS: OpenSSL.SSL.SSLv23_METHOD,
+ ssl.PROTOCOL_TLSv1: OpenSSL.SSL.TLSv1_METHOD,
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv3") and hasattr(OpenSSL.SSL, "SSLv3_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_SSLv3] = OpenSSL.SSL.SSLv3_METHOD
+
+if hasattr(ssl, "PROTOCOL_TLSv1_1") and hasattr(OpenSSL.SSL, "TLSv1_1_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_1] = OpenSSL.SSL.TLSv1_1_METHOD
+
+if hasattr(ssl, "PROTOCOL_TLSv1_2") and hasattr(OpenSSL.SSL, "TLSv1_2_METHOD"):
+ _openssl_versions[ssl.PROTOCOL_TLSv1_2] = OpenSSL.SSL.TLSv1_2_METHOD
+
+
+_stdlib_to_openssl_verify = {
+ ssl.CERT_NONE: OpenSSL.SSL.VERIFY_NONE,
+ ssl.CERT_OPTIONAL: OpenSSL.SSL.VERIFY_PEER,
+ ssl.CERT_REQUIRED: OpenSSL.SSL.VERIFY_PEER
+ + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+}
+_openssl_to_stdlib_verify = dict((v, k) for k, v in _stdlib_to_openssl_verify.items())
+
+# OpenSSL will only write 16K at a time
+SSL_WRITE_BLOCKSIZE = 16384
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+
+log = logging.getLogger(__name__)
+
+
+def inject_into_urllib3():
+ "Monkey-patch urllib3 with PyOpenSSL-backed SSL-support."
+
+ _validate_dependencies_met()
+
+ util.SSLContext = PyOpenSSLContext
+ util.ssl_.SSLContext = PyOpenSSLContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_PYOPENSSL = True
+ util.ssl_.IS_PYOPENSSL = True
+
+
+def extract_from_urllib3():
+ "Undo monkey-patching by :func:`inject_into_urllib3`."
+
+ util.SSLContext = orig_util_SSLContext
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_PYOPENSSL = False
+ util.ssl_.IS_PYOPENSSL = False
+
+
+def _validate_dependencies_met():
+ """
+ Verifies that PyOpenSSL's package-level dependencies have been met.
+ Throws `ImportError` if they are not met.
+ """
+ # Method added in `cryptography==1.1`; not available in older versions
+ from cryptography.x509.extensions import Extensions
+
+ if getattr(Extensions, "get_extension_for_class", None) is None:
+ raise ImportError(
+ "'cryptography' module missing required functionality. "
+ "Try upgrading to v1.3.4 or newer."
+ )
+
+ # pyOpenSSL 0.14 and above use cryptography for OpenSSL bindings. The _x509
+ # attribute is only present on those versions.
+ from OpenSSL.crypto import X509
+
+ x509 = X509()
+ if getattr(x509, "_x509", None) is None:
+ raise ImportError(
+ "'pyOpenSSL' module missing required functionality. "
+ "Try upgrading to v0.14 or newer."
+ )
+
+
+def _dnsname_to_stdlib(name):
+ """
+ Converts a dNSName SubjectAlternativeName field to the form used by the
+ standard library on the given Python version.
+
+ Cryptography produces a dNSName as a unicode string that was idna-decoded
+ from ASCII bytes. We need to idna-encode that string to get it back, and
+ then on Python 3 we also need to convert to unicode via UTF-8 (the stdlib
+ uses PyUnicode_FromStringAndSize on it, which decodes via UTF-8).
+
+ If the name cannot be idna-encoded then we return None signalling that
+ the name given should be skipped.
+ """
+
+ def idna_encode(name):
+ """
+ Borrowed wholesale from the Python Cryptography Project. It turns out
+ that we can't just safely call `idna.encode`: it can explode for
+ wildcard names. This avoids that problem.
+ """
+ import idna
+
+ try:
+ for prefix in [u"*.", u"."]:
+ if name.startswith(prefix):
+ name = name[len(prefix) :]
+ return prefix.encode("ascii") + idna.encode(name)
+ return idna.encode(name)
+ except idna.core.IDNAError:
+ return None
+
+ # Don't send IPv6 addresses through the IDNA encoder.
+ if ":" in name:
+ return name
+
+ name = idna_encode(name)
+ if name is None:
+ return None
+ elif sys.version_info >= (3, 0):
+ name = name.decode("utf-8")
+ return name
+
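+# For example, _dnsname_to_stdlib(u"*.example.com") survives the idna
+# round-trip and comes back as u"*.example.com" on Python 3 (as bytes on
+# Python 2), while a name idna cannot encode comes back as None and is
+# skipped by the caller.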
+
+def get_subj_alt_name(peer_cert):
+ """
+ Given a PyOpenSSL certificate, provides all the subject alternative names.
+ """
+ # Pass the cert to cryptography, which has much better APIs for this.
+ if hasattr(peer_cert, "to_cryptography"):
+ cert = peer_cert.to_cryptography()
+ else:
+ # This is technically using private APIs, but should work across all
+ # relevant versions before PyOpenSSL got a proper API for this.
+ cert = _Certificate(openssl_backend, peer_cert._x509)
+
+ # We want to find the SAN extension. Ask Cryptography to locate it (it's
+ # faster than looping in Python)
+ try:
+ ext = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
+ except x509.ExtensionNotFound:
+ # No such extension, return the empty list.
+ return []
+ except (
+ x509.DuplicateExtension,
+ UnsupportedExtension,
+ x509.UnsupportedGeneralNameType,
+ UnicodeError,
+ ) as e:
+ # A problem has been found with the quality of the certificate. Assume
+ # no SAN field is present.
+ log.warning(
+ "A problem was encountered with the certificate that prevented "
+ "urllib3 from finding the SubjectAlternativeName field. This can "
+ "affect certificate validation. The error was %s",
+ e,
+ )
+ return []
+
+ # We want to return dNSName and iPAddress fields. We need to cast the IPs
+ # back to strings because the match_hostname function wants them as
+ # strings.
+ # Sadly the DNS names need to be idna encoded and then, on Python 3, UTF-8
+ # decoded. This is pretty frustrating, but that's what the standard library
+ # does with certificates, and so we need to attempt to do the same.
+ # We also want to skip over names which cannot be idna encoded.
+ names = [
+ ("DNS", name)
+ for name in map(_dnsname_to_stdlib, ext.get_values_for_type(x509.DNSName))
+ if name is not None
+ ]
+ names.extend(
+ ("IP Address", str(name)) for name in ext.get_values_for_type(x509.IPAddress)
+ )
+
+ return names
+
+
+class WrappedSocket(object):
+ """API-compatibility wrapper for Python OpenSSL's Connection-class.
+
+ Note: _makefile_refs, _drop() and _reuse() are needed for the garbage
+ collector of pypy.
+ """
+
+ def __init__(self, connection, socket, suppress_ragged_eofs=True):
+ self.connection = connection
+ self.socket = socket
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self._makefile_refs = 0
+ self._closed = False
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, *args, **kwargs):
+ try:
+ data = self.connection.recv(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
+ return b""
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return b""
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(self.socket, self.socket.gettimeout()):
+ raise timeout("The read operation timed out")
+ else:
+ return self.recv(*args, **kwargs)
+
+ # TLS 1.3 post-handshake authentication
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("read error: %r" % e)
+ else:
+ return data
+
+ def recv_into(self, *args, **kwargs):
+ try:
+ return self.connection.recv_into(*args, **kwargs)
+ except OpenSSL.SSL.SysCallError as e:
+ if self.suppress_ragged_eofs and e.args == (-1, "Unexpected EOF"):
+ return 0
+ else:
+ raise SocketError(str(e))
+ except OpenSSL.SSL.ZeroReturnError:
+ if self.connection.get_shutdown() == OpenSSL.SSL.RECEIVED_SHUTDOWN:
+ return 0
+ else:
+ raise
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(self.socket, self.socket.gettimeout()):
+ raise timeout("The read operation timed out")
+ else:
+ return self.recv_into(*args, **kwargs)
+
+ # TLS 1.3 post-handshake authentication
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("read error: %r" % e)
+
+ def settimeout(self, timeout):
+ return self.socket.settimeout(timeout)
+
+ def _send_until_done(self, data):
+ while True:
+ try:
+ return self.connection.send(data)
+ except OpenSSL.SSL.WantWriteError:
+ if not util.wait_for_write(self.socket, self.socket.gettimeout()):
+ raise timeout()
+ continue
+ except OpenSSL.SSL.SysCallError as e:
+ raise SocketError(str(e))
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self._send_until_done(
+ data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE]
+ )
+ total_sent += sent
+
+ def shutdown(self):
+ # FIXME rethrow compatible exceptions should we ever use this
+ self.connection.shutdown()
+
+ def close(self):
+ if self._makefile_refs < 1:
+ try:
+ self._closed = True
+ return self.connection.close()
+ except OpenSSL.SSL.Error:
+ return
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ x509 = self.connection.get_peer_certificate()
+
+ if not x509:
+ return x509
+
+ if binary_form:
+ return OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_ASN1, x509)
+
+ return {
+ "subject": ((("commonName", x509.get_subject().CN),),),
+ "subjectAltName": get_subj_alt_name(x509),
+ }
+
+ def version(self):
+ return self.connection.get_protocol_version_name()
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+
+
+else: # Platform-specific: Python 3
+ makefile = backport_makefile
+
+WrappedSocket.makefile = makefile
+
+
+class PyOpenSSLContext(object):
+ """
+ I am a wrapper class for the PyOpenSSL ``Context`` object. I am responsible
+ for translating the interface of the standard library ``SSLContext`` object
+ to calls into PyOpenSSL.
+ """
+
+ def __init__(self, protocol):
+ self.protocol = _openssl_versions[protocol]
+ self._ctx = OpenSSL.SSL.Context(self.protocol)
+ self._options = 0
+ self.check_hostname = False
+
+ @property
+ def options(self):
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ self._options = value
+ self._ctx.set_options(value)
+
+ @property
+ def verify_mode(self):
+ return _openssl_to_stdlib_verify[self._ctx.get_verify_mode()]
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._ctx.set_verify(_stdlib_to_openssl_verify[value], _verify_callback)
+
+ def set_default_verify_paths(self):
+ self._ctx.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ if isinstance(ciphers, six.text_type):
+ ciphers = ciphers.encode("utf-8")
+ self._ctx.set_cipher_list(ciphers)
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ if cafile is not None:
+ cafile = cafile.encode("utf-8")
+ if capath is not None:
+ capath = capath.encode("utf-8")
+ try:
+ self._ctx.load_verify_locations(cafile, capath)
+ if cadata is not None:
+ self._ctx.load_verify_locations(BytesIO(cadata))
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("unable to load trusted certificates: %r" % e)
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._ctx.use_certificate_chain_file(certfile)
+ if password is not None:
+ if not isinstance(password, six.binary_type):
+ password = password.encode("utf-8")
+ self._ctx.set_passwd_cb(lambda *_: password)
+ self._ctx.use_privatekey_file(keyfile or certfile)
+
+ def set_alpn_protocols(self, protocols):
+ protocols = [six.ensure_binary(p) for p in protocols]
+ return self._ctx.set_alpn_protos(protocols)
+
+ def wrap_socket(
+ self,
+ sock,
+ server_side=False,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True,
+ server_hostname=None,
+ ):
+ cnx = OpenSSL.SSL.Connection(self._ctx, sock)
+
+ if isinstance(server_hostname, six.text_type): # Platform-specific: Python 3
+ server_hostname = server_hostname.encode("utf-8")
+
+ if server_hostname is not None:
+ cnx.set_tlsext_host_name(server_hostname)
+
+ cnx.set_connect_state()
+
+ while True:
+ try:
+ cnx.do_handshake()
+ except OpenSSL.SSL.WantReadError:
+ if not util.wait_for_read(sock, sock.gettimeout()):
+ raise timeout("select timed out")
+ continue
+ except OpenSSL.SSL.Error as e:
+ raise ssl.SSLError("bad handshake: %r" % e)
+ break
+
+ return WrappedSocket(cnx, sock)
+
+
+def _verify_callback(cnx, x509, err_no, err_depth, return_code):
+ return err_no == 0
diff --git a/third_party/python/urllib3/urllib3/contrib/securetransport.py b/third_party/python/urllib3/urllib3/contrib/securetransport.py
new file mode 100644
index 0000000000..ab092de67a
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/securetransport.py
@@ -0,0 +1,920 @@
+"""
+SecureTransport support for urllib3 via ctypes.
+
+This makes platform-native TLS available to urllib3 users on macOS without the
+use of a compiler. This is an important feature because the Python Package
+Index is moving to become a TLSv1.2-or-higher server, and the default OpenSSL
+that ships with macOS is not capable of doing TLSv1.2. The only way to resolve
+this is to give macOS users an alternative solution to the problem, and that
+solution is to use SecureTransport.
+
+We use ctypes here because this solution must not require a compiler. That's
+because pip is not allowed to require a compiler either.
+
+This is not intended to be a seriously long-term solution to this problem.
+The hope is that PEP 543 will eventually solve this issue for us, at which
+point we can retire this contrib module. But in the short term, we need to
+solve the impending tire fire that is Python on Mac without this kind of
+contrib module. So...here we are.
+
+To use this module, simply import and inject it::
+
+ import urllib3.contrib.securetransport
+ urllib3.contrib.securetransport.inject_into_urllib3()
+
+Happy TLSing!
+
+This code is a bastardised version of the code found in Will Bond's oscrypto
+library. An enormous debt is owed to him for blazing this trail for us. For
+that reason, this code should be considered to be covered both by urllib3's
+license and by oscrypto's:
+
+.. code-block::
+
+ Copyright (c) 2015-2016 Will Bond <will@wbond.net>
+
+ Permission is hereby granted, free of charge, to any person obtaining a
+ copy of this software and associated documentation files (the "Software"),
+ to deal in the Software without restriction, including without limitation
+ the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ and/or sell copies of the Software, and to permit persons to whom the
+ Software is furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS IN THE SOFTWARE.
+"""
+from __future__ import absolute_import
+
+import contextlib
+import ctypes
+import errno
+import os.path
+import shutil
+import socket
+import ssl
+import struct
+import threading
+import weakref
+
+import six
+
+from .. import util
+from ._securetransport.bindings import CoreFoundation, Security, SecurityConst
+from ._securetransport.low_level import (
+ _assert_no_error,
+ _build_tls_unknown_ca_alert,
+ _cert_array_from_pem,
+ _create_cfstring_array,
+ _load_client_cert_chain,
+ _temporary_keychain,
+)
+
+try: # Platform-specific: Python 2
+ from socket import _fileobject
+except ImportError: # Platform-specific: Python 3
+ _fileobject = None
+ from ..packages.backports.makefile import backport_makefile
+
+__all__ = ["inject_into_urllib3", "extract_from_urllib3"]
+
+# SNI always works
+HAS_SNI = True
+
+orig_util_HAS_SNI = util.HAS_SNI
+orig_util_SSLContext = util.ssl_.SSLContext
+
+# This dictionary is used by the read callback to obtain a handle to the
+# calling wrapped socket. This is a pretty silly approach, but for now it'll
+# do. I feel like I should be able to smuggle a handle to the wrapped socket
+# directly in the SSLConnectionRef, but for now this approach will work I
+# guess.
+#
+# We need to lock around this structure for inserts, but we don't do it for
+# reads/writes in the callbacks. The reasoning here goes as follows:
+#
+# 1. It is not possible to call into the callbacks before the dictionary is
+# populated, so once in the callback the id must be in the dictionary.
+# 2. The callbacks don't mutate the dictionary, they only read from it, and
+# so cannot conflict with any of the insertions.
+#
+# This is good: if we had to lock in the callbacks we'd drastically slow down
+# the performance of this code.
+_connection_refs = weakref.WeakValueDictionary()
+_connection_ref_lock = threading.Lock()
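+
+# A minimal sketch of such an insert (illustrative; wrapped_socket is a
+# hypothetical WrappedSocket instance): pick a free integer handle under the
+# lock, then hand that handle to SecureTransport as the connection reference.
+#
+#     with _connection_ref_lock:
+#         handle = id(wrapped_socket) % 2147483647
+#         while handle in _connection_refs:
+#             handle = (handle + 1) % 2147483647
+#         _connection_refs[handle] = wrapped_socket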
+
+# Limit writes to 16kB. This is OpenSSL's limit, but we'll cargo-cult it over
+# for no better reason than we need *a* limit, and this one is right there.
+SSL_WRITE_BLOCKSIZE = 16384
+
+# This is our equivalent of util.ssl_.DEFAULT_CIPHERS, but expanded out to
+# individual cipher suites. We need to do this because this is how
+# SecureTransport wants them.
+CIPHER_SUITES = [
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
+ SecurityConst.TLS_AES_256_GCM_SHA384,
+ SecurityConst.TLS_AES_128_GCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_GCM_SHA384,
+ SecurityConst.TLS_RSA_WITH_AES_128_GCM_SHA256,
+ SecurityConst.TLS_AES_128_CCM_8_SHA256,
+ SecurityConst.TLS_AES_128_CCM_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA256,
+ SecurityConst.TLS_RSA_WITH_AES_256_CBC_SHA,
+ SecurityConst.TLS_RSA_WITH_AES_128_CBC_SHA,
+]
+
+# Basically this is simple: for PROTOCOL_SSLv23 we turn it into a low of
+# TLSv1 and a high of TLSv1.2. For everything else, we pin to that version.
+# TLSv1 to 1.2 are supported on macOS 10.8+
+_protocol_to_min_max = {
+ util.PROTOCOL_TLS: (SecurityConst.kTLSProtocol1, SecurityConst.kTLSProtocol12)
+}
+
+if hasattr(ssl, "PROTOCOL_SSLv2"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv2] = (
+ SecurityConst.kSSLProtocol2,
+ SecurityConst.kSSLProtocol2,
+ )
+if hasattr(ssl, "PROTOCOL_SSLv3"):
+ _protocol_to_min_max[ssl.PROTOCOL_SSLv3] = (
+ SecurityConst.kSSLProtocol3,
+ SecurityConst.kSSLProtocol3,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1] = (
+ SecurityConst.kTLSProtocol1,
+ SecurityConst.kTLSProtocol1,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_1"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_1] = (
+ SecurityConst.kTLSProtocol11,
+ SecurityConst.kTLSProtocol11,
+ )
+if hasattr(ssl, "PROTOCOL_TLSv1_2"):
+ _protocol_to_min_max[ssl.PROTOCOL_TLSv1_2] = (
+ SecurityConst.kTLSProtocol12,
+ SecurityConst.kTLSProtocol12,
+ )
+
+
+def inject_into_urllib3():
+ """
+    Monkey-patch urllib3 with SecureTransport-backed SSL support.
+ """
+ util.SSLContext = SecureTransportContext
+ util.ssl_.SSLContext = SecureTransportContext
+ util.HAS_SNI = HAS_SNI
+ util.ssl_.HAS_SNI = HAS_SNI
+ util.IS_SECURETRANSPORT = True
+ util.ssl_.IS_SECURETRANSPORT = True
+
+
+def extract_from_urllib3():
+ """
+ Undo monkey-patching by :func:`inject_into_urllib3`.
+ """
+ util.SSLContext = orig_util_SSLContext
+ util.ssl_.SSLContext = orig_util_SSLContext
+ util.HAS_SNI = orig_util_HAS_SNI
+ util.ssl_.HAS_SNI = orig_util_HAS_SNI
+ util.IS_SECURETRANSPORT = False
+ util.ssl_.IS_SECURETRANSPORT = False
+
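+# A minimal opt-in/opt-out sketch for the two helpers above:
+#
+#     from urllib3.contrib.securetransport import (
+#         inject_into_urllib3,
+#         extract_from_urllib3,
+#     )
+#     import urllib3
+#
+#     inject_into_urllib3()
+#     http = urllib3.PoolManager()  # pools now use SecureTransport for TLS
+#     ...
+#     extract_from_urllib3()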
+
+def _read_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport read callback. This is called by ST to request that data
+ be returned from the socket.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ requested_length = data_length_pointer[0]
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ read_count = 0
+
+ try:
+ while read_count < requested_length:
+ if timeout is None or timeout >= 0:
+ if not util.wait_for_read(base_socket, timeout):
+ raise socket.error(errno.EAGAIN, "timed out")
+
+ remaining = requested_length - read_count
+ buffer = (ctypes.c_char * remaining).from_address(
+ data_buffer + read_count
+ )
+ chunk_size = base_socket.recv_into(buffer, remaining)
+ read_count += chunk_size
+ if not chunk_size:
+ if not read_count:
+ return SecurityConst.errSSLClosedGraceful
+ break
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ data_length_pointer[0] = read_count
+ if error == errno.ECONNRESET or error == errno.EPIPE:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = read_count
+
+ if read_count != requested_length:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+def _write_callback(connection_id, data_buffer, data_length_pointer):
+ """
+ SecureTransport write callback. This is called by ST to request that data
+ actually be sent on the network.
+ """
+ wrapped_socket = None
+ try:
+ wrapped_socket = _connection_refs.get(connection_id)
+ if wrapped_socket is None:
+ return SecurityConst.errSSLInternal
+ base_socket = wrapped_socket.socket
+
+ bytes_to_write = data_length_pointer[0]
+ data = ctypes.string_at(data_buffer, bytes_to_write)
+
+ timeout = wrapped_socket.gettimeout()
+ error = None
+ sent = 0
+
+ try:
+ while sent < bytes_to_write:
+ if timeout is None or timeout >= 0:
+ if not util.wait_for_write(base_socket, timeout):
+ raise socket.error(errno.EAGAIN, "timed out")
+ chunk_sent = base_socket.send(data)
+ sent += chunk_sent
+
+ # This has some needless copying here, but I'm not sure there's
+ # much value in optimising this data path.
+ data = data[chunk_sent:]
+ except (socket.error) as e:
+ error = e.errno
+
+ if error is not None and error != errno.EAGAIN:
+ data_length_pointer[0] = sent
+ if error == errno.ECONNRESET or error == errno.EPIPE:
+ return SecurityConst.errSSLClosedAbort
+ raise
+
+ data_length_pointer[0] = sent
+
+ if sent != bytes_to_write:
+ return SecurityConst.errSSLWouldBlock
+
+ return 0
+ except Exception as e:
+ if wrapped_socket is not None:
+ wrapped_socket._exception = e
+ return SecurityConst.errSSLInternal
+
+
+# We need to keep these two object references alive: if they get GC'd while
+# in use then SecureTransport could attempt to call a function that is in freed
+# memory. That would be...uh...bad. Yeah, that's the word. Bad.
+_read_callback_pointer = Security.SSLReadFunc(_read_callback)
+_write_callback_pointer = Security.SSLWriteFunc(_write_callback)
+
+
+class WrappedSocket(object):
+ """
+ API-compatibility wrapper for Python's OpenSSL wrapped socket object.
+
+ Note: _makefile_refs, _drop(), and _reuse() are needed for the garbage
+ collector of PyPy.
+ """
+
+ def __init__(self, socket):
+ self.socket = socket
+ self.context = None
+ self._makefile_refs = 0
+ self._closed = False
+ self._exception = None
+ self._keychain = None
+ self._keychain_dir = None
+ self._client_cert_chain = None
+
+ # We save off the previously-configured timeout and then set it to
+ # zero. This is done because we use select and friends to handle the
+ # timeouts, but if we leave the timeout set on the lower socket then
+ # Python will "kindly" call select on that socket again for us. Avoid
+ # that by forcing the timeout to zero.
+ self._timeout = self.socket.gettimeout()
+ self.socket.settimeout(0)
+
+ @contextlib.contextmanager
+ def _raise_on_error(self):
+ """
+ A context manager that can be used to wrap calls that do I/O from
+ SecureTransport. If any of the I/O callbacks hit an exception, this
+ context manager will correctly propagate the exception after the fact.
+ This avoids silently swallowing those exceptions.
+
+ It also correctly forces the socket closed.
+ """
+ self._exception = None
+
+ # We explicitly don't catch around this yield because in the unlikely
+ # event that an exception was hit in the block we don't want to swallow
+ # it.
+ yield
+ if self._exception is not None:
+ exception, self._exception = self._exception, None
+ self.close()
+ raise exception
+
+ def _set_ciphers(self):
+ """
+        Sets up the allowed ciphers. By default this matches the set in
+        util.ssl_.DEFAULT_CIPHERS, at least as supported by macOS. This is
+        hard-coded and cannot be changed at this time, mostly because parsing
+        OpenSSL cipher strings is going to be a freaking nightmare.
+ """
+ ciphers = (Security.SSLCipherSuite * len(CIPHER_SUITES))(*CIPHER_SUITES)
+ result = Security.SSLSetEnabledCiphers(
+ self.context, ciphers, len(CIPHER_SUITES)
+ )
+ _assert_no_error(result)
+
+ def _set_alpn_protocols(self, protocols):
+ """
+ Sets up the ALPN protocols on the context.
+ """
+ if not protocols:
+ return
+ protocols_arr = _create_cfstring_array(protocols)
+ try:
+ result = Security.SSLSetALPNProtocols(self.context, protocols_arr)
+ _assert_no_error(result)
+ finally:
+ CoreFoundation.CFRelease(protocols_arr)
+
+ def _custom_validate(self, verify, trust_bundle):
+ """
+ Called when we have set custom validation. We do this in two cases:
+ first, when cert validation is entirely disabled; and second, when
+ using a custom trust DB.
+ Raises an SSLError if the connection is not trusted.
+ """
+ # If we disabled cert validation, just say: cool.
+ if not verify:
+ return
+
+ successes = (
+ SecurityConst.kSecTrustResultUnspecified,
+ SecurityConst.kSecTrustResultProceed,
+ )
+ try:
+ trust_result = self._evaluate_trust(trust_bundle)
+ if trust_result in successes:
+ return
+ reason = "error code: %d" % (trust_result,)
+ except Exception as e:
+ # Do not trust on error
+ reason = "exception: %r" % (e,)
+
+        # SecureTransport neither sends an alert nor shuts down the
+        # connection, so we do both ourselves.
+ rec = _build_tls_unknown_ca_alert(self.version())
+ self.socket.sendall(rec)
+ # close the connection immediately
+ # l_onoff = 1, activate linger
+        # l_linger = 0, linger for 0 seconds
+ opts = struct.pack("ii", 1, 0)
+ self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, opts)
+ self.close()
+ raise ssl.SSLError("certificate verify failed, %s" % reason)
+
+ def _evaluate_trust(self, trust_bundle):
+ # We want data in memory, so load it up.
+ if os.path.isfile(trust_bundle):
+ with open(trust_bundle, "rb") as f:
+ trust_bundle = f.read()
+
+ cert_array = None
+ trust = Security.SecTrustRef()
+
+ try:
+ # Get a CFArray that contains the certs we want.
+ cert_array = _cert_array_from_pem(trust_bundle)
+
+ # Ok, now the hard part. We want to get the SecTrustRef that ST has
+ # created for this connection, shove our CAs into it, tell ST to
+ # ignore everything else it knows, and then ask if it can build a
+ # chain. This is a buuuunch of code.
+ result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
+ _assert_no_error(result)
+ if not trust:
+ raise ssl.SSLError("Failed to copy trust reference")
+
+ result = Security.SecTrustSetAnchorCertificates(trust, cert_array)
+ _assert_no_error(result)
+
+ result = Security.SecTrustSetAnchorCertificatesOnly(trust, True)
+ _assert_no_error(result)
+
+ trust_result = Security.SecTrustResultType()
+ result = Security.SecTrustEvaluate(trust, ctypes.byref(trust_result))
+ _assert_no_error(result)
+ finally:
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ if cert_array is not None:
+ CoreFoundation.CFRelease(cert_array)
+
+ return trust_result.value
+
+ def handshake(
+ self,
+ server_hostname,
+ verify,
+ trust_bundle,
+ min_version,
+ max_version,
+ client_cert,
+ client_key,
+ client_key_passphrase,
+ alpn_protocols,
+ ):
+ """
+ Actually performs the TLS handshake. This is run automatically by
+ wrapped socket, and shouldn't be needed in user code.
+ """
+ # First, we do the initial bits of connection setup. We need to create
+ # a context, set its I/O funcs, and set the connection reference.
+ self.context = Security.SSLCreateContext(
+ None, SecurityConst.kSSLClientSide, SecurityConst.kSSLStreamType
+ )
+ result = Security.SSLSetIOFuncs(
+ self.context, _read_callback_pointer, _write_callback_pointer
+ )
+ _assert_no_error(result)
+
+ # Here we need to compute the handle to use. We do this by taking the
+ # id of self modulo 2**31 - 1. If this is already in the dictionary, we
+ # just keep incrementing by one until we find a free space.
+ with _connection_ref_lock:
+ handle = id(self) % 2147483647
+ while handle in _connection_refs:
+ handle = (handle + 1) % 2147483647
+ _connection_refs[handle] = self
+
+ result = Security.SSLSetConnection(self.context, handle)
+ _assert_no_error(result)
+
+ # If we have a server hostname, we should set that too.
+ if server_hostname:
+ if not isinstance(server_hostname, bytes):
+ server_hostname = server_hostname.encode("utf-8")
+
+ result = Security.SSLSetPeerDomainName(
+ self.context, server_hostname, len(server_hostname)
+ )
+ _assert_no_error(result)
+
+ # Setup the ciphers.
+ self._set_ciphers()
+
+ # Setup the ALPN protocols.
+ self._set_alpn_protocols(alpn_protocols)
+
+ # Set the minimum and maximum TLS versions.
+ result = Security.SSLSetProtocolVersionMin(self.context, min_version)
+ _assert_no_error(result)
+
+ result = Security.SSLSetProtocolVersionMax(self.context, max_version)
+ _assert_no_error(result)
+
+ # If there's a trust DB, we need to use it. We do that by telling
+ # SecureTransport to break on server auth. We also do that if we don't
+ # want to validate the certs at all: we just won't actually do any
+ # authing in that case.
+ if not verify or trust_bundle is not None:
+ result = Security.SSLSetSessionOption(
+ self.context, SecurityConst.kSSLSessionOptionBreakOnServerAuth, True
+ )
+ _assert_no_error(result)
+
+ # If there's a client cert, we need to use it.
+ if client_cert:
+ self._keychain, self._keychain_dir = _temporary_keychain()
+ self._client_cert_chain = _load_client_cert_chain(
+ self._keychain, client_cert, client_key
+ )
+ result = Security.SSLSetCertificate(self.context, self._client_cert_chain)
+ _assert_no_error(result)
+
+ while True:
+ with self._raise_on_error():
+ result = Security.SSLHandshake(self.context)
+
+ if result == SecurityConst.errSSLWouldBlock:
+ raise socket.timeout("handshake timed out")
+ elif result == SecurityConst.errSSLServerAuthCompleted:
+ self._custom_validate(verify, trust_bundle)
+ continue
+ else:
+ _assert_no_error(result)
+ break
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ # Copy-pasted from Python 3.5 source code
+ def _decref_socketios(self):
+ if self._makefile_refs > 0:
+ self._makefile_refs -= 1
+ if self._closed:
+ self.close()
+
+ def recv(self, bufsiz):
+ buffer = ctypes.create_string_buffer(bufsiz)
+ bytes_read = self.recv_into(buffer, bufsiz)
+ data = buffer[:bytes_read]
+ return data
+
+ def recv_into(self, buffer, nbytes=None):
+ # Read short on EOF.
+ if self._closed:
+ return 0
+
+ if nbytes is None:
+ nbytes = len(buffer)
+
+ buffer = (ctypes.c_char * nbytes).from_buffer(buffer)
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLRead(
+ self.context, buffer, nbytes, ctypes.byref(processed_bytes)
+ )
+
+ # There are some result codes that we want to treat as "not always
+ # errors". Specifically, those are errSSLWouldBlock,
+ # errSSLClosedGraceful, and errSSLClosedNoNotify.
+ if result == SecurityConst.errSSLWouldBlock:
+ # If we didn't process any bytes, then this was just a time out.
+ # However, we can get errSSLWouldBlock in situations when we *did*
+ # read some data, and in those cases we should just read "short"
+ # and return.
+ if processed_bytes.value == 0:
+ # Timed out, no data read.
+ raise socket.timeout("recv timed out")
+ elif result in (
+ SecurityConst.errSSLClosedGraceful,
+ SecurityConst.errSSLClosedNoNotify,
+ ):
+            # The remote peer has closed this connection. We should do so as
+            # well. Note that we don't return here because in principle this
+            # result can be fired along with returned data. It's unlikely,
+            # though.
+ self.close()
+ else:
+ _assert_no_error(result)
+
+ # Ok, we read and probably succeeded. We should return whatever data
+ # was actually read.
+ return processed_bytes.value
+
+ def settimeout(self, timeout):
+ self._timeout = timeout
+
+ def gettimeout(self):
+ return self._timeout
+
+ def send(self, data):
+ processed_bytes = ctypes.c_size_t(0)
+
+ with self._raise_on_error():
+ result = Security.SSLWrite(
+ self.context, data, len(data), ctypes.byref(processed_bytes)
+ )
+
+ if result == SecurityConst.errSSLWouldBlock and processed_bytes.value == 0:
+ # Timed out
+ raise socket.timeout("send timed out")
+ else:
+ _assert_no_error(result)
+
+ # We sent, and probably succeeded. Tell them how much we sent.
+ return processed_bytes.value
+
+ def sendall(self, data):
+ total_sent = 0
+ while total_sent < len(data):
+ sent = self.send(data[total_sent : total_sent + SSL_WRITE_BLOCKSIZE])
+ total_sent += sent
+
+ def shutdown(self):
+ with self._raise_on_error():
+ Security.SSLClose(self.context)
+
+ def close(self):
+ # TODO: should I do clean shutdown here? Do I have to?
+ if self._makefile_refs < 1:
+ self._closed = True
+ if self.context:
+ CoreFoundation.CFRelease(self.context)
+ self.context = None
+ if self._client_cert_chain:
+ CoreFoundation.CFRelease(self._client_cert_chain)
+ self._client_cert_chain = None
+ if self._keychain:
+ Security.SecKeychainDelete(self._keychain)
+ CoreFoundation.CFRelease(self._keychain)
+ shutil.rmtree(self._keychain_dir)
+ self._keychain = self._keychain_dir = None
+ return self.socket.close()
+ else:
+ self._makefile_refs -= 1
+
+ def getpeercert(self, binary_form=False):
+ # Urgh, annoying.
+ #
+ # Here's how we do this:
+ #
+ # 1. Call SSLCopyPeerTrust to get hold of the trust object for this
+ # connection.
+ # 2. Call SecTrustGetCertificateAtIndex for index 0 to get the leaf.
+ # 3. To get the CN, call SecCertificateCopyCommonName and process that
+ # string so that it's of the appropriate type.
+ # 4. To get the SAN, we need to do something a bit more complex:
+ # a. Call SecCertificateCopyValues to get the data, requesting
+ # kSecOIDSubjectAltName.
+ # b. Mess about with this dictionary to try to get the SANs out.
+ #
+ # This is gross. Really gross. It's going to be a few hundred LoC extra
+ # just to repeat something that SecureTransport can *already do*. So my
+ # operating assumption at this time is that what we want to do is
+ # instead to just flag to urllib3 that it shouldn't do its own hostname
+ # validation when using SecureTransport.
+ if not binary_form:
+ raise ValueError("SecureTransport only supports dumping binary certs")
+ trust = Security.SecTrustRef()
+ certdata = None
+ der_bytes = None
+
+ try:
+ # Grab the trust store.
+ result = Security.SSLCopyPeerTrust(self.context, ctypes.byref(trust))
+ _assert_no_error(result)
+ if not trust:
+ # Probably we haven't done the handshake yet. No biggie.
+ return None
+
+ cert_count = Security.SecTrustGetCertificateCount(trust)
+ if not cert_count:
+ # Also a case that might happen if we haven't handshaked.
+ # Handshook? Handshaken?
+ return None
+
+ leaf = Security.SecTrustGetCertificateAtIndex(trust, 0)
+ assert leaf
+
+ # Ok, now we want the DER bytes.
+ certdata = Security.SecCertificateCopyData(leaf)
+ assert certdata
+
+ data_length = CoreFoundation.CFDataGetLength(certdata)
+ data_buffer = CoreFoundation.CFDataGetBytePtr(certdata)
+ der_bytes = ctypes.string_at(data_buffer, data_length)
+ finally:
+ if certdata:
+ CoreFoundation.CFRelease(certdata)
+ if trust:
+ CoreFoundation.CFRelease(trust)
+
+ return der_bytes
+
+ def version(self):
+ protocol = Security.SSLProtocol()
+ result = Security.SSLGetNegotiatedProtocolVersion(
+ self.context, ctypes.byref(protocol)
+ )
+ _assert_no_error(result)
+ if protocol.value == SecurityConst.kTLSProtocol13:
+ raise ssl.SSLError("SecureTransport does not support TLS 1.3")
+ elif protocol.value == SecurityConst.kTLSProtocol12:
+ return "TLSv1.2"
+ elif protocol.value == SecurityConst.kTLSProtocol11:
+ return "TLSv1.1"
+ elif protocol.value == SecurityConst.kTLSProtocol1:
+ return "TLSv1"
+ elif protocol.value == SecurityConst.kSSLProtocol3:
+ return "SSLv3"
+ elif protocol.value == SecurityConst.kSSLProtocol2:
+ return "SSLv2"
+ else:
+ raise ssl.SSLError("Unknown TLS version: %r" % protocol)
+
+ def _reuse(self):
+ self._makefile_refs += 1
+
+ def _drop(self):
+ if self._makefile_refs < 1:
+ self.close()
+ else:
+ self._makefile_refs -= 1
+
+
+if _fileobject: # Platform-specific: Python 2
+
+ def makefile(self, mode, bufsize=-1):
+ self._makefile_refs += 1
+ return _fileobject(self, mode, bufsize, close=True)
+
+
+else: # Platform-specific: Python 3
+
+ def makefile(self, mode="r", buffering=None, *args, **kwargs):
+ # We disable buffering with SecureTransport because it conflicts with
+ # the buffering that ST does internally (see issue #1153 for more).
+ buffering = 0
+ return backport_makefile(self, mode, buffering, *args, **kwargs)
+
+
+WrappedSocket.makefile = makefile
+
+
+class SecureTransportContext(object):
+ """
+ I am a wrapper class for the SecureTransport library, to translate the
+ interface of the standard library ``SSLContext`` object to calls into
+ SecureTransport.
+ """
+
+ def __init__(self, protocol):
+ self._min_version, self._max_version = _protocol_to_min_max[protocol]
+ self._options = 0
+ self._verify = False
+ self._trust_bundle = None
+ self._client_cert = None
+ self._client_key = None
+ self._client_key_passphrase = None
+ self._alpn_protocols = None
+
+ @property
+ def check_hostname(self):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ return True
+
+ @check_hostname.setter
+ def check_hostname(self, value):
+ """
+ SecureTransport cannot have its hostname checking disabled. For more,
+ see the comment on getpeercert() in this file.
+ """
+ pass
+
+ @property
+ def options(self):
+ # TODO: Well, crap.
+ #
+ # So this is the bit of the code that is the most likely to cause us
+ # trouble. Essentially we need to enumerate all of the SSL options that
+ # users might want to use and try to see if we can sensibly translate
+ # them, or whether we should just ignore them.
+ return self._options
+
+ @options.setter
+ def options(self, value):
+ # TODO: Update in line with above.
+ self._options = value
+
+ @property
+ def verify_mode(self):
+ return ssl.CERT_REQUIRED if self._verify else ssl.CERT_NONE
+
+ @verify_mode.setter
+ def verify_mode(self, value):
+ self._verify = True if value == ssl.CERT_REQUIRED else False
+
+ def set_default_verify_paths(self):
+ # So, this has to do something a bit weird. Specifically, what it does
+ # is nothing.
+ #
+        # This means that, if load_verify_locations was previously called,
+        # this does not undo that. We behave this way because it turns out
+        # that the rest of the urllib3 code will attempt to load the default
+        # verify paths if it hasn't been told about any paths, even if the
+        # context itself was configured sometime earlier. We resolve that by
+        # just ignoring it.
+ pass
+
+ def load_default_certs(self):
+ return self.set_default_verify_paths()
+
+ def set_ciphers(self, ciphers):
+ # For now, we just require the default cipher string.
+ if ciphers != util.ssl_.DEFAULT_CIPHERS:
+ raise ValueError("SecureTransport doesn't support custom cipher strings")
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ # OK, we only really support cadata and cafile.
+ if capath is not None:
+ raise ValueError("SecureTransport does not support cert directories")
+
+ # Raise if cafile does not exist.
+ if cafile is not None:
+ with open(cafile):
+ pass
+
+ self._trust_bundle = cafile or cadata
+
+ def load_cert_chain(self, certfile, keyfile=None, password=None):
+ self._client_cert = certfile
+ self._client_key = keyfile
+        self._client_key_passphrase = password  # attribute name matches __init__ and wrap_socket()
+
+ def set_alpn_protocols(self, protocols):
+ """
+ Sets the ALPN protocols that will later be set on the context.
+
+ Raises a NotImplementedError if ALPN is not supported.
+ """
+ if not hasattr(Security, "SSLSetALPNProtocols"):
+ raise NotImplementedError(
+ "SecureTransport supports ALPN only in macOS 10.12+"
+ )
+ self._alpn_protocols = [six.ensure_binary(p) for p in protocols]
+
+ def wrap_socket(
+ self,
+ sock,
+ server_side=False,
+ do_handshake_on_connect=True,
+ suppress_ragged_eofs=True,
+ server_hostname=None,
+ ):
+ # So, what do we do here? Firstly, we assert some properties. This is a
+ # stripped down shim, so there is some functionality we don't support.
+ # See PEP 543 for the real deal.
+ assert not server_side
+ assert do_handshake_on_connect
+ assert suppress_ragged_eofs
+
+ # Ok, we're good to go. Now we want to create the wrapped socket object
+ # and store it in the appropriate place.
+ wrapped_socket = WrappedSocket(sock)
+
+ # Now we can handshake
+ wrapped_socket.handshake(
+ server_hostname,
+ self._verify,
+ self._trust_bundle,
+ self._min_version,
+ self._max_version,
+ self._client_cert,
+ self._client_key,
+ self._client_key_passphrase,
+ self._alpn_protocols,
+ )
+ return wrapped_socket
diff --git a/third_party/python/urllib3/urllib3/contrib/socks.py b/third_party/python/urllib3/urllib3/contrib/socks.py
new file mode 100644
index 0000000000..93df8325d5
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/contrib/socks.py
@@ -0,0 +1,216 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains provisional support for SOCKS proxies from within
+urllib3. This module supports SOCKS4, SOCKS4A (an extension of SOCKS4), and
+SOCKS5. To enable its functionality, either install PySocks or install
+urllib3 with the ``socks`` extra.
+
+The SOCKS implementation supports the full range of urllib3 features. It also
+supports the following SOCKS features:
+
+- SOCKS4A (``proxy_url='socks4a://...'``)
+- SOCKS4 (``proxy_url='socks4://...'``)
+- SOCKS5 with remote DNS (``proxy_url='socks5h://...'``)
+- SOCKS5 with local DNS (``proxy_url='socks5://...'``)
+- Usernames and passwords for the SOCKS proxy
+
+.. note::
+ It is recommended to use ``socks5h://`` or ``socks4a://`` schemes in
+ your ``proxy_url`` to ensure that DNS resolution is done from the remote
+ server instead of client-side when connecting to a domain name.
+
+SOCKS4 supports IPv4 and domain names with the SOCKS4A extension. SOCKS5
+supports IPv4, IPv6, and domain names.
+
+When connecting to a SOCKS4 proxy the ``username`` portion of the ``proxy_url``
+will be sent as the ``userid`` section of the SOCKS request:
+
+.. code-block:: python
+
+ proxy_url="socks4a://<userid>@proxy-host"
+
+When connecting to a SOCKS5 proxy the ``username`` and ``password`` portion
+of the ``proxy_url`` will be sent as the username/password to authenticate
+with the proxy:
+
+.. code-block:: python
+
+ proxy_url="socks5h://<username>:<password>@proxy-host"
+
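+A minimal usage sketch (``localhost:1080`` is an assumed example proxy
+address):
+
+.. code-block:: python
+
+    from urllib3.contrib.socks import SOCKSProxyManager
+
+    proxy = SOCKSProxyManager("socks5h://localhost:1080/")
+    resp = proxy.request("GET", "http://example.org/")
+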
+"""
+from __future__ import absolute_import
+
+try:
+ import socks
+except ImportError:
+ import warnings
+
+ from ..exceptions import DependencyWarning
+
+ warnings.warn(
+ (
+ "SOCKS support in urllib3 requires the installation of optional "
+ "dependencies: specifically, PySocks. For more information, see "
+ "https://urllib3.readthedocs.io/en/latest/contrib.html#socks-proxies"
+ ),
+ DependencyWarning,
+ )
+ raise
+
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+from ..connection import HTTPConnection, HTTPSConnection
+from ..connectionpool import HTTPConnectionPool, HTTPSConnectionPool
+from ..exceptions import ConnectTimeoutError, NewConnectionError
+from ..poolmanager import PoolManager
+from ..util.url import parse_url
+
+try:
+ import ssl
+except ImportError:
+ ssl = None
+
+
+class SOCKSConnection(HTTPConnection):
+ """
+ A plain-text HTTP connection that connects via a SOCKS proxy.
+ """
+
+ def __init__(self, *args, **kwargs):
+ self._socks_options = kwargs.pop("_socks_options")
+ super(SOCKSConnection, self).__init__(*args, **kwargs)
+
+ def _new_conn(self):
+ """
+ Establish a new connection via the SOCKS proxy.
+ """
+ extra_kw = {}
+ if self.source_address:
+ extra_kw["source_address"] = self.source_address
+
+ if self.socket_options:
+ extra_kw["socket_options"] = self.socket_options
+
+ try:
+ conn = socks.create_connection(
+ (self.host, self.port),
+ proxy_type=self._socks_options["socks_version"],
+ proxy_addr=self._socks_options["proxy_host"],
+ proxy_port=self._socks_options["proxy_port"],
+ proxy_username=self._socks_options["username"],
+ proxy_password=self._socks_options["password"],
+ proxy_rdns=self._socks_options["rdns"],
+ timeout=self.timeout,
+ **extra_kw
+ )
+
+ except SocketTimeout:
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+
+ except socks.ProxyError as e:
+ # This is fragile as hell, but it seems to be the only way to raise
+ # useful errors here.
+ if e.socket_err:
+ error = e.socket_err
+ if isinstance(error, SocketTimeout):
+ raise ConnectTimeoutError(
+ self,
+ "Connection to %s timed out. (connect timeout=%s)"
+ % (self.host, self.timeout),
+ )
+ else:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % error
+ )
+ else:
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ except SocketError as e: # Defensive: PySocks should catch all these.
+ raise NewConnectionError(
+ self, "Failed to establish a new connection: %s" % e
+ )
+
+ return conn
+
+
+# We don't need to duplicate the Verified/Unverified distinction from
+# urllib3/connection.py here because the HTTPSConnection will already have been
+# correctly set to either the Verified or Unverified form by that module. This
+# means the SOCKSHTTPSConnection will automatically be the correct type.
+class SOCKSHTTPSConnection(SOCKSConnection, HTTPSConnection):
+ pass
+
+
+class SOCKSHTTPConnectionPool(HTTPConnectionPool):
+ ConnectionCls = SOCKSConnection
+
+
+class SOCKSHTTPSConnectionPool(HTTPSConnectionPool):
+ ConnectionCls = SOCKSHTTPSConnection
+
+
+class SOCKSProxyManager(PoolManager):
+ """
+ A version of the urllib3 ProxyManager that routes connections via the
+ defined SOCKS proxy.
+ """
+
+ pool_classes_by_scheme = {
+ "http": SOCKSHTTPConnectionPool,
+ "https": SOCKSHTTPSConnectionPool,
+ }
+
+ def __init__(
+ self,
+ proxy_url,
+ username=None,
+ password=None,
+ num_pools=10,
+ headers=None,
+ **connection_pool_kw
+ ):
+ parsed = parse_url(proxy_url)
+
+ if username is None and password is None and parsed.auth is not None:
+ split = parsed.auth.split(":")
+ if len(split) == 2:
+ username, password = split
+ if parsed.scheme == "socks5":
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = False
+ elif parsed.scheme == "socks5h":
+ socks_version = socks.PROXY_TYPE_SOCKS5
+ rdns = True
+ elif parsed.scheme == "socks4":
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = False
+ elif parsed.scheme == "socks4a":
+ socks_version = socks.PROXY_TYPE_SOCKS4
+ rdns = True
+ else:
+ raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
+
+ self.proxy_url = proxy_url
+
+ socks_options = {
+ "socks_version": socks_version,
+ "proxy_host": parsed.host,
+ "proxy_port": parsed.port,
+ "username": username,
+ "password": password,
+ "rdns": rdns,
+ }
+ connection_pool_kw["_socks_options"] = socks_options
+
+ super(SOCKSProxyManager, self).__init__(
+ num_pools, headers, **connection_pool_kw
+ )
+
+ self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
diff --git a/third_party/python/urllib3/urllib3/exceptions.py b/third_party/python/urllib3/urllib3/exceptions.py
new file mode 100644
index 0000000000..d69958d5df
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/exceptions.py
@@ -0,0 +1,313 @@
+from __future__ import absolute_import
+
+from .packages.six.moves.http_client import IncompleteRead as httplib_IncompleteRead
+
+# Base Exceptions
+
+
+class HTTPError(Exception):
+ """Base exception used by this module."""
+
+ pass
+
+
+class HTTPWarning(Warning):
+ """Base warning used by this module."""
+
+ pass
+
+
+class PoolError(HTTPError):
+ """Base exception for errors caused within a pool."""
+
+ def __init__(self, pool, message):
+ self.pool = pool
+ HTTPError.__init__(self, "%s: %s" % (pool, message))
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, None)
+
+
+class RequestError(PoolError):
+ """Base exception for PoolErrors that have associated URLs."""
+
+ def __init__(self, pool, url, message):
+ self.url = url
+ PoolError.__init__(self, pool, message)
+
+ def __reduce__(self):
+ # For pickling purposes.
+ return self.__class__, (None, self.url, None)
+
+
+class SSLError(HTTPError):
+ """Raised when SSL certificate fails in an HTTPS connection."""
+
+ pass
+
+
+class ProxyError(HTTPError):
+ """Raised when the connection to a proxy fails."""
+
+ def __init__(self, message, error, *args):
+ super(ProxyError, self).__init__(message, error, *args)
+ self.original_error = error
+
+
+class DecodeError(HTTPError):
+ """Raised when automatic decoding based on Content-Type fails."""
+
+ pass
+
+
+class ProtocolError(HTTPError):
+ """Raised when something unexpected happens mid-request/response."""
+
+ pass
+
+
+#: Renamed to ProtocolError but aliased for backwards compatibility.
+ConnectionError = ProtocolError
+
+
+# Leaf Exceptions
+
+
+class MaxRetryError(RequestError):
+ """Raised when the maximum number of retries is exceeded.
+
+ :param pool: The connection pool
+ :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
+    :param string url: The requested URL
+ :param exceptions.Exception reason: The underlying error
+
+ """
+
+ def __init__(self, pool, url, reason=None):
+ self.reason = reason
+
+ message = "Max retries exceeded with url: %s (Caused by %r)" % (url, reason)
+
+ RequestError.__init__(self, pool, url, message)
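+
+    # Illustrative sketch: callers typically inspect ``reason`` for the
+    # underlying error, e.g.
+    #
+    #     try:
+    #         pool.urlopen("GET", "/")
+    #     except MaxRetryError as e:
+    #         underlying = e.reason  # e.g. a NewConnectionError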
+
+
+class HostChangedError(RequestError):
+ """Raised when an existing pool gets a request for a foreign host."""
+
+ def __init__(self, pool, url, retries=3):
+ message = "Tried to open a foreign host with url: %s" % url
+ RequestError.__init__(self, pool, url, message)
+ self.retries = retries
+
+
+class TimeoutStateError(HTTPError):
+ """Raised when passing an invalid state to a timeout"""
+
+ pass
+
+
+class TimeoutError(HTTPError):
+ """Raised when a socket timeout error occurs.
+
+ Catching this error will catch both :exc:`ReadTimeoutErrors
+ <ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
+ """
+
+ pass
+
+
+class ReadTimeoutError(TimeoutError, RequestError):
+ """Raised when a socket timeout occurs while receiving data from a server"""
+
+ pass
+
+
+# This timeout error does not have a URL attached and needs to inherit from the
+# base HTTPError
+class ConnectTimeoutError(TimeoutError):
+ """Raised when a socket timeout occurs while connecting to a server"""
+
+ pass
+
+
+class NewConnectionError(ConnectTimeoutError, PoolError):
+ """Raised when we fail to establish a new connection. Usually ECONNREFUSED."""
+
+ pass
+
+
+class EmptyPoolError(PoolError):
+ """Raised when a pool runs out of connections and no more are allowed."""
+
+ pass
+
+
+class ClosedPoolError(PoolError):
+ """Raised when a request enters a pool after the pool has been closed."""
+
+ pass
+
+
+class LocationValueError(ValueError, HTTPError):
+ """Raised when there is something wrong with a given URL input."""
+
+ pass
+
+
+class LocationParseError(LocationValueError):
+ """Raised when get_host or similar fails to parse the URL input."""
+
+ def __init__(self, location):
+ message = "Failed to parse: %s" % location
+ HTTPError.__init__(self, message)
+
+ self.location = location
+
+
+class URLSchemeUnknown(LocationValueError):
+ """Raised when a URL input has an unsupported scheme."""
+
+ def __init__(self, scheme):
+ message = "Not supported URL scheme %s" % scheme
+ super(URLSchemeUnknown, self).__init__(message)
+
+ self.scheme = scheme
+
+
+class ResponseError(HTTPError):
+ """Used as a container for an error reason supplied in a MaxRetryError."""
+
+ GENERIC_ERROR = "too many error responses"
+ SPECIFIC_ERROR = "too many {status_code} error responses"
+
+
+class SecurityWarning(HTTPWarning):
+ """Warned when performing security reducing actions"""
+
+ pass
+
+
+class SubjectAltNameWarning(SecurityWarning):
+ """Warned when connecting to a host with a certificate missing a SAN."""
+
+ pass
+
+
+class InsecureRequestWarning(SecurityWarning):
+ """Warned when making an unverified HTTPS request."""
+
+ pass
+
+
+class SystemTimeWarning(SecurityWarning):
+ """Warned when system time is suspected to be wrong"""
+
+ pass
+
+
+class InsecurePlatformWarning(SecurityWarning):
+ """Warned when certain TLS/SSL configuration is not available on a platform."""
+
+ pass
+
+
+class SNIMissingWarning(HTTPWarning):
+ """Warned when making a HTTPS request without SNI available."""
+
+ pass
+
+
+class DependencyWarning(HTTPWarning):
+ """
+ Warned when an attempt is made to import a module with missing optional
+ dependencies.
+ """
+
+ pass
+
+
+class ResponseNotChunked(ProtocolError, ValueError):
+ """Response needs to be chunked in order to read it as chunks."""
+
+ pass
+
+
+class BodyNotHttplibCompatible(HTTPError):
+ """
+ Body should be :class:`http.client.HTTPResponse` like
+ (have an fp attribute which returns raw chunks) for read_chunked().
+ """
+
+ pass
+
+
+class IncompleteRead(HTTPError, httplib_IncompleteRead):
+ """
+ Response length doesn't match expected Content-Length
+
+ Subclass of :class:`http.client.IncompleteRead` to allow int value
+ for ``partial`` to avoid creating large objects on streamed reads.
+ """
+
+ def __init__(self, partial, expected):
+ super(IncompleteRead, self).__init__(partial, expected)
+
+ def __repr__(self):
+ return "IncompleteRead(%i bytes read, %i more expected)" % (
+ self.partial,
+ self.expected,
+ )
+
+
+class InvalidChunkLength(HTTPError, httplib_IncompleteRead):
+ """Invalid chunk length in a chunked response."""
+
+ def __init__(self, response, length):
+ super(InvalidChunkLength, self).__init__(
+ response.tell(), response.length_remaining
+ )
+ self.response = response
+ self.length = length
+
+ def __repr__(self):
+ return "InvalidChunkLength(got length %r, %i bytes read)" % (
+ self.length,
+ self.partial,
+ )
+
+
+class InvalidHeader(HTTPError):
+ """The header provided was somehow invalid."""
+
+ pass
+
+
+class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):
+ """ProxyManager does not support the supplied scheme"""
+
+ # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
+
+ def __init__(self, scheme):
+ message = "Not supported proxy scheme %s" % scheme
+ super(ProxySchemeUnknown, self).__init__(message)
+
+
+class ProxySchemeUnsupported(ValueError):
+ """Fetching HTTPS resources through HTTPS proxies is unsupported"""
+
+ pass
+
+
+class HeaderParsingError(HTTPError):
+ """Raised by assert_header_parsing, but we convert it to a log.warning statement."""
+
+ def __init__(self, defects, unparsed_data):
+ message = "%s, unparsed data: %r" % (defects or "Unknown", unparsed_data)
+ super(HeaderParsingError, self).__init__(message)
+
+
+class UnrewindableBodyError(HTTPError):
+ """urllib3 encountered an error when trying to rewind a body"""
+
+ pass
diff --git a/third_party/python/urllib3/urllib3/fields.py b/third_party/python/urllib3/urllib3/fields.py
new file mode 100644
index 0000000000..9d630f491d
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/fields.py
@@ -0,0 +1,274 @@
+from __future__ import absolute_import
+
+import email.utils
+import mimetypes
+import re
+
+from .packages import six
+
+
+def guess_content_type(filename, default="application/octet-stream"):
+ """
+ Guess the "Content-Type" of a file.
+
+ :param filename:
+ The filename to guess the "Content-Type" of using :mod:`mimetypes`.
+ :param default:
+ If no "Content-Type" can be guessed, default to `default`.
+ """
+ if filename:
+ return mimetypes.guess_type(filename)[0] or default
+ return default
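+
+
+# Illustrative examples (derived from the standard ``mimetypes`` table):
+#
+#     guess_content_type("photo.jpg")    # -> "image/jpeg"
+#     guess_content_type("unknown.xyz")  # -> "application/octet-stream"
+#     guess_content_type(None)           # -> "application/octet-stream"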
+
+
+def format_header_param_rfc2231(name, value):
+ """
+ Helper function to format and quote a single header parameter using the
+ strategy defined in RFC 2231.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows
+ `RFC 2388 Section 4.4 <https://tools.ietf.org/html/rfc2388#section-4.4>`_.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+        The value of the parameter, provided as ``bytes`` or ``str``.
+    :returns:
+        An RFC-2231-formatted unicode string.
+ """
+ if isinstance(value, six.binary_type):
+ value = value.decode("utf-8")
+
+ if not any(ch in value for ch in '"\\\r\n'):
+ result = u'%s="%s"' % (name, value)
+ try:
+ result.encode("ascii")
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ pass
+ else:
+ return result
+
+ if six.PY2: # Python 2:
+ value = value.encode("utf-8")
+
+ # encode_rfc2231 accepts an encoded string and returns an ascii-encoded
+ # string in Python 2 but accepts and returns unicode strings in Python 3
+ value = email.utils.encode_rfc2231(value, "utf-8")
+ value = "%s*=%s" % (name, value)
+
+ if six.PY2: # Python 2:
+ value = value.decode("utf-8")
+
+ return value
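+
+
+# Illustrative sketch of the encoding the helper above produces:
+#
+#     format_header_param_rfc2231("filename", u"plain.txt")
+#     # -> u'filename="plain.txt"'
+#     format_header_param_rfc2231("filename", u"caf\u00e9.txt")
+#     # -> u"filename*=utf-8''caf%C3%A9.txt"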
+
+
+_HTML5_REPLACEMENTS = {
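+    # Replace '"' with "%22".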
+ u"\u0022": u"%22",
+ # Replace "\" with "\\".
+ u"\u005C": u"\u005C\u005C",
+}
+
+# All control characters from 0x00 to 0x1F *except* 0x1B.
+_HTML5_REPLACEMENTS.update(
+ {
+ six.unichr(cc): u"%{:02X}".format(cc)
+ for cc in range(0x00, 0x1F + 1)
+ if cc not in (0x1B,)
+ }
+)
+
+
+def _replace_multiple(value, needles_and_replacements):
+ def replacer(match):
+ return needles_and_replacements[match.group(0)]
+
+ pattern = re.compile(
+ r"|".join([re.escape(needle) for needle in needles_and_replacements.keys()])
+ )
+
+ result = pattern.sub(replacer, value)
+
+ return result
+
+
+def format_header_param_html5(name, value):
+ """
+ Helper function to format and quote a single header parameter using the
+ HTML5 strategy.
+
+ Particularly useful for header parameters which might contain
+ non-ASCII values, like file names. This follows the `HTML5 Working Draft
+ Section 4.10.22.7`_ and matches the behavior of curl and modern browsers.
+
+ .. _HTML5 Working Draft Section 4.10.22.7:
+ https://w3c.github.io/html/sec-forms.html#multipart-form-data
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+        The value of the parameter, provided as ``bytes`` or ``str``.
+    :returns:
+        A unicode string, stripped of troublesome characters.
+ """
+ if isinstance(value, six.binary_type):
+ value = value.decode("utf-8")
+
+ value = _replace_multiple(value, _HTML5_REPLACEMENTS)
+
+ return u'%s="%s"' % (name, value)
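+
+
+# Illustrative sketch of the escaping the HTML5 strategy produces:
+#
+#     format_header_param_html5("filename", u'na"me.txt')
+#     # -> u'filename="na%22me.txt"'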
+
+
+# For backwards-compatibility.
+format_header_param = format_header_param_html5
+
+
+class RequestField(object):
+ """
+ A data container for request body parameters.
+
+ :param name:
+ The name of this request field. Must be unicode.
+ :param data:
+ The data/value body.
+ :param filename:
+ An optional filename of the request field. Must be unicode.
+ :param headers:
+ An optional dict-like object of headers to initially use for the field.
+ :param header_formatter:
+ An optional callable that is used to encode and format the headers. By
+ default, this is :func:`format_header_param_html5`.
+ """
+
+ def __init__(
+ self,
+ name,
+ data,
+ filename=None,
+ headers=None,
+ header_formatter=format_header_param_html5,
+ ):
+ self._name = name
+ self._filename = filename
+ self.data = data
+ self.headers = {}
+ if headers:
+ self.headers = dict(headers)
+ self.header_formatter = header_formatter
+
+ @classmethod
+ def from_tuples(cls, fieldname, value, header_formatter=format_header_param_html5):
+ """
+ A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.
+
+ Supports constructing :class:`~urllib3.fields.RequestField` from
+ parameter of key/value strings AND key/filetuple. A filetuple is a
+ (filename, data, MIME type) tuple where the MIME type is optional.
+ For example::
+
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+
+ Field names and filenames must be unicode.
+ """
+ if isinstance(value, tuple):
+ if len(value) == 3:
+ filename, data, content_type = value
+ else:
+ filename, data = value
+ content_type = guess_content_type(filename)
+ else:
+ filename = None
+ content_type = None
+ data = value
+
+ request_param = cls(
+ fieldname, data, filename=filename, header_formatter=header_formatter
+ )
+ request_param.make_multipart(content_type=content_type)
+
+ return request_param
+
+ def _render_part(self, name, value):
+ """
+ Overridable helper function to format a single header parameter. By
+ default, this calls ``self.header_formatter``.
+
+ :param name:
+ The name of the parameter, a string expected to be ASCII only.
+ :param value:
+ The value of the parameter, provided as a unicode string.
+ """
+
+ return self.header_formatter(name, value)
+
+ def _render_parts(self, header_parts):
+ """
+ Helper function to format and quote a single header.
+
+ Useful for single headers that are composed of multiple items. E.g.,
+ 'Content-Disposition' fields.
+
+ :param header_parts:
+ A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
+ as `k1="v1"; k2="v2"; ...`.
+ """
+ parts = []
+ iterable = header_parts
+ if isinstance(header_parts, dict):
+ iterable = header_parts.items()
+
+ for name, value in iterable:
+ if value is not None:
+ parts.append(self._render_part(name, value))
+
+ return u"; ".join(parts)
+
+ def render_headers(self):
+ """
+ Renders the headers for this request field.
+ """
+ lines = []
+
+ sort_keys = ["Content-Disposition", "Content-Type", "Content-Location"]
+ for sort_key in sort_keys:
+ if self.headers.get(sort_key, False):
+ lines.append(u"%s: %s" % (sort_key, self.headers[sort_key]))
+
+ for header_name, header_value in self.headers.items():
+ if header_name not in sort_keys:
+ if header_value:
+ lines.append(u"%s: %s" % (header_name, header_value))
+
+ lines.append(u"\r\n")
+ return u"\r\n".join(lines)
+
+ def make_multipart(
+ self, content_disposition=None, content_type=None, content_location=None
+ ):
+ """
+ Makes this request field into a multipart request field.
+
+        This method sets the "Content-Disposition", "Content-Type" and
+        "Content-Location" headers on the request parameter.
+
+        :param content_disposition:
+            The 'Content-Disposition' of the request body. Defaults to
+            'form-data' when not given.
+        :param content_type:
+            The 'Content-Type' of the request body.
+        :param content_location:
+            The 'Content-Location' of the request body.
+
+ """
+ self.headers["Content-Disposition"] = content_disposition or u"form-data"
+ self.headers["Content-Disposition"] += u"; ".join(
+ [
+ u"",
+ self._render_parts(
+ ((u"name", self._name), (u"filename", self._filename))
+ ),
+ ]
+ )
+ self.headers["Content-Type"] = content_type
+ self.headers["Content-Location"] = content_location
diff --git a/third_party/python/urllib3/urllib3/filepost.py b/third_party/python/urllib3/urllib3/filepost.py
new file mode 100644
index 0000000000..36c9252c64
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/filepost.py
@@ -0,0 +1,98 @@
+from __future__ import absolute_import
+
+import binascii
+import codecs
+import os
+from io import BytesIO
+
+from .fields import RequestField
+from .packages import six
+from .packages.six import b
+
+writer = codecs.lookup("utf-8")[3]
+
+
+def choose_boundary():
+ """
+ Our embarrassingly-simple replacement for mimetools.choose_boundary.
+ """
+ boundary = binascii.hexlify(os.urandom(16))
+ if not six.PY2:
+ boundary = boundary.decode("ascii")
+ return boundary
+
+
+def iter_field_objects(fields):
+ """
+ Iterate over fields.
+
+ Supports list of (k, v) tuples and dicts, and lists of
+ :class:`~urllib3.fields.RequestField`.
+
+ """
+ if isinstance(fields, dict):
+ i = six.iteritems(fields)
+ else:
+ i = iter(fields)
+
+ for field in i:
+ if isinstance(field, RequestField):
+ yield field
+ else:
+ yield RequestField.from_tuples(*field)
+
+
+def iter_fields(fields):
+ """
+ .. deprecated:: 1.6
+
+ Iterate over fields.
+
+ The addition of :class:`~urllib3.fields.RequestField` makes this function
+ obsolete. Instead, use :func:`iter_field_objects`, which returns
+ :class:`~urllib3.fields.RequestField` objects.
+
+ Supports list of (k, v) tuples and dicts.
+ """
+ if isinstance(fields, dict):
+ return ((k, v) for k, v in six.iteritems(fields))
+
+ return ((k, v) for k, v in fields)
+
+
+def encode_multipart_formdata(fields, boundary=None):
+ """
+ Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
+
+ :param fields:
+ Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
+
+ :param boundary:
+ If not specified, then a random boundary will be generated using
+ :func:`urllib3.filepost.choose_boundary`.
+ """
+ body = BytesIO()
+ if boundary is None:
+ boundary = choose_boundary()
+
+ for field in iter_field_objects(fields):
+ body.write(b("--%s\r\n" % (boundary)))
+
+ writer(body).write(field.render_headers())
+ data = field.data
+
+ if isinstance(data, int):
+ data = str(data) # Backwards compatibility
+
+ if isinstance(data, six.text_type):
+ writer(body).write(data)
+ else:
+ body.write(data)
+
+ body.write(b"\r\n")
+
+ body.write(b("--%s--\r\n" % (boundary)))
+
+ content_type = str("multipart/form-data; boundary=%s" % boundary)
+
+ return body.getvalue(), content_type
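+
+
+# A minimal usage sketch:
+#
+#     fields = {
+#         "token": "abc123",
+#         "attachment": ("report.txt", b"file contents", "text/plain"),
+#     }
+#     body, content_type = encode_multipart_formdata(fields)
+#     # ``body`` is the encoded bytes; send it with the returned Content-Type.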
diff --git a/third_party/python/urllib3/urllib3/packages/__init__.py b/third_party/python/urllib3/urllib3/packages/__init__.py
new file mode 100644
index 0000000000..fce4caa65d
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/packages/__init__.py
@@ -0,0 +1,5 @@
+from __future__ import absolute_import
+
+from . import ssl_match_hostname
+
+__all__ = ("ssl_match_hostname",)
diff --git a/third_party/python/urllib3/urllib3/packages/backports/__init__.py b/third_party/python/urllib3/urllib3/packages/backports/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/packages/backports/__init__.py
diff --git a/third_party/python/urllib3/urllib3/packages/backports/makefile.py b/third_party/python/urllib3/urllib3/packages/backports/makefile.py
new file mode 100644
index 0000000000..b8fb2154b6
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/packages/backports/makefile.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+"""
+backports.makefile
+~~~~~~~~~~~~~~~~~~
+
+Backports the Python 3 ``socket.makefile`` method for use with anything that
+wants to create a "fake" socket object.
+"""
+import io
+from socket import SocketIO
+
+
+def backport_makefile(
+ self, mode="r", buffering=None, encoding=None, errors=None, newline=None
+):
+ """
+ Backport of ``socket.makefile`` from Python 3.5.
+ """
+ if not set(mode) <= {"r", "w", "b"}:
+ raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = SocketIO(self, rawmode)
+ self._makefile_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
diff --git a/third_party/python/urllib3/urllib3/packages/six.py b/third_party/python/urllib3/urllib3/packages/six.py
new file mode 100644
index 0000000000..314424099f
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/packages/six.py
@@ -0,0 +1,1021 @@
+# Copyright (c) 2010-2019 Benjamin Peterson
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""Utilities for writing code that runs on Python 2 and 3"""
+
+from __future__ import absolute_import
+
+import functools
+import itertools
+import operator
+import sys
+import types
+
+__author__ = "Benjamin Peterson <benjamin@python.org>"
+__version__ = "1.12.0"
+
+
+# Useful for very coarse version differentiation.
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+PY34 = sys.version_info[0:2] >= (3, 4)
+
+if PY3:
+ string_types = (str,)
+ integer_types = (int,)
+ class_types = (type,)
+ text_type = str
+ binary_type = bytes
+
+ MAXSIZE = sys.maxsize
+else:
+ string_types = (basestring,)
+ integer_types = (int, long)
+ class_types = (type, types.ClassType)
+ text_type = unicode
+ binary_type = str
+
+ if sys.platform.startswith("java"):
+ # Jython always uses 32 bits.
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
+ class X(object):
+ def __len__(self):
+ return 1 << 31
+
+ try:
+ len(X())
+ except OverflowError:
+ # 32-bit
+ MAXSIZE = int((1 << 31) - 1)
+ else:
+ # 64-bit
+ MAXSIZE = int((1 << 63) - 1)
+ del X
+
+
+def _add_doc(func, doc):
+ """Add documentation to a function."""
+ func.__doc__ = doc
+
+
+def _import_module(name):
+ """Import module, returning the module after the last dot."""
+ __import__(name)
+ return sys.modules[name]
+
+
+class _LazyDescr(object):
+ def __init__(self, name):
+ self.name = name
+
+ def __get__(self, obj, tp):
+ result = self._resolve()
+ setattr(obj, self.name, result) # Invokes __set__.
+ try:
+ # This is a bit ugly, but it avoids running this again by
+ # removing this descriptor.
+ delattr(obj.__class__, self.name)
+ except AttributeError:
+ pass
+ return result
+
+
+class MovedModule(_LazyDescr):
+ def __init__(self, name, old, new=None):
+ super(MovedModule, self).__init__(name)
+ if PY3:
+ if new is None:
+ new = name
+ self.mod = new
+ else:
+ self.mod = old
+
+ def _resolve(self):
+ return _import_module(self.mod)
+
+ def __getattr__(self, attr):
+ _module = self._resolve()
+ value = getattr(_module, attr)
+ setattr(self, attr, value)
+ return value
+
+
+class _LazyModule(types.ModuleType):
+ def __init__(self, name):
+ super(_LazyModule, self).__init__(name)
+ self.__doc__ = self.__class__.__doc__
+
+ def __dir__(self):
+ attrs = ["__doc__", "__name__"]
+ attrs += [attr.name for attr in self._moved_attributes]
+ return attrs
+
+ # Subclasses should override this
+ _moved_attributes = []
+
+
+class MovedAttribute(_LazyDescr):
+ def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
+ super(MovedAttribute, self).__init__(name)
+ if PY3:
+ if new_mod is None:
+ new_mod = name
+ self.mod = new_mod
+ if new_attr is None:
+ if old_attr is None:
+ new_attr = name
+ else:
+ new_attr = old_attr
+ self.attr = new_attr
+ else:
+ self.mod = old_mod
+ if old_attr is None:
+ old_attr = name
+ self.attr = old_attr
+
+ def _resolve(self):
+ module = _import_module(self.mod)
+ return getattr(module, self.attr)
+
+
+class _SixMetaPathImporter(object):
+
+ """
+ A meta path importer to import six.moves and its submodules.
+
+    This class implements a PEP 302 finder and loader. It should be compatible
+    with Python 2.5 and all existing versions of Python 3.
+ """
+
+ def __init__(self, six_module_name):
+ self.name = six_module_name
+ self.known_modules = {}
+
+ def _add_module(self, mod, *fullnames):
+ for fullname in fullnames:
+ self.known_modules[self.name + "." + fullname] = mod
+
+ def _get_module(self, fullname):
+ return self.known_modules[self.name + "." + fullname]
+
+ def find_module(self, fullname, path=None):
+ if fullname in self.known_modules:
+ return self
+ return None
+
+ def __get_module(self, fullname):
+ try:
+ return self.known_modules[fullname]
+ except KeyError:
+ raise ImportError("This loader does not know module " + fullname)
+
+ def load_module(self, fullname):
+ try:
+ # in case of a reload
+ return sys.modules[fullname]
+ except KeyError:
+ pass
+ mod = self.__get_module(fullname)
+ if isinstance(mod, MovedModule):
+ mod = mod._resolve()
+ else:
+ mod.__loader__ = self
+ sys.modules[fullname] = mod
+ return mod
+
+ def is_package(self, fullname):
+ """
+ Return true, if the named module is a package.
+
+ We need this method to get correct spec objects with
+ Python 3.4 (see PEP451)
+ """
+ return hasattr(self.__get_module(fullname), "__path__")
+
+ def get_code(self, fullname):
+ """Return None
+
+        Required if is_package is implemented."""
+ self.__get_module(fullname) # eventually raises ImportError
+ return None
+
+ get_source = get_code # same as get_code
+
+
+_importer = _SixMetaPathImporter(__name__)
+
+
+class _MovedItems(_LazyModule):
+
+ """Lazy loading of moved objects"""
+
+ __path__ = [] # mark as package
+
+
+_moved_attributes = [
+ MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
+ MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
+ MovedAttribute(
+ "filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"
+ ),
+ MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
+ MovedAttribute("intern", "__builtin__", "sys"),
+ MovedAttribute("map", "itertools", "builtins", "imap", "map"),
+ MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
+ MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
+ MovedAttribute("getoutput", "commands", "subprocess"),
+ MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute(
+ "reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"
+ ),
+ MovedAttribute("reduce", "__builtin__", "functools"),
+ MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
+ MovedAttribute("StringIO", "StringIO", "io"),
+ MovedAttribute("UserDict", "UserDict", "collections"),
+ MovedAttribute("UserList", "UserList", "collections"),
+ MovedAttribute("UserString", "UserString", "collections"),
+ MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
+ MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
+ MovedAttribute(
+ "zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"
+ ),
+ MovedModule("builtins", "__builtin__"),
+ MovedModule("configparser", "ConfigParser"),
+ MovedModule("copyreg", "copy_reg"),
+ MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
+ MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"),
+ MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
+ MovedModule("http_cookies", "Cookie", "http.cookies"),
+ MovedModule("html_entities", "htmlentitydefs", "html.entities"),
+ MovedModule("html_parser", "HTMLParser", "html.parser"),
+ MovedModule("http_client", "httplib", "http.client"),
+ MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
+ MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
+ MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
+ MovedModule(
+ "email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"
+ ),
+ MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
+ MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
+ MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
+ MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
+ MovedModule("cPickle", "cPickle", "pickle"),
+ MovedModule("queue", "Queue"),
+ MovedModule("reprlib", "repr"),
+ MovedModule("socketserver", "SocketServer"),
+ MovedModule("_thread", "thread", "_thread"),
+ MovedModule("tkinter", "Tkinter"),
+ MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
+ MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
+ MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
+ MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
+ MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
+ MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
+ MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
+ MovedModule("tkinter_colorchooser", "tkColorChooser", "tkinter.colorchooser"),
+ MovedModule("tkinter_commondialog", "tkCommonDialog", "tkinter.commondialog"),
+ MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
+ MovedModule("tkinter_font", "tkFont", "tkinter.font"),
+ MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
+ MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", "tkinter.simpledialog"),
+ MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
+ MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
+ MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
+ MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
+ MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
+ MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
+]
+# Add windows specific modules.
+if sys.platform == "win32":
+ _moved_attributes += [MovedModule("winreg", "_winreg")]
+
+for attr in _moved_attributes:
+ setattr(_MovedItems, attr.name, attr)
+ if isinstance(attr, MovedModule):
+ _importer._add_module(attr, "moves." + attr.name)
+del attr
+
+_MovedItems._moved_attributes = _moved_attributes
+
+moves = _MovedItems(__name__ + ".moves")
+_importer._add_module(moves, "moves")
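
A minimal usage sketch of the registry built above (assuming the vendored `six` is importable; every name resolves lazily to its Python 2 or Python 3 location on first access):

    from six.moves import range, configparser  # noqa: F401
    from six.moves.urllib.parse import urlparse

    print(urlparse("https://example.com/a?b=1").netloc)  # -> example.com
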
+
+
+class Module_six_moves_urllib_parse(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_parse"""
+
+
+_urllib_parse_moved_attributes = [
+ MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
+ MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
+ MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
+ MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
+ MovedAttribute("urljoin", "urlparse", "urllib.parse"),
+ MovedAttribute("urlparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
+ MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
+ MovedAttribute("quote", "urllib", "urllib.parse"),
+ MovedAttribute("quote_plus", "urllib", "urllib.parse"),
+ MovedAttribute("unquote", "urllib", "urllib.parse"),
+ MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
+ MovedAttribute(
+ "unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"
+ ),
+ MovedAttribute("urlencode", "urllib", "urllib.parse"),
+ MovedAttribute("splitquery", "urllib", "urllib.parse"),
+ MovedAttribute("splittag", "urllib", "urllib.parse"),
+ MovedAttribute("splituser", "urllib", "urllib.parse"),
+ MovedAttribute("splitvalue", "urllib", "urllib.parse"),
+ MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_params", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_query", "urlparse", "urllib.parse"),
+ MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
+]
+for attr in _urllib_parse_moved_attributes:
+ setattr(Module_six_moves_urllib_parse, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
+
+_importer._add_module(
+ Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
+ "moves.urllib_parse",
+ "moves.urllib.parse",
+)
+
+
+class Module_six_moves_urllib_error(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_error"""
+
+
+_urllib_error_moved_attributes = [
+ MovedAttribute("URLError", "urllib2", "urllib.error"),
+ MovedAttribute("HTTPError", "urllib2", "urllib.error"),
+ MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
+]
+for attr in _urllib_error_moved_attributes:
+ setattr(Module_six_moves_urllib_error, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
+
+_importer._add_module(
+ Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
+ "moves.urllib_error",
+ "moves.urllib.error",
+)
+
+
+class Module_six_moves_urllib_request(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_request"""
+
+
+_urllib_request_moved_attributes = [
+ MovedAttribute("urlopen", "urllib2", "urllib.request"),
+ MovedAttribute("install_opener", "urllib2", "urllib.request"),
+ MovedAttribute("build_opener", "urllib2", "urllib.request"),
+ MovedAttribute("pathname2url", "urllib", "urllib.request"),
+ MovedAttribute("url2pathname", "urllib", "urllib.request"),
+ MovedAttribute("getproxies", "urllib", "urllib.request"),
+ MovedAttribute("Request", "urllib2", "urllib.request"),
+ MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
+ MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FileHandler", "urllib2", "urllib.request"),
+ MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
+ MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
+ MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
+ MovedAttribute("urlretrieve", "urllib", "urllib.request"),
+ MovedAttribute("urlcleanup", "urllib", "urllib.request"),
+ MovedAttribute("URLopener", "urllib", "urllib.request"),
+ MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
+ MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
+ MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
+ MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
+]
+for attr in _urllib_request_moved_attributes:
+ setattr(Module_six_moves_urllib_request, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
+
+_importer._add_module(
+ Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
+ "moves.urllib_request",
+ "moves.urllib.request",
+)
+
+
+class Module_six_moves_urllib_response(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_response"""
+
+
+_urllib_response_moved_attributes = [
+ MovedAttribute("addbase", "urllib", "urllib.response"),
+ MovedAttribute("addclosehook", "urllib", "urllib.response"),
+ MovedAttribute("addinfo", "urllib", "urllib.response"),
+ MovedAttribute("addinfourl", "urllib", "urllib.response"),
+]
+for attr in _urllib_response_moved_attributes:
+ setattr(Module_six_moves_urllib_response, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
+
+_importer._add_module(
+ Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
+ "moves.urllib_response",
+ "moves.urllib.response",
+)
+
+
+class Module_six_moves_urllib_robotparser(_LazyModule):
+
+ """Lazy loading of moved objects in six.moves.urllib_robotparser"""
+
+
+_urllib_robotparser_moved_attributes = [
+ MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser")
+]
+for attr in _urllib_robotparser_moved_attributes:
+ setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
+del attr
+
+Module_six_moves_urllib_robotparser._moved_attributes = (
+ _urllib_robotparser_moved_attributes
+)
+
+_importer._add_module(
+ Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
+ "moves.urllib_robotparser",
+ "moves.urllib.robotparser",
+)
+
+
+class Module_six_moves_urllib(types.ModuleType):
+
+ """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
+
+ __path__ = [] # mark as package
+ parse = _importer._get_module("moves.urllib_parse")
+ error = _importer._get_module("moves.urllib_error")
+ request = _importer._get_module("moves.urllib_request")
+ response = _importer._get_module("moves.urllib_response")
+ robotparser = _importer._get_module("moves.urllib_robotparser")
+
+ def __dir__(self):
+ return ["parse", "error", "request", "response", "robotparser"]
+
+
+_importer._add_module(
+ Module_six_moves_urllib(__name__ + ".moves.urllib"), "moves.urllib"
+)
+
+
+def add_move(move):
+ """Add an item to six.moves."""
+ setattr(_MovedItems, move.name, move)
+
+
+def remove_move(name):
+ """Remove item from six.moves."""
+ try:
+ delattr(_MovedItems, name)
+ except AttributeError:
+ try:
+ del moves.__dict__[name]
+ except KeyError:
+ raise AttributeError("no such move, %r" % (name,))
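
For illustration, extending the registry at runtime; `mock` here stands in for the PyPI backport of `unittest.mock` and is only an example:

    import six

    # Register a custom move, import through it, then unregister it.
    six.add_move(six.MovedModule("mock", "mock", "unittest.mock"))
    from six.moves import mock  # noqa: E402,F401  (resolves lazily)
    six.remove_move("mock")
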
+
+
+if PY3:
+ _meth_func = "__func__"
+ _meth_self = "__self__"
+
+ _func_closure = "__closure__"
+ _func_code = "__code__"
+ _func_defaults = "__defaults__"
+ _func_globals = "__globals__"
+else:
+ _meth_func = "im_func"
+ _meth_self = "im_self"
+
+ _func_closure = "func_closure"
+ _func_code = "func_code"
+ _func_defaults = "func_defaults"
+ _func_globals = "func_globals"
+
+
+try:
+ advance_iterator = next
+except NameError:
+
+ def advance_iterator(it):
+ return it.next()
+
+
+next = advance_iterator
+
+
+try:
+ callable = callable
+except NameError:
+
+ def callable(obj):
+ return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
+
+
+if PY3:
+
+ def get_unbound_function(unbound):
+ return unbound
+
+ create_bound_method = types.MethodType
+
+ def create_unbound_method(func, cls):
+ return func
+
+ Iterator = object
+else:
+
+ def get_unbound_function(unbound):
+ return unbound.im_func
+
+ def create_bound_method(func, obj):
+ return types.MethodType(func, obj, obj.__class__)
+
+ def create_unbound_method(func, cls):
+ return types.MethodType(func, None, cls)
+
+ class Iterator(object):
+ def next(self):
+ return type(self).__next__(self)
+
+ callable = callable
+_add_doc(
+    get_unbound_function, """Get the function out of a possibly unbound method."""
+)
+
+
+get_method_function = operator.attrgetter(_meth_func)
+get_method_self = operator.attrgetter(_meth_self)
+get_function_closure = operator.attrgetter(_func_closure)
+get_function_code = operator.attrgetter(_func_code)
+get_function_defaults = operator.attrgetter(_func_defaults)
+get_function_globals = operator.attrgetter(_func_globals)
+
+
+if PY3:
+
+ def iterkeys(d, **kw):
+ return iter(d.keys(**kw))
+
+ def itervalues(d, **kw):
+ return iter(d.values(**kw))
+
+ def iteritems(d, **kw):
+ return iter(d.items(**kw))
+
+ def iterlists(d, **kw):
+ return iter(d.lists(**kw))
+
+ viewkeys = operator.methodcaller("keys")
+
+ viewvalues = operator.methodcaller("values")
+
+ viewitems = operator.methodcaller("items")
+else:
+
+ def iterkeys(d, **kw):
+ return d.iterkeys(**kw)
+
+ def itervalues(d, **kw):
+ return d.itervalues(**kw)
+
+ def iteritems(d, **kw):
+ return d.iteritems(**kw)
+
+ def iterlists(d, **kw):
+ return d.iterlists(**kw)
+
+ viewkeys = operator.methodcaller("viewkeys")
+
+ viewvalues = operator.methodcaller("viewvalues")
+
+ viewitems = operator.methodcaller("viewitems")
+
+_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
+_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
+_add_doc(iteritems, "Return an iterator over the (key, value) pairs of a dictionary.")
+_add_doc(
+ iterlists, "Return an iterator over the (key, [values]) pairs of a dictionary."
+)
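
A short sketch of these dictionary helpers in use; on Python 2 they avoid materializing intermediate lists, and on Python 3 they delegate to the native views:

    import six

    d = {"a": 1, "b": 2}
    for key, value in six.iteritems(d):  # dict.iteritems() vs. iter(d.items())
        print(key, value)

    keys_view = six.viewkeys(d)  # a dynamic view object on both majors
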
+
+
+if PY3:
+
+ def b(s):
+ return s.encode("latin-1")
+
+ def u(s):
+ return s
+
+ unichr = chr
+ import struct
+
+ int2byte = struct.Struct(">B").pack
+ del struct
+ byte2int = operator.itemgetter(0)
+ indexbytes = operator.getitem
+ iterbytes = iter
+ import io
+
+ StringIO = io.StringIO
+ BytesIO = io.BytesIO
+ del io
+ _assertCountEqual = "assertCountEqual"
+ if sys.version_info[1] <= 1:
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+ else:
+ _assertRaisesRegex = "assertRaisesRegex"
+ _assertRegex = "assertRegex"
+else:
+
+ def b(s):
+ return s
+
+ # Workaround for standalone backslash
+
+ def u(s):
+ return unicode(s.replace(r"\\", r"\\\\"), "unicode_escape")
+
+ unichr = unichr
+ int2byte = chr
+
+ def byte2int(bs):
+ return ord(bs[0])
+
+ def indexbytes(buf, i):
+ return ord(buf[i])
+
+ iterbytes = functools.partial(itertools.imap, ord)
+ import StringIO
+
+ StringIO = BytesIO = StringIO.StringIO
+ _assertCountEqual = "assertItemsEqual"
+ _assertRaisesRegex = "assertRaisesRegexp"
+ _assertRegex = "assertRegexpMatches"
+_add_doc(b, """Byte literal""")
+_add_doc(u, """Text literal""")
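
Illustrative uses of the byte/text shims defined above; each expression behaves identically on both majors:

    import six

    data = six.b("abc")        # byte literal: b"abc"
    text = six.u("caf\u00e9")  # text literal: u"café"
    assert six.indexbytes(data, 1) == ord("b")
    assert six.byte2int(six.b("\x03")) == 3
    assert list(six.iterbytes(six.b("hi"))) == [104, 105]
    buf = six.BytesIO(data)    # io.BytesIO here, StringIO.StringIO on Py2
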
+
+
+def assertCountEqual(self, *args, **kwargs):
+ return getattr(self, _assertCountEqual)(*args, **kwargs)
+
+
+def assertRaisesRegex(self, *args, **kwargs):
+ return getattr(self, _assertRaisesRegex)(*args, **kwargs)
+
+
+def assertRegex(self, *args, **kwargs):
+ return getattr(self, _assertRegex)(*args, **kwargs)
+
+
+if PY3:
+ exec_ = getattr(moves.builtins, "exec")
+
+ def reraise(tp, value, tb=None):
+ try:
+ if value is None:
+ value = tp()
+ if value.__traceback__ is not tb:
+ raise value.with_traceback(tb)
+ raise value
+ finally:
+ value = None
+ tb = None
+
+
+else:
+
+ def exec_(_code_, _globs_=None, _locs_=None):
+ """Execute code in a namespace."""
+ if _globs_ is None:
+ frame = sys._getframe(1)
+ _globs_ = frame.f_globals
+ if _locs_ is None:
+ _locs_ = frame.f_locals
+ del frame
+ elif _locs_ is None:
+ _locs_ = _globs_
+ exec("""exec _code_ in _globs_, _locs_""")
+
+ exec_(
+ """def reraise(tp, value, tb=None):
+ try:
+ raise tp, value, tb
+ finally:
+ tb = None
+"""
+ )
+
+
+if sys.version_info[:2] == (3, 2):
+ exec_(
+ """def raise_from(value, from_value):
+ try:
+ if from_value is None:
+ raise value
+ raise value from from_value
+ finally:
+ value = None
+"""
+ )
+elif sys.version_info[:2] > (3, 2):
+ exec_(
+ """def raise_from(value, from_value):
+ try:
+ raise value from from_value
+ finally:
+ value = None
+"""
+ )
+else:
+
+ def raise_from(value, from_value):
+ raise value
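
A hedged sketch of exception chaining through these shims; on Python 3 the cause is attached via ``raise ... from``, while on older interpreters `raise_from` degrades to a plain raise:

    import six

    def load(key):
        try:
            return {}[key]
        except KeyError as exc:
            six.raise_from(RuntimeError("lookup failed for %r" % key), exc)

    # six.reraise(*sys.exc_info()) re-raises the active exception with its
    # original traceback on both majors.
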
+
+
+print_ = getattr(moves.builtins, "print", None)
+if print_ is None:
+
+ def print_(*args, **kwargs):
+ """The new-style print function for Python 2.4 and 2.5."""
+ fp = kwargs.pop("file", sys.stdout)
+ if fp is None:
+ return
+
+ def write(data):
+ if not isinstance(data, basestring):
+ data = str(data)
+ # If the file has an encoding, encode unicode with it.
+ if (
+ isinstance(fp, file)
+ and isinstance(data, unicode)
+ and fp.encoding is not None
+ ):
+ errors = getattr(fp, "errors", None)
+ if errors is None:
+ errors = "strict"
+ data = data.encode(fp.encoding, errors)
+ fp.write(data)
+
+ want_unicode = False
+ sep = kwargs.pop("sep", None)
+ if sep is not None:
+ if isinstance(sep, unicode):
+ want_unicode = True
+ elif not isinstance(sep, str):
+ raise TypeError("sep must be None or a string")
+ end = kwargs.pop("end", None)
+ if end is not None:
+ if isinstance(end, unicode):
+ want_unicode = True
+ elif not isinstance(end, str):
+ raise TypeError("end must be None or a string")
+ if kwargs:
+ raise TypeError("invalid keyword arguments to print()")
+ if not want_unicode:
+ for arg in args:
+ if isinstance(arg, unicode):
+ want_unicode = True
+ break
+ if want_unicode:
+ newline = unicode("\n")
+ space = unicode(" ")
+ else:
+ newline = "\n"
+ space = " "
+ if sep is None:
+ sep = space
+ if end is None:
+ end = newline
+ for i, arg in enumerate(args):
+ if i:
+ write(sep)
+ write(arg)
+ write(end)
+
+
+if sys.version_info[:2] < (3, 3):
+ _print = print_
+
+ def print_(*args, **kwargs):
+ fp = kwargs.get("file", sys.stdout)
+ flush = kwargs.pop("flush", False)
+ _print(*args, **kwargs)
+ if flush and fp is not None:
+ fp.flush()
+
+
+_add_doc(reraise, """Reraise an exception.""")
+
+if sys.version_info[0:2] < (3, 4):
+
+ def wraps(
+ wrapped,
+ assigned=functools.WRAPPER_ASSIGNMENTS,
+ updated=functools.WRAPPER_UPDATES,
+ ):
+ def wrapper(f):
+ f = functools.wraps(wrapped, assigned, updated)(f)
+ f.__wrapped__ = wrapped
+ return f
+
+ return wrapper
+
+
+else:
+ wraps = functools.wraps
+
+
+def with_metaclass(meta, *bases):
+ """Create a base class with a metaclass."""
+ # This requires a bit of explanation: the basic idea is to make a dummy
+ # metaclass for one level of class instantiation that replaces itself with
+ # the actual metaclass.
+ class metaclass(type):
+ def __new__(cls, name, this_bases, d):
+ return meta(name, bases, d)
+
+ @classmethod
+ def __prepare__(cls, name, this_bases):
+ return meta.__prepare__(name, bases)
+
+ return type.__new__(metaclass, "temporary_class", (), {})
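
The dummy-metaclass trick above is easiest to see in use; a minimal sketch with a hypothetical `Meta`:

    import six

    class Meta(type):
        pass

    class Base(object):
        pass

    class MyClass(six.with_metaclass(Meta, Base)):
        pass

    assert type(MyClass) is Meta           # the real metaclass was applied
    assert MyClass.__bases__ == (Base,)    # the temporary class is gone
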
+
+
+def add_metaclass(metaclass):
+ """Class decorator for creating a class with a metaclass."""
+
+ def wrapper(cls):
+ orig_vars = cls.__dict__.copy()
+ slots = orig_vars.get("__slots__")
+ if slots is not None:
+ if isinstance(slots, str):
+ slots = [slots]
+ for slots_var in slots:
+ orig_vars.pop(slots_var)
+ orig_vars.pop("__dict__", None)
+ orig_vars.pop("__weakref__", None)
+ if hasattr(cls, "__qualname__"):
+ orig_vars["__qualname__"] = cls.__qualname__
+ return metaclass(cls.__name__, cls.__bases__, orig_vars)
+
+ return wrapper
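
And the decorator form, sketched with the same hypothetical metaclass; the `__slots__` bookkeeping above is what lets slotted classes survive the rebuild:

    import six

    class Meta(type):
        pass

    @six.add_metaclass(Meta)
    class Point(object):
        __slots__ = ("x", "y")

    assert type(Point) is Meta
    p = Point()
    p.x = 1  # slots still work on the rebuilt class
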
+
+
+def ensure_binary(s, encoding="utf-8", errors="strict"):
+    """Coerce *s* to six.binary_type.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> encoded to `bytes`
+ - `bytes` -> `bytes`
+ """
+ if isinstance(s, text_type):
+ return s.encode(encoding, errors)
+ elif isinstance(s, binary_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
+
+
+def ensure_str(s, encoding="utf-8", errors="strict"):
+ """Coerce *s* to `str`.
+
+ For Python 2:
+ - `unicode` -> encoded to `str`
+ - `str` -> `str`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if not isinstance(s, (text_type, binary_type)):
+ raise TypeError("not expecting type '%s'" % type(s))
+ if PY2 and isinstance(s, text_type):
+ s = s.encode(encoding, errors)
+ elif PY3 and isinstance(s, binary_type):
+ s = s.decode(encoding, errors)
+ return s
+
+
+def ensure_text(s, encoding="utf-8", errors="strict"):
+ """Coerce *s* to six.text_type.
+
+ For Python 2:
+ - `unicode` -> `unicode`
+ - `str` -> `unicode`
+
+ For Python 3:
+ - `str` -> `str`
+ - `bytes` -> decoded to `str`
+ """
+ if isinstance(s, binary_type):
+ return s.decode(encoding, errors)
+ elif isinstance(s, text_type):
+ return s
+ else:
+ raise TypeError("not expecting type '%s'" % type(s))
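
Illustrative round-trips through the three coercion helpers, with the default UTF-8 encoding:

    import six

    assert six.ensure_binary(u"caf\u00e9") == b"caf\xc3\xa9"  # text -> bytes
    assert six.ensure_text(b"caf\xc3\xa9") == u"caf\u00e9"    # bytes -> text
    native = six.ensure_str("abc")  # native str on either major
    assert isinstance(native, str)
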
+
+
+def python_2_unicode_compatible(klass):
+ """
+ A decorator that defines __unicode__ and __str__ methods under Python 2.
+ Under Python 3 it does nothing.
+
+ To support Python 2 and 3 with a single code base, define a __str__ method
+ returning text and apply this decorator to the class.
+ """
+ if PY2:
+ if "__str__" not in klass.__dict__:
+ raise ValueError(
+ "@python_2_unicode_compatible cannot be applied "
+ "to %s because it doesn't define __str__()." % klass.__name__
+ )
+ klass.__unicode__ = klass.__str__
+ klass.__str__ = lambda self: self.__unicode__().encode("utf-8")
+ return klass
+
+
+# Complete the moves implementation.
+# This code is at the end of this module to speed up module loading.
+# Turn this module into a package.
+__path__ = [] # required for PEP 302 and PEP 451
+__package__ = __name__ # see PEP 366 @ReservedAssignment
+if globals().get("__spec__") is not None:
+ __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable
+# Remove other six meta path importers, since they cause problems. This can
+# happen if six is removed from sys.modules and then reloaded. (Setuptools does
+# this for some reason.)
+if sys.meta_path:
+ for i, importer in enumerate(sys.meta_path):
+ # Here's some real nastiness: Another "instance" of the six module might
+ # be floating around. Therefore, we can't use isinstance() to check for
+ # the six meta path importer, since the other six instance will have
+        # inserted an importer with a different class.
+ if (
+ type(importer).__name__ == "_SixMetaPathImporter"
+ and importer.name == __name__
+ ):
+ del sys.meta_path[i]
+ break
+ del i, importer
+# Finally, add the importer to the meta path import hook.
+sys.meta_path.append(_importer)
diff --git a/third_party/python/urllib3/urllib3/packages/ssl_match_hostname/__init__.py b/third_party/python/urllib3/urllib3/packages/ssl_match_hostname/__init__.py
new file mode 100644
index 0000000000..6b12fd90aa
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/packages/ssl_match_hostname/__init__.py
@@ -0,0 +1,22 @@
+import sys
+
+try:
+ # Our match_hostname function is the same as 3.5's, so we only want to
+ # import the match_hostname function if it's at least that good.
+ if sys.version_info < (3, 5):
+ raise ImportError("Fallback to vendored code")
+
+ from ssl import CertificateError, match_hostname
+except ImportError:
+ try:
+ # Backport of the function from a pypi module
+ from backports.ssl_match_hostname import ( # type: ignore
+ CertificateError,
+ match_hostname,
+ )
+ except ImportError:
+ # Our vendored copy
+ from ._implementation import CertificateError, match_hostname # type: ignore
+
+# Not needed, but documenting what we provide.
+__all__ = ("CertificateError", "match_hostname")
diff --git a/third_party/python/urllib3/urllib3/packages/ssl_match_hostname/_implementation.py b/third_party/python/urllib3/urllib3/packages/ssl_match_hostname/_implementation.py
new file mode 100644
index 0000000000..689208d3c6
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/packages/ssl_match_hostname/_implementation.py
@@ -0,0 +1,160 @@
+"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
+
+# Note: This file is under the PSF license as the code comes from the python
+# stdlib. http://docs.python.org/3/license.html
+
+import re
+import sys
+
+# The ipaddress module has been backported to 2.6+ on PyPI. If it is installed
+# on the system, use it to handle IPAddress subjectAltNames (this was added in
+# Python 3.5); otherwise, only do DNS matching. This allows
+# backports.ssl_match_hostname to continue to be used in Python 2.7.
+try:
+ import ipaddress
+except ImportError:
+ ipaddress = None
+
+__version__ = "3.5.0.1"
+
+
+class CertificateError(ValueError):
+ pass
+
+
+def _dnsname_match(dn, hostname, max_wildcards=1):
+ """Matching according to RFC 6125, section 6.4.3
+
+ http://tools.ietf.org/html/rfc6125#section-6.4.3
+ """
+ pats = []
+ if not dn:
+ return False
+
+ # Ported from python3-syntax:
+ # leftmost, *remainder = dn.split(r'.')
+ parts = dn.split(r".")
+ leftmost = parts[0]
+ remainder = parts[1:]
+
+ wildcards = leftmost.count("*")
+ if wildcards > max_wildcards:
+ # Issue #17980: avoid denials of service by refusing more
+ # than one wildcard per fragment. A survey of established
+ # policy among SSL implementations showed it to be a
+ # reasonable choice.
+ raise CertificateError(
+ "too many wildcards in certificate DNS name: " + repr(dn)
+ )
+
+ # speed up common case w/o wildcards
+ if not wildcards:
+ return dn.lower() == hostname.lower()
+
+ # RFC 6125, section 6.4.3, subitem 1.
+ # The client SHOULD NOT attempt to match a presented identifier in which
+ # the wildcard character comprises a label other than the left-most label.
+ if leftmost == "*":
+ # When '*' is a fragment by itself, it matches a non-empty dotless
+ # fragment.
+ pats.append("[^.]+")
+ elif leftmost.startswith("xn--") or hostname.startswith("xn--"):
+ # RFC 6125, section 6.4.3, subitem 3.
+ # The client SHOULD NOT attempt to match a presented identifier
+ # where the wildcard character is embedded within an A-label or
+ # U-label of an internationalized domain name.
+ pats.append(re.escape(leftmost))
+ else:
+ # Otherwise, '*' matches any dotless string, e.g. www*
+ pats.append(re.escape(leftmost).replace(r"\*", "[^.]*"))
+
+ # add the remaining fragments, ignore any wildcards
+ for frag in remainder:
+ pats.append(re.escape(frag))
+
+ pat = re.compile(r"\A" + r"\.".join(pats) + r"\Z", re.IGNORECASE)
+ return pat.match(hostname)
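
To make the wildcard rules concrete, a few illustrative checks against this internal helper (a leftmost `*` matches exactly one non-empty label):

    assert _dnsname_match("*.example.com", "www.example.com")
    assert not _dnsname_match("*.example.com", "a.b.example.com")  # one label only
    assert not _dnsname_match("*.example.com", "example.com")      # a label is required
    assert _dnsname_match("example.com", "EXAMPLE.COM")            # case-insensitive
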
+
+
+def _to_unicode(obj):
+ if isinstance(obj, str) and sys.version_info < (3,):
+ obj = unicode(obj, encoding="ascii", errors="strict")
+ return obj
+
+
+def _ipaddress_match(ipname, host_ip):
+ """Exact matching of IP addresses.
+
+ RFC 6125 explicitly doesn't define an algorithm for this
+ (section 1.7.2 - "Out of Scope").
+ """
+ # OpenSSL may add a trailing newline to a subjectAltName's IP address
+ # Divergence from upstream: ipaddress can't handle byte str
+ ip = ipaddress.ip_address(_to_unicode(ipname).rstrip())
+ return ip == host_ip
+
+
+def match_hostname(cert, hostname):
+ """Verify that *cert* (in decoded format as returned by
+ SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
+ rules are followed, but IP addresses are not accepted for *hostname*.
+
+ CertificateError is raised on failure. On success, the function
+ returns nothing.
+ """
+ if not cert:
+ raise ValueError(
+            "empty or no certificate, match_hostname needs an "
+ "SSL socket or SSL context with either "
+ "CERT_OPTIONAL or CERT_REQUIRED"
+ )
+ try:
+ # Divergence from upstream: ipaddress can't handle byte str
+ host_ip = ipaddress.ip_address(_to_unicode(hostname))
+ except ValueError:
+ # Not an IP address (common case)
+ host_ip = None
+ except UnicodeError:
+        # Divergence from upstream: Have to deal with ipaddress not taking
+        # byte strings. Addresses should be all ASCII, so we consider it not
+        # an IP address in this case.
+ host_ip = None
+ except AttributeError:
+ # Divergence from upstream: Make ipaddress library optional
+ if ipaddress is None:
+ host_ip = None
+ else:
+ raise
+ dnsnames = []
+ san = cert.get("subjectAltName", ())
+ for key, value in san:
+ if key == "DNS":
+ if host_ip is None and _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ elif key == "IP Address":
+ if host_ip is not None and _ipaddress_match(value, host_ip):
+ return
+ dnsnames.append(value)
+ if not dnsnames:
+ # The subject is only checked when there is no dNSName entry
+ # in subjectAltName
+ for sub in cert.get("subject", ()):
+ for key, value in sub:
+ # XXX according to RFC 2818, the most specific Common Name
+ # must be used.
+ if key == "commonName":
+ if _dnsname_match(value, hostname):
+ return
+ dnsnames.append(value)
+ if len(dnsnames) > 1:
+ raise CertificateError(
+ "hostname %r "
+ "doesn't match either of %s" % (hostname, ", ".join(map(repr, dnsnames)))
+ )
+ elif len(dnsnames) == 1:
+ raise CertificateError("hostname %r doesn't match %r" % (hostname, dnsnames[0]))
+ else:
+ raise CertificateError(
+ "no appropriate commonName or subjectAltName fields were found"
+ )
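
A hedged sketch of the public entry point against a hand-built certificate dict of the shape `SSLSocket.getpeercert()` returns:

    cert = {"subjectAltName": (("DNS", "example.com"), ("DNS", "*.example.net"))}

    match_hostname(cert, "example.com")      # success: returns None
    match_hostname(cert, "www.example.net")  # wildcard SAN matches
    try:
        match_hostname(cert, "other.org")
    except CertificateError as exc:
        print(exc)  # hostname 'other.org' doesn't match either of ...
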
diff --git a/third_party/python/urllib3/urllib3/poolmanager.py b/third_party/python/urllib3/urllib3/poolmanager.py
new file mode 100644
index 0000000000..3a31a285bf
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/poolmanager.py
@@ -0,0 +1,536 @@
+from __future__ import absolute_import
+
+import collections
+import functools
+import logging
+
+from ._collections import RecentlyUsedContainer
+from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
+from .exceptions import (
+ LocationValueError,
+ MaxRetryError,
+ ProxySchemeUnknown,
+ ProxySchemeUnsupported,
+ URLSchemeUnknown,
+)
+from .packages import six
+from .packages.six.moves.urllib.parse import urljoin
+from .request import RequestMethods
+from .util.proxy import connection_requires_http_tunnel
+from .util.retry import Retry
+from .util.url import parse_url
+
+__all__ = ["PoolManager", "ProxyManager", "proxy_from_url"]
+
+
+log = logging.getLogger(__name__)
+
+SSL_KEYWORDS = (
+ "key_file",
+ "cert_file",
+ "cert_reqs",
+ "ca_certs",
+ "ssl_version",
+ "ca_cert_dir",
+ "ssl_context",
+ "key_password",
+)
+
+# All known keyword arguments that could be provided to the pool manager, its
+# pools, or the underlying connections. This is used to construct a pool key.
+_key_fields = (
+ "key_scheme", # str
+ "key_host", # str
+ "key_port", # int
+ "key_timeout", # int or float or Timeout
+ "key_retries", # int or Retry
+ "key_strict", # bool
+ "key_block", # bool
+ "key_source_address", # str
+ "key_key_file", # str
+ "key_key_password", # str
+ "key_cert_file", # str
+ "key_cert_reqs", # str
+ "key_ca_certs", # str
+ "key_ssl_version", # str
+ "key_ca_cert_dir", # str
+ "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext
+ "key_maxsize", # int
+ "key_headers", # dict
+ "key__proxy", # parsed proxy url
+ "key__proxy_headers", # dict
+ "key__proxy_config", # class
+ "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples
+ "key__socks_options", # dict
+ "key_assert_hostname", # bool or string
+ "key_assert_fingerprint", # str
+ "key_server_hostname", # str
+)
+
+#: The namedtuple class used to construct keys for the connection pool.
+#: All custom key schemes should include the fields in this key at a minimum.
+PoolKey = collections.namedtuple("PoolKey", _key_fields)
+
+_proxy_config_fields = ("ssl_context", "use_forwarding_for_https")
+ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields)
+
+
+def _default_key_normalizer(key_class, request_context):
+ """
+ Create a pool key out of a request context dictionary.
+
+ According to RFC 3986, both the scheme and host are case-insensitive.
+ Therefore, this function normalizes both before constructing the pool
+ key for an HTTPS request. If you wish to change this behaviour, provide
+ alternate callables to ``key_fn_by_scheme``.
+
+ :param key_class:
+ The class to use when constructing the key. This should be a namedtuple
+ with the ``scheme`` and ``host`` keys at a minimum.
+ :type key_class: namedtuple
+ :param request_context:
+        A dictionary-like object that contains the context for a request.
+ :type request_context: dict
+
+ :return: A namedtuple that can be used as a connection pool key.
+ :rtype: PoolKey
+ """
+ # Since we mutate the dictionary, make a copy first
+ context = request_context.copy()
+ context["scheme"] = context["scheme"].lower()
+ context["host"] = context["host"].lower()
+
+ # These are both dictionaries and need to be transformed into frozensets
+ for key in ("headers", "_proxy_headers", "_socks_options"):
+ if key in context and context[key] is not None:
+ context[key] = frozenset(context[key].items())
+
+ # The socket_options key may be a list and needs to be transformed into a
+ # tuple.
+ socket_opts = context.get("socket_options")
+ if socket_opts is not None:
+ context["socket_options"] = tuple(socket_opts)
+
+ # Map the kwargs to the names in the namedtuple - this is necessary since
+ # namedtuples can't have fields starting with '_'.
+ for key in list(context.keys()):
+ context["key_" + key] = context.pop(key)
+
+ # Default to ``None`` for keys missing from the context
+ for field in key_class._fields:
+ if field not in context:
+ context[field] = None
+
+ return key_class(**context)
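
A short sketch of the normalizer in action: the scheme and host are lowercased and unspecified fields default to None, so equivalent contexts hash to the same key:

    context = {"scheme": "HTTPS", "host": "Example.COM", "port": 443}
    key = _default_key_normalizer(PoolKey, context)

    assert key.key_scheme == "https"
    assert key.key_host == "example.com"
    assert key.key_timeout is None  # unspecified fields default to None
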
+
+
+#: A dictionary that maps a scheme to a callable that creates a pool key.
+#: This can be used to alter the way pool keys are constructed, if desired.
+#: Each PoolManager makes a copy of this dictionary so they can be configured
+#: globally here, or individually on the instance.
+key_fn_by_scheme = {
+ "http": functools.partial(_default_key_normalizer, PoolKey),
+ "https": functools.partial(_default_key_normalizer, PoolKey),
+}
+
+pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool}
+
+
+class PoolManager(RequestMethods):
+ """
+ Allows for arbitrary requests while transparently keeping track of
+ necessary connection pools for you.
+
+ :param num_pools:
+ Number of connection pools to cache before discarding the least
+ recently used pool.
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+
+ :param \\**connection_pool_kw:
+ Additional parameters are used to create fresh
+ :class:`urllib3.connectionpool.ConnectionPool` instances.
+
+ Example::
+
+ >>> manager = PoolManager(num_pools=2)
+ >>> r = manager.request('GET', 'http://google.com/')
+ >>> r = manager.request('GET', 'http://google.com/mail')
+ >>> r = manager.request('GET', 'http://yahoo.com/')
+ >>> len(manager.pools)
+ 2
+
+ """
+
+ proxy = None
+ proxy_config = None
+
+ def __init__(self, num_pools=10, headers=None, **connection_pool_kw):
+ RequestMethods.__init__(self, headers)
+ self.connection_pool_kw = connection_pool_kw
+ self.pools = RecentlyUsedContainer(num_pools, dispose_func=lambda p: p.close())
+
+ # Locally set the pool classes and keys so other PoolManagers can
+ # override them.
+ self.pool_classes_by_scheme = pool_classes_by_scheme
+ self.key_fn_by_scheme = key_fn_by_scheme.copy()
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ self.clear()
+ # Return False to re-raise any potential exceptions
+ return False
+
+ def _new_pool(self, scheme, host, port, request_context=None):
+ """
+ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and
+ any additional pool keyword arguments.
+
+ If ``request_context`` is provided, it is provided as keyword arguments
+ to the pool class used. This method is used to actually create the
+ connection pools handed out by :meth:`connection_from_url` and
+ companion methods. It is intended to be overridden for customization.
+ """
+ pool_cls = self.pool_classes_by_scheme[scheme]
+ if request_context is None:
+ request_context = self.connection_pool_kw.copy()
+
+ # Although the context has everything necessary to create the pool,
+ # this function has historically only used the scheme, host, and port
+ # in the positional args. When an API change is acceptable these can
+ # be removed.
+ for key in ("scheme", "host", "port"):
+ request_context.pop(key, None)
+
+ if scheme == "http":
+ for kw in SSL_KEYWORDS:
+ request_context.pop(kw, None)
+
+ return pool_cls(host, port, **request_context)
+
+ def clear(self):
+ """
+ Empty our store of pools and direct them all to close.
+
+ This will not affect in-flight connections, but they will not be
+ re-used after completion.
+ """
+ self.pools.clear()
+
+ def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme.
+
+ If ``port`` isn't given, it will be derived from the ``scheme`` using
+ ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is
+ provided, it is merged with the instance's ``connection_pool_kw``
+ variable and used to create the new connection pool, if one is
+ needed.
+ """
+
+ if not host:
+ raise LocationValueError("No host specified.")
+
+ request_context = self._merge_pool_kwargs(pool_kwargs)
+ request_context["scheme"] = scheme or "http"
+ if not port:
+ port = port_by_scheme.get(request_context["scheme"].lower(), 80)
+ request_context["port"] = port
+ request_context["host"] = host
+
+ return self.connection_from_context(request_context)
+
+ def connection_from_context(self, request_context):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context.
+
+ ``request_context`` must at least contain the ``scheme`` key and its
+        value must be a key in the ``key_fn_by_scheme`` instance variable.
+ """
+ scheme = request_context["scheme"].lower()
+ pool_key_constructor = self.key_fn_by_scheme.get(scheme)
+ if not pool_key_constructor:
+ raise URLSchemeUnknown(scheme)
+ pool_key = pool_key_constructor(request_context)
+
+ return self.connection_from_pool_key(pool_key, request_context=request_context)
+
+ def connection_from_pool_key(self, pool_key, request_context=None):
+ """
+ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key.
+
+ ``pool_key`` should be a namedtuple that only contains immutable
+ objects. At a minimum it must have the ``scheme``, ``host``, and
+ ``port`` fields.
+ """
+ with self.pools.lock:
+ # If the scheme, host, or port doesn't match existing open
+ # connections, open a new ConnectionPool.
+ pool = self.pools.get(pool_key)
+ if pool:
+ return pool
+
+ # Make a fresh ConnectionPool of the desired type
+ scheme = request_context["scheme"]
+ host = request_context["host"]
+ port = request_context["port"]
+ pool = self._new_pool(scheme, host, port, request_context=request_context)
+ self.pools[pool_key] = pool
+
+ return pool
+
+ def connection_from_url(self, url, pool_kwargs=None):
+ """
+ Similar to :func:`urllib3.connectionpool.connection_from_url`.
+
+ If ``pool_kwargs`` is not provided and a new pool needs to be
+ constructed, ``self.connection_pool_kw`` is used to initialize
+ the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs``
+ is provided, it is used instead. Note that if a new pool does not
+ need to be created for the request, the provided ``pool_kwargs`` are
+ not used.
+ """
+ u = parse_url(url)
+ return self.connection_from_host(
+ u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs
+ )
+
+ def _merge_pool_kwargs(self, override):
+ """
+ Merge a dictionary of override values for self.connection_pool_kw.
+
+ This does not modify self.connection_pool_kw and returns a new dict.
+ Any keys in the override dictionary with a value of ``None`` are
+ removed from the merged dictionary.
+ """
+ base_pool_kwargs = self.connection_pool_kw.copy()
+ if override:
+ for key, value in override.items():
+ if value is None:
+ try:
+ del base_pool_kwargs[key]
+ except KeyError:
+ pass
+ else:
+ base_pool_kwargs[key] = value
+ return base_pool_kwargs
+
+ def _proxy_requires_url_absolute_form(self, parsed_url):
+ """
+ Indicates if the proxy requires the complete destination URL in the
+ request. Normally this is only needed when not using an HTTP CONNECT
+ tunnel.
+ """
+ if self.proxy is None:
+ return False
+
+ return not connection_requires_http_tunnel(
+ self.proxy, self.proxy_config, parsed_url.scheme
+ )
+
+ def _validate_proxy_scheme_url_selection(self, url_scheme):
+ """
+        Validates that we're not attempting to do TLS-in-TLS connections on
+        Python 2 or with unsupported SSL implementations.
+ """
+ if self.proxy is None or url_scheme != "https":
+ return
+
+ if self.proxy.scheme != "https":
+ return
+
+ if six.PY2 and not self.proxy_config.use_forwarding_for_https:
+ raise ProxySchemeUnsupported(
+ "Contacting HTTPS destinations through HTTPS proxies "
+ "'via CONNECT tunnels' is not supported in Python 2"
+ )
+
+ def urlopen(self, method, url, redirect=True, **kw):
+ """
+        Same as :meth:`urllib3.HTTPConnectionPool.urlopen`
+        with custom cross-host redirect logic; only the request-uri
+        portion of the ``url`` is sent.
+
+ The given ``url`` parameter must be absolute, such that an appropriate
+ :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it.
+ """
+ u = parse_url(url)
+ self._validate_proxy_scheme_url_selection(u.scheme)
+
+ conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme)
+
+ kw["assert_same_host"] = False
+ kw["redirect"] = False
+
+ if "headers" not in kw:
+ kw["headers"] = self.headers.copy()
+
+ if self._proxy_requires_url_absolute_form(u):
+ response = conn.urlopen(method, url, **kw)
+ else:
+ response = conn.urlopen(method, u.request_uri, **kw)
+
+ redirect_location = redirect and response.get_redirect_location()
+ if not redirect_location:
+ return response
+
+ # Support relative URLs for redirecting.
+ redirect_location = urljoin(url, redirect_location)
+
+ # RFC 7231, Section 6.4.4
+ if response.status == 303:
+ method = "GET"
+
+ retries = kw.get("retries")
+ if not isinstance(retries, Retry):
+ retries = Retry.from_int(retries, redirect=redirect)
+
+ # Strip headers marked as unsafe to forward to the redirected location.
+ # Check remove_headers_on_redirect to avoid a potential network call within
+ # conn.is_same_host() which may use socket.gethostbyname() in the future.
+ if retries.remove_headers_on_redirect and not conn.is_same_host(
+ redirect_location
+ ):
+ headers = list(six.iterkeys(kw["headers"]))
+ for header in headers:
+ if header.lower() in retries.remove_headers_on_redirect:
+ kw["headers"].pop(header, None)
+
+ try:
+ retries = retries.increment(method, url, response=response, _pool=conn)
+ except MaxRetryError:
+ if retries.raise_on_redirect:
+ response.drain_conn()
+ raise
+ return response
+
+ kw["retries"] = retries
+ kw["redirect"] = redirect
+
+ log.info("Redirecting %s -> %s", url, redirect_location)
+
+ response.drain_conn()
+ return self.urlopen(method, redirect_location, **kw)
+
+
+class ProxyManager(PoolManager):
+ """
+ Behaves just like :class:`PoolManager`, but sends all requests through
+ the defined proxy, using the CONNECT method for HTTPS URLs.
+
+ :param proxy_url:
+ The URL of the proxy to be used.
+
+ :param proxy_headers:
+        A dictionary containing headers that will be sent to the proxy. In
+        the HTTP case they are sent with each request, while in the
+        HTTPS/CONNECT case they are sent only once. Can be used for proxy
+        authentication.
+
+ :param proxy_ssl_context:
+ The proxy SSL context is used to establish the TLS connection to the
+ proxy when using HTTPS proxies.
+
+ :param use_forwarding_for_https:
+        (Defaults to False) If set to True, requests to the HTTPS proxy will
+        be made on behalf of the client instead of through a TLS tunnel
+        created via the CONNECT method. **Enabling this flag means that request
+ and response headers and content will be visible from the HTTPS proxy**
+ whereas tunneling keeps request and response headers and content
+ private. IP address, target hostname, SNI, and port are always visible
+ to an HTTPS proxy even when this flag is disabled.
+
+ Example:
+ >>> proxy = urllib3.ProxyManager('http://localhost:3128/')
+ >>> r1 = proxy.request('GET', 'http://google.com/')
+ >>> r2 = proxy.request('GET', 'http://httpbin.org/')
+ >>> len(proxy.pools)
+ 1
+ >>> r3 = proxy.request('GET', 'https://httpbin.org/')
+ >>> r4 = proxy.request('GET', 'https://twitter.com/')
+ >>> len(proxy.pools)
+ 3
+
+ """
+
+ def __init__(
+ self,
+ proxy_url,
+ num_pools=10,
+ headers=None,
+ proxy_headers=None,
+ proxy_ssl_context=None,
+ use_forwarding_for_https=False,
+ **connection_pool_kw
+ ):
+
+ if isinstance(proxy_url, HTTPConnectionPool):
+ proxy_url = "%s://%s:%i" % (
+ proxy_url.scheme,
+ proxy_url.host,
+ proxy_url.port,
+ )
+ proxy = parse_url(proxy_url)
+
+ if proxy.scheme not in ("http", "https"):
+ raise ProxySchemeUnknown(proxy.scheme)
+
+ if not proxy.port:
+ port = port_by_scheme.get(proxy.scheme, 80)
+ proxy = proxy._replace(port=port)
+
+ self.proxy = proxy
+ self.proxy_headers = proxy_headers or {}
+ self.proxy_ssl_context = proxy_ssl_context
+ self.proxy_config = ProxyConfig(proxy_ssl_context, use_forwarding_for_https)
+
+ connection_pool_kw["_proxy"] = self.proxy
+ connection_pool_kw["_proxy_headers"] = self.proxy_headers
+ connection_pool_kw["_proxy_config"] = self.proxy_config
+
+ super(ProxyManager, self).__init__(num_pools, headers, **connection_pool_kw)
+
+ def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None):
+ if scheme == "https":
+ return super(ProxyManager, self).connection_from_host(
+ host, port, scheme, pool_kwargs=pool_kwargs
+ )
+
+ return super(ProxyManager, self).connection_from_host(
+ self.proxy.host, self.proxy.port, self.proxy.scheme, pool_kwargs=pool_kwargs
+ )
+
+ def _set_proxy_headers(self, url, headers=None):
+ """
+ Sets headers needed by proxies: specifically, the Accept and Host
+ headers. Only sets headers not provided by the user.
+ """
+ headers_ = {"Accept": "*/*"}
+
+ netloc = parse_url(url).netloc
+ if netloc:
+ headers_["Host"] = netloc
+
+ if headers:
+ headers_.update(headers)
+ return headers_
+
+ def urlopen(self, method, url, redirect=True, **kw):
+        "Same as HTTP(S)ConnectionPool.urlopen; ``url`` must be absolute."
+ u = parse_url(url)
+ if not connection_requires_http_tunnel(self.proxy, self.proxy_config, u.scheme):
+ # For connections using HTTP CONNECT, httplib sets the necessary
+ # headers on the CONNECT to the proxy. If we're not using CONNECT,
+ # we'll definitely need to set 'Host' at the very least.
+ headers = kw.get("headers", self.headers)
+ kw["headers"] = self._set_proxy_headers(url, headers)
+
+ return super(ProxyManager, self).urlopen(method, url, redirect=redirect, **kw)
+
+
+def proxy_from_url(url, **kw):
+ return ProxyManager(proxy_url=url, **kw)
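
An illustrative use of the convenience constructor; the proxy address below is a placeholder and assumes a proxy actually listens there:

    import urllib3

    proxy = urllib3.proxy_from_url("http://localhost:3128/", num_pools=5)
    resp = proxy.request("GET", "http://example.com/")
    print(resp.status)
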
diff --git a/third_party/python/urllib3/urllib3/request.py b/third_party/python/urllib3/urllib3/request.py
new file mode 100644
index 0000000000..398386a5b9
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/request.py
@@ -0,0 +1,170 @@
+from __future__ import absolute_import
+
+from .filepost import encode_multipart_formdata
+from .packages.six.moves.urllib.parse import urlencode
+
+__all__ = ["RequestMethods"]
+
+
+class RequestMethods(object):
+ """
+    Convenience mixin for classes that implement a :meth:`urlopen` method, such
+ as :class:`urllib3.HTTPConnectionPool` and
+ :class:`urllib3.PoolManager`.
+
+ Provides behavior for making common types of HTTP request methods and
+ decides which type of request field encoding to use.
+
+ Specifically,
+
+ :meth:`.request_encode_url` is for sending requests whose fields are
+ encoded in the URL (such as GET, HEAD, DELETE).
+
+ :meth:`.request_encode_body` is for sending requests whose fields are
+ encoded in the *body* of the request using multipart or www-form-urlencoded
+ (such as for POST, PUT, PATCH).
+
+    :meth:`.request` is for making any kind of request; it will look up the
+ appropriate encoding format and use one of the above two methods to make
+ the request.
+
+ Initializer parameters:
+
+ :param headers:
+ Headers to include with all requests, unless other headers are given
+ explicitly.
+ """
+
+ _encode_url_methods = {"DELETE", "GET", "HEAD", "OPTIONS"}
+
+ def __init__(self, headers=None):
+ self.headers = headers or {}
+
+ def urlopen(
+ self,
+ method,
+ url,
+ body=None,
+ headers=None,
+ encode_multipart=True,
+ multipart_boundary=None,
+ **kw
+ ): # Abstract
+ raise NotImplementedError(
+ "Classes extending RequestMethods must implement "
+ "their own ``urlopen`` method."
+ )
+
+ def request(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the appropriate encoding of
+ ``fields`` based on the ``method`` used.
+
+ This is a convenience method that requires the least amount of manual
+ effort. It can be used in most situations, while still having the
+ option to drop down to more specific methods when necessary, such as
+ :meth:`request_encode_url`, :meth:`request_encode_body`,
+ or even the lowest level :meth:`urlopen`.
+ """
+ method = method.upper()
+
+ urlopen_kw["request_url"] = url
+
+ if method in self._encode_url_methods:
+ return self.request_encode_url(
+ method, url, fields=fields, headers=headers, **urlopen_kw
+ )
+ else:
+ return self.request_encode_body(
+ method, url, fields=fields, headers=headers, **urlopen_kw
+ )
+
+ def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+        the URL. This is useful for request methods such as GET, HEAD, and DELETE.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {"headers": headers}
+ extra_kw.update(urlopen_kw)
+
+ if fields:
+ url += "?" + urlencode(fields)
+
+ return self.urlopen(method, url, **extra_kw)
+
+ def request_encode_body(
+ self,
+ method,
+ url,
+ fields=None,
+ headers=None,
+ encode_multipart=True,
+ multipart_boundary=None,
+ **urlopen_kw
+ ):
+ """
+ Make a request using :meth:`urlopen` with the ``fields`` encoded in
+        the body. This is useful for request methods such as POST, PUT, and PATCH.
+
+ When ``encode_multipart=True`` (default), then
+ :func:`urllib3.encode_multipart_formdata` is used to encode
+ the payload with the appropriate content type. Otherwise
+ :func:`urllib.parse.urlencode` is used with the
+ 'application/x-www-form-urlencoded' content type.
+
+        Multipart encoding must be used when posting files, and it's reasonably
+        safe to use it at other times too. However, it may break request
+ signing, such as with OAuth.
+
+ Supports an optional ``fields`` parameter of key/value strings AND
+ key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
+ the MIME type is optional. For example::
+
+ fields = {
+ 'foo': 'bar',
+ 'fakefile': ('foofile.txt', 'contents of foofile'),
+ 'realfile': ('barfile.txt', open('realfile').read()),
+ 'typedfile': ('bazfile.bin', open('bazfile').read(),
+ 'image/jpeg'),
+ 'nonamefile': 'contents of nonamefile field',
+ }
+
+ When uploading a file, providing a filename (the first parameter of the
+        tuple) is optional but recommended to best mimic the behavior of browsers.
+
+ Note that if ``headers`` are supplied, the 'Content-Type' header will
+ be overwritten because it depends on the dynamic random boundary string
+ which is used to compose the body of the request. The random boundary
+ string can be explicitly set with the ``multipart_boundary`` parameter.
+ """
+ if headers is None:
+ headers = self.headers
+
+ extra_kw = {"headers": {}}
+
+ if fields:
+ if "body" in urlopen_kw:
+ raise TypeError(
+ "request got values for both 'fields' and 'body', can only specify one."
+ )
+
+ if encode_multipart:
+ body, content_type = encode_multipart_formdata(
+ fields, boundary=multipart_boundary
+ )
+ else:
+ body, content_type = (
+ urlencode(fields),
+ "application/x-www-form-urlencoded",
+ )
+
+ extra_kw["body"] = body
+ extra_kw["headers"] = {"Content-Type": content_type}
+
+ extra_kw["headers"].update(headers)
+ extra_kw.update(urlopen_kw)
+
+ return self.urlopen(method, url, **extra_kw)
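
To illustrate the dispatch above: fields for GET-style methods travel in the query string, while body-style methods default to multipart encoding (URLs here are placeholders):

    import urllib3

    http = urllib3.PoolManager()

    # Routed through request_encode_url:
    r1 = http.request("GET", "http://example.com/search", fields={"q": "urllib3"})

    # Routed through request_encode_body (multipart by default):
    r2 = http.request(
        "POST",
        "http://example.com/upload",
        fields={"file": ("report.txt", b"contents", "text/plain")},
    )
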
diff --git a/third_party/python/urllib3/urllib3/response.py b/third_party/python/urllib3/urllib3/response.py
new file mode 100644
index 0000000000..38693f4fc6
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/response.py
@@ -0,0 +1,821 @@
+from __future__ import absolute_import
+
+import io
+import logging
+import zlib
+from contextlib import contextmanager
+from socket import error as SocketError
+from socket import timeout as SocketTimeout
+
+try:
+ import brotli
+except ImportError:
+ brotli = None
+
+from ._collections import HTTPHeaderDict
+from .connection import BaseSSLError, HTTPException
+from .exceptions import (
+ BodyNotHttplibCompatible,
+ DecodeError,
+ HTTPError,
+ IncompleteRead,
+ InvalidChunkLength,
+ InvalidHeader,
+ ProtocolError,
+ ReadTimeoutError,
+ ResponseNotChunked,
+ SSLError,
+)
+from .packages import six
+from .util.response import is_fp_closed, is_response_to_head
+
+log = logging.getLogger(__name__)
+
+
+class DeflateDecoder(object):
+ def __init__(self):
+ self._first_try = True
+ self._data = b""
+ self._obj = zlib.decompressobj()
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ if not data:
+ return data
+
+ if not self._first_try:
+ return self._obj.decompress(data)
+
+ self._data += data
+ try:
+ decompressed = self._obj.decompress(data)
+ if decompressed:
+ self._first_try = False
+ self._data = None
+ return decompressed
+ except zlib.error:
+ self._first_try = False
+ self._obj = zlib.decompressobj(-zlib.MAX_WBITS)
+ try:
+ return self.decompress(self._data)
+ finally:
+ self._data = None
+
+
+class GzipDecoderState(object):
+
+ FIRST_MEMBER = 0
+ OTHER_MEMBERS = 1
+ SWALLOW_DATA = 2
+
+
+class GzipDecoder(object):
+ def __init__(self):
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+ self._state = GzipDecoderState.FIRST_MEMBER
+
+ def __getattr__(self, name):
+ return getattr(self._obj, name)
+
+ def decompress(self, data):
+ ret = bytearray()
+ if self._state == GzipDecoderState.SWALLOW_DATA or not data:
+ return bytes(ret)
+ while True:
+ try:
+ ret += self._obj.decompress(data)
+ except zlib.error:
+ previous_state = self._state
+ # Ignore data after the first error
+ self._state = GzipDecoderState.SWALLOW_DATA
+ if previous_state == GzipDecoderState.OTHER_MEMBERS:
+ # Allow trailing garbage acceptable in other gzip clients
+ return bytes(ret)
+ raise
+ data = self._obj.unused_data
+ if not data:
+ return bytes(ret)
+ self._state = GzipDecoderState.OTHER_MEMBERS
+ self._obj = zlib.decompressobj(16 + zlib.MAX_WBITS)
+
+
+if brotli is not None:
+
+ class BrotliDecoder(object):
+ # Supports both 'brotlipy' and 'Brotli' packages
+ # since they share an import name. The top branches
+        # are for 'brotlipy' and the bottom branches for 'Brotli'.
+ def __init__(self):
+ self._obj = brotli.Decompressor()
+ if hasattr(self._obj, "decompress"):
+ self.decompress = self._obj.decompress
+ else:
+ self.decompress = self._obj.process
+
+ def flush(self):
+ if hasattr(self._obj, "flush"):
+ return self._obj.flush()
+ return b""
+
+
+class MultiDecoder(object):
+ """
+    From RFC 7231:
+ If one or more encodings have been applied to a representation, the
+ sender that applied the encodings MUST generate a Content-Encoding
+ header field that lists the content codings in the order in which
+ they were applied.
+ """
+
+ def __init__(self, modes):
+ self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
+
+ def flush(self):
+ return self._decoders[0].flush()
+
+ def decompress(self, data):
+ for d in reversed(self._decoders):
+ data = d.decompress(data)
+ return data
+
+
+def _get_decoder(mode):
+ if "," in mode:
+ return MultiDecoder(mode)
+
+ if mode == "gzip":
+ return GzipDecoder()
+
+ if brotli is not None and mode == "br":
+ return BrotliDecoder()
+
+ return DeflateDecoder()
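
A small sketch of the selection logic; a comma-separated Content-Encoding yields a MultiDecoder, which undoes the codings in reverse of the order they were applied:

    import gzip
    import zlib

    dec = _get_decoder("deflate")
    assert dec.decompress(zlib.compress(b"hello")) == b"hello"

    # "gzip, deflate" means gzip was applied first, then deflate, so the
    # MultiDecoder undoes deflate first and gzip second.
    chained = _get_decoder("gzip, deflate")
    payload = zlib.compress(gzip.compress(b"hello"))
    assert chained.decompress(payload) == b"hello"
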
+
+
+class HTTPResponse(io.IOBase):
+ """
+ HTTP Response container.
+
+ Backwards-compatible with :class:`http.client.HTTPResponse` but the response ``body`` is
+ loaded and decoded on-demand when the ``data`` property is accessed. This
+ class is also compatible with the Python standard library's :mod:`io`
+ module, and can hence be treated as a readable object in the context of that
+ framework.
+
+ Extra parameters for behaviour not present in :class:`http.client.HTTPResponse`:
+
+ :param preload_content:
+ If True, the response's body will be preloaded during construction.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param original_response:
+ When this HTTPResponse wrapper is generated from an :class:`http.client.HTTPResponse`
+ object, it's convenient to include the original for debug purposes. It's
+ otherwise unused.
+
+ :param retries:
+        The last :class:`~urllib3.util.retry.Retry` that
+ was used during the request.
+
+ :param enforce_content_length:
+        Enforce content-length checking. The body returned by the server must
+        match the value of the Content-Length header, if present; otherwise,
+        an error is raised.
+ """
+
+ CONTENT_DECODERS = ["gzip", "deflate"]
+ if brotli is not None:
+ CONTENT_DECODERS += ["br"]
+ REDIRECT_STATUSES = [301, 302, 303, 307, 308]
+
+ def __init__(
+ self,
+ body="",
+ headers=None,
+ status=0,
+ version=0,
+ reason=None,
+ strict=0,
+ preload_content=True,
+ decode_content=True,
+ original_response=None,
+ pool=None,
+ connection=None,
+ msg=None,
+ retries=None,
+ enforce_content_length=False,
+ request_method=None,
+ request_url=None,
+ auto_close=True,
+ ):
+
+ if isinstance(headers, HTTPHeaderDict):
+ self.headers = headers
+ else:
+ self.headers = HTTPHeaderDict(headers)
+ self.status = status
+ self.version = version
+ self.reason = reason
+ self.strict = strict
+ self.decode_content = decode_content
+ self.retries = retries
+ self.enforce_content_length = enforce_content_length
+ self.auto_close = auto_close
+
+ self._decoder = None
+ self._body = None
+ self._fp = None
+ self._original_response = original_response
+ self._fp_bytes_read = 0
+ self.msg = msg
+ self._request_url = request_url
+
+ if body and isinstance(body, (six.string_types, bytes)):
+ self._body = body
+
+ self._pool = pool
+ self._connection = connection
+
+ if hasattr(body, "read"):
+ self._fp = body
+
+ # Are we using the chunked-style of transfer encoding?
+ self.chunked = False
+ self.chunk_left = None
+ tr_enc = self.headers.get("transfer-encoding", "").lower()
+ # Don't incur the penalty of creating a list and then discarding it
+ encodings = (enc.strip() for enc in tr_enc.split(","))
+ if "chunked" in encodings:
+ self.chunked = True
+
+ # Determine length of response
+ self.length_remaining = self._init_length(request_method)
+
+ # If requested, preload the body.
+ if preload_content and not self._body:
+ self._body = self.read(decode_content=decode_content)
+
+ def get_redirect_location(self):
+ """
+ Should we redirect and where to?
+
+ :returns: Truthy redirect location string if we got a redirect status
+ code and valid location. ``None`` if redirect status and no
+ location. ``False`` if not a redirect status code.
+ """
+ if self.status in self.REDIRECT_STATUSES:
+ return self.headers.get("location")
+
+ return False
+
+ def release_conn(self):
+ if not self._pool or not self._connection:
+ return
+
+ self._pool._put_conn(self._connection)
+ self._connection = None
+
+ def drain_conn(self):
+ """
+ Read and discard any remaining HTTP response data in the response connection.
+
+ Unread data in the HTTPResponse connection blocks the connection from being released back to the pool.
+ """
+ try:
+ self.read()
+ except (HTTPError, SocketError, BaseSSLError, HTTPException):
+ pass
+
+ @property
+ def data(self):
+        # For backwards-compat with urllib3 0.4 and earlier.
+ if self._body:
+ return self._body
+
+ if self._fp:
+ return self.read(cache_content=True)
+
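+    # Usage sketch (assumes the response was created with
+    # preload_content=False; `pool` is a hypothetical HTTPConnectionPool):
+    #
+    #     resp = pool.urlopen("GET", "/", preload_content=False)
+    #     payload = resp.data  # reads the remaining body and caches it
+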
+ @property
+ def connection(self):
+ return self._connection
+
+ def isclosed(self):
+ return is_fp_closed(self._fp)
+
+ def tell(self):
+ """
+ Obtain the number of bytes pulled over the wire so far. May differ from
+        the amount of content returned by :meth:`urllib3.response.HTTPResponse.read`
+ if bytes are encoded on the wire (e.g, compressed).
+ """
+ return self._fp_bytes_read
+
+ def _init_length(self, request_method):
+ """
+ Set initial length value for Response content if available.
+ """
+ length = self.headers.get("content-length")
+
+ if length is not None:
+ if self.chunked:
+ # This Response will fail with an IncompleteRead if it can't be
+ # received as chunked. This method falls back to attempt reading
+ # the response before raising an exception.
+ log.warning(
+ "Received response with both Content-Length and "
+ "Transfer-Encoding set. This is expressly forbidden "
+ "by RFC 7230 sec 3.3.2. Ignoring Content-Length and "
+ "attempting to process response as Transfer-Encoding: "
+ "chunked."
+ )
+ return None
+
+ try:
+ # RFC 7230 section 3.3.2 specifies multiple content lengths can
+ # be sent in a single Content-Length header
+ # (e.g. Content-Length: 42, 42). This line ensures the values
+ # are all valid ints and that as long as the `set` length is 1,
+ # all values are the same. Otherwise, the header is invalid.
+ lengths = set([int(val) for val in length.split(",")])
+ if len(lengths) > 1:
+ raise InvalidHeader(
+ "Content-Length contained multiple "
+ "unmatching values (%s)" % length
+ )
+ length = lengths.pop()
+ except ValueError:
+ length = None
+ else:
+ if length < 0:
+ length = None
+
+ # Convert status to int for comparison
+ # In some cases, httplib returns a status of "_UNKNOWN"
+ try:
+ status = int(self.status)
+ except ValueError:
+ status = 0
+
+ # Check for responses that shouldn't include a body
+ if status in (204, 304) or 100 <= status < 200 or request_method == "HEAD":
+ length = 0
+
+ return length
+
+ def _init_decoder(self):
+ """
+ Set-up the _decoder attribute if necessary.
+ """
+ # Note: content-encoding value should be case-insensitive, per RFC 7230
+ # Section 3.2
+ content_encoding = self.headers.get("content-encoding", "").lower()
+ if self._decoder is None:
+ if content_encoding in self.CONTENT_DECODERS:
+ self._decoder = _get_decoder(content_encoding)
+ elif "," in content_encoding:
+ encodings = [
+ e.strip()
+ for e in content_encoding.split(",")
+ if e.strip() in self.CONTENT_DECODERS
+ ]
+ if len(encodings):
+ self._decoder = _get_decoder(content_encoding)
+
+ DECODER_ERROR_CLASSES = (IOError, zlib.error)
+ if brotli is not None:
+ DECODER_ERROR_CLASSES += (brotli.error,)
+
+ def _decode(self, data, decode_content, flush_decoder):
+ """
+ Decode the data passed in and potentially flush the decoder.
+ """
+ if not decode_content:
+ return data
+
+ try:
+ if self._decoder:
+ data = self._decoder.decompress(data)
+ except self.DECODER_ERROR_CLASSES as e:
+ content_encoding = self.headers.get("content-encoding", "").lower()
+ raise DecodeError(
+ "Received response with content-encoding: %s, but "
+ "failed to decode it." % content_encoding,
+ e,
+ )
+ if flush_decoder:
+ data += self._flush_decoder()
+
+ return data
+
+ def _flush_decoder(self):
+ """
+ Flushes the decoder. Should only be called if the decoder is actually
+ being used.
+ """
+ if self._decoder:
+ buf = self._decoder.decompress(b"")
+ return buf + self._decoder.flush()
+
+ return b""
+
+ @contextmanager
+ def _error_catcher(self):
+ """
+ Catch low-level python exceptions, instead re-raising urllib3
+ variants, so that low-level exceptions are not leaked in the
+ high-level api.
+
+ On exit, release the connection back to the pool.
+ """
+ clean_exit = False
+
+ try:
+ try:
+ yield
+
+ except SocketTimeout:
+ # FIXME: Ideally we'd like to include the url in the ReadTimeoutError but
+ # there is yet no clean way to get at it from this context.
+ raise ReadTimeoutError(self._pool, None, "Read timed out.")
+
+ except BaseSSLError as e:
+ # FIXME: Is there a better way to differentiate between SSLErrors?
+ if "read operation timed out" not in str(e):
+ # SSL errors related to framing/MAC get wrapped and reraised here
+ raise SSLError(e)
+
+ raise ReadTimeoutError(self._pool, None, "Read timed out.")
+
+ except (HTTPException, SocketError) as e:
+ # This includes IncompleteRead.
+ raise ProtocolError("Connection broken: %r" % e, e)
+
+ # If no exception is thrown, we should avoid cleaning up
+ # unnecessarily.
+ clean_exit = True
+ finally:
+ # If we didn't terminate cleanly, we need to throw away our
+ # connection.
+ if not clean_exit:
+ # The response may not be closed but we're not going to use it
+ # anymore so close it now to ensure that the connection is
+ # released back to the pool.
+ if self._original_response:
+ self._original_response.close()
+
+ # Closing the response may not actually be sufficient to close
+ # everything, so if we have a hold of the connection close that
+ # too.
+ if self._connection:
+ self._connection.close()
+
+ # If we hold the original response but it's closed now, we should
+ # return the connection back to the pool.
+ if self._original_response and self._original_response.isclosed():
+ self.release_conn()
+
+ def read(self, amt=None, decode_content=None, cache_content=False):
+ """
+ Similar to :meth:`http.client.HTTPResponse.read`, but with two additional
+ parameters: ``decode_content`` and ``cache_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+
+ :param cache_content:
+ If True, will save the returned data such that the same result is
+        returned regardless of the state of the underlying file object. This
+ is useful if you want the ``.data`` property to continue working
+ after having ``.read()`` the file object. (Overridden if ``amt`` is
+ set.)
+ """
+ self._init_decoder()
+ if decode_content is None:
+ decode_content = self.decode_content
+
+ if self._fp is None:
+ return
+
+ flush_decoder = False
+ fp_closed = getattr(self._fp, "closed", False)
+
+ with self._error_catcher():
+ if amt is None:
+ # cStringIO doesn't like amt=None
+ data = self._fp.read() if not fp_closed else b""
+ flush_decoder = True
+ else:
+ cache_content = False
+ data = self._fp.read(amt) if not fp_closed else b""
+ if (
+ amt != 0 and not data
+ ): # Platform-specific: Buggy versions of Python.
+ # Close the connection when no data is returned
+ #
+ # This is redundant to what httplib/http.client _should_
+ # already do. However, versions of python released before
+ # December 15, 2012 (http://bugs.python.org/issue16298) do
+ # not properly close the connection in all cases. There is
+ # no harm in redundantly calling close.
+ self._fp.close()
+ flush_decoder = True
+ if self.enforce_content_length and self.length_remaining not in (
+ 0,
+ None,
+ ):
+ # This is an edge case that httplib failed to cover due
+ # to concerns of backward compatibility. We're
+ # addressing it here to make sure IncompleteRead is
+ # raised during streaming, so all calls with incorrect
+ # Content-Length are caught.
+ raise IncompleteRead(self._fp_bytes_read, self.length_remaining)
+
+ if data:
+ self._fp_bytes_read += len(data)
+ if self.length_remaining is not None:
+ self.length_remaining -= len(data)
+
+ data = self._decode(data, decode_content, flush_decoder)
+
+ if cache_content:
+ self._body = data
+
+ return data
+
+ def stream(self, amt=2 ** 16, decode_content=None):
+ """
+ A generator wrapper for the read() method. A call will block until
+ ``amt`` bytes have been read from the connection or until the
+ connection is closed.
+
+ :param amt:
+        How much of the content to read. The generator will return up to
+        this much data per iteration, but may return less. This is particularly
+ likely when using compressed data. However, the empty string will
+ never be returned.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ if self.chunked and self.supports_chunked_reads():
+ for line in self.read_chunked(amt, decode_content=decode_content):
+ yield line
+ else:
+ while not is_fp_closed(self._fp):
+ data = self.read(amt=amt, decode_content=decode_content)
+
+ if data:
+ yield data
+
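+    # Usage sketch (assumes a response obtained with preload_content=False;
+    # `consume` is a hypothetical callback):
+    #
+    #     for chunk in resp.stream(2 ** 16, decode_content=True):
+    #         consume(chunk)
+    #     resp.release_conn()
+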
+ @classmethod
+ def from_httplib(ResponseCls, r, **response_kw):
+ """
+ Given an :class:`http.client.HTTPResponse` instance ``r``, return a
+ corresponding :class:`urllib3.response.HTTPResponse` object.
+
+ Remaining parameters are passed to the HTTPResponse constructor, along
+ with ``original_response=r``.
+ """
+ headers = r.msg
+
+ if not isinstance(headers, HTTPHeaderDict):
+ if six.PY2:
+ # Python 2.7
+ headers = HTTPHeaderDict.from_httplib(headers)
+ else:
+ headers = HTTPHeaderDict(headers.items())
+
+ # HTTPResponse objects in Python 3 don't have a .strict attribute
+ strict = getattr(r, "strict", 0)
+ resp = ResponseCls(
+ body=r,
+ headers=headers,
+ status=r.status,
+ version=r.version,
+ reason=r.reason,
+ strict=strict,
+ original_response=r,
+ **response_kw
+ )
+ return resp
+
+ # Backwards-compatibility methods for http.client.HTTPResponse
+ def getheaders(self):
+ return self.headers
+
+ def getheader(self, name, default=None):
+ return self.headers.get(name, default)
+
+ # Backwards compatibility for http.cookiejar
+ def info(self):
+ return self.headers
+
+ # Overrides from io.IOBase
+ def close(self):
+ if not self.closed:
+ self._fp.close()
+
+ if self._connection:
+ self._connection.close()
+
+ if not self.auto_close:
+ io.IOBase.close(self)
+
+ @property
+ def closed(self):
+ if not self.auto_close:
+ return io.IOBase.closed.__get__(self)
+ elif self._fp is None:
+ return True
+ elif hasattr(self._fp, "isclosed"):
+ return self._fp.isclosed()
+ elif hasattr(self._fp, "closed"):
+ return self._fp.closed
+ else:
+ return True
+
+ def fileno(self):
+ if self._fp is None:
+ raise IOError("HTTPResponse has no file to get a fileno from")
+ elif hasattr(self._fp, "fileno"):
+ return self._fp.fileno()
+ else:
+ raise IOError(
+ "The file-like object this HTTPResponse is wrapped "
+ "around has no file descriptor"
+ )
+
+ def flush(self):
+ if (
+ self._fp is not None
+ and hasattr(self._fp, "flush")
+ and not getattr(self._fp, "closed", False)
+ ):
+ return self._fp.flush()
+
+ def readable(self):
+ # This method is required for `io` module compatibility.
+ return True
+
+ def readinto(self, b):
+ # This method is required for `io` module compatibility.
+ temp = self.read(len(b))
+ if len(temp) == 0:
+ return 0
+ else:
+ b[: len(temp)] = temp
+ return len(temp)
+
+ def supports_chunked_reads(self):
+ """
+ Checks if the underlying file-like object looks like a
+ :class:`http.client.HTTPResponse` object. We do this by testing for
+ the fp attribute. If it is present we assume it returns raw chunks as
+ processed by read_chunked().
+ """
+ return hasattr(self._fp, "fp")
+
+ def _update_chunk_length(self):
+        # First, we'll figure out the length of a chunk and then
+        # we'll try to read it from the socket.
+ if self.chunk_left is not None:
+ return
+ line = self._fp.fp.readline()
+ line = line.split(b";", 1)[0]
+ try:
+ self.chunk_left = int(line, 16)
+ except ValueError:
+ # Invalid chunked protocol response, abort.
+ self.close()
+ raise InvalidChunkLength(self, line)
+
+ def _handle_chunk(self, amt):
+ returned_chunk = None
+ if amt is None:
+ chunk = self._fp._safe_read(self.chunk_left)
+ returned_chunk = chunk
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ elif amt < self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self.chunk_left = self.chunk_left - amt
+ returned_chunk = value
+ elif amt == self.chunk_left:
+ value = self._fp._safe_read(amt)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ returned_chunk = value
+ else: # amt > self.chunk_left
+ returned_chunk = self._fp._safe_read(self.chunk_left)
+ self._fp._safe_read(2) # Toss the CRLF at the end of the chunk.
+ self.chunk_left = None
+ return returned_chunk
+
+ def read_chunked(self, amt=None, decode_content=None):
+ """
+ Similar to :meth:`HTTPResponse.read`, but with an additional
+ parameter: ``decode_content``.
+
+ :param amt:
+ How much of the content to read. If specified, caching is skipped
+ because it doesn't make sense to cache partial content as the full
+ response.
+
+ :param decode_content:
+ If True, will attempt to decode the body based on the
+ 'content-encoding' header.
+ """
+ self._init_decoder()
+ # FIXME: Rewrite this method and make it a class with a better structured logic.
+ if not self.chunked:
+ raise ResponseNotChunked(
+ "Response is not chunked. "
+ "Header 'transfer-encoding: chunked' is missing."
+ )
+ if not self.supports_chunked_reads():
+ raise BodyNotHttplibCompatible(
+ "Body should be http.client.HTTPResponse like. "
+ "It should have have an fp attribute which returns raw chunks."
+ )
+
+ with self._error_catcher():
+ # Don't bother reading the body of a HEAD request.
+ if self._original_response and is_response_to_head(self._original_response):
+ self._original_response.close()
+ return
+
+ # If a response is already read and closed
+ # then return immediately.
+ if self._fp.fp is None:
+ return
+
+ while True:
+ self._update_chunk_length()
+ if self.chunk_left == 0:
+ break
+ chunk = self._handle_chunk(amt)
+ decoded = self._decode(
+ chunk, decode_content=decode_content, flush_decoder=False
+ )
+ if decoded:
+ yield decoded
+
+ if decode_content:
+ # On CPython and PyPy, we should never need to flush the
+ # decoder. However, on Jython we *might* need to, so
+            # let's defensively do it anyway.
+ decoded = self._flush_decoder()
+ if decoded: # Platform-specific: Jython.
+ yield decoded
+
+ # Chunk content ends with \r\n: discard it.
+ while True:
+ line = self._fp.fp.readline()
+ if not line:
+ # Some sites may not end with '\r\n'.
+ break
+ if line == b"\r\n":
+ break
+
+ # We read everything; close the "file".
+ if self._original_response:
+ self._original_response.close()
+
+ def geturl(self):
+ """
+ Returns the URL that was the source of this response.
+ If the request that generated this response redirected, this method
+ will return the final redirect location.
+ """
+ if self.retries is not None and len(self.retries.history):
+ return self.retries.history[-1].redirect_location
+ else:
+ return self._request_url
+
+ def __iter__(self):
+ buffer = []
+ for chunk in self.stream(decode_content=True):
+ if b"\n" in chunk:
+ chunk = chunk.split(b"\n")
+ yield b"".join(buffer) + chunk[0] + b"\n"
+ for x in chunk[1:-1]:
+ yield x + b"\n"
+ if chunk[-1]:
+ buffer = [chunk[-1]]
+ else:
+ buffer = []
+ else:
+ buffer.append(chunk)
+ if buffer:
+ yield b"".join(buffer)
diff --git a/third_party/python/urllib3/urllib3/util/__init__.py b/third_party/python/urllib3/urllib3/util/__init__.py
new file mode 100644
index 0000000000..4547fc522b
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/__init__.py
@@ -0,0 +1,49 @@
+from __future__ import absolute_import
+
+# For backwards compatibility, provide imports that used to be here.
+from .connection import is_connection_dropped
+from .request import SKIP_HEADER, SKIPPABLE_HEADERS, make_headers
+from .response import is_fp_closed
+from .retry import Retry
+from .ssl_ import (
+ ALPN_PROTOCOLS,
+ HAS_SNI,
+ IS_PYOPENSSL,
+ IS_SECURETRANSPORT,
+ PROTOCOL_TLS,
+ SSLContext,
+ assert_fingerprint,
+ resolve_cert_reqs,
+ resolve_ssl_version,
+ ssl_wrap_socket,
+)
+from .timeout import Timeout, current_time
+from .url import Url, get_host, parse_url, split_first
+from .wait import wait_for_read, wait_for_write
+
+__all__ = (
+ "HAS_SNI",
+ "IS_PYOPENSSL",
+ "IS_SECURETRANSPORT",
+ "SSLContext",
+ "PROTOCOL_TLS",
+ "ALPN_PROTOCOLS",
+ "Retry",
+ "Timeout",
+ "Url",
+ "assert_fingerprint",
+ "current_time",
+ "is_connection_dropped",
+ "is_fp_closed",
+ "get_host",
+ "parse_url",
+ "make_headers",
+ "resolve_cert_reqs",
+ "resolve_ssl_version",
+ "split_first",
+ "ssl_wrap_socket",
+ "wait_for_read",
+ "wait_for_write",
+ "SKIP_HEADER",
+ "SKIPPABLE_HEADERS",
+)
diff --git a/third_party/python/urllib3/urllib3/util/connection.py b/third_party/python/urllib3/urllib3/util/connection.py
new file mode 100644
index 0000000000..cd57455748
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/connection.py
@@ -0,0 +1,150 @@
+from __future__ import absolute_import
+
+import socket
+
+from urllib3.exceptions import LocationParseError
+
+from ..contrib import _appengine_environ
+from ..packages import six
+from .wait import NoWayToWaitForSocketError, wait_for_read
+
+
+def is_connection_dropped(conn): # Platform-specific
+ """
+ Returns True if the connection is dropped and should be closed.
+
+ :param conn:
+ :class:`http.client.HTTPConnection` object.
+
+ Note: For platforms like AppEngine, this will always return ``False`` to
+ let the platform handle connection recycling transparently for us.
+ """
+ sock = getattr(conn, "sock", False)
+ if sock is False: # Platform-specific: AppEngine
+ return False
+ if sock is None: # Connection already closed (such as by httplib).
+ return True
+ try:
+ # Returns True if readable, which here means it's been dropped
+ return wait_for_read(sock, timeout=0.0)
+ except NoWayToWaitForSocketError: # Platform-specific: AppEngine
+ return False
+
+
+# This function is copied from socket.py in the Python 2.7 standard
+# library test suite. The only addition to its signature is `socket_options`.
+# One further modification is that we avoid binding to IPv6 servers
+# discovered in DNS if the system doesn't have IPv6 functionality.
+def create_connection(
+ address,
+ timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
+ source_address=None,
+ socket_options=None,
+):
+ """Connect to *address* and return the socket object.
+
+ Convenience function. Connect to *address* (a 2-tuple ``(host,
+ port)``) and return the socket object. Passing the optional
+ *timeout* parameter will set the timeout on the socket instance
+ before attempting to connect. If no *timeout* is supplied, the
+ global default timeout setting returned by :func:`socket.getdefaulttimeout`
+ is used. If *source_address* is set it must be a tuple of (host, port)
+ for the socket to bind as a source address before making the connection.
+    A host of '' or port 0 tells the OS to use the default.
+ """
+
+ host, port = address
+ if host.startswith("["):
+ host = host.strip("[]")
+ err = None
+
+ # Using the value from allowed_gai_family() in the context of getaddrinfo lets
+ # us select whether to work with IPv4 DNS records, IPv6 records, or both.
+ # The original create_connection function always returns all records.
+ family = allowed_gai_family()
+
+ try:
+ host.encode("idna")
+ except UnicodeError:
+ return six.raise_from(
+ LocationParseError(u"'%s', label empty or too long" % host), None
+ )
+
+ for res in socket.getaddrinfo(host, port, family, socket.SOCK_STREAM):
+ af, socktype, proto, canonname, sa = res
+ sock = None
+ try:
+ sock = socket.socket(af, socktype, proto)
+
+ # If provided, set socket level options before connecting.
+ _set_socket_options(sock, socket_options)
+
+ if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
+ sock.settimeout(timeout)
+ if source_address:
+ sock.bind(source_address)
+ sock.connect(sa)
+ return sock
+
+ except socket.error as e:
+ err = e
+ if sock is not None:
+ sock.close()
+ sock = None
+
+ if err is not None:
+ raise err
+
+ raise socket.error("getaddrinfo returns an empty list")
+
+
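+# Usage sketch (host, port, timeout and the TCP_NODELAY option are
+# illustrative values, not defaults):
+#
+#     sock = create_connection(
+#         ("example.com", 443),
+#         timeout=5.0,
+#         socket_options=[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)],
+#     )
+
+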
+def _set_socket_options(sock, options):
+ if options is None:
+ return
+
+ for opt in options:
+ sock.setsockopt(*opt)
+
+
+def allowed_gai_family():
+ """This function is designed to work in the context of
+ getaddrinfo, where family=socket.AF_UNSPEC is the default and
+ will perform a DNS search for both IPv6 and IPv4 records."""
+
+ family = socket.AF_INET
+ if HAS_IPV6:
+ family = socket.AF_UNSPEC
+ return family
+
+
+def _has_ipv6(host):
+ """ Returns True if the system can bind an IPv6 address. """
+ sock = None
+ has_ipv6 = False
+
+ # App Engine doesn't support IPV6 sockets and actually has a quota on the
+ # number of sockets that can be used, so just early out here instead of
+ # creating a socket needlessly.
+ # See https://github.com/urllib3/urllib3/issues/1446
+ if _appengine_environ.is_appengine_sandbox():
+ return False
+
+ if socket.has_ipv6:
+ # has_ipv6 returns true if cPython was compiled with IPv6 support.
+ # It does not tell us if the system has IPv6 support enabled. To
+ # determine that we must bind to an IPv6 address.
+ # https://github.com/urllib3/urllib3/pull/611
+ # https://bugs.python.org/issue658327
+ try:
+ sock = socket.socket(socket.AF_INET6)
+ sock.bind((host, 0))
+ has_ipv6 = True
+ except Exception:
+ pass
+
+ if sock:
+ sock.close()
+ return has_ipv6
+
+
+HAS_IPV6 = _has_ipv6("::1")
diff --git a/third_party/python/urllib3/urllib3/util/proxy.py b/third_party/python/urllib3/urllib3/util/proxy.py
new file mode 100644
index 0000000000..34f884d5b3
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/proxy.py
@@ -0,0 +1,56 @@
+from .ssl_ import create_urllib3_context, resolve_cert_reqs, resolve_ssl_version
+
+
+def connection_requires_http_tunnel(
+ proxy_url=None, proxy_config=None, destination_scheme=None
+):
+ """
+ Returns True if the connection requires an HTTP CONNECT through the proxy.
+
+ :param URL proxy_url:
+ URL of the proxy.
+ :param ProxyConfig proxy_config:
+ Proxy configuration from poolmanager.py
+ :param str destination_scheme:
+        The scheme of the destination (e.g. https, http, etc.).
+ """
+ # If we're not using a proxy, no way to use a tunnel.
+ if proxy_url is None:
+ return False
+
+ # HTTP destinations never require tunneling, we always forward.
+ if destination_scheme == "http":
+ return False
+
+ # Support for forwarding with HTTPS proxies and HTTPS destinations.
+ if (
+ proxy_url.scheme == "https"
+ and proxy_config
+ and proxy_config.use_forwarding_for_https
+ ):
+ return False
+
+ # Otherwise always use a tunnel.
+ return True
+
+
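+# Behaviour sketch, following the rules above (`parse_url` comes from .url;
+# the hostnames are illustrative):
+#
+#     connection_requires_http_tunnel(None)  # False: no proxy at all
+#     connection_requires_http_tunnel(parse_url("http://p"), None, "http")
+#     # False: plain HTTP is always forwarded
+#     connection_requires_http_tunnel(parse_url("http://p"), None, "https")
+#     # True: HTTPS through an HTTP proxy needs CONNECT
+
+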
+def create_proxy_ssl_context(
+ ssl_version, cert_reqs, ca_certs=None, ca_cert_dir=None, ca_cert_data=None
+):
+ """
+ Generates a default proxy ssl context if one hasn't been provided by the
+ user.
+ """
+ ssl_context = create_urllib3_context(
+ ssl_version=resolve_ssl_version(ssl_version),
+ cert_reqs=resolve_cert_reqs(cert_reqs),
+ )
+ if (
+ not ca_certs
+ and not ca_cert_dir
+ and not ca_cert_data
+ and hasattr(ssl_context, "load_default_certs")
+ ):
+ ssl_context.load_default_certs()
+
+ return ssl_context
diff --git a/third_party/python/urllib3/urllib3/util/queue.py b/third_party/python/urllib3/urllib3/util/queue.py
new file mode 100644
index 0000000000..41784104ee
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/queue.py
@@ -0,0 +1,22 @@
+import collections
+
+from ..packages import six
+from ..packages.six.moves import queue
+
+if six.PY2:
+ # Queue is imported for side effects on MS Windows. See issue #229.
+ import Queue as _unused_module_Queue # noqa: F401
+
+
+class LifoQueue(queue.Queue):
+ def _init(self, _):
+ self.queue = collections.deque()
+
+ def _qsize(self, len=len):
+ return len(self.queue)
+
+ def _put(self, item):
+ self.queue.append(item)
+
+ def _get(self):
+ return self.queue.pop()
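+
+
+# LIFO sketch: the connection released most recently is reused first,
+# which keeps the pool's sockets warm.
+#
+#     q = LifoQueue()
+#     q.put("a")
+#     q.put("b")
+#     q.get()  # -> "b"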
diff --git a/third_party/python/urllib3/urllib3/util/request.py b/third_party/python/urllib3/urllib3/util/request.py
new file mode 100644
index 0000000000..25103383ec
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/request.py
@@ -0,0 +1,143 @@
+from __future__ import absolute_import
+
+from base64 import b64encode
+
+from ..exceptions import UnrewindableBodyError
+from ..packages.six import b, integer_types
+
+# Pass as a value within ``headers`` to skip
+# emitting some HTTP headers that are added automatically.
+# The only headers that are supported are ``Accept-Encoding``,
+# ``Host``, and ``User-Agent``.
+SKIP_HEADER = "@@@SKIP_HEADER@@@"
+SKIPPABLE_HEADERS = frozenset(["accept-encoding", "host", "user-agent"])
+
+ACCEPT_ENCODING = "gzip,deflate"
+try:
+ import brotli as _unused_module_brotli # noqa: F401
+except ImportError:
+ pass
+else:
+ ACCEPT_ENCODING += ",br"
+
+_FAILEDTELL = object()
+
+
+def make_headers(
+ keep_alive=None,
+ accept_encoding=None,
+ user_agent=None,
+ basic_auth=None,
+ proxy_basic_auth=None,
+ disable_cache=None,
+):
+ """
+ Shortcuts for generating request headers.
+
+ :param keep_alive:
+ If ``True``, adds 'connection: keep-alive' header.
+
+ :param accept_encoding:
+ Can be a boolean, list, or string.
+ ``True`` translates to 'gzip,deflate'.
+ List will get joined by comma.
+ String will be used as provided.
+
+ :param user_agent:
+ String representing the user-agent you want, such as
+ "python-urllib3/0.6"
+
+ :param basic_auth:
+ Colon-separated username:password string for 'authorization: basic ...'
+ auth header.
+
+ :param proxy_basic_auth:
+ Colon-separated username:password string for 'proxy-authorization: basic ...'
+ auth header.
+
+ :param disable_cache:
+ If ``True``, adds 'cache-control: no-cache' header.
+
+ Example::
+
+ >>> make_headers(keep_alive=True, user_agent="Batman/1.0")
+ {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
+ >>> make_headers(accept_encoding=True)
+ {'accept-encoding': 'gzip,deflate'}
+ """
+ headers = {}
+ if accept_encoding:
+ if isinstance(accept_encoding, str):
+ pass
+ elif isinstance(accept_encoding, list):
+ accept_encoding = ",".join(accept_encoding)
+ else:
+ accept_encoding = ACCEPT_ENCODING
+ headers["accept-encoding"] = accept_encoding
+
+ if user_agent:
+ headers["user-agent"] = user_agent
+
+ if keep_alive:
+ headers["connection"] = "keep-alive"
+
+ if basic_auth:
+ headers["authorization"] = "Basic " + b64encode(b(basic_auth)).decode("utf-8")
+
+ if proxy_basic_auth:
+ headers["proxy-authorization"] = "Basic " + b64encode(
+ b(proxy_basic_auth)
+ ).decode("utf-8")
+
+ if disable_cache:
+ headers["cache-control"] = "no-cache"
+
+ return headers
+
+
+def set_file_position(body, pos):
+ """
+ If a position is provided, move file to that point.
+ Otherwise, we'll attempt to record a position for future use.
+ """
+ if pos is not None:
+ rewind_body(body, pos)
+ elif getattr(body, "tell", None) is not None:
+ try:
+ pos = body.tell()
+ except (IOError, OSError):
+ # This differentiates from None, allowing us to catch
+ # a failed `tell()` later when trying to rewind the body.
+ pos = _FAILEDTELL
+
+ return pos
+
+
+def rewind_body(body, body_pos):
+ """
+ Attempt to rewind body to a certain position.
+ Primarily used for request redirects and retries.
+
+ :param body:
+ File-like object that supports seek.
+
+    :param int body_pos:
+ Position to seek to in file.
+ """
+ body_seek = getattr(body, "seek", None)
+ if body_seek is not None and isinstance(body_pos, integer_types):
+ try:
+ body_seek(body_pos)
+ except (IOError, OSError):
+ raise UnrewindableBodyError(
+ "An error occurred when rewinding request body for redirect/retry."
+ )
+ elif body_pos is _FAILEDTELL:
+ raise UnrewindableBodyError(
+ "Unable to record file position for rewinding "
+ "request body during a redirect/retry."
+ )
+ else:
+ raise ValueError(
+ "body_pos must be of type integer, instead it was %s." % type(body_pos)
+ )
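+
+
+# Rewind sketch for redirects/retries (`body` is any seekable file-like
+# object; the flow below is illustrative):
+#
+#     pos = set_file_position(body, None)  # records body.tell() or _FAILEDTELL
+#     # ... send the request, hit a redirect or retry ...
+#     rewind_body(body, pos)               # seek back before re-sending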
diff --git a/third_party/python/urllib3/urllib3/util/response.py b/third_party/python/urllib3/urllib3/util/response.py
new file mode 100644
index 0000000000..5ea609cced
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/response.py
@@ -0,0 +1,107 @@
+from __future__ import absolute_import
+
+from email.errors import MultipartInvariantViolationDefect, StartBoundaryNotFoundDefect
+
+from ..exceptions import HeaderParsingError
+from ..packages.six.moves import http_client as httplib
+
+
+def is_fp_closed(obj):
+ """
+ Checks whether a given file-like object is closed.
+
+ :param obj:
+ The file-like object to check.
+ """
+
+ try:
+ # Check `isclosed()` first, in case Python3 doesn't set `closed`.
+ # GH Issue #928
+ return obj.isclosed()
+ except AttributeError:
+ pass
+
+ try:
+ # Check via the official file-like-object way.
+ return obj.closed
+ except AttributeError:
+ pass
+
+ try:
+ # Check if the object is a container for another file-like object that
+ # gets released on exhaustion (e.g. HTTPResponse).
+ return obj.fp is None
+ except AttributeError:
+ pass
+
+ raise ValueError("Unable to determine whether fp is closed.")
+
+
+def assert_header_parsing(headers):
+ """
+ Asserts whether all headers have been successfully parsed.
+ Extracts encountered errors from the result of parsing headers.
+
+ Only works on Python 3.
+
+ :param http.client.HTTPMessage headers: Headers to verify.
+
+ :raises urllib3.exceptions.HeaderParsingError:
+ If parsing errors are found.
+ """
+
+ # This will fail silently if we pass in the wrong kind of parameter.
+ # To make debugging easier add an explicit check.
+ if not isinstance(headers, httplib.HTTPMessage):
+ raise TypeError("expected httplib.Message, got {0}.".format(type(headers)))
+
+ defects = getattr(headers, "defects", None)
+ get_payload = getattr(headers, "get_payload", None)
+
+ unparsed_data = None
+ if get_payload:
+ # get_payload is actually email.message.Message.get_payload;
+ # we're only interested in the result if it's not a multipart message
+ if not headers.is_multipart():
+ payload = get_payload()
+
+ if isinstance(payload, (bytes, str)):
+ unparsed_data = payload
+ if defects:
+ # httplib is assuming a response body is available
+ # when parsing headers even when httplib only sends
+        # header data to parse_headers(). This results in
+ # defects on multipart responses in particular.
+ # See: https://github.com/urllib3/urllib3/issues/800
+
+ # So we ignore the following defects:
+ # - StartBoundaryNotFoundDefect:
+ # The claimed start boundary was never found.
+ # - MultipartInvariantViolationDefect:
+ # A message claimed to be a multipart but no subparts were found.
+ defects = [
+ defect
+ for defect in defects
+ if not isinstance(
+ defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)
+ )
+ ]
+
+ if defects or unparsed_data:
+ raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
+
+
+def is_response_to_head(response):
+ """
+    Checks whether the request that produced a response was a HEAD request.
+ Handles the quirks of AppEngine.
+
+ :param http.client.HTTPResponse response:
+ Response to check if the originating request
+ used 'HEAD' as a method.
+ """
+ # FIXME: Can we do this somehow without accessing private httplib _method?
+ method = response._method
+ if isinstance(method, int): # Platform-specific: Appengine
+ return method == 3
+ return method.upper() == "HEAD"
diff --git a/third_party/python/urllib3/urllib3/util/retry.py b/third_party/python/urllib3/urllib3/util/retry.py
new file mode 100644
index 0000000000..ee51f922f8
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/retry.py
@@ -0,0 +1,601 @@
+from __future__ import absolute_import
+
+import email
+import logging
+import re
+import time
+import warnings
+from collections import namedtuple
+from itertools import takewhile
+
+from ..exceptions import (
+ ConnectTimeoutError,
+ InvalidHeader,
+ MaxRetryError,
+ ProtocolError,
+ ProxyError,
+ ReadTimeoutError,
+ ResponseError,
+)
+from ..packages import six
+
+log = logging.getLogger(__name__)
+
+
+# Data structure for representing the metadata of requests that result in a retry.
+RequestHistory = namedtuple(
+ "RequestHistory", ["method", "url", "error", "status", "redirect_location"]
+)
+
+
+# TODO: In v2 we can remove this sentinel and metaclass with deprecated options.
+_Default = object()
+
+
+class _RetryMeta(type):
+ @property
+ def DEFAULT_METHOD_WHITELIST(cls):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_METHODS_ALLOWED' instead",
+ DeprecationWarning,
+ )
+ return cls.DEFAULT_ALLOWED_METHODS
+
+ @DEFAULT_METHOD_WHITELIST.setter
+ def DEFAULT_METHOD_WHITELIST(cls, value):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_METHOD_WHITELIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_ALLOWED_METHODS' instead",
+ DeprecationWarning,
+ )
+ cls.DEFAULT_ALLOWED_METHODS = value
+
+ @property
+ def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
+ DeprecationWarning,
+ )
+ return cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+
+ @DEFAULT_REDIRECT_HEADERS_BLACKLIST.setter
+ def DEFAULT_REDIRECT_HEADERS_BLACKLIST(cls, value):
+ warnings.warn(
+ "Using 'Retry.DEFAULT_REDIRECT_HEADERS_BLACKLIST' is deprecated and "
+ "will be removed in v2.0. Use 'Retry.DEFAULT_REMOVE_HEADERS_ON_REDIRECT' instead",
+ DeprecationWarning,
+ )
+ cls.DEFAULT_REMOVE_HEADERS_ON_REDIRECT = value
+
+
+@six.add_metaclass(_RetryMeta)
+class Retry(object):
+ """Retry configuration.
+
+ Each retry attempt will create a new Retry object with updated values, so
+ they can be safely reused.
+
+ Retries can be defined as a default for a pool::
+
+ retries = Retry(connect=5, read=2, redirect=5)
+ http = PoolManager(retries=retries)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool)::
+
+ response = http.request('GET', 'http://example.com/', retries=Retry(10))
+
+ Retries can be disabled by passing ``False``::
+
+ response = http.request('GET', 'http://example.com/', retries=False)
+
+ Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless
+ retries are disabled, in which case the causing exception will be raised.
+
+ :param int total:
+ Total number of retries to allow. Takes precedence over other counts.
+
+ Set to ``None`` to remove this constraint and fall back on other
+ counts.
+
+ Set to ``0`` to fail on the first retry.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int connect:
+ How many connection-related errors to retry on.
+
+ These are errors raised before the request is sent to the remote server,
+ which we assume has not triggered the server to process the request.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int read:
+ How many times to retry on read errors.
+
+ These errors are raised after the request was sent to the server, so the
+ request may have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int redirect:
+ How many redirects to perform. Limit this to avoid infinite redirect
+ loops.
+
+        A redirect is an HTTP response with a status code 301, 302, 303, 307 or
+ 308.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ Set to ``False`` to disable and imply ``raise_on_redirect=False``.
+
+ :param int status:
+ How many times to retry on bad status codes.
+
+ These are retries made on responses, where status code matches
+ ``status_forcelist``.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ :param int other:
+ How many times to retry on other errors.
+
+ Other errors are errors that are not connect, read, redirect or status errors.
+ These errors might be raised after the request was sent to the server, so the
+ request might have side-effects.
+
+ Set to ``0`` to fail on the first retry of this type.
+
+ If ``total`` is not set, it's a good idea to set this to 0 to account
+ for unexpected edge cases and avoid infinite retry loops.
+
+ :param iterable allowed_methods:
+ Set of uppercased HTTP method verbs that we should retry on.
+
+ By default, we only retry on methods which are considered to be
+ idempotent (multiple requests with the same parameters end with the
+ same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.
+
+ Set to a ``False`` value to retry on any verb.
+
+ .. warning::
+
+            Previously this parameter was named ``method_whitelist``; that
+            usage is deprecated in v1.26.0 and will be removed in v2.0.
+
+ :param iterable status_forcelist:
+ A set of integer HTTP status codes that we should force a retry on.
+ A retry is initiated if the request method is in ``allowed_methods``
+ and the response status code is in ``status_forcelist``.
+
+ By default, this is disabled with ``None``.
+
+ :param float backoff_factor:
+ A backoff factor to apply between attempts after the second try
+ (most errors are resolved immediately by a second try without a
+ delay). urllib3 will sleep for::
+
+ {backoff factor} * (2 ** ({number of total retries} - 1))
+
+ seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
+ for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
+ than :attr:`Retry.BACKOFF_MAX`.
+
+ By default, backoff is disabled (set to 0).
+
+ :param bool raise_on_redirect: Whether, if the number of redirects is
+ exhausted, to raise a MaxRetryError, or to return a response with a
+ response code in the 3xx range.
+
+ :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:
+ whether we should raise an exception, or return a response,
+ if status falls in ``status_forcelist`` range and retries have
+ been exhausted.
+
+ :param tuple history: The history of the request encountered during
+ each call to :meth:`~Retry.increment`. The list is in the order
+ the requests occurred. Each list item is of class :class:`RequestHistory`.
+
+ :param bool respect_retry_after_header:
+ Whether to respect Retry-After header on status codes defined as
+ :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.
+
+ :param iterable remove_headers_on_redirect:
+ Sequence of headers to remove from the request when a response
+ indicating a redirect is returned before firing off the redirected
+ request.
+ """
+
+ #: Default methods to be used for ``allowed_methods``
+ DEFAULT_ALLOWED_METHODS = frozenset(
+ ["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"]
+ )
+
+    #: Default status codes for which the ``Retry-After`` header is respected
+ RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])
+
+ #: Default headers to be used for ``remove_headers_on_redirect``
+ DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset(["Authorization"])
+
+ #: Maximum backoff time.
+ BACKOFF_MAX = 120
+
+ def __init__(
+ self,
+ total=10,
+ connect=None,
+ read=None,
+ redirect=None,
+ status=None,
+ other=None,
+ allowed_methods=_Default,
+ status_forcelist=None,
+ backoff_factor=0,
+ raise_on_redirect=True,
+ raise_on_status=True,
+ history=None,
+ respect_retry_after_header=True,
+ remove_headers_on_redirect=_Default,
+ # TODO: Deprecated, remove in v2.0
+ method_whitelist=_Default,
+ ):
+
+ if method_whitelist is not _Default:
+ if allowed_methods is not _Default:
+ raise ValueError(
+ "Using both 'allowed_methods' and "
+ "'method_whitelist' together is not allowed. "
+ "Instead only use 'allowed_methods'"
+ )
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ allowed_methods = method_whitelist
+ if allowed_methods is _Default:
+ allowed_methods = self.DEFAULT_ALLOWED_METHODS
+ if remove_headers_on_redirect is _Default:
+ remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT
+
+ self.total = total
+ self.connect = connect
+ self.read = read
+ self.status = status
+ self.other = other
+
+ if redirect is False or total is False:
+ redirect = 0
+ raise_on_redirect = False
+
+ self.redirect = redirect
+ self.status_forcelist = status_forcelist or set()
+ self.allowed_methods = allowed_methods
+ self.backoff_factor = backoff_factor
+ self.raise_on_redirect = raise_on_redirect
+ self.raise_on_status = raise_on_status
+ self.history = history or tuple()
+ self.respect_retry_after_header = respect_retry_after_header
+ self.remove_headers_on_redirect = frozenset(
+ [h.lower() for h in remove_headers_on_redirect]
+ )
+
+ def new(self, **kw):
+ params = dict(
+ total=self.total,
+ connect=self.connect,
+ read=self.read,
+ redirect=self.redirect,
+ status=self.status,
+ other=self.other,
+ status_forcelist=self.status_forcelist,
+ backoff_factor=self.backoff_factor,
+ raise_on_redirect=self.raise_on_redirect,
+ raise_on_status=self.raise_on_status,
+ history=self.history,
+ remove_headers_on_redirect=self.remove_headers_on_redirect,
+ respect_retry_after_header=self.respect_retry_after_header,
+ )
+
+        # TODO: Remove in v2.0.
+        # If 'method_whitelist' or 'allowed_methods' is already given in
+        # **kw, we use what's given to us. Otherwise we decide based on
+        # whether this instance has a 'method_whitelist' property: if so,
+        # we pass the deprecated 'method_whitelist', else 'allowed_methods'.
+ if "method_whitelist" not in kw and "allowed_methods" not in kw:
+ if "method_whitelist" in self.__dict__:
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ params["method_whitelist"] = self.allowed_methods
+ else:
+ params["allowed_methods"] = self.allowed_methods
+
+ params.update(kw)
+ return type(self)(**params)
+
+ @classmethod
+ def from_int(cls, retries, redirect=True, default=None):
+ """ Backwards-compatibility for the old retries format."""
+ if retries is None:
+ retries = default if default is not None else cls.DEFAULT
+
+ if isinstance(retries, Retry):
+ return retries
+
+ redirect = bool(redirect) and None
+ new_retries = cls(retries, redirect=redirect)
+ log.debug("Converted retries value: %r -> %r", retries, new_retries)
+ return new_retries
+
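+    # Sketch of the normalization performed by from_int:
+    #
+    #     Retry.from_int(None)   # -> Retry.DEFAULT, i.e. Retry(3)
+    #     Retry.from_int(5)      # -> Retry(5, redirect=None)
+    #     Retry.from_int(False)  # -> retries and redirect-raising disabled
+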
+ def get_backoff_time(self):
+ """Formula for computing the current backoff
+
+ :rtype: float
+ """
+ # We want to consider only the last consecutive errors sequence (Ignore redirects).
+ consecutive_errors_len = len(
+ list(
+ takewhile(lambda x: x.redirect_location is None, reversed(self.history))
+ )
+ )
+ if consecutive_errors_len <= 1:
+ return 0
+
+ backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))
+ return min(self.BACKOFF_MAX, backoff_value)
+
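+    # Worked example: with backoff_factor=0.5 and three consecutive errors
+    # in the history, the backoff is min(BACKOFF_MAX, 0.5 * 2 ** (3 - 1)),
+    # i.e. 2.0 seconds; one error or none yields 0.
+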
+ def parse_retry_after(self, retry_after):
+ # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4
+ if re.match(r"^\s*[0-9]+\s*$", retry_after):
+ seconds = int(retry_after)
+ else:
+ retry_date_tuple = email.utils.parsedate_tz(retry_after)
+ if retry_date_tuple is None:
+ raise InvalidHeader("Invalid Retry-After header: %s" % retry_after)
+ if retry_date_tuple[9] is None: # Python 2
+ # Assume UTC if no timezone was specified
+                # On Python 2.7, parsedate_tz returns None for the timezone
+                # offset instead of 0 when no timezone is given, and
+                # mktime_tz treats a None timezone offset as local time.
+ retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]
+
+ retry_date = email.utils.mktime_tz(retry_date_tuple)
+ seconds = retry_date - time.time()
+
+ if seconds < 0:
+ seconds = 0
+
+ return seconds
+
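+    # Sketch of the two Retry-After forms accepted above (values are
+    # illustrative):
+    #
+    #     retry.parse_retry_after("120")  # -> 120 (delta-seconds)
+    #     retry.parse_retry_after("Fri, 01 Jan 2100 00:00:00 GMT")
+    #     # -> seconds until that date, clamped to >= 0
+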
+ def get_retry_after(self, response):
+ """ Get the value of Retry-After in seconds. """
+
+ retry_after = response.getheader("Retry-After")
+
+ if retry_after is None:
+ return None
+
+ return self.parse_retry_after(retry_after)
+
+ def sleep_for_retry(self, response=None):
+ retry_after = self.get_retry_after(response)
+ if retry_after:
+ time.sleep(retry_after)
+ return True
+
+ return False
+
+ def _sleep_backoff(self):
+ backoff = self.get_backoff_time()
+ if backoff <= 0:
+ return
+ time.sleep(backoff)
+
+ def sleep(self, response=None):
+ """Sleep between retry attempts.
+
+ This method will respect a server's ``Retry-After`` response header
+ and sleep the duration of the time requested. If that is not present, it
+ will use an exponential backoff. By default, the backoff factor is 0 and
+ this method will return immediately.
+ """
+
+ if self.respect_retry_after_header and response:
+ slept = self.sleep_for_retry(response)
+ if slept:
+ return
+
+ self._sleep_backoff()
+
+ def _is_connection_error(self, err):
+ """Errors when we're fairly sure that the server did not receive the
+ request, so it should be safe to retry.
+ """
+ if isinstance(err, ProxyError):
+ err = err.original_error
+ return isinstance(err, ConnectTimeoutError)
+
+ def _is_read_error(self, err):
+ """Errors that occur after the request has been started, so we should
+ assume that the server began processing it.
+ """
+ return isinstance(err, (ReadTimeoutError, ProtocolError))
+
+ def _is_method_retryable(self, method):
+ """Checks if a given HTTP method should be retried upon, depending if
+ it is included in the allowed_methods
+ """
+        # TODO: For now, favor a 'method_whitelist' that a Retry subclass sets
+        # outside of our constructor, to avoid breaking custom implementations.
+ if "method_whitelist" in self.__dict__:
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ allowed_methods = self.method_whitelist
+ else:
+ allowed_methods = self.allowed_methods
+
+ if allowed_methods and method.upper() not in allowed_methods:
+ return False
+ return True
+
+ def is_retry(self, method, status_code, has_retry_after=False):
+ """Is this method/status code retryable? (Based on allowlists and control
+ variables such as the number of total retries to allow, whether to
+ respect the Retry-After header, whether this header is present, and
+ whether the returned status code is on the list of status codes to
+        be retried upon in the presence of the aforementioned header)
+ """
+ if not self._is_method_retryable(method):
+ return False
+
+ if self.status_forcelist and status_code in self.status_forcelist:
+ return True
+
+ return (
+ self.total
+ and self.respect_retry_after_header
+ and has_retry_after
+ and (status_code in self.RETRY_AFTER_STATUS_CODES)
+ )
+
+ def is_exhausted(self):
+ """ Are we out of retries? """
+ retry_counts = (
+ self.total,
+ self.connect,
+ self.read,
+ self.redirect,
+ self.status,
+ self.other,
+ )
+ retry_counts = list(filter(None, retry_counts))
+ if not retry_counts:
+ return False
+
+ return min(retry_counts) < 0
+
+ def increment(
+ self,
+ method=None,
+ url=None,
+ response=None,
+ error=None,
+ _pool=None,
+ _stacktrace=None,
+ ):
+ """Return a new Retry object with incremented retry counters.
+
+ :param response: A response object, or None, if the server did not
+ return a response.
+ :type response: :class:`~urllib3.response.HTTPResponse`
+ :param Exception error: An error encountered during the request, or
+ None if the response was received successfully.
+
+ :return: A new ``Retry`` object.
+ """
+ if self.total is False and error:
+ # Disabled, indicate to re-raise the error.
+ raise six.reraise(type(error), error, _stacktrace)
+
+ total = self.total
+ if total is not None:
+ total -= 1
+
+ connect = self.connect
+ read = self.read
+ redirect = self.redirect
+ status_count = self.status
+ other = self.other
+ cause = "unknown"
+ status = None
+ redirect_location = None
+
+ if error and self._is_connection_error(error):
+ # Connect retry?
+ if connect is False:
+ raise six.reraise(type(error), error, _stacktrace)
+ elif connect is not None:
+ connect -= 1
+
+ elif error and self._is_read_error(error):
+ # Read retry?
+ if read is False or not self._is_method_retryable(method):
+ raise six.reraise(type(error), error, _stacktrace)
+ elif read is not None:
+ read -= 1
+
+ elif error:
+ # Other retry?
+ if other is not None:
+ other -= 1
+
+ elif response and response.get_redirect_location():
+ # Redirect retry?
+ if redirect is not None:
+ redirect -= 1
+ cause = "too many redirects"
+ redirect_location = response.get_redirect_location()
+ status = response.status
+
+ else:
+ # Incrementing because of a server error like a 500 in
+ # status_forcelist and the given method is in the allowed_methods
+ cause = ResponseError.GENERIC_ERROR
+ if response and response.status:
+ if status_count is not None:
+ status_count -= 1
+ cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)
+ status = response.status
+
+ history = self.history + (
+ RequestHistory(method, url, error, status, redirect_location),
+ )
+
+ new_retry = self.new(
+ total=total,
+ connect=connect,
+ read=read,
+ redirect=redirect,
+ status=status_count,
+ other=other,
+ history=history,
+ )
+
+ if new_retry.is_exhausted():
+ raise MaxRetryError(_pool, url, error or ResponseError(cause))
+
+ log.debug("Incremented Retry for (url='%s'): %r", url, new_retry)
+
+ return new_retry
+
+ def __repr__(self):
+ return (
+ "{cls.__name__}(total={self.total}, connect={self.connect}, "
+ "read={self.read}, redirect={self.redirect}, status={self.status})"
+ ).format(cls=type(self), self=self)
+
+ def __getattr__(self, item):
+ if item == "method_whitelist":
+ # TODO: Remove this deprecated alias in v2.0
+ warnings.warn(
+ "Using 'method_whitelist' with Retry is deprecated and "
+ "will be removed in v2.0. Use 'allowed_methods' instead",
+ DeprecationWarning,
+ )
+ return self.allowed_methods
+ try:
+ return getattr(super(Retry, self), item)
+ except AttributeError:
+ return getattr(Retry, item)
+
+
+# For backwards compatibility (equivalent to pre-v1.9):
+Retry.DEFAULT = Retry(3)
diff --git a/third_party/python/urllib3/urllib3/util/ssl_.py b/third_party/python/urllib3/urllib3/util/ssl_.py
new file mode 100644
index 0000000000..1cb5e7cdc1
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/ssl_.py
@@ -0,0 +1,466 @@
+from __future__ import absolute_import
+
+import hmac
+import os
+import sys
+import warnings
+from binascii import hexlify, unhexlify
+from hashlib import md5, sha1, sha256
+
+from ..exceptions import (
+ InsecurePlatformWarning,
+ ProxySchemeUnsupported,
+ SNIMissingWarning,
+ SSLError,
+)
+from ..packages import six
+from .url import BRACELESS_IPV6_ADDRZ_RE, IPV4_RE
+
+SSLContext = None
+SSLTransport = None
+HAS_SNI = False
+IS_PYOPENSSL = False
+IS_SECURETRANSPORT = False
+ALPN_PROTOCOLS = ["http/1.1"]
+
+# Maps the length of a digest to a possible hash function producing this digest
+HASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}
+
+
+def _const_compare_digest_backport(a, b):
+ """
+ Compare two digests of equal length in constant time.
+
+ The digests must be of type str/bytes.
+ Returns True if the digests match, and False otherwise.
+ """
+ result = abs(len(a) - len(b))
+ for left, right in zip(bytearray(a), bytearray(b)):
+ result |= left ^ right
+ return result == 0
+
+
+_const_compare_digest = getattr(hmac, "compare_digest", _const_compare_digest_backport)
+
+try: # Test for SSL features
+ import ssl
+ from ssl import HAS_SNI # Has SNI?
+ from ssl import CERT_REQUIRED, wrap_socket
+
+ from .ssltransport import SSLTransport
+except ImportError:
+ pass
+
+try: # Platform-specific: Python 3.6
+ from ssl import PROTOCOL_TLS
+
+ PROTOCOL_SSLv23 = PROTOCOL_TLS
+except ImportError:
+ try:
+ from ssl import PROTOCOL_SSLv23 as PROTOCOL_TLS
+
+ PROTOCOL_SSLv23 = PROTOCOL_TLS
+ except ImportError:
+ PROTOCOL_SSLv23 = PROTOCOL_TLS = 2
+
+
+try:
+ from ssl import OP_NO_COMPRESSION, OP_NO_SSLv2, OP_NO_SSLv3
+except ImportError:
+ OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
+ OP_NO_COMPRESSION = 0x20000
+
+
+try: # OP_NO_TICKET was added in Python 3.6
+ from ssl import OP_NO_TICKET
+except ImportError:
+ OP_NO_TICKET = 0x4000
+
+
+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
+# security,
+# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
+# - disable NULL authentication, MD5 MACs, DSS, and other
+# insecure ciphers for security reasons.
+# - NOTE: TLS 1.3 cipher suites are managed through a different interface
+# not exposed by CPython (yet!) and are enabled by default if they're available.
+DEFAULT_CIPHERS = ":".join(
+ [
+ "ECDHE+AESGCM",
+ "ECDHE+CHACHA20",
+ "DHE+AESGCM",
+ "DHE+CHACHA20",
+ "ECDH+AESGCM",
+ "DH+AESGCM",
+ "ECDH+AES",
+ "DH+AES",
+ "RSA+AESGCM",
+ "RSA+AES",
+ "!aNULL",
+ "!eNULL",
+ "!MD5",
+ "!DSS",
+ ]
+)
+
+try:
+ from ssl import SSLContext # Modern SSL?
+except ImportError:
+
+ class SSLContext(object): # Platform-specific: Python 2
+ def __init__(self, protocol_version):
+ self.protocol = protocol_version
+ # Use default values from a real SSLContext
+ self.check_hostname = False
+ self.verify_mode = ssl.CERT_NONE
+ self.ca_certs = None
+ self.options = 0
+ self.certfile = None
+ self.keyfile = None
+ self.ciphers = None
+
+ def load_cert_chain(self, certfile, keyfile):
+ self.certfile = certfile
+ self.keyfile = keyfile
+
+ def load_verify_locations(self, cafile=None, capath=None, cadata=None):
+ self.ca_certs = cafile
+
+ if capath is not None:
+ raise SSLError("CA directories not supported in older Pythons")
+
+ if cadata is not None:
+ raise SSLError("CA data not supported in older Pythons")
+
+ def set_ciphers(self, cipher_suite):
+ self.ciphers = cipher_suite
+
+ def wrap_socket(self, socket, server_hostname=None, server_side=False):
+ warnings.warn(
+ "A true SSLContext object is not available. This prevents "
+ "urllib3 from configuring SSL appropriately and may cause "
+ "certain SSL connections to fail. You can upgrade to a newer "
+ "version of Python to solve this. For more information, see "
+ "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
+ "#ssl-warnings",
+ InsecurePlatformWarning,
+ )
+ kwargs = {
+ "keyfile": self.keyfile,
+ "certfile": self.certfile,
+ "ca_certs": self.ca_certs,
+ "cert_reqs": self.verify_mode,
+ "ssl_version": self.protocol,
+ "server_side": server_side,
+ }
+ return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
+
+
+def assert_fingerprint(cert, fingerprint):
+ """
+ Checks if given fingerprint matches the supplied certificate.
+
+ :param cert:
+ Certificate as bytes object.
+ :param fingerprint:
+ Fingerprint as string of hexdigits, can be interspersed by colons.
+ """
+
+ fingerprint = fingerprint.replace(":", "").lower()
+ digest_length = len(fingerprint)
+ hashfunc = HASHFUNC_MAP.get(digest_length)
+ if not hashfunc:
+ raise SSLError("Fingerprint of invalid length: {0}".format(fingerprint))
+
+    # We need encode() here for py32; works on py2 and py3.
+ fingerprint_bytes = unhexlify(fingerprint.encode())
+
+ cert_digest = hashfunc(cert).digest()
+
+ if not _const_compare_digest(cert_digest, fingerprint_bytes):
+ raise SSLError(
+ 'Fingerprints did not match. Expected "{0}", got "{1}".'.format(
+ fingerprint, hexlify(cert_digest)
+ )
+ )
+
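+# Usage sketch (the pinned fingerprint below is a hypothetical sha256 value;
+# on mismatch an SSLError is raised rather than returning False):
+#
+#     der_cert = tls_sock.getpeercert(binary_form=True)
+#     assert_fingerprint(
+#         der_cert,
+#         "9f:86:d0:81:88:4c:7d:65:9a:2f:ea:a0:c5:5a:d0:15:"
+#         "a3:bf:4f:1b:2b:0b:82:2c:d1:5d:6c:15:b0:f0:0a:08",
+#     )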
+
+def resolve_cert_reqs(candidate):
+ """
+ Resolves the argument to a numeric constant, which can be passed to
+ the wrap_socket function/method from the ssl module.
+ Defaults to :data:`ssl.CERT_REQUIRED`.
+ If given a string it is assumed to be the name of the constant in the
+ :mod:`ssl` module or its abbreviation.
+    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+    If it's neither `None` nor a string, we assume it is already the numeric
+    constant which can be passed directly to wrap_socket.
+ """
+ if candidate is None:
+ return CERT_REQUIRED
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, "CERT_" + candidate)
+ return res
+
+ return candidate
+
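+# Sketch of the resolution rules above (assuming the ssl module imported
+# successfully): all of these return ssl.CERT_REQUIRED.
+#
+#     >>> resolve_cert_reqs(None) == ssl.CERT_REQUIRED
+#     True
+#     >>> resolve_cert_reqs("REQUIRED") == resolve_cert_reqs("CERT_REQUIRED")
+#     True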
+
+def resolve_ssl_version(candidate):
+ """
+    Like resolve_cert_reqs(), but resolves the argument to a numeric
+    PROTOCOL_* constant from the :mod:`ssl` module.
+    Defaults to :data:`PROTOCOL_TLS`.
+ """
+ if candidate is None:
+ return PROTOCOL_TLS
+
+ if isinstance(candidate, str):
+ res = getattr(ssl, candidate, None)
+ if res is None:
+ res = getattr(ssl, "PROTOCOL_" + candidate)
+ return res
+
+ return candidate
+
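+# Sketch: strings resolve against the ssl module, with or without the
+# "PROTOCOL_" prefix (assuming the constant exists on this Python build).
+#
+#     >>> resolve_ssl_version("TLSv1_2") == ssl.PROTOCOL_TLSv1_2
+#     True
+#     >>> resolve_ssl_version(None) == PROTOCOL_TLS
+#     True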
+
+def create_urllib3_context(
+ ssl_version=None, cert_reqs=None, options=None, ciphers=None
+):
+ """All arguments have the same meaning as ``ssl_wrap_socket``.
+
+ By default, this function does a lot of the same work that
+ ``ssl.create_default_context`` does on Python 3.4+. It:
+
+ - Disables SSLv2, SSLv3, and compression
+ - Sets a restricted set of server ciphers
+
+ If you wish to enable SSLv3, you can do::
+
+ from urllib3.util import ssl_
+ context = ssl_.create_urllib3_context()
+ context.options &= ~ssl_.OP_NO_SSLv3
+
+ You can do the same to enable compression (substituting ``COMPRESSION``
+ for ``SSLv3`` in the last line above).
+
+ :param ssl_version:
+ The desired protocol version to use. This will default to
+ PROTOCOL_SSLv23 which will negotiate the highest protocol that both
+ the server and your installation of OpenSSL support.
+ :param cert_reqs:
+ Whether to require the certificate verification. This defaults to
+ ``ssl.CERT_REQUIRED``.
+ :param options:
+ Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
+ ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.
+ :param ciphers:
+ Which cipher suites to allow the server to select.
+ :returns:
+ Constructed SSLContext object with specified options
+ :rtype: SSLContext
+ """
+ context = SSLContext(ssl_version or PROTOCOL_TLS)
+
+ context.set_ciphers(ciphers or DEFAULT_CIPHERS)
+
+ # Setting the default here, as we may have no ssl module on import
+ cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs
+
+ if options is None:
+ options = 0
+ # SSLv2 is easily broken and is considered harmful and dangerous
+ options |= OP_NO_SSLv2
+ # SSLv3 has several problems and is now dangerous
+ options |= OP_NO_SSLv3
+ # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
+ # (issue #309)
+ options |= OP_NO_COMPRESSION
+ # TLSv1.2 only. Unless set explicitly, do not request tickets.
+        # This may save some bandwidth on the wire, and although the ticket is
+        # encrypted, there is a risk associated with it being on the wire
+        # if the server is not rotating its ticketing keys properly.
+ options |= OP_NO_TICKET
+
+ context.options |= options
+
+ # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is
+ # necessary for conditional client cert authentication with TLS 1.3.
+ # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older
+ # versions of Python. We only enable on Python 3.7.4+ or if certificate
+ # verification is enabled to work around Python issue #37428
+ # See: https://bugs.python.org/issue37428
+ if (cert_reqs == ssl.CERT_REQUIRED or sys.version_info >= (3, 7, 4)) and getattr(
+ context, "post_handshake_auth", None
+ ) is not None:
+ context.post_handshake_auth = True
+
+ context.verify_mode = cert_reqs
+ if (
+ getattr(context, "check_hostname", None) is not None
+ ): # Platform-specific: Python 3.2
+ # We do our own verification, including fingerprints and alternative
+ # hostnames. So disable it here
+ context.check_hostname = False
+
+ # Enable logging of TLS session keys via defacto standard environment variable
+ # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.
+ if hasattr(context, "keylog_filename"):
+ sslkeylogfile = os.environ.get("SSLKEYLOGFILE")
+ if sslkeylogfile:
+ context.keylog_filename = sslkeylogfile
+
+ return context
+
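+# Usage sketch: the resulting context has the hardened defaults described in
+# the docstring and can be passed straight to ssl_wrap_socket below.
+#
+#     >>> ctx = create_urllib3_context()
+#     >>> ctx.verify_mode == ssl.CERT_REQUIRED
+#     True
+#     >>> bool(ctx.options & OP_NO_SSLv3)
+#     True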
+
+def ssl_wrap_socket(
+ sock,
+ keyfile=None,
+ certfile=None,
+ cert_reqs=None,
+ ca_certs=None,
+ server_hostname=None,
+ ssl_version=None,
+ ciphers=None,
+ ssl_context=None,
+ ca_cert_dir=None,
+ key_password=None,
+ ca_cert_data=None,
+ tls_in_tls=False,
+):
+ """
+ All arguments except for server_hostname, ssl_context, and ca_cert_dir have
+ the same meaning as they do when using :func:`ssl.wrap_socket`.
+
+ :param server_hostname:
+ When SNI is supported, the expected hostname of the certificate
+ :param ssl_context:
+ A pre-made :class:`SSLContext` object. If none is provided, one will
+ be created using :func:`create_urllib3_context`.
+ :param ciphers:
+ A string of ciphers we wish the client to support.
+ :param ca_cert_dir:
+ A directory containing CA certificates in multiple separate files, as
+ supported by OpenSSL's -CApath flag or the capath argument to
+ SSLContext.load_verify_locations().
+ :param key_password:
+ Optional password if the keyfile is encrypted.
+ :param ca_cert_data:
+ Optional string containing CA certificates in PEM format suitable for
+ passing as the cadata parameter to SSLContext.load_verify_locations()
+ :param tls_in_tls:
+ Use SSLTransport to wrap the existing socket.
+ """
+ context = ssl_context
+ if context is None:
+ # Note: This branch of code and all the variables in it are no longer
+ # used by urllib3 itself. We should consider deprecating and removing
+ # this code.
+ context = create_urllib3_context(ssl_version, cert_reqs, ciphers=ciphers)
+
+ if ca_certs or ca_cert_dir or ca_cert_data:
+ try:
+ context.load_verify_locations(ca_certs, ca_cert_dir, ca_cert_data)
+ except (IOError, OSError) as e:
+ raise SSLError(e)
+
+ elif ssl_context is None and hasattr(context, "load_default_certs"):
+        # Try to load OS default certs; works well on Windows (requires Python 3.4+).
+ context.load_default_certs()
+
+ # Attempt to detect if we get the goofy behavior of the
+ # keyfile being encrypted and OpenSSL asking for the
+ # passphrase via the terminal and instead error out.
+ if keyfile and key_password is None and _is_key_file_encrypted(keyfile):
+ raise SSLError("Client private key is encrypted, password is required")
+
+ if certfile:
+ if key_password is None:
+ context.load_cert_chain(certfile, keyfile)
+ else:
+ context.load_cert_chain(certfile, keyfile, key_password)
+
+ try:
+ if hasattr(context, "set_alpn_protocols"):
+ context.set_alpn_protocols(ALPN_PROTOCOLS)
+ except NotImplementedError:
+ pass
+
+ # If we detect server_hostname is an IP address then the SNI
+ # extension should not be used according to RFC3546 Section 3.1
+ use_sni_hostname = server_hostname and not is_ipaddress(server_hostname)
+ # SecureTransport uses server_hostname in certificate verification.
+ send_sni = (use_sni_hostname and HAS_SNI) or (
+ IS_SECURETRANSPORT and server_hostname
+ )
+ # Do not warn the user if server_hostname is an invalid SNI hostname.
+ if not HAS_SNI and use_sni_hostname:
+ warnings.warn(
+ "An HTTPS request has been made, but the SNI (Server Name "
+ "Indication) extension to TLS is not available on this platform. "
+ "This may cause the server to present an incorrect TLS "
+ "certificate, which can cause validation failures. You can upgrade to "
+ "a newer version of Python to solve this. For more information, see "
+ "https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
+ "#ssl-warnings",
+ SNIMissingWarning,
+ )
+
+ if send_sni:
+ ssl_sock = _ssl_wrap_socket_impl(
+ sock, context, tls_in_tls, server_hostname=server_hostname
+ )
+ else:
+ ssl_sock = _ssl_wrap_socket_impl(sock, context, tls_in_tls)
+ return ssl_sock
+
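+# Usage sketch (host, port, and CA bundle path are hypothetical):
+#
+#     import socket
+#     raw = socket.create_connection(("example.com", 443))
+#     tls = ssl_wrap_socket(
+#         raw,
+#         server_hostname="example.com",
+#         ca_certs="/etc/ssl/certs/ca-certificates.crt",
+#     )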
+
+def is_ipaddress(hostname):
+ """Detects whether the hostname given is an IPv4 or IPv6 address.
+ Also detects IPv6 addresses with Zone IDs.
+
+ :param str hostname: Hostname to examine.
+ :return: True if the hostname is an IP address, False otherwise.
+ """
+ if not six.PY2 and isinstance(hostname, bytes):
+ # IDN A-label bytes are ASCII compatible.
+ hostname = hostname.decode("ascii")
+ return bool(IPV4_RE.match(hostname) or BRACELESS_IPV6_ADDRZ_RE.match(hostname))
+
+
+def _is_key_file_encrypted(key_file):
+ """Detects if a key file is encrypted or not."""
+ with open(key_file, "r") as f:
+ for line in f:
+ # Look for Proc-Type: 4,ENCRYPTED
+ if "ENCRYPTED" in line:
+ return True
+
+ return False
+
+
+def _ssl_wrap_socket_impl(sock, ssl_context, tls_in_tls, server_hostname=None):
+ if tls_in_tls:
+ if not SSLTransport:
+ # Import error, ssl is not available.
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires support for the 'ssl' module"
+ )
+
+ SSLTransport._validate_ssl_context_for_tls_in_tls(ssl_context)
+ return SSLTransport(sock, ssl_context, server_hostname)
+
+ if server_hostname:
+ return ssl_context.wrap_socket(sock, server_hostname=server_hostname)
+ else:
+ return ssl_context.wrap_socket(sock)
diff --git a/third_party/python/urllib3/urllib3/util/ssltransport.py b/third_party/python/urllib3/urllib3/util/ssltransport.py
new file mode 100644
index 0000000000..1e41354f5d
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/ssltransport.py
@@ -0,0 +1,221 @@
+import io
+import socket
+import ssl
+
+from urllib3.exceptions import ProxySchemeUnsupported
+from urllib3.packages import six
+
+SSL_BLOCKSIZE = 16384
+
+
+class SSLTransport:
+ """
+ The SSLTransport wraps an existing socket and establishes an SSL connection.
+
+ Contrary to Python's implementation of SSLSocket, it allows you to chain
+ multiple TLS connections together. It's particularly useful if you need to
+ implement TLS within TLS.
+
+ The class supports most of the socket API operations.
+ """
+
+ @staticmethod
+ def _validate_ssl_context_for_tls_in_tls(ssl_context):
+ """
+ Raises a ProxySchemeUnsupported if the provided ssl_context can't be used
+ for TLS in TLS.
+
+        The only requirement is that the ssl_context provides the 'wrap_bio'
+        method.
+ """
+
+ if not hasattr(ssl_context, "wrap_bio"):
+ if six.PY2:
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires SSLContext.wrap_bio() which isn't "
+ "supported on Python 2"
+ )
+ else:
+ raise ProxySchemeUnsupported(
+ "TLS in TLS requires SSLContext.wrap_bio() which isn't "
+ "available on non-native SSLContext"
+ )
+
+ def __init__(
+ self, socket, ssl_context, server_hostname=None, suppress_ragged_eofs=True
+ ):
+ """
+ Create an SSLTransport around socket using the provided ssl_context.
+ """
+ self.incoming = ssl.MemoryBIO()
+ self.outgoing = ssl.MemoryBIO()
+
+ self.suppress_ragged_eofs = suppress_ragged_eofs
+ self.socket = socket
+
+ self.sslobj = ssl_context.wrap_bio(
+ self.incoming, self.outgoing, server_hostname=server_hostname
+ )
+
+ # Perform initial handshake.
+ self._ssl_io_loop(self.sslobj.do_handshake)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *_):
+ self.close()
+
+ def fileno(self):
+ return self.socket.fileno()
+
+ def read(self, len=1024, buffer=None):
+ return self._wrap_ssl_read(len, buffer)
+
+ def recv(self, len=1024, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to recv")
+ return self._wrap_ssl_read(len)
+
+ def recv_into(self, buffer, nbytes=None, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to recv_into")
+ if buffer and (nbytes is None):
+ nbytes = len(buffer)
+ elif nbytes is None:
+ nbytes = 1024
+ return self.read(nbytes, buffer)
+
+ def sendall(self, data, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to sendall")
+ count = 0
+ with memoryview(data) as view, view.cast("B") as byte_view:
+ amount = len(byte_view)
+ while count < amount:
+ v = self.send(byte_view[count:])
+ count += v
+
+ def send(self, data, flags=0):
+ if flags != 0:
+ raise ValueError("non-zero flags not allowed in calls to send")
+ response = self._ssl_io_loop(self.sslobj.write, data)
+ return response
+
+ def makefile(
+ self, mode="r", buffering=None, encoding=None, errors=None, newline=None
+ ):
+ """
+        Python's http.client uses makefile and buffered IO when reading HTTP
+        messages, and we need to support it.
+
+ This is unfortunately a copy and paste of socket.py makefile with small
+ changes to point to the socket directly.
+ """
+ if not set(mode) <= {"r", "w", "b"}:
+ raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
+
+ writing = "w" in mode
+ reading = "r" in mode or not writing
+ assert reading or writing
+ binary = "b" in mode
+ rawmode = ""
+ if reading:
+ rawmode += "r"
+ if writing:
+ rawmode += "w"
+ raw = socket.SocketIO(self, rawmode)
+ self.socket._io_refs += 1
+ if buffering is None:
+ buffering = -1
+ if buffering < 0:
+ buffering = io.DEFAULT_BUFFER_SIZE
+ if buffering == 0:
+ if not binary:
+ raise ValueError("unbuffered streams must be binary")
+ return raw
+ if reading and writing:
+ buffer = io.BufferedRWPair(raw, raw, buffering)
+ elif reading:
+ buffer = io.BufferedReader(raw, buffering)
+ else:
+ assert writing
+ buffer = io.BufferedWriter(raw, buffering)
+ if binary:
+ return buffer
+ text = io.TextIOWrapper(buffer, encoding, errors, newline)
+ text.mode = mode
+ return text
+
+ def unwrap(self):
+ self._ssl_io_loop(self.sslobj.unwrap)
+
+ def close(self):
+ self.socket.close()
+
+ def getpeercert(self, binary_form=False):
+ return self.sslobj.getpeercert(binary_form)
+
+ def version(self):
+ return self.sslobj.version()
+
+ def cipher(self):
+ return self.sslobj.cipher()
+
+ def selected_alpn_protocol(self):
+ return self.sslobj.selected_alpn_protocol()
+
+ def selected_npn_protocol(self):
+ return self.sslobj.selected_npn_protocol()
+
+ def shared_ciphers(self):
+ return self.sslobj.shared_ciphers()
+
+ def compression(self):
+ return self.sslobj.compression()
+
+ def settimeout(self, value):
+ self.socket.settimeout(value)
+
+ def gettimeout(self):
+ return self.socket.gettimeout()
+
+ def _decref_socketios(self):
+ self.socket._decref_socketios()
+
+ def _wrap_ssl_read(self, len, buffer=None):
+ try:
+ return self._ssl_io_loop(self.sslobj.read, len, buffer)
+ except ssl.SSLError as e:
+ if e.errno == ssl.SSL_ERROR_EOF and self.suppress_ragged_eofs:
+ return 0 # eof, return 0.
+ else:
+ raise
+
+ def _ssl_io_loop(self, func, *args):
+ """ Performs an I/O loop between incoming/outgoing and the socket."""
+ should_loop = True
+ ret = None
+
+ while should_loop:
+ errno = None
+ try:
+ ret = func(*args)
+ except ssl.SSLError as e:
+ if e.errno not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
+ # WANT_READ, and WANT_WRITE are expected, others are not.
+ raise e
+ errno = e.errno
+
+ buf = self.outgoing.read()
+ self.socket.sendall(buf)
+
+ if errno is None:
+ should_loop = False
+ elif errno == ssl.SSL_ERROR_WANT_READ:
+ buf = self.socket.recv(SSL_BLOCKSIZE)
+ if buf:
+ self.incoming.write(buf)
+ else:
+ self.incoming.write_eof()
+ return ret
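+
+
+# TLS-in-TLS usage sketch (assumes `proxy_sock` already carries an established
+# TLS session to an HTTPS proxy and `ctx` is an ssl.SSLContext that supports
+# wrap_bio; names are illustrative):
+#
+#     SSLTransport._validate_ssl_context_for_tls_in_tls(ctx)
+#     inner = SSLTransport(proxy_sock, ctx, server_hostname="origin.example")
+#     inner.sendall(b"GET / HTTP/1.1\r\nHost: origin.example\r\n\r\n")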
diff --git a/third_party/python/urllib3/urllib3/util/timeout.py b/third_party/python/urllib3/urllib3/util/timeout.py
new file mode 100644
index 0000000000..ff69593b05
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/timeout.py
@@ -0,0 +1,268 @@
+from __future__ import absolute_import
+
+import time
+
+# The default socket timeout, used by httplib to indicate that no timeout was
+# specified by the user
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+
+from ..exceptions import TimeoutStateError
+
+# A sentinel value to indicate that no timeout was specified by the user in
+# urllib3
+_Default = object()
+
+
+# Use time.monotonic if available.
+current_time = getattr(time, "monotonic", time.time)
+
+
+class Timeout(object):
+ """Timeout configuration.
+
+ Timeouts can be defined as a default for a pool:
+
+ .. code-block:: python
+
+ timeout = Timeout(connect=2.0, read=7.0)
+ http = PoolManager(timeout=timeout)
+ response = http.request('GET', 'http://example.com/')
+
+ Or per-request (which overrides the default for the pool):
+
+ .. code-block:: python
+
+ response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
+
+ Timeouts can be disabled by setting all the parameters to ``None``:
+
+ .. code-block:: python
+
+ no_timeout = Timeout(connect=None, read=None)
+        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
+
+
+ :param total:
+ This combines the connect and read timeouts into one; the read timeout
+        will be set to the time left over from the connect attempt. In the
+ event that both a connect timeout and a total are specified, or a read
+ timeout and a total are specified, the shorter timeout will be applied.
+
+ Defaults to None.
+
+ :type total: int, float, or None
+
+ :param connect:
+ The maximum amount of time (in seconds) to wait for a connection
+ attempt to a server to succeed. Omitting the parameter will default the
+ connect timeout to the system default, probably `the global default
+ timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout for connection attempts.
+
+ :type connect: int, float, or None
+
+ :param read:
+ The maximum amount of time (in seconds) to wait between consecutive
+ read operations for a response from the server. Omitting the parameter
+ will default the read timeout to the system default, probably `the
+ global default timeout in socket.py
+ <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
+ None will set an infinite timeout.
+
+ :type read: int, float, or None
+
+ .. note::
+
+ Many factors can affect the total amount of time for urllib3 to return
+ an HTTP response.
+
+ For example, Python's DNS resolver does not obey the timeout specified
+ on the socket. Other factors that can affect total request time include
+ high CPU load, high swap, the program running at a low priority level,
+ or other behaviors.
+
+ In addition, the read and total timeouts only measure the time between
+ read operations on the socket connecting the client and the server,
+ not the total amount of time for the request to return a complete
+ response. For most requests, the timeout is raised because the server
+ has not sent the first byte in the specified time. This is not always
+ the case; if a server streams one byte every fifteen seconds, a timeout
+ of 20 seconds will not trigger, even though the request will take
+ several minutes to complete.
+
+ If your goal is to cut off any request after a set amount of wall clock
+ time, consider having a second "watcher" thread to cut off a slow
+ request.
+ """
+
+ #: A sentinel object representing the default timeout value
+ DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
+
+ def __init__(self, total=None, connect=_Default, read=_Default):
+ self._connect = self._validate_timeout(connect, "connect")
+ self._read = self._validate_timeout(read, "read")
+ self.total = self._validate_timeout(total, "total")
+ self._start_connect = None
+
+ def __repr__(self):
+ return "%s(connect=%r, read=%r, total=%r)" % (
+ type(self).__name__,
+ self._connect,
+ self._read,
+ self.total,
+ )
+
+ # __str__ provided for backwards compatibility
+ __str__ = __repr__
+
+ @classmethod
+ def _validate_timeout(cls, value, name):
+ """Check that a timeout attribute is valid.
+
+ :param value: The timeout value to validate
+        :param name: The name of the timeout attribute to validate. This is
+            used in error messages.
+ :return: The validated and casted version of the given value.
+ :raises ValueError: If it is a numeric value less than or equal to
+ zero, or the type is not an integer, float, or None.
+ """
+ if value is _Default:
+ return cls.DEFAULT_TIMEOUT
+
+ if value is None or value is cls.DEFAULT_TIMEOUT:
+ return value
+
+ if isinstance(value, bool):
+ raise ValueError(
+ "Timeout cannot be a boolean value. It must "
+ "be an int, float or None."
+ )
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ raise ValueError(
+ "Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value)
+ )
+
+ try:
+ if value <= 0:
+ raise ValueError(
+ "Attempted to set %s timeout to %s, but the "
+ "timeout cannot be set to a value less "
+ "than or equal to 0." % (name, value)
+ )
+ except TypeError:
+ # Python 3
+ raise ValueError(
+ "Timeout value %s was %s, but it must be an "
+ "int, float or None." % (name, value)
+ )
+
+ return value
+
+ @classmethod
+ def from_float(cls, timeout):
+ """Create a new Timeout from a legacy timeout value.
+
+ The timeout value used by httplib.py sets the same timeout on the
+        connect() and recv() socket requests. This creates a :class:`Timeout`
+ object that sets the individual timeouts to the ``timeout`` value
+ passed to this function.
+
+ :param timeout: The legacy timeout value.
+ :type timeout: integer, float, sentinel default object, or None
+ :return: Timeout object
+ :rtype: :class:`Timeout`
+ """
+ return Timeout(read=timeout, connect=timeout)
+
+ def clone(self):
+ """Create a copy of the timeout object
+
+ Timeout properties are stored per-pool but each request needs a fresh
+ Timeout object to ensure each one has its own start/stop configured.
+
+ :return: a copy of the timeout object
+ :rtype: :class:`Timeout`
+ """
+ # We can't use copy.deepcopy because that will also create a new object
+ # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
+ # detect the user default.
+ return Timeout(connect=self._connect, read=self._read, total=self.total)
+
+ def start_connect(self):
+ """Start the timeout clock, used during a connect() attempt
+
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to start a timer that has been started already.
+ """
+ if self._start_connect is not None:
+ raise TimeoutStateError("Timeout timer has already been started.")
+ self._start_connect = current_time()
+ return self._start_connect
+
+ def get_connect_duration(self):
+ """Gets the time elapsed since the call to :meth:`start_connect`.
+
+ :return: Elapsed time in seconds.
+ :rtype: float
+ :raises urllib3.exceptions.TimeoutStateError: if you attempt
+ to get duration for a timer that hasn't been started.
+ """
+ if self._start_connect is None:
+ raise TimeoutStateError(
+ "Can't get connect duration for timer that has not started."
+ )
+ return current_time() - self._start_connect
+
+ @property
+ def connect_timeout(self):
+ """Get the value to use when setting a connection timeout.
+
+ This will be a positive float or integer, the value None
+ (never timeout), or the default system timeout.
+
+ :return: Connect timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ """
+ if self.total is None:
+ return self._connect
+
+ if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
+ return self.total
+
+ return min(self._connect, self.total)
+
+ @property
+ def read_timeout(self):
+ """Get the value for the read timeout.
+
+ This assumes some time has elapsed in the connection timeout and
+ computes the read timeout appropriately.
+
+ If self.total is set, the read timeout is dependent on the amount of
+ time taken by the connect timeout. If the connection time has not been
+ established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
+ raised.
+
+ :return: Value to use for the read timeout.
+ :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
+ :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
+ has not yet been called on this object.
+ """
+ if (
+ self.total is not None
+ and self.total is not self.DEFAULT_TIMEOUT
+ and self._read is not None
+ and self._read is not self.DEFAULT_TIMEOUT
+ ):
+ # In case the connect timeout has not yet been established.
+ if self._start_connect is None:
+ return self._read
+ return max(0, min(self.total - self.get_connect_duration(), self._read))
+ elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+ return max(0, self.total - self.get_connect_duration())
+ else:
+ return self._read
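+
+
+# Worked example of the budget math above (illustrative sketch): with
+# total=10 and read=7, a connect phase that consumed 4 seconds leaves
+# min(10 - 4, 7) == 6 seconds for reads.
+#
+#     t = Timeout(total=10, read=7)
+#     t.start_connect()
+#     # ... connect takes ~4 seconds ...
+#     t.read_timeout  # ~6.0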
diff --git a/third_party/python/urllib3/urllib3/util/url.py b/third_party/python/urllib3/urllib3/util/url.py
new file mode 100644
index 0000000000..6ff238fe3c
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/url.py
@@ -0,0 +1,430 @@
+from __future__ import absolute_import
+
+import re
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+from ..packages import six
+
+url_attrs = ["scheme", "auth", "host", "port", "path", "query", "fragment"]
+
+# We only want to normalize urls with an HTTP(S) scheme.
+# urllib3 infers URLs without a scheme (None) to be http.
+NORMALIZABLE_SCHEMES = ("http", "https", None)
+
+# Almost all of these patterns were derived from the
+# 'rfc3986' module: https://github.com/python-hyper/rfc3986
+PERCENT_RE = re.compile(r"%[a-fA-F0-9]{2}")
+SCHEME_RE = re.compile(r"^(?:[a-zA-Z][a-zA-Z0-9+-]*:|/)")
+URI_RE = re.compile(
+ r"^(?:([a-zA-Z][a-zA-Z0-9+.-]*):)?"
+ r"(?://([^\\/?#]*))?"
+ r"([^?#]*)"
+ r"(?:\?([^#]*))?"
+ r"(?:#(.*))?$",
+ re.UNICODE | re.DOTALL,
+)
+
+IPV4_PAT = r"(?:[0-9]{1,3}\.){3}[0-9]{1,3}"
+HEX_PAT = "[0-9A-Fa-f]{1,4}"
+LS32_PAT = "(?:{hex}:{hex}|{ipv4})".format(hex=HEX_PAT, ipv4=IPV4_PAT)
+_subs = {"hex": HEX_PAT, "ls32": LS32_PAT}
+_variations = [
+ # 6( h16 ":" ) ls32
+ "(?:%(hex)s:){6}%(ls32)s",
+ # "::" 5( h16 ":" ) ls32
+ "::(?:%(hex)s:){5}%(ls32)s",
+ # [ h16 ] "::" 4( h16 ":" ) ls32
+ "(?:%(hex)s)?::(?:%(hex)s:){4}%(ls32)s",
+ # [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
+ "(?:(?:%(hex)s:)?%(hex)s)?::(?:%(hex)s:){3}%(ls32)s",
+ # [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
+ "(?:(?:%(hex)s:){0,2}%(hex)s)?::(?:%(hex)s:){2}%(ls32)s",
+ # [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
+ "(?:(?:%(hex)s:){0,3}%(hex)s)?::%(hex)s:%(ls32)s",
+ # [ *4( h16 ":" ) h16 ] "::" ls32
+ "(?:(?:%(hex)s:){0,4}%(hex)s)?::%(ls32)s",
+ # [ *5( h16 ":" ) h16 ] "::" h16
+ "(?:(?:%(hex)s:){0,5}%(hex)s)?::%(hex)s",
+ # [ *6( h16 ":" ) h16 ] "::"
+ "(?:(?:%(hex)s:){0,6}%(hex)s)?::",
+]
+
+UNRESERVED_PAT = r"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._!\-~"
+IPV6_PAT = "(?:" + "|".join([x % _subs for x in _variations]) + ")"
+ZONE_ID_PAT = "(?:%25|%)(?:[" + UNRESERVED_PAT + "]|%[a-fA-F0-9]{2})+"
+IPV6_ADDRZ_PAT = r"\[" + IPV6_PAT + r"(?:" + ZONE_ID_PAT + r")?\]"
+REG_NAME_PAT = r"(?:[^\[\]%:/?#]|%[a-fA-F0-9]{2})*"
+TARGET_RE = re.compile(r"^(/[^?#]*)(?:\?([^#]*))?(?:#.*)?$")
+
+IPV4_RE = re.compile("^" + IPV4_PAT + "$")
+IPV6_RE = re.compile("^" + IPV6_PAT + "$")
+IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT + "$")
+BRACELESS_IPV6_ADDRZ_RE = re.compile("^" + IPV6_ADDRZ_PAT[2:-2] + "$")
+ZONE_ID_RE = re.compile("(" + ZONE_ID_PAT + r")\]$")
+
+SUBAUTHORITY_PAT = (u"^(?:(.*)@)?(%s|%s|%s)(?::([0-9]{0,5}))?$") % (
+ REG_NAME_PAT,
+ IPV4_PAT,
+ IPV6_ADDRZ_PAT,
+)
+SUBAUTHORITY_RE = re.compile(SUBAUTHORITY_PAT, re.UNICODE | re.DOTALL)
+
+UNRESERVED_CHARS = set(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789._-~"
+)
+SUB_DELIM_CHARS = set("!$&'()*+,;=")
+USERINFO_CHARS = UNRESERVED_CHARS | SUB_DELIM_CHARS | {":"}
+PATH_CHARS = USERINFO_CHARS | {"@", "/"}
+QUERY_CHARS = FRAGMENT_CHARS = PATH_CHARS | {"?"}
+
+
+class Url(namedtuple("Url", url_attrs)):
+ """
+ Data structure for representing an HTTP URL. Used as a return value for
+ :func:`parse_url`. Both the scheme and host are normalized as they are
+ both case-insensitive according to RFC 3986.
+ """
+
+ __slots__ = ()
+
+ def __new__(
+ cls,
+ scheme=None,
+ auth=None,
+ host=None,
+ port=None,
+ path=None,
+ query=None,
+ fragment=None,
+ ):
+ if path and not path.startswith("/"):
+ path = "/" + path
+ if scheme is not None:
+ scheme = scheme.lower()
+ return super(Url, cls).__new__(
+ cls, scheme, auth, host, port, path, query, fragment
+ )
+
+ @property
+ def hostname(self):
+ """For backwards-compatibility with urlparse. We're nice like that."""
+ return self.host
+
+ @property
+ def request_uri(self):
+ """Absolute path including the query string."""
+ uri = self.path or "/"
+
+ if self.query is not None:
+ uri += "?" + self.query
+
+ return uri
+
+ @property
+ def netloc(self):
+ """Network location including host and port"""
+ if self.port:
+ return "%s:%d" % (self.host, self.port)
+ return self.host
+
+ @property
+ def url(self):
+ """
+ Convert self into a url
+
+ This function should more or less round-trip with :func:`.parse_url`. The
+ returned url may not be exactly the same as the url inputted to
+ :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
+ with a blank port will have : removed).
+
+ Example: ::
+
+ >>> U = parse_url('http://google.com/mail/')
+ >>> U.url
+ 'http://google.com/mail/'
+ >>> Url('http', 'username:password', 'host.com', 80,
+ ... '/path', 'query', 'fragment').url
+ 'http://username:password@host.com:80/path?query#fragment'
+ """
+ scheme, auth, host, port, path, query, fragment = self
+ url = u""
+
+ # We use "is not None" we want things to happen with empty strings (or 0 port)
+ if scheme is not None:
+ url += scheme + u"://"
+ if auth is not None:
+ url += auth + u"@"
+ if host is not None:
+ url += host
+ if port is not None:
+ url += u":" + str(port)
+ if path is not None:
+ url += path
+ if query is not None:
+ url += u"?" + query
+ if fragment is not None:
+ url += u"#" + fragment
+
+ return url
+
+ def __str__(self):
+ return self.url
+
+
+def split_first(s, delims):
+ """
+ .. deprecated:: 1.25
+
+ Given a string and an iterable of delimiters, split on the first found
+ delimiter. Return two split parts and the matched delimiter.
+
+ If not found, then the first part is the full input string.
+
+ Example::
+
+ >>> split_first('foo/bar?baz', '?/=')
+ ('foo', 'bar?baz', '/')
+ >>> split_first('foo/bar?baz', '123')
+ ('foo/bar?baz', '', None)
+
+    Scales linearly with the number of delims; not ideal for a large number of delims.
+ """
+ min_idx = None
+ min_delim = None
+ for d in delims:
+ idx = s.find(d)
+ if idx < 0:
+ continue
+
+ if min_idx is None or idx < min_idx:
+ min_idx = idx
+ min_delim = d
+
+ if min_idx is None or min_idx < 0:
+ return s, "", None
+
+ return s[:min_idx], s[min_idx + 1 :], min_delim
+
+
+def _encode_invalid_chars(component, allowed_chars, encoding="utf-8"):
+ """Percent-encodes a URI component without reapplying
+ onto an already percent-encoded component.
+ """
+ if component is None:
+ return component
+
+ component = six.ensure_text(component)
+
+ # Normalize existing percent-encoded bytes.
+ # Try to see if the component we're encoding is already percent-encoded
+ # so we can skip all '%' characters but still encode all others.
+ component, percent_encodings = PERCENT_RE.subn(
+ lambda match: match.group(0).upper(), component
+ )
+
+ uri_bytes = component.encode("utf-8", "surrogatepass")
+ is_percent_encoded = percent_encodings == uri_bytes.count(b"%")
+ encoded_component = bytearray()
+
+ for i in range(0, len(uri_bytes)):
+ # Will return a single character bytestring on both Python 2 & 3
+ byte = uri_bytes[i : i + 1]
+ byte_ord = ord(byte)
+ if (is_percent_encoded and byte == b"%") or (
+ byte_ord < 128 and byte.decode() in allowed_chars
+ ):
+ encoded_component += byte
+ continue
+ encoded_component.extend(b"%" + (hex(byte_ord)[2:].encode().zfill(2).upper()))
+
+ return encoded_component.decode(encoding)
+
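+# Example: an existing "%20" escape is left alone (its '%' is not re-encoded),
+# while the raw space is percent-encoded.
+#
+#     >>> _encode_invalid_chars("a b%20c", PATH_CHARS)
+#     'a%20b%20c'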
+
+def _remove_path_dot_segments(path):
+ # See http://tools.ietf.org/html/rfc3986#section-5.2.4 for pseudo-code
+ segments = path.split("/") # Turn the path into a list of segments
+ output = [] # Initialize the variable to use to store output
+
+ for segment in segments:
+ # '.' is the current directory, so ignore it, it is superfluous
+ if segment == ".":
+ continue
+ # Anything other than '..', should be appended to the output
+ elif segment != "..":
+ output.append(segment)
+ # In this case segment == '..', if we can, we should pop the last
+ # element
+ elif output:
+ output.pop()
+
+ # If the path starts with '/' and the output is empty or the first string
+ # is non-empty
+ if path.startswith("/") and (not output or output[0]):
+ output.insert(0, "")
+
+ # If the path starts with '/.' or '/..' ensure we add one more empty
+ # string to add a trailing '/'
+ if path.endswith(("/.", "/..")):
+ output.append("")
+
+ return "/".join(output)
+
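+# Example (matches the RFC 3986 section 5.2.4 algorithm):
+#
+#     >>> _remove_path_dot_segments("/a/b/c/./../../g")
+#     '/a/g'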
+
+def _normalize_host(host, scheme):
+ if host:
+ if isinstance(host, six.binary_type):
+ host = six.ensure_str(host)
+
+ if scheme in NORMALIZABLE_SCHEMES:
+ is_ipv6 = IPV6_ADDRZ_RE.match(host)
+ if is_ipv6:
+ match = ZONE_ID_RE.search(host)
+ if match:
+ start, end = match.span(1)
+ zone_id = host[start:end]
+
+ if zone_id.startswith("%25") and zone_id != "%25":
+ zone_id = zone_id[3:]
+ else:
+ zone_id = zone_id[1:]
+ zone_id = "%" + _encode_invalid_chars(zone_id, UNRESERVED_CHARS)
+ return host[:start].lower() + zone_id + host[end:]
+ else:
+ return host.lower()
+ elif not IPV4_RE.match(host):
+ return six.ensure_str(
+ b".".join([_idna_encode(label) for label in host.split(".")])
+ )
+ return host
+
+
+def _idna_encode(name):
+ if name and any([ord(x) > 128 for x in name]):
+ try:
+ import idna
+ except ImportError:
+ six.raise_from(
+ LocationParseError("Unable to parse URL without the 'idna' module"),
+ None,
+ )
+ try:
+ return idna.encode(name.lower(), strict=True, std3_rules=True)
+ except idna.IDNAError:
+ six.raise_from(
+ LocationParseError(u"Name '%s' is not a valid IDNA label" % name), None
+ )
+ return name.lower().encode("ascii")
+
+
+def _encode_target(target):
+ """Percent-encodes a request target so that there are no invalid characters"""
+ path, query = TARGET_RE.match(target).groups()
+ target = _encode_invalid_chars(path, PATH_CHARS)
+ query = _encode_invalid_chars(query, QUERY_CHARS)
+ if query is not None:
+ target += "?" + query
+ return target
+
+
+def parse_url(url):
+ """
+ Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+ performed to parse incomplete urls. Fields not provided will be None.
+ This parser is RFC 3986 compliant.
+
+ The parser logic and helper functions are based heavily on
+ work done in the ``rfc3986`` module.
+
+ :param str url: URL to parse into a :class:`.Url` namedtuple.
+
+ Partly backwards-compatible with :mod:`urlparse`.
+
+ Example::
+
+ >>> parse_url('http://google.com/mail/')
+ Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
+ >>> parse_url('google.com:80')
+ Url(scheme=None, host='google.com', port=80, path=None, ...)
+ >>> parse_url('/foo?bar')
+ Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+ """
+ if not url:
+ # Empty
+ return Url()
+
+ source_url = url
+ if not SCHEME_RE.search(url):
+ url = "//" + url
+
+ try:
+ scheme, authority, path, query, fragment = URI_RE.match(url).groups()
+ normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES
+
+ if scheme:
+ scheme = scheme.lower()
+
+ if authority:
+ auth, host, port = SUBAUTHORITY_RE.match(authority).groups()
+ if auth and normalize_uri:
+ auth = _encode_invalid_chars(auth, USERINFO_CHARS)
+ if port == "":
+ port = None
+ else:
+ auth, host, port = None, None, None
+
+ if port is not None:
+ port = int(port)
+ if not (0 <= port <= 65535):
+ raise LocationParseError(url)
+
+ host = _normalize_host(host, scheme)
+
+ if normalize_uri and path:
+ path = _remove_path_dot_segments(path)
+ path = _encode_invalid_chars(path, PATH_CHARS)
+ if normalize_uri and query:
+ query = _encode_invalid_chars(query, QUERY_CHARS)
+ if normalize_uri and fragment:
+ fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)
+
+ except (ValueError, AttributeError):
+ return six.raise_from(LocationParseError(source_url), None)
+
+ # For the sake of backwards compatibility we put empty
+ # string values for path if there are any defined values
+ # beyond the path in the URL.
+ # TODO: Remove this when we break backwards compatibility.
+ if not path:
+ if query is not None or fragment is not None:
+ path = ""
+ else:
+ path = None
+
+ # Ensure that each part of the URL is a `str` for
+ # backwards compatibility.
+ if isinstance(url, six.text_type):
+ ensure_func = six.ensure_text
+ else:
+ ensure_func = six.ensure_str
+
+ def ensure_type(x):
+ return x if x is None else ensure_func(x)
+
+ return Url(
+ scheme=ensure_type(scheme),
+ auth=ensure_type(auth),
+ host=ensure_type(host),
+ port=port,
+ path=ensure_type(path),
+ query=ensure_type(query),
+ fragment=ensure_type(fragment),
+ )
+
+
+def get_host(url):
+ """
+ Deprecated. Use :func:`parse_url` instead.
+ """
+ p = parse_url(url)
+ return p.scheme or "http", p.hostname, p.port
diff --git a/third_party/python/urllib3/urllib3/util/wait.py b/third_party/python/urllib3/urllib3/util/wait.py
new file mode 100644
index 0000000000..c280646c7b
--- /dev/null
+++ b/third_party/python/urllib3/urllib3/util/wait.py
@@ -0,0 +1,153 @@
+import errno
+import select
+import sys
+from functools import partial
+
+try:
+ from time import monotonic
+except ImportError:
+ from time import time as monotonic
+
+__all__ = ["NoWayToWaitForSocketError", "wait_for_read", "wait_for_write"]
+
+
+class NoWayToWaitForSocketError(Exception):
+ pass
+
+
+# How should we wait on sockets?
+#
+# There are two types of APIs you can use for waiting on sockets: the fancy
+# modern stateful APIs like epoll/kqueue, and the older stateless APIs like
+# select/poll. The stateful APIs are more efficient when you have lots of
+# sockets to keep track of, because you can set them up once and then use them
+# lots of times. But we only ever want to wait on a single socket at a time
+# and don't want to keep track of state, so the stateless APIs are actually
+# more efficient. So we want to use select() or poll().
+#
+# Now, how do we choose between select() and poll()? On traditional Unixes,
+# select() has a strange calling convention that makes it slow, or fail
+# altogether, for high-numbered file descriptors. The point of poll() is to fix
+# that, so on Unixes, we prefer poll().
+#
+# On Windows, there is no poll() (or at least Python doesn't provide a wrapper
+# for it), but that's OK, because on Windows, select() doesn't have this
+# strange calling convention; plain select() works fine.
+#
+# So: on Windows we use select(), and everywhere else we use poll(). We also
+# fall back to select() in case poll() is somehow broken or missing.
+
+if sys.version_info >= (3, 5):
+ # Modern Python, that retries syscalls by default
+ def _retry_on_intr(fn, timeout):
+ return fn(timeout)
+
+
+else:
+ # Old and broken Pythons.
+ def _retry_on_intr(fn, timeout):
+ if timeout is None:
+ deadline = float("inf")
+ else:
+ deadline = monotonic() + timeout
+
+ while True:
+ try:
+ return fn(timeout)
+ # OSError for 3 <= pyver < 3.5, select.error for pyver <= 2.7
+ except (OSError, select.error) as e:
+ # 'e.args[0]' incantation works for both OSError and select.error
+ if e.args[0] != errno.EINTR:
+ raise
+ else:
+ timeout = deadline - monotonic()
+ if timeout < 0:
+ timeout = 0
+ if timeout == float("inf"):
+ timeout = None
+ continue
+
+
+def select_wait_for_socket(sock, read=False, write=False, timeout=None):
+ if not read and not write:
+ raise RuntimeError("must specify at least one of read=True, write=True")
+ rcheck = []
+ wcheck = []
+ if read:
+ rcheck.append(sock)
+ if write:
+ wcheck.append(sock)
+ # When doing a non-blocking connect, most systems signal success by
+    # marking the socket writable. Windows, though, signals success by marking
+ # it as "exceptional". We paper over the difference by checking the write
+ # sockets for both conditions. (The stdlib selectors module does the same
+ # thing.)
+ fn = partial(select.select, rcheck, wcheck, wcheck)
+ rready, wready, xready = _retry_on_intr(fn, timeout)
+ return bool(rready or wready or xready)
+
+
+def poll_wait_for_socket(sock, read=False, write=False, timeout=None):
+ if not read and not write:
+ raise RuntimeError("must specify at least one of read=True, write=True")
+ mask = 0
+ if read:
+ mask |= select.POLLIN
+ if write:
+ mask |= select.POLLOUT
+ poll_obj = select.poll()
+ poll_obj.register(sock, mask)
+
+ # For some reason, poll() takes timeout in milliseconds
+ def do_poll(t):
+ if t is not None:
+ t *= 1000
+ return poll_obj.poll(t)
+
+ return bool(_retry_on_intr(do_poll, timeout))
+
+
+def null_wait_for_socket(*args, **kwargs):
+ raise NoWayToWaitForSocketError("no select-equivalent available")
+
+
+def _have_working_poll():
+ # Apparently some systems have a select.poll that fails as soon as you try
+ # to use it, either due to strange configuration or broken monkeypatching
+ # from libraries like eventlet/greenlet.
+ try:
+ poll_obj = select.poll()
+ _retry_on_intr(poll_obj.poll, 0)
+ except (AttributeError, OSError):
+ return False
+ else:
+ return True
+
+
+def wait_for_socket(*args, **kwargs):
+ # We delay choosing which implementation to use until the first time we're
+ # called. We could do it at import time, but then we might make the wrong
+ # decision if someone goes wild with monkeypatching select.poll after
+ # we're imported.
+ global wait_for_socket
+ if _have_working_poll():
+ wait_for_socket = poll_wait_for_socket
+ elif hasattr(select, "select"):
+ wait_for_socket = select_wait_for_socket
+ else: # Platform-specific: Appengine.
+ wait_for_socket = null_wait_for_socket
+ return wait_for_socket(*args, **kwargs)
+
+
+def wait_for_read(sock, timeout=None):
+ """Waits for reading to be available on a given socket.
+ Returns True if the socket is readable, or False if the timeout expired.
+ """
+ return wait_for_socket(sock, read=True, timeout=timeout)
+
+
+def wait_for_write(sock, timeout=None):
+ """Waits for writing to be available on a given socket.
+    Returns True if the socket is writable, or False if the timeout expired.
+ """
+ return wait_for_socket(sock, write=True, timeout=timeout)
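+
+
+# Usage sketch (`sock` is a hypothetical connected socket): wait up to five
+# seconds for data before giving up.
+#
+#     if wait_for_read(sock, timeout=5.0):
+#         data = sock.recv(4096)
+#     else:
+#         pass  # timed out: no data arrived within 5 seconds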
diff --git a/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/COPYING b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/COPYING
new file mode 100644
index 0000000000..a19b7057fa
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/COPYING
@@ -0,0 +1,25 @@
+Copyright (c) 2010, Alec Thomas
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ - Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+ - Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ - Neither the name of SwapOff.org nor the names of its contributors may
+ be used to endorse or promote products derived from this software without
+ specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/METADATA b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/METADATA
new file mode 100644
index 0000000000..a74534f4ea
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/METADATA
@@ -0,0 +1,760 @@
+Metadata-Version: 2.1
+Name: voluptuous
+Version: 0.12.1
+Summary: UNKNOWN
+Home-page: https://github.com/alecthomas/voluptuous
+Author: Alec Thomas
+Author-email: alec@swapoff.org
+License: BSD
+Download-URL: https://pypi.python.org/pypi/voluptuous
+Platform: any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Description-Content-Type: text/markdown
+
+
+# CONTRIBUTIONS ONLY
+
+**What does this mean?** I do not have time to fix issues myself. The only way fixes or new features will be added is by people submitting PRs.
+
+**Current status:** Voluptuous is largely feature stable. There hasn't been a need to add new features in a while, but there are some bugs that should be fixed.
+
+**Why?** I no longer use Voluptuous personally (in fact I no longer regularly write Python code). Rather than leave the project in a limbo of people filing issues and wondering why they're not being worked on, I believe this notice will more clearly set expectations.
+
+# Voluptuous is a Python data validation library
+
+[![image](https://img.shields.io/pypi/v/voluptuous.svg)](https://python.org/pypi/voluptuous)
+[![image](https://img.shields.io/pypi/l/voluptuous.svg)](https://python.org/pypi/voluptuous)
+[![image](https://img.shields.io/pypi/pyversions/voluptuous.svg)](https://python.org/pypi/voluptuous)
+[![Build Status](https://travis-ci.org/alecthomas/voluptuous.svg)](https://travis-ci.org/alecthomas/voluptuous)
+[![Coverage Status](https://coveralls.io/repos/github/alecthomas/voluptuous/badge.svg?branch=master)](https://coveralls.io/github/alecthomas/voluptuous?branch=master) [![Gitter chat](https://badges.gitter.im/alecthomas.svg)](https://gitter.im/alecthomas/Lobby)
+
+Voluptuous, *despite* the name, is a Python data validation library. It
+is primarily intended for validating data coming into Python as JSON,
+YAML, etc.
+
+It has three goals:
+
+1. Simplicity.
+2. Support for complex data structures.
+3. Provide useful error messages.
+
+## Contact
+
+Voluptuous now has a mailing list! Send a mail to
+[<voluptuous@librelist.com>](mailto:voluptuous@librelist.com) to subscribe. Instructions
+will follow.
+
+You can also contact me directly via [email](mailto:alec@swapoff.org) or
+[Twitter](https://twitter.com/alecthomas).
+
+To file a bug, create a [new issue](https://github.com/alecthomas/voluptuous/issues/new) on GitHub with a short example of how to replicate the issue.
+
+## Documentation
+
+The documentation is provided [here](http://alecthomas.github.io/voluptuous/).
+
+## Changelog
+
+See [CHANGELOG.md](https://github.com/alecthomas/voluptuous/blob/master/CHANGELOG.md).
+
+## Why use Voluptuous over another validation library?
+
+**Validators are simple callables:**
+No need to subclass anything, just use a function.
+
+**Errors are simple exceptions:**
+A validator can just `raise Invalid(msg)` and expect the user to get
+useful messages.
+
+**Schemas are basic Python data structures:**
+Should your data be a dictionary of integer keys to strings?
+`{int: str}` does what you expect. List of integers, floats or
+strings? `[int, float, str]`.
+
+**Designed from the ground up for validating more than just forms:**
+Nested data structures are treated in the same way as any other
+type. Need a list of dictionaries? `[{}]`
+
+**Consistency:**
+Types in the schema are checked as types. Values are compared as
+values. Callables are called to validate. Simple.
+
+## Show me an example
+
+Twitter's [user search API](https://dev.twitter.com/rest/reference/get/users/search) accepts
+query URLs like:
+
+```
+$ curl 'https://api.twitter.com/1.1/users/search.json?q=python&per_page=20&page=1'
+```
+
+To validate this we might use a schema like:
+
+```pycon
+>>> from voluptuous import Schema
+>>> schema = Schema({
+... 'q': str,
+... 'per_page': int,
+... 'page': int,
+... })
+
+```
+
+This schema very succinctly and roughly describes the data required by
+the API, and will work fine. But it has a few problems. Firstly, it
+doesn't fully express the constraints of the API. According to the API,
+`per_page` should be restricted to at most 20, defaulting to 5, for
+example. To describe the semantics of the API more accurately, our
+schema will need to be more thoroughly defined:
+
+```pycon
+>>> from voluptuous import Required, All, Length, Range
+>>> schema = Schema({
+... Required('q'): All(str, Length(min=1)),
+... Required('per_page', default=5): All(int, Range(min=1, max=20)),
+... 'page': All(int, Range(min=0)),
+... })
+
+```
+
+This schema fully enforces the interface defined in Twitter's
+documentation, and goes a little further for completeness.
+
+"q" is required:
+
+```pycon
+>>> from voluptuous import MultipleInvalid, Invalid
+>>> try:
+... schema({})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "required key not provided @ data['q']"
+True
+
+```
+
+...must be a string:
+
+```pycon
+>>> try:
+... schema({'q': 123})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "expected str for dictionary value @ data['q']"
+True
+
+```
+
+...and must be at least one character in length:
+
+```pycon
+>>> try:
+... schema({'q': ''})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "length of value must be at least 1 for dictionary value @ data['q']"
+True
+>>> schema({'q': '#topic'}) == {'q': '#topic', 'per_page': 5}
+True
+
+```
+
+"per\_page" is a positive integer no greater than 20:
+
+```pycon
+>>> try:
+... schema({'q': '#topic', 'per_page': 900})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "value must be at most 20 for dictionary value @ data['per_page']"
+True
+>>> try:
+... schema({'q': '#topic', 'per_page': -10})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "value must be at least 1 for dictionary value @ data['per_page']"
+True
+
+```
+
+"page" is an integer \>= 0:
+
+```pycon
+>>> try:
+... schema({'q': '#topic', 'per_page': 'one'})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc)
+"expected int for dictionary value @ data['per_page']"
+>>> schema({'q': '#topic', 'page': 1}) == {'q': '#topic', 'page': 1, 'per_page': 5}
+True
+
+```
+
+## Defining schemas
+
+Schemas are nested data structures consisting of dictionaries, lists,
+scalars and *validators*. Each node in the input schema is pattern
+matched against corresponding nodes in the input data.
+
+### Literals
+
+Literals in the schema are matched using normal equality checks:
+
+```pycon
+>>> schema = Schema(1)
+>>> schema(1)
+1
+>>> schema = Schema('a string')
+>>> schema('a string')
+'a string'
+
+```
+
+### Types
+
+Types in the schema are matched by checking if the corresponding value
+is an instance of the type:
+
+```pycon
+>>> schema = Schema(int)
+>>> schema(1)
+1
+>>> try:
+... schema('one')
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "expected int"
+True
+
+```
+
+### URLs
+
+URLs in the schema are matched by using the `urlparse` library.
+
+```pycon
+>>> from voluptuous import Url
+>>> schema = Schema(Url())
+>>> schema('http://w3.org')
+'http://w3.org'
+>>> try:
+... schema('one')
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "expected a URL"
+True
+
+```
+
+### Lists
+
+Lists in the schema are treated as a set of valid values. Each element
+in the schema list is compared to each value in the input data:
+
+```pycon
+>>> schema = Schema([1, 'a', 'string'])
+>>> schema([1])
+[1]
+>>> schema([1, 1, 1])
+[1, 1, 1]
+>>> schema(['a', 1, 'string', 1, 'string'])
+['a', 1, 'string', 1, 'string']
+
+```
+
+However, an empty list (`[]`) is treated as is. If you want to specify a list that can
+contain anything, specify it as `list`:
+
+```pycon
+>>> schema = Schema([])
+>>> try:
+... schema([1])
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "not a valid value @ data[1]"
+True
+>>> schema([])
+[]
+>>> schema = Schema(list)
+>>> schema([])
+[]
+>>> schema([1, 2])
+[1, 2]
+
+```
+
+### Sets and frozensets
+
+Sets and frozensets are treated as a set of valid values. Each element
+in the schema set is compared to each value in the input data:
+
+```pycon
+>>> schema = Schema({42})
+>>> schema({42}) == {42}
+True
+>>> try:
+... schema({43})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "invalid value in set"
+True
+>>> schema = Schema({int})
+>>> schema({1, 2, 3}) == {1, 2, 3}
+True
+>>> schema = Schema({int, str})
+>>> schema({1, 2, 'abc'}) == {1, 2, 'abc'}
+True
+>>> schema = Schema(frozenset([int]))
+>>> try:
+... schema({3})
+... raise AssertionError('Invalid not raised')
+... except Invalid as e:
+... exc = e
+>>> str(exc) == 'expected a frozenset'
+True
+
+```
+
+However, an empty set (`set()`) is treated as is. If you want to specify a set
+that can contain anything, specify it as `set`:
+
+```pycon
+>>> schema = Schema(set())
+>>> try:
+... schema({1})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "invalid value in set"
+True
+>>> schema(set()) == set()
+True
+>>> schema = Schema(set)
+>>> schema({1, 2}) == {1, 2}
+True
+
+```
+
+### Validation functions
+
+Validators are simple callables that raise an `Invalid` exception when
+they encounter invalid data. The criteria for determining validity are
+entirely up to the implementation; it may check that a value is a valid
+username with `pwd.getpwnam()`, it may check that a value is of a
+specific type, and so on.
+
+The simplest kind of validator is a Python function that raises
+ValueError when its argument is invalid. Conveniently, many builtin
+Python functions have this property. Here's an example of a date
+validator:
+
+```pycon
+>>> from datetime import datetime
+>>> def Date(fmt='%Y-%m-%d'):
+... return lambda v: datetime.strptime(v, fmt)
+
+```
+
+```pycon
+>>> schema = Schema(Date())
+>>> schema('2013-03-03')
+datetime.datetime(2013, 3, 3, 0, 0)
+>>> try:
+... schema('2013-03')
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "not a valid value"
+True
+
+```
+
+In addition to simply determining if a value is valid, validators may
+mutate the value into a valid form. An example of this is the
+`Coerce(type)` function, which returns a function that coerces its
+argument to the given type:
+
+```python
+def Coerce(type, msg=None):
+ """Coerce a value to a type.
+
+ If the type constructor throws a ValueError, the value will be marked as
+ Invalid.
+ """
+ def f(v):
+ try:
+ return type(v)
+ except ValueError:
+ raise Invalid(msg or ('expected %s' % type.__name__))
+ return f
+
+```
+
+This example also shows a common idiom where an optional human-readable
+message can be provided. This can vastly improve the usefulness of the
+resulting error messages.
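+
+For instance, a minimal sketch of this idiom in use, using voluptuous's
+built-in `Coerce` (which, like the function above, accepts an optional
+message):
+
+```pycon
+>>> from voluptuous import Coerce
+>>> schema = Schema(Coerce(int, msg='expected an integer'))
+>>> schema('42')
+42
+>>> try:
+...     schema('abc')
+...     raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+...     exc = e
+>>> str(exc) == 'expected an integer'
+True
+
+```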
+
+### Dictionaries
+
+Each key-value pair in a schema dictionary is validated against each
+key-value pair in the corresponding data dictionary:
+
+```pycon
+>>> schema = Schema({1: 'one', 2: 'two'})
+>>> schema({1: 'one'})
+{1: 'one'}
+
+```
+
+#### Extra dictionary keys
+
+By default, any keys in the data that are not in the schema will
+trigger an exception:
+
+```pycon
+>>> schema = Schema({2: 3})
+>>> try:
+... schema({1: 2, 2: 3})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "extra keys not allowed @ data[1]"
+True
+
+```
+
+This behaviour can be altered on a per-schema basis. To allow
+additional keys, use
+`Schema(..., extra=ALLOW_EXTRA)`:
+
+```pycon
+>>> from voluptuous import ALLOW_EXTRA
+>>> schema = Schema({2: 3}, extra=ALLOW_EXTRA)
+>>> schema({1: 2, 2: 3})
+{1: 2, 2: 3}
+
+```
+
+To remove additional keys, use
+`Schema(..., extra=REMOVE_EXTRA)`:
+
+```pycon
+>>> from voluptuous import REMOVE_EXTRA
+>>> schema = Schema({2: 3}, extra=REMOVE_EXTRA)
+>>> schema({1: 2, 2: 3})
+{2: 3}
+
+```
+
+It can also be overridden per-dictionary by using the catch-all marker
+token `Extra` as a key:
+
+```pycon
+>>> from voluptuous import Extra
+>>> schema = Schema({1: {Extra: object}})
+>>> schema({1: {'foo': 'bar'}})
+{1: {'foo': 'bar'}}
+
+```
+
+#### Required dictionary keys
+
+By default, keys in the schema are not required to be in the data:
+
+```pycon
+>>> schema = Schema({1: 2, 3: 4})
+>>> schema({3: 4})
+{3: 4}
+
+```
+
+Similarly to how extra keys work, this behaviour can be overridden
+per-schema:
+
+```pycon
+>>> schema = Schema({1: 2, 3: 4}, required=True)
+>>> try:
+... schema({3: 4})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "required key not provided @ data[1]"
+True
+
+```
+
+And per-key, with the marker token `Required(key)`:
+
+```pycon
+>>> schema = Schema({Required(1): 2, 3: 4})
+>>> try:
+... schema({3: 4})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "required key not provided @ data[1]"
+True
+>>> schema({1: 2})
+{1: 2}
+
+```
+
+#### Optional dictionary keys
+
+If a schema has `required=True`, keys may be individually marked as
+optional using the marker token `Optional(key)`:
+
+```pycon
+>>> from voluptuous import Optional
+>>> schema = Schema({1: 2, Optional(3): 4}, required=True)
+>>> try:
+... schema({})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "required key not provided @ data[1]"
+True
+>>> schema({1: 2})
+{1: 2}
+>>> try:
+... schema({1: 2, 4: 5})
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "extra keys not allowed @ data[4]"
+True
+
+```
+
+Providing a value for the `Optional` key is, of course, also valid:
+
+```pycon
+>>> schema({1: 2, 3: 4})
+{1: 2, 3: 4}
+
+```
+
+### Recursive / nested schema
+
+You can use `voluptuous.Self` to define a nested schema:
+
+```pycon
+>>> from voluptuous import Schema, Self
+>>> recursive = Schema({"more": Self, "value": int})
+>>> recursive({"more": {"value": 42}, "value": 41}) == {'more': {'value': 42}, 'value': 41}
+True
+
+```
+
+### Extending an existing Schema
+
+It often comes in handy to have a base `Schema` that is extended with more
+requirements. In that case, you can use `Schema.extend` to create a new
+`Schema`:
+
+```pycon
+>>> from voluptuous import Schema
+>>> person = Schema({'name': str})
+>>> person_with_age = person.extend({'age': int})
+>>> sorted(list(person_with_age.schema.keys()))
+['age', 'name']
+
+```
+
+The original `Schema` remains unchanged.
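+
+For example, `person` above still only knows about its original key:
+
+```pycon
+>>> sorted(person.schema.keys())
+['name']
+
+```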
+
+### Objects
+
+Each key-value pair in a schema dictionary is validated against each
+attribute-value pair in the corresponding object:
+
+```pycon
+>>> from voluptuous import Object
+>>> class Structure(object):
+... def __init__(self, q=None):
+... self.q = q
+... def __repr__(self):
+... return '<Structure(q={0.q!r})>'.format(self)
+...
+>>> schema = Schema(Object({'q': 'one'}, cls=Structure))
+>>> schema(Structure(q='one'))
+<Structure(q='one')>
+
+```
+
+### Allow None values
+
+To allow a value to be `None` as well, use `Any`:
+
+```pycon
+>>> from voluptuous import Any
+
+>>> schema = Schema(Any(None, int))
+>>> schema(None)
+>>> schema(5)
+5
+
+```
+
+## Error reporting
+
+Validators must throw an `Invalid` exception if invalid data is passed
+to them. All other exceptions are treated as errors in the validator and
+will not be caught.
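+
+A small illustrative sketch (with a deliberately broken, hypothetical
+validator) showing that a non-`Invalid` exception propagates unchanged:
+
+```pycon
+>>> def broken_validator(v):
+...     raise RuntimeError('not an Invalid')
+>>> schema = Schema(broken_validator)
+>>> try:
+...     schema('anything')
+... except RuntimeError as e:
+...     print('propagated: %s' % e)
+propagated: not an Invalid
+
+```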
+
+Each `Invalid` exception has an associated `path` attribute representing
+the path in the data structure to the value currently being validated, as well
+as an `error_message` attribute that contains the message of the original
+exception. This is especially useful when you want to catch `Invalid`
+exceptions and give some feedback to the user, for instance in the context of
+an HTTP API.
+
+
+```pycon
+>>> def validate_email(email):
+... """Validate email."""
+... if "@" not in email:
+... raise Invalid("This email is invalid.")
+... return email
+>>> schema = Schema({"email": validate_email})
+>>> exc = None
+>>> try:
+... schema({"email": "whatever"})
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc)
+"This email is invalid. for dictionary value @ data['email']"
+>>> exc.path
+['email']
+>>> exc.msg
+'This email is invalid.'
+>>> exc.error_message
+'This email is invalid.'
+
+```
+
+The `path` attribute is used during error reporting, but also during matching
+to determine whether an error should be reported to the user or whether the
+next match should be attempted. This is determined by comparing the depth of
+the path where the check is made to the depth of the path where the error
+occurred. If the error is more than one level deeper, it is reported.
+
+The upshot of this is that *matching is depth-first and fail-fast*.
+
+To illustrate this, here is an example schema:
+
+```pycon
+>>> schema = Schema([[2, 3], 6])
+
+```
+
+Each value in the top-level list is matched depth-first in-order. Given
+input data of `[[6]]`, the inner list will match the first element of
+the schema, but the literal `6` will not match any of the elements of
+that list. This error will be reported back to the user immediately. No
+backtracking is attempted:
+
+```pycon
+>>> try:
+... schema([[6]])
+... raise AssertionError('MultipleInvalid not raised')
+... except MultipleInvalid as e:
+... exc = e
+>>> str(exc) == "not a valid value @ data[0][0]"
+True
+
+```
+
+If we pass the data `[6]`, the `6` is not a list and so will not be
+matched against the nested list in the first element of the schema.
+Matching will continue on to the second element in the schema, and succeed:
+
+```pycon
+>>> schema([6])
+[6]
+
+```
+
+## Multi-field validation
+
+Validation rules that involve multiple fields can be implemented as
+custom validators. It's recommended to use `All()` to do a two-pass
+validation: the first pass checks the basic structure of the data, and
+only after that does the second pass apply your cross-field
+validator:
+
+```python
+def passwords_must_match(passwords):
+ if passwords['password'] != passwords['password_again']:
+ raise Invalid('passwords must match')
+ return passwords
+
+s = Schema(All(
+    # First "pass" for field types
+    {'password': str, 'password_again': str},
+    # Follow up the first "pass" with your multi-field rules
+    passwords_must_match
+))
+
+# valid
+s({'password': '123', 'password_again': '123'})
+
+# raises MultipleInvalid: passwords must match
+s({'password': '123', 'password_again': 'and now for something completely different'})
+
+```
+
+With this structure, your multi-field validator will run with
+pre-validated data from the first "pass" and so will not have to do
+its own type checking on its inputs.
+
+The flipside is that if the first "pass" of validation fails, your
+cross-field validator will not run:
+
+```python
+# raises Invalid because password_again is not a string
+# passwords_must_match() will not run because first-pass validation already failed
+s({'password': '123', 'password_again': 1337})
+```
+
+## Running tests
+
+Voluptuous uses nosetests:
+
+ $ nosetests
+
+
+## Other libraries and inspirations
+
+Voluptuous is heavily inspired by
+[Validino](http://code.google.com/p/validino/), and to a lesser extent,
+[jsonvalidator](http://code.google.com/p/jsonvalidator/) and
+[json\_schema](http://blog.sendapatch.se/category/json_schema.html).
+
+[pytest-voluptuous](https://github.com/F-Secure/pytest-voluptuous) is a
+[pytest](https://github.com/pytest-dev/pytest) plugin that helps in
+using voluptuous validators in `assert`s.
+
+I greatly prefer the light-weight style promoted by these libraries to
+the complexity of libraries like FormEncode.
+
+
diff --git a/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/RECORD b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/RECORD
new file mode 100644
index 0000000000..5f7fde6e52
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/RECORD
@@ -0,0 +1,11 @@
+voluptuous/__init__.py,sha256=tSYWPAIWee6YwcMK8hxmaiagG_swokUMeH8MluJLWZM,203
+voluptuous/error.py,sha256=fLRmJwKp0bqRGgBM34ztg9MTxhEOf465sbQcvJlEtAk,4026
+voluptuous/humanize.py,sha256=hZlhdN4aVeGDIXdtSTeyEbmku65SDPRuut3mOfuRQP0,1606
+voluptuous/schema_builder.py,sha256=xVJpf3uJMyS1CKwzDw3rEK39ebqDiF_A2Kbq4VnZ3Aw,43677
+voluptuous/util.py,sha256=RXLZ2b5y-A4az3teG6UpCx2UZcXpS11sIVCdORyKar8,3150
+voluptuous/validators.py,sha256=xZgyKH-EVqUHCHral5gzViXq4HfEjJEsGdQy7z6llc0,32798
+voluptuous-0.12.1.dist-info/COPYING,sha256=JHtJdren-k2J2Vh8qlCVVh60bcVFfyJ59ipitUUq3qk,1486
+voluptuous-0.12.1.dist-info/METADATA,sha256=OdEydy5NydPFFzAhP8qj_YqJsQPQvoIt5ZT1t8B14Ok,20120
+voluptuous-0.12.1.dist-info/WHEEL,sha256=S6zePDbUAjzMmpYOg2cHDxuYFWw7WiOXt6ogM6hIB5Q,92
+voluptuous-0.12.1.dist-info/top_level.txt,sha256=TTdVb7M-vndb67UqTmAxuVjpAUakrlAWJYqvo3w4Iqc,11
+voluptuous-0.12.1.dist-info/RECORD,,
diff --git a/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/WHEEL b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/WHEEL
new file mode 100644
index 0000000000..3e5d84c493
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.1)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/top_level.txt b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..55356d5da8
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous-0.12.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+voluptuous
diff --git a/third_party/python/voluptuous/voluptuous/__init__.py b/third_party/python/voluptuous/voluptuous/__init__.py
new file mode 100644
index 0000000000..4d09fe670f
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous/__init__.py
@@ -0,0 +1,9 @@
+# flake8: noqa
+
+from voluptuous.schema_builder import *
+from voluptuous.validators import *
+from voluptuous.util import *
+from voluptuous.error import *
+
+__version__ = '0.12.1'
+__author__ = 'alecthomas'
diff --git a/third_party/python/voluptuous/voluptuous/error.py b/third_party/python/voluptuous/voluptuous/error.py
new file mode 100644
index 0000000000..97f37d2c7c
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous/error.py
@@ -0,0 +1,199 @@
+
+class Error(Exception):
+ """Base validation exception."""
+
+
+class SchemaError(Error):
+ """An error was encountered in the schema."""
+
+
+class Invalid(Error):
+ """The data was invalid.
+
+ :attr msg: The error message.
+ :attr path: The path to the error, as a list of keys in the source data.
+ :attr error_message: The actual error message that was raised, as a
+ string.
+
+ """
+
+ def __init__(self, message, path=None, error_message=None, error_type=None):
+ Error.__init__(self, message)
+ self.path = path or []
+ self.error_message = error_message or message
+ self.error_type = error_type
+
+ @property
+ def msg(self):
+ return self.args[0]
+
+ def __str__(self):
+ path = ' @ data[%s]' % ']['.join(map(repr, self.path)) \
+ if self.path else ''
+ output = Exception.__str__(self)
+ if self.error_type:
+ output += ' for ' + self.error_type
+ return output + path
+
+ def prepend(self, path):
+ self.path = path + self.path
+
+
+class MultipleInvalid(Invalid):
+ def __init__(self, errors=None):
+ self.errors = errors[:] if errors else []
+
+ def __repr__(self):
+ return 'MultipleInvalid(%r)' % self.errors
+
+ @property
+ def msg(self):
+ return self.errors[0].msg
+
+ @property
+ def path(self):
+ return self.errors[0].path
+
+ @property
+ def error_message(self):
+ return self.errors[0].error_message
+
+ def add(self, error):
+ self.errors.append(error)
+
+ def __str__(self):
+ return str(self.errors[0])
+
+ def prepend(self, path):
+ for error in self.errors:
+ error.prepend(path)
+
+
+class RequiredFieldInvalid(Invalid):
+ """Required field was missing."""
+
+
+class ObjectInvalid(Invalid):
+ """The value we found was not an object."""
+
+
+class DictInvalid(Invalid):
+ """The value found was not a dict."""
+
+
+class ExclusiveInvalid(Invalid):
+ """More than one value found in exclusion group."""
+
+
+class InclusiveInvalid(Invalid):
+ """Not all values found in inclusion group."""
+
+
+class SequenceTypeInvalid(Invalid):
+ """The type found is not a sequence type."""
+
+
+class TypeInvalid(Invalid):
+ """The value was not of required type."""
+
+
+class ValueInvalid(Invalid):
+ """The value was found invalid by evaluation function."""
+
+
+class ContainsInvalid(Invalid):
+ """List does not contain item"""
+
+
+class ScalarInvalid(Invalid):
+ """Scalars did not match."""
+
+
+class CoerceInvalid(Invalid):
+ """Impossible to coerce value to type."""
+
+
+class AnyInvalid(Invalid):
+ """The value did not pass any validator."""
+
+
+class AllInvalid(Invalid):
+ """The value did not pass all validators."""
+
+
+class MatchInvalid(Invalid):
+ """The value does not match the given regular expression."""
+
+
+class RangeInvalid(Invalid):
+ """The value is not in given range."""
+
+
+class TrueInvalid(Invalid):
+ """The value is not True."""
+
+
+class FalseInvalid(Invalid):
+ """The value is not False."""
+
+
+class BooleanInvalid(Invalid):
+ """The value is not a boolean."""
+
+
+class UrlInvalid(Invalid):
+ """The value is not a URL."""
+
+
+class EmailInvalid(Invalid):
+ """The value is not an email address."""
+
+
+class FileInvalid(Invalid):
+ """The value is not a file."""
+
+
+class DirInvalid(Invalid):
+ """The value is not a directory."""
+
+
+class PathInvalid(Invalid):
+ """The value is not a path."""
+
+
+class LiteralInvalid(Invalid):
+ """The literal values do not match."""
+
+
+class LengthInvalid(Invalid):
+ pass
+
+
+class DatetimeInvalid(Invalid):
+ """The value is not a formatted datetime string."""
+
+
+class DateInvalid(Invalid):
+ """The value is not a formatted date string."""
+
+
+class InInvalid(Invalid):
+ pass
+
+
+class NotInInvalid(Invalid):
+ pass
+
+
+class ExactSequenceInvalid(Invalid):
+ pass
+
+
+class NotEnoughValid(Invalid):
+ """The value did not pass enough validations."""
+ pass
+
+
+class TooManyValid(Invalid):
+ """The value passed more than expected validations."""
+ pass
diff --git a/third_party/python/voluptuous/voluptuous/humanize.py b/third_party/python/voluptuous/voluptuous/humanize.py
new file mode 100644
index 0000000000..91ab2015fb
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous/humanize.py
@@ -0,0 +1,40 @@
+from voluptuous import Invalid, MultipleInvalid
+from voluptuous.error import Error
+
+
+MAX_VALIDATION_ERROR_ITEM_LENGTH = 500
+
+
+def _nested_getitem(data, path):
+ for item_index in path:
+ try:
+ data = data[item_index]
+ except (KeyError, IndexError, TypeError):
+        # The index is not present in the dictionary, list or other
+        # indexable, or the data is not subscriptable
+ return None
+ return data
+
+
+def humanize_error(data, validation_error, max_sub_error_length=MAX_VALIDATION_ERROR_ITEM_LENGTH):
+ """ Provide a more helpful + complete validation error message than that provided automatically
+ Invalid and MultipleInvalid do not include the offending value in error messages,
+ and MultipleInvalid.__str__ only provides the first error.
+ """
+ if isinstance(validation_error, MultipleInvalid):
+ return '\n'.join(sorted(
+ humanize_error(data, sub_error, max_sub_error_length)
+ for sub_error in validation_error.errors
+ ))
+ else:
+ offending_item_summary = repr(_nested_getitem(data, validation_error.path))
+ if len(offending_item_summary) > max_sub_error_length:
+ offending_item_summary = offending_item_summary[:max_sub_error_length - 3] + '...'
+ return '%s. Got %s' % (validation_error, offending_item_summary)
+
+
+def validate_with_humanized_errors(data, schema, max_sub_error_length=MAX_VALIDATION_ERROR_ITEM_LENGTH):
+ try:
+ return schema(data)
+ except (Invalid, MultipleInvalid) as e:
+ raise Error(humanize_error(data, e, max_sub_error_length))
diff --git a/third_party/python/voluptuous/voluptuous/schema_builder.py b/third_party/python/voluptuous/voluptuous/schema_builder.py
new file mode 100644
index 0000000000..df19c8da2d
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous/schema_builder.py
@@ -0,0 +1,1301 @@
+import collections
+import inspect
+import re
+from functools import wraps
+import sys
+from contextlib import contextmanager
+
+import itertools
+from voluptuous import error as er
+
+if sys.version_info >= (3,):
+ long = int
+ unicode = str
+ basestring = str
+ ifilter = filter
+
+ def iteritems(d):
+ return d.items()
+else:
+ from itertools import ifilter
+
+ def iteritems(d):
+ return d.iteritems()
+
+if sys.version_info >= (3, 3):
+ _Mapping = collections.abc.Mapping
+else:
+ _Mapping = collections.Mapping
+
+"""Schema validation for Python data structures.
+
+Given e.g. a nested data structure like this:
+
+ {
+ 'exclude': ['Users', 'Uptime'],
+ 'include': [],
+ 'set': {
+ 'snmp_community': 'public',
+ 'snmp_timeout': 15,
+ 'snmp_version': '2c',
+ },
+ 'targets': {
+ 'localhost': {
+ 'exclude': ['Uptime'],
+ 'features': {
+ 'Uptime': {
+ 'retries': 3,
+ },
+ 'Users': {
+ 'snmp_community': 'monkey',
+ 'snmp_port': 15,
+ },
+ },
+ 'include': ['Users'],
+ 'set': {
+ 'snmp_community': 'monkeys',
+ },
+ },
+ },
+ }
+
+A schema like this:
+
+ >>> settings = {
+ ... 'snmp_community': str,
+ ... 'retries': int,
+ ... 'snmp_version': All(Coerce(str), Any('3', '2c', '1')),
+ ... }
+ >>> features = ['Ping', 'Uptime', 'Http']
+ >>> schema = Schema({
+ ... 'exclude': features,
+ ... 'include': features,
+ ... 'set': settings,
+ ... 'targets': {
+ ... 'exclude': features,
+ ... 'include': features,
+ ... 'features': {
+ ... str: settings,
+ ... },
+ ... },
+ ... })
+
+Validate like so:
+
+ >>> schema({
+ ... 'set': {
+ ... 'snmp_community': 'public',
+ ... 'snmp_version': '2c',
+ ... },
+ ... 'targets': {
+ ... 'exclude': ['Ping'],
+ ... 'features': {
+ ... 'Uptime': {'retries': 3},
+ ... 'Users': {'snmp_community': 'monkey'},
+ ... },
+ ... },
+ ... }) == {
+ ... 'set': {'snmp_version': '2c', 'snmp_community': 'public'},
+ ... 'targets': {
+ ... 'exclude': ['Ping'],
+ ... 'features': {'Uptime': {'retries': 3},
+ ... 'Users': {'snmp_community': 'monkey'}}}}
+ True
+"""
+
+# options for extra keys
+PREVENT_EXTRA = 0 # any extra key not in schema will raise an error
+ALLOW_EXTRA = 1 # extra keys not in schema will be included in output
+REMOVE_EXTRA = 2 # extra keys not in schema will be excluded from output
+
+
+def _isnamedtuple(obj):
+ return isinstance(obj, tuple) and hasattr(obj, '_fields')
+
+
+primitive_types = (str, unicode, bool, int, float)
+
+
+class Undefined(object):
+ def __nonzero__(self):
+ return False
+
+ def __repr__(self):
+ return '...'
+
+
+UNDEFINED = Undefined()
+
+
+def Self():
+ raise er.SchemaError('"Self" should never be called')
+
+
+def default_factory(value):
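+    # Wrap a plain value in a zero-argument callable; UNDEFINED and callables
+    # pass through unchanged.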
+ if value is UNDEFINED or callable(value):
+ return value
+ return lambda: value
+
+
+@contextmanager
+def raises(exc, msg=None, regex=None):
+ try:
+ yield
+ except exc as e:
+ if msg is not None:
+ assert str(e) == msg, '%r != %r' % (str(e), msg)
+ if regex is not None:
+ assert re.search(regex, str(e)), '%r does not match %r' % (str(e), regex)
+
+
+def Extra(_):
+ """Allow keys in the data that are not present in the schema."""
+ raise er.SchemaError('"Extra" should never be called')
+
+
+# As extra() is never called there's no way to catch references to the
+# deprecated object, so we just leave an alias here instead.
+extra = Extra
+
+
+class Schema(object):
+ """A validation schema.
+
+ The schema is a Python tree-like structure where nodes are pattern
+ matched against corresponding trees of values.
+
+ Nodes can be values, in which case a direct comparison is used, types,
+ in which case an isinstance() check is performed, or callables, which will
+ validate and optionally convert the value.
+
+    Schemas can also be compared for equality.
+
+    For example:
+
+ >>> v = Schema({Required('a'): unicode})
+ >>> v1 = Schema({Required('a'): unicode})
+ >>> v2 = Schema({Required('b'): unicode})
+ >>> assert v == v1
+ >>> assert v != v2
+
+ """
+
+ _extra_to_name = {
+ REMOVE_EXTRA: 'REMOVE_EXTRA',
+ ALLOW_EXTRA: 'ALLOW_EXTRA',
+ PREVENT_EXTRA: 'PREVENT_EXTRA',
+ }
+
+ def __init__(self, schema, required=False, extra=PREVENT_EXTRA):
+ """Create a new Schema.
+
+ :param schema: Validation schema. See :module:`voluptuous` for details.
+ :param required: Keys defined in the schema must be in the data.
+ :param extra: Specify how extra keys in the data are treated:
+ - :const:`~voluptuous.PREVENT_EXTRA`: to disallow any undefined
+ extra keys (raise ``Invalid``).
+ - :const:`~voluptuous.ALLOW_EXTRA`: to include undefined extra
+ keys in the output.
+ - :const:`~voluptuous.REMOVE_EXTRA`: to exclude undefined extra keys
+ from the output.
+ - Any value other than the above defaults to
+ :const:`~voluptuous.PREVENT_EXTRA`
+ """
+ self.schema = schema
+ self.required = required
+ self.extra = int(extra) # ensure the value is an integer
+ self._compiled = self._compile(schema)
+
+ @classmethod
+ def infer(cls, data, **kwargs):
+ """Create a Schema from concrete data (e.g. an API response).
+
+ For example, this will take a dict like:
+
+ {
+ 'foo': 1,
+ 'bar': {
+ 'a': True,
+ 'b': False
+ },
+ 'baz': ['purple', 'monkey', 'dishwasher']
+ }
+
+ And return a Schema:
+
+ {
+ 'foo': int,
+ 'bar': {
+ 'a': bool,
+ 'b': bool
+ },
+ 'baz': [str]
+ }
+
+ Note: only very basic inference is supported.
+ """
+ def value_to_schema_type(value):
+ if isinstance(value, dict):
+ if len(value) == 0:
+ return dict
+ return {k: value_to_schema_type(v)
+ for k, v in iteritems(value)}
+ if isinstance(value, list):
+ if len(value) == 0:
+ return list
+ else:
+ return [value_to_schema_type(v)
+ for v in value]
+ return type(value)
+
+ return cls(value_to_schema_type(data), **kwargs)
+
+ def __eq__(self, other):
+ if not isinstance(other, Schema):
+ return False
+ return other.schema == self.schema
+
+ def __ne__(self, other):
+ return not (self == other)
+
+ def __str__(self):
+ return str(self.schema)
+
+ def __repr__(self):
+ return "<Schema(%s, extra=%s, required=%s) object at 0x%x>" % (
+ self.schema, self._extra_to_name.get(self.extra, '??'),
+ self.required, id(self))
+
+ def __call__(self, data):
+ """Validate data against this schema."""
+ try:
+ return self._compiled([], data)
+ except er.MultipleInvalid:
+ raise
+ except er.Invalid as e:
+ raise er.MultipleInvalid([e])
+ # return self.validate([], self.schema, data)
+
+ def _compile(self, schema):
+ if schema is Extra:
+ return lambda _, v: v
+ if schema is Self:
+ return lambda p, v: self._compiled(p, v)
+ elif hasattr(schema, "__voluptuous_compile__"):
+ return schema.__voluptuous_compile__(self)
+ if isinstance(schema, Object):
+ return self._compile_object(schema)
+ if isinstance(schema, _Mapping):
+ return self._compile_dict(schema)
+ elif isinstance(schema, list):
+ return self._compile_list(schema)
+ elif isinstance(schema, tuple):
+ return self._compile_tuple(schema)
+ elif isinstance(schema, (frozenset, set)):
+ return self._compile_set(schema)
+ type_ = type(schema)
+ if inspect.isclass(schema):
+ type_ = schema
+ if type_ in (bool, bytes, int, long, str, unicode, float, complex, object,
+ list, dict, type(None)) or callable(schema):
+ return _compile_scalar(schema)
+ raise er.SchemaError('unsupported schema data type %r' %
+ type(schema).__name__)
+
+ def _compile_mapping(self, schema, invalid_msg=None):
+ """Create validator for given mapping."""
+ invalid_msg = invalid_msg or 'mapping value'
+
+ # Keys that may be required
+ all_required_keys = set(key for key in schema
+ if key is not Extra and
+ ((self.required and not isinstance(key, (Optional, Remove))) or
+ isinstance(key, Required)))
+
+ # Keys that may have defaults
+ all_default_keys = set(key for key in schema
+ if isinstance(key, Required) or
+ isinstance(key, Optional))
+
+ _compiled_schema = {}
+ for skey, svalue in iteritems(schema):
+ new_key = self._compile(skey)
+ new_value = self._compile(svalue)
+ _compiled_schema[skey] = (new_key, new_value)
+
+ candidates = list(_iterate_mapping_candidates(_compiled_schema))
+
+ # After we have the list of candidates in the correct order, we want to apply some optimization so that each
+ # key in the data being validated will be matched against the relevant schema keys only.
+ # No point in matching against different keys
+ additional_candidates = []
+ candidates_by_key = {}
+ for skey, (ckey, cvalue) in candidates:
+ if type(skey) in primitive_types:
+ candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue)))
+ elif isinstance(skey, Marker) and type(skey.schema) in primitive_types:
+ candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue)))
+ else:
+ # These are wildcards such as 'int', 'str', 'Remove' and others which should be applied to all keys
+ additional_candidates.append((skey, (ckey, cvalue)))
+
+ def validate_mapping(path, iterable, out):
+ required_keys = all_required_keys.copy()
+
+ # Build a map of all provided key-value pairs.
+            # The type(out) is used to retain ordering in case an ordered
+ # map type is provided as input.
+ key_value_map = type(out)()
+ for key, value in iterable:
+ key_value_map[key] = value
+
+ # Insert default values for non-existing keys.
+ for key in all_default_keys:
+ if not isinstance(key.default, Undefined) and \
+ key.schema not in key_value_map:
+ # A default value has been specified for this missing
+ # key, insert it.
+ key_value_map[key.schema] = key.default()
+
+ error = None
+ errors = []
+ for key, value in key_value_map.items():
+ key_path = path + [key]
+ remove_key = False
+
+                # Optimization: validate against the matching key first, then fall back to the rest
+ relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates)
+
+ # compare each given key/value against all compiled key/values
+ # schema key, (compiled key, compiled value)
+ for skey, (ckey, cvalue) in relevant_candidates:
+ try:
+ new_key = ckey(key_path, key)
+ except er.Invalid as e:
+ if len(e.path) > len(key_path):
+ raise
+ if not error or len(e.path) > len(error.path):
+ error = e
+ continue
+ # Backtracking is not performed once a key is selected, so if
+ # the value is invalid we immediately throw an exception.
+ exception_errors = []
+ # check if the key is marked for removal
+ is_remove = new_key is Remove
+ try:
+ cval = cvalue(key_path, value)
+ # include if it's not marked for removal
+ if not is_remove:
+ out[new_key] = cval
+ else:
+ remove_key = True
+ continue
+ except er.MultipleInvalid as e:
+ exception_errors.extend(e.errors)
+ except er.Invalid as e:
+ exception_errors.append(e)
+
+ if exception_errors:
+ if is_remove or remove_key:
+ continue
+ for err in exception_errors:
+ if len(err.path) <= len(key_path):
+ err.error_type = invalid_msg
+ errors.append(err)
+ # If there is a validation error for a required
+ # key, this means that the key was provided.
+ # Discard the required key so it does not
+ # create an additional, noisy exception.
+ required_keys.discard(skey)
+ break
+
+ # Key and value okay, mark as found in case it was
+ # a Required() field.
+ required_keys.discard(skey)
+
+ break
+ else:
+ if remove_key:
+ # remove key
+ continue
+ elif self.extra == ALLOW_EXTRA:
+ out[key] = value
+ elif self.extra != REMOVE_EXTRA:
+ errors.append(er.Invalid('extra keys not allowed', key_path))
+ # else REMOVE_EXTRA: ignore the key so it's removed from output
+
+ # for any required keys left that weren't found and don't have defaults:
+ for key in required_keys:
+ msg = key.msg if hasattr(key, 'msg') and key.msg else 'required key not provided'
+ errors.append(er.RequiredFieldInvalid(msg, path + [key]))
+ if errors:
+ raise er.MultipleInvalid(errors)
+
+ return out
+
+ return validate_mapping
+
+ def _compile_object(self, schema):
+ """Validate an object.
+
+        Has the same behavior as the dictionary validator, but works with
+        object attributes.
+
+ For example:
+
+ >>> class Structure(object):
+ ... def __init__(self, one=None, three=None):
+ ... self.one = one
+ ... self.three = three
+ ...
+ >>> validate = Schema(Object({'one': 'two', 'three': 'four'}, cls=Structure))
+ >>> with raises(er.MultipleInvalid, "not a valid value for object value @ data['one']"):
+ ... validate(Structure(one='three'))
+
+ """
+ base_validate = self._compile_mapping(
+ schema, invalid_msg='object value')
+
+ def validate_object(path, data):
+ if schema.cls is not UNDEFINED and not isinstance(data, schema.cls):
+ raise er.ObjectInvalid('expected a {0!r}'.format(schema.cls), path)
+ iterable = _iterate_object(data)
+ iterable = ifilter(lambda item: item[1] is not None, iterable)
+ out = base_validate(path, iterable, {})
+ return type(data)(**out)
+
+ return validate_object
+
+ def _compile_dict(self, schema):
+ """Validate a dictionary.
+
+ A dictionary schema can contain a set of values, or at most one
+ validator function/type.
+
+ A dictionary schema will only validate a dictionary:
+
+ >>> validate = Schema({})
+ >>> with raises(er.MultipleInvalid, 'expected a dictionary'):
+ ... validate([])
+
+ An invalid dictionary value:
+
+ >>> validate = Schema({'one': 'two', 'three': 'four'})
+ >>> with raises(er.MultipleInvalid, "not a valid value for dictionary value @ data['one']"):
+ ... validate({'one': 'three'})
+
+ An invalid key:
+
+ >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['two']"):
+ ... validate({'two': 'three'})
+
+
+ Validation function, in this case the "int" type:
+
+ >>> validate = Schema({'one': 'two', 'three': 'four', int: str})
+
+ Valid integer input:
+
+ >>> validate({10: 'twenty'})
+ {10: 'twenty'}
+
+ By default, a "type" in the schema (in this case "int") will be used
+ purely to validate that the corresponding value is of that type. It
+ will not Coerce the value:
+
+ >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data['10']"):
+ ... validate({'10': 'twenty'})
+
+ Wrap them in the Coerce() function to achieve this:
+ >>> from voluptuous import Coerce
+ >>> validate = Schema({'one': 'two', 'three': 'four',
+ ... Coerce(int): str})
+ >>> validate({'10': 'twenty'})
+ {10: 'twenty'}
+
+ Custom message for required key
+
+ >>> validate = Schema({Required('one', 'required'): 'two'})
+ >>> with raises(er.MultipleInvalid, "required @ data['one']"):
+ ... validate({})
+
+ Multiple errors for nested field in a dict:
+
+ >>> validate = Schema({
+ ... 'adict': {
+ ... 'strfield': str,
+ ... 'intfield': int
+ ... }
+ ... })
+ >>> try:
+ ... validate({
+ ... 'adict': {
+ ... 'strfield': 123,
+ ... 'intfield': 'one'
+ ... }
+ ... })
+ ... except er.MultipleInvalid as e:
+ ... print(sorted(str(i) for i in e.errors)) # doctest: +NORMALIZE_WHITESPACE
+ ["expected int for dictionary value @ data['adict']['intfield']",
+ "expected str for dictionary value @ data['adict']['strfield']"]
+
+ """
+ base_validate = self._compile_mapping(
+ schema, invalid_msg='dictionary value')
+
+ groups_of_exclusion = {}
+ groups_of_inclusion = {}
+ for node in schema:
+ if isinstance(node, Exclusive):
+ g = groups_of_exclusion.setdefault(node.group_of_exclusion, [])
+ g.append(node)
+ elif isinstance(node, Inclusive):
+ g = groups_of_inclusion.setdefault(node.group_of_inclusion, [])
+ g.append(node)
+
+ def validate_dict(path, data):
+ if not isinstance(data, dict):
+ raise er.DictInvalid('expected a dictionary', path)
+
+ errors = []
+ for label, group in groups_of_exclusion.items():
+ exists = False
+ for exclusive in group:
+ if exclusive.schema in data:
+ if exists:
+ msg = exclusive.msg if hasattr(exclusive, 'msg') and exclusive.msg else \
+ "two or more values in the same group of exclusion '%s'" % label
+ next_path = path + [VirtualPathComponent(label)]
+ errors.append(er.ExclusiveInvalid(msg, next_path))
+ break
+ exists = True
+
+ if errors:
+ raise er.MultipleInvalid(errors)
+
+ for label, group in groups_of_inclusion.items():
+ included = [node.schema in data for node in group]
+ if any(included) and not all(included):
+ msg = "some but not all values in the same group of inclusion '%s'" % label
+ for g in group:
+ if hasattr(g, 'msg') and g.msg:
+ msg = g.msg
+ break
+ next_path = path + [VirtualPathComponent(label)]
+ errors.append(er.InclusiveInvalid(msg, next_path))
+ break
+
+ if errors:
+ raise er.MultipleInvalid(errors)
+
+ out = data.__class__()
+ return base_validate(path, iteritems(data), out)
+
+ return validate_dict
+
+ def _compile_sequence(self, schema, seq_type):
+ """Validate a sequence type.
+
+ This is a sequence of valid values or validators tried in order.
+
+ >>> validator = Schema(['one', 'two', int])
+ >>> validator(['one'])
+ ['one']
+ >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
+ ... validator([3.5])
+ >>> validator([1])
+ [1]
+ """
+ _compiled = [self._compile(s) for s in schema]
+ seq_type_name = seq_type.__name__
+
+ def validate_sequence(path, data):
+ if not isinstance(data, seq_type):
+ raise er.SequenceTypeInvalid('expected a %s' % seq_type_name, path)
+
+ # Empty seq schema, reject any data.
+ if not schema:
+ if data:
+ raise er.MultipleInvalid([
+ er.ValueInvalid('not a valid value', path if path else data)
+ ])
+ return data
+
+ out = []
+ invalid = None
+ errors = []
+ index_path = UNDEFINED
+ for i, value in enumerate(data):
+ index_path = path + [i]
+ invalid = None
+ for validate in _compiled:
+ try:
+ cval = validate(index_path, value)
+ if cval is not Remove: # do not include Remove values
+ out.append(cval)
+ break
+ except er.Invalid as e:
+ if len(e.path) > len(index_path):
+ raise
+ invalid = e
+ else:
+ errors.append(invalid)
+ if errors:
+ raise er.MultipleInvalid(errors)
+
+ if _isnamedtuple(data):
+ return type(data)(*out)
+ else:
+ return type(data)(out)
+
+ return validate_sequence
+
+ def _compile_tuple(self, schema):
+ """Validate a tuple.
+
+ A tuple is a sequence of valid values or validators tried in order.
+
+ >>> validator = Schema(('one', 'two', int))
+ >>> validator(('one',))
+ ('one',)
+ >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
+ ... validator((3.5,))
+ >>> validator((1,))
+ (1,)
+ """
+ return self._compile_sequence(schema, tuple)
+
+ def _compile_list(self, schema):
+ """Validate a list.
+
+ A list is a sequence of valid values or validators tried in order.
+
+ >>> validator = Schema(['one', 'two', int])
+ >>> validator(['one'])
+ ['one']
+ >>> with raises(er.MultipleInvalid, 'expected int @ data[0]'):
+ ... validator([3.5])
+ >>> validator([1])
+ [1]
+ """
+ return self._compile_sequence(schema, list)
+
+ def _compile_set(self, schema):
+ """Validate a set.
+
+ A set is an unordered collection of unique elements.
+
+ >>> validator = Schema({int})
+ >>> validator(set([42])) == set([42])
+ True
+ >>> with raises(er.Invalid, 'expected a set'):
+ ... validator(42)
+ >>> with raises(er.MultipleInvalid, 'invalid value in set'):
+ ... validator(set(['a']))
+ """
+ type_ = type(schema)
+ type_name = type_.__name__
+
+ def validate_set(path, data):
+ if not isinstance(data, type_):
+ raise er.Invalid('expected a %s' % type_name, path)
+
+ _compiled = [self._compile(s) for s in schema]
+ errors = []
+ for value in data:
+ for validate in _compiled:
+ try:
+ validate(path, value)
+ break
+ except er.Invalid:
+ pass
+ else:
+ invalid = er.Invalid('invalid value in %s' % type_name, path)
+ errors.append(invalid)
+
+ if errors:
+ raise er.MultipleInvalid(errors)
+
+ return data
+
+ return validate_set
+
+ def extend(self, schema, required=None, extra=None):
+ """Create a new `Schema` by merging this and the provided `schema`.
+
+ Neither this `Schema` nor the provided `schema` are modified. The
+ resulting `Schema` inherits the `required` and `extra` parameters of
+ this, unless overridden.
+
+ Both schemas must be dictionary-based.
+
+ :param schema: dictionary to extend this `Schema` with
+ :param required: if set, overrides `required` of this `Schema`
+ :param extra: if set, overrides `extra` of this `Schema`
+ """
+
+ assert type(self.schema) == dict and type(schema) == dict, 'Both schemas must be dictionary-based'
+
+ result = self.schema.copy()
+
+ # returns the key that may have been passed as an argument to Marker constructor
+ def key_literal(key):
+ return (key.schema if isinstance(key, Marker) else key)
+
+ # build a map that takes the key literals to the needed objects
+ # literal -> Required|Optional|literal
+ result_key_map = dict((key_literal(key), key) for key in result)
+
+ # for each item in the extension schema, replace duplicates
+ # or add new keys
+ for key, value in iteritems(schema):
+
+ # if the key is already in the dictionary, we need to replace it
+ # transform key to literal before checking presence
+ if key_literal(key) in result_key_map:
+
+ result_key = result_key_map[key_literal(key)]
+ result_value = result[result_key]
+
+ # if both are dictionaries, we need to extend recursively
+ # create the new extended sub schema, then remove the old key and add the new one
+ if type(result_value) == dict and type(value) == dict:
+ new_value = Schema(result_value).extend(value).schema
+ del result[result_key]
+ result[key] = new_value
+ # one or the other or both are not sub-schemas, simple replacement is fine
+ # remove old key and add new one
+ else:
+ del result[result_key]
+ result[key] = value
+
+ # key is new and can simply be added
+ else:
+ result[key] = value
+
+        # build a new Schema of the same class, inheriting required/extra unless overridden
+ result_cls = type(self)
+ result_required = (required if required is not None else self.required)
+ result_extra = (extra if extra is not None else self.extra)
+ return result_cls(result, required=result_required, extra=result_extra)
+
+
+def _compile_scalar(schema):
+ """A scalar value.
+
+ The schema can either be a value or a type.
+
+ >>> _compile_scalar(int)([], 1)
+ 1
+ >>> with raises(er.Invalid, 'expected float'):
+ ... _compile_scalar(float)([], '1')
+
+    Callables are called with the value, and their return value is used:
+ >>> _compile_scalar(lambda v: float(v))([], '1')
+ 1.0
+
+ As a convenience, ValueError's are trapped:
+
+ >>> with raises(er.Invalid, 'not a valid value'):
+ ... _compile_scalar(lambda v: float(v))([], 'a')
+ """
+ if inspect.isclass(schema):
+ def validate_instance(path, data):
+ if isinstance(data, schema):
+ return data
+ else:
+ msg = 'expected %s' % schema.__name__
+ raise er.TypeInvalid(msg, path)
+
+ return validate_instance
+
+ if callable(schema):
+ def validate_callable(path, data):
+ try:
+ return schema(data)
+ except ValueError:
+ raise er.ValueInvalid('not a valid value', path)
+ except er.Invalid as e:
+ e.prepend(path)
+ raise
+
+ return validate_callable
+
+ def validate_value(path, data):
+ if data != schema:
+ raise er.ScalarInvalid('not a valid value', path)
+ return data
+
+ return validate_value
+
+
+def _compile_itemsort():
+    """Return a sort-key function for ordering mapping items."""
+
+ def is_extra(key_):
+ return key_ is Extra
+
+ def is_remove(key_):
+ return isinstance(key_, Remove)
+
+ def is_marker(key_):
+ return isinstance(key_, Marker)
+
+ def is_type(key_):
+ return inspect.isclass(key_)
+
+ def is_callable(key_):
+ return callable(key_)
+
+ # priority list for map sorting (in order of checking)
+ # We want Extra to match last, because it's a catch-all. On the other hand,
+ # Remove markers should match first (since invalid values will not
+ # raise an Error, instead the validator will check if other schemas match
+ # the same value).
+ priority = [(1, is_remove), # Remove highest priority after values
+ (2, is_marker), # then other Markers
+ (4, is_type), # types/classes lowest before Extra
+ (3, is_callable), # callables after markers
+ (5, is_extra)] # Extra lowest priority
+
+ def item_priority(item_):
+ key_ = item_[0]
+ for i, check_ in priority:
+ if check_(key_):
+ return i
+        # literal values have the highest priority
+ return 0
+
+ return item_priority
+
+
+_sort_item = _compile_itemsort()
+
+
+def _iterate_mapping_candidates(schema):
+ """Iterate over schema in a meaningful order."""
+ # Without this, Extra might appear first in the iterator, and fail to
+ # validate a key even though it's a Required that has its own validation,
+ # generating a false positive.
+ return sorted(iteritems(schema), key=_sort_item)
+
+
+def _iterate_object(obj):
+ """Return iterator over object attributes. Respect objects with
+ defined __slots__.
+
+ """
+ d = {}
+ try:
+ d = vars(obj)
+ except TypeError:
+        # maybe we have a namedtuple here?
+ if hasattr(obj, '_asdict'):
+ d = obj._asdict()
+ for item in iteritems(d):
+ yield item
+ try:
+ slots = obj.__slots__
+ except AttributeError:
+ pass
+ else:
+ for key in slots:
+ if key != '__dict__':
+ yield (key, getattr(obj, key))
+
+
+class Msg(object):
+ """Report a user-friendly message if a schema fails to validate.
+
+ >>> validate = Schema(
+ ... Msg(['one', 'two', int],
+ ... 'should be one of "one", "two" or an integer'))
+ >>> with raises(er.MultipleInvalid, 'should be one of "one", "two" or an integer'):
+ ... validate(['three'])
+
+ Messages are only applied to invalid direct descendants of the schema:
+
+ >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!'))
+ >>> with raises(er.MultipleInvalid, 'expected int @ data[0][0]'):
+ ... validate([['three']])
+
+ The type which is thrown can be overridden but needs to be a subclass of Invalid
+
+    >>> with raises(er.SchemaError, 'Msg can only use subclasses of Invalid as custom class'):
+ ... validate = Schema(Msg([int], 'should be int', cls=KeyError))
+
+ If you do use a subclass of Invalid, that error will be thrown (wrapped in a MultipleInvalid)
+
+ >>> validate = Schema(Msg([['one', 'two', int]], 'not okay!', cls=er.RangeInvalid))
+ >>> try:
+ ... validate(['three'])
+ ... except er.MultipleInvalid as e:
+ ... assert isinstance(e.errors[0], er.RangeInvalid)
+ """
+
+ def __init__(self, schema, msg, cls=None):
+ if cls and not issubclass(cls, er.Invalid):
+ raise er.SchemaError("Msg can only use subclases of"
+ " Invalid as custom class")
+ self._schema = schema
+ self.schema = Schema(schema)
+ self.msg = msg
+ self.cls = cls
+
+ def __call__(self, v):
+ try:
+ return self.schema(v)
+ except er.Invalid as e:
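+            # Replace the message only for errors on direct descendants;
+            # deeper errors keep their own message.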
+ if len(e.path) > 1:
+ raise e
+ else:
+ raise (self.cls or er.Invalid)(self.msg)
+
+ def __repr__(self):
+ return 'Msg(%s, %s, cls=%s)' % (self._schema, self.msg, self.cls)
+
+
+class Object(dict):
+ """Indicate that we should work with attributes, not keys."""
+
+ def __init__(self, schema, cls=UNDEFINED):
+ self.cls = cls
+ super(Object, self).__init__(schema)
+
+
+class VirtualPathComponent(str):
+ def __str__(self):
+ return '<' + self + '>'
+
+ def __repr__(self):
+ return self.__str__()
+
+
+# Markers.py
+
+
+class Marker(object):
+ """Mark nodes for special treatment."""
+
+ def __init__(self, schema_, msg=None, description=None):
+ self.schema = schema_
+ self._schema = Schema(schema_)
+ self.msg = msg
+ self.description = description
+
+ def __call__(self, v):
+ try:
+ return self._schema(v)
+ except er.Invalid as e:
+ if not self.msg or len(e.path) > 1:
+ raise
+ raise er.Invalid(self.msg)
+
+ def __str__(self):
+ return str(self.schema)
+
+ def __repr__(self):
+ return repr(self.schema)
+
+ def __lt__(self, other):
+ if isinstance(other, Marker):
+ return self.schema < other.schema
+ return self.schema < other
+
+ def __hash__(self):
+ return hash(self.schema)
+
+ def __eq__(self, other):
+ return self.schema == other
+
+ def __ne__(self, other):
+ return not(self.schema == other)
+
+
+class Optional(Marker):
+ """Mark a node in the schema as optional, and optionally provide a default
+
+ >>> schema = Schema({Optional('key'): str})
+ >>> schema({})
+ {}
+ >>> schema = Schema({Optional('key', default='value'): str})
+ >>> schema({})
+ {'key': 'value'}
+ >>> schema = Schema({Optional('key', default=list): list})
+ >>> schema({})
+ {'key': []}
+
+    If the 'required' flag is set for an entire schema, optional keys aren't required:
+
+ >>> schema = Schema({
+ ... Optional('key'): str,
+ ... 'key2': str
+ ... }, required=True)
+ >>> schema({'key2':'value'})
+ {'key2': 'value'}
+ """
+
+ def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
+ super(Optional, self).__init__(schema, msg=msg,
+ description=description)
+ self.default = default_factory(default)
+
+
+class Exclusive(Optional):
+ """Mark a node in the schema as exclusive.
+
+    Exclusive keys inherit the behavior of Optional keys:
+
+ >>> schema = Schema({Exclusive('alpha', 'angles'): int, Exclusive('beta', 'angles'): int})
+ >>> schema({'alpha': 30})
+ {'alpha': 30}
+
+    Keys inside the same group of exclusion cannot appear together; this only makes sense for dictionaries:
+
+ >>> with raises(er.MultipleInvalid, "two or more values in the same group of exclusion 'angles' @ data[<angles>]"):
+ ... schema({'alpha': 30, 'beta': 45})
+
+    For example, an API can provide multiple types of authentication, but only one may be used at a time:
+
+ >>> msg = 'Please, use only one type of authentication at the same time.'
+ >>> schema = Schema({
+ ... Exclusive('classic', 'auth', msg=msg):{
+ ... Required('email'): basestring,
+ ... Required('password'): basestring
+ ... },
+ ... Exclusive('internal', 'auth', msg=msg):{
+ ... Required('secret_key'): basestring
+ ... },
+ ... Exclusive('social', 'auth', msg=msg):{
+ ... Required('social_network'): basestring,
+ ... Required('token'): basestring
+ ... }
+ ... })
+
+ >>> with raises(er.MultipleInvalid, "Please, use only one type of authentication at the same time. @ data[<auth>]"):
+ ... schema({'classic': {'email': 'foo@example.com', 'password': 'bar'},
+ ... 'social': {'social_network': 'barfoo', 'token': 'tEMp'}})
+ """
+
+ def __init__(self, schema, group_of_exclusion, msg=None, description=None):
+ super(Exclusive, self).__init__(schema, msg=msg,
+ description=description)
+ self.group_of_exclusion = group_of_exclusion
+
+
+class Inclusive(Optional):
+ """ Mark a node in the schema as inclusive.
+
+    Inclusive keys inherit the behavior of Optional keys:
+
+ >>> schema = Schema({
+ ... Inclusive('filename', 'file'): str,
+ ... Inclusive('mimetype', 'file'): str
+ ... })
+ >>> data = {'filename': 'dog.jpg', 'mimetype': 'image/jpeg'}
+ >>> data == schema(data)
+ True
+
+    Keys inside the same group of inclusion must exist together; this only makes sense for dictionaries:
+
+ >>> with raises(er.MultipleInvalid, "some but not all values in the same group of inclusion 'file' @ data[<file>]"):
+ ... schema({'filename': 'dog.jpg'})
+
+ If none of the keys in the group are present, it is accepted:
+
+ >>> schema({})
+ {}
+
+    For example, an API may return 'height' and 'width' together, but not separately.
+
+ >>> msg = "Height and width must exist together"
+ >>> schema = Schema({
+ ... Inclusive('height', 'size', msg=msg): int,
+ ... Inclusive('width', 'size', msg=msg): int
+ ... })
+
+ >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
+ ... schema({'height': 100})
+
+ >>> with raises(er.MultipleInvalid, msg + " @ data[<size>]"):
+ ... schema({'width': 100})
+
+ >>> data = {'height': 100, 'width': 100}
+ >>> data == schema(data)
+ True
+ """
+
+ def __init__(self, schema, group_of_inclusion,
+ msg=None, description=None, default=UNDEFINED):
+ super(Inclusive, self).__init__(schema, msg=msg,
+ default=default,
+ description=description)
+ self.group_of_inclusion = group_of_inclusion
+
+
+class Required(Marker):
+ """Mark a node in the schema as being required, and optionally provide a default value.
+
+ >>> schema = Schema({Required('key'): str})
+ >>> with raises(er.MultipleInvalid, "required key not provided @ data['key']"):
+ ... schema({})
+
+ >>> schema = Schema({Required('key', default='value'): str})
+ >>> schema({})
+ {'key': 'value'}
+ >>> schema = Schema({Required('key', default=list): list})
+ >>> schema({})
+ {'key': []}
+ """
+
+ def __init__(self, schema, msg=None, default=UNDEFINED, description=None):
+ super(Required, self).__init__(schema, msg=msg,
+ description=description)
+ self.default = default_factory(default)
+
+
+class Remove(Marker):
+ """Mark a node in the schema to be removed and excluded from the validated
+ output. Keys that fail validation will not raise ``Invalid``. Instead, these
+ keys will be treated as extras.
+
+ >>> schema = Schema({str: int, Remove(int): str})
+ >>> with raises(er.MultipleInvalid, "extra keys not allowed @ data[1]"):
+ ... schema({'keep': 1, 1: 1.0})
+ >>> schema({1: 'red', 'red': 1, 2: 'green'})
+ {'red': 1}
+ >>> schema = Schema([int, Remove(float), Extra])
+ >>> schema([1, 2, 3, 4.0, 5, 6.0, '7'])
+ [1, 2, 3, 5, '7']
+ """
+
+ def __call__(self, v):
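+        # Validate as usual, then return the Remove class itself as a
+        # sentinel; the mapping/sequence validators check for it and drop
+        # the value from the output.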
+ super(Remove, self).__call__(v)
+ return self.__class__
+
+ def __repr__(self):
+ return "Remove(%r)" % (self.schema,)
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+
+def message(default=None, cls=None):
+ """Convenience decorator to allow functions to provide a message.
+
+ Set a default message:
+
+ >>> @message('not an integer')
+ ... def isint(v):
+ ... return int(v)
+
+ >>> validate = Schema(isint())
+ >>> with raises(er.MultipleInvalid, 'not an integer'):
+ ... validate('a')
+
+ The message can be overridden on a per validator basis:
+
+ >>> validate = Schema(isint('bad'))
+ >>> with raises(er.MultipleInvalid, 'bad'):
+ ... validate('a')
+
+    The exception class raised can be overridden too:
+
+ >>> class IntegerInvalid(er.Invalid): pass
+ >>> validate = Schema(isint('bad', clsoverride=IntegerInvalid))
+ >>> try:
+ ... validate('a')
+ ... except er.MultipleInvalid as e:
+ ... assert isinstance(e.errors[0], IntegerInvalid)
+ """
+ if cls and not issubclass(cls, er.Invalid):
+ raise er.SchemaError("message can only use subclases of Invalid as custom class")
+
+ def decorator(f):
+ @wraps(f)
+ def check(msg=None, clsoverride=None):
+ @wraps(f)
+ def wrapper(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except ValueError:
+ raise (clsoverride or cls or er.ValueInvalid)(msg or default or 'invalid value')
+
+ return wrapper
+
+ return check
+
+ return decorator
+
+
+def _args_to_dict(func, args):
+ """Returns argument names as values as key-value pairs."""
+ if sys.version_info >= (3, 0):
+ arg_count = func.__code__.co_argcount
+ arg_names = func.__code__.co_varnames[:arg_count]
+ else:
+ arg_count = func.func_code.co_argcount
+ arg_names = func.func_code.co_varnames[:arg_count]
+
+ arg_value_list = list(args)
+ arguments = dict((arg_name, arg_value_list[i])
+ for i, arg_name in enumerate(arg_names)
+ if i < len(arg_value_list))
+ return arguments
+
+
+def _merge_args_with_kwargs(args_dict, kwargs_dict):
+ """Merge args with kwargs."""
+ ret = args_dict.copy()
+ ret.update(kwargs_dict)
+ return ret
+
+
+def validate(*a, **kw):
+ """Decorator for validating arguments of a function against a given schema.
+
+ Set restrictions for arguments:
+
+ >>> @validate(arg1=int, arg2=int)
+ ... def foo(arg1, arg2):
+ ... return arg1 * arg2
+
+    Set a restriction for the returned value:
+
+ >>> @validate(arg=int, __return__=int)
+ ... def bar(arg1):
+ ... return arg1 * 2
+
+ """
+ RETURNS_KEY = '__return__'
+
+ def validate_schema_decorator(func):
+
+ returns_defined = False
+ returns = None
+
+ schema_args_dict = _args_to_dict(func, a)
+ schema_arguments = _merge_args_with_kwargs(schema_args_dict, kw)
+
+ if RETURNS_KEY in schema_arguments:
+ returns_defined = True
+ returns = schema_arguments[RETURNS_KEY]
+ del schema_arguments[RETURNS_KEY]
+
+ input_schema = (Schema(schema_arguments, extra=ALLOW_EXTRA)
+ if len(schema_arguments) != 0 else lambda x: x)
+ output_schema = Schema(returns) if returns_defined else lambda x: x
+
+ @wraps(func)
+ def func_wrapper(*args, **kwargs):
+ args_dict = _args_to_dict(func, args)
+ arguments = _merge_args_with_kwargs(args_dict, kwargs)
+ validated_arguments = input_schema(arguments)
+ output = func(**validated_arguments)
+ return output_schema(output)
+
+ return func_wrapper
+
+ return validate_schema_decorator
diff --git a/third_party/python/voluptuous/voluptuous/util.py b/third_party/python/voluptuous/voluptuous/util.py
new file mode 100644
index 0000000000..e0ff43f850
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous/util.py
@@ -0,0 +1,162 @@
+import sys
+
+from voluptuous.error import LiteralInvalid, TypeInvalid, Invalid
+from voluptuous.schema_builder import Schema, default_factory, raises
+from voluptuous import validators
+
+__author__ = 'tusharmakkar08'
+
+
+def _fix_str(v):
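+    # Py2/Py3 compatibility: keep unicode untouched on Python 2, otherwise
+    # coerce to str.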
+ if sys.version_info[0] == 2 and isinstance(v, unicode):
+ s = v
+ else:
+ s = str(v)
+ return s
+
+
+def Lower(v):
+ """Transform a string to lower case.
+
+ >>> s = Schema(Lower)
+ >>> s('HI')
+ 'hi'
+ """
+ return _fix_str(v).lower()
+
+
+def Upper(v):
+ """Transform a string to upper case.
+
+ >>> s = Schema(Upper)
+ >>> s('hi')
+ 'HI'
+ """
+ return _fix_str(v).upper()
+
+
+def Capitalize(v):
+ """Capitalise a string.
+
+ >>> s = Schema(Capitalize)
+ >>> s('hello world')
+ 'Hello world'
+ """
+ return _fix_str(v).capitalize()
+
+
+def Title(v):
+ """Title case a string.
+
+ >>> s = Schema(Title)
+ >>> s('hello world')
+ 'Hello World'
+ """
+ return _fix_str(v).title()
+
+
+def Strip(v):
+ """Strip whitespace from a string.
+
+ >>> s = Schema(Strip)
+ >>> s(' hello world ')
+ 'hello world'
+ """
+ return _fix_str(v).strip()
+
+
+class DefaultTo(object):
+ """Sets a value to default_value if none provided.
+
+ >>> s = Schema(DefaultTo(42))
+ >>> s(None)
+ 42
+ >>> s = Schema(DefaultTo(list))
+ >>> s(None)
+ []
+ """
+
+ def __init__(self, default_value, msg=None):
+ self.default_value = default_factory(default_value)
+ self.msg = msg
+
+ def __call__(self, v):
+ if v is None:
+ v = self.default_value()
+ return v
+
+ def __repr__(self):
+ return 'DefaultTo(%s)' % (self.default_value(),)
+
+
+class SetTo(object):
+ """Set a value, ignoring any previous value.
+
+ >>> s = Schema(validators.Any(int, SetTo(42)))
+ >>> s(2)
+ 2
+ >>> s("foo")
+ 42
+ """
+
+ def __init__(self, value):
+ self.value = default_factory(value)
+
+ def __call__(self, v):
+ return self.value()
+
+ def __repr__(self):
+ return 'SetTo(%s)' % (self.value(),)
+
+
+class Set(object):
+ """Convert a list into a set.
+
+ >>> s = Schema(Set())
+ >>> s([]) == set([])
+ True
+ >>> s([1, 2]) == set([1, 2])
+ True
+ >>> with raises(Invalid, regex="^cannot be presented as set: "):
+ ... s([set([1, 2]), set([3, 4])])
+ """
+
+ def __init__(self, msg=None):
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ set_v = set(v)
+ except Exception as e:
+ raise TypeInvalid(
+ self.msg or 'cannot be presented as set: {0}'.format(e))
+ return set_v
+
+ def __repr__(self):
+ return 'Set()'
+
+
+class Literal(object):
+ def __init__(self, lit):
+ self.lit = lit
+
+ def __call__(self, value, msg=None):
+ if self.lit != value:
+ raise LiteralInvalid(
+                msg or '%s does not match %s' % (value, self.lit)
+ )
+ else:
+ return self.lit
+
+ def __str__(self):
+ return str(self.lit)
+
+ def __repr__(self):
+ return repr(self.lit)
+
+
+def u(x):
+ if sys.version_info < (3,):
+ return unicode(x)
+ else:
+ return x
diff --git a/third_party/python/voluptuous/voluptuous/validators.py b/third_party/python/voluptuous/voluptuous/validators.py
new file mode 100644
index 0000000000..fac9cc7717
--- /dev/null
+++ b/third_party/python/voluptuous/voluptuous/validators.py
@@ -0,0 +1,1080 @@
+import os
+import re
+import datetime
+import sys
+from functools import wraps
+from decimal import Decimal, InvalidOperation
+
+from voluptuous.schema_builder import Schema, raises, message
+from voluptuous.error import (MultipleInvalid, CoerceInvalid, TrueInvalid, FalseInvalid, BooleanInvalid, Invalid,
+ AnyInvalid, AllInvalid, MatchInvalid, UrlInvalid, EmailInvalid, FileInvalid, DirInvalid,
+ RangeInvalid, PathInvalid, ExactSequenceInvalid, LengthInvalid, DatetimeInvalid,
+ DateInvalid, InInvalid, TypeInvalid, NotInInvalid, ContainsInvalid, NotEnoughValid,
+ TooManyValid)
+
+if sys.version_info >= (3,):
+ import urllib.parse as urlparse
+
+ basestring = str
+else:
+ import urlparse
+
+# Taken from https://github.com/kvesteri/validators/blob/master/validators/email.py
+USER_REGEX = re.compile(
+ # dot-atom
+ r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+"
+ r"(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*$"
+ # quoted-string
+ r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|'
+ r"""\\[\001-\011\013\014\016-\177])*"$)""",
+ re.IGNORECASE
+)
+DOMAIN_REGEX = re.compile(
+ # domain
+ r'(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
+ r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?$)'
+ # literal form, ipv4 address (SMTP 4.1.3)
+ r'|^\[(25[0-5]|2[0-4]\d|[0-1]?\d?\d)'
+ r'(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}\]$',
+ re.IGNORECASE)
+
+__author__ = 'tusharmakkar08'
+
+
+def truth(f):
+ """Convenience decorator to convert truth functions into validators.
+
+ >>> @truth
+ ... def isdir(v):
+ ... return os.path.isdir(v)
+ >>> validate = Schema(isdir)
+ >>> validate('/')
+ '/'
+ >>> with raises(MultipleInvalid, 'not a valid value'):
+ ... validate('/notavaliddir')
+ """
+
+ @wraps(f)
+ def check(v):
+ t = f(v)
+ if not t:
+ raise ValueError
+ return v
+
+ return check
+
+
+class Coerce(object):
+ """Coerce a value to a type.
+
+ If the type constructor throws a ValueError, TypeError, or
+ decimal.InvalidOperation, the value will be marked as Invalid.
+
+ Default behavior:
+
+ >>> validate = Schema(Coerce(int))
+ >>> with raises(MultipleInvalid, 'expected int'):
+ ... validate(None)
+ >>> with raises(MultipleInvalid, 'expected int'):
+ ... validate('foo')
+
+ With custom message:
+
+ >>> validate = Schema(Coerce(int, "moo"))
+ >>> with raises(MultipleInvalid, 'moo'):
+ ... validate('foo')
+ """
+
+ def __init__(self, type, msg=None):
+ self.type = type
+ self.msg = msg
+ self.type_name = type.__name__
+
+ def __call__(self, v):
+ try:
+ return self.type(v)
+ except (ValueError, TypeError, InvalidOperation):
+ msg = self.msg or ('expected %s' % self.type_name)
+ raise CoerceInvalid(msg)
+
+ def __repr__(self):
+ return 'Coerce(%s, msg=%r)' % (self.type_name, self.msg)
+
+
+@message('value was not true', cls=TrueInvalid)
+@truth
+def IsTrue(v):
+ """Assert that a value is true, in the Python sense.
+
+ >>> validate = Schema(IsTrue())
+
+ "In the Python sense" means that implicitly false values, such as empty
+ lists, dictionaries, etc. are treated as "false":
+
+ >>> with raises(MultipleInvalid, "value was not true"):
+ ... validate([])
+ >>> validate([1])
+ [1]
+ >>> with raises(MultipleInvalid, "value was not true"):
+ ... validate(False)
+
+ ...and so on.
+
+ >>> try:
+ ... validate([])
+ ... except MultipleInvalid as e:
+ ... assert isinstance(e.errors[0], TrueInvalid)
+ """
+ return v
+
+
+@message('value was not false', cls=FalseInvalid)
+def IsFalse(v):
+ """Assert that a value is false, in the Python sense.
+
+ (see :func:`IsTrue` for more detail)
+
+ >>> validate = Schema(IsFalse())
+ >>> validate([])
+ []
+ >>> with raises(MultipleInvalid, "value was not false"):
+ ... validate(True)
+
+ >>> try:
+ ... validate(True)
+ ... except MultipleInvalid as e:
+ ... assert isinstance(e.errors[0], FalseInvalid)
+ """
+ if v:
+ raise ValueError
+ return v
+
+
+@message('expected boolean', cls=BooleanInvalid)
+def Boolean(v):
+ """Convert human-readable boolean values to a bool.
+
+ Accepted values are 1, true, yes, on, enable, and their negatives
+ (0, false, no, off, disable).
+ Non-string values are cast to bool.
+
+ >>> validate = Schema(Boolean())
+ >>> validate(True)
+ True
+ >>> validate("1")
+ True
+ >>> validate("0")
+ False
+ >>> with raises(MultipleInvalid, "expected boolean"):
+ ... validate('moo')
+ >>> try:
+ ... validate('moo')
+ ... except MultipleInvalid as e:
+ ... assert isinstance(e.errors[0], BooleanInvalid)
+ """
+ if isinstance(v, basestring):
+ v = v.lower()
+ if v in ('1', 'true', 'yes', 'on', 'enable'):
+ return True
+ if v in ('0', 'false', 'no', 'off', 'disable'):
+ return False
+ raise ValueError
+ return bool(v)
+
+
+class _WithSubValidators(object):
+ """Base class for validators that use sub-validators.
+
+ This class provides the `__voluptuous_compile__` method, so that the
+ sub-validators are compiled by the parent `Schema`.
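+
+ A minimal sketch of a subclass (illustrative only, not part of the
+ upstream API), which only needs to implement ``_exec``::
+
+ class First(_WithSubValidators):
+ def _exec(self, funcs, v, path=None):
+ for func in funcs:
+ try:
+ return func(v) if path is None else func(path, v)
+ except Invalid:
+ pass
+ raise AnyInvalid(self.msg or 'no valid value found', path=path)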
+ """
+
+ def __init__(self, *validators, **kwargs):
+ self.validators = validators
+ self.msg = kwargs.pop('msg', None)
+ self.required = kwargs.pop('required', False)
+ self.discriminant = kwargs.pop('discriminant', None)
+
+ def __voluptuous_compile__(self, schema):
+ self._compiled = []
+ old_required = schema.required
+ self.schema = schema
+ for v in self.validators:
+ schema.required = self.required
+ self._compiled.append(schema._compile(v))
+ schema.required = old_required
+ return self._run
+
+ def _run(self, path, value):
+ if self.discriminant is not None:
+ self._compiled = [
+ self.schema._compile(v)
+ for v in self.discriminant(value, self.validators)
+ ]
+
+ return self._exec(self._compiled, value, path)
+
+ def __call__(self, v):
+ return self._exec((Schema(val) for val in self.validators), v)
+
+ def __repr__(self):
+ return '%s(%s, msg=%r)' % (
+ self.__class__.__name__,
+ ", ".join(repr(v) for v in self.validators),
+ self.msg
+ )
+
+
+class Any(_WithSubValidators):
+ """Use the first validated value.
+
+ :param msg: Message to deliver to user if validation fails.
+ :param kwargs: All other keyword arguments are passed to the sub-schema constructors.
+ :returns: Return value of the first validator that passes.
+
+ >>> validate = Schema(Any('true', 'false',
+ ... All(Any(int, bool), Coerce(bool))))
+ >>> validate('true')
+ 'true'
+ >>> validate(1)
+ True
+ >>> with raises(MultipleInvalid, "not a valid value"):
+ ... validate('moo')
+
+ msg argument is used
+
+ >>> validate = Schema(Any(1, 2, 3, msg="Expected 1 2 or 3"))
+ >>> validate(1)
+ 1
+ >>> with raises(MultipleInvalid, "Expected 1 2 or 3"):
+ ... validate(4)
+ """
+
+ def _exec(self, funcs, v, path=None):
+ error = None
+ for func in funcs:
+ try:
+ if path is None:
+ return func(v)
+ else:
+ return func(path, v)
+ except Invalid as e:
+ if error is None or len(e.path) > len(error.path):
+ error = e
+ else:
+ if error:
+ raise error if self.msg is None else AnyInvalid(
+ self.msg, path=path)
+ raise AnyInvalid(self.msg or 'no valid value found',
+ path=path)
+
+
+# Convenience alias
+Or = Any
+
+
+class Union(_WithSubValidators):
+ """Use the first validated value among those selected by discriminant.
+
+ :param msg: Message to deliver to user if validation fails.
+ :param discriminant(value, validators): Returns the filtered list of validators based on the value.
+ :param kwargs: All other keyword arguments are passed to the sub-schema constructors.
+ :returns: Return value of the first validator that passes.
+
+ >>> validate = Schema(Union({'type':'a', 'a_val':'1'},{'type':'b', 'b_val':'2'},
+ ... discriminant=lambda val, alt: filter(
+ ... lambda v : v['type'] == val['type'] , alt)))
+ >>> validate({'type':'a', 'a_val':'1'}) == {'type':'a', 'a_val':'1'}
+ True
+ >>> with raises(MultipleInvalid, "not a valid value for dictionary value @ data['b_val']"):
+ ... validate({'type':'b', 'b_val':'5'})
+
+ ``discriminant({'type':'b', 'a_val':'5'}, [{'type':'a', 'a_val':'1'},{'type':'b', 'b_val':'2'}])`` is invoked
+
+ Without the discriminant, the exception would be "extra keys not allowed @ data['b_val']"
+ """
+
+ def _exec(self, funcs, v, path=None):
+ error = None
+ for func in funcs:
+ try:
+ if path is None:
+ return func(v)
+ else:
+ return func(path, v)
+ except Invalid as e:
+ if error is None or len(e.path) > len(error.path):
+ error = e
+ else:
+ if error:
+ raise error if self.msg is None else AnyInvalid(
+ self.msg, path=path)
+ raise AnyInvalid(self.msg or 'no valid value found',
+ path=path)
+
+
+# Convenience alias
+Switch = Union
+
+
+class All(_WithSubValidators):
+ """Value must pass all validators.
+
+ The output of each validator is passed as input to the next.
+
+ :param msg: Message to deliver to user if validation fails.
+ :param kwargs: All other keyword arguments are passed to the sub-schema constructors.
+
+ >>> validate = Schema(All('10', Coerce(int)))
+ >>> validate('10')
+ 10
+ """
+
+ def _exec(self, funcs, v, path=None):
+ try:
+ for func in funcs:
+ if path is None:
+ v = func(v)
+ else:
+ v = func(path, v)
+ except Invalid as e:
+ raise e if self.msg is None else AllInvalid(self.msg, path=path)
+ return v
+
+
+# Convenience alias
+And = All
+
+
+class Match(object):
+ """Value must be a string that matches the regular expression.
+
+ >>> validate = Schema(Match(r'^0x[A-F0-9]+$'))
+ >>> validate('0x123EF4')
+ '0x123EF4'
+ >>> with raises(MultipleInvalid, "does not match regular expression"):
+ ... validate('123EF4')
+
+ >>> with raises(MultipleInvalid, 'expected string or buffer'):
+ ... validate(123)
+
+ Pattern may also be a compiled regular expression:
+
+ >>> validate = Schema(Match(re.compile(r'0x[A-F0-9]+', re.I)))
+ >>> validate('0x123ef4')
+ '0x123ef4'
+ """
+
+ def __init__(self, pattern, msg=None):
+ if isinstance(pattern, basestring):
+ pattern = re.compile(pattern)
+ self.pattern = pattern
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ match = self.pattern.match(v)
+ except TypeError:
+ raise MatchInvalid("expected string or buffer")
+ if not match:
+ raise MatchInvalid(self.msg or 'does not match regular expression')
+ return v
+
+ def __repr__(self):
+ return 'Match(%r, msg=%r)' % (self.pattern.pattern, self.msg)
+
+
+class Replace(object):
+ """Regex substitution.
+
+ >>> validate = Schema(All(Replace('you', 'I'),
+ ... Replace('hello', 'goodbye')))
+ >>> validate('you say hello')
+ 'I say goodbye'
+ """
+
+ def __init__(self, pattern, substitution, msg=None):
+ if isinstance(pattern, basestring):
+ pattern = re.compile(pattern)
+ self.pattern = pattern
+ self.substitution = substitution
+ self.msg = msg
+
+ def __call__(self, v):
+ return self.pattern.sub(self.substitution, v)
+
+ def __repr__(self):
+ return 'Replace(%r, %r, msg=%r)' % (self.pattern.pattern,
+ self.substitution,
+ self.msg)
+
+
+def _url_validation(v):
+ parsed = urlparse.urlparse(v)
+ if not parsed.scheme or not parsed.netloc:
+ raise UrlInvalid("must have a URL scheme and host")
+ return parsed
+
+
+@message('expected an email address', cls=EmailInvalid)
+def Email(v):
+ """Verify that the value is an email address or not.
+
+ >>> s = Schema(Email())
+ >>> with raises(MultipleInvalid, 'expected an email address'):
+ ... s("a.com")
+ >>> with raises(MultipleInvalid, 'expected an email address'):
+ ... s("a@.com")
+ >>> s('t@x.com')
+ 't@x.com'
+ """
+ try:
+ if not v or "@" not in v:
+ raise EmailInvalid("Invalid email address")
+ user_part, domain_part = v.rsplit('@', 1)
+
+ if not (USER_REGEX.match(user_part) and DOMAIN_REGEX.match(domain_part)):
+ raise EmailInvalid("Invalid email address")
+ return v
+ except:
+ raise ValueError
+
+
+@message('expected a fully qualified domain name URL', cls=UrlInvalid)
+def FqdnUrl(v):
+ """Verify that the value is a fully qualified domain name URL.
+
+ >>> s = Schema(FqdnUrl())
+ >>> with raises(MultipleInvalid, 'expected a fully qualified domain name URL'):
+ ... s("http://localhost/")
+ >>> s('http://w3.org')
+ 'http://w3.org'
+ """
+ try:
+ parsed_url = _url_validation(v)
+ if "." not in parsed_url.netloc:
+ raise UrlInvalid("must have a domain name in URL")
+ return v
+ except:
+ raise ValueError
+
+
+@message('expected a URL', cls=UrlInvalid)
+def Url(v):
+ """Verify that the value is a URL.
+
+ >>> s = Schema(Url())
+ >>> with raises(MultipleInvalid, 'expected a URL'):
+ ... s(1)
+ >>> s('http://w3.org')
+ 'http://w3.org'
+ """
+ try:
+ _url_validation(v)
+ return v
+ except:
+ raise ValueError
+
+
+@message('Not a file', cls=FileInvalid)
+@truth
+def IsFile(v):
+ """Verify the file exists.
+
+ >>> os.path.basename(IsFile()(__file__)).startswith('validators.py')
+ True
+ >>> with raises(FileInvalid, 'Not a file'):
+ ... IsFile()("random_filename_goes_here.py")
+ >>> with raises(FileInvalid, 'Not a file'):
+ ... IsFile()(None)
+ """
+ try:
+ if v:
+ v = str(v)
+ return os.path.isfile(v)
+ else:
+ raise FileInvalid('Not a file')
+ except TypeError:
+ raise FileInvalid('Not a file')
+
+
+@message('Not a directory', cls=DirInvalid)
+@truth
+def IsDir(v):
+ """Verify the directory exists.
+
+ >>> IsDir()('/')
+ '/'
+ >>> with raises(DirInvalid, 'Not a directory'):
+ ... IsDir()(None)
+ """
+ try:
+ if v:
+ v = str(v)
+ return os.path.isdir(v)
+ else:
+ raise DirInvalid("Not a directory")
+ except TypeError:
+ raise DirInvalid("Not a directory")
+
+
+@message('path does not exist', cls=PathInvalid)
+@truth
+def PathExists(v):
+ """Verify the path exists, regardless of its type.
+
+ >>> os.path.basename(PathExists()(__file__)).startswith('validators.py')
+ True
+ >>> with raises(Invalid, 'path does not exist'):
+ ... PathExists()("random_filename_goes_here.py")
+ >>> with raises(PathInvalid, 'Not a Path'):
+ ... PathExists()(None)
+ """
+ try:
+ if v:
+ v = str(v)
+ return os.path.exists(v)
+ else:
+ raise PathInvalid("Not a Path")
+ except TypeError:
+ raise PathInvalid("Not a Path")
+
+
+def Maybe(validator, msg=None):
+ """Validate that the object matches given validator or is None.
+
+ :raises Invalid: If the value does not match the given validator and is not
+ None.
+
+ >>> s = Schema(Maybe(int))
+ >>> s(10)
+ 10
+ >>> with raises(Invalid):
+ ... s("string")
+
+ """
+ return Any(validator, None, msg=msg)
+
+
+class Range(object):
+ """Limit a value to a range.
+
+ Either min or max may be omitted.
+ Either min or max can be excluded from the range of accepted values.
+
+ :raises Invalid: If the value is outside the range.
+
+ >>> s = Schema(Range(min=1, max=10, min_included=False))
+ >>> s(5)
+ 5
+ >>> s(10)
+ 10
+ >>> with raises(MultipleInvalid, 'value must be at most 10'):
+ ... s(20)
+ >>> with raises(MultipleInvalid, 'value must be higher than 1'):
+ ... s(1)
+ >>> with raises(MultipleInvalid, 'value must be lower than 10'):
+ ... Schema(Range(max=10, max_included=False))(20)
+ """
+
+ def __init__(self, min=None, max=None, min_included=True,
+ max_included=True, msg=None):
+ self.min = min
+ self.max = max
+ self.min_included = min_included
+ self.max_included = max_included
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ if self.min_included:
+ if self.min is not None and not v >= self.min:
+ raise RangeInvalid(
+ self.msg or 'value must be at least %s' % self.min)
+ else:
+ if self.min is not None and not v > self.min:
+ raise RangeInvalid(
+ self.msg or 'value must be higher than %s' % self.min)
+ if self.max_included:
+ if self.max is not None and not v <= self.max:
+ raise RangeInvalid(
+ self.msg or 'value must be at most %s' % self.max)
+ else:
+ if self.max is not None and not v < self.max:
+ raise RangeInvalid(
+ self.msg or 'value must be lower than %s' % self.max)
+
+ return v
+
+ # Objects that lack a partial ordering, e.g. None or strings, will raise TypeError
+ except TypeError:
+ raise RangeInvalid(
+ self.msg or 'invalid value or type (must have a partial ordering)')
+
+ def __repr__(self):
+ return ('Range(min=%r, max=%r, min_included=%r,'
+ ' max_included=%r, msg=%r)' % (self.min, self.max,
+ self.min_included,
+ self.max_included,
+ self.msg))
+
+
+class Clamp(object):
+ """Clamp a value to a range.
+
+ Either min or max may be omitted.
+
+ >>> s = Schema(Clamp(min=0, max=1))
+ >>> s(0.5)
+ 0.5
+ >>> s(5)
+ 1
+ >>> s(-1)
+ 0
+ """
+
+ def __init__(self, min=None, max=None, msg=None):
+ self.min = min
+ self.max = max
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ if self.min is not None and v < self.min:
+ v = self.min
+ if self.max is not None and v > self.max:
+ v = self.max
+ return v
+
+ # Objects that lack a partial ordering, e.g. None or strings, will raise TypeError
+ except TypeError:
+ raise RangeInvalid(
+ self.msg or 'invalid value or type (must have a partial ordering)')
+
+ def __repr__(self):
+ return 'Clamp(min=%s, max=%s)' % (self.min, self.max)
+
+
+class Length(object):
+ """The length of a value must be in a certain range."""
+
+ def __init__(self, min=None, max=None, msg=None):
+ self.min = min
+ self.max = max
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ if self.min is not None and len(v) < self.min:
+ raise LengthInvalid(
+ self.msg or 'length of value must be at least %s' % self.min)
+ if self.max is not None and len(v) > self.max:
+ raise LengthInvalid(
+ self.msg or 'length of value must be at most %s' % self.max)
+ return v
+
+ # Objects that have no length, e.g. None or int, will raise TypeError
+ except TypeError:
+ raise RangeInvalid(
+ self.msg or 'invalid value or type')
+
+ def __repr__(self):
+ return 'Length(min=%s, max=%s)' % (self.min, self.max)
+
+
+class Datetime(object):
+ """Validate that the value matches the datetime format."""
+
+ DEFAULT_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
+
+ def __init__(self, format=None, msg=None):
+ self.format = format or self.DEFAULT_FORMAT
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ datetime.datetime.strptime(v, self.format)
+ except (TypeError, ValueError):
+ raise DatetimeInvalid(
+ self.msg or 'value does not match'
+ ' expected format %s' % self.format)
+ return v
+
+ def __repr__(self):
+ return 'Datetime(format=%s)' % self.format
+
+
+class Date(Datetime):
+ """Validate that the value matches the date format."""
+
+ DEFAULT_FORMAT = '%Y-%m-%d'
+
+ def __call__(self, v):
+ try:
+ datetime.datetime.strptime(v, self.format)
+ except (TypeError, ValueError):
+ raise DateInvalid(
+ self.msg or 'value does not match'
+ ' expected format %s' % self.format)
+ return v
+
+ def __repr__(self):
+ return 'Date(format=%s)' % self.format
+
+
+class In(object):
+ """Validate that a value is in a collection."""
+
+ def __init__(self, container, msg=None):
+ self.container = container
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ check = v not in self.container
+ except TypeError:
+ check = True
+ if check:
+ raise InInvalid(self.msg or
+ 'value must be one of {}'.format(sorted(self.container)))
+ return v
+
+ def __repr__(self):
+ return 'In(%s)' % (self.container,)
+
+
+class NotIn(object):
+ """Validate that a value is not in a collection."""
+
+ def __init__(self, container, msg=None):
+ self.container = container
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ check = v in self.container
+ except TypeError:
+ check = True
+ if check:
+ raise NotInInvalid(self.msg or
+ 'value must not be one of {}'.format(sorted(self.container)))
+ return v
+
+ def __repr__(self):
+ return 'NotIn(%s)' % (self.container,)
+
+
+class Contains(object):
+ """Validate that the given schema element is in the sequence being validated.
+
+ >>> s = Contains(1)
+ >>> s([3, 2, 1])
+ [3, 2, 1]
+ >>> with raises(ContainsInvalid, 'value is not allowed'):
+ ... s([3, 2])
+ """
+
+ def __init__(self, item, msg=None):
+ self.item = item
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ check = self.item not in v
+ except TypeError:
+ check = True
+ if check:
+ raise ContainsInvalid(self.msg or 'value is not allowed')
+ return v
+
+ def __repr__(self):
+ return 'Contains(%s)' % (self.item,)
+
+
+class ExactSequence(object):
+ """Matches each element in a sequence against the corresponding element in
+ the validators.
+
+ :param msg: Message to deliver to user if validation fails.
+ :param kwargs: All other keyword arguments are passed to the sub-schema
+ constructors.
+
+ >>> from voluptuous import Schema, ExactSequence
+ >>> validate = Schema(ExactSequence([str, int, list, list]))
+ >>> validate(['hourly_report', 10, [], []])
+ ['hourly_report', 10, [], []]
+ >>> validate(('hourly_report', 10, [], []))
+ ('hourly_report', 10, [], [])
+ """
+
+ def __init__(self, validators, **kwargs):
+ self.validators = validators
+ self.msg = kwargs.pop('msg', None)
+ self._schemas = [Schema(val, **kwargs) for val in validators]
+
+ def __call__(self, v):
+ if not isinstance(v, (list, tuple)) or len(v) != len(self._schemas):
+ raise ExactSequenceInvalid(self.msg)
+ try:
+ v = type(v)(schema(x) for x, schema in zip(v, self._schemas))
+ except Invalid as e:
+ raise e if self.msg is None else ExactSequenceInvalid(self.msg)
+ return v
+
+ def __repr__(self):
+ return 'ExactSequence([%s])' % (", ".join(repr(v)
+ for v in self.validators))
+
+
+class Unique(object):
+ """Ensure an iterable does not contain duplicate items.
+
+ Only iterables convertible to a set are supported (native types and
+ objects with correct __hash__ and __eq__).
+
+ JSON does not support sets, so they need to be represented as arrays.
+ Unique allows ensuring that such an array does not contain duplicates.
+
+ >>> s = Schema(Unique())
+ >>> s([])
+ []
+ >>> s([1, 2])
+ [1, 2]
+ >>> with raises(Invalid, 'contains duplicate items: [1]'):
+ ... s([1, 1, 2])
+ >>> with raises(Invalid, "contains duplicate items: ['one']"):
+ ... s(['one', 'two', 'one'])
+ >>> with raises(Invalid, regex="^contains unhashable elements: "):
+ ... s([set([1, 2]), set([3, 4])])
+ >>> s('abc')
+ 'abc'
+ >>> with raises(Invalid, regex="^contains duplicate items: "):
+ ... s('aabbc')
+ """
+
+ def __init__(self, msg=None):
+ self.msg = msg
+
+ def __call__(self, v):
+ try:
+ set_v = set(v)
+ except TypeError as e:
+ raise TypeInvalid(
+ self.msg or 'contains unhashable elements: {0}'.format(e))
+ if len(set_v) != len(v):
+ seen = set()
+ dupes = list(set(x for x in v if x in seen or seen.add(x)))
+ raise Invalid(
+ self.msg or 'contains duplicate items: {0}'.format(dupes))
+ return v
+
+ def __repr__(self):
+ return 'Unique()'
+
+
+class Equal(object):
+ """Ensure that value matches target.
+
+ >>> s = Schema(Equal(1))
+ >>> s(1)
+ 1
+ >>> with raises(Invalid):
+ ... s(2)
+
+ Validators are not supported, match must be exact:
+
+ >>> s = Schema(Equal(str))
+ >>> with raises(Invalid):
+ ... s('foo')
+ """
+
+ def __init__(self, target, msg=None):
+ self.target = target
+ self.msg = msg
+
+ def __call__(self, v):
+ if v != self.target:
+ raise Invalid(self.msg or 'Values are not equal: value:{} != target:{}'.format(v, self.target))
+ return v
+
+ def __repr__(self):
+ return 'Equal({})'.format(self.target)
+
+
+class Unordered(object):
+ """Ensures sequence contains values in unspecified order.
+
+ >>> s = Schema(Unordered([2, 1]))
+ >>> s([2, 1])
+ [2, 1]
+ >>> s([1, 2])
+ [1, 2]
+ >>> s = Schema(Unordered([str, int]))
+ >>> s(['foo', 1])
+ ['foo', 1]
+ >>> s([1, 'foo'])
+ [1, 'foo']
+ """
+
+ def __init__(self, validators, msg=None, **kwargs):
+ self.validators = validators
+ self.msg = msg
+ self._schemas = [Schema(val, **kwargs) for val in validators]
+
+ def __call__(self, v):
+ if not isinstance(v, (list, tuple)):
+ raise Invalid(self.msg or 'Value {} is not sequence!'.format(v))
+
+ if len(v) != len(self._schemas):
+ raise Invalid(self.msg or 'List lengths differ, value:{} != target:{}'.format(len(v), len(self._schemas)))
+
+ consumed = set()
+ missing = []
+ for index, value in enumerate(v):
+ found = False
+ for i, s in enumerate(self._schemas):
+ if i in consumed:
+ continue
+ try:
+ s(value)
+ except Invalid:
+ pass
+ else:
+ found = True
+ consumed.add(i)
+ break
+ if not found:
+ missing.append((index, value))
+
+ if len(missing) == 1:
+ el = missing[0]
+ raise Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(el[0], el[1]))
+ elif missing:
+ raise MultipleInvalid([Invalid(self.msg or 'Element #{} ({}) is not valid against any validator'.format(
+ el[0], el[1])) for el in missing])
+ return v
+
+ def __repr__(self):
+ return 'Unordered([{}])'.format(", ".join(repr(v) for v in self.validators))
+
+
+class Number(object):
+ """
+ Verify the number of digits present in the number (precision) and the
+ number of decimal places (scale).
+
+ :raises Invalid: If the value does not match the provided precision and scale.
+
+ >>> schema = Schema(Number(precision=6, scale=2))
+ >>> schema('1234.01')
+ '1234.01'
+ >>> schema = Schema(Number(precision=6, scale=2, yield_decimal=True))
+ >>> schema('1234.01')
+ Decimal('1234.01')
+ """
+
+ def __init__(self, precision=None, scale=None, msg=None, yield_decimal=False):
+ self.precision = precision
+ self.scale = scale
+ self.msg = msg
+ self.yield_decimal = yield_decimal
+
+ def __call__(self, v):
+ """
+ :param v: a number represented as a string
+ :return: Decimal number
+ """
+ precision, scale, decimal_num = self._get_precision_scale(v)
+
+ if self.precision is not None and self.scale is not None and precision != self.precision\
+ and scale != self.scale:
+ raise Invalid(self.msg or "Precision must be equal to %s, and Scale must be equal to %s" % (self.precision,
+ self.scale))
+ else:
+ if self.precision is not None and precision != self.precision:
+ raise Invalid(self.msg or "Precision must be equal to %s" % self.precision)
+
+ if self.scale is not None and scale != self.scale:
+ raise Invalid(self.msg or "Scale must be equal to %s" % self.scale)
+
+ if self.yield_decimal:
+ return decimal_num
+ else:
+ return v
+
+ def __repr__(self):
+ return ('Number(precision=%s, scale=%s, msg=%s)' % (self.precision, self.scale, self.msg))
+
+ def _get_precision_scale(self, number):
+ """
+ :param number: the value to check, typically a numeric string
+ :return: tuple(precision, scale, decimal_number)
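+
+ For example (an illustrative note): ``Decimal('1234.01').as_tuple()``
+ has six digits and exponent ``-2``, so this returns
+ ``(6, 2, Decimal('1234.01'))``.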
+ """
+ try:
+ decimal_num = Decimal(number)
+ except InvalidOperation:
+ raise Invalid(self.msg or 'Value must be a number enclosed with string')
+
+ return (len(decimal_num.as_tuple().digits), -(decimal_num.as_tuple().exponent), decimal_num)
+
+
+class SomeOf(_WithSubValidators):
+ """Value must pass at least some validations, determined by the given parameter.
+ Optionally, number of passed validations can be capped.
+
+ The output of each validator is passed as input to the next.
+
+ :param min_valid: Minimum number of valid schemas.
+ :param validators: List of schemas or validators to match input against.
+ :param max_valid: Maximum number of valid schemas.
+ :param msg: Message to deliver to user if validation fails.
+ :param kwargs: All other keyword arguments are passed to the sub-schema constructors.
+
+ :raises NotEnoughValid: If the minimum number of validations isn't met.
+ :raises TooManyValid: If the maximum number of validations is exceeded.
+
+ >>> validate = Schema(SomeOf(min_valid=2, validators=[Range(1, 5), Any(float, int), 6.6]))
+ >>> validate(6.6)
+ 6.6
+ >>> validate(3)
+ 3
+ >>> with raises(MultipleInvalid, 'value must be at most 5, not a valid value'):
+ ... validate(6.2)
+ """
+
+ def __init__(self, validators, min_valid=None, max_valid=None, **kwargs):
+ assert min_valid is not None or max_valid is not None, \
+ 'when using "%s" you should specify at least one of min_valid and max_valid' % (type(self).__name__,)
+ self.min_valid = min_valid or 0
+ self.max_valid = max_valid or len(validators)
+ super(SomeOf, self).__init__(*validators, **kwargs)
+
+ def _exec(self, funcs, v, path=None):
+ errors = []
+ funcs = list(funcs)
+ for func in funcs:
+ try:
+ if path is None:
+ v = func(v)
+ else:
+ v = func(path, v)
+ except Invalid as e:
+ errors.append(e)
+
+ passed_count = len(funcs) - len(errors)
+ if self.min_valid <= passed_count <= self.max_valid:
+ return v
+
+ msg = self.msg
+ if not msg:
+ msg = ', '.join(map(str, errors))
+
+ if passed_count > self.max_valid:
+ raise TooManyValid(msg)
+ raise NotEnoughValid(msg)
+
+ def __repr__(self):
+ return 'SomeOf(min_valid=%s, validators=[%s], max_valid=%s, msg=%r)' % (
+ self.min_valid, ", ".join(repr(v) for v in self.validators), self.max_valid, self.msg)
diff --git a/third_party/python/vsdownload/LICENSE.txt b/third_party/python/vsdownload/LICENSE.txt
new file mode 100644
index 0000000000..4b4682af7b
--- /dev/null
+++ b/third_party/python/vsdownload/LICENSE.txt
@@ -0,0 +1,20 @@
+The msvc-wine project - the scripts for downloading and setting up the
+toolchain - is licensed under the ISC license.
+
+This license only covers the scripts themselves. In particular, it does
+not cover the downloaded and installed tools.
+
+
+The ISC license:
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/third_party/python/vsdownload/moz.yaml b/third_party/python/vsdownload/moz.yaml
new file mode 100644
index 0000000000..9f8934c188
--- /dev/null
+++ b/third_party/python/vsdownload/moz.yaml
@@ -0,0 +1,45 @@
+# Version of this schema
+schema: 1
+
+bugzilla:
+ # Bugzilla product and component for this directory and subdirectories
+ product: "Firefox Build System"
+ component: "General"
+
+# Document the source of externally hosted code
+origin:
+
+ # Short name of the package/library
+ name: msvc-wine
+
+ description: MSVC download script
+
+ # Full URL for the package's homepage/etc
+ # Usually different from repository url
+ url: https://github.com/mstorsjo/msvc-wine/
+
+ # Human-readable identifier for this version/release
+ # Generally "version NNN", "tag SSS", "bookmark SSS"
+ release: 18102a0b3e701b43169294521d1b4dbf5b1845d4 (2023-04-13T20:31:12Z).
+
+ # Revision to pull in
+ # Must be a long or short commit SHA (long preferred)
+ revision: 18102a0b3e701b43169294521d1b4dbf5b1845d4
+
+ # The package's license, where possible using the mnemonic from
+ # https://spdx.org/licenses/
+ # Multiple licenses can be specified (as a YAML list)
+ # A "LICENSE" file must exist containing the full license text
+ license: ISC
+
+vendoring:
+ url: https://github.com/mstorsjo/msvc-wine
+ source-hosting: github
+ vendor-directory: third_party/python/vsdownload
+
+ exclude:
+ - "**"
+
+ include:
+ - vsdownload.py
+ - LICENSE.txt
diff --git a/third_party/python/vsdownload/vsdownload.py b/third_party/python/vsdownload/vsdownload.py
new file mode 100755
index 0000000000..147e29cc14
--- /dev/null
+++ b/third_party/python/vsdownload/vsdownload.py
@@ -0,0 +1,635 @@
+#!/usr/bin/python3
+#
+# Copyright (c) 2019 Martin Storsjo
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+import argparse
+import functools
+import hashlib
+import os
+import multiprocessing.pool
+try:
+ import simplejson
+except ModuleNotFoundError:
+ import json as simplejson
+import six
+import shutil
+import socket
+import subprocess
+import sys
+import tempfile
+import zipfile
+
+def getArgsParser():
+ parser = argparse.ArgumentParser(description = "Download and install Visual Studio")
+ parser.add_argument("--manifest", metavar="manifest", help="A predownloaded manifest file")
+ parser.add_argument("--save-manifest", const=True, action="store_const", help="Store the downloaded manifest to a file")
+ parser.add_argument("--major", default=17, metavar="version", help="The major version to download (defaults to 17)")
+ parser.add_argument("--preview", dest="type", default="release", const="pre", action="store_const", help="Download the preview version instead of the release version")
+ parser.add_argument("--cache", metavar="dir", help="Directory to use as a persistent cache for downloaded files")
+ parser.add_argument("--dest", metavar="dir", help="Directory to install into")
+ parser.add_argument("package", metavar="package", help="Package to install. If omitted, installs the default command line tools.", nargs="*")
+ parser.add_argument("--ignore", metavar="component", help="Package to skip", action="append")
+ parser.add_argument("--accept-license", const=True, action="store_const", help="Don't prompt for accepting the license")
+ parser.add_argument("--print-version", const=True, action="store_const", help="Stop after fetching the manifest")
+ parser.add_argument("--list-workloads", const=True, action="store_const", help="List high level workloads")
+ parser.add_argument("--list-components", const=True, action="store_const", help="List available components")
+ parser.add_argument("--list-packages", const=True, action="store_const", help="List all individual packages, regardless of type")
+ parser.add_argument("--include-optional", const=True, action="store_const", help="Include all optional dependencies")
+ parser.add_argument("--skip-recommended", const=True, action="store_const", help="Don't include recommended dependencies")
+ parser.add_argument("--print-deps-tree", const=True, action="store_const", help="Print a tree of resolved dependencies for the given selection")
+ parser.add_argument("--print-reverse-deps", const=True, action="store_const", help="Print a tree of packages that depend on the given selection")
+ parser.add_argument("--print-selection", const=True, action="store_const", help="Print a list of the individual packages that are selected to be installed")
+ parser.add_argument("--only-download", const=True, action="store_const", help="Stop after downloading package files")
+ parser.add_argument("--only-unpack", const=True, action="store_const", help="Unpack the selected packages and keep all files, in the layout they are unpacked, don't restructure and prune files other than what's needed for MSVC CLI tools")
+ parser.add_argument("--keep-unpack", const=True, action="store_const", help="Keep the unpacked files that aren't otherwise selected as needed output")
+ parser.add_argument("--msvc-version", metavar="version", help="Install a specific MSVC toolchain version")
+ parser.add_argument("--sdk-version", metavar="version", help="Install a specific Windows SDK version")
+ return parser
+
+def setPackageSelectionMSVC16(args, packages, userversion, sdk, toolversion, defaultPackages):
+ if findPackage(packages, "Microsoft.VisualStudio.Component.VC." + toolversion + ".x86.x64", None, warn=False):
+ args.package.extend(["Win10SDK_" + sdk, "Microsoft.VisualStudio.Component.VC." + toolversion + ".x86.x64", "Microsoft.VisualStudio.Component.VC." + toolversion + ".ARM", "Microsoft.VisualStudio.Component.VC." + toolversion + ".ARM64"])
+ else:
+ # Options for toolchains for specific versions. The latest version in
+ # each manifest isn't available as a pinned version though, so if that
+ # version is requested, try the default version.
+ print("Didn't find exact version packages for " + userversion + ", assuming this is provided by the default/latest version")
+ args.package.extend(defaultPackages)
+
+def setPackageSelectionMSVC15(args, packages, userversion, sdk, toolversion, defaultPackages):
+ if findPackage(packages, "Microsoft.VisualStudio.Component.VC.Tools." + toolversion, None, warn=False):
+ args.package.extend(["Win10SDK_" + sdk, "Microsoft.VisualStudio.Component.VC.Tools." + toolversion])
+ else:
+ # Options for toolchains for specific versions. The latest version in
+ # each manifest isn't available as a pinned version though, so if that
+ # version is requested, try the default version.
+ print("Didn't find exact version packages for " + userversion + ", assuming this is provided by the default/latest version")
+ args.package.extend(defaultPackages)
+
+def setPackageSelection(args, packages):
+ # If no packages are selected, install these versionless packages, which
+ # gives the latest/recommended version for the current manifest.
+ defaultPackages = ["Microsoft.VisualStudio.Workload.VCTools", "Microsoft.VisualStudio.Component.VC.Tools.ARM", "Microsoft.VisualStudio.Component.VC.Tools.ARM64"]
+
+ # Note that in the manifest for MSVC version X.Y, only version X.Y-1
+ # exists with a package name like "Microsoft.VisualStudio.Component.VC."
+ # + toolversion + ".x86.x64".
+ if args.msvc_version == "16.0":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.17763", "14.20", defaultPackages)
+ elif args.msvc_version == "16.1":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.18362", "14.21", defaultPackages)
+ elif args.msvc_version == "16.2":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.18362", "14.22", defaultPackages)
+ elif args.msvc_version == "16.3":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.18362", "14.23", defaultPackages)
+ elif args.msvc_version == "16.4":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.18362", "14.24", defaultPackages)
+ elif args.msvc_version == "16.5":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.18362", "14.25", defaultPackages)
+ elif args.msvc_version == "16.6":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.18362", "14.26", defaultPackages)
+ elif args.msvc_version == "16.7":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.18362", "14.27", defaultPackages)
+ elif args.msvc_version == "16.8":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.18362", "14.28", defaultPackages)
+ elif args.msvc_version == "16.9":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.19041", "14.28.16.9", defaultPackages)
+ elif args.msvc_version == "16.10":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.19041", "14.29.16.10", defaultPackages)
+ elif args.msvc_version == "16.11":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.19041", "14.29.16.11", defaultPackages)
+ elif args.msvc_version == "17.0":
+ setPackageSelectionMSVC16(args, packages, args.msvc_version, "10.0.19041", "14.30.17.0", defaultPackages)
+
+ elif args.msvc_version == "15.4":
+ setPackageSelectionMSVC15(args, packages, args.msvc_version, "10.0.16299", "14.11", defaultPackages)
+ elif args.msvc_version == "15.5":
+ setPackageSelectionMSVC15(args, packages, args.msvc_version, "10.0.16299", "14.12", defaultPackages)
+ elif args.msvc_version == "15.6":
+ setPackageSelectionMSVC15(args, packages, args.msvc_version, "10.0.16299", "14.13", defaultPackages)
+ elif args.msvc_version == "15.7":
+ setPackageSelectionMSVC15(args, packages, args.msvc_version, "10.0.17134", "14.14", defaultPackages)
+ elif args.msvc_version == "15.8":
+ setPackageSelectionMSVC15(args, packages, args.msvc_version, "10.0.17134", "14.15", defaultPackages)
+ elif args.msvc_version == "15.9":
+ setPackageSelectionMSVC15(args, packages, args.msvc_version, "10.0.17763", "14.16", defaultPackages)
+ elif args.msvc_version != None:
+ print("Unsupported MSVC toolchain version " + args.msvc_version)
+ sys.exit(1)
+
+ if len(args.package) == 0:
+ args.package = defaultPackages
+
+ if args.sdk_version != None:
+ for key in packages:
+ if key.startswith("win10sdk") or key.startswith("win11sdk"):
+ base = key[0:8]
+ sdkname = base + "_" + args.sdk_version
+ if key == sdkname:
+ args.package.append(key)
+ else:
+ args.ignore.append(key)
+ p = packages[key][0]
+
+def lowercaseIgnores(args):
+ ignore = []
+ if args.ignore != None:
+ for i in args.ignore:
+ ignore.append(i.lower())
+ args.ignore = ignore
+
+def getManifest(args):
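+ # Fetch the top-level channel manifest (unless --manifest was given),
+ # locate the installer manifest inside it, load it, and optionally save
+ # it to disk.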
+ if args.manifest == None:
+ url = "https://aka.ms/vs/%s/%s/channel" % (args.major, args.type)
+ print("Fetching %s" % (url))
+ manifest = simplejson.loads(six.moves.urllib.request.urlopen(url).read())
+ print("Got toplevel manifest for %s" % (manifest["info"]["productDisplayVersion"]))
+ for item in manifest["channelItems"]:
+ if "type" in item and item["type"] == "Manifest":
+ args.manifest = item["payloads"][0]["url"]
+ if args.manifest == None:
+ print("Unable to find an intaller manifest!")
+ sys.exit(1)
+
+ if not args.manifest.startswith("http"):
+ args.manifest = "file:" + args.manifest
+
+ manifestdata = six.moves.urllib.request.urlopen(args.manifest).read()
+ manifest = simplejson.loads(manifestdata)
+ print("Loaded installer manifest for %s" % (manifest["info"]["productDisplayVersion"]))
+
+ if args.save_manifest:
+ filename = "%s.manifest" % (manifest["info"]["productDisplayVersion"])
+ if os.path.isfile(filename):
+ oldfile = open(filename, "rb").read()
+ if oldfile != manifestdata:
+ print("Old saved manifest in \"%s\" differs from newly downloaded one, not overwriting!" % (filename))
+ else:
+ print("Old saved manifest in \"%s\" is still current" % (filename))
+ else:
+ f = open(filename, "wb")
+ f.write(manifestdata)
+ f.close()
+ print("Saved installer manifest to \"%s\"" % (filename))
+
+ return manifest
+
+def prioritizePackage(a, b):
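+ # Comparator for sorting package variants: prefer x64 over other chips,
+ # then prefer English-language payloads, otherwise keep the given order.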
+ if "chip" in a and "chip" in b:
+ ax64 = a["chip"].lower() == "x64"
+ bx64 = b["chip"].lower() == "x64"
+ if ax64 and not bx64:
+ return -1
+ elif bx64 and not ax64:
+ return 1
+ if "language" in a and "language" in b:
+ aeng = a["language"].lower().startswith("en-")
+ beng = b["language"].lower().startswith("en-")
+ if aeng and not beng:
+ return -1
+ if beng and not aeng:
+ return 1
+ return 0
+
+def getPackages(manifest):
+ packages = {}
+ for p in manifest["packages"]:
+ id = p["id"].lower()
+ if not id in packages:
+ packages[id] = []
+ packages[id].append(p)
+ for key in packages:
+ packages[key] = sorted(packages[key], key=functools.cmp_to_key(prioritizePackage))
+ return packages
+
+def listPackageType(packages, type):
+ if type != None:
+ type = type.lower()
+ ids = []
+ for key in packages:
+ p = packages[key][0]
+ if type == None:
+ ids.append(p["id"])
+ elif "type" in p and p["type"].lower() == type:
+ ids.append(p["id"])
+ for id in sorted(ids):
+ print(id)
+
+def findPackage(packages, id, chip, warn=True):
+ origid = id
+ id = id.lower()
+ candidates = None
+ if not id in packages:
+ if warn:
+ print("WARNING: %s not found" % (origid))
+ return None
+ candidates = packages[id]
+ if chip != None:
+ chip = chip.lower()
+ for a in candidates:
+ if "chip" in a and a["chip"].lower() == chip:
+ return a
+ return candidates[0]
+
+def printDepends(packages, target, deptype, chip, indent, args):
+ chipstr = ""
+ if chip != None:
+ chipstr = " (" + chip + ")"
+ deptypestr = ""
+ if deptype != "":
+ deptypestr = " (" + deptype + ")"
+ ignorestr = ""
+ ignore = False
+ if target.lower() in args.ignore:
+ ignorestr = " (Ignored)"
+ ignore = True
+ print(indent + target + chipstr + deptypestr + ignorestr)
+ if deptype == "Optional" and not args.include_optional:
+ return
+ if deptype == "Recommended" and args.skip_recommended:
+ return
+ if ignore:
+ return
+ p = findPackage(packages, target, chip)
+ if p == None:
+ return
+ if "dependencies" in p:
+ deps = p["dependencies"]
+ for key in deps:
+ dep = deps[key]
+ type = ""
+ if "type" in dep:
+ type = dep["type"]
+ chip = None
+ if "chip" in dep:
+ chip = dep["chip"]
+ printDepends(packages, key, type, chip, indent + " ", args)
+
+def printReverseDepends(packages, target, deptype, indent, args):
+ deptypestr = ""
+ if deptype != "":
+ deptypestr = " (" + deptype + ")"
+ print(indent + target + deptypestr)
+ if deptype == "Optional" and not args.include_optional:
+ return
+ if deptype == "Recommended" and args.skip_recommended:
+ return
+ target = target.lower()
+ for key in packages:
+ p = packages[key][0]
+ if "dependencies" in p:
+ deps = p["dependencies"]
+ for k in deps:
+ if k.lower() != target:
+ continue
+ dep = deps[k]
+ type = ""
+ if "type" in dep:
+ type = dep["type"]
+ printReverseDepends(packages, p["id"], type, indent + " ", args)
+
+def getPackageKey(p):
+ packagekey = p["id"]
+ if "version" in p:
+ packagekey = packagekey + "-" + p["version"]
+ if "chip" in p:
+ packagekey = packagekey + "-" + p["chip"]
+ return packagekey
+
+def aggregateDepends(packages, included, target, chip, args):
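+ # Recursively collect the named package and its dependencies, skipping
+ # ignored packages, already-included package keys and, depending on the
+ # arguments, optional or recommended dependencies.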
+ if target.lower() in args.ignore:
+ return []
+ p = findPackage(packages, target, chip)
+ if p == None:
+ return []
+ packagekey = getPackageKey(p)
+ if packagekey in included:
+ return []
+ ret = [p]
+ included[packagekey] = True
+ if "dependencies" in p:
+ deps = p["dependencies"]
+ for key in deps:
+ dep = deps[key]
+ if "type" in dep:
+ deptype = dep["type"]
+ if deptype == "Optional" and not args.include_optional:
+ continue
+ if deptype == "Recommended" and args.skip_recommended:
+ continue
+ chip = None
+ if "chip" in dep:
+ chip = dep["chip"]
+ ret.extend(aggregateDepends(packages, included, key, chip, args))
+ return ret
+
+def getSelectedPackages(packages, args):
+ ret = []
+ included = {}
+ for i in args.package:
+ ret.extend(aggregateDepends(packages, included, i, None, args))
+ return ret
+
+def sumInstalledSize(l):
+ sum = 0
+ for p in l:
+ if "installSizes" in p:
+ sizes = p["installSizes"]
+ for location in sizes:
+ sum = sum + sizes[location]
+ return sum
+
+def sumDownloadSize(l):
+ sum = 0
+ for p in l:
+ if "payloads" in p:
+ for payload in p["payloads"]:
+ if "size" in payload:
+ sum = sum + payload["size"]
+ return sum
+
+def formatSize(s):
+ if s > 900*1024*1024:
+ return "%.1f GB" % (s/(1024*1024*1024))
+ if s > 900*1024:
+ return "%.1f MB" % (s/(1024*1024))
+ if s > 1024:
+ return "%.1f KB" % (s/1024)
+ return "%d bytes" % (s)
+
+def printPackageList(l):
+ for p in sorted(l, key=lambda p: p["id"]):
+ s = p["id"]
+ if "type" in p:
+ s = s + " (" + p["type"] + ")"
+ if "chip" in p:
+ s = s + " (" + p["chip"] + ")"
+ if "language" in p:
+ s = s + " (" + p["language"] + ")"
+ s = s + " " + formatSize(sumInstalledSize([p]))
+ print(s)
+
+def makedirs(dir):
+ try:
+ os.makedirs(dir)
+ except OSError:
+ pass
+
+def sha256File(file):
+ sha256Hash = hashlib.sha256()
+ with open(file, "rb") as f:
+ for byteBlock in iter(lambda: f.read(4096), b""):
+ sha256Hash.update(byteBlock)
+ return sha256Hash.hexdigest()
+
+def getPayloadName(payload):
+ name = payload["fileName"]
+ if "\\" in name:
+ name = name.split("\\")[-1]
+ if "/" in name:
+ name = name.split("/")[-1]
+ return name
+
+def downloadPackages(selected, cache, allowHashMismatch = False):
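+ # Download all payloads of the selected packages into the cache using a
+ # pool of five workers, verifying sha256 checksums where available.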
+ pool = multiprocessing.Pool(5)
+ tasks = []
+ makedirs(cache)
+ for p in selected:
+ if not "payloads" in p:
+ continue
+ dir = os.path.join(cache, getPackageKey(p))
+ makedirs(dir)
+ for payload in p["payloads"]:
+ name = getPayloadName(payload)
+ destname = os.path.join(dir, name)
+ fileid = os.path.join(getPackageKey(p), name)
+ args = (payload, destname, fileid, allowHashMismatch)
+ tasks.append(pool.apply_async(_downloadPayload, args))
+
+ downloaded = sum(task.get() for task in tasks)
+ pool.close()
+ print("Downloaded %s in total" % (formatSize(downloaded)))
+
+def _downloadPayload(payload, destname, fileid, allowHashMismatch):
+ attempts = 5
+ for attempt in range(attempts):
+ try:
+ if os.access(destname, os.F_OK):
+ if "sha256" in payload:
+ if sha256File(destname).lower() != payload["sha256"].lower():
+ six.print_("Incorrect existing file %s, removing" % (fileid), flush=True)
+ os.remove(destname)
+ else:
+ six.print_("Using existing file %s" % (fileid), flush=True)
+ return 0
+ else:
+ return 0
+ size = 0
+ if "size" in payload:
+ size = payload["size"]
+ six.print_("Downloading %s (%s)" % (fileid, formatSize(size)), flush=True)
+ six.moves.urllib.request.urlretrieve(payload["url"], destname)
+ if "sha256" in payload:
+ if sha256File(destname).lower() != payload["sha256"].lower():
+ if allowHashMismatch:
+ six.print_("WARNING: Incorrect hash for downloaded file %s" % (fileid), flush=True)
+ else:
+ raise Exception("Incorrect hash for downloaded file %s, aborting" % fileid)
+ return size
+ except Exception as e:
+ if attempt == attempts - 1:
+ raise
+ six.print_("%s: %s" % (type(e).__name__, e), flush=True)
+
+def mergeTrees(src, dest):
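+ # Move the tree at src into dest, recursively merging directories and
+ # matching existing directory names case-insensitively.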
+ if not os.path.isdir(src):
+ return
+ if not os.path.isdir(dest):
+ shutil.move(src, dest)
+ return
+ names = os.listdir(src)
+ destnames = {}
+ for n in os.listdir(dest):
+ destnames[n.lower()] = n
+ for n in names:
+ srcname = os.path.join(src, n)
+ destname = os.path.join(dest, n)
+ if os.path.isdir(srcname):
+ if os.path.isdir(destname):
+ mergeTrees(srcname, destname)
+ elif n.lower() in destnames:
+ mergeTrees(srcname, os.path.join(dest, destnames[n.lower()]))
+ else:
+ shutil.move(srcname, destname)
+ else:
+ shutil.move(srcname, destname)
+
+def unzipFiltered(zip, dest):
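+ # Extract the zip's entries into dest, URL-unquoting entry names and
+ # creating intermediate directories as needed.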
+ tmp = os.path.join(dest, "extract")
+ for f in zip.infolist():
+ name = six.moves.urllib.parse.unquote(f.filename)
+ if "/" in name:
+ sep = name.rfind("/")
+ dir = os.path.join(dest, name[0:sep])
+ makedirs(dir)
+ extracted = zip.extract(f, tmp)
+ shutil.move(extracted, os.path.join(dest, name))
+ shutil.rmtree(tmp)
+
+def unpackVsix(file, dest, listing):
+ temp = os.path.join(dest, "vsix")
+ makedirs(temp)
+ with zipfile.ZipFile(file, 'r') as zip:
+ unzipFiltered(zip, temp)
+ with open(listing, "w") as f:
+ for n in zip.namelist():
+ f.write(n + "\n")
+ contents = os.path.join(temp, "Contents")
+ if os.access(contents, os.F_OK):
+ mergeTrees(contents, dest)
+ shutil.rmtree(temp)
+
+def unpackWin10SDK(src, payloads, dest):
+ # We could try to unpack only the MSIs we need here.
+ # Note, this extracts some files into Program Files/..., and some
+ # files directly in the root unpack directory. The files we need
+ # are under Program Files/... though.
+ for payload in payloads:
+ name = getPayloadName(payload)
+ if name.endswith(".msi"):
+ print("Extracting " + name)
+ srcfile = os.path.join(src, name)
+ if sys.platform == "win32":
+ cmd = ["msiexec", "/a", srcfile, "/qn", "TARGETDIR=" + os.path.abspath(dest)]
+ else:
+ cmd = ["msiextract", "-C", dest, srcfile]
+ with open(os.path.join(dest, "WinSDK-" + getPayloadName(payload) + "-listing.txt"), "w") as log:
+ subprocess.check_call(cmd, stdout=log)
+
+def extractPackages(selected, cache, dest):
+ makedirs(dest)
+ for p in selected:
+ type = p["type"]
+ dir = os.path.join(cache, getPackageKey(p))
+ if type == "Component" or type == "Workload" or type == "Group":
+ continue
+ if type == "Vsix":
+ print("Unpacking " + p["id"])
+ for payload in p["payloads"]:
+ unpackVsix(os.path.join(dir, getPayloadName(payload)), dest, os.path.join(dest, getPackageKey(p) + "-listing.txt"))
+ elif p["id"].startswith("Win10SDK") or p["id"].startswith("Win11SDK"):
+ print("Unpacking " + p["id"])
+ unpackWin10SDK(dir, p["payloads"], dest)
+ else:
+ print("Skipping unpacking of " + p["id"] + " of type " + type)
+
+def moveVCSDK(unpack, dest):
+ # Move the VC and Program Files\Windows Kits\10 directories
+ # out from the unpack directory, allowing the rest of unpacked
+ # files to be removed.
+ makedirs(os.path.join(dest, "kits"))
+ mergeTrees(os.path.join(unpack, "VC"), os.path.join(dest, "VC"))
+ kitsPath = unpack
+ # msiexec extracts to Windows Kits rather than Program Files\Windows Kits
+ if sys.platform != "win32":
+ kitsPath = os.path.join(kitsPath, "Program Files")
+ kitsPath = os.path.join(kitsPath, "Windows Kits", "10")
+ mergeTrees(kitsPath, os.path.join(dest, "kits", "10"))
+ # The DIA SDK isn't necessary for normal use, but can be used when e.g.
+ # compiling LLVM.
+ mergeTrees(os.path.join(unpack, "DIA SDK"), os.path.join(dest, "DIA SDK"))
+
+if __name__ == "__main__":
+ parser = getArgsParser()
+ args = parser.parse_args()
+ lowercaseIgnores(args)
+
+ socket.setdefaulttimeout(15)
+
+ packages = getPackages(getManifest(args))
+
+ if args.print_version:
+ sys.exit(0)
+
+ if not args.accept_license:
+ response = six.moves.input("Do you accept the license at " + findPackage(packages, "Microsoft.VisualStudio.Product.BuildTools", None)["localizedResources"][0]["license"] + " (yes/no)? ")
+ while response != "yes" and response != "no":
+ response = six.moves.input("Do you accept the license? Answer \"yes\" or \"no\": ")
+ if response == "no":
+ sys.exit(0)
+
+ setPackageSelection(args, packages)
+
+ if args.list_components or args.list_workloads or args.list_packages:
+ if args.list_components:
+ listPackageType(packages, "Component")
+ if args.list_workloads:
+ listPackageType(packages, "Workload")
+ if args.list_packages:
+ listPackageType(packages, None)
+ sys.exit(0)
+
+ if args.print_deps_tree:
+ for i in args.package:
+ printDepends(packages, i, "", None, "", args)
+ sys.exit(0)
+
+ if args.print_reverse_deps:
+ for i in args.package:
+ printReverseDepends(packages, i, "", "", args)
+ sys.exit(0)
+
+ selected = getSelectedPackages(packages, args)
+
+ if args.print_selection:
+ printPackageList(selected)
+
+ print("Selected %d packages, for a total download size of %s, install size of %s" % (len(selected), formatSize(sumDownloadSize(selected)), formatSize(sumInstalledSize(selected))))
+
+ if args.print_selection:
+ sys.exit(0)
+
+ tempcache = None
+ if args.cache != None:
+ cache = os.path.abspath(args.cache)
+ else:
+ cache = tempfile.mkdtemp(prefix="vsinstall-")
+ tempcache = cache
+
+ if not args.only_download and args.dest == None:
+ print("No destination directory set!")
+ sys.exit(1)
+
+ try:
+ downloadPackages(selected, cache, allowHashMismatch=args.only_download)
+ if args.only_download:
+ sys.exit(0)
+
+ dest = os.path.abspath(args.dest)
+
+ if args.only_unpack:
+ unpack = dest
+ else:
+ unpack = os.path.join(dest, "unpack")
+
+ extractPackages(selected, cache, unpack)
+
+ if not args.only_unpack:
+ moveVCSDK(unpack, dest)
+ if not args.keep_unpack:
+ shutil.rmtree(unpack)
+ finally:
+ if tempcache != None:
+ shutil.rmtree(tempcache)
diff --git a/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/LICENSE b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/LICENSE
new file mode 100644
index 0000000000..a44c075724
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/LICENSE
@@ -0,0 +1,27 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Jeff Quast <contact@jeffquast.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+Markus Kuhn -- 2007-05-26 (Unicode 5.0)
+
+Permission to use, copy, modify, and distribute this software
+for any purpose and without fee is hereby granted. The author
+disclaims all warranties with regard to this software.
diff --git a/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/METADATA b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/METADATA
new file mode 100644
index 0000000000..263388a061
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/METADATA
@@ -0,0 +1,309 @@
+Metadata-Version: 2.1
+Name: wcwidth
+Version: 0.2.5
+Summary: Measures the displayed width of unicode strings in a terminal
+Home-page: https://github.com/jquast/wcwidth
+Author: Jeff Quast
+Author-email: contact@jeffquast.com
+License: MIT
+Keywords: cjk,combining,console,eastasian,emojiemulator,terminal,unicode,wcswidth,wcwidth,xterm
+Platform: UNKNOWN
+Classifier: Intended Audience :: Developers
+Classifier: Natural Language :: English
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: Localization
+Classifier: Topic :: Software Development :: Internationalization
+Classifier: Topic :: Terminals
+Requires-Dist: backports.functools-lru-cache (>=1.2.1) ; python_version < "3.2"
+
+|pypi_downloads| |codecov| |license|
+
+============
+Introduction
+============
+
+This library is mainly for CLI programs that carefully produce output for
+Terminals, or that pretend to be an emulator.
+
+**Problem Statement**: The printable length of *most* strings is equal to the
+number of cells they occupy on the screen, ``1 character : 1 cell``. However,
+there are categories of characters that *occupy 2 cells* (full-width), and
+others that *occupy 0 cells* (zero-width).
+
+**Solution**: POSIX.1-2001 and POSIX.1-2008 conforming systems provide the
+`wcwidth(3)`_ and `wcswidth(3)`_ C functions, which this python module's
+functions precisely copy. *These functions return the number of cells a
+unicode string is expected to occupy.*
+
+Installation
+------------
+
+The stable version of this package is maintained on pypi; install it using pip::
+
+ pip install wcwidth
+
+Example
+-------
+
+**Problem**: given the following phrase (Japanese),
+
+ >>> text = u'コンニチハ'
+
+Python **incorrectly** uses the *string length* of 5 codepoints rather than the
+*printable length* of 10 cells, so that when using the `rjust` function, the
+output length is wrong::
+
+ >>> print(len('コンニチハ'))
+ 5
+
+ >>> print('コンニチハ'.rjust(20, '_'))
+ _____コンニチハ
+
+By defining our own "rjust" function that uses wcwidth, we can correct this::
+
+ >>> def wc_rjust(text, length, padding=' '):
+ ...     from wcwidth import wcswidth
+ ...     return padding * max(0, (length - wcswidth(text))) + text
+ ...
+
+Our **Solution** uses wcswidth to determine the string length correctly::
+
+ >>> from wcwidth import wcswidth
+ >>> print(wcswidth('コンニチハ'))
+ 10
+
+ >>> print(wc_rjust('コンニチハ', 20, '_'))
+ __________コンニチハ
+
+
+Choosing a Version
+------------------
+
+Export an environment variable, ``UNICODE_VERSION``. This should be done by
+*terminal emulators*, or by developers experimenting with authoring one of
+their own, from the shell::
+
+ $ export UNICODE_VERSION=13.0
+
+If unspecified, the latest version is used. If your Terminal Emulator does not
+export this variable, you can use the `jquast/ucs-detect`_ utility to
+automatically detect and export it to your shell.
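+
+From within Python, ``list_versions()`` reports which table versions ship with
+this module. As a small sketch of the 0.2.5 API (whose functions accept an
+optional ``unicode_version`` keyword, defaulting to ``'auto'``), a measurement
+may also be pinned to a specific version explicitly::
+
+ >>> import wcwidth
+ >>> wcwidth.list_versions()[-1]  # latest bundled Unicode version
+ '13.0.0'
+ >>> wcwidth.wcswidth('コンニチハ', unicode_version='6.3.0')
+ 10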
+
+wcwidth, wcswidth
+-----------------
+Use function ``wcwidth()`` to determine the printable length of a *single
+unicode character*, and ``wcswidth()`` to determine the printable length of
+a *string of unicode characters*.
+
+Briefly, return values of function ``wcwidth()`` are:
+
+``-1``
+ Indeterminate (not printable).
+
+``0``
+ Does not advance the cursor, such as NULL or Combining.
+
+``2``
+ Characters of category East Asian Wide (W) or East Asian
+ Full-width (F) which are displayed using two terminal cells.
+
+``1``
+ All others.
+
+Function ``wcswidth()`` simply returns the sum of these values for each
+character in a string, or ``-1`` when ``wcwidth()`` returns ``-1`` for any
+character in the string.
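+
+As a brief illustration of each return value (a sketch, assuming the latest
+bundled Unicode tables)::
+
+ >>> from wcwidth import wcwidth, wcswidth
+ >>> wcwidth('コ')        # East Asian Wide: two cells
+ 2
+ >>> wcwidth('a')         # most characters: one cell
+ 1
+ >>> wcwidth('\u0301')    # combining acute accent: zero cells
+ 0
+ >>> wcwidth('\x07')      # control character: indeterminate
+ -1
+ >>> wcswidth('abc\x07')  # any -1 makes the whole string -1
+ -1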
+
+Full API Documentation at http://wcwidth.readthedocs.org
+
+==========
+Developing
+==========
+
+Install wcwidth in editable mode::
+
+ pip install -e .
+
+Execute unit tests using tox_::
+
+ tox
+
+Regenerate python code tables from latest Unicode Specification data files::
+
+ tox -eupdate
+
+Supplementary tools for browsing and testing terminals for wide unicode
+characters are found in the `bin/`_ folder of this project's source code. Just
+be sure to first ``pip install -r requirements-develop.txt`` from this
+project's main folder. For example, an interactive browser for testing::
+
+ ./bin/wcwidth-browser.py
+
+Uses
+----
+
+This library is used in:
+
+- `jquast/blessed`_: a thin, practical wrapper around terminal capabilities in
+ Python.
+
+- `jonathanslenders/python-prompt-toolkit`_: a Library for building powerful
+ interactive command lines in Python.
+
+- `dbcli/pgcli`_: Postgres CLI with autocompletion and syntax highlighting.
+
+- `thomasballinger/curtsies`_: a Curses-like terminal wrapper with a display
+ based on compositing 2d arrays of text.
+
+- `selectel/pyte`_: Simple VTXXX-compatible linux terminal emulator.
+
+- `astanin/python-tabulate`_: Pretty-print tabular data in Python, a library
+ and a command-line utility.
+
+- `LuminosoInsight/python-ftfy`_: Fixes mojibake and other glitches in Unicode
+ text.
+
+- `nbedos/termtosvg`_: Terminal recorder that renders sessions as SVG
+ animations.
+
+- `peterbrittain/asciimatics`_: Package to help people create full-screen text
+ UIs.
+
+Other Languages
+---------------
+
+- `timoxley/wcwidth`_: JavaScript
+- `janlelis/unicode-display_width`_: Ruby
+- `alecrabbit/php-wcwidth`_: PHP
+- `Text::CharWidth`_: Perl
+- `bluebear94/Terminal-WCWidth`_: Perl 6
+- `mattn/go-runewidth`_: Go
+- `emugel/wcwidth`_: Haxe
+- `aperezdc/lua-wcwidth`_: Lua
+- `joachimschmidt557/zig-wcwidth`: Zig
+- `fumiyas/wcwidth-cjk`_: `LD_PRELOAD` override
+- `joshuarubin/wcwidth9`: Unicode version 9 in C
+
+History
+-------
+
+0.2.0 *2020-06-01*
+ * **Enhancement**: Unicode version may be selected by exporting the
+ Environment variable ``UNICODE_VERSION``, such as ``13.0``, or ``6.3.0``.
+ See the `jquast/ucs-detect`_ CLI utility for automatic detection.
+ * **Enhancement**:
+ API Documentation is published to readthedocs.org.
+ * **Updated** tables for *all* Unicode Specifications that are published
+ in a programmatically consumable format, versions 4.1.0 through 13.0.
+
+0.1.9 *2020-03-22*
+ * **Performance** optimization by `Avram Lubkin`_, `PR #35`_.
+ * **Updated** tables to Unicode Specification 13.0.0.
+
+0.1.8 *2020-01-01*
+ * **Updated** tables to Unicode Specification 12.0.0. (`PR #30`_).
+
+0.1.7 *2016-07-01*
+ * **Updated** tables to Unicode Specification 9.0.0. (`PR #18`_).
+
+0.1.6 *2016-01-08 Production/Stable*
+ * ``LICENSE`` file now included with distribution.
+
+0.1.5 *2015-09-13 Alpha*
+ * **Bugfix**:
+ Resolution of the "combining_ character width" issue: most especially,
+ characters that previously returned -1 now often (correctly) return 0;
+ resolved by `Philip Craig`_ via `PR #11`_.
+ * **Deprecated**:
+ The module path ``wcwidth.table_comb`` is no longer available,
+ it has been superseded by module path ``wcwidth.table_zero``.
+
+0.1.4 *2014-11-20 Pre-Alpha*
+ * **Feature**: ``wcswidth()`` now determines printable length
+ for (most) combining_ characters. The developer's tool
+ `bin/wcwidth-browser.py`_ is improved to display combining_
+ characters when provided the ``--combining`` option
+ (`Thomas Ballinger`_ and `Leta Montopoli`_ `PR #5`_).
+ * **Feature**: added static analysis (prospector_) to testing
+ framework.
+
+0.1.3 *2014-10-29 Pre-Alpha*
+ * **Bugfix**: 2nd parameter of wcswidth was not honored.
+ (`Thomas Ballinger`_, `PR #4`_).
+
+0.1.2 *2014-10-28 Pre-Alpha*
+ * **Updated** tables to Unicode Specification 7.0.0.
+ (`Thomas Ballinger`_, `PR #3`_).
+
+0.1.1 *2014-05-14 Pre-Alpha*
+ * Initial release to pypi, based on Unicode Specification 6.3.0.
+
+This code was originally derived directly from C code of the same name,
+whose latest version is available at
+http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c::
+
+ * Markus Kuhn -- 2007-05-26 (Unicode 5.0)
+ *
+ * Permission to use, copy, modify, and distribute this software
+ * for any purpose and without fee is hereby granted. The author
+ * disclaims all warranties with regard to this software.
+
+.. _`tox`: https://testrun.org/tox/latest/install.html
+.. _`prospector`: https://github.com/landscapeio/prospector
+.. _`combining`: https://en.wikipedia.org/wiki/Combining_character
+.. _`bin/`: https://github.com/jquast/wcwidth/tree/master/bin
+.. _`bin/wcwidth-browser.py`: https://github.com/jquast/wcwidth/tree/master/bin/wcwidth-browser.py
+.. _`Thomas Ballinger`: https://github.com/thomasballinger
+.. _`Leta Montopoli`: https://github.com/lmontopo
+.. _`Philip Craig`: https://github.com/philipc
+.. _`PR #3`: https://github.com/jquast/wcwidth/pull/3
+.. _`PR #4`: https://github.com/jquast/wcwidth/pull/4
+.. _`PR #5`: https://github.com/jquast/wcwidth/pull/5
+.. _`PR #11`: https://github.com/jquast/wcwidth/pull/11
+.. _`PR #18`: https://github.com/jquast/wcwidth/pull/18
+.. _`PR #30`: https://github.com/jquast/wcwidth/pull/30
+.. _`PR #35`: https://github.com/jquast/wcwidth/pull/35
+.. _`jquast/blessed`: https://github.com/jquast/blessed
+.. _`selectel/pyte`: https://github.com/selectel/pyte
+.. _`thomasballinger/curtsies`: https://github.com/thomasballinger/curtsies
+.. _`dbcli/pgcli`: https://github.com/dbcli/pgcli
+.. _`jonathanslenders/python-prompt-toolkit`: https://github.com/jonathanslenders/python-prompt-toolkit
+.. _`timoxley/wcwidth`: https://github.com/timoxley/wcwidth
+.. _`wcwidth(3)`: http://man7.org/linux/man-pages/man3/wcwidth.3.html
+.. _`wcswidth(3)`: http://man7.org/linux/man-pages/man3/wcswidth.3.html
+.. _`astanin/python-tabulate`: https://github.com/astanin/python-tabulate
+.. _`janlelis/unicode-display_width`: https://github.com/janlelis/unicode-display_width
+.. _`LuminosoInsight/python-ftfy`: https://github.com/LuminosoInsight/python-ftfy
+.. _`alecrabbit/php-wcwidth`: https://github.com/alecrabbit/php-wcwidth
+.. _`Text::CharWidth`: https://metacpan.org/pod/Text::CharWidth
+.. _`bluebear94/Terminal-WCWidth`: https://github.com/bluebear94/Terminal-WCWidth
+.. _`mattn/go-runewidth`: https://github.com/mattn/go-runewidth
+.. _`emugel/wcwidth`: https://github.com/emugel/wcwidth
+.. _`jquast/ucs-detect`: https://github.com/jquast/ucs-detect
+.. _`Avram Lubkin`: https://github.com/avylove
+.. _`nbedos/termtosvg`: https://github.com/nbedos/termtosvg
+.. _`peterbrittain/asciimatics`: https://github.com/peterbrittain/asciimatics
+.. _`aperezdc/lua-wcwidth`: https://github.com/aperezdc/lua-wcwidth
+.. _`fumiyas/wcwidth-cjk`: https://github.com/fumiyas/wcwidth-cjk
+.. |pypi_downloads| image:: https://img.shields.io/pypi/dm/wcwidth.svg?logo=pypi
+ :alt: Downloads
+ :target: https://pypi.org/project/wcwidth/
+.. |codecov| image:: https://codecov.io/gh/jquast/wcwidth/branch/master/graph/badge.svg
+ :alt: codecov.io Code Coverage
+ :target: https://codecov.io/gh/jquast/wcwidth/
+.. |license| image:: https://img.shields.io/github/license/jquast/wcwidth.svg
+ :target: https://pypi.python.org/pypi/wcwidth/
+ :alt: MIT License
+
+
diff --git a/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/RECORD b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/RECORD
new file mode 100644
index 0000000000..5aa57bf71d
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/RECORD
@@ -0,0 +1,14 @@
+wcwidth/__init__.py,sha256=89fk8q2MPb4clahe_qDVIzlIa0U2872dsE4hEwxC6W8,1557
+wcwidth/table_wide.py,sha256=0yAEOGX6KIrsm5dUpzG0Jrb4s8wB6c7MC76ndXjLLj8,79422
+wcwidth/table_zero.py,sha256=w8ym8msWG1R2XhtJXRjUkT1erFIhj8d6YclXXrYoakQ,310230
+wcwidth/unicode_versions.py,sha256=pmyPU8jPCqNOjOHSVHLedKcWeW-SABSBcJ1EJemQJbo,792
+wcwidth/version.json,sha256=1zjrg0NodaudGjvcY-Eh-5JnBGKHkmVCIj2L9lN9k-U,202
+wcwidth/wcwidth.py,sha256=0bgA8U17QyvSHscy5zu1JW7rTWcEadw6HwNNbA9Phvk,14872
+wcwidth/tests/__init__.py,sha256=v_2uRwUE3hTJkuW656dLrkDiM9al7KL1j_vwGRc0RTM,42
+wcwidth/tests/test_core.py,sha256=gBaR8b3Vv2Wq1q5vGk9fAT0eW12AaEK5kuzH6fwCO-Q,3886
+wcwidth-0.2.5.dist-info/LICENSE,sha256=cLmKlaIUTrcK-AF_qMbZXOJH5AhnQ26LxknhN_4T0ho,1322
+wcwidth-0.2.5.dist-info/METADATA,sha256=K6QVxN6Eq7tMwUYeCeEgAcg0VjFqTrrUPOZL3flmEco,11257
+wcwidth-0.2.5.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+wcwidth-0.2.5.dist-info/top_level.txt,sha256=LLjS8SFiXXuLEcD2BNdFdGhpKWe5opHtvn7KNj9AIRI,8
+wcwidth-0.2.5.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
+wcwidth-0.2.5.dist-info/RECORD,,
diff --git a/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/WHEEL b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/WHEEL
new file mode 100644
index 0000000000..ef99c6cf32
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/top_level.txt b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/top_level.txt
new file mode 100644
index 0000000000..723a22ca00
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/top_level.txt
@@ -0,0 +1 @@
+wcwidth
diff --git a/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/zip-safe b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/zip-safe
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth-0.2.5.dist-info/zip-safe
@@ -0,0 +1 @@
+
diff --git a/third_party/python/wcwidth/wcwidth/__init__.py b/third_party/python/wcwidth/wcwidth/__init__.py
new file mode 100644
index 0000000000..a9008f8235
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth/__init__.py
@@ -0,0 +1,37 @@
+"""
+wcwidth module.
+
+https://github.com/jquast/wcwidth
+"""
+# re-export all functions & definitions, even private ones, from top-level
+# module path, to allow for 'from wcwidth import _private_func'. Of course,
+# user beware that any _private function may disappear or change signature at
+# any future version.
+
+# local
+from .wcwidth import ZERO_WIDTH # noqa
+from .wcwidth import (WIDE_EASTASIAN,
+ wcwidth,
+ wcswidth,
+ _bisearch,
+ list_versions,
+ _wcmatch_version,
+ _wcversion_value)
+
+# The __all__ attribute defines the items exported by the statement
+# 'from wcwidth import *', but also serves to say, "This is the public API".
+__all__ = ('wcwidth', 'wcswidth', 'list_versions')
+
+# I used to use a _get_package_version() function to use the `pkg_resources'
+# module to parse the package version from our version.json file, but this blew
+# some folks up, or more particularly, just the `xonsh' shell.
+#
+# Yikes! I always wanted to like xonsh and tried it many times but issues like
+# these always bit me, too, so I can sympathize -- this version is now manually
+# kept in sync with version.json to help them out. Shucks, this variable is just
+# for legacy, from the days before 'pip freeze' was a thing.
+#
+# We also used pkg_resources to load unicode version tables from version.json,
+# generated by bin/update-tables.py, but some environments are unable to
+# import pkg_resources for one reason or another, yikes!
+__version__ = '0.2.5'
diff --git a/third_party/python/wcwidth/wcwidth/table_wide.py b/third_party/python/wcwidth/wcwidth/table_wide.py
new file mode 100644
index 0000000000..36fb2b6048
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth/table_wide.py
@@ -0,0 +1,1102 @@
+"""Wide_Eastasian table, created by bin/update-tables.py."""
+# Generated: 2020-06-23T16:03:18.836005
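+# Each version key below maps to a tuple of inclusive (start, stop) codepoint
+# ranges, kept sorted so that the module's _bisearch() helper can
+# binary-search them when deciding whether a character is East Asian Wide or
+# Fullwidth (i.e. wcwidth() returns 2).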
+WIDE_EASTASIAN = {
+ '4.1.0': (
+ # Source: EastAsianWidth-4.1.0.txt
+ # Date: 2005-03-17, 15:21:00 PST [KW]
+ #
+ (0x01100, 0x01159,), # Hangul Choseong Kiyeok ..Hangul Choseong Yeorinhi
+ (0x0115f, 0x0115f,), # Hangul Choseong Filler ..Hangul Choseong Filler
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312c,), # Bopomofo Letter B ..Bopomofo Letter Gn
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031b7,), # Ideographic Annotation L..Bopomofo Final Letter H
+ (0x031c0, 0x031cf,), # Cjk Stroke T ..Cjk Stroke N
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03243,), # Parenthesized Ideograph ..Parenthesized Ideograph
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04db5,), # Square Apaato ..Cjk Unified Ideograph-4d
+ (0x04e00, 0x09fbb,), # Cjk Unified Ideograph-4e..Cjk Unified Ideograph-9f
+ (0x0a000, 0x0a48c,), # Yi Syllable It ..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0fa2d,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fa30, 0x0fa6a,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fa70, 0x0fad9,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '5.0.0': (
+ # Source: EastAsianWidth-5.0.0.txt
+ # Date: 2006-02-15, 14:39:00 PST [KW]
+ #
+ (0x01100, 0x01159,), # Hangul Choseong Kiyeok ..Hangul Choseong Yeorinhi
+ (0x0115f, 0x0115f,), # Hangul Choseong Filler ..Hangul Choseong Filler
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312c,), # Bopomofo Letter B ..Bopomofo Letter Gn
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031b7,), # Ideographic Annotation L..Bopomofo Final Letter H
+ (0x031c0, 0x031cf,), # Cjk Stroke T ..Cjk Stroke N
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03243,), # Parenthesized Ideograph ..Parenthesized Ideograph
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04db5,), # Square Apaato ..Cjk Unified Ideograph-4d
+ (0x04e00, 0x09fbb,), # Cjk Unified Ideograph-4e..Cjk Unified Ideograph-9f
+ (0x0a000, 0x0a48c,), # Yi Syllable It ..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0fa2d,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fa30, 0x0fa6a,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fa70, 0x0fad9,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '5.1.0': (
+ # Source: EastAsianWidth-5.1.0.txt
+ # Date: 2008-03-20, 17:42:00 PDT [KW]
+ #
+ (0x01100, 0x01159,), # Hangul Choseong Kiyeok ..Hangul Choseong Yeorinhi
+ (0x0115f, 0x0115f,), # Hangul Choseong Filler ..Hangul Choseong Filler
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031b7,), # Ideographic Annotation L..Bopomofo Final Letter H
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03243,), # Parenthesized Ideograph ..Parenthesized Ideograph
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04db5,), # Square Apaato ..Cjk Unified Ideograph-4d
+ (0x04e00, 0x09fc3,), # Cjk Unified Ideograph-4e..Cjk Unified Ideograph-9f
+ (0x0a000, 0x0a48c,), # Yi Syllable It ..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0fa2d,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fa30, 0x0fa6a,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fa70, 0x0fad9,), # Cjk Compatibility Ideogr..Cjk Compatibility Ideogr
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '5.2.0': (
+ # Source: EastAsianWidth-5.2.0.txt
+ # Date: 2009-06-09, 17:47:00 PDT [KW]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x011a3, 0x011a7,), # Hangul Jungseong A-eu ..Hangul Jungseong O-yae
+ (0x011fa, 0x011ff,), # Hangul Jongseong Kiyeok-..Hangul Jongseong Ssangni
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031b7,), # Ideographic Annotation L..Bopomofo Final Letter H
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0d7b0, 0x0d7c6,), # Hangul Jungseong O-yeo ..Hangul Jungseong Araea-e
+ (0x0d7cb, 0x0d7fb,), # Hangul Jongseong Nieun-r..Hangul Jongseong Phieuph
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x1f200, 0x1f200,), # Square Hiragana Hoka ..Square Hiragana Hoka
+ (0x1f210, 0x1f231,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '6.0.0': (
+ # Source: EastAsianWidth-6.0.0.txt
+ # Date: 2010-08-17, 12:17:00 PDT [KW]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x011a3, 0x011a7,), # Hangul Jungseong A-eu ..Hangul Jungseong O-yae
+ (0x011fa, 0x011ff,), # Hangul Jongseong Kiyeok-..Hangul Jongseong Ssangni
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0d7b0, 0x0d7c6,), # Hangul Jungseong O-yeo ..Hangul Jungseong Araea-e
+ (0x0d7cb, 0x0d7fb,), # Hangul Jongseong Nieun-r..Hangul Jongseong Phieuph
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x20000, 0x2f73f,), # Cjk Unified Ideograph-20..(nil)
+ (0x2b740, 0x2fffd,), # Cjk Unified Ideograph-2b..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '6.1.0': (
+ # Source: EastAsianWidth-6.1.0.txt
+ # Date: 2011-09-19, 18:46:00 GMT [KW]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x011a3, 0x011a7,), # Hangul Jungseong A-eu ..Hangul Jungseong O-yae
+ (0x011fa, 0x011ff,), # Hangul Jongseong Kiyeok-..Hangul Jongseong Ssangni
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0d7b0, 0x0d7c6,), # Hangul Jungseong O-yeo ..Hangul Jungseong Araea-e
+ (0x0d7cb, 0x0d7fb,), # Hangul Jongseong Nieun-r..Hangul Jongseong Phieuph
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '6.2.0': (
+ # Source: EastAsianWidth-6.2.0.txt
+ # Date: 2012-05-15, 18:30:00 GMT [KW]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '6.3.0': (
+ # Source: EastAsianWidth-6.3.0.txt
+ # Date: 2013-02-05, 20:09:00 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '7.0.0': (
+ # Source: EastAsianWidth-7.0.0.txt
+ # Date: 2014-02-28, 23:15:00 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '8.0.0': (
+ # Source: EastAsianWidth-8.0.0.txt
+ # Date: 2015-02-10, 21:00:00 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23a,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '9.0.0': (
+ # Source: EastAsianWidth-9.0.0.txt
+ # Date: 2016-05-27, 17:00:00 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x0231a, 0x0231b,), # Watch ..Hourglass
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
+ (0x023f0, 0x023f0,), # Alarm Clock ..Alarm Clock
+ (0x023f3, 0x023f3,), # Hourglass With Flowing S..Hourglass With Flowing S
+ (0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
+ (0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
+ (0x02648, 0x02653,), # Aries ..Pisces
+ (0x0267f, 0x0267f,), # Wheelchair Symbol ..Wheelchair Symbol
+ (0x02693, 0x02693,), # Anchor ..Anchor
+ (0x026a1, 0x026a1,), # High Voltage Sign ..High Voltage Sign
+ (0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
+ (0x026bd, 0x026be,), # Soccer Ball ..Baseball
+ (0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
+ (0x026ce, 0x026ce,), # Ophiuchus ..Ophiuchus
+ (0x026d4, 0x026d4,), # No Entry ..No Entry
+ (0x026ea, 0x026ea,), # Church ..Church
+ (0x026f2, 0x026f3,), # Fountain ..Flag In Hole
+ (0x026f5, 0x026f5,), # Sailboat ..Sailboat
+ (0x026fa, 0x026fa,), # Tent ..Tent
+ (0x026fd, 0x026fd,), # Fuel Pump ..Fuel Pump
+ (0x02705, 0x02705,), # White Heavy Check Mark ..White Heavy Check Mark
+ (0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
+ (0x02728, 0x02728,), # Sparkles ..Sparkles
+ (0x0274c, 0x0274c,), # Cross Mark ..Cross Mark
+ (0x0274e, 0x0274e,), # Negative Squared Cross M..Negative Squared Cross M
+ (0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
+ (0x02757, 0x02757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
+ (0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
+ (0x027b0, 0x027b0,), # Curly Loop ..Curly Loop
+ (0x027bf, 0x027bf,), # Double Curly Loop ..Double Curly Loop
+ (0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
+ (0x02b50, 0x02b50,), # White Medium Star ..White Medium Star
+ (0x02b55, 0x02b55,), # Heavy Large Circle ..Heavy Large Circle
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312d,), # Bopomofo Letter B ..Bopomofo Letter Ih
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x16fe0, 0x16fe0,), # Tangut Iteration Mark ..Tangut Iteration Mark
+ (0x17000, 0x187ec,), # (nil) ..(nil)
+ (0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
+ (0x1b000, 0x1b001,), # Katakana Letter Archaic ..Hiragana Letter Archaic
+ (0x1f004, 0x1f004,), # Mahjong Tile Red Dragon ..Mahjong Tile Red Dragon
+ (0x1f0cf, 0x1f0cf,), # Playing Card Black Joker..Playing Card Black Joker
+ (0x1f18e, 0x1f18e,), # Negative Squared Ab ..Negative Squared Ab
+ (0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x1f300, 0x1f320,), # Cyclone ..Shooting Star
+ (0x1f32d, 0x1f335,), # Hot Dog ..Cactus
+ (0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
+ (0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
+ (0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
+ (0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
+ (0x1f3e0, 0x1f3f0,), # House Building ..European Castle
+ (0x1f3f4, 0x1f3f4,), # Waving Black Flag ..Waving Black Flag
+ (0x1f3f8, 0x1f43e,), # Badminton Racquet And Sh..Paw Prints
+ (0x1f440, 0x1f440,), # Eyes ..Eyes
+ (0x1f442, 0x1f4fc,), # Ear ..Videocassette
+ (0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
+ (0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
+ (0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
+ (0x1f57a, 0x1f57a,), # Man Dancing ..Man Dancing
+ (0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
+ (0x1f5a4, 0x1f5a4,), # Black Heart ..Black Heart
+ (0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
+ (0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
+ (0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
+ (0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
+ (0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
+ (0x1f6f4, 0x1f6f6,), # Scooter ..Canoe
+ (0x1f910, 0x1f91e,), # Zipper-mouth Face ..Hand With Index And Midd
+ (0x1f920, 0x1f927,), # Face With Cowboy Hat ..Sneezing Face
+ (0x1f930, 0x1f930,), # Pregnant Woman ..Pregnant Woman
+ (0x1f933, 0x1f93e,), # Selfie ..Handball
+ (0x1f940, 0x1f94b,), # Wilted Flower ..Martial Arts Uniform
+ (0x1f950, 0x1f95e,), # Croissant ..Pancakes
+ (0x1f980, 0x1f991,), # Crab ..Squid
+ (0x1f9c0, 0x1f9c0,), # Cheese Wedge ..Cheese Wedge
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '10.0.0': (
+ # Source: EastAsianWidth-10.0.0.txt
+ # Date: 2017-03-08, 02:00:00 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x0231a, 0x0231b,), # Watch ..Hourglass
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
+ (0x023f0, 0x023f0,), # Alarm Clock ..Alarm Clock
+ (0x023f3, 0x023f3,), # Hourglass With Flowing S..Hourglass With Flowing S
+ (0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
+ (0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
+ (0x02648, 0x02653,), # Aries ..Pisces
+ (0x0267f, 0x0267f,), # Wheelchair Symbol ..Wheelchair Symbol
+ (0x02693, 0x02693,), # Anchor ..Anchor
+ (0x026a1, 0x026a1,), # High Voltage Sign ..High Voltage Sign
+ (0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
+ (0x026bd, 0x026be,), # Soccer Ball ..Baseball
+ (0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
+ (0x026ce, 0x026ce,), # Ophiuchus ..Ophiuchus
+ (0x026d4, 0x026d4,), # No Entry ..No Entry
+ (0x026ea, 0x026ea,), # Church ..Church
+ (0x026f2, 0x026f3,), # Fountain ..Flag In Hole
+ (0x026f5, 0x026f5,), # Sailboat ..Sailboat
+ (0x026fa, 0x026fa,), # Tent ..Tent
+ (0x026fd, 0x026fd,), # Fuel Pump ..Fuel Pump
+ (0x02705, 0x02705,), # White Heavy Check Mark ..White Heavy Check Mark
+ (0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
+ (0x02728, 0x02728,), # Sparkles ..Sparkles
+ (0x0274c, 0x0274c,), # Cross Mark ..Cross Mark
+ (0x0274e, 0x0274e,), # Negative Squared Cross M..Negative Squared Cross M
+ (0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
+ (0x02757, 0x02757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
+ (0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
+ (0x027b0, 0x027b0,), # Curly Loop ..Curly Loop
+ (0x027bf, 0x027bf,), # Double Curly Loop ..Double Curly Loop
+ (0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
+ (0x02b50, 0x02b50,), # White Medium Star ..White Medium Star
+ (0x02b55, 0x02b55,), # Heavy Large Circle ..Heavy Large Circle
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312e,), # Bopomofo Letter B ..Bopomofo Letter O With D
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x16fe0, 0x16fe1,), # Tangut Iteration Mark ..Nushu Iteration Mark
+ (0x17000, 0x187ec,), # (nil) ..(nil)
+ (0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
+ (0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
+ (0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
+ (0x1f004, 0x1f004,), # Mahjong Tile Red Dragon ..Mahjong Tile Red Dragon
+ (0x1f0cf, 0x1f0cf,), # Playing Card Black Joker..Playing Card Black Joker
+ (0x1f18e, 0x1f18e,), # Negative Squared Ab ..Negative Squared Ab
+ (0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
+ (0x1f300, 0x1f320,), # Cyclone ..Shooting Star
+ (0x1f32d, 0x1f335,), # Hot Dog ..Cactus
+ (0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
+ (0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
+ (0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
+ (0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
+ (0x1f3e0, 0x1f3f0,), # House Building ..European Castle
+ (0x1f3f4, 0x1f3f4,), # Waving Black Flag ..Waving Black Flag
+ (0x1f3f8, 0x1f43e,), # Badminton Racquet And Sh..Paw Prints
+ (0x1f440, 0x1f440,), # Eyes ..Eyes
+ (0x1f442, 0x1f4fc,), # Ear ..Videocassette
+ (0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
+ (0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
+ (0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
+ (0x1f57a, 0x1f57a,), # Man Dancing ..Man Dancing
+ (0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
+ (0x1f5a4, 0x1f5a4,), # Black Heart ..Black Heart
+ (0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
+ (0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
+ (0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
+ (0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
+ (0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
+ (0x1f6f4, 0x1f6f8,), # Scooter ..Flying Saucer
+ (0x1f910, 0x1f93e,), # Zipper-mouth Face ..Handball
+ (0x1f940, 0x1f94c,), # Wilted Flower ..Curling Stone
+ (0x1f950, 0x1f96b,), # Croissant ..Canned Food
+ (0x1f980, 0x1f997,), # Crab ..Cricket
+ (0x1f9c0, 0x1f9c0,), # Cheese Wedge ..Cheese Wedge
+ (0x1f9d0, 0x1f9e6,), # Face With Monocle ..Socks
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '11.0.0': (
+ # Source: EastAsianWidth-11.0.0.txt
+ # Date: 2018-05-14, 09:41:59 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x0231a, 0x0231b,), # Watch ..Hourglass
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
+ (0x023f0, 0x023f0,), # Alarm Clock ..Alarm Clock
+ (0x023f3, 0x023f3,), # Hourglass With Flowing S..Hourglass With Flowing S
+ (0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
+ (0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
+ (0x02648, 0x02653,), # Aries ..Pisces
+ (0x0267f, 0x0267f,), # Wheelchair Symbol ..Wheelchair Symbol
+ (0x02693, 0x02693,), # Anchor ..Anchor
+ (0x026a1, 0x026a1,), # High Voltage Sign ..High Voltage Sign
+ (0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
+ (0x026bd, 0x026be,), # Soccer Ball ..Baseball
+ (0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
+ (0x026ce, 0x026ce,), # Ophiuchus ..Ophiuchus
+ (0x026d4, 0x026d4,), # No Entry ..No Entry
+ (0x026ea, 0x026ea,), # Church ..Church
+ (0x026f2, 0x026f3,), # Fountain ..Flag In Hole
+ (0x026f5, 0x026f5,), # Sailboat ..Sailboat
+ (0x026fa, 0x026fa,), # Tent ..Tent
+ (0x026fd, 0x026fd,), # Fuel Pump ..Fuel Pump
+ (0x02705, 0x02705,), # White Heavy Check Mark ..White Heavy Check Mark
+ (0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
+ (0x02728, 0x02728,), # Sparkles ..Sparkles
+ (0x0274c, 0x0274c,), # Cross Mark ..Cross Mark
+ (0x0274e, 0x0274e,), # Negative Squared Cross M..Negative Squared Cross M
+ (0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
+ (0x02757, 0x02757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
+ (0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
+ (0x027b0, 0x027b0,), # Curly Loop ..Curly Loop
+ (0x027bf, 0x027bf,), # Double Curly Loop ..Double Curly Loop
+ (0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
+ (0x02b50, 0x02b50,), # White Medium Star ..White Medium Star
+ (0x02b55, 0x02b55,), # Heavy Large Circle ..Heavy Large Circle
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x16fe0, 0x16fe1,), # Tangut Iteration Mark ..Nushu Iteration Mark
+ (0x17000, 0x187f1,), # (nil) ..(nil)
+ (0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
+ (0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
+ (0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
+ (0x1f004, 0x1f004,), # Mahjong Tile Red Dragon ..Mahjong Tile Red Dragon
+ (0x1f0cf, 0x1f0cf,), # Playing Card Black Joker..Playing Card Black Joker
+ (0x1f18e, 0x1f18e,), # Negative Squared Ab ..Negative Squared Ab
+ (0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
+ (0x1f300, 0x1f320,), # Cyclone ..Shooting Star
+ (0x1f32d, 0x1f335,), # Hot Dog ..Cactus
+ (0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
+ (0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
+ (0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
+ (0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
+ (0x1f3e0, 0x1f3f0,), # House Building ..European Castle
+ (0x1f3f4, 0x1f3f4,), # Waving Black Flag ..Waving Black Flag
+ (0x1f3f8, 0x1f43e,), # Badminton Racquet And Sh..Paw Prints
+ (0x1f440, 0x1f440,), # Eyes ..Eyes
+ (0x1f442, 0x1f4fc,), # Ear ..Videocassette
+ (0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
+ (0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
+ (0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
+ (0x1f57a, 0x1f57a,), # Man Dancing ..Man Dancing
+ (0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
+ (0x1f5a4, 0x1f5a4,), # Black Heart ..Black Heart
+ (0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
+ (0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
+ (0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
+ (0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
+ (0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
+ (0x1f6f4, 0x1f6f9,), # Scooter ..Skateboard
+ (0x1f910, 0x1f93e,), # Zipper-mouth Face ..Handball
+ (0x1f940, 0x1f970,), # Wilted Flower ..Smiling Face With Smilin
+ (0x1f973, 0x1f976,), # Face With Party Horn And..Freezing Face
+ (0x1f97a, 0x1f97a,), # Face With Pleading Eyes ..Face With Pleading Eyes
+ (0x1f97c, 0x1f9a2,), # Lab Coat ..Swan
+ (0x1f9b0, 0x1f9b9,), # Emoji Component Red Hair..Supervillain
+ (0x1f9c0, 0x1f9c2,), # Cheese Wedge ..Salt Shaker
+ (0x1f9d0, 0x1f9ff,), # Face With Monocle ..Nazar Amulet
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '12.0.0': (
+ # Source: EastAsianWidth-12.0.0.txt
+ # Date: 2019-01-21, 14:12:58 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x0231a, 0x0231b,), # Watch ..Hourglass
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
+ (0x023f0, 0x023f0,), # Alarm Clock ..Alarm Clock
+ (0x023f3, 0x023f3,), # Hourglass With Flowing S..Hourglass With Flowing S
+ (0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
+ (0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
+ (0x02648, 0x02653,), # Aries ..Pisces
+ (0x0267f, 0x0267f,), # Wheelchair Symbol ..Wheelchair Symbol
+ (0x02693, 0x02693,), # Anchor ..Anchor
+ (0x026a1, 0x026a1,), # High Voltage Sign ..High Voltage Sign
+ (0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
+ (0x026bd, 0x026be,), # Soccer Ball ..Baseball
+ (0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
+ (0x026ce, 0x026ce,), # Ophiuchus ..Ophiuchus
+ (0x026d4, 0x026d4,), # No Entry ..No Entry
+ (0x026ea, 0x026ea,), # Church ..Church
+ (0x026f2, 0x026f3,), # Fountain ..Flag In Hole
+ (0x026f5, 0x026f5,), # Sailboat ..Sailboat
+ (0x026fa, 0x026fa,), # Tent ..Tent
+ (0x026fd, 0x026fd,), # Fuel Pump ..Fuel Pump
+ (0x02705, 0x02705,), # White Heavy Check Mark ..White Heavy Check Mark
+ (0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
+ (0x02728, 0x02728,), # Sparkles ..Sparkles
+ (0x0274c, 0x0274c,), # Cross Mark ..Cross Mark
+ (0x0274e, 0x0274e,), # Negative Squared Cross M..Negative Squared Cross M
+ (0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
+ (0x02757, 0x02757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
+ (0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
+ (0x027b0, 0x027b0,), # Curly Loop ..Curly Loop
+ (0x027bf, 0x027bf,), # Double Curly Loop ..Double Curly Loop
+ (0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
+ (0x02b50, 0x02b50,), # White Medium Star ..White Medium Star
+ (0x02b55, 0x02b55,), # Heavy Large Circle ..Heavy Large Circle
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x032fe,), # Partnership Sign ..Circled Katakana Wo
+ (0x03300, 0x04dbf,), # Square Apaato ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x16fe0, 0x16fe3,), # Tangut Iteration Mark ..Old Chinese Iteration Ma
+ (0x17000, 0x187f7,), # (nil) ..(nil)
+ (0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
+ (0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
+ (0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
+ (0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
+ (0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
+ (0x1f004, 0x1f004,), # Mahjong Tile Red Dragon ..Mahjong Tile Red Dragon
+ (0x1f0cf, 0x1f0cf,), # Playing Card Black Joker..Playing Card Black Joker
+ (0x1f18e, 0x1f18e,), # Negative Squared Ab ..Negative Squared Ab
+ (0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
+ (0x1f300, 0x1f320,), # Cyclone ..Shooting Star
+ (0x1f32d, 0x1f335,), # Hot Dog ..Cactus
+ (0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
+ (0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
+ (0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
+ (0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
+ (0x1f3e0, 0x1f3f0,), # House Building ..European Castle
+ (0x1f3f4, 0x1f3f4,), # Waving Black Flag ..Waving Black Flag
+ (0x1f3f8, 0x1f43e,), # Badminton Racquet And Sh..Paw Prints
+ (0x1f440, 0x1f440,), # Eyes ..Eyes
+ (0x1f442, 0x1f4fc,), # Ear ..Videocassette
+ (0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
+ (0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
+ (0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
+ (0x1f57a, 0x1f57a,), # Man Dancing ..Man Dancing
+ (0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
+ (0x1f5a4, 0x1f5a4,), # Black Heart ..Black Heart
+ (0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
+ (0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
+ (0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
+ (0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
+ (0x1f6d5, 0x1f6d5,), # Hindu Temple ..Hindu Temple
+ (0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
+ (0x1f6f4, 0x1f6fa,), # Scooter ..Auto Rickshaw
+ (0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
+ (0x1f90d, 0x1f971,), # White Heart ..Yawning Face
+ (0x1f973, 0x1f976,), # Face With Party Horn And..Freezing Face
+ (0x1f97a, 0x1f9a2,), # Face With Pleading Eyes ..Swan
+ (0x1f9a5, 0x1f9aa,), # Sloth ..Oyster
+ (0x1f9ae, 0x1f9ca,), # Guide Dog ..Ice Cube
+ (0x1f9cd, 0x1f9ff,), # Standing Person ..Nazar Amulet
+ (0x1fa70, 0x1fa73,), # Ballet Shoes ..Shorts
+ (0x1fa78, 0x1fa7a,), # Drop Of Blood ..Stethoscope
+ (0x1fa80, 0x1fa82,), # Yo-yo ..Parachute
+ (0x1fa90, 0x1fa95,), # Ringed Planet ..Banjo
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '12.1.0': (
+ # Source: EastAsianWidth-12.1.0.txt
+ # Date: 2019-03-31, 22:01:58 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x0231a, 0x0231b,), # Watch ..Hourglass
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
+ (0x023f0, 0x023f0,), # Alarm Clock ..Alarm Clock
+ (0x023f3, 0x023f3,), # Hourglass With Flowing S..Hourglass With Flowing S
+ (0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
+ (0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
+ (0x02648, 0x02653,), # Aries ..Pisces
+ (0x0267f, 0x0267f,), # Wheelchair Symbol ..Wheelchair Symbol
+ (0x02693, 0x02693,), # Anchor ..Anchor
+ (0x026a1, 0x026a1,), # High Voltage Sign ..High Voltage Sign
+ (0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
+ (0x026bd, 0x026be,), # Soccer Ball ..Baseball
+ (0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
+ (0x026ce, 0x026ce,), # Ophiuchus ..Ophiuchus
+ (0x026d4, 0x026d4,), # No Entry ..No Entry
+ (0x026ea, 0x026ea,), # Church ..Church
+ (0x026f2, 0x026f3,), # Fountain ..Flag In Hole
+ (0x026f5, 0x026f5,), # Sailboat ..Sailboat
+ (0x026fa, 0x026fa,), # Tent ..Tent
+ (0x026fd, 0x026fd,), # Fuel Pump ..Fuel Pump
+ (0x02705, 0x02705,), # White Heavy Check Mark ..White Heavy Check Mark
+ (0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
+ (0x02728, 0x02728,), # Sparkles ..Sparkles
+ (0x0274c, 0x0274c,), # Cross Mark ..Cross Mark
+ (0x0274e, 0x0274e,), # Negative Squared Cross M..Negative Squared Cross M
+ (0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
+ (0x02757, 0x02757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
+ (0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
+ (0x027b0, 0x027b0,), # Curly Loop ..Curly Loop
+ (0x027bf, 0x027bf,), # Double Curly Loop ..Double Curly Loop
+ (0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
+ (0x02b50, 0x02b50,), # White Medium Star ..White Medium Star
+ (0x02b55, 0x02b55,), # Heavy Large Circle ..Heavy Large Circle
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031ba,), # Ideographic Annotation L..Bopomofo Letter Zy
+ (0x031c0, 0x031e3,), # Cjk Stroke T ..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x04dbf,), # Partnership Sign ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x16fe0, 0x16fe3,), # Tangut Iteration Mark ..Old Chinese Iteration Ma
+ (0x17000, 0x187f7,), # (nil) ..(nil)
+ (0x18800, 0x18af2,), # Tangut Component-001 ..Tangut Component-755
+ (0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
+ (0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
+ (0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
+ (0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
+ (0x1f004, 0x1f004,), # Mahjong Tile Red Dragon ..Mahjong Tile Red Dragon
+ (0x1f0cf, 0x1f0cf,), # Playing Card Black Joker..Playing Card Black Joker
+ (0x1f18e, 0x1f18e,), # Negative Squared Ab ..Negative Squared Ab
+ (0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
+ (0x1f300, 0x1f320,), # Cyclone ..Shooting Star
+ (0x1f32d, 0x1f335,), # Hot Dog ..Cactus
+ (0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
+ (0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
+ (0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
+ (0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
+ (0x1f3e0, 0x1f3f0,), # House Building ..European Castle
+ (0x1f3f4, 0x1f3f4,), # Waving Black Flag ..Waving Black Flag
+ (0x1f3f8, 0x1f43e,), # Badminton Racquet And Sh..Paw Prints
+ (0x1f440, 0x1f440,), # Eyes ..Eyes
+ (0x1f442, 0x1f4fc,), # Ear ..Videocassette
+ (0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
+ (0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
+ (0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
+ (0x1f57a, 0x1f57a,), # Man Dancing ..Man Dancing
+ (0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
+ (0x1f5a4, 0x1f5a4,), # Black Heart ..Black Heart
+ (0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
+ (0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
+ (0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
+ (0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
+ (0x1f6d5, 0x1f6d5,), # Hindu Temple ..Hindu Temple
+ (0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
+ (0x1f6f4, 0x1f6fa,), # Scooter ..Auto Rickshaw
+ (0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
+ (0x1f90d, 0x1f971,), # White Heart ..Yawning Face
+ (0x1f973, 0x1f976,), # Face With Party Horn And..Freezing Face
+ (0x1f97a, 0x1f9a2,), # Face With Pleading Eyes ..Swan
+ (0x1f9a5, 0x1f9aa,), # Sloth ..Oyster
+ (0x1f9ae, 0x1f9ca,), # Guide Dog ..Ice Cube
+ (0x1f9cd, 0x1f9ff,), # Standing Person ..Nazar Amulet
+ (0x1fa70, 0x1fa73,), # Ballet Shoes ..Shorts
+ (0x1fa78, 0x1fa7a,), # Drop Of Blood ..Stethoscope
+ (0x1fa80, 0x1fa82,), # Yo-yo ..Parachute
+ (0x1fa90, 0x1fa95,), # Ringed Planet ..Banjo
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+ '13.0.0': (
+ # Source: EastAsianWidth-13.0.0.txt
+        # Date: 2020-01-21, 18:14:00 GMT [KW, LI]
+ #
+ (0x01100, 0x0115f,), # Hangul Choseong Kiyeok ..Hangul Choseong Filler
+ (0x0231a, 0x0231b,), # Watch ..Hourglass
+ (0x02329, 0x0232a,), # Left-pointing Angle Brac..Right-pointing Angle Bra
+ (0x023e9, 0x023ec,), # Black Right-pointing Dou..Black Down-pointing Doub
+ (0x023f0, 0x023f0,), # Alarm Clock ..Alarm Clock
+ (0x023f3, 0x023f3,), # Hourglass With Flowing S..Hourglass With Flowing S
+ (0x025fd, 0x025fe,), # White Medium Small Squar..Black Medium Small Squar
+ (0x02614, 0x02615,), # Umbrella With Rain Drops..Hot Beverage
+ (0x02648, 0x02653,), # Aries ..Pisces
+ (0x0267f, 0x0267f,), # Wheelchair Symbol ..Wheelchair Symbol
+ (0x02693, 0x02693,), # Anchor ..Anchor
+ (0x026a1, 0x026a1,), # High Voltage Sign ..High Voltage Sign
+ (0x026aa, 0x026ab,), # Medium White Circle ..Medium Black Circle
+ (0x026bd, 0x026be,), # Soccer Ball ..Baseball
+ (0x026c4, 0x026c5,), # Snowman Without Snow ..Sun Behind Cloud
+ (0x026ce, 0x026ce,), # Ophiuchus ..Ophiuchus
+ (0x026d4, 0x026d4,), # No Entry ..No Entry
+ (0x026ea, 0x026ea,), # Church ..Church
+ (0x026f2, 0x026f3,), # Fountain ..Flag In Hole
+ (0x026f5, 0x026f5,), # Sailboat ..Sailboat
+ (0x026fa, 0x026fa,), # Tent ..Tent
+ (0x026fd, 0x026fd,), # Fuel Pump ..Fuel Pump
+ (0x02705, 0x02705,), # White Heavy Check Mark ..White Heavy Check Mark
+ (0x0270a, 0x0270b,), # Raised Fist ..Raised Hand
+ (0x02728, 0x02728,), # Sparkles ..Sparkles
+ (0x0274c, 0x0274c,), # Cross Mark ..Cross Mark
+ (0x0274e, 0x0274e,), # Negative Squared Cross M..Negative Squared Cross M
+ (0x02753, 0x02755,), # Black Question Mark Orna..White Exclamation Mark O
+ (0x02757, 0x02757,), # Heavy Exclamation Mark S..Heavy Exclamation Mark S
+ (0x02795, 0x02797,), # Heavy Plus Sign ..Heavy Division Sign
+ (0x027b0, 0x027b0,), # Curly Loop ..Curly Loop
+ (0x027bf, 0x027bf,), # Double Curly Loop ..Double Curly Loop
+ (0x02b1b, 0x02b1c,), # Black Large Square ..White Large Square
+ (0x02b50, 0x02b50,), # White Medium Star ..White Medium Star
+ (0x02b55, 0x02b55,), # Heavy Large Circle ..Heavy Large Circle
+ (0x02e80, 0x02e99,), # Cjk Radical Repeat ..Cjk Radical Rap
+ (0x02e9b, 0x02ef3,), # Cjk Radical Choke ..Cjk Radical C-simplified
+ (0x02f00, 0x02fd5,), # Kangxi Radical One ..Kangxi Radical Flute
+ (0x02ff0, 0x02ffb,), # Ideographic Description ..Ideographic Description
+ (0x03000, 0x0303e,), # Ideographic Space ..Ideographic Variation In
+ (0x03041, 0x03096,), # Hiragana Letter Small A ..Hiragana Letter Small Ke
+ (0x03099, 0x030ff,), # Combining Katakana-hirag..Katakana Digraph Koto
+ (0x03105, 0x0312f,), # Bopomofo Letter B ..Bopomofo Letter Nn
+ (0x03131, 0x0318e,), # Hangul Letter Kiyeok ..Hangul Letter Araeae
+ (0x03190, 0x031e3,), # Ideographic Annotation L..Cjk Stroke Q
+ (0x031f0, 0x0321e,), # Katakana Letter Small Ku..Parenthesized Korean Cha
+ (0x03220, 0x03247,), # Parenthesized Ideograph ..Circled Ideograph Koto
+ (0x03250, 0x04dbf,), # Partnership Sign ..(nil)
+ (0x04e00, 0x0a48c,), # Cjk Unified Ideograph-4e..Yi Syllable Yyr
+ (0x0a490, 0x0a4c6,), # Yi Radical Qot ..Yi Radical Ke
+ (0x0a960, 0x0a97c,), # Hangul Choseong Tikeut-m..Hangul Choseong Ssangyeo
+ (0x0ac00, 0x0d7a3,), # Hangul Syllable Ga ..Hangul Syllable Hih
+ (0x0f900, 0x0faff,), # Cjk Compatibility Ideogr..(nil)
+ (0x0fe10, 0x0fe19,), # Presentation Form For Ve..Presentation Form For Ve
+ (0x0fe30, 0x0fe52,), # Presentation Form For Ve..Small Full Stop
+ (0x0fe54, 0x0fe66,), # Small Semicolon ..Small Equals Sign
+ (0x0fe68, 0x0fe6b,), # Small Reverse Solidus ..Small Commercial At
+ (0x0ff01, 0x0ff60,), # Fullwidth Exclamation Ma..Fullwidth Right White Pa
+ (0x0ffe0, 0x0ffe6,), # Fullwidth Cent Sign ..Fullwidth Won Sign
+ (0x16fe0, 0x16fe4,), # Tangut Iteration Mark ..(nil)
+ (0x16ff0, 0x16ff1,), # (nil) ..(nil)
+ (0x17000, 0x187f7,), # (nil) ..(nil)
+ (0x18800, 0x18cd5,), # Tangut Component-001 ..(nil)
+ (0x18d00, 0x18d08,), # (nil) ..(nil)
+ (0x1b000, 0x1b11e,), # Katakana Letter Archaic ..Hentaigana Letter N-mu-m
+ (0x1b150, 0x1b152,), # Hiragana Letter Small Wi..Hiragana Letter Small Wo
+ (0x1b164, 0x1b167,), # Katakana Letter Small Wi..Katakana Letter Small N
+ (0x1b170, 0x1b2fb,), # Nushu Character-1b170 ..Nushu Character-1b2fb
+ (0x1f004, 0x1f004,), # Mahjong Tile Red Dragon ..Mahjong Tile Red Dragon
+ (0x1f0cf, 0x1f0cf,), # Playing Card Black Joker..Playing Card Black Joker
+ (0x1f18e, 0x1f18e,), # Negative Squared Ab ..Negative Squared Ab
+ (0x1f191, 0x1f19a,), # Squared Cl ..Squared Vs
+ (0x1f200, 0x1f202,), # Square Hiragana Hoka ..Squared Katakana Sa
+ (0x1f210, 0x1f23b,), # Squared Cjk Unified Ideo..Squared Cjk Unified Ideo
+ (0x1f240, 0x1f248,), # Tortoise Shell Bracketed..Tortoise Shell Bracketed
+ (0x1f250, 0x1f251,), # Circled Ideograph Advant..Circled Ideograph Accept
+ (0x1f260, 0x1f265,), # Rounded Symbol For Fu ..Rounded Symbol For Cai
+ (0x1f300, 0x1f320,), # Cyclone ..Shooting Star
+ (0x1f32d, 0x1f335,), # Hot Dog ..Cactus
+ (0x1f337, 0x1f37c,), # Tulip ..Baby Bottle
+ (0x1f37e, 0x1f393,), # Bottle With Popping Cork..Graduation Cap
+ (0x1f3a0, 0x1f3ca,), # Carousel Horse ..Swimmer
+ (0x1f3cf, 0x1f3d3,), # Cricket Bat And Ball ..Table Tennis Paddle And
+ (0x1f3e0, 0x1f3f0,), # House Building ..European Castle
+ (0x1f3f4, 0x1f3f4,), # Waving Black Flag ..Waving Black Flag
+ (0x1f3f8, 0x1f43e,), # Badminton Racquet And Sh..Paw Prints
+ (0x1f440, 0x1f440,), # Eyes ..Eyes
+ (0x1f442, 0x1f4fc,), # Ear ..Videocassette
+ (0x1f4ff, 0x1f53d,), # Prayer Beads ..Down-pointing Small Red
+ (0x1f54b, 0x1f54e,), # Kaaba ..Menorah With Nine Branch
+ (0x1f550, 0x1f567,), # Clock Face One Oclock ..Clock Face Twelve-thirty
+ (0x1f57a, 0x1f57a,), # Man Dancing ..Man Dancing
+ (0x1f595, 0x1f596,), # Reversed Hand With Middl..Raised Hand With Part Be
+ (0x1f5a4, 0x1f5a4,), # Black Heart ..Black Heart
+ (0x1f5fb, 0x1f64f,), # Mount Fuji ..Person With Folded Hands
+ (0x1f680, 0x1f6c5,), # Rocket ..Left Luggage
+ (0x1f6cc, 0x1f6cc,), # Sleeping Accommodation ..Sleeping Accommodation
+ (0x1f6d0, 0x1f6d2,), # Place Of Worship ..Shopping Trolley
+ (0x1f6d5, 0x1f6d7,), # Hindu Temple ..(nil)
+ (0x1f6eb, 0x1f6ec,), # Airplane Departure ..Airplane Arriving
+ (0x1f6f4, 0x1f6fc,), # Scooter ..(nil)
+ (0x1f7e0, 0x1f7eb,), # Large Orange Circle ..Large Brown Square
+ (0x1f90c, 0x1f93a,), # (nil) ..Fencer
+ (0x1f93c, 0x1f945,), # Wrestlers ..Goal Net
+ (0x1f947, 0x1f978,), # First Place Medal ..(nil)
+ (0x1f97a, 0x1f9cb,), # Face With Pleading Eyes ..(nil)
+ (0x1f9cd, 0x1f9ff,), # Standing Person ..Nazar Amulet
+ (0x1fa70, 0x1fa74,), # Ballet Shoes ..(nil)
+ (0x1fa78, 0x1fa7a,), # Drop Of Blood ..Stethoscope
+ (0x1fa80, 0x1fa86,), # Yo-yo ..(nil)
+ (0x1fa90, 0x1faa8,), # Ringed Planet ..(nil)
+ (0x1fab0, 0x1fab6,), # (nil) ..(nil)
+ (0x1fac0, 0x1fac2,), # (nil) ..(nil)
+ (0x1fad0, 0x1fad6,), # (nil) ..(nil)
+ (0x20000, 0x2fffd,), # Cjk Unified Ideograph-20..(nil)
+ (0x30000, 0x3fffd,), # (nil) ..(nil)
+ ),
+}
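
Both generated tables share one layout: a dict keyed by Unicode version string, mapping to a sorted tuple of inclusive (start, end) codepoint ranges. A minimal sketch of how such a table is typically consulted follows; `in_table` is an illustrative name and not part of wcwidth's public API (wcwidth itself uses a similar private helper, `_bisearch`), while `WIDE_EASTASIAN` and the `'13.0.0'` key come from the file above, and the same lookup applies to the `ZERO_WIDTH` table defined below.

# A minimal sketch, assuming this vendored wcwidth package is importable;
# in_table is a hypothetical helper written for illustration only.
from wcwidth.table_wide import WIDE_EASTASIAN

def in_table(ucs, table):
    """Binary-search the sorted, inclusive (start, end) ranges for ordinal `ucs`."""
    lo, hi = 0, len(table) - 1
    # Fast reject: below the first range or above the last one.
    if not table or ucs < table[0][0] or ucs > table[hi][1]:
        return False
    while lo <= hi:
        mid = (lo + hi) // 2
        start, end = table[mid]
        if ucs > end:
            lo = mid + 1
        elif ucs < start:
            hi = mid - 1
        else:
            return True
    return False

# U+4E00 (a CJK ideograph) falls in the wide table for Unicode 13.0.0.
assert in_table(0x4E00, WIDE_EASTASIAN['13.0.0'])

Keeping each version's ranges sorted and non-overlapping is what makes this O(log n) lookup valid, which is why the generator emits the tuples in ascending codepoint order.
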
diff --git a/third_party/python/wcwidth/wcwidth/table_zero.py b/third_party/python/wcwidth/wcwidth/table_zero.py
new file mode 100644
index 0000000000..ab5150f517
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth/table_zero.py
@@ -0,0 +1,3910 @@
+"""Zero_Width table, created by bin/update-tables.py."""
+# Generated: 2020-06-23T16:03:21.187024
+ZERO_WIDTH = {
+ '4.1.0': (
+ # Source: DerivedGeneralCategory-4.1.0.txt
+ # Date: 2005-02-26, 02:35:50 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00486,), # Combining Cyrillic Titlo..Combining Cyrillic Psili
+ (0x00488, 0x00489,), # Combining Cyrillic Hundr..Combining Cyrillic Milli
+ (0x00591, 0x005b9,), # Hebrew Accent Etnahta ..Hebrew Point Holam
+ (0x005bb, 0x005bd,), # Hebrew Point Qubuts ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x00615,), # Arabic Sign Sallallahou ..Arabic Small High Tah
+ (0x0064b, 0x0065e,), # Arabic Fathatan ..Arabic Fatha With Two Do
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006de, 0x006e4,), # Arabic Start Of Rub El H..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x00901, 0x00902,), # Devanagari Sign Candrabi..Devanagari Sign Anusvara
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00954,), # Devanagari Stress Sign U..Devanagari Acute Accent
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b43,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00d41, 0x00d43,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f90, 0x00f97,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01032,), # Myanmar Vowel Sign Ai ..Myanmar Vowel Sign Ai
+ (0x01036, 0x01037,), # Myanmar Sign Anusvara ..Myanmar Sign Dot Below
+ (0x01039, 0x01039,), # Myanmar Sign Virama ..Myanmar Sign Virama
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0135f, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01dc0, 0x01dc3,), # Combining Dotted Grave A..Combining Suspension Mar
+ (0x020d0, 0x020eb,), # Combining Left Harpoon A..Combining Long Double So
+ (0x0302a, 0x0302f,), # Ideographic Level Tone M..Hangul Double Dot Tone M
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe23,), # Combining Ligature Left ..Combining Double Tilde R
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '5.0.0': (
+ # Source: DerivedGeneralCategory-5.0.0.txt
+ # Date: 2006-02-27, 23:41:27 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00486,), # Combining Cyrillic Titlo..Combining Cyrillic Psili
+ (0x00488, 0x00489,), # Combining Cyrillic Hundr..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x00615,), # Arabic Sign Sallallahou ..Arabic Small High Tah
+ (0x0064b, 0x0065e,), # Arabic Fathatan ..Arabic Fatha With Two Do
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006de, 0x006e4,), # Arabic Start Of Rub El H..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00901, 0x00902,), # Devanagari Sign Candrabi..Devanagari Sign Anusvara
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00954,), # Devanagari Stress Sign U..Devanagari Acute Accent
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b43,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d41, 0x00d43,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f90, 0x00f97,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01032,), # Myanmar Vowel Sign Ai ..Myanmar Vowel Sign Ai
+ (0x01036, 0x01037,), # Myanmar Sign Anusvara ..Myanmar Sign Dot Below
+ (0x01039, 0x01039,), # Myanmar Sign Virama ..Myanmar Sign Virama
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0135f, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01dc0, 0x01dca,), # Combining Dotted Grave A..Combining Latin Small Le
+ (0x01dfe, 0x01dff,), # Combining Left Arrowhead..Combining Right Arrowhea
+ (0x020d0, 0x020ef,), # Combining Left Harpoon A..Combining Right Arrow Be
+ (0x0302a, 0x0302f,), # Ideographic Level Tone M..Hangul Double Dot Tone M
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe23,), # Combining Ligature Left ..Combining Double Tilde R
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '5.1.0': (
+ # Source: DerivedGeneralCategory-5.1.0.txt
+ # Date: 2008-03-20, 17:54:57 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065e,), # Arabic Fathatan ..Arabic Fatha With Two Do
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006de, 0x006e4,), # Arabic Start Of Rub El H..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00901, 0x00902,), # Devanagari Sign Candrabi..Devanagari Sign Anusvara
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00954,), # Devanagari Stress Sign U..Devanagari Acute Accent
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f90, 0x00f97,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0135f, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01dc0, 0x01de6,), # Combining Dotted Grave A..Combining Latin Small Le
+ (0x01dfe, 0x01dff,), # Combining Left Arrowhead..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302f,), # Ideographic Level Tone M..Hangul Double Dot Tone M
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a67c, 0x0a67d,), # Combining Cyrillic Kavyk..Combining Cyrillic Payer
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe26,), # Combining Ligature Left ..Combining Conjoining Mac
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '5.2.0': (
+ # Source: DerivedGeneralCategory-5.2.0.txt
+ # Date: 2009-08-22, 04:58:21 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065e,), # Arabic Fathatan ..Arabic Fatha With Two Do
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006de, 0x006e4,), # Arabic Start Of Rub El H..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00900, 0x00902,), # Devanagari Sign Inverted..Devanagari Sign Anusvara
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00955,), # Devanagari Stress Sign U..Devanagari Vowel Sign Ca
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f90, 0x00f97,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135f, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01dc0, 0x01de6,), # Combining Dotted Grave A..Combining Latin Small Le
+ (0x01dfd, 0x01dff,), # Combining Almost Equal T..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302f,), # Ideographic Level Tone M..Hangul Double Dot Tone M
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a67c, 0x0a67d,), # Combining Cyrillic Kavyk..Combining Cyrillic Payer
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe26,), # Combining Ligature Left ..Combining Conjoining Mac
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x11080, 0x11081,), # Kaithi Sign Candrabindu ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '6.0.0': (
+ # Source: DerivedGeneralCategory-6.0.0.txt
+ # Date: 2010-08-19, 00:48:09 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x00900, 0x00902,), # Devanagari Sign Inverted..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01dc0, 0x01de6,), # Combining Dotted Grave A..Combining Latin Small Le
+ (0x01dfc, 0x01dff,), # Combining Double Inverte..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302f,), # Ideographic Level Tone M..Hangul Double Dot Tone M
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a67c, 0x0a67d,), # Combining Cyrillic Kavyk..Combining Cyrillic Payer
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe26,), # Combining Ligature Left ..Combining Conjoining Mac
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x11080, 0x11081,), # Kaithi Sign Candrabindu ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '6.1.0': (
+ # Source: DerivedGeneralCategory-6.1.0.txt
+ # Date: 2011-11-27, 05:10:22 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008e4, 0x008fe,), # Arabic Curly Fatha ..Arabic Damma With Dot
+ (0x00900, 0x00902,), # Devanagari Sign Inverted..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bab,), # Sundanese Sign Virama ..Sundanese Sign Virama
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01dc0, 0x01de6,), # Combining Dotted Grave A..Combining Latin Small Le
+ (0x01dfc, 0x01dff,), # Combining Double Inverte..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69f, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe26,), # Combining Ligature Left ..Combining Conjoining Mac
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x11080, 0x11081,), # Kaithi Sign Candrabindu ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '6.2.0': (
+ # Source: DerivedGeneralCategory-6.2.0.txt
+ # Date: 2012-05-20, 00:42:34 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008e4, 0x008fe,), # Arabic Curly Fatha ..Arabic Damma With Dot
+ (0x00900, 0x00902,), # Devanagari Sign Inverted..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bab,), # Sundanese Sign Virama ..Sundanese Sign Virama
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01dc0, 0x01de6,), # Combining Dotted Grave A..Combining Latin Small Le
+ (0x01dfc, 0x01dff,), # Combining Double Inverte..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69f, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe26,), # Combining Ligature Left ..Combining Conjoining Mac
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x11080, 0x11081,), # Kaithi Sign Candrabindu ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '6.3.0': (
+ # Source: DerivedGeneralCategory-6.3.0.txt
+ # Date: 2013-07-05, 14:08:45 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008e4, 0x008fe,), # Arabic Curly Fatha ..Arabic Damma With Dot
+ (0x00900, 0x00902,), # Devanagari Sign Inverted..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bab,), # Sundanese Sign Virama ..Sundanese Sign Virama
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01dc0, 0x01de6,), # Combining Dotted Grave A..Combining Latin Small Le
+ (0x01dfc, 0x01dff,), # Combining Double Inverte..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69f, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe26,), # Combining Ligature Left ..Combining Conjoining Mac
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x11080, 0x11081,), # Kaithi Sign Candrabindu ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '7.0.0': (
+ # Source: DerivedGeneralCategory-7.0.0.txt
+ # Date: 2014-02-07, 18:42:12 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008e4, 0x00902,), # Arabic Curly Fatha ..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c00, 0x00c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00c81, 0x00c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d01, 0x00d01,), # Malayalam Sign Candrabin..Malayalam Sign Candrabin
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01ab0, 0x01abe,), # Combining Doubled Circum..Combining Parentheses Ov
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01cf8, 0x01cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
+ (0x01dc0, 0x01df5,), # Combining Dotted Grave A..Combining Up Tack Above
+ (0x01dfc, 0x01dff,), # Combining Double Inverte..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69f, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0a9e5, 0x0a9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa7c, 0x0aa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe2d,), # Combining Ligature Left ..Combining Conjoining Mac
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
+ (0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
+ (0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
+ (0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
+ (0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
+ (0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
+ (0x11301, 0x11301,), # Grantha Sign Candrabindu..Grantha Sign Candrabindu
+ (0x1133c, 0x1133c,), # Grantha Sign Nukta ..Grantha Sign Nukta
+ (0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
+ (0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
+ (0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
+ (0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
+ (0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
+ (0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
+ (0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
+ (0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
+ (0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
+ (0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
+ (0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
+ (0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
+ (0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
+ (0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '8.0.0': (
+ # Source: DerivedGeneralCategory-8.0.0.txt
+ # Date: 2015-02-13, 13:47:11 GMT [MD]
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008e3, 0x00902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c00, 0x00c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00c81, 0x00c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d01, 0x00d01,), # Malayalam Sign Candrabin..Malayalam Sign Candrabin
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01ab0, 0x01abe,), # Combining Doubled Circum..Combining Parentheses Ov
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01cf8, 0x01cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
+ (0x01dc0, 0x01df5,), # Combining Dotted Grave A..Combining Up Tack Above
+ (0x01dfc, 0x01dff,), # Combining Double Inverte..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69e, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c4,), # Saurashtra Sign Virama ..Saurashtra Sign Virama
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0a9e5, 0x0a9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa7c, 0x0aa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
+ (0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x111ca, 0x111cc,), # Sharada Sign Nukta ..Sharada Extra Short Vowe
+ (0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
+ (0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
+ (0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
+ (0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
+ (0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
+ (0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
+ (0x1133c, 0x1133c,), # Grantha Sign Nukta ..Grantha Sign Nukta
+ (0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
+ (0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
+ (0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
+ (0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
+ (0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
+ (0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
+ (0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
+ (0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
+ (0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
+ (0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
+ (0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
+ (0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
+ (0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
+ (0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
+ (0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
+ (0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
+ (0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
+ (0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
+ (0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
+ (0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
+ (0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
+ (0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
+ (0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
+ (0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '9.0.0': (
+ # Source: DerivedGeneralCategory-9.0.0.txt
+ # Date: 2016-06-01, 10:34:26 GMT
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008d4, 0x008e1,), # Arabic Small High Word A..Arabic Small High Sign S
+ (0x008e3, 0x00902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c00, 0x00c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00c81, 0x00c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d01, 0x00d01,), # Malayalam Sign Candrabin..Malayalam Sign Candrabin
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x01885, 0x01886,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01ab0, 0x01abe,), # Combining Doubled Circum..Combining Parentheses Ov
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01cf8, 0x01cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
+ (0x01dc0, 0x01df5,), # Combining Dotted Grave A..Combining Up Tack Above
+ (0x01dfb, 0x01dff,), # Combining Deletion Mark ..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69e, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c5,), # Saurashtra Sign Virama ..Saurashtra Sign Candrabi
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0a9e5, 0x0a9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa7c, 0x0aa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
+ (0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x111ca, 0x111cc,), # Sharada Sign Nukta ..Sharada Extra Short Vowe
+ (0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
+ (0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
+ (0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
+ (0x1123e, 0x1123e,), # Khojki Sign Sukun ..Khojki Sign Sukun
+ (0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
+ (0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
+ (0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
+ (0x1133c, 0x1133c,), # Grantha Sign Nukta ..Grantha Sign Nukta
+ (0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
+ (0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
+ (0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
+ (0x11438, 0x1143f,), # Newa Vowel Sign U ..Newa Vowel Sign Ai
+ (0x11442, 0x11444,), # Newa Sign Virama ..Newa Sign Anusvara
+ (0x11446, 0x11446,), # Newa Sign Nukta ..Newa Sign Nukta
+ (0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
+ (0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
+ (0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
+ (0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
+ (0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
+ (0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
+ (0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
+ (0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
+ (0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
+ (0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
+ (0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
+ (0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
+ (0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
+ (0x11c30, 0x11c36,), # Bhaiksuki Vowel Sign I ..Bhaiksuki Vowel Sign Voc
+ (0x11c38, 0x11c3d,), # Bhaiksuki Vowel Sign E ..Bhaiksuki Sign Anusvara
+ (0x11c3f, 0x11c3f,), # Bhaiksuki Sign Virama ..Bhaiksuki Sign Virama
+ (0x11c92, 0x11ca7,), # Marchen Subjoined Letter..Marchen Subjoined Letter
+ (0x11caa, 0x11cb0,), # Marchen Subjoined Letter..Marchen Vowel Sign Aa
+ (0x11cb2, 0x11cb3,), # Marchen Vowel Sign U ..Marchen Vowel Sign E
+ (0x11cb5, 0x11cb6,), # Marchen Sign Anusvara ..Marchen Sign Candrabindu
+ (0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
+ (0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
+ (0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
+ (0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
+ (0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
+ (0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
+ (0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
+ (0x1e000, 0x1e006,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e008, 0x1e018,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e01b, 0x1e021,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e023, 0x1e024,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e026, 0x1e02a,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
+ (0x1e944, 0x1e94a,), # Adlam Alif Lengthener ..Adlam Nukta
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '10.0.0': (
+ # Source: DerivedGeneralCategory-10.0.0.txt
+ # Date: 2017-03-08, 08:41:49 GMT
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008d4, 0x008e1,), # Arabic Small High Word A..Arabic Small High Sign S
+ (0x008e3, 0x00902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00afa, 0x00aff,), # Gujarati Sign Sukun ..Gujarati Sign Two-circle
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c00, 0x00c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00c81, 0x00c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d00, 0x00d01,), # Malayalam Sign Combining..Malayalam Sign Candrabin
+ (0x00d3b, 0x00d3c,), # Malayalam Sign Vertical ..Malayalam Sign Circular
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x01885, 0x01886,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01ab0, 0x01abe,), # Combining Doubled Circum..Combining Parentheses Ov
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01cf8, 0x01cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
+ (0x01dc0, 0x01df9,), # Combining Dotted Grave A..Combining Wide Inverted
+ (0x01dfb, 0x01dff,), # Combining Deletion Mark ..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69e, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c5,), # Saurashtra Sign Virama ..Saurashtra Sign Candrabi
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0a9e5, 0x0a9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa7c, 0x0aa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
+ (0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x111ca, 0x111cc,), # Sharada Sign Nukta ..Sharada Extra Short Vowe
+ (0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
+ (0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
+ (0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
+ (0x1123e, 0x1123e,), # Khojki Sign Sukun ..Khojki Sign Sukun
+ (0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
+ (0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
+ (0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
+ (0x1133c, 0x1133c,), # Grantha Sign Nukta ..Grantha Sign Nukta
+ (0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
+ (0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
+ (0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
+ (0x11438, 0x1143f,), # Newa Vowel Sign U ..Newa Vowel Sign Ai
+ (0x11442, 0x11444,), # Newa Sign Virama ..Newa Sign Anusvara
+ (0x11446, 0x11446,), # Newa Sign Nukta ..Newa Sign Nukta
+ (0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
+ (0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
+ (0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
+ (0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
+ (0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
+ (0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
+ (0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
+ (0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
+ (0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
+ (0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
+ (0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
+ (0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
+ (0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
+ (0x11a01, 0x11a06,), # Zanabazar Square Vowel S..Zanabazar Square Vowel S
+ (0x11a09, 0x11a0a,), # Zanabazar Square Vowel S..Zanabazar Square Vowel L
+ (0x11a33, 0x11a38,), # Zanabazar Square Final C..Zanabazar Square Sign An
+ (0x11a3b, 0x11a3e,), # Zanabazar Square Cluster..Zanabazar Square Cluster
+ (0x11a47, 0x11a47,), # Zanabazar Square Subjoin..Zanabazar Square Subjoin
+ (0x11a51, 0x11a56,), # Soyombo Vowel Sign I ..Soyombo Vowel Sign Oe
+ (0x11a59, 0x11a5b,), # Soyombo Vowel Sign Vocal..Soyombo Vowel Length Mar
+ (0x11a8a, 0x11a96,), # Soyombo Final Consonant ..Soyombo Sign Anusvara
+ (0x11a98, 0x11a99,), # Soyombo Gemination Mark ..Soyombo Subjoiner
+ (0x11c30, 0x11c36,), # Bhaiksuki Vowel Sign I ..Bhaiksuki Vowel Sign Voc
+ (0x11c38, 0x11c3d,), # Bhaiksuki Vowel Sign E ..Bhaiksuki Sign Anusvara
+ (0x11c3f, 0x11c3f,), # Bhaiksuki Sign Virama ..Bhaiksuki Sign Virama
+ (0x11c92, 0x11ca7,), # Marchen Subjoined Letter..Marchen Subjoined Letter
+ (0x11caa, 0x11cb0,), # Marchen Subjoined Letter..Marchen Vowel Sign Aa
+ (0x11cb2, 0x11cb3,), # Marchen Vowel Sign U ..Marchen Vowel Sign E
+ (0x11cb5, 0x11cb6,), # Marchen Sign Anusvara ..Marchen Sign Candrabindu
+ (0x11d31, 0x11d36,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3a, 0x11d3a,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3c, 0x11d3d,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3f, 0x11d45,), # Masaram Gondi Vowel Sign..Masaram Gondi Virama
+ (0x11d47, 0x11d47,), # Masaram Gondi Ra-kara ..Masaram Gondi Ra-kara
+ (0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
+ (0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
+ (0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
+ (0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
+ (0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
+ (0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
+ (0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
+ (0x1e000, 0x1e006,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e008, 0x1e018,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e01b, 0x1e021,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e023, 0x1e024,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e026, 0x1e02a,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
+ (0x1e944, 0x1e94a,), # Adlam Alif Lengthener ..Adlam Nukta
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '11.0.0': (
+ # Source: DerivedGeneralCategory-11.0.0.txt
+ # Date: 2018-02-21, 05:34:04 GMT
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x007fd, 0x007fd,), # Nko Dantayalan ..Nko Dantayalan
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008d3, 0x008e1,), # Arabic Small Low Waw ..Arabic Small High Sign S
+ (0x008e3, 0x00902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x009fe, 0x009fe,), # Bengali Sandhi Mark ..Bengali Sandhi Mark
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00afa, 0x00aff,), # Gujarati Sign Sukun ..Gujarati Sign Two-circle
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c00, 0x00c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
+ (0x00c04, 0x00c04,), # Telugu Sign Combining An..Telugu Sign Combining An
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00c81, 0x00c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d00, 0x00d01,), # Malayalam Sign Combining..Malayalam Sign Candrabin
+ (0x00d3b, 0x00d3c,), # Malayalam Sign Vertical ..Malayalam Sign Circular
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00eb9,), # Lao Vowel Sign I ..Lao Vowel Sign Uu
+ (0x00ebb, 0x00ebc,), # Lao Vowel Sign Mai Kon ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x01885, 0x01886,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01ab0, 0x01abe,), # Combining Doubled Circum..Combining Parentheses Ov
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01cf8, 0x01cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
+ (0x01dc0, 0x01df9,), # Combining Dotted Grave A..Combining Wide Inverted
+ (0x01dfb, 0x01dff,), # Combining Deletion Mark ..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69e, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c5,), # Saurashtra Sign Virama ..Saurashtra Sign Candrabi
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a8ff, 0x0a8ff,), # Devanagari Vowel Sign Ay..Devanagari Vowel Sign Ay
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bc,), # Javanese Vowel Sign Pepe..Javanese Vowel Sign Pepe
+ (0x0a9e5, 0x0a9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa7c, 0x0aa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
+ (0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
+ (0x10d24, 0x10d27,), # Hanifi Rohingya Sign Har..Hanifi Rohingya Sign Tas
+ (0x10f46, 0x10f50,), # Sogdian Combining Dot Be..Sogdian Combining Stroke
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x111c9, 0x111cc,), # Sharada Sandhi Mark ..Sharada Extra Short Vowe
+ (0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
+ (0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
+ (0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
+ (0x1123e, 0x1123e,), # Khojki Sign Sukun ..Khojki Sign Sukun
+ (0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
+ (0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
+ (0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
+ (0x1133b, 0x1133c,), # Combining Bindu Below ..Grantha Sign Nukta
+ (0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
+ (0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
+ (0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
+ (0x11438, 0x1143f,), # Newa Vowel Sign U ..Newa Vowel Sign Ai
+ (0x11442, 0x11444,), # Newa Sign Virama ..Newa Sign Anusvara
+ (0x11446, 0x11446,), # Newa Sign Nukta ..Newa Sign Nukta
+ (0x1145e, 0x1145e,), # Newa Sandhi Mark ..Newa Sandhi Mark
+ (0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
+ (0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
+ (0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
+ (0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
+ (0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
+ (0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
+ (0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
+ (0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
+ (0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
+ (0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
+ (0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
+ (0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
+ (0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
+ (0x1182f, 0x11837,), # Dogra Vowel Sign U ..Dogra Sign Anusvara
+ (0x11839, 0x1183a,), # Dogra Sign Virama ..Dogra Sign Nukta
+ (0x11a01, 0x11a0a,), # Zanabazar Square Vowel S..Zanabazar Square Vowel L
+ (0x11a33, 0x11a38,), # Zanabazar Square Final C..Zanabazar Square Sign An
+ (0x11a3b, 0x11a3e,), # Zanabazar Square Cluster..Zanabazar Square Cluster
+ (0x11a47, 0x11a47,), # Zanabazar Square Subjoin..Zanabazar Square Subjoin
+ (0x11a51, 0x11a56,), # Soyombo Vowel Sign I ..Soyombo Vowel Sign Oe
+ (0x11a59, 0x11a5b,), # Soyombo Vowel Sign Vocal..Soyombo Vowel Length Mar
+ (0x11a8a, 0x11a96,), # Soyombo Final Consonant ..Soyombo Sign Anusvara
+ (0x11a98, 0x11a99,), # Soyombo Gemination Mark ..Soyombo Subjoiner
+ (0x11c30, 0x11c36,), # Bhaiksuki Vowel Sign I ..Bhaiksuki Vowel Sign Voc
+ (0x11c38, 0x11c3d,), # Bhaiksuki Vowel Sign E ..Bhaiksuki Sign Anusvara
+ (0x11c3f, 0x11c3f,), # Bhaiksuki Sign Virama ..Bhaiksuki Sign Virama
+ (0x11c92, 0x11ca7,), # Marchen Subjoined Letter..Marchen Subjoined Letter
+ (0x11caa, 0x11cb0,), # Marchen Subjoined Letter..Marchen Vowel Sign Aa
+ (0x11cb2, 0x11cb3,), # Marchen Vowel Sign U ..Marchen Vowel Sign E
+ (0x11cb5, 0x11cb6,), # Marchen Sign Anusvara ..Marchen Sign Candrabindu
+ (0x11d31, 0x11d36,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3a, 0x11d3a,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3c, 0x11d3d,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3f, 0x11d45,), # Masaram Gondi Vowel Sign..Masaram Gondi Virama
+ (0x11d47, 0x11d47,), # Masaram Gondi Ra-kara ..Masaram Gondi Ra-kara
+ (0x11d90, 0x11d91,), # Gunjala Gondi Vowel Sign..Gunjala Gondi Vowel Sign
+ (0x11d95, 0x11d95,), # Gunjala Gondi Sign Anusv..Gunjala Gondi Sign Anusv
+ (0x11d97, 0x11d97,), # Gunjala Gondi Virama ..Gunjala Gondi Virama
+ (0x11ef3, 0x11ef4,), # Makasar Vowel Sign I ..Makasar Vowel Sign U
+ (0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
+ (0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
+ (0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
+ (0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
+ (0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
+ (0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
+ (0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
+ (0x1e000, 0x1e006,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e008, 0x1e018,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e01b, 0x1e021,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e023, 0x1e024,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e026, 0x1e02a,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
+ (0x1e944, 0x1e94a,), # Adlam Alif Lengthener ..Adlam Nukta
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '12.0.0': (
+ # Source: DerivedGeneralCategory-12.0.0.txt
+ # Date: 2019-01-22, 08:18:28 GMT
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x007fd, 0x007fd,), # Nko Dantayalan ..Nko Dantayalan
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008d3, 0x008e1,), # Arabic Small Low Waw ..Arabic Small High Sign S
+ (0x008e3, 0x00902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x009fe, 0x009fe,), # Bengali Sandhi Mark ..Bengali Sandhi Mark
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00afa, 0x00aff,), # Gujarati Sign Sukun ..Gujarati Sign Two-circle
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c00, 0x00c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
+ (0x00c04, 0x00c04,), # Telugu Sign Combining An..Telugu Sign Combining An
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00c81, 0x00c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d00, 0x00d01,), # Malayalam Sign Combining..Malayalam Sign Candrabin
+ (0x00d3b, 0x00d3c,), # Malayalam Sign Vertical ..Malayalam Sign Circular
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00ebc,), # Lao Vowel Sign I ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x01885, 0x01886,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01ab0, 0x01abe,), # Combining Doubled Circum..Combining Parentheses Ov
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01cf8, 0x01cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
+ (0x01dc0, 0x01df9,), # Combining Dotted Grave A..Combining Wide Inverted
+ (0x01dfb, 0x01dff,), # Combining Deletion Mark ..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69e, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c5,), # Saurashtra Sign Virama ..Saurashtra Sign Candrabi
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a8ff, 0x0a8ff,), # Devanagari Vowel Sign Ay..Devanagari Vowel Sign Ay
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bd,), # Javanese Vowel Sign Pepe..Javanese Consonant Sign
+ (0x0a9e5, 0x0a9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa7c, 0x0aa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
+ (0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
+ (0x10d24, 0x10d27,), # Hanifi Rohingya Sign Har..Hanifi Rohingya Sign Tas
+ (0x10f46, 0x10f50,), # Sogdian Combining Dot Be..Sogdian Combining Stroke
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x111c9, 0x111cc,), # Sharada Sandhi Mark ..Sharada Extra Short Vowe
+ (0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
+ (0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
+ (0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
+ (0x1123e, 0x1123e,), # Khojki Sign Sukun ..Khojki Sign Sukun
+ (0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
+ (0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
+ (0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
+ (0x1133b, 0x1133c,), # Combining Bindu Below ..Grantha Sign Nukta
+ (0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
+ (0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
+ (0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
+ (0x11438, 0x1143f,), # Newa Vowel Sign U ..Newa Vowel Sign Ai
+ (0x11442, 0x11444,), # Newa Sign Virama ..Newa Sign Anusvara
+ (0x11446, 0x11446,), # Newa Sign Nukta ..Newa Sign Nukta
+ (0x1145e, 0x1145e,), # Newa Sandhi Mark ..Newa Sandhi Mark
+ (0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
+ (0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
+ (0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
+ (0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
+ (0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
+ (0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
+ (0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
+ (0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
+ (0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
+ (0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
+ (0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
+ (0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
+ (0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
+ (0x1182f, 0x11837,), # Dogra Vowel Sign U ..Dogra Sign Anusvara
+ (0x11839, 0x1183a,), # Dogra Sign Virama ..Dogra Sign Nukta
+ (0x119d4, 0x119d7,), # Nandinagari Vowel Sign U..Nandinagari Vowel Sign V
+ (0x119da, 0x119db,), # Nandinagari Vowel Sign E..Nandinagari Vowel Sign A
+ (0x119e0, 0x119e0,), # Nandinagari Sign Virama ..Nandinagari Sign Virama
+ (0x11a01, 0x11a0a,), # Zanabazar Square Vowel S..Zanabazar Square Vowel L
+ (0x11a33, 0x11a38,), # Zanabazar Square Final C..Zanabazar Square Sign An
+ (0x11a3b, 0x11a3e,), # Zanabazar Square Cluster..Zanabazar Square Cluster
+ (0x11a47, 0x11a47,), # Zanabazar Square Subjoin..Zanabazar Square Subjoin
+ (0x11a51, 0x11a56,), # Soyombo Vowel Sign I ..Soyombo Vowel Sign Oe
+ (0x11a59, 0x11a5b,), # Soyombo Vowel Sign Vocal..Soyombo Vowel Length Mar
+ (0x11a8a, 0x11a96,), # Soyombo Final Consonant ..Soyombo Sign Anusvara
+ (0x11a98, 0x11a99,), # Soyombo Gemination Mark ..Soyombo Subjoiner
+ (0x11c30, 0x11c36,), # Bhaiksuki Vowel Sign I ..Bhaiksuki Vowel Sign Voc
+ (0x11c38, 0x11c3d,), # Bhaiksuki Vowel Sign E ..Bhaiksuki Sign Anusvara
+ (0x11c3f, 0x11c3f,), # Bhaiksuki Sign Virama ..Bhaiksuki Sign Virama
+ (0x11c92, 0x11ca7,), # Marchen Subjoined Letter..Marchen Subjoined Letter
+ (0x11caa, 0x11cb0,), # Marchen Subjoined Letter..Marchen Vowel Sign Aa
+ (0x11cb2, 0x11cb3,), # Marchen Vowel Sign U ..Marchen Vowel Sign E
+ (0x11cb5, 0x11cb6,), # Marchen Sign Anusvara ..Marchen Sign Candrabindu
+ (0x11d31, 0x11d36,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3a, 0x11d3a,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3c, 0x11d3d,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3f, 0x11d45,), # Masaram Gondi Vowel Sign..Masaram Gondi Virama
+ (0x11d47, 0x11d47,), # Masaram Gondi Ra-kara ..Masaram Gondi Ra-kara
+ (0x11d90, 0x11d91,), # Gunjala Gondi Vowel Sign..Gunjala Gondi Vowel Sign
+ (0x11d95, 0x11d95,), # Gunjala Gondi Sign Anusv..Gunjala Gondi Sign Anusv
+ (0x11d97, 0x11d97,), # Gunjala Gondi Virama ..Gunjala Gondi Virama
+ (0x11ef3, 0x11ef4,), # Makasar Vowel Sign I ..Makasar Vowel Sign U
+ (0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
+ (0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
+ (0x16f4f, 0x16f4f,), # Miao Sign Consonant Modi..Miao Sign Consonant Modi
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
+ (0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
+ (0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
+ (0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
+ (0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
+ (0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
+ (0x1e000, 0x1e006,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e008, 0x1e018,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e01b, 0x1e021,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e023, 0x1e024,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e026, 0x1e02a,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e130, 0x1e136,), # Nyiakeng Puachue Hmong T..Nyiakeng Puachue Hmong T
+ (0x1e2ec, 0x1e2ef,), # Wancho Tone Tup ..Wancho Tone Koini
+ (0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
+ (0x1e944, 0x1e94a,), # Adlam Alif Lengthener ..Adlam Nukta
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '12.1.0': (
+ # Source: DerivedGeneralCategory-12.1.0.txt
+ # Date: 2019-03-10, 10:53:08 GMT
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x007fd, 0x007fd,), # Nko Dantayalan ..Nko Dantayalan
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008d3, 0x008e1,), # Arabic Small Low Waw ..Arabic Small High Sign S
+ (0x008e3, 0x00902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x009fe, 0x009fe,), # Bengali Sandhi Mark ..Bengali Sandhi Mark
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00afa, 0x00aff,), # Gujarati Sign Sukun ..Gujarati Sign Two-circle
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b56, 0x00b56,), # Oriya Ai Length Mark ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c00, 0x00c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
+ (0x00c04, 0x00c04,), # Telugu Sign Combining An..Telugu Sign Combining An
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00c81, 0x00c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d00, 0x00d01,), # Malayalam Sign Combining..Malayalam Sign Candrabin
+ (0x00d3b, 0x00d3c,), # Malayalam Sign Vertical ..Malayalam Sign Circular
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00ebc,), # Lao Vowel Sign I ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x01885, 0x01886,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01ab0, 0x01abe,), # Combining Doubled Circum..Combining Parentheses Ov
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01cf8, 0x01cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
+ (0x01dc0, 0x01df9,), # Combining Dotted Grave A..Combining Wide Inverted
+ (0x01dfb, 0x01dff,), # Combining Deletion Mark ..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69e, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a8c4, 0x0a8c5,), # Saurashtra Sign Virama ..Saurashtra Sign Candrabi
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a8ff, 0x0a8ff,), # Devanagari Vowel Sign Ay..Devanagari Vowel Sign Ay
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bd,), # Javanese Vowel Sign Pepe..Javanese Consonant Sign
+ (0x0a9e5, 0x0a9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa7c, 0x0aa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
+ (0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
+ (0x10d24, 0x10d27,), # Hanifi Rohingya Sign Har..Hanifi Rohingya Sign Tas
+ (0x10f46, 0x10f50,), # Sogdian Combining Dot Be..Sogdian Combining Stroke
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x111c9, 0x111cc,), # Sharada Sandhi Mark ..Sharada Extra Short Vowe
+ (0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
+ (0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
+ (0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
+ (0x1123e, 0x1123e,), # Khojki Sign Sukun ..Khojki Sign Sukun
+ (0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
+ (0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
+ (0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
+ (0x1133b, 0x1133c,), # Combining Bindu Below ..Grantha Sign Nukta
+ (0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
+ (0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
+ (0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
+ (0x11438, 0x1143f,), # Newa Vowel Sign U ..Newa Vowel Sign Ai
+ (0x11442, 0x11444,), # Newa Sign Virama ..Newa Sign Anusvara
+ (0x11446, 0x11446,), # Newa Sign Nukta ..Newa Sign Nukta
+ (0x1145e, 0x1145e,), # Newa Sandhi Mark ..Newa Sandhi Mark
+ (0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
+ (0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
+ (0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
+ (0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
+ (0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
+ (0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
+ (0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
+ (0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
+ (0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
+ (0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
+ (0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
+ (0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
+ (0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
+ (0x1182f, 0x11837,), # Dogra Vowel Sign U ..Dogra Sign Anusvara
+ (0x11839, 0x1183a,), # Dogra Sign Virama ..Dogra Sign Nukta
+ (0x119d4, 0x119d7,), # Nandinagari Vowel Sign U..Nandinagari Vowel Sign V
+ (0x119da, 0x119db,), # Nandinagari Vowel Sign E..Nandinagari Vowel Sign A
+ (0x119e0, 0x119e0,), # Nandinagari Sign Virama ..Nandinagari Sign Virama
+ (0x11a01, 0x11a0a,), # Zanabazar Square Vowel S..Zanabazar Square Vowel L
+ (0x11a33, 0x11a38,), # Zanabazar Square Final C..Zanabazar Square Sign An
+ (0x11a3b, 0x11a3e,), # Zanabazar Square Cluster..Zanabazar Square Cluster
+ (0x11a47, 0x11a47,), # Zanabazar Square Subjoin..Zanabazar Square Subjoin
+ (0x11a51, 0x11a56,), # Soyombo Vowel Sign I ..Soyombo Vowel Sign Oe
+ (0x11a59, 0x11a5b,), # Soyombo Vowel Sign Vocal..Soyombo Vowel Length Mar
+ (0x11a8a, 0x11a96,), # Soyombo Final Consonant ..Soyombo Sign Anusvara
+ (0x11a98, 0x11a99,), # Soyombo Gemination Mark ..Soyombo Subjoiner
+ (0x11c30, 0x11c36,), # Bhaiksuki Vowel Sign I ..Bhaiksuki Vowel Sign Voc
+ (0x11c38, 0x11c3d,), # Bhaiksuki Vowel Sign E ..Bhaiksuki Sign Anusvara
+ (0x11c3f, 0x11c3f,), # Bhaiksuki Sign Virama ..Bhaiksuki Sign Virama
+ (0x11c92, 0x11ca7,), # Marchen Subjoined Letter..Marchen Subjoined Letter
+ (0x11caa, 0x11cb0,), # Marchen Subjoined Letter..Marchen Vowel Sign Aa
+ (0x11cb2, 0x11cb3,), # Marchen Vowel Sign U ..Marchen Vowel Sign E
+ (0x11cb5, 0x11cb6,), # Marchen Sign Anusvara ..Marchen Sign Candrabindu
+ (0x11d31, 0x11d36,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3a, 0x11d3a,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3c, 0x11d3d,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3f, 0x11d45,), # Masaram Gondi Vowel Sign..Masaram Gondi Virama
+ (0x11d47, 0x11d47,), # Masaram Gondi Ra-kara ..Masaram Gondi Ra-kara
+ (0x11d90, 0x11d91,), # Gunjala Gondi Vowel Sign..Gunjala Gondi Vowel Sign
+ (0x11d95, 0x11d95,), # Gunjala Gondi Sign Anusv..Gunjala Gondi Sign Anusv
+ (0x11d97, 0x11d97,), # Gunjala Gondi Virama ..Gunjala Gondi Virama
+ (0x11ef3, 0x11ef4,), # Makasar Vowel Sign I ..Makasar Vowel Sign U
+ (0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
+ (0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
+ (0x16f4f, 0x16f4f,), # Miao Sign Consonant Modi..Miao Sign Consonant Modi
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
+ (0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
+ (0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
+ (0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
+ (0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
+ (0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
+ (0x1e000, 0x1e006,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e008, 0x1e018,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e01b, 0x1e021,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e023, 0x1e024,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e026, 0x1e02a,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e130, 0x1e136,), # Nyiakeng Puachue Hmong T..Nyiakeng Puachue Hmong T
+ (0x1e2ec, 0x1e2ef,), # Wancho Tone Tup ..Wancho Tone Koini
+ (0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
+ (0x1e944, 0x1e94a,), # Adlam Alif Lengthener ..Adlam Nukta
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+ '13.0.0': (
+ # Source: DerivedGeneralCategory-13.0.0.txt
+ # Date: 2019-10-21, 14:30:32 GMT
+ #
+ (0x00300, 0x0036f,), # Combining Grave Accent ..Combining Latin Small Le
+ (0x00483, 0x00489,), # Combining Cyrillic Titlo..Combining Cyrillic Milli
+ (0x00591, 0x005bd,), # Hebrew Accent Etnahta ..Hebrew Point Meteg
+ (0x005bf, 0x005bf,), # Hebrew Point Rafe ..Hebrew Point Rafe
+ (0x005c1, 0x005c2,), # Hebrew Point Shin Dot ..Hebrew Point Sin Dot
+ (0x005c4, 0x005c5,), # Hebrew Mark Upper Dot ..Hebrew Mark Lower Dot
+ (0x005c7, 0x005c7,), # Hebrew Point Qamats Qata..Hebrew Point Qamats Qata
+ (0x00610, 0x0061a,), # Arabic Sign Sallallahou ..Arabic Small Kasra
+ (0x0064b, 0x0065f,), # Arabic Fathatan ..Arabic Wavy Hamza Below
+ (0x00670, 0x00670,), # Arabic Letter Superscrip..Arabic Letter Superscrip
+ (0x006d6, 0x006dc,), # Arabic Small High Ligatu..Arabic Small High Seen
+ (0x006df, 0x006e4,), # Arabic Small High Rounde..Arabic Small High Madda
+ (0x006e7, 0x006e8,), # Arabic Small High Yeh ..Arabic Small High Noon
+ (0x006ea, 0x006ed,), # Arabic Empty Centre Low ..Arabic Small Low Meem
+ (0x00711, 0x00711,), # Syriac Letter Superscrip..Syriac Letter Superscrip
+ (0x00730, 0x0074a,), # Syriac Pthaha Above ..Syriac Barrekh
+ (0x007a6, 0x007b0,), # Thaana Abafili ..Thaana Sukun
+ (0x007eb, 0x007f3,), # Nko Combining Short High..Nko Combining Double Dot
+ (0x007fd, 0x007fd,), # Nko Dantayalan ..Nko Dantayalan
+ (0x00816, 0x00819,), # Samaritan Mark In ..Samaritan Mark Dagesh
+ (0x0081b, 0x00823,), # Samaritan Mark Epentheti..Samaritan Vowel Sign A
+ (0x00825, 0x00827,), # Samaritan Vowel Sign Sho..Samaritan Vowel Sign U
+ (0x00829, 0x0082d,), # Samaritan Vowel Sign Lon..Samaritan Mark Nequdaa
+ (0x00859, 0x0085b,), # Mandaic Affrication Mark..Mandaic Gemination Mark
+ (0x008d3, 0x008e1,), # Arabic Small Low Waw ..Arabic Small High Sign S
+ (0x008e3, 0x00902,), # Arabic Turned Damma Belo..Devanagari Sign Anusvara
+ (0x0093a, 0x0093a,), # Devanagari Vowel Sign Oe..Devanagari Vowel Sign Oe
+ (0x0093c, 0x0093c,), # Devanagari Sign Nukta ..Devanagari Sign Nukta
+ (0x00941, 0x00948,), # Devanagari Vowel Sign U ..Devanagari Vowel Sign Ai
+ (0x0094d, 0x0094d,), # Devanagari Sign Virama ..Devanagari Sign Virama
+ (0x00951, 0x00957,), # Devanagari Stress Sign U..Devanagari Vowel Sign Uu
+ (0x00962, 0x00963,), # Devanagari Vowel Sign Vo..Devanagari Vowel Sign Vo
+ (0x00981, 0x00981,), # Bengali Sign Candrabindu..Bengali Sign Candrabindu
+ (0x009bc, 0x009bc,), # Bengali Sign Nukta ..Bengali Sign Nukta
+ (0x009c1, 0x009c4,), # Bengali Vowel Sign U ..Bengali Vowel Sign Vocal
+ (0x009cd, 0x009cd,), # Bengali Sign Virama ..Bengali Sign Virama
+ (0x009e2, 0x009e3,), # Bengali Vowel Sign Vocal..Bengali Vowel Sign Vocal
+ (0x009fe, 0x009fe,), # Bengali Sandhi Mark ..Bengali Sandhi Mark
+ (0x00a01, 0x00a02,), # Gurmukhi Sign Adak Bindi..Gurmukhi Sign Bindi
+ (0x00a3c, 0x00a3c,), # Gurmukhi Sign Nukta ..Gurmukhi Sign Nukta
+ (0x00a41, 0x00a42,), # Gurmukhi Vowel Sign U ..Gurmukhi Vowel Sign Uu
+ (0x00a47, 0x00a48,), # Gurmukhi Vowel Sign Ee ..Gurmukhi Vowel Sign Ai
+ (0x00a4b, 0x00a4d,), # Gurmukhi Vowel Sign Oo ..Gurmukhi Sign Virama
+ (0x00a51, 0x00a51,), # Gurmukhi Sign Udaat ..Gurmukhi Sign Udaat
+ (0x00a70, 0x00a71,), # Gurmukhi Tippi ..Gurmukhi Addak
+ (0x00a75, 0x00a75,), # Gurmukhi Sign Yakash ..Gurmukhi Sign Yakash
+ (0x00a81, 0x00a82,), # Gujarati Sign Candrabind..Gujarati Sign Anusvara
+ (0x00abc, 0x00abc,), # Gujarati Sign Nukta ..Gujarati Sign Nukta
+ (0x00ac1, 0x00ac5,), # Gujarati Vowel Sign U ..Gujarati Vowel Sign Cand
+ (0x00ac7, 0x00ac8,), # Gujarati Vowel Sign E ..Gujarati Vowel Sign Ai
+ (0x00acd, 0x00acd,), # Gujarati Sign Virama ..Gujarati Sign Virama
+ (0x00ae2, 0x00ae3,), # Gujarati Vowel Sign Voca..Gujarati Vowel Sign Voca
+ (0x00afa, 0x00aff,), # Gujarati Sign Sukun ..Gujarati Sign Two-circle
+ (0x00b01, 0x00b01,), # Oriya Sign Candrabindu ..Oriya Sign Candrabindu
+ (0x00b3c, 0x00b3c,), # Oriya Sign Nukta ..Oriya Sign Nukta
+ (0x00b3f, 0x00b3f,), # Oriya Vowel Sign I ..Oriya Vowel Sign I
+ (0x00b41, 0x00b44,), # Oriya Vowel Sign U ..Oriya Vowel Sign Vocalic
+ (0x00b4d, 0x00b4d,), # Oriya Sign Virama ..Oriya Sign Virama
+ (0x00b55, 0x00b56,), # (nil) ..Oriya Ai Length Mark
+ (0x00b62, 0x00b63,), # Oriya Vowel Sign Vocalic..Oriya Vowel Sign Vocalic
+ (0x00b82, 0x00b82,), # Tamil Sign Anusvara ..Tamil Sign Anusvara
+ (0x00bc0, 0x00bc0,), # Tamil Vowel Sign Ii ..Tamil Vowel Sign Ii
+ (0x00bcd, 0x00bcd,), # Tamil Sign Virama ..Tamil Sign Virama
+ (0x00c00, 0x00c00,), # Telugu Sign Combining Ca..Telugu Sign Combining Ca
+ (0x00c04, 0x00c04,), # Telugu Sign Combining An..Telugu Sign Combining An
+ (0x00c3e, 0x00c40,), # Telugu Vowel Sign Aa ..Telugu Vowel Sign Ii
+ (0x00c46, 0x00c48,), # Telugu Vowel Sign E ..Telugu Vowel Sign Ai
+ (0x00c4a, 0x00c4d,), # Telugu Vowel Sign O ..Telugu Sign Virama
+ (0x00c55, 0x00c56,), # Telugu Length Mark ..Telugu Ai Length Mark
+ (0x00c62, 0x00c63,), # Telugu Vowel Sign Vocali..Telugu Vowel Sign Vocali
+ (0x00c81, 0x00c81,), # Kannada Sign Candrabindu..Kannada Sign Candrabindu
+ (0x00cbc, 0x00cbc,), # Kannada Sign Nukta ..Kannada Sign Nukta
+ (0x00cbf, 0x00cbf,), # Kannada Vowel Sign I ..Kannada Vowel Sign I
+ (0x00cc6, 0x00cc6,), # Kannada Vowel Sign E ..Kannada Vowel Sign E
+ (0x00ccc, 0x00ccd,), # Kannada Vowel Sign Au ..Kannada Sign Virama
+ (0x00ce2, 0x00ce3,), # Kannada Vowel Sign Vocal..Kannada Vowel Sign Vocal
+ (0x00d00, 0x00d01,), # Malayalam Sign Combining..Malayalam Sign Candrabin
+ (0x00d3b, 0x00d3c,), # Malayalam Sign Vertical ..Malayalam Sign Circular
+ (0x00d41, 0x00d44,), # Malayalam Vowel Sign U ..Malayalam Vowel Sign Voc
+ (0x00d4d, 0x00d4d,), # Malayalam Sign Virama ..Malayalam Sign Virama
+ (0x00d62, 0x00d63,), # Malayalam Vowel Sign Voc..Malayalam Vowel Sign Voc
+ (0x00d81, 0x00d81,), # (nil) ..(nil)
+ (0x00dca, 0x00dca,), # Sinhala Sign Al-lakuna ..Sinhala Sign Al-lakuna
+ (0x00dd2, 0x00dd4,), # Sinhala Vowel Sign Ketti..Sinhala Vowel Sign Ketti
+ (0x00dd6, 0x00dd6,), # Sinhala Vowel Sign Diga ..Sinhala Vowel Sign Diga
+ (0x00e31, 0x00e31,), # Thai Character Mai Han-a..Thai Character Mai Han-a
+ (0x00e34, 0x00e3a,), # Thai Character Sara I ..Thai Character Phinthu
+ (0x00e47, 0x00e4e,), # Thai Character Maitaikhu..Thai Character Yamakkan
+ (0x00eb1, 0x00eb1,), # Lao Vowel Sign Mai Kan ..Lao Vowel Sign Mai Kan
+ (0x00eb4, 0x00ebc,), # Lao Vowel Sign I ..Lao Semivowel Sign Lo
+ (0x00ec8, 0x00ecd,), # Lao Tone Mai Ek ..Lao Niggahita
+ (0x00f18, 0x00f19,), # Tibetan Astrological Sig..Tibetan Astrological Sig
+ (0x00f35, 0x00f35,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f37, 0x00f37,), # Tibetan Mark Ngas Bzung ..Tibetan Mark Ngas Bzung
+ (0x00f39, 0x00f39,), # Tibetan Mark Tsa -phru ..Tibetan Mark Tsa -phru
+ (0x00f71, 0x00f7e,), # Tibetan Vowel Sign Aa ..Tibetan Sign Rjes Su Nga
+ (0x00f80, 0x00f84,), # Tibetan Vowel Sign Rever..Tibetan Mark Halanta
+ (0x00f86, 0x00f87,), # Tibetan Sign Lci Rtags ..Tibetan Sign Yang Rtags
+ (0x00f8d, 0x00f97,), # Tibetan Subjoined Sign L..Tibetan Subjoined Letter
+ (0x00f99, 0x00fbc,), # Tibetan Subjoined Letter..Tibetan Subjoined Letter
+ (0x00fc6, 0x00fc6,), # Tibetan Symbol Padma Gda..Tibetan Symbol Padma Gda
+ (0x0102d, 0x01030,), # Myanmar Vowel Sign I ..Myanmar Vowel Sign Uu
+ (0x01032, 0x01037,), # Myanmar Vowel Sign Ai ..Myanmar Sign Dot Below
+ (0x01039, 0x0103a,), # Myanmar Sign Virama ..Myanmar Sign Asat
+ (0x0103d, 0x0103e,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01058, 0x01059,), # Myanmar Vowel Sign Vocal..Myanmar Vowel Sign Vocal
+ (0x0105e, 0x01060,), # Myanmar Consonant Sign M..Myanmar Consonant Sign M
+ (0x01071, 0x01074,), # Myanmar Vowel Sign Geba ..Myanmar Vowel Sign Kayah
+ (0x01082, 0x01082,), # Myanmar Consonant Sign S..Myanmar Consonant Sign S
+ (0x01085, 0x01086,), # Myanmar Vowel Sign Shan ..Myanmar Vowel Sign Shan
+ (0x0108d, 0x0108d,), # Myanmar Sign Shan Counci..Myanmar Sign Shan Counci
+ (0x0109d, 0x0109d,), # Myanmar Vowel Sign Aiton..Myanmar Vowel Sign Aiton
+ (0x0135d, 0x0135f,), # Ethiopic Combining Gemin..Ethiopic Combining Gemin
+ (0x01712, 0x01714,), # Tagalog Vowel Sign I ..Tagalog Sign Virama
+ (0x01732, 0x01734,), # Hanunoo Vowel Sign I ..Hanunoo Sign Pamudpod
+ (0x01752, 0x01753,), # Buhid Vowel Sign I ..Buhid Vowel Sign U
+ (0x01772, 0x01773,), # Tagbanwa Vowel Sign I ..Tagbanwa Vowel Sign U
+ (0x017b4, 0x017b5,), # Khmer Vowel Inherent Aq ..Khmer Vowel Inherent Aa
+ (0x017b7, 0x017bd,), # Khmer Vowel Sign I ..Khmer Vowel Sign Ua
+ (0x017c6, 0x017c6,), # Khmer Sign Nikahit ..Khmer Sign Nikahit
+ (0x017c9, 0x017d3,), # Khmer Sign Muusikatoan ..Khmer Sign Bathamasat
+ (0x017dd, 0x017dd,), # Khmer Sign Atthacan ..Khmer Sign Atthacan
+ (0x0180b, 0x0180d,), # Mongolian Free Variation..Mongolian Free Variation
+ (0x01885, 0x01886,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x018a9, 0x018a9,), # Mongolian Letter Ali Gal..Mongolian Letter Ali Gal
+ (0x01920, 0x01922,), # Limbu Vowel Sign A ..Limbu Vowel Sign U
+ (0x01927, 0x01928,), # Limbu Vowel Sign E ..Limbu Vowel Sign O
+ (0x01932, 0x01932,), # Limbu Small Letter Anusv..Limbu Small Letter Anusv
+ (0x01939, 0x0193b,), # Limbu Sign Mukphreng ..Limbu Sign Sa-i
+ (0x01a17, 0x01a18,), # Buginese Vowel Sign I ..Buginese Vowel Sign U
+ (0x01a1b, 0x01a1b,), # Buginese Vowel Sign Ae ..Buginese Vowel Sign Ae
+ (0x01a56, 0x01a56,), # Tai Tham Consonant Sign ..Tai Tham Consonant Sign
+ (0x01a58, 0x01a5e,), # Tai Tham Sign Mai Kang L..Tai Tham Consonant Sign
+ (0x01a60, 0x01a60,), # Tai Tham Sign Sakot ..Tai Tham Sign Sakot
+ (0x01a62, 0x01a62,), # Tai Tham Vowel Sign Mai ..Tai Tham Vowel Sign Mai
+ (0x01a65, 0x01a6c,), # Tai Tham Vowel Sign I ..Tai Tham Vowel Sign Oa B
+ (0x01a73, 0x01a7c,), # Tai Tham Vowel Sign Oa A..Tai Tham Sign Khuen-lue
+ (0x01a7f, 0x01a7f,), # Tai Tham Combining Crypt..Tai Tham Combining Crypt
+ (0x01ab0, 0x01ac0,), # Combining Doubled Circum..(nil)
+ (0x01b00, 0x01b03,), # Balinese Sign Ulu Ricem ..Balinese Sign Surang
+ (0x01b34, 0x01b34,), # Balinese Sign Rerekan ..Balinese Sign Rerekan
+ (0x01b36, 0x01b3a,), # Balinese Vowel Sign Ulu ..Balinese Vowel Sign Ra R
+ (0x01b3c, 0x01b3c,), # Balinese Vowel Sign La L..Balinese Vowel Sign La L
+ (0x01b42, 0x01b42,), # Balinese Vowel Sign Pepe..Balinese Vowel Sign Pepe
+ (0x01b6b, 0x01b73,), # Balinese Musical Symbol ..Balinese Musical Symbol
+ (0x01b80, 0x01b81,), # Sundanese Sign Panyecek ..Sundanese Sign Panglayar
+ (0x01ba2, 0x01ba5,), # Sundanese Consonant Sign..Sundanese Vowel Sign Pan
+ (0x01ba8, 0x01ba9,), # Sundanese Vowel Sign Pam..Sundanese Vowel Sign Pan
+ (0x01bab, 0x01bad,), # Sundanese Sign Virama ..Sundanese Consonant Sign
+ (0x01be6, 0x01be6,), # Batak Sign Tompi ..Batak Sign Tompi
+ (0x01be8, 0x01be9,), # Batak Vowel Sign Pakpak ..Batak Vowel Sign Ee
+ (0x01bed, 0x01bed,), # Batak Vowel Sign Karo O ..Batak Vowel Sign Karo O
+ (0x01bef, 0x01bf1,), # Batak Vowel Sign U For S..Batak Consonant Sign H
+ (0x01c2c, 0x01c33,), # Lepcha Vowel Sign E ..Lepcha Consonant Sign T
+ (0x01c36, 0x01c37,), # Lepcha Sign Ran ..Lepcha Sign Nukta
+ (0x01cd0, 0x01cd2,), # Vedic Tone Karshana ..Vedic Tone Prenkha
+ (0x01cd4, 0x01ce0,), # Vedic Sign Yajurvedic Mi..Vedic Tone Rigvedic Kash
+ (0x01ce2, 0x01ce8,), # Vedic Sign Visarga Svari..Vedic Sign Visarga Anuda
+ (0x01ced, 0x01ced,), # Vedic Sign Tiryak ..Vedic Sign Tiryak
+ (0x01cf4, 0x01cf4,), # Vedic Tone Candra Above ..Vedic Tone Candra Above
+ (0x01cf8, 0x01cf9,), # Vedic Tone Ring Above ..Vedic Tone Double Ring A
+ (0x01dc0, 0x01df9,), # Combining Dotted Grave A..Combining Wide Inverted
+ (0x01dfb, 0x01dff,), # Combining Deletion Mark ..Combining Right Arrowhea
+ (0x020d0, 0x020f0,), # Combining Left Harpoon A..Combining Asterisk Above
+ (0x02cef, 0x02cf1,), # Coptic Combining Ni Abov..Coptic Combining Spiritu
+ (0x02d7f, 0x02d7f,), # Tifinagh Consonant Joine..Tifinagh Consonant Joine
+ (0x02de0, 0x02dff,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0302a, 0x0302d,), # Ideographic Level Tone M..Ideographic Entering Ton
+ (0x03099, 0x0309a,), # Combining Katakana-hirag..Combining Katakana-hirag
+ (0x0a66f, 0x0a672,), # Combining Cyrillic Vzmet..Combining Cyrillic Thous
+ (0x0a674, 0x0a67d,), # Combining Cyrillic Lette..Combining Cyrillic Payer
+ (0x0a69e, 0x0a69f,), # Combining Cyrillic Lette..Combining Cyrillic Lette
+ (0x0a6f0, 0x0a6f1,), # Bamum Combining Mark Koq..Bamum Combining Mark Tuk
+ (0x0a802, 0x0a802,), # Syloti Nagri Sign Dvisva..Syloti Nagri Sign Dvisva
+ (0x0a806, 0x0a806,), # Syloti Nagri Sign Hasant..Syloti Nagri Sign Hasant
+ (0x0a80b, 0x0a80b,), # Syloti Nagri Sign Anusva..Syloti Nagri Sign Anusva
+ (0x0a825, 0x0a826,), # Syloti Nagri Vowel Sign ..Syloti Nagri Vowel Sign
+ (0x0a82c, 0x0a82c,), # (nil) ..(nil)
+ (0x0a8c4, 0x0a8c5,), # Saurashtra Sign Virama ..Saurashtra Sign Candrabi
+ (0x0a8e0, 0x0a8f1,), # Combining Devanagari Dig..Combining Devanagari Sig
+ (0x0a8ff, 0x0a8ff,), # Devanagari Vowel Sign Ay..Devanagari Vowel Sign Ay
+ (0x0a926, 0x0a92d,), # Kayah Li Vowel Ue ..Kayah Li Tone Calya Plop
+ (0x0a947, 0x0a951,), # Rejang Vowel Sign I ..Rejang Consonant Sign R
+ (0x0a980, 0x0a982,), # Javanese Sign Panyangga ..Javanese Sign Layar
+ (0x0a9b3, 0x0a9b3,), # Javanese Sign Cecak Telu..Javanese Sign Cecak Telu
+ (0x0a9b6, 0x0a9b9,), # Javanese Vowel Sign Wulu..Javanese Vowel Sign Suku
+ (0x0a9bc, 0x0a9bd,), # Javanese Vowel Sign Pepe..Javanese Consonant Sign
+ (0x0a9e5, 0x0a9e5,), # Myanmar Sign Shan Saw ..Myanmar Sign Shan Saw
+ (0x0aa29, 0x0aa2e,), # Cham Vowel Sign Aa ..Cham Vowel Sign Oe
+ (0x0aa31, 0x0aa32,), # Cham Vowel Sign Au ..Cham Vowel Sign Ue
+ (0x0aa35, 0x0aa36,), # Cham Consonant Sign La ..Cham Consonant Sign Wa
+ (0x0aa43, 0x0aa43,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa4c, 0x0aa4c,), # Cham Consonant Sign Fina..Cham Consonant Sign Fina
+ (0x0aa7c, 0x0aa7c,), # Myanmar Sign Tai Laing T..Myanmar Sign Tai Laing T
+ (0x0aab0, 0x0aab0,), # Tai Viet Mai Kang ..Tai Viet Mai Kang
+ (0x0aab2, 0x0aab4,), # Tai Viet Vowel I ..Tai Viet Vowel U
+ (0x0aab7, 0x0aab8,), # Tai Viet Mai Khit ..Tai Viet Vowel Ia
+ (0x0aabe, 0x0aabf,), # Tai Viet Vowel Am ..Tai Viet Tone Mai Ek
+ (0x0aac1, 0x0aac1,), # Tai Viet Tone Mai Tho ..Tai Viet Tone Mai Tho
+ (0x0aaec, 0x0aaed,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0aaf6, 0x0aaf6,), # Meetei Mayek Virama ..Meetei Mayek Virama
+ (0x0abe5, 0x0abe5,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abe8, 0x0abe8,), # Meetei Mayek Vowel Sign ..Meetei Mayek Vowel Sign
+ (0x0abed, 0x0abed,), # Meetei Mayek Apun Iyek ..Meetei Mayek Apun Iyek
+ (0x0fb1e, 0x0fb1e,), # Hebrew Point Judeo-spani..Hebrew Point Judeo-spani
+ (0x0fe00, 0x0fe0f,), # Variation Selector-1 ..Variation Selector-16
+ (0x0fe20, 0x0fe2f,), # Combining Ligature Left ..Combining Cyrillic Titlo
+ (0x101fd, 0x101fd,), # Phaistos Disc Sign Combi..Phaistos Disc Sign Combi
+ (0x102e0, 0x102e0,), # Coptic Epact Thousands M..Coptic Epact Thousands M
+ (0x10376, 0x1037a,), # Combining Old Permic Let..Combining Old Permic Let
+ (0x10a01, 0x10a03,), # Kharoshthi Vowel Sign I ..Kharoshthi Vowel Sign Vo
+ (0x10a05, 0x10a06,), # Kharoshthi Vowel Sign E ..Kharoshthi Vowel Sign O
+ (0x10a0c, 0x10a0f,), # Kharoshthi Vowel Length ..Kharoshthi Sign Visarga
+ (0x10a38, 0x10a3a,), # Kharoshthi Sign Bar Abov..Kharoshthi Sign Dot Belo
+ (0x10a3f, 0x10a3f,), # Kharoshthi Virama ..Kharoshthi Virama
+ (0x10ae5, 0x10ae6,), # Manichaean Abbreviation ..Manichaean Abbreviation
+ (0x10d24, 0x10d27,), # Hanifi Rohingya Sign Har..Hanifi Rohingya Sign Tas
+ (0x10eab, 0x10eac,), # (nil) ..(nil)
+ (0x10f46, 0x10f50,), # Sogdian Combining Dot Be..Sogdian Combining Stroke
+ (0x11001, 0x11001,), # Brahmi Sign Anusvara ..Brahmi Sign Anusvara
+ (0x11038, 0x11046,), # Brahmi Vowel Sign Aa ..Brahmi Virama
+ (0x1107f, 0x11081,), # Brahmi Number Joiner ..Kaithi Sign Anusvara
+ (0x110b3, 0x110b6,), # Kaithi Vowel Sign U ..Kaithi Vowel Sign Ai
+ (0x110b9, 0x110ba,), # Kaithi Sign Virama ..Kaithi Sign Nukta
+ (0x11100, 0x11102,), # Chakma Sign Candrabindu ..Chakma Sign Visarga
+ (0x11127, 0x1112b,), # Chakma Vowel Sign A ..Chakma Vowel Sign Uu
+ (0x1112d, 0x11134,), # Chakma Vowel Sign Ai ..Chakma Maayyaa
+ (0x11173, 0x11173,), # Mahajani Sign Nukta ..Mahajani Sign Nukta
+ (0x11180, 0x11181,), # Sharada Sign Candrabindu..Sharada Sign Anusvara
+ (0x111b6, 0x111be,), # Sharada Vowel Sign U ..Sharada Vowel Sign O
+ (0x111c9, 0x111cc,), # Sharada Sandhi Mark ..Sharada Extra Short Vowe
+ (0x111cf, 0x111cf,), # (nil) ..(nil)
+ (0x1122f, 0x11231,), # Khojki Vowel Sign U ..Khojki Vowel Sign Ai
+ (0x11234, 0x11234,), # Khojki Sign Anusvara ..Khojki Sign Anusvara
+ (0x11236, 0x11237,), # Khojki Sign Nukta ..Khojki Sign Shadda
+ (0x1123e, 0x1123e,), # Khojki Sign Sukun ..Khojki Sign Sukun
+ (0x112df, 0x112df,), # Khudawadi Sign Anusvara ..Khudawadi Sign Anusvara
+ (0x112e3, 0x112ea,), # Khudawadi Vowel Sign U ..Khudawadi Sign Virama
+ (0x11300, 0x11301,), # Grantha Sign Combining A..Grantha Sign Candrabindu
+ (0x1133b, 0x1133c,), # Combining Bindu Below ..Grantha Sign Nukta
+ (0x11340, 0x11340,), # Grantha Vowel Sign Ii ..Grantha Vowel Sign Ii
+ (0x11366, 0x1136c,), # Combining Grantha Digit ..Combining Grantha Digit
+ (0x11370, 0x11374,), # Combining Grantha Letter..Combining Grantha Letter
+ (0x11438, 0x1143f,), # Newa Vowel Sign U ..Newa Vowel Sign Ai
+ (0x11442, 0x11444,), # Newa Sign Virama ..Newa Sign Anusvara
+ (0x11446, 0x11446,), # Newa Sign Nukta ..Newa Sign Nukta
+ (0x1145e, 0x1145e,), # Newa Sandhi Mark ..Newa Sandhi Mark
+ (0x114b3, 0x114b8,), # Tirhuta Vowel Sign U ..Tirhuta Vowel Sign Vocal
+ (0x114ba, 0x114ba,), # Tirhuta Vowel Sign Short..Tirhuta Vowel Sign Short
+ (0x114bf, 0x114c0,), # Tirhuta Sign Candrabindu..Tirhuta Sign Anusvara
+ (0x114c2, 0x114c3,), # Tirhuta Sign Virama ..Tirhuta Sign Nukta
+ (0x115b2, 0x115b5,), # Siddham Vowel Sign U ..Siddham Vowel Sign Vocal
+ (0x115bc, 0x115bd,), # Siddham Sign Candrabindu..Siddham Sign Anusvara
+ (0x115bf, 0x115c0,), # Siddham Sign Virama ..Siddham Sign Nukta
+ (0x115dc, 0x115dd,), # Siddham Vowel Sign Alter..Siddham Vowel Sign Alter
+ (0x11633, 0x1163a,), # Modi Vowel Sign U ..Modi Vowel Sign Ai
+ (0x1163d, 0x1163d,), # Modi Sign Anusvara ..Modi Sign Anusvara
+ (0x1163f, 0x11640,), # Modi Sign Virama ..Modi Sign Ardhacandra
+ (0x116ab, 0x116ab,), # Takri Sign Anusvara ..Takri Sign Anusvara
+ (0x116ad, 0x116ad,), # Takri Vowel Sign Aa ..Takri Vowel Sign Aa
+ (0x116b0, 0x116b5,), # Takri Vowel Sign U ..Takri Vowel Sign Au
+ (0x116b7, 0x116b7,), # Takri Sign Nukta ..Takri Sign Nukta
+ (0x1171d, 0x1171f,), # Ahom Consonant Sign Medi..Ahom Consonant Sign Medi
+ (0x11722, 0x11725,), # Ahom Vowel Sign I ..Ahom Vowel Sign Uu
+ (0x11727, 0x1172b,), # Ahom Vowel Sign Aw ..Ahom Sign Killer
+ (0x1182f, 0x11837,), # Dogra Vowel Sign U ..Dogra Sign Anusvara
+ (0x11839, 0x1183a,), # Dogra Sign Virama ..Dogra Sign Nukta
+ (0x1193b, 0x1193c,), # (nil) ..(nil)
+ (0x1193e, 0x1193e,), # (nil) ..(nil)
+ (0x11943, 0x11943,), # (nil) ..(nil)
+ (0x119d4, 0x119d7,), # Nandinagari Vowel Sign U..Nandinagari Vowel Sign V
+ (0x119da, 0x119db,), # Nandinagari Vowel Sign E..Nandinagari Vowel Sign A
+ (0x119e0, 0x119e0,), # Nandinagari Sign Virama ..Nandinagari Sign Virama
+ (0x11a01, 0x11a0a,), # Zanabazar Square Vowel S..Zanabazar Square Vowel L
+ (0x11a33, 0x11a38,), # Zanabazar Square Final C..Zanabazar Square Sign An
+ (0x11a3b, 0x11a3e,), # Zanabazar Square Cluster..Zanabazar Square Cluster
+ (0x11a47, 0x11a47,), # Zanabazar Square Subjoin..Zanabazar Square Subjoin
+ (0x11a51, 0x11a56,), # Soyombo Vowel Sign I ..Soyombo Vowel Sign Oe
+ (0x11a59, 0x11a5b,), # Soyombo Vowel Sign Vocal..Soyombo Vowel Length Mar
+ (0x11a8a, 0x11a96,), # Soyombo Final Consonant ..Soyombo Sign Anusvara
+ (0x11a98, 0x11a99,), # Soyombo Gemination Mark ..Soyombo Subjoiner
+ (0x11c30, 0x11c36,), # Bhaiksuki Vowel Sign I ..Bhaiksuki Vowel Sign Voc
+ (0x11c38, 0x11c3d,), # Bhaiksuki Vowel Sign E ..Bhaiksuki Sign Anusvara
+ (0x11c3f, 0x11c3f,), # Bhaiksuki Sign Virama ..Bhaiksuki Sign Virama
+ (0x11c92, 0x11ca7,), # Marchen Subjoined Letter..Marchen Subjoined Letter
+ (0x11caa, 0x11cb0,), # Marchen Subjoined Letter..Marchen Vowel Sign Aa
+ (0x11cb2, 0x11cb3,), # Marchen Vowel Sign U ..Marchen Vowel Sign E
+ (0x11cb5, 0x11cb6,), # Marchen Sign Anusvara ..Marchen Sign Candrabindu
+ (0x11d31, 0x11d36,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3a, 0x11d3a,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3c, 0x11d3d,), # Masaram Gondi Vowel Sign..Masaram Gondi Vowel Sign
+ (0x11d3f, 0x11d45,), # Masaram Gondi Vowel Sign..Masaram Gondi Virama
+ (0x11d47, 0x11d47,), # Masaram Gondi Ra-kara ..Masaram Gondi Ra-kara
+ (0x11d90, 0x11d91,), # Gunjala Gondi Vowel Sign..Gunjala Gondi Vowel Sign
+ (0x11d95, 0x11d95,), # Gunjala Gondi Sign Anusv..Gunjala Gondi Sign Anusv
+ (0x11d97, 0x11d97,), # Gunjala Gondi Virama ..Gunjala Gondi Virama
+ (0x11ef3, 0x11ef4,), # Makasar Vowel Sign I ..Makasar Vowel Sign U
+ (0x16af0, 0x16af4,), # Bassa Vah Combining High..Bassa Vah Combining High
+ (0x16b30, 0x16b36,), # Pahawh Hmong Mark Cim Tu..Pahawh Hmong Mark Cim Ta
+ (0x16f4f, 0x16f4f,), # Miao Sign Consonant Modi..Miao Sign Consonant Modi
+ (0x16f8f, 0x16f92,), # Miao Tone Right ..Miao Tone Below
+ (0x16fe4, 0x16fe4,), # (nil) ..(nil)
+ (0x1bc9d, 0x1bc9e,), # Duployan Thick Letter Se..Duployan Double Mark
+ (0x1d167, 0x1d169,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d17b, 0x1d182,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d185, 0x1d18b,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d1aa, 0x1d1ad,), # Musical Symbol Combining..Musical Symbol Combining
+ (0x1d242, 0x1d244,), # Combining Greek Musical ..Combining Greek Musical
+ (0x1da00, 0x1da36,), # Signwriting Head Rim ..Signwriting Air Sucking
+ (0x1da3b, 0x1da6c,), # Signwriting Mouth Closed..Signwriting Excitement
+ (0x1da75, 0x1da75,), # Signwriting Upper Body T..Signwriting Upper Body T
+ (0x1da84, 0x1da84,), # Signwriting Location Hea..Signwriting Location Hea
+ (0x1da9b, 0x1da9f,), # Signwriting Fill Modifie..Signwriting Fill Modifie
+ (0x1daa1, 0x1daaf,), # Signwriting Rotation Mod..Signwriting Rotation Mod
+ (0x1e000, 0x1e006,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e008, 0x1e018,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e01b, 0x1e021,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e023, 0x1e024,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e026, 0x1e02a,), # Combining Glagolitic Let..Combining Glagolitic Let
+ (0x1e130, 0x1e136,), # Nyiakeng Puachue Hmong T..Nyiakeng Puachue Hmong T
+ (0x1e2ec, 0x1e2ef,), # Wancho Tone Tup ..Wancho Tone Koini
+ (0x1e8d0, 0x1e8d6,), # Mende Kikakui Combining ..Mende Kikakui Combining
+ (0x1e944, 0x1e94a,), # Adlam Alif Lengthener ..Adlam Nukta
+ (0xe0100, 0xe01ef,), # Variation Selector-17 ..Variation Selector-256
+ ),
+}
diff --git a/third_party/python/wcwidth/wcwidth/unicode_versions.py b/third_party/python/wcwidth/wcwidth/unicode_versions.py
new file mode 100644
index 0000000000..9bf51eabb0
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth/unicode_versions.py
@@ -0,0 +1,35 @@
+"""
+Exports function list_versions() for unicode version level support.
+
+This code was generated by bin/update-tables.py on 2020-06-23 16:03:21.350604.
+"""
+
+
+def list_versions():
+ """
+ Return Unicode version levels supported by this module release.
+
+ Any of the version strings returned may be used as keyword argument
+ ``unicode_version`` to the ``wcwidth()`` family of functions.
+
+ :returns: Supported Unicode version numbers in ascending sorted order.
+ :rtype: tuple(str)
+ """
+ return (
+ "4.1.0",
+ "5.0.0",
+ "5.1.0",
+ "5.2.0",
+ "6.0.0",
+ "6.1.0",
+ "6.2.0",
+ "6.3.0",
+ "7.0.0",
+ "8.0.0",
+ "9.0.0",
+ "10.0.0",
+ "11.0.0",
+ "12.0.0",
+ "12.1.0",
+ "13.0.0",
+ )
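+
+# A minimal usage sketch (illustrative, not part of the upstream module):
+#
+# >>> from wcwidth.unicode_versions import list_versions
+# >>> list_versions()[-1]
+# '13.0.0'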
diff --git a/third_party/python/wcwidth/wcwidth/version.json b/third_party/python/wcwidth/wcwidth/version.json
new file mode 100644
index 0000000000..59f1aa0eb8
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth/version.json
@@ -0,0 +1 @@
+{"tables": ["4.1.0", "5.0.0", "5.1.0", "5.2.0", "6.0.0", "6.1.0", "6.2.0", "6.3.0", "7.0.0", "8.0.0", "9.0.0", "10.0.0", "11.0.0", "12.0.0", "12.1.0", "13.0.0"], "package": "0.2.4", "default": "8.0.0"}
diff --git a/third_party/python/wcwidth/wcwidth/wcwidth.py b/third_party/python/wcwidth/wcwidth/wcwidth.py
new file mode 100644
index 0000000000..931bd0b1b3
--- /dev/null
+++ b/third_party/python/wcwidth/wcwidth/wcwidth.py
@@ -0,0 +1,375 @@
+"""
+This is a python implementation of wcwidth() and wcswidth().
+
+https://github.com/jquast/wcwidth
+
+from Markus Kuhn's C code, retrieved from:
+
+ http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
+
+This is an implementation of wcwidth() and wcswidth() (defined in
+IEEE Std 1003.1-2001) for Unicode.
+
+http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
+http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
+
+In fixed-width output devices, Latin characters all occupy a single
+"cell" position of equal width, whereas ideographic CJK characters
+occupy two such cells. Interoperability between terminal-line
+applications and (teletype-style) character terminals using the
+UTF-8 encoding requires agreement on which character should advance
+the cursor by how many cell positions. No established formal
+standards exist at present on which Unicode character shall occupy
+how many cell positions on character terminals. These routines are
+a first attempt of defining such behavior based on simple rules
+applied to data provided by the Unicode Consortium.
+
+For some graphical characters, the Unicode standard explicitly
+defines a character-cell width via the definition of the East Asian
+FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes.
+In all these cases, there is no ambiguity about which width a
+terminal shall use. For characters in the East Asian Ambiguous (A)
+class, the width choice depends purely on a preference of backward
+compatibility with either historic CJK or Western practice.
+Choosing single-width for these characters is easy to justify as
+the appropriate long-term solution, as the CJK practice of
+displaying these characters as double-width comes from historic
+implementation simplicity (8-bit encoded characters were displayed
+single-width and 16-bit ones double-width, even for Greek,
+Cyrillic, etc.) and not any typographic considerations.
+
+Much less clear is the choice of width for the Not East Asian
+(Neutral) class. Existing practice does not dictate a width for any
+of these characters. It would nevertheless make sense
+typographically to allocate two character cells to characters such
+as for instance EM SPACE or VOLUME INTEGRAL, which cannot be
+represented adequately with a single-width glyph. The following
+routines at present merely assign a single-cell width to all
+neutral characters, in the interest of simplicity. This is not
+entirely satisfactory and should be reconsidered before
+establishing a formal standard in this area. At the moment, the
+decision which Not East Asian (Neutral) characters should be
+represented by double-width glyphs cannot yet be answered by
+applying a simple rule from the Unicode database content. Setting
+up a proper standard for the behavior of UTF-8 character terminals
+will require a careful analysis not only of each Unicode character,
+but also of each presentation form, something the author of these
+routines has so far avoided doing.
+
+http://www.unicode.org/unicode/reports/tr11/
+
+Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
+"""
+from __future__ import division
+
+# std imports
+import os
+import sys
+import warnings
+
+# local
+from .table_wide import WIDE_EASTASIAN
+from .table_zero import ZERO_WIDTH
+from .unicode_versions import list_versions
+
+try:
+ from functools import lru_cache
+except ImportError:
+ # lru_cache was added in Python 3.2
+ from backports.functools_lru_cache import lru_cache
+
+# global cache
+_UNICODE_CMPTABLE = None
+_PY3 = (sys.version_info[0] >= 3)
+
+
+# NOTE: created by hand, there isn't anything identifiable other than the
+# general Cf category code to distinguish these, and some characters in the
+# Cf category are of non-zero width.
+# Also includes some Cc, Mn, Zl, and Zp characters.
+ZERO_WIDTH_CF = set([
+ 0, # Null (Cc)
+ 0x034F, # Combining grapheme joiner (Mn)
+ 0x200B, # Zero width space
+ 0x200C, # Zero width non-joiner
+ 0x200D, # Zero width joiner
+ 0x200E, # Left-to-right mark
+ 0x200F, # Right-to-left mark
+ 0x2028, # Line separator (Zl)
+ 0x2029, # Paragraph separator (Zp)
+ 0x202A, # Left-to-right embedding
+ 0x202B, # Right-to-left embedding
+ 0x202C, # Pop directional formatting
+ 0x202D, # Left-to-right override
+ 0x202E, # Right-to-left override
+ 0x2060, # Word joiner
+ 0x2061, # Function application
+ 0x2062, # Invisible times
+ 0x2063, # Invisible separator
+])
+
+
+def _bisearch(ucs, table):
+ """
+ Auxiliary function for binary search in interval table.
+
+ :arg int ucs: Ordinal value of unicode character.
+ :arg list table: List of starting and ending ranges of ordinal values,
+ in form of ``[(start, end), ...]``.
+ :rtype: int
+ :returns: 1 if ordinal value ucs is found within lookup table, else 0.
+ """
+ lbound = 0
+ ubound = len(table) - 1
+
+ if ucs < table[0][0] or ucs > table[ubound][1]:
+ return 0
+ while ubound >= lbound:
+ mid = (lbound + ubound) // 2
+ if ucs > table[mid][1]:
+ lbound = mid + 1
+ elif ucs < table[mid][0]:
+ ubound = mid - 1
+ else:
+ return 1
+
+ return 0
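+
+# Illustrative example (not upstream code): with a one-interval table
+# covering the combining diacritical marks,
+#
+# >>> _bisearch(0x0301, [(0x0300, 0x036f)]) # COMBINING ACUTE ACCENT
+# 1
+# >>> _bisearch(0x0041, [(0x0300, 0x036f)]) # LATIN CAPITAL LETTER A
+# 0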
+
+
+@lru_cache(maxsize=1000)
+def wcwidth(wc, unicode_version='auto'):
+ r"""
+ Given one Unicode character, return its printable length on a terminal.
+
+ :param str wc: A single Unicode character.
+ :param str unicode_version: A Unicode version number, such as
+ ``'6.0.0'``; the list of available version levels may be
+ obtained by the companion function :func:`list_versions`.
+
+ Any version string may be specified without error -- the nearest
+ matching version is selected. When ``'auto'`` (the default), the
+ highest supported Unicode version level is used unless overridden
+ by the ``UNICODE_VERSION`` environment variable.
+ :return: The width, in cells, necessary to display the Unicode
+ character ``wc``. Returns 0 if ``wc`` has no printable effect on a
+ terminal (such as NUL '\0'), -1 if ``wc`` is not printable or has
+ an indeterminate effect on the terminal, such as a control
+ character. Otherwise, the number of column positions the
+ character occupies on a graphic terminal (1 or 2) is returned.
+ :rtype: int
+
+ The following have a column width of -1:
+
+ - C0 control characters (U+0001 through U+001F).
+
+ - DEL and C1 control characters (U+007F through U+009F).
+
+ The following have a column width of 0:
+
+ - Non-spacing and enclosing combining characters (general
+ category code Mn or Me in the Unicode database).
+
+ - NULL (``U+0000``).
+
+ - COMBINING GRAPHEME JOINER (``U+034F``).
+
+ - ZERO WIDTH SPACE (``U+200B``) *through*
+ RIGHT-TO-LEFT MARK (``U+200F``).
+
+ - LINE SEPARATOR (``U+2028``) *and*
+ PARAGRAPH SEPARATOR (``U+2029``).
+
+ - LEFT-TO-RIGHT EMBEDDING (``U+202A``) *through*
+ RIGHT-TO-LEFT OVERRIDE (``U+202E``).
+
+ - WORD JOINER (``U+2060``) *through*
+ INVISIBLE SEPARATOR (``U+2063``).
+
+ The following have a column width of 1:
+
+ - SOFT HYPHEN (``U+00AD``).
+
+ - All remaining characters, including all printable ISO 8859-1
+ and WGL4 characters, Unicode control characters, etc.
+
+ The following have a column width of 2:
+
+ - Spacing characters in the East Asian Wide (W) or East Asian
+ Full-width (F) category as defined in Unicode Technical
+ Report #11 have a column width of 2.
+
+ - Some kinds of Emoji or symbols.
+ """
+ ucs = ord(wc)
+ if ucs in ZERO_WIDTH_CF:
+ return 0
+
+ # C0/C1 control characters
+ if ucs < 32 or 0x07F <= ucs < 0x0A0:
+ return -1
+
+ _unicode_version = _wcmatch_version(unicode_version)
+
+ # combining characters with zero width
+ if _bisearch(ucs, ZERO_WIDTH[_unicode_version]):
+ return 0
+
+ return 1 + _bisearch(ucs, WIDE_EASTASIAN[_unicode_version])
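+
+# Illustrative calls (not upstream code) matching the rules documented above:
+#
+# >>> wcwidth(u'\u30b3') # KATAKANA LETTER KO, East Asian Wide
+# 2
+# >>> wcwidth(u'\u0301') # COMBINING ACUTE ACCENT, zero-width
+# 0
+# >>> wcwidth(u'\x07') # BEL, a control character
+# -1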
+
+
+def wcswidth(pwcs, n=None, unicode_version='auto'):
+ """
+ Given a unicode string, return its printable length on a terminal.
+
+ :param str pwcs: Measure width of given unicode string.
+ :param int n: When ``n`` is None (default), measure the width of the
+ entire string; otherwise, measure only the first ``n`` characters.
+ :param str unicode_version: An explicit definition of the unicode version
+ level to use for determination; may be ``auto`` (default), which uses
+ the environment variable ``UNICODE_VERSION`` if defined, or the latest
+ available unicode version otherwise.
+ :rtype: int
+ :returns: The width, in cells, necessary to display the first ``n``
+ characters of the unicode string ``pwcs``. Returns ``-1`` if
+ a non-printable character is encountered.
+ """
+ # pylint: disable=C0103
+ # Invalid argument name "n"
+
+ end = len(pwcs) if n is None else n
+ idx = slice(0, end)
+ width = 0
+ for char in pwcs[idx]:
+ wcw = wcwidth(char, unicode_version)
+ if wcw < 0:
+ return -1
+ width += wcw
+ return width
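+
+# For example (illustrative): wcswidth(u'\u30b3\u30f3') returns 4 for two
+# Wide characters where len() reports 2, and wcswidth(u'\x1b[0m') returns -1
+# because ESC is a non-printable control character.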
+
+
+@lru_cache(maxsize=128)
+def _wcversion_value(ver_string):
+ """
+ Integer-mapped value of given dotted version string.
+
+ :param str ver_string: Unicode version string, of form ``n.n.n``.
+ :rtype: tuple(int)
+ :returns: tuple of integers, e.g. ``(6, 0, 0)`` for ``'6.0.0'``.
+ """
+ retval = tuple(map(int, (ver_string.split('.'))))
+ return retval
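+
+# e.g. _wcversion_value('12.1.0') returns (12, 1, 0), so that version
+# strings may be compared numerically as tuples rather than lexically.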
+
+
+@lru_cache(maxsize=8)
+def _wcmatch_version(given_version):
+ """
+ Return nearest matching supported Unicode version level.
+
+ If an exact match is not determined, the nearest lowest version level is
+ returned (a warning is emitted only for invalid values or values lower
+ than the earliest supported level). For example, given supported levels
+ ``4.1.0`` and ``5.0.0``, and a version string of ``4.9.9``, then ``4.1.0``
+ is selected and returned:
+
+ >>> _wcmatch_version('4.9.9')
+ '4.1.0'
+ >>> _wcmatch_version('8.0')
+ '8.0.0'
+ >>> _wcmatch_version('1')
+ '4.1.0'
+
+ :param str given_version: given version for comparison; may be ``auto``,
+ which selects the Unicode version from the environment variable
+ ``UNICODE_VERSION``. If the environment variable is not set, then the
+ latest is used.
+ :rtype: str
+ :returns: unicode string, or non-unicode ``str`` type for python 2
+ when the given ``version`` is also of type ``str``.
+ """
+ # Design note: the choice to return the same type that is given certainly
+ # complicates things for the python 2 str-type, but allows us to define an
+ # API in terms of 'string type' for unicode version level definitions, so
+ # all of our example code works with all versions of python. That, along
+ # with the string-to-numeric conversion and the comparisons of earliest,
+ # latest, matching, or nearest versions, greatly complicates this function.
+ _return_str = not _PY3 and isinstance(given_version, str)
+
+ if _return_str:
+ unicode_versions = [ucs.encode() for ucs in list_versions()]
+ else:
+ unicode_versions = list_versions()
+ latest_version = unicode_versions[-1]
+
+ if given_version in (u'auto', 'auto'):
+ given_version = os.environ.get(
+ 'UNICODE_VERSION',
+ 'latest' if not _return_str else latest_version.encode())
+
+ if given_version in (u'latest', 'latest'):
+ # default match, when given as 'latest', use the latest unicode
+ # version specification level supported.
+ return latest_version if not _return_str else latest_version.encode()
+
+ if given_version in unicode_versions:
+ # exact match, downstream has specified an explicit matching version
+ # matching any value of list_versions().
+ return given_version if not _return_str else given_version.encode()
+
+ # The user's version is not supported by ours. We return the newest unicode
+ # version level that we support below their given value.
+ try:
+ cmp_given = _wcversion_value(given_version)
+
+ except ValueError:
+ # submitted value raises ValueError in int(), warn and use latest.
+ warnings.warn("UNICODE_VERSION value, {given_version!r}, is invalid. "
+ "Value should be in form of `integer[.]+', the latest "
+ "supported unicode version {latest_version!r} has been "
+ "inferred.".format(given_version=given_version,
+ latest_version=latest_version))
+ return latest_version if not _return_str else latest_version.encode()
+
+ # given version is less than any available version, return earliest
+ # version.
+ earliest_version = unicode_versions[0]
+ cmp_earliest_version = _wcversion_value(earliest_version)
+
+ if cmp_given <= cmp_earliest_version:
+ # this probably isn't what you wanted, the oldest wcwidth.c you will
+ # find in the wild is likely version 5 or 6, which we both support,
+ # but it's better than not saying anything at all.
+ warnings.warn("UNICODE_VERSION value, {given_version!r}, is lower "
+ "than any available unicode version. Returning lowest "
+ "version level, {earliest_version!r}".format(
+ given_version=given_version,
+ earliest_version=earliest_version))
+ return earliest_version if not _return_str else earliest_version.encode()
+
+ # scan the list of versions which are less than or equal to the given
+ # version, and return the tail value, which is the highest level we may
+ # support, or the latest value we support, when completely unmatched or
+ # higher than any supported version.
+ #
+ # this loop always returns from within; it never falls through.
+ for idx, unicode_version in enumerate(unicode_versions):
+ # look ahead to next value
+ try:
+ cmp_next_version = _wcversion_value(unicode_versions[idx + 1])
+ except IndexError:
+ # at end of list, return latest version
+ return latest_version if not _return_str else latest_version.encode()
+
+ # Maybe our given version has fewer parts, as in tuple(8, 0), than the
+ # next compare version tuple(8, 0, 0). Test for an exact match by
+ # comparison of only the leading dotted piece(s): (8, 0) == (8, 0).
+ if cmp_given == cmp_next_version[:len(cmp_given)]:
+ return unicode_versions[idx + 1]
+
+ # Or, if the next value is greater than our given support level
+ # version, return the value at the current index. Even though it must
+ # be less than the given value, it's our closest possible match. That
+ # is, 4.1 is returned for given 4.9.9, where 4.1 and 5.0 are available.
+ if cmp_next_version > cmp_given:
+ return unicode_version
+ assert False, ("Code path unreachable", given_version, unicode_versions)
diff --git a/third_party/python/wheel/wheel-0.37.0.dist-info/LICENSE.txt b/third_party/python/wheel/wheel-0.37.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000..c3441e6cc8
--- /dev/null
+++ b/third_party/python/wheel/wheel-0.37.0.dist-info/LICENSE.txt
@@ -0,0 +1,22 @@
+"wheel" copyright (c) 2012-2014 Daniel Holth <dholth@fastmail.fm> and
+contributors.
+
+The MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
diff --git a/third_party/python/wheel/wheel-0.37.0.dist-info/METADATA b/third_party/python/wheel/wheel-0.37.0.dist-info/METADATA
new file mode 100644
index 0000000000..a55c389e59
--- /dev/null
+++ b/third_party/python/wheel/wheel-0.37.0.dist-info/METADATA
@@ -0,0 +1,69 @@
+Metadata-Version: 2.1
+Name: wheel
+Version: 0.37.0
+Summary: A built-package format for Python
+Home-page: https://github.com/pypa/wheel
+Author: Daniel Holth
+Author-email: dholth@fastmail.fm
+Maintainer: Alex Grönholm
+Maintainer-email: alex.gronholm@nextday.fi
+License: MIT
+Project-URL: Documentation, https://wheel.readthedocs.io/
+Project-URL: Changelog, https://wheel.readthedocs.io/en/stable/news.html
+Project-URL: Issue Tracker, https://github.com/pypa/wheel/issues
+Keywords: wheel,packaging
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Topic :: System :: Archiving :: Packaging
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Requires-Python: !=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7
+Provides-Extra: test
+Requires-Dist: pytest (>=3.0.0) ; extra == 'test'
+Requires-Dist: pytest-cov ; extra == 'test'
+
+wheel
+=====
+
+This library is the reference implementation of the Python wheel packaging
+standard, as defined in `PEP 427`_.
+
+It has two different roles:
+
+#. A setuptools_ extension for building wheels that provides the
+ ``bdist_wheel`` setuptools command
+#. A command line tool for working with wheel files
+
+It should be noted that wheel is **not** intended to be used as a library, and
+as such there is no stable, public API.
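+
+For illustration, both roles in use (package name hypothetical)::
+
+    python setup.py bdist_wheel              # setuptools command building a wheel
+    wheel unpack mypkg-1.0-py3-none-any.whl  # command line tool inspecting one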
+
+.. _PEP 427: https://www.python.org/dev/peps/pep-0427/
+.. _setuptools: https://pypi.org/project/setuptools/
+
+Documentation
+-------------
+
+The documentation_ can be found on Read The Docs.
+
+.. _documentation: https://wheel.readthedocs.io/
+
+Code of Conduct
+---------------
+
+Everyone interacting in the wheel project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
+
+
+
diff --git a/third_party/python/wheel/wheel-0.37.0.dist-info/RECORD b/third_party/python/wheel/wheel-0.37.0.dist-info/RECORD
new file mode 100644
index 0000000000..f5d03886de
--- /dev/null
+++ b/third_party/python/wheel/wheel-0.37.0.dist-info/RECORD
@@ -0,0 +1,22 @@
+wheel/__init__.py,sha256=F-akdrfLUBswGk0ywqT52zaERkIn5QiErd3eb_MxKQs,23
+wheel/__main__.py,sha256=lF-YLO4hdQmoWuh4eWZd8YL1U95RSdm76sNLBXa0vjE,417
+wheel/bdist_wheel.py,sha256=2vfv3g_b8BvZ5Do9bpLEBdu9dQEcvoMQ1flXpKYFJDU,19075
+wheel/macosx_libfile.py,sha256=Xvp-IrFyRJ9RThIrPxfEpVCDGfljJPWRTZiyopk70hI,15930
+wheel/metadata.py,sha256=b3kPhZn2w2D9wengltX5nGIZQ3ERUOQ5U-K5vHKPdeg,4344
+wheel/pkginfo.py,sha256=GR76kupQzn1x9sKDaXuE6B6FsZ4OkfRtG7pndlXPvQ4,1257
+wheel/util.py,sha256=mnNZkJCi9DHLI_q4lTudoD0mW97h_AoAWl7prNPLXJc,938
+wheel/wheelfile.py,sha256=7KgOK1znro-D8AelgNEE4jg6fDYXY_Bu6crdqLb2EQQ,7336
+wheel/cli/__init__.py,sha256=GWSoGUpRabTf8bk3FsNTPrc5Fsr8YOv2dX55iY2W7eY,2572
+wheel/cli/convert.py,sha256=7F4vj23A2OghDDWn9gX2V-_TeXMza1a5nIejmFGEUJM,9498
+wheel/cli/pack.py,sha256=S-J1iIy1GPDTTDdn-_SwxGa7N729h4iZNI11EDFCqfA,3208
+wheel/cli/unpack.py,sha256=0VWzT7U_xyenTPwEVavxqvdee93GPvAFHnR3Uu91aRc,673
+wheel/vendored/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+wheel/vendored/packaging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+wheel/vendored/packaging/_typing.py,sha256=x59EhQ57TMT-kTRyLZV25HZvYGGwbucTo6iKh_O0tMw,1812
+wheel/vendored/packaging/tags.py,sha256=noDvA--vVKVKlg49XMuZ5_Epi85jW7gMOKfiGuJ2sqU,29560
+wheel-0.37.0.dist-info/LICENSE.txt,sha256=zKniDGrx_Pv2lAjzd3aShsvuvN7TNhAMm0o_NfvmNeQ,1125
+wheel-0.37.0.dist-info/METADATA,sha256=ROy__pavzfmN68i5vVudVA46XpXAvog9qY5uV-zsnK4,2328
+wheel-0.37.0.dist-info/WHEEL,sha256=WzZ8cwjh8l0jtULNjYq1Hpr-WCqCRgPr--TX4P5I1Wo,110
+wheel-0.37.0.dist-info/entry_points.txt,sha256=N8HbYFST3yrNQYeB2wXWBEPUhFsEtKNRPaCFGJPyqyc,108
+wheel-0.37.0.dist-info/top_level.txt,sha256=HxSBIbgEstMPe4eFawhA66Mq-QYHMopXVoAncfjb_1c,6
+wheel-0.37.0.dist-info/RECORD,,
diff --git a/third_party/python/wheel/wheel-0.37.0.dist-info/WHEEL b/third_party/python/wheel/wheel-0.37.0.dist-info/WHEEL
new file mode 100644
index 0000000000..b733a60d37
--- /dev/null
+++ b/third_party/python/wheel/wheel-0.37.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/wheel/wheel-0.37.0.dist-info/entry_points.txt b/third_party/python/wheel/wheel-0.37.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000..b27acaddfd
--- /dev/null
+++ b/third_party/python/wheel/wheel-0.37.0.dist-info/entry_points.txt
@@ -0,0 +1,6 @@
+[console_scripts]
+wheel = wheel.cli:main
+
+[distutils.commands]
+bdist_wheel = wheel.bdist_wheel:bdist_wheel
+
diff --git a/third_party/python/wheel/wheel-0.37.0.dist-info/top_level.txt b/third_party/python/wheel/wheel-0.37.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..2309722a93
--- /dev/null
+++ b/third_party/python/wheel/wheel-0.37.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+wheel
diff --git a/third_party/python/wheel/wheel/__init__.py b/third_party/python/wheel/wheel/__init__.py
new file mode 100644
index 0000000000..8935b5b5d0
--- /dev/null
+++ b/third_party/python/wheel/wheel/__init__.py
@@ -0,0 +1 @@
+__version__ = '0.37.0'
diff --git a/third_party/python/wheel/wheel/__main__.py b/third_party/python/wheel/wheel/__main__.py
new file mode 100644
index 0000000000..b3773a20e0
--- /dev/null
+++ b/third_party/python/wheel/wheel/__main__.py
@@ -0,0 +1,19 @@
+"""
+Wheel command line tool (enables the 'python -m wheel' invocation syntax)
+"""
+
+import sys
+
+
+def main(): # needed for console script
+ if __package__ == '':
+ # To be able to run 'python wheel-0.9.whl/wheel':
+ import os.path
+ path = os.path.dirname(os.path.dirname(__file__))
+ sys.path[0:0] = [path]
+ import wheel.cli
+ sys.exit(wheel.cli.main())
+
+
+if __name__ == "__main__":
+ sys.exit(main())
diff --git a/third_party/python/wheel/wheel/bdist_wheel.py b/third_party/python/wheel/wheel/bdist_wheel.py
new file mode 100644
index 0000000000..80e43d0a5f
--- /dev/null
+++ b/third_party/python/wheel/wheel/bdist_wheel.py
@@ -0,0 +1,492 @@
+"""
+Create a wheel (.whl) distribution.
+
+A wheel is a built archive format.
+"""
+
+import distutils
+import os
+import shutil
+import stat
+import sys
+import re
+import warnings
+from collections import OrderedDict
+from distutils.core import Command
+from distutils import log as logger
+from io import BytesIO
+from glob import iglob
+from shutil import rmtree
+from sysconfig import get_config_var
+from zipfile import ZIP_DEFLATED, ZIP_STORED
+
+import pkg_resources
+
+from .pkginfo import write_pkg_info
+from .macosx_libfile import calculate_macosx_platform_tag
+from .metadata import pkginfo_to_metadata
+from .vendored.packaging import tags
+from .wheelfile import WheelFile
+from . import __version__ as wheel_version
+
+if sys.version_info < (3,):
+ from email.generator import Generator as BytesGenerator
+else:
+ from email.generator import BytesGenerator
+
+safe_name = pkg_resources.safe_name
+safe_version = pkg_resources.safe_version
+
+PY_LIMITED_API_PATTERN = r'cp3\d'
+
+
+def python_tag():
+ return 'py{}'.format(sys.version_info[0])
+
+
+def get_platform(archive_root):
+ """Return our platform name 'win32', 'linux_x86_64'"""
+ # XXX remove distutils dependency
+ result = distutils.util.get_platform()
+ if result.startswith("macosx") and archive_root is not None:
+ result = calculate_macosx_platform_tag(archive_root, result)
+ if result == "linux_x86_64" and sys.maxsize == 2147483647:
+ # pip pull request #3497
+ result = "linux_i686"
+ return result
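+
+# For example, a 64-bit Linux build yields 'linux_x86_64', while a 32-bit
+# Python on the same machine (sys.maxsize == 2147483647) yields 'linux_i686'.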
+
+
+def get_flag(var, fallback, expected=True, warn=True):
+ """Use a fallback value for determining SOABI flags if the needed config
+ var is unset or unavailable."""
+ val = get_config_var(var)
+ if val is None:
+ if warn:
+ warnings.warn("Config variable '{0}' is unset, Python ABI tag may "
+ "be incorrect".format(var), RuntimeWarning, 2)
+ return fallback
+ return val == expected
+
+
+def get_abi_tag():
+ """Return the ABI tag based on SOABI (if available) or emulate SOABI
+ (CPython 2, PyPy)."""
+ soabi = get_config_var('SOABI')
+ impl = tags.interpreter_name()
+ if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'):
+ d = ''
+ m = ''
+ u = ''
+ if get_flag('Py_DEBUG',
+ hasattr(sys, 'gettotalrefcount'),
+ warn=(impl == 'cp')):
+ d = 'd'
+ if get_flag('WITH_PYMALLOC',
+ impl == 'cp',
+ warn=(impl == 'cp' and
+ sys.version_info < (3, 8))) \
+ and sys.version_info < (3, 8):
+ m = 'm'
+ if get_flag('Py_UNICODE_SIZE',
+ sys.maxunicode == 0x10ffff,
+ expected=4,
+ warn=(impl == 'cp' and
+ sys.version_info < (3, 3))) \
+ and sys.version_info < (3, 3):
+ u = 'u'
+ abi = '%s%s%s%s%s' % (impl, tags.interpreter_version(), d, m, u)
+ elif soabi and soabi.startswith('cpython-'):
+ abi = 'cp' + soabi.split('-')[1]
+ elif soabi and soabi.startswith('pypy-'):
+ # we want something like pypy36-pp73
+ abi = '-'.join(soabi.split('-')[:2])
+ abi = abi.replace('.', '_').replace('-', '_')
+ elif soabi:
+ abi = soabi.replace('.', '_').replace('-', '_')
+ else:
+ abi = None
+ return abi
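+
+# For example, CPython 3.9 reporting SOABI 'cpython-39-x86_64-linux-gnu'
+# yields the ABI tag 'cp39', while a wide-unicode CPython 2.7 build with
+# pymalloc emulates SOABI and yields 'cp27mu' (illustrative values).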
+
+
+def safer_name(name):
+ return safe_name(name).replace('-', '_')
+
+
+def safer_version(version):
+ return safe_version(version).replace('-', '_')
+
+
+def remove_readonly(func, path, excinfo):
+ print(str(excinfo[1]))
+ os.chmod(path, stat.S_IWRITE)
+ func(path)
+
+
+class bdist_wheel(Command):
+
+ description = 'create a wheel distribution'
+
+ supported_compressions = OrderedDict([
+ ('stored', ZIP_STORED),
+ ('deflated', ZIP_DEFLATED)
+ ])
+
+ user_options = [('bdist-dir=', 'b',
+ "temporary directory for creating the distribution"),
+ ('plat-name=', 'p',
+ "platform name to embed in generated filenames "
+ "(default: %s)" % get_platform(None)),
+ ('keep-temp', 'k',
+ "keep the pseudo-installation tree around after " +
+ "creating the distribution archive"),
+ ('dist-dir=', 'd',
+ "directory to put final built distributions in"),
+ ('skip-build', None,
+ "skip rebuilding everything (for testing/debugging)"),
+ ('relative', None,
+ "build the archive using relative paths "
+ "(default: false)"),
+ ('owner=', 'u',
+ "Owner name used when creating a tar file"
+ " [default: current user]"),
+ ('group=', 'g',
+ "Group name used when creating a tar file"
+ " [default: current group]"),
+ ('universal', None,
+ "make a universal wheel"
+ " (default: false)"),
+ ('compression=', None,
+ "zipfile compression (one of: {})"
+ " (default: 'deflated')"
+ .format(', '.join(supported_compressions))),
+ ('python-tag=', None,
+ "Python implementation compatibility tag"
+ " (default: '%s')" % (python_tag())),
+ ('build-number=', None,
+ "Build number for this particular version. "
+ "As specified in PEP-0427, this must start with a digit. "
+ "[default: None]"),
+ ('py-limited-api=', None,
+ "Python tag (cp32|cp33|cpNN) for abi3 wheel tag"
+ " (default: false)"),
+ ]
+
+ boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
+
+ def initialize_options(self):
+ self.bdist_dir = None
+ self.data_dir = None
+ self.plat_name = None
+ self.plat_tag = None
+ self.format = 'zip'
+ self.keep_temp = False
+ self.dist_dir = None
+ self.egginfo_dir = None
+ self.root_is_pure = None
+ self.skip_build = None
+ self.relative = False
+ self.owner = None
+ self.group = None
+ self.universal = False
+ self.compression = 'deflated'
+ self.python_tag = python_tag()
+ self.build_number = None
+ self.py_limited_api = False
+ self.plat_name_supplied = False
+
+ def finalize_options(self):
+ if self.bdist_dir is None:
+ bdist_base = self.get_finalized_command('bdist').bdist_base
+ self.bdist_dir = os.path.join(bdist_base, 'wheel')
+
+ self.data_dir = self.wheel_dist_name + '.data'
+ self.plat_name_supplied = self.plat_name is not None
+
+ try:
+ self.compression = self.supported_compressions[self.compression]
+ except KeyError:
+ raise ValueError('Unsupported compression: {}'.format(self.compression))
+
+ need_options = ('dist_dir', 'plat_name', 'skip_build')
+
+ self.set_undefined_options('bdist',
+ *zip(need_options, need_options))
+
+ self.root_is_pure = not (self.distribution.has_ext_modules()
+ or self.distribution.has_c_libraries())
+
+ if self.py_limited_api and not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api):
+ raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN)
+
+ # Support legacy [wheel] section for setting universal
+ wheel = self.distribution.get_option_dict('wheel')
+ if 'universal' in wheel:
+ # please don't define this in your global configs
+ logger.warn('The [wheel] section is deprecated. Use [bdist_wheel] instead.')
+ val = wheel['universal'][1].strip()
+ if val.lower() in ('1', 'true', 'yes'):
+ self.universal = True
+
+ if self.build_number is not None and not self.build_number[:1].isdigit():
+ raise ValueError("Build tag (build-number) must start with a digit.")
+
+ @property
+ def wheel_dist_name(self):
+ """Return distribution full name with - replaced with _"""
+ components = (safer_name(self.distribution.get_name()),
+ safer_version(self.distribution.get_version()))
+ if self.build_number:
+ components += (self.build_number,)
+ return '-'.join(components)
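+
+ # e.g. a distribution named 'my-dist' at version '1.0' yields
+ # 'my_dist-1.0', or 'my_dist-1.0-4' when build_number is '4'
+ # (hypothetical values).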
+
+ def get_tag(self):
+ # bdist sets self.plat_name if unset; we should only use it for purepy
+ # wheels if the user supplied it.
+ if self.plat_name_supplied:
+ plat_name = self.plat_name
+ elif self.root_is_pure:
+ plat_name = 'any'
+ else:
+ # macosx embeds the system version in the platform name, so it needs special handling
+ if self.plat_name and not self.plat_name.startswith("macosx"):
+ plat_name = self.plat_name
+ else:
+ # on macosx always limit the platform name to comply with any
+ # c-extension modules in bdist_dir, since the user can specify
+ # a higher MACOSX_DEPLOYMENT_TARGET via tools like CMake
+
+ # on other platforms, and on macosx if there are no c-extension
+ # modules, use the default platform name.
+ plat_name = get_platform(self.bdist_dir)
+
+ if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
+ plat_name = 'linux_i686'
+
+ plat_name = plat_name.lower().replace('-', '_').replace('.', '_')
+
+ if self.root_is_pure:
+ if self.universal:
+ impl = 'py2.py3'
+ else:
+ impl = self.python_tag
+ tag = (impl, 'none', plat_name)
+ else:
+ impl_name = tags.interpreter_name()
+ impl_ver = tags.interpreter_version()
+ impl = impl_name + impl_ver
+ # We don't work on CPython 3.1, 3.0.
+ if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'):
+ impl = self.py_limited_api
+ abi_tag = 'abi3'
+ else:
+ abi_tag = str(get_abi_tag()).lower()
+ tag = (impl, abi_tag, plat_name)
+ # issue gh-374: allow overriding plat_name
+ supported_tags = [(t.interpreter, t.abi, plat_name)
+ for t in tags.sys_tags()]
+ assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag)
+ return tag
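+
+ # Illustrative outcomes (assumed): a pure, universal project yields
+ # ('py2.py3', 'none', 'any'); a CPython 3.9 extension module on 64-bit
+ # Linux yields something like ('cp39', 'cp39', 'linux_x86_64').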
+
+ def run(self):
+ build_scripts = self.reinitialize_command('build_scripts')
+ build_scripts.executable = 'python'
+ build_scripts.force = True
+
+ build_ext = self.reinitialize_command('build_ext')
+ build_ext.inplace = False
+
+ if not self.skip_build:
+ self.run_command('build')
+
+ install = self.reinitialize_command('install',
+ reinit_subcommands=True)
+ install.root = self.bdist_dir
+ install.compile = False
+ install.skip_build = self.skip_build
+ install.warn_dir = False
+
+ # A wheel without setuptools scripts is more cross-platform.
+ # Use the (undocumented) `no_ep` option to setuptools'
+ # install_scripts command to avoid creating entry point scripts.
+ install_scripts = self.reinitialize_command('install_scripts')
+ install_scripts.no_ep = True
+
+ # Use a custom scheme for the archive, because we have to decide
+ # at installation time which scheme to use.
+ for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
+ setattr(install,
+ 'install_' + key,
+ os.path.join(self.data_dir, key))
+
+ basedir_observed = ''
+
+ if os.name == 'nt':
+ # win32 barfs if any of these are ''; could be '.'?
+ # (distutils.command.install:change_roots bug)
+ basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
+ self.install_libbase = self.install_lib = basedir_observed
+
+ setattr(install,
+ 'install_purelib' if self.root_is_pure else 'install_platlib',
+ basedir_observed)
+
+ logger.info("installing to %s", self.bdist_dir)
+
+ self.run_command('install')
+
+ impl_tag, abi_tag, plat_tag = self.get_tag()
+ archive_basename = "{}-{}-{}-{}".format(self.wheel_dist_name, impl_tag, abi_tag, plat_tag)
+ if not self.relative:
+ archive_root = self.bdist_dir
+ else:
+ archive_root = os.path.join(
+ self.bdist_dir,
+ self._ensure_relative(install.install_base))
+
+ self.set_undefined_options('install_egg_info', ('target', 'egginfo_dir'))
+ distinfo_dirname = '{}-{}.dist-info'.format(
+ safer_name(self.distribution.get_name()),
+ safer_version(self.distribution.get_version()))
+ distinfo_dir = os.path.join(self.bdist_dir, distinfo_dirname)
+ self.egg2dist(self.egginfo_dir, distinfo_dir)
+
+ self.write_wheelfile(distinfo_dir)
+
+ # Make the archive
+ if not os.path.exists(self.dist_dir):
+ os.makedirs(self.dist_dir)
+
+ wheel_path = os.path.join(self.dist_dir, archive_basename + '.whl')
+ with WheelFile(wheel_path, 'w', self.compression) as wf:
+ wf.write_files(archive_root)
+
+ # Add to 'Distribution.dist_files' so that the "upload" command works
+ getattr(self.distribution, 'dist_files', []).append(
+ ('bdist_wheel',
+ '{}.{}'.format(*sys.version_info[:2]), # like 3.7
+ wheel_path))
+
+ if not self.keep_temp:
+ logger.info('removing %s', self.bdist_dir)
+ if not self.dry_run:
+ rmtree(self.bdist_dir, onerror=remove_readonly)
+
+ def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'):
+ from email.message import Message
+
+ # Workaround for Python 2.7 for when "generator" is unicode
+ if sys.version_info < (3,) and not isinstance(generator, str):
+ generator = generator.encode('utf-8')
+
+ msg = Message()
+ msg['Wheel-Version'] = '1.0' # of the spec
+ msg['Generator'] = generator
+ msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
+ if self.build_number is not None:
+ msg['Build'] = self.build_number
+
+ # Doesn't work for bdist_wininst
+ impl_tag, abi_tag, plat_tag = self.get_tag()
+ for impl in impl_tag.split('.'):
+ for abi in abi_tag.split('.'):
+ for plat in plat_tag.split('.'):
+ msg['Tag'] = '-'.join((impl, abi, plat))
+
+ wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
+ logger.info('creating %s', wheelfile_path)
+ buffer = BytesIO()
+ BytesGenerator(buffer, maxheaderlen=0).flatten(msg)
+ with open(wheelfile_path, 'wb') as f:
+ f.write(buffer.getvalue().replace(b'\r\n', b'\r'))
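+
+ # For a py3 pure wheel this writes, for instance (cf. the
+ # wheel-0.37.0.dist-info/WHEEL file earlier in this diff):
+ #
+ # Wheel-Version: 1.0
+ # Generator: bdist_wheel (0.37.0)
+ # Root-Is-Purelib: true
+ # Tag: py3-none-any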
+
+ def _ensure_relative(self, path):
+ # copied from dir_util, deleted
+ drive, path = os.path.splitdrive(path)
+ if path[0:1] == os.sep:
+ path = drive + path[1:]
+ return path
+
+ @property
+ def license_paths(self):
+ metadata = self.distribution.get_option_dict('metadata')
+ files = set()
+ patterns = sorted({
+ option for option in metadata.get('license_files', ('', ''))[1].split()
+ })
+
+ if 'license_file' in metadata:
+ warnings.warn('The "license_file" option is deprecated. Use '
+ '"license_files" instead.', DeprecationWarning)
+ files.add(metadata['license_file'][1])
+
+ if 'license_file' not in metadata and 'license_files' not in metadata:
+ patterns = ('LICEN[CS]E*', 'COPYING*', 'NOTICE*', 'AUTHORS*')
+
+ for pattern in patterns:
+ for path in iglob(pattern):
+ if path.endswith('~'):
+ logger.debug('ignoring license file "%s" as it looks like a backup', path)
+ continue
+
+ if path not in files and os.path.isfile(path):
+ logger.info('adding license file "%s" (matched pattern "%s")', path, pattern)
+ files.add(path)
+
+ return files
+
+ def egg2dist(self, egginfo_path, distinfo_path):
+ """Convert an .egg-info directory into a .dist-info directory"""
+ def adios(p):
+ """Appropriately delete directory, file or link."""
+ if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
+ shutil.rmtree(p)
+ elif os.path.exists(p):
+ os.unlink(p)
+
+ adios(distinfo_path)
+
+ if not os.path.exists(egginfo_path):
+ # There is no egg-info. This is probably because the egg-info
+ # file/directory is not named to match the distribution name used
+ # to name the archive file. Check for this case and report
+ # accordingly.
+ import glob
+ pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
+ possible = glob.glob(pat)
+ err = "Egg metadata expected at %s but not found" % (egginfo_path,)
+ if possible:
+ alt = os.path.basename(possible[0])
+ err += " (%s found - possible misnamed archive file?)" % (alt,)
+
+ raise ValueError(err)
+
+ if os.path.isfile(egginfo_path):
+ # .egg-info is a single file
+ pkginfo_path = egginfo_path
+ pkg_info = pkginfo_to_metadata(egginfo_path, egginfo_path)
+ os.mkdir(distinfo_path)
+ else:
+ # .egg-info is a directory
+ pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
+ pkg_info = pkginfo_to_metadata(egginfo_path, pkginfo_path)
+
+ # ignore common egg metadata that is useless to wheel
+ shutil.copytree(egginfo_path, distinfo_path,
+ ignore=lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt',
+ 'not-zip-safe'}
+ )
+
+ # delete dependency_links if it is only whitespace
+ dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
+ with open(dependency_links_path, 'r') as dependency_links_file:
+ dependency_links = dependency_links_file.read().strip()
+ if not dependency_links:
+ adios(dependency_links_path)
+
+ write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
+
+ for license_path in self.license_paths:
+ filename = os.path.basename(license_path)
+ shutil.copy(license_path, os.path.join(distinfo_path, filename))
+
+ adios(egginfo_path)
diff --git a/third_party/python/wheel/wheel/cli/__init__.py b/third_party/python/wheel/wheel/cli/__init__.py
new file mode 100644
index 0000000000..95740bfb65
--- /dev/null
+++ b/third_party/python/wheel/wheel/cli/__init__.py
@@ -0,0 +1,88 @@
+"""
+Wheel command-line utility.
+"""
+
+from __future__ import print_function
+
+import argparse
+import os
+import sys
+
+
+def require_pkgresources(name):
+ try:
+ import pkg_resources # noqa: F401
+ except ImportError:
+ raise RuntimeError("'{0}' needs pkg_resources (part of setuptools).".format(name))
+
+
+class WheelError(Exception):
+ pass
+
+
+def unpack_f(args):
+ from .unpack import unpack
+ unpack(args.wheelfile, args.dest)
+
+
+def pack_f(args):
+ from .pack import pack
+ pack(args.directory, args.dest_dir, args.build_number)
+
+
+def convert_f(args):
+ from .convert import convert
+ convert(args.files, args.dest_dir, args.verbose)
+
+
+def version_f(args):
+ from .. import __version__
+ print("wheel %s" % __version__)
+
+
+def parser():
+ p = argparse.ArgumentParser()
+ s = p.add_subparsers(help="commands")
+
+ unpack_parser = s.add_parser('unpack', help='Unpack wheel')
+ unpack_parser.add_argument('--dest', '-d', help='Destination directory',
+ default='.')
+ unpack_parser.add_argument('wheelfile', help='Wheel file')
+ unpack_parser.set_defaults(func=unpack_f)
+
+ repack_parser = s.add_parser('pack', help='Repack wheel')
+ repack_parser.add_argument('directory', help='Root directory of the unpacked wheel')
+ repack_parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
+ help="Directory to store the wheel (default %(default)s)")
+ repack_parser.add_argument('--build-number', help="Build tag to use in the wheel name")
+ repack_parser.set_defaults(func=pack_f)
+
+ convert_parser = s.add_parser('convert', help='Convert egg or wininst to wheel')
+ convert_parser.add_argument('files', nargs='*', help='Files to convert')
+ convert_parser.add_argument('--dest-dir', '-d', default=os.path.curdir,
+ help="Directory to store wheels (default %(default)s)")
+ convert_parser.add_argument('--verbose', '-v', action='store_true')
+ convert_parser.set_defaults(func=convert_f)
+
+ version_parser = s.add_parser('version', help='Print version and exit')
+ version_parser.set_defaults(func=version_f)
+
+ help_parser = s.add_parser('help', help='Show this help')
+ help_parser.set_defaults(func=lambda args: p.print_help())
+
+ return p
+
+
+def main():
+ p = parser()
+ args = p.parse_args()
+ if not hasattr(args, 'func'):
+ p.print_help()
+ else:
+ try:
+ args.func(args)
+ return 0
+ except WheelError as e:
+ print(e, file=sys.stderr)
+
+ return 1
diff --git a/third_party/python/wheel/wheel/cli/convert.py b/third_party/python/wheel/wheel/cli/convert.py
new file mode 100644
index 0000000000..154f1b1e2a
--- /dev/null
+++ b/third_party/python/wheel/wheel/cli/convert.py
@@ -0,0 +1,269 @@
+from __future__ import print_function
+
+import os.path
+import re
+import shutil
+import sys
+import tempfile
+import zipfile
+from distutils import dist
+from glob import iglob
+
+from ..bdist_wheel import bdist_wheel
+from ..wheelfile import WheelFile
+from . import WheelError, require_pkgresources
+
+egg_info_re = re.compile(r'''
+ (?P<name>.+?)-(?P<ver>.+?)
+ (-(?P<pyver>py\d\.\d+)
+ (-(?P<arch>.+?))?
+    )?\.egg$''', re.VERBOSE)
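+# Illustrative matches (hypothetical filenames, not from upstream):
+#   egg_info_re.match('pkg-1.0-py2.7-win32.egg').groupdict()
+#     -> {'name': 'pkg', 'ver': '1.0', 'pyver': 'py2.7', 'arch': 'win32'}
+#   egg_info_re.match('pkg-1.0.egg').groupdict()
+#     -> {'name': 'pkg', 'ver': '1.0', 'pyver': None, 'arch': None}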
+
+
+class _bdist_wheel_tag(bdist_wheel):
+    # Allow the client to override the default generated wheel tag.
+    # The default bdist_wheel implementation uses the python and abi tags
+    # of the running python process. This is not suitable for
+    # generating/repackaging prebuilt binaries.
+
+ full_tag_supplied = False
+ full_tag = None # None or a (pytag, soabitag, plattag) triple
+
+ def get_tag(self):
+ if self.full_tag_supplied and self.full_tag is not None:
+ return self.full_tag
+ else:
+ return bdist_wheel.get_tag(self)
+
+
+def egg2wheel(egg_path, dest_dir):
+ filename = os.path.basename(egg_path)
+ match = egg_info_re.match(filename)
+ if not match:
+ raise WheelError('Invalid egg file name: {}'.format(filename))
+
+ egg_info = match.groupdict()
+ dir = tempfile.mkdtemp(suffix="_e2w")
+ if os.path.isfile(egg_path):
+        # assume this is a zipped egg produced by bdist_egg
+ with zipfile.ZipFile(egg_path) as egg:
+ egg.extractall(dir)
+ else:
+ # support buildout-style installed eggs directories
+ for pth in os.listdir(egg_path):
+ src = os.path.join(egg_path, pth)
+ if os.path.isfile(src):
+ shutil.copy2(src, dir)
+ else:
+ shutil.copytree(src, os.path.join(dir, pth))
+
+ pyver = egg_info['pyver']
+ if pyver:
+ pyver = egg_info['pyver'] = pyver.replace('.', '')
+
+ arch = (egg_info['arch'] or 'any').replace('.', '_').replace('-', '_')
+
+ # assume all binary eggs are for CPython
+ abi = 'cp' + pyver[2:] if arch != 'any' else 'none'
+
+ root_is_purelib = egg_info['arch'] is None
+ if root_is_purelib:
+ bw = bdist_wheel(dist.Distribution())
+ else:
+ bw = _bdist_wheel_tag(dist.Distribution())
+
+ bw.root_is_pure = root_is_purelib
+ bw.python_tag = pyver
+ bw.plat_name_supplied = True
+ bw.plat_name = egg_info['arch'] or 'any'
+ if not root_is_purelib:
+ bw.full_tag_supplied = True
+ bw.full_tag = (pyver, abi, arch)
+
+ dist_info_dir = os.path.join(dir, '{name}-{ver}.dist-info'.format(**egg_info))
+ bw.egg2dist(os.path.join(dir, 'EGG-INFO'), dist_info_dir)
+ bw.write_wheelfile(dist_info_dir, generator='egg2wheel')
+ wheel_name = '{name}-{ver}-{pyver}-{}-{}.whl'.format(abi, arch, **egg_info)
+ with WheelFile(os.path.join(dest_dir, wheel_name), 'w') as wf:
+ wf.write_files(dir)
+
+ shutil.rmtree(dir)
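+
+# Minimal usage sketch (illustrative; the paths and names are hypothetical):
+#
+#   from wheel.cli.convert import egg2wheel
+#   egg2wheel('dist/pkg-1.0-py2.7.egg', 'dist')
+#   # -> writes dist/pkg-1.0-py27-none-any.whl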
+
+
+def parse_wininst_info(wininfo_name, egginfo_name):
+ """Extract metadata from filenames.
+
+    Extracts the 4 metadata items needed (name, version, pyversion, arch) from
+ the installer filename and the name of the egg-info directory embedded in
+ the zipfile (if any).
+
+ The egginfo filename has the format::
+
+ name-ver(-pyver)(-arch).egg-info
+
+ The installer filename has the format::
+
+ name-ver.arch(-pyver).exe
+
+ Some things to note:
+
+ 1. The installer filename is not definitive. An installer can be renamed
+ and work perfectly well as an installer. So more reliable data should
+ be used whenever possible.
+ 2. The egg-info data should be preferred for the name and version, because
+ these come straight from the distutils metadata, and are mandatory.
+ 3. The pyver from the egg-info data should be ignored, as it is
+ constructed from the version of Python used to build the installer,
+ which is irrelevant - the installer filename is correct here (even to
+ the point that when it's not there, any version is implied).
+ 4. The architecture must be taken from the installer filename, as it is
+ not included in the egg-info data.
+ 5. Architecture-neutral installers still have an architecture because the
+ installer format itself (being executable) is architecture-specific. We
+ should therefore ignore the architecture if the content is pure-python.
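+
+    Illustrative example (hypothetical filename)::
+
+        >>> parse_wininst_info('foo-1.0.win32-py2.7.exe', None)
+        {'name': 'foo', 'ver': '1.0', 'arch': 'win32', 'pyver': 'py27'}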
+ """
+
+ egginfo = None
+ if egginfo_name:
+ egginfo = egg_info_re.search(egginfo_name)
+ if not egginfo:
+ raise ValueError("Egg info filename %s is not valid" % (egginfo_name,))
+
+ # Parse the wininst filename
+ # 1. Distribution name (up to the first '-')
+ w_name, sep, rest = wininfo_name.partition('-')
+ if not sep:
+ raise ValueError("Installer filename %s is not valid" % (wininfo_name,))
+
+ # Strip '.exe'
+ rest = rest[:-4]
+ # 2. Python version (from the last '-', must start with 'py')
+ rest2, sep, w_pyver = rest.rpartition('-')
+ if sep and w_pyver.startswith('py'):
+ rest = rest2
+ w_pyver = w_pyver.replace('.', '')
+ else:
+ # Not version specific - use py2.py3. While it is possible that
+ # pure-Python code is not compatible with both Python 2 and 3, there
+ # is no way of knowing from the wininst format, so we assume the best
+ # here (the user can always manually rename the wheel to be more
+ # restrictive if needed).
+ w_pyver = 'py2.py3'
+ # 3. Version and architecture
+ w_ver, sep, w_arch = rest.rpartition('.')
+ if not sep:
+ raise ValueError("Installer filename %s is not valid" % (wininfo_name,))
+
+ if egginfo:
+ w_name = egginfo.group('name')
+ w_ver = egginfo.group('ver')
+
+ return {'name': w_name, 'ver': w_ver, 'arch': w_arch, 'pyver': w_pyver}
+
+
+def wininst2wheel(path, dest_dir):
+ with zipfile.ZipFile(path) as bdw:
+ # Search for egg-info in the archive
+ egginfo_name = None
+ for filename in bdw.namelist():
+ if '.egg-info' in filename:
+ egginfo_name = filename
+ break
+
+ info = parse_wininst_info(os.path.basename(path), egginfo_name)
+
+ root_is_purelib = True
+ for zipinfo in bdw.infolist():
+ if zipinfo.filename.startswith('PLATLIB'):
+ root_is_purelib = False
+ break
+ if root_is_purelib:
+ paths = {'purelib': ''}
+ else:
+ paths = {'platlib': ''}
+
+ dist_info = "%(name)s-%(ver)s" % info
+ datadir = "%s.data/" % dist_info
+
+ # rewrite paths to trick ZipFile into extracting an egg
+ # XXX grab wininst .ini - between .exe, padding, and first zip file.
+ members = []
+ egginfo_name = ''
+ for zipinfo in bdw.infolist():
+ key, basename = zipinfo.filename.split('/', 1)
+ key = key.lower()
+ basepath = paths.get(key, None)
+ if basepath is None:
+ basepath = datadir + key.lower() + '/'
+ oldname = zipinfo.filename
+ newname = basepath + basename
+ zipinfo.filename = newname
+ del bdw.NameToInfo[oldname]
+ bdw.NameToInfo[newname] = zipinfo
+            # Collect member names, but omit '' (from an entry like "PLATLIB/")
+ if newname:
+ members.append(newname)
+ # Remember egg-info name for the egg2dist call below
+ if not egginfo_name:
+ if newname.endswith('.egg-info'):
+ egginfo_name = newname
+ elif '.egg-info/' in newname:
+ egginfo_name, sep, _ = newname.rpartition('/')
+ dir = tempfile.mkdtemp(suffix="_b2w")
+ bdw.extractall(dir, members)
+
+ # egg2wheel
+ abi = 'none'
+ pyver = info['pyver']
+ arch = (info['arch'] or 'any').replace('.', '_').replace('-', '_')
+ # Wininst installers always have arch even if they are not
+ # architecture-specific (because the format itself is).
+ # So, assume the content is architecture-neutral if root is purelib.
+ if root_is_purelib:
+ arch = 'any'
+ # If the installer is architecture-specific, it's almost certainly also
+ # CPython-specific.
+ if arch != 'any':
+ pyver = pyver.replace('py', 'cp')
+ wheel_name = '-'.join((dist_info, pyver, abi, arch))
+ if root_is_purelib:
+ bw = bdist_wheel(dist.Distribution())
+ else:
+ bw = _bdist_wheel_tag(dist.Distribution())
+
+ bw.root_is_pure = root_is_purelib
+ bw.python_tag = pyver
+ bw.plat_name_supplied = True
+ bw.plat_name = info['arch'] or 'any'
+
+ if not root_is_purelib:
+ bw.full_tag_supplied = True
+ bw.full_tag = (pyver, abi, arch)
+
+ dist_info_dir = os.path.join(dir, '%s.dist-info' % dist_info)
+ bw.egg2dist(os.path.join(dir, egginfo_name), dist_info_dir)
+ bw.write_wheelfile(dist_info_dir, generator='wininst2wheel')
+
+    wheel_path = os.path.join(dest_dir, '{}.whl'.format(wheel_name))
+ with WheelFile(wheel_path, 'w') as wf:
+ wf.write_files(dir)
+
+ shutil.rmtree(dir)
+
+
+def convert(files, dest_dir, verbose):
+ # Only support wheel convert if pkg_resources is present
+ require_pkgresources('wheel convert')
+
+ for pat in files:
+ for installer in iglob(pat):
+ if os.path.splitext(installer)[1] == '.egg':
+ conv = egg2wheel
+ else:
+ conv = wininst2wheel
+
+ if verbose:
+ print("{}... ".format(installer))
+ sys.stdout.flush()
+
+ conv(installer, dest_dir)
+ if verbose:
+ print("OK")
diff --git a/third_party/python/wheel/wheel/cli/pack.py b/third_party/python/wheel/wheel/cli/pack.py
new file mode 100644
index 0000000000..1e77fdbd2c
--- /dev/null
+++ b/third_party/python/wheel/wheel/cli/pack.py
@@ -0,0 +1,79 @@
+from __future__ import print_function
+
+import os.path
+import re
+import sys
+
+from wheel.cli import WheelError
+from wheel.wheelfile import WheelFile
+
+DIST_INFO_RE = re.compile(r"^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))\.dist-info$")
+BUILD_NUM_RE = re.compile(br'Build: (\d\w*)$')
+
+
+def pack(directory, dest_dir, build_number):
+ """Repack a previously unpacked wheel directory into a new wheel file.
+
+ The .dist-info/WHEEL file must contain one or more tags so that the target
+ wheel file name can be determined.
+
+ :param directory: The unpacked wheel directory
+    :param dest_dir: Destination directory (defaults to the current directory)
+    :param build_number: Build number (tag) to use in place of any existing one;
+        an empty string removes the build tag
+ """
+ # Find the .dist-info directory
+ dist_info_dirs = [fn for fn in os.listdir(directory)
+ if os.path.isdir(os.path.join(directory, fn)) and DIST_INFO_RE.match(fn)]
+ if len(dist_info_dirs) > 1:
+ raise WheelError('Multiple .dist-info directories found in {}'.format(directory))
+ elif not dist_info_dirs:
+ raise WheelError('No .dist-info directories found in {}'.format(directory))
+
+ # Determine the target wheel filename
+ dist_info_dir = dist_info_dirs[0]
+ name_version = DIST_INFO_RE.match(dist_info_dir).group('namever')
+
+ # Read the tags and the existing build number from .dist-info/WHEEL
+ existing_build_number = None
+ wheel_file_path = os.path.join(directory, dist_info_dir, 'WHEEL')
+ with open(wheel_file_path) as f:
+ tags = []
+ for line in f:
+ if line.startswith('Tag: '):
+ tags.append(line.split(' ')[1].rstrip())
+ elif line.startswith('Build: '):
+ existing_build_number = line.split(' ')[1].rstrip()
+
+ if not tags:
+ raise WheelError('No tags present in {}/WHEEL; cannot determine target wheel filename'
+ .format(dist_info_dir))
+
+ # Set the wheel file name and add/replace/remove the Build tag in .dist-info/WHEEL
+ build_number = build_number if build_number is not None else existing_build_number
+ if build_number is not None:
+ if build_number:
+ name_version += '-' + build_number
+
+ if build_number != existing_build_number:
+ replacement = ('Build: %s\r\n' % build_number).encode('ascii') if build_number else b''
+        with open(wheel_file_path, 'rb+') as f:
+            wheel_file_content = f.read()
+            wheel_file_content, num_replaced = BUILD_NUM_RE.subn(replacement,
+                                                                 wheel_file_content)
+            if not num_replaced:
+                wheel_file_content += replacement
+
+            # rewrite from the start so no stale content is left behind
+            f.seek(0)
+            f.truncate()
+            f.write(wheel_file_content)
+
+ # Reassemble the tags for the wheel file
+ impls = sorted({tag.split('-')[0] for tag in tags})
+ abivers = sorted({tag.split('-')[1] for tag in tags})
+ platforms = sorted({tag.split('-')[2] for tag in tags})
+ tagline = '-'.join(['.'.join(impls), '.'.join(abivers), '.'.join(platforms)])
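+    # e.g. the tags {'cp38-cp38-linux_x86_64', 'cp39-cp39-linux_x86_64'}
+    # compress to 'cp38.cp39-cp38.cp39-linux_x86_64' (values illustrative)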
+
+ # Repack the wheel
+ wheel_path = os.path.join(dest_dir, '{}-{}.whl'.format(name_version, tagline))
+ with WheelFile(wheel_path, 'w') as wf:
+ print("Repacking wheel as {}...".format(wheel_path), end='')
+ sys.stdout.flush()
+ wf.write_files(directory)
+
+ print('OK')
diff --git a/third_party/python/wheel/wheel/cli/unpack.py b/third_party/python/wheel/wheel/cli/unpack.py
new file mode 100644
index 0000000000..2e9857a350
--- /dev/null
+++ b/third_party/python/wheel/wheel/cli/unpack.py
@@ -0,0 +1,25 @@
+from __future__ import print_function
+
+import os.path
+import sys
+
+from ..wheelfile import WheelFile
+
+
+def unpack(path, dest='.'):
+ """Unpack a wheel.
+
+ Wheel content will be unpacked to {dest}/{name}-{ver}, where {name}
+ is the package name and {ver} its version.
+
+ :param path: The path to the wheel.
+    :param dest: Destination directory (defaults to the current directory).
+ """
+ with WheelFile(path) as wf:
+ namever = wf.parsed_filename.group('namever')
+ destination = os.path.join(dest, namever)
+ print("Unpacking to: {}...".format(destination), end='')
+ sys.stdout.flush()
+ wf.extractall(destination)
+
+ print('OK')
diff --git a/third_party/python/wheel/wheel/macosx_libfile.py b/third_party/python/wheel/wheel/macosx_libfile.py
new file mode 100644
index 0000000000..39006fb079
--- /dev/null
+++ b/third_party/python/wheel/wheel/macosx_libfile.py
@@ -0,0 +1,428 @@
+"""
+This module contains function to analyse dynamic library
+headers to extract system information
+
+Currently only for MacOSX
+
+Library file on macosx system starts with Mach-O or Fat field.
+This can be distinguish by first 32 bites and it is called magic number.
+Proper value of magic number is with suffix _MAGIC. Suffix _CIGAM means
+reversed bytes order.
+Both fields can occur in two types: 32 and 64 bytes.
+
+FAT field inform that this library contains few version of library
+(typically for different types version). It contains
+information where Mach-O headers starts.
+
+Each section started with Mach-O header contains one library
+(So if file starts with this field it contains only one version).
+
+After filed Mach-O there are section fields.
+Each of them starts with two fields:
+cmd - magic number for this command
+cmdsize - total size occupied by this section information.
+
+In this case only sections LC_VERSION_MIN_MACOSX (for macosx 10.13 and earlier)
+and LC_BUILD_VERSION (for macosx 10.14 and newer) are interesting,
+because them contains information about minimal system version.
+
+Important remarks:
+- For fat files this implementation looks for maximum number version.
+ It not check if it is 32 or 64 and do not compare it with currently built package.
+ So it is possible to false report higher version that needed.
+- All structures signatures are taken form macosx header files.
+- I think that binary format will be more stable than `otool` output.
+ and if apple introduce some changes both implementation will need to be updated.
+- The system compile will set the deployment target no lower than
+ 11.0 for arm64 builds. For "Universal 2" builds use the x86_64 deployment
+ target when the arm64 target is 11.0.
+"""
+
+import ctypes
+import os
+import sys
+
+"""here the needed const and struct from mach-o header files"""
+
+FAT_MAGIC = 0xcafebabe
+FAT_CIGAM = 0xbebafeca
+FAT_MAGIC_64 = 0xcafebabf
+FAT_CIGAM_64 = 0xbfbafeca
+MH_MAGIC = 0xfeedface
+MH_CIGAM = 0xcefaedfe
+MH_MAGIC_64 = 0xfeedfacf
+MH_CIGAM_64 = 0xcffaedfe
+
+LC_VERSION_MIN_MACOSX = 0x24
+LC_BUILD_VERSION = 0x32
+
+CPU_TYPE_ARM64 = 0x0100000c
+
+mach_header_fields = [
+ ("magic", ctypes.c_uint32), ("cputype", ctypes.c_int),
+ ("cpusubtype", ctypes.c_int), ("filetype", ctypes.c_uint32),
+ ("ncmds", ctypes.c_uint32), ("sizeofcmds", ctypes.c_uint32),
+ ("flags", ctypes.c_uint32)
+ ]
+"""
+struct mach_header {
+ uint32_t magic; /* mach magic number identifier */
+ cpu_type_t cputype; /* cpu specifier */
+ cpu_subtype_t cpusubtype; /* machine specifier */
+ uint32_t filetype; /* type of file */
+ uint32_t ncmds; /* number of load commands */
+ uint32_t sizeofcmds; /* the size of all the load commands */
+ uint32_t flags; /* flags */
+};
+typedef integer_t cpu_type_t;
+typedef integer_t cpu_subtype_t;
+"""
+
+mach_header_fields_64 = mach_header_fields + [("reserved", ctypes.c_uint32)]
+"""
+struct mach_header_64 {
+ uint32_t magic; /* mach magic number identifier */
+ cpu_type_t cputype; /* cpu specifier */
+ cpu_subtype_t cpusubtype; /* machine specifier */
+ uint32_t filetype; /* type of file */
+ uint32_t ncmds; /* number of load commands */
+ uint32_t sizeofcmds; /* the size of all the load commands */
+ uint32_t flags; /* flags */
+ uint32_t reserved; /* reserved */
+};
+"""
+
+fat_header_fields = [("magic", ctypes.c_uint32), ("nfat_arch", ctypes.c_uint32)]
+"""
+struct fat_header {
+ uint32_t magic; /* FAT_MAGIC or FAT_MAGIC_64 */
+ uint32_t nfat_arch; /* number of structs that follow */
+};
+"""
+
+fat_arch_fields = [
+ ("cputype", ctypes.c_int), ("cpusubtype", ctypes.c_int),
+ ("offset", ctypes.c_uint32), ("size", ctypes.c_uint32),
+ ("align", ctypes.c_uint32)
+]
+"""
+struct fat_arch {
+ cpu_type_t cputype; /* cpu specifier (int) */
+ cpu_subtype_t cpusubtype; /* machine specifier (int) */
+ uint32_t offset; /* file offset to this object file */
+ uint32_t size; /* size of this object file */
+ uint32_t align; /* alignment as a power of 2 */
+};
+"""
+
+fat_arch_64_fields = [
+ ("cputype", ctypes.c_int), ("cpusubtype", ctypes.c_int),
+ ("offset", ctypes.c_uint64), ("size", ctypes.c_uint64),
+ ("align", ctypes.c_uint32), ("reserved", ctypes.c_uint32)
+]
+"""
+struct fat_arch_64 {
+ cpu_type_t cputype; /* cpu specifier (int) */
+ cpu_subtype_t cpusubtype; /* machine specifier (int) */
+ uint64_t offset; /* file offset to this object file */
+ uint64_t size; /* size of this object file */
+ uint32_t align; /* alignment as a power of 2 */
+ uint32_t reserved; /* reserved */
+};
+"""
+
+segment_base_fields = [("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32)]
+"""base for reading segment info"""
+
+segment_command_fields = [
+ ("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32),
+ ("segname", ctypes.c_char * 16), ("vmaddr", ctypes.c_uint32),
+ ("vmsize", ctypes.c_uint32), ("fileoff", ctypes.c_uint32),
+ ("filesize", ctypes.c_uint32), ("maxprot", ctypes.c_int),
+ ("initprot", ctypes.c_int), ("nsects", ctypes.c_uint32),
+ ("flags", ctypes.c_uint32),
+ ]
+"""
+struct segment_command { /* for 32-bit architectures */
+ uint32_t cmd; /* LC_SEGMENT */
+ uint32_t cmdsize; /* includes sizeof section structs */
+ char segname[16]; /* segment name */
+ uint32_t vmaddr; /* memory address of this segment */
+ uint32_t vmsize; /* memory size of this segment */
+ uint32_t fileoff; /* file offset of this segment */
+ uint32_t filesize; /* amount to map from the file */
+ vm_prot_t maxprot; /* maximum VM protection */
+ vm_prot_t initprot; /* initial VM protection */
+ uint32_t nsects; /* number of sections in segment */
+ uint32_t flags; /* flags */
+};
+typedef int vm_prot_t;
+"""
+
+segment_command_fields_64 = [
+ ("cmd", ctypes.c_uint32), ("cmdsize", ctypes.c_uint32),
+ ("segname", ctypes.c_char * 16), ("vmaddr", ctypes.c_uint64),
+ ("vmsize", ctypes.c_uint64), ("fileoff", ctypes.c_uint64),
+ ("filesize", ctypes.c_uint64), ("maxprot", ctypes.c_int),
+ ("initprot", ctypes.c_int), ("nsects", ctypes.c_uint32),
+ ("flags", ctypes.c_uint32),
+ ]
+"""
+struct segment_command_64 { /* for 64-bit architectures */
+ uint32_t cmd; /* LC_SEGMENT_64 */
+ uint32_t cmdsize; /* includes sizeof section_64 structs */
+ char segname[16]; /* segment name */
+ uint64_t vmaddr; /* memory address of this segment */
+ uint64_t vmsize; /* memory size of this segment */
+ uint64_t fileoff; /* file offset of this segment */
+ uint64_t filesize; /* amount to map from the file */
+ vm_prot_t maxprot; /* maximum VM protection */
+ vm_prot_t initprot; /* initial VM protection */
+ uint32_t nsects; /* number of sections in segment */
+ uint32_t flags; /* flags */
+};
+"""
+
+version_min_command_fields = segment_base_fields + \
+ [("version", ctypes.c_uint32), ("sdk", ctypes.c_uint32)]
+"""
+struct version_min_command {
+ uint32_t cmd; /* LC_VERSION_MIN_MACOSX or
+ LC_VERSION_MIN_IPHONEOS or
+ LC_VERSION_MIN_WATCHOS or
+ LC_VERSION_MIN_TVOS */
+ uint32_t cmdsize; /* sizeof(struct min_version_command) */
+ uint32_t version; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
+ uint32_t sdk; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
+};
+"""
+
+build_version_command_fields = segment_base_fields + \
+ [("platform", ctypes.c_uint32), ("minos", ctypes.c_uint32),
+ ("sdk", ctypes.c_uint32), ("ntools", ctypes.c_uint32)]
+"""
+struct build_version_command {
+ uint32_t cmd; /* LC_BUILD_VERSION */
+ uint32_t cmdsize; /* sizeof(struct build_version_command) plus */
+ /* ntools * sizeof(struct build_tool_version) */
+ uint32_t platform; /* platform */
+ uint32_t minos; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
+ uint32_t sdk; /* X.Y.Z is encoded in nibbles xxxx.yy.zz */
+ uint32_t ntools; /* number of tool entries following this */
+};
+"""
+
+
+def swap32(x):
+ return (((x << 24) & 0xFF000000) |
+ ((x << 8) & 0x00FF0000) |
+ ((x >> 8) & 0x0000FF00) |
+ ((x >> 24) & 0x000000FF))
+
+
+def get_base_class_and_magic_number(lib_file, seek=None):
+ if seek is None:
+ seek = lib_file.tell()
+ else:
+ lib_file.seek(seek)
+ magic_number = ctypes.c_uint32.from_buffer_copy(
+ lib_file.read(ctypes.sizeof(ctypes.c_uint32))).value
+
+ # Handle wrong byte order
+ if magic_number in [FAT_CIGAM, FAT_CIGAM_64, MH_CIGAM, MH_CIGAM_64]:
+ if sys.byteorder == "little":
+ BaseClass = ctypes.BigEndianStructure
+ else:
+ BaseClass = ctypes.LittleEndianStructure
+
+ magic_number = swap32(magic_number)
+ else:
+ BaseClass = ctypes.Structure
+
+ lib_file.seek(seek)
+ return BaseClass, magic_number
+
+
+def read_data(struct_class, lib_file):
+ return struct_class.from_buffer_copy(lib_file.read(
+ ctypes.sizeof(struct_class)))
+
+
+def extract_macosx_min_system_version(path_to_lib):
+ with open(path_to_lib, "rb") as lib_file:
+ BaseClass, magic_number = get_base_class_and_magic_number(lib_file, 0)
+ if magic_number not in [FAT_MAGIC, FAT_MAGIC_64, MH_MAGIC, MH_MAGIC_64]:
+ return
+
+        # byte-swapped (_CIGAM) magics were already normalized above, so only
+        # the _MAGIC variants can occur here
+        if magic_number in [FAT_MAGIC, FAT_MAGIC_64]:
+ class FatHeader(BaseClass):
+ _fields_ = fat_header_fields
+
+ fat_header = read_data(FatHeader, lib_file)
+ if magic_number == FAT_MAGIC:
+
+ class FatArch(BaseClass):
+ _fields_ = fat_arch_fields
+ else:
+
+ class FatArch(BaseClass):
+ _fields_ = fat_arch_64_fields
+
+ fat_arch_list = [read_data(FatArch, lib_file) for _ in range(fat_header.nfat_arch)]
+
+ versions_list = []
+ for el in fat_arch_list:
+ try:
+ version = read_mach_header(lib_file, el.offset)
+ if version is not None:
+ if el.cputype == CPU_TYPE_ARM64 and len(fat_arch_list) != 1:
+ # Xcode will not set the deployment target below 11.0.0
+ # for the arm64 architecture. Ignore the arm64 deployment
+ # in fat binaries when the target is 11.0.0, that way
+ # the other architectures can select a lower deployment
+ # target.
+ # This is safe because there is no arm64 variant for
+ # macOS 10.15 or earlier.
+ if version == (11, 0, 0):
+ continue
+ versions_list.append(version)
+ except ValueError:
+ pass
+
+ if len(versions_list) > 0:
+ return max(versions_list)
+ else:
+ return None
+
+ else:
+ try:
+ return read_mach_header(lib_file, 0)
+ except ValueError:
+ """when some error during read library files"""
+ return None
+
+
+def read_mach_header(lib_file, seek=None):
+ """
+ This funcition parse mach-O header and extract
+ information about minimal system version
+
+ :param lib_file: reference to opened library file with pointer
+ """
+ if seek is not None:
+ lib_file.seek(seek)
+ base_class, magic_number = get_base_class_and_magic_number(lib_file)
+ arch = "32" if magic_number == MH_MAGIC else "64"
+
+ class SegmentBase(base_class):
+ _fields_ = segment_base_fields
+
+ if arch == "32":
+
+ class MachHeader(base_class):
+ _fields_ = mach_header_fields
+
+ else:
+
+ class MachHeader(base_class):
+ _fields_ = mach_header_fields_64
+
+ mach_header = read_data(MachHeader, lib_file)
+ for _i in range(mach_header.ncmds):
+ pos = lib_file.tell()
+ segment_base = read_data(SegmentBase, lib_file)
+ lib_file.seek(pos)
+ if segment_base.cmd == LC_VERSION_MIN_MACOSX:
+ class VersionMinCommand(base_class):
+ _fields_ = version_min_command_fields
+
+ version_info = read_data(VersionMinCommand, lib_file)
+ return parse_version(version_info.version)
+ elif segment_base.cmd == LC_BUILD_VERSION:
+ class VersionBuild(base_class):
+ _fields_ = build_version_command_fields
+
+ version_info = read_data(VersionBuild, lib_file)
+ return parse_version(version_info.minos)
+ else:
+ lib_file.seek(pos + segment_base.cmdsize)
+ continue
+
+
+def parse_version(version):
+ x = (version & 0xffff0000) >> 16
+ y = (version & 0x0000ff00) >> 8
+ z = (version & 0x000000ff)
+ return x, y, z
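+
+# Example (illustrative): the nibble encoding 0x000A0E00 decodes as
+# parse_version(0x000A0E00) == (10, 14, 0), i.e. macOS 10.14.0.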
+
+
+def calculate_macosx_platform_tag(archive_root, platform_tag):
+ """
+ Calculate proper macosx platform tag basing on files which are included to wheel
+
+ Example platform tag `macosx-10.14-x86_64`
+ """
+ prefix, base_version, suffix = platform_tag.split('-')
+ base_version = tuple([int(x) for x in base_version.split(".")])
+ base_version = base_version[:2]
+ if base_version[0] > 10:
+ base_version = (base_version[0], 0)
+ assert len(base_version) == 2
+ if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
+ deploy_target = tuple([int(x) for x in os.environ[
+ "MACOSX_DEPLOYMENT_TARGET"].split(".")])
+ deploy_target = deploy_target[:2]
+ if deploy_target[0] > 10:
+ deploy_target = (deploy_target[0], 0)
+ if deploy_target < base_version:
+ sys.stderr.write(
+ "[WARNING] MACOSX_DEPLOYMENT_TARGET is set to a lower value ({}) than the "
+ "version on which the Python interpreter was compiled ({}), and will be "
+ "ignored.\n".format('.'.join(str(x) for x in deploy_target),
+ '.'.join(str(x) for x in base_version))
+ )
+ else:
+ base_version = deploy_target
+
+ assert len(base_version) == 2
+ start_version = base_version
+ versions_dict = {}
+ for (dirpath, dirnames, filenames) in os.walk(archive_root):
+ for filename in filenames:
+ if filename.endswith('.dylib') or filename.endswith('.so'):
+ lib_path = os.path.join(dirpath, filename)
+ min_ver = extract_macosx_min_system_version(lib_path)
+ if min_ver is not None:
+ min_ver = min_ver[0:2]
+ if min_ver[0] > 10:
+ min_ver = (min_ver[0], 0)
+ versions_dict[lib_path] = min_ver
+
+ if len(versions_dict) > 0:
+ base_version = max(base_version, max(versions_dict.values()))
+
+    # macOS platform tags do not include the bugfix release number
+ fin_base_version = "_".join([str(x) for x in base_version])
+ if start_version < base_version:
+ problematic_files = [k for k, v in versions_dict.items() if v > start_version]
+ problematic_files = "\n".join(problematic_files)
+ if len(problematic_files) == 1:
+ files_form = "this file"
+ else:
+ files_form = "these files"
+ error_message = \
+ "[WARNING] This wheel needs a higher macOS version than {} " \
+ "To silence this warning, set MACOSX_DEPLOYMENT_TARGET to at least " +\
+ fin_base_version + " or recreate " + files_form + " with lower " \
+ "MACOSX_DEPLOYMENT_TARGET: \n" + problematic_files
+
+ if "MACOSX_DEPLOYMENT_TARGET" in os.environ:
+ error_message = error_message.format("is set in MACOSX_DEPLOYMENT_TARGET variable.")
+ else:
+ error_message = error_message.format(
+ "the version your Python interpreter is compiled against.")
+
+ sys.stderr.write(error_message)
+
+ platform_tag = prefix + "_" + fin_base_version + "_" + suffix
+ return platform_tag
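+
+# Usage sketch (illustrative; assumes MACOSX_DEPLOYMENT_TARGET is unset and
+# the tree contains no native libraries needing a newer system):
+#
+#   calculate_macosx_platform_tag('build/lib', 'macosx-10.9-x86_64')
+#   # -> 'macosx_10_9_x86_64'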
diff --git a/third_party/python/wheel/wheel/metadata.py b/third_party/python/wheel/wheel/metadata.py
new file mode 100644
index 0000000000..37efa74307
--- /dev/null
+++ b/third_party/python/wheel/wheel/metadata.py
@@ -0,0 +1,133 @@
+"""
+Tools for converting old- to new-style metadata.
+"""
+
+import os.path
+import textwrap
+
+import pkg_resources
+
+from .pkginfo import read_pkg_info
+
+
+def requires_to_requires_dist(requirement):
+ """Return the version specifier for a requirement in PEP 345/566 fashion."""
+ if getattr(requirement, 'url', None):
+ return " @ " + requirement.url
+
+ requires_dist = []
+ for op, ver in requirement.specs:
+ requires_dist.append(op + ver)
+ if not requires_dist:
+ return ''
+ return " (%s)" % ','.join(sorted(requires_dist))
+
+
+def convert_requirements(requirements):
+ """Yield Requires-Dist: strings for parsed requirements strings."""
+ for req in requirements:
+ parsed_requirement = pkg_resources.Requirement.parse(req)
+ spec = requires_to_requires_dist(parsed_requirement)
+ extras = ",".join(sorted(parsed_requirement.extras))
+ if extras:
+ extras = "[%s]" % extras
+ yield (parsed_requirement.project_name + extras + spec)
+
+
+def generate_requirements(extras_require):
+ """
+ Convert requirements from a setup()-style dictionary to ('Requires-Dist', 'requirement')
+ and ('Provides-Extra', 'extra') tuples.
+
+ extras_require is a dictionary of {extra: [requirements]} as passed to setup(),
+ using the empty extra {'': [requirements]} to hold install_requires.
+ """
+ for extra, depends in extras_require.items():
+ condition = ''
+ extra = extra or ''
+ if ':' in extra: # setuptools extra:condition syntax
+ extra, condition = extra.split(':', 1)
+
+ extra = pkg_resources.safe_extra(extra)
+ if extra:
+ yield 'Provides-Extra', extra
+ if condition:
+ condition = "(" + condition + ") and "
+ condition += "extra == '%s'" % extra
+
+ if condition:
+ condition = ' ; ' + condition
+
+ for new_req in convert_requirements(depends):
+ yield 'Requires-Dist', new_req + condition
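+
+# Illustrative example (hypothetical requirement set):
+#   >>> list(generate_requirements({'security': ['pyOpenSSL>=0.14']}))
+#   [('Provides-Extra', 'security'),
+#    ('Requires-Dist', "pyOpenSSL (>=0.14) ; extra == 'security'")]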
+
+
+def pkginfo_to_metadata(egg_info_path, pkginfo_path):
+ """
+ Convert .egg-info directory with PKG-INFO to the Metadata 2.1 format
+ """
+ pkg_info = read_pkg_info(pkginfo_path)
+ pkg_info.replace_header('Metadata-Version', '2.1')
+ # Those will be regenerated from `requires.txt`.
+ del pkg_info['Provides-Extra']
+ del pkg_info['Requires-Dist']
+ requires_path = os.path.join(egg_info_path, 'requires.txt')
+ if os.path.exists(requires_path):
+ with open(requires_path) as requires_file:
+ requires = requires_file.read()
+
+ parsed_requirements = sorted(pkg_resources.split_sections(requires),
+ key=lambda x: x[0] or '')
+ for extra, reqs in parsed_requirements:
+ for key, value in generate_requirements({extra: reqs}):
+ if (key, value) not in pkg_info.items():
+ pkg_info[key] = value
+
+ description = pkg_info['Description']
+ if description:
+ pkg_info.set_payload(dedent_description(pkg_info))
+ del pkg_info['Description']
+
+ return pkg_info
+
+
+def pkginfo_unicode(pkg_info, field):
+ """Hack to coax Unicode out of an email Message() - Python 3.3+"""
+ text = pkg_info[field]
+ field = field.lower()
+ if not isinstance(text, str):
+ for item in pkg_info.raw_items():
+ if item[0].lower() == field:
+ text = item[1].encode('ascii', 'surrogateescape') \
+ .decode('utf-8')
+ break
+
+ return text
+
+
+def dedent_description(pkg_info):
+ """
+ Dedent and convert pkg_info['Description'] to Unicode.
+ """
+ description = pkg_info['Description']
+
+ # Python 3 Unicode handling, sorta.
+ surrogates = False
+ if not isinstance(description, str):
+ surrogates = True
+ description = pkginfo_unicode(pkg_info, 'Description')
+
+ description_lines = description.splitlines()
+ description_dedent = '\n'.join(
+ # if the first line of long_description is blank,
+ # the first line here will be indented.
+ (description_lines[0].lstrip(),
+ textwrap.dedent('\n'.join(description_lines[1:])),
+ '\n'))
+
+ if surrogates:
+ description_dedent = description_dedent \
+ .encode("utf8") \
+ .decode("ascii", "surrogateescape")
+
+ return description_dedent
diff --git a/third_party/python/wheel/wheel/pkginfo.py b/third_party/python/wheel/wheel/pkginfo.py
new file mode 100644
index 0000000000..115be45bdf
--- /dev/null
+++ b/third_party/python/wheel/wheel/pkginfo.py
@@ -0,0 +1,43 @@
+"""Tools for reading and writing PKG-INFO / METADATA without caring
+about the encoding."""
+
+from email.parser import Parser
+
+try:
+ unicode
+ _PY3 = False
+except NameError:
+ _PY3 = True
+
+if not _PY3:
+ from email.generator import Generator
+
+ def read_pkg_info_bytes(bytestr):
+ return Parser().parsestr(bytestr)
+
+ def read_pkg_info(path):
+ with open(path, "r") as headers:
+ message = Parser().parse(headers)
+ return message
+
+ def write_pkg_info(path, message):
+ with open(path, 'w') as metadata:
+ Generator(metadata, mangle_from_=False, maxheaderlen=0).flatten(message)
+else:
+ from email.generator import BytesGenerator
+
+ def read_pkg_info_bytes(bytestr):
+ headers = bytestr.decode(encoding="ascii", errors="surrogateescape")
+ message = Parser().parsestr(headers)
+ return message
+
+ def read_pkg_info(path):
+ with open(path, "r",
+ encoding="ascii",
+ errors="surrogateescape") as headers:
+ message = Parser().parse(headers)
+ return message
+
+ def write_pkg_info(path, message):
+ with open(path, "wb") as out:
+ BytesGenerator(out, mangle_from_=False, maxheaderlen=0).flatten(message)
diff --git a/third_party/python/wheel/wheel/util.py b/third_party/python/wheel/wheel/util.py
new file mode 100644
index 0000000000..3ae2b4457c
--- /dev/null
+++ b/third_party/python/wheel/wheel/util.py
@@ -0,0 +1,46 @@
+import base64
+import io
+import sys
+
+
+if sys.version_info[0] < 3:
+ text_type = unicode # noqa: F821
+
+ StringIO = io.BytesIO
+
+ def native(s, encoding='utf-8'):
+ if isinstance(s, unicode): # noqa: F821
+ return s.encode(encoding)
+ return s
+else:
+ text_type = str
+
+ StringIO = io.StringIO
+
+ def native(s, encoding='utf-8'):
+ if isinstance(s, bytes):
+ return s.decode(encoding)
+ return s
+
+
+def urlsafe_b64encode(data):
+ """urlsafe_b64encode without padding"""
+ return base64.urlsafe_b64encode(data).rstrip(b'=')
+
+
+def urlsafe_b64decode(data):
+ """urlsafe_b64decode without padding"""
+ pad = b'=' * (4 - (len(data) & 3))
+ return base64.urlsafe_b64decode(data + pad)
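+
+# Round-trip example (illustrative): urlsafe_b64encode(b'wheel') == b'd2hlZWw'
+# (padding stripped), and urlsafe_b64decode(b'd2hlZWw') == b'wheel' once the
+# padding is restored.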
+
+
+def as_unicode(s):
+ if isinstance(s, bytes):
+ return s.decode('utf-8')
+ return s
+
+
+def as_bytes(s):
+ if isinstance(s, text_type):
+ return s.encode('utf-8')
+ return s
diff --git a/third_party/python/wheel/wheel/vendored/__init__.py b/third_party/python/wheel/wheel/vendored/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/wheel/wheel/vendored/__init__.py
diff --git a/third_party/python/wheel/wheel/vendored/packaging/__init__.py b/third_party/python/wheel/wheel/vendored/packaging/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/third_party/python/wheel/wheel/vendored/packaging/__init__.py
diff --git a/third_party/python/wheel/wheel/vendored/packaging/_typing.py b/third_party/python/wheel/wheel/vendored/packaging/_typing.py
new file mode 100644
index 0000000000..77a8b9185a
--- /dev/null
+++ b/third_party/python/wheel/wheel/vendored/packaging/_typing.py
@@ -0,0 +1,48 @@
+"""For neatly implementing static typing in packaging.
+
+`mypy` - the static type analysis tool we use - uses the `typing` module, which
+provides core functionality fundamental to mypy's functioning.
+
+Generally, `typing` would be imported at runtime and used in that fashion -
+it acts as a no-op at runtime and does not have any run-time overhead by
+design.
+
+As it turns out, `typing` is not vendorable - it uses separate sources for
+Python 2/Python 3. Thus, this codebase can not expect it to be present.
+To work around this, mypy allows the typing import to be behind a False-y
+optional to prevent it from running at runtime and type-comments can be used
+to remove the need for the types to be accessible directly during runtime.
+
+This module provides the False-y guard in a nicely named fashion so that a
+curious maintainer can reach here to read this.
+
+In packaging, all static-typing related imports should be guarded as follows:
+
+ from packaging._typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+ from typing import ...
+
+Ref: https://github.com/python/mypy/issues/3216
+"""
+
+__all__ = ["TYPE_CHECKING", "cast"]
+
+# The TYPE_CHECKING constant defined by the typing module is False at runtime
+# but True while type checking.
+if False: # pragma: no cover
+ from typing import TYPE_CHECKING
+else:
+ TYPE_CHECKING = False
+
+# typing's cast syntax requires calling typing.cast at runtime, but we don't
+# want to import typing at runtime. Here, we inform the type checkers that
+# we're importing `typing.cast` as `cast` and re-implement typing.cast's
+# runtime behavior in a block that is ignored by type checkers.
+if TYPE_CHECKING: # pragma: no cover
+ # not executed at runtime
+ from typing import cast
+else:
+ # executed at runtime
+ def cast(type_, value): # noqa
+ return value
diff --git a/third_party/python/wheel/wheel/vendored/packaging/tags.py b/third_party/python/wheel/wheel/vendored/packaging/tags.py
new file mode 100644
index 0000000000..c2a140c268
--- /dev/null
+++ b/third_party/python/wheel/wheel/vendored/packaging/tags.py
@@ -0,0 +1,866 @@
+# This file is dual licensed under the terms of the Apache License, Version
+# 2.0, and the BSD License. See the LICENSE file in the root of this repository
+# for complete details.
+
+from __future__ import absolute_import
+
+import distutils.util
+
+try:
+ from importlib.machinery import EXTENSION_SUFFIXES
+except ImportError: # pragma: no cover
+ import imp
+
+ EXTENSION_SUFFIXES = [x[0] for x in imp.get_suffixes()]
+ del imp
+import collections
+import logging
+import os
+import platform
+import re
+import struct
+import sys
+import sysconfig
+import warnings
+
+from ._typing import TYPE_CHECKING, cast
+
+if TYPE_CHECKING: # pragma: no cover
+ from typing import (
+ Dict,
+ FrozenSet,
+ IO,
+ Iterable,
+ Iterator,
+ List,
+ Optional,
+ Sequence,
+ Tuple,
+ Union,
+ )
+
+ PythonVersion = Sequence[int]
+ MacVersion = Tuple[int, int]
+ GlibcVersion = Tuple[int, int]
+
+
+logger = logging.getLogger(__name__)
+
+INTERPRETER_SHORT_NAMES = {
+ "python": "py", # Generic.
+ "cpython": "cp",
+ "pypy": "pp",
+ "ironpython": "ip",
+ "jython": "jy",
+} # type: Dict[str, str]
+
+
+_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32
+
+
+_LEGACY_MANYLINUX_MAP = {
+ # CentOS 7 w/ glibc 2.17 (PEP 599)
+ (2, 17): "manylinux2014",
+ # CentOS 6 w/ glibc 2.12 (PEP 571)
+ (2, 12): "manylinux2010",
+ # CentOS 5 w/ glibc 2.5 (PEP 513)
+ (2, 5): "manylinux1",
+}
+
+# If glibc ever changes its major version, we need to know what the last
+# minor version was, so we can build the complete list of all versions.
+# For now, guess what the highest minor version might be, assume it will
+# be 50 for testing. Once this actually happens, update the dictionary
+# with the actual value.
+_LAST_GLIBC_MINOR = collections.defaultdict(lambda: 50) # type: Dict[int, int]
+glibcVersion = collections.namedtuple("Version", ["major", "minor"])
+
+
+class Tag(object):
+ """
+ A representation of the tag triple for a wheel.
+
+ Instances are considered immutable and thus are hashable. Equality checking
+ is also supported.
+ """
+
+ __slots__ = ["_interpreter", "_abi", "_platform", "_hash"]
+
+ def __init__(self, interpreter, abi, platform):
+ # type: (str, str, str) -> None
+ self._interpreter = interpreter.lower()
+ self._abi = abi.lower()
+ self._platform = platform.lower()
+ # The __hash__ of every single element in a Set[Tag] will be evaluated each time
+        # that a set calls its `.isdisjoint()` method, which may be called hundreds of
+ # times when scanning a page of links for packages with tags matching that
+ # Set[Tag]. Pre-computing the value here produces significant speedups for
+ # downstream consumers.
+ self._hash = hash((self._interpreter, self._abi, self._platform))
+
+ @property
+ def interpreter(self):
+ # type: () -> str
+ return self._interpreter
+
+ @property
+ def abi(self):
+ # type: () -> str
+ return self._abi
+
+ @property
+ def platform(self):
+ # type: () -> str
+ return self._platform
+
+ def __eq__(self, other):
+ # type: (object) -> bool
+ if not isinstance(other, Tag):
+ return NotImplemented
+
+ return (
+ (self.platform == other.platform)
+ and (self.abi == other.abi)
+ and (self.interpreter == other.interpreter)
+ )
+
+ def __hash__(self):
+ # type: () -> int
+ return self._hash
+
+ def __str__(self):
+ # type: () -> str
+ return "{}-{}-{}".format(self._interpreter, self._abi, self._platform)
+
+ def __repr__(self):
+ # type: () -> str
+ return "<{self} @ {self_id}>".format(self=self, self_id=id(self))
+
+
+def parse_tag(tag):
+ # type: (str) -> FrozenSet[Tag]
+ """
+ Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances.
+
+ Returning a set is required due to the possibility that the tag is a
+ compressed tag set.
+ """
+ tags = set()
+ interpreters, abis, platforms = tag.split("-")
+ for interpreter in interpreters.split("."):
+ for abi in abis.split("."):
+ for platform_ in platforms.split("."):
+ tags.add(Tag(interpreter, abi, platform_))
+ return frozenset(tags)
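+
+# Illustrative (hypothetical compressed tag):
+#   >>> sorted(str(t) for t in parse_tag("py2.py3-none-any"))
+#   ['py2-none-any', 'py3-none-any']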
+
+
+def _warn_keyword_parameter(func_name, kwargs):
+ # type: (str, Dict[str, bool]) -> bool
+ """
+ Backwards-compatibility with Python 2.7 to allow treating 'warn' as keyword-only.
+ """
+ if not kwargs:
+ return False
+ elif len(kwargs) > 1 or "warn" not in kwargs:
+ kwargs.pop("warn", None)
+ arg = next(iter(kwargs.keys()))
+ raise TypeError(
+ "{}() got an unexpected keyword argument {!r}".format(func_name, arg)
+ )
+ return kwargs["warn"]
+
+
+def _get_config_var(name, warn=False):
+ # type: (str, bool) -> Union[int, str, None]
+ value = sysconfig.get_config_var(name)
+ if value is None and warn:
+ logger.debug(
+ "Config variable '%s' is unset, Python ABI tag may be incorrect", name
+ )
+ return value
+
+
+def _normalize_string(string):
+ # type: (str) -> str
+ return string.replace(".", "_").replace("-", "_")
+
+
+def _abi3_applies(python_version):
+ # type: (PythonVersion) -> bool
+ """
+ Determine if the Python version supports abi3.
+
+ PEP 384 was first implemented in Python 3.2.
+ """
+ return len(python_version) > 1 and tuple(python_version) >= (3, 2)
+
+
+def _cpython_abis(py_version, warn=False):
+ # type: (PythonVersion, bool) -> List[str]
+ py_version = tuple(py_version) # To allow for version comparison.
+ abis = []
+ version = _version_nodot(py_version[:2])
+ debug = pymalloc = ucs4 = ""
+ with_debug = _get_config_var("Py_DEBUG", warn)
+ has_refcount = hasattr(sys, "gettotalrefcount")
+ # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled
+ # extension modules is the best option.
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692
+ has_ext = "_d.pyd" in EXTENSION_SUFFIXES
+ if with_debug or (with_debug is None and (has_refcount or has_ext)):
+ debug = "d"
+ if py_version < (3, 8):
+ with_pymalloc = _get_config_var("WITH_PYMALLOC", warn)
+ if with_pymalloc or with_pymalloc is None:
+ pymalloc = "m"
+ if py_version < (3, 3):
+ unicode_size = _get_config_var("Py_UNICODE_SIZE", warn)
+ if unicode_size == 4 or (
+ unicode_size is None and sys.maxunicode == 0x10FFFF
+ ):
+ ucs4 = "u"
+ elif debug:
+ # Debug builds can also load "normal" extension modules.
+ # We can also assume no UCS-4 or pymalloc requirement.
+ abis.append("cp{version}".format(version=version))
+ abis.insert(
+ 0,
+ "cp{version}{debug}{pymalloc}{ucs4}".format(
+ version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4
+ ),
+ )
+ return abis
+
+
+def cpython_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a CPython interpreter.
+
+ The tags consist of:
+ - cp<python_version>-<abi>-<platform>
+ - cp<python_version>-abi3-<platform>
+ - cp<python_version>-none-<platform>
+ - cp<less than python_version>-abi3-<platform> # Older Python versions down to 3.2.
+
+ If python_version only specifies a major version then user-provided ABIs and
+    the 'none' ABI tag will be used.
+
+ If 'abi3' or 'none' are specified in 'abis' then they will be yielded at
+ their normal position and not at the beginning.
+ """
+ warn = _warn_keyword_parameter("cpython_tags", kwargs)
+ if not python_version:
+ python_version = sys.version_info[:2]
+
+ interpreter = "cp{}".format(_version_nodot(python_version[:2]))
+
+ if abis is None:
+ if len(python_version) > 1:
+ abis = _cpython_abis(python_version, warn)
+ else:
+ abis = []
+ abis = list(abis)
+ # 'abi3' and 'none' are explicitly handled later.
+ for explicit_abi in ("abi3", "none"):
+ try:
+ abis.remove(explicit_abi)
+ except ValueError:
+ pass
+
+ platforms = list(platforms or _platform_tags())
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+ if _abi3_applies(python_version):
+ for tag in (Tag(interpreter, "abi3", platform_) for platform_ in platforms):
+ yield tag
+ for tag in (Tag(interpreter, "none", platform_) for platform_ in platforms):
+ yield tag
+
+ if _abi3_applies(python_version):
+ for minor_version in range(python_version[1] - 1, 1, -1):
+ for platform_ in platforms:
+ interpreter = "cp{version}".format(
+ version=_version_nodot((python_version[0], minor_version))
+ )
+ yield Tag(interpreter, "abi3", platform_)
+
+
+def _generic_abi():
+ # type: () -> Iterator[str]
+ abi = sysconfig.get_config_var("SOABI")
+ if abi:
+ yield _normalize_string(abi)
+
+
+def generic_tags(
+ interpreter=None, # type: Optional[str]
+ abis=None, # type: Optional[Iterable[str]]
+ platforms=None, # type: Optional[Iterable[str]]
+ **kwargs # type: bool
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the tags for a generic interpreter.
+
+ The tags consist of:
+ - <interpreter>-<abi>-<platform>
+
+ The "none" ABI will be added if it was not explicitly provided.
+ """
+ warn = _warn_keyword_parameter("generic_tags", kwargs)
+ if not interpreter:
+ interp_name = interpreter_name()
+ interp_version = interpreter_version(warn=warn)
+ interpreter = "".join([interp_name, interp_version])
+ if abis is None:
+ abis = _generic_abi()
+ platforms = list(platforms or _platform_tags())
+ abis = list(abis)
+ if "none" not in abis:
+ abis.append("none")
+ for abi in abis:
+ for platform_ in platforms:
+ yield Tag(interpreter, abi, platform_)
+
+
+def _py_interpreter_range(py_version):
+ # type: (PythonVersion) -> Iterator[str]
+ """
+ Yields Python versions in descending order.
+
+ After the latest version, the major-only version will be yielded, and then
+ all previous versions of that major version.
+ """
+ if len(py_version) > 1:
+ yield "py{version}".format(version=_version_nodot(py_version[:2]))
+ yield "py{major}".format(major=py_version[0])
+ if len(py_version) > 1:
+ for minor in range(py_version[1] - 1, -1, -1):
+ yield "py{version}".format(version=_version_nodot((py_version[0], minor)))
+
+
+def compatible_tags(
+ python_version=None, # type: Optional[PythonVersion]
+ interpreter=None, # type: Optional[str]
+ platforms=None, # type: Optional[Iterable[str]]
+):
+ # type: (...) -> Iterator[Tag]
+ """
+ Yields the sequence of tags that are compatible with a specific version of Python.
+
+ The tags consist of:
+ - py*-none-<platform>
+ - <interpreter>-none-any # ... if `interpreter` is provided.
+ - py*-none-any
+ """
+ if not python_version:
+ python_version = sys.version_info[:2]
+ platforms = list(platforms or _platform_tags())
+ for version in _py_interpreter_range(python_version):
+ for platform_ in platforms:
+ yield Tag(version, "none", platform_)
+ if interpreter:
+ yield Tag(interpreter, "none", "any")
+ for version in _py_interpreter_range(python_version):
+ yield Tag(version, "none", "any")
+
+
+def _mac_arch(arch, is_32bit=_32_BIT_INTERPRETER):
+ # type: (str, bool) -> str
+ if not is_32bit:
+ return arch
+
+ if arch.startswith("ppc"):
+ return "ppc"
+
+ return "i386"
+
+
+def _mac_binary_formats(version, cpu_arch):
+ # type: (MacVersion, str) -> List[str]
+ formats = [cpu_arch]
+ if cpu_arch == "x86_64":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat64", "fat32"])
+
+ elif cpu_arch == "i386":
+ if version < (10, 4):
+ return []
+ formats.extend(["intel", "fat32", "fat"])
+
+ elif cpu_arch == "ppc64":
+ # TODO: Need to care about 32-bit PPC for ppc64 through 10.2?
+ if version > (10, 5) or version < (10, 4):
+ return []
+ formats.append("fat64")
+
+ elif cpu_arch == "ppc":
+ if version > (10, 6):
+ return []
+ formats.extend(["fat32", "fat"])
+
+ if cpu_arch in {"arm64", "x86_64"}:
+ formats.append("universal2")
+
+ if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}:
+ formats.append("universal")
+
+ return formats
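+
+# Illustrative: _mac_binary_formats((10, 15), "x86_64") ==
+# ['x86_64', 'intel', 'fat64', 'fat32', 'universal2', 'universal']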
+
+
+def mac_platforms(version=None, arch=None):
+ # type: (Optional[MacVersion], Optional[str]) -> Iterator[str]
+ """
+ Yields the platform tags for a macOS system.
+
+ The `version` parameter is a two-item tuple specifying the macOS version to
+ generate platform tags for. The `arch` parameter is the CPU architecture to
+ generate platform tags for. Both parameters default to the appropriate value
+ for the current system.
+ """
+ version_str, _, cpu_arch = platform.mac_ver() # type: ignore
+    if version is None:
+        version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2])))
+    if arch is None:
+        arch = _mac_arch(cpu_arch)
+
+ if (10, 0) <= version and version < (11, 0):
+ # Prior to Mac OS 11, each yearly release of Mac OS bumped the
+ # "minor" version number. The major version was always 10.
+ for minor_version in range(version[1], -1, -1):
+ compat_version = 10, minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=10, minor=minor_version, binary_format=binary_format
+ )
+
+ if version >= (11, 0):
+ # Starting with Mac OS 11, each yearly release bumps the major version
+ # number. The minor versions are now the midyear updates.
+ for major_version in range(version[0], 10, -1):
+ compat_version = major_version, 0
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=major_version, minor=0, binary_format=binary_format
+ )
+
+ if version >= (11, 0):
+ # Mac OS 11 on x86_64 is compatible with binaries from previous releases.
+ # Arm64 support was introduced in 11.0, so no Arm binaries from previous
+ # releases exist.
+ #
+ # However, the "universal2" binary format can have a
+ # macOS version earlier than 11.0 when the x86_64 part of the binary supports
+ # that version of macOS.
+ if arch == "x86_64":
+ for minor_version in range(16, 3, -1):
+ compat_version = 10, minor_version
+ binary_formats = _mac_binary_formats(compat_version, arch)
+ for binary_format in binary_formats:
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
+ else:
+ for minor_version in range(16, 3, -1):
+ compat_version = 10, minor_version
+ binary_format = "universal2"
+ yield "macosx_{major}_{minor}_{binary_format}".format(
+ major=compat_version[0],
+ minor=compat_version[1],
+ binary_format=binary_format,
+ )
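+
+# Illustrative: next(mac_platforms((10, 15), "x86_64")) == 'macosx_10_15_x86_64';
+# later items walk down through older versions and fatter binary formats.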
+
+
+# From PEP 513, PEP 600
+def _is_manylinux_compatible(name, arch, glibc_version):
+ # type: (str, str, GlibcVersion) -> bool
+ sys_glibc = _get_glibc_version()
+ if sys_glibc < glibc_version:
+ return False
+ # Check for presence of _manylinux module.
+ try:
+ import _manylinux # noqa
+ except ImportError:
+ pass
+ else:
+ if hasattr(_manylinux, "manylinux_compatible"):
+ result = _manylinux.manylinux_compatible(
+ glibc_version[0], glibc_version[1], arch
+ )
+ if result is not None:
+ return bool(result)
+ else:
+ if glibc_version == (2, 5):
+ if hasattr(_manylinux, "manylinux1_compatible"):
+ return bool(_manylinux.manylinux1_compatible)
+ if glibc_version == (2, 12):
+ if hasattr(_manylinux, "manylinux2010_compatible"):
+ return bool(_manylinux.manylinux2010_compatible)
+ if glibc_version == (2, 17):
+ if hasattr(_manylinux, "manylinux2014_compatible"):
+ return bool(_manylinux.manylinux2014_compatible)
+ return True
+
+
+def _glibc_version_string():
+ # type: () -> Optional[str]
+ # Returns glibc version string, or None if not using glibc.
+ return _glibc_version_string_confstr() or _glibc_version_string_ctypes()
+
+
+def _glibc_version_string_confstr():
+ # type: () -> Optional[str]
+ """
+ Primary implementation of glibc_version_string using os.confstr.
+ """
+ # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely
+ # to be broken or missing. This strategy is used in the standard library
+ # platform module.
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c9d0921ff3d70e1127ca1b71/Lib/platform.py#L175-L183
+ try:
+ # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17".
+ version_string = os.confstr( # type: ignore[attr-defined] # noqa: F821
+ "CS_GNU_LIBC_VERSION"
+ )
+ assert version_string is not None
+ _, version = version_string.split() # type: Tuple[str, str]
+ except (AssertionError, AttributeError, OSError, ValueError):
+ # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)...
+ return None
+ return version
+
+
+def _glibc_version_string_ctypes():
+ # type: () -> Optional[str]
+ """
+ Fallback implementation of glibc_version_string using ctypes.
+ """
+ try:
+ import ctypes
+ except ImportError:
+ return None
+
+ # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen
+ # manpage says, "If filename is NULL, then the returned handle is for the
+ # main program". This way we can let the linker do the work to figure out
+ # which libc our process is actually using.
+ #
+ # We must also handle the special case where the executable is not a
+ # dynamically linked executable. This can occur when using musl libc,
+ # for example. In this situation, dlopen() will error, leading to an
+ # OSError. Interestingly, at least in the case of musl, there is no
+ # errno set on the OSError. The single string argument used to construct
+ # OSError comes from libc itself and is therefore not portable to
+ # hard code here. In any case, failure to call dlopen() means we
+ # can proceed, so we bail on our attempt.
+ try:
+ # Note: typeshed is wrong here so we are ignoring this line.
+ process_namespace = ctypes.CDLL(None) # type: ignore
+ except OSError:
+ return None
+
+ try:
+ gnu_get_libc_version = process_namespace.gnu_get_libc_version
+ except AttributeError:
+ # Symbol doesn't exist -> therefore, we are not linked to
+ # glibc.
+ return None
+
+ # Call gnu_get_libc_version, which returns a string like "2.5"
+ gnu_get_libc_version.restype = ctypes.c_char_p
+ version_str = gnu_get_libc_version() # type: str
+ # py2 / py3 compatibility:
+ if not isinstance(version_str, str):
+ version_str = version_str.decode("ascii")
+
+ return version_str
+
+
+def _parse_glibc_version(version_str):
+ # type: (str) -> Tuple[int, int]
+ # Parse glibc version.
+ #
+ # We use a regexp instead of str.split because we want to discard any
+ # random junk that might come after the minor version -- this might happen
+ # in patched/forked versions of glibc (e.g. Linaro's version of glibc
+ # uses version strings like "2.20-2014.11"). See gh-3588.
+ m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
+ if not m:
+ warnings.warn(
+ "Expected glibc version with 2 components major.minor,"
+ " got: %s" % version_str,
+ RuntimeWarning,
+ )
+ return -1, -1
+ return (int(m.group("major")), int(m.group("minor")))
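+
+# Illustrative: _parse_glibc_version("2.17") == (2, 17); the Linaro-style
+# string "2.20-2014.11" also parses as (2, 20).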
+
+
+_glibc_version = [] # type: List[Tuple[int, int]]
+
+
+def _get_glibc_version():
+ # type: () -> Tuple[int, int]
+ if _glibc_version:
+ return _glibc_version[0]
+ version_str = _glibc_version_string()
+ if version_str is None:
+ _glibc_version.append((-1, -1))
+ else:
+ _glibc_version.append(_parse_glibc_version(version_str))
+ return _glibc_version[0]
+
+
+# Python does not provide platform information at sufficient granularity to
+# identify the architecture of the running executable in some cases, so we
+# determine it dynamically by reading the information from the running
+# process. This only applies on Linux, which uses the ELF format.
+class _ELFFileHeader(object):
+ # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header
+ class _InvalidELFFileHeader(ValueError):
+ """
+ An invalid ELF file header was found.
+ """
+
+ ELF_MAGIC_NUMBER = 0x7F454C46
+ ELFCLASS32 = 1
+ ELFCLASS64 = 2
+ ELFDATA2LSB = 1
+ ELFDATA2MSB = 2
+ EM_386 = 3
+ EM_S390 = 22
+ EM_ARM = 40
+ EM_X86_64 = 62
+ EF_ARM_ABIMASK = 0xFF000000
+ EF_ARM_ABI_VER5 = 0x05000000
+ EF_ARM_ABI_FLOAT_HARD = 0x00000400
+
+ def __init__(self, file):
+ # type: (IO[bytes]) -> None
+ def unpack(fmt):
+ # type: (str) -> int
+ try:
+ (result,) = struct.unpack(
+ fmt, file.read(struct.calcsize(fmt))
+ ) # type: (int, )
+ except struct.error:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ return result
+
+ self.e_ident_magic = unpack(">I")
+ if self.e_ident_magic != self.ELF_MAGIC_NUMBER:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_class = unpack("B")
+ if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_data = unpack("B")
+ if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}:
+ raise _ELFFileHeader._InvalidELFFileHeader()
+ self.e_ident_version = unpack("B")
+ self.e_ident_osabi = unpack("B")
+ self.e_ident_abiversion = unpack("B")
+ self.e_ident_pad = file.read(7)
+ format_h = "<H" if self.e_ident_data == self.ELFDATA2LSB else ">H"
+ format_i = "<I" if self.e_ident_data == self.ELFDATA2LSB else ">I"
+ format_q = "<Q" if self.e_ident_data == self.ELFDATA2LSB else ">Q"
+ format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q
+ self.e_type = unpack(format_h)
+ self.e_machine = unpack(format_h)
+ self.e_version = unpack(format_i)
+ self.e_entry = unpack(format_p)
+ self.e_phoff = unpack(format_p)
+ self.e_shoff = unpack(format_p)
+ self.e_flags = unpack(format_i)
+ self.e_ehsize = unpack(format_h)
+ self.e_phentsize = unpack(format_h)
+ self.e_phnum = unpack(format_h)
+ self.e_shentsize = unpack(format_h)
+ self.e_shnum = unpack(format_h)
+ self.e_shstrndx = unpack(format_h)
+
+
+def _get_elf_header():
+ # type: () -> Optional[_ELFFileHeader]
+ try:
+ with open(sys.executable, "rb") as f:
+ elf_header = _ELFFileHeader(f)
+ except (IOError, OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader):
+ return None
+ return elf_header
+
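+# Illustrative use (assumption: running on Linux): inspecting the header of
+# the interpreter's own executable, e.g.
+#
+#     header = _get_elf_header()
+#     if header is not None:
+#         is_64bit = header.e_ident_class == header.ELFCLASS64
+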
+
+def _is_linux_armhf():
+ # type: () -> bool
+ # hard-float ABI can be detected from the ELF header of the running
+ # process
+ # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_ARM
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABIMASK
+ ) == elf_header.EF_ARM_ABI_VER5
+ result &= (
+ elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD
+ ) == elf_header.EF_ARM_ABI_FLOAT_HARD
+ return result
+
+
+def _is_linux_i686():
+ # type: () -> bool
+ elf_header = _get_elf_header()
+ if elf_header is None:
+ return False
+ result = elf_header.e_ident_class == elf_header.ELFCLASS32
+ result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB
+ result &= elf_header.e_machine == elf_header.EM_386
+ return result
+
+
+def _have_compatible_manylinux_abi(arch):
+ # type: (str) -> bool
+ if arch == "armv7l":
+ return _is_linux_armhf()
+ if arch == "i686":
+ return _is_linux_i686()
+ return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"}
+
+
+def _manylinux_tags(linux, arch):
+ # type: (str, str) -> Iterator[str]
+ # The oldest glibc supported regardless of architecture is (2, 17);
+ # too_old_glibc2 is set one minor version lower because the code below
+ # uses it as an exclusive lower bound.
+ too_old_glibc2 = glibcVersion(2, 16)
+ if arch in {"x86_64", "i686"}:
+ # On x86_64/i686 the oldest supported glibc is (2, 5) instead.
+ too_old_glibc2 = glibcVersion(2, 4)
+ current_glibc = glibcVersion(*_get_glibc_version())
+ glibc_max_list = [current_glibc]
+ # We can assume compatibility across glibc major versions.
+ # https://sourceware.org/bugzilla/show_bug.cgi?id=24636
+ #
+ # Build a list of maximum glibc versions so that we can
+ # output the canonical list of all glibc from current_glibc
+ # down to too_old_glibc2, including all intermediary versions.
+ for glibc_major in range(current_glibc.major - 1, 1, -1):
+ glibc_max_list.append(glibcVersion(glibc_major, _LAST_GLIBC_MINOR[glibc_major]))
+ for glibc_max in glibc_max_list:
+ if glibc_max.major == too_old_glibc2.major:
+ min_minor = too_old_glibc2.minor
+ else:
+ # For other glibc major versions oldest supported is (x, 0).
+ min_minor = -1
+ for glibc_minor in range(glibc_max.minor, min_minor, -1):
+ glibc_version = (glibc_max.major, glibc_minor)
+ tag = "manylinux_{}_{}".format(*glibc_version)
+ if _is_manylinux_compatible(tag, arch, glibc_version):
+ yield linux.replace("linux", tag)
+ # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags.
+ if glibc_version in _LEGACY_MANYLINUX_MAP:
+ legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version]
+ if _is_manylinux_compatible(legacy_tag, arch, glibc_version):
+ yield linux.replace("linux", legacy_tag)
+
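+# For example, on an x86_64 machine with glibc 2.31 this generator yields
+# manylinux_2_31_x86_64 down through manylinux_2_5_x86_64, interleaving the
+# legacy aliases (manylinux2014, manylinux2010, manylinux1) at glibc 2.17,
+# 2.12 and 2.5 respectively (a sketch of typical output, not a guarantee).
+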
+
+def _linux_platforms(is_32bit=_32_BIT_INTERPRETER):
+ # type: (bool) -> Iterator[str]
+ linux = _normalize_string(distutils.util.get_platform())
+ if is_32bit:
+ if linux == "linux_x86_64":
+ linux = "linux_i686"
+ elif linux == "linux_aarch64":
+ linux = "linux_armv7l"
+ _, arch = linux.split("_", 1)
+ if _have_compatible_manylinux_abi(arch):
+ for tag in _manylinux_tags(linux, arch):
+ yield tag
+ yield linux
+
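+# A 32-bit interpreter on a 64-bit kernel reports the kernel's platform via
+# distutils (e.g. linux_x86_64), so the remapping above restores the
+# interpreter's real architecture (linux_i686 or linux_armv7l).
+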
+
+def _generic_platforms():
+ # type: () -> Iterator[str]
+ yield _normalize_string(distutils.util.get_platform())
+
+
+def _platform_tags():
+ # type: () -> Iterator[str]
+ """
+ Provides the platform tags for this installation.
+ """
+ if platform.system() == "Darwin":
+ return mac_platforms()
+ elif platform.system() == "Linux":
+ return _linux_platforms()
+ else:
+ return _generic_platforms()
+
+
+def interpreter_name():
+ # type: () -> str
+ """
+ Returns the name of the running interpreter.
+ """
+ try:
+ name = sys.implementation.name # type: ignore
+ except AttributeError: # pragma: no cover
+ # Python 2.7 compatibility.
+ name = platform.python_implementation().lower()
+ return INTERPRETER_SHORT_NAMES.get(name) or name
+
+
+def interpreter_version(**kwargs):
+ # type: (bool) -> str
+ """
+ Returns the version of the running interpreter.
+ """
+ warn = _warn_keyword_parameter("interpreter_version", kwargs)
+ version = _get_config_var("py_version_nodot", warn=warn)
+ if version:
+ version = str(version)
+ else:
+ version = _version_nodot(sys.version_info[:2])
+ return version
+
+
+def _version_nodot(version):
+ # type: (PythonVersion) -> str
+ return "".join(map(str, version))
+
+
+def sys_tags(**kwargs):
+ # type: (bool) -> Iterator[Tag]
+ """
+ Returns the sequence of tag triples for the running interpreter.
+
+ The order of the sequence corresponds to priority order for the
+ interpreter, from most to least important.
+ """
+ warn = _warn_keyword_parameter("sys_tags", kwargs)
+
+ interp_name = interpreter_name()
+ if interp_name == "cp":
+ for tag in cpython_tags(warn=warn):
+ yield tag
+ else:
+ for tag in generic_tags():
+ yield tag
+
+ for tag in compatible_tags():
+ yield tag
diff --git a/third_party/python/wheel/wheel/wheelfile.py b/third_party/python/wheel/wheel/wheelfile.py
new file mode 100644
index 0000000000..3ee97dddd2
--- /dev/null
+++ b/third_party/python/wheel/wheel/wheelfile.py
@@ -0,0 +1,169 @@
+from __future__ import print_function
+
+import csv
+import hashlib
+import os.path
+import re
+import stat
+import time
+from collections import OrderedDict
+from distutils import log as logger
+from zipfile import ZIP_DEFLATED, ZipInfo, ZipFile
+
+from wheel.cli import WheelError
+from wheel.util import urlsafe_b64decode, as_unicode, native, urlsafe_b64encode, as_bytes, StringIO
+
+# Non-greedy matching of an optional build number may be too clever (more
+# invalid wheel filenames will match). Should a separate regex be used for
+# .dist-info?
+WHEEL_INFO_RE = re.compile(
+ r"""^(?P<namever>(?P<name>.+?)-(?P<ver>.+?))(-(?P<build>\d[^-]*))?
+ -(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)\.whl$""",
+ re.VERBOSE)
+
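+# Illustrative match: "yamllint-1.23.0-py2.py3-none-any.whl" parses as
+# name="yamllint", ver="1.23.0", build=None, pyver="py2.py3", abi="none",
+# plat="any".
+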
+
+def get_zipinfo_datetime(timestamp=None):
+ # Some applications need reproducible .whl files, but they can't achieve this
+ # without forcing the timestamp of the individual ZipInfo objects. See issue #143.
+ timestamp = int(os.environ.get('SOURCE_DATE_EPOCH', timestamp or time.time()))
+ return time.gmtime(timestamp)[0:6]
+
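+# Reproducible-build sketch (hypothetical value): pinning SOURCE_DATE_EPOCH
+# fixes every member's timestamp regardless of the wall clock, e.g.
+#
+#     os.environ['SOURCE_DATE_EPOCH'] = '315532800'  # 1980-01-01T00:00:00Z
+#     get_zipinfo_datetime()  # -> (1980, 1, 1, 0, 0, 0)
+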
+
+class WheelFile(ZipFile):
+ """A ZipFile derivative class that also reads SHA-256 hashes from
+ .dist-info/RECORD and checks any read files against those.
+ """
+
+ _default_algorithm = hashlib.sha256
+
+ def __init__(self, file, mode='r', compression=ZIP_DEFLATED):
+ basename = os.path.basename(file)
+ self.parsed_filename = WHEEL_INFO_RE.match(basename)
+ if not basename.endswith('.whl') or self.parsed_filename is None:
+ raise WheelError("Bad wheel filename {!r}".format(basename))
+
+ ZipFile.__init__(self, file, mode, compression=compression, allowZip64=True)
+
+ self.dist_info_path = '{}.dist-info'.format(self.parsed_filename.group('namever'))
+ self.record_path = self.dist_info_path + '/RECORD'
+ self._file_hashes = OrderedDict()
+ self._file_sizes = {}
+ if mode == 'r':
+ # Ignore RECORD and any embedded wheel signatures
+ self._file_hashes[self.record_path] = None, None
+ self._file_hashes[self.record_path + '.jws'] = None, None
+ self._file_hashes[self.record_path + '.p7s'] = None, None
+
+ # Fill in the expected hashes by reading them from RECORD
+ try:
+ record = self.open(self.record_path)
+ except KeyError:
+ raise WheelError('Missing {} file'.format(self.record_path))
+
+ with record:
+ for line in record:
+ line = line.decode('utf-8')
+ path, hash_sum, size = line.rsplit(u',', 2)
+ if hash_sum:
+ algorithm, hash_sum = hash_sum.split(u'=')
+ try:
+ hashlib.new(algorithm)
+ except ValueError:
+ raise WheelError('Unsupported hash algorithm: {}'.format(algorithm))
+
+ if algorithm.lower() in {'md5', 'sha1'}:
+ raise WheelError(
+ 'Weak hash algorithm ({}) is not permitted by PEP 427'
+ .format(algorithm))
+
+ self._file_hashes[path] = (
+ algorithm, urlsafe_b64decode(hash_sum.encode('ascii')))
+
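+ # A RECORD line parsed above looks like (taken from a real wheel):
+ #
+ # yamllint/cli.py,sha256=PUphAVxTsXCKHMMvyUsuhwiELnDqVMRWX7xnWMZ5adw,7880
+ #
+ # i.e. "path,algorithm=urlsafe-b64(digest),size"; rsplit(u',', 2) keeps
+ # any commas inside the path intact.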
+ def open(self, name_or_info, mode="r", pwd=None):
+ def _update_crc(newdata, eof=None):
+ if eof is None:
+ eof = ef._eof
+ update_crc_orig(newdata)
+ else: # Python 2
+ update_crc_orig(newdata, eof)
+
+ running_hash.update(newdata)
+ if eof and running_hash.digest() != expected_hash:
+ raise WheelError("Hash mismatch for file '{}'".format(native(ef_name)))
+
+ ef_name = as_unicode(name_or_info.filename if isinstance(name_or_info, ZipInfo)
+ else name_or_info)
+ if mode == 'r' and not ef_name.endswith('/') and ef_name not in self._file_hashes:
+ raise WheelError("No hash found for file '{}'".format(native(ef_name)))
+
+ ef = ZipFile.open(self, name_or_info, mode, pwd)
+ if mode == 'r' and not ef_name.endswith('/'):
+ algorithm, expected_hash = self._file_hashes[ef_name]
+ if expected_hash is not None:
+ # Monkey patch the _update_crc method to also check for the hash from RECORD
+ running_hash = hashlib.new(algorithm)
+ update_crc_orig, ef._update_crc = ef._update_crc, _update_crc
+
+ return ef
+
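+ # Because _update_crc is wrapped, each chunk is hashed as it is read and
+ # a tampered member raises WheelError at end-of-file, with no separate
+ # verification pass over the archive.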
+ def write_files(self, base_dir):
+ logger.info("creating '%s' and adding '%s' to it", self.filename, base_dir)
+ deferred = []
+ for root, dirnames, filenames in os.walk(base_dir):
+ # Sort the directory names so that `os.walk` will walk them in a
+ # defined order on the next iteration.
+ dirnames.sort()
+ for name in sorted(filenames):
+ path = os.path.normpath(os.path.join(root, name))
+ if os.path.isfile(path):
+ arcname = os.path.relpath(path, base_dir).replace(os.path.sep, '/')
+ if arcname == self.record_path:
+ pass
+ elif root.endswith('.dist-info'):
+ deferred.append((path, arcname))
+ else:
+ self.write(path, arcname)
+
+ deferred.sort()
+ for path, arcname in deferred:
+ self.write(path, arcname)
+
+ def write(self, filename, arcname=None, compress_type=None):
+ with open(filename, 'rb') as f:
+ st = os.fstat(f.fileno())
+ data = f.read()
+
+ zinfo = ZipInfo(arcname or filename, date_time=get_zipinfo_datetime(st.st_mtime))
+ zinfo.external_attr = (stat.S_IMODE(st.st_mode) | stat.S_IFMT(st.st_mode)) << 16
+ zinfo.compress_type = compress_type or self.compression
+ self.writestr(zinfo, data, compress_type)
+
+ def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
+ ZipFile.writestr(self, zinfo_or_arcname, bytes, compress_type)
+ fname = (zinfo_or_arcname.filename if isinstance(zinfo_or_arcname, ZipInfo)
+ else zinfo_or_arcname)
+ logger.info("adding '%s'", fname)
+ if fname != self.record_path:
+ hash_ = self._default_algorithm(bytes)
+ self._file_hashes[fname] = hash_.name, native(urlsafe_b64encode(hash_.digest()))
+ self._file_sizes[fname] = len(bytes)
+
+ def close(self):
+ # Write RECORD
+ if self.fp is not None and self.mode == 'w' and self._file_hashes:
+ data = StringIO()
+ writer = csv.writer(data, delimiter=',', quotechar='"', lineterminator='\n')
+ writer.writerows((
+ (
+ fname,
+ algorithm + "=" + hash_,
+ self._file_sizes[fname]
+ )
+ for fname, (algorithm, hash_) in self._file_hashes.items()
+ ))
+ writer.writerow((format(self.record_path), "", ""))
+ zinfo = ZipInfo(native(self.record_path), date_time=get_zipinfo_datetime())
+ zinfo.compress_type = self.compression
+ zinfo.external_attr = 0o664 << 16
+ self.writestr(zinfo, as_bytes(data.getvalue()))
+
+ ZipFile.close(self)
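+
+# Minimal usage sketch (hypothetical filename and staging directory): writing
+# members records each one's SHA-256 and size, and close() then appends the
+# RECORD file itself.
+#
+#     with WheelFile('demo-1.0-py3-none-any.whl', 'w') as wf:
+#         wf.write_files('build/demo-1.0')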
diff --git a/third_party/python/yamllint/yamllint-1.23.0.dist-info/LICENSE b/third_party/python/yamllint/yamllint-1.23.0.dist-info/LICENSE
new file mode 100644
index 0000000000..94a9ed024d
--- /dev/null
+++ b/third_party/python/yamllint/yamllint-1.23.0.dist-info/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/third_party/python/yamllint/yamllint-1.23.0.dist-info/METADATA b/third_party/python/yamllint/yamllint-1.23.0.dist-info/METADATA
new file mode 100644
index 0000000000..f97b581a3d
--- /dev/null
+++ b/third_party/python/yamllint/yamllint-1.23.0.dist-info/METADATA
@@ -0,0 +1,34 @@
+Metadata-Version: 2.1
+Name: yamllint
+Version: 1.23.0
+Summary: A linter for YAML files.
+Home-page: https://github.com/adrienverge/yamllint
+Author: Adrien Vergé
+License: GPLv3
+Keywords: yaml,lint,linter,syntax,checker
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU General Public License v3 (GPLv3)
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Debuggers
+Classifier: Topic :: Software Development :: Quality Assurance
+Classifier: Topic :: Software Development :: Testing
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Requires-Dist: pathspec (>=0.5.3)
+Requires-Dist: pyyaml
+
+A linter for YAML files.
+
+yamllint not only checks for syntax validity, but also for weirdnesses like
+key repetition and cosmetic problems such as line length, trailing spaces,
+indentation, etc.
+
diff --git a/third_party/python/yamllint/yamllint-1.23.0.dist-info/RECORD b/third_party/python/yamllint/yamllint-1.23.0.dist-info/RECORD
new file mode 100644
index 0000000000..57b2adaa42
--- /dev/null
+++ b/third_party/python/yamllint/yamllint-1.23.0.dist-info/RECORD
@@ -0,0 +1,37 @@
+yamllint/__init__.py,sha256=09UdMyFnq1ObJn5Q-OyN0bVxnqlNdeTOGCH0DBALZKM,1098
+yamllint/__main__.py,sha256=yUeYsN2w7fyIjcnleh2ow0u_hG6YO1BfnumvApBDWXQ,67
+yamllint/cli.py,sha256=PUphAVxTsXCKHMMvyUsuhwiELnDqVMRWX7xnWMZ5adw,7880
+yamllint/config.py,sha256=-Y4QnDKAcZD6DpRhvhSo89t9IGs_2ZAkJrP_c8ZPrVQ,7877
+yamllint/linter.py,sha256=yDkPv41mMmbdCBNyvblYs7Ds4P1F_jDAgy0LzeyyUMU,8773
+yamllint/parser.py,sha256=3MwMASIm3v6z5zKwlQFPJtt7rv4i4zD6_KgHABP3_oE,5191
+yamllint/conf/default.yaml,sha256=wsK6rYJ2A1sv-0ln3Ckvr8Tgbj4asvk42vD4OZQcEfc,587
+yamllint/conf/relaxed.yaml,sha256=Bz743etRSwggNRc8hMgxMmyr-5JazJrCPv3KNhjOu28,505
+yamllint/rules/__init__.py,sha256=b-32xKjsRiUqMaLAguHTBtxjV2zCEMwa-0hfC4dIcI0,1924
+yamllint/rules/braces.py,sha256=BLjn8qlo_3BCAGqi19qJUt2eVd-TKoNrqzV1R_d0Gfc,4542
+yamllint/rules/brackets.py,sha256=IwsOdigK2pPPDyA3gup8LhgHdaUIfeP8P4ApO0PuLU4,4583
+yamllint/rules/colons.py,sha256=DtN1lRBMq58r8M1aeAcU4TAhNx7qNCsa6isVpk_ukNM,2826
+yamllint/rules/commas.py,sha256=VBIO52n0DsFrdw_v0_F76tLSrQnwSbARsSAnfUKCCyo,3737
+yamllint/rules/comments.py,sha256=WfTYRnI8nZS0je1zlVzkhC-rtXR23krvUzS0cEQI_BI,3381
+yamllint/rules/comments_indentation.py,sha256=pCS5gSOZWc4wGHr7LlnwcReuSWax4WV0Ec_0G1RvoiI,3425
+yamllint/rules/common.py,sha256=_572eFYdjdTCMrzVGpuRDTi42OazsBYezcwlFMdsDPg,3226
+yamllint/rules/document_end.py,sha256=9rMdNmLDacI3sQopFYzRuGrc6Hj68GAkX5s8a5X1UWg,2686
+yamllint/rules/document_start.py,sha256=LJFunt4mqC_Cruq316hymg9uTKorfoOA2klHqdJiKH8,2437
+yamllint/rules/empty_lines.py,sha256=C5JoI-jtTDkApiBpcT_AeVt97xfR2jlyvkTfvFBpFqA,3259
+yamllint/rules/empty_values.py,sha256=iXuIjQkUyEQ_9kiXAbYjA32BsK8oxMUQgGo1xrPIVqg,2601
+yamllint/rules/hyphens.py,sha256=OfNUNWyGiABHnZwbqDtQTEBihmeJBmVEQgg2DVIllFo,1990
+yamllint/rules/indentation.py,sha256=82DOfCNnxBxFI0TX_6VF05HxUX0CEw6nRxcnhCprFfs,19067
+yamllint/rules/key_duplicates.py,sha256=dEYrBcG68MGG7iC6OBGsA9ZPbIX2rTYt-85Yf2Z2BJU,2890
+yamllint/rules/key_ordering.py,sha256=wbATImHwoojXPcrmG8RaGrvyPLmM7Dvvlz7U14uCrxs,3083
+yamllint/rules/line_length.py,sha256=FwI_8ShkiKzfjICo4RqG7WSAJKM87op5O-MDncPOvDI,4809
+yamllint/rules/new_line_at_end_of_file.py,sha256=X0T0jPojEkxTfDQiQrWVydXn_jPj0srd9hFOLm5t9wA,1357
+yamllint/rules/new_lines.py,sha256=bZCSz9PUVIn__PkgLx1R-iWUnXyfrQ5ZfXDh9qQ7WEM,1633
+yamllint/rules/octal_values.py,sha256=cCbDT4U0qCE0_wWoJUFgnvOxv1Ydv2bVDzTH-i0aK2Q,2792
+yamllint/rules/quoted_strings.py,sha256=bUIkpR-8i5Of18RrSvCi5lmffMiFchwNnkNkHQe3wqs,7468
+yamllint/rules/trailing_spaces.py,sha256=GH8RTvR-FXA0GbtRkFWymp_LzK2mJuROhcDoJcvQ4ns,1634
+yamllint/rules/truthy.py,sha256=K_or0_h7U2ymVe0_G_5IZ2GgMJhfcCklh4ojvABN4Tw,4011
+yamllint-1.23.0.dist-info/LICENSE,sha256=jOtLnuWt7d5Hsx6XXB2QxzrSe2sWWh3NgMfFRetluQM,35147
+yamllint-1.23.0.dist-info/METADATA,sha256=I6VrDjA7fBGgMpppaPTQnfiZwWnE9zVhN9GIoDJuBu8,1318
+yamllint-1.23.0.dist-info/WHEEL,sha256=HX-v9-noUkyUoxyZ1PMSuS7auUxDAR4VBdoYLqD0xws,110
+yamllint-1.23.0.dist-info/entry_points.txt,sha256=_aTkgNklEhR_lTZHrYseFHam-CSHZSqnhYtlYFb-72k,47
+yamllint-1.23.0.dist-info/top_level.txt,sha256=ivPsPeZUDHOuLbd603ZxKClOQ1bATyMYNx3GfHQmt4g,9
+yamllint-1.23.0.dist-info/RECORD,,
diff --git a/third_party/python/yamllint/yamllint-1.23.0.dist-info/WHEEL b/third_party/python/yamllint/yamllint-1.23.0.dist-info/WHEEL
new file mode 100644
index 0000000000..c8240f03e8
--- /dev/null
+++ b/third_party/python/yamllint/yamllint-1.23.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.1)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/third_party/python/yamllint/yamllint-1.23.0.dist-info/entry_points.txt b/third_party/python/yamllint/yamllint-1.23.0.dist-info/entry_points.txt
new file mode 100644
index 0000000000..a1b443ba38
--- /dev/null
+++ b/third_party/python/yamllint/yamllint-1.23.0.dist-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+yamllint = yamllint.cli:run
+
diff --git a/third_party/python/yamllint/yamllint-1.23.0.dist-info/top_level.txt b/third_party/python/yamllint/yamllint-1.23.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..b2c729ca4d
--- /dev/null
+++ b/third_party/python/yamllint/yamllint-1.23.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+yamllint
diff --git a/third_party/python/yamllint/yamllint/__init__.py b/third_party/python/yamllint/yamllint/__init__.py
new file mode 100644
index 0000000000..b78fe9c29e
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/__init__.py
@@ -0,0 +1,31 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""A linter for YAML files.
+
+yamllint not only checks for syntax validity, but also for weirdnesses like
+key repetition and cosmetic problems such as line length, trailing spaces,
+indentation, etc."""
+
+
+APP_NAME = 'yamllint'
+APP_VERSION = '1.23.0'
+APP_DESCRIPTION = __doc__
+
+__author__ = u'Adrien Vergé'
+__copyright__ = u'Copyright 2016, Adrien Vergé'
+__license__ = 'GPLv3'
+__version__ = APP_VERSION
diff --git a/third_party/python/yamllint/yamllint/__main__.py b/third_party/python/yamllint/yamllint/__main__.py
new file mode 100644
index 0000000000..bc16534ec7
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/__main__.py
@@ -0,0 +1,4 @@
+from yamllint.cli import run
+
+if __name__ == '__main__':
+ run()
diff --git a/third_party/python/yamllint/yamllint/cli.py b/third_party/python/yamllint/yamllint/cli.py
new file mode 100644
index 0000000000..e99fd2ca84
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/cli.py
@@ -0,0 +1,207 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from __future__ import print_function
+
+import argparse
+import io
+import os
+import platform
+import sys
+
+from yamllint import APP_DESCRIPTION, APP_NAME, APP_VERSION
+from yamllint import linter
+from yamllint.config import YamlLintConfig, YamlLintConfigError
+from yamllint.linter import PROBLEM_LEVELS
+
+
+def find_files_recursively(items, conf):
+ for item in items:
+ if os.path.isdir(item):
+ for root, dirnames, filenames in os.walk(item):
+ for f in filenames:
+ filepath = os.path.join(root, f)
+ if conf.is_yaml_file(filepath):
+ yield filepath
+ else:
+ yield item
+
+
+def supports_color():
+    # Supported unless on Windows without ANSI support (ANSICON or TERM=ANSI).
+    supported_platform = (platform.system() != 'Windows' or
+                          'ANSICON' in os.environ or
+                          os.environ.get('TERM') == 'ANSI')
+    return (supported_platform and
+            hasattr(sys.stdout, 'isatty') and sys.stdout.isatty())
+
+
+class Format(object):
+ @staticmethod
+ def parsable(problem, filename):
+ return ('%(file)s:%(line)s:%(column)s: [%(level)s] %(message)s' %
+ {'file': filename,
+ 'line': problem.line,
+ 'column': problem.column,
+ 'level': problem.level,
+ 'message': problem.message})
+
+ @staticmethod
+ def standard(problem, filename):
+ line = ' %d:%d' % (problem.line, problem.column)
+ line += max(12 - len(line), 0) * ' '
+ line += problem.level
+ line += max(21 - len(line), 0) * ' '
+ line += problem.desc
+ if problem.rule:
+ line += ' (%s)' % problem.rule
+ return line
+
+ @staticmethod
+ def standard_color(problem, filename):
+ line = ' \033[2m%d:%d\033[0m' % (problem.line, problem.column)
+ line += max(20 - len(line), 0) * ' '
+ if problem.level == 'warning':
+ line += '\033[33m%s\033[0m' % problem.level
+ else:
+ line += '\033[31m%s\033[0m' % problem.level
+ line += max(38 - len(line), 0) * ' '
+ line += problem.desc
+ if problem.rule:
+ line += ' \033[2m(%s)\033[0m' % problem.rule
+ return line
+
+
+def show_problems(problems, file, args_format, no_warn):
+ max_level = 0
+ first = True
+
+ for problem in problems:
+ max_level = max(max_level, PROBLEM_LEVELS[problem.level])
+ if no_warn and (problem.level != 'error'):
+ continue
+ if args_format == 'parsable':
+ print(Format.parsable(problem, file))
+ elif args_format == 'colored' or \
+ (args_format == 'auto' and supports_color()):
+ if first:
+ print('\033[4m%s\033[0m' % file)
+ first = False
+ print(Format.standard_color(problem, file))
+ else:
+ if first:
+ print(file)
+ first = False
+ print(Format.standard(problem, file))
+
+ if not first and args_format != 'parsable':
+ print('')
+
+ return max_level
+
+
+def run(argv=None):
+ parser = argparse.ArgumentParser(prog=APP_NAME,
+ description=APP_DESCRIPTION)
+ files_group = parser.add_mutually_exclusive_group(required=True)
+ files_group.add_argument('files', metavar='FILE_OR_DIR', nargs='*',
+ default=(),
+ help='files to check')
+ files_group.add_argument('-', action='store_true', dest='stdin',
+ help='read from standard input')
+ config_group = parser.add_mutually_exclusive_group()
+ config_group.add_argument('-c', '--config-file', dest='config_file',
+ action='store',
+ help='path to a custom configuration')
+ config_group.add_argument('-d', '--config-data', dest='config_data',
+ action='store',
+ help='custom configuration (as YAML source)')
+ parser.add_argument('-f', '--format',
+ choices=('parsable', 'standard', 'colored', 'auto'),
+ default='auto', help='format for parsing output')
+ parser.add_argument('-s', '--strict',
+ action='store_true',
+ help='return non-zero exit code on warnings '
+ 'as well as errors')
+ parser.add_argument('--no-warnings',
+ action='store_true',
+ help='output only error level problems')
+ parser.add_argument('-v', '--version', action='version',
+ version='{} {}'.format(APP_NAME, APP_VERSION))
+
+ args = parser.parse_args(argv)
+
+ # User-global config is supposed to be in ~/.config/yamllint/config
+ if 'XDG_CONFIG_HOME' in os.environ:
+ user_global_config = os.path.join(
+ os.environ['XDG_CONFIG_HOME'], 'yamllint', 'config')
+ else:
+ user_global_config = os.path.expanduser('~/.config/yamllint/config')
+
+ try:
+ if args.config_data is not None:
+ if args.config_data != '' and ':' not in args.config_data:
+ args.config_data = 'extends: ' + args.config_data
+ conf = YamlLintConfig(content=args.config_data)
+ elif args.config_file is not None:
+ conf = YamlLintConfig(file=args.config_file)
+ elif os.path.isfile('.yamllint'):
+ conf = YamlLintConfig(file='.yamllint')
+ elif os.path.isfile('.yamllint.yaml'):
+ conf = YamlLintConfig(file='.yamllint.yaml')
+ elif os.path.isfile('.yamllint.yml'):
+ conf = YamlLintConfig(file='.yamllint.yml')
+ elif os.path.isfile(user_global_config):
+ conf = YamlLintConfig(file=user_global_config)
+ else:
+ conf = YamlLintConfig('extends: default')
+ except YamlLintConfigError as e:
+ print(e, file=sys.stderr)
+ sys.exit(-1)
+
+ max_level = 0
+
+ for file in find_files_recursively(args.files, conf):
+ filepath = file[2:] if file.startswith('./') else file
+ try:
+ with io.open(file, newline='') as f:
+ problems = linter.run(f, conf, filepath)
+ except EnvironmentError as e:
+ print(e, file=sys.stderr)
+ sys.exit(-1)
+ prob_level = show_problems(problems, file, args_format=args.format,
+ no_warn=args.no_warnings)
+ max_level = max(max_level, prob_level)
+
+ # read yaml from stdin
+ if args.stdin:
+ try:
+ problems = linter.run(sys.stdin, conf, '')
+ except EnvironmentError as e:
+ print(e, file=sys.stderr)
+ sys.exit(-1)
+ prob_level = show_problems(problems, 'stdin', args_format=args.format,
+ no_warn=args.no_warnings)
+ max_level = max(max_level, prob_level)
+
+ if max_level == PROBLEM_LEVELS['error']:
+ return_code = 1
+ elif max_level == PROBLEM_LEVELS['warning']:
+ return_code = 2 if args.strict else 0
+ else:
+ return_code = 0
+
+ sys.exit(return_code)
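
For orientation: run() above resolves its configuration (inline data, explicit file, local dotfiles, then the user-global XDG path), lints each input, and terminates via sys.exit(). A minimal sketch of invoking it programmatically; the file name is hypothetical, and SystemExit is caught to observe the exit code:

    from yamllint import cli

    try:
        cli.run(['--format', 'parsable', 'playbook.yml'])  # hypothetical path
    except SystemExit as e:
        # 0: clean, 1: errors, 2: warnings with --strict, -1: config/IO error
        print('yamllint exited with', e.code)
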
diff --git a/third_party/python/yamllint/yamllint/conf/default.yaml b/third_party/python/yamllint/yamllint/conf/default.yaml
new file mode 100644
index 0000000000..0720dede32
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/conf/default.yaml
@@ -0,0 +1,33 @@
+---
+
+yaml-files:
+ - '*.yaml'
+ - '*.yml'
+ - '.yamllint'
+
+rules:
+ braces: enable
+ brackets: enable
+ colons: enable
+ commas: enable
+ comments:
+ level: warning
+ comments-indentation:
+ level: warning
+ document-end: disable
+ document-start:
+ level: warning
+ empty-lines: enable
+ empty-values: disable
+ hyphens: enable
+ indentation: enable
+ key-duplicates: enable
+ key-ordering: disable
+ line-length: enable
+ new-line-at-end-of-file: enable
+ new-lines: enable
+ octal-values: disable
+ quoted-strings: disable
+ trailing-spaces: enable
+ truthy:
+ level: warning
diff --git a/third_party/python/yamllint/yamllint/conf/relaxed.yaml b/third_party/python/yamllint/yamllint/conf/relaxed.yaml
new file mode 100644
index 0000000000..83f5340c7f
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/conf/relaxed.yaml
@@ -0,0 +1,29 @@
+---
+
+extends: default
+
+rules:
+ braces:
+ level: warning
+ max-spaces-inside: 1
+ brackets:
+ level: warning
+ max-spaces-inside: 1
+ colons:
+ level: warning
+ commas:
+ level: warning
+ comments: disable
+ comments-indentation: disable
+ document-start: disable
+ empty-lines:
+ level: warning
+ hyphens:
+ level: warning
+ indentation:
+ level: warning
+ indent-sequences: consistent
+ line-length:
+ level: warning
+ allow-non-breakable-inline-mappings: true
+ truthy: disable
diff --git a/third_party/python/yamllint/yamllint/config.py b/third_party/python/yamllint/yamllint/config.py
new file mode 100644
index 0000000000..a955d8e62b
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/config.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os.path
+
+import pathspec
+import yaml
+
+import yamllint.rules
+
+
+class YamlLintConfigError(Exception):
+ pass
+
+
+class YamlLintConfig(object):
+ def __init__(self, content=None, file=None):
+ assert (content is None) ^ (file is None)
+
+ self.ignore = None
+
+ self.yaml_files = pathspec.PathSpec.from_lines(
+ 'gitwildmatch', ['*.yaml', '*.yml', '.yamllint'])
+
+ if file is not None:
+ with open(file) as f:
+ content = f.read()
+
+ self.parse(content)
+ self.validate()
+
+ def is_file_ignored(self, filepath):
+ return self.ignore and self.ignore.match_file(filepath)
+
+ def is_yaml_file(self, filepath):
+ return self.yaml_files.match_file(filepath)
+
+ def enabled_rules(self, filepath):
+ return [yamllint.rules.get(id) for id, val in self.rules.items()
+ if val is not False and (
+ filepath is None or 'ignore' not in val or
+ not val['ignore'].match_file(filepath))]
+
+ def extend(self, base_config):
+ assert isinstance(base_config, YamlLintConfig)
+
+ for rule in self.rules:
+ if (isinstance(self.rules[rule], dict) and
+ rule in base_config.rules and
+ base_config.rules[rule] is not False):
+ base_config.rules[rule].update(self.rules[rule])
+ else:
+ base_config.rules[rule] = self.rules[rule]
+
+ self.rules = base_config.rules
+
+ if base_config.ignore is not None:
+ self.ignore = base_config.ignore
+
+ def parse(self, raw_content):
+ try:
+ conf = yaml.safe_load(raw_content)
+ except Exception as e:
+ raise YamlLintConfigError('invalid config: %s' % e)
+
+ if not isinstance(conf, dict):
+ raise YamlLintConfigError('invalid config: not a dict')
+
+ self.rules = conf.get('rules', {})
+ for rule in self.rules:
+ if self.rules[rule] == 'enable':
+ self.rules[rule] = {}
+ elif self.rules[rule] == 'disable':
+ self.rules[rule] = False
+
+ # Does this conf override another conf that we need to load?
+ if 'extends' in conf:
+ path = get_extended_config_file(conf['extends'])
+ base = YamlLintConfig(file=path)
+ try:
+ self.extend(base)
+ except Exception as e:
+ raise YamlLintConfigError('invalid config: %s' % e)
+
+ if 'ignore' in conf:
+ if not isinstance(conf['ignore'], str):
+ raise YamlLintConfigError(
+ 'invalid config: ignore should contain file patterns')
+ self.ignore = pathspec.PathSpec.from_lines(
+ 'gitwildmatch', conf['ignore'].splitlines())
+
+ if 'yaml-files' in conf:
+ if not (isinstance(conf['yaml-files'], list)
+ and all(isinstance(i, str) for i in conf['yaml-files'])):
+ raise YamlLintConfigError(
+ 'invalid config: yaml-files '
+ 'should be a list of file patterns')
+ self.yaml_files = pathspec.PathSpec.from_lines('gitwildmatch',
+ conf['yaml-files'])
+
+ def validate(self):
+ for id in self.rules:
+ try:
+ rule = yamllint.rules.get(id)
+ except Exception as e:
+ raise YamlLintConfigError('invalid config: %s' % e)
+
+ self.rules[id] = validate_rule_conf(rule, self.rules[id])
+
+
+def validate_rule_conf(rule, conf):
+ if conf is False: # disable
+ return False
+
+ if isinstance(conf, dict):
+ if ('ignore' in conf and
+ not isinstance(conf['ignore'], pathspec.pathspec.PathSpec)):
+ if not isinstance(conf['ignore'], str):
+ raise YamlLintConfigError(
+ 'invalid config: ignore should contain file patterns')
+ conf['ignore'] = pathspec.PathSpec.from_lines(
+ 'gitwildmatch', conf['ignore'].splitlines())
+
+ if 'level' not in conf:
+ conf['level'] = 'error'
+ elif conf['level'] not in ('error', 'warning'):
+ raise YamlLintConfigError(
+ 'invalid config: level should be "error" or "warning"')
+
+ options = getattr(rule, 'CONF', {})
+ options_default = getattr(rule, 'DEFAULT', {})
+ for optkey in conf:
+ if optkey in ('ignore', 'level'):
+ continue
+ if optkey not in options:
+ raise YamlLintConfigError(
+ 'invalid config: unknown option "%s" for rule "%s"' %
+ (optkey, rule.ID))
+ # Example: CONF = {option: (bool, 'mixed')}
+ # → {option: true} → {option: mixed}
+ if isinstance(options[optkey], tuple):
+ if (conf[optkey] not in options[optkey] and
+ type(conf[optkey]) not in options[optkey]):
+ raise YamlLintConfigError(
+ 'invalid config: option "%s" of "%s" should be in %s'
+ % (optkey, rule.ID, options[optkey]))
+ # Example: CONF = {option: ['flag1', 'flag2', int]}
+ # → {option: [flag1]} → {option: [42, flag1, flag2]}
+ elif isinstance(options[optkey], list):
+ if (type(conf[optkey]) is not list or
+ any(flag not in options[optkey] and
+ type(flag) not in options[optkey]
+ for flag in conf[optkey])):
+ raise YamlLintConfigError(
+ ('invalid config: option "%s" of "%s" should only '
+ 'contain values in %s')
+ % (optkey, rule.ID, str(options[optkey])))
+ # Example: CONF = {option: int}
+ # → {option: 42}
+ else:
+ if not isinstance(conf[optkey], options[optkey]):
+ raise YamlLintConfigError(
+ 'invalid config: option "%s" of "%s" should be %s'
+ % (optkey, rule.ID, options[optkey].__name__))
+ for optkey in options:
+ if optkey not in conf:
+ conf[optkey] = options_default[optkey]
+
+ if hasattr(rule, 'VALIDATE'):
+ res = rule.VALIDATE(conf)
+ if res:
+ raise YamlLintConfigError('invalid config: %s: %s' %
+ (rule.ID, res))
+ else:
+ raise YamlLintConfigError(('invalid config: rule "%s": should be '
+ 'either "enable", "disable" or a dict')
+ % rule.ID)
+
+ return conf
+
+
+def get_extended_config_file(name):
+ # Is it a standard conf shipped with yamllint...
+ if '/' not in name:
+ std_conf = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+ 'conf', name + '.yaml')
+
+ if os.path.isfile(std_conf):
+ return std_conf
+
+ # or a custom conf on filesystem?
+ return name
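
YamlLintConfig takes exactly one of content= (inline YAML) or file=, and an 'extends' key is resolved by get_extended_config_file() against the bundled presets. A short sketch using only the definitions above:

    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('extends: default\n'
                          'rules:\n'
                          '  line-length: disable\n')
    print(conf.is_yaml_file('site.yaml'))  # True: matches the default yaml-files
    print(conf.rules['line-length'])       # False: 'disable' is mapped to False
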
diff --git a/third_party/python/yamllint/yamllint/linter.py b/third_party/python/yamllint/yamllint/linter.py
new file mode 100644
index 0000000000..c687f142ec
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/linter.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import re
+
+import yaml
+
+from yamllint import parser
+
+
+PROBLEM_LEVELS = {
+ 0: None,
+ 1: 'warning',
+ 2: 'error',
+ None: 0,
+ 'warning': 1,
+ 'error': 2,
+}
+
+
+class LintProblem(object):
+ """Represents a linting problem found by yamllint."""
+ def __init__(self, line, column, desc='<no description>', rule=None):
+ #: Line on which the problem was found (starting at 1)
+ self.line = line
+ #: Column on which the problem was found (starting at 1)
+ self.column = column
+ #: Human-readable description of the problem
+ self.desc = desc
+ #: Identifier of the rule that detected the problem
+ self.rule = rule
+ self.level = None
+
+ @property
+ def message(self):
+ if self.rule is not None:
+ return '{} ({})'.format(self.desc, self.rule)
+ return self.desc
+
+ def __eq__(self, other):
+ return (self.line == other.line and
+ self.column == other.column and
+ self.rule == other.rule)
+
+ def __lt__(self, other):
+ return (self.line < other.line or
+ (self.line == other.line and self.column < other.column))
+
+ def __repr__(self):
+ return '%d:%d: %s' % (self.line, self.column, self.message)
+
+
+def get_cosmetic_problems(buffer, conf, filepath):
+ rules = conf.enabled_rules(filepath)
+
+ # Split token rules from line rules
+ token_rules = [r for r in rules if r.TYPE == 'token']
+ comment_rules = [r for r in rules if r.TYPE == 'comment']
+ line_rules = [r for r in rules if r.TYPE == 'line']
+
+ context = {}
+ for rule in token_rules:
+ context[rule.ID] = {}
+
+ class DisableDirective:
+ def __init__(self):
+ self.rules = set()
+ self.all_rules = {r.ID for r in rules}
+
+ def process_comment(self, comment):
+ try:
+ comment = str(comment)
+ except UnicodeError:
+ return # this certainly wasn't a yamllint directive comment
+
+ if re.match(r'^# yamllint disable( rule:\S+)*\s*$', comment):
+ rules = [item[5:] for item in comment[18:].split(' ')][1:]
+ if len(rules) == 0:
+ self.rules = self.all_rules.copy()
+ else:
+ for id in rules:
+ if id in self.all_rules:
+ self.rules.add(id)
+
+ elif re.match(r'^# yamllint enable( rule:\S+)*\s*$', comment):
+ rules = [item[5:] for item in comment[17:].split(' ')][1:]
+ if len(rules) == 0:
+ self.rules.clear()
+ else:
+ for id in rules:
+ self.rules.discard(id)
+
+ def is_disabled_by_directive(self, problem):
+ return problem.rule in self.rules
+
+ class DisableLineDirective(DisableDirective):
+ def process_comment(self, comment):
+ try:
+ comment = str(comment)
+ except UnicodeError:
+ return # this certainly wasn't a yamllint directive comment
+
+ if re.match(r'^# yamllint disable-line( rule:\S+)*\s*$', comment):
+ rules = [item[5:] for item in comment[23:].split(' ')][1:]
+ if len(rules) == 0:
+ self.rules = self.all_rules.copy()
+ else:
+ for id in rules:
+ if id in self.all_rules:
+ self.rules.add(id)
+
+    # Use a cache to store problems and flush it only when an end of line is
+    # found. This allows the use of yamllint directives to disable some rules
+    # on some lines.
+ cache = []
+ disabled = DisableDirective()
+ disabled_for_line = DisableLineDirective()
+ disabled_for_next_line = DisableLineDirective()
+
+ for elem in parser.token_or_comment_or_line_generator(buffer):
+ if isinstance(elem, parser.Token):
+ for rule in token_rules:
+ rule_conf = conf.rules[rule.ID]
+ for problem in rule.check(rule_conf,
+ elem.curr, elem.prev, elem.next,
+ elem.nextnext,
+ context[rule.ID]):
+ problem.rule = rule.ID
+ problem.level = rule_conf['level']
+ cache.append(problem)
+ elif isinstance(elem, parser.Comment):
+ for rule in comment_rules:
+ rule_conf = conf.rules[rule.ID]
+ for problem in rule.check(rule_conf, elem):
+ problem.rule = rule.ID
+ problem.level = rule_conf['level']
+ cache.append(problem)
+
+ disabled.process_comment(elem)
+ if elem.is_inline():
+ disabled_for_line.process_comment(elem)
+ else:
+ disabled_for_next_line.process_comment(elem)
+ elif isinstance(elem, parser.Line):
+ for rule in line_rules:
+ rule_conf = conf.rules[rule.ID]
+ for problem in rule.check(rule_conf, elem):
+ problem.rule = rule.ID
+ problem.level = rule_conf['level']
+ cache.append(problem)
+
+ # This is the last token/comment/line of this line, let's flush the
+ # problems found (but filter them according to the directives)
+ for problem in cache:
+ if not (disabled_for_line.is_disabled_by_directive(problem) or
+ disabled.is_disabled_by_directive(problem)):
+ yield problem
+
+ disabled_for_line = disabled_for_next_line
+ disabled_for_next_line = DisableLineDirective()
+ cache = []
+
+
+def get_syntax_error(buffer):
+ try:
+ list(yaml.parse(buffer, Loader=yaml.BaseLoader))
+ except yaml.error.MarkedYAMLError as e:
+ problem = LintProblem(e.problem_mark.line + 1,
+ e.problem_mark.column + 1,
+ 'syntax error: ' + e.problem + ' (syntax)')
+ problem.level = 'error'
+ return problem
+
+
+def _run(buffer, conf, filepath):
+ assert hasattr(buffer, '__getitem__'), \
+ '_run() argument must be a buffer, not a stream'
+
+ first_line = next(parser.line_generator(buffer)).content
+ if re.match(r'^#\s*yamllint disable-file\s*$', first_line):
+ return
+
+ # If the document contains a syntax error, save it and yield it at the
+ # right line
+ syntax_error = get_syntax_error(buffer)
+
+ for problem in get_cosmetic_problems(buffer, conf, filepath):
+ # Insert the syntax error (if any) at the right place...
+ if (syntax_error and syntax_error.line <= problem.line and
+ syntax_error.column <= problem.column):
+ yield syntax_error
+
+ # If there is already a yamllint error at the same place, discard
+ # it as it is probably redundant (and maybe it's just a 'warning',
+ # in which case the script won't even exit with a failure status).
+ if (syntax_error.line == problem.line and
+ syntax_error.column == problem.column):
+ syntax_error = None
+ continue
+
+ syntax_error = None
+
+ yield problem
+
+ if syntax_error:
+ yield syntax_error
+
+
+def run(input, conf, filepath=None):
+ """Lints a YAML source.
+
+ Returns a generator of LintProblem objects.
+
+ :param input: buffer, string or stream to read from
+ :param conf: yamllint configuration object
+ """
+ if conf.is_file_ignored(filepath):
+ return ()
+
+ if isinstance(input, (type(b''), type(u''))): # compat with Python 2 & 3
+ return _run(input, conf, filepath)
+ elif hasattr(input, 'read'): # Python 2's file or Python 3's io.IOBase
+ # We need to have everything in memory to parse correctly
+ content = input.read()
+ return _run(content, conf, filepath)
+ else:
+ raise TypeError('input should be a string or a stream')
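
linter.run() accepts a string or a readable stream and returns a generator of LintProblem objects. A minimal sketch; the exact problems reported depend on the enabled rules, so the output comment is only indicative:

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('extends: default')
    for problem in linter.run('key: 1\nkey: 2\n', conf):
        # e.g. 2:1: duplication of key "key" in mapping (key-duplicates)
        print(problem)
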
diff --git a/third_party/python/yamllint/yamllint/parser.py b/third_party/python/yamllint/yamllint/parser.py
new file mode 100644
index 0000000000..de331f4729
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/parser.py
@@ -0,0 +1,161 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import yaml
+
+
+class Line(object):
+ def __init__(self, line_no, buffer, start, end):
+ self.line_no = line_no
+ self.start = start
+ self.end = end
+ self.buffer = buffer
+
+ @property
+ def content(self):
+ return self.buffer[self.start:self.end]
+
+
+class Token(object):
+ def __init__(self, line_no, curr, prev, next, nextnext):
+ self.line_no = line_no
+ self.curr = curr
+ self.prev = prev
+ self.next = next
+ self.nextnext = nextnext
+
+
+class Comment(object):
+ def __init__(self, line_no, column_no, buffer, pointer,
+ token_before=None, token_after=None, comment_before=None):
+ self.line_no = line_no
+ self.column_no = column_no
+ self.buffer = buffer
+ self.pointer = pointer
+ self.token_before = token_before
+ self.token_after = token_after
+ self.comment_before = comment_before
+
+ def __str__(self):
+ end = self.buffer.find('\n', self.pointer)
+ if end == -1:
+ end = self.buffer.find('\0', self.pointer)
+ if end != -1:
+ return self.buffer[self.pointer:end]
+ return self.buffer[self.pointer:]
+
+ def __eq__(self, other):
+ return (isinstance(other, Comment) and
+ self.line_no == other.line_no and
+ self.column_no == other.column_no and
+ str(self) == str(other))
+
+ def is_inline(self):
+ return (
+ not isinstance(self.token_before, yaml.StreamStartToken) and
+ self.line_no == self.token_before.end_mark.line + 1 and
+ # sometimes token end marks are on the next line
+ self.buffer[self.token_before.end_mark.pointer - 1] != '\n'
+ )
+
+
+def line_generator(buffer):
+ line_no = 1
+ cur = 0
+ next = buffer.find('\n')
+ while next != -1:
+ if next > 0 and buffer[next - 1] == '\r':
+ yield Line(line_no, buffer, start=cur, end=next - 1)
+ else:
+ yield Line(line_no, buffer, start=cur, end=next)
+ cur = next + 1
+ next = buffer.find('\n', cur)
+ line_no += 1
+
+ yield Line(line_no, buffer, start=cur, end=len(buffer))
+
+
+def comments_between_tokens(token1, token2):
+ """Find all comments between two tokens"""
+ if token2 is None:
+ buf = token1.end_mark.buffer[token1.end_mark.pointer:]
+ elif (token1.end_mark.line == token2.start_mark.line and
+ not isinstance(token1, yaml.StreamStartToken) and
+ not isinstance(token2, yaml.StreamEndToken)):
+ return
+ else:
+ buf = token1.end_mark.buffer[token1.end_mark.pointer:
+ token2.start_mark.pointer]
+
+ line_no = token1.end_mark.line + 1
+ column_no = token1.end_mark.column + 1
+ pointer = token1.end_mark.pointer
+
+ comment_before = None
+ for line in buf.split('\n'):
+ pos = line.find('#')
+ if pos != -1:
+ comment = Comment(line_no, column_no + pos,
+ token1.end_mark.buffer, pointer + pos,
+ token1, token2, comment_before)
+ yield comment
+
+ comment_before = comment
+
+ pointer += len(line) + 1
+ line_no += 1
+ column_no = 1
+
+
+def token_or_comment_generator(buffer):
+ yaml_loader = yaml.BaseLoader(buffer)
+
+ try:
+ prev = None
+ curr = yaml_loader.get_token()
+ while curr is not None:
+ next = yaml_loader.get_token()
+ nextnext = (yaml_loader.peek_token()
+ if yaml_loader.check_token() else None)
+
+ yield Token(curr.start_mark.line + 1, curr, prev, next, nextnext)
+
+ for comment in comments_between_tokens(curr, next):
+ yield comment
+
+ prev = curr
+ curr = next
+
+ except yaml.scanner.ScannerError:
+ pass
+
+
+def token_or_comment_or_line_generator(buffer):
+ """Generator that mixes tokens and lines, ordering them by line number"""
+ tok_or_com_gen = token_or_comment_generator(buffer)
+ line_gen = line_generator(buffer)
+
+ tok_or_com = next(tok_or_com_gen, None)
+ line = next(line_gen, None)
+
+ while tok_or_com is not None or line is not None:
+ if tok_or_com is None or (line is not None and
+ tok_or_com.line_no > line.line_no):
+ yield line
+ line = next(line_gen, None)
+ else:
+ yield tok_or_com
+ tok_or_com = next(tok_or_com_gen, None)
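
These generators are the input layer of get_cosmetic_problems() in linter.py: Token, Comment and Line objects are yielded interleaved in line order. A quick sketch:

    from yamllint import parser

    buf = 'key: value  # inline comment\n'
    for elem in parser.token_or_comment_or_line_generator(buf):
        print(elem.line_no, type(elem).__name__)  # Tokens, then Comment, then Lines
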
diff --git a/third_party/python/yamllint/yamllint/rules/__init__.py b/third_party/python/yamllint/yamllint/rules/__init__.py
new file mode 100644
index 0000000000..a084d6ee16
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/__init__.py
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from yamllint.rules import (
+ braces,
+ brackets,
+ colons,
+ commas,
+ comments,
+ comments_indentation,
+ document_end,
+ document_start,
+ empty_lines,
+ empty_values,
+ hyphens,
+ indentation,
+ key_duplicates,
+ key_ordering,
+ line_length,
+ new_line_at_end_of_file,
+ new_lines,
+ octal_values,
+ quoted_strings,
+ trailing_spaces,
+ truthy,
+)
+
+_RULES = {
+ braces.ID: braces,
+ brackets.ID: brackets,
+ colons.ID: colons,
+ commas.ID: commas,
+ comments.ID: comments,
+ comments_indentation.ID: comments_indentation,
+ document_end.ID: document_end,
+ document_start.ID: document_start,
+ empty_lines.ID: empty_lines,
+ empty_values.ID: empty_values,
+ hyphens.ID: hyphens,
+ indentation.ID: indentation,
+ key_duplicates.ID: key_duplicates,
+ key_ordering.ID: key_ordering,
+ line_length.ID: line_length,
+ new_line_at_end_of_file.ID: new_line_at_end_of_file,
+ new_lines.ID: new_lines,
+ octal_values.ID: octal_values,
+ quoted_strings.ID: quoted_strings,
+ trailing_spaces.ID: trailing_spaces,
+ truthy.ID: truthy,
+}
+
+
+def get(id):
+ if id not in _RULES:
+ raise ValueError('no such rule: "%s"' % id)
+
+ return _RULES[id]
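
get() is the lookup used by YamlLintConfig.enabled_rules(); every rule module exposes ID and TYPE, and configurable rules also carry CONF and DEFAULT, which config.py validates. For instance:

    import yamllint.rules

    rule = yamllint.rules.get('braces')
    print(rule.ID, rule.TYPE)  # braces token
    print(rule.DEFAULT)        # option defaults, e.g. {'min-spaces-inside': 0, ...}
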
diff --git a/third_party/python/yamllint/yamllint/rules/braces.py b/third_party/python/yamllint/yamllint/rules/braces.py
new file mode 100644
index 0000000000..654b36d330
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/braces.py
@@ -0,0 +1,143 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to control the number of spaces inside braces (``{`` and ``}``).
+
+.. rubric:: Options
+
+* ``min-spaces-inside`` defines the minimal number of spaces required inside
+ braces.
+* ``max-spaces-inside`` defines the maximal number of spaces allowed inside
+ braces.
+* ``min-spaces-inside-empty`` defines the minimal number of spaces required
+ inside empty braces.
+* ``max-spaces-inside-empty`` defines the maximal number of spaces allowed
+ inside empty braces.
+
+.. rubric:: Examples
+
+#. With ``braces: {min-spaces-inside: 0, max-spaces-inside: 0}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object: {key1: 4, key2: 8}
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: { key1: 4, key2: 8 }
+
+#. With ``braces: {min-spaces-inside: 1, max-spaces-inside: 3}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object: { key1: 4, key2: 8 }
+
+ the following code snippet would **PASS**:
+ ::
+
+     object: {  key1: 4, key2: 8   }
+
+ the following code snippet would **FAIL**:
+ ::
+
+     object: {    key1: 4, key2: 8   }
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: {key1: 4, key2: 8 }
+
+#. With ``braces: {min-spaces-inside-empty: 0, max-spaces-inside-empty: 0}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object: {}
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: { }
+
+#. With ``braces: {min-spaces-inside-empty: 1, max-spaces-inside-empty: -1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object: { }
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: {}
+"""
+
+
+import yaml
+
+from yamllint.rules.common import spaces_after, spaces_before
+
+
+ID = 'braces'
+TYPE = 'token'
+CONF = {'min-spaces-inside': int,
+ 'max-spaces-inside': int,
+ 'min-spaces-inside-empty': int,
+ 'max-spaces-inside-empty': int}
+DEFAULT = {'min-spaces-inside': 0,
+ 'max-spaces-inside': 0,
+ 'min-spaces-inside-empty': -1,
+ 'max-spaces-inside-empty': -1}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if (isinstance(token, yaml.FlowMappingStartToken) and
+ isinstance(next, yaml.FlowMappingEndToken)):
+ problem = spaces_after(token, prev, next,
+ min=(conf['min-spaces-inside-empty']
+ if conf['min-spaces-inside-empty'] != -1
+ else conf['min-spaces-inside']),
+ max=(conf['max-spaces-inside-empty']
+ if conf['max-spaces-inside-empty'] != -1
+ else conf['max-spaces-inside']),
+ min_desc='too few spaces inside empty braces',
+ max_desc='too many spaces inside empty braces')
+ if problem is not None:
+ yield problem
+
+ elif isinstance(token, yaml.FlowMappingStartToken):
+ problem = spaces_after(token, prev, next,
+ min=conf['min-spaces-inside'],
+ max=conf['max-spaces-inside'],
+ min_desc='too few spaces inside braces',
+ max_desc='too many spaces inside braces')
+ if problem is not None:
+ yield problem
+
+ elif (isinstance(token, yaml.FlowMappingEndToken) and
+ (prev is None or
+ not isinstance(prev, yaml.FlowMappingStartToken))):
+ problem = spaces_before(token, prev, next,
+ min=conf['min-spaces-inside'],
+ max=conf['max-spaces-inside'],
+ min_desc='too few spaces inside braces',
+ max_desc='too many spaces inside braces')
+ if problem is not None:
+ yield problem
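
Token rules such as this one are called by the linter with a sliding window of pyyaml tokens plus a per-rule context dict (unused here). A hedged sketch that drives check() directly with tokens from yaml.scan(); normally get_cosmetic_problems() does this wiring:

    import yaml
    from yamllint.rules import braces

    conf = dict(braces.DEFAULT)  # max-spaces-inside defaults to 0
    tokens = list(yaml.scan('object: { key: 4 }', Loader=yaml.BaseLoader))
    for i, curr in enumerate(tokens):
        prev = tokens[i - 1] if i else None
        nxt = tokens[i + 1] if i + 1 < len(tokens) else None
        nxt2 = tokens[i + 2] if i + 2 < len(tokens) else None
        for problem in braces.check(conf, curr, prev, nxt, nxt2, {}):
            print(problem)  # too many spaces inside braces (for '{' and '}')
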
diff --git a/third_party/python/yamllint/yamllint/rules/brackets.py b/third_party/python/yamllint/yamllint/rules/brackets.py
new file mode 100644
index 0000000000..b54c5154aa
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/brackets.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to control the number of spaces inside brackets (``[`` and
+``]``).
+
+.. rubric:: Options
+
+* ``min-spaces-inside`` defines the minimal number of spaces required inside
+ brackets.
+* ``max-spaces-inside`` defines the maximal number of spaces allowed inside
+ brackets.
+* ``min-spaces-inside-empty`` defines the minimal number of spaces required
+ inside empty brackets.
+* ``max-spaces-inside-empty`` defines the maximal number of spaces allowed
+ inside empty brackets.
+
+.. rubric:: Examples
+
+#. With ``brackets: {min-spaces-inside: 0, max-spaces-inside: 0}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object: [1, 2, abc]
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: [ 1, 2, abc ]
+
+#. With ``brackets: {min-spaces-inside: 1, max-spaces-inside: 3}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object: [ 1, 2, abc ]
+
+ the following code snippet would **PASS**:
+ ::
+
+     object: [  1, 2, abc   ]
+
+ the following code snippet would **FAIL**:
+ ::
+
+     object: [    1, 2, abc   ]
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: [1, 2, abc ]
+
+#. With ``brackets: {min-spaces-inside-empty: 0, max-spaces-inside-empty: 0}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object: []
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: [ ]
+
+#. With ``brackets: {min-spaces-inside-empty: 1, max-spaces-inside-empty: -1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object: [ ]
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: []
+"""
+
+
+import yaml
+
+from yamllint.rules.common import spaces_after, spaces_before
+
+
+ID = 'brackets'
+TYPE = 'token'
+CONF = {'min-spaces-inside': int,
+ 'max-spaces-inside': int,
+ 'min-spaces-inside-empty': int,
+ 'max-spaces-inside-empty': int}
+DEFAULT = {'min-spaces-inside': 0,
+ 'max-spaces-inside': 0,
+ 'min-spaces-inside-empty': -1,
+ 'max-spaces-inside-empty': -1}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if (isinstance(token, yaml.FlowSequenceStartToken) and
+ isinstance(next, yaml.FlowSequenceEndToken)):
+ problem = spaces_after(token, prev, next,
+ min=(conf['min-spaces-inside-empty']
+ if conf['min-spaces-inside-empty'] != -1
+ else conf['min-spaces-inside']),
+ max=(conf['max-spaces-inside-empty']
+ if conf['max-spaces-inside-empty'] != -1
+ else conf['max-spaces-inside']),
+ min_desc='too few spaces inside empty brackets',
+ max_desc=('too many spaces inside empty '
+ 'brackets'))
+ if problem is not None:
+ yield problem
+
+ elif isinstance(token, yaml.FlowSequenceStartToken):
+ problem = spaces_after(token, prev, next,
+ min=conf['min-spaces-inside'],
+ max=conf['max-spaces-inside'],
+ min_desc='too few spaces inside brackets',
+ max_desc='too many spaces inside brackets')
+ if problem is not None:
+ yield problem
+
+ elif (isinstance(token, yaml.FlowSequenceEndToken) and
+ (prev is None or
+ not isinstance(prev, yaml.FlowSequenceStartToken))):
+ problem = spaces_before(token, prev, next,
+ min=conf['min-spaces-inside'],
+ max=conf['max-spaces-inside'],
+ min_desc='too few spaces inside brackets',
+ max_desc='too many spaces inside brackets')
+ if problem is not None:
+ yield problem
diff --git a/third_party/python/yamllint/yamllint/rules/colons.py b/third_party/python/yamllint/yamllint/rules/colons.py
new file mode 100644
index 0000000000..1a63cadab6
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/colons.py
@@ -0,0 +1,105 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to control the number of spaces before and after colons (``:``).
+
+.. rubric:: Options
+
+* ``max-spaces-before`` defines the maximal number of spaces allowed before
+ colons (use ``-1`` to disable).
+* ``max-spaces-after`` defines the maximal number of spaces allowed after
+ colons (use ``-1`` to disable).
+
+.. rubric:: Examples
+
+#. With ``colons: {max-spaces-before: 0, max-spaces-after: 1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object:
+ - a
+ - b
+ key: value
+
+#. With ``colons: {max-spaces-before: 1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ object :
+ - a
+ - b
+
+ the following code snippet would **FAIL**:
+ ::
+
+     object  :
+ - a
+ - b
+
+#. With ``colons: {max-spaces-after: 2}``
+
+ the following code snippet would **PASS**:
+ ::
+
+     first:  1
+     second: 2
+     third:  3
+
+ the following code snippet would **FAIL**:
+ ::
+
+     first: 1
+     2nd:   2
+     third: 3
+"""
+
+
+import yaml
+
+from yamllint.rules.common import is_explicit_key, spaces_after, spaces_before
+
+
+ID = 'colons'
+TYPE = 'token'
+CONF = {'max-spaces-before': int,
+ 'max-spaces-after': int}
+DEFAULT = {'max-spaces-before': 0,
+ 'max-spaces-after': 1}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if isinstance(token, yaml.ValueToken):
+ problem = spaces_before(token, prev, next,
+ max=conf['max-spaces-before'],
+ max_desc='too many spaces before colon')
+ if problem is not None:
+ yield problem
+
+ problem = spaces_after(token, prev, next,
+ max=conf['max-spaces-after'],
+ max_desc='too many spaces after colon')
+ if problem is not None:
+ yield problem
+
+ if isinstance(token, yaml.KeyToken) and is_explicit_key(token):
+ problem = spaces_after(token, prev, next,
+ max=conf['max-spaces-after'],
+ max_desc='too many spaces after question mark')
+ if problem is not None:
+ yield problem
diff --git a/third_party/python/yamllint/yamllint/rules/commas.py b/third_party/python/yamllint/yamllint/rules/commas.py
new file mode 100644
index 0000000000..bb73044545
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/commas.py
@@ -0,0 +1,131 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to control the number of spaces before and after commas (``,``).
+
+.. rubric:: Options
+
+* ``max-spaces-before`` defines the maximal number of spaces allowed before
+ commas (use ``-1`` to disable).
+* ``min-spaces-after`` defines the minimal number of spaces required after
+ commas.
+* ``max-spaces-after`` defines the maximal number of spaces allowed after
+ commas (use ``-1`` to disable).
+
+.. rubric:: Examples
+
+#. With ``commas: {max-spaces-before: 0}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ strange var:
+ [10, 20, 30, {x: 1, y: 2}]
+
+ the following code snippet would **FAIL**:
+ ::
+
+ strange var:
+ [10, 20 , 30, {x: 1, y: 2}]
+
+#. With ``commas: {max-spaces-before: 2}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ strange var:
+       [10  , 20 , 30,  {x: 1  , y: 2}]
+
+#. With ``commas: {max-spaces-before: -1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ strange var:
+ [10,
+ 20 , 30
+ , {x: 1, y: 2}]
+
+#. With ``commas: {min-spaces-after: 1, max-spaces-after: 1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ strange var:
+       [10, 20, 30, {x: 1, y: 2}]
+
+ the following code snippet would **FAIL**:
+ ::
+
+ strange var:
+ [10, 20,30, {x: 1, y: 2}]
+
+#. With ``commas: {min-spaces-after: 1, max-spaces-after: 3}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ strange var:
+       [10, 20,  30,  {x: 1,   y: 2}]
+
+#. With ``commas: {min-spaces-after: 0, max-spaces-after: 1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ strange var:
+ [10, 20,30, {x: 1, y: 2}]
+"""
+
+
+import yaml
+
+from yamllint.linter import LintProblem
+from yamllint.rules.common import spaces_after, spaces_before
+
+
+ID = 'commas'
+TYPE = 'token'
+CONF = {'max-spaces-before': int,
+ 'min-spaces-after': int,
+ 'max-spaces-after': int}
+DEFAULT = {'max-spaces-before': 0,
+ 'min-spaces-after': 1,
+ 'max-spaces-after': 1}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if isinstance(token, yaml.FlowEntryToken):
+ if (prev is not None and conf['max-spaces-before'] != -1 and
+ prev.end_mark.line < token.start_mark.line):
+ yield LintProblem(token.start_mark.line + 1,
+ max(1, token.start_mark.column),
+ 'too many spaces before comma')
+ else:
+ problem = spaces_before(token, prev, next,
+ max=conf['max-spaces-before'],
+ max_desc='too many spaces before comma')
+ if problem is not None:
+ yield problem
+
+ problem = spaces_after(token, prev, next,
+ min=conf['min-spaces-after'],
+ max=conf['max-spaces-after'],
+ min_desc='too few spaces after comma',
+ max_desc='too many spaces after comma')
+ if problem is not None:
+ yield problem
diff --git a/third_party/python/yamllint/yamllint/rules/comments.py b/third_party/python/yamllint/yamllint/rules/comments.py
new file mode 100644
index 0000000000..0122838f61
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/comments.py
@@ -0,0 +1,104 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to control the position and formatting of comments.
+
+.. rubric:: Options
+
+* Use ``require-starting-space`` to require a space character right after the
+ ``#``. Set to ``true`` to enable, ``false`` to disable.
+* Use ``ignore-shebangs`` to ignore a
+ `shebang <https://en.wikipedia.org/wiki/Shebang_(Unix)>`_ at the beginning of
+ the file when ``require-starting-space`` is set.
+* ``min-spaces-from-content`` is used to visually separate inline comments from
+ content. It defines the minimal required number of spaces between a comment
+ and its preceding content.
+
+.. rubric:: Examples
+
+#. With ``comments: {require-starting-space: true}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ # This sentence
+ # is a block comment
+
+ the following code snippet would **PASS**:
+ ::
+
+ ##############################
+ ## This is some documentation
+
+ the following code snippet would **FAIL**:
+ ::
+
+ #This sentence
+ #is a block comment
+
+#. With ``comments: {min-spaces-from-content: 2}``
+
+ the following code snippet would **PASS**:
+ ::
+
+     x = 2 ^ 127 - 1  # Mersenne prime number
+
+ the following code snippet would **FAIL**:
+ ::
+
+ x = 2 ^ 127 - 1 # Mersenne prime number
+"""
+
+
+import re
+
+from yamllint.linter import LintProblem
+
+
+ID = 'comments'
+TYPE = 'comment'
+CONF = {'require-starting-space': bool,
+ 'ignore-shebangs': bool,
+ 'min-spaces-from-content': int}
+DEFAULT = {'require-starting-space': True,
+ 'ignore-shebangs': True,
+ 'min-spaces-from-content': 2}
+
+
+def check(conf, comment):
+ if (conf['min-spaces-from-content'] != -1 and comment.is_inline() and
+ comment.pointer - comment.token_before.end_mark.pointer <
+ conf['min-spaces-from-content']):
+ yield LintProblem(comment.line_no, comment.column_no,
+ 'too few spaces before comment')
+
+ if conf['require-starting-space']:
+ text_start = comment.pointer + 1
+        while (text_start < len(comment.buffer) and
+               comment.buffer[text_start] == '#'):
+ text_start += 1
+ if text_start < len(comment.buffer):
+ if (conf['ignore-shebangs'] and
+ comment.line_no == 1 and
+ comment.column_no == 1 and
+ re.match(r'^!\S', comment.buffer[text_start:])):
+ return
+ elif comment.buffer[text_start] not in (' ', '\n', '\0'):
+ column = comment.column_no + text_start - comment.pointer
+ yield LintProblem(comment.line_no,
+ column,
+ 'missing starting space in comment')
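
This rule receives parser.Comment objects rather than tokens. A short sketch wiring it to token_or_comment_generator() from parser.py:

    from yamllint import parser
    from yamllint.rules import comments

    conf = dict(comments.DEFAULT)
    for elem in parser.token_or_comment_generator('key: 1  #comment\n'):
        if isinstance(elem, parser.Comment):
            for problem in comments.check(conf, elem):
                print(problem)  # 1:10: missing starting space in comment
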
diff --git a/third_party/python/yamllint/yamllint/rules/comments_indentation.py b/third_party/python/yamllint/yamllint/rules/comments_indentation.py
new file mode 100644
index 0000000000..22ab55d6d3
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/comments_indentation.py
@@ -0,0 +1,139 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to force comments to be indented like content.
+
+.. rubric:: Examples
+
+#. With ``comments-indentation: {}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ # Fibonacci
+ [0, 1, 1, 2, 3, 5]
+
+ the following code snippet would **FAIL**:
+ ::
+
+       # Fibonacci
+ [0, 1, 1, 2, 3, 5]
+
+ the following code snippet would **PASS**:
+ ::
+
+ list:
+ - 2
+ - 3
+ # - 4
+ - 5
+
+ the following code snippet would **FAIL**:
+ ::
+
+ list:
+ - 2
+ - 3
+ # - 4
+ - 5
+
+ the following code snippet would **PASS**:
+ ::
+
+ # This is the first object
+ obj1:
+ - item A
+ # - item B
+ # This is the second object
+ obj2: []
+
+ the following code snippet would **PASS**:
+ ::
+
+ # This sentence
+ # is a block comment
+
+ the following code snippet would **FAIL**:
+ ::
+
+ # This sentence
+      # is a block comment
+"""
+
+
+import yaml
+
+from yamllint.linter import LintProblem
+from yamllint.rules.common import get_line_indent
+
+
+ID = 'comments-indentation'
+TYPE = 'comment'
+
+
+# Case A:
+#
+# prev: line:
+# # commented line
+# current: line
+#
+# Case B:
+#
+# prev: line
+# # commented line 1
+# # commented line 2
+# current: line
+
+def check(conf, comment):
+ # Only check block comments
+ if (not isinstance(comment.token_before, yaml.StreamStartToken) and
+ comment.token_before.end_mark.line + 1 == comment.line_no):
+ return
+
+ next_line_indent = comment.token_after.start_mark.column
+ if isinstance(comment.token_after, yaml.StreamEndToken):
+ next_line_indent = 0
+
+ if isinstance(comment.token_before, yaml.StreamStartToken):
+ prev_line_indent = 0
+ else:
+ prev_line_indent = get_line_indent(comment.token_before)
+
+ # In the following case only the next line indent is valid:
+ # list:
+ # # comment
+ # - 1
+ # - 2
+ if prev_line_indent <= next_line_indent:
+ prev_line_indent = next_line_indent
+
+    # If two indents are valid but a previous comment went back to normal
+    # indent, force the next ones to do the same. In other words, avoid this:
+ # list:
+ # - 1
+ # # comment on valid indent (0)
+ # # comment on valid indent (4)
+ # other-list:
+ # - 2
+ if (comment.comment_before is not None and
+ not comment.comment_before.is_inline()):
+ prev_line_indent = comment.comment_before.column_no - 1
+
+ if (comment.column_no - 1 != prev_line_indent and
+ comment.column_no - 1 != next_line_indent):
+ yield LintProblem(comment.line_no, comment.column_no,
+ 'comment not indented like content')
diff --git a/third_party/python/yamllint/yamllint/rules/common.py b/third_party/python/yamllint/yamllint/rules/common.py
new file mode 100644
index 0000000000..989345965c
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/common.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import string
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+def spaces_after(token, prev, next, min=-1, max=-1,
+ min_desc=None, max_desc=None):
+ if next is not None and token.end_mark.line == next.start_mark.line:
+ spaces = next.start_mark.pointer - token.end_mark.pointer
+ if max != - 1 and spaces > max:
+ return LintProblem(token.start_mark.line + 1,
+ next.start_mark.column, max_desc)
+ elif min != - 1 and spaces < min:
+ return LintProblem(token.start_mark.line + 1,
+ next.start_mark.column + 1, min_desc)
+
+
+def spaces_before(token, prev, next, min=-1, max=-1,
+ min_desc=None, max_desc=None):
+ if (prev is not None and prev.end_mark.line == token.start_mark.line and
+ # Discard tokens (only scalars?) that end at the start of next line
+ (prev.end_mark.pointer == 0 or
+ prev.end_mark.buffer[prev.end_mark.pointer - 1] != '\n')):
+ spaces = token.start_mark.pointer - prev.end_mark.pointer
+ if max != - 1 and spaces > max:
+ return LintProblem(token.start_mark.line + 1,
+ token.start_mark.column, max_desc)
+ elif min != - 1 and spaces < min:
+ return LintProblem(token.start_mark.line + 1,
+ token.start_mark.column + 1, min_desc)
+
+
+def get_line_indent(token):
+ """Finds the indent of the line the token starts in."""
+ start = token.start_mark.buffer.rfind('\n', 0,
+ token.start_mark.pointer) + 1
+ content = start
+ while token.start_mark.buffer[content] == ' ':
+ content += 1
+ return content - start
+
+
+def get_real_end_line(token):
+ """Finds the line on which the token really ends.
+
+ With pyyaml, scalar tokens often end on a next line.
+ """
+ end_line = token.end_mark.line + 1
+
+ if not isinstance(token, yaml.ScalarToken):
+ return end_line
+
+ pos = token.end_mark.pointer - 1
+ while (pos >= token.start_mark.pointer - 1 and
+ token.end_mark.buffer[pos] in string.whitespace):
+ if token.end_mark.buffer[pos] == '\n':
+ end_line -= 1
+ pos -= 1
+ return end_line
+
+
+def is_explicit_key(token):
+ # explicit key:
+ # ? key
+ # : v
+ # or
+ # ?
+ # key
+ # : v
+ return (token.start_mark.pointer < token.end_mark.pointer and
+ token.start_mark.buffer[token.start_mark.pointer] == '?')
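
These helpers rely on pyyaml marks retaining the full buffer, which holds when the input is a string (as linter._run() guarantees). A small sketch under that assumption:

    import yaml
    from yamllint.rules.common import get_line_indent

    buf = 'map:\n    nested: 1\n'
    for token in yaml.scan(buf, Loader=yaml.BaseLoader):
        if isinstance(token, yaml.ScalarToken):
            print(token.value, get_line_indent(token))  # map 0 / nested 4 / 1 4
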
diff --git a/third_party/python/yamllint/yamllint/rules/document_end.py b/third_party/python/yamllint/yamllint/rules/document_end.py
new file mode 100644
index 0000000000..e98aac1d12
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/document_end.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to require or forbid the use of document end marker (``...``).
+
+.. rubric:: Options
+
+* Set ``present`` to ``true`` when the document end marker is required, or to
+ ``false`` when it is forbidden.
+
+.. rubric:: Examples
+
+#. With ``document-end: {present: true}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ ---
+ this:
+ is: [a, document]
+ ...
+ ---
+ - this
+ - is: another one
+ ...
+
+ the following code snippet would **FAIL**:
+ ::
+
+ ---
+ this:
+ is: [a, document]
+ ---
+ - this
+ - is: another one
+ ...
+
+#. With ``document-end: {present: false}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ ---
+ this:
+ is: [a, document]
+ ---
+ - this
+ - is: another one
+
+ the following code snippet would **FAIL**:
+ ::
+
+ ---
+ this:
+ is: [a, document]
+ ...
+ ---
+ - this
+ - is: another one
+"""
+
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+ID = 'document-end'
+TYPE = 'token'
+CONF = {'present': bool}
+DEFAULT = {'present': True}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if conf['present']:
+ is_stream_end = isinstance(token, yaml.StreamEndToken)
+ is_start = isinstance(token, yaml.DocumentStartToken)
+ prev_is_end_or_stream_start = isinstance(
+ prev, (yaml.DocumentEndToken, yaml.StreamStartToken)
+ )
+
+ if is_stream_end and not prev_is_end_or_stream_start:
+ yield LintProblem(token.start_mark.line, 1,
+ 'missing document end "..."')
+ elif is_start and not prev_is_end_or_stream_start:
+ yield LintProblem(token.start_mark.line + 1, 1,
+ 'missing document end "..."')
+
+ else:
+ if isinstance(token, yaml.DocumentEndToken):
+ yield LintProblem(token.start_mark.line + 1,
+ token.start_mark.column + 1,
+ 'found forbidden document end "..."')
diff --git a/third_party/python/yamllint/yamllint/rules/document_start.py b/third_party/python/yamllint/yamllint/rules/document_start.py
new file mode 100644
index 0000000000..36c3d8e8db
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/document_start.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to require or forbid the use of the document start marker (``---``).
+
+.. rubric:: Options
+
+* Set ``present`` to ``true`` when the document start marker is required, or to
+ ``false`` when it is forbidden.
+
+.. rubric:: Examples
+
+#. With ``document-start: {present: true}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ ---
+ this:
+ is: [a, document]
+ ---
+ - this
+ - is: another one
+
+ the following code snippet would **FAIL**:
+ ::
+
+ this:
+ is: [a, document]
+ ---
+ - this
+ - is: another one
+
+#. With ``document-start: {present: false}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ this:
+ is: [a, document]
+ ...
+
+ the following code snippet would **FAIL**:
+ ::
+
+ ---
+ this:
+ is: [a, document]
+ ...
+"""
+
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+ID = 'document-start'
+TYPE = 'token'
+CONF = {'present': bool}
+DEFAULT = {'present': True}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if conf['present']:
+ if (isinstance(prev, (yaml.StreamStartToken,
+ yaml.DocumentEndToken,
+ yaml.DirectiveToken)) and
+ not isinstance(token, (yaml.DocumentStartToken,
+ yaml.DirectiveToken,
+ yaml.StreamEndToken))):
+ yield LintProblem(token.start_mark.line + 1, 1,
+ 'missing document start "---"')
+
+ else:
+ if isinstance(token, yaml.DocumentStartToken):
+ yield LintProblem(token.start_mark.line + 1,
+ token.start_mark.column + 1,
+ 'found forbidden document start "---"')
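
The rule decides from pairs of adjacent tokens, so it helps to see which tokens PyYAML actually emits around ``---``. A small illustration:
::

    import yaml

    # Each '---' produces a DocumentStartToken; StreamStartToken,
    # DocumentEndToken and DirectiveToken are the predecessors after which
    # the rule expects to see one.
    for token in yaml.scan('---\na: 1\n---\nb: 2\n'):
        print(type(token).__name__)
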
diff --git a/third_party/python/yamllint/yamllint/rules/empty_lines.py b/third_party/python/yamllint/yamllint/rules/empty_lines.py
new file mode 100644
index 0000000000..d9a8c4d173
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/empty_lines.py
@@ -0,0 +1,108 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to set a maximal number of allowed consecutive blank lines.
+
+.. rubric:: Options
+
+* ``max`` defines the maximal number of empty lines allowed in the document.
+* ``max-start`` defines the maximal number of empty lines allowed at the
+ beginning of the file. This option takes precedence over ``max``.
+* ``max-end`` defines the maximal number of empty lines allowed at the end of
+ the file. This option takes precedence over ``max``.
+
+.. rubric:: Examples
+
+#. With ``empty-lines: {max: 1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - foo:
+ - 1
+ - 2
+
+ - bar: [3, 4]
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - foo:
+ - 1
+ - 2
+
+
+ - bar: [3, 4]
+"""
+
+
+from yamllint.linter import LintProblem
+
+
+ID = 'empty-lines'
+TYPE = 'line'
+CONF = {'max': int,
+ 'max-start': int,
+ 'max-end': int}
+DEFAULT = {'max': 2,
+ 'max-start': 0,
+ 'max-end': 0}
+
+
+def check(conf, line):
+ if line.start == line.end and line.end < len(line.buffer):
+ # Only alert on the last blank line of a series
+ if (line.end + 2 <= len(line.buffer) and
+ line.buffer[line.end:line.end + 2] == '\n\n'):
+ return
+ elif (line.end + 4 <= len(line.buffer) and
+ line.buffer[line.end:line.end + 4] == '\r\n\r\n'):
+ return
+
+ blank_lines = 0
+
+ start = line.start
+ while start >= 2 and line.buffer[start - 2:start] == '\r\n':
+ blank_lines += 1
+ start -= 2
+ while start >= 1 and line.buffer[start - 1] == '\n':
+ blank_lines += 1
+ start -= 1
+
+ max = conf['max']
+
+ # Special case: start of document
+ if start == 0:
+ blank_lines += 1 # first line doesn't have a preceding \n
+ max = conf['max-start']
+
+ # Special case: end of document
+ # NOTE: The last line of a file is always supposed to end with a new
+    # line. See the POSIX definition of a line at:
+    # http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_206
+ if ((line.end == len(line.buffer) - 1 and
+ line.buffer[line.end] == '\n') or
+ (line.end == len(line.buffer) - 2 and
+ line.buffer[line.end:line.end + 2] == '\r\n')):
+ # Allow the exception of the one-byte file containing '\n'
+ if line.end == 0:
+ return
+
+ max = conf['max-end']
+
+ if blank_lines > max:
+ yield LintProblem(line.line_no, 1, 'too many blank lines (%d > %d)'
+ % (blank_lines, max))
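
Because problems are only reported on the last blank line of a run, one violation yields a single problem even for long runs. A minimal sketch, using the same public ``YamlLintConfig``/``linter.run()`` entry points assumed above (the buffer holds three consecutive blank lines against the default ``max: 2``):
::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules:\n  empty-lines:\n    max: 2\n')
    buf = 'a: 1\n\n\n\nb: 2\n'  # three blank lines between the two keys
    for problem in linter.run(buf, conf):
        print(problem.line, problem.desc)  # 4 too many blank lines (3 > 2)
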
diff --git a/third_party/python/yamllint/yamllint/rules/empty_values.py b/third_party/python/yamllint/yamllint/rules/empty_values.py
new file mode 100644
index 0000000000..bb4982bfdb
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/empty_values.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2017 Greg Dubicki
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to prevent nodes with empty content, which implicitly resolve to
+``null`` values.
+
+.. rubric:: Options
+
+* Use ``forbid-in-block-mappings`` to prevent empty values in block mappings.
+* Use ``forbid-in-flow-mappings`` to prevent empty values in flow mappings.
+
+.. rubric:: Examples
+
+#. With ``empty-values: {forbid-in-block-mappings: true}``
+
+ the following code snippets would **PASS**:
+ ::
+
+ some-mapping:
+ sub-element: correctly indented
+
+ ::
+
+ explicitly-null: null
+
+ the following code snippets would **FAIL**:
+ ::
+
+ some-mapping:
+ sub-element: incorrectly indented
+
+ ::
+
+ implicitly-null:
+
+#. With ``empty-values: {forbid-in-flow-mappings: true}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ {prop: null}
+ {a: 1, b: 2, c: 3}
+
+ the following code snippets would **FAIL**:
+ ::
+
+ {prop: }
+
+ ::
+
+ {a: 1, b:, c: 3}
+
+"""
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+ID = 'empty-values'
+TYPE = 'token'
+CONF = {'forbid-in-block-mappings': bool,
+ 'forbid-in-flow-mappings': bool}
+DEFAULT = {'forbid-in-block-mappings': True,
+ 'forbid-in-flow-mappings': True}
+
+
+def check(conf, token, prev, next, nextnext, context):
+
+ if conf['forbid-in-block-mappings']:
+ if isinstance(token, yaml.ValueToken) and isinstance(next, (
+ yaml.KeyToken, yaml.BlockEndToken)):
+ yield LintProblem(token.start_mark.line + 1,
+ token.end_mark.column + 1,
+ 'empty value in block mapping')
+
+ if conf['forbid-in-flow-mappings']:
+ if isinstance(token, yaml.ValueToken) and isinstance(next, (
+ yaml.FlowEntryToken, yaml.FlowMappingEndToken)):
+ yield LintProblem(token.start_mark.line + 1,
+ token.end_mark.column + 1,
+ 'empty value in flow mapping')
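
The implicit ``null`` that motivates this rule is easy to reproduce with PyYAML directly:
::

    import yaml

    # An empty value silently loads as None, in block and flow mappings alike.
    print(yaml.safe_load('implicitly-null:\n'))  # {'implicitly-null': None}
    print(yaml.safe_load('{prop: }'))            # {'prop': None}
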
diff --git a/third_party/python/yamllint/yamllint/rules/hyphens.py b/third_party/python/yamllint/yamllint/rules/hyphens.py
new file mode 100644
index 0000000000..df38b4c519
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/hyphens.py
@@ -0,0 +1,88 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to control the number of spaces after hyphens (``-``).
+
+.. rubric:: Options
+
+* ``max-spaces-after`` defines the maximal number of spaces allowed after
+ hyphens.
+
+.. rubric:: Examples
+
+#. With ``hyphens: {max-spaces-after: 1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - first list:
+ - a
+ - b
+ - - 1
+ - 2
+ - 3
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - first list:
+ - a
+ - b
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - - 1
+ - 2
+ - 3
+
+#. With ``hyphens: {max-spaces-after: 3}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - key
+ - key2
+ - key42
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - key
+ - key2
+ - key42
+"""
+
+
+import yaml
+
+from yamllint.rules.common import spaces_after
+
+
+ID = 'hyphens'
+TYPE = 'token'
+CONF = {'max-spaces-after': int}
+DEFAULT = {'max-spaces-after': 1}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if isinstance(token, yaml.BlockEntryToken):
+ problem = spaces_after(token, prev, next,
+ max=conf['max-spaces-after'],
+ max_desc='too many spaces after hyphen')
+ if problem is not None:
+ yield problem
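
``spaces_after()`` comes from ``yamllint.rules.common`` (not shown in this hunk) and measures the gap between the ``-`` token and the next token on the same line. A minimal sketch of the rule firing through the assumed public API:
::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules:\n  hyphens:\n    max-spaces-after: 1\n')
    for problem in linter.run('-  two spaces after the hyphen\n', conf):
        print(problem.line, problem.column, problem.desc)
    # -> 1 3 too many spaces after hyphen
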
diff --git a/third_party/python/yamllint/yamllint/rules/indentation.py b/third_party/python/yamllint/yamllint/rules/indentation.py
new file mode 100644
index 0000000000..d83eb6594b
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/indentation.py
@@ -0,0 +1,575 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to control the indentation.
+
+.. rubric:: Options
+
+* ``spaces`` defines the indentation width, in spaces. Set either to an integer
+ (e.g. ``2`` or ``4``, representing the number of spaces in an indentation
+ level) or to ``consistent`` to allow any number, as long as it remains the
+ same within the file.
+* ``indent-sequences`` defines whether block sequences should be indented or
+ not (when in a mapping, this indentation is not mandatory -- some people
+ perceive the ``-`` as part of the indentation). Possible values: ``true``,
+ ``false``, ``whatever`` and ``consistent``. ``consistent`` requires either
+ all block sequences to be indented, or none to be. ``whatever`` means either
+ indenting or not indenting individual block sequences is OK.
+* ``check-multi-line-strings`` defines whether to lint indentation in
+ multi-line strings. Set to ``true`` to enable, ``false`` to disable.
+
+.. rubric:: Examples
+
+#. With ``indentation: {spaces: 1}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ history:
+ - name: Unix
+ date: 1969
+ - name: Linux
+ date: 1991
+ nest:
+ recurse:
+ - haystack:
+ needle
+
+#. With ``indentation: {spaces: 4}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ history:
+ - name: Unix
+ date: 1969
+ - name: Linux
+ date: 1991
+ nest:
+ recurse:
+ - haystack:
+ needle
+
+ the following code snippet would **FAIL**:
+ ::
+
+ history:
+ - name: Unix
+ date: 1969
+ - name: Linux
+ date: 1991
+ nest:
+ recurse:
+ - haystack:
+ needle
+
+#. With ``indentation: {spaces: consistent}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ history:
+ - name: Unix
+ date: 1969
+ - name: Linux
+ date: 1991
+ nest:
+ recurse:
+ - haystack:
+ needle
+
+ the following code snippet would **FAIL**:
+ ::
+
+ some:
+ Russian:
+ dolls
+
+#. With ``indentation: {spaces: 2, indent-sequences: false}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ list:
+ - flying
+ - spaghetti
+ - monster
+
+ the following code snippet would **FAIL**:
+ ::
+
+ list:
+ - flying
+ - spaghetti
+ - monster
+
+#. With ``indentation: {spaces: 2, indent-sequences: whatever}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ list:
+ - flying:
+ - spaghetti
+ - monster
+ - not flying:
+ - spaghetti
+ - sauce
+
+#. With ``indentation: {spaces: 2, indent-sequences: consistent}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - flying:
+ - spaghetti
+ - monster
+ - not flying:
+ - spaghetti
+ - sauce
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - flying:
+ - spaghetti
+ - monster
+ - not flying:
+ - spaghetti
+ - sauce
+
+#. With ``indentation: {spaces: 4, check-multi-line-strings: true}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ Blaise Pascal:
+ Je vous écris une longue lettre parce que
+ je n'ai pas le temps d'en écrire une courte.
+
+ the following code snippet would **PASS**:
+ ::
+
+ Blaise Pascal: Je vous écris une longue lettre parce que
+ je n'ai pas le temps d'en écrire une courte.
+
+ the following code snippet would **FAIL**:
+ ::
+
+ Blaise Pascal: Je vous écris une longue lettre parce que
+ je n'ai pas le temps d'en écrire une courte.
+
+ the following code snippet would **FAIL**:
+ ::
+
+ C code:
+ void main() {
+ printf("foo");
+ }
+
+ the following code snippet would **PASS**:
+ ::
+
+ C code:
+ void main() {
+ printf("bar");
+ }
+"""
+
+import yaml
+
+from yamllint.linter import LintProblem
+from yamllint.rules.common import get_real_end_line, is_explicit_key
+
+
+ID = 'indentation'
+TYPE = 'token'
+CONF = {'spaces': (int, 'consistent'),
+ 'indent-sequences': (bool, 'whatever', 'consistent'),
+ 'check-multi-line-strings': bool}
+DEFAULT = {'spaces': 'consistent',
+ 'indent-sequences': True,
+ 'check-multi-line-strings': False}
+
+ROOT, B_MAP, F_MAP, B_SEQ, F_SEQ, B_ENT, KEY, VAL = range(8)
+labels = ('ROOT', 'B_MAP', 'F_MAP', 'B_SEQ', 'F_SEQ', 'B_ENT', 'KEY', 'VAL')
+
+
+class Parent(object):
+ def __init__(self, type, indent, line_indent=None):
+ self.type = type
+ self.indent = indent
+ self.line_indent = line_indent
+ self.explicit_key = False
+ self.implicit_block_seq = False
+
+ def __repr__(self):
+ return '%s:%d' % (labels[self.type], self.indent)
+
+
+def check_scalar_indentation(conf, token, context):
+ if token.start_mark.line == token.end_mark.line:
+ return
+
+ def compute_expected_indent(found_indent):
+ def detect_indent(base_indent):
+ if not isinstance(context['spaces'], int):
+ context['spaces'] = found_indent - base_indent
+ return base_indent + context['spaces']
+
+ if token.plain:
+ return token.start_mark.column
+ elif token.style in ('"', "'"):
+ return token.start_mark.column + 1
+ elif token.style in ('>', '|'):
+ if context['stack'][-1].type == B_ENT:
+ # - >
+ # multi
+ # line
+ return detect_indent(token.start_mark.column)
+ elif context['stack'][-1].type == KEY:
+ assert context['stack'][-1].explicit_key
+ # - ? >
+ # multi-line
+ # key
+ # : >
+ # multi-line
+ # value
+ return detect_indent(token.start_mark.column)
+ elif context['stack'][-1].type == VAL:
+ if token.start_mark.line + 1 > context['cur_line']:
+ # - key:
+ # >
+ # multi
+ # line
+ return detect_indent(context['stack'][-1].indent)
+ elif context['stack'][-2].explicit_key:
+ # - ? key
+ # : >
+ # multi-line
+ # value
+ return detect_indent(token.start_mark.column)
+ else:
+ # - key: >
+ # multi
+ # line
+ return detect_indent(context['stack'][-2].indent)
+ else:
+ return detect_indent(context['stack'][-1].indent)
+
+ expected_indent = None
+
+ line_no = token.start_mark.line + 1
+
+ line_start = token.start_mark.pointer
+ while True:
+ line_start = token.start_mark.buffer.find(
+ '\n', line_start, token.end_mark.pointer - 1) + 1
+ if line_start == 0:
+ break
+ line_no += 1
+
+ indent = 0
+ while token.start_mark.buffer[line_start + indent] == ' ':
+ indent += 1
+ if token.start_mark.buffer[line_start + indent] == '\n':
+ continue
+
+ if expected_indent is None:
+ expected_indent = compute_expected_indent(indent)
+
+ if indent != expected_indent:
+ yield LintProblem(line_no, indent + 1,
+ 'wrong indentation: expected %d but found %d' %
+ (expected_indent, indent))
+
+
+def _check(conf, token, prev, next, nextnext, context):
+ if 'stack' not in context:
+ context['stack'] = [Parent(ROOT, 0)]
+ context['cur_line'] = -1
+ context['spaces'] = conf['spaces']
+ context['indent-sequences'] = conf['indent-sequences']
+
+ # Step 1: Lint
+
+ is_visible = (
+ not isinstance(token, (yaml.StreamStartToken, yaml.StreamEndToken)) and
+ not isinstance(token, yaml.BlockEndToken) and
+ not (isinstance(token, yaml.ScalarToken) and token.value == ''))
+ first_in_line = (is_visible and
+ token.start_mark.line + 1 > context['cur_line'])
+
+ def detect_indent(base_indent, next):
+ if not isinstance(context['spaces'], int):
+ context['spaces'] = next.start_mark.column - base_indent
+ return base_indent + context['spaces']
+
+ if first_in_line:
+ found_indentation = token.start_mark.column
+ expected = context['stack'][-1].indent
+
+ if isinstance(token, (yaml.FlowMappingEndToken,
+ yaml.FlowSequenceEndToken)):
+ expected = context['stack'][-1].line_indent
+ elif (context['stack'][-1].type == KEY and
+ context['stack'][-1].explicit_key and
+ not isinstance(token, yaml.ValueToken)):
+ expected = detect_indent(expected, token)
+
+ if found_indentation != expected:
+ yield LintProblem(token.start_mark.line + 1, found_indentation + 1,
+ 'wrong indentation: expected %d but found %d' %
+ (expected, found_indentation))
+
+ if (isinstance(token, yaml.ScalarToken) and
+ conf['check-multi-line-strings']):
+ for problem in check_scalar_indentation(conf, token, context):
+ yield problem
+
+ # Step 2.a:
+
+ if is_visible:
+ context['cur_line'] = get_real_end_line(token)
+ if first_in_line:
+ context['cur_line_indent'] = found_indentation
+
+ # Step 2.b: Update state
+
+ if isinstance(token, yaml.BlockMappingStartToken):
+ # - a: 1
+ # or
+ # - ? a
+ # : 1
+ # or
+ # - ?
+ # a
+ # : 1
+ assert isinstance(next, yaml.KeyToken)
+ assert next.start_mark.line == token.start_mark.line
+
+ indent = token.start_mark.column
+
+ context['stack'].append(Parent(B_MAP, indent))
+
+ elif isinstance(token, yaml.FlowMappingStartToken):
+ if next.start_mark.line == token.start_mark.line:
+ # - {a: 1, b: 2}
+ indent = next.start_mark.column
+ else:
+ # - {
+ # a: 1, b: 2
+ # }
+ indent = detect_indent(context['cur_line_indent'], next)
+
+ context['stack'].append(Parent(F_MAP, indent,
+ line_indent=context['cur_line_indent']))
+
+ elif isinstance(token, yaml.BlockSequenceStartToken):
+ # - - a
+ # - b
+ assert isinstance(next, yaml.BlockEntryToken)
+ assert next.start_mark.line == token.start_mark.line
+
+ indent = token.start_mark.column
+
+ context['stack'].append(Parent(B_SEQ, indent))
+
+ elif (isinstance(token, yaml.BlockEntryToken) and
+ # in case of an empty entry
+ not isinstance(next, (yaml.BlockEntryToken, yaml.BlockEndToken))):
+ # It looks like pyyaml doesn't issue BlockSequenceStartTokens when the
+        # list is not indented. We need to compensate for that.
+ if context['stack'][-1].type != B_SEQ:
+ context['stack'].append(Parent(B_SEQ, token.start_mark.column))
+ context['stack'][-1].implicit_block_seq = True
+
+ if next.start_mark.line == token.end_mark.line:
+ # - item 1
+ # - item 2
+ indent = next.start_mark.column
+ elif next.start_mark.column == token.start_mark.column:
+ # -
+ # key: value
+ indent = next.start_mark.column
+ else:
+ # -
+ # item 1
+ # -
+ # key:
+ # value
+ indent = detect_indent(token.start_mark.column, next)
+
+ context['stack'].append(Parent(B_ENT, indent))
+
+ elif isinstance(token, yaml.FlowSequenceStartToken):
+ if next.start_mark.line == token.start_mark.line:
+ # - [a, b]
+ indent = next.start_mark.column
+ else:
+ # - [
+ # a, b
+ # ]
+ indent = detect_indent(context['cur_line_indent'], next)
+
+ context['stack'].append(Parent(F_SEQ, indent,
+ line_indent=context['cur_line_indent']))
+
+ elif isinstance(token, yaml.KeyToken):
+ indent = context['stack'][-1].indent
+
+ context['stack'].append(Parent(KEY, indent))
+
+ context['stack'][-1].explicit_key = is_explicit_key(token)
+
+ elif isinstance(token, yaml.ValueToken):
+ assert context['stack'][-1].type == KEY
+
+ # Special cases:
+ # key: &anchor
+ # value
+ # and:
+ # key: !!tag
+ # value
+ if isinstance(next, (yaml.AnchorToken, yaml.TagToken)):
+ if (next.start_mark.line == prev.start_mark.line and
+ next.start_mark.line < nextnext.start_mark.line):
+ next = nextnext
+
+ # Only if value is not empty
+ if not isinstance(next, (yaml.BlockEndToken,
+ yaml.FlowMappingEndToken,
+ yaml.FlowSequenceEndToken,
+ yaml.KeyToken)):
+ if context['stack'][-1].explicit_key:
+ # ? k
+ # : value
+ # or
+ # ? k
+ # :
+ # value
+ indent = detect_indent(context['stack'][-1].indent, next)
+ elif next.start_mark.line == prev.start_mark.line:
+ # k: value
+ indent = next.start_mark.column
+ elif isinstance(next, (yaml.BlockSequenceStartToken,
+ yaml.BlockEntryToken)):
+ # NOTE: We add BlockEntryToken in the test above because
+ # sometimes BlockSequenceStartToken are not issued. Try
+ # yaml.scan()ning this:
+ # '- lib:\n'
+ # ' - var\n'
+ if context['indent-sequences'] is False:
+ indent = context['stack'][-1].indent
+ elif context['indent-sequences'] is True:
+ if (context['spaces'] == 'consistent' and
+ next.start_mark.column -
+ context['stack'][-1].indent == 0):
+ # In this case, the block sequence item is not indented
+ # (while it should be), but we don't know yet the
+ # indentation it should have (because `spaces` is
+ # `consistent` and its value has not been computed yet
+ # -- this is probably the beginning of the document).
+ # So we choose an arbitrary value (2).
+ indent = 2
+ else:
+ indent = detect_indent(context['stack'][-1].indent,
+ next)
+ else: # 'whatever' or 'consistent'
+ if next.start_mark.column == context['stack'][-1].indent:
+ # key:
+ # - e1
+ # - e2
+ if context['indent-sequences'] == 'consistent':
+ context['indent-sequences'] = False
+ indent = context['stack'][-1].indent
+ else:
+ if context['indent-sequences'] == 'consistent':
+ context['indent-sequences'] = True
+ # key:
+ # - e1
+ # - e2
+ indent = detect_indent(context['stack'][-1].indent,
+ next)
+ else:
+ # k:
+ # value
+ indent = detect_indent(context['stack'][-1].indent, next)
+
+ context['stack'].append(Parent(VAL, indent))
+
+ consumed_current_token = False
+ while True:
+ if (context['stack'][-1].type == F_SEQ and
+ isinstance(token, yaml.FlowSequenceEndToken) and
+ not consumed_current_token):
+ context['stack'].pop()
+ consumed_current_token = True
+
+ elif (context['stack'][-1].type == F_MAP and
+ isinstance(token, yaml.FlowMappingEndToken) and
+ not consumed_current_token):
+ context['stack'].pop()
+ consumed_current_token = True
+
+ elif (context['stack'][-1].type in (B_MAP, B_SEQ) and
+ isinstance(token, yaml.BlockEndToken) and
+ not context['stack'][-1].implicit_block_seq and
+ not consumed_current_token):
+ context['stack'].pop()
+ consumed_current_token = True
+
+ elif (context['stack'][-1].type == B_ENT and
+ not isinstance(token, yaml.BlockEntryToken) and
+ context['stack'][-2].implicit_block_seq and
+ not isinstance(token, (yaml.AnchorToken, yaml.TagToken)) and
+ not isinstance(next, yaml.BlockEntryToken)):
+ context['stack'].pop()
+ context['stack'].pop()
+
+ elif (context['stack'][-1].type == B_ENT and
+ isinstance(next, (yaml.BlockEntryToken, yaml.BlockEndToken))):
+ context['stack'].pop()
+
+ elif (context['stack'][-1].type == VAL and
+ not isinstance(token, yaml.ValueToken) and
+ not isinstance(token, (yaml.AnchorToken, yaml.TagToken))):
+ assert context['stack'][-2].type == KEY
+ context['stack'].pop()
+ context['stack'].pop()
+
+ elif (context['stack'][-1].type == KEY and
+ isinstance(next, (yaml.BlockEndToken,
+ yaml.FlowMappingEndToken,
+ yaml.FlowSequenceEndToken,
+ yaml.KeyToken))):
+ # A key without a value: it's part of a set. Let's drop this key
+ # and leave room for the next one.
+ context['stack'].pop()
+
+ else:
+ break
+
+
+def check(conf, token, prev, next, nextnext, context):
+ try:
+ for problem in _check(conf, token, prev, next, nextnext, context):
+ yield problem
+ except AssertionError:
+ yield LintProblem(token.start_mark.line + 1,
+ token.start_mark.column + 1,
+ 'cannot infer indentation: unexpected token')
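
The ``spaces: consistent`` mode locks onto the first indentation increment it sees, so the "Russian dolls" example from the docstring fails on the second, wider increment. A minimal sketch (same assumed ``linter.run()`` entry point; the expected output follows from the first increment being two spaces):
::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules:\n  indentation:\n    spaces: consistent\n')
    buf = 'some:\n  Russian:\n      dolls\n'  # two spaces, then four
    for problem in linter.run(buf, conf):
        print(problem.line, problem.desc)
    # -> 3 wrong indentation: expected 4 but found 6
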
diff --git a/third_party/python/yamllint/yamllint/rules/key_duplicates.py b/third_party/python/yamllint/yamllint/rules/key_duplicates.py
new file mode 100644
index 0000000000..bd38b14345
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/key_duplicates.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to prevent multiple entries with the same key in mappings.
+
+.. rubric:: Examples
+
+#. With ``key-duplicates: {}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - key 1: v
+ key 2: val
+ key 3: value
+ - {a: 1, b: 2, c: 3}
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - key 1: v
+ key 2: val
+ key 1: value
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - {a: 1, b: 2, b: 3}
+
+ the following code snippet would **FAIL**:
+ ::
+
+ duplicated key: 1
+ "duplicated key": 2
+
+ other duplication: 1
+ ? >-
+ other
+ duplication
+ : 2
+"""
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+ID = 'key-duplicates'
+TYPE = 'token'
+
+MAP, SEQ = range(2)
+
+
+class Parent(object):
+ def __init__(self, type):
+ self.type = type
+ self.keys = []
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if 'stack' not in context:
+ context['stack'] = []
+
+ if isinstance(token, (yaml.BlockMappingStartToken,
+ yaml.FlowMappingStartToken)):
+ context['stack'].append(Parent(MAP))
+ elif isinstance(token, (yaml.BlockSequenceStartToken,
+ yaml.FlowSequenceStartToken)):
+ context['stack'].append(Parent(SEQ))
+ elif isinstance(token, (yaml.BlockEndToken,
+ yaml.FlowMappingEndToken,
+ yaml.FlowSequenceEndToken)):
+ context['stack'].pop()
+ elif (isinstance(token, yaml.KeyToken) and
+ isinstance(next, yaml.ScalarToken)):
+ # This check is done because KeyTokens can be found inside flow
+ # sequences... strange, but allowed.
+ if len(context['stack']) > 0 and context['stack'][-1].type == MAP:
+ if (next.value in context['stack'][-1].keys and
+ # `<<` is "merge key", see http://yaml.org/type/merge.html
+ next.value != '<<'):
+ yield LintProblem(
+ next.start_mark.line + 1, next.start_mark.column + 1,
+ 'duplication of key "%s" in mapping' % next.value)
+ else:
+ context['stack'][-1].keys.append(next.value)
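
PyYAML itself accepts duplicate keys and silently keeps the last value, so without this rule the damage is invisible at load time:
::

    import yaml

    # 'key 1' appears twice; the first value is dropped without warning.
    print(yaml.safe_load('key 1: v\nkey 2: val\nkey 1: value\n'))
    # -> {'key 1': 'value', 'key 2': 'val'}
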
diff --git a/third_party/python/yamllint/yamllint/rules/key_ordering.py b/third_party/python/yamllint/yamllint/rules/key_ordering.py
new file mode 100644
index 0000000000..1ca992b66e
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/key_ordering.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2017 Johannes F. Knauf
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to enforce alphabetical ordering of keys in mappings. The sorting
+order uses the Unicode code point number. As a result, the ordering is
+case-sensitive and not accent-friendly (see examples below).
+
+.. rubric:: Examples
+
+#. With ``key-ordering: {}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - key 1: v
+ key 2: val
+ key 3: value
+ - {a: 1, b: 2, c: 3}
+ - T-shirt: 1
+ T-shirts: 2
+ t-shirt: 3
+ t-shirts: 4
+ - hair: true
+ hais: true
+ haïr: true
+ haïssable: true
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - key 2: v
+ key 1: val
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - {b: 1, a: 2}
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - T-shirt: 1
+ t-shirt: 2
+ T-shirts: 3
+ t-shirts: 4
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - haïr: true
+ hais: true
+"""
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+ID = 'key-ordering'
+TYPE = 'token'
+
+MAP, SEQ = range(2)
+
+
+class Parent(object):
+ def __init__(self, type):
+ self.type = type
+ self.keys = []
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if 'stack' not in context:
+ context['stack'] = []
+
+ if isinstance(token, (yaml.BlockMappingStartToken,
+ yaml.FlowMappingStartToken)):
+ context['stack'].append(Parent(MAP))
+ elif isinstance(token, (yaml.BlockSequenceStartToken,
+ yaml.FlowSequenceStartToken)):
+ context['stack'].append(Parent(SEQ))
+ elif isinstance(token, (yaml.BlockEndToken,
+ yaml.FlowMappingEndToken,
+ yaml.FlowSequenceEndToken)):
+ context['stack'].pop()
+ elif (isinstance(token, yaml.KeyToken) and
+ isinstance(next, yaml.ScalarToken)):
+ # This check is done because KeyTokens can be found inside flow
+ # sequences... strange, but allowed.
+ if len(context['stack']) > 0 and context['stack'][-1].type == MAP:
+ if any(next.value < key for key in context['stack'][-1].keys):
+ yield LintProblem(
+ next.start_mark.line + 1, next.start_mark.column + 1,
+ 'wrong ordering of key "%s" in mapping' % next.value)
+ else:
+ context['stack'][-1].keys.append(next.value)
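
``next.value < key`` is a plain code-point comparison between Python strings, which is exactly why the docstring calls the ordering case-sensitive and not accent-friendly:
::

    # Uppercase letters sort before lowercase ones...
    print('T-shirt' < 't-shirt')  # True ('T' is U+0054, 't' is U+0074)
    # ...and accented letters sort after their ASCII neighbours.
    print('hais' < 'haïr')        # True ('i' is U+0069, 'ï' is U+00EF)
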
diff --git a/third_party/python/yamllint/yamllint/rules/line_length.py b/third_party/python/yamllint/yamllint/rules/line_length.py
new file mode 100644
index 0000000000..9b5a1ab687
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/line_length.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to set a limit on line length.
+
+Note: with Python 2, the ``line-length`` rule may not work properly with
+unicode characters because of the way strings are represented in bytes. We
+recommend running yamllint with Python 3.
+
+.. rubric:: Options
+
+* ``max`` defines the maximal (inclusive) length of lines.
+* ``allow-non-breakable-words`` is used to allow non-breakable words (without
+ spaces inside) to overflow the limit. This is useful for long URLs, for
+ instance. Use ``true`` to allow, ``false`` to forbid.
+* ``allow-non-breakable-inline-mappings`` implies ``allow-non-breakable-words``
+ and extends it to also allow non-breakable words in inline mappings.
+
+.. rubric:: Examples
+
+#. With ``line-length: {max: 70}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ long sentence:
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do
+ eiusmod tempor incididunt ut labore et dolore magna aliqua.
+
+ the following code snippet would **FAIL**:
+ ::
+
+ long sentence:
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod
+ tempor incididunt ut labore et dolore magna aliqua.
+
+#. With ``line-length: {max: 60, allow-non-breakable-words: true}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ this:
+ is:
+ - a:
+ http://localhost/very/very/very/very/very/very/very/very/long/url
+
+ # this comment is too long,
+ # but hard to split:
+ # http://localhost/another/very/very/very/very/very/very/very/very/long/url
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - this line is waaaaaaaaaaaaaay too long but could be easily split...
+
+ and the following code snippet would also **FAIL**:
+ ::
+
+ - foobar: http://localhost/very/very/very/very/very/very/very/very/long/url
+
+#. With ``line-length: {max: 60, allow-non-breakable-words: true,
+ allow-non-breakable-inline-mappings: true}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - foobar: http://localhost/very/very/very/very/very/very/very/very/long/url
+
+#. With ``line-length: {max: 60, allow-non-breakable-words: false}``
+
+ the following code snippet would **FAIL**:
+ ::
+
+ this:
+ is:
+ - a:
+ http://localhost/very/very/very/very/very/very/very/very/long/url
+"""
+
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+ID = 'line-length'
+TYPE = 'line'
+CONF = {'max': int,
+ 'allow-non-breakable-words': bool,
+ 'allow-non-breakable-inline-mappings': bool}
+DEFAULT = {'max': 80,
+ 'allow-non-breakable-words': True,
+ 'allow-non-breakable-inline-mappings': False}
+
+
+def check_inline_mapping(line):
+ loader = yaml.SafeLoader(line.content)
+ try:
+ while loader.peek_token():
+ if isinstance(loader.get_token(), yaml.BlockMappingStartToken):
+ while loader.peek_token():
+ if isinstance(loader.get_token(), yaml.ValueToken):
+ t = loader.get_token()
+ if isinstance(t, yaml.ScalarToken):
+ return (
+ ' ' not in line.content[t.start_mark.column:])
+ except yaml.scanner.ScannerError:
+ pass
+
+ return False
+
+
+def check(conf, line):
+ if line.end - line.start > conf['max']:
+ conf['allow-non-breakable-words'] |= \
+ conf['allow-non-breakable-inline-mappings']
+ if conf['allow-non-breakable-words']:
+ start = line.start
+ while start < line.end and line.buffer[start] == ' ':
+ start += 1
+
+ if start != line.end:
+ if line.buffer[start] in ('#', '-'):
+ start += 2
+
+ if line.buffer.find(' ', start, line.end) == -1:
+ return
+
+ if (conf['allow-non-breakable-inline-mappings'] and
+ check_inline_mapping(line)):
+ return
+
+ yield LintProblem(line.line_no, conf['max'] + 1,
+ 'line too long (%d > %d characters)' %
+ (line.end - line.start, conf['max']))
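
The non-breakable-word escape hatch only applies when everything past the leading indent (and an optional ``#`` or ``-`` marker) is a single word. A minimal sketch under the same assumed public API:
::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules:\n  line-length:\n    max: 20\n'
                          '    allow-non-breakable-words: true\n')
    url = '- http://localhost/a/very/long/url\n'   # one unbreakable word
    prose = '- this line could be split easily\n'
    print(list(linter.run(url, conf)))   # [] -- the overflow is one word
    for problem in linter.run(prose, conf):
        print(problem.desc)              # line too long (33 > 20 characters)
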
diff --git a/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py b/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py
new file mode 100644
index 0000000000..90b1cc2ae7
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/new_line_at_end_of_file.py
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to require a new line character (``\\n``) at the end of files.
+
+The POSIX standard `requires the last line to end with a new line character
+<http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap03.html#tag_03_206>`_.
+All UNIX tools expect a new line at the end of files. Most text editors use
+this convention too.
+"""
+
+
+from yamllint.linter import LintProblem
+
+
+ID = 'new-line-at-end-of-file'
+TYPE = 'line'
+
+
+def check(conf, line):
+ if line.end == len(line.buffer) and line.end > line.start:
+ yield LintProblem(line.line_no, line.end - line.start + 1,
+ 'no new line character at the end of file')
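
A buffer that does not end with ``\n`` trips the rule on its final line; the reported column points just past the last character. A minimal sketch (assumed public API):
::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules:\n  new-line-at-end-of-file: enable\n')
    for problem in linter.run('a: 1', conf):  # note: no trailing '\n'
        print(problem.line, problem.column, problem.desc)
    # -> 1 5 no new line character at the end of file
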
diff --git a/third_party/python/yamllint/yamllint/rules/new_lines.py b/third_party/python/yamllint/yamllint/rules/new_lines.py
new file mode 100644
index 0000000000..686bac244b
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/new_lines.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to force the type of new line characters.
+
+.. rubric:: Options
+
+* Set ``type`` to ``unix`` to use UNIX-typed new line characters (``\\n``), or
+ ``dos`` to use DOS-typed new line characters (``\\r\\n``).
+"""
+
+
+from yamllint.linter import LintProblem
+
+
+ID = 'new-lines'
+TYPE = 'line'
+CONF = {'type': ('unix', 'dos')}
+DEFAULT = {'type': 'unix'}
+
+
+def check(conf, line):
+ if line.start == 0 and len(line.buffer) > line.end:
+ if conf['type'] == 'dos':
+ if (line.end + 2 > len(line.buffer) or
+ line.buffer[line.end:line.end + 2] != '\r\n'):
+ yield LintProblem(1, line.end - line.start + 1,
+ 'wrong new line character: expected \\r\\n')
+ else:
+ if line.buffer[line.end] == '\r':
+ yield LintProblem(1, line.end - line.start + 1,
+ 'wrong new line character: expected \\n')
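
The check runs on the first line only (``line.start == 0``). A minimal sketch with DOS line endings against the default ``unix`` type (assumed public API):
::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules:\n  new-lines:\n    type: unix\n')
    for problem in linter.run('a: 1\r\nb: 2\r\n', conf):
        print(problem.line, problem.desc)
    # -> 1 wrong new line character: expected \n
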
diff --git a/third_party/python/yamllint/yamllint/rules/octal_values.py b/third_party/python/yamllint/yamllint/rules/octal_values.py
new file mode 100644
index 0000000000..f6e80cef56
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/octal_values.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2017 ScienJus
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to prevent values with octal numbers. In YAML, numbers that
+start with ``0`` are interpreted as octal, but this is not always wanted.
+For instance ``010`` is the city code of Beijing, and should not be
+converted to ``8``.
+
+.. rubric:: Examples
+
+#. With ``octal-values: {forbid-implicit-octal: true}``
+
+ the following code snippets would **PASS**:
+ ::
+
+ user:
+ city-code: '010'
+
+ the following code snippets would **PASS**:
+ ::
+
+ user:
+ city-code: 010,021
+
+ the following code snippets would **FAIL**:
+ ::
+
+ user:
+ city-code: 010
+
+#. With ``octal-values: {forbid-explicit-octal: true}``
+
+ the following code snippets would **PASS**:
+ ::
+
+ user:
+ city-code: '0o10'
+
+ the following code snippets would **FAIL**:
+ ::
+
+ user:
+ city-code: 0o10
+"""
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+ID = 'octal-values'
+TYPE = 'token'
+CONF = {'forbid-implicit-octal': bool,
+ 'forbid-explicit-octal': bool}
+DEFAULT = {'forbid-implicit-octal': True,
+ 'forbid-explicit-octal': True}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if prev and isinstance(prev, yaml.tokens.TagToken):
+ return
+
+ if conf['forbid-implicit-octal']:
+ if isinstance(token, yaml.tokens.ScalarToken):
+ if not token.style:
+ val = token.value
+ if val.isdigit() and len(val) > 1 and val[0] == '0':
+ yield LintProblem(
+ token.start_mark.line + 1, token.end_mark.column + 1,
+ 'forbidden implicit octal value "%s"' %
+ token.value)
+
+ if conf['forbid-explicit-octal']:
+ if isinstance(token, yaml.tokens.ScalarToken):
+ if not token.style:
+ val = token.value
+ if len(val) > 2 and val[:2] == '0o' and val[2:].isdigit():
+ yield LintProblem(
+ token.start_mark.line + 1, token.end_mark.column + 1,
+ 'forbidden explicit octal value "%s"' %
+ token.value)
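
The Beijing example from the docstring is easy to reproduce: PyYAML implements YAML 1.1, where a leading zero means octal:
::

    import yaml

    print(yaml.safe_load('city-code: 010'))    # {'city-code': 8}
    print(yaml.safe_load("city-code: '010'"))  # {'city-code': '010'}
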
diff --git a/third_party/python/yamllint/yamllint/rules/quoted_strings.py b/third_party/python/yamllint/yamllint/rules/quoted_strings.py
new file mode 100644
index 0000000000..1d997294da
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/quoted_strings.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2018 ClearScore
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to forbid any string values that are not quoted, or to forbid
+quoted strings when quoting is not needed. You can also enforce the type of
+quote used.
+
+.. rubric:: Options
+
+* ``quote-type`` defines allowed quotes: ``single``, ``double`` or ``any``
+ (default).
+* ``required`` defines whether using quotes in string values is required
+ (``true``, default) or not (``false``), or only allowed when really needed
+ (``only-when-needed``).
+* ``extra-required`` is a list of PCRE regexes to force string values to be
+ quoted, if they match any regex. This option can only be used with
+ ``required: false`` and ``required: only-when-needed``.
+* ``extra-allowed`` is a list of PCRE regexes to allow quoted string values,
+ even if ``required: only-when-needed`` is set.
+
+**Note**: Multi-line strings (with ``|`` or ``>``) will not be checked.
+
+.. rubric:: Examples
+
+#. With ``quoted-strings: {quote-type: any, required: true}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ foo: "bar"
+ bar: 'foo'
+ number: 123
+ boolean: true
+
+ the following code snippet would **FAIL**:
+ ::
+
+ foo: bar
+
+#. With ``quoted-strings: {quote-type: single, required: only-when-needed}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ foo: bar
+ bar: foo
+ not_number: '123'
+ not_boolean: 'true'
+ not_comment: '# comment'
+ not_list: '[1, 2, 3]'
+ not_map: '{a: 1, b: 2}'
+
+ the following code snippet would **FAIL**:
+ ::
+
+ foo: 'bar'
+
+#. With ``quoted-strings: {required: false, extra-required: [^http://,
+ ^ftp://]}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - localhost
+ - "localhost"
+ - "http://localhost"
+ - "ftp://localhost"
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - http://localhost
+ - ftp://localhost
+
+#. With ``quoted-strings: {required: only-when-needed, extra-allowed:
+ [^http://, ^ftp://], extra-required: [QUOTED]}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - localhost
+ - "http://localhost"
+ - "ftp://localhost"
+ - "this is a string that needs to be QUOTED"
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - "localhost"
+ - this is a string that needs to be QUOTED
+"""
+
+import re
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+ID = 'quoted-strings'
+TYPE = 'token'
+CONF = {'quote-type': ('any', 'single', 'double'),
+ 'required': (True, False, 'only-when-needed'),
+ 'extra-required': [str],
+ 'extra-allowed': [str]}
+DEFAULT = {'quote-type': 'any',
+ 'required': True,
+ 'extra-required': [],
+ 'extra-allowed': []}
+
+
+def VALIDATE(conf):
+ if conf['required'] is True and len(conf['extra-allowed']) > 0:
+ return 'cannot use both "required: true" and "extra-allowed"'
+ if conf['required'] is True and len(conf['extra-required']) > 0:
+ return 'cannot use both "required: true" and "extra-required"'
+ if conf['required'] is False and len(conf['extra-allowed']) > 0:
+ return 'cannot use both "required: false" and "extra-allowed"'
+
+
+DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
+
+
+def _quote_match(quote_type, token_style):
+ return ((quote_type == 'any') or
+ (quote_type == 'single' and token_style == "'") or
+ (quote_type == 'double' and token_style == '"'))
+
+
+def _quotes_are_needed(string):
+ loader = yaml.BaseLoader('key: ' + string)
+    # Remove the first 5 tokens corresponding to 'key: ' (StreamStartToken,
+ # BlockMappingStartToken, KeyToken, ScalarToken(value=key), ValueToken)
+ for _ in range(5):
+ loader.get_token()
+ try:
+ a, b = loader.get_token(), loader.get_token()
+ if (isinstance(a, yaml.ScalarToken) and a.style is None and
+ isinstance(b, yaml.BlockEndToken)):
+ return False
+ return True
+ except yaml.scanner.ScannerError:
+ return True
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if not (isinstance(token, yaml.tokens.ScalarToken) and
+ isinstance(prev, (yaml.BlockEntryToken, yaml.FlowEntryToken,
+ yaml.FlowSequenceStartToken, yaml.TagToken,
+ yaml.ValueToken))):
+
+ return
+
+ # Ignore explicit types, e.g. !!str testtest or !!int 42
+ if (prev and isinstance(prev, yaml.tokens.TagToken) and
+ prev.value[0] == '!!'):
+ return
+
+ # Ignore numbers, booleans, etc.
+ resolver = yaml.resolver.Resolver()
+ tag = resolver.resolve(yaml.nodes.ScalarNode, token.value, (True, False))
+ if token.plain and tag != DEFAULT_SCALAR_TAG:
+ return
+
+ # Ignore multi-line strings
+ if (not token.plain) and (token.style == "|" or token.style == ">"):
+ return
+
+ quote_type = conf['quote-type']
+
+ msg = None
+ if conf['required'] is True:
+
+ # Quotes are mandatory and need to match config
+ if token.style is None or not _quote_match(quote_type, token.style):
+ msg = "string value is not quoted with %s quotes" % quote_type
+
+ elif conf['required'] is False:
+
+ # Quotes are not mandatory but when used need to match config
+ if token.style and not _quote_match(quote_type, token.style):
+ msg = "string value is not quoted with %s quotes" % quote_type
+
+ elif not token.style:
+ is_extra_required = any(re.search(r, token.value)
+ for r in conf['extra-required'])
+ if is_extra_required:
+ msg = "string value is not quoted"
+
+ elif conf['required'] == 'only-when-needed':
+
+ # Quotes are not strictly needed here
+ if (token.style and tag == DEFAULT_SCALAR_TAG and token.value and
+ not _quotes_are_needed(token.value)):
+ is_extra_required = any(re.search(r, token.value)
+ for r in conf['extra-required'])
+ is_extra_allowed = any(re.search(r, token.value)
+ for r in conf['extra-allowed'])
+ if not (is_extra_required or is_extra_allowed):
+ msg = "string value is redundantly quoted with %s quotes" % (
+ quote_type)
+
+ # But when used need to match config
+ elif token.style and not _quote_match(quote_type, token.style):
+ msg = "string value is not quoted with %s quotes" % quote_type
+
+ elif not token.style:
+ is_extra_required = len(conf['extra-required']) and any(
+ re.search(r, token.value) for r in conf['extra-required'])
+ if is_extra_required:
+ msg = "string value is not quoted"
+
+ if msg is not None:
+ yield LintProblem(
+ token.start_mark.line + 1,
+ token.start_mark.column + 1,
+ msg)
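
In ``only-when-needed`` mode, ``_quotes_are_needed()`` re-scans the value as if it were plain; if it comes back as a single plain scalar, the quotes were redundant. A minimal sketch (assumed public API):
::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig(
        'rules:\n  quoted-strings:\n    required: only-when-needed\n')
    # 'bar' needs no quotes; '123' does (it would otherwise load as an int).
    for problem in linter.run("foo: 'bar'\nok: '123'\n", conf):
        print(problem.line, problem.desc)
    # -> 1 string value is redundantly quoted with any quotes
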
diff --git a/third_party/python/yamllint/yamllint/rules/trailing_spaces.py b/third_party/python/yamllint/yamllint/rules/trailing_spaces.py
new file mode 100644
index 0000000000..2fc4bbba03
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/trailing_spaces.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Adrien Vergé
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to forbid trailing spaces at the end of lines.
+
+.. rubric:: Examples
+
+#. With ``trailing-spaces: {}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ this document doesn't contain
+ any trailing
+ spaces
+
+ the following code snippet would **FAIL**:
+ ::
+
+ this document contains """ """
+ trailing spaces
+ on lines 1 and 3 """ """
+"""
+
+
+import string
+
+from yamllint.linter import LintProblem
+
+
+ID = 'trailing-spaces'
+TYPE = 'line'
+
+
+def check(conf, line):
+ if line.end == 0:
+ return
+
+ # YAML recognizes two white space characters: space and tab.
+ # http://yaml.org/spec/1.2/spec.html#id2775170
+
+ pos = line.end
+ while line.buffer[pos - 1] in string.whitespace and pos > line.start:
+ pos -= 1
+
+ if pos != line.end and line.buffer[pos] in ' \t':
+ yield LintProblem(line.line_no, pos - line.start + 1,
+ 'trailing spaces')
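
The reported column is the first trailing whitespace character, one-indexed relative to the start of the line. A minimal sketch (assumed public API):
::

    from yamllint import linter
    from yamllint.config import YamlLintConfig

    conf = YamlLintConfig('rules:\n  trailing-spaces: enable\n')
    for problem in linter.run('clean: line\ndirty: line  \n', conf):
        print(problem.line, problem.column, problem.desc)
    # -> 2 12 trailing spaces
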
diff --git a/third_party/python/yamllint/yamllint/rules/truthy.py b/third_party/python/yamllint/yamllint/rules/truthy.py
new file mode 100644
index 0000000000..64ccaaa45c
--- /dev/null
+++ b/third_party/python/yamllint/yamllint/rules/truthy.py
@@ -0,0 +1,149 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2016 Peter Ericson
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+"""
+Use this rule to forbid non-explicitly typed truthy values other than allowed
+ones (by default: ``true`` and ``false``), for example ``YES`` or ``off``.
+
+This can be useful to prevent surprises from YAML parsers transforming
+``[yes, FALSE, Off]`` into ``[true, false, false]`` or
+``{y: 1, yes: 2, on: 3, true: 4, True: 5}`` into ``{y: 1, true: 5}``.
+
+.. rubric:: Options
+
+* ``allowed-values`` defines the list of truthy values which will be ignored
+ during linting. The default is ``['true', 'false']``, but can be changed to
+ any list containing: ``'TRUE'``, ``'True'``, ``'true'``, ``'FALSE'``,
+ ``'False'``, ``'false'``, ``'YES'``, ``'Yes'``, ``'yes'``, ``'NO'``,
+ ``'No'``, ``'no'``, ``'ON'``, ``'On'``, ``'on'``, ``'OFF'``, ``'Off'``,
+ ``'off'``.
+* ``check-keys`` controls whether keys in mappings are verified. By default,
+  the ``truthy`` rule applies to both keys and values. Set this option to
+  ``false`` to skip checking keys.
+
+.. rubric:: Examples
+
+#. With ``truthy: {}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ boolean: true
+
+ object: {"True": 1, 1: "True"}
+
+ "yes": 1
+ "on": 2
+ "True": 3
+
+ explicit:
+ string1: !!str True
+ string2: !!str yes
+ string3: !!str off
+ encoded: !!binary |
+ True
+ OFF
+ pad== # this decodes as 'N\xbb\x9e8Qii'
+ boolean1: !!bool true
+ boolean2: !!bool "false"
+ boolean3: !!bool FALSE
+ boolean4: !!bool True
+ boolean5: !!bool off
+ boolean6: !!bool NO
+
+ the following code snippet would **FAIL**:
+ ::
+
+ object: {True: 1, 1: True}
+
+ the following code snippet would **FAIL**:
+ ::
+
+ yes: 1
+ on: 2
+ True: 3
+
+#. With ``truthy: {allowed-values: ["yes", "no"]}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ - yes
+ - no
+ - "true"
+ - 'false'
+ - foo
+ - bar
+
+ the following code snippet would **FAIL**:
+ ::
+
+ - true
+ - false
+ - on
+ - off
+
+#. With ``truthy: {check-keys: false}``
+
+ the following code snippet would **PASS**:
+ ::
+
+ yes: 1
+ on: 2
+ true: 3
+
+ the following code snippet would **FAIL**:
+ ::
+
+ yes: Yes
+ on: On
+ true: True
+"""
+
+import yaml
+
+from yamllint.linter import LintProblem
+
+
+TRUTHY = ['YES', 'Yes', 'yes',
+ 'NO', 'No', 'no',
+ 'TRUE', 'True', 'true',
+ 'FALSE', 'False', 'false',
+ 'ON', 'On', 'on',
+ 'OFF', 'Off', 'off']
+
+
+ID = 'truthy'
+TYPE = 'token'
+CONF = {'allowed-values': list(TRUTHY), 'check-keys': bool}
+DEFAULT = {'allowed-values': ['true', 'false'], 'check-keys': True}
+
+
+def check(conf, token, prev, next, nextnext, context):
+ if prev and isinstance(prev, yaml.tokens.TagToken):
+ return
+
+ if (not conf['check-keys'] and isinstance(prev, yaml.tokens.KeyToken) and
+ isinstance(token, yaml.tokens.ScalarToken)):
+ return
+
+ if isinstance(token, yaml.tokens.ScalarToken):
+ if (token.value in (set(TRUTHY) - set(conf['allowed-values'])) and
+ token.style is None):
+ yield LintProblem(token.start_mark.line + 1,
+ token.start_mark.column + 1,
+ "truthy value should be one of [" +
+ ", ".join(sorted(conf['allowed-values'])) + "]")
diff --git a/third_party/python/yarl/CHANGES.rst b/third_party/python/yarl/CHANGES.rst
new file mode 100644
index 0000000000..58ee2e4828
--- /dev/null
+++ b/third_party/python/yarl/CHANGES.rst
@@ -0,0 +1,572 @@
+=========
+Changelog
+=========
+
+..
+   You should *NOT* be adding new change log entries to this file; it is
+   managed by towncrier. You *may* edit previous change log entries to
+   fix problems like typos.
+ To add a new change log entry, please see
+ https://pip.pypa.io/en/latest/development/#adding-a-news-entry
+ we named the news folder "changes".
+
+ WARNING: Don't drop the next directive!
+
+.. towncrier release notes start
+
+1.6.3 (2020-11-14)
+==================
+
+Bugfixes
+--------
+
+- No longer lose characters when decoding incorrect percent-sequences (like ``%e2%82%f8``). All non-decodable percent-sequences are now preserved.
+ `#517 <https://github.com/aio-libs/yarl/issues/517>`_
+- Provide x86 Windows wheels.
+ `#535 <https://github.com/aio-libs/yarl/issues/535>`_
+
+
+----
+
+
+1.6.2 (2020-10-12)
+==================
+
+
+Bugfixes
+--------
+
+- Provide generated ``.c`` files in TarBall distribution.
+ `#530 <https://github.com/aio-libs/multidict/issues/530>`_
+
+1.6.1 (2020-10-12)
+==================
+
+Features
+--------
+
+- Provide wheels for ``aarch64``, ``i686``, ``ppc64le``, ``s390x`` architectures on
+ Linux as well as ``x86_64``.
+ `#507 <https://github.com/aio-libs/yarl/issues/507>`_
+- Provide wheels for Python 3.9.
+ `#526 <https://github.com/aio-libs/yarl/issues/526>`_
+
+Bugfixes
+--------
+
+- ``human_repr()`` now always produces valid representation equivalent to the original URL (if the original URL is valid).
+ `#511 <https://github.com/aio-libs/yarl/issues/511>`_
+- Fixed requoting a single percent followed by a percent-encoded character in the Cython implementation.
+ `#514 <https://github.com/aio-libs/yarl/issues/514>`_
+- Fix ValueError when decoding ``%`` which is not followed by two hexadecimal digits.
+ `#516 <https://github.com/aio-libs/yarl/issues/516>`_
+- Fix decoding ``%`` followed by a space and hexadecimal digit.
+ `#520 <https://github.com/aio-libs/yarl/issues/520>`_
+- Fix annotation of ``with_query()``/``update_query()`` methods for ``key=[val1, val2]`` case.
+ `#528 <https://github.com/aio-libs/yarl/issues/528>`_
+
+Removal
+-------
+
+- Drop Python 3.5 support; Python 3.6 is the minimal supported Python version.
+
+
+----
+
+
+1.6.0 (2020-09-23)
+==================
+
+Features
+--------
+
+- Allow for int and float subclasses in query, while still denying bool.
+ `#492 <https://github.com/aio-libs/yarl/issues/492>`_
+
+
+Bugfixes
+--------
+
+- Do not requote arguments in ``URL.build()``, ``with_xxx()`` and in ``/`` operator.
+ `#502 <https://github.com/aio-libs/yarl/issues/502>`_
+- Keep IPv6 brackets in ``origin()``.
+ `#504 <https://github.com/aio-libs/yarl/issues/504>`_
+
+
+----
+
+
+1.5.1 (2020-08-01)
+==================
+
+Bugfixes
+--------
+
+- Fix including relocated internal ``yarl._quoting_c`` C-extension into published PyPI dists.
+ `#485 <https://github.com/aio-libs/yarl/issues/485>`_
+
+
+Misc
+----
+
+- `#484 <https://github.com/aio-libs/yarl/issues/484>`_
+
+
+----
+
+
+1.5.0 (2020-07-26)
+==================
+
+Features
+--------
+
+- Convert host to lowercase on URL building.
+ `#386 <https://github.com/aio-libs/yarl/issues/386>`_
+- Allow using the ``mod`` operator (``%``) for updating the query string (an alias for the ``update_query()`` method).
+ `#435 <https://github.com/aio-libs/yarl/issues/435>`_
+- Allow use of sequences such as ``list`` and ``tuple`` in the values
+ of a mapping such as ``dict`` to represent that a key has many values::
+
+ url = URL("http://example.com")
+ assert url.with_query({"a": [1, 2]}) == URL("http://example.com/?a=1&a=2")
+
+ `#443 <https://github.com/aio-libs/yarl/issues/443>`_
+- Support URL.build() with scheme and path (creates a relative URL).
+ `#464 <https://github.com/aio-libs/yarl/issues/464>`_
+- Cache slow IDNA encode/decode calls.
+ `#476 <https://github.com/aio-libs/yarl/issues/476>`_
+- Add ``@final`` / ``Final`` type hints
+ `#477 <https://github.com/aio-libs/yarl/issues/477>`_
+- Support URL authority/raw_authority properties and authority argument of ``URL.build()`` method.
+ `#478 <https://github.com/aio-libs/yarl/issues/478>`_
+- Hide the library implementation details and keep the exposed public list very clean.
+ `#483 <https://github.com/aio-libs/yarl/issues/483>`_
+
+
+Bugfixes
+--------
+
+- Fix tests with newer Python (3.7.6, 3.8.1 and 3.9.0+).
+ `#409 <https://github.com/aio-libs/yarl/issues/409>`_
+- Fix a bug where a query component, passed in the form of a mapping or sequence, was unquoted in an unexpected way.
+ `#426 <https://github.com/aio-libs/yarl/issues/426>`_
+- Hide ``Query`` and ``QueryVariable`` type aliases in ``__init__.pyi``; they are now prefixed with an underscore.
+ `#431 <https://github.com/aio-libs/yarl/issues/431>`_
+- Keep IPv6 brackets after updating port/user/password.
+ `#451 <https://github.com/aio-libs/yarl/issues/451>`_
+
+
+----
+
+
+1.4.2 (2019-12-05)
+==================
+
+Features
+--------
+
+- Workaround for missing ``str.isascii()`` in Python 3.6
+ `#389 <https://github.com/aio-libs/yarl/issues/389>`_
+
+
+----
+
+
+1.4.1 (2019-11-29)
+==================
+
+* Fix regression, make the library work on Python 3.5 and 3.6 again.
+
+1.4.0 (2019-11-29)
+==================
+
+* Distinguish an empty password in URL from a password not provided at all (#262)
+
+* Fixed annotations for optional parameters of ``URL.build`` (#309)
+
+* Use None as default value of ``user`` parameter of ``URL.build`` (#309)
+
+* Enforce building C Accelerated modules when installing from source tarball, use
+ ``YARL_NO_EXTENSIONS`` environment variable for falling back to (slower) Pure Python
+ implementation (#329)
+
+* Drop Python 3.5 support
+
+* Fix quoting of plus in path by pure python version (#339)
+
+* Don't create a new URL if fragment is unchanged (#292)
+
+* Include in the error message the path that produces the "starting slash forbidden" error (#376)
+
+* Skip slow IDNA encoding for ASCII-only strings (#387)
+
+
+1.3.0 (2018-12-11)
+==================
+
+* Fix annotations for ``query`` parameter (#207)
+
+* An incoming query sequence can have int variables (the same as for
+ Mapping type) (#208)
+
+* Add ``URL.explicit_port`` property (#218)
+
+* Give a friendlier error when the port can't be converted to int (#168)
+
+* ``bool(URL())`` now returns ``False`` (#272)
+
+1.2.6 (2018-06-14)
+==================
+
+* Drop Python 3.4 trove classifier (#205)
+
+1.2.5 (2018-05-23)
+==================
+
+* Fix annotations for ``build`` (#199)
+
+1.2.4 (2018-05-08)
+==================
+
+* Fix annotations for ``cached_property`` (#195)
+
+1.2.3 (2018-05-03)
+==================
+
+* Accept ``str`` subclasses in ``URL`` constructor (#190)
+
+1.2.2 (2018-05-01)
+==================
+
+* Fix build
+
+1.2.1 (2018-04-30)
+==================
+
+* Pin minimal required Python to 3.5.3 (#189)
+
+1.2.0 (2018-04-30)
+==================
+
+* Forbid inheritance, replace ``__init__`` with ``__new__`` (#171)
+
+* Support PEP-561 (provide type hinting marker) (#182)
+
+1.1.1 (2018-02-17)
+==================
+
+* Fix performance regression: don't encode empty netloc (#170)
+
+1.1.0 (2018-01-21)
+==================
+
+* Make pure Python quoter consistent with Cython version (#162)
+
+1.0.0 (2018-01-15)
+==================
+
+* Use fast path if quoted string does not need requoting (#154)
+
+* Speed up quoting/unquoting by ``_Quoter`` and ``_Unquoter`` classes (#155)
+
+* Drop ``yarl.quote`` and ``yarl.unquote`` public functions (#155)
+
+* Add custom string writer, reuse static buffer if available (#157)
+ Code is 50-80 times faster than Pure Python version (was 4-5 times faster)
+
+* Don't recode IP zone (#144)
+
+* Support ``encoded=True`` in ``yarl.URL.build()`` (#158)
+
+* Fix updating query with multiple keys (#160)
+
+0.18.0 (2018-01-10)
+===================
+
+* Fallback to IDNA 2003 if domain name is not IDNA 2008 compatible (#152)
+
+0.17.0 (2017-12-30)
+===================
+
+* Use IDNA 2008 for domain name processing (#149)
+
+0.16.0 (2017-12-07)
+===================
+
+* Fix raising ``TypeError`` by ``url.query_string()`` after
+ ``url.with_query({})`` (empty mapping) (#141)
+
+0.15.0 (2017-11-23)
+===================
+
+* Add ``raw_path_qs`` attribute (#137)
+
+0.14.2 (2017-11-14)
+===================
+
+* Restore ``strict`` parameter as no-op in ``quote`` / ``unquote``
+
+0.14.1 (2017-11-13)
+===================
+
+* Restore ``strict`` parameter as no-op for sake of compatibility with
+ aiohttp 2.2
+
+0.14.0 (2017-11-11)
+===================
+
+* Drop strict mode (#123)
+
+* Fix ``"ValueError: Unallowed PCT %"`` when there's a ``"%"`` in the url (#124)
+
+0.13.0 (2017-10-01)
+===================
+
+* Document ``encoded`` parameter (#102)
+
+* Support relative urls like ``'?key=value'`` (#100)
+
+* Unsafe encoding for QS fixed. Encode ``;`` char in value param (#104)
+
+* Process passwords without user names (#95)
+
+0.12.0 (2017-06-26)
+===================
+
+* Properly support paths without leading slash in ``URL.with_path()`` (#90)
+
+* Enable type annotation checks
+
+0.11.0 (2017-06-26)
+===================
+
+* Normalize path (#86)
+
+* Clear query and fragment parts in ``.with_path()`` (#85)
+
+0.10.3 (2017-06-13)
+===================
+
+* Prevent double URL args unquoting (#83)
+
+0.10.2 (2017-05-05)
+===================
+
+* Unexpected hash behaviour (#75)
+
+
+0.10.1 (2017-05-03)
+===================
+
+* Unexpected compare behaviour (#73)
+
+* Do not quote or unquote + if not a query string. (#74)
+
+
+0.10.0 (2017-03-14)
+===================
+
+* Added ``URL.build`` class method (#58)
+
+* Added ``path_qs`` attribute (#42)
+
+
+0.9.8 (2017-02-16)
+==================
+
+* Do not quote ``:`` in path
+
+
+0.9.7 (2017-02-16)
+==================
+
+* Load from pickle without _cache (#56)
+
+* Percent-encoded pluses in path variables become spaces (#59)
+
+
+0.9.6 (2017-02-15)
+==================
+
+* Revert backward incompatible change (BaseURL)
+
+
+0.9.5 (2017-02-14)
+==================
+
+* Fix BaseURL rich comparison support
+
+
+0.9.4 (2017-02-14)
+==================
+
+* Use BaseURL
+
+
+0.9.3 (2017-02-14)
+==================
+
+* Added BaseURL
+
+
+0.9.2 (2017-02-08)
+==================
+
+* Remove debug print
+
+
+0.9.1 (2017-02-07)
+==================
+
+* Do not lose tail chars (#45)
+
+
+0.9.0 (2017-02-07)
+==================
+
+* Allow quoting ``%`` in non-strict mode (#21)
+
+* Incorrect parsing of query parameters with %3B (;) inside (#34)
+
+* Fix core dumps (#41)
+
+* tmpbuf - compiling error (#43)
+
+* Added ``URL.update_path()`` method
+
+* Added ``URL.update_query()`` method (#47)
+
+
+0.8.1 (2016-12-03)
+==================
+
+* Fix broken aiohttp: revert ``quote`` / ``unquote``.
+
+
+0.8.0 (2016-12-03)
+==================
+
+* Support more verbose error messages in ``.with_query()`` (#24)
+
+* Don't percent-encode ``@`` and ``:`` in path (#32)
+
+* Don't expose ``yarl.quote`` and ``yarl.unquote``; these functions are
+ part of the private API
+
+0.7.1 (2016-11-18)
+==================
+
+* Accept not only ``str`` but also all classes inherited from ``str`` (#25)
+
+0.7.0 (2016-11-07)
+==================
+
+* Accept ``int`` as value for ``.with_query()``
+
+0.6.0 (2016-11-07)
+==================
+
+* Explicitly use UTF8 encoding in setup.py (#20)
+* Properly unquote non-UTF8 strings (#19)
+
+0.5.3 (2016-11-02)
+==================
+
+* Don't use namedtuple fields but indexes on URL construction
+
+0.5.2 (2016-11-02)
+==================
+
+* Inline ``_encode`` class method
+
+0.5.1 (2016-11-02)
+==================
+
+* Make URL construction faster by removing extra classmethod calls
+
+0.5.0 (2016-11-02)
+==================
+
+* Add cython optimization for quoting/unquoting
+* Provide binary wheels
+
+0.4.3 (2016-09-29)
+==================
+
+* Fix typing stubs
+
+0.4.2 (2016-09-29)
+==================
+
+* Expose ``quote()`` and ``unquote()`` as public API
+
+0.4.1 (2016-09-28)
+==================
+
+* Support empty values in query (``'/path?arg'``)
+
+0.4.0 (2016-09-27)
+==================
+
+* Introduce ``relative()`` (#16)
+
+0.3.2 (2016-09-27)
+==================
+
+* Typo fixes #15
+
+0.3.1 (2016-09-26)
+==================
+
+* Support sequence of pairs as ``with_query()`` parameter
+
+0.3.0 (2016-09-26)
+==================
+
+* Introduce ``is_default_port()``
+
+0.2.1 (2016-09-26)
+==================
+
+* Raise ValueError for URLs like 'http://:8080/'
+
+0.2.0 (2016-09-18)
+==================
+
+* Avoid doubling slashes when joining paths (#13)
+
+* Appending path starting from slash is forbidden (#12)
+
+0.1.4 (2016-09-09)
+==================
+
+* Add kwargs support for ``with_query()`` (#10)
+
+0.1.3 (2016-09-07)
+==================
+
+* Document ``with_query()``, ``with_fragment()`` and ``origin()``
+
+* Allow ``None`` for ``with_query()`` and ``with_fragment()``
+
+0.1.2 (2016-09-07)
+==================
+
+* Fix links, tune docs theme.
+
+0.1.1 (2016-09-06)
+==================
+
+* Update README; the old version used an obsolete API
+
+0.1.0 (2016-09-06)
+==================
+
+* The library was deeply refactored; bytes are gone, but all
+ accepted strings are encoded if needed.
+
+0.0.1 (2016-08-30)
+==================
+
+* The first release.
diff --git a/third_party/python/yarl/LICENSE b/third_party/python/yarl/LICENSE
new file mode 100644
index 0000000000..cc5cfd6790
--- /dev/null
+++ b/third_party/python/yarl/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright 2016-2018, Andrew Svetlov and aio-libs team
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/python/yarl/MANIFEST.in b/third_party/python/yarl/MANIFEST.in
new file mode 100644
index 0000000000..dab6cb9a00
--- /dev/null
+++ b/third_party/python/yarl/MANIFEST.in
@@ -0,0 +1,13 @@
+include LICENSE
+include CHANGES.rst
+include README.rst
+graft yarl
+graft docs
+graft tests
+include yarl/*.c
+global-exclude *.pyc
+global-exclude *.cache
+exclude yarl/*.html
+exclude yarl/*.so
+exclude yarl/*.pyd
+prune docs/_build
diff --git a/third_party/python/yarl/PKG-INFO b/third_party/python/yarl/PKG-INFO
new file mode 100644
index 0000000000..3c242e5135
--- /dev/null
+++ b/third_party/python/yarl/PKG-INFO
@@ -0,0 +1,797 @@
+Metadata-Version: 2.1
+Name: yarl
+Version: 1.6.3
+Summary: Yet another URL library
+Home-page: https://github.com/aio-libs/yarl/
+Author: Andrew Svetlov
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Description: yarl
+ ====
+
+ .. image:: https://github.com/aio-libs/yarl/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/yarl/actions?query=workflow%3ACI
+ :align: right
+
+ .. image:: https://codecov.io/gh/aio-libs/yarl/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/yarl
+
+ .. image:: https://badge.fury.io/py/yarl.svg
+ :target: https://badge.fury.io/py/yarl
+
+
+ .. image:: https://readthedocs.org/projects/yarl/badge/?version=latest
+ :target: https://yarl.readthedocs.io
+
+
+ .. image:: https://img.shields.io/pypi/pyversions/yarl.svg
+ :target: https://pypi.python.org/pypi/yarl
+
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+ Introduction
+ ------------
+
+ A URL is constructed from ``str``:
+
+ .. code-block:: pycon
+
+ >>> from yarl import URL
+ >>> url = URL('https://www.python.org/~guido?arg=1#frag')
+ >>> url
+ URL('https://www.python.org/~guido?arg=1#frag')
+
+ All URL parts: *scheme*, *user*, *password*, *host*, *port*, *path*,
+ *query* and *fragment* are accessible via properties:
+
+ .. code-block:: pycon
+
+ >>> url.scheme
+ 'https'
+ >>> url.host
+ 'www.python.org'
+ >>> url.path
+ '/~guido'
+ >>> url.query_string
+ 'arg=1'
+ >>> url.query
+ <MultiDictProxy('arg': '1')>
+ >>> url.fragment
+ 'frag'
+
+ All URL manipulations produce a new URL object:
+
+ .. code-block:: pycon
+
+ >>> url = URL('https://www.python.org')
+ >>> url / 'foo' / 'bar'
+ URL('https://www.python.org/foo/bar')
+ >>> url / 'foo' % {'bar': 'baz'}
+ URL('https://www.python.org/foo?bar=baz')
+
+ Strings passed to the constructor and to modification methods are
+ automatically encoded, giving the canonical representation as a result:
+
+ .. code-block:: pycon
+
+ >>> url = URL('https://www.python.org/путь')
+ >>> url
+ URL('https://www.python.org/%D0%BF%D1%83%D1%82%D1%8C')
+
+ Regular properties are *percent-decoded*; use the ``raw_`` versions to
+ get *encoded* strings:
+
+ .. code-block:: pycon
+
+ >>> url.path
+ '/путь'
+
+ >>> url.raw_path
+ '/%D0%BF%D1%83%D1%82%D1%8C'
+
+ A human-readable representation of the URL is available as ``.human_repr()``:
+
+ .. code-block:: pycon
+
+ >>> url.human_repr()
+ 'https://www.python.org/путь'
+
+ For full documentation please read https://yarl.readthedocs.org.
+
+
+ Installation
+ ------------
+
+ ::
+
+ $ pip install yarl
+
+ The library is Python 3 only!
+
+ PyPI contains binary wheels for Linux, Windows and macOS. If you install
+ ``yarl`` on another operating system (such as *Alpine Linux*, which is not
+ manylinux-compliant because it lacks glibc and therefore cannot use our
+ wheels), the tarball will be used to compile the library from the source
+ code. This requires a C compiler and Python headers to be installed.
+
+ To skip the compilation, you must explicitly opt in by setting the ``YARL_NO_EXTENSIONS``
+ environment variable to a non-empty value, e.g.:
+
+ .. code-block:: bash
+
+ $ YARL_NO_EXTENSIONS=1 pip install yarl
+
+ Please note that the pure-Python (uncompiled) version is much slower. However,
+ PyPy always uses a pure-Python implementation, and, as such, it is unaffected
+ by this variable.
+
+ Dependencies
+ ------------
+
+ YARL requires the multidict_ library.
+
+
+ API documentation
+ ------------------
+
+ The documentation is located at https://yarl.readthedocs.org
+
+
+ Why isn't boolean supported by the URL query API?
+ -------------------------------------------------
+
+ There is no standard for representing boolean values.
+
+ Some systems prefer ``true``/``false``, others like ``yes``/``no``, ``on``/``off``,
+ ``Y``/``N``, ``1``/``0``, etc.
+
+ ``yarl`` cannot make an unambiguous decision on how to serialize ``bool`` values because
+ it is specific to how the end-user's application is built and would be different for
+ different apps. The library doesn't accept booleans in the API; a user should convert
+ bools into strings using their own preferred convention.
+
+
+ Comparison with other URL libraries
+ ------------------------------------
+
+ * furl (https://pypi.python.org/pypi/furl)
+
+ The library has rich functionality but the ``furl`` object is mutable.
+
+ I'm afraid to pass this object into foreign code: who knows whether the
+ code will modify my URL in a terrible way, while I just want to send a URL
+ with handy helpers for accessing URL properties.
+
+ ``furl`` has other non-obvious tricky things but the main objection
+ is mutability.
+
+ * URLObject (https://pypi.python.org/pypi/URLObject)
+
+ URLObject is immutable, which is pretty good.
+
+ Every URL change generates a new URL object.
+
+ But the library doesn't do any decode/encode transformations, leaving the
+ end user to cope with these gory details.
+
+
+ Source code
+ -----------
+
+ The project is hosted on GitHub_.
+
+ Please file an issue on the `bug tracker
+ <https://github.com/aio-libs/yarl/issues>`_ if you have found a bug
+ or have a suggestion for improving the library.
+
+ The library uses `Azure Pipelines <https://dev.azure.com/aio-libs/yarl>`_ for
+ Continuous Integration.
+
+ Discussion list
+ ---------------
+
+ *aio-libs* google group: https://groups.google.com/forum/#!forum/aio-libs
+
+ Feel free to post your questions and ideas here.
+
+
+ Authors and License
+ -------------------
+
+ The ``yarl`` package is written by Andrew Svetlov.
+
+ It's *Apache 2* licensed and freely available.
+
+
+ .. _GitHub: https://github.com/aio-libs/yarl
+
+ .. _multidict: https://github.com/aio-libs/multidict
+
+
+ =========
+ Changelog
+ =========
+
+ ..
+ You should *NOT* add new change log entries to this file; it is
+ managed by towncrier. You *may* edit previous change log entries to
+ fix problems such as typos.
+ To add a new change log entry, please see
+ https://pip.pypa.io/en/latest/development/#adding-a-news-entry
+ (note that we named the news folder "changes").
+
+ WARNING: Don't drop the next directive!
+
+ .. towncrier release notes start
+
+ 1.6.3 (2020-11-14)
+ ==================
+
+ Bugfixes
+ --------
+
+ - No longer lose characters when decoding incorrect percent-sequences (like ``%e2%82%f8``). All non-decodable percent-sequences are now preserved.
+ `#517 <https://github.com/aio-libs/yarl/issues/517>`_
+ - Provide x86 Windows wheels.
+ `#535 <https://github.com/aio-libs/yarl/issues/535>`_
+
+
+ ----
+
+
+ 1.6.2 (2020-10-12)
+ ==================
+
+
+ Bugfixes
+ --------
+
+ - Provide generated ``.c`` files in the tarball distribution.
+ `#530 <https://github.com/aio-libs/yarl/issues/530>`_
+
+ 1.6.1 (2020-10-12)
+ ==================
+
+ Features
+ --------
+
+ - Provide wheels for ``aarch64``, ``i686``, ``ppc64le``, ``s390x`` architectures on
+ Linux as well as ``x86_64``.
+ `#507 <https://github.com/aio-libs/yarl/issues/507>`_
+ - Provide wheels for Python 3.9.
+ `#526 <https://github.com/aio-libs/yarl/issues/526>`_
+
+ Bugfixes
+ --------
+
+ - ``human_repr()`` now always produces a valid representation equivalent to the original URL (if the original URL is valid).
+ `#511 <https://github.com/aio-libs/yarl/issues/511>`_
+ - Fixed requoting a single percent followed by a percent-encoded character in the Cython implementation.
+ `#514 <https://github.com/aio-libs/yarl/issues/514>`_
+ - Fix ValueError when decoding ``%`` which is not followed by two hexadecimal digits.
+ `#516 <https://github.com/aio-libs/yarl/issues/516>`_
+ - Fix decoding ``%`` followed by a space and hexadecimal digit.
+ `#520 <https://github.com/aio-libs/yarl/issues/520>`_
+ - Fix annotation of ``with_query()``/``update_query()`` methods for ``key=[val1, val2]`` case.
+ `#528 <https://github.com/aio-libs/yarl/issues/528>`_
+
+ Removal
+ -------
+
+ - Drop Python 3.5 support; Python 3.6 is the minimal supported Python version.
+
+
+ ----
+
+
+ 1.6.0 (2020-09-23)
+ ==================
+
+ Features
+ --------
+
+ - Allow for int and float subclasses in query, while still denying bool.
+ `#492 <https://github.com/aio-libs/yarl/issues/492>`_
+
+
+ Bugfixes
+ --------
+
+ - Do not requote arguments in ``URL.build()``, ``with_xxx()`` and in ``/`` operator.
+ `#502 <https://github.com/aio-libs/yarl/issues/502>`_
+ - Keep IPv6 brackets in ``origin()``.
+ `#504 <https://github.com/aio-libs/yarl/issues/504>`_
+
+
+ ----
+
+
+ 1.5.1 (2020-08-01)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fix including relocated internal ``yarl._quoting_c`` C-extension into published PyPI dists.
+ `#485 <https://github.com/aio-libs/yarl/issues/485>`_
+
+
+ Misc
+ ----
+
+ - `#484 <https://github.com/aio-libs/yarl/issues/484>`_
+
+
+ ----
+
+
+ 1.5.0 (2020-07-26)
+ ==================
+
+ Features
+ --------
+
+ - Convert host to lowercase on URL building.
+ `#386 <https://github.com/aio-libs/yarl/issues/386>`_
+ - Allow using the ``mod`` operator (``%``) for updating the query string (an alias for the ``update_query()`` method).
+ `#435 <https://github.com/aio-libs/yarl/issues/435>`_
+ - Allow use of sequences such as ``list`` and ``tuple`` in the values
+ of a mapping such as ``dict`` to represent that a key has many values::
+
+ url = URL("http://example.com")
+ assert url.with_query({"a": [1, 2]}) == URL("http://example.com/?a=1&a=2")
+
+ `#443 <https://github.com/aio-libs/yarl/issues/443>`_
+ - Support URL.build() with scheme and path (creates a relative URL).
+ `#464 <https://github.com/aio-libs/yarl/issues/464>`_
+ - Cache slow IDNA encode/decode calls.
+ `#476 <https://github.com/aio-libs/yarl/issues/476>`_
+ - Add ``@final`` / ``Final`` type hints
+ `#477 <https://github.com/aio-libs/yarl/issues/477>`_
+ - Support URL authority/raw_authority properties and authority argument of ``URL.build()`` method.
+ `#478 <https://github.com/aio-libs/yarl/issues/478>`_
+ - Hide the library implementation details and keep the exposed public list very clean.
+ `#483 <https://github.com/aio-libs/yarl/issues/483>`_
+
+
+ Bugfixes
+ --------
+
+ - Fix tests with newer Python (3.7.6, 3.8.1 and 3.9.0+).
+ `#409 <https://github.com/aio-libs/yarl/issues/409>`_
+ - Fix a bug where a query component, passed in the form of a mapping or sequence, was unquoted in an unexpected way.
+ `#426 <https://github.com/aio-libs/yarl/issues/426>`_
+ - Hide ``Query`` and ``QueryVariable`` type aliases in ``__init__.pyi``; they are now prefixed with an underscore.
+ `#431 <https://github.com/aio-libs/yarl/issues/431>`_
+ - Keep IPv6 brackets after updating port/user/password.
+ `#451 <https://github.com/aio-libs/yarl/issues/451>`_
+
+
+ ----
+
+
+ 1.4.2 (2019-12-05)
+ ==================
+
+ Features
+ --------
+
+ - Workaround for missing ``str.isascii()`` in Python 3.6
+ `#389 <https://github.com/aio-libs/yarl/issues/389>`_
+
+
+ ----
+
+
+ 1.4.1 (2019-11-29)
+ ==================
+
+ * Fix regression, make the library work on Python 3.5 and 3.6 again.
+
+ 1.4.0 (2019-11-29)
+ ==================
+
+ * Distinguish an empty password in URL from a password not provided at all (#262)
+
+ * Fixed annotations for optional parameters of ``URL.build`` (#309)
+
+ * Use None as default value of ``user`` parameter of ``URL.build`` (#309)
+
+ * Enforce building C Accelerated modules when installing from source tarball, use
+ ``YARL_NO_EXTENSIONS`` environment variable for falling back to (slower) Pure Python
+ implementation (#329)
+
+ * Drop Python 3.5 support
+
+ * Fix quoting of plus in path by pure python version (#339)
+
+ * Don't create a new URL if fragment is unchanged (#292)
+
+ * Include in the error message the path that produces the "starting slash forbidden" error (#376)
+
+ * Skip slow IDNA encoding for ASCII-only strings (#387)
+
+
+ 1.3.0 (2018-12-11)
+ ==================
+
+ * Fix annotations for ``query`` parameter (#207)
+
+ * An incoming query sequence can have int variables (the same as for
+ Mapping type) (#208)
+
+ * Add ``URL.explicit_port`` property (#218)
+
+ * Give a friendlier error when the port can't be converted to int (#168)
+
+ * ``bool(URL())`` now returns ``False`` (#272)
+
+ 1.2.6 (2018-06-14)
+ ==================
+
+ * Drop Python 3.4 trove classifier (#205)
+
+ 1.2.5 (2018-05-23)
+ ==================
+
+ * Fix annotations for ``build`` (#199)
+
+ 1.2.4 (2018-05-08)
+ ==================
+
+ * Fix annotations for ``cached_property`` (#195)
+
+ 1.2.3 (2018-05-03)
+ ==================
+
+ * Accept ``str`` subclasses in ``URL`` constructor (#190)
+
+ 1.2.2 (2018-05-01)
+ ==================
+
+ * Fix build
+
+ 1.2.1 (2018-04-30)
+ ==================
+
+ * Pin minimal required Python to 3.5.3 (#189)
+
+ 1.2.0 (2018-04-30)
+ ==================
+
+ * Forbid inheritance, replace ``__init__`` with ``__new__`` (#171)
+
+ * Support PEP-561 (provide type hinting marker) (#182)
+
+ 1.1.1 (2018-02-17)
+ ==================
+
+ * Fix performance regression: don't encode empty netloc (#170)
+
+ 1.1.0 (2018-01-21)
+ ==================
+
+ * Make pure Python quoter consistent with Cython version (#162)
+
+ 1.0.0 (2018-01-15)
+ ==================
+
+ * Use fast path if quoted string does not need requoting (#154)
+
+ * Speed up quoting/unquoting by ``_Quoter`` and ``_Unquoter`` classes (#155)
+
+ * Drop ``yarl.quote`` and ``yarl.unquote`` public functions (#155)
+
+ * Add custom string writer, reuse static buffer if available (#157)
+ Code is 50-80 times faster than Pure Python version (was 4-5 times faster)
+
+ * Don't recode IP zone (#144)
+
+ * Support ``encoded=True`` in ``yarl.URL.build()`` (#158)
+
+ * Fix updating query with multiple keys (#160)
+
+ 0.18.0 (2018-01-10)
+ ===================
+
+ * Fallback to IDNA 2003 if domain name is not IDNA 2008 compatible (#152)
+
+ 0.17.0 (2017-12-30)
+ ===================
+
+ * Use IDNA 2008 for domain name processing (#149)
+
+ 0.16.0 (2017-12-07)
+ ===================
+
+ * Fix raising ``TypeError`` by ``url.query_string()`` after
+ ``url.with_query({})`` (empty mapping) (#141)
+
+ 0.15.0 (2017-11-23)
+ ===================
+
+ * Add ``raw_path_qs`` attribute (#137)
+
+ 0.14.2 (2017-11-14)
+ ===================
+
+ * Restore ``strict`` parameter as no-op in ``quote`` / ``unquote``
+
+ 0.14.1 (2017-11-13)
+ ===================
+
+ * Restore ``strict`` parameter as no-op for sake of compatibility with
+ aiohttp 2.2
+
+ 0.14.0 (2017-11-11)
+ ===================
+
+ * Drop strict mode (#123)
+
+ * Fix ``"ValueError: Unallowed PCT %"`` when there's a ``"%"`` in the url (#124)
+
+ 0.13.0 (2017-10-01)
+ ===================
+
+ * Document ``encoded`` parameter (#102)
+
+ * Support relative urls like ``'?key=value'`` (#100)
+
+ * Unsafe encoding for QS fixed. Encode ``;`` char in value param (#104)
+
+ * Process passwords without user names (#95)
+
+ 0.12.0 (2017-06-26)
+ ===================
+
+ * Properly support paths without leading slash in ``URL.with_path()`` (#90)
+
+ * Enable type annotation checks
+
+ 0.11.0 (2017-06-26)
+ ===================
+
+ * Normalize path (#86)
+
+ * Clear query and fragment parts in ``.with_path()`` (#85)
+
+ 0.10.3 (2017-06-13)
+ ===================
+
+ * Prevent double URL args unquoting (#83)
+
+ 0.10.2 (2017-05-05)
+ ===================
+
+ * Unexpected hash behaviour (#75)
+
+
+ 0.10.1 (2017-05-03)
+ ===================
+
+ * Unexpected compare behaviour (#73)
+
+ * Do not quote or unquote + if not a query string. (#74)
+
+
+ 0.10.0 (2017-03-14)
+ ===================
+
+ * Added ``URL.build`` class method (#58)
+
+ * Added ``path_qs`` attribute (#42)
+
+
+ 0.9.8 (2017-02-16)
+ ==================
+
+ * Do not quote ``:`` in path
+
+
+ 0.9.7 (2017-02-16)
+ ==================
+
+ * Load from pickle without _cache (#56)
+
+ * Percent-encoded pluses in path variables become spaces (#59)
+
+
+ 0.9.6 (2017-02-15)
+ ==================
+
+ * Revert backward incompatible change (BaseURL)
+
+
+ 0.9.5 (2017-02-14)
+ ==================
+
+ * Fix BaseURL rich comparison support
+
+
+ 0.9.4 (2017-02-14)
+ ==================
+
+ * Use BaseURL
+
+
+ 0.9.3 (2017-02-14)
+ ==================
+
+ * Added BaseURL
+
+
+ 0.9.2 (2017-02-08)
+ ==================
+
+ * Remove debug print
+
+
+ 0.9.1 (2017-02-07)
+ ==================
+
+ * Do not lose tail chars (#45)
+
+
+ 0.9.0 (2017-02-07)
+ ==================
+
+ * Allow quoting ``%`` in non-strict mode (#21)
+
+ * Incorrect parsing of query parameters with %3B (;) inside (#34)
+
+ * Fix core dumps (#41)
+
+ * tmpbuf - compiling error (#43)
+
+ * Added ``URL.update_path()`` method
+
+ * Added ``URL.update_query()`` method (#47)
+
+
+ 0.8.1 (2016-12-03)
+ ==================
+
+ * Fix broken aiohttp: revert ``quote`` / ``unquote``.
+
+
+ 0.8.0 (2016-12-03)
+ ==================
+
+ * Support more verbose error messages in ``.with_query()`` (#24)
+
+ * Don't percent-encode ``@`` and ``:`` in path (#32)
+
+ * Don't expose ``yarl.quote`` and ``yarl.unquote``; these functions are
+ part of the private API
+
+ 0.7.1 (2016-11-18)
+ ==================
+
+ * Accept not only ``str`` but also all classes inherited from ``str`` (#25)
+
+ 0.7.0 (2016-11-07)
+ ==================
+
+ * Accept ``int`` as value for ``.with_query()``
+
+ 0.6.0 (2016-11-07)
+ ==================
+
+ * Explicitly use UTF8 encoding in setup.py (#20)
+ * Properly unquote non-UTF8 strings (#19)
+
+ 0.5.3 (2016-11-02)
+ ==================
+
+ * Don't use namedtuple fields but indexes on URL construction
+
+ 0.5.2 (2016-11-02)
+ ==================
+
+ * Inline ``_encode`` class method
+
+ 0.5.1 (2016-11-02)
+ ==================
+
+ * Make URL construction faster by removing extra classmethod calls
+
+ 0.5.0 (2016-11-02)
+ ==================
+
+ * Add cython optimization for quoting/unquoting
+ * Provide binary wheels
+
+ 0.4.3 (2016-09-29)
+ ==================
+
+ * Fix typing stubs
+
+ 0.4.2 (2016-09-29)
+ ==================
+
+ * Expose ``quote()`` and ``unquote()`` as public API
+
+ 0.4.1 (2016-09-28)
+ ==================
+
+ * Support empty values in query (``'/path?arg'``)
+
+ 0.4.0 (2016-09-27)
+ ==================
+
+ * Introduce ``relative()`` (#16)
+
+ 0.3.2 (2016-09-27)
+ ==================
+
+ * Typo fixes #15
+
+ 0.3.1 (2016-09-26)
+ ==================
+
+ * Support sequence of pairs as ``with_query()`` parameter
+
+ 0.3.0 (2016-09-26)
+ ==================
+
+ * Introduce ``is_default_port()``
+
+ 0.2.1 (2016-09-26)
+ ==================
+
+ * Raise ValueError for URLs like 'http://:8080/'
+
+ 0.2.0 (2016-09-18)
+ ==================
+
+ * Avoid doubling slashes when joining paths (#13)
+
+ * Appending path starting from slash is forbidden (#12)
+
+ 0.1.4 (2016-09-09)
+ ==================
+
+ * Add kwargs support for ``with_query()`` (#10)
+
+ 0.1.3 (2016-09-07)
+ ==================
+
+ * Document ``with_query()``, ``with_fragment()`` and ``origin()``
+
+ * Allow ``None`` for ``with_query()`` and ``with_fragment()``
+
+ 0.1.2 (2016-09-07)
+ ==================
+
+ * Fix links, tune docs theme.
+
+ 0.1.1 (2016-09-06)
+ ==================
+
+ * Update README; the old version used an obsolete API
+
+ 0.1.0 (2016-09-06)
+ ==================
+
+ * The library was deeply refactored; bytes are gone, but all
+ accepted strings are encoded if needed.
+
+ 0.0.1 (2016-08-30)
+ ==================
+
+ * The first release.
+
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Internet :: WWW/HTTP
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
diff --git a/third_party/python/yarl/README.rst b/third_party/python/yarl/README.rst
new file mode 100644
index 0000000000..6347ece2b9
--- /dev/null
+++ b/third_party/python/yarl/README.rst
@@ -0,0 +1,202 @@
+yarl
+====
+
+.. image:: https://github.com/aio-libs/yarl/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/yarl/actions?query=workflow%3ACI
+ :align: right
+
+.. image:: https://codecov.io/gh/aio-libs/yarl/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/yarl
+
+.. image:: https://badge.fury.io/py/yarl.svg
+ :target: https://badge.fury.io/py/yarl
+
+
+.. image:: https://readthedocs.org/projects/yarl/badge/?version=latest
+ :target: https://yarl.readthedocs.io
+
+
+.. image:: https://img.shields.io/pypi/pyversions/yarl.svg
+ :target: https://pypi.python.org/pypi/yarl
+
+.. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+Introduction
+------------
+
+A URL is constructed from ``str``:
+
+.. code-block:: pycon
+
+ >>> from yarl import URL
+ >>> url = URL('https://www.python.org/~guido?arg=1#frag')
+ >>> url
+ URL('https://www.python.org/~guido?arg=1#frag')
+
+All URL parts: *scheme*, *user*, *password*, *host*, *port*, *path*,
+*query* and *fragment* are accessible via properties:
+
+.. code-block:: pycon
+
+ >>> url.scheme
+ 'https'
+ >>> url.host
+ 'www.python.org'
+ >>> url.path
+ '/~guido'
+ >>> url.query_string
+ 'arg=1'
+ >>> url.query
+ <MultiDictProxy('arg': '1')>
+ >>> url.fragment
+ 'frag'
+
+All URL manipulations produce a new URL object:
+
+.. code-block:: pycon
+
+ >>> url = URL('https://www.python.org')
+ >>> url / 'foo' / 'bar'
+ URL('https://www.python.org/foo/bar')
+ >>> url / 'foo' % {'bar': 'baz'}
+ URL('https://www.python.org/foo?bar=baz')
+
+Strings passed to the constructor and to modification methods are
+automatically encoded, giving the canonical representation as a result:
+
+.. code-block:: pycon
+
+ >>> url = URL('https://www.python.org/путь')
+ >>> url
+ URL('https://www.python.org/%D0%BF%D1%83%D1%82%D1%8C')
+
+Regular properties are *percent-decoded*; use the ``raw_`` versions to
+get *encoded* strings:
+
+.. code-block:: pycon
+
+ >>> url.path
+ '/путь'
+
+ >>> url.raw_path
+ '/%D0%BF%D1%83%D1%82%D1%8C'
+
+A human-readable representation of the URL is available as ``.human_repr()``:
+
+.. code-block:: pycon
+
+ >>> url.human_repr()
+ 'https://www.python.org/путь'
+
+For full documentation please read https://yarl.readthedocs.org.
+
+
+Installation
+------------
+
+::
+
+ $ pip install yarl
+
+The library is Python 3 only!
+
+PyPI contains binary wheels for Linux, Windows and macOS. If you install
+``yarl`` on another operating system (such as *Alpine Linux*, which is not
+manylinux-compliant because it lacks glibc and therefore cannot use our
+wheels), the tarball will be used to compile the library from the source
+code. This requires a C compiler and Python headers to be installed.
+
+To skip the compilation, you must explicitly opt in by setting the ``YARL_NO_EXTENSIONS``
+environment variable to a non-empty value, e.g.:
+
+.. code-block:: bash
+
+ $ YARL_NO_EXTENSIONS=1 pip install yarl
+
+Please note that the pure-Python (uncompiled) version is much slower. However,
+PyPy always uses a pure-Python implementation, and, as such, it is unaffected
+by this variable.
+
+Dependencies
+------------
+
+YARL requires the multidict_ library.
+
+
+API documentation
+------------------
+
+The documentation is located at https://yarl.readthedocs.org
+
+
+Why isn't boolean supported by the URL query API?
+-------------------------------------------------
+
+There is no standard for representing boolean values.
+
+Some systems prefer ``true``/``false``, others like ``yes``/``no``, ``on``/``off``,
+``Y``/``N``, ``1``/``0``, etc.
+
+``yarl`` cannot make an unambiguous decision on how to serialize ``bool`` values because
+it is specific to how the end-user's application is built and would be different for
+different apps. The library doesn't accept booleans in the API; a user should convert
+bools into strings using their own preferred convention.
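+
+One workable convention, shown here purely as an illustration:
+
+.. code-block:: pycon
+
+    >>> from yarl import URL
+    >>> url = URL('https://example.com')
+    >>> url.with_query(flag=str(True).lower())
+    URL('https://example.com/?flag=true')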
+
+
+Comparison with other URL libraries
+------------------------------------
+
+* furl (https://pypi.python.org/pypi/furl)
+
+ The library has rich functionality but the ``furl`` object is mutable.
+
+ I'm afraid to pass this object into foreign code: who knows whether the
+ code will modify my URL in a terrible way, while I just want to send a URL
+ with handy helpers for accessing URL properties.
+
+ ``furl`` has other non-obvious tricky things but the main objection
+ is mutability.
+
+* URLObject (https://pypi.python.org/pypi/URLObject)
+
+ URLObject is immutable, which is pretty good.
+
+ Every URL change generates a new URL object.
+
+ But the library doesn't do any decode/encode transformations, leaving the
+ end user to cope with these gory details.
+
+
+Source code
+-----------
+
+The project is hosted on GitHub_.
+
+Please file an issue on the `bug tracker
+<https://github.com/aio-libs/yarl/issues>`_ if you have found a bug
+or have a suggestion for improving the library.
+
+The library uses `Azure Pipelines <https://dev.azure.com/aio-libs/yarl>`_ for
+Continuous Integration.
+
+Discussion list
+---------------
+
+*aio-libs* google group: https://groups.google.com/forum/#!forum/aio-libs
+
+Feel free to post your questions and ideas here.
+
+
+Authors and License
+-------------------
+
+The ``yarl`` package is written by Andrew Svetlov.
+
+It's *Apache 2* licensed and freely available.
+
+
+.. _GitHub: https://github.com/aio-libs/yarl
+
+.. _multidict: https://github.com/aio-libs/multidict
diff --git a/third_party/python/yarl/pyproject.toml b/third_party/python/yarl/pyproject.toml
new file mode 100644
index 0000000000..3cd69a29d5
--- /dev/null
+++ b/third_party/python/yarl/pyproject.toml
@@ -0,0 +1,7 @@
+[tool.towncrier]
+package = "yarl"
+filename = "CHANGES.rst"
+directory = "CHANGES/"
+title_format = "{version} ({project_date})"
+template = "CHANGES/.TEMPLATE.rst"
+issue_format = "`#{issue} <https://github.com/aio-libs/yarl/issues/{issue}>`_"
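+
+# With this configuration, towncrier compiles news fragments from the
+# CHANGES/ directory into CHANGES.rst. Fragments are named "<issue>.<type>"
+# (for example, CHANGES/535.bugfix for issue #535), and running
+# "towncrier --draft" previews the generated section without writing it.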
diff --git a/third_party/python/yarl/setup.cfg b/third_party/python/yarl/setup.cfg
new file mode 100644
index 0000000000..7515097649
--- /dev/null
+++ b/third_party/python/yarl/setup.cfg
@@ -0,0 +1,27 @@
+[metadata]
+license_file = LICENSE
+
+[tool:pytest]
+addopts = --cov=yarl -v
+filterwarnings = error
+norecursedirs = dist docs build .tox .eggs venv virtualenv .git
+minversion = 3.8.2
+testpaths = tests/
+junit_suite_name = yarl_test_suite
+
+[flake8]
+ignore = E203,E301,E302,E704,W503,W504,F811
+max-line-length = 88
+
+[mypy]
+
+[mypy-idna]
+ignore_missing_imports = true
+
+[mypy-pytest]
+ignore_missing_imports = true
+
+[egg_info]
+tag_build =
+tag_date = 0
+
diff --git a/third_party/python/yarl/setup.py b/third_party/python/yarl/setup.py
new file mode 100644
index 0000000000..d47cabcb9d
--- /dev/null
+++ b/third_party/python/yarl/setup.py
@@ -0,0 +1,83 @@
+import os
+import pathlib
+import re
+import sys
+
+from setuptools import setup, Extension
+
+
+if sys.version_info < (3, 5):
+ raise RuntimeError("yarl 1.4+ requires Python 3.5+")
+
+
+NO_EXTENSIONS = bool(os.environ.get("YARL_NO_EXTENSIONS")) # type: bool
+
+if sys.implementation.name != "cpython":
+ NO_EXTENSIONS = True
+
+
+extensions = [Extension("yarl._quoting_c", ["yarl/_quoting_c.c"])]
+# For a debug build of the C extension, pass extra_compile_args=["-g"]
+# and extra_link_args=["-g"] to the Extension above.
+
+
+here = pathlib.Path(__file__).parent
+fname = here / "yarl" / "__init__.py"
+
+with fname.open(encoding="utf8") as fp:
+ try:
+ version = re.findall(r'^__version__ = "([^"]+)"$', fp.read(), re.M)[0]
+ except IndexError:
+ raise RuntimeError("Unable to determine version.")
+
+install_requires = [
+ "multidict>=4.0",
+ "idna>=2.0",
+ 'typing_extensions>=3.7.4;python_version<"3.8"',
+]
+
+
+def read(name):
+ fname = here / name
+ with fname.open(encoding="utf8") as f:
+ return f.read()
+
+
+args = dict(
+ name="yarl",
+ version=version,
+ description=("Yet another URL library"),
+ long_description="\n\n".join([read("README.rst"), read("CHANGES.rst")]),
+ long_description_content_type="text/x-rst",
+ classifiers=[
+ "License :: OSI Approved :: Apache Software License",
+ "Intended Audience :: Developers",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Programming Language :: Python :: 3.6",
+ "Programming Language :: Python :: 3.7",
+ "Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Topic :: Internet :: WWW/HTTP",
+ ],
+ author="Andrew Svetlov",
+ author_email="andrew.svetlov@gmail.com",
+ url="https://github.com/aio-libs/yarl/",
+ license="Apache 2",
+ packages=["yarl"],
+ install_requires=install_requires,
+ python_requires=">=3.6",
+ include_package_data=True,
+)
+
+
+if not NO_EXTENSIONS:
+ print("**********************")
+ print("* Accellerated build *")
+ print("**********************")
+ setup(ext_modules=extensions, **args)
+else:
+ print("*********************")
+ print("* Pure Python build *")
+ print("*********************")
+ setup(**args)
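+
+
+# Illustrative invocations from a source checkout (not upstream code):
+#   pip install .                        # builds the C extension when possible
+#   YARL_NO_EXTENSIONS=1 pip install .   # forces the pure-Python build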
diff --git a/third_party/python/yarl/yarl.egg-info/PKG-INFO b/third_party/python/yarl/yarl.egg-info/PKG-INFO
new file mode 100644
index 0000000000..3c242e5135
--- /dev/null
+++ b/third_party/python/yarl/yarl.egg-info/PKG-INFO
@@ -0,0 +1,797 @@
+Metadata-Version: 2.1
+Name: yarl
+Version: 1.6.3
+Summary: Yet another URL library
+Home-page: https://github.com/aio-libs/yarl/
+Author: Andrew Svetlov
+Author-email: andrew.svetlov@gmail.com
+License: Apache 2
+Description: yarl
+ ====
+
+ .. image:: https://github.com/aio-libs/yarl/workflows/CI/badge.svg
+ :target: https://github.com/aio-libs/yarl/actions?query=workflow%3ACI
+ :align: right
+
+ .. image:: https://codecov.io/gh/aio-libs/yarl/branch/master/graph/badge.svg
+ :target: https://codecov.io/gh/aio-libs/yarl
+
+ .. image:: https://badge.fury.io/py/yarl.svg
+ :target: https://badge.fury.io/py/yarl
+
+
+ .. image:: https://readthedocs.org/projects/yarl/badge/?version=latest
+ :target: https://yarl.readthedocs.io
+
+
+ .. image:: https://img.shields.io/pypi/pyversions/yarl.svg
+ :target: https://pypi.python.org/pypi/yarl
+
+ .. image:: https://badges.gitter.im/Join%20Chat.svg
+ :target: https://gitter.im/aio-libs/Lobby
+ :alt: Chat on Gitter
+
+ Introduction
+ ------------
+
+ A URL is constructed from ``str``:
+
+ .. code-block:: pycon
+
+ >>> from yarl import URL
+ >>> url = URL('https://www.python.org/~guido?arg=1#frag')
+ >>> url
+ URL('https://www.python.org/~guido?arg=1#frag')
+
+ All URL parts: *scheme*, *user*, *password*, *host*, *port*, *path*,
+ *query* and *fragment* are accessible via properties:
+
+ .. code-block:: pycon
+
+ >>> url.scheme
+ 'https'
+ >>> url.host
+ 'www.python.org'
+ >>> url.path
+ '/~guido'
+ >>> url.query_string
+ 'arg=1'
+ >>> url.query
+ <MultiDictProxy('arg': '1')>
+ >>> url.fragment
+ 'frag'
+
+ All URL manipulations produce a new URL object:
+
+ .. code-block:: pycon
+
+ >>> url = URL('https://www.python.org')
+ >>> url / 'foo' / 'bar'
+ URL('https://www.python.org/foo/bar')
+ >>> url / 'foo' % {'bar': 'baz'}
+ URL('https://www.python.org/foo?bar=baz')
+
+ Strings passed to the constructor and to modification methods are
+ automatically encoded, giving the canonical representation as a result:
+
+ .. code-block:: pycon
+
+ >>> url = URL('https://www.python.org/путь')
+ >>> url
+ URL('https://www.python.org/%D0%BF%D1%83%D1%82%D1%8C')
+
+ Regular properties are *percent-decoded*; use the ``raw_`` versions to
+ get *encoded* strings:
+
+ .. code-block:: pycon
+
+ >>> url.path
+ '/путь'
+
+ >>> url.raw_path
+ '/%D0%BF%D1%83%D1%82%D1%8C'
+
+ A human-readable representation of the URL is available as ``.human_repr()``:
+
+ .. code-block:: pycon
+
+ >>> url.human_repr()
+ 'https://www.python.org/путь'
+
+ For full documentation please read https://yarl.readthedocs.org.
+
+
+ Installation
+ ------------
+
+ ::
+
+ $ pip install yarl
+
+ The library is Python 3 only!
+
+ PyPI contains binary wheels for Linux, Windows and macOS. If you install
+ ``yarl`` on another operating system (such as *Alpine Linux*, which is not
+ manylinux-compliant because it lacks glibc and therefore cannot use our
+ wheels), the tarball will be used to compile the library from the source
+ code. This requires a C compiler and Python headers to be installed.
+
+ To skip the compilation, you must explicitly opt in by setting the ``YARL_NO_EXTENSIONS``
+ environment variable to a non-empty value, e.g.:
+
+ .. code-block:: bash
+
+ $ YARL_NO_EXTENSIONS=1 pip install yarl
+
+ Please note that the pure-Python (uncompiled) version is much slower. However,
+ PyPy always uses a pure-Python implementation, and, as such, it is unaffected
+ by this variable.
+
+ Dependencies
+ ------------
+
+ YARL requires the multidict_ library.
+
+
+ API documentation
+ ------------------
+
+ The documentation is located at https://yarl.readthedocs.org
+
+
+ Why isn't boolean supported by the URL query API?
+ -------------------------------------------------
+
+ There is no standard for representing boolean values.
+
+ Some systems prefer ``true``/``false``, others like ``yes``/``no``, ``on``/``off``,
+ ``Y``/``N``, ``1``/``0``, etc.
+
+ ``yarl`` cannot make an unambiguous decision on how to serialize ``bool`` values because
+ it is specific to how the end-user's application is built and would be different for
+ different apps. The library doesn't accept booleans in the API; a user should convert
+ bools into strings using their own preferred convention.
+
+
+ Comparison with other URL libraries
+ ------------------------------------
+
+ * furl (https://pypi.python.org/pypi/furl)
+
+ The library has rich functionality but the ``furl`` object is mutable.
+
+ I'm afraid to pass this object into foreign code: who knows whether the
+ code will modify my URL in a terrible way, while I just want to send a URL
+ with handy helpers for accessing URL properties.
+
+ ``furl`` has other non-obvious tricky things but the main objection
+ is mutability.
+
+ * URLObject (https://pypi.python.org/pypi/URLObject)
+
+ URLObject is immutable, which is pretty good.
+
+ Every URL change generates a new URL object.
+
+ But the library doesn't do any decode/encode transformations, leaving the
+ end user to cope with these gory details.
+
+
+ Source code
+ -----------
+
+ The project is hosted on GitHub_.
+
+ Please file an issue on the `bug tracker
+ <https://github.com/aio-libs/yarl/issues>`_ if you have found a bug
+ or have a suggestion for improving the library.
+
+ The library uses `Azure Pipelines <https://dev.azure.com/aio-libs/yarl>`_ for
+ Continuous Integration.
+
+ Discussion list
+ ---------------
+
+ *aio-libs* Google group: https://groups.google.com/forum/#!forum/aio-libs
+
+ Feel free to post your questions and ideas here.
+
+
+ Authors and License
+ -------------------
+
+ The ``yarl`` package is written by Andrew Svetlov.
+
+ It's *Apache 2* licensed and freely available.
+
+
+ .. _GitHub: https://github.com/aio-libs/yarl
+
+ .. _multidict: https://github.com/aio-libs/multidict
+
+
+ =========
+ Changelog
+ =========
+
+ ..
+ You should *NOT* be adding new change log entries to this file; this
+ file is managed by towncrier. You *may* edit previous change logs to
+ fix problems such as typos.
+ To add a new change log entry, please see
+ https://pip.pypa.io/en/latest/development/#adding-a-news-entry
+ (we named the news folder "changes").
+
+ WARNING: Don't drop the next directive!
+
+ .. towncrier release notes start
+
+ 1.6.3 (2020-11-14)
+ ==================
+
+ Bugfixes
+ --------
+
+ - No longer lose characters when decoding incorrect percent-sequences (like ``%e2%82%f8``). All non-decodable percent-sequences are now preserved.
+ `#517 <https://github.com/aio-libs/yarl/issues/517>`_
+ - Provide x86 Windows wheels.
+ `#535 <https://github.com/aio-libs/yarl/issues/535>`_
+
+
+ ----
+
+
+ 1.6.2 (2020-10-12)
+ ==================
+
+
+ Bugfixes
+ --------
+
+ - Provide generated ``.c`` files in the tarball distribution.
+ `#530 <https://github.com/aio-libs/multidict/issues/530>`_
+
+ 1.6.1 (2020-10-12)
+ ==================
+
+ Features
+ --------
+
+ - Provide wheels for ``aarch64``, ``i686``, ``ppc64le``, ``s390x`` architectures on
+ Linux as well as ``x86_64``.
+ `#507 <https://github.com/aio-libs/yarl/issues/507>`_
+ - Provide wheels for Python 3.9.
+ `#526 <https://github.com/aio-libs/yarl/issues/526>`_
+
+ Bugfixes
+ --------
+
+ - ``human_repr()`` now always produces a valid representation equivalent to the original URL (if the original URL is valid).
+ `#511 <https://github.com/aio-libs/yarl/issues/511>`_
+ - Fixed requoting a single percent followed by a percent-encoded character in the Cython implementation.
+ `#514 <https://github.com/aio-libs/yarl/issues/514>`_
+ - Fix ValueError when decoding ``%`` which is not followed by two hexadecimal digits.
+ `#516 <https://github.com/aio-libs/yarl/issues/516>`_
+ - Fix decoding ``%`` followed by a space and hexadecimal digit.
+ `#520 <https://github.com/aio-libs/yarl/issues/520>`_
+ - Fix annotation of ``with_query()``/``update_query()`` methods for ``key=[val1, val2]`` case.
+ `#528 <https://github.com/aio-libs/yarl/issues/528>`_
+
+ Removal
+ -------
+
+ - Drop Python 3.5 support; Python 3.6 is the minimum supported Python version.
+
+
+ ----
+
+
+ 1.6.0 (2020-09-23)
+ ==================
+
+ Features
+ --------
+
+ - Allow for int and float subclasses in query, while still denying bool.
+ `#492 <https://github.com/aio-libs/yarl/issues/492>`_
+
+
+ Bugfixes
+ --------
+
+ - Do not requote arguments in ``URL.build()``, ``with_xxx()`` and in the ``/`` operator.
+ `#502 <https://github.com/aio-libs/yarl/issues/502>`_
+ - Keep IPv6 brackets in ``origin()``.
+ `#504 <https://github.com/aio-libs/yarl/issues/504>`_
+
+
+ ----
+
+
+ 1.5.1 (2020-08-01)
+ ==================
+
+ Bugfixes
+ --------
+
+ - Fix including relocated internal ``yarl._quoting_c`` C-extension into published PyPI dists.
+ `#485 <https://github.com/aio-libs/yarl/issues/485>`_
+
+
+ Misc
+ ----
+
+ - `#484 <https://github.com/aio-libs/yarl/issues/484>`_
+
+
+ ----
+
+
+ 1.5.0 (2020-07-26)
+ ==================
+
+ Features
+ --------
+
+ - Convert host to lowercase on URL building.
+ `#386 <https://github.com/aio-libs/yarl/issues/386>`_
+ - Allow using ``mod`` operator (`%`) for updating query string (an alias for ``update_query()`` method).
+ `#435 <https://github.com/aio-libs/yarl/issues/435>`_
+ - Allow use of sequences such as ``list`` and ``tuple`` in the values
+ of a mapping such as ``dict`` to represent that a key has many values::
+
+ url = URL("http://example.com")
+ assert url.with_query({"a": [1, 2]}) == URL("http://example.com/?a=1&a=2")
+
+ `#443 <https://github.com/aio-libs/yarl/issues/443>`_
+ - Support ``URL.build()`` with scheme and path (creates a relative URL).
+ `#464 <https://github.com/aio-libs/yarl/issues/464>`_
+ - Cache slow IDNA encode/decode calls.
+ `#476 <https://github.com/aio-libs/yarl/issues/476>`_
+ - Add ``@final`` / ``Final`` type hints.
+ `#477 <https://github.com/aio-libs/yarl/issues/477>`_
+ - Support URL authority/raw_authority properties and authority argument of ``URL.build()`` method.
+ `#478 <https://github.com/aio-libs/yarl/issues/478>`_
+ - Hide the library implementation details and make the exposed public list very clean.
+ `#483 <https://github.com/aio-libs/yarl/issues/483>`_
+
+
+ Bugfixes
+ --------
+
+ - Fix tests with newer Python (3.7.6, 3.8.1 and 3.9.0+).
+ `#409 <https://github.com/aio-libs/yarl/issues/409>`_
+ - Fix a bug where a query component, passed in the form of a mapping or sequence, was unquoted in an unexpected way.
+ `#426 <https://github.com/aio-libs/yarl/issues/426>`_
+ - Hide the `Query` and `QueryVariable` type aliases in `__init__.pyi`; they are now prefixed with an underscore.
+ `#431 <https://github.com/aio-libs/yarl/issues/431>`_
+ - Keep IPv6 brackets after updating port/user/password.
+ `#451 <https://github.com/aio-libs/yarl/issues/451>`_
+
+
+ ----
+
+
+ 1.4.2 (2019-12-05)
+ ==================
+
+ Features
+ --------
+
+ - Workaround for missing `str.isascii()` in Python 3.6.
+ `#389 <https://github.com/aio-libs/yarl/issues/389>`_
+
+
+ ----
+
+
+ 1.4.1 (2019-11-29)
+ ==================
+
+ * Fix regression, make the library work on Python 3.5 and 3.6 again.
+
+ 1.4.0 (2019-11-29)
+ ==================
+
+ * Distinguish an empty password in URL from a password not provided at all (#262)
+
+ * Fixed annotations for optional parameters of ``URL.build`` (#309)
+
+ * Use None as default value of ``user`` parameter of ``URL.build`` (#309)
+
+ * Enforce building C-accelerated modules when installing from the source tarball; use the
+   ``YARL_NO_EXTENSIONS`` environment variable to fall back to the (slower) pure-Python
+   implementation (#329)
+
+ * Drop Python 3.5 support
+
+ * Fix quoting of plus in path in the pure-Python version (#339)
+
+ * Don't create a new URL if fragment is unchanged (#292)
+
+ * Include in the error message the path that produces the "starting slash forbidden" error (#376)
+
+ * Skip slow IDNA encoding for ASCII-only strings (#387)
+
+
+ 1.3.0 (2018-12-11)
+ ==================
+
+ * Fix annotations for ``query`` parameter (#207)
+
+ * An incoming query sequence can have ``int`` values (the same as for the
+   Mapping type) (#208)
+
+ * Add ``URL.explicit_port`` property (#218)
+
+ * Give a friendlier error when the port can't be converted to int (#168)
+
+ * ``bool(URL())`` now returns ``False`` (#272)
+
+ 1.2.6 (2018-06-14)
+ ==================
+
+ * Drop Python 3.4 trove classifier (#205)
+
+ 1.2.5 (2018-05-23)
+ ==================
+
+ * Fix annotations for ``build`` (#199)
+
+ 1.2.4 (2018-05-08)
+ ==================
+
+ * Fix annotations for ``cached_property`` (#195)
+
+ 1.2.3 (2018-05-03)
+ ==================
+
+ * Accept ``str`` subclasses in ``URL`` constructor (#190)
+
+ 1.2.2 (2018-05-01)
+ ==================
+
+ * Fix build
+
+ 1.2.1 (2018-04-30)
+ ==================
+
+ * Pin minimum required Python to 3.5.3 (#189)
+
+ 1.2.0 (2018-04-30)
+ ==================
+
+ * Forbid inheritance, replace ``__init__`` with ``__new__`` (#171)
+
+ * Support PEP-561 (provide type hinting marker) (#182)
+
+ 1.1.1 (2018-02-17)
+ ==================
+
+ * Fix performance regression: don't encode empty netloc (#170)
+
+ 1.1.0 (2018-01-21)
+ ==================
+
+ * Make pure Python quoter consistent with Cython version (#162)
+
+ 1.0.0 (2018-01-15)
+ ==================
+
+ * Use fast path if quoted string does not need requoting (#154)
+
+ * Speed up quoting/unquoting by ``_Quoter`` and ``_Unquoter`` classes (#155)
+
+ * Drop ``yarl.quote`` and ``yarl.unquote`` public functions (#155)
+
+ * Add custom string writer, reuse static buffer if available (#157)
+   Code is 50-80 times faster than the pure-Python version (was 4-5 times faster)
+
+ * Don't recode IP zone (#144)
+
+ * Support ``encoded=True`` in ``yarl.URL.build()`` (#158)
+
+ * Fix updating query with multiple keys (#160)
+
+ 0.18.0 (2018-01-10)
+ ===================
+
+ * Fall back to IDNA 2003 if the domain name is not IDNA 2008 compatible (#152)
+
+ 0.17.0 (2017-12-30)
+ ===================
+
+ * Use IDNA 2008 for domain name processing (#149)
+
+ 0.16.0 (2017-12-07)
+ ===================
+
+ * Fix raising ``TypeError`` by ``url.query_string()`` after
+ ``url.with_query({})`` (empty mapping) (#141)
+
+ 0.15.0 (2017-11-23)
+ ===================
+
+ * Add ``raw_path_qs`` attribute (#137)
+
+ 0.14.2 (2017-11-14)
+ ===================
+
+ * Restore ``strict`` parameter as no-op in ``quote`` / ``unquote``
+
+ 0.14.1 (2017-11-13)
+ ===================
+
+ * Restore ``strict`` parameter as no-op for sake of compatibility with
+ aiohttp 2.2
+
+ 0.14.0 (2017-11-11)
+ ===================
+
+ * Drop strict mode (#123)
+
+ * Fix ``"ValueError: Unallowed PCT %"`` when there's a ``"%"`` in the url (#124)
+
+ 0.13.0 (2017-10-01)
+ ===================
+
+ * Document ``encoded`` parameter (#102)
+
+ * Support relative URLs like ``'?key=value'`` (#100)
+
+ * Fix unsafe encoding for query strings; encode the ``;`` char in value params (#104)
+
+ * Process passwords without user names (#95)
+
+ 0.12.0 (2017-06-26)
+ ===================
+
+ * Properly support paths without leading slash in ``URL.with_path()`` (#90)
+
+ * Enable type annotation checks
+
+ 0.11.0 (2017-06-26)
+ ===================
+
+ * Normalize path (#86)
+
+ * Clear query and fragment parts in ``.with_path()`` (#85)
+
+ 0.10.3 (2017-06-13)
+ ===================
+
+ * Prevent double URL args unquoting (#83)
+
+ 0.10.2 (2017-05-05)
+ ===================
+
+ * Fix unexpected hash behaviour (#75)
+
+
+ 0.10.1 (2017-05-03)
+ ===================
+
+ * Fix unexpected compare behaviour (#73)
+
+ * Do not quote or unquote ``+`` outside of query strings (#74)
+
+
+ 0.10.0 (2017-03-14)
+ ===================
+
+ * Added ``URL.build`` class method (#58)
+
+ * Added ``path_qs`` attribute (#42)
+
+
+ 0.9.8 (2017-02-16)
+ ==================
+
+ * Do not quote ``:`` in path
+
+
+ 0.9.7 (2017-02-16)
+ ==================
+
+ * Load from pickle without _cache (#56)
+
+ * Percent-encoded pluses in path variables become spaces (#59)
+
+
+ 0.9.6 (2017-02-15)
+ ==================
+
+ * Revert backward incompatible change (BaseURL)
+
+
+ 0.9.5 (2017-02-14)
+ ==================
+
+ * Fix BaseURL rich comparison support
+
+
+ 0.9.4 (2017-02-14)
+ ==================
+
+ * Use BaseURL
+
+
+ 0.9.3 (2017-02-14)
+ ==================
+
+ * Added BaseURL
+
+
+ 0.9.2 (2017-02-08)
+ ==================
+
+ * Remove debug print
+
+
+ 0.9.1 (2017-02-07)
+ ==================
+
+ * Do not lose tail chars (#45)
+
+
+ 0.9.0 (2017-02-07)
+ ==================
+
+ * Allow quoting ``%`` in non-strict mode (#21)
+
+ * Fix incorrect parsing of query parameters with ``%3B`` (``;``) inside (#34)
+
+ * Fix core dumps (#41)
+
+ * Fix ``tmpbuf`` compilation error (#43)
+
+ * Added ``URL.update_path()`` method
+
+ * Added ``URL.update_query()`` method (#47)
+
+
+ 0.8.1 (2016-12-03)
+ ==================
+
+ * Fix broken aiohttp: revert ``quote`` / ``unquote``.
+
+
+ 0.8.0 (2016-12-03)
+ ==================
+
+ * Support more verbose error messages in ``.with_query()`` (#24)
+
+ * Don't percent-encode ``@`` and ``:`` in path (#32)
+
+ * Don't expose ``yarl.quote`` and ``yarl.unquote``, these functions are
+ part of private API
+
+ 0.7.1 (2016-11-18)
+ ==================
+
+ * Accept not only ``str`` but also all classes inherited from ``str`` (#25)
+
+ 0.7.0 (2016-11-07)
+ ==================
+
+ * Accept ``int`` as value for ``.with_query()``
+
+ 0.6.0 (2016-11-07)
+ ==================
+
+ * Explicitly use UTF8 encoding in setup.py (#20)
+ * Properly unquote non-UTF8 strings (#19)
+
+ 0.5.3 (2016-11-02)
+ ==================
+
+ * Use namedtuple indexes instead of fields on URL construction
+
+ 0.5.2 (2016-11-02)
+ ==================
+
+ * Inline ``_encode`` class method
+
+ 0.5.1 (2016-11-02)
+ ==================
+
+ * Make URL construction faster by removing extra classmethod calls
+
+ 0.5.0 (2016-11-02)
+ ==================
+
+ * Add cython optimization for quoting/unquoting
+ * Provide binary wheels
+
+ 0.4.3 (2016-09-29)
+ ==================
+
+ * Fix typing stubs
+
+ 0.4.2 (2016-09-29)
+ ==================
+
+ * Expose ``quote()`` and ``unquote()`` as public API
+
+ 0.4.1 (2016-09-28)
+ ==================
+
+ * Support empty values in query (``'/path?arg'``)
+
+ 0.4.0 (2016-09-27)
+ ==================
+
+ * Introduce ``relative()`` (#16)
+
+ 0.3.2 (2016-09-27)
+ ==================
+
+ * Typo fixes #15
+
+ 0.3.1 (2016-09-26)
+ ==================
+
+ * Support sequence of pairs as ``with_query()`` parameter
+
+ 0.3.0 (2016-09-26)
+ ==================
+
+ * Introduce ``is_default_port()``
+
+ 0.2.1 (2016-09-26)
+ ==================
+
+ * Raise ``ValueError`` for URLs like ``'http://:8080/'``
+
+ 0.2.0 (2016-09-18)
+ ==================
+
+ * Avoid doubling slashes when joining paths (#13)
+
+ * Forbid appending a path starting with a slash (#12)
+
+ 0.1.4 (2016-09-09)
+ ==================
+
+ * Add kwargs support for ``with_query()`` (#10)
+
+ 0.1.3 (2016-09-07)
+ ==================
+
+ * Document ``with_query()``, ``with_fragment()`` and ``origin()``
+
+ * Allow ``None`` for ``with_query()`` and ``with_fragment()``
+
+ 0.1.2 (2016-09-07)
+ ==================
+
+ * Fix links, tune docs theme.
+
+ 0.1.1 (2016-09-06)
+ ==================
+
+ * Update README; the old version used an obsolete API
+
+ 0.1.0 (2016-09-06)
+ ==================
+
+ * The library was deeply refactored; ``bytes`` support is gone, but all
+   accepted strings are encoded if needed.
+
+ 0.0.1 (2016-08-30)
+ ==================
+
+ * The first release.
+
+Platform: UNKNOWN
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Intended Audience :: Developers
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Topic :: Internet :: WWW/HTTP
+Requires-Python: >=3.6
+Description-Content-Type: text/x-rst
diff --git a/third_party/python/yarl/yarl.egg-info/SOURCES.txt b/third_party/python/yarl/yarl.egg-info/SOURCES.txt
new file mode 100644
index 0000000000..383d95918b
--- /dev/null
+++ b/third_party/python/yarl/yarl.egg-info/SOURCES.txt
@@ -0,0 +1,42 @@
+CHANGES.rst
+LICENSE
+MANIFEST.in
+README.rst
+pyproject.toml
+setup.cfg
+setup.py
+docs/Makefile
+docs/api.rst
+docs/conf.py
+docs/index.rst
+docs/make.bat
+docs/spelling_wordlist.txt
+docs/yarl-icon-128x128.xcf
+docs/_static/yarl-icon-128x128.png
+docs/_templates/about.html
+tests/test_cache.py
+tests/test_cached_property.py
+tests/test_normalize_path.py
+tests/test_pickle.py
+tests/test_quoting.py
+tests/test_update_query.py
+tests/test_url.py
+tests/test_url_build.py
+tests/test_url_cmp_and_hash.py
+tests/test_url_parsing.py
+tests/test_url_query.py
+tests/test_url_update_netloc.py
+yarl/__init__.py
+yarl/__init__.pyi
+yarl/_quoting.py
+yarl/_quoting_c.c
+yarl/_quoting_c.pyi
+yarl/_quoting_c.pyx
+yarl/_quoting_py.py
+yarl/_url.py
+yarl/py.typed
+yarl.egg-info/PKG-INFO
+yarl.egg-info/SOURCES.txt
+yarl.egg-info/dependency_links.txt
+yarl.egg-info/requires.txt
+yarl.egg-info/top_level.txt \ No newline at end of file
diff --git a/third_party/python/yarl/yarl.egg-info/dependency_links.txt b/third_party/python/yarl/yarl.egg-info/dependency_links.txt
new file mode 100644
index 0000000000..8b13789179
--- /dev/null
+++ b/third_party/python/yarl/yarl.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/third_party/python/yarl/yarl.egg-info/requires.txt b/third_party/python/yarl/yarl.egg-info/requires.txt
new file mode 100644
index 0000000000..bb40947df6
--- /dev/null
+++ b/third_party/python/yarl/yarl.egg-info/requires.txt
@@ -0,0 +1,5 @@
+multidict>=4.0
+idna>=2.0
+
+[:python_version < "3.8"]
+typing_extensions>=3.7.4
diff --git a/third_party/python/yarl/yarl.egg-info/top_level.txt b/third_party/python/yarl/yarl.egg-info/top_level.txt
new file mode 100644
index 0000000000..e93e8bddef
--- /dev/null
+++ b/third_party/python/yarl/yarl.egg-info/top_level.txt
@@ -0,0 +1 @@
+yarl
diff --git a/third_party/python/yarl/yarl/__init__.py b/third_party/python/yarl/yarl/__init__.py
new file mode 100644
index 0000000000..db4e94817a
--- /dev/null
+++ b/third_party/python/yarl/yarl/__init__.py
@@ -0,0 +1,5 @@
+from ._url import URL, cache_clear, cache_configure, cache_info
+
+__version__ = "1.6.3"
+
+__all__ = ("URL", "cache_clear", "cache_configure", "cache_info")
diff --git a/third_party/python/yarl/yarl/__init__.pyi b/third_party/python/yarl/yarl/__init__.pyi
new file mode 100644
index 0000000000..a8d8fbda48
--- /dev/null
+++ b/third_party/python/yarl/yarl/__init__.pyi
@@ -0,0 +1,111 @@
+from typing import overload, Any, Tuple, Optional, Mapping, Union, Sequence, Type
+import multidict
+from functools import _CacheInfo
+import sys
+
+if sys.version_info >= (3, 8):
+ from typing import TypedDict, Final, final
+else:
+ from typing_extensions import TypedDict, Final, final
+
+_SimpleQuery = Union[str, int, float]
+_QueryVariable = Union[_SimpleQuery, Sequence[_SimpleQuery]]
+_Query = Union[
+ None, str, Mapping[str, _QueryVariable], Sequence[Tuple[str, _QueryVariable]]
+]
+@final
+class URL:
+ scheme: Final[str]
+ raw_user: Final[str]
+ user: Final[Optional[str]]
+ raw_password: Final[Optional[str]]
+ password: Final[Optional[str]]
+ raw_host: Final[Optional[str]]
+ host: Final[Optional[str]]
+ port: Final[Optional[int]]
+ raw_authority: Final[str]
+ authority: Final[str]
+ raw_path: Final[str]
+ path: Final[str]
+ raw_query_string: Final[str]
+ query_string: Final[str]
+ path_qs: Final[str]
+ raw_path_qs: Final[str]
+ raw_fragment: Final[str]
+ fragment: Final[str]
+ query: Final[multidict.MultiDict[str]]
+ raw_name: Final[str]
+ name: Final[str]
+ raw_parts: Final[Tuple[str, ...]]
+ parts: Final[Tuple[str, ...]]
+ parent: Final[URL]
+ def __init__(
+ self, val: Union[str, "URL"] = ..., *, encoded: bool = ...
+ ) -> None: ...
+ @classmethod
+ def build(
+ cls,
+ *,
+ scheme: str = ...,
+ authority: str = ...,
+ user: Optional[str] = ...,
+ password: Optional[str] = ...,
+ host: str = ...,
+ port: Optional[int] = ...,
+ path: str = ...,
+ query: Optional[_Query] = ...,
+ query_string: str = ...,
+ fragment: str = ...,
+ encoded: bool = ...
+ ) -> URL: ...
+ def __str__(self) -> str: ...
+ def __repr__(self) -> str: ...
+ def __eq__(self, other: Any) -> bool: ...
+ def __le__(self, other: Any) -> bool: ...
+ def __lt__(self, other: Any) -> bool: ...
+ def __ge__(self, other: Any) -> bool: ...
+ def __gt__(self, other: Any) -> bool: ...
+ def __hash__(self) -> int: ...
+ def __truediv__(self, name: str) -> URL: ...
+ def __mod__(self, query: _Query) -> URL: ...
+ def is_absolute(self) -> bool: ...
+ def is_default_port(self) -> bool: ...
+ def origin(self) -> URL: ...
+ def relative(self) -> URL: ...
+ def with_scheme(self, scheme: str) -> URL: ...
+ def with_user(self, user: Optional[str]) -> URL: ...
+ def with_password(self, password: Optional[str]) -> URL: ...
+ def with_host(self, host: str) -> URL: ...
+ def with_port(self, port: Optional[int]) -> URL: ...
+ def with_path(self, path: str, *, encoded: bool = ...) -> URL: ...
+ @overload
+ def with_query(self, query: _Query) -> URL: ...
+ @overload
+ def with_query(self, **kwargs: _QueryVariable) -> URL: ...
+ @overload
+ def update_query(self, query: _Query) -> URL: ...
+ @overload
+ def update_query(self, **kwargs: _QueryVariable) -> URL: ...
+ def with_fragment(self, fragment: Optional[str]) -> URL: ...
+ def with_name(self, name: str) -> URL: ...
+ def join(self, url: URL) -> URL: ...
+ def human_repr(self) -> str: ...
+ # private API
+ @classmethod
+ def _normalize_path(cls, path: str) -> str: ...
+
+@final
+class cached_property:
+ def __init__(self, wrapped: Any) -> None: ...
+ def __get__(self, inst: URL, owner: Type[URL]) -> Any: ...
+ def __set__(self, inst: URL, value: Any) -> None: ...
+
+class CacheInfo(TypedDict):
+ idna_encode: _CacheInfo
+ idna_decode: _CacheInfo
+
+def cache_clear() -> None: ...
+def cache_info() -> CacheInfo: ...
+def cache_configure(
+ *, idna_encode_size: Optional[int] = ..., idna_decode_size: Optional[int] = ...
+) -> None: ...
diff --git a/third_party/python/yarl/yarl/_quoting.py b/third_party/python/yarl/yarl/_quoting.py
new file mode 100644
index 0000000000..46e100a9ee
--- /dev/null
+++ b/third_party/python/yarl/yarl/_quoting.py
@@ -0,0 +1,18 @@
+import os
+import sys
+
+__all__ = ("_Quoter", "_Unquoter")
+
+
+NO_EXTENSIONS = bool(os.environ.get("YARL_NO_EXTENSIONS")) # type: bool
+if sys.implementation.name != "cpython":
+ NO_EXTENSIONS = True
+
+
+if not NO_EXTENSIONS: # pragma: no branch
+ try:
+ from ._quoting_c import _Quoter, _Unquoter # type: ignore[misc]
+ except ImportError: # pragma: no cover
+ from ._quoting_py import _Quoter, _Unquoter # type: ignore[misc]
+else:
+ from ._quoting_py import _Quoter, _Unquoter # type: ignore[misc]
diff --git a/third_party/python/yarl/yarl/_quoting_c.c b/third_party/python/yarl/yarl/_quoting_c.c
new file mode 100644
index 0000000000..cdb46f71de
--- /dev/null
+++ b/third_party/python/yarl/yarl/_quoting_c.c
@@ -0,0 +1,11612 @@
+/* Generated by Cython 0.29.21 */
+
+#define PY_SSIZE_T_CLEAN
+#include "Python.h"
+#ifndef Py_PYTHON_H
+ #error Python headers needed to compile C extensions, please install development version of Python.
+#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
+ #error Cython requires Python 2.6+ or Python 3.3+.
+#else
+#define CYTHON_ABI "0_29_21"
+#define CYTHON_HEX_VERSION 0x001D15F0
+#define CYTHON_FUTURE_DIVISION 1
+#include <stddef.h>
+#ifndef offsetof
+ #define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
+#endif
+#if !defined(WIN32) && !defined(MS_WINDOWS)
+ #ifndef __stdcall
+ #define __stdcall
+ #endif
+ #ifndef __cdecl
+ #define __cdecl
+ #endif
+ #ifndef __fastcall
+ #define __fastcall
+ #endif
+#endif
+#ifndef DL_IMPORT
+ #define DL_IMPORT(t) t
+#endif
+#ifndef DL_EXPORT
+ #define DL_EXPORT(t) t
+#endif
+#define __PYX_COMMA ,
+#ifndef HAVE_LONG_LONG
+ #if PY_VERSION_HEX >= 0x02070000
+ #define HAVE_LONG_LONG
+ #endif
+#endif
+#ifndef PY_LONG_LONG
+ #define PY_LONG_LONG LONG_LONG
+#endif
+#ifndef Py_HUGE_VAL
+ #define Py_HUGE_VAL HUGE_VAL
+#endif
+#ifdef PYPY_VERSION
+ #define CYTHON_COMPILING_IN_PYPY 1
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #undef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 0
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #if PY_VERSION_HEX < 0x03050000
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #undef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 1
+ #undef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 0
+ #undef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 0
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#elif defined(PYSTON_VERSION)
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 1
+ #define CYTHON_COMPILING_IN_CPYTHON 0
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #undef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 0
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #undef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 0
+ #undef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 0
+ #undef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT 0
+ #undef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE 0
+ #undef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS 0
+ #undef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK 0
+#else
+ #define CYTHON_COMPILING_IN_PYPY 0
+ #define CYTHON_COMPILING_IN_PYSTON 0
+ #define CYTHON_COMPILING_IN_CPYTHON 1
+ #ifndef CYTHON_USE_TYPE_SLOTS
+ #define CYTHON_USE_TYPE_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYTYPE_LOOKUP
+ #define CYTHON_USE_PYTYPE_LOOKUP 0
+ #elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
+ #define CYTHON_USE_PYTYPE_LOOKUP 1
+ #endif
+ #if PY_MAJOR_VERSION < 3
+ #undef CYTHON_USE_ASYNC_SLOTS
+ #define CYTHON_USE_ASYNC_SLOTS 0
+ #elif !defined(CYTHON_USE_ASYNC_SLOTS)
+ #define CYTHON_USE_ASYNC_SLOTS 1
+ #endif
+ #if PY_VERSION_HEX < 0x02070000
+ #undef CYTHON_USE_PYLONG_INTERNALS
+ #define CYTHON_USE_PYLONG_INTERNALS 0
+ #elif !defined(CYTHON_USE_PYLONG_INTERNALS)
+ #define CYTHON_USE_PYLONG_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_PYLIST_INTERNALS
+ #define CYTHON_USE_PYLIST_INTERNALS 1
+ #endif
+ #ifndef CYTHON_USE_UNICODE_INTERNALS
+ #define CYTHON_USE_UNICODE_INTERNALS 1
+ #endif
+ #if PY_VERSION_HEX < 0x030300F0
+ #undef CYTHON_USE_UNICODE_WRITER
+ #define CYTHON_USE_UNICODE_WRITER 0
+ #elif !defined(CYTHON_USE_UNICODE_WRITER)
+ #define CYTHON_USE_UNICODE_WRITER 1
+ #endif
+ #ifndef CYTHON_AVOID_BORROWED_REFS
+ #define CYTHON_AVOID_BORROWED_REFS 0
+ #endif
+ #ifndef CYTHON_ASSUME_SAFE_MACROS
+ #define CYTHON_ASSUME_SAFE_MACROS 1
+ #endif
+ #ifndef CYTHON_UNPACK_METHODS
+ #define CYTHON_UNPACK_METHODS 1
+ #endif
+ #ifndef CYTHON_FAST_THREAD_STATE
+ #define CYTHON_FAST_THREAD_STATE 1
+ #endif
+ #ifndef CYTHON_FAST_PYCALL
+ #define CYTHON_FAST_PYCALL 1
+ #endif
+ #ifndef CYTHON_PEP489_MULTI_PHASE_INIT
+ #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
+ #endif
+ #ifndef CYTHON_USE_TP_FINALIZE
+ #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
+ #endif
+ #ifndef CYTHON_USE_DICT_VERSIONS
+ #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
+ #endif
+ #ifndef CYTHON_USE_EXC_INFO_STACK
+ #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
+ #endif
+#endif
+#if !defined(CYTHON_FAST_PYCCALL)
+#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
+#endif
+#if CYTHON_USE_PYLONG_INTERNALS
+ #include "longintrepr.h"
+ #undef SHIFT
+ #undef BASE
+ #undef MASK
+ #ifdef SIZEOF_VOID_P
+ enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
+ #endif
+#endif
+#ifndef __has_attribute
+ #define __has_attribute(x) 0
+#endif
+#ifndef __has_cpp_attribute
+ #define __has_cpp_attribute(x) 0
+#endif
+#ifndef CYTHON_RESTRICT
+ #if defined(__GNUC__)
+ #define CYTHON_RESTRICT __restrict__
+ #elif defined(_MSC_VER) && _MSC_VER >= 1400
+ #define CYTHON_RESTRICT __restrict
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_RESTRICT restrict
+ #else
+ #define CYTHON_RESTRICT
+ #endif
+#endif
+#ifndef CYTHON_UNUSED
+# if defined(__GNUC__)
+# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
+# define CYTHON_UNUSED __attribute__ ((__unused__))
+# else
+# define CYTHON_UNUSED
+# endif
+#endif
+#ifndef CYTHON_MAYBE_UNUSED_VAR
+# if defined(__cplusplus)
+ template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
+# else
+# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
+# endif
+#endif
+#ifndef CYTHON_NCP_UNUSED
+# if CYTHON_COMPILING_IN_CPYTHON
+# define CYTHON_NCP_UNUSED
+# else
+# define CYTHON_NCP_UNUSED CYTHON_UNUSED
+# endif
+#endif
+#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
+#ifdef _MSC_VER
+ #ifndef _MSC_STDINT_H_
+ #if _MSC_VER < 1300
+ typedef unsigned char uint8_t;
+ typedef unsigned int uint32_t;
+ #else
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int32 uint32_t;
+ #endif
+ #endif
+#else
+ #include <stdint.h>
+#endif
+#ifndef CYTHON_FALLTHROUGH
+ #if defined(__cplusplus) && __cplusplus >= 201103L
+ #if __has_cpp_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH [[fallthrough]]
+ #elif __has_cpp_attribute(clang::fallthrough)
+ #define CYTHON_FALLTHROUGH [[clang::fallthrough]]
+ #elif __has_cpp_attribute(gnu::fallthrough)
+ #define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
+ #endif
+ #endif
+ #ifndef CYTHON_FALLTHROUGH
+ #if __has_attribute(fallthrough)
+ #define CYTHON_FALLTHROUGH __attribute__((fallthrough))
+ #else
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+ #if defined(__clang__ ) && defined(__apple_build_version__)
+ #if __apple_build_version__ < 7000000
+ #undef CYTHON_FALLTHROUGH
+ #define CYTHON_FALLTHROUGH
+ #endif
+ #endif
+#endif
+
+#ifndef CYTHON_INLINE
+ #if defined(__clang__)
+ #define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
+ #elif defined(__GNUC__)
+ #define CYTHON_INLINE __inline__
+ #elif defined(_MSC_VER)
+ #define CYTHON_INLINE __inline
+ #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define CYTHON_INLINE inline
+ #else
+ #define CYTHON_INLINE
+ #endif
+#endif
+
+#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
+ #define Py_OptimizeFlag 0
+#endif
+#define __PYX_BUILD_PY_SSIZE_T "n"
+#define CYTHON_FORMAT_SSIZE_T "z"
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+ #define __Pyx_DefaultClassType PyClass_Type
+#else
+ #define __Pyx_BUILTIN_MODULE_NAME "builtins"
+#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#else
+ #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
+ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
+#endif
+ #define __Pyx_DefaultClassType PyType_Type
+#endif
+#ifndef Py_TPFLAGS_CHECKTYPES
+ #define Py_TPFLAGS_CHECKTYPES 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_INDEX
+ #define Py_TPFLAGS_HAVE_INDEX 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
+ #define Py_TPFLAGS_HAVE_NEWBUFFER 0
+#endif
+#ifndef Py_TPFLAGS_HAVE_FINALIZE
+ #define Py_TPFLAGS_HAVE_FINALIZE 0
+#endif
+#ifndef METH_STACKLESS
+ #define METH_STACKLESS 0
+#endif
+#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
+ #ifndef METH_FASTCALL
+ #define METH_FASTCALL 0x80
+ #endif
+ typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
+ typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
+ Py_ssize_t nargs, PyObject *kwnames);
+#else
+ #define __Pyx_PyCFunctionFast _PyCFunctionFast
+ #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
+#endif
+#if CYTHON_FAST_PYCCALL
+#define __Pyx_PyFastCFunction_Check(func)\
+ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
+#else
+#define __Pyx_PyFastCFunction_Check(func) 0
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
+ #define PyObject_Malloc(s) PyMem_Malloc(s)
+ #define PyObject_Free(p) PyMem_Free(p)
+ #define PyObject_Realloc(p) PyMem_Realloc(p)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
+ #define PyMem_RawMalloc(n) PyMem_Malloc(n)
+ #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
+ #define PyMem_RawFree(p) PyMem_Free(p)
+#endif
+#if CYTHON_COMPILING_IN_PYSTON
+ #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
+#else
+ #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
+ #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
+#endif
+#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#elif PY_VERSION_HEX >= 0x03060000
+ #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
+#elif PY_VERSION_HEX >= 0x03000000
+ #define __Pyx_PyThreadState_Current PyThreadState_GET()
+#else
+ #define __Pyx_PyThreadState_Current _PyThreadState_Current
+#endif
+#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
+#include "pythread.h"
+#define Py_tss_NEEDS_INIT 0
+typedef int Py_tss_t;
+static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
+ *key = PyThread_create_key();
+ return 0;
+}
+static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
+ Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
+ *key = Py_tss_NEEDS_INIT;
+ return key;
+}
+static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
+ PyObject_Free(key);
+}
+static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
+ return *key != Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
+ PyThread_delete_key(*key);
+ *key = Py_tss_NEEDS_INIT;
+}
+static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
+ return PyThread_set_key_value(*key, value);
+}
+static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
+ return PyThread_get_key_value(*key);
+}
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
+#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
+#else
+#define __Pyx_PyDict_NewPresized(n) PyDict_New()
+#endif
+#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
+#else
+ #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
+ #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
+#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
+#else
+#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
+#endif
+#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
+ #define CYTHON_PEP393_ENABLED 1
+ #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
+ 0 : _PyUnicode_Ready((PyObject *)(op)))
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
+ #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
+ #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
+ #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
+ #if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
+ #else
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
+ #endif
+#else
+ #define CYTHON_PEP393_ENABLED 0
+ #define PyUnicode_1BYTE_KIND 1
+ #define PyUnicode_2BYTE_KIND 2
+ #define PyUnicode_4BYTE_KIND 4
+ #define __Pyx_PyUnicode_READY(op) (0)
+ #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
+ #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
+ #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
+ #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
+ #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
+ #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
+ #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
+ #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
+#endif
+#if CYTHON_COMPILING_IN_PYPY
+ #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
+#else
+ #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
+ #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
+ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
+ #define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
+ #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
+#endif
+#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
+ #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
+#endif
+#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
+#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
+#else
+ #define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
+#endif
+#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
+ #define PyObject_ASCII(o) PyObject_Repr(o)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBaseString_Type PyUnicode_Type
+ #define PyStringObject PyUnicodeObject
+ #define PyString_Type PyUnicode_Type
+ #define PyString_Check PyUnicode_Check
+ #define PyString_CheckExact PyUnicode_CheckExact
+#ifndef PyObject_Unicode
+ #define PyObject_Unicode PyObject_Str
+#endif
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
+ #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
+#else
+ #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
+ #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
+#endif
+#ifndef PySet_CheckExact
+ #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
+#endif
+#if PY_VERSION_HEX >= 0x030900A4
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
+#else
+ #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
+ #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
+#endif
+#if CYTHON_ASSUME_SAFE_MACROS
+ #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
+#else
+ #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyIntObject PyLongObject
+ #define PyInt_Type PyLong_Type
+ #define PyInt_Check(op) PyLong_Check(op)
+ #define PyInt_CheckExact(op) PyLong_CheckExact(op)
+ #define PyInt_FromString PyLong_FromString
+ #define PyInt_FromUnicode PyLong_FromUnicode
+ #define PyInt_FromLong PyLong_FromLong
+ #define PyInt_FromSize_t PyLong_FromSize_t
+ #define PyInt_FromSsize_t PyLong_FromSsize_t
+ #define PyInt_AsLong PyLong_AsLong
+ #define PyInt_AS_LONG PyLong_AS_LONG
+ #define PyInt_AsSsize_t PyLong_AsSsize_t
+ #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
+ #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
+ #define PyNumber_Int PyNumber_Long
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define PyBoolObject PyLongObject
+#endif
+#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
+ #ifndef PyUnicode_InternFromString
+ #define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
+ #endif
+#endif
+#if PY_VERSION_HEX < 0x030200A4
+ typedef long Py_hash_t;
+ #define __Pyx_PyInt_FromHash_t PyInt_FromLong
+ #define __Pyx_PyInt_AsHash_t PyInt_AsLong
+#else
+ #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
+ #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
+#endif
+#if PY_MAJOR_VERSION >= 3
+ #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
+#else
+ #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
+#endif
+#if CYTHON_USE_ASYNC_SLOTS
+ #if PY_VERSION_HEX >= 0x030500B1
+ #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
+ #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
+ #else
+ #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
+ #endif
+#else
+ #define __Pyx_PyType_AsAsync(obj) NULL
+#endif
+#ifndef __Pyx_PyAsyncMethodsStruct
+ typedef struct {
+ unaryfunc am_await;
+ unaryfunc am_aiter;
+ unaryfunc am_anext;
+ } __Pyx_PyAsyncMethodsStruct;
+#endif
+
+#if defined(WIN32) || defined(MS_WINDOWS)
+ #define _USE_MATH_DEFINES
+#endif
+#include <math.h>
+#ifdef NAN
+#define __PYX_NAN() ((float) NAN)
+#else
+static CYTHON_INLINE float __PYX_NAN() {
+ float value;
+ memset(&value, 0xFF, sizeof(value));
+ return value;
+}
+#endif
+#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
+#define __Pyx_truncl trunc
+#else
+#define __Pyx_truncl truncl
+#endif
+
+#define __PYX_MARK_ERR_POS(f_index, lineno) \
+ { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
+#define __PYX_ERR(f_index, lineno, Ln_error) \
+ { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
+
+#ifndef __PYX_EXTERN_C
+ #ifdef __cplusplus
+ #define __PYX_EXTERN_C extern "C"
+ #else
+ #define __PYX_EXTERN_C extern
+ #endif
+#endif
+
+#define __PYX_HAVE__yarl___quoting_c
+#define __PYX_HAVE_API__yarl___quoting_c
+/* Early includes */
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#ifdef _OPENMP
+#include <omp.h>
+#endif /* _OPENMP */
+
+#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
+#define CYTHON_WITHOUT_ASSERTIONS
+#endif
+
+typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding;
+ const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry;
+
+#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
+#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
+#define __PYX_DEFAULT_STRING_ENCODING ""
+#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
+#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#define __Pyx_uchar_cast(c) ((unsigned char)c)
+#define __Pyx_long_cast(x) ((long)x)
+#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
+ (sizeof(type) < sizeof(Py_ssize_t)) ||\
+ (sizeof(type) > sizeof(Py_ssize_t) &&\
+ likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX) &&\
+ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
+ v == (type)PY_SSIZE_T_MIN))) ||\
+ (sizeof(type) == sizeof(Py_ssize_t) &&\
+ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
+ v == (type)PY_SSIZE_T_MAX))) )
+static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
+ return (size_t) i < (size_t) limit;
+}
+#if defined (__cplusplus) && __cplusplus >= 201103L
+ #include <cstdlib>
+ #define __Pyx_sst_abs(value) std::abs(value)
+#elif SIZEOF_INT >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) abs(value)
+#elif SIZEOF_LONG >= SIZEOF_SIZE_T
+ #define __Pyx_sst_abs(value) labs(value)
+#elif defined (_MSC_VER)
+ #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
+#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
+ #define __Pyx_sst_abs(value) llabs(value)
+#elif defined (__GNUC__)
+ #define __Pyx_sst_abs(value) __builtin_llabs(value)
+#else
+ #define __Pyx_sst_abs(value) ((value<0) ? -value : value)
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
+#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
+#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
+#define __Pyx_PyBytes_FromString PyBytes_FromString
+#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
+#if PY_MAJOR_VERSION < 3
+ #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
+#else
+ #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
+ #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
+#endif
+#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
+#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
+#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
+#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
+#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
+#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
+#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
+static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
+ const Py_UNICODE *u_end = u;
+ while (*u_end++) ;
+ return (size_t)(u_end - u - 1);
+}
+#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
+#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
+#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
+#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
+#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
+#define __Pyx_PySequence_Tuple(obj)\
+ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
+#if CYTHON_ASSUME_SAFE_MACROS
+#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
+#else
+#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
+#endif
+#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
+#if PY_MAJOR_VERSION >= 3
+#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
+#else
+#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
+#endif
+#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+static int __Pyx_sys_getdefaultencoding_not_ascii;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ PyObject* ascii_chars_u = NULL;
+ PyObject* ascii_chars_b = NULL;
+ const char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ if (strcmp(default_encoding_c, "ascii") == 0) {
+ __Pyx_sys_getdefaultencoding_not_ascii = 0;
+ } else {
+ char ascii_chars[128];
+ int c;
+ for (c = 0; c < 128; c++) {
+ ascii_chars[c] = c;
+ }
+ __Pyx_sys_getdefaultencoding_not_ascii = 1;
+ ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
+ if (!ascii_chars_u) goto bad;
+ ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
+ if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
+ PyErr_Format(
+ PyExc_ValueError,
+ "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
+ default_encoding_c);
+ goto bad;
+ }
+ Py_DECREF(ascii_chars_u);
+ Py_DECREF(ascii_chars_b);
+ }
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ Py_XDECREF(ascii_chars_u);
+ Py_XDECREF(ascii_chars_b);
+ return -1;
+}
+#endif
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
+#else
+#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
+#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+static char* __PYX_DEFAULT_STRING_ENCODING;
+static int __Pyx_init_sys_getdefaultencoding_params(void) {
+ PyObject* sys;
+ PyObject* default_encoding = NULL;
+ char* default_encoding_c;
+ sys = PyImport_ImportModule("sys");
+ if (!sys) goto bad;
+ default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL);
+ Py_DECREF(sys);
+ if (!default_encoding) goto bad;
+ default_encoding_c = PyBytes_AsString(default_encoding);
+ if (!default_encoding_c) goto bad;
+ __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
+ if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
+ strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
+ Py_DECREF(default_encoding);
+ return 0;
+bad:
+ Py_XDECREF(default_encoding);
+ return -1;
+}
+#endif
+#endif
+
+
+/* Test for GCC > 2.95 */
+#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
+ #define likely(x) __builtin_expect(!!(x), 1)
+ #define unlikely(x) __builtin_expect(!!(x), 0)
+#else /* !__GNUC__ or GCC < 2.95 */
+ #define likely(x) (x)
+ #define unlikely(x) (x)
+#endif /* __GNUC__ */
+static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
+
+static PyObject *__pyx_m = NULL;
+static PyObject *__pyx_d;
+static PyObject *__pyx_b;
+static PyObject *__pyx_cython_runtime = NULL;
+static PyObject *__pyx_empty_tuple;
+static PyObject *__pyx_empty_bytes;
+static PyObject *__pyx_empty_unicode;
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm= __FILE__;
+static const char *__pyx_filename;
+
+
+static const char *__pyx_f[] = {
+ "yarl/_quoting_c.pyx",
+ "stringsource",
+ "type.pxd",
+};
+
+/*--- Type declarations ---*/
+struct __pyx_obj_4yarl_10_quoting_c__Quoter;
+struct __pyx_obj_4yarl_10_quoting_c__Unquoter;
+struct __pyx_t_4yarl_10_quoting_c_Writer;
+
+/* "yarl/_quoting_c.pyx":79
+ * # ----------------- writer ---------------------------
+ *
+ * cdef struct Writer: # <<<<<<<<<<<<<<
+ * char *buf
+ * Py_ssize_t size
+ */
+struct __pyx_t_4yarl_10_quoting_c_Writer {
+ char *buf;
+ Py_ssize_t size;
+ Py_ssize_t pos;
+ int changed;
+};
+
+/* "yarl/_quoting_c.pyx":169
+ *
+ *
+ * cdef class _Quoter: # <<<<<<<<<<<<<<
+ * cdef bint _qs
+ * cdef bint _requote
+ */
+struct __pyx_obj_4yarl_10_quoting_c__Quoter {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_4yarl_10_quoting_c__Quoter *__pyx_vtab;
+ int _qs;
+ int _requote;
+ uint8_t _safe_table[16];
+ uint8_t _protected_table[16];
+};
+
+
+/* "yarl/_quoting_c.pyx":271
+ *
+ *
+ * cdef class _Unquoter: # <<<<<<<<<<<<<<
+ * cdef str _unsafe
+ * cdef bint _qs
+ */
+struct __pyx_obj_4yarl_10_quoting_c__Unquoter {
+ PyObject_HEAD
+ struct __pyx_vtabstruct_4yarl_10_quoting_c__Unquoter *__pyx_vtab;
+ PyObject *_unsafe;
+ int _qs;
+ struct __pyx_obj_4yarl_10_quoting_c__Quoter *_quoter;
+ struct __pyx_obj_4yarl_10_quoting_c__Quoter *_qs_quoter;
+};
+
+
+
+/* "yarl/_quoting_c.pyx":169
+ *
+ *
+ * cdef class _Quoter: # <<<<<<<<<<<<<<
+ * cdef bint _qs
+ * cdef bint _requote
+ */
+
+struct __pyx_vtabstruct_4yarl_10_quoting_c__Quoter {
+ PyObject *(*_do_quote)(struct __pyx_obj_4yarl_10_quoting_c__Quoter *, PyObject *, struct __pyx_t_4yarl_10_quoting_c_Writer *);
+ int (*_write)(struct __pyx_obj_4yarl_10_quoting_c__Quoter *, struct __pyx_t_4yarl_10_quoting_c_Writer *, Py_UCS4);
+};
+static struct __pyx_vtabstruct_4yarl_10_quoting_c__Quoter *__pyx_vtabptr_4yarl_10_quoting_c__Quoter;
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c_7_Quoter__write(struct __pyx_obj_4yarl_10_quoting_c__Quoter *, struct __pyx_t_4yarl_10_quoting_c_Writer *, Py_UCS4);
+
+
+/* "yarl/_quoting_c.pyx":271
+ *
+ *
+ * cdef class _Unquoter: # <<<<<<<<<<<<<<
+ * cdef str _unsafe
+ * cdef bint _qs
+ */
+
+struct __pyx_vtabstruct_4yarl_10_quoting_c__Unquoter {
+ PyObject *(*_do_unquote)(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *, PyObject *);
+};
+static struct __pyx_vtabstruct_4yarl_10_quoting_c__Unquoter *__pyx_vtabptr_4yarl_10_quoting_c__Unquoter;
+
+/* --- Runtime support code (head) --- */
+/* Refnanny.proto */
+#ifndef CYTHON_REFNANNY
+ #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+ typedef struct {
+ void (*INCREF)(void*, PyObject*, int);
+ void (*DECREF)(void*, PyObject*, int);
+ void (*GOTREF)(void*, PyObject*, int);
+ void (*GIVEREF)(void*, PyObject*, int);
+ void* (*SetupContext)(const char*, int, const char*);
+ void (*FinishContext)(void**);
+ } __Pyx_RefNannyAPIStruct;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+ static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+ #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ if (acquire_gil) {\
+ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ PyGILState_Release(__pyx_gilstate_save);\
+ } else {\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
+ }
+#else
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
+#endif
+ #define __Pyx_RefNannyFinishContext()\
+ __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+ #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
+ #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
+ #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
+ #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
+ #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
+#else
+ #define __Pyx_RefNannyDeclarations
+ #define __Pyx_RefNannySetupContext(name, acquire_gil)
+ #define __Pyx_RefNannyFinishContext()
+ #define __Pyx_INCREF(r) Py_INCREF(r)
+ #define __Pyx_DECREF(r) Py_DECREF(r)
+ #define __Pyx_GOTREF(r)
+ #define __Pyx_GIVEREF(r)
+ #define __Pyx_XINCREF(r) Py_XINCREF(r)
+ #define __Pyx_XDECREF(r) Py_XDECREF(r)
+ #define __Pyx_XGOTREF(r)
+ #define __Pyx_XGIVEREF(r)
+#endif
+#define __Pyx_XDECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_XDECREF(tmp);\
+ } while (0)
+#define __Pyx_DECREF_SET(r, v) do {\
+ PyObject *tmp = (PyObject *) r;\
+ r = v; __Pyx_DECREF(tmp);\
+ } while (0)
+#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
+#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
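+/* Editor's sketch, not generated code: the RefNanny block above is a
+ * debugging aid. With CYTHON_REFNANNY defined non-zero, every incref and
+ * decref is routed through an imported tracing API so refcount bugs can be
+ * pinned to a source line; in ordinary builds (the #else branch) the macros
+ * collapse to the plain CPython calls, for example:
+ *
+ *     __Pyx_INCREF(obj);   // expands to Py_INCREF(obj)
+ *     __Pyx_GOTREF(obj);   // expands to nothing
+ */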
+
+/* PyObjectGetAttrStr.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/* GetBuiltinName.proto */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+/* PyThreadStateGet.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
+#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
+#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
+#else
+#define __Pyx_PyThreadState_declare
+#define __Pyx_PyThreadState_assign
+#define __Pyx_PyErr_Occurred() PyErr_Occurred()
+#endif
+
+/* PyErrFetchRestore.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
+#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
+#else
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#endif
+#else
+#define __Pyx_PyErr_Clear() PyErr_Clear()
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
+#endif
+
+/* WriteUnraisableException.proto */
+static void __Pyx_WriteUnraisable(const char *name, int clineno,
+ int lineno, const char *filename,
+ int full_traceback, int nogil);
+
+/* RaiseDoubleKeywords.proto */
+static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
+
+/* ParseKeywords.proto */
+static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
+ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
+ const char* function_name);
+
+/* RaiseArgTupleInvalid.proto */
+static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
+ Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
+
+/* ArgTypeTest.proto */
+#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
+ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
+ __Pyx__ArgTypeTest(obj, type, name, exact))
+static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
+
+/* unicode_iter.proto */
+static CYTHON_INLINE int __Pyx_init_unicode_iteration(
+ PyObject* ustring, Py_ssize_t *length, void** data, int *kind);
+
+/* PyObjectCall.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
+#else
+#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
+#endif
+
+/* RaiseException.proto */
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
+
+/* PyCFunctionFastCall.proto */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
+#else
+#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
+#endif
+
+/* PyFunctionFastCall.proto */
+#if CYTHON_FAST_PYCALL
+#define __Pyx_PyFunction_FastCall(func, args, nargs)\
+ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
+#else
+#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
+#endif
+#define __Pyx_BUILD_ASSERT_EXPR(cond)\
+ (sizeof(char [1 - 2*!(cond)]) - 1)
+#ifndef Py_MEMBER_SIZE
+#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
+#endif
+ static size_t __pyx_pyframe_localsplus_offset = 0;
+ #include "frameobject.h"
+ #define __Pxy_PyFrame_Initialize_Offsets()\
+ ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
+ (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
+ #define __Pyx_PyFrame_GetLocalsplus(frame)\
+ (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
+#endif
+
+/* PyObjectCallMethO.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
+#endif
+
+/* PyObjectCallOneArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
+
+/* GetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* SwapException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#else
+static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
+#endif
+
+/* GetTopmostException.proto */
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
+#endif
+
+/* SaveResetException.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+#else
+#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
+#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
+#endif
+
+/* GetItemIntUnicode.proto */
+#define __Pyx_GetItemInt_Unicode(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Unicode_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "string index out of range"), (Py_UCS4)-1))
+static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i,
+ int wraparound, int boundscheck);
+
+/* ReRaiseException.proto */
+static CYTHON_INLINE void __Pyx_ReraiseException(void);
+
+/* PyErrExceptionMatches.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
+#else
+#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
+#endif
+
+/* GetAttr.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
+
+/* GetAttr3.proto */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
+
+/* PyDictVersioning.proto */
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
+#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
+ (version_var) = __PYX_GET_DICT_VERSION(dict);\
+ (cache_var) = (value);
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
+ (VAR) = __pyx_dict_cached_value;\
+ } else {\
+ (VAR) = __pyx_dict_cached_value = (LOOKUP);\
+ __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
+ }\
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
+#else
+#define __PYX_GET_DICT_VERSION(dict) (0)
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
+#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
+#endif
+
+/* GetModuleGlobalName.proto */
+#if CYTHON_USE_DICT_VERSIONS
+#define __Pyx_GetModuleGlobalName(var, name) {\
+ static PY_UINT64_T __pyx_dict_version = 0;\
+ static PyObject *__pyx_dict_cached_value = NULL;\
+ (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
+ (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
+ __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
+ PY_UINT64_T __pyx_dict_version;\
+ PyObject *__pyx_dict_cached_value;\
+ (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
+}
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
+#else
+#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
+#endif
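+/* Editor's sketch, not generated code: when CPython exposes dict version
+ * tags (PEP 509, the ma_version_tag field used above), the
+ * __Pyx_GetModuleGlobalName macro caches each module-global lookup in
+ * function-local statics and revalidates it with a single integer compare,
+ * re-reading the module dict only after it has actually changed.
+ * Simplified pseudo-C of the fast/slow split:
+ *
+ *     if (saved_version == __PYX_GET_DICT_VERSION(__pyx_d))
+ *         var = cached;                                  // no dict lookup
+ *     else
+ *         var = lookup_and_recache(&saved_version, &cached);  // slow path
+ */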
+
+/* PyObjectCallNoArg.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+#else
+#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL)
+#endif
+
+/* ListAppend.proto */
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
+ PyListObject* L = (PyListObject*) list;
+ Py_ssize_t len = Py_SIZE(list);
+ if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
+ Py_INCREF(x);
+ PyList_SET_ITEM(list, len, x);
+ __Pyx_SET_SIZE(list, len + 1);
+ return 0;
+ }
+ return PyList_Append(list, x);
+}
+#else
+#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
+#endif
+
+/* PyUnicode_Substring.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
+ PyObject* text, Py_ssize_t start, Py_ssize_t stop);
+
+/* PyUnicodeContains.proto */
+static CYTHON_INLINE int __Pyx_PyUnicode_ContainsTF(PyObject* substring, PyObject* text, int eq) {
+ int result = PyUnicode_Contains(text, substring);
+ return unlikely(result < 0) ? result : (result == (eq == Py_EQ));
+}
+
+/* PyObjectCall2Args.proto */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
+
+/* SliceObject.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(
+ PyObject* obj, Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** py_start, PyObject** py_stop, PyObject** py_slice,
+ int has_cstart, int has_cstop, int wraparound);
+
+/* Import.proto */
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
+
+/* ImportFrom.proto */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
+
+/* GetItemInt.proto */
+#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
+ (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
+ __Pyx_GetItemInt_Generic(o, to_py_func(i))))
+#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
+ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
+ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
+ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ int wraparound, int boundscheck);
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
+ int is_list, int wraparound, int boundscheck);
+
+/* IncludeStringH.proto */
+#include <string.h>
+
+/* HasAttr.proto */
+static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
+
+/* ExtTypeTest.proto */
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
+
+/* PyObject_GenericGetAttrNoDict.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
+#endif
+
+/* PyObject_GenericGetAttr.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
+#endif
+
+/* SetVTable.proto */
+static int __Pyx_SetVtable(PyObject *dict, void *vtable);
+
+/* PyObjectGetAttrStrNoError.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
+
+/* SetupReduce.proto */
+static int __Pyx_setup_reduce(PyObject* type_obj);
+
+/* TypeImport.proto */
+#ifndef __PYX_HAVE_RT_ImportType_proto
+#define __PYX_HAVE_RT_ImportType_proto
+enum __Pyx_ImportType_CheckSize {
+ __Pyx_ImportType_CheckSize_Error = 0,
+ __Pyx_ImportType_CheckSize_Warn = 1,
+ __Pyx_ImportType_CheckSize_Ignore = 2
+};
+static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
+#endif
+
+/* CLineInTraceback.proto */
+#ifdef CYTHON_CLINE_IN_TRACEBACK
+#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
+#else
+static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
+#endif
+
+/* CodeObjectCache.proto */
+typedef struct {
+ PyCodeObject* code_object;
+ int code_line;
+} __Pyx_CodeObjectCacheEntry;
+struct __Pyx_CodeObjectCache {
+ int count;
+ int max_count;
+ __Pyx_CodeObjectCacheEntry* entries;
+};
+static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
+static PyCodeObject *__pyx_find_code_object(int code_line);
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
+
+/* AddTraceback.proto */
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
+
+/* CIntToPy.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
+
+/* PyUCS4InUnicode.proto */
+static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character);
+
+/* UnicodeAsUCS4.proto */
+static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject*);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE uint8_t __Pyx_PyInt_As_uint8_t(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE uint64_t __Pyx_PyInt_As_uint64_t(PyObject *);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
+
+/* ObjectAsUCS4.proto */
+#define __Pyx_PyObject_AsPy_UCS4(x)\
+ (likely(PyUnicode_Check(x)) ? __Pyx_PyUnicode_AsPy_UCS4(x) : __Pyx__PyObject_AsPy_UCS4(x))
+static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject*);
+
+/* CIntFromPy.proto */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
+
+/* FastTypeChecks.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
+#else
+#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
+#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
+#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
+#endif
+#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
+
+/* CheckBinaryVersion.proto */
+static int __Pyx_check_binary_version(void);
+
+/* InitStrings.proto */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
+
+static PyObject *__pyx_f_4yarl_10_quoting_c_7_Quoter__do_quote(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, PyObject *__pyx_v_val, struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer); /* proto*/
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c_7_Quoter__write(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer, Py_UCS4 __pyx_v_ch); /* proto*/
+static PyObject *__pyx_f_4yarl_10_quoting_c_9_Unquoter__do_unquote(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self, PyObject *__pyx_v_val); /* proto*/
+
+/* Module declarations from 'libc.stdint' */
+
+/* Module declarations from 'libc.string' */
+
+/* Module declarations from 'libc.stdio' */
+
+/* Module declarations from '__builtin__' */
+
+/* Module declarations from 'cpython.type' */
+static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
+
+/* Module declarations from 'cpython' */
+
+/* Module declarations from 'cpython.object' */
+
+/* Module declarations from 'cpython.exc' */
+
+/* Module declarations from 'cpython.mem' */
+
+/* Module declarations from 'cpython.unicode' */
+
+/* Module declarations from 'yarl._quoting_c' */
+static PyTypeObject *__pyx_ptype_4yarl_10_quoting_c__Quoter = 0;
+static PyTypeObject *__pyx_ptype_4yarl_10_quoting_c__Unquoter = 0;
+static PyObject *__pyx_v_4yarl_10_quoting_c_GEN_DELIMS = 0;
+static PyObject *__pyx_v_4yarl_10_quoting_c_SUB_DELIMS_WITHOUT_QS = 0;
+static PyObject *__pyx_v_4yarl_10_quoting_c_SUB_DELIMS = 0;
+static PyObject *__pyx_v_4yarl_10_quoting_c_RESERVED = 0;
+static PyObject *__pyx_v_4yarl_10_quoting_c_UNRESERVED = 0;
+static PyObject *__pyx_v_4yarl_10_quoting_c_ALLOWED = 0;
+static PyObject *__pyx_v_4yarl_10_quoting_c_QS = 0;
+static char __pyx_v_4yarl_10_quoting_c_BUFFER[0x2000];
+static uint8_t __pyx_v_4yarl_10_quoting_c_ALLOWED_TABLE[16];
+static uint8_t __pyx_v_4yarl_10_quoting_c_ALLOWED_NOTQS_TABLE[16];
+static CYTHON_INLINE Py_UCS4 __pyx_f_4yarl_10_quoting_c__to_hex(uint8_t); /*proto*/
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__from_hex(Py_UCS4); /*proto*/
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__is_lower_hex(Py_UCS4); /*proto*/
+static CYTHON_INLINE Py_UCS4 __pyx_f_4yarl_10_quoting_c__restore_ch(Py_UCS4, Py_UCS4); /*proto*/
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c_bit_at(uint8_t *, uint64_t); /*proto*/
+static CYTHON_INLINE void __pyx_f_4yarl_10_quoting_c_set_bit(uint8_t *, uint64_t); /*proto*/
+static CYTHON_INLINE void __pyx_f_4yarl_10_quoting_c__init_writer(struct __pyx_t_4yarl_10_quoting_c_Writer *); /*proto*/
+static CYTHON_INLINE void __pyx_f_4yarl_10_quoting_c__release_writer(struct __pyx_t_4yarl_10_quoting_c_Writer *); /*proto*/
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__write_char(struct __pyx_t_4yarl_10_quoting_c_Writer *, Py_UCS4, int); /*proto*/
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__write_pct(struct __pyx_t_4yarl_10_quoting_c_Writer *, uint8_t, int); /*proto*/
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__write_utf8(struct __pyx_t_4yarl_10_quoting_c_Writer *, Py_UCS4); /*proto*/
+static PyObject *__pyx_f_4yarl_10_quoting_c___pyx_unpickle__Quoter__set_state(struct __pyx_obj_4yarl_10_quoting_c__Quoter *, PyObject *); /*proto*/
+static PyObject *__pyx_f_4yarl_10_quoting_c___pyx_unpickle__Unquoter__set_state(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *, PyObject *); /*proto*/
+static int __Pyx_carray_from_py_uint8_t(PyObject *, uint8_t *, Py_ssize_t); /*proto*/
+#define __Pyx_MODULE_NAME "yarl._quoting_c"
+extern int __pyx_module_is_main_yarl___quoting_c;
+int __pyx_module_is_main_yarl___quoting_c = 0;
+
+/* Implementation of 'yarl._quoting_c' */
+static PyObject *__pyx_builtin_range;
+static PyObject *__pyx_builtin_chr;
+static PyObject *__pyx_builtin_ValueError;
+static PyObject *__pyx_builtin_TypeError;
+static PyObject *__pyx_builtin_UnicodeDecodeError;
+static PyObject *__pyx_builtin_hex;
+static PyObject *__pyx_builtin_OverflowError;
+static PyObject *__pyx_builtin_enumerate;
+static PyObject *__pyx_builtin_IndexError;
+static const char __pyx_k_[] = "";
+static const char __pyx_k_i[] = "i";
+static const char __pyx_k__4[] = "+=&;";
+static const char __pyx_k__5[] = "+";
+static const char __pyx_k__6[] = " ";
+static const char __pyx_k__7[] = "%";
+static const char __pyx_k__9[] = ":/?#[]@";
+static const char __pyx_k_qs[] = "qs";
+static const char __pyx_k__10[] = "!$'()*,";
+static const char __pyx_k__11[] = "+?=;";
+static const char __pyx_k__12[] = "-._~";
+static const char __pyx_k__13[] = "+&=;";
+static const char __pyx_k_chr[] = "chr";
+static const char __pyx_k_hex[] = "hex";
+static const char __pyx_k_new[] = "__new__";
+static const char __pyx_k_val[] = "val";
+static const char __pyx_k_dict[] = "__dict__";
+static const char __pyx_k_main[] = "__main__";
+static const char __pyx_k_name[] = "__name__";
+static const char __pyx_k_safe[] = "safe";
+static const char __pyx_k_test[] = "__test__";
+static const char __pyx_k_range[] = "range";
+static const char __pyx_k_upper[] = "upper";
+static const char __pyx_k_Quoter[] = "_Quoter";
+static const char __pyx_k_digits[] = "digits";
+static const char __pyx_k_import[] = "__import__";
+static const char __pyx_k_pickle[] = "pickle";
+static const char __pyx_k_reduce[] = "__reduce__";
+static const char __pyx_k_string[] = "string";
+static const char __pyx_k_unsafe[] = "unsafe";
+static const char __pyx_k_update[] = "update";
+static const char __pyx_k_requote[] = "requote";
+static const char __pyx_k_Unquoter[] = "_Unquoter";
+static const char __pyx_k_getstate[] = "__getstate__";
+static const char __pyx_k_pyx_type[] = "__pyx_type";
+static const char __pyx_k_setstate[] = "__setstate__";
+static const char __pyx_k_TypeError[] = "TypeError";
+static const char __pyx_k_enumerate[] = "enumerate";
+static const char __pyx_k_protected[] = "protected";
+static const char __pyx_k_pyx_state[] = "__pyx_state";
+static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
+static const char __pyx_k_IndexError[] = "IndexError";
+static const char __pyx_k_ValueError[] = "ValueError";
+static const char __pyx_k_pyx_result[] = "__pyx_result";
+static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
+static const char __pyx_k_PickleError[] = "PickleError";
+static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
+static const char __pyx_k_stringsource[] = "stringsource";
+static const char __pyx_k_OverflowError[] = "OverflowError";
+static const char __pyx_k_ascii_letters[] = "ascii_letters";
+static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
+static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
+static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
+static const char __pyx_k_yarl__quoting_c[] = "yarl._quoting_c";
+static const char __pyx_k_UnicodeDecodeError[] = "UnicodeDecodeError";
+static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
+static const char __pyx_k_pyx_unpickle__Quoter[] = "__pyx_unpickle__Quoter";
+static const char __pyx_k_Argument_should_be_str[] = "Argument should be str";
+static const char __pyx_k_pyx_unpickle__Unquoter[] = "__pyx_unpickle__Unquoter";
+static const char __pyx_k_Incompatible_checksums_s_vs_0x27[] = "Incompatible checksums (%s vs 0x276577d = (_qs, _qs_quoter, _quoter, _unsafe))";
+static const char __pyx_k_Incompatible_checksums_s_vs_0xe9[] = "Incompatible checksums (%s vs 0xe91bd35 = (_protected_table, _qs, _requote, _safe_table))";
+static const char __pyx_k_Only_safe_symbols_with_ORD_128_a[] = "Only safe symbols with ORD < 128 are allowed";
+static PyObject *__pyx_kp_u_;
+static PyObject *__pyx_kp_u_Argument_should_be_str;
+static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0x27;
+static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xe9;
+static PyObject *__pyx_n_s_IndexError;
+static PyObject *__pyx_kp_u_Only_safe_symbols_with_ORD_128_a;
+static PyObject *__pyx_n_s_OverflowError;
+static PyObject *__pyx_n_s_PickleError;
+static PyObject *__pyx_n_s_Quoter;
+static PyObject *__pyx_n_s_TypeError;
+static PyObject *__pyx_n_s_UnicodeDecodeError;
+static PyObject *__pyx_n_s_Unquoter;
+static PyObject *__pyx_n_s_ValueError;
+static PyObject *__pyx_kp_u__10;
+static PyObject *__pyx_kp_u__11;
+static PyObject *__pyx_kp_u__12;
+static PyObject *__pyx_kp_u__13;
+static PyObject *__pyx_kp_u__4;
+static PyObject *__pyx_kp_u__5;
+static PyObject *__pyx_kp_u__6;
+static PyObject *__pyx_kp_u__7;
+static PyObject *__pyx_kp_u__9;
+static PyObject *__pyx_n_s_ascii_letters;
+static PyObject *__pyx_n_s_chr;
+static PyObject *__pyx_n_s_cline_in_traceback;
+static PyObject *__pyx_n_s_dict;
+static PyObject *__pyx_n_s_digits;
+static PyObject *__pyx_n_s_enumerate;
+static PyObject *__pyx_n_s_getstate;
+static PyObject *__pyx_n_s_hex;
+static PyObject *__pyx_n_s_i;
+static PyObject *__pyx_n_s_import;
+static PyObject *__pyx_n_s_main;
+static PyObject *__pyx_n_s_name;
+static PyObject *__pyx_n_s_new;
+static PyObject *__pyx_n_s_pickle;
+static PyObject *__pyx_n_s_protected;
+static PyObject *__pyx_n_s_pyx_PickleError;
+static PyObject *__pyx_n_s_pyx_checksum;
+static PyObject *__pyx_n_s_pyx_result;
+static PyObject *__pyx_n_s_pyx_state;
+static PyObject *__pyx_n_s_pyx_type;
+static PyObject *__pyx_n_s_pyx_unpickle__Quoter;
+static PyObject *__pyx_n_s_pyx_unpickle__Unquoter;
+static PyObject *__pyx_n_s_pyx_vtable;
+static PyObject *__pyx_n_s_qs;
+static PyObject *__pyx_n_s_range;
+static PyObject *__pyx_n_s_reduce;
+static PyObject *__pyx_n_s_reduce_cython;
+static PyObject *__pyx_n_s_reduce_ex;
+static PyObject *__pyx_n_s_requote;
+static PyObject *__pyx_n_s_safe;
+static PyObject *__pyx_n_s_setstate;
+static PyObject *__pyx_n_s_setstate_cython;
+static PyObject *__pyx_n_s_string;
+static PyObject *__pyx_kp_s_stringsource;
+static PyObject *__pyx_n_s_test;
+static PyObject *__pyx_n_s_unsafe;
+static PyObject *__pyx_n_s_update;
+static PyObject *__pyx_n_s_upper;
+static PyObject *__pyx_n_s_val;
+static PyObject *__pyx_n_s_yarl__quoting_c;
+static int __pyx_pf_4yarl_10_quoting_c_7_Quoter___init__(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, PyObject *__pyx_v_safe, PyObject *__pyx_v_protected, int __pyx_v_qs, int __pyx_v_requote); /* proto */
+static PyObject *__pyx_pf_4yarl_10_quoting_c_7_Quoter_2__call__(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
+static PyObject *__pyx_pf_4yarl_10_quoting_c_7_Quoter_4__reduce_cython__(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_4yarl_10_quoting_c_7_Quoter_6__setstate_cython__(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
+static int __pyx_pf_4yarl_10_quoting_c_9_Unquoter___init__(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self, PyObject *__pyx_v_unsafe, PyObject *__pyx_v_qs); /* proto */
+static PyObject *__pyx_pf_4yarl_10_quoting_c_9_Unquoter_2__call__(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self, PyObject *__pyx_v_val); /* proto */
+static PyObject *__pyx_pf_4yarl_10_quoting_c_9_Unquoter_4__reduce_cython__(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self); /* proto */
+static PyObject *__pyx_pf_4yarl_10_quoting_c_9_Unquoter_6__setstate_cython__(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_pf_4yarl_10_quoting_c___pyx_unpickle__Quoter(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_pf_4yarl_10_quoting_c_2__pyx_unpickle__Unquoter(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
+static PyObject *__pyx_tp_new_4yarl_10_quoting_c__Quoter(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_tp_new_4yarl_10_quoting_c__Unquoter(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
+static PyObject *__pyx_int_2;
+static PyObject *__pyx_int_41310077;
+static PyObject *__pyx_int_244432181;
+static PyObject *__pyx_slice__8;
+static PyObject *__pyx_tuple__2;
+static PyObject *__pyx_tuple__3;
+static PyObject *__pyx_tuple__14;
+static PyObject *__pyx_tuple__16;
+static PyObject *__pyx_codeobj__15;
+static PyObject *__pyx_codeobj__17;
+/* Late includes */
+
+/* "yarl/_quoting_c.pyx":23
+ * cdef char BUFFER[BUF_SIZE]
+ *
+ * cdef inline Py_UCS4 _to_hex(uint8_t v): # <<<<<<<<<<<<<<
+ * if v < 10:
+ * return <Py_UCS4>(v+0x30) # ord('0') == 0x30
+ */
+
+static CYTHON_INLINE Py_UCS4 __pyx_f_4yarl_10_quoting_c__to_hex(uint8_t __pyx_v_v) {
+ Py_UCS4 __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_to_hex", 0);
+
+ /* "yarl/_quoting_c.pyx":24
+ *
+ * cdef inline Py_UCS4 _to_hex(uint8_t v):
+ * if v < 10: # <<<<<<<<<<<<<<
+ * return <Py_UCS4>(v+0x30) # ord('0') == 0x30
+ * else:
+ */
+ __pyx_t_1 = ((__pyx_v_v < 10) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":25
+ * cdef inline Py_UCS4 _to_hex(uint8_t v):
+ * if v < 10:
+ * return <Py_UCS4>(v+0x30) # ord('0') == 0x30 # <<<<<<<<<<<<<<
+ * else:
+ * return <Py_UCS4>(v+0x41-10) # ord('A') == 0x41
+ */
+ __pyx_r = ((Py_UCS4)(__pyx_v_v + 0x30));
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":24
+ *
+ * cdef inline Py_UCS4 _to_hex(uint8_t v):
+ * if v < 10: # <<<<<<<<<<<<<<
+ * return <Py_UCS4>(v+0x30) # ord('0') == 0x30
+ * else:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":27
+ * return <Py_UCS4>(v+0x30) # ord('0') == 0x30
+ * else:
+ * return <Py_UCS4>(v+0x41-10) # ord('A') == 0x41 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else*/ {
+ __pyx_r = ((Py_UCS4)((__pyx_v_v + 0x41) - 10));
+ goto __pyx_L0;
+ }
+
+ /* "yarl/_quoting_c.pyx":23
+ * cdef char BUFFER[BUF_SIZE]
+ *
+ * cdef inline Py_UCS4 _to_hex(uint8_t v): # <<<<<<<<<<<<<<
+ * if v < 10:
+ * return <Py_UCS4>(v+0x30) # ord('0') == 0x30
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
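+/* Editor's sketch, not generated code: the function above maps a nibble in
+ * [0, 15] to its uppercase hex digit. The same logic as standalone C:
+ *
+ *     static char to_hex(unsigned v) {
+ *         return (char)(v < 10 ? '0' + v : 'A' + (v - 10));
+ *     }
+ */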
+
+/* "yarl/_quoting_c.pyx":30
+ *
+ *
+ * cdef inline int _from_hex(Py_UCS4 v): # <<<<<<<<<<<<<<
+ * if '0' <= v <= '9':
+ * return <int>(v) - 0x30 # ord('0') == 0x30
+ */
+
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__from_hex(Py_UCS4 __pyx_v_v) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("_from_hex", 0);
+
+ /* "yarl/_quoting_c.pyx":31
+ *
+ * cdef inline int _from_hex(Py_UCS4 v):
+ * if '0' <= v <= '9': # <<<<<<<<<<<<<<
+ * return <int>(v) - 0x30 # ord('0') == 0x30
+ * elif 'A' <= v <= 'F':
+ */
+ __pyx_t_1 = (48 <= __pyx_v_v);
+ if (__pyx_t_1) {
+ __pyx_t_1 = (__pyx_v_v <= 57);
+ }
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":32
+ * cdef inline int _from_hex(Py_UCS4 v):
+ * if '0' <= v <= '9':
+ * return <int>(v) - 0x30 # ord('0') == 0x30 # <<<<<<<<<<<<<<
+ * elif 'A' <= v <= 'F':
+ * return <int>(v) - 0x41 + 10 # ord('A') == 0x41
+ */
+ __pyx_r = (((int)__pyx_v_v) - 0x30);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":31
+ *
+ * cdef inline int _from_hex(Py_UCS4 v):
+ * if '0' <= v <= '9': # <<<<<<<<<<<<<<
+ * return <int>(v) - 0x30 # ord('0') == 0x30
+ * elif 'A' <= v <= 'F':
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":33
+ * if '0' <= v <= '9':
+ * return <int>(v) - 0x30 # ord('0') == 0x30
+ * elif 'A' <= v <= 'F': # <<<<<<<<<<<<<<
+ * return <int>(v) - 0x41 + 10 # ord('A') == 0x41
+ * elif 'a' <= v <= 'f':
+ */
+ __pyx_t_2 = (65 <= __pyx_v_v);
+ if (__pyx_t_2) {
+ __pyx_t_2 = (__pyx_v_v <= 70);
+ }
+ __pyx_t_1 = (__pyx_t_2 != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":34
+ * return <int>(v) - 0x30 # ord('0') == 0x30
+ * elif 'A' <= v <= 'F':
+ * return <int>(v) - 0x41 + 10 # ord('A') == 0x41 # <<<<<<<<<<<<<<
+ * elif 'a' <= v <= 'f':
+ * return <int>(v) - 0x61 + 10 # ord('a') == 0x61
+ */
+ __pyx_r = ((((int)__pyx_v_v) - 0x41) + 10);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":33
+ * if '0' <= v <= '9':
+ * return <int>(v) - 0x30 # ord('0') == 0x30
+ * elif 'A' <= v <= 'F': # <<<<<<<<<<<<<<
+ * return <int>(v) - 0x41 + 10 # ord('A') == 0x41
+ * elif 'a' <= v <= 'f':
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":35
+ * elif 'A' <= v <= 'F':
+ * return <int>(v) - 0x41 + 10 # ord('A') == 0x41
+ * elif 'a' <= v <= 'f': # <<<<<<<<<<<<<<
+ * return <int>(v) - 0x61 + 10 # ord('a') == 0x61
+ * else:
+ */
+ __pyx_t_1 = (97 <= __pyx_v_v);
+ if (__pyx_t_1) {
+ __pyx_t_1 = (__pyx_v_v <= 0x66);
+ }
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":36
+ * return <int>(v) - 0x41 + 10 # ord('A') == 0x41
+ * elif 'a' <= v <= 'f':
+ * return <int>(v) - 0x61 + 10 # ord('a') == 0x61 # <<<<<<<<<<<<<<
+ * else:
+ * return -1
+ */
+ __pyx_r = ((((int)__pyx_v_v) - 0x61) + 10);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":35
+ * elif 'A' <= v <= 'F':
+ * return <int>(v) - 0x41 + 10 # ord('A') == 0x41
+ * elif 'a' <= v <= 'f': # <<<<<<<<<<<<<<
+ * return <int>(v) - 0x61 + 10 # ord('a') == 0x61
+ * else:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":38
+ * return <int>(v) - 0x61 + 10 # ord('a') == 0x61
+ * else:
+ * return -1 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ /*else*/ {
+ __pyx_r = -1;
+ goto __pyx_L0;
+ }
+
+ /* "yarl/_quoting_c.pyx":30
+ *
+ *
+ * cdef inline int _from_hex(Py_UCS4 v): # <<<<<<<<<<<<<<
+ * if '0' <= v <= '9':
+ * return <int>(v) - 0x30 # ord('0') == 0x30
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "yarl/_quoting_c.pyx":41
+ *
+ *
+ * cdef inline int _is_lower_hex(Py_UCS4 v): # <<<<<<<<<<<<<<
+ * return 'a' <= v <= 'f'
+ *
+ */
+
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__is_lower_hex(Py_UCS4 __pyx_v_v) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_is_lower_hex", 0);
+
+ /* "yarl/_quoting_c.pyx":42
+ *
+ * cdef inline int _is_lower_hex(Py_UCS4 v):
+ * return 'a' <= v <= 'f' # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = (97 <= __pyx_v_v);
+ if (__pyx_t_1) {
+ __pyx_t_1 = (__pyx_v_v <= 0x66);
+ }
+ __pyx_r = __pyx_t_1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":41
+ *
+ *
+ * cdef inline int _is_lower_hex(Py_UCS4 v): # <<<<<<<<<<<<<<
+ * return 'a' <= v <= 'f'
+ *
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "yarl/_quoting_c.pyx":45
+ *
+ *
+ * cdef inline Py_UCS4 _restore_ch(Py_UCS4 d1, Py_UCS4 d2): # <<<<<<<<<<<<<<
+ * cdef int digit1 = _from_hex(d1)
+ * if digit1 < 0:
+ */
+
+static CYTHON_INLINE Py_UCS4 __pyx_f_4yarl_10_quoting_c__restore_ch(Py_UCS4 __pyx_v_d1, Py_UCS4 __pyx_v_d2) {
+ int __pyx_v_digit1;
+ int __pyx_v_digit2;
+ Py_UCS4 __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_restore_ch", 0);
+
+ /* "yarl/_quoting_c.pyx":46
+ *
+ * cdef inline Py_UCS4 _restore_ch(Py_UCS4 d1, Py_UCS4 d2):
+ * cdef int digit1 = _from_hex(d1) # <<<<<<<<<<<<<<
+ * if digit1 < 0:
+ * return <Py_UCS4>-1
+ */
+ __pyx_v_digit1 = __pyx_f_4yarl_10_quoting_c__from_hex(__pyx_v_d1);
+
+ /* "yarl/_quoting_c.pyx":47
+ * cdef inline Py_UCS4 _restore_ch(Py_UCS4 d1, Py_UCS4 d2):
+ * cdef int digit1 = _from_hex(d1)
+ * if digit1 < 0: # <<<<<<<<<<<<<<
+ * return <Py_UCS4>-1
+ * cdef int digit2 = _from_hex(d2)
+ */
+ __pyx_t_1 = ((__pyx_v_digit1 < 0) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":48
+ * cdef int digit1 = _from_hex(d1)
+ * if digit1 < 0:
+ * return <Py_UCS4>-1 # <<<<<<<<<<<<<<
+ * cdef int digit2 = _from_hex(d2)
+ * if digit2 < 0:
+ */
+ __pyx_r = ((Py_UCS4)-1L);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":47
+ * cdef inline Py_UCS4 _restore_ch(Py_UCS4 d1, Py_UCS4 d2):
+ * cdef int digit1 = _from_hex(d1)
+ * if digit1 < 0: # <<<<<<<<<<<<<<
+ * return <Py_UCS4>-1
+ * cdef int digit2 = _from_hex(d2)
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":49
+ * if digit1 < 0:
+ * return <Py_UCS4>-1
+ * cdef int digit2 = _from_hex(d2) # <<<<<<<<<<<<<<
+ * if digit2 < 0:
+ * return <Py_UCS4>-1
+ */
+ __pyx_v_digit2 = __pyx_f_4yarl_10_quoting_c__from_hex(__pyx_v_d2);
+
+ /* "yarl/_quoting_c.pyx":50
+ * return <Py_UCS4>-1
+ * cdef int digit2 = _from_hex(d2)
+ * if digit2 < 0: # <<<<<<<<<<<<<<
+ * return <Py_UCS4>-1
+ * return <Py_UCS4>(digit1 << 4 | digit2)
+ */
+ __pyx_t_1 = ((__pyx_v_digit2 < 0) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":51
+ * cdef int digit2 = _from_hex(d2)
+ * if digit2 < 0:
+ * return <Py_UCS4>-1 # <<<<<<<<<<<<<<
+ * return <Py_UCS4>(digit1 << 4 | digit2)
+ *
+ */
+ __pyx_r = ((Py_UCS4)-1L);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":50
+ * return <Py_UCS4>-1
+ * cdef int digit2 = _from_hex(d2)
+ * if digit2 < 0: # <<<<<<<<<<<<<<
+ * return <Py_UCS4>-1
+ * return <Py_UCS4>(digit1 << 4 | digit2)
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":52
+ * if digit2 < 0:
+ * return <Py_UCS4>-1
+ * return <Py_UCS4>(digit1 << 4 | digit2) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = ((Py_UCS4)((__pyx_v_digit1 << 4) | __pyx_v_digit2));
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":45
+ *
+ *
+ * cdef inline Py_UCS4 _restore_ch(Py_UCS4 d1, Py_UCS4 d2): # <<<<<<<<<<<<<<
+ * cdef int digit1 = _from_hex(d1)
+ * if digit1 < 0:
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
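+/* Editor's note, not generated code: _restore_ch is the percent-decoding
+ * core. It fuses two hex digits into one byte, or yields (Py_UCS4)-1 when
+ * either digit is invalid, presumably so malformed %XX sequences can be
+ * passed through undecoded. Illustrative values:
+ *
+ *     _restore_ch('4', '1')  ->  0x41  ('A')
+ *     _restore_ch('2', 'f')  ->  0x2f  ('/')   // lowercase hex accepted
+ *     _restore_ch('Z', '1')  ->  (Py_UCS4)-1   // 'Z' is not a hex digit
+ */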
+
+/* "yarl/_quoting_c.pyx":59
+ *
+ *
+ * cdef inline bint bit_at(uint8_t array[], uint64_t ch): # <<<<<<<<<<<<<<
+ * return array[ch >> 3] & (1 << (ch & 7))
+ *
+ */
+
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c_bit_at(uint8_t *__pyx_v_array, uint64_t __pyx_v_ch) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("bit_at", 0);
+
+ /* "yarl/_quoting_c.pyx":60
+ *
+ * cdef inline bint bit_at(uint8_t array[], uint64_t ch):
+ * return array[ch >> 3] & (1 << (ch & 7)) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = ((__pyx_v_array[(__pyx_v_ch >> 3)]) & (1 << (__pyx_v_ch & 7)));
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":59
+ *
+ *
+ * cdef inline bint bit_at(uint8_t array[], uint64_t ch): # <<<<<<<<<<<<<<
+ * return array[ch >> 3] & (1 << (ch & 7))
+ *
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "yarl/_quoting_c.pyx":63
+ *
+ *
+ * cdef inline void set_bit(uint8_t array[], uint64_t ch): # <<<<<<<<<<<<<<
+ * array[ch >> 3] |= (1 << (ch & 7))
+ *
+ */
+
+static CYTHON_INLINE void __pyx_f_4yarl_10_quoting_c_set_bit(uint8_t *__pyx_v_array, uint64_t __pyx_v_ch) {
+ __Pyx_RefNannyDeclarations
+ uint64_t __pyx_t_1;
+ __Pyx_RefNannySetupContext("set_bit", 0);
+
+ /* "yarl/_quoting_c.pyx":64
+ *
+ * cdef inline void set_bit(uint8_t array[], uint64_t ch):
+ * array[ch >> 3] |= (1 << (ch & 7)) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_t_1 = (__pyx_v_ch >> 3);
+ (__pyx_v_array[__pyx_t_1]) = ((__pyx_v_array[__pyx_t_1]) | (1 << (__pyx_v_ch & 7)));
+
+ /* "yarl/_quoting_c.pyx":63
+ *
+ *
+ * cdef inline void set_bit(uint8_t array[], uint64_t ch): # <<<<<<<<<<<<<<
+ * array[ch >> 3] |= (1 << (ch & 7))
+ *
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
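+/* Editor's sketch, not generated code: bit_at/set_bit treat a 16-byte
+ * uint8_t array as a 128-bit set, one bit per ASCII code point; this is
+ * the representation behind _safe_table, _protected_table and the ALLOWED
+ * tables declared earlier. The byte index is ch >> 3, the bit index is
+ * ch & 7. A standalone equivalent:
+ *
+ *     uint8_t table[16] = {0};
+ *     table['~' >> 3] |= (uint8_t)(1u << ('~' & 7));     // set_bit
+ *     int member = table['~' >> 3] & (1u << ('~' & 7));  // bit_at != 0
+ */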
+
+/* "yarl/_quoting_c.pyx":86
+ *
+ *
+ * cdef inline void _init_writer(Writer* writer): # <<<<<<<<<<<<<<
+ * writer.buf = &BUFFER[0]
+ * writer.size = BUF_SIZE
+ */
+
+static CYTHON_INLINE void __pyx_f_4yarl_10_quoting_c__init_writer(struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("_init_writer", 0);
+
+ /* "yarl/_quoting_c.pyx":87
+ *
+ * cdef inline void _init_writer(Writer* writer):
+ * writer.buf = &BUFFER[0] # <<<<<<<<<<<<<<
+ * writer.size = BUF_SIZE
+ * writer.pos = 0
+ */
+ __pyx_v_writer->buf = (&(__pyx_v_4yarl_10_quoting_c_BUFFER[0]));
+
+ /* "yarl/_quoting_c.pyx":88
+ * cdef inline void _init_writer(Writer* writer):
+ * writer.buf = &BUFFER[0]
+ * writer.size = BUF_SIZE # <<<<<<<<<<<<<<
+ * writer.pos = 0
+ * writer.changed = 0
+ */
+ __pyx_v_writer->size = 0x2000;
+
+ /* "yarl/_quoting_c.pyx":89
+ * writer.buf = &BUFFER[0]
+ * writer.size = BUF_SIZE
+ * writer.pos = 0 # <<<<<<<<<<<<<<
+ * writer.changed = 0
+ *
+ */
+ __pyx_v_writer->pos = 0;
+
+ /* "yarl/_quoting_c.pyx":90
+ * writer.size = BUF_SIZE
+ * writer.pos = 0
+ * writer.changed = 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_v_writer->changed = 0;
+
+ /* "yarl/_quoting_c.pyx":86
+ *
+ *
+ * cdef inline void _init_writer(Writer* writer): # <<<<<<<<<<<<<<
+ * writer.buf = &BUFFER[0]
+ * writer.size = BUF_SIZE
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "yarl/_quoting_c.pyx":93
+ *
+ *
+ * cdef inline void _release_writer(Writer* writer): # <<<<<<<<<<<<<<
+ * if writer.buf != BUFFER:
+ * PyMem_Free(writer.buf)
+ */
+
+static CYTHON_INLINE void __pyx_f_4yarl_10_quoting_c__release_writer(struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer) {
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_release_writer", 0);
+
+ /* "yarl/_quoting_c.pyx":94
+ *
+ * cdef inline void _release_writer(Writer* writer):
+ * if writer.buf != BUFFER: # <<<<<<<<<<<<<<
+ * PyMem_Free(writer.buf)
+ *
+ */
+ __pyx_t_1 = ((__pyx_v_writer->buf != __pyx_v_4yarl_10_quoting_c_BUFFER) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":95
+ * cdef inline void _release_writer(Writer* writer):
+ * if writer.buf != BUFFER:
+ * PyMem_Free(writer.buf) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ PyMem_Free(__pyx_v_writer->buf);
+
+ /* "yarl/_quoting_c.pyx":94
+ *
+ * cdef inline void _release_writer(Writer* writer):
+ * if writer.buf != BUFFER: # <<<<<<<<<<<<<<
+ * PyMem_Free(writer.buf)
+ *
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":93
+ *
+ *
+ * cdef inline void _release_writer(Writer* writer): # <<<<<<<<<<<<<<
+ * if writer.buf != BUFFER:
+ * PyMem_Free(writer.buf)
+ */
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+}
+
+/* "yarl/_quoting_c.pyx":98
+ *
+ *
+ * cdef inline int _write_char(Writer* writer, Py_UCS4 ch, bint changed): # <<<<<<<<<<<<<<
+ * cdef char * buf
+ * cdef Py_ssize_t size
+ */
+
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__write_char(struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer, Py_UCS4 __pyx_v_ch, int __pyx_v_changed) {
+ char *__pyx_v_buf;
+ Py_ssize_t __pyx_v_size;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_write_char", 0);
+
+ /* "yarl/_quoting_c.pyx":102
+ * cdef Py_ssize_t size
+ *
+ * if writer.pos == writer.size: # <<<<<<<<<<<<<<
+ * # reallocate
+ * size = writer.size + BUF_SIZE
+ */
+ __pyx_t_1 = ((__pyx_v_writer->pos == __pyx_v_writer->size) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":104
+ * if writer.pos == writer.size:
+ * # reallocate
+ * size = writer.size + BUF_SIZE # <<<<<<<<<<<<<<
+ * if writer.buf == BUFFER:
+ * buf = <char*>PyMem_Malloc(size)
+ */
+ __pyx_v_size = (__pyx_v_writer->size + 0x2000);
+
+ /* "yarl/_quoting_c.pyx":105
+ * # reallocate
+ * size = writer.size + BUF_SIZE
+ * if writer.buf == BUFFER: # <<<<<<<<<<<<<<
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL:
+ */
+ __pyx_t_1 = ((__pyx_v_writer->buf == __pyx_v_4yarl_10_quoting_c_BUFFER) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":106
+ * size = writer.size + BUF_SIZE
+ * if writer.buf == BUFFER:
+ * buf = <char*>PyMem_Malloc(size) # <<<<<<<<<<<<<<
+ * if buf == NULL:
+ * PyErr_NoMemory()
+ */
+ __pyx_v_buf = ((char *)PyMem_Malloc(__pyx_v_size));
+
+ /* "yarl/_quoting_c.pyx":107
+ * if writer.buf == BUFFER:
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ * return -1
+ */
+ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":108
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL:
+ * PyErr_NoMemory() # <<<<<<<<<<<<<<
+ * return -1
+ * memcpy(buf, writer.buf, writer.size)
+ */
+ __pyx_t_2 = PyErr_NoMemory(); if (unlikely(__pyx_t_2 == ((PyObject *)NULL))) __PYX_ERR(0, 108, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":109
+ * if buf == NULL:
+ * PyErr_NoMemory()
+ * return -1 # <<<<<<<<<<<<<<
+ * memcpy(buf, writer.buf, writer.size)
+ * else:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":107
+ * if writer.buf == BUFFER:
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ * return -1
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":110
+ * PyErr_NoMemory()
+ * return -1
+ * memcpy(buf, writer.buf, writer.size) # <<<<<<<<<<<<<<
+ * else:
+ * buf = <char*>PyMem_Realloc(writer.buf, size)
+ */
+ (void)(memcpy(__pyx_v_buf, __pyx_v_writer->buf, __pyx_v_writer->size));
+
+ /* "yarl/_quoting_c.pyx":105
+ * # reallocate
+ * size = writer.size + BUF_SIZE
+ * if writer.buf == BUFFER: # <<<<<<<<<<<<<<
+ * buf = <char*>PyMem_Malloc(size)
+ * if buf == NULL:
+ */
+ goto __pyx_L4;
+ }
+
+ /* "yarl/_quoting_c.pyx":112
+ * memcpy(buf, writer.buf, writer.size)
+ * else:
+ * buf = <char*>PyMem_Realloc(writer.buf, size) # <<<<<<<<<<<<<<
+ * if buf == NULL:
+ * PyErr_NoMemory()
+ */
+ /*else*/ {
+ __pyx_v_buf = ((char *)PyMem_Realloc(__pyx_v_writer->buf, __pyx_v_size));
+
+ /* "yarl/_quoting_c.pyx":113
+ * else:
+ * buf = <char*>PyMem_Realloc(writer.buf, size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ * return -1
+ */
+ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":114
+ * buf = <char*>PyMem_Realloc(writer.buf, size)
+ * if buf == NULL:
+ * PyErr_NoMemory() # <<<<<<<<<<<<<<
+ * return -1
+ * writer.buf = buf
+ */
+ __pyx_t_2 = PyErr_NoMemory(); if (unlikely(__pyx_t_2 == ((PyObject *)NULL))) __PYX_ERR(0, 114, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":115
+ * if buf == NULL:
+ * PyErr_NoMemory()
+ * return -1 # <<<<<<<<<<<<<<
+ * writer.buf = buf
+ * writer.size = size
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":113
+ * else:
+ * buf = <char*>PyMem_Realloc(writer.buf, size)
+ * if buf == NULL: # <<<<<<<<<<<<<<
+ * PyErr_NoMemory()
+ * return -1
+ */
+ }
+ }
+ __pyx_L4:;
+
+ /* "yarl/_quoting_c.pyx":116
+ * PyErr_NoMemory()
+ * return -1
+ * writer.buf = buf # <<<<<<<<<<<<<<
+ * writer.size = size
+ * writer.buf[writer.pos] = <char>ch
+ */
+ __pyx_v_writer->buf = __pyx_v_buf;
+
+ /* "yarl/_quoting_c.pyx":117
+ * return -1
+ * writer.buf = buf
+ * writer.size = size # <<<<<<<<<<<<<<
+ * writer.buf[writer.pos] = <char>ch
+ * writer.pos += 1
+ */
+ __pyx_v_writer->size = __pyx_v_size;
+
+ /* "yarl/_quoting_c.pyx":102
+ * cdef Py_ssize_t size
+ *
+ * if writer.pos == writer.size: # <<<<<<<<<<<<<<
+ * # reallocate
+ * size = writer.size + BUF_SIZE
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":118
+ * writer.buf = buf
+ * writer.size = size
+ * writer.buf[writer.pos] = <char>ch # <<<<<<<<<<<<<<
+ * writer.pos += 1
+ * writer.changed |= changed
+ */
+ (__pyx_v_writer->buf[__pyx_v_writer->pos]) = ((char)__pyx_v_ch);
+
+ /* "yarl/_quoting_c.pyx":119
+ * writer.size = size
+ * writer.buf[writer.pos] = <char>ch
+ * writer.pos += 1 # <<<<<<<<<<<<<<
+ * writer.changed |= changed
+ * return 0
+ */
+ __pyx_v_writer->pos = (__pyx_v_writer->pos + 1);
+
+ /* "yarl/_quoting_c.pyx":120
+ * writer.buf[writer.pos] = <char>ch
+ * writer.pos += 1
+ * writer.changed |= changed # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_v_writer->changed = (__pyx_v_writer->changed | __pyx_v_changed);
+
+ /* "yarl/_quoting_c.pyx":121
+ * writer.pos += 1
+ * writer.changed |= changed
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":98
+ *
+ *
+ * cdef inline int _write_char(Writer* writer, Py_UCS4 ch, bint changed): # <<<<<<<<<<<<<<
+ * cdef char * buf
+ * cdef Py_ssize_t size
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_WriteUnraisable("yarl._quoting_c._write_char", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
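+/* Editor's note, not generated code: growth policy of the function above.
+ * The buffer is extended in fixed 8 KiB (BUF_SIZE) steps rather than
+ * doubled. The first time the static BUFFER overflows, its contents are
+ * copied into a fresh PyMem_Malloc block, since a static array cannot be
+ * passed to realloc; every later growth is a plain PyMem_Realloc. Callers
+ * treat a negative result as failure, as _write_pct below does:
+ *
+ *     if (_write_char(&w, ch, changed) < 0)
+ *         return -1;   // error path, see _write_pct
+ */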
+
+/* "yarl/_quoting_c.pyx":124
+ *
+ *
+ * cdef inline int _write_pct(Writer* writer, uint8_t ch, bint changed): # <<<<<<<<<<<<<<
+ * if _write_char(writer, '%', changed) < 0:
+ * return -1
+ */
+
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__write_pct(struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer, uint8_t __pyx_v_ch, int __pyx_v_changed) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ __Pyx_RefNannySetupContext("_write_pct", 0);
+
+ /* "yarl/_quoting_c.pyx":125
+ *
+ * cdef inline int _write_pct(Writer* writer, uint8_t ch, bint changed):
+ * if _write_char(writer, '%', changed) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_char(writer, _to_hex(<uint8_t>ch >> 4), changed) < 0:
+ */
+ __pyx_t_1 = ((__pyx_f_4yarl_10_quoting_c__write_char(__pyx_v_writer, 37, __pyx_v_changed) < 0) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":126
+ * cdef inline int _write_pct(Writer* writer, uint8_t ch, bint changed):
+ * if _write_char(writer, '%', changed) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * if _write_char(writer, _to_hex(<uint8_t>ch >> 4), changed) < 0:
+ * return -1
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":125
+ *
+ * cdef inline int _write_pct(Writer* writer, uint8_t ch, bint changed):
+ * if _write_char(writer, '%', changed) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_char(writer, _to_hex(<uint8_t>ch >> 4), changed) < 0:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":127
+ * if _write_char(writer, '%', changed) < 0:
+ * return -1
+ * if _write_char(writer, _to_hex(<uint8_t>ch >> 4), changed) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_char(writer, _to_hex(<uint8_t>ch & 0x0f), changed)
+ */
+ __pyx_t_1 = ((__pyx_f_4yarl_10_quoting_c__write_char(__pyx_v_writer, __pyx_f_4yarl_10_quoting_c__to_hex((((uint8_t)__pyx_v_ch) >> 4)), __pyx_v_changed) < 0) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":128
+ * return -1
+ * if _write_char(writer, _to_hex(<uint8_t>ch >> 4), changed) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * return _write_char(writer, _to_hex(<uint8_t>ch & 0x0f), changed)
+ *
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":127
+ * if _write_char(writer, '%', changed) < 0:
+ * return -1
+ * if _write_char(writer, _to_hex(<uint8_t>ch >> 4), changed) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_char(writer, _to_hex(<uint8_t>ch & 0x0f), changed)
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":129
+ * if _write_char(writer, _to_hex(<uint8_t>ch >> 4), changed) < 0:
+ * return -1
+ * return _write_char(writer, _to_hex(<uint8_t>ch & 0x0f), changed) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_f_4yarl_10_quoting_c__write_char(__pyx_v_writer, __pyx_f_4yarl_10_quoting_c__to_hex((((uint8_t)__pyx_v_ch) & 0x0f)), __pyx_v_changed);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":124
+ *
+ *
+ * cdef inline int _write_pct(Writer* writer, uint8_t ch, bint changed): # <<<<<<<<<<<<<<
+ * if _write_char(writer, '%', changed) < 0:
+ * return -1
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
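+/* Editor's sketch, not generated code: _write_pct emits one percent-escaped
+ * byte as three characters: '%', then the high nibble, then the low nibble,
+ * both through _to_hex, so byte 0x2F is written as "%2F". The equivalent
+ * pseudo-C with the generated-name prefixes stripped:
+ *
+ *     if (write_char(w, '%', changed) < 0) return -1;
+ *     if (write_char(w, to_hex(ch >> 4), changed) < 0) return -1;
+ *     return write_char(w, to_hex(ch & 0x0f), changed);
+ */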
+
+/* "yarl/_quoting_c.pyx":132
+ *
+ *
+ * cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol): # <<<<<<<<<<<<<<
+ * cdef uint64_t utf = <uint64_t> symbol
+ *
+ */
+
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c__write_utf8(struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer, Py_UCS4 __pyx_v_symbol) {
+ uint64_t __pyx_v_utf;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("_write_utf8", 0);
+
+ /* "yarl/_quoting_c.pyx":133
+ *
+ * cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol):
+ * cdef uint64_t utf = <uint64_t> symbol # <<<<<<<<<<<<<<
+ *
+ * if utf < 0x80:
+ */
+ __pyx_v_utf = ((uint64_t)__pyx_v_symbol);
+
+ /* "yarl/_quoting_c.pyx":135
+ * cdef uint64_t utf = <uint64_t> symbol
+ *
+ * if utf < 0x80: # <<<<<<<<<<<<<<
+ * return _write_pct(writer, <uint8_t>utf, True)
+ * elif utf < 0x800:
+ */
+ __pyx_t_1 = ((__pyx_v_utf < 0x80) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":136
+ *
+ * if utf < 0x80:
+ * return _write_pct(writer, <uint8_t>utf, True) # <<<<<<<<<<<<<<
+ * elif utf < 0x800:
+ * if _write_pct(writer, <uint8_t>(0xc0 | (utf >> 6)), True) < 0:
+ */
+ __pyx_r = __pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)__pyx_v_utf), 1);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":135
+ * cdef uint64_t utf = <uint64_t> symbol
+ *
+ * if utf < 0x80: # <<<<<<<<<<<<<<
+ * return _write_pct(writer, <uint8_t>utf, True)
+ * elif utf < 0x800:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":137
+ * if utf < 0x80:
+ * return _write_pct(writer, <uint8_t>utf, True)
+ * elif utf < 0x800: # <<<<<<<<<<<<<<
+ * if _write_pct(writer, <uint8_t>(0xc0 | (utf >> 6)), True) < 0:
+ * return -1
+ */
+ __pyx_t_1 = ((__pyx_v_utf < 0x800) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":138
+ * return _write_pct(writer, <uint8_t>utf, True)
+ * elif utf < 0x800:
+ * if _write_pct(writer, <uint8_t>(0xc0 | (utf >> 6)), True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ */
+ __pyx_t_1 = ((__pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0xc0 | (__pyx_v_utf >> 6))), 1) < 0) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":139
+ * elif utf < 0x800:
+ * if _write_pct(writer, <uint8_t>(0xc0 | (utf >> 6)), True) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ * elif 0xD800 <= utf <= 0xDFFF:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":138
+ * return _write_pct(writer, <uint8_t>utf, True)
+ * elif utf < 0x800:
+ * if _write_pct(writer, <uint8_t>(0xc0 | (utf >> 6)), True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":140
+ * if _write_pct(writer, <uint8_t>(0xc0 | (utf >> 6)), True) < 0:
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True) # <<<<<<<<<<<<<<
+ * elif 0xD800 <= utf <= 0xDFFF:
+ * # surrogate pair, ignored
+ */
+ __pyx_r = __pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0x80 | (__pyx_v_utf & 0x3f))), 1);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":137
+ * if utf < 0x80:
+ * return _write_pct(writer, <uint8_t>utf, True)
+ * elif utf < 0x800: # <<<<<<<<<<<<<<
+ * if _write_pct(writer, <uint8_t>(0xc0 | (utf >> 6)), True) < 0:
+ * return -1
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":141
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ * elif 0xD800 <= utf <= 0xDFFF: # <<<<<<<<<<<<<<
+ * # surrogate pair, ignored
+ * return 0
+ */
+ __pyx_t_1 = (0xD800 <= __pyx_v_utf);
+ if (__pyx_t_1) {
+ __pyx_t_1 = (__pyx_v_utf <= 0xDFFF);
+ }
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":143
+ * elif 0xD800 <= utf <= 0xDFFF:
+ * # surrogate pair, ignored
+ * return 0 # <<<<<<<<<<<<<<
+ * elif utf < 0x10000:
+ * if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0:
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":141
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ * elif 0xD800 <= utf <= 0xDFFF: # <<<<<<<<<<<<<<
+ * # surrogate pair, ignored
+ * return 0
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":144
+ * # surrogate pair, ignored
+ * return 0
+ * elif utf < 0x10000: # <<<<<<<<<<<<<<
+ * if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0:
+ * return -1
+ */
+ __pyx_t_2 = ((__pyx_v_utf < 0x10000) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":145
+ * return 0
+ * elif utf < 0x10000:
+ * if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ */
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0xe0 | (__pyx_v_utf >> 12))), 1) < 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":146
+ * elif utf < 0x10000:
+ * if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ * True) < 0:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":145
+ * return 0
+ * elif utf < 0x10000:
+ * if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":148
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ * True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ */
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0x80 | ((__pyx_v_utf >> 6) & 0x3f))), 1) < 0) != 0);
+
+ /* "yarl/_quoting_c.pyx":147
+ * if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0:
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)), # <<<<<<<<<<<<<<
+ * True) < 0:
+ * return -1
+ */
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":149
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ * True) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ * elif utf > 0x10FFFF:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":147
+ * if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0:
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)), # <<<<<<<<<<<<<<
+ * True) < 0:
+ * return -1
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":150
+ * True) < 0:
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True) # <<<<<<<<<<<<<<
+ * elif utf > 0x10FFFF:
+ * # symbol is too large
+ */
+ __pyx_r = __pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0x80 | (__pyx_v_utf & 0x3f))), 1);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":144
+ * # surrogate pair, ignored
+ * return 0
+ * elif utf < 0x10000: # <<<<<<<<<<<<<<
+ * if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0:
+ * return -1
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":151
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ * elif utf > 0x10FFFF: # <<<<<<<<<<<<<<
+ * # symbol is too large
+ * return 0
+ */
+ __pyx_t_2 = ((__pyx_v_utf > 0x10FFFF) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":153
+ * elif utf > 0x10FFFF:
+ * # symbol is too large
+ * return 0 # <<<<<<<<<<<<<<
+ * else:
+ * if _write_pct(writer, <uint8_t>(0xf0 | (utf >> 18)), True) < 0:
+ */
+ __pyx_r = 0;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":151
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ * elif utf > 0x10FFFF: # <<<<<<<<<<<<<<
+ * # symbol is too large
+ * return 0
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":155
+ * return 0
+ * else:
+ * if _write_pct(writer, <uint8_t>(0xf0 | (utf >> 18)), True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 12) & 0x3f)),
+ */
+ /*else*/ {
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0xf0 | (__pyx_v_utf >> 18))), 1) < 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":156
+ * else:
+ * if _write_pct(writer, <uint8_t>(0xf0 | (utf >> 18)), True) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 12) & 0x3f)),
+ * True) < 0:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":155
+ * return 0
+ * else:
+ * if _write_pct(writer, <uint8_t>(0xf0 | (utf >> 18)), True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 12) & 0x3f)),
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":158
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 12) & 0x3f)),
+ * True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ */
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0x80 | ((__pyx_v_utf >> 12) & 0x3f))), 1) < 0) != 0);
+
+ /* "yarl/_quoting_c.pyx":157
+ * if _write_pct(writer, <uint8_t>(0xf0 | (utf >> 18)), True) < 0:
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 12) & 0x3f)), # <<<<<<<<<<<<<<
+ * True) < 0:
+ * return -1
+ */
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":159
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 12) & 0x3f)),
+ * True) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ * True) < 0:
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":157
+ * if _write_pct(writer, <uint8_t>(0xf0 | (utf >> 18)), True) < 0:
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 12) & 0x3f)), # <<<<<<<<<<<<<<
+ * True) < 0:
+ * return -1
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":161
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ * True) < 0: # <<<<<<<<<<<<<<
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ */
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0x80 | ((__pyx_v_utf >> 6) & 0x3f))), 1) < 0) != 0);
+
+ /* "yarl/_quoting_c.pyx":160
+ * True) < 0:
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)), # <<<<<<<<<<<<<<
+ * True) < 0:
+ * return -1
+ */
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":162
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ * True) < 0:
+ * return -1 # <<<<<<<<<<<<<<
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ *
+ */
+ __pyx_r = -1;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":160
+ * True) < 0:
+ * return -1
+ * if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)), # <<<<<<<<<<<<<<
+ * True) < 0:
+ * return -1
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":163
+ * True) < 0:
+ * return -1
+ * return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, ((uint8_t)(0x80 | (__pyx_v_utf & 0x3f))), 1);
+ goto __pyx_L0;
+ }
+
+ /* "yarl/_quoting_c.pyx":132
+ *
+ *
+ * cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol): # <<<<<<<<<<<<<<
+ * cdef uint64_t utf = <uint64_t> symbol
+ *
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
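+
+/* The branch ladder above is manual UTF-8 encoding followed by
+ * percent-encoding of each resulting byte. A minimal Python sketch of the
+ * same decision tree (illustrative; `write_pct` is a hypothetical stand-in
+ * for the C-level _write_pct, returning -1 on failure):
+ *
+ *     def write_utf8(write_pct, symbol):
+ *         utf = ord(symbol)
+ *         if utf < 0x80:                      # 1-byte sequence
+ *             seq = [utf]
+ *         elif utf < 0x800:                   # 2-byte sequence
+ *             seq = [0xC0 | (utf >> 6), 0x80 | (utf & 0x3F)]
+ *         elif 0xD800 <= utf <= 0xDFFF:       # surrogate: silently dropped
+ *             return 0
+ *         elif utf < 0x10000:                 # 3-byte sequence
+ *             seq = [0xE0 | (utf >> 12),
+ *                    0x80 | ((utf >> 6) & 0x3F),
+ *                    0x80 | (utf & 0x3F)]
+ *         elif utf > 0x10FFFF:                # beyond Unicode range: dropped
+ *             return 0
+ *         else:                               # 4-byte sequence
+ *             seq = [0xF0 | (utf >> 18),
+ *                    0x80 | ((utf >> 12) & 0x3F),
+ *                    0x80 | ((utf >> 6) & 0x3F),
+ *                    0x80 | (utf & 0x3F)]
+ *         for byte in seq:
+ *             if write_pct(byte, True) < 0:
+ *                 return -1
+ *         return 0
+ */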
+
+/* "yarl/_quoting_c.pyx":176
+ * cdef uint8_t _protected_table[16]
+ *
+ * def __init__( # <<<<<<<<<<<<<<
+ * self, *, str safe='', str protected='', bint qs=False, bint requote=True,
+ * ):
+ */
+
+/* Python wrapper */
+static int __pyx_pw_4yarl_10_quoting_c_7_Quoter_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_4yarl_10_quoting_c_7_Quoter_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_safe = 0;
+ PyObject *__pyx_v_protected = 0;
+ int __pyx_v_qs;
+ int __pyx_v_requote;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_safe,&__pyx_n_s_protected,&__pyx_n_s_qs,&__pyx_n_s_requote,0};
+ PyObject* values[4] = {0,0,0,0};
+ values[0] = ((PyObject*)__pyx_kp_u_);
+ values[1] = ((PyObject*)__pyx_kp_u_);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ if (kw_args > 0 && likely(kw_args <= 4)) {
+ Py_ssize_t index;
+ for (index = 0; index < 4 && kw_args > 0; index++) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, *__pyx_pyargnames[index]);
+ if (value) { values[index] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, 0, "__init__") < 0)) __PYX_ERR(0, 176, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 0) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ }
+ __pyx_v_safe = ((PyObject*)values[0]);
+ __pyx_v_protected = ((PyObject*)values[1]);
+ if (values[2]) {
+ __pyx_v_qs = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_qs == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 177, __pyx_L3_error)
+ } else {
+
+ /* "yarl/_quoting_c.pyx":177
+ *
+ * def __init__(
+ * self, *, str safe='', str protected='', bint qs=False, bint requote=True, # <<<<<<<<<<<<<<
+ * ):
+ * cdef Py_UCS4 ch
+ */
+ __pyx_v_qs = ((int)0);
+ }
+ if (values[3]) {
+ __pyx_v_requote = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_requote == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 177, __pyx_L3_error)
+ } else {
+ __pyx_v_requote = ((int)1);
+ }
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 176, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("yarl._quoting_c._Quoter.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_safe), (&PyUnicode_Type), 1, "safe", 1))) __PYX_ERR(0, 177, __pyx_L1_error)
+ if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_protected), (&PyUnicode_Type), 1, "protected", 1))) __PYX_ERR(0, 177, __pyx_L1_error)
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_7_Quoter___init__(((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_v_self), __pyx_v_safe, __pyx_v_protected, __pyx_v_qs, __pyx_v_requote);
+
+ /* "yarl/_quoting_c.pyx":176
+ * cdef uint8_t _protected_table[16]
+ *
+ * def __init__( # <<<<<<<<<<<<<<
+ * self, *, str safe='', str protected='', bint qs=False, bint requote=True,
+ * ):
+ */
+
+ /* function exit code */
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_4yarl_10_quoting_c_7_Quoter___init__(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, PyObject *__pyx_v_safe, PyObject *__pyx_v_protected, int __pyx_v_qs, int __pyx_v_requote) {
+ Py_UCS4 __pyx_v_ch;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ Py_ssize_t __pyx_t_3;
+ Py_ssize_t __pyx_t_4;
+ void *__pyx_t_5;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ Py_ssize_t __pyx_t_8;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__init__", 0);
+
+ /* "yarl/_quoting_c.pyx":181
+ * cdef Py_UCS4 ch
+ *
+ * self._qs = qs # <<<<<<<<<<<<<<
+ * self._requote = requote
+ *
+ */
+ __pyx_v_self->_qs = __pyx_v_qs;
+
+ /* "yarl/_quoting_c.pyx":182
+ *
+ * self._qs = qs
+ * self._requote = requote # <<<<<<<<<<<<<<
+ *
+ * if not self._qs:
+ */
+ __pyx_v_self->_requote = __pyx_v_requote;
+
+ /* "yarl/_quoting_c.pyx":184
+ * self._requote = requote
+ *
+ * if not self._qs: # <<<<<<<<<<<<<<
+ * memcpy(self._safe_table,
+ * ALLOWED_NOTQS_TABLE,
+ */
+ __pyx_t_1 = ((!(__pyx_v_self->_qs != 0)) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":185
+ *
+ * if not self._qs:
+ * memcpy(self._safe_table, # <<<<<<<<<<<<<<
+ * ALLOWED_NOTQS_TABLE,
+ * sizeof(self._safe_table))
+ */
+ (void)(memcpy(__pyx_v_self->_safe_table, __pyx_v_4yarl_10_quoting_c_ALLOWED_NOTQS_TABLE, (sizeof(__pyx_v_self->_safe_table))));
+
+ /* "yarl/_quoting_c.pyx":184
+ * self._requote = requote
+ *
+ * if not self._qs: # <<<<<<<<<<<<<<
+ * memcpy(self._safe_table,
+ * ALLOWED_NOTQS_TABLE,
+ */
+ goto __pyx_L3;
+ }
+
+ /* "yarl/_quoting_c.pyx":189
+ * sizeof(self._safe_table))
+ * else:
+ * memcpy(self._safe_table, # <<<<<<<<<<<<<<
+ * ALLOWED_TABLE,
+ * sizeof(self._safe_table))
+ */
+ /*else*/ {
+
+ /* "yarl/_quoting_c.pyx":191
+ * memcpy(self._safe_table,
+ * ALLOWED_TABLE,
+ * sizeof(self._safe_table)) # <<<<<<<<<<<<<<
+ * for ch in safe:
+ * if ord(ch) > 127:
+ */
+ (void)(memcpy(__pyx_v_self->_safe_table, __pyx_v_4yarl_10_quoting_c_ALLOWED_TABLE, (sizeof(__pyx_v_self->_safe_table))));
+ }
+ __pyx_L3:;
+
+ /* "yarl/_quoting_c.pyx":192
+ * ALLOWED_TABLE,
+ * sizeof(self._safe_table))
+ * for ch in safe: # <<<<<<<<<<<<<<
+ * if ord(ch) > 127:
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ */
+ if (unlikely(__pyx_v_safe == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
+ __PYX_ERR(0, 192, __pyx_L1_error)
+ }
+ __Pyx_INCREF(__pyx_v_safe);
+ __pyx_t_2 = __pyx_v_safe;
+ __pyx_t_7 = __Pyx_init_unicode_iteration(__pyx_t_2, (&__pyx_t_4), (&__pyx_t_5), (&__pyx_t_6)); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(0, 192, __pyx_L1_error)
+ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_4; __pyx_t_8++) {
+ __pyx_t_3 = __pyx_t_8;
+ __pyx_v_ch = __Pyx_PyUnicode_READ(__pyx_t_6, __pyx_t_5, __pyx_t_3);
+
+ /* "yarl/_quoting_c.pyx":193
+ * sizeof(self._safe_table))
+ * for ch in safe:
+ * if ord(ch) > 127: # <<<<<<<<<<<<<<
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ * set_bit(self._safe_table, ch)
+ */
+ __pyx_t_1 = ((((long)__pyx_v_ch) > 0x7F) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "yarl/_quoting_c.pyx":194
+ * for ch in safe:
+ * if ord(ch) > 127:
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed") # <<<<<<<<<<<<<<
+ * set_bit(self._safe_table, ch)
+ *
+ */
+ __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 194, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_Raise(__pyx_t_9, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __PYX_ERR(0, 194, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":193
+ * sizeof(self._safe_table))
+ * for ch in safe:
+ * if ord(ch) > 127: # <<<<<<<<<<<<<<
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ * set_bit(self._safe_table, ch)
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":195
+ * if ord(ch) > 127:
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ * set_bit(self._safe_table, ch) # <<<<<<<<<<<<<<
+ *
+ * memset(self._protected_table, 0, sizeof(self._protected_table))
+ */
+ __pyx_f_4yarl_10_quoting_c_set_bit(__pyx_v_self->_safe_table, __pyx_v_ch);
+ }
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "yarl/_quoting_c.pyx":197
+ * set_bit(self._safe_table, ch)
+ *
+ * memset(self._protected_table, 0, sizeof(self._protected_table)) # <<<<<<<<<<<<<<
+ * for ch in protected:
+ * if ord(ch) > 127:
+ */
+ (void)(memset(__pyx_v_self->_protected_table, 0, (sizeof(__pyx_v_self->_protected_table))));
+
+ /* "yarl/_quoting_c.pyx":198
+ *
+ * memset(self._protected_table, 0, sizeof(self._protected_table))
+ * for ch in protected: # <<<<<<<<<<<<<<
+ * if ord(ch) > 127:
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ */
+ if (unlikely(__pyx_v_protected == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
+ __PYX_ERR(0, 198, __pyx_L1_error)
+ }
+ __Pyx_INCREF(__pyx_v_protected);
+ __pyx_t_2 = __pyx_v_protected;
+ __pyx_t_7 = __Pyx_init_unicode_iteration(__pyx_t_2, (&__pyx_t_3), (&__pyx_t_5), (&__pyx_t_6)); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(0, 198, __pyx_L1_error)
+ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_3; __pyx_t_8++) {
+ __pyx_t_4 = __pyx_t_8;
+ __pyx_v_ch = __Pyx_PyUnicode_READ(__pyx_t_6, __pyx_t_5, __pyx_t_4);
+
+ /* "yarl/_quoting_c.pyx":199
+ * memset(self._protected_table, 0, sizeof(self._protected_table))
+ * for ch in protected:
+ * if ord(ch) > 127: # <<<<<<<<<<<<<<
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ * set_bit(self._safe_table, ch)
+ */
+ __pyx_t_1 = ((((long)__pyx_v_ch) > 0x7F) != 0);
+ if (unlikely(__pyx_t_1)) {
+
+ /* "yarl/_quoting_c.pyx":200
+ * for ch in protected:
+ * if ord(ch) > 127:
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed") # <<<<<<<<<<<<<<
+ * set_bit(self._safe_table, ch)
+ * set_bit(self._protected_table, ch)
+ */
+ __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 200, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_9);
+ __Pyx_Raise(__pyx_t_9, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __PYX_ERR(0, 200, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":199
+ * memset(self._protected_table, 0, sizeof(self._protected_table))
+ * for ch in protected:
+ * if ord(ch) > 127: # <<<<<<<<<<<<<<
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ * set_bit(self._safe_table, ch)
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":201
+ * if ord(ch) > 127:
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ * set_bit(self._safe_table, ch) # <<<<<<<<<<<<<<
+ * set_bit(self._protected_table, ch)
+ *
+ */
+ __pyx_f_4yarl_10_quoting_c_set_bit(__pyx_v_self->_safe_table, __pyx_v_ch);
+
+ /* "yarl/_quoting_c.pyx":202
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ * set_bit(self._safe_table, ch)
+ * set_bit(self._protected_table, ch) # <<<<<<<<<<<<<<
+ *
+ * def __call__(self, val):
+ */
+ __pyx_f_4yarl_10_quoting_c_set_bit(__pyx_v_self->_protected_table, __pyx_v_ch);
+ }
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "yarl/_quoting_c.pyx":176
+ * cdef uint8_t _protected_table[16]
+ *
+ * def __init__( # <<<<<<<<<<<<<<
+ * self, *, str safe='', str protected='', bint qs=False, bint requote=True,
+ * ):
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_AddTraceback("yarl._quoting_c._Quoter.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
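+
+/* __init__ above seeds two 16-byte (128-bit) lookup tables: _safe_table
+ * starts from the qs/not-qs allowed set and gains each `safe` character,
+ * while _protected_table starts empty and gains each `protected` character
+ * (which is also marked safe). A Python model of the bitmap helpers, using
+ * one plausible packing (illustrative; the actual layout is whatever the
+ * C-level set_bit/bit_at define):
+ *
+ *     def set_bit(table: bytearray, ch: str) -> None:
+ *         i = ord(ch)                       # only i < 128 is accepted upstream
+ *         table[i >> 3] |= 1 << (i & 7)
+ *
+ *     def bit_at(table: bytearray, ch: str) -> bool:
+ *         i = ord(ch)
+ *         return bool(table[i >> 3] & (1 << (i & 7)))
+ */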
+
+/* "yarl/_quoting_c.pyx":204
+ * set_bit(self._protected_table, ch)
+ *
+ * def __call__(self, val): # <<<<<<<<<<<<<<
+ * cdef Writer writer
+ * if val is None:
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_4yarl_10_quoting_c_7_Quoter_3__call__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_pw_4yarl_10_quoting_c_7_Quoter_3__call__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_val = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__call__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_val,0};
+ PyObject* values[1] = {0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_val)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) __PYX_ERR(0, 204, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ }
+ __pyx_v_val = values[0];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 204, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("yarl._quoting_c._Quoter.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_7_Quoter_2__call__(((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_v_self), __pyx_v_val);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_4yarl_10_quoting_c_7_Quoter_2__call__(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, PyObject *__pyx_v_val) {
+ struct __pyx_t_4yarl_10_quoting_c_Writer __pyx_v_writer;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ char const *__pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ PyObject *__pyx_t_10 = NULL;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__call__", 0);
+ __Pyx_INCREF(__pyx_v_val);
+
+ /* "yarl/_quoting_c.pyx":206
+ * def __call__(self, val):
+ * cdef Writer writer
+ * if val is None: # <<<<<<<<<<<<<<
+ * return None
+ * if type(val) is not str:
+ */
+ __pyx_t_1 = (__pyx_v_val == Py_None);
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":207
+ * cdef Writer writer
+ * if val is None:
+ * return None # <<<<<<<<<<<<<<
+ * if type(val) is not str:
+ * if isinstance(val, str):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":206
+ * def __call__(self, val):
+ * cdef Writer writer
+ * if val is None: # <<<<<<<<<<<<<<
+ * return None
+ * if type(val) is not str:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":208
+ * if val is None:
+ * return None
+ * if type(val) is not str: # <<<<<<<<<<<<<<
+ * if isinstance(val, str):
+ * # derived from str
+ */
+ __pyx_t_2 = (((PyObject *)Py_TYPE(__pyx_v_val)) != ((PyObject *)(&PyUnicode_Type)));
+ __pyx_t_1 = (__pyx_t_2 != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":209
+ * return None
+ * if type(val) is not str:
+ * if isinstance(val, str): # <<<<<<<<<<<<<<
+ * # derived from str
+ * val = str(val)
+ */
+ __pyx_t_1 = PyUnicode_Check(__pyx_v_val);
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (likely(__pyx_t_2)) {
+
+ /* "yarl/_quoting_c.pyx":211
+ * if isinstance(val, str):
+ * # derived from str
+ * val = str(val) # <<<<<<<<<<<<<<
+ * else:
+ * raise TypeError("Argument should be str")
+ */
+ __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyUnicode_Type)), __pyx_v_val); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 211, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_val, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "yarl/_quoting_c.pyx":209
+ * return None
+ * if type(val) is not str:
+ * if isinstance(val, str): # <<<<<<<<<<<<<<
+ * # derived from str
+ * val = str(val)
+ */
+ goto __pyx_L5;
+ }
+
+ /* "yarl/_quoting_c.pyx":213
+ * val = str(val)
+ * else:
+ * raise TypeError("Argument should be str") # <<<<<<<<<<<<<<
+ * _init_writer(&writer)
+ * try:
+ */
+ /*else*/ {
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 213, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(0, 213, __pyx_L1_error)
+ }
+ __pyx_L5:;
+
+ /* "yarl/_quoting_c.pyx":208
+ * if val is None:
+ * return None
+ * if type(val) is not str: # <<<<<<<<<<<<<<
+ * if isinstance(val, str):
+ * # derived from str
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":214
+ * else:
+ * raise TypeError("Argument should be str")
+ * _init_writer(&writer) # <<<<<<<<<<<<<<
+ * try:
+ * return self._do_quote(<str>val, &writer)
+ */
+ __pyx_f_4yarl_10_quoting_c__init_writer((&__pyx_v_writer));
+
+ /* "yarl/_quoting_c.pyx":215
+ * raise TypeError("Argument should be str")
+ * _init_writer(&writer)
+ * try: # <<<<<<<<<<<<<<
+ * return self._do_quote(<str>val, &writer)
+ * finally:
+ */
+ /*try:*/ {
+
+ /* "yarl/_quoting_c.pyx":216
+ * _init_writer(&writer)
+ * try:
+ * return self._do_quote(<str>val, &writer) # <<<<<<<<<<<<<<
+ * finally:
+ * _release_writer(&writer)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = ((struct __pyx_vtabstruct_4yarl_10_quoting_c__Quoter *)__pyx_v_self->__pyx_vtab)->_do_quote(__pyx_v_self, ((PyObject*)__pyx_v_val), (&__pyx_v_writer)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 216, __pyx_L7_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L6_return;
+ }
+
+ /* "yarl/_quoting_c.pyx":218
+ * return self._do_quote(<str>val, &writer)
+ * finally:
+ * _release_writer(&writer) # <<<<<<<<<<<<<<
+ *
+ * cdef str _do_quote(self, str val, Writer *writer):
+ */
+ /*finally:*/ {
+ __pyx_L7_error:;
+ /*exception exit:*/{
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
+ if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
+ __Pyx_XGOTREF(__pyx_t_7);
+ __Pyx_XGOTREF(__pyx_t_8);
+ __Pyx_XGOTREF(__pyx_t_9);
+ __Pyx_XGOTREF(__pyx_t_10);
+ __Pyx_XGOTREF(__pyx_t_11);
+ __Pyx_XGOTREF(__pyx_t_12);
+ __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
+ {
+ __pyx_f_4yarl_10_quoting_c__release_writer((&__pyx_v_writer));
+ }
+ if (PY_MAJOR_VERSION >= 3) {
+ __Pyx_XGIVEREF(__pyx_t_10);
+ __Pyx_XGIVEREF(__pyx_t_11);
+ __Pyx_XGIVEREF(__pyx_t_12);
+ __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
+ }
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_XGIVEREF(__pyx_t_9);
+ __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
+ __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
+ __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
+ goto __pyx_L1_error;
+ }
+ __pyx_L6_return: {
+ __pyx_t_12 = __pyx_r;
+ __pyx_r = 0;
+ __pyx_f_4yarl_10_quoting_c__release_writer((&__pyx_v_writer));
+ __pyx_r = __pyx_t_12;
+ __pyx_t_12 = 0;
+ goto __pyx_L0;
+ }
+ }
+
+ /* "yarl/_quoting_c.pyx":204
+ * set_bit(self._protected_table, ch)
+ *
+ * def __call__(self, val): # <<<<<<<<<<<<<<
+ * cdef Writer writer
+ * if val is None:
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("yarl._quoting_c._Quoter.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_val);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
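+
+/* __call__ above is a thin guard around _do_quote: None passes through,
+ * str subclasses are flattened with str(), anything else raises TypeError,
+ * and the Writer buffer is always released through the generated
+ * try/finally. The equivalent Python-level contract (illustrative sketch;
+ * init_writer/release_writer are hypothetical stand-ins for the C-level
+ * _init_writer/_release_writer):
+ *
+ *     def __call__(self, val):
+ *         if val is None:
+ *             return None
+ *         if type(val) is not str:
+ *             if isinstance(val, str):
+ *                 val = str(val)            # normalize str subclasses
+ *             else:
+ *                 raise TypeError("Argument should be str")
+ *         writer = init_writer()
+ *         try:
+ *             return self._do_quote(val, writer)
+ *         finally:
+ *             release_writer(writer)
+ */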
+
+/* "yarl/_quoting_c.pyx":220
+ * _release_writer(&writer)
+ *
+ * cdef str _do_quote(self, str val, Writer *writer): # <<<<<<<<<<<<<<
+ * cdef Py_UCS4 ch
+ * cdef int changed
+ */
+
+static PyObject *__pyx_f_4yarl_10_quoting_c_7_Quoter__do_quote(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, PyObject *__pyx_v_val, struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer) {
+ Py_UCS4 __pyx_v_ch;
+ int __pyx_v_changed;
+ int __pyx_v_idx;
+ int __pyx_v_length;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ Py_ssize_t __pyx_t_1;
+ int __pyx_t_2;
+ Py_UCS4 __pyx_t_3;
+ int __pyx_t_4;
+ long __pyx_t_5;
+ Py_UCS4 __pyx_t_6;
+ int __pyx_t_7;
+ int __pyx_t_8;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_do_quote", 0);
+
+ /* "yarl/_quoting_c.pyx":223
+ * cdef Py_UCS4 ch
+ * cdef int changed
+ * cdef int idx = 0 # <<<<<<<<<<<<<<
+ * cdef int length = len(val)
+ *
+ */
+ __pyx_v_idx = 0;
+
+ /* "yarl/_quoting_c.pyx":224
+ * cdef int changed
+ * cdef int idx = 0
+ * cdef int length = len(val) # <<<<<<<<<<<<<<
+ *
+ * while idx < length:
+ */
+ if (unlikely(__pyx_v_val == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(0, 224, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_PyUnicode_GET_LENGTH(__pyx_v_val); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 224, __pyx_L1_error)
+ __pyx_v_length = __pyx_t_1;
+
+ /* "yarl/_quoting_c.pyx":226
+ * cdef int length = len(val)
+ *
+ * while idx < length: # <<<<<<<<<<<<<<
+ * ch = val[idx]
+ * idx += 1
+ */
+ while (1) {
+ __pyx_t_2 = ((__pyx_v_idx < __pyx_v_length) != 0);
+ if (!__pyx_t_2) break;
+
+ /* "yarl/_quoting_c.pyx":227
+ *
+ * while idx < length:
+ * ch = val[idx] # <<<<<<<<<<<<<<
+ * idx += 1
+ * if ch == '%' and self._requote and idx <= length - 2:
+ */
+ __pyx_t_3 = __Pyx_GetItemInt_Unicode(__pyx_v_val, __pyx_v_idx, int, 1, __Pyx_PyInt_From_int, 0, 1, 1); if (unlikely(__pyx_t_3 == (Py_UCS4)-1)) __PYX_ERR(0, 227, __pyx_L1_error)
+ __pyx_v_ch = __pyx_t_3;
+
+ /* "yarl/_quoting_c.pyx":228
+ * while idx < length:
+ * ch = val[idx]
+ * idx += 1 # <<<<<<<<<<<<<<
+ * if ch == '%' and self._requote and idx <= length - 2:
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ */
+ __pyx_v_idx = (__pyx_v_idx + 1);
+
+ /* "yarl/_quoting_c.pyx":229
+ * ch = val[idx]
+ * idx += 1
+ * if ch == '%' and self._requote and idx <= length - 2: # <<<<<<<<<<<<<<
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1:
+ */
+ __pyx_t_4 = ((__pyx_v_ch == 37) != 0);
+ if (__pyx_t_4) {
+ } else {
+ __pyx_t_2 = __pyx_t_4;
+ goto __pyx_L6_bool_binop_done;
+ }
+ __pyx_t_4 = (__pyx_v_self->_requote != 0);
+ if (__pyx_t_4) {
+ } else {
+ __pyx_t_2 = __pyx_t_4;
+ goto __pyx_L6_bool_binop_done;
+ }
+ __pyx_t_4 = ((__pyx_v_idx <= (__pyx_v_length - 2)) != 0);
+ __pyx_t_2 = __pyx_t_4;
+ __pyx_L6_bool_binop_done:;
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":230
+ * idx += 1
+ * if ch == '%' and self._requote and idx <= length - 2:
+ * ch = _restore_ch(val[idx], val[idx + 1]) # <<<<<<<<<<<<<<
+ * if ch != <Py_UCS4>-1:
+ * idx += 2
+ */
+ __pyx_t_3 = __Pyx_GetItemInt_Unicode(__pyx_v_val, __pyx_v_idx, int, 1, __Pyx_PyInt_From_int, 0, 1, 1); if (unlikely(__pyx_t_3 == (Py_UCS4)-1)) __PYX_ERR(0, 230, __pyx_L1_error)
+ __pyx_t_5 = (__pyx_v_idx + 1);
+ __pyx_t_6 = __Pyx_GetItemInt_Unicode(__pyx_v_val, __pyx_t_5, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_6 == (Py_UCS4)-1)) __PYX_ERR(0, 230, __pyx_L1_error)
+ __pyx_v_ch = __pyx_f_4yarl_10_quoting_c__restore_ch(__pyx_t_3, __pyx_t_6);
+
+ /* "yarl/_quoting_c.pyx":231
+ * if ch == '%' and self._requote and idx <= length - 2:
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1: # <<<<<<<<<<<<<<
+ * idx += 2
+ * if ch < 128:
+ */
+ __pyx_t_2 = ((__pyx_v_ch != ((Py_UCS4)-1L)) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":232
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1:
+ * idx += 2 # <<<<<<<<<<<<<<
+ * if ch < 128:
+ * if bit_at(self._protected_table, ch):
+ */
+ __pyx_v_idx = (__pyx_v_idx + 2);
+
+ /* "yarl/_quoting_c.pyx":233
+ * if ch != <Py_UCS4>-1:
+ * idx += 2
+ * if ch < 128: # <<<<<<<<<<<<<<
+ * if bit_at(self._protected_table, ch):
+ * if _write_pct(writer, ch, True) < 0:
+ */
+ __pyx_t_2 = ((__pyx_v_ch < 0x80) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":234
+ * idx += 2
+ * if ch < 128:
+ * if bit_at(self._protected_table, ch): # <<<<<<<<<<<<<<
+ * if _write_pct(writer, ch, True) < 0:
+ * raise
+ */
+ __pyx_t_2 = (__pyx_f_4yarl_10_quoting_c_bit_at(__pyx_v_self->_protected_table, __pyx_v_ch) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":235
+ * if ch < 128:
+ * if bit_at(self._protected_table, ch):
+ * if _write_pct(writer, ch, True) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * continue
+ */
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, __pyx_v_ch, 1) < 0) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "yarl/_quoting_c.pyx":236
+ * if bit_at(self._protected_table, ch):
+ * if _write_pct(writer, ch, True) < 0:
+ * raise # <<<<<<<<<<<<<<
+ * continue
+ *
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 236, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":235
+ * if ch < 128:
+ * if bit_at(self._protected_table, ch):
+ * if _write_pct(writer, ch, True) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * continue
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":237
+ * if _write_pct(writer, ch, True) < 0:
+ * raise
+ * continue # <<<<<<<<<<<<<<
+ *
+ * if bit_at(self._safe_table, ch):
+ */
+ goto __pyx_L3_continue;
+
+ /* "yarl/_quoting_c.pyx":234
+ * idx += 2
+ * if ch < 128:
+ * if bit_at(self._protected_table, ch): # <<<<<<<<<<<<<<
+ * if _write_pct(writer, ch, True) < 0:
+ * raise
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":239
+ * continue
+ *
+ * if bit_at(self._safe_table, ch): # <<<<<<<<<<<<<<
+ * if _write_char(writer, ch, True) < 0:
+ * raise
+ */
+ __pyx_t_2 = (__pyx_f_4yarl_10_quoting_c_bit_at(__pyx_v_self->_safe_table, __pyx_v_ch) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":240
+ *
+ * if bit_at(self._safe_table, ch):
+ * if _write_char(writer, ch, True) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * continue
+ */
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c__write_char(__pyx_v_writer, __pyx_v_ch, 1) < 0) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "yarl/_quoting_c.pyx":241
+ * if bit_at(self._safe_table, ch):
+ * if _write_char(writer, ch, True) < 0:
+ * raise # <<<<<<<<<<<<<<
+ * continue
+ *
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 241, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":240
+ *
+ * if bit_at(self._safe_table, ch):
+ * if _write_char(writer, ch, True) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * continue
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":242
+ * if _write_char(writer, ch, True) < 0:
+ * raise
+ * continue # <<<<<<<<<<<<<<
+ *
+ * changed = (_is_lower_hex(val[idx - 2]) or
+ */
+ goto __pyx_L3_continue;
+
+ /* "yarl/_quoting_c.pyx":239
+ * continue
+ *
+ * if bit_at(self._safe_table, ch): # <<<<<<<<<<<<<<
+ * if _write_char(writer, ch, True) < 0:
+ * raise
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":233
+ * if ch != <Py_UCS4>-1:
+ * idx += 2
+ * if ch < 128: # <<<<<<<<<<<<<<
+ * if bit_at(self._protected_table, ch):
+ * if _write_pct(writer, ch, True) < 0:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":244
+ * continue
+ *
+ * changed = (_is_lower_hex(val[idx - 2]) or # <<<<<<<<<<<<<<
+ * _is_lower_hex(val[idx - 1]))
+ * if _write_pct(writer, ch, changed) < 0:
+ */
+ __pyx_t_5 = (__pyx_v_idx - 2);
+ __pyx_t_6 = __Pyx_GetItemInt_Unicode(__pyx_v_val, __pyx_t_5, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_6 == (Py_UCS4)-1)) __PYX_ERR(0, 244, __pyx_L1_error)
+ __pyx_t_8 = __pyx_f_4yarl_10_quoting_c__is_lower_hex(__pyx_t_6);
+ if (!__pyx_t_8) {
+ } else {
+ __pyx_t_7 = __pyx_t_8;
+ goto __pyx_L15_bool_binop_done;
+ }
+
+ /* "yarl/_quoting_c.pyx":245
+ *
+ * changed = (_is_lower_hex(val[idx - 2]) or
+ * _is_lower_hex(val[idx - 1])) # <<<<<<<<<<<<<<
+ * if _write_pct(writer, ch, changed) < 0:
+ * raise
+ */
+ __pyx_t_5 = (__pyx_v_idx - 1);
+ __pyx_t_6 = __Pyx_GetItemInt_Unicode(__pyx_v_val, __pyx_t_5, long, 1, __Pyx_PyInt_From_long, 0, 1, 1); if (unlikely(__pyx_t_6 == (Py_UCS4)-1)) __PYX_ERR(0, 245, __pyx_L1_error)
+ __pyx_t_8 = __pyx_f_4yarl_10_quoting_c__is_lower_hex(__pyx_t_6);
+ __pyx_t_7 = __pyx_t_8;
+ __pyx_L15_bool_binop_done:;
+ __pyx_v_changed = __pyx_t_7;
+
+ /* "yarl/_quoting_c.pyx":246
+ * changed = (_is_lower_hex(val[idx - 2]) or
+ * _is_lower_hex(val[idx - 1]))
+ * if _write_pct(writer, ch, changed) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * continue
+ */
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c__write_pct(__pyx_v_writer, __pyx_v_ch, __pyx_v_changed) < 0) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "yarl/_quoting_c.pyx":247
+ * _is_lower_hex(val[idx - 1]))
+ * if _write_pct(writer, ch, changed) < 0:
+ * raise # <<<<<<<<<<<<<<
+ * continue
+ * else:
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 247, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":246
+ * changed = (_is_lower_hex(val[idx - 2]) or
+ * _is_lower_hex(val[idx - 1]))
+ * if _write_pct(writer, ch, changed) < 0: # <<<<<<<<<<<<<<
+ * raise
+ * continue
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":248
+ * if _write_pct(writer, ch, changed) < 0:
+ * raise
+ * continue # <<<<<<<<<<<<<<
+ * else:
+ * ch = '%'
+ */
+ goto __pyx_L3_continue;
+
+ /* "yarl/_quoting_c.pyx":231
+ * if ch == '%' and self._requote and idx <= length - 2:
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1: # <<<<<<<<<<<<<<
+ * idx += 2
+ * if ch < 128:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":250
+ * continue
+ * else:
+ * ch = '%' # <<<<<<<<<<<<<<
+ *
+ * if self._write(writer, ch) < 0:
+ */
+ /*else*/ {
+ __pyx_v_ch = 37;
+ }
+
+ /* "yarl/_quoting_c.pyx":229
+ * ch = val[idx]
+ * idx += 1
+ * if ch == '%' and self._requote and idx <= length - 2: # <<<<<<<<<<<<<<
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":252
+ * ch = '%'
+ *
+ * if self._write(writer, ch) < 0: # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ __pyx_t_2 = ((__pyx_f_4yarl_10_quoting_c_7_Quoter__write(__pyx_v_self, __pyx_v_writer, __pyx_v_ch) < 0) != 0);
+ if (unlikely(__pyx_t_2)) {
+
+ /* "yarl/_quoting_c.pyx":253
+ *
+ * if self._write(writer, ch) < 0:
+ * raise # <<<<<<<<<<<<<<
+ *
+ * if not writer.changed:
+ */
+ __Pyx_ReraiseException(); __PYX_ERR(0, 253, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":252
+ * ch = '%'
+ *
+ * if self._write(writer, ch) < 0: # <<<<<<<<<<<<<<
+ * raise
+ *
+ */
+ }
+ __pyx_L3_continue:;
+ }
+
+ /* "yarl/_quoting_c.pyx":255
+ * raise
+ *
+ * if not writer.changed: # <<<<<<<<<<<<<<
+ * return val
+ * else:
+ */
+ __pyx_t_2 = ((!(__pyx_v_writer->changed != 0)) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":256
+ *
+ * if not writer.changed:
+ * return val # <<<<<<<<<<<<<<
+ * else:
+ * return PyUnicode_DecodeASCII(writer.buf, writer.pos, "strict")
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_val);
+ __pyx_r = __pyx_v_val;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":255
+ * raise
+ *
+ * if not writer.changed: # <<<<<<<<<<<<<<
+ * return val
+ * else:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":258
+ * return val
+ * else:
+ * return PyUnicode_DecodeASCII(writer.buf, writer.pos, "strict") # <<<<<<<<<<<<<<
+ *
+ * cdef inline int _write(self, Writer *writer, Py_UCS4 ch):
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_9 = PyUnicode_DecodeASCII(__pyx_v_writer->buf, __pyx_v_writer->pos, ((char *)"strict")); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 258, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_9);
+ __pyx_r = ((PyObject*)__pyx_t_9);
+ __pyx_t_9 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "yarl/_quoting_c.pyx":220
+ * _release_writer(&writer)
+ *
+ * cdef str _do_quote(self, str val, Writer *writer): # <<<<<<<<<<<<<<
+ * cdef Py_UCS4 ch
+ * cdef int changed
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_AddTraceback("yarl._quoting_c._Quoter._do_quote", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
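+
+/* _do_quote above walks the input once. With requote enabled, a '%' followed
+ * by two hex digits is decoded: protected bytes are re-emitted
+ * percent-encoded, safe bytes are emitted literally, and anything else is
+ * re-encoded, marked "changed" when the original hex digits were lower-case
+ * so the output is normalized to upper-case. If the writer never changed
+ * anything, the original string object is returned untouched. In Python
+ * terms the loop reads roughly as follows (condensed sketch; the bare
+ * `raise` error branches are omitted, helper names mirror the C level):
+ *
+ *     idx = 0
+ *     while idx < length:
+ *         ch = val[idx]; idx += 1
+ *         if ch == '%' and self._requote and idx <= length - 2:
+ *             decoded = restore_ch(val[idx], val[idx + 1])  # -1 if not hex
+ *             if decoded != -1:
+ *                 idx += 2
+ *                 if decoded < 128:
+ *                     if bit_at(self._protected_table, decoded):
+ *                         write_pct(writer, decoded, True)
+ *                         continue
+ *                     if bit_at(self._safe_table, decoded):
+ *                         write_char(writer, decoded, True)
+ *                         continue
+ *                 changed = (is_lower_hex(val[idx - 2]) or
+ *                            is_lower_hex(val[idx - 1]))
+ *                 write_pct(writer, decoded, changed)
+ *                 continue
+ *             ch = '%'
+ *         self._write(writer, ch)
+ *     return val if not writer.changed else writer.decode_ascii()
+ */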
+
+/* "yarl/_quoting_c.pyx":260
+ * return PyUnicode_DecodeASCII(writer.buf, writer.pos, "strict")
+ *
+ * cdef inline int _write(self, Writer *writer, Py_UCS4 ch): # <<<<<<<<<<<<<<
+ * if self._qs:
+ * if ch == ' ':
+ */
+
+static CYTHON_INLINE int __pyx_f_4yarl_10_quoting_c_7_Quoter__write(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, struct __pyx_t_4yarl_10_quoting_c_Writer *__pyx_v_writer, Py_UCS4 __pyx_v_ch) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ __Pyx_RefNannySetupContext("_write", 0);
+
+ /* "yarl/_quoting_c.pyx":261
+ *
+ * cdef inline int _write(self, Writer *writer, Py_UCS4 ch):
+ * if self._qs: # <<<<<<<<<<<<<<
+ * if ch == ' ':
+ * return _write_char(writer, '+', True)
+ */
+ __pyx_t_1 = (__pyx_v_self->_qs != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":262
+ * cdef inline int _write(self, Writer *writer, Py_UCS4 ch):
+ * if self._qs:
+ * if ch == ' ': # <<<<<<<<<<<<<<
+ * return _write_char(writer, '+', True)
+ *
+ */
+ __pyx_t_1 = ((__pyx_v_ch == 32) != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":263
+ * if self._qs:
+ * if ch == ' ':
+ * return _write_char(writer, '+', True) # <<<<<<<<<<<<<<
+ *
+ * if ch < 128 and bit_at(self._safe_table, ch):
+ */
+ __pyx_r = __pyx_f_4yarl_10_quoting_c__write_char(__pyx_v_writer, 43, 1);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":262
+ * cdef inline int _write(self, Writer *writer, Py_UCS4 ch):
+ * if self._qs:
+ * if ch == ' ': # <<<<<<<<<<<<<<
+ * return _write_char(writer, '+', True)
+ *
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":261
+ *
+ * cdef inline int _write(self, Writer *writer, Py_UCS4 ch):
+ * if self._qs: # <<<<<<<<<<<<<<
+ * if ch == ' ':
+ * return _write_char(writer, '+', True)
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":265
+ * return _write_char(writer, '+', True)
+ *
+ * if ch < 128 and bit_at(self._safe_table, ch): # <<<<<<<<<<<<<<
+ * return _write_char(writer, ch, False)
+ *
+ */
+ __pyx_t_2 = ((__pyx_v_ch < 0x80) != 0);
+ if (__pyx_t_2) {
+ } else {
+ __pyx_t_1 = __pyx_t_2;
+ goto __pyx_L6_bool_binop_done;
+ }
+ __pyx_t_2 = (__pyx_f_4yarl_10_quoting_c_bit_at(__pyx_v_self->_safe_table, __pyx_v_ch) != 0);
+ __pyx_t_1 = __pyx_t_2;
+ __pyx_L6_bool_binop_done:;
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":266
+ *
+ * if ch < 128 and bit_at(self._safe_table, ch):
+ * return _write_char(writer, ch, False) # <<<<<<<<<<<<<<
+ *
+ * return _write_utf8(writer, ch)
+ */
+ __pyx_r = __pyx_f_4yarl_10_quoting_c__write_char(__pyx_v_writer, __pyx_v_ch, 0);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":265
+ * return _write_char(writer, '+', True)
+ *
+ * if ch < 128 and bit_at(self._safe_table, ch): # <<<<<<<<<<<<<<
+ * return _write_char(writer, ch, False)
+ *
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":268
+ * return _write_char(writer, ch, False)
+ *
+ * return _write_utf8(writer, ch) # <<<<<<<<<<<<<<
+ *
+ *
+ */
+ __pyx_r = __pyx_f_4yarl_10_quoting_c__write_utf8(__pyx_v_writer, __pyx_v_ch);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":260
+ * return PyUnicode_DecodeASCII(writer.buf, writer.pos, "strict")
+ *
+ * cdef inline int _write(self, Writer *writer, Py_UCS4 ch): # <<<<<<<<<<<<<<
+ * if self._qs:
+ * if ch == ' ':
+ */
+
+ /* function exit code */
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
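+
+/* _write above is the per-character dispatcher: in query-string mode a
+ * space becomes '+', characters present in the 128-bit safe table are
+ * written literally (changed=False), and everything else falls through to
+ * the UTF-8 percent-encoder. Python sketch (illustrative):
+ *
+ *     def _write(self, writer, ch):
+ *         if self._qs and ch == ' ':
+ *             return write_char(writer, '+', True)
+ *         if ord(ch) < 128 and bit_at(self._safe_table, ch):
+ *             return write_char(writer, ch, False)
+ *         return write_utf8(writer, ch)
+ */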
+
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_4yarl_10_quoting_c_7_Quoter_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_4yarl_10_quoting_c_7_Quoter_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_7_Quoter_4__reduce_cython__(((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_4yarl_10_quoting_c_7_Quoter_4__reduce_cython__(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self) {
+ PyObject *__pyx_v_state = 0;
+ PyObject *__pyx_v__dict = 0;
+ int __pyx_v_use_setstate;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_t_7;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":5
+ * cdef object _dict
+ * cdef bint use_setstate
+ * state = (self._protected_table, self._qs, self._requote, self._safe_table) # <<<<<<<<<<<<<<
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ */
+ __pyx_t_1 = __Pyx_PyObject_FromCString(__pyx_v_self->_protected_table); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->_qs); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_3 = __Pyx_PyBool_FromLong(__pyx_v_self->_requote); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_4 = __Pyx_PyObject_FromCString(__pyx_v_self->_safe_table); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
+ __pyx_t_1 = 0;
+ __pyx_t_2 = 0;
+ __pyx_t_3 = 0;
+ __pyx_t_4 = 0;
+ __pyx_v_state = ((PyObject*)__pyx_t_5);
+ __pyx_t_5 = 0;
+
+ /* "(tree fragment)":6
+ * cdef bint use_setstate
+ * state = (self._protected_table, self._qs, self._requote, self._safe_table)
+ * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
+ * if _dict is not None:
+ * state += (_dict,)
+ */
+ __pyx_t_5 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __pyx_v__dict = __pyx_t_5;
+ __pyx_t_5 = 0;
+
+ /* "(tree fragment)":7
+ * state = (self._protected_table, self._qs, self._requote, self._safe_table)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ __pyx_t_6 = (__pyx_v__dict != Py_None);
+ __pyx_t_7 = (__pyx_t_6 != 0);
+ if (__pyx_t_7) {
+
+ /* "(tree fragment)":8
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ * state += (_dict,) # <<<<<<<<<<<<<<
+ * use_setstate = True
+ * else:
+ */
+ __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(__pyx_v__dict);
+ __Pyx_GIVEREF(__pyx_v__dict);
+ PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v__dict);
+ __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
+ __pyx_t_4 = 0;
+
+ /* "(tree fragment)":9
+ * if _dict is not None:
+ * state += (_dict,)
+ * use_setstate = True # <<<<<<<<<<<<<<
+ * else:
+ * use_setstate = False
+ */
+ __pyx_v_use_setstate = 1;
+
+ /* "(tree fragment)":7
+ * state = (self._protected_table, self._qs, self._requote, self._safe_table)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ goto __pyx_L3;
+ }
+
+ /* "(tree fragment)":11
+ * use_setstate = True
+ * else:
+ * use_setstate = False # <<<<<<<<<<<<<<
+ * if use_setstate:
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, None), state
+ */
+ /*else*/ {
+ __pyx_v_use_setstate = 0;
+ }
+ __pyx_L3:;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = False
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, None), state
+ * else:
+ */
+ __pyx_t_7 = (__pyx_v_use_setstate != 0);
+ if (__pyx_t_7) {
+
+ /* "(tree fragment)":13
+ * use_setstate = False
+ * if use_setstate:
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, None), state # <<<<<<<<<<<<<<
+ * else:
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, state)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle__Quoter); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_244432181);
+ __Pyx_GIVEREF(__pyx_int_244432181);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_244432181);
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_5, 2, Py_None);
+ __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_4);
+ PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_v_state);
+ __pyx_t_4 = 0;
+ __pyx_t_5 = 0;
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = False
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, None), state
+ * else:
+ */
+ }
+
+ /* "(tree fragment)":15
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, None), state
+ * else:
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, state) # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle__Quoter__set_state(self, __pyx_state)
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_pyx_unpickle__Quoter); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_5);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_5, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_244432181);
+ __Pyx_GIVEREF(__pyx_int_244432181);
+ PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_int_244432181);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
+ __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_GIVEREF(__pyx_t_3);
+ PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3);
+ __Pyx_GIVEREF(__pyx_t_5);
+ PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_5);
+ __pyx_t_3 = 0;
+ __pyx_t_5 = 0;
+ __pyx_r = __pyx_t_4;
+ __pyx_t_4 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("yarl._quoting_c._Quoter.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_state);
+ __Pyx_XDECREF(__pyx_v__dict);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
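+
+/* The generated __reduce_cython__ follows the standard object.__reduce__
+ * contract: when an instance __dict__ exists it returns
+ * (callable, args, state) so __setstate__ runs on unpickling, otherwise the
+ * state tuple is folded directly into args. The constant 0xe91bd35 acts as a
+ * class-layout checksum that __pyx_unpickle__Quoter checks on load. Shape of
+ * the two return values (illustrative, mirroring the tree fragment above):
+ *
+ *     if use_setstate:
+ *         return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, None), state
+ *     else:
+ *         return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, state)
+ */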
+
+/* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle__Quoter__set_state(self, __pyx_state)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_4yarl_10_quoting_c_7_Quoter_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_4yarl_10_quoting_c_7_Quoter_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_7_Quoter_6__setstate_cython__(((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_4yarl_10_quoting_c_7_Quoter_6__setstate_cython__(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":17
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, state)
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle__Quoter__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
+ __pyx_t_1 = __pyx_f_4yarl_10_quoting_c___pyx_unpickle__Quoter__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle__Quoter, (type(self), 0xe91bd35, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle__Quoter__set_state(self, __pyx_state)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("yarl._quoting_c._Quoter.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "yarl/_quoting_c.pyx":277
+ * cdef _Quoter _qs_quoter
+ *
+ * def __init__(self, *, unsafe='', qs=False): # <<<<<<<<<<<<<<
+ * self._unsafe = unsafe
+ * self._qs = qs
+ */
+
+/* Python wrapper */
+static int __pyx_pw_4yarl_10_quoting_c_9_Unquoter_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static int __pyx_pw_4yarl_10_quoting_c_9_Unquoter_1__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_unsafe = 0;
+ PyObject *__pyx_v_qs = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_unsafe,&__pyx_n_s_qs,0};
+ PyObject* values[2] = {0,0};
+ values[0] = ((PyObject *)__pyx_kp_u_);
+ values[1] = ((PyObject *)Py_False);
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ if (kw_args > 0 && likely(kw_args <= 2)) {
+ Py_ssize_t index;
+ for (index = 0; index < 2 && kw_args > 0; index++) {
+ PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, *__pyx_pyargnames[index]);
+ if (value) { values[index] = value; kw_args--; }
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, 0, "__init__") < 0)) __PYX_ERR(0, 277, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 0) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ }
+ __pyx_v_unsafe = values[0];
+ __pyx_v_qs = values[1];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__init__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 277, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_9_Unquoter___init__(((struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)__pyx_v_self), __pyx_v_unsafe, __pyx_v_qs);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static int __pyx_pf_4yarl_10_quoting_c_9_Unquoter___init__(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self, PyObject *__pyx_v_unsafe, PyObject *__pyx_v_qs) {
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__init__", 0);
+
+ /* "yarl/_quoting_c.pyx":278
+ *
+ * def __init__(self, *, unsafe='', qs=False):
+ * self._unsafe = unsafe # <<<<<<<<<<<<<<
+ * self._qs = qs
+ * self._quoter = _Quoter()
+ */
+ if (!(likely(PyUnicode_CheckExact(__pyx_v_unsafe))||((__pyx_v_unsafe) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_v_unsafe)->tp_name), 0))) __PYX_ERR(0, 278, __pyx_L1_error)
+ __pyx_t_1 = __pyx_v_unsafe;
+ __Pyx_INCREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->_unsafe);
+ __Pyx_DECREF(__pyx_v_self->_unsafe);
+ __pyx_v_self->_unsafe = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "yarl/_quoting_c.pyx":279
+ * def __init__(self, *, unsafe='', qs=False):
+ * self._unsafe = unsafe
+ * self._qs = qs # <<<<<<<<<<<<<<
+ * self._quoter = _Quoter()
+ * self._qs_quoter = _Quoter(qs=True)
+ */
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_qs); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 279, __pyx_L1_error)
+ __pyx_v_self->_qs = __pyx_t_2;
+
+ /* "yarl/_quoting_c.pyx":280
+ * self._unsafe = unsafe
+ * self._qs = qs
+ * self._quoter = _Quoter() # <<<<<<<<<<<<<<
+ * self._qs_quoter = _Quoter(qs=True)
+ *
+ */
+ __pyx_t_1 = __Pyx_PyObject_CallNoArg(((PyObject *)__pyx_ptype_4yarl_10_quoting_c__Quoter)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 280, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v_self->_quoter);
+ __Pyx_DECREF(((PyObject *)__pyx_v_self->_quoter));
+ __pyx_v_self->_quoter = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "yarl/_quoting_c.pyx":281
+ * self._qs = qs
+ * self._quoter = _Quoter()
+ * self._qs_quoter = _Quoter(qs=True) # <<<<<<<<<<<<<<
+ *
+ * def __call__(self, val):
+ */
+ __pyx_t_1 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 281, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_t_1, __pyx_n_s_qs, Py_True) < 0) __PYX_ERR(0, 281, __pyx_L1_error)
+ __pyx_t_3 = __Pyx_PyObject_Call(((PyObject *)__pyx_ptype_4yarl_10_quoting_c__Quoter), __pyx_empty_tuple, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 281, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_GIVEREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_v_self->_qs_quoter);
+ __Pyx_DECREF(((PyObject *)__pyx_v_self->_qs_quoter));
+ __pyx_v_self->_qs_quoter = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "yarl/_quoting_c.pyx":277
+ * cdef _Quoter _qs_quoter
+ *
+ * def __init__(self, *, unsafe='', qs=False): # <<<<<<<<<<<<<<
+ * self._unsafe = unsafe
+ * self._qs = qs
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
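+
+/* A minimal Python-level sketch of the constructor implemented above,
+ * reassembled from the embedded _quoting_c.pyx fragments (keyword-only
+ * arguments; the two helper _Quoter instances are built eagerly):
+ *
+ *     class _Unquoter:
+ *         def __init__(self, *, unsafe='', qs=False):
+ *             self._unsafe = unsafe               # must be str (type-checked above)
+ *             self._qs = bool(qs)                 # truthiness, via __Pyx_PyObject_IsTrue
+ *             self._quoter = _Quoter()            # re-quotes unsafe characters
+ *             self._qs_quoter = _Quoter(qs=True)  # re-quotes query-string separators
+ */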
+
+/* "yarl/_quoting_c.pyx":283
+ * self._qs_quoter = _Quoter(qs=True)
+ *
+ * def __call__(self, val): # <<<<<<<<<<<<<<
+ * if val is None:
+ * return None
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_4yarl_10_quoting_c_9_Unquoter_3__call__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyObject *__pyx_pw_4yarl_10_quoting_c_9_Unquoter_3__call__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v_val = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__call__ (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_val,0};
+ PyObject* values[1] = {0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_val)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__call__") < 0)) __PYX_ERR(0, 283, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ }
+ __pyx_v_val = values[0];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__call__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 283, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_9_Unquoter_2__call__(((struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)__pyx_v_self), __pyx_v_val);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_4yarl_10_quoting_c_9_Unquoter_2__call__(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self, PyObject *__pyx_v_val) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__call__", 0);
+ __Pyx_INCREF(__pyx_v_val);
+
+ /* "yarl/_quoting_c.pyx":284
+ *
+ * def __call__(self, val):
+ * if val is None: # <<<<<<<<<<<<<<
+ * return None
+ * if type(val) is not str:
+ */
+ __pyx_t_1 = (__pyx_v_val == Py_None);
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":285
+ * def __call__(self, val):
+ * if val is None:
+ * return None # <<<<<<<<<<<<<<
+ * if type(val) is not str:
+ * if isinstance(val, str):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":284
+ *
+ * def __call__(self, val):
+ * if val is None: # <<<<<<<<<<<<<<
+ * return None
+ * if type(val) is not str:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":286
+ * if val is None:
+ * return None
+ * if type(val) is not str: # <<<<<<<<<<<<<<
+ * if isinstance(val, str):
+ * # derived from str
+ */
+ __pyx_t_2 = (((PyObject *)Py_TYPE(__pyx_v_val)) != ((PyObject *)(&PyUnicode_Type)));
+ __pyx_t_1 = (__pyx_t_2 != 0);
+ if (__pyx_t_1) {
+
+ /* "yarl/_quoting_c.pyx":287
+ * return None
+ * if type(val) is not str:
+ * if isinstance(val, str): # <<<<<<<<<<<<<<
+ * # derived from str
+ * val = str(val)
+ */
+ __pyx_t_1 = PyUnicode_Check(__pyx_v_val);
+ __pyx_t_2 = (__pyx_t_1 != 0);
+ if (likely(__pyx_t_2)) {
+
+ /* "yarl/_quoting_c.pyx":289
+ * if isinstance(val, str):
+ * # derived from str
+ * val = str(val) # <<<<<<<<<<<<<<
+ * else:
+ * raise TypeError("Argument should be str")
+ */
+ __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)(&PyUnicode_Type)), __pyx_v_val); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 289, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF_SET(__pyx_v_val, __pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "yarl/_quoting_c.pyx":287
+ * return None
+ * if type(val) is not str:
+ * if isinstance(val, str): # <<<<<<<<<<<<<<
+ * # derived from str
+ * val = str(val)
+ */
+ goto __pyx_L5;
+ }
+
+ /* "yarl/_quoting_c.pyx":291
+ * val = str(val)
+ * else:
+ * raise TypeError("Argument should be str") # <<<<<<<<<<<<<<
+ * return self._do_unquote(<str>val)
+ *
+ */
+ /*else*/ {
+ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 291, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(0, 291, __pyx_L1_error)
+ }
+ __pyx_L5:;
+
+ /* "yarl/_quoting_c.pyx":286
+ * if val is None:
+ * return None
+ * if type(val) is not str: # <<<<<<<<<<<<<<
+ * if isinstance(val, str):
+ * # derived from str
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":292
+ * else:
+ * raise TypeError("Argument should be str")
+ * return self._do_unquote(<str>val) # <<<<<<<<<<<<<<
+ *
+ * cdef str _do_unquote(self, str val):
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_3 = ((struct __pyx_vtabstruct_4yarl_10_quoting_c__Unquoter *)__pyx_v_self->__pyx_vtab)->_do_unquote(__pyx_v_self, ((PyObject*)__pyx_v_val)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 292, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_r = __pyx_t_3;
+ __pyx_t_3 = 0;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":283
+ * self._qs_quoter = _Quoter(qs=True)
+ *
+ * def __call__(self, val): # <<<<<<<<<<<<<<
+ * if val is None:
+ * return None
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter.__call__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_val);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
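+
+/* The wrapper and impl above correspond to the following Python-level logic,
+ * per the embedded .pyx fragments (None passes through, str subclasses are
+ * normalised to plain str, anything else raises):
+ *
+ *     def __call__(self, val):
+ *         if val is None:
+ *             return None
+ *         if type(val) is not str:
+ *             if isinstance(val, str):
+ *                 val = str(val)      # derived from str
+ *             else:
+ *                 raise TypeError("Argument should be str")
+ *         return self._do_unquote(val)
+ */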
+
+/* "yarl/_quoting_c.pyx":294
+ * return self._do_unquote(<str>val)
+ *
+ * cdef str _do_unquote(self, str val): # <<<<<<<<<<<<<<
+ * if len(val) == 0:
+ * return val
+ */
+
+static PyObject *__pyx_f_4yarl_10_quoting_c_9_Unquoter__do_unquote(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self, PyObject *__pyx_v_val) {
+ PyObject *__pyx_v_ret = 0;
+ char __pyx_v_buffer[4];
+ Py_ssize_t __pyx_v_buflen;
+ Py_ssize_t __pyx_v_consumed;
+ PyObject *__pyx_v_unquoted = 0;
+ Py_UCS4 __pyx_v_ch;
+ Py_ssize_t __pyx_v_idx;
+ Py_ssize_t __pyx_v_length;
+ Py_ssize_t __pyx_v_start_pct;
+ PyObject *__pyx_v_h = NULL;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ Py_ssize_t __pyx_t_1;
+ int __pyx_t_2;
+ PyObject *__pyx_t_3 = NULL;
+ Py_UCS4 __pyx_t_4;
+ int __pyx_t_5;
+ Py_UCS4 __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_t_10;
+ PyObject *__pyx_t_11 = NULL;
+ PyObject *__pyx_t_12 = NULL;
+ PyObject *__pyx_t_13 = NULL;
+ int __pyx_t_14;
+ PyObject *__pyx_t_15 = NULL;
+ PyObject *__pyx_t_16 = NULL;
+ PyObject *__pyx_t_17 = NULL;
+ PyObject *__pyx_t_18 = NULL;
+ PyObject *__pyx_t_19 = NULL;
+ PyObject *__pyx_t_20 = NULL;
+ int __pyx_t_21;
+ PyObject *(*__pyx_t_22)(PyObject *);
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("_do_unquote", 0);
+
+ /* "yarl/_quoting_c.pyx":295
+ *
+ * cdef str _do_unquote(self, str val):
+ * if len(val) == 0: # <<<<<<<<<<<<<<
+ * return val
+ * cdef list ret = []
+ */
+ if (unlikely(__pyx_v_val == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(0, 295, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_PyUnicode_GET_LENGTH(__pyx_v_val); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 295, __pyx_L1_error)
+ __pyx_t_2 = ((__pyx_t_1 == 0) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":296
+ * cdef str _do_unquote(self, str val):
+ * if len(val) == 0:
+ * return val # <<<<<<<<<<<<<<
+ * cdef list ret = []
+ * cdef char buffer[4]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v_val);
+ __pyx_r = __pyx_v_val;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":295
+ *
+ * cdef str _do_unquote(self, str val):
+ * if len(val) == 0: # <<<<<<<<<<<<<<
+ * return val
+ * cdef list ret = []
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":297
+ * if len(val) == 0:
+ * return val
+ * cdef list ret = [] # <<<<<<<<<<<<<<
+ * cdef char buffer[4]
+ * cdef Py_ssize_t buflen = 0
+ */
+ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 297, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_v_ret = ((PyObject*)__pyx_t_3);
+ __pyx_t_3 = 0;
+
+ /* "yarl/_quoting_c.pyx":299
+ * cdef list ret = []
+ * cdef char buffer[4]
+ * cdef Py_ssize_t buflen = 0 # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t consumed
+ * cdef str unquoted
+ */
+ __pyx_v_buflen = 0;
+
+ /* "yarl/_quoting_c.pyx":302
+ * cdef Py_ssize_t consumed
+ * cdef str unquoted
+ * cdef Py_UCS4 ch = 0 # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t idx = 0
+ * cdef Py_ssize_t length = len(val)
+ */
+ __pyx_v_ch = 0;
+
+ /* "yarl/_quoting_c.pyx":303
+ * cdef str unquoted
+ * cdef Py_UCS4 ch = 0
+ * cdef Py_ssize_t idx = 0 # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t length = len(val)
+ * cdef Py_ssize_t start_pct
+ */
+ __pyx_v_idx = 0;
+
+ /* "yarl/_quoting_c.pyx":304
+ * cdef Py_UCS4 ch = 0
+ * cdef Py_ssize_t idx = 0
+ * cdef Py_ssize_t length = len(val) # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t start_pct
+ *
+ */
+ if (unlikely(__pyx_v_val == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(0, 304, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_PyUnicode_GET_LENGTH(__pyx_v_val); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 304, __pyx_L1_error)
+ __pyx_v_length = __pyx_t_1;
+
+ /* "yarl/_quoting_c.pyx":307
+ * cdef Py_ssize_t start_pct
+ *
+ * while idx < length: # <<<<<<<<<<<<<<
+ * ch = val[idx]
+ * idx += 1
+ */
+ while (1) {
+ __pyx_t_2 = ((__pyx_v_idx < __pyx_v_length) != 0);
+ if (!__pyx_t_2) break;
+
+ /* "yarl/_quoting_c.pyx":308
+ *
+ * while idx < length:
+ * ch = val[idx] # <<<<<<<<<<<<<<
+ * idx += 1
+ * if ch == '%' and idx <= length - 2:
+ */
+ __pyx_t_4 = __Pyx_GetItemInt_Unicode(__pyx_v_val, __pyx_v_idx, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 1, 1); if (unlikely(__pyx_t_4 == (Py_UCS4)-1)) __PYX_ERR(0, 308, __pyx_L1_error)
+ __pyx_v_ch = __pyx_t_4;
+
+ /* "yarl/_quoting_c.pyx":309
+ * while idx < length:
+ * ch = val[idx]
+ * idx += 1 # <<<<<<<<<<<<<<
+ * if ch == '%' and idx <= length - 2:
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ */
+ __pyx_v_idx = (__pyx_v_idx + 1);
+
+ /* "yarl/_quoting_c.pyx":310
+ * ch = val[idx]
+ * idx += 1
+ * if ch == '%' and idx <= length - 2: # <<<<<<<<<<<<<<
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1:
+ */
+ __pyx_t_5 = ((__pyx_v_ch == 37) != 0);
+ if (__pyx_t_5) {
+ } else {
+ __pyx_t_2 = __pyx_t_5;
+ goto __pyx_L7_bool_binop_done;
+ }
+ __pyx_t_5 = ((__pyx_v_idx <= (__pyx_v_length - 2)) != 0);
+ __pyx_t_2 = __pyx_t_5;
+ __pyx_L7_bool_binop_done:;
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":311
+ * idx += 1
+ * if ch == '%' and idx <= length - 2:
+ * ch = _restore_ch(val[idx], val[idx + 1]) # <<<<<<<<<<<<<<
+ * if ch != <Py_UCS4>-1:
+ * idx += 2
+ */
+ __pyx_t_4 = __Pyx_GetItemInt_Unicode(__pyx_v_val, __pyx_v_idx, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 1, 1); if (unlikely(__pyx_t_4 == (Py_UCS4)-1)) __PYX_ERR(0, 311, __pyx_L1_error)
+ __pyx_t_1 = (__pyx_v_idx + 1);
+ __pyx_t_6 = __Pyx_GetItemInt_Unicode(__pyx_v_val, __pyx_t_1, Py_ssize_t, 1, PyInt_FromSsize_t, 0, 1, 1); if (unlikely(__pyx_t_6 == (Py_UCS4)-1)) __PYX_ERR(0, 311, __pyx_L1_error)
+ __pyx_v_ch = __pyx_f_4yarl_10_quoting_c__restore_ch(__pyx_t_4, __pyx_t_6);
+
+ /* "yarl/_quoting_c.pyx":312
+ * if ch == '%' and idx <= length - 2:
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1: # <<<<<<<<<<<<<<
+ * idx += 2
+ * assert buflen < 4
+ */
+ __pyx_t_2 = ((__pyx_v_ch != ((Py_UCS4)-1L)) != 0);
+ if (__pyx_t_2) {
+
+ /* "yarl/_quoting_c.pyx":313
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1:
+ * idx += 2 # <<<<<<<<<<<<<<
+ * assert buflen < 4
+ * buffer[buflen] = ch
+ */
+ __pyx_v_idx = (__pyx_v_idx + 2);
+
+ /* "yarl/_quoting_c.pyx":314
+ * if ch != <Py_UCS4>-1:
+ * idx += 2
+ * assert buflen < 4 # <<<<<<<<<<<<<<
+ * buffer[buflen] = ch
+ * buflen += 1
+ */
+ #ifndef CYTHON_WITHOUT_ASSERTIONS
+ if (unlikely(!Py_OptimizeFlag)) {
+ if (unlikely(!((__pyx_v_buflen < 4) != 0))) {
+ PyErr_SetNone(PyExc_AssertionError);
+ __PYX_ERR(0, 314, __pyx_L1_error)
+ }
+ }
+ #endif
+
+ /* "yarl/_quoting_c.pyx":315
+ * idx += 2
+ * assert buflen < 4
+ * buffer[buflen] = ch # <<<<<<<<<<<<<<
+ * buflen += 1
+ * try:
+ */
+ (__pyx_v_buffer[__pyx_v_buflen]) = __pyx_v_ch;
+
+ /* "yarl/_quoting_c.pyx":316
+ * assert buflen < 4
+ * buffer[buflen] = ch
+ * buflen += 1 # <<<<<<<<<<<<<<
+ * try:
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ */
+ __pyx_v_buflen = (__pyx_v_buflen + 1);
+
+ /* "yarl/_quoting_c.pyx":317
+ * buffer[buflen] = ch
+ * buflen += 1
+ * try: # <<<<<<<<<<<<<<
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ * NULL, &consumed)
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
+ __Pyx_XGOTREF(__pyx_t_7);
+ __Pyx_XGOTREF(__pyx_t_8);
+ __Pyx_XGOTREF(__pyx_t_9);
+ /*try:*/ {
+
+ /* "yarl/_quoting_c.pyx":318
+ * buflen += 1
+ * try:
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen, # <<<<<<<<<<<<<<
+ * NULL, &consumed)
+ * except UnicodeDecodeError:
+ */
+ __pyx_t_3 = PyUnicode_DecodeUTF8Stateful(__pyx_v_buffer, __pyx_v_buflen, NULL, (&__pyx_v_consumed)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 318, __pyx_L10_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_XDECREF_SET(__pyx_v_unquoted, ((PyObject*)__pyx_t_3));
+ __pyx_t_3 = 0;
+
+ /* "yarl/_quoting_c.pyx":317
+ * buffer[buflen] = ch
+ * buflen += 1
+ * try: # <<<<<<<<<<<<<<
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ * NULL, &consumed)
+ */
+ }
+ __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ goto __pyx_L17_try_end;
+ __pyx_L10_error:;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "yarl/_quoting_c.pyx":320
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ * NULL, &consumed)
+ * except UnicodeDecodeError: # <<<<<<<<<<<<<<
+ * start_pct = idx - buflen * 3
+ * buffer[0] = ch
+ */
+ __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_UnicodeDecodeError);
+ if (__pyx_t_10) {
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter._do_unquote", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_3, &__pyx_t_11, &__pyx_t_12) < 0) __PYX_ERR(0, 320, __pyx_L12_except_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_GOTREF(__pyx_t_12);
+
+ /* "yarl/_quoting_c.pyx":321
+ * NULL, &consumed)
+ * except UnicodeDecodeError:
+ * start_pct = idx - buflen * 3 # <<<<<<<<<<<<<<
+ * buffer[0] = ch
+ * buflen = 1
+ */
+ __pyx_v_start_pct = (__pyx_v_idx - (__pyx_v_buflen * 3));
+
+ /* "yarl/_quoting_c.pyx":322
+ * except UnicodeDecodeError:
+ * start_pct = idx - buflen * 3
+ * buffer[0] = ch # <<<<<<<<<<<<<<
+ * buflen = 1
+ * ret.append(val[start_pct : idx - 3])
+ */
+ (__pyx_v_buffer[0]) = __pyx_v_ch;
+
+ /* "yarl/_quoting_c.pyx":323
+ * start_pct = idx - buflen * 3
+ * buffer[0] = ch
+ * buflen = 1 # <<<<<<<<<<<<<<
+ * ret.append(val[start_pct : idx - 3])
+ * try:
+ */
+ __pyx_v_buflen = 1;
+
+ /* "yarl/_quoting_c.pyx":324
+ * buffer[0] = ch
+ * buflen = 1
+ * ret.append(val[start_pct : idx - 3]) # <<<<<<<<<<<<<<
+ * try:
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ */
+ if (unlikely(__pyx_v_val == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 324, __pyx_L12_except_error)
+ }
+ __pyx_t_13 = __Pyx_PyUnicode_Substring(__pyx_v_val, __pyx_v_start_pct, (__pyx_v_idx - 3)); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 324, __pyx_L12_except_error)
+ __Pyx_GOTREF(__pyx_t_13);
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_t_13); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 324, __pyx_L12_except_error)
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+
+ /* "yarl/_quoting_c.pyx":325
+ * buflen = 1
+ * ret.append(val[start_pct : idx - 3])
+ * try: # <<<<<<<<<<<<<<
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ * NULL, &consumed)
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_15, &__pyx_t_16, &__pyx_t_17);
+ __Pyx_XGOTREF(__pyx_t_15);
+ __Pyx_XGOTREF(__pyx_t_16);
+ __Pyx_XGOTREF(__pyx_t_17);
+ /*try:*/ {
+
+ /* "yarl/_quoting_c.pyx":326
+ * ret.append(val[start_pct : idx - 3])
+ * try:
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen, # <<<<<<<<<<<<<<
+ * NULL, &consumed)
+ * except UnicodeDecodeError:
+ */
+ __pyx_t_13 = PyUnicode_DecodeUTF8Stateful(__pyx_v_buffer, __pyx_v_buflen, NULL, (&__pyx_v_consumed)); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 326, __pyx_L20_error)
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_XDECREF_SET(__pyx_v_unquoted, ((PyObject*)__pyx_t_13));
+ __pyx_t_13 = 0;
+
+ /* "yarl/_quoting_c.pyx":325
+ * buflen = 1
+ * ret.append(val[start_pct : idx - 3])
+ * try: # <<<<<<<<<<<<<<
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ * NULL, &consumed)
+ */
+ }
+ __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0;
+ __Pyx_XDECREF(__pyx_t_16); __pyx_t_16 = 0;
+ __Pyx_XDECREF(__pyx_t_17); __pyx_t_17 = 0;
+ goto __pyx_L27_try_end;
+ __pyx_L20_error:;
+ __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0;
+
+ /* "yarl/_quoting_c.pyx":328
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ * NULL, &consumed)
+ * except UnicodeDecodeError: # <<<<<<<<<<<<<<
+ * buflen = 0
+ * ret.append(val[idx - 3 : idx])
+ */
+ __pyx_t_10 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_UnicodeDecodeError);
+ if (__pyx_t_10) {
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter._do_unquote", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ if (__Pyx_GetException(&__pyx_t_13, &__pyx_t_18, &__pyx_t_19) < 0) __PYX_ERR(0, 328, __pyx_L22_except_error)
+ __Pyx_GOTREF(__pyx_t_13);
+ __Pyx_GOTREF(__pyx_t_18);
+ __Pyx_GOTREF(__pyx_t_19);
+
+ /* "yarl/_quoting_c.pyx":329
+ * NULL, &consumed)
+ * except UnicodeDecodeError:
+ * buflen = 0 # <<<<<<<<<<<<<<
+ * ret.append(val[idx - 3 : idx])
+ * continue
+ */
+ __pyx_v_buflen = 0;
+
+ /* "yarl/_quoting_c.pyx":330
+ * except UnicodeDecodeError:
+ * buflen = 0
+ * ret.append(val[idx - 3 : idx]) # <<<<<<<<<<<<<<
+ * continue
+ * if not unquoted:
+ */
+ if (unlikely(__pyx_v_val == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 330, __pyx_L22_except_error)
+ }
+ __pyx_t_20 = __Pyx_PyUnicode_Substring(__pyx_v_val, (__pyx_v_idx - 3), __pyx_v_idx); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 330, __pyx_L22_except_error)
+ __Pyx_GOTREF(__pyx_t_20);
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_t_20); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 330, __pyx_L22_except_error)
+ __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0;
+
+ /* "yarl/_quoting_c.pyx":331
+ * buflen = 0
+ * ret.append(val[idx - 3 : idx])
+ * continue # <<<<<<<<<<<<<<
+ * if not unquoted:
+ * assert consumed == 0
+ */
+ goto __pyx_L29_except_continue;
+ __pyx_L29_except_continue:;
+ __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0;
+ __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0;
+ __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0;
+ goto __pyx_L26_try_continue;
+ }
+ goto __pyx_L22_except_error;
+ __pyx_L22_except_error:;
+
+ /* "yarl/_quoting_c.pyx":325
+ * buflen = 1
+ * ret.append(val[start_pct : idx - 3])
+ * try: # <<<<<<<<<<<<<<
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ * NULL, &consumed)
+ */
+ __Pyx_XGIVEREF(__pyx_t_15);
+ __Pyx_XGIVEREF(__pyx_t_16);
+ __Pyx_XGIVEREF(__pyx_t_17);
+ __Pyx_ExceptionReset(__pyx_t_15, __pyx_t_16, __pyx_t_17);
+ goto __pyx_L12_except_error;
+ __pyx_L26_try_continue:;
+ __Pyx_XGIVEREF(__pyx_t_15);
+ __Pyx_XGIVEREF(__pyx_t_16);
+ __Pyx_XGIVEREF(__pyx_t_17);
+ __Pyx_ExceptionReset(__pyx_t_15, __pyx_t_16, __pyx_t_17);
+ goto __pyx_L19_except_continue;
+ __pyx_L27_try_end:;
+ }
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_XDECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_XDECREF(__pyx_t_12); __pyx_t_12 = 0;
+ goto __pyx_L11_exception_handled;
+ __pyx_L19_except_continue:;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ goto __pyx_L16_try_continue;
+ }
+ goto __pyx_L12_except_error;
+ __pyx_L12_except_error:;
+
+ /* "yarl/_quoting_c.pyx":317
+ * buffer[buflen] = ch
+ * buflen += 1
+ * try: # <<<<<<<<<<<<<<
+ * unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ * NULL, &consumed)
+ */
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_XGIVEREF(__pyx_t_9);
+ __Pyx_ExceptionReset(__pyx_t_7, __pyx_t_8, __pyx_t_9);
+ goto __pyx_L1_error;
+ __pyx_L16_try_continue:;
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_XGIVEREF(__pyx_t_9);
+ __Pyx_ExceptionReset(__pyx_t_7, __pyx_t_8, __pyx_t_9);
+ goto __pyx_L4_continue;
+ __pyx_L11_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_t_7);
+ __Pyx_XGIVEREF(__pyx_t_8);
+ __Pyx_XGIVEREF(__pyx_t_9);
+ __Pyx_ExceptionReset(__pyx_t_7, __pyx_t_8, __pyx_t_9);
+ __pyx_L17_try_end:;
+ }
+
+ /* "yarl/_quoting_c.pyx":332
+ * ret.append(val[idx - 3 : idx])
+ * continue
+ * if not unquoted: # <<<<<<<<<<<<<<
+ * assert consumed == 0
+ * continue
+ */
+ __pyx_t_2 = (__pyx_v_unquoted != Py_None)&&(__Pyx_PyUnicode_IS_TRUE(__pyx_v_unquoted) != 0);
+ __pyx_t_5 = ((!__pyx_t_2) != 0);
+ if (__pyx_t_5) {
+
+ /* "yarl/_quoting_c.pyx":333
+ * continue
+ * if not unquoted:
+ * assert consumed == 0 # <<<<<<<<<<<<<<
+ * continue
+ * assert consumed == buflen
+ */
+ #ifndef CYTHON_WITHOUT_ASSERTIONS
+ if (unlikely(!Py_OptimizeFlag)) {
+ if (unlikely(!((__pyx_v_consumed == 0) != 0))) {
+ PyErr_SetNone(PyExc_AssertionError);
+ __PYX_ERR(0, 333, __pyx_L1_error)
+ }
+ }
+ #endif
+
+ /* "yarl/_quoting_c.pyx":334
+ * if not unquoted:
+ * assert consumed == 0
+ * continue # <<<<<<<<<<<<<<
+ * assert consumed == buflen
+ * buflen = 0
+ */
+ goto __pyx_L4_continue;
+
+ /* "yarl/_quoting_c.pyx":332
+ * ret.append(val[idx - 3 : idx])
+ * continue
+ * if not unquoted: # <<<<<<<<<<<<<<
+ * assert consumed == 0
+ * continue
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":335
+ * assert consumed == 0
+ * continue
+ * assert consumed == buflen # <<<<<<<<<<<<<<
+ * buflen = 0
+ * if self._qs and unquoted in '+=&;':
+ */
+ #ifndef CYTHON_WITHOUT_ASSERTIONS
+ if (unlikely(!Py_OptimizeFlag)) {
+ if (unlikely(!((__pyx_v_consumed == __pyx_v_buflen) != 0))) {
+ PyErr_SetNone(PyExc_AssertionError);
+ __PYX_ERR(0, 335, __pyx_L1_error)
+ }
+ }
+ #endif
+
+ /* "yarl/_quoting_c.pyx":336
+ * continue
+ * assert consumed == buflen
+ * buflen = 0 # <<<<<<<<<<<<<<
+ * if self._qs and unquoted in '+=&;':
+ * ret.append(self._qs_quoter(unquoted))
+ */
+ __pyx_v_buflen = 0;
+
+ /* "yarl/_quoting_c.pyx":337
+ * assert consumed == buflen
+ * buflen = 0
+ * if self._qs and unquoted in '+=&;': # <<<<<<<<<<<<<<
+ * ret.append(self._qs_quoter(unquoted))
+ * elif unquoted in self._unsafe:
+ */
+ __pyx_t_2 = (__pyx_v_self->_qs != 0);
+ if (__pyx_t_2) {
+ } else {
+ __pyx_t_5 = __pyx_t_2;
+ goto __pyx_L32_bool_binop_done;
+ }
+ __pyx_t_2 = (__Pyx_PyUnicode_ContainsTF(__pyx_v_unquoted, __pyx_kp_u__4, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(0, 337, __pyx_L1_error)
+ __pyx_t_21 = (__pyx_t_2 != 0);
+ __pyx_t_5 = __pyx_t_21;
+ __pyx_L32_bool_binop_done:;
+ if (__pyx_t_5) {
+
+ /* "yarl/_quoting_c.pyx":338
+ * buflen = 0
+ * if self._qs and unquoted in '+=&;':
+ * ret.append(self._qs_quoter(unquoted)) # <<<<<<<<<<<<<<
+ * elif unquoted in self._unsafe:
+ * ret.append(self._quoter(unquoted))
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_v_self->_qs_quoter));
+ __pyx_t_11 = ((PyObject *)__pyx_v_self->_qs_quoter); __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_11);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_11, function);
+ }
+ }
+ __pyx_t_12 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_11, __pyx_t_3, __pyx_v_unquoted) : __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_v_unquoted);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 338, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_t_12); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 338, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+
+ /* "yarl/_quoting_c.pyx":337
+ * assert consumed == buflen
+ * buflen = 0
+ * if self._qs and unquoted in '+=&;': # <<<<<<<<<<<<<<
+ * ret.append(self._qs_quoter(unquoted))
+ * elif unquoted in self._unsafe:
+ */
+ goto __pyx_L31;
+ }
+
+ /* "yarl/_quoting_c.pyx":339
+ * if self._qs and unquoted in '+=&;':
+ * ret.append(self._qs_quoter(unquoted))
+ * elif unquoted in self._unsafe: # <<<<<<<<<<<<<<
+ * ret.append(self._quoter(unquoted))
+ * else:
+ */
+ if (unlikely(__pyx_v_self->_unsafe == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+ __PYX_ERR(0, 339, __pyx_L1_error)
+ }
+ __pyx_t_5 = (__Pyx_PyUnicode_ContainsTF(__pyx_v_unquoted, __pyx_v_self->_unsafe, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 339, __pyx_L1_error)
+ __pyx_t_21 = (__pyx_t_5 != 0);
+ if (__pyx_t_21) {
+
+ /* "yarl/_quoting_c.pyx":340
+ * ret.append(self._qs_quoter(unquoted))
+ * elif unquoted in self._unsafe:
+ * ret.append(self._quoter(unquoted)) # <<<<<<<<<<<<<<
+ * else:
+ * ret.append(unquoted)
+ */
+ __Pyx_INCREF(((PyObject *)__pyx_v_self->_quoter));
+ __pyx_t_11 = ((PyObject *)__pyx_v_self->_quoter); __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_11);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_11, function);
+ }
+ }
+ __pyx_t_12 = (__pyx_t_3) ? __Pyx_PyObject_Call2Args(__pyx_t_11, __pyx_t_3, __pyx_v_unquoted) : __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_v_unquoted);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 340, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_t_12); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 340, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+
+ /* "yarl/_quoting_c.pyx":339
+ * if self._qs and unquoted in '+=&;':
+ * ret.append(self._qs_quoter(unquoted))
+ * elif unquoted in self._unsafe: # <<<<<<<<<<<<<<
+ * ret.append(self._quoter(unquoted))
+ * else:
+ */
+ goto __pyx_L31;
+ }
+
+ /* "yarl/_quoting_c.pyx":342
+ * ret.append(self._quoter(unquoted))
+ * else:
+ * ret.append(unquoted) # <<<<<<<<<<<<<<
+ * continue
+ * else:
+ */
+ /*else*/ {
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_v_unquoted); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 342, __pyx_L1_error)
+ }
+ __pyx_L31:;
+
+ /* "yarl/_quoting_c.pyx":343
+ * else:
+ * ret.append(unquoted)
+ * continue # <<<<<<<<<<<<<<
+ * else:
+ * ch = '%'
+ */
+ goto __pyx_L4_continue;
+
+ /* "yarl/_quoting_c.pyx":312
+ * if ch == '%' and idx <= length - 2:
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1: # <<<<<<<<<<<<<<
+ * idx += 2
+ * assert buflen < 4
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":345
+ * continue
+ * else:
+ * ch = '%' # <<<<<<<<<<<<<<
+ *
+ * if buflen:
+ */
+ /*else*/ {
+ __pyx_v_ch = 37;
+ }
+
+ /* "yarl/_quoting_c.pyx":310
+ * ch = val[idx]
+ * idx += 1
+ * if ch == '%' and idx <= length - 2: # <<<<<<<<<<<<<<
+ * ch = _restore_ch(val[idx], val[idx + 1])
+ * if ch != <Py_UCS4>-1:
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":347
+ * ch = '%'
+ *
+ * if buflen: # <<<<<<<<<<<<<<
+ * start_pct = idx - 1 - buflen * 3
+ * ret.append(val[start_pct : idx - 1])
+ */
+ __pyx_t_21 = (__pyx_v_buflen != 0);
+ if (__pyx_t_21) {
+
+ /* "yarl/_quoting_c.pyx":348
+ *
+ * if buflen:
+ * start_pct = idx - 1 - buflen * 3 # <<<<<<<<<<<<<<
+ * ret.append(val[start_pct : idx - 1])
+ * buflen = 0
+ */
+ __pyx_v_start_pct = ((__pyx_v_idx - 1) - (__pyx_v_buflen * 3));
+
+ /* "yarl/_quoting_c.pyx":349
+ * if buflen:
+ * start_pct = idx - 1 - buflen * 3
+ * ret.append(val[start_pct : idx - 1]) # <<<<<<<<<<<<<<
+ * buflen = 0
+ *
+ */
+ if (unlikely(__pyx_v_val == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 349, __pyx_L1_error)
+ }
+ __pyx_t_12 = __Pyx_PyUnicode_Substring(__pyx_v_val, __pyx_v_start_pct, (__pyx_v_idx - 1)); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 349, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_t_12); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 349, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+
+ /* "yarl/_quoting_c.pyx":350
+ * start_pct = idx - 1 - buflen * 3
+ * ret.append(val[start_pct : idx - 1])
+ * buflen = 0 # <<<<<<<<<<<<<<
+ *
+ * if ch == '+':
+ */
+ __pyx_v_buflen = 0;
+
+ /* "yarl/_quoting_c.pyx":347
+ * ch = '%'
+ *
+ * if buflen: # <<<<<<<<<<<<<<
+ * start_pct = idx - 1 - buflen * 3
+ * ret.append(val[start_pct : idx - 1])
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":352
+ * buflen = 0
+ *
+ * if ch == '+': # <<<<<<<<<<<<<<
+ * if not self._qs or ch in self._unsafe:
+ * ret.append('+')
+ */
+ __pyx_t_21 = ((__pyx_v_ch == 43) != 0);
+ if (__pyx_t_21) {
+
+ /* "yarl/_quoting_c.pyx":353
+ *
+ * if ch == '+':
+ * if not self._qs or ch in self._unsafe: # <<<<<<<<<<<<<<
+ * ret.append('+')
+ * else:
+ */
+ __pyx_t_5 = ((!(__pyx_v_self->_qs != 0)) != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_21 = __pyx_t_5;
+ goto __pyx_L37_bool_binop_done;
+ }
+ if (unlikely(__pyx_v_self->_unsafe == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "argument of type 'NoneType' is not iterable");
+ __PYX_ERR(0, 353, __pyx_L1_error)
+ }
+ __pyx_t_5 = ((__Pyx_UnicodeContainsUCS4(__pyx_v_self->_unsafe, __pyx_v_ch)) != 0);
+ __pyx_t_21 = __pyx_t_5;
+ __pyx_L37_bool_binop_done:;
+ if (__pyx_t_21) {
+
+ /* "yarl/_quoting_c.pyx":354
+ * if ch == '+':
+ * if not self._qs or ch in self._unsafe:
+ * ret.append('+') # <<<<<<<<<<<<<<
+ * else:
+ * ret.append(' ')
+ */
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_kp_u__5); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 354, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":353
+ *
+ * if ch == '+':
+ * if not self._qs or ch in self._unsafe: # <<<<<<<<<<<<<<
+ * ret.append('+')
+ * else:
+ */
+ goto __pyx_L36;
+ }
+
+ /* "yarl/_quoting_c.pyx":356
+ * ret.append('+')
+ * else:
+ * ret.append(' ') # <<<<<<<<<<<<<<
+ * continue
+ *
+ */
+ /*else*/ {
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_kp_u__6); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 356, __pyx_L1_error)
+ }
+ __pyx_L36:;
+
+ /* "yarl/_quoting_c.pyx":357
+ * else:
+ * ret.append(' ')
+ * continue # <<<<<<<<<<<<<<
+ *
+ * if ch in self._unsafe:
+ */
+ goto __pyx_L4_continue;
+
+ /* "yarl/_quoting_c.pyx":352
+ * buflen = 0
+ *
+ * if ch == '+': # <<<<<<<<<<<<<<
+ * if not self._qs or ch in self._unsafe:
+ * ret.append('+')
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":359
+ * continue
+ *
+ * if ch in self._unsafe: # <<<<<<<<<<<<<<
+ * ret.append('%')
+ * h = hex(ord(ch)).upper()[2:]
+ */
+ if (unlikely(__pyx_v_self->_unsafe == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "argument of type 'NoneType' is not iterable");
+ __PYX_ERR(0, 359, __pyx_L1_error)
+ }
+ __pyx_t_21 = ((__Pyx_UnicodeContainsUCS4(__pyx_v_self->_unsafe, __pyx_v_ch)) != 0);
+ if (__pyx_t_21) {
+
+ /* "yarl/_quoting_c.pyx":360
+ *
+ * if ch in self._unsafe:
+ * ret.append('%') # <<<<<<<<<<<<<<
+ * h = hex(ord(ch)).upper()[2:]
+ * for ch in h:
+ */
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_kp_u__7); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 360, __pyx_L1_error)
+
+ /* "yarl/_quoting_c.pyx":361
+ * if ch in self._unsafe:
+ * ret.append('%')
+ * h = hex(ord(ch)).upper()[2:] # <<<<<<<<<<<<<<
+ * for ch in h:
+ * ret.append(ch)
+ */
+ __pyx_t_11 = __Pyx_PyInt_From_long(((long)__pyx_v_ch)); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 361, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_hex, __pyx_t_11); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 361, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_11 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_upper); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 361, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_3 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) {
+ __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_11);
+ if (likely(__pyx_t_3)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11);
+ __Pyx_INCREF(__pyx_t_3);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_11, function);
+ }
+ }
+ __pyx_t_12 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_11);
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 361, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_t_11 = __Pyx_PyObject_GetSlice(__pyx_t_12, 2, 0, NULL, NULL, &__pyx_slice__8, 1, 0, 1); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 361, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __Pyx_XDECREF_SET(__pyx_v_h, __pyx_t_11);
+ __pyx_t_11 = 0;
+
+ /* "yarl/_quoting_c.pyx":362
+ * ret.append('%')
+ * h = hex(ord(ch)).upper()[2:]
+ * for ch in h: # <<<<<<<<<<<<<<
+ * ret.append(ch)
+ * continue
+ */
+ if (likely(PyList_CheckExact(__pyx_v_h)) || PyTuple_CheckExact(__pyx_v_h)) {
+ __pyx_t_11 = __pyx_v_h; __Pyx_INCREF(__pyx_t_11); __pyx_t_1 = 0;
+ __pyx_t_22 = NULL;
+ } else {
+ __pyx_t_1 = -1; __pyx_t_11 = PyObject_GetIter(__pyx_v_h); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 362, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_22 = Py_TYPE(__pyx_t_11)->tp_iternext; if (unlikely(!__pyx_t_22)) __PYX_ERR(0, 362, __pyx_L1_error)
+ }
+ for (;;) {
+ if (likely(!__pyx_t_22)) {
+ if (likely(PyList_CheckExact(__pyx_t_11))) {
+ if (__pyx_t_1 >= PyList_GET_SIZE(__pyx_t_11)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_12 = PyList_GET_ITEM(__pyx_t_11, __pyx_t_1); __Pyx_INCREF(__pyx_t_12); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(0, 362, __pyx_L1_error)
+ #else
+ __pyx_t_12 = PySequence_ITEM(__pyx_t_11, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 362, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ #endif
+ } else {
+ if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_11)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_12 = PyTuple_GET_ITEM(__pyx_t_11, __pyx_t_1); __Pyx_INCREF(__pyx_t_12); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(0, 362, __pyx_L1_error)
+ #else
+ __pyx_t_12 = PySequence_ITEM(__pyx_t_11, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 362, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ #endif
+ }
+ } else {
+ __pyx_t_12 = __pyx_t_22(__pyx_t_11);
+ if (unlikely(!__pyx_t_12)) {
+ PyObject* exc_type = PyErr_Occurred();
+ if (exc_type) {
+ if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+ else __PYX_ERR(0, 362, __pyx_L1_error)
+ }
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_12);
+ }
+ __pyx_t_6 = __Pyx_PyObject_AsPy_UCS4(__pyx_t_12); if (unlikely((__pyx_t_6 == (Py_UCS4)-1) && PyErr_Occurred())) __PYX_ERR(0, 362, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+ __pyx_v_ch = __pyx_t_6;
+
+ /* "yarl/_quoting_c.pyx":363
+ * h = hex(ord(ch)).upper()[2:]
+ * for ch in h:
+ * ret.append(ch) # <<<<<<<<<<<<<<
+ * continue
+ *
+ */
+ __pyx_t_12 = PyUnicode_FromOrdinal(__pyx_v_ch); if (unlikely(!__pyx_t_12)) __PYX_ERR(0, 363, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_12);
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_t_12); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 363, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_12); __pyx_t_12 = 0;
+
+ /* "yarl/_quoting_c.pyx":362
+ * ret.append('%')
+ * h = hex(ord(ch)).upper()[2:]
+ * for ch in h: # <<<<<<<<<<<<<<
+ * ret.append(ch)
+ * continue
+ */
+ }
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+
+ /* "yarl/_quoting_c.pyx":364
+ * for ch in h:
+ * ret.append(ch)
+ * continue # <<<<<<<<<<<<<<
+ *
+ * ret.append(ch)
+ */
+ goto __pyx_L4_continue;
+
+ /* "yarl/_quoting_c.pyx":359
+ * continue
+ *
+ * if ch in self._unsafe: # <<<<<<<<<<<<<<
+ * ret.append('%')
+ * h = hex(ord(ch)).upper()[2:]
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":366
+ * continue
+ *
+ * ret.append(ch) # <<<<<<<<<<<<<<
+ *
+ * if buflen:
+ */
+ __pyx_t_11 = PyUnicode_FromOrdinal(__pyx_v_ch); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 366, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_t_11); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 366, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+ __pyx_L4_continue:;
+ }
+
+ /* "yarl/_quoting_c.pyx":368
+ * ret.append(ch)
+ *
+ * if buflen: # <<<<<<<<<<<<<<
+ * ret.append(val[length - buflen * 3 : length])
+ *
+ */
+ __pyx_t_21 = (__pyx_v_buflen != 0);
+ if (__pyx_t_21) {
+
+ /* "yarl/_quoting_c.pyx":369
+ *
+ * if buflen:
+ * ret.append(val[length - buflen * 3 : length]) # <<<<<<<<<<<<<<
+ *
+ * return ''.join(ret)
+ */
+ if (unlikely(__pyx_v_val == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(0, 369, __pyx_L1_error)
+ }
+ __pyx_t_11 = __Pyx_PyUnicode_Substring(__pyx_v_val, (__pyx_v_length - (__pyx_v_buflen * 3)), __pyx_v_length); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 369, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_t_14 = __Pyx_PyList_Append(__pyx_v_ret, __pyx_t_11); if (unlikely(__pyx_t_14 == ((int)-1))) __PYX_ERR(0, 369, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
+
+ /* "yarl/_quoting_c.pyx":368
+ * ret.append(ch)
+ *
+ * if buflen: # <<<<<<<<<<<<<<
+ * ret.append(val[length - buflen * 3 : length])
+ *
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":371
+ * ret.append(val[length - buflen * 3 : length])
+ *
+ * return ''.join(ret) # <<<<<<<<<<<<<<
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __pyx_t_11 = PyUnicode_Join(__pyx_kp_u_, __pyx_v_ret); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 371, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_11);
+ __pyx_r = ((PyObject*)__pyx_t_11);
+ __pyx_t_11 = 0;
+ goto __pyx_L0;
+
+ /* "yarl/_quoting_c.pyx":294
+ * return self._do_unquote(<str>val)
+ *
+ * cdef str _do_unquote(self, str val): # <<<<<<<<<<<<<<
+ * if len(val) == 0:
+ * return val
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_11);
+ __Pyx_XDECREF(__pyx_t_12);
+ __Pyx_XDECREF(__pyx_t_13);
+ __Pyx_XDECREF(__pyx_t_18);
+ __Pyx_XDECREF(__pyx_t_19);
+ __Pyx_XDECREF(__pyx_t_20);
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter._do_unquote", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_ret);
+ __Pyx_XDECREF(__pyx_v_unquoted);
+ __Pyx_XDECREF(__pyx_v_h);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
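+
+/* The loop above implements stateful percent-decoding, per the embedded .pyx
+ * fragments: consecutive %XX escapes are accumulated into a 4-byte buffer
+ * (the longest UTF-8 sequence) and fed to PyUnicode_DecodeUTF8Stateful; an
+ * empty result with consumed == 0 means the sequence is incomplete and more
+ * escapes are read, a UnicodeDecodeError flushes the offending escapes back
+ * verbatim, and decoded characters in self._unsafe (or in '+=&;' when in
+ * query-string mode) are re-quoted instead of emitted raw. A compact Python
+ * sketch of the non-escape path at the bottom of the loop:
+ *
+ *     if ch == '+':
+ *         ret.append('+' if not self._qs or ch in self._unsafe else ' ')
+ *     elif ch in self._unsafe:
+ *         ret.append('%' + hex(ord(ch)).upper()[2:])  # e.g. ' ' -> '%20'
+ *     else:
+ *         ret.append(ch)
+ */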
+
+/* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_4yarl_10_quoting_c_9_Unquoter_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
+static PyObject *__pyx_pw_4yarl_10_quoting_c_9_Unquoter_5__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_9_Unquoter_4__reduce_cython__(((struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)__pyx_v_self));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_4yarl_10_quoting_c_9_Unquoter_4__reduce_cython__(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self) {
+ PyObject *__pyx_v_state = 0;
+ PyObject *__pyx_v__dict = 0;
+ int __pyx_v_use_setstate;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ int __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__reduce_cython__", 0);
+
+ /* "(tree fragment)":5
+ * cdef object _dict
+ * cdef bint use_setstate
+ * state = (self._qs, self._qs_quoter, self._quoter, self._unsafe) # <<<<<<<<<<<<<<
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ */
+ __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_v_self->_qs); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
+ __Pyx_INCREF(((PyObject *)__pyx_v_self->_qs_quoter));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_self->_qs_quoter));
+ PyTuple_SET_ITEM(__pyx_t_2, 1, ((PyObject *)__pyx_v_self->_qs_quoter));
+ __Pyx_INCREF(((PyObject *)__pyx_v_self->_quoter));
+ __Pyx_GIVEREF(((PyObject *)__pyx_v_self->_quoter));
+ PyTuple_SET_ITEM(__pyx_t_2, 2, ((PyObject *)__pyx_v_self->_quoter));
+ __Pyx_INCREF(__pyx_v_self->_unsafe);
+ __Pyx_GIVEREF(__pyx_v_self->_unsafe);
+ PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_v_self->_unsafe);
+ __pyx_t_1 = 0;
+ __pyx_v_state = ((PyObject*)__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "(tree fragment)":6
+ * cdef bint use_setstate
+ * state = (self._qs, self._qs_quoter, self._quoter, self._unsafe)
+ * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
+ * if _dict is not None:
+ * state += (_dict,)
+ */
+ __pyx_t_2 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_v__dict = __pyx_t_2;
+ __pyx_t_2 = 0;
+
+ /* "(tree fragment)":7
+ * state = (self._qs, self._qs_quoter, self._quoter, self._unsafe)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ __pyx_t_3 = (__pyx_v__dict != Py_None);
+ __pyx_t_4 = (__pyx_t_3 != 0);
+ if (__pyx_t_4) {
+
+ /* "(tree fragment)":8
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None:
+ * state += (_dict,) # <<<<<<<<<<<<<<
+ * use_setstate = True
+ * else:
+ */
+ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_v__dict);
+ __Pyx_GIVEREF(__pyx_v__dict);
+ PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v__dict);
+ __pyx_t_1 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_1));
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":9
+ * if _dict is not None:
+ * state += (_dict,)
+ * use_setstate = True # <<<<<<<<<<<<<<
+ * else:
+ * use_setstate = self._qs_quoter is not None or self._quoter is not None or self._unsafe is not None
+ */
+ __pyx_v_use_setstate = 1;
+
+ /* "(tree fragment)":7
+ * state = (self._qs, self._qs_quoter, self._quoter, self._unsafe)
+ * _dict = getattr(self, '__dict__', None)
+ * if _dict is not None: # <<<<<<<<<<<<<<
+ * state += (_dict,)
+ * use_setstate = True
+ */
+ goto __pyx_L3;
+ }
+
+ /* "(tree fragment)":11
+ * use_setstate = True
+ * else:
+ * use_setstate = self._qs_quoter is not None or self._quoter is not None or self._unsafe is not None # <<<<<<<<<<<<<<
+ * if use_setstate:
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, None), state
+ */
+ /*else*/ {
+ __pyx_t_3 = (((PyObject *)__pyx_v_self->_qs_quoter) != Py_None);
+ __pyx_t_5 = (__pyx_t_3 != 0);
+ if (!__pyx_t_5) {
+ } else {
+ __pyx_t_4 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = (((PyObject *)__pyx_v_self->_quoter) != Py_None);
+ __pyx_t_3 = (__pyx_t_5 != 0);
+ if (!__pyx_t_3) {
+ } else {
+ __pyx_t_4 = __pyx_t_3;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_3 = (__pyx_v_self->_unsafe != ((PyObject*)Py_None));
+ __pyx_t_5 = (__pyx_t_3 != 0);
+ __pyx_t_4 = __pyx_t_5;
+ __pyx_L4_bool_binop_done:;
+ __pyx_v_use_setstate = __pyx_t_4;
+ }
+ __pyx_L3:;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self._qs_quoter is not None or self._quoter is not None or self._unsafe is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, None), state
+ * else:
+ */
+ __pyx_t_4 = (__pyx_v_use_setstate != 0);
+ if (__pyx_t_4) {
+
+ /* "(tree fragment)":13
+ * use_setstate = self._qs_quoter is not None or self._quoter is not None or self._unsafe is not None
+ * if use_setstate:
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, None), state # <<<<<<<<<<<<<<
+ * else:
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, state)
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_pyx_unpickle__Unquoter); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_41310077);
+ __Pyx_GIVEREF(__pyx_int_41310077);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_41310077);
+ __Pyx_INCREF(Py_None);
+ __Pyx_GIVEREF(Py_None);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, Py_None);
+ __pyx_t_6 = PyTuple_New(3); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 13, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_1);
+ PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_6, 1, __pyx_t_2);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_6, 2, __pyx_v_state);
+ __pyx_t_1 = 0;
+ __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_6;
+ __pyx_t_6 = 0;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":12
+ * else:
+ * use_setstate = self._qs_quoter is not None or self._quoter is not None or self._unsafe is not None
+ * if use_setstate: # <<<<<<<<<<<<<<
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, None), state
+ * else:
+ */
+ }
+
+ /* "(tree fragment)":15
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, None), state
+ * else:
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, state) # <<<<<<<<<<<<<<
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle__Unquoter__set_state(self, __pyx_state)
+ */
+ /*else*/ {
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_pyx_unpickle__Unquoter); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_2 = PyTuple_New(3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
+ __Pyx_INCREF(__pyx_int_41310077);
+ __Pyx_GIVEREF(__pyx_int_41310077);
+ PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_41310077);
+ __Pyx_INCREF(__pyx_v_state);
+ __Pyx_GIVEREF(__pyx_v_state);
+ PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_v_state);
+ __pyx_t_1 = PyTuple_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_GIVEREF(__pyx_t_6);
+ PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_t_6);
+ __Pyx_GIVEREF(__pyx_t_2);
+ PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_2);
+ __pyx_t_6 = 0;
+ __pyx_t_2 = 0;
+ __pyx_r = __pyx_t_1;
+ __pyx_t_1 = 0;
+ goto __pyx_L0;
+ }
+
+ /* "(tree fragment)":1
+ * def __reduce_cython__(self): # <<<<<<<<<<<<<<
+ * cdef tuple state
+ * cdef object _dict
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_state);
+ __Pyx_XDECREF(__pyx_v__dict);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+/* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle__Unquoter__set_state(self, __pyx_state)
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_4yarl_10_quoting_c_9_Unquoter_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
+static PyObject *__pyx_pw_4yarl_10_quoting_c_9_Unquoter_7__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_9_Unquoter_6__setstate_cython__(((struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_4yarl_10_quoting_c_9_Unquoter_6__setstate_cython__(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__setstate_cython__", 0);
+
+ /* "(tree fragment)":17
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, state)
+ * def __setstate_cython__(self, __pyx_state):
+ * __pyx_unpickle__Unquoter__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 17, __pyx_L1_error)
+ __pyx_t_1 = __pyx_f_4yarl_10_quoting_c___pyx_unpickle__Unquoter__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":16
+ * else:
+ * return __pyx_unpickle__Unquoter, (type(self), 0x276577d, state)
+ * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_unpickle__Unquoter__set_state(self, __pyx_state)
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_AddTraceback("yarl._quoting_c._Unquoter.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
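+
+/* The __reduce_cython__/__setstate_cython__ pair above is Cython's standard
+ * pickle support for cdef classes: __reduce_cython__ captures the attribute
+ * tuple (self._qs, self._qs_quoter, self._quoter, self._unsafe) plus an
+ * optional __dict__, and the module-level __pyx_unpickle__Unquoter helper
+ * rebuilds the instance, using the 0x276577d checksum to reject state that
+ * was pickled against a different layout of the class. A sketch of the
+ * round trip, assuming the module is importable as yarl._quoting_c:
+ *
+ *     import pickle
+ *     u = _Unquoter(unsafe='/', qs=True)
+ *     u2 = pickle.loads(pickle.dumps(u))   # goes through __reduce_cython__
+ */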
+
+/* "(tree fragment)":1
+ * def __pyx_unpickle__Quoter(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_4yarl_10_quoting_c_1__pyx_unpickle__Quoter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_4yarl_10_quoting_c_1__pyx_unpickle__Quoter = {"__pyx_unpickle__Quoter", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4yarl_10_quoting_c_1__pyx_unpickle__Quoter, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_4yarl_10_quoting_c_1__pyx_unpickle__Quoter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v___pyx_type = 0;
+ long __pyx_v___pyx_checksum;
+ PyObject *__pyx_v___pyx_state = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__pyx_unpickle__Quoter (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
+ PyObject* values[3] = {0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle__Quoter", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle__Quoter", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle__Quoter") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ __pyx_v___pyx_type = values[0];
+ __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_v___pyx_state = values[2];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle__Quoter", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("yarl._quoting_c.__pyx_unpickle__Quoter", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c___pyx_unpickle__Quoter(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_4yarl_10_quoting_c___pyx_unpickle__Quoter(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_v___pyx_PickleError = 0;
+ PyObject *__pyx_v___pyx_result = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle__Quoter", 0);
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0xe91bd35: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xe91bd35 = (_protected_table, _qs, _requote, _safe_table))" % __pyx_checksum)
+ */
+ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xe91bd35) != 0);
+ if (__pyx_t_1) {
+
+ /* "(tree fragment)":5
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0xe91bd35:
+ * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xe91bd35 = (_protected_table, _qs, _requote, _safe_table))" % __pyx_checksum)
+ * __pyx_result = _Quoter.__new__(__pyx_type)
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_PickleError);
+ __Pyx_GIVEREF(__pyx_n_s_PickleError);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
+ __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_v___pyx_PickleError = __pyx_t_2;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":6
+ * if __pyx_checksum != 0xe91bd35:
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xe91bd35 = (_protected_table, _qs, _requote, _safe_table))" % __pyx_checksum) # <<<<<<<<<<<<<<
+ * __pyx_result = _Quoter.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ */
+ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xe9, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_INCREF(__pyx_v___pyx_PickleError);
+ __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 6, __pyx_L1_error)
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0xe91bd35: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xe91bd35 = (_protected_table, _qs, _requote, _safe_table))" % __pyx_checksum)
+ */
+ }
+
+ /* "(tree fragment)":7
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xe91bd35 = (_protected_table, _qs, _requote, _safe_table))" % __pyx_checksum)
+ * __pyx_result = _Quoter.__new__(__pyx_type) # <<<<<<<<<<<<<<
+ * if __pyx_state is not None:
+ * __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state)
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_4yarl_10_quoting_c__Quoter), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v___pyx_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xe91bd35 = (_protected_table, _qs, _requote, _safe_table))" % __pyx_checksum)
+ * __pyx_result = _Quoter.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
+ __pyx_t_6 = (__pyx_t_1 != 0);
+ if (__pyx_t_6) {
+
+ /* "(tree fragment)":9
+ * __pyx_result = _Quoter.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ * __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
+ * return __pyx_result
+ * cdef __pyx_unpickle__Quoter__set_state(_Quoter __pyx_result, tuple __pyx_state):
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
+ __pyx_t_3 = __pyx_f_4yarl_10_quoting_c___pyx_unpickle__Quoter__set_state(((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0xe91bd35 = (_protected_table, _qs, _requote, _safe_table))" % __pyx_checksum)
+ * __pyx_result = _Quoter.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ }
+
+ /* "(tree fragment)":10
+ * if __pyx_state is not None:
+ * __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state)
+ * return __pyx_result # <<<<<<<<<<<<<<
+ * cdef __pyx_unpickle__Quoter__set_state(_Quoter __pyx_result, tuple __pyx_state):
+ * __pyx_result._protected_table = __pyx_state[0]; __pyx_result._qs = __pyx_state[1]; __pyx_result._requote = __pyx_state[2]; __pyx_result._safe_table = __pyx_state[3]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v___pyx_result);
+ __pyx_r = __pyx_v___pyx_result;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle__Quoter(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("yarl._quoting_c.__pyx_unpickle__Quoter", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v___pyx_PickleError);
+ __Pyx_XDECREF(__pyx_v___pyx_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
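+
+/* The compiled body above corresponds to the module-level helper from the
+ * tree fragments, assembled here for readability:
+ *
+ *     def __pyx_unpickle__Quoter(__pyx_type, long __pyx_checksum, __pyx_state):
+ *         if __pyx_checksum != 0xe91bd35:
+ *             from pickle import PickleError as __pyx_PickleError
+ *             raise __pyx_PickleError("Incompatible checksums (%s vs 0xe91bd35 = "
+ *                 "(_protected_table, _qs, _requote, _safe_table))" % __pyx_checksum)
+ *         __pyx_result = _Quoter.__new__(__pyx_type)
+ *         if __pyx_state is not None:
+ *             __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state)
+ *         return __pyx_result
+ */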
+
+/* "(tree fragment)":11
+ * __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle__Quoter__set_state(_Quoter __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result._protected_table = __pyx_state[0]; __pyx_result._qs = __pyx_state[1]; __pyx_result._requote = __pyx_state[2]; __pyx_result._safe_table = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ */
+
+static PyObject *__pyx_f_4yarl_10_quoting_c___pyx_unpickle__Quoter__set_state(struct __pyx_obj_4yarl_10_quoting_c__Quoter *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ uint8_t __pyx_t_2[16];
+ int __pyx_t_3;
+ Py_ssize_t __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ PyObject *__pyx_t_9 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle__Quoter__set_state", 0);
+
+ /* "(tree fragment)":12
+ * return __pyx_result
+ * cdef __pyx_unpickle__Quoter__set_state(_Quoter __pyx_result, tuple __pyx_state):
+ * __pyx_result._protected_table = __pyx_state[0]; __pyx_result._qs = __pyx_state[1]; __pyx_result._requote = __pyx_state[2]; __pyx_result._safe_table = __pyx_state[3] # <<<<<<<<<<<<<<
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[4])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (unlikely(__Pyx_carray_from_py_uint8_t(__pyx_t_1, __pyx_t_2, 16) < 0)) __PYX_ERR(1, 12, __pyx_L1_error)
+ memcpy(&(__pyx_v___pyx_result->_protected_table[0]), __pyx_t_2, sizeof(__pyx_v___pyx_result->_protected_table[0]) * (16));
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v___pyx_result->_qs = __pyx_t_3;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v___pyx_result->_requote = __pyx_t_3;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (unlikely(__Pyx_carray_from_py_uint8_t(__pyx_t_1, __pyx_t_2, 16) < 0)) __PYX_ERR(1, 12, __pyx_L1_error)
+ memcpy(&(__pyx_v___pyx_result->_safe_table[0]), __pyx_t_2, sizeof(__pyx_v___pyx_result->_safe_table[0]) * (16));
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle__Quoter__set_state(_Quoter __pyx_result, tuple __pyx_state):
+ * __pyx_result._protected_table = __pyx_state[0]; __pyx_result._qs = __pyx_state[1]; __pyx_result._requote = __pyx_state[2]; __pyx_result._safe_table = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[4])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(1, 13, __pyx_L1_error)
+ }
+ __pyx_t_4 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_5 = ((__pyx_t_4 > 4) != 0);
+ if (__pyx_t_5) {
+ } else {
+ __pyx_t_3 = __pyx_t_5;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_5 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_6 = (__pyx_t_5 != 0);
+ __pyx_t_3 = __pyx_t_6;
+ __pyx_L4_bool_binop_done:;
+ if (__pyx_t_3) {
+
+ /* "(tree fragment)":14
+ * __pyx_result._protected_table = __pyx_state[0]; __pyx_result._qs = __pyx_state[1]; __pyx_result._requote = __pyx_state[2]; __pyx_result._safe_table = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[4]) # <<<<<<<<<<<<<<
+ */
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_update); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_8);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 14, __pyx_L1_error)
+ }
+ __pyx_t_7 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_9 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) {
+ __pyx_t_9 = PyMethod_GET_SELF(__pyx_t_8);
+ if (likely(__pyx_t_9)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
+ __Pyx_INCREF(__pyx_t_9);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_8, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_9) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_9, __pyx_t_7) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle__Quoter__set_state(_Quoter __pyx_result, tuple __pyx_state):
+ * __pyx_result._protected_table = __pyx_state[0]; __pyx_result._qs = __pyx_state[1]; __pyx_result._requote = __pyx_state[2]; __pyx_result._safe_table = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[4])
+ */
+ }
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle__Quoter__set_state(_Quoter __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result._protected_table = __pyx_state[0]; __pyx_result._qs = __pyx_state[1]; __pyx_result._requote = __pyx_state[2]; __pyx_result._safe_table = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_XDECREF(__pyx_t_9);
+ __Pyx_AddTraceback("yarl._quoting_c.__pyx_unpickle__Quoter__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
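+
+/* State tuple layout for _Quoter, as unpacked above:
+ *   state[0] -> _protected_table  (16-element uint8_t array, converted via
+ *                                  __Pyx_carray_from_py_uint8_t)
+ *   state[1] -> _qs               (stored as its truth value)
+ *   state[2] -> _requote          (stored as its truth value)
+ *   state[3] -> _safe_table       (16-element uint8_t array)
+ *   state[4] -> optional mapping merged into the instance __dict__. */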
+
+/* "(tree fragment)":1
+ * def __pyx_unpickle__Unquoter(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+/* Python wrapper */
+static PyObject *__pyx_pw_4yarl_10_quoting_c_3__pyx_unpickle__Unquoter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
+static PyMethodDef __pyx_mdef_4yarl_10_quoting_c_3__pyx_unpickle__Unquoter = {"__pyx_unpickle__Unquoter", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_4yarl_10_quoting_c_3__pyx_unpickle__Unquoter, METH_VARARGS|METH_KEYWORDS, 0};
+static PyObject *__pyx_pw_4yarl_10_quoting_c_3__pyx_unpickle__Unquoter(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
+ PyObject *__pyx_v___pyx_type = 0;
+ long __pyx_v___pyx_checksum;
+ PyObject *__pyx_v___pyx_state = 0;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ PyObject *__pyx_r = 0;
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__pyx_unpickle__Unquoter (wrapper)", 0);
+ {
+ static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
+ PyObject* values[3] = {0,0,0};
+ if (unlikely(__pyx_kwds)) {
+ Py_ssize_t kw_args;
+ const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
+ switch (pos_args) {
+ case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ CYTHON_FALLTHROUGH;
+ case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ CYTHON_FALLTHROUGH;
+ case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ CYTHON_FALLTHROUGH;
+ case 0: break;
+ default: goto __pyx_L5_argtuple_error;
+ }
+ kw_args = PyDict_Size(__pyx_kwds);
+ switch (pos_args) {
+ case 0:
+ if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
+ else goto __pyx_L5_argtuple_error;
+ CYTHON_FALLTHROUGH;
+ case 1:
+ if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle__Unquoter", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ CYTHON_FALLTHROUGH;
+ case 2:
+ if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
+ else {
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle__Unquoter", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ }
+ if (unlikely(kw_args > 0)) {
+ if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle__Unquoter") < 0)) __PYX_ERR(1, 1, __pyx_L3_error)
+ }
+ } else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
+ goto __pyx_L5_argtuple_error;
+ } else {
+ values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
+ values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
+ values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
+ }
+ __pyx_v___pyx_type = values[0];
+ __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_v___pyx_state = values[2];
+ }
+ goto __pyx_L4_argument_unpacking_done;
+ __pyx_L5_argtuple_error:;
+ __Pyx_RaiseArgtupleInvalid("__pyx_unpickle__Unquoter", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error)
+ __pyx_L3_error:;
+ __Pyx_AddTraceback("yarl._quoting_c.__pyx_unpickle__Unquoter", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __Pyx_RefNannyFinishContext();
+ return NULL;
+ __pyx_L4_argument_unpacking_done:;
+ __pyx_r = __pyx_pf_4yarl_10_quoting_c_2__pyx_unpickle__Unquoter(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
+
+ /* function exit code */
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
+
+static PyObject *__pyx_pf_4yarl_10_quoting_c_2__pyx_unpickle__Unquoter(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_v___pyx_PickleError = 0;
+ PyObject *__pyx_v___pyx_result = 0;
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ int __pyx_t_1;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ PyObject *__pyx_t_4 = NULL;
+ PyObject *__pyx_t_5 = NULL;
+ int __pyx_t_6;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle__Unquoter", 0);
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x276577d: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x276577d = (_qs, _qs_quoter, _quoter, _unsafe))" % __pyx_checksum)
+ */
+ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0x276577d) != 0);
+ if (__pyx_t_1) {
+
+ /* "(tree fragment)":5
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x276577d:
+ * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x276577d = (_qs, _qs_quoter, _quoter, _unsafe))" % __pyx_checksum)
+ * __pyx_result = _Unquoter.__new__(__pyx_type)
+ */
+ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_n_s_PickleError);
+ __Pyx_GIVEREF(__pyx_n_s_PickleError);
+ PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
+ __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_2);
+ __pyx_v___pyx_PickleError = __pyx_t_2;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":6
+ * if __pyx_checksum != 0x276577d:
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x276577d = (_qs, _qs_quoter, _quoter, _unsafe))" % __pyx_checksum) # <<<<<<<<<<<<<<
+ * __pyx_result = _Unquoter.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ */
+ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0x27, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_4);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_INCREF(__pyx_v___pyx_PickleError);
+ __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
+ if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_5)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_5);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
+ __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_Raise(__pyx_t_3, 0, 0, 0);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __PYX_ERR(1, 6, __pyx_L1_error)
+
+ /* "(tree fragment)":4
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ * if __pyx_checksum != 0x276577d: # <<<<<<<<<<<<<<
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x276577d = (_qs, _qs_quoter, _quoter, _unsafe))" % __pyx_checksum)
+ */
+ }
+
+ /* "(tree fragment)":7
+ * from pickle import PickleError as __pyx_PickleError
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x276577d = (_qs, _qs_quoter, _quoter, _unsafe))" % __pyx_checksum)
+ * __pyx_result = _Unquoter.__new__(__pyx_type) # <<<<<<<<<<<<<<
+ * if __pyx_state is not None:
+ * __pyx_unpickle__Unquoter__set_state(<_Unquoter> __pyx_result, __pyx_state)
+ */
+ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_ptype_4yarl_10_quoting_c__Unquoter), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __pyx_t_4 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
+ __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
+ if (likely(__pyx_t_4)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
+ __Pyx_INCREF(__pyx_t_4);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_2, function);
+ }
+ }
+ __pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
+ __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
+ if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __pyx_v___pyx_result = __pyx_t_3;
+ __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x276577d = (_qs, _qs_quoter, _quoter, _unsafe))" % __pyx_checksum)
+ * __pyx_result = _Unquoter.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle__Unquoter__set_state(<_Unquoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ __pyx_t_1 = (__pyx_v___pyx_state != Py_None);
+ __pyx_t_6 = (__pyx_t_1 != 0);
+ if (__pyx_t_6) {
+
+ /* "(tree fragment)":9
+ * __pyx_result = _Unquoter.__new__(__pyx_type)
+ * if __pyx_state is not None:
+ * __pyx_unpickle__Unquoter__set_state(<_Unquoter> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
+ * return __pyx_result
+ * cdef __pyx_unpickle__Unquoter__set_state(_Unquoter __pyx_result, tuple __pyx_state):
+ */
+ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 9, __pyx_L1_error)
+ __pyx_t_3 = __pyx_f_4yarl_10_quoting_c___pyx_unpickle__Unquoter__set_state(((struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+
+ /* "(tree fragment)":8
+ * raise __pyx_PickleError("Incompatible checksums (%s vs 0x276577d = (_qs, _qs_quoter, _quoter, _unsafe))" % __pyx_checksum)
+ * __pyx_result = _Unquoter.__new__(__pyx_type)
+ * if __pyx_state is not None: # <<<<<<<<<<<<<<
+ * __pyx_unpickle__Unquoter__set_state(<_Unquoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ */
+ }
+
+ /* "(tree fragment)":10
+ * if __pyx_state is not None:
+ * __pyx_unpickle__Unquoter__set_state(<_Unquoter> __pyx_result, __pyx_state)
+ * return __pyx_result # <<<<<<<<<<<<<<
+ * cdef __pyx_unpickle__Unquoter__set_state(_Unquoter __pyx_result, tuple __pyx_state):
+ * __pyx_result._qs = __pyx_state[0]; __pyx_result._qs_quoter = __pyx_state[1]; __pyx_result._quoter = __pyx_state[2]; __pyx_result._unsafe = __pyx_state[3]
+ */
+ __Pyx_XDECREF(__pyx_r);
+ __Pyx_INCREF(__pyx_v___pyx_result);
+ __pyx_r = __pyx_v___pyx_result;
+ goto __pyx_L0;
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle__Unquoter(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+
+ /* function exit code */
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ __Pyx_XDECREF(__pyx_t_4);
+ __Pyx_XDECREF(__pyx_t_5);
+ __Pyx_AddTraceback("yarl._quoting_c.__pyx_unpickle__Unquoter", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = NULL;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v___pyx_PickleError);
+ __Pyx_XDECREF(__pyx_v___pyx_result);
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
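+
+/* Mirrors __pyx_unpickle__Quoter above, but for _Unquoter: the expected
+ * checksum is 0x276577d and the reconstructed attributes are
+ * (_qs, _qs_quoter, _quoter, _unsafe). */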
+
+/* "(tree fragment)":11
+ * __pyx_unpickle__Unquoter__set_state(<_Unquoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle__Unquoter__set_state(_Unquoter __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result._qs = __pyx_state[0]; __pyx_result._qs_quoter = __pyx_state[1]; __pyx_result._quoter = __pyx_state[2]; __pyx_result._unsafe = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ */
+
+static PyObject *__pyx_f_4yarl_10_quoting_c___pyx_unpickle__Unquoter__set_state(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
+ PyObject *__pyx_r = NULL;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_t_2;
+ Py_ssize_t __pyx_t_3;
+ int __pyx_t_4;
+ int __pyx_t_5;
+ PyObject *__pyx_t_6 = NULL;
+ PyObject *__pyx_t_7 = NULL;
+ PyObject *__pyx_t_8 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__pyx_unpickle__Unquoter__set_state", 0);
+
+ /* "(tree fragment)":12
+ * return __pyx_result
+ * cdef __pyx_unpickle__Unquoter__set_state(_Unquoter __pyx_result, tuple __pyx_state):
+ * __pyx_result._qs = __pyx_state[0]; __pyx_result._qs_quoter = __pyx_state[1]; __pyx_result._quoter = __pyx_state[2]; __pyx_result._unsafe = __pyx_state[3] # <<<<<<<<<<<<<<
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[4])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_v___pyx_result->_qs = __pyx_t_2;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_4yarl_10_quoting_c__Quoter))))) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->_qs_quoter);
+ __Pyx_DECREF(((PyObject *)__pyx_v___pyx_result->_qs_quoter));
+ __pyx_v___pyx_result->_qs_quoter = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_t_1);
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 2, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_ptype_4yarl_10_quoting_c__Quoter))))) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->_quoter);
+ __Pyx_DECREF(((PyObject *)__pyx_v___pyx_result->_quoter));
+ __pyx_v___pyx_result->_quoter = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)__pyx_t_1);
+ __pyx_t_1 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 12, __pyx_L1_error)
+ }
+ __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 3, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(1, 12, __pyx_L1_error)
+ __Pyx_GIVEREF(__pyx_t_1);
+ __Pyx_GOTREF(__pyx_v___pyx_result->_unsafe);
+ __Pyx_DECREF(__pyx_v___pyx_result->_unsafe);
+ __pyx_v___pyx_result->_unsafe = ((PyObject*)__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle__Unquoter__set_state(_Unquoter __pyx_result, tuple __pyx_state):
+ * __pyx_result._qs = __pyx_state[0]; __pyx_result._qs_quoter = __pyx_state[1]; __pyx_result._quoter = __pyx_state[2]; __pyx_result._unsafe = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[4])
+ */
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
+ __PYX_ERR(1, 13, __pyx_L1_error)
+ }
+ __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_4 = ((__pyx_t_3 > 4) != 0);
+ if (__pyx_t_4) {
+ } else {
+ __pyx_t_2 = __pyx_t_4;
+ goto __pyx_L4_bool_binop_done;
+ }
+ __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error)
+ __pyx_t_5 = (__pyx_t_4 != 0);
+ __pyx_t_2 = __pyx_t_5;
+ __pyx_L4_bool_binop_done:;
+ if (__pyx_t_2) {
+
+ /* "(tree fragment)":14
+ * __pyx_result._qs = __pyx_state[0]; __pyx_result._qs_quoter = __pyx_state[1]; __pyx_result._quoter = __pyx_state[2]; __pyx_result._unsafe = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ * __pyx_result.__dict__.update(__pyx_state[4]) # <<<<<<<<<<<<<<
+ */
+ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(__pyx_v___pyx_state == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
+ __PYX_ERR(1, 14, __pyx_L1_error)
+ }
+ __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 4, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_6);
+ __pyx_t_8 = NULL;
+ if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
+ __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
+ if (likely(__pyx_t_8)) {
+ PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
+ __Pyx_INCREF(__pyx_t_8);
+ __Pyx_INCREF(function);
+ __Pyx_DECREF_SET(__pyx_t_7, function);
+ }
+ }
+ __pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
+ __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
+ if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":13
+ * cdef __pyx_unpickle__Unquoter__set_state(_Unquoter __pyx_result, tuple __pyx_state):
+ * __pyx_result._qs = __pyx_state[0]; __pyx_result._qs_quoter = __pyx_state[1]; __pyx_result._quoter = __pyx_state[2]; __pyx_result._unsafe = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
+ * __pyx_result.__dict__.update(__pyx_state[4])
+ */
+ }
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle__Unquoter__set_state(<_Unquoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle__Unquoter__set_state(_Unquoter __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result._qs = __pyx_state[0]; __pyx_result._qs_quoter = __pyx_state[1]; __pyx_result._quoter = __pyx_state[2]; __pyx_result._unsafe = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ */
+
+ /* function exit code */
+ __pyx_r = Py_None; __Pyx_INCREF(Py_None);
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_6);
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_8);
+ __Pyx_AddTraceback("yarl._quoting_c.__pyx_unpickle__Unquoter__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = 0;
+ __pyx_L0:;
+ __Pyx_XGIVEREF(__pyx_r);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
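+
+/* State tuple layout for _Unquoter, with the type checks applied above:
+ *   state[0] -> _qs         (stored as its truth value)
+ *   state[1] -> _qs_quoter  (must be a _Quoter instance or None)
+ *   state[2] -> _quoter     (must be a _Quoter instance or None)
+ *   state[3] -> _unsafe     (must be unicode or None)
+ *   state[4] -> optional mapping merged into the instance __dict__. */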
+
+/* "carray.from_py":77
+ *
+ * @cname("__Pyx_carray_from_py_uint8_t")
+ * cdef int __Pyx_carray_from_py_uint8_t(object o, base_type *v, Py_ssize_t length) except -1: # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t i = length
+ * try:
+ */
+
+static int __Pyx_carray_from_py_uint8_t(PyObject *__pyx_v_o, uint8_t *__pyx_v_v, Py_ssize_t __pyx_v_length) {
+ Py_ssize_t __pyx_v_i;
+ PyObject *__pyx_v_item = NULL;
+ int __pyx_r;
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ Py_ssize_t __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ PyObject *__pyx_t_7 = NULL;
+ Py_ssize_t __pyx_t_8;
+ PyObject *(*__pyx_t_9)(PyObject *);
+ PyObject *__pyx_t_10 = NULL;
+ uint8_t __pyx_t_11;
+ char const *__pyx_t_12;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_carray_from_py_uint8_t", 0);
+
+ /* "carray.from_py":78
+ * @cname("__Pyx_carray_from_py_uint8_t")
+ * cdef int __Pyx_carray_from_py_uint8_t(object o, base_type *v, Py_ssize_t length) except -1:
+ * cdef Py_ssize_t i = length # <<<<<<<<<<<<<<
+ * try:
+ * i = len(o)
+ */
+ __pyx_v_i = __pyx_v_length;
+
+ /* "carray.from_py":79
+ * cdef int __Pyx_carray_from_py_uint8_t(object o, base_type *v, Py_ssize_t length) except -1:
+ * cdef Py_ssize_t i = length
+ * try: # <<<<<<<<<<<<<<
+ * i = len(o)
+ * except (TypeError, OverflowError):
+ */
+ {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
+ __Pyx_XGOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_t_3);
+ /*try:*/ {
+
+ /* "carray.from_py":80
+ * cdef Py_ssize_t i = length
+ * try:
+ * i = len(o) # <<<<<<<<<<<<<<
+ * except (TypeError, OverflowError):
+ * pass
+ */
+ __pyx_t_4 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1))) __PYX_ERR(1, 80, __pyx_L3_error)
+ __pyx_v_i = __pyx_t_4;
+
+ /* "carray.from_py":79
+ * cdef int __Pyx_carray_from_py_uint8_t(object o, base_type *v, Py_ssize_t length) except -1:
+ * cdef Py_ssize_t i = length
+ * try: # <<<<<<<<<<<<<<
+ * i = len(o)
+ * except (TypeError, OverflowError):
+ */
+ }
+ __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
+ goto __pyx_L8_try_end;
+ __pyx_L3_error:;
+
+ /* "carray.from_py":81
+ * try:
+ * i = len(o)
+ * except (TypeError, OverflowError): # <<<<<<<<<<<<<<
+ * pass
+ * if i == length:
+ */
+ __pyx_t_5 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError) || __Pyx_PyErr_ExceptionMatches(__pyx_builtin_OverflowError);
+ if (__pyx_t_5) {
+ __Pyx_ErrRestore(0,0,0);
+ goto __pyx_L4_exception_handled;
+ }
+ goto __pyx_L5_except_error;
+ __pyx_L5_except_error:;
+
+ /* "carray.from_py":79
+ * cdef int __Pyx_carray_from_py_uint8_t(object o, base_type *v, Py_ssize_t length) except -1:
+ * cdef Py_ssize_t i = length
+ * try: # <<<<<<<<<<<<<<
+ * i = len(o)
+ * except (TypeError, OverflowError):
+ */
+ __Pyx_XGIVEREF(__pyx_t_1);
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
+ goto __pyx_L1_error;
+ __pyx_L4_exception_handled:;
+ __Pyx_XGIVEREF(__pyx_t_1);
+ __Pyx_XGIVEREF(__pyx_t_2);
+ __Pyx_XGIVEREF(__pyx_t_3);
+ __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
+ __pyx_L8_try_end:;
+ }
+
+ /* "carray.from_py":83
+ * except (TypeError, OverflowError):
+ * pass
+ * if i == length: # <<<<<<<<<<<<<<
+ * for i, item in enumerate(o):
+ * if i >= length:
+ */
+ __pyx_t_6 = ((__pyx_v_i == __pyx_v_length) != 0);
+ if (__pyx_t_6) {
+
+ /* "carray.from_py":84
+ * pass
+ * if i == length:
+ * for i, item in enumerate(o): # <<<<<<<<<<<<<<
+ * if i >= length:
+ * break
+ */
+ __pyx_t_4 = 0;
+ if (likely(PyList_CheckExact(__pyx_v_o)) || PyTuple_CheckExact(__pyx_v_o)) {
+ __pyx_t_7 = __pyx_v_o; __Pyx_INCREF(__pyx_t_7); __pyx_t_8 = 0;
+ __pyx_t_9 = NULL;
+ } else {
+ __pyx_t_8 = -1; __pyx_t_7 = PyObject_GetIter(__pyx_v_o); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 84, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __pyx_t_9 = Py_TYPE(__pyx_t_7)->tp_iternext; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 84, __pyx_L1_error)
+ }
+ for (;;) {
+ if (likely(!__pyx_t_9)) {
+ if (likely(PyList_CheckExact(__pyx_t_7))) {
+ if (__pyx_t_8 >= PyList_GET_SIZE(__pyx_t_7)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_10 = PyList_GET_ITEM(__pyx_t_7, __pyx_t_8); __Pyx_INCREF(__pyx_t_10); __pyx_t_8++; if (unlikely(0 < 0)) __PYX_ERR(1, 84, __pyx_L1_error)
+ #else
+ __pyx_t_10 = PySequence_ITEM(__pyx_t_7, __pyx_t_8); __pyx_t_8++; if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 84, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_10);
+ #endif
+ } else {
+ if (__pyx_t_8 >= PyTuple_GET_SIZE(__pyx_t_7)) break;
+ #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ __pyx_t_10 = PyTuple_GET_ITEM(__pyx_t_7, __pyx_t_8); __Pyx_INCREF(__pyx_t_10); __pyx_t_8++; if (unlikely(0 < 0)) __PYX_ERR(1, 84, __pyx_L1_error)
+ #else
+ __pyx_t_10 = PySequence_ITEM(__pyx_t_7, __pyx_t_8); __pyx_t_8++; if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 84, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_10);
+ #endif
+ }
+ } else {
+ __pyx_t_10 = __pyx_t_9(__pyx_t_7);
+ if (unlikely(!__pyx_t_10)) {
+ PyObject* exc_type = PyErr_Occurred();
+ if (exc_type) {
+ if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
+ else __PYX_ERR(1, 84, __pyx_L1_error)
+ }
+ break;
+ }
+ __Pyx_GOTREF(__pyx_t_10);
+ }
+ __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_10);
+ __pyx_t_10 = 0;
+ __pyx_v_i = __pyx_t_4;
+ __pyx_t_4 = (__pyx_t_4 + 1);
+
+ /* "carray.from_py":85
+ * if i == length:
+ * for i, item in enumerate(o):
+ * if i >= length: # <<<<<<<<<<<<<<
+ * break
+ * v[i] = item
+ */
+ __pyx_t_6 = ((__pyx_v_i >= __pyx_v_length) != 0);
+ if (__pyx_t_6) {
+
+ /* "carray.from_py":86
+ * for i, item in enumerate(o):
+ * if i >= length:
+ * break # <<<<<<<<<<<<<<
+ * v[i] = item
+ * else:
+ */
+ goto __pyx_L11_break;
+
+ /* "carray.from_py":85
+ * if i == length:
+ * for i, item in enumerate(o):
+ * if i >= length: # <<<<<<<<<<<<<<
+ * break
+ * v[i] = item
+ */
+ }
+
+ /* "carray.from_py":87
+ * if i >= length:
+ * break
+ * v[i] = item # <<<<<<<<<<<<<<
+ * else:
+ * i += 1 # convert index to length
+ */
+ __pyx_t_11 = __Pyx_PyInt_As_uint8_t(__pyx_v_item); if (unlikely((__pyx_t_11 == ((uint8_t)-1)) && PyErr_Occurred())) __PYX_ERR(1, 87, __pyx_L1_error)
+ (__pyx_v_v[__pyx_v_i]) = __pyx_t_11;
+
+ /* "carray.from_py":84
+ * pass
+ * if i == length:
+ * for i, item in enumerate(o): # <<<<<<<<<<<<<<
+ * if i >= length:
+ * break
+ */
+ }
+ /*else*/ {
+
+ /* "carray.from_py":89
+ * v[i] = item
+ * else:
+ * i += 1 # convert index to length # <<<<<<<<<<<<<<
+ * if i == length:
+ * return 0
+ */
+ __pyx_v_i = (__pyx_v_i + 1);
+
+ /* "carray.from_py":90
+ * else:
+ * i += 1 # convert index to length
+ * if i == length: # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ __pyx_t_6 = ((__pyx_v_i == __pyx_v_length) != 0);
+ if (__pyx_t_6) {
+
+ /* "carray.from_py":91
+ * i += 1 # convert index to length
+ * if i == length:
+ * return 0 # <<<<<<<<<<<<<<
+ *
+ * PyErr_Format(
+ */
+ __pyx_r = 0;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+ goto __pyx_L0;
+
+ /* "carray.from_py":90
+ * else:
+ * i += 1 # convert index to length
+ * if i == length: # <<<<<<<<<<<<<<
+ * return 0
+ *
+ */
+ }
+ }
+
+ /* "carray.from_py":84
+ * pass
+ * if i == length:
+ * for i, item in enumerate(o): # <<<<<<<<<<<<<<
+ * if i >= length:
+ * break
+ */
+ __pyx_L11_break:;
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+
+ /* "carray.from_py":83
+ * except (TypeError, OverflowError):
+ * pass
+ * if i == length: # <<<<<<<<<<<<<<
+ * for i, item in enumerate(o):
+ * if i >= length:
+ */
+ }
+
+ /* "carray.from_py":96
+ * IndexError,
+ * ("too many values found during array assignment, expected %zd"
+ * if i >= length else # <<<<<<<<<<<<<<
+ * "not enough values found during array assignment, expected %zd, got %zd"),
+ * length, i)
+ */
+ if (((__pyx_v_i >= __pyx_v_length) != 0)) {
+ __pyx_t_12 = ((char const *)"too many values found during array assignment, expected %zd");
+ } else {
+ __pyx_t_12 = ((char const *)"not enough values found during array assignment, expected %zd, got %zd");
+ }
+
+ /* "carray.from_py":93
+ * return 0
+ *
+ * PyErr_Format( # <<<<<<<<<<<<<<
+ * IndexError,
+ * ("too many values found during array assignment, expected %zd"
+ */
+ __pyx_t_7 = PyErr_Format(__pyx_builtin_IndexError, __pyx_t_12, __pyx_v_length, __pyx_v_i); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 93, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_7);
+ __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
+
+ /* "carray.from_py":77
+ *
+ * @cname("__Pyx_carray_from_py_uint8_t")
+ * cdef int __Pyx_carray_from_py_uint8_t(object o, base_type *v, Py_ssize_t length) except -1: # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t i = length
+ * try:
+ */
+
+ /* function exit code */
+ __pyx_r = 0;
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_7);
+ __Pyx_XDECREF(__pyx_t_10);
+ __Pyx_AddTraceback("carray.from_py.__Pyx_carray_from_py_uint8_t", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ __pyx_r = -1;
+ __pyx_L0:;
+ __Pyx_XDECREF(__pyx_v_item);
+ __Pyx_RefNannyFinishContext();
+ return __pyx_r;
+}
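+
+/* __Pyx_carray_from_py_uint8_t is Cython's generic "carray.from_py" helper
+ * specialised for uint8_t: it copies exactly `length` items from a Python
+ * sequence (fast paths for list/tuple, generic iteration otherwise) into
+ * the C array `v`, raising IndexError when the input yields too many or too
+ * few values. The -1 return value implements the `except -1` contract from
+ * the fragment above. */
+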
+static struct __pyx_vtabstruct_4yarl_10_quoting_c__Quoter __pyx_vtable_4yarl_10_quoting_c__Quoter;
+
+static PyObject *__pyx_tp_new_4yarl_10_quoting_c__Quoter(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ struct __pyx_obj_4yarl_10_quoting_c__Quoter *p;
+ PyObject *o;
+ if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+ o = (*t->tp_alloc)(t, 0);
+ } else {
+ o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+ }
+ if (unlikely(!o)) return 0;
+ p = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)o);
+ p->__pyx_vtab = __pyx_vtabptr_4yarl_10_quoting_c__Quoter;
+ return o;
+}
+
+static void __pyx_tp_dealloc_4yarl_10_quoting_c__Quoter(PyObject *o) {
+ #if CYTHON_USE_TP_FINALIZE
+ if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
+ if (PyObject_CallFinalizerFromDealloc(o)) return;
+ }
+ #endif
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static PyMethodDef __pyx_methods_4yarl_10_quoting_c__Quoter[] = {
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_4yarl_10_quoting_c_7_Quoter_5__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_4yarl_10_quoting_c_7_Quoter_7__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static PyTypeObject __pyx_type_4yarl_10_quoting_c__Quoter = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "yarl._quoting_c._Quoter", /*tp_name*/
+ sizeof(struct __pyx_obj_4yarl_10_quoting_c__Quoter), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_4yarl_10_quoting_c__Quoter, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ __pyx_pw_4yarl_10_quoting_c_7_Quoter_3__call__, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
+ 0, /*tp_doc*/
+ 0, /*tp_traverse*/
+ 0, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_4yarl_10_quoting_c__Quoter, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pw_4yarl_10_quoting_c_7_Quoter_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_4yarl_10_quoting_c__Quoter, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
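+
+/* Statically initialised type object for yarl._quoting_c._Quoter. The
+ * non-trivial slots are tp_call (a quoter instance is invoked as a
+ * callable), tp_init, tp_new, tp_dealloc and the method table holding the
+ * two pickle helpers; the PY_VERSION_HEX guards pad out slots that were
+ * added or removed across CPython releases. */
+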
+static struct __pyx_vtabstruct_4yarl_10_quoting_c__Unquoter __pyx_vtable_4yarl_10_quoting_c__Unquoter;
+
+static PyObject *__pyx_tp_new_4yarl_10_quoting_c__Unquoter(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
+ struct __pyx_obj_4yarl_10_quoting_c__Unquoter *p;
+ PyObject *o;
+ if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
+ o = (*t->tp_alloc)(t, 0);
+ } else {
+ o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
+ }
+ if (unlikely(!o)) return 0;
+ p = ((struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)o);
+ p->__pyx_vtab = __pyx_vtabptr_4yarl_10_quoting_c__Unquoter;
+ p->_unsafe = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ p->_quoter = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)Py_None); Py_INCREF(Py_None);
+ p->_qs_quoter = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)Py_None); Py_INCREF(Py_None);
+ return o;
+}
+
+static void __pyx_tp_dealloc_4yarl_10_quoting_c__Unquoter(PyObject *o) {
+ struct __pyx_obj_4yarl_10_quoting_c__Unquoter *p = (struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)o;
+ #if CYTHON_USE_TP_FINALIZE
+ if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
+ if (PyObject_CallFinalizerFromDealloc(o)) return;
+ }
+ #endif
+ PyObject_GC_UnTrack(o);
+ Py_CLEAR(p->_unsafe);
+ Py_CLEAR(p->_quoter);
+ Py_CLEAR(p->_qs_quoter);
+ (*Py_TYPE(o)->tp_free)(o);
+}
+
+static int __pyx_tp_traverse_4yarl_10_quoting_c__Unquoter(PyObject *o, visitproc v, void *a) {
+ int e;
+ struct __pyx_obj_4yarl_10_quoting_c__Unquoter *p = (struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)o;
+ if (p->_quoter) {
+ e = (*v)(((PyObject *)p->_quoter), a); if (e) return e;
+ }
+ if (p->_qs_quoter) {
+ e = (*v)(((PyObject *)p->_qs_quoter), a); if (e) return e;
+ }
+ return 0;
+}
+
+static int __pyx_tp_clear_4yarl_10_quoting_c__Unquoter(PyObject *o) {
+ PyObject* tmp;
+ struct __pyx_obj_4yarl_10_quoting_c__Unquoter *p = (struct __pyx_obj_4yarl_10_quoting_c__Unquoter *)o;
+ tmp = ((PyObject*)p->_quoter);
+ p->_quoter = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ tmp = ((PyObject*)p->_qs_quoter);
+ p->_qs_quoter = ((struct __pyx_obj_4yarl_10_quoting_c__Quoter *)Py_None); Py_INCREF(Py_None);
+ Py_XDECREF(tmp);
+ return 0;
+}
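+
+/* GC support for _Unquoter: tp_traverse visits the two owned _Quoter
+ * references so the collector can find reference cycles, and tp_clear
+ * breaks such cycles by swapping those slots back to Py_None. The _unsafe
+ * string cannot hold references to other objects, so it is only released
+ * in tp_dealloc. */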
+
+static PyMethodDef __pyx_methods_4yarl_10_quoting_c__Unquoter[] = {
+ {"__reduce_cython__", (PyCFunction)__pyx_pw_4yarl_10_quoting_c_9_Unquoter_5__reduce_cython__, METH_NOARGS, 0},
+ {"__setstate_cython__", (PyCFunction)__pyx_pw_4yarl_10_quoting_c_9_Unquoter_7__setstate_cython__, METH_O, 0},
+ {0, 0, 0, 0}
+};
+
+static PyTypeObject __pyx_type_4yarl_10_quoting_c__Unquoter = {
+ PyVarObject_HEAD_INIT(0, 0)
+ "yarl._quoting_c._Unquoter", /*tp_name*/
+ sizeof(struct __pyx_obj_4yarl_10_quoting_c__Unquoter), /*tp_basicsize*/
+ 0, /*tp_itemsize*/
+ __pyx_tp_dealloc_4yarl_10_quoting_c__Unquoter, /*tp_dealloc*/
+ #if PY_VERSION_HEX < 0x030800b4
+ 0, /*tp_print*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4
+ 0, /*tp_vectorcall_offset*/
+ #endif
+ 0, /*tp_getattr*/
+ 0, /*tp_setattr*/
+ #if PY_MAJOR_VERSION < 3
+ 0, /*tp_compare*/
+ #endif
+ #if PY_MAJOR_VERSION >= 3
+ 0, /*tp_as_async*/
+ #endif
+ 0, /*tp_repr*/
+ 0, /*tp_as_number*/
+ 0, /*tp_as_sequence*/
+ 0, /*tp_as_mapping*/
+ 0, /*tp_hash*/
+ __pyx_pw_4yarl_10_quoting_c_9_Unquoter_3__call__, /*tp_call*/
+ 0, /*tp_str*/
+ 0, /*tp_getattro*/
+ 0, /*tp_setattro*/
+ 0, /*tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
+ 0, /*tp_doc*/
+ __pyx_tp_traverse_4yarl_10_quoting_c__Unquoter, /*tp_traverse*/
+ __pyx_tp_clear_4yarl_10_quoting_c__Unquoter, /*tp_clear*/
+ 0, /*tp_richcompare*/
+ 0, /*tp_weaklistoffset*/
+ 0, /*tp_iter*/
+ 0, /*tp_iternext*/
+ __pyx_methods_4yarl_10_quoting_c__Unquoter, /*tp_methods*/
+ 0, /*tp_members*/
+ 0, /*tp_getset*/
+ 0, /*tp_base*/
+ 0, /*tp_dict*/
+ 0, /*tp_descr_get*/
+ 0, /*tp_descr_set*/
+ 0, /*tp_dictoffset*/
+ __pyx_pw_4yarl_10_quoting_c_9_Unquoter_1__init__, /*tp_init*/
+ 0, /*tp_alloc*/
+ __pyx_tp_new_4yarl_10_quoting_c__Unquoter, /*tp_new*/
+ 0, /*tp_free*/
+ 0, /*tp_is_gc*/
+ 0, /*tp_bases*/
+ 0, /*tp_mro*/
+ 0, /*tp_cache*/
+ 0, /*tp_subclasses*/
+ 0, /*tp_weaklist*/
+ 0, /*tp_del*/
+ 0, /*tp_version_tag*/
+ #if PY_VERSION_HEX >= 0x030400a1
+ 0, /*tp_finalize*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b1
+ 0, /*tp_vectorcall*/
+ #endif
+ #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
+ 0, /*tp_print*/
+ #endif
+};
+
+static PyMethodDef __pyx_methods[] = {
+ {0, 0, 0, 0}
+};
+
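+ /* Module definition. Under PEP 489 multi-phase init the create/exec steps are
+    listed in m_slots and m_size is 0; otherwise this is a classic single-phase
+    module with m_size = -1 (no re-initialisation support). */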
+#if PY_MAJOR_VERSION >= 3
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
+static int __pyx_pymod_exec__quoting_c(PyObject* module); /*proto*/
+static PyModuleDef_Slot __pyx_moduledef_slots[] = {
+ {Py_mod_create, (void*)__pyx_pymod_create},
+ {Py_mod_exec, (void*)__pyx_pymod_exec__quoting_c},
+ {0, NULL}
+};
+#endif
+
+static struct PyModuleDef __pyx_moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_quoting_c",
+ 0, /* m_doc */
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ 0, /* m_size */
+ #else
+ -1, /* m_size */
+ #endif
+ __pyx_methods /* m_methods */,
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_moduledef_slots, /* m_slots */
+ #else
+ NULL, /* m_reload */
+ #endif
+ NULL, /* m_traverse */
+ NULL, /* m_clear */
+ NULL /* m_free */
+};
+#endif
+#ifndef CYTHON_SMALL_CODE
+#if defined(__clang__)
+ #define CYTHON_SMALL_CODE
+#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
+ #define CYTHON_SMALL_CODE __attribute__((cold))
+#else
+ #define CYTHON_SMALL_CODE
+#endif
+#endif
+
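+ /* Interned-string table: each entry pairs a module-level PyObject* slot with
+    the C character data, its length, and flags selecting encoding, unicode vs.
+    bytes/str, and interning. __Pyx_InitStrings() materialises all of them in
+    one pass at import time. */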
+static __Pyx_StringTabEntry __pyx_string_tab[] = {
+ {&__pyx_kp_u_, __pyx_k_, sizeof(__pyx_k_), 0, 1, 0, 0},
+ {&__pyx_kp_u_Argument_should_be_str, __pyx_k_Argument_should_be_str, sizeof(__pyx_k_Argument_should_be_str), 0, 1, 0, 0},
+ {&__pyx_kp_s_Incompatible_checksums_s_vs_0x27, __pyx_k_Incompatible_checksums_s_vs_0x27, sizeof(__pyx_k_Incompatible_checksums_s_vs_0x27), 0, 0, 1, 0},
+ {&__pyx_kp_s_Incompatible_checksums_s_vs_0xe9, __pyx_k_Incompatible_checksums_s_vs_0xe9, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xe9), 0, 0, 1, 0},
+ {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
+ {&__pyx_kp_u_Only_safe_symbols_with_ORD_128_a, __pyx_k_Only_safe_symbols_with_ORD_128_a, sizeof(__pyx_k_Only_safe_symbols_with_ORD_128_a), 0, 1, 0, 0},
+ {&__pyx_n_s_OverflowError, __pyx_k_OverflowError, sizeof(__pyx_k_OverflowError), 0, 0, 1, 1},
+ {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
+ {&__pyx_n_s_Quoter, __pyx_k_Quoter, sizeof(__pyx_k_Quoter), 0, 0, 1, 1},
+ {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
+ {&__pyx_n_s_UnicodeDecodeError, __pyx_k_UnicodeDecodeError, sizeof(__pyx_k_UnicodeDecodeError), 0, 0, 1, 1},
+ {&__pyx_n_s_Unquoter, __pyx_k_Unquoter, sizeof(__pyx_k_Unquoter), 0, 0, 1, 1},
+ {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
+ {&__pyx_kp_u__10, __pyx_k__10, sizeof(__pyx_k__10), 0, 1, 0, 0},
+ {&__pyx_kp_u__11, __pyx_k__11, sizeof(__pyx_k__11), 0, 1, 0, 0},
+ {&__pyx_kp_u__12, __pyx_k__12, sizeof(__pyx_k__12), 0, 1, 0, 0},
+ {&__pyx_kp_u__13, __pyx_k__13, sizeof(__pyx_k__13), 0, 1, 0, 0},
+ {&__pyx_kp_u__4, __pyx_k__4, sizeof(__pyx_k__4), 0, 1, 0, 0},
+ {&__pyx_kp_u__5, __pyx_k__5, sizeof(__pyx_k__5), 0, 1, 0, 0},
+ {&__pyx_kp_u__6, __pyx_k__6, sizeof(__pyx_k__6), 0, 1, 0, 0},
+ {&__pyx_kp_u__7, __pyx_k__7, sizeof(__pyx_k__7), 0, 1, 0, 0},
+ {&__pyx_kp_u__9, __pyx_k__9, sizeof(__pyx_k__9), 0, 1, 0, 0},
+ {&__pyx_n_s_ascii_letters, __pyx_k_ascii_letters, sizeof(__pyx_k_ascii_letters), 0, 0, 1, 1},
+ {&__pyx_n_s_chr, __pyx_k_chr, sizeof(__pyx_k_chr), 0, 0, 1, 1},
+ {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
+ {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
+ {&__pyx_n_s_digits, __pyx_k_digits, sizeof(__pyx_k_digits), 0, 0, 1, 1},
+ {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
+ {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
+ {&__pyx_n_s_hex, __pyx_k_hex, sizeof(__pyx_k_hex), 0, 0, 1, 1},
+ {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
+ {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
+ {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
+ {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
+ {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
+ {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
+ {&__pyx_n_s_protected, __pyx_k_protected, sizeof(__pyx_k_protected), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_unpickle__Quoter, __pyx_k_pyx_unpickle__Quoter, sizeof(__pyx_k_pyx_unpickle__Quoter), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_unpickle__Unquoter, __pyx_k_pyx_unpickle__Unquoter, sizeof(__pyx_k_pyx_unpickle__Unquoter), 0, 0, 1, 1},
+ {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
+ {&__pyx_n_s_qs, __pyx_k_qs, sizeof(__pyx_k_qs), 0, 0, 1, 1},
+ {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
+ {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
+ {&__pyx_n_s_requote, __pyx_k_requote, sizeof(__pyx_k_requote), 0, 0, 1, 1},
+ {&__pyx_n_s_safe, __pyx_k_safe, sizeof(__pyx_k_safe), 0, 0, 1, 1},
+ {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
+ {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
+ {&__pyx_n_s_string, __pyx_k_string, sizeof(__pyx_k_string), 0, 0, 1, 1},
+ {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
+ {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
+ {&__pyx_n_s_unsafe, __pyx_k_unsafe, sizeof(__pyx_k_unsafe), 0, 0, 1, 1},
+ {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
+ {&__pyx_n_s_upper, __pyx_k_upper, sizeof(__pyx_k_upper), 0, 0, 1, 1},
+ {&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1},
+ {&__pyx_n_s_yarl__quoting_c, __pyx_k_yarl__quoting_c, sizeof(__pyx_k_yarl__quoting_c), 0, 0, 1, 1},
+ {0, 0, 0, 0, 0, 0, 0}
+};
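+ /* Look up the builtins the module body needs (range, chr, the exception
+    types, hex, enumerate) once at import time instead of on every use. */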
+static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
+ __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 70, __pyx_L1_error)
+ __pyx_builtin_chr = __Pyx_GetBuiltinName(__pyx_n_s_chr); if (!__pyx_builtin_chr) __PYX_ERR(0, 71, __pyx_L1_error)
+ __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 194, __pyx_L1_error)
+ __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 213, __pyx_L1_error)
+ __pyx_builtin_UnicodeDecodeError = __Pyx_GetBuiltinName(__pyx_n_s_UnicodeDecodeError); if (!__pyx_builtin_UnicodeDecodeError) __PYX_ERR(0, 320, __pyx_L1_error)
+ __pyx_builtin_hex = __Pyx_GetBuiltinName(__pyx_n_s_hex); if (!__pyx_builtin_hex) __PYX_ERR(0, 361, __pyx_L1_error)
+ __pyx_builtin_OverflowError = __Pyx_GetBuiltinName(__pyx_n_s_OverflowError); if (!__pyx_builtin_OverflowError) __PYX_ERR(1, 81, __pyx_L1_error)
+ __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 84, __pyx_L1_error)
+ __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 94, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
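+ /* Pre-build the constant objects used by the module: the exception-argument
+    tuples, the [2:] slice, and the code objects backing the two
+    __pyx_unpickle functions. */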
+static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
+
+ /* "yarl/_quoting_c.pyx":194
+ * for ch in safe:
+ * if ord(ch) > 127:
+ * raise ValueError("Only safe symbols with ORD < 128 are allowed") # <<<<<<<<<<<<<<
+ * set_bit(self._safe_table, ch)
+ *
+ */
+ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_Only_safe_symbols_with_ORD_128_a); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 194, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__2);
+ __Pyx_GIVEREF(__pyx_tuple__2);
+
+ /* "yarl/_quoting_c.pyx":213
+ * val = str(val)
+ * else:
+ * raise TypeError("Argument should be str") # <<<<<<<<<<<<<<
+ * _init_writer(&writer)
+ * try:
+ */
+ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_u_Argument_should_be_str); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(0, 213, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__3);
+ __Pyx_GIVEREF(__pyx_tuple__3);
+
+ /* "yarl/_quoting_c.pyx":361
+ * if ch in self._unsafe:
+ * ret.append('%')
+ * h = hex(ord(ch)).upper()[2:] # <<<<<<<<<<<<<<
+ * for ch in h:
+ * ret.append(ch)
+ */
+ __pyx_slice__8 = PySlice_New(__pyx_int_2, Py_None, Py_None); if (unlikely(!__pyx_slice__8)) __PYX_ERR(0, 361, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_slice__8);
+ __Pyx_GIVEREF(__pyx_slice__8);
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle__Quoter(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+ __pyx_tuple__14 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__14);
+ __Pyx_GIVEREF(__pyx_tuple__14);
+ __pyx_codeobj__15 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__14, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle__Quoter, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__15)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __pyx_tuple__16 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_tuple__16);
+ __Pyx_GIVEREF(__pyx_tuple__16);
+ __pyx_codeobj__17 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__16, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle__Unquoter, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__17)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
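+ /* Initialise the string table plus the small-int constants; 41310077 and
+    244432181 are the two pickle checksums verified by the __pyx_unpickle
+    helpers. */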
+static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
+ if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_41310077 = PyInt_FromLong(41310077L); if (unlikely(!__pyx_int_41310077)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_int_244432181 = PyInt_FromLong(244432181L); if (unlikely(!__pyx_int_244432181)) __PYX_ERR(0, 1, __pyx_L1_error)
+ return 0;
+ __pyx_L1_error:;
+ return -1;
+}
+
+static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
+static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
+
+static int __Pyx_modinit_global_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
+ /*--- Global init code ---*/
+ __pyx_v_4yarl_10_quoting_c_GEN_DELIMS = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __pyx_v_4yarl_10_quoting_c_SUB_DELIMS_WITHOUT_QS = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __pyx_v_4yarl_10_quoting_c_SUB_DELIMS = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __pyx_v_4yarl_10_quoting_c_RESERVED = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __pyx_v_4yarl_10_quoting_c_UNRESERVED = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __pyx_v_4yarl_10_quoting_c_ALLOWED = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __pyx_v_4yarl_10_quoting_c_QS = ((PyObject*)Py_None); Py_INCREF(Py_None);
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_variable_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
+ /*--- Variable export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_export_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
+ /*--- Function export code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
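+ /* Type initialisation: fill the C vtables with the cdef method
+    implementations (_do_quote/_write and _do_unquote), ready both types,
+    stash each vtable in tp_dict, expose the types as module attributes
+    _Quoter/_Unquoter, and wire up __reduce__ for pickling. */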
+static int __Pyx_modinit_type_init_code(void) {
+ __Pyx_RefNannyDeclarations
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
+ /*--- Type init code ---*/
+ __pyx_vtabptr_4yarl_10_quoting_c__Quoter = &__pyx_vtable_4yarl_10_quoting_c__Quoter;
+ __pyx_vtable_4yarl_10_quoting_c__Quoter._do_quote = (PyObject *(*)(struct __pyx_obj_4yarl_10_quoting_c__Quoter *, PyObject *, struct __pyx_t_4yarl_10_quoting_c_Writer *))__pyx_f_4yarl_10_quoting_c_7_Quoter__do_quote;
+ __pyx_vtable_4yarl_10_quoting_c__Quoter._write = (int (*)(struct __pyx_obj_4yarl_10_quoting_c__Quoter *, struct __pyx_t_4yarl_10_quoting_c_Writer *, Py_UCS4))__pyx_f_4yarl_10_quoting_c_7_Quoter__write;
+ if (PyType_Ready(&__pyx_type_4yarl_10_quoting_c__Quoter) < 0) __PYX_ERR(0, 169, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_4yarl_10_quoting_c__Quoter.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4yarl_10_quoting_c__Quoter.tp_dictoffset && __pyx_type_4yarl_10_quoting_c__Quoter.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_4yarl_10_quoting_c__Quoter.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (__Pyx_SetVtable(__pyx_type_4yarl_10_quoting_c__Quoter.tp_dict, __pyx_vtabptr_4yarl_10_quoting_c__Quoter) < 0) __PYX_ERR(0, 169, __pyx_L1_error)
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_Quoter, (PyObject *)&__pyx_type_4yarl_10_quoting_c__Quoter) < 0) __PYX_ERR(0, 169, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4yarl_10_quoting_c__Quoter) < 0) __PYX_ERR(0, 169, __pyx_L1_error)
+ __pyx_ptype_4yarl_10_quoting_c__Quoter = &__pyx_type_4yarl_10_quoting_c__Quoter;
+ __pyx_vtabptr_4yarl_10_quoting_c__Unquoter = &__pyx_vtable_4yarl_10_quoting_c__Unquoter;
+ __pyx_vtable_4yarl_10_quoting_c__Unquoter._do_unquote = (PyObject *(*)(struct __pyx_obj_4yarl_10_quoting_c__Unquoter *, PyObject *))__pyx_f_4yarl_10_quoting_c_9_Unquoter__do_unquote;
+ if (PyType_Ready(&__pyx_type_4yarl_10_quoting_c__Unquoter) < 0) __PYX_ERR(0, 271, __pyx_L1_error)
+ #if PY_VERSION_HEX < 0x030800B1
+ __pyx_type_4yarl_10_quoting_c__Unquoter.tp_print = 0;
+ #endif
+ if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_4yarl_10_quoting_c__Unquoter.tp_dictoffset && __pyx_type_4yarl_10_quoting_c__Unquoter.tp_getattro == PyObject_GenericGetAttr)) {
+ __pyx_type_4yarl_10_quoting_c__Unquoter.tp_getattro = __Pyx_PyObject_GenericGetAttr;
+ }
+ if (__Pyx_SetVtable(__pyx_type_4yarl_10_quoting_c__Unquoter.tp_dict, __pyx_vtabptr_4yarl_10_quoting_c__Unquoter) < 0) __PYX_ERR(0, 271, __pyx_L1_error)
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_Unquoter, (PyObject *)&__pyx_type_4yarl_10_quoting_c__Unquoter) < 0) __PYX_ERR(0, 271, __pyx_L1_error)
+ if (__Pyx_setup_reduce((PyObject*)&__pyx_type_4yarl_10_quoting_c__Unquoter) < 0) __PYX_ERR(0, 271, __pyx_L1_error)
+ __pyx_ptype_4yarl_10_quoting_c__Unquoter = &__pyx_type_4yarl_10_quoting_c__Unquoter;
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static int __Pyx_modinit_type_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ PyObject *__pyx_t_1 = NULL;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
+ /*--- Type import code ---*/
+ __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 9, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
+ #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
+ sizeof(PyTypeObject),
+ #else
+ sizeof(PyHeapTypeObject),
+ #endif
+ __Pyx_ImportType_CheckSize_Warn);
+ if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(2, 9, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_RefNannyFinishContext();
+ return 0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_RefNannyFinishContext();
+ return -1;
+}
+
+static int __Pyx_modinit_variable_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
+ /*--- Variable import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+static int __Pyx_modinit_function_import_code(void) {
+ __Pyx_RefNannyDeclarations
+ __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
+ /*--- Function import code ---*/
+ __Pyx_RefNannyFinishContext();
+ return 0;
+}
+
+
+#ifndef CYTHON_NO_PYINIT_EXPORT
+#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
+#elif PY_MAJOR_VERSION < 3
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" void
+#else
+#define __Pyx_PyMODINIT_FUNC void
+#endif
+#else
+#ifdef __cplusplus
+#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
+#else
+#define __Pyx_PyMODINIT_FUNC PyObject *
+#endif
+#endif
+
+
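+ /* Entry point. With PEP 489 multi-phase init, PyInit__quoting_c only returns
+    the module definition; module creation happens in __pyx_pymod_create and
+    the compiled module body runs in __pyx_pymod_exec__quoting_c below. */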
+#if PY_MAJOR_VERSION < 3
+__Pyx_PyMODINIT_FUNC init_quoting_c(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC init_quoting_c(void)
+#else
+__Pyx_PyMODINIT_FUNC PyInit__quoting_c(void) CYTHON_SMALL_CODE; /*proto*/
+__Pyx_PyMODINIT_FUNC PyInit__quoting_c(void)
+#if CYTHON_PEP489_MULTI_PHASE_INIT
+{
+ return PyModuleDef_Init(&__pyx_moduledef);
+}
+static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
+ #if PY_VERSION_HEX >= 0x030700A1
+ static PY_INT64_T main_interpreter_id = -1;
+ PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
+ if (main_interpreter_id == -1) {
+ main_interpreter_id = current_id;
+ return (unlikely(current_id == -1)) ? -1 : 0;
+ } else if (unlikely(main_interpreter_id != current_id))
+ #else
+ static PyInterpreterState *main_interpreter = NULL;
+ PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
+ if (!main_interpreter) {
+ main_interpreter = current_interpreter;
+ } else if (unlikely(main_interpreter != current_interpreter))
+ #endif
+ {
+ PyErr_SetString(
+ PyExc_ImportError,
+ "Interpreter change detected - this module can only be loaded into one interpreter per process.");
+ return -1;
+ }
+ return 0;
+}
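+ /* Py_mod_create phase: refuse subinterpreters, reuse an already-initialised
+    module, otherwise create a fresh one and copy loader/origin/parent/
+    submodule_search_locations from the import spec into __loader__/__file__/
+    __package__/__path__. */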
+static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
+ PyObject *value = PyObject_GetAttrString(spec, from_name);
+ int result = 0;
+ if (likely(value)) {
+ if (allow_none || value != Py_None) {
+ result = PyDict_SetItemString(moddict, to_name, value);
+ }
+ Py_DECREF(value);
+ } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Clear();
+ } else {
+ result = -1;
+ }
+ return result;
+}
+static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
+ PyObject *module = NULL, *moddict, *modname;
+ if (__Pyx_check_single_interpreter())
+ return NULL;
+ if (__pyx_m)
+ return __Pyx_NewRef(__pyx_m);
+ modname = PyObject_GetAttrString(spec, "name");
+ if (unlikely(!modname)) goto bad;
+ module = PyModule_NewObject(modname);
+ Py_DECREF(modname);
+ if (unlikely(!module)) goto bad;
+ moddict = PyModule_GetDict(module);
+ if (unlikely(!moddict)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
+ if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
+ return module;
+bad:
+ Py_XDECREF(module);
+ return NULL;
+}
+
+
+static CYTHON_SMALL_CODE int __pyx_pymod_exec__quoting_c(PyObject *__pyx_pyinit_module)
+#endif
+#endif
+{
+ PyObject *__pyx_t_1 = NULL;
+ PyObject *__pyx_t_2 = NULL;
+ PyObject *__pyx_t_3 = NULL;
+ long __pyx_t_4;
+ int __pyx_t_5;
+ int __pyx_t_6;
+ uint64_t __pyx_t_7;
+ int __pyx_lineno = 0;
+ const char *__pyx_filename = NULL;
+ int __pyx_clineno = 0;
+ __Pyx_RefNannyDeclarations
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ if (__pyx_m) {
+ if (__pyx_m == __pyx_pyinit_module) return 0;
+ PyErr_SetString(PyExc_RuntimeError, "Module '_quoting_c' has already been imported. Re-initialisation is not supported.");
+ return -1;
+ }
+ #elif PY_MAJOR_VERSION >= 3
+ if (__pyx_m) return __Pyx_NewRef(__pyx_m);
+ #endif
+ #if CYTHON_REFNANNY
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
+ if (!__Pyx_RefNanny) {
+ PyErr_Clear();
+ __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
+ if (!__Pyx_RefNanny)
+ Py_FatalError("failed to import 'refnanny' module");
+ }
+ #endif
+ __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__quoting_c(void)", 0);
+ if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pxy_PyFrame_Initialize_Offsets
+ __Pxy_PyFrame_Initialize_Offsets();
+ #endif
+ __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #ifdef __Pyx_CyFunction_USED
+ if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_FusedFunction_USED
+ if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Coroutine_USED
+ if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_Generator_USED
+ if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_AsyncGen_USED
+ if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ #ifdef __Pyx_StopAsyncIteration_USED
+ if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ /*--- Library function declarations ---*/
+ /*--- Threads initialization code ---*/
+ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
+ #ifdef WITH_THREAD /* Python build with threading support? */
+ PyEval_InitThreads();
+ #endif
+ #endif
+ /*--- Module creation code ---*/
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ __pyx_m = __pyx_pyinit_module;
+ Py_INCREF(__pyx_m);
+ #else
+ #if PY_MAJOR_VERSION < 3
+ __pyx_m = Py_InitModule4("_quoting_c", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
+ #else
+ __pyx_m = PyModule_Create(&__pyx_moduledef);
+ #endif
+ if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_d);
+ __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_b);
+ __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
+ Py_INCREF(__pyx_cython_runtime);
+ if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
+ /*--- Initialize various global constants etc. ---*/
+ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
+ if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+ if (__pyx_module_is_main_yarl___quoting_c) {
+ if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ #if PY_MAJOR_VERSION >= 3
+ {
+ PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (!PyDict_GetItemString(modules, "yarl._quoting_c")) {
+ if (unlikely(PyDict_SetItemString(modules, "yarl._quoting_c", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ }
+ }
+ #endif
+ /*--- Builtin init code ---*/
+ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Constants init code ---*/
+ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ /*--- Global type/function init code ---*/
+ (void)__Pyx_modinit_global_init_code();
+ (void)__Pyx_modinit_variable_export_code();
+ (void)__Pyx_modinit_function_export_code();
+ if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
+ (void)__Pyx_modinit_variable_import_code();
+ (void)__Pyx_modinit_function_import_code();
+ /*--- Execution code ---*/
+ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
+ if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ #endif
+
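+ /* From here on the exec phase runs the compiled module body: the
+    `from string import ascii_letters, digits`, the RFC 3986 delimiter and
+    character-class strings, and the construction of the ALLOWED bit tables. */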
+ /* "yarl/_quoting_c.pyx":10
+ * from cpython.unicode cimport PyUnicode_DecodeASCII, PyUnicode_DecodeUTF8Stateful
+ *
+ * from string import ascii_letters, digits # <<<<<<<<<<<<<<
+ *
+ * cdef str GEN_DELIMS = ":/?#[]@"
+ */
+ __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_INCREF(__pyx_n_s_ascii_letters);
+ __Pyx_GIVEREF(__pyx_n_s_ascii_letters);
+ PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_ascii_letters);
+ __Pyx_INCREF(__pyx_n_s_digits);
+ __Pyx_GIVEREF(__pyx_n_s_digits);
+ PyList_SET_ITEM(__pyx_t_1, 1, __pyx_n_s_digits);
+ __pyx_t_2 = __Pyx_Import(__pyx_n_s_string, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 10, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_ascii_letters); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_ascii_letters, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_digits); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_digits, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+
+ /* "yarl/_quoting_c.pyx":12
+ * from string import ascii_letters, digits
+ *
+ * cdef str GEN_DELIMS = ":/?#[]@" # <<<<<<<<<<<<<<
+ * cdef str SUB_DELIMS_WITHOUT_QS = "!$'()*,"
+ * cdef str SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + '+?=;'
+ */
+ __Pyx_INCREF(__pyx_kp_u__9);
+ __Pyx_XGOTREF(__pyx_v_4yarl_10_quoting_c_GEN_DELIMS);
+ __Pyx_DECREF_SET(__pyx_v_4yarl_10_quoting_c_GEN_DELIMS, __pyx_kp_u__9);
+ __Pyx_GIVEREF(__pyx_kp_u__9);
+
+ /* "yarl/_quoting_c.pyx":13
+ *
+ * cdef str GEN_DELIMS = ":/?#[]@"
+ * cdef str SUB_DELIMS_WITHOUT_QS = "!$'()*," # <<<<<<<<<<<<<<
+ * cdef str SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + '+?=;'
+ * cdef str RESERVED = GEN_DELIMS + SUB_DELIMS
+ */
+ __Pyx_INCREF(__pyx_kp_u__10);
+ __Pyx_XGOTREF(__pyx_v_4yarl_10_quoting_c_SUB_DELIMS_WITHOUT_QS);
+ __Pyx_DECREF_SET(__pyx_v_4yarl_10_quoting_c_SUB_DELIMS_WITHOUT_QS, __pyx_kp_u__10);
+ __Pyx_GIVEREF(__pyx_kp_u__10);
+
+ /* "yarl/_quoting_c.pyx":14
+ * cdef str GEN_DELIMS = ":/?#[]@"
+ * cdef str SUB_DELIMS_WITHOUT_QS = "!$'()*,"
+ * cdef str SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + '+?=;' # <<<<<<<<<<<<<<
+ * cdef str RESERVED = GEN_DELIMS + SUB_DELIMS
+ * cdef str UNRESERVED = ascii_letters + digits + '-._~'
+ */
+ __pyx_t_2 = __Pyx_PyUnicode_ConcatSafe(__pyx_v_4yarl_10_quoting_c_SUB_DELIMS_WITHOUT_QS, __pyx_kp_u__11); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_v_4yarl_10_quoting_c_SUB_DELIMS);
+ __Pyx_DECREF_SET(__pyx_v_4yarl_10_quoting_c_SUB_DELIMS, ((PyObject*)__pyx_t_2));
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "yarl/_quoting_c.pyx":15
+ * cdef str SUB_DELIMS_WITHOUT_QS = "!$'()*,"
+ * cdef str SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + '+?=;'
+ * cdef str RESERVED = GEN_DELIMS + SUB_DELIMS # <<<<<<<<<<<<<<
+ * cdef str UNRESERVED = ascii_letters + digits + '-._~'
+ * cdef str ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS
+ */
+ __pyx_t_2 = __Pyx_PyUnicode_ConcatSafe(__pyx_v_4yarl_10_quoting_c_GEN_DELIMS, __pyx_v_4yarl_10_quoting_c_SUB_DELIMS); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_XGOTREF(__pyx_v_4yarl_10_quoting_c_RESERVED);
+ __Pyx_DECREF_SET(__pyx_v_4yarl_10_quoting_c_RESERVED, ((PyObject*)__pyx_t_2));
+ __Pyx_GIVEREF(__pyx_t_2);
+ __pyx_t_2 = 0;
+
+ /* "yarl/_quoting_c.pyx":16
+ * cdef str SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + '+?=;'
+ * cdef str RESERVED = GEN_DELIMS + SUB_DELIMS
+ * cdef str UNRESERVED = ascii_letters + digits + '-._~' # <<<<<<<<<<<<<<
+ * cdef str ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS
+ * cdef str QS = '+&=;'
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_ascii_letters); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_2);
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_digits); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = PyNumber_Add(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_1 = PyNumber_Add(__pyx_t_3, __pyx_kp_u__12); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (!(likely(PyUnicode_CheckExact(__pyx_t_1))||((__pyx_t_1) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "unicode", Py_TYPE(__pyx_t_1)->tp_name), 0))) __PYX_ERR(0, 16, __pyx_L1_error)
+ __Pyx_XGOTREF(__pyx_v_4yarl_10_quoting_c_UNRESERVED);
+ __Pyx_DECREF_SET(__pyx_v_4yarl_10_quoting_c_UNRESERVED, ((PyObject*)__pyx_t_1));
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "yarl/_quoting_c.pyx":17
+ * cdef str RESERVED = GEN_DELIMS + SUB_DELIMS
+ * cdef str UNRESERVED = ascii_letters + digits + '-._~'
+ * cdef str ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS # <<<<<<<<<<<<<<
+ * cdef str QS = '+&=;'
+ *
+ */
+ __pyx_t_1 = __Pyx_PyUnicode_ConcatSafe(__pyx_v_4yarl_10_quoting_c_UNRESERVED, __pyx_v_4yarl_10_quoting_c_SUB_DELIMS_WITHOUT_QS); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_XGOTREF(__pyx_v_4yarl_10_quoting_c_ALLOWED);
+ __Pyx_DECREF_SET(__pyx_v_4yarl_10_quoting_c_ALLOWED, ((PyObject*)__pyx_t_1));
+ __Pyx_GIVEREF(__pyx_t_1);
+ __pyx_t_1 = 0;
+
+ /* "yarl/_quoting_c.pyx":18
+ * cdef str UNRESERVED = ascii_letters + digits + '-._~'
+ * cdef str ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS
+ * cdef str QS = '+&=;' # <<<<<<<<<<<<<<
+ *
+ * DEF BUF_SIZE = 8 * 1024 # 8KiB
+ */
+ __Pyx_INCREF(__pyx_kp_u__13);
+ __Pyx_XGOTREF(__pyx_v_4yarl_10_quoting_c_QS);
+ __Pyx_DECREF_SET(__pyx_v_4yarl_10_quoting_c_QS, __pyx_kp_u__13);
+ __Pyx_GIVEREF(__pyx_kp_u__13);
+
+ /* "yarl/_quoting_c.pyx":67
+ *
+ *
+ * memset(ALLOWED_TABLE, 0, sizeof(ALLOWED_TABLE)) # <<<<<<<<<<<<<<
+ * memset(ALLOWED_NOTQS_TABLE, 0, sizeof(ALLOWED_NOTQS_TABLE))
+ *
+ */
+ (void)(memset(__pyx_v_4yarl_10_quoting_c_ALLOWED_TABLE, 0, (sizeof(__pyx_v_4yarl_10_quoting_c_ALLOWED_TABLE))));
+
+ /* "yarl/_quoting_c.pyx":68
+ *
+ * memset(ALLOWED_TABLE, 0, sizeof(ALLOWED_TABLE))
+ * memset(ALLOWED_NOTQS_TABLE, 0, sizeof(ALLOWED_NOTQS_TABLE)) # <<<<<<<<<<<<<<
+ *
+ * for i in range(128):
+ */
+ (void)(memset(__pyx_v_4yarl_10_quoting_c_ALLOWED_NOTQS_TABLE, 0, (sizeof(__pyx_v_4yarl_10_quoting_c_ALLOWED_NOTQS_TABLE))));
+
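+ /* Build the two bitmaps covering the 128 ASCII code points: ALLOWED_TABLE
+    marks characters that never need quoting, ALLOWED_NOTQS_TABLE additionally
+    admits the query-string characters in QS. Note that `i` lives in the module
+    dict, so every use below round-trips through PyDict_SetItem and
+    __Pyx_GetModuleGlobalName. */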
+ /* "yarl/_quoting_c.pyx":70
+ * memset(ALLOWED_NOTQS_TABLE, 0, sizeof(ALLOWED_NOTQS_TABLE))
+ *
+ * for i in range(128): # <<<<<<<<<<<<<<
+ * if chr(i) in ALLOWED:
+ * set_bit(ALLOWED_TABLE, i)
+ */
+ for (__pyx_t_4 = 0; __pyx_t_4 < 0x80; __pyx_t_4+=1) {
+ __pyx_t_1 = __Pyx_PyInt_From_long(__pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_i, __pyx_t_1) < 0) __PYX_ERR(0, 70, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "yarl/_quoting_c.pyx":71
+ *
+ * for i in range(128):
+ * if chr(i) in ALLOWED: # <<<<<<<<<<<<<<
+ * set_bit(ALLOWED_TABLE, i)
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 71, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_chr, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 71, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ if (unlikely(__pyx_v_4yarl_10_quoting_c_ALLOWED == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+ __PYX_ERR(0, 71, __pyx_L1_error)
+ }
+ __pyx_t_5 = (__Pyx_PyUnicode_ContainsTF(__pyx_t_3, __pyx_v_4yarl_10_quoting_c_ALLOWED, Py_EQ)); if (unlikely(__pyx_t_5 < 0)) __PYX_ERR(0, 71, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_t_6 = (__pyx_t_5 != 0);
+ if (__pyx_t_6) {
+
+ /* "yarl/_quoting_c.pyx":72
+ * for i in range(128):
+ * if chr(i) in ALLOWED:
+ * set_bit(ALLOWED_TABLE, i) # <<<<<<<<<<<<<<
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ * if chr(i) in QS:
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_i); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 72, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_7 = __Pyx_PyInt_As_uint64_t(__pyx_t_3); if (unlikely((__pyx_t_7 == ((uint64_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 72, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_f_4yarl_10_quoting_c_set_bit(__pyx_v_4yarl_10_quoting_c_ALLOWED_TABLE, __pyx_t_7);
+
+ /* "yarl/_quoting_c.pyx":73
+ * if chr(i) in ALLOWED:
+ * set_bit(ALLOWED_TABLE, i)
+ * set_bit(ALLOWED_NOTQS_TABLE, i) # <<<<<<<<<<<<<<
+ * if chr(i) in QS:
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_i); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 73, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_7 = __Pyx_PyInt_As_uint64_t(__pyx_t_3); if (unlikely((__pyx_t_7 == ((uint64_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 73, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ __pyx_f_4yarl_10_quoting_c_set_bit(__pyx_v_4yarl_10_quoting_c_ALLOWED_NOTQS_TABLE, __pyx_t_7);
+
+ /* "yarl/_quoting_c.pyx":71
+ *
+ * for i in range(128):
+ * if chr(i) in ALLOWED: # <<<<<<<<<<<<<<
+ * set_bit(ALLOWED_TABLE, i)
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ */
+ }
+
+ /* "yarl/_quoting_c.pyx":74
+ * set_bit(ALLOWED_TABLE, i)
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ * if chr(i) in QS: # <<<<<<<<<<<<<<
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ *
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_i); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 74, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_3);
+ __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_builtin_chr, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 74, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
+ if (unlikely(__pyx_v_4yarl_10_quoting_c_QS == Py_None)) {
+ PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
+ __PYX_ERR(0, 74, __pyx_L1_error)
+ }
+ __pyx_t_6 = (__Pyx_PyUnicode_ContainsTF(__pyx_t_1, __pyx_v_4yarl_10_quoting_c_QS, Py_EQ)); if (unlikely(__pyx_t_6 < 0)) __PYX_ERR(0, 74, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_t_5 = (__pyx_t_6 != 0);
+ if (__pyx_t_5) {
+
+ /* "yarl/_quoting_c.pyx":75
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ * if chr(i) in QS:
+ * set_bit(ALLOWED_NOTQS_TABLE, i) # <<<<<<<<<<<<<<
+ *
+ * # ----------------- writer ---------------------------
+ */
+ __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 75, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ __pyx_t_7 = __Pyx_PyInt_As_uint64_t(__pyx_t_1); if (unlikely((__pyx_t_7 == ((uint64_t)-1)) && PyErr_Occurred())) __PYX_ERR(0, 75, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+ __pyx_f_4yarl_10_quoting_c_set_bit(__pyx_v_4yarl_10_quoting_c_ALLOWED_NOTQS_TABLE, __pyx_t_7);
+
+ /* "yarl/_quoting_c.pyx":74
+ * set_bit(ALLOWED_TABLE, i)
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ * if chr(i) in QS: # <<<<<<<<<<<<<<
+ * set_bit(ALLOWED_NOTQS_TABLE, i)
+ *
+ */
+ }
+ }
+
+ /* "(tree fragment)":1
+ * def __pyx_unpickle__Quoter(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
+ * cdef object __pyx_PickleError
+ * cdef object __pyx_result
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4yarl_10_quoting_c_1__pyx_unpickle__Quoter, NULL, __pyx_n_s_yarl__quoting_c); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle__Quoter, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "(tree fragment)":11
+ * __pyx_unpickle__Quoter__set_state(<_Quoter> __pyx_result, __pyx_state)
+ * return __pyx_result
+ * cdef __pyx_unpickle__Quoter__set_state(_Quoter __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
+ * __pyx_result._protected_table = __pyx_state[0]; __pyx_result._qs = __pyx_state[1]; __pyx_result._requote = __pyx_state[2]; __pyx_result._safe_table = __pyx_state[3]
+ * if len(__pyx_state) > 4 and hasattr(__pyx_result, '__dict__'):
+ */
+ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_4yarl_10_quoting_c_3__pyx_unpickle__Unquoter, NULL, __pyx_n_s_yarl__quoting_c); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle__Unquoter, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "yarl/_quoting_c.pyx":1
+ * # cython: language_level=3 # <<<<<<<<<<<<<<
+ *
+ * from libc.stdint cimport uint8_t, uint64_t
+ */
+ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_GOTREF(__pyx_t_1);
+ if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
+ __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
+
+ /* "carray.from_py":77
+ *
+ * @cname("__Pyx_carray_from_py_uint8_t")
+ * cdef int __Pyx_carray_from_py_uint8_t(object o, base_type *v, Py_ssize_t length) except -1: # <<<<<<<<<<<<<<
+ * cdef Py_ssize_t i = length
+ * try:
+ */
+
+ /*--- Wrapped vars code ---*/
+
+ goto __pyx_L0;
+ __pyx_L1_error:;
+ __Pyx_XDECREF(__pyx_t_1);
+ __Pyx_XDECREF(__pyx_t_2);
+ __Pyx_XDECREF(__pyx_t_3);
+ if (__pyx_m) {
+ if (__pyx_d) {
+ __Pyx_AddTraceback("init yarl._quoting_c", __pyx_clineno, __pyx_lineno, __pyx_filename);
+ }
+ Py_CLEAR(__pyx_m);
+ } else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_ImportError, "init yarl._quoting_c");
+ }
+ __pyx_L0:;
+ __Pyx_RefNannyFinishContext();
+ #if CYTHON_PEP489_MULTI_PHASE_INIT
+ return (__pyx_m != NULL) ? 0 : -1;
+ #elif PY_MAJOR_VERSION >= 3
+ return __pyx_m;
+ #else
+ return;
+ #endif
+}
+
+/* --- Runtime support code --- */
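+ /* Everything below is Cython's stock utility-code library, emitted into every
+    generated module that needs it; nothing here is specific to yarl. */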
+/* Refnanny */
+#if CYTHON_REFNANNY
+static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
+ PyObject *m = NULL, *p = NULL;
+ void *r = NULL;
+ m = PyImport_ImportModule(modname);
+ if (!m) goto end;
+ p = PyObject_GetAttrString(m, "RefNannyAPI");
+ if (!p) goto end;
+ r = PyLong_AsVoidPtr(p);
+end:
+ Py_XDECREF(p);
+ Py_XDECREF(m);
+ return (__Pyx_RefNannyAPIStruct *)r;
+}
+#endif
+
+/* PyObjectGetAttrStr */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro))
+ return tp->tp_getattro(obj, attr_name);
+#if PY_MAJOR_VERSION < 3
+ if (likely(tp->tp_getattr))
+ return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
+#endif
+ return PyObject_GetAttr(obj, attr_name);
+}
+#endif
+
+/* GetBuiltinName */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
+ PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
+ if (unlikely(!result)) {
+ PyErr_Format(PyExc_NameError,
+#if PY_MAJOR_VERSION >= 3
+ "name '%U' is not defined", name);
+#else
+ "name '%.200s' is not defined", PyString_AS_STRING(name));
+#endif
+ }
+ return result;
+}
+
+/* PyErrFetchRestore */
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ tmp_type = tstate->curexc_type;
+ tmp_value = tstate->curexc_value;
+ tmp_tb = tstate->curexc_traceback;
+ tstate->curexc_type = type;
+ tstate->curexc_value = value;
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ *type = tstate->curexc_type;
+ *value = tstate->curexc_value;
+ *tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+}
+#endif
+
+/* WriteUnraisableException */
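+ /* Report an exception from a context where it cannot propagate (deallocators,
+    nogil sections): re-acquire the GIL if needed, optionally print the full
+    traceback, and hand the failing context's name to PyErr_WriteUnraisable. */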
+static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno,
+ CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename,
+ int full_traceback, CYTHON_UNUSED int nogil) {
+ PyObject *old_exc, *old_val, *old_tb;
+ PyObject *ctx;
+ __Pyx_PyThreadState_declare
+#ifdef WITH_THREAD
+ PyGILState_STATE state;
+ if (nogil)
+ state = PyGILState_Ensure();
+#ifdef _MSC_VER
+ else state = (PyGILState_STATE)-1;
+#endif
+#endif
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&old_exc, &old_val, &old_tb);
+ if (full_traceback) {
+ Py_XINCREF(old_exc);
+ Py_XINCREF(old_val);
+ Py_XINCREF(old_tb);
+ __Pyx_ErrRestore(old_exc, old_val, old_tb);
+ PyErr_PrintEx(1);
+ }
+ #if PY_MAJOR_VERSION < 3
+ ctx = PyString_FromString(name);
+ #else
+ ctx = PyUnicode_FromString(name);
+ #endif
+ __Pyx_ErrRestore(old_exc, old_val, old_tb);
+ if (!ctx) {
+ PyErr_WriteUnraisable(Py_None);
+ } else {
+ PyErr_WriteUnraisable(ctx);
+ Py_DECREF(ctx);
+ }
+#ifdef WITH_THREAD
+ if (nogil)
+ PyGILState_Release(state);
+#endif
+}
+
+/* RaiseDoubleKeywords */
+static void __Pyx_RaiseDoubleKeywordsError(
+ const char* func_name,
+ PyObject* kw_name)
+{
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION >= 3
+ "%s() got multiple values for keyword argument '%U'", func_name, kw_name);
+ #else
+ "%s() got multiple values for keyword argument '%s'", func_name,
+ PyString_AsString(kw_name));
+ #endif
+}
+
+/* ParseKeywords */
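+ /* Keyword-argument matching in two passes: first pointer identity against the
+    interned expected names (the common case), then a proper string comparison.
+    Unknown keywords go into kwds2 (**kwargs) when present, otherwise raise
+    TypeError; a keyword that also arrived positionally raises via
+    __Pyx_RaiseDoubleKeywordsError. */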
+static int __Pyx_ParseOptionalKeywords(
+ PyObject *kwds,
+ PyObject **argnames[],
+ PyObject *kwds2,
+ PyObject *values[],
+ Py_ssize_t num_pos_args,
+ const char* function_name)
+{
+ PyObject *key = 0, *value = 0;
+ Py_ssize_t pos = 0;
+ PyObject*** name;
+ PyObject*** first_kw_arg = argnames + num_pos_args;
+ while (PyDict_Next(kwds, &pos, &key, &value)) {
+ name = first_kw_arg;
+ while (*name && (**name != key)) name++;
+ if (*name) {
+ values[name-argnames] = value;
+ continue;
+ }
+ name = first_kw_arg;
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyString_Check(key))) {
+ while (*name) {
+ if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**name, key)) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ if ((**argname == key) || (
+ (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
+ && _PyString_Eq(**argname, key))) {
+ goto arg_passed_twice;
+ }
+ argname++;
+ }
+ }
+ } else
+ #endif
+ if (likely(PyUnicode_Check(key))) {
+ while (*name) {
+ int cmp = (**name == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**name, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) {
+ values[name-argnames] = value;
+ break;
+ }
+ name++;
+ }
+ if (*name) continue;
+ else {
+ PyObject*** argname = argnames;
+ while (argname != first_kw_arg) {
+ int cmp = (**argname == key) ? 0 :
+ #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
+ (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
+ #endif
+ PyUnicode_Compare(**argname, key);
+ if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
+ if (cmp == 0) goto arg_passed_twice;
+ argname++;
+ }
+ }
+ } else
+ goto invalid_keyword_type;
+ if (kwds2) {
+ if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
+ } else {
+ goto invalid_keyword;
+ }
+ }
+ return 0;
+arg_passed_twice:
+ __Pyx_RaiseDoubleKeywordsError(function_name, key);
+ goto bad;
+invalid_keyword_type:
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() keywords must be strings", function_name);
+ goto bad;
+invalid_keyword:
+ PyErr_Format(PyExc_TypeError,
+ #if PY_MAJOR_VERSION < 3
+ "%.200s() got an unexpected keyword argument '%.200s'",
+ function_name, PyString_AsString(key));
+ #else
+ "%s() got an unexpected keyword argument '%U'",
+ function_name, key);
+ #endif
+bad:
+ return -1;
+}
+
+/* RaiseArgTupleInvalid */
+static void __Pyx_RaiseArgtupleInvalid(
+ const char* func_name,
+ int exact,
+ Py_ssize_t num_min,
+ Py_ssize_t num_max,
+ Py_ssize_t num_found)
+{
+ Py_ssize_t num_expected;
+ const char *more_or_less;
+ if (num_found < num_min) {
+ num_expected = num_min;
+ more_or_less = "at least";
+ } else {
+ num_expected = num_max;
+ more_or_less = "at most";
+ }
+ if (exact) {
+ more_or_less = "exactly";
+ }
+ PyErr_Format(PyExc_TypeError,
+ "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
+ func_name, more_or_less, num_expected,
+ (num_expected == 1) ? "" : "s", num_found);
+}
+
+/* ArgTypeTest */
+static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
+{
+ if (unlikely(!type)) {
+ PyErr_SetString(PyExc_SystemError, "Missing type object");
+ return 0;
+ }
+ else if (exact) {
+ #if PY_MAJOR_VERSION == 2
+ if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
+ #endif
+ }
+ else {
+ if (likely(__Pyx_TypeCheck(obj, type))) return 1;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
+ name, type->tp_name, Py_TYPE(obj)->tp_name);
+ return 0;
+}
+
+/* unicode_iter */
+static CYTHON_INLINE int __Pyx_init_unicode_iteration(
+ PyObject* ustring, Py_ssize_t *length, void** data, int *kind) {
+#if CYTHON_PEP393_ENABLED
+ if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return -1;
+ *kind = PyUnicode_KIND(ustring);
+ *length = PyUnicode_GET_LENGTH(ustring);
+ *data = PyUnicode_DATA(ustring);
+#else
+ *kind = 0;
+ *length = PyUnicode_GET_SIZE(ustring);
+ *data = (void*)PyUnicode_AS_UNICODE(ustring);
+#endif
+ return 0;
+}
+
+/* PyObjectCall */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
+ PyObject *result;
+ ternaryfunc call = func->ob_type->tp_call;
+ if (unlikely(!call))
+ return PyObject_Call(func, arg, kw);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = (*call)(func, arg, kw);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* RaiseException */
+#if PY_MAJOR_VERSION < 3
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
+ CYTHON_UNUSED PyObject *cause) {
+ __Pyx_PyThreadState_declare
+ Py_XINCREF(type);
+ if (!value || value == Py_None)
+ value = NULL;
+ else
+ Py_INCREF(value);
+ if (!tb || tb == Py_None)
+ tb = NULL;
+ else {
+ Py_INCREF(tb);
+ if (!PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto raise_error;
+ }
+ }
+ if (PyType_Check(type)) {
+#if CYTHON_COMPILING_IN_PYPY
+ if (!value) {
+ Py_INCREF(Py_None);
+ value = Py_None;
+ }
+#endif
+ PyErr_NormalizeException(&type, &value, &tb);
+ } else {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto raise_error;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(type);
+ Py_INCREF(type);
+ if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto raise_error;
+ }
+ }
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrRestore(type, value, tb);
+ return;
+raise_error:
+ Py_XDECREF(value);
+ Py_XDECREF(type);
+ Py_XDECREF(tb);
+ return;
+}
+#else
+static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
+ PyObject* owned_instance = NULL;
+ if (tb == Py_None) {
+ tb = 0;
+ } else if (tb && !PyTraceBack_Check(tb)) {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: arg 3 must be a traceback or None");
+ goto bad;
+ }
+ if (value == Py_None)
+ value = 0;
+ if (PyExceptionInstance_Check(type)) {
+ if (value) {
+ PyErr_SetString(PyExc_TypeError,
+ "instance exception may not have a separate value");
+ goto bad;
+ }
+ value = type;
+ type = (PyObject*) Py_TYPE(value);
+ } else if (PyExceptionClass_Check(type)) {
+ PyObject *instance_class = NULL;
+ if (value && PyExceptionInstance_Check(value)) {
+ instance_class = (PyObject*) Py_TYPE(value);
+ if (instance_class != type) {
+ int is_subclass = PyObject_IsSubclass(instance_class, type);
+ if (!is_subclass) {
+ instance_class = NULL;
+ } else if (unlikely(is_subclass == -1)) {
+ goto bad;
+ } else {
+ type = instance_class;
+ }
+ }
+ }
+ if (!instance_class) {
+ PyObject *args;
+ if (!value)
+ args = PyTuple_New(0);
+ else if (PyTuple_Check(value)) {
+ Py_INCREF(value);
+ args = value;
+ } else
+ args = PyTuple_Pack(1, value);
+ if (!args)
+ goto bad;
+ owned_instance = PyObject_Call(type, args, NULL);
+ Py_DECREF(args);
+ if (!owned_instance)
+ goto bad;
+ value = owned_instance;
+ if (!PyExceptionInstance_Check(value)) {
+ PyErr_Format(PyExc_TypeError,
+ "calling %R should have returned an instance of "
+ "BaseException, not %R",
+ type, Py_TYPE(value));
+ goto bad;
+ }
+ }
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "raise: exception class must be a subclass of BaseException");
+ goto bad;
+ }
+ if (cause) {
+ PyObject *fixed_cause;
+ if (cause == Py_None) {
+ fixed_cause = NULL;
+ } else if (PyExceptionClass_Check(cause)) {
+ fixed_cause = PyObject_CallObject(cause, NULL);
+ if (fixed_cause == NULL)
+ goto bad;
+ } else if (PyExceptionInstance_Check(cause)) {
+ fixed_cause = cause;
+ Py_INCREF(fixed_cause);
+ } else {
+ PyErr_SetString(PyExc_TypeError,
+ "exception causes must derive from "
+ "BaseException");
+ goto bad;
+ }
+ PyException_SetCause(value, fixed_cause);
+ }
+ PyErr_SetObject(type, value);
+ if (tb) {
+#if CYTHON_COMPILING_IN_PYPY
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
+ Py_INCREF(tb);
+ PyErr_Restore(tmp_type, tmp_value, tb);
+ Py_XDECREF(tmp_tb);
+#else
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject* tmp_tb = tstate->curexc_traceback;
+ if (tb != tmp_tb) {
+ Py_INCREF(tb);
+ tstate->curexc_traceback = tb;
+ Py_XDECREF(tmp_tb);
+ }
+#endif
+ }
+bad:
+ Py_XDECREF(owned_instance);
+ return;
+}
+#endif
+
+/* PyCFunctionFastCall */
+#if CYTHON_FAST_PYCCALL
+static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
+ PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
+ PyCFunction meth = PyCFunction_GET_FUNCTION(func);
+ PyObject *self = PyCFunction_GET_SELF(func);
+ int flags = PyCFunction_GET_FLAGS(func);
+ assert(PyCFunction_Check(func));
+ assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
+ assert(nargs >= 0);
+ assert(nargs == 0 || args != NULL);
+ /* _PyCFunction_FastCallDict() must not be called with an exception set,
+ because it may clear it (directly or indirectly) and so the
+ caller loses its exception */
+ assert(!PyErr_Occurred());
+ if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
+ return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
+ } else {
+ return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
+ }
+}
+#endif
+
+/* PyFunctionFastCall */
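+ /* Fast path for calling plain Python functions: when there are no keyword
+    arguments, the code object carries only CO_OPTIMIZED|CO_NEWLOCALS|CO_NOFREE,
+    and the positional count lines up (directly or entirely via defaults), the
+    arguments are copied straight into a fresh frame and run with
+    PyEval_EvalFrameEx; anything fancier falls back to PyEval_EvalCodeEx. */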
+#if CYTHON_FAST_PYCALL
+static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
+ PyObject *globals) {
+ PyFrameObject *f;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ PyObject **fastlocals;
+ Py_ssize_t i;
+ PyObject *result;
+ assert(globals != NULL);
+ /* XXX Perhaps we should create a specialized
+ PyFrame_New() that doesn't take locals, but does
+ take builtins without sanity checking them.
+ */
+ assert(tstate != NULL);
+ f = PyFrame_New(tstate, co, globals, NULL);
+ if (f == NULL) {
+ return NULL;
+ }
+ fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
+ for (i = 0; i < na; i++) {
+ Py_INCREF(*args);
+ fastlocals[i] = *args++;
+ }
+ result = PyEval_EvalFrameEx(f,0);
+ ++tstate->recursion_depth;
+ Py_DECREF(f);
+ --tstate->recursion_depth;
+ return result;
+}
+#if 1 || PY_VERSION_HEX < 0x030600B1
+static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
+ PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
+ PyObject *globals = PyFunction_GET_GLOBALS(func);
+ PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
+ PyObject *closure;
+#if PY_MAJOR_VERSION >= 3
+ PyObject *kwdefs;
+#endif
+ PyObject *kwtuple, **k;
+ PyObject **d;
+ Py_ssize_t nd;
+ Py_ssize_t nk;
+ PyObject *result;
+ assert(kwargs == NULL || PyDict_Check(kwargs));
+ nk = kwargs ? PyDict_Size(kwargs) : 0;
+ if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
+ return NULL;
+ }
+ if (
+#if PY_MAJOR_VERSION >= 3
+ co->co_kwonlyargcount == 0 &&
+#endif
+ likely(kwargs == NULL || nk == 0) &&
+ co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
+ if (argdefs == NULL && co->co_argcount == nargs) {
+ result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
+ goto done;
+ }
+ else if (nargs == 0 && argdefs != NULL
+ && co->co_argcount == Py_SIZE(argdefs)) {
+ /* function called with no arguments, but all parameters have
+ a default value: use default values as arguments .*/
+ args = &PyTuple_GET_ITEM(argdefs, 0);
+ result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
+ goto done;
+ }
+ }
+ if (kwargs != NULL) {
+ Py_ssize_t pos, i;
+ kwtuple = PyTuple_New(2 * nk);
+ if (kwtuple == NULL) {
+ result = NULL;
+ goto done;
+ }
+ k = &PyTuple_GET_ITEM(kwtuple, 0);
+ pos = i = 0;
+ while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
+ Py_INCREF(k[i]);
+ Py_INCREF(k[i+1]);
+ i += 2;
+ }
+ nk = i / 2;
+ }
+ else {
+ kwtuple = NULL;
+ k = NULL;
+ }
+ closure = PyFunction_GET_CLOSURE(func);
+#if PY_MAJOR_VERSION >= 3
+ kwdefs = PyFunction_GET_KW_DEFAULTS(func);
+#endif
+ if (argdefs != NULL) {
+ d = &PyTuple_GET_ITEM(argdefs, 0);
+ nd = Py_SIZE(argdefs);
+ }
+ else {
+ d = NULL;
+ nd = 0;
+ }
+#if PY_MAJOR_VERSION >= 3
+ result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, kwdefs, closure);
+#else
+ result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
+ args, (int)nargs,
+ k, (int)nk,
+ d, (int)nd, closure);
+#endif
+ Py_XDECREF(kwtuple);
+done:
+ Py_LeaveRecursiveCall();
+ return result;
+}
+#endif
+#endif
+
+/* PyObjectCallMethO */
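+/* Directly invoke a METH_O C function, bypassing the generic call machinery. */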
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
+ PyObject *self, *result;
+ PyCFunction cfunc;
+ cfunc = PyCFunction_GET_FUNCTION(func);
+ self = PyCFunction_GET_SELF(func);
+ if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
+ return NULL;
+ result = cfunc(self, arg);
+ Py_LeaveRecursiveCall();
+ if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
+ PyErr_SetString(
+ PyExc_SystemError,
+ "NULL result without error in PyObject_Call");
+ }
+ return result;
+}
+#endif
+
+/* PyObjectCallOneArg */
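+/* Call any callable with exactly one positional argument, dispatching to the
+   fastest calling convention available for the callable's type. */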
+#if CYTHON_COMPILING_IN_CPYTHON
+static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_New(1);
+ if (unlikely(!args)) return NULL;
+ Py_INCREF(arg);
+ PyTuple_SET_ITEM(args, 0, arg);
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, &arg, 1);
+ }
+#endif
+ if (likely(PyCFunction_Check(func))) {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
+ return __Pyx_PyObject_CallMethO(func, arg);
+#if CYTHON_FAST_PYCCALL
+ } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
+ return __Pyx_PyCFunction_FastCall(func, &arg, 1);
+#endif
+ }
+ }
+ return __Pyx__PyObject_CallOneArg(func, arg);
+}
+#else
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
+ PyObject *result;
+ PyObject *args = PyTuple_Pack(1, arg);
+ if (unlikely(!args)) return NULL;
+ result = __Pyx_PyObject_Call(func, args, NULL);
+ Py_DECREF(args);
+ return result;
+}
+#endif
+
+/* GetException */
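+/* Fetch and normalize the pending exception, returning owned references and
+   installing it as the "currently handled" exception for sys.exc_info(). */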
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
+#else
+static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
+#endif
+{
+ PyObject *local_type, *local_value, *local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ local_type = tstate->curexc_type;
+ local_value = tstate->curexc_value;
+ local_tb = tstate->curexc_traceback;
+ tstate->curexc_type = 0;
+ tstate->curexc_value = 0;
+ tstate->curexc_traceback = 0;
+#else
+ PyErr_Fetch(&local_type, &local_value, &local_tb);
+#endif
+ PyErr_NormalizeException(&local_type, &local_value, &local_tb);
+#if CYTHON_FAST_THREAD_STATE
+ if (unlikely(tstate->curexc_type))
+#else
+ if (unlikely(PyErr_Occurred()))
+#endif
+ goto bad;
+ #if PY_MAJOR_VERSION >= 3
+ if (local_tb) {
+ if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
+ goto bad;
+ }
+ #endif
+ Py_XINCREF(local_tb);
+ Py_XINCREF(local_type);
+ Py_XINCREF(local_value);
+ *type = local_type;
+ *value = local_value;
+ *tb = local_tb;
+#if CYTHON_FAST_THREAD_STATE
+ #if CYTHON_USE_EXC_INFO_STACK
+ {
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = local_type;
+ exc_info->exc_value = local_value;
+ exc_info->exc_traceback = local_tb;
+ }
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = local_type;
+ tstate->exc_value = local_value;
+ tstate->exc_traceback = local_tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+#else
+ PyErr_SetExcInfo(local_type, local_value, local_tb);
+#endif
+ return 0;
+bad:
+ *type = 0;
+ *value = 0;
+ *tb = 0;
+ Py_XDECREF(local_type);
+ Py_XDECREF(local_value);
+ Py_XDECREF(local_tb);
+ return -1;
+}
+
+/* SwapException */
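+/* Exchange *type/*value/*tb with the thread state's currently handled
+   exception, returning the previous one to the caller. */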
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = *type;
+ exc_info->exc_value = *value;
+ exc_info->exc_traceback = *tb;
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = *type;
+ tstate->exc_value = *value;
+ tstate->exc_traceback = *tb;
+ #endif
+ *type = tmp_type;
+ *value = tmp_value;
+ *tb = tmp_tb;
+}
+#else
+static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
+ PyErr_SetExcInfo(*type, *value, *tb);
+ *type = tmp_type;
+ *value = tmp_value;
+ *tb = tmp_tb;
+}
+#endif
+
+/* GetTopmostException */
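+/* Walk the exc_info stack down to the innermost entry that actually holds a
+   live exception; outer entries may be NULL/None placeholders. */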
+#if CYTHON_USE_EXC_INFO_STACK
+static _PyErr_StackItem *
+__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
+{
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
+ exc_info->previous_item != NULL)
+ {
+ exc_info = exc_info->previous_item;
+ }
+ return exc_info;
+}
+#endif
+
+/* SaveResetException */
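+/* Save and later restore the currently handled exception around code that
+   may replace it (e.g. try/except blocks). */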
+#if CYTHON_FAST_THREAD_STATE
+static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
+ *type = exc_info->exc_type;
+ *value = exc_info->exc_value;
+ *tb = exc_info->exc_traceback;
+ #else
+ *type = tstate->exc_type;
+ *value = tstate->exc_value;
+ *tb = tstate->exc_traceback;
+ #endif
+ Py_XINCREF(*type);
+ Py_XINCREF(*value);
+ Py_XINCREF(*tb);
+}
+static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
+ PyObject *tmp_type, *tmp_value, *tmp_tb;
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = tstate->exc_info;
+ tmp_type = exc_info->exc_type;
+ tmp_value = exc_info->exc_value;
+ tmp_tb = exc_info->exc_traceback;
+ exc_info->exc_type = type;
+ exc_info->exc_value = value;
+ exc_info->exc_traceback = tb;
+ #else
+ tmp_type = tstate->exc_type;
+ tmp_value = tstate->exc_value;
+ tmp_tb = tstate->exc_traceback;
+ tstate->exc_type = type;
+ tstate->exc_value = value;
+ tstate->exc_traceback = tb;
+ #endif
+ Py_XDECREF(tmp_type);
+ Py_XDECREF(tmp_value);
+ Py_XDECREF(tmp_tb);
+}
+#endif
+
+/* GetItemIntUnicode */
+static CYTHON_INLINE Py_UCS4 __Pyx_GetItemInt_Unicode_Fast(PyObject* ustring, Py_ssize_t i,
+ int wraparound, int boundscheck) {
+ Py_ssize_t length;
+ if (unlikely(__Pyx_PyUnicode_READY(ustring) < 0)) return (Py_UCS4)-1;
+ if (wraparound | boundscheck) {
+ length = __Pyx_PyUnicode_GET_LENGTH(ustring);
+ if (wraparound & unlikely(i < 0)) i += length;
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(i, length))) {
+ return __Pyx_PyUnicode_READ_CHAR(ustring, i);
+ } else {
+ PyErr_SetString(PyExc_IndexError, "string index out of range");
+ return (Py_UCS4)-1;
+ }
+ } else {
+ return __Pyx_PyUnicode_READ_CHAR(ustring, i);
+ }
+}
+
+/* ReRaiseException */
+static CYTHON_INLINE void __Pyx_ReraiseException(void) {
+ PyObject *type = NULL, *value = NULL, *tb = NULL;
+#if CYTHON_FAST_THREAD_STATE
+ PyThreadState *tstate = PyThreadState_GET();
+ #if CYTHON_USE_EXC_INFO_STACK
+ _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
+ type = exc_info->exc_type;
+ value = exc_info->exc_value;
+ tb = exc_info->exc_traceback;
+ #else
+ type = tstate->exc_type;
+ value = tstate->exc_value;
+ tb = tstate->exc_traceback;
+ #endif
+#else
+ PyErr_GetExcInfo(&type, &value, &tb);
+#endif
+ if (!type || type == Py_None) {
+#if !CYTHON_FAST_THREAD_STATE
+ Py_XDECREF(type);
+ Py_XDECREF(value);
+ Py_XDECREF(tb);
+#endif
+ PyErr_SetString(PyExc_RuntimeError,
+ "No active exception to reraise");
+ } else {
+#if CYTHON_FAST_THREAD_STATE
+ Py_INCREF(type);
+ Py_XINCREF(value);
+ Py_XINCREF(tb);
+#endif
+ PyErr_Restore(type, value, tb);
+ }
+}
+
+/* PyErrExceptionMatches */
+#if CYTHON_FAST_THREAD_STATE
+static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
+ PyObject *exc_type = tstate->curexc_type;
+ if (exc_type == err) return 1;
+ if (unlikely(!exc_type)) return 0;
+ if (unlikely(PyTuple_Check(err)))
+ return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
+ return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
+}
+#endif
+
+/* GetAttr */
+static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
+#if CYTHON_USE_TYPE_SLOTS
+#if PY_MAJOR_VERSION >= 3
+ if (likely(PyUnicode_Check(n)))
+#else
+ if (likely(PyString_Check(n)))
+#endif
+ return __Pyx_PyObject_GetAttrStr(o, n);
+#endif
+ return PyObject_GetAttr(o, n);
+}
+
+/* GetAttr3 */
+static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ return NULL;
+ __Pyx_PyErr_Clear();
+ Py_INCREF(d);
+ return d;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
+ PyObject *r = __Pyx_GetAttr(o, n);
+ return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
+}
+
+/* PyDictVersioning */
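+/* Use CPython's dict version tags to cheaply detect whether a cached type or
+   instance attribute lookup is still valid. */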
+#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
+}
+static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
+ PyObject **dictptr = NULL;
+ Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
+ if (offset) {
+#if CYTHON_COMPILING_IN_CPYTHON
+ dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
+#else
+ dictptr = _PyObject_GetDictPtr(obj);
+#endif
+ }
+ return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
+}
+static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
+ PyObject *dict = Py_TYPE(obj)->tp_dict;
+ if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
+ return 0;
+ return obj_dict_version == __Pyx_get_object_dict_version(obj);
+}
+#endif
+
+/* GetModuleGlobalName */
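+/* Look up a name in the module dict, falling back to builtins; with dict
+   versioning enabled, the result is cached until the module dict changes. */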
+#if CYTHON_USE_DICT_VERSIONS
+static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
+#else
+static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
+#endif
+{
+ PyObject *result;
+#if !CYTHON_AVOID_BORROWED_REFS
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
+ result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ } else if (unlikely(PyErr_Occurred())) {
+ return NULL;
+ }
+#else
+ result = PyDict_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+#endif
+#else
+ result = PyObject_GetItem(__pyx_d, name);
+ __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
+ if (likely(result)) {
+ return __Pyx_NewRef(result);
+ }
+ PyErr_Clear();
+#endif
+ return __Pyx_GetBuiltinName(name);
+}
+
+/* PyObjectCallNoArg */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) {
+#if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(func)) {
+ return __Pyx_PyFunction_FastCall(func, NULL, 0);
+ }
+#endif
+#ifdef __Pyx_CyFunction_USED
+ if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func)))
+#else
+ if (likely(PyCFunction_Check(func)))
+#endif
+ {
+ if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) {
+ return __Pyx_PyObject_CallMethO(func, NULL);
+ }
+ }
+ return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL);
+}
+#endif
+
+/* PyUnicode_Substring */
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_Substring(
+ PyObject* text, Py_ssize_t start, Py_ssize_t stop) {
+ Py_ssize_t length;
+ if (unlikely(__Pyx_PyUnicode_READY(text) == -1)) return NULL;
+ length = __Pyx_PyUnicode_GET_LENGTH(text);
+ if (start < 0) {
+ start += length;
+ if (start < 0)
+ start = 0;
+ }
+ if (stop < 0)
+ stop += length;
+ else if (stop > length)
+ stop = length;
+ if (stop <= start)
+ return __Pyx_NewRef(__pyx_empty_unicode);
+#if CYTHON_PEP393_ENABLED
+ return PyUnicode_FromKindAndData(PyUnicode_KIND(text),
+ PyUnicode_1BYTE_DATA(text) + start*PyUnicode_KIND(text), stop-start);
+#else
+ return PyUnicode_FromUnicode(PyUnicode_AS_UNICODE(text)+start, stop-start);
+#endif
+}
+
+/* PyObjectCall2Args */
+static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
+ PyObject *args, *result = NULL;
+ #if CYTHON_FAST_PYCALL
+ if (PyFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyFunction_FastCall(function, args, 2);
+ }
+ #endif
+ #if CYTHON_FAST_PYCCALL
+ if (__Pyx_PyFastCFunction_Check(function)) {
+ PyObject *args[2] = {arg1, arg2};
+ return __Pyx_PyCFunction_FastCall(function, args, 2);
+ }
+ #endif
+ args = PyTuple_New(2);
+ if (unlikely(!args)) goto done;
+ Py_INCREF(arg1);
+ PyTuple_SET_ITEM(args, 0, arg1);
+ Py_INCREF(arg2);
+ PyTuple_SET_ITEM(args, 1, arg2);
+ Py_INCREF(function);
+ result = __Pyx_PyObject_Call(function, args, NULL);
+ Py_DECREF(args);
+ Py_DECREF(function);
+done:
+ return result;
+}
+
+/* SliceObject */
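+/* Slice an arbitrary object: prefer the (Python 2) sq_slice slot, otherwise
+   build a slice object and go through mp_subscript / PyObject_GetItem. */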
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetSlice(PyObject* obj,
+ Py_ssize_t cstart, Py_ssize_t cstop,
+ PyObject** _py_start, PyObject** _py_stop, PyObject** _py_slice,
+ int has_cstart, int has_cstop, CYTHON_UNUSED int wraparound) {
+#if CYTHON_USE_TYPE_SLOTS
+ PyMappingMethods* mp;
+#if PY_MAJOR_VERSION < 3
+ PySequenceMethods* ms = Py_TYPE(obj)->tp_as_sequence;
+ if (likely(ms && ms->sq_slice)) {
+ if (!has_cstart) {
+ if (_py_start && (*_py_start != Py_None)) {
+ cstart = __Pyx_PyIndex_AsSsize_t(*_py_start);
+ if ((cstart == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+ } else
+ cstart = 0;
+ }
+ if (!has_cstop) {
+ if (_py_stop && (*_py_stop != Py_None)) {
+ cstop = __Pyx_PyIndex_AsSsize_t(*_py_stop);
+ if ((cstop == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad;
+ } else
+ cstop = PY_SSIZE_T_MAX;
+ }
+ if (wraparound && unlikely((cstart < 0) | (cstop < 0)) && likely(ms->sq_length)) {
+ Py_ssize_t l = ms->sq_length(obj);
+ if (likely(l >= 0)) {
+ if (cstop < 0) {
+ cstop += l;
+ if (cstop < 0) cstop = 0;
+ }
+ if (cstart < 0) {
+ cstart += l;
+ if (cstart < 0) cstart = 0;
+ }
+ } else {
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+ goto bad;
+ PyErr_Clear();
+ }
+ }
+ return ms->sq_slice(obj, cstart, cstop);
+ }
+#endif
+ mp = Py_TYPE(obj)->tp_as_mapping;
+ if (likely(mp && mp->mp_subscript))
+#endif
+ {
+ PyObject* result;
+ PyObject *py_slice, *py_start, *py_stop;
+ if (_py_slice) {
+ py_slice = *_py_slice;
+ } else {
+ PyObject* owned_start = NULL;
+ PyObject* owned_stop = NULL;
+ if (_py_start) {
+ py_start = *_py_start;
+ } else {
+ if (has_cstart) {
+ owned_start = py_start = PyInt_FromSsize_t(cstart);
+ if (unlikely(!py_start)) goto bad;
+ } else
+ py_start = Py_None;
+ }
+ if (_py_stop) {
+ py_stop = *_py_stop;
+ } else {
+ if (has_cstop) {
+ owned_stop = py_stop = PyInt_FromSsize_t(cstop);
+ if (unlikely(!py_stop)) {
+ Py_XDECREF(owned_start);
+ goto bad;
+ }
+ } else
+ py_stop = Py_None;
+ }
+ py_slice = PySlice_New(py_start, py_stop, Py_None);
+ Py_XDECREF(owned_start);
+ Py_XDECREF(owned_stop);
+ if (unlikely(!py_slice)) goto bad;
+ }
+#if CYTHON_USE_TYPE_SLOTS
+ result = mp->mp_subscript(obj, py_slice);
+#else
+ result = PyObject_GetItem(obj, py_slice);
+#endif
+ if (!_py_slice) {
+ Py_DECREF(py_slice);
+ }
+ return result;
+ }
+ PyErr_Format(PyExc_TypeError,
+ "'%.200s' object is unsliceable", Py_TYPE(obj)->tp_name);
+bad:
+ return NULL;
+}
+
+/* Import */
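+/* Emulate the import statement: for an unspecified level (-1) on Python 3,
+   try a package-relative import first, then fall back to an absolute one. */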
+static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
+ PyObject *empty_list = 0;
+ PyObject *module = 0;
+ PyObject *global_dict = 0;
+ PyObject *empty_dict = 0;
+ PyObject *list;
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_import;
+ py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
+ if (!py_import)
+ goto bad;
+ #endif
+ if (from_list)
+ list = from_list;
+ else {
+ empty_list = PyList_New(0);
+ if (!empty_list)
+ goto bad;
+ list = empty_list;
+ }
+ global_dict = PyModule_GetDict(__pyx_m);
+ if (!global_dict)
+ goto bad;
+ empty_dict = PyDict_New();
+ if (!empty_dict)
+ goto bad;
+ {
+ #if PY_MAJOR_VERSION >= 3
+ if (level == -1) {
+ if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, 1);
+ if (!module) {
+ if (!PyErr_ExceptionMatches(PyExc_ImportError))
+ goto bad;
+ PyErr_Clear();
+ }
+ }
+ level = 0;
+ }
+ #endif
+ if (!module) {
+ #if PY_MAJOR_VERSION < 3
+ PyObject *py_level = PyInt_FromLong(level);
+ if (!py_level)
+ goto bad;
+ module = PyObject_CallFunctionObjArgs(py_import,
+ name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
+ Py_DECREF(py_level);
+ #else
+ module = PyImport_ImportModuleLevelObject(
+ name, global_dict, empty_dict, list, level);
+ #endif
+ }
+ }
+bad:
+ #if PY_MAJOR_VERSION < 3
+ Py_XDECREF(py_import);
+ #endif
+ Py_XDECREF(empty_list);
+ Py_XDECREF(empty_dict);
+ return module;
+}
+
+/* ImportFrom */
+static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
+ PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
+ if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
+ PyErr_Format(PyExc_ImportError,
+ #if PY_MAJOR_VERSION < 3
+ "cannot import name %.230s", PyString_AS_STRING(name));
+ #else
+ "cannot import name %S", name);
+ #endif
+ }
+ return value;
+}
+
+/* GetItemInt */
+static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
+ PyObject *r;
+ if (!j) return NULL;
+ r = PyObject_GetItem(o, j);
+ Py_DECREF(j);
+ return r;
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ Py_ssize_t wrapped_i = i;
+ if (wraparound & unlikely(i < 0)) {
+ wrapped_i += PyList_GET_SIZE(o);
+ }
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
+ PyObject *r = PyList_GET_ITEM(o, wrapped_i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
+ Py_ssize_t wrapped_i = i;
+ if (wraparound & unlikely(i < 0)) {
+ wrapped_i += PyTuple_GET_SIZE(o);
+ }
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
+ Py_INCREF(r);
+ return r;
+ }
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+#else
+ return PySequence_GetItem(o, i);
+#endif
+}
+static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
+ CYTHON_NCP_UNUSED int wraparound,
+ CYTHON_NCP_UNUSED int boundscheck) {
+#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
+ if (is_list || PyList_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
+ if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
+ PyObject *r = PyList_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ }
+ else if (PyTuple_CheckExact(o)) {
+ Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
+ if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
+ PyObject *r = PyTuple_GET_ITEM(o, n);
+ Py_INCREF(r);
+ return r;
+ }
+ } else {
+ PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
+ if (likely(m && m->sq_item)) {
+ if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
+ Py_ssize_t l = m->sq_length(o);
+ if (likely(l >= 0)) {
+ i += l;
+ } else {
+ if (!PyErr_ExceptionMatches(PyExc_OverflowError))
+ return NULL;
+ PyErr_Clear();
+ }
+ }
+ return m->sq_item(o, i);
+ }
+ }
+#else
+ if (is_list || PySequence_Check(o)) {
+ return PySequence_GetItem(o, i);
+ }
+#endif
+ return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
+}
+
+/* HasAttr */
+static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
+ PyObject *r;
+ if (unlikely(!__Pyx_PyBaseString_Check(n))) {
+ PyErr_SetString(PyExc_TypeError,
+ "hasattr(): attribute name must be string");
+ return -1;
+ }
+ r = __Pyx_GetAttr(o, n);
+ if (unlikely(!r)) {
+ PyErr_Clear();
+ return 0;
+ } else {
+ Py_DECREF(r);
+ return 1;
+ }
+}
+
+/* ExtTypeTest */
+static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
+ if (unlikely(!type)) {
+ PyErr_SetString(PyExc_SystemError, "Missing type object");
+ return 0;
+ }
+ if (likely(__Pyx_TypeCheck(obj, type)))
+ return 1;
+ PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
+ Py_TYPE(obj)->tp_name, type->tp_name);
+ return 0;
+}
+
+/* PyObject_GenericGetAttrNoDict */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
+ PyErr_Format(PyExc_AttributeError,
+#if PY_MAJOR_VERSION >= 3
+ "'%.50s' object has no attribute '%U'",
+ tp->tp_name, attr_name);
+#else
+ "'%.50s' object has no attribute '%.400s'",
+ tp->tp_name, PyString_AS_STRING(attr_name));
+#endif
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
+ PyObject *descr;
+ PyTypeObject *tp = Py_TYPE(obj);
+ if (unlikely(!PyString_Check(attr_name))) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ assert(!tp->tp_dictoffset);
+ descr = _PyType_Lookup(tp, attr_name);
+ if (unlikely(!descr)) {
+ return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
+ }
+ Py_INCREF(descr);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
+ #endif
+ {
+ descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
+ if (unlikely(f)) {
+ PyObject *res = f(descr, obj, (PyObject *)tp);
+ Py_DECREF(descr);
+ return res;
+ }
+ }
+ return descr;
+}
+#endif
+
+/* PyObject_GenericGetAttr */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
+ if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
+ return PyObject_GenericGetAttr(obj, attr_name);
+ }
+ return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
+}
+#endif
+
+/* SetVTable */
+static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
+#if PY_VERSION_HEX >= 0x02070000
+ PyObject *ob = PyCapsule_New(vtable, 0, 0);
+#else
+ PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
+#endif
+ if (!ob)
+ goto bad;
+ if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
+ goto bad;
+ Py_DECREF(ob);
+ return 0;
+bad:
+ Py_XDECREF(ob);
+ return -1;
+}
+
+/* PyObjectGetAttrStrNoError */
+static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
+ __Pyx_PyErr_Clear();
+}
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
+ PyObject *result;
+#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
+ PyTypeObject* tp = Py_TYPE(obj);
+ if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
+ return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
+ }
+#endif
+ result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
+ if (unlikely(!result)) {
+ __Pyx_PyObject_GetAttrStr_ClearAttributeError();
+ }
+ return result;
+}
+
+/* SetupReduce */
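+/* Wire up pickling for extension types: if the type still inherits
+   object.__reduce_ex__, install the generated __reduce_cython__ and
+   __setstate_cython__ helpers into its tp_dict. */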
+static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
+ int ret;
+ PyObject *name_attr;
+ name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name);
+ if (likely(name_attr)) {
+ ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
+ } else {
+ ret = -1;
+ }
+ if (unlikely(ret < 0)) {
+ PyErr_Clear();
+ ret = 0;
+ }
+ Py_XDECREF(name_attr);
+ return ret;
+}
+static int __Pyx_setup_reduce(PyObject* type_obj) {
+ int ret = 0;
+ PyObject *object_reduce = NULL;
+ PyObject *object_reduce_ex = NULL;
+ PyObject *reduce = NULL;
+ PyObject *reduce_ex = NULL;
+ PyObject *reduce_cython = NULL;
+ PyObject *setstate = NULL;
+ PyObject *setstate_cython = NULL;
+#if CYTHON_USE_PYTYPE_LOOKUP
+ if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
+#else
+ if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
+#endif
+#if CYTHON_USE_PYTYPE_LOOKUP
+ object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
+#else
+ object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
+#endif
+ reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
+ if (reduce_ex == object_reduce_ex) {
+#if CYTHON_USE_PYTYPE_LOOKUP
+ object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
+#else
+ object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
+#endif
+ reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
+ if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
+ reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
+ if (likely(reduce_cython)) {
+ ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ } else if (reduce == object_reduce || PyErr_Occurred()) {
+ goto __PYX_BAD;
+ }
+ setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
+ if (!setstate) PyErr_Clear();
+ if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
+ setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
+ if (likely(setstate_cython)) {
+ ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
+ } else if (!setstate || PyErr_Occurred()) {
+ goto __PYX_BAD;
+ }
+ }
+ PyType_Modified((PyTypeObject*)type_obj);
+ }
+ }
+ goto __PYX_GOOD;
+__PYX_BAD:
+ if (!PyErr_Occurred())
+ PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
+ ret = -1;
+__PYX_GOOD:
+#if !CYTHON_USE_PYTYPE_LOOKUP
+ Py_XDECREF(object_reduce);
+ Py_XDECREF(object_reduce_ex);
+#endif
+ Py_XDECREF(reduce);
+ Py_XDECREF(reduce_ex);
+ Py_XDECREF(reduce_cython);
+ Py_XDECREF(setstate);
+ Py_XDECREF(setstate_cython);
+ return ret;
+}
+
+/* TypeImport */
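+/* Import an extension type from another module and sanity-check its
+   tp_basicsize against the size expected from the C header. */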
+#ifndef __PYX_HAVE_RT_ImportType
+#define __PYX_HAVE_RT_ImportType
+static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
+ size_t size, enum __Pyx_ImportType_CheckSize check_size)
+{
+ PyObject *result = 0;
+ char warning[200];
+ Py_ssize_t basicsize;
+#ifdef Py_LIMITED_API
+ PyObject *py_basicsize;
+#endif
+ result = PyObject_GetAttrString(module, class_name);
+ if (!result)
+ goto bad;
+ if (!PyType_Check(result)) {
+ PyErr_Format(PyExc_TypeError,
+ "%.200s.%.200s is not a type object",
+ module_name, class_name);
+ goto bad;
+ }
+#ifndef Py_LIMITED_API
+ basicsize = ((PyTypeObject *)result)->tp_basicsize;
+#else
+ py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
+ if (!py_basicsize)
+ goto bad;
+ basicsize = PyLong_AsSsize_t(py_basicsize);
+ Py_DECREF(py_basicsize);
+ py_basicsize = 0;
+ if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
+ goto bad;
+#endif
+ if ((size_t)basicsize < size) {
+ PyErr_Format(PyExc_ValueError,
+ "%.200s.%.200s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ goto bad;
+ }
+ if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
+ PyErr_Format(PyExc_ValueError,
+ "%.200s.%.200s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ goto bad;
+ }
+ else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
+ PyOS_snprintf(warning, sizeof(warning),
+ "%s.%s size changed, may indicate binary incompatibility. "
+ "Expected %zd from C header, got %zd from PyObject",
+ module_name, class_name, size, basicsize);
+ if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
+ }
+ return (PyTypeObject *)result;
+bad:
+ Py_XDECREF(result);
+ return NULL;
+}
+#endif
+
+/* CLineInTraceback */
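+/* Decide whether tracebacks should report the generated C source line,
+   controlled by the cython_runtime.cline_in_traceback attribute. */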
+#ifndef CYTHON_CLINE_IN_TRACEBACK
+static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
+ PyObject *use_cline;
+ PyObject *ptype, *pvalue, *ptraceback;
+#if CYTHON_COMPILING_IN_CPYTHON
+ PyObject **cython_runtime_dict;
+#endif
+ if (unlikely(!__pyx_cython_runtime)) {
+ return c_line;
+ }
+ __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
+#if CYTHON_COMPILING_IN_CPYTHON
+ cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
+ if (likely(cython_runtime_dict)) {
+ __PYX_PY_DICT_LOOKUP_IF_MODIFIED(
+ use_cline, *cython_runtime_dict,
+ __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
+ } else
+#endif
+ {
+ PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
+ if (use_cline_obj) {
+ use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
+ Py_DECREF(use_cline_obj);
+ } else {
+ PyErr_Clear();
+ use_cline = NULL;
+ }
+ }
+ if (!use_cline) {
+ c_line = 0;
+ PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
+ }
+ else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
+ c_line = 0;
+ }
+ __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
+ return c_line;
+}
+#endif
+
+/* CodeObjectCache */
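+/* Cache of synthetic code objects used for tracebacks, kept sorted by line
+   number so lookups can use binary search. */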
+static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
+ int start = 0, mid = 0, end = count - 1;
+ if (end >= 0 && code_line > entries[end].code_line) {
+ return count;
+ }
+ while (start < end) {
+ mid = start + (end - start) / 2;
+ if (code_line < entries[mid].code_line) {
+ end = mid;
+ } else if (code_line > entries[mid].code_line) {
+ start = mid + 1;
+ } else {
+ return mid;
+ }
+ }
+ if (code_line <= entries[mid].code_line) {
+ return mid;
+ } else {
+ return mid + 1;
+ }
+}
+static PyCodeObject *__pyx_find_code_object(int code_line) {
+ PyCodeObject* code_object;
+ int pos;
+ if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
+ return NULL;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
+ return NULL;
+ }
+ code_object = __pyx_code_cache.entries[pos].code_object;
+ Py_INCREF(code_object);
+ return code_object;
+}
+static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
+ int pos, i;
+ __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
+ if (unlikely(!code_line)) {
+ return;
+ }
+ if (unlikely(!entries)) {
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
+ if (likely(entries)) {
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = 64;
+ __pyx_code_cache.count = 1;
+ entries[0].code_line = code_line;
+ entries[0].code_object = code_object;
+ Py_INCREF(code_object);
+ }
+ return;
+ }
+ pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
+ if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
+ PyCodeObject* tmp = entries[pos].code_object;
+ entries[pos].code_object = code_object;
+ Py_DECREF(tmp);
+ return;
+ }
+ if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
+ int new_max = __pyx_code_cache.max_count + 64;
+ entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
+ __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
+ if (unlikely(!entries)) {
+ return;
+ }
+ __pyx_code_cache.entries = entries;
+ __pyx_code_cache.max_count = new_max;
+ }
+ for (i=__pyx_code_cache.count; i>pos; i--) {
+ entries[i] = entries[i-1];
+ }
+ entries[pos].code_line = code_line;
+ entries[pos].code_object = code_object;
+ __pyx_code_cache.count++;
+ Py_INCREF(code_object);
+}
+
+/* AddTraceback */
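+/* Append a traceback frame pointing at the original source line by creating
+   a dummy code object and frame for it. */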
+#include "compile.h"
+#include "frameobject.h"
+#include "traceback.h"
+static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
+ const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyObject *py_srcfile = 0;
+ PyObject *py_funcname = 0;
+ #if PY_MAJOR_VERSION < 3
+ py_srcfile = PyString_FromString(filename);
+ #else
+ py_srcfile = PyUnicode_FromString(filename);
+ #endif
+ if (!py_srcfile) goto bad;
+ if (c_line) {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #else
+ py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
+ #endif
+ }
+ else {
+ #if PY_MAJOR_VERSION < 3
+ py_funcname = PyString_FromString(funcname);
+ #else
+ py_funcname = PyUnicode_FromString(funcname);
+ #endif
+ }
+ if (!py_funcname) goto bad;
+ py_code = __Pyx_PyCode_New(
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ __pyx_empty_bytes, /*PyObject *code,*/
+ __pyx_empty_tuple, /*PyObject *consts,*/
+ __pyx_empty_tuple, /*PyObject *names,*/
+ __pyx_empty_tuple, /*PyObject *varnames,*/
+ __pyx_empty_tuple, /*PyObject *freevars,*/
+ __pyx_empty_tuple, /*PyObject *cellvars,*/
+ py_srcfile, /*PyObject *filename,*/
+ py_funcname, /*PyObject *name,*/
+ py_line,
+ __pyx_empty_bytes /*PyObject *lnotab*/
+ );
+ Py_DECREF(py_srcfile);
+ Py_DECREF(py_funcname);
+ return py_code;
+bad:
+ Py_XDECREF(py_srcfile);
+ Py_XDECREF(py_funcname);
+ return NULL;
+}
+static void __Pyx_AddTraceback(const char *funcname, int c_line,
+ int py_line, const char *filename) {
+ PyCodeObject *py_code = 0;
+ PyFrameObject *py_frame = 0;
+ PyThreadState *tstate = __Pyx_PyThreadState_Current;
+ if (c_line) {
+ c_line = __Pyx_CLineForTraceback(tstate, c_line);
+ }
+ py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
+ if (!py_code) {
+ py_code = __Pyx_CreateCodeObjectForTraceback(
+ funcname, c_line, py_line, filename);
+ if (!py_code) goto bad;
+ __pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
+ }
+ py_frame = PyFrame_New(
+ tstate, /*PyThreadState *tstate,*/
+ py_code, /*PyCodeObject *code,*/
+ __pyx_d, /*PyObject *globals,*/
+ 0 /*PyObject *locals*/
+ );
+ if (!py_frame) goto bad;
+ __Pyx_PyFrame_SetLineNumber(py_frame, py_line);
+ PyTraceBack_Here(py_frame);
+bad:
+ Py_XDECREF(py_code);
+ Py_XDECREF(py_frame);
+}
+
+/* CIntFromPyVerify */
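+/* Helper macros: convert through a wider C type, then verify that the value
+   round-trips into the target type without overflow. */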
+#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
+#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
+ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
+#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
+ {\
+ func_type value = func_value;\
+ if (sizeof(target_type) < sizeof(func_type)) {\
+ if (unlikely(value != (func_type) (target_type) value)) {\
+ func_type zero = 0;\
+ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
+ return (target_type) -1;\
+ if (is_unsigned && unlikely(value < zero))\
+ goto raise_neg_overflow;\
+ else\
+ goto raise_overflow;\
+ }\
+ }\
+ return (target_type) value;\
+ }
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
+ if (sizeof(int) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(int) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(int) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(int),
+ little, !is_unsigned);
+ }
+}
+
+/* CIntToPy */
+static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+ if (is_unsigned) {
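+        /* The tautological sizeof comparisons below come from the code
+           template; the compiler folds them away as constants. */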
+ if (sizeof(long) < sizeof(long)) {
+ return PyInt_FromLong((long) value);
+ } else if (sizeof(long) <= sizeof(unsigned long)) {
+ return PyLong_FromUnsignedLong((unsigned long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
+#endif
+ }
+ } else {
+ if (sizeof(long) <= sizeof(long)) {
+ return PyInt_FromLong((long) value);
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ return PyLong_FromLongLong((PY_LONG_LONG) value);
+#endif
+ }
+ }
+ {
+ int one = 1; int little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&value;
+ return _PyLong_FromByteArray(bytes, sizeof(long),
+ little, !is_unsigned);
+ }
+}
+
+/* PyUCS4InUnicode */
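+/* Membership test for a single code point; on pre-PEP-393 narrow builds,
+   non-BMP characters are matched as UTF-16 surrogate pairs. */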
+#if PY_VERSION_HEX < 0x03090000
+#if Py_UNICODE_SIZE == 2
+static int __Pyx_PyUnicodeBufferContainsUCS4_SP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) {
+ Py_UNICODE high_val, low_val;
+ Py_UNICODE* pos;
+ high_val = (Py_UNICODE) (0xD800 | (((character - 0x10000) >> 10) & ((1<<10)-1)));
+ low_val = (Py_UNICODE) (0xDC00 | ( (character - 0x10000) & ((1<<10)-1)));
+ for (pos=buffer; pos < buffer+length-1; pos++) {
+ if (unlikely((high_val == pos[0]) & (low_val == pos[1]))) return 1;
+ }
+ return 0;
+}
+#endif
+static int __Pyx_PyUnicodeBufferContainsUCS4_BMP(Py_UNICODE* buffer, Py_ssize_t length, Py_UCS4 character) {
+ Py_UNICODE uchar;
+ Py_UNICODE* pos;
+ uchar = (Py_UNICODE) character;
+ for (pos=buffer; pos < buffer+length; pos++) {
+ if (unlikely(uchar == pos[0])) return 1;
+ }
+ return 0;
+}
+#endif
+static CYTHON_INLINE int __Pyx_UnicodeContainsUCS4(PyObject* unicode, Py_UCS4 character) {
+#if CYTHON_PEP393_ENABLED
+ const int kind = PyUnicode_KIND(unicode);
+ if (likely(kind != PyUnicode_WCHAR_KIND)) {
+ Py_ssize_t i;
+ const void* udata = PyUnicode_DATA(unicode);
+ const Py_ssize_t length = PyUnicode_GET_LENGTH(unicode);
+ for (i=0; i < length; i++) {
+ if (unlikely(character == PyUnicode_READ(kind, udata, i))) return 1;
+ }
+ return 0;
+ }
+#elif PY_VERSION_HEX >= 0x03090000
+ #error Cannot use "UChar in Unicode" in Python 3.9 without PEP-393 unicode strings.
+#endif
+#if PY_VERSION_HEX < 0x03090000
+#if Py_UNICODE_SIZE == 2
+ if (unlikely(character > 65535)) {
+ return __Pyx_PyUnicodeBufferContainsUCS4_SP(
+ PyUnicode_AS_UNICODE(unicode),
+ PyUnicode_GET_SIZE(unicode),
+ character);
+ } else
+#endif
+ {
+ return __Pyx_PyUnicodeBufferContainsUCS4_BMP(
+ PyUnicode_AS_UNICODE(unicode),
+ PyUnicode_GET_SIZE(unicode),
+ character);
+ }
+#endif
+}
+
+/* UnicodeAsUCS4 */
+static CYTHON_INLINE Py_UCS4 __Pyx_PyUnicode_AsPy_UCS4(PyObject* x) {
+ Py_ssize_t length;
+ #if CYTHON_PEP393_ENABLED
+ length = PyUnicode_GET_LENGTH(x);
+ if (likely(length == 1)) {
+ return PyUnicode_READ_CHAR(x, 0);
+ }
+ #else
+ length = PyUnicode_GET_SIZE(x);
+ if (likely(length == 1)) {
+ return PyUnicode_AS_UNICODE(x)[0];
+ }
+ #if Py_UNICODE_SIZE == 2
+ else if (PyUnicode_GET_SIZE(x) == 2) {
+ Py_UCS4 high_val = PyUnicode_AS_UNICODE(x)[0];
+ if (high_val >= 0xD800 && high_val <= 0xDBFF) {
+ Py_UCS4 low_val = PyUnicode_AS_UNICODE(x)[1];
+ if (low_val >= 0xDC00 && low_val <= 0xDFFF) {
+ return 0x10000 + (((high_val & ((1<<10)-1)) << 10) | (low_val & ((1<<10)-1)));
+ }
+ }
+ }
+ #endif
+ #endif
+ PyErr_Format(PyExc_ValueError,
+ "only single character unicode strings can be converted to Py_UCS4, "
+ "got length %" CYTHON_FORMAT_SSIZE_T "d", length);
+ return (Py_UCS4)-1;
+}
+
+/* CIntFromPy */
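+/* Convert a Python int to uint8_t, reading small PyLong digit counts
+   directly before falling back to the generic C-API conversions. */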
+static CYTHON_INLINE uint8_t __Pyx_PyInt_As_uint8_t(PyObject *x) {
+ const uint8_t neg_one = (uint8_t) ((uint8_t) 0 - (uint8_t) 1), const_zero = (uint8_t) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(uint8_t) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (uint8_t) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (uint8_t) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(uint8_t, digit, digits[0])
+ case 2:
+ if (8 * sizeof(uint8_t) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) >= 2 * PyLong_SHIFT) {
+ return (uint8_t) (((((uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(uint8_t) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) >= 3 * PyLong_SHIFT) {
+ return (uint8_t) (((((((uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(uint8_t) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) >= 4 * PyLong_SHIFT) {
+ return (uint8_t) (((((((((uint8_t)digits[3]) << PyLong_SHIFT) | (uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (uint8_t) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(uint8_t) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(uint8_t, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(uint8_t) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(uint8_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (uint8_t) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(uint8_t, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(uint8_t, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(uint8_t) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) - 1 > 2 * PyLong_SHIFT) {
+ return (uint8_t) (((uint8_t)-1)*(((((uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(uint8_t) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) - 1 > 2 * PyLong_SHIFT) {
+ return (uint8_t) ((((((uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(uint8_t) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) - 1 > 3 * PyLong_SHIFT) {
+ return (uint8_t) (((uint8_t)-1)*(((((((uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(uint8_t) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) - 1 > 3 * PyLong_SHIFT) {
+ return (uint8_t) ((((((((uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(uint8_t) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) - 1 > 4 * PyLong_SHIFT) {
+ return (uint8_t) (((uint8_t)-1)*(((((((((uint8_t)digits[3]) << PyLong_SHIFT) | (uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(uint8_t) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint8_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint8_t) - 1 > 4 * PyLong_SHIFT) {
+ return (uint8_t) ((((((((((uint8_t)digits[3]) << PyLong_SHIFT) | (uint8_t)digits[2]) << PyLong_SHIFT) | (uint8_t)digits[1]) << PyLong_SHIFT) | (uint8_t)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(uint8_t) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(uint8_t, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(uint8_t) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(uint8_t, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ uint8_t val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (uint8_t) -1;
+ }
+ } else {
+ uint8_t val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (uint8_t) -1;
+ val = __Pyx_PyInt_As_uint8_t(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to uint8_t");
+ return (uint8_t) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to uint8_t");
+ return (uint8_t) -1;
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE uint64_t __Pyx_PyInt_As_uint64_t(PyObject *x) {
+ const uint64_t neg_one = (uint64_t) ((uint64_t) 0 - (uint64_t) 1), const_zero = (uint64_t) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(uint64_t) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (uint64_t) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (uint64_t) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(uint64_t, digit, digits[0])
+ case 2:
+ if (8 * sizeof(uint64_t) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) >= 2 * PyLong_SHIFT) {
+ return (uint64_t) (((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(uint64_t) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) >= 3 * PyLong_SHIFT) {
+ return (uint64_t) (((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(uint64_t) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) >= 4 * PyLong_SHIFT) {
+ return (uint64_t) (((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (uint64_t) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(uint64_t) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(uint64_t, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(uint64_t) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(uint64_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (uint64_t) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(uint64_t, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(uint64_t, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(uint64_t) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) {
+ return (uint64_t) (((uint64_t)-1)*(((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(uint64_t) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) {
+ return (uint64_t) ((((((uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(uint64_t) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) {
+ return (uint64_t) (((uint64_t)-1)*(((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(uint64_t) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) {
+ return (uint64_t) ((((((((uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(uint64_t) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) - 1 > 4 * PyLong_SHIFT) {
+ return (uint64_t) (((uint64_t)-1)*(((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(uint64_t) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(uint64_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(uint64_t) - 1 > 4 * PyLong_SHIFT) {
+ return (uint64_t) ((((((((((uint64_t)digits[3]) << PyLong_SHIFT) | (uint64_t)digits[2]) << PyLong_SHIFT) | (uint64_t)digits[1]) << PyLong_SHIFT) | (uint64_t)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(uint64_t) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(uint64_t, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(uint64_t) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(uint64_t, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ uint64_t val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (uint64_t) -1;
+ }
+ } else {
+ uint64_t val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (uint64_t) -1;
+ val = __Pyx_PyInt_As_uint64_t(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to uint64_t");
+ return (uint64_t) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to uint64_t");
+ return (uint64_t) -1;
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
+ const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(long) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (long) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
+ return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
+ return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
+ return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (long) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(long) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (long) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
+ return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(long) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ long val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (long) -1;
+ }
+ } else {
+ long val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (long) -1;
+ val = __Pyx_PyInt_As_long(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to long");
+ return (long) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to long");
+ return (long) -1;
+}
+
+/* ObjectAsUCS4 */
+static Py_UCS4 __Pyx__PyObject_AsPy_UCS4_raise_error(long ival) {
+ if (ival < 0) {
+ if (!PyErr_Occurred())
+ PyErr_SetString(PyExc_OverflowError,
+ "cannot convert negative value to Py_UCS4");
+ } else {
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to Py_UCS4");
+ }
+ return (Py_UCS4)-1;
+}
+static Py_UCS4 __Pyx__PyObject_AsPy_UCS4(PyObject* x) {
+ long ival;
+ ival = __Pyx_PyInt_As_long(x);
+ if (unlikely(!__Pyx_is_valid_index(ival, 1114111 + 1))) {
+ return __Pyx__PyObject_AsPy_UCS4_raise_error(ival);
+ }
+ return (Py_UCS4)ival;
+}
+
+/* CIntFromPy */
+static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
+ const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
+ const int is_unsigned = neg_one > const_zero;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x))) {
+ if (sizeof(int) < sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
+ } else {
+ long val = PyInt_AS_LONG(x);
+ if (is_unsigned && unlikely(val < 0)) {
+ goto raise_neg_overflow;
+ }
+ return (int) val;
+ }
+ } else
+#endif
+ if (likely(PyLong_Check(x))) {
+ if (is_unsigned) {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
+ return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
+ return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
+ return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
+ }
+ }
+ break;
+ }
+#endif
+#if CYTHON_COMPILING_IN_CPYTHON
+ if (unlikely(Py_SIZE(x) < 0)) {
+ goto raise_neg_overflow;
+ }
+#else
+ {
+ int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
+ if (unlikely(result < 0))
+ return (int) -1;
+ if (unlikely(result == 1))
+ goto raise_neg_overflow;
+ }
+#endif
+ if (sizeof(int) <= sizeof(unsigned long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
+#endif
+ }
+ } else {
+#if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)x)->ob_digit;
+ switch (Py_SIZE(x)) {
+ case 0: return (int) 0;
+ case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
+ case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
+ case -2:
+ if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 2:
+ if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -3:
+ if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 3:
+ if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case -4:
+ if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ case 4:
+ if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
+ if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
+ __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
+ } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
+ return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
+ }
+ }
+ break;
+ }
+#endif
+ if (sizeof(int) <= sizeof(long)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
+#ifdef HAVE_LONG_LONG
+ } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
+ __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
+#endif
+ }
+ }
+ {
+#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
+ PyErr_SetString(PyExc_RuntimeError,
+ "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
+#else
+ int val;
+ PyObject *v = __Pyx_PyNumber_IntOrLong(x);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(v) && !PyLong_Check(v)) {
+ PyObject *tmp = v;
+ v = PyNumber_Long(tmp);
+ Py_DECREF(tmp);
+ }
+ #endif
+ if (likely(v)) {
+ int one = 1; int is_little = (int)*(unsigned char *)&one;
+ unsigned char *bytes = (unsigned char *)&val;
+ int ret = _PyLong_AsByteArray((PyLongObject *)v,
+ bytes, sizeof(val),
+ is_little, !is_unsigned);
+ Py_DECREF(v);
+ if (likely(!ret))
+ return val;
+ }
+#endif
+ return (int) -1;
+ }
+ } else {
+ int val;
+ PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
+ if (!tmp) return (int) -1;
+ val = __Pyx_PyInt_As_int(tmp);
+ Py_DECREF(tmp);
+ return val;
+ }
+raise_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "value too large to convert to int");
+ return (int) -1;
+raise_neg_overflow:
+ PyErr_SetString(PyExc_OverflowError,
+ "can't convert negative value to int");
+ return (int) -1;
+}
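The three generated readers above (for uint64_t, long and int) all undo the same layout: CPython stores an int's magnitude as a little-endian array of base-2**PyLong_SHIFT digits, and each switch arm splices up to four of them back together using whichever signed/unsigned branch cannot overflow. A minimal Python sketch of that packing, assuming the 30-bit digits of a typical 64-bit CPython build:

    PyLong_SHIFT = 30                        # assumption: 64-bit CPython (15 on 32-bit builds)
    digits = [0x2345678, 0x1]                # least-significant digit first, as in ob_digit
    value = (digits[1] << PyLong_SHIFT) | digits[0]
    assert value == (1 << 30) + 0x2345678
    # the "case -2" style arms produce the same magnitude, negated
    assert -value == -((digits[1] << PyLong_SHIFT) | digits[0])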
+
+/* FastTypeChecks */
+#if CYTHON_COMPILING_IN_CPYTHON
+static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
+ while (a) {
+ a = a->tp_base;
+ if (a == b)
+ return 1;
+ }
+ return b == &PyBaseObject_Type;
+}
+static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
+ PyObject *mro;
+ if (a == b) return 1;
+ mro = a->tp_mro;
+ if (likely(mro)) {
+ Py_ssize_t i, n;
+ n = PyTuple_GET_SIZE(mro);
+ for (i = 0; i < n; i++) {
+ if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
+ return 1;
+ }
+ return 0;
+ }
+ return __Pyx_InBases(a, b);
+}
+#if PY_MAJOR_VERSION == 2
+static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
+ PyObject *exception, *value, *tb;
+ int res;
+ __Pyx_PyThreadState_declare
+ __Pyx_PyThreadState_assign
+ __Pyx_ErrFetch(&exception, &value, &tb);
+ res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ if (!res) {
+ res = PyObject_IsSubclass(err, exc_type2);
+ if (unlikely(res == -1)) {
+ PyErr_WriteUnraisable(err);
+ res = 0;
+ }
+ }
+ __Pyx_ErrRestore(exception, value, tb);
+ return res;
+}
+#else
+static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
+ int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+ if (!res) {
+ res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+ }
+ return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+ Py_ssize_t i, n;
+ assert(PyExceptionClass_Check(exc_type));
+ n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+ for (i=0; i<n; i++) {
+ if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+ }
+#endif
+ for (i=0; i<n; i++) {
+ PyObject *t = PyTuple_GET_ITEM(tuple, i);
+ #if PY_MAJOR_VERSION < 3
+ if (likely(exc_type == t)) return 1;
+ #endif
+ if (likely(PyExceptionClass_Check(t))) {
+ if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+ } else {
+ }
+ }
+ return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+ if (likely(err == exc_type)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ if (likely(PyExceptionClass_Check(exc_type))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+ } else if (likely(PyTuple_Check(exc_type))) {
+ return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+ } else {
+ }
+ }
+ return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+ assert(PyExceptionClass_Check(exc_type1));
+ assert(PyExceptionClass_Check(exc_type2));
+ if (likely(err == exc_type1 || err == exc_type2)) return 1;
+ if (likely(PyExceptionClass_Check(err))) {
+ return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+ }
+ return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
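Semantically, __Pyx_PyErr_GivenExceptionMatches is an inlined fast path for CPython's PyErr_GivenExceptionMatches: identity first, then a subclass walk over tp_mro/tp_base, with tuples checked element-wise. A rough Python equivalent of the behaviour (a sketch of the semantics, not of the generated code):

    def given_exception_matches(err, exc_type):
        # err is an exception class; exc_type is a class or a tuple of classes
        if err is exc_type:
            return True
        if isinstance(exc_type, tuple):
            return any(given_exception_matches(err, t) for t in exc_type)
        return isinstance(err, type) and issubclass(err, exc_type)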
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+ char ctversion[4], rtversion[4];
+ PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+ PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+ if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+ char message[200];
+ PyOS_snprintf(message, sizeof(message),
+ "compiletime version %s of module '%.100s' "
+ "does not match runtime version %s",
+ ctversion, __Pyx_MODULE_NAME, rtversion);
+ return PyErr_WarnEx(NULL, message, 1);
+ }
+ return 0;
+}
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+ while (t->p) {
+ #if PY_MAJOR_VERSION < 3
+ if (t->is_unicode) {
+ *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+ } else if (t->intern) {
+ *t->p = PyString_InternFromString(t->s);
+ } else {
+ *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+ }
+ #else
+ if (t->is_unicode | t->is_str) {
+ if (t->intern) {
+ *t->p = PyUnicode_InternFromString(t->s);
+ } else if (t->encoding) {
+ *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+ } else {
+ *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+ }
+ } else {
+ *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+ }
+ #endif
+ if (!*t->p)
+ return -1;
+ if (PyObject_Hash(*t->p) == -1)
+ return -1;
+ ++t;
+ }
+ return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+ return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+ Py_ssize_t ignore;
+ return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ char* defenc_c;
+ PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+ if (!defenc) return NULL;
+ defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ {
+ char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+ char* c;
+ for (c = defenc_c; c < end; c++) {
+ if ((unsigned char) (*c) >= 128) {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+ }
+ }
+#endif
+ *length = PyBytes_GET_SIZE(defenc);
+ return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+ if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ if (likely(PyUnicode_IS_ASCII(o))) {
+ *length = PyUnicode_GET_LENGTH(o);
+ return PyUnicode_AsUTF8(o);
+ } else {
+ PyUnicode_AsASCIIString(o);
+ return NULL;
+ }
+#else
+ return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+ if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+ __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+ PyUnicode_Check(o)) {
+ return __Pyx_PyUnicode_AsStringAndSize(o, length);
+ } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+ if (PyByteArray_Check(o)) {
+ *length = PyByteArray_GET_SIZE(o);
+ return PyByteArray_AS_STRING(o);
+ } else
+#endif
+ {
+ char* result;
+ int r = PyBytes_AsStringAndSize(o, &result, length);
+ if (unlikely(r < 0)) {
+ return NULL;
+ } else {
+ return result;
+ }
+ }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+ int is_true = x == Py_True;
+ if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+ else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
+ int retval;
+ if (unlikely(!x)) return -1;
+ retval = __Pyx_PyObject_IsTrue(x);
+ Py_DECREF(x);
+ return retval;
+}
+static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
+#if PY_MAJOR_VERSION >= 3
+ if (PyLong_Check(result)) {
+ if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
+ "__int__ returned non-int (type %.200s). "
+ "The ability to return an instance of a strict subclass of int "
+ "is deprecated, and may be removed in a future version of Python.",
+ Py_TYPE(result)->tp_name)) {
+ Py_DECREF(result);
+ return NULL;
+ }
+ return result;
+ }
+#endif
+ PyErr_Format(PyExc_TypeError,
+ "__%.4s__ returned non-%.4s (type %.200s)",
+ type_name, type_name, Py_TYPE(result)->tp_name);
+ Py_DECREF(result);
+ return NULL;
+}
+static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
+#if CYTHON_USE_TYPE_SLOTS
+ PyNumberMethods *m;
+#endif
+ const char *name = NULL;
+ PyObject *res = NULL;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_Check(x) || PyLong_Check(x)))
+#else
+ if (likely(PyLong_Check(x)))
+#endif
+ return __Pyx_NewRef(x);
+#if CYTHON_USE_TYPE_SLOTS
+ m = Py_TYPE(x)->tp_as_number;
+ #if PY_MAJOR_VERSION < 3
+ if (m && m->nb_int) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ else if (m && m->nb_long) {
+ name = "long";
+ res = m->nb_long(x);
+ }
+ #else
+ if (likely(m && m->nb_int)) {
+ name = "int";
+ res = m->nb_int(x);
+ }
+ #endif
+#else
+ if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
+ res = PyNumber_Int(x);
+ }
+#endif
+ if (likely(res)) {
+#if PY_MAJOR_VERSION < 3
+ if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
+#else
+ if (unlikely(!PyLong_CheckExact(res))) {
+#endif
+ return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
+ }
+ }
+ else if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "an integer is required");
+ }
+ return res;
+}
+static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
+ Py_ssize_t ival;
+ PyObject *x;
+#if PY_MAJOR_VERSION < 3
+ if (likely(PyInt_CheckExact(b))) {
+ if (sizeof(Py_ssize_t) >= sizeof(long))
+ return PyInt_AS_LONG(b);
+ else
+ return PyInt_AsSsize_t(b);
+ }
+#endif
+ if (likely(PyLong_CheckExact(b))) {
+ #if CYTHON_USE_PYLONG_INTERNALS
+ const digit* digits = ((PyLongObject*)b)->ob_digit;
+ const Py_ssize_t size = Py_SIZE(b);
+ if (likely(__Pyx_sst_abs(size) <= 1)) {
+ ival = likely(size) ? digits[0] : 0;
+ if (size == -1) ival = -ival;
+ return ival;
+ } else {
+ switch (size) {
+ case 2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -2:
+ if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -3:
+ if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case 4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ case -4:
+ if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+ return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+ }
+ break;
+ }
+ }
+ #endif
+ return PyLong_AsSsize_t(b);
+ }
+ x = PyNumber_Index(b);
+ if (!x) return -1;
+ ival = PyInt_AsSsize_t(x);
+ Py_DECREF(x);
+ return ival;
+}
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
+ return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+ return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/third_party/python/yarl/yarl/_quoting_c.pyi b/third_party/python/yarl/yarl/_quoting_c.pyi
new file mode 100644
index 0000000000..1c8fc24ec7
--- /dev/null
+++ b/third_party/python/yarl/yarl/_quoting_c.pyi
@@ -0,0 +1,16 @@
+from typing import Optional
+
+class _Quoter:
+ def __init__(
+ self,
+ *,
+ safe: str = ...,
+ protected: str = ...,
+ qs: bool = ...,
+ requote: bool = ...
+ ) -> None: ...
+ def __call__(self, val: Optional[str] = ...) -> Optional[str]: ...
+
+class _Unquoter:
+ def __init__(self, *, unsafe: str = ..., qs: bool = ...) -> None: ...
+ def __call__(self, val: Optional[str] = ...) -> Optional[str]: ...
diff --git a/third_party/python/yarl/yarl/_quoting_c.pyx b/third_party/python/yarl/yarl/_quoting_c.pyx
new file mode 100644
index 0000000000..1b8bea2518
--- /dev/null
+++ b/third_party/python/yarl/yarl/_quoting_c.pyx
@@ -0,0 +1,371 @@
+# cython: language_level=3
+
+from libc.stdint cimport uint8_t, uint64_t
+from libc.string cimport memcpy, memset
+
+from cpython.exc cimport PyErr_NoMemory
+from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free
+from cpython.unicode cimport PyUnicode_DecodeASCII, PyUnicode_DecodeUTF8Stateful
+
+from string import ascii_letters, digits
+
+cdef str GEN_DELIMS = ":/?#[]@"
+cdef str SUB_DELIMS_WITHOUT_QS = "!$'()*,"
+cdef str SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + '+&=;'  # keep in sync with _quoting_py
+cdef str RESERVED = GEN_DELIMS + SUB_DELIMS
+cdef str UNRESERVED = ascii_letters + digits + '-._~'
+cdef str ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS
+cdef str QS = '+&=;'
+
+DEF BUF_SIZE = 8 * 1024 # 8KiB
+cdef char BUFFER[BUF_SIZE]
+
+cdef inline Py_UCS4 _to_hex(uint8_t v):
+ if v < 10:
+ return <Py_UCS4>(v+0x30) # ord('0') == 0x30
+ else:
+ return <Py_UCS4>(v+0x41-10) # ord('A') == 0x41
+
+
+cdef inline int _from_hex(Py_UCS4 v):
+ if '0' <= v <= '9':
+ return <int>(v) - 0x30 # ord('0') == 0x30
+ elif 'A' <= v <= 'F':
+ return <int>(v) - 0x41 + 10 # ord('A') == 0x41
+ elif 'a' <= v <= 'f':
+ return <int>(v) - 0x61 + 10 # ord('a') == 0x61
+ else:
+ return -1
+
+
+cdef inline int _is_lower_hex(Py_UCS4 v):
+ return 'a' <= v <= 'f'
+
+
+cdef inline Py_UCS4 _restore_ch(Py_UCS4 d1, Py_UCS4 d2):
+ cdef int digit1 = _from_hex(d1)
+ if digit1 < 0:
+ return <Py_UCS4>-1
+ cdef int digit2 = _from_hex(d2)
+ if digit2 < 0:
+ return <Py_UCS4>-1
+ return <Py_UCS4>(digit1 << 4 | digit2)
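_to_hex, _from_hex and _restore_ch are the two halves of percent-escaping a single byte. The round trip in plain Python (restore_ch here is a hypothetical mirror of the cdef helper):

    def restore_ch(d1, d2):
        # '%41' -> 'A'; None stands in for the <Py_UCS4>-1 error value
        try:
            return chr(int(d1 + d2, 16))
        except ValueError:
            return None

    assert restore_ch('4', '1') == 'A'
    assert restore_ch('4', 'g') is None      # 'g' is not a hex digit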
+
+
+cdef uint8_t ALLOWED_TABLE[16]
+cdef uint8_t ALLOWED_NOTQS_TABLE[16]
+
+
+cdef inline bint bit_at(uint8_t array[], uint64_t ch):
+ return array[ch >> 3] & (1 << (ch & 7))
+
+
+cdef inline void set_bit(uint8_t array[], uint64_t ch):
+ array[ch >> 3] |= (1 << (ch & 7))
+
+
+memset(ALLOWED_TABLE, 0, sizeof(ALLOWED_TABLE))
+memset(ALLOWED_NOTQS_TABLE, 0, sizeof(ALLOWED_NOTQS_TABLE))
+
+for i in range(128):
+ if chr(i) in ALLOWED:
+ set_bit(ALLOWED_TABLE, i)
+ set_bit(ALLOWED_NOTQS_TABLE, i)
+ if chr(i) in QS:
+ set_bit(ALLOWED_NOTQS_TABLE, i)
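Each table is a 16-byte bitset over the 128 ASCII code points: set_bit/bit_at address bit (ch & 7) of byte (ch >> 3). The same structure in pure Python:

    table = bytearray(16)                    # 16 * 8 = 128 bits

    def set_bit(t, ch): t[ch >> 3] |= 1 << (ch & 7)
    def bit_at(t, ch): return bool(t[ch >> 3] & (1 << (ch & 7)))

    for c in "abc-._~":
        set_bit(table, ord(c))
    assert bit_at(table, ord('a')) and not bit_at(table, ord('%'))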
+
+# ----------------- writer ---------------------------
+
+cdef struct Writer:
+ char *buf
+ Py_ssize_t size
+ Py_ssize_t pos
+ bint changed
+
+
+cdef inline void _init_writer(Writer* writer):
+ writer.buf = &BUFFER[0]
+ writer.size = BUF_SIZE
+ writer.pos = 0
+ writer.changed = 0
+
+
+cdef inline void _release_writer(Writer* writer):
+ if writer.buf != BUFFER:
+ PyMem_Free(writer.buf)
+
+
+cdef inline int _write_char(Writer* writer, Py_UCS4 ch, bint changed):
+ cdef char * buf
+ cdef Py_ssize_t size
+
+ if writer.pos == writer.size:
+ # reallocate
+ size = writer.size + BUF_SIZE
+ if writer.buf == BUFFER:
+ buf = <char*>PyMem_Malloc(size)
+ if buf == NULL:
+ PyErr_NoMemory()
+ return -1
+ memcpy(buf, writer.buf, writer.size)
+ else:
+ buf = <char*>PyMem_Realloc(writer.buf, size)
+ if buf == NULL:
+ PyErr_NoMemory()
+ return -1
+ writer.buf = buf
+ writer.size = size
+ writer.buf[writer.pos] = <char>ch
+ writer.pos += 1
+ writer.changed |= changed
+ return 0
+
+
+cdef inline int _write_pct(Writer* writer, uint8_t ch, bint changed):
+ if _write_char(writer, '%', changed) < 0:
+ return -1
+ if _write_char(writer, _to_hex(<uint8_t>ch >> 4), changed) < 0:
+ return -1
+ return _write_char(writer, _to_hex(<uint8_t>ch & 0x0f), changed)
+
+
+cdef inline int _write_utf8(Writer* writer, Py_UCS4 symbol):
+ cdef uint64_t utf = <uint64_t> symbol
+
+ if utf < 0x80:
+ return _write_pct(writer, <uint8_t>utf, True)
+ elif utf < 0x800:
+ if _write_pct(writer, <uint8_t>(0xc0 | (utf >> 6)), True) < 0:
+ return -1
+ return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ elif 0xD800 <= utf <= 0xDFFF:
+        # lone surrogate, ignored
+ return 0
+ elif utf < 0x10000:
+ if _write_pct(writer, <uint8_t>(0xe0 | (utf >> 12)), True) < 0:
+ return -1
+ if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ True) < 0:
+ return -1
+ return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+ elif utf > 0x10FFFF:
+ # symbol is too large
+ return 0
+ else:
+ if _write_pct(writer, <uint8_t>(0xf0 | (utf >> 18)), True) < 0:
+ return -1
+ if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 12) & 0x3f)),
+ True) < 0:
+ return -1
+ if _write_pct(writer, <uint8_t>(0x80 | ((utf >> 6) & 0x3f)),
+ True) < 0:
+ return -1
+ return _write_pct(writer, <uint8_t>(0x80 | (utf & 0x3f)), True)
+
+
+# --------------------- end writer --------------------------
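_write_utf8 hand-rolls the UTF-8 encoder so every byte can be fed straight to _write_pct; lone surrogates and code points above U+10FFFF are silently dropped. Its observable output matches this sketch:

    def pct_utf8(ch):
        cp = ord(ch)
        if 0xD800 <= cp <= 0xDFFF or cp > 0x10FFFF:
            return ""                        # dropped, as in _write_utf8
        return "".join("%{:02X}".format(b) for b in ch.encode("utf-8"))

    assert pct_utf8(" ") == "%20"
    assert pct_utf8("\u00e9") == "%C3%A9"    # two-byte sequence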
+
+
+cdef class _Quoter:
+ cdef bint _qs
+ cdef bint _requote
+
+ cdef uint8_t _safe_table[16]
+ cdef uint8_t _protected_table[16]
+
+ def __init__(
+ self, *, str safe='', str protected='', bint qs=False, bint requote=True,
+ ):
+ cdef Py_UCS4 ch
+
+ self._qs = qs
+ self._requote = requote
+
+ if not self._qs:
+ memcpy(self._safe_table,
+ ALLOWED_NOTQS_TABLE,
+ sizeof(self._safe_table))
+ else:
+ memcpy(self._safe_table,
+ ALLOWED_TABLE,
+ sizeof(self._safe_table))
+ for ch in safe:
+ if ord(ch) > 127:
+ raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ set_bit(self._safe_table, ch)
+
+ memset(self._protected_table, 0, sizeof(self._protected_table))
+ for ch in protected:
+ if ord(ch) > 127:
+ raise ValueError("Only safe symbols with ORD < 128 are allowed")
+ set_bit(self._safe_table, ch)
+ set_bit(self._protected_table, ch)
+
+ def __call__(self, val):
+ cdef Writer writer
+ if val is None:
+ return None
+ if type(val) is not str:
+ if isinstance(val, str):
+ # derived from str
+ val = str(val)
+ else:
+ raise TypeError("Argument should be str")
+ _init_writer(&writer)
+ try:
+ return self._do_quote(<str>val, &writer)
+ finally:
+ _release_writer(&writer)
+
+ cdef str _do_quote(self, str val, Writer *writer):
+ cdef Py_UCS4 ch
+ cdef int changed
+ cdef int idx = 0
+ cdef int length = len(val)
+
+ while idx < length:
+ ch = val[idx]
+ idx += 1
+ if ch == '%' and self._requote and idx <= length - 2:
+ ch = _restore_ch(val[idx], val[idx + 1])
+ if ch != <Py_UCS4>-1:
+ idx += 2
+ if ch < 128:
+ if bit_at(self._protected_table, ch):
+ if _write_pct(writer, ch, True) < 0:
+ raise
+ continue
+
+ if bit_at(self._safe_table, ch):
+ if _write_char(writer, ch, True) < 0:
+ raise
+ continue
+
+ changed = (_is_lower_hex(val[idx - 2]) or
+ _is_lower_hex(val[idx - 1]))
+ if _write_pct(writer, ch, changed) < 0:
+ raise
+ continue
+ else:
+ ch = '%'
+
+ if self._write(writer, ch) < 0:
+ raise
+
+ if not writer.changed:
+ return val
+ else:
+ return PyUnicode_DecodeASCII(writer.buf, writer.pos, "strict")
+
+ cdef inline int _write(self, Writer *writer, Py_UCS4 ch):
+ if self._qs:
+ if ch == ' ':
+ return _write_char(writer, '+', True)
+
+ if ch < 128 and bit_at(self._safe_table, ch):
+ return _write_char(writer, ch, False)
+
+ return _write_utf8(writer, ch)
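Observable behaviour of _Quoter (identical in the pure-Python fallback, yarl/_quoting_py.py below; this C version needs the built extension):

    q = _Quoter()                            # e.g. from yarl._quoting_py import _Quoter
    assert q("a b") == "a%20b"               # unsafe chars are percent-encoded
    assert q("%7e") == "~"                   # requote=True decodes escapes of safe chars
    assert _Quoter(qs=True)("a b") == "a+b"  # query-string mode writes '+' for space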
+
+
+cdef class _Unquoter:
+ cdef str _unsafe
+ cdef bint _qs
+ cdef _Quoter _quoter
+ cdef _Quoter _qs_quoter
+
+ def __init__(self, *, unsafe='', qs=False):
+ self._unsafe = unsafe
+ self._qs = qs
+ self._quoter = _Quoter()
+ self._qs_quoter = _Quoter(qs=True)
+
+ def __call__(self, val):
+ if val is None:
+ return None
+ if type(val) is not str:
+ if isinstance(val, str):
+ # derived from str
+ val = str(val)
+ else:
+ raise TypeError("Argument should be str")
+ return self._do_unquote(<str>val)
+
+ cdef str _do_unquote(self, str val):
+ if len(val) == 0:
+ return val
+ cdef list ret = []
+ cdef char buffer[4]
+ cdef Py_ssize_t buflen = 0
+ cdef Py_ssize_t consumed
+ cdef str unquoted
+ cdef Py_UCS4 ch = 0
+ cdef Py_ssize_t idx = 0
+ cdef Py_ssize_t length = len(val)
+ cdef Py_ssize_t start_pct
+
+ while idx < length:
+ ch = val[idx]
+ idx += 1
+ if ch == '%' and idx <= length - 2:
+ ch = _restore_ch(val[idx], val[idx + 1])
+ if ch != <Py_UCS4>-1:
+ idx += 2
+ assert buflen < 4
+ buffer[buflen] = ch
+ buflen += 1
+ try:
+ unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ NULL, &consumed)
+ except UnicodeDecodeError:
+ start_pct = idx - buflen * 3
+ buffer[0] = ch
+ buflen = 1
+ ret.append(val[start_pct : idx - 3])
+ try:
+ unquoted = PyUnicode_DecodeUTF8Stateful(buffer, buflen,
+ NULL, &consumed)
+ except UnicodeDecodeError:
+ buflen = 0
+ ret.append(val[idx - 3 : idx])
+ continue
+ if not unquoted:
+ assert consumed == 0
+ continue
+ assert consumed == buflen
+ buflen = 0
+ if self._qs and unquoted in '+=&;':
+ ret.append(self._qs_quoter(unquoted))
+ elif unquoted in self._unsafe:
+ ret.append(self._quoter(unquoted))
+ else:
+ ret.append(unquoted)
+ continue
+ else:
+ ch = '%'
+
+ if buflen:
+ start_pct = idx - 1 - buflen * 3
+ ret.append(val[start_pct : idx - 1])
+ buflen = 0
+
+ if ch == '+':
+ if not self._qs or ch in self._unsafe:
+ ret.append('+')
+ else:
+ ret.append(' ')
+ continue
+
+ if ch in self._unsafe:
+ ret.append('%')
+ h = hex(ord(ch)).upper()[2:]
+ for ch in h:
+ ret.append(ch)
+ continue
+
+ ret.append(ch)
+
+ if buflen:
+ ret.append(val[length - buflen * 3 : length])
+
+ return ''.join(ret)
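And the inverse direction, again matching the pure-Python _Unquoter below:

    unq = _Unquoter()                        # e.g. from yarl._quoting_py import _Unquoter
    assert unq("a%20b") == "a b"
    assert _Unquoter(qs=True)("a+b") == "a b"         # qs mode also maps '+' to space
    assert _Unquoter(unsafe="/")("a%2Fb") == "a%2Fb"  # unsafe chars stay escaped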
diff --git a/third_party/python/yarl/yarl/_quoting_py.py b/third_party/python/yarl/yarl/_quoting_py.py
new file mode 100644
index 0000000000..d6f33e15bd
--- /dev/null
+++ b/third_party/python/yarl/yarl/_quoting_py.py
@@ -0,0 +1,198 @@
+import codecs
+import re
+from string import ascii_letters, ascii_lowercase, digits
+from typing import Optional, cast
+
+
+BASCII_LOWERCASE = ascii_lowercase.encode("ascii")
+BPCT_ALLOWED = {"%{:02X}".format(i).encode("ascii") for i in range(256)}
+GEN_DELIMS = ":/?#[]@"
+SUB_DELIMS_WITHOUT_QS = "!$'()*,"
+SUB_DELIMS = SUB_DELIMS_WITHOUT_QS + "+&=;"
+RESERVED = GEN_DELIMS + SUB_DELIMS
+UNRESERVED = ascii_letters + digits + "-._~"
+ALLOWED = UNRESERVED + SUB_DELIMS_WITHOUT_QS
+
+
+_IS_HEX = re.compile(b"[A-Z0-9][A-Z0-9]")
+_IS_HEX_STR = re.compile("[A-Fa-f0-9][A-Fa-f0-9]")
+
+utf8_decoder = codecs.getincrementaldecoder("utf-8")
+
+
+class _Quoter:
+ def __init__(
+ self,
+ *,
+ safe: str = "",
+ protected: str = "",
+ qs: bool = False,
+ requote: bool = True
+ ) -> None:
+ self._safe = safe
+ self._protected = protected
+ self._qs = qs
+ self._requote = requote
+
+ def __call__(self, val: Optional[str]) -> Optional[str]:
+ if val is None:
+ return None
+ if not isinstance(val, str):
+ raise TypeError("Argument should be str")
+ if not val:
+ return ""
+ bval = cast(str, val).encode("utf8", errors="ignore")
+ ret = bytearray()
+ pct = bytearray()
+ safe = self._safe
+ safe += ALLOWED
+ if not self._qs:
+ safe += "+&=;"
+ safe += self._protected
+ bsafe = safe.encode("ascii")
+ idx = 0
+ while idx < len(bval):
+ ch = bval[idx]
+ idx += 1
+
+ if pct:
+ if ch in BASCII_LOWERCASE:
+ ch = ch - 32 # convert to uppercase
+ pct.append(ch)
+ if len(pct) == 3: # pragma: no branch # peephole optimizer
+ buf = pct[1:]
+ if not _IS_HEX.match(buf):
+ ret.extend(b"%25")
+ pct.clear()
+ idx -= 2
+ continue
+ try:
+ unquoted = chr(int(pct[1:].decode("ascii"), base=16))
+ except ValueError:
+ ret.extend(b"%25")
+ pct.clear()
+ idx -= 2
+ continue
+
+ if unquoted in self._protected:
+ ret.extend(pct)
+ elif unquoted in safe:
+ ret.append(ord(unquoted))
+ else:
+ ret.extend(pct)
+ pct.clear()
+
+ # special case, if we have only one char after "%"
+ elif len(pct) == 2 and idx == len(bval):
+ ret.extend(b"%25")
+ pct.clear()
+ idx -= 1
+
+ continue
+
+ elif ch == ord("%") and self._requote:
+ pct.clear()
+ pct.append(ch)
+
+ # special case if "%" is last char
+ if idx == len(bval):
+ ret.extend(b"%25")
+
+ continue
+
+ if self._qs:
+ if ch == ord(" "):
+ ret.append(ord("+"))
+ continue
+ if ch in bsafe:
+ ret.append(ch)
+ continue
+
+ ret.extend(("%{:02X}".format(ch)).encode("ascii"))
+
+ ret2 = ret.decode("ascii")
+ if ret2 == val:
+ return val
+ return ret2
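The pct buffer is what implements requoting: well-formed escapes are normalized or decoded, while a bare or malformed '%' is itself escaped as '%25'. For example:

    from yarl._quoting_py import _Quoter

    assert _Quoter()("100%") == "100%25"     # trailing bare '%'
    assert _Quoter()("%zz") == "%25zz"       # malformed escape is re-escaped
    assert _Quoter()("%7e") == "~"           # escape of a safe char is decoded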
+
+
+class _Unquoter:
+ def __init__(self, *, unsafe: str = "", qs: bool = False) -> None:
+ self._unsafe = unsafe
+ self._qs = qs
+ self._quoter = _Quoter()
+ self._qs_quoter = _Quoter(qs=True)
+
+ def __call__(self, val: Optional[str]) -> Optional[str]:
+ if val is None:
+ return None
+ if not isinstance(val, str):
+ raise TypeError("Argument should be str")
+ if not val:
+ return ""
+ decoder = cast(codecs.BufferedIncrementalDecoder, utf8_decoder())
+ ret = []
+ idx = 0
+ while idx < len(val):
+ ch = val[idx]
+ idx += 1
+ if ch == "%" and idx <= len(val) - 2:
+ pct = val[idx : idx + 2]
+ if _IS_HEX_STR.fullmatch(pct):
+ b = bytes([int(pct, base=16)])
+ idx += 2
+ try:
+ unquoted = decoder.decode(b)
+ except UnicodeDecodeError:
+ start_pct = idx - 3 - len(decoder.buffer) * 3
+ ret.append(val[start_pct : idx - 3])
+ decoder.reset()
+ try:
+ unquoted = decoder.decode(b)
+ except UnicodeDecodeError:
+ ret.append(val[idx - 3 : idx])
+ continue
+ if not unquoted:
+ continue
+ if self._qs and unquoted in "+=&;":
+ to_add = self._qs_quoter(unquoted)
+ if to_add is None: # pragma: no cover
+ raise RuntimeError("Cannot quote None")
+ ret.append(to_add)
+ elif unquoted in self._unsafe:
+ to_add = self._quoter(unquoted)
+ if to_add is None: # pragma: no cover
+ raise RuntimeError("Cannot quote None")
+ ret.append(to_add)
+ else:
+ ret.append(unquoted)
+ continue
+
+ if decoder.buffer:
+ start_pct = idx - 1 - len(decoder.buffer) * 3
+ ret.append(val[start_pct : idx - 1])
+ decoder.reset()
+
+ if ch == "+":
+ if not self._qs or ch in self._unsafe:
+ ret.append("+")
+ else:
+ ret.append(" ")
+ continue
+
+ if ch in self._unsafe:
+ ret.append("%")
+ h = hex(ord(ch)).upper()[2:]
+ for ch in h:
+ ret.append(ch)
+ continue
+
+ ret.append(ch)
+
+ if decoder.buffer:
+ ret.append(val[-len(decoder.buffer) * 3 :])
+
+ ret2 = "".join(ret)
+ if ret2 == val:
+ return val
+ return ret2
diff --git a/third_party/python/yarl/yarl/_url.py b/third_party/python/yarl/yarl/_url.py
new file mode 100644
index 0000000000..99c424514a
--- /dev/null
+++ b/third_party/python/yarl/yarl/_url.py
@@ -0,0 +1,1144 @@
+import functools
+import sys
+import warnings
+from collections.abc import Mapping, Sequence
+from ipaddress import ip_address
+from urllib.parse import SplitResult, parse_qsl, urljoin, urlsplit, urlunsplit, quote
+
+from multidict import MultiDict, MultiDictProxy
+import idna
+
+import math
+
+
+from ._quoting import _Quoter, _Unquoter
+
+
+DEFAULT_PORTS = {"http": 80, "https": 443, "ws": 80, "wss": 443}
+
+sentinel = object()
+
+
+def rewrite_module(obj: object) -> object:
+ obj.__module__ = "yarl"
+ return obj
+
+
+class cached_property:
+ """Use as a class method decorator. It operates almost exactly like
+ the Python `@property` decorator, but it puts the result of the
+ method it decorates into the instance dict after the first call,
+ effectively replacing the function it decorates with an instance
+ variable. It is, in Python parlance, a data descriptor.
+
+ """
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+ try:
+ self.__doc__ = wrapped.__doc__
+ except AttributeError: # pragma: no cover
+ self.__doc__ = ""
+ self.name = wrapped.__name__
+
+ def __get__(self, inst, owner, _sentinel=sentinel):
+ if inst is None:
+ return self
+ val = inst._cache.get(self.name, _sentinel)
+ if val is not _sentinel:
+ return val
+ val = self.wrapped(inst)
+ inst._cache[self.name] = val
+ return val
+
+ def __set__(self, inst, value):
+ raise AttributeError("cached property is read-only")
+
+
+@rewrite_module
+class URL:
+ # Don't derive from str
+ # follow pathlib.Path design
+    # URL will probably not suffer from pathlib's problems:
+ # it's intended for libraries like aiohttp,
+ # not to be passed into standard library functions like os.open etc.
+
+ # URL grammar (RFC 3986)
+ # pct-encoded = "%" HEXDIG HEXDIG
+ # reserved = gen-delims / sub-delims
+ # gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
+ # sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
+ # / "*" / "+" / "," / ";" / "="
+ # unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
+ # URI = scheme ":" hier-part [ "?" query ] [ "#" fragment ]
+ # hier-part = "//" authority path-abempty
+ # / path-absolute
+ # / path-rootless
+ # / path-empty
+ # scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
+ # authority = [ userinfo "@" ] host [ ":" port ]
+ # userinfo = *( unreserved / pct-encoded / sub-delims / ":" )
+ # host = IP-literal / IPv4address / reg-name
+ # IP-literal = "[" ( IPv6address / IPvFuture ) "]"
+ # IPvFuture = "v" 1*HEXDIG "." 1*( unreserved / sub-delims / ":" )
+ # IPv6address = 6( h16 ":" ) ls32
+ # / "::" 5( h16 ":" ) ls32
+ # / [ h16 ] "::" 4( h16 ":" ) ls32
+ # / [ *1( h16 ":" ) h16 ] "::" 3( h16 ":" ) ls32
+ # / [ *2( h16 ":" ) h16 ] "::" 2( h16 ":" ) ls32
+ # / [ *3( h16 ":" ) h16 ] "::" h16 ":" ls32
+ # / [ *4( h16 ":" ) h16 ] "::" ls32
+ # / [ *5( h16 ":" ) h16 ] "::" h16
+ # / [ *6( h16 ":" ) h16 ] "::"
+ # ls32 = ( h16 ":" h16 ) / IPv4address
+ # ; least-significant 32 bits of address
+ # h16 = 1*4HEXDIG
+ # ; 16 bits of address represented in hexadecimal
+ # IPv4address = dec-octet "." dec-octet "." dec-octet "." dec-octet
+ # dec-octet = DIGIT ; 0-9
+ # / %x31-39 DIGIT ; 10-99
+ # / "1" 2DIGIT ; 100-199
+ # / "2" %x30-34 DIGIT ; 200-249
+ # / "25" %x30-35 ; 250-255
+ # reg-name = *( unreserved / pct-encoded / sub-delims )
+ # port = *DIGIT
+ # path = path-abempty ; begins with "/" or is empty
+ # / path-absolute ; begins with "/" but not "//"
+ # / path-noscheme ; begins with a non-colon segment
+ # / path-rootless ; begins with a segment
+ # / path-empty ; zero characters
+ # path-abempty = *( "/" segment )
+ # path-absolute = "/" [ segment-nz *( "/" segment ) ]
+ # path-noscheme = segment-nz-nc *( "/" segment )
+ # path-rootless = segment-nz *( "/" segment )
+ # path-empty = 0<pchar>
+ # segment = *pchar
+ # segment-nz = 1*pchar
+ # segment-nz-nc = 1*( unreserved / pct-encoded / sub-delims / "@" )
+ # ; non-zero-length segment without any colon ":"
+ # pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
+ # query = *( pchar / "/" / "?" )
+ # fragment = *( pchar / "/" / "?" )
+ # URI-reference = URI / relative-ref
+ # relative-ref = relative-part [ "?" query ] [ "#" fragment ]
+ # relative-part = "//" authority path-abempty
+ # / path-absolute
+ # / path-noscheme
+ # / path-empty
+ # absolute-URI = scheme ":" hier-part [ "?" query ]
+ __slots__ = ("_cache", "_val")
+
+ _QUOTER = _Quoter(requote=False)
+ _REQUOTER = _Quoter()
+ _PATH_QUOTER = _Quoter(safe="@:", protected="/+", requote=False)
+ _PATH_REQUOTER = _Quoter(safe="@:", protected="/+")
+ _QUERY_QUOTER = _Quoter(safe="?/:@", protected="=+&;", qs=True, requote=False)
+ _QUERY_REQUOTER = _Quoter(safe="?/:@", protected="=+&;", qs=True)
+ _QUERY_PART_QUOTER = _Quoter(safe="?/:@", qs=True, requote=False)
+ _FRAGMENT_QUOTER = _Quoter(safe="?/:@", requote=False)
+ _FRAGMENT_REQUOTER = _Quoter(safe="?/:@")
+
+ _UNQUOTER = _Unquoter()
+ _PATH_UNQUOTER = _Unquoter(unsafe="+")
+ _QS_UNQUOTER = _Unquoter(qs=True)
+
+ def __new__(cls, val="", *, encoded=False, strict=None):
+ if strict is not None: # pragma: no cover
+ warnings.warn("strict parameter is ignored")
+ if type(val) is cls:
+ return val
+ if type(val) is str:
+ val = urlsplit(val)
+ elif type(val) is SplitResult:
+ if not encoded:
+ raise ValueError("Cannot apply decoding to SplitResult")
+ elif isinstance(val, str):
+ val = urlsplit(str(val))
+ else:
+ raise TypeError("Constructor parameter should be str")
+
+ if not encoded:
+ if not val[1]: # netloc
+ netloc = ""
+ host = ""
+ else:
+ host = val.hostname
+ if host is None:
+ raise ValueError("Invalid URL: host is required for absolute urls")
+
+ try:
+ port = val.port
+ except ValueError as e:
+ raise ValueError(
+ "Invalid URL: port can't be converted to integer"
+ ) from e
+
+ netloc = cls._make_netloc(
+ val.username, val.password, host, port, encode=True, requote=True
+ )
+ path = cls._PATH_REQUOTER(val[2])
+ if netloc:
+ path = cls._normalize_path(path)
+
+ cls._validate_authority_uri_abs_path(host=host, path=path)
+ query = cls._QUERY_REQUOTER(val[3])
+ fragment = cls._FRAGMENT_REQUOTER(val[4])
+ val = SplitResult(val[0], netloc, path, query, fragment)
+
+ self = object.__new__(cls)
+ self._val = val
+ self._cache = {}
+ return self
+
+ @classmethod
+ def build(
+ cls,
+ *,
+ scheme="",
+ authority="",
+ user=None,
+ password=None,
+ host="",
+ port=None,
+ path="",
+ query=None,
+ query_string="",
+ fragment="",
+ encoded=False
+ ):
+ """Creates and returns a new URL"""
+
+ if authority and (user or password or host or port):
+ raise ValueError(
+ 'Can\'t mix "authority" with "user", "password", "host" or "port".'
+ )
+ if port and not host:
+ raise ValueError('Can\'t build URL with "port" but without "host".')
+ if query and query_string:
+ raise ValueError('Only one of "query" or "query_string" should be passed')
+ if (
+ scheme is None
+ or authority is None
+ or path is None
+ or query_string is None
+ or fragment is None
+ ):
+ raise TypeError(
+ 'NoneType is illegal for "scheme", "authority", "path", '
+ '"query_string", and "fragment" args, use empty string instead.'
+ )
+
+ if authority:
+ if encoded:
+ netloc = authority
+ else:
+ tmp = SplitResult("", authority, "", "", "")
+ netloc = cls._make_netloc(
+ tmp.username, tmp.password, tmp.hostname, tmp.port, encode=True
+ )
+ elif not user and not password and not host and not port:
+ netloc = ""
+ else:
+ netloc = cls._make_netloc(
+ user, password, host, port, encode=not encoded, encode_host=not encoded
+ )
+ if not encoded:
+ path = cls._PATH_QUOTER(path)
+ if netloc:
+ path = cls._normalize_path(path)
+
+ cls._validate_authority_uri_abs_path(host=host, path=path)
+ query_string = cls._QUERY_QUOTER(query_string)
+ fragment = cls._FRAGMENT_QUOTER(fragment)
+
+ url = cls(
+ SplitResult(scheme, netloc, path, query_string, fragment), encoded=True
+ )
+
+ if query:
+ return url.with_query(query)
+ else:
+ return url
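For example:

    url = URL.build(scheme="https", host="example.com", path="/path", query={"a": "1"})
    assert str(url) == "https://example.com/path?a=1"
    URL.build(authority="user@example.com", host="x")   # raises ValueError: can't mix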
+
+ def __init_subclass__(cls):
+ raise TypeError("Inheritance a class {!r} from URL is forbidden".format(cls))
+
+ def __str__(self):
+ val = self._val
+ if not val.path and self.is_absolute() and (val.query or val.fragment):
+ val = val._replace(path="/")
+ return urlunsplit(val)
+
+ def __repr__(self):
+ return "{}('{}')".format(self.__class__.__name__, str(self))
+
+ def __eq__(self, other):
+        if type(other) is not URL:
+ return NotImplemented
+
+ val1 = self._val
+ if not val1.path and self.is_absolute():
+ val1 = val1._replace(path="/")
+
+ val2 = other._val
+ if not val2.path and other.is_absolute():
+ val2 = val2._replace(path="/")
+
+ return val1 == val2
+
+ def __hash__(self):
+ ret = self._cache.get("hash")
+ if ret is None:
+ val = self._val
+ if not val.path and self.is_absolute():
+ val = val._replace(path="/")
+ ret = self._cache["hash"] = hash(val)
+ return ret
+
+ def __le__(self, other):
+        if type(other) is not URL:
+ return NotImplemented
+ return self._val <= other._val
+
+ def __lt__(self, other):
+        if type(other) is not URL:
+ return NotImplemented
+ return self._val < other._val
+
+ def __ge__(self, other):
+        if type(other) is not URL:
+ return NotImplemented
+ return self._val >= other._val
+
+ def __gt__(self, other):
+        if type(other) is not URL:
+ return NotImplemented
+ return self._val > other._val
+
+ def __truediv__(self, name):
+ name = self._PATH_QUOTER(name)
+ if name.startswith("/"):
+ raise ValueError(
+ "Appending path {!r} starting from slash is forbidden".format(name)
+ )
+ path = self._val.path
+ if path == "/":
+ new_path = "/" + name
+ elif not path and not self.is_absolute():
+ new_path = name
+ else:
+ parts = path.rstrip("/").split("/")
+ parts.append(name)
+ new_path = "/".join(parts)
+ if self.is_absolute():
+ new_path = self._normalize_path(new_path)
+ return URL(
+ self._val._replace(path=new_path, query="", fragment=""), encoded=True
+ )
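The division operator appends a single quoted segment and resets query and fragment:

    base = URL("https://example.com/a?q=1#frag")
    assert str(base / "b") == "https://example.com/a/b"
    base / "/abs"                            # raises ValueError: leading slash forbidden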
+
+ def __mod__(self, query):
+ return self.update_query(query)
+
+ def __bool__(self) -> bool:
+ return bool(
+ self._val.netloc or self._val.path or self._val.query or self._val.fragment
+ )
+
+ def __getstate__(self):
+ return (self._val,)
+
+ def __setstate__(self, state):
+ if state[0] is None and isinstance(state[1], dict):
+ # default style pickle
+ self._val = state[1]["_val"]
+ else:
+ self._val, *unused = state
+ self._cache = {}
+
+ def is_absolute(self):
+ """A check for absolute URLs.
+
+ Return True for absolute ones (having scheme or starting
+ with //), False otherwise.
+
+ """
+ return self.raw_host is not None
+
+ def is_default_port(self):
+ """A check for default port.
+
+ Return True if port is default for specified scheme,
+ e.g. 'http://python.org' or 'http://python.org:80', False
+ otherwise.
+
+ """
+ if self.port is None:
+ return False
+ default = DEFAULT_PORTS.get(self.scheme)
+ if default is None:
+ return False
+ return self.port == default
+
+ def origin(self):
+ """Return an URL with scheme, host and port parts only.
+
+ user, password, path, query and fragment are removed.
+
+ """
+ # TODO: add a keyword-only option for keeping user/pass maybe?
+ if not self.is_absolute():
+ raise ValueError("URL should be absolute")
+ if not self._val.scheme:
+ raise ValueError("URL should have scheme")
+ v = self._val
+ netloc = self._make_netloc(None, None, v.hostname, v.port)
+ val = v._replace(netloc=netloc, path="", query="", fragment="")
+ return URL(val, encoded=True)
+
+ def relative(self):
+ """Return a relative part of the URL.
+
+ scheme, user, password, host and port are removed.
+
+ """
+ if not self.is_absolute():
+ raise ValueError("URL should be absolute")
+ val = self._val._replace(scheme="", netloc="")
+ return URL(val, encoded=True)
+
+ @property
+ def scheme(self):
+ """Scheme for absolute URLs.
+
+ Empty string for relative URLs or URLs starting with //
+
+ """
+ return self._val.scheme
+
+ @property
+ def raw_authority(self):
+ """Encoded authority part of URL.
+
+ Empty string for relative URLs.
+
+ """
+ return self._val.netloc
+
+ @cached_property
+ def authority(self):
+ """Decoded authority part of URL.
+
+ Empty string for relative URLs.
+
+ """
+ return self._make_netloc(
+ self.user, self.password, self.host, self.port, encode_host=False
+ )
+
+ @property
+ def raw_user(self):
+ """Encoded user part of URL.
+
+ None if user is missing.
+
+ """
+ # not .username
+ ret = self._val.username
+ if not ret:
+ return None
+ return ret
+
+ @cached_property
+ def user(self):
+ """Decoded user part of URL.
+
+ None if user is missing.
+
+ """
+ return self._UNQUOTER(self.raw_user)
+
+ @property
+ def raw_password(self):
+ """Encoded password part of URL.
+
+ None if password is missing.
+
+ """
+ return self._val.password
+
+ @cached_property
+ def password(self):
+ """Decoded password part of URL.
+
+ None if password is missing.
+
+ """
+ return self._UNQUOTER(self.raw_password)
+
+ @property
+ def raw_host(self):
+ """Encoded host part of URL.
+
+ None for relative URLs.
+
+ """
+        # Use host instead of hostname for the sake of brevity;
+        # may add a .hostname prop later
+ return self._val.hostname
+
+ @cached_property
+ def host(self):
+ """Decoded host part of URL.
+
+ None for relative URLs.
+
+ """
+ raw = self.raw_host
+ if raw is None:
+ return None
+ if "%" in raw:
+ # Hack for scoped IPv6 addresses like
+ # fe80::2%Проверка
+            # a '%' sign can appear only in a scoped IPv6 address, so IDNA decoding is pointless.
+ return raw
+ return _idna_decode(raw)
+
+ @property
+ def port(self):
+ """Port part of URL, with scheme-based fallback.
+
+ None for relative URLs or URLs without explicit port and
+ scheme without default port substitution.
+
+ """
+ return self._val.port or DEFAULT_PORTS.get(self._val.scheme)
+
+ @property
+ def explicit_port(self):
+ """Port part of URL, without scheme-based fallback.
+
+ None for relative URLs or URLs without explicit port.
+
+ """
+ return self._val.port
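The two accessors differ only in the DEFAULT_PORTS fallback:

    assert URL("http://example.com").port == 80             # falls back via scheme
    assert URL("http://example.com").explicit_port is None
    assert URL("http://example.com:8080").explicit_port == 8080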
+
+ @property
+ def raw_path(self):
+ """Encoded path of URL.
+
+ / for absolute URLs without path part.
+
+ """
+ ret = self._val.path
+ if not ret and self.is_absolute():
+ ret = "/"
+ return ret
+
+ @cached_property
+ def path(self):
+ """Decoded path of URL.
+
+ / for absolute URLs without path part.
+
+ """
+ return self._PATH_UNQUOTER(self.raw_path)
+
+ @cached_property
+ def query(self):
+ """A MultiDictProxy representing parsed query parameters in decoded
+ representation.
+
+ Empty value if URL has no query part.
+
+ """
+ ret = MultiDict(parse_qsl(self.raw_query_string, keep_blank_values=True))
+ return MultiDictProxy(ret)
+
+ @property
+ def raw_query_string(self):
+ """Encoded query part of URL.
+
+ Empty string if query is missing.
+
+ """
+ return self._val.query
+
+ @cached_property
+ def query_string(self):
+ """Decoded query part of URL.
+
+ Empty string if query is missing.
+
+ """
+ return self._QS_UNQUOTER(self.raw_query_string)
+
+ @cached_property
+ def path_qs(self):
+ """Decoded path of URL with query."""
+ if not self.query_string:
+ return self.path
+ return "{}?{}".format(self.path, self.query_string)
+
+ @cached_property
+ def raw_path_qs(self):
+ """Encoded path of URL with query."""
+ if not self.raw_query_string:
+ return self.raw_path
+ return "{}?{}".format(self.raw_path, self.raw_query_string)
+
+ @property
+ def raw_fragment(self):
+ """Encoded fragment part of URL.
+
+ Empty string if fragment is missing.
+
+ """
+ return self._val.fragment
+
+ @cached_property
+ def fragment(self):
+ """Decoded fragment part of URL.
+
+ Empty string if fragment is missing.
+
+ """
+ return self._UNQUOTER(self.raw_fragment)
+
+ @cached_property
+ def raw_parts(self):
+ """A tuple containing encoded *path* parts.
+
+ ('/',) for absolute URLs if *path* is missing.
+
+ """
+ path = self._val.path
+ if self.is_absolute():
+ if not path:
+ parts = ["/"]
+ else:
+ parts = ["/"] + path[1:].split("/")
+ else:
+ if path.startswith("/"):
+ parts = ["/"] + path[1:].split("/")
+ else:
+ parts = path.split("/")
+ return tuple(parts)
+
+ @cached_property
+ def parts(self):
+ """A tuple containing decoded *path* parts.
+
+ ('/',) for absolute URLs if *path* is missing.
+
+ """
+ return tuple(self._UNQUOTER(part) for part in self.raw_parts)
+
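+    # Illustrative sketch for raw_parts/parts:
+    #   URL("http://example.com/a/b").raw_parts -> ('/', 'a', 'b')
+    #   URL("a/b").raw_parts                    -> ('a', 'b')
+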
+ @cached_property
+ def parent(self):
+        """A new URL with the last path part removed and with the query
+        and fragment cleared.
+
+ """
+ path = self.raw_path
+ if not path or path == "/":
+ if self.raw_fragment or self.raw_query_string:
+ return URL(self._val._replace(query="", fragment=""), encoded=True)
+ return self
+ parts = path.split("/")
+ val = self._val._replace(path="/".join(parts[:-1]), query="", fragment="")
+ return URL(val, encoded=True)
+
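+    # Illustrative sketch:
+    #   URL("http://example.com/a/b?x=1#f").parent -> URL('http://example.com/a')
+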
+ @cached_property
+ def raw_name(self):
+ """The last part of raw_parts."""
+        parts = self.raw_parts
+        if self.is_absolute():
+            parts = parts[1:]
+            if not parts:
+                return ""
+        return parts[-1]
+
+ @cached_property
+ def name(self):
+ """The last part of parts."""
+ return self._UNQUOTER(self.raw_name)
+
+ @staticmethod
+ def _validate_authority_uri_abs_path(host, path):
+ """Ensure that path in URL with authority starts with a leading slash.
+
+ Raise ValueError if not.
+ """
+ if len(host) > 0 and len(path) > 0 and not path.startswith("/"):
+ raise ValueError(
+ "Path in a URL with authority should start with a slash ('/') if set"
+ )
+
+ @classmethod
+ def _normalize_path(cls, path):
+ # Drop '.' and '..' from path
+
+ segments = path.split("/")
+ resolved_path = []
+
+ for seg in segments:
+ if seg == "..":
+ try:
+ resolved_path.pop()
+ except IndexError:
+                    # Per RFC 3986, a ".." segment that would pop past the
+                    # root of resolved_path is simply ignored.
+ pass
+ elif seg == ".":
+ continue
+ else:
+ resolved_path.append(seg)
+
+ if segments[-1] in (".", ".."):
+            # If the last segment was "." or "..", the path denotes a
+            # directory, so restore the trailing "/".
+ resolved_path.append("")
+
+ return "/".join(resolved_path)
+
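+    # Sketch of the dot-segment removal above (illustrative):
+    #   URL._normalize_path("/a/./b/../c") -> "/a/c"
+    #   URL._normalize_path("/a/b/..")     -> "/a/"
+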
+ if sys.version_info >= (3, 7):
+
+ @classmethod
+ def _encode_host(cls, host, human=False):
+ try:
+ ip, sep, zone = host.partition("%")
+ ip = ip_address(ip)
+ except ValueError:
+ host = host.lower()
+            # IDNA encoding is slow, so skip it for ASCII-only strings.
+            # The check is kept out of the _idna_encode() helper so that
+            # ASCII-only hosts do not fill its LRU cache.
+ if human or host.isascii():
+ return host
+ host = _idna_encode(host)
+ else:
+ host = ip.compressed
+ if sep:
+ host += "%" + zone
+ if ip.version == 6:
+ host = "[" + host + "]"
+ return host
+
+ else:
+        # Workaround for the missing str.isascii() in Python <= 3.6.
+ @classmethod
+ def _encode_host(cls, host, human=False):
+ try:
+ ip, sep, zone = host.partition("%")
+ ip = ip_address(ip)
+ except ValueError:
+ host = host.lower()
+ if human:
+ return host
+
+ for char in host:
+ if char > "\x7f":
+ break
+ else:
+ return host
+ host = _idna_encode(host)
+ else:
+ host = ip.compressed
+ if sep:
+ host += "%" + zone
+ if ip.version == 6:
+ host = "[" + host + "]"
+ return host
+
+ @classmethod
+ def _make_netloc(
+ cls, user, password, host, port, encode=False, encode_host=True, requote=False
+ ):
+ quoter = cls._REQUOTER if requote else cls._QUOTER
+ if encode_host:
+ ret = cls._encode_host(host)
+ else:
+ ret = host
+ if port:
+ ret = ret + ":" + str(port)
+ if password is not None:
+ if not user:
+ user = ""
+ else:
+ if encode:
+ user = quoter(user)
+ if encode:
+ password = quoter(password)
+ user = user + ":" + password
+ elif user and encode:
+ user = quoter(user)
+ if user:
+ ret = user + "@" + ret
+ return ret
+
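+    # Sketch of how the authority string is assembled (illustrative):
+    #   URL._make_netloc("u", "p", "example.com", 8080)   -> 'u:p@example.com:8080'
+    #   URL._make_netloc(None, None, "example.com", None) -> 'example.com'
+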
+ def with_scheme(self, scheme):
+ """Return a new URL with scheme replaced."""
+        # N.B. doesn't clean up query/fragment
+ if not isinstance(scheme, str):
+ raise TypeError("Invalid scheme type")
+ if not self.is_absolute():
+ raise ValueError("scheme replacement is not allowed for relative URLs")
+ return URL(self._val._replace(scheme=scheme.lower()), encoded=True)
+
+ def with_user(self, user):
+ """Return a new URL with user replaced.
+
+ Autoencode user if needed.
+
+ Clear user/password if user is None.
+
+ """
+        # N.B. doesn't clean up query/fragment
+ val = self._val
+ if user is None:
+ password = None
+ elif isinstance(user, str):
+ user = self._QUOTER(user)
+ password = val.password
+ else:
+ raise TypeError("Invalid user type")
+ if not self.is_absolute():
+ raise ValueError("user replacement is not allowed for relative URLs")
+ return URL(
+ self._val._replace(
+ netloc=self._make_netloc(user, password, val.hostname, val.port)
+ ),
+ encoded=True,
+ )
+
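+    # Illustrative sketch for with_user:
+    #   URL("http://example.com/").with_user("john")    -> URL('http://john@example.com/')
+    #   URL("http://john@example.com/").with_user(None) -> URL('http://example.com/')
+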
+ def with_password(self, password):
+ """Return a new URL with password replaced.
+
+ Autoencode password if needed.
+
+ Clear password if argument is None.
+
+ """
+        # N.B. doesn't clean up query/fragment
+ if password is None:
+ pass
+ elif isinstance(password, str):
+ password = self._QUOTER(password)
+ else:
+ raise TypeError("Invalid password type")
+ if not self.is_absolute():
+ raise ValueError("password replacement is not allowed for relative URLs")
+ val = self._val
+ return URL(
+ self._val._replace(
+ netloc=self._make_netloc(val.username, password, val.hostname, val.port)
+ ),
+ encoded=True,
+ )
+
+ def with_host(self, host):
+ """Return a new URL with host replaced.
+
+ Autoencode host if needed.
+
+ Changing host for relative URLs is not allowed, use .join()
+ instead.
+
+ """
+        # N.B. doesn't clean up query/fragment
+ if not isinstance(host, str):
+ raise TypeError("Invalid host type")
+ if not self.is_absolute():
+ raise ValueError("host replacement is not allowed for relative URLs")
+ if not host:
+ raise ValueError("host removing is not allowed")
+ val = self._val
+ return URL(
+ self._val._replace(
+ netloc=self._make_netloc(val.username, val.password, host, val.port)
+ ),
+ encoded=True,
+ )
+
+ def with_port(self, port):
+ """Return a new URL with port replaced.
+
+ Clear port to default if None is passed.
+
+ """
+        # N.B. doesn't clean up query/fragment
+ if port is not None and not isinstance(port, int):
+ raise TypeError("port should be int or None, got {}".format(type(port)))
+ if not self.is_absolute():
+ raise ValueError("port replacement is not allowed for relative URLs")
+ val = self._val
+ return URL(
+ self._val._replace(
+ netloc=self._make_netloc(
+ val.username, val.password, val.hostname, port, encode=True
+ )
+ ),
+ encoded=True,
+ )
+
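+    # Illustrative sketch for with_port:
+    #   URL("http://example.com/").with_port(8888)      -> URL('http://example.com:8888/')
+    #   URL("http://example.com:8888/").with_port(None) -> URL('http://example.com/')
+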
+ def with_path(self, path, *, encoded=False):
+ """Return a new URL with path replaced."""
+ if not encoded:
+ path = self._PATH_QUOTER(path)
+ if self.is_absolute():
+ path = self._normalize_path(path)
+ if len(path) > 0 and path[0] != "/":
+ path = "/" + path
+ return URL(self._val._replace(path=path, query="", fragment=""), encoded=True)
+
+ @classmethod
+ def _query_seq_pairs(cls, quoter, pairs):
+ for key, val in pairs:
+ if isinstance(val, (list, tuple)):
+ for v in val:
+ yield quoter(key) + "=" + quoter(cls._query_var(v))
+ else:
+ yield quoter(key) + "=" + quoter(cls._query_var(val))
+
+ @staticmethod
+ def _query_var(v):
+ cls = type(v)
+ if issubclass(cls, str):
+ return v
+ if issubclass(cls, float):
+ if math.isinf(v):
+ raise ValueError("float('inf') is not supported")
+ if math.isnan(v):
+ raise ValueError("float('nan') is not supported")
+ return str(float(v))
+ if issubclass(cls, int) and cls is not bool:
+ return str(int(v))
+ raise TypeError(
+ "Invalid variable type: value "
+ "should be str, int or float, got {!r} "
+ "of type {}".format(v, cls)
+ )
+
+ def _get_str_query(self, *args, **kwargs):
+ if kwargs:
+ if len(args) > 0:
+ raise ValueError(
+ "Either kwargs or single query parameter must be present"
+ )
+ query = kwargs
+ elif len(args) == 1:
+ query = args[0]
+ else:
+ raise ValueError("Either kwargs or single query parameter must be present")
+
+ if query is None:
+ query = ""
+ elif isinstance(query, Mapping):
+ quoter = self._QUERY_PART_QUOTER
+ query = "&".join(self._query_seq_pairs(quoter, query.items()))
+ elif isinstance(query, str):
+ query = self._QUERY_QUOTER(query)
+ elif isinstance(query, (bytes, bytearray, memoryview)):
+ raise TypeError(
+ "Invalid query type: bytes, bytearray and memoryview are forbidden"
+ )
+ elif isinstance(query, Sequence):
+ quoter = self._QUERY_PART_QUOTER
+ # We don't expect sequence values if we're given a list of pairs
+ # already; only mappings like builtin `dict` which can't have the
+ # same key pointing to multiple values are allowed to use
+ # `_query_seq_pairs`.
+ query = "&".join(
+ quoter(k) + "=" + quoter(self._query_var(v)) for k, v in query
+ )
+ else:
+ raise TypeError(
+ "Invalid query type: only str, mapping or "
+ "sequence of (key, value) pairs is allowed"
+ )
+
+ return query
+
+ def with_query(self, *args, **kwargs):
+ """Return a new URL with query part replaced.
+
+        Accepts any Mapping (e.g. dict or multidict.MultiDict instance)
+        or str; the argument is autoencoded if needed.
+
+        A sequence of (key, value) pairs is supported as well.
+
+        It can also take an arbitrary number of keyword arguments.
+
+ Clear query if None is passed.
+
+ """
+        # N.B. doesn't clean up the fragment (the query itself is replaced)
+
+ new_query = self._get_str_query(*args, **kwargs)
+ return URL(
+ self._val._replace(path=self._val.path, query=new_query), encoded=True
+ )
+
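+    # Illustrative sketch, following the accepted argument forms above:
+    #   URL("http://example.com/p?old=1").with_query({"a": "1"})     -> URL('http://example.com/p?a=1')
+    #   URL("http://example.com/p").with_query([("a", 1), ("a", 2)]) -> URL('http://example.com/p?a=1&a=2')
+    #   URL("http://example.com/p?a=1").with_query(None)             -> URL('http://example.com/p')
+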
+ def update_query(self, *args, **kwargs):
+ """Return a new URL with query part updated."""
+ s = self._get_str_query(*args, **kwargs)
+ new_query = MultiDict(parse_qsl(s, keep_blank_values=True))
+ query = MultiDict(self.query)
+ query.update(new_query)
+
+ return URL(self._val._replace(query=self._get_str_query(query)), encoded=True)
+
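+    # Illustrative sketch: existing keys are replaced, new keys appended, e.g.
+    #   URL("http://example.com/?a=1&b=2").update_query(b="3") -> URL('http://example.com/?a=1&b=3')
+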
+ def with_fragment(self, fragment):
+ """Return a new URL with fragment replaced.
+
+ Autoencode fragment if needed.
+
+ Clear fragment to default if None is passed.
+
+ """
+        # N.B. doesn't clean up the query
+ if fragment is None:
+ raw_fragment = ""
+ elif not isinstance(fragment, str):
+ raise TypeError("Invalid fragment type")
+ else:
+ raw_fragment = self._FRAGMENT_QUOTER(fragment)
+ if self.raw_fragment == raw_fragment:
+ return self
+ return URL(self._val._replace(fragment=raw_fragment), encoded=True)
+
+ def with_name(self, name):
+ """Return a new URL with name (last part of path) replaced.
+
+ Query and fragment parts are cleaned up.
+
+ Name is encoded if needed.
+
+ """
+        # N.B. DOES clean up query/fragment
+ if not isinstance(name, str):
+ raise TypeError("Invalid name type")
+ if "/" in name:
+ raise ValueError("Slash in name is not allowed")
+ name = self._PATH_QUOTER(name)
+ if name in (".", ".."):
+ raise ValueError(". and .. values are forbidden")
+ parts = list(self.raw_parts)
+ if self.is_absolute():
+ if len(parts) == 1:
+ parts.append(name)
+ else:
+ parts[-1] = name
+ parts[0] = "" # replace leading '/'
+ else:
+ parts[-1] = name
+ if parts[0] == "/":
+ parts[0] = "" # replace leading '/'
+ return URL(
+ self._val._replace(path="/".join(parts), query="", fragment=""),
+ encoded=True,
+ )
+
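+    # Illustrative sketch:
+    #   URL("http://example.com/a/b?x=1").with_name("c") -> URL('http://example.com/a/c')
+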
+ def join(self, url):
+        """Join URLs.
+
+ Construct a full (“absolute”) URL by combining a “base URL”
+ (self) with another URL (url).
+
+ Informally, this uses components of the base URL, in
+ particular the addressing scheme, the network location and
+ (part of) the path, to provide missing components in the
+ relative URL.
+
+ """
+ # See docs for urllib.parse.urljoin
+ if not isinstance(url, URL):
+ raise TypeError("url should be URL")
+ return URL(urljoin(str(self), str(url)), encoded=True)
+
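+    # Illustrative sketch, mirroring urllib.parse.urljoin:
+    #   URL("http://example.com/a/b").join(URL("c"))             -> URL('http://example.com/a/c')
+    #   URL("http://example.com/a/b").join(URL("//other.org/x")) -> URL('http://other.org/x')
+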
+ def human_repr(self):
+ """Return decoded human readable string for URL representation."""
+ user = _human_quote(self.user, "#/:?@")
+ password = _human_quote(self.password, "#/:?@")
+ host = self.host
+ if host:
+ host = self._encode_host(self.host, human=True)
+ path = _human_quote(self.path, "#?")
+ query_string = "&".join(
+ "{}={}".format(_human_quote(k, "#&+;="), _human_quote(v, "#&+;="))
+ for k, v in self.query.items()
+ )
+ fragment = _human_quote(self.fragment, "")
+ return urlunsplit(
+ SplitResult(
+ self.scheme,
+ self._make_netloc(
+ user,
+ password,
+ host,
+ self._val.port,
+ encode_host=False,
+ ),
+ path,
+ query_string,
+ fragment,
+ )
+ )
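+
+    # Illustrative sketch for human_repr:
+    #   URL("http://example.com/%D0%BF%D1%83%D1%82%D1%8C").human_repr()
+    #       -> 'http://example.com/путь'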
+
+
+def _human_quote(s, unsafe):
+ if not s:
+ return s
+ for c in "%" + unsafe:
+ if c in s:
+ s = s.replace(c, "%{:02X}".format(ord(c)))
+ if s.isprintable():
+ return s
+ return "".join(c if c.isprintable() else quote(c) for c in s)
+
+
+_MAXCACHE = 256
+
+
+@functools.lru_cache(_MAXCACHE)
+def _idna_decode(raw):
+ try:
+ return idna.decode(raw.encode("ascii"))
+ except UnicodeError: # e.g. '::1'
+ return raw.encode("ascii").decode("idna")
+
+
+@functools.lru_cache(_MAXCACHE)
+def _idna_encode(host):
+ try:
+ return idna.encode(host, uts46=True).decode("ascii")
+ except UnicodeError:
+ return host.encode("idna").decode("ascii")
+
+
+@rewrite_module
+def cache_clear():
+ _idna_decode.cache_clear()
+ _idna_encode.cache_clear()
+
+
+@rewrite_module
+def cache_info():
+ return {
+ "idna_encode": _idna_encode.cache_info(),
+ "idna_decode": _idna_decode.cache_info(),
+ }
+
+
+@rewrite_module
+def cache_configure(*, idna_encode_size=_MAXCACHE, idna_decode_size=_MAXCACHE):
+ global _idna_decode, _idna_encode
+
+ _idna_encode = functools.lru_cache(idna_encode_size)(_idna_encode.__wrapped__)
+ _idna_decode = functools.lru_cache(idna_decode_size)(_idna_decode.__wrapped__)
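+
+
+# Usage sketch for the cache helpers above (illustrative):
+#   import yarl
+#   yarl.cache_configure(idna_encode_size=64, idna_decode_size=64)
+#   yarl.cache_info()["idna_encode"].maxsize -> 64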
diff --git a/third_party/python/yarl/yarl/py.typed b/third_party/python/yarl/yarl/py.typed
new file mode 100644
index 0000000000..867e2c8492
--- /dev/null
+++ b/third_party/python/yarl/yarl/py.typed
@@ -0,0 +1 @@
+# Placeholder
\ No newline at end of file
diff --git a/third_party/python/zipp/zipp-3.4.1.dist-info/LICENSE b/third_party/python/zipp/zipp-3.4.1.dist-info/LICENSE
new file mode 100644
index 0000000000..353924be0e
--- /dev/null
+++ b/third_party/python/zipp/zipp-3.4.1.dist-info/LICENSE
@@ -0,0 +1,19 @@
+Copyright Jason R. Coombs
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to
+deal in the Software without restriction, including without limitation the
+rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+sell copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+IN THE SOFTWARE.
diff --git a/third_party/python/zipp/zipp-3.4.1.dist-info/METADATA b/third_party/python/zipp/zipp-3.4.1.dist-info/METADATA
new file mode 100644
index 0000000000..b54d9c71bd
--- /dev/null
+++ b/third_party/python/zipp/zipp-3.4.1.dist-info/METADATA
@@ -0,0 +1,54 @@
+Metadata-Version: 2.1
+Name: zipp
+Version: 3.4.1
+Summary: Backport of pathlib-compatible object wrapper for zip files
+Home-page: https://github.com/jaraco/zipp
+Author: Jason R. Coombs
+Author-email: jaraco@jaraco.com
+License: UNKNOWN
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Requires-Python: >=3.6
+Provides-Extra: docs
+Requires-Dist: sphinx ; extra == 'docs'
+Requires-Dist: jaraco.packaging (>=8.2) ; extra == 'docs'
+Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
+Provides-Extra: testing
+Requires-Dist: pytest (>=4.6) ; extra == 'testing'
+Requires-Dist: pytest-checkdocs (>=1.2.3) ; extra == 'testing'
+Requires-Dist: pytest-flake8 ; extra == 'testing'
+Requires-Dist: pytest-cov ; extra == 'testing'
+Requires-Dist: pytest-enabler ; extra == 'testing'
+Requires-Dist: jaraco.itertools ; extra == 'testing'
+Requires-Dist: func-timeout ; extra == 'testing'
+Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
+Requires-Dist: pytest-mypy ; (platform_python_implementation != "PyPy") and extra == 'testing'
+
+.. image:: https://img.shields.io/pypi/v/zipp.svg
+ :target: `PyPI link`_
+
+.. image:: https://img.shields.io/pypi/pyversions/zipp.svg
+ :target: `PyPI link`_
+
+.. _PyPI link: https://pypi.org/project/zipp
+
+.. image:: https://github.com/jaraco/zipp/workflows/tests/badge.svg
+ :target: https://github.com/jaraco/zipp/actions?query=workflow%3A%22tests%22
+ :alt: tests
+
+.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
+ :target: https://github.com/psf/black
+ :alt: Code style: Black
+
+.. .. image:: https://readthedocs.org/projects/zipp/badge/?version=latest
+.. :target: https://zipp.readthedocs.io/en/latest/?badge=latest
+
+
+A pathlib-compatible Zipfile object wrapper. A backport of the
+`Path object <https://docs.python.org/3.8/library/zipfile.html#path-objects>`_.
+
+
diff --git a/third_party/python/zipp/zipp-3.4.1.dist-info/RECORD b/third_party/python/zipp/zipp-3.4.1.dist-info/RECORD
new file mode 100644
index 0000000000..135f87095e
--- /dev/null
+++ b/third_party/python/zipp/zipp-3.4.1.dist-info/RECORD
@@ -0,0 +1,6 @@
+zipp.py,sha256=wMSoYxAIPgYnqJAW0JcAl5sWaIcFc5xk3dNjf6ElGgU,8089
+zipp-3.4.1.dist-info/LICENSE,sha256=2z8CRrH5J48VhFuZ_sR4uLUG63ZIeZNyL4xuJUKF-vg,1050
+zipp-3.4.1.dist-info/METADATA,sha256=ceLXh-zF008K5aguWA5dHZ20bzsRa1kwV3heimH0GXw,2087
+zipp-3.4.1.dist-info/WHEEL,sha256=OqRkF0eY5GHssMorFjlbTIq072vpHpF60fIQA6lS9xA,92
+zipp-3.4.1.dist-info/top_level.txt,sha256=iAbdoSHfaGqBfVb2XuR9JqSQHCoOsOtG6y9C_LSpqFw,5
+zipp-3.4.1.dist-info/RECORD,,
diff --git a/third_party/python/zipp/zipp-3.4.1.dist-info/WHEEL b/third_party/python/zipp/zipp-3.4.1.dist-info/WHEEL
new file mode 100644
index 0000000000..385faab052
--- /dev/null
+++ b/third_party/python/zipp/zipp-3.4.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.36.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/third_party/python/zipp/zipp-3.4.1.dist-info/top_level.txt b/third_party/python/zipp/zipp-3.4.1.dist-info/top_level.txt
new file mode 100644
index 0000000000..e82f676f82
--- /dev/null
+++ b/third_party/python/zipp/zipp-3.4.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+zipp
diff --git a/third_party/python/zipp/zipp.py b/third_party/python/zipp/zipp.py
new file mode 100644
index 0000000000..25ef06e929
--- /dev/null
+++ b/third_party/python/zipp/zipp.py
@@ -0,0 +1,314 @@
+import io
+import posixpath
+import zipfile
+import itertools
+import contextlib
+import sys
+import pathlib
+
+if sys.version_info < (3, 7):
+ from collections import OrderedDict
+else:
+ OrderedDict = dict
+
+
+def _parents(path):
+ """
+ Given a path with elements separated by
+ posixpath.sep, generate all parents of that path.
+
+ >>> list(_parents('b/d'))
+ ['b']
+ >>> list(_parents('/b/d/'))
+ ['/b']
+ >>> list(_parents('b/d/f/'))
+ ['b/d', 'b']
+ >>> list(_parents('b'))
+ []
+ >>> list(_parents(''))
+ []
+ """
+ return itertools.islice(_ancestry(path), 1, None)
+
+
+def _ancestry(path):
+ """
+ Given a path with elements separated by
+ posixpath.sep, generate all elements of that path
+
+ >>> list(_ancestry('b/d'))
+ ['b/d', 'b']
+ >>> list(_ancestry('/b/d/'))
+ ['/b/d', '/b']
+ >>> list(_ancestry('b/d/f/'))
+ ['b/d/f', 'b/d', 'b']
+ >>> list(_ancestry('b'))
+ ['b']
+ >>> list(_ancestry(''))
+ []
+ """
+ path = path.rstrip(posixpath.sep)
+ while path and path != posixpath.sep:
+ yield path
+ path, tail = posixpath.split(path)
+
+
+_dedupe = OrderedDict.fromkeys
+"""Deduplicate an iterable in original order"""
+
+
+def _difference(minuend, subtrahend):
+ """
+ Return items in minuend not in subtrahend, retaining order
+ with O(1) lookup.
+ """
+ return itertools.filterfalse(set(subtrahend).__contains__, minuend)
+
+
+class CompleteDirs(zipfile.ZipFile):
+ """
+ A ZipFile subclass that ensures that implied directories
+ are always included in the namelist.
+ """
+
+ @staticmethod
+ def _implied_dirs(names):
+ parents = itertools.chain.from_iterable(map(_parents, names))
+ as_dirs = (p + posixpath.sep for p in parents)
+ return _dedupe(_difference(as_dirs, names))
+
+ def namelist(self):
+ names = super(CompleteDirs, self).namelist()
+ return names + list(self._implied_dirs(names))
+
+ def _name_set(self):
+ return set(self.namelist())
+
+ def resolve_dir(self, name):
+ """
+ If the name represents a directory, return that name
+ as a directory (with the trailing slash).
+ """
+ names = self._name_set()
+ dirname = name + '/'
+ dir_match = name not in names and dirname in names
+ return dirname if dir_match else name
+
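+    # Sketch of resolve_dir above (illustrative): if namelist() contains
+    # 'b/' but not 'b', resolve_dir('b') -> 'b/'; other names pass through.
+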
+ @classmethod
+ def make(cls, source):
+ """
+ Given a source (filename or zipfile), return an
+ appropriate CompleteDirs subclass.
+ """
+ if isinstance(source, CompleteDirs):
+ return source
+
+ if not isinstance(source, zipfile.ZipFile):
+ return cls(_pathlib_compat(source))
+
+        # Only allow FastLookup when the supplied zipfile is read-only
+ if 'r' not in source.mode:
+ cls = CompleteDirs
+
+ source.__class__ = cls
+ return source
+
+
+class FastLookup(CompleteDirs):
+ """
+ ZipFile subclass to ensure implicit
+ dirs exist and are resolved rapidly.
+ """
+
+ def namelist(self):
+ with contextlib.suppress(AttributeError):
+ return self.__names
+ self.__names = super(FastLookup, self).namelist()
+ return self.__names
+
+ def _name_set(self):
+ with contextlib.suppress(AttributeError):
+ return self.__lookup
+ self.__lookup = super(FastLookup, self)._name_set()
+ return self.__lookup
+
+
+def _pathlib_compat(path):
+ """
+ For path-like objects, convert to a filename for compatibility
+ on Python 3.6.1 and earlier.
+ """
+ try:
+ return path.__fspath__()
+ except AttributeError:
+ return str(path)
+
+
+class Path:
+ """
+ A pathlib-compatible interface for zip files.
+
+ Consider a zip file with this structure::
+
+ .
+ ├── a.txt
+ └── b
+ ├── c.txt
+ └── d
+ └── e.txt
+
+ >>> data = io.BytesIO()
+ >>> zf = zipfile.ZipFile(data, 'w')
+ >>> zf.writestr('a.txt', 'content of a')
+ >>> zf.writestr('b/c.txt', 'content of c')
+ >>> zf.writestr('b/d/e.txt', 'content of e')
+ >>> zf.filename = 'mem/abcde.zip'
+
+ Path accepts the zipfile object itself or a filename
+
+ >>> root = Path(zf)
+
+ From there, several path operations are available.
+
+ Directory iteration (including the zip file itself):
+
+ >>> a, b = root.iterdir()
+ >>> a
+ Path('mem/abcde.zip', 'a.txt')
+ >>> b
+ Path('mem/abcde.zip', 'b/')
+
+ name property:
+
+ >>> b.name
+ 'b'
+
+ join with divide operator:
+
+ >>> c = b / 'c.txt'
+ >>> c
+ Path('mem/abcde.zip', 'b/c.txt')
+ >>> c.name
+ 'c.txt'
+
+ Read text:
+
+ >>> c.read_text()
+ 'content of c'
+
+ existence:
+
+ >>> c.exists()
+ True
+ >>> (b / 'missing.txt').exists()
+ False
+
+ Coercion to string:
+
+ >>> import os
+ >>> str(c).replace(os.sep, posixpath.sep)
+ 'mem/abcde.zip/b/c.txt'
+
+ At the root, ``name``, ``filename``, and ``parent``
+ resolve to the zipfile. Note these attributes are not
+ valid and will raise a ``ValueError`` if the zipfile
+ has no filename.
+
+ >>> root.name
+ 'abcde.zip'
+ >>> str(root.filename).replace(os.sep, posixpath.sep)
+ 'mem/abcde.zip'
+ >>> str(root.parent)
+ 'mem'
+ """
+
+ __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})"
+
+ def __init__(self, root, at=""):
+ """
+ Construct a Path from a ZipFile or filename.
+
+ Note: When the source is an existing ZipFile object,
+ its type (__class__) will be mutated to a
+ specialized type. If the caller wishes to retain the
+ original type, the caller should either create a
+ separate ZipFile object or pass a filename.
+ """
+ self.root = FastLookup.make(root)
+ self.at = at
+
+ def open(self, mode='r', *args, pwd=None, **kwargs):
+ """
+ Open this entry as text or binary following the semantics
+ of ``pathlib.Path.open()`` by passing arguments through
+ to io.TextIOWrapper().
+ """
+ if self.is_dir():
+ raise IsADirectoryError(self)
+ zip_mode = mode[0]
+ if not self.exists() and zip_mode == 'r':
+ raise FileNotFoundError(self)
+ stream = self.root.open(self.at, zip_mode, pwd=pwd)
+ if 'b' in mode:
+ if args or kwargs:
+ raise ValueError("encoding args invalid for binary operation")
+ return stream
+ return io.TextIOWrapper(stream, *args, **kwargs)
+
+ @property
+ def name(self):
+ return pathlib.Path(self.at).name or self.filename.name
+
+ @property
+ def filename(self):
+ return pathlib.Path(self.root.filename).joinpath(self.at)
+
+ def read_text(self, *args, **kwargs):
+ with self.open('r', *args, **kwargs) as strm:
+ return strm.read()
+
+ def read_bytes(self):
+ with self.open('rb') as strm:
+ return strm.read()
+
+ def _is_child(self, path):
+ return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/")
+
+ def _next(self, at):
+ return self.__class__(self.root, at)
+
+ def is_dir(self):
+ return not self.at or self.at.endswith("/")
+
+ def is_file(self):
+ return self.exists() and not self.is_dir()
+
+ def exists(self):
+ return self.at in self.root._name_set()
+
+ def iterdir(self):
+ if not self.is_dir():
+ raise ValueError("Can't listdir a file")
+ subs = map(self._next, self.root.namelist())
+ return filter(self._is_child, subs)
+
+ def __str__(self):
+ return posixpath.join(self.root.filename, self.at)
+
+ def __repr__(self):
+ return self.__repr.format(self=self)
+
+ def joinpath(self, *other):
+ next = posixpath.join(self.at, *map(_pathlib_compat, other))
+ return self._next(self.root.resolve_dir(next))
+
+ __truediv__ = joinpath
+
+ @property
+ def parent(self):
+ if not self.at:
+ return self.filename.parent
+ parent_at = posixpath.dirname(self.at.rstrip('/'))
+ if parent_at:
+ parent_at += '/'
+ return self._next(parent_at)
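+
+
+# Navigation sketch (illustrative; uses the in-memory ``zf`` built in the
+# Path class docstring above):
+#   root = Path(zf)
+#   (root / 'b' / 'd').is_dir()      -> True
+#   (root / 'b' / 'c.txt').parent.at -> 'b/'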